Diffstat (limited to 'test')
-rw-r--r--  test/Analysis/BasicAA/call-attrs.ll | 42
-rw-r--r--  test/Analysis/BasicAA/modref.ll | 6
-rw-r--r--  test/Analysis/BranchProbabilityInfo/basic.ll | 37
-rw-r--r--  test/Analysis/ConstantFolding/gep-constanfolding-error.ll | 52
-rw-r--r--  test/Analysis/ConstantFolding/timeout.ll | 73
-rw-r--r--  test/Analysis/CostModel/AMDGPU/add-sub.ll | 30
-rw-r--r--  test/Analysis/CostModel/AMDGPU/bit-ops.ll | 12
-rw-r--r--  test/Analysis/CostModel/AMDGPU/br.ll | 4
-rw-r--r--  test/Analysis/CostModel/AMDGPU/extractelement.ll | 24
-rw-r--r--  test/Analysis/CostModel/AMDGPU/fabs.ll | 18
-rw-r--r--  test/Analysis/CostModel/AMDGPU/fadd.ll | 18
-rw-r--r--  test/Analysis/CostModel/AMDGPU/fdiv.ll | 18
-rw-r--r--  test/Analysis/CostModel/AMDGPU/fmul.ll | 18
-rw-r--r--  test/Analysis/CostModel/AMDGPU/fsub.ll | 18
-rw-r--r--  test/Analysis/CostModel/AMDGPU/insertelement.ll | 8
-rw-r--r--  test/Analysis/CostModel/AMDGPU/mul.ll | 18
-rw-r--r--  test/Analysis/CostModel/AMDGPU/shifts.ll | 12
-rw-r--r--  test/Analysis/CostModel/PowerPC/load_store.ll | 2
-rw-r--r--  test/Analysis/CostModel/PowerPC/unaligned_ld_st.ll | 26
-rw-r--r--  test/Analysis/CostModel/SystemZ/cmp-ext.ll | 2403
-rw-r--r--  test/Analysis/CostModel/SystemZ/cmpsel.ll | 1987
-rw-r--r--  test/Analysis/CostModel/SystemZ/ext-load.ll | 56
-rw-r--r--  test/Analysis/CostModel/SystemZ/fp-arith.ll | 119
-rw-r--r--  test/Analysis/CostModel/SystemZ/fp-cast.ll | 541
-rw-r--r--  test/Analysis/CostModel/SystemZ/int-arith.ll | 326
-rw-r--r--  test/Analysis/CostModel/SystemZ/int-cast.ll | 199
-rw-r--r--  test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll | 66
-rw-r--r--  test/Analysis/CostModel/SystemZ/lit.local.cfg | 2
-rw-r--r--  test/Analysis/CostModel/SystemZ/load_store.ll | 137
-rw-r--r--  test/Analysis/CostModel/SystemZ/logical.ll | 277
-rw-r--r--  test/Analysis/CostModel/SystemZ/memop-folding-int-arith.ll | 259
-rw-r--r--  test/Analysis/CostModel/SystemZ/scalar-cmp-cmp-log-sel.ll | 1624
-rw-r--r--  test/Analysis/CostModel/SystemZ/shuffle.ll | 112
-rw-r--r--  test/Analysis/CostModel/SystemZ/vectorinstrs.ll | 56
-rw-r--r--  test/Analysis/CostModel/X86/arith-fp.ll | 24
-rw-r--r--  test/Analysis/CostModel/X86/bitreverse.ll | 52
-rw-r--r--  test/Analysis/CostModel/X86/shuffle-single-src.ll | 151
-rw-r--r--  test/Analysis/CostModel/X86/vshift-ashr-cost.ll | 199
-rw-r--r--  test/Analysis/CostModel/X86/vshift-lshr-cost.ll | 174
-rw-r--r--  test/Analysis/CostModel/X86/vshift-shl-cost.ll | 164
-rw-r--r--  test/Analysis/Delinearization/a.ll | 2
-rw-r--r--  test/Analysis/Delinearization/iv_times_constant_in_subscript.ll | 2
-rw-r--r--  test/Analysis/DemandedBits/intrinsics.ll | 25
-rw-r--r--  test/Analysis/DivergenceAnalysis/AMDGPU/atomics.ll | 30
-rw-r--r--  test/Analysis/DivergenceAnalysis/AMDGPU/interp-intrinsics.ll | 22
-rw-r--r--  test/Analysis/DivergenceAnalysis/AMDGPU/intrinsics.ll | 13
-rw-r--r--  test/Analysis/DivergenceAnalysis/AMDGPU/kernel-args.ll | 2
-rw-r--r--  test/Analysis/DivergenceAnalysis/AMDGPU/no-return-blocks.ll | 2
-rw-r--r--  test/Analysis/DivergenceAnalysis/AMDGPU/unreachable-loop-block.ll | 2
-rw-r--r--  test/Analysis/DivergenceAnalysis/AMDGPU/workitem-intrinsics.ll | 10
-rw-r--r--  test/Analysis/IVUsers/quadradic-exit-value.ll (renamed from test/Transforms/LoopStrengthReduce/quadradic-exit-value.ll) | 15
-rw-r--r--  test/Analysis/LazyValueAnalysis/invalidation.ll | 64
-rw-r--r--  test/Analysis/LazyValueAnalysis/lvi-after-jumpthreading.ll | 84
-rw-r--r--  test/Analysis/LoopAccessAnalysis/multiple-strides-rt-memory-checks.ll | 8
-rw-r--r--  test/Analysis/LoopAccessAnalysis/pr31098.ll | 99
-rw-r--r--  test/Analysis/MemoryDependenceAnalysis/invalidation.ll | 14
-rw-r--r--  test/Analysis/MemorySSA/assume.ll (renamed from test/Transforms/Util/MemorySSA/assume.ll) | 0
-rw-r--r--  test/Analysis/MemorySSA/atomic-clobber.ll (renamed from test/Transforms/Util/MemorySSA/atomic-clobber.ll) | 0
-rw-r--r--  test/Analysis/MemorySSA/basicaa-memcpy.ll (renamed from test/Transforms/Util/MemorySSA/basicaa-memcpy.ll) | 0
-rw-r--r--  test/Analysis/MemorySSA/constant-memory.ll (renamed from test/Transforms/Util/MemorySSA/constant-memory.ll) | 0
-rw-r--r--  test/Analysis/MemorySSA/cyclicphi.ll (renamed from test/Transforms/Util/MemorySSA/cyclicphi.ll) | 0
-rw-r--r--  test/Analysis/MemorySSA/forward-unreachable.ll (renamed from test/Transforms/Util/MemorySSA/forward-unreachable.ll) | 0
-rw-r--r--  test/Analysis/MemorySSA/function-clobber.ll (renamed from test/Transforms/Util/MemorySSA/function-clobber.ll) | 0
-rw-r--r--  test/Analysis/MemorySSA/function-mem-attrs.ll (renamed from test/Transforms/Util/MemorySSA/function-mem-attrs.ll) | 0
-rw-r--r--  test/Analysis/MemorySSA/invariant-groups.ll | 301
-rw-r--r--  test/Analysis/MemorySSA/lifetime-simple.ll (renamed from test/Transforms/Util/MemorySSA/lifetime-simple.ll) | 14
-rw-r--r--  test/Analysis/MemorySSA/load-invariant.ll (renamed from test/Transforms/Util/MemorySSA/load-invariant.ll) | 3
-rw-r--r--  test/Analysis/MemorySSA/many-dom-backedge.ll (renamed from test/Transforms/Util/MemorySSA/many-dom-backedge.ll) | 0
-rw-r--r--  test/Analysis/MemorySSA/many-doms.ll (renamed from test/Transforms/Util/MemorySSA/many-doms.ll) | 0
-rw-r--r--  test/Analysis/MemorySSA/multi-edges.ll (renamed from test/Transforms/Util/MemorySSA/multi-edges.ll) | 0
-rw-r--r--  test/Analysis/MemorySSA/multiple-backedges-hal.ll (renamed from test/Transforms/Util/MemorySSA/multiple-backedges-hal.ll) | 0
-rw-r--r--  test/Analysis/MemorySSA/multiple-locations.ll (renamed from test/Transforms/Util/MemorySSA/multiple-locations.ll) | 0
-rw-r--r--  test/Analysis/MemorySSA/no-disconnected.ll (renamed from test/Transforms/Util/MemorySSA/no-disconnected.ll) | 0
-rw-r--r--  test/Analysis/MemorySSA/optimize-use.ll (renamed from test/Transforms/Util/MemorySSA/optimize-use.ll) | 0
-rw-r--r--  test/Analysis/MemorySSA/phi-translation.ll (renamed from test/Transforms/Util/MemorySSA/phi-translation.ll) | 0
-rw-r--r--  test/Analysis/MemorySSA/pr28880.ll (renamed from test/Transforms/Util/MemorySSA/pr28880.ll) | 0
-rw-r--r--  test/Analysis/MemorySSA/ptr-const-mem.ll | 23
-rw-r--r--  test/Analysis/MemorySSA/volatile-clobber.ll (renamed from test/Transforms/Util/MemorySSA/volatile-clobber.ll) | 0
-rw-r--r--  test/Analysis/RegionInfo/outgoing_edge.ll | 33
-rw-r--r--  test/Analysis/RegionInfo/outgoing_edge_1.ll | 39
-rw-r--r--  test/Analysis/ScalarEvolution/2011-04-26-FoldAddRec.ll | 1
-rw-r--r--  test/Analysis/ScalarEvolution/2012-05-18-LoopPredRecurse.ll | 1
-rw-r--r--  test/Analysis/ScalarEvolution/avoid-infinite-recursion-1.ll | 1
-rw-r--r--  test/Analysis/ScalarEvolution/flags-from-poison.ll | 26
-rw-r--r--  test/Analysis/ScalarEvolution/implied-via-addition.ll | 50
-rw-r--r--  test/Analysis/ScalarEvolution/implied-via-division.ll | 331
-rw-r--r--  test/Analysis/ScalarEvolution/invalidation.ll | 13
-rw-r--r--  test/Analysis/ScalarEvolution/max-addops-inline.ll | 17
-rw-r--r--  test/Analysis/ScalarEvolution/pr18606-min-zeros.ll | 63
-rw-r--r--  test/Analysis/ScalarEvolution/pr24757.ll | 2
-rw-r--r--  test/Analysis/ScalarEvolution/sext-inreg.ll | 4
-rw-r--r--  test/Analysis/ScalarEvolution/sext-mul.ll | 89
-rw-r--r--  test/Analysis/ScalarEvolution/sext-zero.ll | 39
-rw-r--r--  test/Analysis/ScalarEvolution/trip-count-pow2.ll | 38
-rw-r--r--  test/Analysis/ScalarEvolution/tripmultiple_calculation.ll | 125
-rw-r--r--  test/Analysis/ScalarEvolution/zext-wrap.ll | 4
-rw-r--r--  test/Analysis/ValueTracking/known-nonnull-at.ll | 57
-rw-r--r--  test/Assembler/alloca-addrspace-parse-error-0.ll | 11
-rw-r--r--  test/Assembler/alloca-addrspace-parse-error-1.ll | 12
-rw-r--r--  test/Assembler/alloca-addrspace0.ll | 24
-rw-r--r--  test/Assembler/auto_upgrade_intrinsics.ll | 30
-rw-r--r--  test/Assembler/auto_upgrade_nvvm_intrinsics.ll | 102
-rw-r--r--  test/Assembler/datalayout-alloca-addrspace-mismatch-0.ll | 9
-rw-r--r--  test/Assembler/datalayout-alloca-addrspace-mismatch-1.ll | 9
-rw-r--r--  test/Assembler/datalayout-alloca-addrspace-mismatch-2.ll | 11
-rw-r--r--  test/Assembler/datalayout-alloca-addrspace.ll | 23
-rw-r--r--  test/Assembler/debug-info.ll | 6
-rw-r--r--  test/Assembler/diexpression.ll | 6
-rw-r--r--  test/Assembler/fast-math-flags.ll | 12
-rw-r--r--  test/Assembler/invalid-datalayout-alloca-addrspace.ll | 4
-rw-r--r--  test/Bitcode/DIGlobalVariableExpression2.ll | 31
-rw-r--r--  test/Bitcode/DIGlobalVariableExpression2.ll.bc | bin 0 -> 864 bytes
-rw-r--r--  test/Bitcode/compatibility-3.6.ll | 5
-rw-r--r--  test/Bitcode/compatibility-3.7.ll | 5
-rw-r--r--  test/Bitcode/compatibility-3.8.ll | 5
-rw-r--r--  test/Bitcode/compatibility-3.9.ll | 9
-rw-r--r--  test/Bitcode/compatibility-4.0.ll | 1690
-rw-r--r--  test/Bitcode/compatibility-4.0.ll.bc | bin 0 -> 16400 bytes
-rw-r--r--  test/Bitcode/compatibility.ll | 11
-rw-r--r--  test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll | 9
-rw-r--r--  test/Bitcode/thinlto-function-summary.ll | 2
-rw-r--r--  test/Bitcode/thinlto-type-vcalls.ll | 105
-rw-r--r--  test/Bitcode/upgrade-debug-info-for-profiling.ll | 10
-rw-r--r--  test/Bitcode/upgrade-debug-info-for-profiling.ll.bc | bin 0 -> 888 bytes
-rw-r--r--  test/Bitcode/upgrade-pointer-address-space.ll | 5
-rw-r--r--  test/Bitcode/upgrade-pointer-address-space.ll.bc | bin 0 -> 1676 bytes
-rwxr-xr-x  test/BugPoint/compile-custom.ll | 2
-rw-r--r--  test/BugPoint/invalid-debuginfo.ll | 2
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/arm64-callingconv-ios.ll | 28
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll | 38
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll | 107
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/arm64-instructionselect.mir | 2979
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-stackprotect.ll | 2
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll | 573
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir | 14
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/call-translator.ll | 26
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/debug-insts.ll | 68
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll | 48
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/gisel-abort.ll | 2
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/gisel-commandline-option.ll | 48
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/gisel-fail-intermediate-legalizer.ll | 8
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/inline-asm.ll | 10
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/irtranslator-bitcast.ll | 30
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll | 60
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-add.mir | 27
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-and.mir | 7
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir | 2
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-combines.mir | 180
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-constant.mir | 4
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-div.mir | 2
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll | 53
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-ext.mir | 2
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-fcmp.mir | 2
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-fneg.mir | 48
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-fptoi.mir | 201
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-gep.mir | 2
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-inserts.mir | 141
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir | 206
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir | 26
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-mul.mir | 27
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-nonpowerof2eltsvec.mir | 29
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-or.mir | 7
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-pow.mir | 38
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-rem.mir | 13
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-shift.mir | 47
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-simple.mir | 125
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-sub.mir | 7
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-vaarg.mir | 39
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/legalize-xor.mir | 7
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/no-regclass.mir | 30
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/regbankselect-dbg-value.mir | 45
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir | 4
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/regbankselect-reg_sequence.mir | 25
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select-binop.mir | 1042
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select-bitcast.mir | 212
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select-br.mir | 71
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select-cbz.mir | 108
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select-constant.mir | 77
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir | 69
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select-fp-casts.mir | 478
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select-int-ext.mir | 274
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select-int-ptr-casts.mir | 150
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select-load.mir | 515
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select-muladd.mir | 50
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select-property.mir | 21
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select-store.mir | 463
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select-trunc.mir | 81
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select-xor.mir | 165
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/select.mir | 311
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/translate-gep.ll | 4
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/varargs-ios-translator.ll | 16
-rw-r--r--  test/CodeGen/AArch64/GlobalISel/vastart.ll | 13
-rw-r--r--  test/CodeGen/AArch64/aarch64-codegen-prepare-atp.ll | 68
-rw-r--r--  test/CodeGen/AArch64/aarch64-fold-lslfast.ll | 74
-rw-r--r--  test/CodeGen/AArch64/aarch64-gep-opt.ll | 8
-rw-r--r--  test/CodeGen/AArch64/aarch64-named-reg-w18.ll | 14
-rw-r--r--  test/CodeGen/AArch64/aarch64-named-reg-x18.ll | 14
-rw-r--r--  test/CodeGen/AArch64/and-sink.ll | 90
-rw-r--r--  test/CodeGen/AArch64/argument-blocks.ll | 4
-rw-r--r--  test/CodeGen/AArch64/arm64-abi-varargs.ll | 6
-rw-r--r--  test/CodeGen/AArch64/arm64-abi.ll | 5
-rw-r--r--  test/CodeGen/AArch64/arm64-addr-type-promotion.ll | 11
-rw-r--r--  test/CodeGen/AArch64/arm64-addrmode.ll | 4
-rw-r--r--  test/CodeGen/AArch64/arm64-atomic.ll | 22
-rw-r--r--  test/CodeGen/AArch64/arm64-bitfield-extract.ll | 8
-rw-r--r--  test/CodeGen/AArch64/arm64-blockaddress.ll | 6
-rw-r--r--  test/CodeGen/AArch64/arm64-builtins-linux.ll | 4
-rw-r--r--  test/CodeGen/AArch64/arm64-code-model-large-abs.ll | 36
-rw-r--r--  test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll | 36
-rw-r--r--  test/CodeGen/AArch64/arm64-const-addr.ll | 4
-rw-r--r--  test/CodeGen/AArch64/arm64-crc32.ll | 1
-rw-r--r--  test/CodeGen/AArch64/arm64-elf-globals.ll | 4
-rw-r--r--  test/CodeGen/AArch64/arm64-extern-weak.ll | 18
-rw-r--r--  test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll | 4
-rw-r--r--  test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll | 91
-rw-r--r--  test/CodeGen/AArch64/arm64-inline-asm.ll | 4
-rw-r--r--  test/CodeGen/AArch64/arm64-memset-inline.ll | 4
-rw-r--r--  test/CodeGen/AArch64/arm64-movi.ll | 22
-rw-r--r--  test/CodeGen/AArch64/arm64-neon-copy.ll | 2
-rw-r--r--  test/CodeGen/AArch64/arm64-neon-v8.1a.ll | 1
-rw-r--r--  test/CodeGen/AArch64/arm64-opt-remarks-lazy-bfi.ll | 63
-rw-r--r--  test/CodeGen/AArch64/arm64-regress-opt-cmp.mir | 2
-rw-r--r--  test/CodeGen/AArch64/arm64-shrink-wrapping.ll | 14
-rw-r--r--  test/CodeGen/AArch64/arm64-spill-remarks.ll | 117
-rw-r--r--  test/CodeGen/AArch64/arm64-summary-remarks.ll | 15
-rw-r--r--  test/CodeGen/AArch64/arm64-variadic-aapcs.ll | 2
-rw-r--r--  test/CodeGen/AArch64/bitfield-insert.ll | 12
-rw-r--r--  test/CodeGen/AArch64/blockaddress.ll | 6
-rw-r--r--  test/CodeGen/AArch64/br-cond-not-merge.ll | 94
-rw-r--r--  test/CodeGen/AArch64/branch-relax-cbz.ll | 13
-rw-r--r--  test/CodeGen/AArch64/code-model-large-abs.ll | 30
-rw-r--r--  test/CodeGen/AArch64/concat_vector-scalar-combine.ll | 6
-rw-r--r--  test/CodeGen/AArch64/cpus.ll | 2
-rw-r--r--  test/CodeGen/AArch64/dag-numsignbits.ll | 33
-rw-r--r--  test/CodeGen/AArch64/eliminate-trunc.ll | 4
-rw-r--r--  test/CodeGen/AArch64/extern-weak.ll | 18
-rw-r--r--  test/CodeGen/AArch64/fast-isel-tail-call.ll | 24
-rw-r--r--  test/CodeGen/AArch64/fast-isel-tbz.ll | 18
-rw-r--r--  test/CodeGen/AArch64/fpimm.ll | 10
-rw-r--r--  test/CodeGen/AArch64/jump-table.ll | 6
-rw-r--r--  test/CodeGen/AArch64/large-consts.ll | 6
-rw-r--r--  test/CodeGen/AArch64/ldst-opt-aa.mir | 30
-rw-r--r--  test/CodeGen/AArch64/ldst-opt.mir | 2
-rw-r--r--  test/CodeGen/AArch64/literal_pools_float.ll | 12
-rw-r--r--  test/CodeGen/AArch64/live-interval-analysis.mir | 22
-rw-r--r--  test/CodeGen/AArch64/load-combine-big-endian.ll | 584
-rw-r--r--  test/CodeGen/AArch64/load-combine.ll | 548
-rw-r--r--  test/CodeGen/AArch64/machine-combiner-madd.ll | 2
-rw-r--r--  test/CodeGen/AArch64/machine-copy-remove.mir | 672
-rw-r--r--  test/CodeGen/AArch64/machine-outliner.ll | 43
-rw-r--r--  test/CodeGen/AArch64/mature-mc-support.ll | 2
-rw-r--r--  test/CodeGen/AArch64/merge-store.ll | 3
-rw-r--r--  test/CodeGen/AArch64/misched-fusion-aes.ll | 207
-rw-r--r--  test/CodeGen/AArch64/misched-fusion-lit.ll | 46
-rw-r--r--  test/CodeGen/AArch64/misched-fusion.ll | 12
-rw-r--r--  test/CodeGen/AArch64/movimm-wzr.mir | 2
-rw-r--r--  test/CodeGen/AArch64/movw-shift-encoding.ll | 8
-rw-r--r--  test/CodeGen/AArch64/neon-fma-FMF.ll | 53
-rw-r--r--  test/CodeGen/AArch64/optimize-cond-branch.ll | 2
-rw-r--r--  test/CodeGen/AArch64/pr27816.ll | 48
-rw-r--r--  test/CodeGen/AArch64/prefixdata.ll | 29
-rw-r--r--  test/CodeGen/AArch64/regcoal-physreg.mir | 51
-rw-r--r--  test/CodeGen/AArch64/regress-tblgen-chains.ll | 2
-rw-r--r--  test/CodeGen/AArch64/remat.ll | 2
-rw-r--r--  test/CodeGen/AArch64/selectiondag-order.ll | 96
-rw-r--r--  test/CodeGen/AArch64/stack-protector-target.ll | 10
-rw-r--r--  test/CodeGen/AArch64/stack_guard_remat.ll | 14
-rw-r--r--  test/CodeGen/AArch64/tail-dup-repeat-worklist.ll | 69
-rw-r--r--  test/CodeGen/AArch64/tailcall-string-rvo.ll | 47
-rw-r--r--  test/CodeGen/AArch64/tbz-tbnz.ll | 16
-rw-r--r--  test/CodeGen/AArch64/thread-pointer.ll | 60
-rw-r--r--  test/CodeGen/AArch64/vector_merge_dep_check.ll | 3
-rw-r--r--  test/CodeGen/AArch64/xray-tail-call-sled.ll | 69
-rw-r--r--  test/CodeGen/AMDGPU/32-bit-local-address-space.ll | 24
-rw-r--r--  test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir | 28
-rw-r--r--  test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir | 142
-rw-r--r--  test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir | 29
-rw-r--r--  test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir | 69
-rw-r--r--  test/CodeGen/AMDGPU/GlobalISel/shader-epilogs.ll | 11
-rw-r--r--  test/CodeGen/AMDGPU/GlobalISel/smrd.ll | 89
-rw-r--r--  test/CodeGen/AMDGPU/add-debug.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/add.i16.ll | 18
-rw-r--r--  test/CodeGen/AMDGPU/add.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/add.v2i16.ll | 283
-rw-r--r--  test/CodeGen/AMDGPU/add_i128.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/add_i64.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/addrspacecast-captured.ll | 47
-rw-r--r--  test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll | 28
-rw-r--r--  test/CodeGen/AMDGPU/addrspacecast.ll | 129
-rw-r--r--  test/CodeGen/AMDGPU/amdgcn.bitcast.ll | 59
-rw-r--r--  test/CodeGen/AMDGPU/amdgcn.private-memory.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/amdgcn.sendmsg-m0.ll | 41
-rw-r--r--  test/CodeGen/AMDGPU/amdgcn.sendmsg.ll | 161
-rw-r--r--  test/CodeGen/AMDGPU/amdgpu-alias-analysis.ll | 9
-rw-r--r--  test/CodeGen/AMDGPU/amdgpu-codegenprepare-fdiv.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll | 88
-rw-r--r--  test/CodeGen/AMDGPU/amdgpu-shader-calling-convention.ll | 5
-rw-r--r--  test/CodeGen/AMDGPU/amdgpu.private-memory.ll | 92
-rw-r--r--  test/CodeGen/AMDGPU/amdgpu.work-item-intrinsics.deprecated.ll | 30
-rw-r--r--  test/CodeGen/AMDGPU/and-gcn.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/and.ll | 80
-rw-r--r--  test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll | 100
-rw-r--r--  test/CodeGen/AMDGPU/annotate-kernel-features.ll | 78
-rw-r--r--  test/CodeGen/AMDGPU/anonymous-gv.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/any_extend_vector_inreg.ll | 58
-rw-r--r--  test/CodeGen/AMDGPU/anyext.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/array-ptr-calc-i32.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/array-ptr-calc-i64.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/ashr.v2i16.ll | 161
-rw-r--r--  test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/atomic_load_add.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/atomic_load_sub.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/attr-amdgpu-flat-work-group-size.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/attr-amdgpu-num-sgpr.ll | 34
-rw-r--r--  test/CodeGen/AMDGPU/attr-amdgpu-num-vgpr.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/attr-amdgpu-waves-per-eu.ll | 26
-rw-r--r--  test/CodeGen/AMDGPU/attr-unparseable.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/barrier-elimination.ll | 30
-rw-r--r--  test/CodeGen/AMDGPU/basic-branch.ll | 9
-rw-r--r--  test/CodeGen/AMDGPU/basic-loop.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/bfe-patterns.ll | 163
-rw-r--r--  test/CodeGen/AMDGPU/bfe_uint.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/bfi_int.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/bfm.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/big_alu.ll | 110
-rw-r--r--  test/CodeGen/AMDGPU/bitcast-vector-extract.ll | 32
-rw-r--r--  test/CodeGen/AMDGPU/bitreverse-inline-immediates.ll | 54
-rw-r--r--  test/CodeGen/AMDGPU/bitreverse.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/br_cc.f16.ll | 48
-rw-r--r--  test/CodeGen/AMDGPU/branch-condition-and.ll | 17
-rw-r--r--  test/CodeGen/AMDGPU/branch-relax-spill.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/branch-relaxation.ll | 39
-rw-r--r--  test/CodeGen/AMDGPU/bswap.ll | 14
-rw-r--r--  test/CodeGen/AMDGPU/build_vector.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/call.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/calling-conventions.ll | 43
-rw-r--r--  test/CodeGen/AMDGPU/captured-frame-index.ll | 78
-rw-r--r--  test/CodeGen/AMDGPU/cf-loop-on-constant.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/cf-stack-bug.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/cf_end.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll | 18
-rw-r--r--  test/CodeGen/AMDGPU/cgp-addressing-modes.ll | 222
-rw-r--r--  test/CodeGen/AMDGPU/cgp-bitfield-extract.ll | 19
-rw-r--r--  test/CodeGen/AMDGPU/clamp-modifier.ll | 222
-rw-r--r--  test/CodeGen/AMDGPU/clamp-omod-special-case.mir | 424
-rw-r--r--  test/CodeGen/AMDGPU/clamp.ll | 529
-rw-r--r--  test/CodeGen/AMDGPU/cndmask-no-def-vcc.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/coalescer-subrange-crash.ll | 32
-rw-r--r--  test/CodeGen/AMDGPU/coalescer_remat.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/code-object-metadata-deduce-ro-arg.ll | 33
-rw-r--r--  test/CodeGen/AMDGPU/code-object-metadata-from-llvm-ir-full.ll | 1260
-rw-r--r--  test/CodeGen/AMDGPU/code-object-metadata-invalid-ocl-version-1.ll | 9
-rw-r--r--  test/CodeGen/AMDGPU/code-object-metadata-invalid-ocl-version-2.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/code-object-metadata-invalid-ocl-version-3.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/code-object-metadata-kernel-code-props.ll | 32
-rw-r--r--  test/CodeGen/AMDGPU/code-object-metadata-kernel-debug-props.ll | 67
-rw-r--r--  test/CodeGen/AMDGPU/codegen-prepare-addrmode-sext.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/combine_vloads.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/commute-compares.ll | 106
-rw-r--r--  test/CodeGen/AMDGPU/commute-shifts.ll | 17
-rw-r--r--  test/CodeGen/AMDGPU/commute_modifiers.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/concat_vectors.ll | 64
-rw-r--r--  test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir | 16
-rw-r--r--  test/CodeGen/AMDGPU/constant-fold-mi-operands.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/control-flow-fastregalloc.ll | 37
-rw-r--r--  test/CodeGen/AMDGPU/convergent-inlineasm.ll | 5
-rw-r--r--  test/CodeGen/AMDGPU/copy-illegal-type.ll | 22
-rw-r--r--  test/CodeGen/AMDGPU/copy-to-reg.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/ctlz.ll | 35
-rw-r--r--  test/CodeGen/AMDGPU/ctlz_zero_undef.ll | 34
-rw-r--r--  test/CodeGen/AMDGPU/ctpop.ll | 30
-rw-r--r--  test/CodeGen/AMDGPU/ctpop64.ll | 22
-rw-r--r--  test/CodeGen/AMDGPU/cttz_zero_undef.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/cube.ll | 18
-rw-r--r--  test/CodeGen/AMDGPU/cvt_f32_ubyte.ll | 32
-rw-r--r--  test/CodeGen/AMDGPU/cvt_flr_i32_f32.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/cvt_rpi_i32_f32.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/dagcombine-reassociate-bug.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/dagcombiner-bug-illegal-vec4-int-to-fp.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/debug.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/debugger-emit-prologue.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/debugger-insert-nops.ll | 26
-rw-r--r--  test/CodeGen/AMDGPU/debugger-reserve-regs.ll | 3
-rw-r--r--  test/CodeGen/AMDGPU/default-fp-mode.ll | 52
-rw-r--r--  test/CodeGen/AMDGPU/detect-dead-lanes.mir | 18
-rw-r--r--  test/CodeGen/AMDGPU/disconnected-predset-break-bug.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/drop-mem-operand-move-smrd.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/ds-combine-large-stride.ll | 412
-rw-r--r--  test/CodeGen/AMDGPU/ds-negative-offset-addressing-mode-loop.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/ds-sub-offset.ll | 14
-rw-r--r--  test/CodeGen/AMDGPU/ds_read2.ll | 48
-rw-r--r--  test/CodeGen/AMDGPU/ds_read2_offset_order.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/ds_read2_superreg.ll | 24
-rw-r--r--  test/CodeGen/AMDGPU/ds_read2st64.ll | 30
-rw-r--r--  test/CodeGen/AMDGPU/ds_write2.ll | 42
-rw-r--r--  test/CodeGen/AMDGPU/ds_write2st64.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/dynamic_stackalloc.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/early-if-convert-cost.ll | 110
-rw-r--r--  test/CodeGen/AMDGPU/early-if-convert.ll | 454
-rw-r--r--  test/CodeGen/AMDGPU/early-inline-alias.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/early-inline.ll | 25
-rw-r--r--  test/CodeGen/AMDGPU/elf.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/elf.r600.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/else.ll | 18
-rw-r--r--  test/CodeGen/AMDGPU/empty-function.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/enable-no-signed-zeros-fp-math.ll | 22
-rw-r--r--  test/CodeGen/AMDGPU/endcf-loop-header.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/env-amdgiz.ll | 11
-rw-r--r--  test/CodeGen/AMDGPU/env-amdgizcl.ll | 11
-rw-r--r--  test/CodeGen/AMDGPU/exceed-max-sgprs.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/extend-bit-ops-i16.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/extload-align.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/extload-private.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/extload.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/extract-vector-elt-build-vector-combine.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/extract_vector_elt-f16.ll | 128
-rw-r--r--  test/CodeGen/AMDGPU/extract_vector_elt-f64.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/extract_vector_elt-i16.ll | 125
-rw-r--r--  test/CodeGen/AMDGPU/extract_vector_elt-i64.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/extract_vector_elt-i8.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/extractelt-to-trunc.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/fabs.f16.ll | 123
-rw-r--r--  test/CodeGen/AMDGPU/fabs.f64.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/fabs.ll | 14
-rw-r--r--  test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/fadd.f16.ll | 88
-rw-r--r--  test/CodeGen/AMDGPU/fadd.ll | 19
-rw-r--r--  test/CodeGen/AMDGPU/fadd64.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/fcanonicalize.f16.ll | 321
-rw-r--r--  test/CodeGen/AMDGPU/fcanonicalize.ll | 146
-rw-r--r--  test/CodeGen/AMDGPU/fceil.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/fceil64.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/fcmp-cnd.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/fcmp-cnde-int-args.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/fcmp.f16.ll | 64
-rw-r--r--  test/CodeGen/AMDGPU/fcmp.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/fcmp64.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/fconst64.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/fcopysign.f16.ll | 264
-rw-r--r--  test/CodeGen/AMDGPU/fcopysign.f32.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/fcopysign.f64.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/fdiv.f16.ll | 54
-rw-r--r--  test/CodeGen/AMDGPU/fdiv.f64.ll | 101
-rw-r--r--  test/CodeGen/AMDGPU/fdiv.ll | 28
-rw-r--r--  test/CodeGen/AMDGPU/ffloor.f64.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/ffloor.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/fix-vgpr-copies.mir | 44
-rw-r--r--  test/CodeGen/AMDGPU/flat-address-space.ll | 56
-rw-r--r--  test/CodeGen/AMDGPU/flat-for-global-subtarget-feature.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/flat-scratch-reg.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/flat_atomics.ll | 194
-rw-r--r--  test/CodeGen/AMDGPU/flat_atomics_i64.ll | 194
-rw-r--r--  test/CodeGen/AMDGPU/fma-combine.ll | 52
-rw-r--r--  test/CodeGen/AMDGPU/fma.f64.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/fma.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/fmax3.f64.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/fmax3.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/fmax_legacy.f64.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/fmax_legacy.ll | 14
-rw-r--r--  test/CodeGen/AMDGPU/fmaxnum.f64.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/fmaxnum.ll | 34
-rw-r--r--  test/CodeGen/AMDGPU/fmed3.ll | 851
-rw-r--r--  test/CodeGen/AMDGPU/fmin3.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll | 47
-rw-r--r--  test/CodeGen/AMDGPU/fmin_legacy.f64.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/fmin_legacy.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/fminnum.f64.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/fminnum.ll | 34
-rw-r--r--  test/CodeGen/AMDGPU/fmul-2-combine-multi-use.ll | 55
-rw-r--r--  test/CodeGen/AMDGPU/fmul.f16.ll | 76
-rw-r--r--  test/CodeGen/AMDGPU/fmul.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/fmul64.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/fmuladd.f16.ll | 50
-rw-r--r--  test/CodeGen/AMDGPU/fmuladd.f32.ll | 36
-rw-r--r--  test/CodeGen/AMDGPU/fmuladd.f64.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/fmuladd.v2f16.ll | 107
-rw-r--r--  test/CodeGen/AMDGPU/fnearbyint.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/fneg-combines.ll | 951
-rw-r--r--  test/CodeGen/AMDGPU/fneg-fabs.f16.ll | 156
-rw-r--r--  test/CodeGen/AMDGPU/fneg-fabs.f64.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/fneg-fabs.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/fneg.f16.ll | 120
-rw-r--r--  test/CodeGen/AMDGPU/fneg.f64.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/fneg.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/fold-cndmask.mir | 34
-rw-r--r--  test/CodeGen/AMDGPU/fold-immediate-output-mods.mir | 306
-rw-r--r--  test/CodeGen/AMDGPU/fp-classify.ll | 18
-rw-r--r--  test/CodeGen/AMDGPU/fp16_to_fp32.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/fp16_to_fp64.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/fp32_to_fp16.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/fp_to_sint.f64.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/fp_to_sint.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/fp_to_uint.f64.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/fp_to_uint.ll | 18
-rw-r--r--  test/CodeGen/AMDGPU/fpext.f16.ll | 243
-rw-r--r--  test/CodeGen/AMDGPU/fpext.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/fptosi.f16.ll | 43
-rw-r--r--  test/CodeGen/AMDGPU/fptoui.f16.ll | 37
-rw-r--r--  test/CodeGen/AMDGPU/fptrunc.f16.ll | 163
-rw-r--r--  test/CodeGen/AMDGPU/fptrunc.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/fract.f64.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/fract.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/frem.ll | 25
-rw-r--r--  test/CodeGen/AMDGPU/fsqrt.f64.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/fsqrt.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/fsub.f16.ll | 141
-rw-r--r--  test/CodeGen/AMDGPU/fsub.ll | 81
-rw-r--r--  test/CodeGen/AMDGPU/fsub64.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/ftrunc.f64.ll | 14
-rw-r--r--  test/CodeGen/AMDGPU/ftrunc.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/gep-address-space.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/global-constant.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/global-directive.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/global-extload-i16.ll | 64
-rw-r--r--  test/CodeGen/AMDGPU/global-variable-relocs.ll | 22
-rw-r--r--  test/CodeGen/AMDGPU/global_atomics.ll | 196
-rw-r--r--  test/CodeGen/AMDGPU/global_atomics_i64.ll | 194
-rw-r--r--  test/CodeGen/AMDGPU/gv-const-addrspace.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/gv-offset-folding.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/half.ll | 221
-rw-r--r--  test/CodeGen/AMDGPU/hsa-default-device.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/hsa-fp-mode.ll | 31
-rw-r--r--  test/CodeGen/AMDGPU/hsa-func.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/hsa-globals.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/hsa-group-segment.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/hsa-note-no-func.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/hsa.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/i1-copy-implicit-def.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/i1-copy-phi.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/i8-to-double-to-float.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/icmp-select-sete-reverse-args.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/icmp.i16.ll | 40
-rw-r--r--  test/CodeGen/AMDGPU/icmp64.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/illegal-sgpr-to-vgpr-copy.ll | 45
-rw-r--r--  test/CodeGen/AMDGPU/image-attributes.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/image-resource-id.ll | 40
-rw-r--r--  test/CodeGen/AMDGPU/imm.ll | 151
-rw-r--r--  test/CodeGen/AMDGPU/imm16.ll | 66
-rw-r--r--  test/CodeGen/AMDGPU/immv216.ll | 446
-rw-r--r--  test/CodeGen/AMDGPU/indirect-addressing-si-noopt.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/indirect-addressing-si.ll | 49
-rw-r--r--  test/CodeGen/AMDGPU/indirect-private-64.ll | 22
-rw-r--r--  test/CodeGen/AMDGPU/infinite-loop-evergreen.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/infinite-loop.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/inline-asm.ll | 81
-rw-r--r--  test/CodeGen/AMDGPU/inline-calls.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/inline-constraints.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/inlineasm-16.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/inlineasm-illegal-type.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/inlineasm-packed.ll | 57
-rw-r--r--  test/CodeGen/AMDGPU/insert-skips-kill-uncond.mir | 40
-rw-r--r--  test/CodeGen/AMDGPU/insert-waits-callee.mir | 25
-rw-r--r--  test/CodeGen/AMDGPU/insert-waits-exp.mir | 12
-rw-r--r--  test/CodeGen/AMDGPU/insert_subreg.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/insert_vector_elt.ll | 156
-rw-r--r--  test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll | 470
-rw-r--r--  test/CodeGen/AMDGPU/inserted-wait-states.mir | 226
-rw-r--r--  test/CodeGen/AMDGPU/internalize.ll | 35
-rw-r--r--  test/CodeGen/AMDGPU/invalid-addrspacecast.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/invalid-opencl-version-metadata1.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/invalid-opencl-version-metadata2.ll | 7
-rw-r--r--  test/CodeGen/AMDGPU/invalid-opencl-version-metadata3.ll | 7
-rw-r--r--  test/CodeGen/AMDGPU/invariant-load-no-alias-store.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/invert-br-undef-vcc.mir | 2
-rw-r--r--  test/CodeGen/AMDGPU/kcache-fold.ll | 186
-rw-r--r--  test/CodeGen/AMDGPU/kernarg-stack-alignment.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/kernel-args.ll | 72
-rw-r--r--  test/CodeGen/AMDGPU/large-alloca-compute.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/large-alloca-graphics.ll | 3
-rw-r--r--  test/CodeGen/AMDGPU/large-constant-initializer.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll | 72
-rw-r--r--  test/CodeGen/AMDGPU/lds-alignment.ll | 28
-rw-r--r--  test/CodeGen/AMDGPU/lds-initializer.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/lds-m0-init-in-loop.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/lds-oqap-crash.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/lds-output-queue.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/lds-size.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/lds-zero-initializer.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/legalizedag-bug-expand-setcc.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/limit-coalesce.mir | 71
-rw-r--r--  test/CodeGen/AMDGPU/literals.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/liveness.mir | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.i32.ll | 437
-rw-r--r--  test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.u32.ll | 631
-rw-r--r--  test/CodeGen/AMDGPU/llvm.AMDGPU.clamp.ll | 56
-rw-r--r--  test/CodeGen/AMDGPU/llvm.AMDGPU.kill.ll | 29
-rw-r--r--  test/CodeGen/AMDGPU/llvm.SI.export.ll | 237
-rw-r--r--  test/CodeGen/AMDGPU/llvm.SI.fs.interp.ll | 59
-rw-r--r--  test/CodeGen/AMDGPU/llvm.SI.gather4.ll | 525
-rw-r--r--  test/CodeGen/AMDGPU/llvm.SI.getlod.ll | 44
-rw-r--r--  test/CodeGen/AMDGPU/llvm.SI.image.ll | 49
-rw-r--r--  test/CodeGen/AMDGPU/llvm.SI.image.sample-masked.ll | 94
-rw-r--r--  test/CodeGen/AMDGPU/llvm.SI.image.sample.ll | 309
-rw-r--r--  test/CodeGen/AMDGPU/llvm.SI.image.sample.o.ll | 309
-rw-r--r--  test/CodeGen/AMDGPU/llvm.SI.load.dword.ll | 7
-rw-r--r--  test/CodeGen/AMDGPU/llvm.SI.packf16.ll | 28
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.atomic.dec.ll | 188
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.atomic.inc.ll | 177
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.class.f16.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.class.ll | 60
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.cos.f16.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.cos.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.cubeid.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.cubema.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.cubesc.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.cubetc.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll | 166
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.dispatch.id.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.dispatch.ptr.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.div.fixup.f16.ll | 14
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.div.fixup.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.div.fmas.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.div.scale.ll | 40
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.ds.bpermute.ll | 9
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.ds.permute.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.ds.swizzle.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.exp.compr.ll | 162
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.exp.ll | 484
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.fcmp.ll | 62
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.fdiv.fast.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.fmed3.f16.ll | 39
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.fmed3.ll | 28
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.fmul.legacy.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.fract.f16.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.fract.ll | 9
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.f16.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.f16.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.groupstaticsize.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll | 51
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.image.gather4.ll | 74
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.image.getlod.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.image.ll | 172
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.ll | 252
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.o.ll | 259
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.interp.ll | 91
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.kernarg.segment.ptr.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.lerp.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.log.clamp.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.mbcnt.ll | 24
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.mqsad.pk.u16.u8.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.mqsad.u32.u8.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.msad.u8.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.ps.live.ll | 26
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.qsad.pk.u16.u8.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.queue.ptr.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.rcp.f16.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.rcp.legacy.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll | 42
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.rsq.clamp.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.rsq.f16.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.rsq.legacy.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.rsq.ll | 14
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll | 11
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.s.decperflevel.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.s.getreg.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.s.incperflevel.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.s.memrealtime.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.s.memtime.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.s.sleep.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.sad.hi.u8.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.sad.u16.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.sad.u8.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.sbfe.ll | 556
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.sendmsg.ll | 127
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.sffbh.ll | 29
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.sin.f16.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.sin.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.trig.preop.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll | 623
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.wave.barrier.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.workgroup.id.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.workitem.id.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/llvm.ceil.f16.ll | 24
-rw-r--r--  test/CodeGen/AMDGPU/llvm.cos.f16.ll | 46
-rw-r--r--  test/CodeGen/AMDGPU/llvm.cos.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/llvm.dbg.value.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.exp2.f16.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/llvm.exp2.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/llvm.floor.f16.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/llvm.fma.f16.ll | 177
-rw-r--r--  test/CodeGen/AMDGPU/llvm.fmuladd.f16.ll | 63
-rw-r--r--  test/CodeGen/AMDGPU/llvm.log2.f16.ll | 32
-rw-r--r--  test/CodeGen/AMDGPU/llvm.log2.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/llvm.maxnum.f16.ll | 90
-rw-r--r--  test/CodeGen/AMDGPU/llvm.memcpy.ll | 22
-rw-r--r--  test/CodeGen/AMDGPU/llvm.minnum.f16.ll | 98
-rw-r--r--  test/CodeGen/AMDGPU/llvm.r600.cube.ll (renamed from test/CodeGen/AMDGPU/llvm.AMDGPU.cube.ll) | 4
-rw-r--r--  test/CodeGen/AMDGPU/llvm.r600.dot4.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.r600.group.barrier.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.r600.read.local.size.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/llvm.r600.recipsqrt.clamped.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.r600.recipsqrt.ieee.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/llvm.r600.tex.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/llvm.rint.f16.ll | 33
-rw-r--r--  test/CodeGen/AMDGPU/llvm.rint.f64.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/llvm.rint.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/llvm.round.f64.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/llvm.round.ll | 81
-rw-r--r--  test/CodeGen/AMDGPU/llvm.sin.f16.ll | 46
-rw-r--r--  test/CodeGen/AMDGPU/llvm.sin.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/llvm.sqrt.f16.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/llvm.trunc.f16.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/load-constant-f64.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/load-constant-i1.ll | 88
-rw-r--r--  test/CodeGen/AMDGPU/load-constant-i16.ll | 80
-rw-r--r--  test/CodeGen/AMDGPU/load-constant-i32.ll | 40
-rw-r--r--  test/CodeGen/AMDGPU/load-constant-i64.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/load-constant-i8.ll | 112
-rw-r--r--  test/CodeGen/AMDGPU/load-global-f32.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/load-global-f64.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/load-global-i1.ll | 88
-rw-r--r--  test/CodeGen/AMDGPU/load-global-i16.ll | 80
-rw-r--r--  test/CodeGen/AMDGPU/load-global-i32.ll | 40
-rw-r--r--  test/CodeGen/AMDGPU/load-global-i64.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/load-global-i8.ll | 112
-rw-r--r--  test/CodeGen/AMDGPU/load-input-fold.ll | 9
-rw-r--r--  test/CodeGen/AMDGPU/load-local-f32.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/load-local-f64.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/load-local-i1.ll | 88
-rw-r--r--  test/CodeGen/AMDGPU/load-local-i16.ll | 80
-rw-r--r--  test/CodeGen/AMDGPU/load-local-i32.ll | 40
-rw-r--r--  test/CodeGen/AMDGPU/load-local-i64.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/load-local-i8.ll | 112
-rw-r--r--  test/CodeGen/AMDGPU/load-weird-sizes.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/local-64.ll | 32
-rw-r--r--  test/CodeGen/AMDGPU/local-atomics.ll | 108
-rw-r--r--  test/CodeGen/AMDGPU/local-atomics64.ll | 100
-rw-r--r--  test/CodeGen/AMDGPU/local-memory.amdgcn.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/local-memory.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/local-memory.r600.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/local-stack-slot-bug.ll | 7
-rw-r--r--  test/CodeGen/AMDGPU/loop-address.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/loop-idiom.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/loop_break.ll | 265
-rw-r--r--  test/CodeGen/AMDGPU/lower-mem-intrinsics.ll | 117
-rw-r--r--  test/CodeGen/AMDGPU/lower-range-metadata-intrinsic-call.ll | 14
-rw-r--r--  test/CodeGen/AMDGPU/lshl.ll | 15
-rw-r--r--  test/CodeGen/AMDGPU/lshr.ll | 15
-rw-r--r--  test/CodeGen/AMDGPU/lshr.v2i16.ll | 149
-rw-r--r--  test/CodeGen/AMDGPU/mad-combine.ll | 28
-rw-r--r--  test/CodeGen/AMDGPU/mad24-get-global-id.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/mad_int24.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/mad_uint24.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/madak.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/madmk.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/max.i16.ll | 90
-rw-r--r--  test/CodeGen/AMDGPU/max.ll | 46
-rw-r--r--  test/CodeGen/AMDGPU/max3.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/mem-builtins.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/merge-stores.ll | 100
-rw-r--r--  test/CodeGen/AMDGPU/min.ll | 303
-rw-r--r--  test/CodeGen/AMDGPU/min3.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/missing-store.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/move-addr64-rsrc-dead-subreg-writes.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/move-to-valu-atomicrmw.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/mubuf.ll | 26
-rw-r--r--  test/CodeGen/AMDGPU/mul.ll | 62
-rw-r--r--  test/CodeGen/AMDGPU/mul_int24.ll | 14
-rw-r--r--  test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll | 26
-rw-r--r--  test/CodeGen/AMDGPU/mul_uint24-r600.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/mulhu.ll | 17
-rw-r--r--  test/CodeGen/AMDGPU/multi-divergent-exit-region.ll | 710
-rw-r--r--  test/CodeGen/AMDGPU/multilevel-break.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/nested-loop-conditions.ll | 269
-rw-r--r--  test/CodeGen/AMDGPU/no-initializer-constant-addrspace.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/no-shrink-extloads.ll | 36
-rw-r--r--  test/CodeGen/AMDGPU/nop-data.ll | 87
-rw-r--r--  test/CodeGen/AMDGPU/nullptr.ll | 113
-rw-r--r--  test/CodeGen/AMDGPU/omod.ll | 297
-rw-r--r--  test/CodeGen/AMDGPU/opencl-image-metadata.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/operand-folding.ll | 14
-rw-r--r--  test/CodeGen/AMDGPU/operand-spacing.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/optimize-if-exec-masking.mir | 20
-rw-r--r--  test/CodeGen/AMDGPU/or.ll | 44
-rw-r--r--  test/CodeGen/AMDGPU/over-max-lds-size.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/pack.v2f16.ll | 219
-rw-r--r--  test/CodeGen/AMDGPU/pack.v2i16.ll | 181
-rw-r--r--  test/CodeGen/AMDGPU/packetizer.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/parallelandifcollapse.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/parallelorifcollapse.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/partial-sgpr-to-vgpr-spills.ll | 638
-rw-r--r--  test/CodeGen/AMDGPU/partially-dead-super-register-immediate.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/predicates.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/private-access-no-objects.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/private-element-size.ll | 102
-rw-r--r--  test/CodeGen/AMDGPU/private-memory-atomics.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/private-memory-broken.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/private-memory-r600.ll | 37
-rw-r--r--  test/CodeGen/AMDGPU/promote-alloca-array-allocation.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/promote-alloca-bitcast-function.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/promote-alloca-globals.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/promote-alloca-lifetime.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/promote-alloca-mem-intrinsics.ll | 14
-rw-r--r--  test/CodeGen/AMDGPU/promote-alloca-no-opts.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/promote-alloca-padding-size-estimate.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/promote-alloca-stored-pointer-value.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/promote-alloca-to-lds-icmp.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/promote-alloca-to-lds-phi.ll | 14
-rw-r--r--  test/CodeGen/AMDGPU/promote-alloca-to-lds-select.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/promote-alloca-unhandled-intrinsic.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/promote-alloca-volatile.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/pv.ll | 458
-rw-r--r--  test/CodeGen/AMDGPU/r600-infinite-loop-bug-while-reorganizing-vector.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/r600-legalize-umax-bug.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/r600.alu-limits.ll | 29
-rw-r--r--  test/CodeGen/AMDGPU/r600.amdgpu-alias-analysis.ll | 7
-rw-r--r--  test/CodeGen/AMDGPU/r600.bitcast.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/r600.global_atomics.ll | 542
-rw-r--r--  test/CodeGen/AMDGPU/r600.private-memory.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/r600.work-item-intrinsics.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/rcp-pattern.ll | 47
-rw-r--r--  test/CodeGen/AMDGPU/read-register-invalid-subtarget.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/read-register-invalid-type-i32.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/read-register-invalid-type-i64.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/read_register.ll | 14
-rw-r--r--  test/CodeGen/AMDGPU/readcyclecounter.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/reduce-load-width-alignment.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/reduce-store-width-alignment.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/reg-coalescer-sched-crash.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/regcoalesce-dbg.mir | 76
-rw-r--r--  test/CodeGen/AMDGPU/register-count-comments.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/rename-disconnected-bug.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/rename-independent-subregs.mir | 4
-rw-r--r--  test/CodeGen/AMDGPU/reorder-stores.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/ret.ll | 207
-rw-r--r--  test/CodeGen/AMDGPU/ret_jump.ll | 108
-rw-r--r--  test/CodeGen/AMDGPU/rotl.i64.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/rotl.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/rotr.i64.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/rotr.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/rsq.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/runtime-metadata.ll | 396
-rw-r--r--  test/CodeGen/AMDGPU/s_addk_i32.ll | 29
-rw-r--r--  test/CodeGen/AMDGPU/s_movk_i32.ll | 26
-rw-r--r--  test/CodeGen/AMDGPU/s_mulk_i32.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/sad.ll | 34
-rw-r--r--  test/CodeGen/AMDGPU/saddo.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/salu-to-valu.ll | 40
-rw-r--r--  test/CodeGen/AMDGPU/sampler-resource-id.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/scalar-store-cache-flush.mir | 14
-rw-r--r--  test/CodeGen/AMDGPU/scalar_to_vector.ll | 51
-rw-r--r--  test/CodeGen/AMDGPU/schedule-fs-loop-nested-if.ll | 130
-rw-r--r--  test/CodeGen/AMDGPU/schedule-fs-loop-nested.ll | 137
-rw-r--r--  test/CodeGen/AMDGPU/schedule-fs-loop.ll | 121
-rw-r--r--  test/CodeGen/AMDGPU/schedule-global-loads.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/schedule-if-2.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/schedule-if.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/schedule-regpressure-limit.ll | 591
-rw-r--r--  test/CodeGen/AMDGPU/schedule-regpressure-limit2.ll | 288
-rw-r--r--  test/CodeGen/AMDGPU/schedule-regpressure.mir | 57
-rw-r--r--  test/CodeGen/AMDGPU/scratch-buffer.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/sdiv.ll | 30
-rw-r--r--  test/CodeGen/AMDGPU/sdivrem24.ll | 34
-rw-r--r--  test/CodeGen/AMDGPU/sdivrem64.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/sdwa-peephole.ll | 395
-rw-r--r--  test/CodeGen/AMDGPU/select-fabs-fneg-extract-legacy.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll | 92
-rw-r--r--  test/CodeGen/AMDGPU/select-i1.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/select-opt.ll | 18
-rw-r--r--  test/CodeGen/AMDGPU/select-vectors.ll | 26
-rw-r--r--  test/CodeGen/AMDGPU/select.f16.ll | 118
-rw-r--r--  test/CodeGen/AMDGPU/select.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/select64.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/selectcc-cnd.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/selectcc-cnde-int.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/selectcc-icmp-select-float.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/selectcc-opt.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/selectcc.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/selected-stack-object.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/set-dx10.ll | 24
-rw-r--r--  test/CodeGen/AMDGPU/setcc-equivalent.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/setcc-fneg-constant.ll | 258
-rw-r--r--  test/CodeGen/AMDGPU/setcc-opt.ll | 40
-rw-r--r--  test/CodeGen/AMDGPU/setcc.ll | 60
-rw-r--r--  test/CodeGen/AMDGPU/setcc64.ll | 48
-rw-r--r--  test/CodeGen/AMDGPU/seto.ll | 7
-rw-r--r--  test/CodeGen/AMDGPU/setuo.ll | 7
-rw-r--r--  test/CodeGen/AMDGPU/sext-eliminate.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/sext-in-reg-failure-r600.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/sext-in-reg.ll | 353
-rw-r--r--  test/CodeGen/AMDGPU/sgpr-control-flow.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/sgpr-copy-duplicate-operand.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/sgpr-copy.ll | 205
-rw-r--r--  test/CodeGen/AMDGPU/sgprcopies.ll | 58
-rw-r--r--  test/CodeGen/AMDGPU/shift-and-i128-ubfe.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll | 60
-rw-r--r--  test/CodeGen/AMDGPU/shift-i64-opts.ll | 40
-rw-r--r--  test/CodeGen/AMDGPU/shl.ll | 82
-rw-r--r--  test/CodeGen/AMDGPU/shl.v2i16.ll | 152
-rw-r--r--  test/CodeGen/AMDGPU/shl_add_constant.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/shl_add_ptr.ll | 36
-rw-r--r--  test/CodeGen/AMDGPU/shrink-add-sub-constant.ll | 186
-rw-r--r--  test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir | 12
-rw-r--r--  test/CodeGen/AMDGPU/si-annotate-cf-noloop.ll | 47
-rw-r--r--  test/CodeGen/AMDGPU/si-annotate-cf-unreachable.ll | 40
-rw-r--r--  test/CodeGen/AMDGPU/si-annotate-cf.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/si-annotate-cfg-loop-assert.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir | 2
-rw-r--r--  test/CodeGen/AMDGPU/si-instr-info-correct-implicit-operands.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/si-literal-folding.ll | 14
-rw-r--r--  test/CodeGen/AMDGPU/si-lod-bias.ll | 49
-rw-r--r--  test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll | 58
-rw-r--r--  test/CodeGen/AMDGPU/si-scheduler.ll | 49
-rw-r--r--  test/CodeGen/AMDGPU/si-sgpr-spill.ll | 843
-rw-r--r--  test/CodeGen/AMDGPU/si-spill-cf.ll | 732
-rw-r--r--  test/CodeGen/AMDGPU/si-spill-sgpr-stack.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll | 39
-rw-r--r--  test/CodeGen/AMDGPU/si-vector-hang.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/sign_extend.ll | 26
-rw-r--r--  test/CodeGen/AMDGPU/sint_to_fp.f64.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/sint_to_fp.i64.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/sint_to_fp.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/sitofp.f16.ll | 60
-rw-r--r--  test/CodeGen/AMDGPU/skip-if-dead.ll | 9
-rw-r--r--  test/CodeGen/AMDGPU/smed3.ll | 108
-rw-r--r--  test/CodeGen/AMDGPU/sminmax.ll | 22
-rw-r--r--  test/CodeGen/AMDGPU/sminmax.v2i16.ll | 224
-rw-r--r--  test/CodeGen/AMDGPU/smrd-vccz-bug.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/smrd.ll | 114
-rw-r--r--  test/CodeGen/AMDGPU/sopk-compares.ll | 76
-rw-r--r--  test/CodeGen/AMDGPU/spill-alloc-sgpr-init-bug.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/spill-cfg-position.ll | 78
-rw-r--r--  test/CodeGen/AMDGPU/spill-m0.ll | 55
-rw-r--r--  test/CodeGen/AMDGPU/spill-scavenge-offset.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/spill-wide-sgpr.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/split-scalar-i64-add.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/split-smrd.ll | 29
-rw-r--r--  test/CodeGen/AMDGPU/split-vector-memoperand-offsets.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/splitkit.mir | 105
-rw-r--r--  test/CodeGen/AMDGPU/sra.ll | 38
-rw-r--r--  test/CodeGen/AMDGPU/srem.ll | 26
-rw-r--r--  test/CodeGen/AMDGPU/srl.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/ssubo.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/store-barrier.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/store-global.ll | 46
-rw-r--r--  test/CodeGen/AMDGPU/store-local.ll | 24
-rw-r--r--  test/CodeGen/AMDGPU/store-private.ll | 48
-rw-r--r--  test/CodeGen/AMDGPU/store-v3i64.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/store-vector-ptrs.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/store_typed.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/structurize.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/structurize1.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/sub.i16.ll | 22
-rw-r--r--  test/CodeGen/AMDGPU/sub.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/sub.v2i16.ll | 278
-rw-r--r--  test/CodeGen/AMDGPU/subreg-coalescer-crash.ll | 50
-rw-r--r--  test/CodeGen/AMDGPU/subreg-coalescer-undef-use.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/subreg-eliminate-dead.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/subreg-intervals.mir | 4
-rw-r--r--  test/CodeGen/AMDGPU/subreg_interference.mir | 24
-rw-r--r--  test/CodeGen/AMDGPU/target-cpu.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/trap.ll | 78
-rw-r--r--  test/CodeGen/AMDGPU/trunc-bitcast-vector.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/trunc-cmp-constant.ll | 26
-rw-r--r--  test/CodeGen/AMDGPU/trunc-store-f64-to-f16.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/trunc-store-i1.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/trunc-store.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/trunc-vector-store-assertion-failure.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/trunc.ll | 30
-rw-r--r--  test/CodeGen/AMDGPU/tti-unroll-prefs.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/uaddo.ll | 119
-rw-r--r--  test/CodeGen/AMDGPU/udiv.ll | 72
-rw-r--r--  test/CodeGen/AMDGPU/udivrem.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/udivrem24.ll | 34
-rw-r--r--  test/CodeGen/AMDGPU/udivrem64.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/uint_to_fp.f64.ll | 18
-rw-r--r--  test/CodeGen/AMDGPU/uint_to_fp.i64.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/uint_to_fp.ll | 18
-rw-r--r--  test/CodeGen/AMDGPU/uitofp.f16.ll | 56
-rw-r--r--  test/CodeGen/AMDGPU/umed3.ll | 113
-rw-r--r--  test/CodeGen/AMDGPU/unaligned-load-store.ll | 62
-rw-r--r--  test/CodeGen/AMDGPU/undefined-subreg-liverange.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/unhandled-loop-condition-assertion.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/uniform-branch-intrinsic-cond.ll | 1
-rw-r--r--  test/CodeGen/AMDGPU/uniform-cfg.ll | 68
-rw-r--r--  test/CodeGen/AMDGPU/uniform-crash.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/uniform-loop-inside-nonuniform.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/unify-metadata.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/unigine-liveness-crash.ll | 55
-rw-r--r--  test/CodeGen/AMDGPU/unknown-processor.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/unroll.ll | 68
-rw-r--r--  test/CodeGen/AMDGPU/unsupported-cc.ll | 20
-rw-r--r--  test/CodeGen/AMDGPU/urecip.ll | 13
-rw-r--r--  test/CodeGen/AMDGPU/urem.ll | 14
-rw-r--r--  test/CodeGen/AMDGPU/use-sgpr-multiple-times.ll | 36
-rw-r--r--  test/CodeGen/AMDGPU/usubo.ll | 114
-rw-r--r--  test/CodeGen/AMDGPU/v1i64-kernel-arg.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/v_cndmask.ll | 52
-rw-r--r--  test/CodeGen/AMDGPU/v_cvt_pk_u8_f32.ll | 12
-rw-r--r--  test/CodeGen/AMDGPU/v_mac.ll | 49
-rw-r--r--  test/CodeGen/AMDGPU/v_mac_f16.ll | 317
-rw-r--r--  test/CodeGen/AMDGPU/v_madak_f16.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/valu-i1.ll | 97
-rw-r--r--  test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir | 4
-rw-r--r--  test/CodeGen/AMDGPU/vector-alloca.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/vector-extract-insert.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/vectorize-global-local.ll | 80
-rw-r--r--  test/CodeGen/AMDGPU/vertex-fetch-encoding.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll | 105
-rw-r--r--  test/CodeGen/AMDGPU/vi-removed-intrinsics.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/vop-shrink.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/vselect.ll | 8
-rw-r--r--  test/CodeGen/AMDGPU/vselect64.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/vtx-fetch-branch.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/vtx-schedule.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/wait.ll | 72
-rw-r--r--  test/CodeGen/AMDGPU/waitcnt-flat.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/waitcnt.mir | 75
-rw-r--r--  test/CodeGen/AMDGPU/wqm.ll | 120
-rw-r--r--  test/CodeGen/AMDGPU/write-register-vgpr-into-sgpr.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/write_register.ll | 14
-rw-r--r--  test/CodeGen/AMDGPU/wrong-transalu-pos-fix.ll | 2
-rw-r--r--  test/CodeGen/AMDGPU/xfail.r600.bitcast.ll | 6
-rw-r--r--  test/CodeGen/AMDGPU/xor.ll | 38
-rw-r--r--  test/CodeGen/AMDGPU/zero_extend.ll | 10
-rw-r--r--  test/CodeGen/AMDGPU/zext-i64-bit-operand.ll | 4
-rw-r--r--  test/CodeGen/AMDGPU/zext-lid.ll | 83
-rw-r--r--  test/CodeGen/ARM/2007-05-22-tailmerge-3.ll | 8
-rw-r--r--  test/CodeGen/ARM/2009-05-18-InlineAsmMem.ll | 1
-rw-r--r--  test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll | 5
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir | 406
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll | 567
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-isel-fp.ll | 51
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-isel.ll | 144
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir | 282
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-legalizer.mir | 233
-rw-r--r--  test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir | 280
-rw-r--r--  test/CodeGen/ARM/alloc-no-stack-realign.ll | 100
-rw-r--r--  test/CodeGen/ARM/arg-copy-elide.ll | 61
-rw-r--r--  test/CodeGen/ARM/arm-and-tst-peephole.ll | 58
-rw-r--r--  test/CodeGen/ARM/arm-position-independence.ll | 144
-rw-r--r--  test/CodeGen/ARM/atomic-cmpxchg.ll | 14
-rw-r--r--  test/CodeGen/ARM/atomic-op.ll | 4
-rw-r--r--  test/CodeGen/ARM/atomic-ops-v8.ll | 35
-rw-r--r--  test/CodeGen/ARM/bfi.ll | 12
-rw-r--r--  test/CodeGen/ARM/bic.ll | 13
-rw-r--r--  test/CodeGen/ARM/bool-ext-inc.ll | 32
-rw-r--r--  test/CodeGen/ARM/build-attributes.ll | 77
-rw-r--r--  test/CodeGen/ARM/cmp1-peephole-thumb.mir | 78
-rw-r--r--  test/CodeGen/ARM/cmp2-peephole-thumb.mir | 108
-rw-r--r--  test/CodeGen/ARM/cmpxchg-weak.ll | 8
-rw-r--r--  test/CodeGen/ARM/constantpool-promote.ll | 62
-rw-r--r--  test/CodeGen/ARM/debug-info-s16-reg.ll | 2
-rw-r--r--  test/CodeGen/ARM/debug-info-sreg2.ll | 2
-rw-r--r--  test/CodeGen/ARM/div.ll | 9
-rw-r--r--  test/CodeGen/ARM/fast-isel-align.ll | 4
-rw-r--r--  test/CodeGen/ARM/fast-isel-cmp-imm.ll | 16
-rw-r--r--  test/CodeGen/ARM/fold-stack-adjust.ll | 2
-rw-r--r--  test/CodeGen/ARM/fp-only-sp.ll | 62
-rw-r--r--  test/CodeGen/ARM/fp16-promote.ll | 32
-rw-r--r--  test/CodeGen/ARM/fp16-v3.ll | 2
-rw-r--r--  test/CodeGen/ARM/fpcmp-opt.ll | 8
-rw-r--r--  test/CodeGen/ARM/fpcmp.ll | 4
-rw-r--r--  test/CodeGen/ARM/fpcmp_ueq.ll | 2
-rw-r--r--  test/CodeGen/ARM/fpscr-intrinsics.ll | 44
-rw-r--r--  test/CodeGen/ARM/gpr-paired-spill.ll | 18
-rw-r--r--  test/CodeGen/ARM/ifcvt10.ll | 2
-rw-r--r--  test/CodeGen/ARM/illegal-bitfield-loadstore.ll | 184
-rw-r--r--  test/CodeGen/ARM/indirectbr.ll | 1
-rw-r--r--  test/CodeGen/ARM/interval-update-remat.ll | 4
-rw-r--r--  test/CodeGen/ARM/intrinsics-coprocessor.ll | 1
-rw-r--r--  test/CodeGen/ARM/ldm-stm-i256.ll | 38
-rw-r--r--  test/CodeGen/ARM/ldrd.ll | 28
-rw-r--r--  test/CodeGen/ARM/load-combine-big-endian.ll | 779
-rw-r--r--  test/CodeGen/ARM/load-combine.ll | 692
-rw-r--r--  test/CodeGen/ARM/longMAC.ll | 262
-rw-r--r--  test/CodeGen/ARM/lowerMUL-newload.ll | 115
-rw-r--r--  test/CodeGen/ARM/mature-mc-support.ll | 2
-rw-r--r--  test/CodeGen/ARM/misched-fp-basic.ll | 69
-rw-r--r--  test/CodeGen/ARM/misched-int-basic-thumb2.mir | 175
-rw-r--r--  test/CodeGen/ARM/misched-int-basic.mir | 128
-rw-r--r--  test/CodeGen/ARM/movt.ll | 8
-rw-r--r--  test/CodeGen/ARM/msr-it-block.ll | 8
-rw-r--r--  test/CodeGen/ARM/neon_vabs.ll | 95
-rw-r--r--  test/CodeGen/ARM/no-cmov2bfi.ll | 19
-rw-r--r--  test/CodeGen/ARM/phi.ll | 1
-rw-r--r--  test/CodeGen/ARM/pr32545.ll | 22
-rw-r--r--  test/CodeGen/ARM/prera-ldst-aliasing.mir | 40
-rw-r--r--  test/CodeGen/ARM/prera-ldst-insertpt.mir | 105
-rw-r--r--  test/CodeGen/ARM/rbit.ll | 3
-rw-r--r--  test/CodeGen/ARM/rev.ll | 14
-rw-r--r--  test/CodeGen/ARM/select_const.ll | 326
-rw-r--r--  test/CodeGen/ARM/select_xform.ll | 12
-rw-r--r--  test/CodeGen/ARM/setcc-logic.ll | 74
-rw-r--r--  test/CodeGen/ARM/setcc-sentinals.ll | 14
-rw-r--r--  test/CodeGen/ARM/single-issue-r52.mir | 86
-rw-r--r--  test/CodeGen/ARM/sjljeh-swifterror.ll | 27
-rw-r--r--  test/CodeGen/ARM/smml.ll | 43
-rw-r--r--  test/CodeGen/ARM/smul.ll | 29
-rw-r--r--  test/CodeGen/ARM/softfp-fabs-fneg.ll | 3
-rw-r--r--  test/CodeGen/ARM/special-reg-mcore.ll | 82
-rw-r--r--  test/CodeGen/ARM/special-reg-v8m-main.ll | 8
-rw-r--r--test/CodeGen/ARM/stack_guard_remat.ll8
-rw-r--r--test/CodeGen/ARM/static-addr-hoisting.ll6
-rw-r--r--test/CodeGen/ARM/tail-opts.ll52
-rw-r--r--test/CodeGen/ARM/thumb1-div.ll67
-rw-r--r--test/CodeGen/ARM/unschedule-first-call.ll136
-rw-r--r--test/CodeGen/ARM/v6-jumptable-clobber.mir384
-rw-r--r--test/CodeGen/ARM/v8m-tail-call.ll23
-rw-r--r--test/CodeGen/ARM/v8m.base-jumptable_alignment.ll51
-rw-r--r--test/CodeGen/ARM/va_arg.ll8
-rw-r--r--test/CodeGen/ARM/vcmp-crash.ll11
-rw-r--r--test/CodeGen/ARM/vldm-liveness.ll19
-rw-r--r--test/CodeGen/ARM/vldm-liveness.mir40
-rw-r--r--test/CodeGen/ARM/vsel.ll8
-rw-r--r--test/CodeGen/ARM/vuzp.ll22
-rw-r--r--test/CodeGen/AVR/inline-asm/inline-asm.ll2
-rw-r--r--test/CodeGen/AVR/inline-asm/inline-asm2.ll2
-rw-r--r--test/CodeGen/AVR/inline-asm/multibyte.ll2
-rw-r--r--test/CodeGen/AVR/intrinsics/stacksave-restore.ll27
-rw-r--r--test/CodeGen/AVR/no-print-operand-twice.ll8
-rw-r--r--test/CodeGen/AVR/pseudo/ADCWRdRr.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/ADDWRdRr.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/ANDIWRdK.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/ANDWRdRr.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/ASRWRd.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/COMWRd.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/CPCWRdRr.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/CPWRdRr.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/EORWRdRr.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/FRMIDX.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/INWRdA.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/LDDWRdPtrQ.mir5
-rw-r--r--test/CodeGen/AVR/pseudo/LDDWRdYQ.mir5
-rw-r--r--test/CodeGen/AVR/pseudo/LDIWRdK.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/LDSWRdK.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/LDWRdPtr.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/LDWRdPtrPd.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/LDWRdPtrPi.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/LSLWRd.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/LSRWRd.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/ORIWRdK.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/ORWRdRr.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/OUTWARr.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/POPWRd.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/PUSHWRr.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/SBCIWRdK.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/SBCWRdRr.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/SEXT.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/STDWPtrQRr.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/STSWKRr.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/STWPtrPdRr.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/STWPtrPiRr.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/STWPtrRr.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/SUBIWRdK.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/SUBWRdRr.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/ZEXT.mir2
-rw-r--r--test/CodeGen/AVR/pseudo/expand-lddw-dst-src-same.mir3
-rw-r--r--test/CodeGen/AVR/relax-mem/STDWPtrQRr.mir2
-rw-r--r--test/CodeGen/BPF/cc_args.ll2
-rw-r--r--test/CodeGen/BPF/cc_args_be.ll2
-rw-r--r--test/CodeGen/BPF/cc_ret.ll2
-rw-r--r--test/CodeGen/BPF/fi_ri.ll2
-rw-r--r--test/CodeGen/BPF/intrinsics.ll4
-rw-r--r--test/CodeGen/BPF/mem_offset.ll17
-rw-r--r--test/CodeGen/BPF/objdump_intrinsics.ll4
-rw-r--r--test/CodeGen/BPF/objdump_trivial.ll11
-rw-r--r--test/CodeGen/BPF/sanity.ll2
-rw-r--r--test/CodeGen/BPF/undef.ll67
-rw-r--r--test/CodeGen/BPF/warn-call.ll69
-rw-r--r--test/CodeGen/BPF/warn-stack.ll76
-rw-r--r--test/CodeGen/Generic/2003-07-29-BadConstSbyte.ll3
-rw-r--r--test/CodeGen/Generic/2007-04-08-MultipleFrameIndices.ll3
-rw-r--r--test/CodeGen/Generic/2007-12-17-InvokeAsm.ll2
-rw-r--r--test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll4
-rw-r--r--test/CodeGen/Generic/MachineBranchProb.ll6
-rw-r--r--test/CodeGen/Generic/externally_available.ll2
-rw-r--r--test/CodeGen/Generic/icmp-illegal.ll1
-rw-r--r--test/CodeGen/Generic/inline-asm-mem-clobber.ll3
-rw-r--r--test/CodeGen/Generic/multiple-return-values-cross-block-with-invoke.ll1
-rw-r--r--test/CodeGen/Generic/overloaded-intrinsic-name.ll35
-rw-r--r--test/CodeGen/Generic/pr24662.ll12
-rw-r--r--test/CodeGen/Generic/select-cc.ll6
-rw-r--r--test/CodeGen/Generic/v-split.ll4
-rw-r--r--test/CodeGen/Generic/vector-redux.ll3
-rw-r--r--test/CodeGen/Generic/vector.ll3
-rw-r--r--test/CodeGen/Hexagon/BranchPredict.ll6
-rw-r--r--test/CodeGen/Hexagon/adde.ll55
-rw-r--r--test/CodeGen/Hexagon/addh-sext-trunc.ll2
-rw-r--r--test/CodeGen/Hexagon/addh-shifted.ll2
-rw-r--r--test/CodeGen/Hexagon/addh.ll2
-rw-r--r--test/CodeGen/Hexagon/alu64.ll132
-rw-r--r--test/CodeGen/Hexagon/args.ll8
-rw-r--r--test/CodeGen/Hexagon/avoid-predspill-calleesaved.ll1
-rw-r--r--test/CodeGen/Hexagon/bit-bitsplit-at.ll33
-rw-r--r--test/CodeGen/Hexagon/bit-bitsplit-src.ll35
-rw-r--r--test/CodeGen/Hexagon/bit-bitsplit.ll17
-rw-r--r--test/CodeGen/Hexagon/bit-eval.ll2
-rw-r--r--test/CodeGen/Hexagon/bit-ext-sat.ll57
-rw-r--r--test/CodeGen/Hexagon/bit-extract-off.ll23
-rw-r--r--test/CodeGen/Hexagon/bit-extract.ll75
-rw-r--r--test/CodeGen/Hexagon/bit-has.ll64
-rw-r--r--test/CodeGen/Hexagon/bit-phi.ll1
-rw-r--r--test/CodeGen/Hexagon/bit-rie.ll4
-rw-r--r--test/CodeGen/Hexagon/bit-skip-byval.ll2
-rw-r--r--test/CodeGen/Hexagon/bit-validate-reg.ll5
-rw-r--r--test/CodeGen/Hexagon/bitmanip.ll135
-rw-r--r--test/CodeGen/Hexagon/block-addr.ll2
-rw-r--r--test/CodeGen/Hexagon/branchfolder-keep-impdef.ll2
-rw-r--r--test/CodeGen/Hexagon/brev_ld.ll12
-rw-r--r--test/CodeGen/Hexagon/brev_st.ll10
-rw-r--r--test/CodeGen/Hexagon/builtin-expect.ll44
-rw-r--r--test/CodeGen/Hexagon/cext-valid-packet1.ll4
-rw-r--r--test/CodeGen/Hexagon/circ_ld.ll12
-rw-r--r--test/CodeGen/Hexagon/circ_ldw.ll2
-rw-r--r--test/CodeGen/Hexagon/circ_st.ll10
-rw-r--r--test/CodeGen/Hexagon/clr_set_toggle.ll30
-rw-r--r--test/CodeGen/Hexagon/cmp.ll22
-rw-r--r--test/CodeGen/Hexagon/combine.ll2
-rw-r--r--test/CodeGen/Hexagon/compound.ll4
-rw-r--r--test/CodeGen/Hexagon/constp-combine-neg.ll6
-rw-r--r--test/CodeGen/Hexagon/convert-to-dot-old.ll110
-rw-r--r--test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll36
-rw-r--r--test/CodeGen/Hexagon/dead-store-stack.ll2
-rw-r--r--test/CodeGen/Hexagon/early-if-merge-loop.ll91
-rw-r--r--test/CodeGen/Hexagon/early-if-phi-i1.ll2
-rw-r--r--test/CodeGen/Hexagon/early-if-vecpred.ll37
-rw-r--r--test/CodeGen/Hexagon/eh_return.ll2
-rw-r--r--test/CodeGen/Hexagon/eliminate-pred-spill.ll5
-rw-r--r--test/CodeGen/Hexagon/expand-condsets-dead-bad.ll54
-rw-r--r--test/CodeGen/Hexagon/expand-condsets-dead-pred.ll45
-rw-r--r--test/CodeGen/Hexagon/expand-condsets-rm-reg.mir2
-rw-r--r--test/CodeGen/Hexagon/expand-vstorerw-undef2.ll216
-rw-r--r--test/CodeGen/Hexagon/extload-combine.ll18
-rw-r--r--test/CodeGen/Hexagon/extract-basic.ll6
-rw-r--r--test/CodeGen/Hexagon/fadd.ll2
-rw-r--r--test/CodeGen/Hexagon/find-loop-instr.ll79
-rw-r--r--test/CodeGen/Hexagon/float-amode.ll14
-rw-r--r--test/CodeGen/Hexagon/fmul.ll2
-rw-r--r--test/CodeGen/Hexagon/fsel.ll4
-rw-r--r--test/CodeGen/Hexagon/fsub.ll2
-rw-r--r--test/CodeGen/Hexagon/fusedandshift.ll4
-rw-r--r--test/CodeGen/Hexagon/gp-rel.ll4
-rw-r--r--test/CodeGen/Hexagon/hwloop-cleanup.ll6
-rw-r--r--test/CodeGen/Hexagon/hwloop-loop1.ll16
-rw-r--r--test/CodeGen/Hexagon/hwloop1.ll16
-rw-r--r--test/CodeGen/Hexagon/hwloop2.ll2
-rw-r--r--test/CodeGen/Hexagon/hwloop4.ll6
-rw-r--r--test/CodeGen/Hexagon/hwloop5.ll4
-rw-r--r--test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll4
-rw-r--r--test/CodeGen/Hexagon/ifcvt-simple-bprob.ll36
-rw-r--r--test/CodeGen/Hexagon/inline-asm-vecpred128.ll15
-rw-r--r--test/CodeGen/Hexagon/insert-basic.ll8
-rw-r--r--test/CodeGen/Hexagon/insert4.ll4
-rw-r--r--test/CodeGen/Hexagon/intrinsics/alu32_alu.ll38
-rw-r--r--test/CodeGen/Hexagon/intrinsics/alu32_perm.ll24
-rw-r--r--test/CodeGen/Hexagon/intrinsics/byte-store-double.ll41
-rw-r--r--test/CodeGen/Hexagon/intrinsics/byte-store.ll41
-rw-r--r--test/CodeGen/Hexagon/intrinsics/cr.ll30
-rw-r--r--test/CodeGen/Hexagon/intrinsics/system_user.ll2
-rw-r--r--test/CodeGen/Hexagon/intrinsics/xtype_alu.ll254
-rw-r--r--test/CodeGen/Hexagon/intrinsics/xtype_bit.ll58
-rw-r--r--test/CodeGen/Hexagon/intrinsics/xtype_complex.ll94
-rw-r--r--test/CodeGen/Hexagon/intrinsics/xtype_fp.ll44
-rw-r--r--test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll430
-rw-r--r--test/CodeGen/Hexagon/intrinsics/xtype_perm.ll16
-rw-r--r--test/CodeGen/Hexagon/intrinsics/xtype_pred.ll94
-rw-r--r--test/CodeGen/Hexagon/intrinsics/xtype_shift.ll202
-rw-r--r--test/CodeGen/Hexagon/isel-exti1.ll22
-rw-r--r--test/CodeGen/Hexagon/isel-i1arg-crash.ll6
-rw-r--r--test/CodeGen/Hexagon/isel-op-zext-i1.ll13
-rw-r--r--test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll36
-rw-r--r--test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll36
-rw-r--r--test/CodeGen/Hexagon/loop-idiom/lcssa.ll46
-rw-r--r--test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll24
-rw-r--r--test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll83
-rw-r--r--test/CodeGen/Hexagon/loop-idiom/pmpy-mod.ll84
-rw-r--r--test/CodeGen/Hexagon/loop-idiom/pmpy.ll33
-rw-r--r--test/CodeGen/Hexagon/memops-stack.ll36
-rw-r--r--test/CodeGen/Hexagon/newvalueSameReg.ll4
-rw-r--r--test/CodeGen/Hexagon/newvaluejump.ll2
-rw-r--r--test/CodeGen/Hexagon/newvaluejump2.ll2
-rw-r--r--test/CodeGen/Hexagon/newvaluejump3.ll79
-rw-r--r--test/CodeGen/Hexagon/opt-addr-mode.ll4
-rw-r--r--test/CodeGen/Hexagon/opt-fabs.ll2
-rw-r--r--test/CodeGen/Hexagon/opt-fneg.ll6
-rw-r--r--test/CodeGen/Hexagon/opt-spill-volatile.ll10
-rw-r--r--test/CodeGen/Hexagon/pic-local.ll4
-rw-r--r--test/CodeGen/Hexagon/pic-simple.ll6
-rw-r--r--test/CodeGen/Hexagon/pic-static.ll6
-rw-r--r--test/CodeGen/Hexagon/pred-absolute-store.ll4
-rw-r--r--test/CodeGen/Hexagon/predicate-logical.ll2
-rw-r--r--test/CodeGen/Hexagon/predicate-rcmp.ll2
-rw-r--r--test/CodeGen/Hexagon/rdf-copy-undef2.ll4
-rw-r--r--test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll8
-rw-r--r--test/CodeGen/Hexagon/rdf-phi-up.ll8
-rw-r--r--test/CodeGen/Hexagon/readcyclecounter.ll10
-rw-r--r--test/CodeGen/Hexagon/regalloc-block-overlap.ll143
-rw-r--r--test/CodeGen/Hexagon/ret-struct-by-val.ll2
-rw-r--r--test/CodeGen/Hexagon/runtime-stkchk.ll12
-rw-r--r--test/CodeGen/Hexagon/section_7275.ll10
-rw-r--r--test/CodeGen/Hexagon/signed_immediates.ll6
-rw-r--r--test/CodeGen/Hexagon/stack-align1.ll6
-rw-r--r--test/CodeGen/Hexagon/stack-align2.ll10
-rw-r--r--test/CodeGen/Hexagon/stack-alloca1.ll2
-rw-r--r--test/CodeGen/Hexagon/stack-alloca2.ll6
-rw-r--r--test/CodeGen/Hexagon/static.ll6
-rw-r--r--test/CodeGen/Hexagon/store-shift.ll12
-rw-r--r--test/CodeGen/Hexagon/sube.ll49
-rw-r--r--test/CodeGen/Hexagon/subi-asl.ll6
-rw-r--r--test/CodeGen/Hexagon/swp-const-tc.ll2
-rw-r--r--test/CodeGen/Hexagon/swp-matmul-bitext.ll2
-rw-r--r--test/CodeGen/Hexagon/swp-max.ll4
-rw-r--r--test/CodeGen/Hexagon/swp-multi-loops.ll8
-rw-r--r--test/CodeGen/Hexagon/swp-stages4.ll94
-rw-r--r--test/CodeGen/Hexagon/swp-stages5.ll78
-rw-r--r--test/CodeGen/Hexagon/swp-vmult.ll8
-rw-r--r--test/CodeGen/Hexagon/swp-vsum.ll6
-rw-r--r--test/CodeGen/Hexagon/tail-dup-subreg-map.ll2
-rw-r--r--test/CodeGen/Hexagon/tfr-to-combine.ll6
-rw-r--r--test/CodeGen/Hexagon/tls_pic.ll4
-rw-r--r--test/CodeGen/Hexagon/two-crash.ll2
-rw-r--r--test/CodeGen/Hexagon/undo-dag-shift.ll59
-rw-r--r--test/CodeGen/Hexagon/vaddh.ll2
-rw-r--r--test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll2
-rw-r--r--test/CodeGen/Hexagon/vect/vect-loadv4i16.ll4
-rw-r--r--test/CodeGen/Hexagon/vect/vect-shift-imm.ll12
-rw-r--r--test/CodeGen/Hexagon/vect/vect-shuffle.ll2
-rw-r--r--test/CodeGen/Hexagon/vect/vect-vshifts.ll4
-rw-r--r--test/CodeGen/Hexagon/vect/vect-xor.ll2
-rw-r--r--test/CodeGen/MIR/AArch64/atomic-memoperands.mir30
-rw-r--r--test/CodeGen/MIR/AArch64/register-operand-bank.mir20
-rw-r--r--test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir20
-rw-r--r--test/CodeGen/MIR/AMDGPU/fold-imm-f16-f32.mir18
-rw-r--r--test/CodeGen/MIR/AMDGPU/intrinsics.mir6
-rw-r--r--test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir20
-rw-r--r--test/CodeGen/MIR/AMDGPU/target-index-operands.mir21
-rw-r--r--test/CodeGen/MIR/Generic/llvmIR.mir4
-rw-r--r--test/CodeGen/MIR/Generic/llvmIRMissing.mir4
-rw-r--r--test/CodeGen/MIR/Generic/machine-basic-block-ir-block-reference.mir2
-rw-r--r--test/CodeGen/MIR/Generic/machine-function-missing-body-error.mir15
-rw-r--r--test/CodeGen/MIR/Generic/machine-function-missing-body.mir15
-rw-r--r--test/CodeGen/MIR/Generic/machine-function-missing-function.mir4
-rw-r--r--test/CodeGen/MIR/Generic/machine-function-missing-name.mir4
-rw-r--r--test/CodeGen/MIR/Generic/machine-function.mir10
-rw-r--r--test/CodeGen/MIR/Generic/register-info.mir4
-rw-r--r--test/CodeGen/MIR/Generic/runPass.mir2
-rw-r--r--test/CodeGen/MIR/X86/dynamic-regmask.ll30
-rw-r--r--test/CodeGen/MIR/X86/expected-named-register-in-allocation-hint.mir2
-rw-r--r--test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation.mir2
-rw-r--r--test/CodeGen/MIR/X86/register-operand-class-invalid0.mir13
-rw-r--r--test/CodeGen/MIR/X86/register-operand-class-invalid1.mir14
-rw-r--r--test/CodeGen/MIR/X86/register-operand-class.mir27
-rw-r--r--test/CodeGen/MIR/X86/used-physical-register-info.mir109
-rw-r--r--test/CodeGen/MSP430/AddrMode-bis-rx.ll14
-rw-r--r--test/CodeGen/MSP430/AddrMode-bis-xr.ll14
-rw-r--r--test/CodeGen/MSP430/AddrMode-mov-rx.ll14
-rw-r--r--test/CodeGen/MSP430/AddrMode-mov-xr.ll14
-rw-r--r--test/CodeGen/MSP430/Inst16mm.ll2
-rw-r--r--test/CodeGen/MSP430/Inst16mr.ll12
-rw-r--r--test/CodeGen/MSP430/Inst16ri.ll10
-rw-r--r--test/CodeGen/MSP430/Inst16rm.ll10
-rw-r--r--test/CodeGen/MSP430/Inst16rr.ll12
-rw-r--r--test/CodeGen/MSP430/Inst8mr.ll12
-rw-r--r--test/CodeGen/MSP430/Inst8ri.ll10
-rw-r--r--test/CodeGen/MSP430/Inst8rm.ll10
-rw-r--r--test/CodeGen/MSP430/Inst8rr.ll10
-rw-r--r--test/CodeGen/MSP430/bit.ll20
-rw-r--r--test/CodeGen/MSP430/byval.ll2
-rw-r--r--test/CodeGen/MSP430/cc_args.ll63
-rw-r--r--test/CodeGen/MSP430/cc_ret.ll12
-rw-r--r--test/CodeGen/MSP430/jumptable.ll4
-rw-r--r--test/CodeGen/MSP430/memset.ll6
-rw-r--r--test/CodeGen/MSP430/setcc.ll56
-rw-r--r--test/CodeGen/MSP430/struct-return.ll23
-rw-r--r--test/CodeGen/MSP430/vararg.ll10
-rw-r--r--test/CodeGen/Mips/2009-11-16-CstPoolLoad.ll4
-rw-r--r--test/CodeGen/Mips/2010-07-20-Switch.ll53
-rw-r--r--test/CodeGen/Mips/Fast-ISel/check-disabled-mcpus.ll30
-rw-r--r--test/CodeGen/Mips/Fast-ISel/fastcc-miss.ll2
-rw-r--r--test/CodeGen/Mips/abicalls.ll12
-rw-r--r--test/CodeGen/Mips/blockaddr.ll12
-rw-r--r--test/CodeGen/Mips/brconnez.ll4
-rw-r--r--test/CodeGen/Mips/cconv/arguments-float.ll45
-rw-r--r--test/CodeGen/Mips/cconv/arguments-fp128.ll2
-rw-r--r--test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll4
-rw-r--r--test/CodeGen/Mips/cconv/arguments-hard-float.ll14
-rw-r--r--test/CodeGen/Mips/cconv/arguments-hard-fp128.ll2
-rw-r--r--test/CodeGen/Mips/cconv/arguments-struct.ll2
-rw-r--r--test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll2
-rw-r--r--test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll2
-rw-r--r--test/CodeGen/Mips/cconv/arguments-varargs.ll62
-rw-r--r--test/CodeGen/Mips/cconv/arguments.ll6
-rw-r--r--test/CodeGen/Mips/cconv/return-float.ll6
-rw-r--r--test/CodeGen/Mips/cconv/return-hard-float.ll6
-rw-r--r--test/CodeGen/Mips/cconv/return-hard-fp128.ll4
-rw-r--r--test/CodeGen/Mips/cconv/return-hard-struct-f128.ll4
-rw-r--r--test/CodeGen/Mips/cconv/return-struct.ll36
-rw-r--r--test/CodeGen/Mips/cconv/return.ll12
-rw-r--r--test/CodeGen/Mips/cconv/roundl-call.ll10
-rw-r--r--test/CodeGen/Mips/cins.ll92
-rw-r--r--test/CodeGen/Mips/compactbranches/compact-branches-64.ll3
-rw-r--r--test/CodeGen/Mips/compactbranches/compact-branches.ll10
-rw-r--r--test/CodeGen/Mips/compactbranches/empty-block.mir92
-rw-r--r--test/CodeGen/Mips/cstmaterialization/stack.ll2
-rw-r--r--test/CodeGen/Mips/dext.ll105
-rw-r--r--test/CodeGen/Mips/elf_eflags.ll9
-rw-r--r--test/CodeGen/Mips/fastcc.ll76
-rw-r--r--test/CodeGen/Mips/fcmp.ll50
-rw-r--r--test/CodeGen/Mips/fcopysign-f32-f64.ll4
-rw-r--r--test/CodeGen/Mips/global-address.ll12
-rw-r--r--test/CodeGen/Mips/inlineasm-constraint_ZC_2.ll6
-rw-r--r--test/CodeGen/Mips/llvm-ir/ashr.ll30
-rw-r--r--test/CodeGen/Mips/llvm-ir/call.ll24
-rw-r--r--test/CodeGen/Mips/llvm-ir/lshr.ll28
-rw-r--r--test/CodeGen/Mips/llvm-ir/shl.ll28
-rw-r--r--test/CodeGen/Mips/load-store-left-right.ll219
-rw-r--r--test/CodeGen/Mips/mature-mc-support.ll2
-rw-r--r--test/CodeGen/Mips/micromips-compact-branches.ll3
-rw-r--r--test/CodeGen/Mips/micromips-li.ll2
-rw-r--r--test/CodeGen/Mips/mips64-f128-call.ll27
-rw-r--r--test/CodeGen/Mips/mips64-f128.ll58
-rw-r--r--test/CodeGen/Mips/mips64-libcall.ll2
-rw-r--r--test/CodeGen/Mips/mips64instrs.ll12
-rw-r--r--test/CodeGen/Mips/mno-ldc1-sdc1.ll46
-rw-r--r--test/CodeGen/Mips/msa/3r_4r_widen.ll84
-rw-r--r--test/CodeGen/Mips/msa/basic_operations.ll59
-rw-r--r--test/CodeGen/Mips/msa/bitwise.ll16
-rw-r--r--test/CodeGen/Mips/msa/bmzi_bmnzi.ll55
-rw-r--r--test/CodeGen/Mips/msa/f16-llvm-ir.ll14
-rw-r--r--test/CodeGen/Mips/msa/i5-b.ll8
-rw-r--r--test/CodeGen/Mips/msa/i5_ld_st.ll32
-rw-r--r--test/CodeGen/Mips/msa/immediates.ll10
-rw-r--r--test/CodeGen/Mips/o32_cc_byval.ll54
-rw-r--r--test/CodeGen/Mips/o32_cc_vararg.ll8
-rw-r--r--test/CodeGen/Mips/return_address.ll2
-rw-r--r--test/CodeGen/Mips/stackcoloring.ll8
-rw-r--r--test/CodeGen/Mips/start-asm-file.ll2
-rw-r--r--test/CodeGen/Mips/stchar.ll4
-rw-r--r--test/CodeGen/Mips/tailcall/tailcall-wrong-isa.ll21
-rw-r--r--test/CodeGen/Mips/tailcall/tailcall.ll69
-rw-r--r--test/CodeGen/Mips/tnaked.ll2
-rw-r--r--test/CodeGen/Mips/xray-mips-attribute-instrumentation.ll147
-rw-r--r--test/CodeGen/Mips/xray-section-group.ll31
-rw-r--r--test/CodeGen/NVPTX/LoadStoreVectorizer.ll34
-rw-r--r--test/CodeGen/NVPTX/access-non-generic.ll3
-rw-r--r--test/CodeGen/NVPTX/add-128bit.ll2
-rw-r--r--test/CodeGen/NVPTX/aggregate-return.ll35
-rw-r--r--test/CodeGen/NVPTX/bug22322.ll8
-rw-r--r--test/CodeGen/NVPTX/combine-min-max.ll134
-rw-r--r--test/CodeGen/NVPTX/convert-fp.ll115
-rw-r--r--test/CodeGen/NVPTX/ctlz.ll128
-rw-r--r--test/CodeGen/NVPTX/f16-instructions.ll1063
-rw-r--r--test/CodeGen/NVPTX/f16x2-instructions.ll1426
-rw-r--r--test/CodeGen/NVPTX/fast-math.ll137
-rw-r--r--test/CodeGen/NVPTX/fcos-no-fast-math.ll14
-rw-r--r--test/CodeGen/NVPTX/fsin-no-fast-math.ll14
-rw-r--r--test/CodeGen/NVPTX/global-variable-big.ll9
-rw-r--r--test/CodeGen/NVPTX/half.ll8
-rw-r--r--test/CodeGen/NVPTX/idioms.ll31
-rw-r--r--test/CodeGen/NVPTX/intrinsics.ll101
-rw-r--r--test/CodeGen/NVPTX/ldg-invariant.ll24
-rw-r--r--test/CodeGen/NVPTX/ldparam-v4.ll5
-rw-r--r--test/CodeGen/NVPTX/lower-aggr-copies.ll44
-rw-r--r--test/CodeGen/NVPTX/lower-alloca.ll2
-rw-r--r--test/CodeGen/NVPTX/math-intrins.ll25
-rw-r--r--test/CodeGen/NVPTX/misaligned-vector-ldst.ll58
-rw-r--r--test/CodeGen/NVPTX/named-barriers.ll40
-rw-r--r--test/CodeGen/NVPTX/nvvm-reflect.ll65
-rw-r--r--test/CodeGen/NVPTX/param-load-store.ll939
-rw-r--r--test/CodeGen/NVPTX/rsqrt.ll13
-rw-r--r--test/CodeGen/NVPTX/sqrt-approx.ll150
-rw-r--r--test/CodeGen/NVPTX/vec-param-load.ll83
-rw-r--r--test/CodeGen/NVPTX/vec8.ll13
-rw-r--r--test/CodeGen/NVPTX/vector-call.ll22
-rw-r--r--test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll36
-rw-r--r--test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll1
-rw-r--r--test/CodeGen/PowerPC/BreakableToken-reduced.ll4
-rw-r--r--test/CodeGen/PowerPC/aantidep-def-ec.mir16
-rw-r--r--test/CodeGen/PowerPC/addegluecrash.ll58
-rw-r--r--test/CodeGen/PowerPC/addi-licm.ll8
-rw-r--r--test/CodeGen/PowerPC/anon_aggr.ll59
-rw-r--r--test/CodeGen/PowerPC/atomics-regression.ll9546
-rw-r--r--test/CodeGen/PowerPC/bitcasts-direct-move.ll4
-rw-r--r--test/CodeGen/PowerPC/branch_coalesce.ll31
-rw-r--r--test/CodeGen/PowerPC/complex-return.ll12
-rw-r--r--test/CodeGen/PowerPC/crbit-asm.ll7
-rw-r--r--test/CodeGen/PowerPC/crbits.ll9
-rw-r--r--test/CodeGen/PowerPC/ctrloop-i128.ll34
-rw-r--r--test/CodeGen/PowerPC/ctrloop-intrin.ll12
-rw-r--r--test/CodeGen/PowerPC/expand-contiguous-isel.ll151
-rw-r--r--test/CodeGen/PowerPC/expand-isel-1.mir57
-rw-r--r--test/CodeGen/PowerPC/expand-isel-2.mir57
-rw-r--r--test/CodeGen/PowerPC/expand-isel-3.mir58
-rw-r--r--test/CodeGen/PowerPC/expand-isel-4.mir59
-rw-r--r--test/CodeGen/PowerPC/expand-isel-5.mir54
-rw-r--r--test/CodeGen/PowerPC/expand-isel-6.mir57
-rw-r--r--test/CodeGen/PowerPC/expand-isel-7.mir58
-rw-r--r--test/CodeGen/PowerPC/expand-isel-8.mir65
-rw-r--r--test/CodeGen/PowerPC/expand-isel.ll227
-rw-r--r--test/CodeGen/PowerPC/fast-isel-load-store.ll2
-rw-r--r--test/CodeGen/PowerPC/fma-aggr-FMF.ll35
-rw-r--r--test/CodeGen/PowerPC/fold-zero.ll21
-rw-r--r--test/CodeGen/PowerPC/fp-int-conversions-direct-moves.ll8
-rw-r--r--test/CodeGen/PowerPC/fp128-bitcast-after-operation.ll32
-rw-r--r--test/CodeGen/PowerPC/i1-ext-fold.ll25
-rw-r--r--test/CodeGen/PowerPC/i1-to-double.ll22
-rw-r--r--test/CodeGen/PowerPC/i64_fp_round.ll11
-rw-r--r--test/CodeGen/PowerPC/ifcvt.ll11
-rw-r--r--test/CodeGen/PowerPC/indirectbr.ll36
-rw-r--r--test/CodeGen/PowerPC/isel.ll19
-rw-r--r--test/CodeGen/PowerPC/jaggedstructs.ll52
-rw-r--r--test/CodeGen/PowerPC/lsa.ll16
-rw-r--r--test/CodeGen/PowerPC/mature-mc-support.ll2
-rw-r--r--test/CodeGen/PowerPC/mcm-obj.ll5
-rw-r--r--test/CodeGen/PowerPC/misched-inorder-latency.ll4
-rw-r--r--test/CodeGen/PowerPC/optcmp.ll32
-rw-r--r--test/CodeGen/PowerPC/p8-isel-sched.ll13
-rw-r--r--test/CodeGen/PowerPC/p8-scalar_vector_conversions.ll601
-rw-r--r--test/CodeGen/PowerPC/ppc-crbits-onoff.ll13
-rw-r--r--test/CodeGen/PowerPC/ppc-shrink-wrapping.ll2
-rw-r--r--test/CodeGen/PowerPC/ppc64-align-long-double.ll41
-rw-r--r--test/CodeGen/PowerPC/ppc64-gep-opt.ll4
-rw-r--r--test/CodeGen/PowerPC/ppc64le-aggregates.ll5
-rw-r--r--test/CodeGen/PowerPC/pr30451.ll20
-rw-r--r--test/CodeGen/PowerPC/pr32063.ll16
-rw-r--r--test/CodeGen/PowerPC/pr32140.ll59
-rw-r--r--test/CodeGen/PowerPC/pristine-and-livein.mir330
-rw-r--r--test/CodeGen/PowerPC/select-i1-vs-i1.ll186
-rw-r--r--test/CodeGen/PowerPC/select_const.ll789
-rw-r--r--test/CodeGen/PowerPC/setcc-logic.ll478
-rw-r--r--test/CodeGen/PowerPC/setcc-to-sub.ll73
-rw-r--r--test/CodeGen/PowerPC/sjlj_no0x.ll29
-rw-r--r--test/CodeGen/PowerPC/srl-mask.ll11
-rw-r--r--test/CodeGen/PowerPC/stacksize.ll86
-rw-r--r--test/CodeGen/PowerPC/structsinmem.ll28
-rw-r--r--test/CodeGen/PowerPC/structsinregs.ll60
-rw-r--r--test/CodeGen/PowerPC/subreg-postra-2.ll7
-rw-r--r--test/CodeGen/PowerPC/subreg-postra.ll6
-rw-r--r--test/CodeGen/PowerPC/subtract_from_imm.ll41
-rw-r--r--test/CodeGen/PowerPC/swaps-le-4.ll8
-rw-r--r--test/CodeGen/PowerPC/swaps-le-7.ll4
-rw-r--r--test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll6
-rw-r--r--test/CodeGen/PowerPC/tail-dup-break-cfg.ll140
-rw-r--r--test/CodeGen/PowerPC/tail-dup-layout.ll494
-rw-r--r--test/CodeGen/PowerPC/toc-load-sched-bug.ll28
-rw-r--r--test/CodeGen/PowerPC/vec_absd.ll4
-rw-r--r--test/CodeGen/PowerPC/vec_cmp.ll40
-rw-r--r--test/CodeGen/PowerPC/vsx-args.ll12
-rw-r--r--test/CodeGen/PowerPC/vsx-infl-copy1.ll18
-rw-r--r--test/CodeGen/PowerPC/vsx-p9.ll12
-rw-r--r--test/CodeGen/SPARC/mature-mc-support.ll2
-rw-r--r--test/CodeGen/SPARC/register-clobber.ll35
-rw-r--r--test/CodeGen/SPARC/reserved-regs.ll4
-rwxr-xr-xtest/CodeGen/SPARC/sjlj.ll11
-rw-r--r--test/CodeGen/SystemZ/DAGCombine_trunc_extract.ll18
-rw-r--r--test/CodeGen/SystemZ/DAGCombiner_illegal_BUILD_VECTOR.ll26
-rw-r--r--test/CodeGen/SystemZ/expand-zext-pseudo.ll132
-rw-r--r--test/CodeGen/SystemZ/extract-vector-elt-zEC12.ll21
-rw-r--r--test/CodeGen/SystemZ/fold-memory-op-impl.ll129
-rw-r--r--test/CodeGen/SystemZ/fp-cmp-05.ll8
-rw-r--r--test/CodeGen/SystemZ/int-cmp-44.ll6
-rw-r--r--test/CodeGen/SystemZ/locr-legal-regclass.ll20
-rw-r--r--test/CodeGen/SystemZ/mature-mc-support.ll2
-rw-r--r--test/CodeGen/SystemZ/memchr-01.ll54
-rw-r--r--test/CodeGen/SystemZ/memchr-02.ll57
-rw-r--r--test/CodeGen/SystemZ/memcmp-02.ll139
-rw-r--r--test/CodeGen/SystemZ/pr32372.ll31
-rw-r--r--test/CodeGen/SystemZ/pr32505.ll20
-rw-r--r--test/CodeGen/SystemZ/splitMove_undefReg_mverifier.ll413
-rw-r--r--test/CodeGen/SystemZ/stack-guard.ll8
-rw-r--r--test/CodeGen/SystemZ/strcmp-02.ll72
-rw-r--r--test/CodeGen/SystemZ/strlen-02.ll39
-rw-r--r--test/CodeGen/SystemZ/unaligned-01.ll5
-rw-r--r--test/CodeGen/SystemZ/undef-flag.ll22
-rw-r--r--test/CodeGen/SystemZ/vec-cmp-cmp-logic-select.ll5784
-rw-r--r--test/CodeGen/SystemZ/vec-cmpsel.ll3378
-rw-r--r--test/CodeGen/SystemZ/vec-sext.ll91
-rw-r--r--test/CodeGen/SystemZ/vec-trunc-to-i1.ll37
-rw-r--r--test/CodeGen/SystemZ/vec-zext.ll91
-rw-r--r--test/CodeGen/SystemZ/vectorizer-output-3xi32.ll10
-rw-r--r--test/CodeGen/Thumb/2010-07-15-debugOrdering.ll2
-rw-r--r--test/CodeGen/Thumb/PR17309.ll16
-rw-r--r--test/CodeGen/Thumb/cmp-add-fold.ll3
-rw-r--r--test/CodeGen/Thumb/copy_thumb.ll12
-rw-r--r--test/CodeGen/Thumb/ispositive.ll9
-rw-r--r--test/CodeGen/Thumb/long.ll150
-rw-r--r--test/CodeGen/Thumb/mature-mc-support.ll2
-rw-r--r--test/CodeGen/Thumb/remove-unneeded-push-pop.ll1052
-rw-r--r--test/CodeGen/Thumb/stack-access.ll26
-rw-r--r--test/CodeGen/Thumb/stack-coloring-without-frame-ptr.ll10
-rw-r--r--test/CodeGen/Thumb/stack_guard_remat.ll8
-rw-r--r--test/CodeGen/Thumb/stm-deprecated.ll19
-rw-r--r--test/CodeGen/Thumb/tbb-reuse.mir151
-rw-r--r--test/CodeGen/Thumb/thumb-shrink-wrapping.ll22
-rw-r--r--test/CodeGen/Thumb2/cbnz.ll2
-rw-r--r--test/CodeGen/Thumb2/float-cmp.ll16
-rw-r--r--test/CodeGen/Thumb2/ifcvt-compare.ll2
-rw-r--r--test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll4
-rw-r--r--test/CodeGen/Thumb2/intrinsics-coprocessor.ll93
-rw-r--r--test/CodeGen/Thumb2/stack_guard_remat.ll8
-rw-r--r--test/CodeGen/Thumb2/tbb-removeadd.mir124
-rw-r--r--test/CodeGen/Thumb2/thumb2-pack.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-rev.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-smla.ll4
-rw-r--r--test/CodeGen/Thumb2/thumb2-smul.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-sxt-uxt.ll51
-rw-r--r--test/CodeGen/Thumb2/thumb2-sxt_rot.ll31
-rw-r--r--test/CodeGen/Thumb2/thumb2-uxt_rot.ll41
-rw-r--r--test/CodeGen/Thumb2/thumb2-uxtb.ll120
-rw-r--r--test/CodeGen/Thumb2/v8_IT_4.ll5
-rw-r--r--test/CodeGen/WebAssembly/address-offsets.ll4
-rw-r--r--test/CodeGen/WebAssembly/byval.ll25
-rw-r--r--test/CodeGen/WebAssembly/call.ll25
-rw-r--r--test/CodeGen/WebAssembly/cfg-stackify.ll8
-rw-r--r--test/CodeGen/WebAssembly/cfi.ll2
-rw-r--r--test/CodeGen/WebAssembly/comparisons_f32.ll104
-rw-r--r--test/CodeGen/WebAssembly/comparisons_f64.ll104
-rw-r--r--test/CodeGen/WebAssembly/comparisons_i32.ll24
-rw-r--r--test/CodeGen/WebAssembly/comparisons_i64.ll24
-rw-r--r--test/CodeGen/WebAssembly/conv.ll4
-rw-r--r--test/CodeGen/WebAssembly/copysign-casts.ll10
-rw-r--r--test/CodeGen/WebAssembly/cpus.ll16
-rw-r--r--test/CodeGen/WebAssembly/dbgvalue.ll4
-rw-r--r--test/CodeGen/WebAssembly/dead-vreg.ll2
-rw-r--r--test/CodeGen/WebAssembly/divrem-constant.ll2
-rw-r--r--test/CodeGen/WebAssembly/f16.ll29
-rw-r--r--test/CodeGen/WebAssembly/f32.ll64
-rw-r--r--test/CodeGen/WebAssembly/f64.ll64
-rw-r--r--test/CodeGen/WebAssembly/fast-isel-noreg.ll2
-rw-r--r--test/CodeGen/WebAssembly/fast-isel.ll3
-rw-r--r--test/CodeGen/WebAssembly/frem.ll4
-rw-r--r--test/CodeGen/WebAssembly/func.ll6
-rw-r--r--test/CodeGen/WebAssembly/function-bitcasts.ll32
-rw-r--r--test/CodeGen/WebAssembly/global.ll54
-rw-r--r--test/CodeGen/WebAssembly/globl.ll2
-rw-r--r--test/CodeGen/WebAssembly/i128.ll2
-rw-r--r--test/CodeGen/WebAssembly/i32-load-store-alignment.ll4
-rw-r--r--test/CodeGen/WebAssembly/i32.ll88
-rw-r--r--test/CodeGen/WebAssembly/i64-load-store-alignment.ll4
-rw-r--r--test/CodeGen/WebAssembly/i64.ll88
-rw-r--r--test/CodeGen/WebAssembly/ident.ll2
-rw-r--r--test/CodeGen/WebAssembly/immediates.ll2
-rw-r--r--test/CodeGen/WebAssembly/implicit-def.ll2
-rw-r--r--test/CodeGen/WebAssembly/inline-asm.ll5
-rw-r--r--test/CodeGen/WebAssembly/irreducible-cfg.ll4
-rw-r--r--test/CodeGen/WebAssembly/legalize.ll4
-rw-r--r--test/CodeGen/WebAssembly/load-ext.ll4
-rw-r--r--test/CodeGen/WebAssembly/load-store-i1.ll4
-rw-r--r--test/CodeGen/WebAssembly/load.ll14
-rw-r--r--test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll2
-rw-r--r--test/CodeGen/WebAssembly/lower-em-exceptions-whitelist.ll2
-rw-r--r--test/CodeGen/WebAssembly/lower-em-exceptions.ll2
-rw-r--r--test/CodeGen/WebAssembly/lower-em-sjlj.ll2
-rw-r--r--test/CodeGen/WebAssembly/mem-intrinsics.ll4
-rw-r--r--test/CodeGen/WebAssembly/memory-addr32.ll17
-rw-r--r--test/CodeGen/WebAssembly/non-executable-stack.ll2
-rw-r--r--test/CodeGen/WebAssembly/offset-folding.ll2
-rw-r--r--test/CodeGen/WebAssembly/offset.ll4
-rw-r--r--test/CodeGen/WebAssembly/phi.ll9
-rw-r--r--test/CodeGen/WebAssembly/reg-stackify.ll8
-rw-r--r--test/CodeGen/WebAssembly/return-int32.ll8
-rw-r--r--test/CodeGen/WebAssembly/return-void.ll6
-rw-r--r--test/CodeGen/WebAssembly/returned.ll4
-rw-r--r--test/CodeGen/WebAssembly/select.ll6
-rw-r--r--test/CodeGen/WebAssembly/signext-zeroext.ll4
-rw-r--r--test/CodeGen/WebAssembly/simd-arith.ll10
-rw-r--r--test/CodeGen/WebAssembly/stack-alignment.ll102
-rw-r--r--test/CodeGen/WebAssembly/store-trunc.ll4
-rw-r--r--test/CodeGen/WebAssembly/store.ll18
-rw-r--r--test/CodeGen/WebAssembly/switch.ll2
-rw-r--r--test/CodeGen/WebAssembly/unreachable.ll2
-rw-r--r--test/CodeGen/WebAssembly/unsupported-function-bitcasts.ll7
-rw-r--r--test/CodeGen/WebAssembly/unused-argument.ll4
-rw-r--r--test/CodeGen/WebAssembly/userstack.ll173
-rw-r--r--test/CodeGen/WebAssembly/varargs.ll11
-rw-r--r--test/CodeGen/WebAssembly/vtable.ll13
-rw-r--r--test/CodeGen/X86/2003-11-03-GlobalBool.ll6
-rw-r--r--test/CodeGen/X86/2004-02-13-FrameReturnAddress.ll19
-rw-r--r--test/CodeGen/X86/2004-02-14-InefficientStackPointer.ll9
-rw-r--r--test/CodeGen/X86/2005-01-17-CycleInDAG.ll17
-rw-r--r--test/CodeGen/X86/2005-02-14-IllegalAssembler.ll3
-rw-r--r--test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll35
-rw-r--r--test/CodeGen/X86/2006-03-01-InstrSchedBug.ll28
-rw-r--r--test/CodeGen/X86/2006-03-02-InstrSchedBug.ll17
-rw-r--r--test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll1
-rw-r--r--test/CodeGen/X86/2008-02-14-BitMiscompile.ll3
-rw-r--r--test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll5
-rw-r--r--test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll58
-rw-r--r--test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll2
-rw-r--r--test/CodeGen/X86/2011-10-19-widen_vselect.ll100
-rw-r--r--test/CodeGen/X86/2011-10-21-widen-cmp.ll6
-rw-r--r--test/CodeGen/X86/2011-11-30-or.ll2
-rw-r--r--test/CodeGen/X86/2011-12-15-vec_shift.ll2
-rw-r--r--test/CodeGen/X86/2011-12-8-bitcastintprom.ll1
-rw-r--r--test/CodeGen/X86/2012-07-10-extload64.ll32
-rw-r--r--test/CodeGen/X86/2012-11-28-merge-store-alias.ll2
-rw-r--r--test/CodeGen/X86/DynamicCalleeSavedRegisters.ll60
-rw-r--r--test/CodeGen/X86/GlobalISel/X86-regbankselect.mir634
-rw-r--r--test/CodeGen/X86/GlobalISel/binop-isel.ll186
-rw-r--r--test/CodeGen/X86/GlobalISel/constant.ll54
-rw-r--r--test/CodeGen/X86/GlobalISel/frameIndex-instructionselect.mir36
-rw-r--r--test/CodeGen/X86/GlobalISel/frameIndex.ll30
-rw-r--r--test/CodeGen/X86/GlobalISel/irtranslator-call.ll1
-rw-r--r--test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll310
-rw-r--r--test/CodeGen/X86/GlobalISel/irtranslator-callingconv_64bit.ll29
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-add.mir40
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-const.mir43
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-sub.mir40
-rw-r--r--test/CodeGen/X86/GlobalISel/memop-isel.ll189
-rw-r--r--test/CodeGen/X86/GlobalISel/select-constant.mir143
-rw-r--r--test/CodeGen/X86/GlobalISel/x86_64-instructionselect.mir1022
-rw-r--r--test/CodeGen/X86/MergeConsecutiveStores.ll18
-rw-r--r--test/CodeGen/X86/StackColoring-dbg.ll8
-rw-r--r--test/CodeGen/X86/StackColoring.ll194
-rw-r--r--test/CodeGen/X86/absolute-cmp.ll39
-rw-r--r--test/CodeGen/X86/absolute-rotate.ll4
-rw-r--r--test/CodeGen/X86/add-of-carry.ll31
-rw-r--r--test/CodeGen/X86/adde-carry.ll180
-rw-r--r--test/CodeGen/X86/aes_intrinsics.ll64
-rw-r--r--test/CodeGen/X86/and-sink.ll181
-rw-r--r--test/CodeGen/X86/arg-copy-elide.ll299
-rw-r--r--test/CodeGen/X86/atomic128.ll504
-rw-r--r--test/CodeGen/X86/avg.ll871
-rw-r--r--test/CodeGen/X86/avx-cvt-3.ll148
-rw-r--r--test/CodeGen/X86/avx-cvt.ll3
-rw-r--r--test/CodeGen/X86/avx-intrinsics-fast-isel.ll40
-rw-r--r--test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll4
-rw-r--r--test/CodeGen/X86/avx-intrinsics-x86.ll3005
-rw-r--r--test/CodeGen/X86/avx-intrinsics-x86_64.ll92
-rwxr-xr-xtest/CodeGen/X86/avx-shuffle-x86_32.ll3
-rwxr-xr-xtest/CodeGen/X86/avx-trunc.ll2
-rw-r--r--test/CodeGen/X86/avx-vbroadcast.ll314
-rw-r--r--test/CodeGen/X86/avx-vperm2x128.ll3
-rw-r--r--test/CodeGen/X86/avx-vzeroupper.ll5
-rwxr-xr-xtest/CodeGen/X86/avx2-conversions.ll4
-rw-r--r--test/CodeGen/X86/avx2-gather.ll60
-rw-r--r--test/CodeGen/X86/avx2-intrinsics-fast-isel.ll26
-rw-r--r--test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll30
-rw-r--r--test/CodeGen/X86/avx2-intrinsics-x86.ll133
-rw-r--r--test/CodeGen/X86/avx2-shift.ll12
-rw-r--r--test/CodeGen/X86/avx2-vbroadcast.ll430
-rw-r--r--test/CodeGen/X86/avx2-vector-shifts.ll12
-rw-r--r--test/CodeGen/X86/avx512-adc-sbb.ll27
-rw-r--r--test/CodeGen/X86/avx512-any_extend_load.ll21
-rw-r--r--test/CodeGen/X86/avx512-arith.ll1
-rw-r--r--test/CodeGen/X86/avx512-bugfix-26264.ll26
-rw-r--r--test/CodeGen/X86/avx512-calling-conv.ll5
-rw-r--r--test/CodeGen/X86/avx512-cmp-kor-sequence.ll1
-rw-r--r--test/CodeGen/X86/avx512-cmp.ll72
-rw-r--r--test/CodeGen/X86/avx512-cvt.ll525
-rw-r--r--test/CodeGen/X86/avx512-ext.ll411
-rw-r--r--test/CodeGen/X86/avx512-extract-subvector.ll95
-rw-r--r--test/CodeGen/X86/avx512-fsel.ll35
-rw-r--r--test/CodeGen/X86/avx512-gather-scatter-intrin.ll226
-rw-r--r--test/CodeGen/X86/avx512-insert-extract.ll1496
-rw-r--r--test/CodeGen/X86/avx512-insert-extract_i1.ll37
-rw-r--r--test/CodeGen/X86/avx512-intrinsics-upgrade.ll19
-rw-r--r--test/CodeGen/X86/avx512-intrinsics.ll928
-rw-r--r--test/CodeGen/X86/avx512-load-store.ll189
-rw-r--r--test/CodeGen/X86/avx512-logic.ll50
-rw-r--r--test/CodeGen/X86/avx512-mask-op.ll2197
-rw-r--r--test/CodeGen/X86/avx512-mask-spills.ll4
-rw-r--r--test/CodeGen/X86/avx512-masked-memop-64-32.ll60
-rw-r--r--test/CodeGen/X86/avx512-masked_memop-16-8.ll13
-rw-r--r--test/CodeGen/X86/avx512-memfold.ll73
-rw-r--r--test/CodeGen/X86/avx512-mov.ll2
-rw-r--r--test/CodeGen/X86/avx512-pmovxrm.ll6
-rw-r--r--test/CodeGen/X86/avx512-regcall-Mask.ll14
-rw-r--r--test/CodeGen/X86/avx512-regcall-NoMask.ll37
-rw-r--r--test/CodeGen/X86/avx512-select.ll8
-rw-r--r--test/CodeGen/X86/avx512-skx-insert-subvec.ll19
-rw-r--r--test/CodeGen/X86/avx512-trunc.ll277
-rw-r--r--test/CodeGen/X86/avx512-vbroadcast.ll22
-rw-r--r--test/CodeGen/X86/avx512-vbroadcasti128.ll24
-rw-r--r--test/CodeGen/X86/avx512-vec-cmp.ll161
-rw-r--r--test/CodeGen/X86/avx512-vpermv3-commute.ll18
-rw-r--r--test/CodeGen/X86/avx512-vpternlog-commute.ll817
-rw-r--r--test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll572
-rw-r--r--test/CodeGen/X86/avx512bw-intrinsics.ll240
-rw-r--r--test/CodeGen/X86/avx512bw-mask-op.ll90
-rw-r--r--test/CodeGen/X86/avx512bw-vec-cmp.ll36
-rw-r--r--test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll24
-rw-r--r--test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll891
-rw-r--r--test/CodeGen/X86/avx512bwvl-intrinsics.ll2330
-rw-r--r--test/CodeGen/X86/avx512bwvl-vec-cmp.ll72
-rw-r--r--test/CodeGen/X86/avx512cd-intrinsics-upgrade.ll47
-rw-r--r--test/CodeGen/X86/avx512cd-intrinsics.ll98
-rw-r--r--test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll71
-rw-r--r--test/CodeGen/X86/avx512cdvl-intrinsics.ll84
-rw-r--r--test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll38
-rw-r--r--test/CodeGen/X86/avx512dq-intrinsics.ll123
-rw-r--r--test/CodeGen/X86/avx512dq-mask-op.ll8
-rw-r--r--test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll186
-rw-r--r--test/CodeGen/X86/avx512dqvl-intrinsics.ll201
-rw-r--r--test/CodeGen/X86/avx512er-intrinsics.ll125
-rw-r--r--test/CodeGen/X86/avx512ifma-intrinsics.ll97
-rw-r--r--test/CodeGen/X86/avx512ifmavl-intrinsics.ll97
-rw-r--r--test/CodeGen/X86/avx512vbmivl-intrinsics.ll38
-rw-r--r--test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll157
-rw-r--r--test/CodeGen/X86/avx512vl-intrinsics.ll2092
-rw-r--r--test/CodeGen/X86/avx512vl-logic.ll72
-rw-r--r--test/CodeGen/X86/avx512vl-vbroadcast.ll30
-rw-r--r--test/CodeGen/X86/avx512vl-vec-cmp.ll144
-rw-r--r--test/CodeGen/X86/bc-extract.ll37
-rw-r--r--test/CodeGen/X86/bitcast-mmx.ll126
-rw-r--r--test/CodeGen/X86/bitreverse.ll573
-rw-r--r--test/CodeGen/X86/block-placement.ll69
-rw-r--r--test/CodeGen/X86/block-placement.mir4
-rw-r--r--test/CodeGen/X86/bool-ext-inc.ll32
-rw-r--r--test/CodeGen/X86/branchfolding-debugloc.ll83
-rw-r--r--test/CodeGen/X86/brcond.ll39
-rw-r--r--test/CodeGen/X86/break-false-dep.ll57
-rw-r--r--test/CodeGen/X86/bt.ll8
-rw-r--r--test/CodeGen/X86/buildvec-insertvec.ll517
-rw-r--r--test/CodeGen/X86/bypass-slow-division-32.ll15
-rw-r--r--test/CodeGen/X86/catchpad-lifetime.ll12
-rw-r--r--test/CodeGen/X86/catchpad-weight.ll8
-rw-r--r--test/CodeGen/X86/chain_order.ll4
-rw-r--r--test/CodeGen/X86/clear_upper_vector_element_bits.ll1296
-rw-r--r--test/CodeGen/X86/clflushopt.ll13
-rw-r--r--test/CodeGen/X86/clzero.ll23
-rw-r--r--test/CodeGen/X86/cmov.ll5
-rw-r--r--test/CodeGen/X86/cmovcmov.ll19
-rw-r--r--test/CodeGen/X86/code_placement_outline_optional_branches.ll77
-rw-r--r--test/CodeGen/X86/codegen-prepare-addrmode-sext.ll41
-rw-r--r--test/CodeGen/X86/codegen-prepare-extload.ll15
-rw-r--r--test/CodeGen/X86/codegen-prepare.ll1
-rw-r--r--test/CodeGen/X86/combine-abs.ll99
-rw-r--r--test/CodeGen/X86/combine-and.ll25
-rw-r--r--test/CodeGen/X86/combine-fcopysign.ll2
-rw-r--r--test/CodeGen/X86/combine-shl.ll30
-rw-r--r--test/CodeGen/X86/combine-testm-and.ll10
-rw-r--r--test/CodeGen/X86/combiner-aa-0.ll20
-rw-r--r--test/CodeGen/X86/combiner-aa-1.ll23
-rw-r--r--test/CodeGen/X86/commute-3dnow.ll270
-rw-r--r--test/CodeGen/X86/commute-clmul.ll73
-rw-r--r--test/CodeGen/X86/commute-fcmp.ll38
-rw-r--r--test/CodeGen/X86/commute-xop.ll247
-rw-r--r--test/CodeGen/X86/compare-global.ll2
-rw-r--r--test/CodeGen/X86/complex-fastmath.ll215
-rw-r--r--test/CodeGen/X86/compress_expand.ll102
-rw-r--r--test/CodeGen/X86/conditional-indecrement.ll96
-rw-r--r--test/CodeGen/X86/conditional-tailcall.ll163
-rw-r--r--test/CodeGen/X86/copy-eflags.ll17
-rw-r--r--test/CodeGen/X86/copy-propagation.ll33
-rw-r--r--test/CodeGen/X86/crash.ll4
-rw-r--r--test/CodeGen/X86/ctpop-combine.ll10
-rw-r--r--test/CodeGen/X86/dag-fmf-cse.ll2
-rw-r--r--test/CodeGen/X86/dag-merge-fast-accesses.ll12
-rw-r--r--test/CodeGen/X86/dagcombine-and-setcc.ll7
-rw-r--r--test/CodeGen/X86/dagcombine-cse.ll37
-rw-r--r--test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll12
-rw-r--r--test/CodeGen/X86/div-rem-simplify.ll187
-rw-r--r--test/CodeGen/X86/divrem8_ext.ll3
-rw-r--r--test/CodeGen/X86/dont-trunc-store-double-to-float.ll6
-rw-r--r--test/CodeGen/X86/dropped_constructor.ll19
-rw-r--r--test/CodeGen/X86/dwarf-headers.ll109
-rw-r--r--test/CodeGen/X86/dynamic-alloca-lifetime.ll12
-rw-r--r--test/CodeGen/X86/elf-associated.ll39
-rwxr-xr-xtest/CodeGen/X86/evex-to-vex-compress.mir230
-rw-r--r--test/CodeGen/X86/extract-store.ll577
-rw-r--r--test/CodeGen/X86/extractelement-index.ll58
-rw-r--r--test/CodeGen/X86/extractelement-legalization-store-ordering.ll15
-rw-r--r--test/CodeGen/X86/fadd-combines.ll2
-rw-r--r--test/CodeGen/X86/fast-isel-abort-warm.ll14
-rw-r--r--test/CodeGen/X86/fast-isel-cmp.ll1194
-rw-r--r--test/CodeGen/X86/fast-isel-deadcode.ll8
-rw-r--r--test/CodeGen/X86/fast-isel-load-i1.ll12
-rw-r--r--test/CodeGen/X86/fast-isel-nontemporal.ll47
-rw-r--r--test/CodeGen/X86/fast-isel-select-cmov.ll63
-rw-r--r--test/CodeGen/X86/fast-isel-select-sse.ll92
-rw-r--r--test/CodeGen/X86/fast-isel-x86-64.ll2
-rw-r--r--test/CodeGen/X86/fast-isel-x86.ll2
-rw-r--r--test/CodeGen/X86/fast-isel.ll4
-rw-r--r--test/CodeGen/X86/fentry-insertion.ll16
-rw-r--r--test/CodeGen/X86/file-source-filename.ll4
-rw-r--r--test/CodeGen/X86/fma-fneg-combine.ll32
-rw-r--r--test/CodeGen/X86/fma.ll402
-rw-r--r--test/CodeGen/X86/fma_patterns.ll2
-rw-r--r--test/CodeGen/X86/fold-vector-sext-zext.ll9
-rw-r--r--test/CodeGen/X86/fp-intrinsics.ll111
-rw-r--r--test/CodeGen/X86/fp-select-cmp-and.ll32
-rw-r--r--test/CodeGen/X86/fp-une-cmp.ll4
-rw-r--r--test/CodeGen/X86/fp128-cast.ll2
-rw-r--r--test/CodeGen/X86/fp128-compare.ll6
-rw-r--r--test/CodeGen/X86/fp128-g.ll3
-rw-r--r--test/CodeGen/X86/fp128-i128.ll249
-rw-r--r--test/CodeGen/X86/fp128-libcalls.ll6
-rw-r--r--test/CodeGen/X86/fp128-load.ll6
-rw-r--r--test/CodeGen/X86/fp128-select.ll12
-rw-r--r--test/CodeGen/X86/huge-stack-offset2.ll62
-rw-r--r--test/CodeGen/X86/i256-add.ll122
-rw-r--r--test/CodeGen/X86/i386-shrink-wrapping.ll5
-rw-r--r--test/CodeGen/X86/illegal-bitfield-loadstore.ll141
-rw-r--r--test/CodeGen/X86/implicit-null-check.ll83
-rw-r--r--test/CodeGen/X86/implicit-null-checks.mir902
-rw-r--r--test/CodeGen/X86/implicit-use-spill.mir2
-rw-r--r--test/CodeGen/X86/imul.ll230
-rw-r--r--test/CodeGen/X86/inline-asm-A-constraint.ll35
-rw-r--r--test/CodeGen/X86/inline-asm-tied.ll30
-rw-r--r--test/CodeGen/X86/insertelement-zero.ll180
-rw-r--r--test/CodeGen/X86/isel-sink.ll1
-rw-r--r--test/CodeGen/X86/jump_sign.ll4
-rw-r--r--test/CodeGen/X86/known-bits-vector.ll151
-rw-r--r--test/CodeGen/X86/known-bits.ll170
-rw-r--r--test/CodeGen/X86/known-signbits-vector.ll139
-rw-r--r--test/CodeGen/X86/lea-opt-with-debug.mir122
-rw-r--r--test/CodeGen/X86/lfence.ll8
-rw-r--r--test/CodeGen/X86/licm-nested.ll2
-rw-r--r--test/CodeGen/X86/live-range-nosubreg.ll5
-rw-r--r--test/CodeGen/X86/load-combine.ll1314
-rw-r--r--test/CodeGen/X86/load-slice.ll12
-rw-r--r--test/CodeGen/X86/local_stack_symbol_ordering.ll36
-rw-r--r--test/CodeGen/X86/logical-load-fold.ll4
-rw-r--r--test/CodeGen/X86/longlong-deadload.ll24
-rw-r--r--test/CodeGen/X86/lzcnt-zext-cmp.ll241
-rw-r--r--test/CodeGen/X86/machine-outliner-debuginfo.ll75
-rw-r--r--test/CodeGen/X86/machine-outliner-tailcalls.ll35
-rw-r--r--test/CodeGen/X86/machine-outliner.ll110
-rw-r--r--test/CodeGen/X86/machine-region-info.mir83
-rw-r--r--test/CodeGen/X86/machine-trace-metrics-crash.ll4
-rw-r--r--test/CodeGen/X86/madd.ll103
-rw-r--r--test/CodeGen/X86/masked_gather_scatter.ll173
-rw-r--r--test/CodeGen/X86/masked_memop.ll70
-rw-r--r--test/CodeGen/X86/mature-mc-support.ll2
-rw-r--r--test/CodeGen/X86/memcmp.ll446
-rw-r--r--test/CodeGen/X86/mempcpy-32.ll20
-rw-r--r--test/CodeGen/X86/mempcpy.ll10
-rw-r--r--test/CodeGen/X86/merge-consecutive-loads-128.ll50
-rw-r--r--test/CodeGen/X86/merge-consecutive-loads-256.ll14
-rw-r--r--test/CodeGen/X86/merge-consecutive-loads-512.ll90
-rw-r--r--test/CodeGen/X86/merge-store-partially-alias-loads.ll8
-rw-r--r--test/CodeGen/X86/merge_store.ll1
-rw-r--r--test/CodeGen/X86/merge_store_duplicated_loads.ll88
-rw-r--r--test/CodeGen/X86/misched-aa-colored.ll10
-rw-r--r--test/CodeGen/X86/mmx-cvt.ll369
-rw-r--r--test/CodeGen/X86/mmx-fold-load.ll581
-rw-r--r--test/CodeGen/X86/mul-constant-i16.ll589
-rw-r--r--test/CodeGen/X86/mul-constant-i32.ll515
-rw-r--r--test/CodeGen/X86/mul-constant-i64.ll581
-rw-r--r--test/CodeGen/X86/mul-i256.ll296
-rw-r--r--test/CodeGen/X86/mulx32.ll21
-rw-r--r--test/CodeGen/X86/mulx64.ll20
-rw-r--r--test/CodeGen/X86/neg_cmp.ll46
-rw-r--r--test/CodeGen/X86/negative-sin.ll2
-rw-r--r--test/CodeGen/X86/nontemporal-2.ll29
-rw-r--r--test/CodeGen/X86/nontemporal-loads.ll24
-rw-r--r--test/CodeGen/X86/oddshuffles.ll34
-rw-r--r--test/CodeGen/X86/overflow.ll64
-rw-r--r--test/CodeGen/X86/peep-setb.ll79
-rw-r--r--test/CodeGen/X86/peep-test-4.ll206
-rw-r--r--test/CodeGen/X86/pmul.ll224
-rw-r--r--test/CodeGen/X86/pointer-vector.ll4
-rw-r--r--test/CodeGen/X86/pr11334.ll10
-rw-r--r--test/CodeGen/X86/pr12312.ll308
-rw-r--r--test/CodeGen/X86/pr14204.ll6
-rw-r--r--test/CodeGen/X86/pr14314.ll33
-rw-r--r--test/CodeGen/X86/pr16031.ll31
-rw-r--r--test/CodeGen/X86/pr17764.ll3
-rw-r--r--test/CodeGen/X86/pr18014.ll2
-rw-r--r--test/CodeGen/X86/pr18023.ll31
-rw-r--r--test/CodeGen/X86/pr18344.ll89
-rw-r--r--test/CodeGen/X86/pr22338.ll57
-rw-r--r--test/CodeGen/X86/pr26350.ll18
-rw-r--r--test/CodeGen/X86/pr2656.ll29
-rw-r--r--test/CodeGen/X86/pr27591.ll14
-rw-r--r--test/CodeGen/X86/pr28173.ll27
-rw-r--r--test/CodeGen/X86/pr29112.ll5
-rw-r--r--test/CodeGen/X86/pr29170.ll23
-rw-r--r--test/CodeGen/X86/pr30284.ll6
-rw-r--r--test/CodeGen/X86/pr30430.ll56
-rw-r--r--test/CodeGen/X86/pr30562.ll22
-rw-r--r--test/CodeGen/X86/pr30693.ll147
-rw-r--r--test/CodeGen/X86/pr31773.ll20
-rw-r--r--test/CodeGen/X86/pr32108.ll26
-rw-r--r--test/CodeGen/X86/pr32241.ll86
-rw-r--r--test/CodeGen/X86/pr32256.ll59
-rw-r--r--test/CodeGen/X86/pr32278.ll11
-rw-r--r--test/CodeGen/X86/pr32284.ll117
-rw-r--r--test/CodeGen/X86/pr32329.ll126
-rw-r--r--test/CodeGen/X86/pr32340.ll77
-rw-r--r--test/CodeGen/X86/pr32345.ll169
-rw-r--r--test/CodeGen/X86/pr32420.ll36
-rw-r--r--test/CodeGen/X86/pr32451.ll69
-rw-r--r--test/CodeGen/X86/pr32484.ll32
-rw-r--r--test/CodeGen/X86/pr32588.ll27
-rw-r--r--test/CodeGen/X86/pre-coalesce-2.ll281
-rw-r--r--test/CodeGen/X86/pre-coalesce.ll51
-rw-r--r--test/CodeGen/X86/pre-coalesce.mir122
-rw-r--r--test/CodeGen/X86/prefixdata.ll27
-rw-r--r--test/CodeGen/X86/promote-vec3.ll23
-rw-r--r--test/CodeGen/X86/psubus.ll338
-rw-r--r--test/CodeGen/X86/recip-fastmath.ll699
-rw-r--r--test/CodeGen/X86/recip-fastmath2.ll1064
-rw-r--r--test/CodeGen/X86/recip-pic.ll27
-rw-r--r--test/CodeGen/X86/reduce-trunc-shl.ll2
-rw-r--r--test/CodeGen/X86/regparm.ll48
-rw-r--r--test/CodeGen/X86/rot32.ll19
-rw-r--r--test/CodeGen/X86/rot64.ll39
-rw-r--r--test/CodeGen/X86/rotate.ll83
-rw-r--r--test/CodeGen/X86/rtm.ll65
-rw-r--r--test/CodeGen/X86/sad.ll881
-rw-r--r--test/CodeGen/X86/sad_variations.ll347
-rw-r--r--test/CodeGen/X86/safestack.ll5
-rw-r--r--test/CodeGen/X86/safestack_ssp.ll7
-rw-r--r--test/CodeGen/X86/scalar-int-to-fp.ll761
-rw-r--r--test/CodeGen/X86/select.ll530
-rw-r--r--test/CodeGen/X86/select_const.ll244
-rw-r--r--test/CodeGen/X86/select_meta.ll2
-rw-r--r--test/CodeGen/X86/selectiondag-order.ll97
-rw-r--r--test/CodeGen/X86/setcc-logic.ll482
-rw-r--r--test/CodeGen/X86/setcc-lowering.ll25
-rw-r--r--test/CodeGen/X86/setcc-sentinals.ll13
-rw-r--r--test/CodeGen/X86/setcc-wide-types.ll140
-rw-r--r--test/CodeGen/X86/setcc.ll24
-rw-r--r--test/CodeGen/X86/sext-i1.ll97
-rw-r--r--test/CodeGen/X86/sfence.ll8
-rw-r--r--test/CodeGen/X86/sha.ll6
-rw-r--r--test/CodeGen/X86/shrink-compare.ll6
-rw-r--r--test/CodeGen/X86/shrink_vmul.ll4
-rw-r--r--test/CodeGen/X86/shuffle-combine-crash-2.ll20
-rw-r--r--test/CodeGen/X86/shuffle-of-splat-multiuses.ll100
-rw-r--r--test/CodeGen/X86/shuffle-vs-trunc-256.ll52
-rw-r--r--test/CodeGen/X86/shuffle-vs-trunc-512.ll58
-rw-r--r--test/CodeGen/X86/split-extend-vector-inreg.ll47
-rw-r--r--test/CodeGen/X86/split-store.ll27
-rw-r--r--test/CodeGen/X86/sse-align-10.ll5
-rw-r--r--test/CodeGen/X86/sse-fsignum.ll52
-rw-r--r--test/CodeGen/X86/sse-intrinsics-fast-isel.ll8
-rw-r--r--test/CodeGen/X86/sse-intrinsics-x86.ll42
-rw-r--r--test/CodeGen/X86/sse-intrinsics-x86_64.ll78
-rw-r--r--test/CodeGen/X86/sse-minmax.ll290
-rw-r--r--test/CodeGen/X86/sse-regcall.ll54
-rw-r--r--test/CodeGen/X86/sse1.ll34
-rw-r--r--test/CodeGen/X86/sse2-intrinsics-fast-isel.ll4
-rw-r--r--test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll8
-rw-r--r--test/CodeGen/X86/sse2-intrinsics-x86.ll82
-rw-r--r--test/CodeGen/X86/sse2-intrinsics-x86_64.ll78
-rw-r--r--test/CodeGen/X86/sse3-avx-addsub.ll9
-rw-r--r--test/CodeGen/X86/sse3-intrinsics-x86.ll34
-rw-r--r--test/CodeGen/X86/sse41-intrinsics-fast-isel.ll28
-rw-r--r--test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll13
-rw-r--r--test/CodeGen/X86/sse41-intrinsics-x86.ll16
-rw-r--r--test/CodeGen/X86/sse41.ll74
-rw-r--r--test/CodeGen/X86/sse42-intrinsics-fast-isel.ll22
-rw-r--r--test/CodeGen/X86/sse42-intrinsics-x86.ll63
-rw-r--r--test/CodeGen/X86/sse42-intrinsics-x86_64.ll28
-rw-r--r--test/CodeGen/X86/sse42.ll58
-rw-r--r--test/CodeGen/X86/sse42_64.ll21
-rw-r--r--test/CodeGen/X86/ssse3-intrinsics-x86.ll29
-rw-r--r--test/CodeGen/X86/stack-align.ll8
-rw-r--r--test/CodeGen/X86/stack-folding-adx-x86_64.ll18
-rw-r--r--test/CodeGen/X86/stack-folding-bmi.ll121
-rw-r--r--test/CodeGen/X86/stack-folding-bmi2.ll77
-rw-r--r--test/CodeGen/X86/stack-folding-fp-avx1.ll22
-rw-r--r--test/CodeGen/X86/stack-folding-fp-avx512vl.ll39
-rw-r--r--test/CodeGen/X86/stack-folding-fp-sse42.ll4
-rw-r--r--test/CodeGen/X86/stack-folding-int-avx512.ll1502
-rw-r--r--test/CodeGen/X86/stack-folding-int-avx512vl.ll2231
-rw-r--r--test/CodeGen/X86/stack-folding-int-sse42.ll2
-rw-r--r--test/CodeGen/X86/stack-folding-sha.ll72
-rw-r--r--test/CodeGen/X86/stack-folding-tbm.ll201
-rw-r--r--test/CodeGen/X86/stack-protector-remarks.ll103
-rw-r--r--test/CodeGen/X86/stack-protector-target.ll40
-rw-r--r--test/CodeGen/X86/stack-protector-weight.ll8
-rw-r--r--test/CodeGen/X86/stack_guard_remat.ll8
-rw-r--r--test/CodeGen/X86/stores-merging.ll11
-rw-r--r--test/CodeGen/X86/subvector-broadcast.ll97
-rw-r--r--test/CodeGen/X86/swifterror.ll27
-rw-r--r--test/CodeGen/X86/tail-call-conditional.mir85
-rw-r--r--test/CodeGen/X86/tail-dup-debugloc.ll56
-rw-r--r--test/CodeGen/X86/tail-dup-no-other-successor.ll53
-rw-r--r--test/CodeGen/X86/tail-dup-repeat.ll2
-rw-r--r--test/CodeGen/X86/tail-merge-debugloc.ll42
-rw-r--r--test/CodeGen/X86/tail-merge-identical.ll41
-rw-r--r--test/CodeGen/X86/tail-merge-unreachable.ll2
-rw-r--r--test/CodeGen/X86/tail-opts.ll126
-rw-r--r--test/CodeGen/X86/twoaddr-coalesce-3.ll4
-rw-r--r--test/CodeGen/X86/unaligned-32-byte-memops.ll2
-rw-r--r--test/CodeGen/X86/unreachableblockelim.ll2
-rw-r--r--test/CodeGen/X86/unused_stackslots.ll8
-rw-r--r--test/CodeGen/X86/unwindraise.ll8
-rw-r--r--test/CodeGen/X86/update-terminator-debugloc.ll91
-rw-r--r--test/CodeGen/X86/vec_cast2.ll22
-rw-r--r--test/CodeGen/X86/vec_extract-mmx.ll35
-rw-r--r--test/CodeGen/X86/vec_fp_to_int.ll57
-rw-r--r--test/CodeGen/X86/vec_fpext.ll4
-rw-r--r--test/CodeGen/X86/vec_fptrunc.ll4
-rw-r--r--test/CodeGen/X86/vec_int_to_fp.ll97
-rw-r--r--test/CodeGen/X86/vec_logical.ll23
-rw-r--r--test/CodeGen/X86/vec_minmax_match.ll77
-rw-r--r--test/CodeGen/X86/vec_minmax_sint.ll48
-rw-r--r--test/CodeGen/X86/vec_minmax_uint.ll88
-rw-r--r--test/CodeGen/X86/vec_sdiv_to_shift.ll91
-rw-r--r--test/CodeGen/X86/vec_shift4.ll12
-rw-r--r--test/CodeGen/X86/vec_shift5.ll12
-rw-r--r--test/CodeGen/X86/vec_shift7.ll11
-rw-r--r--test/CodeGen/X86/vec_uint_to_fp-fastmath.ll8
-rw-r--r--test/CodeGen/X86/vec_unsafe-fp-math.ll2
-rw-r--r--test/CodeGen/X86/vec_zero_cse.ll68
-rw-r--r--test/CodeGen/X86/vector-bitreverse.ll103
-rw-r--r--test/CodeGen/X86/vector-blend.ll18
-rw-r--r--test/CodeGen/X86/vector-compare-all_of.ll946
-rw-r--r--test/CodeGen/X86/vector-compare-any_of.ll882
-rw-r--r--test/CodeGen/X86/vector-compare-results.ll1628
-rw-r--r--test/CodeGen/X86/vector-extend-inreg.ll120
-rw-r--r--test/CodeGen/X86/vector-half-conversions.ll217
-rw-r--r--test/CodeGen/X86/vector-idiv-sdiv-128.ll47
-rw-r--r--test/CodeGen/X86/vector-idiv-sdiv-256.ll8
-rw-r--r--test/CodeGen/X86/vector-idiv-udiv-128.ll71
-rw-r--r--test/CodeGen/X86/vector-idiv-udiv-256.ll6
-rw-r--r--test/CodeGen/X86/vector-interleave.ll62
-rw-r--r--test/CodeGen/X86/vector-lzcnt-128.ll62
-rw-r--r--test/CodeGen/X86/vector-lzcnt-256.ll100
-rw-r--r--test/CodeGen/X86/vector-popcnt-256.ll18
-rw-r--r--test/CodeGen/X86/vector-rotate-128.ll288
-rw-r--r--test/CodeGen/X86/vector-rotate-256.ll225
-rw-r--r--test/CodeGen/X86/vector-sext.ll90
-rw-r--r--test/CodeGen/X86/vector-shift-ashr-128.ll91
-rw-r--r--test/CodeGen/X86/vector-shift-ashr-256.ll43
-rw-r--r--test/CodeGen/X86/vector-shift-ashr-512.ll5
-rw-r--r--test/CodeGen/X86/vector-shift-lshr-128.ll64
-rw-r--r--test/CodeGen/X86/vector-shift-lshr-256.ll78
-rw-r--r--test/CodeGen/X86/vector-shift-lshr-512.ll5
-rw-r--r--test/CodeGen/X86/vector-shift-shl-128.ll60
-rw-r--r--test/CodeGen/X86/vector-shift-shl-256.ll72
-rw-r--r--test/CodeGen/X86/vector-shift-shl-512.ll5
-rw-r--r--test/CodeGen/X86/vector-shuffle-128-v16.ll62
-rw-r--r--test/CodeGen/X86/vector-shuffle-128-v2.ll8
-rw-r--r--test/CodeGen/X86/vector-shuffle-256-v16.ll57
-rw-r--r--test/CodeGen/X86/vector-shuffle-256-v32.ll122
-rw-r--r--test/CodeGen/X86/vector-shuffle-256-v4.ll194
-rw-r--r--test/CodeGen/X86/vector-shuffle-256-v8.ll61
-rw-r--r--test/CodeGen/X86/vector-shuffle-512-v16.ll206
-rw-r--r--test/CodeGen/X86/vector-shuffle-512-v32.ll7
-rw-r--r--test/CodeGen/X86/vector-shuffle-512-v8.ll100
-rw-r--r--test/CodeGen/X86/vector-shuffle-avx512.ll1019
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-avx2.ll192
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll74
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll2
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll2
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-sse41.ll23
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-ssse3.ll108
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-xop.ll37
-rw-r--r--test/CodeGen/X86/vector-shuffle-masked.ll297
-rw-r--r--test/CodeGen/X86/vector-shuffle-v1.ll72
-rw-r--r--test/CodeGen/X86/vector-shuffle-variable-128.ll1070
-rw-r--r--test/CodeGen/X86/vector-shuffle-variable-256.ll316
-rw-r--r--test/CodeGen/X86/vector-sqrt.ll28
-rw-r--r--test/CodeGen/X86/vector-trunc-math.ll623
-rw-r--r--test/CodeGen/X86/vector-trunc.ll54
-rw-r--r--test/CodeGen/X86/vector-tzcnt-128.ll40
-rw-r--r--test/CodeGen/X86/vector-tzcnt-256.ll30
-rw-r--r--test/CodeGen/X86/vector-zext.ll332
-rw-r--r--test/CodeGen/X86/vectorcall.ll4
-rw-r--r--test/CodeGen/X86/viabs.ll146
-rw-r--r--test/CodeGen/X86/vselect-minmax.ll448
-rw-r--r--test/CodeGen/X86/vselect-pcmp.ll323
-rw-r--r--test/CodeGen/X86/vsplit-and.ll8
-rw-r--r--test/CodeGen/X86/wide-integer-cmp.ll156
-rw-r--r--test/CodeGen/X86/widen_bitops-0.ll12
-rw-r--r--test/CodeGen/X86/widen_conv-1.ll4
-rw-r--r--test/CodeGen/X86/widen_conv-3.ll8
-rw-r--r--test/CodeGen/X86/widen_conv-4.ll8
-rw-r--r--test/CodeGen/X86/widen_load-2.ll30
-rw-r--r--test/CodeGen/X86/widened-broadcast.ll42
-rw-r--r--test/CodeGen/X86/win-alloca-expander.ll24
-rw-r--r--test/CodeGen/X86/win32-eh.ll157
-rw-r--r--test/CodeGen/X86/win64_eh_leaf2.ll22
-rw-r--r--test/CodeGen/X86/x32-va_start.ll8
-rw-r--r--test/CodeGen/X86/x86-64-intrcc-nosse.ll20
-rw-r--r--test/CodeGen/X86/x86-64-intrcc.ll17
-rw-r--r--test/CodeGen/X86/x86-interleaved-access.ll34
-rw-r--r--test/CodeGen/X86/x86-sanitizer-shrink-wrapping.ll2
-rw-r--r--test/CodeGen/X86/xaluo.ll1518
-rw-r--r--test/CodeGen/X86/xmulo.ll788
-rw-r--r--test/CodeGen/X86/xop-ifma.ll129
-rw-r--r--test/CodeGen/X86/xop-intrinsics-fast-isel.ll14
-rw-r--r--test/CodeGen/X86/xop-intrinsics-x86_64-upgrade.ll39
-rw-r--r--test/CodeGen/X86/xop-intrinsics-x86_64.ll32
-rw-r--r--test/CodeGen/X86/xop-mask-comments.ll8
-rw-r--r--test/CodeGen/X86/xor-combine-debugloc.ll69
-rw-r--r--test/CodeGen/X86/xray-log-args.ll35
-rw-r--r--test/CodeGen/XCore/fneg.ll4
-rw-r--r--test/CodeGen/XCore/section-name.ll9
-rw-r--r--test/CodeGen/XCore/varargs.ll2
-rw-r--r--test/DebugInfo/AArch64/asan-stack-vars.ll326
-rw-r--r--test/DebugInfo/AMDGPU/lit.local.cfg2
-rw-r--r--test/DebugInfo/AMDGPU/pointer-address-space-dwarf-v1.ll70
-rw-r--r--test/DebugInfo/AMDGPU/pointer-address-space.ll104
-rw-r--r--test/DebugInfo/AMDGPU/variable-locations-dwarf-v1.ll92
-rw-r--r--test/DebugInfo/AMDGPU/variable-locations.ll111
-rw-r--r--test/DebugInfo/ARM/s-super-register.ll4
-rw-r--r--test/DebugInfo/COFF/array-odr-violation.ll100
-rw-r--r--test/DebugInfo/COFF/globals.ll2
-rw-r--r--test/DebugInfo/COFF/typedef.ll2
-rw-r--r--test/DebugInfo/COFF/types-data-members.ll2
-rw-r--r--test/DebugInfo/Generic/2010-01-05-DbgScope.ll2
-rw-r--r--test/DebugInfo/Generic/2010-06-29-InlinedFnLocalVar.ll2
-rw-r--r--test/DebugInfo/Generic/array.ll2
-rw-r--r--test/DebugInfo/Generic/debuginfofinder-inlined-cu.ll31
-rw-r--r--test/DebugInfo/Generic/gmlt_profiling.ll32
-rw-r--r--test/DebugInfo/Generic/invalid.ll17
-rw-r--r--test/DebugInfo/Generic/store-tail-merge.ll72
-rw-r--r--test/DebugInfo/Inputs/dwarfdump-header.elf-x86-64bin0 -> 2376 bytes
-rw-r--r--test/DebugInfo/Inputs/dwarfdump-header.s149
-rw-r--r--test/DebugInfo/Inputs/gmlt.ll1
-rw-r--r--test/DebugInfo/MIR/ARM/split-superreg-complex.mir122
-rw-r--r--test/DebugInfo/MIR/X86/live-debug-values-spill.mir468
-rw-r--r--test/DebugInfo/Mips/InlinedFnLocalVar.ll2
-rw-r--r--test/DebugInfo/PDB/DIA/pdbdump-linenumbers.test14
-rw-r--r--test/DebugInfo/PDB/DIA/pdbdump-symbol-format.test52
-rw-r--r--test/DebugInfo/PDB/Inputs/longname-truncation.yaml26
-rw-r--r--test/DebugInfo/PDB/Inputs/one-symbol.yaml11
-rw-r--r--test/DebugInfo/PDB/Inputs/symbolformat.cpp8
-rw-r--r--test/DebugInfo/PDB/Inputs/symbolformat.pdbbin35840 -> 110592 bytes
-rw-r--r--test/DebugInfo/PDB/Native/pdb-native-compilands.test65
-rw-r--r--test/DebugInfo/PDB/Native/pdb-native-summary.test11
-rw-r--r--test/DebugInfo/PDB/pdb-longname-truncation.test3
-rw-r--r--test/DebugInfo/PDB/pdb-minimal-construct.test11
-rw-r--r--test/DebugInfo/PDB/pdb-yaml-types.test74
-rw-r--r--test/DebugInfo/PDB/pdbdump-headers.test229
-rw-r--r--test/DebugInfo/PDB/pdbdump-readwrite.test15
-rw-r--r--test/DebugInfo/PDB/pdbdump-write.test9
-rw-r--r--test/DebugInfo/PDB/pdbdump-yaml-types.test4
-rw-r--r--test/DebugInfo/PDB/pdbdump-yaml.test16
-rw-r--r--test/DebugInfo/PowerPC/tls-fission.ll3
-rw-r--r--test/DebugInfo/WebAssembly/dbg-declare.ll6
-rw-r--r--test/DebugInfo/X86/PR26148.ll2
-rw-r--r--test/DebugInfo/X86/dbg-abstract-vars-g-gmlt.ll105
-rw-r--r--test/DebugInfo/X86/dbg-value-const-byref.ll4
-rw-r--r--test/DebugInfo/X86/dbg-value-g-gmlt.ll100
-rw-r--r--test/DebugInfo/X86/dbg-value-regmask-clobber.ll6
-rw-r--r--test/DebugInfo/X86/debug-info-producer-with-flags.ll44
-rw-r--r--test/DebugInfo/X86/debug_and_nodebug_CUs.ll82
-rw-r--r--test/DebugInfo/X86/default-subrange-array.ll53
-rw-r--r--test/DebugInfo/X86/discriminator.ll2
-rw-r--r--test/DebugInfo/X86/dw_op_minus_direct.ll4
-rw-r--r--test/DebugInfo/X86/externaltyperef.ll52
-rw-r--r--test/DebugInfo/X86/fission-ranges.ll8
-rw-r--r--test/DebugInfo/X86/gnu-public-names-tu.ll54
-rw-r--r--test/DebugInfo/X86/ref_addr_relocation.ll2
-rw-r--r--test/DebugInfo/X86/single-dbg_value.ll4
-rw-r--r--test/DebugInfo/X86/single-fi.ll40
-rw-r--r--test/DebugInfo/X86/split-global.ll2
-rw-r--r--test/DebugInfo/X86/stack-value-dwarf4.ll44
-rw-r--r--test/DebugInfo/X86/subreg.ll5
-rw-r--r--test/DebugInfo/X86/subregisters.ll6
-rw-r--r--test/DebugInfo/X86/tls.ll1
-rw-r--r--test/DebugInfo/dwarfdump-header.test29
-rw-r--r--test/DebugInfo/strip-loop-metadata.ll124
-rw-r--r--test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_PIC_relocations.s46
-rw-r--r--test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_relocations.s15
-rw-r--r--test/ExecutionEngine/RuntimeDyld/X86/ELF_x86-64_none.yaml30
-rw-r--r--test/Feature/OperandBundles/dse.ll2
-rw-r--r--test/Feature/fp-intrinsics.ll102
-rw-r--r--test/Feature/optnone-opt.ll1
-rw-r--r--test/FileCheck/line-count.txt26
-rw-r--r--test/FileCheck/regex-scope.txt23
-rw-r--r--test/Instrumentation/AddressSanitizer/freebsd.ll4
-rw-r--r--test/Instrumentation/AddressSanitizer/global_metadata_windows.ll2
-rw-r--r--test/Instrumentation/AddressSanitizer/instrument_load_then_store.ll4
-rw-r--r--test/Instrumentation/AddressSanitizer/lifetime-throw.ll12
-rw-r--r--test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll8
-rw-r--r--test/Instrumentation/AddressSanitizer/lifetime.ll34
-rw-r--r--test/Instrumentation/AddressSanitizer/ps4.ll14
-rw-r--r--test/Instrumentation/AddressSanitizer/stack-poisoning-and-lifetime-be.ll28
-rw-r--r--test/Instrumentation/AddressSanitizer/stack-poisoning-and-lifetime.ll28
-rw-r--r--test/Instrumentation/AddressSanitizer/stack_layout.ll12
-rw-r--r--test/Instrumentation/InstrProfiling/PR23499.ll10
-rw-r--r--test/Instrumentation/InstrProfiling/icall.ll6
-rw-r--r--test/Instrumentation/InstrProfiling/platform.ll4
-rw-r--r--test/Instrumentation/InstrProfiling/profiling.ll6
-rw-r--r--test/Instrumentation/MemorySanitizer/AArch64/vararg.ll8
-rw-r--r--test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll8
-rw-r--r--test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll8
-rw-r--r--test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll8
-rw-r--r--test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll8
-rw-r--r--test/Instrumentation/MemorySanitizer/alloca.ll59
-rw-r--r--test/Instrumentation/MemorySanitizer/csr.ll52
-rw-r--r--test/Instrumentation/SanitizerCoverage/coverage.ll3
-rw-r--r--test/Instrumentation/SanitizerCoverage/coverage2-dbg.ll4
-rw-r--r--test/Instrumentation/SanitizerCoverage/trace-pc-guard-comdat.ll42
-rw-r--r--test/Instrumentation/SanitizerCoverage/trace-pc-guard-nocomdat.ll42
-rw-r--r--test/Instrumentation/SanitizerCoverage/tracing.ll9
-rw-r--r--test/Instrumentation/SanitizerCoverage/wineh.ll111
-rw-r--r--test/LTO/Resolution/X86/Inputs/link-odr-availextern-ae.ll6
-rw-r--r--test/LTO/Resolution/X86/Inputs/link-odr-availextern-odr.ll6
-rw-r--r--test/LTO/Resolution/X86/alias.ll2
-rw-r--r--test/LTO/Resolution/X86/asm-output.ll19
-rw-r--r--test/LTO/Resolution/X86/comdat.ll2
-rw-r--r--test/LTO/Resolution/X86/common2.ll16
-rw-r--r--test/LTO/Resolution/X86/commons.ll2
-rw-r--r--test/LTO/Resolution/X86/diagnostic-handler-remarks-with-hotness.ll37
-rw-r--r--test/LTO/Resolution/X86/diagnostic-handler-remarks.ll33
-rw-r--r--test/LTO/Resolution/X86/empty-bitcode.test2
-rw-r--r--test/LTO/Resolution/X86/intrinsic.ll2
-rw-r--r--test/LTO/Resolution/X86/link-odr-availextern.ll38
-rw-r--r--test/LTO/Resolution/X86/lowertypetests.ll21
-rw-r--r--test/LTO/Resolution/X86/mixed_lto.ll4
-rw-r--r--test/LTO/Resolution/X86/multi-thinlto.ll2
-rw-r--r--test/LTO/Resolution/X86/symtab-elf.ll15
-rw-r--r--test/LTO/Resolution/X86/symtab.ll53
-rw-r--r--test/LTO/X86/diagnostic-handler-remarks-with-hotness.ll43
-rw-r--r--test/LTO/X86/diagnostic-handler-remarks.ll1
-rw-r--r--test/LTO/X86/remangle_intrinsics_tbaa.ll2
-rw-r--r--test/LTO/X86/strip-debug-info-no-call-loc.ll56
-rw-r--r--test/LTO/X86/symver-asm.ll43
-rw-r--r--test/LTO/X86/symver-asm2.ll30
-rw-r--r--test/Linker/2011-08-18-unique-class-type2.ll2
-rw-r--r--test/Linker/Inputs/linkage.d.ll5
-rw-r--r--test/Linker/available_externally_a.ll2
-rw-r--r--test/Linker/link-flags.ll6
-rw-r--r--test/MC/AArch64/alias-addsubimm.s21
-rw-r--r--test/MC/AArch64/alias-logicalimm.s9
-rw-r--r--test/MC/AArch64/armv8.1a-lse.s5175
-rw-r--r--test/MC/AArch64/error-location-post-layout.s9
-rw-r--r--test/MC/AArch64/label-arithmetic-diags-elf.s4
-rw-r--r--test/MC/AArch64/neon-add-sub-instructions.s2
-rw-r--r--test/MC/AArch64/neon-diagnostics.s14
-rw-r--r--test/MC/AArch64/nofp-crypto-diagnostic.s8
-rw-r--r--test/MC/AMDGPU/code-object-metadata-kernel-args.s68
-rw-r--r--test/MC/AMDGPU/code-object-metadata-kernel-attrs.s28
-rw-r--r--test/MC/AMDGPU/code-object-metadata-kernel-code-props.s24
-rw-r--r--test/MC/AMDGPU/code-object-metadata-kernel-debug-props.s26
-rw-r--r--test/MC/AMDGPU/code-object-metadata-unknown-key.s41
-rw-r--r--test/MC/AMDGPU/ds.s92
-rw-r--r--test/MC/AMDGPU/exp.s88
-rw-r--r--test/MC/AMDGPU/expressions.s2
-rw-r--r--test/MC/AMDGPU/gfx7_asm_all.s68629
-rw-r--r--test/MC/AMDGPU/gfx8_asm_all.s98847
-rw-r--r--test/MC/AMDGPU/hsa-exp.s4
-rw-r--r--test/MC/AMDGPU/hsa.s34
-rw-r--r--test/MC/AMDGPU/literals.s8
-rw-r--r--test/MC/AMDGPU/literalv216-err.s22
-rw-r--r--test/MC/AMDGPU/literalv216.s112
-rw-r--r--test/MC/AMDGPU/metadata.s35
-rw-r--r--test/MC/AMDGPU/regression/bug28168.s12
-rw-r--r--test/MC/AMDGPU/sop1.s14
-rw-r--r--test/MC/AMDGPU/sop2-err.s7
-rw-r--r--test/MC/AMDGPU/sop2.s8
-rw-r--r--test/MC/AMDGPU/sopp-gfx9.s71
-rw-r--r--test/MC/AMDGPU/sopp.s23
-rw-r--r--test/MC/AMDGPU/vop-err.s290
-rw-r--r--test/MC/AMDGPU/vop1-gfx9-err.s25
-rw-r--r--test/MC/AMDGPU/vop1-gfx9.s13
-rw-r--r--test/MC/AMDGPU/vop1.s2
-rw-r--r--test/MC/AMDGPU/vop2.s14
-rw-r--r--test/MC/AMDGPU/vop3-gfx9.s48
-rw-r--r--test/MC/AMDGPU/vop3-modifiers-err.s15
-rw-r--r--test/MC/AMDGPU/vop3-modifiers.s388
-rw-r--r--test/MC/AMDGPU/vop3.s67
-rw-r--r--test/MC/AMDGPU/vop3p-err.s120
-rw-r--r--test/MC/AMDGPU/vop3p.s216
-rw-r--r--test/MC/ARM/Inputs/1.s3
-rw-r--r--test/MC/ARM/Inputs/2.s3
-rw-r--r--test/MC/ARM/Inputs/3.s3
-rw-r--r--test/MC/ARM/Inputs/4.s2
-rw-r--r--test/MC/ARM/Inputs/5.s2
-rw-r--r--test/MC/ARM/Inputs/6.s12
-rw-r--r--test/MC/ARM/Inputs/7.s3
-rw-r--r--test/MC/ARM/Inputs/attr.s5
-rw-r--r--test/MC/ARM/Inputs/ident.s1
-rw-r--r--test/MC/ARM/arm-thumb-trustzone.s4
-rw-r--r--test/MC/ARM/basic-arm-instructions-v8.1a.s4
-rw-r--r--test/MC/ARM/basic-thumb2-instructions.s33
-rw-r--r--test/MC/ARM/branch-disassemble.s15
-rw-r--r--test/MC/ARM/coff-relocations.s14
-rw-r--r--test/MC/ARM/data-in-code.ll17
-rw-r--r--test/MC/ARM/diagnostics.s62
-rw-r--r--test/MC/ARM/dwarf-asm-multiple-sections.s37
-rw-r--r--test/MC/ARM/elf-thumbfunc.s10
-rw-r--r--test/MC/ARM/error-location-post-layout.s12
-rw-r--r--test/MC/ARM/inline-asm-diags.ll9
-rw-r--r--test/MC/ARM/inline-asm-srcloc.ll37
-rw-r--r--test/MC/ARM/invalid-special-reg.s11
-rw-r--r--test/MC/ARM/ldr-pseudo-cond-darwin.s2
-rw-r--r--test/MC/ARM/ldr-pseudo-cond.s2
-rw-r--r--test/MC/ARM/lsl-zero-errors.s103
-rw-r--r--test/MC/ARM/lsl-zero.s57
-rw-r--r--test/MC/ARM/mappingsymbols.s48
-rw-r--r--test/MC/ARM/multi-section-mapping.s1
-rw-r--r--test/MC/ARM/negative-immediates-fail.s13
-rw-r--r--test/MC/ARM/negative-immediates-thumb1-fail.s15
-rw-r--r--test/MC/ARM/negative-immediates-thumb1.s19
-rw-r--r--test/MC/ARM/negative-immediates.s128
-rw-r--r--test/MC/ARM/quad-relocation.s9
-rw-r--r--test/MC/ARM/simple-fp-encoding.s12
-rw-r--r--test/MC/ARM/thumb-diagnostics.s10
-rw-r--r--test/MC/ARM/thumb-mov.s100
-rw-r--r--test/MC/ARM/thumb-not-mclass.s4
-rw-r--r--test/MC/ARM/thumb2-diagnostics.s17
-rw-r--r--test/MC/ARM/thumbv8m.s4
-rw-r--r--test/MC/ARM/udf-arm-diagnostics.s2
-rw-r--r--test/MC/ARM/udf-thumb-2-diagnostics.s2
-rw-r--r--test/MC/ARM/unpred-control-flow-in-it-block.s57
-rw-r--r--test/MC/AsmParser/macro-duplicate-params-names-err.s7
-rw-r--r--test/MC/AsmParser/section_names.s20
-rw-r--r--test/MC/COFF/section-comdat.s12
-rw-r--r--test/MC/Disassembler/AMDGPU/aperture-regs.ll13
-rw-r--r--test/MC/Disassembler/AMDGPU/ds_vi.txt26
-rw-r--r--test/MC/Disassembler/AMDGPU/gfx8_dasm_all.txt87676
-rw-r--r--test/MC/Disassembler/AMDGPU/mac.txt19
-rw-r--r--test/MC/Disassembler/AMDGPU/si-support.txt4
-rw-r--r--test/MC/Disassembler/AMDGPU/sop1_vi.txt2
-rw-r--r--test/MC/Disassembler/AMDGPU/vop1_gfx9.txt4
-rw-r--r--test/MC/Disassembler/AMDGPU/vop3_vi.txt6
-rw-r--r--test/MC/Disassembler/Hexagon/alu32_alu.txt44
-rw-r--r--test/MC/Disassembler/Hexagon/alu32_perm.txt26
-rw-r--r--test/MC/Disassembler/Hexagon/alu32_pred.txt88
-rw-r--r--test/MC/Disassembler/Hexagon/cr.txt32
-rw-r--r--test/MC/Disassembler/Hexagon/j.txt112
-rw-r--r--test/MC/Disassembler/Hexagon/ld.txt192
-rw-r--r--test/MC/Disassembler/Hexagon/nv_j.txt88
-rw-r--r--test/MC/Disassembler/Hexagon/nv_st.txt30
-rw-r--r--test/MC/Disassembler/Hexagon/st.txt92
-rw-r--r--test/MC/Disassembler/Hexagon/system_user.txt6
-rw-r--r--test/MC/Disassembler/Hexagon/xtype_alu.txt274
-rw-r--r--test/MC/Disassembler/Hexagon/xtype_bit.txt58
-rw-r--r--test/MC/Disassembler/Hexagon/xtype_complex.txt98
-rw-r--r--test/MC/Disassembler/Hexagon/xtype_fp.txt48
-rw-r--r--test/MC/Disassembler/Hexagon/xtype_mpy.txt358
-rw-r--r--test/MC/Disassembler/Hexagon/xtype_perm.txt22
-rw-r--r--test/MC/Disassembler/Hexagon/xtype_pred.txt98
-rw-r--r--test/MC/Disassembler/Hexagon/xtype_shift.txt224
-rw-r--r--test/MC/Disassembler/PowerPC/vsx.txt4
-rw-r--r--test/MC/Disassembler/X86/avx-512.txt95
-rw-r--r--test/MC/Disassembler/X86/fp-stack.txt2098
-rw-r--r--test/MC/Disassembler/X86/x86-16.txt1583
-rw-r--r--test/MC/Disassembler/X86/x86-32.txt6
-rw-r--r--test/MC/Disassembler/X86/x86-64.txt4
-rw-r--r--test/MC/ELF/ARM/gnu-type-hash-diagnostics.s4
-rw-r--r--test/MC/ELF/gen-dwarf.s13
-rw-r--r--test/MC/ELF/section-metadata-err1.s5
-rw-r--r--test/MC/ELF/section-metadata-err2.s6
-rw-r--r--test/MC/ELF/section-metadata-err3.s6
-rw-r--r--test/MC/ELF/section-metadata-err4.s5
-rw-r--r--test/MC/ELF/section-numeric-invalid-type.s14
-rw-r--r--test/MC/ELF/section-numeric-type.s20
-rw-r--r--test/MC/ELF/section-sym-err.s6
-rw-r--r--test/MC/ELF/section-sym-err2.s6
-rw-r--r--test/MC/ELF/section-sym-redefine.s138
-rw-r--r--test/MC/ELF/section.s122
-rw-r--r--test/MC/Hexagon/align.s24
-rw-r--r--test/MC/Hexagon/asmMap.s308
-rw-r--r--test/MC/Hexagon/bug20416.s13
-rw-r--r--test/MC/Hexagon/capitalizedEndloop.s2
-rw-r--r--test/MC/Hexagon/common-redeclare.s6
-rw-r--r--test/MC/Hexagon/dcfetch-symbol.s8
-rw-r--r--test/MC/Hexagon/decode_acc_type.s150
-rw-r--r--test/MC/Hexagon/dis-duplex-p0.s15
-rw-r--r--test/MC/Hexagon/duplex-registers.s2
-rw-r--r--test/MC/Hexagon/elf-flags.s2
-rw-r--r--test/MC/Hexagon/equ.s9
-rw-r--r--test/MC/Hexagon/ext-callt-rel.s6
-rw-r--r--test/MC/Hexagon/extended_relocations.ll23
-rw-r--r--test/MC/Hexagon/extender.s210
-rw-r--r--test/MC/Hexagon/fixups.s4
-rw-r--r--test/MC/Hexagon/iconst.s4
-rw-r--r--test/MC/Hexagon/inst_cmp_eq.ll2
-rw-r--r--test/MC/Hexagon/inst_cmp_eqi.ll2
-rw-r--r--test/MC/Hexagon/inst_cmp_gt.ll4
-rw-r--r--test/MC/Hexagon/inst_cmp_gti.ll2
-rw-r--r--test/MC/Hexagon/inst_cmp_lt.ll2
-rw-r--r--test/MC/Hexagon/inst_cmp_ugt.ll2
-rw-r--r--test/MC/Hexagon/inst_cmp_ugti.ll2
-rw-r--r--test/MC/Hexagon/inst_cmp_ult.ll4
-rw-r--r--test/MC/Hexagon/inst_select.ll4
-rw-r--r--test/MC/Hexagon/instructions/ld.s15
-rw-r--r--test/MC/Hexagon/instructions/nv_st.s6
-rw-r--r--test/MC/Hexagon/instructions/st.s10
-rw-r--r--test/MC/Hexagon/instructions/system_user.s3
-rw-r--r--test/MC/Hexagon/jumpdoublepound.s2
-rw-r--r--test/MC/Hexagon/labels.s10
-rw-r--r--test/MC/Hexagon/load-GPRel.s33
-rw-r--r--test/MC/Hexagon/missing_label.s8
-rw-r--r--test/MC/Hexagon/non-relocatable.s10
-rw-r--r--test/MC/Hexagon/not-over.s55
-rw-r--r--test/MC/Hexagon/not_found.s4
-rw-r--r--test/MC/Hexagon/offset.s7
-rw-r--r--test/MC/Hexagon/operand-range.s7
-rw-r--r--test/MC/Hexagon/parse-pound-hi.s60
-rw-r--r--test/MC/Hexagon/reg_altnames.s10
-rw-r--r--test/MC/Hexagon/register-alt-names.s2
-rw-r--r--test/MC/Hexagon/relaxed_newvalue.s4
-rw-r--r--test/MC/Hexagon/relocations.s20
-rw-r--r--test/MC/Hexagon/store-GPRel.s46
-rw-r--r--test/MC/Hexagon/two-extenders.s135
-rw-r--r--test/MC/Hexagon/v60-misc.s6
-rw-r--r--test/MC/Hexagon/v60-vmem.s24
-rw-r--r--test/MC/Hexagon/v62_all.s552
-rw-r--r--test/MC/Hexagon/v62_jumps.s13
-rw-r--r--test/MC/Hexagon/v62a.s19
-rw-r--r--test/MC/Hexagon/v62a_regs.s44
-rw-r--r--test/MC/MachO/darwin-version-min-load-command.s7
-rw-r--r--test/MC/Mips/bopt-directive.s16
-rw-r--r--test/MC/Mips/branch-pseudos-bad.s4
-rw-r--r--test/MC/Mips/elf-debug-section.s6
-rw-r--r--test/MC/Mips/elf_eflags.s119
-rw-r--r--test/MC/Mips/end-directive.s22
-rw-r--r--test/MC/Mips/expansion-j-sym-pic.s8
-rw-r--r--test/MC/Mips/expansion-jal-sym-pic.s40
-rw-r--r--test/MC/Mips/instalias-imm-expanding.s78
-rw-r--r--test/MC/Mips/macro-bcc-imm.s40
-rw-r--r--test/MC/Mips/macro-ddiv.s364
-rw-r--r--test/MC/Mips/macro-ddivu.s301
-rw-r--r--test/MC/Mips/macro-div-bad.s2
-rw-r--r--test/MC/Mips/macro-div.s223
-rw-r--r--test/MC/Mips/macro-divu-bad.s2
-rw-r--r--test/MC/Mips/macro-divu.s27
-rw-r--r--test/MC/Mips/macro-dla.s48
-rw-r--r--test/MC/Mips/macro-li.s2
-rw-r--r--test/MC/Mips/micromips32r6/valid.s8
-rw-r--r--test/MC/Mips/micromips64r6/valid.s16
-rw-r--r--test/MC/Mips/mips64-instalias-imm-expanding.s741
-rw-r--r--test/MC/Mips/mips64extins.s2
-rw-r--r--test/MC/Mips/mul-macro-variants.s154
-rw-r--r--test/MC/Mips/set-nomacro.s9
-rw-r--r--test/MC/Mips/sext_64_32.ll3
-rw-r--r--test/MC/PowerPC/ppc64-encoding-vmx.s6
-rw-r--r--test/MC/PowerPC/vsx.s9
-rw-r--r--test/MC/WebAssembly/file-headers.ll9
-rw-r--r--test/MC/WebAssembly/lit.local.cfg2
-rw-r--r--test/MC/X86/abs8.s8
-rw-r--r--test/MC/X86/avx512-encodings.s48
-rw-r--r--test/MC/X86/avx512vl-encoding.s128
-rw-r--r--test/MC/X86/data-prefix-fail.s25
-rw-r--r--test/MC/X86/data-prefix16.s9
-rw-r--r--test/MC/X86/data-prefix32.s9
-rw-r--r--test/MC/X86/data-prefix64.s9
-rw-r--r--test/MC/X86/intel-syntax-avx512.s112
-rw-r--r--test/MC/X86/intel-syntax-bitwise-ops.s48
-rw-r--r--test/MC/X86/intel-syntax.s2
-rw-r--r--test/MC/X86/line-table-sections.s15
-rw-r--r--test/MC/X86/x86-16.s10
-rw-r--r--test/MC/X86/x86-32-coverage.s40
-rw-r--r--test/MC/X86/x86-32.s18
-rw-r--r--test/MC/X86/x86-64.s14
-rw-r--r--test/MC/X86/x86_64-encoding.s8
-rw-r--r--test/Object/AMDGPU/elf-definitions.yaml (renamed from test/Object/AMDGPU/elf-definitios.yaml)0
-rw-r--r--test/Object/ARM/nm-mapping-symbol.s1
-rw-r--r--test/Object/Inputs/invalid-reloc.elf-x86-64bin0 -> 624 bytes
-rw-r--r--test/Object/Inputs/macho-invalid-notebin0 -> 76 bytes
-rw-r--r--test/Object/Inputs/solaris-nosymbols.yaml7
-rw-r--r--test/Object/X86/nm-ir.ll3
-rw-r--r--test/Object/archive-extract.test13
-rw-r--r--test/Object/archive-format.test25
-rw-r--r--test/Object/archive-pad.test19
-rw-r--r--test/Object/macho-invalid.test9
-rw-r--r--test/Object/nm-shared-object.test2
-rw-r--r--test/Object/obj2yaml-invalid-reloc.test37
-rw-r--r--test/ObjectYAML/MachO/DWARF-debug_aranges.yaml6
-rw-r--r--test/ObjectYAML/MachO/DWARF-debug_info.yaml12
-rw-r--r--test/ObjectYAML/MachO/DWARF-debug_line.yaml46
-rw-r--r--test/ObjectYAML/MachO/DWARF-pubsections.yaml12
-rw-r--r--test/ObjectYAML/MachO/DWARF2-AddrSize8-FormValues.yaml507
-rw-r--r--test/ObjectYAML/MachO/DWARF5-abbrevValues.yaml307
-rw-r--r--test/ObjectYAML/MachO/DWARF5-debug_info.yaml582
-rw-r--r--test/ObjectYAML/MachO/build_version_command.yaml35
-rw-r--r--test/ObjectYAML/wasm/code_section.yaml72
-rw-r--r--test/ObjectYAML/wasm/custom_section.yaml17
-rw-r--r--test/ObjectYAML/wasm/data_section.yaml28
-rw-r--r--test/ObjectYAML/wasm/elem_section.yaml40
-rw-r--r--test/ObjectYAML/wasm/export_section.yaml27
-rw-r--r--test/ObjectYAML/wasm/function_section.yaml17
-rw-r--r--test/ObjectYAML/wasm/global_section.yaml25
-rw-r--r--test/ObjectYAML/wasm/header.yaml9
-rw-r--r--test/ObjectYAML/wasm/header_invalid_version.yaml6
-rw-r--r--test/ObjectYAML/wasm/import_section.yaml41
-rw-r--r--test/ObjectYAML/wasm/memory_section.yaml23
-rw-r--r--test/ObjectYAML/wasm/start_section.yaml15
-rw-r--r--test/ObjectYAML/wasm/table_section.yaml25
-rw-r--r--test/ObjectYAML/wasm/type_section.yaml33
-rw-r--r--test/Other/Inputs/glob-input0
-rw-r--r--test/Other/cgscc-devirt-iteration.ll18
-rw-r--r--test/Other/constant-fold-gep.ll2
-rw-r--r--test/Other/debugcounter-newgvn.ll22
-rw-r--r--test/Other/debugcounter-predicateinfo.ll39
-rw-r--r--test/Other/invariant.group.barrier.ll62
-rw-r--r--test/Other/lit-globbing.ll28
-rw-r--r--test/Other/loop-pm-invalidation.ll277
-rw-r--r--test/Other/new-pass-manager.ll95
-rw-r--r--test/Other/new-pm-defaults.ll191
-rw-r--r--test/Other/new-pm-lto-defaults.ll101
-rw-r--r--test/Other/optimization-remarks-invalidation.ll80
-rw-r--r--test/Other/optimization-remarks-lazy-bfi.ll (renamed from test/Transforms/LoopDistribute/diagnostics-with-hotness-lazy-BFI.ll)0
-rw-r--r--test/Other/writing-to-stdout.ll16
-rw-r--r--test/TableGen/GlobalISelEmitter.td407
-rw-r--r--test/TableGen/RegisterBankEmitter.td15
-rw-r--r--test/ThinLTO/X86/Inputs/cache-import-lists1.ll11
-rw-r--r--test/ThinLTO/X86/Inputs/cache-import-lists2.ll11
-rw-r--r--test/ThinLTO/X86/Inputs/cache-typeid-resolutions-import.ll15
-rw-r--r--test/ThinLTO/X86/Inputs/cache-typeid-resolutions1.ll6
-rw-r--r--test/ThinLTO/X86/Inputs/cache-typeid-resolutions2.ll10
-rw-r--r--test/ThinLTO/X86/Inputs/cache-typeid-resolutions3.ll15
-rw-r--r--test/ThinLTO/X86/cache-config.ll32
-rw-r--r--test/ThinLTO/X86/cache-import-lists.ll24
-rw-r--r--test/ThinLTO/X86/cache-typeid-resolutions.ll47
-rw-r--r--test/ThinLTO/X86/cache.ll18
-rw-r--r--test/ThinLTO/X86/crash_debuginfo.ll3
-rw-r--r--test/ThinLTO/X86/deadstrip.ll4
-rw-r--r--test/ThinLTO/X86/debuginfo-compositetype-import.ll4
-rw-r--r--test/ThinLTO/X86/diagnostic-handler-remarks.ll1
-rw-r--r--test/ThinLTO/X86/dicompositetype-unique.ll2
-rw-r--r--test/ThinLTO/X86/distributed_import.ll50
-rw-r--r--test/ThinLTO/X86/emit_imports.ll2
-rw-r--r--test/ThinLTO/X86/empty_module_with_cache.ll8
-rw-r--r--test/ThinLTO/X86/error-newpm.ll13
-rw-r--r--test/ThinLTO/X86/funcimport2.ll4
-rw-r--r--test/ThinLTO/X86/internalize.ll2
-rw-r--r--test/ThinLTO/X86/lazyload_metadata.ll6
-rw-r--r--test/ThinLTO/X86/linkonce_aliasee_ref_import.ll2
-rw-r--r--test/ThinLTO/X86/local_name_conflict.ll21
-rw-r--r--test/ThinLTO/X86/module_asm2.ll2
-rw-r--r--test/ThinLTO/X86/module_asm_glob.ll2
-rw-r--r--test/ThinLTO/X86/reference_non_importable.ll2
-rw-r--r--test/ThinLTO/X86/tli-nobuiltin.ll46
-rw-r--r--test/ThinLTO/X86/weak_resolution.ll4
-rw-r--r--test/Transforms/ADCE/delete-profiling-calls-to-constant.ll6
-rw-r--r--test/Transforms/AddDiscriminators/basic.ll2
-rw-r--r--test/Transforms/AddDiscriminators/call-nested.ll2
-rw-r--r--test/Transforms/AddDiscriminators/call.ll14
-rw-r--r--test/Transforms/AddDiscriminators/diamond.ll4
-rw-r--r--test/Transforms/AddDiscriminators/first-only.ll2
-rw-r--r--test/Transforms/AddDiscriminators/inlined.ll4
-rw-r--r--test/Transforms/AddDiscriminators/memcpy-discriminator.ll104
-rw-r--r--test/Transforms/AddDiscriminators/multiple.ll4
-rw-r--r--test/Transforms/AddDiscriminators/oneline.ll14
-rw-r--r--test/Transforms/ArgumentPromotion/2008-07-02-array-indexing.ll25
-rw-r--r--test/Transforms/ArgumentPromotion/aggregate-promote.ll43
-rw-r--r--test/Transforms/ArgumentPromotion/attrs.ll59
-rw-r--r--test/Transforms/ArgumentPromotion/byval-2.ll1
-rw-r--r--test/Transforms/ArgumentPromotion/byval.ll1
-rw-r--r--test/Transforms/ArgumentPromotion/callgraph-update.ll23
-rw-r--r--test/Transforms/ArgumentPromotion/chained.ll32
-rw-r--r--test/Transforms/ArgumentPromotion/control-flow.ll28
-rw-r--r--test/Transforms/ArgumentPromotion/control-flow2.ll1
-rw-r--r--test/Transforms/ArgumentPromotion/crash.ll82
-rw-r--r--test/Transforms/ArgumentPromotion/dbg.ll3
-rw-r--r--test/Transforms/ArgumentPromotion/fp80.ll1
-rw-r--r--test/Transforms/ArgumentPromotion/inalloca.ll1
-rw-r--r--test/Transforms/ArgumentPromotion/pr27568.ll1
-rw-r--r--test/Transforms/ArgumentPromotion/profile.ll23
-rw-r--r--test/Transforms/ArgumentPromotion/reserve-tbaa.ll3
-rw-r--r--test/Transforms/ArgumentPromotion/sret.ll1
-rw-r--r--test/Transforms/ArgumentPromotion/tail.ll1
-rw-r--r--test/Transforms/ArgumentPromotion/variadic.ll1
-rw-r--r--test/Transforms/AtomicExpand/SPARC/libcalls.ll44
-rw-r--r--test/Transforms/BBVectorize/X86/loop1.ll2
-rw-r--r--test/Transforms/BBVectorize/X86/wr-aliases.ll8
-rw-r--r--test/Transforms/BBVectorize/loop1.ll2
-rw-r--r--test/Transforms/BDCE/basic.ll38
-rw-r--r--test/Transforms/CodeGenPrepare/AMDGPU/no-sink-addrspacecast.ll2
-rw-r--r--test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div-special-cases.ll216
-rw-r--r--test/Transforms/CodeGenPrepare/X86/computedgoto.ll294
-rw-r--r--test/Transforms/CodeGenPrepare/X86/sink-addrmode.ll26
-rw-r--r--test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll8
-rw-r--r--test/Transforms/CodeGenPrepare/basic.ll42
-rw-r--r--test/Transforms/CodeGenPrepare/builtin-condition.ll20
-rw-r--r--test/Transforms/CodeGenPrepare/section.ll22
-rw-r--r--test/Transforms/ConstProp/loads.ll4
-rw-r--r--test/Transforms/ConstantHoisting/X86/ehpad.ll62
-rw-r--r--test/Transforms/ConstantMerge/dont-merge.ll38
-rw-r--r--test/Transforms/ConstantMerge/merge-dbg.ll32
-rw-r--r--test/Transforms/Coroutines/ArgAddr.ll4
-rw-r--r--test/Transforms/Coroutines/coro-frame.ll61
-rw-r--r--test/Transforms/Coroutines/coro-spill-after-phi.ll60
-rw-r--r--test/Transforms/Coroutines/coro-split-00.ll4
-rw-r--r--test/Transforms/Coroutines/coro-split-01.ll4
-rw-r--r--test/Transforms/Coroutines/coro-split-02.ll4
-rw-r--r--test/Transforms/Coroutines/coro-split-dbg.ll8
-rw-r--r--test/Transforms/Coroutines/coro-split-eh.ll145
-rw-r--r--test/Transforms/Coroutines/ex0.ll4
-rw-r--r--test/Transforms/Coroutines/ex1.ll4
-rw-r--r--test/Transforms/Coroutines/ex2.ll4
-rw-r--r--test/Transforms/Coroutines/ex3.ll4
-rw-r--r--test/Transforms/Coroutines/ex4.ll4
-rw-r--r--test/Transforms/Coroutines/ex5.ll4
-rw-r--r--test/Transforms/Coroutines/no-suspend.ll10
-rw-r--r--test/Transforms/Coroutines/phi-coro-end.ll4
-rw-r--r--test/Transforms/Coroutines/restart-trigger.ll4
-rw-r--r--test/Transforms/CorrelatedValuePropagation/alloca.ll8
-rw-r--r--test/Transforms/CorrelatedValuePropagation/basic.ll4
-rw-r--r--test/Transforms/DeadArgElim/call_profile.ll22
-rw-r--r--test/Transforms/DeadStoreElimination/dominate.ll6
-rw-r--r--test/Transforms/DeadStoreElimination/lifetime.ll10
-rw-r--r--test/Transforms/DeadStoreElimination/operand-bundles.ll12
-rw-r--r--test/Transforms/EarlyCSE/readnone-mayunwind.ll15
-rw-r--r--test/Transforms/FunctionAttrs/nonnull.ll146
-rw-r--r--test/Transforms/FunctionImport/funcimport.ll16
-rw-r--r--test/Transforms/FunctionImport/unnamed-globals.ll10
-rw-r--r--test/Transforms/GVN/PRE/rle-addrspace-cast.ll2
-rw-r--r--test/Transforms/GVN/PRE/rle.ll4
-rw-r--r--test/Transforms/GVN/cond_br2.ll8
-rw-r--r--test/Transforms/GVN/debugloc.ll77
-rw-r--r--test/Transforms/GVN/fence.ll20
-rw-r--r--test/Transforms/GVN/invariant.group.ll6
-rw-r--r--test/Transforms/GVN/lifetime-simple.ll8
-rw-r--r--test/Transforms/GVNHoist/hoist-inline.ll38
-rw-r--r--test/Transforms/GVNHoist/hoist-pr31891.ll83
-rw-r--r--test/Transforms/GVNHoist/hoist-very-busy.ll55
-rw-r--r--test/Transforms/GVNHoist/ld_hoist1.ll (renamed from test/Transforms/InstMerge/ld_hoist1.ll)2
-rw-r--r--test/Transforms/GVNHoist/ld_hoist_st_sink.ll (renamed from test/Transforms/InstMerge/ld_hoist_st_sink.ll)8
-rw-r--r--test/Transforms/GVNHoist/pr29034.ll4
-rw-r--r--test/Transforms/GlobalDCE/crash-assertingvh.ll (renamed from test/Transforms/JumpThreading/crash-assertingvh.ll)5
-rw-r--r--test/Transforms/GlobalOpt/2009-03-05-dbg.ll2
-rw-r--r--test/Transforms/GlobalOpt/externally-initialized-aggregate.ll4
-rw-r--r--test/Transforms/GlobalSplit/basic.ll20
-rw-r--r--test/Transforms/IPConstantProp/naked-return.ll3
-rw-r--r--test/Transforms/IRCE/bad-loop-structure.ll45
-rw-r--r--test/Transforms/IndVarSimplify/2011-11-01-lftrptr.ll4
-rw-r--r--test/Transforms/IndVarSimplify/AMDGPU/no-widen-to-i64.ll6
-rw-r--r--test/Transforms/IndVarSimplify/exit_value_test2.ll8
-rw-r--r--test/Transforms/IndVarSimplify/pr32045.ll39
-rw-r--r--test/Transforms/IndVarSimplify/replace-sdiv-by-udiv.ll130
-rw-r--r--test/Transforms/InferAddressSpaces/AMDGPU/basic.ll173
-rw-r--r--test/Transforms/InferAddressSpaces/AMDGPU/icmp.ll160
-rw-r--r--test/Transforms/InferAddressSpaces/AMDGPU/infer-address-space.ll175
-rw-r--r--test/Transforms/InferAddressSpaces/AMDGPU/intrinsics.ll146
-rw-r--r--test/Transforms/InferAddressSpaces/AMDGPU/lit.local.cfg3
-rw-r--r--test/Transforms/InferAddressSpaces/AMDGPU/mem-intrinsics.ll134
-rw-r--r--test/Transforms/InferAddressSpaces/AMDGPU/old-pass-regressions.ll143
-rw-r--r--test/Transforms/InferAddressSpaces/AMDGPU/select.ll264
-rw-r--r--test/Transforms/InferAddressSpaces/AMDGPU/volatile.ll140
-rw-r--r--test/Transforms/InferAddressSpaces/NVPTX/bug31948.ll24
-rw-r--r--test/Transforms/InferAddressSpaces/NVPTX/lit.local.cfg2
-rw-r--r--test/Transforms/Inline/AArch64/gep-cost.ll30
-rw-r--r--test/Transforms/Inline/AArch64/lit.local.cfg2
-rw-r--r--test/Transforms/Inline/alloca-bonus.ll8
-rw-r--r--test/Transforms/Inline/arg-attr-propagation.ll50
-rw-r--r--test/Transforms/Inline/bfi-update.ll93
-rw-r--r--test/Transforms/Inline/cgscc-incremental-invalidate.ll111
-rw-r--r--test/Transforms/Inline/cgscc-invalidate.ll32
-rw-r--r--test/Transforms/Inline/clear-analyses.ll32
-rw-r--r--test/Transforms/Inline/crash-lifetime-marker.ll4
-rw-r--r--test/Transforms/Inline/function-count-update-2.ll33
-rw-r--r--test/Transforms/Inline/function-count-update-3.ll78
-rw-r--r--test/Transforms/Inline/function-count-update.ll50
-rw-r--r--test/Transforms/Inline/inline-cold-callee.ll1
-rw-r--r--test/Transforms/Inline/inline-cold-callsite.ll54
-rw-r--r--test/Transforms/Inline/inline-hot-callsite-2.ll56
-rw-r--r--test/Transforms/Inline/inline-hot-callsite.ll2
-rw-r--r--test/Transforms/Inline/inline_stats.ll3
-rw-r--r--test/Transforms/Inline/internal-scc-members.ll31
-rw-r--r--test/Transforms/Inline/last-call-bonus.ll52
-rw-r--r--test/Transforms/Inline/lifetime-no-datalayout.ll4
-rw-r--r--test/Transforms/Inline/lifetime.ll40
-rw-r--r--test/Transforms/Inline/monster_scc.ll460
-rw-r--r--test/Transforms/Inline/optimization-remarks-with-hotness.ll3
-rw-r--r--test/Transforms/Inline/optimization-remarks.ll3
-rw-r--r--test/Transforms/Inline/prof-update.ll39
-rw-r--r--test/Transforms/InstCombine/2008-01-29-AddICmp.ll85
-rw-r--r--test/Transforms/InstCombine/2008-05-22-NegValVector.ll6
-rw-r--r--test/Transforms/InstCombine/2008-11-20-DivMulRem.ll67
-rw-r--r--test/Transforms/InstCombine/2009-03-20-AShrOverShift.ll9
-rw-r--r--test/Transforms/InstCombine/2012-07-25-LoadPart.ll4
-rw-r--r--test/Transforms/InstCombine/X86FsubCmpCombine.ll181
-rw-r--r--test/Transforms/InstCombine/add-sitofp.ll12
-rw-r--r--test/Transforms/InstCombine/add.ll179
-rw-r--r--test/Transforms/InstCombine/alloca.ll4
-rw-r--r--test/Transforms/InstCombine/amdgcn-demanded-vector-elts.ll322
-rw-r--r--test/Transforms/InstCombine/amdgcn-intrinsics.ll903
-rw-r--r--test/Transforms/InstCombine/and-or-icmps.ll172
-rw-r--r--test/Transforms/InstCombine/and.ll216
-rw-r--r--test/Transforms/InstCombine/and2.ll87
-rw-r--r--test/Transforms/InstCombine/apint-shift.ll249
-rw-r--r--test/Transforms/InstCombine/apint-sub.ll2
-rw-r--r--test/Transforms/InstCombine/assume.ll4
-rw-r--r--test/Transforms/InstCombine/bitcast-bigendian.ll33
-rw-r--r--test/Transforms/InstCombine/bitcast.ll147
-rw-r--r--test/Transforms/InstCombine/bitreverse-fold.ll14
-rw-r--r--test/Transforms/InstCombine/bitreverse-known-bits.ll51
-rw-r--r--test/Transforms/InstCombine/bswap-fold.ll211
-rw-r--r--test/Transforms/InstCombine/builtin-object-size-offset.ll12
-rw-r--r--test/Transforms/InstCombine/builtin-object-size-ptr.ll8
-rw-r--r--test/Transforms/InstCombine/call-guard.ll32
-rw-r--r--test/Transforms/InstCombine/call_nonnull_arg.ll4
-rw-r--r--test/Transforms/InstCombine/cast-call-combine-prof.ll53
-rw-r--r--test/Transforms/InstCombine/compare-alloca.ll8
-rw-r--r--test/Transforms/InstCombine/compare-unescaped.ll2
-rw-r--r--test/Transforms/InstCombine/consecutive-fences.ll47
-rw-r--r--test/Transforms/InstCombine/constant-fold-math.ll8
-rw-r--r--test/Transforms/InstCombine/convergent.ll2
-rw-r--r--test/Transforms/InstCombine/deadcode.ll8
-rw-r--r--test/Transforms/InstCombine/debuginfo-dce.ll106
-rw-r--r--test/Transforms/InstCombine/double-float-shrink-2.ll445
-rw-r--r--test/Transforms/InstCombine/element-atomic-memcpy-to-loads.ll92
-rw-r--r--test/Transforms/InstCombine/exact.ll4
-rw-r--r--test/Transforms/InstCombine/fabs-libcall.ll21
-rw-r--r--test/Transforms/InstCombine/fabs.ll75
-rw-r--r--test/Transforms/InstCombine/fast-math.ll23
-rw-r--r--test/Transforms/InstCombine/fcmp.ll203
-rw-r--r--test/Transforms/InstCombine/float-shrink-compare.ll381
-rw-r--r--test/Transforms/InstCombine/fma.ll9
-rw-r--r--test/Transforms/InstCombine/getelementptr.ll38
-rw-r--r--test/Transforms/InstCombine/icmp-add.ll247
-rw-r--r--test/Transforms/InstCombine/icmp-shl-nsw.ll186
-rw-r--r--test/Transforms/InstCombine/icmp.ll231
-rw-r--r--test/Transforms/InstCombine/insert-extract-shuffle.ll10
-rw-r--r--test/Transforms/InstCombine/intrinsics.ll23
-rw-r--r--test/Transforms/InstCombine/lifetime-asan.ll12
-rw-r--r--test/Transforms/InstCombine/lifetime.ll40
-rw-r--r--test/Transforms/InstCombine/load-cmp.ll2
-rw-r--r--test/Transforms/InstCombine/lshr.ll102
-rw-r--r--test/Transforms/InstCombine/malloc-free-delete.ll8
-rw-r--r--test/Transforms/InstCombine/max-of-nots.ll22
-rw-r--r--test/Transforms/InstCombine/memcmp-1.ll43
-rw-r--r--test/Transforms/InstCombine/memcpy-addrspace.ll85
-rw-r--r--test/Transforms/InstCombine/memcpy-from-global.ll4
-rw-r--r--test/Transforms/InstCombine/memcpy-to-load.ll6
-rw-r--r--test/Transforms/InstCombine/memset_chk-1.ll8
-rw-r--r--test/Transforms/InstCombine/minmax-fold.ll157
-rw-r--r--test/Transforms/InstCombine/narrow-switch.ll46
-rw-r--r--test/Transforms/InstCombine/narrow.ll140
-rw-r--r--test/Transforms/InstCombine/not-fcmp.ll13
-rw-r--r--test/Transforms/InstCombine/not.ll104
-rw-r--r--test/Transforms/InstCombine/nvvm-intrins.ll471
-rw-r--r--test/Transforms/InstCombine/objsize.ll102
-rw-r--r--test/Transforms/InstCombine/or.ll188
-rw-r--r--test/Transforms/InstCombine/phi-select-constant.ll57
-rw-r--r--test/Transforms/InstCombine/phi-select-constexpr.ll19
-rw-r--r--test/Transforms/InstCombine/pow-1.ll6
-rw-r--r--test/Transforms/InstCombine/pr17827.ll4
-rw-r--r--test/Transforms/InstCombine/pr19420.ll54
-rw-r--r--test/Transforms/InstCombine/pr31990_wrong_memcpy.ll26
-rw-r--r--test/Transforms/InstCombine/prefetch-load.ll34
-rw-r--r--test/Transforms/InstCombine/preserved-analyses.ll33
-rw-r--r--test/Transforms/InstCombine/readnone-maythrow.ll34
-rw-r--r--test/Transforms/InstCombine/rem.ll544
-rw-r--r--test/Transforms/InstCombine/select-bitext.ll6
-rw-r--r--test/Transforms/InstCombine/select-cmp-br.ll342
-rw-r--r--test/Transforms/InstCombine/select.ll50
-rw-r--r--test/Transforms/InstCombine/select_meta.ll32
-rw-r--r--test/Transforms/InstCombine/shift-sra.ll137
-rw-r--r--test/Transforms/InstCombine/shift.ll253
-rw-r--r--test/Transforms/InstCombine/shufflevec-bitcast.ll16
-rw-r--r--test/Transforms/InstCombine/signext.ll17
-rw-r--r--test/Transforms/InstCombine/sitofp.ll190
-rw-r--r--test/Transforms/InstCombine/srem.ll8
-rw-r--r--test/Transforms/InstCombine/stpcpy_chk-1.ll8
-rw-r--r--test/Transforms/InstCombine/strcpy_chk-1.ll10
-rw-r--r--test/Transforms/InstCombine/sub-xor.ll10
-rw-r--r--test/Transforms/InstCombine/sub.ll444
-rw-r--r--test/Transforms/InstCombine/trunc.ll73
-rw-r--r--test/Transforms/InstCombine/type_pun.ll98
-rw-r--r--test/Transforms/InstCombine/urem.ll50
-rw-r--r--test/Transforms/InstCombine/vararg.ll12
-rw-r--r--test/Transforms/InstCombine/vec_demanded_elts.ll19
-rw-r--r--test/Transforms/InstCombine/vec_sext.ll2
-rw-r--r--test/Transforms/InstCombine/vector-casts.ll106
-rw-r--r--test/Transforms/InstCombine/vector-srem.ll13
-rw-r--r--test/Transforms/InstCombine/vector-urem.ll8
-rw-r--r--test/Transforms/InstCombine/vector_insertelt_shuffle.ll109
-rw-r--r--test/Transforms/InstCombine/win-math.ll35
-rw-r--r--test/Transforms/InstCombine/x86-avx2.ll24
-rw-r--r--test/Transforms/InstCombine/x86-avx512.ll864
-rw-r--r--test/Transforms/InstCombine/x86-muldq.ll160
-rw-r--r--test/Transforms/InstCombine/x86-pack.ll366
-rw-r--r--test/Transforms/InstCombine/x86-pshufb.ll42
-rw-r--r--test/Transforms/InstCombine/x86-vpermil.ll68
-rw-r--r--test/Transforms/InstCombine/xor.ll186
-rw-r--r--test/Transforms/InstCombine/xor2.ll28
-rw-r--r--test/Transforms/InstCombine/zero-point-zero-add.ll2
-rw-r--r--test/Transforms/InstCombine/zext-or-icmp.ll30
-rw-r--r--test/Transforms/InstCombine/zext-phi.ll32
-rw-r--r--test/Transforms/InstCombine/zext.ll2
-rw-r--r--test/Transforms/InstSimplify/AndOrXor.ll22
-rw-r--r--test/Transforms/InstSimplify/addsub.ll78
-rw-r--r--test/Transforms/InstSimplify/assume.ll60
-rw-r--r--test/Transforms/InstSimplify/bitreverse.ll31
-rw-r--r--test/Transforms/InstSimplify/div.ll56
-rw-r--r--test/Transforms/InstSimplify/fdiv.ll22
-rw-r--r--test/Transforms/InstSimplify/floating-point-arithmetic.ll54
-rw-r--r--test/Transforms/InstSimplify/icmp-constant.ll155
-rw-r--r--test/Transforms/InstSimplify/mul.ll11
-rw-r--r--test/Transforms/InstSimplify/rem.ll84
-rw-r--r--test/Transforms/InstSimplify/select.ll3
-rw-r--r--test/Transforms/InstSimplify/shift-knownbits.ll43
-rw-r--r--test/Transforms/InstSimplify/shufflevector.ll212
-rw-r--r--test/Transforms/InterleavedAccess/AArch64/interleaved-accesses.ll209
-rw-r--r--test/Transforms/InterleavedAccess/ARM/interleaved-accesses.ll292
-rw-r--r--test/Transforms/JumpThreading/guards.ll183
-rw-r--r--test/Transforms/JumpThreading/thread-loads.ll225
-rw-r--r--test/Transforms/LICM/atomics.ll148
-rw-r--r--test/Transforms/LICM/constexpr.ll2
-rw-r--r--test/Transforms/LICM/hoist-bitcast-load.ll3
-rw-r--r--test/Transforms/LICM/hoist-deref-load.ll3
-rw-r--r--test/Transforms/LICM/hoist-fast-fdiv.ll34
-rw-r--r--test/Transforms/LICM/hoist-nounwind.ll2
-rw-r--r--test/Transforms/LICM/hoist-round.ll5
-rw-r--r--test/Transforms/LICM/hoisting.ll173
-rw-r--r--test/Transforms/LICM/loopsink.ll1
-rw-r--r--test/Transforms/LICM/opt-remarks.ll2
-rw-r--r--test/Transforms/LICM/pr32129.ll18
-rw-r--r--test/Transforms/LICM/scalar-promote-unwind.ll263
-rw-r--r--test/Transforms/LICM/scalar-promote.ll (renamed from test/Transforms/LICM/scalar_promote.ll)27
-rw-r--r--test/Transforms/LICM/scalar_promote-unwind.ll72
-rw-r--r--test/Transforms/LICM/sink.ll2
-rw-r--r--test/Transforms/LICM/unrolled-deeply-nested.ll76
-rw-r--r--test/Transforms/LoadCombine/deadcode.ll39
-rw-r--r--test/Transforms/LoadCombine/load-combine-aa.ll26
-rw-r--r--test/Transforms/LoadStoreVectorizer/AMDGPU/aa-metadata.ll2
-rw-r--r--test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll19
-rw-r--r--test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll10
-rw-r--r--test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll4
-rw-r--r--test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll2
-rw-r--r--test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll192
-rw-r--r--test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll80
-rw-r--r--test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll10
-rw-r--r--test/Transforms/LoadStoreVectorizer/AMDGPU/missing-alignment.ll4
-rw-r--r--test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll4
-rw-r--r--test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll2
-rw-r--r--test/Transforms/LoadStoreVectorizer/AMDGPU/optnone.ll4
-rw-r--r--test/Transforms/LoadStoreVectorizer/AMDGPU/pointer-elements.ll38
-rw-r--r--test/Transforms/LoadStoreVectorizer/AMDGPU/store_with_aliasing_load.ll2
-rw-r--r--test/Transforms/LoadStoreVectorizer/AMDGPU/weird-type-accesses.ll32
-rw-r--r--test/Transforms/LoadStoreVectorizer/X86/load-width.ll38
-rw-r--r--test/Transforms/LoopDeletion/invalidation.ll42
-rw-r--r--test/Transforms/LoopDeletion/multiple-exit-conditions.ll2
-rw-r--r--test/Transforms/LoopDeletion/multiple-exits.ll92
-rw-r--r--test/Transforms/LoopIdiom/unroll.ll2
-rw-r--r--test/Transforms/LoopLoadElim/backward.ll1
-rw-r--r--test/Transforms/LoopLoadElim/forward.ll1
-rw-r--r--test/Transforms/LoopPredication/basic.ll571
-rw-r--r--test/Transforms/LoopPredication/nested.ll160
-rw-r--r--test/Transforms/LoopPredication/visited.ll140
-rw-r--r--test/Transforms/LoopRotate/phi-dbgvalue.ll79
-rw-r--r--test/Transforms/LoopSimplify/dbg-loc.ll7
-rw-r--r--test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll87
-rw-r--r--test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll8
-rw-r--r--test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-crash.ll31
-rw-r--r--test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll8
-rw-r--r--test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll2
-rw-r--r--test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll1
-rw-r--r--test/Transforms/LoopStrengthReduce/X86/canonical.ll65
-rw-r--r--test/Transforms/LoopStrengthReduce/X86/incorrect-offset-scaling.ll (renamed from test/Analysis/ScalarEvolution/incorrect-offset-scaling.ll)12
-rw-r--r--test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll2
-rw-r--r--test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll52
-rw-r--r--test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll58
-rw-r--r--test/Transforms/LoopStrengthReduce/X86/nested-loop.ll65
-rw-r--r--test/Transforms/LoopStrengthReduce/X86/sibling-loops.ll97
-rw-r--r--test/Transforms/LoopUnroll/AMDGPU/unroll-barrier.ll2
-rw-r--r--test/Transforms/LoopUnroll/AMDGPU/unroll-for-private.ll154
-rw-r--r--test/Transforms/LoopUnroll/basic.ll1
-rw-r--r--test/Transforms/LoopUnroll/epilog_const_phi.ll65
-rw-r--r--test/Transforms/LoopUnroll/full-unroll-bad-cost.ll1
-rw-r--r--test/Transforms/LoopUnroll/full-unroll-crashers.ll1
-rw-r--r--test/Transforms/LoopUnroll/full-unroll-heuristics-2.ll1
-rw-r--r--test/Transforms/LoopUnroll/full-unroll-heuristics-cmp.ll1
-rw-r--r--test/Transforms/LoopUnroll/full-unroll-heuristics-dce.ll1
-rw-r--r--test/Transforms/LoopUnroll/full-unroll-heuristics-geps.ll1
-rw-r--r--test/Transforms/LoopUnroll/full-unroll-heuristics-phi-prop.ll1
-rw-r--r--test/Transforms/LoopUnroll/full-unroll-heuristics.ll10
-rw-r--r--test/Transforms/LoopUnroll/full-unroll-keep-first-exit.ll1
-rw-r--r--test/Transforms/LoopUnroll/partial-unroll-const-bounds.ll10
-rw-r--r--test/Transforms/LoopUnroll/peel-loop-irreducible.ll36
-rw-r--r--test/Transforms/LoopUnroll/peel-loop-not-forced.ll53
-rw-r--r--test/Transforms/LoopUnroll/peel-loop-pgo.ll2
-rw-r--r--test/Transforms/LoopUnroll/peel-loop.ll2
-rw-r--r--test/Transforms/LoopUnroll/peel-loop2.ll61
-rw-r--r--test/Transforms/LoopUnroll/pr31718.ll55
-rw-r--r--test/Transforms/LoopUnroll/revisit.ll156
-rw-r--r--test/Transforms/LoopUnroll/runtime-loop.ll3
-rw-r--r--test/Transforms/LoopUnroll/runtime-loop1.ll3
-rw-r--r--test/Transforms/LoopUnroll/runtime-loop2.ll7
-rw-r--r--test/Transforms/LoopUnroll/runtime-loop3.ll3
-rw-r--r--test/Transforms/LoopUnroll/runtime-loop5.ll10
-rw-r--r--test/Transforms/LoopUnroll/unloop.ll2
-rw-r--r--test/Transforms/LoopUnroll/unroll-heuristics-pgo.ll4
-rw-r--r--test/Transforms/LoopUnroll/unroll-pragmas.ll24
-rw-r--r--test/Transforms/LoopUnroll/update-loop-info-in-subloops.ll1
-rw-r--r--test/Transforms/LoopUnswitch/AMDGPU/divergent-unswitch.ll85
-rw-r--r--test/Transforms/LoopUnswitch/AMDGPU/lit.local.cfg2
-rw-r--r--test/Transforms/LoopUnswitch/basictest.ll211
-rw-r--r--test/Transforms/LoopUnswitch/cold-loop.ll52
-rw-r--r--test/Transforms/LoopUnswitch/copy-metadata.ll4
-rw-r--r--test/Transforms/LoopUnswitch/crash.ll2
-rw-r--r--test/Transforms/LoopUnswitch/simplify-with-nonvalness.ll58
-rw-r--r--test/Transforms/LoopUnswitch/trivial-unswitch.ll46
-rw-r--r--test/Transforms/LoopVectorize/AArch64/aarch64-predication.ll82
-rw-r--r--test/Transforms/LoopVectorize/AArch64/first-order-recurrence.ll341
-rw-r--r--test/Transforms/LoopVectorize/AArch64/induction-trunc.ll30
-rw-r--r--test/Transforms/LoopVectorize/AArch64/interleaved-vs-scalar.ll38
-rw-r--r--test/Transforms/LoopVectorize/AArch64/interleaved_cost.ll226
-rw-r--r--test/Transforms/LoopVectorize/AArch64/loop-vectorization-factors.ll23
-rw-r--r--test/Transforms/LoopVectorize/AArch64/pr31900.ll37
-rw-r--r--test/Transforms/LoopVectorize/AArch64/smallest-and-widest-types.ll33
-rw-r--r--test/Transforms/LoopVectorize/AMDGPU/lit.local.cfg2
-rw-r--r--test/Transforms/LoopVectorize/AMDGPU/unroll-in-loop-vectorizer.ll28
-rw-r--r--test/Transforms/LoopVectorize/ARM/interleaved_cost.ll164
-rw-r--r--test/Transforms/LoopVectorize/SystemZ/branch-for-predicated-block.ll38
-rw-r--r--test/Transforms/LoopVectorize/SystemZ/lit.local.cfg2
-rw-r--r--test/Transforms/LoopVectorize/SystemZ/load-store-scalarization-cost.ll33
-rw-r--r--test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs.ll70
-rw-r--r--test/Transforms/LoopVectorize/X86/avx512.ll2
-rw-r--r--test/Transforms/LoopVectorize/X86/consecutive-ptr-uniforms.ll43
-rw-r--r--test/Transforms/LoopVectorize/X86/gather-vs-interleave.ll41
-rw-r--r--test/Transforms/LoopVectorize/X86/int128_no_gather.ll4
-rw-r--r--test/Transforms/LoopVectorize/X86/interleaving.ll1
-rw-r--r--test/Transforms/LoopVectorize/X86/metadata-enable.ll14
-rwxr-xr-xtest/Transforms/LoopVectorize/X86/scatter_crash.ll106
-rw-r--r--test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll57
-rw-r--r--test/Transforms/LoopVectorize/X86/vectorization-remarks-profitable.ll2
-rw-r--r--test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll50
-rw-r--r--test/Transforms/LoopVectorize/discriminator.ll70
-rw-r--r--test/Transforms/LoopVectorize/first-order-recurrence.ll398
-rw-r--r--test/Transforms/LoopVectorize/float-induction.ll236
-rw-r--r--test/Transforms/LoopVectorize/if-conversion.ll4
-rw-r--r--test/Transforms/LoopVectorize/if-pred-stores.ll2
-rw-r--r--test/Transforms/LoopVectorize/induction-step.ll101
-rw-r--r--test/Transforms/LoopVectorize/induction.ll92
-rw-r--r--test/Transforms/LoopVectorize/lcssa-crash.ll23
-rw-r--r--test/Transforms/LoopVectorize/lifetime.ll24
-rw-r--r--test/Transforms/LoopVectorize/loop-scalars.ll143
-rw-r--r--test/Transforms/LoopVectorize/multiple-strides-vectorization.ll8
-rw-r--r--test/Transforms/LoopVectorize/partial-lcssa.ll54
-rw-r--r--test/Transforms/LoopVectorize/pr31098.ll100
-rw-r--r--test/Transforms/LoopVectorize/pr31190.ll12
-rw-r--r--test/Transforms/LoopVectorize/reduction.ll46
-rw-r--r--test/Transforms/LoopVectorize/reverse_iter.ll5
-rw-r--r--test/Transforms/LoopVectorize/unroll-novec-memcheck-metadata.ll36
-rw-r--r--test/Transforms/LoopVectorize/vector-geps.ll61
-rw-r--r--test/Transforms/LoopVersioning/loop-invariant-bound.ll7
-rw-r--r--test/Transforms/LoopVersioningLICM/loopversioningLICM1.ll2
-rw-r--r--test/Transforms/LoopVersioningLICM/loopversioningLICM2.ll2
-rw-r--r--test/Transforms/LowerTypeTests/Inputs/import.yaml31
-rw-r--r--test/Transforms/LowerTypeTests/Inputs/use-typeid1-typeid2.yaml5
-rw-r--r--test/Transforms/LowerTypeTests/export-allones.ll161
-rw-r--r--test/Transforms/LowerTypeTests/export-bytearray.ll40
-rw-r--r--test/Transforms/LowerTypeTests/export-inline.ll35
-rw-r--r--test/Transforms/LowerTypeTests/export-single.ll17
-rw-r--r--test/Transforms/LowerTypeTests/external-global.ll14
-rw-r--r--test/Transforms/LowerTypeTests/import-unsat.ll3
-rw-r--r--test/Transforms/LowerTypeTests/import.ll170
-rw-r--r--test/Transforms/Mem2Reg/ignore-lifetime.ll12
-rw-r--r--test/Transforms/Mem2Reg/preserve-nonnull-load-metadata.ll89
-rw-r--r--test/Transforms/MemCpyOpt/lifetime.ll8
-rw-r--r--test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll12
-rw-r--r--test/Transforms/MemCpyOpt/memcpy-undef.ll6
-rw-r--r--test/Transforms/MemCpyOpt/memcpy.ll28
-rw-r--r--test/Transforms/MemCpyOpt/pr29105.ll16
-rw-r--r--test/Transforms/MergeFunc/mergefunc-preserve-debug-info.ll223
-rw-r--r--test/Transforms/MetaRenamer/metarenamer.ll15
-rw-r--r--test/Transforms/NewGVN/2007-07-26-PhiErasure.ll45
-rw-r--r--test/Transforms/NewGVN/2011-07-07-MatchIntrinsicExtract.ll19
-rw-r--r--test/Transforms/NewGVN/basic-cyclic-opt.ll82
-rw-r--r--test/Transforms/NewGVN/bitcast-of-call.ll18
-rw-r--r--test/Transforms/NewGVN/calloc-load-removal.ll1
-rw-r--r--test/Transforms/NewGVN/calls-nonlocal.ll2
-rw-r--r--test/Transforms/NewGVN/cond_br2.ll8
-rw-r--r--test/Transforms/NewGVN/condprop-xfail.ll123
-rw-r--r--test/Transforms/NewGVN/condprop.ll268
-rw-r--r--test/Transforms/NewGVN/deadstore.ll79
-rw-r--r--test/Transforms/NewGVN/debugloc.ll78
-rw-r--r--test/Transforms/NewGVN/edge.ll1
-rw-r--r--test/Transforms/NewGVN/fence.ll20
-rw-r--r--test/Transforms/NewGVN/flags.ll1
-rw-r--r--test/Transforms/NewGVN/fold-const-expr.ll3
-rw-r--r--test/Transforms/NewGVN/lifetime-simple.ll8
-rw-r--r--test/Transforms/NewGVN/load-constant-mem.ll26
-rw-r--r--test/Transforms/NewGVN/loadforward.ll32
-rw-r--r--test/Transforms/NewGVN/malloc-load-removal.ll1
-rw-r--r--test/Transforms/NewGVN/phi-edge-handling.ll60
-rw-r--r--test/Transforms/NewGVN/pr10820.ll2
-rw-r--r--test/Transforms/NewGVN/pr14166.ll1
-rw-r--r--test/Transforms/NewGVN/pr17732.ll2
-rw-r--r--test/Transforms/NewGVN/pr31594.ll9
-rw-r--r--test/Transforms/NewGVN/pr31613.ll9
-rw-r--r--test/Transforms/NewGVN/pr31682.ll1
-rw-r--r--test/Transforms/NewGVN/pr31758.ll34
-rw-r--r--test/Transforms/NewGVN/pr32403.ll65
-rw-r--r--test/Transforms/NewGVN/pr32607.ll33
-rw-r--r--test/Transforms/NewGVN/predicates.ll111
-rw-r--r--test/Transforms/NewGVN/propagate-ir-flags.ll1
-rw-r--r--test/Transforms/NewGVN/readattrs.ll1
-rw-r--r--test/Transforms/NewGVN/refine-stores.ll189
-rw-r--r--test/Transforms/NewGVN/rle-nonlocal.ll30
-rw-r--r--test/Transforms/NewGVN/rle.ll59
-rw-r--r--test/Transforms/NewGVN/storeoverstore.ll42
-rw-r--r--test/Transforms/NewGVN/tbaa.ll1
-rw-r--r--test/Transforms/NewGVN/volatile-nonvolatile.ll1
-rw-r--r--test/Transforms/ObjCARC/contract-storestrong.ll13
-rw-r--r--test/Transforms/PGOProfile/Inputs/memop_size_annotation.proftext27
-rw-r--r--test/Transforms/PGOProfile/Inputs/thinlto_samplepgo_icp.ll27
-rw-r--r--test/Transforms/PGOProfile/comdat_internal.ll6
-rw-r--r--test/Transforms/PGOProfile/indirect_call_promotion.ll4
-rw-r--r--test/Transforms/PGOProfile/memcpy.ll35
-rw-r--r--test/Transforms/PGOProfile/memop_size_annotation.ll59
-rw-r--r--test/Transforms/PGOProfile/memop_size_opt.ll100
-rw-r--r--test/Transforms/PGOProfile/multiple_hash_profile.ll4
-rw-r--r--test/Transforms/PGOProfile/statics_counter_naming.ll11
-rw-r--r--test/Transforms/PGOProfile/thinlto_samplepgo_icp.ll63
-rw-r--r--test/Transforms/RewriteStatepointsForGC/base-vector.ll15
-rw-r--r--test/Transforms/SCCP/indirectbr.ll76
-rw-r--r--test/Transforms/SCCP/loadtest.ll4
-rw-r--r--test/Transforms/SCCP/overdefined-div.ll32
-rw-r--r--test/Transforms/SLPVectorizer/AArch64/gather-root.ll132
-rw-r--r--test/Transforms/SLPVectorizer/AMDGPU/simplebb.ll6
-rw-r--r--test/Transforms/SLPVectorizer/SystemZ/SLP-cmp-cost-query.ll36
-rw-r--r--test/Transforms/SLPVectorizer/X86/bitreverse.ll420
-rw-r--r--test/Transforms/SLPVectorizer/X86/blending-shuffle.ll167
-rw-r--r--test/Transforms/SLPVectorizer/X86/extractelement.ll61
-rw-r--r--test/Transforms/SLPVectorizer/X86/horizontal-list.ll1615
-rw-r--r--test/Transforms/SLPVectorizer/X86/horizontal.ll123
-rw-r--r--test/Transforms/SLPVectorizer/X86/reduction_loads.ll27
-rw-r--r--test/Transforms/SLPVectorizer/X86/scheduling.ll6
-rw-r--r--test/Transforms/SLPVectorizer/X86/store-jumbled.ll68
-rw-r--r--test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll984
-rw-r--r--test/Transforms/SLPVectorizer/X86/vector.ll41
-rw-r--r--test/Transforms/SROA/alloca-address-space.ll84
-rw-r--r--test/Transforms/SROA/basictest.ll26
-rw-r--r--test/Transforms/SROA/pr26972.ll4
-rw-r--r--test/Transforms/SROA/preserve-nonnull.ll26
-rw-r--r--test/Transforms/SROA/vector-lifetime-intrinsic.ll8
-rw-r--r--test/Transforms/SafeStack/AArch64/abi_ssp.ll7
-rw-r--r--test/Transforms/SafeStack/X86/abi_ssp.ll21
-rw-r--r--test/Transforms/SafeStack/X86/call.ll8
-rw-r--r--test/Transforms/SafeStack/X86/coloring-ssp.ll12
-rw-r--r--test/Transforms/SafeStack/X86/coloring.ll16
-rw-r--r--test/Transforms/SafeStack/X86/coloring2.ll162
-rw-r--r--test/Transforms/SafeStack/X86/debug-loc2.ll4
-rw-r--r--test/Transforms/SafeStack/X86/layout-frag.ll16
-rw-r--r--test/Transforms/SampleProfile/Inputs/import.prof4
-rw-r--r--test/Transforms/SampleProfile/Inputs/indirect-call.afdobin0 -> 1744 bytes
-rw-r--r--test/Transforms/SampleProfile/Inputs/indirect-call.prof13
-rw-r--r--test/Transforms/SampleProfile/branch.ll5
-rw-r--r--test/Transforms/SampleProfile/calls.ll16
-rw-r--r--test/Transforms/SampleProfile/cov-zero-samples.ll8
-rw-r--r--test/Transforms/SampleProfile/discriminator.ll4
-rw-r--r--test/Transforms/SampleProfile/early-inline.ll4
-rw-r--r--test/Transforms/SampleProfile/fnptr.ll8
-rw-r--r--test/Transforms/SampleProfile/import.ll31
-rw-r--r--test/Transforms/SampleProfile/indirect-call-gcc.ll26
-rw-r--r--test/Transforms/SampleProfile/indirect-call.ll82
-rw-r--r--test/Transforms/SampleProfile/inline-coverage.ll4
-rw-r--r--test/Transforms/SampleProfile/inline.ll8
-rw-r--r--test/Transforms/SampleProfile/propagate.ll10
-rw-r--r--test/Transforms/SampleProfile/remarks.ll14
-rw-r--r--test/Transforms/Scalarizer/vector-gep.ll122
-rw-r--r--test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/split-gep-and-gvn-addrspace-addressing-modes.ll6
-rw-r--r--test/Transforms/SimplifyCFG/ARM/switch-to-lookup-table.ll10
-rw-r--r--test/Transforms/SimplifyCFG/CoveredLookupTable.ll2
-rw-r--r--test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll2
-rw-r--r--test/Transforms/SimplifyCFG/X86/switch-table-bug.ll2
-rw-r--r--test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll7
-rw-r--r--test/Transforms/SimplifyCFG/critedge-assume.ll83
-rw-r--r--test/Transforms/SimplifyCFG/div-rem-pairs.ll119
-rw-r--r--test/Transforms/SimplifyCFG/empty-cleanuppad.ll8
-rw-r--r--test/Transforms/SimplifyCFG/lifetime.ll8
-rw-r--r--test/Transforms/SimplifyCFG/merge-cond-stores.ll163
-rw-r--r--test/Transforms/SimplifyCFG/rangereduce.ll2
-rw-r--r--test/Transforms/SimplifyCFG/remove-debug-2.ll68
-rw-r--r--test/Transforms/SimplifyCFG/switch_create.ll2
-rw-r--r--test/Transforms/StraightLineStrengthReduce/AMDGPU/reassociate-geps-and-slsr-addrspace.ll8
-rw-r--r--test/Transforms/StripSymbols/strip-dead-debug-info.ll16
-rw-r--r--test/Transforms/StructurizeCFG/rebuild-ssa-infinite-loop.ll57
-rw-r--r--test/Transforms/ThinLTOBitcodeWriter/circular-reference.ll9
-rw-r--r--test/Transforms/ThinLTOBitcodeWriter/comdat.ll80
-rw-r--r--test/Transforms/ThinLTOBitcodeWriter/filter-alias.ll16
-rw-r--r--test/Transforms/ThinLTOBitcodeWriter/no-type-md.ll37
-rw-r--r--test/Transforms/ThinLTOBitcodeWriter/split-vfunc-internal.ll21
-rw-r--r--test/Transforms/ThinLTOBitcodeWriter/split-vfunc.ll75
-rw-r--r--test/Transforms/ThinLTOBitcodeWriter/split.ll43
-rw-r--r--test/Transforms/ThinLTOBitcodeWriter/unsplittable.ll5
-rw-r--r--test/Transforms/Util/MemorySSA/invariant-groups.ll30
-rw-r--r--test/Transforms/Util/PredicateInfo/condprop.ll471
-rw-r--r--test/Transforms/Util/PredicateInfo/diamond.ll68
-rw-r--r--test/Transforms/Util/PredicateInfo/edge.ll242
-rw-r--r--test/Transforms/Util/PredicateInfo/testandor.ll211
-rw-r--r--test/Transforms/Util/clone-dicompileunit.ll66
-rw-r--r--test/Transforms/Util/simplify-dbg-declare-load.ll2
-rw-r--r--test/Transforms/Util/strip-nonlinetable-debuginfo-loops.ll71
-rw-r--r--test/Transforms/WholeProgramDevirt/Inputs/export.yaml20
-rw-r--r--test/Transforms/WholeProgramDevirt/Inputs/import-indir.yaml41
-rw-r--r--test/Transforms/WholeProgramDevirt/Inputs/import-single-impl.yaml13
-rw-r--r--test/Transforms/WholeProgramDevirt/Inputs/import-uniform-ret-val.yaml19
-rw-r--r--test/Transforms/WholeProgramDevirt/Inputs/import-unique-ret-val0.yaml11
-rw-r--r--test/Transforms/WholeProgramDevirt/Inputs/import-unique-ret-val1.yaml11
-rw-r--r--test/Transforms/WholeProgramDevirt/Inputs/import-vcp.yaml19
-rw-r--r--test/Transforms/WholeProgramDevirt/bad-read-from-vtable.ll4
-rw-r--r--test/Transforms/WholeProgramDevirt/export-nothing.ll7
-rw-r--r--test/Transforms/WholeProgramDevirt/export-single-impl.ll78
-rw-r--r--test/Transforms/WholeProgramDevirt/export-uniform-ret-val.ll36
-rw-r--r--test/Transforms/WholeProgramDevirt/export-unique-ret-val.ll79
-rw-r--r--test/Transforms/WholeProgramDevirt/export-unsuccessful-checked.ll28
-rw-r--r--test/Transforms/WholeProgramDevirt/export-vcp.ll83
-rw-r--r--test/Transforms/WholeProgramDevirt/import-indir.ll95
-rw-r--r--test/Transforms/WholeProgramDevirt/import.ll108
-rw-r--r--test/Transforms/WholeProgramDevirt/unique-retval.ll15
-rw-r--r--test/Transforms/WholeProgramDevirt/vcp-accesses-memory.ll46
-rw-r--r--test/Transforms/WholeProgramDevirt/vcp-decl.ll32
-rw-r--r--test/Transforms/WholeProgramDevirt/vcp-no-this.ll4
-rw-r--r--test/Transforms/WholeProgramDevirt/vcp-non-constant-arg.ll4
-rw-r--r--test/Transforms/WholeProgramDevirt/vcp-too-wide-ints.ll54
-rw-r--r--test/Transforms/WholeProgramDevirt/vcp-type-mismatch.ll14
-rw-r--r--test/Transforms/WholeProgramDevirt/vcp-uses-this.ll4
-rw-r--r--test/Transforms/WholeProgramDevirt/virtual-const-prop-begin.ll6
-rw-r--r--test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll6
-rw-r--r--test/Transforms/WholeProgramDevirt/virtual-const-prop-end.ll6
-rw-r--r--test/Verifier/amdgpu-cc.ll55
-rw-r--r--test/Verifier/dbg-line-without-file.ll15
-rw-r--r--test/Verifier/dbg-orphaned-compileunit.ll3
-rw-r--r--test/Verifier/diderivedtype-address-space-atomic-type.ll6
-rw-r--r--test/Verifier/diderivedtype-address-space-const-type.ll6
-rw-r--r--test/Verifier/diderivedtype-address-space-friend.ll6
-rw-r--r--test/Verifier/diderivedtype-address-space-inheritance.ll6
-rw-r--r--test/Verifier/diderivedtype-address-space-member.ll6
-rw-r--r--test/Verifier/diderivedtype-address-space-ptr-to-member-type.ll6
-rw-r--r--test/Verifier/diderivedtype-address-space-restrict-type.ll6
-rw-r--r--test/Verifier/diderivedtype-address-space-rvalue-reference-type.ll6
-rw-r--r--test/Verifier/diderivedtype-address-space-typedef.ll6
-rw-r--r--test/Verifier/diderivedtype-address-space-volatile-type.ll6
-rw-r--r--test/Verifier/diexpression-swap.ll5
-rw-r--r--test/Verifier/fnarg-debuginfo.ll26
-rw-r--r--test/Verifier/fnarg-nodebug.ll59
-rw-r--r--test/Verifier/fp-intrinsics.ll43
-rw-r--r--test/Verifier/function-metadata-bad.ll2
-rw-r--r--test/Verifier/metadata-function-prof.ll2
-rw-r--r--test/tools/dsymutil/X86/generate-empty-CU.test33
-rw-r--r--test/tools/gold/X86/Inputs/thinlto_weak_library1.ll17
-rw-r--r--test/tools/gold/X86/Inputs/thinlto_weak_library2.ll20
-rw-r--r--test/tools/gold/X86/cache.ll4
-rw-r--r--test/tools/gold/X86/error-unopenable.ll8
-rw-r--r--test/tools/gold/X86/parallel.ll4
-rw-r--r--test/tools/gold/X86/stats.ll7
-rw-r--r--test/tools/gold/X86/thinlto.ll3
-rw-r--r--test/tools/gold/X86/thinlto_object_suffix_replace.ll41
-rw-r--r--test/tools/gold/X86/thinlto_weak_library.ll41
-rw-r--r--test/tools/gold/X86/thinlto_weak_resolution.ll16
-rw-r--r--test/tools/llvm-ar/Inputs/absolute-paths.libbin0 -> 972 bytes
-rw-r--r--test/tools/llvm-ar/absolute-paths.test20
-rw-r--r--test/tools/llvm-config/paths.test21
-rw-r--r--test/tools/llvm-cov/Inputs/multiple-files2.covmappingbin0 -> 136 bytes
-rw-r--r--test/tools/llvm-cov/demangle.test3
-rw-r--r--test/tools/llvm-cov/multiple-files.test20
-rw-r--r--test/tools/llvm-cov/report.cpp4
-rw-r--r--test/tools/llvm-cov/warnings.h2
-rw-r--r--test/tools/llvm-cxxfilt/coff-import.test5
-rw-r--r--test/tools/llvm-cxxfilt/types.test5
-rw-r--r--test/tools/llvm-cxxfilt/underscore.test11
-rw-r--r--test/tools/llvm-dwp/X86/compressfail.test2
-rw-r--r--test/tools/llvm-dwp/X86/nocompress.test2
-rw-r--r--test/tools/llvm-extract/recursive.ll32
-rw-r--r--test/tools/llvm-lto2/X86/nodatalayout.ll2
-rw-r--r--test/tools/llvm-lto2/X86/pipeline.ll10
-rw-r--r--test/tools/llvm-lto2/errors.ll9
-rw-r--r--test/tools/llvm-nm/ARM/Inputs/print-size.macho-armv7mbin0 -> 356 bytes
-rw-r--r--test/tools/llvm-nm/ARM/lit.local.cfg2
-rw-r--r--test/tools/llvm-nm/ARM/macho-print-size.test3
-rw-r--r--test/tools/llvm-nm/lit.local.cfg2
-rw-r--r--test/tools/llvm-nm/wasm/exports.yaml22
-rw-r--r--test/tools/llvm-nm/wasm/imports.yaml25
-rw-r--r--test/tools/llvm-objdump/AArch64/Inputs/print-armv8crypto.obj.macho-aarch64bin0 -> 316 bytes
-rw-r--r--test/tools/llvm-objdump/AArch64/mach-print-armv8crypto.test3
-rw-r--r--test/tools/llvm-objdump/AArch64/macho-print-mrs.test2
-rw-r--r--test/tools/llvm-objdump/AMDGPU/Inputs/source-lines.cl6
-rw-r--r--test/tools/llvm-objdump/AMDGPU/lit.local.cfg2
-rw-r--r--test/tools/llvm-objdump/AMDGPU/source-lines.ll109
-rw-r--r--test/tools/llvm-objdump/ARM/Inputs/divs.macho-armv7sbin0 -> 156 bytes
-rw-r--r--test/tools/llvm-objdump/ARM/macho-nomcpu-armv7s.test3
-rw-r--r--test/tools/llvm-objdump/ARM/v5t-subarch.s10
-rw-r--r--test/tools/llvm-objdump/ARM/v5te-subarch.s10
-rw-r--r--test/tools/llvm-objdump/ARM/v5tej-subarch.s7
-rw-r--r--test/tools/llvm-objdump/ARM/v6-neg-subfeatures.s10
-rw-r--r--test/tools/llvm-objdump/ARM/v6-subarch.s9
-rw-r--r--test/tools/llvm-objdump/ARM/v6-subfeatures.s9
-rw-r--r--test/tools/llvm-objdump/ARM/v6k-subarch.s9
-rw-r--r--test/tools/llvm-objdump/ARM/v6m-subarch.s9
-rw-r--r--test/tools/llvm-objdump/ARM/v6t2-subarch.s10
-rw-r--r--test/tools/llvm-objdump/ARM/v7a-neg-subfeature.s44
-rw-r--r--test/tools/llvm-objdump/ARM/v7a-subfeature.s36
-rw-r--r--test/tools/llvm-objdump/ARM/v7m-neg-subfeatures.s18
-rw-r--r--test/tools/llvm-objdump/ARM/v7m-subarch.s10
-rw-r--r--test/tools/llvm-objdump/ARM/v7m-subfeatures.s26
-rw-r--r--test/tools/llvm-objdump/ARM/v7r-subfeatures.s20
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-bind-add-addr-imm-scaledbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-bind-add_addr_ulebbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-bind-bad-opcode-valuebin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-bind-bind-add-addr-ulebbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-bind-do-bind-no-segIndexbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-bind-dylib-ordinal-ulebbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-bind-dylib-ordinal-uleb-malformed-uleb128bin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-bind-dylib-ordinal-uleb-too-bigbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-bind-dylib-special-immbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-bind-seg-too-bigbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-bind-segoff-too-bigbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-bind-set-addend-slebbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-bind-set-symbolbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-bind-set-type-immbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-bind-uleb-times-skipping-ulebbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-do-bind-no-dylib-ordinalbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-do-bind-no-symbolbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-lazy-do-bind-add-addr-imm-scaledbin0 -> 8448 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-lazy-do-bind-uleb-times-skipping-ulebbin0 -> 8448 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-lazy-do_bind_add_addr_ulebbin0 -> 8448 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-rebase-add-addr-imm-scaledbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-rebase-add-addr-ulebbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-rebase-add-addr-uleb-too-bigbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-rebase-bad-opcode-valuebin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-rebase-imm-timesbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-rebase-seg-too-bigbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-rebase-segoff-too-bigbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-rebase-set-type-immbin0 -> 8432 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-rebase-uleb-malformed-uleb128bin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-rebase-uleb-timesbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-rebase-uleb-times-skipping-ulebbin0 -> 8456 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-weak-bind-set-dylib-ordinal-immbin0 -> 8464 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-weak-bind-set-dylib-ordinal-ulebbin0 -> 8464 bytes
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/macho-weak-bind-set-dylib-special-immbin0 -> 8464 bytes
-rw-r--r--test/tools/llvm-objdump/Inputs/test.wasmbin165 -> 181 bytes
-rw-r--r--test/tools/llvm-objdump/Mips/disassemble-all.test16
-rw-r--r--test/tools/llvm-objdump/Mips/lit.local.cfg3
-rwxr-xr-xtest/tools/llvm-objdump/X86/Inputs/Objc2.64bit.obj.dylib-x86_64bin0 -> 66544 bytes
-rw-r--r--test/tools/llvm-objdump/X86/Inputs/macho-invalid-bind-entrybin0 -> 3448 bytes
-rw-r--r--test/tools/llvm-objdump/X86/Inputs/nofirst-symbol.macho-x86_64bin0 -> 336 bytes
-rw-r--r--test/tools/llvm-objdump/X86/Inputs/note.macho-x86bin0 -> 76 bytes
-rw-r--r--test/tools/llvm-objdump/X86/Inputs/openbsd-phdrs.elf-x86-64bin0 -> 600 bytes
-rw-r--r--test/tools/llvm-objdump/X86/Inputs/phdr-note.elf-x86-64bin0 -> 5048 bytes
-rw-r--r--test/tools/llvm-objdump/X86/Inputs/phdrs.elf-x86-64bin0 -> 4720 bytes
-rw-r--r--test/tools/llvm-objdump/X86/Inputs/stripped-elf.sobin0 -> 6088 bytes
-rw-r--r--test/tools/llvm-objdump/X86/Inputs/stub-nosyms.macho-x86_64bin0 -> 528 bytes
-rwxr-xr-xtest/tools/llvm-objdump/X86/Inputs/thread.macho-i386bin0 -> 9204 bytes
-rw-r--r--test/tools/llvm-objdump/X86/invalid-macho-build-version.yaml44
-rw-r--r--test/tools/llvm-objdump/X86/macho-build-version.yaml57
-rw-r--r--test/tools/llvm-objdump/X86/macho-info-plist-nofollow.test10
-rw-r--r--test/tools/llvm-objdump/X86/macho-nofirst-symbol-disassembly.test8
-rw-r--r--test/tools/llvm-objdump/X86/macho-objc-meta-data.test62
-rw-r--r--test/tools/llvm-objdump/X86/macho-print-thread.test11
-rw-r--r--test/tools/llvm-objdump/X86/macho-private-headers.test8
-rw-r--r--test/tools/llvm-objdump/X86/macho-stub-nosyms-disassembly.test3
-rw-r--r--test/tools/llvm-objdump/X86/malformed-machos.test3
-rw-r--r--test/tools/llvm-objdump/X86/openbsd-headers.test2
-rw-r--r--test/tools/llvm-objdump/X86/phdrs.test4
-rw-r--r--test/tools/llvm-objdump/X86/stripped-shared.test10
-rw-r--r--test/tools/llvm-objdump/macho-bad-bind.test101
-rw-r--r--test/tools/llvm-objdump/macho-bad-ordinal.test8
-rw-r--r--test/tools/llvm-objdump/malformed-macho.test5
-rw-r--r--test/tools/llvm-objdump/wasm.txt4
-rw-r--r--test/tools/llvm-pdbdump/Inputs/SimplePaddingTest.cpp167
-rw-r--r--test/tools/llvm-pdbdump/Inputs/SimplePaddingTest.pdbbin0 -> 118784 bytes
-rw-r--r--test/tools/llvm-pdbdump/class-layout.test40
-rw-r--r--test/tools/llvm-pdbdump/enum-layout.test2
-rw-r--r--test/tools/llvm-pdbdump/regex-filter.test29
-rw-r--r--test/tools/llvm-pdbdump/simple-padding-graphical.test121
-rw-r--r--test/tools/llvm-pdbdump/simple-padding-text.test94
-rw-r--r--test/tools/llvm-profdata/memop-size-prof.proftext123
-rw-r--r--test/tools/llvm-profdata/value-prof.proftext25
-rw-r--r--test/tools/llvm-readobj/Inputs/codeview-cycle.objbin0 -> 1034 bytes
-rw-r--r--test/tools/llvm-readobj/Inputs/codeview-label.objbin0 -> 830 bytes
-rw-r--r--test/tools/llvm-readobj/Inputs/codeview-merging-anon.objbin0 -> 1181 bytes
-rw-r--r--test/tools/llvm-readobj/Inputs/codeview-unsorted.objbin0 -> 1058 bytes
-rw-r--r--test/tools/llvm-readobj/Inputs/trivial.obj.wasmbin0 -> 221 bytes
-rw-r--r--test/tools/llvm-readobj/codeview-label.test16
-rw-r--r--test/tools/llvm-readobj/codeview-merging-anon.test29
-rw-r--r--test/tools/llvm-readobj/codeview-merging-cycle.test19
-rw-r--r--test/tools/llvm-readobj/codeview-merging-unsorted.test40
-rw-r--r--test/tools/llvm-readobj/codeview-merging.test56
-rw-r--r--test/tools/llvm-readobj/file-headers.test10
-rw-r--r--test/tools/llvm-readobj/relocations.test25
-rw-r--r--test/tools/llvm-readobj/sections.test57
-rw-r--r--test/tools/llvm-readobj/symbols.test21
-rw-r--r--test/tools/llvm-strings/Inputs/numbers10
-rw-r--r--test/tools/llvm-strings/radix.test33
-rw-r--r--test/tools/llvm-symbolizer/Inputs/discrimbin0 -> 9973 bytes
-rw-r--r--test/tools/llvm-symbolizer/Inputs/discrim.c8
-rw-r--r--test/tools/llvm-symbolizer/Inputs/discrim.inp5
-rw-r--r--test/tools/llvm-symbolizer/sym-verbose.test39
-rw-r--r--test/tools/llvm-xray/X86/Inputs/fdr-log-version-1.xraybin0 -> 232 bytes
-rw-r--r--test/tools/llvm-xray/X86/Inputs/simple-instrmap.yaml12
-rw-r--r--test/tools/llvm-xray/X86/account-deduce-tail-call.yaml2
-rw-r--r--test/tools/llvm-xray/X86/account-keep-going.yaml2
-rw-r--r--test/tools/llvm-xray/X86/account-simple-case.yaml2
-rw-r--r--test/tools/llvm-xray/X86/account-simple-sorting.yaml18
-rw-r--r--test/tools/llvm-xray/X86/convert-fdr-to-yaml.txt24
-rw-r--r--test/tools/llvm-xray/X86/convert-with-yaml-instrmap.txt2
-rw-r--r--test/tools/llvm-xray/X86/graph-color-simple-case.yaml75
-rw-r--r--test/tools/llvm-xray/X86/graph-deduce-tail-call.yaml75
-rw-r--r--test/tools/llvm-xray/X86/graph-simple-case.yaml44
-rw-r--r--test/tools/llvm-xray/X86/graph-zero-latency-calls.yaml20
-rw-r--r--test/tools/llvm-xray/X86/no-subcommand-noassert.txt3
-rw-r--r--test/tools/sancov/AArch64/print_coverage_pcs.test2
-rw-r--r--test/tools/sancov/Inputs/src_blacklist.txt2
-rw-r--r--test/tools/sancov/blacklist.test26
-rw-r--r--test/tools/sancov/validation.test6
-rw-r--r--test/tools/yaml2obj/invalid_output_file.test4
-rw-r--r--test/tools/yaml2obj/lit.local.cfg1
-rw-r--r--test/tools/yaml2obj/missing_document_tag.yaml3
-rw-r--r--test/tools/yaml2obj/unsupported_document_tag.yaml2
3396 files changed, 460267 insertions, 49975 deletions
diff --git a/test/Analysis/BasicAA/call-attrs.ll b/test/Analysis/BasicAA/call-attrs.ll
new file mode 100644
index 000000000000..9cd17e486799
--- /dev/null
+++ b/test/Analysis/BasicAA/call-attrs.ll
@@ -0,0 +1,42 @@
+; RUN: opt < %s -basicaa -aa-eval -print-all-alias-modref-info -disable-output 2>&1 | FileCheck %s
+
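+; BasicAA should derive the same mod/ref behaviour whether the memory effects
+; are given as parameter attributes (readonly/writeonly/readnone on the
+; argument) or as the corresponding function attributes, and a "deopt" operand
+; bundle should add a read of the bundle's pointer on top of that (see the
+; CHECK lines at the bottom).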
+declare void @readonly_attr(i8* readonly nocapture)
+declare void @writeonly_attr(i8* writeonly nocapture)
+declare void @readnone_attr(i8* readnone nocapture)
+
+declare void @readonly_func(i8* nocapture) readonly
+declare void @writeonly_func(i8* nocapture) writeonly
+declare void @readnone_func(i8* nocapture) readnone
+
+declare void @read_write(i8* writeonly nocapture, i8* readonly nocapture, i8* readnone nocapture)
+
+declare void @func()
+
+define void @test(i8* noalias %p) {
+entry:
+ call void @readonly_attr(i8* %p)
+ call void @readonly_func(i8* %p)
+
+ call void @writeonly_attr(i8* %p)
+ call void @writeonly_func(i8* %p)
+
+ call void @readnone_attr(i8* %p)
+ call void @readnone_func(i8* %p)
+
+ call void @read_write(i8* %p, i8* %p, i8* %p)
+
+ call void @func() ["deopt" (i8* %p)]
+ call void @writeonly_attr(i8* %p) ["deopt" (i8* %p)]
+
+ ret void
+}
+
+; CHECK: Just Ref: Ptr: i8* %p <-> call void @readonly_attr(i8* %p)
+; CHECK: Just Ref: Ptr: i8* %p <-> call void @readonly_func(i8* %p)
+; CHECK: Just Mod: Ptr: i8* %p <-> call void @writeonly_attr(i8* %p)
+; CHECK: Just Mod: Ptr: i8* %p <-> call void @writeonly_func(i8* %p)
+; CHECK: NoModRef: Ptr: i8* %p <-> call void @readnone_attr(i8* %p)
+; CHECK: NoModRef: Ptr: i8* %p <-> call void @readnone_func(i8* %p)
+; CHECK: Both ModRef: Ptr: i8* %p <-> call void @read_write(i8* %p, i8* %p, i8* %p)
+; CHECK: Just Ref: Ptr: i8* %p <-> call void @func() [ "deopt"(i8* %p) ]
+; CHECK: Both ModRef: Ptr: i8* %p <-> call void @writeonly_attr(i8* %p) [ "deopt"(i8* %p) ]
diff --git a/test/Analysis/BasicAA/modref.ll b/test/Analysis/BasicAA/modref.ll
index e42793936c3d..71a3eac3a74e 100644
--- a/test/Analysis/BasicAA/modref.ll
+++ b/test/Analysis/BasicAA/modref.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -basicaa -gvn -dse -S | FileCheck %s
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
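+; (lifetime intrinsics are overloaded on their pointer operand type, hence the .p0i8 suffix)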
declare void @external(i32*)
@@ -67,7 +67,7 @@ define void @test3(i8* %P, i8 %X) {
%P2 = getelementptr i8, i8* %P, i32 2
store i8 %Y, i8* %P2 ;; Not read by lifetime.end, should be removed.
; CHECK: store i8 2, i8* %P2
- call void @llvm.lifetime.end(i64 1, i8* %P)
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %P)
store i8 2, i8* %P2
; CHECK-NOT: store
ret void
@@ -81,7 +81,7 @@ define void @test3a(i8* %P, i8 %X) {
%P2 = getelementptr i8, i8* %P, i32 2
store i8 %Y, i8* %P2
; CHECK-NEXT: call void @llvm.lifetime.end
- call void @llvm.lifetime.end(i64 10, i8* %P)
+ call void @llvm.lifetime.end.p0i8(i64 10, i8* %P)
ret void
; CHECK-NEXT: ret void
}
diff --git a/test/Analysis/BranchProbabilityInfo/basic.ll b/test/Analysis/BranchProbabilityInfo/basic.ll
index 67d3e9e850c3..94ea5a3d1d8e 100644
--- a/test/Analysis/BranchProbabilityInfo/basic.ll
+++ b/test/Analysis/BranchProbabilityInfo/basic.ll
@@ -143,6 +143,43 @@ exit:
declare i32 @regular_function(i32 %i)
+define i32 @test_cold_call_sites_with_prof(i32 %a, i32 %b, i1 %flag, i1 %flag2) {
+; CHECK: Printing analysis {{.*}} for function 'test_cold_call_sites_with_prof'
+entry:
+ br i1 %flag, label %then, label %else
+; CHECK: edge entry -> then probability is 0x07878788 / 0x80000000 = 5.88%
+; CHECK: edge entry -> else probability is 0x78787878 / 0x80000000 = 94.12% [HOT edge]
+
+then:
+ br i1 %flag2, label %then2, label %else2, !prof !3
+; CHECK: edge then -> then2 probability is 0x7ebb907a / 0x80000000 = 99.01% [HOT edge]
+; CHECK: edge then -> else2 probability is 0x01446f86 / 0x80000000 = 0.99%
+
+then2:
+ br label %join
+; CHECK: edge then2 -> join probability is 0x80000000 / 0x80000000 = 100.00% [HOT edge]
+
+else2:
+ br label %join
+; CHECK: edge else2 -> join probability is 0x80000000 / 0x80000000 = 100.00% [HOT edge]
+
+join:
+ %joinresult = phi i32 [ %a, %then2 ], [ %b, %else2 ]
+ call void @coldfunc()
+ br label %exit
+; CHECK: edge join -> exit probability is 0x80000000 / 0x80000000 = 100.00% [HOT edge]
+
+else:
+ br label %exit
+; CHECK: edge else -> exit probability is 0x80000000 / 0x80000000 = 100.00% [HOT edge]
+
+exit:
+ %result = phi i32 [ %joinresult, %join ], [ %b, %else ]
+ ret i32 %result
+}
+
+!3 = !{!"branch_weights", i32 100, i32 1}
+
define i32 @test_cold_call_sites(i32* %a) {
; Test that edges to blocks post-dominated by cold call sites
; are marked as not expected to be taken.
diff --git a/test/Analysis/ConstantFolding/gep-constanfolding-error.ll b/test/Analysis/ConstantFolding/gep-constanfolding-error.ll
new file mode 100644
index 000000000000..50ad61a8f100
--- /dev/null
+++ b/test/Analysis/ConstantFolding/gep-constanfolding-error.ll
@@ -0,0 +1,52 @@
+; RUN: opt -gvn -S -o - %s | FileCheck %s
+; RUN: opt -newgvn -S -o - %s | FileCheck %s
+; Test that the constant-folded getelementptr computation results in
+; j[5][4][1] (j+239)
+; and not [1][4][4][1] (#449), which would incorrectly be out of range.
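+; For reference, the byte offsets (index strides 252, 42, 7, 1 for this type):
+;   j[0][5][4][1] -> 5*42 + 4*7 + 1 = 239 (in range)
+;   j[1][4][4][1] -> 1*252 + 4*42 + 4*7 + 1 = 449 (out of range)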
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "armv7-none-eabi"
+
+@f = local_unnamed_addr global i32 2, align 4
+@t6 = local_unnamed_addr global i32 1, align 4
+@j = local_unnamed_addr global [6 x [6 x [7 x i8]]] [[6 x [7 x i8]] [[7 x i8] c"\06\00\00\00\00\00\00", [7 x i8] zeroinitializer, [7 x i8] zeroinitializer, [7 x i8] zeroinitializer, [7 x i8] zeroinitializer, [7 x i8] zeroinitializer], [6 x [7 x i8]] zeroinitializer, [6 x [7 x i8]] zeroinitializer, [6 x [7 x i8]] zeroinitializer, [6 x [7 x i8]] zeroinitializer, [6 x [7 x i8]] zeroinitializer], align 1
+@p = internal global i64 0, align 8
+@y = local_unnamed_addr global i64* @p, align 4
+@b = internal unnamed_addr global i32 0, align 4
+@h = common local_unnamed_addr global i16 0, align 2
+@a = common local_unnamed_addr global i32 0, align 4
+@k = common local_unnamed_addr global i32 0, align 4
+@t11 = common local_unnamed_addr global i32 0, align 4
+
+; Function Attrs: nounwind
+define i32 @main() local_unnamed_addr {
+entry:
+ %0 = load i32, i32* @t6, align 4
+ %inc = add nsw i32 %0, 1
+ store i32 %inc, i32* @t6, align 4
+ store i16 4, i16* @h, align 2
+ %1 = load i32, i32* @a, align 4
+ %conv = trunc i32 %1 to i8
+ store i32 1, i32* @f, align 4
+ %2 = load i64, i64* @p, align 8
+ %cmp4 = icmp slt i64 %2, 2
+ %conv6 = zext i1 %cmp4 to i8
+ %3 = load i16, i16* @h, align 2
+ %conv7 = sext i16 %3 to i32
+ %add = add nsw i32 %conv7, 1
+ %f.promoted = load i32, i32* @f, align 4
+ %4 = mul i32 %conv7, 7
+ %5 = add i32 %4, 5
+ %6 = sub i32 -1, %f.promoted
+ %7 = icmp sgt i32 %6, -2
+ %smax = select i1 %7, i32 %6, i32 -2
+ %8 = sub i32 6, %smax
+ %scevgep = getelementptr [6 x [6 x [7 x i8]]], [6 x [6 x [7 x i8]]]* @j, i32 0, i32 0, i32 %5, i32 %8
+ %9 = add i32 %f.promoted, %smax
+ %10 = add i32 %9, 2
+ call void @llvm.memset.p0i8.i32(i8* %scevgep, i8 %conv6, i32 %10, i32 1, i1 false)
+; CHECK: call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([6 x [6 x [7 x i8]]], [6 x [6 x [7 x i8]]]* @j, i32 0, i64 5, i64 4, i32 1), i8 %conv6, i32 1, i32 1, i1 false)
+; CHECK-NOT: call void @llvm.memset.p0i8.i32(i8* getelementptr ([6 x [6 x [7 x i8]]], [6 x [6 x [7 x i8]]]* @j, i64 1, i64 4, i64 4, i32 1)
+ ret i32 0
+}
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i32, i1)
diff --git a/test/Analysis/ConstantFolding/timeout.ll b/test/Analysis/ConstantFolding/timeout.ll
new file mode 100644
index 000000000000..3d28c2adbe48
--- /dev/null
+++ b/test/Analysis/ConstantFolding/timeout.ll
@@ -0,0 +1,73 @@
+; NOTE: This is a timeout test for some O(something silly) constant folding behaviour. It may not be the best test. Provided it finishes, it passes.
+; RUN: opt < %s -O3 -S | FileCheck %s
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "armv8-none-eabi"
+
+%struct.ST = type { %struct.ST* }
+
+@global = internal global [121 x i8] zeroinitializer, align 1
+
+define void @func() #0 {
+;CHECK-LABEL: func
+entry:
+ %s = alloca %struct.ST*, align 4
+ %j = alloca i32, align 4
+ store %struct.ST* bitcast ([121 x i8]* @global to %struct.ST*), %struct.ST** %s, align 4
+ store i32 0, i32* %j, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32, i32* %j, align 4
+ %cmp = icmp slt i32 %0, 30
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load %struct.ST*, %struct.ST** %s, align 4
+ %2 = bitcast %struct.ST* %1 to i8*
+ %add.ptr = getelementptr inbounds i8, i8* %2, i32 4
+ %3 = ptrtoint i8* %add.ptr to i32
+ %4 = load %struct.ST*, %struct.ST** %s, align 4
+ %5 = bitcast %struct.ST* %4 to i8*
+ %add.ptr1 = getelementptr inbounds i8, i8* %5, i32 4
+ %6 = ptrtoint i8* %add.ptr1 to i32
+ %rem = urem i32 %6, 2
+ %cmp2 = icmp eq i32 %rem, 0
+ br i1 %cmp2, label %cond.true, label %cond.false
+
+cond.true: ; preds = %for.body
+ br label %cond.end
+
+cond.false: ; preds = %for.body
+ %7 = load %struct.ST*, %struct.ST** %s, align 4
+ %8 = bitcast %struct.ST* %7 to i8*
+ %add.ptr3 = getelementptr inbounds i8, i8* %8, i32 4
+ %9 = ptrtoint i8* %add.ptr3 to i32
+ %rem4 = urem i32 %9, 2
+ br label %cond.end
+
+cond.end: ; preds = %cond.false, %cond.true
+ %cond = phi i32 [ 0, %cond.true ], [ %rem4, %cond.false ]
+ %add = add i32 %3, %cond
+ %10 = inttoptr i32 %add to %struct.ST*
+ %11 = load %struct.ST*, %struct.ST** %s, align 4
+ %next = getelementptr inbounds %struct.ST, %struct.ST* %11, i32 0, i32 0
+ store %struct.ST* %10, %struct.ST** %next, align 4
+ %12 = load %struct.ST*, %struct.ST** %s, align 4
+ %next5 = getelementptr inbounds %struct.ST, %struct.ST* %12, i32 0, i32 0
+ %13 = load %struct.ST*, %struct.ST** %next5, align 4
+ store %struct.ST* %13, %struct.ST** %s, align 4
+ br label %for.inc
+
+for.inc: ; preds = %cond.end
+ %14 = load i32, i32* %j, align 4
+ %inc = add nsw i32 %14, 1
+ store i32 %inc, i32* %j, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %15 = load %struct.ST*, %struct.ST** %s, align 4
+ %next6 = getelementptr inbounds %struct.ST, %struct.ST* %15, i32 0, i32 0
+ store %struct.ST* null, %struct.ST** %next6, align 4
+ ret void
+}
+
diff --git a/test/Analysis/CostModel/AMDGPU/add-sub.ll b/test/Analysis/CostModel/AMDGPU/add-sub.ll
index 76b21d26faaa..6419eb11b2be 100644
--- a/test/Analysis/CostModel/AMDGPU/add-sub.ll
+++ b/test/Analysis/CostModel/AMDGPU/add-sub.ll
@@ -3,7 +3,7 @@
; CHECK: 'add_i32'
; CHECK: estimated cost of 1 for {{.*}} add i32
-define void @add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+define amdgpu_kernel void @add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
%vec = load i32, i32 addrspace(1)* %vaddr
%add = add i32 %vec, %b
store i32 %add, i32 addrspace(1)* %out
@@ -12,7 +12,7 @@ define void @add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #
; CHECK: 'add_v2i32'
; CHECK: estimated cost of 2 for {{.*}} add <2 x i32>
-define void @add_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) #0 {
+define amdgpu_kernel void @add_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) #0 {
%vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
%add = add <2 x i32> %vec, %b
store <2 x i32> %add, <2 x i32> addrspace(1)* %out
@@ -21,7 +21,7 @@ define void @add_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %va
; CHECK: 'add_v3i32'
; CHECK: estimated cost of 3 for {{.*}} add <3 x i32>
-define void @add_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) #0 {
+define amdgpu_kernel void @add_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) #0 {
%vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
%add = add <3 x i32> %vec, %b
store <3 x i32> %add, <3 x i32> addrspace(1)* %out
@@ -30,7 +30,7 @@ define void @add_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %va
; CHECK: 'add_v4i32'
; CHECK: estimated cost of 4 for {{.*}} add <4 x i32>
-define void @add_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) #0 {
+define amdgpu_kernel void @add_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) #0 {
%vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
%add = add <4 x i32> %vec, %b
store <4 x i32> %add, <4 x i32> addrspace(1)* %out
@@ -39,7 +39,7 @@ define void @add_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %va
; CHECK: 'add_i64'
; CHECK: estimated cost of 2 for {{.*}} add i64
-define void @add_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+define amdgpu_kernel void @add_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
%vec = load i64, i64 addrspace(1)* %vaddr
%add = add i64 %vec, %b
store i64 %add, i64 addrspace(1)* %out
@@ -48,7 +48,7 @@ define void @add_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #
; CHECK: 'add_v2i64'
; CHECK: estimated cost of 4 for {{.*}} add <2 x i64>
-define void @add_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr, <2 x i64> %b) #0 {
+define amdgpu_kernel void @add_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr, <2 x i64> %b) #0 {
%vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
%add = add <2 x i64> %vec, %b
store <2 x i64> %add, <2 x i64> addrspace(1)* %out
@@ -57,7 +57,7 @@ define void @add_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %va
; CHECK: 'add_v3i64'
; CHECK: estimated cost of 6 for {{.*}} add <3 x i64>
-define void @add_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr, <3 x i64> %b) #0 {
+define amdgpu_kernel void @add_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr, <3 x i64> %b) #0 {
%vec = load <3 x i64>, <3 x i64> addrspace(1)* %vaddr
%add = add <3 x i64> %vec, %b
store <3 x i64> %add, <3 x i64> addrspace(1)* %out
@@ -66,7 +66,7 @@ define void @add_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %va
; CHECK: 'add_v4i64'
; CHECK: estimated cost of 8 for {{.*}} add <4 x i64>
-define void @add_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr, <4 x i64> %b) #0 {
+define amdgpu_kernel void @add_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr, <4 x i64> %b) #0 {
%vec = load <4 x i64>, <4 x i64> addrspace(1)* %vaddr
%add = add <4 x i64> %vec, %b
store <4 x i64> %add, <4 x i64> addrspace(1)* %out
@@ -75,7 +75,7 @@ define void @add_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %va
; CHECK: 'add_v16i64'
; CHECK: estimated cost of 32 for {{.*}} add <16 x i64>
-define void @add_v16i64(<16 x i64> addrspace(1)* %out, <16 x i64> addrspace(1)* %vaddr, <16 x i64> %b) #0 {
+define amdgpu_kernel void @add_v16i64(<16 x i64> addrspace(1)* %out, <16 x i64> addrspace(1)* %vaddr, <16 x i64> %b) #0 {
%vec = load <16 x i64>, <16 x i64> addrspace(1)* %vaddr
%add = add <16 x i64> %vec, %b
store <16 x i64> %add, <16 x i64> addrspace(1)* %out
@@ -84,7 +84,7 @@ define void @add_v16i64(<16 x i64> addrspace(1)* %out, <16 x i64> addrspace(1)*
; CHECK: 'add_i16'
; CHECK: estimated cost of 1 for {{.*}} add i16
-define void @add_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #0 {
+define amdgpu_kernel void @add_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #0 {
%vec = load i16, i16 addrspace(1)* %vaddr
%add = add i16 %vec, %b
store i16 %add, i16 addrspace(1)* %out
@@ -93,7 +93,7 @@ define void @add_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #
; CHECK: 'add_v2i16'
; CHECK: estimated cost of 2 for {{.*}} add <2 x i16>
-define void @add_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, <2 x i16> %b) #0 {
+define amdgpu_kernel void @add_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, <2 x i16> %b) #0 {
%vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
%add = add <2 x i16> %vec, %b
store <2 x i16> %add, <2 x i16> addrspace(1)* %out
@@ -102,7 +102,7 @@ define void @add_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %va
; CHECK: 'sub_i32'
; CHECK: estimated cost of 1 for {{.*}} sub i32
-define void @sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+define amdgpu_kernel void @sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
%vec = load i32, i32 addrspace(1)* %vaddr
%sub = sub i32 %vec, %b
store i32 %sub, i32 addrspace(1)* %out
@@ -111,7 +111,7 @@ define void @sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #
; CHECK: 'sub_i64'
; CHECK: estimated cost of 2 for {{.*}} sub i64
-define void @sub_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+define amdgpu_kernel void @sub_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
%vec = load i64, i64 addrspace(1)* %vaddr
%sub = sub i64 %vec, %b
store i64 %sub, i64 addrspace(1)* %out
@@ -119,7 +119,7 @@ define void @sub_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #
}
; CHECK: 'sub_i16'
; CHECK: estimated cost of 1 for {{.*}} sub i16
-define void @sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #0 {
+define amdgpu_kernel void @sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #0 {
%vec = load i16, i16 addrspace(1)* %vaddr
%sub = sub i16 %vec, %b
store i16 %sub, i16 addrspace(1)* %out
@@ -128,7 +128,7 @@ define void @sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #
; CHECK: 'sub_v2i16'
; CHECK: estimated cost of 2 for {{.*}} sub <2 x i16>
-define void @sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, <2 x i16> %b) #0 {
+define amdgpu_kernel void @sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, <2 x i16> %b) #0 {
%vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
%sub = sub <2 x i16> %vec, %b
store <2 x i16> %sub, <2 x i16> addrspace(1)* %out
diff --git a/test/Analysis/CostModel/AMDGPU/bit-ops.ll b/test/Analysis/CostModel/AMDGPU/bit-ops.ll
index a809dbd77bbf..aa70f5032cbc 100644
--- a/test/Analysis/CostModel/AMDGPU/bit-ops.ll
+++ b/test/Analysis/CostModel/AMDGPU/bit-ops.ll
@@ -2,7 +2,7 @@
; CHECK: 'or_i32'
; CHECK: estimated cost of 1 for {{.*}} or i32
-define void @or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+define amdgpu_kernel void @or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
%vec = load i32, i32 addrspace(1)* %vaddr
%or = or i32 %vec, %b
store i32 %or, i32 addrspace(1)* %out
@@ -11,7 +11,7 @@ define void @or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0
; CHECK: 'or_i64'
; CHECK: estimated cost of 2 for {{.*}} or i64
-define void @or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+define amdgpu_kernel void @or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
%vec = load i64, i64 addrspace(1)* %vaddr
%or = or i64 %vec, %b
store i64 %or, i64 addrspace(1)* %out
@@ -20,7 +20,7 @@ define void @or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0
; CHECK: 'xor_i32'
; CHECK: estimated cost of 1 for {{.*}} xor i32
-define void @xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+define amdgpu_kernel void @xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
%vec = load i32, i32 addrspace(1)* %vaddr
%or = xor i32 %vec, %b
store i32 %or, i32 addrspace(1)* %out
@@ -29,7 +29,7 @@ define void @xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #
; CHECK: 'xor_i64'
; CHECK: estimated cost of 2 for {{.*}} xor i64
-define void @xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+define amdgpu_kernel void @xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
%vec = load i64, i64 addrspace(1)* %vaddr
%or = xor i64 %vec, %b
store i64 %or, i64 addrspace(1)* %out
@@ -39,7 +39,7 @@ define void @xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #
; CHECK: 'and_i32'
; CHECK: estimated cost of 1 for {{.*}} and i32
-define void @and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+define amdgpu_kernel void @and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
%vec = load i32, i32 addrspace(1)* %vaddr
%or = and i32 %vec, %b
store i32 %or, i32 addrspace(1)* %out
@@ -48,7 +48,7 @@ define void @and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #
; CHECK: 'and_i64'
; CHECK: estimated cost of 2 for {{.*}} and i64
-define void @and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+define amdgpu_kernel void @and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
%vec = load i64, i64 addrspace(1)* %vaddr
%or = and i64 %vec, %b
store i64 %or, i64 addrspace(1)* %out
diff --git a/test/Analysis/CostModel/AMDGPU/br.ll b/test/Analysis/CostModel/AMDGPU/br.ll
index 0b9649397563..494f8d2c8b2c 100644
--- a/test/Analysis/CostModel/AMDGPU/br.ll
+++ b/test/Analysis/CostModel/AMDGPU/br.ll
@@ -4,7 +4,7 @@
; CHECK: estimated cost of 10 for instruction: br i1
; CHECK: estimated cost of 10 for instruction: br label
; CHECK: estimated cost of 10 for instruction: ret void
-define void @test_br_cost(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+define amdgpu_kernel void @test_br_cost(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
bb0:
br i1 undef, label %bb1, label %bb2
@@ -21,7 +21,7 @@ bb2:
; CHECK: 'test_switch_cost'
; CHECK: Unknown cost for instruction: switch
-define void @test_switch_cost(i32 %a) #0 {
+define amdgpu_kernel void @test_switch_cost(i32 %a) #0 {
entry:
switch i32 %a, label %default [
i32 0, label %case0
diff --git a/test/Analysis/CostModel/AMDGPU/extractelement.ll b/test/Analysis/CostModel/AMDGPU/extractelement.ll
index c328d7686466..1efbb5873acb 100644
--- a/test/Analysis/CostModel/AMDGPU/extractelement.ll
+++ b/test/Analysis/CostModel/AMDGPU/extractelement.ll
@@ -2,7 +2,7 @@
; CHECK: 'extractelement_v2i32'
; CHECK: estimated cost of 0 for {{.*}} extractelement <2 x i32>
-define void @extractelement_v2i32(i32 addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr) {
+define amdgpu_kernel void @extractelement_v2i32(i32 addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr) {
%vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
%elt = extractelement <2 x i32> %vec, i32 1
store i32 %elt, i32 addrspace(1)* %out
@@ -11,7 +11,7 @@ define void @extractelement_v2i32(i32 addrspace(1)* %out, <2 x i32> addrspace(1)
; CHECK: 'extractelement_v2f32'
; CHECK: estimated cost of 0 for {{.*}} extractelement <2 x float>
-define void @extractelement_v2f32(float addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) {
+define amdgpu_kernel void @extractelement_v2f32(float addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) {
%vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
%elt = extractelement <2 x float> %vec, i32 1
store float %elt, float addrspace(1)* %out
@@ -20,7 +20,7 @@ define void @extractelement_v2f32(float addrspace(1)* %out, <2 x float> addrspac
; CHECK: 'extractelement_v3i32'
; CHECK: estimated cost of 0 for {{.*}} extractelement <3 x i32>
-define void @extractelement_v3i32(i32 addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr) {
+define amdgpu_kernel void @extractelement_v3i32(i32 addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr) {
%vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
%elt = extractelement <3 x i32> %vec, i32 1
store i32 %elt, i32 addrspace(1)* %out
@@ -29,7 +29,7 @@ define void @extractelement_v3i32(i32 addrspace(1)* %out, <3 x i32> addrspace(1)
; CHECK: 'extractelement_v4i32'
; CHECK: estimated cost of 0 for {{.*}} extractelement <4 x i32>
-define void @extractelement_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr) {
+define amdgpu_kernel void @extractelement_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr) {
%vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
%elt = extractelement <4 x i32> %vec, i32 1
store i32 %elt, i32 addrspace(1)* %out
@@ -38,7 +38,7 @@ define void @extractelement_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)
; CHECK: 'extractelement_v8i32'
; CHECK: estimated cost of 0 for {{.*}} extractelement <8 x i32>
-define void @extractelement_v8i32(i32 addrspace(1)* %out, <8 x i32> addrspace(1)* %vaddr) {
+define amdgpu_kernel void @extractelement_v8i32(i32 addrspace(1)* %out, <8 x i32> addrspace(1)* %vaddr) {
%vec = load <8 x i32>, <8 x i32> addrspace(1)* %vaddr
%elt = extractelement <8 x i32> %vec, i32 1
store i32 %elt, i32 addrspace(1)* %out
@@ -48,7 +48,7 @@ define void @extractelement_v8i32(i32 addrspace(1)* %out, <8 x i32> addrspace(1)
; FIXME: Should be non-0
; CHECK: 'extractelement_v8i32_dynindex'
; CHECK: estimated cost of 2 for {{.*}} extractelement <8 x i32>
-define void @extractelement_v8i32_dynindex(i32 addrspace(1)* %out, <8 x i32> addrspace(1)* %vaddr, i32 %idx) {
+define amdgpu_kernel void @extractelement_v8i32_dynindex(i32 addrspace(1)* %out, <8 x i32> addrspace(1)* %vaddr, i32 %idx) {
%vec = load <8 x i32>, <8 x i32> addrspace(1)* %vaddr
%elt = extractelement <8 x i32> %vec, i32 %idx
store i32 %elt, i32 addrspace(1)* %out
@@ -57,7 +57,7 @@ define void @extractelement_v8i32_dynindex(i32 addrspace(1)* %out, <8 x i32> add
; CHECK: 'extractelement_v2i64'
; CHECK: estimated cost of 0 for {{.*}} extractelement <2 x i64>
-define void @extractelement_v2i64(i64 addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr) {
+define amdgpu_kernel void @extractelement_v2i64(i64 addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr) {
%vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
%elt = extractelement <2 x i64> %vec, i64 1
store i64 %elt, i64 addrspace(1)* %out
@@ -66,7 +66,7 @@ define void @extractelement_v2i64(i64 addrspace(1)* %out, <2 x i64> addrspace(1)
; CHECK: 'extractelement_v3i64'
; CHECK: estimated cost of 0 for {{.*}} extractelement <3 x i64>
-define void @extractelement_v3i64(i64 addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr) {
+define amdgpu_kernel void @extractelement_v3i64(i64 addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr) {
%vec = load <3 x i64>, <3 x i64> addrspace(1)* %vaddr
%elt = extractelement <3 x i64> %vec, i64 1
store i64 %elt, i64 addrspace(1)* %out
@@ -75,7 +75,7 @@ define void @extractelement_v3i64(i64 addrspace(1)* %out, <3 x i64> addrspace(1)
; CHECK: 'extractelement_v4i64'
; CHECK: estimated cost of 0 for {{.*}} extractelement <4 x i64>
-define void @extractelement_v4i64(i64 addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr) {
+define amdgpu_kernel void @extractelement_v4i64(i64 addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr) {
%vec = load <4 x i64>, <4 x i64> addrspace(1)* %vaddr
%elt = extractelement <4 x i64> %vec, i64 1
store i64 %elt, i64 addrspace(1)* %out
@@ -84,7 +84,7 @@ define void @extractelement_v4i64(i64 addrspace(1)* %out, <4 x i64> addrspace(1)
; CHECK: 'extractelement_v8i64'
; CHECK: estimated cost of 0 for {{.*}} extractelement <8 x i64>
-define void @extractelement_v8i64(i64 addrspace(1)* %out, <8 x i64> addrspace(1)* %vaddr) {
+define amdgpu_kernel void @extractelement_v8i64(i64 addrspace(1)* %out, <8 x i64> addrspace(1)* %vaddr) {
%vec = load <8 x i64>, <8 x i64> addrspace(1)* %vaddr
%elt = extractelement <8 x i64> %vec, i64 1
store i64 %elt, i64 addrspace(1)* %out
@@ -93,7 +93,7 @@ define void @extractelement_v8i64(i64 addrspace(1)* %out, <8 x i64> addrspace(1)
; CHECK: 'extractelement_v4i8'
; CHECK: estimated cost of 0 for {{.*}} extractelement <4 x i8>
-define void @extractelement_v4i8(i8 addrspace(1)* %out, <4 x i8> addrspace(1)* %vaddr) {
+define amdgpu_kernel void @extractelement_v4i8(i8 addrspace(1)* %out, <4 x i8> addrspace(1)* %vaddr) {
%vec = load <4 x i8>, <4 x i8> addrspace(1)* %vaddr
%elt = extractelement <4 x i8> %vec, i8 1
store i8 %elt, i8 addrspace(1)* %out
@@ -102,7 +102,7 @@ define void @extractelement_v4i8(i8 addrspace(1)* %out, <4 x i8> addrspace(1)* %
; CHECK: 'extractelement_v2i16'
; CHECK: estimated cost of 0 for {{.*}} extractelement <2 x i16>
-define void @extractelement_v2i16(i16 addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
+define amdgpu_kernel void @extractelement_v2i16(i16 addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
%vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
%elt = extractelement <2 x i16> %vec, i16 1
store i16 %elt, i16 addrspace(1)* %out
diff --git a/test/Analysis/CostModel/AMDGPU/fabs.ll b/test/Analysis/CostModel/AMDGPU/fabs.ll
index 9c551ec8afe5..0d49e2967d2d 100644
--- a/test/Analysis/CostModel/AMDGPU/fabs.ll
+++ b/test/Analysis/CostModel/AMDGPU/fabs.ll
@@ -2,7 +2,7 @@
; CHECK: 'fabs_f32'
; CHECK: estimated cost of 0 for {{.*}} call float @llvm.fabs.f32
-define void @fabs_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr) #0 {
+define amdgpu_kernel void @fabs_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr) #0 {
%vec = load float, float addrspace(1)* %vaddr
%fabs = call float @llvm.fabs.f32(float %vec) #1
store float %fabs, float addrspace(1)* %out
@@ -11,7 +11,7 @@ define void @fabs_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr) #0 {
; CHECK: 'fabs_v2f32'
; CHECK: estimated cost of 0 for {{.*}} call <2 x float> @llvm.fabs.v2f32
-define void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) #0 {
+define amdgpu_kernel void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) #0 {
%vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
%fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %vec) #1
store <2 x float> %fabs, <2 x float> addrspace(1)* %out
@@ -20,7 +20,7 @@ define void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)
; CHECK: 'fabs_v3f32'
; CHECK: estimated cost of 0 for {{.*}} call <3 x float> @llvm.fabs.v3f32
-define void @fabs_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr) #0 {
+define amdgpu_kernel void @fabs_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr) #0 {
%vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
%fabs = call <3 x float> @llvm.fabs.v3f32(<3 x float> %vec) #1
store <3 x float> %fabs, <3 x float> addrspace(1)* %out
@@ -29,7 +29,7 @@ define void @fabs_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)
; CHECK: 'fabs_f64'
; CHECK: estimated cost of 0 for {{.*}} call double @llvm.fabs.f64
-define void @fabs_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr) #0 {
+define amdgpu_kernel void @fabs_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr) #0 {
%vec = load double, double addrspace(1)* %vaddr
%fabs = call double @llvm.fabs.f64(double %vec) #1
store double %fabs, double addrspace(1)* %out
@@ -38,7 +38,7 @@ define void @fabs_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr) #0
; CHECK: 'fabs_v2f64'
; CHECK: estimated cost of 0 for {{.*}} call <2 x double> @llvm.fabs.v2f64
-define void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr) #0 {
+define amdgpu_kernel void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr) #0 {
%vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
%fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %vec) #1
store <2 x double> %fabs, <2 x double> addrspace(1)* %out
@@ -47,7 +47,7 @@ define void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(
; CHECK: 'fabs_v3f64'
; CHECK: estimated cost of 0 for {{.*}} call <3 x double> @llvm.fabs.v3f64
-define void @fabs_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr) #0 {
+define amdgpu_kernel void @fabs_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr) #0 {
%vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
%fabs = call <3 x double> @llvm.fabs.v3f64(<3 x double> %vec) #1
store <3 x double> %fabs, <3 x double> addrspace(1)* %out
@@ -56,7 +56,7 @@ define void @fabs_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(
; CHECK: 'fabs_f16'
; CHECK: estimated cost of 0 for {{.*}} call half @llvm.fabs.f16
-define void @fabs_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr) #0 {
+define amdgpu_kernel void @fabs_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr) #0 {
%vec = load half, half addrspace(1)* %vaddr
%fabs = call half @llvm.fabs.f16(half %vec) #1
store half %fabs, half addrspace(1)* %out
@@ -65,7 +65,7 @@ define void @fabs_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr) #0 {
; CHECK: 'fabs_v2f16'
; CHECK: estimated cost of 0 for {{.*}} call <2 x half> @llvm.fabs.v2f16
-define void @fabs_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr) #0 {
+define amdgpu_kernel void @fabs_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr) #0 {
%vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
%fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %vec) #1
store <2 x half> %fabs, <2 x half> addrspace(1)* %out
@@ -74,7 +74,7 @@ define void @fabs_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)*
; CHECK: 'fabs_v3f16'
; CHECK: estimated cost of 0 for {{.*}} call <3 x half> @llvm.fabs.v3f16
-define void @fabs_v3f16(<3 x half> addrspace(1)* %out, <3 x half> addrspace(1)* %vaddr) #0 {
+define amdgpu_kernel void @fabs_v3f16(<3 x half> addrspace(1)* %out, <3 x half> addrspace(1)* %vaddr) #0 {
%vec = load <3 x half>, <3 x half> addrspace(1)* %vaddr
%fabs = call <3 x half> @llvm.fabs.v3f16(<3 x half> %vec) #1
store <3 x half> %fabs, <3 x half> addrspace(1)* %out
diff --git a/test/Analysis/CostModel/AMDGPU/fadd.ll b/test/Analysis/CostModel/AMDGPU/fadd.ll
index 00e91bd6223a..d7ac73592998 100644
--- a/test/Analysis/CostModel/AMDGPU/fadd.ll
+++ b/test/Analysis/CostModel/AMDGPU/fadd.ll
@@ -3,7 +3,7 @@
; ALL: 'fadd_f32'
; ALL: estimated cost of 1 for {{.*}} fadd float
-define void @fadd_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
+define amdgpu_kernel void @fadd_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
%vec = load float, float addrspace(1)* %vaddr
%add = fadd float %vec, %b
store float %add, float addrspace(1)* %out
@@ -12,7 +12,7 @@ define void @fadd_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, floa
; ALL: 'fadd_v2f32'
; ALL: estimated cost of 2 for {{.*}} fadd <2 x float>
-define void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
+define amdgpu_kernel void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
%vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
%add = fadd <2 x float> %vec, %b
store <2 x float> %add, <2 x float> addrspace(1)* %out
@@ -21,7 +21,7 @@ define void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)
; ALL: 'fadd_v3f32'
; ALL: estimated cost of 3 for {{.*}} fadd <3 x float>
-define void @fadd_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
+define amdgpu_kernel void @fadd_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
%vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
%add = fadd <3 x float> %vec, %b
store <3 x float> %add, <3 x float> addrspace(1)* %out
@@ -31,7 +31,7 @@ define void @fadd_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)
; ALL: 'fadd_f64'
; FASTF64: estimated cost of 2 for {{.*}} fadd double
; SLOWF64: estimated cost of 3 for {{.*}} fadd double
-define void @fadd_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
+define amdgpu_kernel void @fadd_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
%vec = load double, double addrspace(1)* %vaddr
%add = fadd double %vec, %b
store double %add, double addrspace(1)* %out
@@ -41,7 +41,7 @@ define void @fadd_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, do
; ALL: 'fadd_v2f64'
; FASTF64: estimated cost of 4 for {{.*}} fadd <2 x double>
; SLOWF64: estimated cost of 6 for {{.*}} fadd <2 x double>
-define void @fadd_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
+define amdgpu_kernel void @fadd_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
%vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
%add = fadd <2 x double> %vec, %b
store <2 x double> %add, <2 x double> addrspace(1)* %out
@@ -51,7 +51,7 @@ define void @fadd_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(
; ALL: 'fadd_v3f64'
; FASTF64: estimated cost of 6 for {{.*}} fadd <3 x double>
; SLOWF64: estimated cost of 9 for {{.*}} fadd <3 x double>
-define void @fadd_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
+define amdgpu_kernel void @fadd_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
%vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
%add = fadd <3 x double> %vec, %b
store <3 x double> %add, <3 x double> addrspace(1)* %out
@@ -60,7 +60,7 @@ define void @fadd_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(
; ALL: 'fadd_f16'
; ALL: estimated cost of 1 for {{.*}} fadd half
-define void @fadd_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
+define amdgpu_kernel void @fadd_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
%vec = load half, half addrspace(1)* %vaddr
%add = fadd half %vec, %b
store half %add, half addrspace(1)* %out
@@ -69,7 +69,7 @@ define void @fadd_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %
; ALL: 'fadd_v2f16'
; ALL: estimated cost of 2 for {{.*}} fadd <2 x half>
-define void @fadd_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
+define amdgpu_kernel void @fadd_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
%vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
%add = fadd <2 x half> %vec, %b
store <2 x half> %add, <2 x half> addrspace(1)* %out
@@ -78,7 +78,7 @@ define void @fadd_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)*
; ALL: 'fadd_v4f16'
; ALL: estimated cost of 4 for {{.*}} fadd <4 x half>
-define void @fadd_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
+define amdgpu_kernel void @fadd_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
%vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
%add = fadd <4 x half> %vec, %b
store <4 x half> %add, <4 x half> addrspace(1)* %out
diff --git a/test/Analysis/CostModel/AMDGPU/fdiv.ll b/test/Analysis/CostModel/AMDGPU/fdiv.ll
index 3f374422ad9d..caa9bff7b2a9 100644
--- a/test/Analysis/CostModel/AMDGPU/fdiv.ll
+++ b/test/Analysis/CostModel/AMDGPU/fdiv.ll
@@ -5,7 +5,7 @@
; CHECK: 'fdiv_f32'
; ALL: estimated cost of 10 for {{.*}} fdiv float
-define void @fdiv_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
+define amdgpu_kernel void @fdiv_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
%vec = load float, float addrspace(1)* %vaddr
%add = fdiv float %vec, %b
store float %add, float addrspace(1)* %out
@@ -14,7 +14,7 @@ define void @fdiv_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, floa
; ALL: 'fdiv_v2f32'
; ALL: estimated cost of 20 for {{.*}} fdiv <2 x float>
-define void @fdiv_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
+define amdgpu_kernel void @fdiv_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
%vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
%add = fdiv <2 x float> %vec, %b
store <2 x float> %add, <2 x float> addrspace(1)* %out
@@ -23,7 +23,7 @@ define void @fdiv_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)
; ALL: 'fdiv_v3f32'
; ALL: estimated cost of 30 for {{.*}} fdiv <3 x float>
-define void @fdiv_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
+define amdgpu_kernel void @fdiv_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
%vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
%add = fdiv <3 x float> %vec, %b
store <3 x float> %add, <3 x float> addrspace(1)* %out
@@ -35,7 +35,7 @@ define void @fdiv_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)
; CISLOWF64: estimated cost of 33 for {{.*}} fdiv double
; SIFASTF64: estimated cost of 32 for {{.*}} fdiv double
; SISLOWF64: estimated cost of 36 for {{.*}} fdiv double
-define void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
+define amdgpu_kernel void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
%vec = load double, double addrspace(1)* %vaddr
%add = fdiv double %vec, %b
store double %add, double addrspace(1)* %out
@@ -47,7 +47,7 @@ define void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, do
; CISLOWF64: estimated cost of 66 for {{.*}} fdiv <2 x double>
; SIFASTF64: estimated cost of 64 for {{.*}} fdiv <2 x double>
; SISLOWF64: estimated cost of 72 for {{.*}} fdiv <2 x double>
-define void @fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
+define amdgpu_kernel void @fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
%vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
%add = fdiv <2 x double> %vec, %b
store <2 x double> %add, <2 x double> addrspace(1)* %out
@@ -59,7 +59,7 @@ define void @fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(
; CISLOWF64: estimated cost of 99 for {{.*}} fdiv <3 x double>
; SIFASTF64: estimated cost of 96 for {{.*}} fdiv <3 x double>
; SISLOWF64: estimated cost of 108 for {{.*}} fdiv <3 x double>
-define void @fdiv_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
+define amdgpu_kernel void @fdiv_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
%vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
%add = fdiv <3 x double> %vec, %b
store <3 x double> %add, <3 x double> addrspace(1)* %out
@@ -68,7 +68,7 @@ define void @fdiv_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(
; ALL: 'fdiv_f16'
; ALL: estimated cost of 10 for {{.*}} fdiv half
-define void @fdiv_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
+define amdgpu_kernel void @fdiv_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
%vec = load half, half addrspace(1)* %vaddr
%add = fdiv half %vec, %b
store half %add, half addrspace(1)* %out
@@ -77,7 +77,7 @@ define void @fdiv_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %
; ALL: 'fdiv_v2f16'
; ALL: estimated cost of 20 for {{.*}} fdiv <2 x half>
-define void @fdiv_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
+define amdgpu_kernel void @fdiv_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
%vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
%add = fdiv <2 x half> %vec, %b
store <2 x half> %add, <2 x half> addrspace(1)* %out
@@ -86,7 +86,7 @@ define void @fdiv_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)*
; ALL: 'fdiv_v4f16'
; ALL: estimated cost of 40 for {{.*}} fdiv <4 x half>
-define void @fdiv_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
+define amdgpu_kernel void @fdiv_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
%vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
%add = fdiv <4 x half> %vec, %b
store <4 x half> %add, <4 x half> addrspace(1)* %out
diff --git a/test/Analysis/CostModel/AMDGPU/fmul.ll b/test/Analysis/CostModel/AMDGPU/fmul.ll
index 6303bb7988c5..915c35a23b30 100644
--- a/test/Analysis/CostModel/AMDGPU/fmul.ll
+++ b/test/Analysis/CostModel/AMDGPU/fmul.ll
@@ -3,7 +3,7 @@
; ALL: 'fmul_f32'
; ALL: estimated cost of 1 for {{.*}} fmul float
-define void @fmul_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
+define amdgpu_kernel void @fmul_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
%vec = load float, float addrspace(1)* %vaddr
%add = fmul float %vec, %b
store float %add, float addrspace(1)* %out
@@ -12,7 +12,7 @@ define void @fmul_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, floa
; ALL: 'fmul_v2f32'
; ALL: estimated cost of 2 for {{.*}} fmul <2 x float>
-define void @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
+define amdgpu_kernel void @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
%vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
%add = fmul <2 x float> %vec, %b
store <2 x float> %add, <2 x float> addrspace(1)* %out
@@ -21,7 +21,7 @@ define void @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)
; ALL: 'fmul_v3f32'
; ALL: estimated cost of 3 for {{.*}} fmul <3 x float>
-define void @fmul_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
+define amdgpu_kernel void @fmul_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
%vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
%add = fmul <3 x float> %vec, %b
store <3 x float> %add, <3 x float> addrspace(1)* %out
@@ -31,7 +31,7 @@ define void @fmul_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)
; ALL: 'fmul_f64'
; FASTF64: estimated cost of 2 for {{.*}} fmul double
; SLOWF64: estimated cost of 3 for {{.*}} fmul double
-define void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
+define amdgpu_kernel void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
%vec = load double, double addrspace(1)* %vaddr
%add = fmul double %vec, %b
store double %add, double addrspace(1)* %out
@@ -41,7 +41,7 @@ define void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, do
; ALL: 'fmul_v2f64'
; FASTF64: estimated cost of 4 for {{.*}} fmul <2 x double>
; SLOWF64: estimated cost of 6 for {{.*}} fmul <2 x double>
-define void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
+define amdgpu_kernel void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
%vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
%add = fmul <2 x double> %vec, %b
store <2 x double> %add, <2 x double> addrspace(1)* %out
@@ -51,7 +51,7 @@ define void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(
; ALL: 'fmul_v3f64'
; FASTF64: estimated cost of 6 for {{.*}} fmul <3 x double>
; SLOWF64: estimated cost of 9 for {{.*}} fmul <3 x double>
-define void @fmul_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
+define amdgpu_kernel void @fmul_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
%vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
%add = fmul <3 x double> %vec, %b
store <3 x double> %add, <3 x double> addrspace(1)* %out
@@ -60,7 +60,7 @@ define void @fmul_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(
; ALL: 'fmul_f16'
; ALL: estimated cost of 1 for {{.*}} fmul half
-define void @fmul_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
+define amdgpu_kernel void @fmul_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
%vec = load half, half addrspace(1)* %vaddr
%add = fmul half %vec, %b
store half %add, half addrspace(1)* %out
@@ -69,7 +69,7 @@ define void @fmul_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %
; ALL: 'fmul_v2f16'
; ALL: estimated cost of 2 for {{.*}} fmul <2 x half>
-define void @fmul_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
+define amdgpu_kernel void @fmul_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
%vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
%add = fmul <2 x half> %vec, %b
store <2 x half> %add, <2 x half> addrspace(1)* %out
@@ -78,7 +78,7 @@ define void @fmul_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)*
; ALL: 'fmul_v4f16'
; ALL: estimated cost of 4 for {{.*}} fmul <4 x half>
-define void @fmul_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
+define amdgpu_kernel void @fmul_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
%vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
%add = fmul <4 x half> %vec, %b
store <4 x half> %add, <4 x half> addrspace(1)* %out
diff --git a/test/Analysis/CostModel/AMDGPU/fsub.ll b/test/Analysis/CostModel/AMDGPU/fsub.ll
index e0850be9867e..cb89d292f717 100644
--- a/test/Analysis/CostModel/AMDGPU/fsub.ll
+++ b/test/Analysis/CostModel/AMDGPU/fsub.ll
@@ -3,7 +3,7 @@
; ALL: 'fsub_f32'
; ALL: estimated cost of 1 for {{.*}} fsub float
-define void @fsub_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
+define amdgpu_kernel void @fsub_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
%vec = load float, float addrspace(1)* %vaddr
%add = fsub float %vec, %b
store float %add, float addrspace(1)* %out
@@ -12,7 +12,7 @@ define void @fsub_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, floa
; ALL: 'fsub_v2f32'
; ALL: estimated cost of 2 for {{.*}} fsub <2 x float>
-define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
+define amdgpu_kernel void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
%vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
%add = fsub <2 x float> %vec, %b
store <2 x float> %add, <2 x float> addrspace(1)* %out
@@ -21,7 +21,7 @@ define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)
; ALL: 'fsub_v3f32'
; ALL: estimated cost of 3 for {{.*}} fsub <3 x float>
-define void @fsub_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
+define amdgpu_kernel void @fsub_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
%vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
%add = fsub <3 x float> %vec, %b
store <3 x float> %add, <3 x float> addrspace(1)* %out
@@ -31,7 +31,7 @@ define void @fsub_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)
; ALL: 'fsub_f64'
; FASTF64: estimated cost of 2 for {{.*}} fsub double
; SLOWF64: estimated cost of 3 for {{.*}} fsub double
-define void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
+define amdgpu_kernel void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
%vec = load double, double addrspace(1)* %vaddr
%add = fsub double %vec, %b
store double %add, double addrspace(1)* %out
@@ -41,7 +41,7 @@ define void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, do
; ALL: 'fsub_v2f64'
; FASTF64: estimated cost of 4 for {{.*}} fsub <2 x double>
; SLOWF64: estimated cost of 6 for {{.*}} fsub <2 x double>
-define void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
+define amdgpu_kernel void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
%vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
%add = fsub <2 x double> %vec, %b
store <2 x double> %add, <2 x double> addrspace(1)* %out
@@ -51,7 +51,7 @@ define void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(
; ALL: 'fsub_v3f64'
; FASTF64: estimated cost of 6 for {{.*}} fsub <3 x double>
; SLOWF64: estimated cost of 9 for {{.*}} fsub <3 x double>
-define void @fsub_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
+define amdgpu_kernel void @fsub_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
%vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
%add = fsub <3 x double> %vec, %b
store <3 x double> %add, <3 x double> addrspace(1)* %out
@@ -60,7 +60,7 @@ define void @fsub_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(
; ALL: 'fsub_f16'
; ALL: estimated cost of 1 for {{.*}} fsub half
-define void @fsub_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
+define amdgpu_kernel void @fsub_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
%vec = load half, half addrspace(1)* %vaddr
%add = fsub half %vec, %b
store half %add, half addrspace(1)* %out
@@ -69,7 +69,7 @@ define void @fsub_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %
; ALL: 'fsub_v2f16'
; ALL: estimated cost of 2 for {{.*}} fsub <2 x half>
-define void @fsub_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
+define amdgpu_kernel void @fsub_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
%vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
%add = fsub <2 x half> %vec, %b
store <2 x half> %add, <2 x half> addrspace(1)* %out
@@ -78,7 +78,7 @@ define void @fsub_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)*
; ALL: 'fsub_v4f16'
; ALL: estimated cost of 4 for {{.*}} fsub <4 x half>
-define void @fsub_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
+define amdgpu_kernel void @fsub_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
%vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
%add = fsub <4 x half> %vec, %b
store <4 x half> %add, <4 x half> addrspace(1)* %out
diff --git a/test/Analysis/CostModel/AMDGPU/insertelement.ll b/test/Analysis/CostModel/AMDGPU/insertelement.ll
index 1765afe3169e..6f296a3e7a34 100644
--- a/test/Analysis/CostModel/AMDGPU/insertelement.ll
+++ b/test/Analysis/CostModel/AMDGPU/insertelement.ll
@@ -2,7 +2,7 @@
; CHECK: 'insertelement_v2i32'
; CHECK: estimated cost of 0 for {{.*}} insertelement <2 x i32>
-define void @insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr) {
+define amdgpu_kernel void @insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr) {
%vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
%insert = insertelement <2 x i32> %vec, i32 1, i32 123
store <2 x i32> %insert, <2 x i32> addrspace(1)* %out
@@ -11,7 +11,7 @@ define void @insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspa
; CHECK: 'insertelement_v2i64'
; CHECK: estimated cost of 0 for {{.*}} insertelement <2 x i64>
-define void @insertelement_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr) {
+define amdgpu_kernel void @insertelement_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr) {
%vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
%insert = insertelement <2 x i64> %vec, i64 1, i64 123
store <2 x i64> %insert, <2 x i64> addrspace(1)* %out
@@ -20,7 +20,7 @@ define void @insertelement_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspa
; CHECK: 'insertelement_v2i16'
; CHECK: estimated cost of 0 for {{.*}} insertelement <2 x i16>
-define void @insertelement_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
+define amdgpu_kernel void @insertelement_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
%vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
%insert = insertelement <2 x i16> %vec, i16 1, i16 123
store <2 x i16> %insert, <2 x i16> addrspace(1)* %out
@@ -29,7 +29,7 @@ define void @insertelement_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspa
; CHECK: 'insertelement_v2i8'
; CHECK: estimated cost of 0 for {{.*}} insertelement <2 x i8>
-define void @insertelement_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(1)* %vaddr) {
+define amdgpu_kernel void @insertelement_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(1)* %vaddr) {
%vec = load <2 x i8>, <2 x i8> addrspace(1)* %vaddr
%insert = insertelement <2 x i8> %vec, i8 1, i8 123
store <2 x i8> %insert, <2 x i8> addrspace(1)* %out
diff --git a/test/Analysis/CostModel/AMDGPU/mul.ll b/test/Analysis/CostModel/AMDGPU/mul.ll
index cbc755a6e6a9..aac7b68f50c2 100644
--- a/test/Analysis/CostModel/AMDGPU/mul.ll
+++ b/test/Analysis/CostModel/AMDGPU/mul.ll
@@ -2,7 +2,7 @@
; CHECK: 'mul_i32'
; CHECK: estimated cost of 3 for {{.*}} mul i32
-define void @mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+define amdgpu_kernel void @mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
%vec = load i32, i32 addrspace(1)* %vaddr
%mul = mul i32 %vec, %b
store i32 %mul, i32 addrspace(1)* %out
@@ -11,7 +11,7 @@ define void @mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #
; CHECK: 'mul_v2i32'
; CHECK: estimated cost of 6 for {{.*}} mul <2 x i32>
-define void @mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) #0 {
+define amdgpu_kernel void @mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) #0 {
%vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
%mul = mul <2 x i32> %vec, %b
store <2 x i32> %mul, <2 x i32> addrspace(1)* %out
@@ -20,7 +20,7 @@ define void @mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %va
; CHECK: 'mul_v3i32'
; CHECK: estimated cost of 9 for {{.*}} mul <3 x i32>
-define void @mul_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) #0 {
+define amdgpu_kernel void @mul_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) #0 {
%vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
%mul = mul <3 x i32> %vec, %b
store <3 x i32> %mul, <3 x i32> addrspace(1)* %out
@@ -29,7 +29,7 @@ define void @mul_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %va
; CHECK: 'mul_v4i32'
; CHECK: estimated cost of 12 for {{.*}} mul <4 x i32>
-define void @mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) #0 {
+define amdgpu_kernel void @mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) #0 {
%vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
%mul = mul <4 x i32> %vec, %b
store <4 x i32> %mul, <4 x i32> addrspace(1)* %out
@@ -38,7 +38,7 @@ define void @mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %va
; CHECK: 'mul_i64'
; CHECK: estimated cost of 16 for {{.*}} mul i64
-define void @mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+define amdgpu_kernel void @mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
%vec = load i64, i64 addrspace(1)* %vaddr
%mul = mul i64 %vec, %b
store i64 %mul, i64 addrspace(1)* %out
@@ -47,7 +47,7 @@ define void @mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #
; CHECK: 'mul_v2i64'
; CHECK: estimated cost of 32 for {{.*}} mul <2 x i64>
-define void @mul_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr, <2 x i64> %b) #0 {
+define amdgpu_kernel void @mul_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr, <2 x i64> %b) #0 {
%vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
%mul = mul <2 x i64> %vec, %b
store <2 x i64> %mul, <2 x i64> addrspace(1)* %out
@@ -56,7 +56,7 @@ define void @mul_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %va
; CHECK: 'mul_v3i64'
; CHECK: estimated cost of 48 for {{.*}} mul <3 x i64>
-define void @mul_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr, <3 x i64> %b) #0 {
+define amdgpu_kernel void @mul_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr, <3 x i64> %b) #0 {
%vec = load <3 x i64>, <3 x i64> addrspace(1)* %vaddr
%mul = mul <3 x i64> %vec, %b
store <3 x i64> %mul, <3 x i64> addrspace(1)* %out
@@ -65,7 +65,7 @@ define void @mul_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %va
; CHECK: 'mul_v4i64'
; CHECK: estimated cost of 64 for {{.*}} mul <4 x i64>
-define void @mul_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr, <4 x i64> %b) #0 {
+define amdgpu_kernel void @mul_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr, <4 x i64> %b) #0 {
%vec = load <4 x i64>, <4 x i64> addrspace(1)* %vaddr
%mul = mul <4 x i64> %vec, %b
store <4 x i64> %mul, <4 x i64> addrspace(1)* %out
@@ -75,7 +75,7 @@ define void @mul_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %va
; CHECK: 'mul_v8i64'
; CHECK: estimated cost of 128 for {{.*}} mul <8 x i64>
-define void @mul_v8i64(<8 x i64> addrspace(1)* %out, <8 x i64> addrspace(1)* %vaddr, <8 x i64> %b) #0 {
+define amdgpu_kernel void @mul_v8i64(<8 x i64> addrspace(1)* %out, <8 x i64> addrspace(1)* %vaddr, <8 x i64> %b) #0 {
%vec = load <8 x i64>, <8 x i64> addrspace(1)* %vaddr
%mul = mul <8 x i64> %vec, %b
store <8 x i64> %mul, <8 x i64> addrspace(1)* %out
diff --git a/test/Analysis/CostModel/AMDGPU/shifts.ll b/test/Analysis/CostModel/AMDGPU/shifts.ll
index 003aed7b2fc8..85fb0ebe14e5 100644
--- a/test/Analysis/CostModel/AMDGPU/shifts.ll
+++ b/test/Analysis/CostModel/AMDGPU/shifts.ll
@@ -3,7 +3,7 @@
; ALL: 'shl_i32'
; ALL: estimated cost of 1 for {{.*}} shl i32
-define void @shl_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+define amdgpu_kernel void @shl_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
%vec = load i32, i32 addrspace(1)* %vaddr
%or = shl i32 %vec, %b
store i32 %or, i32 addrspace(1)* %out
@@ -13,7 +13,7 @@ define void @shl_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #
; ALL: 'shl_i64'
; FAST64: estimated cost of 2 for {{.*}} shl i64
; SLOW64: estimated cost of 3 for {{.*}} shl i64
-define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+define amdgpu_kernel void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
%vec = load i64, i64 addrspace(1)* %vaddr
%or = shl i64 %vec, %b
store i64 %or, i64 addrspace(1)* %out
@@ -22,7 +22,7 @@ define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #
; ALL: 'lshr_i32'
; ALL: estimated cost of 1 for {{.*}} lshr i32
-define void @lshr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+define amdgpu_kernel void @lshr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
%vec = load i32, i32 addrspace(1)* %vaddr
%or = lshr i32 %vec, %b
store i32 %or, i32 addrspace(1)* %out
@@ -32,7 +32,7 @@ define void @lshr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b)
; ALL: 'lshr_i64'
; FAST64: estimated cost of 2 for {{.*}} lshr i64
; SLOW64: estimated cost of 3 for {{.*}} lshr i64
-define void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+define amdgpu_kernel void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
%vec = load i64, i64 addrspace(1)* %vaddr
%or = lshr i64 %vec, %b
store i64 %or, i64 addrspace(1)* %out
@@ -41,7 +41,7 @@ define void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b)
; ALL: 'ashr_i32'
; ALL: estimated cost of 1 for {{.*}} ashr i32
-define void @ashr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+define amdgpu_kernel void @ashr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
%vec = load i32, i32 addrspace(1)* %vaddr
%or = ashr i32 %vec, %b
store i32 %or, i32 addrspace(1)* %out
@@ -51,7 +51,7 @@ define void @ashr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b)
; ALL: 'ashr_i64'
; FAST64: estimated cost of 2 for {{.*}} ashr i64
; SLOW64: estimated cost of 3 for {{.*}} ashr i64
-define void @ashr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+define amdgpu_kernel void @ashr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
%vec = load i64, i64 addrspace(1)* %vaddr
%or = ashr i64 %vec, %b
store i64 %or, i64 addrspace(1)* %out
diff --git a/test/Analysis/CostModel/PowerPC/load_store.ll b/test/Analysis/CostModel/PowerPC/load_store.ll
index d48be5b5f62b..b77dd444774f 100644
--- a/test/Analysis/CostModel/PowerPC/load_store.ll
+++ b/test/Analysis/CostModel/PowerPC/load_store.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -cost-model -analyze -mtriple=powerpc64-unknown-linux-gnu -mcpu=g5 | FileCheck %s
+; RUN: opt < %s -cost-model -analyze -mtriple=powerpc64-unknown-linux-gnu -mcpu=g5 -disable-ppc-unaligned | FileCheck %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
diff --git a/test/Analysis/CostModel/PowerPC/unaligned_ld_st.ll b/test/Analysis/CostModel/PowerPC/unaligned_ld_st.ll
new file mode 100644
index 000000000000..6addf25949e6
--- /dev/null
+++ b/test/Analysis/CostModel/PowerPC/unaligned_ld_st.ll
@@ -0,0 +1,26 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=+vsx | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define i32 @test(i32 %arg) {
+
+ ; CHECK: cost of 1 {{.*}} load
+ load i8, i8* undef, align 1
+ ; CHECK: cost of 1 {{.*}} load
+ load i16, i16* undef, align 1
+ ; CHECK: cost of 1 {{.*}} load
+ load i32, i32* undef, align 1
+ ; CHECK: cost of 1 {{.*}} load
+ load i64, i64* undef, align 1
+
+ ; CHECK: cost of 1 {{.*}} store
+ store i8 undef, i8* undef, align 1
+ ; CHECK: cost of 1 {{.*}} store
+ store i16 undef, i16* undef, align 1
+ ; CHECK: cost of 1 {{.*}} store
+ store i32 undef, i32* undef, align 1
+ ; CHECK: cost of 1 {{.*}} store
+ store i64 undef, i64* undef, align 1
+
+ ret i32 undef
+}
diff --git a/test/Analysis/CostModel/SystemZ/cmp-ext.ll b/test/Analysis/CostModel/SystemZ/cmp-ext.ll
new file mode 100644
index 000000000000..e33587876aa7
--- /dev/null
+++ b/test/Analysis/CostModel/SystemZ/cmp-ext.ll
@@ -0,0 +1,2403 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=systemz-unknown -mcpu=z13 | FileCheck %s
+;
+
+define i8 @fun0(i8 %val1, i8 %val2) {
+ %cmp = icmp eq i8 %val1, %val2
+ %v = sext i1 %cmp to i8
+ ret i8 %v
+
+; CHECK: fun0
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext i1 %cmp to i8
+}
+
+define i16 @fun1(i8 %val1, i8 %val2) {
+ %cmp = icmp eq i8 %val1, %val2
+ %v = sext i1 %cmp to i16
+ ret i16 %v
+
+; CHECK: fun1
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext i1 %cmp to i16
+}
+
+define i32 @fun2(i8 %val1, i8 %val2) {
+ %cmp = icmp eq i8 %val1, %val2
+ %v = sext i1 %cmp to i32
+ ret i32 %v
+
+; CHECK: fun2
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext i1 %cmp to i32
+}
+
+define i64 @fun3(i8 %val1, i8 %val2) {
+ %cmp = icmp eq i8 %val1, %val2
+ %v = sext i1 %cmp to i64
+ ret i64 %v
+
+; CHECK: fun3
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i8 %val1, %val2
+; CHECK: cost of 4 for instruction: %v = sext i1 %cmp to i64
+}
+
+define i8 @fun4(i16 %val1, i16 %val2) {
+ %cmp = icmp eq i16 %val1, %val2
+ %v = sext i1 %cmp to i8
+ ret i8 %v
+
+; CHECK: fun4
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext i1 %cmp to i8
+}
+
+define i16 @fun5(i16 %val1, i16 %val2) {
+ %cmp = icmp eq i16 %val1, %val2
+ %v = sext i1 %cmp to i16
+ ret i16 %v
+
+; CHECK: fun5
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext i1 %cmp to i16
+}
+
+define i32 @fun6(i16 %val1, i16 %val2) {
+ %cmp = icmp eq i16 %val1, %val2
+ %v = sext i1 %cmp to i32
+ ret i32 %v
+
+; CHECK: fun6
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext i1 %cmp to i32
+}
+
+define i64 @fun7(i16 %val1, i16 %val2) {
+ %cmp = icmp eq i16 %val1, %val2
+ %v = sext i1 %cmp to i64
+ ret i64 %v
+
+; CHECK: fun7
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i16 %val1, %val2
+; CHECK: cost of 4 for instruction: %v = sext i1 %cmp to i64
+}
+
+define i8 @fun8(i32 %val1, i32 %val2) {
+ %cmp = icmp eq i32 %val1, %val2
+ %v = sext i1 %cmp to i8
+ ret i8 %v
+
+; CHECK: fun8
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i32 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext i1 %cmp to i8
+}
+
+define i16 @fun9(i32 %val1, i32 %val2) {
+ %cmp = icmp eq i32 %val1, %val2
+ %v = sext i1 %cmp to i16
+ ret i16 %v
+
+; CHECK: fun9
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i32 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext i1 %cmp to i16
+}
+
+define i32 @fun10(i32 %val1, i32 %val2) {
+ %cmp = icmp eq i32 %val1, %val2
+ %v = sext i1 %cmp to i32
+ ret i32 %v
+
+; CHECK: fun10
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i32 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext i1 %cmp to i32
+}
+
+define i64 @fun11(i32 %val1, i32 %val2) {
+ %cmp = icmp eq i32 %val1, %val2
+ %v = sext i1 %cmp to i64
+ ret i64 %v
+
+; CHECK: fun11
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i32 %val1, %val2
+; CHECK: cost of 4 for instruction: %v = sext i1 %cmp to i64
+}
+
+define i8 @fun12(i64 %val1, i64 %val2) {
+ %cmp = icmp eq i64 %val1, %val2
+ %v = sext i1 %cmp to i8
+ ret i8 %v
+
+; CHECK: fun12
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i64 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext i1 %cmp to i8
+}
+
+define i16 @fun13(i64 %val1, i64 %val2) {
+ %cmp = icmp eq i64 %val1, %val2
+ %v = sext i1 %cmp to i16
+ ret i16 %v
+
+; CHECK: fun13
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i64 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext i1 %cmp to i16
+}
+
+define i32 @fun14(i64 %val1, i64 %val2) {
+ %cmp = icmp eq i64 %val1, %val2
+ %v = sext i1 %cmp to i32
+ ret i32 %v
+
+; CHECK: fun14
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i64 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext i1 %cmp to i32
+}
+
+define i64 @fun15(i64 %val1, i64 %val2) {
+ %cmp = icmp eq i64 %val1, %val2
+ %v = sext i1 %cmp to i64
+ ret i64 %v
+
+; CHECK: fun15
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i64 %val1, %val2
+; CHECK: cost of 4 for instruction: %v = sext i1 %cmp to i64
+}
+
+define i8 @fun16(float %val1, float %val2) {
+ %cmp = fcmp ogt float %val1, %val2
+ %v = sext i1 %cmp to i8
+ ret i8 %v
+
+; CHECK: fun16
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt float %val1, %val2
+; CHECK: cost of 4 for instruction: %v = sext i1 %cmp to i8
+}
+
+define i16 @fun17(float %val1, float %val2) {
+ %cmp = fcmp ogt float %val1, %val2
+ %v = sext i1 %cmp to i16
+ ret i16 %v
+
+; CHECK: fun17
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt float %val1, %val2
+; CHECK: cost of 4 for instruction: %v = sext i1 %cmp to i16
+}
+
+define i32 @fun18(float %val1, float %val2) {
+ %cmp = fcmp ogt float %val1, %val2
+ %v = sext i1 %cmp to i32
+ ret i32 %v
+
+; CHECK: fun18
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt float %val1, %val2
+; CHECK: cost of 4 for instruction: %v = sext i1 %cmp to i32
+}
+
+define i64 @fun19(float %val1, float %val2) {
+ %cmp = fcmp ogt float %val1, %val2
+ %v = sext i1 %cmp to i64
+ ret i64 %v
+
+; CHECK: fun19
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt float %val1, %val2
+; CHECK: cost of 5 for instruction: %v = sext i1 %cmp to i64
+}
+
+define i8 @fun20(double %val1, double %val2) {
+ %cmp = fcmp ogt double %val1, %val2
+ %v = sext i1 %cmp to i8
+ ret i8 %v
+
+; CHECK: fun20
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt double %val1, %val2
+; CHECK: cost of 4 for instruction: %v = sext i1 %cmp to i8
+}
+
+define i16 @fun21(double %val1, double %val2) {
+ %cmp = fcmp ogt double %val1, %val2
+ %v = sext i1 %cmp to i16
+ ret i16 %v
+
+; CHECK: fun21
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt double %val1, %val2
+; CHECK: cost of 4 for instruction: %v = sext i1 %cmp to i16
+}
+
+define i32 @fun22(double %val1, double %val2) {
+ %cmp = fcmp ogt double %val1, %val2
+ %v = sext i1 %cmp to i32
+ ret i32 %v
+
+; CHECK: fun22
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt double %val1, %val2
+; CHECK: cost of 4 for instruction: %v = sext i1 %cmp to i32
+}
+
+define i64 @fun23(double %val1, double %val2) {
+ %cmp = fcmp ogt double %val1, %val2
+ %v = sext i1 %cmp to i64
+ ret i64 %v
+
+; CHECK: fun23
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt double %val1, %val2
+; CHECK: cost of 5 for instruction: %v = sext i1 %cmp to i64
+}
+
+define <2 x i8> @fun24(<2 x i8> %val1, <2 x i8> %val2) {
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i8>
+ ret <2 x i8> %v
+
+; CHECK: fun24
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i8> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <2 x i1> %cmp to <2 x i8>
+}
+
+define <2 x i16> @fun25(<2 x i8> %val1, <2 x i8> %val2) {
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i16>
+ ret <2 x i16> %v
+
+; CHECK: fun25
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i8> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <2 x i1> %cmp to <2 x i16>
+}
+
+define <2 x i32> @fun26(<2 x i8> %val1, <2 x i8> %val2) {
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i32>
+ ret <2 x i32> %v
+
+; CHECK: fun26
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i8> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = sext <2 x i1> %cmp to <2 x i32>
+}
+
+define <2 x i64> @fun27(<2 x i8> %val1, <2 x i8> %val2) {
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %v
+
+; CHECK: fun27
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i8> %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext <2 x i1> %cmp to <2 x i64>
+}
+
+define <2 x i8> @fun28(<2 x i16> %val1, <2 x i16> %val2) {
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i8>
+ ret <2 x i8> %v
+
+; CHECK: fun28
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i16> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <2 x i1> %cmp to <2 x i8>
+}
+
+define <2 x i16> @fun29(<2 x i16> %val1, <2 x i16> %val2) {
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i16>
+ ret <2 x i16> %v
+
+; CHECK: fun29
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i16> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <2 x i1> %cmp to <2 x i16>
+}
+
+define <2 x i32> @fun30(<2 x i16> %val1, <2 x i16> %val2) {
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i32>
+ ret <2 x i32> %v
+
+; CHECK: fun30
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i16> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <2 x i1> %cmp to <2 x i32>
+}
+
+define <2 x i64> @fun31(<2 x i16> %val1, <2 x i16> %val2) {
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %v
+
+; CHECK: fun31
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i16> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = sext <2 x i1> %cmp to <2 x i64>
+}
+
+define <2 x i8> @fun32(<2 x i32> %val1, <2 x i32> %val2) {
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i8>
+ ret <2 x i8> %v
+
+; CHECK: fun32
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i32> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <2 x i1> %cmp to <2 x i8>
+}
+
+define <2 x i16> @fun33(<2 x i32> %val1, <2 x i32> %val2) {
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i16>
+ ret <2 x i16> %v
+
+; CHECK: fun33
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i32> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <2 x i1> %cmp to <2 x i16>
+}
+
+define <2 x i32> @fun34(<2 x i32> %val1, <2 x i32> %val2) {
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i32>
+ ret <2 x i32> %v
+
+; CHECK: fun34
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i32> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <2 x i1> %cmp to <2 x i32>
+}
+
+define <2 x i64> @fun35(<2 x i32> %val1, <2 x i32> %val2) {
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %v
+
+; CHECK: fun35
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i32> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <2 x i1> %cmp to <2 x i64>
+}
+
+define <2 x i8> @fun36(<2 x i64> %val1, <2 x i64> %val2) {
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i8>
+ ret <2 x i8> %v
+
+; CHECK: fun36
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i64> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <2 x i1> %cmp to <2 x i8>
+}
+
+define <2 x i16> @fun37(<2 x i64> %val1, <2 x i64> %val2) {
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i16>
+ ret <2 x i16> %v
+
+; CHECK: fun37
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i64> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <2 x i1> %cmp to <2 x i16>
+}
+
+define <2 x i32> @fun38(<2 x i64> %val1, <2 x i64> %val2) {
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i32>
+ ret <2 x i32> %v
+
+; CHECK: fun38
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i64> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <2 x i1> %cmp to <2 x i32>
+}
+
+define <2 x i64> @fun39(<2 x i64> %val1, <2 x i64> %val2) {
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %v
+
+; CHECK: fun39
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i64> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <2 x i1> %cmp to <2 x i64>
+}
+
+define <2 x i8> @fun40(<2 x float> %val1, <2 x float> %val2) {
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i8>
+ ret <2 x i8> %v
+
+; CHECK: fun40
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <2 x float> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <2 x i1> %cmp to <2 x i8>
+}
+
+define <2 x i16> @fun41(<2 x float> %val1, <2 x float> %val2) {
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i16>
+ ret <2 x i16> %v
+
+; CHECK: fun41
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <2 x float> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <2 x i1> %cmp to <2 x i16>
+}
+
+define <2 x i32> @fun42(<2 x float> %val1, <2 x float> %val2) {
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i32>
+ ret <2 x i32> %v
+
+; CHECK: fun42
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <2 x float> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <2 x i1> %cmp to <2 x i32>
+}
+
+define <2 x i64> @fun43(<2 x float> %val1, <2 x float> %val2) {
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %v
+
+; CHECK: fun43
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <2 x float> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <2 x i1> %cmp to <2 x i64>
+}
+
+define <2 x i8> @fun44(<2 x double> %val1, <2 x double> %val2) {
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i8>
+ ret <2 x i8> %v
+
+; CHECK: fun44
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt <2 x double> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <2 x i1> %cmp to <2 x i8>
+}
+
+define <2 x i16> @fun45(<2 x double> %val1, <2 x double> %val2) {
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i16>
+ ret <2 x i16> %v
+
+; CHECK: fun45
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt <2 x double> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <2 x i1> %cmp to <2 x i16>
+}
+
+define <2 x i32> @fun46(<2 x double> %val1, <2 x double> %val2) {
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i32>
+ ret <2 x i32> %v
+
+; CHECK: fun46
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt <2 x double> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <2 x i1> %cmp to <2 x i32>
+}
+
+define <2 x i64> @fun47(<2 x double> %val1, <2 x double> %val2) {
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %v = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %v
+
+; CHECK: fun47
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt <2 x double> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <2 x i1> %cmp to <2 x i64>
+}
+
+define <4 x i8> @fun48(<4 x i8> %val1, <4 x i8> %val2) {
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i8>
+ ret <4 x i8> %v
+
+; CHECK: fun48
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i8> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <4 x i1> %cmp to <4 x i8>
+}
+
+define <4 x i16> @fun49(<4 x i8> %val1, <4 x i8> %val2) {
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i16>
+ ret <4 x i16> %v
+
+; CHECK: fun49
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i8> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <4 x i1> %cmp to <4 x i16>
+}
+
+define <4 x i32> @fun50(<4 x i8> %val1, <4 x i8> %val2) {
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %v
+
+; CHECK: fun50
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i8> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = sext <4 x i1> %cmp to <4 x i32>
+}
+
+define <4 x i64> @fun51(<4 x i8> %val1, <4 x i8> %val2) {
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i64>
+ ret <4 x i64> %v
+
+; CHECK: fun51
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i8> %val1, %val2
+; CHECK: cost of 7 for instruction: %v = sext <4 x i1> %cmp to <4 x i64>
+}
+
+define <4 x i8> @fun52(<4 x i16> %val1, <4 x i16> %val2) {
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i8>
+ ret <4 x i8> %v
+
+; CHECK: fun52
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i16> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <4 x i1> %cmp to <4 x i8>
+}
+
+define <4 x i16> @fun53(<4 x i16> %val1, <4 x i16> %val2) {
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i16>
+ ret <4 x i16> %v
+
+; CHECK: fun53
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i16> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <4 x i1> %cmp to <4 x i16>
+}
+
+define <4 x i32> @fun54(<4 x i16> %val1, <4 x i16> %val2) {
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %v
+
+; CHECK: fun54
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i16> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <4 x i1> %cmp to <4 x i32>
+}
+
+define <4 x i64> @fun55(<4 x i16> %val1, <4 x i16> %val2) {
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i64>
+ ret <4 x i64> %v
+
+; CHECK: fun55
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i16> %val1, %val2
+; CHECK: cost of 5 for instruction: %v = sext <4 x i1> %cmp to <4 x i64>
+}
+
+define <4 x i8> @fun56(<4 x i32> %val1, <4 x i32> %val2) {
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i8>
+ ret <4 x i8> %v
+
+; CHECK: fun56
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i32> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <4 x i1> %cmp to <4 x i8>
+}
+
+define <4 x i16> @fun57(<4 x i32> %val1, <4 x i32> %val2) {
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i16>
+ ret <4 x i16> %v
+
+; CHECK: fun57
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i32> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <4 x i1> %cmp to <4 x i16>
+}
+
+define <4 x i32> @fun58(<4 x i32> %val1, <4 x i32> %val2) {
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %v
+
+; CHECK: fun58
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i32> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <4 x i1> %cmp to <4 x i32>
+}
+
+define <4 x i64> @fun59(<4 x i32> %val1, <4 x i32> %val2) {
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i64>
+ ret <4 x i64> %v
+
+; CHECK: fun59
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i32> %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext <4 x i1> %cmp to <4 x i64>
+}
+
+define <4 x i8> @fun60(<4 x i64> %val1, <4 x i64> %val2) {
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i8>
+ ret <4 x i8> %v
+
+; CHECK: fun60
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <4 x i64> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <4 x i1> %cmp to <4 x i8>
+}
+
+define <4 x i16> @fun61(<4 x i64> %val1, <4 x i64> %val2) {
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i16>
+ ret <4 x i16> %v
+
+; CHECK: fun61
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <4 x i64> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <4 x i1> %cmp to <4 x i16>
+}
+
+define <4 x i32> @fun62(<4 x i64> %val1, <4 x i64> %val2) {
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %v
+
+; CHECK: fun62
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <4 x i64> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <4 x i1> %cmp to <4 x i32>
+}
+
+define <4 x i64> @fun63(<4 x i64> %val1, <4 x i64> %val2) {
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i64>
+ ret <4 x i64> %v
+
+; CHECK: fun63
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <4 x i64> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <4 x i1> %cmp to <4 x i64>
+}
+
+define <4 x i8> @fun64(<4 x float> %val1, <4 x float> %val2) {
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i8>
+ ret <4 x i8> %v
+
+; CHECK: fun64
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <4 x float> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <4 x i1> %cmp to <4 x i8>
+}
+
+define <4 x i16> @fun65(<4 x float> %val1, <4 x float> %val2) {
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i16>
+ ret <4 x i16> %v
+
+; CHECK: fun65
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <4 x float> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <4 x i1> %cmp to <4 x i16>
+}
+
+define <4 x i32> @fun66(<4 x float> %val1, <4 x float> %val2) {
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %v
+
+; CHECK: fun66
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <4 x float> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <4 x i1> %cmp to <4 x i32>
+}
+
+define <4 x i64> @fun67(<4 x float> %val1, <4 x float> %val2) {
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i64>
+ ret <4 x i64> %v
+
+; CHECK: fun67
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <4 x float> %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext <4 x i1> %cmp to <4 x i64>
+}
+
+define <4 x i8> @fun68(<4 x double> %val1, <4 x double> %val2) {
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i8>
+ ret <4 x i8> %v
+
+; CHECK: fun68
+; CHECK: cost of 2 for instruction: %cmp = fcmp ogt <4 x double> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <4 x i1> %cmp to <4 x i8>
+}
+
+define <4 x i16> @fun69(<4 x double> %val1, <4 x double> %val2) {
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i16>
+ ret <4 x i16> %v
+
+; CHECK: fun69
+; CHECK: cost of 2 for instruction: %cmp = fcmp ogt <4 x double> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <4 x i1> %cmp to <4 x i16>
+}
+
+define <4 x i32> @fun70(<4 x double> %val1, <4 x double> %val2) {
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %v
+
+; CHECK: fun70
+; CHECK: cost of 2 for instruction: %cmp = fcmp ogt <4 x double> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <4 x i1> %cmp to <4 x i32>
+}
+
+define <4 x i64> @fun71(<4 x double> %val1, <4 x double> %val2) {
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %v = sext <4 x i1> %cmp to <4 x i64>
+ ret <4 x i64> %v
+
+; CHECK: fun71
+; CHECK: cost of 2 for instruction: %cmp = fcmp ogt <4 x double> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <4 x i1> %cmp to <4 x i64>
+}
+
+define <8 x i8> @fun72(<8 x i8> %val1, <8 x i8> %val2) {
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i8>
+ ret <8 x i8> %v
+
+; CHECK: fun72
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i8> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <8 x i1> %cmp to <8 x i8>
+}
+
+define <8 x i16> @fun73(<8 x i8> %val1, <8 x i8> %val2) {
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %v
+
+; CHECK: fun73
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i8> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <8 x i1> %cmp to <8 x i16>
+}
+
+define <8 x i32> @fun74(<8 x i8> %val1, <8 x i8> %val2) {
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i32>
+ ret <8 x i32> %v
+
+; CHECK: fun74
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i8> %val1, %val2
+; CHECK: cost of 5 for instruction: %v = sext <8 x i1> %cmp to <8 x i32>
+}
+
+define <8 x i64> @fun75(<8 x i8> %val1, <8 x i8> %val2) {
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i64>
+ ret <8 x i64> %v
+
+; CHECK: fun75
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i8> %val1, %val2
+; CHECK: cost of 15 for instruction: %v = sext <8 x i1> %cmp to <8 x i64>
+}
+
+define <8 x i8> @fun76(<8 x i16> %val1, <8 x i16> %val2) {
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i8>
+ ret <8 x i8> %v
+
+; CHECK: fun76
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i16> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <8 x i1> %cmp to <8 x i8>
+}
+
+define <8 x i16> @fun77(<8 x i16> %val1, <8 x i16> %val2) {
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %v
+
+; CHECK: fun77
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i16> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <8 x i1> %cmp to <8 x i16>
+}
+
+define <8 x i32> @fun78(<8 x i16> %val1, <8 x i16> %val2) {
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i32>
+ ret <8 x i32> %v
+
+; CHECK: fun78
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i16> %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext <8 x i1> %cmp to <8 x i32>
+}
+
+define <8 x i64> @fun79(<8 x i16> %val1, <8 x i16> %val2) {
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i64>
+ ret <8 x i64> %v
+
+; CHECK: fun79
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i16> %val1, %val2
+; CHECK: cost of 11 for instruction: %v = sext <8 x i1> %cmp to <8 x i64>
+}
+
+define <8 x i8> @fun80(<8 x i32> %val1, <8 x i32> %val2) {
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i8>
+ ret <8 x i8> %v
+
+; CHECK: fun80
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <8 x i32> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <8 x i1> %cmp to <8 x i8>
+}
+
+define <8 x i16> @fun81(<8 x i32> %val1, <8 x i32> %val2) {
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %v
+
+; CHECK: fun81
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <8 x i32> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <8 x i1> %cmp to <8 x i16>
+}
+
+define <8 x i32> @fun82(<8 x i32> %val1, <8 x i32> %val2) {
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i32>
+ ret <8 x i32> %v
+
+; CHECK: fun82
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <8 x i32> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <8 x i1> %cmp to <8 x i32>
+}
+
+define <8 x i64> @fun83(<8 x i32> %val1, <8 x i32> %val2) {
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i64>
+ ret <8 x i64> %v
+
+; CHECK: fun83
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <8 x i32> %val1, %val2
+; CHECK: cost of 7 for instruction: %v = sext <8 x i1> %cmp to <8 x i64>
+}
+
+define <8 x i8> @fun84(<8 x i64> %val1, <8 x i64> %val2) {
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i8>
+ ret <8 x i8> %v
+
+; CHECK: fun84
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <8 x i64> %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext <8 x i1> %cmp to <8 x i8>
+}
+
+define <8 x i16> @fun85(<8 x i64> %val1, <8 x i64> %val2) {
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %v
+
+; CHECK: fun85
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <8 x i64> %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext <8 x i1> %cmp to <8 x i16>
+}
+
+define <8 x i32> @fun86(<8 x i64> %val1, <8 x i64> %val2) {
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i32>
+ ret <8 x i32> %v
+
+; CHECK: fun86
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <8 x i64> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = sext <8 x i1> %cmp to <8 x i32>
+}
+
+define <8 x i64> @fun87(<8 x i64> %val1, <8 x i64> %val2) {
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i64>
+ ret <8 x i64> %v
+
+; CHECK: fun87
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <8 x i64> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <8 x i1> %cmp to <8 x i64>
+}
+
+define <8 x i8> @fun88(<8 x float> %val1, <8 x float> %val2) {
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i8>
+ ret <8 x i8> %v
+
+; CHECK: fun88
+; CHECK: cost of 20 for instruction: %cmp = fcmp ogt <8 x float> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <8 x i1> %cmp to <8 x i8>
+}
+
+define <8 x i16> @fun89(<8 x float> %val1, <8 x float> %val2) {
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %v
+
+; CHECK: fun89
+; CHECK: cost of 20 for instruction: %cmp = fcmp ogt <8 x float> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <8 x i1> %cmp to <8 x i16>
+}
+
+define <8 x i32> @fun90(<8 x float> %val1, <8 x float> %val2) {
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i32>
+ ret <8 x i32> %v
+
+; CHECK: fun90
+; CHECK: cost of 20 for instruction: %cmp = fcmp ogt <8 x float> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <8 x i1> %cmp to <8 x i32>
+}
+
+define <8 x i64> @fun91(<8 x float> %val1, <8 x float> %val2) {
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i64>
+ ret <8 x i64> %v
+
+; CHECK: fun91
+; CHECK: cost of 20 for instruction: %cmp = fcmp ogt <8 x float> %val1, %val2
+; CHECK: cost of 7 for instruction: %v = sext <8 x i1> %cmp to <8 x i64>
+}
+
+define <8 x i8> @fun92(<8 x double> %val1, <8 x double> %val2) {
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i8>
+ ret <8 x i8> %v
+
+; CHECK: fun92
+; CHECK: cost of 4 for instruction: %cmp = fcmp ogt <8 x double> %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext <8 x i1> %cmp to <8 x i8>
+}
+
+define <8 x i16> @fun93(<8 x double> %val1, <8 x double> %val2) {
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %v
+
+; CHECK: fun93
+; CHECK: cost of 4 for instruction: %cmp = fcmp ogt <8 x double> %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext <8 x i1> %cmp to <8 x i16>
+}
+
+define <8 x i32> @fun94(<8 x double> %val1, <8 x double> %val2) {
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i32>
+ ret <8 x i32> %v
+
+; CHECK: fun94
+; CHECK: cost of 4 for instruction: %cmp = fcmp ogt <8 x double> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = sext <8 x i1> %cmp to <8 x i32>
+}
+
+define <8 x i64> @fun95(<8 x double> %val1, <8 x double> %val2) {
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %v = sext <8 x i1> %cmp to <8 x i64>
+ ret <8 x i64> %v
+
+; CHECK: fun95
+; CHECK: cost of 4 for instruction: %cmp = fcmp ogt <8 x double> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <8 x i1> %cmp to <8 x i64>
+}
+
+define <16 x i8> @fun96(<16 x i8> %val1, <16 x i8> %val2) {
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %v
+
+; CHECK: fun96
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <16 x i8> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <16 x i1> %cmp to <16 x i8>
+}
+
+define <16 x i16> @fun97(<16 x i8> %val1, <16 x i8> %val2) {
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i16>
+ ret <16 x i16> %v
+
+; CHECK: fun97
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <16 x i8> %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext <16 x i1> %cmp to <16 x i16>
+}
+
+define <16 x i32> @fun98(<16 x i8> %val1, <16 x i8> %val2) {
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i32>
+ ret <16 x i32> %v
+
+; CHECK: fun98
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <16 x i8> %val1, %val2
+; CHECK: cost of 11 for instruction: %v = sext <16 x i1> %cmp to <16 x i32>
+}
+
+define <16 x i64> @fun99(<16 x i8> %val1, <16 x i8> %val2) {
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i64>
+ ret <16 x i64> %v
+
+; CHECK: fun99
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <16 x i8> %val1, %val2
+; CHECK: cost of 31 for instruction: %v = sext <16 x i1> %cmp to <16 x i64>
+}
+
+define <16 x i8> @fun100(<16 x i16> %val1, <16 x i16> %val2) {
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %v
+
+; CHECK: fun100
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <16 x i16> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = sext <16 x i1> %cmp to <16 x i8>
+}
+
+define <16 x i16> @fun101(<16 x i16> %val1, <16 x i16> %val2) {
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i16>
+ ret <16 x i16> %v
+
+; CHECK: fun101
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <16 x i16> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <16 x i1> %cmp to <16 x i16>
+}
+
+define <16 x i32> @fun102(<16 x i16> %val1, <16 x i16> %val2) {
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i32>
+ ret <16 x i32> %v
+
+; CHECK: fun102
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <16 x i16> %val1, %val2
+; CHECK: cost of 7 for instruction: %v = sext <16 x i1> %cmp to <16 x i32>
+}
+
+define <16 x i64> @fun103(<16 x i16> %val1, <16 x i16> %val2) {
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i64>
+ ret <16 x i64> %v
+
+; CHECK: fun103
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <16 x i16> %val1, %val2
+; CHECK: cost of 23 for instruction: %v = sext <16 x i1> %cmp to <16 x i64>
+}
+
+define <16 x i8> @fun104(<16 x i32> %val1, <16 x i32> %val2) {
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %v
+
+; CHECK: fun104
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <16 x i32> %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext <16 x i1> %cmp to <16 x i8>
+}
+
+define <16 x i16> @fun105(<16 x i32> %val1, <16 x i32> %val2) {
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i16>
+ ret <16 x i16> %v
+
+; CHECK: fun105
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <16 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = sext <16 x i1> %cmp to <16 x i16>
+}
+
+define <16 x i32> @fun106(<16 x i32> %val1, <16 x i32> %val2) {
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i32>
+ ret <16 x i32> %v
+
+; CHECK: fun106
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <16 x i32> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <16 x i1> %cmp to <16 x i32>
+}
+
+define <16 x i64> @fun107(<16 x i32> %val1, <16 x i32> %val2) {
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i64>
+ ret <16 x i64> %v
+
+; CHECK: fun107
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <16 x i32> %val1, %val2
+; CHECK: cost of 15 for instruction: %v = sext <16 x i1> %cmp to <16 x i64>
+}
+
+define <16 x i8> @fun108(<16 x i64> %val1, <16 x i64> %val2) {
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %v
+
+; CHECK: fun108
+; CHECK: cost of 8 for instruction: %cmp = icmp eq <16 x i64> %val1, %val2
+; CHECK: cost of 7 for instruction: %v = sext <16 x i1> %cmp to <16 x i8>
+}
+
+define <16 x i16> @fun109(<16 x i64> %val1, <16 x i64> %val2) {
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i16>
+ ret <16 x i16> %v
+
+; CHECK: fun109
+; CHECK: cost of 8 for instruction: %cmp = icmp eq <16 x i64> %val1, %val2
+; CHECK: cost of 6 for instruction: %v = sext <16 x i1> %cmp to <16 x i16>
+}
+
+define <16 x i32> @fun110(<16 x i64> %val1, <16 x i64> %val2) {
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i32>
+ ret <16 x i32> %v
+
+; CHECK: fun110
+; CHECK: cost of 8 for instruction: %cmp = icmp eq <16 x i64> %val1, %val2
+; CHECK: cost of 4 for instruction: %v = sext <16 x i1> %cmp to <16 x i32>
+}
+
+define <16 x i64> @fun111(<16 x i64> %val1, <16 x i64> %val2) {
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i64>
+ ret <16 x i64> %v
+
+; CHECK: fun111
+; CHECK: cost of 8 for instruction: %cmp = icmp eq <16 x i64> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <16 x i1> %cmp to <16 x i64>
+}
+
+define <16 x i8> @fun112(<16 x float> %val1, <16 x float> %val2) {
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %v
+
+; CHECK: fun112
+; CHECK: cost of 40 for instruction: %cmp = fcmp ogt <16 x float> %val1, %val2
+; CHECK: cost of 3 for instruction: %v = sext <16 x i1> %cmp to <16 x i8>
+}
+
+define <16 x i16> @fun113(<16 x float> %val1, <16 x float> %val2) {
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i16>
+ ret <16 x i16> %v
+
+; CHECK: fun113
+; CHECK: cost of 40 for instruction: %cmp = fcmp ogt <16 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = sext <16 x i1> %cmp to <16 x i16>
+}
+
+define <16 x i32> @fun114(<16 x float> %val1, <16 x float> %val2) {
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i32>
+ ret <16 x i32> %v
+
+; CHECK: fun114
+; CHECK: cost of 40 for instruction: %cmp = fcmp ogt <16 x float> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <16 x i1> %cmp to <16 x i32>
+}
+
+define <16 x i64> @fun115(<16 x float> %val1, <16 x float> %val2) {
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i64>
+ ret <16 x i64> %v
+
+; CHECK: fun115
+; CHECK: cost of 40 for instruction: %cmp = fcmp ogt <16 x float> %val1, %val2
+; CHECK: cost of 15 for instruction: %v = sext <16 x i1> %cmp to <16 x i64>
+}
+
+define <16 x i8> @fun116(<16 x double> %val1, <16 x double> %val2) {
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %v
+
+; CHECK: fun116
+; CHECK: cost of 8 for instruction: %cmp = fcmp ogt <16 x double> %val1, %val2
+; CHECK: cost of 7 for instruction: %v = sext <16 x i1> %cmp to <16 x i8>
+}
+
+define <16 x i16> @fun117(<16 x double> %val1, <16 x double> %val2) {
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i16>
+ ret <16 x i16> %v
+
+; CHECK: fun117
+; CHECK: cost of 8 for instruction: %cmp = fcmp ogt <16 x double> %val1, %val2
+; CHECK: cost of 6 for instruction: %v = sext <16 x i1> %cmp to <16 x i16>
+}
+
+define <16 x i32> @fun118(<16 x double> %val1, <16 x double> %val2) {
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i32>
+ ret <16 x i32> %v
+
+; CHECK: fun118
+; CHECK: cost of 8 for instruction: %cmp = fcmp ogt <16 x double> %val1, %val2
+; CHECK: cost of 4 for instruction: %v = sext <16 x i1> %cmp to <16 x i32>
+}
+
+define <16 x i64> @fun119(<16 x double> %val1, <16 x double> %val2) {
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %v = sext <16 x i1> %cmp to <16 x i64>
+ ret <16 x i64> %v
+
+; CHECK: fun119
+; CHECK: cost of 8 for instruction: %cmp = fcmp ogt <16 x double> %val1, %val2
+; CHECK: cost of 0 for instruction: %v = sext <16 x i1> %cmp to <16 x i64>
+}
+
+define i8 @fun120(i8 %val1, i8 %val2) {
+ %cmp = icmp eq i8 %val1, %val2
+ %v = zext i1 %cmp to i8
+ ret i8 %v
+
+; CHECK: fun120
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext i1 %cmp to i8
+}
+
+define i16 @fun121(i8 %val1, i8 %val2) {
+ %cmp = icmp eq i8 %val1, %val2
+ %v = zext i1 %cmp to i16
+ ret i16 %v
+
+; CHECK: fun121
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext i1 %cmp to i16
+}
+
+define i32 @fun122(i8 %val1, i8 %val2) {
+ %cmp = icmp eq i8 %val1, %val2
+ %v = zext i1 %cmp to i32
+ ret i32 %v
+
+; CHECK: fun122
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext i1 %cmp to i32
+}
+
+define i64 @fun123(i8 %val1, i8 %val2) {
+ %cmp = icmp eq i8 %val1, %val2
+ %v = zext i1 %cmp to i64
+ ret i64 %v
+
+; CHECK: fun123
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext i1 %cmp to i64
+}
+
+define i8 @fun124(i16 %val1, i16 %val2) {
+ %cmp = icmp eq i16 %val1, %val2
+ %v = zext i1 %cmp to i8
+ ret i8 %v
+
+; CHECK: fun124
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext i1 %cmp to i8
+}
+
+define i16 @fun125(i16 %val1, i16 %val2) {
+ %cmp = icmp eq i16 %val1, %val2
+ %v = zext i1 %cmp to i16
+ ret i16 %v
+
+; CHECK: fun125
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext i1 %cmp to i16
+}
+
+define i32 @fun126(i16 %val1, i16 %val2) {
+ %cmp = icmp eq i16 %val1, %val2
+ %v = zext i1 %cmp to i32
+ ret i32 %v
+
+; CHECK: fun126
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext i1 %cmp to i32
+}
+
+define i64 @fun127(i16 %val1, i16 %val2) {
+ %cmp = icmp eq i16 %val1, %val2
+ %v = zext i1 %cmp to i64
+ ret i64 %v
+
+; CHECK: fun127
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext i1 %cmp to i64
+}
+
+define i8 @fun128(i32 %val1, i32 %val2) {
+ %cmp = icmp eq i32 %val1, %val2
+ %v = zext i1 %cmp to i8
+ ret i8 %v
+
+; CHECK: fun128
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i32 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext i1 %cmp to i8
+}
+
+define i16 @fun129(i32 %val1, i32 %val2) {
+ %cmp = icmp eq i32 %val1, %val2
+ %v = zext i1 %cmp to i16
+ ret i16 %v
+
+; CHECK: fun129
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i32 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext i1 %cmp to i16
+}
+
+define i32 @fun130(i32 %val1, i32 %val2) {
+ %cmp = icmp eq i32 %val1, %val2
+ %v = zext i1 %cmp to i32
+ ret i32 %v
+
+; CHECK: fun130
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i32 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext i1 %cmp to i32
+}
+
+define i64 @fun131(i32 %val1, i32 %val2) {
+ %cmp = icmp eq i32 %val1, %val2
+ %v = zext i1 %cmp to i64
+ ret i64 %v
+
+; CHECK: fun131
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i32 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext i1 %cmp to i64
+}
+
+define i8 @fun132(i64 %val1, i64 %val2) {
+ %cmp = icmp eq i64 %val1, %val2
+ %v = zext i1 %cmp to i8
+ ret i8 %v
+
+; CHECK: fun132
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i64 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext i1 %cmp to i8
+}
+
+define i16 @fun133(i64 %val1, i64 %val2) {
+ %cmp = icmp eq i64 %val1, %val2
+ %v = zext i1 %cmp to i16
+ ret i16 %v
+
+; CHECK: fun133
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i64 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext i1 %cmp to i16
+}
+
+define i32 @fun134(i64 %val1, i64 %val2) {
+ %cmp = icmp eq i64 %val1, %val2
+ %v = zext i1 %cmp to i32
+ ret i32 %v
+
+; CHECK: fun134
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i64 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext i1 %cmp to i32
+}
+
+define i64 @fun135(i64 %val1, i64 %val2) {
+ %cmp = icmp eq i64 %val1, %val2
+ %v = zext i1 %cmp to i64
+ ret i64 %v
+
+; CHECK: fun135
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i64 %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext i1 %cmp to i64
+}
+
+define i8 @fun136(float %val1, float %val2) {
+ %cmp = fcmp ogt float %val1, %val2
+ %v = zext i1 %cmp to i8
+ ret i8 %v
+
+; CHECK: fun136
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt float %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext i1 %cmp to i8
+}
+
+define i16 @fun137(float %val1, float %val2) {
+ %cmp = fcmp ogt float %val1, %val2
+ %v = zext i1 %cmp to i16
+ ret i16 %v
+
+; CHECK: fun137
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt float %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext i1 %cmp to i16
+}
+
+define i32 @fun138(float %val1, float %val2) {
+ %cmp = fcmp ogt float %val1, %val2
+ %v = zext i1 %cmp to i32
+ ret i32 %v
+
+; CHECK: fun138
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt float %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext i1 %cmp to i32
+}
+
+define i64 @fun139(float %val1, float %val2) {
+ %cmp = fcmp ogt float %val1, %val2
+ %v = zext i1 %cmp to i64
+ ret i64 %v
+
+; CHECK: fun139
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt float %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext i1 %cmp to i64
+}
+
+define i8 @fun140(double %val1, double %val2) {
+ %cmp = fcmp ogt double %val1, %val2
+ %v = zext i1 %cmp to i8
+ ret i8 %v
+
+; CHECK: fun140
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt double %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext i1 %cmp to i8
+}
+
+define i16 @fun141(double %val1, double %val2) {
+ %cmp = fcmp ogt double %val1, %val2
+ %v = zext i1 %cmp to i16
+ ret i16 %v
+
+; CHECK: fun141
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt double %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext i1 %cmp to i16
+}
+
+define i32 @fun142(double %val1, double %val2) {
+ %cmp = fcmp ogt double %val1, %val2
+ %v = zext i1 %cmp to i32
+ ret i32 %v
+
+; CHECK: fun142
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt double %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext i1 %cmp to i32
+}
+
+define i64 @fun143(double %val1, double %val2) {
+ %cmp = fcmp ogt double %val1, %val2
+ %v = zext i1 %cmp to i64
+ ret i64 %v
+
+; CHECK: fun143
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt double %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext i1 %cmp to i64
+}
+
+define <2 x i8> @fun144(<2 x i8> %val1, <2 x i8> %val2) {
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i8>
+ ret <2 x i8> %v
+
+; CHECK: fun144
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i8> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = zext <2 x i1> %cmp to <2 x i8>
+}
+
+define <2 x i16> @fun145(<2 x i8> %val1, <2 x i8> %val2) {
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i16>
+ ret <2 x i16> %v
+
+; CHECK: fun145
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i8> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <2 x i1> %cmp to <2 x i16>
+}
+
+define <2 x i32> @fun146(<2 x i8> %val1, <2 x i8> %val2) {
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i32>
+ ret <2 x i32> %v
+
+; CHECK: fun146
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i8> %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext <2 x i1> %cmp to <2 x i32>
+}
+
+define <2 x i64> @fun147(<2 x i8> %val1, <2 x i8> %val2) {
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %v
+
+; CHECK: fun147
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i8> %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext <2 x i1> %cmp to <2 x i64>
+}
+
+define <2 x i8> @fun148(<2 x i16> %val1, <2 x i16> %val2) {
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i8>
+ ret <2 x i8> %v
+
+; CHECK: fun148
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i16> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <2 x i1> %cmp to <2 x i8>
+}
+
+define <2 x i16> @fun149(<2 x i16> %val1, <2 x i16> %val2) {
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i16>
+ ret <2 x i16> %v
+
+; CHECK: fun149
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i16> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = zext <2 x i1> %cmp to <2 x i16>
+}
+
+define <2 x i32> @fun150(<2 x i16> %val1, <2 x i16> %val2) {
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i32>
+ ret <2 x i32> %v
+
+; CHECK: fun150
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i16> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <2 x i1> %cmp to <2 x i32>
+}
+
+define <2 x i64> @fun151(<2 x i16> %val1, <2 x i16> %val2) {
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %v
+
+; CHECK: fun151
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i16> %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext <2 x i1> %cmp to <2 x i64>
+}
+
+define <2 x i8> @fun152(<2 x i32> %val1, <2 x i32> %val2) {
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i8>
+ ret <2 x i8> %v
+
+; CHECK: fun152
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <2 x i1> %cmp to <2 x i8>
+}
+
+define <2 x i16> @fun153(<2 x i32> %val1, <2 x i32> %val2) {
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i16>
+ ret <2 x i16> %v
+
+; CHECK: fun153
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <2 x i1> %cmp to <2 x i16>
+}
+
+define <2 x i32> @fun154(<2 x i32> %val1, <2 x i32> %val2) {
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i32>
+ ret <2 x i32> %v
+
+; CHECK: fun154
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i32> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = zext <2 x i1> %cmp to <2 x i32>
+}
+
+define <2 x i64> @fun155(<2 x i32> %val1, <2 x i32> %val2) {
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %v
+
+; CHECK: fun155
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <2 x i1> %cmp to <2 x i64>
+}
+
+define <2 x i8> @fun156(<2 x i64> %val1, <2 x i64> %val2) {
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i8>
+ ret <2 x i8> %v
+
+; CHECK: fun156
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i64> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <2 x i1> %cmp to <2 x i8>
+}
+
+define <2 x i16> @fun157(<2 x i64> %val1, <2 x i64> %val2) {
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i16>
+ ret <2 x i16> %v
+
+; CHECK: fun157
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i64> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <2 x i1> %cmp to <2 x i16>
+}
+
+define <2 x i32> @fun158(<2 x i64> %val1, <2 x i64> %val2) {
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i32>
+ ret <2 x i32> %v
+
+; CHECK: fun158
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i64> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <2 x i1> %cmp to <2 x i32>
+}
+
+define <2 x i64> @fun159(<2 x i64> %val1, <2 x i64> %val2) {
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %v
+
+; CHECK: fun159
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i64> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = zext <2 x i1> %cmp to <2 x i64>
+}
+
+define <2 x i8> @fun160(<2 x float> %val1, <2 x float> %val2) {
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i8>
+ ret <2 x i8> %v
+
+; CHECK: fun160
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <2 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <2 x i1> %cmp to <2 x i8>
+}
+
+define <2 x i16> @fun161(<2 x float> %val1, <2 x float> %val2) {
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i16>
+ ret <2 x i16> %v
+
+; CHECK: fun161
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <2 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <2 x i1> %cmp to <2 x i16>
+}
+
+define <2 x i32> @fun162(<2 x float> %val1, <2 x float> %val2) {
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i32>
+ ret <2 x i32> %v
+
+; CHECK: fun162
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <2 x float> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = zext <2 x i1> %cmp to <2 x i32>
+}
+
+define <2 x i64> @fun163(<2 x float> %val1, <2 x float> %val2) {
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %v
+
+; CHECK: fun163
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <2 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <2 x i1> %cmp to <2 x i64>
+}
+
+define <2 x i8> @fun164(<2 x double> %val1, <2 x double> %val2) {
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i8>
+ ret <2 x i8> %v
+
+; CHECK: fun164
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt <2 x double> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <2 x i1> %cmp to <2 x i8>
+}
+
+define <2 x i16> @fun165(<2 x double> %val1, <2 x double> %val2) {
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i16>
+ ret <2 x i16> %v
+
+; CHECK: fun165
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt <2 x double> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <2 x i1> %cmp to <2 x i16>
+}
+
+define <2 x i32> @fun166(<2 x double> %val1, <2 x double> %val2) {
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i32>
+ ret <2 x i32> %v
+
+; CHECK: fun166
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt <2 x double> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <2 x i1> %cmp to <2 x i32>
+}
+
+define <2 x i64> @fun167(<2 x double> %val1, <2 x double> %val2) {
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %v = zext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %v
+
+; CHECK: fun167
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt <2 x double> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = zext <2 x i1> %cmp to <2 x i64>
+}
+
+define <4 x i8> @fun168(<4 x i8> %val1, <4 x i8> %val2) {
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i8>
+ ret <4 x i8> %v
+
+; CHECK: fun168
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i8> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = zext <4 x i1> %cmp to <4 x i8>
+}
+
+define <4 x i16> @fun169(<4 x i8> %val1, <4 x i8> %val2) {
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i16>
+ ret <4 x i16> %v
+
+; CHECK: fun169
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i8> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <4 x i1> %cmp to <4 x i16>
+}
+
+define <4 x i32> @fun170(<4 x i8> %val1, <4 x i8> %val2) {
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %v
+
+; CHECK: fun170
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i8> %val1, %val2
+; CHECK: cost of 3 for instruction: %v = zext <4 x i1> %cmp to <4 x i32>
+}
+
+define <4 x i64> @fun171(<4 x i8> %val1, <4 x i8> %val2) {
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i64>
+ ret <4 x i64> %v
+
+; CHECK: fun171
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i8> %val1, %val2
+; CHECK: cost of 9 for instruction: %v = zext <4 x i1> %cmp to <4 x i64>
+}
+
+define <4 x i8> @fun172(<4 x i16> %val1, <4 x i16> %val2) {
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i8>
+ ret <4 x i8> %v
+
+; CHECK: fun172
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i16> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <4 x i1> %cmp to <4 x i8>
+}
+
+define <4 x i16> @fun173(<4 x i16> %val1, <4 x i16> %val2) {
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i16>
+ ret <4 x i16> %v
+
+; CHECK: fun173
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i16> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = zext <4 x i1> %cmp to <4 x i16>
+}
+
+define <4 x i32> @fun174(<4 x i16> %val1, <4 x i16> %val2) {
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %v
+
+; CHECK: fun174
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i16> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <4 x i1> %cmp to <4 x i32>
+}
+
+define <4 x i64> @fun175(<4 x i16> %val1, <4 x i16> %val2) {
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i64>
+ ret <4 x i64> %v
+
+; CHECK: fun175
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i16> %val1, %val2
+; CHECK: cost of 7 for instruction: %v = zext <4 x i1> %cmp to <4 x i64>
+}
+
+define <4 x i8> @fun176(<4 x i32> %val1, <4 x i32> %val2) {
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i8>
+ ret <4 x i8> %v
+
+; CHECK: fun176
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <4 x i1> %cmp to <4 x i8>
+}
+
+define <4 x i16> @fun177(<4 x i32> %val1, <4 x i32> %val2) {
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i16>
+ ret <4 x i16> %v
+
+; CHECK: fun177
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <4 x i1> %cmp to <4 x i16>
+}
+
+define <4 x i32> @fun178(<4 x i32> %val1, <4 x i32> %val2) {
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %v
+
+; CHECK: fun178
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i32> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = zext <4 x i1> %cmp to <4 x i32>
+}
+
+define <4 x i64> @fun179(<4 x i32> %val1, <4 x i32> %val2) {
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i64>
+ ret <4 x i64> %v
+
+; CHECK: fun179
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i32> %val1, %val2
+; CHECK: cost of 5 for instruction: %v = zext <4 x i1> %cmp to <4 x i64>
+}
+
+define <4 x i8> @fun180(<4 x i64> %val1, <4 x i64> %val2) {
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i8>
+ ret <4 x i8> %v
+
+; CHECK: fun180
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <4 x i64> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <4 x i1> %cmp to <4 x i8>
+}
+
+define <4 x i16> @fun181(<4 x i64> %val1, <4 x i64> %val2) {
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i16>
+ ret <4 x i16> %v
+
+; CHECK: fun181
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <4 x i64> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <4 x i1> %cmp to <4 x i16>
+}
+
+define <4 x i32> @fun182(<4 x i64> %val1, <4 x i64> %val2) {
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %v
+
+; CHECK: fun182
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <4 x i64> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <4 x i1> %cmp to <4 x i32>
+}
+
+define <4 x i64> @fun183(<4 x i64> %val1, <4 x i64> %val2) {
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i64>
+ ret <4 x i64> %v
+
+; CHECK: fun183
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <4 x i64> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <4 x i1> %cmp to <4 x i64>
+}
+
+define <4 x i8> @fun184(<4 x float> %val1, <4 x float> %val2) {
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i8>
+ ret <4 x i8> %v
+
+; CHECK: fun184
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <4 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <4 x i1> %cmp to <4 x i8>
+}
+
+define <4 x i16> @fun185(<4 x float> %val1, <4 x float> %val2) {
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i16>
+ ret <4 x i16> %v
+
+; CHECK: fun185
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <4 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <4 x i1> %cmp to <4 x i16>
+}
+
+define <4 x i32> @fun186(<4 x float> %val1, <4 x float> %val2) {
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %v
+
+; CHECK: fun186
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <4 x float> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = zext <4 x i1> %cmp to <4 x i32>
+}
+
+define <4 x i64> @fun187(<4 x float> %val1, <4 x float> %val2) {
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i64>
+ ret <4 x i64> %v
+
+; CHECK: fun187
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <4 x float> %val1, %val2
+; CHECK: cost of 5 for instruction: %v = zext <4 x i1> %cmp to <4 x i64>
+}
+
+define <4 x i8> @fun188(<4 x double> %val1, <4 x double> %val2) {
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i8>
+ ret <4 x i8> %v
+
+; CHECK: fun188
+; CHECK: cost of 2 for instruction: %cmp = fcmp ogt <4 x double> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <4 x i1> %cmp to <4 x i8>
+}
+
+define <4 x i16> @fun189(<4 x double> %val1, <4 x double> %val2) {
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i16>
+ ret <4 x i16> %v
+
+; CHECK: fun189
+; CHECK: cost of 2 for instruction: %cmp = fcmp ogt <4 x double> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <4 x i1> %cmp to <4 x i16>
+}
+
+define <4 x i32> @fun190(<4 x double> %val1, <4 x double> %val2) {
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %v
+
+; CHECK: fun190
+; CHECK: cost of 2 for instruction: %cmp = fcmp ogt <4 x double> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <4 x i1> %cmp to <4 x i32>
+}
+
+define <4 x i64> @fun191(<4 x double> %val1, <4 x double> %val2) {
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %v = zext <4 x i1> %cmp to <4 x i64>
+ ret <4 x i64> %v
+
+; CHECK: fun191
+; CHECK: cost of 2 for instruction: %cmp = fcmp ogt <4 x double> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <4 x i1> %cmp to <4 x i64>
+}
+
+define <8 x i8> @fun192(<8 x i8> %val1, <8 x i8> %val2) {
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i8>
+ ret <8 x i8> %v
+
+; CHECK: fun192
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i8> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = zext <8 x i1> %cmp to <8 x i8>
+}
+
+define <8 x i16> @fun193(<8 x i8> %val1, <8 x i8> %val2) {
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %v
+
+; CHECK: fun193
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i8> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <8 x i1> %cmp to <8 x i16>
+}
+
+define <8 x i32> @fun194(<8 x i8> %val1, <8 x i8> %val2) {
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i32>
+ ret <8 x i32> %v
+
+; CHECK: fun194
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i8> %val1, %val2
+; CHECK: cost of 7 for instruction: %v = zext <8 x i1> %cmp to <8 x i32>
+}
+
+define <8 x i64> @fun195(<8 x i8> %val1, <8 x i8> %val2) {
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i64>
+ ret <8 x i64> %v
+
+; CHECK: fun195
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i8> %val1, %val2
+; CHECK: cost of 19 for instruction: %v = zext <8 x i1> %cmp to <8 x i64>
+}
+
+define <8 x i8> @fun196(<8 x i16> %val1, <8 x i16> %val2) {
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i8>
+ ret <8 x i8> %v
+
+; CHECK: fun196
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i16> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <8 x i1> %cmp to <8 x i8>
+}
+
+define <8 x i16> @fun197(<8 x i16> %val1, <8 x i16> %val2) {
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %v
+
+; CHECK: fun197
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i16> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = zext <8 x i1> %cmp to <8 x i16>
+}
+
+define <8 x i32> @fun198(<8 x i16> %val1, <8 x i16> %val2) {
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i32>
+ ret <8 x i32> %v
+
+; CHECK: fun198
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i16> %val1, %val2
+; CHECK: cost of 5 for instruction: %v = zext <8 x i1> %cmp to <8 x i32>
+}
+
+define <8 x i64> @fun199(<8 x i16> %val1, <8 x i16> %val2) {
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i64>
+ ret <8 x i64> %v
+
+; CHECK: fun199
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i16> %val1, %val2
+; CHECK: cost of 15 for instruction: %v = zext <8 x i1> %cmp to <8 x i64>
+}
+
+define <8 x i8> @fun200(<8 x i32> %val1, <8 x i32> %val2) {
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i8>
+ ret <8 x i8> %v
+
+; CHECK: fun200
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <8 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <8 x i1> %cmp to <8 x i8>
+}
+
+define <8 x i16> @fun201(<8 x i32> %val1, <8 x i32> %val2) {
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %v
+
+; CHECK: fun201
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <8 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <8 x i1> %cmp to <8 x i16>
+}
+
+define <8 x i32> @fun202(<8 x i32> %val1, <8 x i32> %val2) {
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i32>
+ ret <8 x i32> %v
+
+; CHECK: fun202
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <8 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <8 x i1> %cmp to <8 x i32>
+}
+
+define <8 x i64> @fun203(<8 x i32> %val1, <8 x i32> %val2) {
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i64>
+ ret <8 x i64> %v
+
+; CHECK: fun203
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <8 x i32> %val1, %val2
+; CHECK: cost of 11 for instruction: %v = zext <8 x i1> %cmp to <8 x i64>
+}
+
+define <8 x i8> @fun204(<8 x i64> %val1, <8 x i64> %val2) {
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i8>
+ ret <8 x i8> %v
+
+; CHECK: fun204
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <8 x i64> %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext <8 x i1> %cmp to <8 x i8>
+}
+
+define <8 x i16> @fun205(<8 x i64> %val1, <8 x i64> %val2) {
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %v
+
+; CHECK: fun205
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <8 x i64> %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext <8 x i1> %cmp to <8 x i16>
+}
+
+define <8 x i32> @fun206(<8 x i64> %val1, <8 x i64> %val2) {
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i32>
+ ret <8 x i32> %v
+
+; CHECK: fun206
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <8 x i64> %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext <8 x i1> %cmp to <8 x i32>
+}
+
+define <8 x i64> @fun207(<8 x i64> %val1, <8 x i64> %val2) {
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i64>
+ ret <8 x i64> %v
+
+; CHECK: fun207
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <8 x i64> %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext <8 x i1> %cmp to <8 x i64>
+}
+
+define <8 x i8> @fun208(<8 x float> %val1, <8 x float> %val2) {
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i8>
+ ret <8 x i8> %v
+
+; CHECK: fun208
+; CHECK: cost of 20 for instruction: %cmp = fcmp ogt <8 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <8 x i1> %cmp to <8 x i8>
+}
+
+define <8 x i16> @fun209(<8 x float> %val1, <8 x float> %val2) {
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %v
+
+; CHECK: fun209
+; CHECK: cost of 20 for instruction: %cmp = fcmp ogt <8 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <8 x i1> %cmp to <8 x i16>
+}
+
+define <8 x i32> @fun210(<8 x float> %val1, <8 x float> %val2) {
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i32>
+ ret <8 x i32> %v
+
+; CHECK: fun210
+; CHECK: cost of 20 for instruction: %cmp = fcmp ogt <8 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <8 x i1> %cmp to <8 x i32>
+}
+
+define <8 x i64> @fun211(<8 x float> %val1, <8 x float> %val2) {
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i64>
+ ret <8 x i64> %v
+
+; CHECK: fun211
+; CHECK: cost of 20 for instruction: %cmp = fcmp ogt <8 x float> %val1, %val2
+; CHECK: cost of 11 for instruction: %v = zext <8 x i1> %cmp to <8 x i64>
+}
+
+define <8 x i8> @fun212(<8 x double> %val1, <8 x double> %val2) {
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i8>
+ ret <8 x i8> %v
+
+; CHECK: fun212
+; CHECK: cost of 4 for instruction: %cmp = fcmp ogt <8 x double> %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext <8 x i1> %cmp to <8 x i8>
+}
+
+define <8 x i16> @fun213(<8 x double> %val1, <8 x double> %val2) {
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %v
+
+; CHECK: fun213
+; CHECK: cost of 4 for instruction: %cmp = fcmp ogt <8 x double> %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext <8 x i1> %cmp to <8 x i16>
+}
+
+define <8 x i32> @fun214(<8 x double> %val1, <8 x double> %val2) {
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i32>
+ ret <8 x i32> %v
+
+; CHECK: fun214
+; CHECK: cost of 4 for instruction: %cmp = fcmp ogt <8 x double> %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext <8 x i1> %cmp to <8 x i32>
+}
+
+define <8 x i64> @fun215(<8 x double> %val1, <8 x double> %val2) {
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %v = zext <8 x i1> %cmp to <8 x i64>
+ ret <8 x i64> %v
+
+; CHECK: fun215
+; CHECK: cost of 4 for instruction: %cmp = fcmp ogt <8 x double> %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext <8 x i1> %cmp to <8 x i64>
+}
+
+define <16 x i8> @fun216(<16 x i8> %val1, <16 x i8> %val2) {
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %v
+
+; CHECK: fun216
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <16 x i8> %val1, %val2
+; CHECK: cost of 1 for instruction: %v = zext <16 x i1> %cmp to <16 x i8>
+}
+
+define <16 x i16> @fun217(<16 x i8> %val1, <16 x i8> %val2) {
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i16>
+ ret <16 x i16> %v
+
+; CHECK: fun217
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <16 x i8> %val1, %val2
+; CHECK: cost of 5 for instruction: %v = zext <16 x i1> %cmp to <16 x i16>
+}
+
+define <16 x i32> @fun218(<16 x i8> %val1, <16 x i8> %val2) {
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i32>
+ ret <16 x i32> %v
+
+; CHECK: fun218
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <16 x i8> %val1, %val2
+; CHECK: cost of 15 for instruction: %v = zext <16 x i1> %cmp to <16 x i32>
+}
+
+define <16 x i64> @fun219(<16 x i8> %val1, <16 x i8> %val2) {
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i64>
+ ret <16 x i64> %v
+
+; CHECK: fun219
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <16 x i8> %val1, %val2
+; CHECK: cost of 39 for instruction: %v = zext <16 x i1> %cmp to <16 x i64>
+}
+
+define <16 x i8> @fun220(<16 x i16> %val1, <16 x i16> %val2) {
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %v
+
+; CHECK: fun220
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <16 x i16> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <16 x i1> %cmp to <16 x i8>
+}
+
+define <16 x i16> @fun221(<16 x i16> %val1, <16 x i16> %val2) {
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i16>
+ ret <16 x i16> %v
+
+; CHECK: fun221
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <16 x i16> %val1, %val2
+; CHECK: cost of 2 for instruction: %v = zext <16 x i1> %cmp to <16 x i16>
+}
+
+define <16 x i32> @fun222(<16 x i16> %val1, <16 x i16> %val2) {
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i32>
+ ret <16 x i32> %v
+
+; CHECK: fun222
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <16 x i16> %val1, %val2
+; CHECK: cost of 11 for instruction: %v = zext <16 x i1> %cmp to <16 x i32>
+}
+
+define <16 x i64> @fun223(<16 x i16> %val1, <16 x i16> %val2) {
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i64>
+ ret <16 x i64> %v
+
+; CHECK: fun223
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <16 x i16> %val1, %val2
+; CHECK: cost of 31 for instruction: %v = zext <16 x i1> %cmp to <16 x i64>
+}
+
+define <16 x i8> @fun224(<16 x i32> %val1, <16 x i32> %val2) {
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %v
+
+; CHECK: fun224
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <16 x i32> %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext <16 x i1> %cmp to <16 x i8>
+}
+
+define <16 x i16> @fun225(<16 x i32> %val1, <16 x i32> %val2) {
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i16>
+ ret <16 x i16> %v
+
+; CHECK: fun225
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <16 x i32> %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext <16 x i1> %cmp to <16 x i16>
+}
+
+define <16 x i32> @fun226(<16 x i32> %val1, <16 x i32> %val2) {
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i32>
+ ret <16 x i32> %v
+
+; CHECK: fun226
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <16 x i32> %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext <16 x i1> %cmp to <16 x i32>
+}
+
+define <16 x i64> @fun227(<16 x i32> %val1, <16 x i32> %val2) {
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i64>
+ ret <16 x i64> %v
+
+; CHECK: fun227
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <16 x i32> %val1, %val2
+; CHECK: cost of 23 for instruction: %v = zext <16 x i1> %cmp to <16 x i64>
+}
+
+define <16 x i8> @fun228(<16 x i64> %val1, <16 x i64> %val2) {
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %v
+
+; CHECK: fun228
+; CHECK: cost of 8 for instruction: %cmp = icmp eq <16 x i64> %val1, %val2
+; CHECK: cost of 8 for instruction: %v = zext <16 x i1> %cmp to <16 x i8>
+}
+
+define <16 x i16> @fun229(<16 x i64> %val1, <16 x i64> %val2) {
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i16>
+ ret <16 x i16> %v
+
+; CHECK: fun229
+; CHECK: cost of 8 for instruction: %cmp = icmp eq <16 x i64> %val1, %val2
+; CHECK: cost of 8 for instruction: %v = zext <16 x i1> %cmp to <16 x i16>
+}
+
+define <16 x i32> @fun230(<16 x i64> %val1, <16 x i64> %val2) {
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i32>
+ ret <16 x i32> %v
+
+; CHECK: fun230
+; CHECK: cost of 8 for instruction: %cmp = icmp eq <16 x i64> %val1, %val2
+; CHECK: cost of 8 for instruction: %v = zext <16 x i1> %cmp to <16 x i32>
+}
+
+define <16 x i64> @fun231(<16 x i64> %val1, <16 x i64> %val2) {
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i64>
+ ret <16 x i64> %v
+
+; CHECK: fun231
+; CHECK: cost of 8 for instruction: %cmp = icmp eq <16 x i64> %val1, %val2
+; CHECK: cost of 8 for instruction: %v = zext <16 x i1> %cmp to <16 x i64>
+}
+
+define <16 x i8> @fun232(<16 x float> %val1, <16 x float> %val2) {
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %v
+
+; CHECK: fun232
+; CHECK: cost of 40 for instruction: %cmp = fcmp ogt <16 x float> %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext <16 x i1> %cmp to <16 x i8>
+}
+
+define <16 x i16> @fun233(<16 x float> %val1, <16 x float> %val2) {
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i16>
+ ret <16 x i16> %v
+
+; CHECK: fun233
+; CHECK: cost of 40 for instruction: %cmp = fcmp ogt <16 x float> %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext <16 x i1> %cmp to <16 x i16>
+}
+
+define <16 x i32> @fun234(<16 x float> %val1, <16 x float> %val2) {
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i32>
+ ret <16 x i32> %v
+
+; CHECK: fun234
+; CHECK: cost of 40 for instruction: %cmp = fcmp ogt <16 x float> %val1, %val2
+; CHECK: cost of 4 for instruction: %v = zext <16 x i1> %cmp to <16 x i32>
+}
+
+define <16 x i64> @fun235(<16 x float> %val1, <16 x float> %val2) {
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i64>
+ ret <16 x i64> %v
+
+; CHECK: fun235
+; CHECK: cost of 40 for instruction: %cmp = fcmp ogt <16 x float> %val1, %val2
+; CHECK: cost of 23 for instruction: %v = zext <16 x i1> %cmp to <16 x i64>
+}
+
+define <16 x i8> @fun236(<16 x double> %val1, <16 x double> %val2) {
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %v
+
+; CHECK: fun236
+; CHECK: cost of 8 for instruction: %cmp = fcmp ogt <16 x double> %val1, %val2
+; CHECK: cost of 8 for instruction: %v = zext <16 x i1> %cmp to <16 x i8>
+}
+
+define <16 x i16> @fun237(<16 x double> %val1, <16 x double> %val2) {
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i16>
+ ret <16 x i16> %v
+
+; CHECK: fun237
+; CHECK: cost of 8 for instruction: %cmp = fcmp ogt <16 x double> %val1, %val2
+; CHECK: cost of 8 for instruction: %v = zext <16 x i1> %cmp to <16 x i16>
+}
+
+define <16 x i32> @fun238(<16 x double> %val1, <16 x double> %val2) {
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i32>
+ ret <16 x i32> %v
+
+; CHECK: fun238
+; CHECK: cost of 8 for instruction: %cmp = fcmp ogt <16 x double> %val1, %val2
+; CHECK: cost of 8 for instruction: %v = zext <16 x i1> %cmp to <16 x i32>
+}
+
+define <16 x i64> @fun239(<16 x double> %val1, <16 x double> %val2) {
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %v = zext <16 x i1> %cmp to <16 x i64>
+ ret <16 x i64> %v
+
+; CHECK: fun239
+; CHECK: cost of 8 for instruction: %cmp = fcmp ogt <16 x double> %val1, %val2
+; CHECK: cost of 8 for instruction: %v = zext <16 x i1> %cmp to <16 x i64>
+}
+
diff --git a/test/Analysis/CostModel/SystemZ/cmpsel.ll b/test/Analysis/CostModel/SystemZ/cmpsel.ll
new file mode 100644
index 000000000000..de72ec3a8b4e
--- /dev/null
+++ b/test/Analysis/CostModel/SystemZ/cmpsel.ll
@@ -0,0 +1,1987 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=systemz-unknown -mcpu=z13 | FileCheck %s
+;
+; Note: Cost estimates for a select of an fp type are somewhat arbitrary,
+; since it involves a conditional jump.
+; Note: Vector fp32 is not directly supported, so its cost estimates are not
+; exact (but the absolute values are large).
+
+define i8 @fun0(i8 %val1, i8 %val2,
+ i8 %val3, i8 %val4) {
+ %cmp = icmp eq i8 %val1, %val2
+ %sel = select i1 %cmp, i8 %val3, i8 %val4
+ ret i8 %sel
+
+; CHECK: fun0
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i8 %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i8 %val3, i8 %val4
+}
+
+define i16 @fun1(i8 %val1, i8 %val2,
+ i16 %val3, i16 %val4) {
+ %cmp = icmp eq i8 %val1, %val2
+ %sel = select i1 %cmp, i16 %val3, i16 %val4
+ ret i16 %sel
+
+; CHECK: fun1
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i8 %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i16 %val3, i16 %val4
+}
+
+define i32 @fun2(i8 %val1, i8 %val2,
+ i32 %val3, i32 %val4) {
+ %cmp = icmp eq i8 %val1, %val2
+ %sel = select i1 %cmp, i32 %val3, i32 %val4
+ ret i32 %sel
+
+; CHECK: fun2
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i8 %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i32 %val3, i32 %val4
+}
+
+define i64 @fun3(i8 %val1, i8 %val2,
+ i64 %val3, i64 %val4) {
+ %cmp = icmp eq i8 %val1, %val2
+ %sel = select i1 %cmp, i64 %val3, i64 %val4
+ ret i64 %sel
+
+; CHECK: fun3
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i8 %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i64 %val3, i64 %val4
+}
+
+define float @fun4(i8 %val1, i8 %val2,
+ float %val3, float %val4) {
+ %cmp = icmp eq i8 %val1, %val2
+ %sel = select i1 %cmp, float %val3, float %val4
+ ret float %sel
+
+; CHECK: fun4
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i8 %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select i1 %cmp, float %val3, float %val4
+}
+
+define double @fun5(i8 %val1, i8 %val2,
+ double %val3, double %val4) {
+ %cmp = icmp eq i8 %val1, %val2
+ %sel = select i1 %cmp, double %val3, double %val4
+ ret double %sel
+
+; CHECK: fun5
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i8 %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select i1 %cmp, double %val3, double %val4
+}
+
+define i8 @fun6(i16 %val1, i16 %val2,
+ i8 %val3, i8 %val4) {
+ %cmp = icmp eq i16 %val1, %val2
+ %sel = select i1 %cmp, i8 %val3, i8 %val4
+ ret i8 %sel
+
+; CHECK: fun6
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i16 %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i8 %val3, i8 %val4
+}
+
+define i16 @fun7(i16 %val1, i16 %val2,
+ i16 %val3, i16 %val4) {
+ %cmp = icmp eq i16 %val1, %val2
+ %sel = select i1 %cmp, i16 %val3, i16 %val4
+ ret i16 %sel
+
+; CHECK: fun7
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i16 %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i16 %val3, i16 %val4
+}
+
+define i32 @fun8(i16 %val1, i16 %val2,
+ i32 %val3, i32 %val4) {
+ %cmp = icmp eq i16 %val1, %val2
+ %sel = select i1 %cmp, i32 %val3, i32 %val4
+ ret i32 %sel
+
+; CHECK: fun8
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i16 %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i32 %val3, i32 %val4
+}
+
+define i64 @fun9(i16 %val1, i16 %val2,
+ i64 %val3, i64 %val4) {
+ %cmp = icmp eq i16 %val1, %val2
+ %sel = select i1 %cmp, i64 %val3, i64 %val4
+ ret i64 %sel
+
+; CHECK: fun9
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i16 %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i64 %val3, i64 %val4
+}
+
+define float @fun10(i16 %val1, i16 %val2,
+ float %val3, float %val4) {
+ %cmp = icmp eq i16 %val1, %val2
+ %sel = select i1 %cmp, float %val3, float %val4
+ ret float %sel
+
+; CHECK: fun10
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i16 %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select i1 %cmp, float %val3, float %val4
+}
+
+define double @fun11(i16 %val1, i16 %val2,
+ double %val3, double %val4) {
+ %cmp = icmp eq i16 %val1, %val2
+ %sel = select i1 %cmp, double %val3, double %val4
+ ret double %sel
+
+; CHECK: fun11
+; CHECK: cost of 3 for instruction: %cmp = icmp eq i16 %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select i1 %cmp, double %val3, double %val4
+}
+
+define i8 @fun12(i32 %val1, i32 %val2,
+ i8 %val3, i8 %val4) {
+ %cmp = icmp eq i32 %val1, %val2
+ %sel = select i1 %cmp, i8 %val3, i8 %val4
+ ret i8 %sel
+
+; CHECK: fun12
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i8 %val3, i8 %val4
+}
+
+define i16 @fun13(i32 %val1, i32 %val2,
+ i16 %val3, i16 %val4) {
+ %cmp = icmp eq i32 %val1, %val2
+ %sel = select i1 %cmp, i16 %val3, i16 %val4
+ ret i16 %sel
+
+; CHECK: fun13
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i16 %val3, i16 %val4
+}
+
+define i32 @fun14(i32 %val1, i32 %val2,
+ i32 %val3, i32 %val4) {
+ %cmp = icmp eq i32 %val1, %val2
+ %sel = select i1 %cmp, i32 %val3, i32 %val4
+ ret i32 %sel
+
+; CHECK: fun14
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i32 %val3, i32 %val4
+}
+
+define i64 @fun15(i32 %val1, i32 %val2,
+ i64 %val3, i64 %val4) {
+ %cmp = icmp eq i32 %val1, %val2
+ %sel = select i1 %cmp, i64 %val3, i64 %val4
+ ret i64 %sel
+
+; CHECK: fun15
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i64 %val3, i64 %val4
+}
+
+define float @fun16(i32 %val1, i32 %val2,
+ float %val3, float %val4) {
+ %cmp = icmp eq i32 %val1, %val2
+ %sel = select i1 %cmp, float %val3, float %val4
+ ret float %sel
+
+; CHECK: fun16
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i32 %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select i1 %cmp, float %val3, float %val4
+}
+
+define double @fun17(i32 %val1, i32 %val2,
+ double %val3, double %val4) {
+ %cmp = icmp eq i32 %val1, %val2
+ %sel = select i1 %cmp, double %val3, double %val4
+ ret double %sel
+
+; CHECK: fun17
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i32 %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select i1 %cmp, double %val3, double %val4
+}
+
+define i8 @fun18(i64 %val1, i64 %val2,
+ i8 %val3, i8 %val4) {
+ %cmp = icmp eq i64 %val1, %val2
+ %sel = select i1 %cmp, i8 %val3, i8 %val4
+ ret i8 %sel
+
+; CHECK: fun18
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i8 %val3, i8 %val4
+}
+
+define i16 @fun19(i64 %val1, i64 %val2,
+ i16 %val3, i16 %val4) {
+ %cmp = icmp eq i64 %val1, %val2
+ %sel = select i1 %cmp, i16 %val3, i16 %val4
+ ret i16 %sel
+
+; CHECK: fun19
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i16 %val3, i16 %val4
+}
+
+define i32 @fun20(i64 %val1, i64 %val2,
+ i32 %val3, i32 %val4) {
+ %cmp = icmp eq i64 %val1, %val2
+ %sel = select i1 %cmp, i32 %val3, i32 %val4
+ ret i32 %sel
+
+; CHECK: fun20
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i32 %val3, i32 %val4
+}
+
+define i64 @fun21(i64 %val1, i64 %val2,
+ i64 %val3, i64 %val4) {
+ %cmp = icmp eq i64 %val1, %val2
+ %sel = select i1 %cmp, i64 %val3, i64 %val4
+ ret i64 %sel
+
+; CHECK: fun21
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i64 %val3, i64 %val4
+}
+
+define float @fun22(i64 %val1, i64 %val2,
+ float %val3, float %val4) {
+ %cmp = icmp eq i64 %val1, %val2
+ %sel = select i1 %cmp, float %val3, float %val4
+ ret float %sel
+
+; CHECK: fun22
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i64 %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select i1 %cmp, float %val3, float %val4
+}
+
+define double @fun23(i64 %val1, i64 %val2,
+ double %val3, double %val4) {
+ %cmp = icmp eq i64 %val1, %val2
+ %sel = select i1 %cmp, double %val3, double %val4
+ ret double %sel
+
+; CHECK: fun23
+; CHECK: cost of 1 for instruction: %cmp = icmp eq i64 %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select i1 %cmp, double %val3, double %val4
+}
+
+define <2 x i8> @fun24(<2 x i8> %val1, <2 x i8> %val2,
+ <2 x i8> %val3, <2 x i8> %val4) {
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i8> %val3, <2 x i8> %val4
+ ret <2 x i8> %sel
+
+; CHECK: fun24
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i8> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <2 x i1> %cmp, <2 x i8> %val3, <2 x i8> %val4
+}
+
+define <2 x i16> @fun25(<2 x i8> %val1, <2 x i8> %val2,
+ <2 x i16> %val3, <2 x i16> %val4) {
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i16> %val3, <2 x i16> %val4
+ ret <2 x i16> %sel
+
+; CHECK: fun25
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i8> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x i16> %val3, <2 x i16> %val4
+}
+
+define <2 x i32> @fun26(<2 x i8> %val1, <2 x i8> %val2,
+ <2 x i32> %val3, <2 x i32> %val4) {
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i32> %val3, <2 x i32> %val4
+ ret <2 x i32> %sel
+
+; CHECK: fun26
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i8> %val1, %val2
+; CHECK: cost of 3 for instruction: %sel = select <2 x i1> %cmp, <2 x i32> %val3, <2 x i32> %val4
+}
+
+define <2 x i64> @fun27(<2 x i8> %val1, <2 x i8> %val2,
+ <2 x i64> %val3, <2 x i64> %val4) {
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %sel
+
+; CHECK: fun27
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i8> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+}
+
+define <2 x float> @fun28(<2 x i8> %val1, <2 x i8> %val2,
+ <2 x float> %val3, <2 x float> %val4) {
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x float> %val3, <2 x float> %val4
+ ret <2 x float> %sel
+
+; CHECK: fun28
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i8> %val1, %val2
+; CHECK: cost of 3 for instruction: %sel = select <2 x i1> %cmp, <2 x float> %val3, <2 x float> %val4
+}
+
+define <2 x double> @fun29(<2 x i8> %val1, <2 x i8> %val2,
+ <2 x double> %val3, <2 x double> %val4) {
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
+ ret <2 x double> %sel
+
+; CHECK: fun29
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i8> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
+}
+
+define <2 x i8> @fun30(<2 x i16> %val1, <2 x i16> %val2,
+ <2 x i8> %val3, <2 x i8> %val4) {
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i8> %val3, <2 x i8> %val4
+ ret <2 x i8> %sel
+
+; CHECK: fun30
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i16> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x i8> %val3, <2 x i8> %val4
+}
+
+define <2 x i16> @fun31(<2 x i16> %val1, <2 x i16> %val2,
+ <2 x i16> %val3, <2 x i16> %val4) {
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i16> %val3, <2 x i16> %val4
+ ret <2 x i16> %sel
+
+; CHECK: fun31
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i16> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <2 x i1> %cmp, <2 x i16> %val3, <2 x i16> %val4
+}
+
+define <2 x i32> @fun32(<2 x i16> %val1, <2 x i16> %val2,
+ <2 x i32> %val3, <2 x i32> %val4) {
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i32> %val3, <2 x i32> %val4
+ ret <2 x i32> %sel
+
+; CHECK: fun32
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i16> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x i32> %val3, <2 x i32> %val4
+}
+
+define <2 x i64> @fun33(<2 x i16> %val1, <2 x i16> %val2,
+ <2 x i64> %val3, <2 x i64> %val4) {
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %sel
+
+; CHECK: fun33
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i16> %val1, %val2
+; CHECK: cost of 3 for instruction: %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+}
+
+define <2 x float> @fun34(<2 x i16> %val1, <2 x i16> %val2,
+ <2 x float> %val3, <2 x float> %val4) {
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x float> %val3, <2 x float> %val4
+ ret <2 x float> %sel
+
+; CHECK: fun34
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i16> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x float> %val3, <2 x float> %val4
+}
+
+define <2 x double> @fun35(<2 x i16> %val1, <2 x i16> %val2,
+ <2 x double> %val3, <2 x double> %val4) {
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
+ ret <2 x double> %sel
+
+; CHECK: fun35
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i16> %val1, %val2
+; CHECK: cost of 3 for instruction: %sel = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
+}
+
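+; icmp eq with <2 x i32> operands (fun36-fun41).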
+define <2 x i8> @fun36(<2 x i32> %val1, <2 x i32> %val2,
+ <2 x i8> %val3, <2 x i8> %val4) {
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i8> %val3, <2 x i8> %val4
+ ret <2 x i8> %sel
+
+; CHECK: fun36
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x i8> %val3, <2 x i8> %val4
+}
+
+define <2 x i16> @fun37(<2 x i32> %val1, <2 x i32> %val2,
+ <2 x i16> %val3, <2 x i16> %val4) {
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i16> %val3, <2 x i16> %val4
+ ret <2 x i16> %sel
+
+; CHECK: fun37
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x i16> %val3, <2 x i16> %val4
+}
+
+define <2 x i32> @fun38(<2 x i32> %val1, <2 x i32> %val2,
+ <2 x i32> %val3, <2 x i32> %val4) {
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i32> %val3, <2 x i32> %val4
+ ret <2 x i32> %sel
+
+; CHECK: fun38
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i32> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <2 x i1> %cmp, <2 x i32> %val3, <2 x i32> %val4
+}
+
+define <2 x i64> @fun39(<2 x i32> %val1, <2 x i32> %val2,
+ <2 x i64> %val3, <2 x i64> %val4) {
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %sel
+
+; CHECK: fun39
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+}
+
+define <2 x float> @fun40(<2 x i32> %val1, <2 x i32> %val2,
+ <2 x float> %val3, <2 x float> %val4) {
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x float> %val3, <2 x float> %val4
+ ret <2 x float> %sel
+
+; CHECK: fun40
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i32> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <2 x i1> %cmp, <2 x float> %val3, <2 x float> %val4
+}
+
+define <2 x double> @fun41(<2 x i32> %val1, <2 x i32> %val2,
+ <2 x double> %val3, <2 x double> %val4) {
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
+ ret <2 x double> %sel
+
+; CHECK: fun41
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
+}
+
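+; icmp eq with <2 x i64> operands (fun42-fun47).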
+define <2 x i8> @fun42(<2 x i64> %val1, <2 x i64> %val2,
+ <2 x i8> %val3, <2 x i8> %val4) {
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i8> %val3, <2 x i8> %val4
+ ret <2 x i8> %sel
+
+; CHECK: fun42
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i64> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x i8> %val3, <2 x i8> %val4
+}
+
+define <2 x i16> @fun43(<2 x i64> %val1, <2 x i64> %val2,
+ <2 x i16> %val3, <2 x i16> %val4) {
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i16> %val3, <2 x i16> %val4
+ ret <2 x i16> %sel
+
+; CHECK: fun43
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i64> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x i16> %val3, <2 x i16> %val4
+}
+
+define <2 x i32> @fun44(<2 x i64> %val1, <2 x i64> %val2,
+ <2 x i32> %val3, <2 x i32> %val4) {
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i32> %val3, <2 x i32> %val4
+ ret <2 x i32> %sel
+
+; CHECK: fun44
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i64> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x i32> %val3, <2 x i32> %val4
+}
+
+define <2 x i64> @fun45(<2 x i64> %val1, <2 x i64> %val2,
+ <2 x i64> %val3, <2 x i64> %val4) {
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %sel
+
+; CHECK: fun45
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i64> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+}
+
+define <2 x float> @fun46(<2 x i64> %val1, <2 x i64> %val2,
+ <2 x float> %val3, <2 x float> %val4) {
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x float> %val3, <2 x float> %val4
+ ret <2 x float> %sel
+
+; CHECK: fun46
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i64> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x float> %val3, <2 x float> %val4
+}
+
+define <2 x double> @fun47(<2 x i64> %val1, <2 x i64> %val2,
+ <2 x double> %val3, <2 x double> %val4) {
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
+ ret <2 x double> %sel
+
+; CHECK: fun47
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <2 x i64> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
+}
+
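+; 4-element vectors: icmp eq with <4 x i8> operands (fun48-fun53).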
+define <4 x i8> @fun48(<4 x i8> %val1, <4 x i8> %val2,
+ <4 x i8> %val3, <4 x i8> %val4) {
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i8> %val3, <4 x i8> %val4
+ ret <4 x i8> %sel
+
+; CHECK: fun48
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i8> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <4 x i1> %cmp, <4 x i8> %val3, <4 x i8> %val4
+}
+
+define <4 x i16> @fun49(<4 x i8> %val1, <4 x i8> %val2,
+ <4 x i16> %val3, <4 x i16> %val4) {
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i16> %val3, <4 x i16> %val4
+ ret <4 x i16> %sel
+
+; CHECK: fun49
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i8> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x i16> %val3, <4 x i16> %val4
+}
+
+define <4 x i32> @fun50(<4 x i8> %val1, <4 x i8> %val2,
+ <4 x i32> %val3, <4 x i32> %val4) {
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %sel
+
+; CHECK: fun50
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i8> %val1, %val2
+; CHECK: cost of 3 for instruction: %sel = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+}
+
+define <4 x i64> @fun51(<4 x i8> %val1, <4 x i8> %val2,
+ <4 x i64> %val3, <4 x i64> %val4) {
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i64> %val3, <4 x i64> %val4
+ ret <4 x i64> %sel
+
+; CHECK: fun51
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i8> %val1, %val2
+; CHECK: cost of 9 for instruction: %sel = select <4 x i1> %cmp, <4 x i64> %val3, <4 x i64> %val4
+}
+
+define <4 x float> @fun52(<4 x i8> %val1, <4 x i8> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %sel
+
+; CHECK: fun52
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i8> %val1, %val2
+; CHECK: cost of 3 for instruction: %sel = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+}
+
+define <4 x double> @fun53(<4 x i8> %val1, <4 x i8> %val2,
+ <4 x double> %val3, <4 x double> %val4) {
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x double> %val3, <4 x double> %val4
+ ret <4 x double> %sel
+
+; CHECK: fun53
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i8> %val1, %val2
+; CHECK: cost of 9 for instruction: %sel = select <4 x i1> %cmp, <4 x double> %val3, <4 x double> %val4
+}
+
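+; icmp eq with <4 x i16> operands (fun54-fun59).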
+define <4 x i8> @fun54(<4 x i16> %val1, <4 x i16> %val2,
+ <4 x i8> %val3, <4 x i8> %val4) {
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i8> %val3, <4 x i8> %val4
+ ret <4 x i8> %sel
+
+; CHECK: fun54
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i16> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x i8> %val3, <4 x i8> %val4
+}
+
+define <4 x i16> @fun55(<4 x i16> %val1, <4 x i16> %val2,
+ <4 x i16> %val3, <4 x i16> %val4) {
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i16> %val3, <4 x i16> %val4
+ ret <4 x i16> %sel
+
+; CHECK: fun55
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i16> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <4 x i1> %cmp, <4 x i16> %val3, <4 x i16> %val4
+}
+
+define <4 x i32> @fun56(<4 x i16> %val1, <4 x i16> %val2,
+ <4 x i32> %val3, <4 x i32> %val4) {
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %sel
+
+; CHECK: fun56
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i16> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+}
+
+define <4 x i64> @fun57(<4 x i16> %val1, <4 x i16> %val2,
+ <4 x i64> %val3, <4 x i64> %val4) {
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i64> %val3, <4 x i64> %val4
+ ret <4 x i64> %sel
+
+; CHECK: fun57
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i16> %val1, %val2
+; CHECK: cost of 7 for instruction: %sel = select <4 x i1> %cmp, <4 x i64> %val3, <4 x i64> %val4
+}
+
+define <4 x float> @fun58(<4 x i16> %val1, <4 x i16> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %sel
+
+; CHECK: fun58
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i16> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+}
+
+define <4 x double> @fun59(<4 x i16> %val1, <4 x i16> %val2,
+ <4 x double> %val3, <4 x double> %val4) {
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x double> %val3, <4 x double> %val4
+ ret <4 x double> %sel
+
+; CHECK: fun59
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i16> %val1, %val2
+; CHECK: cost of 7 for instruction: %sel = select <4 x i1> %cmp, <4 x double> %val3, <4 x double> %val4
+}
+
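+; icmp eq with <4 x i32> operands (fun60-fun65).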
+define <4 x i8> @fun60(<4 x i32> %val1, <4 x i32> %val2,
+ <4 x i8> %val3, <4 x i8> %val4) {
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i8> %val3, <4 x i8> %val4
+ ret <4 x i8> %sel
+
+; CHECK: fun60
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x i8> %val3, <4 x i8> %val4
+}
+
+define <4 x i16> @fun61(<4 x i32> %val1, <4 x i32> %val2,
+ <4 x i16> %val3, <4 x i16> %val4) {
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i16> %val3, <4 x i16> %val4
+ ret <4 x i16> %sel
+
+; CHECK: fun61
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x i16> %val3, <4 x i16> %val4
+}
+
+define <4 x i32> @fun62(<4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> %val3, <4 x i32> %val4) {
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %sel
+
+; CHECK: fun62
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i32> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+}
+
+define <4 x i64> @fun63(<4 x i32> %val1, <4 x i32> %val2,
+ <4 x i64> %val3, <4 x i64> %val4) {
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i64> %val3, <4 x i64> %val4
+ ret <4 x i64> %sel
+
+; CHECK: fun63
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i32> %val1, %val2
+; CHECK: cost of 5 for instruction: %sel = select <4 x i1> %cmp, <4 x i64> %val3, <4 x i64> %val4
+}
+
+define <4 x float> @fun64(<4 x i32> %val1, <4 x i32> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %sel
+
+; CHECK: fun64
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i32> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+}
+
+define <4 x double> @fun65(<4 x i32> %val1, <4 x i32> %val2,
+ <4 x double> %val3, <4 x double> %val4) {
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x double> %val3, <4 x double> %val4
+ ret <4 x double> %sel
+
+; CHECK: fun65
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <4 x i32> %val1, %val2
+; CHECK: cost of 5 for instruction: %sel = select <4 x i1> %cmp, <4 x double> %val3, <4 x double> %val4
+}
+
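+; icmp eq with <4 x i64> operands (fun66-fun71).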
+define <4 x i8> @fun66(<4 x i64> %val1, <4 x i64> %val2,
+ <4 x i8> %val3, <4 x i8> %val4) {
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i8> %val3, <4 x i8> %val4
+ ret <4 x i8> %sel
+
+; CHECK: fun66
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <4 x i64> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x i8> %val3, <4 x i8> %val4
+}
+
+define <4 x i16> @fun67(<4 x i64> %val1, <4 x i64> %val2,
+ <4 x i16> %val3, <4 x i16> %val4) {
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i16> %val3, <4 x i16> %val4
+ ret <4 x i16> %sel
+
+; CHECK: fun67
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <4 x i64> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x i16> %val3, <4 x i16> %val4
+}
+
+define <4 x i32> @fun68(<4 x i64> %val1, <4 x i64> %val2,
+ <4 x i32> %val3, <4 x i32> %val4) {
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %sel
+
+; CHECK: fun68
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <4 x i64> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+}
+
+define <4 x i64> @fun69(<4 x i64> %val1, <4 x i64> %val2,
+ <4 x i64> %val3, <4 x i64> %val4) {
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i64> %val3, <4 x i64> %val4
+ ret <4 x i64> %sel
+
+; CHECK: fun69
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <4 x i64> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x i64> %val3, <4 x i64> %val4
+}
+
+define <4 x float> @fun70(<4 x i64> %val1, <4 x i64> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %sel
+
+; CHECK: fun70
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <4 x i64> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+}
+
+define <4 x double> @fun71(<4 x i64> %val1, <4 x i64> %val2,
+ <4 x double> %val3, <4 x double> %val4) {
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x double> %val3, <4 x double> %val4
+ ret <4 x double> %sel
+
+; CHECK: fun71
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <4 x i64> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x double> %val3, <4 x double> %val4
+}
+
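+; 8-element vectors: icmp eq with <8 x i8> operands (fun72-fun77).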
+define <8 x i8> @fun72(<8 x i8> %val1, <8 x i8> %val2,
+ <8 x i8> %val3, <8 x i8> %val4) {
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i8> %val3, <8 x i8> %val4
+ ret <8 x i8> %sel
+
+; CHECK: fun72
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i8> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <8 x i1> %cmp, <8 x i8> %val3, <8 x i8> %val4
+}
+
+define <8 x i16> @fun73(<8 x i8> %val1, <8 x i8> %val2,
+ <8 x i16> %val3, <8 x i16> %val4) {
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %sel
+
+; CHECK: fun73
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i8> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+}
+
+define <8 x i32> @fun74(<8 x i8> %val1, <8 x i8> %val2,
+ <8 x i32> %val3, <8 x i32> %val4) {
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i32> %val3, <8 x i32> %val4
+ ret <8 x i32> %sel
+
+; CHECK: fun74
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i8> %val1, %val2
+; CHECK: cost of 7 for instruction: %sel = select <8 x i1> %cmp, <8 x i32> %val3, <8 x i32> %val4
+}
+
+define <8 x i64> @fun75(<8 x i8> %val1, <8 x i8> %val2,
+ <8 x i64> %val3, <8 x i64> %val4) {
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i64> %val3, <8 x i64> %val4
+ ret <8 x i64> %sel
+
+; CHECK: fun75
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i8> %val1, %val2
+; CHECK: cost of 19 for instruction: %sel = select <8 x i1> %cmp, <8 x i64> %val3, <8 x i64> %val4
+}
+
+define <8 x float> @fun76(<8 x i8> %val1, <8 x i8> %val2,
+ <8 x float> %val3, <8 x float> %val4) {
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x float> %val3, <8 x float> %val4
+ ret <8 x float> %sel
+
+; CHECK: fun76
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i8> %val1, %val2
+; CHECK: cost of 7 for instruction: %sel = select <8 x i1> %cmp, <8 x float> %val3, <8 x float> %val4
+}
+
+define <8 x double> @fun77(<8 x i8> %val1, <8 x i8> %val2,
+ <8 x double> %val3, <8 x double> %val4) {
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x double> %val3, <8 x double> %val4
+ ret <8 x double> %sel
+
+; CHECK: fun77
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i8> %val1, %val2
+; CHECK: cost of 19 for instruction: %sel = select <8 x i1> %cmp, <8 x double> %val3, <8 x double> %val4
+}
+
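+; icmp eq with <8 x i16> operands (fun78-fun83).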
+define <8 x i8> @fun78(<8 x i16> %val1, <8 x i16> %val2,
+ <8 x i8> %val3, <8 x i8> %val4) {
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i8> %val3, <8 x i8> %val4
+ ret <8 x i8> %sel
+
+; CHECK: fun78
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i16> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <8 x i1> %cmp, <8 x i8> %val3, <8 x i8> %val4
+}
+
+define <8 x i16> @fun79(<8 x i16> %val1, <8 x i16> %val2,
+ <8 x i16> %val3, <8 x i16> %val4) {
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %sel
+
+; CHECK: fun79
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i16> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+}
+
+define <8 x i32> @fun80(<8 x i16> %val1, <8 x i16> %val2,
+ <8 x i32> %val3, <8 x i32> %val4) {
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i32> %val3, <8 x i32> %val4
+ ret <8 x i32> %sel
+
+; CHECK: fun80
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i16> %val1, %val2
+; CHECK: cost of 5 for instruction: %sel = select <8 x i1> %cmp, <8 x i32> %val3, <8 x i32> %val4
+}
+
+define <8 x i64> @fun81(<8 x i16> %val1, <8 x i16> %val2,
+ <8 x i64> %val3, <8 x i64> %val4) {
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i64> %val3, <8 x i64> %val4
+ ret <8 x i64> %sel
+
+; CHECK: fun81
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i16> %val1, %val2
+; CHECK: cost of 15 for instruction: %sel = select <8 x i1> %cmp, <8 x i64> %val3, <8 x i64> %val4
+}
+
+define <8 x float> @fun82(<8 x i16> %val1, <8 x i16> %val2,
+ <8 x float> %val3, <8 x float> %val4) {
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x float> %val3, <8 x float> %val4
+ ret <8 x float> %sel
+
+; CHECK: fun82
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i16> %val1, %val2
+; CHECK: cost of 5 for instruction: %sel = select <8 x i1> %cmp, <8 x float> %val3, <8 x float> %val4
+}
+
+define <8 x double> @fun83(<8 x i16> %val1, <8 x i16> %val2,
+ <8 x double> %val3, <8 x double> %val4) {
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x double> %val3, <8 x double> %val4
+ ret <8 x double> %sel
+
+; CHECK: fun83
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <8 x i16> %val1, %val2
+; CHECK: cost of 15 for instruction: %sel = select <8 x i1> %cmp, <8 x double> %val3, <8 x double> %val4
+}
+
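+; icmp eq with <8 x i32> operands (fun84-fun89).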
+define <8 x i8> @fun84(<8 x i32> %val1, <8 x i32> %val2,
+ <8 x i8> %val3, <8 x i8> %val4) {
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i8> %val3, <8 x i8> %val4
+ ret <8 x i8> %sel
+
+; CHECK: fun84
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <8 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <8 x i1> %cmp, <8 x i8> %val3, <8 x i8> %val4
+}
+
+define <8 x i16> @fun85(<8 x i32> %val1, <8 x i32> %val2,
+ <8 x i16> %val3, <8 x i16> %val4) {
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %sel
+
+; CHECK: fun85
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <8 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+}
+
+define <8 x i32> @fun86(<8 x i32> %val1, <8 x i32> %val2,
+ <8 x i32> %val3, <8 x i32> %val4) {
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i32> %val3, <8 x i32> %val4
+ ret <8 x i32> %sel
+
+; CHECK: fun86
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <8 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <8 x i1> %cmp, <8 x i32> %val3, <8 x i32> %val4
+}
+
+define <8 x i64> @fun87(<8 x i32> %val1, <8 x i32> %val2,
+ <8 x i64> %val3, <8 x i64> %val4) {
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i64> %val3, <8 x i64> %val4
+ ret <8 x i64> %sel
+
+; CHECK: fun87
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <8 x i32> %val1, %val2
+; CHECK: cost of 11 for instruction: %sel = select <8 x i1> %cmp, <8 x i64> %val3, <8 x i64> %val4
+}
+
+define <8 x float> @fun88(<8 x i32> %val1, <8 x i32> %val2,
+ <8 x float> %val3, <8 x float> %val4) {
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x float> %val3, <8 x float> %val4
+ ret <8 x float> %sel
+
+; CHECK: fun88
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <8 x i32> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <8 x i1> %cmp, <8 x float> %val3, <8 x float> %val4
+}
+
+define <8 x double> @fun89(<8 x i32> %val1, <8 x i32> %val2,
+ <8 x double> %val3, <8 x double> %val4) {
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x double> %val3, <8 x double> %val4
+ ret <8 x double> %sel
+
+; CHECK: fun89
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <8 x i32> %val1, %val2
+; CHECK: cost of 11 for instruction: %sel = select <8 x i1> %cmp, <8 x double> %val3, <8 x double> %val4
+}
+
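+; icmp eq with <8 x i64> operands (fun90-fun95).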
+define <8 x i8> @fun90(<8 x i64> %val1, <8 x i64> %val2,
+ <8 x i8> %val3, <8 x i8> %val4) {
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i8> %val3, <8 x i8> %val4
+ ret <8 x i8> %sel
+
+; CHECK: fun90
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <8 x i64> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <8 x i1> %cmp, <8 x i8> %val3, <8 x i8> %val4
+}
+
+define <8 x i16> @fun91(<8 x i64> %val1, <8 x i64> %val2,
+ <8 x i16> %val3, <8 x i16> %val4) {
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %sel
+
+; CHECK: fun91
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <8 x i64> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+}
+
+define <8 x i32> @fun92(<8 x i64> %val1, <8 x i64> %val2,
+ <8 x i32> %val3, <8 x i32> %val4) {
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i32> %val3, <8 x i32> %val4
+ ret <8 x i32> %sel
+
+; CHECK: fun92
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <8 x i64> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <8 x i1> %cmp, <8 x i32> %val3, <8 x i32> %val4
+}
+
+define <8 x i64> @fun93(<8 x i64> %val1, <8 x i64> %val2,
+ <8 x i64> %val3, <8 x i64> %val4) {
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i64> %val3, <8 x i64> %val4
+ ret <8 x i64> %sel
+
+; CHECK: fun93
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <8 x i64> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <8 x i1> %cmp, <8 x i64> %val3, <8 x i64> %val4
+}
+
+define <8 x float> @fun94(<8 x i64> %val1, <8 x i64> %val2,
+ <8 x float> %val3, <8 x float> %val4) {
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x float> %val3, <8 x float> %val4
+ ret <8 x float> %sel
+
+; CHECK: fun94
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <8 x i64> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <8 x i1> %cmp, <8 x float> %val3, <8 x float> %val4
+}
+
+define <8 x double> @fun95(<8 x i64> %val1, <8 x i64> %val2,
+ <8 x double> %val3, <8 x double> %val4) {
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x double> %val3, <8 x double> %val4
+ ret <8 x double> %sel
+
+; CHECK: fun95
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <8 x i64> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <8 x i1> %cmp, <8 x double> %val3, <8 x double> %val4
+}
+
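+; 16-element vectors: icmp eq with <16 x i8> operands (fun96-fun101).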
+define <16 x i8> @fun96(<16 x i8> %val1, <16 x i8> %val2,
+ <16 x i8> %val3, <16 x i8> %val4) {
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %sel
+
+; CHECK: fun96
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <16 x i8> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+}
+
+define <16 x i16> @fun97(<16 x i8> %val1, <16 x i8> %val2,
+ <16 x i16> %val3, <16 x i16> %val4) {
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i16> %val3, <16 x i16> %val4
+ ret <16 x i16> %sel
+
+; CHECK: fun97
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <16 x i8> %val1, %val2
+; CHECK: cost of 5 for instruction: %sel = select <16 x i1> %cmp, <16 x i16> %val3, <16 x i16> %val4
+}
+
+define <16 x i32> @fun98(<16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> %val3, <16 x i32> %val4) {
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i32> %val3, <16 x i32> %val4
+ ret <16 x i32> %sel
+
+; CHECK: fun98
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <16 x i8> %val1, %val2
+; CHECK: cost of 15 for instruction: %sel = select <16 x i1> %cmp, <16 x i32> %val3, <16 x i32> %val4
+}
+
+define <16 x i64> @fun99(<16 x i8> %val1, <16 x i8> %val2,
+ <16 x i64> %val3, <16 x i64> %val4) {
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i64> %val3, <16 x i64> %val4
+ ret <16 x i64> %sel
+
+; CHECK: fun99
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <16 x i8> %val1, %val2
+; CHECK: cost of 39 for instruction: %sel = select <16 x i1> %cmp, <16 x i64> %val3, <16 x i64> %val4
+}
+
+define <16 x float> @fun100(<16 x i8> %val1, <16 x i8> %val2,
+ <16 x float> %val3, <16 x float> %val4) {
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x float> %val3, <16 x float> %val4
+ ret <16 x float> %sel
+
+; CHECK: fun100
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <16 x i8> %val1, %val2
+; CHECK: cost of 15 for instruction: %sel = select <16 x i1> %cmp, <16 x float> %val3, <16 x float> %val4
+}
+
+define <16 x double> @fun101(<16 x i8> %val1, <16 x i8> %val2,
+ <16 x double> %val3, <16 x double> %val4) {
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x double> %val3, <16 x double> %val4
+ ret <16 x double> %sel
+
+; CHECK: fun101
+; CHECK: cost of 1 for instruction: %cmp = icmp eq <16 x i8> %val1, %val2
+; CHECK: cost of 39 for instruction: %sel = select <16 x i1> %cmp, <16 x double> %val3, <16 x double> %val4
+}
+
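+; icmp eq with <16 x i16> operands (fun102-fun107).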
+define <16 x i8> @fun102(<16 x i16> %val1, <16 x i16> %val2,
+ <16 x i8> %val3, <16 x i8> %val4) {
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %sel
+
+; CHECK: fun102
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <16 x i16> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+}
+
+define <16 x i16> @fun103(<16 x i16> %val1, <16 x i16> %val2,
+ <16 x i16> %val3, <16 x i16> %val4) {
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i16> %val3, <16 x i16> %val4
+ ret <16 x i16> %sel
+
+; CHECK: fun103
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <16 x i16> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <16 x i1> %cmp, <16 x i16> %val3, <16 x i16> %val4
+}
+
+define <16 x i32> @fun104(<16 x i16> %val1, <16 x i16> %val2,
+ <16 x i32> %val3, <16 x i32> %val4) {
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i32> %val3, <16 x i32> %val4
+ ret <16 x i32> %sel
+
+; CHECK: fun104
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <16 x i16> %val1, %val2
+; CHECK: cost of 11 for instruction: %sel = select <16 x i1> %cmp, <16 x i32> %val3, <16 x i32> %val4
+}
+
+define <16 x i64> @fun105(<16 x i16> %val1, <16 x i16> %val2,
+ <16 x i64> %val3, <16 x i64> %val4) {
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i64> %val3, <16 x i64> %val4
+ ret <16 x i64> %sel
+
+; CHECK: fun105
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <16 x i16> %val1, %val2
+; CHECK: cost of 31 for instruction: %sel = select <16 x i1> %cmp, <16 x i64> %val3, <16 x i64> %val4
+}
+
+define <16 x float> @fun106(<16 x i16> %val1, <16 x i16> %val2,
+ <16 x float> %val3, <16 x float> %val4) {
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x float> %val3, <16 x float> %val4
+ ret <16 x float> %sel
+
+; CHECK: fun106
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <16 x i16> %val1, %val2
+; CHECK: cost of 11 for instruction: %sel = select <16 x i1> %cmp, <16 x float> %val3, <16 x float> %val4
+}
+
+define <16 x double> @fun107(<16 x i16> %val1, <16 x i16> %val2,
+ <16 x double> %val3, <16 x double> %val4) {
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x double> %val3, <16 x double> %val4
+ ret <16 x double> %sel
+
+; CHECK: fun107
+; CHECK: cost of 2 for instruction: %cmp = icmp eq <16 x i16> %val1, %val2
+; CHECK: cost of 31 for instruction: %sel = select <16 x i1> %cmp, <16 x double> %val3, <16 x double> %val4
+}
+
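+; icmp eq with <16 x i32> operands (fun108-fun113).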
+define <16 x i8> @fun108(<16 x i32> %val1, <16 x i32> %val2,
+ <16 x i8> %val3, <16 x i8> %val4) {
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %sel
+
+; CHECK: fun108
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <16 x i32> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+}
+
+define <16 x i16> @fun109(<16 x i32> %val1, <16 x i32> %val2,
+ <16 x i16> %val3, <16 x i16> %val4) {
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i16> %val3, <16 x i16> %val4
+ ret <16 x i16> %sel
+
+; CHECK: fun109
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <16 x i32> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <16 x i1> %cmp, <16 x i16> %val3, <16 x i16> %val4
+}
+
+define <16 x i32> @fun110(<16 x i32> %val1, <16 x i32> %val2,
+ <16 x i32> %val3, <16 x i32> %val4) {
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i32> %val3, <16 x i32> %val4
+ ret <16 x i32> %sel
+
+; CHECK: fun110
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <16 x i32> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <16 x i1> %cmp, <16 x i32> %val3, <16 x i32> %val4
+}
+
+define <16 x i64> @fun111(<16 x i32> %val1, <16 x i32> %val2,
+ <16 x i64> %val3, <16 x i64> %val4) {
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i64> %val3, <16 x i64> %val4
+ ret <16 x i64> %sel
+
+; CHECK: fun111
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <16 x i32> %val1, %val2
+; CHECK: cost of 23 for instruction: %sel = select <16 x i1> %cmp, <16 x i64> %val3, <16 x i64> %val4
+}
+
+define <16 x float> @fun112(<16 x i32> %val1, <16 x i32> %val2,
+ <16 x float> %val3, <16 x float> %val4) {
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x float> %val3, <16 x float> %val4
+ ret <16 x float> %sel
+
+; CHECK: fun112
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <16 x i32> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <16 x i1> %cmp, <16 x float> %val3, <16 x float> %val4
+}
+
+define <16 x double> @fun113(<16 x i32> %val1, <16 x i32> %val2,
+ <16 x double> %val3, <16 x double> %val4) {
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x double> %val3, <16 x double> %val4
+ ret <16 x double> %sel
+
+; CHECK: fun113
+; CHECK: cost of 4 for instruction: %cmp = icmp eq <16 x i32> %val1, %val2
+; CHECK: cost of 23 for instruction: %sel = select <16 x i1> %cmp, <16 x double> %val3, <16 x double> %val4
+}
+
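+; icmp eq with <16 x i64> operands (fun114-fun119).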
+define <16 x i8> @fun114(<16 x i64> %val1, <16 x i64> %val2,
+ <16 x i8> %val3, <16 x i8> %val4) {
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %sel
+
+; CHECK: fun114
+; CHECK: cost of 8 for instruction: %cmp = icmp eq <16 x i64> %val1, %val2
+; CHECK: cost of 8 for instruction: %sel = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+}
+
+define <16 x i16> @fun115(<16 x i64> %val1, <16 x i64> %val2,
+ <16 x i16> %val3, <16 x i16> %val4) {
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i16> %val3, <16 x i16> %val4
+ ret <16 x i16> %sel
+
+; CHECK: fun115
+; CHECK: cost of 8 for instruction: %cmp = icmp eq <16 x i64> %val1, %val2
+; CHECK: cost of 8 for instruction: %sel = select <16 x i1> %cmp, <16 x i16> %val3, <16 x i16> %val4
+}
+
+define <16 x i32> @fun116(<16 x i64> %val1, <16 x i64> %val2,
+ <16 x i32> %val3, <16 x i32> %val4) {
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i32> %val3, <16 x i32> %val4
+ ret <16 x i32> %sel
+
+; CHECK: fun116
+; CHECK: cost of 8 for instruction: %cmp = icmp eq <16 x i64> %val1, %val2
+; CHECK: cost of 8 for instruction: %sel = select <16 x i1> %cmp, <16 x i32> %val3, <16 x i32> %val4
+}
+
+define <16 x i64> @fun117(<16 x i64> %val1, <16 x i64> %val2,
+ <16 x i64> %val3, <16 x i64> %val4) {
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i64> %val3, <16 x i64> %val4
+ ret <16 x i64> %sel
+
+; CHECK: fun117
+; CHECK: cost of 8 for instruction: %cmp = icmp eq <16 x i64> %val1, %val2
+; CHECK: cost of 8 for instruction: %sel = select <16 x i1> %cmp, <16 x i64> %val3, <16 x i64> %val4
+}
+
+define <16 x float> @fun118(<16 x i64> %val1, <16 x i64> %val2,
+ <16 x float> %val3, <16 x float> %val4) {
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x float> %val3, <16 x float> %val4
+ ret <16 x float> %sel
+
+; CHECK: fun118
+; CHECK: cost of 8 for instruction: %cmp = icmp eq <16 x i64> %val1, %val2
+; CHECK: cost of 8 for instruction: %sel = select <16 x i1> %cmp, <16 x float> %val3, <16 x float> %val4
+}
+
+define <16 x double> @fun119(<16 x i64> %val1, <16 x i64> %val2,
+ <16 x double> %val3, <16 x double> %val4) {
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x double> %val3, <16 x double> %val4
+ ret <16 x double> %sel
+
+; CHECK: fun119
+; CHECK: cost of 8 for instruction: %cmp = icmp eq <16 x i64> %val1, %val2
+; CHECK: cost of 8 for instruction: %sel = select <16 x i1> %cmp, <16 x double> %val3, <16 x double> %val4
+}
+
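+; Floating-point compares begin here: fcmp ogt with scalar float operands (fun120-fun125).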
+define i8 @fun120(float %val1, float %val2,
+ i8 %val3, i8 %val4) {
+ %cmp = fcmp ogt float %val1, %val2
+ %sel = select i1 %cmp, i8 %val3, i8 %val4
+ ret i8 %sel
+
+; CHECK: fun120
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i8 %val3, i8 %val4
+}
+
+define i16 @fun121(float %val1, float %val2,
+ i16 %val3, i16 %val4) {
+ %cmp = fcmp ogt float %val1, %val2
+ %sel = select i1 %cmp, i16 %val3, i16 %val4
+ ret i16 %sel
+
+; CHECK: fun121
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i16 %val3, i16 %val4
+}
+
+define i32 @fun122(float %val1, float %val2,
+ i32 %val3, i32 %val4) {
+ %cmp = fcmp ogt float %val1, %val2
+ %sel = select i1 %cmp, i32 %val3, i32 %val4
+ ret i32 %sel
+
+; CHECK: fun122
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i32 %val3, i32 %val4
+}
+
+define i64 @fun123(float %val1, float %val2,
+ i64 %val3, i64 %val4) {
+ %cmp = fcmp ogt float %val1, %val2
+ %sel = select i1 %cmp, i64 %val3, i64 %val4
+ ret i64 %sel
+
+; CHECK: fun123
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i64 %val3, i64 %val4
+}
+
+define float @fun124(float %val1, float %val2,
+ float %val3, float %val4) {
+ %cmp = fcmp ogt float %val1, %val2
+ %sel = select i1 %cmp, float %val3, float %val4
+ ret float %sel
+
+; CHECK: fun124
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt float %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select i1 %cmp, float %val3, float %val4
+}
+
+define double @fun125(float %val1, float %val2,
+ double %val3, double %val4) {
+ %cmp = fcmp ogt float %val1, %val2
+ %sel = select i1 %cmp, double %val3, double %val4
+ ret double %sel
+
+; CHECK: fun125
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt float %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select i1 %cmp, double %val3, double %val4
+}
+
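+; fcmp ogt with scalar double operands (fun126-fun131).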
+define i8 @fun126(double %val1, double %val2,
+ i8 %val3, i8 %val4) {
+ %cmp = fcmp ogt double %val1, %val2
+ %sel = select i1 %cmp, i8 %val3, i8 %val4
+ ret i8 %sel
+
+; CHECK: fun126
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i8 %val3, i8 %val4
+}
+
+define i16 @fun127(double %val1, double %val2,
+ i16 %val3, i16 %val4) {
+ %cmp = fcmp ogt double %val1, %val2
+ %sel = select i1 %cmp, i16 %val3, i16 %val4
+ ret i16 %sel
+
+; CHECK: fun127
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i16 %val3, i16 %val4
+}
+
+define i32 @fun128(double %val1, double %val2,
+ i32 %val3, i32 %val4) {
+ %cmp = fcmp ogt double %val1, %val2
+ %sel = select i1 %cmp, i32 %val3, i32 %val4
+ ret i32 %sel
+
+; CHECK: fun128
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i32 %val3, i32 %val4
+}
+
+define i64 @fun129(double %val1, double %val2,
+ i64 %val3, i64 %val4) {
+ %cmp = fcmp ogt double %val1, %val2
+ %sel = select i1 %cmp, i64 %val3, i64 %val4
+ ret i64 %sel
+
+; CHECK: fun129
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select i1 %cmp, i64 %val3, i64 %val4
+}
+
+define float @fun130(double %val1, double %val2,
+ float %val3, float %val4) {
+ %cmp = fcmp ogt double %val1, %val2
+ %sel = select i1 %cmp, float %val3, float %val4
+ ret float %sel
+
+; CHECK: fun130
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt double %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select i1 %cmp, float %val3, float %val4
+}
+
+define double @fun131(double %val1, double %val2,
+ double %val3, double %val4) {
+ %cmp = fcmp ogt double %val1, %val2
+ %sel = select i1 %cmp, double %val3, double %val4
+ ret double %sel
+
+; CHECK: fun131
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt double %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select i1 %cmp, double %val3, double %val4
+}
+
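+; Vector floating-point compares: fcmp ogt with <2 x float> operands (fun132-fun137).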
+define <2 x i8> @fun132(<2 x float> %val1, <2 x float> %val2,
+ <2 x i8> %val3, <2 x i8> %val4) {
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i8> %val3, <2 x i8> %val4
+ ret <2 x i8> %sel
+
+; CHECK: fun132
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <2 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x i8> %val3, <2 x i8> %val4
+}
+
+define <2 x i16> @fun133(<2 x float> %val1, <2 x float> %val2,
+ <2 x i16> %val3, <2 x i16> %val4) {
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i16> %val3, <2 x i16> %val4
+ ret <2 x i16> %sel
+
+; CHECK: fun133
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <2 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x i16> %val3, <2 x i16> %val4
+}
+
+define <2 x i32> @fun134(<2 x float> %val1, <2 x float> %val2,
+ <2 x i32> %val3, <2 x i32> %val4) {
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i32> %val3, <2 x i32> %val4
+ ret <2 x i32> %sel
+
+; CHECK: fun134
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <2 x float> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <2 x i1> %cmp, <2 x i32> %val3, <2 x i32> %val4
+}
+
+define <2 x i64> @fun135(<2 x float> %val1, <2 x float> %val2,
+ <2 x i64> %val3, <2 x i64> %val4) {
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %sel
+
+; CHECK: fun135
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <2 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+}
+
+define <2 x float> @fun136(<2 x float> %val1, <2 x float> %val2,
+ <2 x float> %val3, <2 x float> %val4) {
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x float> %val3, <2 x float> %val4
+ ret <2 x float> %sel
+
+; CHECK: fun136
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <2 x float> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <2 x i1> %cmp, <2 x float> %val3, <2 x float> %val4
+}
+
+define <2 x double> @fun137(<2 x float> %val1, <2 x float> %val2,
+ <2 x double> %val3, <2 x double> %val4) {
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
+ ret <2 x double> %sel
+
+; CHECK: fun137
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <2 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
+}
+
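+; fcmp ogt with <2 x double> operands (fun138-fun143).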
+define <2 x i8> @fun138(<2 x double> %val1, <2 x double> %val2,
+ <2 x i8> %val3, <2 x i8> %val4) {
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i8> %val3, <2 x i8> %val4
+ ret <2 x i8> %sel
+
+; CHECK: fun138
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt <2 x double> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x i8> %val3, <2 x i8> %val4
+}
+
+define <2 x i16> @fun139(<2 x double> %val1, <2 x double> %val2,
+ <2 x i16> %val3, <2 x i16> %val4) {
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i16> %val3, <2 x i16> %val4
+ ret <2 x i16> %sel
+
+; CHECK: fun139
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt <2 x double> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x i16> %val3, <2 x i16> %val4
+}
+
+define <2 x i32> @fun140(<2 x double> %val1, <2 x double> %val2,
+ <2 x i32> %val3, <2 x i32> %val4) {
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i32> %val3, <2 x i32> %val4
+ ret <2 x i32> %sel
+
+; CHECK: fun140
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt <2 x double> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x i32> %val3, <2 x i32> %val4
+}
+
+define <2 x i64> @fun141(<2 x double> %val1, <2 x double> %val2,
+ <2 x i64> %val3, <2 x i64> %val4) {
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %sel
+
+; CHECK: fun141
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt <2 x double> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+}
+
+define <2 x float> @fun142(<2 x double> %val1, <2 x double> %val2,
+ <2 x float> %val3, <2 x float> %val4) {
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x float> %val3, <2 x float> %val4
+ ret <2 x float> %sel
+
+; CHECK: fun142
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt <2 x double> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <2 x i1> %cmp, <2 x float> %val3, <2 x float> %val4
+}
+
+define <2 x double> @fun143(<2 x double> %val1, <2 x double> %val2,
+ <2 x double> %val3, <2 x double> %val4) {
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
+ ret <2 x double> %sel
+
+; CHECK: fun143
+; CHECK: cost of 1 for instruction: %cmp = fcmp ogt <2 x double> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
+}
+
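+; fcmp ogt with <4 x float> operands (fun144-fun149).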
+define <4 x i8> @fun144(<4 x float> %val1, <4 x float> %val2,
+ <4 x i8> %val3, <4 x i8> %val4) {
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i8> %val3, <4 x i8> %val4
+ ret <4 x i8> %sel
+
+; CHECK: fun144
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <4 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x i8> %val3, <4 x i8> %val4
+}
+
+define <4 x i16> @fun145(<4 x float> %val1, <4 x float> %val2,
+ <4 x i16> %val3, <4 x i16> %val4) {
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i16> %val3, <4 x i16> %val4
+ ret <4 x i16> %sel
+
+; CHECK: fun145
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <4 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x i16> %val3, <4 x i16> %val4
+}
+
+define <4 x i32> @fun146(<4 x float> %val1, <4 x float> %val2,
+ <4 x i32> %val3, <4 x i32> %val4) {
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %sel
+
+; CHECK: fun146
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <4 x float> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+}
+
+define <4 x i64> @fun147(<4 x float> %val1, <4 x float> %val2,
+ <4 x i64> %val3, <4 x i64> %val4) {
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i64> %val3, <4 x i64> %val4
+ ret <4 x i64> %sel
+
+; CHECK: fun147
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <4 x float> %val1, %val2
+; CHECK: cost of 5 for instruction: %sel = select <4 x i1> %cmp, <4 x i64> %val3, <4 x i64> %val4
+}
+
+define <4 x float> @fun148(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %sel
+
+; CHECK: fun148
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <4 x float> %val1, %val2
+; CHECK: cost of 1 for instruction: %sel = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+}
+
+define <4 x double> @fun149(<4 x float> %val1, <4 x float> %val2,
+ <4 x double> %val3, <4 x double> %val4) {
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x double> %val3, <4 x double> %val4
+ ret <4 x double> %sel
+
+; CHECK: fun149
+; CHECK: cost of 10 for instruction: %cmp = fcmp ogt <4 x float> %val1, %val2
+; CHECK: cost of 5 for instruction: %sel = select <4 x i1> %cmp, <4 x double> %val3, <4 x double> %val4
+}
+
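+; fcmp ogt with <4 x double> operands (fun150-fun155).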
+define <4 x i8> @fun150(<4 x double> %val1, <4 x double> %val2,
+ <4 x i8> %val3, <4 x i8> %val4) {
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i8> %val3, <4 x i8> %val4
+ ret <4 x i8> %sel
+
+; CHECK: fun150
+; CHECK: cost of 2 for instruction: %cmp = fcmp ogt <4 x double> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x i8> %val3, <4 x i8> %val4
+}
+
+define <4 x i16> @fun151(<4 x double> %val1, <4 x double> %val2,
+ <4 x i16> %val3, <4 x i16> %val4) {
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i16> %val3, <4 x i16> %val4
+ ret <4 x i16> %sel
+
+; CHECK: fun151
+; CHECK: cost of 2 for instruction: %cmp = fcmp ogt <4 x double> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x i16> %val3, <4 x i16> %val4
+}
+
+define <4 x i32> @fun152(<4 x double> %val1, <4 x double> %val2,
+ <4 x i32> %val3, <4 x i32> %val4) {
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %sel
+
+; CHECK: fun152
+; CHECK: cost of 2 for instruction: %cmp = fcmp ogt <4 x double> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+}
+
+define <4 x i64> @fun153(<4 x double> %val1, <4 x double> %val2,
+ <4 x i64> %val3, <4 x i64> %val4) {
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i64> %val3, <4 x i64> %val4
+ ret <4 x i64> %sel
+
+; CHECK: fun153
+; CHECK: cost of 2 for instruction: %cmp = fcmp ogt <4 x double> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x i64> %val3, <4 x i64> %val4
+}
+
+define <4 x float> @fun154(<4 x double> %val1, <4 x double> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %sel
+
+; CHECK: fun154
+; CHECK: cost of 2 for instruction: %cmp = fcmp ogt <4 x double> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+}
+
+define <4 x double> @fun155(<4 x double> %val1, <4 x double> %val2,
+ <4 x double> %val3, <4 x double> %val4) {
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x double> %val3, <4 x double> %val4
+ ret <4 x double> %sel
+
+; CHECK: fun155
+; CHECK: cost of 2 for instruction: %cmp = fcmp ogt <4 x double> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <4 x i1> %cmp, <4 x double> %val3, <4 x double> %val4
+}
+
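+; fcmp ogt with <8 x float> operands (fun156-fun161).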
+define <8 x i8> @fun156(<8 x float> %val1, <8 x float> %val2,
+ <8 x i8> %val3, <8 x i8> %val4) {
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i8> %val3, <8 x i8> %val4
+ ret <8 x i8> %sel
+
+; CHECK: fun156
+; CHECK: cost of 20 for instruction: %cmp = fcmp ogt <8 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <8 x i1> %cmp, <8 x i8> %val3, <8 x i8> %val4
+}
+
+define <8 x i16> @fun157(<8 x float> %val1, <8 x float> %val2,
+ <8 x i16> %val3, <8 x i16> %val4) {
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %sel
+
+; CHECK: fun157
+; CHECK: cost of 20 for instruction: %cmp = fcmp ogt <8 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+}
+
+define <8 x i32> @fun158(<8 x float> %val1, <8 x float> %val2,
+ <8 x i32> %val3, <8 x i32> %val4) {
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i32> %val3, <8 x i32> %val4
+ ret <8 x i32> %sel
+
+; CHECK: fun158
+; CHECK: cost of 20 for instruction: %cmp = fcmp ogt <8 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <8 x i1> %cmp, <8 x i32> %val3, <8 x i32> %val4
+}
+
+define <8 x i64> @fun159(<8 x float> %val1, <8 x float> %val2,
+ <8 x i64> %val3, <8 x i64> %val4) {
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i64> %val3, <8 x i64> %val4
+ ret <8 x i64> %sel
+
+; CHECK: fun159
+; CHECK: cost of 20 for instruction: %cmp = fcmp ogt <8 x float> %val1, %val2
+; CHECK: cost of 11 for instruction: %sel = select <8 x i1> %cmp, <8 x i64> %val3, <8 x i64> %val4
+}
+
+define <8 x float> @fun160(<8 x float> %val1, <8 x float> %val2,
+ <8 x float> %val3, <8 x float> %val4) {
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x float> %val3, <8 x float> %val4
+ ret <8 x float> %sel
+
+; CHECK: fun160
+; CHECK: cost of 20 for instruction: %cmp = fcmp ogt <8 x float> %val1, %val2
+; CHECK: cost of 2 for instruction: %sel = select <8 x i1> %cmp, <8 x float> %val3, <8 x float> %val4
+}
+
+define <8 x double> @fun161(<8 x float> %val1, <8 x float> %val2,
+ <8 x double> %val3, <8 x double> %val4) {
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x double> %val3, <8 x double> %val4
+ ret <8 x double> %sel
+
+; CHECK: fun161
+; CHECK: cost of 20 for instruction: %cmp = fcmp ogt <8 x float> %val1, %val2
+; CHECK: cost of 11 for instruction: %sel = select <8 x i1> %cmp, <8 x double> %val3, <8 x double> %val4
+}
+
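+; fcmp ogt with <8 x double> operands (fun162-fun167).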
+define <8 x i8> @fun162(<8 x double> %val1, <8 x double> %val2,
+ <8 x i8> %val3, <8 x i8> %val4) {
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i8> %val3, <8 x i8> %val4
+ ret <8 x i8> %sel
+
+; CHECK: fun162
+; CHECK: cost of 4 for instruction: %cmp = fcmp ogt <8 x double> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <8 x i1> %cmp, <8 x i8> %val3, <8 x i8> %val4
+}
+
+define <8 x i16> @fun163(<8 x double> %val1, <8 x double> %val2,
+ <8 x i16> %val3, <8 x i16> %val4) {
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %sel
+
+; CHECK: fun163
+; CHECK: cost of 4 for instruction: %cmp = fcmp ogt <8 x double> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+}
+
+define <8 x i32> @fun164(<8 x double> %val1, <8 x double> %val2,
+ <8 x i32> %val3, <8 x i32> %val4) {
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i32> %val3, <8 x i32> %val4
+ ret <8 x i32> %sel
+
+; CHECK: fun164
+; CHECK: cost of 4 for instruction: %cmp = fcmp ogt <8 x double> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <8 x i1> %cmp, <8 x i32> %val3, <8 x i32> %val4
+}
+
+define <8 x i64> @fun165(<8 x double> %val1, <8 x double> %val2,
+ <8 x i64> %val3, <8 x i64> %val4) {
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i64> %val3, <8 x i64> %val4
+ ret <8 x i64> %sel
+
+; CHECK: fun165
+; CHECK: cost of 4 for instruction: %cmp = fcmp ogt <8 x double> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <8 x i1> %cmp, <8 x i64> %val3, <8 x i64> %val4
+}
+
+define <8 x float> @fun166(<8 x double> %val1, <8 x double> %val2,
+ <8 x float> %val3, <8 x float> %val4) {
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x float> %val3, <8 x float> %val4
+ ret <8 x float> %sel
+
+; CHECK: fun166
+; CHECK: cost of 4 for instruction: %cmp = fcmp ogt <8 x double> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <8 x i1> %cmp, <8 x float> %val3, <8 x float> %val4
+}
+
+define <8 x double> @fun167(<8 x double> %val1, <8 x double> %val2,
+ <8 x double> %val3, <8 x double> %val4) {
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x double> %val3, <8 x double> %val4
+ ret <8 x double> %sel
+
+; CHECK: fun167
+; CHECK: cost of 4 for instruction: %cmp = fcmp ogt <8 x double> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <8 x i1> %cmp, <8 x double> %val3, <8 x double> %val4
+}
+
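+; fcmp ogt with <16 x float> operands (fun168-fun173).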
+define <16 x i8> @fun168(<16 x float> %val1, <16 x float> %val2,
+ <16 x i8> %val3, <16 x i8> %val4) {
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %sel
+
+; CHECK: fun168
+; CHECK: cost of 40 for instruction: %cmp = fcmp ogt <16 x float> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+}
+
+define <16 x i16> @fun169(<16 x float> %val1, <16 x float> %val2,
+ <16 x i16> %val3, <16 x i16> %val4) {
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i16> %val3, <16 x i16> %val4
+ ret <16 x i16> %sel
+
+; CHECK: fun169
+; CHECK: cost of 40 for instruction: %cmp = fcmp ogt <16 x float> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <16 x i1> %cmp, <16 x i16> %val3, <16 x i16> %val4
+}
+
+define <16 x i32> @fun170(<16 x float> %val1, <16 x float> %val2,
+ <16 x i32> %val3, <16 x i32> %val4) {
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i32> %val3, <16 x i32> %val4
+ ret <16 x i32> %sel
+
+; CHECK: fun170
+; CHECK: cost of 40 for instruction: %cmp = fcmp ogt <16 x float> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <16 x i1> %cmp, <16 x i32> %val3, <16 x i32> %val4
+}
+
+define <16 x i64> @fun171(<16 x float> %val1, <16 x float> %val2,
+ <16 x i64> %val3, <16 x i64> %val4) {
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i64> %val3, <16 x i64> %val4
+ ret <16 x i64> %sel
+
+; CHECK: fun171
+; CHECK: cost of 40 for instruction: %cmp = fcmp ogt <16 x float> %val1, %val2
+; CHECK: cost of 23 for instruction: %sel = select <16 x i1> %cmp, <16 x i64> %val3, <16 x i64> %val4
+}
+
+define <16 x float> @fun172(<16 x float> %val1, <16 x float> %val2,
+ <16 x float> %val3, <16 x float> %val4) {
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x float> %val3, <16 x float> %val4
+ ret <16 x float> %sel
+
+; CHECK: fun172
+; CHECK: cost of 40 for instruction: %cmp = fcmp ogt <16 x float> %val1, %val2
+; CHECK: cost of 4 for instruction: %sel = select <16 x i1> %cmp, <16 x float> %val3, <16 x float> %val4
+}
+
+define <16 x double> @fun173(<16 x float> %val1, <16 x float> %val2,
+ <16 x double> %val3, <16 x double> %val4) {
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x double> %val3, <16 x double> %val4
+ ret <16 x double> %sel
+
+; CHECK: fun173
+; CHECK: cost of 40 for instruction: %cmp = fcmp ogt <16 x float> %val1, %val2
+; CHECK: cost of 23 for instruction: %sel = select <16 x i1> %cmp, <16 x double> %val3, <16 x double> %val4
+}
+
+define <16 x i8> @fun174(<16 x double> %val1, <16 x double> %val2,
+ <16 x i8> %val3, <16 x i8> %val4) {
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %sel
+
+; CHECK: fun174
+; CHECK: cost of 8 for instruction: %cmp = fcmp ogt <16 x double> %val1, %val2
+; CHECK: cost of 8 for instruction: %sel = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+}
+
+define <16 x i16> @fun175(<16 x double> %val1, <16 x double> %val2,
+ <16 x i16> %val3, <16 x i16> %val4) {
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i16> %val3, <16 x i16> %val4
+ ret <16 x i16> %sel
+
+; CHECK: fun175
+; CHECK: cost of 8 for instruction: %cmp = fcmp ogt <16 x double> %val1, %val2
+; CHECK: cost of 8 for instruction: %sel = select <16 x i1> %cmp, <16 x i16> %val3, <16 x i16> %val4
+}
+
+define <16 x i32> @fun176(<16 x double> %val1, <16 x double> %val2,
+ <16 x i32> %val3, <16 x i32> %val4) {
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i32> %val3, <16 x i32> %val4
+ ret <16 x i32> %sel
+
+; CHECK: fun176
+; CHECK: cost of 8 for instruction: %cmp = fcmp ogt <16 x double> %val1, %val2
+; CHECK: cost of 8 for instruction: %sel = select <16 x i1> %cmp, <16 x i32> %val3, <16 x i32> %val4
+}
+
+define <16 x i64> @fun177(<16 x double> %val1, <16 x double> %val2,
+ <16 x i64> %val3, <16 x i64> %val4) {
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i64> %val3, <16 x i64> %val4
+ ret <16 x i64> %sel
+
+; CHECK: fun177
+; CHECK: cost of 8 for instruction: %cmp = fcmp ogt <16 x double> %val1, %val2
+; CHECK: cost of 8 for instruction: %sel = select <16 x i1> %cmp, <16 x i64> %val3, <16 x i64> %val4
+}
+
+define <16 x float> @fun178(<16 x double> %val1, <16 x double> %val2,
+ <16 x float> %val3, <16 x float> %val4) {
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x float> %val3, <16 x float> %val4
+ ret <16 x float> %sel
+
+; CHECK: fun178
+; CHECK: cost of 8 for instruction: %cmp = fcmp ogt <16 x double> %val1, %val2
+; CHECK: cost of 8 for instruction: %sel = select <16 x i1> %cmp, <16 x float> %val3, <16 x float> %val4
+}
+
+define <16 x double> @fun179(<16 x double> %val1, <16 x double> %val2,
+ <16 x double> %val3, <16 x double> %val4) {
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x double> %val3, <16 x double> %val4
+ ret <16 x double> %sel
+
+; CHECK: fun179
+; CHECK: cost of 8 for instruction: %cmp = fcmp ogt <16 x double> %val1, %val2
+; CHECK: cost of 8 for instruction: %sel = select <16 x i1> %cmp, <16 x double> %val3, <16 x double> %val4
+}
+
diff --git a/test/Analysis/CostModel/SystemZ/ext-load.ll b/test/Analysis/CostModel/SystemZ/ext-load.ll
new file mode 100644
index 000000000000..d3d501a6d297
--- /dev/null
+++ b/test/Analysis/CostModel/SystemZ/ext-load.ll
@@ -0,0 +1,56 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=systemz-unknown -mcpu=z13 | FileCheck %s
+;
+; Test that an extension of a load does not get an additional cost in cases
+; where the load performs the extension.
+
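+; For example, a pattern such as
+;
+;   %li8 = load i8, i8* undef
+;   %ext = sext i8 %li8 to i64
+;
+; is expected to lower to a single sign-extending load on SystemZ (LGB
+; here, LLGC for the corresponding zext), so the extension itself is
+; modeled as cost 0.
+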
+define void @sext() {
+ %li8 = load i8, i8* undef
+ sext i8 %li8 to i16
+ sext i8 %li8 to i32
+ sext i8 %li8 to i64
+
+ %li16 = load i16, i16* undef
+ sext i16 %li16 to i32
+ sext i16 %li16 to i64
+
+ %li32 = load i32, i32* undef
+ sext i32 %li32 to i64
+
+ ret void
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li8 = load i8, i8* undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %1 = sext i8 %li8 to i16
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %2 = sext i8 %li8 to i32
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %3 = sext i8 %li8 to i64
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li16 = load i16, i16* undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %4 = sext i16 %li16 to i32
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %5 = sext i16 %li16 to i64
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li32 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %6 = sext i32 %li32 to i64
+}
+
+define void @zext() {
+ %li8 = load i8, i8* undef
+ zext i8 %li8 to i16
+ zext i8 %li8 to i32
+ zext i8 %li8 to i64
+
+ %li16 = load i16, i16* undef
+ zext i16 %li16 to i32
+ zext i16 %li16 to i64
+
+ %li32 = load i32, i32* undef
+ zext i32 %li32 to i64
+
+ ret void
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li8 = load i8, i8* undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %1 = zext i8 %li8 to i16
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %2 = zext i8 %li8 to i32
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %3 = zext i8 %li8 to i64
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li16 = load i16, i16* undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %4 = zext i16 %li16 to i32
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %5 = zext i16 %li16 to i64
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li32 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %6 = zext i32 %li32 to i64
+}
diff --git a/test/Analysis/CostModel/SystemZ/fp-arith.ll b/test/Analysis/CostModel/SystemZ/fp-arith.ll
new file mode 100644
index 000000000000..08a7c291138f
--- /dev/null
+++ b/test/Analysis/CostModel/SystemZ/fp-arith.ll
@@ -0,0 +1,119 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=systemz-unknown -mcpu=z13 | FileCheck %s
+;
+; Note: The scalarized vector instruction costs do not include any
+; extracts, due to the undef operands.
+;
+; Note: FRem is implemented with a libcall, so it is not included here.
+
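+; As a sketch of what the scalarization costs model, a <2 x float> fadd
+; is treated roughly as
+;
+;   %a0 = extractelement <2 x float> %a, i32 0 ; free here (operand is undef)
+;   %b0 = extractelement <2 x float> %b, i32 0 ; free here (operand is undef)
+;   %r0 = fadd float %a0, %b0
+;   ; ... repeated for element 1, then the results are reinserted
+;
+; since the z13 vector FP instructions only handle double-precision
+; elements; <N x double> maps directly onto vector instructions.
+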
+define void @fadd() {
+ %res0 = fadd float undef, undef
+ %res1 = fadd double undef, undef
+ %res2 = fadd fp128 undef, undef
+ %res3 = fadd <2 x float> undef, undef
+ %res4 = fadd <2 x double> undef, undef
+ %res5 = fadd <4 x float> undef, undef
+ %res6 = fadd <4 x double> undef, undef
+ %res7 = fadd <8 x float> undef, undef
+ %res8 = fadd <8 x double> undef, undef
+ %res9 = fadd <16 x float> undef, undef
+ %res10 = fadd <16 x double> undef, undef
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res0 = fadd float undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res1 = fadd double undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res2 = fadd fp128 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res3 = fadd <2 x float> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res4 = fadd <2 x double> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res5 = fadd <4 x float> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res6 = fadd <4 x double> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %res7 = fadd <8 x float> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res8 = fadd <8 x double> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 32 for instruction: %res9 = fadd <16 x float> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res10 = fadd <16 x double> undef, undef
+
+ ret void
+}
+
+define void @fsub() {
+ %res0 = fsub float undef, undef
+ %res1 = fsub double undef, undef
+ %res2 = fsub fp128 undef, undef
+ %res3 = fsub <2 x float> undef, undef
+ %res4 = fsub <2 x double> undef, undef
+ %res5 = fsub <4 x float> undef, undef
+ %res6 = fsub <4 x double> undef, undef
+ %res7 = fsub <8 x float> undef, undef
+ %res8 = fsub <8 x double> undef, undef
+ %res9 = fsub <16 x float> undef, undef
+ %res10 = fsub <16 x double> undef, undef
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res0 = fsub float undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res1 = fsub double undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res2 = fsub fp128 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res3 = fsub <2 x float> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res4 = fsub <2 x double> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res5 = fsub <4 x float> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res6 = fsub <4 x double> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %res7 = fsub <8 x float> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res8 = fsub <8 x double> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 32 for instruction: %res9 = fsub <16 x float> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res10 = fsub <16 x double> undef, undef
+
+ ret void
+}
+
+define void @fmul() {
+ %res0 = fmul float undef, undef
+ %res1 = fmul double undef, undef
+ %res2 = fmul fp128 undef, undef
+ %res3 = fmul <2 x float> undef, undef
+ %res4 = fmul <2 x double> undef, undef
+ %res5 = fmul <4 x float> undef, undef
+ %res6 = fmul <4 x double> undef, undef
+ %res7 = fmul <8 x float> undef, undef
+ %res8 = fmul <8 x double> undef, undef
+ %res9 = fmul <16 x float> undef, undef
+ %res10 = fmul <16 x double> undef, undef
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res0 = fmul float undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res1 = fmul double undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res2 = fmul fp128 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res3 = fmul <2 x float> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res4 = fmul <2 x double> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res5 = fmul <4 x float> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res6 = fmul <4 x double> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %res7 = fmul <8 x float> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res8 = fmul <8 x double> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 32 for instruction: %res9 = fmul <16 x float> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res10 = fmul <16 x double> undef, undef
+
+ ret void
+}
+
+define void @fdiv() {
+ %res0 = fdiv float undef, undef
+ %res1 = fdiv double undef, undef
+ %res2 = fdiv fp128 undef, undef
+ %res3 = fdiv <2 x float> undef, undef
+ %res4 = fdiv <2 x double> undef, undef
+ %res5 = fdiv <4 x float> undef, undef
+ %res6 = fdiv <4 x double> undef, undef
+ %res7 = fdiv <8 x float> undef, undef
+ %res8 = fdiv <8 x double> undef, undef
+ %res9 = fdiv <16 x float> undef, undef
+ %res10 = fdiv <16 x double> undef, undef
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res0 = fdiv float undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res1 = fdiv double undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res2 = fdiv fp128 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res3 = fdiv <2 x float> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res4 = fdiv <2 x double> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res5 = fdiv <4 x float> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res6 = fdiv <4 x double> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %res7 = fdiv <8 x float> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res8 = fdiv <8 x double> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 32 for instruction: %res9 = fdiv <16 x float> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res10 = fdiv <16 x double> undef, undef
+
+ ret void
+}
+
diff --git a/test/Analysis/CostModel/SystemZ/fp-cast.ll b/test/Analysis/CostModel/SystemZ/fp-cast.ll
new file mode 100644
index 000000000000..4ea5a5033d73
--- /dev/null
+++ b/test/Analysis/CostModel/SystemZ/fp-cast.ll
@@ -0,0 +1,541 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=systemz-unknown -mcpu=z13 | FileCheck %s
+;
+; Note: The scalarized vector instruction costs do not include any
+; extracts, due to the undef operands.
+
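+; For example, the sitofp of <2 x i64> to <2 x double> below costs 1,
+; matching a single vector convert instruction (vcdgb), while casts
+; involving fp128 or 4-byte FP elements must be scalarized or split and
+; are therefore considerably more expensive.
+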
+define void @fpext() {
+ %v0 = fpext double undef to fp128
+ %v1 = fpext float undef to fp128
+ %v2 = fpext float undef to double
+ %v3 = fpext <2 x double> undef to <2 x fp128>
+ %v4 = fpext <2 x float> undef to <2 x fp128>
+ %v5 = fpext <2 x float> undef to <2 x double>
+ %v6 = fpext <4 x double> undef to <4 x fp128>
+ %v7 = fpext <4 x float> undef to <4 x fp128>
+ %v8 = fpext <4 x float> undef to <4 x double>
+ %v9 = fpext <8 x double> undef to <8 x fp128>
+ %v10 = fpext <8 x float> undef to <8 x fp128>
+ %v11 = fpext <8 x float> undef to <8 x double>
+ %v12 = fpext <16 x float> undef to <16 x double>
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v0 = fpext double undef to fp128
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v1 = fpext float undef to fp128
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v2 = fpext float undef to double
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v3 = fpext <2 x double> undef to <2 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v4 = fpext <2 x float> undef to <2 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v5 = fpext <2 x float> undef to <2 x double>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v6 = fpext <4 x double> undef to <4 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v7 = fpext <4 x float> undef to <4 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v8 = fpext <4 x float> undef to <4 x double>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v9 = fpext <8 x double> undef to <8 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v10 = fpext <8 x float> undef to <8 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v11 = fpext <8 x float> undef to <8 x double>
+; CHECK: Cost Model: Found an estimated cost of 32 for instruction: %v12 = fpext <16 x float> undef to <16 x double>
+
+ ret void
+}
+
+define void @fptosi() {
+ %v0 = fptosi fp128 undef to i64
+ %v1 = fptosi fp128 undef to i32
+ %v2 = fptosi fp128 undef to i16
+ %v3 = fptosi fp128 undef to i8
+ %v4 = fptosi double undef to i64
+ %v5 = fptosi double undef to i32
+ %v6 = fptosi double undef to i16
+ %v7 = fptosi double undef to i8
+ %v8 = fptosi float undef to i64
+ %v9 = fptosi float undef to i32
+ %v10 = fptosi float undef to i16
+ %v11 = fptosi float undef to i8
+ %v12 = fptosi <2 x fp128> undef to <2 x i64>
+ %v13 = fptosi <2 x fp128> undef to <2 x i32>
+ %v14 = fptosi <2 x fp128> undef to <2 x i16>
+ %v15 = fptosi <2 x fp128> undef to <2 x i8>
+ %v16 = fptosi <2 x double> undef to <2 x i64>
+ %v17 = fptosi <2 x double> undef to <2 x i32>
+ %v18 = fptosi <2 x double> undef to <2 x i16>
+ %v19 = fptosi <2 x double> undef to <2 x i8>
+ %v20 = fptosi <2 x float> undef to <2 x i64>
+ %v21 = fptosi <2 x float> undef to <2 x i32>
+ %v22 = fptosi <2 x float> undef to <2 x i16>
+ %v23 = fptosi <2 x float> undef to <2 x i8>
+ %v24 = fptosi <4 x fp128> undef to <4 x i64>
+ %v25 = fptosi <4 x fp128> undef to <4 x i32>
+ %v26 = fptosi <4 x fp128> undef to <4 x i16>
+ %v27 = fptosi <4 x fp128> undef to <4 x i8>
+ %v28 = fptosi <4 x double> undef to <4 x i64>
+ %v29 = fptosi <4 x double> undef to <4 x i32>
+ %v30 = fptosi <4 x double> undef to <4 x i16>
+ %v31 = fptosi <4 x double> undef to <4 x i8>
+ %v32 = fptosi <4 x float> undef to <4 x i64>
+ %v33 = fptosi <4 x float> undef to <4 x i32>
+ %v34 = fptosi <4 x float> undef to <4 x i16>
+ %v35 = fptosi <4 x float> undef to <4 x i8>
+ %v36 = fptosi <8 x fp128> undef to <8 x i64>
+ %v37 = fptosi <8 x fp128> undef to <8 x i32>
+ %v38 = fptosi <8 x fp128> undef to <8 x i16>
+ %v39 = fptosi <8 x fp128> undef to <8 x i8>
+ %v40 = fptosi <8 x double> undef to <8 x i64>
+ %v41 = fptosi <8 x double> undef to <8 x i32>
+ %v42 = fptosi <8 x double> undef to <8 x i16>
+ %v43 = fptosi <8 x double> undef to <8 x i8>
+ %v44 = fptosi <8 x float> undef to <8 x i64>
+ %v45 = fptosi <8 x float> undef to <8 x i32>
+ %v46 = fptosi <8 x float> undef to <8 x i16>
+ %v47 = fptosi <8 x float> undef to <8 x i8>
+ %v48 = fptosi <16 x double> undef to <16 x i64>
+ %v49 = fptosi <16 x double> undef to <16 x i32>
+ %v50 = fptosi <16 x double> undef to <16 x i16>
+ %v51 = fptosi <16 x double> undef to <16 x i8>
+ %v52 = fptosi <16 x float> undef to <16 x i64>
+ %v53 = fptosi <16 x float> undef to <16 x i32>
+ %v54 = fptosi <16 x float> undef to <16 x i16>
+ %v55 = fptosi <16 x float> undef to <16 x i8>
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v0 = fptosi fp128 undef to i64
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v1 = fptosi fp128 undef to i32
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v2 = fptosi fp128 undef to i16
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v3 = fptosi fp128 undef to i8
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v4 = fptosi double undef to i64
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v5 = fptosi double undef to i32
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v6 = fptosi double undef to i16
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v7 = fptosi double undef to i8
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v8 = fptosi float undef to i64
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v9 = fptosi float undef to i32
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v10 = fptosi float undef to i16
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v11 = fptosi float undef to i8
+; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %v12 = fptosi <2 x fp128> undef to <2 x i64>
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v13 = fptosi <2 x fp128> undef to <2 x i32>
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v14 = fptosi <2 x fp128> undef to <2 x i16>
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v15 = fptosi <2 x fp128> undef to <2 x i8>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v16 = fptosi <2 x double> undef to <2 x i64>
+; CHECK: Cost Model: Found an estimated cost of 7 for instruction: %v17 = fptosi <2 x double> undef to <2 x i32>
+; CHECK: Cost Model: Found an estimated cost of 7 for instruction: %v18 = fptosi <2 x double> undef to <2 x i16>
+; CHECK: Cost Model: Found an estimated cost of 7 for instruction: %v19 = fptosi <2 x double> undef to <2 x i8>
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %v20 = fptosi <2 x float> undef to <2 x i64>
+; CHECK: Cost Model: Found an estimated cost of 14 for instruction: %v21 = fptosi <2 x float> undef to <2 x i32>
+; CHECK: Cost Model: Found an estimated cost of 7 for instruction: %v22 = fptosi <2 x float> undef to <2 x i16>
+; CHECK: Cost Model: Found an estimated cost of 7 for instruction: %v23 = fptosi <2 x float> undef to <2 x i8>
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %v24 = fptosi <4 x fp128> undef to <4 x i64>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v25 = fptosi <4 x fp128> undef to <4 x i32>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v26 = fptosi <4 x fp128> undef to <4 x i16>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v27 = fptosi <4 x fp128> undef to <4 x i8>
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v28 = fptosi <4 x double> undef to <4 x i64>
+; CHECK: Cost Model: Found an estimated cost of 13 for instruction: %v29 = fptosi <4 x double> undef to <4 x i32>
+; CHECK: Cost Model: Found an estimated cost of 13 for instruction: %v30 = fptosi <4 x double> undef to <4 x i16>
+; CHECK: Cost Model: Found an estimated cost of 13 for instruction: %v31 = fptosi <4 x double> undef to <4 x i8>
+; CHECK: Cost Model: Found an estimated cost of 11 for instruction: %v32 = fptosi <4 x float> undef to <4 x i64>
+; CHECK: Cost Model: Found an estimated cost of 13 for instruction: %v33 = fptosi <4 x float> undef to <4 x i32>
+; CHECK: Cost Model: Found an estimated cost of 13 for instruction: %v34 = fptosi <4 x float> undef to <4 x i16>
+; CHECK: Cost Model: Found an estimated cost of 13 for instruction: %v35 = fptosi <4 x float> undef to <4 x i8>
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %v36 = fptosi <8 x fp128> undef to <8 x i64>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v37 = fptosi <8 x fp128> undef to <8 x i32>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v38 = fptosi <8 x fp128> undef to <8 x i16>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v39 = fptosi <8 x fp128> undef to <8 x i8>
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v40 = fptosi <8 x double> undef to <8 x i64>
+; CHECK: Cost Model: Found an estimated cost of 25 for instruction: %v41 = fptosi <8 x double> undef to <8 x i32>
+; CHECK: Cost Model: Found an estimated cost of 25 for instruction: %v42 = fptosi <8 x double> undef to <8 x i16>
+; CHECK: Cost Model: Found an estimated cost of 25 for instruction: %v43 = fptosi <8 x double> undef to <8 x i8>
+; CHECK: Cost Model: Found an estimated cost of 21 for instruction: %v44 = fptosi <8 x float> undef to <8 x i64>
+; CHECK: Cost Model: Found an estimated cost of 25 for instruction: %v45 = fptosi <8 x float> undef to <8 x i32>
+; CHECK: Cost Model: Found an estimated cost of 25 for instruction: %v46 = fptosi <8 x float> undef to <8 x i16>
+; CHECK: Cost Model: Found an estimated cost of 25 for instruction: %v47 = fptosi <8 x float> undef to <8 x i8>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v48 = fptosi <16 x double> undef to <16 x i64>
+; CHECK: Cost Model: Found an estimated cost of 49 for instruction: %v49 = fptosi <16 x double> undef to <16 x i32>
+; CHECK: Cost Model: Found an estimated cost of 49 for instruction: %v50 = fptosi <16 x double> undef to <16 x i16>
+; CHECK: Cost Model: Found an estimated cost of 49 for instruction: %v51 = fptosi <16 x double> undef to <16 x i8>
+; CHECK: Cost Model: Found an estimated cost of 41 for instruction: %v52 = fptosi <16 x float> undef to <16 x i64>
+; CHECK: Cost Model: Found an estimated cost of 49 for instruction: %v53 = fptosi <16 x float> undef to <16 x i32>
+; CHECK: Cost Model: Found an estimated cost of 49 for instruction: %v54 = fptosi <16 x float> undef to <16 x i16>
+; CHECK: Cost Model: Found an estimated cost of 49 for instruction: %v55 = fptosi <16 x float> undef to <16 x i8>
+
+ ret void
+}
+
+define void @fptoui() {
+ %v0 = fptoui fp128 undef to i64
+ %v1 = fptoui fp128 undef to i32
+ %v2 = fptoui fp128 undef to i16
+ %v3 = fptoui fp128 undef to i8
+ %v4 = fptoui double undef to i64
+ %v5 = fptoui double undef to i32
+ %v6 = fptoui double undef to i16
+ %v7 = fptoui double undef to i8
+ %v8 = fptoui float undef to i64
+ %v9 = fptoui float undef to i32
+ %v10 = fptoui float undef to i16
+ %v11 = fptoui float undef to i8
+ %v12 = fptoui <2 x fp128> undef to <2 x i64>
+ %v13 = fptoui <2 x fp128> undef to <2 x i32>
+ %v14 = fptoui <2 x fp128> undef to <2 x i16>
+ %v15 = fptoui <2 x fp128> undef to <2 x i8>
+ %v16 = fptoui <2 x double> undef to <2 x i64>
+ %v17 = fptoui <2 x double> undef to <2 x i32>
+ %v18 = fptoui <2 x double> undef to <2 x i16>
+ %v19 = fptoui <2 x double> undef to <2 x i8>
+ %v20 = fptoui <2 x float> undef to <2 x i64>
+ %v21 = fptoui <2 x float> undef to <2 x i32>
+ %v22 = fptoui <2 x float> undef to <2 x i16>
+ %v23 = fptoui <2 x float> undef to <2 x i8>
+ %v24 = fptoui <4 x fp128> undef to <4 x i64>
+ %v25 = fptoui <4 x fp128> undef to <4 x i32>
+ %v26 = fptoui <4 x fp128> undef to <4 x i16>
+ %v27 = fptoui <4 x fp128> undef to <4 x i8>
+ %v28 = fptoui <4 x double> undef to <4 x i64>
+ %v29 = fptoui <4 x double> undef to <4 x i32>
+ %v30 = fptoui <4 x double> undef to <4 x i16>
+ %v31 = fptoui <4 x double> undef to <4 x i8>
+ %v32 = fptoui <4 x float> undef to <4 x i64>
+ %v33 = fptoui <4 x float> undef to <4 x i32>
+ %v34 = fptoui <4 x float> undef to <4 x i16>
+ %v35 = fptoui <4 x float> undef to <4 x i8>
+ %v36 = fptoui <8 x fp128> undef to <8 x i64>
+ %v37 = fptoui <8 x fp128> undef to <8 x i32>
+ %v38 = fptoui <8 x fp128> undef to <8 x i16>
+ %v39 = fptoui <8 x fp128> undef to <8 x i8>
+ %v40 = fptoui <8 x double> undef to <8 x i64>
+ %v41 = fptoui <8 x double> undef to <8 x i32>
+ %v42 = fptoui <8 x double> undef to <8 x i16>
+ %v43 = fptoui <8 x double> undef to <8 x i8>
+ %v44 = fptoui <8 x float> undef to <8 x i64>
+ %v45 = fptoui <8 x float> undef to <8 x i32>
+ %v46 = fptoui <8 x float> undef to <8 x i16>
+ %v47 = fptoui <8 x float> undef to <8 x i8>
+ %v48 = fptoui <16 x double> undef to <16 x i64>
+ %v49 = fptoui <16 x double> undef to <16 x i32>
+ %v50 = fptoui <16 x double> undef to <16 x i16>
+ %v51 = fptoui <16 x double> undef to <16 x i8>
+ %v52 = fptoui <16 x float> undef to <16 x i64>
+ %v53 = fptoui <16 x float> undef to <16 x i32>
+ %v54 = fptoui <16 x float> undef to <16 x i16>
+ %v55 = fptoui <16 x float> undef to <16 x i8>
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v0 = fptoui fp128 undef to i64
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v1 = fptoui fp128 undef to i32
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v2 = fptoui fp128 undef to i16
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v3 = fptoui fp128 undef to i8
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v4 = fptoui double undef to i64
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v5 = fptoui double undef to i32
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v6 = fptoui double undef to i16
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v7 = fptoui double undef to i8
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v8 = fptoui float undef to i64
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v9 = fptoui float undef to i32
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v10 = fptoui float undef to i16
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v11 = fptoui float undef to i8
+; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %v12 = fptoui <2 x fp128> undef to <2 x i64>
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v13 = fptoui <2 x fp128> undef to <2 x i32>
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v14 = fptoui <2 x fp128> undef to <2 x i16>
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v15 = fptoui <2 x fp128> undef to <2 x i8>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v16 = fptoui <2 x double> undef to <2 x i64>
+; CHECK: Cost Model: Found an estimated cost of 7 for instruction: %v17 = fptoui <2 x double> undef to <2 x i32>
+; CHECK: Cost Model: Found an estimated cost of 7 for instruction: %v18 = fptoui <2 x double> undef to <2 x i16>
+; CHECK: Cost Model: Found an estimated cost of 7 for instruction: %v19 = fptoui <2 x double> undef to <2 x i8>
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %v20 = fptoui <2 x float> undef to <2 x i64>
+; CHECK: Cost Model: Found an estimated cost of 14 for instruction: %v21 = fptoui <2 x float> undef to <2 x i32>
+; CHECK: Cost Model: Found an estimated cost of 7 for instruction: %v22 = fptoui <2 x float> undef to <2 x i16>
+; CHECK: Cost Model: Found an estimated cost of 7 for instruction: %v23 = fptoui <2 x float> undef to <2 x i8>
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %v24 = fptoui <4 x fp128> undef to <4 x i64>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v25 = fptoui <4 x fp128> undef to <4 x i32>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v26 = fptoui <4 x fp128> undef to <4 x i16>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v27 = fptoui <4 x fp128> undef to <4 x i8>
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v28 = fptoui <4 x double> undef to <4 x i64>
+; CHECK: Cost Model: Found an estimated cost of 13 for instruction: %v29 = fptoui <4 x double> undef to <4 x i32>
+; CHECK: Cost Model: Found an estimated cost of 13 for instruction: %v30 = fptoui <4 x double> undef to <4 x i16>
+; CHECK: Cost Model: Found an estimated cost of 13 for instruction: %v31 = fptoui <4 x double> undef to <4 x i8>
+; CHECK: Cost Model: Found an estimated cost of 11 for instruction: %v32 = fptoui <4 x float> undef to <4 x i64>
+; CHECK: Cost Model: Found an estimated cost of 13 for instruction: %v33 = fptoui <4 x float> undef to <4 x i32>
+; CHECK: Cost Model: Found an estimated cost of 13 for instruction: %v34 = fptoui <4 x float> undef to <4 x i16>
+; CHECK: Cost Model: Found an estimated cost of 13 for instruction: %v35 = fptoui <4 x float> undef to <4 x i8>
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %v36 = fptoui <8 x fp128> undef to <8 x i64>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v37 = fptoui <8 x fp128> undef to <8 x i32>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v38 = fptoui <8 x fp128> undef to <8 x i16>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v39 = fptoui <8 x fp128> undef to <8 x i8>
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v40 = fptoui <8 x double> undef to <8 x i64>
+; CHECK: Cost Model: Found an estimated cost of 25 for instruction: %v41 = fptoui <8 x double> undef to <8 x i32>
+; CHECK: Cost Model: Found an estimated cost of 25 for instruction: %v42 = fptoui <8 x double> undef to <8 x i16>
+; CHECK: Cost Model: Found an estimated cost of 25 for instruction: %v43 = fptoui <8 x double> undef to <8 x i8>
+; CHECK: Cost Model: Found an estimated cost of 21 for instruction: %v44 = fptoui <8 x float> undef to <8 x i64>
+; CHECK: Cost Model: Found an estimated cost of 25 for instruction: %v45 = fptoui <8 x float> undef to <8 x i32>
+; CHECK: Cost Model: Found an estimated cost of 25 for instruction: %v46 = fptoui <8 x float> undef to <8 x i16>
+; CHECK: Cost Model: Found an estimated cost of 25 for instruction: %v47 = fptoui <8 x float> undef to <8 x i8>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v48 = fptoui <16 x double> undef to <16 x i64>
+; CHECK: Cost Model: Found an estimated cost of 49 for instruction: %v49 = fptoui <16 x double> undef to <16 x i32>
+; CHECK: Cost Model: Found an estimated cost of 49 for instruction: %v50 = fptoui <16 x double> undef to <16 x i16>
+; CHECK: Cost Model: Found an estimated cost of 49 for instruction: %v51 = fptoui <16 x double> undef to <16 x i8>
+; CHECK: Cost Model: Found an estimated cost of 41 for instruction: %v52 = fptoui <16 x float> undef to <16 x i64>
+; CHECK: Cost Model: Found an estimated cost of 49 for instruction: %v53 = fptoui <16 x float> undef to <16 x i32>
+; CHECK: Cost Model: Found an estimated cost of 49 for instruction: %v54 = fptoui <16 x float> undef to <16 x i16>
+; CHECK: Cost Model: Found an estimated cost of 49 for instruction: %v55 = fptoui <16 x float> undef to <16 x i8>
+
+ ret void
+}
+
+define void @fptrunc() {
+ %v0 = fptrunc fp128 undef to double
+ %v1 = fptrunc fp128 undef to float
+ %v2 = fptrunc double undef to float
+ %v3 = fptrunc <2 x fp128> undef to <2 x double>
+ %v4 = fptrunc <2 x fp128> undef to <2 x float>
+ %v5 = fptrunc <2 x double> undef to <2 x float>
+ %v6 = fptrunc <4 x fp128> undef to <4 x double>
+ %v7 = fptrunc <4 x fp128> undef to <4 x float>
+ %v8 = fptrunc <4 x double> undef to <4 x float>
+ %v9 = fptrunc <8 x fp128> undef to <8 x double>
+ %v10 = fptrunc <8 x fp128> undef to <8 x float>
+ %v11 = fptrunc <8 x double> undef to <8 x float>
+ %v12 = fptrunc <16 x double> undef to <16 x float>
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v0 = fptrunc fp128 undef to double
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v1 = fptrunc fp128 undef to float
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v2 = fptrunc double undef to float
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v3 = fptrunc <2 x fp128> undef to <2 x double>
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v4 = fptrunc <2 x fp128> undef to <2 x float>
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v5 = fptrunc <2 x double> undef to <2 x float>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v6 = fptrunc <4 x fp128> undef to <4 x double>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v7 = fptrunc <4 x fp128> undef to <4 x float>
+; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %v8 = fptrunc <4 x double> undef to <4 x float>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v9 = fptrunc <8 x fp128> undef to <8 x double>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v10 = fptrunc <8 x fp128> undef to <8 x float>
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %v11 = fptrunc <8 x double> undef to <8 x float>
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %v12 = fptrunc <16 x double> undef to <16 x float>
+
+ ret void
+}
+
+define void @sitofp() {
+ %v0 = sitofp i64 undef to fp128
+ %v1 = sitofp i64 undef to double
+ %v2 = sitofp i64 undef to float
+ %v3 = sitofp i32 undef to fp128
+ %v4 = sitofp i32 undef to double
+ %v5 = sitofp i32 undef to float
+ %v6 = sitofp i16 undef to fp128
+ %v7 = sitofp i16 undef to double
+ %v8 = sitofp i16 undef to float
+ %v9 = sitofp i8 undef to fp128
+ %v10 = sitofp i8 undef to double
+ %v11 = sitofp i8 undef to float
+ %v12 = sitofp <2 x i64> undef to <2 x fp128>
+ %v13 = sitofp <2 x i64> undef to <2 x double>
+ %v14 = sitofp <2 x i64> undef to <2 x float>
+ %v15 = sitofp <2 x i32> undef to <2 x fp128>
+ %v16 = sitofp <2 x i32> undef to <2 x double>
+ %v17 = sitofp <2 x i32> undef to <2 x float>
+ %v18 = sitofp <2 x i16> undef to <2 x fp128>
+ %v19 = sitofp <2 x i16> undef to <2 x double>
+ %v20 = sitofp <2 x i16> undef to <2 x float>
+ %v21 = sitofp <2 x i8> undef to <2 x fp128>
+ %v22 = sitofp <2 x i8> undef to <2 x double>
+ %v23 = sitofp <2 x i8> undef to <2 x float>
+ %v24 = sitofp <4 x i64> undef to <4 x fp128>
+ %v25 = sitofp <4 x i64> undef to <4 x double>
+ %v26 = sitofp <4 x i64> undef to <4 x float>
+ %v27 = sitofp <4 x i32> undef to <4 x fp128>
+ %v28 = sitofp <4 x i32> undef to <4 x double>
+ %v29 = sitofp <4 x i32> undef to <4 x float>
+ %v30 = sitofp <4 x i16> undef to <4 x fp128>
+ %v31 = sitofp <4 x i16> undef to <4 x double>
+ %v32 = sitofp <4 x i16> undef to <4 x float>
+ %v33 = sitofp <4 x i8> undef to <4 x fp128>
+ %v34 = sitofp <4 x i8> undef to <4 x double>
+ %v35 = sitofp <4 x i8> undef to <4 x float>
+ %v36 = sitofp <8 x i64> undef to <8 x fp128>
+ %v37 = sitofp <8 x i64> undef to <8 x double>
+ %v38 = sitofp <8 x i64> undef to <8 x float>
+ %v39 = sitofp <8 x i32> undef to <8 x fp128>
+ %v40 = sitofp <8 x i32> undef to <8 x double>
+ %v41 = sitofp <8 x i32> undef to <8 x float>
+ %v42 = sitofp <8 x i16> undef to <8 x fp128>
+ %v43 = sitofp <8 x i16> undef to <8 x double>
+ %v44 = sitofp <8 x i16> undef to <8 x float>
+ %v45 = sitofp <8 x i8> undef to <8 x fp128>
+ %v46 = sitofp <8 x i8> undef to <8 x double>
+ %v47 = sitofp <8 x i8> undef to <8 x float>
+ %v48 = sitofp <16 x i64> undef to <16 x double>
+ %v49 = sitofp <16 x i64> undef to <16 x float>
+ %v50 = sitofp <16 x i32> undef to <16 x double>
+ %v51 = sitofp <16 x i32> undef to <16 x float>
+ %v52 = sitofp <16 x i16> undef to <16 x double>
+ %v53 = sitofp <16 x i16> undef to <16 x float>
+ %v54 = sitofp <16 x i8> undef to <16 x double>
+ %v55 = sitofp <16 x i8> undef to <16 x float>
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v0 = sitofp i64 undef to fp128
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v1 = sitofp i64 undef to double
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v2 = sitofp i64 undef to float
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v3 = sitofp i32 undef to fp128
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v4 = sitofp i32 undef to double
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v5 = sitofp i32 undef to float
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v6 = sitofp i16 undef to fp128
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v7 = sitofp i16 undef to double
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v8 = sitofp i16 undef to float
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v9 = sitofp i8 undef to fp128
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v10 = sitofp i8 undef to double
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v11 = sitofp i8 undef to float
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v12 = sitofp <2 x i64> undef to <2 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v13 = sitofp <2 x i64> undef to <2 x double>
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %v14 = sitofp <2 x i64> undef to <2 x float>
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v15 = sitofp <2 x i32> undef to <2 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %v16 = sitofp <2 x i32> undef to <2 x double>
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %v17 = sitofp <2 x i32> undef to <2 x float>
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %v18 = sitofp <2 x i16> undef to <2 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v19 = sitofp <2 x i16> undef to <2 x double>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v20 = sitofp <2 x i16> undef to <2 x float>
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %v21 = sitofp <2 x i8> undef to <2 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v22 = sitofp <2 x i8> undef to <2 x double>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v23 = sitofp <2 x i8> undef to <2 x float>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v24 = sitofp <4 x i64> undef to <4 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v25 = sitofp <4 x i64> undef to <4 x double>
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %v26 = sitofp <4 x i64> undef to <4 x float>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v27 = sitofp <4 x i32> undef to <4 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %v28 = sitofp <4 x i32> undef to <4 x double>
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %v29 = sitofp <4 x i32> undef to <4 x float>
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %v30 = sitofp <4 x i16> undef to <4 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v31 = sitofp <4 x i16> undef to <4 x double>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v32 = sitofp <4 x i16> undef to <4 x float>
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %v33 = sitofp <4 x i8> undef to <4 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v34 = sitofp <4 x i8> undef to <4 x double>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v35 = sitofp <4 x i8> undef to <4 x float>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v36 = sitofp <8 x i64> undef to <8 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v37 = sitofp <8 x i64> undef to <8 x double>
+; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %v38 = sitofp <8 x i64> undef to <8 x float>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v39 = sitofp <8 x i32> undef to <8 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %v40 = sitofp <8 x i32> undef to <8 x double>
+; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %v41 = sitofp <8 x i32> undef to <8 x float>
+; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %v42 = sitofp <8 x i16> undef to <8 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 32 for instruction: %v43 = sitofp <8 x i16> undef to <8 x double>
+; CHECK: Cost Model: Found an estimated cost of 32 for instruction: %v44 = sitofp <8 x i16> undef to <8 x float>
+; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %v45 = sitofp <8 x i8> undef to <8 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 32 for instruction: %v46 = sitofp <8 x i8> undef to <8 x double>
+; CHECK: Cost Model: Found an estimated cost of 32 for instruction: %v47 = sitofp <8 x i8> undef to <8 x float>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v48 = sitofp <16 x i64> undef to <16 x double>
+; CHECK: Cost Model: Found an estimated cost of 48 for instruction: %v49 = sitofp <16 x i64> undef to <16 x float>
+; CHECK: Cost Model: Found an estimated cost of 48 for instruction: %v50 = sitofp <16 x i32> undef to <16 x double>
+; CHECK: Cost Model: Found an estimated cost of 48 for instruction: %v51 = sitofp <16 x i32> undef to <16 x float>
+; CHECK: Cost Model: Found an estimated cost of 64 for instruction: %v52 = sitofp <16 x i16> undef to <16 x double>
+; CHECK: Cost Model: Found an estimated cost of 64 for instruction: %v53 = sitofp <16 x i16> undef to <16 x float>
+; CHECK: Cost Model: Found an estimated cost of 64 for instruction: %v54 = sitofp <16 x i8> undef to <16 x double>
+; CHECK: Cost Model: Found an estimated cost of 64 for instruction: %v55 = sitofp <16 x i8> undef to <16 x float>
+
+ ret void
+}
+
+define void @uitofp() {
+ %v0 = uitofp i64 undef to fp128
+ %v1 = uitofp i64 undef to double
+ %v2 = uitofp i64 undef to float
+ %v3 = uitofp i32 undef to fp128
+ %v4 = uitofp i32 undef to double
+ %v5 = uitofp i32 undef to float
+ %v6 = uitofp i16 undef to fp128
+ %v7 = uitofp i16 undef to double
+ %v8 = uitofp i16 undef to float
+ %v9 = uitofp i8 undef to fp128
+ %v10 = uitofp i8 undef to double
+ %v11 = uitofp i8 undef to float
+ %v12 = uitofp <2 x i64> undef to <2 x fp128>
+ %v13 = uitofp <2 x i64> undef to <2 x double>
+ %v14 = uitofp <2 x i64> undef to <2 x float>
+ %v15 = uitofp <2 x i32> undef to <2 x fp128>
+ %v16 = uitofp <2 x i32> undef to <2 x double>
+ %v17 = uitofp <2 x i32> undef to <2 x float>
+ %v18 = uitofp <2 x i16> undef to <2 x fp128>
+ %v19 = uitofp <2 x i16> undef to <2 x double>
+ %v20 = uitofp <2 x i16> undef to <2 x float>
+ %v21 = uitofp <2 x i8> undef to <2 x fp128>
+ %v22 = uitofp <2 x i8> undef to <2 x double>
+ %v23 = uitofp <2 x i8> undef to <2 x float>
+ %v24 = uitofp <4 x i64> undef to <4 x fp128>
+ %v25 = uitofp <4 x i64> undef to <4 x double>
+ %v26 = uitofp <4 x i64> undef to <4 x float>
+ %v27 = uitofp <4 x i32> undef to <4 x fp128>
+ %v28 = uitofp <4 x i32> undef to <4 x double>
+ %v29 = uitofp <4 x i32> undef to <4 x float>
+ %v30 = uitofp <4 x i16> undef to <4 x fp128>
+ %v31 = uitofp <4 x i16> undef to <4 x double>
+ %v32 = uitofp <4 x i16> undef to <4 x float>
+ %v33 = uitofp <4 x i8> undef to <4 x fp128>
+ %v34 = uitofp <4 x i8> undef to <4 x double>
+ %v35 = uitofp <4 x i8> undef to <4 x float>
+ %v36 = uitofp <8 x i64> undef to <8 x fp128>
+ %v37 = uitofp <8 x i64> undef to <8 x double>
+ %v38 = uitofp <8 x i64> undef to <8 x float>
+ %v39 = uitofp <8 x i32> undef to <8 x fp128>
+ %v40 = uitofp <8 x i32> undef to <8 x double>
+ %v41 = uitofp <8 x i32> undef to <8 x float>
+ %v42 = uitofp <8 x i16> undef to <8 x fp128>
+ %v43 = uitofp <8 x i16> undef to <8 x double>
+ %v44 = uitofp <8 x i16> undef to <8 x float>
+ %v45 = uitofp <8 x i8> undef to <8 x fp128>
+ %v46 = uitofp <8 x i8> undef to <8 x double>
+ %v47 = uitofp <8 x i8> undef to <8 x float>
+ %v48 = uitofp <16 x i64> undef to <16 x double>
+ %v49 = uitofp <16 x i64> undef to <16 x float>
+ %v50 = uitofp <16 x i32> undef to <16 x double>
+ %v51 = uitofp <16 x i32> undef to <16 x float>
+ %v52 = uitofp <16 x i16> undef to <16 x double>
+ %v53 = uitofp <16 x i16> undef to <16 x float>
+ %v54 = uitofp <16 x i8> undef to <16 x double>
+ %v55 = uitofp <16 x i8> undef to <16 x float>
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v0 = uitofp i64 undef to fp128
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v1 = uitofp i64 undef to double
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v2 = uitofp i64 undef to float
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v3 = uitofp i32 undef to fp128
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v4 = uitofp i32 undef to double
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v5 = uitofp i32 undef to float
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v6 = uitofp i16 undef to fp128
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v7 = uitofp i16 undef to double
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v8 = uitofp i16 undef to float
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v9 = uitofp i8 undef to fp128
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v10 = uitofp i8 undef to double
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v11 = uitofp i8 undef to float
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v12 = uitofp <2 x i64> undef to <2 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v13 = uitofp <2 x i64> undef to <2 x double>
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %v14 = uitofp <2 x i64> undef to <2 x float>
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v15 = uitofp <2 x i32> undef to <2 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %v16 = uitofp <2 x i32> undef to <2 x double>
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %v17 = uitofp <2 x i32> undef to <2 x float>
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %v18 = uitofp <2 x i16> undef to <2 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v19 = uitofp <2 x i16> undef to <2 x double>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v20 = uitofp <2 x i16> undef to <2 x float>
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %v21 = uitofp <2 x i8> undef to <2 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v22 = uitofp <2 x i8> undef to <2 x double>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v23 = uitofp <2 x i8> undef to <2 x float>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v24 = uitofp <4 x i64> undef to <4 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v25 = uitofp <4 x i64> undef to <4 x double>
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %v26 = uitofp <4 x i64> undef to <4 x float>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v27 = uitofp <4 x i32> undef to <4 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %v28 = uitofp <4 x i32> undef to <4 x double>
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %v29 = uitofp <4 x i32> undef to <4 x float>
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %v30 = uitofp <4 x i16> undef to <4 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v31 = uitofp <4 x i16> undef to <4 x double>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v32 = uitofp <4 x i16> undef to <4 x float>
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %v33 = uitofp <4 x i8> undef to <4 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v34 = uitofp <4 x i8> undef to <4 x double>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v35 = uitofp <4 x i8> undef to <4 x float>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v36 = uitofp <8 x i64> undef to <8 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v37 = uitofp <8 x i64> undef to <8 x double>
+; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %v38 = uitofp <8 x i64> undef to <8 x float>
+; CHECK: Cost Model: Found an estimated cost of 16 for instruction: %v39 = uitofp <8 x i32> undef to <8 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %v40 = uitofp <8 x i32> undef to <8 x double>
+; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %v41 = uitofp <8 x i32> undef to <8 x float>
+; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %v42 = uitofp <8 x i16> undef to <8 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 32 for instruction: %v43 = uitofp <8 x i16> undef to <8 x double>
+; CHECK: Cost Model: Found an estimated cost of 32 for instruction: %v44 = uitofp <8 x i16> undef to <8 x float>
+; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %v45 = uitofp <8 x i8> undef to <8 x fp128>
+; CHECK: Cost Model: Found an estimated cost of 32 for instruction: %v46 = uitofp <8 x i8> undef to <8 x double>
+; CHECK: Cost Model: Found an estimated cost of 32 for instruction: %v47 = uitofp <8 x i8> undef to <8 x float>
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %v48 = uitofp <16 x i64> undef to <16 x double>
+; CHECK: Cost Model: Found an estimated cost of 48 for instruction: %v49 = uitofp <16 x i64> undef to <16 x float>
+; CHECK: Cost Model: Found an estimated cost of 48 for instruction: %v50 = uitofp <16 x i32> undef to <16 x double>
+; CHECK: Cost Model: Found an estimated cost of 48 for instruction: %v51 = uitofp <16 x i32> undef to <16 x float>
+; CHECK: Cost Model: Found an estimated cost of 64 for instruction: %v52 = uitofp <16 x i16> undef to <16 x double>
+; CHECK: Cost Model: Found an estimated cost of 64 for instruction: %v53 = uitofp <16 x i16> undef to <16 x float>
+; CHECK: Cost Model: Found an estimated cost of 64 for instruction: %v54 = uitofp <16 x i8> undef to <16 x double>
+; CHECK: Cost Model: Found an estimated cost of 64 for instruction: %v55 = uitofp <16 x i8> undef to <16 x float>
+
+ ret void
+}
diff --git a/test/Analysis/CostModel/SystemZ/int-arith.ll b/test/Analysis/CostModel/SystemZ/int-arith.ll
new file mode 100644
index 000000000000..518c9b01e4e1
--- /dev/null
+++ b/test/Analysis/CostModel/SystemZ/int-arith.ll
@@ -0,0 +1,326 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=systemz-unknown -mcpu=z13 | FileCheck %s
+;
+; Note: The scalarized vector instruction costs do not include any
+; extracts, due to the undef operands.
+
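+; The wide vector costs below track type legalization: anything wider
+; than 128 bits is split into 128-bit parts, one vector instruction per
+; part, e.g.
+;
+;   <4 x i64>  = 2 x <2 x i64>  -> cost 2
+;   <16 x i64> = 8 x <2 x i64>  -> cost 8
+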
+define void @add() {
+ %res0 = add i8 undef, undef
+ %res1 = add i16 undef, undef
+ %res2 = add i32 undef, undef
+ %res3 = add i64 undef, undef
+ %res4 = add <2 x i8> undef, undef
+ %res5 = add <2 x i16> undef, undef
+ %res6 = add <2 x i32> undef, undef
+ %res7 = add <2 x i64> undef, undef
+ %res8 = add <4 x i8> undef, undef
+ %res9 = add <4 x i16> undef, undef
+ %res10 = add <4 x i32> undef, undef
+ %res11 = add <4 x i64> undef, undef
+ %res12 = add <8 x i8> undef, undef
+ %res13 = add <8 x i16> undef, undef
+ %res14 = add <8 x i32> undef, undef
+ %res15 = add <8 x i64> undef, undef
+ %res16 = add <16 x i8> undef, undef
+ %res17 = add <16 x i16> undef, undef
+ %res18 = add <16 x i32> undef, undef
+ %res19 = add <16 x i64> undef, undef
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res0 = add i8 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res1 = add i16 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res2 = add i32 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res3 = add i64 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res4 = add <2 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res5 = add <2 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res6 = add <2 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res7 = add <2 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res8 = add <4 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res9 = add <4 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res10 = add <4 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res11 = add <4 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res12 = add <8 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res13 = add <8 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res14 = add <8 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res15 = add <8 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res16 = add <16 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res17 = add <16 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res18 = add <16 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res19 = add <16 x i64> undef, undef
+
+ ret void;
+}
+
+define void @sub() {
+ %res0 = sub i8 undef, undef
+ %res1 = sub i16 undef, undef
+ %res2 = sub i32 undef, undef
+ %res3 = sub i64 undef, undef
+ %res4 = sub <2 x i8> undef, undef
+ %res5 = sub <2 x i16> undef, undef
+ %res6 = sub <2 x i32> undef, undef
+ %res7 = sub <2 x i64> undef, undef
+ %res8 = sub <4 x i8> undef, undef
+ %res9 = sub <4 x i16> undef, undef
+ %res10 = sub <4 x i32> undef, undef
+ %res11 = sub <4 x i64> undef, undef
+ %res12 = sub <8 x i8> undef, undef
+ %res13 = sub <8 x i16> undef, undef
+ %res14 = sub <8 x i32> undef, undef
+ %res15 = sub <8 x i64> undef, undef
+ %res16 = sub <16 x i8> undef, undef
+ %res17 = sub <16 x i16> undef, undef
+ %res18 = sub <16 x i32> undef, undef
+ %res19 = sub <16 x i64> undef, undef
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res0 = sub i8 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res1 = sub i16 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res2 = sub i32 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res3 = sub i64 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res4 = sub <2 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res5 = sub <2 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res6 = sub <2 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res7 = sub <2 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res8 = sub <4 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res9 = sub <4 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res10 = sub <4 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res11 = sub <4 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res12 = sub <8 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res13 = sub <8 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res14 = sub <8 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res15 = sub <8 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res16 = sub <16 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res17 = sub <16 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res18 = sub <16 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res19 = sub <16 x i64> undef, undef
+
+ ret void;
+}
+
+define void @mul() {
+ %res0 = mul i8 undef, undef
+ %res1 = mul i16 undef, undef
+ %res2 = mul i32 undef, undef
+ %res3 = mul i64 undef, undef
+ %res4 = mul <2 x i8> undef, undef
+ %res5 = mul <2 x i16> undef, undef
+ %res6 = mul <2 x i32> undef, undef
+ %res7 = mul <2 x i64> undef, undef
+ %res8 = mul <4 x i8> undef, undef
+ %res9 = mul <4 x i16> undef, undef
+ %res10 = mul <4 x i32> undef, undef
+ %res11 = mul <4 x i64> undef, undef
+ %res12 = mul <8 x i8> undef, undef
+ %res13 = mul <8 x i16> undef, undef
+ %res14 = mul <8 x i32> undef, undef
+ %res15 = mul <8 x i64> undef, undef
+ %res16 = mul <16 x i8> undef, undef
+ %res17 = mul <16 x i16> undef, undef
+ %res18 = mul <16 x i32> undef, undef
+ %res19 = mul <16 x i64> undef, undef
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res0 = mul i8 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res1 = mul i16 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res2 = mul i32 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res3 = mul i64 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res4 = mul <2 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res5 = mul <2 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res6 = mul <2 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %res7 = mul <2 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res8 = mul <4 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res9 = mul <4 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res10 = mul <4 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %res11 = mul <4 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res12 = mul <8 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res13 = mul <8 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res14 = mul <8 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %res15 = mul <8 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res16 = mul <16 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res17 = mul <16 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res18 = mul <16 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %res19 = mul <16 x i64> undef, undef
+
+ ret void;
+}
+
+define void @sdiv() {
+ %res0 = sdiv i8 undef, undef
+ %res1 = sdiv i16 undef, undef
+ %res2 = sdiv i32 undef, undef
+ %res3 = sdiv i64 undef, undef
+ %res4 = sdiv <2 x i8> undef, undef
+ %res5 = sdiv <2 x i16> undef, undef
+ %res6 = sdiv <2 x i32> undef, undef
+ %res7 = sdiv <2 x i64> undef, undef
+ %res8 = sdiv <4 x i8> undef, undef
+ %res9 = sdiv <4 x i16> undef, undef
+ %res10 = sdiv <4 x i32> undef, undef
+ %res11 = sdiv <4 x i64> undef, undef
+ %res12 = sdiv <8 x i8> undef, undef
+ %res13 = sdiv <8 x i16> undef, undef
+ %res14 = sdiv <8 x i32> undef, undef
+ %res15 = sdiv <8 x i64> undef, undef
+ %res16 = sdiv <16 x i8> undef, undef
+ %res17 = sdiv <16 x i16> undef, undef
+ %res18 = sdiv <16 x i32> undef, undef
+ %res19 = sdiv <16 x i64> undef, undef
+
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res0 = sdiv i8 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res1 = sdiv i16 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res2 = sdiv i32 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res3 = sdiv i64 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res4 = sdiv <2 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res5 = sdiv <2 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %res6 = sdiv <2 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %res7 = sdiv <2 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res8 = sdiv <4 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res9 = sdiv <4 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %res10 = sdiv <4 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %res11 = sdiv <4 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res12 = sdiv <8 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res13 = sdiv <8 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %res14 = sdiv <8 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %res15 = sdiv <8 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 80 for instruction: %res16 = sdiv <16 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 80 for instruction: %res17 = sdiv <16 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 48 for instruction: %res18 = sdiv <16 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %res19 = sdiv <16 x i64> undef, undef
+
+ ret void;
+}
+
+define void @srem() {
+ %res0 = srem i8 undef, undef
+ %res1 = srem i16 undef, undef
+ %res2 = srem i32 undef, undef
+ %res3 = srem i64 undef, undef
+ %res4 = srem <2 x i8> undef, undef
+ %res5 = srem <2 x i16> undef, undef
+ %res6 = srem <2 x i32> undef, undef
+ %res7 = srem <2 x i64> undef, undef
+ %res8 = srem <4 x i8> undef, undef
+ %res9 = srem <4 x i16> undef, undef
+ %res10 = srem <4 x i32> undef, undef
+ %res11 = srem <4 x i64> undef, undef
+ %res12 = srem <8 x i8> undef, undef
+ %res13 = srem <8 x i16> undef, undef
+ %res14 = srem <8 x i32> undef, undef
+ %res15 = srem <8 x i64> undef, undef
+ %res16 = srem <16 x i8> undef, undef
+ %res17 = srem <16 x i16> undef, undef
+ %res18 = srem <16 x i32> undef, undef
+ %res19 = srem <16 x i64> undef, undef
+
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res0 = srem i8 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res1 = srem i16 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res2 = srem i32 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res3 = srem i64 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res4 = srem <2 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res5 = srem <2 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %res6 = srem <2 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %res7 = srem <2 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res8 = srem <4 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res9 = srem <4 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %res10 = srem <4 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %res11 = srem <4 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res12 = srem <8 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res13 = srem <8 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %res14 = srem <8 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %res15 = srem <8 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 80 for instruction: %res16 = srem <16 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 80 for instruction: %res17 = srem <16 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 48 for instruction: %res18 = srem <16 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %res19 = srem <16 x i64> undef, undef
+
+ ret void;
+}
+
+define void @udiv() {
+ %res0 = udiv i8 undef, undef
+ %res1 = udiv i16 undef, undef
+ %res2 = udiv i32 undef, undef
+ %res3 = udiv i64 undef, undef
+ %res4 = udiv <2 x i8> undef, undef
+ %res5 = udiv <2 x i16> undef, undef
+ %res6 = udiv <2 x i32> undef, undef
+ %res7 = udiv <2 x i64> undef, undef
+ %res8 = udiv <4 x i8> undef, undef
+ %res9 = udiv <4 x i16> undef, undef
+ %res10 = udiv <4 x i32> undef, undef
+ %res11 = udiv <4 x i64> undef, undef
+ %res12 = udiv <8 x i8> undef, undef
+ %res13 = udiv <8 x i16> undef, undef
+ %res14 = udiv <8 x i32> undef, undef
+ %res15 = udiv <8 x i64> undef, undef
+ %res16 = udiv <16 x i8> undef, undef
+ %res17 = udiv <16 x i16> undef, undef
+ %res18 = udiv <16 x i32> undef, undef
+ %res19 = udiv <16 x i64> undef, undef
+
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res0 = udiv i8 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res1 = udiv i16 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res2 = udiv i32 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res3 = udiv i64 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res4 = udiv <2 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res5 = udiv <2 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %res6 = udiv <2 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 5 for instruction: %res7 = udiv <2 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res8 = udiv <4 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res9 = udiv <4 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %res10 = udiv <4 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res11 = udiv <4 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res12 = udiv <8 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res13 = udiv <8 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %res14 = udiv <8 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res15 = udiv <8 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 80 for instruction: %res16 = udiv <16 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 80 for instruction: %res17 = udiv <16 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 48 for instruction: %res18 = udiv <16 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res19 = udiv <16 x i64> undef, undef
+
+ ret void;
+}
+
+define void @urem() {
+ %res0 = urem i8 undef, undef
+ %res1 = urem i16 undef, undef
+ %res2 = urem i32 undef, undef
+ %res3 = urem i64 undef, undef
+ %res4 = urem <2 x i8> undef, undef
+ %res5 = urem <2 x i16> undef, undef
+ %res6 = urem <2 x i32> undef, undef
+ %res7 = urem <2 x i64> undef, undef
+ %res8 = urem <4 x i8> undef, undef
+ %res9 = urem <4 x i16> undef, undef
+ %res10 = urem <4 x i32> undef, undef
+ %res11 = urem <4 x i64> undef, undef
+ %res12 = urem <8 x i8> undef, undef
+ %res13 = urem <8 x i16> undef, undef
+ %res14 = urem <8 x i32> undef, undef
+ %res15 = urem <8 x i64> undef, undef
+ %res16 = urem <16 x i8> undef, undef
+ %res17 = urem <16 x i16> undef, undef
+ %res18 = urem <16 x i32> undef, undef
+ %res19 = urem <16 x i64> undef, undef
+
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res0 = urem i8 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res1 = urem i16 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res2 = urem i32 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res3 = urem i64 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res4 = urem <2 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res5 = urem <2 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %res6 = urem <2 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 5 for instruction: %res7 = urem <2 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res8 = urem <4 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res9 = urem <4 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %res10 = urem <4 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res11 = urem <4 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res12 = urem <8 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res13 = urem <8 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %res14 = urem <8 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res15 = urem <8 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 80 for instruction: %res16 = urem <16 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 80 for instruction: %res17 = urem <16 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 48 for instruction: %res18 = urem <16 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res19 = urem <16 x i64> undef, undef
+
+ ret void;
+}
diff --git a/test/Analysis/CostModel/SystemZ/int-cast.ll b/test/Analysis/CostModel/SystemZ/int-cast.ll
new file mode 100644
index 000000000000..7764c6ff756f
--- /dev/null
+++ b/test/Analysis/CostModel/SystemZ/int-cast.ll
@@ -0,0 +1,199 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=systemz-unknown -mcpu=z13 | FileCheck %s
+
+define void @sext() {
+ %v0 = sext i8 undef to i16
+ %v1 = sext i8 undef to i32
+ %v2 = sext i8 undef to i64
+ %v3 = sext i16 undef to i32
+ %v4 = sext i16 undef to i64
+ %v5 = sext i32 undef to i64
+ %v6 = sext <2 x i8> undef to <2 x i16>
+ %v7 = sext <2 x i8> undef to <2 x i32>
+ %v8 = sext <2 x i8> undef to <2 x i64>
+ %v9 = sext <2 x i16> undef to <2 x i32>
+ %v10 = sext <2 x i16> undef to <2 x i64>
+ %v11 = sext <2 x i32> undef to <2 x i64>
+ %v12 = sext <4 x i8> undef to <4 x i16>
+ %v13 = sext <4 x i8> undef to <4 x i32>
+ %v14 = sext <4 x i8> undef to <4 x i64>
+ %v15 = sext <4 x i16> undef to <4 x i32>
+ %v16 = sext <4 x i16> undef to <4 x i64>
+ %v17 = sext <4 x i32> undef to <4 x i64>
+ %v18 = sext <8 x i8> undef to <8 x i16>
+ %v19 = sext <8 x i8> undef to <8 x i32>
+ %v20 = sext <8 x i8> undef to <8 x i64>
+ %v21 = sext <8 x i16> undef to <8 x i32>
+ %v22 = sext <8 x i16> undef to <8 x i64>
+ %v23 = sext <8 x i32> undef to <8 x i64>
+ %v24 = sext <16 x i8> undef to <16 x i16>
+ %v25 = sext <16 x i8> undef to <16 x i32>
+ %v26 = sext <16 x i8> undef to <16 x i64>
+ %v27 = sext <16 x i16> undef to <16 x i32>
+ %v28 = sext <16 x i16> undef to <16 x i64>
+ %v29 = sext <16 x i32> undef to <16 x i64>
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v0 = sext i8 undef to i16
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v1 = sext i8 undef to i32
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v2 = sext i8 undef to i64
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v3 = sext i16 undef to i32
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v4 = sext i16 undef to i64
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v5 = sext i32 undef to i64
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v6 = sext <2 x i8> undef to <2 x i16>
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v7 = sext <2 x i8> undef to <2 x i32>
+; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %v8 = sext <2 x i8> undef to <2 x i64>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v9 = sext <2 x i16> undef to <2 x i32>
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v10 = sext <2 x i16> undef to <2 x i64>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v11 = sext <2 x i32> undef to <2 x i64>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v12 = sext <4 x i8> undef to <4 x i16>
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v13 = sext <4 x i8> undef to <4 x i32>
+; CHECK: Cost Model: Found an estimated cost of 7 for instruction: %v14 = sext <4 x i8> undef to <4 x i64>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v15 = sext <4 x i16> undef to <4 x i32>
+; CHECK: Cost Model: Found an estimated cost of 5 for instruction: %v16 = sext <4 x i16> undef to <4 x i64>
+; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %v17 = sext <4 x i32> undef to <4 x i64>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v18 = sext <8 x i8> undef to <8 x i16>
+; CHECK: Cost Model: Found an estimated cost of 5 for instruction: %v19 = sext <8 x i8> undef to <8 x i32>
+; CHECK: Cost Model: Found an estimated cost of 15 for instruction: %v20 = sext <8 x i8> undef to <8 x i64>
+; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %v21 = sext <8 x i16> undef to <8 x i32>
+; CHECK: Cost Model: Found an estimated cost of 11 for instruction: %v22 = sext <8 x i16> undef to <8 x i64>
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %v23 = sext <8 x i32> undef to <8 x i64>
+; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %v24 = sext <16 x i8> undef to <16 x i16>
+; CHECK: Cost Model: Found an estimated cost of 11 for instruction: %v25 = sext <16 x i8> undef to <16 x i32>
+; CHECK: Cost Model: Found an estimated cost of 31 for instruction: %v26 = sext <16 x i8> undef to <16 x i64>
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %v27 = sext <16 x i16> undef to <16 x i32>
+; CHECK: Cost Model: Found an estimated cost of 22 for instruction: %v28 = sext <16 x i16> undef to <16 x i64>
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %v29 = sext <16 x i32> undef to <16 x i64>
+
+ ret void
+}
+
+define void @zext() {
+ %v0 = zext i8 undef to i16
+ %v1 = zext i8 undef to i32
+ %v2 = zext i8 undef to i64
+ %v3 = zext i16 undef to i32
+ %v4 = zext i16 undef to i64
+ %v5 = zext i32 undef to i64
+ %v6 = zext <2 x i8> undef to <2 x i16>
+ %v7 = zext <2 x i8> undef to <2 x i32>
+ %v8 = zext <2 x i8> undef to <2 x i64>
+ %v9 = zext <2 x i16> undef to <2 x i32>
+ %v10 = zext <2 x i16> undef to <2 x i64>
+ %v11 = zext <2 x i32> undef to <2 x i64>
+ %v12 = zext <4 x i8> undef to <4 x i16>
+ %v13 = zext <4 x i8> undef to <4 x i32>
+ %v14 = zext <4 x i8> undef to <4 x i64>
+ %v15 = zext <4 x i16> undef to <4 x i32>
+ %v16 = zext <4 x i16> undef to <4 x i64>
+ %v17 = zext <4 x i32> undef to <4 x i64>
+ %v18 = zext <8 x i8> undef to <8 x i16>
+ %v19 = zext <8 x i8> undef to <8 x i32>
+ %v20 = zext <8 x i8> undef to <8 x i64>
+ %v21 = zext <8 x i16> undef to <8 x i32>
+ %v22 = zext <8 x i16> undef to <8 x i64>
+ %v23 = zext <8 x i32> undef to <8 x i64>
+ %v24 = zext <16 x i8> undef to <16 x i16>
+ %v25 = zext <16 x i8> undef to <16 x i32>
+ %v26 = zext <16 x i8> undef to <16 x i64>
+ %v27 = zext <16 x i16> undef to <16 x i32>
+ %v28 = zext <16 x i16> undef to <16 x i64>
+ %v29 = zext <16 x i32> undef to <16 x i64>
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v0 = zext i8 undef to i16
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v1 = zext i8 undef to i32
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v2 = zext i8 undef to i64
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v3 = zext i16 undef to i32
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v4 = zext i16 undef to i64
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v5 = zext i32 undef to i64
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v6 = zext <2 x i8> undef to <2 x i16>
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v7 = zext <2 x i8> undef to <2 x i32>
+; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %v8 = zext <2 x i8> undef to <2 x i64>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v9 = zext <2 x i16> undef to <2 x i32>
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v10 = zext <2 x i16> undef to <2 x i64>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v11 = zext <2 x i32> undef to <2 x i64>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v12 = zext <4 x i8> undef to <4 x i16>
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v13 = zext <4 x i8> undef to <4 x i32>
+; CHECK: Cost Model: Found an estimated cost of 7 for instruction: %v14 = zext <4 x i8> undef to <4 x i64>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v15 = zext <4 x i16> undef to <4 x i32>
+; CHECK: Cost Model: Found an estimated cost of 5 for instruction: %v16 = zext <4 x i16> undef to <4 x i64>
+; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %v17 = zext <4 x i32> undef to <4 x i64>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v18 = zext <8 x i8> undef to <8 x i16>
+; CHECK: Cost Model: Found an estimated cost of 5 for instruction: %v19 = zext <8 x i8> undef to <8 x i32>
+; CHECK: Cost Model: Found an estimated cost of 15 for instruction: %v20 = zext <8 x i8> undef to <8 x i64>
+; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %v21 = zext <8 x i16> undef to <8 x i32>
+; CHECK: Cost Model: Found an estimated cost of 11 for instruction: %v22 = zext <8 x i16> undef to <8 x i64>
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %v23 = zext <8 x i32> undef to <8 x i64>
+; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %v24 = zext <16 x i8> undef to <16 x i16>
+; CHECK: Cost Model: Found an estimated cost of 11 for instruction: %v25 = zext <16 x i8> undef to <16 x i32>
+; CHECK: Cost Model: Found an estimated cost of 31 for instruction: %v26 = zext <16 x i8> undef to <16 x i64>
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %v27 = zext <16 x i16> undef to <16 x i32>
+; CHECK: Cost Model: Found an estimated cost of 22 for instruction: %v28 = zext <16 x i16> undef to <16 x i64>
+; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %v29 = zext <16 x i32> undef to <16 x i64>
+
+ ret void
+}
+
+define void @trunc() {
+ %v0 = trunc i16 undef to i8
+ %v1 = trunc i32 undef to i16
+ %v2 = trunc i32 undef to i8
+ %v3 = trunc i64 undef to i32
+ %v4 = trunc i64 undef to i16
+ %v5 = trunc i64 undef to i8
+ %v6 = trunc <2 x i16> undef to <2 x i8>
+ %v7 = trunc <2 x i32> undef to <2 x i16>
+ %v8 = trunc <2 x i32> undef to <2 x i8>
+ %v9 = trunc <2 x i64> undef to <2 x i32>
+ %v10 = trunc <2 x i64> undef to <2 x i16>
+ %v11 = trunc <2 x i64> undef to <2 x i8>
+ %v12 = trunc <4 x i16> undef to <4 x i8>
+ %v13 = trunc <4 x i32> undef to <4 x i16>
+ %v14 = trunc <4 x i32> undef to <4 x i8>
+ %v15 = trunc <4 x i64> undef to <4 x i32>
+ %v16 = trunc <4 x i64> undef to <4 x i16>
+ %v17 = trunc <4 x i64> undef to <4 x i8>
+ %v18 = trunc <8 x i16> undef to <8 x i8>
+ %v19 = trunc <8 x i32> undef to <8 x i16>
+ %v20 = trunc <8 x i32> undef to <8 x i8>
+ %v21 = trunc <8 x i64> undef to <8 x i32>
+ %v22 = trunc <8 x i64> undef to <8 x i16>
+ %v23 = trunc <8 x i64> undef to <8 x i8>
+ %v24 = trunc <16 x i16> undef to <16 x i8>
+ %v25 = trunc <16 x i32> undef to <16 x i16>
+ %v26 = trunc <16 x i32> undef to <16 x i8>
+ %v27 = trunc <16 x i64> undef to <16 x i32>
+ %v28 = trunc <16 x i64> undef to <16 x i16>
+ %v29 = trunc <16 x i64> undef to <16 x i8>
+
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %v0 = trunc i16 undef to i8
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %v1 = trunc i32 undef to i16
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %v2 = trunc i32 undef to i8
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %v3 = trunc i64 undef to i32
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %v4 = trunc i64 undef to i16
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %v5 = trunc i64 undef to i8
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v6 = trunc <2 x i16> undef to <2 x i8>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v7 = trunc <2 x i32> undef to <2 x i16>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v8 = trunc <2 x i32> undef to <2 x i8>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v9 = trunc <2 x i64> undef to <2 x i32>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v10 = trunc <2 x i64> undef to <2 x i16>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v11 = trunc <2 x i64> undef to <2 x i8>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v12 = trunc <4 x i16> undef to <4 x i8>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v13 = trunc <4 x i32> undef to <4 x i16>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v14 = trunc <4 x i32> undef to <4 x i8>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v15 = trunc <4 x i64> undef to <4 x i32>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v16 = trunc <4 x i64> undef to <4 x i16>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v17 = trunc <4 x i64> undef to <4 x i8>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v18 = trunc <8 x i16> undef to <8 x i8>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v19 = trunc <8 x i32> undef to <8 x i16>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v20 = trunc <8 x i32> undef to <8 x i8>
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v21 = trunc <8 x i64> undef to <8 x i32>
+; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %v22 = trunc <8 x i64> undef to <8 x i16>
+; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %v23 = trunc <8 x i64> undef to <8 x i8>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %v24 = trunc <16 x i16> undef to <16 x i8>
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %v25 = trunc <16 x i32> undef to <16 x i16>
+; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %v26 = trunc <16 x i32> undef to <16 x i8>
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %v27 = trunc <16 x i64> undef to <16 x i32>
+; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %v28 = trunc <16 x i64> undef to <16 x i16>
+; CHECK: Cost Model: Found an estimated cost of 7 for instruction: %v29 = trunc <16 x i64> undef to <16 x i8>
+
+ ret void
+}
diff --git a/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll b/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll
new file mode 100644
index 000000000000..ff5b2a2053f0
--- /dev/null
+++ b/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll
@@ -0,0 +1,66 @@
+; RUN: opt < %s -O3 -S -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+;
+; Regression test for a crash in getIntrinsicInstrCost().
+; Don't call getScalarizationOverhead(RetTy, true, false) if RetTy is the
+; void type; a void result has no elements whose insertion could be costed.
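+;
+; One plausible shape of such a guard (a hypothetical C++ sketch, not the
+; actual LLVM source):
+;
+;   if (!RetTy->isVoidTy())
+;     ScalarizationCost += getScalarizationOverhead(RetTy, /*Insert=*/true,
+;                                                   /*Extract=*/false);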
+
+%"class.llvm::SDNode.310.1762.9990.10474.10958.11442.11926.12410.12894.13378.13862.15314.15798.16282.17734.19186.21122.25962.26930.29350.29834.30318.30802.31286.31770.32254.32738.33706.36610.38062.41642" = type <{ %"class.llvm::FoldingSetImpl::Node.298.1750.9978.10462.10946.11430.11914.12398.12882.13366.13850.15302.15786.16270.17722.19174.21110.25950.26918.29338.29822.30306.30790.31274.31758.32242.32726.33694.36598.38050.41625", %"class.llvm::ilist_node.228.300.1752.9980.10464.10948.11432.11916.12400.12884.13368.13852.15304.15788.16272.17724.19176.21112.25952.26920.29340.29824.30308.30792.31276.31760.32244.32728.33696.36600.38052.41628", i16, %union.anon.230.302.1754.9982.10466.10950.11434.11918.12402.12886.13370.13854.15306.15790.16274.17726.19178.21114.25954.26922.29342.29826.30310.30794.31278.31762.32246.32730.33698.36602.38054.41630, i32, %"class.llvm::SDUse.304.1756.9984.10468.10952.11436.11920.12404.12888.13372.13856.15308.15792.16276.17728.19180.21116.25956.26924.29344.29828.30312.30796.31280.31764.32248.32732.33700.36604.38056.41632"*, %"struct.llvm::EVT.305.1757.9985.10469.10953.11437.11921.12405.12889.13373.13857.15309.15793.16277.17729.19181.21117.25957.26925.29345.29829.30313.30797.31281.31765.32249.32733.33701.36605.38057.41637"*, %"class.llvm::SDUse.304.1756.9984.10468.10952.11436.11920.12404.12888.13372.13856.15308.15792.16276.17728.19180.21116.25956.26924.29344.29828.30312.30796.31280.31764.32248.32732.33700.36604.38056.41632"*, i16, i16, i32, %"class.llvm::DebugLoc.309.1761.9989.10473.10957.11441.11925.12409.12893.13377.13861.15313.15797.16281.17733.19185.21121.25961.26929.29349.29833.30317.30801.31285.31769.32253.32737.33705.36609.38061.41641", i16, [6 x i8] }>
+%"class.llvm::FoldingSetImpl::Node.298.1750.9978.10462.10946.11430.11914.12398.12882.13366.13850.15302.15786.16270.17722.19174.21110.25950.26918.29338.29822.30306.30790.31274.31758.32242.32726.33694.36598.38050.41625" = type { i8* }
+%"class.llvm::ilist_node.228.300.1752.9980.10464.10948.11432.11916.12400.12884.13368.13852.15304.15788.16272.17724.19176.21112.25952.26920.29340.29824.30308.30792.31276.31760.32244.32728.33696.36600.38052.41628" = type { %"class.llvm::ilist_node_impl.229.299.1751.9979.10463.10947.11431.11915.12399.12883.13367.13851.15303.15787.16271.17723.19175.21111.25951.26919.29339.29823.30307.30791.31275.31759.32243.32727.33695.36599.38051.41627" }
+%"class.llvm::ilist_node_impl.229.299.1751.9979.10463.10947.11431.11915.12399.12883.13367.13851.15303.15787.16271.17723.19175.21111.25951.26919.29339.29823.30307.30791.31275.31759.32243.32727.33695.36599.38051.41627" = type { %"class.llvm::ilist_node_base.83.1535.9763.10247.10731.11215.11699.12183.12667.13151.13635.15087.15571.16055.17507.18959.20895.25735.26703.29123.29607.30091.30575.31059.31543.32027.32511.33479.36383.37835.41626" }
+%"class.llvm::ilist_node_base.83.1535.9763.10247.10731.11215.11699.12183.12667.13151.13635.15087.15571.16055.17507.18959.20895.25735.26703.29123.29607.30091.30575.31059.31543.32027.32511.33479.36383.37835.41626" = type { %"class.llvm::ilist_node_base.83.1535.9763.10247.10731.11215.11699.12183.12667.13151.13635.15087.15571.16055.17507.18959.20895.25735.26703.29123.29607.30091.30575.31059.31543.32027.32511.33479.36383.37835.41626"*, %"class.llvm::ilist_node_base.83.1535.9763.10247.10731.11215.11699.12183.12667.13151.13635.15087.15571.16055.17507.18959.20895.25735.26703.29123.29607.30091.30575.31059.31543.32027.32511.33479.36383.37835.41626"* }
+%union.anon.230.302.1754.9982.10466.10950.11434.11918.12402.12886.13370.13854.15306.15790.16274.17726.19178.21114.25954.26922.29342.29826.30310.30794.31278.31762.32246.32730.33698.36602.38054.41630 = type { %"class.llvm::SDNode::LSBaseSDNodeBitfields.301.1753.9981.10465.10949.11433.11917.12401.12885.13369.13853.15305.15789.16273.17725.19177.21113.25953.26921.29341.29825.30309.30793.31277.31761.32245.32729.33697.36601.38053.41629" }
+%"class.llvm::SDNode::LSBaseSDNodeBitfields.301.1753.9981.10465.10949.11433.11917.12401.12885.13369.13853.15305.15789.16273.17725.19177.21113.25953.26921.29341.29825.30309.30793.31277.31761.32245.32729.33697.36601.38053.41629" = type { i16 }
+%"struct.llvm::EVT.305.1757.9985.10469.10953.11437.11921.12405.12889.13373.13857.15309.15793.16277.17729.19181.21117.25957.26925.29345.29829.30313.30797.31281.31765.32249.32733.33701.36605.38057.41637" = type { %"class.llvm::MVT.62.1514.9742.10226.10710.11194.11678.12162.12646.13130.13614.15066.15550.16034.17486.18938.20874.25714.26682.29102.29586.30070.30554.31038.31522.32006.32490.33458.36362.37814.41633", %"class.llvm::Type.77.1529.9757.10241.10725.11209.11693.12177.12661.13145.13629.15081.15565.16049.17501.18953.20889.25729.26697.29117.29601.30085.30569.31053.31537.32021.32505.33473.36377.37829.41636"* }
+%"class.llvm::MVT.62.1514.9742.10226.10710.11194.11678.12162.12646.13130.13614.15066.15550.16034.17486.18938.20874.25714.26682.29102.29586.30070.30554.31038.31522.32006.32490.33458.36362.37814.41633" = type { i8 }
+%"class.llvm::Type.77.1529.9757.10241.10725.11209.11693.12177.12661.13145.13629.15081.15565.16049.17501.18953.20889.25729.26697.29117.29601.30085.30569.31053.31537.32021.32505.33473.36377.37829.41636" = type { %"class.llvm::LLVMContext.76.1528.9756.10240.10724.11208.11692.12176.12660.13144.13628.15080.15564.16048.17500.18952.20888.25728.26696.29116.29600.30084.30568.31052.31536.32020.32504.33472.36376.37828.41635"*, i32, i32, %"class.llvm::Type.77.1529.9757.10241.10725.11209.11693.12177.12661.13145.13629.15081.15565.16049.17501.18953.20889.25729.26697.29117.29601.30085.30569.31053.31537.32021.32505.33473.36377.37829.41636"** }
+%"class.llvm::LLVMContext.76.1528.9756.10240.10724.11208.11692.12176.12660.13144.13628.15080.15564.16048.17500.18952.20888.25728.26696.29116.29600.30084.30568.31052.31536.32020.32504.33472.36376.37828.41635" = type { %"class.llvm::LLVMContextImpl.75.1527.9755.10239.10723.11207.11691.12175.12659.13143.13627.15079.15563.16047.17499.18951.20887.25727.26695.29115.29599.30083.30567.31051.31535.32019.32503.33471.36375.37827.41634"* }
+%"class.llvm::LLVMContextImpl.75.1527.9755.10239.10723.11207.11691.12175.12659.13143.13627.15079.15563.16047.17499.18951.20887.25727.26695.29115.29599.30083.30567.31051.31535.32019.32503.33471.36375.37827.41634" = type opaque
+%"class.llvm::SDUse.304.1756.9984.10468.10952.11436.11920.12404.12888.13372.13856.15308.15792.16276.17728.19180.21116.25956.26924.29344.29828.30312.30796.31280.31764.32248.32732.33700.36604.38056.41632" = type { %"class.llvm::SDValue.303.1755.9983.10467.10951.11435.11919.12403.12887.13371.13855.15307.15791.16275.17727.19179.21115.25955.26923.29343.29827.30311.30795.31279.31763.32247.32731.33699.36603.38055.41631", %"class.llvm::SDNode.310.1762.9990.10474.10958.11442.11926.12410.12894.13378.13862.15314.15798.16282.17734.19186.21122.25962.26930.29350.29834.30318.30802.31286.31770.32254.32738.33706.36610.38062.41642"*, %"class.llvm::SDUse.304.1756.9984.10468.10952.11436.11920.12404.12888.13372.13856.15308.15792.16276.17728.19180.21116.25956.26924.29344.29828.30312.30796.31280.31764.32248.32732.33700.36604.38056.41632"**, %"class.llvm::SDUse.304.1756.9984.10468.10952.11436.11920.12404.12888.13372.13856.15308.15792.16276.17728.19180.21116.25956.26924.29344.29828.30312.30796.31280.31764.32248.32732.33700.36604.38056.41632"* }
+%"class.llvm::SDValue.303.1755.9983.10467.10951.11435.11919.12403.12887.13371.13855.15307.15791.16275.17727.19179.21115.25955.26923.29343.29827.30311.30795.31279.31763.32247.32731.33699.36603.38055.41631" = type <{ %"class.llvm::SDNode.310.1762.9990.10474.10958.11442.11926.12410.12894.13378.13862.15314.15798.16282.17734.19186.21122.25962.26930.29350.29834.30318.30802.31286.31770.32254.32738.33706.36610.38062.41642"*, i32, [4 x i8] }>
+%"class.llvm::DebugLoc.309.1761.9989.10473.10957.11441.11925.12409.12893.13377.13861.15313.15797.16281.17733.19185.21121.25961.26929.29349.29833.30317.30801.31285.31769.32253.32737.33705.36609.38061.41641" = type { %"class.llvm::TypedTrackingMDRef.308.1760.9988.10472.10956.11440.11924.12408.12892.13376.13860.15312.15796.16280.17732.19184.21120.25960.26928.29348.29832.30316.30800.31284.31768.32252.32736.33704.36608.38060.41640" }
+%"class.llvm::TypedTrackingMDRef.308.1760.9988.10472.10956.11440.11924.12408.12892.13376.13860.15312.15796.16280.17732.19184.21120.25960.26928.29348.29832.30316.30800.31284.31768.32252.32736.33704.36608.38060.41640" = type { %"class.llvm::TrackingMDRef.307.1759.9987.10471.10955.11439.11923.12407.12891.13375.13859.15311.15795.16279.17731.19183.21119.25959.26927.29347.29831.30315.30799.31283.31767.32251.32735.33703.36607.38059.41639" }
+%"class.llvm::TrackingMDRef.307.1759.9987.10471.10955.11439.11923.12407.12891.13375.13859.15311.15795.16279.17731.19183.21119.25959.26927.29347.29831.30315.30799.31283.31767.32251.32735.33703.36607.38059.41639" = type { %"class.llvm::Metadata.306.1758.9986.10470.10954.11438.11922.12406.12890.13374.13858.15310.15794.16278.17730.19182.21118.25958.26926.29346.29830.30314.30798.31282.31766.32250.32734.33702.36606.38058.41638"* }
+%"class.llvm::Metadata.306.1758.9986.10470.10954.11438.11922.12406.12890.13374.13858.15310.15794.16278.17730.19182.21118.25958.26926.29346.29830.30314.30798.31282.31766.32250.32734.33702.36606.38058.41638" = type { i8, i8, i16, i32 }
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.lifetime.end(i64, i8* nocapture) #0
+
+; Function Attrs: nounwind ssp uwtable
+define hidden void @fun(%"class.llvm::SDNode.310.1762.9990.10474.10958.11442.11926.12410.12894.13378.13862.15314.15798.16282.17734.19186.21122.25962.26930.29350.29834.30318.30802.31286.31770.32254.32738.33706.36610.38062.41642"* %N) #1 align 2 {
+; CHECK: *
+entry:
+ %NumOperands.i = getelementptr inbounds %"class.llvm::SDNode.310.1762.9990.10474.10958.11442.11926.12410.12894.13378.13862.15314.15798.16282.17734.19186.21122.25962.26930.29350.29834.30318.30802.31286.31770.32254.32738.33706.36610.38062.41642", %"class.llvm::SDNode.310.1762.9990.10474.10958.11442.11926.12410.12894.13378.13862.15314.15798.16282.17734.19186.21122.25962.26930.29350.29834.30318.30802.31286.31770.32254.32738.33706.36610.38062.41642"* %N, i64 0, i32 8
+ %0 = load i16, i16* %NumOperands.i, align 8, !tbaa !1
+ br i1 undef, label %for.cond.cleanup, label %for.body.lr.ph
+
+for.body.lr.ph: ; preds = %entry
+ %wide.trip.count192 = zext i16 %0 to i64
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %for.body, %for.body.lr.ph
+ %indvars.iv190 = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next191, %for.body ]
+ call void @llvm.lifetime.end(i64 16, i8* nonnull null)
+ %indvars.iv.next191 = add nuw nsw i64 %indvars.iv190, 1
+ %exitcond193 = icmp eq i64 %indvars.iv.next191, %wide.trip.count192
+ br i1 %exitcond193, label %for.cond.cleanup, label %for.body
+}
+
+attributes #0 = { argmemonly nounwind }
+attributes #1 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"clang version 5.0.0 (trunk 297799) (llvm/trunk 297808)"}
+!1 = !{!2, !3, i64 56}
+!2 = !{!"_ZTSN4llvm6SDNodeE", !3, i64 24, !4, i64 26, !6, i64 28, !7, i64 32, !7, i64 40, !7, i64 48, !3, i64 56, !3, i64 58, !6, i64 60, !8, i64 64, !3, i64 72}
+!3 = !{!"short", !4, i64 0}
+!4 = !{!"omnipotent char", !5, i64 0}
+!5 = !{!"Simple C++ TBAA"}
+!6 = !{!"int", !4, i64 0}
+!7 = !{!"any pointer", !4, i64 0}
+!8 = !{!"_ZTSN4llvm8DebugLocE", !9, i64 0}
+!9 = !{!"_ZTSN4llvm18TypedTrackingMDRefINS_6MDNodeEEE", !10, i64 0}
+!10 = !{!"_ZTSN4llvm13TrackingMDRefE", !7, i64 0}
diff --git a/test/Analysis/CostModel/SystemZ/lit.local.cfg b/test/Analysis/CostModel/SystemZ/lit.local.cfg
new file mode 100644
index 000000000000..2f3cf7d3f043
--- /dev/null
+++ b/test/Analysis/CostModel/SystemZ/lit.local.cfg
@@ -0,0 +1,2 @@
+if 'SystemZ' not in config.root.targets:
+ config.unsupported = True
diff --git a/test/Analysis/CostModel/SystemZ/load_store.ll b/test/Analysis/CostModel/SystemZ/load_store.ll
new file mode 100644
index 000000000000..1ac92292c829
--- /dev/null
+++ b/test/Analysis/CostModel/SystemZ/load_store.ll
@@ -0,0 +1,137 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=systemz-unknown -mcpu=z13 | FileCheck %s
+
+define void @store() {
+ store i8 undef, i8* undef
+ store i16 undef, i16* undef
+ store i32 undef, i32* undef
+ store i64 undef, i64* undef
+ store float undef, float* undef
+ store double undef, double* undef
+ store fp128 undef, fp128* undef
+ store <2 x i8> undef, <2 x i8>* undef
+ store <2 x i16> undef, <2 x i16>* undef
+ store <2 x i32> undef, <2 x i32>* undef
+ store <2 x i64> undef, <2 x i64>* undef
+ store <2 x float> undef, <2 x float>* undef
+ store <2 x double> undef, <2 x double>* undef
+ store <4 x i8> undef, <4 x i8>* undef
+ store <4 x i16> undef, <4 x i16>* undef
+ store <4 x i32> undef, <4 x i32>* undef
+ store <4 x i64> undef, <4 x i64>* undef
+ store <4 x float> undef, <4 x float>* undef
+ store <4 x double> undef, <4 x double>* undef
+ store <8 x i8> undef, <8 x i8>* undef
+ store <8 x i16> undef, <8 x i16>* undef
+ store <8 x i32> undef, <8 x i32>* undef
+ store <8 x i64> undef, <8 x i64>* undef
+ store <8 x float> undef, <8 x float>* undef
+ store <8 x double> undef, <8 x double>* undef
+ store <16 x i8> undef, <16 x i8>* undef
+ store <16 x i16> undef, <16 x i16>* undef
+ store <16 x i32> undef, <16 x i32>* undef
+ store <16 x i64> undef, <16 x i64>* undef
+ store <16 x float> undef, <16 x float>* undef
+ store <16 x double> undef, <16 x double>* undef
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store i8 undef, i8* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store i16 undef, i16* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store i32 undef, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store i64 undef, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store float undef, float* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store double undef, double* undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: store fp128 undef, fp128* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store <2 x i8> undef, <2 x i8>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store <2 x i16> undef, <2 x i16>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store <2 x i32> undef, <2 x i32>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store <2 x i64> undef, <2 x i64>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store <2 x float> undef, <2 x float>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store <2 x double> undef, <2 x double>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store <4 x i8> undef, <4 x i8>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store <4 x i16> undef, <4 x i16>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store <4 x i32> undef, <4 x i32>* undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: store <4 x i64> undef, <4 x i64>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store <4 x float> undef, <4 x float>* undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: store <4 x double> undef, <4 x double>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store <8 x i8> undef, <8 x i8>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store <8 x i16> undef, <8 x i16>* undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: store <8 x i32> undef, <8 x i32>* undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: store <8 x i64> undef, <8 x i64>* undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: store <8 x float> undef, <8 x float>* undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: store <8 x double> undef, <8 x double>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: store <16 x i8> undef, <16 x i8>* undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: store <16 x i16> undef, <16 x i16>* undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: store <16 x i32> undef, <16 x i32>* undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: store <16 x i64> undef, <16 x i64>* undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: store <16 x float> undef, <16 x float>* undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: store <16 x double> undef, <16 x double>* undef
+
+ ret void;
+}
+
+define void @load() {
+ load i8, i8* undef
+ load i16, i16* undef
+ load i32, i32* undef
+ load i64, i64* undef
+ load float, float* undef
+ load double, double* undef
+ load fp128, fp128* undef
+ load <2 x i8>, <2 x i8>* undef
+ load <2 x i16>, <2 x i16>* undef
+ load <2 x i32>, <2 x i32>* undef
+ load <2 x i64>, <2 x i64>* undef
+ load <2 x float>, <2 x float>* undef
+ load <2 x double>, <2 x double>* undef
+ load <4 x i8>, <4 x i8>* undef
+ load <4 x i16>, <4 x i16>* undef
+ load <4 x i32>, <4 x i32>* undef
+ load <4 x i64>, <4 x i64>* undef
+ load <4 x float>, <4 x float>* undef
+ load <4 x double>, <4 x double>* undef
+ load <8 x i8>, <8 x i8>* undef
+ load <8 x i16>, <8 x i16>* undef
+ load <8 x i32>, <8 x i32>* undef
+ load <8 x i64>, <8 x i64>* undef
+ load <8 x float>, <8 x float>* undef
+ load <8 x double>, <8 x double>* undef
+ load <16 x i8>, <16 x i8>* undef
+ load <16 x i16>, <16 x i16>* undef
+ load <16 x i32>, <16 x i32>* undef
+ load <16 x i64>, <16 x i64>* undef
+ load <16 x float>, <16 x float>* undef
+ load <16 x double>, <16 x double>* undef
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %1 = load i8, i8* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %2 = load i16, i16* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %3 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %4 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %5 = load float, float* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %6 = load double, double* undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %7 = load fp128, fp128* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %8 = load <2 x i8>, <2 x i8>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %9 = load <2 x i16>, <2 x i16>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %10 = load <2 x i32>, <2 x i32>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %11 = load <2 x i64>, <2 x i64>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %12 = load <2 x float>, <2 x float>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %13 = load <2 x double>, <2 x double>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %14 = load <4 x i8>, <4 x i8>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %15 = load <4 x i16>, <4 x i16>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %16 = load <4 x i32>, <4 x i32>* undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %17 = load <4 x i64>, <4 x i64>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %18 = load <4 x float>, <4 x float>* undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %19 = load <4 x double>, <4 x double>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %20 = load <8 x i8>, <8 x i8>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %21 = load <8 x i16>, <8 x i16>* undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %22 = load <8 x i32>, <8 x i32>* undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %23 = load <8 x i64>, <8 x i64>* undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %24 = load <8 x float>, <8 x float>* undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %25 = load <8 x double>, <8 x double>* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %26 = load <16 x i8>, <16 x i8>* undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %27 = load <16 x i16>, <16 x i16>* undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %28 = load <16 x i32>, <16 x i32>* undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %29 = load <16 x i64>, <16 x i64>* undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %30 = load <16 x float>, <16 x float>* undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %31 = load <16 x double>, <16 x double>* undef
+
+ ret void;
+}
diff --git a/test/Analysis/CostModel/SystemZ/logical.ll b/test/Analysis/CostModel/SystemZ/logical.ll
new file mode 100644
index 000000000000..41984e0a29c4
--- /dev/null
+++ b/test/Analysis/CostModel/SystemZ/logical.ll
@@ -0,0 +1,277 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=systemz-unknown -mcpu=z13 | FileCheck %s
+
+define void @and() {
+ %res0 = and i8 undef, undef
+ %res1 = and i16 undef, undef
+ %res2 = and i32 undef, undef
+ %res3 = and i64 undef, undef
+ %res4 = and <2 x i8> undef, undef
+ %res5 = and <2 x i16> undef, undef
+ %res6 = and <2 x i32> undef, undef
+ %res7 = and <2 x i64> undef, undef
+ %res8 = and <4 x i8> undef, undef
+ %res9 = and <4 x i16> undef, undef
+ %res10 = and <4 x i32> undef, undef
+ %res11 = and <4 x i64> undef, undef
+ %res12 = and <8 x i8> undef, undef
+ %res13 = and <8 x i16> undef, undef
+ %res14 = and <8 x i32> undef, undef
+ %res15 = and <8 x i64> undef, undef
+ %res16 = and <16 x i8> undef, undef
+ %res17 = and <16 x i16> undef, undef
+ %res18 = and <16 x i32> undef, undef
+ %res19 = and <16 x i64> undef, undef
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res0 = and i8 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res1 = and i16 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res2 = and i32 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res3 = and i64 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res4 = and <2 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res5 = and <2 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res6 = and <2 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res7 = and <2 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res8 = and <4 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res9 = and <4 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res10 = and <4 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res11 = and <4 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res12 = and <8 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res13 = and <8 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res14 = and <8 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res15 = and <8 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res16 = and <16 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res17 = and <16 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res18 = and <16 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res19 = and <16 x i64> undef, undef
+
+ ret void;
+}
+
+define void @ashr() {
+ %res0 = ashr i8 undef, undef
+ %res1 = ashr i16 undef, undef
+ %res2 = ashr i32 undef, undef
+ %res3 = ashr i64 undef, undef
+ %res4 = ashr <2 x i8> undef, undef
+ %res5 = ashr <2 x i16> undef, undef
+ %res6 = ashr <2 x i32> undef, undef
+ %res7 = ashr <2 x i64> undef, undef
+ %res8 = ashr <4 x i8> undef, undef
+ %res9 = ashr <4 x i16> undef, undef
+ %res10 = ashr <4 x i32> undef, undef
+ %res11 = ashr <4 x i64> undef, undef
+ %res12 = ashr <8 x i8> undef, undef
+ %res13 = ashr <8 x i16> undef, undef
+ %res14 = ashr <8 x i32> undef, undef
+ %res15 = ashr <8 x i64> undef, undef
+ %res16 = ashr <16 x i8> undef, undef
+ %res17 = ashr <16 x i16> undef, undef
+ %res18 = ashr <16 x i32> undef, undef
+ %res19 = ashr <16 x i64> undef, undef
+
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res0 = ashr i8 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res1 = ashr i16 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res2 = ashr i32 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res3 = ashr i64 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res4 = ashr <2 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res5 = ashr <2 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res6 = ashr <2 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res7 = ashr <2 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res8 = ashr <4 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res9 = ashr <4 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res10 = ashr <4 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res11 = ashr <4 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res12 = ashr <8 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res13 = ashr <8 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res14 = ashr <8 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res15 = ashr <8 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res16 = ashr <16 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res17 = ashr <16 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res18 = ashr <16 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res19 = ashr <16 x i64> undef, undef
+
+ ret void;
+}
+
+define void @lshr() {
+ %res0 = lshr i8 undef, undef
+ %res1 = lshr i16 undef, undef
+ %res2 = lshr i32 undef, undef
+ %res3 = lshr i64 undef, undef
+ %res4 = lshr <2 x i8> undef, undef
+ %res5 = lshr <2 x i16> undef, undef
+ %res6 = lshr <2 x i32> undef, undef
+ %res7 = lshr <2 x i64> undef, undef
+ %res8 = lshr <4 x i8> undef, undef
+ %res9 = lshr <4 x i16> undef, undef
+ %res10 = lshr <4 x i32> undef, undef
+ %res11 = lshr <4 x i64> undef, undef
+ %res12 = lshr <8 x i8> undef, undef
+ %res13 = lshr <8 x i16> undef, undef
+ %res14 = lshr <8 x i32> undef, undef
+ %res15 = lshr <8 x i64> undef, undef
+ %res16 = lshr <16 x i8> undef, undef
+ %res17 = lshr <16 x i16> undef, undef
+ %res18 = lshr <16 x i32> undef, undef
+ %res19 = lshr <16 x i64> undef, undef
+
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res0 = lshr i8 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res1 = lshr i16 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res2 = lshr i32 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res3 = lshr i64 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res4 = lshr <2 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res5 = lshr <2 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res6 = lshr <2 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res7 = lshr <2 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res8 = lshr <4 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res9 = lshr <4 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res10 = lshr <4 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res11 = lshr <4 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res12 = lshr <8 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res13 = lshr <8 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res14 = lshr <8 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res15 = lshr <8 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res16 = lshr <16 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res17 = lshr <16 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res18 = lshr <16 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res19 = lshr <16 x i64> undef, undef
+
+ ret void;
+}
+
+define void @or() {
+ %res0 = or i8 undef, undef
+ %res1 = or i16 undef, undef
+ %res2 = or i32 undef, undef
+ %res3 = or i64 undef, undef
+ %res4 = or <2 x i8> undef, undef
+ %res5 = or <2 x i16> undef, undef
+ %res6 = or <2 x i32> undef, undef
+ %res7 = or <2 x i64> undef, undef
+ %res8 = or <4 x i8> undef, undef
+ %res9 = or <4 x i16> undef, undef
+ %res10 = or <4 x i32> undef, undef
+ %res11 = or <4 x i64> undef, undef
+ %res12 = or <8 x i8> undef, undef
+ %res13 = or <8 x i16> undef, undef
+ %res14 = or <8 x i32> undef, undef
+ %res15 = or <8 x i64> undef, undef
+ %res16 = or <16 x i8> undef, undef
+ %res17 = or <16 x i16> undef, undef
+ %res18 = or <16 x i32> undef, undef
+ %res19 = or <16 x i64> undef, undef
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res0 = or i8 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res1 = or i16 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res2 = or i32 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res3 = or i64 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res4 = or <2 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res5 = or <2 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res6 = or <2 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res7 = or <2 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res8 = or <4 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res9 = or <4 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res10 = or <4 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res11 = or <4 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res12 = or <8 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res13 = or <8 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res14 = or <8 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res15 = or <8 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res16 = or <16 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res17 = or <16 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res18 = or <16 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res19 = or <16 x i64> undef, undef
+
+ ret void;
+}
+
+define void @shl() {
+ %res0 = shl i8 undef, undef
+ %res1 = shl i16 undef, undef
+ %res2 = shl i32 undef, undef
+ %res3 = shl i64 undef, undef
+ %res4 = shl <2 x i8> undef, undef
+ %res5 = shl <2 x i16> undef, undef
+ %res6 = shl <2 x i32> undef, undef
+ %res7 = shl <2 x i64> undef, undef
+ %res8 = shl <4 x i8> undef, undef
+ %res9 = shl <4 x i16> undef, undef
+ %res10 = shl <4 x i32> undef, undef
+ %res11 = shl <4 x i64> undef, undef
+ %res12 = shl <8 x i8> undef, undef
+ %res13 = shl <8 x i16> undef, undef
+ %res14 = shl <8 x i32> undef, undef
+ %res15 = shl <8 x i64> undef, undef
+ %res16 = shl <16 x i8> undef, undef
+ %res17 = shl <16 x i16> undef, undef
+ %res18 = shl <16 x i32> undef, undef
+ %res19 = shl <16 x i64> undef, undef
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res0 = shl i8 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res1 = shl i16 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res2 = shl i32 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res3 = shl i64 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res4 = shl <2 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res5 = shl <2 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res6 = shl <2 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res7 = shl <2 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res8 = shl <4 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res9 = shl <4 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res10 = shl <4 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res11 = shl <4 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res12 = shl <8 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res13 = shl <8 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res14 = shl <8 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res15 = shl <8 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res16 = shl <16 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res17 = shl <16 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res18 = shl <16 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res19 = shl <16 x i64> undef, undef
+
+ ret void;
+}
+
+define void @xor() {
+ %res0 = xor i8 undef, undef
+ %res1 = xor i16 undef, undef
+ %res2 = xor i32 undef, undef
+ %res3 = xor i64 undef, undef
+ %res4 = xor <2 x i8> undef, undef
+ %res5 = xor <2 x i16> undef, undef
+ %res6 = xor <2 x i32> undef, undef
+ %res7 = xor <2 x i64> undef, undef
+ %res8 = xor <4 x i8> undef, undef
+ %res9 = xor <4 x i16> undef, undef
+ %res10 = xor <4 x i32> undef, undef
+ %res11 = xor <4 x i64> undef, undef
+ %res12 = xor <8 x i8> undef, undef
+ %res13 = xor <8 x i16> undef, undef
+ %res14 = xor <8 x i32> undef, undef
+ %res15 = xor <8 x i64> undef, undef
+ %res16 = xor <16 x i8> undef, undef
+ %res17 = xor <16 x i16> undef, undef
+ %res18 = xor <16 x i32> undef, undef
+ %res19 = xor <16 x i64> undef, undef
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res0 = xor i8 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res1 = xor i16 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res2 = xor i32 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res3 = xor i64 undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res4 = xor <2 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res5 = xor <2 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res6 = xor <2 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res7 = xor <2 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res8 = xor <4 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res9 = xor <4 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res10 = xor <4 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res11 = xor <4 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res12 = xor <8 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res13 = xor <8 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res14 = xor <8 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res15 = xor <8 x i64> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res16 = xor <16 x i8> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res17 = xor <16 x i16> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res18 = xor <16 x i32> undef, undef
+; CHECK: Cost Model: Found an estimated cost of 8 for instruction: %res19 = xor <16 x i64> undef, undef
+
+ ret void;
+}
diff --git a/test/Analysis/CostModel/SystemZ/memop-folding-int-arith.ll b/test/Analysis/CostModel/SystemZ/memop-folding-int-arith.ll
new file mode 100644
index 000000000000..1b6a50d303f2
--- /dev/null
+++ b/test/Analysis/CostModel/SystemZ/memop-folding-int-arith.ll
@@ -0,0 +1,259 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=systemz-unknown -mcpu=z13 | FileCheck %s
+;
+; Test that a load feeding an operation which can fold one memory operand is
+; given zero cost. When both operands are loaded, only one of the loads can be
+; folded, so the other one should still be assigned a cost.
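+; (SystemZ has RX-form arithmetic instructions such as A, AG, N, O and X that
+; take their second operand directly from memory, which is what makes a folded
+; load free.)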
+
+define void @add() {
+ %li32 = load i32, i32* undef
+ add i32 %li32, undef
+
+ %li32_0 = load i32, i32* undef
+ %li32_1 = load i32, i32* undef
+ add i32 %li32_0, %li32_1
+
+ %li64 = load i64, i64* undef
+ add i64 %li64, undef
+
+ %li64_0 = load i64, i64* undef
+ %li64_1 = load i64, i64* undef
+ add i64 %li64_0, %li64_1
+
+ ret void;
+
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %1 = add i32 %li32, undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32_0 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li32_1 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %2 = add i32 %li32_0, %li32_1
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %3 = add i64 %li64, undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64_0 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li64_1 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %4 = add i64 %li64_0, %li64_1
+}
+
+define void @sub() {
+ %li32 = load i32, i32* undef
+ sub i32 %li32, undef
+
+ %li32_0 = load i32, i32* undef
+ %li32_1 = load i32, i32* undef
+ sub i32 %li32_0, %li32_1
+
+ %li64 = load i64, i64* undef
+ sub i64 %li64, undef
+
+ %li64_0 = load i64, i64* undef
+ %li64_1 = load i64, i64* undef
+ sub i64 %li64_0, %li64_1
+
+ ret void;
+
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %1 = sub i32 %li32, undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32_0 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li32_1 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %2 = sub i32 %li32_0, %li32_1
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %3 = sub i64 %li64, undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64_0 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li64_1 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %4 = sub i64 %li64_0, %li64_1
+}
+
+define void @mul() {
+ %li32 = load i32, i32* undef
+ mul i32 %li32, undef
+
+ %li32_0 = load i32, i32* undef
+ %li32_1 = load i32, i32* undef
+ mul i32 %li32_0, %li32_1
+
+ %li64 = load i64, i64* undef
+ mul i64 %li64, undef
+
+ %li64_0 = load i64, i64* undef
+ %li64_1 = load i64, i64* undef
+ mul i64 %li64_0, %li64_1
+
+ ret void;
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %1 = mul i32 %li32, undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32_0 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li32_1 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %2 = mul i32 %li32_0, %li32_1
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %3 = mul i64 %li64, undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64_0 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li64_1 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %4 = mul i64 %li64_0, %li64_1
+}
+
+define void @sdiv() {
+ %li32 = load i32, i32* undef
+ sdiv i32 %li32, undef
+
+ %li32_0 = load i32, i32* undef
+ %li32_1 = load i32, i32* undef
+ sdiv i32 %li32_0, %li32_1
+
+ %li64 = load i64, i64* undef
+ sdiv i64 %li64, undef
+
+ %li64_0 = load i64, i64* undef
+ %li64_1 = load i64, i64* undef
+ sdiv i64 %li64_0, %li64_1
+
+ ret void;
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %1 = sdiv i32 %li32, undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32_0 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li32_1 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %2 = sdiv i32 %li32_0, %li32_1
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %3 = sdiv i64 %li64, undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64_0 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li64_1 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %4 = sdiv i64 %li64_0, %li64_1
+}
+
+define void @udiv() {
+ %li32 = load i32, i32* undef
+ udiv i32 %li32, undef
+
+ %li32_0 = load i32, i32* undef
+ %li32_1 = load i32, i32* undef
+ udiv i32 %li32_0, %li32_1
+
+ %li64 = load i64, i64* undef
+ udiv i64 %li64, undef
+
+ %li64_0 = load i64, i64* undef
+ %li64_1 = load i64, i64* undef
+ udiv i64 %li64_0, %li64_1
+
+ ret void;
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %1 = udiv i32 %li32, undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32_0 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li32_1 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %2 = udiv i32 %li32_0, %li32_1
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %3 = udiv i64 %li64, undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64_0 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li64_1 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %4 = udiv i64 %li64_0, %li64_1
+}
+
+define void @and() {
+ %li32 = load i32, i32* undef
+ and i32 %li32, undef
+
+ %li32_0 = load i32, i32* undef
+ %li32_1 = load i32, i32* undef
+ and i32 %li32_0, %li32_1
+
+ %li64 = load i64, i64* undef
+ and i64 %li64, undef
+
+ %li64_0 = load i64, i64* undef
+ %li64_1 = load i64, i64* undef
+ and i64 %li64_0, %li64_1
+
+ ret void;
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %1 = and i32 %li32, undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32_0 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li32_1 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %2 = and i32 %li32_0, %li32_1
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %3 = and i64 %li64, undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64_0 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li64_1 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %4 = and i64 %li64_0, %li64_1
+}
+
+define void @or() {
+ %li32 = load i32, i32* undef
+ or i32 %li32, undef
+
+ %li32_0 = load i32, i32* undef
+ %li32_1 = load i32, i32* undef
+ or i32 %li32_0, %li32_1
+
+ %li64 = load i64, i64* undef
+ or i64 %li64, undef
+
+ %li64_0 = load i64, i64* undef
+ %li64_1 = load i64, i64* undef
+ or i64 %li64_0, %li64_1
+
+ ret void;
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %1 = or i32 %li32, undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32_0 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li32_1 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %2 = or i32 %li32_0, %li32_1
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %3 = or i64 %li64, undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64_0 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li64_1 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %4 = or i64 %li64_0, %li64_1
+}
+
+define void @xor() {
+ %li32 = load i32, i32* undef
+ xor i32 %li32, undef
+
+ %li32_0 = load i32, i32* undef
+ %li32_1 = load i32, i32* undef
+ xor i32 %li32_0, %li32_1
+
+ %li64 = load i64, i64* undef
+ xor i64 %li64, undef
+
+ %li64_0 = load i64, i64* undef
+ %li64_1 = load i64, i64* undef
+ xor i64 %li64_0, %li64_1
+
+ ret void;
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %1 = xor i32 %li32, undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32_0 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li32_1 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %2 = xor i32 %li32_0, %li32_1
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %3 = xor i64 %li64, undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64_0 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li64_1 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %4 = xor i64 %li64_0, %li64_1
+}
+
+define void @icmp() {
+ %li32 = load i32, i32* undef
+ icmp eq i32 %li32, undef
+
+ %li32_0 = load i32, i32* undef
+ %li32_1 = load i32, i32* undef
+ icmp eq i32 %li32_0, %li32_1
+
+ %li64 = load i64, i64* undef
+ icmp eq i64 %li64, undef
+
+ %li64_0 = load i64, i64* undef
+ %li64_1 = load i64, i64* undef
+ icmp eq i64 %li64_0, %li64_1
+
+ ret void;
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %1 = icmp eq i32 %li32, undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32_0 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li32_1 = load i32, i32* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %2 = icmp eq i32 %li32_0, %li32_1
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %3 = icmp eq i64 %li64, undef
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64_0 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li64_1 = load i64, i64* undef
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %4 = icmp eq i64 %li64_0, %li64_1
+}
diff --git a/test/Analysis/CostModel/SystemZ/scalar-cmp-cmp-log-sel.ll b/test/Analysis/CostModel/SystemZ/scalar-cmp-cmp-log-sel.ll
new file mode 100644
index 000000000000..9ba980780ded
--- /dev/null
+++ b/test/Analysis/CostModel/SystemZ/scalar-cmp-cmp-log-sel.ll
@@ -0,0 +1,1624 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=systemz-unknown -mcpu=z13 | FileCheck %s
+;
+; TODO: add more tests for differing operand types of the two compares.
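+;
+; The costs below follow a regular pattern: i8/i16 compares are expected to
+; cost 3 (presumably reflecting the extensions needed to reach a legal compare
+; width), i32/i64 and fp compares cost 1, selects of integer values cost 1,
+; and selects of fp values cost 4.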
+
+define i8 @fun0(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
+ i8 %val5, i8 %val6) {
+ %cmp0 = icmp eq i8 %val1, %val2
+ %cmp1 = icmp eq i8 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i8 %val5, i8 %val6
+ ret i8 %sel
+
+; CHECK: fun0
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i8 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
+}
+
+define i16 @fun1(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
+ i16 %val5, i16 %val6) {
+ %cmp0 = icmp eq i8 %val1, %val2
+ %cmp1 = icmp eq i8 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i16 %val5, i16 %val6
+ ret i16 %sel
+
+; CHECK: fun1
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i8 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
+}
+
+define i32 @fun2(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
+ i32 %val5, i32 %val6) {
+ %cmp0 = icmp eq i8 %val1, %val2
+ %cmp1 = icmp eq i8 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i32 %val5, i32 %val6
+ ret i32 %sel
+
+; CHECK: fun2
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i8 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
+}
+
+define i64 @fun3(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
+ i64 %val5, i64 %val6) {
+ %cmp0 = icmp eq i8 %val1, %val2
+ %cmp1 = icmp eq i8 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i64 %val5, i64 %val6
+ ret i64 %sel
+
+; CHECK: fun3
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i8 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i64 %val5, i64 %val6
+}
+
+define float @fun4(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
+ float %val5, float %val6) {
+ %cmp0 = icmp eq i8 %val1, %val2
+ %cmp1 = icmp eq i8 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, float %val5, float %val6
+ ret float %sel
+
+; CHECK: fun4
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i8 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, float %val5, float %val6
+}
+
+define double @fun5(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
+ double %val5, double %val6) {
+ %cmp0 = icmp eq i8 %val1, %val2
+ %cmp1 = icmp eq i8 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, double %val5, double %val6
+ ret double %sel
+
+; CHECK: fun5
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i8 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, double %val5, double %val6
+}
+
+define i8 @fun6(i16 %val1, i16 %val2, i16 %val3, i16 %val4,
+ i8 %val5, i8 %val6) {
+ %cmp0 = icmp eq i16 %val1, %val2
+ %cmp1 = icmp eq i16 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i8 %val5, i8 %val6
+ ret i8 %sel
+
+; CHECK: fun6
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i16 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
+}
+
+define i16 @fun7(i16 %val1, i16 %val2, i16 %val3, i16 %val4,
+ i16 %val5, i16 %val6) {
+ %cmp0 = icmp eq i16 %val1, %val2
+ %cmp1 = icmp eq i16 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i16 %val5, i16 %val6
+ ret i16 %sel
+
+; CHECK: fun7
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i16 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
+}
+
+define i32 @fun8(i16 %val1, i16 %val2, i16 %val3, i16 %val4,
+ i32 %val5, i32 %val6) {
+ %cmp0 = icmp eq i16 %val1, %val2
+ %cmp1 = icmp eq i16 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i32 %val5, i32 %val6
+ ret i32 %sel
+
+; CHECK: fun8
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i16 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
+}
+
+define i64 @fun9(i16 %val1, i16 %val2, i16 %val3, i16 %val4,
+ i64 %val5, i64 %val6) {
+ %cmp0 = icmp eq i16 %val1, %val2
+ %cmp1 = icmp eq i16 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i64 %val5, i64 %val6
+ ret i64 %sel
+
+; CHECK: fun9
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i16 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i64 %val5, i64 %val6
+}
+
+define float @fun10(i16 %val1, i16 %val2, i16 %val3, i16 %val4,
+ float %val5, float %val6) {
+ %cmp0 = icmp eq i16 %val1, %val2
+ %cmp1 = icmp eq i16 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, float %val5, float %val6
+ ret float %sel
+
+; CHECK: fun10
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i16 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, float %val5, float %val6
+}
+
+define double @fun11(i16 %val1, i16 %val2, i16 %val3, i16 %val4,
+ double %val5, double %val6) {
+ %cmp0 = icmp eq i16 %val1, %val2
+ %cmp1 = icmp eq i16 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, double %val5, double %val6
+ ret double %sel
+
+; CHECK: fun11
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i16 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, double %val5, double %val6
+}
+
+define i8 @fun12(i32 %val1, i32 %val2, i32 %val3, i32 %val4,
+ i8 %val5, i8 %val6) {
+ %cmp0 = icmp eq i32 %val1, %val2
+ %cmp1 = icmp eq i32 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i8 %val5, i8 %val6
+ ret i8 %sel
+
+; CHECK: fun12
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i32 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
+}
+
+define i16 @fun13(i32 %val1, i32 %val2, i32 %val3, i32 %val4,
+ i16 %val5, i16 %val6) {
+ %cmp0 = icmp eq i32 %val1, %val2
+ %cmp1 = icmp eq i32 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i16 %val5, i16 %val6
+ ret i16 %sel
+
+; CHECK: fun13
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i32 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
+}
+
+define i32 @fun14(i32 %val1, i32 %val2, i32 %val3, i32 %val4,
+ i32 %val5, i32 %val6) {
+ %cmp0 = icmp eq i32 %val1, %val2
+ %cmp1 = icmp eq i32 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i32 %val5, i32 %val6
+ ret i32 %sel
+
+; CHECK: fun14
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i32 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
+}
+
+define i64 @fun15(i32 %val1, i32 %val2, i32 %val3, i32 %val4,
+ i64 %val5, i64 %val6) {
+ %cmp0 = icmp eq i32 %val1, %val2
+ %cmp1 = icmp eq i32 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i64 %val5, i64 %val6
+ ret i64 %sel
+
+; CHECK: fun15
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i32 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i64 %val5, i64 %val6
+}
+
+define float @fun16(i32 %val1, i32 %val2, i32 %val3, i32 %val4,
+ float %val5, float %val6) {
+ %cmp0 = icmp eq i32 %val1, %val2
+ %cmp1 = icmp eq i32 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, float %val5, float %val6
+ ret float %sel
+
+; CHECK: fun16
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i32 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, float %val5, float %val6
+}
+
+define double @fun17(i32 %val1, i32 %val2, i32 %val3, i32 %val4,
+ double %val5, double %val6) {
+ %cmp0 = icmp eq i32 %val1, %val2
+ %cmp1 = icmp eq i32 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, double %val5, double %val6
+ ret double %sel
+
+; CHECK: fun17
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i32 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, double %val5, double %val6
+}
+
+define i8 @fun18(i64 %val1, i64 %val2, i64 %val3, i64 %val4,
+ i8 %val5, i8 %val6) {
+ %cmp0 = icmp eq i64 %val1, %val2
+ %cmp1 = icmp eq i64 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i8 %val5, i8 %val6
+ ret i8 %sel
+
+; CHECK: fun18
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i64 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
+}
+
+define i16 @fun19(i64 %val1, i64 %val2, i64 %val3, i64 %val4,
+ i16 %val5, i16 %val6) {
+ %cmp0 = icmp eq i64 %val1, %val2
+ %cmp1 = icmp eq i64 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i16 %val5, i16 %val6
+ ret i16 %sel
+
+; CHECK: fun19
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i64 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
+}
+
+define i32 @fun20(i64 %val1, i64 %val2, i64 %val3, i64 %val4,
+ i32 %val5, i32 %val6) {
+ %cmp0 = icmp eq i64 %val1, %val2
+ %cmp1 = icmp eq i64 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i32 %val5, i32 %val6
+ ret i32 %sel
+
+; CHECK: fun20
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i64 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
+}
+
+define i64 @fun21(i64 %val1, i64 %val2, i64 %val3, i64 %val4,
+ i64 %val5, i64 %val6) {
+ %cmp0 = icmp eq i64 %val1, %val2
+ %cmp1 = icmp eq i64 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i64 %val5, i64 %val6
+ ret i64 %sel
+
+; CHECK: fun21
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i64 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i64 %val5, i64 %val6
+}
+
+define float @fun22(i64 %val1, i64 %val2, i64 %val3, i64 %val4,
+ float %val5, float %val6) {
+ %cmp0 = icmp eq i64 %val1, %val2
+ %cmp1 = icmp eq i64 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, float %val5, float %val6
+ ret float %sel
+
+; CHECK: fun22
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i64 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, float %val5, float %val6
+}
+
+define double @fun23(i64 %val1, i64 %val2, i64 %val3, i64 %val4,
+ double %val5, double %val6) {
+ %cmp0 = icmp eq i64 %val1, %val2
+ %cmp1 = icmp eq i64 %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, double %val5, double %val6
+ ret double %sel
+
+; CHECK: fun23
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i64 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, double %val5, double %val6
+}
+
+define i8 @fun24(float %val1, float %val2, float %val3, float %val4,
+ i8 %val5, i8 %val6) {
+ %cmp0 = fcmp ogt float %val1, %val2
+ %cmp1 = fcmp ogt float %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i8 %val5, i8 %val6
+ ret i8 %sel
+
+; CHECK: fun24
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt float %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
+}
+
+define i16 @fun25(float %val1, float %val2, float %val3, float %val4,
+ i16 %val5, i16 %val6) {
+ %cmp0 = fcmp ogt float %val1, %val2
+ %cmp1 = fcmp ogt float %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i16 %val5, i16 %val6
+ ret i16 %sel
+
+; CHECK: fun25
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt float %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
+}
+
+define i32 @fun26(float %val1, float %val2, float %val3, float %val4,
+ i32 %val5, i32 %val6) {
+ %cmp0 = fcmp ogt float %val1, %val2
+ %cmp1 = fcmp ogt float %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i32 %val5, i32 %val6
+ ret i32 %sel
+
+; CHECK: fun26
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt float %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
+}
+
+define i64 @fun27(float %val1, float %val2, float %val3, float %val4,
+ i64 %val5, i64 %val6) {
+ %cmp0 = fcmp ogt float %val1, %val2
+ %cmp1 = fcmp ogt float %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i64 %val5, i64 %val6
+ ret i64 %sel
+
+; CHECK: fun27
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt float %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i64 %val5, i64 %val6
+}
+
+define float @fun28(float %val1, float %val2, float %val3, float %val4,
+ float %val5, float %val6) {
+ %cmp0 = fcmp ogt float %val1, %val2
+ %cmp1 = fcmp ogt float %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, float %val5, float %val6
+ ret float %sel
+
+; CHECK: fun28
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt float %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, float %val5, float %val6
+}
+
+define double @fun29(float %val1, float %val2, float %val3, float %val4,
+ double %val5, double %val6) {
+ %cmp0 = fcmp ogt float %val1, %val2
+ %cmp1 = fcmp ogt float %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, double %val5, double %val6
+ ret double %sel
+
+; CHECK: fun29
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt float %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, double %val5, double %val6
+}
+
+define i8 @fun30(double %val1, double %val2, double %val3, double %val4,
+ i8 %val5, i8 %val6) {
+ %cmp0 = fcmp ogt double %val1, %val2
+ %cmp1 = fcmp ogt double %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i8 %val5, i8 %val6
+ ret i8 %sel
+
+; CHECK: fun30
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt double %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
+}
+
+define i16 @fun31(double %val1, double %val2, double %val3, double %val4,
+ i16 %val5, i16 %val6) {
+ %cmp0 = fcmp ogt double %val1, %val2
+ %cmp1 = fcmp ogt double %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i16 %val5, i16 %val6
+ ret i16 %sel
+
+; CHECK: fun31
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt double %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
+}
+
+define i32 @fun32(double %val1, double %val2, double %val3, double %val4,
+ i32 %val5, i32 %val6) {
+ %cmp0 = fcmp ogt double %val1, %val2
+ %cmp1 = fcmp ogt double %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i32 %val5, i32 %val6
+ ret i32 %sel
+
+; CHECK: fun32
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt double %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
+}
+
+define i64 @fun33(double %val1, double %val2, double %val3, double %val4,
+ i64 %val5, i64 %val6) {
+ %cmp0 = fcmp ogt double %val1, %val2
+ %cmp1 = fcmp ogt double %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, i64 %val5, i64 %val6
+ ret i64 %sel
+
+; CHECK: fun33
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt double %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i64 %val5, i64 %val6
+}
+
+define float @fun34(double %val1, double %val2, double %val3, double %val4,
+ float %val5, float %val6) {
+ %cmp0 = fcmp ogt double %val1, %val2
+ %cmp1 = fcmp ogt double %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, float %val5, float %val6
+ ret float %sel
+
+; CHECK: fun34
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt double %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, float %val5, float %val6
+}
+
+define double @fun35(double %val1, double %val2, double %val3, double %val4,
+ double %val5, double %val6) {
+ %cmp0 = fcmp ogt double %val1, %val2
+ %cmp1 = fcmp ogt double %val3, %val4
+ %and = and i1 %cmp0, %cmp1
+ %sel = select i1 %and, double %val5, double %val6
+ ret double %sel
+
+; CHECK: fun35
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt double %val3, %val4
+; CHECK: cost of 1 for instruction: %and = and i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, double %val5, double %val6
+}
+
+define i8 @fun36(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
+ i8 %val5, i8 %val6) {
+ %cmp0 = icmp eq i8 %val1, %val2
+ %cmp1 = icmp eq i8 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i8 %val5, i8 %val6
+ ret i8 %sel
+
+; CHECK: fun36
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i8 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
+}
+
+define i16 @fun37(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
+ i16 %val5, i16 %val6) {
+ %cmp0 = icmp eq i8 %val1, %val2
+ %cmp1 = icmp eq i8 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i16 %val5, i16 %val6
+ ret i16 %sel
+
+; CHECK: fun37
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i8 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
+}
+
+define i32 @fun38(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
+ i32 %val5, i32 %val6) {
+ %cmp0 = icmp eq i8 %val1, %val2
+ %cmp1 = icmp eq i8 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i32 %val5, i32 %val6
+ ret i32 %sel
+
+; CHECK: fun38
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i8 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
+}
+
+define i64 @fun39(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
+ i64 %val5, i64 %val6) {
+ %cmp0 = icmp eq i8 %val1, %val2
+ %cmp1 = icmp eq i8 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i64 %val5, i64 %val6
+ ret i64 %sel
+
+; CHECK: fun39
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i8 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i64 %val5, i64 %val6
+}
+
+define float @fun40(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
+ float %val5, float %val6) {
+ %cmp0 = icmp eq i8 %val1, %val2
+ %cmp1 = icmp eq i8 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, float %val5, float %val6
+ ret float %sel
+
+; CHECK: fun40
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i8 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, float %val5, float %val6
+}
+
+define double @fun41(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
+ double %val5, double %val6) {
+ %cmp0 = icmp eq i8 %val1, %val2
+ %cmp1 = icmp eq i8 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, double %val5, double %val6
+ ret double %sel
+
+; CHECK: fun41
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i8 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, double %val5, double %val6
+}
+
+define i8 @fun42(i16 %val1, i16 %val2, i16 %val3, i16 %val4,
+ i8 %val5, i8 %val6) {
+ %cmp0 = icmp eq i16 %val1, %val2
+ %cmp1 = icmp eq i16 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i8 %val5, i8 %val6
+ ret i8 %sel
+
+; CHECK: fun42
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i16 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
+}
+
+define i16 @fun43(i16 %val1, i16 %val2, i16 %val3, i16 %val4,
+ i16 %val5, i16 %val6) {
+ %cmp0 = icmp eq i16 %val1, %val2
+ %cmp1 = icmp eq i16 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i16 %val5, i16 %val6
+ ret i16 %sel
+
+; CHECK: fun43
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i16 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
+}
+
+define i32 @fun44(i16 %val1, i16 %val2, i16 %val3, i16 %val4,
+ i32 %val5, i32 %val6) {
+ %cmp0 = icmp eq i16 %val1, %val2
+ %cmp1 = icmp eq i16 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i32 %val5, i32 %val6
+ ret i32 %sel
+
+; CHECK: fun44
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i16 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
+}
+
+define i64 @fun45(i16 %val1, i16 %val2, i16 %val3, i16 %val4,
+ i64 %val5, i64 %val6) {
+ %cmp0 = icmp eq i16 %val1, %val2
+ %cmp1 = icmp eq i16 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i64 %val5, i64 %val6
+ ret i64 %sel
+
+; CHECK: fun45
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i16 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i64 %val5, i64 %val6
+}
+
+define float @fun46(i16 %val1, i16 %val2, i16 %val3, i16 %val4,
+ float %val5, float %val6) {
+ %cmp0 = icmp eq i16 %val1, %val2
+ %cmp1 = icmp eq i16 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, float %val5, float %val6
+ ret float %sel
+
+; CHECK: fun46
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i16 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, float %val5, float %val6
+}
+
+define double @fun47(i16 %val1, i16 %val2, i16 %val3, i16 %val4,
+ double %val5, double %val6) {
+ %cmp0 = icmp eq i16 %val1, %val2
+ %cmp1 = icmp eq i16 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, double %val5, double %val6
+ ret double %sel
+
+; CHECK: fun47
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i16 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, double %val5, double %val6
+}
+
+define i8 @fun48(i32 %val1, i32 %val2, i32 %val3, i32 %val4,
+ i8 %val5, i8 %val6) {
+ %cmp0 = icmp eq i32 %val1, %val2
+ %cmp1 = icmp eq i32 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i8 %val5, i8 %val6
+ ret i8 %sel
+
+; CHECK: fun48
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i32 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
+}
+
+define i16 @fun49(i32 %val1, i32 %val2, i32 %val3, i32 %val4,
+ i16 %val5, i16 %val6) {
+ %cmp0 = icmp eq i32 %val1, %val2
+ %cmp1 = icmp eq i32 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i16 %val5, i16 %val6
+ ret i16 %sel
+
+; CHECK: fun49
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i32 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
+}
+
+define i32 @fun50(i32 %val1, i32 %val2, i32 %val3, i32 %val4,
+ i32 %val5, i32 %val6) {
+ %cmp0 = icmp eq i32 %val1, %val2
+ %cmp1 = icmp eq i32 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i32 %val5, i32 %val6
+ ret i32 %sel
+
+; CHECK: fun50
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i32 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
+}
+
+define i64 @fun51(i32 %val1, i32 %val2, i32 %val3, i32 %val4,
+ i64 %val5, i64 %val6) {
+ %cmp0 = icmp eq i32 %val1, %val2
+ %cmp1 = icmp eq i32 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i64 %val5, i64 %val6
+ ret i64 %sel
+
+; CHECK: fun51
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i32 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i64 %val5, i64 %val6
+}
+
+define float @fun52(i32 %val1, i32 %val2, i32 %val3, i32 %val4,
+ float %val5, float %val6) {
+ %cmp0 = icmp eq i32 %val1, %val2
+ %cmp1 = icmp eq i32 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, float %val5, float %val6
+ ret float %sel
+
+; CHECK: fun52
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i32 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, float %val5, float %val6
+}
+
+define double @fun53(i32 %val1, i32 %val2, i32 %val3, i32 %val4,
+ double %val5, double %val6) {
+ %cmp0 = icmp eq i32 %val1, %val2
+ %cmp1 = icmp eq i32 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, double %val5, double %val6
+ ret double %sel
+
+; CHECK: fun53
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i32 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, double %val5, double %val6
+}
+
+define i8 @fun54(i64 %val1, i64 %val2, i64 %val3, i64 %val4,
+ i8 %val5, i8 %val6) {
+ %cmp0 = icmp eq i64 %val1, %val2
+ %cmp1 = icmp eq i64 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i8 %val5, i8 %val6
+ ret i8 %sel
+
+; CHECK: fun54
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i64 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
+}
+
+define i16 @fun55(i64 %val1, i64 %val2, i64 %val3, i64 %val4,
+ i16 %val5, i16 %val6) {
+ %cmp0 = icmp eq i64 %val1, %val2
+ %cmp1 = icmp eq i64 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i16 %val5, i16 %val6
+ ret i16 %sel
+
+; CHECK: fun55
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i64 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
+}
+
+define i32 @fun56(i64 %val1, i64 %val2, i64 %val3, i64 %val4,
+ i32 %val5, i32 %val6) {
+ %cmp0 = icmp eq i64 %val1, %val2
+ %cmp1 = icmp eq i64 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i32 %val5, i32 %val6
+ ret i32 %sel
+
+; CHECK: fun56
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i64 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
+}
+
+define i64 @fun57(i64 %val1, i64 %val2, i64 %val3, i64 %val4,
+ i64 %val5, i64 %val6) {
+ %cmp0 = icmp eq i64 %val1, %val2
+ %cmp1 = icmp eq i64 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i64 %val5, i64 %val6
+ ret i64 %sel
+
+; CHECK: fun57
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i64 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i64 %val5, i64 %val6
+}
+
+define float @fun58(i64 %val1, i64 %val2, i64 %val3, i64 %val4,
+ float %val5, float %val6) {
+ %cmp0 = icmp eq i64 %val1, %val2
+ %cmp1 = icmp eq i64 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, float %val5, float %val6
+ ret float %sel
+
+; CHECK: fun58
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i64 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, float %val5, float %val6
+}
+
+define double @fun59(i64 %val1, i64 %val2, i64 %val3, i64 %val4,
+ double %val5, double %val6) {
+ %cmp0 = icmp eq i64 %val1, %val2
+ %cmp1 = icmp eq i64 %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, double %val5, double %val6
+ ret double %sel
+
+; CHECK: fun59
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i64 %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, double %val5, double %val6
+}
+
+define i8 @fun60(float %val1, float %val2, float %val3, float %val4,
+ i8 %val5, i8 %val6) {
+ %cmp0 = fcmp ogt float %val1, %val2
+ %cmp1 = fcmp ogt float %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i8 %val5, i8 %val6
+ ret i8 %sel
+
+; CHECK: fun60
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt float %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
+}
+
+define i16 @fun61(float %val1, float %val2, float %val3, float %val4,
+ i16 %val5, i16 %val6) {
+ %cmp0 = fcmp ogt float %val1, %val2
+ %cmp1 = fcmp ogt float %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i16 %val5, i16 %val6
+ ret i16 %sel
+
+; CHECK: fun61
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt float %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
+}
+
+define i32 @fun62(float %val1, float %val2, float %val3, float %val4,
+ i32 %val5, i32 %val6) {
+ %cmp0 = fcmp ogt float %val1, %val2
+ %cmp1 = fcmp ogt float %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i32 %val5, i32 %val6
+ ret i32 %sel
+
+; CHECK: fun62
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt float %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
+}
+
+define i64 @fun63(float %val1, float %val2, float %val3, float %val4,
+ i64 %val5, i64 %val6) {
+ %cmp0 = fcmp ogt float %val1, %val2
+ %cmp1 = fcmp ogt float %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i64 %val5, i64 %val6
+ ret i64 %sel
+
+; CHECK: fun63
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt float %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i64 %val5, i64 %val6
+}
+
+define float @fun64(float %val1, float %val2, float %val3, float %val4,
+ float %val5, float %val6) {
+ %cmp0 = fcmp ogt float %val1, %val2
+ %cmp1 = fcmp ogt float %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, float %val5, float %val6
+ ret float %sel
+
+; CHECK: fun64
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt float %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, float %val5, float %val6
+}
+
+define double @fun65(float %val1, float %val2, float %val3, float %val4,
+ double %val5, double %val6) {
+ %cmp0 = fcmp ogt float %val1, %val2
+ %cmp1 = fcmp ogt float %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, double %val5, double %val6
+ ret double %sel
+
+; CHECK: fun65
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt float %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, double %val5, double %val6
+}
+
+define i8 @fun66(double %val1, double %val2, double %val3, double %val4,
+ i8 %val5, i8 %val6) {
+ %cmp0 = fcmp ogt double %val1, %val2
+ %cmp1 = fcmp ogt double %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i8 %val5, i8 %val6
+ ret i8 %sel
+
+; CHECK: fun66
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt double %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
+}
+
+define i16 @fun67(double %val1, double %val2, double %val3, double %val4,
+ i16 %val5, i16 %val6) {
+ %cmp0 = fcmp ogt double %val1, %val2
+ %cmp1 = fcmp ogt double %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i16 %val5, i16 %val6
+ ret i16 %sel
+
+; CHECK: fun67
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt double %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
+}
+
+define i32 @fun68(double %val1, double %val2, double %val3, double %val4,
+ i32 %val5, i32 %val6) {
+ %cmp0 = fcmp ogt double %val1, %val2
+ %cmp1 = fcmp ogt double %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i32 %val5, i32 %val6
+ ret i32 %sel
+
+; CHECK: fun68
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt double %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
+}
+
+define i64 @fun69(double %val1, double %val2, double %val3, double %val4,
+ i64 %val5, i64 %val6) {
+ %cmp0 = fcmp ogt double %val1, %val2
+ %cmp1 = fcmp ogt double %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, i64 %val5, i64 %val6
+ ret i64 %sel
+
+; CHECK: fun69
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt double %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i64 %val5, i64 %val6
+}
+
+define float @fun70(double %val1, double %val2, double %val3, double %val4,
+ float %val5, float %val6) {
+ %cmp0 = fcmp ogt double %val1, %val2
+ %cmp1 = fcmp ogt double %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, float %val5, float %val6
+ ret float %sel
+
+; CHECK: fun70
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt double %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, float %val5, float %val6
+}
+
+define double @fun71(double %val1, double %val2, double %val3, double %val4,
+ double %val5, double %val6) {
+ %cmp0 = fcmp ogt double %val1, %val2
+ %cmp1 = fcmp ogt double %val3, %val4
+ %and = or i1 %cmp0, %cmp1
+ %sel = select i1 %and, double %val5, double %val6
+ ret double %sel
+
+; CHECK: fun71
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt double %val3, %val4
+; CHECK: cost of 1 for instruction: %and = or i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, double %val5, double %val6
+}
+
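+; The functions below repeat the patterns above with xor i1 in place of
+; or i1. The scalar i1 xor is costed at 7, presumably because it cannot be
+; folded into the compare/select sequence the way the or i1 above is.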
+define i8 @fun72(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
+ i8 %val5, i8 %val6) {
+ %cmp0 = icmp eq i8 %val1, %val2
+ %cmp1 = icmp eq i8 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i8 %val5, i8 %val6
+ ret i8 %sel
+
+; CHECK: fun72
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i8 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
+}
+
+define i16 @fun73(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
+ i16 %val5, i16 %val6) {
+ %cmp0 = icmp eq i8 %val1, %val2
+ %cmp1 = icmp eq i8 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i16 %val5, i16 %val6
+ ret i16 %sel
+
+; CHECK: fun73
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i8 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
+}
+
+define i32 @fun74(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
+ i32 %val5, i32 %val6) {
+ %cmp0 = icmp eq i8 %val1, %val2
+ %cmp1 = icmp eq i8 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i32 %val5, i32 %val6
+ ret i32 %sel
+
+; CHECK: fun74
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i8 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
+}
+
+define i64 @fun75(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
+ i64 %val5, i64 %val6) {
+ %cmp0 = icmp eq i8 %val1, %val2
+ %cmp1 = icmp eq i8 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i64 %val5, i64 %val6
+ ret i64 %sel
+
+; CHECK: fun75
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i8 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i64 %val5, i64 %val6
+}
+
+define float @fun76(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
+ float %val5, float %val6) {
+ %cmp0 = icmp eq i8 %val1, %val2
+ %cmp1 = icmp eq i8 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, float %val5, float %val6
+ ret float %sel
+
+; CHECK: fun76
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i8 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, float %val5, float %val6
+}
+
+define double @fun77(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
+ double %val5, double %val6) {
+ %cmp0 = icmp eq i8 %val1, %val2
+ %cmp1 = icmp eq i8 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, double %val5, double %val6
+ ret double %sel
+
+; CHECK: fun77
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i8 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i8 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, double %val5, double %val6
+}
+
+define i8 @fun78(i16 %val1, i16 %val2, i16 %val3, i16 %val4,
+ i8 %val5, i8 %val6) {
+ %cmp0 = icmp eq i16 %val1, %val2
+ %cmp1 = icmp eq i16 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i8 %val5, i8 %val6
+ ret i8 %sel
+
+; CHECK: fun78
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i16 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
+}
+
+define i16 @fun79(i16 %val1, i16 %val2, i16 %val3, i16 %val4,
+ i16 %val5, i16 %val6) {
+ %cmp0 = icmp eq i16 %val1, %val2
+ %cmp1 = icmp eq i16 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i16 %val5, i16 %val6
+ ret i16 %sel
+
+; CHECK: fun79
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i16 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
+}
+
+define i32 @fun80(i16 %val1, i16 %val2, i16 %val3, i16 %val4,
+ i32 %val5, i32 %val6) {
+ %cmp0 = icmp eq i16 %val1, %val2
+ %cmp1 = icmp eq i16 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i32 %val5, i32 %val6
+ ret i32 %sel
+
+; CHECK: fun80
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i16 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
+}
+
+define i64 @fun81(i16 %val1, i16 %val2, i16 %val3, i16 %val4,
+ i64 %val5, i64 %val6) {
+ %cmp0 = icmp eq i16 %val1, %val2
+ %cmp1 = icmp eq i16 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i64 %val5, i64 %val6
+ ret i64 %sel
+
+; CHECK: fun81
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i16 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i64 %val5, i64 %val6
+}
+
+define float @fun82(i16 %val1, i16 %val2, i16 %val3, i16 %val4,
+ float %val5, float %val6) {
+ %cmp0 = icmp eq i16 %val1, %val2
+ %cmp1 = icmp eq i16 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, float %val5, float %val6
+ ret float %sel
+
+; CHECK: fun82
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i16 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, float %val5, float %val6
+}
+
+define double @fun83(i16 %val1, i16 %val2, i16 %val3, i16 %val4,
+ double %val5, double %val6) {
+ %cmp0 = icmp eq i16 %val1, %val2
+ %cmp1 = icmp eq i16 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, double %val5, double %val6
+ ret double %sel
+
+; CHECK: fun83
+; CHECK: cost of 3 for instruction: %cmp0 = icmp eq i16 %val1, %val2
+; CHECK: cost of 3 for instruction: %cmp1 = icmp eq i16 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, double %val5, double %val6
+}
+
+define i8 @fun84(i32 %val1, i32 %val2, i32 %val3, i32 %val4,
+ i8 %val5, i8 %val6) {
+ %cmp0 = icmp eq i32 %val1, %val2
+ %cmp1 = icmp eq i32 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i8 %val5, i8 %val6
+ ret i8 %sel
+
+; CHECK: fun84
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i32 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
+}
+
+define i16 @fun85(i32 %val1, i32 %val2, i32 %val3, i32 %val4,
+ i16 %val5, i16 %val6) {
+ %cmp0 = icmp eq i32 %val1, %val2
+ %cmp1 = icmp eq i32 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i16 %val5, i16 %val6
+ ret i16 %sel
+
+; CHECK: fun85
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i32 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
+}
+
+define i32 @fun86(i32 %val1, i32 %val2, i32 %val3, i32 %val4,
+ i32 %val5, i32 %val6) {
+ %cmp0 = icmp eq i32 %val1, %val2
+ %cmp1 = icmp eq i32 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i32 %val5, i32 %val6
+ ret i32 %sel
+
+; CHECK: fun86
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i32 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
+}
+
+define i64 @fun87(i32 %val1, i32 %val2, i32 %val3, i32 %val4,
+ i64 %val5, i64 %val6) {
+ %cmp0 = icmp eq i32 %val1, %val2
+ %cmp1 = icmp eq i32 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i64 %val5, i64 %val6
+ ret i64 %sel
+
+; CHECK: fun87
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i32 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i64 %val5, i64 %val6
+}
+
+define float @fun88(i32 %val1, i32 %val2, i32 %val3, i32 %val4,
+ float %val5, float %val6) {
+ %cmp0 = icmp eq i32 %val1, %val2
+ %cmp1 = icmp eq i32 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, float %val5, float %val6
+ ret float %sel
+
+; CHECK: fun88
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i32 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, float %val5, float %val6
+}
+
+define double @fun89(i32 %val1, i32 %val2, i32 %val3, i32 %val4,
+ double %val5, double %val6) {
+ %cmp0 = icmp eq i32 %val1, %val2
+ %cmp1 = icmp eq i32 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, double %val5, double %val6
+ ret double %sel
+
+; CHECK: fun89
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i32 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i32 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, double %val5, double %val6
+}
+
+define i8 @fun90(i64 %val1, i64 %val2, i64 %val3, i64 %val4,
+ i8 %val5, i8 %val6) {
+ %cmp0 = icmp eq i64 %val1, %val2
+ %cmp1 = icmp eq i64 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i8 %val5, i8 %val6
+ ret i8 %sel
+
+; CHECK: fun90
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i64 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
+}
+
+define i16 @fun91(i64 %val1, i64 %val2, i64 %val3, i64 %val4,
+ i16 %val5, i16 %val6) {
+ %cmp0 = icmp eq i64 %val1, %val2
+ %cmp1 = icmp eq i64 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i16 %val5, i16 %val6
+ ret i16 %sel
+
+; CHECK: fun91
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i64 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
+}
+
+define i32 @fun92(i64 %val1, i64 %val2, i64 %val3, i64 %val4,
+ i32 %val5, i32 %val6) {
+ %cmp0 = icmp eq i64 %val1, %val2
+ %cmp1 = icmp eq i64 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i32 %val5, i32 %val6
+ ret i32 %sel
+
+; CHECK: fun92
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i64 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
+}
+
+define i64 @fun93(i64 %val1, i64 %val2, i64 %val3, i64 %val4,
+ i64 %val5, i64 %val6) {
+ %cmp0 = icmp eq i64 %val1, %val2
+ %cmp1 = icmp eq i64 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i64 %val5, i64 %val6
+ ret i64 %sel
+
+; CHECK: fun93
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i64 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i64 %val5, i64 %val6
+}
+
+define float @fun94(i64 %val1, i64 %val2, i64 %val3, i64 %val4,
+ float %val5, float %val6) {
+ %cmp0 = icmp eq i64 %val1, %val2
+ %cmp1 = icmp eq i64 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, float %val5, float %val6
+ ret float %sel
+
+; CHECK: fun94
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i64 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, float %val5, float %val6
+}
+
+define double @fun95(i64 %val1, i64 %val2, i64 %val3, i64 %val4,
+ double %val5, double %val6) {
+ %cmp0 = icmp eq i64 %val1, %val2
+ %cmp1 = icmp eq i64 %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, double %val5, double %val6
+ ret double %sel
+
+; CHECK: fun95
+; CHECK: cost of 1 for instruction: %cmp0 = icmp eq i64 %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = icmp eq i64 %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, double %val5, double %val6
+}
+
+define i8 @fun96(float %val1, float %val2, float %val3, float %val4,
+ i8 %val5, i8 %val6) {
+ %cmp0 = fcmp ogt float %val1, %val2
+ %cmp1 = fcmp ogt float %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i8 %val5, i8 %val6
+ ret i8 %sel
+
+; CHECK: fun96
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt float %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
+}
+
+define i16 @fun97(float %val1, float %val2, float %val3, float %val4,
+ i16 %val5, i16 %val6) {
+ %cmp0 = fcmp ogt float %val1, %val2
+ %cmp1 = fcmp ogt float %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i16 %val5, i16 %val6
+ ret i16 %sel
+
+; CHECK: fun97
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt float %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
+}
+
+define i32 @fun98(float %val1, float %val2, float %val3, float %val4,
+ i32 %val5, i32 %val6) {
+ %cmp0 = fcmp ogt float %val1, %val2
+ %cmp1 = fcmp ogt float %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i32 %val5, i32 %val6
+ ret i32 %sel
+
+; CHECK: fun98
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt float %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
+}
+
+define i64 @fun99(float %val1, float %val2, float %val3, float %val4,
+ i64 %val5, i64 %val6) {
+ %cmp0 = fcmp ogt float %val1, %val2
+ %cmp1 = fcmp ogt float %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i64 %val5, i64 %val6
+ ret i64 %sel
+
+; CHECK: fun99
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt float %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i64 %val5, i64 %val6
+}
+
+define float @fun100(float %val1, float %val2, float %val3, float %val4,
+ float %val5, float %val6) {
+ %cmp0 = fcmp ogt float %val1, %val2
+ %cmp1 = fcmp ogt float %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, float %val5, float %val6
+ ret float %sel
+
+; CHECK: fun100
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt float %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, float %val5, float %val6
+}
+
+define double @fun101(float %val1, float %val2, float %val3, float %val4,
+ double %val5, double %val6) {
+ %cmp0 = fcmp ogt float %val1, %val2
+ %cmp1 = fcmp ogt float %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, double %val5, double %val6
+ ret double %sel
+
+; CHECK: fun101
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt float %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt float %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, double %val5, double %val6
+}
+
+define i8 @fun102(double %val1, double %val2, double %val3, double %val4,
+ i8 %val5, i8 %val6) {
+ %cmp0 = fcmp ogt double %val1, %val2
+ %cmp1 = fcmp ogt double %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i8 %val5, i8 %val6
+ ret i8 %sel
+
+; CHECK: fun102
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt double %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
+}
+
+define i16 @fun103(double %val1, double %val2, double %val3, double %val4,
+ i16 %val5, i16 %val6) {
+ %cmp0 = fcmp ogt double %val1, %val2
+ %cmp1 = fcmp ogt double %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i16 %val5, i16 %val6
+ ret i16 %sel
+
+; CHECK: fun103
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt double %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
+}
+
+define i32 @fun104(double %val1, double %val2, double %val3, double %val4,
+ i32 %val5, i32 %val6) {
+ %cmp0 = fcmp ogt double %val1, %val2
+ %cmp1 = fcmp ogt double %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i32 %val5, i32 %val6
+ ret i32 %sel
+
+; CHECK: fun104
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt double %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
+}
+
+define i64 @fun105(double %val1, double %val2, double %val3, double %val4,
+ i64 %val5, i64 %val6) {
+ %cmp0 = fcmp ogt double %val1, %val2
+ %cmp1 = fcmp ogt double %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, i64 %val5, i64 %val6
+ ret i64 %sel
+
+; CHECK: fun105
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt double %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 1 for instruction: %sel = select i1 %and, i64 %val5, i64 %val6
+}
+
+define float @fun106(double %val1, double %val2, double %val3, double %val4,
+ float %val5, float %val6) {
+ %cmp0 = fcmp ogt double %val1, %val2
+ %cmp1 = fcmp ogt double %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, float %val5, float %val6
+ ret float %sel
+
+; CHECK: fun106
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt double %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, float %val5, float %val6
+}
+
+define double @fun107(double %val1, double %val2, double %val3, double %val4,
+ double %val5, double %val6) {
+ %cmp0 = fcmp ogt double %val1, %val2
+ %cmp1 = fcmp ogt double %val3, %val4
+ %and = xor i1 %cmp0, %cmp1
+ %sel = select i1 %and, double %val5, double %val6
+ ret double %sel
+
+; CHECK: fun107
+; CHECK: cost of 1 for instruction: %cmp0 = fcmp ogt double %val1, %val2
+; CHECK: cost of 1 for instruction: %cmp1 = fcmp ogt double %val3, %val4
+; CHECK: cost of 7 for instruction: %and = xor i1 %cmp0, %cmp1
+; CHECK: cost of 4 for instruction: %sel = select i1 %and, double %val5, double %val6
+}
+
diff --git a/test/Analysis/CostModel/SystemZ/shuffle.ll b/test/Analysis/CostModel/SystemZ/shuffle.ll
new file mode 100644
index 000000000000..e40dc1f09ba0
--- /dev/null
+++ b/test/Analysis/CostModel/SystemZ/shuffle.ll
@@ -0,0 +1,112 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=systemz-unknown -mcpu=z13 | FileCheck %s
+
+; CHECK: shuffle
+define void @shuffle() {
+
+ ;; Reverse shuffles
+ shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 0>
+
+ shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
+
+ shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
+
+ shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
+
+ shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
+ shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+
+ ;; Alternate shuffles
+ shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
+ shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> <i32 16, i32 1, i32 18, i32 3, i32 20, i32 5, i32 22, i32 7, i32 24, i32 9, i32 26, i32 11, i32 28, i32 13, i32 30, i32 15>
+
+ shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
+
+ shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+
+ shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 0, i32 3>
+ shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 2, i32 1>
+
+ shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 0, i32 3>
+ shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 2, i32 1>
+
+ ;; Broadcast shuffles
+ shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> zeroinitializer
+ shufflevector <32 x i8> undef, <32 x i8> undef, <32 x i32> zeroinitializer
+
+ shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> zeroinitializer
+ shufflevector <16 x i16> undef, <16 x i16> undef, <16 x i32> zeroinitializer
+
+ shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> zeroinitializer
+ shufflevector <8 x i32> undef, <8 x i32> undef, <8 x i32> zeroinitializer
+
+ shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> zeroinitializer
+ shufflevector <4 x i64> undef, <4 x i64> undef, <4 x i32> zeroinitializer
+
+ shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> zeroinitializer
+ shufflevector <4 x double> undef, <4 x double> undef, <4 x i32> zeroinitializer
+
+ ;; Random shuffles
+ shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> <i32 4, i32 17, i32 2, i32 19, i32 0, i32 21, i32 8, i32 23, i32 6, i32 10, i32 10, i32 27, i32 29, i32 29, i32 14, i32 31>
+ shufflevector <18 x i8> undef, <18 x i8> undef, <18 x i32> <i32 4, i32 17, i32 2, i32 19, i32 0, i32 21, i32 8, i32 23, i32 6, i32 10, i32 10, i32 27, i32 29, i32 29, i32 14, i32 31, i32 0, i32 1>
+
+ shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 9, i32 9, i32 2, i32 2, i32 4, i32 13, i32 15, i32 15>
+ shufflevector <12 x i16> undef, <12 x i16> undef, <12 x i32> <i32 9, i32 9, i32 2, i32 2, i32 4, i32 13, i32 15, i32 15, i32 9, i32 2, i32 2, i32 4>
+
+ shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 4, i32 7>
+ shufflevector <6 x i32> undef, <6 x i32> undef, <6 x i32> <i32 0, i32 0, i32 4, i32 7, i32 4, i32 7>
+
+ shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 2>
+ shufflevector <4 x i64> undef, <4 x i64> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 2>
+
+ shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 2, i32 1>
+ shufflevector <4 x double> undef, <4 x double> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 2>
+
+ ret void
+
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %1 = shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %2 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 0>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %3 = shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %4 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %5 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %6 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %7 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %8 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %9 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %10 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %11 = shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %12 = shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> <i32 16, i32 1, i32 18, i32 3, i32 20, i32 5, i32 22, i32 7, i32 24, i32 9, i32 26, i32 11, i32 28, i32 13, i32 30, i32 15>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %13 = shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %14 = shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %15 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %16 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %17 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 0, i32 3>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %18 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 2, i32 1>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %19 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 0, i32 3>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %20 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 2, i32 1>
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %21 = shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> zeroinitializer
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %22 = shufflevector <32 x i8> undef, <32 x i8> undef, <32 x i32> zeroinitializer
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %23 = shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> zeroinitializer
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %24 = shufflevector <16 x i16> undef, <16 x i16> undef, <16 x i32> zeroinitializer
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %25 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> zeroinitializer
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %26 = shufflevector <8 x i32> undef, <8 x i32> undef, <8 x i32> zeroinitializer
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %27 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> zeroinitializer
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %28 = shufflevector <4 x i64> undef, <4 x i64> undef, <4 x i32> zeroinitializer
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %29 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> zeroinitializer
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %30 = shufflevector <4 x double> undef, <4 x double> undef, <4 x i32> zeroinitializer
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %31 = shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> <i32 4, i32 17, i32 2, i32 19, i32 0, i32 21, i32 8, i32 23, i32 6, i32 10, i32 10, i32 27, i32 29, i32 29, i32 14, i32 31>
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %32 = shufflevector <18 x i8> undef, <18 x i8> undef, <18 x i32> <i32 4, i32 17, i32 2, i32 19, i32 0, i32 21, i32 8, i32 23, i32 6, i32 10, i32 10, i32 27, i32 29, i32 29, i32 14, i32 31, i32 0, i32 1>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %33 = shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 9, i32 9, i32 2, i32 2, i32 4, i32 13, i32 15, i32 15>
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %34 = shufflevector <12 x i16> undef, <12 x i16> undef, <12 x i32> <i32 9, i32 9, i32 2, i32 2, i32 4, i32 13, i32 15, i32 15, i32 9, i32 2, i32 2, i32 4>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %35 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 4, i32 7>
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %36 = shufflevector <6 x i32> undef, <6 x i32> undef, <6 x i32> <i32 0, i32 0, i32 4, i32 7, i32 4, i32 7>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %37 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 2>
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %38 = shufflevector <4 x i64> undef, <4 x i64> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 2>
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %39 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 2, i32 1>
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %40 = shufflevector <4 x double> undef, <4 x double> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 2>
+}
diff --git a/test/Analysis/CostModel/SystemZ/vectorinstrs.ll b/test/Analysis/CostModel/SystemZ/vectorinstrs.ll
new file mode 100644
index 000000000000..b55707651267
--- /dev/null
+++ b/test/Analysis/CostModel/SystemZ/vectorinstrs.ll
@@ -0,0 +1,56 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=systemz-unknown -mcpu=z13 | FileCheck %s
+
+; CHECK: vecinstrs
+define void @vecinstrs() {
+
+ ;; Extract element is penalized somewhat, with a cost of 2 for index 0 of the integer vectors (element 0 of a double vector already overlaps an FPR and stays at cost 1).
+ extractelement <16 x i8> undef, i32 0
+ extractelement <16 x i8> undef, i32 1
+
+ extractelement <8 x i16> undef, i32 0
+ extractelement <8 x i16> undef, i32 1
+
+ extractelement <4 x i32> undef, i32 0
+ extractelement <4 x i32> undef, i32 1
+
+ extractelement <2 x i64> undef, i32 0
+ extractelement <2 x i64> undef, i32 1
+
+ extractelement <2 x double> undef, i32 0
+ extractelement <2 x double> undef, i32 1
+
+ ; Extraction of i1 means extract + test under mask before branch.
+ extractelement <2 x i1> undef, i32 0
+ extractelement <4 x i1> undef, i32 1
+ extractelement <8 x i1> undef, i32 2
+
+ ;; Insert element
+ insertelement <16 x i8> undef, i8 undef, i32 0
+ insertelement <8 x i16> undef, i16 undef, i32 0
+ insertelement <4 x i32> undef, i32 undef, i32 0
+
+ ; vlvgp inserts two GPRs into a vector register, so the cost is only added half of the time.
+ insertelement <2 x i64> undef, i64 undef, i32 0
+ insertelement <2 x i64> undef, i64 undef, i32 1
+
+ ret void
+
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %1 = extractelement <16 x i8> undef, i32 0
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %2 = extractelement <16 x i8> undef, i32 1
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %3 = extractelement <8 x i16> undef, i32 0
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %4 = extractelement <8 x i16> undef, i32 1
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %5 = extractelement <4 x i32> undef, i32 0
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %6 = extractelement <4 x i32> undef, i32 1
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %7 = extractelement <2 x i64> undef, i32 0
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %8 = extractelement <2 x i64> undef, i32 1
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %9 = extractelement <2 x double> undef, i32 0
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %10 = extractelement <2 x double> undef, i32 1
+; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %11 = extractelement <2 x i1> undef, i32 0
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %12 = extractelement <4 x i1> undef, i32 1
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %13 = extractelement <8 x i1> undef, i32 2
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %14 = insertelement <16 x i8> undef, i8 undef, i32 0
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %15 = insertelement <8 x i16> undef, i16 undef, i32 0
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %16 = insertelement <4 x i32> undef, i32 undef, i32 0
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %17 = insertelement <2 x i64> undef, i64 undef, i32 0
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %18 = insertelement <2 x i64> undef, i64 undef, i32 1
+}
diff --git a/test/Analysis/CostModel/X86/arith-fp.ll b/test/Analysis/CostModel/X86/arith-fp.ll
index 689442f67a13..e5043010c11f 100644
--- a/test/Analysis/CostModel/X86/arith-fp.ll
+++ b/test/Analysis/CostModel/X86/arith-fp.ll
@@ -456,20 +456,20 @@ define i32 @fma(i32 %arg) {
; AVX2: cost of 1 {{.*}} %F32 = call float @llvm.fma.f32
; AVX512: cost of 1 {{.*}} %F32 = call float @llvm.fma.f32
%F32 = call float @llvm.fma.f32(float undef, float undef, float undef)
- ; SSE2: cost of 52 {{.*}} %V4F32 = call <4 x float> @llvm.fma.v4f32
- ; SSE42: cost of 52 {{.*}} %V4F32 = call <4 x float> @llvm.fma.v4f32
+ ; SSE2: cost of 43 {{.*}} %V4F32 = call <4 x float> @llvm.fma.v4f32
+ ; SSE42: cost of 43 {{.*}} %V4F32 = call <4 x float> @llvm.fma.v4f32
; AVX: cost of 1 {{.*}} %V4F32 = call <4 x float> @llvm.fma.v4f32
; AVX2: cost of 1 {{.*}} %V4F32 = call <4 x float> @llvm.fma.v4f32
; AVX512: cost of 1 {{.*}} %V4F32 = call <4 x float> @llvm.fma.v4f32
%V4F32 = call <4 x float> @llvm.fma.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
- ; SSE2: cost of 104 {{.*}} %V8F32 = call <8 x float> @llvm.fma.v8f32
- ; SSE42: cost of 104 {{.*}} %V8F32 = call <8 x float> @llvm.fma.v8f32
+ ; SSE2: cost of 86 {{.*}} %V8F32 = call <8 x float> @llvm.fma.v8f32
+ ; SSE42: cost of 86 {{.*}} %V8F32 = call <8 x float> @llvm.fma.v8f32
; AVX: cost of 1 {{.*}} %V8F32 = call <8 x float> @llvm.fma.v8f32
; AVX2: cost of 1 {{.*}} %V8F32 = call <8 x float> @llvm.fma.v8f32
; AVX512: cost of 1 {{.*}} %V8F32 = call <8 x float> @llvm.fma.v8f32
%V8F32 = call <8 x float> @llvm.fma.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef)
- ; SSE2: cost of 208 {{.*}} %V16F32 = call <16 x float> @llvm.fma.v16f32
- ; SSE42: cost of 208 {{.*}} %V16F32 = call <16 x float> @llvm.fma.v16f32
+ ; SSE2: cost of 172 {{.*}} %V16F32 = call <16 x float> @llvm.fma.v16f32
+ ; SSE42: cost of 172 {{.*}} %V16F32 = call <16 x float> @llvm.fma.v16f32
; AVX: cost of 4 {{.*}} %V16F32 = call <16 x float> @llvm.fma.v16f32
; AVX2: cost of 4 {{.*}} %V16F32 = call <16 x float> @llvm.fma.v16f32
; AVX512: cost of 1 {{.*}} %V16F32 = call <16 x float> @llvm.fma.v16f32
@@ -481,20 +481,20 @@ define i32 @fma(i32 %arg) {
; AVX2: cost of 1 {{.*}} %F64 = call double @llvm.fma.f64
; AVX512: cost of 1 {{.*}} %F64 = call double @llvm.fma.f64
%F64 = call double @llvm.fma.f64(double undef, double undef, double undef)
- ; SSE2: cost of 24 {{.*}} %V2F64 = call <2 x double> @llvm.fma.v2f64
- ; SSE42: cost of 24 {{.*}} %V2F64 = call <2 x double> @llvm.fma.v2f64
+ ; SSE2: cost of 21 {{.*}} %V2F64 = call <2 x double> @llvm.fma.v2f64
+ ; SSE42: cost of 21 {{.*}} %V2F64 = call <2 x double> @llvm.fma.v2f64
; AVX: cost of 1 {{.*}} %V2F64 = call <2 x double> @llvm.fma.v2f64
; AVX2: cost of 1 {{.*}} %V2F64 = call <2 x double> @llvm.fma.v2f64
; AVX512: cost of 1 {{.*}} %V2F64 = call <2 x double> @llvm.fma.v2f64
%V2F64 = call <2 x double> @llvm.fma.v2f64(<2 x double> undef, <2 x double> undef, <2 x double> undef)
- ; SSE2: cost of 48 {{.*}} %V4F64 = call <4 x double> @llvm.fma.v4f64
- ; SSE42: cost of 48 {{.*}} %V4F64 = call <4 x double> @llvm.fma.v4f64
+ ; SSE2: cost of 42 {{.*}} %V4F64 = call <4 x double> @llvm.fma.v4f64
+ ; SSE42: cost of 42 {{.*}} %V4F64 = call <4 x double> @llvm.fma.v4f64
; AVX: cost of 1 {{.*}} %V4F64 = call <4 x double> @llvm.fma.v4f64
; AVX2: cost of 1 {{.*}} %V4F64 = call <4 x double> @llvm.fma.v4f64
; AVX512: cost of 1 {{.*}} %V4F64 = call <4 x double> @llvm.fma.v4f64
%V4F64 = call <4 x double> @llvm.fma.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef)
- ; SSE2: cost of 96 {{.*}} %V8F64 = call <8 x double> @llvm.fma.v8f64
- ; SSE42: cost of 96 {{.*}} %V8F64 = call <8 x double> @llvm.fma.v8f64
+ ; SSE2: cost of 84 {{.*}} %V8F64 = call <8 x double> @llvm.fma.v8f64
+ ; SSE42: cost of 84 {{.*}} %V8F64 = call <8 x double> @llvm.fma.v8f64
; AVX: cost of 4 {{.*}} %V8F64 = call <8 x double> @llvm.fma.v8f64
; AVX2: cost of 4 {{.*}} %V8F64 = call <8 x double> @llvm.fma.v8f64
; AVX512: cost of 1 {{.*}} %V8F64 = call <8 x double> @llvm.fma.v8f64
diff --git a/test/Analysis/CostModel/X86/bitreverse.ll b/test/Analysis/CostModel/X86/bitreverse.ll
index c9eea20c3404..2eb63babdc34 100644
--- a/test/Analysis/CostModel/X86/bitreverse.ll
+++ b/test/Analysis/CostModel/X86/bitreverse.ll
@@ -1,7 +1,11 @@
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=pentium4 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE2
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE42
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
+; RUN: opt < %s -mtriple=i686-unknown-linux-gnu -mcpu=pentium4 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=X86 -check-prefix=SSE2
+; RUN: opt < %s -mtriple=i686-unknown-linux-gnu -mcpu=corei7 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=X86 -check-prefix=SSE42
+; RUN: opt < %s -mtriple=i686-unknown-linux-gnu -mcpu=corei7-avx -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=X86 -check-prefix=AVX
+; RUN: opt < %s -mtriple=i686-unknown-linux-gnu -mcpu=core-avx2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=X86 -check-prefix=AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=pentium4 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=X64 -check-prefix=SSE2
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=X64 -check-prefix=SSE42
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=X64 -check-prefix=AVX
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=X64 -check-prefix=AVX2
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=XOP -check-prefix=XOPAVX
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver4 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=XOP -check-prefix=XOPAVX2
@@ -14,10 +18,8 @@ declare i8 @llvm.bitreverse.i8(i8)
define i64 @var_bitreverse_i64(i64 %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_i64':
-; SSE2: Found an estimated cost of 1 for instruction: %bitreverse
-; SSE42: Found an estimated cost of 1 for instruction: %bitreverse
-; AVX: Found an estimated cost of 1 for instruction: %bitreverse
-; AVX2: Found an estimated cost of 1 for instruction: %bitreverse
+; X86: Found an estimated cost of 28 for instruction: %bitreverse
+; X64: Found an estimated cost of 14 for instruction: %bitreverse
; XOP: Found an estimated cost of 3 for instruction: %bitreverse
%bitreverse = call i64 @llvm.bitreverse.i64(i64 %a)
ret i64 %bitreverse
@@ -25,10 +27,8 @@ define i64 @var_bitreverse_i64(i64 %a) {
define i32 @var_bitreverse_i32(i32 %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_i32':
-; SSE2: Found an estimated cost of 1 for instruction: %bitreverse
-; SSE42: Found an estimated cost of 1 for instruction: %bitreverse
-; AVX: Found an estimated cost of 1 for instruction: %bitreverse
-; AVX2: Found an estimated cost of 1 for instruction: %bitreverse
+; X86: Found an estimated cost of 14 for instruction: %bitreverse
+; X64: Found an estimated cost of 14 for instruction: %bitreverse
; XOP: Found an estimated cost of 3 for instruction: %bitreverse
%bitreverse = call i32 @llvm.bitreverse.i32(i32 %a)
ret i32 %bitreverse
@@ -36,10 +36,8 @@ define i32 @var_bitreverse_i32(i32 %a) {
define i16 @var_bitreverse_i16(i16 %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_i16':
-; SSE2: Found an estimated cost of 1 for instruction: %bitreverse
-; SSE42: Found an estimated cost of 1 for instruction: %bitreverse
-; AVX: Found an estimated cost of 1 for instruction: %bitreverse
-; AVX2: Found an estimated cost of 1 for instruction: %bitreverse
+; X86: Found an estimated cost of 14 for instruction: %bitreverse
+; X64: Found an estimated cost of 14 for instruction: %bitreverse
; XOP: Found an estimated cost of 3 for instruction: %bitreverse
%bitreverse = call i16 @llvm.bitreverse.i16(i16 %a)
ret i16 %bitreverse
@@ -47,10 +45,8 @@ define i16 @var_bitreverse_i16(i16 %a) {
define i8 @var_bitreverse_i8(i8 %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_i8':
-; SSE2: Found an estimated cost of 1 for instruction: %bitreverse
-; SSE42: Found an estimated cost of 1 for instruction: %bitreverse
-; AVX: Found an estimated cost of 1 for instruction: %bitreverse
-; AVX2: Found an estimated cost of 1 for instruction: %bitreverse
+; X86: Found an estimated cost of 11 for instruction: %bitreverse
+; X64: Found an estimated cost of 11 for instruction: %bitreverse
; XOP: Found an estimated cost of 3 for instruction: %bitreverse
%bitreverse = call i8 @llvm.bitreverse.i8(i8 %a)
ret i8 %bitreverse
@@ -70,7 +66,7 @@ declare <32 x i8> @llvm.bitreverse.v32i8(<32 x i8>)
define <2 x i64> @var_bitreverse_v2i64(<2 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_v2i64':
-; SSE2: Found an estimated cost of 6 for instruction: %bitreverse
+; SSE2: Found an estimated cost of 29 for instruction: %bitreverse
; SSE42: Found an estimated cost of 5 for instruction: %bitreverse
; AVX: Found an estimated cost of 5 for instruction: %bitreverse
; AVX2: Found an estimated cost of 5 for instruction: %bitreverse
@@ -81,7 +77,7 @@ define <2 x i64> @var_bitreverse_v2i64(<2 x i64> %a) {
define <4 x i64> @var_bitreverse_v4i64(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_v4i64':
-; SSE2: Found an estimated cost of 12 for instruction: %bitreverse
+; SSE2: Found an estimated cost of 58 for instruction: %bitreverse
; SSE42: Found an estimated cost of 10 for instruction: %bitreverse
; AVX: Found an estimated cost of 10 for instruction: %bitreverse
; AVX2: Found an estimated cost of 5 for instruction: %bitreverse
@@ -92,7 +88,7 @@ define <4 x i64> @var_bitreverse_v4i64(<4 x i64> %a) {
define <4 x i32> @var_bitreverse_v4i32(<4 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_v4i32':
-; SSE2: Found an estimated cost of 12 for instruction: %bitreverse
+; SSE2: Found an estimated cost of 27 for instruction: %bitreverse
; SSE42: Found an estimated cost of 5 for instruction: %bitreverse
; AVX: Found an estimated cost of 5 for instruction: %bitreverse
; AVX2: Found an estimated cost of 5 for instruction: %bitreverse
@@ -103,7 +99,7 @@ define <4 x i32> @var_bitreverse_v4i32(<4 x i32> %a) {
define <8 x i32> @var_bitreverse_v8i32(<8 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_v8i32':
-; SSE2: Found an estimated cost of 24 for instruction: %bitreverse
+; SSE2: Found an estimated cost of 54 for instruction: %bitreverse
; SSE42: Found an estimated cost of 10 for instruction: %bitreverse
; AVX: Found an estimated cost of 10 for instruction: %bitreverse
; AVX2: Found an estimated cost of 5 for instruction: %bitreverse
@@ -114,7 +110,7 @@ define <8 x i32> @var_bitreverse_v8i32(<8 x i32> %a) {
define <8 x i16> @var_bitreverse_v8i16(<8 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_v8i16':
-; SSE2: Found an estimated cost of 24 for instruction: %bitreverse
+; SSE2: Found an estimated cost of 27 for instruction: %bitreverse
; SSE42: Found an estimated cost of 5 for instruction: %bitreverse
; AVX: Found an estimated cost of 5 for instruction: %bitreverse
; AVX2: Found an estimated cost of 5 for instruction: %bitreverse
@@ -125,7 +121,7 @@ define <8 x i16> @var_bitreverse_v8i16(<8 x i16> %a) {
define <16 x i16> @var_bitreverse_v16i16(<16 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_v16i16':
-; SSE2: Found an estimated cost of 48 for instruction: %bitreverse
+; SSE2: Found an estimated cost of 54 for instruction: %bitreverse
; SSE42: Found an estimated cost of 10 for instruction: %bitreverse
; AVX: Found an estimated cost of 10 for instruction: %bitreverse
; AVX2: Found an estimated cost of 5 for instruction: %bitreverse
@@ -136,7 +132,7 @@ define <16 x i16> @var_bitreverse_v16i16(<16 x i16> %a) {
define <16 x i8> @var_bitreverse_v16i8(<16 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_v16i8':
-; SSE2: Found an estimated cost of 48 for instruction: %bitreverse
+; SSE2: Found an estimated cost of 20 for instruction: %bitreverse
; SSE42: Found an estimated cost of 5 for instruction: %bitreverse
; AVX: Found an estimated cost of 5 for instruction: %bitreverse
; AVX2: Found an estimated cost of 5 for instruction: %bitreverse
@@ -147,7 +143,7 @@ define <16 x i8> @var_bitreverse_v16i8(<16 x i8> %a) {
define <32 x i8> @var_bitreverse_v32i8(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_v32i8':
-; SSE2: Found an estimated cost of 96 for instruction: %bitreverse
+; SSE2: Found an estimated cost of 40 for instruction: %bitreverse
; SSE42: Found an estimated cost of 10 for instruction: %bitreverse
; AVX: Found an estimated cost of 10 for instruction: %bitreverse
; AVX2: Found an estimated cost of 5 for instruction: %bitreverse
diff --git a/test/Analysis/CostModel/X86/shuffle-single-src.ll b/test/Analysis/CostModel/X86/shuffle-single-src.ll
index a953ec17d80f..e43e1afcdf59 100644
--- a/test/Analysis/CostModel/X86/shuffle-single-src.ll
+++ b/test/Analysis/CostModel/X86/shuffle-single-src.ll
@@ -1,30 +1,61 @@
-; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-unknown-linux-gnu -mcpu=skylake-avx512 | FileCheck %s --check-prefix=SKX
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE -check-prefix=SSE2
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-unknown-linux-gnu -mattr=+ssse3 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE -check-prefix=SSSE3
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-unknown-linux-gnu -mattr=+sse4.2 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE -check-prefix=SSE42
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-unknown-linux-gnu -mattr=+avx | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX1
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX2
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
;
; Verify the cost model for 1 src shuffles
;
-; SKX-LABEL: 'test_vXf64'
+; AVX512-LABEL: 'test_vXf64'
define void @test_vXf64(<4 x double> %src256, <8 x double> %src512, <16 x double> %src1024) {
- ; SKX: cost of 1 {{.*}} %V256 = shufflevector
+ ; SSE2: cost of 4 {{.*}} %V256 = shufflevector
+ ; SSSE3: cost of 4 {{.*}} %V256 = shufflevector
+ ; SSE42: cost of 4 {{.*}} %V256 = shufflevector
+ ; AVX1: cost of 6 {{.*}} %V256 = shufflevector
+ ; AVX2: cost of 6 {{.*}} %V256 = shufflevector
+ ; AVX512: cost of 1 {{.*}} %V256 = shufflevector
%V256 = shufflevector <4 x double> %src256, <4 x double> undef, <4 x i32> <i32 3, i32 3, i32 1, i32 0>
- ; SKX: cost of 1 {{.*}} %V512 = shufflevector
+ ; SSE2: cost of 24 {{.*}} %V512 = shufflevector
+ ; SSSE3: cost of 24 {{.*}} %V512 = shufflevector
+ ; SSE42: cost of 24 {{.*}} %V512 = shufflevector
+ ; AVX1: cost of 12 {{.*}} %V512 = shufflevector
+ ; AVX2: cost of 12 {{.*}} %V512 = shufflevector
+ ; AVX512: cost of 1 {{.*}} %V512 = shufflevector
%V512 = shufflevector <8 x double> %src512, <8 x double> undef, <8 x i32> <i32 7, i32 6, i32 6, i32 4, i32 3, i32 2, i32 1, i32 0>
- ; SKX: cost of 2 {{.*}} %V1024 = shufflevector
+ ; SSE2: cost of 112 {{.*}} %V1024 = shufflevector
+ ; SSSE3: cost of 112 {{.*}} %V1024 = shufflevector
+ ; SSE42: cost of 112 {{.*}} %V1024 = shufflevector
+ ; AVX1: cost of 72 {{.*}} %V1024 = shufflevector
+ ; AVX2: cost of 72 {{.*}} %V1024 = shufflevector
+ ; AVX512: cost of 2 {{.*}} %V1024 = shufflevector
%V1024 = shufflevector <16 x double> %src1024, <16 x double> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 13, i32 11, i32 10, i32 8, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret void
}
-; SKX-LABEL: 'test_vXi64'
+; AVX512-LABEL: 'test_vXi64'
define void @test_vXi64(<4 x i64> %src256, <8 x i64> %src512) {
- ; SKX: cost of 1 {{.*}} %V256 = shufflevector
+ ; SSE2: cost of 8 {{.*}} %V256 = shufflevector
+ ; SSSE3: cost of 8 {{.*}} %V256 = shufflevector
+ ; SSE42: cost of 8 {{.*}} %V256 = shufflevector
+ ; AVX1: cost of 8 {{.*}} %V256 = shufflevector
+ ; AVX2: cost of 1 {{.*}} %V256 = shufflevector
+ ; AVX512: cost of 1 {{.*}} %V256 = shufflevector
%V256 = shufflevector <4 x i64> %src256, <4 x i64> undef, <4 x i32> <i32 3, i32 3, i32 1, i32 0>
- ; SKX: cost of 1 {{.*}} %V512 = shufflevector
+ ; SSE2: cost of 48 {{.*}} %V512 = shufflevector
+ ; SSSE3: cost of 48 {{.*}} %V512 = shufflevector
+ ; SSE42: cost of 48 {{.*}} %V512 = shufflevector
+ ; AVX1: cost of 16 {{.*}} %V512 = shufflevector
+ ; AVX2: cost of 16 {{.*}} %V512 = shufflevector
+ ; AVX512: cost of 1 {{.*}} %V512 = shufflevector
%V512 = shufflevector <8 x i64> %src512, <8 x i64> undef, <8 x i32> <i32 7, i32 6, i32 6, i32 4, i32 3, i32 2, i32 1, i32 0>
ret void
@@ -33,13 +64,28 @@ define void @test_vXi64(<4 x i64> %src256, <8 x i64> %src512) {
; CHECK-LABEL: 'test_vXf32'
define void @test_vXf32(<4 x float> %src128, <8 x float> %src256, <16 x float> %src512) {
- ; SKX: cost of 1 {{.*}} %V128 = shufflevector
+ ; SSE2: cost of 6 {{.*}} %V128 = shufflevector
+ ; SSSE3: cost of 6 {{.*}} %V128 = shufflevector
+ ; SSE42: cost of 6 {{.*}} %V128 = shufflevector
+ ; AVX1: cost of 6 {{.*}} %V128 = shufflevector
+ ; AVX2: cost of 6 {{.*}} %V128 = shufflevector
+ ; AVX512: cost of 1 {{.*}} %V128 = shufflevector
%V128 = shufflevector <4 x float> %src128, <4 x float> undef, <4 x i32> <i32 3, i32 3, i32 1, i32 0>
- ; SKX: cost of 1 {{.*}} %V256 = shufflevector
+ ; SSE2: cost of 12 {{.*}} %V256 = shufflevector
+ ; SSSE3: cost of 12 {{.*}} %V256 = shufflevector
+ ; SSE42: cost of 12 {{.*}} %V256 = shufflevector
+ ; AVX1: cost of 14 {{.*}} %V256 = shufflevector
+ ; AVX2: cost of 14 {{.*}} %V256 = shufflevector
+ ; AVX512: cost of 1 {{.*}} %V256 = shufflevector
%V256 = shufflevector <8 x float> %src256, <8 x float> undef, <8 x i32> <i32 7, i32 6, i32 6, i32 4, i32 3, i32 2, i32 1, i32 0>
- ; SKX: cost of 1 {{.*}} %V512 = shufflevector
+ ; SSE2: cost of 72 {{.*}} %V512 = shufflevector
+ ; SSSE3: cost of 72 {{.*}} %V512 = shufflevector
+ ; SSE42: cost of 72 {{.*}} %V512 = shufflevector
+ ; AVX1: cost of 28 {{.*}} %V512 = shufflevector
+ ; AVX2: cost of 28 {{.*}} %V512 = shufflevector
+ ; AVX512: cost of 1 {{.*}} %V512 = shufflevector
%V512 = shufflevector <16 x float> %src512, <16 x float> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 13, i32 11, i32 10, i32 8, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret void
@@ -48,16 +94,36 @@ define void @test_vXf32(<4 x float> %src128, <8 x float> %src256, <16 x float> %
; CHECK-LABEL: 'test_vXi32'
define void @test_vXi32(<4 x i32> %src128, <8 x i32> %src256, <16 x i32> %src512, <32 x i32> %src1024) {
- ; SKX: cost of 1 {{.*}} %V128 = shufflevector
+ ; SSE2: cost of 1 {{.*}} %V128 = shufflevector
+ ; SSSE3: cost of 1 {{.*}} %V128 = shufflevector
+ ; SSE42: cost of 1 {{.*}} %V128 = shufflevector
+ ; AVX1: cost of 1 {{.*}} %V128 = shufflevector
+ ; AVX2: cost of 1 {{.*}} %V128 = shufflevector
+ ; AVX512: cost of 1 {{.*}} %V128 = shufflevector
%V128 = shufflevector <4 x i32> %src128, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 1, i32 0>
- ; SKX: cost of 1 {{.*}} %V256 = shufflevector
+ ; SSE2: cost of 16 {{.*}} %V256 = shufflevector
+ ; SSSE3: cost of 16 {{.*}} %V256 = shufflevector
+ ; SSE42: cost of 16 {{.*}} %V256 = shufflevector
+ ; AVX1: cost of 16 {{.*}} %V256 = shufflevector
+ ; AVX2: cost of 1 {{.*}} %V256 = shufflevector
+ ; AVX512: cost of 1 {{.*}} %V256 = shufflevector
%V256 = shufflevector <8 x i32> %src256, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 5, i32 3, i32 2, i32 1, i32 0>
- ; SKX: cost of 1 {{.*}} %V512 = shufflevector
+ ; SSE2: cost of 96 {{.*}} %V512 = shufflevector
+ ; SSSE3: cost of 96 {{.*}} %V512 = shufflevector
+ ; SSE42: cost of 96 {{.*}} %V512 = shufflevector
+ ; AVX1: cost of 32 {{.*}} %V512 = shufflevector
+ ; AVX2: cost of 32 {{.*}} %V512 = shufflevector
+ ; AVX512: cost of 1 {{.*}} %V512 = shufflevector
%V512 = shufflevector <16 x i32> %src512, <16 x i32> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 13, i32 10, i32 9, i32 8, i32 8, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
- ; SKX: cost of 2 {{.*}} %V1024 = shufflevector
+ ; SSE2: cost of 448 {{.*}} %V1024 = shufflevector
+ ; SSSE3: cost of 448 {{.*}} %V1024 = shufflevector
+ ; SSE42: cost of 448 {{.*}} %V1024 = shufflevector
+ ; AVX1: cost of 192 {{.*}} %V1024 = shufflevector
+ ; AVX2: cost of 192 {{.*}} %V1024 = shufflevector
+ ; AVX512: cost of 2 {{.*}} %V1024 = shufflevector
%V1024 = shufflevector <32 x i32> %src1024, <32 x i32> undef, <32 x i32> <i32 31, i32 30, i32 20, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 11, i32 9, i32 8, i32 7, i32 11, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret void
}
@@ -65,29 +131,70 @@ define void @test_vXi32(<4 x i32> %src128, <8 x i32> %src256, <16 x i32> %src512
; CHECK-LABEL: 'test_vXi16'
define void @test_vXi16(<8 x i16> %src128, <16 x i16> %src256, <32 x i16> %src512, <64 x i16> %src1024) {
- ; SKX: cost of 1 {{.*}} %V128 = shufflevector
+ ; SSE2: cost of 16 {{.*}} %V128 = shufflevector
+ ; SSSE3: cost of 1 {{.*}} %V128 = shufflevector
+ ; SSE42: cost of 1 {{.*}} %V128 = shufflevector
+ ; AVX1: cost of 1 {{.*}} %V128 = shufflevector
+ ; AVX2: cost of 1 {{.*}} %V128 = shufflevector
+ ; AVX512F: cost of 1 {{.*}} %V128 = shufflevector
+ ; AVX512BW: cost of 1 {{.*}} %V128 = shufflevector
%V128 = shufflevector <8 x i16> %src128, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 6, i32 4, i32 3, i32 2, i32 1, i32 0>
- ; SKX: cost of 1 {{.*}} %V256 = shufflevector
+ ; SSE2: cost of 32 {{.*}} %V256 = shufflevector
+ ; SSSE3: cost of 32 {{.*}} %V256 = shufflevector
+ ; SSE42: cost of 32 {{.*}} %V256 = shufflevector
+ ; AVX1: cost of 32 {{.*}} %V256 = shufflevector
+ ; AVX2: cost of 4 {{.*}} %V256 = shufflevector
+ ; AVX512F: cost of 4 {{.*}} %V256 = shufflevector
+ ; AVX512BW: cost of 1 {{.*}} %V256 = shufflevector
%V256 = shufflevector <16 x i16> %src256, <16 x i16> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 13, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
- ; SKX: cost of 1 {{.*}} %V512 = shufflevector
+ ; SSE2: cost of 192 {{.*}} %V512 = shufflevector
+ ; SSSE3: cost of 192 {{.*}} %V512 = shufflevector
+ ; SSE42: cost of 192 {{.*}} %V512 = shufflevector
+ ; AVX1: cost of 64 {{.*}} %V512 = shufflevector
+ ; AVX2: cost of 64 {{.*}} %V512 = shufflevector
+ ; AVX512F: cost of 64 {{.*}} %V512 = shufflevector
+ ; AVX512BW: cost of 1 {{.*}} %V512 = shufflevector
%V512 = shufflevector <32 x i16> %src512, <32 x i16> undef, <32 x i32> <i32 31, i32 30, i32 20, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 11, i32 9, i32 8, i32 7, i32 11, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
- ; SKX: cost of 2 {{.*}} %V1024 = shufflevector
+ ; SSE2: cost of 896 {{.*}} %V1024 = shufflevector
+ ; SSSE3: cost of 896 {{.*}} %V1024 = shufflevector
+ ; SSE42: cost of 896 {{.*}} %V1024 = shufflevector
+ ; AVX1: cost of 384 {{.*}} %V1024 = shufflevector
+ ; AVX2: cost of 384 {{.*}} %V1024 = shufflevector
+ ; AVX512F: cost of 384 {{.*}} %V1024 = shufflevector
+ ; AVX512BW: cost of 2 {{.*}} %V1024 = shufflevector
%V1024 = shufflevector <64 x i16> %src1024, <64 x i16> undef, <64 x i32> <i32 63, i32 62, i32 61, i32 60, i32 59, i32 58, i32 57, i32 56, i32 55, i32 54, i32 53, i32 52, i32 51, i32 50, i32 49, i32 48, i32 47, i32 46, i32 45, i32 44, i32 43, i32 42, i32 41, i32 40, i32 39, i32 38, i32 37, i32 36, i32 35, i32 34, i32 33, i32 32, i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 20, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret void
}
; CHECK-LABEL: 'test_vXi8'
define void @test_vXi8(<16 x i8> %src128, <32 x i8> %src256, <64 x i8> %src512) {
- ; SKX: cost of 1 {{.*}} %V128 = shufflevector
+ ; SSE2: cost of 32 {{.*}} %V128 = shufflevector
+ ; SSSE3: cost of 1 {{.*}} %V128 = shufflevector
+ ; SSE42: cost of 1 {{.*}} %V128 = shufflevector
+ ; AVX1: cost of 1 {{.*}} %V128 = shufflevector
+ ; AVX2: cost of 1 {{.*}} %V128 = shufflevector
+ ; AVX512: cost of 1 {{.*}} %V128 = shufflevector
%V128 = shufflevector <16 x i8> %src128, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 11, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
- ; SKX: cost of 3 {{.*}} %V256 = shufflevector
+ ; SSE2: cost of 64 {{.*}} %V256 = shufflevector
+ ; SSSE3: cost of 64 {{.*}} %V256 = shufflevector
+ ; SSE42: cost of 64 {{.*}} %V256 = shufflevector
+ ; AVX1: cost of 64 {{.*}} %V256 = shufflevector
+ ; AVX2: cost of 4 {{.*}} %V256 = shufflevector
+ ; AVX512F: cost of 4 {{.*}} %V256 = shufflevector
+ ; AVX512BW: cost of 3 {{.*}} %V256 = shufflevector
%V256 = shufflevector <32 x i8> %src256, <32 x i8> undef, <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 8, i32 8, i32 7, i32 6, i32 8, i32 4, i32 3, i32 2, i32 1, i32 0>
- ; SKX: cost of 8 {{.*}} %V512 = shufflevector
+ ; SSE2: cost of 384 {{.*}} %V512 = shufflevector
+ ; SSSE3: cost of 384 {{.*}} %V512 = shufflevector
+ ; SSE42: cost of 384 {{.*}} %V512 = shufflevector
+ ; AVX1: cost of 128 {{.*}} %V512 = shufflevector
+ ; AVX2: cost of 128 {{.*}} %V512 = shufflevector
+ ; AVX512F: cost of 128 {{.*}} %V512 = shufflevector
+ ; AVX512BW: cost of 8 {{.*}} %V512 = shufflevector
%V512 = shufflevector <64 x i8> %src512, <64 x i8> undef, <64 x i32> <i32 63, i32 62, i32 61, i32 60, i32 59, i32 58, i32 57, i32 56, i32 55, i32 54, i32 53, i32 52, i32 51, i32 50, i32 49, i32 48, i32 47, i32 46, i32 45, i32 44, i32 43, i32 42, i32 41, i32 40, i32 39, i32 38, i32 37, i32 36, i32 35, i32 34, i32 33, i32 32, i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 20, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret void
diff --git a/test/Analysis/CostModel/X86/vshift-ashr-cost.ll b/test/Analysis/CostModel/X86/vshift-ashr-cost.ll
index 13519018d957..a23b13fb2e25 100644
--- a/test/Analysis/CostModel/X86/vshift-ashr-cost.ll
+++ b/test/Analysis/CostModel/X86/vshift-ashr-cost.ll
@@ -7,9 +7,9 @@
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512dq -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512bw -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512vl -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512VL
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512dq,+avx512vl -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512VL
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512bw,+avx512vl -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BWVL
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512vl -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512dq,+avx512vl -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512bw,+avx512vl -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
; Verify the cost of vector arithmetic shift right instructions.
@@ -23,7 +23,7 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE41: Found an estimated cost of 12 for instruction: %shift
; AVX: Found an estimated cost of 12 for instruction: %shift
; AVX2: Found an estimated cost of 4 for instruction: %shift
-; AVX512: Found an estimated cost of 4 for instruction: %shift
+; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 2 for instruction: %shift
%shift = ashr <2 x i64> %a, %b
ret <2 x i64> %shift
@@ -35,7 +35,7 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE41: Found an estimated cost of 24 for instruction: %shift
; AVX: Found an estimated cost of 24 for instruction: %shift
; AVX2: Found an estimated cost of 4 for instruction: %shift
-; AVX512: Found an estimated cost of 4 for instruction: %shift
+; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 4 for instruction: %shift
%shift = ashr <4 x i64> %a, %b
ret <4 x i64> %shift
@@ -98,7 +98,8 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE41: Found an estimated cost of 14 for instruction: %shift
; AVX: Found an estimated cost of 14 for instruction: %shift
; AVX2: Found an estimated cost of 14 for instruction: %shift
-; AVX512: Found an estimated cost of 14 for instruction: %shift
+; AVX512F: Found an estimated cost of 14 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 2 for instruction: %shift
%shift = ashr <8 x i16> %a, %b
ret <8 x i16> %shift
@@ -110,7 +111,8 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE41: Found an estimated cost of 28 for instruction: %shift
; AVX: Found an estimated cost of 28 for instruction: %shift
; AVX2: Found an estimated cost of 10 for instruction: %shift
-; AVX512: Found an estimated cost of 10 for instruction: %shift
+; AVX512F: Found an estimated cost of 10 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 4 for instruction: %shift
%shift = ashr <16 x i16> %a, %b
ret <16 x i16> %shift
@@ -124,8 +126,6 @@ define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) {
; AVX2: Found an estimated cost of 20 for instruction: %shift
; AVX512F: Found an estimated cost of 20 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; AVX512VL: Found an estimated cost of 20 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 8 for instruction: %shift
%shift = ashr <32 x i16> %a, %b
ret <32 x i16> %shift
@@ -151,8 +151,6 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX2: Found an estimated cost of 24 for instruction: %shift
; AVX512F: Found an estimated cost of 24 for instruction: %shift
; AVX512BW: Found an estimated cost of 24 for instruction: %shift
-; AVX512VL: Found an estimated cost of 24 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 24 for instruction: %shift
; XOP: Found an estimated cost of 4 for instruction: %shift
%shift = ashr <32 x i8> %a, %b
ret <32 x i8> %shift
@@ -166,8 +164,6 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) {
; AVX2: Found an estimated cost of 48 for instruction: %shift
; AVX512F: Found an estimated cost of 48 for instruction: %shift
; AVX512BW: Found an estimated cost of 24 for instruction: %shift
-; AVX512VL: Found an estimated cost of 48 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 24 for instruction: %shift
; XOP: Found an estimated cost of 8 for instruction: %shift
%shift = ashr <64 x i8> %a, %b
ret <64 x i8> %shift
@@ -177,130 +173,140 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) {
; Uniform Variable Shifts
;
-define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) {
+define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, i64 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v2i64':
-; SSE2: Found an estimated cost of 12 for instruction: %shift
-; SSE41: Found an estimated cost of 12 for instruction: %shift
-; AVX: Found an estimated cost of 12 for instruction: %shift
+; SSE2: Found an estimated cost of 4 for instruction: %shift
+; SSE41: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 4 for instruction: %shift
-; AVX512: Found an estimated cost of 4 for instruction: %shift
+; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 2 for instruction: %shift
- %splat = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+ %insert = insertelement <2 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <2 x i64> %insert, <2 x i64> undef, <2 x i32> zeroinitializer
%shift = ashr <2 x i64> %a, %splat
ret <2 x i64> %shift
}
-define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) {
+define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, i64 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v4i64':
-; SSE2: Found an estimated cost of 24 for instruction: %shift
-; SSE41: Found an estimated cost of 24 for instruction: %shift
-; AVX: Found an estimated cost of 24 for instruction: %shift
-; AVX2: Found an estimated cost of 4 for instruction: %shift
-; AVX512: Found an estimated cost of 4 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
- %splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
+; SSE2: Found an estimated cost of 8 for instruction: %shift
+; SSE41: Found an estimated cost of 8 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
+; AVX2: Found an estimated cost of 8 for instruction: %shift
+; AVX512: Found an estimated cost of 1 for instruction: %shift
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 4 for instruction: %shift
+ %insert = insertelement <4 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <4 x i64> %insert, <4 x i64> undef, <4 x i32> zeroinitializer
%shift = ashr <4 x i64> %a, %splat
ret <4 x i64> %shift
}
-define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, <8 x i64> %b) {
+define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, i64 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v8i64':
-; SSE2: Found an estimated cost of 48 for instruction: %shift
-; SSE41: Found an estimated cost of 48 for instruction: %shift
-; AVX: Found an estimated cost of 48 for instruction: %shift
-; AVX2: Found an estimated cost of 8 for instruction: %shift
+; SSE2: Found an estimated cost of 16 for instruction: %shift
+; SSE41: Found an estimated cost of 16 for instruction: %shift
+; AVX: Found an estimated cost of 16 for instruction: %shift
+; AVX2: Found an estimated cost of 16 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
- %splat = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 8 for instruction: %shift
+ %insert = insertelement <8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <8 x i64> %insert, <8 x i64> undef, <8 x i32> zeroinitializer
%shift = ashr <8 x i64> %a, %splat
ret <8 x i64> %shift
}
-define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) {
+define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, i32 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v4i32':
-; SSE2: Found an estimated cost of 16 for instruction: %shift
-; SSE41: Found an estimated cost of 12 for instruction: %shift
-; AVX: Found an estimated cost of 12 for instruction: %shift
+; SSE2: Found an estimated cost of 1 for instruction: %shift
+; SSE41: Found an estimated cost of 1 for instruction: %shift
+; AVX: Found an estimated cost of 1 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 2 for instruction: %shift
-; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
- %splat = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+; XOP: Found an estimated cost of 1 for instruction: %shift
+ %insert = insertelement <4 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <4 x i32> %insert, <4 x i32> undef, <4 x i32> zeroinitializer
%shift = ashr <4 x i32> %a, %splat
ret <4 x i32> %shift
}
-define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) {
+define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, i32 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v8i32':
-; SSE2: Found an estimated cost of 32 for instruction: %shift
-; SSE41: Found an estimated cost of 24 for instruction: %shift
-; AVX: Found an estimated cost of 24 for instruction: %shift
+; SSE2: Found an estimated cost of 2 for instruction: %shift
+; SSE41: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 2 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
- %splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
+ %insert = insertelement <8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer
%shift = ashr <8 x i32> %a, %splat
ret <8 x i32> %shift
}
-define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, <16 x i32> %b) {
+define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, i32 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v16i32':
-; SSE2: Found an estimated cost of 64 for instruction: %shift
-; SSE41: Found an estimated cost of 48 for instruction: %shift
-; AVX: Found an estimated cost of 48 for instruction: %shift
+; SSE2: Found an estimated cost of 4 for instruction: %shift
+; SSE41: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 8 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
- %splat = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
+ %insert = insertelement <16 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <16 x i32> %insert, <16 x i32> undef, <16 x i32> zeroinitializer
%shift = ashr <16 x i32> %a, %splat
ret <16 x i32> %shift
}
-define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, <8 x i16> %b) {
+define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, i16 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v8i16':
-; SSE2: Found an estimated cost of 32 for instruction: %shift
-; SSE41: Found an estimated cost of 14 for instruction: %shift
-; AVX: Found an estimated cost of 14 for instruction: %shift
-; AVX2: Found an estimated cost of 14 for instruction: %shift
-; AVX512: Found an estimated cost of 14 for instruction: %shift
-; XOP: Found an estimated cost of 2 for instruction: %shift
- %splat = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+; SSE2: Found an estimated cost of 1 for instruction: %shift
+; SSE41: Found an estimated cost of 1 for instruction: %shift
+; AVX: Found an estimated cost of 1 for instruction: %shift
+; AVX2: Found an estimated cost of 1 for instruction: %shift
+; AVX512: Found an estimated cost of 1 for instruction: %shift
+; XOP: Found an estimated cost of 1 for instruction: %shift
+ %insert = insertelement <8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <8 x i16> %insert, <8 x i16> undef, <8 x i32> zeroinitializer
%shift = ashr <8 x i16> %a, %splat
ret <8 x i16> %shift
}
-define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) {
+define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, i16 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v16i16':
-; SSE2: Found an estimated cost of 64 for instruction: %shift
-; SSE41: Found an estimated cost of 28 for instruction: %shift
-; AVX: Found an estimated cost of 28 for instruction: %shift
-; AVX2: Found an estimated cost of 10 for instruction: %shift
-; AVX512: Found an estimated cost of 10 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
- %splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
+; SSE2: Found an estimated cost of 2 for instruction: %shift
+; SSE41: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX2: Found an estimated cost of 1 for instruction: %shift
+; AVX512: Found an estimated cost of 1 for instruction: %shift
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
+ %insert = insertelement <16 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <16 x i16> %insert, <16 x i16> undef, <16 x i32> zeroinitializer
%shift = ashr <16 x i16> %a, %splat
ret <16 x i16> %shift
}
-define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) {
+define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, i16 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v32i16':
-; SSE2: Found an estimated cost of 128 for instruction: %shift
-; SSE41: Found an estimated cost of 56 for instruction: %shift
-; AVX: Found an estimated cost of 56 for instruction: %shift
-; AVX2: Found an estimated cost of 20 for instruction: %shift
-; AVX512F: Found an estimated cost of 20 for instruction: %shift
+; SSE2: Found an estimated cost of 4 for instruction: %shift
+; SSE41: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX2: Found an estimated cost of 2 for instruction: %shift
+; AVX512F: Found an estimated cost of 2 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; AVX512VL: Found an estimated cost of 20 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
- %splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
+ %insert = insertelement <32 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <32 x i16> %insert, <32 x i16> undef, <32 x i32> zeroinitializer
%shift = ashr <32 x i16> %a, %splat
ret <32 x i16> %shift
}
-define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) {
+define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, i8 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v16i8':
; SSE2: Found an estimated cost of 54 for instruction: %shift
; SSE41: Found an estimated cost of 24 for instruction: %shift
@@ -308,12 +314,13 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) {
; AVX2: Found an estimated cost of 24 for instruction: %shift
; AVX512: Found an estimated cost of 24 for instruction: %shift
; XOP: Found an estimated cost of 2 for instruction: %shift
- %splat = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+ %insert = insertelement <16 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <16 x i8> %insert, <16 x i8> undef, <16 x i32> zeroinitializer
%shift = ashr <16 x i8> %a, %splat
ret <16 x i8> %shift
}
-define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) {
+define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, i8 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v32i8':
; SSE2: Found an estimated cost of 108 for instruction: %shift
; SSE41: Found an estimated cost of 48 for instruction: %shift
@@ -321,12 +328,13 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX2: Found an estimated cost of 24 for instruction: %shift
; AVX512: Found an estimated cost of 24 for instruction: %shift
; XOP: Found an estimated cost of 4 for instruction: %shift
- %splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
+ %insert = insertelement <32 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <32 x i8> %insert, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = ashr <32 x i8> %a, %splat
ret <32 x i8> %shift
}
-define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) {
+define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, i8 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v64i8':
; SSE2: Found an estimated cost of 216 for instruction: %shift
; SSE41: Found an estimated cost of 96 for instruction: %shift
@@ -334,10 +342,9 @@ define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) {
; AVX2: Found an estimated cost of 48 for instruction: %shift
; AVX512F: Found an estimated cost of 48 for instruction: %shift
; AVX512BW: Found an estimated cost of 24 for instruction: %shift
-; AVX512VL: Found an estimated cost of 48 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 24 for instruction: %shift
; XOP: Found an estimated cost of 8 for instruction: %shift
- %splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
+ %insert = insertelement <64 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <64 x i8> %insert, <64 x i8> undef, <64 x i32> zeroinitializer
%shift = ashr <64 x i8> %a, %splat
ret <64 x i8> %shift
}
@@ -352,7 +359,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) {
; SSE41: Found an estimated cost of 12 for instruction: %shift
; AVX: Found an estimated cost of 12 for instruction: %shift
; AVX2: Found an estimated cost of 4 for instruction: %shift
-; AVX512: Found an estimated cost of 4 for instruction: %shift
+; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 2 for instruction: %shift
%shift = ashr <2 x i64> %a, <i64 1, i64 7>
ret <2 x i64> %shift
@@ -364,7 +371,7 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) {
; SSE41: Found an estimated cost of 24 for instruction: %shift
; AVX: Found an estimated cost of 24 for instruction: %shift
; AVX2: Found an estimated cost of 4 for instruction: %shift
-; AVX512: Found an estimated cost of 4 for instruction: %shift
+; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 4 for instruction: %shift
%shift = ashr <4 x i64> %a, <i64 1, i64 7, i64 15, i64 31>
ret <4 x i64> %shift
@@ -427,7 +434,8 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) {
; SSE41: Found an estimated cost of 14 for instruction: %shift
; AVX: Found an estimated cost of 14 for instruction: %shift
; AVX2: Found an estimated cost of 14 for instruction: %shift
-; AVX512: Found an estimated cost of 14 for instruction: %shift
+; AVX512F: Found an estimated cost of 14 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 2 for instruction: %shift
%shift = ashr <8 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
ret <8 x i16> %shift
@@ -439,7 +447,8 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) {
; SSE41: Found an estimated cost of 28 for instruction: %shift
; AVX: Found an estimated cost of 28 for instruction: %shift
; AVX2: Found an estimated cost of 10 for instruction: %shift
-; AVX512: Found an estimated cost of 10 for instruction: %shift
+; AVX512F: Found an estimated cost of 10 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 4 for instruction: %shift
%shift = ashr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
ret <16 x i16> %shift
@@ -453,8 +462,6 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) {
; AVX2: Found an estimated cost of 20 for instruction: %shift
; AVX512F: Found an estimated cost of 20 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; AVX512VL: Found an estimated cost of 20 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 8 for instruction: %shift
%shift = ashr <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
ret <32 x i16> %shift
@@ -492,8 +499,6 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) {
; AVX2: Found an estimated cost of 48 for instruction: %shift
; AVX512F: Found an estimated cost of 48 for instruction: %shift
; AVX512BW: Found an estimated cost of 24 for instruction: %shift
-; AVX512VL: Found an estimated cost of 48 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 24 for instruction: %shift
; XOP: Found an estimated cost of 8 for instruction: %shift
%shift = ashr <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <64 x i8> %shift
@@ -509,7 +514,7 @@ define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) {
; SSE41: Found an estimated cost of 4 for instruction: %shift
; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 4 for instruction: %shift
-; AVX512: Found an estimated cost of 4 for instruction: %shift
+; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 2 for instruction: %shift
%shift = ashr <2 x i64> %a, <i64 7, i64 7>
ret <2 x i64> %shift
@@ -521,7 +526,7 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) {
; SSE41: Found an estimated cost of 8 for instruction: %shift
; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 4 for instruction: %shift
-; AVX512: Found an estimated cost of 4 for instruction: %shift
+; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 4 for instruction: %shift
%shift = ashr <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
ret <4 x i64> %shift
@@ -610,8 +615,6 @@ define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) {
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512F: Found an estimated cost of 2 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; AVX512VL: Found an estimated cost of 2 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 8 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = ashr <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
@@ -651,8 +654,6 @@ define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) {
; AVX2: Found an estimated cost of 8 for instruction: %shift
; AVX512F: Found an estimated cost of 8 for instruction: %shift
; AVX512BW: Found an estimated cost of 4 for instruction: %shift
-; AVX512VL: Found an estimated cost of 8 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 4 for instruction: %shift
; XOPAVX: Found an estimated cost of 16 for instruction: %shift
; XOPAVX2: Found an estimated cost of 8 for instruction: %shift
%shift = ashr <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
diff --git a/test/Analysis/CostModel/X86/vshift-lshr-cost.ll b/test/Analysis/CostModel/X86/vshift-lshr-cost.ll
index 1e0fbce710ef..546b2bb50f26 100644
--- a/test/Analysis/CostModel/X86/vshift-lshr-cost.ll
+++ b/test/Analysis/CostModel/X86/vshift-lshr-cost.ll
@@ -7,9 +7,9 @@
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512dq -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512bw -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512vl -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512VL
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512dq,+avx512vl -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512VL
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512bw,+avx512vl -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BWVL
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512vl -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512dq,+avx512vl -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512bw,+avx512vl -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
; Verify the cost of vector logical shift right instructions.
@@ -101,7 +101,8 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE41: Found an estimated cost of 14 for instruction: %shift
; AVX: Found an estimated cost of 14 for instruction: %shift
; AVX2: Found an estimated cost of 14 for instruction: %shift
-; AVX512: Found an estimated cost of 14 for instruction: %shift
+; AVX512F: Found an estimated cost of 14 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 2 for instruction: %shift
%shift = lshr <8 x i16> %a, %b
ret <8 x i16> %shift
@@ -113,7 +114,8 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE41: Found an estimated cost of 28 for instruction: %shift
; AVX: Found an estimated cost of 28 for instruction: %shift
; AVX2: Found an estimated cost of 10 for instruction: %shift
-; AVX512: Found an estimated cost of 10 for instruction: %shift
+; AVX512F: Found an estimated cost of 10 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 4 for instruction: %shift
%shift = lshr <16 x i16> %a, %b
ret <16 x i16> %shift
@@ -127,8 +129,6 @@ define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) {
; AVX2: Found an estimated cost of 20 for instruction: %shift
; AVX512F: Found an estimated cost of 20 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; AVX512VL: Found an estimated cost of 20 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 8 for instruction: %shift
%shift = lshr <32 x i16> %a, %b
ret <32 x i16> %shift
@@ -166,8 +166,6 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) {
; AVX2: Found an estimated cost of 22 for instruction: %shift
; AVX512F: Found an estimated cost of 22 for instruction: %shift
; AVX512BW: Found an estimated cost of 11 for instruction: %shift
-; AVX512VL: Found an estimated cost of 22 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 11 for instruction: %shift
; XOP: Found an estimated cost of 8 for instruction: %shift
%shift = lshr <64 x i8> %a, %b
ret <64 x i8> %shift
@@ -177,133 +175,140 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) {
; Uniform Variable Shifts
;
-define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) {
+define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, i64 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v2i64':
-; SSE2: Found an estimated cost of 4 for instruction: %shift
-; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; SSE2: Found an estimated cost of 1 for instruction: %shift
+; SSE41: Found an estimated cost of 1 for instruction: %shift
+; AVX: Found an estimated cost of 1 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 2 for instruction: %shift
-; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
- %splat = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+; XOP: Found an estimated cost of 1 for instruction: %shift
+ %insert = insertelement <2 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <2 x i64> %insert, <2 x i64> undef, <2 x i32> zeroinitializer
%shift = lshr <2 x i64> %a, %splat
ret <2 x i64> %shift
}
-define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) {
+define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, i64 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v4i64':
-; SSE2: Found an estimated cost of 8 for instruction: %shift
-; SSE41: Found an estimated cost of 8 for instruction: %shift
-; AVX: Found an estimated cost of 8 for instruction: %shift
+; SSE2: Found an estimated cost of 2 for instruction: %shift
+; SSE41: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 2 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
- %splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
+ %insert = insertelement <4 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <4 x i64> %insert, <4 x i64> undef, <4 x i32> zeroinitializer
%shift = lshr <4 x i64> %a, %splat
ret <4 x i64> %shift
}
-define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, <8 x i64> %b) {
+define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, i64 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v8i64':
-; SSE2: Found an estimated cost of 16 for instruction: %shift
-; SSE41: Found an estimated cost of 16 for instruction: %shift
-; AVX: Found an estimated cost of 16 for instruction: %shift
+; SSE2: Found an estimated cost of 4 for instruction: %shift
+; SSE41: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 8 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
- %splat = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
+ %insert = insertelement <8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <8 x i64> %insert, <8 x i64> undef, <8 x i32> zeroinitializer
%shift = lshr <8 x i64> %a, %splat
ret <8 x i64> %shift
}
-define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) {
+define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, i32 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v4i32':
-; SSE2: Found an estimated cost of 16 for instruction: %shift
-; SSE41: Found an estimated cost of 11 for instruction: %shift
-; AVX: Found an estimated cost of 11 for instruction: %shift
+; SSE2: Found an estimated cost of 1 for instruction: %shift
+; SSE41: Found an estimated cost of 1 for instruction: %shift
+; AVX: Found an estimated cost of 1 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 2 for instruction: %shift
-; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
- %splat = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+; XOP: Found an estimated cost of 1 for instruction: %shift
+ %insert = insertelement <4 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <4 x i32> %insert, <4 x i32> undef, <4 x i32> zeroinitializer
%shift = lshr <4 x i32> %a, %splat
ret <4 x i32> %shift
}
-define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) {
+define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, i32 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v8i32':
-; SSE2: Found an estimated cost of 32 for instruction: %shift
-; SSE41: Found an estimated cost of 22 for instruction: %shift
-; AVX: Found an estimated cost of 22 for instruction: %shift
+; SSE2: Found an estimated cost of 2 for instruction: %shift
+; SSE41: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 2 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
- %splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
+ %insert = insertelement <8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer
%shift = lshr <8 x i32> %a, %splat
ret <8 x i32> %shift
}
-define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, <16 x i32> %b) {
+define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, i32 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v16i32':
-; SSE2: Found an estimated cost of 64 for instruction: %shift
-; SSE41: Found an estimated cost of 44 for instruction: %shift
-; AVX: Found an estimated cost of 44 for instruction: %shift
+; SSE2: Found an estimated cost of 4 for instruction: %shift
+; SSE41: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 8 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
- %splat = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
+ %insert = insertelement <16 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <16 x i32> %insert, <16 x i32> undef, <16 x i32> zeroinitializer
%shift = lshr <16 x i32> %a, %splat
ret <16 x i32> %shift
}
-define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, <8 x i16> %b) {
+define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, i16 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v8i16':
-; SSE2: Found an estimated cost of 32 for instruction: %shift
-; SSE41: Found an estimated cost of 14 for instruction: %shift
-; AVX: Found an estimated cost of 14 for instruction: %shift
-; AVX2: Found an estimated cost of 14 for instruction: %shift
-; AVX512: Found an estimated cost of 14 for instruction: %shift
-; XOP: Found an estimated cost of 2 for instruction: %shift
- %splat = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+; SSE2: Found an estimated cost of 1 for instruction: %shift
+; SSE41: Found an estimated cost of 1 for instruction: %shift
+; AVX: Found an estimated cost of 1 for instruction: %shift
+; AVX2: Found an estimated cost of 1 for instruction: %shift
+; AVX512: Found an estimated cost of 1 for instruction: %shift
+; XOP: Found an estimated cost of 1 for instruction: %shift
+ %insert = insertelement <8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <8 x i16> %insert, <8 x i16> undef, <8 x i32> zeroinitializer
%shift = lshr <8 x i16> %a, %splat
ret <8 x i16> %shift
}
-define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) {
+define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, i16 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v16i16':
-; SSE2: Found an estimated cost of 64 for instruction: %shift
-; SSE41: Found an estimated cost of 28 for instruction: %shift
-; AVX: Found an estimated cost of 28 for instruction: %shift
-; AVX2: Found an estimated cost of 10 for instruction: %shift
-; AVX512: Found an estimated cost of 10 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
- %splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
+; SSE2: Found an estimated cost of 2 for instruction: %shift
+; SSE41: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX2: Found an estimated cost of 1 for instruction: %shift
+; AVX512: Found an estimated cost of 1 for instruction: %shift
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
+ %insert = insertelement <16 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <16 x i16> %insert, <16 x i16> undef, <16 x i32> zeroinitializer
%shift = lshr <16 x i16> %a, %splat
ret <16 x i16> %shift
}
-define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) {
+define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, i16 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v32i16':
-; SSE2: Found an estimated cost of 128 for instruction: %shift
-; SSE41: Found an estimated cost of 56 for instruction: %shift
-; AVX: Found an estimated cost of 56 for instruction: %shift
-; AVX2: Found an estimated cost of 20 for instruction: %shift
-; AVX512F: Found an estimated cost of 20 for instruction: %shift
+; SSE2: Found an estimated cost of 4 for instruction: %shift
+; SSE41: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX2: Found an estimated cost of 2 for instruction: %shift
+; AVX512F: Found an estimated cost of 2 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; AVX512VL: Found an estimated cost of 20 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
- %splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
+ %insert = insertelement <32 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <32 x i16> %insert, <32 x i16> undef, <32 x i32> zeroinitializer
%shift = lshr <32 x i16> %a, %splat
ret <32 x i16> %shift
}
-define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) {
+define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, i8 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v16i8':
; SSE2: Found an estimated cost of 26 for instruction: %shift
; SSE41: Found an estimated cost of 12 for instruction: %shift
@@ -311,12 +316,13 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) {
; AVX2: Found an estimated cost of 12 for instruction: %shift
; AVX512: Found an estimated cost of 12 for instruction: %shift
; XOP: Found an estimated cost of 2 for instruction: %shift
- %splat = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+ %insert = insertelement <16 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <16 x i8> %insert, <16 x i8> undef, <16 x i32> zeroinitializer
%shift = lshr <16 x i8> %a, %splat
ret <16 x i8> %shift
}
-define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) {
+define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, i8 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v32i8':
; SSE2: Found an estimated cost of 52 for instruction: %shift
; SSE41: Found an estimated cost of 24 for instruction: %shift
@@ -324,12 +330,13 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX2: Found an estimated cost of 11 for instruction: %shift
; AVX512: Found an estimated cost of 11 for instruction: %shift
; XOP: Found an estimated cost of 4 for instruction: %shift
- %splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
+ %insert = insertelement <32 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <32 x i8> %insert, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = lshr <32 x i8> %a, %splat
ret <32 x i8> %shift
}
-define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) {
+define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, i8 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v64i8':
; SSE2: Found an estimated cost of 104 for instruction: %shift
; SSE41: Found an estimated cost of 48 for instruction: %shift
@@ -337,10 +344,9 @@ define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) {
; AVX2: Found an estimated cost of 22 for instruction: %shift
; AVX512F: Found an estimated cost of 22 for instruction: %shift
; AVX512BW: Found an estimated cost of 11 for instruction: %shift
-; AVX512VL: Found an estimated cost of 22 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 11 for instruction: %shift
; XOP: Found an estimated cost of 8 for instruction: %shift
- %splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
+ %insert = insertelement <64 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <64 x i8> %insert, <64 x i8> undef, <64 x i32> zeroinitializer
%shift = lshr <64 x i8> %a, %splat
ret <64 x i8> %shift
}
@@ -433,7 +439,8 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) {
; SSE41: Found an estimated cost of 14 for instruction: %shift
; AVX: Found an estimated cost of 14 for instruction: %shift
; AVX2: Found an estimated cost of 14 for instruction: %shift
-; AVX512: Found an estimated cost of 14 for instruction: %shift
+; AVX512F: Found an estimated cost of 14 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 2 for instruction: %shift
%shift = lshr <8 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
ret <8 x i16> %shift
@@ -445,7 +452,8 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) {
; SSE41: Found an estimated cost of 28 for instruction: %shift
; AVX: Found an estimated cost of 28 for instruction: %shift
; AVX2: Found an estimated cost of 10 for instruction: %shift
-; AVX512: Found an estimated cost of 10 for instruction: %shift
+; AVX512F: Found an estimated cost of 10 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 4 for instruction: %shift
%shift = lshr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
ret <16 x i16> %shift
@@ -459,8 +467,6 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) {
; AVX2: Found an estimated cost of 20 for instruction: %shift
; AVX512F: Found an estimated cost of 20 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; AVX512VL: Found an estimated cost of 20 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 8 for instruction: %shift
%shift = lshr <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
ret <32 x i16> %shift
@@ -498,8 +504,6 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) {
; AVX2: Found an estimated cost of 22 for instruction: %shift
; AVX512F: Found an estimated cost of 22 for instruction: %shift
; AVX512BW: Found an estimated cost of 11 for instruction: %shift
-; AVX512VL: Found an estimated cost of 22 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 11 for instruction: %shift
; XOP: Found an estimated cost of 8 for instruction: %shift
%shift = lshr <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <64 x i8> %shift
@@ -618,8 +622,6 @@ define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) {
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512F: Found an estimated cost of 2 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; AVX512VL: Found an estimated cost of 2 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 8 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = lshr <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
@@ -659,8 +661,6 @@ define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) {
; AVX2: Found an estimated cost of 4 for instruction: %shift
; AVX512F: Found an estimated cost of 4 for instruction: %shift
; AVX512BW: Found an estimated cost of 2 for instruction: %shift
-; AVX512VL: Found an estimated cost of 4 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 2 for instruction: %shift
; XOPAVX: Found an estimated cost of 8 for instruction: %shift
; XOPAVX2: Found an estimated cost of 4 for instruction: %shift
%shift = lshr <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
diff --git a/test/Analysis/CostModel/X86/vshift-shl-cost.ll b/test/Analysis/CostModel/X86/vshift-shl-cost.ll
index 031d530dcd56..90356f5ce8be 100644
--- a/test/Analysis/CostModel/X86/vshift-shl-cost.ll
+++ b/test/Analysis/CostModel/X86/vshift-shl-cost.ll
@@ -7,9 +7,9 @@
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512dq -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512bw -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512vl -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512VL
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512dq,+avx512vl -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512VL
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512bw,+avx512vl -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BWVL
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512vl -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512dq,+avx512vl -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512bw,+avx512vl -cost-model -analyze | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
; Verify the cost of vector shift left instructions.
@@ -102,7 +102,8 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE41: Found an estimated cost of 14 for instruction: %shift
; AVX: Found an estimated cost of 14 for instruction: %shift
; AVX2: Found an estimated cost of 14 for instruction: %shift
-; AVX512: Found an estimated cost of 14 for instruction: %shift
+; AVX512F: Found an estimated cost of 14 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 1 for instruction: %shift
%shift = shl <8 x i16> %a, %b
ret <8 x i16> %shift
@@ -114,7 +115,8 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE41: Found an estimated cost of 28 for instruction: %shift
; AVX: Found an estimated cost of 28 for instruction: %shift
; AVX2: Found an estimated cost of 10 for instruction: %shift
-; AVX512: Found an estimated cost of 10 for instruction: %shift
+; AVX512F: Found an estimated cost of 10 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 2 for instruction: %shift
%shift = shl <16 x i16> %a, %b
ret <16 x i16> %shift
@@ -165,8 +167,6 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) {
; AVX2: Found an estimated cost of 22 for instruction: %shift
; AVX512F: Found an estimated cost of 22 for instruction: %shift
; AVX512BW: Found an estimated cost of 11 for instruction: %shift
-; AVX512VL: Found an estimated cost of 22 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 11 for instruction: %shift
; XOP: Found an estimated cost of 4 for instruction: %shift
%shift = shl <64 x i8> %a, %b
ret <64 x i8> %shift
@@ -176,133 +176,140 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) {
; Uniform Variable Shifts
;
-define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) {
+define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, i64 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v2i64':
-; SSE2: Found an estimated cost of 4 for instruction: %shift
-; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; SSE2: Found an estimated cost of 1 for instruction: %shift
+; SSE41: Found an estimated cost of 1 for instruction: %shift
+; AVX: Found an estimated cost of 1 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
- %splat = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+; XOP: Found an estimated cost of 1 for instruction: %shift
+ %insert = insertelement <2 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <2 x i64> %insert, <2 x i64> undef, <2 x i32> zeroinitializer
%shift = shl <2 x i64> %a, %splat
ret <2 x i64> %shift
}
-define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) {
+define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, i64 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v4i64':
-; SSE2: Found an estimated cost of 8 for instruction: %shift
-; SSE41: Found an estimated cost of 8 for instruction: %shift
-; AVX: Found an estimated cost of 8 for instruction: %shift
+; SSE2: Found an estimated cost of 2 for instruction: %shift
+; SSE41: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 2 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 2 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
- %splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
+ %insert = insertelement <4 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <4 x i64> %insert, <4 x i64> undef, <4 x i32> zeroinitializer
%shift = shl <4 x i64> %a, %splat
ret <4 x i64> %shift
}
-define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, <8 x i64> %b) {
+define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, i64 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v8i64':
-; SSE2: Found an estimated cost of 16 for instruction: %shift
-; SSE41: Found an estimated cost of 16 for instruction: %shift
-; AVX: Found an estimated cost of 16 for instruction: %shift
+; SSE2: Found an estimated cost of 4 for instruction: %shift
+; SSE41: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
- %splat = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
+ %insert = insertelement <8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <8 x i64> %insert, <8 x i64> undef, <8 x i32> zeroinitializer
%shift = shl <8 x i64> %a, %splat
ret <8 x i64> %shift
}
-define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) {
+define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, i32 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v4i32':
-; SSE2: Found an estimated cost of 10 for instruction: %shift
-; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
+; SSE2: Found an estimated cost of 1 for instruction: %shift
+; SSE41: Found an estimated cost of 1 for instruction: %shift
+; AVX: Found an estimated cost of 1 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
- %splat = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
+; XOP: Found an estimated cost of 1 for instruction: %shift
+ %insert = insertelement <4 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <4 x i32> %insert, <4 x i32> undef, <4 x i32> zeroinitializer
%shift = shl <4 x i32> %a, %splat
ret <4 x i32> %shift
}
-define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) {
+define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, i32 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v8i32':
-; SSE2: Found an estimated cost of 20 for instruction: %shift
-; SSE41: Found an estimated cost of 8 for instruction: %shift
-; AVX: Found an estimated cost of 8 for instruction: %shift
+; SSE2: Found an estimated cost of 2 for instruction: %shift
+; SSE41: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 2 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 2 for instruction: %shift
; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
- %splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
+ %insert = insertelement <8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer
%shift = shl <8 x i32> %a, %splat
ret <8 x i32> %shift
}
-define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, <16 x i32> %b) {
+define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, i32 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v16i32':
-; SSE2: Found an estimated cost of 40 for instruction: %shift
-; SSE41: Found an estimated cost of 16 for instruction: %shift
-; AVX: Found an estimated cost of 16 for instruction: %shift
+; SSE2: Found an estimated cost of 4 for instruction: %shift
+; SSE41: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
- %splat = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
+ %insert = insertelement <16 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <16 x i32> %insert, <16 x i32> undef, <16 x i32> zeroinitializer
%shift = shl <16 x i32> %a, %splat
ret <16 x i32> %shift
}
-define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, <8 x i16> %b) {
+define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, i16 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v8i16':
-; SSE2: Found an estimated cost of 32 for instruction: %shift
-; SSE41: Found an estimated cost of 14 for instruction: %shift
-; AVX: Found an estimated cost of 14 for instruction: %shift
-; AVX2: Found an estimated cost of 14 for instruction: %shift
-; AVX512: Found an estimated cost of 14 for instruction: %shift
+; SSE2: Found an estimated cost of 1 for instruction: %shift
+; SSE41: Found an estimated cost of 1 for instruction: %shift
+; AVX: Found an estimated cost of 1 for instruction: %shift
+; AVX2: Found an estimated cost of 1 for instruction: %shift
+; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 1 for instruction: %shift
- %splat = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
+ %insert = insertelement <8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <8 x i16> %insert, <8 x i16> undef, <8 x i32> zeroinitializer
%shift = shl <8 x i16> %a, %splat
ret <8 x i16> %shift
}
-define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) {
+define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, i16 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v16i16':
-; SSE2: Found an estimated cost of 64 for instruction: %shift
-; SSE41: Found an estimated cost of 28 for instruction: %shift
-; AVX: Found an estimated cost of 28 for instruction: %shift
-; AVX2: Found an estimated cost of 10 for instruction: %shift
-; AVX512: Found an estimated cost of 10 for instruction: %shift
-; XOP: Found an estimated cost of 2 for instruction: %shift
- %splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
+; SSE2: Found an estimated cost of 2 for instruction: %shift
+; SSE41: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX2: Found an estimated cost of 1 for instruction: %shift
+; AVX512: Found an estimated cost of 1 for instruction: %shift
+; XOPAVX: Found an estimated cost of 2 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
+ %insert = insertelement <16 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <16 x i16> %insert, <16 x i16> undef, <16 x i32> zeroinitializer
%shift = shl <16 x i16> %a, %splat
ret <16 x i16> %shift
}
-define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) {
+define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, i16 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v32i16':
-; SSE2: Found an estimated cost of 128 for instruction: %shift
-; SSE41: Found an estimated cost of 56 for instruction: %shift
-; AVX: Found an estimated cost of 56 for instruction: %shift
-; AVX2: Found an estimated cost of 20 for instruction: %shift
-; AVX512F: Found an estimated cost of 20 for instruction: %shift
+; SSE2: Found an estimated cost of 4 for instruction: %shift
+; SSE41: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX2: Found an estimated cost of 2 for instruction: %shift
+; AVX512F: Found an estimated cost of 2 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; AVX512VL: Found an estimated cost of 20 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
- %splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
+ %insert = insertelement <32 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <32 x i16> %insert, <32 x i16> undef, <32 x i32> zeroinitializer
%shift = shl <32 x i16> %a, %splat
ret <32 x i16> %shift
}
-define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) {
+define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, i8 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v16i8':
; SSE2: Found an estimated cost of 26 for instruction: %shift
; SSE41: Found an estimated cost of 11 for instruction: %shift
@@ -310,12 +317,13 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) {
; AVX2: Found an estimated cost of 11 for instruction: %shift
; AVX512: Found an estimated cost of 11 for instruction: %shift
; XOP: Found an estimated cost of 1 for instruction: %shift
- %splat = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
+ %insert = insertelement <16 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <16 x i8> %insert, <16 x i8> undef, <16 x i32> zeroinitializer
%shift = shl <16 x i8> %a, %splat
ret <16 x i8> %shift
}
-define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) {
+define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, i8 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v32i8':
; SSE2: Found an estimated cost of 52 for instruction: %shift
; SSE41: Found an estimated cost of 22 for instruction: %shift
@@ -323,12 +331,13 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX2: Found an estimated cost of 11 for instruction: %shift
; AVX512: Found an estimated cost of 11 for instruction: %shift
; XOP: Found an estimated cost of 2 for instruction: %shift
- %splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
+ %insert = insertelement <32 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <32 x i8> %insert, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = shl <32 x i8> %a, %splat
ret <32 x i8> %shift
}
-define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) {
+define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, i8 %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v64i8':
; SSE2: Found an estimated cost of 104 for instruction: %shift
; SSE41: Found an estimated cost of 44 for instruction: %shift
@@ -336,10 +345,9 @@ define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) {
; AVX2: Found an estimated cost of 22 for instruction: %shift
; AVX512F: Found an estimated cost of 22 for instruction: %shift
; AVX512BW: Found an estimated cost of 11 for instruction: %shift
-; AVX512VL: Found an estimated cost of 22 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 11 for instruction: %shift
; XOP: Found an estimated cost of 4 for instruction: %shift
- %splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
+ %insert = insertelement <64 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <64 x i8> %insert, <64 x i8> undef, <64 x i32> zeroinitializer
%shift = shl <64 x i8> %a, %splat
ret <64 x i8> %shift
}
@@ -459,8 +467,6 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) {
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512F: Found an estimated cost of 2 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; AVX512VL: Found an estimated cost of 2 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = shl <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
@@ -499,8 +505,6 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) {
; AVX2: Found an estimated cost of 22 for instruction: %shift
; AVX512F: Found an estimated cost of 22 for instruction: %shift
; AVX512BW: Found an estimated cost of 11 for instruction: %shift
-; AVX512VL: Found an estimated cost of 22 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 11 for instruction: %shift
; XOP: Found an estimated cost of 4 for instruction: %shift
%shift = shl <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <64 x i8> %shift
@@ -621,8 +625,6 @@ define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) {
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512F: Found an estimated cost of 2 for instruction: %shift
; AVX512BW: Found an estimated cost of 1 for instruction: %shift
-; AVX512VL: Found an estimated cost of 2 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = shl <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
@@ -662,8 +664,6 @@ define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) {
; AVX2: Found an estimated cost of 4 for instruction: %shift
; AVX512F: Found an estimated cost of 4 for instruction: %shift
; AVX512BW: Found an estimated cost of 2 for instruction: %shift
-; AVX512VL: Found an estimated cost of 4 for instruction: %shift
-; AVX512BWVL: Found an estimated cost of 2 for instruction: %shift
; XOPAVX: Found an estimated cost of 8 for instruction: %shift
; XOPAVX2: Found an estimated cost of 4 for instruction: %shift
%shift = shl <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
diff --git a/test/Analysis/Delinearization/a.ll b/test/Analysis/Delinearization/a.ll
index 917fc355726c..a105c205c5e6 100644
--- a/test/Analysis/Delinearization/a.ll
+++ b/test/Analysis/Delinearization/a.ll
@@ -10,7 +10,7 @@
; AddRec: {{{(28 + (4 * (-4 + (3 * %m)) * %o) + %A),+,(8 * %m * %o)}<%for.i>,+,(12 * %o)}<%for.j>,+,20}<%for.k>
; CHECK: Base offset: %A
; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of 4 bytes.
-; CHECK: ArrayRef[{3,+,2}<%for.i>][{-4,+,3}<%for.j>][{7,+,5}<nw><%for.k>]
+; CHECK: ArrayRef[{3,+,2}<%for.i>][{-4,+,3}<nw><%for.j>][{7,+,5}<nw><%for.k>]
define void @foo(i64 %n, i64 %m, i64 %o, i32* nocapture %A) #0 {
entry:
diff --git a/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll b/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll
index 0c893bf11379..bd2f34df6a16 100644
--- a/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll
+++ b/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll
@@ -11,7 +11,7 @@
; AddRec: {{((%m * %b * 8) + %A),+,(2 * %m * 8)}<%for.i>,+,(2 * 8)}<%for.j>
; CHECK: Base offset: %A
; CHECK: ArrayDecl[UnknownSize][%m] with elements of 8 bytes.
-; CHECK: ArrayRef[{%b,+,2}<%for.i>][{0,+,2}<%for.j>]
+; CHECK: ArrayRef[{%b,+,2}<nsw><%for.i>][{0,+,2}<%for.j>]
define void @foo(i64 %n, i64 %m, i64 %b, double* %A) {
diff --git a/test/Analysis/DemandedBits/intrinsics.ll b/test/Analysis/DemandedBits/intrinsics.ll
new file mode 100644
index 000000000000..5a6d17284a72
--- /dev/null
+++ b/test/Analysis/DemandedBits/intrinsics.ll
@@ -0,0 +1,25 @@
+; RUN: opt -S -demanded-bits -analyze < %s | FileCheck %s
+; RUN: opt -S -disable-output -passes="print<demanded-bits>" < %s 2>&1 | FileCheck %s
+
+; CHECK-DAG: DemandedBits: 0xFF000000 for %1 = or i32 %x, 1
+; CHECK-DAG: DemandedBits: 0xFF for %2 = call i32 @llvm.bswap.i32(i32 %1)
+; CHECK-DAG: DemandedBits: 0xFF for %3 = trunc i32 %2 to i8
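+;
+; The masks above (and the analogous ones below) follow from the intrinsic
+; semantics; this is an informal note, not text that FileCheck matches:
+; bswap swaps whole bytes and bitreverse mirrors all 32 bits, so the low
+; byte kept by the trunc to i8 comes entirely from the top byte of the
+; input, i.e. demanding 0xFF of %3 demands 0xFF000000 of %1.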
+define i8 @test_bswap(i32 %x) {
+ %1 = or i32 %x, 1
+ %2 = call i32 @llvm.bswap.i32(i32 %1)
+ %3 = trunc i32 %2 to i8
+ ret i8 %3
+}
+declare i32 @llvm.bswap.i32(i32)
+
+; CHECK-DAG: DemandedBits: 0xFF000000 for %1 = or i32 %x, 1
+; CHECK-DAG: DemandedBits: 0xFF for %2 = call i32 @llvm.bitreverse.i32(i32 %1)
+; CHECK-DAG: DemandedBits: 0xFF for %3 = trunc i32 %2 to i8
+define i8 @test_bitreverse(i32 %x) {
+ %1 = or i32 %x, 1
+ %2 = call i32 @llvm.bitreverse.i32(i32 %1)
+ %3 = trunc i32 %2 to i8
+ ret i8 %3
+}
+declare i32 @llvm.bitreverse.i32(i32)
+
diff --git a/test/Analysis/DivergenceAnalysis/AMDGPU/atomics.ll b/test/Analysis/DivergenceAnalysis/AMDGPU/atomics.ll
index e3323dc5e21c..3214dd41eeb4 100644
--- a/test/Analysis/DivergenceAnalysis/AMDGPU/atomics.ll
+++ b/test/Analysis/DivergenceAnalysis/AMDGPU/atomics.ll
@@ -12,4 +12,34 @@ define {i32, i1} @test2(i32* %ptr, i32 %cmp, i32 %new) {
ret {i32, i1} %orig
}
+; CHECK: DIVERGENT: %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %ptr, i32 %val, i32 0, i32 0, i1 false)
+define i32 @test_atomic_inc_i32(i32 addrspace(1)* %ptr, i32 %val) #0 {
+ %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %ptr, i32 %val, i32 0, i32 0, i1 false)
+ ret i32 %ret
+}
+
+; CHECK: DIVERGENT: %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %ptr, i64 %val, i32 0, i32 0, i1 false)
+define i64 @test_atomic_inc_i64(i64 addrspace(1)* %ptr, i64 %val) #0 {
+ %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %ptr, i64 %val, i32 0, i32 0, i1 false)
+ ret i64 %ret
+}
+
+; CHECK: DIVERGENT: %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %ptr, i32 %val, i32 0, i32 0, i1 false)
+define i32 @test_atomic_dec_i32(i32 addrspace(1)* %ptr, i32 %val) #0 {
+ %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %ptr, i32 %val, i32 0, i32 0, i1 false)
+ ret i32 %ret
+}
+
+; CHECK: DIVERGENT: %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %ptr, i64 %val, i32 0, i32 0, i1 false)
+define i64 @test_atomic_dec_i64(i64 addrspace(1)* %ptr, i64 %val) #0 {
+ %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %ptr, i64 %val, i32 0, i32 0, i1 false)
+ ret i64 %ret
+}
+
+declare i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* nocapture, i32, i32, i32, i1) #1
+declare i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* nocapture, i64, i32, i32, i1) #1
+declare i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* nocapture, i32, i32, i32, i1) #1
+declare i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* nocapture, i64, i32, i32, i1) #1
+
attributes #0 = { nounwind }
+attributes #1 = { nounwind argmemonly }
diff --git a/test/Analysis/DivergenceAnalysis/AMDGPU/interp-intrinsics.ll b/test/Analysis/DivergenceAnalysis/AMDGPU/interp-intrinsics.ll
deleted file mode 100644
index d1c90ba608c8..000000000000
--- a/test/Analysis/DivergenceAnalysis/AMDGPU/interp-intrinsics.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: opt -mtriple amdgcn--- -analyze -divergence %s | FileCheck %s
-
-; CHECK-LABEL: 'fs_interp'
-; CHECK: DIVERGENT: %v = call float @llvm.SI.fs.interp(
-define amdgpu_ps void @fs_interp(i32 inreg %prim_mask, <2 x i32> %interp_param) #1 {
- %v = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %prim_mask, <2 x i32> %interp_param)
- store volatile float %v, float addrspace(1)* undef
- ret void
-}
-
-; CHECK-LABEL: 'fs_constant'
-; CHECK: DIVERGENT: %v = call float @llvm.SI.fs.constant(
-define amdgpu_ps void @fs_constant(i32 inreg %prim_mask, <2 x i32> %interp_param) #1 {
- %v = call float @llvm.SI.fs.constant(i32 0, i32 0, i32 %prim_mask)
- store volatile float %v, float addrspace(1)* undef
- ret void
-}
-
-declare float @llvm.SI.fs.interp(i32, i32, i32, <2 x i32>) #0
-declare float @llvm.SI.fs.constant(i32, i32, i32) #0
-
-attributes #0 = { nounwind readnone }
diff --git a/test/Analysis/DivergenceAnalysis/AMDGPU/intrinsics.ll b/test/Analysis/DivergenceAnalysis/AMDGPU/intrinsics.ll
new file mode 100644
index 000000000000..d22669522591
--- /dev/null
+++ b/test/Analysis/DivergenceAnalysis/AMDGPU/intrinsics.ll
@@ -0,0 +1,13 @@
+; RUN: opt -mtriple=amdgcn-- -analyze -divergence %s | FileCheck %s
+
+; CHECK: DIVERGENT: %swizzle = call i32 @llvm.amdgcn.ds.swizzle(i32 %src, i32 100) #0
+define amdgpu_kernel void @ds_swizzle(i32 addrspace(1)* %out, i32 %src) #0 {
+ %swizzle = call i32 @llvm.amdgcn.ds.swizzle(i32 %src, i32 100) #0
+ store i32 %swizzle, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+declare i32 @llvm.amdgcn.ds.swizzle(i32, i32) #1
+
+attributes #0 = { nounwind convergent }
+attributes #1 = { nounwind readnone convergent }
diff --git a/test/Analysis/DivergenceAnalysis/AMDGPU/kernel-args.ll b/test/Analysis/DivergenceAnalysis/AMDGPU/kernel-args.ll
index 73674d0599e2..0acb050c2519 100644
--- a/test/Analysis/DivergenceAnalysis/AMDGPU/kernel-args.ll
+++ b/test/Analysis/DivergenceAnalysis/AMDGPU/kernel-args.ll
@@ -3,7 +3,7 @@
; CHECK: DIVERGENT:
; CHECK-NOT: %arg0
; CHECK-NOT: %arg1
-; CHECK-NOT; %arg2
+; CHECK-NOT: %arg2
; CHECK: <2 x i32> %arg3
; CHECK: DIVERGENT: <3 x i32> %arg4
; CHECK: DIVERGENT: float %arg5
diff --git a/test/Analysis/DivergenceAnalysis/AMDGPU/no-return-blocks.ll b/test/Analysis/DivergenceAnalysis/AMDGPU/no-return-blocks.ll
index b4fa79a6ba9f..6144ffea5b61 100644
--- a/test/Analysis/DivergenceAnalysis/AMDGPU/no-return-blocks.ll
+++ b/test/Analysis/DivergenceAnalysis/AMDGPU/no-return-blocks.ll
@@ -5,7 +5,7 @@
; CHECK: DIVERGENT: %tmp11 = load volatile float, float addrspace(1)* %tmp5, align 4
; The post dominator tree does not have a root node in this case
-define void @no_return_blocks(float addrspace(1)* noalias nocapture readonly %arg, float addrspace(1)* noalias nocapture readonly %arg1) #0 {
+define amdgpu_kernel void @no_return_blocks(float addrspace(1)* noalias nocapture readonly %arg, float addrspace(1)* noalias nocapture readonly %arg1) #0 {
bb0:
%tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%tmp2 = sext i32 %tmp to i64
diff --git a/test/Analysis/DivergenceAnalysis/AMDGPU/unreachable-loop-block.ll b/test/Analysis/DivergenceAnalysis/AMDGPU/unreachable-loop-block.ll
index ca93dda2c573..7ade8eabd451 100644
--- a/test/Analysis/DivergenceAnalysis/AMDGPU/unreachable-loop-block.ll
+++ b/test/Analysis/DivergenceAnalysis/AMDGPU/unreachable-loop-block.ll
@@ -1,7 +1,7 @@
; RUN: opt %s -mtriple amdgcn-- -analyze -divergence | FileCheck %s
; CHECK: DIVERGENT: %tmp = cmpxchg volatile
-define void @unreachable_loop(i32 %tidx) #0 {
+define amdgpu_kernel void @unreachable_loop(i32 %tidx) #0 {
entry:
unreachable
diff --git a/test/Analysis/DivergenceAnalysis/AMDGPU/workitem-intrinsics.ll b/test/Analysis/DivergenceAnalysis/AMDGPU/workitem-intrinsics.ll
index 669ee802c516..98fbc88a2cfd 100644
--- a/test/Analysis/DivergenceAnalysis/AMDGPU/workitem-intrinsics.ll
+++ b/test/Analysis/DivergenceAnalysis/AMDGPU/workitem-intrinsics.ll
@@ -7,35 +7,35 @@ declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) #0
; CHECK: DIVERGENT: %id.x = call i32 @llvm.amdgcn.workitem.id.x()
-define void @workitem_id_x() #1 {
+define amdgpu_kernel void @workitem_id_x() #1 {
%id.x = call i32 @llvm.amdgcn.workitem.id.x()
store volatile i32 %id.x, i32 addrspace(1)* undef
ret void
}
; CHECK: DIVERGENT: %id.y = call i32 @llvm.amdgcn.workitem.id.y()
-define void @workitem_id_y() #1 {
+define amdgpu_kernel void @workitem_id_y() #1 {
%id.y = call i32 @llvm.amdgcn.workitem.id.y()
store volatile i32 %id.y, i32 addrspace(1)* undef
ret void
}
; CHECK: DIVERGENT: %id.z = call i32 @llvm.amdgcn.workitem.id.z()
-define void @workitem_id_z() #1 {
+define amdgpu_kernel void @workitem_id_z() #1 {
%id.z = call i32 @llvm.amdgcn.workitem.id.z()
store volatile i32 %id.z, i32 addrspace(1)* undef
ret void
}
; CHECK: DIVERGENT: %mbcnt.lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 0, i32 0)
-define void @mbcnt_lo() #1 {
+define amdgpu_kernel void @mbcnt_lo() #1 {
%mbcnt.lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 0, i32 0)
store volatile i32 %mbcnt.lo, i32 addrspace(1)* undef
ret void
}
; CHECK: DIVERGENT: %mbcnt.hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 0, i32 0)
-define void @mbcnt_hi() #1 {
+define amdgpu_kernel void @mbcnt_hi() #1 {
%mbcnt.hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 0, i32 0)
store volatile i32 %mbcnt.hi, i32 addrspace(1)* undef
ret void
diff --git a/test/Transforms/LoopStrengthReduce/quadradic-exit-value.ll b/test/Analysis/IVUsers/quadradic-exit-value.ll
index 09f0e1aa2a09..214afcb2ffa7 100644
--- a/test/Transforms/LoopStrengthReduce/quadradic-exit-value.ll
+++ b/test/Analysis/IVUsers/quadradic-exit-value.ll
@@ -1,5 +1,12 @@
-; RUN: opt < %s -analyze -iv-users | FileCheck %s
-; RUN: opt -passes='function(require<scalar-evolution>,loop(print<ivusers>))' -S < %s 2>&1| FileCheck %s
+; This test ensures that IVUsers works correctly in the legacy pass manager
+; without LCSSA and in the specific ways that some of its users (LSR) require.
+;
+; FIXME: We need some way to match the precision here in the new PM where loop
+; passes *always* work on LCSSA. This should stop using a different set of
+; checks at that point.
+
+; RUN: opt < %s -analyze -iv-users | FileCheck %s --check-prefixes=CHECK,CHECK-NO-LCSSA
+; RUN: opt < %s -disable-output -passes='print<ivusers>' 2>&1 | FileCheck %s
; Provide legal integer types.
target datalayout = "n8:16:32:64"
@@ -7,7 +14,7 @@ target datalayout = "n8:16:32:64"
; The value of %r is dependent on a polynomial iteration expression.
;
; CHECK-LABEL: IV Users for loop %foo.loop
-; CHECK: {1,+,3,+,2}<%foo.loop>
+; CHECK-NO-LCSSA: {1,+,3,+,2}<%foo.loop>
define i64 @foo(i64 %n) {
entry:
br label %foo.loop
@@ -29,7 +36,7 @@ exit:
; sure they aren't marked as post-inc users.
;
; CHECK-LABEL: IV Users for loop %test2.loop
-; CHECK: %sext.us = {0,+,(16777216 + (-16777216 * %sub.us))<nuw><nsw>,+,33554432}<%test2.loop> in %f = ashr i32 %sext.us, 24
+; CHECK-NO-LCSSA: %sext.us = {0,+,(16777216 + (-16777216 * %sub.us))<nuw><nsw>,+,33554432}<%test2.loop> in %f = ashr i32 %sext.us, 24
define i32 @test2() {
entry:
br label %test2.loop
diff --git a/test/Analysis/LazyValueAnalysis/invalidation.ll b/test/Analysis/LazyValueAnalysis/invalidation.ll
new file mode 100644
index 000000000000..67b6c9859396
--- /dev/null
+++ b/test/Analysis/LazyValueAnalysis/invalidation.ll
@@ -0,0 +1,64 @@
+; Test that the lazy value analysis gets invalidated when its dependencies go
+; away. Sadly, you can neither print nor verify LVI so we just have to require
+; it and check that the pass manager does the right thing.
+;
+; Check basic invalidation.
+; RUN: opt -disable-output -disable-verify -debug-pass-manager %s 2>&1 \
+; RUN: -passes='require<lazy-value-info>,invalidate<lazy-value-info>,require<lazy-value-info>' \
+; RUN: | FileCheck %s --check-prefix=CHECK-INVALIDATE
+; CHECK-INVALIDATE: Running pass: RequireAnalysisPass
+; CHECK-INVALIDATE: Running analysis: LazyValueAnalysis
+; CHECK-INVALIDATE: Running pass: InvalidateAnalysisPass
+; CHECK-INVALIDATE: Invalidating analysis: LazyValueAnalysis
+; CHECK-INVALIDATE: Running pass: RequireAnalysisPass
+; CHECK-INVALIDATE: Running analysis: LazyValueAnalysis
+;
+; Check DomTree specifically.
+; RUN: opt -disable-output -disable-verify -debug-pass-manager %s 2>&1 \
+; RUN: -passes='require<domtree>,require<lazy-value-info>,invalidate<domtree>,require<lazy-value-info>' \
+; RUN: | FileCheck %s --check-prefix=CHECK-DT-INVALIDATE
+; CHECK-DT-INVALIDATE: Running pass: RequireAnalysisPass
+; CHECK-DT-INVALIDATE: Running analysis: LazyValueAnalysis
+; CHECK-DT-INVALIDATE: Running pass: InvalidateAnalysisPass
+; CHECK-DT-INVALIDATE: Invalidating analysis: DominatorTreeAnalysis
+; CHECK-DT-INVALIDATE: Invalidating analysis: LazyValueAnalysis
+; CHECK-DT-INVALIDATE: Running pass: RequireAnalysisPass
+; CHECK-DT-INVALIDATE: Running analysis: LazyValueAnalysis
+
+target triple = "x86_64-unknown-linux-gnu"
+
+@.str = private unnamed_addr constant [8 x i8] c"a = %l\0A\00", align 1
+
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+
+declare void @hoo(i64*)
+
+declare i32 @printf(i8* nocapture readonly, ...)
+
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+
+define void @goo(i32 %N, i64* %b) {
+entry:
+ %a.i = alloca i64, align 8
+ %tmp = bitcast i64* %a.i to i8*
+ %c = getelementptr inbounds i64, i64* %b, i64 0
+ br label %for.cond
+
+for.cond: ; preds = %for.body, %entry
+ %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %cmp = icmp slt i32 %i.0, %N
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ call void @llvm.lifetime.start.p0i8(i64 8, i8* %tmp)
+ call void @hoo(i64* %a.i)
+ call void @hoo(i64* %c)
+ %tmp1 = load volatile i64, i64* %a.i, align 8
+ %call.i = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i64 %tmp1)
+ call void @llvm.lifetime.end.p0i8(i64 8, i8* %tmp)
+ %inc = add nsw i32 %i.0, 1
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ ret void
+}
diff --git a/test/Analysis/LazyValueAnalysis/lvi-after-jumpthreading.ll b/test/Analysis/LazyValueAnalysis/lvi-after-jumpthreading.ll
new file mode 100644
index 000000000000..00ab21e46d5d
--- /dev/null
+++ b/test/Analysis/LazyValueAnalysis/lvi-after-jumpthreading.ll
@@ -0,0 +1,84 @@
+; RUN: opt < %s -jump-threading -print-lazy-value-info -disable-output 2>&1 | FileCheck %s
+
+; Testing LVI cache after jump-threading
+
+; Jump-threading transforms the IR below to one where
+; loop and backedge basic blocks are merged into one.
+; basic block (named backedge) with the branch being:
+; %cont = icmp slt i32 %iv.next, 400
+; br i1 %cont, label %backedge, label %exit
+define i8 @test1(i32 %a, i32 %length) {
+; CHECK-LABEL: LVI for function 'test1':
+entry:
+ br label %loop
+; CHECK-LABEL: backedge:
+; CHECK-NEXT: ; CachedLatticeValues for: ' %iv = phi i32 [ 0, %entry ], [ %iv.next, %backedge ]'
+; CHECK-DAG: ; at beginning of BasicBlock: '%backedge' LatticeVal: 'constantrange<0, 400>'
+; CHECK-NEXT: %iv = phi i32 [ 0, %entry ], [ %iv.next, %backedge ]
+; CHECK-NEXT: ; CachedLatticeValues for: ' %iv.next = add nsw i32 %iv, 1'
+; CHECK-NEXT: ; at beginning of BasicBlock: '%backedge' LatticeVal: 'constantrange<1, 401>'
+; CHECK-NEXT: %iv.next = add nsw i32 %iv, 1
+; CHECK-NEXT: %cont = icmp slt i32 %iv.next, 400
+; CHECK-NEXT: br i1 %cont, label %backedge, label %exit
+
+; CHECK-NOT: loop
+loop:
+ %iv = phi i32 [0, %entry], [%iv.next, %backedge]
+ %cnd = icmp sge i32 %iv, 0
+ br i1 %cnd, label %backedge, label %exit
+
+backedge:
+ %iv.next = add nsw i32 %iv, 1
+ %cont = icmp slt i32 %iv.next, 400
+ br i1 %cont, label %loop, label %exit
+
+exit:
+ ret i8 0
+}
+
+
+; Here jump-threading does not transform the code, but the LVI cache is still populated during the processing of blocks.
+define i8 @test2(i32 %n) {
+; CHECK-LABEL: LVI for function 'test2':
+; CHECK-LABEL: entry:
+; CHECK-LABEL: ; OverDefined values for block are:
+; CHECK-NEXT: ;i32 %n
+; CHECK-NEXT: br label %loop
+entry:
+ br label %loop
+
+; CHECK-LABEL: loop:
+; CHECK-LABEL: ; OverDefined values for block are:
+; CHECK-NEXT: ; %iv2 = phi i32 [ %n, %entry ], [ %iv2.next, %backedge ]
+; CHECK-NEXT: ; CachedLatticeValues for: ' %iv = phi i32 [ 0, %entry ], [ %iv.next, %backedge ]'
+; CHECK-DAG: ; at beginning of BasicBlock: '%loop' LatticeVal: 'constantrange<0, -2147483647>'
+; CHECK-DAG: ; at beginning of BasicBlock: '%backedge' LatticeVal: 'constantrange<0, -2147483648>'
+; CHECK-NEXT: %iv = phi i32 [ 0, %entry ], [ %iv.next, %backedge ]
+; CHECK: %cnd = and i1 %cnd1, %cnd2
+; CHECK: br i1 %cnd, label %backedge, label %exit
+loop:
+ %iv = phi i32 [0, %entry], [%iv.next, %backedge]
+ %iv2 = phi i32 [%n, %entry], [%iv2.next, %backedge]
+ %cnd1 = icmp sge i32 %iv, 0
+ %cnd2 = icmp sgt i32 %iv2, 0
+ %cnd = and i1 %cnd1, %cnd2
+ br i1 %cnd, label %backedge, label %exit
+
+; CHECK-LABEL: backedge:
+; CHECK-NEXT: ; CachedLatticeValues for: ' %iv.next = add nsw i32 %iv, 1'
+; CHECK-NEXT: ; at beginning of BasicBlock: '%backedge' LatticeVal: 'constantrange<1, -2147483647>'
+; CHECK-NEXT: %iv.next = add nsw i32 %iv, 1
+; CHECK-NEXT: %iv2.next = sub nsw i32 %iv2, 1
+; CHECK: %cont = and i1 %cont1, %cont2
+; CHECK: br i1 %cont, label %loop, label %exit
+backedge:
+ %iv.next = add nsw i32 %iv, 1
+ %iv2.next = sub nsw i32 %iv2, 1
+ %cont1 = icmp slt i32 %iv.next, 400
+ %cont2 = icmp sgt i32 %iv2.next, 0
+ %cont = and i1 %cont1, %cont2
+ br i1 %cont, label %loop, label %exit
+
+exit:
+ ret i8 0
+}
diff --git a/test/Analysis/LoopAccessAnalysis/multiple-strides-rt-memory-checks.ll b/test/Analysis/LoopAccessAnalysis/multiple-strides-rt-memory-checks.ll
index 87a6c18ab303..60c2a3930b5c 100644
--- a/test/Analysis/LoopAccessAnalysis/multiple-strides-rt-memory-checks.ll
+++ b/test/Analysis/LoopAccessAnalysis/multiple-strides-rt-memory-checks.ll
@@ -13,9 +13,9 @@
; int v3[Z][Z];
; } s;
;
-; void slow_function (s* const obj) {
+; void slow_function (s* const obj, int z) {
; for (int j=0; j<Z; j++) {
-; for (int k=0; k<Z; k++) {
+; for (int k=0; k<z; k++) {
; int x = obj->v1[k] + obj->v2[j];
; obj->v3[j][k] += x;
; }
@@ -35,7 +35,7 @@ target triple = "x86_64-unknown-linux-gnu"
%struct.s = type { [32 x i32], [32 x i32], [32 x [32 x i32]] }
-define void @Test(%struct.s* nocapture %obj) #0 {
+define void @Test(%struct.s* nocapture %obj, i64 %z) #0 {
br label %.outer.preheader
@@ -63,6 +63,6 @@ define void @Test(%struct.s* nocapture %obj) #0 {
%8 = add nsw i32 %5, %7
store i32 %8, i32* %6
%j.next = add nuw nsw i64 %j, 1
- %exitcond.inner = icmp eq i64 %j.next, 32
+ %exitcond.inner = icmp eq i64 %j.next, %z
br i1 %exitcond.inner, label %.outer, label %.inner
}
diff --git a/test/Analysis/LoopAccessAnalysis/pr31098.ll b/test/Analysis/LoopAccessAnalysis/pr31098.ll
new file mode 100644
index 000000000000..04b73828f514
--- /dev/null
+++ b/test/Analysis/LoopAccessAnalysis/pr31098.ll
@@ -0,0 +1,99 @@
+; RUN: opt -loop-accesses -analyze < %s | FileCheck %s
+; RUN: opt -passes='require<scalar-evolution>,require<aa>,loop(print-access-info)' -disable-output < %s 2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Check that the compile-time-unknown dependence distance is resolved
+; statically. Due to the non-unit stride of the accesses in this testcase
+; we are currently not able to create runtime dependence checks, and therefore
+; if we don't resolve the dependence statically we cannot vectorize the loop.
+;
+; Specifically in this example, during dependence analysis we get 6 unknown
+; dependence distances between the 8 real/imaginary accesses below:
+; dist = 8*D, 4+8*D, -4+8*D, -8*D, 4-8*D, -4-8*D.
+; At compile time we can prove for all of the above that |dist|>loopBound*step
+; (where the step is 8 bytes, and the loopBound is D-1), and thereby conclude
+; that there are no dependencies (without runtime tests):
+; |8*D|>8*D-8, |4+8*D|>8*D-8, |-4+8*D|>8*D-8, etc.
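+;
+; Worked instance (illustrative only, not part of the checked output): with
+; size = 8 we have D = 4 and loopBound*step = (4-1)*8 = 24 bytes, while the
+; six distances are 32, 36, 28, -32, -28, -36 bytes, so every |dist| > 24
+; and all accesses are provably independent at compile time.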
+
+; #include <stdlib.h>
+; class Complex {
+; private:
+; float real_;
+; float imaginary_;
+;
+; public:
+; Complex() : real_(0), imaginary_(0) { }
+; Complex(float real, float imaginary) : real_(real), imaginary_(imaginary) { }
+; Complex(const Complex &rhs) : real_(rhs.real()), imaginary_(rhs.imaginary()) { }
+;
+; inline float real() const { return real_; }
+; inline float imaginary() const { return imaginary_; }
+;
+; Complex operator+(const Complex& rhs) const
+; {
+; return Complex(real_ + rhs.real_, imaginary_ + rhs.imaginary_);
+; }
+;
+; Complex operator-(const Complex& rhs) const
+; {
+; return Complex(real_ - rhs.real_, imaginary_ - rhs.imaginary_);
+; }
+; };
+;
+; void Test(Complex *out, size_t size)
+; {
+; size_t D = size / 2;
+; for (size_t offset = 0; offset < D; ++offset)
+; {
+; Complex t0 = out[offset];
+; Complex t1 = out[offset + D];
+; out[offset] = t1 + t0;
+; out[offset + D] = t0 - t1;
+; }
+; }
+
+; CHECK-LABEL: Test
+; CHECK: Memory dependences are safe
+
+
+%class.Complex = type { float, float }
+
+define void @Test(%class.Complex* nocapture %out, i64 %size) local_unnamed_addr {
+entry:
+ %div = lshr i64 %size, 1
+ %cmp47 = icmp eq i64 %div, 0
+ br i1 %cmp47, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:
+ br label %for.body
+
+for.cond.cleanup.loopexit:
+ br label %for.cond.cleanup
+
+for.cond.cleanup:
+ ret void
+
+for.body:
+ %offset.048 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ]
+ %0 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %offset.048, i32 0
+ %1 = load float, float* %0, align 4
+ %imaginary_.i.i = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %offset.048, i32 1
+ %2 = load float, float* %imaginary_.i.i, align 4
+ %add = add nuw i64 %offset.048, %div
+ %3 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add, i32 0
+ %4 = load float, float* %3, align 4
+ %imaginary_.i.i28 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add, i32 1
+ %5 = load float, float* %imaginary_.i.i28, align 4
+ %add.i = fadd fast float %4, %1
+ %add4.i = fadd fast float %5, %2
+ store float %add.i, float* %0, align 4
+ store float %add4.i, float* %imaginary_.i.i, align 4
+ %sub.i = fsub fast float %1, %4
+ %sub4.i = fsub fast float %2, %5
+ store float %sub.i, float* %3, align 4
+ store float %sub4.i, float* %imaginary_.i.i28, align 4
+ %inc = add nuw nsw i64 %offset.048, 1
+ %exitcond = icmp eq i64 %inc, %div
+ br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
+}
diff --git a/test/Analysis/MemoryDependenceAnalysis/invalidation.ll b/test/Analysis/MemoryDependenceAnalysis/invalidation.ll
index 6e5d4a4bf846..478ffc2914b7 100644
--- a/test/Analysis/MemoryDependenceAnalysis/invalidation.ll
+++ b/test/Analysis/MemoryDependenceAnalysis/invalidation.ll
@@ -13,20 +13,6 @@
; CHECK-AA-INVALIDATE: Running pass: GVN
; CHECK-AA-INVALIDATE: Running analysis: MemoryDependenceAnalysis
;
-; Check the assumptions analysis specifically.
-; FIXME: We don't have any test cases that actually fail if the assumption
-; cache becomes stale. This just tests what we believe to be correct.
-; RUN: opt -disable-output -debug-pass-manager %s 2>&1 \
-; RUN: -passes='require<memdep>,invalidate<assumptions>,gvn' \
-; RUN: | FileCheck %s --check-prefix=CHECK-ASSUMPTIONS-INVALIDATE
-; CHECK-ASSUMPTIONS-INVALIDATE: Running pass: RequireAnalysisPass
-; CHECK-ASSUMPTIONS-INVALIDATE: Running analysis: MemoryDependenceAnalysis
-; CHECK-ASSUMPTIONS-INVALIDATE: Running pass: InvalidateAnalysisPass
-; CHECK-ASSUMPTIONS-INVALIDATE: Invalidating analysis: AssumptionAnalysis
-; CHECK-ASSUMPTIONS-INVALIDATE: Invalidating analysis: MemoryDependenceAnalysis
-; CHECK-ASSUMPTIONS-INVALIDATE: Running pass: GVN
-; CHECK-ASSUMPTIONS-INVALIDATE: Running analysis: MemoryDependenceAnalysis
-;
; Check domtree specifically.
; RUN: opt -disable-output -debug-pass-manager %s 2>&1 \
; RUN: -passes='require<memdep>,invalidate<domtree>,gvn' \
diff --git a/test/Transforms/Util/MemorySSA/assume.ll b/test/Analysis/MemorySSA/assume.ll
index d771c78eb1cf..d771c78eb1cf 100644
--- a/test/Transforms/Util/MemorySSA/assume.ll
+++ b/test/Analysis/MemorySSA/assume.ll
diff --git a/test/Transforms/Util/MemorySSA/atomic-clobber.ll b/test/Analysis/MemorySSA/atomic-clobber.ll
index acd819a89351..acd819a89351 100644
--- a/test/Transforms/Util/MemorySSA/atomic-clobber.ll
+++ b/test/Analysis/MemorySSA/atomic-clobber.ll
diff --git a/test/Transforms/Util/MemorySSA/basicaa-memcpy.ll b/test/Analysis/MemorySSA/basicaa-memcpy.ll
index bfd7c899b59a..bfd7c899b59a 100644
--- a/test/Transforms/Util/MemorySSA/basicaa-memcpy.ll
+++ b/test/Analysis/MemorySSA/basicaa-memcpy.ll
diff --git a/test/Transforms/Util/MemorySSA/constant-memory.ll b/test/Analysis/MemorySSA/constant-memory.ll
index bc970e72fc4d..bc970e72fc4d 100644
--- a/test/Transforms/Util/MemorySSA/constant-memory.ll
+++ b/test/Analysis/MemorySSA/constant-memory.ll
diff --git a/test/Transforms/Util/MemorySSA/cyclicphi.ll b/test/Analysis/MemorySSA/cyclicphi.ll
index 6e91db959e4c..6e91db959e4c 100644
--- a/test/Transforms/Util/MemorySSA/cyclicphi.ll
+++ b/test/Analysis/MemorySSA/cyclicphi.ll
diff --git a/test/Transforms/Util/MemorySSA/forward-unreachable.ll b/test/Analysis/MemorySSA/forward-unreachable.ll
index 2bbf399daae4..2bbf399daae4 100644
--- a/test/Transforms/Util/MemorySSA/forward-unreachable.ll
+++ b/test/Analysis/MemorySSA/forward-unreachable.ll
diff --git a/test/Transforms/Util/MemorySSA/function-clobber.ll b/test/Analysis/MemorySSA/function-clobber.ll
index a01893a5b954..a01893a5b954 100644
--- a/test/Transforms/Util/MemorySSA/function-clobber.ll
+++ b/test/Analysis/MemorySSA/function-clobber.ll
diff --git a/test/Transforms/Util/MemorySSA/function-mem-attrs.ll b/test/Analysis/MemorySSA/function-mem-attrs.ll
index 11383771a413..11383771a413 100644
--- a/test/Transforms/Util/MemorySSA/function-mem-attrs.ll
+++ b/test/Analysis/MemorySSA/function-mem-attrs.ll
diff --git a/test/Analysis/MemorySSA/invariant-groups.ll b/test/Analysis/MemorySSA/invariant-groups.ll
new file mode 100644
index 000000000000..26a0a32e86f7
--- /dev/null
+++ b/test/Analysis/MemorySSA/invariant-groups.ll
@@ -0,0 +1,301 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+;
+; Currently, MemorySSA doesn't support invariant groups, so we should ignore
+; invariant.group.barrier intrinsics entirely. We'll need to pay attention to
+; them when/if we decide to support invariant groups.
+
+@g = external global i32
+
+define i32 @foo(i32* %a) {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 0
+ store i32 0, i32* %a, align 4, !invariant.group !0
+
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: store i32 1
+ store i32 1, i32* @g, align 4
+
+ %1 = bitcast i32* %a to i8*
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
+ %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
+ %a32 = bitcast i8* %a8 to i32*
+
+; This has to be MemoryUse(2), because we can't skip the barrier based on
+; invariant.group.
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %2 = load i32
+ %2 = load i32, i32* %a32, align 4, !invariant.group !0
+ ret i32 %2
+}
+
+define i32 @skipBarrier(i32* %a) {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 0
+ store i32 0, i32* %a, align 4, !invariant.group !0
+
+ %1 = bitcast i32* %a to i8*
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
+ %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
+ %a32 = bitcast i8* %a8 to i32*
+
+; We can skip the barrier only if the "skip" is not based on !invariant.group.
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: %2 = load i32
+ %2 = load i32, i32* %a32, align 4, !invariant.group !0
+ ret i32 %2
+}
+
+define i32 @skipBarrier2(i32* %a) {
+
+; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: %v = load i32
+ %v = load i32, i32* %a, align 4, !invariant.group !0
+
+ %1 = bitcast i32* %a to i8*
+; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
+ %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
+ %a32 = bitcast i8* %a8 to i32*
+
+; We can skip the barrier only if the "skip" is not based on !invariant.group.
+; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: %v2 = load i32
+ %v2 = load i32, i32* %a32, align 4, !invariant.group !0
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 1
+ store i32 1, i32* @g, align 4
+
+; FIXME: based on invariant.group it should be MemoryUse(liveOnEntry)
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: %v3 = load i32
+ %v3 = load i32, i32* %a32, align 4, !invariant.group !0
+ %add = add nsw i32 %v2, %v3
+ %add2 = add nsw i32 %add, %v
+ ret i32 %add2
+}
+
+define i32 @handleInvariantGroups(i32* %a) {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 0
+ store i32 0, i32* %a, align 4, !invariant.group !0
+
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: store i32 1
+ store i32 1, i32* @g, align 4
+ %1 = bitcast i32* %a to i8*
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
+ %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
+ %a32 = bitcast i8* %a8 to i32*
+
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %2 = load i32
+ %2 = load i32, i32* %a32, align 4, !invariant.group !0
+
+; CHECK: 3 = MemoryDef(2)
+; CHECK-NEXT: store i32 2
+ store i32 2, i32* @g, align 4
+
+; FIXME: This can be changed to MemoryUse(2)
+; CHECK: MemoryUse(3)
+; CHECK-NEXT: %3 = load i32
+ %3 = load i32, i32* %a32, align 4, !invariant.group !0
+ %add = add nsw i32 %2, %3
+ ret i32 %add
+}
+
+define i32 @loop(i1 %a) {
+entry:
+ %0 = alloca i32, align 4
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 4
+ store i32 4, i32* %0, !invariant.group !0
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: call void @clobber
+ call void @clobber(i32* %0)
+ br i1 %a, label %Loop.Body, label %Loop.End
+
+Loop.Body:
+; FIXME: MemoryUse(1)
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %1 = load i32
+ %1 = load i32, i32* %0, !invariant.group !0
+ br i1 %a, label %Loop.End, label %Loop.Body
+
+Loop.End:
+; FIXME: MemoryUse(1)
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %2 = load
+ %2 = load i32, i32* %0, align 4, !invariant.group !0
+ br i1 %a, label %Ret, label %Loop.Body
+
+Ret:
+ ret i32 %2
+}
+
+define i8 @loop2(i8* %p) {
+entry:
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i8
+ store i8 4, i8* %p, !invariant.group !0
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: call void @clobber
+ call void @clobber8(i8* %p)
+
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier(i8* %p)
+ %after = call i8* @llvm.invariant.group.barrier(i8* %p)
+ br i1 undef, label %Loop.Body, label %Loop.End
+
+Loop.Body:
+; 4 = MemoryPhi({entry,2},{Loop.Body,3},{Loop.End,5})
+; CHECK: MemoryUse(4)
+; CHECK-NEXT: %0 = load i8
+ %0 = load i8, i8* %after, !invariant.group !0
+
+; FIXME: MemoryUse(1)
+; CHECK: MemoryUse(4)
+; CHECK-NEXT: %1 = load i8
+ %1 = load i8, i8* %p, !invariant.group !0
+
+; CHECK: 3 = MemoryDef(4)
+ store i8 4, i8* %after, !invariant.group !0
+
+ br i1 undef, label %Loop.End, label %Loop.Body
+
+Loop.End:
+; 5 = MemoryPhi({entry,2},{Loop.Body,3})
+; CHECK: MemoryUse(5)
+; CHECK-NEXT: %2 = load
+ %2 = load i8, i8* %after, align 4, !invariant.group !0
+
+; FIXME: MemoryUse(1)
+; CHECK: MemoryUse(5)
+; CHECK-NEXT: %3 = load
+ %3 = load i8, i8* %p, align 4, !invariant.group !0
+ br i1 undef, label %Ret, label %Loop.Body
+
+Ret:
+ ret i8 %3
+}
+
+
+define i8 @loop3(i8* %p) {
+entry:
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i8
+ store i8 4, i8* %p, !invariant.group !0
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: call void @clobber
+ call void @clobber8(i8* %p)
+
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier(i8* %p)
+ %after = call i8* @llvm.invariant.group.barrier(i8* %p)
+ br i1 undef, label %Loop.Body, label %Loop.End
+
+Loop.Body:
+; CHECK: 6 = MemoryPhi({entry,2},{Loop.Body,3},{Loop.next,4},{Loop.End,5})
+; CHECK: MemoryUse(6)
+; CHECK-NEXT: %0 = load i8
+ %0 = load i8, i8* %after, !invariant.group !0
+
+; CHECK: 3 = MemoryDef(6)
+; CHECK-NEXT: call void @clobber8
+ call void @clobber8(i8* %after)
+
+; FIXME: MemoryUse(6)
+; CHECK: MemoryUse(3)
+; CHECK-NEXT: %1 = load i8
+ %1 = load i8, i8* %after, !invariant.group !0
+
+ br i1 undef, label %Loop.next, label %Loop.Body
+Loop.next:
+; CHECK: 4 = MemoryDef(3)
+; CHECK-NEXT: call void @clobber8
+ call void @clobber8(i8* %after)
+
+; FIXME: MemoryUse(6)
+; CHECK: MemoryUse(4)
+; CHECK-NEXT: %2 = load i8
+ %2 = load i8, i8* %after, !invariant.group !0
+
+ br i1 undef, label %Loop.End, label %Loop.Body
+
+Loop.End:
+; CHECK: 7 = MemoryPhi({entry,2},{Loop.next,4})
+; CHECK: MemoryUse(7)
+; CHECK-NEXT: %3 = load
+ %3 = load i8, i8* %after, align 4, !invariant.group !0
+
+; CHECK: 5 = MemoryDef(7)
+; CHECK-NEXT: call void @clobber8
+ call void @clobber8(i8* %after)
+
+; FIXME: MemoryUse(7)
+; CHECK: MemoryUse(5)
+; CHECK-NEXT: %4 = load
+ %4 = load i8, i8* %after, align 4, !invariant.group !0
+ br i1 undef, label %Ret, label %Loop.Body
+
+Ret:
+ ret i8 %3
+}
+
+define i8 @loop4(i8* %p) {
+entry:
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i8
+ store i8 4, i8* %p, !invariant.group !0
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: call void @clobber
+ call void @clobber8(i8* %p)
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier(i8* %p)
+ %after = call i8* @llvm.invariant.group.barrier(i8* %p)
+ br i1 undef, label %Loop.Pre, label %Loop.End
+
+Loop.Pre:
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %0 = load i8
+ %0 = load i8, i8* %after, !invariant.group !0
+ br label %Loop.Body
+Loop.Body:
+; CHECK: 4 = MemoryPhi({Loop.Pre,2},{Loop.Body,3},{Loop.End,5})
+; CHECK-NEXT: MemoryUse(4)
+; CHECK-NEXT: %1 = load i8
+ %1 = load i8, i8* %after, !invariant.group !0
+
+; FIXME: MemoryUse(2)
+; CHECK: MemoryUse(4)
+; CHECK-NEXT: %2 = load i8
+ %2 = load i8, i8* %p, !invariant.group !0
+
+; CHECK: 3 = MemoryDef(4)
+ store i8 4, i8* %after, !invariant.group !0
+ br i1 undef, label %Loop.End, label %Loop.Body
+
+Loop.End:
+; CHECK: 5 = MemoryPhi({entry,2},{Loop.Body,3})
+; CHECK-NEXT: MemoryUse(5)
+; CHECK-NEXT: %3 = load
+ %3 = load i8, i8* %after, align 4, !invariant.group !0
+
+; FIXME: MemoryUse(2)
+; CHECK: MemoryUse(5)
+; CHECK-NEXT: %4 = load
+ %4 = load i8, i8* %p, align 4, !invariant.group !0
+ br i1 undef, label %Ret, label %Loop.Body
+
+Ret:
+ ret i8 %3
+}
+
+declare i8* @llvm.invariant.group.barrier(i8*)
+declare void @clobber(i32*)
+declare void @clobber8(i8*)
+
+
+!0 = !{!"group1"}
diff --git a/test/Transforms/Util/MemorySSA/lifetime-simple.ll b/test/Analysis/MemorySSA/lifetime-simple.ll
index cdb36e31eb96..f1db15cc577d 100644
--- a/test/Transforms/Util/MemorySSA/lifetime-simple.ll
+++ b/test/Analysis/MemorySSA/lifetime-simple.ll
@@ -7,17 +7,17 @@
define i8 @test(i8* %P, i8* %Q) {
entry:
; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: call void @llvm.lifetime.start(i64 32, i8* %P)
- call void @llvm.lifetime.start(i64 32, i8* %P)
-; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 32, i8* %P)
+ call void @llvm.lifetime.start.p0i8(i64 32, i8* %P)
+; CHECK: MemoryUse(1)
; CHECK-NEXT: %0 = load i8, i8* %P
%0 = load i8, i8* %P
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: store i8 1, i8* %P
store i8 1, i8* %P
; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: call void @llvm.lifetime.end(i64 32, i8* %P)
- call void @llvm.lifetime.end(i64 32, i8* %P)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 32, i8* %P)
+ call void @llvm.lifetime.end.p0i8(i64 32, i8* %P)
; CHECK: MemoryUse(liveOnEntry)
; CHECK-NEXT: %1 = load i8, i8* %P
%1 = load i8, i8* %P
@@ -26,5 +26,5 @@ entry:
%2 = load i8, i8* %Q
ret i8 %1
}
-declare void @llvm.lifetime.start(i64 %S, i8* nocapture %P) readonly
-declare void @llvm.lifetime.end(i64 %S, i8* nocapture %P)
+declare void @llvm.lifetime.start.p0i8(i64 %S, i8* nocapture %P) readonly
+declare void @llvm.lifetime.end.p0i8(i64 %S, i8* nocapture %P)
diff --git a/test/Transforms/Util/MemorySSA/load-invariant.ll b/test/Analysis/MemorySSA/load-invariant.ll
index 3c55db11597b..defb74641eaa 100644
--- a/test/Transforms/Util/MemorySSA/load-invariant.ll
+++ b/test/Analysis/MemorySSA/load-invariant.ll
@@ -3,9 +3,6 @@
;
; Invariant loads should be considered live on entry, because, once the
; location is known to be dereferenceable, the value can never change.
-;
-; Currently XFAILed because this optimization was held back from the initial
-; commit.
@g = external global i32
diff --git a/test/Transforms/Util/MemorySSA/many-dom-backedge.ll b/test/Analysis/MemorySSA/many-dom-backedge.ll
index c2216a47bb20..c2216a47bb20 100644
--- a/test/Transforms/Util/MemorySSA/many-dom-backedge.ll
+++ b/test/Analysis/MemorySSA/many-dom-backedge.ll
diff --git a/test/Transforms/Util/MemorySSA/many-doms.ll b/test/Analysis/MemorySSA/many-doms.ll
index 1f57cbf1c4df..1f57cbf1c4df 100644
--- a/test/Transforms/Util/MemorySSA/many-doms.ll
+++ b/test/Analysis/MemorySSA/many-doms.ll
diff --git a/test/Transforms/Util/MemorySSA/multi-edges.ll b/test/Analysis/MemorySSA/multi-edges.ll
index 5d47728d6f5a..5d47728d6f5a 100644
--- a/test/Transforms/Util/MemorySSA/multi-edges.ll
+++ b/test/Analysis/MemorySSA/multi-edges.ll
diff --git a/test/Transforms/Util/MemorySSA/multiple-backedges-hal.ll b/test/Analysis/MemorySSA/multiple-backedges-hal.ll
index 005a37c9add2..005a37c9add2 100644
--- a/test/Transforms/Util/MemorySSA/multiple-backedges-hal.ll
+++ b/test/Analysis/MemorySSA/multiple-backedges-hal.ll
diff --git a/test/Transforms/Util/MemorySSA/multiple-locations.ll b/test/Analysis/MemorySSA/multiple-locations.ll
index 9a3e87e4ab6d..9a3e87e4ab6d 100644
--- a/test/Transforms/Util/MemorySSA/multiple-locations.ll
+++ b/test/Analysis/MemorySSA/multiple-locations.ll
diff --git a/test/Transforms/Util/MemorySSA/no-disconnected.ll b/test/Analysis/MemorySSA/no-disconnected.ll
index d1dcb15893ad..d1dcb15893ad 100644
--- a/test/Transforms/Util/MemorySSA/no-disconnected.ll
+++ b/test/Analysis/MemorySSA/no-disconnected.ll
diff --git a/test/Transforms/Util/MemorySSA/optimize-use.ll b/test/Analysis/MemorySSA/optimize-use.ll
index 8a8f2dd50959..8a8f2dd50959 100644
--- a/test/Transforms/Util/MemorySSA/optimize-use.ll
+++ b/test/Analysis/MemorySSA/optimize-use.ll
diff --git a/test/Transforms/Util/MemorySSA/phi-translation.ll b/test/Analysis/MemorySSA/phi-translation.ll
index c91faf2ac20b..c91faf2ac20b 100644
--- a/test/Transforms/Util/MemorySSA/phi-translation.ll
+++ b/test/Analysis/MemorySSA/phi-translation.ll
diff --git a/test/Transforms/Util/MemorySSA/pr28880.ll b/test/Analysis/MemorySSA/pr28880.ll
index ae64c0c5d73e..ae64c0c5d73e 100644
--- a/test/Transforms/Util/MemorySSA/pr28880.ll
+++ b/test/Analysis/MemorySSA/pr28880.ll
diff --git a/test/Analysis/MemorySSA/ptr-const-mem.ll b/test/Analysis/MemorySSA/ptr-const-mem.ll
new file mode 100644
index 000000000000..a326d8d717a1
--- /dev/null
+++ b/test/Analysis/MemorySSA/ptr-const-mem.ll
@@ -0,0 +1,23 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze -memssa-check-limit=0 < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>' -verify-memoryssa -disable-output -memssa-check-limit=0 < %s 2>&1 | FileCheck %s
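+; memssa-check-limit=0 keeps the walker from examining any defs, so the
+; liveOnEntry result below can only come from @g4 being constant memory.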
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+target triple = "amdgcn"
+
+@g4 = external unnamed_addr constant i8, align 1
+
+define signext i8 @cmp_constant(i8* %q, i8 %v) local_unnamed_addr {
+entry:
+
+ store i8 %v, i8* %q, align 1
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i8 %v, i8* %q, align 1
+
+ %0 = load i8, i8* @g4, align 1
+; Make sure that this load is liveOnEntry just based on the fact that @g4 is
+; constant memory.
+; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: load i8, i8* @g4, align 1
+
+ ret i8 %0
+}
+
diff --git a/test/Transforms/Util/MemorySSA/volatile-clobber.ll b/test/Analysis/MemorySSA/volatile-clobber.ll
index d6f960f3e382..d6f960f3e382 100644
--- a/test/Transforms/Util/MemorySSA/volatile-clobber.ll
+++ b/test/Analysis/MemorySSA/volatile-clobber.ll
diff --git a/test/Analysis/RegionInfo/outgoing_edge.ll b/test/Analysis/RegionInfo/outgoing_edge.ll
new file mode 100644
index 000000000000..39e1a39d7e5b
--- /dev/null
+++ b/test/Analysis/RegionInfo/outgoing_edge.ll
@@ -0,0 +1,33 @@
+; REQUIRES: asserts
+; RUN: opt -regions -analyze < %s | FileCheck %s
+; RUN: opt < %s -passes='print<regions>' 2>&1 | FileCheck %s
+
+; While improvements to the region info analysis were being developed, this
+; test case exposed an incorrect region bb2 => bb3. The region is incorrect
+; because bb2 has an outgoing edge to bb4. This is interesting because bb2
+; dom bb3 and bb3 pdom bb2, which should have been enough to rule out incoming
+; forward edges into the region and outgoing forward edges from it.
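+; With the bb2 -> bb4 edge present, the only single-entry-single-exit region
+; left is bb1 => bb5, which is what the CHECK lines below expect.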
+
+define void @meread_() nounwind {
+bb:
+ br label %bb1
+
+bb1: ; preds = %bb4, %bb
+ br label %bb2
+
+bb2: ; preds = %bb1
+ br i1 true, label %bb3, label %bb4
+
+bb3: ; preds = %bb2
+ br i1 true, label %bb4, label %bb5
+
+bb4: ; preds = %bb3, %bb2
+ br label %bb1
+
+bb5: ; preds = %bb3
+ ret void
+}
+
+; CHECK: [0] bb => <Function Return>
+; CHECK-NEXT: [1] bb1 => bb5
+; CHECK-NEXT: End region tree
diff --git a/test/Analysis/RegionInfo/outgoing_edge_1.ll b/test/Analysis/RegionInfo/outgoing_edge_1.ll
new file mode 100644
index 000000000000..6f51131a188c
--- /dev/null
+++ b/test/Analysis/RegionInfo/outgoing_edge_1.ll
@@ -0,0 +1,39 @@
+; REQUIRES: asserts
+; RUN: opt -regions -analyze < %s | FileCheck %s
+; RUN: opt < %s -passes='print<regions>' 2>&1 | FileCheck %s
+
+; While improvements to the region info analysis were being developed, this
+; test case exposed an incorrect region bb2 => bb3.
+
+define internal i8 @main_read() nounwind {
+bb:
+ br label %bb1
+
+bb1:
+ br i1 true, label %bb2, label %bb7
+
+bb2:
+ br i1 true, label %bb4, label %bb3
+
+bb3:
+ br i1 true, label %bb4, label %bb8
+
+bb4:
+ br label %bb5
+
+bb5:
+ br label %bb6
+
+bb6:
+ br label %bb1
+
+bb7:
+ br label %bb5
+
+bb8:
+ ret i8 1
+}
+
+; CHECK: [0] bb => <Function Return>
+; CHECK-NEXT: [1] bb1 => bb8
+; CHECK-NEXT: End region tree
diff --git a/test/Analysis/ScalarEvolution/2011-04-26-FoldAddRec.ll b/test/Analysis/ScalarEvolution/2011-04-26-FoldAddRec.ll
index 973dd7d6dd0d..4f126fbf6b3e 100644
--- a/test/Analysis/ScalarEvolution/2011-04-26-FoldAddRec.ll
+++ b/test/Analysis/ScalarEvolution/2011-04-26-FoldAddRec.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -analyze -iv-users
+; RUN: opt < %s -passes='print<ivusers>'
; PR9633: Tests that SCEV handles the mul.i2 recurrence being folded to
; constant zero.
diff --git a/test/Analysis/ScalarEvolution/2012-05-18-LoopPredRecurse.ll b/test/Analysis/ScalarEvolution/2012-05-18-LoopPredRecurse.ll
index 66df9d19234f..853d43c4f875 100644
--- a/test/Analysis/ScalarEvolution/2012-05-18-LoopPredRecurse.ll
+++ b/test/Analysis/ScalarEvolution/2012-05-18-LoopPredRecurse.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -iv-users -S -disable-output
+; RUN: opt < %s -passes='require<ivusers>' -S -disable-output
;
; PR12868: Infinite recursion:
; getUDivExpr()->getZeroExtendExpr()->isLoopBackedgeGuardedBy()
diff --git a/test/Analysis/ScalarEvolution/avoid-infinite-recursion-1.ll b/test/Analysis/ScalarEvolution/avoid-infinite-recursion-1.ll
index f61b667dcfa7..8e519d409ede 100644
--- a/test/Analysis/ScalarEvolution/avoid-infinite-recursion-1.ll
+++ b/test/Analysis/ScalarEvolution/avoid-infinite-recursion-1.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -iv-users
+; RUN: opt < %s -passes='require<ivusers>'
; PR4538
; ModuleID = 'bugpoint-reduced-simplified.bc'
diff --git a/test/Analysis/ScalarEvolution/flags-from-poison.ll b/test/Analysis/ScalarEvolution/flags-from-poison.ll
index 8e73fe4fd54c..44ee830d9c62 100644
--- a/test/Analysis/ScalarEvolution/flags-from-poison.ll
+++ b/test/Analysis/ScalarEvolution/flags-from-poison.ll
@@ -272,17 +272,16 @@ exit:
ret void
}
-; Without inbounds, GEP does not propagate poison in the very
-; conservative approach used here.
-define void @test-add-no-inbounds(float* %input, i32 %offset, i32 %numIterations) {
-; CHECK-LABEL: @test-add-no-inbounds
+; Any poison input makes getelementptr produce poison
+define void @test-gep-propagates-poison(float* %input, i32 %offset, i32 %numIterations) {
+; CHECK-LABEL: @test-gep-propagates-poison
entry:
br label %loop
loop:
%i = phi i32 [ %nexti, %loop ], [ 0, %entry ]
; CHECK: %index32 =
-; CHECK: --> {%offset,+,1}<nw>
+; CHECK: --> {%offset,+,1}<nsw>
%index32 = add nsw i32 %i, %offset
%ptr = getelementptr float, float* %input, i32 %index32
@@ -317,17 +316,16 @@ exit:
ret void
}
-; Multiplication by a non-constant should not propagate poison in the
-; very conservative approach used here.
-define void @test-add-mul-no-propagation(float* %input, i32 %offset, i32 %numIterations) {
-; CHECK-LABEL: @test-add-mul-no-propagation
+; Any poison input to multiplication propagates poison.
+define void @test-mul-propagates-poison(float* %input, i32 %offset, i32 %numIterations) {
+; CHECK-LABEL: @test-mul-propagates-poison
entry:
br label %loop
loop:
%i = phi i32 [ %nexti, %loop ], [ 0, %entry ]
; CHECK: %index32 =
-; CHECK: --> {%offset,+,1}<nw>
+; CHECK: --> {%offset,+,1}<nsw>
%index32 = add nsw i32 %i, %offset
%indexmul = mul nsw i32 %index32, %offset
@@ -340,17 +338,15 @@ exit:
ret void
}
-; Multiplication by a non-zero constant does not propagate poison
-; without a no-wrap flag.
-define void @test-add-mul-no-propagation2(float* %input, i32 %offset, i32 %numIterations) {
-; CHECK-LABEL: @test-add-mul-no-propagation2
+define void @test-mul-propagates-poison-2(float* %input, i32 %offset, i32 %numIterations) {
+; CHECK-LABEL: @test-mul-propagates-poison-2
entry:
br label %loop
loop:
%i = phi i32 [ %nexti, %loop ], [ 0, %entry ]
; CHECK: %index32 =
-; CHECK: --> {%offset,+,1}<nw>
+; CHECK: --> {%offset,+,1}<nsw>
%index32 = add nsw i32 %i, %offset
%indexmul = mul i32 %index32, 2
diff --git a/test/Analysis/ScalarEvolution/implied-via-addition.ll b/test/Analysis/ScalarEvolution/implied-via-addition.ll
new file mode 100644
index 000000000000..c9c276cef466
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/implied-via-addition.ll
@@ -0,0 +1,50 @@
+; RUN: opt -indvars -S < %s | FileCheck %s
+
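+; Check that indvars can prove the compare fed to @use: the guard establishes
+; it on entry, and the backedge is taken only while the incremented value still
+; satisfies it, so the compare folds to the constant true. @test_01 uses an
+; integer induction variable; @test_02 applies the same pattern to pointers.
+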
+declare void @use(i1)
+
+declare void @llvm.experimental.guard(i1, ...)
+
+define void @test_01(i8 %t) {
+; CHECK-LABEL: test_01
+ entry:
+ %st = sext i8 %t to i16
+ %cmp1 = icmp slt i16 %st, 42
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+ br label %loop
+
+ loop:
+; CHECK-LABEL: loop
+ %idx = phi i8 [ %t, %entry ], [ %idx.inc, %loop ]
+ %idx.inc = add i8 %idx, 1
+ %c = icmp slt i8 %idx, 42
+; CHECK: call void @use(i1 true)
+ call void @use(i1 %c)
+ %be = icmp slt i8 %idx.inc, 42
+ br i1 %be, label %loop, label %exit
+
+ exit:
+ ret void
+}
+
+define void @test_02(i8 %t) {
+; CHECK-LABEL: test_02
+ entry:
+ %t.ptr = inttoptr i8 %t to i8*
+ %p.42 = inttoptr i8 42 to i8*
+ %cmp1 = icmp slt i8* %t.ptr, %p.42
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+ br label %loop
+
+ loop:
+; CHECK-LABEL: loop
+ %idx = phi i8* [ %t.ptr, %entry ], [ %snext, %loop ]
+ %snext = getelementptr inbounds i8, i8* %idx, i64 1
+ %c = icmp slt i8* %idx, %p.42
+; CHECK: call void @use(i1 true)
+ call void @use(i1 %c)
+ %be = icmp slt i8* %snext, %p.42
+ br i1 %be, label %loop, label %exit
+
+ exit:
+ ret void
+}
diff --git a/test/Analysis/ScalarEvolution/implied-via-division.ll b/test/Analysis/ScalarEvolution/implied-via-division.ll
new file mode 100644
index 000000000000..43f4c04fa927
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/implied-via-division.ll
@@ -0,0 +1,331 @@
+; RUN: opt < %s -analyze -scalar-evolution | FileCheck %s
+
+declare void @llvm.experimental.guard(i1, ...)
+
+define void @test_1(i32 %n) nounwind {
+; Prove that (n > 1) ===> (n / 2 > 0).
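+; (For the smallest allowed value, n = 2, n / 2 = 1 > 0.)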
+; CHECK: Determining loop execution counts for: @test_1
+; CHECK: Loop %header: backedge-taken count is (-1 + %n.div.2)<nsw>
+entry:
+ %cmp1 = icmp sgt i32 %n, 1
+ %n.div.2 = sdiv i32 %n, 2
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+ br label %header
+
+header:
+ %indvar = phi i32 [ %indvar.next, %header ], [ 0, %entry ]
+ %indvar.next = add i32 %indvar, 1
+ %exitcond = icmp sgt i32 %n.div.2, %indvar.next
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @test_1neg(i32 %n) nounwind {
+; Prove that (n > 0) =\=> (n / 2 > 0).
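+; (Counterexample: n = 1 gives n / 2 = 0, which is not > 0.)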
+; CHECK: Determining loop execution counts for: @test_1neg
+; CHECK: Loop %header: backedge-taken count is (-1 + (1 smax %n.div.2))<nsw>
+entry:
+ %cmp1 = icmp sgt i32 %n, 0
+ %n.div.2 = sdiv i32 %n, 2
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+ br label %header
+
+header:
+ %indvar = phi i32 [ %indvar.next, %header ], [ 0, %entry ]
+ %indvar.next = add i32 %indvar, 1
+ %exitcond = icmp sgt i32 %n.div.2, %indvar.next
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @test_2(i32 %n) nounwind {
+; Prove that (n >= 2) ===> (n / 2 > 0).
+; CHECK: Determining loop execution counts for: @test_2
+; CHECK: Loop %header: backedge-taken count is (-1 + %n.div.2)<nsw>
+entry:
+ %cmp1 = icmp sge i32 %n, 2
+ %n.div.2 = sdiv i32 %n, 2
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+ br label %header
+
+header:
+ %indvar = phi i32 [ %indvar.next, %header ], [ 0, %entry ]
+ %indvar.next = add i32 %indvar, 1
+ %exitcond = icmp sgt i32 %n.div.2, %indvar.next
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @test_2neg(i32 %n) nounwind {
+; Prove that (n >= 1) =\=> (n / 2 > 0).
+; CHECK: Determining loop execution counts for: @test_2neg
+; CHECK: Loop %header: backedge-taken count is (-1 + (1 smax %n.div.2))<nsw>
+entry:
+ %cmp1 = icmp sge i32 %n, 1
+ %n.div.2 = sdiv i32 %n, 2
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+ br label %header
+
+header:
+ %indvar = phi i32 [ %indvar.next, %header ], [ 0, %entry ]
+ %indvar.next = add i32 %indvar, 1
+ %exitcond = icmp sgt i32 %n.div.2, %indvar.next
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @test_3(i32 %n) nounwind {
+; Prove that (n > -2) ===> (n / 2 >= 0).
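+; (For the smallest allowed value, n = -1, sdiv truncates toward zero, so
+; n / 2 = 0 >= 0.)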
+; CHECK: Determining loop execution counts for: @test_3
+; CHECK: Loop %header: backedge-taken count is (1 + %n.div.2)<nsw>
+entry:
+ %cmp1 = icmp sgt i32 %n, -2
+ %n.div.2 = sdiv i32 %n, 2
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+ br label %header
+
+header:
+ %indvar = phi i32 [ %indvar.next, %header ], [ 0, %entry ]
+ %indvar.next = add i32 %indvar, 1
+ %exitcond = icmp sge i32 %n.div.2, %indvar
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @test_3neg(i32 %n) nounwind {
+; Prove that (n > -3) =\=> (n / 2 >= 0).
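+; (Counterexample: n = -2 gives n / 2 = -1 < 0.)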
+; CHECK: Determining loop execution counts for: @test_3neg
+; CHECK: Loop %header: backedge-taken count is (0 smax (1 + %n.div.2)<nsw>)
+entry:
+ %cmp1 = icmp sgt i32 %n, -3
+ %n.div.2 = sdiv i32 %n, 2
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+ br label %header
+
+header:
+ %indvar = phi i32 [ %indvar.next, %header ], [ 0, %entry ]
+ %indvar.next = add i32 %indvar, 1
+ %exitcond = icmp sge i32 %n.div.2, %indvar
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @test_4(i32 %n) nounwind {
+; Prove that (n >= -1) ===> (n / 2 >= 0).
+; CHECK: Determining loop execution counts for: @test_4
+; CHECK: Loop %header: backedge-taken count is (1 + %n.div.2)<nsw>
+entry:
+ %cmp1 = icmp sge i32 %n, -1
+ %n.div.2 = sdiv i32 %n, 2
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+ br label %header
+
+header:
+ %indvar = phi i32 [ %indvar.next, %header ], [ 0, %entry ]
+ %indvar.next = add i32 %indvar, 1
+ %exitcond = icmp sge i32 %n.div.2, %indvar
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @test_4neg(i32 %n) nounwind {
+; Prove that (n >= -2) =\=> (n / 2 >= 0).
+; CHECK: Determining loop execution counts for: @test_4neg
+; CHECK: Loop %header: backedge-taken count is (0 smax (1 + %n.div.2)<nsw>)
+entry:
+ %cmp1 = icmp sge i32 %n, -2
+ %n.div.2 = sdiv i32 %n, 2
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+ br label %header
+
+header:
+ %indvar = phi i32 [ %indvar.next, %header ], [ 0, %entry ]
+ %indvar.next = add i32 %indvar, 1
+ %exitcond = icmp sge i32 %n.div.2, %indvar
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @test_ext_01(i32 %n) nounwind {
+; Prove that (n > 1) ===> (n / 2 > 0).
+; CHECK: Determining loop execution counts for: @test_ext_01
+; CHECK: Loop %header: backedge-taken count is (-1 + (sext i32 %n.div.2 to i64))<nsw>
+entry:
+ %cmp1 = icmp sgt i32 %n, 1
+ %n.div.2 = sdiv i32 %n, 2
+ %n.div.2.ext = sext i32 %n.div.2 to i64
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+ br label %header
+
+header:
+ %indvar = phi i64 [ %indvar.next, %header ], [ 0, %entry ]
+ %indvar.next = add i64 %indvar, 1
+ %exitcond = icmp sgt i64 %n.div.2.ext, %indvar.next
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @test_ext_01neg(i32 %n) nounwind {
+; Prove that (n > 0) =\=> (n / 2 > 0).
+; CHECK: Determining loop execution counts for: @test_ext_01neg
+; CHECK: Loop %header: backedge-taken count is (-1 + (1 smax (sext i32 %n.div.2 to i64)))<nsw>
+entry:
+ %cmp1 = icmp sgt i32 %n, 0
+ %n.div.2 = sdiv i32 %n, 2
+ %n.div.2.ext = sext i32 %n.div.2 to i64
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+ br label %header
+
+header:
+ %indvar = phi i64 [ %indvar.next, %header ], [ 0, %entry ]
+ %indvar.next = add i64 %indvar, 1
+ %exitcond = icmp sgt i64 %n.div.2.ext, %indvar.next
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @test_ext_02(i32 %n) nounwind {
+; Prove that (n >= 2) ===> (n / 2 > 0).
+; CHECK: Determining loop execution counts for: @test_ext_02
+; CHECK: Loop %header: backedge-taken count is (-1 + (sext i32 %n.div.2 to i64))<nsw>
+entry:
+ %cmp1 = icmp sge i32 %n, 2
+ %n.div.2 = sdiv i32 %n, 2
+ %n.div.2.ext = sext i32 %n.div.2 to i64
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+ br label %header
+
+header:
+ %indvar = phi i64 [ %indvar.next, %header ], [ 0, %entry ]
+ %indvar.next = add i64 %indvar, 1
+ %exitcond = icmp sgt i64 %n.div.2.ext, %indvar.next
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @test_ext_02neg(i32 %n) nounwind {
+; Prove that (n >= 1) =\=> (n / 2 > 0).
+; CHECK: Determining loop execution counts for: @test_ext_02neg
+; CHECK: Loop %header: backedge-taken count is (-1 + (1 smax (sext i32 %n.div.2 to i64)))<nsw>
+entry:
+ %cmp1 = icmp sge i32 %n, 1
+ %n.div.2 = sdiv i32 %n, 2
+ %n.div.2.ext = sext i32 %n.div.2 to i64
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+ br label %header
+
+header:
+ %indvar = phi i64 [ %indvar.next, %header ], [ 0, %entry ]
+ %indvar.next = add i64 %indvar, 1
+ %exitcond = icmp sgt i64 %n.div.2.ext, %indvar.next
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @test_ext_03(i32 %n) nounwind {
+; Prove that (n > -2) ===> (n / 2 >= 0).
+; CHECK: Determining loop execution counts for: @test_ext_03
+; CHECK: Loop %header: backedge-taken count is (1 + (sext i32 %n.div.2 to i64))<nsw>
+entry:
+ %cmp1 = icmp sgt i32 %n, -2
+ %n.div.2 = sdiv i32 %n, 2
+ %n.div.2.ext = sext i32 %n.div.2 to i64
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+ br label %header
+
+header:
+ %indvar = phi i64 [ %indvar.next, %header ], [ 0, %entry ]
+ %indvar.next = add i64 %indvar, 1
+ %exitcond = icmp sge i64 %n.div.2.ext, %indvar
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @test_ext_03neg(i32 %n) nounwind {
+; Prove that (n > -3) =\=> (n / 2 >= 0).
+; CHECK: Determining loop execution counts for: @test_ext_03neg
+; CHECK: Loop %header: backedge-taken count is (0 smax (1 + (sext i32 %n.div.2 to i64))<nsw>)
+entry:
+ %cmp1 = icmp sgt i32 %n, -3
+ %n.div.2 = sdiv i32 %n, 2
+ %n.div.2.ext = sext i32 %n.div.2 to i64
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+ br label %header
+
+header:
+ %indvar = phi i64 [ %indvar.next, %header ], [ 0, %entry ]
+ %indvar.next = add i64 %indvar, 1
+ %exitcond = icmp sge i64 %n.div.2.ext, %indvar
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @test_ext_04(i32 %n) nounwind {
+; Prove that (n >= -1) ===> (n / 2 >= 0).
+; CHECK: Determining loop execution counts for: @test_ext_04
+; CHECK: Loop %header: backedge-taken count is (1 + (sext i32 %n.div.2 to i64))<nsw>
+entry:
+ %cmp1 = icmp sge i32 %n, -1
+ %n.div.2 = sdiv i32 %n, 2
+ %n.div.2.ext = sext i32 %n.div.2 to i64
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+ br label %header
+
+header:
+ %indvar = phi i64 [ %indvar.next, %header ], [ 0, %entry ]
+ %indvar.next = add i64 %indvar, 1
+ %exitcond = icmp sge i64 %n.div.2.ext, %indvar
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @test_ext_04neg(i32 %n) nounwind {
+; Prove that (n >= -2) =\=> (n / 2 >= 0).
+; CHECK: Determining loop execution counts for: @test_ext_04neg
+; CHECK: Loop %header: backedge-taken count is (0 smax (1 + (sext i32 %n.div.2 to i64))<nsw>)
+entry:
+ %cmp1 = icmp sge i32 %n, -2
+ %n.div.2 = sdiv i32 %n, 2
+ %n.div.2.ext = sext i32 %n.div.2 to i64
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
+ br label %header
+
+header:
+ %indvar = phi i64 [ %indvar.next, %header ], [ 0, %entry ]
+ %indvar.next = add i64 %indvar, 1
+ %exitcond = icmp sge i64 %n.div.2.ext, %indvar
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
diff --git a/test/Analysis/ScalarEvolution/invalidation.ll b/test/Analysis/ScalarEvolution/invalidation.ll
index 1fcaddb525e6..f750b3d4e5c5 100644
--- a/test/Analysis/ScalarEvolution/invalidation.ll
+++ b/test/Analysis/ScalarEvolution/invalidation.ll
@@ -8,19 +8,6 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-; RUN: opt < %s -passes='require<scalar-evolution>,invalidate<assumptions>,print<scalar-evolution>' \
-; RUN: -debug-pass-manager -disable-output 2>&1 \
-; RUN: | FileCheck %s -check-prefixes=CHECK,CHECK-AC-INVALIDATE
-;
-; CHECK-AC-INVALIDATE: Running pass: RequireAnalysisPass
-; CHECK-AC-INVALIDATE: Running analysis: ScalarEvolutionAnalysis
-; CHECK-AC-INVALIDATE: Running analysis: AssumptionAnalysis
-; CHECK-AC-INVALIDATE: Running pass: InvalidateAnalysisPass
-; CHECK-AC-INVALIDATE: Invalidating analysis: AssumptionAnalysis
-; CHECK-AC-INVALIDATE: Running pass: ScalarEvolutionPrinterPass
-; CHECK-AC-INVALIDATE: Running analysis: ScalarEvolutionAnalysis
-; CHECK-AC-INVALIDATE: Running analysis: AssumptionAnalysis
-
; RUN: opt < %s -passes='require<scalar-evolution>,invalidate<domtree>,print<scalar-evolution>' \
; RUN: -debug-pass-manager -disable-output 2>&1 \
; RUN: | FileCheck %s -check-prefixes=CHECK,CHECK-DT-INVALIDATE
diff --git a/test/Analysis/ScalarEvolution/max-addops-inline.ll b/test/Analysis/ScalarEvolution/max-addops-inline.ll
new file mode 100644
index 000000000000..2701ed32839f
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/max-addops-inline.ll
@@ -0,0 +1,17 @@
+; RUN: opt -analyze -scalar-evolution -scev-addops-inline-threshold=1 < %s | FileCheck --check-prefix=CHECK1 %s
+; RUN: opt -analyze -scalar-evolution -scev-addops-inline-threshold=10 < %s | FileCheck --check-prefix=CHECK10 %s
+
+define i32 @foo(i64 %p0, i32 %p1) {
+; CHECK1: %add2 = add nsw i32 %mul1, %add
+; CHECK1-NEXT: --> ((trunc i64 %p0 to i32) * (1 + (trunc i64 %p0 to i32)) * (1 + %p1))
+
+; CHECK10: %add2 = add nsw i32 %mul1, %add
+; CHECK10-NEXT: --> ((trunc i64 %p0 to i32) * (1 + ((trunc i64 %p0 to i32) * (1 + %p1)) + %p1))
+entry:
+ %tr = trunc i64 %p0 to i32
+ %mul = mul nsw i32 %tr, %p1
+ %add = add nsw i32 %mul, %tr
+ %mul1 = mul nsw i32 %add, %tr
+ %add2 = add nsw i32 %mul1, %add
+ ret i32 %add2
+}
diff --git a/test/Analysis/ScalarEvolution/pr18606-min-zeros.ll b/test/Analysis/ScalarEvolution/pr18606-min-zeros.ll
new file mode 100644
index 000000000000..f4fdf9d3932d
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/pr18606-min-zeros.ll
@@ -0,0 +1,63 @@
+; RUN: opt -S -indvars < %s | FileCheck %s
+
+; CHECK: @test
+; CHECK: %5 = add i32 %local_6_, %local_0_
+; CHECK: %37 = mul i32 %36, %36
+
+define i32 @test(i32, i32) {
+bci_0:
+ br label %bci_30
+
+bci_68: ; preds = %bci_45
+ %local_6_.lcssa = phi i32 [ %local_6_, %bci_45 ]
+ %.lcssa1.lcssa = phi i32 [ %37, %bci_45 ]
+ %.lcssa.lcssa = phi i32 [ 34, %bci_45 ]
+ %2 = add i32 %local_6_.lcssa, 262
+ %3 = add i32 %2, %.lcssa1.lcssa
+ %4 = add i32 %3, %.lcssa.lcssa
+ ret i32 %4
+
+bci_30: ; preds = %bci_45, %bci_0
+ %local_0_ = phi i32 [ %0, %bci_0 ], [ %38, %bci_45 ]
+ %local_6_ = phi i32 [ 2, %bci_0 ], [ %39, %bci_45 ]
+ %5 = add i32 %local_6_, %local_0_
+ br label %bci_45
+
+bci_45: ; preds = %bci_30
+ %6 = mul i32 %5, %5
+ %7 = mul i32 %6, %6
+ %8 = mul i32 %7, %7
+ %9 = mul i32 %8, %8
+ %10 = mul i32 %9, %9
+ %11 = mul i32 %10, %10
+ %12 = mul i32 %11, %11
+ %13 = mul i32 %12, %12
+ %14 = mul i32 %13, %13
+ %15 = mul i32 %14, %14
+ %16 = mul i32 %15, %15
+ %17 = mul i32 %16, %16
+ %18 = mul i32 %17, %17
+ %19 = mul i32 %18, %18
+ %20 = mul i32 %19, %19
+ %21 = mul i32 %20, %20
+ %22 = mul i32 %21, %21
+ %23 = mul i32 %22, %22
+ %24 = mul i32 %23, %23
+ %25 = mul i32 %24, %24
+ %26 = mul i32 %25, %25
+ %27 = mul i32 %26, %26
+ %28 = mul i32 %27, %27
+ %29 = mul i32 %28, %28
+ %30 = mul i32 %29, %29
+ %31 = mul i32 %30, %30
+ %32 = mul i32 %31, %31
+ %33 = mul i32 %32, %32
+ %34 = mul i32 %33, %33
+ %35 = mul i32 %34, %34
+ %36 = mul i32 %35, %35
+ %37 = mul i32 %36, %36
+ %38 = add i32 %37, -11
+ %39 = add i32 %local_6_, 1
+ %40 = icmp sgt i32 %39, 76
+ br i1 %40, label %bci_68, label %bci_30
+}
diff --git a/test/Analysis/ScalarEvolution/pr24757.ll b/test/Analysis/ScalarEvolution/pr24757.ll
index 815adcde0e9d..83baade34ad3 100644
--- a/test/Analysis/ScalarEvolution/pr24757.ll
+++ b/test/Analysis/ScalarEvolution/pr24757.ll
@@ -1,6 +1,6 @@
; RUN: opt -analyze -scalar-evolution < %s | FileCheck %s
-; CHECK: Loop %bb1: backedge-taken count is (zext i7 (trunc i8 %a.promoted to i7) to i8)
+; CHECK: Loop %bb1: backedge-taken count is ((2 * %a.promoted) /u 2)
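+; The new form is equivalent to the old (zext i7 (trunc i8 %a.promoted to i7)
+; to i8): in i8, (2 * x) /u 2 keeps exactly the low 7 bits of x.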
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"
diff --git a/test/Analysis/ScalarEvolution/sext-inreg.ll b/test/Analysis/ScalarEvolution/sext-inreg.ll
index 2201d633f20e..39b3fa50227b 100644
--- a/test/Analysis/ScalarEvolution/sext-inreg.ll
+++ b/test/Analysis/ScalarEvolution/sext-inreg.ll
@@ -15,10 +15,10 @@ bb:
%t2 = ashr i64 %t1, 7
; CHECK: %t2 = ashr i64 %t1, 7
; CHECK-NEXT: sext i57 {0,+,199}<%bb> to i64
-; CHECK-NOT: i57
+; CHECK-SAME: Exits: (sext i57 (199 * (trunc i64 (-1 + (2780916192016515319 * %n)) to i57)) to i64)
; CHECK: %s2 = ashr i64 %s1, 5
; CHECK-NEXT: sext i59 {0,+,199}<%bb> to i64
-; CHECK-NOT: i59
+; CHECK-SAME: Exits: (sext i59 (199 * (trunc i64 (-1 + (2780916192016515319 * %n)) to i59)) to i64)
%s1 = shl i64 %i.01, 5
%s2 = ashr i64 %s1, 5
%t3 = getelementptr i64, i64* %x, i64 %i.01
diff --git a/test/Analysis/ScalarEvolution/sext-mul.ll b/test/Analysis/ScalarEvolution/sext-mul.ll
new file mode 100644
index 000000000000..ca25d9e2efad
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/sext-mul.ll
@@ -0,0 +1,89 @@
+; RUN: opt < %s -analyze -scalar-evolution | FileCheck %s
+
+; CHECK: %tmp9 = shl i64 %tmp8, 33
+; CHECK-NEXT: --> {{.*}} Exits: (-8589934592 + (8589934592 * (zext i32 %arg2 to i64)))
+; CHECK: %tmp10 = ashr exact i64 %tmp9, 32
+; CHECK-NEXT: --> {{.*}} Exits: (sext i32 (-2 + (2 * %arg2)) to i64)
+; CHECK: %tmp11 = getelementptr inbounds i32, i32* %arg, i64 %tmp10
+; CHECK-NEXT: --> {{.*}} Exits: ((4 * (sext i32 (-2 + (2 * %arg2)) to i64)) + %arg)
+; CHECK: %tmp14 = or i64 %tmp10, 1
+; CHECK-NEXT: --> {{.*}} Exits: (1 + (sext i32 (-2 + (2 * %arg2)) to i64))<nsw>
+; CHECK: %tmp15 = getelementptr inbounds i32, i32* %arg, i64 %tmp14
+; CHECK-NEXT: --> {{.*}} Exits: (4 + (4 * (sext i32 (-2 + (2 * %arg2)) to i64)) + %arg)
+; CHECK: Loop %bb7: backedge-taken count is (-1 + (zext i32 %arg2 to i64))<nsw>
+; CHECK-NEXT: Loop %bb7: max backedge-taken count is -1
+; CHECK-NEXT: Loop %bb7: Predicated backedge-taken count is (-1 + (zext i32 %arg2 to i64))<nsw>
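+; On the final iteration %tmp8 = (zext i32 %arg2 to i64) - 1, so %tmp9 exits as
+; 2^33 * (%arg2 - 1), and the exact ashr by 32 leaves 2 * %arg2 - 2, matching
+; the Exits expressions above.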
+
+define void @foo(i32* nocapture %arg, i32 %arg1, i32 %arg2) {
+bb:
+ %tmp = icmp sgt i32 %arg2, 0
+ br i1 %tmp, label %bb3, label %bb6
+
+bb3: ; preds = %bb
+ %tmp4 = zext i32 %arg2 to i64
+ br label %bb7
+
+bb5: ; preds = %bb7
+ br label %bb6
+
+bb6: ; preds = %bb5, %bb
+ ret void
+
+bb7: ; preds = %bb7, %bb3
+ %tmp8 = phi i64 [ %tmp18, %bb7 ], [ 0, %bb3 ]
+ %tmp9 = shl i64 %tmp8, 33
+ %tmp10 = ashr exact i64 %tmp9, 32
+ %tmp11 = getelementptr inbounds i32, i32* %arg, i64 %tmp10
+ %tmp12 = load i32, i32* %tmp11, align 4
+ %tmp13 = sub nsw i32 %tmp12, %arg1
+ store i32 %tmp13, i32* %tmp11, align 4
+ %tmp14 = or i64 %tmp10, 1
+ %tmp15 = getelementptr inbounds i32, i32* %arg, i64 %tmp14
+ %tmp16 = load i32, i32* %tmp15, align 4
+ %tmp17 = mul nsw i32 %tmp16, %arg1
+ store i32 %tmp17, i32* %tmp15, align 4
+ %tmp18 = add nuw nsw i64 %tmp8, 1
+ %tmp19 = icmp eq i64 %tmp18, %tmp4
+ br i1 %tmp19, label %bb5, label %bb7
+}
+
+; CHECK: %t10 = ashr exact i128 %t9, 1
+; CHECK-NEXT: --> {{.*}} Exits: (sext i127 (-633825300114114700748351602688 + (633825300114114700748351602688 * (zext i32 %arg5 to i127))) to i128)
+; CHECK: %t14 = or i128 %t10, 1
+; CHECK-NEXT: --> {{.*}} Exits: (1 + (sext i127 (-633825300114114700748351602688 + (633825300114114700748351602688 * (zext i32 %arg5 to i127))) to i128))<nsw>
+; CHECK: Loop %bb7: backedge-taken count is (-1 + (zext i32 %arg5 to i128))<nsw>
+; CHECK-NEXT: Loop %bb7: max backedge-taken count is -1
+; CHECK-NEXT: Loop %bb7: Predicated backedge-taken count is (-1 + (zext i32 %arg5 to i128))<nsw>
+
+define void @goo(i32* nocapture %arg3, i32 %arg4, i32 %arg5) {
+bb:
+ %t = icmp sgt i32 %arg5, 0
+ br i1 %t, label %bb3, label %bb6
+
+bb3: ; preds = %bb
+ %t4 = zext i32 %arg5 to i128
+ br label %bb7
+
+bb5: ; preds = %bb7
+ br label %bb6
+
+bb6: ; preds = %bb5, %bb
+ ret void
+
+bb7: ; preds = %bb7, %bb3
+ %t8 = phi i128 [ %t18, %bb7 ], [ 0, %bb3 ]
+ %t9 = shl i128 %t8, 100
+ %t10 = ashr exact i128 %t9, 1
+ %t11 = getelementptr inbounds i32, i32* %arg3, i128 %t10
+ %t12 = load i32, i32* %t11, align 4
+ %t13 = sub nsw i32 %t12, %arg4
+ store i32 %t13, i32* %t11, align 4
+ %t14 = or i128 %t10, 1
+ %t15 = getelementptr inbounds i32, i32* %arg3, i128 %t14
+ %t16 = load i32, i32* %t15, align 4
+ %t17 = mul nsw i32 %t16, %arg4
+ store i32 %t17, i32* %t15, align 4
+ %t18 = add nuw nsw i128 %t8, 1
+ %t19 = icmp eq i128 %t18, %t4
+ br i1 %t19, label %bb5, label %bb7
+}
diff --git a/test/Analysis/ScalarEvolution/sext-zero.ll b/test/Analysis/ScalarEvolution/sext-zero.ll
new file mode 100644
index 000000000000..cac42638e959
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/sext-zero.ll
@@ -0,0 +1,39 @@
+; RUN: opt < %s -analyze -scalar-evolution | FileCheck %s
+
+; CHECK: %tmp9 = shl i64 %tmp8, 33
+; CHECK-NEXT: --> {{.*}} Exits: (-8589934592 + (8589934592 * (zext i32 %arg2 to i64)))
+; CHECK-NEXT: %tmp10 = ashr exact i64 %tmp9, 0
+; CHECK-NEXT: --> {{.*}} Exits: (-8589934592 + (8589934592 * (zext i32 %arg2 to i64)))
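+; An 'ashr exact' by zero is the identity, so %tmp10 gets the same SCEV and the
+; same exit value as %tmp9.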
+
+define void @foo(i32* nocapture %arg, i32 %arg1, i32 %arg2) {
+bb:
+ %tmp = icmp sgt i32 %arg2, 0
+ br i1 %tmp, label %bb3, label %bb6
+
+bb3: ; preds = %bb
+ %tmp4 = zext i32 %arg2 to i64
+ br label %bb7
+
+bb5: ; preds = %bb7
+ br label %bb6
+
+bb6: ; preds = %bb5, %bb
+ ret void
+
+bb7: ; preds = %bb7, %bb3
+ %tmp8 = phi i64 [ %tmp18, %bb7 ], [ 0, %bb3 ]
+ %tmp9 = shl i64 %tmp8, 33
+ %tmp10 = ashr exact i64 %tmp9, 0
+ %tmp11 = getelementptr inbounds i32, i32* %arg, i64 %tmp10
+ %tmp12 = load i32, i32* %tmp11, align 4
+ %tmp13 = sub nsw i32 %tmp12, %arg1
+ store i32 %tmp13, i32* %tmp11, align 4
+ %tmp14 = or i64 %tmp10, 1
+ %tmp15 = getelementptr inbounds i32, i32* %arg, i64 %tmp14
+ %tmp16 = load i32, i32* %tmp15, align 4
+ %tmp17 = mul nsw i32 %tmp16, %arg1
+ store i32 %tmp17, i32* %tmp15, align 4
+ %tmp18 = add nuw nsw i64 %tmp8, 1
+ %tmp19 = icmp eq i64 %tmp18, %tmp4
+ br i1 %tmp19, label %bb5, label %bb7
+}
diff --git a/test/Analysis/ScalarEvolution/trip-count-pow2.ll b/test/Analysis/ScalarEvolution/trip-count-pow2.ll
index 2c5b72e49daf..8d053060b50c 100644
--- a/test/Analysis/ScalarEvolution/trip-count-pow2.ll
+++ b/test/Analysis/ScalarEvolution/trip-count-pow2.ll
@@ -48,6 +48,40 @@ exit:
ret void
; CHECK-LABEL: @test3
-; CHECK: Loop %loop: Unpredictable backedge-taken count.
-; CHECK: Loop %loop: Unpredictable max backedge-taken count.
+; CHECK: Loop %loop: backedge-taken count is ((-32 + (32 * %n)) /u 32)
+; CHECK: Loop %loop: max backedge-taken count is ((-32 + (32 * %n)) /u 32)
+}
+
+define void @test4(i32 %n) {
+entry:
+ %s = mul i32 %n, 4
+ br label %loop
+loop:
+ %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
+ %i.next = add i32 %i, 12
+ %t = icmp ne i32 %i.next, %s
+ br i1 %t, label %loop, label %exit
+exit:
+ ret void
+
+; CHECK-LABEL: @test4
+; CHECK: Loop %loop: backedge-taken count is ((-4 + (-1431655764 * %n)) /u 4)
+; CHECK: Loop %loop: max backedge-taken count is ((-4 + (-1431655764 * %n)) /u 4)
+}
+
+define void @test5(i32 %n) {
+entry:
+ %s = mul i32 %n, 4
+ br label %loop
+loop:
+ %i = phi i32 [ %s, %entry ], [ %i.next, %loop ]
+ %i.next = add i32 %i, -4
+ %t = icmp ne i32 %i.next, 0
+ br i1 %t, label %loop, label %exit
+exit:
+ ret void
+
+; CHECK-LABEL: @test5
+; CHECK: Loop %loop: backedge-taken count is ((-4 + (4 * %n)) /u 4)
+; CHECK: Loop %loop: max backedge-taken count is ((-4 + (4 * %n)) /u 4)
}
diff --git a/test/Analysis/ScalarEvolution/tripmultiple_calculation.ll b/test/Analysis/ScalarEvolution/tripmultiple_calculation.ll
new file mode 100644
index 000000000000..133532e31a5b
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/tripmultiple_calculation.ll
@@ -0,0 +1,125 @@
+; RUN: opt -S -analyze -scalar-evolution < %s 2>&1 | FileCheck %s
+
+; umin is represented as -1 * umax in scalar evolution, so -1 is treated as the
+; constant of the multiply expression (-1 * ((-1 + (-1 * %a)) umax (-1 + (-1 * %b)))).
+; The trip multiple is the greatest power-of-2 divisor, found by evaluating the
+; minimal number of trailing zeros of the trip count expression.
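+; Since that -1 constant has no trailing zero bits, the greatest power-of-2
+; divisor we can prove is 2^0 = 1, hence the trip multiple of 1.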
+;
+; int foo(uint32_t a, uint32_t b, uint32_t *c) {
+; for (uint32_t i = 0; i < (uint32_t)(a < b ? a : b) + 1; i++)
+; c[i] = i;
+; return 0;
+; }
+;
+; CHECK: Loop %for.body: Trip multiple is 1
+
+define i32 @foo(i32 %a, i32 %b, i32* %c) {
+entry:
+ %cmp = icmp ult i32 %a, %b
+ %cond = select i1 %cmp, i32 %a, i32 %b
+ %add = add i32 %cond, 1
+ %cmp18 = icmp eq i32 %add, 0
+ br i1 %cmp18, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader: ; preds = %entry
+ br label %for.body
+
+for.cond.cleanup.loopexit: ; preds = %for.body
+ br label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
+ ret i32 0
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %i.09 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i32, i32* %c, i32 %i.09
+ store i32 %i.09, i32* %arrayidx, align 4
+ %inc = add nuw i32 %i.09, 1
+ %cmp1 = icmp ult i32 %inc, %add
+ br i1 %cmp1, label %for.body, label %for.cond.cleanup.loopexit
+}
+
+; The multiply expression n * 3 may overflow; verify that the trip multiple is
+; set to 1 when NUW/NSW are not present.
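+; With n = 2863311531, n * 3 wraps to 1 in 32 bits (3 * 2863311531 = 2^33 + 1),
+; so the loop body runs exactly once and any multiple other than 1 would be
+; wrong.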
+;
+; __attribute__((noinline)) void a(unsigned n) {
+; #pragma unroll(3)
+; for (unsigned i = 0; i != n * 3; ++i)
+; printf("TEST%u\n", i);
+; }
+; int main() { a(2863311531U); }
+;
+; CHECK: Loop %for.body: Trip multiple is 1
+
+@.str2 = private unnamed_addr constant [8 x i8] c"TEST%u\0A\00", align 1
+
+define void @foo2(i32 %n) {
+entry:
+ %mul = mul i32 %n, 3
+ %cmp4 = icmp eq i32 %mul, 0
+ br i1 %cmp4, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader: ; preds = %entry
+ br label %for.body
+
+for.cond.cleanup.loopexit: ; preds = %for.body
+ br label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
+ ret void
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %i.05 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
+ %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str2, i32 0, i32 0), i32 %i.05)
+ %inc = add nuw i32 %i.05, 1
+ %cmp = icmp eq i32 %inc, %mul
+ br i1 %cmp, label %for.cond.cleanup.loopexit, label %for.body
+}
+
+declare i32 @printf(i8* nocapture readonly, ...)
+
+
+; If we couldn't prove the multiply expression 24 * n doesn't overflow, we
+; still return the greatest power-of-2 divisor: even if overflow happens, the
+; trip count remains divisible by it.
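+; 24 has three trailing zero bits, and wrapping arithmetic preserves
+; divisibility by powers of two, so 24 * n stays divisible by 8 modulo 2^32.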
+;
+; CHECK: Loop %l3: Trip multiple is 8
+
+declare void @f()
+
+define i32 @foo3(i32 %n) {
+entry:
+ %loop_ctl = mul i32 %n, 24
+ br label %l3
+
+l3:
+ %x.0 = phi i32 [ 0, %entry ], [ %inc, %l3 ]
+ call void @f()
+ %inc = add i32 %x.0, 1
+ %exitcond = icmp eq i32 %inc, %loop_ctl
+ br i1 %exitcond, label %exit, label %l3
+
+exit:
+ ret i32 0
+}
+
+; If the trip count is a constant, verify that the trip multiple is the trip
+; count itself. For huge trip counts, or zero, we return 1.
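+; Here the exit compare is against the constant 3, so the trip count is exactly
+; 3 and the multiple is the count itself.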
+;
+; CHECK: Loop %l3: Trip multiple is 3
+
+define i32 @foo4(i32 %n) {
+entry:
+ br label %l3
+
+l3:
+ %x.0 = phi i32 [ 0, %entry ], [ %inc, %l3 ]
+ call void @f()
+ %inc = add i32 %x.0, 1
+ %exitcond = icmp eq i32 %inc, 3
+ br i1 %exitcond, label %exit, label %l3
+
+exit:
+ ret i32 0
+}
+
diff --git a/test/Analysis/ScalarEvolution/zext-wrap.ll b/test/Analysis/ScalarEvolution/zext-wrap.ll
index 5bc149e2309a..34462208fbb3 100644
--- a/test/Analysis/ScalarEvolution/zext-wrap.ll
+++ b/test/Analysis/ScalarEvolution/zext-wrap.ll
@@ -6,6 +6,10 @@ entry:
br label %bb.i
bb.i: ; preds = %bb1.i, %bb.nph
+; We should be able to find the range for this expression.
+; CHECK: %l_95.0.i1 = phi i8
+; CHECK: --> {0,+,-1}<%bb.i> U: [2,1) S: [2,1){{ *}}Exits: 2
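+; (The wrapped range [2,1) contains every i8 value except 1.)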
+
%l_95.0.i1 = phi i8 [ %tmp1, %bb.i ], [ 0, %entry ]
; This cast shouldn't be folded into the addrec.
diff --git a/test/Analysis/ValueTracking/known-nonnull-at.ll b/test/Analysis/ValueTracking/known-nonnull-at.ll
index 8a0d1f3aff3b..93ef4f8c4c48 100644
--- a/test/Analysis/ValueTracking/known-nonnull-at.ll
+++ b/test/Analysis/ValueTracking/known-nonnull-at.ll
@@ -8,8 +8,7 @@ declare void @bar(i8* %a, i8* nonnull %b)
define i1 @caller1(i8* %x, i8* %y) {
; CHECK-LABEL: @caller1(
; CHECK-NEXT: call void @bar(i8* %x, i8* %y)
-; CHECK-NEXT: [[NULL_CHECK:%.*]] = icmp eq i8* %y, null
-; CHECK-NEXT: ret i1 [[NULL_CHECK]]
+; CHECK-NEXT: ret i1 false
;
call void @bar(i8* %x, i8* %y)
%null_check = icmp eq i8* %y, null
@@ -34,24 +33,68 @@ define i1 @caller2(i8* %x, i8* %y) {
define i1 @caller3(i8* %x, i8* %y) {
; CHECK-LABEL: @caller3(
; CHECK-NEXT: call void @bar(i8* %x, i8* %y)
-; CHECK-NEXT: [[NULL_CHECK:%.*]] = icmp ne i8* %y, null
-; CHECK-NEXT: ret i1 [[NULL_CHECK]]
+; CHECK-NEXT: ret i1 true
;
call void @bar(i8* %x, i8* %y)
%null_check = icmp ne i8* %y, null
ret i1 %null_check
}
-; Don't know anything about 'y'.
+; FIXME: The call is guaranteed to execute, so 'y' must be nonnull throughout.
define i1 @caller4(i8* %x, i8* %y) {
; CHECK-LABEL: @caller4(
-; CHECK-NEXT: call void @bar(i8* %y, i8* %x)
; CHECK-NEXT: [[NULL_CHECK:%.*]] = icmp ne i8* %y, null
+; CHECK-NEXT: call void @bar(i8* %x, i8* %y)
; CHECK-NEXT: ret i1 [[NULL_CHECK]]
;
- call void @bar(i8* %y, i8* %x)
%null_check = icmp ne i8* %y, null
+ call void @bar(i8* %x, i8* %y)
+ ret i1 %null_check
+}
+
+; The call to bar() does not dominate the null check, so no change.
+
+define i1 @caller5(i8* %x, i8* %y) {
+; CHECK-LABEL: @caller5(
+; CHECK-NEXT: [[NULL_CHECK:%.*]] = icmp eq i8* %y, null
+; CHECK-NEXT: br i1 [[NULL_CHECK]], label %t, label %f
+; CHECK: t:
+; CHECK-NEXT: ret i1 [[NULL_CHECK]]
+; CHECK: f:
+; CHECK-NEXT: call void @bar(i8* %x, i8* %y)
+; CHECK-NEXT: ret i1 [[NULL_CHECK]]
+;
+ %null_check = icmp eq i8* %y, null
+ br i1 %null_check, label %t, label %f
+t:
ret i1 %null_check
+f:
+ call void @bar(i8* %x, i8* %y)
+ ret i1 %null_check
+}
+
+; Make sure that an invoke works similarly to a call.
+
+declare i32 @esfp(...)
+
+define i1 @caller6(i8* %x, i8* %y) personality i8* bitcast (i32 (...)* @esfp to i8*){
+; CHECK-LABEL: @caller6(
+; CHECK-NEXT: invoke void @bar(i8* %x, i8* nonnull %y)
+; CHECK-NEXT: to label %cont unwind label %exc
+; CHECK: cont:
+; CHECK-NEXT: ret i1 false
+;
+ invoke void @bar(i8* %x, i8* nonnull %y)
+ to label %cont unwind label %exc
+
+cont:
+ %null_check = icmp eq i8* %y, null
+ ret i1 %null_check
+
+exc:
+ %lp = landingpad { i8*, i32 }
+ filter [0 x i8*] zeroinitializer
+ unreachable
}
diff --git a/test/Assembler/alloca-addrspace-parse-error-0.ll b/test/Assembler/alloca-addrspace-parse-error-0.ll
new file mode 100644
index 000000000000..a9db43c08d2e
--- /dev/null
+++ b/test/Assembler/alloca-addrspace-parse-error-0.ll
@@ -0,0 +1,11 @@
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+
+target datalayout = "A1"
+
+; CHECK: :8:3: error: expected metadata after comma
+define void @use_alloca() {
+ %alloca = alloca i32, addrspace(1),
+ ret void
+}
+
+!0 = !{}
diff --git a/test/Assembler/alloca-addrspace-parse-error-1.ll b/test/Assembler/alloca-addrspace-parse-error-1.ll
new file mode 100644
index 000000000000..5209f417da63
--- /dev/null
+++ b/test/Assembler/alloca-addrspace-parse-error-1.ll
@@ -0,0 +1,12 @@
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+
+target datalayout = "A1"
+
+; addrspace and align in wrong order
+; CHECK: :8:39: error: expected metadata after comma
+define void @use_alloca() {
+ %alloca = alloca i32, addrspace(1), align 4
+ ret void
+}
+
+!0 = !{}
diff --git a/test/Assembler/alloca-addrspace0.ll b/test/Assembler/alloca-addrspace0.ll
new file mode 100644
index 000000000000..09b7a323f62f
--- /dev/null
+++ b/test/Assembler/alloca-addrspace0.ll
@@ -0,0 +1,24 @@
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+
+target datalayout = "A0"
+; CHECK: target datalayout = "A0"
+
+
+; CHECK: %alloca_scalar_no_align = alloca i32
+; CHECK-NEXT: %alloca_scalar_align4 = alloca i32, align 4
+; CHECK-NEXT: %alloca_scalar_no_align_metadata = alloca i32, !foo !0
+; CHECK-NEXT: %alloca_scalar_align4_metadata = alloca i32, align 4, !foo !0
+; CHECK-NEXT: %alloca_inalloca_scalar_no_align = alloca inalloca i32
+; CHECK-NEXT: %alloca_inalloca_scalar_align4_metadata = alloca inalloca i32, align 4, !foo !0
+define void @use_alloca() {
+ %alloca_scalar_no_align = alloca i32, addrspace(0)
+ %alloca_scalar_align4 = alloca i32, align 4, addrspace(0)
+ %alloca_scalar_no_align_metadata = alloca i32, addrspace(0), !foo !0
+ %alloca_scalar_align4_metadata = alloca i32, align 4, addrspace(0), !foo !0
+ %alloca_inalloca_scalar_no_align = alloca inalloca i32, addrspace(0)
+ %alloca_inalloca_scalar_align4_metadata = alloca inalloca i32, align 4, addrspace(0), !foo !0
+
+ ret void
+}
+
+!0 = !{}
diff --git a/test/Assembler/auto_upgrade_intrinsics.ll b/test/Assembler/auto_upgrade_intrinsics.ll
index 2f0f4f779e7c..d00fe5882bcd 100644
--- a/test/Assembler/auto_upgrade_intrinsics.ll
+++ b/test/Assembler/auto_upgrade_intrinsics.ll
@@ -53,11 +53,20 @@ entry:
define i32 @test.objectsize() {
; CHECK-LABEL: @test.objectsize(
-; CHECK: @llvm.objectsize.i32.p0i8
+; CHECK: @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i1 false, i1 false)
%s = call i32 @llvm.objectsize.i32(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i1 false)
ret i32 %s
}
+declare i64 @llvm.objectsize.i64.p0i8(i8*, i1) nounwind readonly
+define i64 @test.objectsize.2() {
+; CHECK-LABEL: @test.objectsize.2(
+; CHECK: @llvm.objectsize.i64.p0i8(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i1 false, i1 false)
+ %s = call i64 @llvm.objectsize.i64.p0i8(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i1 false)
+ ret i64 %s
+}
+
+
declare <2 x double> @llvm.masked.load.v2f64(<2 x double>* %ptrs, i32, <2 x i1> %mask, <2 x double> %src0)
define <2 x double> @tests.masked.load(<2 x double>* %ptr, <2 x i1> %mask, <2 x double> %passthru) {
@@ -101,6 +110,25 @@ define void @test.stackprotectorcheck() {
ret void
}
+declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind readonly
+declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
+
+define void @tests.lifetime.start.end() {
+ ; CHECK-LABEL: @tests.lifetime.start.end(
+ %a = alloca i8
+ call void @llvm.lifetime.start(i64 1, i8* %a)
+ ; CHECK: call void @llvm.lifetime.start.p0i8(i64 1, i8* %a)
+ store i8 0, i8* %a
+ call void @llvm.lifetime.end(i64 1, i8* %a)
+ ; CHECK: call void @llvm.lifetime.end.p0i8(i64 1, i8* %a)
+ ret void
+}
+
+
; This is part of @test.objectsize(), since llvm.objectsize declaration gets
; emitted at the end.
; CHECK: declare i32 @llvm.objectsize.i32.p0i8
+
+
+; CHECK: declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+; CHECK: declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
diff --git a/test/Assembler/auto_upgrade_nvvm_intrinsics.ll b/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
new file mode 100644
index 000000000000..e8c298ddc7bc
--- /dev/null
+++ b/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
@@ -0,0 +1,102 @@
+; Test to make sure NVVM intrinsics are automatically upgraded.
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+; RUN: verify-uselistorder %s
+
+declare i32 @llvm.nvvm.brev32(i32)
+declare i64 @llvm.nvvm.brev64(i64)
+declare i32 @llvm.nvvm.clz.i(i32)
+declare i32 @llvm.nvvm.clz.ll(i64)
+declare i32 @llvm.nvvm.popc.i(i32)
+declare i32 @llvm.nvvm.popc.ll(i64)
+declare float @llvm.nvvm.h2f(i16)
+
+declare i32 @llvm.nvvm.abs.i(i32)
+declare i64 @llvm.nvvm.abs.ll(i64)
+
+declare i32 @llvm.nvvm.max.i(i32, i32)
+declare i64 @llvm.nvvm.max.ll(i64, i64)
+declare i32 @llvm.nvvm.max.ui(i32, i32)
+declare i64 @llvm.nvvm.max.ull(i64, i64)
+declare i32 @llvm.nvvm.min.i(i32, i32)
+declare i64 @llvm.nvvm.min.ll(i64, i64)
+declare i32 @llvm.nvvm.min.ui(i32, i32)
+declare i64 @llvm.nvvm.min.ull(i64, i64)
+
+; CHECK-LABEL: @simple_upgrade
+define void @simple_upgrade(i32 %a, i64 %b, i16 %c) {
+; CHECK: call i32 @llvm.bitreverse.i32(i32 %a)
+ %r1 = call i32 @llvm.nvvm.brev32(i32 %a)
+
+; CHECK: call i64 @llvm.bitreverse.i64(i64 %b)
+ %r2 = call i64 @llvm.nvvm.brev64(i64 %b)
+
+; CHECK: call i32 @llvm.ctlz.i32(i32 %a, i1 false)
+ %r3 = call i32 @llvm.nvvm.clz.i(i32 %a)
+
+; CHECK: [[clz:%[a-zA-Z0-9.]+]] = call i64 @llvm.ctlz.i64(i64 %b, i1 false)
+; CHECK: trunc i64 [[clz]] to i32
+ %r4 = call i32 @llvm.nvvm.clz.ll(i64 %b)
+
+; CHECK: call i32 @llvm.ctpop.i32(i32 %a)
+ %r5 = call i32 @llvm.nvvm.popc.i(i32 %a)
+
+; CHECK: [[popc:%[a-zA-Z0-9.]+]] = call i64 @llvm.ctpop.i64(i64 %b)
+; CHECK: trunc i64 [[popc]] to i32
+ %r6 = call i32 @llvm.nvvm.popc.ll(i64 %b)
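+; (clz.ll and popc.ll return i32, so the 64-bit results are truncated.)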
+
+; CHECK: call float @llvm.convert.from.fp16.f32(i16 %c)
+ %r7 = call float @llvm.nvvm.h2f(i16 %c)
+ ret void
+}
+
+; CHECK-LABEL: @abs
+define void @abs(i32 %a, i64 %b) {
+; CHECK-DAG: [[negi:%[a-zA-Z0-9.]+]] = sub i32 0, %a
+; CHECK-DAG: [[cmpi:%[a-zA-Z0-9.]+]] = icmp sge i32 %a, 0
+; CHECK: select i1 [[cmpi]], i32 %a, i32 [[negi]]
+ %r1 = call i32 @llvm.nvvm.abs.i(i32 %a)
+
+; CHECK-DAG: [[negll:%[a-zA-Z0-9.]+]] = sub i64 0, %b
+; CHECK-DAG: [[cmpll:%[a-zA-Z0-9.]+]] = icmp sge i64 %b, 0
+; CHECK: select i1 [[cmpll]], i64 %b, i64 [[negll]]
+ %r2 = call i64 @llvm.nvvm.abs.ll(i64 %b)
+
+ ret void
+}
+
+; CHECK-LABEL: @min_max
+define void @min_max(i32 %a1, i32 %a2, i64 %b1, i64 %b2) {
+; CHECK: [[maxi:%[a-zA-Z0-9.]+]] = icmp sge i32 %a1, %a2
+; CHECK: select i1 [[maxi]], i32 %a1, i32 %a2
+ %r1 = call i32 @llvm.nvvm.max.i(i32 %a1, i32 %a2)
+
+; CHECK: [[maxll:%[a-zA-Z0-9.]+]] = icmp sge i64 %b1, %b2
+; CHECK: select i1 [[maxll]], i64 %b1, i64 %b2
+ %r2 = call i64 @llvm.nvvm.max.ll(i64 %b1, i64 %b2)
+
+; CHECK: [[maxui:%[a-zA-Z0-9.]+]] = icmp uge i32 %a1, %a2
+; CHECK: select i1 [[maxui]], i32 %a1, i32 %a2
+ %r3 = call i32 @llvm.nvvm.max.ui(i32 %a1, i32 %a2)
+
+; CHECK: [[maxull:%[a-zA-Z0-9.]+]] = icmp uge i64 %b1, %b2
+; CHECK: select i1 [[maxull]], i64 %b1, i64 %b2
+ %r4 = call i64 @llvm.nvvm.max.ull(i64 %b1, i64 %b2)
+
+; CHECK: [[mini:%[a-zA-Z0-9.]+]] = icmp sle i32 %a1, %a2
+; CHECK: select i1 [[mini]], i32 %a1, i32 %a2
+ %r5 = call i32 @llvm.nvvm.min.i(i32 %a1, i32 %a2)
+
+; CHECK: [[minll:%[a-zA-Z0-9.]+]] = icmp sle i64 %b1, %b2
+; CHECK: select i1 [[minll]], i64 %b1, i64 %b2
+ %r6 = call i64 @llvm.nvvm.min.ll(i64 %b1, i64 %b2)
+
+; CHECK: [[minui:%[a-zA-Z0-9.]+]] = icmp ule i32 %a1, %a2
+; CHECK: select i1 [[minui]], i32 %a1, i32 %a2
+ %r7 = call i32 @llvm.nvvm.min.ui(i32 %a1, i32 %a2)
+
+; CHECK: [[minull:%[a-zA-Z0-9.]+]] = icmp ule i64 %b1, %b2
+; CHECK: select i1 [[minull]], i64 %b1, i64 %b2
+ %r8 = call i64 @llvm.nvvm.min.ull(i64 %b1, i64 %b2)
+
+ ret void
+}
diff --git a/test/Assembler/datalayout-alloca-addrspace-mismatch-0.ll b/test/Assembler/datalayout-alloca-addrspace-mismatch-0.ll
new file mode 100644
index 000000000000..31920183c659
--- /dev/null
+++ b/test/Assembler/datalayout-alloca-addrspace-mismatch-0.ll
@@ -0,0 +1,9 @@
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+
+target datalayout = "A1"
+
+; CHECK: :7:41: error: address space must match datalayout
+define void @use_alloca() {
+ %alloca_scalar_no_align = alloca i32, addrspace(2)
+ ret void
+}
diff --git a/test/Assembler/datalayout-alloca-addrspace-mismatch-1.ll b/test/Assembler/datalayout-alloca-addrspace-mismatch-1.ll
new file mode 100644
index 000000000000..8778a05291c5
--- /dev/null
+++ b/test/Assembler/datalayout-alloca-addrspace-mismatch-1.ll
@@ -0,0 +1,9 @@
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+
+target datalayout = "A1"
+
+; CHECK: :7:50: error: address space must match datalayout
+define void @use_alloca() {
+ %alloca_scalar_no_align = alloca i32, align 4, addrspace(2)
+ ret void
+}
diff --git a/test/Assembler/datalayout-alloca-addrspace-mismatch-2.ll b/test/Assembler/datalayout-alloca-addrspace-mismatch-2.ll
new file mode 100644
index 000000000000..b6e2738a4f6e
--- /dev/null
+++ b/test/Assembler/datalayout-alloca-addrspace-mismatch-2.ll
@@ -0,0 +1,11 @@
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+
+target datalayout = "A1"
+
+; CHECK: :7:50: error: address space must match datalayout
+define void @use_alloca() {
+ %alloca_scalar_no_align = alloca i32, align 4, addrspace(2), !foo !0
+ ret void
+}
+
+!0 = !{}
diff --git a/test/Assembler/datalayout-alloca-addrspace.ll b/test/Assembler/datalayout-alloca-addrspace.ll
new file mode 100644
index 000000000000..578b7ef0b37d
--- /dev/null
+++ b/test/Assembler/datalayout-alloca-addrspace.ll
@@ -0,0 +1,23 @@
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+
+target datalayout = "A1"
+; CHECK: target datalayout = "A1"
+
+; CHECK: %alloca_scalar_no_align = alloca i32, addrspace(1)
+; CHECK-NEXT: %alloca_scalar_align4 = alloca i32, align 4, addrspace(1)
+; CHECK-NEXT: %alloca_scalar_no_align_metadata = alloca i32, addrspace(1), !foo !0
+; CHECK-NEXT: %alloca_scalar_align4_metadata = alloca i32, align 4, addrspace(1), !foo !0
+; CHECK-NEXT: %alloca_inalloca_scalar_no_align = alloca inalloca i32, addrspace(1)
+; CHECK-NEXT: %alloca_inalloca_scalar_align4_metadata = alloca inalloca i32, align 4, addrspace(1), !foo !0
+define void @use_alloca() {
+ %alloca_scalar_no_align = alloca i32, addrspace(1)
+ %alloca_scalar_align4 = alloca i32, align 4, addrspace(1)
+ %alloca_scalar_no_align_metadata = alloca i32, addrspace(1), !foo !0
+ %alloca_scalar_align4_metadata = alloca i32, align 4, addrspace(1), !foo !0
+ %alloca_inalloca_scalar_no_align = alloca inalloca i32, addrspace(1)
+ %alloca_inalloca_scalar_align4_metadata = alloca inalloca i32, align 4, addrspace(1), !foo !0
+
+ ret void
+}
+
+!0 = !{}
diff --git a/test/Assembler/debug-info.ll b/test/Assembler/debug-info.ll
index da19678dbf1e..6be3a308e627 100644
--- a/test/Assembler/debug-info.ll
+++ b/test/Assembler/debug-info.ll
@@ -37,8 +37,8 @@
!13 = distinct !{}
!14 = !DIFile(filename: "", directory: "")
-; CHECK-NEXT: !13 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !6, size: 32, align: 32)
-!15 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 32, align: 32)
+; CHECK-NEXT: !13 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !6, size: 32, align: 32, dwarfAddressSpace: 1)
+!15 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 32, align: 32, dwarfAddressSpace: 1)
; CHECK-NEXT: !14 = !DICompositeType(tag: DW_TAG_structure_type, name: "MyType", file: !10, line: 2, size: 32, align: 32, identifier: "MangledMyType")
; CHECK-NEXT: !15 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "Base", scope: !14, file: !10, line: 3, size: 128, align: 32, offset: 64, flags: DIFlagPublic, elements: !16, runtimeLang: DW_LANG_C_plus_plus_11, vtableHolder: !15, templateParams: !18, identifier: "MangledBase")
@@ -84,4 +84,4 @@
; CHECK-NEXT: !33 = !DIFile(filename: "file", directory: "dir")
!35 = !DIFile(filename: "file", directory: "dir", checksumkind: CSK_MD5, checksum: "000102030405060708090a0b0c0d0e0f")
!36 = !DIFile(filename: "file", directory: "dir", checksumkind: CSK_None)
-!37 = !DIFile(filename: "file", directory: "dir", checksumkind: CSK_None, checksum: "") \ No newline at end of file
+!37 = !DIFile(filename: "file", directory: "dir", checksumkind: CSK_None, checksum: "")
diff --git a/test/Assembler/diexpression.ll b/test/Assembler/diexpression.ll
index dd69c0edecc2..c2fa3ee14c23 100644
--- a/test/Assembler/diexpression.ll
+++ b/test/Assembler/diexpression.ll
@@ -1,16 +1,18 @@
; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s
; RUN: verify-uselistorder %s
-; CHECK: !named = !{!0, !1, !2, !3, !4}
-!named = !{!0, !1, !2, !3, !4}
+; CHECK: !named = !{!0, !1, !2, !3, !4, !5}
+!named = !{!0, !1, !2, !3, !4, !5}
; CHECK: !0 = !DIExpression()
; CHECK-NEXT: !1 = !DIExpression(DW_OP_deref)
; CHECK-NEXT: !2 = !DIExpression(DW_OP_plus, 3)
; CHECK-NEXT: !3 = !DIExpression(DW_OP_LLVM_fragment, 3, 7)
; CHECK-NEXT: !4 = !DIExpression(DW_OP_deref, DW_OP_plus, 3, DW_OP_LLVM_fragment, 3, 7)
+; CHECK-NEXT: !5 = !DIExpression(DW_OP_constu, 2, DW_OP_swap, DW_OP_xderef)
!0 = !DIExpression()
!1 = !DIExpression(DW_OP_deref)
!2 = !DIExpression(DW_OP_plus, 3)
!3 = !DIExpression(DW_OP_LLVM_fragment, 3, 7)
!4 = !DIExpression(DW_OP_deref, DW_OP_plus, 3, DW_OP_LLVM_fragment, 3, 7)
+!5 = !DIExpression(DW_OP_constu, 2, DW_OP_swap, DW_OP_xderef)
diff --git a/test/Assembler/fast-math-flags.ll b/test/Assembler/fast-math-flags.ll
index f0d3ecc761d1..4ef3607e1d00 100644
--- a/test/Assembler/fast-math-flags.ll
+++ b/test/Assembler/fast-math-flags.ll
@@ -74,6 +74,18 @@ entry:
ret float %e
}
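+; 'contract' permits contraction of floating-point operations, e.g.
+; fusing an fmul and fadd into an fma.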
+; CHECK: @contract(
+define float @contract(float %x, float %y) {
+entry:
+; CHECK: %a = fsub contract float %x, %y
+ %a = fsub contract float %x, %y
+; CHECK: %b = fadd contract float %x, %y
+ %b = fadd contract float %x, %y
+; CHECK: %c = fmul contract float %a, %b
+ %c = fmul contract float %a, %b
+ ret float %c
+}
+
; CHECK: no_nan_inf
define float @no_nan_inf(float %x, float %y) {
entry:
diff --git a/test/Assembler/invalid-datalayout-alloca-addrspace.ll b/test/Assembler/invalid-datalayout-alloca-addrspace.ll
new file mode 100644
index 000000000000..9a0e07acf766
--- /dev/null
+++ b/test/Assembler/invalid-datalayout-alloca-addrspace.ll
@@ -0,0 +1,4 @@
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+
+target datalayout = "A16777216"
+; CHECK: Invalid address space, must be a 24bit integer
diff --git a/test/Bitcode/DIGlobalVariableExpression2.ll b/test/Bitcode/DIGlobalVariableExpression2.ll
new file mode 100644
index 000000000000..55974d5317dd
--- /dev/null
+++ b/test/Bitcode/DIGlobalVariableExpression2.ll
@@ -0,0 +1,31 @@
+; RUN: llvm-dis -o - %s.bc | FileCheck %s
+
+; CHECK: @g = common global i32 0, align 4, !dbg ![[G:[0-9]+]]
+; CHECK-DAG: ![[G]] = distinct !DIGlobalVariableExpression(var: ![[GVAR:[0-9]+]])
+; CHECK-DAG: distinct !DICompileUnit({{.*}}, globals: ![[GLOBS:[0-9]+]]
+; CHECK-DAG: ![[GLOBS]] = !{![[GEXPR:[0-9]+]]}
+; CHECK-DAG: ![[GEXPR]] = distinct !DIGlobalVariableExpression(var: ![[GVAR]])
+; CHECK-DAG: ![[GVAR]] = !DIGlobalVariable(name: "g",
+
+; Test the bitcode upgrade for DIGlobalVariable -> DIGlobalVariableExpression.
+
+; ModuleID = 'a.c'
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.12.0"
+
+@g = common global i32 0, align 4
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!6, !7, !8}
+!llvm.ident = !{!9}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.7.0 (clang-stage1-configure-RA_build 241111)", isOptimized: false, runtimeVersion: 0, emissionKind: 1, enums: !2, retainedTypes: !2, subprograms: !2, globals: !3, imports: !2)
+!1 = !DIFile(filename: "a.c", directory: "/tmp")
+!2 = !{}
+!3 = !{!4}
+!4 = !DIGlobalVariable(name: "g", scope: !0, file: !1, line: 1, type: !5, isLocal: false, isDefinition: true, variable: i32* @g)
+!5 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
+!6 = !{i32 2, !"Dwarf Version", i32 2}
+!7 = !{i32 2, !"Debug Info Version", i32 3}
+!8 = !{i32 1, !"PIC Level", i32 2}
+!9 = !{!"clang version 3.7.0 (clang-stage1-configure-RA_build 241111)"}
diff --git a/test/Bitcode/DIGlobalVariableExpression2.ll.bc b/test/Bitcode/DIGlobalVariableExpression2.ll.bc
new file mode 100644
index 000000000000..5f6b398263c9
--- /dev/null
+++ b/test/Bitcode/DIGlobalVariableExpression2.ll.bc
Binary files differ
diff --git a/test/Bitcode/compatibility-3.6.ll b/test/Bitcode/compatibility-3.6.ll
index 87958fc34183..8d51ee11a209 100644
--- a/test/Bitcode/compatibility-3.6.ll
+++ b/test/Bitcode/compatibility-3.6.ll
@@ -981,7 +981,7 @@ exit:
; CHECK: select <2 x i1> <i1 true, i1 false>, <2 x i8> <i8 2, i8 3>, <2 x i8> <i8 3, i8 2>
call void @f.nobuiltin() builtin
- ; CHECK: call void @f.nobuiltin() #33
+ ; CHECK: call void @f.nobuiltin() #34
call fastcc noalias i32* @f.noalias() noinline
; CHECK: call fastcc noalias i32* @f.noalias() #11
@@ -1183,7 +1183,8 @@ define void @intrinsics.codegen() {
; CHECK: attributes #30 = { argmemonly nounwind readonly }
; CHECK: attributes #31 = { argmemonly nounwind }
; CHECK: attributes #32 = { nounwind readonly }
-; CHECK: attributes #33 = { builtin }
+; CHECK: attributes #33 = { inaccessiblemem_or_argmemonly nounwind }
+; CHECK: attributes #34 = { builtin }
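+; The upgrade inserts a new inaccessiblemem_or_argmemonly attribute
+; group before { builtin }, renumbering the builtin reference to #34.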
;; Metadata
diff --git a/test/Bitcode/compatibility-3.7.ll b/test/Bitcode/compatibility-3.7.ll
index 4ae0aed20181..ebdf4c30587c 100644
--- a/test/Bitcode/compatibility-3.7.ll
+++ b/test/Bitcode/compatibility-3.7.ll
@@ -1022,7 +1022,7 @@ exit:
; CHECK: select <2 x i1> <i1 true, i1 false>, <2 x i8> <i8 2, i8 3>, <2 x i8> <i8 3, i8 2>
call void @f.nobuiltin() builtin
- ; CHECK: call void @f.nobuiltin() #36
+ ; CHECK: call void @f.nobuiltin() #37
call fastcc noalias i32* @f.noalias() noinline
; CHECK: call fastcc noalias i32* @f.noalias() #12
@@ -1246,7 +1246,8 @@ define void @misc.metadata() {
; CHECK: attributes #33 = { argmemonly nounwind readonly }
; CHECK: attributes #34 = { argmemonly nounwind }
; CHECK: attributes #35 = { nounwind readonly }
-; CHECK: attributes #36 = { builtin }
+; CHECK: attributes #36 = { inaccessiblemem_or_argmemonly nounwind }
+; CHECK: attributes #37 = { builtin }
;; Metadata
diff --git a/test/Bitcode/compatibility-3.8.ll b/test/Bitcode/compatibility-3.8.ll
index 79c1ecfac9fc..57ea3e068376 100644
--- a/test/Bitcode/compatibility-3.8.ll
+++ b/test/Bitcode/compatibility-3.8.ll
@@ -1170,7 +1170,7 @@ exit:
; CHECK: select <2 x i1> <i1 true, i1 false>, <2 x i8> <i8 2, i8 3>, <2 x i8> <i8 3, i8 2>
call void @f.nobuiltin() builtin
- ; CHECK: call void @f.nobuiltin() #39
+ ; CHECK: call void @f.nobuiltin() #40
call fastcc noalias i32* @f.noalias() noinline
; CHECK: call fastcc noalias i32* @f.noalias() #12
@@ -1556,7 +1556,8 @@ normal:
; CHECK: attributes #36 = { argmemonly nounwind readonly }
; CHECK: attributes #37 = { argmemonly nounwind }
; CHECK: attributes #38 = { nounwind readonly }
-; CHECK: attributes #39 = { builtin }
+; CHECK: attributes #39 = { inaccessiblemem_or_argmemonly nounwind }
+; CHECK: attributes #40 = { builtin }
;; Metadata
diff --git a/test/Bitcode/compatibility-3.9.ll b/test/Bitcode/compatibility-3.9.ll
index 300be3324e6a..2a6cfe14cdb1 100644
--- a/test/Bitcode/compatibility-3.9.ll
+++ b/test/Bitcode/compatibility-3.9.ll
@@ -1241,7 +1241,7 @@ exit:
; CHECK: select <2 x i1> <i1 true, i1 false>, <2 x i8> <i8 2, i8 3>, <2 x i8> <i8 3, i8 2>
call void @f.nobuiltin() builtin
- ; CHECK: call void @f.nobuiltin() #40
+ ; CHECK: call void @f.nobuiltin() #41
call fastcc noalias i32* @f.noalias() noinline
; CHECK: call fastcc noalias i32* @f.noalias() #12
@@ -1588,7 +1588,7 @@ normal:
}
declare void @f.writeonly() writeonly
-; CHECK: declare void @f.writeonly() #39
+; CHECK: declare void @f.writeonly() #40
; CHECK: attributes #0 = { alignstack=4 }
; CHECK: attributes #1 = { alignstack=8 }
@@ -1629,8 +1629,9 @@ declare void @f.writeonly() writeonly
; CHECK: attributes #36 = { argmemonly nounwind readonly }
; CHECK: attributes #37 = { argmemonly nounwind }
; CHECK: attributes #38 = { nounwind readonly }
-; CHECK: attributes #39 = { writeonly }
-; CHECK: attributes #40 = { builtin }
+; CHECK: attributes #39 = { inaccessiblemem_or_argmemonly nounwind }
+; CHECK: attributes #40 = { writeonly }
+; CHECK: attributes #41 = { builtin }
;; Metadata
diff --git a/test/Bitcode/compatibility-4.0.ll b/test/Bitcode/compatibility-4.0.ll
new file mode 100644
index 000000000000..c83c107a2927
--- /dev/null
+++ b/test/Bitcode/compatibility-4.0.ll
@@ -0,0 +1,1690 @@
+; Bitcode compatibility test for llvm 4.0.0
+;
+; N.B.: This is 4.0-compatible IR. The CHECK lines occasionally differ from
+; the IR used to generate the bitcode, and may need to be updated.
+
+; RUN: llvm-dis < %s.bc | FileCheck %s
+
+target datalayout = "E"
+; CHECK: target datalayout = "E"
+
+target triple = "x86_64-apple-macosx10.10.0"
+; CHECK: target triple = "x86_64-apple-macosx10.10.0"
+
+;; Module-level assembly
+module asm "beep boop"
+; CHECK: module asm "beep boop"
+
+;; Comdats
+$comdat.any = comdat any
+; CHECK: $comdat.any = comdat any
+$comdat.exactmatch = comdat exactmatch
+; CHECK: $comdat.exactmatch = comdat exactmatch
+$comdat.largest = comdat largest
+; CHECK: $comdat.largest = comdat largest
+$comdat.noduplicates = comdat noduplicates
+; CHECK: $comdat.noduplicates = comdat noduplicates
+$comdat.samesize = comdat samesize
+; CHECK: $comdat.samesize = comdat samesize
+
+;; Constants
+@const.true = constant i1 true
+; CHECK: @const.true = constant i1 true
+@const.false = constant i1 false
+; CHECK: @const.false = constant i1 false
+@const.int = constant i32 zeroinitializer
+; CHECK: @const.int = constant i32 0
+@const.float = constant double 0.0
+; CHECK: @const.float = constant double 0.0
+@const.null = constant i8* null
+; CHECK: @const.null = constant i8* null
+%const.struct.type = type { i32, i8 }
+%const.struct.type.packed = type <{ i32, i8 }>
+@const.struct = constant %const.struct.type { i32 -1, i8 undef }
+; CHECK: @const.struct = constant %const.struct.type { i32 -1, i8 undef }
+@const.struct.packed = constant %const.struct.type.packed <{ i32 -1, i8 1 }>
+; CHECK: @const.struct.packed = constant %const.struct.type.packed <{ i32 -1, i8 1 }>
+
+; CHECK: @constant.array.i8 = constant [3 x i8] c"\00\01\00"
+@constant.array.i8 = constant [3 x i8] [i8 -0, i8 1, i8 0]
+; CHECK: @constant.array.i16 = constant [3 x i16] [i16 0, i16 1, i16 0]
+@constant.array.i16 = constant [3 x i16] [i16 -0, i16 1, i16 0]
+; CHECK: @constant.array.i32 = constant [3 x i32] [i32 0, i32 1, i32 0]
+@constant.array.i32 = constant [3 x i32] [i32 -0, i32 1, i32 0]
+; CHECK: @constant.array.i64 = constant [3 x i64] [i64 0, i64 1, i64 0]
+@constant.array.i64 = constant [3 x i64] [i64 -0, i64 1, i64 0]
+; CHECK: @constant.array.f16 = constant [3 x half] [half 0xH8000, half 0xH3C00, half 0xH0000]
+@constant.array.f16 = constant [3 x half] [half -0.0, half 1.0, half 0.0]
+; CHECK: @constant.array.f32 = constant [3 x float] [float -0.000000e+00, float 1.000000e+00, float 0.000000e+00]
+@constant.array.f32 = constant [3 x float] [float -0.0, float 1.0, float 0.0]
+; CHECK: @constant.array.f64 = constant [3 x double] [double -0.000000e+00, double 1.000000e+00, double 0.000000e+00]
+@constant.array.f64 = constant [3 x double] [double -0.0, double 1.0, double 0.0]
+
+; CHECK: @constant.vector.i8 = constant <3 x i8> <i8 0, i8 1, i8 0>
+@constant.vector.i8 = constant <3 x i8> <i8 -0, i8 1, i8 0>
+; CHECK: @constant.vector.i16 = constant <3 x i16> <i16 0, i16 1, i16 0>
+@constant.vector.i16 = constant <3 x i16> <i16 -0, i16 1, i16 0>
+; CHECK: @constant.vector.i32 = constant <3 x i32> <i32 0, i32 1, i32 0>
+@constant.vector.i32 = constant <3 x i32> <i32 -0, i32 1, i32 0>
+; CHECK: @constant.vector.i64 = constant <3 x i64> <i64 0, i64 1, i64 0>
+@constant.vector.i64 = constant <3 x i64> <i64 -0, i64 1, i64 0>
+; CHECK: @constant.vector.f16 = constant <3 x half> <half 0xH8000, half 0xH3C00, half 0xH0000>
+@constant.vector.f16 = constant <3 x half> <half -0.0, half 1.0, half 0.0>
+; CHECK: @constant.vector.f32 = constant <3 x float> <float -0.000000e+00, float 1.000000e+00, float 0.000000e+00>
+@constant.vector.f32 = constant <3 x float> <float -0.0, float 1.0, float 0.0>
+; CHECK: @constant.vector.f64 = constant <3 x double> <double -0.000000e+00, double 1.000000e+00, double 0.000000e+00>
+@constant.vector.f64 = constant <3 x double> <double -0.0, double 1.0, double 0.0>
+
+;; Global Variables
+; Format: [@<GlobalVarName> =] [Linkage] [Visibility] [DLLStorageClass]
+; [ThreadLocal] [(unnamed_addr|local_unnamed_addr)] [AddrSpace] [ExternallyInitialized]
+; <global | constant> <Type> [<InitializerConstant>]
+; [, section "name"] [, comdat [($name)]] [, align <Alignment>]
+
+; Global Variables -- Simple
+@g1 = global i32 0
+; CHECK: @g1 = global i32 0
+@g2 = constant i32 0
+; CHECK: @g2 = constant i32 0
+
+; Global Variables -- Linkage
+@g.private = private global i32 0
+; CHECK: @g.private = private global i32 0
+@g.internal = internal global i32 0
+; CHECK: @g.internal = internal global i32 0
+@g.available_externally = available_externally global i32 0
+; CHECK: @g.available_externally = available_externally global i32 0
+@g.linkonce = linkonce global i32 0
+; CHECK: @g.linkonce = linkonce global i32 0
+@g.weak = weak global i32 0
+; CHECK: @g.weak = weak global i32 0
+@g.common = common global i32 0
+; CHECK: @g.common = common global i32 0
+@g.appending = appending global [4 x i8] c"test"
+; CHECK: @g.appending = appending global [4 x i8] c"test"
+@g.extern_weak = extern_weak global i32
+; CHECK: @g.extern_weak = extern_weak global i32
+@g.linkonce_odr = linkonce_odr global i32 0
+; CHECK: @g.linkonce_odr = linkonce_odr global i32 0
+@g.weak_odr = weak_odr global i32 0
+; CHECK: @g.weak_odr = weak_odr global i32 0
+@g.external = external global i32
+; CHECK: @g.external = external global i32
+
+; Global Variables -- Visibility
+@g.default = default global i32 0
+; CHECK: @g.default = global i32 0
+@g.hidden = hidden global i32 0
+; CHECK: @g.hidden = hidden global i32 0
+@g.protected = protected global i32 0
+; CHECK: @g.protected = protected global i32 0
+
+; Global Variables -- DLLStorageClass
+@g.dlldefault = default global i32 0
+; CHECK: @g.dlldefault = global i32 0
+@g.dllimport = external dllimport global i32
+; CHECK: @g.dllimport = external dllimport global i32
+@g.dllexport = dllexport global i32 0
+; CHECK: @g.dllexport = dllexport global i32 0
+
+; Global Variables -- ThreadLocal
+@g.notthreadlocal = global i32 0
+; CHECK: @g.notthreadlocal = global i32 0
+@g.generaldynamic = thread_local global i32 0
+; CHECK: @g.generaldynamic = thread_local global i32 0
+@g.localdynamic = thread_local(localdynamic) global i32 0
+; CHECK: @g.localdynamic = thread_local(localdynamic) global i32 0
+@g.initialexec = thread_local(initialexec) global i32 0
+; CHECK: @g.initialexec = thread_local(initialexec) global i32 0
+@g.localexec = thread_local(localexec) global i32 0
+; CHECK: @g.localexec = thread_local(localexec) global i32 0
+
+; Global Variables -- unnamed_addr and local_unnamed_addr
+@g.unnamed_addr = unnamed_addr global i32 0
+; CHECK: @g.unnamed_addr = unnamed_addr global i32 0
+@g.local_unnamed_addr = local_unnamed_addr global i32 0
+; CHECK: @g.local_unnamed_addr = local_unnamed_addr global i32 0
+
+; Global Variables -- AddrSpace
+@g.addrspace = addrspace(1) global i32 0
+; CHECK: @g.addrspace = addrspace(1) global i32 0
+
+; Global Variables -- ExternallyInitialized
+@g.externally_initialized = external externally_initialized global i32
+; CHECK: @g.externally_initialized = external externally_initialized global i32
+
+; Global Variables -- section
+@g.section = global i32 0, section "_DATA"
+; CHECK: @g.section = global i32 0, section "_DATA"
+
+; Global Variables -- comdat
+@comdat.any = global i32 0, comdat
+; CHECK: @comdat.any = global i32 0, comdat
+@comdat.exactmatch = global i32 0, comdat
+; CHECK: @comdat.exactmatch = global i32 0, comdat
+@comdat.largest = global i32 0, comdat
+; CHECK: @comdat.largest = global i32 0, comdat
+@comdat.noduplicates = global i32 0, comdat
+; CHECK: @comdat.noduplicates = global i32 0, comdat
+@comdat.samesize = global i32 0, comdat
+; CHECK: @comdat.samesize = global i32 0, comdat
+
+; Force two globals from different comdats into sections with the same name.
+$comdat1 = comdat any
+$comdat2 = comdat any
+@g.comdat1 = global i32 0, section "SharedSection", comdat($comdat1)
+; CHECK: @g.comdat1 = global i32 0, section "SharedSection", comdat($comdat1)
+@g.comdat2 = global i32 0, section "SharedSection", comdat($comdat2)
+; CHECK: @g.comdat2 = global i32 0, section "SharedSection", comdat($comdat2)
+
+; Global Variables -- align
+@g.align = global i32 0, align 4
+; CHECK: @g.align = global i32 0, align 4
+
+; Global Variables -- Intrinsics
+%pri.func.data = type { i32, void ()*, i8* }
+@g.used1 = global i32 0
+@g.used2 = global i32 0
+@g.used3 = global i8 0
+declare void @g.f1()
+@llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
+@llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
+@llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+@llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+
+;; Aliases
+; Format: @<Name> = [Linkage] [Visibility] [DLLStorageClass] [ThreadLocal]
+; [unnamed_addr] alias <AliaseeTy> @<Aliasee>
+
+; Aliases -- Linkage
+@a.private = private alias i32, i32* @g.private
+; CHECK: @a.private = private alias i32, i32* @g.private
+@a.internal = internal alias i32, i32* @g.internal
+; CHECK: @a.internal = internal alias i32, i32* @g.internal
+@a.linkonce = linkonce alias i32, i32* @g.linkonce
+; CHECK: @a.linkonce = linkonce alias i32, i32* @g.linkonce
+@a.weak = weak alias i32, i32* @g.weak
+; CHECK: @a.weak = weak alias i32, i32* @g.weak
+@a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
+; CHECK: @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
+@a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
+; CHECK: @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
+@a.external = external alias i32, i32* @g1
+; CHECK: @a.external = alias i32, i32* @g1
+
+; Aliases -- Visibility
+@a.default = default alias i32, i32* @g.default
+; CHECK: @a.default = alias i32, i32* @g.default
+@a.hidden = hidden alias i32, i32* @g.hidden
+; CHECK: @a.hidden = hidden alias i32, i32* @g.hidden
+@a.protected = protected alias i32, i32* @g.protected
+; CHECK: @a.protected = protected alias i32, i32* @g.protected
+
+; Aliases -- DLLStorageClass
+@a.dlldefault = default alias i32, i32* @g.dlldefault
+; CHECK: @a.dlldefault = alias i32, i32* @g.dlldefault
+@a.dllimport = dllimport alias i32, i32* @g1
+; CHECK: @a.dllimport = dllimport alias i32, i32* @g1
+@a.dllexport = dllexport alias i32, i32* @g.dllexport
+; CHECK: @a.dllexport = dllexport alias i32, i32* @g.dllexport
+
+; Aliases -- ThreadLocal
+@a.notthreadlocal = alias i32, i32* @g.notthreadlocal
+; CHECK: @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
+@a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
+; CHECK: @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
+@a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
+; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
+@a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
+; CHECK: @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
+@a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
+; CHECK: @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
+
+; Aliases -- unnamed_addr and local_unnamed_addr
+@a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
+; CHECK: @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
+@a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr
+; CHECK: @a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr
+
+;; IFunc
+; Format @<Name> = [Linkage] [Visibility] ifunc <IFuncTy>,
+; <ResolverTy>* @<Resolver>
+
+; IFunc -- Linkage
+@ifunc.external = external ifunc void (), i8* ()* @ifunc_resolver
+; CHECK: @ifunc.external = ifunc void (), i8* ()* @ifunc_resolver
+@ifunc.private = private ifunc void (), i8* ()* @ifunc_resolver
+; CHECK: @ifunc.private = private ifunc void (), i8* ()* @ifunc_resolver
+@ifunc.internal = internal ifunc void (), i8* ()* @ifunc_resolver
+; CHECK: @ifunc.internal = internal ifunc void (), i8* ()* @ifunc_resolver
+
+; IFunc -- Visibility
+@ifunc.default = default ifunc void (), i8* ()* @ifunc_resolver
+; CHECK: @ifunc.default = ifunc void (), i8* ()* @ifunc_resolver
+@ifunc.hidden = hidden ifunc void (), i8* ()* @ifunc_resolver
+; CHECK: @ifunc.hidden = hidden ifunc void (), i8* ()* @ifunc_resolver
+@ifunc.protected = protected ifunc void (), i8* ()* @ifunc_resolver
+; CHECK: @ifunc.protected = protected ifunc void (), i8* ()* @ifunc_resolver
+
+define i8* @ifunc_resolver() {
+entry:
+ ret i8* null
+}
+
+;; Functions
+; Format: define [linkage] [visibility] [DLLStorageClass]
+; [cconv] [ret attrs]
+; <ResultType> @<FunctionName> ([argument list])
+; [(unnamed_addr|local_unnamed_addr)] [fn Attrs] [section "name"] [comdat [($name)]]
+; [align N] [gc] [prefix Constant] [prologue Constant]
+; [personality Constant] { ... }
+
+; Functions -- Simple
+declare void @f1 ()
+; CHECK: declare void @f1()
+
+define void @f2 () {
+; CHECK: define void @f2()
+entry:
+ ret void
+}
+
+; Functions -- linkage
+define private void @f.private() {
+; CHECK: define private void @f.private()
+entry:
+ ret void
+}
+define internal void @f.internal() {
+; CHECK: define internal void @f.internal()
+entry:
+ ret void
+}
+define available_externally void @f.available_externally() {
+; CHECK: define available_externally void @f.available_externally()
+entry:
+ ret void
+}
+define linkonce void @f.linkonce() {
+; CHECK: define linkonce void @f.linkonce()
+entry:
+ ret void
+}
+define weak void @f.weak() {
+; CHECK: define weak void @f.weak()
+entry:
+ ret void
+}
+define linkonce_odr void @f.linkonce_odr() {
+; CHECK: define linkonce_odr void @f.linkonce_odr()
+entry:
+ ret void
+}
+define weak_odr void @f.weak_odr() {
+; CHECK: define weak_odr void @f.weak_odr()
+entry:
+ ret void
+}
+declare external void @f.external()
+; CHECK: declare void @f.external()
+declare extern_weak void @f.extern_weak()
+; CHECK: declare extern_weak void @f.extern_weak()
+
+; Functions -- visibility
+declare default void @f.default()
+; CHECK: declare void @f.default()
+declare hidden void @f.hidden()
+; CHECK: declare hidden void @f.hidden()
+declare protected void @f.protected()
+; CHECK: declare protected void @f.protected()
+
+; Functions -- DLLStorageClass
+declare dllimport void @f.dllimport()
+; CHECK: declare dllimport void @f.dllimport()
+declare dllexport void @f.dllexport()
+; CHECK: declare dllexport void @f.dllexport()
+
+; Functions -- cconv (Calling conventions)
+declare ccc void @f.ccc()
+; CHECK: declare void @f.ccc()
+declare fastcc void @f.fastcc()
+; CHECK: declare fastcc void @f.fastcc()
+declare coldcc void @f.coldcc()
+; CHECK: declare coldcc void @f.coldcc()
+declare cc10 void @f.cc10()
+; CHECK: declare ghccc void @f.cc10()
+declare ghccc void @f.ghccc()
+; CHECK: declare ghccc void @f.ghccc()
+declare cc11 void @f.cc11()
+; CHECK: declare cc11 void @f.cc11()
+declare webkit_jscc void @f.webkit_jscc()
+; CHECK: declare webkit_jscc void @f.webkit_jscc()
+declare anyregcc void @f.anyregcc()
+; CHECK: declare anyregcc void @f.anyregcc()
+declare preserve_mostcc void @f.preserve_mostcc()
+; CHECK: declare preserve_mostcc void @f.preserve_mostcc()
+declare preserve_allcc void @f.preserve_allcc()
+; CHECK: declare preserve_allcc void @f.preserve_allcc()
+declare cc64 void @f.cc64()
+; CHECK: declare x86_stdcallcc void @f.cc64()
+declare x86_stdcallcc void @f.x86_stdcallcc()
+; CHECK: declare x86_stdcallcc void @f.x86_stdcallcc()
+declare cc65 void @f.cc65()
+; CHECK: declare x86_fastcallcc void @f.cc65()
+declare x86_fastcallcc void @f.x86_fastcallcc()
+; CHECK: declare x86_fastcallcc void @f.x86_fastcallcc()
+declare cc66 void @f.cc66()
+; CHECK: declare arm_apcscc void @f.cc66()
+declare arm_apcscc void @f.arm_apcscc()
+; CHECK: declare arm_apcscc void @f.arm_apcscc()
+declare cc67 void @f.cc67()
+; CHECK: declare arm_aapcscc void @f.cc67()
+declare arm_aapcscc void @f.arm_aapcscc()
+; CHECK: declare arm_aapcscc void @f.arm_aapcscc()
+declare cc68 void @f.cc68()
+; CHECK: declare arm_aapcs_vfpcc void @f.cc68()
+declare arm_aapcs_vfpcc void @f.arm_aapcs_vfpcc()
+; CHECK: declare arm_aapcs_vfpcc void @f.arm_aapcs_vfpcc()
+declare cc69 void @f.cc69()
+; CHECK: declare msp430_intrcc void @f.cc69()
+declare msp430_intrcc void @f.msp430_intrcc()
+; CHECK: declare msp430_intrcc void @f.msp430_intrcc()
+declare cc70 void @f.cc70()
+; CHECK: declare x86_thiscallcc void @f.cc70()
+declare x86_thiscallcc void @f.x86_thiscallcc()
+; CHECK: declare x86_thiscallcc void @f.x86_thiscallcc()
+declare cc71 void @f.cc71()
+; CHECK: declare ptx_kernel void @f.cc71()
+declare ptx_kernel void @f.ptx_kernel()
+; CHECK: declare ptx_kernel void @f.ptx_kernel()
+declare cc72 void @f.cc72()
+; CHECK: declare ptx_device void @f.cc72()
+declare ptx_device void @f.ptx_device()
+; CHECK: declare ptx_device void @f.ptx_device()
+declare cc75 void @f.cc75()
+; CHECK: declare spir_func void @f.cc75()
+declare spir_func void @f.spir_func()
+; CHECK: declare spir_func void @f.spir_func()
+declare cc76 void @f.cc76()
+; CHECK: declare spir_kernel void @f.cc76()
+declare spir_kernel void @f.spir_kernel()
+; CHECK: declare spir_kernel void @f.spir_kernel()
+declare cc77 void @f.cc77()
+; CHECK: declare intel_ocl_bicc void @f.cc77()
+declare intel_ocl_bicc void @f.intel_ocl_bicc()
+; CHECK: declare intel_ocl_bicc void @f.intel_ocl_bicc()
+declare cc78 void @f.cc78()
+; CHECK: declare x86_64_sysvcc void @f.cc78()
+declare x86_64_sysvcc void @f.x86_64_sysvcc()
+; CHECK: declare x86_64_sysvcc void @f.x86_64_sysvcc()
+declare cc79 void @f.cc79()
+; CHECK: declare x86_64_win64cc void @f.cc79()
+declare x86_64_win64cc void @f.x86_64_win64cc()
+; CHECK: declare x86_64_win64cc void @f.x86_64_win64cc()
+declare cc80 void @f.cc80()
+; CHECK: declare x86_vectorcallcc void @f.cc80()
+declare x86_vectorcallcc void @f.x86_vectorcallcc()
+; CHECK: declare x86_vectorcallcc void @f.x86_vectorcallcc()
+declare cc81 void @f.cc81()
+; CHECK: declare hhvmcc void @f.cc81()
+declare hhvmcc void @f.hhvmcc()
+; CHECK: declare hhvmcc void @f.hhvmcc()
+declare cc82 void @f.cc82()
+; CHECK: declare hhvm_ccc void @f.cc82()
+declare hhvm_ccc void @f.hhvm_ccc()
+; CHECK: declare hhvm_ccc void @f.hhvm_ccc()
+declare cc83 void @f.cc83()
+; CHECK: declare x86_intrcc void @f.cc83()
+declare x86_intrcc void @f.x86_intrcc()
+; CHECK: declare x86_intrcc void @f.x86_intrcc()
+declare cc84 void @f.cc84()
+; CHECK: declare avr_intrcc void @f.cc84()
+declare avr_intrcc void @f.avr_intrcc()
+; CHECK: declare avr_intrcc void @f.avr_intrcc()
+declare cc85 void @f.cc85()
+; CHECK: declare avr_signalcc void @f.cc85()
+declare avr_signalcc void @f.avr_signalcc()
+; CHECK: declare avr_signalcc void @f.avr_signalcc()
+declare cc87 void @f.cc87()
+; CHECK: declare amdgpu_vs void @f.cc87()
+declare amdgpu_vs void @f.amdgpu_vs()
+; CHECK: declare amdgpu_vs void @f.amdgpu_vs()
+declare cc88 void @f.cc88()
+; CHECK: declare amdgpu_gs void @f.cc88()
+declare amdgpu_gs void @f.amdgpu_gs()
+; CHECK: declare amdgpu_gs void @f.amdgpu_gs()
+declare cc89 void @f.cc89()
+; CHECK: declare amdgpu_ps void @f.cc89()
+declare amdgpu_ps void @f.amdgpu_ps()
+; CHECK: declare amdgpu_ps void @f.amdgpu_ps()
+declare cc90 void @f.cc90()
+; CHECK: declare amdgpu_cs void @f.cc90()
+declare amdgpu_cs void @f.amdgpu_cs()
+; CHECK: declare amdgpu_cs void @f.amdgpu_cs()
+declare cc91 void @f.cc91()
+; CHECK: declare amdgpu_kernel void @f.cc91()
+declare amdgpu_kernel void @f.amdgpu_kernel()
+; CHECK: declare amdgpu_kernel void @f.amdgpu_kernel()
+declare cc1023 void @f.cc1023()
+; CHECK: declare cc1023 void @f.cc1023()
+
+; Functions -- ret attrs (Return attributes)
+declare zeroext i64 @f.zeroext()
+; CHECK: declare zeroext i64 @f.zeroext()
+declare signext i64 @f.signext()
+; CHECK: declare signext i64 @f.signext()
+declare inreg i32* @f.inreg()
+; CHECK: declare inreg i32* @f.inreg()
+declare noalias i32* @f.noalias()
+; CHECK: declare noalias i32* @f.noalias()
+declare nonnull i32* @f.nonnull()
+; CHECK: declare nonnull i32* @f.nonnull()
+declare dereferenceable(4) i32* @f.dereferenceable4()
+; CHECK: declare dereferenceable(4) i32* @f.dereferenceable4()
+declare dereferenceable(8) i32* @f.dereferenceable8()
+; CHECK: declare dereferenceable(8) i32* @f.dereferenceable8()
+declare dereferenceable(16) i32* @f.dereferenceable16()
+; CHECK: declare dereferenceable(16) i32* @f.dereferenceable16()
+declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
+; CHECK: declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
+declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
+; CHECK: declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
+declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
+; CHECK: declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
+
+; Functions -- Parameter attributes
+declare void @f.param.zeroext(i8 zeroext)
+; CHECK: declare void @f.param.zeroext(i8 zeroext)
+declare void @f.param.signext(i8 signext)
+; CHECK: declare void @f.param.signext(i8 signext)
+declare void @f.param.inreg(i8 inreg)
+; CHECK: declare void @f.param.inreg(i8 inreg)
+declare void @f.param.byval({ i8, i8 }* byval)
+; CHECK: declare void @f.param.byval({ i8, i8 }* byval)
+declare void @f.param.inalloca(i8* inalloca)
+; CHECK: declare void @f.param.inalloca(i8* inalloca)
+declare void @f.param.sret(i8* sret)
+; CHECK: declare void @f.param.sret(i8* sret)
+declare void @f.param.noalias(i8* noalias)
+; CHECK: declare void @f.param.noalias(i8* noalias)
+declare void @f.param.nocapture(i8* nocapture)
+; CHECK: declare void @f.param.nocapture(i8* nocapture)
+declare void @f.param.nest(i8* nest)
+; CHECK: declare void @f.param.nest(i8* nest)
+declare i8* @f.param.returned(i8* returned)
+; CHECK: declare i8* @f.param.returned(i8* returned)
+declare void @f.param.nonnull(i8* nonnull)
+; CHECK: declare void @f.param.nonnull(i8* nonnull)
+declare void @f.param.dereferenceable(i8* dereferenceable(4))
+; CHECK: declare void @f.param.dereferenceable(i8* dereferenceable(4))
+declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
+; CHECK: declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
+
+; Functions -- unnamed_addr and local_unnamed_addr
+declare void @f.unnamed_addr() unnamed_addr
+; CHECK: declare void @f.unnamed_addr() unnamed_addr
+declare void @f.local_unnamed_addr() local_unnamed_addr
+; CHECK: declare void @f.local_unnamed_addr() local_unnamed_addr
+
+; Functions -- fn Attrs (Function attributes)
+declare void @f.alignstack4() alignstack(4)
+; CHECK: declare void @f.alignstack4() #0
+declare void @f.alignstack8() alignstack(8)
+; CHECK: declare void @f.alignstack8() #1
+declare void @f.alwaysinline() alwaysinline
+; CHECK: declare void @f.alwaysinline() #2
+declare void @f.cold() cold
+; CHECK: declare void @f.cold() #3
+declare void @f.convergent() convergent
+; CHECK: declare void @f.convergent() #4
+declare void @f.inlinehint() inlinehint
+; CHECK: declare void @f.inlinehint() #5
+declare void @f.jumptable() unnamed_addr jumptable
+; CHECK: declare void @f.jumptable() unnamed_addr #6
+declare void @f.minsize() minsize
+; CHECK: declare void @f.minsize() #7
+declare void @f.naked() naked
+; CHECK: declare void @f.naked() #8
+declare void @f.nobuiltin() nobuiltin
+; CHECK: declare void @f.nobuiltin() #9
+declare void @f.noduplicate() noduplicate
+; CHECK: declare void @f.noduplicate() #10
+declare void @f.noimplicitfloat() noimplicitfloat
+; CHECK: declare void @f.noimplicitfloat() #11
+declare void @f.noinline() noinline
+; CHECK: declare void @f.noinline() #12
+declare void @f.nonlazybind() nonlazybind
+; CHECK: declare void @f.nonlazybind() #13
+declare void @f.noredzone() noredzone
+; CHECK: declare void @f.noredzone() #14
+declare void @f.noreturn() noreturn
+; CHECK: declare void @f.noreturn() #15
+declare void @f.nounwind() nounwind
+; CHECK: declare void @f.nounwind() #16
+declare void @f.optnone() noinline optnone
+; CHECK: declare void @f.optnone() #17
+declare void @f.optsize() optsize
+; CHECK: declare void @f.optsize() #18
+declare void @f.readnone() readnone
+; CHECK: declare void @f.readnone() #19
+declare void @f.readonly() readonly
+; CHECK: declare void @f.readonly() #20
+declare void @f.returns_twice() returns_twice
+; CHECK: declare void @f.returns_twice() #21
+declare void @f.safestack() safestack
+; CHECK: declare void @f.safestack() #22
+declare void @f.sanitize_address() sanitize_address
+; CHECK: declare void @f.sanitize_address() #23
+declare void @f.sanitize_memory() sanitize_memory
+; CHECK: declare void @f.sanitize_memory() #24
+declare void @f.sanitize_thread() sanitize_thread
+; CHECK: declare void @f.sanitize_thread() #25
+declare void @f.ssp() ssp
+; CHECK: declare void @f.ssp() #26
+declare void @f.sspreq() sspreq
+; CHECK: declare void @f.sspreq() #27
+declare void @f.sspstrong() sspstrong
+; CHECK: declare void @f.sspstrong() #28
+declare void @f.thunk() "thunk"
+; CHECK: declare void @f.thunk() #29
+declare void @f.uwtable() uwtable
+; CHECK: declare void @f.uwtable() #30
+declare void @f.kvpair() "cpu"="cortex-a8"
+; CHECK: declare void @f.kvpair() #31
+declare void @f.norecurse() norecurse
+; CHECK: declare void @f.norecurse() #32
+declare void @f.inaccessiblememonly() inaccessiblememonly
+; CHECK: declare void @f.inaccessiblememonly() #33
+declare void @f.inaccessiblemem_or_argmemonly() inaccessiblemem_or_argmemonly
+; CHECK: declare void @f.inaccessiblemem_or_argmemonly() #34
+
+; Functions -- section
+declare void @f.section() section "80"
+; CHECK: declare void @f.section() section "80"
+
+; Functions -- comdat
+define void @f.comdat_any() comdat($comdat.any) {
+; CHECK: define void @f.comdat_any() comdat($comdat.any)
+entry:
+ ret void
+}
+define void @f.comdat_exactmatch() comdat($comdat.exactmatch) {
+; CHECK: define void @f.comdat_exactmatch() comdat($comdat.exactmatch)
+entry:
+ ret void
+}
+define void @f.comdat_largest() comdat($comdat.largest) {
+; CHECK: define void @f.comdat_largest() comdat($comdat.largest)
+entry:
+ ret void
+}
+define void @f.comdat_noduplicates() comdat($comdat.noduplicates) {
+; CHECK: define void @f.comdat_noduplicates() comdat($comdat.noduplicates)
+entry:
+ ret void
+}
+define void @f.comdat_samesize() comdat($comdat.samesize) {
+; CHECK: define void @f.comdat_samesize() comdat($comdat.samesize)
+entry:
+ ret void
+}
+
+; Functions -- align
+declare void @f.align2() align 2
+; CHECK: declare void @f.align2() align 2
+declare void @f.align4() align 4
+; CHECK: declare void @f.align4() align 4
+declare void @f.align8() align 8
+; CHECK: declare void @f.align8() align 8
+
+; Functions -- GC
+declare void @f.gcshadow() gc "shadow-stack"
+; CHECK: declare void @f.gcshadow() gc "shadow-stack"
+
+; Functions -- Prefix data
+declare void @f.prefixi32() prefix i32 1684365668
+; CHECK: declare void @f.prefixi32() prefix i32 1684365668
+declare void @f.prefixarray() prefix [4 x i32] [i32 0, i32 1, i32 2, i32 3]
+; CHECK: declare void @f.prefixarray() prefix [4 x i32] [i32 0, i32 1, i32 2, i32 3]
+
+; Functions -- Prologue data
+declare void @f.prologuei32() prologue i32 1684365669
+; CHECK: declare void @f.prologuei32() prologue i32 1684365669
+declare void @f.prologuearray() prologue [4 x i32] [i32 0, i32 1, i32 2, i32 3]
+; CHECK: declare void @f.prologuearray() prologue [4 x i32] [i32 0, i32 1, i32 2, i32 3]
+
+; Functions -- Personality constant
+declare void @llvm.donothing() nounwind readnone
+; CHECK: declare void @llvm.donothing() #35
+define void @f.no_personality() personality i8 3 {
+; CHECK: define void @f.no_personality() personality i8 3
+ invoke void @llvm.donothing() to label %normal unwind label %exception
+exception:
+ %cleanup = landingpad i8 cleanup
+ br label %normal
+normal:
+ ret void
+}
+
+declare i32 @f.personality_handler()
+; CHECK: declare i32 @f.personality_handler()
+define void @f.personality() personality i32 ()* @f.personality_handler {
+; CHECK: define void @f.personality() personality i32 ()* @f.personality_handler
+ invoke void @llvm.donothing() to label %normal unwind label %exception
+exception:
+ %cleanup = landingpad i32 cleanup
+ br label %normal
+normal:
+ ret void
+}
+
+;; Atomic Memory Ordering Constraints
+define void @atomics(i32* %word) {
+ %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
+ ; CHECK: %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
+ %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
+ ; CHECK: %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
+ %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
+ ; CHECK: %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
+ %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
+ ; CHECK: %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
+ %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
+ ; CHECK: %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
+ %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
+ ; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
+ %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
+ ; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
+ %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 singlethread seq_cst monotonic
+ %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
+ ; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
+ %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
+ ; CHECK: %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
+ %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
+ ; CHECK: %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
+ %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
+ ; CHECK: %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
+ %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
+ ; CHECK: %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
+ %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
+ ; CHECK: %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
+ %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
+ ; CHECK: %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
+ %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
+ ; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
+ %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
+ ; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
+ %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 singlethread monotonic
+ %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 singlethread monotonic
+ fence acquire
+ ; CHECK: fence acquire
+ fence release
+ ; CHECK: fence release
+ fence acq_rel
+ ; CHECK: fence acq_rel
+ fence singlethread seq_cst
+ ; CHECK: fence singlethread seq_cst
+
+ %ld.1 = load atomic i32, i32* %word monotonic, align 4
+ ; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
+ %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
+ ; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
+ %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, i32* %word singlethread seq_cst, align 16
+
+ store atomic i32 23, i32* %word monotonic, align 4
+ ; CHECK: store atomic i32 23, i32* %word monotonic, align 4
+ store atomic volatile i32 24, i32* %word monotonic, align 4
+ ; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
+ store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, i32* %word singlethread monotonic, align 4
+ ret void
+}
+
+;; Fast Math Flags
+define void @fastmathflags(float %op1, float %op2) {
+ %f.nnan = fadd nnan float %op1, %op2
+ ; CHECK: %f.nnan = fadd nnan float %op1, %op2
+ %f.ninf = fadd ninf float %op1, %op2
+ ; CHECK: %f.ninf = fadd ninf float %op1, %op2
+ %f.nsz = fadd nsz float %op1, %op2
+ ; CHECK: %f.nsz = fadd nsz float %op1, %op2
+ %f.arcp = fadd arcp float %op1, %op2
+ ; CHECK: %f.arcp = fadd arcp float %op1, %op2
+ %f.fast = fadd fast float %op1, %op2
+ ; CHECK: %f.fast = fadd fast float %op1, %op2
+ ret void
+}
+
+; Check various fast math flags and floating-point types on calls.
+
+declare float @fmf1()
+declare double @fmf2()
+declare <4 x double> @fmf3()
+
+; CHECK-LABEL: fastMathFlagsForCalls(
+define void @fastMathFlagsForCalls(float %f, double %d1, <4 x double> %d2) {
+ %call.fast = call fast float @fmf1()
+ ; CHECK: %call.fast = call fast float @fmf1()
+
+ ; Throw in some other attributes to make sure those stay in the right places.
+
+ %call.nsz.arcp = notail call nsz arcp double @fmf2()
+ ; CHECK: %call.nsz.arcp = notail call nsz arcp double @fmf2()
+
+ %call.nnan.ninf = tail call nnan ninf fastcc <4 x double> @fmf3()
+ ; CHECK: %call.nnan.ninf = tail call nnan ninf fastcc <4 x double> @fmf3()
+
+ ret void
+}
+
+;; Type System
+%opaquety = type opaque
+define void @typesystem() {
+ %p0 = bitcast i8* null to i32 (i32)*
+ ; CHECK: %p0 = bitcast i8* null to i32 (i32)*
+ %p1 = bitcast i8* null to void (i8*)*
+ ; CHECK: %p1 = bitcast i8* null to void (i8*)*
+ %p2 = bitcast i8* null to i32 (i8*, ...)*
+ ; CHECK: %p2 = bitcast i8* null to i32 (i8*, ...)*
+ %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
+ ; CHECK: %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
+ %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
+ ; CHECK: %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
+ %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
+ ; CHECK: %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
+
+ %t0 = alloca i1942652
+ ; CHECK: %t0 = alloca i1942652
+ %t1 = alloca half
+ ; CHECK: %t1 = alloca half
+ %t2 = alloca float
+ ; CHECK: %t2 = alloca float
+ %t3 = alloca double
+ ; CHECK: %t3 = alloca double
+ %t4 = alloca fp128
+ ; CHECK: %t4 = alloca fp128
+ %t5 = alloca x86_fp80
+ ; CHECK: %t5 = alloca x86_fp80
+ %t6 = alloca ppc_fp128
+ ; CHECK: %t6 = alloca ppc_fp128
+ %t7 = alloca x86_mmx
+ ; CHECK: %t7 = alloca x86_mmx
+ %t8 = alloca %opaquety*
+ ; CHECK: %t8 = alloca %opaquety*
+
+ ret void
+}
+
+declare void @llvm.token(token)
+; CHECK: declare void @llvm.token(token)
+
+;; Inline Assembler Expressions
+define void @inlineasm(i32 %arg) {
+ call i32 asm "bswap $0", "=r,r"(i32 %arg)
+ ; CHECK: call i32 asm "bswap $0", "=r,r"(i32 %arg)
+ call i32 asm sideeffect "blt $1, $2, $3", "=r,r,rm"(i32 %arg, i32 %arg)
+ ; CHECK: call i32 asm sideeffect "blt $1, $2, $3", "=r,r,rm"(i32 %arg, i32 %arg)
+ ret void
+}
+
+;; Instructions
+
+; Instructions -- Terminators
+define void @instructions.terminators(i8 %val) personality i32 -10 {
+ br i1 false, label %iftrue, label %iffalse
+ ; CHECK: br i1 false, label %iftrue, label %iffalse
+ br label %iftrue
+ ; CHECK: br label %iftrue
+iftrue:
+ ret void
+ ; CHECK: ret void
+iffalse:
+
+ switch i8 %val, label %defaultdest [
+ ; CHECK: switch i8 %val, label %defaultdest [
+ i8 0, label %defaultdest.0
+ ; CHECK: i8 0, label %defaultdest.0
+ i8 1, label %defaultdest.1
+ ; CHECK: i8 1, label %defaultdest.1
+ i8 2, label %defaultdest.2
+ ; CHECK: i8 2, label %defaultdest.2
+ ]
+ ; CHECK: ]
+defaultdest:
+ ret void
+defaultdest.0:
+ ret void
+defaultdest.1:
+ ret void
+defaultdest.2:
+
+ indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
+ ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
+ indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
+ ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
+
+ invoke fastcc void @f.fastcc()
+ ; CHECK: invoke fastcc void @f.fastcc()
+ to label %defaultdest unwind label %exc
+ ; CHECK: to label %defaultdest unwind label %exc
+exc:
+ %cleanup = landingpad i32 cleanup
+
+ resume i32 undef
+ ; CHECK: resume i32 undef
+ unreachable
+ ; CHECK: unreachable
+
+ ret void
+}
+
+define i32 @instructions.win_eh.1() personality i32 -3 {
+entry:
+ %arg1 = alloca i32
+ %arg2 = alloca i32
+ invoke void @f.ccc() to label %normal unwind label %catchswitch1
+ invoke void @f.ccc() to label %normal unwind label %catchswitch2
+ invoke void @f.ccc() to label %normal unwind label %catchswitch3
+
+catchswitch1:
+ %cs1 = catchswitch within none [label %catchpad1] unwind to caller
+
+catchpad1:
+ catchpad within %cs1 []
+ br label %normal
+ ; CHECK: catchpad within %cs1 []
+ ; CHECK-NEXT: br label %normal
+
+catchswitch2:
+ %cs2 = catchswitch within none [label %catchpad2] unwind to caller
+
+catchpad2:
+ catchpad within %cs2 [i32* %arg1]
+ br label %normal
+ ; CHECK: catchpad within %cs2 [i32* %arg1]
+ ; CHECK-NEXT: br label %normal
+
+catchswitch3:
+ %cs3 = catchswitch within none [label %catchpad3] unwind label %cleanuppad1
+
+catchpad3:
+ catchpad within %cs3 [i32* %arg1, i32* %arg2]
+ br label %normal
+ ; CHECK: catchpad within %cs3 [i32* %arg1, i32* %arg2]
+ ; CHECK-NEXT: br label %normal
+
+cleanuppad1:
+ %clean.1 = cleanuppad within none []
+ unreachable
+ ; CHECK: %clean.1 = cleanuppad within none []
+ ; CHECK-NEXT: unreachable
+
+normal:
+ ret i32 0
+}
+;
+define i32 @instructions.win_eh.2() personality i32 -4 {
+entry:
+ invoke void @f.ccc() to label %invoke.cont unwind label %catchswitch
+
+invoke.cont:
+ invoke void @f.ccc() to label %continue unwind label %cleanup
+
+cleanup:
+ %clean = cleanuppad within none []
+ ; CHECK: %clean = cleanuppad within none []
+ cleanupret from %clean unwind to caller
+ ; CHECK: cleanupret from %clean unwind to caller
+
+catchswitch:
+ %cs = catchswitch within none [label %catchpad] unwind label %terminate
+
+catchpad:
+ %catch = catchpad within %cs []
+ br label %body
+ ; CHECK: %catch = catchpad within %cs []
+ ; CHECK-NEXT: br label %body
+
+body:
+ invoke void @f.ccc() [ "funclet"(token %catch) ]
+ to label %continue unwind label %terminate.inner
+ catchret from %catch to label %return
+ ; CHECK: catchret from %catch to label %return
+
+return:
+ ret i32 0
+
+terminate.inner:
+ cleanuppad within %catch []
+ unreachable
+ ; CHECK: cleanuppad within %catch []
+ ; CHECK-NEXT: unreachable
+
+terminate:
+ cleanuppad within none []
+ unreachable
+ ; CHECK: cleanuppad within none []
+ ; CHECK-NEXT: unreachable
+
+continue:
+ ret i32 0
+}
+
+; Instructions -- Binary Operations
+define void @instructions.binops(i8 %op1, i8 %op2) {
+ ; nuw x nsw
+ add i8 %op1, %op2
+ ; CHECK: add i8 %op1, %op2
+ add nuw i8 %op1, %op2
+ ; CHECK: add nuw i8 %op1, %op2
+ add nsw i8 %op1, %op2
+ ; CHECK: add nsw i8 %op1, %op2
+ add nuw nsw i8 %op1, %op2
+ ; CHECK: add nuw nsw i8 %op1, %op2
+ sub i8 %op1, %op2
+ ; CHECK: sub i8 %op1, %op2
+ sub nuw i8 %op1, %op2
+ ; CHECK: sub nuw i8 %op1, %op2
+ sub nsw i8 %op1, %op2
+ ; CHECK: sub nsw i8 %op1, %op2
+ sub nuw nsw i8 %op1, %op2
+ ; CHECK: sub nuw nsw i8 %op1, %op2
+ mul i8 %op1, %op2
+ ; CHECK: mul i8 %op1, %op2
+ mul nuw i8 %op1, %op2
+ ; CHECK: mul nuw i8 %op1, %op2
+ mul nsw i8 %op1, %op2
+ ; CHECK: mul nsw i8 %op1, %op2
+ mul nuw nsw i8 %op1, %op2
+ ; CHECK: mul nuw nsw i8 %op1, %op2
+
+ ; exact
+ udiv i8 %op1, %op2
+ ; CHECK: udiv i8 %op1, %op2
+ udiv exact i8 %op1, %op2
+ ; CHECK: udiv exact i8 %op1, %op2
+ sdiv i8 %op1, %op2
+ ; CHECK: sdiv i8 %op1, %op2
+ sdiv exact i8 %op1, %op2
+ ; CHECK: sdiv exact i8 %op1, %op2
+
+ ; none
+ urem i8 %op1, %op2
+ ; CHECK: urem i8 %op1, %op2
+ srem i8 %op1, %op2
+ ; CHECK: srem i8 %op1, %op2
+
+ ret void
+}
+
+; Instructions -- Bitwise Binary Operations
+define void @instructions.bitwise_binops(i8 %op1, i8 %op2) {
+ ; nuw x nsw
+ shl i8 %op1, %op2
+ ; CHECK: shl i8 %op1, %op2
+ shl nuw i8 %op1, %op2
+ ; CHECK: shl nuw i8 %op1, %op2
+ shl nsw i8 %op1, %op2
+ ; CHECK: shl nsw i8 %op1, %op2
+ shl nuw nsw i8 %op1, %op2
+ ; CHECK: shl nuw nsw i8 %op1, %op2
+
+ ; exact
+ lshr i8 %op1, %op2
+ ; CHECK: lshr i8 %op1, %op2
+ lshr exact i8 %op1, %op2
+ ; CHECK: lshr exact i8 %op1, %op2
+ ashr i8 %op1, %op2
+ ; CHECK: ashr i8 %op1, %op2
+ ashr exact i8 %op1, %op2
+ ; CHECK: ashr exact i8 %op1, %op2
+
+ ; none
+ and i8 %op1, %op2
+ ; CHECK: and i8 %op1, %op2
+ or i8 %op1, %op2
+ ; CHECK: or i8 %op1, %op2
+ xor i8 %op1, %op2
+ ; CHECK: xor i8 %op1, %op2
+
+ ret void
+}
+
+; Instructions -- Vector Operations
+define void @instructions.vectorops(<4 x float> %vec, <4 x float> %vec2) {
+ extractelement <4 x float> %vec, i8 0
+ ; CHECK: extractelement <4 x float> %vec, i8 0
+ insertelement <4 x float> %vec, float 3.500000e+00, i8 0
+ ; CHECK: insertelement <4 x float> %vec, float 3.500000e+00, i8 0
+ shufflevector <4 x float> %vec, <4 x float> %vec2, <2 x i32> zeroinitializer
+ ; CHECK: shufflevector <4 x float> %vec, <4 x float> %vec2, <2 x i32> zeroinitializer
+
+ ret void
+}
+
+; Instructions -- Aggregate Operations
+define void @instructions.aggregateops({ i8, i32 } %up, <{ i8, i32 }> %p,
+ [3 x i8] %arr, { i8, { i32 }} %n,
+ <2 x i8*> %pvec, <2 x i64> %offsets) {
+ extractvalue { i8, i32 } %up, 0
+ ; CHECK: extractvalue { i8, i32 } %up, 0
+ extractvalue <{ i8, i32 }> %p, 1
+ ; CHECK: extractvalue <{ i8, i32 }> %p, 1
+ extractvalue [3 x i8] %arr, 2
+ ; CHECK: extractvalue [3 x i8] %arr, 2
+ extractvalue { i8, { i32 } } %n, 1, 0
+ ; CHECK: extractvalue { i8, { i32 } } %n, 1, 0
+
+ insertvalue { i8, i32 } %up, i8 1, 0
+ ; CHECK: insertvalue { i8, i32 } %up, i8 1, 0
+ insertvalue <{ i8, i32 }> %p, i32 2, 1
+ ; CHECK: insertvalue <{ i8, i32 }> %p, i32 2, 1
+ insertvalue [3 x i8] %arr, i8 0, 0
+ ; CHECK: insertvalue [3 x i8] %arr, i8 0, 0
+ insertvalue { i8, { i32 } } %n, i32 0, 1, 0
+ ; CHECK: insertvalue { i8, { i32 } } %n, i32 0, 1, 0
+
+ %up.ptr = alloca { i8, i32 }
+ %p.ptr = alloca <{ i8, i32 }>
+ %arr.ptr = alloca [3 x i8]
+ %n.ptr = alloca { i8, { i32 } }
+
+ getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
+ ; CHECK: getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
+ getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
+ ; CHECK: getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
+ getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
+ ; CHECK: getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
+ getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
+ ; CHECK: getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
+ getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
+ ; CHECK: getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
+ getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
+ ; CHECK: getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
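+  ; (Note: with a <2 x i8*> base and <2 x i64> indices, this GEP yields a
+  ; <2 x i8*> of per-lane byte addresses, i.e. lane i is %pvec[i] + %offsets[i].)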
+
+ ret void
+}
+
+; Instructions -- Memory Access and Addressing Operations
+!7 = !{i32 1}
+!8 = !{}
+!9 = !{i64 4}
+define void @instructions.memops(i32** %base) {
+ alloca i32, i8 4, align 4
+ ; CHECK: alloca i32, i8 4, align 4
+ alloca inalloca i32, i8 4, align 4
+ ; CHECK: alloca inalloca i32, i8 4, align 4
+
+ load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+ ; CHECK: load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+ load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+ ; CHECK: load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+
+ store i32* null, i32** %base, align 4, !nontemporal !8
+ ; CHECK: store i32* null, i32** %base, align 4, !nontemporal !8
+ store volatile i32* null, i32** %base, align 4, !nontemporal !8
+ ; CHECK: store volatile i32* null, i32** %base, align 4, !nontemporal !8
+
+ ret void
+}
+
+; Instructions -- Conversion Operations
+define void @instructions.conversions() {
+ trunc i32 -1 to i1
+ ; CHECK: trunc i32 -1 to i1
+ zext i32 -1 to i64
+ ; CHECK: zext i32 -1 to i64
+ sext i32 -1 to i64
+ ; CHECK: sext i32 -1 to i64
+ fptrunc float undef to half
+ ; CHECK: fptrunc float undef to half
+ fpext half undef to float
+ ; CHECK: fpext half undef to float
+ fptoui float undef to i32
+ ; CHECK: fptoui float undef to i32
+ fptosi float undef to i32
+ ; CHECK: fptosi float undef to i32
+ uitofp i32 1 to float
+ ; CHECK: uitofp i32 1 to float
+ sitofp i32 -1 to float
+ ; CHECK: sitofp i32 -1 to float
+ ptrtoint i8* null to i64
+ ; CHECK: ptrtoint i8* null to i64
+ inttoptr i64 0 to i8*
+ ; CHECK: inttoptr i64 0 to i8*
+ bitcast i32 0 to i32
+ ; CHECK: bitcast i32 0 to i32
+ addrspacecast i32* null to i32 addrspace(1)*
+ ; CHECK: addrspacecast i32* null to i32 addrspace(1)*
+
+ ret void
+}
+
+; Instructions -- Other Operations
+define void @instructions.other(i32 %op1, i32 %op2, half %fop1, half %fop2) {
+entry:
+ icmp eq i32 %op1, %op2
+ ; CHECK: icmp eq i32 %op1, %op2
+ icmp ne i32 %op1, %op2
+ ; CHECK: icmp ne i32 %op1, %op2
+ icmp ugt i32 %op1, %op2
+ ; CHECK: icmp ugt i32 %op1, %op2
+ icmp uge i32 %op1, %op2
+ ; CHECK: icmp uge i32 %op1, %op2
+ icmp ult i32 %op1, %op2
+ ; CHECK: icmp ult i32 %op1, %op2
+ icmp ule i32 %op1, %op2
+ ; CHECK: icmp ule i32 %op1, %op2
+ icmp sgt i32 %op1, %op2
+ ; CHECK: icmp sgt i32 %op1, %op2
+ icmp sge i32 %op1, %op2
+ ; CHECK: icmp sge i32 %op1, %op2
+ icmp slt i32 %op1, %op2
+ ; CHECK: icmp slt i32 %op1, %op2
+ icmp sle i32 %op1, %op2
+ ; CHECK: icmp sle i32 %op1, %op2
+
+ fcmp false half %fop1, %fop2
+ ; CHECK: fcmp false half %fop1, %fop2
+ fcmp oeq half %fop1, %fop2
+ ; CHECK: fcmp oeq half %fop1, %fop2
+ fcmp ogt half %fop1, %fop2
+ ; CHECK: fcmp ogt half %fop1, %fop2
+ fcmp oge half %fop1, %fop2
+ ; CHECK: fcmp oge half %fop1, %fop2
+ fcmp olt half %fop1, %fop2
+ ; CHECK: fcmp olt half %fop1, %fop2
+ fcmp ole half %fop1, %fop2
+ ; CHECK: fcmp ole half %fop1, %fop2
+ fcmp one half %fop1, %fop2
+ ; CHECK: fcmp one half %fop1, %fop2
+ fcmp ord half %fop1, %fop2
+ ; CHECK: fcmp ord half %fop1, %fop2
+ fcmp ueq half %fop1, %fop2
+ ; CHECK: fcmp ueq half %fop1, %fop2
+ fcmp ugt half %fop1, %fop2
+ ; CHECK: fcmp ugt half %fop1, %fop2
+ fcmp uge half %fop1, %fop2
+ ; CHECK: fcmp uge half %fop1, %fop2
+ fcmp ult half %fop1, %fop2
+ ; CHECK: fcmp ult half %fop1, %fop2
+ fcmp ule half %fop1, %fop2
+ ; CHECK: fcmp ule half %fop1, %fop2
+ fcmp une half %fop1, %fop2
+ ; CHECK: fcmp une half %fop1, %fop2
+ fcmp uno half %fop1, %fop2
+ ; CHECK: fcmp uno half %fop1, %fop2
+ fcmp true half %fop1, %fop2
+ ; CHECK: fcmp true half %fop1, %fop2
+
+ br label %exit
+L1:
+ %v1 = add i32 %op1, %op2
+ br label %exit
+L2:
+ %v2 = add i32 %op1, %op2
+ br label %exit
+exit:
+ phi i32 [ %v1, %L1 ], [ %v2, %L2 ], [ %op1, %entry ]
+ ; CHECK: phi i32 [ %v1, %L1 ], [ %v2, %L2 ], [ %op1, %entry ]
+
+ select i1 true, i32 0, i32 1
+ ; CHECK: select i1 true, i32 0, i32 1
+ select <2 x i1> <i1 true, i1 false>, <2 x i8> <i8 2, i8 3>, <2 x i8> <i8 3, i8 2>
+ ; CHECK: select <2 x i1> <i1 true, i1 false>, <2 x i8> <i8 2, i8 3>, <2 x i8> <i8 3, i8 2>
+
+ call void @f.nobuiltin() builtin
+ ; CHECK: call void @f.nobuiltin() #41
+
+ call fastcc noalias i32* @f.noalias() noinline
+ ; CHECK: call fastcc noalias i32* @f.noalias() #12
+ tail call ghccc nonnull i32* @f.nonnull() minsize
+ ; CHECK: tail call ghccc nonnull i32* @f.nonnull() #7
+
+ ret void
+}
+
+define void @instructions.call_musttail(i8* inalloca %val) {
+ musttail call void @f.param.inalloca(i8* inalloca %val)
+ ; CHECK: musttail call void @f.param.inalloca(i8* inalloca %val)
+
+ ret void
+}
+
+define void @instructions.call_notail() {
+ notail call void @f1()
+ ; CHECK: notail call void @f1()
+
+ ret void
+}
+
+define void @instructions.landingpad() personality i32 -2 {
+ invoke void @llvm.donothing() to label %proceed unwind label %catch1
+ invoke void @llvm.donothing() to label %proceed unwind label %catch2
+ invoke void @llvm.donothing() to label %proceed unwind label %catch3
+ invoke void @llvm.donothing() to label %proceed unwind label %catch4
+
+catch1:
+ landingpad i32
+ ; CHECK: landingpad i32
+ cleanup
+ ; CHECK: cleanup
+ br label %proceed
+
+catch2:
+ landingpad i32
+ ; CHECK: landingpad i32
+ cleanup
+ ; CHECK: cleanup
+ catch i32* null
+ ; CHECK: catch i32* null
+ br label %proceed
+
+catch3:
+ landingpad i32
+ ; CHECK: landingpad i32
+ cleanup
+ ; CHECK: cleanup
+ catch i32* null
+ ; CHECK: catch i32* null
+ catch i32* null
+ ; CHECK: catch i32* null
+ br label %proceed
+
+catch4:
+ landingpad i32
+ ; CHECK: landingpad i32
+ filter [2 x i32] zeroinitializer
+ ; CHECK: filter [2 x i32] zeroinitializer
+ br label %proceed
+
+proceed:
+ ret void
+}
+
+;; Intrinsic Functions
+
+; Intrinsic Functions -- Variable Argument Handling
+declare void @llvm.va_start(i8*)
+declare void @llvm.va_copy(i8*, i8*)
+declare void @llvm.va_end(i8*)
+define void @instructions.va_arg(i8* %v, ...) {
+ %ap = alloca i8*
+ %ap2 = bitcast i8** %ap to i8*
+
+ call void @llvm.va_start(i8* %ap2)
+ ; CHECK: call void @llvm.va_start(i8* %ap2)
+
+ va_arg i8* %ap2, i32
+ ; CHECK: va_arg i8* %ap2, i32
+
+ call void @llvm.va_copy(i8* %v, i8* %ap2)
+ ; CHECK: call void @llvm.va_copy(i8* %v, i8* %ap2)
+
+ call void @llvm.va_end(i8* %ap2)
+ ; CHECK: call void @llvm.va_end(i8* %ap2)
+
+ ret void
+}
+
+; Intrinsic Functions -- Accurate Garbage Collection
+declare void @llvm.gcroot(i8**, i8*)
+declare i8* @llvm.gcread(i8*, i8**)
+declare void @llvm.gcwrite(i8*, i8*, i8**)
+define void @intrinsics.gc() gc "shadow-stack" {
+ %ptrloc = alloca i8*
+ call void @llvm.gcroot(i8** %ptrloc, i8* null)
+ ; CHECK: call void @llvm.gcroot(i8** %ptrloc, i8* null)
+
+ call i8* @llvm.gcread(i8* null, i8** %ptrloc)
+ ; CHECK: call i8* @llvm.gcread(i8* null, i8** %ptrloc)
+
+ %ref = alloca i8
+ call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
+ ; CHECK: call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
+
+ ret void
+}
+
+; Intrinsic Functions -- Code Generation
+declare i8* @llvm.returnaddress(i32)
+declare i8* @llvm.frameaddress(i32)
+declare i32 @llvm.read_register.i32(metadata)
+declare i64 @llvm.read_register.i64(metadata)
+declare void @llvm.write_register.i32(metadata, i32)
+declare void @llvm.write_register.i64(metadata, i64)
+declare i8* @llvm.stacksave()
+declare void @llvm.stackrestore(i8*)
+declare void @llvm.prefetch(i8*, i32, i32, i32)
+declare void @llvm.pcmarker(i32)
+declare i64 @llvm.readcyclecounter()
+declare void @llvm.clear_cache(i8*, i8*)
+declare void @llvm.instrprof_increment(i8*, i64, i32, i32)
+
+!10 = !{!"rax"}
+define void @intrinsics.codegen() {
+ call i8* @llvm.returnaddress(i32 1)
+ ; CHECK: call i8* @llvm.returnaddress(i32 1)
+ call i8* @llvm.frameaddress(i32 1)
+ ; CHECK: call i8* @llvm.frameaddress(i32 1)
+
+ call i32 @llvm.read_register.i32(metadata !10)
+ ; CHECK: call i32 @llvm.read_register.i32(metadata !10)
+ call i64 @llvm.read_register.i64(metadata !10)
+ ; CHECK: call i64 @llvm.read_register.i64(metadata !10)
+ call void @llvm.write_register.i32(metadata !10, i32 0)
+ ; CHECK: call void @llvm.write_register.i32(metadata !10, i32 0)
+ call void @llvm.write_register.i64(metadata !10, i64 0)
+ ; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0)
+
+ %stack = call i8* @llvm.stacksave()
+ ; CHECK: %stack = call i8* @llvm.stacksave()
+ call void @llvm.stackrestore(i8* %stack)
+ ; CHECK: call void @llvm.stackrestore(i8* %stack)
+
+ call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0)
+ ; CHECK: call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0)
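+  ; (Operands, per the LangRef: address, rw=0 (read), locality=3 (highest),
+  ; cache type=0 (instruction cache).)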
+
+ call void @llvm.pcmarker(i32 1)
+ ; CHECK: call void @llvm.pcmarker(i32 1)
+
+ call i64 @llvm.readcyclecounter()
+ ; CHECK: call i64 @llvm.readcyclecounter()
+
+ call void @llvm.clear_cache(i8* null, i8* null)
+ ; CHECK: call void @llvm.clear_cache(i8* null, i8* null)
+
+ call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
+ ; CHECK: call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
+
+ ret void
+}
+
+declare void @llvm.localescape(...)
+declare i8* @llvm.localrecover(i8* %func, i8* %fp, i32 %idx)
+define void @intrinsics.localescape() {
+ %static.alloca = alloca i32
+ call void (...) @llvm.localescape(i32* %static.alloca)
+ ; CHECK: call void (...) @llvm.localescape(i32* %static.alloca)
+
+ call void @intrinsics.localrecover()
+
+ ret void
+}
+define void @intrinsics.localrecover() {
+ %func = bitcast void ()* @intrinsics.localescape to i8*
+ %fp = call i8* @llvm.frameaddress(i32 1)
+ call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
+ ; CHECK: call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
+
+ ret void
+}
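+; (These two functions pair up: @intrinsics.localescape records its escaped
+; alloca, and @intrinsics.localrecover, handed the parent frame pointer from
+; llvm.frameaddress(i32 1), recovers that alloca's address by index.)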
+
+; We need this function to provide `uses' for some metadata tests.
+define void @misc.metadata() {
+ call void @f1(), !srcloc !11
+ call void @f1(), !srcloc !12
+ call void @f1(), !srcloc !13
+ call void @f1(), !srcloc !14
+ ret void
+}
+
+declare void @op_bundle_callee_0()
+declare void @op_bundle_callee_1(i32,i32)
+
+define void @call_with_operand_bundle0(i32* %ptr) {
+; CHECK-LABEL: call_with_operand_bundle0(
+ entry:
+ %l = load i32, i32* %ptr
+ %x = add i32 42, 1
+ call void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "bar"(float 0.000000e+00, i64 100, i32 %l) ]
+; CHECK: call void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "bar"(float 0.000000e+00, i64 100, i32 %l) ]
+ ret void
+}
+
+define void @call_with_operand_bundle1(i32* %ptr) {
+; CHECK-LABEL: call_with_operand_bundle1(
+ entry:
+ %l = load i32, i32* %ptr
+ %x = add i32 42, 1
+
+ call void @op_bundle_callee_0()
+ call void @op_bundle_callee_0() [ "foo"() ]
+ call void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "bar"(float 0.000000e+00, i64 100, i32 %l) ]
+; CHECK: @op_bundle_callee_0(){{$}}
+; CHECK-NEXT: call void @op_bundle_callee_0() [ "foo"() ]
+; CHECK-NEXT: call void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "bar"(float 0.000000e+00, i64 100, i32 %l) ]
+ ret void
+}
+
+define void @call_with_operand_bundle2(i32* %ptr) {
+; CHECK-LABEL: call_with_operand_bundle2(
+ entry:
+ call void @op_bundle_callee_0() [ "foo"() ]
+; CHECK: call void @op_bundle_callee_0() [ "foo"() ]
+ ret void
+}
+
+define void @call_with_operand_bundle3(i32* %ptr) {
+; CHECK-LABEL: call_with_operand_bundle3(
+ entry:
+ %l = load i32, i32* %ptr
+ %x = add i32 42, 1
+ call void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ]
+; CHECK: call void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ]
+ ret void
+}
+
+define void @call_with_operand_bundle4(i32* %ptr) {
+; CHECK-LABEL: call_with_operand_bundle4(
+ entry:
+ %l = load i32, i32* %ptr
+ %x = add i32 42, 1
+ call void @op_bundle_callee_1(i32 10, i32 %x) [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ]
+; CHECK: call void @op_bundle_callee_1(i32 10, i32 %x) [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ]
+ ret void
+}
+
+; Invoke versions of the above tests:
+
+
+define void @invoke_with_operand_bundle0(i32* %ptr) personality i8 3 {
+; CHECK-LABEL: @invoke_with_operand_bundle0(
+ entry:
+ %l = load i32, i32* %ptr
+ %x = add i32 42, 1
+ invoke void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "bar"(float 0.000000e+00, i64 100, i32 %l) ] to label %normal unwind label %exception
+; CHECK: invoke void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "bar"(float 0.000000e+00, i64 100, i32 %l) ]
+
+exception:
+ %cleanup = landingpad i8 cleanup
+ br label %normal
+normal:
+ ret void
+}
+
+define void @invoke_with_operand_bundle1(i32* %ptr) personality i8 3 {
+; CHECK-LABEL: @invoke_with_operand_bundle1(
+ entry:
+ %l = load i32, i32* %ptr
+ %x = add i32 42, 1
+
+ invoke void @op_bundle_callee_0() to label %normal unwind label %exception
+; CHECK: invoke void @op_bundle_callee_0(){{$}}
+
+exception:
+ %cleanup = landingpad i8 cleanup
+ br label %normal
+
+normal:
+ invoke void @op_bundle_callee_0() [ "foo"() ] to label %normal1 unwind label %exception1
+; CHECK: invoke void @op_bundle_callee_0() [ "foo"() ]
+
+exception1:
+ %cleanup1 = landingpad i8 cleanup
+ br label %normal1
+
+normal1:
+ invoke void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ] to label %normal2 unwind label %exception2
+; CHECK: invoke void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ]
+
+exception2:
+ %cleanup2 = landingpad i8 cleanup
+ br label %normal2
+
+normal2:
+ ret void
+}
+
+define void @invoke_with_operand_bundle2(i32* %ptr) personality i8 3 {
+; CHECK-LABEL: @invoke_with_operand_bundle2(
+ entry:
+ invoke void @op_bundle_callee_0() [ "foo"() ] to label %normal unwind label %exception
+; CHECK: invoke void @op_bundle_callee_0() [ "foo"() ]
+
+exception:
+ %cleanup = landingpad i8 cleanup
+ br label %normal
+normal:
+ ret void
+}
+
+define void @invoke_with_operand_bundle3(i32* %ptr) personality i8 3 {
+; CHECK-LABEL: @invoke_with_operand_bundle3(
+ entry:
+ %l = load i32, i32* %ptr
+ %x = add i32 42, 1
+ invoke void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ] to label %normal unwind label %exception
+; CHECK: invoke void @op_bundle_callee_0() [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ]
+
+exception:
+ %cleanup = landingpad i8 cleanup
+ br label %normal
+normal:
+ ret void
+}
+
+define void @invoke_with_operand_bundle4(i32* %ptr) personality i8 3 {
+; CHECK-LABEL: @invoke_with_operand_bundle4(
+ entry:
+ %l = load i32, i32* %ptr
+ %x = add i32 42, 1
+ invoke void @op_bundle_callee_1(i32 10, i32 %x) [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ]
+ to label %normal unwind label %exception
+; CHECK: invoke void @op_bundle_callee_1(i32 10, i32 %x) [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ]
+
+exception:
+ %cleanup = landingpad i8 cleanup
+ br label %normal
+normal:
+ ret void
+}
+
+declare void @vaargs_func(...)
+define void @invoke_with_operand_bundle_vaarg(i32* %ptr) personality i8 3 {
+; CHECK-LABEL: @invoke_with_operand_bundle_vaarg(
+ entry:
+ %l = load i32, i32* %ptr
+ %x = add i32 42, 1
+ invoke void (...) @vaargs_func(i32 10, i32 %x) [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ]
+ to label %normal unwind label %exception
+; CHECK: invoke void (...) @vaargs_func(i32 10, i32 %x) [ "foo"(i32 42, i64 100, i32 %x), "foo"(i32 42, float 0.000000e+00, i32 %l) ]
+
+exception:
+ %cleanup = landingpad i8 cleanup
+ br label %normal
+normal:
+ ret void
+}
+
+
+declare void @f.writeonly() writeonly
+; CHECK: declare void @f.writeonly() #40
+
+;; Constant Expressions
+
+define i8** @constexpr() {
+ ; CHECK: ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
+ ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
+}
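+; (As I read the LangRef, 'inrange' makes it UB to access memory outside the
+; selected [4 x i8*] field through pointers derived from this GEP, which is
+; what permits optimizations such as splitting the underlying global.)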
+
+; CHECK: attributes #0 = { alignstack=4 }
+; CHECK: attributes #1 = { alignstack=8 }
+; CHECK: attributes #2 = { alwaysinline }
+; CHECK: attributes #3 = { cold }
+; CHECK: attributes #4 = { convergent }
+; CHECK: attributes #5 = { inlinehint }
+; CHECK: attributes #6 = { jumptable }
+; CHECK: attributes #7 = { minsize }
+; CHECK: attributes #8 = { naked }
+; CHECK: attributes #9 = { nobuiltin }
+; CHECK: attributes #10 = { noduplicate }
+; CHECK: attributes #11 = { noimplicitfloat }
+; CHECK: attributes #12 = { noinline }
+; CHECK: attributes #13 = { nonlazybind }
+; CHECK: attributes #14 = { noredzone }
+; CHECK: attributes #15 = { noreturn }
+; CHECK: attributes #16 = { nounwind }
+; CHECK: attributes #17 = { noinline optnone }
+; CHECK: attributes #18 = { optsize }
+; CHECK: attributes #19 = { readnone }
+; CHECK: attributes #20 = { readonly }
+; CHECK: attributes #21 = { returns_twice }
+; CHECK: attributes #22 = { safestack }
+; CHECK: attributes #23 = { sanitize_address }
+; CHECK: attributes #24 = { sanitize_memory }
+; CHECK: attributes #25 = { sanitize_thread }
+; CHECK: attributes #26 = { ssp }
+; CHECK: attributes #27 = { sspreq }
+; CHECK: attributes #28 = { sspstrong }
+; CHECK: attributes #29 = { "thunk" }
+; CHECK: attributes #30 = { uwtable }
+; CHECK: attributes #31 = { "cpu"="cortex-a8" }
+; CHECK: attributes #32 = { norecurse }
+; CHECK: attributes #33 = { inaccessiblememonly }
+; CHECK: attributes #34 = { inaccessiblemem_or_argmemonly }
+; CHECK: attributes #35 = { nounwind readnone }
+; CHECK: attributes #36 = { argmemonly nounwind readonly }
+; CHECK: attributes #37 = { argmemonly nounwind }
+; CHECK: attributes #38 = { nounwind readonly }
+; CHECK: attributes #39 = { inaccessiblemem_or_argmemonly nounwind }
+; CHECK: attributes #40 = { writeonly }
+; CHECK: attributes #41 = { builtin }
+
+;; Metadata
+
+; Metadata -- Module flags
+!llvm.module.flags = !{!0, !1, !2, !4, !5, !6}
+; CHECK: !llvm.module.flags = !{!0, !1, !2, !4, !5, !6}
+
+!0 = !{i32 1, !"mod1", i32 0}
+; CHECK: !0 = !{i32 1, !"mod1", i32 0}
+!1 = !{i32 2, !"mod2", i32 0}
+; CHECK: !1 = !{i32 2, !"mod2", i32 0}
+!2 = !{i32 3, !"mod3", !3}
+; CHECK: !2 = !{i32 3, !"mod3", !3}
+!3 = !{!"mod6", !0}
+; CHECK: !3 = !{!"mod6", !0}
+!4 = !{i32 4, !"mod4", i32 0}
+; CHECK: !4 = !{i32 4, !"mod4", i32 0}
+!5 = !{i32 5, !"mod5", !0}
+; CHECK: !5 = !{i32 5, !"mod5", !0}
+!6 = !{i32 6, !"mod6", !0}
+; CHECK: !6 = !{i32 6, !"mod6", !0}
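+; (The leading i32 of each flag is its merge behavior: 1 Error, 2 Warning,
+; 3 Require, 4 Override, 5 Append, 6 AppendUnique. So !2 requires flag "mod6"
+; to carry the value !0, which !6 above satisfies.)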
+
+; Metadata -- Check `distinct'
+!11 = distinct !{}
+; CHECK: !11 = distinct !{}
+!12 = distinct !{}
+; CHECK: !12 = distinct !{}
+!13 = !{!11}
+; CHECK: !13 = !{!11}
+!14 = !{!12}
+; CHECK: !14 = !{!12}
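+; (!11 and !12 have identical, empty contents but remain separate nodes
+; because 'distinct' gives each its own identity, so !13 and !14 really do
+; reference different nodes.)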
diff --git a/test/Bitcode/compatibility-4.0.ll.bc b/test/Bitcode/compatibility-4.0.ll.bc
new file mode 100644
index 000000000000..a2988ff95402
--- /dev/null
+++ b/test/Bitcode/compatibility-4.0.ll.bc
Binary files differ
diff --git a/test/Bitcode/compatibility.ll b/test/Bitcode/compatibility.ll
index e2b13f47d3b0..b1f52bbe059f 100644
--- a/test/Bitcode/compatibility.ll
+++ b/test/Bitcode/compatibility.ll
@@ -760,6 +760,8 @@ define void @fastmathflags(float %op1, float %op2) {
; CHECK: %f.nsz = fadd nsz float %op1, %op2
%f.arcp = fadd arcp float %op1, %op2
; CHECK: %f.arcp = fadd arcp float %op1, %op2
+ %f.contract = fadd contract float %op1, %op2
+ ; CHECK: %f.contract = fadd contract float %op1, %op2
%f.fast = fadd fast float %op1, %op2
; CHECK: %f.fast = fadd fast float %op1, %op2
ret void
@@ -1244,7 +1246,7 @@ exit:
; CHECK: select <2 x i1> <i1 true, i1 false>, <2 x i8> <i8 2, i8 3>, <2 x i8> <i8 3, i8 2>
call void @f.nobuiltin() builtin
- ; CHECK: call void @f.nobuiltin() #40
+ ; CHECK: call void @f.nobuiltin() #41
call fastcc noalias i32* @f.noalias() noinline
; CHECK: call fastcc noalias i32* @f.noalias() #12
@@ -1609,7 +1611,7 @@ normal:
declare void @f.writeonly() writeonly
-; CHECK: declare void @f.writeonly() #39
+; CHECK: declare void @f.writeonly() #40
;; Constant Expressions
@@ -1657,8 +1659,9 @@ define i8** @constexpr() {
; CHECK: attributes #36 = { argmemonly nounwind readonly }
; CHECK: attributes #37 = { argmemonly nounwind }
; CHECK: attributes #38 = { nounwind readonly }
-; CHECK: attributes #39 = { writeonly }
-; CHECK: attributes #40 = { builtin }
+; CHECK: attributes #39 = { inaccessiblemem_or_argmemonly nounwind }
+; CHECK: attributes #40 = { writeonly }
+; CHECK: attributes #41 = { builtin }
;; Metadata
diff --git a/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll b/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll
index 9e6e72cda3aa..3a5adea202e2 100644
--- a/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll
+++ b/test/Bitcode/thinlto-function-summary-callgraph-profile-summary.ll
@@ -10,7 +10,7 @@
; CHECK-NEXT: <VERSION
; See if the call to func is registered, using the expected callsite count
; and profile count, with value id matching the subsequent value symbol table.
-; CHECK-NEXT: <PERMODULE_PROFILE {{.*}} op4=[[HOT1:.*]] op5=3 op6=[[COLD:.*]] op7=1 op8=[[HOT2:.*]] op9=3 op10=[[NONE1:.*]] op11=2 op12=[[HOT3:.*]] op13=3 op14=[[NONE2:.*]] op15=2 op16=[[NONE3:.*]] op17=2/>
+; CHECK-NEXT: <PERMODULE_PROFILE {{.*}} op4=[[HOT1:.*]] op5=3 op6=[[COLD:.*]] op7=1 op8=[[HOT2:.*]] op9=3 op10=[[HOT4:.*]] op11=3 op12=[[NONE1:.*]] op13=2 op14=[[HOT3:.*]] op15=3 op16=[[NONE2:.*]] op17=2 op18=[[NONE3:.*]] op19=2 op20=[[LEGACY:.*]] op21=3/>
; CHECK-NEXT: </GLOBALVAL_SUMMARY_BLOCK>
; CHECK-LABEL: <VALUE_SYMTAB
; CHECK-NEXT: <FNENTRY {{.*}} record string = 'hot_function
@@ -21,6 +21,8 @@
; CHECK-DAG: <ENTRY abbrevid=6 op0=[[HOT1]] {{.*}} record string = 'hot1'
; CHECK-DAG: <ENTRY abbrevid=6 op0=[[HOT2]] {{.*}} record string = 'hot2'
; CHECK-DAG: <ENTRY abbrevid=6 op0=[[HOT3]] {{.*}} record string = 'hot3'
+; CHECK-DAG: <ENTRY abbrevid=6 op0=[[HOT4]] {{.*}} record string = 'hot4'
+; CHECK-DAG: <COMBINED_ENTRY abbrevid=11 op0=[[LEGACY]] op1=123/>
; CHECK-LABEL: </VALUE_SYMTAB>
; COMBINED: <GLOBALVAL_SUMMARY_BLOCK
@@ -48,6 +50,7 @@ entry:
Cold: ; 1/1000 goes here
call void @cold()
call void @hot2()
+ call void @hot4(), !prof !15
call void @none1()
br label %exit
Hot: ; 999/1000 goes here
@@ -68,6 +71,7 @@ exit:
declare void @hot1() #1
declare void @hot2() #1
declare void @hot3() #1
+declare void @hot4() #1
declare void @cold() #1
declare void @none1() #1
declare void @none2() #1
@@ -80,7 +84,7 @@ declare void @none3() #1
!llvm.module.flags = !{!1}
-!20 = !{!"function_entry_count", i64 110}
+!20 = !{!"function_entry_count", i64 110, i64 123}
!1 = !{i32 1, !"ProfileSummary", !2}
!2 = !{!3, !4, !5, !6, !7, !8, !9, !10}
@@ -96,3 +100,4 @@ declare void @none3() #1
!12 = !{i32 10000, i64 100, i32 1}
!13 = !{i32 999000, i64 100, i32 1}
!14 = !{i32 999999, i64 1, i32 2}
+!15 = !{!"branch_weights", i32 100}
diff --git a/test/Bitcode/thinlto-function-summary.ll b/test/Bitcode/thinlto-function-summary.ll
index 594aaab566d1..ff61b7713f0f 100644
--- a/test/Bitcode/thinlto-function-summary.ll
+++ b/test/Bitcode/thinlto-function-summary.ll
@@ -17,7 +17,7 @@
; BC-NEXT: <FNENTRY {{.*}} op0=4 {{.*}}> record string = 'variadic'
; BC-NEXT: <FNENTRY {{.*}} op0=1 {{.*}}> record string = 'foo'
; BC-NEXT: <FNENTRY {{.*}} op0=2 {{.*}}> record string = 'bar'
-; BC-NEXT: <FNENTRY {{.*}} op0=5 {{.*}}> record string = 'f'
+; BC-NEXT: <ENTRY {{.*}} op0=5 {{.*}}> record string = 'f'
; BC-NEXT: <ENTRY {{.*}} record string = 'h'
; BC-NEXT: <FNENTRY {{.*}} op0=3 {{.*}}> record string = 'anon.
diff --git a/test/Bitcode/thinlto-type-vcalls.ll b/test/Bitcode/thinlto-type-vcalls.ll
new file mode 100644
index 000000000000..40d229d12148
--- /dev/null
+++ b/test/Bitcode/thinlto-type-vcalls.ll
@@ -0,0 +1,105 @@
+; RUN: opt -module-summary %s -o %t.o
+; RUN: llvm-bcanalyzer -dump %t.o | FileCheck %s
+; RUN: llvm-lto -thinlto -o %t2 %t.o
+; RUN: llvm-bcanalyzer -dump %t2.thinlto.bc | FileCheck --check-prefix=COMBINED %s
+
+target datalayout = "e-p:64:64"
+target triple = "x86_64-unknown-linux-gnu"
+
+; COMBINED: <TYPE_TEST_ASSUME_VCALLS op0=6699318081062747564 op1=16/>
+; COMBINED-NEXT: <COMBINED
+; COMBINED-NEXT: <TYPE_CHECKED_LOAD_VCALLS op0=6699318081062747564 op1=16/>
+; COMBINED-NEXT: <COMBINED
+; COMBINED-NEXT: <TYPE_TEST_ASSUME_VCALLS op0=6699318081062747564 op1=24 op2=-2012135647395072713 op3=32/>
+; COMBINED-NEXT: <COMBINED
+; COMBINED-NEXT: <TYPE_TEST_ASSUME_CONST_VCALL op0=6699318081062747564 op1=16 op2=42/>
+; COMBINED-NEXT: <TYPE_TEST_ASSUME_CONST_VCALL op0=6699318081062747564 op1=24 op2=43/>
+; COMBINED-NEXT: <COMBINED
+; COMBINED-NEXT: <TYPE_CHECKED_LOAD_CONST_VCALL op0=6699318081062747564 op1=16 op2=42/>
+; COMBINED-NEXT: <COMBINED
+; COMBINED-NEXT: <TYPE_TESTS op0=7546896869197086323/>
+; COMBINED-NEXT: <COMBINED
+
+; CHECK: <TYPE_TEST_ASSUME_VCALLS op0=6699318081062747564 op1=16/>
+define void @f1([3 x i8*]* %vtable) {
+ %vtablei8 = bitcast [3 x i8*]* %vtable to i8*
+ %p = call i1 @llvm.type.test(i8* %vtablei8, metadata !"foo")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [3 x i8*], [3 x i8*]* %vtable, i32 0, i32 2
+ %fptr = load i8*, i8** %fptrptr
+ %fptr_casted = bitcast i8* %fptr to void (i8*, i32)*
+ call void %fptr_casted(i8* null, i32 undef)
+ ret void
+}
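+; (In the record above, op0 is the 64-bit GUID derived from the type id "foo"
+; and op1=16 is the byte offset of vtable slot 2: two 8-byte pointers, per the
+; "e-p:64:64" datalayout.)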
+
+; CHECK: <TYPE_TEST_ASSUME_VCALLS op0=6699318081062747564 op1=24 op2=-2012135647395072713 op3=32/>
+define void @f2([3 x i8*]* %vtable, [3 x i8*]* %vtable2) {
+ %vtablei8 = bitcast [3 x i8*]* %vtable to i8*
+ %p = call i1 @llvm.type.test(i8* %vtablei8, metadata !"foo")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [3 x i8*], [3 x i8*]* %vtable, i32 0, i32 3
+ %fptr = load i8*, i8** %fptrptr
+ %fptr_casted = bitcast i8* %fptr to void (i8*, i32)*
+ call void %fptr_casted(i8* null, i32 undef)
+
+ %vtablei82 = bitcast [3 x i8*]* %vtable2 to i8*
+ %p2 = call i1 @llvm.type.test(i8* %vtablei82, metadata !"bar")
+ call void @llvm.assume(i1 %p2)
+ %fptrptr2 = getelementptr [3 x i8*], [3 x i8*]* %vtable2, i32 0, i32 4
+ %fptr2 = load i8*, i8** %fptrptr2
+ %fptr_casted2 = bitcast i8* %fptr2 to void (i8*, i128)*
+ call void %fptr_casted2(i8* null, i128 0)
+
+ ret void
+}
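+; (One record carries both (GUID, offset) pairs: "foo" at slot 3, byte offset
+; 24, and "bar" at slot 4, byte offset 32; the negative op2 is simply "bar"'s
+; GUID printed as a signed 64-bit value.)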
+
+; CHECK: <TYPE_CHECKED_LOAD_VCALLS op0=6699318081062747564 op1=16/>
+define void @f3(i8* %vtable) {
+ %pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtable, i32 16, metadata !"foo")
+ %fptr = extractvalue {i8*, i1} %pair, 0
+ %fptr_casted = bitcast i8* %fptr to void (i8*, i32)*
+ call void %fptr_casted(i8* null, i32 undef)
+ ret void
+}
+
+; CHECK: <TYPE_TEST_ASSUME_CONST_VCALL op0=6699318081062747564 op1=16 op2=42/>
+; CHECK-NEXT: <TYPE_TEST_ASSUME_CONST_VCALL op0=6699318081062747564 op1=24 op2=43/>
+define void @f4([3 x i8*]* %vtable, [3 x i8*]* %vtable2) {
+ %vtablei8 = bitcast [3 x i8*]* %vtable to i8*
+ %p = call i1 @llvm.type.test(i8* %vtablei8, metadata !"foo")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [3 x i8*], [3 x i8*]* %vtable, i32 0, i32 2
+ %fptr = load i8*, i8** %fptrptr
+ %fptr_casted = bitcast i8* %fptr to void (i8*, i32)*
+ call void %fptr_casted(i8* null, i32 42)
+
+ %vtablei82 = bitcast [3 x i8*]* %vtable2 to i8*
+ %p2 = call i1 @llvm.type.test(i8* %vtablei82, metadata !"foo")
+ call void @llvm.assume(i1 %p2)
+ %fptrptr2 = getelementptr [3 x i8*], [3 x i8*]* %vtable2, i32 0, i32 3
+ %fptr2 = load i8*, i8** %fptrptr2
+ %fptr_casted2 = bitcast i8* %fptr2 to void (i8*, i32)*
+ call void %fptr_casted2(i8* null, i32 43)
+ ret void
+}
+
+; CHECK: <TYPE_CHECKED_LOAD_CONST_VCALL op0=6699318081062747564 op1=16 op2=42/>
+define void @f5(i8* %vtable) {
+ %pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtable, i32 16, metadata !"foo")
+ %fptr = extractvalue {i8*, i1} %pair, 0
+ %fptr_casted = bitcast i8* %fptr to void (i8*, i32)*
+ call void %fptr_casted(i8* null, i32 42)
+ ret void
+}
+
+; CHECK-NOT: <TYPE_CHECKED_LOAD_CONST_VCALL op0=7546896869197086323
+; CHECK: <TYPE_TESTS op0=7546896869197086323/>
+; CHECK-NOT: <TYPE_CHECKED_LOAD_CONST_VCALL op0=7546896869197086323
+define {i8*, i1} @f6(i8* %vtable) {
+ %pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtable, i32 16, metadata !"baz")
+ ret {i8*, i1} %pair
+}
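+; (Only a plain TYPE_TESTS record is expected here: the loaded pointer is
+; returned rather than called, so there is no virtual call to summarize.)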
+
+declare i1 @llvm.type.test(i8*, metadata) nounwind readnone
+declare void @llvm.assume(i1)
+declare {i8*, i1} @llvm.type.checked.load(i8*, i32, metadata)
diff --git a/test/Bitcode/upgrade-debug-info-for-profiling.ll b/test/Bitcode/upgrade-debug-info-for-profiling.ll
new file mode 100644
index 000000000000..d50f87fa6200
--- /dev/null
+++ b/test/Bitcode/upgrade-debug-info-for-profiling.ll
@@ -0,0 +1,10 @@
+; RUN: llvm-dis < %s.bc | FileCheck %s
+; RUN: verify-uselistorder < %s.bc
+
+!llvm.module.flags = !{!0}
+!0 = !{i32 2, !"Debug Info Version", i32 3}
+
+!llvm.dbg.cu = !{!1}
+; CHECK: DICompileUnit(language: DW_LANG_C99, file: !{{[0-9]+}}, isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug)
+!1 = distinct !DICompileUnit(language: DW_LANG_C99, file: !2, emissionKind: FullDebug)
+!2 = !DIFile(filename: "foo.c", directory: "/path/to/dir")
diff --git a/test/Bitcode/upgrade-debug-info-for-profiling.ll.bc b/test/Bitcode/upgrade-debug-info-for-profiling.ll.bc
new file mode 100644
index 000000000000..2bb54e9b3c55
--- /dev/null
+++ b/test/Bitcode/upgrade-debug-info-for-profiling.ll.bc
Binary files differ
diff --git a/test/Bitcode/upgrade-pointer-address-space.ll b/test/Bitcode/upgrade-pointer-address-space.ll
new file mode 100644
index 000000000000..8b85055651d3
--- /dev/null
+++ b/test/Bitcode/upgrade-pointer-address-space.ll
@@ -0,0 +1,5 @@
+; RUN: llvm-dis -o - %s.bc | FileCheck %s
+
+; CHECK-DAG: !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !{{[0-9]+}}, size: {{[0-9]+}})
+; CHECK-DAG: !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !{{[0-9]+}}, size: {{[0-9]+}})
+; CHECK-DAG: !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !{{[0-9]+}}, size: {{[0-9]+}})
diff --git a/test/Bitcode/upgrade-pointer-address-space.ll.bc b/test/Bitcode/upgrade-pointer-address-space.ll.bc
new file mode 100644
index 000000000000..0ad735e2430b
--- /dev/null
+++ b/test/Bitcode/upgrade-pointer-address-space.ll.bc
Binary files differ
diff --git a/test/BugPoint/compile-custom.ll b/test/BugPoint/compile-custom.ll
index d152f08626f8..847d1184f016 100755
--- a/test/BugPoint/compile-custom.ll
+++ b/test/BugPoint/compile-custom.ll
@@ -1,4 +1,4 @@
-; RUN: bugpoint -load %llvmshlibdir/BugpointPasses%shlibext --compile-custom --compile-command="%python %s.py arg1 arg2" --output-prefix %t %s | FileCheck %s
+; RUN: bugpoint -load %llvmshlibdir/BugpointPasses%shlibext --compile-custom --compile-command="%python %/s.py arg1 arg2" --output-prefix %t %s | FileCheck %s
; REQUIRES: loadable_module
; Test that arguments are correctly passed in --compile-command. The output
diff --git a/test/BugPoint/invalid-debuginfo.ll b/test/BugPoint/invalid-debuginfo.ll
index 91b01493d1f9..2005a13b6757 100644
--- a/test/BugPoint/invalid-debuginfo.ll
+++ b/test/BugPoint/invalid-debuginfo.ll
@@ -1,6 +1,6 @@
; RUN: bugpoint -load %llvmshlibdir/BugpointPasses%shlibext %s -output-prefix %t -bugpoint-crash-too-many-cus -silence-passes 2>&1 | FileCheck %s
; REQUIRES: loadable_module
-; CHECK: All DICompileUnits must be listed in llvm.dbg.cu
+; CHECK: DICompileUnit not listed in llvm.dbg.cu
; When bugpoint hacks at this testcase it will at one point create illegal IR
; that won't even pass the Verifier. A bugpoint *driver* built with assertions
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-callingconv-ios.ll b/test/CodeGen/AArch64/GlobalISel/arm64-callingconv-ios.ll
new file mode 100644
index 000000000000..a70cee0efcb6
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-callingconv-ios.ll
@@ -0,0 +1,28 @@
+; RUN: llc -O0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - 2>&1 | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-apple-ios9.0"
+
+; CHECK-LABEL: name: test_varargs
+; CHECK: [[ANSWER:%[0-9]+]](s32) = G_CONSTANT i32 42
+; CHECK: [[D_ONE:%[0-9]+]](s64) = G_FCONSTANT double 1.000000e+00
+; CHECK: [[TWELVE:%[0-9]+]](s64) = G_CONSTANT i64 12
+; CHECK: [[THREE:%[0-9]+]](s8) = G_CONSTANT i8 3
+; CHECK: [[ONE:%[0-9]+]](s16) = G_CONSTANT i16 1
+; CHECK: [[FOUR:%[0-9]+]](s32) = G_CONSTANT i32 4
+; CHECK: [[F_ONE:%[0-9]+]](s32) = G_FCONSTANT float 1.000000e+00
+; CHECK: [[TWO:%[0-9]+]](s64) = G_FCONSTANT double 2.000000e+00
+
+; CHECK: %w0 = COPY [[ANSWER]]
+; CHECK: %d0 = COPY [[D_ONE]]
+; CHECK: %x1 = COPY [[TWELVE]]
+; CHECK: G_STORE [[THREE]](s8), {{%[0-9]+}}(p0) :: (store 1 into stack, align 0)
+; CHECK: G_STORE [[ONE]](s16), {{%[0-9]+}}(p0) :: (store 2 into stack + 8, align 0)
+; CHECK: G_STORE [[FOUR]](s32), {{%[0-9]+}}(p0) :: (store 4 into stack + 16, align 0)
+; CHECK: G_STORE [[F_ONE]](s32), {{%[0-9]+}}(p0) :: (store 4 into stack + 24, align 0)
+; CHECK: G_STORE [[TWO]](s64), {{%[0-9]+}}(p0) :: (store 8 into stack + 32, align 0)
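+; (Darwin passes all variadic arguments on the stack in consecutive 8-byte
+; slots, which is why even the i8 and i16 land at 8-byte strides above.)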
+declare void @varargs(i32, double, i64, ...)
+define void @test_varargs() {
+ call void(i32, double, i64, ...) @varargs(i32 42, double 1.0, i64 12, i8 3, i16 1, i32 4, float 1.0, double 2.0)
+ ret void
+}
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll b/test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll
index 95b2ea2b4ffc..59b9bb49f0ee 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll
@@ -56,3 +56,41 @@ define i8* @args_ptrs(i8* %x0, i16* %x1, <2 x i8>* %x2, {i8, i16, i32}* %x3,
define [1 x double] @args_arr([1 x double] %d0) {
ret [1 x double] %d0
}
+
+; CHECK-LABEL: name: test_varargs
+; CHECK: [[ANSWER:%[0-9]+]](s32) = G_CONSTANT i32 42
+; CHECK: [[D_ONE:%[0-9]+]](s64) = G_FCONSTANT double 1.000000e+00
+; CHECK: [[TWELVE:%[0-9]+]](s64) = G_CONSTANT i64 12
+; CHECK: [[THREE:%[0-9]+]](s8) = G_CONSTANT i8 3
+; CHECK: [[ONE:%[0-9]+]](s16) = G_CONSTANT i16 1
+; CHECK: [[FOUR:%[0-9]+]](s32) = G_CONSTANT i32 4
+; CHECK: [[F_ONE:%[0-9]+]](s32) = G_FCONSTANT float 1.000000e+00
+; CHECK: [[TWO:%[0-9]+]](s64) = G_FCONSTANT double 2.000000e+00
+
+; CHECK: %w0 = COPY [[ANSWER]]
+; CHECK: %d0 = COPY [[D_ONE]]
+; CHECK: %x1 = COPY [[TWELVE]]
+; CHECK: %w2 = COPY [[THREE]](s8)
+; CHECK: %w3 = COPY [[ONE]](s16)
+; CHECK: %w4 = COPY [[FOUR]](s32)
+; CHECK: %s1 = COPY [[F_ONE]](s32)
+; CHECK: %d2 = COPY [[TWO]](s64)
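+; (On AAPCS/ELF, by contrast, the same variadic arguments are passed in
+; registers just like named parameters, as the COPYs above show.)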
+declare void @varargs(i32, double, i64, ...)
+define void @test_varargs() {
+ call void(i32, double, i64, ...) @varargs(i32 42, double 1.0, i64 12, i8 3, i16 1, i32 4, float 1.0, double 2.0)
+ ret void
+}
+
+; signext/zeroext parameters on the stack: not part of any real ABI as far as I
+; know, but ELF currently allocates 8 bytes for a signext parameter on the
+; stack. The ADJCALLSTACK ops should reflect this, even if the difference is
+; theoretical.
+declare void @stack_ext_needed([8 x i64], i8 signext %in)
+; CHECK-LABEL: name: test_stack_ext_needed
+; CHECK: ADJCALLSTACKDOWN 8
+; CHECK: BL @stack_ext_needed
+; CHECK: ADJCALLSTACKUP 8
+define void @test_stack_ext_needed() {
+ call void @stack_ext_needed([8 x i64] undef, i8 signext 42)
+ ret void
+}
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
index 8d1dbc246e6a..e40199d82c9d 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -global-isel -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefix=ERROR
; RUN: llc -O0 -global-isel -global-isel-abort=0 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefix=FALLBACK
-; RUN: llc -O0 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o %t.out 2> %t.err
+; RUN: llc -O0 -global-isel -global-isel-abort=2 -pass-remarks-missed='gisel*' -verify-machineinstrs %s -o %t.out 2> %t.err
; RUN: FileCheck %s --check-prefix=FALLBACK-WITH-REPORT-OUT < %t.out
; RUN: FileCheck %s --check-prefix=FALLBACK-WITH-REPORT-ERR < %t.err
; This file checks that the fallback path to SelectionDAG works.
@@ -14,10 +14,11 @@ target triple = "aarch64--"
; We use __fixunstfti as the common denominator for __fixunstfti on Linux and
; ___fixunstfti on iOS
-; ERROR: Unable to lower arguments
+; ERROR: unable to lower arguments: i128 (i128)* (in function: ABIi128)
; FALLBACK: ldr q0,
; FALLBACK-NEXT: bl __fixunstfti
;
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to lower arguments: i128 (i128)* (in function: ABIi128)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for ABIi128
; FALLBACK-WITH-REPORT-OUT-LABEL: ABIi128:
; FALLBACK-WITH-REPORT-OUT: ldr q0,
@@ -31,6 +32,7 @@ define i128 @ABIi128(i128 %arg1) {
; It happens that we don't handle ConstantArray instances yet during
; translation. Any other constant would be fine too.
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate constant: [1 x double] (in function: constant)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for constant
; FALLBACK-WITH-REPORT-OUT-LABEL: constant:
; FALLBACK-WITH-REPORT-OUT: fmov d0, #1.0
@@ -41,6 +43,7 @@ define [1 x double] @constant() {
; The key problem here is that we may fail to create an MBB referenced by a
; PHI. If so, we cannot complete the G_PHI and mustn't try, or bad things
; happen.
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %vreg4, %vreg2; mem:ST4[%addr] GPR:%vreg4,%vreg2 (in function: pending_phis)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for pending_phis
; FALLBACK-WITH-REPORT-OUT-LABEL: pending_phis:
define i32 @pending_phis(i1 %tst, i32 %val, i32* %addr) {
@@ -60,6 +63,7 @@ false:
}
; General legalizer inability to handle types whose size isn't a power of 2.
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg1<def>(s42) = G_LOAD %vreg0; mem:LD6[%addr](align=8) (in function: odd_type)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_type
; FALLBACK-WITH-REPORT-OUT-LABEL: odd_type:
define void @odd_type(i42* %addr) {
@@ -67,8 +71,17 @@ define void @odd_type(i42* %addr) {
ret void
}
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg1<def>(<7 x s32>) = G_LOAD %vreg0; mem:LD28[%addr](align=32) (in function: odd_vector)
+; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_vector
+; FALLBACK-WITH-REPORT-OUT-LABEL: odd_vector:
+define void @odd_vector(<7 x i32>* %addr) {
+ %vec = load <7 x i32>, <7 x i32>* %addr
+ ret void
+}
+
; RegBankSelect crashed when given invalid mappings, and AArch64's
; implementation produces valid-but-nonsense mappings for G_SEQUENCE.
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to map instruction
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for sequence_mapping
; FALLBACK-WITH-REPORT-OUT-LABEL: sequence_mapping:
define void @sequence_mapping([2 x i64] %in) {
@@ -76,42 +89,68 @@ define void @sequence_mapping([2 x i64] %in) {
}
; Legalizer was asserting when it encountered an unexpected default action.
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to map instruction
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for legal_default
; FALLBACK-WITH-REPORT-LABEL: legal_default:
-define void @legal_default(i64 %in) {
- insertvalue [2 x i64] undef, i64 %in, 0
+define void @legal_default([8 x i8] %in) {
+ insertvalue { [4 x i8], [8 x i8], [4 x i8] } undef, [8 x i8] %in, 1
ret void
}
-; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for debug_insts
-; FALLBACK-WITH-REPORT-LABEL: debug_insts:
-define void @debug_insts(i32 %in) #0 !dbg !7 {
-entry:
- %in.addr = alloca i32, align 4
- store i32 %in, i32* %in.addr, align 4
- call void @llvm.dbg.declare(metadata i32* %in.addr, metadata !11, metadata !12), !dbg !13
- ret void, !dbg !14
+; AArch64 was asserting instead of returning an invalid mapping for unknown
+; sizes.
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction: ret: ' ret i128 undef' (in function: sequence_sizes)
+; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for sequence_sizes
+; FALLBACK-WITH-REPORT-LABEL: sequence_sizes:
+define i128 @sequence_sizes([8 x i8] %in) {
+ ret i128 undef
+}
+
+; Just to make sure we don't accidentally emit a normal load/store.
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %vreg2<def>(s64) = G_LOAD %vreg0; mem:LD8[%addr] GPR:%vreg2,%vreg0 (in function: atomic_ops)
+; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for atomic_ops
+; FALLBACK-WITH-REPORT-LABEL: atomic_ops:
+define i64 @atomic_ops(i64* %addr) {
+ store atomic i64 0, i64* %addr unordered, align 8
+ %res = load atomic i64, i64* %addr seq_cst, align 8
+ ret i64 %res
+}
+
+; Make sure we don't mess up metadata arguments.
+declare void @llvm.write_register.i64(metadata, i64)
+
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction: call: ' call void @llvm.write_register.i64(metadata !0, i64 0)' (in function: test_write_register_intrin)
+; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_write_register_intrin
+; FALLBACK-WITH-REPORT-LABEL: test_write_register_intrin:
+define void @test_write_register_intrin() {
+ call void @llvm.write_register.i64(metadata !{!"sp"}, i64 0)
+ ret void
}
-; Function Attrs: nounwind readnone
-declare void @llvm.dbg.declare(metadata, metadata, metadata)
-
-!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!3, !4, !5}
-!llvm.ident = !{!6}
-
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 4.0.0 (trunk 289075) (llvm/trunk 289080)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
-!1 = !DIFile(filename: "tmp.c", directory: "/Users/tim/llvm/build")
-!2 = !{}
-!3 = !{i32 2, !"Dwarf Version", i32 4}
-!4 = !{i32 2, !"Debug Info Version", i32 3}
-!5 = !{i32 1, !"PIC Level", i32 2}
-!6 = !{!"clang version 4.0.0 (trunk 289075) (llvm/trunk 289080)"}
-!7 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
-!8 = !DISubroutineType(types: !9)
-!9 = !{null, !10}
-!10 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
-!11 = !DILocalVariable(name: "in", arg: 1, scope: !7, file: !1, line: 1, type: !10)
-!12 = !DIExpression()
-!13 = !DILocation(line: 1, column: 14, scope: !7)
-!14 = !DILocation(line: 2, column: 1, scope: !7)
+@_ZTIi = external global i8*
+declare i32 @__gxx_personality_v0(...)
+
+; Check that we fall back on invoke translation failures.
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction: invoke: ' invoke void %callee(i128 0)
+; FALLBACK-WITH-REPORT-NEXT: to label %continue unwind label %broken' (in function: invoke_weird_type)
+; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for invoke_weird_type
+; FALLBACK-WITH-REPORT-OUT-LABEL: invoke_weird_type:
+define void @invoke_weird_type(void(i128)* %callee) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+ invoke void %callee(i128 0)
+ to label %continue unwind label %broken
+
+broken:
+ landingpad { i8*, i32 } catch i8* bitcast(i8** @_ZTIi to i8*)
+ ret void
+
+continue:
+ ret void
+}
+
+; Check that we fall back when the legalizer can't handle an fp128 (quad) constant.
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg0<def>(s128) = G_FCONSTANT quad 2
+; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_quad_dump
+; FALLBACK-WITH-REPORT-OUT-LABEL: test_quad_dump:
+define fp128 @test_quad_dump() {
+ ret fp128 0xL00000000000000004000000000000000
+}
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-instructionselect.mir b/test/CodeGen/AArch64/GlobalISel/arm64-instructionselect.mir
deleted file mode 100644
index ece5a858b49c..000000000000
--- a/test/CodeGen/AArch64/GlobalISel/arm64-instructionselect.mir
+++ /dev/null
@@ -1,2979 +0,0 @@
-# RUN: llc -O0 -mtriple=aarch64-apple-ios -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=IOS
-# RUN: llc -O0 -mtriple=aarch64-linux-gnu -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=LINUX-DEFAULT
-# RUN: llc -O0 -mtriple=aarch64-linux-gnu -relocation-model=pic -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=LINUX-PIC
-
-# Test the instruction selector.
-# As we support more instructions, we need to split this up.
-
---- |
- target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
-
- define void @add_s8_gpr() { ret void }
- define void @add_s16_gpr() { ret void }
- define void @add_s32_gpr() { ret void }
- define void @add_s64_gpr() { ret void }
-
- define void @sub_s8_gpr() { ret void }
- define void @sub_s16_gpr() { ret void }
- define void @sub_s32_gpr() { ret void }
- define void @sub_s64_gpr() { ret void }
-
- define void @or_s1_gpr() { ret void }
- define void @or_s16_gpr() { ret void }
- define void @or_s32_gpr() { ret void }
- define void @or_s64_gpr() { ret void }
- define void @or_v2s32_fpr() { ret void }
-
- define void @xor_s8_gpr() { ret void }
- define void @xor_s16_gpr() { ret void }
- define void @xor_s32_gpr() { ret void }
- define void @xor_s64_gpr() { ret void }
-
- define void @and_s8_gpr() { ret void }
- define void @and_s16_gpr() { ret void }
- define void @and_s32_gpr() { ret void }
- define void @and_s64_gpr() { ret void }
-
- define void @shl_s8_gpr() { ret void }
- define void @shl_s16_gpr() { ret void }
- define void @shl_s32_gpr() { ret void }
- define void @shl_s64_gpr() { ret void }
-
- define void @lshr_s32_gpr() { ret void }
- define void @lshr_s64_gpr() { ret void }
-
- define void @ashr_s32_gpr() { ret void }
- define void @ashr_s64_gpr() { ret void }
-
- define void @mul_s8_gpr() { ret void }
- define void @mul_s16_gpr() { ret void }
- define void @mul_s32_gpr() { ret void }
- define void @mul_s64_gpr() { ret void }
-
- define void @sdiv_s32_gpr() { ret void }
- define void @sdiv_s64_gpr() { ret void }
-
- define void @udiv_s32_gpr() { ret void }
- define void @udiv_s64_gpr() { ret void }
-
- define void @fadd_s32_gpr() { ret void }
- define void @fadd_s64_gpr() { ret void }
-
- define void @fsub_s32_gpr() { ret void }
- define void @fsub_s64_gpr() { ret void }
-
- define void @fmul_s32_gpr() { ret void }
- define void @fmul_s64_gpr() { ret void }
-
- define void @fdiv_s32_gpr() { ret void }
- define void @fdiv_s64_gpr() { ret void }
-
- define void @sitofp_s32_s32_fpr() { ret void }
- define void @sitofp_s32_s64_fpr() { ret void }
- define void @sitofp_s64_s32_fpr() { ret void }
- define void @sitofp_s64_s64_fpr() { ret void }
-
- define void @uitofp_s32_s32_fpr() { ret void }
- define void @uitofp_s32_s64_fpr() { ret void }
- define void @uitofp_s64_s32_fpr() { ret void }
- define void @uitofp_s64_s64_fpr() { ret void }
-
- define void @fptosi_s32_s32_gpr() { ret void }
- define void @fptosi_s32_s64_gpr() { ret void }
- define void @fptosi_s64_s32_gpr() { ret void }
- define void @fptosi_s64_s64_gpr() { ret void }
-
- define void @fptoui_s32_s32_gpr() { ret void }
- define void @fptoui_s32_s64_gpr() { ret void }
- define void @fptoui_s64_s32_gpr() { ret void }
- define void @fptoui_s64_s64_gpr() { ret void }
-
- define void @fptrunc() { ret void }
- define void @fpext() { ret void }
-
- define void @unconditional_br() { ret void }
- define void @conditional_br() { ret void }
-
- define void @load_s64_gpr(i64* %addr) { ret void }
- define void @load_s32_gpr(i32* %addr) { ret void }
- define void @load_s16_gpr(i16* %addr) { ret void }
- define void @load_s8_gpr(i8* %addr) { ret void }
- define void @load_s64_fpr(i64* %addr) { ret void }
- define void @load_s32_fpr(i32* %addr) { ret void }
- define void @load_s16_fpr(i16* %addr) { ret void }
- define void @load_s8_fpr(i8* %addr) { ret void }
-
- define void @store_s64_gpr(i64* %addr) { ret void }
- define void @store_s32_gpr(i32* %addr) { ret void }
- define void @store_s16_gpr(i16* %addr) { ret void }
- define void @store_s8_gpr(i8* %addr) { ret void }
- define void @store_s64_fpr(i64* %addr) { ret void }
- define void @store_s32_fpr(i32* %addr) { ret void }
-
- define void @frame_index() {
- %ptr0 = alloca i64
- ret void
- }
-
- define void @selected_property() { ret void }
-
- define i32 @const_s32() { ret i32 42 }
- define i64 @const_s64() { ret i64 1234567890123 }
-
- define i32 @fconst_s32() { ret i32 42 }
- define i64 @fconst_s64() { ret i64 1234567890123 }
-
- define i8* @gep(i8* %in) { ret i8* undef }
-
- @var_local = global i8 0
- define i8* @global_local() { ret i8* undef }
-
- @var_got = external global i8
- define i8* @global_got() { ret i8* undef }
-
- define void @trunc() { ret void }
-
- define void @anyext_gpr() { ret void }
- define void @zext_gpr() { ret void }
- define void @sext_gpr() { ret void }
-
- define void @casts() { ret void }
-
- define void @bitcast_s32_gpr() { ret void }
- define void @bitcast_s32_fpr() { ret void }
- define void @bitcast_s32_gpr_fpr() { ret void }
- define void @bitcast_s32_fpr_gpr() { ret void }
- define void @bitcast_s64_gpr() { ret void }
- define void @bitcast_s64_fpr() { ret void }
- define void @bitcast_s64_gpr_fpr() { ret void }
- define void @bitcast_s64_fpr_gpr() { ret void }
-
- define void @icmp() { ret void }
- define void @fcmp() { ret void }
-
- define void @phi() { ret void }
-
- define void @select() { ret void }
-...
-
----
-# CHECK-LABEL: name: add_s8_gpr
-name: add_s8_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = ADDWrr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s8) = COPY %w0
- %1(s8) = COPY %w1
- %2(s8) = G_ADD %0, %1
-...
-
----
-# CHECK-LABEL: name: add_s16_gpr
-name: add_s16_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = ADDWrr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s16) = COPY %w0
- %1(s16) = COPY %w1
- %2(s16) = G_ADD %0, %1
-...
-
----
-# Check that we select a 32-bit GPR G_ADD into ADDWrr on GPR32.
-# Also check that we constrain the register class of the COPY to GPR32.
-# CHECK-LABEL: name: add_s32_gpr
-name: add_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = ADDWrr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s32) = COPY %w0
- %1(s32) = COPY %w1
- %2(s32) = G_ADD %0, %1
-...
-
----
-# Same as add_s32_gpr, for 64-bit operations.
-# CHECK-LABEL: name: add_s64_gpr
-name: add_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64 }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-# CHECK-NEXT: - { id: 2, class: gpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %x1
-# CHECK: %2 = ADDXrr %0, %1
-body: |
- bb.0:
- liveins: %x0, %x1
-
- %0(s64) = COPY %x0
- %1(s64) = COPY %x1
- %2(s64) = G_ADD %0, %1
-...
-
----
-# CHECK-LABEL: name: sub_s8_gpr
-name: sub_s8_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = SUBWrr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s8) = COPY %w0
- %1(s8) = COPY %w1
- %2(s8) = G_SUB %0, %1
-...
-
----
-# CHECK-LABEL: name: sub_s16_gpr
-name: sub_s16_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = SUBWrr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s16) = COPY %w0
- %1(s16) = COPY %w1
- %2(s16) = G_SUB %0, %1
-...
-
----
-# Same as add_s32_gpr, for G_SUB operations.
-# CHECK-LABEL: name: sub_s32_gpr
-name: sub_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = SUBWrr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s32) = COPY %w0
- %1(s32) = COPY %w1
- %2(s32) = G_SUB %0, %1
-...
-
----
-# Same as add_s64_gpr, for G_SUB operations.
-# CHECK-LABEL: name: sub_s64_gpr
-name: sub_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64 }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-# CHECK-NEXT: - { id: 2, class: gpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %x1
-# CHECK: %2 = SUBXrr %0, %1
-body: |
- bb.0:
- liveins: %x0, %x1
-
- %0(s64) = COPY %x0
- %1(s64) = COPY %x1
- %2(s64) = G_SUB %0, %1
-...
-
----
-# CHECK-LABEL: name: or_s1_gpr
-name: or_s1_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = ORRWrr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s1) = COPY %w0
- %1(s1) = COPY %w1
- %2(s1) = G_OR %0, %1
-...
-
----
-# CHECK-LABEL: name: or_s16_gpr
-name: or_s16_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = ORRWrr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s16) = COPY %w0
- %1(s16) = COPY %w1
- %2(s16) = G_OR %0, %1
-...
-
----
-# Same as add_s32_gpr, for G_OR operations.
-# CHECK-LABEL: name: or_s32_gpr
-name: or_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = ORRWrr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s32) = COPY %w0
- %1(s32) = COPY %w1
- %2(s32) = G_OR %0, %1
-...
-
----
-# Same as add_s64_gpr, for G_OR operations.
-# CHECK-LABEL: name: or_s64_gpr
-name: or_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64 }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-# CHECK-NEXT: - { id: 2, class: gpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %x1
-# CHECK: %2 = ORRXrr %0, %1
-body: |
- bb.0:
- liveins: %x0, %x1
-
- %0(s64) = COPY %x0
- %1(s64) = COPY %x1
- %2(s64) = G_OR %0, %1
-...
-
----
-# 64-bit G_OR on vector registers.
-# CHECK-LABEL: name: or_v2s32_fpr
-name: or_v2s32_fpr
-legalized: true
-regBankSelected: true
-#
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr64 }
-# CHECK-NEXT: - { id: 1, class: fpr64 }
-# CHECK-NEXT: - { id: 2, class: fpr64 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: fpr }
- - { id: 2, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %d0
-# CHECK: %1 = COPY %d1
-# The exact OR opcode does not matter as long as it operates
-# on a 64-bit-wide vector.
-# CHECK: %2 = ORRv8i8 %0, %1
-body: |
- bb.0:
- liveins: %d0, %d1
-
- %0(<2 x s32>) = COPY %d0
- %1(<2 x s32>) = COPY %d1
- %2(<2 x s32>) = G_OR %0, %1
-...
-
----
-# CHECK-LABEL: name: xor_s8_gpr
-name: xor_s8_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = EORWrr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s8) = COPY %w0
- %1(s8) = COPY %w1
- %2(s8) = G_XOR %0, %1
-...
-
----
-# CHECK-LABEL: name: xor_s16_gpr
-name: xor_s16_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = EORWrr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s16) = COPY %w0
- %1(s16) = COPY %w1
- %2(s16) = G_XOR %0, %1
-...
-
----
-# Same as add_s32_gpr, for G_XOR operations.
-# CHECK-LABEL: name: xor_s32_gpr
-name: xor_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = EORWrr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s32) = COPY %w0
- %1(s32) = COPY %w1
- %2(s32) = G_XOR %0, %1
-...
-
----
-# Same as add_s64_gpr, for G_XOR operations.
-# CHECK-LABEL: name: xor_s64_gpr
-name: xor_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64 }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-# CHECK-NEXT: - { id: 2, class: gpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %x1
-# CHECK: %2 = EORXrr %0, %1
-body: |
- bb.0:
- liveins: %x0, %x1
-
- %0(s64) = COPY %x0
- %1(s64) = COPY %x1
- %2(s64) = G_XOR %0, %1
-...
-
----
-# CHECK-LABEL: name: and_s8_gpr
-name: and_s8_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = ANDWrr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s8) = COPY %w0
- %1(s8) = COPY %w1
- %2(s8) = G_AND %0, %1
-...
-
----
-# CHECK-LABEL: name: and_s16_gpr
-name: and_s16_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = ANDWrr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s16) = COPY %w0
- %1(s16) = COPY %w1
- %2(s16) = G_AND %0, %1
-...
-
----
-# Same as add_s32_gpr, for G_AND operations.
-# CHECK-LABEL: name: and_s32_gpr
-name: and_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = ANDWrr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s32) = COPY %w0
- %1(s32) = COPY %w1
- %2(s32) = G_AND %0, %1
-...
-
----
-# Same as add_s64_gpr, for G_AND operations.
-# CHECK-LABEL: name: and_s64_gpr
-name: and_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64 }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-# CHECK-NEXT: - { id: 2, class: gpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %x1
-# CHECK: %2 = ANDXrr %0, %1
-body: |
- bb.0:
- liveins: %x0, %x1
-
- %0(s64) = COPY %x0
- %1(s64) = COPY %x1
- %2(s64) = G_AND %0, %1
-...
-
----
-# CHECK-LABEL: name: shl_s8_gpr
-name: shl_s8_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = LSLVWr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s8) = COPY %w0
- %1(s8) = COPY %w1
- %2(s8) = G_SHL %0, %1
-...
-
----
-# CHECK-LABEL: name: shl_s16_gpr
-name: shl_s16_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = LSLVWr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s16) = COPY %w0
- %1(s16) = COPY %w1
- %2(s16) = G_SHL %0, %1
-...
-
----
-# Same as add_s32_gpr, for G_SHL operations.
-# CHECK-LABEL: name: shl_s32_gpr
-name: shl_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = LSLVWr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s32) = COPY %w0
- %1(s32) = COPY %w1
- %2(s32) = G_SHL %0, %1
-...
-
----
-# Same as add_s64_gpr, for G_SHL operations.
-# CHECK-LABEL: name: shl_s64_gpr
-name: shl_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64 }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-# CHECK-NEXT: - { id: 2, class: gpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %x1
-# CHECK: %2 = LSLVXr %0, %1
-body: |
- bb.0:
- liveins: %x0, %x1
-
- %0(s64) = COPY %x0
- %1(s64) = COPY %x1
- %2(s64) = G_SHL %0, %1
-...
-
----
-# Same as add_s32_gpr, for G_LSHR operations.
-# CHECK-LABEL: name: lshr_s32_gpr
-name: lshr_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = LSRVWr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s32) = COPY %w0
- %1(s32) = COPY %w1
- %2(s32) = G_LSHR %0, %1
-...
-
----
-# Same as add_s64_gpr, for G_LSHR operations.
-# CHECK-LABEL: name: lshr_s64_gpr
-name: lshr_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64 }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-# CHECK-NEXT: - { id: 2, class: gpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %x1
-# CHECK: %2 = LSRVXr %0, %1
-body: |
- bb.0:
- liveins: %x0, %x1
-
- %0(s64) = COPY %x0
- %1(s64) = COPY %x1
- %2(s64) = G_LSHR %0, %1
-...
-
----
-# Same as add_s32_gpr, for G_ASHR operations.
-# CHECK-LABEL: name: ashr_s32_gpr
-name: ashr_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = ASRVWr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s32) = COPY %w0
- %1(s32) = COPY %w1
- %2(s32) = G_ASHR %0, %1
-...
-
----
-# Same as add_s64_gpr, for G_ASHR operations.
-# CHECK-LABEL: name: ashr_s64_gpr
-name: ashr_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64 }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-# CHECK-NEXT: - { id: 2, class: gpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %x1
-# CHECK: %2 = ASRVXr %0, %1
-body: |
- bb.0:
- liveins: %x0, %x1
-
- %0(s64) = COPY %x0
- %1(s64) = COPY %x1
- %2(s64) = G_ASHR %0, %1
-...
-
----
-# CHECK-LABEL: name: mul_s8_gpr
-name: mul_s8_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = MADDWrrr %0, %1, %wzr
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s8) = COPY %w0
- %1(s8) = COPY %w1
- %2(s8) = G_MUL %0, %1
-...
-
----
-# CHECK-LABEL: name: mul_s16_gpr
-name: mul_s16_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = MADDWrrr %0, %1, %wzr
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s16) = COPY %w0
- %1(s16) = COPY %w1
- %2(s16) = G_MUL %0, %1
-...
-
----
-# Check that we select an s32 GPR G_MUL. This is trickier than other binops
-# because there is only MADDWrrr, and we have to use the WZR physreg.
-# CHECK-LABEL: name: mul_s32_gpr
-name: mul_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = MADDWrrr %0, %1, %wzr
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s32) = COPY %w0
- %1(s32) = COPY %w1
- %2(s32) = G_MUL %0, %1
-...
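# AArch64 has no standalone register-register multiply: MUL is an alias of
# MADD with WZR/XZR as the addend, so the instruction selected above still
# prints as a multiply at the assembly level. Roughly:
#
#   madd w2, w0, w1, wzr    // w2 = w0 * w1 + 0; disassembled as "mul w2, w0, w1"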
-
----
-# Same as mul_s32_gpr for the s64 type.
-# CHECK-LABEL: name: mul_s64_gpr
-name: mul_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64 }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-# CHECK-NEXT: - { id: 2, class: gpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %x1
-# CHECK: %2 = MADDXrrr %0, %1, %xzr
-body: |
- bb.0:
- liveins: %x0, %x1
-
- %0(s64) = COPY %x0
- %1(s64) = COPY %x1
- %2(s64) = G_MUL %0, %1
-...
-
----
-# Same as add_s32_gpr, for G_SDIV operations.
-# CHECK-LABEL: name: sdiv_s32_gpr
-name: sdiv_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = SDIVWr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s32) = COPY %w0
- %1(s32) = COPY %w1
- %2(s32) = G_SDIV %0, %1
-...
-
----
-# Same as add_s64_gpr, for G_SDIV operations.
-# CHECK-LABEL: name: sdiv_s64_gpr
-name: sdiv_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64 }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-# CHECK-NEXT: - { id: 2, class: gpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %x1
-# CHECK: %2 = SDIVXr %0, %1
-body: |
- bb.0:
- liveins: %x0, %x1
-
- %0(s64) = COPY %x0
- %1(s64) = COPY %x1
- %2(s64) = G_SDIV %0, %1
-...
-
----
-# Same as add_s32_gpr, for G_UDIV operations.
-# CHECK-LABEL: name: udiv_s32_gpr
-name: udiv_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %w1
-# CHECK: %2 = UDIVWr %0, %1
-body: |
- bb.0:
- liveins: %w0, %w1
-
- %0(s32) = COPY %w0
- %1(s32) = COPY %w1
- %2(s32) = G_UDIV %0, %1
-...
-
----
-# Same as add_s64_gpr, for G_UDIV operations.
-# CHECK-LABEL: name: udiv_s64_gpr
-name: udiv_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64 }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-# CHECK-NEXT: - { id: 2, class: gpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %x1
-# CHECK: %2 = UDIVXr %0, %1
-body: |
- bb.0:
- liveins: %x0, %x1
-
- %0(s64) = COPY %x0
- %1(s64) = COPY %x1
- %2(s64) = G_UDIV %0, %1
-...
-
----
-# Check that we select an s32 FPR G_FADD into FADDSrr.
-# CHECK-LABEL: name: fadd_s32_gpr
-name: fadd_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr32 }
-# CHECK-NEXT: - { id: 1, class: fpr32 }
-# CHECK-NEXT: - { id: 2, class: fpr32 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: fpr }
- - { id: 2, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %s0
-# CHECK: %1 = COPY %s1
-# CHECK: %2 = FADDSrr %0, %1
-body: |
- bb.0:
- liveins: %s0, %s1
-
- %0(s32) = COPY %s0
- %1(s32) = COPY %s1
- %2(s32) = G_FADD %0, %1
-...
-
----
-# CHECK-LABEL: name: fadd_s64_gpr
-name: fadd_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr64 }
-# CHECK-NEXT: - { id: 1, class: fpr64 }
-# CHECK-NEXT: - { id: 2, class: fpr64 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: fpr }
- - { id: 2, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %d0
-# CHECK: %1 = COPY %d1
-# CHECK: %2 = FADDDrr %0, %1
-body: |
- bb.0:
- liveins: %d0, %d1
-
- %0(s64) = COPY %d0
- %1(s64) = COPY %d1
- %2(s64) = G_FADD %0, %1
-...
-
----
-# CHECK-LABEL: name: fsub_s32_gpr
-name: fsub_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr32 }
-# CHECK-NEXT: - { id: 1, class: fpr32 }
-# CHECK-NEXT: - { id: 2, class: fpr32 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: fpr }
- - { id: 2, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %s0
-# CHECK: %1 = COPY %s1
-# CHECK: %2 = FSUBSrr %0, %1
-body: |
- bb.0:
- liveins: %s0, %s1
-
- %0(s32) = COPY %s0
- %1(s32) = COPY %s1
- %2(s32) = G_FSUB %0, %1
-...
-
----
-# CHECK-LABEL: name: fsub_s64_gpr
-name: fsub_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr64 }
-# CHECK-NEXT: - { id: 1, class: fpr64 }
-# CHECK-NEXT: - { id: 2, class: fpr64 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: fpr }
- - { id: 2, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %d0
-# CHECK: %1 = COPY %d1
-# CHECK: %2 = FSUBDrr %0, %1
-body: |
- bb.0:
- liveins: %d0, %d1
-
- %0(s64) = COPY %d0
- %1(s64) = COPY %d1
- %2(s64) = G_FSUB %0, %1
-...
-
----
-# CHECK-LABEL: name: fmul_s32_gpr
-name: fmul_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr32 }
-# CHECK-NEXT: - { id: 1, class: fpr32 }
-# CHECK-NEXT: - { id: 2, class: fpr32 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: fpr }
- - { id: 2, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %s0
-# CHECK: %1 = COPY %s1
-# CHECK: %2 = FMULSrr %0, %1
-body: |
- bb.0:
- liveins: %s0, %s1
-
- %0(s32) = COPY %s0
- %1(s32) = COPY %s1
- %2(s32) = G_FMUL %0, %1
-...
-
----
-# CHECK-LABEL: name: fmul_s64_gpr
-name: fmul_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr64 }
-# CHECK-NEXT: - { id: 1, class: fpr64 }
-# CHECK-NEXT: - { id: 2, class: fpr64 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: fpr }
- - { id: 2, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %d0
-# CHECK: %1 = COPY %d1
-# CHECK: %2 = FMULDrr %0, %1
-body: |
- bb.0:
- liveins: %d0, %d1
-
- %0(s64) = COPY %d0
- %1(s64) = COPY %d1
- %2(s64) = G_FMUL %0, %1
-...
-
----
-# CHECK-LABEL: name: fdiv_s32_gpr
-name: fdiv_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr32 }
-# CHECK-NEXT: - { id: 1, class: fpr32 }
-# CHECK-NEXT: - { id: 2, class: fpr32 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: fpr }
- - { id: 2, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %s0
-# CHECK: %1 = COPY %s1
-# CHECK: %2 = FDIVSrr %0, %1
-body: |
- bb.0:
- liveins: %s0, %s1
-
- %0(s32) = COPY %s0
- %1(s32) = COPY %s1
- %2(s32) = G_FDIV %0, %1
-...
-
----
-# CHECK-LABEL: name: fdiv_s64_gpr
-name: fdiv_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr64 }
-# CHECK-NEXT: - { id: 1, class: fpr64 }
-# CHECK-NEXT: - { id: 2, class: fpr64 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: fpr }
- - { id: 2, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %d0
-# CHECK: %1 = COPY %d1
-# CHECK: %2 = FDIVDrr %0, %1
-body: |
- bb.0:
- liveins: %d0, %d1
-
- %0(s64) = COPY %d0
- %1(s64) = COPY %d1
- %2(s64) = G_FDIV %0, %1
-...
-
----
-# CHECK-LABEL: name: sitofp_s32_s32_fpr
-name: sitofp_s32_s32_fpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: fpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = SCVTFUWSri %0
-body: |
- bb.0:
- liveins: %w0
-
- %0(s32) = COPY %w0
- %1(s32) = G_SITOFP %0
-...
-
----
-# CHECK-LABEL: name: sitofp_s32_s64_fpr
-name: sitofp_s32_s64_fpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64 }
-# CHECK-NEXT: - { id: 1, class: fpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = SCVTFUXSri %0
-body: |
- bb.0:
- liveins: %x0
-
- %0(s64) = COPY %x0
- %1(s32) = G_SITOFP %0
-...
-
----
-# CHECK-LABEL: name: sitofp_s64_s32_fpr
-name: sitofp_s64_s32_fpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: fpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = SCVTFUWDri %0
-body: |
- bb.0:
- liveins: %w0
-
- %0(s32) = COPY %w0
- %1(s64) = G_SITOFP %0
-...
-
----
-# CHECK-LABEL: name: sitofp_s64_s64_fpr
-name: sitofp_s64_s64_fpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64 }
-# CHECK-NEXT: - { id: 1, class: fpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = SCVTFUXDri %0
-body: |
- bb.0:
- liveins: %x0
-
- %0(s64) = COPY %x0
- %1(s64) = G_SITOFP %0
-...
-
----
-# CHECK-LABEL: name: uitofp_s32_s32_fpr
-name: uitofp_s32_s32_fpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: fpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = UCVTFUWSri %0
-body: |
- bb.0:
- liveins: %w0
-
- %0(s32) = COPY %w0
- %1(s32) = G_UITOFP %0
-...
-
----
-# CHECK-LABEL: name: uitofp_s32_s64_fpr
-name: uitofp_s32_s64_fpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64 }
-# CHECK-NEXT: - { id: 1, class: fpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = UCVTFUXSri %0
-body: |
- bb.0:
- liveins: %x0
-
- %0(s64) = COPY %x0
- %1(s32) = G_UITOFP %0
-...
-
----
-# CHECK-LABEL: name: uitofp_s64_s32_fpr
-name: uitofp_s64_s32_fpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: fpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = UCVTFUWDri %0
-body: |
- bb.0:
- liveins: %w0
-
- %0(s32) = COPY %w0
- %1(s64) = G_UITOFP %0
-...
-
----
-# CHECK-LABEL: name: uitofp_s64_s64_fpr
-name: uitofp_s64_s64_fpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64 }
-# CHECK-NEXT: - { id: 1, class: fpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = UCVTFUXDri %0
-body: |
- bb.0:
- liveins: %x0
-
- %0(s64) = COPY %x0
- %1(s64) = G_UITOFP %0
-...
-
----
-# CHECK-LABEL: name: fptosi_s32_s32_gpr
-name: fptosi_s32_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %s0
-# CHECK: %1 = FCVTZSUWSr %0
-body: |
- bb.0:
- liveins: %s0
-
- %0(s32) = COPY %s0
- %1(s32) = G_FPTOSI %0
-...
-
----
-# CHECK-LABEL: name: fptosi_s32_s64_gpr
-name: fptosi_s32_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr64 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %d0
-# CHECK: %1 = FCVTZSUWDr %0
-body: |
- bb.0:
- liveins: %d0
-
- %0(s64) = COPY %d0
- %1(s32) = G_FPTOSI %0
-...
-
----
-# CHECK-LABEL: name: fptosi_s64_s32_gpr
-name: fptosi_s64_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %s0
-# CHECK: %1 = FCVTZSUXSr %0
-body: |
- bb.0:
- liveins: %s0
-
- %0(s32) = COPY %s0
- %1(s64) = G_FPTOSI %0
-...
-
----
-# CHECK-LABEL: name: fptosi_s64_s64_gpr
-name: fptosi_s64_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr64 }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %d0
-# CHECK: %1 = FCVTZSUXDr %0
-body: |
- bb.0:
- liveins: %d0
-
- %0(s64) = COPY %d0
- %1(s64) = G_FPTOSI %0
-...
-
----
-# CHECK-LABEL: name: fptoui_s32_s32_gpr
-name: fptoui_s32_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %s0
-# CHECK: %1 = FCVTZUUWSr %0
-body: |
- bb.0:
- liveins: %s0
-
- %0(s32) = COPY %s0
- %1(s32) = G_FPTOUI %0
-...
-
----
-# CHECK-LABEL: name: fptoui_s32_s64_gpr
-name: fptoui_s32_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr64 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %d0
-# CHECK: %1 = FCVTZUUWDr %0
-body: |
- bb.0:
- liveins: %d0
-
- %0(s64) = COPY %d0
- %1(s32) = G_FPTOUI %0
-...
-
----
-# CHECK-LABEL: name: fptoui_s64_s32_gpr
-name: fptoui_s64_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %s0
-# CHECK: %1 = FCVTZUUXSr %0
-body: |
- bb.0:
- liveins: %s0
-
- %0(s32) = COPY %s0
- %1(s64) = G_FPTOUI %0
-...
-
----
-# CHECK-LABEL: name: fptoui_s64_s64_gpr
-name: fptoui_s64_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr64 }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %d0
-# CHECK: %1 = FCVTZUUXDr %0
-body: |
- bb.0:
- liveins: %d0
-
- %0(s64) = COPY %d0
- %1(s64) = G_FPTOUI %0
-...
-
----
-# CHECK-LABEL: name: fptrunc
-name: fptrunc
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK: - { id: 0, class: fpr64 }
-# CHECK: - { id: 1, class: fpr32 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %d0
-# CHECK: %1 = FCVTSDr %0
-body: |
- bb.0:
- liveins: %d0
-
- %0(s64) = COPY %d0
- %1(s32) = G_FPTRUNC %0
-...
-
----
-# CHECK-LABEL: name: fpext
-name: fpext
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK: - { id: 0, class: fpr32 }
-# CHECK: - { id: 1, class: fpr64 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %s0
-# CHECK: %1 = FCVTDSr %0
-body: |
- bb.0:
-    liveins: %s0
-
- %0(s32) = COPY %s0
- %1(s64) = G_FPEXT %0
-...
-
----
-# CHECK-LABEL: name: unconditional_br
-name: unconditional_br
-legalized: true
-regBankSelected: true
-
-# CHECK: body:
-# CHECK: bb.0:
-# CHECK: successors: %bb.0
-# CHECK: B %bb.0
-body: |
- bb.0:
- successors: %bb.0
-
- G_BR %bb.0
-...
-
----
-# CHECK-LABEL: name: conditional_br
-name: conditional_br
-legalized: true
-regBankSelected: true
-
-registers:
- - { id: 0, class: gpr }
-
-# CHECK: body:
-# CHECK: bb.0:
-# CHECK: TBNZW %0, 0, %bb.1
-# CHECK: B %bb.0
-body: |
- bb.0:
- successors: %bb.0, %bb.1
- %0(s1) = COPY %w0
- G_BRCOND %0(s1), %bb.1
- G_BR %bb.0
-
- bb.1:
-...
-
----
-# CHECK-LABEL: name: load_s64_gpr
-name: load_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64sp }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = LDRXui %0, 0 :: (load 8 from %ir.addr)
-body: |
- bb.0:
- liveins: %x0
-
- %0(p0) = COPY %x0
- %1(s64) = G_LOAD %0 :: (load 8 from %ir.addr)
-
-...
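# Note that the base pointer is constrained to gpr64sp rather than gpr64:
# register number 31 in the base-address position of a load/store encodes SP,
# so the pointer's class must include SP, while the data register's need not.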
-
----
-# CHECK-LABEL: name: load_s32_gpr
-name: load_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64sp }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = LDRWui %0, 0 :: (load 4 from %ir.addr)
-body: |
- bb.0:
- liveins: %x0
-
- %0(p0) = COPY %x0
- %1(s32) = G_LOAD %0 :: (load 4 from %ir.addr)
-
-...
-
----
-# CHECK-LABEL: name: load_s16_gpr
-name: load_s16_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64sp }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = LDRHHui %0, 0 :: (load 2 from %ir.addr)
-body: |
- bb.0:
- liveins: %x0
-
- %0(p0) = COPY %x0
- %1(s16) = G_LOAD %0 :: (load 2 from %ir.addr)
-
-...
-
----
-# CHECK-LABEL: name: load_s8_gpr
-name: load_s8_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64sp }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = LDRBBui %0, 0 :: (load 1 from %ir.addr)
-body: |
- bb.0:
- liveins: %x0
-
- %0(p0) = COPY %x0
- %1(s8) = G_LOAD %0 :: (load 1 from %ir.addr)
-
-...
-
----
-# CHECK-LABEL: name: load_s64_fpr
-name: load_s64_fpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64sp }
-# CHECK-NEXT: - { id: 1, class: fpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = LDRDui %0, 0 :: (load 8 from %ir.addr)
-body: |
- bb.0:
- liveins: %x0
-
- %0(p0) = COPY %x0
- %1(s64) = G_LOAD %0 :: (load 8 from %ir.addr)
-
-...
-
----
-# CHECK-LABEL: name: load_s32_fpr
-name: load_s32_fpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64sp }
-# CHECK-NEXT: - { id: 1, class: fpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = LDRSui %0, 0 :: (load 4 from %ir.addr)
-body: |
- bb.0:
- liveins: %x0
-
- %0(p0) = COPY %x0
- %1(s32) = G_LOAD %0 :: (load 4 from %ir.addr)
-
-...
-
----
-# CHECK-LABEL: name: load_s16_fpr
-name: load_s16_fpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64sp }
-# CHECK-NEXT: - { id: 1, class: fpr16 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = LDRHui %0, 0 :: (load 2 from %ir.addr)
-body: |
- bb.0:
- liveins: %x0
-
- %0(p0) = COPY %x0
- %1(s16) = G_LOAD %0 :: (load 2 from %ir.addr)
-
-...
-
----
-# CHECK-LABEL: name: load_s8_fpr
-name: load_s8_fpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64sp }
-# CHECK-NEXT: - { id: 1, class: fpr8 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = LDRBui %0, 0 :: (load 1 from %ir.addr)
-body: |
- bb.0:
- liveins: %x0
-
- %0(p0) = COPY %x0
- %1(s8) = G_LOAD %0 :: (load 1 from %ir.addr)
-
-...
-
----
-# CHECK-LABEL: name: store_s64_gpr
-name: store_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64sp }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %x1
-# CHECK: STRXui %1, %0, 0 :: (store 8 into %ir.addr)
-body: |
- bb.0:
- liveins: %x0, %x1
-
- %0(p0) = COPY %x0
- %1(s64) = COPY %x1
- G_STORE %1, %0 :: (store 8 into %ir.addr)
-
-...
-
----
-# CHECK-LABEL: name: store_s32_gpr
-name: store_s32_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64sp }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %w1
-# CHECK: STRWui %1, %0, 0 :: (store 4 into %ir.addr)
-body: |
- bb.0:
- liveins: %x0, %w1
-
- %0(p0) = COPY %x0
- %1(s32) = COPY %w1
- G_STORE %1, %0 :: (store 4 into %ir.addr)
-
-...
-
----
-# CHECK-LABEL: name: store_s16_gpr
-name: store_s16_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64sp }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %w1
-# CHECK: STRHHui %1, %0, 0 :: (store 2 into %ir.addr)
-body: |
- bb.0:
- liveins: %x0, %w1
-
- %0(p0) = COPY %x0
- %1(s16) = COPY %w1
- G_STORE %1, %0 :: (store 2 into %ir.addr)
-
-...
-
----
-# CHECK-LABEL: name: store_s8_gpr
-name: store_s8_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64sp }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %w1
-# CHECK: STRBBui %1, %0, 0 :: (store 1 into %ir.addr)
-body: |
- bb.0:
- liveins: %x0, %w1
-
- %0(p0) = COPY %x0
- %1(s8) = COPY %w1
- G_STORE %1, %0 :: (store 1 into %ir.addr)
-
-...
-
----
-# CHECK-LABEL: name: store_s64_fpr
-name: store_s64_fpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64sp }
-# CHECK-NEXT: - { id: 1, class: fpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %d1
-# CHECK: STRDui %1, %0, 0 :: (store 8 into %ir.addr)
-body: |
- bb.0:
- liveins: %x0, %d1
-
- %0(p0) = COPY %x0
- %1(s64) = COPY %d1
- G_STORE %1, %0 :: (store 8 into %ir.addr)
-
-...
-
----
-# CHECK-LABEL: name: store_s32_fpr
-name: store_s32_fpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64sp }
-# CHECK-NEXT: - { id: 1, class: fpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %s1
-# CHECK: STRSui %1, %0, 0 :: (store 4 into %ir.addr)
-body: |
- bb.0:
- liveins: %x0, %s1
-
- %0(p0) = COPY %x0
- %1(s32) = COPY %s1
- G_STORE %1, %0 :: (store 4 into %ir.addr)
-
-...
-
----
-# CHECK-LABEL: name: frame_index
-name: frame_index
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64sp }
-registers:
- - { id: 0, class: gpr }
-
-stack:
- - { id: 0, name: ptr0, offset: 0, size: 8, alignment: 8 }
-
-# CHECK: body:
-# CHECK: %0 = ADDXri %stack.0.ptr0, 0, 0
-body: |
- bb.0:
- %0(p0) = G_FRAME_INDEX %stack.0.ptr0
-...
-
----
-# Check that we set the "selected" property.
-# CHECK-LABEL: name: selected_property
-# CHECK: legalized: true
-# CHECK-NEXT: regBankSelected: true
-# CHECK-NEXT: selected: true
-name: selected_property
-legalized: true
-regBankSelected: true
-selected: false
-body: |
- bb.0:
-...
-
----
-# CHECK-LABEL: name: const_s32
-name: const_s32
-legalized: true
-regBankSelected: true
-registers:
- - { id: 0, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = MOVi32imm 42
-body: |
- bb.0:
- %0(s32) = G_CONSTANT i32 42
-...
-
----
-# CHECK-LABEL: name: const_s64
-name: const_s64
-legalized: true
-regBankSelected: true
-registers:
- - { id: 0, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = MOVi64imm 1234567890123
-body: |
- bb.0:
- %0(s64) = G_CONSTANT i64 1234567890123
-...
-
----
-# CHECK-LABEL: name: fconst_s32
-name: fconst_s32
-legalized: true
-regBankSelected: true
-registers:
- - { id: 0, class: fpr }
-
-# CHECK: body:
-# CHECK: [[TMP:%[0-9]+]] = MOVi32imm 1080033280
-# CHECK: %0 = COPY [[TMP]]
-body: |
- bb.0:
- %0(s32) = G_FCONSTANT float 3.5
-...
-
----
-# CHECK-LABEL: name: fconst_s64
-name: fconst_s64
-legalized: true
-regBankSelected: true
-registers:
- - { id: 0, class: fpr }
-
-# CHECK: body:
-# CHECK: [[TMP:%[0-9]+]] = MOVi64imm 4607182418800017408
-# CHECK: %0 = COPY [[TMP]]
-body: |
- bb.0:
- %0(s64) = G_FCONSTANT double 1.0
-...
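# The immediates above are simply the IEEE-754 bit patterns of the requested
# constants, built in a GPR and then copied across to the FPR bank:
#   1080033280          = 0x40600000          = float  3.5
#   4607182418800017408 = 0x3ff0000000000000  = double 1.0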
-
----
-# CHECK-LABEL: name: gep
-name: gep
-legalized: true
-regBankSelected: true
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
-
-# CHECK: body:
-# CHECK: %1 = MOVi64imm 42
-# CHECK: %2 = ADDXrr %0, %1
-body: |
- bb.0:
- liveins: %x0
- %0(p0) = COPY %x0
- %1(s64) = G_CONSTANT i64 42
- %2(p0) = G_GEP %0, %1(s64)
-...
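# Pointer arithmetic needs no dedicated instruction: a G_GEP with a constant
# byte offset is selected as a plain immediate materialization plus a 64-bit
# integer add on the pointer's GPR, as the CHECK lines above show.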
-
----
-# Global defined in the same linkage unit, so no GOT is needed.
-# CHECK-LABEL: name: global_local
-name: global_local
-legalized: true
-regBankSelected: true
-registers:
- - { id: 0, class: gpr }
-
-# CHECK: body:
-# IOS: %0 = MOVaddr target-flags(aarch64-page) @var_local, target-flags(aarch64-pageoff, aarch64-nc) @var_local
-# LINUX-DEFAULT: %0 = MOVaddr target-flags(aarch64-page) @var_local, target-flags(aarch64-pageoff, aarch64-nc) @var_local
-# LINUX-PIC: %0 = LOADgot target-flags(aarch64-got) @var_local
-body: |
- bb.0:
- %0(p0) = G_GLOBAL_VALUE @var_local
-...
-
----
-# CHECK-LABEL: name: global_got
-name: global_got
-legalized: true
-regBankSelected: true
-registers:
- - { id: 0, class: gpr }
-
-# CHECK: body:
-# IOS: %0 = LOADgot target-flags(aarch64-got) @var_got
-# LINUX-DEFAULT: %0 = MOVaddr target-flags(aarch64-page) @var_got, target-flags(aarch64-pageoff, aarch64-nc) @var_got
-# LINUX-PIC: %0 = LOADgot target-flags(aarch64-got) @var_got
-body: |
- bb.0:
- %0(p0) = G_GLOBAL_VALUE @var_got
-...
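# A sketch of the shape these two globals presumably have in the IR section of
# this test (assumed here for illustration; the definitions are not shown in
# this diff):
#
#   @var_local = global i32 0        ; defined in this module: direct MOVaddr
#                                    ; is safe unless we are generating PIC
#   @var_got = external global i32   ; may be defined elsewhere, so iOS (and
#                                    ; PIC Linux) must go through the GOT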
-
----
-# CHECK-LABEL: name: trunc
-name: trunc
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr64 }
-# CHECK-NEXT: - { id: 3, class: gpr32 }
-# CHECK-NEXT: - { id: 4, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
- - { id: 3, class: gpr }
- - { id: 4, class: gpr }
-
-# CHECK: body:
-# CHECK: %1 = COPY %0
-# CHECK: %3 = COPY %2.sub_32
-# CHECK: %4 = COPY %2.sub_32
-body: |
- bb.0:
- liveins: %w0, %x0
-
- %0(s32) = COPY %w0
- %1(s1) = G_TRUNC %0
-
- %2(s64) = COPY %x0
- %3(s32) = G_TRUNC %2
- %4(s8) = G_TRUNC %2
-...
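# No instruction is needed for truncation: narrowing a 64-bit value reuses the
# sub_32 subregister of the source, and the s32 to s1 case is a plain copy, so
# all three G_TRUNCs above select to COPYs.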
-
----
-# CHECK-LABEL: name: anyext_gpr
-name: anyext_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32all }
-# CHECK-NEXT: - { id: 1, class: gpr64all }
-# CHECK-NEXT: - { id: 2, class: gpr32all }
-# CHECK-NEXT: - { id: 3, class: gpr32all }
-# CHECK-NEXT: - { id: 4, class: gpr64all }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
- - { id: 3, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %4 = SUBREG_TO_REG 0, %0, 15
-# CHECK: %1 = COPY %4
-# CHECK: %2 = COPY %w0
-# CHECK: %3 = COPY %2
-body: |
- bb.0:
- liveins: %w0
-
- %0(s32) = COPY %w0
- %1(s64) = G_ANYEXT %0
- %2(s8) = COPY %w0
- %3(s32) = G_ANYEXT %2
-...
-
----
-# CHECK-LABEL: name: zext_gpr
-name: zext_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-# CHECK-NEXT: - { id: 3, class: gpr32 }
-# CHECK-NEXT: - { id: 4, class: gpr32 }
-# CHECK-NEXT: - { id: 5, class: gpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
- - { id: 3, class: gpr }
- - { id: 4, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %5 = SUBREG_TO_REG 0, %0, 15
-# CHECK: %1 = UBFMXri %5, 0, 31
-# CHECK: %2 = COPY %w0
-# CHECK: %3 = UBFMWri %2, 0, 7
-# CHECK: %4 = UBFMWri %2, 0, 7
-body: |
- bb.0:
- liveins: %w0
-
- %0(s32) = COPY %w0
- %1(s64) = G_ZEXT %0
- %2(s8) = COPY %w0
- %3(s32) = G_ZEXT %2
-    %4(s16) = G_ZEXT %2
-...
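# UBFM with immr=0 extracts bits imms:0 and zeroes the rest, so
# "UBFMWri %2, 0, 7" is the canonical byte zero-extension (the UXTB alias) and
# "UBFMXri %5, 0, 31" zero-extends the low 32 bits of an X register.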
-
----
-# CHECK-LABEL: name: sext_gpr
-name: sext_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr64 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-# CHECK-NEXT: - { id: 3, class: gpr32 }
-# CHECK-NEXT: - { id: 4, class: gpr32 }
-# CHECK-NEXT: - { id: 5, class: gpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
- - { id: 3, class: gpr }
- - { id: 4, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %5 = SUBREG_TO_REG 0, %0, 15
-# CHECK: %1 = SBFMXri %5, 0, 31
-# CHECK: %2 = COPY %w0
-# CHECK: %3 = SBFMWri %2, 0, 7
-# CHECK: %4 = SBFMWri %2, 0, 7
-body: |
- bb.0:
- liveins: %w0
-
- %0(s32) = COPY %w0
- %1(s64) = G_SEXT %0
- %2(s8) = COPY %w0
- %3(s32) = G_SEXT %2
- %4(s16) = G_SEXT %2
-...
-
----
-# CHECK-LABEL: name: casts
-name: casts
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64all }
-# CHECK-NEXT: - { id: 1, class: fpr64 }
-# CHECK-NEXT: - { id: 2, class: gpr64 }
-# CHECK-NEXT: - { id: 3, class: gpr64 }
-# CHECK-NEXT: - { id: 4, class: gpr32 }
-# CHECK-NEXT: - { id: 5, class: gpr32 }
-# CHECK-NEXT: - { id: 6, class: gpr32 }
-# CHECK-NEXT: - { id: 7, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: fpr }
- - { id: 2, class: gpr }
- - { id: 3, class: gpr }
- - { id: 4, class: gpr }
- - { id: 5, class: gpr }
- - { id: 6, class: gpr }
- - { id: 7, class: gpr }
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %0
-# CHECK: %2 = COPY %0
-# CHECK: %3 = COPY %2
-# CHECK: %4 = COPY %2.sub_32
-# CHECK: %5 = COPY %2.sub_32
-# CHECK: %6 = COPY %2.sub_32
-# CHECK: %7 = COPY %2.sub_32
-body: |
- bb.0:
- liveins: %x0
- %0(s64) = COPY %x0
- %1(<8 x s8>) = G_BITCAST %0(s64)
- %2(p0) = G_INTTOPTR %0
-
- %3(s64) = G_PTRTOINT %2
- %4(s32) = G_PTRTOINT %2
- %5(s16) = G_PTRTOINT %2
- %6(s8) = G_PTRTOINT %2
- %7(s1) = G_PTRTOINT %2
-...
-
----
-# CHECK-LABEL: name: bitcast_s32_gpr
-name: bitcast_s32_gpr
-legalized: true
-regBankSelected: true
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32all }
-# CHECK-NEXT: - { id: 1, class: gpr32all }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %0
-body: |
- bb.0:
- liveins: %w0
-
- %0(s32) = COPY %w0
- %1(s32) = G_BITCAST %0
-...
-
----
-# CHECK-LABEL: name: bitcast_s32_fpr
-name: bitcast_s32_fpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr32 }
-# CHECK-NEXT: - { id: 1, class: fpr32 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %s0
-# CHECK: %1 = COPY %0
-body: |
- bb.0:
- liveins: %s0
-
- %0(s32) = COPY %s0
- %1(s32) = G_BITCAST %0
-...
-
----
-# CHECK-LABEL: name: bitcast_s32_gpr_fpr
-name: bitcast_s32_gpr_fpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32all }
-# CHECK-NEXT: - { id: 1, class: fpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %w0
-# CHECK: %1 = COPY %0
-body: |
- bb.0:
- liveins: %w0
-
- %0(s32) = COPY %w0
- %1(s32) = G_BITCAST %0
-...
-
----
-# CHECK-LABEL: name: bitcast_s32_fpr_gpr
-name: bitcast_s32_fpr_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32all }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %s0
-# CHECK: %1 = COPY %0
-body: |
- bb.0:
- liveins: %s0
-
- %0(s32) = COPY %s0
- %1(s32) = G_BITCAST %0
-...
-
----
-# CHECK-LABEL: name: bitcast_s64_gpr
-name: bitcast_s64_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64all }
-# CHECK-NEXT: - { id: 1, class: gpr64all }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %0
-body: |
- bb.0:
- liveins: %x0
-
- %0(s64) = COPY %x0
- %1(s64) = G_BITCAST %0
-...
-
----
-# CHECK-LABEL: name: bitcast_s64_fpr
-name: bitcast_s64_fpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr64 }
-# CHECK-NEXT: - { id: 1, class: fpr64 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: fpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %d0
-# CHECK: %1 = COPY %0
-body: |
- bb.0:
- liveins: %d0
-
- %0(s64) = COPY %d0
- %1(s64) = G_BITCAST %0
-...
-
----
-# CHECK-LABEL: name: bitcast_s64_gpr_fpr
-name: bitcast_s64_gpr_fpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64all }
-# CHECK-NEXT: - { id: 1, class: fpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: fpr }
-# CHECK: body:
-# CHECK: %0 = COPY %x0
-# CHECK: %1 = COPY %0
-body: |
- bb.0:
- liveins: %x0
-
- %0(s64) = COPY %x0
- %1(s64) = G_BITCAST %0
-...
-
----
-# CHECK-LABEL: name: bitcast_s64_fpr_gpr
-name: bitcast_s64_fpr_gpr
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr64 }
-# CHECK-NEXT: - { id: 1, class: gpr64all }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: gpr }
-
-# CHECK: body:
-# CHECK: %0 = COPY %d0
-# CHECK: %1 = COPY %0
-body: |
- bb.0:
- liveins: %d0
-
- %0(s64) = COPY %d0
- %1(s64) = G_BITCAST %0
-...
-
----
-# CHECK-LABEL: name: icmp
-name: icmp
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr64 }
-# CHECK-NEXT: - { id: 3, class: gpr32 }
-# CHECK-NEXT: - { id: 4, class: gpr64 }
-# CHECK-NEXT: - { id: 5, class: gpr32 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
- - { id: 3, class: gpr }
- - { id: 4, class: gpr }
- - { id: 5, class: gpr }
-
-# CHECK: body:
-# CHECK: %wzr = SUBSWrr %0, %0, implicit-def %nzcv
-# CHECK: %1 = CSINCWr %wzr, %wzr, 1, implicit %nzcv
-
-# CHECK: %xzr = SUBSXrr %2, %2, implicit-def %nzcv
-# CHECK: %3 = CSINCWr %wzr, %wzr, 3, implicit %nzcv
-
-# CHECK: %xzr = SUBSXrr %4, %4, implicit-def %nzcv
-# CHECK: %5 = CSINCWr %wzr, %wzr, 0, implicit %nzcv
-
-body: |
- bb.0:
- liveins: %w0, %x0
-
- %0(s32) = COPY %w0
- %1(s1) = G_ICMP intpred(eq), %0, %0
-
- %2(s64) = COPY %x0
- %3(s1) = G_ICMP intpred(uge), %2, %2
-
- %4(p0) = COPY %x0
- %5(s1) = G_ICMP intpred(ne), %4, %4
-...
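# CSINC Wd, Wn, Wm, cond computes "cond ? Wn : Wm + 1", so with both inputs
# tied to WZR it produces 1 exactly when cond is false. The selector therefore
# passes the inverse of the wanted predicate: cond 1 (ne) above materializes
# eq, cond 3 (lo) materializes uge, and cond 0 (eq) materializes ne.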
-
----
-# CHECK-LABEL: name: fcmp
-name: fcmp
-legalized: true
-regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: fpr64 }
-# CHECK-NEXT: - { id: 3, class: gpr32 }
-# CHECK-NEXT: - { id: 4, class: gpr32 }
-# CHECK-NEXT: - { id: 5, class: gpr32 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: gpr }
- - { id: 2, class: fpr }
- - { id: 3, class: gpr }
-
-# CHECK: body:
-# CHECK: FCMPSrr %0, %0, implicit-def %nzcv
-# CHECK: [[TST_MI:%[0-9]+]] = CSINCWr %wzr, %wzr, 4, implicit %nzcv
-# CHECK: [[TST_GT:%[0-9]+]] = CSINCWr %wzr, %wzr, 12, implicit %nzcv
-# CHECK: %1 = ORRWrr [[TST_MI]], [[TST_GT]]
-
-# CHECK: FCMPDrr %2, %2, implicit-def %nzcv
-# CHECK: %3 = CSINCWr %wzr, %wzr, 5, implicit %nzcv
-
-body: |
- bb.0:
-    liveins: %s0, %d0
-
- %0(s32) = COPY %s0
- %1(s1) = G_FCMP floatpred(one), %0, %0
-
- %2(s64) = COPY %d0
- %3(s1) = G_FCMP floatpred(uge), %2, %2
-
-...
-
----
-# CHECK-LABEL: name: phi
-name: phi
-legalized: true
-regBankSelected: true
-tracksRegLiveness: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: fpr32 }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: gpr }
- - { id: 2, class: fpr }
-
-# CHECK: body:
-# CHECK: bb.1:
-# CHECK: %2 = PHI %0, %bb.0, %2, %bb.1
-
-body: |
- bb.0:
- liveins: %s0, %w0
- successors: %bb.1
- %0(s32) = COPY %s0
- %1(s1) = COPY %w0
-
- bb.1:
- successors: %bb.1, %bb.2
- %2(s32) = PHI %0, %bb.0, %2, %bb.1
- G_BRCOND %1, %bb.1
-
- bb.2:
- %s0 = COPY %2
- RET_ReallyLR implicit %s0
-...
-
----
-# CHECK-LABEL: name: select
-name: select
-legalized: true
-regBankSelected: true
-tracksRegLiveness: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32 }
-# CHECK-NEXT: - { id: 1, class: gpr32 }
-# CHECK-NEXT: - { id: 2, class: gpr32 }
-# CHECK-NEXT: - { id: 3, class: gpr32 }
-# CHECK-NEXT: - { id: 4, class: gpr64 }
-# CHECK-NEXT: - { id: 5, class: gpr64 }
-# CHECK-NEXT: - { id: 6, class: gpr64 }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
- - { id: 3, class: gpr }
- - { id: 4, class: gpr }
- - { id: 5, class: gpr }
- - { id: 6, class: gpr }
-
-# CHECK: body:
-# CHECK: %wzr = ANDSWri %0, 0, implicit-def %nzcv
-# CHECK: %3 = CSELWr %1, %2, 1, implicit %nzcv
-# CHECK: %wzr = ANDSWri %0, 0, implicit-def %nzcv
-# CHECK: %6 = CSELXr %4, %5, 1, implicit %nzcv
-body: |
- bb.0:
-    liveins: %w0, %w1, %w2, %x0, %x1
- %0(s1) = COPY %w0
-
- %1(s32) = COPY %w1
- %2(s32) = COPY %w2
- %3(s32) = G_SELECT %0, %1, %2
-
- %4(s64) = COPY %x0
- %5(s64) = COPY %x1
- %6(s64) = G_SELECT %0, %4, %5
-...
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-stackprotect.ll b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-stackprotect.ll
index 579ef777223c..006308641184 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-stackprotect.ll
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-stackprotect.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-apple-ios %s -stop-after=irtranslator -o - -global-isel | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-apple-ios %s -stop-after=irtranslator -o - -global-isel | FileCheck %s
; CHECK: name: test_stack_guard
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
index 15b4012f383d..02848021dbc0 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -52,18 +52,40 @@ define void @allocai64() {
; CHECK: body:
;
; ABI/constant lowering and IR-level entry basic block.
-; CHECK: {{bb.[0-9]+}} (%ir-block.{{[0-9]+}}):
+; CHECK: {{bb.[0-9]+}}.entry:
;
; Make sure we have one successor and only one.
-; CHECK-NEXT: successors: %[[END:bb.[0-9]+.end]](0x80000000)
+; CHECK-NEXT: successors: %[[BB2:bb.[0-9]+.bb2]](0x80000000)
;
; Check that we emit the correct branch.
-; CHECK: G_BR %[[END]]
+; CHECK: G_BR %[[BB2]]
;
; Check that end contains the return instruction.
-; CHECK: [[END]]:
+; CHECK: [[END:bb.[0-9]+.end]]:
; CHECK-NEXT: RET_ReallyLR
+;
+; CHECK: {{bb.[0-9]+}}.bb2:
+; CHECK-NEXT: successors: %[[END]](0x80000000)
+; CHECK: G_BR %[[END]]
define void @uncondbr() {
+entry:
+ br label %bb2
+end:
+ ret void
+bb2:
+ br label %end
+}
+
+; CHECK-LABEL: name: uncondbr_fallthrough
+; CHECK: body:
+; CHECK: {{bb.[0-9]+}}.entry:
+; CHECK-NEXT: successors: %[[END:bb.[0-9]+.end]](0x80000000)
+; We don't emit a branch here, as we can fall through to the successor.
+; CHECK-NOT: G_BR
+; CHECK: [[END]]:
+; CHECK-NEXT: RET_ReallyLR
+define void @uncondbr_fallthrough() {
+entry:
br label %end
end:
ret void
@@ -117,33 +139,35 @@ false:
; CHECK: G_BRCOND %[[regicmp100]](s1), %[[BB_CASE100]]
; CHECK: G_BR %[[BB_NOTCASE100_CHECKNEXT]]
;
-; CHECK: [[BB_CASE100]]:
-; CHECK-NEXT: successors: %[[BB_RET:bb.[0-9]+.return]](0x80000000)
-; CHECK: %[[regretc100:[0-9]+]](s32) = G_ADD %0, %[[reg1]]
-; CHECK: G_BR %[[BB_RET]]
; CHECK: [[BB_NOTCASE100_CHECKNEXT]]:
; CHECK-NEXT: successors: %[[BB_CASE200:bb.[0-9]+.case200]](0x40000000), %[[BB_NOTCASE200_CHECKNEXT:bb.[0-9]+.entry]](0x40000000)
; CHECK: %[[regicmp200:[0-9]+]](s1) = G_ICMP intpred(eq), %[[reg200]](s32), %0
; CHECK: G_BRCOND %[[regicmp200]](s1), %[[BB_CASE200]]
; CHECK: G_BR %[[BB_NOTCASE200_CHECKNEXT]]
;
-; CHECK: [[BB_CASE200]]:
-; CHECK-NEXT: successors: %[[BB_RET:bb.[0-9]+.return]](0x80000000)
-; CHECK: %[[regretc200:[0-9]+]](s32) = G_ADD %0, %[[reg2]]
-; CHECK: G_BR %[[BB_RET]]
; CHECK: [[BB_NOTCASE200_CHECKNEXT]]:
; CHECK-NEXT: successors: %[[BB_DEFAULT:bb.[0-9]+.default]](0x80000000)
; CHECK: G_BR %[[BB_DEFAULT]]
;
; CHECK: [[BB_DEFAULT]]:
-; CHECK-NEXT: successors: %[[BB_RET]](0x80000000)
+; CHECK-NEXT: successors: %[[BB_RET:bb.[0-9]+.return]](0x80000000)
; CHECK: %[[regretdefault:[0-9]+]](s32) = G_ADD %0, %[[reg0]]
; CHECK: G_BR %[[BB_RET]]
;
+; CHECK: [[BB_CASE100]]:
+; CHECK-NEXT: successors: %[[BB_RET:bb.[0-9]+.return]](0x80000000)
+; CHECK: %[[regretc100:[0-9]+]](s32) = G_ADD %0, %[[reg1]]
+; CHECK: G_BR %[[BB_RET]]
+;
+; CHECK: [[BB_CASE200]]:
+; CHECK-NEXT: successors: %[[BB_RET]](0x80000000)
+; CHECK: %[[regretc200:[0-9]+]](s32) = G_ADD %0, %[[reg2]]
+;
; CHECK: [[BB_RET]]:
; CHECK-NEXT: %[[regret:[0-9]+]](s32) = PHI %[[regretdefault]](s32), %[[BB_DEFAULT]], %[[regretc100]](s32), %[[BB_CASE100]]
; CHECK: %w0 = COPY %[[regret]](s32)
; CHECK: RET_ReallyLR implicit %w0
+;
define i32 @switch(i32 %argc) {
entry:
switch i32 %argc, label %default [
@@ -168,6 +192,95 @@ return:
ret i32 %res
}
+; The switch lowering code changes the CFG, which means that the original
+; %entry block is no longer a predecessor for the phi instruction. We need to
+; use the correct lowered MachineBasicBlock instead.
+; CHECK-LABEL: name: test_cfg_remap
+; CHECK: {{bb.[0-9]+.entry}}:
+; CHECK-NEXT: successors: %{{bb.[0-9]+.next}}(0x40000000), %[[NOTCASE1_BLOCK:bb.[0-9]+.entry]](0x40000000)
+; CHECK: [[NOTCASE1_BLOCK]]:
+; CHECK-NEXT: successors: %{{bb.[0-9]+.other}}(0x40000000), %[[NOTCASE57_BLOCK:bb.[0-9]+.entry]](0x40000000)
+; CHECK: [[NOTCASE57_BLOCK]]:
+; CHECK-NEXT: successors: %[[PHI_BLOCK:bb.[0-9]+.phi.block]](0x80000000)
+; CHECK: G_BR %[[PHI_BLOCK]]
+;
+; CHECK: [[PHI_BLOCK]]:
+; CHECK-NEXT: PHI %{{.*}}(s32), %[[NOTCASE57_BLOCK:bb.[0-9]+.entry]], %{{.*}}(s32),
+;
+define i32 @test_cfg_remap(i32 %in) {
+entry:
+ switch i32 %in, label %phi.block [i32 1, label %next
+ i32 57, label %other]
+
+next:
+ br label %phi.block
+
+other:
+ ret i32 undef
+
+phi.block:
+ %res = phi i32 [1, %entry], [42, %next]
+ ret i32 %res
+}
+
+; CHECK-LABEL: name: test_cfg_remap_multiple_preds
+; CHECK: PHI [[ENTRY:%.*]](s32), %bb.{{[0-9]+}}.entry, [[ENTRY]](s32), %bb.{{[0-9]+}}.entry
+define i32 @test_cfg_remap_multiple_preds(i32 %in) {
+entry:
+ switch i32 %in, label %odd [i32 1, label %next
+ i32 57, label %other
+ i32 128, label %phi.block
+ i32 256, label %phi.block]
+odd:
+ unreachable
+
+next:
+ br label %phi.block
+
+other:
+ ret i32 undef
+
+phi.block:
+ %res = phi i32 [1, %entry], [1, %entry], [42, %next]
+ ret i32 12
+}
+
+; Tests for indirect br.
+; CHECK-LABEL: name: indirectbr
+; CHECK: body:
+;
+; ABI/constant lowering and IR-level entry basic block.
+; CHECK: {{bb.[0-9]+.entry}}:
+; Make sure we have one successor
+; CHECK-NEXT: successors: %[[BB_L1:bb.[0-9]+.L1]](0x80000000)
+; CHECK-NOT: G_BR
+;
+; Check that basic block L1 has two successors: BB_L1 (itself) and BB_L2.
+; CHECK: [[BB_L1]] (address-taken):
+; CHECK-NEXT: successors: %[[BB_L1]](0x40000000),
+; CHECK: %[[BB_L2:bb.[0-9]+.L2]](0x40000000)
+; CHECK: G_BRINDIRECT %{{[0-9]+}}(p0)
+;
+; Check that basic block L2 is the return basic block.
+; CHECK: [[BB_L2]] (address-taken):
+; CHECK-NEXT: RET_ReallyLR
+
+@indirectbr.L = internal unnamed_addr constant [3 x i8*] [i8* blockaddress(@indirectbr, %L1), i8* blockaddress(@indirectbr, %L2), i8* null], align 8
+
+define void @indirectbr() {
+entry:
+ br label %L1
+L1: ; preds = %entry, %L1
+ %i = phi i32 [ 0, %entry ], [ %inc, %L1 ]
+ %inc = add i32 %i, 1
+ %idxprom = zext i32 %i to i64
+ %arrayidx = getelementptr inbounds [3 x i8*], [3 x i8*]* @indirectbr.L, i64 0, i64 %idxprom
+ %brtarget = load i8*, i8** %arrayidx, align 8
+ indirectbr i8* %brtarget, [label %L1, label %L2]
+L2: ; preds = %L1
+ ret void
+}
+
; Tests for or.
; CHECK-LABEL: name: ori64
; CHECK: [[ARG1:%[0-9]+]](s64) = COPY %x0
@@ -293,11 +406,11 @@ define i64* @trivial_bitcast(i8* %a) {
; CHECK: [[A:%[0-9]+]](p0) = COPY %x0
; CHECK: G_BR %[[CAST:bb\.[0-9]+.cast]]
+; CHECK: [[END:bb\.[0-9]+.end]]:
+
; CHECK: [[CAST]]:
; CHECK: {{%[0-9]+}}(p0) = COPY [[A]]
-; CHECK: G_BR %[[END:bb\.[0-9]+.end]]
-
-; CHECK: [[END]]:
+; CHECK: G_BR %[[END]]
define i64* @trivial_bitcast_with_copy(i8* %a) {
br label %cast
@@ -375,7 +488,8 @@ define void @store(i64* %addr, i64 addrspace(42)* %addr42, i64 %val1, i64 %val2)
; CHECK-LABEL: name: intrinsics
; CHECK: [[CUR:%[0-9]+]](s32) = COPY %w0
; CHECK: [[BITS:%[0-9]+]](s32) = COPY %w1
-; CHECK: [[PTR:%[0-9]+]](p0) = G_INTRINSIC intrinsic(@llvm.returnaddress), 0
+; CHECK: [[CREG:%[0-9]+]](s32) = G_CONSTANT i32 0
+; CHECK: [[PTR:%[0-9]+]](p0) = G_INTRINSIC intrinsic(@llvm.returnaddress), [[CREG]]
; CHECK: [[PTR_VEC:%[0-9]+]](p0) = G_FRAME_INDEX %stack.0.ptr.vec
; CHECK: [[VEC:%[0-9]+]](<8 x s8>) = G_LOAD [[PTR_VEC]]
; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.st2), [[VEC]](<8 x s8>), [[VEC]](<8 x s8>), [[PTR]](p0)
@@ -433,8 +547,8 @@ define void @unreachable(i32 %a) {
; CHECK-LABEL: name: constant_int
; CHECK: [[IN:%[0-9]+]](s32) = COPY %w0
; CHECK: [[ONE:%[0-9]+]](s32) = G_CONSTANT i32 1
-; CHECK: G_BR
+; CHECK: {{bb.[0-9]+}}.next:
; CHECK: [[SUM1:%[0-9]+]](s32) = G_ADD [[IN]], [[ONE]]
; CHECK: [[SUM2:%[0-9]+]](s32) = G_ADD [[IN]], [[ONE]]
; CHECK: [[RES:%[0-9]+]](s32) = G_ADD [[SUM1]], [[SUM2]]
@@ -796,7 +910,7 @@ define void @test_extractvalue_agg(%struct.nested* %addr, {i8, i32}* %addr2) {
; CHECK-LABEL: name: test_insertvalue
; CHECK: [[VAL:%[0-9]+]](s32) = COPY %w1
; CHECK: [[STRUCT:%[0-9]+]](s128) = G_LOAD
-; CHECK: [[NEWSTRUCT:%[0-9]+]](s128) = G_INSERT [[STRUCT]](s128), [[VAL]](s32), 64
+; CHECK: [[NEWSTRUCT:%[0-9]+]](s128) = G_INSERT [[STRUCT]], [[VAL]](s32), 64
; CHECK: G_STORE [[NEWSTRUCT]](s128),
define void @test_insertvalue(%struct.nested* %addr, i32 %val) {
%struct = load %struct.nested, %struct.nested* %addr
@@ -805,10 +919,30 @@ define void @test_insertvalue(%struct.nested* %addr, i32 %val) {
ret void
}
+define [1 x i64] @test_trivial_insert([1 x i64] %s, i64 %val) {
+; CHECK-LABEL: name: test_trivial_insert
+; CHECK: [[STRUCT:%[0-9]+]](s64) = COPY %x0
+; CHECK: [[VAL:%[0-9]+]](s64) = COPY %x1
+; CHECK: [[RES:%[0-9]+]](s64) = COPY [[VAL]](s64)
+; CHECK: %x0 = COPY [[RES]]
+ %res = insertvalue [1 x i64] %s, i64 %val, 0
+ ret [1 x i64] %res
+}
+
+define [1 x i8*] @test_trivial_insert_ptr([1 x i8*] %s, i8* %val) {
+; CHECK-LABEL: name: test_trivial_insert_ptr
+; CHECK: [[STRUCT:%[0-9]+]](s64) = COPY %x0
+; CHECK: [[VAL:%[0-9]+]](p0) = COPY %x1
+; CHECK: [[RES:%[0-9]+]](s64) = G_PTRTOINT [[VAL]](p0)
+; CHECK: %x0 = COPY [[RES]]
+ %res = insertvalue [1 x i8*] %s, i8* %val, 0
+ ret [1 x i8*] %res
+}
+
; CHECK-LABEL: name: test_insertvalue_agg
; CHECK: [[SMALLSTRUCT:%[0-9]+]](s64) = G_LOAD
; CHECK: [[STRUCT:%[0-9]+]](s128) = G_LOAD
-; CHECK: [[RES:%[0-9]+]](s128) = G_INSERT [[STRUCT]](s128), [[SMALLSTRUCT]](s64), 32
+; CHECK: [[RES:%[0-9]+]](s128) = G_INSERT [[STRUCT]], [[SMALLSTRUCT]](s64), 32
; CHECK: G_STORE [[RES]](s128)
define void @test_insertvalue_agg(%struct.nested* %addr, {i8, i32}* %addr2) {
%smallstruct = load {i8, i32}, {i8, i32}* %addr2
@@ -840,6 +974,30 @@ define i8* @test_select_ptr(i1 %tst, i8* %lhs, i8* %rhs) {
ret i8* %res
}
+; CHECK-LABEL: name: test_select_vec
+; CHECK: [[TST:%[0-9]+]](s1) = COPY %w0
+; CHECK: [[LHS:%[0-9]+]](<4 x s32>) = COPY %q0
+; CHECK: [[RHS:%[0-9]+]](<4 x s32>) = COPY %q1
+; CHECK: [[RES:%[0-9]+]](<4 x s32>) = G_SELECT [[TST]](s1), [[LHS]], [[RHS]]
+; CHECK: %q0 = COPY [[RES]]
+define <4 x i32> @test_select_vec(i1 %tst, <4 x i32> %lhs, <4 x i32> %rhs) {
+ %res = select i1 %tst, <4 x i32> %lhs, <4 x i32> %rhs
+ ret <4 x i32> %res
+}
+
+; CHECK-LABEL: name: test_vselect_vec
+; CHECK: [[TST32:%[0-9]+]](<4 x s32>) = COPY %q0
+; CHECK: [[LHS:%[0-9]+]](<4 x s32>) = COPY %q1
+; CHECK: [[RHS:%[0-9]+]](<4 x s32>) = COPY %q2
+; CHECK: [[TST:%[0-9]+]](<4 x s1>) = G_TRUNC [[TST32]](<4 x s32>)
+; CHECK: [[RES:%[0-9]+]](<4 x s32>) = G_SELECT [[TST]](<4 x s1>), [[LHS]], [[RHS]]
+; CHECK: %q0 = COPY [[RES]]
+define <4 x i32> @test_vselect_vec(<4 x i32> %tst32, <4 x i32> %lhs, <4 x i32> %rhs) {
+ %tst = trunc <4 x i32> %tst32 to <4 x i1>
+ %res = select <4 x i1> %tst, <4 x i32> %lhs, <4 x i32> %rhs
+ ret <4 x i32> %res
+}
+
; CHECK-LABEL: name: test_fptosi
; CHECK: [[FPADDR:%[0-9]+]](p0) = COPY %x0
; CHECK: [[FP:%[0-9]+]](s32) = G_LOAD [[FPADDR]](p0)
@@ -927,6 +1085,19 @@ define void @float_comparison(float* %a.addr, float* %b.addr, i1* %bool.addr) {
ret void
}
+; CHECK-LABEL: name: trivial_float_comparison
+; CHECK: [[ENTRY_R1:%[0-9]+]](s1) = G_CONSTANT i1 false
+; CHECK: [[ENTRY_R2:%[0-9]+]](s1) = G_CONSTANT i1 true
+; CHECK: [[R1:%[0-9]+]](s1) = COPY [[ENTRY_R1]](s1)
+; CHECK: [[R2:%[0-9]+]](s1) = COPY [[ENTRY_R2]](s1)
+; CHECK: G_ADD [[R1]], [[R2]]
+define i1 @trivial_float_comparison(double %a, double %b) {
+ %r1 = fcmp false double %a, %b
+ %r2 = fcmp true double %a, %b
+ %sum = add i1 %r1, %r2
+ ret i1 %sum
+}
+
@var = global i32 0
define i32* @test_global() {
@@ -969,6 +1140,34 @@ define void @test_memcpy(i8* %dst, i8* %src, i64 %size) {
ret void
}
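+; As with memcpy above, memmove and memset are not expanded inline here: the
+; intrinsic arguments are forwarded in the normal argument registers and the
+; call is emitted as a plain libcall.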
+declare void @llvm.memmove.p0i8.p0i8.i64(i8*, i8*, i64, i32 %align, i1 %volatile)
+define void @test_memmove(i8* %dst, i8* %src, i64 %size) {
+; CHECK-LABEL: name: test_memmove
+; CHECK: [[DST:%[0-9]+]](p0) = COPY %x0
+; CHECK: [[SRC:%[0-9]+]](p0) = COPY %x1
+; CHECK: [[SIZE:%[0-9]+]](s64) = COPY %x2
+; CHECK: %x0 = COPY [[DST]]
+; CHECK: %x1 = COPY [[SRC]]
+; CHECK: %x2 = COPY [[SIZE]]
+; CHECK: BL $memmove, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit %x1, implicit %x2
+ call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i32 1, i1 0)
+ ret void
+}
+
+declare void @llvm.memset.p0i8.i64(i8*, i8, i64, i32 %align, i1 %volatile)
+define void @test_memset(i8* %dst, i8 %val, i64 %size) {
+; CHECK-LABEL: name: test_memset
+; CHECK: [[DST:%[0-9]+]](p0) = COPY %x0
+; CHECK: [[SRC:%[0-9]+]](s8) = COPY %w1
+; CHECK: [[SIZE:%[0-9]+]](s64) = COPY %x2
+; CHECK: %x0 = COPY [[DST]]
+; CHECK: %w1 = COPY [[SRC]]
+; CHECK: %x2 = COPY [[SIZE]]
+; CHECK: BL $memset, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit %w1, implicit %x2
+ call void @llvm.memset.p0i8.i64(i8* %dst, i8 %val, i64 %size, i32 1, i1 0)
+ ret void
+}
+
declare i64 @llvm.objectsize.i64(i8*, i1)
declare i32 @llvm.objectsize.i32(i8*, i1)
define void @test_objectsize(i8* %addr0, i8* %addr1) {
@@ -1004,9 +1203,341 @@ define i8* @test_const_placement() {
; CHECK: bb.{{[0-9]+}} (%ir-block.{{[0-9]+}}):
; CHECK: [[VAL_INT:%[0-9]+]](s32) = G_CONSTANT i32 42
; CHECK: [[VAL:%[0-9]+]](p0) = G_INTTOPTR [[VAL_INT]](s32)
-; CHECK: G_BR
+; CHECK: {{bb.[0-9]+}}.next:
br label %next
next:
ret i8* inttoptr(i32 42 to i8*)
}
+
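+; va_end has no work to do on AArch64 (the va_list needs no cleanup), so the
+; intrinsic is dropped entirely during translation.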
+declare void @llvm.va_end(i8*)
+define void @test_va_end(i8* %list) {
+; CHECK-LABEL: name: test_va_end
+; CHECK-NOT: va_end
+; CHECK-NOT: INTRINSIC
+; CHECK: RET_ReallyLR
+ call void @llvm.va_end(i8* %list)
+ ret void
+}
+
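+; The immediate operand on G_VAARG below is the ABI alignment of the va_arg
+; slot for each type: 8 for i64, 1 for i8 and 16 for i128.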
+define void @test_va_arg(i8* %list) {
+; CHECK-LABEL: name: test_va_arg
+; CHECK: [[LIST:%[0-9]+]](p0) = COPY %x0
+; CHECK: G_VAARG [[LIST]](p0), 8
+; CHECK: G_VAARG [[LIST]](p0), 1
+; CHECK: G_VAARG [[LIST]](p0), 16
+
+ %v0 = va_arg i8* %list, i64
+ %v1 = va_arg i8* %list, i8
+ %v2 = va_arg i8* %list, i128
+ ret void
+}
+
+declare float @llvm.pow.f32(float, float)
+define float @test_pow_intrin(float %l, float %r) {
+; CHECK-LABEL: name: test_pow_intrin
+; CHECK: [[LHS:%[0-9]+]](s32) = COPY %s0
+; CHECK: [[RHS:%[0-9]+]](s32) = COPY %s1
+; CHECK: [[RES:%[0-9]+]](s32) = G_FPOW [[LHS]], [[RHS]]
+; CHECK: %s0 = COPY [[RES]]
+ %res = call float @llvm.pow.f32(float %l, float %r)
+ ret float %res
+}
+
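+; Lifetime markers are simply dropped during translation, leaving only the
+; return.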
+declare void @llvm.lifetime.start.p0i8(i64, i8*)
+declare void @llvm.lifetime.end.p0i8(i64, i8*)
+define void @test_lifetime_intrin() {
+; CHECK-LABEL: name: test_lifetime_intrin
+; CHECK: RET_ReallyLR
+ %slot = alloca i8, i32 4
+ call void @llvm.lifetime.start.p0i8(i64 0, i8* %slot)
+ call void @llvm.lifetime.end.p0i8(i64 0, i8* %slot)
+ ret void
+}
+
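+; Atomic loads and stores reuse the ordinary G_LOAD/G_STORE opcodes; the
+; ordering (and the singlethread synchronization scope) is recorded on the
+; memory operand instead.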
+define void @test_load_store_atomics(i8* %addr) {
+; CHECK-LABEL: name: test_load_store_atomics
+; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x0
+; CHECK: [[V0:%[0-9]+]](s8) = G_LOAD [[ADDR]](p0) :: (load unordered 1 from %ir.addr)
+; CHECK: G_STORE [[V0]](s8), [[ADDR]](p0) :: (store monotonic 1 into %ir.addr)
+; CHECK: [[V1:%[0-9]+]](s8) = G_LOAD [[ADDR]](p0) :: (load acquire 1 from %ir.addr)
+; CHECK: G_STORE [[V1]](s8), [[ADDR]](p0) :: (store release 1 into %ir.addr)
+; CHECK: [[V2:%[0-9]+]](s8) = G_LOAD [[ADDR]](p0) :: (load singlethread seq_cst 1 from %ir.addr)
+; CHECK: G_STORE [[V2]](s8), [[ADDR]](p0) :: (store singlethread monotonic 1 into %ir.addr)
+ %v0 = load atomic i8, i8* %addr unordered, align 1
+ store atomic i8 %v0, i8* %addr monotonic, align 1
+
+ %v1 = load atomic i8, i8* %addr acquire, align 1
+ store atomic i8 %v1, i8* %addr release, align 1
+
+ %v2 = load atomic i8, i8* %addr singlethread seq_cst, align 1
+ store atomic i8 %v2, i8* %addr singlethread monotonic, align 1
+
+ ret void
+}
+
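+; 'fsub -0.0, x' is the canonical IR spelling of a negation; the translator
+; recognizes the idiom and emits G_FNEG directly.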
+define float @test_fneg_f32(float %x) {
+; CHECK-LABEL: name: test_fneg_f32
+; CHECK: [[ARG:%[0-9]+]](s32) = COPY %s0
+; CHECK: [[RES:%[0-9]+]](s32) = G_FNEG [[ARG]]
+; CHECK: %s0 = COPY [[RES]](s32)
+ %neg = fsub float -0.000000e+00, %x
+ ret float %neg
+}
+
+define double @test_fneg_f64(double %x) {
+; CHECK-LABEL: name: test_fneg_f64
+; CHECK: [[ARG:%[0-9]+]](s64) = COPY %d0
+; CHECK: [[RES:%[0-9]+]](s64) = G_FNEG [[ARG]]
+; CHECK: %d0 = COPY [[RES]](s64)
+ %neg = fsub double -0.000000e+00, %x
+ ret double %neg
+}
+
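+; The immediate after the asm string is the extra-info bitfield: the
+; sideeffect call sets bit 0 (has side effects), the plain call does not.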
+define void @test_trivial_inlineasm() {
+; CHECK-LABEL: name: test_trivial_inlineasm
+; CHECK: INLINEASM $wibble, 1
+; CHECK: INLINEASM $wibble, 0
+ call void asm sideeffect "wibble", ""()
+ call void asm "wibble", ""()
+ ret void
+}
+
+define <2 x i32> @test_insertelement(<2 x i32> %vec, i32 %elt, i32 %idx){
+; CHECK-LABEL: name: test_insertelement
+; CHECK: [[VEC:%[0-9]+]](<2 x s32>) = COPY %d0
+; CHECK: [[ELT:%[0-9]+]](s32) = COPY %w0
+; CHECK: [[IDX:%[0-9]+]](s32) = COPY %w1
+; CHECK: [[RES:%[0-9]+]](<2 x s32>) = G_INSERT_VECTOR_ELT [[VEC]], [[ELT]](s32), [[IDX]](s32)
+; CHECK: %d0 = COPY [[RES]](<2 x s32>)
+ %res = insertelement <2 x i32> %vec, i32 %elt, i32 %idx
+ ret <2 x i32> %res
+}
+
+define i32 @test_extractelement(<2 x i32> %vec, i32 %idx) {
+; CHECK-LABEL: name: test_extractelement
+; CHECK: [[VEC:%[0-9]+]](<2 x s32>) = COPY %d0
+; CHECK: [[IDX:%[0-9]+]](s32) = COPY %w0
+; CHECK: [[RES:%[0-9]+]](s32) = G_EXTRACT_VECTOR_ELT [[VEC]](<2 x s32>), [[IDX]](s32)
+; CHECK: %w0 = COPY [[RES]](s32)
+ %res = extractelement <2 x i32> %vec, i32 %idx
+ ret i32 %res
+}
+
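+; <1 x i32> values are translated as plain scalars, so neither the insert nor
+; the extract below produces a vector-element operation.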
+define i32 @test_singleelementvector(i32 %elt){
+; CHECK-LABEL: name: test_singleelementvector
+; CHECK: [[ELT:%[0-9]+]](s32) = COPY %w0
+; CHECK-NOT: G_INSERT_VECTOR_ELT
+; CHECK-NOT: G_EXTRACT_VECTOR_ELT
+; CHECK: %w0 = COPY [[ELT]](s32)
+ %vec = insertelement <1 x i32> undef, i32 %elt, i32 0
+ %res = extractelement <1 x i32> %vec, i32 0
+ ret i32 %res
+}
+
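+; Constant vectors (zeroinitializer and constant-data vectors alike) are built
+; by materializing each element as a scalar G_CONSTANT/G_FCONSTANT and
+; combining the pieces with G_MERGE_VALUES.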
+define <2 x i32> @test_constantaggzerovector_v2i32() {
+; CHECK-LABEL: name: test_constantaggzerovector_v2i32
+; CHECK: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+; CHECK: [[VEC:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[ZERO]](s32), [[ZERO]](s32)
+; CHECK: %d0 = COPY [[VEC]](<2 x s32>)
+ ret <2 x i32> zeroinitializer
+}
+
+define <2 x float> @test_constantaggzerovector_v2f32() {
+; CHECK-LABEL: name: test_constantaggzerovector_v2f32
+; CHECK: [[ZERO:%[0-9]+]](s32) = G_FCONSTANT float 0.000000e+00
+; CHECK: [[VEC:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[ZERO]](s32), [[ZERO]](s32)
+; CHECK: %d0 = COPY [[VEC]](<2 x s32>)
+ ret <2 x float> zeroinitializer
+}
+
+define i32 @test_constantaggzerovector_v3i32() {
+; CHECK-LABEL: name: test_constantaggzerovector_v3i32
+; CHECK: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0
+; CHECK: [[VEC:%[0-9]+]](<3 x s32>) = G_MERGE_VALUES [[ZERO]](s32), [[ZERO]](s32), [[ZERO]](s32)
+; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<3 x s32>)
+ %elt = extractelement <3 x i32> zeroinitializer, i32 1
+ ret i32 %elt
+}
+
+define <2 x i32> @test_constantdatavector_v2i32() {
+; CHECK-LABEL: name: test_constantdatavector_v2i32
+; CHECK: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
+; CHECK: [[C2:%[0-9]+]](s32) = G_CONSTANT i32 2
+; CHECK: [[VEC:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C2]](s32)
+; CHECK: %d0 = COPY [[VEC]](<2 x s32>)
+ ret <2 x i32> <i32 1, i32 2>
+}
+
+define i32 @test_constantdatavector_v3i32() {
+; CHECK-LABEL: name: test_constantdatavector_v3i32
+; CHECK: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
+; CHECK: [[C2:%[0-9]+]](s32) = G_CONSTANT i32 2
+; CHECK: [[C3:%[0-9]+]](s32) = G_CONSTANT i32 3
+; CHECK: [[VEC:%[0-9]+]](<3 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C2]](s32), [[C3]](s32)
+; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<3 x s32>)
+ %elt = extractelement <3 x i32> <i32 1, i32 2, i32 3>, i32 1
+ ret i32 %elt
+}
+
+define <4 x i32> @test_constantdatavector_v4i32() {
+; CHECK-LABEL: name: test_constantdatavector_v4i32
+; CHECK: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
+; CHECK: [[C2:%[0-9]+]](s32) = G_CONSTANT i32 2
+; CHECK: [[C3:%[0-9]+]](s32) = G_CONSTANT i32 3
+; CHECK: [[C4:%[0-9]+]](s32) = G_CONSTANT i32 4
+; CHECK: [[VEC:%[0-9]+]](<4 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C2]](s32), [[C3]](s32), [[C4]](s32)
+; CHECK: %q0 = COPY [[VEC]](<4 x s32>)
+ ret <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+}
+
+define <2 x double> @test_constantdatavector_v2f64() {
+; CHECK-LABEL: name: test_constantdatavector_v2f64
+; CHECK: [[FC1:%[0-9]+]](s64) = G_FCONSTANT double 1.000000e+00
+; CHECK: [[FC2:%[0-9]+]](s64) = G_FCONSTANT double 2.000000e+00
+; CHECK: [[VEC:%[0-9]+]](<2 x s64>) = G_MERGE_VALUES [[FC1]](s64), [[FC2]](s64)
+; CHECK: %q0 = COPY [[VEC]](<2 x s64>)
+ ret <2 x double> <double 1.0, double 2.0>
+}
+
+define i32 @test_constantaggzerovector_v1s32(i32 %arg){
+; CHECK-LABEL: name: test_constantaggzerovector_v1s32
+; CHECK: [[ARG:%[0-9]+]](s32) = COPY %w0
+; CHECK: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
+; CHECK-NOT: G_MERGE_VALUES
+; CHECK: G_ADD [[ARG]], [[C0]]
+ %vec = insertelement <1 x i32> undef, i32 %arg, i32 0
+ %add = add <1 x i32> %vec, zeroinitializer
+ %res = extractelement <1 x i32> %add, i32 0
+ ret i32 %res
+}
+
+define i32 @test_constantdatavector_v1s32(i32 %arg){
+; CHECK-LABEL: name: test_constantdatavector_v1s32
+; CHECK: [[ARG:%[0-9]+]](s32) = COPY %w0
+; CHECK: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
+; CHECK-NOT: G_MERGE_VALUES
+; CHECK: G_ADD [[ARG]], [[C1]]
+ %vec = insertelement <1 x i32> undef, i32 %arg, i32 0
+ %add = add <1 x i32> %vec, <i32 1>
+ %res = extractelement <1 x i32> %add, i32 0
+ ret i32 %res
+}
+
+declare ghccc float @different_call_conv_target(float %x)
+define float @test_different_call_conv_target(float %x) {
+; CHECK-LABEL: name: test_different_call_conv
+; CHECK: [[X:%[0-9]+]](s32) = COPY %s0
+; CHECK: %s8 = COPY [[X]]
+; CHECK: BL @different_call_conv_target, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %s8, implicit-def %s0
+ %res = call ghccc float @different_call_conv_target(float %x)
+ ret float %res
+}
+
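+; Shuffle masks are materialized as constant vectors of s32 indices and passed
+; to G_SHUFFLE_VECTOR as its third operand; a <1 x ...> source or result is
+; treated as a scalar, as above.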
+define <2 x i32> @test_shufflevector_s32_v2s32(i32 %arg) {
+; CHECK-LABEL: name: test_shufflevector_s32_v2s32
+; CHECK: [[ARG:%[0-9]+]](s32) = COPY %w0
+; CHECK-DAG: [[UNDEF:%[0-9]+]](s32) = IMPLICIT_DEF
+; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
+; CHECK-DAG: [[MASK:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C0]](s32)
+; CHECK: [[VEC:%[0-9]+]](<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](s32), [[UNDEF]], [[MASK]](<2 x s32>)
+; CHECK: %d0 = COPY [[VEC]](<2 x s32>)
+ %vec = insertelement <1 x i32> undef, i32 %arg, i32 0
+ %res = shufflevector <1 x i32> %vec, <1 x i32> undef, <2 x i32> zeroinitializer
+ ret <2 x i32> %res
+}
+
+define i32 @test_shufflevector_v2s32_s32(<2 x i32> %arg) {
+; CHECK-LABEL: name: test_shufflevector_v2s32_s32
+; CHECK: [[ARG:%[0-9]+]](<2 x s32>) = COPY %d0
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
+; CHECK: [[RES:%[0-9]+]](s32) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], [[C1]](s32)
+; CHECK: %w0 = COPY [[RES]](s32)
+ %vec = shufflevector <2 x i32> %arg, <2 x i32> undef, <1 x i32> <i32 1>
+ %res = extractelement <1 x i32> %vec, i32 0
+ ret i32 %res
+}
+
+define <2 x i32> @test_shufflevector_v2s32_v2s32(<2 x i32> %arg) {
+; CHECK-LABEL: name: test_shufflevector_v2s32_v2s32
+; CHECK: [[ARG:%[0-9]+]](<2 x s32>) = COPY %d0
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
+; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
+; CHECK-DAG: [[MASK:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C0]](s32)
+; CHECK: [[VEC:%[0-9]+]](<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], [[MASK]](<2 x s32>)
+; CHECK: %d0 = COPY [[VEC]](<2 x s32>)
+ %res = shufflevector <2 x i32> %arg, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
+ ret <2 x i32> %res
+}
+
+define i32 @test_shufflevector_v2s32_v3s32(<2 x i32> %arg) {
+; CHECK-LABEL: name: test_shufflevector_v2s32_v3s32
+; CHECK: [[ARG:%[0-9]+]](<2 x s32>) = COPY %d0
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
+; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
+; CHECK-DAG: [[MASK:%[0-9]+]](<3 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C0]](s32), [[C1]](s32)
+; CHECK: [[VEC:%[0-9]+]](<3 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], [[MASK]](<3 x s32>)
+; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<3 x s32>)
+ %vec = shufflevector <2 x i32> %arg, <2 x i32> undef, <3 x i32> <i32 1, i32 0, i32 1>
+ %res = extractelement <3 x i32> %vec, i32 0
+ ret i32 %res
+}
+
+define <4 x i32> @test_shufflevector_v2s32_v4s32(<2 x i32> %arg1, <2 x i32> %arg2) {
+; CHECK-LABEL: name: test_shufflevector_v2s32_v4s32
+; CHECK: [[ARG1:%[0-9]+]](<2 x s32>) = COPY %d0
+; CHECK: [[ARG2:%[0-9]+]](<2 x s32>) = COPY %d1
+; CHECK: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
+; CHECK: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
+; CHECK: [[C2:%[0-9]+]](s32) = G_CONSTANT i32 2
+; CHECK: [[C3:%[0-9]+]](s32) = G_CONSTANT i32 3
+; CHECK: [[MASK:%[0-9]+]](<4 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C1]](s32), [[C2]](s32), [[C3]](s32)
+; CHECK: [[VEC:%[0-9]+]](<4 x s32>) = G_SHUFFLE_VECTOR [[ARG1]](<2 x s32>), [[ARG2]], [[MASK]](<4 x s32>)
+; CHECK: %q0 = COPY [[VEC]](<4 x s32>)
+ %res = shufflevector <2 x i32> %arg1, <2 x i32> %arg2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %res
+}
+
+define <2 x i32> @test_shufflevector_v4s32_v2s32(<4 x i32> %arg) {
+; CHECK-LABEL: name: test_shufflevector_v4s32_v2s32
+; CHECK: [[ARG:%[0-9]+]](<4 x s32>) = COPY %q0
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<4 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
+; CHECK-DAG: [[C3:%[0-9]+]](s32) = G_CONSTANT i32 3
+; CHECK-DAG: [[MASK:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C3]](s32)
+; CHECK: [[VEC:%[0-9]+]](<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<4 x s32>), [[UNDEF]], [[MASK]](<2 x s32>)
+; CHECK: %d0 = COPY [[VEC]](<2 x s32>)
+ %res = shufflevector <4 x i32> %arg, <4 x i32> undef, <2 x i32> <i32 1, i32 3>
+ ret <2 x i32> %res
+}
+
+define <16 x i8> @test_shufflevector_v8s8_v16s8(<8 x i8> %arg1, <8 x i8> %arg2) {
+; CHECK-LABEL: name: test_shufflevector_v8s8_v16s8
+; CHECK: [[ARG1:%[0-9]+]](<8 x s8>) = COPY %d0
+; CHECK: [[ARG2:%[0-9]+]](<8 x s8>) = COPY %d1
+; CHECK: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
+; CHECK: [[C8:%[0-9]+]](s32) = G_CONSTANT i32 8
+; CHECK: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
+; CHECK: [[C9:%[0-9]+]](s32) = G_CONSTANT i32 9
+; CHECK: [[C2:%[0-9]+]](s32) = G_CONSTANT i32 2
+; CHECK: [[C10:%[0-9]+]](s32) = G_CONSTANT i32 10
+; CHECK: [[C3:%[0-9]+]](s32) = G_CONSTANT i32 3
+; CHECK: [[C11:%[0-9]+]](s32) = G_CONSTANT i32 11
+; CHECK: [[C4:%[0-9]+]](s32) = G_CONSTANT i32 4
+; CHECK: [[C12:%[0-9]+]](s32) = G_CONSTANT i32 12
+; CHECK: [[C5:%[0-9]+]](s32) = G_CONSTANT i32 5
+; CHECK: [[C13:%[0-9]+]](s32) = G_CONSTANT i32 13
+; CHECK: [[C6:%[0-9]+]](s32) = G_CONSTANT i32 6
+; CHECK: [[C14:%[0-9]+]](s32) = G_CONSTANT i32 14
+; CHECK: [[C7:%[0-9]+]](s32) = G_CONSTANT i32 7
+; CHECK: [[C15:%[0-9]+]](s32) = G_CONSTANT i32 15
+; CHECK: [[MASK:%[0-9]+]](<16 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C8]](s32), [[C1]](s32), [[C9]](s32), [[C2]](s32), [[C10]](s32), [[C3]](s32), [[C11]](s32), [[C4]](s32), [[C12]](s32), [[C5]](s32), [[C13]](s32), [[C6]](s32), [[C14]](s32), [[C7]](s32), [[C15]](s32)
+; CHECK: [[VEC:%[0-9]+]](<16 x s8>) = G_SHUFFLE_VECTOR [[ARG1]](<8 x s8>), [[ARG2]], [[MASK]](<16 x s32>)
+; CHECK: %q0 = COPY [[VEC]](<16 x s8>)
+ %res = shufflevector <8 x i8> %arg1, <8 x i8> %arg2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <16 x i8> %res
+}
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir b/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
index 4c67c0daaf74..739fdd5cb4c5 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
@@ -1,5 +1,5 @@
-# RUN: llc -O0 -run-pass=regbankselect -global-isel %s -o - 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=FAST
-# RUN: llc -O0 -run-pass=regbankselect -global-isel %s -regbankselect-greedy -o - 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=GREEDY
+# RUN: llc -O0 -run-pass=regbankselect -global-isel %s -o - -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=FAST
+# RUN: llc -O0 -run-pass=regbankselect -global-isel %s -regbankselect-greedy -o - -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=GREEDY
--- |
; ModuleID = 'generic-virtual-registers-type-error.mir'
@@ -315,8 +315,8 @@ body: |
; Fast mode tries to reuse the source of the copy for the destination.
; Now, the default mapping says that %0 and %1 need to be in FPR.
; The repairing code inserts two copies to materialize that.
- ; FAST-NEXT: %3(s64) = COPY %0
- ; FAST-NEXT: %4(s64) = COPY %1
+ ; FAST-NEXT: %3(<2 x s32>) = COPY %0
+ ; FAST-NEXT: %4(<2 x s32>) = COPY %1
; The mapping of G_OR is on FPR.
; FAST-NEXT: %2(<2 x s32>) = G_OR %3, %4
@@ -362,13 +362,13 @@ body: |
; Fast mode tries to reuse the source of the copy for the destination.
; Now, the default mapping says that %0 and %1 need to be in FPR.
; The repairing code inserts two copies to materialize that.
- ; FAST-NEXT: %3(s64) = COPY %0
- ; FAST-NEXT: %4(s64) = COPY %1
+ ; FAST-NEXT: %3(<2 x s32>) = COPY %0
+ ; FAST-NEXT: %4(<2 x s32>) = COPY %1
; The mapping of G_OR is on FPR.
; FAST-NEXT: %2(<2 x s32>) = G_OR %3, %4
; Greedy mode remapped the instruction on the GPR bank.
- ; GREEDY-NEXT: %3(s64) = G_OR %0, %1
+ ; GREEDY-NEXT: %3(<2 x s32>) = G_OR %0, %1
; We need to keep %2 into FPR because we do not know anything about it.
; GREEDY-NEXT: %2(<2 x s32>) = COPY %3
%0(<2 x s32>) = COPY %x0
diff --git a/test/CodeGen/AArch64/GlobalISel/call-translator.ll b/test/CodeGen/AArch64/GlobalISel/call-translator.ll
index 7bedad38de1a..f8d95c88cc8f 100644
--- a/test/CodeGen/AArch64/GlobalISel/call-translator.ll
+++ b/test/CodeGen/AArch64/GlobalISel/call-translator.ll
@@ -1,7 +1,9 @@
; RUN: llc -mtriple=aarch64-linux-gnu -O0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - 2>&1 | FileCheck %s
; CHECK-LABEL: name: test_trivial_call
+; CHECK: ADJCALLSTACKDOWN 0, implicit-def %sp, implicit %sp
; CHECK: BL @trivial_callee, csr_aarch64_aapcs, implicit-def %lr
+; CHECK: ADJCALLSTACKUP 0, 0, implicit-def %sp, implicit %sp
declare void @trivial_callee()
define void @test_trivial_call() {
call void @trivial_callee()
@@ -61,7 +63,13 @@ define void @test_multiple_args(i64 %in) {
; CHECK: [[I64:%[0-9]+]](s64) = COPY %x0
; CHECK: [[I8:%[0-9]+]](s8) = COPY %w1
; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
-; CHECK: [[ARG:%[0-9]+]](s192) = G_SEQUENCE [[DBL]](s64), 0, [[I64]](s64), 64, [[I8]](s8), 128
+
+; CHECK: [[UNDEF:%[0-9]+]](s192) = IMPLICIT_DEF
+; CHECK: [[ARG0:%[0-9]+]](s192) = G_INSERT [[UNDEF]], [[DBL]](s64), 0
+; CHECK: [[ARG1:%[0-9]+]](s192) = G_INSERT [[ARG0]], [[I64]](s64), 64
+; CHECK: [[ARG2:%[0-9]+]](s192) = G_INSERT [[ARG1]], [[I8]](s8), 128
+; CHECK: [[ARG:%[0-9]+]](s192) = COPY [[ARG2]]
+
; CHECK: G_STORE [[ARG]](s192), [[ADDR]](p0)
; CHECK: RET_ReallyLR
define void @test_struct_formal({double, i64, i8} %in, {double, i64, i8}* %addr) {
@@ -73,7 +81,11 @@ define void @test_struct_formal({double, i64, i8} %in, {double, i64, i8}* %addr)
; CHECK-LABEL: name: test_struct_return
; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x0
; CHECK: [[VAL:%[0-9]+]](s192) = G_LOAD [[ADDR]](p0)
-; CHECK: [[DBL:%[0-9]+]](s64), [[I64:%[0-9]+]](s64), [[I32:%[0-9]+]](s32) = G_EXTRACT [[VAL]](s192), 0, 64, 128
+
+; CHECK: [[DBL:%[0-9]+]](s64) = G_EXTRACT [[VAL]](s192), 0
+; CHECK: [[I64:%[0-9]+]](s64) = G_EXTRACT [[VAL]](s192), 64
+; CHECK: [[I32:%[0-9]+]](s32) = G_EXTRACT [[VAL]](s192), 128
+
; CHECK: %d0 = COPY [[DBL]](s64)
; CHECK: %x0 = COPY [[I64]](s64)
; CHECK: %w1 = COPY [[I32]](s32)
@@ -84,8 +96,14 @@ define {double, i64, i32} @test_struct_return({double, i64, i32}* %addr) {
}
; CHECK-LABEL: name: test_arr_call
+; CHECK: hasCalls: true
; CHECK: [[ARG:%[0-9]+]](s256) = G_LOAD
-; CHECK: [[E0:%[0-9]+]](s64), [[E1:%[0-9]+]](s64), [[E2:%[0-9]+]](s64), [[E3:%[0-9]+]](s64) = G_EXTRACT [[ARG]](s256), 0, 64, 128, 192
+
+; CHECK: [[E0:%[0-9]+]](s64) = G_EXTRACT [[ARG]](s256), 0
+; CHECK: [[E1:%[0-9]+]](s64) = G_EXTRACT [[ARG]](s256), 64
+; CHECK: [[E2:%[0-9]+]](s64) = G_EXTRACT [[ARG]](s256), 128
+; CHECK: [[E3:%[0-9]+]](s64) = G_EXTRACT [[ARG]](s256), 192
+
; CHECK: %x0 = COPY [[E0]](s64)
; CHECK: %x1 = COPY [[E1]](s64)
; CHECK: %x2 = COPY [[E2]](s64)
@@ -168,6 +186,7 @@ define void @test_stack_slots([8 x i64], i64 %lhs, i64 %rhs, i64* %addr) {
; CHECK: [[C42:%[0-9]+]](s64) = G_CONSTANT i64 42
; CHECK: [[C12:%[0-9]+]](s64) = G_CONSTANT i64 12
; CHECK: [[PTR:%[0-9]+]](p0) = G_CONSTANT i64 0
+; CHECK: ADJCALLSTACKDOWN 24, implicit-def %sp, implicit %sp
; CHECK: [[SP:%[0-9]+]](p0) = COPY %sp
; CHECK: [[C42_OFFS:%[0-9]+]](s64) = G_CONSTANT i64 0
; CHECK: [[C42_LOC:%[0-9]+]](p0) = G_GEP [[SP]], [[C42_OFFS]](s64)
@@ -181,6 +200,7 @@ define void @test_stack_slots([8 x i64], i64 %lhs, i64 %rhs, i64* %addr) {
; CHECK: [[PTR_LOC:%[0-9]+]](p0) = G_GEP [[SP]], [[PTR_OFFS]](s64)
; CHECK: G_STORE [[PTR]](p0), [[PTR_LOC]](p0) :: (store 8 into stack + 16, align 0)
; CHECK: BL @test_stack_slots
+; CHECK: ADJCALLSTACKUP 24, 0, implicit-def %sp, implicit %sp
define void @test_call_stack() {
call void @test_stack_slots([8 x i64] undef, i64 42, i64 12, i64* null)
ret void
diff --git a/test/CodeGen/AArch64/GlobalISel/debug-insts.ll b/test/CodeGen/AArch64/GlobalISel/debug-insts.ll
new file mode 100644
index 000000000000..5a76661180f2
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/debug-insts.ll
@@ -0,0 +1,68 @@
+; RUN: llc -global-isel -mtriple=aarch64 %s -stop-after=irtranslator -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64 -global-isel --global-isel-abort=0 %s -o /dev/null
+
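+; dbg.declare on a static alloca is recorded on the stack slot itself (the
+; di-variable/di-expression/di-location fields); the other forms become
+; DBG_VALUE instructions referring to vregs, constants or _ (undef).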
+; CHECK-LABEL: name: debug_declare
+; CHECK: stack:
+; CHECK: - { id: {{.*}}, name: in.addr, offset: {{.*}}, size: {{.*}}, alignment: {{.*}}, di-variable: '!11',
+; CHECK-NEXT: di-expression: '!12', di-location: '!13' }
+; CHECK: DBG_VALUE debug-use %0(s32), debug-use _, !11, !12, debug-location !13
+define void @debug_declare(i32 %in) #0 !dbg !7 {
+entry:
+ %in.addr = alloca i32, align 4
+ store i32 %in, i32* %in.addr, align 4
+ call void @llvm.dbg.declare(metadata i32* %in.addr, metadata !11, metadata !12), !dbg !13
+ call void @llvm.dbg.declare(metadata i32 %in, metadata !11, metadata !12), !dbg !13
+ ret void, !dbg !14
+}
+
+; CHECK-LABEL: name: debug_declare_vla
+; CHECK: DBG_VALUE debug-use %{{[0-9]+}}(p0), debug-use _, !11, !12, debug-location !13
+define void @debug_declare_vla(i32 %in) #0 !dbg !7 {
+entry:
+ %vla.addr = alloca i32, i32 %in
+ call void @llvm.dbg.declare(metadata i32* %vla.addr, metadata !11, metadata !12), !dbg !13
+ ret void, !dbg !14
+}
+
+; CHECK-LABEL: name: debug_value
+; CHECK: [[IN:%[0-9]+]](s32) = COPY %w0
+define void @debug_value(i32 %in) #0 !dbg !7 {
+ %addr = alloca i32
+; CHECK: DBG_VALUE debug-use [[IN]](s32), debug-use _, !11, !12, debug-location !13
+ call void @llvm.dbg.value(metadata i32 %in, i64 0, metadata !11, metadata !12), !dbg !13
+ store i32 %in, i32* %addr
+; CHECK: DBG_VALUE debug-use %1(p0), debug-use _, !11, !15, debug-location !13
+ call void @llvm.dbg.value(metadata i32* %addr, i64 0, metadata !11, metadata !15), !dbg !13
+; CHECK: DBG_VALUE 123, 0, !11, !12, debug-location !13
+ call void @llvm.dbg.value(metadata i32 123, i64 0, metadata !11, metadata !12), !dbg !13
+; CHECK: DBG_VALUE float 1.000000e+00, 0, !11, !12, debug-location !13
+ call void @llvm.dbg.value(metadata float 1.000000e+00, i64 0, metadata !11, metadata !12), !dbg !13
+; CHECK: DBG_VALUE _, 0, !11, !12, debug-location !13
+ call void @llvm.dbg.value(metadata i32* null, i64 0, metadata !11, metadata !12), !dbg !13
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 4.0.0 (trunk 289075) (llvm/trunk 289080)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "tmp.c", directory: "/Users/tim/llvm/build")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 1, !"PIC Level", i32 2}
+!6 = !{!"clang version 4.0.0 (trunk 289075) (llvm/trunk 289080)"}
+!7 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!8 = !DISubroutineType(types: !9)
+!9 = !{null, !10}
+!10 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!11 = !DILocalVariable(name: "in", arg: 1, scope: !7, file: !1, line: 1, type: !10)
+!12 = !DIExpression()
+!13 = !DILocation(line: 1, column: 14, scope: !7)
+!14 = !DILocation(line: 2, column: 1, scope: !7)
+!15 = !DIExpression(DW_OP_deref)
diff --git a/test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll b/test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll
new file mode 100644
index 000000000000..196910e96ce3
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll
@@ -0,0 +1,48 @@
+; RUN: llc -mtriple=aarch64 -global-isel %s -o - -stop-after=irtranslator | FileCheck %s
+
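+; Dynamic allocas are lowered by zero-extending the element count, scaling it
+; by the negated type size (the stack grows down), bumping %sp with G_GEP, and
+; masking the result with G_PTR_MASK to keep it sufficiently aligned; the i128
+; case below needs no mask because the scaled size preserves the alignment.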
+; CHECK-LABEL: name: test_simple_alloca
+; CHECK: [[NUMELTS:%[0-9]+]](s32) = COPY %w0
+; CHECK: [[TYPE_SIZE:%[0-9]+]](s64) = G_CONSTANT i64 -1
+; CHECK: [[NUMELTS_64:%[0-9]+]](s64) = G_ZEXT [[NUMELTS]](s32)
+; CHECK: [[NUMBYTES:%[0-9]+]](s64) = G_MUL [[NUMELTS_64]], [[TYPE_SIZE]]
+; CHECK: [[SP_TMP:%[0-9]+]](p0) = COPY %sp
+; CHECK: [[ALLOC:%[0-9]+]](p0) = G_GEP [[SP_TMP]], [[NUMBYTES]]
+; CHECK: [[ALIGNED_ALLOC:%[0-9]+]](p0) = G_PTR_MASK [[ALLOC]], 4
+; CHECK: %sp = COPY [[ALIGNED_ALLOC]]
+; CHECK: [[ALLOC:%[0-9]+]](p0) = COPY [[ALIGNED_ALLOC]]
+; CHECK: %x0 = COPY [[ALLOC]]
+define i8* @test_simple_alloca(i32 %numelts) {
+ %addr = alloca i8, i32 %numelts
+ ret i8* %addr
+}
+
+; CHECK-LABEL: name: test_aligned_alloca
+; CHECK: [[NUMELTS:%[0-9]+]](s32) = COPY %w0
+; CHECK: [[TYPE_SIZE:%[0-9]+]](s64) = G_CONSTANT i64 -1
+; CHECK: [[NUMELTS_64:%[0-9]+]](s64) = G_ZEXT [[NUMELTS]](s32)
+; CHECK: [[NUMBYTES:%[0-9]+]](s64) = G_MUL [[NUMELTS_64]], [[TYPE_SIZE]]
+; CHECK: [[SP_TMP:%[0-9]+]](p0) = COPY %sp
+; CHECK: [[ALLOC:%[0-9]+]](p0) = G_GEP [[SP_TMP]], [[NUMBYTES]]
+; CHECK: [[ALIGNED_ALLOC:%[0-9]+]](p0) = G_PTR_MASK [[ALLOC]], 5
+; CHECK: %sp = COPY [[ALIGNED_ALLOC]]
+; CHECK: [[ALLOC:%[0-9]+]](p0) = COPY [[ALIGNED_ALLOC]]
+; CHECK: %x0 = COPY [[ALLOC]]
+define i8* @test_aligned_alloca(i32 %numelts) {
+ %addr = alloca i8, i32 %numelts, align 32
+ ret i8* %addr
+}
+
+; CHECK-LABEL: name: test_natural_alloca
+; CHECK: [[NUMELTS:%[0-9]+]](s32) = COPY %w0
+; CHECK: [[TYPE_SIZE:%[0-9]+]](s64) = G_CONSTANT i64 -16
+; CHECK: [[NUMELTS_64:%[0-9]+]](s64) = G_ZEXT [[NUMELTS]](s32)
+; CHECK: [[NUMBYTES:%[0-9]+]](s64) = G_MUL [[NUMELTS_64]], [[TYPE_SIZE]]
+; CHECK: [[SP_TMP:%[0-9]+]](p0) = COPY %sp
+; CHECK: [[ALLOC:%[0-9]+]](p0) = G_GEP [[SP_TMP]], [[NUMBYTES]]
+; CHECK: %sp = COPY [[ALLOC]]
+; CHECK: [[ALLOC_TMP:%[0-9]+]](p0) = COPY [[ALLOC]]
+; CHECK: %x0 = COPY [[ALLOC_TMP]]
+define i128* @test_natural_alloca(i32 %numelts) {
+ %addr = alloca i128, i32 %numelts
+ ret i128* %addr
+}
diff --git a/test/CodeGen/AArch64/GlobalISel/gisel-abort.ll b/test/CodeGen/AArch64/GlobalISel/gisel-abort.ll
index 76eafdd5af5e..a1480c46fe40 100644
--- a/test/CodeGen/AArch64/GlobalISel/gisel-abort.ll
+++ b/test/CodeGen/AArch64/GlobalISel/gisel-abort.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s
+; RUN: llc -mtriple=aarch64-unknown-unknown -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s
; CHECK-NOT: fallback
; CHECK: empty
diff --git a/test/CodeGen/AArch64/GlobalISel/gisel-commandline-option.ll b/test/CodeGen/AArch64/GlobalISel/gisel-commandline-option.ll
new file mode 100644
index 000000000000..3ecdb7bbedfb
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/gisel-commandline-option.ll
@@ -0,0 +1,48 @@
+; RUN: llc -mtriple=aarch64-- -debug-pass=Structure %s -o /dev/null 2>&1 \
+; RUN: -O0 -aarch64-enable-global-isel-at-O=0 \
+; RUN: | FileCheck %s --check-prefix ENABLED --check-prefix NOFALLBACK
+
+; RUN: llc -mtriple=aarch64-- -debug-pass=Structure %s -o /dev/null 2>&1 \
+; RUN: -O0 -aarch64-enable-global-isel-at-O=0 -global-isel-abort=2 \
+; RUN: | FileCheck %s --check-prefix ENABLED --check-prefix FALLBACK
+
+; RUN: llc -mtriple=aarch64-- -debug-pass=Structure %s -o /dev/null 2>&1 \
+; RUN: -global-isel \
+; RUN: | FileCheck %s --check-prefix ENABLED --check-prefix NOFALLBACK
+
+; RUN: llc -mtriple=aarch64-- -debug-pass=Structure %s -o /dev/null 2>&1 \
+; RUN: -global-isel -global-isel-abort=2 \
+; RUN: | FileCheck %s --check-prefix ENABLED --check-prefix FALLBACK
+
+; RUN: llc -mtriple=aarch64-- -debug-pass=Structure %s -o /dev/null 2>&1 \
+; RUN: -O1 -aarch64-enable-global-isel-at-O=3 \
+; RUN: | FileCheck %s --check-prefix ENABLED
+
+; RUN: llc -mtriple=aarch64-- -debug-pass=Structure %s -o /dev/null 2>&1 \
+; RUN: -O1 -aarch64-enable-global-isel-at-O=0 \
+; RUN: | FileCheck %s --check-prefix DISABLED
+
+; RUN: llc -mtriple=aarch64-- -debug-pass=Structure %s -o /dev/null 2>&1 \
+; RUN: -aarch64-enable-global-isel-at-O=-1 \
+; RUN: | FileCheck %s --check-prefix DISABLED
+
+; RUN: llc -mtriple=aarch64-- -debug-pass=Structure %s -o /dev/null 2>&1 \
+; RUN: | FileCheck %s --check-prefix DISABLED
+
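+; With GlobalISel enabled, the four core passes run back to back, followed by
+; ResetMachineFunction, which implements the fallback path. Only the fallback
+; configurations keep SelectionDAG's AArch64 Instruction Selection in the
+; pipeline as a backstop.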
+; ENABLED: IRTranslator
+; ENABLED-NEXT: Legalizer
+; ENABLED-NEXT: RegBankSelect
+; ENABLED-NEXT: InstructionSelect
+; ENABLED-NEXT: ResetMachineFunction
+
+; FALLBACK: AArch64 Instruction Selection
+; NOFALLBACK-NOT: AArch64 Instruction Selection
+
+; DISABLED-NOT: IRTranslator
+
+; DISABLED: AArch64 Instruction Selection
+; DISABLED: Expand ISel Pseudo-instructions
+
+define void @empty() {
+ ret void
+}
diff --git a/test/CodeGen/AArch64/GlobalISel/gisel-fail-intermediate-legalizer.ll b/test/CodeGen/AArch64/GlobalISel/gisel-fail-intermediate-legalizer.ll
new file mode 100644
index 000000000000..e333f742e04d
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/gisel-fail-intermediate-legalizer.ll
@@ -0,0 +1,8 @@
+; RUN: llc -mtriple=aarch64-unknown-unknown -o - -global-isel -global-isel-abort=2 %s 2>&1 | FileCheck %s
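+; fptoui on half is not legalizable here, so with -global-isel-abort=2 the
+; backend falls back to SelectionDAG and reports the failure in a 'fallback'
+; diagnostic.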
+; CHECK: fallback
+; CHECK-LABEL: foo
+define i16 @foo(half* %p) {
+ %tmp0 = load half, half* %p
+ %tmp1 = fptoui half %tmp0 to i16
+ ret i16 %tmp1
+}
diff --git a/test/CodeGen/AArch64/GlobalISel/inline-asm.ll b/test/CodeGen/AArch64/GlobalISel/inline-asm.ll
new file mode 100644
index 000000000000..8ff7c4495dcc
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/inline-asm.ll
@@ -0,0 +1,10 @@
+; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 %s -o - | FileCheck %s
+
+; CHECK-LABEL: test_asm:
+; CHECK: {{APP|InlineAsm Start}}
+; CHECK: mov x0, {{x[0-9]+}}
+; CHECK: {{NO_APP|InlineAsm End}}
+define void @test_asm() {
+ call void asm sideeffect "mov x0, $0", "r"(i64 42)
+ ret void
+}
diff --git a/test/CodeGen/AArch64/GlobalISel/irtranslator-bitcast.ll b/test/CodeGen/AArch64/GlobalISel/irtranslator-bitcast.ll
new file mode 100644
index 000000000000..8d1b02216ea7
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/irtranslator-bitcast.ll
@@ -0,0 +1,30 @@
+; RUN: llc -O0 -mtriple=aarch64-apple-ios -global-isel -stop-after=irtranslator %s -o - | FileCheck %s
+
+; Check that we don't invalidate the vreg map.
+; This test is brittle: the invalidation only triggers when we grow the map.
+
+; CHECK-LABEL: name: test_bitcast_invalid_vreg
+define i32 @test_bitcast_invalid_vreg() {
+ %tmp0 = add i32 1, 2
+ %tmp1 = add i32 3, 4
+ %tmp2 = add i32 5, 6
+ %tmp3 = add i32 7, 8
+ %tmp4 = add i32 9, 10
+ %tmp5 = add i32 11, 12
+ %tmp6 = add i32 13, 14
+ %tmp7 = add i32 15, 16
+ %tmp8 = add i32 17, 18
+ %tmp9 = add i32 19, 20
+ %tmp10 = add i32 21, 22
+ %tmp11 = add i32 23, 24
+ %tmp12 = add i32 25, 26
+ %tmp13 = add i32 27, 28
+ %tmp14 = add i32 29, 30
+ %tmp15 = add i32 30, 30
+
+; At this point we have mapped 46 values. The 'i32 100' constant will grow the map.
+; CHECK: %46(s32) = G_CONSTANT i32 100
+; CHECK: %w0 = COPY %46(s32)
+ %res = bitcast i32 100 to i32
+ ret i32 %res
+}
diff --git a/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll b/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
index 718364af2aca..ef4445111d7b 100644
--- a/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
+++ b/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
@@ -6,7 +6,7 @@ declare i32 @foo(i32)
declare i32 @__gxx_personality_v0(...)
declare i32 @llvm.eh.typeid.for(i8*)
-; CHECK: name: bar
+; CHECK-LABEL: name: bar
; CHECK: body:
; CHECK-NEXT: bb.1 (%ir-block.0):
; CHECK: successors: %[[GOOD:bb.[0-9]+.continue]]{{.*}}%[[BAD:bb.[0-9]+.broken]]
@@ -15,19 +15,24 @@ declare i32 @llvm.eh.typeid.for(i8*)
; CHECK: BL @foo, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0, implicit-def %w0
; CHECK: {{%[0-9]+}}(s32) = COPY %w0
; CHECK: EH_LABEL
+; CHECK: G_BR %[[GOOD]]
; CHECK: [[BAD]] (landing-pad):
; CHECK: EH_LABEL
+; CHECK: [[UNDEF:%[0-9]+]](s128) = IMPLICIT_DEF
; CHECK: [[PTR:%[0-9]+]](p0) = COPY %x0
-; CHECK: [[SEL:%[0-9]+]](p0) = COPY %x1
-; CHECK: [[PTR_SEL:%[0-9]+]](s128) = G_SEQUENCE [[PTR]](p0), 0, [[SEL]](p0), 64
-; CHECK: [[PTR_RET:%[0-9]+]](s64), [[SEL_RET:%[0-9]+]](s32) = G_EXTRACT [[PTR_SEL]](s128), 0, 64
+; CHECK: [[VAL_WITH_PTR:%[0-9]+]](s128) = G_INSERT [[UNDEF]], [[PTR]](p0), 0
+; CHECK: [[SEL_PTR:%[0-9]+]](p0) = COPY %x1
+; CHECK: [[SEL:%[0-9]+]](s32) = G_PTRTOINT [[SEL_PTR]]
+; CHECK: [[PTR_SEL:%[0-9]+]](s128) = G_INSERT [[VAL_WITH_PTR]], [[SEL]](s32), 64
+; CHECK: [[PTR_RET:%[0-9]+]](s64) = G_EXTRACT [[PTR_SEL]](s128), 0
+; CHECK: [[SEL_RET:%[0-9]+]](s32) = G_EXTRACT [[PTR_SEL]](s128), 64
; CHECK: %x0 = COPY [[PTR_RET]]
; CHECK: %w1 = COPY [[SEL_RET]]
; CHECK: [[GOOD]]:
; CHECK: [[SEL:%[0-9]+]](s32) = G_CONSTANT i32 1
-; CHECK: {{%[0-9]+}}(s128) = G_INSERT {{%[0-9]+}}(s128), [[SEL]](s32), 64
+; CHECK: {{%[0-9]+}}(s128) = G_INSERT {{%[0-9]+}}, [[SEL]](s32), 64
define { i8*, i32 } @bar() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
%res32 = invoke i32 @foo(i32 42) to label %continue unwind label %broken
@@ -42,3 +47,48 @@ continue:
%res.good = insertvalue { i8*, i32 } undef, i32 %sel.int, 1
ret { i8*, i32 } %res.good
}
+
+; CHECK-LABEL: name: test_invoke_indirect
+; CHECK: [[CALLEE:%[0-9]+]](p0) = COPY %x0
+; CHECK: BLR [[CALLEE]]
+define void @test_invoke_indirect(void()* %callee) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+ invoke void %callee() to label %continue unwind label %broken
+
+broken:
+ landingpad { i8*, i32 } catch i8* bitcast(i8** @_ZTIi to i8*)
+ ret void
+
+continue:
+ ret void
+}
+
+; CHECK-LABEL: name: test_invoke_varargs
+
+; CHECK: [[NULL:%[0-9]+]](p0) = G_CONSTANT i64 0
+; CHECK: [[ANSWER:%[0-9]+]](s32) = G_CONSTANT i32 42
+; CHECK: [[ONE:%[0-9]+]](s32) = G_FCONSTANT float 1.0
+
+; CHECK: %x0 = COPY [[NULL]]
+
+; CHECK: [[SP:%[0-9]+]](p0) = COPY %sp
+; CHECK: [[OFFSET:%[0-9]+]](s64) = G_CONSTANT i64 0
+; CHECK: [[SLOT:%[0-9]+]](p0) = G_GEP [[SP]], [[OFFSET]](s64)
+; CHECK: G_STORE [[ANSWER]](s32), [[SLOT]]
+
+; CHECK: [[SP:%[0-9]+]](p0) = COPY %sp
+; CHECK: [[OFFSET:%[0-9]+]](s64) = G_CONSTANT i64 8
+; CHECK: [[SLOT:%[0-9]+]](p0) = G_GEP [[SP]], [[OFFSET]](s64)
+; CHECK: G_STORE [[ONE]](s32), [[SLOT]]
+
+; CHECK: BL @printf
+declare void @printf(i8*, ...)
+define void @test_invoke_varargs() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+ invoke void(i8*, ...) @printf(i8* null, i32 42, float 1.0) to label %continue unwind label %broken
+
+broken:
+ landingpad { i8*, i32 } catch i8* bitcast(i8** @_ZTIi to i8*)
+ ret void
+
+continue:
+ ret void
+}
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-add.mir b/test/CodeGen/AArch64/GlobalISel/legalize-add.mir
index 252e60c6b2ec..9b27198b961a 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-add.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-add.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
@@ -33,14 +33,14 @@ body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_scalar_add_big
- ; CHECK-NOT: G_EXTRACT
- ; CHECK-NOT: G_SEQUENCE
+ ; CHECK-NOT: G_MERGE_VALUES
+ ; CHECK-NOT: G_UNMERGE_VALUES
; CHECK-DAG: [[CARRY0_32:%.*]](s32) = G_CONSTANT i32 0
; CHECK-DAG: [[CARRY0:%[0-9]+]](s1) = G_TRUNC [[CARRY0_32]]
; CHECK: [[RES_LO:%.*]](s64), [[CARRY:%.*]](s1) = G_UADDE %0, %2, [[CARRY0]]
; CHECK: [[RES_HI:%.*]](s64), {{%.*}}(s1) = G_UADDE %1, %3, [[CARRY]]
- ; CHECK-NOT: G_EXTRACT
- ; CHECK-NOT: G_SEQUENCE
+ ; CHECK-NOT: G_MERGE_VALUES
+ ; CHECK-NOT: G_UNMERGE_VALUES
; CHECK: %x0 = COPY [[RES_LO]]
; CHECK: %x1 = COPY [[RES_HI]]
@@ -48,10 +48,10 @@ body: |
%1(s64) = COPY %x1
%2(s64) = COPY %x2
%3(s64) = COPY %x3
- %4(s128) = G_SEQUENCE %0, 0, %1, 64
- %5(s128) = G_SEQUENCE %2, 0, %3, 64
+ %4(s128) = G_MERGE_VALUES %0, %1
+ %5(s128) = G_MERGE_VALUES %2, %3
%6(s128) = G_ADD %4, %5
- %7(s64), %8(s64) = G_EXTRACT %6, 0, 64
+ %7(s64), %8(s64) = G_UNMERGE_VALUES %6
%x0 = COPY %7
%x1 = COPY %8
...
@@ -69,7 +69,10 @@ body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_scalar_add_small
- ; CHECK: [[RES:%.*]](s8) = G_ADD %2, %3
+ ; CHECK: [[OP0:%.*]](s32) = G_ANYEXT %2(s8)
+ ; CHECK: [[OP1:%.*]](s32) = G_ANYEXT %3(s8)
+ ; CHECK: [[RES32:%.*]](s32) = G_ADD [[OP0]], [[OP1]]
+ ; CHECK: [[RES:%.*]](s8) = G_TRUNC [[RES32]](s32)
%0(s64) = COPY %x0
%1(s64) = COPY %x1
@@ -109,10 +112,10 @@ body: |
%1(<2 x s64>) = COPY %q1
%2(<2 x s64>) = COPY %q2
%3(<2 x s64>) = COPY %q3
- %4(<4 x s64>) = G_SEQUENCE %0, 0, %1, 128
- %5(<4 x s64>) = G_SEQUENCE %2, 0, %3, 128
+ %4(<4 x s64>) = G_MERGE_VALUES %0, %1
+ %5(<4 x s64>) = G_MERGE_VALUES %2, %3
%6(<4 x s64>) = G_ADD %4, %5
- %7(<2 x s64>), %8(<2 x s64>) = G_EXTRACT %6, 0, 128
+ %7(<2 x s64>), %8(<2 x s64>) = G_UNMERGE_VALUES %6
%q0 = COPY %7
%q1 = COPY %8
...
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-and.mir b/test/CodeGen/AArch64/GlobalISel/legalize-and.mir
index 69459bfacb0a..75e1d5163532 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-and.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-and.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
@@ -22,7 +22,10 @@ body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_scalar_and_small
- ; CHECK: %4(s8) = G_AND %2, %3
+ ; CHECK: [[OP0:%.*]](s32) = G_ANYEXT %2(s8)
+ ; CHECK: [[OP1:%.*]](s32) = G_ANYEXT %3(s8)
+ ; CHECK: [[RES32:%.*]](s32) = G_AND [[OP0]], [[OP1]]
+ ; CHECK: [[RES:%.*]](s8) = G_TRUNC [[RES32]](s32)
%0(s64) = COPY %x0
%1(s64) = COPY %x1
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir b/test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir
index 926a62761ce0..29f83b362895 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir b/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
index cc1dc80488ba..fab6dcf43346 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
@@ -1,92 +1,132 @@
-# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--"
- define void @test_combines() {
- entry:
- ret void
- }
+ define void @test_combines_1() { ret void }
+ define void @test_combines_2() { ret void }
+ define void @test_combines_3() { ret void }
+ define void @test_combines_4() { ret void }
+ define void @test_combines_5() { ret void }
+ define void @test_combines_6() { ret void }
...
---
-name: test_combines
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
- - { id: 3, class: _ }
- - { id: 4, class: _ }
- - { id: 5, class: _ }
- - { id: 6, class: _ }
- - { id: 7, class: _ }
- - { id: 8, class: _ }
- - { id: 9, class: _ }
- - { id: 10, class: _ }
- - { id: 11, class: _ }
- - { id: 12, class: _ }
- - { id: 13, class: _ }
- - { id: 14, class: _ }
- - { id: 15, class: _ }
- - { id: 16, class: _ }
- - { id: 17, class: _ }
- - { id: 18, class: _ }
- - { id: 19, class: _ }
- - { id: 20, class: _ }
- - { id: 21, class: _ }
- - { id: 22, class: _ }
- - { id: 23, class: _ }
- - { id: 24, class: _ }
+name: test_combines_1
body: |
- bb.0.entry:
- liveins: %w0, %w1, %x2, %x3
+ bb.0:
+ liveins: %w0
- %0(s32) = COPY %w0
- %1(s32) = COPY %w1
- %2(s8) = G_TRUNC %0
+ %0:_(s32) = COPY %w0
+ %1:_(s8) = G_TRUNC %0
; Only one of these extracts can be eliminated; the offsets don't match
; properly in the other cases.
- ; CHECK-LABEL: name: test_combines
- ; CHECK: %3(s32) = G_SEQUENCE %2(s8), 1
- ; CHECK: %4(s8) = G_EXTRACT %3(s32), 0
+ ; CHECK-LABEL: name: test_combines_1
+ ; CHECK: %2(s32) = G_SEQUENCE %1(s8), 1
+ ; CHECK: %3(s8) = G_EXTRACT %2(s32), 0
; CHECK-NOT: G_EXTRACT
- ; CHECK: %6(s8) = G_EXTRACT %3(s32), 2
- ; CHECK: %7(s32) = G_ZEXT %2(s8)
- %3(s32) = G_SEQUENCE %2, 1
- %4(s8) = G_EXTRACT %3, 0
- %5(s8) = G_EXTRACT %3, 1
- %6(s8) = G_EXTRACT %3, 2
- %7(s32) = G_ZEXT %5
+ ; CHECK: %5(s8) = G_EXTRACT %2(s32), 2
+ ; CHECK: %6(s32) = G_ZEXT %1(s8)
+
+ %2:_(s32) = G_SEQUENCE %1, 1
+ %3:_(s8) = G_EXTRACT %2, 0
+ %4:_(s8) = G_EXTRACT %2, 1
+ %5:_(s8) = G_EXTRACT %2, 2
+ %6:_(s32) = G_ZEXT %4
+...
+
+---
+name: test_combines_2
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0:_(s32) = COPY %w0
; Similarly, here the types don't match.
- ; CHECK: %10(s32) = G_SEQUENCE %8(s16), 0, %9(s16), 16
- ; CHECK: %11(s1) = G_EXTRACT %10(s32), 0
- ; CHECK: %12(s32) = G_EXTRACT %10(s32), 0
- %8(s16) = G_TRUNC %0
- %9(s16) = G_ADD %8, %8
- %10(s32) = G_SEQUENCE %8, 0, %9, 16
- %11(s1) = G_EXTRACT %10, 0
- %12(s32) = G_EXTRACT %10, 0
+ ; CHECK-LABEL: name: test_combines_2
+ ; CHECK: %2(s64) = G_SEQUENCE %0(s32), 0, %1(s32), 32
+ ; CHECK: %3(s1) = G_EXTRACT %2(s64), 0
+ ; CHECK: %4(s64) = G_EXTRACT %2(s64), 0
+ %1:_(s32) = G_ADD %0, %0
+ %2:_(s64) = G_SEQUENCE %0, 0, %1, 32
+ %3:_(s1) = G_EXTRACT %2, 0
+ %4:_(s64) = G_EXTRACT %2, 0
+...
+
+---
+name: test_combines_3
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0:_(s32) = COPY %w0
+
+ ; CHECK-LABEL: name: test_combines_3
+ ; CHECK: %1(s32) = G_ADD %0, %0
+ ; CHECK-NOT: G_SEQUENCE
+ ; CHECK-NOT: G_EXTRACT
+ ; CHECK: %5(s32) = G_ADD %0, %1
+ %1:_(s32) = G_ADD %0, %0
+ %2:_(s64) = G_SEQUENCE %0, 0, %1, 32
+ %3:_(s32) = G_EXTRACT %2, 0
+ %4:_(s32) = G_EXTRACT %2, 32
+ %5:_(s32) = G_ADD %3, %4
+...
+---
+name: test_combines_4
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0:_(s64) = COPY %x0
+
+ ; CHECK-LABEL: name: test_combines_4
+ ; CHECK: %2(<2 x s32>) = G_EXTRACT %1(s128), 0
+ ; CHECK: %3(<2 x s32>) = G_ADD %2, %2
+ %1:_(s128) = G_SEQUENCE %0, 0, %0, 64
+ %2:_(<2 x s32>) = G_EXTRACT %1, 0
+ %3:_(<2 x s32>) = G_ADD %2, %2
+...
+
+---
+name: test_combines_5
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0:_(s32) = COPY %w0
+
+ ; CHECK-LABEL: name: test_combines_5
+ ; CHECK-NOT: G_SEQUENCE
; CHECK-NOT: G_EXTRACT
- ; CHECK: %15(s16) = G_ADD %8, %9
- %13(s16), %14(s16) = G_EXTRACT %10, 0, 16
- %15(s16) = G_ADD %13, %14
+ ; CHECK: %5(s32) = G_ADD %0, %1
+ %1:_(s32) = G_ADD %0, %0
+ %2:_(s64) = G_SEQUENCE %0, 0, %1, 32
+ %3:_(s32) = G_EXTRACT %2, 0
+ %4:_(s32) = G_EXTRACT %2, 32
+ %5:_(s32) = G_ADD %3, %4
+...
+
+---
+name: test_combines_6
+body: |
+ bb.0:
+ liveins: %w0
- ; CHECK: %18(<2 x s32>) = G_EXTRACT %17(s128), 0
- ; CHECK: %19(<2 x s32>) = G_ADD %18, %18
- %16(s64) = COPY %x0
- %17(s128) = G_SEQUENCE %16, 0, %16, 64
- %18(<2 x s32>) = G_EXTRACT %17, 0
- %19(<2 x s32>) = G_ADD %18, %18
+ ; CHECK-LABEL: name: test_combines_6
+ ; CHECK: %0(s32) = COPY %w0
+ %0:_(s32) = COPY %w0
+ ; Check that we replace all the uses of a G_EXTRACT.
; CHECK-NOT: G_SEQUENCE
; CHECK-NOT: G_EXTRACT
- ; CHECK: %24(s32) = G_ADD %0, %20
- %20(s32) = G_ADD %0, %0
- %21(s64) = G_SEQUENCE %0, 0, %20, 32
- %22(s32) = G_EXTRACT %21, 0
- %23(s32) = G_EXTRACT %21, 32
- %24(s32) = G_ADD %22, %23
+ ; CHECK: %3(s32) = G_MUL %0, %0
+ ; CHECK: %4(s32) = G_ADD %0, %3
+ %1:_(s32) = G_SEQUENCE %0, 0
+ %2:_(s32) = G_EXTRACT %1, 0
+ %3:_(s32) = G_MUL %2, %2
+ %4:_(s32) = G_ADD %2, %3
...
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir b/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir
index 56a7d4736ae8..16d9e59698fe 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
@@ -57,7 +57,7 @@ body: |
; CHECK: %0(s32) = G_FCONSTANT float 1.000000e+00
; CHECK: %1(s64) = G_FCONSTANT double 2.000000e+00
; CHECK: [[TMP:%[0-9]+]](s32) = G_FCONSTANT half 0xH0000
- ; CHECK; %2(s16) = G_FPTRUNC [[TMP]]
+ ; CHECK: %2(s16) = G_FPTRUNC [[TMP]]
%0(s32) = G_FCONSTANT float 1.0
%1(s64) = G_FCONSTANT double 2.0
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-div.mir b/test/CodeGen/AArch64/GlobalISel/legalize-div.mir
index aaef45d3c928..c6e0aabfd2c0 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-div.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-div.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll b/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
new file mode 100644
index 000000000000..23e7d5163e5a
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
@@ -0,0 +1,53 @@
+; RUN: llc -O0 -mtriple=aarch64-apple-ios -verify-machineinstrs -global-isel -stop-after=legalizer %s -o - | FileCheck %s
+
+@_ZTIi = external global i8*
+
+declare i32 @foo(i32)
+declare i32 @__gxx_personality_v0(...)
+declare i32 @llvm.eh.typeid.for(i8*)
+declare void @_Unwind_Resume(i8*)
+
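+; The landing pad's exception pointer arrives in %x0 and stays a p0; the
+; selector arrives in %x1 and is converted with G_PTRTOINT before the two are
+; merged into the s128 landing-pad value and extracted into the stores below.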
+; CHECK: name: bar
+; CHECK: body:
+; CHECK-NEXT: bb.1 (%ir-block.0):
+; CHECK: successors: %{{bb.[0-9]+.continue.*}}%[[LP:bb.[0-9]+.cleanup]]
+
+; CHECK: [[LP]] (landing-pad):
+; CHECK: EH_LABEL
+
+; CHECK: [[PTR:%[0-9]+]](p0) = COPY %x0
+; CHECK: [[STRUCT_PTR:%[0-9]+]](s64) = G_PTRTOINT [[PTR]](p0)
+
+; CHECK: [[SEL_PTR:%[0-9]+]](p0) = COPY %x1
+; CHECK: [[SEL:%[0-9]+]](s32) = G_PTRTOINT [[SEL_PTR]]
+; CHECK: [[STRUCT_SEL:%[0-9]+]](s64) = G_INSERT {{%[0-9]+}}, [[SEL]](s32), 0
+
+; CHECK: [[STRUCT:%[0-9]+]](s128) = G_MERGE_VALUES [[STRUCT_PTR]](s64), [[STRUCT_SEL]]
+
+; CHECK: [[PTR:%[0-9]+]](p0) = G_EXTRACT [[STRUCT]](s128), 0
+; CHECK: G_STORE [[PTR]](p0), {{%[0-9]+}}(p0)
+
+; CHECK: [[SEL:%[0-9]+]](s32) = G_EXTRACT [[STRUCT]](s128), 64
+; CHECK: G_STORE [[SEL]](s32), {{%[0-9]+}}(p0)
+
+define void @bar() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+ %exn.slot = alloca i8*
+ %ehselector.slot = alloca i32
+ %1 = invoke i32 @foo(i32 42) to label %continue unwind label %cleanup
+
+cleanup:
+ %2 = landingpad { i8*, i32 } cleanup
+ %3 = extractvalue { i8*, i32 } %2, 0
+ store i8* %3, i8** %exn.slot, align 8
+ %4 = extractvalue { i8*, i32 } %2, 1
+ store i32 %4, i32* %ehselector.slot, align 4
+ br label %eh.resume
+
+continue:
+ ret void
+
+eh.resume:
+ %exn = load i8*, i8** %exn.slot, align 8
+ call void @_Unwind_Resume(i8* %exn)
+ unreachable
+}
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-ext.mir b/test/CodeGen/AArch64/GlobalISel/legalize-ext.mir
index 9907f009d931..70b55e4ebc66 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-ext.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-ext.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-fcmp.mir b/test/CodeGen/AArch64/GlobalISel/legalize-fcmp.mir
index 72bd613fab3a..8cdc7b78b1e9 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-fcmp.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-fcmp.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-fneg.mir b/test/CodeGen/AArch64/GlobalISel/legalize-fneg.mir
new file mode 100644
index 000000000000..8b5cbdfa55e3
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-fneg.mir
@@ -0,0 +1,48 @@
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
+
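+# The legalizer lowers G_FNEG into a G_FSUB from negative zero.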
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+ target triple = "aarch64--"
+ define void @test_fneg_f32() {
+ entry:
+ ret void
+ }
+ define void @test_fneg_f64() {
+ entry:
+ ret void
+ }
+...
+---
+name: test_fneg_f32
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.1:
+ liveins: %s0
+ ; CHECK-LABEL: name: test_fneg_f32
+ ; CHECK: [[VAR:%[0-9]+]](s32) = COPY %s0
+ ; CHECK: [[ZERO:%[0-9]+]](s32) = G_FCONSTANT float -0.000000e+00
+ ; CHECK: [[RES:%[0-9]+]](s32) = G_FSUB [[ZERO]], [[VAR]]
+ ; CHECK: %s0 = COPY [[RES]](s32)
+ %0(s32) = COPY %s0
+ %1(s32) = G_FNEG %0
+ %s0 = COPY %1(s32)
+...
+---
+name: test_fneg_f64
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.1:
+ liveins: %d0
+ ; CHECK-LABEL: name: test_fneg_f64
+ ; CHECK: [[VAR:%[0-9]+]](s64) = COPY %d0
+ ; CHECK: [[ZERO:%[0-9]+]](s64) = G_FCONSTANT double -0.000000e+00
+ ; CHECK: [[RES:%[0-9]+]](s64) = G_FSUB [[ZERO]], [[VAR]]
+ ; CHECK: %d0 = COPY [[RES]](s64)
+ %0(s64) = COPY %d0
+ %1(s64) = G_FNEG %0
+ %d0 = COPY %1(s64)
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-fptoi.mir b/test/CodeGen/AArch64/GlobalISel/legalize-fptoi.mir
new file mode 100644
index 000000000000..f79d0382ea7c
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-fptoi.mir
@@ -0,0 +1,201 @@
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+ target triple = "aarch64--"
+
+ define void @test_fptosi_s32_s32() { ret void }
+ define void @test_fptoui_s32_s32() { ret void }
+ define void @test_fptosi_s32_s64() { ret void }
+ define void @test_fptoui_s32_s64() { ret void }
+
+ define void @test_fptosi_s64_s32() { ret void }
+ define void @test_fptoui_s64_s32() { ret void }
+ define void @test_fptosi_s64_s64() { ret void }
+ define void @test_fptoui_s64_s64() { ret void }
+
+ define void @test_fptosi_s1_s32() { ret void }
+ define void @test_fptoui_s1_s32() { ret void }
+
+ define void @test_fptosi_s8_s64() { ret void }
+ define void @test_fptoui_s8_s64() { ret void }
+
+ define void @test_fptosi_s16_s32() { ret void }
+ define void @test_fptoui_s16_s32() { ret void }
+...
+
+---
+name: test_fptosi_s32_s32
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+
+ ; CHECK-LABEL: name: test_fptosi_s32_s32
+ ; CHECK: %1(s32) = G_FPTOSI %0
+ %1:_(s32) = G_FPTOSI %0
+...
+
+---
+name: test_fptoui_s32_s32
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+
+ ; CHECK-LABEL: name: test_fptoui_s32_s32
+ ; CHECK: %1(s32) = G_FPTOUI %0
+ %1:_(s32) = G_FPTOUI %0
+...
+
+---
+name: test_fptosi_s32_s64
+body: |
+ bb.0:
+ liveins: %x0
+ %0:_(s64) = COPY %x0
+
+ ; CHECK-LABEL: name: test_fptosi_s32_s64
+ ; CHECK: %1(s32) = G_FPTOSI %0
+ %1:_(s32) = G_FPTOSI %0
+...
+
+---
+name: test_fptoui_s32_s64
+body: |
+ bb.0:
+ liveins: %x0
+ %0:_(s64) = COPY %x0
+
+ ; CHECK-LABEL: name: test_fptoui_s32_s64
+ ; CHECK: %1(s32) = G_FPTOUI %0
+ %1:_(s32) = G_FPTOUI %0
+...
+
+---
+name: test_fptosi_s64_s32
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+
+ ; CHECK-LABEL: name: test_fptosi_s64_s32
+ ; CHECK: %1(s64) = G_FPTOSI %0
+ %1:_(s64) = G_FPTOSI %0
+...
+
+---
+name: test_fptoui_s64_s32
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+
+ ; CHECK-LABEL: name: test_fptoui_s64_s32
+ ; CHECK: %1(s64) = G_FPTOUI %0
+ %1:_(s64) = G_FPTOUI %0
+...
+
+---
+name: test_fptosi_s64_s64
+body: |
+ bb.0:
+ liveins: %x0
+ %0:_(s64) = COPY %x0
+
+ ; CHECK-LABEL: name: test_fptosi_s64_s64
+ ; CHECK: %1(s64) = G_FPTOSI %0
+ %1:_(s64) = G_FPTOSI %0
+...
+
+---
+name: test_fptoui_s64_s64
+body: |
+ bb.0:
+ liveins: %x0
+ %0:_(s64) = COPY %x0
+
+ ; CHECK-LABEL: name: test_fptoui_s64_s64
+ ; CHECK: %1(s64) = G_FPTOUI %0
+ %1:_(s64) = G_FPTOUI %0
+...
+
+
+
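+# The remaining tests have results narrower than s32. These are legalized
+# by widening: the conversion is performed at s32 and the result is
+# truncated back to the requested width.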
+---
+name: test_fptosi_s1_s32
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+
+ ; CHECK-LABEL: name: test_fptosi_s1_s32
+ ; CHECK: %2(s32) = G_FPTOSI %0
+ ; CHECK: %1(s1) = G_TRUNC %2
+ %1:_(s1) = G_FPTOSI %0
+...
+
+---
+name: test_fptoui_s1_s32
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+
+ ; CHECK-LABEL: name: test_fptoui_s1_s32
+ ; CHECK: %2(s32) = G_FPTOUI %0
+ ; CHECK: %1(s1) = G_TRUNC %2
+ %1:_(s1) = G_FPTOUI %0
+...
+
+---
+name: test_fptosi_s8_s64
+body: |
+ bb.0:
+ liveins: %x0
+ %0:_(s64) = COPY %x0
+
+ ; CHECK-LABEL: name: test_fptosi_s8_s64
+ ; CHECK: %2(s32) = G_FPTOSI %0
+ ; CHECK: %1(s8) = G_TRUNC %2
+ %1:_(s8) = G_FPTOSI %0
+...
+
+---
+name: test_fptoui_s8_s64
+body: |
+ bb.0:
+ liveins: %x0
+ %0:_(s64) = COPY %x0
+
+ ; CHECK-LABEL: name: test_fptoui_s8_s64
+ ; CHECK: %2(s32) = G_FPTOUI %0
+ ; CHECK: %1(s8) = G_TRUNC %2
+ %1:_(s8) = G_FPTOUI %0
+...
+
+---
+name: test_fptosi_s16_s32
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+
+ ; CHECK-LABEL: name: test_fptosi_s16_s32
+ ; CHECK: %2(s32) = G_FPTOSI %0
+ ; CHECK: %1(s16) = G_TRUNC %2
+ %1:_(s16) = G_FPTOSI %0
+...
+
+---
+name: test_fptoui_s16_s32
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+
+ ; CHECK-LABEL: name: test_fptoui_s16_s32
+ ; CHECK: %2(s32) = G_FPTOUI %0
+ ; CHECK: %1(s16) = G_TRUNC %2
+ %1:_(s16) = G_FPTOUI %0
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-gep.mir b/test/CodeGen/AArch64/GlobalISel/legalize-gep.mir
index 3f11c123ba51..d6ec983c2067 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-gep.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-gep.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-inserts.mir b/test/CodeGen/AArch64/GlobalISel/legalize-inserts.mir
new file mode 100644
index 000000000000..917f181099ec
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-inserts.mir
@@ -0,0 +1,141 @@
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+ target triple = "aarch64--"
+ define void @test_inserts_1() { ret void }
+ define void @test_inserts_2() { ret void }
+ define void @test_inserts_3() { ret void }
+ define void @test_inserts_4() { ret void }
+ define void @test_inserts_5() { ret void }
+ define void @test_inserts_6() { ret void }
+...
+
+---
+name: test_inserts_1
+body: |
+ bb.0:
+ liveins: %w0
+
+ ; The low part of the insertion wipes out the old register entirely, so %0
+ ; gets forwarded to the G_STORE. The high part is unchanged, so the (split)
+ ; G_LOAD gets forwarded.
+ ; CHECK-LABEL: name: test_inserts_1
+ ; CHECK: [[LO:%[0-9]+]](s64) = G_LOAD
+ ; CHECK: [[HI:%[0-9]+]](s64) = G_LOAD
+ ; CHECK: G_STORE %0(s64)
+ ; CHECK: G_STORE [[HI]]
+ %0:_(s64) = COPY %x0
+ %1:_(s32) = COPY %w1
+ %2:_(p0) = COPY %x2
+ %3:_(s128) = G_LOAD %2(p0) :: (load 16)
+ %4:_(s128) = G_INSERT %3(s128), %0(s64), 0
+ G_STORE %4(s128), %2(p0) :: (store 16)
+ RET_ReallyLR
+...
+
+---
+name: test_inserts_2
+body: |
+ bb.0:
+ liveins: %w0
+
+ ; The low insertion wipes out the old register entirely, so %0 gets
+ ; forwarded to the G_STORE again. The second insertion is real.
+ ; CHECK-LABEL: name: test_inserts_2
+ ; CHECK: [[LO:%[0-9]+]](s64) = G_LOAD
+ ; CHECK: [[HI:%[0-9]+]](s64) = G_LOAD
+ ; CHECK: [[NEWHI:%[0-9]+]](s64) = G_INSERT [[HI]], %1(s32), 0
+ ; CHECK: G_STORE %0(s64)
+ ; CHECK: G_STORE [[NEWHI]]
+ %0:_(s64) = COPY %x0
+ %1:_(s32) = COPY %w1
+ %2:_(p0) = COPY %x2
+ %3:_(s128) = G_LOAD %2(p0) :: (load 16)
+ %4:_(s128) = G_INSERT %3(s128), %0(s64), 0
+ %5:_(s128) = G_INSERT %4(s128), %1(s32), 64
+ G_STORE %5(s128), %2(p0) :: (store 16)
+ RET_ReallyLR
+...
+
+---
+name: test_inserts_3
+body: |
+ bb.0:
+ liveins: %w0
+
+ ; I'm not entirely convinced inserting a p0 into an s64 is valid, but it's
+ ; certainly better than the alternative of directly forwarding the value
+ ; which would cause a nasty type mismatch.
+ ; CHECK-LABEL: name: test_inserts_3
+ ; CHECK: [[LO:%[0-9]+]](s64) = G_LOAD
+ ; CHECK: [[HI:%[0-9]+]](s64) = G_LOAD
+ ; CHECK: [[NEWLO:%[0-9]+]](s64) = G_PTRTOINT %0(p0)
+ ; CHECK: G_STORE [[NEWLO]](s64)
+ ; CHECK: G_STORE [[HI]]
+ %0:_(p0) = COPY %x0
+ %1:_(s32) = COPY %w1
+ %2:_(p0) = COPY %x2
+ %3:_(s128) = G_LOAD %2(p0) :: (load 16)
+ %4:_(s128) = G_INSERT %3(s128), %0(p0), 0
+ G_STORE %4(s128), %2(p0) :: (store 16)
+ RET_ReallyLR
+...
+
+---
+name: test_inserts_4
+body: |
+ bb.0:
+ liveins: %w0
+
+ ; A narrow insert gets surrounded by a G_ANYEXT/G_TRUNC pair.
+ ; CHECK-LABEL: name: test_inserts_4
+ ; CHECK: [[VALEXT:%[0-9]+]](s32) = G_ANYEXT %1(s8)
+ ; CHECK: [[VAL:%[0-9]+]](s32) = G_INSERT [[VALEXT]], %0(s1), 0
+ ; CHECK: %3(s8) = G_TRUNC [[VAL]](s32)
+ %0:_(s1) = COPY %w0
+ %1:_(s8) = COPY %w1
+ %2:_(p0) = COPY %x2
+ %3:_(s8) = G_INSERT %1(s8), %0(s1), 0
+ G_STORE %3(s8), %2(p0) :: (store 1)
+ RET_ReallyLR
+...
+
+---
+name: test_inserts_5
+body: |
+ bb.0:
+ liveins: %x0, %x1, %x2
+
+
+ ; CHECK-LABEL: name: test_inserts_5
+ ; CHECK: [[INS_LO:%[0-9]+]](s32) = G_EXTRACT %2(s64), 0
+ ; CHECK: [[VAL_LO:%[0-9]+]](s64) = G_INSERT %0, [[INS_LO]](s32), 32
+ ; CHECK: [[INS_HI:%[0-9]+]](s32) = G_EXTRACT %2(s64), 32
+ ; CHECK: [[VAL_HI:%[0-9]+]](s64) = G_INSERT %1, [[INS_HI]](s32), 0
+ ; CHECK: %4(s128) = G_MERGE_VALUES [[VAL_LO]](s64), [[VAL_HI]](s64)
+ %0:_(s64) = COPY %x0
+ %1:_(s64) = COPY %x1
+ %2:_(s64) = COPY %x2
+ %3:_(s128) = G_MERGE_VALUES %0, %1
+ %4:_(s128) = G_INSERT %3, %2, 32
+ RET_ReallyLR
+...
+
+---
+name: test_inserts_6
+body: |
+ bb.0:
+ liveins: %x0, %x1, %x2
+
+
+ ; CHECK-LABEL: name: test_inserts_6
+ ; CHECK: [[VAL_LO:%[0-9]+]](s64) = G_INSERT %0, %2(s32), 32
+ ; CHECK: %4(s128) = G_MERGE_VALUES [[VAL_LO]](s64), %1(s64)
+ %0:_(s64) = COPY %x0
+ %1:_(s64) = COPY %x1
+ %2:_(s32) = COPY %w2
+ %3:_(s128) = G_MERGE_VALUES %0, %1
+ %4:_(s128) = G_INSERT %3, %2, 32
+ RET_ReallyLR
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir b/test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir
new file mode 100644
index 000000000000..69e72bcb1f38
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir
@@ -0,0 +1,206 @@
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+ target triple = "aarch64--"
+
+ define void @test_sitofp_s32_s32() { ret void }
+ define void @test_uitofp_s32_s32() { ret void }
+ define void @test_sitofp_s32_s64() { ret void }
+ define void @test_uitofp_s32_s64() { ret void }
+
+ define void @test_sitofp_s64_s32() { ret void }
+ define void @test_uitofp_s64_s32() { ret void }
+ define void @test_sitofp_s64_s64() { ret void }
+ define void @test_uitofp_s64_s64() { ret void }
+
+ define void @test_sitofp_s32_s1() { ret void }
+ define void @test_uitofp_s32_s1() { ret void }
+
+ define void @test_sitofp_s64_s8() { ret void }
+ define void @test_uitofp_s64_s8() { ret void }
+
+ define void @test_sitofp_s32_s16() { ret void }
+ define void @test_uitofp_s32_s16() { ret void }
+...
+
+---
+name: test_sitofp_s32_s32
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+
+ ; CHECK-LABEL: name: test_sitofp_s32_s32
+ ; CHECK: %1(s32) = G_SITOFP %0
+ %1:_(s32) = G_SITOFP %0
+...
+
+---
+name: test_uitofp_s32_s32
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+
+ ; CHECK-LABEL: name: test_uitofp_s32_s32
+ ; CHECK: %1(s32) = G_UITOFP %0
+ %1:_(s32) = G_UITOFP %0
+...
+
+---
+name: test_sitofp_s32_s64
+body: |
+ bb.0:
+ liveins: %x0
+ %0:_(s64) = COPY %x0
+
+ ; CHECK-LABEL: name: test_sitofp_s32_s64
+ ; CHECK: %1(s32) = G_SITOFP %0
+ %1:_(s32) = G_SITOFP %0
+...
+
+---
+name: test_uitofp_s32_s64
+body: |
+ bb.0:
+ liveins: %x0
+ %0:_(s64) = COPY %x0
+
+ ; CHECK-LABEL: name: test_uitofp_s32_s64
+ ; CHECK: %1(s32) = G_UITOFP %0
+ %1:_(s32) = G_UITOFP %0
+...
+
+---
+name: test_sitofp_s64_s32
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+
+ ; CHECK-LABEL: name: test_sitofp_s64_s32
+ ; CHECK: %1(s64) = G_SITOFP %0
+ %1:_(s64) = G_SITOFP %0
+...
+
+---
+name: test_uitofp_s64_s32
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+
+ ; CHECK-LABEL: name: test_uitofp_s64_s32
+ ; CHECK: %1(s64) = G_UITOFP %0
+ %1:_(s64) = G_UITOFP %0
+...
+
+---
+name: test_sitofp_s64_s64
+body: |
+ bb.0:
+ liveins: %x0
+ %0:_(s64) = COPY %x0
+
+ ; CHECK-LABEL: name: test_sitofp_s64_s64
+ ; CHECK: %1(s64) = G_SITOFP %0
+ %1:_(s64) = G_SITOFP %0
+...
+
+---
+name: test_uitofp_s64_s64
+body: |
+ bb.0:
+ liveins: %x0
+ %0:_(s64) = COPY %x0
+
+ ; CHECK-LABEL: name: test_uitofp_s64_s64
+ ; CHECK: %1(s64) = G_UITOFP %0
+ %1:_(s64) = G_UITOFP %0
+...
+
+
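+# The remaining tests have integer sources narrower than s32. These are
+# legalized by extending the input to s32 first: G_SEXT for the signed
+# conversions and G_ZEXT for the unsigned ones.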
+---
+name: test_sitofp_s32_s1
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+ %1:_(s1) = G_TRUNC %0
+
+ ; CHECK-LABEL: name: test_sitofp_s32_s1
+ ; CHECK: %3(s32) = G_SEXT %1
+ ; CHECK: %2(s32) = G_SITOFP %3
+ %2:_(s32) = G_SITOFP %1
+...
+
+---
+name: test_uitofp_s32_s1
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+ %1:_(s1) = G_TRUNC %0
+
+ ; CHECK-LABEL: name: test_uitofp_s32_s1
+ ; CHECK: %3(s32) = G_ZEXT %1
+ ; CHECK: %2(s32) = G_UITOFP %3
+ %2:_(s32) = G_UITOFP %1
+...
+
+---
+name: test_sitofp_s64_s8
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+ %1:_(s8) = G_TRUNC %0
+
+ ; CHECK-LABEL: name: test_sitofp_s64_s8
+ ; CHECK: %3(s32) = G_SEXT %1
+ ; CHECK: %2(s64) = G_SITOFP %3
+ %2:_(s64) = G_SITOFP %1
+...
+
+---
+name: test_uitofp_s64_s8
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+ %1:_(s8) = G_TRUNC %0
+
+ ; CHECK-LABEL: name: test_uitofp_s64_s8
+ ; CHECK: %3(s32) = G_ZEXT %1
+ ; CHECK: %2(s64) = G_UITOFP %3
+ %2:_(s64) = G_UITOFP %1
+...
+
+---
+name: test_sitofp_s32_s16
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+ %1:_(s16) = G_TRUNC %0
+
+ ; CHECK-LABEL: name: test_sitofp_s32_s16
+ ; CHECK: %3(s32) = G_SEXT %1
+ ; CHECK: %2(s32) = G_SITOFP %3
+ %2:_(s32) = G_SITOFP %1
+...
+
+---
+name: test_uitofp_s32_s16
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+ %1:_(s16) = G_TRUNC %0
+
+ ; CHECK-LABEL: name: test_uitofp_s32_s16
+ ; CHECK: %3(s32) = G_ZEXT %1
+ ; CHECK: %2(s32) = G_UITOFP %3
+ %2:_(s32) = G_UITOFP %1
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir b/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
index 6a86686fa4bd..c806b4a7060d 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
@@ -24,6 +24,7 @@ registers:
- { id: 5, class: _ }
- { id: 6, class: _ }
- { id: 7, class: _ }
+ - { id: 8, class: _ }
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
@@ -51,6 +52,15 @@ body: |
; CHECK: %7(<2 x s32>) = G_LOAD %0(p0) :: (load 8 from %ir.addr)
%7(<2 x s32>) = G_LOAD %0(p0) :: (load 8 from %ir.addr)
+
+ ; CHECK: [[OFFSET0:%[0-9]+]](s64) = G_CONSTANT i64 0
+ ; CHECK: [[GEP0:%[0-9]+]](p0) = G_GEP %0, [[OFFSET0]](s64)
+ ; CHECK: [[LOAD0:%[0-9]+]](s64) = G_LOAD [[GEP0]](p0) :: (load 16 from %ir.addr)
+ ; CHECK: [[OFFSET1:%[0-9]+]](s64) = G_CONSTANT i64 8
+ ; CHECK: [[GEP1:%[0-9]+]](p0) = G_GEP %0, [[OFFSET1]](s64)
+ ; CHECK: [[LOAD1:%[0-9]+]](s64) = G_LOAD [[GEP1]](p0) :: (load 16 from %ir.addr)
+ ; CHECK: %8(s128) = G_MERGE_VALUES [[LOAD0]](s64), [[LOAD1]](s64)
+ %8(s128) = G_LOAD %0(p0) :: (load 16 from %ir.addr)
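+
+ ; In other words, the s128 load is narrowed into two s64 loads at byte
+ ; offsets 0 and 8, and the halves are reassembled with G_MERGE_VALUES.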
...
---
@@ -62,6 +72,8 @@ registers:
- { id: 3, class: _ }
- { id: 4, class: _ }
- { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
@@ -70,7 +82,7 @@ body: |
%0(p0) = COPY %x0
%1(s32) = COPY %w1
- ; CHECK: [[BIT8:%[0-9]+]](s8) = G_ANYEXT %2(s1)
+ ; CHECK: [[BIT8:%[0-9]+]](s8) = G_ZEXT %2(s1)
; CHECK: G_STORE [[BIT8]](s8), %0(p0) :: (store 1 into %ir.addr)
%2(s1) = G_TRUNC %1
G_STORE %2, %0 :: (store 1 into %ir.addr)
@@ -92,4 +104,14 @@ body: |
; CHECK: G_STORE %0(p0), %0(p0) :: (store 8 into %ir.addr)
G_STORE %0(p0), %0(p0) :: (store 8 into %ir.addr)
+
+ ; CHECK: [[OFFSET0:%[0-9]+]](s64) = G_CONSTANT i64 0
+ ; CHECK: [[GEP0:%[0-9]+]](p0) = G_GEP %0, [[OFFSET0]](s64)
+ ; CHECK: G_STORE %5(s64), [[GEP0]](p0) :: (store 16 into %ir.addr)
+ ; CHECK: [[OFFSET1:%[0-9]+]](s64) = G_CONSTANT i64 8
+ ; CHECK: [[GEP1:%[0-9]+]](p0) = G_GEP %0, [[OFFSET1]](s64)
+ ; CHECK: G_STORE %6(s64), [[GEP1]](p0) :: (store 16 into %ir.addr)
+ %6(s64) = G_PTRTOINT %0(p0)
+ %7(s128) = G_MERGE_VALUES %5, %6
+ G_STORE %7, %0 :: (store 16 into %ir.addr)
...
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-mul.mir b/test/CodeGen/AArch64/GlobalISel/legalize-mul.mir
index eb642d4b1a74..1ea6e9c292f5 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-mul.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-mul.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
@@ -7,6 +7,7 @@
entry:
ret void
}
+ define void @test_mul_overflow() { ret void }
...
---
@@ -22,7 +23,10 @@ body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_scalar_mul_small
- ; CHECK: %4(s8) = G_MUL %2, %3
+ ; CHECK: [[OP0:%.*]](s32) = G_ANYEXT %2(s8)
+ ; CHECK: [[OP1:%.*]](s32) = G_ANYEXT %3(s8)
+ ; CHECK: [[RES32:%.*]](s32) = G_MUL [[OP0]], [[OP1]]
+ ; CHECK: [[RES:%.*]](s8) = G_TRUNC [[RES32]](s32)
%0(s64) = COPY %x0
%1(s64) = COPY %x1
@@ -32,3 +36,22 @@ body: |
%5(s64) = G_ANYEXT %2
%x0 = COPY %5
...
+
+
+---
+name: test_mul_overflow
+body: |
+ bb.0:
+ liveins: %x0, %x1, %w2, %w3
+
+ %0:_(s64) = COPY %x0
+ %1:_(s64) = COPY %x1
+
+ ; CHECK-LABEL: name: test_mul_overflow
+ ; CHECK: %2(s64) = G_MUL %0, %1
+ ; CHECK: [[HI:%[0-9]+]](s64) = G_SMULH %0, %1
+ ; CHECK: [[ZERO:%[0-9]+]](s64) = G_CONSTANT i64 0
+ ; CHECK: %3(s1) = G_ICMP intpred(ne), [[HI]](s64), [[ZERO]]
+ %2:_(s64), %3:_(s1) = G_SMULO %0, %1
+
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-nonpowerof2eltsvec.mir b/test/CodeGen/AArch64/GlobalISel/legalize-nonpowerof2eltsvec.mir
new file mode 100644
index 000000000000..9928ea54d2c9
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-nonpowerof2eltsvec.mir
@@ -0,0 +1,29 @@
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+ target triple = "aarch64--"
+ define void @test_legalize_merge_v3s32() {
+ ret void
+ }
+...
+---
+name: test_legalize_merge_v3s32
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %w0, %w1, %w2
+ ; CHECK-LABEL: name: test_legalize_merge_v3s32
+ ; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %w0
+ ; CHECK: [[ARG2:%[0-9]+]](s32) = COPY %w1
+ ; CHECK: [[ARG3:%[0-9]+]](s32) = COPY %w2
+ ; CHECK: (<3 x s32>) = G_MERGE_VALUES [[ARG1]](s32), [[ARG2]](s32), [[ARG3]](s32)
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = COPY %w2
+ %3(<3 x s32>) = G_MERGE_VALUES %0(s32), %1(s32), %2(s32)
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-or.mir b/test/CodeGen/AArch64/GlobalISel/legalize-or.mir
index edf10cd411eb..e8b850982460 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-or.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-or.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
@@ -22,7 +22,10 @@ body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_scalar_or_small
- ; CHECK: %4(s8) = G_OR %2, %3
+ ; CHECK: [[OP0:%.*]](s32) = G_ANYEXT %2(s8)
+ ; CHECK: [[OP1:%.*]](s32) = G_ANYEXT %3(s8)
+ ; CHECK: [[RES32:%.*]](s32) = G_OR [[OP0]], [[OP1]]
+ ; CHECK: [[RES:%.*]](s8) = G_TRUNC [[RES32]](s32)
%0(s64) = COPY %x0
%1(s64) = COPY %x1
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-pow.mir b/test/CodeGen/AArch64/GlobalISel/legalize-pow.mir
new file mode 100644
index 000000000000..2becc2e134b5
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-pow.mir
@@ -0,0 +1,38 @@
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+ target triple = "aarch64--"
+ define void @test_pow() {
+ entry:
+ ret void
+ }
+...
+
+---
+name: test_pow
+body: |
+ bb.0.entry:
+ liveins: %d0, %d1, %s2, %s3
+
+ ; CHECK-LABEL: name: test_pow
+ ; CHECK: hasCalls: true
+
+ %0:_(s64) = COPY %d0
+ %1:_(s64) = COPY %d1
+ %2:_(s32) = COPY %s2
+ %3:_(s32) = COPY %s3
+
+ ; CHECK: %d0 = COPY %0
+ ; CHECK: %d1 = COPY %1
+ ; CHECK: BL $pow, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %d0, implicit %d1, implicit-def %d0
+ ; CHECK: %4(s64) = COPY %d0
+ %4:_(s64) = G_FPOW %0, %1
+
+ ; CHECK: %s0 = COPY %2
+ ; CHECK: %s1 = COPY %3
+ ; CHECK: BL $powf, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %s0, implicit %s1, implicit-def %s0
+ ; CHECK: %5(s32) = COPY %s0
+ %5:_(s32) = G_FPOW %2, %3
+
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-rem.mir b/test/CodeGen/AArch64/GlobalISel/legalize-rem.mir
index e77f3487609f..50a4d93cbe20 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-rem.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-rem.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
@@ -45,8 +45,15 @@ body: |
; CHECK: [[RHS32:%[0-9]+]](s32) = G_SEXT %7
; CHECK: [[QUOT32:%[0-9]+]](s32) = G_SDIV [[LHS32]], [[RHS32]]
; CHECK: [[QUOT:%[0-9]+]](s8) = G_TRUNC [[QUOT32]]
- ; CHECK: [[PROD:%[0-9]+]](s8) = G_MUL [[QUOT]], %7
- ; CHECK: [[RES:%[0-9]+]](s8) = G_SUB %6, [[PROD]]
+
+ ; CHECK: [[QUOT32_2:%.*]](s32) = G_ANYEXT [[QUOT]](s8)
+ ; CHECK: [[RHS32_2:%.*]](s32) = G_ANYEXT %7(s8)
+ ; CHECK: [[PROD32:%.*]](s32) = G_MUL [[QUOT32_2]], [[RHS32_2]]
+ ; CHECK: [[PROD:%.*]](s8) = G_TRUNC [[PROD32]](s32)
+
+ ; CHECK: [[LHS32_2:%.*]](s32) = G_ANYEXT %6(s8)
+ ; CHECK: [[PROD32_2:%.*]](s32) = G_ANYEXT [[PROD]](s8)
+ ; CHECK: [[RES:%[0-9]+]](s32) = G_SUB [[LHS32_2]], [[PROD32_2]]
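+
+ ; i.e. the legalizer expands srem(a, b) as a - (a / b) * b, with the
+ ; narrow mul and sub each widened to s32 as shown above.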
%6(s8) = G_TRUNC %0
%7(s8) = G_TRUNC %1
%8(s8) = G_SREM %6, %7
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir b/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir
new file mode 100644
index 000000000000..f75a2982a3f2
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir
@@ -0,0 +1,47 @@
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+ target triple = "aarch64--"
+ define void @test_shift() {
+ entry:
+ ret void
+ }
+...
+
+---
+name: test_shift
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+body: |
+ bb.0.entry:
+ liveins: %x0, %x1, %x2, %x3
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s8) = G_TRUNC %0
+ %3(s8) = G_TRUNC %1
+
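+ ; The extension used for each widened shift matches its semantics: G_ASHR
+ ; needs the sign bit replicated (G_SEXT), G_LSHR needs zeros shifted in
+ ; (G_ZEXT), and G_SHL only cares about the low bits (G_ANYEXT).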
+ ; CHECK: [[LHS32:%[0-9]+]](s32) = G_SEXT %2
+ ; CHECK: [[RHS32:%[0-9]+]](s32) = G_SEXT %3
+ ; CHECK: [[RES32:%[0-9]+]](s32) = G_ASHR [[LHS32]], [[RHS32]]
+ ; CHECK: %4(s8) = G_TRUNC [[RES32]]
+ %4(s8) = G_ASHR %2, %3
+
+ ; CHECK: [[LHS32:%[0-9]+]](s32) = G_ZEXT %2
+ ; CHECK: [[RHS32:%[0-9]+]](s32) = G_ZEXT %3
+ ; CHECK: [[RES32:%[0-9]+]](s32) = G_LSHR [[LHS32]], [[RHS32]]
+ ; CHECK: %5(s8) = G_TRUNC [[RES32]]
+ %5(s8) = G_LSHR %2, %3
+
+ ; CHECK: [[OP0:%.*]](s32) = G_ANYEXT %2(s8)
+ ; CHECK: [[OP1:%.*]](s32) = G_ANYEXT %3(s8)
+ ; CHECK: [[RES32:%.*]](s32) = G_SHL [[OP0]], [[OP1]]
+ ; CHECK: [[RES:%.*]](s8) = G_TRUNC [[RES32]](s32)
+ %6(s8) = G_SHL %2, %3
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir b/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir
index 41a9c33bfad8..cd24bccfe771 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
@@ -31,103 +31,56 @@ registers:
- { id: 14, class: _ }
- { id: 15, class: _ }
- { id: 16, class: _ }
- - { id: 17, class: _ }
- - { id: 18, class: _ }
- - { id: 19, class: _ }
- - { id: 20, class: _ }
- - { id: 21, class: _ }
- - { id: 22, class: _ }
- - { id: 23, class: _ }
- - { id: 24, class: _ }
- - { id: 25, class: _ }
- - { id: 26, class: _ }
- - { id: 27, class: _ }
- - { id: 28, class: _ }
- - { id: 29, class: _ }
- - { id: 30, class: _ }
- - { id: 31, class: _ }
- - { id: 32, class: _ }
- - { id: 33, class: _ }
- - { id: 34, class: _ }
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
%0(s64) = COPY %x0
+ %1(s1) = G_TRUNC %0
+ %2(s8) = G_TRUNC %0
+ %3(s16) = G_TRUNC %0
+ %4(s32) = G_TRUNC %0
+
; CHECK-LABEL: name: test_simple
- ; CHECK: %1(p0) = G_INTTOPTR %0
- ; CHECK: %2(s64) = G_PTRTOINT %1
- %1(p0) = G_INTTOPTR %0
- %2(s64) = G_PTRTOINT %1
+ ; CHECK: %5(p0) = G_INTTOPTR %0
+ ; CHECK: %6(s64) = G_PTRTOINT %5
+ %5(p0) = G_INTTOPTR %0
+ %6(s64) = G_PTRTOINT %5
- ; CHECK: G_BRCOND %3(s1), %bb.1.next
- %3(s1) = G_TRUNC %0
- G_BRCOND %3, %bb.1.next
+ ; CHECK: G_BRCOND %1(s1), %bb.1.next
+ G_BRCOND %1, %bb.1.next
bb.1.next:
- %4(s32) = G_TRUNC %0
-
- ; CHECK: %5(s1) = G_FPTOSI %4
- ; CHECK: %6(s8) = G_FPTOUI %4
- ; CHECK: %7(s16) = G_FPTOSI %4
- ; CHECK: %8(s32) = G_FPTOUI %4
- ; CHECK: %9(s64) = G_FPTOSI %4
- %5(s1) = G_FPTOSI %4
- %6(s8) = G_FPTOUI %4
- %7(s16) = G_FPTOSI %4
- %8(s32) = G_FPTOUI %4
- %9(s64) = G_FPTOSI %4
- ; CHECK: %10(s1) = G_FPTOUI %0
- ; CHECK: %11(s8) = G_FPTOSI %0
- ; CHECK: %12(s16) = G_FPTOUI %0
- ; CHECK: %13(s32) = G_FPTOSI %0
- ; CHECK: %14(s32) = G_FPTOUI %0
- %10(s1) = G_FPTOUI %0
- %11(s8) = G_FPTOSI %0
- %12(s16) = G_FPTOUI %0
- %13(s32) = G_FPTOSI %0
- %14(s32) = G_FPTOUI %0
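+ ; As with other narrow operations, G_SELECT on types below s32 is
+ ; widened: both inputs are anyext'd, the select is performed at s32,
+ ; and the result is truncated back down.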
+ ; CHECK: [[LHS:%[0-9]+]](s32) = G_ANYEXT %1(s1)
+ ; CHECK: [[RHS:%[0-9]+]](s32) = G_ANYEXT %1(s1)
+ ; CHECK: [[RES:%[0-9]+]](s32) = G_SELECT %1(s1), [[LHS]], [[RHS]]
+ ; CHECK: %7(s1) = G_TRUNC [[RES]](s32)
+ %7(s1) = G_SELECT %1, %1, %1
- ; CHECK: %15(s32) = G_UITOFP %5
- ; CHECK: %16(s32) = G_SITOFP %11
- ; CHECK: %17(s32) = G_UITOFP %7
- ; CHECK: %18(s32) = G_SITOFP %4
- ; CHECK: %19(s32) = G_UITOFP %0
- %15(s32) = G_UITOFP %5
- %16(s32) = G_SITOFP %11
- %17(s32) = G_UITOFP %7
- %18(s32) = G_SITOFP %4
- %19(s32) = G_UITOFP %0
+ ; CHECK: [[LHS:%[0-9]+]](s32) = G_ANYEXT %2(s8)
+ ; CHECK: [[RHS:%[0-9]+]](s32) = G_ANYEXT %2(s8)
+ ; CHECK: [[RES:%[0-9]+]](s32) = G_SELECT %1(s1), [[LHS]], [[RHS]]
+ ; CHECK: %8(s8) = G_TRUNC [[RES]](s32)
+ %8(s8) = G_SELECT %1, %2, %2
- ; CHECK: %20(s64) = G_SITOFP %5
- ; CHECK: %21(s64) = G_UITOFP %11
- ; CHECK: %22(s64) = G_SITOFP %7
- ; CHECK: %23(s64) = G_UITOFP %4
- ; CHECK: %24(s64) = G_SITOFP %0
- %20(s64) = G_SITOFP %5
- %21(s64) = G_UITOFP %11
- %22(s64) = G_SITOFP %7
- %23(s64) = G_UITOFP %4
- %24(s64) = G_SITOFP %0
+ ; CHECK: [[LHS:%[0-9]+]](s32) = G_ANYEXT %3(s16)
+ ; CHECK: [[RHS:%[0-9]+]](s32) = G_ANYEXT %3(s16)
+ ; CHECK: [[RES:%[0-9]+]](s32) = G_SELECT %1(s1), [[LHS]], [[RHS]]
+ ; CHECK: %9(s16) = G_TRUNC [[RES]](s32)
+ %9(s16) = G_SELECT %1, %3, %3
- ; CHECK: %25(s1) = G_SELECT %10(s1), %10, %5
- ; CHECK: %26(s8) = G_SELECT %10(s1), %6, %11
- ; CHECK: %27(s16) = G_SELECT %10(s1), %12, %7
- ; CHECK: %28(s32) = G_SELECT %10(s1), %15, %16
- ; CHECK: %29(s64) = G_SELECT %10(s1), %9, %24
- %25(s1) = G_SELECT %10, %10, %5
- %26(s8) = G_SELECT %10, %6, %11
- %27(s16) = G_SELECT %10, %12, %7
- %28(s32) = G_SELECT %10, %15, %16
- %29(s64) = G_SELECT %10, %9, %24
+ %10(s32) = G_SELECT %1, %4, %4
+ %11(s64) = G_SELECT %1, %0, %0
- ; CHECK: %30(<2 x s32>) = G_BITCAST %9
- ; CHECK: %31(s64) = G_BITCAST %30
- ; CHECK: %32(s32) = G_BITCAST %15
- %30(<2 x s32>) = G_BITCAST %9
- %31(s64) = G_BITCAST %30
- %32(s32) = G_BITCAST %15
- %33(<4 x s8>) = G_BITCAST %15
- %34(<2 x s16>) = G_BITCAST %15
+ ; CHECK: %12(<2 x s32>) = G_BITCAST %0
+ ; CHECK: %13(s64) = G_BITCAST %12
+ ; CHECK: %14(s32) = G_BITCAST %10
+ ; CHECK: %15(<4 x s8>) = G_BITCAST %0
+ ; CHECK: %16(<2 x s16>) = G_BITCAST %0
+ %12(<2 x s32>) = G_BITCAST %0
+ %13(s64) = G_BITCAST %12
+ %14(s32) = G_BITCAST %10
+ %15(<4 x s8>) = G_BITCAST %0
+ %16(<2 x s16>) = G_BITCAST %0
...
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-sub.mir b/test/CodeGen/AArch64/GlobalISel/legalize-sub.mir
index e5403cb73c37..82a1dd09c1a1 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-sub.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-sub.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
@@ -22,7 +22,10 @@ body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_scalar_sub_small
- ; CHECK: [[RES:%.*]](s8) = G_SUB %2, %3
+ ; CHECK: [[OP0:%.*]](s32) = G_ANYEXT %2(s8)
+ ; CHECK: [[OP1:%.*]](s32) = G_ANYEXT %3(s8)
+ ; CHECK: [[RES32:%.*]](s32) = G_SUB [[OP0]], [[OP1]]
+ ; CHECK: [[RES:%.*]](s8) = G_TRUNC [[RES32]](s32)
%0(s64) = COPY %x0
%1(s64) = COPY %x1
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-vaarg.mir b/test/CodeGen/AArch64/GlobalISel/legalize-vaarg.mir
new file mode 100644
index 000000000000..8bda08d0a1d1
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-vaarg.mir
@@ -0,0 +1,39 @@
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+ target triple = "aarch64--"
+ define void @test_vaarg() { ret void }
+...
+
+---
+name: test_vaarg
+body: |
+ bb.0:
+ %0:_(p0) = COPY %x0
+
+ ; CHECK-LABEL: name: test_vaarg
+ ; CHECK: [[LIST:%[0-9]+]](p0) = G_LOAD %0(p0) :: (load 8)
+ ; CHECK: %1(s8) = G_LOAD [[LIST]](p0) :: (load 1, align 8)
+ ; CHECK: [[SLOTSIZE:%[0-9]+]](s64) = G_CONSTANT i64 8
+ ; CHECK: [[NEXT:%[0-9]+]](p0) = G_GEP [[LIST]], [[SLOTSIZE]](s64)
+ ; CHECK: G_STORE [[NEXT]](p0), %0(p0) :: (store 8)
+ %1:_(s8) = G_VAARG %0(p0), 1
+
+ ; CHECK: [[LIST:%[0-9]+]](p0) = G_LOAD %0(p0) :: (load 8)
+ ; CHECK: %2(s64) = G_LOAD [[LIST]](p0) :: (load 8)
+ ; CHECK: [[SLOTSIZE:%[0-9]+]](s64) = G_CONSTANT i64 8
+ ; CHECK: [[NEXT:%[0-9]+]](p0) = G_GEP [[LIST]], [[SLOTSIZE]](s64)
+ ; CHECK: G_STORE [[NEXT]](p0), %0(p0) :: (store 8)
+ %2:_(s64) = G_VAARG %0(p0), 8
+
+ ; CHECK: [[LIST:%[0-9]+]](p0) = G_LOAD %0(p0) :: (load 8)
+ ; CHECK: [[ALIGNM1:%[0-9]+]](s64) = G_CONSTANT i64 15
+ ; CHECK: [[ALIGNTMP:%[0-9]+]](p0) = G_GEP [[LIST]], [[ALIGNM1]](s64)
+ ; CHECK: [[LIST:%[0-9]+]](p0) = G_PTR_MASK [[ALIGNTMP]], 4
+ ; CHECK: %3(s64) = G_LOAD [[LIST]](p0) :: (load 8, align 16)
+ ; CHECK: [[SLOTSIZE:%[0-9]+]](s64) = G_CONSTANT i64 8
+ ; CHECK: [[NEXT:%[0-9]+]](p0) = G_GEP [[LIST]], [[SLOTSIZE]](s64)
+ ; CHECK: G_STORE [[NEXT]](p0), %0(p0) :: (store 8)
+ %3:_(s64) = G_VAARG %0(p0), 16
+...
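+
+ ; All three cases follow the same generic va_arg lowering (a sketch in
+ ; C-like pseudocode; "list" is the va_list pointer):
+ ;   cur = *list;                      // load the current position
+ ;   cur = (cur + align - 1) & -align; // only when align > 8, as above
+ ;   val = *cur;                       // load the argument itself
+ ;   *list = cur + 8;                  // advance by one 8-byte slot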
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir b/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir
index 919e674965c0..460b3d16f1c0 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
@@ -22,7 +22,10 @@ body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_scalar_xor_small
- ; CHECK: %4(s8) = G_XOR %2, %3
+ ; CHECK: [[OP0:%.*]](s32) = G_ANYEXT %2(s8)
+ ; CHECK: [[OP1:%.*]](s32) = G_ANYEXT %3(s8)
+ ; CHECK: [[RES32:%.*]](s32) = G_XOR [[OP0]], [[OP1]]
+ ; CHECK: [[RES:%.*]](s8) = G_TRUNC [[RES32]](s32)
%0(s64) = COPY %x0
%1(s64) = COPY %x1
diff --git a/test/CodeGen/AArch64/GlobalISel/no-regclass.mir b/test/CodeGen/AArch64/GlobalISel/no-regclass.mir
new file mode 100644
index 000000000000..6832ce0ee8bd
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/no-regclass.mir
@@ -0,0 +1,30 @@
+# RUN: llc -O0 -mtriple=aarch64-apple-ios -global-isel -start-before=legalizer -stop-after=instruction-select %s -o - | FileCheck %s
+
+# We run the legalizer to combine the trivial G_SEQUENCE/G_EXTRACT pair,
+# leaving %1 and %2 orphaned after instruction selection (no instructions
+# define or use them). This shouldn't be a problem.
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @unused_reg() { ret void }
+
+---
+# CHECK-LABEL: name: unused_reg
+name: unused_reg
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %w0 = COPY %0
+
+body: |
+ bb.0:
+ liveins: %w0
+ %0:gpr(s32) = COPY %w0
+ %1:gpr(s32) = G_SEQUENCE %0(s32), 0
+ %2:gpr(s32) = G_EXTRACT %1(s32), 0
+ %w0 = COPY %2(s32)
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/regbankselect-dbg-value.mir b/test/CodeGen/AArch64/GlobalISel/regbankselect-dbg-value.mir
new file mode 100644
index 000000000000..73d4d2054729
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/regbankselect-dbg-value.mir
@@ -0,0 +1,45 @@
+# RUN: llc -O0 -mtriple arm64-- -run-pass=regbankselect -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @test_dbg_value() !dbg !5 {
+ ; Keep the dbg metadata live by referencing it in the IR.
+ call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !7, metadata !9), !dbg !10
+ ret void
+ }
+
+ declare void @llvm.dbg.value(metadata, i64, metadata, metadata)
+
+ !llvm.dbg.cu = !{!0}
+ !llvm.module.flags = !{!3, !4}
+
+ !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "llvm", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+ !1 = !DIFile(filename: "test.ll", directory: "/tmp")
+ !2 = !{}
+ !3 = !{i32 2, !"Dwarf Version", i32 4}
+ !4 = !{i32 2, !"Debug Info Version", i32 3}
+ !5 = distinct !DISubprogram(name: "test_dbg_value", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+ !6 = !DISubroutineType(types: !2)
+ !7 = !DILocalVariable(name: "in", arg: 1, scope: !5, file: !1, line: 1, type: !8)
+ !8 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+ !9 = !DIExpression()
+ !10 = !DILocation(line: 1, column: 1, scope: !5)
+...
+
+---
+# CHECK-LABEL: name: test_dbg_value
+name: test_dbg_value
+legalized: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr }
+body: |
+ bb.0:
+ liveins: %w0
+ %0:_(s32) = COPY %w0
+ ; CHECK: DBG_VALUE debug-use %0(s32), debug-use _, !7, !9, debug-location !10
+ DBG_VALUE debug-use %0(s32), debug-use _, !7, !9, debug-location !10
+
+ ; CHECK: DBG_VALUE _, 0, !7, !9, debug-location !10
+ DBG_VALUE _, 0, !7, !9, debug-location !10
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir b/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir
index 12162eb54a83..14ee40c941bf 100644
--- a/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir
+++ b/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir
@@ -622,7 +622,7 @@ body: |
; CHECK: %0(p0) = COPY %x0
; CHECK: %1(s32) = G_LOAD %0
%0(p0) = COPY %x0
- %1(s32) = G_LOAD %0
+ %1(s32) = G_LOAD %0 :: (load 4)
...
---
@@ -643,7 +643,7 @@ body: |
; CHECK: G_STORE %1(s32), %0(p0)
%0(p0) = COPY %x0
%1(s32) = COPY %w1
- G_STORE %1, %0
+ G_STORE %1, %0 :: (store 4)
...
---
diff --git a/test/CodeGen/AArch64/GlobalISel/regbankselect-reg_sequence.mir b/test/CodeGen/AArch64/GlobalISel/regbankselect-reg_sequence.mir
new file mode 100644
index 000000000000..15ccf1f5459c
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/regbankselect-reg_sequence.mir
@@ -0,0 +1,25 @@
+# RUN: llc %s -mtriple aarch64-- -o - -run-pass regbankselect | FileCheck %s
+--- |
+ define void @foo() { ret void }
+...
+---
+# CHECK-LABEL: foo
+# Check that we produce a valid mapping for REG_SEQUENCE.
+# This used to fail the RegisterBankInfo verifier because
+# we were exclusively using the type of the definition,
+# whereas REG_SEQUENCE is a kind of target opcode, so
+# its definition may not have a type.
+#
+# CHECK: id: 0, class: dd
+name: foo
+legalized: true
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: dd }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0 = REG_SEQUENCE %d0, %subreg.dsub0, %d1, %subreg.dsub1
+
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-binop.mir b/test/CodeGen/AArch64/GlobalISel/select-binop.mir
new file mode 100644
index 000000000000..8ae2e1b2eb7d
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-binop.mir
@@ -0,0 +1,1042 @@
+# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @add_s32_gpr() { ret void }
+ define void @add_s64_gpr() { ret void }
+
+ define void @add_imm_s32_gpr() { ret void }
+ define void @add_imm_s64_gpr() { ret void }
+
+ define void @add_imm_s32_gpr_bb() { ret void }
+
+ define void @sub_s32_gpr() { ret void }
+ define void @sub_s64_gpr() { ret void }
+
+ define void @or_s32_gpr() { ret void }
+ define void @or_s64_gpr() { ret void }
+ define void @or_v2s32_fpr() { ret void }
+
+ define void @and_s32_gpr() { ret void }
+ define void @and_s64_gpr() { ret void }
+
+ define void @shl_s32_gpr() { ret void }
+ define void @shl_s64_gpr() { ret void }
+
+ define void @lshr_s32_gpr() { ret void }
+ define void @lshr_s64_gpr() { ret void }
+
+ define void @ashr_s32_gpr() { ret void }
+ define void @ashr_s64_gpr() { ret void }
+
+ define void @mul_s32_gpr() { ret void }
+ define void @mul_s64_gpr() { ret void }
+
+ define void @mulh_s64_gpr() { ret void }
+
+ define void @sdiv_s32_gpr() { ret void }
+ define void @sdiv_s64_gpr() { ret void }
+
+ define void @udiv_s32_gpr() { ret void }
+ define void @udiv_s64_gpr() { ret void }
+
+ define void @fadd_s32_fpr() { ret void }
+ define void @fadd_s64_fpr() { ret void }
+
+ define void @fsub_s32_fpr() { ret void }
+ define void @fsub_s64_fpr() { ret void }
+
+ define void @fmul_s32_fpr() { ret void }
+ define void @fmul_s64_fpr() { ret void }
+
+ define void @fdiv_s32_fpr() { ret void }
+ define void @fdiv_s64_fpr() { ret void }
+
+...
+
+---
+# Check that we select a 32-bit GPR G_ADD into ADDWrr on GPR32.
+# Also check that we constrain the register class of the COPY to GPR32.
+# CHECK-LABEL: name: add_s32_gpr
+name: add_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = COPY %w1
+# CHECK: %2 = ADDWrr %0, %1
+body: |
+ bb.0:
+ liveins: %w0, %w1
+
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_ADD %0, %1
+ %w0 = COPY %2(s32)
+...
+
+---
+# Same as add_s32_gpr, for 64-bit operations.
+# CHECK-LABEL: name: add_s64_gpr
+name: add_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+# CHECK-NEXT: - { id: 2, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %x1
+# CHECK: %2 = ADDXrr %0, %1
+body: |
+ bb.0:
+ liveins: %x0, %x1
+
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_ADD %0, %1
+ %x0 = COPY %2(s64)
+...
+
+---
+# CHECK-LABEL: name: add_imm_s32_gpr
+name: add_imm_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32sp }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr32sp }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %2 = ADDWri %0, 1, 0
+body: |
+ bb.0:
+ liveins: %w0, %w1
+
+ %0(s32) = COPY %w0
+ %1(s32) = G_CONSTANT i32 1
+ %2(s32) = G_ADD %0, %1
+ %w0 = COPY %2(s32)
+...
+
+---
+# CHECK-LABEL: name: add_imm_s64_gpr
+name: add_imm_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr64sp }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %2 = ADDXri %0, 1, 0
+body: |
+ bb.0:
+ liveins: %x0, %w1
+
+ %0(s64) = COPY %x0
+ %1(s64) = G_CONSTANT i64 1
+ %2(s64) = G_ADD %0, %1
+ %x0 = COPY %2(s64)
+...
+
+---
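+# Same as add_imm_s32_gpr, but with the G_CONSTANT defined in a different
+# block from its use: the immediate must still be folded into the ADDWri
+# selected in bb.1.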
+# CHECK-LABEL: name: add_imm_s32_gpr_bb
+name: add_imm_s32_gpr_bb
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32sp }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr32sp }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: bb.1:
+# CHECK: %2 = ADDWri %0, 1, 0
+body: |
+ bb.0:
+ liveins: %w0, %w1
+ successors: %bb.1
+
+ %0(s32) = COPY %w0
+ %1(s32) = G_CONSTANT i32 1
+ G_BR %bb.1
+
+ bb.1:
+ %2(s32) = G_ADD %0, %1
+ %w0 = COPY %2(s32)
+...
+
+---
+# Same as add_s32_gpr, for G_SUB operations.
+# CHECK-LABEL: name: sub_s32_gpr
+name: sub_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = COPY %w1
+# CHECK: %2 = SUBSWrr %0, %1, implicit-def %nzcv
+body: |
+ bb.0:
+ liveins: %w0, %w1
+
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_SUB %0, %1
+ %w0 = COPY %2(s32)
+...
+
+---
+# Same as add_s64_gpr, for G_SUB operations.
+# CHECK-LABEL: name: sub_s64_gpr
+name: sub_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+# CHECK-NEXT: - { id: 2, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %x1
+# CHECK: %2 = SUBSXrr %0, %1, implicit-def %nzcv
+body: |
+ bb.0:
+ liveins: %x0, %x1
+
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_SUB %0, %1
+ %x0 = COPY %2(s64)
+...
+
+---
+# Same as add_s32_gpr, for G_OR operations.
+# CHECK-LABEL: name: or_s32_gpr
+name: or_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = COPY %w1
+# CHECK: %2 = ORRWrr %0, %1
+body: |
+ bb.0:
+ liveins: %w0, %w1
+
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_OR %0, %1
+ %w0 = COPY %2(s32)
+...
+
+---
+# Same as add_s64_gpr, for G_OR operations.
+# CHECK-LABEL: name: or_s64_gpr
+name: or_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+# CHECK-NEXT: - { id: 2, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %x1
+# CHECK: %2 = ORRXrr %0, %1
+body: |
+ bb.0:
+ liveins: %x0, %x1
+
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_OR %0, %1
+ %x0 = COPY %2(s64)
+...
+
+---
+# 64-bit G_OR on vector registers.
+# CHECK-LABEL: name: or_v2s32_fpr
+name: or_v2s32_fpr
+legalized: true
+regBankSelected: true
+#
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr64 }
+# CHECK-NEXT: - { id: 1, class: fpr64 }
+# CHECK-NEXT: - { id: 2, class: fpr64 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: fpr }
+ - { id: 2, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %d0
+# CHECK: %1 = COPY %d1
+# The actual OR opcode does not matter as long as it operates
+# on a 64-bit wide vector.
+# CHECK: %2 = ORRv8i8 %0, %1
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(<2 x s32>) = COPY %d0
+ %1(<2 x s32>) = COPY %d1
+ %2(<2 x s32>) = G_OR %0, %1
+ %d0 = COPY %2(<2 x s32>)
+...
+
+---
+# Same as add_s32_gpr, for G_AND operations.
+# CHECK-LABEL: name: and_s32_gpr
+name: and_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = COPY %w1
+# CHECK: %2 = ANDWrr %0, %1
+body: |
+ bb.0:
+ liveins: %w0, %w1
+
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_AND %0, %1
+ %w0 = COPY %2(s32)
+...
+
+---
+# Same as add_s64_gpr, for G_AND operations.
+# CHECK-LABEL: name: and_s64_gpr
+name: and_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+# CHECK-NEXT: - { id: 2, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %x1
+# CHECK: %2 = ANDXrr %0, %1
+body: |
+ bb.0:
+ liveins: %x0, %x1
+
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_AND %0, %1
+ %x0 = COPY %2(s64)
+...
+
+---
+# Same as add_s32_gpr, for G_SHL operations.
+# CHECK-LABEL: name: shl_s32_gpr
+name: shl_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = COPY %w1
+# CHECK: %2 = LSLVWr %0, %1
+body: |
+ bb.0:
+ liveins: %w0, %w1
+
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_SHL %0, %1
+ %w0 = COPY %2(s32)
+...
+
+---
+# Same as add_s64_gpr, for G_SHL operations.
+# CHECK-LABEL: name: shl_s64_gpr
+name: shl_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+# CHECK-NEXT: - { id: 2, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %x1
+# CHECK: %2 = LSLVXr %0, %1
+body: |
+ bb.0:
+ liveins: %x0, %x1
+
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_SHL %0, %1
+ %x0 = COPY %2(s64)
+...
+
+---
+# Same as add_s32_gpr, for G_LSHR operations.
+# CHECK-LABEL: name: lshr_s32_gpr
+name: lshr_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = COPY %w1
+# CHECK: %2 = LSRVWr %0, %1
+body: |
+ bb.0:
+ liveins: %w0, %w1
+
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_LSHR %0, %1
+ %w0 = COPY %2(s32)
+...
+
+---
+# Same as add_s64_gpr, for G_LSHR operations.
+# CHECK-LABEL: name: lshr_s64_gpr
+name: lshr_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+# CHECK-NEXT: - { id: 2, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %x1
+# CHECK: %2 = LSRVXr %0, %1
+body: |
+ bb.0:
+ liveins: %x0, %x1
+
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_LSHR %0, %1
+ %x0 = COPY %2(s64)
+...
+
+---
+# Same as add_s32_gpr, for G_ASHR operations.
+# CHECK-LABEL: name: ashr_s32_gpr
+name: ashr_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = COPY %w1
+# CHECK: %2 = ASRVWr %0, %1
+body: |
+ bb.0:
+ liveins: %w0, %w1
+
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_ASHR %0, %1
+ %w0 = COPY %2(s32)
+...
+
+---
+# Same as add_s64_gpr, for G_ASHR operations.
+# CHECK-LABEL: name: ashr_s64_gpr
+name: ashr_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+# CHECK-NEXT: - { id: 2, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %x1
+# CHECK: %2 = ASRVXr %0, %1
+body: |
+ bb.0:
+ liveins: %x0, %x1
+
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_ASHR %0, %1
+ %x0 = COPY %2(s64)
+...
+
+---
+# Check that we select an s32 GPR G_MUL. This is trickier than other binops because
+# there is only MADDWrrr, and we have to use the WZR physreg.
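+# In effect, mul(a, b) is selected as madd(a, b, wzr): MADD computes
+# a * b + c, and the WZR zero register supplies c = 0.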
+# CHECK-LABEL: name: mul_s32_gpr
+name: mul_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = COPY %w1
+# CHECK: %2 = MADDWrrr %0, %1, %wzr
+body: |
+ bb.0:
+ liveins: %w0, %w1
+
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_MUL %0, %1
+ %w0 = COPY %2(s32)
+...
+
+---
+# Same as mul_s32_gpr for the s64 type.
+# CHECK-LABEL: name: mul_s64_gpr
+name: mul_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+# CHECK-NEXT: - { id: 2, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %x1
+# CHECK: %2 = MADDXrrr %0, %1, %xzr
+body: |
+ bb.0:
+ liveins: %x0, %x1
+
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_MUL %0, %1
+ %x0 = COPY %2(s64)
+...
+
+---
+# Check that we select G_SMULH/G_UMULH into SMULHrr/UMULHrr on s64 GPRs.
+# CHECK-LABEL: name: mulh_s64_gpr
+name: mulh_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+# CHECK-NEXT: - { id: 2, class: gpr64 }
+# CHECK-NEXT: - { id: 3, class: gpr64 }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %x1
+# CHECK: %2 = SMULHrr %0, %1
+# CHECK: %3 = UMULHrr %0, %1
+body: |
+ bb.0:
+ liveins: %x0, %x1
+
+ %0:gpr(s64) = COPY %x0
+ %1:gpr(s64) = COPY %x1
+ %2:gpr(s64) = G_SMULH %0, %1
+ %3:gpr(s64) = G_UMULH %0, %1
+ %x0 = COPY %2(s64)
+ %x0 = COPY %3(s64)
+...
+
+---
+# Same as add_s32_gpr, for G_SDIV operations.
+# CHECK-LABEL: name: sdiv_s32_gpr
+name: sdiv_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = COPY %w1
+# CHECK: %2 = SDIVWr %0, %1
+body: |
+ bb.0:
+ liveins: %w0, %w1
+
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_SDIV %0, %1
+ %w0 = COPY %2(s32)
+...
+
+---
+# Same as add_s64_gpr, for G_SDIV operations.
+# CHECK-LABEL: name: sdiv_s64_gpr
+name: sdiv_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+# CHECK-NEXT: - { id: 2, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %x1
+# CHECK: %2 = SDIVXr %0, %1
+body: |
+ bb.0:
+ liveins: %x0, %x1
+
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_SDIV %0, %1
+ %x0 = COPY %2(s64)
+...
+
+---
+# Same as add_s32_gpr, for G_UDIV operations.
+# CHECK-LABEL: name: udiv_s32_gpr
+name: udiv_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = COPY %w1
+# CHECK: %2 = UDIVWr %0, %1
+body: |
+ bb.0:
+ liveins: %w0, %w1
+
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_UDIV %0, %1
+ %w0 = COPY %2(s32)
+...
+
+---
+# Same as add_s64_gpr, for G_UDIV operations.
+# CHECK-LABEL: name: udiv_s64_gpr
+name: udiv_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+# CHECK-NEXT: - { id: 2, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %x1
+# CHECK: %2 = UDIVXr %0, %1
+body: |
+ bb.0:
+ liveins: %x0, %x1
+
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_UDIV %0, %1
+ %x0 = COPY %2(s64)
+...
+
+---
+# Check that we select an s32 FPR G_FADD into FADDSrr.
+# CHECK-LABEL: name: fadd_s32_fpr
+name: fadd_s32_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr32 }
+# CHECK-NEXT: - { id: 1, class: fpr32 }
+# CHECK-NEXT: - { id: 2, class: fpr32 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: fpr }
+ - { id: 2, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %s0
+# CHECK: %1 = COPY %s1
+# CHECK: %2 = FADDSrr %0, %1
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ %1(s32) = COPY %s1
+ %2(s32) = G_FADD %0, %1
+ %s0 = COPY %2(s32)
+...
+
+---
+# CHECK-LABEL: name: fadd_s64_fpr
+name: fadd_s64_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr64 }
+# CHECK-NEXT: - { id: 1, class: fpr64 }
+# CHECK-NEXT: - { id: 2, class: fpr64 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: fpr }
+ - { id: 2, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %d0
+# CHECK: %1 = COPY %d1
+# CHECK: %2 = FADDDrr %0, %1
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ %1(s64) = COPY %d1
+ %2(s64) = G_FADD %0, %1
+ %d0 = COPY %2(s64)
+...
+
+---
+# CHECK-LABEL: name: fsub_s32_fpr
+name: fsub_s32_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr32 }
+# CHECK-NEXT: - { id: 1, class: fpr32 }
+# CHECK-NEXT: - { id: 2, class: fpr32 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: fpr }
+ - { id: 2, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %s0
+# CHECK: %1 = COPY %s1
+# CHECK: %2 = FSUBSrr %0, %1
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ %1(s32) = COPY %s1
+ %2(s32) = G_FSUB %0, %1
+ %s0 = COPY %2(s32)
+...
+
+---
+# CHECK-LABEL: name: fsub_s64_fpr
+name: fsub_s64_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr64 }
+# CHECK-NEXT: - { id: 1, class: fpr64 }
+# CHECK-NEXT: - { id: 2, class: fpr64 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: fpr }
+ - { id: 2, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %d0
+# CHECK: %1 = COPY %d1
+# CHECK: %2 = FSUBDrr %0, %1
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ %1(s64) = COPY %d1
+ %2(s64) = G_FSUB %0, %1
+ %d0 = COPY %2(s64)
+...
+
+---
+# CHECK-LABEL: name: fmul_s32_fpr
+name: fmul_s32_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr32 }
+# CHECK-NEXT: - { id: 1, class: fpr32 }
+# CHECK-NEXT: - { id: 2, class: fpr32 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: fpr }
+ - { id: 2, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %s0
+# CHECK: %1 = COPY %s1
+# CHECK: %2 = FMULSrr %0, %1
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ %1(s32) = COPY %s1
+ %2(s32) = G_FMUL %0, %1
+ %s0 = COPY %2(s32)
+...
+
+---
+# CHECK-LABEL: name: fmul_s64_fpr
+name: fmul_s64_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr64 }
+# CHECK-NEXT: - { id: 1, class: fpr64 }
+# CHECK-NEXT: - { id: 2, class: fpr64 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: fpr }
+ - { id: 2, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %d0
+# CHECK: %1 = COPY %d1
+# CHECK: %2 = FMULDrr %0, %1
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ %1(s64) = COPY %d1
+ %2(s64) = G_FMUL %0, %1
+ %d0 = COPY %2(s64)
+...
+
+---
+# CHECK-LABEL: name: fdiv_s32_fpr
+name: fdiv_s32_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr32 }
+# CHECK-NEXT: - { id: 1, class: fpr32 }
+# CHECK-NEXT: - { id: 2, class: fpr32 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: fpr }
+ - { id: 2, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %s0
+# CHECK: %1 = COPY %s1
+# CHECK: %2 = FDIVSrr %0, %1
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ %1(s32) = COPY %s1
+ %2(s32) = G_FDIV %0, %1
+ %s0 = COPY %2(s32)
+...
+
+---
+# CHECK-LABEL: name: fdiv_s64_fpr
+name: fdiv_s64_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr64 }
+# CHECK-NEXT: - { id: 1, class: fpr64 }
+# CHECK-NEXT: - { id: 2, class: fpr64 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: fpr }
+ - { id: 2, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %d0
+# CHECK: %1 = COPY %d1
+# CHECK: %2 = FDIVDrr %0, %1
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ %1(s64) = COPY %d1
+ %2(s64) = G_FDIV %0, %1
+ %d0 = COPY %2(s64)
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-bitcast.mir b/test/CodeGen/AArch64/GlobalISel/select-bitcast.mir
new file mode 100644
index 000000000000..5ca63dbc214d
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-bitcast.mir
@@ -0,0 +1,212 @@
+# RUN: llc -O0 -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
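+
+# These tests check that G_BITCAST of 32- and 64-bit scalars selects to a
+# plain COPY, whether the source and destination are on the same register
+# bank or cross between the GPR and FPR banks.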
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @bitcast_s32_gpr() { ret void }
+ define void @bitcast_s32_fpr() { ret void }
+ define void @bitcast_s32_gpr_fpr() { ret void }
+ define void @bitcast_s32_fpr_gpr() { ret void }
+ define void @bitcast_s64_gpr() { ret void }
+ define void @bitcast_s64_fpr() { ret void }
+ define void @bitcast_s64_gpr_fpr() { ret void }
+ define void @bitcast_s64_fpr_gpr() { ret void }
+...
+
+---
+# CHECK-LABEL: name: bitcast_s32_gpr
+name: bitcast_s32_gpr
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32all }
+# CHECK-NEXT: - { id: 1, class: gpr32all }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = COPY %0
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0(s32) = COPY %w0
+ %1(s32) = G_BITCAST %0
+ %w0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: bitcast_s32_fpr
+name: bitcast_s32_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr32 }
+# CHECK-NEXT: - { id: 1, class: fpr32 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %s0
+# CHECK: %1 = COPY %0
+body: |
+ bb.0:
+ liveins: %s0
+
+ %0(s32) = COPY %s0
+ %1(s32) = G_BITCAST %0
+ %s0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: bitcast_s32_gpr_fpr
+name: bitcast_s32_gpr_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32all }
+# CHECK-NEXT: - { id: 1, class: fpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = COPY %0
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0(s32) = COPY %w0
+ %1(s32) = G_BITCAST %0
+ %s0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: bitcast_s32_fpr_gpr
+name: bitcast_s32_fpr_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32all }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %s0
+# CHECK: %1 = COPY %0
+body: |
+ bb.0:
+ liveins: %s0
+
+ %0(s32) = COPY %s0
+ %1(s32) = G_BITCAST %0
+ %w0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: bitcast_s64_gpr
+name: bitcast_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64all }
+# CHECK-NEXT: - { id: 1, class: gpr64all }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %0
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(s64) = COPY %x0
+ %1(s64) = G_BITCAST %0
+ %x0 = COPY %1(s64)
+...
+
+---
+# CHECK-LABEL: name: bitcast_s64_fpr
+name: bitcast_s64_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr64 }
+# CHECK-NEXT: - { id: 1, class: fpr64 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %d0
+# CHECK: %1 = COPY %0
+body: |
+ bb.0:
+ liveins: %d0
+
+ %0(s64) = COPY %d0
+ %1(s64) = G_BITCAST %0
+ %d0 = COPY %1(s64)
+...
+
+---
+# CHECK-LABEL: name: bitcast_s64_gpr_fpr
+name: bitcast_s64_gpr_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64all }
+# CHECK-NEXT: - { id: 1, class: fpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: fpr }
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %0
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(s64) = COPY %x0
+ %1(s64) = G_BITCAST %0
+ %d0 = COPY %1(s64)
+...
+
+---
+# CHECK-LABEL: name: bitcast_s64_fpr_gpr
+name: bitcast_s64_fpr_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr64all }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %d0
+# CHECK: %1 = COPY %0
+body: |
+ bb.0:
+ liveins: %d0
+
+ %0(s64) = COPY %d0
+ %1(s64) = G_BITCAST %0
+ %x0 = COPY %1(s64)
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-br.mir b/test/CodeGen/AArch64/GlobalISel/select-br.mir
new file mode 100644
index 000000000000..f46f190260f6
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-br.mir
@@ -0,0 +1,71 @@
+# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
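+
+# These tests check branch selection: G_BR becomes an unconditional B,
+# G_BRCOND becomes a test-bit-and-branch (TBNZW), and G_BRINDIRECT becomes BR.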
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @unconditional_br() { ret void }
+ define void @conditional_br() { ret void }
+ define void @indirect_br() { ret void }
+...
+
+---
+# CHECK-LABEL: name: unconditional_br
+name: unconditional_br
+legalized: true
+regBankSelected: true
+
+# CHECK: body:
+# CHECK: bb.0:
+# CHECK: successors: %bb.0
+# CHECK: B %bb.0
+body: |
+ bb.0:
+ successors: %bb.0
+
+ G_BR %bb.0
+...
+
+---
+# CHECK-LABEL: name: conditional_br
+name: conditional_br
+legalized: true
+regBankSelected: true
+
+registers:
+ - { id: 0, class: gpr }
+
+# CHECK: body:
+# CHECK: bb.0:
+# CHECK: TBNZW %0, 0, %bb.1
+# CHECK: B %bb.0
+body: |
+ bb.0:
+ successors: %bb.0, %bb.1
+ %0(s1) = COPY %w0
+ G_BRCOND %0(s1), %bb.1
+ G_BR %bb.0
+
+ bb.1:
+...
+
+---
+# CHECK-LABEL: name: indirect_br
+name: indirect_br
+legalized: true
+regBankSelected: true
+
+registers:
+ - { id: 0, class: gpr }
+
+# CHECK: body:
+# CHECK: bb.0:
+# CHECK: %0 = COPY %x0
+# CHECK: BR %0
+body: |
+ bb.0:
+ successors: %bb.0, %bb.1
+ %0(p0) = COPY %x0
+ G_BRINDIRECT %0(p0)
+
+ bb.1:
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-cbz.mir b/test/CodeGen/AArch64/GlobalISel/select-cbz.mir
new file mode 100644
index 000000000000..2decb994b967
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-cbz.mir
@@ -0,0 +1,108 @@
+# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
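+
+# These tests check that a G_ICMP against zero feeding a G_BRCOND is folded
+# into a single compare-and-branch: CBZ for eq and CBNZ for ne, at both
+# 32-bit and 64-bit widths.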
+
+--- |
+ define void @cbz_s32() { ret void }
+ define void @cbz_s64() { ret void }
+ define void @cbnz_s32() { ret void }
+ define void @cbnz_s64() { ret void }
+...
+
+---
+# CHECK-LABEL: name: cbz_s32
+name: cbz_s32
+legalized: true
+regBankSelected: true
+
+# CHECK: body:
+# CHECK: bb.0:
+# CHECK: %0 = COPY %w0
+# CHECK: CBZW %0, %bb.1
+# CHECK: B %bb.0
+body: |
+ bb.0:
+ liveins: %w0
+ successors: %bb.0, %bb.1
+
+ %0:gpr(s32) = COPY %w0
+ %1:gpr(s32) = G_CONSTANT i32 0
+ %2:gpr(s1) = G_ICMP intpred(eq), %0, %1
+ G_BRCOND %2(s1), %bb.1
+ G_BR %bb.0
+
+ bb.1:
+...
+
+---
+# CHECK-LABEL: name: cbz_s64
+name: cbz_s64
+legalized: true
+regBankSelected: true
+
+# CHECK: body:
+# CHECK: bb.0:
+# CHECK: %0 = COPY %x0
+# CHECK: CBZX %0, %bb.1
+# CHECK: B %bb.0
+body: |
+ bb.0:
+ liveins: %x0
+ successors: %bb.0, %bb.1
+
+ %0:gpr(s64) = COPY %x0
+ %1:gpr(s64) = G_CONSTANT i64 0
+ %2:gpr(s1) = G_ICMP intpred(eq), %0, %1
+ G_BRCOND %2(s1), %bb.1
+ G_BR %bb.0
+
+ bb.1:
+...
+
+---
+# CHECK-LABEL: name: cbnz_s32
+name: cbnz_s32
+legalized: true
+regBankSelected: true
+
+# CHECK: body:
+# CHECK: bb.0:
+# CHECK: %0 = COPY %w0
+# CHECK: CBNZW %0, %bb.1
+# CHECK: B %bb.0
+body: |
+ bb.0:
+ liveins: %w0
+ successors: %bb.0, %bb.1
+
+ %0:gpr(s32) = COPY %w0
+ %1:gpr(s32) = G_CONSTANT i32 0
+ %2:gpr(s1) = G_ICMP intpred(ne), %0, %1
+ G_BRCOND %2(s1), %bb.1
+ G_BR %bb.0
+
+ bb.1:
+...
+
+---
+# CHECK-LABEL: name: cbnz_s64
+name: cbnz_s64
+legalized: true
+regBankSelected: true
+
+# CHECK: body:
+# CHECK: bb.0:
+# CHECK: %0 = COPY %x0
+# CHECK: CBNZX %0, %bb.1
+# CHECK: B %bb.0
+body: |
+ bb.0:
+ liveins: %x0
+ successors: %bb.0, %bb.1
+
+ %0:gpr(s64) = COPY %x0
+ %1:gpr(s64) = G_CONSTANT i64 0
+ %2:gpr(s1) = G_ICMP intpred(ne), %0, %1
+ G_BRCOND %2(s1), %bb.1
+ G_BR %bb.0
+
+ bb.1:
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-constant.mir b/test/CodeGen/AArch64/GlobalISel/select-constant.mir
new file mode 100644
index 000000000000..1a5bac9fb7d6
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-constant.mir
@@ -0,0 +1,77 @@
+# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
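+
+# These tests check that G_CONSTANT selects to MOVi32imm/MOVi64imm, and that
+# G_FCONSTANT is materialized as an integer move of the IEEE bit pattern
+# followed by a copy into an FPR.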
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define i32 @const_s32() { ret i32 42 }
+ define i64 @const_s64() { ret i64 1234567890123 }
+
+ define float @fconst_s32() { ret float 3.5 }
+ define double @fconst_s64() { ret double 1.0 }
+...
+
+---
+# CHECK-LABEL: name: const_s32
+name: const_s32
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = MOVi32imm 42
+body: |
+ bb.0:
+ %0(s32) = G_CONSTANT i32 42
+ %w0 = COPY %0(s32)
+...
+
+---
+# CHECK-LABEL: name: const_s64
+name: const_s64
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = MOVi64imm 1234567890123
+body: |
+ bb.0:
+ %0(s64) = G_CONSTANT i64 1234567890123
+ %x0 = COPY %0(s64)
+...
+
+---
+# CHECK-LABEL: name: fconst_s32
+name: fconst_s32
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: fpr }
+
+# CHECK: body:
+# CHECK: [[TMP:%[0-9]+]] = MOVi32imm 1080033280
+# CHECK: %0 = COPY [[TMP]]
+body: |
+ bb.0:
+ %0(s32) = G_FCONSTANT float 3.5
+ %s0 = COPY %0(s32)
+...
+
+---
+# CHECK-LABEL: name: fconst_s64
+name: fconst_s64
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: fpr }
+
+# CHECK: body:
+# CHECK: [[TMP:%[0-9]+]] = MOVi64imm 4607182418800017408
+# CHECK: %0 = COPY [[TMP]]
+body: |
+ bb.0:
+ %0(s64) = G_FCONSTANT double 1.0
+ %d0 = COPY %0(s64)
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir b/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir
new file mode 100644
index 000000000000..2f36ec8d2aaa
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir
@@ -0,0 +1,69 @@
+# RUN: llc -O0 -mtriple arm64-- -run-pass=instruction-select -global-isel %s -o - | FileCheck %s
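+
+# These tests check that DBG_VALUE survives instruction selection, and that a
+# DBG_VALUE operand does not keep an otherwise dead value alive: the dead COPY
+# is removed and the DBG_VALUE's register operand is emptied (printed as _).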
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @test_dbg_value(i32 %a) !dbg !5 {
+ %tmp0 = add i32 %a, %a
+ call void @llvm.dbg.value(metadata i32 %tmp0, i64 0, metadata !7, metadata !9), !dbg !10
+ ret void
+ }
+
+ define void @test_dbg_value_dead(i32 %a) !dbg !5 {
+ call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !7, metadata !9), !dbg !10
+ ret void
+ }
+
+ declare void @llvm.dbg.value(metadata, i64, metadata, metadata)
+
+ !llvm.dbg.cu = !{!0}
+ !llvm.module.flags = !{!3, !4}
+
+ !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "llvm", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+ !1 = !DIFile(filename: "test.ll", directory: "/tmp")
+ !2 = !{}
+ !3 = !{i32 2, !"Dwarf Version", i32 4}
+ !4 = !{i32 2, !"Debug Info Version", i32 3}
+ !5 = distinct !DISubprogram(name: "test_dbg_value", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+ !6 = !DISubroutineType(types: !2)
+ !7 = !DILocalVariable(name: "in", arg: 1, scope: !5, file: !1, line: 1, type: !8)
+ !8 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+ !9 = !DIExpression()
+ !10 = !DILocation(line: 1, column: 1, scope: !5)
+...
+
+---
+# CHECK-LABEL: name: test_dbg_value
+name: test_dbg_value
+legalized: true
+regBankSelected: true
+body: |
+ bb.0:
+ liveins: %w0
+ %0:gpr(s32) = COPY %w0
+ %1:gpr(s32) = G_ADD %0, %0
+ %w0 = COPY %1(s32)
+
+ ; CHECK: %0 = COPY %w0
+ ; CHECK-NEXT: %1 = ADDWrr %0, %0
+ ; CHECK-NEXT: %w0 = COPY %1
+ ; CHECK-NEXT: DBG_VALUE debug-use %1, debug-use _, !7, !9, debug-location !10
+
+ DBG_VALUE debug-use %1(s32), debug-use _, !7, !9, debug-location !10
+...
+
+---
+# CHECK-LABEL: name: test_dbg_value_dead
+name: test_dbg_value_dead
+legalized: true
+regBankSelected: true
+body: |
+ bb.0:
+ liveins: %w0
+ %0:gpr(s32) = COPY %w0
+
+ ; CHECK-NOT: COPY
+ ; CHECK: DBG_VALUE debug-use _, debug-use _, !7, !9, debug-location !10
+
+ DBG_VALUE debug-use %0(s32), debug-use _, !7, !9, debug-location !10
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-fp-casts.mir b/test/CodeGen/AArch64/GlobalISel/select-fp-casts.mir
new file mode 100644
index 000000000000..fbb11a1c7a4c
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-fp-casts.mir
@@ -0,0 +1,478 @@
+# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
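+
+# These tests cover the floating-point conversions: G_FPTRUNC and G_FPEXT
+# between s32 and s64, plus all four width combinations of G_SITOFP,
+# G_UITOFP, G_FPTOSI and G_FPTOUI between GPR integers and FPR floats.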
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @fptrunc() { ret void }
+ define void @fpext() { ret void }
+
+ define void @sitofp_s32_s32_fpr() { ret void }
+ define void @sitofp_s32_s64_fpr() { ret void }
+ define void @sitofp_s64_s32_fpr() { ret void }
+ define void @sitofp_s64_s64_fpr() { ret void }
+
+ define void @uitofp_s32_s32_fpr() { ret void }
+ define void @uitofp_s32_s64_fpr() { ret void }
+ define void @uitofp_s64_s32_fpr() { ret void }
+ define void @uitofp_s64_s64_fpr() { ret void }
+
+ define void @fptosi_s32_s32_gpr() { ret void }
+ define void @fptosi_s32_s64_gpr() { ret void }
+ define void @fptosi_s64_s32_gpr() { ret void }
+ define void @fptosi_s64_s64_gpr() { ret void }
+
+ define void @fptoui_s32_s32_gpr() { ret void }
+ define void @fptoui_s32_s64_gpr() { ret void }
+ define void @fptoui_s64_s32_gpr() { ret void }
+ define void @fptoui_s64_s64_gpr() { ret void }
+...
+
+---
+# CHECK-LABEL: name: fptrunc
+name: fptrunc
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK: - { id: 0, class: fpr64 }
+# CHECK: - { id: 1, class: fpr32 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %d0
+# CHECK: %1 = FCVTSDr %0
+body: |
+ bb.0:
+ liveins: %d0
+
+ %0(s64) = COPY %d0
+ %1(s32) = G_FPTRUNC %0
+ %s0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: fpext
+name: fpext
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK: - { id: 0, class: fpr32 }
+# CHECK: - { id: 1, class: fpr64 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %s0
+# CHECK: %1 = FCVTDSr %0
+body: |
+ bb.0:
+ liveins: %s0
+
+ %0(s32) = COPY %s0
+ %1(s64) = G_FPEXT %0
+ %d0 = COPY %1(s64)
+...
+
+---
+# CHECK-LABEL: name: sitofp_s32_s32_fpr
+name: sitofp_s32_s32_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: fpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = SCVTFUWSri %0
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0(s32) = COPY %w0
+ %1(s32) = G_SITOFP %0
+ %s0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: sitofp_s32_s64_fpr
+name: sitofp_s32_s64_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: fpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = SCVTFUXSri %0
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(s64) = COPY %x0
+ %1(s32) = G_SITOFP %0
+ %s0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: sitofp_s64_s32_fpr
+name: sitofp_s64_s32_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: fpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = SCVTFUWDri %0
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0(s32) = COPY %w0
+ %1(s64) = G_SITOFP %0
+ %d0 = COPY %1(s64)
+...
+
+---
+# CHECK-LABEL: name: sitofp_s64_s64_fpr
+name: sitofp_s64_s64_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: fpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = SCVTFUXDri %0
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(s64) = COPY %x0
+ %1(s64) = G_SITOFP %0
+ %d0 = COPY %1(s64)
+...
+
+---
+# CHECK-LABEL: name: uitofp_s32_s32_fpr
+name: uitofp_s32_s32_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: fpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = UCVTFUWSri %0
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0(s32) = COPY %w0
+ %1(s32) = G_UITOFP %0
+ %s0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: uitofp_s32_s64_fpr
+name: uitofp_s32_s64_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: fpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = UCVTFUXSri %0
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(s64) = COPY %x0
+ %1(s32) = G_UITOFP %0
+ %s0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: uitofp_s64_s32_fpr
+name: uitofp_s64_s32_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: fpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = UCVTFUWDri %0
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0(s32) = COPY %w0
+ %1(s64) = G_UITOFP %0
+ %d0 = COPY %1(s64)
+...
+
+---
+# CHECK-LABEL: name: uitofp_s64_s64_fpr
+name: uitofp_s64_s64_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: fpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = UCVTFUXDri %0
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(s64) = COPY %x0
+ %1(s64) = G_UITOFP %0
+ %d0 = COPY %1(s64)
+...
+
+---
+# CHECK-LABEL: name: fptosi_s32_s32_gpr
+name: fptosi_s32_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %s0
+# CHECK: %1 = FCVTZSUWSr %0
+body: |
+ bb.0:
+ liveins: %s0
+
+ %0(s32) = COPY %s0
+ %1(s32) = G_FPTOSI %0
+ %w0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: fptosi_s32_s64_gpr
+name: fptosi_s32_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %d0
+# CHECK: %1 = FCVTZSUWDr %0
+body: |
+ bb.0:
+ liveins: %d0
+
+ %0(s64) = COPY %d0
+ %1(s32) = G_FPTOSI %0
+ %w0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: fptosi_s64_s32_gpr
+name: fptosi_s64_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %s0
+# CHECK: %1 = FCVTZSUXSr %0
+body: |
+ bb.0:
+ liveins: %s0
+
+ %0(s32) = COPY %s0
+ %1(s64) = G_FPTOSI %0
+ %x0 = COPY %1(s64)
+...
+
+---
+# CHECK-LABEL: name: fptosi_s64_s64_gpr
+name: fptosi_s64_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %d0
+# CHECK: %1 = FCVTZSUXDr %0
+body: |
+ bb.0:
+ liveins: %d0
+
+ %0(s64) = COPY %d0
+ %1(s64) = G_FPTOSI %0
+ %x0 = COPY %1(s64)
+...
+
+---
+# CHECK-LABEL: name: fptoui_s32_s32_gpr
+name: fptoui_s32_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %s0
+# CHECK: %1 = FCVTZUUWSr %0
+body: |
+ bb.0:
+ liveins: %s0
+
+ %0(s32) = COPY %s0
+ %1(s32) = G_FPTOUI %0
+ %w0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: fptoui_s32_s64_gpr
+name: fptoui_s32_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %d0
+# CHECK: %1 = FCVTZUUWDr %0
+body: |
+ bb.0:
+ liveins: %d0
+
+ %0(s64) = COPY %d0
+ %1(s32) = G_FPTOUI %0
+ %w0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: fptoui_s64_s32_gpr
+name: fptoui_s64_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %s0
+# CHECK: %1 = FCVTZUUXSr %0
+body: |
+ bb.0:
+ liveins: %s0
+
+ %0(s32) = COPY %s0
+ %1(s64) = G_FPTOUI %0
+ %x0 = COPY %1(s64)
+...
+
+---
+# CHECK-LABEL: name: fptoui_s64_s64_gpr
+name: fptoui_s64_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %d0
+# CHECK: %1 = FCVTZUUXDr %0
+body: |
+ bb.0:
+ liveins: %d0
+
+ %0(s64) = COPY %d0
+ %1(s64) = G_FPTOUI %0
+ %x0 = COPY %1(s64)
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-int-ext.mir b/test/CodeGen/AArch64/GlobalISel/select-int-ext.mir
new file mode 100644
index 000000000000..2ba8b7366252
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-int-ext.mir
@@ -0,0 +1,274 @@
+# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
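+
+# These tests cover the integer extensions. G_ANYEXT selects to a COPY
+# (via SUBREG_TO_REG when widening to 64 bits), while G_ZEXT and G_SEXT
+# select to UBFM/SBFM bitfield moves with the appropriate immediates.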
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @anyext_s64_from_s32() { ret void }
+ define void @anyext_s32_from_s8() { ret void }
+
+ define void @zext_s64_from_s32() { ret void }
+ define void @zext_s32_from_s16() { ret void }
+ define void @zext_s32_from_s8() { ret void }
+ define void @zext_s16_from_s8() { ret void }
+
+ define void @sext_s64_from_s32() { ret void }
+ define void @sext_s32_from_s16() { ret void }
+ define void @sext_s32_from_s8() { ret void }
+ define void @sext_s16_from_s8() { ret void }
+...
+
+---
+# CHECK-LABEL: name: anyext_s64_from_s32
+name: anyext_s64_from_s32
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32all }
+# CHECK-NEXT: - { id: 1, class: gpr64all }
+# CHECK-NEXT: - { id: 2, class: gpr64all }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %2 = SUBREG_TO_REG 0, %0, 15
+# CHECK: %1 = COPY %2
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0(s32) = COPY %w0
+ %1(s64) = G_ANYEXT %0
+ %x0 = COPY %1(s64)
+...
+
+---
+# CHECK-LABEL: name: anyext_s32_from_s8
+name: anyext_s32_from_s8
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32all }
+# CHECK-NEXT: - { id: 1, class: gpr32all }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = COPY %0
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0(s8) = COPY %w0
+ %1(s32) = G_ANYEXT %0
+ %w0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: zext_s64_from_s32
+name: zext_s64_from_s32
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+# CHECK-NEXT: - { id: 2, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %2 = SUBREG_TO_REG 0, %0, 15
+# CHECK: %1 = UBFMXri %2, 0, 31
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0(s32) = COPY %w0
+ %1(s64) = G_ZEXT %0
+ %x0 = COPY %1(s64)
+...
+
+---
+# CHECK-LABEL: name: zext_s32_from_s16
+name: zext_s32_from_s16
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = UBFMWri %0, 0, 15
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0(s16) = COPY %w0
+ %1(s32) = G_ZEXT %0
+ %w0 = COPY %1
+...
+
+---
+# CHECK-LABEL: name: zext_s32_from_s8
+name: zext_s32_from_s8
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = UBFMWri %0, 0, 7
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0(s8) = COPY %w0
+ %1(s32) = G_ZEXT %0
+ %w0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: zext_s16_from_s8
+name: zext_s16_from_s8
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = UBFMWri %0, 0, 7
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0(s8) = COPY %w0
+ %1(s16) = G_ZEXT %0
+ %w0 = COPY %1(s16)
+...
+
+---
+# CHECK-LABEL: name: sext_s64_from_s32
+name: sext_s64_from_s32
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+# CHECK-NEXT: - { id: 2, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %2 = SUBREG_TO_REG 0, %0, 15
+# CHECK: %1 = SBFMXri %2, 0, 31
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0(s32) = COPY %w0
+ %1(s64) = G_SEXT %0
+ %x0 = COPY %1(s64)
+...
+
+---
+# CHECK-LABEL: name: sext_s32_from_s16
+name: sext_s32_from_s16
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = SBFMWri %0, 0, 15
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0(s16) = COPY %w0
+ %1(s32) = G_SEXT %0
+ %w0 = COPY %1
+...
+
+---
+# CHECK-LABEL: name: sext_s32_from_s8
+name: sext_s32_from_s8
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = SBFMWri %0, 0, 7
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0(s8) = COPY %w0
+ %1(s32) = G_SEXT %0
+ %w0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: sext_s16_from_s8
+name: sext_s16_from_s8
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = SBFMWri %0, 0, 7
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0(s8) = COPY %w0
+ %1(s16) = G_SEXT %0
+ %w0 = COPY %1(s16)
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-int-ptr-casts.mir b/test/CodeGen/AArch64/GlobalISel/select-int-ptr-casts.mir
new file mode 100644
index 000000000000..6537408f6d98
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-int-ptr-casts.mir
@@ -0,0 +1,150 @@
+# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
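+
+# These tests check that G_INTTOPTR and G_PTRTOINT select to COPYs, with
+# narrowing pointer-to-int casts copying the sub_32 subregister of the
+# 64-bit pointer register.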
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @inttoptr_p0_s64() { ret void }
+ define void @ptrtoint_s64_p0() { ret void }
+ define void @ptrtoint_s32_p0() { ret void }
+ define void @ptrtoint_s16_p0() { ret void }
+ define void @ptrtoint_s8_p0() { ret void }
+ define void @ptrtoint_s1_p0() { ret void }
+...
+
+---
+# CHECK-LABEL: name: inttoptr_p0_s64
+name: inttoptr_p0_s64
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64all }
+# CHECK-NEXT: - { id: 1, class: gpr64all }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %0
+body: |
+ bb.0:
+ liveins: %x0
+ %0(s64) = COPY %x0
+ %1(p0) = G_INTTOPTR %0
+ %x0 = COPY %1(p0)
+...
+
+---
+# CHECK-LABEL: name: ptrtoint_s64_p0
+name: ptrtoint_s64_p0
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %0
+body: |
+ bb.0:
+ liveins: %x0
+ %0(p0) = COPY %x0
+ %1(s64) = G_PTRTOINT %0
+ %x0 = COPY %1(s64)
+...
+
+---
+# CHECK-LABEL: name: ptrtoint_s32_p0
+name: ptrtoint_s32_p0
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %0.sub_32
+body: |
+ bb.0:
+ liveins: %x0
+ %0(p0) = COPY %x0
+ %1(s32) = G_PTRTOINT %0
+ %w0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: ptrtoint_s16_p0
+name: ptrtoint_s16_p0
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %0.sub_32
+body: |
+ bb.0:
+ liveins: %x0
+ %0(p0) = COPY %x0
+ %1(s16) = G_PTRTOINT %0
+ %w0 = COPY %1(s16)
+...
+
+---
+# CHECK-LABEL: name: ptrtoint_s8_p0
+name: ptrtoint_s8_p0
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %0.sub_32
+body: |
+ bb.0:
+ liveins: %x0
+ %0(p0) = COPY %x0
+ %1(s8) = G_PTRTOINT %0
+ %w0 = COPY %1(s8)
+...
+
+---
+# CHECK-LABEL: name: ptrtoint_s1_p0
+name: ptrtoint_s1_p0
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %0.sub_32
+body: |
+ bb.0:
+ liveins: %x0
+ %0(p0) = COPY %x0
+ %1(s1) = G_PTRTOINT %0
+ %w0 = COPY %1(s1)
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-load.mir b/test/CodeGen/AArch64/GlobalISel/select-load.mir
new file mode 100644
index 000000000000..9188e2b0c0fc
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-load.mir
@@ -0,0 +1,515 @@
+# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
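+
+# These tests cover G_LOAD selection: 1-, 2-, 4- and 8-byte scalar loads
+# into both GPRs and FPRs, a load through a frame index, and folding of a
+# constant G_GEP offset into the load instruction's scaled immediate.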
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @load_s64_gpr(i64* %addr) { ret void }
+ define void @load_s32_gpr(i32* %addr) { ret void }
+ define void @load_s16_gpr(i16* %addr) { ret void }
+ define void @load_s8_gpr(i8* %addr) { ret void }
+
+ define void @load_fi_s64_gpr() {
+ %ptr0 = alloca i64
+ ret void
+ }
+
+ define void @load_gep_128_s64_gpr(i64* %addr) { ret void }
+ define void @load_gep_512_s32_gpr(i32* %addr) { ret void }
+ define void @load_gep_64_s16_gpr(i16* %addr) { ret void }
+ define void @load_gep_1_s8_gpr(i8* %addr) { ret void }
+
+ define void @load_s64_fpr(i64* %addr) { ret void }
+ define void @load_s32_fpr(i32* %addr) { ret void }
+ define void @load_s16_fpr(i16* %addr) { ret void }
+ define void @load_s8_fpr(i8* %addr) { ret void }
+
+ define void @load_gep_8_s64_fpr(i64* %addr) { ret void }
+ define void @load_gep_16_s32_fpr(i32* %addr) { ret void }
+ define void @load_gep_64_s16_fpr(i16* %addr) { ret void }
+ define void @load_gep_32_s8_fpr(i8* %addr) { ret void }
+
+...
+
+---
+# CHECK-LABEL: name: load_s64_gpr
+name: load_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = LDRXui %0, 0 :: (load 8 from %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = COPY %x0
+ %1(s64) = G_LOAD %0 :: (load 8 from %ir.addr)
+ %x0 = COPY %1(s64)
+...
+
+---
+# CHECK-LABEL: name: load_s32_gpr
+name: load_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = LDRWui %0, 0 :: (load 4 from %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = COPY %x0
+ %1(s32) = G_LOAD %0 :: (load 4 from %ir.addr)
+ %w0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: load_s16_gpr
+name: load_s16_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = LDRHHui %0, 0 :: (load 2 from %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = COPY %x0
+ %1(s16) = G_LOAD %0 :: (load 2 from %ir.addr)
+ %w0 = COPY %1(s16)
+...
+
+---
+# CHECK-LABEL: name: load_s8_gpr
+name: load_s8_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = LDRBBui %0, 0 :: (load 1 from %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = COPY %x0
+ %1(s8) = G_LOAD %0 :: (load 1 from %ir.addr)
+ %w0 = COPY %1(s8)
+...
+
+---
+# CHECK-LABEL: name: load_fi_s64_gpr
+name: load_fi_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+stack:
+ - { id: 0, name: ptr0, offset: 0, size: 8, alignment: 8 }
+
+# CHECK: body:
+# CHECK: %1 = LDRXui %stack.0.ptr0, 0 :: (load 8)
+# CHECK: %x0 = COPY %1
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = G_FRAME_INDEX %stack.0.ptr0
+ %1(s64) = G_LOAD %0 :: (load 8)
+ %x0 = COPY %1(s64)
+...
+
+---
+# CHECK-LABEL: name: load_gep_128_s64_gpr
+name: load_gep_128_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr }
+# CHECK-NEXT: - { id: 3, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %3 = LDRXui %0, 16 :: (load 8 from %ir.addr)
+# CHECK: %x0 = COPY %3
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = COPY %x0
+ %1(s64) = G_CONSTANT i64 128
+ %2(p0) = G_GEP %0, %1
+ %3(s64) = G_LOAD %2 :: (load 8 from %ir.addr)
+ %x0 = COPY %3
+...
+
+---
+# CHECK-LABEL: name: load_gep_512_s32_gpr
+name: load_gep_512_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr }
+# CHECK-NEXT: - { id: 3, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %3 = LDRWui %0, 128 :: (load 4 from %ir.addr)
+# CHECK: %w0 = COPY %3
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = COPY %x0
+ %1(s64) = G_CONSTANT i64 512
+ %2(p0) = G_GEP %0, %1
+ %3(s32) = G_LOAD %2 :: (load 4 from %ir.addr)
+ %w0 = COPY %3
+...
+
+---
+# CHECK-LABEL: name: load_gep_64_s16_gpr
+name: load_gep_64_s16_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr }
+# CHECK-NEXT: - { id: 3, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %3 = LDRHHui %0, 32 :: (load 2 from %ir.addr)
+# CHECK: %w0 = COPY %3
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = COPY %x0
+ %1(s64) = G_CONSTANT i64 64
+ %2(p0) = G_GEP %0, %1
+ %3(s16) = G_LOAD %2 :: (load 2 from %ir.addr)
+ %w0 = COPY %3
+...
+
+---
+# CHECK-LABEL: name: load_gep_1_s8_gpr
+name: load_gep_1_s8_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr }
+# CHECK-NEXT: - { id: 3, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %3 = LDRBBui %0, 1 :: (load 1 from %ir.addr)
+# CHECK: %w0 = COPY %3
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = COPY %x0
+ %1(s64) = G_CONSTANT i64 1
+ %2(p0) = G_GEP %0, %1
+ %3(s8) = G_LOAD %2 :: (load 1 from %ir.addr)
+ %w0 = COPY %3
+...
+
+---
+# CHECK-LABEL: name: load_s64_fpr
+name: load_s64_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: fpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = LDRDui %0, 0 :: (load 8 from %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = COPY %x0
+ %1(s64) = G_LOAD %0 :: (load 8 from %ir.addr)
+ %d0 = COPY %1(s64)
+...
+
+---
+# CHECK-LABEL: name: load_s32_fpr
+name: load_s32_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: fpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = LDRSui %0, 0 :: (load 4 from %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = COPY %x0
+ %1(s32) = G_LOAD %0 :: (load 4 from %ir.addr)
+ %s0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: load_s16_fpr
+name: load_s16_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: fpr16 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = LDRHui %0, 0 :: (load 2 from %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = COPY %x0
+ %1(s16) = G_LOAD %0 :: (load 2 from %ir.addr)
+ %h0 = COPY %1(s16)
+...
+
+---
+# CHECK-LABEL: name: load_s8_fpr
+name: load_s8_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: fpr8 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = LDRBui %0, 0 :: (load 1 from %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = COPY %x0
+ %1(s8) = G_LOAD %0 :: (load 1 from %ir.addr)
+ %b0 = COPY %1(s8)
+...
+
+---
+# CHECK-LABEL: name: load_gep_8_s64_fpr
+name: load_gep_8_s64_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr }
+# CHECK-NEXT: - { id: 3, class: fpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %3 = LDRDui %0, 1 :: (load 8 from %ir.addr)
+# CHECK: %d0 = COPY %3
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = COPY %x0
+ %1(s64) = G_CONSTANT i64 8
+ %2(p0) = G_GEP %0, %1
+ %3(s64) = G_LOAD %2 :: (load 8 from %ir.addr)
+ %d0 = COPY %3
+...
+
+---
+# CHECK-LABEL: name: load_gep_16_s32_fpr
+name: load_gep_16_s32_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr }
+# CHECK-NEXT: - { id: 3, class: fpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %3 = LDRSui %0, 4 :: (load 4 from %ir.addr)
+# CHECK: %s0 = COPY %3
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = COPY %x0
+ %1(s64) = G_CONSTANT i64 16
+ %2(p0) = G_GEP %0, %1
+ %3(s32) = G_LOAD %2 :: (load 4 from %ir.addr)
+ %s0 = COPY %3
+...
+
+---
+# CHECK-LABEL: name: load_gep_64_s16_fpr
+name: load_gep_64_s16_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr }
+# CHECK-NEXT: - { id: 3, class: fpr16 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %3 = LDRHui %0, 32 :: (load 2 from %ir.addr)
+# CHECK: %h0 = COPY %3
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = COPY %x0
+ %1(s64) = G_CONSTANT i64 64
+ %2(p0) = G_GEP %0, %1
+ %3(s16) = G_LOAD %2 :: (load 2 from %ir.addr)
+ %h0 = COPY %3
+...
+
+---
+# CHECK-LABEL: name: load_gep_32_s8_fpr
+name: load_gep_32_s8_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr }
+# CHECK-NEXT: - { id: 3, class: fpr8 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %3 = LDRBui %0, 32 :: (load 1 from %ir.addr)
+# CHECK: %b0 = COPY %3
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = COPY %x0
+ %1(s64) = G_CONSTANT i64 32
+ %2(p0) = G_GEP %0, %1
+ %3(s8) = G_LOAD %2 :: (load 1 from %ir.addr)
+ %b0 = COPY %3
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-muladd.mir b/test/CodeGen/AArch64/GlobalISel/select-muladd.mir
new file mode 100644
index 000000000000..7d5b43bc16d5
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-muladd.mir
@@ -0,0 +1,50 @@
+# RUN: llc -O0 -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
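+
+# This test checks that a 64-bit G_ADD of a G_MUL of two sign-extended
+# 32-bit values is selected as a single widening multiply-add, SMADDLrrr.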
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @SMADDLrrr_gpr() { ret void }
+...
+
+---
+# CHECK-LABEL: name: SMADDLrrr_gpr
+name: SMADDLrrr_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: gpr32 }
+# CHECK-NEXT: - { id: 3, class: gpr }
+# CHECK-NEXT: - { id: 4, class: gpr }
+# CHECK-NEXT: - { id: 5, class: gpr }
+# CHECK-NEXT: - { id: 6, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+ - { id: 4, class: gpr }
+ - { id: 5, class: gpr }
+ - { id: 6, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %w1
+# CHECK: %2 = COPY %w2
+# CHECK: %6 = SMADDLrrr %1, %2, %0
+body: |
+ bb.0:
+ liveins: %x0, %w1, %w2
+
+ %0(s64) = COPY %x0
+ %1(s32) = COPY %w1
+ %2(s32) = COPY %w2
+ %3(s64) = G_SEXT %1
+ %4(s64) = G_SEXT %2
+ %5(s64) = G_MUL %3, %4
+ %6(s64) = G_ADD %0, %5
+ %x0 = COPY %6
+...
+
diff --git a/test/CodeGen/AArch64/GlobalISel/select-property.mir b/test/CodeGen/AArch64/GlobalISel/select-property.mir
new file mode 100644
index 000000000000..86961ac597e1
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-property.mir
@@ -0,0 +1,21 @@
+# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @selected_property() { ret void }
+...
+
+---
+# Check that we set the "selected" property.
+# CHECK-LABEL: name: selected_property
+# CHECK: legalized: true
+# CHECK-NEXT: regBankSelected: true
+# CHECK-NEXT: selected: true
+name: selected_property
+legalized: true
+regBankSelected: true
+selected: false
+body: |
+ bb.0:
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-store.mir b/test/CodeGen/AArch64/GlobalISel/select-store.mir
new file mode 100644
index 000000000000..9b8f5c566ce0
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-store.mir
@@ -0,0 +1,463 @@
+# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
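+
+# These tests mirror the load tests for G_STORE: scalar stores of each width
+# from GPRs and FPRs, stores of constant zero via %wzr/%xzr, a store through
+# a frame index, and folding of constant G_GEP offsets into the immediate.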
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @store_s64_gpr(i64* %addr) { ret void }
+ define void @store_s32_gpr(i32* %addr) { ret void }
+ define void @store_s16_gpr(i16* %addr) { ret void }
+ define void @store_s8_gpr(i8* %addr) { ret void }
+
+ define void @store_zero_s64_gpr(i64* %addr) { ret void }
+ define void @store_zero_s32_gpr(i32* %addr) { ret void }
+
+ define void @store_fi_s64_gpr() {
+ %ptr0 = alloca i64
+ ret void
+ }
+
+ define void @store_gep_128_s64_gpr(i64* %addr) { ret void }
+ define void @store_gep_512_s32_gpr(i32* %addr) { ret void }
+ define void @store_gep_64_s16_gpr(i16* %addr) { ret void }
+ define void @store_gep_1_s8_gpr(i8* %addr) { ret void }
+
+ define void @store_s64_fpr(i64* %addr) { ret void }
+ define void @store_s32_fpr(i32* %addr) { ret void }
+
+ define void @store_gep_8_s64_fpr(i64* %addr) { ret void }
+ define void @store_gep_8_s32_fpr(i32* %addr) { ret void }
+...
+
+---
+# CHECK-LABEL: name: store_s64_gpr
+name: store_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %x1
+# CHECK: STRXui %1, %0, 0 :: (store 8 into %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0, %x1
+
+ %0(p0) = COPY %x0
+ %1(s64) = COPY %x1
+ G_STORE %1, %0 :: (store 8 into %ir.addr)
+
+...
+
+---
+# CHECK-LABEL: name: store_s32_gpr
+name: store_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %w1
+# CHECK: STRWui %1, %0, 0 :: (store 4 into %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0, %w1
+
+ %0(p0) = COPY %x0
+ %1(s32) = COPY %w1
+ G_STORE %1, %0 :: (store 4 into %ir.addr)
+
+...
+
+---
+# CHECK-LABEL: name: store_s16_gpr
+name: store_s16_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %w1
+# CHECK: STRHHui %1, %0, 0 :: (store 2 into %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0, %w1
+
+ %0(p0) = COPY %x0
+ %1(s16) = COPY %w1
+ G_STORE %1, %0 :: (store 2 into %ir.addr)
+
+...
+
+---
+# CHECK-LABEL: name: store_s8_gpr
+name: store_s8_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %w1
+# CHECK: STRBBui %1, %0, 0 :: (store 1 into %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0, %w1
+
+ %0(p0) = COPY %x0
+ %1(s8) = COPY %w1
+ G_STORE %1, %0 :: (store 1 into %ir.addr)
+
+...
+
+---
+# CHECK-LABEL: name: store_zero_s64_gpr
+name: store_zero_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: STRXui %xzr, %0, 0 :: (store 8 into %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0, %x1
+
+ %0(p0) = COPY %x0
+ %1(s64) = G_CONSTANT i64 0
+ G_STORE %1, %0 :: (store 8 into %ir.addr)
+
+...
+
+---
+# CHECK-LABEL: name: store_zero_s32_gpr
+name: store_zero_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: STRWui %wzr, %0, 0 :: (store 4 into %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = COPY %x0
+ %1(s32) = G_CONSTANT i32 0
+ G_STORE %1, %0 :: (store 4 into %ir.addr)
+
+...
+
+---
+# CHECK-LABEL: name: store_fi_s64_gpr
+name: store_fi_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+stack:
+ - { id: 0, name: ptr0, offset: 0, size: 8, alignment: 8 }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: STRXui %0, %stack.0.ptr0, 0 :: (store 8)
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(p0) = COPY %x0
+ %1(p0) = G_FRAME_INDEX %stack.0.ptr0
+ G_STORE %0, %1 :: (store 8)
+...
+
+---
+# CHECK-LABEL: name: store_gep_128_s64_gpr
+name: store_gep_128_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+# CHECK-NEXT: - { id: 2, class: gpr }
+# CHECK-NEXT: - { id: 3, class: gpr }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %x1
+# CHECK: STRXui %1, %0, 16 :: (store 8 into %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0, %x1
+
+ %0(p0) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_CONSTANT i64 128
+ %3(p0) = G_GEP %0, %2
+ G_STORE %1, %3 :: (store 8 into %ir.addr)
+...
+
+---
+# CHECK-LABEL: name: store_gep_512_s32_gpr
+name: store_gep_512_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: gpr }
+# CHECK-NEXT: - { id: 3, class: gpr }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %w1
+# CHECK: STRWui %1, %0, 128 :: (store 4 into %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0, %w1
+
+ %0(p0) = COPY %x0
+ %1(s32) = COPY %w1
+ %2(s64) = G_CONSTANT i64 512
+ %3(p0) = G_GEP %0, %2
+ G_STORE %1, %3 :: (store 4 into %ir.addr)
+...
+
+---
+# CHECK-LABEL: name: store_gep_64_s16_gpr
+name: store_gep_64_s16_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: gpr }
+# CHECK-NEXT: - { id: 3, class: gpr }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %w1
+# CHECK: STRHHui %1, %0, 32 :: (store 2 into %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0, %w1
+
+ %0(p0) = COPY %x0
+ %1(s16) = COPY %w1
+ %2(s64) = G_CONSTANT i64 64
+ %3(p0) = G_GEP %0, %2
+ G_STORE %1, %3 :: (store 2 into %ir.addr)
+...
+
+---
+# CHECK-LABEL: name: store_gep_1_s8_gpr
+name: store_gep_1_s8_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: gpr }
+# CHECK-NEXT: - { id: 3, class: gpr }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %w1
+# CHECK: STRBBui %1, %0, 1 :: (store 1 into %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0, %w1
+
+ %0(p0) = COPY %x0
+ %1(s8) = COPY %w1
+ %2(s64) = G_CONSTANT i64 1
+ %3(p0) = G_GEP %0, %2
+ G_STORE %1, %3 :: (store 1 into %ir.addr)
+...
+
+---
+# CHECK-LABEL: name: store_s64_fpr
+name: store_s64_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: fpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %d1
+# CHECK: STRDui %1, %0, 0 :: (store 8 into %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0, %d1
+
+ %0(p0) = COPY %x0
+ %1(s64) = COPY %d1
+ G_STORE %1, %0 :: (store 8 into %ir.addr)
+
+...
+
+---
+# CHECK-LABEL: name: store_s32_fpr
+name: store_s32_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: fpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: fpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %s1
+# CHECK: STRSui %1, %0, 0 :: (store 4 into %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0, %s1
+
+ %0(p0) = COPY %x0
+ %1(s32) = COPY %s1
+ G_STORE %1, %0 :: (store 4 into %ir.addr)
+
+...
+
+---
+# CHECK-LABEL: name: store_gep_8_s64_fpr
+name: store_gep_8_s64_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: fpr64 }
+# CHECK-NEXT: - { id: 2, class: gpr }
+# CHECK-NEXT: - { id: 3, class: gpr }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: fpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %d1
+# CHECK: STRDui %1, %0, 1 :: (store 8 into %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0, %d1
+
+ %0(p0) = COPY %x0
+ %1(s64) = COPY %d1
+ %2(s64) = G_CONSTANT i64 8
+ %3(p0) = G_GEP %0, %2
+ G_STORE %1, %3 :: (store 8 into %ir.addr)
+...
+
+---
+# CHECK-LABEL: name: store_gep_8_s32_fpr
+name: store_gep_8_s32_fpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+# CHECK-NEXT: - { id: 1, class: fpr32 }
+# CHECK-NEXT: - { id: 2, class: gpr }
+# CHECK-NEXT: - { id: 3, class: gpr }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: fpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %s1
+# CHECK: STRSui %1, %0, 2 :: (store 4 into %ir.addr)
+body: |
+ bb.0:
+ liveins: %x0, %s1
+
+ %0(p0) = COPY %x0
+ %1(s32) = COPY %s1
+ %2(s64) = G_CONSTANT i64 8
+ %3(p0) = G_GEP %0, %2
+ G_STORE %1, %3 :: (store 4 into %ir.addr)
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-trunc.mir b/test/CodeGen/AArch64/GlobalISel/select-trunc.mir
new file mode 100644
index 000000000000..fc3546e777f7
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-trunc.mir
@@ -0,0 +1,81 @@
+# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
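+
+# These tests check that G_TRUNC selects to a COPY, taking the sub_32
+# subregister when truncating a 64-bit source.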
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @trunc_s32_s64() { ret void }
+ define void @trunc_s8_s64() { ret void }
+ define void @trunc_s1_s32() { ret void }
+...
+
+---
+# CHECK-LABEL: name: trunc_s32_s64
+name: trunc_s32_s64
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %1 = COPY %0.sub_32
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(s64) = COPY %x0
+ %1(s32) = G_TRUNC %0
+ %w0 = COPY %1(s32)
+...
+
+---
+# CHECK-LABEL: name: trunc_s8_s64
+name: trunc_s8_s64
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %1 = COPY %0.sub_32
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(s64) = COPY %x0
+ %1(s8) = G_TRUNC %0
+ %w0 = COPY %1(s8)
+...
+
+---
+# CHECK-LABEL: name: trunc_s1_s32
+name: trunc_s1_s32
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+
+# CHECK: body:
+# CHECK: %1 = COPY %0
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0(s32) = COPY %w0
+ %1(s1) = G_TRUNC %0
+ %w0 = COPY %1(s1)
+...
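At the IR level these G_TRUNC cases are plain integer truncations; a minimal sketch of the 64-to-32-bit case (hypothetical function name, for illustration only, not part of the patch):

  define i32 @trunc_example(i64 %x) {
    %t = trunc i64 %x to i32   ; the IRTranslator lowers this to G_TRUNC
    ret i32 %t
  }

On AArch64 the selector implements the truncation as a copy of the sub_32 subregister, which is exactly what the CHECK lines above verify.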
diff --git a/test/CodeGen/AArch64/GlobalISel/select-xor.mir b/test/CodeGen/AArch64/GlobalISel/select-xor.mir
new file mode 100644
index 000000000000..e787849c8d1b
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select-xor.mir
@@ -0,0 +1,165 @@
+# RUN: llc -O0 -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @xor_s32_gpr() { ret void }
+ define void @xor_s64_gpr() { ret void }
+ define void @xor_constant_n1_s32_gpr() { ret void }
+ define void @xor_constant_n1_s64_gpr() { ret void }
+ define void @xor_constant_n1_s32_gpr_2bb() { ret void }
+
+...
+
+---
+# Check that we select a 32-bit GPR G_XOR into EORWrr on GPR32.
+# Also check that we constrain the register class of the COPY to GPR32.
+# CHECK-LABEL: name: xor_s32_gpr
+name: xor_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %1 = COPY %w1
+# CHECK: %2 = EORWrr %0, %1
+body: |
+ bb.0:
+ liveins: %w0, %w1
+
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_XOR %0, %1
+ %w0 = COPY %2(s32)
+...
+
+---
+# Same as xor_s32_gpr, for 64-bit operations.
+# CHECK-LABEL: name: xor_s64_gpr
+name: xor_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr64 }
+# CHECK-NEXT: - { id: 2, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %1 = COPY %x1
+# CHECK: %2 = EORXrr %0, %1
+body: |
+ bb.0:
+ liveins: %x0, %x1
+
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_XOR %0, %1
+ %x0 = COPY %2(s64)
+...
+
+---
+# Check that we select a 32-bit G_XOR of a register and constant -1 into ORNWrr.
+# Also check that we constrain the register class of the COPY to GPR32.
+# CHECK-LABEL: name: xor_constant_n1_s32_gpr
+name: xor_constant_n1_s32_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %w0
+# CHECK: %2 = ORNWrr %wzr, %0
+body: |
+ bb.0:
+ liveins: %w0
+
+ %0(s32) = COPY %w0
+ %1(s32) = G_CONSTANT i32 -1
+ %2(s32) = G_XOR %0, %1
+ %w0 = COPY %2(s32)
+...
+
+---
+# Same as xor_constant_n1_s32_gpr, for 64-bit operations.
+# CHECK-LABEL: name: xor_constant_n1_s64_gpr
+name: xor_constant_n1_s64_gpr
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64 }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %0 = COPY %x0
+# CHECK: %2 = ORNXrr %xzr, %0
+body: |
+ bb.0:
+ liveins: %x0
+
+ %0(s64) = COPY %x0
+ %1(s64) = G_CONSTANT i64 -1
+ %2(s64) = G_XOR %0, %1
+ %x0 = COPY %2(s64)
+...
+
+---
+# Check that we can obtain constants from other basic blocks.
+# CHECK-LABEL: name: xor_constant_n1_s32_gpr_2bb
+name: xor_constant_n1_s32_gpr_2bb
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: B %bb.1
+# CHECK: %0 = COPY %w0
+# CHECK: %2 = ORNWrr %wzr, %0
+
+body: |
+ bb.0:
+ liveins: %w0, %w1
+ successors: %bb.1
+ %1(s32) = G_CONSTANT i32 -1
+ G_BR %bb.1
+ bb.1:
+ %0(s32) = COPY %w0
+ %2(s32) = G_XOR %0, %1
+ %w0 = COPY %2(s32)
+...
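For context, G_XOR with an all-ones G_CONSTANT is how a bitwise 'not' reaches the selector; the IR-level shape behind the xor_constant_n1 tests is roughly the following (hypothetical function name, for illustration only):

  define i32 @not_example(i32 %a) {
    %r = xor i32 %a, -1   ; LLVM IR's canonical spelling of 'not'
    ret i32 %r
  }

Selecting this as ORN (here ORNWrr %wzr, %0, i.e. wzr | ~%0) folds the -1 away instead of materializing it in a register.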
diff --git a/test/CodeGen/AArch64/GlobalISel/select.mir b/test/CodeGen/AArch64/GlobalISel/select.mir
new file mode 100644
index 000000000000..8bffa085fdca
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/select.mir
@@ -0,0 +1,311 @@
+# RUN: llc -O0 -mtriple=aarch64-apple-ios -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=IOS
+# RUN: llc -O0 -mtriple=aarch64-linux-gnu -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=LINUX-DEFAULT
+# RUN: llc -O0 -mtriple=aarch64-linux-gnu -relocation-model=pic -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=LINUX-PIC
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @frame_index() {
+ %ptr0 = alloca i64
+ ret void
+ }
+
+ define i8* @gep(i8* %in) { ret i8* undef }
+
+ define i8* @ptr_mask(i8* %in) { ret i8* undef }
+
+ @var_local = global i8 0
+ define i8* @global_local() { ret i8* undef }
+
+ @var_got = external global i8
+ define i8* @global_got() { ret i8* undef }
+
+ define void @icmp() { ret void }
+ define void @fcmp() { ret void }
+
+ define void @phi() { ret void }
+
+ define void @select() { ret void }
+...
+
+---
+# CHECK-LABEL: name: frame_index
+name: frame_index
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr64sp }
+registers:
+ - { id: 0, class: gpr }
+
+stack:
+ - { id: 0, name: ptr0, offset: 0, size: 8, alignment: 8 }
+
+# CHECK: body:
+# CHECK: %0 = ADDXri %stack.0.ptr0, 0, 0
+body: |
+ bb.0:
+ %0(p0) = G_FRAME_INDEX %stack.0.ptr0
+ %x0 = COPY %0(p0)
+...
+
+---
+# CHECK-LABEL: name: gep
+name: gep
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+# CHECK: body:
+# CHECK: %1 = MOVi64imm 42
+# CHECK: %2 = ADDXrr %0, %1
+body: |
+ bb.0:
+ liveins: %x0
+ %0(p0) = COPY %x0
+ %1(s64) = G_CONSTANT i64 42
+ %2(p0) = G_GEP %0, %1(s64)
+ %x0 = COPY %2(p0)
+...
+
+---
+# CHECK-LABEL: name: ptr_mask
+name: ptr_mask
+legalized: true
+regBankSelected: true
+
+# CHECK: body:
+# CHECK: %1 = ANDXri %0, 8060
+body: |
+ bb.0:
+ liveins: %x0
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(p0) = G_PTR_MASK %0, 3
+ %x0 = COPY %1(p0)
+...
+
+---
+# Global defined in the same linkage unit, so no GOT is needed.
+# CHECK-LABEL: name: global_local
+name: global_local
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: gpr }
+
+# CHECK: body:
+# IOS: %0 = MOVaddr target-flags(aarch64-page) @var_local, target-flags(aarch64-pageoff, aarch64-nc) @var_local
+# LINUX-DEFAULT: %0 = MOVaddr target-flags(aarch64-page) @var_local, target-flags(aarch64-pageoff, aarch64-nc) @var_local
+# LINUX-PIC: %0 = LOADgot target-flags(aarch64-got) @var_local
+body: |
+ bb.0:
+ %0(p0) = G_GLOBAL_VALUE @var_local
+ %x0 = COPY %0(p0)
+...
+
+---
+# CHECK-LABEL: name: global_got
+name: global_got
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: gpr }
+
+# CHECK: body:
+# IOS: %0 = LOADgot target-flags(aarch64-got) @var_got
+# LINUX-DEFAULT: %0 = MOVaddr target-flags(aarch64-page) @var_got, target-flags(aarch64-pageoff, aarch64-nc) @var_got
+# LINUX-PIC: %0 = LOADgot target-flags(aarch64-got) @var_got
+body: |
+ bb.0:
+ %0(p0) = G_GLOBAL_VALUE @var_got
+ %x0 = COPY %0(p0)
+...
+
+---
+# CHECK-LABEL: name: icmp
+name: icmp
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: gpr64 }
+# CHECK-NEXT: - { id: 3, class: gpr32 }
+# CHECK-NEXT: - { id: 4, class: gpr64 }
+# CHECK-NEXT: - { id: 5, class: gpr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+ - { id: 4, class: gpr }
+ - { id: 5, class: gpr }
+
+# CHECK: body:
+# CHECK: %wzr = SUBSWrr %0, %0, implicit-def %nzcv
+# CHECK: %1 = CSINCWr %wzr, %wzr, 1, implicit %nzcv
+
+# CHECK: %xzr = SUBSXrr %2, %2, implicit-def %nzcv
+# CHECK: %3 = CSINCWr %wzr, %wzr, 3, implicit %nzcv
+
+# CHECK: %xzr = SUBSXrr %4, %4, implicit-def %nzcv
+# CHECK: %5 = CSINCWr %wzr, %wzr, 0, implicit %nzcv
+
+body: |
+ bb.0:
+ liveins: %w0, %x0
+
+ %0(s32) = COPY %w0
+ %1(s1) = G_ICMP intpred(eq), %0, %0
+ %w0 = COPY %1(s1)
+
+ %2(s64) = COPY %x0
+ %3(s1) = G_ICMP intpred(uge), %2, %2
+ %w0 = COPY %3(s1)
+
+ %4(p0) = COPY %x0
+ %5(s1) = G_ICMP intpred(ne), %4, %4
+ %w0 = COPY %5(s1)
+...
+
+---
+# CHECK-LABEL: name: fcmp
+name: fcmp
+legalized: true
+regBankSelected: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: fpr64 }
+# CHECK-NEXT: - { id: 3, class: gpr32 }
+# CHECK-NEXT: - { id: 4, class: gpr32 }
+# CHECK-NEXT: - { id: 5, class: gpr32 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: fpr }
+ - { id: 3, class: gpr }
+
+# CHECK: body:
+# CHECK: FCMPSrr %0, %0, implicit-def %nzcv
+# CHECK: [[TST_MI:%[0-9]+]] = CSINCWr %wzr, %wzr, 5, implicit %nzcv
+# CHECK: [[TST_GT:%[0-9]+]] = CSINCWr %wzr, %wzr, 13, implicit %nzcv
+# CHECK: %1 = ORRWrr [[TST_MI]], [[TST_GT]]
+
+# CHECK: FCMPDrr %2, %2, implicit-def %nzcv
+# CHECK: %3 = CSINCWr %wzr, %wzr, 4, implicit %nzcv
+
+body: |
+ bb.0:
+ liveins: %w0, %x0
+
+ %0(s32) = COPY %s0
+ %1(s1) = G_FCMP floatpred(one), %0, %0
+ %w0 = COPY %1(s1)
+
+ %2(s64) = COPY %d0
+ %3(s1) = G_FCMP floatpred(uge), %2, %2
+ %w0 = COPY %3(s1)
+
+...
+
+---
+# CHECK-LABEL: name: phi
+name: phi
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: fpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: fpr32 }
+registers:
+ - { id: 0, class: fpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: fpr }
+
+# CHECK: body:
+# CHECK: bb.1:
+# CHECK: %2 = PHI %0, %bb.0, %2, %bb.1
+
+body: |
+ bb.0:
+ liveins: %s0, %w0
+ successors: %bb.1
+ %0(s32) = COPY %s0
+ %1(s1) = COPY %w0
+
+ bb.1:
+ successors: %bb.1, %bb.2
+ %2(s32) = PHI %0, %bb.0, %2, %bb.1
+ G_BRCOND %1, %bb.1
+
+ bb.2:
+ %s0 = COPY %2
+ RET_ReallyLR implicit %s0
+...
+
+---
+# CHECK-LABEL: name: select
+name: select
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr32 }
+# CHECK-NEXT: - { id: 1, class: gpr32 }
+# CHECK-NEXT: - { id: 2, class: gpr32 }
+# CHECK-NEXT: - { id: 3, class: gpr32 }
+# CHECK-NEXT: - { id: 4, class: gpr64 }
+# CHECK-NEXT: - { id: 5, class: gpr64 }
+# CHECK-NEXT: - { id: 6, class: gpr64 }
+# CHECK-NEXT: - { id: 7, class: gpr64 }
+# CHECK-NEXT: - { id: 8, class: gpr64 }
+# CHECK-NEXT: - { id: 9, class: gpr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+ - { id: 4, class: gpr }
+ - { id: 5, class: gpr }
+ - { id: 6, class: gpr }
+ - { id: 7, class: gpr }
+ - { id: 8, class: gpr }
+ - { id: 9, class: gpr }
+
+# CHECK: body:
+# CHECK: %wzr = ANDSWri %0, 0, implicit-def %nzcv
+# CHECK: %3 = CSELWr %1, %2, 1, implicit %nzcv
+# CHECK: %wzr = ANDSWri %0, 0, implicit-def %nzcv
+# CHECK: %6 = CSELXr %4, %5, 1, implicit %nzcv
+# CHECK: %wzr = ANDSWri %0, 0, implicit-def %nzcv
+# CHECK: %9 = CSELXr %7, %8, 1, implicit %nzcv
+body: |
+ bb.0:
+ liveins: %w0, %w1, %w2
+ %0(s1) = COPY %w0
+
+ %1(s32) = COPY %w1
+ %2(s32) = COPY %w2
+ %3(s32) = G_SELECT %0, %1, %2
+ %w0 = COPY %3(s32)
+
+ %4(s64) = COPY %x0
+ %5(s64) = COPY %x1
+ %6(s64) = G_SELECT %0, %4, %5
+ %x0 = COPY %6(s64)
+
+ %7(p0) = COPY %x0
+ %8(p0) = COPY %x1
+ %9(p0) = G_SELECT %0, %7, %8
+ %x0 = COPY %9(p0)
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/translate-gep.ll b/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
index 14dbc7c3c31a..e4c18757418d 100644
--- a/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
+++ b/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
@@ -58,8 +58,8 @@ define i32* @const_then_var(%type1* %addr, i64 %idx) {
; CHECK: [[BASE:%[0-9]+]](p0) = COPY %x0
; CHECK: [[IDX:%[0-9]+]](s64) = COPY %x1
; CHECK: [[OFFSET1:%[0-9]+]](s64) = G_CONSTANT i64 272
-; CHECK: [[BASE1:%[0-9]+]](p0) = G_GEP [[BASE]], [[OFFSET1]](s64)
; CHECK: [[SIZE:%[0-9]+]](s64) = G_CONSTANT i64 4
+; CHECK: [[BASE1:%[0-9]+]](p0) = G_GEP [[BASE]], [[OFFSET1]](s64)
; CHECK: [[OFFSET2:%[0-9]+]](s64) = G_MUL [[SIZE]], [[IDX]]
; CHECK: [[BASE2:%[0-9]+]](p0) = G_GEP [[BASE1]], [[OFFSET2]](s64)
; CHECK: [[RES:%[0-9]+]](p0) = COPY [[BASE2]](p0)
@@ -74,9 +74,9 @@ define i32* @var_then_const(%type1* %addr, i64 %idx) {
; CHECK: [[BASE:%[0-9]+]](p0) = COPY %x0
; CHECK: [[IDX:%[0-9]+]](s64) = COPY %x1
; CHECK: [[SIZE:%[0-9]+]](s64) = G_CONSTANT i64 64
+; CHECK: [[OFFSET2:%[0-9]+]](s64) = G_CONSTANT i64 40
; CHECK: [[OFFSET1:%[0-9]+]](s64) = G_MUL [[SIZE]], [[IDX]]
; CHECK: [[BASE1:%[0-9]+]](p0) = G_GEP [[BASE]], [[OFFSET1]](s64)
-; CHECK: [[OFFSET2:%[0-9]+]](s64) = G_CONSTANT i64 40
; CHECK: [[BASE2:%[0-9]+]](p0) = G_GEP [[BASE1]], [[OFFSET2]](s64)
; CHECK: %x0 = COPY [[BASE2]](p0)
diff --git a/test/CodeGen/AArch64/GlobalISel/varargs-ios-translator.ll b/test/CodeGen/AArch64/GlobalISel/varargs-ios-translator.ll
new file mode 100644
index 000000000000..3bd56fa4cebc
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/varargs-ios-translator.ll
@@ -0,0 +1,16 @@
+; RUN: llc -mtriple=aarch64-apple-ios -stop-after=instruction-select -global-isel -verify-machineinstrs %s -o - | FileCheck %s
+
+define void @test_varargs_sentinel(i8* %list, i64, i64, i64, i64, i64, i64, i64,
+ i32, ...) {
+; CHECK-LABEL: name: test_varargs_sentinel
+; CHECK: fixedStack:
+; CHECK: - { id: [[VARARGS_SLOT:[0-9]+]], offset: 8
+; CHECK: body:
+; CHECK: [[LIST:%[0-9]+]] = COPY %x0
+; CHECK: [[VARARGS_AREA:%[0-9]+]] = ADDXri %fixed-stack.[[VARARGS_SLOT]], 0, 0
+; CHECK: STRXui [[VARARGS_AREA]], [[LIST]], 0 :: (store 8 into %ir.list, align 0)
+ call void @llvm.va_start(i8* %list)
+ ret void
+}
+
+declare void @llvm.va_start(i8*)
diff --git a/test/CodeGen/AArch64/GlobalISel/vastart.ll b/test/CodeGen/AArch64/GlobalISel/vastart.ll
new file mode 100644
index 000000000000..ae44e8fc5dea
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/vastart.ll
@@ -0,0 +1,13 @@
+; RUN: llc -O0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - -mtriple=aarch64-apple-ios7.0 | FileCheck --check-prefix=CHECK --check-prefix=CHECK-IOS %s
+; RUN: llc -O0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - -mtriple=aarch64-linux-gnu | FileCheck --check-prefix=CHECK --check-prefix=CHECK-LINUX %s
+
+
+declare void @llvm.va_start(i8*)
+define void @test_va_start(i8* %list) {
+; CHECK-LABEL: name: test_va_start
+; CHECK: [[LIST:%[0-9]+]](p0) = COPY %x0
+; CHECK-IOS: G_VASTART [[LIST]](p0) :: (store 8 into %ir.list, align 0)
+; CHECK-LINUX: G_VASTART [[LIST]](p0) :: (store 32 into %ir.list, align 0)
+ call void @llvm.va_start(i8* %list)
+ ret void
+}
diff --git a/test/CodeGen/AArch64/aarch64-codegen-prepare-atp.ll b/test/CodeGen/AArch64/aarch64-codegen-prepare-atp.ll
new file mode 100644
index 000000000000..3fe7e65bf245
--- /dev/null
+++ b/test/CodeGen/AArch64/aarch64-codegen-prepare-atp.ll
@@ -0,0 +1,68 @@
+; RUN: opt -codegenprepare < %s -S | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnu"
+
+%struct.match_state = type { i64, i64 }
+
+; %add is also promoted by forking an extra sext.
+define void @promoteTwoOne(i32 %i, i32 %j, i64* %P1, i64* %P2 ) {
+; CHECK-LABEL: @promoteTwoOne
+; CHECK-LABEL: entry:
+; CHECK: %[[SEXT1:.*]] = sext i32 %i to i64
+; CHECK: %[[SEXT2:.*]] = sext i32 %j to i64
+; CHECK: %add = add nsw i64 %[[SEXT1]], %[[SEXT2]]
+entry:
+ %add = add nsw i32 %i, %j
+ %s = sext i32 %add to i64
+ %addr1 = getelementptr inbounds i64, i64* %P1, i64 %s
+ store i64 %s, i64* %addr1
+ %s2 = sext i32 %i to i64
+ %addr2 = getelementptr inbounds i64, i64* %P2, i64 %s2
+ store i64 %s2, i64* %addr2
+ ret void
+}
+
+; Both %add1 and %add2 are promoted by forking extra sexts.
+define void @promoteTwoTwo(i32 %i, i32 %j, i32 %k, i64* %P1, i64* %P2) {
+; CHECK-LABEL: @promoteTwoTwo
+; CHECK-LABEL:entry:
+; CHECK: %[[SEXT1:.*]] = sext i32 %j to i64
+; CHECK: %[[SEXT2:.*]] = sext i32 %i to i64
+; CHECK: %add1 = add nsw i64 %[[SEXT1]], %[[SEXT2]]
+; CHECK: %[[SEXT3:.*]] = sext i32 %k to i64
+; CHECK: %add2 = add nsw i64 %[[SEXT1]], %[[SEXT3]]
+entry:
+ %add1 = add nsw i32 %j, %i
+ %s = sext i32 %add1 to i64
+ %addr1 = getelementptr inbounds i64, i64* %P1, i64 %s
+ store i64 %s, i64* %addr1
+ %add2 = add nsw i32 %j, %k
+ %s2 = sext i32 %add2 to i64
+ %addr2 = getelementptr inbounds i64, i64* %P2, i64 %s2
+ store i64 %s2, i64* %addr2
+ ret void
+}
+
+define i64 @promoteGEPSunk(i1 %cond, i64* %base, i32 %i) {
+; CHECK-LABEL: @promoteGEPSunk
+; CHECK-LABEL: entry:
+; CHECK: %[[SEXT:.*]] = sext i32 %i to i64
+; CHECK: %add = add nsw i64 %[[SEXT]], 1
+; CHECK: %add2 = add nsw i64 %[[SEXT]], 2
+entry:
+ %add = add nsw i32 %i, 1
+ %s = sext i32 %add to i64
+ %addr = getelementptr inbounds i64, i64* %base, i64 %s
+ %add2 = add nsw i32 %i, 2
+ %s2 = sext i32 %add2 to i64
+ %addr2 = getelementptr inbounds i64, i64* %base, i64 %s2
+ br i1 %cond, label %if.then, label %if.then2
+if.then:
+ %v = load i64, i64* %addr
+ %v2 = load i64, i64* %addr2
+ %r = add i64 %v, %v2
+ ret i64 %r
+if.then2:
+  ret i64 0
+}
diff --git a/test/CodeGen/AArch64/aarch64-fold-lslfast.ll b/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
new file mode 100644
index 000000000000..0dfe04b664d0
--- /dev/null
+++ b/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
@@ -0,0 +1,74 @@
+; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+lsl-fast | FileCheck %s
+
+%struct.a = type [256 x i16]
+%struct.b = type [256 x i32]
+%struct.c = type [256 x i64]
+
+declare void @foo()
+define i16 @halfword(%struct.a* %ctx, i32 %xor72) nounwind {
+; CHECK-LABEL: halfword:
+; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
+; CHECK: ldrh [[REG1:w[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #1]
+; CHECK: strh [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #1]
+ %shr81 = lshr i32 %xor72, 9
+ %conv82 = zext i32 %shr81 to i64
+ %idxprom83 = and i64 %conv82, 255
+ %arrayidx86 = getelementptr inbounds %struct.a, %struct.a* %ctx, i64 0, i64 %idxprom83
+ %result = load i16, i16* %arrayidx86, align 2
+ call void @foo()
+ store i16 %result, i16* %arrayidx86, align 2
+ ret i16 %result
+}
+
+define i32 @word(%struct.b* %ctx, i32 %xor72) nounwind {
+; CHECK-LABEL: word:
+; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
+; CHECK: ldr [[REG1:w[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #2]
+; CHECK: str [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #2]
+ %shr81 = lshr i32 %xor72, 9
+ %conv82 = zext i32 %shr81 to i64
+ %idxprom83 = and i64 %conv82, 255
+ %arrayidx86 = getelementptr inbounds %struct.b, %struct.b* %ctx, i64 0, i64 %idxprom83
+ %result = load i32, i32* %arrayidx86, align 4
+ call void @foo()
+ store i32 %result, i32* %arrayidx86, align 4
+ ret i32 %result
+}
+
+define i64 @doubleword(%struct.c* %ctx, i32 %xor72) nounwind {
+; CHECK-LABEL: doubleword:
+; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
+; CHECK: ldr [[REG1:x[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #3]
+; CHECK: str [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #3]
+ %shr81 = lshr i32 %xor72, 9
+ %conv82 = zext i32 %shr81 to i64
+ %idxprom83 = and i64 %conv82, 255
+ %arrayidx86 = getelementptr inbounds %struct.c, %struct.c* %ctx, i64 0, i64 %idxprom83
+ %result = load i64, i64* %arrayidx86, align 8
+ call void @foo()
+ store i64 %result, i64* %arrayidx86, align 8
+ ret i64 %result
+}
+
+define i64 @multi_use_non_memory(i64 %a, i64 %b) {
+; CHECK-LABEL: multi_use_non_memory:
+; CHECK: lsl [[REG1:x[0-9]+]], x0, #3
+; CHECK-NOT: cmp [[REG1]], x1, lsl #3
+; CHECK-NEXT: lsl [[REG2:x[0-9]+]], x1, #3
+; CHECK-NEXT: cmp [[REG1]], [[REG2]]
+entry:
+ %mul1 = shl i64 %a, 3
+ %mul2 = shl i64 %b, 3
+ %cmp = icmp slt i64 %mul1, %mul2
+ br i1 %cmp, label %truebb, label %falsebb
+truebb:
+ tail call void @foo()
+ unreachable
+falsebb:
+ %cmp2 = icmp sgt i64 %mul1, %mul2
+ br i1 %cmp2, label %exitbb, label %endbb
+exitbb:
+ ret i64 %mul1
+endbb:
+ ret i64 %mul2
+}
diff --git a/test/CodeGen/AArch64/aarch64-gep-opt.ll b/test/CodeGen/AArch64/aarch64-gep-opt.ll
index 6e4a47b04406..df9534ffde09 100644
--- a/test/CodeGen/AArch64/aarch64-gep-opt.ll
+++ b/test/CodeGen/AArch64/aarch64-gep-opt.ll
@@ -96,9 +96,13 @@ exit:
; CHECK-NoAA: add i64 [[TMP:%[a-zA-Z0-9]+]], 528
; CHECK-NoAA: add i64 [[TMP]], 532
; CHECK-NoAA: if.true:
-; CHECK-NoAA: {{%sunk[a-zA-Z0-9]+}} = add i64 [[TMP]], 532
+; CHECK-NoAA: inttoptr
+; CHECK-NoAA: bitcast
+; CHECK-NoAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8, {{.*}}, i64 532
; CHECK-NoAA: exit:
-; CHECK-NoAA: {{%sunk[a-zA-Z0-9]+}} = add i64 [[TMP]], 528
+; CHECK-NoAA: inttoptr
+; CHECK-NoAA: bitcast
+; CHECK-NoAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8, {{.*}}, i64 528
; CHECK-UseAA-LABEL: test_GEP_across_BB(
; CHECK-UseAA: [[PTR0:%[a-zA-Z0-9]+]] = getelementptr
diff --git a/test/CodeGen/AArch64/aarch64-named-reg-w18.ll b/test/CodeGen/AArch64/aarch64-named-reg-w18.ll
new file mode 100644
index 000000000000..341c7683dbaa
--- /dev/null
+++ b/test/CodeGen/AArch64/aarch64-named-reg-w18.ll
@@ -0,0 +1,14 @@
+; RUN: not llc -mtriple=aarch64-fuchsia -o - %s 2>&1 | FileCheck %s --check-prefix=ERROR
+; RUN: llc -mtriple=aarch64-fuchsia -mattr=+reserve-x18 -o - %s
+
+define void @set_w18(i32 %x) {
+entry:
+; FIXME: Include an allocatable-specific error message
+; ERROR: Invalid register name "w18".
+ tail call void @llvm.write_register.i32(metadata !0, i32 %x)
+ ret void
+}
+
+declare void @llvm.write_register.i32(metadata, i32) nounwind
+
+!0 = !{!"w18"}
diff --git a/test/CodeGen/AArch64/aarch64-named-reg-x18.ll b/test/CodeGen/AArch64/aarch64-named-reg-x18.ll
new file mode 100644
index 000000000000..eed852710ba0
--- /dev/null
+++ b/test/CodeGen/AArch64/aarch64-named-reg-x18.ll
@@ -0,0 +1,14 @@
+; RUN: not llc -mtriple=aarch64-fuchsia -o - %s 2>&1 | FileCheck %s --check-prefix=ERROR
+; RUN: llc -mtriple=aarch64-fuchsia -mattr=+reserve-x18 -o - %s
+
+define void @set_x18(i64 %x) {
+entry:
+; FIXME: Include an allocatable-specific error message
+; ERROR: Invalid register name "x18".
+ tail call void @llvm.write_register.i64(metadata !0, i64 %x)
+ ret void
+}
+
+declare void @llvm.write_register.i64(metadata, i64) nounwind
+
+!0 = !{!"x18"}
diff --git a/test/CodeGen/AArch64/and-sink.ll b/test/CodeGen/AArch64/and-sink.ll
new file mode 100644
index 000000000000..91b7bd0db172
--- /dev/null
+++ b/test/CodeGen/AArch64/and-sink.ll
@@ -0,0 +1,90 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: opt -S -codegenprepare -mtriple=aarch64-linux %s | FileCheck --check-prefix=CHECK-CGP %s
+
+@A = global i32 zeroinitializer
+@B = global i32 zeroinitializer
+@C = global i32 zeroinitializer
+
+; Test that 'and' is sunk into the cmp block to form tbz.
+define i32 @and_sink1(i32 %a, i1 %c) {
+; CHECK-LABEL: and_sink1:
+; CHECK: tbz w1, #0
+; CHECK: str wzr, [x{{[0-9]+}}, :lo12:A]
+; CHECK: tbnz {{w[0-9]+}}, #2
+
+; CHECK-CGP-LABEL: @and_sink1(
+; CHECK-CGP-NOT: and i32
+ %and = and i32 %a, 4
+ br i1 %c, label %bb0, label %bb2
+bb0:
+; CHECK-CGP-LABEL: bb0:
+; CHECK-CGP: and i32
+; CHECK-CGP-NEXT: icmp eq i32
+; CHECK-CGP-NEXT: store
+; CHECK-CGP-NEXT: br
+ %cmp = icmp eq i32 %and, 0
+ store i32 0, i32* @A
+ br i1 %cmp, label %bb1, label %bb2
+bb1:
+ ret i32 1
+bb2:
+ ret i32 0
+}
+
+; Test that both 'and' and cmp get sunk to form tbz.
+define i32 @and_sink2(i32 %a, i1 %c, i1 %c2) {
+; CHECK-LABEL: and_sink2:
+; CHECK: str wzr, [x{{[0-9]+}}, :lo12:A]
+; CHECK: tbz w1, #0
+; CHECK: str wzr, [x{{[0-9]+}}, :lo12:B]
+; CHECK: tbz w2, #0
+; CHECK: str wzr, [x{{[0-9]+}}, :lo12:C]
+; CHECK: tbnz {{w[0-9]+}}, #2
+
+; CHECK-CGP-LABEL: @and_sink2(
+; CHECK-CGP-NOT: and i32
+ %and = and i32 %a, 4
+ store i32 0, i32* @A
+ br i1 %c, label %bb0, label %bb3
+bb0:
+; CHECK-CGP-LABEL: bb0:
+; CHECK-CGP-NOT: and i32
+; CHECK-CGP-NOT: icmp
+ %cmp = icmp eq i32 %and, 0
+ store i32 0, i32* @B
+ br i1 %c2, label %bb1, label %bb3
+bb1:
+; CHECK-CGP-LABEL: bb1:
+; CHECK-CGP: and i32
+; CHECK-CGP-NEXT: icmp eq i32
+; CHECK-CGP-NEXT: store
+; CHECK-CGP-NEXT: br
+ store i32 0, i32* @C
+ br i1 %cmp, label %bb2, label %bb0
+bb2:
+ ret i32 1
+bb3:
+ ret i32 0
+}
+
+; Test that 'and' is not sunk since cbz is a better alternative.
+define i32 @and_sink3(i32 %a) {
+; CHECK-LABEL: and_sink3:
+; CHECK: and [[REG:w[0-9]+]], w0, #0x3
+; CHECK: [[LOOP:.L[A-Z0-9_]+]]:
+; CHECK: str wzr, [x{{[0-9]+}}, :lo12:A]
+; CHECK: cbz [[REG]], [[LOOP]]
+
+; CHECK-CGP-LABEL: @and_sink3(
+; CHECK-CGP-NEXT: and i32
+ %and = and i32 %a, 3
+ br label %bb0
+bb0:
+; CHECK-CGP-LABEL: bb0:
+; CHECK-CGP-NOT: and i32
+ %cmp = icmp eq i32 %and, 0
+ store i32 0, i32* @A
+ br i1 %cmp, label %bb0, label %bb2
+bb2:
+ ret i32 0
+}
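Reading the CHECK-CGP lines back as IR: after sinking, bb0 in and_sink1 is expected to look roughly like the sketch below (reconstructed from the CHECK lines above, not verbatim pass output):

  bb0:
    %and = and i32 %a, 4         ; duplicated from entry, next to its compare
    %cmp = icmp eq i32 %and, 0   ; and+icmp pair is now foldable into tbz/tbnz
    store i32 0, i32* @A
    br i1 %cmp, label %bb1, label %bb2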
diff --git a/test/CodeGen/AArch64/argument-blocks.ll b/test/CodeGen/AArch64/argument-blocks.ll
index 3169abc2dcb3..b5374ca8ced5 100644
--- a/test/CodeGen/AArch64/argument-blocks.ll
+++ b/test/CodeGen/AArch64/argument-blocks.ll
@@ -59,10 +59,10 @@ define i64 @test_hfa_ignores_gprs([7 x float], [2 x float] %in, i64, i64 %res) {
}
; [2 x float] should not be promoted to double by the Darwin varargs handling,
-; but should go in an 8-byte aligned slot.
+; but should go in an 8-byte aligned slot, where its stores can be merged into a single integer store.
define void @test_varargs_stackalign() {
; CHECK-LABEL: test_varargs_stackalign:
-; CHECK-DARWINPCS: stp {{w[0-9]+}}, {{w[0-9]+}}, [sp, #16]
+; CHECK-DARWINPCS: str {{x[0-9]+}}, [sp, #16]
call void(...) @callee([3 x float] undef, [2 x float] [float 1.0, float 2.0])
ret void
diff --git a/test/CodeGen/AArch64/arm64-abi-varargs.ll b/test/CodeGen/AArch64/arm64-abi-varargs.ll
index a29f8c4b57ab..0a7965571480 100644
--- a/test/CodeGen/AArch64/arm64-abi-varargs.ll
+++ b/test/CodeGen/AArch64/arm64-abi-varargs.ll
@@ -3,7 +3,7 @@
; rdar://13625505
; Here we have 9 fixed integer arguments; the 9th argument is on the stack, and
; the varargs start right after it at 8-byte alignment.
-define void @fn9(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, ...) nounwind noinline ssp {
+define void @fn9(i32* %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, ...) nounwind noinline ssp {
; CHECK-LABEL: fn9:
; 9th fixed argument
; CHECK: ldr {{w[0-9]+}}, [sp, #64]
@@ -30,7 +30,6 @@ define void @fn9(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7,
%a10 = alloca i32, align 4
%a11 = alloca i32, align 4
%a12 = alloca i32, align 4
- store i32 %a1, i32* %1, align 4
store i32 %a2, i32* %2, align 4
store i32 %a3, i32* %3, align 4
store i32 %a4, i32* %4, align 4
@@ -39,6 +38,7 @@ define void @fn9(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7,
store i32 %a7, i32* %7, align 4
store i32 %a8, i32* %8, align 4
store i32 %a9, i32* %9, align 4
+ store i32 %a9, i32* %a1
%10 = bitcast i8** %args to i8*
call void @llvm.va_start(i8* %10)
%11 = va_arg i8** %args, i32
@@ -93,7 +93,7 @@ define i32 @main() nounwind ssp {
%10 = load i32, i32* %a10, align 4
%11 = load i32, i32* %a11, align 4
%12 = load i32, i32* %a12, align 4
- call void (i32, i32, i32, i32, i32, i32, i32, i32, i32, ...) @fn9(i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12)
+ call void (i32*, i32, i32, i32, i32, i32, i32, i32, i32, ...) @fn9(i32* %a1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12)
ret i32 0
}
diff --git a/test/CodeGen/AArch64/arm64-abi.ll b/test/CodeGen/AArch64/arm64-abi.ll
index fb52b1d99fc9..6cf0ab35b9b5 100644
--- a/test/CodeGen/AArch64/arm64-abi.ll
+++ b/test/CodeGen/AArch64/arm64-abi.ll
@@ -205,10 +205,7 @@ declare i32 @args_i32(i32, i32, i32, i32, i32, i32, i32, i32, i16 signext, i32,
define i32 @test8(i32 %argc, i8** nocapture %argv) nounwind {
entry:
; CHECK-LABEL: test8
-; CHECK: strb {{w[0-9]+}}, [sp, #3]
-; CHECK: strb wzr, [sp, #2]
-; CHECK: strb {{w[0-9]+}}, [sp, #1]
-; CHECK: strb wzr, [sp]
+; CHECK: str w8, [sp]
; CHECK: bl
; FAST-LABEL: test8
; FAST: strb {{w[0-9]+}}, [sp]
diff --git a/test/CodeGen/AArch64/arm64-addr-type-promotion.ll b/test/CodeGen/AArch64/arm64-addr-type-promotion.ll
index c57be5684ade..0009fe52e177 100644
--- a/test/CodeGen/AArch64/arm64-addr-type-promotion.ll
+++ b/test/CodeGen/AArch64/arm64-addr-type-promotion.ll
@@ -10,14 +10,17 @@ define zeroext i8 @fullGtU(i32 %i1, i32 %i2) {
; CHECK: fullGtU
; CHECK: adrp [[PAGE:x[0-9]+]], _block@GOTPAGE
; CHECK: ldr [[ADDR:x[0-9]+]], {{\[}}[[PAGE]], _block@GOTPAGEOFF]
+; CHECK: sxtw [[I1:x[0-9]+]], w0
+; CHECK: sxtw [[I2:x[0-9]+]], w1
; CHECK-NEXT: ldr [[BLOCKBASE:x[0-9]+]], {{\[}}[[ADDR]]]
-; CHECK-NEXT: ldrb [[BLOCKVAL1:w[0-9]+]], {{\[}}[[BLOCKBASE]], w0, sxtw]
-; CHECK-NEXT: ldrb [[BLOCKVAL2:w[0-9]+]], {{\[}}[[BLOCKBASE]], w1, sxtw]
+; CHECK-NEXT: ldrb [[BLOCKVAL1:w[0-9]+]], {{\[}}[[BLOCKBASE]], [[I1]]]
+; CHECK-NEXT: ldrb [[BLOCKVAL2:w[0-9]+]], {{\[}}[[BLOCKBASE]], [[I2]]]
+
; CHECK-NEXT: cmp [[BLOCKVAL1]], [[BLOCKVAL2]]
; CHECK-NEXT: b.ne
; Next BB
-; CHECK: add [[BLOCKBASE2:x[0-9]+]], [[BLOCKBASE]], w1, sxtw
-; CHECK-NEXT: add [[BLOCKBASE1:x[0-9]+]], [[BLOCKBASE]], w0, sxtw
+; CHECK: add [[BLOCKBASE2:x[0-9]+]], [[BLOCKBASE]], [[I2]]
+; CHECK-NEXT: add [[BLOCKBASE1:x[0-9]+]], [[BLOCKBASE]], [[I1]]
; CHECK-NEXT: ldrb [[LOADEDVAL1:w[0-9]+]], {{\[}}[[BLOCKBASE1]], #1]
; CHECK-NEXT: ldrb [[LOADEDVAL2:w[0-9]+]], {{\[}}[[BLOCKBASE2]], #1]
; CHECK-NEXT: cmp [[LOADEDVAL1]], [[LOADEDVAL2]]
diff --git a/test/CodeGen/AArch64/arm64-addrmode.ll b/test/CodeGen/AArch64/arm64-addrmode.ll
index e8fc4e68fcbe..6da767921632 100644
--- a/test/CodeGen/AArch64/arm64-addrmode.ll
+++ b/test/CodeGen/AArch64/arm64-addrmode.ll
@@ -112,8 +112,8 @@ define void @t10(i64 %a) {
define void @t11(i64 %a) {
; CHECK-LABEL: t11:
-; CHECK: mov w[[NUM:[0-9]+]], #19070976
-; CHECK: movk w[[NUM:[0-9]+]], #17767
+; CHECK: mov w[[NUM:[0-9]+]], #17767
+; CHECK: movk w[[NUM:[0-9]+]], #291
; CHECK-NEXT: ldr xzr, [x0, x[[NUM]]]
%1 = add i64 %a, 19088743 ;0x1234567
%2 = inttoptr i64 %1 to i64*
diff --git a/test/CodeGen/AArch64/arm64-atomic.ll b/test/CodeGen/AArch64/arm64-atomic.ll
index c87103481adf..2c9a3bbaa500 100644
--- a/test/CodeGen/AArch64/arm64-atomic.ll
+++ b/test/CodeGen/AArch64/arm64-atomic.ll
@@ -9,10 +9,10 @@ define i32 @val_compare_and_swap(i32* %p, i32 %cmp, i32 %new) #0 {
; CHECK-NEXT: b.ne [[FAILBB:.?LBB[0-9_]+]]
; CHECK-NEXT: stxr [[SCRATCH_REG:w[0-9]+]], w2, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[SCRATCH_REG]], [[TRYBB]]
-; CHECK-NEXT: b [[EXITBB:.?LBB[0-9_]+]]
+; CHECK-NEXT: ret
; CHECK-NEXT: [[FAILBB]]:
; CHECK-NEXT: clrex
-; CHECK-NEXT: [[EXITBB]]:
+; CHECK-NEXT: ret
%pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire
%val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
@@ -27,10 +27,12 @@ define i32 @val_compare_and_swap_from_load(i32* %p, i32 %cmp, i32* %pnew) #0 {
; CHECK-NEXT: b.ne [[FAILBB:.?LBB[0-9_]+]]
; CHECK-NEXT: stxr [[SCRATCH_REG:w[0-9]+]], [[NEW]], [x0]
; CHECK-NEXT: cbnz [[SCRATCH_REG]], [[TRYBB]]
-; CHECK-NEXT: b [[EXITBB:.?LBB[0-9_]+]]
+; CHECK-NEXT: mov x0, x[[ADDR]]
+; CHECK-NEXT: ret
; CHECK-NEXT: [[FAILBB]]:
; CHECK-NEXT: clrex
-; CHECK-NEXT: [[EXITBB]]:
+; CHECK-NEXT: mov x0, x[[ADDR]]
+; CHECK-NEXT: ret
%new = load i32, i32* %pnew
%pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire
%val = extractvalue { i32, i1 } %pair, 0
@@ -41,15 +43,15 @@ define i32 @val_compare_and_swap_rel(i32* %p, i32 %cmp, i32 %new) #0 {
; CHECK-LABEL: val_compare_and_swap_rel:
; CHECK-NEXT: mov x[[ADDR:[0-9]+]], x0
; CHECK-NEXT: [[TRYBB:.?LBB[0-9_]+]]:
-; CHECK-NEXT: ldaxr [[RESULT:w[0-9]+]], [x[[ADDR]]
+; CHECK-NEXT: ldaxr [[RESULT:w[0-9]+]], [x[[ADDR]]]
; CHECK-NEXT: cmp [[RESULT]], w1
; CHECK-NEXT: b.ne [[FAILBB:.?LBB[0-9_]+]]
-; CHECK-NEXT: stlxr [[SCRATCH_REG:w[0-9]+]], w2, [x[[ADDR]]
+; CHECK-NEXT: stlxr [[SCRATCH_REG:w[0-9]+]], w2, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[SCRATCH_REG]], [[TRYBB]]
-; CHECK-NEXT: b [[EXITBB:.?LBB[0-9_]+]]
+; CHECK-NEXT: ret
; CHECK-NEXT: [[FAILBB]]:
; CHECK-NEXT: clrex
-; CHECK-NEXT: [[EXITBB]]:
+; CHECK-NEXT: ret
%pair = cmpxchg i32* %p, i32 %cmp, i32 %new acq_rel monotonic
%val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
@@ -64,10 +66,10 @@ define i64 @val_compare_and_swap_64(i64* %p, i64 %cmp, i64 %new) #0 {
; CHECK-NEXT: b.ne [[FAILBB:.?LBB[0-9_]+]]
; CHECK-NEXT: stxr [[SCRATCH_REG:w[0-9]+]], x2, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[SCRATCH_REG]], [[TRYBB]]
-; CHECK-NEXT: b [[EXITBB:.?LBB[0-9_]+]]
+; CHECK-NEXT: ret
; CHECK-NEXT: [[FAILBB]]:
; CHECK-NEXT: clrex
-; CHECK-NEXT: [[EXITBB]]:
+; CHECK-NEXT: ret
%pair = cmpxchg i64* %p, i64 %cmp, i64 %new monotonic monotonic
%val = extractvalue { i64, i1 } %pair, 0
ret i64 %val
diff --git a/test/CodeGen/AArch64/arm64-bitfield-extract.ll b/test/CodeGen/AArch64/arm64-bitfield-extract.ll
index 339dbbe18fc0..91aed060677a 100644
--- a/test/CodeGen/AArch64/arm64-bitfield-extract.ll
+++ b/test/CodeGen/AArch64/arm64-bitfield-extract.ll
@@ -348,8 +348,8 @@ entry:
; CHECK-LABEL: fct16:
; CHECK: ldr [[REG1:w[0-9]+]],
; Create the constant
-; CHECK: mov [[REGCST:w[0-9]+]], #1703936
-; CHECK: movk [[REGCST]], #33120
+; CHECK: mov [[REGCST:w[0-9]+]], #33120
+; CHECK: movk [[REGCST]], #26, lsl #16
; Do the masking
; CHECK: and [[REG2:w[0-9]+]], [[REG1]], [[REGCST]]
; CHECK-NEXT: bfxil [[REG2]], w1, #16, #3
@@ -377,8 +377,8 @@ entry:
; CHECK-LABEL: fct17:
; CHECK: ldr [[REG1:x[0-9]+]],
; Create the constant
-; CHECK: mov w[[REGCST:[0-9]+]], #1703936
-; CHECK: movk w[[REGCST]], #33120
+; CHECK: mov w[[REGCST:[0-9]+]], #33120
+; CHECK: movk w[[REGCST]], #26, lsl #16
; Do the masking
; CHECK: and [[REG2:x[0-9]+]], [[REG1]], x[[REGCST]]
; CHECK-NEXT: bfxil [[REG2]], x1, #16, #3
diff --git a/test/CodeGen/AArch64/arm64-blockaddress.ll b/test/CodeGen/AArch64/arm64-blockaddress.ll
index 5df840216352..b50ffdef5ddd 100644
--- a/test/CodeGen/AArch64/arm64-blockaddress.ll
+++ b/test/CodeGen/AArch64/arm64-blockaddress.ll
@@ -15,10 +15,10 @@ entry:
; CHECK-LINUX: add {{x[0-9]+}}, [[REG]], :lo12:.Ltmp1
; CHECK-LARGE-LABEL: t:
-; CHECK-LARGE: movz [[ADDR_REG:x[0-9]+]], #:abs_g3:[[DEST_LBL:.Ltmp[0-9]+]]
-; CHECK-LARGE: movk [[ADDR_REG]], #:abs_g2_nc:[[DEST_LBL]]
+; CHECK-LARGE: movz [[ADDR_REG:x[0-9]+]], #:abs_g0_nc:[[DEST_LBL:.Ltmp[0-9]+]]
; CHECK-LARGE: movk [[ADDR_REG]], #:abs_g1_nc:[[DEST_LBL]]
-; CHECK-LARGE: movk [[ADDR_REG]], #:abs_g0_nc:[[DEST_LBL]]
+; CHECK-LARGE: movk [[ADDR_REG]], #:abs_g2_nc:[[DEST_LBL]]
+; CHECK-LARGE: movk [[ADDR_REG]], #:abs_g3:[[DEST_LBL]]
%recover = alloca i64, align 8
store volatile i64 ptrtoint (i8* blockaddress(@t, %mylabel) to i64), i64* %recover, align 8
diff --git a/test/CodeGen/AArch64/arm64-builtins-linux.ll b/test/CodeGen/AArch64/arm64-builtins-linux.ll
index 64239582f230..f86ee1afe555 100644
--- a/test/CodeGen/AArch64/arm64-builtins-linux.ll
+++ b/test/CodeGen/AArch64/arm64-builtins-linux.ll
@@ -1,4 +1,6 @@
; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-fuchsia | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-fuchsia -code-model=kernel | FileCheck --check-prefix=FUCHSIA-KERNEL %s
; Function Attrs: nounwind readnone
declare i8* @llvm.thread.pointer() #1
@@ -6,6 +8,8 @@ declare i8* @llvm.thread.pointer() #1
define i8* @thread_pointer() {
; CHECK: thread_pointer:
; CHECK: mrs {{x[0-9]+}}, TPIDR_EL0
+; FUCHSIA-KERNEL: thread_pointer:
+; FUCHSIA-KERNEL: mrs {{x[0-9]+}}, TPIDR_EL1
%1 = tail call i8* @llvm.thread.pointer()
ret i8* %1
}
diff --git a/test/CodeGen/AArch64/arm64-code-model-large-abs.ll b/test/CodeGen/AArch64/arm64-code-model-large-abs.ll
index 9f50fea370e4..171941748c8f 100644
--- a/test/CodeGen/AArch64/arm64-code-model-large-abs.ll
+++ b/test/CodeGen/AArch64/arm64-code-model-large-abs.ll
@@ -9,10 +9,10 @@ define i8* @global_addr() {
; CHECK-LABEL: global_addr:
ret i8* @var8
; The movz/movk calculation should end up returned directly in x0.
-; CHECK: movz x0, #:abs_g3:var8
-; CHECK: movk x0, #:abs_g2_nc:var8
+; CHECK: movz x0, #:abs_g0_nc:var8
; CHECK: movk x0, #:abs_g1_nc:var8
-; CHECK: movk x0, #:abs_g0_nc:var8
+; CHECK: movk x0, #:abs_g2_nc:var8
+; CHECK: movk x0, #:abs_g3:var8
; CHECK-NEXT: ret
}
@@ -20,10 +20,10 @@ define i8 @global_i8() {
; CHECK-LABEL: global_i8:
%val = load i8, i8* @var8
ret i8 %val
-; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var8
-; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var8
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:var8
; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var8
-; CHECK: movk x[[ADDR_REG]], #:abs_g0_nc:var8
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var8
+; CHECK: movk x[[ADDR_REG]], #:abs_g3:var8
; CHECK: ldrb w0, [x[[ADDR_REG]]]
}
@@ -31,10 +31,10 @@ define i16 @global_i16() {
; CHECK-LABEL: global_i16:
%val = load i16, i16* @var16
ret i16 %val
-; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var16
-; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var16
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:var16
; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var16
-; CHECK: movk x[[ADDR_REG]], #:abs_g0_nc:var16
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var16
+; CHECK: movk x[[ADDR_REG]], #:abs_g3:var16
; CHECK: ldrh w0, [x[[ADDR_REG]]]
}
@@ -42,10 +42,10 @@ define i32 @global_i32() {
; CHECK-LABEL: global_i32:
%val = load i32, i32* @var32
ret i32 %val
-; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var32
-; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var32
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:var32
; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var32
-; CHECK: movk x[[ADDR_REG]], #:abs_g0_nc:var32
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var32
+; CHECK: movk x[[ADDR_REG]], #:abs_g3:var32
; CHECK: ldr w0, [x[[ADDR_REG]]]
}
@@ -53,10 +53,10 @@ define i64 @global_i64() {
; CHECK-LABEL: global_i64:
%val = load i64, i64* @var64
ret i64 %val
-; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var64
-; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var64
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:var64
; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var64
-; CHECK: movk x[[ADDR_REG]], #:abs_g0_nc:var64
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var64
+; CHECK: movk x[[ADDR_REG]], #:abs_g3:var64
; CHECK: ldr x0, [x[[ADDR_REG]]]
}
@@ -64,9 +64,9 @@ define <2 x i64> @constpool() {
; CHECK-LABEL: constpool:
ret <2 x i64> <i64 123456789, i64 987654321100>
-; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:[[CPADDR:.LCPI[0-9]+_[0-9]+]]
-; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:[[CPADDR]]
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:[[CPADDR:.LCPI[0-9]+_[0-9]+]]
; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:[[CPADDR]]
-; CHECK: movk x[[ADDR_REG]], #:abs_g0_nc:[[CPADDR]]
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:[[CPADDR]]
+; CHECK: movk x[[ADDR_REG]], #:abs_g3:[[CPADDR]]
; CHECK: ldr q0, [x[[ADDR_REG]]]
}
diff --git a/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll b/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll
index c9f668f2c424..a104b65ea861 100644
--- a/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll
+++ b/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll
@@ -258,8 +258,7 @@ false:
; => We have one zext of %zextld left and we created one sext of %ld2.
; 2. We try to promote the operand of %sextaddza.
; a. This creates one sext of %zexta and one of %zextld
-; b. The sext of %zexta does not lead to any load, it stays here, even if it
-; could have been combine with the zext of %a.
+; b. The sext of %zexta can be combined with the zext of %a.
; c. The sext of %zextld leads to %ld and can be combined with it. This is
; done by promoting %zextld. This is fine with the current heuristic:
; neutral.
@@ -281,16 +280,14 @@ false:
; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %addr1
; OPT-NEXT: [[ZEXTLD1_1:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
; OPT-NEXT: [[ZEXTLD1_2:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
-; OPT-NEXT: [[ZEXTLD1_3:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
; OPT-NEXT: [[LD2:%[a-zA-Z_0-9-]+]] = load i32, i32* %addr2
; OPT-NEXT: [[SEXTLD2:%[a-zA-Z_0-9-]+]] = sext i32 [[LD2]] to i64
-; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nsw i64 [[SEXTLD2]], [[ZEXTLD1_1]]
-; We do not combine this one: see 2.b.
-; OPT-NEXT: [[ZEXTA:%[a-zA-Z_0-9-]+]] = zext i8 %a to i32
-; OPT-NEXT: [[SEXTZEXTA:%[a-zA-Z_0-9-]+]] = sext i32 [[ZEXTA]] to i64
-; OPT-NEXT: [[RESZA:%[a-zA-Z_0-9-]+]] = add nsw i64 [[SEXTZEXTA]], [[ZEXTLD1_3]]
+; OPT-NEXT: [[ZEXTLD1_3:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
+; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nsw i64 [[SEXTLD2]], [[ZEXTLD1_3]]
+; OPT-NEXT: [[ZEXTLD1_4:%[a-zA-Z_0-9-]+]] = zext i8 %a to i64
+; OPT-NEXT: [[RESZA:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ZEXTLD1_4]], [[ZEXTLD1_2]]
; OPT-NEXT: [[SEXTB:%[a-zA-Z_0-9-]+]] = sext i32 %b to i64
-; OPT-NEXT: [[RESB:%[a-zA-Z_0-9-]+]] = add nsw i64 [[SEXTB]], [[ZEXTLD1_2]]
+; OPT-NEXT: [[RESB:%[a-zA-Z_0-9-]+]] = add nsw i64 [[SEXTB]], [[ZEXTLD1_1]]
;
; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i32
; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = sext i32 [[ADD]] to i64
@@ -636,3 +633,24 @@ define i64 @doNotPromoteBecauseOfPairedLoad(i32* %p, i32 %cst) {
%final = add i64 %sextres, %zextLd0
ret i64 %final
}
+
+define i64 @promoteZextShl(i1 %c, i16* %P) {
+entry:
+; OPTALL-LABEL: promoteZextShl
+; OPTALL-LABEL: entry:
+; OPT: %[[LD:.*]] = load i16, i16* %P
+; OPT: %[[EXT:.*]] = zext i16 %[[LD]] to i64
+; OPT-LABEL: if.then:
+; OPT: shl nsw i64 %[[EXT]], 1
+; DISABLE-LABEL: if.then:
+; DISABLE: %r = sext i32 %shl2 to i64
+ %ld = load i16, i16* %P
+ br i1 %c, label %end, label %if.then
+if.then:
+ %z = zext i16 %ld to i32
+ %shl2 = shl nsw i32 %z, 1
+ %r = sext i32 %shl2 to i64
+ ret i64 %r
+end:
+ ret i64 0
+}
diff --git a/test/CodeGen/AArch64/arm64-const-addr.ll b/test/CodeGen/AArch64/arm64-const-addr.ll
index e55db2904489..bbb1ce4aced7 100644
--- a/test/CodeGen/AArch64/arm64-const-addr.ll
+++ b/test/CodeGen/AArch64/arm64-const-addr.ll
@@ -5,8 +5,8 @@
; Test if the constant base address gets only materialized once.
define i32 @test1() nounwind {
; CHECK-LABEL: test1
-; CHECK: mov w8, #68091904
-; CHECK-NEXT: movk w8, #49152
+; CHECK: mov w8, #49152
+; CHECK-NEXT: movk w8, #1039, lsl #16
; CHECK-NEXT: ldp w9, w10, [x8, #4]
; CHECK: ldr w8, [x8, #12]
%at = inttoptr i64 68141056 to %T*
diff --git a/test/CodeGen/AArch64/arm64-crc32.ll b/test/CodeGen/AArch64/arm64-crc32.ll
index 22111de5a3aa..df9465a6bda5 100644
--- a/test/CodeGen/AArch64/arm64-crc32.ll
+++ b/test/CodeGen/AArch64/arm64-crc32.ll
@@ -1,4 +1,5 @@
; RUN: llc -mtriple=arm64-eabi -mattr=+crc -o - %s | FileCheck %s
+; RUN: llc -mtriple=arm64-eabi -mcpu=cortex-a53 -mattr=+crc -o - %s | FileCheck %s
define i32 @test_crc32b(i32 %cur, i8 %next) {
; CHECK-LABEL: test_crc32b:
diff --git a/test/CodeGen/AArch64/arm64-elf-globals.ll b/test/CodeGen/AArch64/arm64-elf-globals.ll
index b1d5524aee87..92dc8179f8ea 100644
--- a/test/CodeGen/AArch64/arm64-elf-globals.ll
+++ b/test/CodeGen/AArch64/arm64-elf-globals.ll
@@ -2,6 +2,10 @@
; RUN: llc -mtriple=arm64-linux-gnu -o - %s -O0 -mcpu=cyclone | FileCheck %s --check-prefix=CHECK-FAST
; RUN: llc -mtriple=arm64-linux-gnu -relocation-model=pic -o - %s -mcpu=cyclone | FileCheck %s --check-prefix=CHECK-PIC
; RUN: llc -mtriple=arm64-linux-gnu -O0 -relocation-model=pic -o - %s -mcpu=cyclone | FileCheck %s --check-prefix=CHECK-FAST-PIC
+; RUN: llc -mtriple=aarch64-fuchsia -code-model=kernel -o - %s -mcpu=cyclone | FileCheck %s
+; RUN: llc -mtriple=aarch64-fuchsia -code-model=kernel -o - %s -O0 -mcpu=cyclone | FileCheck %s --check-prefix=CHECK-FAST
+; RUN: llc -mtriple=aarch64-fuchsia -code-model=kernel -relocation-model=pic -o - %s -mcpu=cyclone | FileCheck %s --check-prefix=CHECK-PIC
+; RUN: llc -mtriple=aarch64-fuchsia -code-model=kernel -O0 -relocation-model=pic -o - %s -mcpu=cyclone | FileCheck %s --check-prefix=CHECK-FAST-PIC
@var8 = external global i8, align 1
@var16 = external global i16, align 2
diff --git a/test/CodeGen/AArch64/arm64-extern-weak.ll b/test/CodeGen/AArch64/arm64-extern-weak.ll
index f00efbcea780..990782cb69a0 100644
--- a/test/CodeGen/AArch64/arm64-extern-weak.ll
+++ b/test/CodeGen/AArch64/arm64-extern-weak.ll
@@ -15,10 +15,10 @@ define i32()* @foo() {
; In the large model, the usual relocations are absolute and can
; materialise 0.
-; CHECK-LARGE: movz x0, #:abs_g3:var
-; CHECK-LARGE: movk x0, #:abs_g2_nc:var
+; CHECK-LARGE: movz x0, #:abs_g0_nc:var
; CHECK-LARGE: movk x0, #:abs_g1_nc:var
-; CHECK-LARGE: movk x0, #:abs_g0_nc:var
+; CHECK-LARGE: movk x0, #:abs_g2_nc:var
+; CHECK-LARGE: movk x0, #:abs_g3:var
}
@@ -33,10 +33,10 @@ define i32* @bar() {
; In the large model, the usual relocations are absolute and can
; materialise 0.
-; CHECK-LARGE: movz [[ARR_VAR:x[0-9]+]], #:abs_g3:arr_var
-; CHECK-LARGE: movk [[ARR_VAR]], #:abs_g2_nc:arr_var
+; CHECK-LARGE: movz [[ARR_VAR:x[0-9]+]], #:abs_g0_nc:arr_var
; CHECK-LARGE: movk [[ARR_VAR]], #:abs_g1_nc:arr_var
-; CHECK-LARGE: movk [[ARR_VAR]], #:abs_g0_nc:arr_var
+; CHECK-LARGE: movk [[ARR_VAR]], #:abs_g2_nc:arr_var
+; CHECK-LARGE: movk [[ARR_VAR]], #:abs_g3:arr_var
}
@defined_weak_var = internal unnamed_addr global i32 0
@@ -46,8 +46,8 @@ define i32* @wibble() {
; CHECK: adrp [[BASE:x[0-9]+]], defined_weak_var
; CHECK: add x0, [[BASE]], :lo12:defined_weak_var
-; CHECK-LARGE: movz x0, #:abs_g3:defined_weak_var
-; CHECK-LARGE: movk x0, #:abs_g2_nc:defined_weak_var
+; CHECK-LARGE: movz x0, #:abs_g0_nc:defined_weak_var
; CHECK-LARGE: movk x0, #:abs_g1_nc:defined_weak_var
-; CHECK-LARGE: movk x0, #:abs_g0_nc:defined_weak_var
+; CHECK-LARGE: movk x0, #:abs_g2_nc:defined_weak_var
+; CHECK-LARGE: movk x0, #:abs_g3:defined_weak_var
}
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll b/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll
index 9dae7a6f5b69..4aa10da7243d 100644
--- a/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll
+++ b/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll
@@ -37,9 +37,9 @@ entry:
define signext i8 @foo3() nounwind ssp {
entry:
; CHECK-LABEL: @foo3
-; CHECK: mov x[[REG:[0-9]+]], #12343736008704
+; CHECK: mov x[[REG:[0-9]+]], #12274
; CHECK: movk x[[REG]], #29646, lsl #16
-; CHECK: movk x[[REG]], #12274
+; CHECK: movk x[[REG]], #2874, lsl #32
%0 = load i8*, i8** @pd2, align 8
%arrayidx = getelementptr inbounds i8, i8* %0, i64 12345678901234
%1 = load i8, i8* %arrayidx, align 1
diff --git a/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll b/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
index 071b2d0dbca4..a502800923fd 100644
--- a/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
+++ b/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
@@ -6216,11 +6216,11 @@ define <4 x i16> @test_v4i16_post_reg_ld1lane_forced_narrow(i16* %bar, i16** %pt
declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>)
; CHECK-LABEL: test_ld1lane_build:
-; CHECK-DAG: ld1.s { [[REG0:v[0-9]+]] }[0], [x0]
-; CHECK-DAG: ld1.s { [[REG0:v[0-9]+]] }[1], [x1]
-; CHECK-DAG: ld1.s { [[REG1:v[0-9]+]] }[0], [x2]
-; CHECK-DAG: ld1.s { [[REG1:v[0-9]+]] }[1], [x3]
-; CHECK: sub.2s v[[REGNUM2:[0-9]+]], [[REG0]], [[REG1]]
+; CHECK-DAG: ldr s[[REGNUM0:[0-9]+]], [x0]
+; CHECK-DAG: ld1.s { v[[REGNUM0:[0-9]+]] }[1], [x1]
+; CHECK-DAG: ldr s[[REGNUM1:[0-9]+]], [x2]
+; CHECK-DAG: ld1.s { v[[REGNUM1:[0-9]+]] }[1], [x3]
+; CHECK: sub.2s v[[REGNUM2:[0-9]+]], v[[REGNUM0]], v[[REGNUM1]]
; CHECK-NEXT: str d[[REGNUM2]], [x4]
; CHECK-NEXT: ret
define void @test_ld1lane_build(i32* %ptr0, i32* %ptr1, i32* %ptr2, i32* %ptr3, <2 x i32>* %out) {
@@ -6238,3 +6238,84 @@ define void @test_ld1lane_build(i32* %ptr0, i32* %ptr1, i32* %ptr2, i32* %ptr3,
store <2 x i32> %sub, <2 x i32>* %out, align 16
ret void
}
+
+; CHECK-LABEL: test_ld1lane_build_i16:
+; CHECK-DAG: ldr h[[REGNUM1:[0-9]+]], [x0]
+; CHECK-DAG: ld1.h { v[[REGNUM1]] }[1], [x1]
+; CHECK-DAG: ld1.h { v[[REGNUM1]] }[2], [x2]
+; CHECK-DAG: ld1.h { v[[REGNUM1]] }[3], [x3]
+; CHECK: sub.4h v[[REGNUM2:[0-9]+]], v[[REGNUM1]], v0
+; CHECK-NEXT: str d[[REGNUM2]], [x4]
+; CHECK-NEXT: ret
+define void @test_ld1lane_build_i16(i16* %a, i16* %b, i16* %c, i16* %d, <4 x i16> %e, <4 x i16>* %p) {
+ %ld.a = load i16, i16* %a
+ %ld.b = load i16, i16* %b
+ %ld.c = load i16, i16* %c
+ %ld.d = load i16, i16* %d
+ %v.a = insertelement <4 x i16> undef, i16 %ld.a, i64 0
+ %v.b = insertelement <4 x i16> %v.a, i16 %ld.b, i64 1
+ %v.c = insertelement <4 x i16> %v.b, i16 %ld.c, i64 2
+ %v = insertelement <4 x i16> %v.c, i16 %ld.d, i64 3
+ %sub = sub nsw <4 x i16> %v, %e
+ store <4 x i16> %sub, <4 x i16>* %p
+ ret void
+}
+
+; CHECK-LABEL: test_ld1lane_build_half:
+; CHECK-DAG: ldr h[[REGNUM1:[0-9]+]], [x0]
+; CHECK-DAG: ld1.h { v[[REGNUM1]] }[1], [x1]
+; CHECK-DAG: ld1.h { v[[REGNUM1]] }[2], [x2]
+; CHECK-DAG: ld1.h { v[[REGNUM1]] }[3], [x3]
+; CHECK-DAG: fcvtl v[[REGNUM01:[0-9]+]].4s, v0.4h
+; CHECK-DAG: fcvtl v[[REGNUM11:[0-9]+]].4s, v[[REGNUM1]].4h
+; CHECK: fsub.4s v[[REGNUM2:[0-9]+]], v[[REGNUM11]], v[[REGNUM01]]
+; CHECK-DAG: fcvtn v[[REGNUM3:[0-9]+]].4h, v[[REGNUM2]].4s
+; CHECK-NEXT: str d[[REGNUM3]], [x4]
+; CHECK-NEXT: ret
+define void @test_ld1lane_build_half(half* %a, half* %b, half* %c, half* %d, <4 x half> %e, <4 x half>* %p) {
+ %ld.a = load half, half* %a
+ %ld.b = load half, half* %b
+ %ld.c = load half, half* %c
+ %ld.d = load half, half* %d
+ %v.a = insertelement <4 x half> undef, half %ld.a, i64 0
+ %v.b = insertelement <4 x half> %v.a, half %ld.b, i64 1
+ %v.c = insertelement <4 x half> %v.b, half %ld.c, i64 2
+ %v = insertelement <4 x half> %v.c, half %ld.d, i64 3
+ %sub = fsub <4 x half> %v, %e
+ store <4 x half> %sub, <4 x half>* %p
+ ret void
+}
+
+; CHECK-LABEL: test_ld1lane_build_i8:
+; CHECK-DAG: ldr b[[REGNUM1:[0-9]+]], [x0]
+; CHECK-DAG: ld1.b { v[[REGNUM1]] }[1], [x1]
+; CHECK-DAG: ld1.b { v[[REGNUM1]] }[2], [x2]
+; CHECK-DAG: ld1.b { v[[REGNUM1]] }[3], [x3]
+; CHECK-DAG: ld1.b { v[[REGNUM1]] }[4], [x4]
+; CHECK-DAG: ld1.b { v[[REGNUM1]] }[5], [x5]
+; CHECK-DAG: ld1.b { v[[REGNUM1]] }[6], [x6]
+; CHECK-DAG: ld1.b { v[[REGNUM1]] }[7], [x7]
+; CHECK: sub.8b v[[REGNUM2:[0-9]+]], v[[REGNUM1]], v0
+; CHECK-NEXT: str d[[REGNUM2]], [x
+; CHECK-NEXT: ret
+define void @test_ld1lane_build_i8(i8* %a, i8* %b, i8* %c, i8* %d, i8* %e, i8* %f, i8* %g, i8* %h, <8 x i8> %v, <8 x i8>* %p) {
+ %ld.a = load i8, i8* %a
+ %ld.b = load i8, i8* %b
+ %ld.c = load i8, i8* %c
+ %ld.d = load i8, i8* %d
+ %ld.e = load i8, i8* %e
+ %ld.f = load i8, i8* %f
+ %ld.g = load i8, i8* %g
+ %ld.h = load i8, i8* %h
+ %v.a = insertelement <8 x i8> undef, i8 %ld.a, i64 0
+ %v.b = insertelement <8 x i8> %v.a, i8 %ld.b, i64 1
+ %v.c = insertelement <8 x i8> %v.b, i8 %ld.c, i64 2
+ %v.d = insertelement <8 x i8> %v.c, i8 %ld.d, i64 3
+ %v.e = insertelement <8 x i8> %v.d, i8 %ld.e, i64 4
+ %v.f = insertelement <8 x i8> %v.e, i8 %ld.f, i64 5
+ %v.g = insertelement <8 x i8> %v.f, i8 %ld.g, i64 6
+ %v1 = insertelement <8 x i8> %v.g, i8 %ld.h, i64 7
+ %sub = sub nsw <8 x i8> %v1, %v
+ store <8 x i8> %sub, <8 x i8>* %p
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-inline-asm.ll b/test/CodeGen/AArch64/arm64-inline-asm.ll
index f3f359380440..f28d0ab07c5a 100644
--- a/test/CodeGen/AArch64/arm64-inline-asm.ll
+++ b/test/CodeGen/AArch64/arm64-inline-asm.ll
@@ -236,14 +236,14 @@ define void @test_zero_reg(i32* %addr) {
define <2 x float> @test_vreg_64bit(<2 x float> %in) nounwind {
; CHECK-LABEL: test_vreg_64bit:
%1 = tail call <2 x float> asm sideeffect "fadd ${0}.2s, ${1}.2s, ${1}.2s", "={v14},w"(<2 x float> %in) nounwind
- ; CHECK fadd v14.2s, v0.2s, v0.2s:
+ ; CHECK: fadd v14.2s, v0.2s, v0.2s
ret <2 x float> %1
}
define <4 x float> @test_vreg_128bit(<4 x float> %in) nounwind {
; CHECK-LABEL: test_vreg_128bit:
%1 = tail call <4 x float> asm sideeffect "fadd ${0}.4s, ${1}.4s, ${1}.4s", "={v14},w"(<4 x float> %in) nounwind
- ; CHECK fadd v14.4s, v0.4s, v0.4s:
+ ; CHECK: fadd v14.4s, v0.4s, v0.4s
ret <4 x float> %1
}
diff --git a/test/CodeGen/AArch64/arm64-memset-inline.ll b/test/CodeGen/AArch64/arm64-memset-inline.ll
index 8f22f97ca087..384aaa8541df 100644
--- a/test/CodeGen/AArch64/arm64-memset-inline.ll
+++ b/test/CodeGen/AArch64/arm64-memset-inline.ll
@@ -13,8 +13,8 @@ define void @t2() nounwind ssp {
entry:
; CHECK-LABEL: t2:
; CHECK: strh wzr, [sp, #32]
-; CHECK: stp xzr, xzr, [sp, #16]
-; CHECK: str xzr, [sp, #8]
+; CHECK: stp xzr, xzr, [sp, #8]
+; CHECK: str xzr, [sp, #24]
%buf = alloca [26 x i8], align 1
%0 = getelementptr inbounds [26 x i8], [26 x i8]* %buf, i32 0, i32 0
call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 26, i32 1, i1 false)
diff --git a/test/CodeGen/AArch64/arm64-movi.ll b/test/CodeGen/AArch64/arm64-movi.ll
index c24490665d62..8d6caa81d978 100644
--- a/test/CodeGen/AArch64/arm64-movi.ll
+++ b/test/CodeGen/AArch64/arm64-movi.ll
@@ -51,24 +51,24 @@ define i32 @movz() nounwind {
define i64 @movz_3movk() nounwind {
; CHECK-LABEL: movz_3movk:
-; CHECK: mov x0, #1407374883553280
-; CHECK-NEXT: movk x0, #4660, lsl #32
+; CHECK: mov x0, #22136
; CHECK-NEXT: movk x0, #43981, lsl #16
-; CHECK-NEXT: movk x0, #22136
+; CHECK-NEXT: movk x0, #4660, lsl #32
+; CHECK-NEXT: movk x0, #5, lsl #48
ret i64 1427392313513592
}
define i64 @movz_movk_skip1() nounwind {
; CHECK-LABEL: movz_movk_skip1:
-; CHECK: mov x0, #21474836480
-; CHECK-NEXT: movk x0, #17185, lsl #16
+; CHECK: mov x0, #1126236160
+; CHECK-NEXT: movk x0, #5, lsl #32
ret i64 22601072640
}
define i64 @movz_skip1_movk() nounwind {
; CHECK-LABEL: movz_skip1_movk:
-; CHECK: mov x0, #147695335374848
-; CHECK-NEXT: movk x0, #4660
+; CHECK: mov x0, #4660
+; CHECK-NEXT: movk x0, #34388, lsl #32
ret i64 147695335379508
}
@@ -84,8 +84,8 @@ define i64 @movn() nounwind {
define i64 @movn_skip1_movk() nounwind {
; CHECK-LABEL: movn_skip1_movk:
-; CHECK: mov x0, #-176093659137
-; CHECK-NEXT: movk x0, #4660
+; CHECK: mov x0, #-60876
+; CHECK-NEXT: movk x0, #65494, lsl #32
ret i64 -176093720012
}
@@ -195,8 +195,8 @@ define i64 @orr_movk13() nounwind {
; rdar://13944082
define i64 @g() nounwind {
; CHECK-LABEL: g:
-; CHECK: mov x0, #-281474976710656
-; CHECK: movk x0, #2
+; CHECK: mov x0, #2
+; CHECK: movk x0, #65535, lsl #48
entry:
ret i64 -281474976710654
}
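For the movz-based cases here, the immediates are just the non-zero 16-bit chunks of the return value, materialized low to high. Worked out for movz_3movk:

  1427392313513592 = 0x0005_1234_ABCD_5678
    bits [15:0]  = 0x5678 = 22136  ->  mov  x0, #22136
    bits [31:16] = 0xABCD = 43981  ->  movk x0, #43981, lsl #16
    bits [47:32] = 0x1234 = 4660   ->  movk x0, #4660, lsl #32
    bits [63:48] = 0x0005 = 5      ->  movk x0, #5, lsl #48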
diff --git a/test/CodeGen/AArch64/arm64-neon-copy.ll b/test/CodeGen/AArch64/arm64-neon-copy.ll
index 8d9a8c06aa3c..a7b95e717910 100644
--- a/test/CodeGen/AArch64/arm64-neon-copy.ll
+++ b/test/CodeGen/AArch64/arm64-neon-copy.ll
@@ -906,7 +906,7 @@ define <8 x i8> @getl(<16 x i8> %x) #0 {
; CHECK: str q0
; CHECK-DAG: and [[MASKED_IDX:x[0-9]+]], x0, #0x7
; CHECK: bfi [[PTR:x[0-9]+]], [[MASKED_IDX]], #1, #3
-; CHECK-DAG: ld1 { v[[R:[0-9]+]].h }[0], {{\[}}[[PTR]]{{\]}}
+; CHECK-DAG: ldr h[[R:[0-9]+]], {{\[}}[[PTR]]{{\]}}
; CHECK-DAG: ins v[[R]].h[1], v0.h[1]
; CHECK-DAG: ins v[[R]].h[2], v0.h[2]
; CHECK-DAG: ins v[[R]].h[3], v0.h[3]
diff --git a/test/CodeGen/AArch64/arm64-neon-v8.1a.ll b/test/CodeGen/AArch64/arm64-neon-v8.1a.ll
index 45dba479ccc4..ae087ab8cf05 100644
--- a/test/CodeGen/AArch64/arm64-neon-v8.1a.ll
+++ b/test/CodeGen/AArch64/arm64-neon-v8.1a.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=generic | FileCheck %s --check-prefix=CHECK-V8a
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -mattr=+rdm -aarch64-neon-syntax=generic | FileCheck %s --check-prefix=CHECK-V81a
; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -mattr=+v8.1a -aarch64-neon-syntax=generic | FileCheck %s --check-prefix=CHECK-V81a
; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -mattr=+v8.1a -aarch64-neon-syntax=apple | FileCheck %s --check-prefix=CHECK-V81a-apple
diff --git a/test/CodeGen/AArch64/arm64-opt-remarks-lazy-bfi.ll b/test/CodeGen/AArch64/arm64-opt-remarks-lazy-bfi.ll
new file mode 100644
index 000000000000..7efb4bf6d596
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-opt-remarks-lazy-bfi.ll
@@ -0,0 +1,63 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -pass-remarks-analysis=asm-printer \
+; RUN: -pass-remarks-with-hotness=1 -asm-verbose=0 \
+; RUN: -debug-only=lazy-machine-block-freq,block-freq \
+; RUN: -debug-pass=Executions 2>&1 | FileCheck %s -check-prefix=HOTNESS
+
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -pass-remarks-analysis=asm-printer \
+; RUN: -pass-remarks-with-hotness=0 -asm-verbose=0 \
+; RUN: -debug-only=lazy-machine-block-freq,block-freq \
+; RUN: -debug-pass=Executions 2>&1 | FileCheck %s -check-prefix=NO_HOTNESS
+
+; REQUIRES: asserts
+
+
+; Verify that we don't newly populate MachineBFI for passes that already use
+; MBFI, e.g. GreedyRegAlloc. (This hard-codes the previous pass to the
+; GreedyRegAlloc, please adjust accordingly.)
+
+; HOTNESS: Executing Pass 'Spill Code Placement Analysis'
+; HOTNESS-NEXT: Executing Pass 'Lazy Machine Block Frequency Analysis'
+; HOTNESS-NEXT: Executing Pass 'Machine Optimization Remark Emitter'
+; HOTNESS-NEXT: MachineBlockFrequencyInfo is available
+; HOTNESS-NEXT: Executing Pass 'Greedy Register Allocator'
+
+
+; Verify that we only populate MachineBFI on behalf of ORE when hotness is
+; requested. (This hard-codes the previous pass to the Assembly Printer,
+; please adjust accordingly.)
+
+; HOTNESS: Executing Pass 'Implement the 'patchable-function' attribute'
+; HOTNESS-NEXT: Freeing Pass 'Implement the 'patchable-function' attribute'
+; HOTNESS-NEXT: Executing Pass 'Lazy Machine Block Frequency Analysis'
+; HOTNESS-NEXT: Executing Pass 'Machine Optimization Remark Emitter'
+; HOTNESS-NEXT: Building MachineBlockFrequencyInfo on the fly
+; HOTNESS-NEXT: Building LoopInfo on the fly
+; HOTNESS-NEXT: Building DominatorTree on the fly
+; HOTNESS-NOT: Executing Pass
+; HOTNESS: block-frequency: empty_func
+; HOTNESS-NOT: Executing Pass
+; HOTNESS: Executing Pass 'AArch64 Assembly Printer'
+
+; HOTNESS: arm64-summary-remarks.ll:5:0: 1 instructions in function (hotness: 33)
+
+
+; NO_HOTNESS: Executing Pass 'Implement the 'patchable-function' attribute'
+; NO_HOTNESS-NEXT: Freeing Pass 'Implement the 'patchable-function' attribute'
+; NO_HOTNESS-NEXT: Executing Pass 'Lazy Machine Block Frequency Analysis'
+; NO_HOTNESS-NEXT: Executing Pass 'Machine Optimization Remark Emitter'
+; NO_HOTNESS-NEXT: Executing Pass 'AArch64 Assembly Printer'
+
+; NO_HOTNESS: arm64-summary-remarks.ll:5:0: 1 instructions in function{{$}}
+
+define void @empty_func() nounwind ssp !dbg !3 !prof !4 {
+ ret void
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1)
+!1 = !DIFile(filename: "arm64-summary-remarks.ll", directory: "")
+!2 = !{i32 2, !"Debug Info Version", i32 3}
+!3 = distinct !DISubprogram(name: "empty_func", scope: !1, file: !1, line: 5, scopeLine: 5, unit: !0)
+!4 = !{!"function_entry_count", i64 33}
diff --git a/test/CodeGen/AArch64/arm64-regress-opt-cmp.mir b/test/CodeGen/AArch64/arm64-regress-opt-cmp.mir
index bda025af5193..9ad47c721c3a 100644
--- a/test/CodeGen/AArch64/arm64-regress-opt-cmp.mir
+++ b/test/CodeGen/AArch64/arm64-regress-opt-cmp.mir
@@ -1,4 +1,4 @@
-# RUN: llc -mtriple=aarch64-linux-gnu -run-pass peephole-opt -o - %s 2>&1 | FileCheck %s
+# RUN: llc -mtriple=aarch64-linux-gnu -run-pass peephole-opt -o - %s | FileCheck %s
# CHECK: %1 = ANDWri {{.*}}
# CHECK-NEXT: %wzr = SUBSWri {{.*}}
--- |
diff --git a/test/CodeGen/AArch64/arm64-shrink-wrapping.ll b/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
index 255cd8e4a0d3..4df220eddbbb 100644
--- a/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
+++ b/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
@@ -346,19 +346,15 @@ entry:
; CHECK-NEXT: sub w1, w1, #1
; CHECK-NEXT: add [[SUM]], [[SUM]], [[VA_VAL]]
; CHECK-NEXT: cbnz w1, [[LOOP_LABEL]]
-; DISABLE-NEXT: b [[IFEND_LABEL]]
-;
-; DISABLE: [[ELSE_LABEL]]: ; %if.else
-; DISABLE: lsl w0, w1, #1
-;
-; CHECK: [[IFEND_LABEL]]:
+; CHECK-NEXT: [[IFEND_LABEL]]:
; Epilogue code.
; CHECK: add sp, sp, #16
; CHECK-NEXT: ret
;
-; ENABLE: [[ELSE_LABEL]]: ; %if.else
-; ENABLE-NEXT: lsl w0, w1, #1
-; ENABLE_NEXT: ret
+; CHECK: [[ELSE_LABEL]]: ; %if.else
+; CHECK-NEXT: lsl w0, w1, #1
+; DISABLE-NEXT: add sp, sp, #16
+; CHECK-NEXT: ret
define i32 @variadicFunc(i32 %cond, i32 %count, ...) #0 {
entry:
%ap = alloca i8*, align 8
diff --git a/test/CodeGen/AArch64/arm64-spill-remarks.ll b/test/CodeGen/AArch64/arm64-spill-remarks.ll
new file mode 100644
index 000000000000..bc9340352d75
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-spill-remarks.ll
@@ -0,0 +1,117 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -aarch64-neon-syntax=apple -pass-remarks-missed=regalloc 2>&1 | FileCheck -check-prefix=REMARK %s
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -aarch64-neon-syntax=apple -pass-remarks-missed=regalloc -pass-remarks-with-hotness 2>&1 | FileCheck -check-prefix=HOTNESS %s
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -aarch64-neon-syntax=apple 2>&1 | FileCheck -check-prefix=NO_REMARK %s
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -aarch64-neon-syntax=apple -pass-remarks-output=%t.yaml -pass-remarks-with-hotness 2>&1 | FileCheck -check-prefix=NO_REMARK %s
+; RUN: cat %t.yaml | FileCheck -check-prefix=YAML %s
+
+; This has two nested loops and a third separate one; each loop has values
+; that have to be spilled and then reloaded.
+
+; (loop3:)
+; REMARK: remark: /tmp/kk.c:3:20: 1 spills 1 reloads generated in loop{{$}}
+; (loop2:)
+; REMARK: remark: /tmp/kk.c:2:20: 1 spills 1 reloads generated in loop{{$}}
+; (loop:)
+; REMARK: remark: /tmp/kk.c:1:20: 2 spills 2 reloads generated in loop{{$}}
+
+; (loop3:)
+; HOTNESS: remark: /tmp/kk.c:3:20: 1 spills 1 reloads generated in loop (hotness: 300)
+; (loop2:)
+; HOTNESS: remark: /tmp/kk.c:2:20: 1 spills 1 reloads generated in loop (hotness: 30000)
+; (loop:)
+; HOTNESS: remark: /tmp/kk.c:1:20: 2 spills 2 reloads generated in loop (hotness: 300)
+
+; NO_REMARK-NOT: remark
+
+; YAML: --- !Missed
+; YAML: Pass: regalloc
+; YAML: Name: LoopSpillReload
+; YAML: DebugLoc: { File: /tmp/kk.c, Line: 3, Column: 20 }
+; YAML: Function: fpr128
+; YAML: Hotness: 300
+; YAML: Args:
+; YAML: - NumSpills: '1'
+; YAML: - String: ' spills '
+; YAML: - NumReloads: '1'
+; YAML: - String: ' reloads '
+; YAML: - String: generated in loop
+; YAML: ...
+; YAML: --- !Missed
+; YAML: Pass: regalloc
+; YAML: Name: LoopSpillReload
+; YAML: DebugLoc: { File: /tmp/kk.c, Line: 2, Column: 20 }
+; YAML: Function: fpr128
+; YAML: Hotness: 30000
+; YAML: Args:
+; YAML: - NumSpills: '1'
+; YAML: - String: ' spills '
+; YAML: - NumReloads: '1'
+; YAML: - String: ' reloads '
+; YAML: - String: generated in loop
+; YAML: ...
+; YAML: --- !Missed
+; YAML: Pass: regalloc
+; YAML: Name: LoopSpillReload
+; YAML: DebugLoc: { File: /tmp/kk.c, Line: 1, Column: 20 }
+; YAML: Function: fpr128
+; YAML: Hotness: 300
+; YAML: Args:
+; YAML: - NumSpills: '2'
+; YAML: - String: ' spills '
+; YAML: - NumReloads: '2'
+; YAML: - String: ' reloads '
+; YAML: - String: generated in loop
+; YAML: ...
+
+define void @fpr128(<4 x float>* %p) nounwind ssp !prof !11 {
+entry:
+ br label %loop, !dbg !8
+
+loop:
+ %i = phi i32 [ 0, %entry], [ %i.2, %end2 ]
+ br label %loop2, !dbg !9
+
+loop2:
+ %j = phi i32 [ 0, %loop], [ %j.2, %loop2 ]
+ call void asm sideeffect "; inlineasm", "~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{q16},~{q17},~{q18},~{q19},~{q20},~{q21},~{q22},~{q23},~{q24},~{q25},~{q26},~{q27},~{q28},~{q29},~{q30},~{q31},~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{fp},~{lr},~{sp},~{memory}"() nounwind
+ %j.2 = add i32 %j, 1
+ %c2 = icmp slt i32 %j.2, 100
+ br i1 %c2, label %loop2, label %end2, !prof !12
+
+end2:
+ call void asm sideeffect "; inlineasm", "~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{q16},~{q17},~{q18},~{q19},~{q20},~{q21},~{q22},~{q23},~{q24},~{q25},~{q26},~{q27},~{q28},~{q29},~{q30},~{q31},~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{fp},~{lr},~{sp},~{memory}"() nounwind
+ %i.2 = add i32 %i, 1
+ %c = icmp slt i32 %i.2, 100
+ br i1 %c, label %loop, label %end, !prof !12
+
+end:
+ br label %loop3
+
+loop3:
+ %k = phi i32 [ 0, %end], [ %k.2, %loop3 ]
+ call void asm sideeffect "; inlineasm", "~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{q16},~{q17},~{q18},~{q19},~{q20},~{q21},~{q22},~{q23},~{q24},~{q25},~{q26},~{q27},~{q28},~{q29},~{q30},~{q31},~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{fp},~{lr},~{sp},~{memory}"() nounwind
+ %k.2 = add i32 %k, 1
+ %c3 = icmp slt i32 %k.2, 100
+ br i1 %c3, label %loop3, label %end3, !dbg !10, !prof !12
+
+end3:
+ ret void
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+!llvm.ident = !{!5}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.9.0 ", isOptimized: true, runtimeVersion: 0, emissionKind: NoDebug, enums: !2)
+!1 = !DIFile(filename: "/tmp/kk.c", directory: "/tmp")
+!2 = !{}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !{i32 1, !"PIC Level", i32 2}
+!5 = !{!"clang version 3.9.0 "}
+!6 = distinct !DISubprogram(name: "success", scope: !1, file: !1, line: 1, type: !7, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !2)
+!7 = !DISubroutineType(types: !2)
+!8 = !DILocation(line: 1, column: 20, scope: !6)
+!9 = !DILocation(line: 2, column: 20, scope: !6)
+!10 = !DILocation(line: 3, column: 20, scope: !6)
+!11 = !{!"function_entry_count", i64 3}
+!12 = !{!"branch_weights", i32 99, i32 1}
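The inline asm in each loop clobbers every general-purpose and vector register, so any value live across it has to be spilled beforehand and reloaded afterwards; those are the spills and reloads the remarks count. A rough GNU C analogue (a sketch with a hypothetical name; only a few clobbers are listed here, where the test clobbers all of them):

    void spill_demo(void) {
      for (int i = 0; i < 100; i++)
        /* a wide enough clobber list forces i to be kept in memory */
        __asm__ volatile("" ::: "x8", "x9", "x10", "x11", "memory");
    }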
diff --git a/test/CodeGen/AArch64/arm64-summary-remarks.ll b/test/CodeGen/AArch64/arm64-summary-remarks.ll
new file mode 100644
index 000000000000..70e7fdffd63d
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-summary-remarks.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -pass-remarks-analysis=asm-printer 2>&1 | FileCheck %s
+
+; CHECK: arm64-summary-remarks.ll:5:0: 1 instructions in function
+
+define void @empty_func() nounwind ssp !dbg !3 {
+ ret void
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1)
+!1 = !DIFile(filename: "arm64-summary-remarks.ll", directory: "")
+!2 = !{i32 2, !"Debug Info Version", i32 3}
+!3 = distinct !DISubprogram(name: "empty_func", scope: !1, file: !1, line: 5, scopeLine: 5, unit: !0)
diff --git a/test/CodeGen/AArch64/arm64-variadic-aapcs.ll b/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
index 16ddf690fe95..375877c51798 100644
--- a/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
+++ b/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
@@ -99,7 +99,7 @@ define void @test_nospare([8 x i64], [8 x float], ...) {
; __stack field should point just past them.
define void @test_offsetstack([8 x i64], [2 x i64], [3 x float], ...) {
; CHECK-LABEL: test_offsetstack:
-; CHECK: sub sp, sp, #80
+; CHECK: stp {{q[0-9]+}}, {{q[0-9]+}}, [sp, #-80]!
; CHECK: add [[STACK_TOP:x[0-9]+]], sp, #96
; CHECK: add x[[VAR:[0-9]+]], {{x[0-9]+}}, :lo12:var
; CHECK: str [[STACK_TOP]], [x[[VAR]]]
diff --git a/test/CodeGen/AArch64/bitfield-insert.ll b/test/CodeGen/AArch64/bitfield-insert.ll
index 735be244d457..42b0051a2dd6 100644
--- a/test/CodeGen/AArch64/bitfield-insert.ll
+++ b/test/CodeGen/AArch64/bitfield-insert.ll
@@ -428,8 +428,8 @@ define i32 @test5(i32 %a) {
; BFXIL will use the same constant as the ORR, so we don't care how the constant
; is materialized (it's an equal cost either way).
; CHECK-LABEL: @test6
-; CHECK: mov [[REG:w[0-9]+]], #720896
-; CHECK: movk [[REG]], #23250
+; CHECK: mov [[REG:w[0-9]+]], #23250
+; CHECK: movk [[REG]], #11, lsl #16
; CHECK: bfxil w0, [[REG]], #0, #20
define i32 @test6(i32 %a) {
%1 = and i32 %a, 4293918720 ; 0xfff00000
@@ -440,8 +440,8 @@ define i32 @test6(i32 %a) {
; BFIs that require the same number of instructions to materialize the constant
; as the original ORR are okay.
; CHECK-LABEL: @test7
-; CHECK: mov [[REG:w[0-9]+]], #327680
-; CHECK: movk [[REG]], #44393
+; CHECK: mov [[REG:w[0-9]+]], #44393
+; CHECK: movk [[REG]], #5, lsl #16
; CHECK: bfi w0, [[REG]], #1, #19
define i32 @test7(i32 %a) {
%1 = and i32 %a, 4293918721 ; 0xfff00001
@@ -454,9 +454,9 @@ define i32 @test7(i32 %a) {
; 'and' with a 'movk', which would decrease ILP while using the same number of
; instructions.
; CHECK-LABEL: @test8
-; CHECK: mov [[REG2:x[0-9]+]], #157599529959424
+; CHECK: mov [[REG2:x[0-9]+]], #2035482624
; CHECK: and [[REG1:x[0-9]+]], x0, #0xff000000000000ff
-; CHECK: movk [[REG2]], #31059, lsl #16
+; CHECK: movk [[REG2]], #36694, lsl #32
; CHECK: orr x0, [[REG1]], [[REG2]]
define i64 @test8(i64 %a) {
%1 = and i64 %a, -72057594037927681 ; 0xff000000000000ff
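In these rewritten tests the materialized constant is unchanged; only the order flips to low-halfword-first. For test6 the two CHECK immediates reassemble as follows (worked sketch, hypothetical helper):

    #include <stdint.h>
    /* mov w, #23250; movk w, #11, lsl #16  ==  0x000B5AD2 */
    uint32_t test6_imm(void) {
      return (uint32_t)23250 | (uint32_t)11 << 16;  /* 744146 */
    }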
diff --git a/test/CodeGen/AArch64/blockaddress.ll b/test/CodeGen/AArch64/blockaddress.ll
index 7c0755a13d0e..3683332c2c64 100644
--- a/test/CodeGen/AArch64/blockaddress.ll
+++ b/test/CodeGen/AArch64/blockaddress.ll
@@ -14,10 +14,10 @@ define void @test_blockaddress() {
; CHECK: ldr [[NEWDEST:x[0-9]+]]
; CHECK: br [[NEWDEST]]
-; CHECK-LARGE: movz [[ADDR_REG:x[0-9]+]], #:abs_g3:[[DEST_LBL:.Ltmp[0-9]+]]
-; CHECK-LARGE: movk [[ADDR_REG]], #:abs_g2_nc:[[DEST_LBL]]
+; CHECK-LARGE: movz [[ADDR_REG:x[0-9]+]], #:abs_g0_nc:[[DEST_LBL:.Ltmp[0-9]+]]
; CHECK-LARGE: movk [[ADDR_REG]], #:abs_g1_nc:[[DEST_LBL]]
-; CHECK-LARGE: movk [[ADDR_REG]], #:abs_g0_nc:[[DEST_LBL]]
+; CHECK-LARGE: movk [[ADDR_REG]], #:abs_g2_nc:[[DEST_LBL]]
+; CHECK-LARGE: movk [[ADDR_REG]], #:abs_g3:[[DEST_LBL]]
; CHECK-LARGE: str [[ADDR_REG]],
; CHECK-LARGE: ldr [[NEWDEST:x[0-9]+]]
; CHECK-LARGE: br [[NEWDEST]]
diff --git a/test/CodeGen/AArch64/br-cond-not-merge.ll b/test/CodeGen/AArch64/br-cond-not-merge.ll
new file mode 100644
index 000000000000..bf21ef307905
--- /dev/null
+++ b/test/CodeGen/AArch64/br-cond-not-merge.ll
@@ -0,0 +1,94 @@
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK --check-prefix=OPT %s
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs -O0 -fast-isel=0 < %s | FileCheck --check-prefix=CHECK --check-prefix=NOOPT %s
+
+declare void @foo()
+
+; Check that the inverted 'or' doesn't inhibit the splitting of the
+; complex conditional into three branch instructions.
+; CHECK-LABEL: test_and_not:
+; CHECK: cbz w0, [[L:\.LBB[0-9_]+]]
+; OPT: cmp w1, #2
+; NOOPT: subs w{{[0-9]+}}, w{{[0-9]+}}, #2
+; CHECK: b.lo [[L]]
+; OPT: cmp w2, #2
+; NOOPT: subs w{{[0-9]+}}, w{{[0-9]+}}, #2
+; CHECK: b.hi [[L]]
+define void @test_and_not(i32 %a, i32 %b, i32 %c) {
+bb1:
+ %cmp1 = icmp ult i32 %a, 1
+ %cmp2 = icmp ult i32 %b, 2
+ %cmp3 = icmp ult i32 %c, 3
+ %or = or i1 %cmp1, %cmp2
+ %not.or = xor i1 %or, -1
+ %and = and i1 %not.or, %cmp3
+ br i1 %and, label %bb2, label %bb3
+
+bb2:
+ ret void
+
+bb3:
+ call void @foo()
+ ret void
+}
+
+; Check that a non-canonical 'xor' not (constant operand first) is handled correctly by FindMergedConditions.
+; CHECK-LABEL: test_and_not2:
+; CHECK: cbz w0, [[L:\.LBB[0-9_]+]]
+; OPT: cmp w1, #2
+; NOOPT: subs w{{[0-9]+}}, w{{[0-9]+}}, #2
+; CHECK: b.lo [[L]]
+; OPT: cmp w2, #2
+; NOOPT: subs w{{[0-9]+}}, w{{[0-9]+}}, #2
+; CHECK: b.hi [[L]]
+define void @test_and_not2(i32 %a, i32 %b, i32 %c) {
+bb1:
+ %cmp1 = icmp ult i32 %a, 1
+ %cmp2 = icmp ult i32 %b, 2
+ %cmp3 = icmp ult i32 %c, 3
+ %or = or i1 %cmp1, %cmp2
+ %not.or = xor i1 -1, %or
+ %and = and i1 %not.or, %cmp3
+ br i1 %and, label %bb2, label %bb3
+
+bb2:
+ ret void
+
+bb3:
+ call void @foo()
+ ret void
+}
+
+; Check that cmps in different blocks are handled correctly by FindMergedConditions.
+; CHECK-LABEL: test_cmp_other_block:
+; OPT: cmp w{{[0-9]+}}, #0
+; OPT: b.gt [[L:\.LBB[0-9_]+]]
+; OPT: tbz w1, #0, [[L]]
+;
+; NOOPT: subs w{{[0-9]+}}, w{{[0-9]+}}, #0
+; NOOPT: cset [[R1:w[0-9]+]], gt
+; NOOPT: str w1, [sp, #[[SLOT2:[0-9]+]]]
+; NOOPT: str [[R1]], [sp, #[[SLOT1:[0-9]+]]]
+; NOOPT: b .LBB
+; NOOPT: ldr [[R2:w[0-9]+]], [sp, #[[SLOT1]]]
+; NOOPT: tbnz [[R2]], #0, [[L:\.LBB[0-9_]+]]
+; NOOPT: ldr [[R3:w[0-9]+]], [sp, #[[SLOT2]]]
+; NOOPT: tbz [[R3]], #0, [[L]]
+define void @test_cmp_other_block(i32* %p, i1 %c) {
+entry:
+ %l = load i32, i32* %p
+ %cmp = icmp sgt i32 %l, 0
+ br label %bb1
+
+bb1:
+ %cmp.i = xor i1 %cmp, true
+ %or.cond1.i = and i1 %cmp.i, %c
+ br i1 %or.cond1.i, label %bb2, label %bb3
+
+bb2:
+ ret void
+
+bb3:
+ call void @foo()
+ ret void
+}
+
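A compilable C form of test_and_not's condition (a sketch; foo is the external function the test declares):

    void foo(void);
    /* !(a < 1 || b < 2) && c < 3 should split into a cbz plus two b.cc. */
    void test_and_not(unsigned a, unsigned b, unsigned c) {
      if (!(a < 1u || b < 2u) && c < 3u)
        return;   /* bb2 */
      foo();      /* bb3 */
    }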
diff --git a/test/CodeGen/AArch64/branch-relax-cbz.ll b/test/CodeGen/AArch64/branch-relax-cbz.ll
index c654b94e49cf..d13c0f677bcb 100644
--- a/test/CodeGen/AArch64/branch-relax-cbz.ll
+++ b/test/CodeGen/AArch64/branch-relax-cbz.ll
@@ -6,23 +6,22 @@
; CHECK-NEXT: ; BB#1: ; %b3
; CHECK: ldr [[LOAD:w[0-9]+]]
-; CHECK: cbz [[LOAD]], [[SKIP_LONG_B:LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: b [[B8:LBB[0-9]+_[0-9]+]]
-
-; CHECK-NEXT: [[SKIP_LONG_B]]:
+; CHECK: cbnz [[LOAD]], [[B8:LBB[0-9]+_[0-9]+]]
; CHECK-NEXT: b [[B7:LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: [[B8]]: ; %b8
+; CHECK-NEXT: ret
+
; CHECK-NEXT: [[B2]]: ; %b2
; CHECK: mov w{{[0-9]+}}, #93
; CHECK: bl _extfunc
; CHECK: cbz w{{[0-9]+}}, [[B7]]
-
-; CHECK-NEXT: [[B8]]: ; %b8
-; CHECK-NEXT: ret
+; CHECK-NEXT: b [[B8]]
; CHECK-NEXT: [[B7]]: ; %b7
; CHECK: mov w{{[0-9]+}}, #13
; CHECK: b _extfunc
+
define void @split_block_no_fallthrough(i64 %val) #0 {
bb:
%c0 = icmp sgt i64 %val, -5
diff --git a/test/CodeGen/AArch64/code-model-large-abs.ll b/test/CodeGen/AArch64/code-model-large-abs.ll
index 1680815d93ea..82169acc3e56 100644
--- a/test/CodeGen/AArch64/code-model-large-abs.ll
+++ b/test/CodeGen/AArch64/code-model-large-abs.ll
@@ -9,10 +9,10 @@ define i8* @global_addr() {
; CHECK-LABEL: global_addr:
ret i8* @var8
; The movz/movk calculation should end up returned directly in x0.
-; CHECK: movz x0, #:abs_g3:var8
-; CHECK: movk x0, #:abs_g2_nc:var8
+; CHECK: movz x0, #:abs_g0_nc:var8
; CHECK: movk x0, #:abs_g1_nc:var8
-; CHECK: movk x0, #:abs_g0_nc:var8
+; CHECK: movk x0, #:abs_g2_nc:var8
+; CHECK: movk x0, #:abs_g3:var8
; CHECK-NEXT: ret
}
@@ -20,10 +20,10 @@ define i8 @global_i8() {
; CHECK-LABEL: global_i8:
%val = load i8, i8* @var8
ret i8 %val
-; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var8
-; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var8
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:var8
; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var8
-; CHECK: movk x[[ADDR_REG]], #:abs_g0_nc:var8
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var8
+; CHECK: movk x[[ADDR_REG]], #:abs_g3:var8
; CHECK: ldrb w0, [x[[ADDR_REG]]]
}
@@ -31,10 +31,10 @@ define i16 @global_i16() {
; CHECK-LABEL: global_i16:
%val = load i16, i16* @var16
ret i16 %val
-; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var16
-; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var16
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:var16
; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var16
-; CHECK: movk x[[ADDR_REG]], #:abs_g0_nc:var16
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var16
+; CHECK: movk x[[ADDR_REG]], #:abs_g3:var16
; CHECK: ldrh w0, [x[[ADDR_REG]]]
}
@@ -42,10 +42,10 @@ define i32 @global_i32() {
; CHECK-LABEL: global_i32:
%val = load i32, i32* @var32
ret i32 %val
-; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var32
-; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var32
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:var32
; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var32
-; CHECK: movk x[[ADDR_REG]], #:abs_g0_nc:var32
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var32
+; CHECK: movk x[[ADDR_REG]], #:abs_g3:var32
; CHECK: ldr w0, [x[[ADDR_REG]]]
}
@@ -53,9 +53,9 @@ define i64 @global_i64() {
; CHECK-LABEL: global_i64:
%val = load i64, i64* @var64
ret i64 %val
-; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var64
-; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var64
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:var64
; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var64
-; CHECK: movk x[[ADDR_REG]], #:abs_g0_nc:var64
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var64
+; CHECK: movk x[[ADDR_REG]], #:abs_g3:var64
; CHECK: ldr x0, [x[[ADDR_REG]]]
}
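As in the other large-code-model tests, the relocation order now starts from the low halfword: movz with :abs_g0_nc:, then movk for g1, g2 and finally g3. What the four-instruction sequence computes, as a sketch with the address's 16-bit groups as parameters (hypothetical helper):

    #include <stdint.h>
    uint64_t materialize(uint16_t g0, uint16_t g1, uint16_t g2, uint16_t g3) {
      uint64_t x = g0;            /* movz: bits [15:0]  (:abs_g0_nc:) */
      x |= (uint64_t)g1 << 16;    /* movk: bits [31:16] (:abs_g1_nc:) */
      x |= (uint64_t)g2 << 32;    /* movk: bits [47:32] (:abs_g2_nc:) */
      x |= (uint64_t)g3 << 48;    /* movk: bits [63:48] (:abs_g3:)    */
      return x;
    }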
diff --git a/test/CodeGen/AArch64/concat_vector-scalar-combine.ll b/test/CodeGen/AArch64/concat_vector-scalar-combine.ll
index 1c64af636cb3..3abb14241ea0 100644
--- a/test/CodeGen/AArch64/concat_vector-scalar-combine.ll
+++ b/test/CodeGen/AArch64/concat_vector-scalar-combine.ll
@@ -38,7 +38,7 @@ entry:
define <8 x i8> @test_concat_scalars_2x_v2i8_to_v8i8(i32 %x, i32 %y) #0 {
entry:
; CHECK-LABEL: test_concat_scalars_2x_v2i8_to_v8i8:
-; CHECK-NEXT: ins.h v0[0], w0
+; CHECK-NEXT: fmov s0, w0
; CHECK-NEXT: ins.h v0[1], w1
; CHECK-NEXT: ins.h v0[3], w1
; CHECK-NEXT: ret
@@ -84,7 +84,7 @@ define <8 x i8> @test_concat_scalars_mixed_2x_v2i8_to_v8i8(float %dummy, i32 %x,
entry:
; CHECK-LABEL: test_concat_scalars_mixed_2x_v2i8_to_v8i8:
; CHECK-NEXT: fmov s[[X:[0-9]+]], w0
-; CHECK-NEXT: ins.h v0[0], v[[X]][0]
+; CHECK-NEXT: mov.16b v0, v[[X]]
; CHECK-NEXT: ins.h v0[1], v1[0]
; CHECK-NEXT: ins.h v0[2], v[[X]][0]
; CHECK-NEXT: ins.h v0[3], v1[0]
@@ -99,7 +99,7 @@ entry:
define <2 x float> @test_concat_scalars_fp_2x_v2i8_to_v8i8(float %dummy, half %x, half %y) #0 {
entry:
; CHECK-LABEL: test_concat_scalars_fp_2x_v2i8_to_v8i8:
-; CHECK-NEXT: ins.h v0[0], v1[0]
+; CHECK-NEXT: mov.16b v0, v1
; CHECK-NEXT: ins.h v0[1], v2[0]
; CHECK-NEXT: ins.h v0[2], v1[0]
; CHECK-NEXT: ins.h v0[3], v2[0]
diff --git a/test/CodeGen/AArch64/cpus.ll b/test/CodeGen/AArch64/cpus.ll
index 50685cf5d343..f65144def245 100644
--- a/test/CodeGen/AArch64/cpus.ll
+++ b/test/CodeGen/AArch64/cpus.ll
@@ -12,7 +12,7 @@
; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=exynos-m3 2>&1 | FileCheck %s
; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=falkor 2>&1 | FileCheck %s
; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=kryo 2>&1 | FileCheck %s
-; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=vulcan 2>&1 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=thunderx2t99 2>&1 | FileCheck %s
; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=invalidcpu 2>&1 | FileCheck %s --check-prefix=INVALID
; CHECK-NOT: {{.*}} is not a recognized processor for this target
diff --git a/test/CodeGen/AArch64/dag-numsignbits.ll b/test/CodeGen/AArch64/dag-numsignbits.ll
new file mode 100644
index 000000000000..217c3df77c9c
--- /dev/null
+++ b/test/CodeGen/AArch64/dag-numsignbits.ll
@@ -0,0 +1,33 @@
+; RUN: llc < %s -mtriple=aarch64-unknown | FileCheck %s
+
+; PR32273
+
+define void @signbits_vXi1(<4 x i16> %a1) {
+; CHECK-LABEL: signbits_vXi1
+; CHECK: cmgt v0.4h, v1.4h, v0.4h
+; CHECK-NEXT: and v0.8b, v0.8b, v2.8b
+; CHECK-NEXT: shl v0.4h, v0.4h, #15
+; CHECK-NEXT: sshr v0.4h, v0.4h, #15
+; CHECK-NEXT: umov w0, v0.h[0]
+; CHECK-NEXT: umov w3, v0.h[3]
+; CHECK-NEXT: mov w1, wzr
+; CHECK-NEXT: mov w2, wzr
+; CHECK-NEXT: b foo
+ %tmp3 = shufflevector <4 x i16> %a1, <4 x i16> undef, <4 x i32> zeroinitializer
+ %tmp5 = add <4 x i16> %tmp3, <i16 18249, i16 6701, i16 -18744, i16 -25086>
+ %tmp6 = icmp slt <4 x i16> %tmp5, <i16 1, i16 1, i16 1, i16 1>
+ %tmp7 = and <4 x i1> %tmp6, <i1 true, i1 false, i1 false, i1 true>
+ %tmp8 = sext <4 x i1> %tmp7 to <4 x i16>
+ %tmp9 = extractelement <4 x i16> %tmp8, i32 0
+ %tmp10 = zext i16 %tmp9 to i32
+ %tmp11 = extractelement <4 x i16> %tmp8, i32 1
+ %tmp12 = zext i16 %tmp11 to i32
+ %tmp13 = extractelement <4 x i16> %tmp8, i32 2
+ %tmp14 = zext i16 %tmp13 to i32
+ %tmp15 = extractelement <4 x i16> %tmp8, i32 3
+ %tmp16 = zext i16 %tmp15 to i32
+ tail call void @foo(i32 %tmp10, i32 %tmp12, i32 %tmp14, i32 %tmp16)
+ ret void
+}
+
+declare void @foo(i32, i32, i32, i32)
diff --git a/test/CodeGen/AArch64/eliminate-trunc.ll b/test/CodeGen/AArch64/eliminate-trunc.ll
index bc4ac7d71704..83730d15d7f5 100644
--- a/test/CodeGen/AArch64/eliminate-trunc.ll
+++ b/test/CodeGen/AArch64/eliminate-trunc.ll
@@ -6,7 +6,7 @@
; CHECK-NOT: add {{x[0-9]+}}, {{x[0-9]+}}, #1
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #1
; CHECK-NEXT: cmp {{w[0-9]+}}, {{w[0-9]+}}
-define void @test1_signed([8 x i8]* nocapture %a, i8* nocapture readonly %box, i8 %limit) minsize {
+define void @test1_signed([8 x i8]* nocapture %a, i8* nocapture readonly %box, i8 %limit, i64 %inv) minsize {
entry:
%conv = zext i8 %limit to i32
%cmp223 = icmp eq i8 %limit, 0
@@ -14,7 +14,7 @@ entry:
for.body4.us:
%indvars.iv = phi i64 [ 0, %for.body4.lr.ph.us ], [ %indvars.iv.next, %for.body4.us ]
- %arrayidx6.us = getelementptr inbounds [8 x i8], [8 x i8]* %a, i64 %indvars.iv26, i64 %indvars.iv
+ %arrayidx6.us = getelementptr inbounds [8 x i8], [8 x i8]* %a, i64 %indvars.iv, i64 %inv
%0 = load i8, i8* %arrayidx6.us, align 1
%idxprom7.us = zext i8 %0 to i64
%arrayidx8.us = getelementptr inbounds i8, i8* %box, i64 %idxprom7.us
diff --git a/test/CodeGen/AArch64/extern-weak.ll b/test/CodeGen/AArch64/extern-weak.ll
index 921009cf821d..ac2153ad8ffe 100644
--- a/test/CodeGen/AArch64/extern-weak.ll
+++ b/test/CodeGen/AArch64/extern-weak.ll
@@ -16,10 +16,10 @@ define i32()* @foo() {
; In the large model, the usual relocations are absolute and can
; materialise 0.
-; CHECK-LARGE: movz x0, #:abs_g3:var
-; CHECK-LARGE: movk x0, #:abs_g2_nc:var
+; CHECK-LARGE: movz x0, #:abs_g0_nc:var
; CHECK-LARGE: movk x0, #:abs_g1_nc:var
-; CHECK-LARGE: movk x0, #:abs_g0_nc:var
+; CHECK-LARGE: movk x0, #:abs_g2_nc:var
+; CHECK-LARGE: movk x0, #:abs_g3:var
}
@@ -37,10 +37,10 @@ define i32* @bar() {
; In the large model, the usual relocations are absolute and can
; materialise 0.
-; CHECK-LARGE: movz [[ADDR:x[0-9]+]], #:abs_g3:arr_var
-; CHECK-LARGE: movk [[ADDR]], #:abs_g2_nc:arr_var
+; CHECK-LARGE: movz [[ADDR:x[0-9]+]], #:abs_g0_nc:arr_var
; CHECK-LARGE: movk [[ADDR]], #:abs_g1_nc:arr_var
-; CHECK-LARGE: movk [[ADDR]], #:abs_g0_nc:arr_var
+; CHECK-LARGE: movk [[ADDR]], #:abs_g2_nc:arr_var
+; CHECK-LARGE: movk [[ADDR]], #:abs_g3:arr_var
}
@defined_weak_var = internal unnamed_addr global i32 0
@@ -51,8 +51,8 @@ define i32* @wibble() {
; CHECK: adrp [[BASE:x[0-9]+]], defined_weak_var
; CHECK: add x0, [[BASE]], :lo12:defined_weak_var
-; CHECK-LARGE: movz x0, #:abs_g3:defined_weak_var
-; CHECK-LARGE: movk x0, #:abs_g2_nc:defined_weak_var
+; CHECK-LARGE: movz x0, #:abs_g0_nc:defined_weak_var
; CHECK-LARGE: movk x0, #:abs_g1_nc:defined_weak_var
-; CHECK-LARGE: movk x0, #:abs_g0_nc:defined_weak_var
+; CHECK-LARGE: movk x0, #:abs_g2_nc:defined_weak_var
+; CHECK-LARGE: movk x0, #:abs_g3:defined_weak_var
}
diff --git a/test/CodeGen/AArch64/fast-isel-tail-call.ll b/test/CodeGen/AArch64/fast-isel-tail-call.ll
new file mode 100644
index 000000000000..0efaa3734486
--- /dev/null
+++ b/test/CodeGen/AArch64/fast-isel-tail-call.ll
@@ -0,0 +1,24 @@
+; RUN: llc -fast-isel -pass-remarks-missed=isel -pass-remarks-missed=isel \
+; RUN: -mtriple arm64-- < %s 2> %t | FileCheck %s
+; RUN: cat %t | FileCheck %s --check-prefix MISSED
+
+%struct = type { [4 x i32] }
+
+declare %struct @external()
+
+; Check that, when fastisel falls back to SDAG, we don't emit instructions
+; that follow a tail-call and would have been dropped by pure SDAGISel.
+
+; Here, the %struct extractvalue should fail FastISel.
+
+; MISSED: FastISel missed: %tmp1 = extractvalue %struct %tmp0, 0
+
+; CHECK-LABEL: test:
+; CHECK: b external
+; CHECK-NEXT: .Lfunc_end0:
+define i32 @test() nounwind {
+ %tmp0 = tail call %struct @external()
+ %tmp1 = extractvalue %struct %tmp0, 0
+ %tmp2 = extractvalue [4 x i32] %tmp1, 0
+ ret i32 %tmp2
+}
diff --git a/test/CodeGen/AArch64/fast-isel-tbz.ll b/test/CodeGen/AArch64/fast-isel-tbz.ll
index af817777143d..d6d10318bf02 100644
--- a/test/CodeGen/AArch64/fast-isel-tbz.ll
+++ b/test/CodeGen/AArch64/fast-isel-tbz.ll
@@ -278,8 +278,24 @@ bb2:
; Test that we don't fold the 'and' instruction into the compare.
define i32 @icmp_eq_and_i32(i32 %a, i1 %c) {
; CHECK-LABEL: icmp_eq_and_i32
-; CHECK: and [[REG:w[0-9]+]], w0, #0x4
+; CHECK: and [[REG:w[0-9]+]], w0, #0x3
; CHECK-NEXT: cbz [[REG]], {{LBB.+_3}}
+ %1 = and i32 %a, 3
+ br i1 %c, label %bb0, label %bb2
+bb0:
+ %2 = icmp eq i32 %1, 0
+ br i1 %2, label %bb1, label %bb2, !prof !0
+bb1:
+ ret i32 1
+bb2:
+ ret i32 0
+}
+
+; Test that we do fold the 'and' instruction into the compare and
+; generate a tbz instruction for the conditional branch.
+define i32 @icmp_eq_and1bit_i32(i32 %a, i1 %c) {
+; CHECK-LABEL: icmp_eq_and1bit_i32
+; CHECK: tbz {{w[0-9]+}}, #2, {{LBB.+_3}}
%1 = and i32 %a, 4
br i1 %c, label %bb0, label %bb2
bb0:
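The new tbz test works because (a & 4) == 0 inspects a single bit, bit 2. A C sketch of the fold (hypothetical name):

    /* (a & 4) == 0 tests only bit #2, so it can lower to tbz w0, #2, ... */
    int bit2_clear(unsigned a) {
      return (a & 4u) == 0;
    }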
diff --git a/test/CodeGen/AArch64/fpimm.ll b/test/CodeGen/AArch64/fpimm.ll
index b4faef750a2c..d19777c4d27e 100644
--- a/test/CodeGen/AArch64/fpimm.ll
+++ b/test/CodeGen/AArch64/fpimm.ll
@@ -38,18 +38,18 @@ define void @check_double() {
}
; LARGE-LABEL: check_float2
-; LARGE: mov [[REG:w[0-9]+]], #1078525952
-; LARGE-NEXT: movk [[REG]], #4059
+; LARGE: mov [[REG:w[0-9]+]], #4059
+; LARGE-NEXT: movk [[REG]], #16457, lsl #16
; LARGE-NEXT: fmov s0, [[REG]]
define float @check_float2() {
ret float 3.14159274101257324218750
}
; LARGE-LABEL: check_double2
-; LARGE: mov [[REG:x[0-9]+]], #4614219293217783808
-; LARGE-NEXT: movk [[REG]], #8699, lsl #32
+; LARGE: mov [[REG:x[0-9]+]], #11544
; LARGE-NEXT: movk [[REG]], #21572, lsl #16
-; LARGE-NEXT: movk [[REG]], #11544
+; LARGE-NEXT: movk [[REG]], #8699, lsl #32
+; LARGE-NEXT: movk [[REG]], #16393, lsl #48
; LARGE-NEXT: fmov d0, [[REG]]
define double @check_double2() {
ret double 3.1415926535897931159979634685441851615905761718750
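check_float2's two halfwords reassemble the bit pattern of float pi: (16457 << 16) | 4059 = 0x40490FDB. A worked sketch mirroring the mov/movk pair (hypothetical helper):

    #include <stdint.h>
    #include <string.h>
    float check_float2_value(void) {
      uint32_t bits = (uint32_t)16457 << 16 | 4059;  /* 0x40490FDB */
      float f;
      memcpy(&f, &bits, sizeof f);
      return f;                                      /* 3.14159274f */
    }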
diff --git a/test/CodeGen/AArch64/jump-table.ll b/test/CodeGen/AArch64/jump-table.ll
index d6a7fceac84d..f71d4356be35 100644
--- a/test/CodeGen/AArch64/jump-table.ll
+++ b/test/CodeGen/AArch64/jump-table.ll
@@ -16,10 +16,10 @@ define i32 @test_jumptable(i32 %in) {
; CHECK: ldr [[DEST:x[0-9]+]], [x[[JT]], {{x[0-9]+}}, lsl #3]
; CHECK: br [[DEST]]
-; CHECK-LARGE: movz x[[JTADDR:[0-9]+]], #:abs_g3:.LJTI0_0
-; CHECK-LARGE: movk x[[JTADDR]], #:abs_g2_nc:.LJTI0_0
+; CHECK-LARGE: movz x[[JTADDR:[0-9]+]], #:abs_g0_nc:.LJTI0_0
; CHECK-LARGE: movk x[[JTADDR]], #:abs_g1_nc:.LJTI0_0
-; CHECK-LARGE: movk x[[JTADDR]], #:abs_g0_nc:.LJTI0_0
+; CHECK-LARGE: movk x[[JTADDR]], #:abs_g2_nc:.LJTI0_0
+; CHECK-LARGE: movk x[[JTADDR]], #:abs_g3:.LJTI0_0
; CHECK-LARGE: ldr [[DEST:x[0-9]+]], [x[[JTADDR]], {{x[0-9]+}}, lsl #3]
; CHECK-LARGE: br [[DEST]]
diff --git a/test/CodeGen/AArch64/large-consts.ll b/test/CodeGen/AArch64/large-consts.ll
index 6bf85e829f61..e351c3530696 100644
--- a/test/CodeGen/AArch64/large-consts.ll
+++ b/test/CodeGen/AArch64/large-consts.ll
@@ -5,10 +5,10 @@
define double @foo() {
-; CHECK: movz [[CPADDR:x[0-9]+]], #:abs_g3:.LCPI0_0 // encoding: [0bAAA01000,A,0b111AAAAA,0xd2]
-; CHECK: movk [[CPADDR]], #:abs_g2_nc:.LCPI0_0 // encoding: [0bAAA01000,A,0b110AAAAA,0xf2]
+; CHECK: movz [[CPADDR:x[0-9]+]], #:abs_g0_nc:.LCPI0_0 // encoding: [0bAAA01000,A,0b100AAAAA,0xd2]
; CHECK: movk [[CPADDR]], #:abs_g1_nc:.LCPI0_0 // encoding: [0bAAA01000,A,0b101AAAAA,0xf2]
-; CHECK: movk [[CPADDR]], #:abs_g0_nc:.LCPI0_0 // encoding: [0bAAA01000,A,0b100AAAAA,0xf2]
+; CHECK: movk [[CPADDR]], #:abs_g2_nc:.LCPI0_0 // encoding: [0bAAA01000,A,0b110AAAAA,0xf2]
+; CHECK: movk [[CPADDR]], #:abs_g3:.LCPI0_0 // encoding: [0bAAA01000,A,0b111AAAAA,0xf2]
ret double 3.14159
}
diff --git a/test/CodeGen/AArch64/ldst-opt-aa.mir b/test/CodeGen/AArch64/ldst-opt-aa.mir
new file mode 100644
index 000000000000..808926ae3cd1
--- /dev/null
+++ b/test/CodeGen/AArch64/ldst-opt-aa.mir
@@ -0,0 +1,30 @@
+# RUN: llc -mtriple=aarch64--linux-gnu -run-pass=aarch64-ldst-opt %s -verify-machineinstrs -o - | FileCheck %s
+--- |
+ define void @ldr_str_aa(i32* noalias nocapture %x, i32* noalias nocapture readonly %y) {
+ entry:
+ %0 = load i32, i32* %y, align 4
+ store i32 %0, i32* %x, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %y, i32 1
+ %1 = load i32, i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32, i32* %x, i32 1
+ store i32 %1, i32* %arrayidx3, align 4
+ ret void
+ }
+
+...
+---
+# CHECK-LABEL: name: ldr_str_aa
+# CHECK: %w8, %w9 = LDPWi %x1, 0
+# CHECK: STPWi %w8, %w9, %x0, 0
+name: ldr_str_aa
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: %x0, %x1
+
+ %w8 = LDRWui %x1, 0 :: (load 4 from %ir.y)
+ STRWui killed %w8, %x0, 0 :: (store 4 into %ir.x)
+ %w9 = LDRWui killed %x1, 1 :: (load 4 from %ir.arrayidx2)
+ STRWui killed %w9, killed %x0, 1 :: (store 4 into %ir.arrayidx3)
+ RET undef %lr
+
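The embedded IR corresponds to roughly this C (a sketch): with restrict-qualified pointers the load/store optimizer may pair the accesses into LDP/STP even though a store sits between the two loads:

    void ldr_str_aa(int *restrict x, const int *restrict y) {
      x[0] = y[0];   /* noalias lets the pass reorder and pair these */
      x[1] = y[1];
    }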
diff --git a/test/CodeGen/AArch64/ldst-opt.mir b/test/CodeGen/AArch64/ldst-opt.mir
index 85b655b717ca..f7641d3ffd04 100644
--- a/test/CodeGen/AArch64/ldst-opt.mir
+++ b/test/CodeGen/AArch64/ldst-opt.mir
@@ -1,4 +1,4 @@
-# RUN: llc -mtriple=aarch64--linux-gnu -run-pass=aarch64-ldst-opt %s -verify-machineinstrs -o - 2>&1 | FileCheck %s
+# RUN: llc -mtriple=aarch64--linux-gnu -run-pass=aarch64-ldst-opt %s -verify-machineinstrs -o - | FileCheck %s
---
name: promote-load-from-store
tracksRegLiveness: true
diff --git a/test/CodeGen/AArch64/literal_pools_float.ll b/test/CodeGen/AArch64/literal_pools_float.ll
index f5d6a17f3a11..6ad685ad7c49 100644
--- a/test/CodeGen/AArch64/literal_pools_float.ll
+++ b/test/CodeGen/AArch64/literal_pools_float.ll
@@ -15,10 +15,10 @@ define void @floating_lits() {
; CHECK: ldr [[LIT128:s[0-9]+]], [x[[LITBASE]], {{#?}}:lo12:[[CURLIT]]]
; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
-; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI[0-9]+_[0-9]+]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
+; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g0_nc:[[CURLIT:.LCPI[0-9]+_[0-9]+]]
; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g3:[[CURLIT]]
; CHECK-LARGE: ldr {{s[0-9]+}}, [x[[LITADDR]]]
; CHECK-LARGE: fadd
; CHECK-NOFP-LARGE-NOT: ldr {{s[0-9]+}},
@@ -33,10 +33,10 @@ define void @floating_lits() {
; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
; CHECK-NOFP-NOT: fadd
-; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI[0-9]+_[0-9]+]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
+; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g0_nc:[[CURLIT:.LCPI[0-9]+_[0-9]+]]
; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g3:[[CURLIT]]
; CHECK-LARGE: ldr {{d[0-9]+}}, [x[[LITADDR]]]
; CHECK-NOFP-LARGE-NOT: ldr {{d[0-9]+}},
diff --git a/test/CodeGen/AArch64/live-interval-analysis.mir b/test/CodeGen/AArch64/live-interval-analysis.mir
new file mode 100644
index 000000000000..d44300973566
--- /dev/null
+++ b/test/CodeGen/AArch64/live-interval-analysis.mir
@@ -0,0 +1,22 @@
+# RUN: llc -o /dev/null %s -mtriple=aarch64-darwin-ios -run-pass=liveintervals -debug-only=regalloc -precompute-phys-liveness 2>&1 | FileCheck %s
+# REQUIRES: asserts
+--- |
+ define void @reserved_reg_liveness() { ret void }
+...
+---
+# CHECK-LABEL: ********** INTERVALS **********
+# W29 is reserved, so we should only see dead defs
+# CHECK-DAG: W29 [0B,0d:{{[0-9]+}})[32r,32d:{{[0-9]+}})[64r,64d:{{[0-9]+}})
+# For normal registers like x28 we should see the full intervals
+# CHECK-DAG: W28 [0B,16r:{{[0-9]+}})[32r,48r:{{[0-9]+}})[48r,48d:{{[0-9]+}})
+# CHECK: # End machine code for function reserved_reg_liveness.
+name: reserved_reg_liveness
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: %x28_fp
+ %6 : xseqpairsclass = COPY %x28_fp
+ %x28_fp = COPY %6
+ %x28 = COPY %x28
+ %fp = COPY %fp
+...
diff --git a/test/CodeGen/AArch64/load-combine-big-endian.ll b/test/CodeGen/AArch64/load-combine-big-endian.ll
new file mode 100644
index 000000000000..918ceaeb1b4f
--- /dev/null
+++ b/test/CodeGen/AArch64/load-combine-big-endian.ll
@@ -0,0 +1,584 @@
+; RUN: llc < %s -mtriple=arm64eb-unknown | FileCheck %s
+
+; i8* p; // p is 4 byte aligned
+; ((i32) p[0] << 24) | ((i32) p[1] << 16) | ((i32) p[2] << 8) | (i32) p[3]
+define i32 @load_i32_by_i8_big_endian(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_big_endian:
+; CHECK: ldr w0, [x0]
+; CHECK-NEXT: ret
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 4
+ %tmp2 = zext i8 %tmp1 to i32
+ %tmp3 = shl nuw nsw i32 %tmp2, 24
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 16
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 8
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = or i32 %tmp13, %tmp16
+ ret i32 %tmp17
+}
+
+; i8* p; // p is 4 byte aligned
+; ((i32) (((i16) p[0] << 8) | (i16) p[1]) << 16) | (i32) (((i16) p[2] << 8) | (i16) p[3])
+define i32 @load_i32_by_i16_by_i8_big_endian(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i16_by_i8_big_endian:
+; CHECK: ldr w0, [x0]
+; CHECK-NEXT: ret
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 4
+ %tmp2 = zext i8 %tmp1 to i16
+ %tmp3 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp4 = load i8, i8* %tmp3, align 1
+ %tmp5 = zext i8 %tmp4 to i16
+ %tmp6 = shl nuw nsw i16 %tmp2, 8
+ %tmp7 = or i16 %tmp6, %tmp5
+ %tmp8 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp9 = load i8, i8* %tmp8, align 1
+ %tmp10 = zext i8 %tmp9 to i16
+ %tmp11 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp12 = load i8, i8* %tmp11, align 1
+ %tmp13 = zext i8 %tmp12 to i16
+ %tmp14 = shl nuw nsw i16 %tmp10, 8
+ %tmp15 = or i16 %tmp14, %tmp13
+ %tmp16 = zext i16 %tmp7 to i32
+ %tmp17 = zext i16 %tmp15 to i32
+ %tmp18 = shl nuw nsw i32 %tmp16, 16
+ %tmp19 = or i32 %tmp18, %tmp17
+ ret i32 %tmp19
+}
+
+; i16* p; // p is 4 byte aligned
+; ((i32) p[0] << 16) | (i32) p[1]
+define i32 @load_i32_by_i16(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i16:
+; CHECK: ldr w0, [x0]
+; CHECK-NEXT: ret
+ %tmp = bitcast i32* %arg to i16*
+ %tmp1 = load i16, i16* %tmp, align 4
+ %tmp2 = zext i16 %tmp1 to i32
+ %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
+ %tmp4 = load i16, i16* %tmp3, align 1
+ %tmp5 = zext i16 %tmp4 to i32
+ %tmp6 = shl nuw nsw i32 %tmp2, 16
+ %tmp7 = or i32 %tmp6, %tmp5
+ ret i32 %tmp7
+}
+
+; i16* p_16; // p_16 is 4 byte aligned
+; i8* p_8 = (i8*) p_16;
+; ((i32) p_16[0] << 16) | ((i32) p_8[2] << 8) | (i32) p_8[3]
+define i32 @load_i32_by_i16_i8(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i16_i8:
+; CHECK: ldr w0, [x0]
+; CHECK-NEXT: ret
+ %tmp = bitcast i32* %arg to i16*
+ %tmp1 = bitcast i32* %arg to i8*
+ %tmp2 = load i16, i16* %tmp, align 4
+ %tmp3 = zext i16 %tmp2 to i32
+ %tmp4 = shl nuw nsw i32 %tmp3, 16
+ %tmp5 = getelementptr inbounds i8, i8* %tmp1, i32 2
+ %tmp6 = load i8, i8* %tmp5, align 1
+ %tmp7 = zext i8 %tmp6 to i32
+ %tmp8 = shl nuw nsw i32 %tmp7, 8
+ %tmp9 = getelementptr inbounds i8, i8* %tmp1, i32 3
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = or i32 %tmp8, %tmp11
+ %tmp13 = or i32 %tmp12, %tmp4
+ ret i32 %tmp13
+}
+
+; i8* p; // p is 8 byte aligned
+; (i64) p[0] | ((i64) p[1] << 8) | ((i64) p[2] << 16) | ((i64) p[3] << 24) | ((i64) p[4] << 32) | ((i64) p[5] << 40) | ((i64) p[6] << 48) | ((i64) p[7] << 56)
+define i64 @load_i64_by_i8_bswap(i64* %arg) {
+; CHECK-LABEL: load_i64_by_i8_bswap:
+; CHECK: ldr x8, [x0]
+; CHECK-NEXT: rev x0, x8
+; CHECK-NEXT: ret
+ %tmp = bitcast i64* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 8
+ %tmp2 = zext i8 %tmp1 to i64
+ %tmp3 = getelementptr inbounds i8, i8* %tmp, i64 1
+ %tmp4 = load i8, i8* %tmp3, align 1
+ %tmp5 = zext i8 %tmp4 to i64
+ %tmp6 = shl nuw nsw i64 %tmp5, 8
+ %tmp7 = or i64 %tmp6, %tmp2
+ %tmp8 = getelementptr inbounds i8, i8* %tmp, i64 2
+ %tmp9 = load i8, i8* %tmp8, align 1
+ %tmp10 = zext i8 %tmp9 to i64
+ %tmp11 = shl nuw nsw i64 %tmp10, 16
+ %tmp12 = or i64 %tmp7, %tmp11
+ %tmp13 = getelementptr inbounds i8, i8* %tmp, i64 3
+ %tmp14 = load i8, i8* %tmp13, align 1
+ %tmp15 = zext i8 %tmp14 to i64
+ %tmp16 = shl nuw nsw i64 %tmp15, 24
+ %tmp17 = or i64 %tmp12, %tmp16
+ %tmp18 = getelementptr inbounds i8, i8* %tmp, i64 4
+ %tmp19 = load i8, i8* %tmp18, align 1
+ %tmp20 = zext i8 %tmp19 to i64
+ %tmp21 = shl nuw nsw i64 %tmp20, 32
+ %tmp22 = or i64 %tmp17, %tmp21
+ %tmp23 = getelementptr inbounds i8, i8* %tmp, i64 5
+ %tmp24 = load i8, i8* %tmp23, align 1
+ %tmp25 = zext i8 %tmp24 to i64
+ %tmp26 = shl nuw nsw i64 %tmp25, 40
+ %tmp27 = or i64 %tmp22, %tmp26
+ %tmp28 = getelementptr inbounds i8, i8* %tmp, i64 6
+ %tmp29 = load i8, i8* %tmp28, align 1
+ %tmp30 = zext i8 %tmp29 to i64
+ %tmp31 = shl nuw nsw i64 %tmp30, 48
+ %tmp32 = or i64 %tmp27, %tmp31
+ %tmp33 = getelementptr inbounds i8, i8* %tmp, i64 7
+ %tmp34 = load i8, i8* %tmp33, align 1
+ %tmp35 = zext i8 %tmp34 to i64
+ %tmp36 = shl nuw i64 %tmp35, 56
+ %tmp37 = or i64 %tmp32, %tmp36
+ ret i64 %tmp37
+}
+
+; i8* p; // p is 8 byte aligned
+; ((i64) p[0] << 56) | ((i64) p[1] << 48) | ((i64) p[2] << 40) | ((i64) p[3] << 32) | ((i64) p[4] << 24) | ((i64) p[5] << 16) | ((i64) p[6] << 8) | (i64) p[7]
+define i64 @load_i64_by_i8(i64* %arg) {
+; CHECK-LABEL: load_i64_by_i8:
+; CHECK: ldr x0, [x0]
+; CHECK-NEXT: ret
+ %tmp = bitcast i64* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 8
+ %tmp2 = zext i8 %tmp1 to i64
+ %tmp3 = shl nuw i64 %tmp2, 56
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i64 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i64
+ %tmp7 = shl nuw nsw i64 %tmp6, 48
+ %tmp8 = or i64 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i64 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i64
+ %tmp12 = shl nuw nsw i64 %tmp11, 40
+ %tmp13 = or i64 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i64 3
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i64
+ %tmp17 = shl nuw nsw i64 %tmp16, 32
+ %tmp18 = or i64 %tmp13, %tmp17
+ %tmp19 = getelementptr inbounds i8, i8* %tmp, i64 4
+ %tmp20 = load i8, i8* %tmp19, align 1
+ %tmp21 = zext i8 %tmp20 to i64
+ %tmp22 = shl nuw nsw i64 %tmp21, 24
+ %tmp23 = or i64 %tmp18, %tmp22
+ %tmp24 = getelementptr inbounds i8, i8* %tmp, i64 5
+ %tmp25 = load i8, i8* %tmp24, align 1
+ %tmp26 = zext i8 %tmp25 to i64
+ %tmp27 = shl nuw nsw i64 %tmp26, 16
+ %tmp28 = or i64 %tmp23, %tmp27
+ %tmp29 = getelementptr inbounds i8, i8* %tmp, i64 6
+ %tmp30 = load i8, i8* %tmp29, align 1
+ %tmp31 = zext i8 %tmp30 to i64
+ %tmp32 = shl nuw nsw i64 %tmp31, 8
+ %tmp33 = or i64 %tmp28, %tmp32
+ %tmp34 = getelementptr inbounds i8, i8* %tmp, i64 7
+ %tmp35 = load i8, i8* %tmp34, align 1
+ %tmp36 = zext i8 %tmp35 to i64
+ %tmp37 = or i64 %tmp33, %tmp36
+ ret i64 %tmp37
+}
+
+; i8* p; // p[1] is 4 byte aligned
+; (i32) p[1] | ((i32) p[2] << 8) | ((i32) p[3] << 16) | ((i32) p[4] << 24)
+define i32 @load_i32_by_i8_nonzero_offset(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_nonzero_offset:
+; CHECK: ldur w8, [x0, #1]
+; CHECK-NEXT: rev w0, w8
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 4
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 4
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+; i8* p; // p[-4] is 4 byte aligned
+; (i32) p[-4] | ((i32) p[-3] << 8) | ((i32) p[-2] << 16) | ((i32) p[-1] << 24)
+define i32 @load_i32_by_i8_neg_offset(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_neg_offset:
+; CHECK: ldur w8, [x0, #-4]
+; CHECK-NEXT: rev w0, w8
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 -4
+ %tmp2 = load i8, i8* %tmp1, align 4
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 -3
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 -2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 -1
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+; i8* p; // p[1] is 4 byte aligned
+; (i32) p[4] | ((i32) p[3] << 8) | ((i32) p[2] << 16) | ((i32) p[1] << 24)
+define i32 @load_i32_by_i8_nonzero_offset_bswap(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_nonzero_offset_bswap:
+; CHECK: ldur w0, [x0, #1]
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 4
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp15 = load i8, i8* %tmp14, align 4
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+; i8* p; // p[-4] is 4 byte aligned
+; (i32) p[-1] | ((i32) p[-2] << 8) | ((i32) p[-3] << 16) | ((i32) p[-4] << 24)
+define i32 @load_i32_by_i8_neg_offset_bswap(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_neg_offset_bswap:
+; CHECK: ldur w0, [x0, #-4]
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 -1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 -2
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 -3
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 -4
+ %tmp15 = load i8, i8* %tmp14, align 4
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+declare i16 @llvm.bswap.i16(i16)
+
+; i16* p; // p is 4 byte aligned
+; (i32) bswap(p[0]) | ((i32) bswap(p[1]) << 16)
+define i32 @load_i32_by_bswap_i16(i32* %arg) {
+; CHECK-LABEL: load_i32_by_bswap_i16:
+; CHECK: ldr w8, [x0]
+; CHECK-NEXT: rev w0, w8
+; CHECK-NEXT: ret
+ %tmp = bitcast i32* %arg to i16*
+ %tmp1 = load i16, i16* %tmp, align 4
+ %tmp11 = call i16 @llvm.bswap.i16(i16 %tmp1)
+ %tmp2 = zext i16 %tmp11 to i32
+ %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
+ %tmp4 = load i16, i16* %tmp3, align 1
+ %tmp41 = call i16 @llvm.bswap.i16(i16 %tmp4)
+ %tmp5 = zext i16 %tmp41 to i32
+ %tmp6 = shl nuw nsw i32 %tmp5, 16
+ %tmp7 = or i32 %tmp6, %tmp2
+ ret i32 %tmp7
+}
+
+; i16* p; // p is 4 byte aligned
+; (i32) p[1] | ((sext(p[0]) to i32) << 16)
+define i32 @load_i32_by_sext_i16(i32* %arg) {
+; CHECK-LABEL: load_i32_by_sext_i16:
+; CHECK: ldr w0, [x0]
+; CHECK-NEXT: ret
+ %tmp = bitcast i32* %arg to i16*
+ %tmp1 = load i16, i16* %tmp, align 4
+ %tmp2 = sext i16 %tmp1 to i32
+ %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
+ %tmp4 = load i16, i16* %tmp3, align 1
+ %tmp5 = zext i16 %tmp4 to i32
+ %tmp6 = shl nuw nsw i32 %tmp2, 16
+ %tmp7 = or i32 %tmp6, %tmp5
+ ret i32 %tmp7
+}
+
+; i8* arg; i32 i;
+; p = arg + 12;
+; (i32) p[i] | ((i32) p[i + 1] << 8) | ((i32) p[i + 2] << 16) | ((i32) p[i + 3] << 24)
+define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) {
+; CHECK-LABEL: load_i32_by_i8_base_offset_index:
+; CHECK: add x8, x0, w1, uxtw
+; CHECK-NEXT: ldr w8, [x8, #12]
+; CHECK-NEXT: rev w0, w8
+; CHECK-NEXT: ret
+ %tmp = add nuw nsw i32 %i, 3
+ %tmp2 = add nuw nsw i32 %i, 2
+ %tmp3 = add nuw nsw i32 %i, 1
+ %tmp4 = getelementptr inbounds i8, i8* %arg, i64 12
+ %tmp5 = zext i32 %i to i64
+ %tmp6 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp5
+ %tmp7 = load i8, i8* %tmp6, align 4
+ %tmp8 = zext i8 %tmp7 to i32
+ %tmp9 = zext i32 %tmp3 to i64
+ %tmp10 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp9
+ %tmp11 = load i8, i8* %tmp10, align 1
+ %tmp12 = zext i8 %tmp11 to i32
+ %tmp13 = shl nuw nsw i32 %tmp12, 8
+ %tmp14 = or i32 %tmp13, %tmp8
+ %tmp15 = zext i32 %tmp2 to i64
+ %tmp16 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp15
+ %tmp17 = load i8, i8* %tmp16, align 1
+ %tmp18 = zext i8 %tmp17 to i32
+ %tmp19 = shl nuw nsw i32 %tmp18, 16
+ %tmp20 = or i32 %tmp14, %tmp19
+ %tmp21 = zext i32 %tmp to i64
+ %tmp22 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp21
+ %tmp23 = load i8, i8* %tmp22, align 1
+ %tmp24 = zext i8 %tmp23 to i32
+ %tmp25 = shl nuw i32 %tmp24, 24
+ %tmp26 = or i32 %tmp20, %tmp25
+ ret i32 %tmp26
+}
+
+; i8* arg; i32 i;
+; p = arg + 12;
+; (i32) p[i + 1] | ((i32) p[i + 2] << 8) | ((i32) p[i + 3] << 16) | ((i32) p[i + 4] << 24)
+define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
+; CHECK-LABEL: load_i32_by_i8_base_offset_index_2:
+; CHECK: add x8, x0, w1, uxtw
+; CHECK-NEXT: ldur w8, [x8, #13]
+; CHECK-NEXT: rev w0, w8
+; CHECK-NEXT: ret
+ %tmp = add nuw nsw i32 %i, 4
+ %tmp2 = add nuw nsw i32 %i, 3
+ %tmp3 = add nuw nsw i32 %i, 2
+ %tmp4 = getelementptr inbounds i8, i8* %arg, i64 12
+ %tmp5 = add nuw nsw i32 %i, 1
+ %tmp27 = zext i32 %tmp5 to i64
+ %tmp28 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp27
+ %tmp29 = load i8, i8* %tmp28, align 4
+ %tmp30 = zext i8 %tmp29 to i32
+ %tmp31 = zext i32 %tmp3 to i64
+ %tmp32 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp31
+ %tmp33 = load i8, i8* %tmp32, align 1
+ %tmp34 = zext i8 %tmp33 to i32
+ %tmp35 = shl nuw nsw i32 %tmp34, 8
+ %tmp36 = or i32 %tmp35, %tmp30
+ %tmp37 = zext i32 %tmp2 to i64
+ %tmp38 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp37
+ %tmp39 = load i8, i8* %tmp38, align 1
+ %tmp40 = zext i8 %tmp39 to i32
+ %tmp41 = shl nuw nsw i32 %tmp40, 16
+ %tmp42 = or i32 %tmp36, %tmp41
+ %tmp43 = zext i32 %tmp to i64
+ %tmp44 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp43
+ %tmp45 = load i8, i8* %tmp44, align 1
+ %tmp46 = zext i8 %tmp45 to i32
+ %tmp47 = shl nuw i32 %tmp46, 24
+ %tmp48 = or i32 %tmp42, %tmp47
+ ret i32 %tmp48
+}
+; i8* p; // p is 2 byte aligned
+; (i32) p[0] | ((i32) p[1] << 8)
+define i32 @zext_load_i32_by_i8(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8:
+; CHECK: ldrb w8, [x0]
+; CHECK-NEXT: ldrb w9, [x0, #1]
+; CHECK-NEXT: bfi w8, w9, #8, #8
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp1, align 2
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ ret i32 %tmp8
+}
+
+; i8* p; // p is 2 byte aligned
+; ((i32) p[0] << 8) | ((i32) p[1] << 16)
+define i32 @zext_load_i32_by_i8_shl_8(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_shl_8:
+; CHECK: ldrb w8, [x0]
+; CHECK-NEXT: ldrb w9, [x0, #1]
+; CHECK-NEXT: lsl w0, w8, #8
+; CHECK-NEXT: bfi w0, w9, #16, #8
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp1, align 2
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 8
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 16
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
+
+; i8* p; // p is 2 byte aligned
+; ((i32) p[0] << 16) | ((i32) p[1] << 24)
+define i32 @zext_load_i32_by_i8_shl_16(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_shl_16:
+; CHECK: ldrb w8, [x0]
+; CHECK-NEXT: ldrb w9, [x0, #1]
+; CHECK-NEXT: lsl w0, w8, #16
+; CHECK-NEXT: bfi w0, w9, #24, #8
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp1, align 2
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 16
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 24
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
+; i8* p; // p is 2 byte aligned
+; (i32) p[1] | ((i32) p[0] << 8)
+define i32 @zext_load_i32_by_i8_bswap(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_bswap:
+; CHECK: ldrb w8, [x0, #1]
+; CHECK-NEXT: ldrb w9, [x0]
+; CHECK-NEXT: bfi w8, w9, #8, #8
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp5 = load i8, i8* %tmp4, align 2
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ ret i32 %tmp8
+}
+
+; i8* p; // p is 2 byte aligned
+; ((i32) p[1] << 8) | ((i32) p[0] << 16)
+define i32 @zext_load_i32_by_i8_bswap_shl_8(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_bswap_shl_8:
+; CHECK: ldrb w8, [x0, #1]
+; CHECK-NEXT: ldrb w9, [x0]
+; CHECK-NEXT: lsl w0, w8, #8
+; CHECK-NEXT: bfi w0, w9, #16, #8
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 8
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp5 = load i8, i8* %tmp4, align 2
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 16
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
+
+; i8* p; // p is 2 byte aligned
+; ((i32) p[1] << 16) | ((i32) p[0] << 24)
+define i32 @zext_load_i32_by_i8_bswap_shl_16(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_bswap_shl_16:
+; CHECK: ldrb w8, [x0, #1]
+; CHECK-NEXT: ldrb w9, [x0]
+; CHECK-NEXT: lsl w0, w8, #16
+; CHECK-NEXT: bfi w0, w9, #24, #8
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 16
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp5 = load i8, i8* %tmp4, align 2
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 24
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
+
+; i8* p;
+; i16* p1.i16 = (i16*) p;
+; (p1.i16[0] << 8) | ((i16) p[2])
+;
+; This is essentially an i16 load from p[1], but we don't fold the pattern yet
+; because the address of p[1] is not available in the original DAG.
+define i16 @load_i16_from_nonzero_offset(i8* %p) {
+; CHECK-LABEL: load_i16_from_nonzero_offset:
+; CHECK: ldrh w8, [x0]
+; CHECK-NEXT: ldrb w0, [x0, #2]
+; CHECK-NEXT: bfi w0, w8, #8, #24
+; CHECK-NEXT: ret
+
+ %p1.i16 = bitcast i8* %p to i16*
+ %p2.i8 = getelementptr i8, i8* %p, i64 2
+ %v1 = load i16, i16* %p1.i16
+ %v2.i8 = load i8, i8* %p2.i8
+ %v2 = zext i8 %v2.i8 to i16
+ %v1.shl = shl i16 %v1, 8
+ %res = or i16 %v1.shl, %v2
+ ret i16 %res
+}
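Each test above spells the idiom out in IR; a compilable C form of the basic big-endian pattern (a sketch, hypothetical name):

    #include <stdint.h>
    /* On big-endian AArch64 this whole expression folds to a single ldr. */
    uint32_t load_u32(const uint8_t *p) {
      return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
             (uint32_t)p[2] << 8  | (uint32_t)p[3];
    }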
diff --git a/test/CodeGen/AArch64/load-combine.ll b/test/CodeGen/AArch64/load-combine.ll
new file mode 100644
index 000000000000..f0ed40357f12
--- /dev/null
+++ b/test/CodeGen/AArch64/load-combine.ll
@@ -0,0 +1,548 @@
+; RUN: llc < %s -mtriple=arm64-unknown | FileCheck %s
+
+; i8* p; // p is 1 byte aligned
+; (i32) p[0] | ((i32) p[1] << 8) | ((i32) p[2] << 16) | ((i32) p[3] << 24)
+define i32 @load_i32_by_i8_unaligned(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_unaligned:
+; CHECK: ldr w0, [x0]
+; CHECK-NEXT: ret
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
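+
+; Note: a single ldr is emitted even for this align-1 case, presumably because
+; the target permits misaligned 32-bit loads when strict alignment is not
+; requested. The combined pattern corresponds to the C sketch:
+;   i32 w;
+;   memcpy(&w, p, 4); // one (possibly unaligned) 32-bit load
+;   return w;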
+
+; i8* p; // p is 4 byte aligned
+; (i32) p[0] | ((i32) p[1] << 8) | ((i32) p[2] << 16) | ((i32) p[3] << 24)
+define i32 @load_i32_by_i8_aligned(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_aligned:
+; CHECK: ldr w0, [x0]
+; CHECK-NEXT: ret
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp, align 4
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+; i8* p; // p is 4 byte aligned
+; ((i32) p[0] << 24) | ((i32) p[1] << 16) | ((i32) p[2] << 8) | (i32) p[3]
+define i32 @load_i32_by_i8_bswap(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_bswap:
+; CHECK: ldr w8, [x0]
+; CHECK-NEXT: rev w0, w8
+; CHECK-NEXT: ret
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 4
+ %tmp2 = zext i8 %tmp1 to i32
+ %tmp3 = shl nuw nsw i32 %tmp2, 24
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 16
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 8
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = or i32 %tmp13, %tmp16
+ ret i32 %tmp17
+}
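+
+; The byte-reversed variant is recognized as one load plus a byte swap
+; (the ldr + rev checked above); a C sketch of the combined form:
+;   i32 w;
+;   memcpy(&w, p, 4);
+;   return __builtin_bswap32(w);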
+
+; i8* p; // p is 8 byte aligned
+; (i64) p[0] | ((i64) p[1] << 8) | ((i64) p[2] << 16) | ((i64) p[3] << 24) | ((i64) p[4] << 32) | ((i64) p[5] << 40) | ((i64) p[6] << 48) | ((i64) p[7] << 56)
+define i64 @load_i64_by_i8(i64* %arg) {
+; CHECK-LABEL: load_i64_by_i8:
+; CHECK: ldr x0, [x0]
+; CHECK-NEXT: ret
+ %tmp = bitcast i64* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 8
+ %tmp2 = zext i8 %tmp1 to i64
+ %tmp3 = getelementptr inbounds i8, i8* %tmp, i64 1
+ %tmp4 = load i8, i8* %tmp3, align 1
+ %tmp5 = zext i8 %tmp4 to i64
+ %tmp6 = shl nuw nsw i64 %tmp5, 8
+ %tmp7 = or i64 %tmp6, %tmp2
+ %tmp8 = getelementptr inbounds i8, i8* %tmp, i64 2
+ %tmp9 = load i8, i8* %tmp8, align 1
+ %tmp10 = zext i8 %tmp9 to i64
+ %tmp11 = shl nuw nsw i64 %tmp10, 16
+ %tmp12 = or i64 %tmp7, %tmp11
+ %tmp13 = getelementptr inbounds i8, i8* %tmp, i64 3
+ %tmp14 = load i8, i8* %tmp13, align 1
+ %tmp15 = zext i8 %tmp14 to i64
+ %tmp16 = shl nuw nsw i64 %tmp15, 24
+ %tmp17 = or i64 %tmp12, %tmp16
+ %tmp18 = getelementptr inbounds i8, i8* %tmp, i64 4
+ %tmp19 = load i8, i8* %tmp18, align 1
+ %tmp20 = zext i8 %tmp19 to i64
+ %tmp21 = shl nuw nsw i64 %tmp20, 32
+ %tmp22 = or i64 %tmp17, %tmp21
+ %tmp23 = getelementptr inbounds i8, i8* %tmp, i64 5
+ %tmp24 = load i8, i8* %tmp23, align 1
+ %tmp25 = zext i8 %tmp24 to i64
+ %tmp26 = shl nuw nsw i64 %tmp25, 40
+ %tmp27 = or i64 %tmp22, %tmp26
+ %tmp28 = getelementptr inbounds i8, i8* %tmp, i64 6
+ %tmp29 = load i8, i8* %tmp28, align 1
+ %tmp30 = zext i8 %tmp29 to i64
+ %tmp31 = shl nuw nsw i64 %tmp30, 48
+ %tmp32 = or i64 %tmp27, %tmp31
+ %tmp33 = getelementptr inbounds i8, i8* %tmp, i64 7
+ %tmp34 = load i8, i8* %tmp33, align 1
+ %tmp35 = zext i8 %tmp34 to i64
+ %tmp36 = shl nuw i64 %tmp35, 56
+ %tmp37 = or i64 %tmp32, %tmp36
+ ret i64 %tmp37
+}
+
+; i8* p; // p is 8 byte aligned
+; ((i64) p[0] << 56) | ((i64) p[1] << 48) | ((i64) p[2] << 40) | ((i64) p[3] << 32) | ((i64) p[4] << 24) | ((i64) p[5] << 16) | ((i64) p[6] << 8) | (i64) p[7]
+define i64 @load_i64_by_i8_bswap(i64* %arg) {
+; CHECK-LABEL: load_i64_by_i8_bswap:
+; CHECK: ldr x8, [x0]
+; CHECK-NEXT: rev x0, x8
+; CHECK-NEXT: ret
+ %tmp = bitcast i64* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 8
+ %tmp2 = zext i8 %tmp1 to i64
+ %tmp3 = shl nuw i64 %tmp2, 56
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i64 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i64
+ %tmp7 = shl nuw nsw i64 %tmp6, 48
+ %tmp8 = or i64 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i64 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i64
+ %tmp12 = shl nuw nsw i64 %tmp11, 40
+ %tmp13 = or i64 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i64 3
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i64
+ %tmp17 = shl nuw nsw i64 %tmp16, 32
+ %tmp18 = or i64 %tmp13, %tmp17
+ %tmp19 = getelementptr inbounds i8, i8* %tmp, i64 4
+ %tmp20 = load i8, i8* %tmp19, align 1
+ %tmp21 = zext i8 %tmp20 to i64
+ %tmp22 = shl nuw nsw i64 %tmp21, 24
+ %tmp23 = or i64 %tmp18, %tmp22
+ %tmp24 = getelementptr inbounds i8, i8* %tmp, i64 5
+ %tmp25 = load i8, i8* %tmp24, align 1
+ %tmp26 = zext i8 %tmp25 to i64
+ %tmp27 = shl nuw nsw i64 %tmp26, 16
+ %tmp28 = or i64 %tmp23, %tmp27
+ %tmp29 = getelementptr inbounds i8, i8* %tmp, i64 6
+ %tmp30 = load i8, i8* %tmp29, align 1
+ %tmp31 = zext i8 %tmp30 to i64
+ %tmp32 = shl nuw nsw i64 %tmp31, 8
+ %tmp33 = or i64 %tmp28, %tmp32
+ %tmp34 = getelementptr inbounds i8, i8* %tmp, i64 7
+ %tmp35 = load i8, i8* %tmp34, align 1
+ %tmp36 = zext i8 %tmp35 to i64
+ %tmp37 = or i64 %tmp33, %tmp36
+ ret i64 %tmp37
+}
+
+; i8* p; // p[1] is 4 byte aligned
+; (i32) p[1] | ((i32) p[2] << 8) | ((i32) p[3] << 16) | ((i32) p[4] << 24)
+define i32 @load_i32_by_i8_nonzero_offset(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_nonzero_offset:
+; CHECK: ldur w0, [x0, #1]
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 4
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 4
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
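+
+; Note: ldur is the unscaled-immediate load form; it is used here because the
+; byte offset is not a multiple of the 4-byte access size, which the scaled
+; ldr encoding would require:
+;   ldr  w0, [x0, #4]  // scaled: offset must be a non-negative multiple of 4
+;   ldur w0, [x0, #1]  // unscaled: any byte offset in [-256, 255]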
+
+; i8* p; // p[-4] is 4 byte aligned
+; (i32) p[-4] | ((i32) p[-3] << 8) | ((i32) p[-2] << 16) | ((i32) p[-1] << 24)
+define i32 @load_i32_by_i8_neg_offset(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_neg_offset:
+; CHECK: ldur w0, [x0, #-4]
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 -4
+ %tmp2 = load i8, i8* %tmp1, align 4
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 -3
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 -2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 -1
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+; i8* p; // p[1] is 4 byte aligned
+; (i32) p[4] | ((i32) p[3] << 8) | ((i32) p[2] << 16) | ((i32) p[1] << 24)
+define i32 @load_i32_by_i8_nonzero_offset_bswap(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_nonzero_offset_bswap:
+; CHECK: ldur w8, [x0, #1]
+; CHECK-NEXT: rev w0, w8
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 4
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp15 = load i8, i8* %tmp14, align 4
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+; i8* p; // p[-4] is 4 byte aligned
+; (i32) p[-1] | ((i32) p[-2] << 8) | ((i32) p[-3] << 16) | ((i32) p[-4] << 24)
+define i32 @load_i32_by_i8_neg_offset_bswap(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_neg_offset_bswap:
+; CHECK: ldur w8, [x0, #-4]
+; CHECK-NEXT: rev w0, w8
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 -1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 -2
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 -3
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 -4
+ %tmp15 = load i8, i8* %tmp14, align 4
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+declare i16 @llvm.bswap.i16(i16)
+
+; i16* p; // p is 4 byte aligned
+; (i32) bswap(p[1]) | ((i32) bswap(p[0]) << 16)
+define i32 @load_i32_by_bswap_i16(i32* %arg) {
+; CHECK-LABEL: load_i32_by_bswap_i16:
+; CHECK: ldr w8, [x0]
+; CHECK-NEXT: rev w0, w8
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i16*
+ %tmp1 = load i16, i16* %tmp, align 4
+ %tmp11 = call i16 @llvm.bswap.i16(i16 %tmp1)
+ %tmp2 = zext i16 %tmp11 to i32
+ %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
+ %tmp4 = load i16, i16* %tmp3, align 1
+ %tmp41 = call i16 @llvm.bswap.i16(i16 %tmp4)
+ %tmp5 = zext i16 %tmp41 to i32
+ %tmp6 = shl nuw nsw i32 %tmp2, 16
+ %tmp7 = or i32 %tmp6, %tmp5
+ ret i32 %tmp7
+}
+
+; i16* p; // p is 4 byte aligned
+; (i32) p[0] | ((sext(p[1]) to i32) << 16)
+define i32 @load_i32_by_sext_i16(i32* %arg) {
+; CHECK-LABEL: load_i32_by_sext_i16:
+; CHECK: ldr w0, [x0]
+; CHECK-NEXT: ret
+ %tmp = bitcast i32* %arg to i16*
+ %tmp1 = load i16, i16* %tmp, align 4
+ %tmp2 = zext i16 %tmp1 to i32
+ %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
+ %tmp4 = load i16, i16* %tmp3, align 1
+ %tmp5 = sext i16 %tmp4 to i32
+ %tmp6 = shl nuw nsw i32 %tmp5, 16
+ %tmp7 = or i32 %tmp6, %tmp2
+ ret i32 %tmp7
+}
+
+; i8* arg; i32 i;
+; p = arg + 12;
+; (i32) p[i] | ((i32) p[i + 1] << 8) | ((i32) p[i + 2] << 16) | ((i32) p[i + 3] << 24)
+define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) {
+; CHECK-LABEL: load_i32_by_i8_base_offset_index:
+; CHECK: add x8, x0, w1, uxtw
+; CHECK-NEXT: ldr w0, [x8, #12]
+; CHECK-NEXT: ret
+ %tmp = add nuw nsw i32 %i, 3
+ %tmp2 = add nuw nsw i32 %i, 2
+ %tmp3 = add nuw nsw i32 %i, 1
+ %tmp4 = getelementptr inbounds i8, i8* %arg, i64 12
+ %tmp5 = zext i32 %i to i64
+ %tmp6 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp5
+ %tmp7 = load i8, i8* %tmp6, align 4
+ %tmp8 = zext i8 %tmp7 to i32
+ %tmp9 = zext i32 %tmp3 to i64
+ %tmp10 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp9
+ %tmp11 = load i8, i8* %tmp10, align 1
+ %tmp12 = zext i8 %tmp11 to i32
+ %tmp13 = shl nuw nsw i32 %tmp12, 8
+ %tmp14 = or i32 %tmp13, %tmp8
+ %tmp15 = zext i32 %tmp2 to i64
+ %tmp16 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp15
+ %tmp17 = load i8, i8* %tmp16, align 1
+ %tmp18 = zext i8 %tmp17 to i32
+ %tmp19 = shl nuw nsw i32 %tmp18, 16
+ %tmp20 = or i32 %tmp14, %tmp19
+ %tmp21 = zext i32 %tmp to i64
+ %tmp22 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp21
+ %tmp23 = load i8, i8* %tmp22, align 1
+ %tmp24 = zext i8 %tmp23 to i32
+ %tmp25 = shl nuw i32 %tmp24, 24
+ %tmp26 = or i32 %tmp20, %tmp25
+ ret i32 %tmp26
+}
+
+; i8* arg; i32 i;
+; p = arg + 12;
+; (i32) p[i + 1] | ((i32) p[i + 2] << 8) | ((i32) p[i + 3] << 16) | ((i32) p[i + 4] << 24)
+define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
+; CHECK-LABEL: load_i32_by_i8_base_offset_index_2:
+; CHECK: add x8, x0, w1, uxtw
+; CHECK-NEXT: ldur w0, [x8, #13]
+; CHECK-NEXT: ret
+ %tmp = add nuw nsw i32 %i, 4
+ %tmp2 = add nuw nsw i32 %i, 3
+ %tmp3 = add nuw nsw i32 %i, 2
+ %tmp4 = getelementptr inbounds i8, i8* %arg, i64 12
+ %tmp5 = add nuw nsw i32 %i, 1
+ %tmp27 = zext i32 %tmp5 to i64
+ %tmp28 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp27
+ %tmp29 = load i8, i8* %tmp28, align 4
+ %tmp30 = zext i8 %tmp29 to i32
+ %tmp31 = zext i32 %tmp3 to i64
+ %tmp32 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp31
+ %tmp33 = load i8, i8* %tmp32, align 1
+ %tmp34 = zext i8 %tmp33 to i32
+ %tmp35 = shl nuw nsw i32 %tmp34, 8
+ %tmp36 = or i32 %tmp35, %tmp30
+ %tmp37 = zext i32 %tmp2 to i64
+ %tmp38 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp37
+ %tmp39 = load i8, i8* %tmp38, align 1
+ %tmp40 = zext i8 %tmp39 to i32
+ %tmp41 = shl nuw nsw i32 %tmp40, 16
+ %tmp42 = or i32 %tmp36, %tmp41
+ %tmp43 = zext i32 %tmp to i64
+ %tmp44 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp43
+ %tmp45 = load i8, i8* %tmp44, align 1
+ %tmp46 = zext i8 %tmp45 to i32
+ %tmp47 = shl nuw i32 %tmp46, 24
+ %tmp48 = or i32 %tmp42, %tmp47
+ ret i32 %tmp48
+}
+
+; i8* p; // p is 2 byte aligned
+; (i32) p[0] | ((i32) p[1] << 8)
+define i32 @zext_load_i32_by_i8(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8:
+; CHECK: ldrb w8, [x0]
+; CHECK-NEXT: ldrb w9, [x0, #1]
+; CHECK-NEXT: bfi w8, w9, #8, #8
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp1, align 2
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ ret i32 %tmp8
+}
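+
+; Note: unlike the 4-byte patterns above, this 2-byte pattern still lowers to
+; two ldrb plus bfi; a zero-extending halfword load would be a plausible
+; further fold, as in the C sketch:
+;   i16 w;
+;   memcpy(&w, p, 2); // ldrh
+;   return (i32) w;   // zero-extended result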
+
+; i8* p; // p is 2 byte aligned
+; ((i32) p[0] << 8) | ((i32) p[1] << 16)
+define i32 @zext_load_i32_by_i8_shl_8(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_shl_8:
+; CHECK: ldrb w8, [x0]
+; CHECK-NEXT: ldrb w9, [x0, #1]
+; CHECK-NEXT: lsl w0, w8, #8
+; CHECK-NEXT: bfi w0, w9, #16, #8
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp1, align 2
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 8
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 16
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
+
+; i8* p; // p is 2 byte aligned
+; ((i32) p[0] << 16) | ((i32) p[1] << 24)
+define i32 @zext_load_i32_by_i8_shl_16(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_shl_16:
+; CHECK: ldrb w8, [x0]
+; CHECK-NEXT: ldrb w9, [x0, #1]
+; CHECK-NEXT: lsl w0, w8, #16
+; CHECK-NEXT: bfi w0, w9, #24, #8
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp1, align 2
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 16
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 24
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
+; i8* p; // p is 2 byte aligned
+; (i32) p[1] | ((i32) p[0] << 8)
+define i32 @zext_load_i32_by_i8_bswap(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_bswap:
+; CHECK: ldrb w8, [x0, #1]
+; CHECK-NEXT: ldrb w9, [x0]
+; CHECK-NEXT: bfi w8, w9, #8, #8
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp5 = load i8, i8* %tmp4, align 2
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ ret i32 %tmp8
+}
+
+; i8* p; // p is 2 byte aligned
+; ((i32) p[1] << 8) | ((i32) p[0] << 16)
+define i32 @zext_load_i32_by_i8_bswap_shl_8(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_bswap_shl_8:
+; CHECK: ldrb w8, [x0, #1]
+; CHECK-NEXT: ldrb w9, [x0]
+; CHECK-NEXT: lsl w0, w8, #8
+; CHECK-NEXT: bfi w0, w9, #16, #8
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 8
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp5 = load i8, i8* %tmp4, align 2
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 16
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
+
+; i8* p; // p is 2 byte aligned
+; ((i32) p[1] << 16) | ((i32) p[0] << 24)
+define i32 @zext_load_i32_by_i8_bswap_shl_16(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_bswap_shl_16:
+; CHECK: ldrb w8, [x0, #1]
+; CHECK-NEXT: ldrb w9, [x0]
+; CHECK-NEXT: lsl w0, w8, #16
+; CHECK-NEXT: bfi w0, w9, #24, #8
+; CHECK-NEXT: ret
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 16
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp5 = load i8, i8* %tmp4, align 2
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 24
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
diff --git a/test/CodeGen/AArch64/machine-combiner-madd.ll b/test/CodeGen/AArch64/machine-combiner-madd.ll
index ea3113789461..4efe4e9cfb01 100644
--- a/test/CodeGen/AArch64/machine-combiner-madd.ll
+++ b/test/CodeGen/AArch64/machine-combiner-madd.ll
@@ -6,7 +6,7 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mcpu=exynos-m1 < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mcpu=exynos-m2 < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mcpu=kryo < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-linux-gnu -mcpu=vulcan < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mcpu=thunderx2t99 < %s | FileCheck %s
; Make sure that inst-combine fuses the multiply add in the addressing mode of
; the load.
diff --git a/test/CodeGen/AArch64/machine-copy-remove.mir b/test/CodeGen/AArch64/machine-copy-remove.mir
new file mode 100644
index 000000000000..6f2d3a3009b0
--- /dev/null
+++ b/test/CodeGen/AArch64/machine-copy-remove.mir
@@ -0,0 +1,672 @@
+# RUN: llc -mtriple=aarch64--linux-gnu -run-pass=aarch64-copyelim %s -verify-machineinstrs -o - | FileCheck %s
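+#
+# What these tests exercise, as a pseudo-assembly sketch (not actual pass
+# output):
+#   mov  x0, x1         // x0 now mirrors x1
+#   cbnz x1, .Lbb2      // fall-through implies x1 == 0, hence x0 == 0
+#   mov  x0, xzr        // redundant, so aarch64-copyelim can drop it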
+---
+# Check that bb.0 COPY is seen through to allow the bb.1 COPY of XZR to be removed.
+# CHECK-LABEL: name: test1
+# CHECK-NOT: COPY %xzr
+name: test1
+tracksRegLiveness: true
+body: |
+ bb.0:
+ successors: %bb.1, %bb.2
+ liveins: %x0, %x1
+
+ %x0 = COPY %x1
+ CBNZX %x1, %bb.2
+
+ bb.1:
+ successors: %bb.3
+
+ %x0 = COPY %xzr
+ B %bb.3
+
+ bb.2:
+ successors: %bb.3
+ liveins: %x1
+
+ %x0 = LDRXui %x1, 0
+
+ bb.3:
+ liveins: %x0
+
+ RET_ReallyLR implicit %x0
+
+...
+# Similar to test1, but with reversed COPY.
+# CHECK-LABEL: name: test2
+# CHECK-NOT: COPY %xzr
+name: test2
+tracksRegLiveness: true
+body: |
+ bb.0:
+ successors: %bb.1, %bb.2
+ liveins: %x0, %x1
+
+ %x1 = COPY %x0
+ CBNZX %x1, %bb.2
+
+ bb.1:
+ successors: %bb.3
+
+ %x0 = COPY %xzr
+ B %bb.3
+
+ bb.2:
+ successors: %bb.3
+ liveins: %x1
+
+ %x0 = LDRXui %x1, 0
+
+ bb.3:
+ liveins: %x0
+
+ RET_ReallyLR implicit %x0
+
+...
+# Similar to test1, but with a clobber that prevents removal of the XZR COPY.
+# CHECK-LABEL: name: test3
+# CHECK: COPY %xzr
+name: test3
+tracksRegLiveness: true
+body: |
+ bb.0:
+ successors: %bb.1, %bb.2
+ liveins: %x0, %x1, %x2
+
+ %x0 = COPY %x1
+ %x1 = LDRXui %x1, 0
+ CBNZX %x1, %bb.2
+
+ bb.1:
+ successors: %bb.3
+
+ %x0 = COPY %xzr
+ B %bb.3
+
+ bb.2:
+ successors: %bb.3
+ liveins: %x1
+
+ %x0 = LDRXui %x1, 0
+
+ bb.3:
+ liveins: %x0
+
+ RET_ReallyLR implicit %x0
+
+...
+# Similar to test2, but with a clobber that prevents removal of the XZR COPY.
+# CHECK-LABEL: name: test4
+# CHECK: COPY %xzr
+name: test4
+tracksRegLiveness: true
+body: |
+ bb.0:
+ successors: %bb.1, %bb.2
+ liveins: %x0, %x1, %x2
+
+ %x1 = COPY %x0
+ %x1 = LDRXui %x1, 0
+ CBNZX %x1, %bb.2
+
+ bb.1:
+ successors: %bb.3
+
+ %x0 = COPY %xzr
+ B %bb.3
+
+ bb.2:
+ successors: %bb.3
+ liveins: %x1
+
+ %x0 = LDRXui %x1, 0
+
+ bb.3:
+ liveins: %x0
+
+ RET_ReallyLR implicit %x0
+
+...
+# Similar to test2, but with a clobber that prevents removal of the XZR COPY.
+# CHECK-LABEL: name: test5
+# CHECK: COPY %xzr
+name: test5
+tracksRegLiveness: true
+body: |
+ bb.0:
+ successors: %bb.1, %bb.2
+ liveins: %x0, %x1, %x2
+
+ %x1 = COPY %x0
+ %x0 = LDRXui %x1, 0
+ CBNZX %x1, %bb.2
+
+ bb.1:
+ successors: %bb.3
+
+ %x0 = COPY %xzr
+ B %bb.3
+
+ bb.2:
+ successors: %bb.3
+ liveins: %x1
+
+ %x0 = LDRXui %x1, 0
+
+ bb.3:
+ liveins: %x0
+
+ RET_ReallyLR implicit %x0
+
+...
+# Similar to test1, but with two levels of COPYs.
+# CHECK-LABEL: name: test6
+# CHECK-NOT: COPY %xzr
+name: test6
+tracksRegLiveness: true
+body: |
+ bb.0:
+ successors: %bb.1, %bb.2
+ liveins: %x0, %x1, %x2
+
+ %x2 = COPY %x0
+ %x1 = COPY %x2
+ CBNZX %x1, %bb.2
+
+ bb.1:
+ successors: %bb.3
+
+ %x0 = COPY %xzr
+ B %bb.3
+
+ bb.2:
+ successors: %bb.3
+ liveins: %x1
+
+ %x0 = LDRXui %x1, 0
+
+ bb.3:
+ liveins: %x0
+
+ RET_ReallyLR implicit %x0
+
+...
+# Similar to test1, but with two levels of COPYs and a clobber preventing COPY of XZR removal.
+# CHECK-LABEL: name: test7
+# CHECK: COPY %xzr
+name: test7
+tracksRegLiveness: true
+body: |
+ bb.0:
+ successors: %bb.1, %bb.2
+ liveins: %x0, %x1, %x2
+
+ %x2 = COPY %x0
+ %x0 = LDRXui %x1, 0
+ %x1 = COPY %x2
+ CBNZX %x1, %bb.2
+
+ bb.1:
+ successors: %bb.3
+
+ %x0 = COPY %xzr
+ B %bb.3
+
+ bb.2:
+ successors: %bb.3
+ liveins: %x1
+
+ %x0 = LDRXui %x1, 0
+
+ bb.3:
+ liveins: %x0
+
+ RET_ReallyLR implicit %x0
+
+...
+# Check that the TargetRegs vector clobber update loop in
+# AArch64RedundantCopyElimination::optimizeCopy works correctly.
+# CHECK-LABEL: name: test8
+# CHECK: x0 = COPY %xzr
+# CHECK: x1 = COPY %xzr
+name: test8
+tracksRegLiveness: true
+body: |
+ bb.0:
+ successors: %bb.1, %bb.2
+ liveins: %x0, %x1
+
+ %x1 = COPY %x0
+ CBNZX %x1, %bb.2
+
+ bb.1:
+ successors: %bb.3
+ liveins: %x0, %x2
+
+ %x0, %x1 = LDPXi %x2, 0
+ %x0 = COPY %xzr
+ %x1 = COPY %xzr
+ B %bb.3
+
+ bb.2:
+ successors: %bb.3
+ liveins: %x1
+
+ %x0 = LDRXui %x1, 0
+
+ bb.3:
+ liveins: %x0
+
+ RET_ReallyLR implicit %x0
+
+...
+# Check that copy isn't removed from a block with multiple predecessors.
+# CHECK-LABEL: name: test9
+# CHECK: x0 = COPY %xzr
+# CHECK-NEXT: B %bb.3
+name: test9
+tracksRegLiveness: true
+body: |
+ bb.0:
+ successors: %bb.1, %bb.2
+ liveins: %x0, %x1
+
+ CBNZX %x0, %bb.2
+
+ bb.1:
+ successors: %bb.3
+ liveins: %x0, %x2
+
+ %x0 = COPY %xzr
+ B %bb.3
+
+ bb.2:
+ successors: %bb.1, %bb.3
+ liveins: %x1
+
+ %x0 = LDRXui %x1, 0
+
+ CBNZX %x1, %bb.1
+
+ bb.3:
+ liveins: %x0
+
+ RET_ReallyLR implicit %x0
+
+...
+# Eliminate redundant MOVi32imm 7 in bb.1
+# Note: 32-bit compare/32-bit move imm
+# Kill marker should be removed from compare.
+# CHECK-LABEL: name: test10
+# CHECK: SUBSWri %w0, 7, 0, implicit-def %nzcv
+# CHECK: bb.1:
+# CHECK-NOT: MOVi32imm
+name: test10
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ successors: %bb.1, %bb.2
+ liveins: %w0, %x1
+
+ dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
+ Bcc 1, %bb.2, implicit killed %nzcv
+ B %bb.1
+
+ bb.1:
+ successors: %bb.2
+ liveins: %x1
+
+ %w0 = MOVi32imm 7
+ STRWui killed %w0, killed %x1, 0
+
+ bb.2:
+ RET_ReallyLR
+...
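+# The MOVimm tests (test10 and those that follow) use the same known-value
+# reasoning as the COPY tests, now seeded by a compare against an immediate;
+# a sketch:
+#   subs wzr, w0, #7    // compare w0 with 7
+#   b.ne .Lbb2          // fall-through implies w0 == 7
+#   mov  w0, #7         // redundant; dropping it requires clearing the kill
+#                       // marker on w0 at the compare, since w0 is read later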
+# Eliminate redundant MOVi32imm 7 in bb.1
+# Note: 64-bit compare/32-bit move imm w/implicit def
+# Kill marker should be removed from compare.
+# CHECK-LABEL: name: test11
+# CHECK: SUBSXri %x0, 7, 0, implicit-def %nzcv
+# CHECK: bb.1:
+# CHECK-NOT: MOVi32imm
+name: test11
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ successors: %bb.1, %bb.2
+ liveins: %x0, %x1
+
+ dead %xzr = SUBSXri killed %x0, 7, 0, implicit-def %nzcv
+ Bcc 1, %bb.2, implicit killed %nzcv
+ B %bb.1
+
+ bb.1:
+ successors: %bb.2
+ liveins: %x1
+
+ %w0 = MOVi32imm 7, implicit-def %x0
+ STRXui killed %x0, killed %x1, 0
+
+ bb.2:
+ RET_ReallyLR
+...
+# Eliminate redundant MOVi32imm 7 in bb.1
+# Note: 64-bit compare/32-bit move imm
+# Kill marker should be removed from compare.
+# CHECK-LABEL: name: test12
+# CHECK: SUBSXri %x0, 7, 0, implicit-def %nzcv
+# CHECK: bb.1:
+# CHECK-NOT: MOVi32imm
+name: test12
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ successors: %bb.1, %bb.2
+ liveins: %x0, %x1
+
+ dead %xzr = SUBSXri killed %x0, 7, 0, implicit-def %nzcv
+ Bcc 1, %bb.2, implicit killed %nzcv
+ B %bb.1
+
+ bb.1:
+ successors: %bb.2
+ liveins: %x1
+
+ %w0 = MOVi32imm 7
+ STRWui killed %w0, killed %x1, 0
+
+ bb.2:
+ RET_ReallyLR
+...
+# Don't eliminate MOVi32imm 7 in bb.1 as we don't necessarily know the upper 32 bits.
+# Note: 32-bit compare/32-bit move imm w/implicit def
+# Kill marker should remain on compare.
+# CHECK-LABEL: name: test13
+# CHECK: SUBSWri killed %w0, 7, 0, implicit-def %nzcv
+# CHECK: bb.1:
+# CHECK: MOVi32imm
+name: test13
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ successors: %bb.1, %bb.2
+ liveins: %w0, %x1
+
+ dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
+ Bcc 1, %bb.2, implicit killed %nzcv
+ B %bb.1
+
+ bb.1:
+ successors: %bb.2
+ liveins: %x1
+
+ %w0 = MOVi32imm 7, implicit-def %x0
+ STRXui killed %x0, killed %x1, 0
+
+ bb.2:
+ RET_ReallyLR
+...
+# We can't eliminate the MOVi32imm because of the clobbering LDRWui.
+# CHECK-LABEL: name: test14
+# CHECK: bb.1:
+# CHECK: MOVi32imm
+name: test14
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ successors: %bb.1, %bb.2
+ liveins: %w0, %x1, %x2
+
+ dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
+ %w0 = LDRWui %x1, 0
+ STRWui killed %w0, killed %x2, 0
+ Bcc 1, %bb.2, implicit killed %nzcv
+ B %bb.1
+
+ bb.1:
+ successors: %bb.2
+ liveins: %x1
+
+ %w0 = MOVi32imm 7
+ STRWui killed %w0, killed %x1, 0
+
+ bb.2:
+ RET_ReallyLR
+...
+# We can't eliminate the MOVi32imm because of the clobbering LDRWui.
+# CHECK-LABEL: name: test15
+# CHECK: bb.1:
+# CHECK: MOVi32imm
+name: test15
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ successors: %bb.1, %bb.2
+ liveins: %w0, %x1, %x2
+
+ dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
+ Bcc 1, %bb.2, implicit killed %nzcv
+ B %bb.1
+
+ bb.1:
+ successors: %bb.2
+ liveins: %x1, %x2
+
+ %w0 = LDRWui %x1, 0
+ STRWui killed %w0, killed %x2, 0
+ %w0 = MOVi32imm 7
+ STRWui killed %w0, killed %x1, 0
+
+ bb.2:
+ RET_ReallyLR
+...
+# Check that bb.0 COPY is seen through to allow the bb.1 MOVi32imm to be removed.
+# CHECK-LABEL: name: test16
+# CHECK: bb.1:
+# CHECK-NOT: MOVi32imm
+name: test16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ successors: %bb.1, %bb.2
+ liveins: %w0, %x1
+
+ dead %wzr = SUBSWri %w0, 7, 0, implicit-def %nzcv
+ %w2 = COPY %w0
+ Bcc 1, %bb.2, implicit killed %nzcv
+ B %bb.1
+
+ bb.1:
+ successors: %bb.2
+ liveins: %x1
+
+ %w2 = MOVi32imm 7
+ STRWui killed %w2, killed %x1, 0
+
+ bb.2:
+ RET_ReallyLR
+...
+# Check that bb.1 MOVi32imm is not removed due to the self-clobbering compare.
+# CHECK-LABEL: name: test17
+# CHECK: bb.1:
+# CHECK: MOVi32imm
+name: test17
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ successors: %bb.1, %bb.2
+ liveins: %w0, %x1
+
+ dead %w0 = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
+ Bcc 1, %bb.2, implicit killed %nzcv
+ B %bb.1
+
+ bb.1:
+ successors: %bb.2
+ liveins: %x1
+
+ %w0 = MOVi32imm 7
+ STRWui killed %w0, killed %x1, 0
+
+ bb.2:
+ RET_ReallyLR
+...
+# Make sure the MOVi64imm is not removed. In one version of this patch the
+# MOVi64imm immediate was truncated to 32 bits and incorrectly matched because
+# the low 32 bits of 4252017623040 are all zero.
+# CHECK-LABEL: name: test18
+# CHECK: bb.1:
+# CHECK: MOVi64imm
+name: test18
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ successors: %bb.1, %bb.2
+ liveins: %x0, %x1
+
+ CBNZX killed %x0, %bb.2
+ B %bb.1
+
+ bb.1:
+ successors: %bb.2
+ liveins: %x1
+
+ %x0 = MOVi64imm 4252017623040
+ STRXui killed %x0, killed %x1, 0
+
+ bb.2:
+ RET_ReallyLR
+...
+# Eliminate redundant MOVi32imm -1 in bb.1
+# Note: 32-bit compare/32-bit move imm
+# Kill marker should be removed from compare.
+# CHECK-LABEL: name: test19
+# CHECK: ADDSWri %w0, 1, 0, implicit-def %nzcv
+# CHECK: bb.1:
+# CHECK-NOT: MOVi32imm
+name: test19
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ successors: %bb.1, %bb.2
+ liveins: %w0, %x1
+
+ dead %wzr = ADDSWri killed %w0, 1, 0, implicit-def %nzcv
+ Bcc 1, %bb.2, implicit killed %nzcv
+ B %bb.1
+
+ bb.1:
+ successors: %bb.2
+ liveins: %x1
+
+ %w0 = MOVi32imm -1
+ STRWui killed %w0, killed %x1, 0
+
+ bb.2:
+ RET_ReallyLR
+...
+# Eliminate redundant MOVi64imm -1 in bb.1
+# Note: 64-bit compare/64-bit move imm
+# Kill marker should be removed from compare.
+# CHECK-LABEL: name: test20
+# CHECK: ADDSXri %x0, 1, 0, implicit-def %nzcv
+# CHECK: bb.1:
+# CHECK-NOT: MOVi64imm
+name: test20
+tracksRegLiveness: true
+body: |
+ bb.0:
+ successors: %bb.1, %bb.2
+ liveins: %x0, %x1
+
+ dead %xzr = ADDSXri killed %x0, 1, 0, implicit-def %nzcv
+ Bcc 1, %bb.2, implicit killed %nzcv
+ B %bb.1
+
+ bb.1:
+ successors: %bb.2
+ liveins: %x1
+
+ %x0 = MOVi64imm -1
+ STRXui killed %x0, killed %x1, 0
+
+ bb.2:
+ RET_ReallyLR
+...
+# Eliminate redundant MOVi32imm -1 in bb.1
+# Note: 64-bit compare/32-bit move imm
+# Kill marker should be removed from compare.
+# CHECK-LABEL: name: test21
+# CHECK: ADDSXri %x0, 1, 0, implicit-def %nzcv
+# CHECK: bb.1:
+# CHECK-NOT: MOVi32imm
+name: test21
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ successors: %bb.1, %bb.2
+ liveins: %x0, %x1
+
+ dead %xzr = ADDSXri killed %x0, 1, 0, implicit-def %nzcv
+ Bcc 1, %bb.2, implicit killed %nzcv
+ B %bb.1
+
+ bb.1:
+ successors: %bb.2
+ liveins: %x1
+
+ %w0 = MOVi32imm -1
+ STRWui killed %w0, killed %x1, 0
+
+ bb.2:
+ RET_ReallyLR
+...
+# Don't eliminate MOVi64imm -1 in bb.1 as we don't necessarily know the upper 32 bits.
+# Note: 32-bit compare/64-bit move imm
+# CHECK-LABEL: name: test22
+# CHECK: bb.1:
+# CHECK: MOVi64imm
+name: test22
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ successors: %bb.1, %bb.2
+ liveins: %w0, %x1
+
+ dead %wzr = ADDSWri killed %w0, 1, 0, implicit-def %nzcv
+ Bcc 1, %bb.2, implicit killed %nzcv
+ B %bb.1
+
+ bb.1:
+ successors: %bb.2
+ liveins: %x1
+
+ %x0 = MOVi64imm -1
+ STRXui killed %x0, killed %x1, 0
+
+ bb.2:
+ RET_ReallyLR
+...
+# Eliminate redundant MOVi32imm 4096 in bb.1 when the compare has a shifted immediate.
+# CHECK-LABEL: name: test23
+# CHECK: bb.1:
+# CHECK-NOT: MOVi32imm
+name: test23
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ successors: %bb.1, %bb.2
+ liveins: %w0, %x1
+
+ dead %wzr = SUBSWri killed %w0, 1, 12, implicit-def %nzcv
+ Bcc 1, %bb.2, implicit killed %nzcv
+ B %bb.1
+
+ bb.1:
+ successors: %bb.2
+ liveins: %x1
+
+ %w0 = MOVi32imm 4096
+ STRWui killed %w0, killed %x1, 0
+
+ bb.2:
+ RET_ReallyLR
diff --git a/test/CodeGen/AArch64/machine-outliner.ll b/test/CodeGen/AArch64/machine-outliner.ll
new file mode 100644
index 000000000000..b5094fe47508
--- /dev/null
+++ b/test/CodeGen/AArch64/machine-outliner.ll
@@ -0,0 +1,43 @@
+; RUN: llc -enable-machine-outliner -mtriple=aarch64-apple-darwin < %s | FileCheck %s
+
+define void @cat() #0 {
+; CHECK-LABEL: _cat:
+; CHECK: b l_OUTLINED_FUNCTION_0
+; CHECK-NOT: ret
+ %1 = alloca i32, align 4
+ %2 = alloca i32, align 4
+ %3 = alloca i32, align 4
+ %4 = alloca i32, align 4
+ store i32 0, i32* %1, align 4
+ store i32 1, i32* %2, align 4
+ store i32 2, i32* %3, align 4
+ store i32 3, i32* %4, align 4
+ ret void
+}
+
+define void @dog() #0 {
+; CHECK-LABEL: _dog:
+; CHECK: b l_OUTLINED_FUNCTION_0
+; CHECK-NOT: ret
+ %1 = alloca i32, align 4
+ %2 = alloca i32, align 4
+ %3 = alloca i32, align 4
+ %4 = alloca i32, align 4
+ store i32 0, i32* %1, align 4
+ store i32 1, i32* %2, align 4
+ store i32 2, i32* %3, align 4
+ store i32 3, i32* %4, align 4
+ ret void
+}
+
+; CHECK-LABEL: l_OUTLINED_FUNCTION_0:
+; CHECK: orr w8, wzr, #0x1
+; CHECK-NEXT: stp w8, wzr, [sp, #8]
+; CHECK-NEXT: orr w8, wzr, #0x2
+; CHECK-NEXT: str w8, [sp, #4]
+; CHECK-NEXT: orr w8, wzr, #0x3
+; CHECK-NEXT: str w8, [sp], #16
+; CHECK-NEXT: ret
+
+
+attributes #0 = { noredzone nounwind ssp uwtable "no-frame-pointer-elim"="false" "target-cpu"="cyclone" }
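+
+; The outliner replaces the identical store sequences in @cat and @dog with a
+; tail branch (plain b, no ret) to one shared body; conceptually, a sketch:
+;   static void outlined(void); // the common tail; its ret returns to the
+;                               // outlined functions' original callers
+;   void cat(void) { outlined(); }
+;   void dog(void) { outlined(); }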
diff --git a/test/CodeGen/AArch64/mature-mc-support.ll b/test/CodeGen/AArch64/mature-mc-support.ll
index 276c54d2cc4e..dbc027143f99 100644
--- a/test/CodeGen/AArch64/mature-mc-support.ll
+++ b/test/CodeGen/AArch64/mature-mc-support.ll
@@ -9,4 +9,4 @@
module asm " .this_directive_is_very_unlikely_to_exist"
-; CHECK: LLVM ERROR: Error parsing inline asm
+; CHECK: error: unknown directive
diff --git a/test/CodeGen/AArch64/merge-store.ll b/test/CodeGen/AArch64/merge-store.ll
index 1d0196ad521d..1d26e4a42b17 100644
--- a/test/CodeGen/AArch64/merge-store.ll
+++ b/test/CodeGen/AArch64/merge-store.ll
@@ -4,8 +4,7 @@
@g0 = external global <3 x float>, align 16
@g1 = external global <3 x float>, align 4
-; CHECK: ldr s[[R0:[0-9]+]], {{\[}}[[R1:x[0-9]+]]{{\]}}, #4
-; CHECK: ld1{{\.?s?}} { v[[R0]]{{\.?s?}} }[1], {{\[}}[[R1]]{{\]}}
+; CHECK: ldr q[[R0:[0-9]+]], {{\[}}[[R1:x[0-9]+]], :lo12:g0
; CHECK: str d[[R0]]
define void @blam() {
diff --git a/test/CodeGen/AArch64/misched-fusion-aes.ll b/test/CodeGen/AArch64/misched-fusion-aes.ll
new file mode 100644
index 000000000000..f29dfb3a9802
--- /dev/null
+++ b/test/CodeGen/AArch64/misched-fusion-aes.ll
@@ -0,0 +1,207 @@
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a57 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKA57
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKM1
+
+declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d, <16 x i8> %k)
+declare <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %d)
+declare <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d, <16 x i8> %k)
+declare <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %d)
+
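+; Both subtargets can fuse an aese/aesmc (and aesd/aesimc) pair when the
+; second instruction consumes the first one's result; the CHECK-NEXT lines
+; below verify that the scheduler keeps such pairs adjacent, e.g.:
+;   aese  v0.16b, v1.16b
+;   aesmc v0.16b, v0.16b   // fused with the aese above
+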
+define void @aesea(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d, <16 x i8> %e) {
+ %d0 = load <16 x i8>, <16 x i8>* %a0
+ %a1 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 1
+ %d1 = load <16 x i8>, <16 x i8>* %a1
+ %a2 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 2
+ %d2 = load <16 x i8>, <16 x i8>* %a2
+ %a3 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 3
+ %d3 = load <16 x i8>, <16 x i8>* %a3
+ %k0 = load <16 x i8>, <16 x i8>* %b0
+ %e00 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d0, <16 x i8> %k0)
+ %f00 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e00)
+ %e01 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d1, <16 x i8> %k0)
+ %f01 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e01)
+ %e02 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d2, <16 x i8> %k0)
+ %f02 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e02)
+ %e03 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d3, <16 x i8> %k0)
+ %f03 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e03)
+ %b1 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 1
+ %k1 = load <16 x i8>, <16 x i8>* %b1
+ %e10 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f00, <16 x i8> %k1)
+ %f10 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e00)
+ %e11 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f01, <16 x i8> %k1)
+ %f11 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e01)
+ %e12 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f02, <16 x i8> %k1)
+ %f12 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e02)
+ %e13 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f03, <16 x i8> %k1)
+ %f13 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e03)
+ %b2 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 2
+ %k2 = load <16 x i8>, <16 x i8>* %b2
+ %e20 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f10, <16 x i8> %k2)
+ %f20 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e10)
+ %e21 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f11, <16 x i8> %k2)
+ %f21 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e11)
+ %e22 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f12, <16 x i8> %k2)
+ %f22 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e12)
+ %e23 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f13, <16 x i8> %k2)
+ %f23 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e13)
+ %b3 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 3
+ %k3 = load <16 x i8>, <16 x i8>* %b3
+ %e30 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f20, <16 x i8> %k3)
+ %f30 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e20)
+ %e31 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f21, <16 x i8> %k3)
+ %f31 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e21)
+ %e32 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f22, <16 x i8> %k3)
+ %f32 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e22)
+ %e33 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f23, <16 x i8> %k3)
+ %f33 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e23)
+ %g0 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f30, <16 x i8> %d)
+ %h0 = xor <16 x i8> %g0, %e
+ %g1 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f31, <16 x i8> %d)
+ %h1 = xor <16 x i8> %g1, %e
+ %g2 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f32, <16 x i8> %d)
+ %h2 = xor <16 x i8> %g2, %e
+ %g3 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f33, <16 x i8> %d)
+ %h3 = xor <16 x i8> %g3, %e
+ store <16 x i8> %h0, <16 x i8>* %c0
+ %c1 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 1
+ store <16 x i8> %h1, <16 x i8>* %c1
+ %c2 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 2
+ store <16 x i8> %h2, <16 x i8>* %c2
+ %c3 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 3
+ store <16 x i8> %h3, <16 x i8>* %c3
+ ret void
+
+; CHECK-LABEL: aesea:
+; CHECKA57: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VC]]
+; CHECKA57: aesmc {{v[0-7].16b}}, [[VA]]
+; CHECKA57: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VD]]
+; CHECKA57: aesmc {{v[0-7].16b}}, [[VB]]
+; CHECKA57: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VE]]
+; CHECKA57: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VF]]
+; CHECKA57: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VG]]
+; CHECKA57: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VH]]
+; CHECKM1: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKM1: aesmc {{v[0-7].16b}}, [[VA]]
+; CHECKM1: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VB]]
+; CHECKM1: aese {{v[0-7].16b}}, {{v[0-7].16b}}
+; CHECKM1: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VC]]
+; CHECKM1: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKM1: aesmc {{v[0-7].16b}}, [[VD]]
+; CHECKM1: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VE]]
+; CHECKM1: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VF]]
+; CHECKM1: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VG]]
+; CHECKM1: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VH]]
+}
+
+define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d, <16 x i8> %e) {
+ %d0 = load <16 x i8>, <16 x i8>* %a0
+ %a1 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 1
+ %d1 = load <16 x i8>, <16 x i8>* %a1
+ %a2 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 2
+ %d2 = load <16 x i8>, <16 x i8>* %a2
+ %a3 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 3
+ %d3 = load <16 x i8>, <16 x i8>* %a3
+ %k0 = load <16 x i8>, <16 x i8>* %b0
+ %e00 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d0, <16 x i8> %k0)
+ %f00 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e00)
+ %e01 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d1, <16 x i8> %k0)
+ %f01 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e01)
+ %e02 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d2, <16 x i8> %k0)
+ %f02 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e02)
+ %e03 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d3, <16 x i8> %k0)
+ %f03 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e03)
+ %b1 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 1
+ %k1 = load <16 x i8>, <16 x i8>* %b1
+ %e10 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f00, <16 x i8> %k1)
+ %f10 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e00)
+ %e11 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f01, <16 x i8> %k1)
+ %f11 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e01)
+ %e12 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f02, <16 x i8> %k1)
+ %f12 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e02)
+ %e13 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f03, <16 x i8> %k1)
+ %f13 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e03)
+ %b2 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 2
+ %k2 = load <16 x i8>, <16 x i8>* %b2
+ %e20 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f10, <16 x i8> %k2)
+ %f20 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e10)
+ %e21 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f11, <16 x i8> %k2)
+ %f21 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e11)
+ %e22 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f12, <16 x i8> %k2)
+ %f22 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e12)
+ %e23 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f13, <16 x i8> %k2)
+ %f23 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e13)
+ %b3 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 3
+ %k3 = load <16 x i8>, <16 x i8>* %b3
+ %e30 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f20, <16 x i8> %k3)
+ %f30 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e20)
+ %e31 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f21, <16 x i8> %k3)
+ %f31 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e21)
+ %e32 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f22, <16 x i8> %k3)
+ %f32 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e22)
+ %e33 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f23, <16 x i8> %k3)
+ %f33 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e23)
+ %g0 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f30, <16 x i8> %d)
+ %h0 = xor <16 x i8> %g0, %e
+ %g1 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f31, <16 x i8> %d)
+ %h1 = xor <16 x i8> %g1, %e
+ %g2 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f32, <16 x i8> %d)
+ %h2 = xor <16 x i8> %g2, %e
+ %g3 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f33, <16 x i8> %d)
+ %h3 = xor <16 x i8> %g3, %e
+ store <16 x i8> %h0, <16 x i8>* %c0
+ %c1 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 1
+ store <16 x i8> %h1, <16 x i8>* %c1
+ %c2 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 2
+ store <16 x i8> %h2, <16 x i8>* %c2
+ %c3 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 3
+ store <16 x i8> %h3, <16 x i8>* %c3
+ ret void
+
+; CHECK-LABEL: aesda:
+; CHECKA57: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VC]]
+; CHECKA57: aesimc {{v[0-7].16b}}, [[VA]]
+; CHECKA57: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VD]]
+; CHECKA57: aesimc {{v[0-7].16b}}, [[VB]]
+; CHECKA57: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VE]]
+; CHECKA57: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VF]]
+; CHECKA57: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VG]]
+; CHECKA57: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VH]]
+; CHECKM1: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKM1: aesimc {{v[0-7].16b}}, [[VA]]
+; CHECKM1: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VB]]
+; CHECKM1: aesd {{v[0-7].16b}}, {{v[0-7].16b}}
+; CHECKM1: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VC]]
+; CHECKM1: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKM1: aesimc {{v[0-7].16b}}, [[VD]]
+; CHECKM1: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VE]]
+; CHECKM1: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VF]]
+; CHECKM1: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VG]]
+; CHECKM1: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VH]]
+}
diff --git a/test/CodeGen/AArch64/misched-fusion-lit.ll b/test/CodeGen/AArch64/misched-fusion-lit.ll
new file mode 100644
index 000000000000..45aa67ef1d54
--- /dev/null
+++ b/test/CodeGen/AArch64/misched-fusion-lit.ll
@@ -0,0 +1,46 @@
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mattr=-fuse-literals | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKDONT
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mattr=+fuse-literals | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSE
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a57 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSE
+
+@g = common local_unnamed_addr global i8* null, align 8
+
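+; With +fuse-literals the scheduler keeps the instruction pairs that build a
+; literal or address (adrp/add, mov/movk, movk/movk) back to back, which the
+; CHECKFUSE-NEXT lines verify; without it the unrelated add may be scheduled
+; in between (CHECKDONT). A sketch of a fused pair:
+;   adrp x8, litp
+;   add  x8, x8, :lo12:litp // completes the address right after the adrp
+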
+define i8* @litp(i32 %a, i32 %b) {
+entry:
+ %add = add nsw i32 %b, %a
+ %idx.ext = sext i32 %add to i64
+ %add.ptr = getelementptr i8, i8* bitcast (i8* (i32, i32)* @litp to i8*), i64 %idx.ext
+ store i8* %add.ptr, i8** @g, align 8
+ ret i8* %add.ptr
+
+; CHECK-LABEL: litp:
+; CHECK: adrp [[R:x[0-9]+]], litp
+; CHECKDONT-NEXT: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECKFUSE-NEXT: add {{x[0-9]+}}, [[R]], :lo12:litp
+}
+
+define i32 @liti(i32 %a, i32 %b) {
+entry:
+ %add = add i32 %a, -262095121
+ %add1 = add i32 %add, %b
+ ret i32 %add1
+
+; CHECK-LABEL: liti:
+; CHECK: mov [[R:w[0-9]+]], {{#[0-9]+}}
+; CHECKDONT-NEXT: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECKFUSE-NEXT: movk [[R]], {{#[0-9]+}}, lsl #16
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @litl(i64 %a, i64 %b) {
+entry:
+ %add = add i64 %a, 2208998440489107183
+ %add1 = add i64 %add, %b
+ ret i64 %add1
+
+; CHECK-LABEL: litl:
+; CHECK: mov [[R:x[0-9]+]], {{#[0-9]+}}
+; CHECK-NEXT: movk [[R]], {{#[0-9]+}}, lsl #16
+; CHECK: movk [[R]], {{#[0-9]+}}, lsl #32
+; CHECKDONT-NEXT: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECKFUSE-NEXT: movk [[R]], {{#[0-9]+}}, lsl #48
+}
diff --git a/test/CodeGen/AArch64/misched-fusion.ll b/test/CodeGen/AArch64/misched-fusion.ll
index d5dd9c757dfd..1d504a2f1931 100644
--- a/test/CodeGen/AArch64/misched-fusion.ll
+++ b/test/CodeGen/AArch64/misched-fusion.ll
@@ -1,22 +1,14 @@
; RUN: llc -o - %s -mattr=+arith-cbz-fusion | FileCheck %s
; RUN: llc -o - %s -mcpu=cyclone | FileCheck %s
-target triple = "arm64-apple-ios"
+target triple = "aarch64-unknown"
declare void @foobar(i32 %v0, i32 %v1)
; Make sure sub is scheduled in front of cbnz
; CHECK-LABEL: test_sub_cbz:
-; CHECK: add w[[ADDRES:[0-9]+]], w1, #7
; CHECK: sub w[[SUBRES:[0-9]+]], w0, #13
-; CHECK-NEXT: cbnz w[[SUBRES]], [[SKIPBLOCK:LBB[0-9_]+]]
-; CHECK: mov [[REGTY:[x,w]]]0, [[REGTY]][[ADDRES]]
-; CHECK: mov [[REGTY]]1, [[REGTY]][[SUBRES]]
-; CHECK: bl _foobar
-; CHECK: [[SKIPBLOCK]]:
-; CHECK: mov [[REGTY]]0, [[REGTY]][[SUBRES]]
-; CHECK: mov [[REGTY]]1, [[REGTY]][[ADDRES]]
-; CHECK: bl _foobar
+; CHECK-NEXT: cbnz w[[SUBRES]], {{.?LBB[0-9_]+}}
define void @test_sub_cbz(i32 %a0, i32 %a1) {
entry:
; except for the fusion opportunity the sub/add should be equal so the
diff --git a/test/CodeGen/AArch64/movimm-wzr.mir b/test/CodeGen/AArch64/movimm-wzr.mir
index 093f85bd9319..60e9bfa03a96 100644
--- a/test/CodeGen/AArch64/movimm-wzr.mir
+++ b/test/CodeGen/AArch64/movimm-wzr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -run-pass=aarch64-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -run-pass=aarch64-expand-pseudo %s -o - | FileCheck %s
--- |
; ModuleID = 'simple.ll'
diff --git a/test/CodeGen/AArch64/movw-shift-encoding.ll b/test/CodeGen/AArch64/movw-shift-encoding.ll
index 178fccce333b..673bd85bd167 100644
--- a/test/CodeGen/AArch64/movw-shift-encoding.ll
+++ b/test/CodeGen/AArch64/movw-shift-encoding.ll
@@ -8,8 +8,8 @@
define i32* @get_var() {
ret i32* @var
-; CHECK: movz x0, #:abs_g3:var // encoding: [0bAAA00000,A,0b111AAAAA,0xd2]
-; CHECK: movk x0, #:abs_g2_nc:var // encoding: [0bAAA00000,A,0b110AAAAA,0xf2]
-; CHECK: movk x0, #:abs_g1_nc:var // encoding: [0bAAA00000,A,0b101AAAAA,0xf2]
-; CHECK: movk x0, #:abs_g0_nc:var // encoding: [0bAAA00000,A,0b100AAAAA,0xf2]
+; CHECK: movz x0, #:abs_g0_nc:var // encoding: [0bAAA00000,A,0b100AAAAA,0xd2]
+; CHECK: movk x0, #:abs_g1_nc:var // encoding: [0bAAA00000,A,0b101AAAAA,0xf2]
+; CHECK: movk x0, #:abs_g2_nc:var // encoding: [0bAAA00000,A,0b110AAAAA,0xf2]
+; CHECK: movk x0, #:abs_g3:var // encoding: [0bAAA00000,A,0b111AAAAA,0xf2]
}
diff --git a/test/CodeGen/AArch64/neon-fma-FMF.ll b/test/CodeGen/AArch64/neon-fma-FMF.ll
new file mode 100644
index 000000000000..25beef6592b2
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-fma-FMF.ll
@@ -0,0 +1,53 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
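+; fmul followed by fadd/fsub is only combined into fmla/fmls when both
+; instructions carry the 'contract' fast-math flag; a flag on just one of the
+; two is not enough, because contraction skips rounding the intermediate
+; product. A C-level sketch of the contracted case:
+;   // with -ffp-contract=on this may compile to a fused multiply-add
+;   d = a * b + c;
+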
+define <2 x float> @fma(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
+; CHECK-LABEL: fma:
+; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp1 = fmul contract <2 x float> %A, %B;
+ %tmp2 = fadd contract <2 x float> %C, %tmp1;
+ ret <2 x float> %tmp2
+}
+
+define <2 x float> @no_fma_1(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
+; CHECK-LABEL: no_fma_1:
+; CHECK: fmul
+; CHECK: fadd
+ %tmp1 = fmul contract <2 x float> %A, %B;
+ %tmp2 = fadd <2 x float> %C, %tmp1;
+ ret <2 x float> %tmp2
+}
+
+define <2 x float> @no_fma_2(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
+; CHECK-LABEL: no_fma_2:
+; CHECK: fmul
+; CHECK: fadd
+ %tmp1 = fmul <2 x float> %A, %B;
+ %tmp2 = fadd contract <2 x float> %C, %tmp1;
+ ret <2 x float> %tmp2
+}
+
+define <2 x float> @fma_sub(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
+; CHECK-LABEL: fma_sub:
+; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp1 = fmul contract <2 x float> %A, %B;
+ %tmp2 = fsub contract <2 x float> %C, %tmp1;
+ ret <2 x float> %tmp2
+}
+
+define <2 x float> @no_fma_sub_1(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
+; CHECK-LABEL: no_fma_sub_1:
+; CHECK: fmul
+; CHECK: fsub
+ %tmp1 = fmul contract <2 x float> %A, %B;
+ %tmp2 = fsub <2 x float> %C, %tmp1;
+ ret <2 x float> %tmp2
+}
+
+define <2 x float> @no_fma_sub_2(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
+; CHECK-LABEL: no_fma_sub_2:
+; CHECK: fmul
+; CHECK: fsub
+ %tmp1 = fmul <2 x float> %A, %B;
+ %tmp2 = fsub contract <2 x float> %C, %tmp1;
+ ret <2 x float> %tmp2
+}
diff --git a/test/CodeGen/AArch64/optimize-cond-branch.ll b/test/CodeGen/AArch64/optimize-cond-branch.ll
index 4e3ca6f16e78..ab4ad5e2ce93 100644
--- a/test/CodeGen/AArch64/optimize-cond-branch.ll
+++ b/test/CodeGen/AArch64/optimize-cond-branch.ll
@@ -11,7 +11,7 @@ target triple = "arm64--"
;
; CHECK-LABEL: func
; CHECK-NOT: and
-; CHECK: tbnz
+; CHECK: tbz
define void @func() {
%c0 = icmp sgt i64 0, 0
br i1 %c0, label %b1, label %b6
diff --git a/test/CodeGen/AArch64/pr27816.ll b/test/CodeGen/AArch64/pr27816.ll
new file mode 100644
index 000000000000..df15755cf3f5
--- /dev/null
+++ b/test/CodeGen/AArch64/pr27816.ll
@@ -0,0 +1,48 @@
+; RUN: llc %s -mtriple=aarch64 -o - | FileCheck %s
+
+%struct.A = type { i8, i8, i8, i8, i8, i8, i8, i8, i32 }
+
+; The existence of the final i32 value should not prevent the i8s from
+; being merged.
+
+; CHECK-LABEL: @merge_const_store
+; CHECK-NOT: strb
+; CHECK: str x8, [x1]
+; CHECK-NOT: strb
+; CHECK: str wzr, [x1, #8]
+; CHECK-NOT: strb
+define void @merge_const_store(i32 %count, %struct.A* nocapture %p) {
+ %1 = icmp sgt i32 %count, 0
+ br i1 %1, label %.lr.ph, label %._crit_edge
+.lr.ph:
+ %i.02 = phi i32 [ %add, %.lr.ph ], [ 0, %0 ]
+ %.01 = phi %struct.A* [ %addr, %.lr.ph ], [ %p, %0 ]
+ %a2 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 0
+ store i8 1, i8* %a2, align 1
+ %a3 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 1
+ store i8 2, i8* %a3, align 1
+ %a4 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 2
+ store i8 3, i8* %a4, align 1
+ %a5 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 3
+ store i8 4, i8* %a5, align 1
+ %a6 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 4
+ store i8 5, i8* %a6, align 1
+ %a7 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 5
+ store i8 6, i8* %a7, align 1
+ %a8 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 6
+ store i8 7, i8* %a8, align 1
+ %a9 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 7
+ store i8 8, i8* %a9, align 1
+
+ %addr_last = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 8
+ store i32 0, i32* %addr_last, align 4
+
+ %add = add nsw i32 %i.02, 1
+ %addr = getelementptr inbounds %struct.A, %struct.A* %.01, i64 1
+ %exitcond = icmp eq i32 %add, %count
+ br i1 %exitcond, label %._crit_edge, label %.lr.ph
+._crit_edge:
+ ret void
+}
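+
+; With little-endian byte order the eight i8 stores can be merged into one
+; 64-bit store of the constant 0x0807060504030201 (the str x8 checked for
+; above); a C sketch:
+;   i64 k = 0x0807060504030201; // bytes 1..8 in memory order
+;   memcpy(p, &k, 8);           // one str x8
+;   // the trailing i32 is still stored separately: str wzr, [x1, #8]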
diff --git a/test/CodeGen/AArch64/prefixdata.ll b/test/CodeGen/AArch64/prefixdata.ll
new file mode 100644
index 000000000000..f62734c16e52
--- /dev/null
+++ b/test/CodeGen/AArch64/prefixdata.ll
@@ -0,0 +1,29 @@
+; RUN: llc < %s -mtriple=aarch64-apple-darwin | FileCheck --check-prefix=MACHO %s
+; RUN: llc < %s -mtriple=aarch64-pc-linux | FileCheck --check-prefix=ELF %s
+
+@i = linkonce_odr global i32 1
+
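+; Prefix data is laid out immediately before the function's entry point, so a
+; caller that knows the layout can read it back at a negative offset; on
+; Mach-O the function symbol is marked .alt_entry so it can follow the data.
+; A C sketch of reading @f's prefix (for illustration only):
+;   i32 prefix;
+;   memcpy(&prefix, (const char *) &f - 4, 4); // the i32 1 ahead of f
+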
+; MACHO: ltmp0:
+; MACHO-NEXT: .long 1
+; MACHO-NEXT: .alt_entry _f
+; MACHO-NEXT: _f:
+; ELF: .type f,@function
+; ELF-NEXT: .word 1
+; ELF-NEXT: // 0x1
+; ELF-NEXT: f:
+define void @f() prefix i32 1 {
+ ret void
+}
+
+; MACHO: ltmp1:
+; MACHO-NEXT: .quad _i
+; MACHO-NEXT: .alt_entry _g
+; MACHO-NEXT: _g:
+; ELF: .type g,@function
+; ELF-NEXT: .xword i
+; ELF-NEXT: g:
+define void @g() prefix i32* @i {
+ ret void
+}
+
+; MACHO: .subsections_via_symbols
diff --git a/test/CodeGen/AArch64/regcoal-physreg.mir b/test/CodeGen/AArch64/regcoal-physreg.mir
index c6133991171b..813106366968 100644
--- a/test/CodeGen/AArch64/regcoal-physreg.mir
+++ b/test/CodeGen/AArch64/regcoal-physreg.mir
@@ -1,5 +1,7 @@
# RUN: llc -mtriple=aarch64-apple-ios -run-pass=simple-register-coalescing %s -o - | FileCheck %s
--- |
+ declare void @f2()
+
define void @func0() { ret void }
define void @func1() { ret void }
define void @func2() { ret void }
@@ -8,36 +10,25 @@
# Check coalescing of COPYs from reserved physregs.
# CHECK-LABEL: name: func0
name: func0
-registers:
- - { id: 0, class: gpr32 }
- - { id: 1, class: gpr64 }
- - { id: 2, class: gpr64 }
- - { id: 3, class: gpr32 }
- - { id: 4, class: gpr64 }
- - { id: 5, class: gpr32 }
- - { id: 6, class: xseqpairsclass }
- - { id: 7, class: gpr64 }
- - { id: 8, class: gpr64sp }
- - { id: 9, class: gpr64sp }
body: |
bb.0:
; We usually should not coalesce copies from allocatable physregs.
; CHECK: %0 = COPY %w7
; CHECK: STRWui %0, %x1, 0
- %0 = COPY %w7
+ %0 : gpr32 = COPY %w7
STRWui %0, %x1, 0
; It is fine to coalesce copies from reserved physregs
; CHECK-NOT: COPY
; CHECK: STRXui %fp, %x1, 0
- %1 = COPY %fp
+ %1 : gpr64 = COPY %fp
STRXui %1, %x1, 0
; It is not fine to coalesce copies from reserved physregs when they are
; clobbered.
; CHECK: %2 = COPY %fp
; CHECK: STRXui %2, %x1, 0
- %2 = COPY %fp
+ %2 : gpr64 = COPY %fp
%fp = SUBXri %fp, 4, 0
STRXui %2, %x1, 0
@@ -45,7 +36,7 @@ body: |
; clobbered.
; CHECK-NOT: COPY
; CHECK: STRWui %wzr, %x1
- %3 = COPY %wzr
+ %3 : gpr32 = COPY %wzr
dead %wzr = SUBSWri %w1, 0, 0, implicit-def %nzcv
STRWui %3, %x1, 0
@@ -53,13 +44,13 @@ body: |
; clobbered.
; CHECK-NOT: COPY
; CHECK: STRXui %xzr, %x1
- %4 = COPY %xzr
+ %4 : gpr64 = COPY %xzr
dead %wzr = SUBSWri %w1, 0, 0, implicit-def %nzcv
STRXui %4, %x1, 0
; Coalescing COPYs into constant physregs.
; CHECK: %wzr = SUBSWri %w1, 0, 0
- %5 = SUBSWri %w1, 0, 0, implicit-def %nzcv
+ %5 : gpr32 = SUBSWri %w1, 0, 0, implicit-def %nzcv
%wzr = COPY %5
; Only coalesce when the source register is reserved as a whole (this is
@@ -67,12 +58,24 @@ body: |
; of the non-reserved part).
; CHECK: %6 = COPY %x28_fp
; CHECK: HINT 0, implicit %6
- %6 = COPY %x28_fp
+ %6 : xseqpairsclass = COPY %x28_fp
HINT 0, implicit %6
+ ; It is not fine to coalesce copies from reserved physregs when they are
+ ; clobbered by the regmask on a call.
+ ; CHECK: %7 = COPY %x18
+ ; CHECK: BL @f2, csr_aarch64_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
+ ; CHECK: STRXui %7, %x1, 0
+
+ ; Need a def of x18 so that it's not deduced as "constant".
+ %x18 = COPY %xzr
+ %7 : gpr64 = COPY %x18
+ BL @f2, csr_aarch64_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
+ STRXui %7, %x1, 0
+
; This can be coalesced.
; CHECK: %fp = SUBXri %fp, 4, 0
- %8 = SUBXri %fp, 4, 0
+ %8 : gpr64sp = SUBXri %fp, 4, 0
%fp = COPY %8
; Cannot coalesce when there are reads of the physreg.
@@ -80,7 +83,7 @@ body: |
; CHECK: %9 = SUBXri %fp, 8, 0
; CHECK: STRXui %fp, %fp, 0
; CHECK: %fp = COPY %9
- %9 = SUBXri %fp, 8, 0
+ %9 : gpr64sp = SUBXri %fp, 8, 0
STRXui %fp, %fp, 0
%fp = COPY %9
...
@@ -88,8 +91,6 @@ body: |
# Check coalescing of COPYs from reserved physregs.
# CHECK-LABEL: name: func1
name: func1
-registers:
- - { id: 0, class: gpr64sp }
body: |
bb.0:
successors: %bb.1, %bb.2
@@ -99,7 +100,7 @@ body: |
; CHECK: %0 = SUBXri %fp, 12, 0
; CHECK: CBZX undef %x0, %bb.1
; CHECK: B %bb.2
- %0 = SUBXri %fp, 12, 0
+ %0 : gpr64sp = SUBXri %fp, 12, 0
CBZX undef %x0, %bb.1
B %bb.2
@@ -114,8 +115,6 @@ body: |
---
# CHECK-LABEL: name: func2
name: func2
-registers:
- - { id: 0, class: gpr64sp }
body: |
bb.0:
successors: %bb.1, %bb.2
@@ -123,7 +122,7 @@ body: |
; CHECK-NOT: COPY
; CHECK: CBZX undef %x0, %bb.1
; CHECK-NEXT: B %bb.2
- %0 = COPY %fp
+ %0 : gpr64sp = COPY %fp
CBZX undef %x0, %bb.1
B %bb.2
diff --git a/test/CodeGen/AArch64/regress-tblgen-chains.ll b/test/CodeGen/AArch64/regress-tblgen-chains.ll
index 4bec512403c4..24038cda5078 100644
--- a/test/CodeGen/AArch64/regress-tblgen-chains.ll
+++ b/test/CodeGen/AArch64/regress-tblgen-chains.ll
@@ -28,7 +28,7 @@ define i64 @test_chains() {
; CHECK: ldurb {{w[0-9]+}}, [x29, [[LOCADDR:#-?[0-9]+]]]
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #1
; CHECK: sturb w[[STRVAL:[0-9]+]], [x29, [[LOCADDR]]]
-; CHECK; and w0, w[[STRVAL]], #0xff
+; CHECK: and w0, w[[STRVAL]], #0xff
%ret.1 = load i8, i8* %locvar
%ret.2 = zext i8 %ret.1 to i64
diff --git a/test/CodeGen/AArch64/remat.ll b/test/CodeGen/AArch64/remat.ll
index 5081a9da3404..80a054beb2a5 100644
--- a/test/CodeGen/AArch64/remat.ll
+++ b/test/CodeGen/AArch64/remat.ll
@@ -8,7 +8,7 @@
; RUN: llc -mtriple=aarch64-linux-gnuabi -mcpu=exynos-m3 -o - %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnuabi -mcpu=falkor -o - %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnuabi -mcpu=kryo -o - %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-linux-gnuabi -mcpu=vulcan -o - %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnuabi -mcpu=thunderx2t99 -o - %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnuabi -mattr=+custom-cheap-as-move -o - %s | FileCheck %s
%X = type { i64, i64, i64 }
diff --git a/test/CodeGen/AArch64/selectiondag-order.ll b/test/CodeGen/AArch64/selectiondag-order.ll
new file mode 100644
index 000000000000..9427906160fd
--- /dev/null
+++ b/test/CodeGen/AArch64/selectiondag-order.ll
@@ -0,0 +1,96 @@
+; Check that debug intrinsics do not affect code generation.
+
+; RUN: llc < %s -mtriple=aarch64-unknown-unknown | FileCheck --check-prefix=AARCH64-CHECK %s
+
+define i64 @simulate(<2 x i32> %a) {
+entry:
+ %rand = tail call i64 @lrand48()
+ br label %body
+
+body: ; preds = %body, %entry
+ %0 = phi <2 x i32> [ %add, %body ], [ zeroinitializer, %entry ]
+ %add = add <2 x i32> %0, %a
+ %rand1 = tail call i64 @lrand48() #3
+ %cmp = icmp eq i64 %rand1, 0
+ br i1 %cmp, label %end, label %body
+
+end: ; preds = %body
+ %c = bitcast <2 x i32> %add to i64
+ %res = add i64 %rand, %c
+ ret i64 %res
+}
+
+; AARCH64-CHECK: simulate:
+; AARCH64-CHECK: movi d9, #0000000000000000
+; AARCH64-CHECK: bl lrand48
+; AARCH64-CHECK: mov x19, x0
+; AARCH64-CHECK: BB0_1:
+
+
+define i64 @simulateWithDebugIntrinsic(<2 x i32> %a) local_unnamed_addr {
+entry:
+ %rand = tail call i64 @lrand48() #3
+ tail call void @llvm.dbg.value(metadata i64 %rand, i64 0, metadata !6, metadata !7), !dbg !8
+ br label %body
+
+body: ; preds = %body, %entry
+ %0 = phi <2 x i32> [ %add, %body ], [ zeroinitializer, %entry ]
+ %add = add <2 x i32> %0, %a
+ %rand1 = tail call i64 @lrand48() #3
+ %cmp = icmp eq i64 %rand1, 0
+ br i1 %cmp, label %end, label %body
+
+end: ; preds = %body
+ %c = bitcast <2 x i32> %add to i64
+ %res = add i64 %rand, %c
+ ret i64 %res
+}
+
+; AARCH64-CHECK: simulateWithDebugIntrinsic
+; AARCH64-CHECK: movi d9, #0000000000000000
+; AARCH64-CHECK: bl lrand48
+; AARCH64-CHECK: mov x19, x0
+; AARCH64-CHECK: BB1_1:
+
+
+define i64 @simulateWithDbgDeclare(<2 x i32> %a) local_unnamed_addr {
+entry:
+ %rand = tail call i64 @lrand48() #3
+ tail call void @llvm.dbg.declare(metadata i64 %rand, metadata !6, metadata !7), !dbg !8
+ br label %body
+
+body: ; preds = %body, %entry
+ %0 = phi <2 x i32> [ %add, %body ], [ zeroinitializer, %entry ]
+ %add = add <2 x i32> %0, %a
+ %rand1 = tail call i64 @lrand48() #3
+ %cmp = icmp eq i64 %rand1, 0
+ br i1 %cmp, label %end, label %body
+
+end: ; preds = %body
+ %c = bitcast <2 x i32> %add to i64
+ %res = add i64 %rand, %c
+ ret i64 %res
+}
+
+; AARCH64-CHECK: simulateWithDbgDeclare:
+; AARCH64-CHECK: movi d9, #0000000000000000
+; AARCH64-CHECK: bl lrand48
+; AARCH64-CHECK: mov x19, x0
+; AARCH64-CHECK: BB2_1:
+
+declare i64 @lrand48()
+
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata)
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
+
+!llvm.dbg.cu = !{!1}
+!llvm.module.flags = !{!3, !4}
+
+!1 = distinct !DICompileUnit(language: DW_LANG_C99, file: !2, runtimeVersion: 0, emissionKind: FullDebug)
+!2 = !DIFile(filename: "test.ll", directory: ".")
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "simulateWithDebugIntrinsic", scope: !2, file: !2, line: 64, isLocal: false, isDefinition: true, scopeLine: 65, unit: !1)
+!6 = !DILocalVariable(name: "randv", scope: !5, file: !2, line: 69)
+!7 = !DIExpression()
+!8 = !DILocation(line: 132, column: 2, scope: !5)
diff --git a/test/CodeGen/AArch64/stack-protector-target.ll b/test/CodeGen/AArch64/stack-protector-target.ll
index d4d806289bff..787e4a76ec01 100644
--- a/test/CodeGen/AArch64/stack-protector-target.ll
+++ b/test/CodeGen/AArch64/stack-protector-target.ll
@@ -1,5 +1,7 @@
; Test target-specific stack cookie location.
; RUN: llc -mtriple=aarch64-linux-android < %s -o - | FileCheck --check-prefix=ANDROID-AARCH64 %s
+; RUN: llc -mtriple=aarch64-fuchsia < %s -o - | FileCheck --check-prefixes=FUCHSIA-AARCH64-COMMON,FUCHSIA-AARCH64-USER %s
+; RUN: llc -mtriple=aarch64-fuchsia -code-model=kernel < %s -o - | FileCheck --check-prefixes=FUCHSIA-AARCH64-COMMON,FUCHSIA-AARCH64-KERNEL %s
define void @_Z1fv() sspreq {
entry:
@@ -17,3 +19,11 @@ declare void @_Z7CapturePi(i32*)
; ANDROID-AARCH64: ldr [[C:.*]], {{\[}}[[A]], #40]
; ANDROID-AARCH64: ldr [[D:.*]], [sp,
; ANDROID-AARCH64: cmp [[C]], [[D]]
+
+; FUCHSIA-AARCH64-USER: mrs [[A:.*]], TPIDR_EL0
+; FUCHSIA-AARCH64-KERNEL: mrs [[A:.*]], TPIDR_EL1
+; FUCHSIA-AARCH64-COMMON: ldur [[B:.*]], {{\[}}[[A]], #-16]
+; FUCHSIA-AARCH64-COMMON: str [[B]], [sp,
+; FUCHSIA-AARCH64-COMMON: ldur [[C:.*]], {{\[}}[[A]], #-16]
+; FUCHSIA-AARCH64-COMMON: ldr [[D:.*]], [sp,
+; FUCHSIA-AARCH64-COMMON: cmp [[C]], [[D]]
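+
+; On Fuchsia the stack cookie lives at a fixed offset of -16 from the thread
+; pointer: TPIDR_EL0 for user code and TPIDR_EL1 under -code-model=kernel, as
+; the checks above verify.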
diff --git a/test/CodeGen/AArch64/stack_guard_remat.ll b/test/CodeGen/AArch64/stack_guard_remat.ll
index d6bae62e5edc..2b7b3485311a 100644
--- a/test/CodeGen/AArch64/stack_guard_remat.ll
+++ b/test/CodeGen/AArch64/stack_guard_remat.ll
@@ -15,10 +15,10 @@
; PIC-LINUX: ldr {{x[0-9]+}}, {{\[}}[[R1]]{{\]}}
; STATIC-LARGE: foo2
-; STATIC-LARGE: movz [[R0:x[0-9]+]], #:abs_g3:__stack_chk_guard
-; STATIC-LARGE: movk [[R0]], #:abs_g2_nc:__stack_chk_guard
+; STATIC-LARGE: movz [[R0:x[0-9]+]], #:abs_g0_nc:__stack_chk_guard
; STATIC-LARGE: movk [[R0]], #:abs_g1_nc:__stack_chk_guard
-; STATIC-LARGE: movk [[R0]], #:abs_g0_nc:__stack_chk_guard
+; STATIC-LARGE: movk [[R0]], #:abs_g2_nc:__stack_chk_guard
+; STATIC-LARGE: movk [[R0]], #:abs_g3:__stack_chk_guard
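+; The large-code-model address is materialized from the least significant
+; halfword upwards: movz of abs_g0_nc, then movk of abs_g1_nc, abs_g2_nc and
+; abs_g3.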
; STATIC-LARGE: ldr {{x[0-9]+}}, {{\[}}[[R0]]{{\]}}
; STATIC-SMALL: foo2
@@ -29,20 +29,20 @@ define i32 @test_stack_guard_remat() #0 {
entry:
%a1 = alloca [256 x i32], align 4
%0 = bitcast [256 x i32]* %a1 to i8*
- call void @llvm.lifetime.start(i64 1024, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 1024, i8* %0)
%arraydecay = getelementptr inbounds [256 x i32], [256 x i32]* %a1, i64 0, i64 0
call void @foo3(i32* %arraydecay)
call void asm sideeffect "foo2", "~{w0},~{w1},~{w2},~{w3},~{w4},~{w5},~{w6},~{w7},~{w8},~{w9},~{w10},~{w11},~{w12},~{w13},~{w14},~{w15},~{w16},~{w17},~{w18},~{w19},~{w20},~{w21},~{w22},~{w23},~{w24},~{w25},~{w26},~{w27},~{w28},~{w29},~{w30}"()
- call void @llvm.lifetime.end(i64 1024, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 1024, i8* %0)
ret i32 0
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare void @foo3(i32*)
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
attributes #0 = { nounwind sspstrong "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/AArch64/tail-dup-repeat-worklist.ll b/test/CodeGen/AArch64/tail-dup-repeat-worklist.ll
deleted file mode 100644
index c2997c50f4d4..000000000000
--- a/test/CodeGen/AArch64/tail-dup-repeat-worklist.ll
+++ /dev/null
@@ -1,69 +0,0 @@
-; RUN: llc -O3 -o - -verify-machineinstrs %s | FileCheck %s
-target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
-target triple = "aarch64-unknown-linux-gnu"
-
-%struct.s1 = type { %struct.s3*, %struct.s1* }
-%struct.s2 = type opaque
-%struct.s3 = type { i32 }
-
-; Function Attrs: nounwind
-define internal fastcc i32 @repeated_dup_worklist(%struct.s1** %pp1, %struct.s2* %p2, i32 %state, i1 %i1_1, i32 %i32_1) unnamed_addr #0 {
-entry:
- br label %while.cond.outer
-
-; The loop gets laid out:
-; %while.cond.outer
-; %(null)
-; %(null)
-; %dup2
-; and then %dup1 gets chosen as the next block.
-; when dup2 is duplicated into dup1, %worklist could erroneously be placed on
-; the worklist, because all of its current predecessors are now scheduled.
-; However, after dup2 is tail-duplicated, %worklist can't be on the worklist
-; because it now has unscheduled predecessors.
-; CHECK-LABEL: repeated_dup_worklist
-; CHECK: // %entry
-; CHECK: // %while.cond.outer
-; first %(null) block
-; CHECK: // in Loop:
-; CHECK: ldr
-; CHECK-NEXT: tbnz
-; second %(null) block
-; CHECK: // in Loop:
-; CHECK: // %dup2
-; CHECK: // %worklist
-; CHECK: // %if.then96.i
-while.cond.outer: ; preds = %dup1, %entry
- %progress.0.ph = phi i32 [ 0, %entry ], [ %progress.1, %dup1 ]
- %inc77 = add nsw i32 %progress.0.ph, 1
- %cmp = icmp slt i32 %progress.0.ph, %i32_1
- br i1 %cmp, label %dup2, label %dup1
-
-dup2: ; preds = %if.then96.i, %worklist, %while.cond.outer
- %progress.1.ph = phi i32 [ 0, %while.cond.outer ], [ %progress.1, %if.then96.i ], [ %progress.1, %worklist ]
- %.pr = load %struct.s1*, %struct.s1** %pp1, align 8
- br label %dup1
-
-dup1: ; preds = %dup2, %while.cond.outer
- %0 = phi %struct.s1* [ %.pr, %dup2 ], [ undef, %while.cond.outer ]
- %progress.1 = phi i32 [ %progress.1.ph, %dup2 ], [ %inc77, %while.cond.outer ]
- br i1 %i1_1, label %while.cond.outer, label %worklist
-
-worklist: ; preds = %dup1
- %snode94 = getelementptr inbounds %struct.s1, %struct.s1* %0, i64 0, i32 0
- %1 = load %struct.s3*, %struct.s3** %snode94, align 8
- %2 = getelementptr inbounds %struct.s3, %struct.s3* %1, i32 0, i32 0
- %3 = load i32, i32* %2, align 4
- %tobool95.i = icmp eq i32 %3, 0
- br i1 %tobool95.i, label %if.then96.i, label %dup2
-
-if.then96.i: ; preds = %worklist
- call fastcc void @free_s3(%struct.s2* %p2, %struct.s3* %1) #1
- br label %dup2
-}
-
-; Function Attrs: nounwind
-declare fastcc void @free_s3(%struct.s2*, %struct.s3*) unnamed_addr #0
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a57" "target-features"="+crc,+crypto,+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind }
diff --git a/test/CodeGen/AArch64/tailcall-string-rvo.ll b/test/CodeGen/AArch64/tailcall-string-rvo.ll
new file mode 100644
index 000000000000..bdc09235afd9
--- /dev/null
+++ b/test/CodeGen/AArch64/tailcall-string-rvo.ll
@@ -0,0 +1,47 @@
+; RUN: llc -relocation-model=static -verify-machineinstrs -O2 < %s | FileCheck %s
+
+; The call to function TestBar should be a tail call when, in C++, the string
+; `ret` is RVO-returned:
+; string TestFoo() {
+; string ret = undef;
+; TestBar(&ret); // tail call optimized
+; return ret;
+; }
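+;
+; Both TestFoo and TestBar take the same sret pointer, so TestBar can write
+; its result directly into TestFoo's return slot and the call lowers to a
+; plain branch (see the `b TestBar` check below).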
+
+target triple = "aarch64-linux-gnu"
+
+%class.basic_string.11.42.73 = type { %"class.__gnu_cxx::__versa_string.10.41.72" }
+%"class.__gnu_cxx::__versa_string.10.41.72" = type { %"class.__gnu_cxx::__sso_string_base.9.40.71" }
+%"class.__gnu_cxx::__sso_string_base.9.40.71" = type { %"struct.__gnu_cxx::__vstring_utility<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider.7.38.69", i64, %union.anon.8.39.70 }
+%"struct.__gnu_cxx::__vstring_utility<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider.7.38.69" = type { i8* }
+%union.anon.8.39.70 = type { i64, [8 x i8] }
+
+declare void @TestBaz(%class.basic_string.11.42.73* noalias sret %arg)
+
+define void @TestBar(%class.basic_string.11.42.73* noalias sret %arg) {
+bb:
+ call void @TestBaz(%class.basic_string.11.42.73* noalias sret %arg)
+ ret void
+}
+
+define void @TestFoo(%class.basic_string.11.42.73* noalias sret %arg) {
+; CHECK-LABEL: TestFoo:
+; CHECK: b TestBar
+bb:
+ %tmp = getelementptr inbounds %class.basic_string.11.42.73, %class.basic_string.11.42.73* %arg, i64 0, i32 0, i32 0, i32 2
+ %tmp1 = bitcast %class.basic_string.11.42.73* %arg to %union.anon.8.39.70**
+ store %union.anon.8.39.70* %tmp, %union.anon.8.39.70** %tmp1, align 8
+ %tmp2 = bitcast %union.anon.8.39.70* %tmp to i8*
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp2, i8* nonnull undef, i64 13, i32 1, i1 false)
+ %tmp3 = getelementptr inbounds %class.basic_string.11.42.73, %class.basic_string.11.42.73* %arg, i64 0, i32 0, i32 0, i32 1
+ store i64 13, i64* %tmp3, align 8
+ %tmp4 = getelementptr inbounds %class.basic_string.11.42.73, %class.basic_string.11.42.73* %arg, i64 0, i32 0, i32 0, i32 2, i32 1, i64 5
+ store i8 0, i8* %tmp4, align 1
+ tail call void @TestBar(%class.basic_string.11.42.73* noalias sret %arg)
+ ret void
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #0
+
+attributes #0 = { argmemonly nounwind }
diff --git a/test/CodeGen/AArch64/tbz-tbnz.ll b/test/CodeGen/AArch64/tbz-tbnz.ll
index 0dd265c18ec7..7ef78ca52a24 100644
--- a/test/CodeGen/AArch64/tbz-tbnz.ll
+++ b/test/CodeGen/AArch64/tbz-tbnz.ll
@@ -10,7 +10,7 @@ entry:
br i1 %cmp, label %if.then, label %if.end
; CHECK: sub [[CMP:w[0-9]+]], w0, #12
-; CHECK: tbz [[CMP]], #31
+; CHECK: tbnz [[CMP]], #31
if.then:
call void @t()
@@ -28,7 +28,7 @@ entry:
br i1 %cmp, label %if.then, label %if.end
; CHECK: sub [[CMP:x[0-9]+]], x0, #12
-; CHECK: tbz [[CMP]], #63
+; CHECK: tbnz [[CMP]], #63
if.then:
call void @t()
@@ -118,7 +118,7 @@ entry:
br i1 %cmp, label %if.then, label %if.end
; CHECK: sub [[CMP:w[0-9]+]], w0, #12
-; CHECK: tbz [[CMP]], #31
+; CHECK: tbnz [[CMP]], #31
if.then:
call void @t()
@@ -178,7 +178,7 @@ define void @test9(i64 %val1) {
br i1 %tst, label %if.then, label %if.end
; CHECK-NOT: cmp
-; CHECK: tbz x0, #63
+; CHECK: tbnz x0, #63
if.then:
call void @t()
@@ -194,7 +194,7 @@ define void @test10(i64 %val1) {
br i1 %tst, label %if.then, label %if.end
; CHECK-NOT: cmp
-; CHECK: tbz x0, #63
+; CHECK: tbnz x0, #63
if.then:
call void @t()
@@ -209,7 +209,7 @@ define void @test11(i64 %val1, i64* %ptr) {
; CHECK: ldr [[CMP:x[0-9]+]], [x1]
; CHECK-NOT: cmp
-; CHECK: tbz [[CMP]], #63
+; CHECK: tbnz [[CMP]], #63
%val = load i64, i64* %ptr
%tst = icmp slt i64 %val, 0
@@ -229,7 +229,7 @@ define void @test12(i64 %val1) {
br i1 %tst, label %if.then, label %if.end
; CHECK-NOT: cmp
-; CHECK: tbz x0, #63
+; CHECK: tbnz x0, #63
if.then:
call void @t()
@@ -247,7 +247,7 @@ define void @test13(i64 %val1, i64 %val2) {
; CHECK: orr [[CMP:x[0-9]+]], x0, x1
; CHECK-NOT: cmp
-; CHECK: tbz [[CMP]], #63
+; CHECK: tbnz [[CMP]], #63
if.then:
call void @t()
diff --git a/test/CodeGen/AArch64/thread-pointer.ll b/test/CodeGen/AArch64/thread-pointer.ll
new file mode 100644
index 000000000000..91585791a58e
--- /dev/null
+++ b/test/CodeGen/AArch64/thread-pointer.ll
@@ -0,0 +1,60 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
+
+@x = thread_local local_unnamed_addr global i32 0, align 4
+@y = thread_local local_unnamed_addr global i32 0, align 4
+
+; Machine LICM should hoist the mrs into the loop preheader.
+; CHECK-LABEL: @test1
+; CHECK: BB#1:
+; CHECK: mrs x[[BASE:[0-9]+]], TPIDR_EL0
+; CHECK: add x[[REG1:[0-9]+]], x[[BASE]], :tprel_hi12:x
+; CHECK: add x[[REG2:[0-9]+]], x[[REG1]], :tprel_lo12_nc:x
+;
+; CHECK: .LBB0_2:
+; CHECK: ldr w0, [x[[REG2]]]
+; CHECK: bl bar
+; CHECK: sub w[[REG3:[0-9]+]], w{{[0-9]+}}, #1
+; CHECK: cbnz w[[REG3]], .LBB0_2
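+;
+; The :tprel_hi12:/:tprel_lo12_nc: adds compute the local-exec TLS offset of x
+; relative to the thread pointer read by the hoisted mrs.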
+
+define void @test1(i32 %n) local_unnamed_addr {
+entry:
+ %cmp3 = icmp sgt i32 %n, 0
+ br i1 %cmp3, label %bb1, label %bb2
+
+bb1:
+ br label %for.body
+
+for.body:
+ %i.04 = phi i32 [ %inc, %for.body ], [ 0, %bb1 ]
+ %0 = load i32, i32* @x, align 4
+ tail call void @bar(i32 %0) #2
+ %inc = add nuw nsw i32 %i.04, 1
+ %exitcond = icmp eq i32 %inc, %n
+ br i1 %exitcond, label %bb2, label %for.body
+
+bb2:
+ ret void
+}
+
+; Machine CSE should combine the two mrs instructions between the loads of @x and @y.
+; CHECK-LABEL: @test2
+; CHECK: mrs x{{[0-9]+}}, TPIDR_EL0
+; CHECK-NOT: mrs x{{[0-9]+}}, TPIDR_EL0
+; CHECK: ret
+define void @test2(i32 %c) local_unnamed_addr #0 {
+entry:
+ %0 = load i32, i32* @x, align 4
+ tail call void @bar(i32 %0) #2
+ %cmp = icmp eq i32 %c, 0
+ br i1 %cmp, label %if.end, label %if.then
+
+if.then:
+ %1 = load i32, i32* @y, align 4
+ tail call void @bar(i32 %1) #2
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+declare void @bar(i32) local_unnamed_addr
diff --git a/test/CodeGen/AArch64/vector_merge_dep_check.ll b/test/CodeGen/AArch64/vector_merge_dep_check.ll
index 9220947e8362..e4e64ef8c8db 100644
--- a/test/CodeGen/AArch64/vector_merge_dep_check.ll
+++ b/test/CodeGen/AArch64/vector_merge_dep_check.ll
@@ -1,5 +1,4 @@
-; RUN: llc --combiner-alias-analysis=false < %s | FileCheck %s
-; RUN: llc --combiner-alias-analysis=true < %s | FileCheck %s
+; RUN: llc < %s | FileCheck %s
; This test checks that we do not merge stores together which have
; dependencies through their non-chain operands (e.g. one store is the
diff --git a/test/CodeGen/AArch64/xray-tail-call-sled.ll b/test/CodeGen/AArch64/xray-tail-call-sled.ll
new file mode 100644
index 000000000000..6ada3ce8d551
--- /dev/null
+++ b/test/CodeGen/AArch64/xray-tail-call-sled.ll
@@ -0,0 +1,69 @@
+; RUN: llc -filetype=asm -o - -mtriple=aarch64-linux-gnu < %s | FileCheck %s
+
+define i32 @callee() nounwind noinline uwtable "function-instrument"="xray-always" {
+; CHECK: .p2align 2
+; CHECK-LABEL: .Lxray_sled_0:
+; CHECK-NEXT: b #32
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-LABEL: .Ltmp0:
+ ret i32 0
+; CHECK-NEXT: mov w0, wzr
+; CHECK-NEXT: .p2align 2
+; CHECK-LABEL: .Lxray_sled_1:
+; CHECK-NEXT: b #32
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-LABEL: .Ltmp1:
+; CHECK-NEXT: ret
+}
+; CHECK: .p2align 4
+; CHECK-NEXT: .xword .Lxray_synthetic_0
+; CHECK-NEXT: .section xray_instr_map,{{.*}}
+; CHECK-LABEL: Lxray_synthetic_0:
+; CHECK: .xword .Lxray_sled_0
+; CHECK: .xword .Lxray_sled_1
+
+define i32 @caller() nounwind noinline uwtable "function-instrument"="xray-always" {
+; CHECK: .p2align 2
+; CHECK-LABEL: .Lxray_sled_2:
+; CHECK-NEXT: b #32
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-LABEL: .Ltmp2:
+; CHECK: .p2align 2
+; CHECK-LABEL: .Lxray_sled_3:
+; CHECK-NEXT: b #32
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-LABEL: .Ltmp3:
+ %retval = tail call i32 @callee()
+; CHECK: b callee
+ ret i32 %retval
+}
+; CHECK: .p2align 4
+; CHECK-NEXT: .xword .Lxray_synthetic_1
+; CHECK-NEXT: .section xray_instr_map,{{.*}}
+; CHECK-LABEL: Lxray_synthetic_1:
+; CHECK: .xword .Lxray_sled_2
+; CHECK: .xword .Lxray_sled_3
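+
+; Each sled is a branch over seven nops (eight 4-byte instructions, matching
+; the `b #32`), giving XRay a 32-byte region it can patch at runtime.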
diff --git a/test/CodeGen/AMDGPU/32-bit-local-address-space.ll b/test/CodeGen/AMDGPU/32-bit-local-address-space.ll
index edad18e244d0..ca661cf9a712 100644
--- a/test/CodeGen/AMDGPU/32-bit-local-address-space.ll
+++ b/test/CodeGen/AMDGPU/32-bit-local-address-space.ll
@@ -13,7 +13,7 @@
; FUNC-LABEL: {{^}}local_address_load:
; SI: v_mov_b32_e{{32|64}} [[PTR:v[0-9]]]
; SI: ds_read_b32 v{{[0-9]+}}, [[PTR]]
-define void @local_address_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
+define amdgpu_kernel void @local_address_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
entry:
%0 = load i32, i32 addrspace(3)* %in
store i32 %0, i32 addrspace(1)* %out
@@ -24,7 +24,7 @@ entry:
; SI: s_add_i32 [[SPTR:s[0-9]]]
; SI: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
; SI: ds_read_b32 [[VPTR]]
-define void @local_address_gep(i32 addrspace(1)* %out, i32 addrspace(3)* %in, i32 %offset) {
+define amdgpu_kernel void @local_address_gep(i32 addrspace(1)* %out, i32 addrspace(3)* %in, i32 %offset) {
entry:
%0 = getelementptr i32, i32 addrspace(3)* %in, i32 %offset
%1 = load i32, i32 addrspace(3)* %0
@@ -35,7 +35,7 @@ entry:
; FUNC-LABEL: {{^}}local_address_gep_const_offset:
; SI: v_mov_b32_e32 [[VPTR:v[0-9]+]], s{{[0-9]+}}
; SI: ds_read_b32 v{{[0-9]+}}, [[VPTR]] offset:4
-define void @local_address_gep_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
+define amdgpu_kernel void @local_address_gep_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
entry:
%0 = getelementptr i32, i32 addrspace(3)* %in, i32 1
%1 = load i32, i32 addrspace(3)* %0
@@ -48,7 +48,7 @@ entry:
; SI: s_add_i32 [[SPTR:s[0-9]]], s{{[0-9]+}}, 0x10004
; SI: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
; SI: ds_read_b32 [[VPTR]]
-define void @local_address_gep_large_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
+define amdgpu_kernel void @local_address_gep_large_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
entry:
%0 = getelementptr i32, i32 addrspace(3)* %in, i32 16385
%1 = load i32, i32 addrspace(3)* %0
@@ -60,7 +60,7 @@ entry:
; SI: v_cmp_ne_u32
; SI-NOT: v_cmp_ne_u32
; SI: v_cndmask_b32
-define void @null_32bit_lds_ptr(i32 addrspace(1)* %out, i32 addrspace(3)* %lds) nounwind {
+define amdgpu_kernel void @null_32bit_lds_ptr(i32 addrspace(1)* %out, i32 addrspace(3)* %lds) nounwind {
%cmp = icmp ne i32 addrspace(3)* %lds, null
%x = select i1 %cmp, i32 123, i32 456
store i32 %x, i32 addrspace(1)* %out
@@ -71,7 +71,7 @@ define void @null_32bit_lds_ptr(i32 addrspace(1)* %out, i32 addrspace(3)* %lds)
; SI: s_mul_i32
; SI-NEXT: s_add_i32
; SI: ds_read_b32
-define void @mul_32bit_ptr(float addrspace(1)* %out, [3 x float] addrspace(3)* %lds, i32 %tid) {
+define amdgpu_kernel void @mul_32bit_ptr(float addrspace(1)* %out, [3 x float] addrspace(3)* %lds, i32 %tid) {
%ptr = getelementptr [3 x float], [3 x float] addrspace(3)* %lds, i32 %tid, i32 0
%val = load float, float addrspace(3)* %ptr
store float %val, float addrspace(1)* %out
@@ -83,7 +83,7 @@ define void @mul_32bit_ptr(float addrspace(1)* %out, [3 x float] addrspace(3)* %
; FUNC-LABEL: {{^}}infer_ptr_alignment_global_offset:
; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0
; SI: ds_read_b32 v{{[0-9]+}}, [[REG]]
-define void @infer_ptr_alignment_global_offset(float addrspace(1)* %out, i32 %tid) {
+define amdgpu_kernel void @infer_ptr_alignment_global_offset(float addrspace(1)* %out, i32 %tid) {
%val = load float, float addrspace(3)* @g_lds
store float %val, float addrspace(1)* %out
ret void
@@ -95,14 +95,14 @@ define void @infer_ptr_alignment_global_offset(float addrspace(1)* %out, i32 %ti
; FUNC-LABEL: {{^}}global_ptr:
; SI: ds_write_b32
-define void @global_ptr() nounwind {
+define amdgpu_kernel void @global_ptr() nounwind {
store i32 addrspace(3)* getelementptr ([16383 x i32], [16383 x i32] addrspace(3)* @dst, i32 0, i32 16), i32 addrspace(3)* addrspace(3)* @ptr
ret void
}
; FUNC-LABEL: {{^}}local_address_store:
; SI: ds_write_b32
-define void @local_address_store(i32 addrspace(3)* %out, i32 %val) {
+define amdgpu_kernel void @local_address_store(i32 addrspace(3)* %out, i32 %val) {
store i32 %val, i32 addrspace(3)* %out
ret void
}
@@ -111,7 +111,7 @@ define void @local_address_store(i32 addrspace(3)* %out, i32 %val) {
; SI: s_add_i32 [[SADDR:s[0-9]+]],
; SI: v_mov_b32_e32 [[ADDR:v[0-9]+]], [[SADDR]]
; SI: ds_write_b32 [[ADDR]], v{{[0-9]+}}
-define void @local_address_gep_store(i32 addrspace(3)* %out, i32, i32 %val, i32 %offset) {
+define amdgpu_kernel void @local_address_gep_store(i32 addrspace(3)* %out, i32, i32 %val, i32 %offset) {
%gep = getelementptr i32, i32 addrspace(3)* %out, i32 %offset
store i32 %val, i32 addrspace(3)* %gep, align 4
ret void
@@ -121,7 +121,7 @@ define void @local_address_gep_store(i32 addrspace(3)* %out, i32, i32 %val, i32
; SI: v_mov_b32_e32 [[VPTR:v[0-9]+]], s{{[0-9]+}}
; SI: v_mov_b32_e32 [[VAL:v[0-9]+]], s{{[0-9]+}}
; SI: ds_write_b32 [[VPTR]], [[VAL]] offset:4
-define void @local_address_gep_const_offset_store(i32 addrspace(3)* %out, i32 %val) {
+define amdgpu_kernel void @local_address_gep_const_offset_store(i32 addrspace(3)* %out, i32 %val) {
%gep = getelementptr i32, i32 addrspace(3)* %out, i32 1
store i32 %val, i32 addrspace(3)* %gep, align 4
ret void
@@ -132,7 +132,7 @@ define void @local_address_gep_const_offset_store(i32 addrspace(3)* %out, i32 %v
; SI: s_add_i32 [[SPTR:s[0-9]]], s{{[0-9]+}}, 0x10004
; SI: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
; SI: ds_write_b32 [[VPTR]], v{{[0-9]+$}}
-define void @local_address_gep_large_const_offset_store(i32 addrspace(3)* %out, i32 %val) {
+define amdgpu_kernel void @local_address_gep_large_const_offset_store(i32 addrspace(3)* %out, i32 %val) {
%gep = getelementptr i32, i32 addrspace(3)* %out, i32 16385
store i32 %val, i32 addrspace(3)* %gep, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir b/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir
new file mode 100644
index 000000000000..56a9e7022db9
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir
@@ -0,0 +1,28 @@
+# RUN: llc -march=amdgcn -mcpu=hawaii -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s -check-prefixes=GCN
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s -check-prefixes=GCN
+
+# REQUIRES: global-isel
+
+--- |
+ define amdgpu_kernel void @global_addrspace(i32 addrspace(1)* %global0) { ret void }
+...
+---
+
+name: global_addrspace
+legalized: true
+regBankSelected: true
+
+# GCN: global_addrspace
+# GCN: [[PTR:%[0-9]+]] = COPY %vgpr0_vgpr1
+# GCN: FLAT_LOAD_DWORD [[PTR]], 0, 0, 0
+
+body: |
+ bb.0:
+ liveins: %vgpr0_vgpr1
+
+ %0:vgpr(p1) = COPY %vgpr0_vgpr1
+ %1:vgpr(s32) = G_LOAD %0 :: (load 4 from %ir.global0)
+ %vgpr0 = COPY %1
+
+...
+---
diff --git a/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir b/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir
new file mode 100644
index 000000000000..ea2ad2ba83a5
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir
@@ -0,0 +1,142 @@
+# RUN: llc -march=amdgcn -mcpu=tahiti -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s -check-prefixes=GCN,SI,SICI,SIVI
+# RUN: llc -march=amdgcn -mcpu=hawaii -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s -check-prefixes=GCN,CI,SICI
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s -check-prefixes=GCN,VI,SIVI
+
+# REQUIRES: global-isel
+
+--- |
+ define amdgpu_kernel void @smrd_imm(i32 addrspace(2)* %const0) { ret void }
+...
+---
+
+name: smrd_imm
+legalized: true
+regBankSelected: true
+
+# GCN: body:
+# GCN: [[PTR:%[0-9]+]] = COPY %sgpr0_sgpr1
+
+# Immediate offset:
+# SICI: S_LOAD_DWORD_IMM [[PTR]], 1, 0
+# VI: S_LOAD_DWORD_IMM [[PTR]], 4, 0
+
+# Max immediate offset for SI
+# SICI: S_LOAD_DWORD_IMM [[PTR]], 255, 0
+# VI: S_LOAD_DWORD_IMM [[PTR]], 1020, 0
+
+# Immediate overflow for SI
+# SI: [[K1024:%[0-9]+]] = S_MOV_B32 1024
+# SI: S_LOAD_DWORD_SGPR [[PTR]], [[K1024]], 0
+# CI: S_LOAD_DWORD_IMM_ci [[PTR]], 256, 0
+# VI: S_LOAD_DWORD_IMM [[PTR]], 1024, 0
+
+# Max immediate offset for VI
+# SI: [[K1048572:%[0-9]+]] = S_MOV_B32 1048572
+# CI: S_LOAD_DWORD_IMM_ci [[PTR]], 262143
+# VI: S_LOAD_DWORD_IMM [[PTR]], 1048572
+
+# Immediate overflow for VI
+# SIVI: [[K1048576:%[0-9]+]] = S_MOV_B32 1048576
+# SIVI: S_LOAD_DWORD_SGPR [[PTR]], [[K1048576]], 0
+# CI: S_LOAD_DWORD_IMM_ci [[PTR]], 262144, 0
+
+# Max immediate for CI
+# SIVI: [[K_LO:%[0-9]+]] = S_MOV_B32 4294967292
+# SIVI: [[K_HI:%[0-9]+]] = S_MOV_B32 3
+# SIVI: [[K:%[0-9]+]] = REG_SEQUENCE [[K_LO]], 1, [[K_HI]], 2
+# SIVI-DAG: [[K_SUB0:%[0-9]+]] = COPY [[K]].sub0
+# SIVI-DAG: [[PTR_LO:%[0-9]+]] = COPY [[PTR]].sub0
+# SIVI: [[ADD_PTR_LO:%[0-9]+]] = S_ADD_U32 [[PTR_LO]], [[K_SUB0]]
+# SIVI-DAG: [[K_SUB1:%[0-9]+]] = COPY [[K]].sub1
+# SIVI-DAG: [[PTR_HI:%[0-9]+]] = COPY [[PTR]].sub1
+# SIVI: [[ADD_PTR_HI:%[0-9]+]] = S_ADDC_U32 [[PTR_HI]], [[K_SUB1]]
+# SIVI: [[ADD_PTR:%[0-9]+]] = REG_SEQUENCE [[ADD_PTR_LO]], 1, [[ADD_PTR_HI]], 2
+# SIVI: S_LOAD_DWORD_IMM [[ADD_PTR]], 0, 0
+# CI: S_LOAD_DWORD_IMM_ci [[PTR]], 4294967295, 0
+
+# Immediate overflow for CI
+# GCN: [[K_LO:%[0-9]+]] = S_MOV_B32 0
+# GCN: [[K_HI:%[0-9]+]] = S_MOV_B32 4
+# GCN: [[K:%[0-9]+]] = REG_SEQUENCE [[K_LO]], 1, [[K_HI]], 2
+# GCN-DAG: [[K_SUB0:%[0-9]+]] = COPY [[K]].sub0
+# GCN-DAG: [[PTR_LO:%[0-9]+]] = COPY [[PTR]].sub0
+# GCN: [[ADD_PTR_LO:%[0-9]+]] = S_ADD_U32 [[PTR_LO]], [[K_SUB0]]
+# GCN-DAG: [[K_SUB1:%[0-9]+]] = COPY [[K]].sub1
+# GCN-DAG: [[PTR_HI:%[0-9]+]] = COPY [[PTR]].sub1
+# GCN: [[ADD_PTR_HI:%[0-9]+]] = S_ADDC_U32 [[PTR_HI]], [[K_SUB1]]
+# GCN: [[ADD_PTR:%[0-9]+]] = REG_SEQUENCE [[ADD_PTR_LO]], 1, [[ADD_PTR_HI]], 2
+# GCN: S_LOAD_DWORD_IMM [[ADD_PTR]], 0, 0
+
+# Max 32-bit byte offset
+# SIVI: [[K4294967292:%[0-9]+]] = S_MOV_B32 4294967292
+# SIVI: S_LOAD_DWORD_SGPR [[PTR]], [[K4294967292]], 0
+# CI: S_LOAD_DWORD_IMM_ci [[PTR]], 1073741823, 0
+
+# Overflow 32-bit byte offset
+# SIVI: [[K_LO:%[0-9]+]] = S_MOV_B32 0
+# SIVI: [[K_HI:%[0-9]+]] = S_MOV_B32 1
+# SIVI: [[K:%[0-9]+]] = REG_SEQUENCE [[K_LO]], 1, [[K_HI]], 2
+# SIVI-DAG: [[K_SUB0:%[0-9]+]] = COPY [[K]].sub0
+# SIVI-DAG: [[PTR_LO:%[0-9]+]] = COPY [[PTR]].sub0
+# SIVI: [[ADD_PTR_LO:%[0-9]+]] = S_ADD_U32 [[PTR_LO]], [[K_SUB0]]
+# SIVI-DAG: [[K_SUB1:%[0-9]+]] = COPY [[K]].sub1
+# SIVI-DAG: [[PTR_HI:%[0-9]+]] = COPY [[PTR]].sub1
+# SIVI: [[ADD_PTR_HI:%[0-9]+]] = S_ADDC_U32 [[PTR_HI]], [[K_SUB1]]
+# SIVI: [[ADD_PTR:%[0-9]+]] = REG_SEQUENCE [[ADD_PTR_LO]], 1, [[ADD_PTR_HI]], 2
+# SIVI: S_LOAD_DWORD_IMM [[ADD_PTR]], 0, 0
+# CI: S_LOAD_DWORD_IMM_ci [[PTR]], 1073741824, 0
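+
+# Summarizing the cases above: SI folds dword offsets up to 8 bits, CI up to
+# 32 bits, and VI byte offsets up to 20 bits; larger offsets are materialized
+# in SGPRs (S_MOV_B32/REG_SEQUENCE) and added to the base pointer.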
+
+body: |
+ bb.0:
+ liveins: %sgpr0_sgpr1
+
+ %0:sgpr(p2) = COPY %sgpr0_sgpr1
+
+ %1:sgpr(s64) = G_CONSTANT i64 4
+ %2:sgpr(p2) = G_GEP %0, %1
+ %3:sgpr(s32) = G_LOAD %2 :: (load 4 from %ir.const0)
+ %sgpr0 = COPY %3
+
+ %4:sgpr(s64) = G_CONSTANT i64 1020
+ %5:sgpr(p2) = G_GEP %0, %4
+ %6:sgpr(s32) = G_LOAD %5 :: (load 4 from %ir.const0)
+ %sgpr0 = COPY %6
+
+ %7:sgpr(s64) = G_CONSTANT i64 1024
+ %8:sgpr(p2) = G_GEP %0, %7
+ %9:sgpr(s32) = G_LOAD %8 :: (load 4 from %ir.const0)
+ %sgpr0 = COPY %9
+
+ %10:sgpr(s64) = G_CONSTANT i64 1048572
+ %11:sgpr(p2) = G_GEP %0, %10
+ %12:sgpr(s32) = G_LOAD %11 :: (load 4 from %ir.const0)
+ %sgpr0 = COPY %12
+
+ %13:sgpr(s64) = G_CONSTANT i64 1048576
+ %14:sgpr(p2) = G_GEP %0, %13
+ %15:sgpr(s32) = G_LOAD %14 :: (load 4 from %ir.const0)
+ %sgpr0 = COPY %15
+
+ %16:sgpr(s64) = G_CONSTANT i64 17179869180
+ %17:sgpr(p2) = G_GEP %0, %16
+ %18:sgpr(s32) = G_LOAD %17 :: (load 4 from %ir.const0)
+ %sgpr0 = COPY %18
+
+ %19:sgpr(s64) = G_CONSTANT i64 17179869184
+ %20:sgpr(p2) = G_GEP %0, %19
+ %21:sgpr(s32) = G_LOAD %20 :: (load 4 from %ir.const0)
+ %sgpr0 = COPY %21
+
+ %22:sgpr(s64) = G_CONSTANT i64 4294967292
+ %23:sgpr(p2) = G_GEP %0, %22
+ %24:sgpr(s32) = G_LOAD %23 :: (load 4 from %ir.const0)
+ %sgpr0 = COPY %24
+
+ %25:sgpr(s64) = G_CONSTANT i64 4294967296
+ %26:sgpr(p2) = G_GEP %0, %25
+ %27:sgpr(s32) = G_LOAD %26 :: (load 4 from %ir.const0)
+ %sgpr0 = COPY %27
+
+...
+---
diff --git a/test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir b/test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir
new file mode 100644
index 000000000000..ea435725bf25
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir
@@ -0,0 +1,29 @@
+# RUN: llc -march=amdgcn -mcpu=hawaii -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s -check-prefixes=GCN
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s -check-prefixes=GCN
+
+# REQUIRES: global-isel
+
+--- |
+ define amdgpu_kernel void @global_addrspace(i32 addrspace(1)* %global0) { ret void }
+...
+---
+
+name: global_addrspace
+legalized: true
+regBankSelected: true
+
+# GCN: global_addrspace
+# GCN: [[PTR:%[0-9]+]] = COPY %vgpr0_vgpr1
+# GCN: [[VAL:%[0-9]+]] = COPY %vgpr2
+# GCN: FLAT_STORE_DWORD [[PTR]], [[VAL]], 0, 0, 0
+
+body: |
+ bb.0:
+ liveins: %vgpr0_vgpr1, %vgpr2
+
+ %0:vgpr(p1) = COPY %vgpr0_vgpr1
+ %1:vgpr(s32) = COPY %vgpr2
+ G_STORE %1, %0 :: (store 4 into %ir.global0)
+
+...
+---
diff --git a/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir b/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir
new file mode 100644
index 000000000000..3496b1ab71fe
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir
@@ -0,0 +1,69 @@
+# RUN: llc -march=amdgcn -mcpu=hawaii -run-pass=regbankselect -global-isel %s -verify-machineinstrs -o - | FileCheck %s
+
+# REQUIRES: global-isel
+
+--- |
+ define amdgpu_kernel void @load_constant(i32 addrspace(2)* %ptr0) { ret void }
+ define amdgpu_kernel void @load_global_uniform(i32 addrspace(1)* %ptr1) {
+ %tmp0 = load i32, i32 addrspace(1)* %ptr1
+ ret void
+ }
+ define amdgpu_kernel void @load_global_non_uniform(i32 addrspace(1)* %ptr2) {
+ %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0
+ %tmp1 = getelementptr i32, i32 addrspace(1)* %ptr2, i32 %tmp0
+ %tmp2 = load i32, i32 addrspace(1)* %tmp1
+ ret void
+ }
+ declare i32 @llvm.amdgcn.workitem.id.x() #0
+ attributes #0 = { nounwind readnone }
+...
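+
+# Uniform loads (constant address space, or global with a uniform pointer)
+# should be assigned the sgpr bank; the load indexed by workitem.id.x is
+# divergent and should get vgpr operands.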
+
+---
+name: load_constant
+legalized: true
+
+# CHECK-LABEL: name: load_constant
+# CHECK: registers:
+# CHECK: - { id: 0, class: sgpr }
+# CHECK: - { id: 1, class: sgpr }
+
+body: |
+ bb.0:
+ liveins: %sgpr0_sgpr1
+ %0:_(p2) = COPY %sgpr0_sgpr1
+ %1:_(s32) = G_LOAD %0 :: (load 4 from %ir.ptr0)
+...
+
+---
+name: load_global_uniform
+legalized: true
+
+# CHECK-LABEL: name: load_global_uniform
+# CHECK: registers:
+# CHECK: - { id: 0, class: sgpr }
+# CHECK: - { id: 1, class: sgpr }
+
+body: |
+ bb.0:
+ liveins: %sgpr0_sgpr1
+ %0:_(p1) = COPY %sgpr0_sgpr1
+ %1:_(s32) = G_LOAD %0 :: (load 4 from %ir.ptr1)
+...
+
+---
+name: load_global_non_uniform
+legalized: true
+
+# CHECK-LABEL: name: load_global_non_uniform
+# CHECK: registers:
+# CHECK: - { id: 0, class: sgpr }
+# CHECK: - { id: 1, class: vgpr }
+# CHECK: - { id: 2, class: vgpr }
+
+
+body: |
+ bb.0:
+ liveins: %sgpr0_sgpr1
+ %0:_(p1) = COPY %sgpr0_sgpr1
+ %1:_(s32) = G_LOAD %0 :: (load 4 from %ir.tmp1)
+...
diff --git a/test/CodeGen/AMDGPU/GlobalISel/shader-epilogs.ll b/test/CodeGen/AMDGPU/GlobalISel/shader-epilogs.ll
new file mode 100644
index 000000000000..a1bf987e6552
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/shader-epilogs.ll
@@ -0,0 +1,11 @@
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -show-mc-encoding -verify-machineinstrs -global-isel | FileCheck --check-prefix=GCN %s
+
+; REQUIRES: global-isel
+
+; GCN-LABEL: vs_epilog
+; GCN: s_endpgm
+
+define amdgpu_vs void @vs_epilog() {
+main_body:
+ ret void
+}
diff --git a/test/CodeGen/AMDGPU/GlobalISel/smrd.ll b/test/CodeGen/AMDGPU/GlobalISel/smrd.ll
new file mode 100644
index 000000000000..8a6b3df9cff8
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/smrd.ll
@@ -0,0 +1,89 @@
+; FIXME: Need to add support for mubuf stores to enable this on SI.
+; XUN: llc < %s -march=amdgcn -mcpu=SI -show-mc-encoding -verify-machineinstrs -global-isel | FileCheck --check-prefix=SI --check-prefix=GCN --check-prefix=SIVI %s
+; RUN: llc < %s -march=amdgcn -mcpu=bonaire -show-mc-encoding -verify-machineinstrs -global-isel | FileCheck --check-prefix=CI --check-prefix=GCN %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -show-mc-encoding -verify-machineinstrs -global-isel | FileCheck --check-prefix=VI --check-prefix=GCN --check-prefix=SIVI %s
+
+; REQUIRES: global-isel
+
+; SMRD load with an immediate offset.
+; GCN-LABEL: {{^}}smrd0:
+; SICI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x1 ; encoding: [0x01
+; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4
+define amdgpu_kernel void @smrd0(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+entry:
+ %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 1
+ %1 = load i32, i32 addrspace(2)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; SMRD load with the largest possible immediate offset.
+; GCN-LABEL: {{^}}smrd1:
+; SICI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff,0x{{[0-9]+[137]}}
+; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3fc
+define amdgpu_kernel void @smrd1(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+entry:
+ %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 255
+ %1 = load i32, i32 addrspace(2)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; SMRD load with an offset greater than the largest possible immediate.
+; GCN-LABEL: {{^}}smrd2:
+; SI: s_movk_i32 s[[OFFSET:[0-9]]], 0x400
+; SI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
+; CI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x100
+; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x400
+; GCN: s_endpgm
+define amdgpu_kernel void @smrd2(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+entry:
+ %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 256
+ %1 = load i32, i32 addrspace(2)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; SMRD load with a 64-bit offset
+; GCN-LABEL: {{^}}smrd3:
+; FIXME: There are too many copies here because we don't fold immediates
+; through REG_SEQUENCE
+; XSI: s_load_dwordx2 s[{{[0-9]:[0-9]}}], s[{{[0-9]:[0-9]}}], 0xb ; encoding: [0x0b
+; TODO: Add VI checks
+; XGCN: s_endpgm
+define amdgpu_kernel void @smrd3(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+entry:
+ %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 4294967296 ; 2 ^ 32
+ %1 = load i32, i32 addrspace(2)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; SMRD load with the largest possible immediate offset on VI
+; GCN-LABEL: {{^}}smrd4:
+; SI: s_mov_b32 [[OFFSET:s[0-9]+]], 0xffffc
+; SI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
+; CI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3ffff
+; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xffffc
+define amdgpu_kernel void @smrd4(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+entry:
+ %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 262143
+ %1 = load i32, i32 addrspace(2)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; SMRD load with an offset greater than the largest possible immediate on VI
+; GCN-LABEL: {{^}}smrd5:
+; SIVI: s_mov_b32 [[OFFSET:s[0-9]+]], 0x100000
+; SIVI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
+; CI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x40000
+; GCN: s_endpgm
+define amdgpu_kernel void @smrd5(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+entry:
+ %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 262144
+ %1 = load i32, i32 addrspace(2)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
diff --git a/test/CodeGen/AMDGPU/add-debug.ll b/test/CodeGen/AMDGPU/add-debug.ll
index 529905dd36a2..b90c20b97482 100644
--- a/test/CodeGen/AMDGPU/add-debug.ll
+++ b/test/CodeGen/AMDGPU/add-debug.ll
@@ -3,7 +3,7 @@
; REQUIRES: asserts
; Check that SelectionDAGDumper does not crash on int_SI_if.
-define void @add64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
+define amdgpu_kernel void @add64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
entry:
%0 = icmp eq i64 %a, 0
br i1 %0, label %if, label %else
diff --git a/test/CodeGen/AMDGPU/add.i16.ll b/test/CodeGen/AMDGPU/add.i16.ll
index 6c5cdd3877d1..3b274c9d2027 100644
--- a/test/CodeGen/AMDGPU/add.i16.ll
+++ b/test/CodeGen/AMDGPU/add.i16.ll
@@ -6,7 +6,7 @@
; VI: flat_load_ushort [[B:v[0-9]+]]
; VI: v_add_u16_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
; VI-NEXT: buffer_store_short [[ADD]]
-define void @v_test_add_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
+define amdgpu_kernel void @v_test_add_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
%gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
@@ -23,7 +23,7 @@ define void @v_test_add_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in0, i16
; VI: flat_load_ushort [[A:v[0-9]+]]
; VI: v_add_u16_e32 [[ADD:v[0-9]+]], 0x7b, [[A]]
; VI-NEXT: buffer_store_short [[ADD]]
-define void @v_test_add_i16_constant(i16 addrspace(1)* %out, i16 addrspace(1)* %in0) #1 {
+define amdgpu_kernel void @v_test_add_i16_constant(i16 addrspace(1)* %out, i16 addrspace(1)* %in0) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
%gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
@@ -38,7 +38,7 @@ define void @v_test_add_i16_constant(i16 addrspace(1)* %out, i16 addrspace(1)* %
; VI: flat_load_ushort [[A:v[0-9]+]]
; VI: v_add_u16_e32 [[ADD:v[0-9]+]], 0xfffffcb3, [[A]]
; VI-NEXT: buffer_store_short [[ADD]]
-define void @v_test_add_i16_neg_constant(i16 addrspace(1)* %out, i16 addrspace(1)* %in0) #1 {
+define amdgpu_kernel void @v_test_add_i16_neg_constant(i16 addrspace(1)* %out, i16 addrspace(1)* %in0) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
%gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
@@ -53,7 +53,7 @@ define void @v_test_add_i16_neg_constant(i16 addrspace(1)* %out, i16 addrspace(1
; VI: flat_load_ushort [[A:v[0-9]+]]
; VI: v_add_u16_e32 [[ADD:v[0-9]+]], -1, [[A]]
; VI-NEXT: buffer_store_short [[ADD]]
-define void @v_test_add_i16_inline_neg1(i16 addrspace(1)* %out, i16 addrspace(1)* %in0) #1 {
+define amdgpu_kernel void @v_test_add_i16_inline_neg1(i16 addrspace(1)* %out, i16 addrspace(1)* %in0) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
%gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
@@ -69,7 +69,7 @@ define void @v_test_add_i16_inline_neg1(i16 addrspace(1)* %out, i16 addrspace(1)
; VI: flat_load_ushort [[B:v[0-9]+]]
; VI: v_add_u16_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
; VI-NEXT: buffer_store_dword [[ADD]]
-define void @v_test_add_i16_zext_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
+define amdgpu_kernel void @v_test_add_i16_zext_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %tid
%gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
@@ -84,12 +84,12 @@ define void @v_test_add_i16_zext_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)
; FIXME: Need to handle non-uniform case for function below (load without gep).
; GCN-LABEL: {{^}}v_test_add_i16_zext_to_i64:
+; VI-DAG: v_mov_b32_e32 v[[VZERO:[0-9]+]], 0
; VI: flat_load_ushort [[A:v[0-9]+]]
; VI: flat_load_ushort [[B:v[0-9]+]]
; VI-DAG: v_add_u16_e32 v[[ADD:[0-9]+]], [[B]], [[A]]
-; VI-DAG: v_mov_b32_e32 v[[VZERO:[0-9]+]], 0
; VI: buffer_store_dwordx2 v{{\[}}[[ADD]]:[[VZERO]]{{\]}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0{{$}}
-define void @v_test_add_i16_zext_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
+define amdgpu_kernel void @v_test_add_i16_zext_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.out = getelementptr inbounds i64, i64 addrspace(1)* %out, i32 %tid
%gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
@@ -109,7 +109,7 @@ define void @v_test_add_i16_zext_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)
; VI: v_add_u16_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
; VI-NEXT: v_bfe_i32 [[SEXT:v[0-9]+]], [[ADD]], 0, 16
; VI-NEXT: buffer_store_dword [[SEXT]]
-define void @v_test_add_i16_sext_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
+define amdgpu_kernel void @v_test_add_i16_sext_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %tid
%gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
@@ -130,7 +130,7 @@ define void @v_test_add_i16_sext_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)
; VI-NEXT: v_bfe_i32 v[[LO:[0-9]+]], [[ADD]], 0, 16
; VI-NEXT: v_ashrrev_i32_e32 v[[HI:[0-9]+]], 31, v[[LO]]
; VI-NEXT: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @v_test_add_i16_sext_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
+define amdgpu_kernel void @v_test_add_i16_sext_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.out = getelementptr inbounds i64, i64 addrspace(1)* %out, i32 %tid
%gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
diff --git a/test/CodeGen/AMDGPU/add.ll b/test/CodeGen/AMDGPU/add.ll
index a6247c735240..7e4546d2cfb3 100644
--- a/test/CodeGen/AMDGPU/add.ll
+++ b/test/CodeGen/AMDGPU/add.ll
@@ -8,7 +8,7 @@
;SI: v_add_i32_e32 [[REG:v[0-9]+]], vcc, {{v[0-9]+, v[0-9]+}}
;SI-NOT: [[REG]]
;SI: buffer_store_dword [[REG]],
-define void @test1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
%b = load i32, i32 addrspace(1)* %b_ptr
@@ -24,7 +24,7 @@ define void @test1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
;SI: v_add_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
;SI: v_add_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
-define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32>, <2 x i32> addrspace(1)* %in
%b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
@@ -44,7 +44,7 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
;SI: v_add_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
;SI: v_add_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
-define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32>, <4 x i32> addrspace(1)* %in
%b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
@@ -71,7 +71,7 @@ define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
-define void @test8(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b) {
+define amdgpu_kernel void @test8(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b) {
entry:
%0 = add <8 x i32> %a, %b
store <8 x i32> %0, <8 x i32> addrspace(1)* %out
@@ -112,7 +112,7 @@ entry:
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
-define void @test16(<16 x i32> addrspace(1)* %out, <16 x i32> %a, <16 x i32> %b) {
+define amdgpu_kernel void @test16(<16 x i32> addrspace(1)* %out, <16 x i32> %a, <16 x i32> %b) {
entry:
%0 = add <16 x i32> %a, %b
store <16 x i32> %0, <16 x i32> addrspace(1)* %out
@@ -129,7 +129,7 @@ entry:
; EG-DAG: ADD_INT
; EG-DAG: ADD_INT {{[* ]*}}
; EG-NOT: SUB
-define void @add64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @add64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
%0 = add i64 %a, %b
store i64 %0, i64 addrspace(1)* %out
@@ -150,7 +150,7 @@ entry:
; EG-DAG: ADD_INT
; EG-DAG: ADD_INT {{[* ]*}}
; EG-NOT: SUB
-define void @add64_sgpr_vgpr(i64 addrspace(1)* %out, i64 %a, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @add64_sgpr_vgpr(i64 addrspace(1)* %out, i64 %a, i64 addrspace(1)* %in) {
entry:
%0 = load i64, i64 addrspace(1)* %in
%1 = add i64 %a, %0
@@ -169,7 +169,7 @@ entry:
; EG-DAG: ADD_INT
; EG-DAG: ADD_INT {{[* ]*}}
; EG-NOT: SUB
-define void @add64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
+define amdgpu_kernel void @add64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
entry:
%0 = icmp eq i64 %a, 0
br i1 %0, label %if, label %else
diff --git a/test/CodeGen/AMDGPU/add.v2i16.ll b/test/CodeGen/AMDGPU/add.v2i16.ll
new file mode 100644
index 000000000000..e137ef4bc236
--- /dev/null
+++ b/test/CodeGen/AMDGPU/add.v2i16.ll
@@ -0,0 +1,283 @@
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; FIXME: Need to handle non-uniform case for function below (load without gep).
+; GCN-LABEL: {{^}}v_test_add_v2i16:
+; GFX9: v_pk_add_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+
+; VI: v_add_u16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_add_u16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @v_test_add_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0, <2 x i16> addrspace(1)* %in1) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %gep.in1 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in1, i32 %tid
+ %a = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %b = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in1
+ %add = add <2 x i16> %a, %b
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_test_add_v2i16:
+; GFX9: s_load_dword [[VAL0:s[0-9]+]]
+; GFX9: s_load_dword [[VAL1:s[0-9]+]]
+; GFX9: v_mov_b32_e32 [[VVAL1:v[0-9]+]]
+; GFX9: v_pk_add_u16 v{{[0-9]+}}, [[VVAL1]], [[VAL0]]
+
+; VI: s_add_i32
+; VI: s_add_i32
+define amdgpu_kernel void @s_test_add_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %in0, <2 x i16> addrspace(2)* %in1) #1 {
+ %a = load <2 x i16>, <2 x i16> addrspace(2)* %in0
+ %b = load <2 x i16>, <2 x i16> addrspace(2)* %in1
+ %add = add <2 x i16> %a, %b
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_test_add_self_v2i16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_add_u16 v{{[0-9]+}}, [[VAL]], [[VAL]]
+
+; VI: s_add_i32
+; VI: s_add_i32
+define amdgpu_kernel void @s_test_add_self_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %in0) #1 {
+ %a = load <2 x i16>, <2 x i16> addrspace(2)* %in0
+ %add = add <2 x i16> %a, %a
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; FIXME: VI should not scalarize arg access.
+; GCN-LABEL: {{^}}s_test_add_v2i16_kernarg:
+; GFX9: v_pk_add_u16 v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
+
+; VI: v_add_i32
+; VI: v_add_i32_sdwa
+define amdgpu_kernel void @s_test_add_v2i16_kernarg(<2 x i16> addrspace(1)* %out, <2 x i16> %a, <2 x i16> %b) #1 {
+ %add = add <2 x i16> %a, %b
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_add_v2i16_constant:
+; GFX9: s_mov_b32 [[CONST:s[0-9]+]], 0x1c8007b{{$}}
+; GFX9: v_pk_add_u16 v{{[0-9]+}}, [[CONST]], v{{[0-9]+}}
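+; (0x1c8007b packs the two i16 addends: 0x01c8 = 456 in the high half,
+; 0x007b = 123 in the low half.)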
+
+; VI-DAG: v_add_u16_e32 v{{[0-9]+}}, 0x7b, v{{[0-9]+}}
+; VI-DAG: v_add_u16_e32 v{{[0-9]+}}, 0x1c8, v{{[0-9]+}}
+define amdgpu_kernel void @v_test_add_v2i16_constant(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %a = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %add = add <2 x i16> %a, <i16 123, i16 456>
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; FIXME: Need to handle non-uniform case for function below (load without gep).
+; GCN-LABEL: {{^}}v_test_add_v2i16_neg_constant:
+; GFX9: s_mov_b32 [[CONST:s[0-9]+]], 0xfc21fcb3{{$}}
+; GFX9: v_pk_add_u16 v{{[0-9]+}}, [[CONST]], v{{[0-9]+}}
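+; (0xfc21fcb3 packs the two i16 addends: 0xfc21 = -991 in the high half,
+; 0xfcb3 = -845 in the low half.)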
+
+; VI-DAG: v_add_u16_e32 v{{[0-9]+}}, 0xfffffcb3, v{{[0-9]+}}
+; VI-DAG: v_add_u16_e32 v{{[0-9]+}}, 0xfffffc21, v{{[0-9]+}}
+define amdgpu_kernel void @v_test_add_v2i16_neg_constant(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %a = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %add = add <2 x i16> %a, <i16 -845, i16 -991>
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_add_v2i16_inline_neg1:
+; GFX9: v_pk_add_u16 v{{[0-9]+}}, v{{[0-9]+}}, -1{{$}}
+
+; VI: flat_load_ushort [[LOAD0:v[0-9]+]]
+; VI: flat_load_ushort [[LOAD1:v[0-9]+]]
+; VI-DAG: v_add_u16_e32 v{{[0-9]+}}, -1, [[LOAD0]]
+; VI-DAG: v_add_u16_e32 v{{[0-9]+}}, -1, [[LOAD1]]
+; VI-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 16,
+; VI: v_or_b32_e32
+define amdgpu_kernel void @v_test_add_v2i16_inline_neg1(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %a = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %add = add <2 x i16> %a, <i16 -1, i16 -1>
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_add_v2i16_inline_lo_zero_hi:
+; GFX9: s_mov_b32 [[K:s[0-9]+]], 32{{$}}
+; GFX9: v_pk_add_u16 v{{[0-9]+}}, [[K]], v{{[0-9]+}}{{$}}
+
+; VI-NOT: v_add_u16
+; VI: v_add_u16_e32 v{{[0-9]+}}, 32, v{{[0-9]+}}
+; VI-NOT: v_add_u16
+; VI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16,
+; VI: v_or_b32_e32
+define amdgpu_kernel void @v_test_add_v2i16_inline_lo_zero_hi(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %a = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %add = add <2 x i16> %a, <i16 32, i16 0>
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; The high element gives an fp inline immediate (1.0).
+; GCN-LABEL: {{^}}v_test_add_v2i16_inline_fp_split:
+; GFX9: s_mov_b32 [[K:s[0-9]+]], 1.0
+; GFX9: v_pk_add_u16 v{{[0-9]+}}, [[K]], v{{[0-9]+}}{{$}}
+
+; VI-NOT: v_add_u16
+; VI: v_add_u16_e32 v{{[0-9]+}}, 0x3f80, v{{[0-9]+}}
+; VI-NOT: v_add_u16
+; VI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16,
+; VI: v_or_b32_e32
+define amdgpu_kernel void @v_test_add_v2i16_inline_fp_split(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %a = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %add = add <2 x i16> %a, <i16 0, i16 16256>
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
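; Annotation (not part of the patch), a worked check: 16256 = 0x3f80,
; so the packed constant is 0x3f800000 (hi = 0x3f80, lo = 0), which is
; the bit pattern of float 1.0. That is what lets GFX9 use the inline
; immediate `s_mov_b32 [[K]], 1.0` where VI needs the 0x3f80 literal.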
+
+; FIXME: Need to handle non-uniform case for function below (load without gep).
+; GCN-LABEL: {{^}}v_test_add_v2i16_zext_to_v2i32:
+; GFX9: flat_load_dword [[A:v[0-9]+]]
+; GFX9: flat_load_dword [[B:v[0-9]+]]
+
+; GFX9: v_pk_add_u16 [[ADD:v[0-9]+]], [[A]], [[B]]
+; GFX9-DAG: v_and_b32_e32 v[[ELT0:[0-9]+]], 0xffff, [[ADD]]
+; GFX9-DAG: v_lshrrev_b32_e32 v[[ELT1:[0-9]+]], 16, [[ADD]]
+; GFX9: buffer_store_dwordx2 v{{\[}}[[ELT0]]:[[ELT1]]{{\]}}
+
+; VI: flat_load_ushort v[[A_HI:[0-9]+]]
+; VI: flat_load_ushort v[[A_LO:[0-9]+]]
+; VI: flat_load_ushort v[[B_HI:[0-9]+]]
+; VI: flat_load_ushort v[[B_LO:[0-9]+]]
+
+; VI: v_add_u16_e32 v[[ADD_HI:[0-9]+]], v[[B_HI]], v[[A_HI]]
+; VI-NOT: and
+; VI-NOT: shl
+; VI: v_add_u16_e32 v[[ADD_LO:[0-9]+]], v[[B_LO]], v[[A_LO]]
+; VI-NOT: and
+; VI-NOT: shl
+; VI: buffer_store_dwordx2 v{{\[}}[[ADD_LO]]:[[ADD_HI]]{{\]}}
+define amdgpu_kernel void @v_test_add_v2i16_zext_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0, <2 x i16> addrspace(1)* %in1) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i32>, <2 x i32> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %gep.in1 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in1, i32 %tid
+ %a = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %b = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in1
+ %add = add <2 x i16> %a, %b
+ %ext = zext <2 x i16> %add to <2 x i32>
+ store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
+ ret void
+}
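; Annotation (not part of the patch): a sketch of the unpacking that the
; GFX9 checks above encode, written as scalar IR over the 32-bit
; register %packed holding the <2 x i16> sum:
;   %lo = and i32 %packed, 65535    ; element 0, zero-extended
;   %hi = lshr i32 %packed, 16      ; element 1, zero-extended
; i.e. the v_and_b32/v_lshrrev_b32 pair feeding the dwordx2 store.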
+
+; FIXME: Need to handle non-uniform case for function below (load without gep).
+; GCN-LABEL: {{^}}v_test_add_v2i16_zext_to_v2i64:
+; GFX9: flat_load_dword [[A:v[0-9]+]]
+; GFX9: flat_load_dword [[B:v[0-9]+]]
+
+; GFX9: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}}
+; GFX9: v_pk_add_u16 [[ADD:v[0-9]+]], [[A]], [[B]]
+; GFX9-DAG: v_and_b32_e32 v[[ELT0:[0-9]+]], 0xffff, [[ADD]]
+; GFX9-DAG: v_lshrrev_b32_e32 v[[ELT1:[0-9]+]], 16, [[ADD]]
+; GFX9: buffer_store_dwordx4
+
+; VI: flat_load_ushort v[[A_LO:[0-9]+]]
+; VI: flat_load_ushort v[[A_HI:[0-9]+]]
+; VI: flat_load_ushort v[[B_LO:[0-9]+]]
+; VI: flat_load_ushort v[[B_HI:[0-9]+]]
+
+; VI: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}}
+; VI: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}}
+; VI: v_add_u16_e32
+; VI: v_add_u16_e32
+
+; VI: buffer_store_dwordx4
+define amdgpu_kernel void @v_test_add_v2i16_zext_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0, <2 x i16> addrspace(1)* %in1) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i64>, <2 x i64> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %gep.in1 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in1, i32 %tid
+ %a = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %b = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in1
+ %add = add <2 x i16> %a, %b
+ %ext = zext <2 x i16> %add to <2 x i64>
+ store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FIXME: Need to handle non-uniform case for function below (load without gep).
+; GCN-LABEL: {{^}}v_test_add_v2i16_sext_to_v2i32:
+; GFX9: flat_load_dword [[A:v[0-9]+]]
+; GFX9: flat_load_dword [[B:v[0-9]+]]
+
+; GFX9: v_pk_add_u16 [[ADD:v[0-9]+]], [[A]], [[B]]
+; GFX9-DAG: v_bfe_i32 v[[ELT0:[0-9]+]], [[ADD]], 0, 16
+; GFX9-DAG: v_ashrrev_i32_e32 v[[ELT1:[0-9]+]], 16, [[ADD]]
+; GFX9: buffer_store_dwordx2 v{{\[}}[[ELT0]]:[[ELT1]]{{\]}}
+
+; VI: v_add_u16_e32
+; VI: v_add_u16_e32
+; VI: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 16
+; VI: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 16
+; VI: buffer_store_dwordx2
+define amdgpu_kernel void @v_test_add_v2i16_sext_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0, <2 x i16> addrspace(1)* %in1) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i32>, <2 x i32> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %gep.in1 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in1, i32 %tid
+ %a = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %b = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in1
+ %add = add <2 x i16> %a, %b
+ %ext = sext <2 x i16> %add to <2 x i32>
+ store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
+ ret void
+}
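; Annotation (not part of the patch): the sext case sign-extends each
; half instead; in scalar IR over the packed 32-bit sum:
;   %lo.shl = shl i32 %packed, 16
;   %lo = ashr i32 %lo.shl, 16      ; what v_bfe_i32 %packed, 0, 16 does
;   %hi = ashr i32 %packed, 16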
+
+; FIXME: Need to handle non-uniform case for function below (load without gep).
+; GCN-LABEL: {{^}}v_test_add_v2i16_sext_to_v2i64:
+; GCN: flat_load_dword
+; GCN: flat_load_dword
+
+; GFX9: v_pk_add_u16
+; GFX9: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+
+; VI: v_add_u16_sdwa
+; VI: v_add_u16_e32
+
+; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 16
+; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 16
+; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
+; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
+define amdgpu_kernel void @v_test_add_v2i16_sext_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0, <2 x i16> addrspace(1)* %in1) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i64>, <2 x i64> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %gep.in1 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in1, i32 %tid
+ %a = load <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %b = load <2 x i16>, <2 x i16> addrspace(1)* %gep.in1
+ %add = add <2 x i16> %a, %b
+ %ext = sext <2 x i16> %add to <2 x i64>
+ store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
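; Annotation (not part of the patch): the recurring change in the file
; diffs below is mechanical: `define void @...` becomes
; `define amdgpu_kernel void @...`. These tests rely on kernel argument
; lowering (arguments fetched from kernarg space via s_load), so they
; must name the kernel calling convention explicitly, presumably so the
; default calling convention for plain functions can later diverge from
; the kernel one.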
diff --git a/test/CodeGen/AMDGPU/add_i128.ll b/test/CodeGen/AMDGPU/add_i128.ll
index c80157ca9c58..00a125c2e44f 100644
--- a/test/CodeGen/AMDGPU/add_i128.ll
+++ b/test/CodeGen/AMDGPU/add_i128.ll
@@ -6,7 +6,7 @@
; GCN-NEXT: v_addc_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}, vcc
; GCN-NEXT: v_addc_u32_e32 v[[HI:[0-9]+]], vcc, v{{[0-9]+}}, v{{[0-9]+}}, vcc
; GCN: buffer_store_dwordx4 v{{\[}}[[LO]]:[[HI]]],
-define void @test_i128_vreg(i128 addrspace(1)* noalias %out, i128 addrspace(1)* noalias %inA, i128 addrspace(1)* noalias %inB) {
+define amdgpu_kernel void @test_i128_vreg(i128 addrspace(1)* noalias %out, i128 addrspace(1)* noalias %inA, i128 addrspace(1)* noalias %inB) {
%tid = call i32 @llvm.amdgcn.workitem.id.x() readnone
%a_ptr = getelementptr i128, i128 addrspace(1)* %inA, i32 %tid
%b_ptr = getelementptr i128, i128 addrspace(1)* %inB, i32 %tid
@@ -23,7 +23,7 @@ define void @test_i128_vreg(i128 addrspace(1)* noalias %out, i128 addrspace(1)*
; GCN: v_addc_u32
; GCN: v_addc_u32
; GCN: v_addc_u32
-define void @sgpr_operand(i128 addrspace(1)* noalias %out, i128 addrspace(1)* noalias %in, i128 %a) {
+define amdgpu_kernel void @sgpr_operand(i128 addrspace(1)* noalias %out, i128 addrspace(1)* noalias %in, i128 %a) {
%foo = load i128, i128 addrspace(1)* %in, align 8
%result = add i128 %foo, %a
store i128 %result, i128 addrspace(1)* %out
@@ -35,7 +35,7 @@ define void @sgpr_operand(i128 addrspace(1)* noalias %out, i128 addrspace(1)* no
; GCN: v_addc_u32
; GCN: v_addc_u32
; GCN: v_addc_u32
-define void @sgpr_operand_reversed(i128 addrspace(1)* noalias %out, i128 addrspace(1)* noalias %in, i128 %a) {
+define amdgpu_kernel void @sgpr_operand_reversed(i128 addrspace(1)* noalias %out, i128 addrspace(1)* noalias %in, i128 %a) {
%foo = load i128, i128 addrspace(1)* %in, align 8
%result = add i128 %a, %foo
store i128 %result, i128 addrspace(1)* %out
@@ -47,7 +47,7 @@ define void @sgpr_operand_reversed(i128 addrspace(1)* noalias %out, i128 addrspa
; GCN: s_addc_u32
; GCN: s_addc_u32
; GCN: s_addc_u32
-define void @test_sreg(i128 addrspace(1)* noalias %out, i128 %a, i128 %b) {
+define amdgpu_kernel void @test_sreg(i128 addrspace(1)* noalias %out, i128 %a, i128 %b) {
%result = add i128 %a, %b
store i128 %result, i128 addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/add_i64.ll b/test/CodeGen/AMDGPU/add_i64.ll
index 3d360b7d0b7a..62733d5bfb6c 100644
--- a/test/CodeGen/AMDGPU/add_i64.ll
+++ b/test/CodeGen/AMDGPU/add_i64.ll
@@ -6,7 +6,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() readnone
; SI-LABEL: {{^}}test_i64_vreg:
; SI: v_add_i32
; SI: v_addc_u32
-define void @test_i64_vreg(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %inA, i64 addrspace(1)* noalias %inB) {
+define amdgpu_kernel void @test_i64_vreg(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %inA, i64 addrspace(1)* noalias %inB) {
%tid = call i32 @llvm.amdgcn.workitem.id.x() readnone
%a_ptr = getelementptr i64, i64 addrspace(1)* %inA, i32 %tid
%b_ptr = getelementptr i64, i64 addrspace(1)* %inB, i32 %tid
@@ -21,7 +21,7 @@ define void @test_i64_vreg(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noa
; SI-LABEL: {{^}}sgpr_operand:
; SI: v_add_i32
; SI: v_addc_u32
-define void @sgpr_operand(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 addrspace(1)* noalias %in_bar, i64 %a) {
+define amdgpu_kernel void @sgpr_operand(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 addrspace(1)* noalias %in_bar, i64 %a) {
%foo = load i64, i64 addrspace(1)* %in, align 8
%result = add i64 %foo, %a
store i64 %result, i64 addrspace(1)* %out
@@ -34,7 +34,7 @@ define void @sgpr_operand(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noal
; SI-LABEL: {{^}}sgpr_operand_reversed:
; SI: v_add_i32
; SI: v_addc_u32
-define void @sgpr_operand_reversed(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 %a) {
+define amdgpu_kernel void @sgpr_operand_reversed(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 %a) {
%foo = load i64, i64 addrspace(1)* %in, align 8
%result = add i64 %a, %foo
store i64 %result, i64 addrspace(1)* %out
@@ -47,7 +47,7 @@ define void @sgpr_operand_reversed(i64 addrspace(1)* noalias %out, i64 addrspace
; SI: s_addc_u32
; SI: s_add_u32
; SI: s_addc_u32
-define void @test_v2i64_sreg(<2 x i64> addrspace(1)* noalias %out, <2 x i64> %a, <2 x i64> %b) {
+define amdgpu_kernel void @test_v2i64_sreg(<2 x i64> addrspace(1)* noalias %out, <2 x i64> %a, <2 x i64> %b) {
%result = add <2 x i64> %a, %b
store <2 x i64> %result, <2 x i64> addrspace(1)* %out
ret void
@@ -58,7 +58,7 @@ define void @test_v2i64_sreg(<2 x i64> addrspace(1)* noalias %out, <2 x i64> %a,
; SI: v_addc_u32
; SI: v_add_i32
; SI: v_addc_u32
-define void @test_v2i64_vreg(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %inA, <2 x i64> addrspace(1)* noalias %inB) {
+define amdgpu_kernel void @test_v2i64_vreg(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %inA, <2 x i64> addrspace(1)* noalias %inB) {
%tid = call i32 @llvm.amdgcn.workitem.id.x() readnone
%a_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inA, i32 %tid
%b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inB, i32 %tid
@@ -76,7 +76,7 @@ define void @test_v2i64_vreg(<2 x i64> addrspace(1)* noalias %out, <2 x i64> add
; SI-NOT: addc
; SI: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
; SI: buffer_store_dword [[VRESULT]],
-define void @trunc_i64_add_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @trunc_i64_add_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
%add = add i64 %b, %a
%trunc = trunc i64 %add to i32
store i32 %trunc, i32 addrspace(1)* %out, align 8
diff --git a/test/CodeGen/AMDGPU/addrspacecast-captured.ll b/test/CodeGen/AMDGPU/addrspacecast-captured.ll
new file mode 100644
index 000000000000..138bc36b9e1b
--- /dev/null
+++ b/test/CodeGen/AMDGPU/addrspacecast-captured.ll
@@ -0,0 +1,47 @@
+; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -amdgpu-promote-alloca < %s | FileCheck %s
+
+; Nothing should be done if the addrspacecast is captured.
+
+declare void @consume_ptr2int(i32) #0
+
+; CHECK-LABEL: @addrspacecast_captured(
+; CHECK: %data = alloca i32, align 4
+; CHECK: %cast = addrspacecast i32* %data to i32 addrspace(4)*
+; CHECK: %ptr2int = ptrtoint i32 addrspace(4)* %cast to i32
+; CHECK: store i32 %ptr2int, i32 addrspace(1)* %out
+define amdgpu_kernel void @addrspacecast_captured(i32 addrspace(1)* %out) #0 {
+entry:
+ %data = alloca i32, align 4
+ %cast = addrspacecast i32* %data to i32 addrspace(4)*
+ %ptr2int = ptrtoint i32 addrspace(4)* %cast to i32
+ store i32 %ptr2int, i32 addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: @addrspacecast_captured_store(
+; CHECK: %data = alloca i32, align 4
+; CHECK: %cast = addrspacecast i32* %data to i32 addrspace(4)*
+; CHECK: store i32 addrspace(4)* %cast, i32 addrspace(4)* addrspace(1)* %out
+define amdgpu_kernel void @addrspacecast_captured_store(i32 addrspace(4)* addrspace(1)* %out) #0 {
+entry:
+ %data = alloca i32, align 4
+ %cast = addrspacecast i32* %data to i32 addrspace(4)*
+ store i32 addrspace(4)* %cast, i32 addrspace(4)* addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: @addrspacecast_captured_call(
+; CHECK: %data = alloca i32, align 4
+; CHECK: %cast = addrspacecast i32* %data to i32 addrspace(4)*
+; CHECK: %ptr2int = ptrtoint i32 addrspace(4)* %cast to i32
+; CHECK: call void @consume_ptr2int(i32 %ptr2int)
+define amdgpu_kernel void @addrspacecast_captured_call() #0 {
+entry:
+ %data = alloca i32, align 4
+ %cast = addrspacecast i32* %data to i32 addrspace(4)*
+ %ptr2int = ptrtoint i32 addrspace(4)* %cast to i32
+ call void @consume_ptr2int(i32 %ptr2int)
+ ret void
+}
+
+attributes #0 = { nounwind }
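; Annotation (not part of the patch): the common thread in this new file
; is that the flat (addrspace(4)) pointer escapes in every function
; (via ptrtoint, a pointer store, or a call), so promote-alloca must
; conservatively keep the private alloca and the addrspacecast in place,
; which is exactly what the CHECK lines verify.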
diff --git a/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll b/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll
index 67a193999204..8cabc7dae133 100644
--- a/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll
+++ b/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll
@@ -9,57 +9,57 @@ declare void @llvm.memcpy.p1i32.p4i32.i32(i32 addrspace(1)* nocapture, i32 addrs
@global.arr = unnamed_addr addrspace(1) global [256 x i32] undef, align 4
; HSA: @store_cast_0_flat_to_group_addrspacecast() #1
-define void @store_cast_0_flat_to_group_addrspacecast() #1 {
+define amdgpu_kernel void @store_cast_0_flat_to_group_addrspacecast() #1 {
store i32 7, i32 addrspace(3)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*)
ret void
}
; HSA: @store_cast_0_group_to_flat_addrspacecast() #2
-define void @store_cast_0_group_to_flat_addrspacecast() #1 {
+define amdgpu_kernel void @store_cast_0_group_to_flat_addrspacecast() #1 {
store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*)
ret void
}
-; HSA: define void @store_constant_cast_group_gv_to_flat() #2
-define void @store_constant_cast_group_gv_to_flat() #1 {
+; HSA: define amdgpu_kernel void @store_constant_cast_group_gv_to_flat() #2
+define amdgpu_kernel void @store_constant_cast_group_gv_to_flat() #1 {
store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds.i32 to i32 addrspace(4)*)
ret void
}
; HSA: @store_constant_cast_group_gv_gep_to_flat() #2
-define void @store_constant_cast_group_gv_gep_to_flat() #1 {
+define amdgpu_kernel void @store_constant_cast_group_gv_gep_to_flat() #1 {
store i32 7, i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8)
ret void
}
; HSA: @store_constant_cast_global_gv_to_flat() #1
-define void @store_constant_cast_global_gv_to_flat() #1 {
+define amdgpu_kernel void @store_constant_cast_global_gv_to_flat() #1 {
store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global.i32 to i32 addrspace(4)*)
ret void
}
; HSA: @store_constant_cast_global_gv_gep_to_flat() #1
-define void @store_constant_cast_global_gv_gep_to_flat() #1 {
+define amdgpu_kernel void @store_constant_cast_global_gv_gep_to_flat() #1 {
store i32 7, i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(1)* @global.arr to [256 x i32] addrspace(4)*), i64 0, i64 8)
ret void
}
; HSA: @load_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #2
-define void @load_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
+define amdgpu_kernel void @load_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
%val = load i32, i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8)
store i32 %val, i32 addrspace(1)* %out
ret void
}
; HSA: @atomicrmw_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #2
-define void @atomicrmw_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
+define amdgpu_kernel void @atomicrmw_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
%val = atomicrmw add i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8), i32 1 seq_cst
store i32 %val, i32 addrspace(1)* %out
ret void
}
; HSA: @cmpxchg_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #2
-define void @cmpxchg_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
+define amdgpu_kernel void @cmpxchg_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
%val = cmpxchg i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8), i32 0, i32 1 seq_cst seq_cst
%val0 = extractvalue { i32, i1 } %val, 0
store i32 %val0, i32 addrspace(1)* %out
@@ -67,28 +67,28 @@ define void @cmpxchg_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out)
}
; HSA: @memcpy_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #2
-define void @memcpy_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
+define amdgpu_kernel void @memcpy_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
call void @llvm.memcpy.p1i32.p4i32.i32(i32 addrspace(1)* %out, i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8), i32 32, i32 4, i1 false)
ret void
}
; Can't just search the pointer value
; HSA: @store_value_constant_cast_lds_gv_gep_to_flat(i32 addrspace(4)* addrspace(1)* %out) #2
-define void @store_value_constant_cast_lds_gv_gep_to_flat(i32 addrspace(4)* addrspace(1)* %out) #1 {
+define amdgpu_kernel void @store_value_constant_cast_lds_gv_gep_to_flat(i32 addrspace(4)* addrspace(1)* %out) #1 {
store i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8), i32 addrspace(4)* addrspace(1)* %out
ret void
}
; Can't just search pointer types
; HSA: @store_ptrtoint_value_constant_cast_lds_gv_gep_to_flat(i64 addrspace(1)* %out) #2
-define void @store_ptrtoint_value_constant_cast_lds_gv_gep_to_flat(i64 addrspace(1)* %out) #1 {
+define amdgpu_kernel void @store_ptrtoint_value_constant_cast_lds_gv_gep_to_flat(i64 addrspace(1)* %out) #1 {
store i64 ptrtoint (i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8) to i64), i64 addrspace(1)* %out
ret void
}
; Cast group to flat, do GEP, cast back to group
; HSA: @store_constant_cast_group_gv_gep_to_flat_to_group() #2
-define void @store_constant_cast_group_gv_gep_to_flat_to_group() #1 {
+define amdgpu_kernel void @store_constant_cast_group_gv_gep_to_flat_to_group() #1 {
store i32 7, i32 addrspace(3)* addrspacecast (i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8) to i32 addrspace(3)*)
ret void
}
diff --git a/test/CodeGen/AMDGPU/addrspacecast.ll b/test/CodeGen/AMDGPU/addrspacecast.ll
index 0a2130c96add..6ec93c72ec52 100644
--- a/test/CodeGen/AMDGPU/addrspacecast.ll
+++ b/test/CodeGen/AMDGPU/addrspacecast.ll
@@ -1,14 +1,23 @@
-; RUN: llc -march=amdgcn -mtriple=amdgcn-amd-amdhsa -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=HSA %s
+; RUN: llc -march=amdgcn -mtriple=amdgcn-amd-amdhsa -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=HSA -check-prefix=CI %s
+; RUN: llc -march=amdgcn -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=HSA -check-prefix=GFX9 %s
; HSA-LABEL: {{^}}use_group_to_flat_addrspacecast:
; HSA: enable_sgpr_private_segment_buffer = 1
; HSA: enable_sgpr_dispatch_ptr = 0
-; HSA: enable_sgpr_queue_ptr = 1
+; CI: enable_sgpr_queue_ptr = 1
+; GFX9: enable_sgpr_queue_ptr = 0
-; HSA-DAG: s_load_dword [[PTR:s[0-9]+]], s[6:7], 0x0{{$}}
-; HSA-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x10{{$}}
+; CI-DAG: s_load_dword [[PTR:s[0-9]+]], s[6:7], 0x0{{$}}
+; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x10{{$}}
+; CI-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[APERTURE]]
+
+; GFX9-DAG: s_load_dword [[PTR:s[0-9]+]], s[4:5], 0x0{{$}}
+; GFX9-DAG: s_getreg_b32 [[SSRC_SHARED:s[0-9]+]], hwreg(15, 16, 16)
+; GFX9-DAG: s_lshl_b32 [[SSRC_SHARED_BASE:s[0-9]+]], [[SSRC_SHARED]], 16
+; GFX9-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[SSRC_SHARED_BASE]]
+
+; GFX9-XXX: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], src_shared_base
-; HSA-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[APERTURE]]
; HSA-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
; HSA-DAG: v_cmp_ne_u32_e64 vcc, [[PTR]], -1
@@ -17,7 +26,13 @@
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
; HSA: flat_store_dword v{{\[}}[[LO]]:[[HI]]{{\]}}, [[K]]
-define void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #0 {
+
+; At most 2 digits. Make sure src_shared_base is not counted as a
+; high-numbered SGPR.
+
+; CI: NumSgprs: {{[0-9][0-9]+}}
+; GFX9: NumSgprs: {{[0-9]+}}
+define amdgpu_kernel void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #0 {
%stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
store volatile i32 7, i32 addrspace(4)* %stof
ret void
@@ -26,21 +41,32 @@ define void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #0 {
; HSA-LABEL: {{^}}use_private_to_flat_addrspacecast:
; HSA: enable_sgpr_private_segment_buffer = 1
; HSA: enable_sgpr_dispatch_ptr = 0
-; HSA: enable_sgpr_queue_ptr = 1
+; CI: enable_sgpr_queue_ptr = 1
+; GFX9: enable_sgpr_queue_ptr = 0
-; HSA-DAG: s_load_dword [[PTR:s[0-9]+]], s[6:7], 0x0{{$}}
-; HSA-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x11{{$}}
+; CI-DAG: s_load_dword [[PTR:s[0-9]+]], s[6:7], 0x0{{$}}
+; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x11{{$}}
+; CI-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[APERTURE]]
+
+; GFX9-DAG: s_load_dword [[PTR:s[0-9]+]], s[4:5], 0x0{{$}}
+; GFX9-DAG: s_getreg_b32 [[SSRC_PRIVATE:s[0-9]+]], hwreg(15, 0, 16)
+; GFX9-DAG: s_lshl_b32 [[SSRC_PRIVATE_BASE:s[0-9]+]], [[SSRC_PRIVATE]], 16
+; GFX9-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[SSRC_PRIVATE_BASE]]
+
+; GFX9-XXX: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], src_private_base
-; HSA-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[APERTURE]]
; HSA-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
-; HSA-DAG: v_cmp_ne_u32_e64 vcc, [[PTR]], -1
+; HSA-DAG: v_cmp_ne_u32_e64 vcc, [[PTR]], 0
; HSA-DAG: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]]
; HSA-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, [[VPTR]]
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
; HSA: flat_store_dword v{{\[}}[[LO]]:[[HI]]{{\]}}, [[K]]
-define void @use_private_to_flat_addrspacecast(i32* %ptr) #0 {
+
+; CI: NumSgprs: {{[0-9][0-9]+}}
+; GFX9: NumSgprs: {{[0-9]+}}
+define amdgpu_kernel void @use_private_to_flat_addrspacecast(i32* %ptr) #0 {
%stof = addrspacecast i32* %ptr to i32 addrspace(4)*
store volatile i32 7, i32 addrspace(4)* %stof
ret void
@@ -55,7 +81,7 @@ define void @use_private_to_flat_addrspacecast(i32* %ptr) #0 {
; HSA-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
; HSA: flat_store_dword v{{\[}}[[VPTRLO]]:[[VPTRHI]]{{\]}}, [[K]]
-define void @use_global_to_flat_addrspacecast(i32 addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @use_global_to_flat_addrspacecast(i32 addrspace(1)* %ptr) #0 {
%stof = addrspacecast i32 addrspace(1)* %ptr to i32 addrspace(4)*
store volatile i32 7, i32 addrspace(4)* %stof
ret void
@@ -67,7 +93,7 @@ define void @use_global_to_flat_addrspacecast(i32 addrspace(1)* %ptr) #0 {
; HSA-DAG: v_mov_b32_e32 v[[VPTRLO:[0-9]+]], s[[PTRLO]]
; HSA-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
; HSA: flat_load_dword v{{[0-9]+}}, v{{\[}}[[VPTRLO]]:[[VPTRHI]]{{\]}}
-define void @use_constant_to_flat_addrspacecast(i32 addrspace(2)* %ptr) #0 {
+define amdgpu_kernel void @use_constant_to_flat_addrspacecast(i32 addrspace(2)* %ptr) #0 {
%stof = addrspacecast i32 addrspace(2)* %ptr to i32 addrspace(4)*
%ld = load volatile i32, i32 addrspace(4)* %stof
ret void
@@ -84,7 +110,7 @@ define void @use_constant_to_flat_addrspacecast(i32 addrspace(2)* %ptr) #0 {
; HSA-DAG: v_cndmask_b32_e32 [[CASTPTR:v[0-9]+]], -1, v[[VPTR_LO]]
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 0{{$}}
; HSA: ds_write_b32 [[CASTPTR]], v[[K]]
-define void @use_flat_to_group_addrspacecast(i32 addrspace(4)* %ptr) #0 {
+define amdgpu_kernel void @use_flat_to_group_addrspacecast(i32 addrspace(4)* %ptr) #0 {
%ftos = addrspacecast i32 addrspace(4)* %ptr to i32 addrspace(3)*
store volatile i32 0, i32 addrspace(3)* %ftos
ret void
@@ -98,10 +124,10 @@ define void @use_flat_to_group_addrspacecast(i32 addrspace(4)* %ptr) #0 {
; HSA: s_load_dwordx2 s{{\[}}[[PTR_LO:[0-9]+]]:[[PTR_HI:[0-9]+]]{{\]}}
; HSA-DAG: v_cmp_ne_u64_e64 vcc, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[VPTR_LO:[0-9]+]], s[[PTR_LO]]
-; HSA-DAG: v_cndmask_b32_e32 [[CASTPTR:v[0-9]+]], -1, v[[VPTR_LO]]
+; HSA-DAG: v_cndmask_b32_e32 [[CASTPTR:v[0-9]+]], 0, v[[VPTR_LO]]
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 0{{$}}
; HSA: buffer_store_dword v[[K]], [[CASTPTR]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
-define void @use_flat_to_private_addrspacecast(i32 addrspace(4)* %ptr) #0 {
+define amdgpu_kernel void @use_flat_to_private_addrspacecast(i32 addrspace(4)* %ptr) #0 {
%ftos = addrspacecast i32 addrspace(4)* %ptr to i32*
store volatile i32 0, i32* %ftos
ret void
@@ -115,7 +141,7 @@ define void @use_flat_to_private_addrspacecast(i32 addrspace(4)* %ptr) #0 {
; HSA-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0
; HSA: flat_store_dword v{{\[}}[[VPTRLO]]:[[VPTRHI]]{{\]}}, [[K]]
-define void @use_flat_to_global_addrspacecast(i32 addrspace(4)* %ptr) #0 {
+define amdgpu_kernel void @use_flat_to_global_addrspacecast(i32 addrspace(4)* %ptr) #0 {
%ftos = addrspacecast i32 addrspace(4)* %ptr to i32 addrspace(1)*
store volatile i32 0, i32 addrspace(1)* %ftos
ret void
@@ -126,21 +152,27 @@ define void @use_flat_to_global_addrspacecast(i32 addrspace(4)* %ptr) #0 {
; HSA: s_load_dwordx2 s{{\[}}[[PTRLO:[0-9]+]]:[[PTRHI:[0-9]+]]{{\]}}, s[4:5], 0x0
; HSA: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTRLO]]:[[PTRHI]]{{\]}}, 0x0
-define void @use_flat_to_constant_addrspacecast(i32 addrspace(4)* %ptr) #0 {
+define amdgpu_kernel void @use_flat_to_constant_addrspacecast(i32 addrspace(4)* %ptr) #0 {
%ftos = addrspacecast i32 addrspace(4)* %ptr to i32 addrspace(2)*
load volatile i32, i32 addrspace(2)* %ftos
ret void
}
; HSA-LABEL: {{^}}cast_0_group_to_flat_addrspacecast:
-; HSA: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x10
-; HSA-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], [[APERTURE]]
+; CI: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x10
+; CI-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], [[APERTURE]]
+; GFX9-DAG: s_getreg_b32 [[SSRC_SHARED:s[0-9]+]], hwreg(15, 16, 16)
+; GFX9-DAG: s_lshl_b32 [[SSRC_SHARED_BASE:s[0-9]+]], [[SSRC_SHARED]], 16
+; GFX9-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], [[SSRC_SHARED_BASE]]
+
+; GFX9-XXX: v_mov_b32_e32 v[[HI:[0-9]+]], src_shared_base
+
; HSA-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
; HSA: flat_store_dword v{{\[}}[[LO]]:[[HI]]{{\]}}, v[[K]]
-define void @cast_0_group_to_flat_addrspacecast() #0 {
+define amdgpu_kernel void @cast_0_group_to_flat_addrspacecast() #0 {
%cast = addrspacecast i32 addrspace(3)* null to i32 addrspace(4)*
- store i32 7, i32 addrspace(4)* %cast
+ store volatile i32 7, i32 addrspace(4)* %cast
ret void
}
@@ -148,9 +180,9 @@ define void @cast_0_group_to_flat_addrspacecast() #0 {
; HSA-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], -1{{$}}
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7{{$}}
; HSA: ds_write_b32 [[PTR]], [[K]]
-define void @cast_0_flat_to_group_addrspacecast() #0 {
+define amdgpu_kernel void @cast_0_flat_to_group_addrspacecast() #0 {
%cast = addrspacecast i32 addrspace(4)* null to i32 addrspace(3)*
- store i32 7, i32 addrspace(3)* %cast
+ store volatile i32 7, i32 addrspace(3)* %cast
ret void
}
@@ -159,9 +191,9 @@ define void @cast_0_flat_to_group_addrspacecast() #0 {
; HSA: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
; HSA: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; HSA: flat_store_dword v{{\[}}[[LO]]:[[HI]]{{\]}}, v[[K]]
-define void @cast_neg1_group_to_flat_addrspacecast() #0 {
+define amdgpu_kernel void @cast_neg1_group_to_flat_addrspacecast() #0 {
%cast = addrspacecast i32 addrspace(3)* inttoptr (i32 -1 to i32 addrspace(3)*) to i32 addrspace(4)*
- store i32 7, i32 addrspace(4)* %cast
+ store volatile i32 7, i32 addrspace(4)* %cast
ret void
}
@@ -169,31 +201,34 @@ define void @cast_neg1_group_to_flat_addrspacecast() #0 {
; HSA-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], -1{{$}}
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7{{$}}
; HSA: ds_write_b32 [[PTR]], [[K]]
-define void @cast_neg1_flat_to_group_addrspacecast() #0 {
+define amdgpu_kernel void @cast_neg1_flat_to_group_addrspacecast() #0 {
%cast = addrspacecast i32 addrspace(4)* inttoptr (i64 -1 to i32 addrspace(4)*) to i32 addrspace(3)*
- store i32 7, i32 addrspace(3)* %cast
+ store volatile i32 7, i32 addrspace(3)* %cast
ret void
}
+; FIXME: Shouldn't need to enable queue ptr
; HSA-LABEL: {{^}}cast_0_private_to_flat_addrspacecast:
-; HSA: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x11
-; HSA-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], [[APERTURE]]
+; CI: enable_sgpr_queue_ptr = 1
+; GFX9: enable_sgpr_queue_ptr = 0
+
; HSA-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
+; HSA: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; HSA: flat_store_dword v{{\[}}[[LO]]:[[HI]]{{\]}}, v[[K]]
-define void @cast_0_private_to_flat_addrspacecast() #0 {
+define amdgpu_kernel void @cast_0_private_to_flat_addrspacecast() #0 {
%cast = addrspacecast i32* null to i32 addrspace(4)*
- store i32 7, i32 addrspace(4)* %cast
+ store volatile i32 7, i32 addrspace(4)* %cast
ret void
}
; HSA-LABEL: {{^}}cast_0_flat_to_private_addrspacecast:
-; HSA-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], -1{{$}}
+; HSA-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], 0{{$}}
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7{{$}}
; HSA: buffer_store_dword [[K]], [[PTR]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen
-define void @cast_0_flat_to_private_addrspacecast() #0 {
+define amdgpu_kernel void @cast_0_flat_to_private_addrspacecast() #0 {
%cast = addrspacecast i32 addrspace(4)* null to i32 addrspace(0)*
- store i32 7, i32* %cast
+ store volatile i32 7, i32* %cast
ret void
}
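; Annotation (not part of the patch): note the 0/-1 flips in this file.
; After this change the null private pointer lowers to 0 rather than -1
; (see the updated v_cmp_ne_u32 and v_cndmask_b32 expectations above),
; while the group (LDS) null value remains -1, as the unchanged
; cast_0_flat_to_group checks still require.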
@@ -203,7 +238,7 @@ define void @cast_0_flat_to_private_addrspacecast() #0 {
; HSA-LABEL: {{^}}branch_use_flat_i32:
; HSA: flat_store_dword {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}
; HSA: s_endpgm
-define void @branch_use_flat_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* %gptr, i32 addrspace(3)* %lptr, i32 %x, i32 %c) #0 {
+define amdgpu_kernel void @branch_use_flat_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* %gptr, i32 addrspace(3)* %lptr, i32 %x, i32 %c) #0 {
entry:
%cmp = icmp ne i32 %c, 0
br i1 %cmp, label %local, label %global
@@ -218,7 +253,7 @@ global:
end:
%fptr = phi i32 addrspace(4)* [ %flat_local, %local ], [ %flat_global, %global ]
- store i32 %x, i32 addrspace(4)* %fptr, align 4
+ store volatile i32 %x, i32 addrspace(4)* %fptr, align 4
; %val = load i32, i32 addrspace(4)* %fptr, align 4
; store i32 %val, i32 addrspace(1)* %out, align 4
ret void
@@ -226,22 +261,26 @@ end:
; Check for prologue initializing special SGPRs pointing to scratch.
; HSA-LABEL: {{^}}store_flat_scratch:
-; HSA-DAG: s_mov_b32 flat_scratch_lo, s9
-; HSA-DAG: s_add_u32 [[ADD:s[0-9]+]], s8, s11
-; HSA: s_lshr_b32 flat_scratch_hi, [[ADD]], 8
+; CI-DAG: s_mov_b32 flat_scratch_lo, s9
+; CI-DAG: s_add_u32 [[ADD:s[0-9]+]], s8, s11
+; CI: s_lshr_b32 flat_scratch_hi, [[ADD]], 8
+
+; GFX9: s_add_u32 flat_scratch_lo, s6, s9
+; GFX9: s_addc_u32 flat_scratch_hi, s7, 0
+
; HSA: flat_store_dword
; HSA: s_barrier
; HSA: flat_load_dword
-define void @store_flat_scratch(i32 addrspace(1)* noalias %out, i32) #0 {
+define amdgpu_kernel void @store_flat_scratch(i32 addrspace(1)* noalias %out, i32) #0 {
%alloca = alloca i32, i32 9, align 4
%x = call i32 @llvm.amdgcn.workitem.id.x() #2
%pptr = getelementptr i32, i32* %alloca, i32 %x
%fptr = addrspacecast i32* %pptr to i32 addrspace(4)*
- store i32 %x, i32 addrspace(4)* %fptr
+ store volatile i32 %x, i32 addrspace(4)* %fptr
; Dummy call
call void @llvm.amdgcn.s.barrier() #1
- %reload = load i32, i32 addrspace(4)* %fptr, align 4
- store i32 %reload, i32 addrspace(1)* %out, align 4
+ %reload = load volatile i32, i32 addrspace(4)* %fptr, align 4
+ store volatile i32 %reload, i32 addrspace(1)* %out, align 4
ret void
}
diff --git a/test/CodeGen/AMDGPU/amdgcn.bitcast.ll b/test/CodeGen/AMDGPU/amdgcn.bitcast.ll
index 87ef5978ebfc..ef742f56faec 100644
--- a/test/CodeGen/AMDGPU/amdgcn.bitcast.ll
+++ b/test/CodeGen/AMDGPU/amdgcn.bitcast.ll
@@ -3,24 +3,20 @@
; This test just checks that the compiler doesn't crash.
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-
; FUNC-LABEL: {{^}}v32i8_to_v8i32:
-; SI: s_endpgm
-define amdgpu_ps void @v32i8_to_v8i32(<32 x i8> addrspace(2)* inreg) #0 {
+define amdgpu_ps float @v32i8_to_v8i32(<32 x i8> addrspace(2)* inreg) #0 {
entry:
%1 = load <32 x i8>, <32 x i8> addrspace(2)* %0
%2 = bitcast <32 x i8> %1 to <8 x i32>
%3 = extractelement <8 x i32> %2, i32 1
%4 = icmp ne i32 %3, 0
%5 = select i1 %4, float 0.0, float 1.0
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %5, float %5, float %5, float %5)
- ret void
+ ret float %5
}
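; Annotation (not part of the patch): dropping the deprecated
; llvm.SI.export call (and its declaration) is compensated for by
; returning the computed float from the amdgpu_ps entry point, which
; keeps the value live so the test still exercises the
; <32 x i8> -> <8 x i32> bitcast.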
; FUNC-LABEL: {{^}}i8ptr_v16i8ptr:
; SI: s_endpgm
-define void @i8ptr_v16i8ptr(<16 x i8> addrspace(1)* %out, i8 addrspace(1)* %in) {
+define amdgpu_kernel void @i8ptr_v16i8ptr(<16 x i8> addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
%0 = bitcast i8 addrspace(1)* %in to <16 x i8> addrspace(1)*
%1 = load <16 x i8>, <16 x i8> addrspace(1)* %0
@@ -28,28 +24,50 @@ entry:
ret void
}
-define void @f32_to_v2i16(<2 x i16> addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @f32_to_v2i16(<2 x i16> addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%load = load float, float addrspace(1)* %in, align 4
- %bc = bitcast float %load to <2 x i16>
- store <2 x i16> %bc, <2 x i16> addrspace(1)* %out, align 4
+ %fadd32 = fadd float %load, 1.0
+ %bc = bitcast float %fadd32 to <2 x i16>
+ %add.bitcast = add <2 x i16> %bc, <i16 2, i16 2>
+ store <2 x i16> %add.bitcast, <2 x i16> addrspace(1)* %out
ret void
}
-define void @v2i16_to_f32(float addrspace(1)* %out, <2 x i16> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @v2i16_to_f32(float addrspace(1)* %out, <2 x i16> addrspace(1)* %in) nounwind {
%load = load <2 x i16>, <2 x i16> addrspace(1)* %in, align 4
- %bc = bitcast <2 x i16> %load to float
- store float %bc, float addrspace(1)* %out, align 4
+ %add.v2i16 = add <2 x i16> %load, <i16 2, i16 2>
+ %bc = bitcast <2 x i16> %add.v2i16 to float
+ %fadd.bitcast = fadd float %bc, 1.0
+ store float %fadd.bitcast, float addrspace(1)* %out
+ ret void
+}
+
+define amdgpu_kernel void @f32_to_v2f16(<2 x half> addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+ %load = load float, float addrspace(1)* %in, align 4
+ %fadd32 = fadd float %load, 1.0
+ %bc = bitcast float %fadd32 to <2 x half>
+ %add.bitcast = fadd <2 x half> %bc, <half 2.0, half 2.0>
+ store <2 x half> %add.bitcast, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+define amdgpu_kernel void @v2f16_to_f32(float addrspace(1)* %out, <2 x half> addrspace(1)* %in) nounwind {
+ %load = load <2 x half>, <2 x half> addrspace(1)* %in, align 4
+ %add.v2f16 = fadd <2 x half> %load, <half 2.0, half 2.0>
+ %bc = bitcast <2 x half> %add.v2f16 to float
+ %fadd.bitcast = fadd float %bc, 1.0
+ store float %fadd.bitcast, float addrspace(1)* %out
ret void
}
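; Annotation (not part of the patch): the reworked and newly added
; bitcast tests each now perform arithmetic in both the source and the
; destination type, so the bitcast has non-trivial uses on both ends
; and cannot simply be folded away into the adjacent load or store.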
-define void @v4i8_to_i32(i32 addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @v4i8_to_i32(i32 addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
%load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
%bc = bitcast <4 x i8> %load to i32
store i32 %bc, i32 addrspace(1)* %out, align 4
ret void
}
-define void @i32_to_v4i8(<4 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @i32_to_v4i8(<4 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%load = load i32, i32 addrspace(1)* %in, align 4
%bc = bitcast i32 %load to <4 x i8>
store <4 x i8> %bc, <4 x i8> addrspace(1)* %out, align 4
@@ -58,17 +76,18 @@ define void @i32_to_v4i8(<4 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nou
; FUNC-LABEL: {{^}}bitcast_v2i32_to_f64:
; SI: s_endpgm
-define void @bitcast_v2i32_to_f64(double addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @bitcast_v2i32_to_f64(double addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%val = load <2 x i32>, <2 x i32> addrspace(1)* %in, align 8
%add = add <2 x i32> %val, <i32 4, i32 9>
%bc = bitcast <2 x i32> %add to double
- store double %bc, double addrspace(1)* %out, align 8
+ %fadd.bc = fadd double %bc, 1.0
+ store double %fadd.bc, double addrspace(1)* %out, align 8
ret void
}
; FUNC-LABEL: {{^}}bitcast_f64_to_v2i32:
; SI: s_endpgm
-define void @bitcast_f64_to_v2i32(<2 x i32> addrspace(1)* %out, double addrspace(1)* %in) {
+define amdgpu_kernel void @bitcast_f64_to_v2i32(<2 x i32> addrspace(1)* %out, double addrspace(1)* %in) {
%val = load double, double addrspace(1)* %in, align 8
%add = fadd double %val, 4.0
%bc = bitcast double %add to <2 x i32>
@@ -77,7 +96,7 @@ define void @bitcast_f64_to_v2i32(<2 x i32> addrspace(1)* %out, double addrspace
}
; FUNC-LABEL: {{^}}bitcast_v2i64_to_v2f64:
-define void @bitcast_v2i64_to_v2f64(i32 %cond, <2 x double> addrspace(1)* %out, <2 x i64> %value) {
+define amdgpu_kernel void @bitcast_v2i64_to_v2f64(i32 %cond, <2 x double> addrspace(1)* %out, <2 x i64> %value) {
entry:
%cmp0 = icmp eq i32 %cond, 0
br i1 %cmp0, label %if, label %end
@@ -93,7 +112,7 @@ end:
}
; FUNC-LABEL: {{^}}bitcast_v2f64_to_v2i64:
-define void @bitcast_v2f64_to_v2i64(i32 %cond, <2 x i64> addrspace(1)* %out, <2 x double> %value) {
+define amdgpu_kernel void @bitcast_v2f64_to_v2i64(i32 %cond, <2 x i64> addrspace(1)* %out, <2 x double> %value) {
entry:
%cmp0 = icmp eq i32 %cond, 0
br i1 %cmp0, label %if, label %end
diff --git a/test/CodeGen/AMDGPU/amdgcn.private-memory.ll b/test/CodeGen/AMDGPU/amdgcn.private-memory.ll
index a6d055891d4b..79450b97c218 100644
--- a/test/CodeGen/AMDGPU/amdgcn.private-memory.ll
+++ b/test/CodeGen/AMDGPU/amdgcn.private-memory.ll
@@ -15,7 +15,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
; GCN-NOT: v0
; GCN: v_add_i32_e32 [[RESULT:v[0-9]+]], vcc, v0, v{{[0-9]+}}
; GCN: buffer_store_dword [[RESULT]]
-define void @work_item_info(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @work_item_info(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = alloca [2 x i32]
%1 = getelementptr [2 x i32], [2 x i32]* %0, i32 0, i32 0
diff --git a/test/CodeGen/AMDGPU/amdgcn.sendmsg-m0.ll b/test/CodeGen/AMDGPU/amdgcn.sendmsg-m0.ll
deleted file mode 100644
index 8d8885852afe..000000000000
--- a/test/CodeGen/AMDGPU/amdgcn.sendmsg-m0.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN %s
-
-; GCN-LABEL: {{^}}main:
-; GCN: s_mov_b32 m0, s0
-; VI-NEXT: s_nop 0
-; GCN-NEXT: sendmsg(MSG_GS_DONE, GS_OP_NOP)
-; GCN-NEXT: s_endpgm
-
-define amdgpu_gs void @main(i32 inreg %a) #0 {
- call void @llvm.amdgcn.s.sendmsg(i32 3, i32 %a)
- ret void
-}
-
-; GCN-LABEL: {{^}}main_halt:
-; GCN: s_mov_b32 m0, s0
-; VI-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsghalt sendmsg(MSG_INTERRUPT)
-; GCN-NEXT: s_endpgm
-
-define void @main_halt(i32 inreg %a) #0 {
- call void @llvm.amdgcn.s.sendmsghalt(i32 1, i32 %a)
- ret void
-}
-
-; GCN-LABEL: {{^}}legacy:
-; GCN: s_mov_b32 m0, s0
-; VI-NEXT: s_nop 0
-; GCN-NEXT: sendmsg(MSG_GS_DONE, GS_OP_NOP)
-; GCN-NEXT: s_endpgm
-
-define amdgpu_gs void @legacy(i32 inreg %a) #0 {
- call void @llvm.SI.sendmsg(i32 3, i32 %a)
- ret void
-}
-
-declare void @llvm.amdgcn.s.sendmsg(i32, i32) #0
-declare void @llvm.amdgcn.s.sendmsghalt(i32, i32) #0
-declare void @llvm.SI.sendmsg(i32, i32) #0
-
-attributes #0 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/amdgcn.sendmsg.ll b/test/CodeGen/AMDGPU/amdgcn.sendmsg.ll
deleted file mode 100644
index 31f9cfca6def..000000000000
--- a/test/CodeGen/AMDGPU/amdgcn.sendmsg.ll
+++ /dev/null
@@ -1,161 +0,0 @@
-;RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck %s
-;RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s
-
-; CHECK-LABEL: {{^}}test_interrupt:
-; CHECK: s_mov_b32 m0, 0
-; CHECK-NOT: s_mov_b32 m0
-; CHECK: s_sendmsg sendmsg(MSG_INTERRUPT)
-define void @test_interrupt() {
-body:
- call void @llvm.amdgcn.s.sendmsg(i32 1, i32 0);
- ret void
-}
-
-; CHECK-LABEL: {{^}}test_gs_emit:
-; CHECK: s_mov_b32 m0, 0
-; CHECK-NOT: s_mov_b32 m0
-; CHECK: s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT, 0)
-define void @test_gs_emit() {
-body:
- call void @llvm.amdgcn.s.sendmsg(i32 34, i32 0);
- ret void
-}
-
-; CHECK-LABEL: {{^}}test_gs_cut:
-; CHECK: s_mov_b32 m0, 0
-; CHECK-NOT: s_mov_b32 m0
-; CHECK: s_sendmsg sendmsg(MSG_GS, GS_OP_CUT, 1)
-define void @test_gs_cut() {
-body:
- call void @llvm.amdgcn.s.sendmsg(i32 274, i32 0);
- ret void
-}
-
-; CHECK-LABEL: {{^}}test_gs_emit_cut:
-; CHECK: s_mov_b32 m0, 0
-; CHECK-NOT: s_mov_b32 m0
-; CHECK: s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT_CUT, 2)
-define void @test_gs_emit_cut() {
-body:
- call void @llvm.amdgcn.s.sendmsg(i32 562, i32 0)
- ret void
-}
-
-; CHECK-LABEL: {{^}}test_gs_done:
-; CHECK: s_mov_b32 m0, 0
-; CHECK-NOT: s_mov_b32 m0
-; CHECK: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
-define void @test_gs_done() {
-body:
- call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
- ret void
-}
-
-
-; CHECK-LABEL: {{^}}test_interrupt_halt:
-; CHECK: s_mov_b32 m0, 0
-; CHECK-NOT: s_mov_b32 m0
-; CHECK: s_sendmsghalt sendmsg(MSG_INTERRUPT)
-define void @test_interrupt_halt() {
-body:
- call void @llvm.amdgcn.s.sendmsghalt(i32 1, i32 0)
- ret void
-}
-
-; CHECK-LABEL: {{^}}test_gs_emit_halt:
-; CHECK: s_mov_b32 m0, 0
-; CHECK-NOT: s_mov_b32 m0
-; CHECK: s_sendmsghalt sendmsg(MSG_GS, GS_OP_EMIT, 0)
-define void @test_gs_emit_halt() {
-body:
- call void @llvm.amdgcn.s.sendmsghalt(i32 34, i32 0)
- ret void
-}
-
-; CHECK-LABEL: {{^}}test_gs_cut_halt:
-; CHECK: s_mov_b32 m0, 0
-; CHECK-NOT: s_mov_b32 m0
-; CHECK: s_sendmsghalt sendmsg(MSG_GS, GS_OP_CUT, 1)
-define void @test_gs_cut_halt() {
-body:
- call void @llvm.amdgcn.s.sendmsghalt(i32 274, i32 0)
- ret void
-}
-
-; CHECK-LABEL: {{^}}test_gs_emit_cut_halt:
-; CHECK: s_mov_b32 m0, 0
-; CHECK-NOT: s_mov_b32 m0
-; CHECK: s_sendmsghalt sendmsg(MSG_GS, GS_OP_EMIT_CUT, 2)
-define void @test_gs_emit_cut_halt() {
-body:
- call void @llvm.amdgcn.s.sendmsghalt(i32 562, i32 0)
- ret void
-}
-
-; CHECK-LABEL: {{^}}test_gs_done_halt:
-; CHECK: s_mov_b32 m0, 0
-; CHECK-NOT: s_mov_b32 m0
-; CHECK: s_sendmsghalt sendmsg(MSG_GS_DONE, GS_OP_NOP)
-define void @test_gs_done_halt() {
-body:
- call void @llvm.amdgcn.s.sendmsghalt(i32 3, i32 0)
- ret void
-}
-
-; Legacy
-; CHECK-LABEL: {{^}}test_legacy_interrupt:
-; CHECK: s_mov_b32 m0, 0
-; CHECK-NOT: s_mov_b32 m0
-; CHECK: s_sendmsg sendmsg(MSG_INTERRUPT)
-define void @test_legacy_interrupt() {
-body:
- call void @llvm.SI.sendmsg(i32 1, i32 0)
- ret void
-}
-
-; CHECK-LABEL: {{^}}test_legacy_gs_emit:
-; CHECK: s_mov_b32 m0, 0
-; CHECK-NOT: s_mov_b32 m0
-; CHECK: s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT, 0)
-define void @test_legacy_gs_emit() {
-body:
- call void @llvm.SI.sendmsg(i32 34, i32 0)
- ret void
-}
-
-; CHECK-LABEL: {{^}}test_legacy_gs_cut:
-; CHECK: s_mov_b32 m0, 0
-; CHECK-NOT: s_mov_b32 m0
-; CHECK: s_sendmsg sendmsg(MSG_GS, GS_OP_CUT, 1)
-define void @test_legacy_gs_cut() {
-body:
- call void @llvm.SI.sendmsg(i32 274, i32 0)
- ret void
-}
-
-; CHECK-LABEL: {{^}}test_legacy_gs_emit_cut:
-; CHECK: s_mov_b32 m0, 0
-; CHECK-NOT: s_mov_b32 m0
-; CHECK: s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT_CUT, 2)
-define void @test_legacy_gs_emit_cut() {
-body:
- call void @llvm.SI.sendmsg(i32 562, i32 0)
- ret void
-}
-
-; CHECK-LABEL: {{^}}test_legacy_gs_done:
-; CHECK: s_mov_b32 m0, 0
-; CHECK-NOT: s_mov_b32 m0
-; CHECK: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
-define void @test_legacy_gs_done() {
-body:
- call void @llvm.SI.sendmsg(i32 3, i32 0)
- ret void
-}
-
-; Function Attrs: nounwind
-declare void @llvm.amdgcn.s.sendmsg(i32, i32) #0
-declare void @llvm.amdgcn.s.sendmsghalt(i32, i32) #0
-declare void @llvm.SI.sendmsg(i32, i32) #0
-
-attributes #0 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/amdgpu-alias-analysis.ll b/test/CodeGen/AMDGPU/amdgpu-alias-analysis.ll
new file mode 100644
index 000000000000..e68ed9cac93f
--- /dev/null
+++ b/test/CodeGen/AMDGPU/amdgpu-alias-analysis.ll
@@ -0,0 +1,9 @@
+; RUN: opt -mtriple=amdgcn-- -O3 -aa-eval -print-all-alias-modref-info -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt -mtriple=r600-- -O3 -aa-eval -print-all-alias-modref-info -disable-output < %s 2>&1 | FileCheck %s
+
+; CHECK: NoAlias: i8 addrspace(1)* %p1, i8* %p
+
+define void @test(i8* %p, i8 addrspace(1)* %p1) {
+ ret void
+}
+
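; Annotation (not part of the patch): in the address-space numbering
; used here, %p is a private (addrspace 0) pointer and %p1 is global
; (addrspace 1); two distinct, non-flat address spaces can never alias,
; which is what the single NoAlias line checks for both triples.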
diff --git a/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fdiv.ll b/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fdiv.ll
index d78c75165be2..0e5605961e10 100644
--- a/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fdiv.ll
+++ b/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fdiv.ll
@@ -4,7 +4,7 @@
; NOOP-LABEL: @noop_fdiv_fpmath(
; NOOP: %md.25ulp = fdiv float %a, %b, !fpmath !0
-define void @noop_fdiv_fpmath(float addrspace(1)* %out, float %a, float %b) #3 {
+define amdgpu_kernel void @noop_fdiv_fpmath(float addrspace(1)* %out, float %a, float %b) #3 {
%md.25ulp = fdiv float %a, %b, !fpmath !0
store volatile float %md.25ulp, float addrspace(1)* %out
ret void
@@ -18,7 +18,7 @@ define void @noop_fdiv_fpmath(float addrspace(1)* %out, float %a, float %b) #3 {
; CHECK: %md.3ulp = call float @llvm.amdgcn.fdiv.fast(float %a, float %b), !fpmath !3
; CHECK: %fast.md.25ulp = call fast float @llvm.amdgcn.fdiv.fast(float %a, float %b), !fpmath !0
; CHECK: arcp.md.25ulp = call arcp float @llvm.amdgcn.fdiv.fast(float %a, float %b), !fpmath !0
-define void @fdiv_fpmath(float addrspace(1)* %out, float %a, float %b) #1 {
+define amdgpu_kernel void @fdiv_fpmath(float addrspace(1)* %out, float %a, float %b) #1 {
%no.md = fdiv float %a, %b
store volatile float %no.md, float addrspace(1)* %out
@@ -51,7 +51,7 @@ define void @fdiv_fpmath(float addrspace(1)* %out, float %a, float %b) #1 {
; CHECK: %arcp.25ulp = fdiv arcp float 1.000000e+00, %x, !fpmath !0
; CHECK: %fast.no.md = fdiv fast float 1.000000e+00, %x{{$}}
; CHECK: %fast.25ulp = fdiv fast float 1.000000e+00, %x, !fpmath !0
-define void @rcp_fdiv_fpmath(float addrspace(1)* %out, float %x) #1 {
+define amdgpu_kernel void @rcp_fdiv_fpmath(float addrspace(1)* %out, float %x) #1 {
%no.md = fdiv float 1.0, %x
store volatile float %no.md, float addrspace(1)* %out
@@ -89,7 +89,7 @@ define void @rcp_fdiv_fpmath(float addrspace(1)* %out, float %x) #1 {
; CHECK: %[[B1:[0-9]+]] = extractelement <2 x float> %b, i64 1
; CHECK: %[[FDIV1:[0-9]+]] = call float @llvm.amdgcn.fdiv.fast(float %[[A1]], float %[[B1]]), !fpmath !0
; CHECK: %md.25ulp = insertelement <2 x float> %[[INS0]], float %[[FDIV1]], i64 1
-define void @fdiv_fpmath_vector(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) #1 {
+define amdgpu_kernel void @fdiv_fpmath_vector(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) #1 {
%no.md = fdiv <2 x float> %a, %b
store volatile <2 x float> %no.md, <2 x float> addrspace(1)* %out
@@ -120,7 +120,7 @@ define void @fdiv_fpmath_vector(<2 x float> addrspace(1)* %out, <2 x float> %a,
; CHECK: fdiv fast float 1.000000e+00, %{{[0-9]+}}, !fpmath !0
; CHECK: fdiv fast float 1.000000e+00, %{{[0-9]+}}, !fpmath !0
; CHECK: store volatile <2 x float> %fast.25ulp, <2 x float> addrspace(1)* %out
-define void @rcp_fdiv_fpmath_vector(<2 x float> addrspace(1)* %out, <2 x float> %x) #1 {
+define amdgpu_kernel void @rcp_fdiv_fpmath_vector(<2 x float> addrspace(1)* %out, <2 x float> %x) #1 {
%no.md = fdiv <2 x float> <float 1.0, float 1.0>, %x
store volatile <2 x float> %no.md, <2 x float> addrspace(1)* %out
@@ -158,7 +158,7 @@ define void @rcp_fdiv_fpmath_vector(<2 x float> addrspace(1)* %out, <2 x float>
; CHECK: %[[X1:[0-9]+]] = extractelement <2 x float> %x, i64 1
; CHECK: fdiv fast float 2.000000e+00, %[[X1]], !fpmath !0
; CHECK: store volatile <2 x float> %fast.25ulp
-define void @rcp_fdiv_fpmath_vector_nonsplat(<2 x float> addrspace(1)* %out, <2 x float> %x) #1 {
+define amdgpu_kernel void @rcp_fdiv_fpmath_vector_nonsplat(<2 x float> addrspace(1)* %out, <2 x float> %x) #1 {
%no.md = fdiv <2 x float> <float 1.0, float 2.0>, %x
store volatile <2 x float> %no.md, <2 x float> addrspace(1)* %out
@@ -186,7 +186,7 @@ define void @rcp_fdiv_fpmath_vector_nonsplat(<2 x float> addrspace(1)* %out, <2
; CHECK: call fast float @llvm.amdgcn.fdiv.fast(float %{{[0-9]+}}, float %{{[0-9]+}}), !fpmath !0
; CHECK: call fast float @llvm.amdgcn.fdiv.fast(float %{{[0-9]+}}, float %{{[0-9]+}}), !fpmath !0
; CHECK: store volatile <2 x float> %fast.25ulp
-define void @rcp_fdiv_fpmath_vector_partial_constant(<2 x float> addrspace(1)* %out, <2 x float> %x, <2 x float> %y) #1 {
+define amdgpu_kernel void @rcp_fdiv_fpmath_vector_partial_constant(<2 x float> addrspace(1)* %out, <2 x float> %x, <2 x float> %y) #1 {
%x.insert = insertelement <2 x float> %x, float 1.0, i32 0
%arcp.25ulp = fdiv arcp <2 x float> %x.insert, %y, !fpmath !0
@@ -206,7 +206,7 @@ define void @rcp_fdiv_fpmath_vector_partial_constant(<2 x float> addrspace(1)* %
; CHECK: %md.3ulp = fdiv float %a, %b, !fpmath !3
; CHECK: call fast float @llvm.amdgcn.fdiv.fast(float %a, float %b), !fpmath !0
; CHECK: call arcp float @llvm.amdgcn.fdiv.fast(float %a, float %b), !fpmath !0
-define void @fdiv_fpmath_f32_denormals(float addrspace(1)* %out, float %a, float %b) #2 {
+define amdgpu_kernel void @fdiv_fpmath_f32_denormals(float addrspace(1)* %out, float %a, float %b) #2 {
%no.md = fdiv float %a, %b
store volatile float %no.md, float addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll b/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll
index 13e4192ccd72..95a206e1dd00 100644
--- a/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll
+++ b/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll
@@ -6,7 +6,7 @@
; SI-NEXT: ret i3 %r
; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = add i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
; VI-NEXT: ret i3 %[[R_3]]
define i3 @add_i3(i3 %a, i3 %b) {
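; Annotation (not part of the patch), a worked check for the stronger
; flags: after zext, i3 operands lie in [0, 7], so the widened 32-bit
; sum is at most 14 and can overflow neither as unsigned nor as signed
; arithmetic, justifying `add nuw nsw`. A difference of such operands
; lies in [-7, 7], which supports nsw but not nuw on the widened sub,
; matching the updated VI expectations throughout this file.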
@@ -19,7 +19,7 @@ define i3 @add_i3(i3 %a, i3 %b) {
; SI-NEXT: ret i3 %r
; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = add nsw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
; VI-NEXT: ret i3 %[[R_3]]
define i3 @add_nsw_i3(i3 %a, i3 %b) {
@@ -32,7 +32,7 @@ define i3 @add_nsw_i3(i3 %a, i3 %b) {
; SI-NEXT: ret i3 %r
; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = add nuw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
; VI-NEXT: ret i3 %[[R_3]]
define i3 @add_nuw_i3(i3 %a, i3 %b) {
@@ -58,7 +58,7 @@ define i3 @add_nuw_nsw_i3(i3 %a, i3 %b) {
; SI-NEXT: ret i3 %r
; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = sub i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = sub nsw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
; VI-NEXT: ret i3 %[[R_3]]
define i3 @sub_i3(i3 %a, i3 %b) {
@@ -84,7 +84,7 @@ define i3 @sub_nsw_i3(i3 %a, i3 %b) {
; SI-NEXT: ret i3 %r
; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw nsw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
; VI-NEXT: ret i3 %[[R_3]]
define i3 @sub_nuw_i3(i3 %a, i3 %b) {
@@ -110,7 +110,7 @@ define i3 @sub_nuw_nsw_i3(i3 %a, i3 %b) {
; SI-NEXT: ret i3 %r
; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = mul i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
; VI-NEXT: ret i3 %[[R_3]]
define i3 @mul_i3(i3 %a, i3 %b) {
@@ -123,7 +123,7 @@ define i3 @mul_i3(i3 %a, i3 %b) {
; SI-NEXT: ret i3 %r
; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = mul nsw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
; VI-NEXT: ret i3 %[[R_3]]
define i3 @mul_nsw_i3(i3 %a, i3 %b) {
@@ -136,7 +136,7 @@ define i3 @mul_nsw_i3(i3 %a, i3 %b) {
; SI-NEXT: ret i3 %r
; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw nsw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
; VI-NEXT: ret i3 %[[R_3]]
define i3 @mul_nuw_i3(i3 %a, i3 %b) {
@@ -188,7 +188,7 @@ define i3 @srem_i3(i3 %a, i3 %b) {
; SI-NEXT: ret i3 %r
; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = shl i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
; VI-NEXT: ret i3 %[[R_3]]
define i3 @shl_i3(i3 %a, i3 %b) {
@@ -201,7 +201,7 @@ define i3 @shl_i3(i3 %a, i3 %b) {
; SI-NEXT: ret i3 %r
; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = shl nsw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
; VI-NEXT: ret i3 %[[R_3]]
define i3 @shl_nsw_i3(i3 %a, i3 %b) {
@@ -214,7 +214,7 @@ define i3 @shl_nsw_i3(i3 %a, i3 %b) {
; SI-NEXT: ret i3 %r
; VI: %[[A_32:[0-9]+]] = zext i3 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i3 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_3:[0-9]+]] = trunc i32 %[[R_32]] to i3
; VI-NEXT: ret i3 %[[R_3]]
define i3 @shl_nuw_i3(i3 %a, i3 %b) {
@@ -525,7 +525,7 @@ define i3 @bitreverse_i3(i3 %a) {
; SI-NEXT: ret i16 %r
; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = add i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16
; VI-NEXT: ret i16 %[[R_16]]
define i16 @add_i16(i16 %a, i16 %b) {
@@ -559,7 +559,7 @@ define i16 @constant_add_nuw_i16() {
; SI-NEXT: ret i16 %r
; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = add nsw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16
; VI-NEXT: ret i16 %[[R_16]]
define i16 @add_nsw_i16(i16 %a, i16 %b) {
@@ -572,7 +572,7 @@ define i16 @add_nsw_i16(i16 %a, i16 %b) {
; SI-NEXT: ret i16 %r
; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = add nuw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16
; VI-NEXT: ret i16 %[[R_16]]
define i16 @add_nuw_i16(i16 %a, i16 %b) {
@@ -598,7 +598,7 @@ define i16 @add_nuw_nsw_i16(i16 %a, i16 %b) {
; SI-NEXT: ret i16 %r
; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = sub i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = sub nsw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16
; VI-NEXT: ret i16 %[[R_16]]
define i16 @sub_i16(i16 %a, i16 %b) {
@@ -624,7 +624,7 @@ define i16 @sub_nsw_i16(i16 %a, i16 %b) {
; SI-NEXT: ret i16 %r
; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw nsw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16
; VI-NEXT: ret i16 %[[R_16]]
define i16 @sub_nuw_i16(i16 %a, i16 %b) {
@@ -650,7 +650,7 @@ define i16 @sub_nuw_nsw_i16(i16 %a, i16 %b) {
; SI-NEXT: ret i16 %r
; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = mul i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16
; VI-NEXT: ret i16 %[[R_16]]
define i16 @mul_i16(i16 %a, i16 %b) {
@@ -663,7 +663,7 @@ define i16 @mul_i16(i16 %a, i16 %b) {
; SI-NEXT: ret i16 %r
; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = mul nsw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16
; VI-NEXT: ret i16 %[[R_16]]
define i16 @mul_nsw_i16(i16 %a, i16 %b) {
@@ -676,7 +676,7 @@ define i16 @mul_nsw_i16(i16 %a, i16 %b) {
; SI-NEXT: ret i16 %r
; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw nsw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16
; VI-NEXT: ret i16 %[[R_16]]
define i16 @mul_nuw_i16(i16 %a, i16 %b) {
@@ -728,7 +728,7 @@ define i16 @srem_i16(i16 %a, i16 %b) {
; SI-NEXT: ret i16 %r
; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = shl i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16
; VI-NEXT: ret i16 %[[R_16]]
define i16 @shl_i16(i16 %a, i16 %b) {
@@ -741,7 +741,7 @@ define i16 @shl_i16(i16 %a, i16 %b) {
; SI-NEXT: ret i16 %r
; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = shl nsw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16
; VI-NEXT: ret i16 %[[R_16]]
define i16 @shl_nsw_i16(i16 %a, i16 %b) {
@@ -754,7 +754,7 @@ define i16 @shl_nsw_i16(i16 %a, i16 %b) {
; SI-NEXT: ret i16 %r
; VI: %[[A_32:[0-9]+]] = zext i16 %a to i32
; VI-NEXT: %[[B_32:[0-9]+]] = zext i16 %b to i32
-; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw i32 %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw i32 %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc i32 %[[R_32]] to i16
; VI-NEXT: ret i16 %[[R_16]]
define i16 @shl_nuw_i16(i16 %a, i16 %b) {
@@ -1072,7 +1072,7 @@ define i16 @bitreverse_i16(i16 %a) {
; SI-NEXT: ret <3 x i15> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = add <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
; VI-NEXT: ret <3 x i15> %[[R_15]]
define <3 x i15> @add_3xi15(<3 x i15> %a, <3 x i15> %b) {
@@ -1085,7 +1085,7 @@ define <3 x i15> @add_3xi15(<3 x i15> %a, <3 x i15> %b) {
; SI-NEXT: ret <3 x i15> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = add nsw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
; VI-NEXT: ret <3 x i15> %[[R_15]]
define <3 x i15> @add_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) {
@@ -1098,7 +1098,7 @@ define <3 x i15> @add_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) {
; SI-NEXT: ret <3 x i15> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = add nuw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
; VI-NEXT: ret <3 x i15> %[[R_15]]
define <3 x i15> @add_nuw_3xi15(<3 x i15> %a, <3 x i15> %b) {
@@ -1124,7 +1124,7 @@ define <3 x i15> @add_nuw_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) {
; SI-NEXT: ret <3 x i15> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = sub <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = sub nsw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
; VI-NEXT: ret <3 x i15> %[[R_15]]
define <3 x i15> @sub_3xi15(<3 x i15> %a, <3 x i15> %b) {
@@ -1150,7 +1150,7 @@ define <3 x i15> @sub_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) {
; SI-NEXT: ret <3 x i15> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
; VI-NEXT: ret <3 x i15> %[[R_15]]
define <3 x i15> @sub_nuw_3xi15(<3 x i15> %a, <3 x i15> %b) {
@@ -1176,7 +1176,7 @@ define <3 x i15> @sub_nuw_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) {
; SI-NEXT: ret <3 x i15> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = mul <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
; VI-NEXT: ret <3 x i15> %[[R_15]]
define <3 x i15> @mul_3xi15(<3 x i15> %a, <3 x i15> %b) {
@@ -1189,7 +1189,7 @@ define <3 x i15> @mul_3xi15(<3 x i15> %a, <3 x i15> %b) {
; SI-NEXT: ret <3 x i15> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = mul nsw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
; VI-NEXT: ret <3 x i15> %[[R_15]]
define <3 x i15> @mul_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) {
@@ -1202,7 +1202,7 @@ define <3 x i15> @mul_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) {
; SI-NEXT: ret <3 x i15> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
; VI-NEXT: ret <3 x i15> %[[R_15]]
define <3 x i15> @mul_nuw_3xi15(<3 x i15> %a, <3 x i15> %b) {
@@ -1254,7 +1254,7 @@ define <3 x i15> @srem_3xi15(<3 x i15> %a, <3 x i15> %b) {
; SI-NEXT: ret <3 x i15> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = shl <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
; VI-NEXT: ret <3 x i15> %[[R_15]]
define <3 x i15> @shl_3xi15(<3 x i15> %a, <3 x i15> %b) {
@@ -1267,7 +1267,7 @@ define <3 x i15> @shl_3xi15(<3 x i15> %a, <3 x i15> %b) {
; SI-NEXT: ret <3 x i15> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = shl nsw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
; VI-NEXT: ret <3 x i15> %[[R_15]]
define <3 x i15> @shl_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) {
@@ -1280,7 +1280,7 @@ define <3 x i15> @shl_nsw_3xi15(<3 x i15> %a, <3 x i15> %b) {
; SI-NEXT: ret <3 x i15> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i15> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i15> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_15:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i15>
; VI-NEXT: ret <3 x i15> %[[R_15]]
define <3 x i15> @shl_nuw_3xi15(<3 x i15> %a, <3 x i15> %b) {
@@ -1591,7 +1591,7 @@ define <3 x i15> @bitreverse_3xi15(<3 x i15> %a) {
; SI-NEXT: ret <3 x i16> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = add <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16>
; VI-NEXT: ret <3 x i16> %[[R_16]]
define <3 x i16> @add_3xi16(<3 x i16> %a, <3 x i16> %b) {
@@ -1604,7 +1604,7 @@ define <3 x i16> @add_3xi16(<3 x i16> %a, <3 x i16> %b) {
; SI-NEXT: ret <3 x i16> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = add nsw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16>
; VI-NEXT: ret <3 x i16> %[[R_16]]
define <3 x i16> @add_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) {
@@ -1617,7 +1617,7 @@ define <3 x i16> @add_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) {
; SI-NEXT: ret <3 x i16> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = add nuw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = add nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16>
; VI-NEXT: ret <3 x i16> %[[R_16]]
define <3 x i16> @add_nuw_3xi16(<3 x i16> %a, <3 x i16> %b) {
@@ -1643,7 +1643,7 @@ define <3 x i16> @add_nuw_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) {
; SI-NEXT: ret <3 x i16> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = sub <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = sub nsw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16>
; VI-NEXT: ret <3 x i16> %[[R_16]]
define <3 x i16> @sub_3xi16(<3 x i16> %a, <3 x i16> %b) {
@@ -1669,7 +1669,7 @@ define <3 x i16> @sub_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) {
; SI-NEXT: ret <3 x i16> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = sub nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16>
; VI-NEXT: ret <3 x i16> %[[R_16]]
define <3 x i16> @sub_nuw_3xi16(<3 x i16> %a, <3 x i16> %b) {
@@ -1695,7 +1695,7 @@ define <3 x i16> @sub_nuw_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) {
; SI-NEXT: ret <3 x i16> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = mul <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16>
; VI-NEXT: ret <3 x i16> %[[R_16]]
define <3 x i16> @mul_3xi16(<3 x i16> %a, <3 x i16> %b) {
@@ -1708,7 +1708,7 @@ define <3 x i16> @mul_3xi16(<3 x i16> %a, <3 x i16> %b) {
; SI-NEXT: ret <3 x i16> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = mul nsw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16>
; VI-NEXT: ret <3 x i16> %[[R_16]]
define <3 x i16> @mul_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) {
@@ -1721,7 +1721,7 @@ define <3 x i16> @mul_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) {
; SI-NEXT: ret <3 x i16> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = mul nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16>
; VI-NEXT: ret <3 x i16> %[[R_16]]
define <3 x i16> @mul_nuw_3xi16(<3 x i16> %a, <3 x i16> %b) {
@@ -1773,7 +1773,7 @@ define <3 x i16> @srem_3xi16(<3 x i16> %a, <3 x i16> %b) {
; SI-NEXT: ret <3 x i16> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = shl <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16>
; VI-NEXT: ret <3 x i16> %[[R_16]]
define <3 x i16> @shl_3xi16(<3 x i16> %a, <3 x i16> %b) {
@@ -1786,7 +1786,7 @@ define <3 x i16> @shl_3xi16(<3 x i16> %a, <3 x i16> %b) {
; SI-NEXT: ret <3 x i16> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = shl nsw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16>
; VI-NEXT: ret <3 x i16> %[[R_16]]
define <3 x i16> @shl_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) {
@@ -1799,7 +1799,7 @@ define <3 x i16> @shl_nsw_3xi16(<3 x i16> %a, <3 x i16> %b) {
; SI-NEXT: ret <3 x i16> %r
; VI: %[[A_32:[0-9]+]] = zext <3 x i16> %a to <3 x i32>
; VI-NEXT: %[[B_32:[0-9]+]] = zext <3 x i16> %b to <3 x i32>
-; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw <3 x i32> %[[A_32]], %[[B_32]]
+; VI-NEXT: %[[R_32:[0-9]+]] = shl nuw nsw <3 x i32> %[[A_32]], %[[B_32]]
; VI-NEXT: %[[R_16:[0-9]+]] = trunc <3 x i32> %[[R_32]] to <3 x i16>
; VI-NEXT: ret <3 x i16> %[[R_16]]
define <3 x i16> @shl_nuw_3xi16(<3 x i16> %a, <3 x i16> %b) {
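The flag changes in this file all follow from one range argument: both operands of the widened operation are zero-extended from a type of at most 16 bits, so each lies in [0, 65535]. A minimal LLVM IR sketch of that reasoning (a hypothetical function, not part of the patch):

define i32 @widen_flags_sketch(i16 %a, i16 %b) {
  %a32 = zext i16 %a to i32           ; %a32 in [0, 65535]
  %b32 = zext i16 %b to i32           ; %b32 in [0, 65535]
  %sum = add nuw nsw i32 %a32, %b32   ; at most 131070, so neither kind of wrap is possible
  %dif = sub nsw i32 %a32, %b32       ; in [-65535, 65535]: nsw holds, nuw does not
  %prd = mul nuw i32 %a32, %b32       ; at most 65535*65535 < 2^32 but > 2^31, so only nuw
  ret i32 %sum
}

This is the pattern the updated checks encode: add and shl take both flags, sub keeps nuw only when the original operation had it (the narrow difference was known not to wrap unsigned), and mul gains nsw only when the original operation was nuw (the product was known to fit in the narrow type).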
diff --git a/test/CodeGen/AMDGPU/amdgpu-shader-calling-convention.ll b/test/CodeGen/AMDGPU/amdgpu-shader-calling-convention.ll
index dd16907b748c..0ba8836b20dc 100644
--- a/test/CodeGen/AMDGPU/amdgpu-shader-calling-convention.ll
+++ b/test/CodeGen/AMDGPU/amdgpu-shader-calling-convention.ll
@@ -13,9 +13,10 @@ define amdgpu_cs float @shader_cc(<4 x i32> inreg, <4 x i32> inreg, i32 inreg %w
; GCN-LABEL: {{^}}kernel_cc:
; GCN: s_endpgm
-define float @kernel_cc(<4 x i32> inreg, <4 x i32> inreg, i32 inreg %w, float %v) {
+define amdgpu_kernel void @kernel_cc(<4 x i32> inreg, <4 x i32> inreg, i32 inreg %w, float %v) {
%vi = bitcast float %v to i32
%x = add i32 %vi, %w
%xf = bitcast i32 %x to float
- ret float %xf
+ store float %xf, float addrspace(1)* undef
+ ret void
}
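The kernel_cc rewrite above shows the pattern applied throughout this patch: a function without an explicit amdgpu calling convention no longer codegens as a kernel entry point, so tests exercising kernel code paths are marked amdgpu_kernel, return void, and hand results back through memory. A hedged sketch of that shape (hypothetical kernel, not from the patch):

define amdgpu_kernel void @result_via_memory(float %v, float addrspace(1)* %out) {
  %r = fadd float %v, 1.0
  ; Kernel entry points cannot return a value to a caller, so the result is stored.
  store float %r, float addrspace(1)* %out
  ret void
}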
diff --git a/test/CodeGen/AMDGPU/amdgpu.private-memory.ll b/test/CodeGen/AMDGPU/amdgpu.private-memory.ll
index 1511e1343808..1f4b1eaa209a 100644
--- a/test/CodeGen/AMDGPU/amdgpu.private-memory.ll
+++ b/test/CodeGen/AMDGPU/amdgpu.private-memory.ll
@@ -1,9 +1,9 @@
-; RUN: llc -show-mc-encoding -mattr=+promote-alloca -amdgpu-load-store-vectorizer=0 -verify-machineinstrs -march=amdgcn < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
-; RUN: llc -show-mc-encoding -mattr=+promote-alloca -amdgpu-load-store-vectorizer=0 -verify-machineinstrs -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-PROMOTE
-; RUN: llc -show-mc-encoding -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -verify-machineinstrs -march=amdgcn < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
-; RUN: llc -show-mc-encoding -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -mcpu=kaveri -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-ALLOCA
-; RUN: llc -show-mc-encoding -mattr=+promote-alloca -amdgpu-load-store-vectorizer=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
-; RUN: llc -show-mc-encoding -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -show-mc-encoding -mattr=+promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -march=amdgcn < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -show-mc-encoding -mattr=+promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-PROMOTE
+; RUN: llc -show-mc-encoding -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -march=amdgcn < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -show-mc-encoding -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -mcpu=kaveri -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-ALLOCA
+; RUN: llc -show-mc-encoding -mattr=+promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -show-mc-encoding -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -amdgpu-promote-alloca < %s | FileCheck -check-prefix=HSAOPT -check-prefix=OPT %s
; RUN: opt -S -mtriple=amdgcn-unknown-unknown -mcpu=kaveri -amdgpu-promote-alloca < %s | FileCheck -check-prefix=NOHSAOPT -check-prefix=OPT %s
@@ -27,8 +27,6 @@
; HSA-PROMOTE: workgroup_group_segment_byte_size = 5120
; HSA-PROMOTE: .end_amd_kernel_code_t
-; FIXME: These should be merged
-; HSA-PROMOTE: s_load_dword s{{[0-9]+}}, s[4:5], 0x1
; HSA-PROMOTE: s_load_dword s{{[0-9]+}}, s[4:5], 0x2
; SI-PROMOTE: ds_write_b32
@@ -58,9 +56,9 @@
; HSAOPT: [[LDZU:%[0-9]+]] = load i32, i32 addrspace(2)* [[GEP1]], align 4, !range !1, !invariant.load !0
; HSAOPT: [[EXTRACTY:%[0-9]+]] = lshr i32 [[LDXY]], 16
-; HSAOPT: [[WORKITEM_ID_X:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.x(), !range !1
-; HSAOPT: [[WORKITEM_ID_Y:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.y(), !range !1
-; HSAOPT: [[WORKITEM_ID_Z:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.z(), !range !1
+; HSAOPT: [[WORKITEM_ID_X:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.x(), !range !2
+; HSAOPT: [[WORKITEM_ID_Y:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.y(), !range !2
+; HSAOPT: [[WORKITEM_ID_Z:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.z(), !range !2
; HSAOPT: [[Y_SIZE_X_Z_SIZE:%[0-9]+]] = mul nuw nsw i32 [[EXTRACTY]], [[LDZU]]
; HSAOPT: [[YZ_X_XID:%[0-9]+]] = mul i32 [[Y_SIZE_X_Z_SIZE]], [[WORKITEM_ID_X]]
@@ -77,10 +75,10 @@
; NOHSAOPT: call i32 @llvm.r600.read.local.size.y(), !range !0
; NOHSAOPT: call i32 @llvm.r600.read.local.size.z(), !range !0
-; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.x(), !range !0
-; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.y(), !range !0
-; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.z(), !range !0
-define void @mova_same_clause(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
+; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.x(), !range !1
+; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.y(), !range !1
+; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.z(), !range !1
+define amdgpu_kernel void @mova_same_clause(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
entry:
%stack = alloca [5 x i32], align 4
%0 = load i32, i32 addrspace(1)* %in, align 4
@@ -102,7 +100,7 @@ entry:
; OPT-LABEL: @high_alignment(
; OPT: getelementptr inbounds [256 x [8 x i32]], [256 x [8 x i32]] addrspace(3)* @high_alignment.stack, i32 0, i32 %{{[0-9]+}}
-define void @high_alignment(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
+define amdgpu_kernel void @high_alignment(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
entry:
%stack = alloca [8 x i32], align 16
%0 = load i32, i32 addrspace(1)* %in, align 4
@@ -127,7 +125,7 @@ entry:
; OPT: alloca [5 x i32]
; SI-NOT: ds_write
-define void @no_replace_inbounds_gep(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
+define amdgpu_kernel void @no_replace_inbounds_gep(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
entry:
%stack = alloca [5 x i32], align 4
%0 = load i32, i32 addrspace(1)* %in, align 4
@@ -162,7 +160,7 @@ entry:
; SI-NOT: v_movrel
%struct.point = type { i32, i32 }
-define void @multiple_structs(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @multiple_structs(i32 addrspace(1)* %out) #0 {
entry:
%a = alloca %struct.point
%b = alloca %struct.point
@@ -191,7 +189,7 @@ entry:
; R600-NOT: MOVA_INT
; SI-NOT: v_movrel
-define void @direct_loop(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @direct_loop(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
entry:
%prv_array_const = alloca [2 x i32]
%prv_array = alloca [2 x i32]
@@ -227,11 +225,15 @@ for.end:
; R600: MOVA_INT
-; SI-PROMOTE-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} ; encoding: [0x00,0x00,0x68,0xe0
-; SI-PROMOTE-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:2 ; encoding: [0x02,0x00,0x68,0xe0
+; SI-ALLOCA-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:6 ; encoding: [0x06,0x00,0x68,0xe0
+; SI-ALLOCA-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:4 ; encoding: [0x04,0x00,0x68,0xe0
; Loaded value is 0 or 1, so sext will become zext, so we get buffer_load_ushort instead of buffer_load_sshort.
-; SI-PROMOTE: buffer_load_ushort v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
-define void @short_array(i32 addrspace(1)* %out, i32 %index) #0 {
+; SI-ALLOCA: buffer_load_sshort v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
+
+; SI-PROMOTE: s_load_dword [[IDX:s[0-9]+]]
+; SI-PROMOTE: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 16
+; SI-PROMOTE: v_bfe_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[SCALED_IDX]], 16
+define amdgpu_kernel void @short_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
%0 = alloca [2 x i16]
%1 = getelementptr inbounds [2 x i16], [2 x i16]* %0, i32 0, i32 0
@@ -249,12 +251,12 @@ entry:
; R600: MOVA_INT
-; SI-PROMOTE-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} ; encoding:
-; SI-PROMOTE-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:1 ; encoding:
+; SI-PROMOTE-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:4 ; encoding:
+; SI-PROMOTE-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:5 ; encoding:
-; SI-ALLOCA-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} ; encoding: [0x00,0x00,0x60,0xe0
-; SI-ALLOCA-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:1 ; encoding: [0x01,0x00,0x60,0xe0
-define void @char_array(i32 addrspace(1)* %out, i32 %index) #0 {
+; SI-ALLOCA-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:4 ; encoding: [0x04,0x00,0x60,0xe0
+; SI-ALLOCA-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:5 ; encoding: [0x05,0x00,0x60,0xe0
+define amdgpu_kernel void @char_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
%0 = alloca [2 x i8]
%1 = getelementptr inbounds [2 x i8], [2 x i8]* %0, i32 0, i32 0
@@ -277,7 +279,7 @@ entry:
;
; A total of 5 bytes should be allocated and used.
; SI: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:4 ;
-define void @no_overlap(i32 addrspace(1)* %out, i32 %in) #0 {
+define amdgpu_kernel void @no_overlap(i32 addrspace(1)* %out, i32 %in) #0 {
entry:
%0 = alloca [3 x i8], align 1
%1 = alloca [2 x i8], align 1
@@ -301,7 +303,7 @@ entry:
ret void
}
-define void @char_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
+define amdgpu_kernel void @char_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
%alloca = alloca [2 x [2 x i8]]
%gep0 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 0
@@ -315,7 +317,7 @@ entry:
ret void
}
-define void @i32_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
+define amdgpu_kernel void @i32_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
%alloca = alloca [2 x [2 x i32]]
%gep0 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 0
@@ -328,7 +330,7 @@ entry:
ret void
}
-define void @i64_array_array(i64 addrspace(1)* %out, i32 %index) #0 {
+define amdgpu_kernel void @i64_array_array(i64 addrspace(1)* %out, i32 %index) #0 {
entry:
%alloca = alloca [2 x [2 x i64]]
%gep0 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 0
@@ -343,7 +345,7 @@ entry:
%struct.pair32 = type { i32, i32 }
-define void @struct_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
+define amdgpu_kernel void @struct_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
%alloca = alloca [2 x [2 x %struct.pair32]]
%gep0 = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 0, i32 1
@@ -356,7 +358,7 @@ entry:
ret void
}
-define void @struct_pair32_array(i32 addrspace(1)* %out, i32 %index) #0 {
+define amdgpu_kernel void @struct_pair32_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
%alloca = alloca [2 x %struct.pair32]
%gep0 = getelementptr [2 x %struct.pair32], [2 x %struct.pair32]* %alloca, i32 0, i32 0, i32 1
@@ -369,7 +371,7 @@ entry:
ret void
}
-define void @select_private(i32 addrspace(1)* %out, i32 %in) nounwind {
+define amdgpu_kernel void @select_private(i32 addrspace(1)* %out, i32 %in) nounwind {
entry:
%tmp = alloca [2 x i32]
%tmp1 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 0
@@ -390,7 +392,7 @@ entry:
; SI-NOT: ds_write
; SI: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen
; SI: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen offset:5 ;
-define void @ptrtoint(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @ptrtoint(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%alloca = alloca [16 x i32]
%tmp0 = getelementptr [16 x i32], [16 x i32]* %alloca, i32 0, i32 %a
store i32 5, i32* %tmp0
@@ -406,7 +408,7 @@ define void @ptrtoint(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
; OPT-LABEL: @pointer_typed_alloca(
; OPT: getelementptr inbounds [256 x i32 addrspace(1)*], [256 x i32 addrspace(1)*] addrspace(3)* @pointer_typed_alloca.A.addr, i32 0, i32 %{{[0-9]+}}
; OPT: load i32 addrspace(1)*, i32 addrspace(1)* addrspace(3)* %{{[0-9]+}}, align 4
-define void @pointer_typed_alloca(i32 addrspace(1)* %A) {
+define amdgpu_kernel void @pointer_typed_alloca(i32 addrspace(1)* %A) {
entry:
%A.addr = alloca i32 addrspace(1)*, align 4
store i32 addrspace(1)* %A, i32 addrspace(1)** %A.addr, align 4
@@ -458,7 +460,7 @@ entry:
; SI: buffer_load_dword
; SI: buffer_load_dword
-define void @v16i32_stack(<16 x i32> addrspace(1)* %out, i32 %a) {
+define amdgpu_kernel void @v16i32_stack(<16 x i32> addrspace(1)* %out, i32 %a) {
%alloca = alloca [2 x <16 x i32>]
%tmp0 = getelementptr [2 x <16 x i32>], [2 x <16 x i32>]* %alloca, i32 0, i32 %a
%tmp5 = load <16 x i32>, <16 x i32>* %tmp0
@@ -502,7 +504,7 @@ define void @v16i32_stack(<16 x i32> addrspace(1)* %out, i32 %a) {
; SI: buffer_load_dword
; SI: buffer_load_dword
-define void @v16float_stack(<16 x float> addrspace(1)* %out, i32 %a) {
+define amdgpu_kernel void @v16float_stack(<16 x float> addrspace(1)* %out, i32 %a) {
%alloca = alloca [2 x <16 x float>]
%tmp0 = getelementptr [2 x <16 x float>], [2 x <16 x float>]* %alloca, i32 0, i32 %a
%tmp5 = load <16 x float>, <16 x float>* %tmp0
@@ -518,7 +520,7 @@ define void @v16float_stack(<16 x float> addrspace(1)* %out, i32 %a) {
; SI: buffer_load_dword
; SI: buffer_load_dword
-define void @v2float_stack(<2 x float> addrspace(1)* %out, i32 %a) {
+define amdgpu_kernel void @v2float_stack(<2 x float> addrspace(1)* %out, i32 %a) {
%alloca = alloca [16 x <2 x float>]
%tmp0 = getelementptr [16 x <2 x float>], [16 x <2 x float>]* %alloca, i32 0, i32 %a
%tmp5 = load <2 x float>, <2 x float>* %tmp0
@@ -529,7 +531,7 @@ define void @v2float_stack(<2 x float> addrspace(1)* %out, i32 %a) {
; OPT-LABEL: @direct_alloca_read_0xi32(
; OPT: store [0 x i32] undef, [0 x i32] addrspace(3)*
; OPT: load [0 x i32], [0 x i32] addrspace(3)*
-define void @direct_alloca_read_0xi32([0 x i32] addrspace(1)* %out, i32 %index) {
+define amdgpu_kernel void @direct_alloca_read_0xi32([0 x i32] addrspace(1)* %out, i32 %index) {
entry:
%tmp = alloca [0 x i32]
store [0 x i32] [], [0 x i32]* %tmp
@@ -541,7 +543,7 @@ entry:
; OPT-LABEL: @direct_alloca_read_1xi32(
; OPT: store [1 x i32] zeroinitializer, [1 x i32] addrspace(3)*
; OPT: load [1 x i32], [1 x i32] addrspace(3)*
-define void @direct_alloca_read_1xi32([1 x i32] addrspace(1)* %out, i32 %index) {
+define amdgpu_kernel void @direct_alloca_read_1xi32([1 x i32] addrspace(1)* %out, i32 %index) {
entry:
%tmp = alloca [1 x i32]
store [1 x i32] [i32 0], [1 x i32]* %tmp
@@ -553,6 +555,8 @@ entry:
attributes #0 = { nounwind "amdgpu-waves-per-eu"="1,2" }
; HSAOPT: !0 = !{}
-; HSAOPT: !1 = !{i32 0, i32 2048}
+; HSAOPT: !1 = !{i32 0, i32 257}
+; HSAOPT: !2 = !{i32 0, i32 256}
-; NOHSAOPT: !0 = !{i32 0, i32 2048}
+; NOHSAOPT: !0 = !{i32 0, i32 257}
+; NOHSAOPT: !1 = !{i32 0, i32 256}
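The metadata renumbering at the end of this file encodes tighter bounds than the old shared !{i32 0, i32 2048}: with a default maximum flat workgroup size of 256, the local-size reads are bounded by the half-open range [0, 257) and the workitem IDs by [0, 256). A small sketch of such a !range annotation on an intrinsic call (assumed bounds, not taken from the patch):

declare i32 @llvm.amdgcn.workitem.id.x()

define i32 @id_with_range() {
  ; The half-open range [0, 256) promises the ID never reaches 256.
  %id = call i32 @llvm.amdgcn.workitem.id.x(), !range !0
  ret i32 %id
}

!0 = !{i32 0, i32 256}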
diff --git a/test/CodeGen/AMDGPU/amdgpu.work-item-intrinsics.deprecated.ll b/test/CodeGen/AMDGPU/amdgpu.work-item-intrinsics.deprecated.ll
index e515ca00d184..187320805c11 100644
--- a/test/CodeGen/AMDGPU/amdgpu.work-item-intrinsics.deprecated.ll
+++ b/test/CodeGen/AMDGPU/amdgpu.work-item-intrinsics.deprecated.ll
@@ -12,7 +12,7 @@
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV {{\*? *}}[[VAL]], KC0[0].X
-define void @ngroups_x (i32 addrspace(1)* %out) {
+define amdgpu_kernel void @ngroups_x (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.ngroups.x() #0
store i32 %0, i32 addrspace(1)* %out
@@ -27,7 +27,7 @@ entry:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV {{\*? *}}[[VAL]], KC0[0].Y
-define void @ngroups_y (i32 addrspace(1)* %out) {
+define amdgpu_kernel void @ngroups_y (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.ngroups.y() #0
store i32 %0, i32 addrspace(1)* %out
@@ -42,7 +42,7 @@ entry:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV {{\*? *}}[[VAL]], KC0[0].Z
-define void @ngroups_z (i32 addrspace(1)* %out) {
+define amdgpu_kernel void @ngroups_z (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.ngroups.z() #0
store i32 %0, i32 addrspace(1)* %out
@@ -57,7 +57,7 @@ entry:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV {{\*? *}}[[VAL]], KC0[0].W
-define void @global_size_x (i32 addrspace(1)* %out) {
+define amdgpu_kernel void @global_size_x (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.global.size.x() #0
store i32 %0, i32 addrspace(1)* %out
@@ -72,7 +72,7 @@ entry:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV {{\*? *}}[[VAL]], KC0[1].X
-define void @global_size_y (i32 addrspace(1)* %out) {
+define amdgpu_kernel void @global_size_y (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.global.size.y() #0
store i32 %0, i32 addrspace(1)* %out
@@ -87,7 +87,7 @@ entry:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV {{\*? *}}[[VAL]], KC0[1].Y
-define void @global_size_z (i32 addrspace(1)* %out) {
+define amdgpu_kernel void @global_size_z (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.global.size.z() #0
store i32 %0, i32 addrspace(1)* %out
@@ -102,7 +102,7 @@ entry:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV {{\*? *}}[[VAL]], KC0[1].Z
-define void @local_size_x (i32 addrspace(1)* %out) {
+define amdgpu_kernel void @local_size_x (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.local.size.x() #0
store i32 %0, i32 addrspace(1)* %out
@@ -117,7 +117,7 @@ entry:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV {{\*? *}}[[VAL]], KC0[1].W
-define void @local_size_y (i32 addrspace(1)* %out) {
+define amdgpu_kernel void @local_size_y (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.local.size.y() #0
store i32 %0, i32 addrspace(1)* %out
@@ -132,7 +132,7 @@ entry:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV {{\*? *}}[[VAL]], KC0[2].X
-define void @local_size_z (i32 addrspace(1)* %out) {
+define amdgpu_kernel void @local_size_z (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.local.size.z() #0
store i32 %0, i32 addrspace(1)* %out
@@ -153,7 +153,7 @@ entry:
; GCN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 0
; GCN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 0
; GCN: COMPUTE_PGM_RSRC2:TIDIG_COMP_CNT: 0
-define void @tgid_x_legacy(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @tgid_x_legacy(i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tgid.x() #0
store i32 %0, i32 addrspace(1)* %out
@@ -165,7 +165,7 @@ entry:
; GCN-NOHSA: buffer_store_dword [[VVAL]]
; GCN-NOHSA: COMPUTE_PGM_RSRC2:USER_SGPR: 2
-define void @tgid_y_legacy(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @tgid_y_legacy(i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tgid.y() #0
store i32 %0, i32 addrspace(1)* %out
@@ -181,7 +181,7 @@ entry:
; GCN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 0
; GCN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 1
; GCN: COMPUTE_PGM_RSRC2:TIDIG_COMP_CNT: 0
-define void @tgid_z_legacy(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @tgid_z_legacy(i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tgid.z() #0
store i32 %0, i32 addrspace(1)* %out
@@ -194,7 +194,7 @@ entry:
; FUNC-LABEL: {{^}}tidig_x_legacy:
; GCN-NOHSA: buffer_store_dword v0
-define void @tidig_x_legacy(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @tidig_x_legacy(i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tidig.x() #0
store i32 %0, i32 addrspace(1)* %out
@@ -208,7 +208,7 @@ entry:
; FUNC-LABEL: {{^}}tidig_y_legacy:
; GCN-NOHSA: buffer_store_dword v1
-define void @tidig_y_legacy(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @tidig_y_legacy(i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tidig.y() #0
store i32 %0, i32 addrspace(1)* %out
@@ -221,7 +221,7 @@ entry:
; FUNC-LABEL: {{^}}tidig_z_legacy:
; GCN-NOHSA: buffer_store_dword v2
-define void @tidig_z_legacy(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @tidig_z_legacy(i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tidig.z() #0
store i32 %0, i32 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/and-gcn.ll b/test/CodeGen/AMDGPU/and-gcn.ll
index dde5f8c21769..2aec03aff8a3 100644
--- a/test/CodeGen/AMDGPU/and-gcn.ll
+++ b/test/CodeGen/AMDGPU/and-gcn.ll
@@ -4,7 +4,7 @@
; FUNC-LABEL: {{^}}v_and_i64_br:
; SI: v_and_b32
; SI: v_and_b32
-define void @v_and_i64_br(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
+define amdgpu_kernel void @v_and_i64_br(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
entry:
%tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
%tmp0 = icmp eq i32 %tid, 0
diff --git a/test/CodeGen/AMDGPU/and.ll b/test/CodeGen/AMDGPU/and.ll
index 5d9dcf64debf..c356f8b87cfc 100644
--- a/test/CodeGen/AMDGPU/and.ll
+++ b/test/CodeGen/AMDGPU/and.ll
@@ -11,7 +11,7 @@ declare i32 @llvm.r600.read.tidig.x() #0
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32>, <2 x i32> addrspace(1) * %in
%b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
@@ -31,7 +31,7 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32>, <4 x i32> addrspace(1) * %in
%b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
@@ -42,7 +42,7 @@ define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
; FUNC-LABEL: {{^}}s_and_i32:
; SI: s_and_b32
-define void @s_and_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define amdgpu_kernel void @s_and_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
%and = and i32 %a, %b
store i32 %and, i32 addrspace(1)* %out, align 4
ret void
@@ -50,7 +50,7 @@ define void @s_and_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
; FUNC-LABEL: {{^}}s_and_constant_i32:
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x12d687
-define void @s_and_constant_i32(i32 addrspace(1)* %out, i32 %a) {
+define amdgpu_kernel void @s_and_constant_i32(i32 addrspace(1)* %out, i32 %a) {
%and = and i32 %a, 1234567
store i32 %and, i32 addrspace(1)* %out, align 4
ret void
@@ -66,7 +66,7 @@ define void @s_and_constant_i32(i32 addrspace(1)* %out, i32 %a) {
; SI-DAG: s_and_b32 [[AND:s[0-9]+]], s{{[0-9]+}}, [[K]]
; SI-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], [[K]]
; SI: buffer_store_dword [[VK]]
-define void @s_and_multi_use_constant_i32_0(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define amdgpu_kernel void @s_and_multi_use_constant_i32_0(i32 addrspace(1)* %out, i32 %a, i32 %b) {
%and = and i32 %a, 1234567
; Just to stop future replacement of copy to vgpr + store with VALU op.
@@ -83,7 +83,7 @@ define void @s_and_multi_use_constant_i32_0(i32 addrspace(1)* %out, i32 %a, i32
; SI: s_add_i32
; SI: s_add_i32 [[ADD:s[0-9]+]], s{{[0-9]+}}, [[K]]
; SI: buffer_store_dword [[VK]]
-define void @s_and_multi_use_constant_i32_1(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define amdgpu_kernel void @s_and_multi_use_constant_i32_1(i32 addrspace(1)* %out, i32 %a, i32 %b) {
%and = and i32 %a, 1234567
%foo = add i32 %and, 1234567
%bar = add i32 %foo, %b
@@ -93,7 +93,7 @@ define void @s_and_multi_use_constant_i32_1(i32 addrspace(1)* %out, i32 %a, i32
; FUNC-LABEL: {{^}}v_and_i32_vgpr_vgpr:
; SI: v_and_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_and_i32_vgpr_vgpr(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) {
+define amdgpu_kernel void @v_and_i32_vgpr_vgpr(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) {
%tid = call i32 @llvm.r600.read.tidig.x() #0
%gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
%gep.b = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
@@ -109,7 +109,7 @@ define void @v_and_i32_vgpr_vgpr(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr
; SI-DAG: s_load_dword [[SA:s[0-9]+]]
; SI-DAG: {{buffer|flat}}_load_dword [[VB:v[0-9]+]]
; SI: v_and_b32_e32 v{{[0-9]+}}, [[SA]], [[VB]]
-define void @v_and_i32_sgpr_vgpr(i32 addrspace(1)* %out, i32 %a, i32 addrspace(1)* %bptr) {
+define amdgpu_kernel void @v_and_i32_sgpr_vgpr(i32 addrspace(1)* %out, i32 %a, i32 addrspace(1)* %bptr) {
%tid = call i32 @llvm.r600.read.tidig.x() #0
%gep.b = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -123,7 +123,7 @@ define void @v_and_i32_sgpr_vgpr(i32 addrspace(1)* %out, i32 %a, i32 addrspace(1
; SI-DAG: s_load_dword [[SA:s[0-9]+]]
; SI-DAG: {{buffer|flat}}_load_dword [[VB:v[0-9]+]]
; SI: v_and_b32_e32 v{{[0-9]+}}, [[SA]], [[VB]]
-define void @v_and_i32_vgpr_sgpr(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 %b) {
+define amdgpu_kernel void @v_and_i32_vgpr_sgpr(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 %b) {
%tid = call i32 @llvm.r600.read.tidig.x() #0
%gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -135,7 +135,7 @@ define void @v_and_i32_vgpr_sgpr(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr
; FUNC-LABEL: {{^}}v_and_constant_i32
; SI: v_and_b32_e32 v{{[0-9]+}}, 0x12d687, v{{[0-9]+}}
-define void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
+define amdgpu_kernel void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
%a = load i32, i32 addrspace(1)* %aptr, align 4
%and = and i32 %a, 1234567
store i32 %and, i32 addrspace(1)* %out, align 4
@@ -144,7 +144,7 @@ define void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr)
; FUNC-LABEL: {{^}}v_and_inline_imm_64_i32
; SI: v_and_b32_e32 v{{[0-9]+}}, 64, v{{[0-9]+}}
-define void @v_and_inline_imm_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
+define amdgpu_kernel void @v_and_inline_imm_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
%a = load i32, i32 addrspace(1)* %aptr, align 4
%and = and i32 %a, 64
store i32 %and, i32 addrspace(1)* %out, align 4
@@ -153,7 +153,7 @@ define void @v_and_inline_imm_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %
; FUNC-LABEL: {{^}}v_and_inline_imm_neg_16_i32
; SI: v_and_b32_e32 v{{[0-9]+}}, -16, v{{[0-9]+}}
-define void @v_and_inline_imm_neg_16_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
+define amdgpu_kernel void @v_and_inline_imm_neg_16_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
%a = load i32, i32 addrspace(1)* %aptr, align 4
%and = and i32 %a, -16
store i32 %and, i32 addrspace(1)* %out, align 4
@@ -162,7 +162,7 @@ define void @v_and_inline_imm_neg_16_i32(i32 addrspace(1)* %out, i32 addrspace(1
; FUNC-LABEL: {{^}}s_and_i64
; SI: s_and_b64
-define void @s_and_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @s_and_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
%and = and i64 %a, %b
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -171,7 +171,7 @@ define void @s_and_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
; FIXME: Should use SGPRs
; FUNC-LABEL: {{^}}s_and_i1:
; SI: v_and_b32
-define void @s_and_i1(i1 addrspace(1)* %out, i1 %a, i1 %b) {
+define amdgpu_kernel void @s_and_i1(i1 addrspace(1)* %out, i1 %a, i1 %b) {
%and = and i1 %a, %b
store i1 %and, i1 addrspace(1)* %out
ret void
@@ -181,7 +181,7 @@ define void @s_and_i1(i1 addrspace(1)* %out, i1 %a, i1 %b) {
; SI-DAG: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000{{$}}
; SI-DAG: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80{{$}}
; SI: buffer_store_dwordx2
-define void @s_and_constant_i64(i64 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @s_and_constant_i64(i64 addrspace(1)* %out, i64 %a) {
%and = and i64 %a, 549756338176
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -191,7 +191,7 @@ define void @s_and_constant_i64(i64 addrspace(1)* %out, i64 %a) {
; XSI-DAG: s_mov_b32 s[[KLO:[0-9]+]], 0x80000{{$}}
; XSI-DAG: s_mov_b32 s[[KHI:[0-9]+]], 0x80{{$}}
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[KLO]]:[[KHI]]{{\]}}
-define void @s_and_multi_use_constant_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @s_and_multi_use_constant_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
%and0 = and i64 %a, 549756338176
%and1 = and i64 %b, 549756338176
store volatile i64 %and0, i64 addrspace(1)* %out
@@ -205,7 +205,7 @@ define void @s_and_multi_use_constant_i64(i64 addrspace(1)* %out, i64 %a, i64 %b
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x12d687{{$}}
; SI-NOT: and
; SI: buffer_store_dwordx2
-define void @s_and_32_bit_constant_i64(i64 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @s_and_32_bit_constant_i64(i64 addrspace(1)* %out, i64 %a) {
%and = and i64 %a, 1234567
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -223,7 +223,7 @@ define void @s_and_32_bit_constant_i64(i64 addrspace(1)* %out, i64 %a) {
; SI: s_and_b32 s{{[0-9]+}}, [[B]], 62
; SI-NOT: and
; SI: buffer_store_dwordx2
-define void @s_and_multi_use_inline_imm_i64(i64 addrspace(1)* %out, i64 %a, i64 %b, i64 %c) {
+define amdgpu_kernel void @s_and_multi_use_inline_imm_i64(i64 addrspace(1)* %out, i64 %a, i64 %b, i64 %c) {
%shl.a = shl i64 %a, 1
%shl.b = shl i64 %b, 1
%and0 = and i64 %shl.a, 62
@@ -238,7 +238,7 @@ define void @s_and_multi_use_inline_imm_i64(i64 addrspace(1)* %out, i64 %a, i64
; FUNC-LABEL: {{^}}v_and_i64:
; SI: v_and_b32
; SI: v_and_b32
-define void @v_and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
+define amdgpu_kernel void @v_and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
%a = load i64, i64 addrspace(1)* %aptr, align 8
%b = load i64, i64 addrspace(1)* %bptr, align 8
%and = and i64 %a, %b
@@ -250,7 +250,7 @@ define void @v_and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addr
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, 0xab19b207, {{v[0-9]+}}
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, 0x11e, {{v[0-9]+}}
; SI: buffer_store_dwordx2
-define void @v_and_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
+define amdgpu_kernel void @v_and_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
%a = load i64, i64 addrspace(1)* %aptr, align 8
%and = and i64 %a, 1231231234567
store i64 %and, i64 addrspace(1)* %out, align 8
@@ -268,7 +268,7 @@ define void @v_and_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr)
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, [[KHI]], v[[HI1]]
; SI: buffer_store_dwordx2
; SI: buffer_store_dwordx2
-define void @v_and_multi_use_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
+define amdgpu_kernel void @v_and_multi_use_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
%a = load volatile i64, i64 addrspace(1)* %aptr
%b = load volatile i64, i64 addrspace(1)* %aptr
%and0 = and i64 %a, 1231231234567
@@ -288,7 +288,7 @@ define void @v_and_multi_use_constant_i64(i64 addrspace(1)* %out, i64 addrspace(
; SI-NOT: and
; SI: buffer_store_dwordx2 v{{\[}}[[RESLO0]]
; SI: buffer_store_dwordx2 v{{\[}}[[RESLO1]]
-define void @v_and_multi_use_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
+define amdgpu_kernel void @v_and_multi_use_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
%a = load volatile i64, i64 addrspace(1)* %aptr
%b = load volatile i64, i64 addrspace(1)* %aptr
%and0 = and i64 %a, 63
@@ -304,7 +304,7 @@ define void @v_and_multi_use_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspac
; SI: v_and_b32_e32 {{v[0-9]+}}, 0x12d687, [[VAL]]
; SI-NOT: and
; SI: buffer_store_dwordx2
-define void @v_and_i64_32_bit_constant(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
+define amdgpu_kernel void @v_and_i64_32_bit_constant(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
%a = load i64, i64 addrspace(1)* %aptr, align 8
%and = and i64 %a, 1234567
store i64 %and, i64 addrspace(1)* %out, align 8
@@ -317,7 +317,7 @@ define void @v_and_i64_32_bit_constant(i64 addrspace(1)* %out, i64 addrspace(1)*
; SI: v_and_b32_e32 {{v[0-9]+}}, 64, {{v[0-9]+}}
; SI-NOT: and
; SI: buffer_store_dwordx2
-define void @v_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
+define amdgpu_kernel void @v_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
%a = load i64, i64 addrspace(1)* %aptr, align 8
%and = and i64 %a, 64
store i64 %and, i64 addrspace(1)* %out, align 8
@@ -331,7 +331,7 @@ define void @v_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %apt
; SI: v_and_b32_e32 v[[VAL_LO]], -8, v[[VAL_LO]]
; SI-NOT: and
; SI: buffer_store_dwordx2 v{{\[}}[[VAL_LO]]:[[VAL_HI]]{{\]}}
-define void @v_and_inline_neg_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
+define amdgpu_kernel void @v_and_inline_neg_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
%a = load i64, i64 addrspace(1)* %aptr, align 8
%and = and i64 %a, -8
store i64 %and, i64 addrspace(1)* %out, align 8
@@ -344,7 +344,7 @@ define void @v_and_inline_neg_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 64
; SI-NOT: and
; SI: buffer_store_dword
-define void @s_and_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_and_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%and = and i64 %a, 64
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -358,7 +358,7 @@ define void @s_and_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %
; SI-NOT: and
; SI: s_add_u32
; SI-NEXT: s_addc_u32
-define void @s_and_inline_imm_64_i64_noshrink(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a, i64 %b) {
+define amdgpu_kernel void @s_and_inline_imm_64_i64_noshrink(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a, i64 %b) {
%shl = shl i64 %a, 1
%and = and i64 %shl, 64
%add = add i64 %and, %b
@@ -372,7 +372,7 @@ define void @s_and_inline_imm_64_i64_noshrink(i64 addrspace(1)* %out, i64 addrsp
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 1
; SI-NOT: and
; SI: buffer_store_dwordx2
-define void @s_and_inline_imm_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_and_inline_imm_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%and = and i64 %a, 1
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -387,7 +387,7 @@ define void @s_and_inline_imm_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x3ff00000
; SI-NOT: and
; SI: buffer_store_dwordx2
-define void @s_and_inline_imm_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_and_inline_imm_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%and = and i64 %a, 4607182418800017408
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -402,7 +402,7 @@ define void @s_and_inline_imm_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0xbff00000
; SI-NOT: and
; SI: buffer_store_dwordx2
-define void @s_and_inline_imm_neg_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_and_inline_imm_neg_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%and = and i64 %a, 13830554455654793216
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -417,7 +417,7 @@ define void @s_and_inline_imm_neg_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x3fe00000
; SI-NOT: and
; SI: buffer_store_dwordx2
-define void @s_and_inline_imm_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_and_inline_imm_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%and = and i64 %a, 4602678819172646912
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -432,7 +432,7 @@ define void @s_and_inline_imm_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0xbfe00000
; SI-NOT: and
; SI: buffer_store_dwordx2
-define void @s_and_inline_imm_neg_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_and_inline_imm_neg_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%and = and i64 %a, 13826050856027422720
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -445,7 +445,7 @@ define void @s_and_inline_imm_neg_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 2.0
; SI-NOT: and
; SI: buffer_store_dwordx2
-define void @s_and_inline_imm_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_and_inline_imm_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%and = and i64 %a, 4611686018427387904
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -458,7 +458,7 @@ define void @s_and_inline_imm_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, -2.0
; SI-NOT: and
; SI: buffer_store_dwordx2
-define void @s_and_inline_imm_neg_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_and_inline_imm_neg_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%and = and i64 %a, 13835058055282163712
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -473,7 +473,7 @@ define void @s_and_inline_imm_neg_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x40100000
; SI-NOT: and
; SI: buffer_store_dwordx2
-define void @s_and_inline_imm_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_and_inline_imm_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%and = and i64 %a, 4616189618054758400
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -488,7 +488,7 @@ define void @s_and_inline_imm_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0xc0100000
; SI-NOT: and
; SI: buffer_store_dwordx2
-define void @s_and_inline_imm_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_and_inline_imm_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%and = and i64 %a, 13839561654909534208
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -505,7 +505,7 @@ define void @s_and_inline_imm_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(
; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, 4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
-define void @s_and_inline_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_and_inline_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%and = and i64 %a, 1082130432
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -518,7 +518,7 @@ define void @s_and_inline_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(
; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, -4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
-define void @s_and_inline_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_and_inline_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%and = and i64 %a, -1065353216
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -531,7 +531,7 @@ define void @s_and_inline_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrsp
; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, 4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
-define void @s_and_inline_high_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_and_inline_high_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%and = and i64 %a, 4647714815446351872
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
@@ -544,7 +544,7 @@ define void @s_and_inline_high_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrs
; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, -4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
-define void @s_and_inline_high_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_and_inline_high_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%and = and i64 %a, 13871086852301127680
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
diff --git a/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll b/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
index 084a6933da26..e2620ce353c6 100644
--- a/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
+++ b/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
@@ -11,22 +11,22 @@ declare i32 @llvm.amdgcn.workitem.id.z() #0
declare i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr() #0
declare i8 addrspace(2)* @llvm.amdgcn.queue.ptr() #0
-; HSA: define void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
-define void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
%val = call i32 @llvm.amdgcn.workgroup.id.x()
store i32 %val, i32 addrspace(1)* %ptr
ret void
}
-; HSA: define void @use_tgid_y(i32 addrspace(1)* %ptr) #2 {
-define void @use_tgid_y(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_tgid_y(i32 addrspace(1)* %ptr) #2 {
+define amdgpu_kernel void @use_tgid_y(i32 addrspace(1)* %ptr) #1 {
%val = call i32 @llvm.amdgcn.workgroup.id.y()
store i32 %val, i32 addrspace(1)* %ptr
ret void
}
-; HSA: define void @multi_use_tgid_y(i32 addrspace(1)* %ptr) #2 {
-define void @multi_use_tgid_y(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @multi_use_tgid_y(i32 addrspace(1)* %ptr) #2 {
+define amdgpu_kernel void @multi_use_tgid_y(i32 addrspace(1)* %ptr) #1 {
%val0 = call i32 @llvm.amdgcn.workgroup.id.y()
store volatile i32 %val0, i32 addrspace(1)* %ptr
%val1 = call i32 @llvm.amdgcn.workgroup.id.y()
@@ -34,8 +34,8 @@ define void @multi_use_tgid_y(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; HSA: define void @use_tgid_x_y(i32 addrspace(1)* %ptr) #2 {
-define void @use_tgid_x_y(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_tgid_x_y(i32 addrspace(1)* %ptr) #2 {
+define amdgpu_kernel void @use_tgid_x_y(i32 addrspace(1)* %ptr) #1 {
%val0 = call i32 @llvm.amdgcn.workgroup.id.x()
%val1 = call i32 @llvm.amdgcn.workgroup.id.y()
store volatile i32 %val0, i32 addrspace(1)* %ptr
@@ -43,15 +43,15 @@ define void @use_tgid_x_y(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; HSA: define void @use_tgid_z(i32 addrspace(1)* %ptr) #3 {
-define void @use_tgid_z(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_tgid_z(i32 addrspace(1)* %ptr) #3 {
+define amdgpu_kernel void @use_tgid_z(i32 addrspace(1)* %ptr) #1 {
%val = call i32 @llvm.amdgcn.workgroup.id.z()
store i32 %val, i32 addrspace(1)* %ptr
ret void
}
-; HSA: define void @use_tgid_x_z(i32 addrspace(1)* %ptr) #3 {
-define void @use_tgid_x_z(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_tgid_x_z(i32 addrspace(1)* %ptr) #3 {
+define amdgpu_kernel void @use_tgid_x_z(i32 addrspace(1)* %ptr) #1 {
%val0 = call i32 @llvm.amdgcn.workgroup.id.x()
%val1 = call i32 @llvm.amdgcn.workgroup.id.z()
store volatile i32 %val0, i32 addrspace(1)* %ptr
@@ -59,8 +59,8 @@ define void @use_tgid_x_z(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; HSA: define void @use_tgid_y_z(i32 addrspace(1)* %ptr) #4 {
-define void @use_tgid_y_z(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_tgid_y_z(i32 addrspace(1)* %ptr) #4 {
+define amdgpu_kernel void @use_tgid_y_z(i32 addrspace(1)* %ptr) #1 {
%val0 = call i32 @llvm.amdgcn.workgroup.id.y()
%val1 = call i32 @llvm.amdgcn.workgroup.id.z()
store volatile i32 %val0, i32 addrspace(1)* %ptr
@@ -68,8 +68,8 @@ define void @use_tgid_y_z(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; HSA: define void @use_tgid_x_y_z(i32 addrspace(1)* %ptr) #4 {
-define void @use_tgid_x_y_z(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_tgid_x_y_z(i32 addrspace(1)* %ptr) #4 {
+define amdgpu_kernel void @use_tgid_x_y_z(i32 addrspace(1)* %ptr) #1 {
%val0 = call i32 @llvm.amdgcn.workgroup.id.x()
%val1 = call i32 @llvm.amdgcn.workgroup.id.y()
%val2 = call i32 @llvm.amdgcn.workgroup.id.z()
@@ -79,29 +79,29 @@ define void @use_tgid_x_y_z(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; HSA: define void @use_tidig_x(i32 addrspace(1)* %ptr) #1 {
-define void @use_tidig_x(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_tidig_x(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tidig_x(i32 addrspace(1)* %ptr) #1 {
%val = call i32 @llvm.amdgcn.workitem.id.x()
store i32 %val, i32 addrspace(1)* %ptr
ret void
}
-; HSA: define void @use_tidig_y(i32 addrspace(1)* %ptr) #5 {
-define void @use_tidig_y(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_tidig_y(i32 addrspace(1)* %ptr) #5 {
+define amdgpu_kernel void @use_tidig_y(i32 addrspace(1)* %ptr) #1 {
%val = call i32 @llvm.amdgcn.workitem.id.y()
store i32 %val, i32 addrspace(1)* %ptr
ret void
}
-; HSA: define void @use_tidig_z(i32 addrspace(1)* %ptr) #6 {
-define void @use_tidig_z(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_tidig_z(i32 addrspace(1)* %ptr) #6 {
+define amdgpu_kernel void @use_tidig_z(i32 addrspace(1)* %ptr) #1 {
%val = call i32 @llvm.amdgcn.workitem.id.z()
store i32 %val, i32 addrspace(1)* %ptr
ret void
}
-; HSA: define void @use_tidig_x_tgid_x(i32 addrspace(1)* %ptr) #1 {
-define void @use_tidig_x_tgid_x(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_tidig_x_tgid_x(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tidig_x_tgid_x(i32 addrspace(1)* %ptr) #1 {
%val0 = call i32 @llvm.amdgcn.workitem.id.x()
%val1 = call i32 @llvm.amdgcn.workgroup.id.x()
store volatile i32 %val0, i32 addrspace(1)* %ptr
@@ -109,8 +109,8 @@ define void @use_tidig_x_tgid_x(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; HSA: define void @use_tidig_y_tgid_y(i32 addrspace(1)* %ptr) #7 {
-define void @use_tidig_y_tgid_y(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_tidig_y_tgid_y(i32 addrspace(1)* %ptr) #7 {
+define amdgpu_kernel void @use_tidig_y_tgid_y(i32 addrspace(1)* %ptr) #1 {
%val0 = call i32 @llvm.amdgcn.workitem.id.y()
%val1 = call i32 @llvm.amdgcn.workgroup.id.y()
store volatile i32 %val0, i32 addrspace(1)* %ptr
@@ -118,8 +118,8 @@ define void @use_tidig_y_tgid_y(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; HSA: define void @use_tidig_x_y_z(i32 addrspace(1)* %ptr) #8 {
-define void @use_tidig_x_y_z(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_tidig_x_y_z(i32 addrspace(1)* %ptr) #8 {
+define amdgpu_kernel void @use_tidig_x_y_z(i32 addrspace(1)* %ptr) #1 {
%val0 = call i32 @llvm.amdgcn.workitem.id.x()
%val1 = call i32 @llvm.amdgcn.workitem.id.y()
%val2 = call i32 @llvm.amdgcn.workitem.id.z()
@@ -129,8 +129,8 @@ define void @use_tidig_x_y_z(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; HSA: define void @use_all_workitems(i32 addrspace(1)* %ptr) #9 {
-define void @use_all_workitems(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_all_workitems(i32 addrspace(1)* %ptr) #9 {
+define amdgpu_kernel void @use_all_workitems(i32 addrspace(1)* %ptr) #1 {
%val0 = call i32 @llvm.amdgcn.workitem.id.x()
%val1 = call i32 @llvm.amdgcn.workitem.id.y()
%val2 = call i32 @llvm.amdgcn.workitem.id.z()
@@ -146,8 +146,8 @@ define void @use_all_workitems(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; HSA: define void @use_dispatch_ptr(i32 addrspace(1)* %ptr) #10 {
-define void @use_dispatch_ptr(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_dispatch_ptr(i32 addrspace(1)* %ptr) #10 {
+define amdgpu_kernel void @use_dispatch_ptr(i32 addrspace(1)* %ptr) #1 {
%dispatch.ptr = call i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr()
%bc = bitcast i8 addrspace(2)* %dispatch.ptr to i32 addrspace(2)*
%val = load i32, i32 addrspace(2)* %bc
@@ -155,8 +155,8 @@ define void @use_dispatch_ptr(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; HSA: define void @use_queue_ptr(i32 addrspace(1)* %ptr) #11 {
-define void @use_queue_ptr(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_queue_ptr(i32 addrspace(1)* %ptr) #11 {
+define amdgpu_kernel void @use_queue_ptr(i32 addrspace(1)* %ptr) #1 {
%dispatch.ptr = call i8 addrspace(2)* @llvm.amdgcn.queue.ptr()
%bc = bitcast i8 addrspace(2)* %dispatch.ptr to i32 addrspace(2)*
%val = load i32, i32 addrspace(2)* %bc
@@ -164,58 +164,58 @@ define void @use_queue_ptr(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; HSA: define void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #11 {
-define void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #11 {
+define amdgpu_kernel void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #1 {
%stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
store volatile i32 0, i32 addrspace(4)* %stof
ret void
}
-; HSA: define void @use_private_to_flat_addrspacecast(i32* %ptr) #11 {
-define void @use_private_to_flat_addrspacecast(i32* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_private_to_flat_addrspacecast(i32* %ptr) #11 {
+define amdgpu_kernel void @use_private_to_flat_addrspacecast(i32* %ptr) #1 {
%stof = addrspacecast i32* %ptr to i32 addrspace(4)*
store volatile i32 0, i32 addrspace(4)* %stof
ret void
}
-; HSA: define void @use_flat_to_group_addrspacecast(i32 addrspace(4)* %ptr) #1 {
-define void @use_flat_to_group_addrspacecast(i32 addrspace(4)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_flat_to_group_addrspacecast(i32 addrspace(4)* %ptr) #1 {
+define amdgpu_kernel void @use_flat_to_group_addrspacecast(i32 addrspace(4)* %ptr) #1 {
%ftos = addrspacecast i32 addrspace(4)* %ptr to i32 addrspace(3)*
store volatile i32 0, i32 addrspace(3)* %ftos
ret void
}
-; HSA: define void @use_flat_to_private_addrspacecast(i32 addrspace(4)* %ptr) #1 {
-define void @use_flat_to_private_addrspacecast(i32 addrspace(4)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_flat_to_private_addrspacecast(i32 addrspace(4)* %ptr) #1 {
+define amdgpu_kernel void @use_flat_to_private_addrspacecast(i32 addrspace(4)* %ptr) #1 {
%ftos = addrspacecast i32 addrspace(4)* %ptr to i32*
store volatile i32 0, i32* %ftos
ret void
}
; No-op addrspacecast should not use queue ptr
-; HSA: define void @use_global_to_flat_addrspacecast(i32 addrspace(1)* %ptr) #1 {
-define void @use_global_to_flat_addrspacecast(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_global_to_flat_addrspacecast(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_global_to_flat_addrspacecast(i32 addrspace(1)* %ptr) #1 {
%stof = addrspacecast i32 addrspace(1)* %ptr to i32 addrspace(4)*
store volatile i32 0, i32 addrspace(4)* %stof
ret void
}
-; HSA: define void @use_constant_to_flat_addrspacecast(i32 addrspace(2)* %ptr) #1 {
-define void @use_constant_to_flat_addrspacecast(i32 addrspace(2)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_constant_to_flat_addrspacecast(i32 addrspace(2)* %ptr) #1 {
+define amdgpu_kernel void @use_constant_to_flat_addrspacecast(i32 addrspace(2)* %ptr) #1 {
%stof = addrspacecast i32 addrspace(2)* %ptr to i32 addrspace(4)*
%ld = load volatile i32, i32 addrspace(4)* %stof
ret void
}
-; HSA: define void @use_flat_to_global_addrspacecast(i32 addrspace(4)* %ptr) #1 {
-define void @use_flat_to_global_addrspacecast(i32 addrspace(4)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_flat_to_global_addrspacecast(i32 addrspace(4)* %ptr) #1 {
+define amdgpu_kernel void @use_flat_to_global_addrspacecast(i32 addrspace(4)* %ptr) #1 {
%ftos = addrspacecast i32 addrspace(4)* %ptr to i32 addrspace(1)*
store volatile i32 0, i32 addrspace(1)* %ftos
ret void
}
-; HSA: define void @use_flat_to_constant_addrspacecast(i32 addrspace(4)* %ptr) #1 {
-define void @use_flat_to_constant_addrspacecast(i32 addrspace(4)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_flat_to_constant_addrspacecast(i32 addrspace(4)* %ptr) #1 {
+define amdgpu_kernel void @use_flat_to_constant_addrspacecast(i32 addrspace(4)* %ptr) #1 {
%ftos = addrspacecast i32 addrspace(4)* %ptr to i32 addrspace(2)*
%ld = load volatile i32, i32 addrspace(2)* %ftos
ret void
diff --git a/test/CodeGen/AMDGPU/annotate-kernel-features.ll b/test/CodeGen/AMDGPU/annotate-kernel-features.ll
index a4e7bb67d507..09750da4cb8c 100644
--- a/test/CodeGen/AMDGPU/annotate-kernel-features.ll
+++ b/test/CodeGen/AMDGPU/annotate-kernel-features.ll
@@ -12,22 +12,22 @@ declare i32 @llvm.r600.read.local.size.x() #0
declare i32 @llvm.r600.read.local.size.y() #0
declare i32 @llvm.r600.read.local.size.z() #0
-; ALL: define void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
-define void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
+; ALL: define amdgpu_kernel void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
%val = call i32 @llvm.r600.read.tgid.x()
store i32 %val, i32 addrspace(1)* %ptr
ret void
}
-; ALL: define void @use_tgid_y(i32 addrspace(1)* %ptr) #2 {
-define void @use_tgid_y(i32 addrspace(1)* %ptr) #1 {
+; ALL: define amdgpu_kernel void @use_tgid_y(i32 addrspace(1)* %ptr) #2 {
+define amdgpu_kernel void @use_tgid_y(i32 addrspace(1)* %ptr) #1 {
%val = call i32 @llvm.r600.read.tgid.y()
store i32 %val, i32 addrspace(1)* %ptr
ret void
}
-; ALL: define void @multi_use_tgid_y(i32 addrspace(1)* %ptr) #2 {
-define void @multi_use_tgid_y(i32 addrspace(1)* %ptr) #1 {
+; ALL: define amdgpu_kernel void @multi_use_tgid_y(i32 addrspace(1)* %ptr) #2 {
+define amdgpu_kernel void @multi_use_tgid_y(i32 addrspace(1)* %ptr) #1 {
%val0 = call i32 @llvm.r600.read.tgid.y()
store volatile i32 %val0, i32 addrspace(1)* %ptr
%val1 = call i32 @llvm.r600.read.tgid.y()
@@ -35,8 +35,8 @@ define void @multi_use_tgid_y(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; ALL: define void @use_tgid_x_y(i32 addrspace(1)* %ptr) #2 {
-define void @use_tgid_x_y(i32 addrspace(1)* %ptr) #1 {
+; ALL: define amdgpu_kernel void @use_tgid_x_y(i32 addrspace(1)* %ptr) #2 {
+define amdgpu_kernel void @use_tgid_x_y(i32 addrspace(1)* %ptr) #1 {
%val0 = call i32 @llvm.r600.read.tgid.x()
%val1 = call i32 @llvm.r600.read.tgid.y()
store volatile i32 %val0, i32 addrspace(1)* %ptr
@@ -44,15 +44,15 @@ define void @use_tgid_x_y(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; ALL: define void @use_tgid_z(i32 addrspace(1)* %ptr) #3 {
-define void @use_tgid_z(i32 addrspace(1)* %ptr) #1 {
+; ALL: define amdgpu_kernel void @use_tgid_z(i32 addrspace(1)* %ptr) #3 {
+define amdgpu_kernel void @use_tgid_z(i32 addrspace(1)* %ptr) #1 {
%val = call i32 @llvm.r600.read.tgid.z()
store i32 %val, i32 addrspace(1)* %ptr
ret void
}
-; ALL: define void @use_tgid_x_z(i32 addrspace(1)* %ptr) #3 {
-define void @use_tgid_x_z(i32 addrspace(1)* %ptr) #1 {
+; ALL: define amdgpu_kernel void @use_tgid_x_z(i32 addrspace(1)* %ptr) #3 {
+define amdgpu_kernel void @use_tgid_x_z(i32 addrspace(1)* %ptr) #1 {
%val0 = call i32 @llvm.r600.read.tgid.x()
%val1 = call i32 @llvm.r600.read.tgid.z()
store volatile i32 %val0, i32 addrspace(1)* %ptr
@@ -60,8 +60,8 @@ define void @use_tgid_x_z(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; ALL: define void @use_tgid_y_z(i32 addrspace(1)* %ptr) #4 {
-define void @use_tgid_y_z(i32 addrspace(1)* %ptr) #1 {
+; ALL: define amdgpu_kernel void @use_tgid_y_z(i32 addrspace(1)* %ptr) #4 {
+define amdgpu_kernel void @use_tgid_y_z(i32 addrspace(1)* %ptr) #1 {
%val0 = call i32 @llvm.r600.read.tgid.y()
%val1 = call i32 @llvm.r600.read.tgid.z()
store volatile i32 %val0, i32 addrspace(1)* %ptr
@@ -69,8 +69,8 @@ define void @use_tgid_y_z(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; ALL: define void @use_tgid_x_y_z(i32 addrspace(1)* %ptr) #4 {
-define void @use_tgid_x_y_z(i32 addrspace(1)* %ptr) #1 {
+; ALL: define amdgpu_kernel void @use_tgid_x_y_z(i32 addrspace(1)* %ptr) #4 {
+define amdgpu_kernel void @use_tgid_x_y_z(i32 addrspace(1)* %ptr) #1 {
%val0 = call i32 @llvm.r600.read.tgid.x()
%val1 = call i32 @llvm.r600.read.tgid.y()
%val2 = call i32 @llvm.r600.read.tgid.z()
@@ -80,29 +80,29 @@ define void @use_tgid_x_y_z(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; ALL: define void @use_tidig_x(i32 addrspace(1)* %ptr) #1 {
-define void @use_tidig_x(i32 addrspace(1)* %ptr) #1 {
+; ALL: define amdgpu_kernel void @use_tidig_x(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tidig_x(i32 addrspace(1)* %ptr) #1 {
%val = call i32 @llvm.r600.read.tidig.x()
store i32 %val, i32 addrspace(1)* %ptr
ret void
}
-; ALL: define void @use_tidig_y(i32 addrspace(1)* %ptr) #5 {
-define void @use_tidig_y(i32 addrspace(1)* %ptr) #1 {
+; ALL: define amdgpu_kernel void @use_tidig_y(i32 addrspace(1)* %ptr) #5 {
+define amdgpu_kernel void @use_tidig_y(i32 addrspace(1)* %ptr) #1 {
%val = call i32 @llvm.r600.read.tidig.y()
store i32 %val, i32 addrspace(1)* %ptr
ret void
}
-; ALL: define void @use_tidig_z(i32 addrspace(1)* %ptr) #6 {
-define void @use_tidig_z(i32 addrspace(1)* %ptr) #1 {
+; ALL: define amdgpu_kernel void @use_tidig_z(i32 addrspace(1)* %ptr) #6 {
+define amdgpu_kernel void @use_tidig_z(i32 addrspace(1)* %ptr) #1 {
%val = call i32 @llvm.r600.read.tidig.z()
store i32 %val, i32 addrspace(1)* %ptr
ret void
}
-; ALL: define void @use_tidig_x_tgid_x(i32 addrspace(1)* %ptr) #1 {
-define void @use_tidig_x_tgid_x(i32 addrspace(1)* %ptr) #1 {
+; ALL: define amdgpu_kernel void @use_tidig_x_tgid_x(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tidig_x_tgid_x(i32 addrspace(1)* %ptr) #1 {
%val0 = call i32 @llvm.r600.read.tidig.x()
%val1 = call i32 @llvm.r600.read.tgid.x()
store volatile i32 %val0, i32 addrspace(1)* %ptr
@@ -110,8 +110,8 @@ define void @use_tidig_x_tgid_x(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; ALL: define void @use_tidig_y_tgid_y(i32 addrspace(1)* %ptr) #7 {
-define void @use_tidig_y_tgid_y(i32 addrspace(1)* %ptr) #1 {
+; ALL: define amdgpu_kernel void @use_tidig_y_tgid_y(i32 addrspace(1)* %ptr) #7 {
+define amdgpu_kernel void @use_tidig_y_tgid_y(i32 addrspace(1)* %ptr) #1 {
%val0 = call i32 @llvm.r600.read.tidig.y()
%val1 = call i32 @llvm.r600.read.tgid.y()
store volatile i32 %val0, i32 addrspace(1)* %ptr
@@ -119,8 +119,8 @@ define void @use_tidig_y_tgid_y(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; ALL: define void @use_tidig_x_y_z(i32 addrspace(1)* %ptr) #8 {
-define void @use_tidig_x_y_z(i32 addrspace(1)* %ptr) #1 {
+; ALL: define amdgpu_kernel void @use_tidig_x_y_z(i32 addrspace(1)* %ptr) #8 {
+define amdgpu_kernel void @use_tidig_x_y_z(i32 addrspace(1)* %ptr) #1 {
%val0 = call i32 @llvm.r600.read.tidig.x()
%val1 = call i32 @llvm.r600.read.tidig.y()
%val2 = call i32 @llvm.r600.read.tidig.z()
@@ -130,8 +130,8 @@ define void @use_tidig_x_y_z(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; ALL: define void @use_all_workitems(i32 addrspace(1)* %ptr) #9 {
-define void @use_all_workitems(i32 addrspace(1)* %ptr) #1 {
+; ALL: define amdgpu_kernel void @use_all_workitems(i32 addrspace(1)* %ptr) #9 {
+define amdgpu_kernel void @use_all_workitems(i32 addrspace(1)* %ptr) #1 {
%val0 = call i32 @llvm.r600.read.tidig.x()
%val1 = call i32 @llvm.r600.read.tidig.y()
%val2 = call i32 @llvm.r600.read.tidig.z()
@@ -147,25 +147,25 @@ define void @use_all_workitems(i32 addrspace(1)* %ptr) #1 {
ret void
}
-; HSA: define void @use_get_local_size_x(i32 addrspace(1)* %ptr) #10 {
-; NOHSA: define void @use_get_local_size_x(i32 addrspace(1)* %ptr) #1 {
-define void @use_get_local_size_x(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_get_local_size_x(i32 addrspace(1)* %ptr) #10 {
+; NOHSA: define amdgpu_kernel void @use_get_local_size_x(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_get_local_size_x(i32 addrspace(1)* %ptr) #1 {
%val = call i32 @llvm.r600.read.local.size.x()
store i32 %val, i32 addrspace(1)* %ptr
ret void
}
-; HSA: define void @use_get_local_size_y(i32 addrspace(1)* %ptr) #10 {
-; NOHSA: define void @use_get_local_size_y(i32 addrspace(1)* %ptr) #1 {
-define void @use_get_local_size_y(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_get_local_size_y(i32 addrspace(1)* %ptr) #10 {
+; NOHSA: define amdgpu_kernel void @use_get_local_size_y(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_get_local_size_y(i32 addrspace(1)* %ptr) #1 {
%val = call i32 @llvm.r600.read.local.size.y()
store i32 %val, i32 addrspace(1)* %ptr
ret void
}
-; HSA: define void @use_get_local_size_z(i32 addrspace(1)* %ptr) #10 {
-; NOHSA: define void @use_get_local_size_z(i32 addrspace(1)* %ptr) #1 {
-define void @use_get_local_size_z(i32 addrspace(1)* %ptr) #1 {
+; HSA: define amdgpu_kernel void @use_get_local_size_z(i32 addrspace(1)* %ptr) #10 {
+; NOHSA: define amdgpu_kernel void @use_get_local_size_z(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_get_local_size_z(i32 addrspace(1)* %ptr) #1 {
%val = call i32 @llvm.r600.read.local.size.z()
store i32 %val, i32 addrspace(1)* %ptr
ret void
diff --git a/test/CodeGen/AMDGPU/anonymous-gv.ll b/test/CodeGen/AMDGPU/anonymous-gv.ll
index f37b0f3382f4..04fbe2ae1f94 100644
--- a/test/CodeGen/AMDGPU/anonymous-gv.ll
+++ b/test/CodeGen/AMDGPU/anonymous-gv.ll
@@ -6,13 +6,13 @@
; CHECK-LABEL: {{^}}test:
; CHECK: s_add_u32 s{{[0-9]+}}, s{{[0-9]+}}, __unnamed_1
; CHECK: s_endpgm
-define void @test() {
+define amdgpu_kernel void @test() {
store i32 1, i32 addrspace(1)* @0
ret void
}
; CHECK-LABEL: {{^}}__unnamed_2:
; CHECK: s_endpgm
-define void @1() {
+define amdgpu_kernel void @1() {
ret void
}
diff --git a/test/CodeGen/AMDGPU/any_extend_vector_inreg.ll b/test/CodeGen/AMDGPU/any_extend_vector_inreg.ll
new file mode 100644
index 000000000000..c61c23222bc7
--- /dev/null
+++ b/test/CodeGen/AMDGPU/any_extend_vector_inreg.ll
@@ -0,0 +1,58 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}any_extend_vector_inreg_v16i8_to_v4i32:
+; GCN: {{buffer|flat}}_load_dwordx4
+; GCN-DAG: {{buffer|flat}}_load_dwordx4
+; GCN-DAG: {{buffer|flat}}_load_dword
+
+; GCN: {{buffer|flat}}_store_byte
+; GCN: {{buffer|flat}}_store_byte
+; GCN: {{buffer|flat}}_store_byte
+; GCN: {{buffer|flat}}_store_byte
+
+; GCN: {{buffer|flat}}_store_byte
+; GCN: {{buffer|flat}}_store_byte
+; GCN: {{buffer|flat}}_store_byte
+; GCN: {{buffer|flat}}_store_byte
+
+; GCN: {{buffer|flat}}_store_byte
+; GCN: {{buffer|flat}}_store_byte
+; GCN: {{buffer|flat}}_store_byte
+; GCN: {{buffer|flat}}_store_byte
+
+; GCN: {{buffer|flat}}_store_byte
+; GCN: {{buffer|flat}}_store_byte
+; GCN: {{buffer|flat}}_store_byte
+; GCN: {{buffer|flat}}_store_byte
+define amdgpu_kernel void @any_extend_vector_inreg_v16i8_to_v4i32(<8 x i8> addrspace(1)* nocapture readonly %arg, <16 x i8> addrspace(1)* %arg1) local_unnamed_addr #0 {
+bb:
+ %tmp = bitcast <8 x i8> addrspace(1)* %arg to <16 x i8> addrspace(1)*
+ %tmp2 = load <16 x i8>, <16 x i8> addrspace(1)* %tmp, align 16
+ %tmp3 = extractelement <16 x i8> %tmp2, i64 4
+ %tmp6 = extractelement <16 x i8> %tmp2, i64 11
+ %tmp10 = getelementptr inbounds <8 x i8>, <8 x i8> addrspace(1)* %arg, i64 2
+ %tmp11 = bitcast <8 x i8> addrspace(1)* %tmp10 to <16 x i8> addrspace(1)*
+ %tmp12 = load <16 x i8>, <16 x i8> addrspace(1)* %tmp11, align 16
+ %tmp13 = extractelement <16 x i8> %tmp12, i64 7
+ %tmp17 = extractelement <16 x i8> %tmp12, i64 12
+ %tmp21 = getelementptr inbounds <8 x i8>, <8 x i8> addrspace(1)* %arg, i64 4
+ %tmp22 = bitcast <8 x i8> addrspace(1)* %tmp21 to <16 x i8> addrspace(1)*
+ %tmp23 = load <16 x i8>, <16 x i8> addrspace(1)* %tmp22, align 16
+ %tmp24 = extractelement <16 x i8> %tmp23, i64 3
+ %tmp1 = insertelement <16 x i8> undef, i8 %tmp3, i32 2
+ %tmp4 = insertelement <16 x i8> %tmp1, i8 0, i32 3
+ %tmp5 = insertelement <16 x i8> %tmp4, i8 0, i32 4
+ %tmp7 = insertelement <16 x i8> %tmp5, i8 %tmp6, i32 5
+ %tmp8 = insertelement <16 x i8> %tmp7, i8 0, i32 6
+ %tmp9 = insertelement <16 x i8> %tmp8, i8 %tmp13, i32 7
+ %tmp14 = insertelement <16 x i8> %tmp9, i8 0, i32 8
+ %tmp15 = insertelement <16 x i8> %tmp14, i8 %tmp17, i32 9
+ %tmp16 = insertelement <16 x i8> %tmp15, i8 0, i32 10
+ %tmp18 = insertelement <16 x i8> %tmp16, i8 0, i32 11
+ %tmp19 = insertelement <16 x i8> %tmp18, i8 %tmp24, i32 12
+ store <16 x i8> %tmp19, <16 x i8> addrspace(1)* %arg1, align 1
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/anyext.ll b/test/CodeGen/AMDGPU/anyext.ll
index 87b4c86427c8..3f220c408412 100644
--- a/test/CodeGen/AMDGPU/anyext.ll
+++ b/test/CodeGen/AMDGPU/anyext.ll
@@ -6,7 +6,7 @@ declare i32 @llvm.amdgcn.workitem.id.y() nounwind readnone
; GCN-LABEL: {{^}}anyext_i1_i32:
; GCN: v_cndmask_b32_e64
-define void @anyext_i1_i32(i32 addrspace(1)* %out, i32 %cond) {
+define amdgpu_kernel void @anyext_i1_i32(i32 addrspace(1)* %out, i32 %cond) {
entry:
%tmp = icmp eq i32 %cond, 0
%tmp1 = zext i1 %tmp to i8
@@ -22,7 +22,7 @@ entry:
; VI: v_xor_b32_e32 [[XOR:v[0-9]+]], -1, [[ADD]]
; VI: v_and_b32_e32 [[AND:v[0-9]+]], 1, [[XOR]]
; VI: buffer_store_dword [[AND]]
-define void @s_anyext_i16_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %a, i16 addrspace(1)* %b) {
+define amdgpu_kernel void @s_anyext_i16_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %a, i16 addrspace(1)* %b) {
entry:
%tid.x = call i32 @llvm.amdgcn.workitem.id.x()
%tid.y = call i32 @llvm.amdgcn.workitem.id.y()
diff --git a/test/CodeGen/AMDGPU/array-ptr-calc-i32.ll b/test/CodeGen/AMDGPU/array-ptr-calc-i32.ll
index f190bd0cb01e..daa3442097cf 100644
--- a/test/CodeGen/AMDGPU/array-ptr-calc-i32.ll
+++ b/test/CodeGen/AMDGPU/array-ptr-calc-i32.ll
@@ -12,11 +12,7 @@ declare void @llvm.amdgcn.s.barrier() #2
; SI-LABEL: {{^}}test_private_array_ptr_calc:
-; FIXME: We end up with zero argument for ADD, because
-; SIRegisterInfo::eliminateFrameIndex() blindly replaces the frame index
-; with the appropriate offset. We should fold this into the store.
-
-; SI-ALLOCA: v_add_i32_e32 [[PTRREG:v[0-9]+]], vcc, 0, v{{[0-9]+}}
+; SI-ALLOCA: v_add_i32_e32 [[PTRREG:v[0-9]+]], vcc, 16, v{{[0-9]+}}
; SI-ALLOCA: buffer_store_dword {{v[0-9]+}}, [[PTRREG]], s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen offset:64
; SI-ALLOCA: s_barrier
; SI-ALLOCA: buffer_load_dword {{v[0-9]+}}, [[PTRREG]], s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen offset:64
@@ -28,7 +24,7 @@ declare void @llvm.amdgcn.s.barrier() #2
; SI-PROMOTE: v_add_i32_e32 [[PTRREG:v[0-9]+]], vcc, 64
; SI-PROMOTE: ds_write_b32 [[PTRREG]]
-define void @test_private_array_ptr_calc(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %inA, i32 addrspace(1)* noalias %inB) #0 {
+define amdgpu_kernel void @test_private_array_ptr_calc(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %inA, i32 addrspace(1)* noalias %inB) #0 {
%alloca = alloca [16 x i32], align 16
%mbcnt.lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0);
%tid = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %mbcnt.lo)
diff --git a/test/CodeGen/AMDGPU/array-ptr-calc-i64.ll b/test/CodeGen/AMDGPU/array-ptr-calc-i64.ll
index b914edf2928e..ddeffc10a089 100644
--- a/test/CodeGen/AMDGPU/array-ptr-calc-i64.ll
+++ b/test/CodeGen/AMDGPU/array-ptr-calc-i64.ll
@@ -7,7 +7,7 @@ declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) #0
; SI-DAG: v_mul_lo_i32
; SI-DAG: v_mul_hi_i32
; SI: s_endpgm
-define void @test_array_ptr_calc(i32 addrspace(1)* noalias %out, [1025 x i32] addrspace(1)* noalias %inA, i32 addrspace(1)* noalias %inB) {
+define amdgpu_kernel void @test_array_ptr_calc(i32 addrspace(1)* noalias %out, [1025 x i32] addrspace(1)* noalias %inA, i32 addrspace(1)* noalias %inB) {
%mbcnt.lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
%tid = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %mbcnt.lo)
%a_ptr = getelementptr [1025 x i32], [1025 x i32] addrspace(1)* %inA, i32 %tid, i32 0
diff --git a/test/CodeGen/AMDGPU/ashr.v2i16.ll b/test/CodeGen/AMDGPU/ashr.v2i16.ll
new file mode 100644
index 000000000000..96a5e3b23758
--- /dev/null
+++ b/test/CodeGen/AMDGPU/ashr.v2i16.ll
@@ -0,0 +1,161 @@
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=CIVI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=CIVI %s
+
+; GCN-LABEL: {{^}}s_ashr_v2i16:
+; GFX9: s_load_dword [[LHS:s[0-9]+]]
+; GFX9: s_load_dword [[RHS:s[0-9]+]]
+; GFX9: v_mov_b32_e32 [[VLHS:v[0-9]+]], [[LHS]]
+; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], [[RHS]], [[VLHS]]
+
+; VI: v_ashrrev_i32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI: v_or_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+
+; CI: v_ashrrev_i32_e32
+; CI: v_and_b32_e32 v{{[0-9]+}}, 0xffff, v{{[0-9]+}}
+; CI: v_ashrrev_i32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; CI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; CI: v_or_b32_e32
+define amdgpu_kernel void @s_ashr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %lhs, <2 x i16> %rhs) #0 {
+ %result = ashr <2 x i16> %lhs, %rhs
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_ashr_v2i16:
+; GCN: {{buffer|flat}}_load_dword [[LHS:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[RHS:v[0-9]+]]
+; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
+
+; VI: v_ashrrev_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_ashrrev_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+
+; CI: s_mov_b32 [[MASK:s[0-9]+]], 0xffff{{$}}
+; CI-DAG: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], [[RHS]]
+; CI-DAG: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 16
+; CI: v_ashrrev_i32_e32 v{{[0-9]+}}, 16, [[LHS]]
+; CI: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; CI: v_ashrrev_i32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; CI: v_ashrrev_i32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; CI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; CI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], v{{[0-9]+}}
+; CI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @v_ashr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in.gep, i32 1
+ %a = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %b = load <2 x i16>, <2 x i16> addrspace(1)* %b_ptr
+ %result = ashr <2 x i16> %a, %b
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}ashr_v_s_v2i16:
+; GFX9: s_load_dword [[RHS:s[0-9]+]]
+; GFX9: {{buffer|flat}}_load_dword [[LHS:v[0-9]+]]
+; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
+define amdgpu_kernel void @ashr_v_s_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in, <2 x i16> %sgpr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %result = ashr <2 x i16> %vgpr, %sgpr
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}ashr_s_v_v2i16:
+; GFX9: s_load_dword [[LHS:s[0-9]+]]
+; GFX9: {{buffer|flat}}_load_dword [[RHS:v[0-9]+]]
+; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
+define amdgpu_kernel void @ashr_s_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in, <2 x i16> %sgpr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %result = ashr <2 x i16> %sgpr, %vgpr
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}ashr_imm_v_v2i16:
+; GCN: {{buffer|flat}}_load_dword [[RHS:v[0-9]+]]
+; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], [[RHS]], -4
+define amdgpu_kernel void @ashr_imm_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %result = ashr <2 x i16> <i16 -4, i16 -4>, %vgpr
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}ashr_v_imm_v2i16:
+; GCN: {{buffer|flat}}_load_dword [[LHS:v[0-9]+]]
+; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], 8, [[LHS]]
+define amdgpu_kernel void @ashr_v_imm_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %result = ashr <2 x i16> %vgpr, <i16 8, i16 8>
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_ashr_v4i16:
+; GCN: {{buffer|flat}}_load_dwordx2
+; GCN: {{buffer|flat}}_load_dwordx2
+; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+
+; VI: v_ashrrev_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_ashrrev_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI: v_ashrrev_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_ashrrev_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+
+; GCN: {{buffer|flat}}_store_dwordx2
+define amdgpu_kernel void @v_ashr_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %out, i64 %tid.ext
+ %b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in.gep, i32 1
+ %a = load <4 x i16>, <4 x i16> addrspace(1)* %in.gep
+ %b = load <4 x i16>, <4 x i16> addrspace(1)* %b_ptr
+ %result = ashr <4 x i16> %a, %b
+ store <4 x i16> %result, <4 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}ashr_v_imm_v4i16:
+; GCN: {{buffer|flat}}_load_dwordx2
+; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; GCN: {{buffer|flat}}_store_dwordx2
+define amdgpu_kernel void @ashr_v_imm_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vgpr = load <4 x i16>, <4 x i16> addrspace(1)* %in.gep
+ %result = ashr <4 x i16> %vgpr, <i16 8, i16 8, i16 8, i16 8>
+ store <4 x i16> %result, <4 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll b/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll
index 25eae0b41ae4..4f9526ddab55 100644
--- a/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll
+++ b/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll
@@ -12,7 +12,7 @@
; GCN-DAG: v_mov_b32_e32 [[VSWAP:v[0-9]+]], [[SWAP]]
; GCN: ds_cmpst_rtn_b32 [[RESULT:v[0-9]+]], [[VPTR]], [[VCMP]], [[VSWAP]] offset:16
; GCN: s_endpgm
-define void @lds_atomic_cmpxchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %swap) nounwind {
+define amdgpu_kernel void @lds_atomic_cmpxchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %swap) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%pair = cmpxchg i32 addrspace(3)* %gep, i32 7, i32 %swap seq_cst monotonic
%result = extractvalue { i32, i1 } %pair, 0
@@ -33,7 +33,7 @@ define void @lds_atomic_cmpxchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrs
; GCN: ds_cmpst_rtn_b64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVCMP]]:[[HIVCMP]]{{\]}}, v{{\[}}[[LOSWAPV]]:[[HISWAPV]]{{\]}} offset:32
; GCN: buffer_store_dwordx2 [[RESULT]],
; GCN: s_endpgm
-define void @lds_atomic_cmpxchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr, i64 %swap) nounwind {
+define amdgpu_kernel void @lds_atomic_cmpxchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr, i64 %swap) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%pair = cmpxchg i64 addrspace(3)* %gep, i64 7, i64 %swap seq_cst monotonic
%result = extractvalue { i64, i1 } %pair, 0
@@ -45,7 +45,7 @@ define void @lds_atomic_cmpxchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrs
; SI: ds_cmpst_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; CIVI: ds_cmpst_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_cmpxchg_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %swap, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @lds_atomic_cmpxchg_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %swap, i32 %a, i32 %b) nounwind {
%sub = sub i32 %a, %b
%add = add i32 %sub, 4
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 %add
@@ -65,7 +65,7 @@ define void @lds_atomic_cmpxchg_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i3
; GCN-DAG: v_mov_b32_e32 [[VSWAP:v[0-9]+]], [[SWAP]]
; GCN: ds_cmpst_b32 [[VPTR]], [[VCMP]], [[VSWAP]] offset:16
; GCN: s_endpgm
-define void @lds_atomic_cmpxchg_noret_i32_offset(i32 addrspace(3)* %ptr, i32 %swap) nounwind {
+define amdgpu_kernel void @lds_atomic_cmpxchg_noret_i32_offset(i32 addrspace(3)* %ptr, i32 %swap) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%pair = cmpxchg i32 addrspace(3)* %gep, i32 7, i32 %swap seq_cst monotonic
%result = extractvalue { i32, i1 } %pair, 0
@@ -84,7 +84,7 @@ define void @lds_atomic_cmpxchg_noret_i32_offset(i32 addrspace(3)* %ptr, i32 %sw
; GCN-DAG: v_mov_b32_e32 v[[HISWAPV:[0-9]+]], s[[HISWAP]]
; GCN: ds_cmpst_b64 [[VPTR]], v{{\[}}[[LOVCMP]]:[[HIVCMP]]{{\]}}, v{{\[}}[[LOSWAPV]]:[[HISWAPV]]{{\]}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_cmpxchg_noret_i64_offset(i64 addrspace(3)* %ptr, i64 %swap) nounwind {
+define amdgpu_kernel void @lds_atomic_cmpxchg_noret_i64_offset(i64 addrspace(3)* %ptr, i64 %swap) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%pair = cmpxchg i64 addrspace(3)* %gep, i64 7, i64 %swap seq_cst monotonic
%result = extractvalue { i64, i1 } %pair, 0
diff --git a/test/CodeGen/AMDGPU/atomic_load_add.ll b/test/CodeGen/AMDGPU/atomic_load_add.ll
index 4b014e09b630..e0fe6641fa11 100644
--- a/test/CodeGen/AMDGPU/atomic_load_add.ll
+++ b/test/CodeGen/AMDGPU/atomic_load_add.ll
@@ -5,7 +5,7 @@
; FUNC-LABEL: {{^}}atomic_add_local:
; R600: LDS_ADD *
; SI: ds_add_u32
-define void @atomic_add_local(i32 addrspace(3)* %local) {
+define amdgpu_kernel void @atomic_add_local(i32 addrspace(3)* %local) {
%unused = atomicrmw volatile add i32 addrspace(3)* %local, i32 5 seq_cst
ret void
}
@@ -13,7 +13,7 @@ define void @atomic_add_local(i32 addrspace(3)* %local) {
; FUNC-LABEL: {{^}}atomic_add_local_const_offset:
; R600: LDS_ADD *
; SI: ds_add_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
-define void @atomic_add_local_const_offset(i32 addrspace(3)* %local) {
+define amdgpu_kernel void @atomic_add_local_const_offset(i32 addrspace(3)* %local) {
%gep = getelementptr i32, i32 addrspace(3)* %local, i32 4
%val = atomicrmw volatile add i32 addrspace(3)* %gep, i32 5 seq_cst
ret void
@@ -22,7 +22,7 @@ define void @atomic_add_local_const_offset(i32 addrspace(3)* %local) {
; FUNC-LABEL: {{^}}atomic_add_ret_local:
; R600: LDS_ADD_RET *
; SI: ds_add_rtn_u32
-define void @atomic_add_ret_local(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
+define amdgpu_kernel void @atomic_add_ret_local(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
%val = atomicrmw volatile add i32 addrspace(3)* %local, i32 5 seq_cst
store i32 %val, i32 addrspace(1)* %out
ret void
@@ -31,7 +31,7 @@ define void @atomic_add_ret_local(i32 addrspace(1)* %out, i32 addrspace(3)* %loc
; FUNC-LABEL: {{^}}atomic_add_ret_local_const_offset:
; R600: LDS_ADD_RET *
; SI: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:20
-define void @atomic_add_ret_local_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
+define amdgpu_kernel void @atomic_add_ret_local_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
%gep = getelementptr i32, i32 addrspace(3)* %local, i32 5
%val = atomicrmw volatile add i32 addrspace(3)* %gep, i32 5 seq_cst
store i32 %val, i32 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/atomic_load_sub.ll b/test/CodeGen/AMDGPU/atomic_load_sub.ll
index c6e5b1136d7c..a0275893919a 100644
--- a/test/CodeGen/AMDGPU/atomic_load_sub.ll
+++ b/test/CodeGen/AMDGPU/atomic_load_sub.ll
@@ -5,7 +5,7 @@
; FUNC-LABEL: {{^}}atomic_sub_local:
; R600: LDS_SUB *
; SI: ds_sub_u32
-define void @atomic_sub_local(i32 addrspace(3)* %local) {
+define amdgpu_kernel void @atomic_sub_local(i32 addrspace(3)* %local) {
%unused = atomicrmw volatile sub i32 addrspace(3)* %local, i32 5 seq_cst
ret void
}
@@ -13,7 +13,7 @@ define void @atomic_sub_local(i32 addrspace(3)* %local) {
; FUNC-LABEL: {{^}}atomic_sub_local_const_offset:
; R600: LDS_SUB *
; SI: ds_sub_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
-define void @atomic_sub_local_const_offset(i32 addrspace(3)* %local) {
+define amdgpu_kernel void @atomic_sub_local_const_offset(i32 addrspace(3)* %local) {
%gep = getelementptr i32, i32 addrspace(3)* %local, i32 4
%val = atomicrmw volatile sub i32 addrspace(3)* %gep, i32 5 seq_cst
ret void
@@ -22,7 +22,7 @@ define void @atomic_sub_local_const_offset(i32 addrspace(3)* %local) {
; FUNC-LABEL: {{^}}atomic_sub_ret_local:
; R600: LDS_SUB_RET *
; SI: ds_sub_rtn_u32
-define void @atomic_sub_ret_local(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
+define amdgpu_kernel void @atomic_sub_ret_local(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
%val = atomicrmw volatile sub i32 addrspace(3)* %local, i32 5 seq_cst
store i32 %val, i32 addrspace(1)* %out
ret void
@@ -31,7 +31,7 @@ define void @atomic_sub_ret_local(i32 addrspace(1)* %out, i32 addrspace(3)* %loc
; FUNC-LABEL: {{^}}atomic_sub_ret_local_const_offset:
; R600: LDS_SUB_RET *
; SI: ds_sub_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:20
-define void @atomic_sub_ret_local_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
+define amdgpu_kernel void @atomic_sub_ret_local_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
%gep = getelementptr i32, i32 addrspace(3)* %local, i32 5
%val = atomicrmw volatile sub i32 addrspace(3)* %gep, i32 5 seq_cst
store i32 %val, i32 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/attr-amdgpu-flat-work-group-size.ll b/test/CodeGen/AMDGPU/attr-amdgpu-flat-work-group-size.ll
index cab377feacb2..63a6f6a8d32c 100644
--- a/test/CodeGen/AMDGPU/attr-amdgpu-flat-work-group-size.ll
+++ b/test/CodeGen/AMDGPU/attr-amdgpu-flat-work-group-size.ll
@@ -5,7 +5,7 @@
; CHECK: VGPRBlocks: 0
; CHECK: NumSGPRsForWavesPerEU: 1
; CHECK: NumVGPRsForWavesPerEU: 1
-define void @min_64_max_64() #0 {
+define amdgpu_kernel void @min_64_max_64() #0 {
entry:
ret void
}
@@ -16,7 +16,7 @@ attributes #0 = {"amdgpu-flat-work-group-size"="64,64"}
; CHECK: VGPRBlocks: 0
; CHECK: NumSGPRsForWavesPerEU: 1
; CHECK: NumVGPRsForWavesPerEU: 1
-define void @min_64_max_128() #1 {
+define amdgpu_kernel void @min_64_max_128() #1 {
entry:
ret void
}
@@ -27,7 +27,7 @@ attributes #1 = {"amdgpu-flat-work-group-size"="64,128"}
; CHECK: VGPRBlocks: 0
; CHECK: NumSGPRsForWavesPerEU: 1
; CHECK: NumVGPRsForWavesPerEU: 1
-define void @min_128_max_128() #2 {
+define amdgpu_kernel void @min_128_max_128() #2 {
entry:
ret void
}
@@ -39,7 +39,7 @@ attributes #2 = {"amdgpu-flat-work-group-size"="128,128"}
; CHECK: NumSGPRsForWavesPerEU: 13
; CHECK: NumVGPRsForWavesPerEU: 32
@var = addrspace(1) global float 0.0
-define void @min_1024_max_2048() #3 {
+define amdgpu_kernel void @min_1024_max_2048() #3 {
%val0 = load volatile float, float addrspace(1)* @var
%val1 = load volatile float, float addrspace(1)* @var
%val2 = load volatile float, float addrspace(1)* @var
diff --git a/test/CodeGen/AMDGPU/attr-amdgpu-num-sgpr.ll b/test/CodeGen/AMDGPU/attr-amdgpu-num-sgpr.ll
index e4f6e72e6977..ac2f7b4a4a4b 100644
--- a/test/CodeGen/AMDGPU/attr-amdgpu-num-sgpr.ll
+++ b/test/CodeGen/AMDGPU/attr-amdgpu-num-sgpr.ll
@@ -4,25 +4,22 @@
; If spilling to smem, additional registers are used for the resource
; descriptor.
-; ALL-LABEL: {{^}}max_12_sgprs:
+; ALL-LABEL: {{^}}max_9_sgprs:
-; FIXME: Should be able to skip this copying of the private segment
-; buffer because all the SGPR spills are to VGPRs.
-
-; ALL: s_mov_b64 s[10:11], s[2:3]
-; ALL: s_mov_b64 s[8:9], s[0:1]
; ALL: SGPRBlocks: 1
-; ALL: NumSGPRsForWavesPerEU: 14
-define void @max_12_sgprs(i32 addrspace(1)* %out1,
+; ALL: NumSGPRsForWavesPerEU: 9
+define amdgpu_kernel void @max_9_sgprs(i32 addrspace(1)* %out1,
i32 addrspace(1)* %out2,
i32 addrspace(1)* %out3,
i32 addrspace(1)* %out4,
- i32 %one, i32 %two, i32 %three, i32 %four) #0 {
+ i32 addrspace(1)* %out5,
+ i32 %one, i32 %two, i32 %three, i32 %four, i32 %five) #0 {
store i32 %one, i32 addrspace(1)* %out1
store i32 %two, i32 addrspace(1)* %out2
store i32 %three, i32 addrspace(1)* %out3
store i32 %four, i32 addrspace(1)* %out4
+ store i32 %five, i32 addrspace(1)* %out5
ret void
}
@@ -52,23 +49,26 @@ define void @max_12_sgprs(i32 addrspace(1)* %out1,
; TOSMEM: SGPRBlocks: 1
; TOSMEM: NumSGPRsForWavesPerEU: 16
-define void @max_12_sgprs_14_input_sgprs(i32 addrspace(1)* %out1,
+define amdgpu_kernel void @max_12_sgprs_14_input_sgprs(i32 addrspace(1)* %out1,
i32 addrspace(1)* %out2,
i32 addrspace(1)* %out3,
i32 addrspace(1)* %out4,
i32 %one, i32 %two, i32 %three, i32 %four) #2 {
- store volatile i32 0, i32* undef
%x.0 = call i32 @llvm.amdgcn.workgroup.id.x()
- store volatile i32 %x.0, i32 addrspace(1)* undef
%x.1 = call i32 @llvm.amdgcn.workgroup.id.y()
- store volatile i32 %x.0, i32 addrspace(1)* undef
%x.2 = call i32 @llvm.amdgcn.workgroup.id.z()
- store volatile i32 %x.0, i32 addrspace(1)* undef
%x.3 = call i64 @llvm.amdgcn.dispatch.id()
- store volatile i64 %x.3, i64 addrspace(1)* undef
%x.4 = call i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr()
- store volatile i8 addrspace(2)* %x.4, i8 addrspace(2)* addrspace(1)* undef
%x.5 = call i8 addrspace(2)* @llvm.amdgcn.queue.ptr()
+ store volatile i32 0, i32* undef
+ br label %stores
+
+stores:
+ store volatile i32 %x.0, i32 addrspace(1)* undef
+ store volatile i32 %x.0, i32 addrspace(1)* undef
+ store volatile i32 %x.0, i32 addrspace(1)* undef
+ store volatile i64 %x.3, i64 addrspace(1)* undef
+ store volatile i8 addrspace(2)* %x.4, i8 addrspace(2)* addrspace(1)* undef
store volatile i8 addrspace(2)* %x.5, i8 addrspace(2)* addrspace(1)* undef
store i32 %one, i32 addrspace(1)* %out1
@@ -90,7 +90,7 @@ define void @max_12_sgprs_14_input_sgprs(i32 addrspace(1)* %out1,
; XALL: SGPRBlocks: 2
; XALL: NumSGPRsForWavesPerEU: 18
-;define void @max_12_sgprs_12_input_sgprs(i32 addrspace(1)* %out1,
+;define amdgpu_kernel void @max_12_sgprs_12_input_sgprs(i32 addrspace(1)* %out1,
; i32 addrspace(1)* %out2,
; i32 addrspace(1)* %out3,
; i32 addrspace(1)* %out4,
diff --git a/test/CodeGen/AMDGPU/attr-amdgpu-num-vgpr.ll b/test/CodeGen/AMDGPU/attr-amdgpu-num-vgpr.ll
index 97feb7276b7d..979665ff0a80 100644
--- a/test/CodeGen/AMDGPU/attr-amdgpu-num-vgpr.ll
+++ b/test/CodeGen/AMDGPU/attr-amdgpu-num-vgpr.ll
@@ -5,7 +5,7 @@
; CHECK-LABEL: {{^}}max_20_vgprs:
; CHECK: VGPRBlocks: 4
; CHECK: NumVGPRsForWavesPerEU: 20
-define void @max_20_vgprs() #1 {
+define amdgpu_kernel void @max_20_vgprs() #1 {
%val0 = load volatile float, float addrspace(1)* @var
%val1 = load volatile float, float addrspace(1)* @var
%val2 = load volatile float, float addrspace(1)* @var
diff --git a/test/CodeGen/AMDGPU/attr-amdgpu-waves-per-eu.ll b/test/CodeGen/AMDGPU/attr-amdgpu-waves-per-eu.ll
index 4f4efccc2260..3dda73bc336e 100644
--- a/test/CodeGen/AMDGPU/attr-amdgpu-waves-per-eu.ll
+++ b/test/CodeGen/AMDGPU/attr-amdgpu-waves-per-eu.ll
@@ -4,9 +4,9 @@
; CHECK-LABEL: {{^}}empty_exactly_1:
; CHECK: SGPRBlocks: 12
; CHECK: VGPRBlocks: 32
-; CHECK: NumSGPRsForWavesPerEU: 97
+; CHECK: NumSGPRsForWavesPerEU: 102
; CHECK: NumVGPRsForWavesPerEU: 129
-define void @empty_exactly_1() #0 {
+define amdgpu_kernel void @empty_exactly_1() #0 {
entry:
ret void
}
@@ -16,9 +16,9 @@ attributes #0 = {"amdgpu-waves-per-eu"="1,1"}
; CHECK-LABEL: {{^}}empty_exactly_5:
; CHECK: SGPRBlocks: 12
; CHECK: VGPRBlocks: 10
-; CHECK: NumSGPRsForWavesPerEU: 97
+; CHECK: NumSGPRsForWavesPerEU: 102
; CHECK: NumVGPRsForWavesPerEU: 41
-define void @empty_exactly_5() #1 {
+define amdgpu_kernel void @empty_exactly_5() #1 {
entry:
ret void
}
@@ -30,7 +30,7 @@ attributes #1 = {"amdgpu-waves-per-eu"="5,5"}
; CHECK: VGPRBlocks: 0
; CHECK: NumSGPRsForWavesPerEU: 1
; CHECK: NumVGPRsForWavesPerEU: 1
-define void @empty_exactly_10() #2 {
+define amdgpu_kernel void @empty_exactly_10() #2 {
entry:
ret void
}
@@ -42,7 +42,7 @@ attributes #2 = {"amdgpu-waves-per-eu"="10,10"}
; CHECK: VGPRBlocks: 0
; CHECK: NumSGPRsForWavesPerEU: 1
; CHECK: NumVGPRsForWavesPerEU: 1
-define void @empty_at_least_1() #3 {
+define amdgpu_kernel void @empty_at_least_1() #3 {
entry:
ret void
}
@@ -54,7 +54,7 @@ attributes #3 = {"amdgpu-waves-per-eu"="1"}
; CHECK: VGPRBlocks: 0
; CHECK: NumSGPRsForWavesPerEU: 1
; CHECK: NumVGPRsForWavesPerEU: 1
-define void @empty_at_least_5() #4 {
+define amdgpu_kernel void @empty_at_least_5() #4 {
entry:
ret void
}
@@ -66,7 +66,7 @@ attributes #4 = {"amdgpu-waves-per-eu"="5"}
; CHECK: VGPRBlocks: 0
; CHECK: NumSGPRsForWavesPerEU: 1
; CHECK: NumVGPRsForWavesPerEU: 1
-define void @empty_at_least_10() #5 {
+define amdgpu_kernel void @empty_at_least_10() #5 {
entry:
ret void
}
@@ -78,9 +78,9 @@ attributes #5 = {"amdgpu-waves-per-eu"="10"}
; CHECK-LABEL: {{^}}empty_at_most_5:
; CHECK: SGPRBlocks: 12
; CHECK: VGPRBlocks: 10
-; CHECK: NumSGPRsForWavesPerEU: 97
+; CHECK: NumSGPRsForWavesPerEU: 102
; CHECK: NumVGPRsForWavesPerEU: 41
-define void @empty_at_most_5() #6 {
+define amdgpu_kernel void @empty_at_most_5() #6 {
entry:
ret void
}
@@ -92,7 +92,7 @@ attributes #6 = {"amdgpu-waves-per-eu"="1,5"}
; CHECK: VGPRBlocks: 0
; CHECK: NumSGPRsForWavesPerEU: 1
; CHECK: NumVGPRsForWavesPerEU: 1
-define void @empty_at_most_10() #7 {
+define amdgpu_kernel void @empty_at_most_10() #7 {
entry:
ret void
}
@@ -106,7 +106,7 @@ attributes #7 = {"amdgpu-waves-per-eu"="1,10"}
; CHECK: VGPRBlocks: 0
; CHECK: NumSGPRsForWavesPerEU: 1
; CHECK: NumVGPRsForWavesPerEU: 1
-define void @empty_between_5_and_10() #8 {
+define amdgpu_kernel void @empty_between_5_and_10() #8 {
entry:
ret void
}
@@ -120,7 +120,7 @@ attributes #8 = {"amdgpu-waves-per-eu"="5,10"}
; CHECK: VGPRBlocks: 5
; CHECK: NumSGPRsForWavesPerEU: 13
; CHECK: NumVGPRsForWavesPerEU: 24
-define void @exactly_10() #9 {
+define amdgpu_kernel void @exactly_10() #9 {
%val0 = load volatile float, float addrspace(1)* @var
%val1 = load volatile float, float addrspace(1)* @var
%val2 = load volatile float, float addrspace(1)* @var
diff --git a/test/CodeGen/AMDGPU/attr-unparseable.ll b/test/CodeGen/AMDGPU/attr-unparseable.ll
index 0282bc34c0ee..17adb89900cd 100644
--- a/test/CodeGen/AMDGPU/attr-unparseable.ll
+++ b/test/CodeGen/AMDGPU/attr-unparseable.ll
@@ -1,56 +1,56 @@
; RUN: not llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s 2>&1 | FileCheck %s
; CHECK: can't parse integer attribute amdgpu-num-sgpr
-define void @unparseable_single_0() #0 {
+define amdgpu_kernel void @unparseable_single_0() #0 {
entry:
ret void
}
attributes #0 = {"amdgpu-num-sgpr"}
; CHECK: can't parse integer attribute amdgpu-num-sgpr
-define void @unparseable_single_1() #1 {
+define amdgpu_kernel void @unparseable_single_1() #1 {
entry:
ret void
}
attributes #1 = {"amdgpu-num-sgpr"="k"}
; CHECK: can't parse integer attribute amdgpu-num-sgpr
-define void @unparseable_single_2() #2 {
+define amdgpu_kernel void @unparseable_single_2() #2 {
entry:
ret void
}
attributes #2 = {"amdgpu-num-sgpr"="1,2"}
; CHECK: can't parse first integer attribute amdgpu-flat-work-group-size
-define void @unparseable_pair_0() #3 {
+define amdgpu_kernel void @unparseable_pair_0() #3 {
entry:
ret void
}
attributes #3 = {"amdgpu-flat-work-group-size"}
; CHECK: can't parse first integer attribute amdgpu-flat-work-group-size
-define void @unparseable_pair_1() #4 {
+define amdgpu_kernel void @unparseable_pair_1() #4 {
entry:
ret void
}
attributes #4 = {"amdgpu-flat-work-group-size"="k"}
; CHECK: can't parse second integer attribute amdgpu-flat-work-group-size
-define void @unparseable_pair_2() #5 {
+define amdgpu_kernel void @unparseable_pair_2() #5 {
entry:
ret void
}
attributes #5 = {"amdgpu-flat-work-group-size"="1"}
; CHECK: can't parse second integer attribute amdgpu-flat-work-group-size
-define void @unparseable_pair_3() #6 {
+define amdgpu_kernel void @unparseable_pair_3() #6 {
entry:
ret void
}
attributes #6 = {"amdgpu-flat-work-group-size"="1,k"}
; CHECK: can't parse second integer attribute amdgpu-flat-work-group-size
-define void @unparseable_pair_4() #7 {
+define amdgpu_kernel void @unparseable_pair_4() #7 {
entry:
ret void
}
diff --git a/test/CodeGen/AMDGPU/barrier-elimination.ll b/test/CodeGen/AMDGPU/barrier-elimination.ll
new file mode 100644
index 000000000000..c526baaab9cd
--- /dev/null
+++ b/test/CodeGen/AMDGPU/barrier-elimination.ll
@@ -0,0 +1,30 @@
+; RUN: llc -march=amdgcn < %s | FileCheck %s
+
+; CHECK-LABEL: {{^}}unknown_wgs:
+; CHECK: s_barrier
+define amdgpu_kernel void @unknown_wgs() {
+ tail call void @llvm.amdgcn.s.barrier() #0
+ ret void
+}
+
+; CHECK-LABEL: {{^}}flat_wgs_attr_32_128:
+; CHECK: s_barrier
+define amdgpu_kernel void @flat_wgs_attr_32_128() #1 {
+ tail call void @llvm.amdgcn.s.barrier() #0
+ ret void
+}
+
+; CHECK-LABEL: {{^}}flat_wgs_attr_32_64:
+; CHECK: :
+; CHECK-NEXT: ; wave barrier
+; CHECK-NEXT: s_endpgm
+define amdgpu_kernel void @flat_wgs_attr_32_64() #2 {
+ tail call void @llvm.amdgcn.s.barrier() #0
+ ret void
+}
+
+declare void @llvm.amdgcn.s.barrier() #0
+
+attributes #0 = { convergent nounwind }
+attributes #1 = { nounwind "amdgpu-flat-work-group-size"="32,128" }
+attributes #2 = { nounwind "amdgpu-flat-work-group-size"="32,64" }
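
[Editor's note] The new test pins down when the backend may drop an s_barrier: if the kernel's maximum flat work-group size fits in a single 64-lane wavefront, every lane of the group already executes in lockstep, so the barrier degrades to the "; wave barrier" comment checked above, while a "32,128" range can span two waves and must keep the instruction. A minimal sketch of the eliminable case, reusing the same RUN line (llc -march=amdgcn); the function name is illustrative:

    ; With at most 64 work-items per group, the group occupies one wave,
    ; so this compiles to "; wave barrier" and no s_barrier instruction.
    define amdgpu_kernel void @one_wave_group() #0 {
      call void @llvm.amdgcn.s.barrier()
      ret void
    }

    declare void @llvm.amdgcn.s.barrier() #1

    attributes #0 = { nounwind "amdgpu-flat-work-group-size"="32,64" }
    attributes #1 = { convergent nounwind }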
diff --git a/test/CodeGen/AMDGPU/basic-branch.ll b/test/CodeGen/AMDGPU/basic-branch.ll
index 104dd45e8a1a..e245e4296df2 100644
--- a/test/CodeGen/AMDGPU/basic-branch.ll
+++ b/test/CodeGen/AMDGPU/basic-branch.ll
@@ -8,17 +8,14 @@
; GCNNOOPT: v_writelane_b32
; GCN: s_cbranch_scc1 [[END:BB[0-9]+_[0-9]+]]
-
-; GCN: ; BB#1
; GCNNOOPT: v_readlane_b32
; GCNNOOPT: v_readlane_b32
; GCN: buffer_store_dword
-; GCNOPT-NEXT: s_waitcnt vmcnt(0) expcnt(0)
-; TODO: This waitcnt can be eliminated
+; GCNNOOPT: s_endpgm
; GCN: {{^}}[[END]]:
; GCN: s_endpgm
-define void @test_branch(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %val) #0 {
+define amdgpu_kernel void @test_branch(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %val) #0 {
%cmp = icmp ne i32 %val, 0
br i1 %cmp, label %store, label %end
@@ -42,7 +39,7 @@ end:
; GCN: {{^}}[[END]]:
; GCN: s_endpgm
-define void @test_brcc_i1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i1 %val) #0 {
+define amdgpu_kernel void @test_brcc_i1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i1 %val) #0 {
%cmp0 = icmp ne i1 %val, 0
br i1 %cmp0, label %store, label %end
diff --git a/test/CodeGen/AMDGPU/basic-loop.ll b/test/CodeGen/AMDGPU/basic-loop.ll
index f0263caf5d6b..de45190cdaa5 100644
--- a/test/CodeGen/AMDGPU/basic-loop.ll
+++ b/test/CodeGen/AMDGPU/basic-loop.ll
@@ -2,7 +2,7 @@
; RUN: llc -O0 -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s | FileCheck %s
; CHECK-LABEL: {{^}}test_loop:
-define void @test_loop(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %val) nounwind {
+define amdgpu_kernel void @test_loop(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %val) nounwind {
entry:
br label %loop.body
diff --git a/test/CodeGen/AMDGPU/bfe-patterns.ll b/test/CodeGen/AMDGPU/bfe-patterns.ll
new file mode 100644
index 000000000000..c23cc1c88b52
--- /dev/null
+++ b/test/CodeGen/AMDGPU/bfe-patterns.ll
@@ -0,0 +1,163 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}v_ubfe_sub_i32:
+; GCN: {{buffer|flat}}_load_dword [[SRC:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[WIDTH:v[0-9]+]]
+; GCN: v_bfe_u32 v{{[0-9]+}}, [[SRC]], 0, [[WIDTH]]
+define amdgpu_kernel void @v_ubfe_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) #1 {
+ %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %in0.gep = getelementptr i32, i32 addrspace(1)* %in0, i32 %id.x
+ %in1.gep = getelementptr i32, i32 addrspace(1)* %in1, i32 %id.x
+ %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id.x
+ %src = load volatile i32, i32 addrspace(1)* %in0.gep
+ %width = load volatile i32, i32 addrspace(1)* %in1.gep
+ %sub = sub i32 32, %width
+ %shl = shl i32 %src, %sub
+ %bfe = lshr i32 %shl, %sub
+ store i32 %bfe, i32 addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_ubfe_sub_multi_use_shl_i32:
+; GCN: {{buffer|flat}}_load_dword [[SRC:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[WIDTH:v[0-9]+]]
+; GCN: v_sub_i32_e32 [[SUB:v[0-9]+]], vcc, 32, [[WIDTH]]
+
+; SI-NEXT: v_lshl_b32_e32 [[SHL:v[0-9]+]], [[SRC]], [[SUB]]
+; SI-NEXT: v_lshr_b32_e32 [[BFE:v[0-9]+]], [[SHL]], [[SUB]]
+
+; VI-NEXT: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], [[SUB]], [[SRC]]
+; VI-NEXT: v_lshrrev_b32_e32 [[BFE:v[0-9]+]], [[SUB]], [[SHL]]
+
+; GCN: [[BFE]]
+; GCN: [[SHL]]
+define amdgpu_kernel void @v_ubfe_sub_multi_use_shl_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) #1 {
+ %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %in0.gep = getelementptr i32, i32 addrspace(1)* %in0, i32 %id.x
+ %in1.gep = getelementptr i32, i32 addrspace(1)* %in1, i32 %id.x
+ %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id.x
+ %src = load volatile i32, i32 addrspace(1)* %in0.gep
+ %width = load volatile i32, i32 addrspace(1)* %in1.gep
+ %sub = sub i32 32, %width
+ %shl = shl i32 %src, %sub
+ %bfe = lshr i32 %shl, %sub
+ store i32 %bfe, i32 addrspace(1)* %out.gep
+ store volatile i32 %shl, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_ubfe_sub_i32:
+; GCN: s_load_dword [[SRC:s[0-9]+]]
+; GCN: s_load_dword [[WIDTH:s[0-9]+]]
+; GCN: v_mov_b32_e32 [[VWIDTH:v[0-9]+]], {{s[0-9]+}}
+; GCN: v_bfe_u32 v{{[0-9]+}}, [[SRC]], 0, [[VWIDTH]]
+define amdgpu_kernel void @s_ubfe_sub_i32(i32 addrspace(1)* %out, i32 %src, i32 %width) #1 {
+ %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id.x
+ %sub = sub i32 32, %width
+ %shl = shl i32 %src, %sub
+ %bfe = lshr i32 %shl, %sub
+ store i32 %bfe, i32 addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_ubfe_sub_multi_use_shl_i32:
+; GCN: s_load_dword [[SRC:s[0-9]+]]
+; GCN: s_load_dword [[WIDTH:s[0-9]+]]
+; GCN: s_sub_i32 [[SUB:s[0-9]+]], 32, [[WIDTH]]
+; GCN-NEXT: s_lshl_b32 [[SHL:s[0-9]+]], [[SRC]], [[SUB]]
+; GCN-NEXT: s_lshr_b32 s{{[0-9]+}}, [[SHL]], [[SUB]]
+define amdgpu_kernel void @s_ubfe_sub_multi_use_shl_i32(i32 addrspace(1)* %out, i32 %src, i32 %width) #1 {
+ %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id.x
+ %sub = sub i32 32, %width
+ %shl = shl i32 %src, %sub
+ %bfe = lshr i32 %shl, %sub
+ store i32 %bfe, i32 addrspace(1)* %out.gep
+ store volatile i32 %shl, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_sbfe_sub_i32:
+; GCN: {{buffer|flat}}_load_dword [[SRC:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[WIDTH:v[0-9]+]]
+; GCN: v_bfe_i32 v{{[0-9]+}}, [[SRC]], 0, [[WIDTH]]
+define amdgpu_kernel void @v_sbfe_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) #1 {
+ %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %in0.gep = getelementptr i32, i32 addrspace(1)* %in0, i32 %id.x
+ %in1.gep = getelementptr i32, i32 addrspace(1)* %in1, i32 %id.x
+ %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id.x
+ %src = load volatile i32, i32 addrspace(1)* %in0.gep
+ %width = load volatile i32, i32 addrspace(1)* %in1.gep
+ %sub = sub i32 32, %width
+ %shl = shl i32 %src, %sub
+ %bfe = ashr i32 %shl, %sub
+ store i32 %bfe, i32 addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_sbfe_sub_multi_use_shl_i32:
+; GCN: {{buffer|flat}}_load_dword [[SRC:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[WIDTH:v[0-9]+]]
+; GCN: v_sub_i32_e32 [[SUB:v[0-9]+]], vcc, 32, [[WIDTH]]
+
+; SI-NEXT: v_lshl_b32_e32 [[SHL:v[0-9]+]], [[SRC]], [[SUB]]
+; SI-NEXT: v_ashr_i32_e32 [[BFE:v[0-9]+]], [[SHL]], [[SUB]]
+
+; VI-NEXT: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], [[SUB]], [[SRC]]
+; VI-NEXT: v_ashrrev_i32_e32 [[BFE:v[0-9]+]], [[SUB]], [[SHL]]
+
+; GCN: [[BFE]]
+; GCN: [[SHL]]
+define amdgpu_kernel void @v_sbfe_sub_multi_use_shl_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) #1 {
+ %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %in0.gep = getelementptr i32, i32 addrspace(1)* %in0, i32 %id.x
+ %in1.gep = getelementptr i32, i32 addrspace(1)* %in1, i32 %id.x
+ %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id.x
+ %src = load volatile i32, i32 addrspace(1)* %in0.gep
+ %width = load volatile i32, i32 addrspace(1)* %in1.gep
+ %sub = sub i32 32, %width
+ %shl = shl i32 %src, %sub
+ %bfe = ashr i32 %shl, %sub
+ store i32 %bfe, i32 addrspace(1)* %out.gep
+ store volatile i32 %shl, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_sbfe_sub_i32:
+; GCN: s_load_dword [[SRC:s[0-9]+]]
+; GCN: s_load_dword [[WIDTH:s[0-9]+]]
+; GCN: v_mov_b32_e32 [[VWIDTH:v[0-9]+]], {{s[0-9]+}}
+; GCN: v_bfe_i32 v{{[0-9]+}}, [[SRC]], 0, [[VWIDTH]]
+define amdgpu_kernel void @s_sbfe_sub_i32(i32 addrspace(1)* %out, i32 %src, i32 %width) #1 {
+ %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id.x
+ %sub = sub i32 32, %width
+ %shl = shl i32 %src, %sub
+ %bfe = ashr i32 %shl, %sub
+ store i32 %bfe, i32 addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_sbfe_sub_multi_use_shl_i32:
+; GCN: s_load_dword [[SRC:s[0-9]+]]
+; GCN: s_load_dword [[WIDTH:s[0-9]+]]
+; GCN: s_sub_i32 [[SUB:s[0-9]+]], 32, [[WIDTH]]
+; GCN-NEXT: s_lshl_b32 [[SHL:s[0-9]+]], [[SRC]], [[SUB]]
+; GCN-NEXT: s_ashr_i32 s{{[0-9]+}}, [[SHL]], [[SUB]]
+define amdgpu_kernel void @s_sbfe_sub_multi_use_shl_i32(i32 addrspace(1)* %out, i32 %src, i32 %width) #1 {
+ %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id.x
+ %sub = sub i32 32, %width
+ %shl = shl i32 %src, %sub
+ %bfe = ashr i32 %shl, %sub
+ store i32 %bfe, i32 addrspace(1)* %out.gep
+ store volatile i32 %shl, i32 addrspace(1)* undef
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
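
[Editor's note] The pattern these tests lock in is the classic shift-pair field extract: for a runtime width %width, shifting left by 32-%width and then right by the same amount keeps the low %width bits, zero-extended with lshr and sign-extended with ashr, and the backend folds the sequence to v_bfe_u32/v_bfe_i32 with offset 0. The multi_use variants verify the fold is skipped when the inner shl has another use, since a single BFE could not replace both instructions. A sketch of the core identity (value names illustrative):

    ; Extract the low %width bits of %src, zero-extended; a single
    ; v_bfe_u32 %src, 0, %width replaces all three instructions.
    %sub = sub i32 32, %width
    %shl = shl i32 %src, %sub
    %bfe = lshr i32 %shl, %sub   ; ashr here gives the signed form (v_bfe_i32)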
diff --git a/test/CodeGen/AMDGPU/bfe_uint.ll b/test/CodeGen/AMDGPU/bfe_uint.ll
index 32e3fc26106f..2c8c9a5ec932 100644
--- a/test/CodeGen/AMDGPU/bfe_uint.ll
+++ b/test/CodeGen/AMDGPU/bfe_uint.ll
@@ -2,7 +2,7 @@
; CHECK: {{^}}bfe_def:
; CHECK: BFE_UINT
-define void @bfe_def(i32 addrspace(1)* %out, i32 %x) {
+define amdgpu_kernel void @bfe_def(i32 addrspace(1)* %out, i32 %x) {
entry:
%0 = lshr i32 %x, 5
%1 = and i32 %0, 15 ; 0xf
@@ -17,7 +17,7 @@ entry:
; CHECK: {{^}}bfe_shift:
; CHECK-NOT: BFE_UINT
-define void @bfe_shift(i32 addrspace(1)* %out, i32 %x) {
+define amdgpu_kernel void @bfe_shift(i32 addrspace(1)* %out, i32 %x) {
entry:
%0 = lshr i32 %x, 16
%1 = and i32 %0, 65535 ; 0xffff
diff --git a/test/CodeGen/AMDGPU/bfi_int.ll b/test/CodeGen/AMDGPU/bfi_int.ll
index 5156137fd78a..7870e5f378d3 100644
--- a/test/CodeGen/AMDGPU/bfi_int.ll
+++ b/test/CodeGen/AMDGPU/bfi_int.ll
@@ -9,7 +9,7 @@
; R600: BFI_INT
; SI: @bfi_def
; SI: v_bfi_b32
-define void @bfi_def(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
+define amdgpu_kernel void @bfi_def(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
entry:
%0 = xor i32 %x, -1
%1 = and i32 %z, %0
@@ -25,7 +25,7 @@ entry:
; R600: BFI_INT
; SI: @bfi_sha256_ch
; SI: v_bfi_b32
-define void @bfi_sha256_ch(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
+define amdgpu_kernel void @bfi_sha256_ch(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
entry:
%0 = xor i32 %y, %z
%1 = and i32 %x, %0
@@ -42,7 +42,7 @@ entry:
; SI: v_xor_b32_e32 [[DST:v[0-9]+]], {{s[0-9]+, v[0-9]+}}
; SI: v_bfi_b32 {{v[0-9]+}}, [[DST]], {{s[0-9]+, v[0-9]+}}
-define void @bfi_sha256_ma(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
+define amdgpu_kernel void @bfi_sha256_ma(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
entry:
%0 = and i32 %x, %z
%1 = or i32 %x, %z
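
[Editor's note] Both bfi_def and bfi_sha256_ch reduce to the same select-by-mask identity, which is what v_bfi_b32 computes: the result takes bits from one operand where the mask is set and from the other where it is clear. A sketch with %x as the mask (value names illustrative):

    ; v_bfi_b32 x, y, z == (y & x) | (z & ~x)
    %notx = xor i32 %x, -1
    %zbits = and i32 %z, %notx   ; z where the mask bit is clear
    %ybits = and i32 %x, %y      ; y where the mask bit is set
    %r = or i32 %zbits, %ybits   ; folds to a single v_bfi_b32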
diff --git a/test/CodeGen/AMDGPU/bfm.ll b/test/CodeGen/AMDGPU/bfm.ll
index 790458d0d60c..5673995588da 100644
--- a/test/CodeGen/AMDGPU/bfm.ll
+++ b/test/CodeGen/AMDGPU/bfm.ll
@@ -4,7 +4,7 @@
; FUNC-LABEL: {{^}}bfm_pattern:
; SI: s_bfm_b32 {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
-define void @bfm_pattern(i32 addrspace(1)* %out, i32 %x, i32 %y) #0 {
+define amdgpu_kernel void @bfm_pattern(i32 addrspace(1)* %out, i32 %x, i32 %y) #0 {
%a = shl i32 1, %x
%b = sub i32 %a, 1
%c = shl i32 %b, %y
@@ -14,7 +14,7 @@ define void @bfm_pattern(i32 addrspace(1)* %out, i32 %x, i32 %y) #0 {
; FUNC-LABEL: {{^}}bfm_pattern_simple:
; SI: s_bfm_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0
-define void @bfm_pattern_simple(i32 addrspace(1)* %out, i32 %x) #0 {
+define amdgpu_kernel void @bfm_pattern_simple(i32 addrspace(1)* %out, i32 %x) #0 {
%a = shl i32 1, %x
%b = sub i32 %a, 1
store i32 %b, i32 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/big_alu.ll b/test/CodeGen/AMDGPU/big_alu.ll
index 0ab22b350f50..51387c8b79cb 100644
--- a/test/CodeGen/AMDGPU/big_alu.ll
+++ b/test/CodeGen/AMDGPU/big_alu.ll
@@ -2,7 +2,7 @@
; This test ensures that the R600 backend can handle ifcvt properly
-define amdgpu_ps void @main(<4 x float> inreg %reg0, <4 x float> inreg %reg1, <4 x float> inreg %reg2, <4 x float> inreg %reg3, <4 x float> inreg %reg4, <4 x float> inreg %reg5, <4 x float> inreg %reg6, <4 x float> inreg %reg7, <4 x float> inreg %reg8, <4 x float> inreg %reg9) {
+define amdgpu_ps void @main(<4 x float> inreg %reg0, <4 x float> inreg %reg1, <4 x float> inreg %reg2, <4 x float> inreg %reg3, <4 x float> inreg %reg4, <4 x float> inreg %reg5, <4 x float> inreg %reg6, <4 x float> inreg %reg7, <4 x float> inreg %reg8, <4 x float> inreg %reg9) #0 {
main_body:
%tmp = extractelement <4 x float> %reg0, i32 0
%tmp1 = extractelement <4 x float> %reg0, i32 1
@@ -224,28 +224,31 @@ ENDIF136: ; preds = %ENDIF154, %main_bod
%result.i = fadd float %mul.i, %one.sub.ac.i
%tmp204 = fadd float %result.i, 0x3FF4CCCCC0000000
%tmp205 = fmul float %tmp204, 0x3FE1C71C80000000
- %tmp206 = call float @llvm.AMDGPU.clamp.f32(float %tmp205, float 0.000000e+00, float 1.000000e+00)
+ %max.0.i = call float @llvm.maxnum.f32(float %tmp205, float 0.000000e+00)
+ %clamp.i = call float @llvm.minnum.f32(float %max.0.i, float 1.000000e+00)
%tmp207 = fadd float %result.i, 0x3FF4CCCCC0000000
%tmp208 = fmul float %tmp207, 0x3FE1C71C80000000
- %tmp209 = call float @llvm.AMDGPU.clamp.f32(float %tmp208, float 0.000000e+00, float 1.000000e+00)
+ %max.0.i15 = call float @llvm.maxnum.f32(float %tmp208, float 0.000000e+00)
+ %clamp.i16 = call float @llvm.minnum.f32(float %max.0.i15, float 1.000000e+00)
%tmp210 = fadd float %result.i, 2.000000e+00
%tmp211 = fmul float %tmp210, 0x3FD611A7A0000000
- %tmp212 = call float @llvm.AMDGPU.clamp.f32(float %tmp211, float 0.000000e+00, float 1.000000e+00)
- %tmp213 = fmul float 2.000000e+00, %tmp206
+ %max.0.i13 = call float @llvm.maxnum.f32(float %tmp211, float 0.000000e+00)
+ %clamp.i14 = call float @llvm.minnum.f32(float %max.0.i13, float 1.000000e+00)
+ %tmp213 = fmul float 2.000000e+00, %clamp.i
%tmp214 = fsub float -0.000000e+00, %tmp213
%tmp215 = fadd float 3.000000e+00, %tmp214
- %tmp216 = fmul float %tmp206, %tmp215
- %tmp217 = fmul float %tmp206, %tmp216
- %tmp218 = fmul float 2.000000e+00, %tmp209
+ %tmp216 = fmul float %clamp.i, %tmp215
+ %tmp217 = fmul float %clamp.i, %tmp216
+ %tmp218 = fmul float 2.000000e+00, %clamp.i16
%tmp219 = fsub float -0.000000e+00, %tmp218
%tmp220 = fadd float 3.000000e+00, %tmp219
- %tmp221 = fmul float %tmp209, %tmp220
- %tmp222 = fmul float %tmp209, %tmp221
- %tmp223 = fmul float 2.000000e+00, %tmp212
+ %tmp221 = fmul float %clamp.i16, %tmp220
+ %tmp222 = fmul float %clamp.i16, %tmp221
+ %tmp223 = fmul float 2.000000e+00, %clamp.i14
%tmp224 = fsub float -0.000000e+00, %tmp223
%tmp225 = fadd float 3.000000e+00, %tmp224
- %tmp226 = fmul float %tmp212, %tmp225
- %tmp227 = fmul float %tmp212, %tmp226
+ %tmp226 = fmul float %clamp.i14, %tmp225
+ %tmp227 = fmul float %clamp.i14, %tmp226
%tmp228 = fmul float %tmp26, 0x3F368B5CC0000000
%tmp229 = fmul float %tmp27, 0x3F368B5CC0000000
%tmp230 = insertelement <4 x float> undef, float %tmp228, i32 0
@@ -282,28 +285,31 @@ ENDIF136: ; preds = %ENDIF154, %main_bod
%tmp261 = fmul float %tmp257, 0.000000e+00
%tmp262 = fadd float %result.i, 0x3FF4CCCCC0000000
%tmp263 = fmul float %tmp262, 0x3FE1C71C80000000
- %tmp264 = call float @llvm.AMDGPU.clamp.f32(float %tmp263, float 0.000000e+00, float 1.000000e+00)
+ %max.0.i11 = call float @llvm.maxnum.f32(float %tmp263, float 0.000000e+00)
+ %clamp.i12 = call float @llvm.minnum.f32(float %max.0.i11, float 1.000000e+00)
%tmp265 = fadd float %result.i, 0x3FF4CCCCC0000000
%tmp266 = fmul float %tmp265, 0x3FE1C71C80000000
- %tmp267 = call float @llvm.AMDGPU.clamp.f32(float %tmp266, float 0.000000e+00, float 1.000000e+00)
+ %max.0.i9 = call float @llvm.maxnum.f32(float %tmp266, float 0.000000e+00)
+ %clamp.i10 = call float @llvm.minnum.f32(float %max.0.i9, float 1.000000e+00)
%tmp268 = fadd float %result.i, 2.000000e+00
%tmp269 = fmul float %tmp268, 0x3FD611A7A0000000
- %tmp270 = call float @llvm.AMDGPU.clamp.f32(float %tmp269, float 0.000000e+00, float 1.000000e+00)
- %tmp271 = fmul float 2.000000e+00, %tmp264
+ %max.0.i7 = call float @llvm.maxnum.f32(float %tmp269, float 0.000000e+00)
+ %clamp.i8 = call float @llvm.minnum.f32(float %max.0.i7, float 1.000000e+00)
+ %tmp271 = fmul float 2.000000e+00, %clamp.i12
%tmp272 = fsub float -0.000000e+00, %tmp271
%tmp273 = fadd float 3.000000e+00, %tmp272
- %tmp274 = fmul float %tmp264, %tmp273
- %tmp275 = fmul float %tmp264, %tmp274
- %tmp276 = fmul float 2.000000e+00, %tmp267
+ %tmp274 = fmul float %clamp.i12, %tmp273
+ %tmp275 = fmul float %clamp.i12, %tmp274
+ %tmp276 = fmul float 2.000000e+00, %clamp.i10
%tmp277 = fsub float -0.000000e+00, %tmp276
%tmp278 = fadd float 3.000000e+00, %tmp277
- %tmp279 = fmul float %tmp267, %tmp278
- %tmp280 = fmul float %tmp267, %tmp279
- %tmp281 = fmul float 2.000000e+00, %tmp270
+ %tmp279 = fmul float %clamp.i10, %tmp278
+ %tmp280 = fmul float %clamp.i10, %tmp279
+ %tmp281 = fmul float 2.000000e+00, %clamp.i8
%tmp282 = fsub float -0.000000e+00, %tmp281
%tmp283 = fadd float 3.000000e+00, %tmp282
- %tmp284 = fmul float %tmp270, %tmp283
- %tmp285 = fmul float %tmp270, %tmp284
+ %tmp284 = fmul float %clamp.i8, %tmp283
+ %tmp285 = fmul float %clamp.i8, %tmp284
%tmp286 = fmul float %tmp26, 0x3F22DFD6A0000000
%tmp287 = fmul float %tmp27, 0x3F22DFD6A0000000
%tmp288 = insertelement <4 x float> undef, float %tmp286, i32 0
@@ -390,7 +396,8 @@ ENDIF136: ; preds = %ENDIF154, %main_bod
%tmp369 = fadd float %tmp368, %tmp367
%tmp370 = fadd float %tmp369, 0xBFEFAE1480000000
%tmp371 = fmul float %tmp370, 0xC023FFFFC0000000
- %tmp372 = call float @llvm.AMDGPU.clamp.f32(float %tmp371, float 0.000000e+00, float 1.000000e+00)
+ %max.0.i5 = call float @llvm.maxnum.f32(float %tmp371, float 0.000000e+00)
+ %clamp.i6 = call float @llvm.minnum.f32(float %max.0.i5, float 1.000000e+00)
%tmp373 = fsub float -0.000000e+00, %tmp339
%tmp374 = fadd float %result.i, %tmp373
%tmp375 = fadd float %tmp374, 0x3FBEB851E0000000
@@ -416,12 +423,13 @@ ENDIF136: ; preds = %ENDIF154, %main_bod
%tmp395 = fadd float %tmp394, %tmp393
%tmp396 = fadd float %tmp395, 0xBFEFAE1480000000
%tmp397 = fmul float %tmp396, 0xC0490001A0000000
- %tmp398 = call float @llvm.AMDGPU.clamp.f32(float %tmp397, float 0.000000e+00, float 1.000000e+00)
- %tmp399 = fmul float 2.000000e+00, %tmp372
+ %max.0.i3 = call float @llvm.maxnum.f32(float %tmp397, float 0.000000e+00)
+ %clamp.i4 = call float @llvm.minnum.f32(float %max.0.i3, float 1.000000e+00)
+ %tmp399 = fmul float 2.000000e+00, %clamp.i6
%tmp400 = fsub float -0.000000e+00, %tmp399
%tmp401 = fadd float 3.000000e+00, %tmp400
- %tmp402 = fmul float %tmp372, %tmp401
- %tmp403 = fmul float %tmp372, %tmp402
+ %tmp402 = fmul float %clamp.i6, %tmp401
+ %tmp403 = fmul float %clamp.i6, %tmp402
%one.sub.a.i169 = fsub float 1.000000e+00, %tmp403
%one.sub.ac.i170 = fmul float %one.sub.a.i169, %tmp349
%mul.i171 = fmul float %tmp258, %tmp349
@@ -438,11 +446,11 @@ ENDIF136: ; preds = %ENDIF154, %main_bod
%one.sub.ac.i158 = fmul float %one.sub.a.i157, 0.000000e+00
%mul.i159 = fmul float %tmp261, 0.000000e+00
%result.i160 = fadd float %mul.i159, %one.sub.ac.i158
- %tmp404 = fmul float 2.000000e+00, %tmp398
+ %tmp404 = fmul float 2.000000e+00, %clamp.i4
%tmp405 = fsub float -0.000000e+00, %tmp404
%tmp406 = fadd float 3.000000e+00, %tmp405
- %tmp407 = fmul float %tmp398, %tmp406
- %tmp408 = fmul float %tmp398, %tmp407
+ %tmp407 = fmul float %clamp.i4, %tmp406
+ %tmp408 = fmul float %clamp.i4, %tmp407
%one.sub.a.i153 = fsub float 1.000000e+00, %tmp408
%one.sub.ac.i154 = fmul float %one.sub.a.i153, %tmp375
%mul.i155 = fmul float %tmp258, %tmp375
@@ -1157,12 +1165,13 @@ IF179: ; preds = %ENDIF175
%tmp882 = fadd float %tmp881, %tmp880
%tmp883 = fadd float %tmp882, 0xBFEFAE1480000000
%tmp884 = fmul float %tmp883, 0xC043FFFE20000000
- %tmp885 = call float @llvm.AMDGPU.clamp.f32(float %tmp884, float 0.000000e+00, float 1.000000e+00)
- %tmp886 = fmul float 2.000000e+00, %tmp885
+ %max.0.i1 = call float @llvm.maxnum.f32(float %tmp884, float 0.000000e+00)
+ %clamp.i2 = call float @llvm.minnum.f32(float %max.0.i1, float 1.000000e+00)
+ %tmp886 = fmul float 2.000000e+00, %clamp.i2
%tmp887 = fsub float -0.000000e+00, %tmp886
%tmp888 = fadd float 3.000000e+00, %tmp887
- %tmp889 = fmul float %tmp885, %tmp888
- %tmp890 = fmul float %tmp885, %tmp889
+ %tmp889 = fmul float %clamp.i2, %tmp888
+ %tmp890 = fmul float %clamp.i2, %tmp889
%one.sub.a.i41 = fsub float 1.000000e+00, %tmp890
%one.sub.ac.i42 = fmul float %one.sub.a.i41, %tmp866
%mul.i43 = fmul float %temp84.5, %tmp866
@@ -1288,25 +1297,14 @@ ENDIF178: ; preds = %IF179, %ENDIF175
ret void
}
-; Function Attrs: nounwind readnone
-declare float @llvm.r600.dot4(<4 x float>, <4 x float>) #0
-
-; Function Attrs: nounwind readnone
-declare float @llvm.r600.recipsqrt.clamped.f32(float) #0
-
-; Function Attrs: nounwind readonly
+declare float @llvm.r600.dot4(<4 x float>, <4 x float>) #1
+declare float @llvm.r600.recipsqrt.clamped.f32(float) #1
declare float @llvm.fabs.f32(float) #1
-
-; Function Attrs: nounwind readnone
-declare float @llvm.exp2.f32(float) #0
-
-; Function Attrs: nounwind readnone
-declare float @llvm.AMDGPU.clamp.f32(float, float, float) #0
-
+declare float @llvm.exp2.f32(float) #1
declare void @llvm.r600.store.swizzle(<4 x float>, i32, i32)
+declare <4 x float> @llvm.r600.tex(<4 x float>, i32, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare float @llvm.minnum.f32(float, float) #1
+declare float @llvm.maxnum.f32(float, float) #1
-; Function Attrs: nounwind readnone
-declare <4 x float> @llvm.r600.tex(<4 x float>, i32, i32, i32, i32, i32, i32, i32, i32, i32) #0
-
-attributes #0 = { nounwind readnone }
-attributes #1 = { nounwind readonly }
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
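
[Editor's note] The mechanical change in this file is the retirement of llvm.AMDGPU.clamp.f32 in favor of the target-independent min/max intrinsics: a saturate to [0,1] becomes a maxnum with 0.0 followed by a minnum with 1.0, which the backend can still recognize as a clamp. The replacement pattern in isolation (names illustrative):

    ; clamp(%x, 0.0, 1.0) without the removed target intrinsic
    %max.0 = call float @llvm.maxnum.f32(float %x, float 0.000000e+00)
    %clamp = call float @llvm.minnum.f32(float %max.0, float 1.000000e+00)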
diff --git a/test/CodeGen/AMDGPU/bitcast-vector-extract.ll b/test/CodeGen/AMDGPU/bitcast-vector-extract.ll
index 3a55870c2882..cf95f74afb84 100644
--- a/test/CodeGen/AMDGPU/bitcast-vector-extract.ll
+++ b/test/CodeGen/AMDGPU/bitcast-vector-extract.ll
@@ -11,7 +11,7 @@
; GCN: buffer_store_dwordx4
; GCN-NOT: v_mov_b32
; GCN: buffer_store_dwordx4
-define void @store_bitcast_constant_v8i32_to_v8f32(<8 x float> addrspace(1)* %out, <8 x i32> %vec) {
+define amdgpu_kernel void @store_bitcast_constant_v8i32_to_v8f32(<8 x float> addrspace(1)* %out, <8 x i32> %vec) {
%vec0.bc = bitcast <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 8> to <8 x float>
store volatile <8 x float> %vec0.bc, <8 x float> addrspace(1)* %out
@@ -27,7 +27,7 @@ define void @store_bitcast_constant_v8i32_to_v8f32(<8 x float> addrspace(1)* %ou
; GCN: buffer_store_dwordx4
; GCN-NOT: v_mov_b32
; GCN: buffer_store_dwordx4
-define void @store_bitcast_constant_v4i64_to_v8f32(<8 x float> addrspace(1)* %out, <4 x i64> %vec) {
+define amdgpu_kernel void @store_bitcast_constant_v4i64_to_v8f32(<8 x float> addrspace(1)* %out, <4 x i64> %vec) {
%vec0.bc = bitcast <4 x i64> <i64 7, i64 7, i64 7, i64 8> to <8 x float>
store volatile <8 x float> %vec0.bc, <8 x float> addrspace(1)* %out
@@ -43,7 +43,7 @@ define void @store_bitcast_constant_v4i64_to_v8f32(<8 x float> addrspace(1)* %ou
; GCN: buffer_store_dwordx4
; GCN-NOT: v_mov_b32
; GCN: buffer_store_dwordx4
-define void @store_bitcast_constant_v4i64_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i64> %vec) {
+define amdgpu_kernel void @store_bitcast_constant_v4i64_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i64> %vec) {
%vec0.bc = bitcast <4 x i64> <i64 7, i64 7, i64 7, i64 8> to <4 x double>
store volatile <4 x double> %vec0.bc, <4 x double> addrspace(1)* %out
@@ -59,7 +59,7 @@ define void @store_bitcast_constant_v4i64_to_v4f64(<4 x double> addrspace(1)* %o
; GCN: buffer_store_dwordx4
; GCN-NOT: v_mov_b32
; GCN: buffer_store_dwordx4
-define void @store_bitcast_constant_v8i32_to_v16i16(<8 x float> addrspace(1)* %out, <16 x i16> %vec) {
+define amdgpu_kernel void @store_bitcast_constant_v8i32_to_v16i16(<8 x float> addrspace(1)* %out, <16 x i16> %vec) {
%vec0.bc = bitcast <16 x i16> <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 8> to <8 x float>
store volatile <8 x float> %vec0.bc, <8 x float> addrspace(1)* %out
@@ -67,3 +67,27 @@ define void @store_bitcast_constant_v8i32_to_v16i16(<8 x float> addrspace(1)* %o
store volatile <8 x float> %vec1.bc, <8 x float> addrspace(1)* %out
ret void
}
+
+; GCN-LABEL: {{^}}store_value_lowered_to_undef_bitcast_source:
+; GCN-NOT: store_dword
+define amdgpu_kernel void @store_value_lowered_to_undef_bitcast_source(<2 x i32> addrspace(1)* %out, i64 %a, i64 %b, i32 %c) #0 {
+ %undef = call i64 @llvm.amdgcn.icmp.i64(i64 %a, i64 %b, i32 %c) #1
+ %bc = bitcast i64 %undef to <2 x i32>
+ store volatile <2 x i32> %bc, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}store_value_lowered_to_undef_bitcast_source_extractelt:
+; GCN-NOT: store_dword
+define amdgpu_kernel void @store_value_lowered_to_undef_bitcast_source_extractelt(i32 addrspace(1)* %out, i64 %a, i64 %b, i32 %c) #0 {
+ %undef = call i64 @llvm.amdgcn.icmp.i64(i64 %a, i64 %b, i32 %c) #1
+ %bc = bitcast i64 %undef to <2 x i32>
+ %elt1 = extractelement <2 x i32> %bc, i32 1
+ store volatile i32 %elt1, i32 addrspace(1)* %out
+ ret void
+}
+
+declare i64 @llvm.amdgcn.icmp.i64(i64, i64, i32) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone convergent }
diff --git a/test/CodeGen/AMDGPU/bitreverse-inline-immediates.ll b/test/CodeGen/AMDGPU/bitreverse-inline-immediates.ll
index f7dc1a9d37e8..3616ec1f45d3 100644
--- a/test/CodeGen/AMDGPU/bitreverse-inline-immediates.ll
+++ b/test/CodeGen/AMDGPU/bitreverse-inline-immediates.ll
@@ -7,7 +7,7 @@
; GCN-LABEL: {{^}}materialize_0_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0{{$}}
; GCN: buffer_store_dword [[K]]
-define void @materialize_0_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @materialize_0_i32(i32 addrspace(1)* %out) {
store i32 0, i32 addrspace(1)* %out
ret void
}
@@ -16,7 +16,7 @@ define void @materialize_0_i32(i32 addrspace(1)* %out) {
; GCN: v_mov_b32_e32 v[[LOK:[0-9]+]], 0{{$}}
; GCN: v_mov_b32_e32 v[[HIK:[0-9]+]], v[[LOK]]{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LOK]]:[[HIK]]{{\]}}
-define void @materialize_0_i64(i64 addrspace(1)* %out) {
+define amdgpu_kernel void @materialize_0_i64(i64 addrspace(1)* %out) {
store i64 0, i64 addrspace(1)* %out
ret void
}
@@ -24,7 +24,7 @@ define void @materialize_0_i64(i64 addrspace(1)* %out) {
; GCN-LABEL: {{^}}materialize_neg1_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], -1{{$}}
; GCN: buffer_store_dword [[K]]
-define void @materialize_neg1_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @materialize_neg1_i32(i32 addrspace(1)* %out) {
store i32 -1, i32 addrspace(1)* %out
ret void
}
@@ -33,7 +33,7 @@ define void @materialize_neg1_i32(i32 addrspace(1)* %out) {
; GCN: v_mov_b32_e32 v[[LOK:[0-9]+]], -1{{$}}
; GCN: v_mov_b32_e32 v[[HIK:[0-9]+]], v[[LOK]]{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LOK]]:[[HIK]]{{\]}}
-define void @materialize_neg1_i64(i64 addrspace(1)* %out) {
+define amdgpu_kernel void @materialize_neg1_i64(i64 addrspace(1)* %out) {
store i64 -1, i64 addrspace(1)* %out
ret void
}
@@ -41,7 +41,7 @@ define void @materialize_neg1_i64(i64 addrspace(1)* %out) {
; GCN-LABEL: {{^}}materialize_signbit_i32:
; GCN: v_bfrev_b32_e32 [[K:v[0-9]+]], 1{{$}}
; GCN: buffer_store_dword [[K]]
-define void @materialize_signbit_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @materialize_signbit_i32(i32 addrspace(1)* %out) {
store i32 -2147483648, i32 addrspace(1)* %out
ret void
}
@@ -50,7 +50,7 @@ define void @materialize_signbit_i32(i32 addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LOK:[0-9]+]], 0{{$}}
; GCN-DAG: v_bfrev_b32_e32 v[[HIK:[0-9]+]], 1{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LOK]]:[[HIK]]{{\]}}
-define void @materialize_signbit_i64(i64 addrspace(1)* %out) {
+define amdgpu_kernel void @materialize_signbit_i64(i64 addrspace(1)* %out) {
store i64 -9223372036854775808, i64 addrspace(1)* %out
ret void
}
@@ -58,7 +58,7 @@ define void @materialize_signbit_i64(i64 addrspace(1)* %out) {
; GCN-LABEL: {{^}}materialize_rev_neg16_i32:
; GCN: v_bfrev_b32_e32 [[K:v[0-9]+]], -16{{$}}
; GCN: buffer_store_dword [[K]]
-define void @materialize_rev_neg16_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @materialize_rev_neg16_i32(i32 addrspace(1)* %out) {
store i32 268435455, i32 addrspace(1)* %out
ret void
}
@@ -67,7 +67,7 @@ define void @materialize_rev_neg16_i32(i32 addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LOK:[0-9]+]], -1{{$}}
; GCN-DAG: v_bfrev_b32_e32 v[[HIK:[0-9]+]], -16{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LOK]]:[[HIK]]{{\]}}
-define void @materialize_rev_neg16_i64(i64 addrspace(1)* %out) {
+define amdgpu_kernel void @materialize_rev_neg16_i64(i64 addrspace(1)* %out) {
store i64 1152921504606846975, i64 addrspace(1)* %out
ret void
}
@@ -75,7 +75,7 @@ define void @materialize_rev_neg16_i64(i64 addrspace(1)* %out) {
; GCN-LABEL: {{^}}materialize_rev_neg17_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0xf7ffffff{{$}}
; GCN: buffer_store_dword [[K]]
-define void @materialize_rev_neg17_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @materialize_rev_neg17_i32(i32 addrspace(1)* %out) {
store i32 -134217729, i32 addrspace(1)* %out
ret void
}
@@ -84,7 +84,7 @@ define void @materialize_rev_neg17_i32(i32 addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LOK:[0-9]+]], -1{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HIK:[0-9]+]], 0xf7ffffff{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LOK]]:[[HIK]]{{\]}}
-define void @materialize_rev_neg17_i64(i64 addrspace(1)* %out) {
+define amdgpu_kernel void @materialize_rev_neg17_i64(i64 addrspace(1)* %out) {
store i64 -576460752303423489, i64 addrspace(1)* %out
ret void
}
@@ -92,7 +92,7 @@ define void @materialize_rev_neg17_i64(i64 addrspace(1)* %out) {
; GCN-LABEL: {{^}}materialize_rev_64_i32:
; GCN: v_bfrev_b32_e32 [[K:v[0-9]+]], 64{{$}}
; GCN: buffer_store_dword [[K]]
-define void @materialize_rev_64_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @materialize_rev_64_i32(i32 addrspace(1)* %out) {
store i32 33554432, i32 addrspace(1)* %out
ret void
}
@@ -101,7 +101,7 @@ define void @materialize_rev_64_i32(i32 addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LOK:[0-9]+]], 0{{$}}
; GCN-DAG: v_bfrev_b32_e32 v[[HIK:[0-9]+]], 64{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LOK]]:[[HIK]]{{\]}}
-define void @materialize_rev_64_i64(i64 addrspace(1)* %out) {
+define amdgpu_kernel void @materialize_rev_64_i64(i64 addrspace(1)* %out) {
store i64 144115188075855872, i64 addrspace(1)* %out
ret void
}
@@ -109,7 +109,7 @@ define void @materialize_rev_64_i64(i64 addrspace(1)* %out) {
; GCN-LABEL: {{^}}materialize_rev_65_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x82000000{{$}}
; GCN: buffer_store_dword [[K]]
-define void @materialize_rev_65_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @materialize_rev_65_i32(i32 addrspace(1)* %out) {
store i32 -2113929216, i32 addrspace(1)* %out
ret void
}
@@ -118,7 +118,7 @@ define void @materialize_rev_65_i32(i32 addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LOK:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HIK:[0-9]+]], 0x82000000{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LOK]]:[[HIK]]{{\]}}
-define void @materialize_rev_65_i64(i64 addrspace(1)* %out) {
+define amdgpu_kernel void @materialize_rev_65_i64(i64 addrspace(1)* %out) {
store i64 -9079256848778919936, i64 addrspace(1)* %out
ret void
}
@@ -126,7 +126,7 @@ define void @materialize_rev_65_i64(i64 addrspace(1)* %out) {
; GCN-LABEL: {{^}}materialize_rev_3_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], -2.0{{$}}
; GCN: buffer_store_dword [[K]]
-define void @materialize_rev_3_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @materialize_rev_3_i32(i32 addrspace(1)* %out) {
store i32 -1073741824, i32 addrspace(1)* %out
ret void
}
@@ -135,7 +135,7 @@ define void @materialize_rev_3_i32(i32 addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LOK:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HIK:[0-9]+]], -2.0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LOK]]:[[HIK]]{{\]}}
-define void @materialize_rev_3_i64(i64 addrspace(1)* %out) {
+define amdgpu_kernel void @materialize_rev_3_i64(i64 addrspace(1)* %out) {
store i64 -4611686018427387904, i64 addrspace(1)* %out
ret void
}
@@ -143,7 +143,7 @@ define void @materialize_rev_3_i64(i64 addrspace(1)* %out) {
; GCN-LABEL: {{^}}materialize_rev_1.0_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x1fc{{$}}
; GCN: buffer_store_dword [[K]]
-define void @materialize_rev_1.0_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @materialize_rev_1.0_i32(i32 addrspace(1)* %out) {
store i32 508, i32 addrspace(1)* %out
ret void
}
@@ -152,70 +152,70 @@ define void @materialize_rev_1.0_i32(i32 addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LOK:[0-9]+]], 0x1fc{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HIK:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LOK]]:[[HIK]]{{\]}}
-define void @materialize_rev_1.0_i64(i64 addrspace(1)* %out) {
+define amdgpu_kernel void @materialize_rev_1.0_i64(i64 addrspace(1)* %out) {
store i64 508, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}s_materialize_0_i32:
; GCN: s_mov_b32 s{{[0-9]+}}, 0{{$}}
-define void @s_materialize_0_i32() {
+define amdgpu_kernel void @s_materialize_0_i32() {
call void asm sideeffect "; use $0", "s"(i32 0)
ret void
}
; GCN-LABEL: {{^}}s_materialize_1_i32:
; GCN: s_mov_b32 s{{[0-9]+}}, 1{{$}}
-define void @s_materialize_1_i32() {
+define amdgpu_kernel void @s_materialize_1_i32() {
call void asm sideeffect "; use $0", "s"(i32 1)
ret void
}
; GCN-LABEL: {{^}}s_materialize_neg1_i32:
; GCN: s_mov_b32 s{{[0-9]+}}, -1{{$}}
-define void @s_materialize_neg1_i32() {
+define amdgpu_kernel void @s_materialize_neg1_i32() {
call void asm sideeffect "; use $0", "s"(i32 -1)
ret void
}
; GCN-LABEL: {{^}}s_materialize_signbit_i32:
; GCN: s_brev_b32 s{{[0-9]+}}, 1{{$}}
-define void @s_materialize_signbit_i32() {
+define amdgpu_kernel void @s_materialize_signbit_i32() {
call void asm sideeffect "; use $0", "s"(i32 -2147483648)
ret void
}
; GCN-LABEL: {{^}}s_materialize_rev_64_i32:
; GCN: s_brev_b32 s{{[0-9]+}}, 64{{$}}
-define void @s_materialize_rev_64_i32() {
+define amdgpu_kernel void @s_materialize_rev_64_i32() {
call void asm sideeffect "; use $0", "s"(i32 33554432)
ret void
}
; GCN-LABEL: {{^}}s_materialize_rev_65_i32:
; GCN: s_mov_b32 s{{[0-9]+}}, 0x82000000{{$}}
-define void @s_materialize_rev_65_i32() {
+define amdgpu_kernel void @s_materialize_rev_65_i32() {
call void asm sideeffect "; use $0", "s"(i32 -2113929216)
ret void
}
; GCN-LABEL: {{^}}s_materialize_rev_neg16_i32:
; GCN: s_brev_b32 s{{[0-9]+}}, -16{{$}}
-define void @s_materialize_rev_neg16_i32() {
+define amdgpu_kernel void @s_materialize_rev_neg16_i32() {
call void asm sideeffect "; use $0", "s"(i32 268435455)
ret void
}
; GCN-LABEL: {{^}}s_materialize_rev_neg17_i32:
; GCN: s_mov_b32 s{{[0-9]+}}, 0xf7ffffff{{$}}
-define void @s_materialize_rev_neg17_i32() {
+define amdgpu_kernel void @s_materialize_rev_neg17_i32() {
call void asm sideeffect "; use $0", "s"(i32 -134217729)
ret void
}
; GCN-LABEL: {{^}}s_materialize_rev_1.0_i32:
; GCN: s_movk_i32 s{{[0-9]+}}, 0x1fc{{$}}
-define void @s_materialize_rev_1.0_i32() {
+define amdgpu_kernel void @s_materialize_rev_1.0_i32() {
call void asm sideeffect "; use $0", "s"(i32 508)
ret void
}
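
[Editor's note] The theme of these checks: a 32-bit constant that is not itself an inline immediate can still be materialized cheaply when its bit-reversal is one, using v_bfrev_b32/s_brev_b32 of the reversed value; once the reversal also leaves the inline range (65, -17), a plain move with a 32-bit literal wins. Two of the cases above, annotated:

    ; 0x80000000 is brev(1): materialized as v_bfrev_b32 v, 1
    store i32 -2147483648, i32 addrspace(1)* %out
    ; 0x02000000 is brev(64): materialized as v_bfrev_b32 v, 64
    store i32 33554432, i32 addrspace(1)* %out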
diff --git a/test/CodeGen/AMDGPU/bitreverse.ll b/test/CodeGen/AMDGPU/bitreverse.ll
index 43a4200cb3bd..539373f7bdeb 100644
--- a/test/CodeGen/AMDGPU/bitreverse.ll
+++ b/test/CodeGen/AMDGPU/bitreverse.ll
@@ -14,7 +14,7 @@ declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>) #1
; FUNC-LABEL: {{^}}s_brev_i16:
; SI: s_brev_b32
-define void @s_brev_i16(i16 addrspace(1)* noalias %out, i16 %val) #0 {
+define amdgpu_kernel void @s_brev_i16(i16 addrspace(1)* noalias %out, i16 %val) #0 {
%brev = call i16 @llvm.bitreverse.i16(i16 %val) #1
store i16 %brev, i16 addrspace(1)* %out
ret void
@@ -22,7 +22,7 @@ define void @s_brev_i16(i16 addrspace(1)* noalias %out, i16 %val) #0 {
; FUNC-LABEL: {{^}}v_brev_i16:
; SI: v_bfrev_b32_e32
-define void @v_brev_i16(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %valptr) #0 {
+define amdgpu_kernel void @v_brev_i16(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %valptr) #0 {
%val = load i16, i16 addrspace(1)* %valptr
%brev = call i16 @llvm.bitreverse.i16(i16 %val) #1
store i16 %brev, i16 addrspace(1)* %out
@@ -35,7 +35,7 @@ define void @v_brev_i16(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalia
; SI: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
; SI: buffer_store_dword [[VRESULT]],
; SI: s_endpgm
-define void @s_brev_i32(i32 addrspace(1)* noalias %out, i32 %val) #0 {
+define amdgpu_kernel void @s_brev_i32(i32 addrspace(1)* noalias %out, i32 %val) #0 {
%brev = call i32 @llvm.bitreverse.i32(i32 %val) #1
store i32 %brev, i32 addrspace(1)* %out
ret void
@@ -46,7 +46,7 @@ define void @s_brev_i32(i32 addrspace(1)* noalias %out, i32 %val) #0 {
; SI: v_bfrev_b32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
-define void @v_brev_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) #0 {
+define amdgpu_kernel void @v_brev_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) #0 {
%val = load i32, i32 addrspace(1)* %valptr
%brev = call i32 @llvm.bitreverse.i32(i32 %val) #1
store i32 %brev, i32 addrspace(1)* %out
@@ -56,7 +56,7 @@ define void @v_brev_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalia
; FUNC-LABEL: {{^}}s_brev_v2i32:
; SI: s_brev_b32
; SI: s_brev_b32
-define void @s_brev_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> %val) #0 {
+define amdgpu_kernel void @s_brev_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> %val) #0 {
%brev = call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %val) #1
store <2 x i32> %brev, <2 x i32> addrspace(1)* %out
ret void
@@ -65,7 +65,7 @@ define void @s_brev_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> %val)
; FUNC-LABEL: {{^}}v_brev_v2i32:
; SI: v_bfrev_b32_e32
; SI: v_bfrev_b32_e32
-define void @v_brev_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) #0 {
+define amdgpu_kernel void @v_brev_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) #0 {
%val = load <2 x i32>, <2 x i32> addrspace(1)* %valptr
%brev = call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %val) #1
store <2 x i32> %brev, <2 x i32> addrspace(1)* %out
@@ -73,7 +73,7 @@ define void @v_brev_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrsp
}
; FUNC-LABEL: {{^}}s_brev_i64:
-define void @s_brev_i64(i64 addrspace(1)* noalias %out, i64 %val) #0 {
+define amdgpu_kernel void @s_brev_i64(i64 addrspace(1)* noalias %out, i64 %val) #0 {
%brev = call i64 @llvm.bitreverse.i64(i64 %val) #1
store i64 %brev, i64 addrspace(1)* %out
ret void
@@ -81,7 +81,7 @@ define void @s_brev_i64(i64 addrspace(1)* noalias %out, i64 %val) #0 {
; FUNC-LABEL: {{^}}v_brev_i64:
; SI-NOT: v_or_b32_e64 v{{[0-9]+}}, 0, 0
-define void @v_brev_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %valptr) #0 {
+define amdgpu_kernel void @v_brev_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %valptr) #0 {
%val = load i64, i64 addrspace(1)* %valptr
%brev = call i64 @llvm.bitreverse.i64(i64 %val) #1
store i64 %brev, i64 addrspace(1)* %out
@@ -89,14 +89,14 @@ define void @v_brev_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalia
}
; FUNC-LABEL: {{^}}s_brev_v2i64:
-define void @s_brev_v2i64(<2 x i64> addrspace(1)* noalias %out, <2 x i64> %val) #0 {
+define amdgpu_kernel void @s_brev_v2i64(<2 x i64> addrspace(1)* noalias %out, <2 x i64> %val) #0 {
%brev = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %val) #1
store <2 x i64> %brev, <2 x i64> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}v_brev_v2i64:
-define void @v_brev_v2i64(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %valptr) #0 {
+define amdgpu_kernel void @v_brev_v2i64(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %valptr) #0 {
%val = load <2 x i64>, <2 x i64> addrspace(1)* %valptr
%brev = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %val) #1
store <2 x i64> %brev, <2 x i64> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/br_cc.f16.ll b/test/CodeGen/AMDGPU/br_cc.f16.ll
index 0072d384f217..b7a0c8738dfa 100644
--- a/test/CodeGen/AMDGPU/br_cc.f16.ll
+++ b/test/CodeGen/AMDGPU/br_cc.f16.ll
@@ -1,27 +1,26 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
-; GCN-LABEL: {{^}}br_cc_f16
+; GCN-LABEL: {{^}}br_cc_f16:
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
-; SI: v_cmp_nlt_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cmp_nlt_f32_e32 vcc, v[[B_F32]], v[[A_F32]]
; VI: v_cmp_nlt_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
; GCN: s_cbranch_vccnz
; GCN: one{{$}}
-; SI: v_cvt_f16_f32_e32 v[[A_F16:[0-9]+]], v[[A_F32]]
-; SI: s_branch
-; VI: buffer_store_short
-; VI: s_endpgm
+; SI: v_cvt_f16_f32_e32 v[[A_F16:[0-9]+]], v[[B_F32]]
+; GCN: buffer_store_short
+; GCN: s_endpgm
; GCN: two{{$}}
-; SI: v_cvt_f16_f32_e32 v[[B_F16:[0-9]+]], v[[B_F32]]
+; SI: v_cvt_f16_f32_e32 v[[B_F16:[0-9]+]], v[[A_F32]]
; GCN: buffer_store_short v[[B_F16]]
; GCN: s_endpgm
-define void @br_cc_f16(
+define amdgpu_kernel void @br_cc_f16(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -40,29 +39,27 @@ two:
ret void
}
-; GCN-LABEL: {{^}}br_cc_f16_imm_a
-; SI: v_mov_b32_e32 v[[A_F16:[0-9]+]], 0x3800{{$}}
-; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; GCN-LABEL: {{^}}br_cc_f16_imm_a:
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
-; SI: v_cmp_ngt_f32_e32 vcc, v[[B_F32]], v[[A_F32]]
-; SI: s_cbranch_vccz
+; SI: v_cmp_nlt_f32_e32 vcc, 0.5, v[[B_F32]]
+; SI: s_cbranch_vccnz
; VI: v_cmp_nlt_f16_e32 vcc, 0.5, v[[B_F16]]
; VI: s_cbranch_vccnz
-; VI: one{{$}}
+; GCN: one{{$}}
; VI: v_mov_b32_e32 v[[A_F16:[0-9]+]], 0x380{{0|1}}{{$}}
-; GCN: two{{$}}
-; SI: v_cvt_f16_f32_e32 v[[B_F16:[0-9]+]], v[[B_F32]]
-
-; SI: one{{$}}
; SI: buffer_store_short v[[A_F16]]
; SI: s_endpgm
-define void @br_cc_f16_imm_a(
+
+; GCN: two{{$}}
+; SI: v_cvt_f16_f32_e32 v[[B_F16:[0-9]+]], v[[B_F32]]
+
+define amdgpu_kernel void @br_cc_f16_imm_a(
half addrspace(1)* %r,
half addrspace(1)* %b) {
entry:
@@ -79,13 +76,12 @@ two:
ret void
}
-; GCN-LABEL: {{^}}br_cc_f16_imm_b
-; SI: v_mov_b32_e32 v[[B_F16:[0-9]+]], 0x3800{{$}}
-; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; GCN-LABEL: {{^}}br_cc_f16_imm_b:
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
-; SI: v_cmp_nlt_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
+; SI: v_cmp_ngt_f32_e32 vcc, 0.5, v[[A_F32]]
+
; VI: v_cmp_ngt_f16_e32 vcc, 0.5, v[[A_F16]]
; GCN: s_cbranch_vccnz
@@ -96,7 +92,7 @@ two:
; VI: v_mov_b32_e32 v[[B_F16:[0-9]+]], 0x3800{{$}}
; GCN: buffer_store_short v[[B_F16]]
; GCN: s_endpgm
-define void @br_cc_f16_imm_b(
+define amdgpu_kernel void @br_cc_f16_imm_b(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
diff --git a/test/CodeGen/AMDGPU/branch-condition-and.ll b/test/CodeGen/AMDGPU/branch-condition-and.ll
index 94616a4be8fd..68b77ea3490e 100644
--- a/test/CodeGen/AMDGPU/branch-condition-and.ll
+++ b/test/CodeGen/AMDGPU/branch-condition-and.ll
@@ -15,12 +15,16 @@
; GCN: s_and_b64 [[AND:s\[[0-9]+:[0-9]+\]]], vcc, [[OTHERCC]]
; GCN: s_and_saveexec_b64 [[SAVED:s\[[0-9]+:[0-9]+\]]], [[AND]]
; GCN: s_xor_b64 {{s\[[0-9]+:[0-9]+\]}}, exec, [[SAVED]]
-;
-; TODO: The following sequence is a bug (missing s_endpgm)!
-;
-; GCN: s_branch [[BB:BB[0-9]+_[0-9]+]]
-; GCN: [[BB]]:
-; GCN-NEXT: .Lfunc_end0:
+; GCN: ; mask branch [[BB5:BB[0-9]+_[0-9]+]]
+
+; GCN-NEXT: BB{{[0-9]+_[0-9]+}}: ; %bb4
+; GCN: ds_write_b32
+; GCN: s_waitcnt
+
+; GCN-NEXT: [[BB5]]
+; GCN: s_or_b64 exec, exec
+; GCN-NEXT: s_endpgm
+; GCN-NEXT: .Lfunc_end
define amdgpu_ps void @ham(float %arg, float %arg1) #0 {
bb:
%tmp = fcmp ogt float %arg, 0.000000e+00
@@ -29,6 +33,7 @@ bb:
br i1 %tmp3, label %bb4, label %bb5
bb4: ; preds = %bb
+ store volatile i32 4, i32 addrspace(3)* undef
unreachable
bb5: ; preds = %bb
diff --git a/test/CodeGen/AMDGPU/branch-relax-spill.ll b/test/CodeGen/AMDGPU/branch-relax-spill.ll
index 86b8dd89e7d0..ede15559c4ff 100644
--- a/test/CodeGen/AMDGPU/branch-relax-spill.ll
+++ b/test/CodeGen/AMDGPU/branch-relax-spill.ll
@@ -5,7 +5,7 @@
; FAIL: LLVM ERROR: Error while trying to spill VCC from class SReg_64: Cannot scavenge register without an emergency spill slot!
-define void @spill(i32 addrspace(1)* %arg, i32 %cnd) #0 {
+define amdgpu_kernel void @spill(i32 addrspace(1)* %arg, i32 %cnd) #0 {
entry:
%sgpr0 = tail call i32 asm sideeffect "s_mov_b32 s0, 0", "={SGPR0}"() #0
%sgpr1 = tail call i32 asm sideeffect "s_mov_b32 s1, 0", "={SGPR1}"() #0
diff --git a/test/CodeGen/AMDGPU/branch-relaxation.ll b/test/CodeGen/AMDGPU/branch-relaxation.ll
index 39505404a868..263059d4a6ed 100644
--- a/test/CodeGen/AMDGPU/branch-relaxation.ll
+++ b/test/CodeGen/AMDGPU/branch-relaxation.ll
@@ -26,7 +26,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1
; GCN: v_mov_b32_e32 [[V_CND:v[0-9]+]], [[CND]]
; GCN: buffer_store_dword [[V_CND]]
; GCN: s_endpgm
-define void @uniform_conditional_max_short_forward_branch(i32 addrspace(1)* %arg, i32 %cnd) #0 {
+define amdgpu_kernel void @uniform_conditional_max_short_forward_branch(i32 addrspace(1)* %arg, i32 %cnd) #0 {
bb:
%cmp = icmp eq i32 %cnd, 0
br i1 %cmp, label %bb3, label %bb2 ; +8 dword branch
@@ -68,7 +68,7 @@ bb3:
; GCN: v_mov_b32_e32 [[V_CND:v[0-9]+]], [[CND]]
; GCN: buffer_store_dword [[V_CND]]
; GCN: s_endpgm
-define void @uniform_conditional_min_long_forward_branch(i32 addrspace(1)* %arg, i32 %cnd) #0 {
+define amdgpu_kernel void @uniform_conditional_min_long_forward_branch(i32 addrspace(1)* %arg, i32 %cnd) #0 {
bb0:
%cmp = icmp eq i32 %cnd, 0
br i1 %cmp, label %bb3, label %bb2 ; +9 dword branch
@@ -108,7 +108,7 @@ bb3:
; GCN: [[ENDBB]]:
; GCN: buffer_store_dword [[V_CND]]
; GCN: s_endpgm
-define void @uniform_conditional_min_long_forward_vcnd_branch(float addrspace(1)* %arg, float %cnd) #0 {
+define amdgpu_kernel void @uniform_conditional_min_long_forward_vcnd_branch(float addrspace(1)* %arg, float %cnd) #0 {
bb0:
%cmp = fcmp oeq float %cnd, 0.0
br i1 %cmp, label %bb3, label %bb2 ; + 8 dword branch
@@ -141,7 +141,7 @@ bb3:
; GCN: s_or_b64 exec, exec, [[SAVE]]
; GCN: buffer_store_dword
; GCN: s_endpgm
-define void @min_long_forward_vbranch(i32 addrspace(1)* %arg) #0 {
+define amdgpu_kernel void @min_long_forward_vbranch(i32 addrspace(1)* %arg) #0 {
bb:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = zext i32 %tid to i64
@@ -188,7 +188,7 @@ bb3:
; GCN-NEXT: [[ENDBB]]:
; GCN-NEXT: s_endpgm
-define void @long_backward_sbranch(i32 addrspace(1)* %arg) #0 {
+define amdgpu_kernel void @long_backward_sbranch(i32 addrspace(1)* %arg) #0 {
bb:
br label %bb2
@@ -243,7 +243,7 @@ bb3:
; GCN: buffer_store_dword [[BB4_K]]
; GCN-NEXT: s_endpgm
; GCN-NEXT: .Lfunc_end{{[0-9]+}}:
-define void @uniform_unconditional_min_long_forward_branch(i32 addrspace(1)* %arg, i32 %arg1) {
+define amdgpu_kernel void @uniform_unconditional_min_long_forward_branch(i32 addrspace(1)* %arg, i32 %arg1) {
bb0:
%tmp = icmp ne i32 %arg1, 0
br i1 %tmp, label %bb2, label %bb3
@@ -285,7 +285,7 @@ bb4:
; GCN-NEXT: s_subb_u32 vcc_hi, vcc_hi, 0{{$}}
; GCN-NEXT: s_setpc_b64 vcc
; GCN-NEXT: .Lfunc_end{{[0-9]+}}:
-define void @uniform_unconditional_min_long_backward_branch(i32 addrspace(1)* %arg, i32 %arg1) {
+define amdgpu_kernel void @uniform_unconditional_min_long_backward_branch(i32 addrspace(1)* %arg, i32 %arg1) {
entry:
br label %loop
@@ -335,8 +335,14 @@ loop:
; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: [[BB3]]: ; %bb3
+; GCN-NEXT: ;;#ASMSTART
+; GCN-NEXT: v_nop_e64
+; GCN-NEXT: ;;#ASMEND
+; GCN-NEXT: ;;#ASMSTART
+; GCN-NEXT: v_nop_e64
+; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: s_endpgm
-define void @expand_requires_expand(i32 %cond0) #0 {
+define amdgpu_kernel void @expand_requires_expand(i32 %cond0) #0 {
bb0:
%tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%cmp0 = icmp slt i32 %cond0, 0
@@ -356,6 +362,12 @@ bb2:
br label %bb3
bb3:
+; These NOPs prevent tail-duplication-based outlining
+; from firing, which would remove the need to expand the branches and defeat this test.
+ call void asm sideeffect
+ "v_nop_e64", ""() #0
+ call void asm sideeffect
+ "v_nop_e64", ""() #0
ret void
}
@@ -385,8 +397,9 @@ bb3:
; GCN-NEXT: [[ENDIF]]: ; %endif
; GCN-NEXT: s_or_b64 exec, exec, [[MASK]]
+; GCN-NEXT: s_sleep 5
; GCN-NEXT: s_endpgm
-define void @uniform_inside_divergent(i32 addrspace(1)* %out, i32 %cond) #0 {
+define amdgpu_kernel void @uniform_inside_divergent(i32 addrspace(1)* %out, i32 %cond) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%d_cmp = icmp ult i32 %tid, 16
@@ -402,6 +415,9 @@ if_uniform:
br label %endif
endif:
+ ; Block layout can remove the split branch if it can copy the return block.
+ ; This call makes the return block long enough that it doesn't get copied.
+ call void @llvm.amdgcn.s.sleep(i32 5)
ret void
}
@@ -446,7 +462,7 @@ endif:
; GCN-NEXT: s_or_b64 exec, exec, [[MASK]]
; GCN: buffer_store_dword
; GCN-NEXT: s_endpgm
-define void @analyze_mask_branch() #0 {
+define amdgpu_kernel void @analyze_mask_branch() #0 {
entry:
%reg = call float asm sideeffect "v_mov_b32_e64 $0, 0", "=v"()
%cmp0 = fcmp ogt float %reg, 0.000000e+00
@@ -475,7 +491,8 @@ ret:
; GCN-LABEL: {{^}}long_branch_hang:
; GCN: s_cmp_lt_i32 s{{[0-9]+}}, 6
-; GCN-NEXT: s_cbranch_scc0 [[LONG_BR_0:BB[0-9]+_[0-9]+]]
+; GCN-NEXT: s_cbranch_scc1 {{BB[0-9]+_[0-9]+}}
+; GCN-NEXT: s_branch [[LONG_BR_0:BB[0-9]+_[0-9]+]]
; GCN-NEXT: BB{{[0-9]+_[0-9]+}}:
; GCN: s_add_u32 vcc_lo, vcc_lo, [[LONG_BR_DEST0:BB[0-9]+_[0-9]+]]-(
diff --git a/test/CodeGen/AMDGPU/bswap.ll b/test/CodeGen/AMDGPU/bswap.ll
index c68951731098..d2dacd7c17b3 100644
--- a/test/CodeGen/AMDGPU/bswap.ll
+++ b/test/CodeGen/AMDGPU/bswap.ll
@@ -17,7 +17,7 @@ declare <4 x i64> @llvm.bswap.v4i64(<4 x i64>) nounwind readnone
; SI: v_bfi_b32 [[RESULT:v[0-9]+]], [[K]], [[TMP1]], [[TMP0]]
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @test_bswap_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_bswap_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%val = load i32, i32 addrspace(1)* %in, align 4
%bswap = call i32 @llvm.bswap.i32(i32 %val) nounwind readnone
store i32 %bswap, i32 addrspace(1)* %out, align 4
@@ -32,7 +32,7 @@ define void @test_bswap_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounw
; SI-DAG: v_alignbit_b32
; SI-DAG: v_bfi_b32
; SI: s_endpgm
-define void @test_bswap_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_bswap_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) nounwind {
%val = load <2 x i32>, <2 x i32> addrspace(1)* %in, align 8
%bswap = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %val) nounwind readnone
store <2 x i32> %bswap, <2 x i32> addrspace(1)* %out, align 8
@@ -53,7 +53,7 @@ define void @test_bswap_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(
; SI-DAG: v_alignbit_b32
; SI-DAG: v_bfi_b32
; SI: s_endpgm
-define void @test_bswap_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_bswap_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) nounwind {
%val = load <4 x i32>, <4 x i32> addrspace(1)* %in, align 16
%bswap = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %val) nounwind readnone
store <4 x i32> %bswap, <4 x i32> addrspace(1)* %out, align 16
@@ -86,7 +86,7 @@ define void @test_bswap_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(
; SI-DAG: v_alignbit_b32
; SI-DAG: v_bfi_b32
; SI: s_endpgm
-define void @test_bswap_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_bswap_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(1)* %in) nounwind {
%val = load <8 x i32>, <8 x i32> addrspace(1)* %in, align 32
%bswap = call <8 x i32> @llvm.bswap.v8i32(<8 x i32> %val) nounwind readnone
store <8 x i32> %bswap, <8 x i32> addrspace(1)* %out, align 32
@@ -95,21 +95,21 @@ define void @test_bswap_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(
; FUNC-LABEL: {{^}}test_bswap_i64:
; SI-NOT: v_or_b32_e64 v{{[0-9]+}}, 0, 0
-define void @test_bswap_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_bswap_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
%val = load i64, i64 addrspace(1)* %in, align 8
%bswap = call i64 @llvm.bswap.i64(i64 %val) nounwind readnone
store i64 %bswap, i64 addrspace(1)* %out, align 8
ret void
}
-define void @test_bswap_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_bswap_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) nounwind {
%val = load <2 x i64>, <2 x i64> addrspace(1)* %in, align 16
%bswap = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %val) nounwind readnone
store <2 x i64> %bswap, <2 x i64> addrspace(1)* %out, align 16
ret void
}
-define void @test_bswap_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_bswap_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) nounwind {
%val = load <4 x i64>, <4 x i64> addrspace(1)* %in, align 32
%bswap = call <4 x i64> @llvm.bswap.v4i64(<4 x i64> %val) nounwind readnone
store <4 x i64> %bswap, <4 x i64> addrspace(1)* %out, align 32
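
[Editor's note] The i32 bswap CHECK lines describe the SI lowering as two v_alignbit_b32 rotates merged by a v_bfi_b32, and the vector cases simply repeat that per element. Assuming the usual 0x00ff00ff value for the [[K]] mask, the equivalent IR-level computation is sketched below (names illustrative):

    ; bswap of %x: rotr(x,8) yields bytes b0 b3 b2 b1 and rotr(x,24) yields
    ; b2 b1 b0 b3; selecting the even bytes of one and the odd bytes of the
    ; other reverses all four bytes.
    %r8a = lshr i32 %x, 8
    %r8b = shl i32 %x, 24
    %t0 = or i32 %r8a, %r8b      ; rotr(x, 8)  == v_alignbit_b32 x, x, 8
    %r24a = lshr i32 %x, 24
    %r24b = shl i32 %x, 8
    %t1 = or i32 %r24a, %r24b    ; rotr(x, 24) == v_alignbit_b32 x, x, 24
    %lo = and i32 %t1, 16711935  ; 0x00ff00ff
    %hi = and i32 %t0, -16711936 ; 0xff00ff00
    %r = or i32 %lo, %hi         ; v_bfi_b32 r, 0x00ff00ff, t1, t0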
diff --git a/test/CodeGen/AMDGPU/build_vector.ll b/test/CodeGen/AMDGPU/build_vector.ll
index 0a5774c601d3..d77b0ab9fbb6 100644
--- a/test/CodeGen/AMDGPU/build_vector.ll
+++ b/test/CodeGen/AMDGPU/build_vector.ll
@@ -10,7 +10,7 @@
; SI-DAG: v_mov_b32_e32 v[[X:[0-9]]], 5
; SI-DAG: v_mov_b32_e32 v[[Y:[0-9]]], 6
; SI: buffer_store_dwordx2 v{{\[}}[[X]]:[[Y]]{{\]}}
-define void @build_vector2 (<2 x i32> addrspace(1)* %out) {
+define amdgpu_kernel void @build_vector2 (<2 x i32> addrspace(1)* %out) {
entry:
store <2 x i32> <i32 5, i32 6>, <2 x i32> addrspace(1)* %out
ret void
@@ -28,7 +28,7 @@ entry:
; SI-DAG: v_mov_b32_e32 v[[Z:[0-9]]], 7
; SI-DAG: v_mov_b32_e32 v[[W:[0-9]]], 8
; SI: buffer_store_dwordx4 v{{\[}}[[X]]:[[W]]{{\]}}
-define void @build_vector4 (<4 x i32> addrspace(1)* %out) {
+define amdgpu_kernel void @build_vector4 (<4 x i32> addrspace(1)* %out) {
entry:
store <4 x i32> <i32 5, i32 6, i32 7, i32 8>, <4 x i32> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/call.ll b/test/CodeGen/AMDGPU/call.ll
index 6d101e1537cc..769c7bb3eee7 100644
--- a/test/CodeGen/AMDGPU/call.ll
+++ b/test/CodeGen/AMDGPU/call.ll
@@ -10,7 +10,7 @@
declare i32 @external_function(i32) nounwind
-define void @test_call_external(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test_call_external(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
%b = load i32, i32 addrspace(1)* %b_ptr
@@ -25,7 +25,7 @@ define i32 @defined_function(i32 %x) nounwind noinline {
ret i32 %y
}
-define void @test_call(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test_call(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
%b = load i32, i32 addrspace(1)* %b_ptr
@@ -35,7 +35,7 @@ define void @test_call(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
ret void
}
-define void @test_tail_call(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test_tail_call(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
%b = load i32, i32 addrspace(1)* %b_ptr
diff --git a/test/CodeGen/AMDGPU/calling-conventions.ll b/test/CodeGen/AMDGPU/calling-conventions.ll
index 57adc8be6a99..677147b6f4e5 100644
--- a/test/CodeGen/AMDGPU/calling-conventions.ll
+++ b/test/CodeGen/AMDGPU/calling-conventions.ll
@@ -1,9 +1,10 @@
-; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; Make sure we don't crash or assert on spir_kernel calling convention.
-; SI-LABEL: {{^}}kernel:
-; SI: s_endpgm
+; GCN-LABEL: {{^}}kernel:
+; GCN: s_endpgm
define spir_kernel void @kernel(i32 addrspace(1)* %out) {
entry:
store i32 0, i32 addrspace(1)* %out
@@ -11,10 +12,34 @@ entry:
}
; FIXME: This is treated like a kernel
-; SI-LABEL: {{^}}func:
-; SI: s_endpgm
-define spir_func void @func(i32 addrspace(1)* %out) {
-entry:
- store i32 0, i32 addrspace(1)* %out
- ret void
+; XGCN-LABEL: {{^}}func:
+; XGCN: s_endpgm
+; define spir_func void @func(i32 addrspace(1)* %out) {
+; entry:
+; store i32 0, i32 addrspace(1)* %out
+; ret void
+; }
+
+; GCN-LABEL: {{^}}ps_ret_cc_f16:
+; SI: v_cvt_f16_f32_e32 v0, v0
+; SI: v_cvt_f32_f16_e32 v0, v0
+; SI: v_add_f32_e32 v0, 1.0, v0
+
+; VI: v_add_f16_e32 v0, 1.0, v0
+; VI: ; return
+define amdgpu_ps half @ps_ret_cc_f16(half %arg0) {
+ %add = fadd half %arg0, 1.0
+ ret half %add
+}
+
+; GCN-LABEL: {{^}}ps_ret_cc_inreg_f16:
+; SI: v_cvt_f16_f32_e32 v0, s0
+; SI: v_cvt_f32_f16_e32 v0, v0
+; SI: v_add_f32_e32 v0, 1.0, v0
+
+; VI: v_add_f16_e64 v0, s0, 1.0
+; VI: ; return
+define amdgpu_ps half @ps_ret_cc_inreg_f16(half inreg %arg0) {
+ %add = fadd half %arg0, 1.0
+ ret half %add
}
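; The two amdgpu_ps tests above pin down f16 handling per target: SI has no
; f16 VALU add, so the half value round-trips through v_cvt_f32_f16 /
; v_add_f32 / v_cvt_f16_f32, while VI adds directly with v_add_f16 (reading an
; SGPR source in the inreg variant).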
diff --git a/test/CodeGen/AMDGPU/captured-frame-index.ll b/test/CodeGen/AMDGPU/captured-frame-index.ll
index 49af159581f7..5fe1b2728506 100644
--- a/test/CodeGen/AMDGPU/captured-frame-index.ll
+++ b/test/CodeGen/AMDGPU/captured-frame-index.ll
@@ -1,24 +1,24 @@
; RUN: llc -mtriple=amdgcn-- -mattr=-promote-alloca -amdgpu-sroa=0 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; GCN-LABEL: {{^}}store_fi_lifetime:
-; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 0{{$}}
+; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 4{{$}}
; GCN: buffer_store_dword [[FI]]
-define void @store_fi_lifetime(i32 addrspace(1)* %out, i32 %in) #0 {
+define amdgpu_kernel void @store_fi_lifetime(i32 addrspace(1)* %out, i32 %in) #0 {
entry:
%b = alloca i8
- call void @llvm.lifetime.start(i64 1, i8* %b)
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %b)
store volatile i8* %b, i8* addrspace(1)* undef
- call void @llvm.lifetime.end(i64 1, i8* %b)
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %b)
ret void
}
; GCN-LABEL: {{^}}stored_fi_to_lds:
; GCN: s_load_dword [[LDSPTR:s[0-9]+]]
; GCN: buffer_store_dword v{{[0-9]+}}, off,
-; GCN: v_mov_b32_e32 [[ZERO0:v[0-9]+]], 0{{$}}
+; GCN: v_mov_b32_e32 [[ZERO0:v[0-9]+]], 4{{$}}
; GCN: v_mov_b32_e32 [[VLDSPTR:v[0-9]+]], [[LDSPTR]]
; GCN: ds_write_b32 [[VLDSPTR]], [[ZERO0]]
-define void @stored_fi_to_lds(float* addrspace(3)* %ptr) #0 {
+define amdgpu_kernel void @stored_fi_to_lds(float* addrspace(3)* %ptr) #0 {
%tmp = alloca float
store float 4.0, float *%tmp
store float* %tmp, float* addrspace(3)* %ptr
@@ -27,18 +27,18 @@ define void @stored_fi_to_lds(float* addrspace(3)* %ptr) #0 {
; Offset is applied
; GCN-LABEL: {{^}}stored_fi_to_lds_2_small_objects:
-; GCN-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
-; GCN-DAG: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}{{$}}
+; GCN-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 4{{$}}
; GCN-DAG: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4{{$}}
+; GCN-DAG: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:8{{$}}
; GCN-DAG: s_load_dword [[LDSPTR:s[0-9]+]]
; GCN-DAG: v_mov_b32_e32 [[VLDSPTR:v[0-9]+]], [[LDSPTR]]
; GCN: ds_write_b32 [[VLDSPTR]], [[ZERO]]
-; GCN-DAG: v_mov_b32_e32 [[FI1:v[0-9]+]], 4{{$}}
+; GCN-DAG: v_mov_b32_e32 [[FI1:v[0-9]+]], 8{{$}}
; GCN: ds_write_b32 [[VLDSPTR]], [[FI1]]
-define void @stored_fi_to_lds_2_small_objects(float* addrspace(3)* %ptr) #0 {
+define amdgpu_kernel void @stored_fi_to_lds_2_small_objects(float* addrspace(3)* %ptr) #0 {
%tmp0 = alloca float
%tmp1 = alloca float
store float 4.0, float* %tmp0
@@ -51,10 +51,10 @@ define void @stored_fi_to_lds_2_small_objects(float* addrspace(3)* %ptr) #0 {
; Same frame index is used multiple times in the store
; GCN-LABEL: {{^}}stored_fi_to_self:
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x4d2{{$}}
-; GCN: buffer_store_dword [[K]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}{{$}}
-; GCN-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
-; GCN: buffer_store_dword [[ZERO]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}{{$}}
-define void @stored_fi_to_self() #0 {
+; GCN: buffer_store_dword [[K]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4{{$}}
+; GCN-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 4{{$}}
+; GCN: buffer_store_dword [[ZERO]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4{{$}}
+define amdgpu_kernel void @stored_fi_to_self() #0 {
%tmp = alloca i32*
; Avoid optimizing everything out
@@ -66,14 +66,14 @@ define void @stored_fi_to_self() #0 {
; GCN-LABEL: {{^}}stored_fi_to_self_offset:
; GCN-DAG: v_mov_b32_e32 [[K0:v[0-9]+]], 32{{$}}
-; GCN: buffer_store_dword [[K0]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}{{$}}
+; GCN: buffer_store_dword [[K0]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4{{$}}
; GCN-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x4d2{{$}}
-; GCN: buffer_store_dword [[K1]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:2048{{$}}
+; GCN: buffer_store_dword [[K1]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:2052{{$}}
-; GCN: v_mov_b32_e32 [[OFFSETK:v[0-9]+]], 0x800{{$}}
-; GCN: buffer_store_dword [[OFFSETK]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:2048{{$}}
-define void @stored_fi_to_self_offset() #0 {
+; GCN: v_mov_b32_e32 [[OFFSETK:v[0-9]+]], 0x804{{$}}
+; GCN: buffer_store_dword [[OFFSETK]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:2052{{$}}
+define amdgpu_kernel void @stored_fi_to_self_offset() #0 {
%tmp0 = alloca [512 x i32]
%tmp1 = alloca i32*
@@ -89,16 +89,16 @@ define void @stored_fi_to_self_offset() #0 {
}
; GCN-LABEL: {{^}}stored_fi_to_fi:
-; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}{{$}}
; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4{{$}}
; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:8{{$}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:12{{$}}
-; GCN: v_mov_b32_e32 [[FI1:v[0-9]+]], 4{{$}}
-; GCN: buffer_store_dword [[FI1]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:8{{$}}
+; GCN: v_mov_b32_e32 [[FI1:v[0-9]+]], 8{{$}}
+; GCN: buffer_store_dword [[FI1]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:12{{$}}
-; GCN: v_mov_b32_e32 [[FI2:v[0-9]+]], 8{{$}}
-; GCN: buffer_store_dword [[FI2]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4{{$}}
-define void @stored_fi_to_fi() #0 {
+; GCN: v_mov_b32_e32 [[FI2:v[0-9]+]], 12{{$}}
+; GCN: buffer_store_dword [[FI2]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:8{{$}}
+define amdgpu_kernel void @stored_fi_to_fi() #0 {
%tmp0 = alloca i32*
%tmp1 = alloca i32*
%tmp2 = alloca i32*
@@ -115,10 +115,10 @@ define void @stored_fi_to_fi() #0 {
}
; GCN-LABEL: {{^}}stored_fi_to_global:
-; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+$}}
-; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 0{{$}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4{{$}}
+; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 4{{$}}
; GCN: buffer_store_dword [[FI]]
-define void @stored_fi_to_global(float* addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @stored_fi_to_global(float* addrspace(1)* %ptr) #0 {
%tmp = alloca float
store float 0.0, float *%tmp
store float* %tmp, float* addrspace(1)* %ptr
@@ -127,16 +127,16 @@ define void @stored_fi_to_global(float* addrspace(1)* %ptr) #0 {
; Offset is applied
; GCN-LABEL: {{^}}stored_fi_to_global_2_small_objects:
-; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}{{$}}
; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4{{$}}
; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:8{{$}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:12{{$}}
-; GCN: v_mov_b32_e32 [[FI1:v[0-9]+]], 4{{$}}
+; GCN: v_mov_b32_e32 [[FI1:v[0-9]+]], 8{{$}}
; GCN: buffer_store_dword [[FI1]], off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
-; GCN-DAG: v_mov_b32_e32 [[FI2:v[0-9]+]], 8{{$}}
+; GCN-DAG: v_mov_b32_e32 [[FI2:v[0-9]+]], 12{{$}}
; GCN: buffer_store_dword [[FI2]], off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
-define void @stored_fi_to_global_2_small_objects(float* addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @stored_fi_to_global_2_small_objects(float* addrspace(1)* %ptr) #0 {
%tmp0 = alloca float
%tmp1 = alloca float
%tmp2 = alloca float
@@ -150,10 +150,10 @@ define void @stored_fi_to_global_2_small_objects(float* addrspace(1)* %ptr) #0 {
; GCN-LABEL: {{^}}stored_fi_to_global_huge_frame_offset:
; GCN: v_mov_b32_e32 [[BASE_0:v[0-9]+]], 0{{$}}
-; GCN: buffer_store_dword [[BASE_0]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+$}}
+; GCN: buffer_store_dword [[BASE_0]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4{{$}}
; FIXME: Re-initialize
-; GCN: v_mov_b32_e32 [[BASE_0_1:v[0-9]+]], 0{{$}}
+; GCN: v_mov_b32_e32 [[BASE_0_1:v[0-9]+]], 4{{$}}
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e7{{$}}
; GCN-DAG: v_add_i32_e32 [[BASE_1_OFF_1:v[0-9]+]], vcc, 0x3ffc, [[BASE_0_1]]
@@ -163,7 +163,7 @@ define void @stored_fi_to_global_2_small_objects(float* addrspace(1)* %ptr) #0 {
; GCN: buffer_store_dword [[K]], [[BASE_1_OFF_1]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
; GCN: buffer_store_dword [[BASE_1_OFF_2]], off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
-define void @stored_fi_to_global_huge_frame_offset(i32* addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @stored_fi_to_global_huge_frame_offset(i32* addrspace(1)* %ptr) #0 {
%tmp0 = alloca [4096 x i32]
%tmp1 = alloca [4096 x i32]
%gep0.tmp0 = getelementptr [4096 x i32], [4096 x i32]* %tmp0, i32 0, i32 0
@@ -184,9 +184,9 @@ define void @stored_fi_to_global_huge_frame_offset(i32* addrspace(1)* %ptr) #0 {
; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
; GCN: s_add_u32 s{{[0-9]+}}, s[[PC_LO]], g1@gotpcrel32@lo+4
; GCN: s_addc_u32 s{{[0-9]+}}, s[[PC_HI]], g1@gotpcrel32@hi+4
-; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 0{{$}}
+; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 4{{$}}
; GCN: buffer_store_dword [[FI]]
-define void @cannot_select_assertzext_valuetype(i32 addrspace(1)* %out, i32 %idx) #0 {
+define amdgpu_kernel void @cannot_select_assertzext_valuetype(i32 addrspace(1)* %out, i32 %idx) #0 {
entry:
%b = alloca i32, align 4
%tmp1 = load volatile i32*, i32* addrspace(1)* @g1, align 4
@@ -196,8 +196,8 @@ entry:
ret void
}
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
attributes #0 = { nounwind }
attributes #1 = { argmemonly nounwind }
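; Every materialized frame-index immediate and buffer offset in this file
; moved up by 4 (0 -> 4, 2048 -> 2052, 0x800 -> 0x804), consistent with the
; first 4 bytes of scratch now being reserved; cgp-addressing-modes.ll makes
; the same point with its "reserved 4 bytes at offset 0" comment.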
diff --git a/test/CodeGen/AMDGPU/cf-loop-on-constant.ll b/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
index 0d919bbf85e3..697f26b83a4d 100644
--- a/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
+++ b/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
@@ -2,12 +2,12 @@
; RUN: llc -march=amdgcn -verify-machineinstrs -O0 < %s
; GCN-LABEL: {{^}}test_loop:
-; GCN: [[LABEL:BB[0-9+]_[0-9]+]]:
+; GCN: [[LABEL:BB[0-9]+_[0-9]+]]: ; %for.body{{$}}
; GCN: ds_read_b32
; GCN: ds_write_b32
; GCN: s_branch [[LABEL]]
; GCN: s_endpgm
-define void @test_loop(float addrspace(3)* %ptr, i32 %n) nounwind {
+define amdgpu_kernel void @test_loop(float addrspace(3)* %ptr, i32 %n) nounwind {
entry:
%cmp = icmp eq i32 %n, -1
br i1 %cmp, label %for.exit, label %for.body
@@ -31,7 +31,7 @@ for.body:
; GCN: ds_read_b32
; GCN: ds_write_b32
; GCN: s_branch [[LABEL]]
-define void @loop_const_true(float addrspace(3)* %ptr, i32 %n) nounwind {
+define amdgpu_kernel void @loop_const_true(float addrspace(3)* %ptr, i32 %n) nounwind {
entry:
br label %for.body
@@ -52,7 +52,7 @@ for.body:
; GCN-LABEL: {{^}}loop_const_false:
; GCN-NOT: s_branch
; GCN: s_endpgm
-define void @loop_const_false(float addrspace(3)* %ptr, i32 %n) nounwind {
+define amdgpu_kernel void @loop_const_false(float addrspace(3)* %ptr, i32 %n) nounwind {
entry:
br label %for.body
@@ -74,7 +74,7 @@ for.body:
; GCN-LABEL: {{^}}loop_const_undef:
; GCN-NOT: s_branch
; GCN: s_endpgm
-define void @loop_const_undef(float addrspace(3)* %ptr, i32 %n) nounwind {
+define amdgpu_kernel void @loop_const_undef(float addrspace(3)* %ptr, i32 %n) nounwind {
entry:
br label %for.body
@@ -104,7 +104,7 @@ for.body:
; GCN: s_cbranch_vccnz [[LOOPBB]]
; GCN-NEXT: ; BB#2
; GCN-NEXT: s_endpgm
-define void @loop_arg_0(float addrspace(3)* %ptr, i32 %n, i1 %cond) nounwind {
+define amdgpu_kernel void @loop_arg_0(float addrspace(3)* %ptr, i32 %n, i1 %cond) nounwind {
entry:
br label %for.body
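; The label check above now also pins the trailing block annotation
; ("; %for.body") that the assembly printer emits, so the loop back-edge is
; verified to target the intended block.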
diff --git a/test/CodeGen/AMDGPU/cf-stack-bug.ll b/test/CodeGen/AMDGPU/cf-stack-bug.ll
index 75b87e486226..53fe89730f3a 100644
--- a/test/CodeGen/AMDGPU/cf-stack-bug.ll
+++ b/test/CodeGen/AMDGPU/cf-stack-bug.ll
@@ -35,7 +35,7 @@
; BUG32-NOT: Applying bug work-around
; NOBUG-NOT: Applying bug work-around
; FUNC-LABEL: {{^}}nested3:
-define void @nested3(i32 addrspace(1)* %out, i32 %cond) {
+define amdgpu_kernel void @nested3(i32 addrspace(1)* %out, i32 %cond) {
entry:
%0 = icmp sgt i32 %cond, 0
br i1 %0, label %if.1, label %end
@@ -68,7 +68,7 @@ end:
; BUG32-NOT: Applying bug work-around
; NOBUG-NOT: Applying bug work-around
; FUNC-LABEL: {{^}}nested4:
-define void @nested4(i32 addrspace(1)* %out, i32 %cond) {
+define amdgpu_kernel void @nested4(i32 addrspace(1)* %out, i32 %cond) {
entry:
%0 = icmp sgt i32 %cond, 0
br i1 %0, label %if.1, label %end
@@ -109,7 +109,7 @@ end:
; BUG32-NOT: Applying bug work-around
; NOBUG-NOT: Applying bug work-around
; FUNC-LABEL: {{^}}nested7:
-define void @nested7(i32 addrspace(1)* %out, i32 %cond) {
+define amdgpu_kernel void @nested7(i32 addrspace(1)* %out, i32 %cond) {
entry:
%0 = icmp sgt i32 %cond, 0
br i1 %0, label %if.1, label %end
@@ -174,7 +174,7 @@ end:
; BUG32: Applying bug work-around
; NOBUG-NOT: Applying bug work-around
; FUNC-LABEL: {{^}}nested8:
-define void @nested8(i32 addrspace(1)* %out, i32 %cond) {
+define amdgpu_kernel void @nested8(i32 addrspace(1)* %out, i32 %cond) {
entry:
%0 = icmp sgt i32 %cond, 0
br i1 %0, label %if.1, label %end
diff --git a/test/CodeGen/AMDGPU/cf_end.ll b/test/CodeGen/AMDGPU/cf_end.ll
index c74ee22868d5..3c990e0a4bd6 100644
--- a/test/CodeGen/AMDGPU/cf_end.ll
+++ b/test/CodeGen/AMDGPU/cf_end.ll
@@ -4,6 +4,6 @@
; EG: CF_END ; encoding: [0x00,0x00,0x00,0x00,0x00,0x00,0x20,0x80]
; CM: CF_END ; encoding: [0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x88]
-define void @eop() {
+define amdgpu_kernel void @eop() {
ret void
}
diff --git a/test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll b/test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll
index 6db9a0761a01..cbdcf6aeaf42 100644
--- a/test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll
+++ b/test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll
@@ -11,7 +11,7 @@
; GCN-LABEL: {{^}}test_no_sink_flat_small_offset_i32:
; GCN: flat_load_dword
; GCN: {{^}}BB0_2:
-define void @test_no_sink_flat_small_offset_i32(i32 addrspace(4)* %out, i32 addrspace(4)* %in, i32 %cond) {
+define amdgpu_kernel void @test_no_sink_flat_small_offset_i32(i32 addrspace(4)* %out, i32 addrspace(4)* %in, i32 %cond) {
entry:
%out.gep = getelementptr i32, i32 addrspace(4)* %out, i64 999999
%in.gep = getelementptr i32, i32 addrspace(4)* %in, i64 7
@@ -36,14 +36,14 @@ done:
; OPT-CI-NOT: getelementptr
; OPT: br i1
-; OPT-CI: ptrtoint
-; OPT-CI: add
-; OPT-CI: inttoptr
+; OPT-CI: addrspacecast
+; OPT-CI: getelementptr
+; OPT-CI: bitcast
; OPT: br label
; GCN-LABEL: {{^}}test_sink_noop_addrspacecast_flat_to_global_i32:
; CI: buffer_load_dword {{v[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:28
-define void @test_sink_noop_addrspacecast_flat_to_global_i32(i32 addrspace(4)* %out, i32 addrspace(4)* %in, i32 %cond) {
+define amdgpu_kernel void @test_sink_noop_addrspacecast_flat_to_global_i32(i32 addrspace(4)* %out, i32 addrspace(4)* %in, i32 %cond) {
entry:
%out.gep = getelementptr i32, i32 addrspace(4)* %out, i64 999999
%in.gep = getelementptr i32, i32 addrspace(4)* %in, i64 7
@@ -69,14 +69,14 @@ done:
; OPT-CI-NOT: getelementptr
; OPT: br i1
-; OPT-CI: ptrtoint
-; OPT-CI: add
-; OPT-CI: inttoptr
+; OPT-CI: addrspacecast
+; OPT-CI: getelementptr
+; OPT-CI: bitcast
; OPT: br label
; GCN-LABEL: {{^}}test_sink_noop_addrspacecast_flat_to_constant_i32:
; CI: s_load_dword {{s[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
-define void @test_sink_noop_addrspacecast_flat_to_constant_i32(i32 addrspace(4)* %out, i32 addrspace(4)* %in, i32 %cond) {
+define amdgpu_kernel void @test_sink_noop_addrspacecast_flat_to_constant_i32(i32 addrspace(4)* %out, i32 addrspace(4)* %in, i32 %cond) {
entry:
%out.gep = getelementptr i32, i32 addrspace(4)* %out, i64 999999
%in.gep = getelementptr i32, i32 addrspace(4)* %in, i64 7
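; The OPT-CI checks above reflect how addresses are now sunk: instead of
; rewriting the pointer through ptrtoint / add / inttoptr, the addressing-mode
; sinking in CodeGenPrepare keeps it in pointer form, roughly (sketch of the
; expected OPT output; value names hypothetical, 28 = index 7 x 4 bytes):
;   %cast = addrspacecast i32 addrspace(4)* %in to i32 addrspace(1)*
;   %0 = bitcast i32 addrspace(1)* %cast to i8 addrspace(1)*
;   %sunkaddr = getelementptr i8, i8 addrspace(1)* %0, i64 28
;   %1 = bitcast i8 addrspace(1)* %sunkaddr to i32 addrspace(1)*
; which keeps the address space visible through the sink.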
diff --git a/test/CodeGen/AMDGPU/cgp-addressing-modes.ll b/test/CodeGen/AMDGPU/cgp-addressing-modes.ll
index 2ed2857ff340..c1cf56e5058e 100644
--- a/test/CodeGen/AMDGPU/cgp-addressing-modes.ll
+++ b/test/CodeGen/AMDGPU/cgp-addressing-modes.ll
@@ -5,15 +5,17 @@
; RUN: llc -march=amdgcn -mcpu=bonaire -mattr=-promote-alloca -amdgpu-sroa=0 < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -mattr=-promote-alloca -amdgpu-sroa=0 < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+
; OPT-LABEL: @test_sink_global_small_offset_i32(
; OPT-CI-NOT: getelementptr i32, i32 addrspace(1)* %in
; OPT-VI: getelementptr i32, i32 addrspace(1)* %in
; OPT: br i1
-; OPT-CI: ptrtoint
+; OPT-CI: getelementptr i8,
; GCN-LABEL: {{^}}test_sink_global_small_offset_i32:
; GCN: {{^}}BB0_2:
-define void @test_sink_global_small_offset_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test_sink_global_small_offset_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 999999
%in.gep = getelementptr i32, i32 addrspace(1)* %in, i64 7
@@ -43,7 +45,7 @@ done:
; GCN: buffer_load_sbyte {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, s{{[0-9]+$}}
; GCN: {{^}}BB1_2:
; GCN: s_or_b64 exec
-define void @test_sink_global_small_max_i32_ds_offset(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
+define amdgpu_kernel void @test_sink_global_small_max_i32_ds_offset(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 99999
%in.gep = getelementptr i8, i8 addrspace(1)* %in, i64 65535
@@ -70,7 +72,7 @@ done:
; GCN: buffer_load_sbyte {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:4095{{$}}
; GCN: {{^}}BB2_2:
; GCN: s_or_b64 exec
-define void @test_sink_global_small_max_mubuf_offset(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
+define amdgpu_kernel void @test_sink_global_small_max_mubuf_offset(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 1024
%in.gep = getelementptr i8, i8 addrspace(1)* %in, i64 4095
@@ -97,7 +99,7 @@ done:
; GCN: buffer_load_sbyte {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, s{{[0-9]+$}}
; GCN: {{^}}BB3_2:
; GCN: s_or_b64 exec
-define void @test_sink_global_small_max_plus_1_mubuf_offset(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
+define amdgpu_kernel void @test_sink_global_small_max_plus_1_mubuf_offset(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 99999
%in.gep = getelementptr i8, i8 addrspace(1)* %in, i64 4096
@@ -122,14 +124,55 @@ done:
; OPT-LABEL: @test_sink_scratch_small_offset_i32(
; OPT-NOT: getelementptr [512 x i32]
; OPT: br i1
-; OPT: ptrtoint
+; OPT: getelementptr i8,
; GCN-LABEL: {{^}}test_sink_scratch_small_offset_i32:
; GCN: s_and_saveexec_b64
; GCN: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:4092{{$}}
; GCN: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:4092{{$}}
; GCN: {{^}}BB4_2:
-define void @test_sink_scratch_small_offset_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %arg) {
+define amdgpu_kernel void @test_sink_scratch_small_offset_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %arg) {
+entry:
+ %alloca = alloca [512 x i32], align 4
+ %out.gep.0 = getelementptr i32, i32 addrspace(1)* %out, i64 999998
+ %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i64 999999
+ %add.arg = add i32 %arg, 8
+ %alloca.gep = getelementptr [512 x i32], [512 x i32]* %alloca, i32 0, i32 1022
+ %tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
+ %tmp0 = icmp eq i32 %tid, 0
+ br i1 %tmp0, label %endif, label %if
+
+if:
+ store volatile i32 123, i32* %alloca.gep
+ %tmp1 = load volatile i32, i32* %alloca.gep
+ br label %endif
+
+endif:
+ %x = phi i32 [ %tmp1, %if ], [ 0, %entry ]
+ store i32 %x, i32 addrspace(1)* %out.gep.0
+ %load = load volatile i32, i32* %alloca.gep
+ store i32 %load, i32 addrspace(1)* %out.gep.1
+ br label %done
+
+done:
+ ret void
+}
+
+; This ends up not fitting due to the reserved 4 bytes at offset 0
+; OPT-LABEL: @test_sink_scratch_small_offset_i32_reserved(
+; OPT-NOT: getelementptr [512 x i32]
+; OPT: br i1
+; OPT: getelementptr i8,
+
+; GCN-LABEL: {{^}}test_sink_scratch_small_offset_i32_reserved:
+; GCN: s_and_saveexec_b64
+; GCN: v_mov_b32_e32 [[BASE_FI0:v[0-9]+]], 4
+; GCN: buffer_store_dword {{v[0-9]+}}, [[BASE_FI0]], {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:4092{{$}}
+; GCN: v_mov_b32_e32 [[BASE_FI1:v[0-9]+]], 4
+; GCN: buffer_load_dword {{v[0-9]+}}, [[BASE_FI1]], {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:4092{{$}}
+; GCN: {{^BB[0-9]+}}_2:
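+; (Base 4 plus immediate 4092 addresses byte 4096, one past the 4095 maximum
+; MUBUF immediate offset, hence the frame-index base in a VGPR with offen.)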
+
+define amdgpu_kernel void @test_sink_scratch_small_offset_i32_reserved(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %arg) {
entry:
%alloca = alloca [512 x i32], align 4
%out.gep.0 = getelementptr i32, i32 addrspace(1)* %out, i64 999998
@@ -165,8 +208,8 @@ done:
; GCN: s_and_saveexec_b64
; GCN: buffer_store_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
-; GCN: {{^}}BB5_2:
-define void @test_no_sink_scratch_large_offset_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %arg) {
+; GCN: {{^BB[0-9]+}}_2:
+define amdgpu_kernel void @test_no_sink_scratch_large_offset_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %arg) {
entry:
%alloca = alloca [512 x i32], align 4
%out.gep.0 = getelementptr i32, i32 addrspace(1)* %out, i64 999998
@@ -197,8 +240,8 @@ done:
; GCN: s_and_saveexec_b64
; CI: buffer_load_dword {{v[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; VI: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
-; GCN: {{^}}BB6_2:
-define void @test_sink_global_vreg_sreg_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %offset) {
+; GCN: {{^BB[0-9]+}}_2:
+define amdgpu_kernel void @test_sink_global_vreg_sreg_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %offset) {
entry:
%offset.ext = zext i32 %offset to i64
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 999999
@@ -228,7 +271,7 @@ done:
; GCN: s_and_saveexec_b64
; SI: s_load_dword s{{[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0x7{{$}}
; GCN: s_or_b64 exec, exec
-define void @test_sink_constant_small_offset_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
+define amdgpu_kernel void @test_sink_constant_small_offset_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
entry:
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 999999
%in.gep = getelementptr i32, i32 addrspace(2)* %in, i64 7
@@ -257,7 +300,7 @@ done:
; GCN: s_and_saveexec_b64
; SI: s_load_dword s{{[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0xff{{$}}
; GCN: s_or_b64 exec, exec
-define void @test_sink_constant_max_8_bit_offset_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
+define amdgpu_kernel void @test_sink_constant_max_8_bit_offset_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
entry:
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 999999
%in.gep = getelementptr i32, i32 addrspace(2)* %in, i64 255
@@ -290,7 +333,7 @@ done:
; SI: s_load_dword s{{[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[OFFSET]]{{$}}
; GCN: s_or_b64 exec, exec
-define void @test_sink_constant_max_8_bit_offset_p1_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
+define amdgpu_kernel void @test_sink_constant_max_8_bit_offset_p1_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
entry:
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 999999
%in.gep = getelementptr i32, i32 addrspace(2)* %in, i64 256
@@ -322,7 +365,7 @@ done:
; GCN: s_addc_u32 s{{[0-9]+}}, s{{[0-9]+}}, 3{{$}}
; SI: s_load_dword s{{[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0x0{{$}}
; GCN: s_or_b64 exec, exec
-define void @test_sink_constant_max_32_bit_offset_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
+define amdgpu_kernel void @test_sink_constant_max_32_bit_offset_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
entry:
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 999999
%in.gep = getelementptr i32, i32 addrspace(2)* %in, i64 4294967295
@@ -353,7 +396,7 @@ done:
; GCN: s_addc_u32
; SI: s_load_dword s{{[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0x0{{$}}
; GCN: s_or_b64 exec, exec
-define void @test_sink_constant_max_32_bit_offset_p1_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
+define amdgpu_kernel void @test_sink_constant_max_32_bit_offset_p1_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
entry:
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 999999
%in.gep = getelementptr i32, i32 addrspace(2)* %in, i64 17179869181
@@ -383,7 +426,7 @@ done:
; VI: s_load_dword s{{[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0xffffc{{$}}
; GCN: s_or_b64 exec, exec
-define void @test_sink_constant_max_20_bit_byte_offset_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
+define amdgpu_kernel void @test_sink_constant_max_20_bit_byte_offset_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
entry:
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 999999
%in.gep = getelementptr i32, i32 addrspace(2)* %in, i64 262143
@@ -421,7 +464,7 @@ done:
; VI: s_load_dword s{{[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[OFFSET]]{{$}}
; GCN: s_or_b64 exec, exec
-define void @test_sink_constant_max_20_bit_byte_offset_p1_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
+define amdgpu_kernel void @test_sink_constant_max_20_bit_byte_offset_p1_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
entry:
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 999999
%in.gep = getelementptr i32, i32 addrspace(2)* %in, i64 262144
@@ -445,13 +488,13 @@ done:
%struct.foo = type { [3 x float], [3 x float] }
; OPT-LABEL: @sink_ds_address(
-; OPT: ptrtoint %struct.foo addrspace(3)* %ptr to i64
+; OPT: getelementptr i8,
; GCN-LABEL: {{^}}sink_ds_address:
; GCN: s_load_dword [[SREG1:s[0-9]+]],
; GCN: v_mov_b32_e32 [[VREG1:v[0-9]+]], [[SREG1]]
; GCN-DAG: ds_read2_b32 v[{{[0-9]+:[0-9]+}}], [[VREG1]] offset0:3 offset1:5
-define void @sink_ds_address(%struct.foo addrspace(3)* nocapture %ptr) nounwind {
+define amdgpu_kernel void @sink_ds_address(%struct.foo addrspace(3)* nocapture %ptr) nounwind {
entry:
%x = getelementptr inbounds %struct.foo, %struct.foo addrspace(3)* %ptr, i32 0, i32 1, i32 0
%y = getelementptr inbounds %struct.foo, %struct.foo addrspace(3)* %ptr, i32 0, i32 1, i32 2
@@ -476,9 +519,8 @@ bb34:
; OPT-LABEL: @test_sink_constant_small_max_mubuf_offset_load_i32_align_1(
; OPT: br i1 %tmp0,
; OPT: if:
-; OPT: %sunkaddr = ptrtoint i8 addrspace(2)* %in to i64
-; OPT: %sunkaddr1 = add i64 %sunkaddr, 4095
-define void @test_sink_constant_small_max_mubuf_offset_load_i32_align_1(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
+; OPT: getelementptr i8, {{.*}} 4095
+define amdgpu_kernel void @test_sink_constant_small_max_mubuf_offset_load_i32_align_1(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
entry:
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 1024
%in.gep = getelementptr i8, i8 addrspace(2)* %in, i64 4095
@@ -500,7 +542,141 @@ done:
ret void
}
+; OPT-LABEL: @test_sink_local_small_offset_atomicrmw_i32(
+; OPT: %0 = bitcast i32 addrspace(3)* %in to i8 addrspace(3)*
+; OPT: %sunkaddr = getelementptr i8, i8 addrspace(3)* %0, i32 28
+; OPT: %1 = bitcast i8 addrspace(3)* %sunkaddr to i32 addrspace(3)*
+; OPT: %tmp1 = atomicrmw add i32 addrspace(3)* %1, i32 2 seq_cst
+define amdgpu_kernel void @test_sink_local_small_offset_atomicrmw_i32(i32 addrspace(3)* %out, i32 addrspace(3)* %in) {
+entry:
+ %out.gep = getelementptr i32, i32 addrspace(3)* %out, i32 999999
+ %in.gep = getelementptr i32, i32 addrspace(3)* %in, i32 7
+ %tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
+ %tmp0 = icmp eq i32 %tid, 0
+ br i1 %tmp0, label %endif, label %if
+
+if:
+ %tmp1 = atomicrmw add i32 addrspace(3)* %in.gep, i32 2 seq_cst
+ br label %endif
+
+endif:
+ %x = phi i32 [ %tmp1, %if ], [ 0, %entry ]
+ store i32 %x, i32 addrspace(3)* %out.gep
+ br label %done
+
+done:
+ ret void
+}
+
+; OPT-LABEL: @test_sink_local_small_offset_cmpxchg_i32(
+; OPT: %0 = bitcast i32 addrspace(3)* %in to i8 addrspace(3)*
+; OPT: %sunkaddr = getelementptr i8, i8 addrspace(3)* %0, i32 28
+; OPT: %1 = bitcast i8 addrspace(3)* %sunkaddr to i32 addrspace(3)*
+; OPT: %tmp1.struct = cmpxchg i32 addrspace(3)* %1, i32 undef, i32 2 seq_cst monotonic
+define amdgpu_kernel void @test_sink_local_small_offset_cmpxchg_i32(i32 addrspace(3)* %out, i32 addrspace(3)* %in) {
+entry:
+ %out.gep = getelementptr i32, i32 addrspace(3)* %out, i32 999999
+ %in.gep = getelementptr i32, i32 addrspace(3)* %in, i32 7
+ %tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
+ %tmp0 = icmp eq i32 %tid, 0
+ br i1 %tmp0, label %endif, label %if
+
+if:
+ %tmp1.struct = cmpxchg i32 addrspace(3)* %in.gep, i32 undef, i32 2 seq_cst monotonic
+ %tmp1 = extractvalue { i32, i1 } %tmp1.struct, 0
+ br label %endif
+
+endif:
+ %x = phi i32 [ %tmp1, %if ], [ 0, %entry ]
+ store i32 %x, i32 addrspace(3)* %out.gep
+ br label %done
+
+done:
+ ret void
+}
+
+; OPT-LABEL: @test_wrong_operand_local_small_offset_cmpxchg_i32(
+; OPT: %in.gep = getelementptr i32, i32 addrspace(3)* %in, i32 7
+; OPT: br i1
+; OPT: cmpxchg i32 addrspace(3)* addrspace(3)* undef, i32 addrspace(3)* %in.gep, i32 addrspace(3)* undef seq_cst monotonic
+define amdgpu_kernel void @test_wrong_operand_local_small_offset_cmpxchg_i32(i32 addrspace(3)* addrspace(3)* %out, i32 addrspace(3)* %in) {
+entry:
+ %out.gep = getelementptr i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* %out, i32 999999
+ %in.gep = getelementptr i32, i32 addrspace(3)* %in, i32 7
+ %tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
+ %tmp0 = icmp eq i32 %tid, 0
+ br i1 %tmp0, label %endif, label %if
+
+if:
+ %tmp1.struct = cmpxchg i32 addrspace(3)* addrspace(3)* undef, i32 addrspace(3)* %in.gep, i32 addrspace(3)* undef seq_cst monotonic
+ %tmp1 = extractvalue { i32 addrspace(3)*, i1 } %tmp1.struct, 0
+ br label %endif
+
+endif:
+ %x = phi i32 addrspace(3)* [ %tmp1, %if ], [ null, %entry ]
+ store i32 addrspace(3)* %x, i32 addrspace(3)* addrspace(3)* %out.gep
+ br label %done
+
+done:
+ ret void
+}
+
+; OPT-LABEL: @test_sink_local_small_offset_atomic_inc_i32(
+; OPT: %0 = bitcast i32 addrspace(3)* %in to i8 addrspace(3)*
+; OPT: %sunkaddr = getelementptr i8, i8 addrspace(3)* %0, i32 28
+; OPT: %1 = bitcast i8 addrspace(3)* %sunkaddr to i32 addrspace(3)*
+; OPT: %tmp1 = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %1, i32 2, i32 0, i32 0, i1 false)
+define amdgpu_kernel void @test_sink_local_small_offset_atomic_inc_i32(i32 addrspace(3)* %out, i32 addrspace(3)* %in) {
+entry:
+ %out.gep = getelementptr i32, i32 addrspace(3)* %out, i32 999999
+ %in.gep = getelementptr i32, i32 addrspace(3)* %in, i32 7
+ %tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
+ %tmp0 = icmp eq i32 %tid, 0
+ br i1 %tmp0, label %endif, label %if
+
+if:
+ %tmp1 = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %in.gep, i32 2, i32 0, i32 0, i1 false)
+ br label %endif
+
+endif:
+ %x = phi i32 [ %tmp1, %if ], [ 0, %entry ]
+ store i32 %x, i32 addrspace(3)* %out.gep
+ br label %done
+
+done:
+ ret void
+}
+
+; OPT-LABEL: @test_sink_local_small_offset_atomic_dec_i32(
+; OPT: %0 = bitcast i32 addrspace(3)* %in to i8 addrspace(3)*
+; OPT: %sunkaddr = getelementptr i8, i8 addrspace(3)* %0, i32 28
+; OPT: %1 = bitcast i8 addrspace(3)* %sunkaddr to i32 addrspace(3)*
+; OPT: %tmp1 = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %1, i32 2, i32 0, i32 0, i1 false)
+define amdgpu_kernel void @test_sink_local_small_offset_atomic_dec_i32(i32 addrspace(3)* %out, i32 addrspace(3)* %in) {
+entry:
+ %out.gep = getelementptr i32, i32 addrspace(3)* %out, i32 999999
+ %in.gep = getelementptr i32, i32 addrspace(3)* %in, i32 7
+ %tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
+ %tmp0 = icmp eq i32 %tid, 0
+ br i1 %tmp0, label %endif, label %if
+
+if:
+ %tmp1 = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %in.gep, i32 2, i32 0, i32 0, i1 false)
+ br label %endif
+
+endif:
+ %x = phi i32 [ %tmp1, %if ], [ 0, %entry ]
+ store i32 %x, i32 addrspace(3)* %out.gep
+ br label %done
+
+done:
+ ret void
+}
+
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
+declare i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* nocapture, i32, i32, i32, i1) #2
+declare i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* nocapture, i32, i32, i32, i1) #2
attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }
+attributes #2 = { nounwind argmemonly }
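; The new local-memory tests above all sink the same address: for
; %in.gep = getelementptr i32, i32 addrspace(3)* %in, i32 7 the byte offset is
; 7 x 4 = 28, which is exactly the sunk form the OPT checks spell out:
;   %0 = bitcast i32 addrspace(3)* %in to i8 addrspace(3)*
;   %sunkaddr = getelementptr i8, i8 addrspace(3)* %0, i32 28
;   %1 = bitcast i8 addrspace(3)* %sunkaddr to i32 addrspace(3)*
; The same pattern is checked for atomicrmw, cmpxchg, and the
; llvm.amdgcn.atomic.inc/dec intrinsics, while the "wrong operand" case
; verifies that a pointer used as a cmpxchg value operand is not rewritten.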
diff --git a/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll b/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
index 066ef951cc31..53adf09026ec 100644
--- a/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
+++ b/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
@@ -36,7 +36,7 @@
; GCN: BB0_3:
; GCN: buffer_store_dword
; GCN: s_endpgm
-define void @sink_ubfe_i32(i32 addrspace(1)* %out, i32 %arg1) #0 {
+define amdgpu_kernel void @sink_ubfe_i32(i32 addrspace(1)* %out, i32 %arg1) #0 {
entry:
%shr = lshr i32 %arg1, 8
br i1 undef, label %bb0, label %bb1
@@ -76,7 +76,7 @@ ret:
; OPT: ret
; GCN-LABEL: {{^}}sink_sbfe_i32:
-define void @sink_sbfe_i32(i32 addrspace(1)* %out, i32 %arg1) #0 {
+define amdgpu_kernel void @sink_sbfe_i32(i32 addrspace(1)* %out, i32 %arg1) #0 {
entry:
%shr = ashr i32 %arg1, 8
br i1 undef, label %bb0, label %bb1
@@ -120,20 +120,21 @@ ret:
; GCN-LABEL: {{^}}sink_ubfe_i16:
; GCN-NOT: lshr
-; VI: s_bfe_u32 s0, s0, 0xc0004
+; VI: s_load_dword [[ARG:s[0-9]+]], s[0:1], 0x2c
+; VI: s_bfe_u32 [[BFE:s[0-9]+]], [[ARG]], 0xc0004
; GCN: s_cbranch_scc1
; SI: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80004
-; VI: s_and_b32 s0, s0, 0xff
+; VI: s_and_b32 s{{[0-9]+}}, [[BFE]], 0xff
; GCN: BB2_2:
; SI: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x70004
-; VI: s_and_b32 s0, s0, 0x7f
+; VI: s_and_b32 s{{[0-9]+}}, [[BFE]], 0x7f
; GCN: BB2_3:
; GCN: buffer_store_short
; GCN: s_endpgm
-define void @sink_ubfe_i16(i16 addrspace(1)* %out, i16 %arg1) #0 {
+define amdgpu_kernel void @sink_ubfe_i16(i16 addrspace(1)* %out, i16 %arg1) #0 {
entry:
%shr = lshr i16 %arg1, 4
br i1 undef, label %bb0, label %bb1
@@ -186,7 +187,7 @@ ret:
; GCN: BB3_3:
; GCN: buffer_store_dwordx2
-define void @sink_ubfe_i64_span_midpoint(i64 addrspace(1)* %out, i64 %arg1) #0 {
+define amdgpu_kernel void @sink_ubfe_i64_span_midpoint(i64 addrspace(1)* %out, i64 %arg1) #0 {
entry:
%shr = lshr i64 %arg1, 30
br i1 undef, label %bb0, label %bb1
@@ -235,7 +236,7 @@ ret:
; GCN: BB4_3:
; GCN: buffer_store_dwordx2
-define void @sink_ubfe_i64_low32(i64 addrspace(1)* %out, i64 %arg1) #0 {
+define amdgpu_kernel void @sink_ubfe_i64_low32(i64 addrspace(1)* %out, i64 %arg1) #0 {
entry:
%shr = lshr i64 %arg1, 15
br i1 undef, label %bb0, label %bb1
@@ -282,7 +283,7 @@ ret:
; GCN: BB5_3:
; GCN: buffer_store_dwordx2
-define void @sink_ubfe_i64_high32(i64 addrspace(1)* %out, i64 %arg1) #0 {
+define amdgpu_kernel void @sink_ubfe_i64_high32(i64 addrspace(1)* %out, i64 %arg1) #0 {
entry:
%shr = lshr i64 %arg1, 35
br i1 undef, label %bb0, label %bb1
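; The s_bfe_u32 immediates in the checks above decode as follows, assuming the
; usual SGPR bitfield-extract source-2 encoding of offset in bits [5:0] and
; width in bits [22:16]:
;   0xc0004 -> offset 4, width 12 (the 12 bits left in an i16 after lshr 4)
;   0x80004 -> offset 4, width 8  (the branch masking with 0xff)
;   0x70004 -> offset 4, width 7  (the branch masking with 0x7f)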
diff --git a/test/CodeGen/AMDGPU/clamp-modifier.ll b/test/CodeGen/AMDGPU/clamp-modifier.ll
new file mode 100644
index 000000000000..c3a7d5e14d87
--- /dev/null
+++ b/test/CodeGen/AMDGPU/clamp-modifier.ll
@@ -0,0 +1,222 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}v_clamp_add_src_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN-NOT: [[A]]
+; GCN: v_add_f32_e64 v{{[0-9]+}}, [[A]], 1.0 clamp{{$}}
+define amdgpu_kernel void @v_clamp_add_src_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %add = fadd float %a, 1.0
+ %max = call float @llvm.maxnum.f32(float %add, float 0.0)
+ %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+ store float %clamp, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_multi_use_src_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}}
+; GCN: v_max_f32_e64 v{{[0-9]+}}, [[ADD]], [[ADD]] clamp{{$}}
+define amdgpu_kernel void @v_clamp_multi_use_src_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %add = fadd float %a, 1.0
+ %max = call float @llvm.maxnum.f32(float %add, float 0.0)
+ %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+ store float %clamp, float addrspace(1)* %out.gep
+ store volatile float %add, float addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_dbg_use_src_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN-NOT: [[A]]
+; GCN: v_add_f32_e64 v{{[0-9]+}}, [[A]], 1.0 clamp{{$}}
+define amdgpu_kernel void @v_clamp_dbg_use_src_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %add = fadd float %a, 1.0
+ call void @llvm.dbg.value(metadata float %add, i64 0, metadata !4, metadata !9), !dbg !10
+ %max = call float @llvm.maxnum.f32(float %add, float 0.0)
+ %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+ store float %clamp, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_add_neg_src_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_floor_f32_e32 [[FLOOR:v[0-9]+]], [[A]]
+; GCN: v_max_f32_e64 v{{[0-9]+}}, -[[FLOOR]], -[[FLOOR]] clamp{{$}}
+define amdgpu_kernel void @v_clamp_add_neg_src_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %floor = call float @llvm.floor.f32(float %a)
+ %neg.floor = fsub float -0.0, %floor
+ %max = call float @llvm.maxnum.f32(float %neg.floor, float 0.0)
+ %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+ store float %clamp, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_non_clamp_max_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}}
+; GCN: v_max_f32_e32 v{{[0-9]+}}, 0, [[ADD]]{{$}}
+define amdgpu_kernel void @v_non_clamp_max_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %add = fadd float %a, 1.0
+ %max = call float @llvm.maxnum.f32(float %add, float 0.0)
+ store float %max, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_add_src_f32_denormals:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e64 [[ADD:v[0-9]+]], [[A]], 1.0 clamp{{$}}
+define amdgpu_kernel void @v_clamp_add_src_f32_denormals(float addrspace(1)* %out, float addrspace(1)* %aptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %add = fadd float %a, 1.0
+ %max = call float @llvm.maxnum.f32(float %add, float 0.0)
+ %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+ store float %clamp, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_add_src_f16_denorm:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; VI: v_add_f16_e64 [[ADD:v[0-9]+]], [[A]], 1.0 clamp{{$}}
+
+; SI: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], [[A]]
+; SI: v_add_f32_e64 [[ADD:v[0-9]+]], [[CVT]], 1.0 clamp{{$}}
+; SI: v_cvt_f16_f32_e32 v{{[0-9]+}}, [[ADD]]
+define amdgpu_kernel void @v_clamp_add_src_f16_denorm(half addrspace(1)* %out, half addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr half, half addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr half, half addrspace(1)* %out, i32 %tid
+ %a = load half, half addrspace(1)* %gep0
+ %add = fadd half %a, 1.0
+ %max = call half @llvm.maxnum.f16(half %add, half 0.0)
+ %clamp = call half @llvm.minnum.f16(half %max, half 1.0)
+ store half %clamp, half addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_add_src_f16_no_denormals:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; VI-NOT: [[A]]
+; VI: v_add_f16_e64 v{{[0-9]+}}, [[A]], 1.0 clamp{{$}}
+
+; SI: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], [[A]]
+; SI: v_add_f32_e64 [[ADD:v[0-9]+]], [[CVT]], 1.0 clamp{{$}}
+; SI: v_cvt_f16_f32_e32 v{{[0-9]+}}, [[ADD]]
+define amdgpu_kernel void @v_clamp_add_src_f16_no_denormals(half addrspace(1)* %out, half addrspace(1)* %aptr) #3 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr half, half addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr half, half addrspace(1)* %out, i32 %tid
+ %a = load half, half addrspace(1)* %gep0
+ %add = fadd half %a, 1.0
+ %max = call half @llvm.maxnum.f16(half %add, half 0.0)
+ %clamp = call half @llvm.minnum.f16(half %max, half 1.0)
+ store half %clamp, half addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_add_src_v2f32:
+; GCN: {{buffer|flat}}_load_dwordx2 v{{\[}}[[A:[0-9]+]]:[[B:[0-9]+]]{{\]}}
+; GCN-DAG: v_add_f32_e64 v{{[0-9]+}}, v[[A]], 1.0 clamp{{$}}
+; GCN-DAG: v_add_f32_e64 v{{[0-9]+}}, v[[B]], 1.0 clamp{{$}}
+define amdgpu_kernel void @v_clamp_add_src_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr <2 x float>, <2 x float> addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr <2 x float>, <2 x float> addrspace(1)* %out, i32 %tid
+ %a = load <2 x float>, <2 x float> addrspace(1)* %gep0
+ %add = fadd <2 x float> %a, <float 1.0, float 1.0>
+ %max = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %add, <2 x float> zeroinitializer)
+ %clamp = call <2 x float> @llvm.minnum.v2f32(<2 x float> %max, <2 x float> <float 1.0, float 1.0>)
+ store <2 x float> %clamp, <2 x float> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_add_src_f64:
+; GCN: {{buffer|flat}}_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]]
+; GCN: v_add_f64 v{{\[[0-9]+:[0-9]+\]}}, [[A]], 1.0 clamp{{$}}
+define amdgpu_kernel void @v_clamp_add_src_f64(double addrspace(1)* %out, double addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr double, double addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr double, double addrspace(1)* %out, i32 %tid
+ %a = load double, double addrspace(1)* %gep0
+ %add = fadd double %a, 1.0
+ %max = call double @llvm.maxnum.f64(double %add, double 0.0)
+ %clamp = call double @llvm.minnum.f64(double %max, double 1.0)
+ store double %clamp, double addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_mac_to_mad:
+; GCN: v_mad_f32 v{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} clamp{{$}}
+define amdgpu_kernel void @v_clamp_mac_to_mad(float addrspace(1)* %out, float addrspace(1)* %aptr, float %a) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %b = load float, float addrspace(1)* %gep0
+
+ %mul = fmul float %a, %a
+ %add = fadd float %mul, %b
+ %max = call float @llvm.maxnum.f32(float %add, float 0.0)
+ %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+ %res = fadd float %clamp, %b
+ store float %res, float addrspace(1)* %out.gep
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+declare float @llvm.fabs.f32(float) #1
+declare float @llvm.floor.f32(float) #1
+declare float @llvm.minnum.f32(float, float) #1
+declare float @llvm.maxnum.f32(float, float) #1
+declare float @llvm.amdgcn.fmed3.f32(float, float, float) #1
+declare double @llvm.fabs.f64(double) #1
+declare double @llvm.minnum.f64(double, double) #1
+declare double @llvm.maxnum.f64(double, double) #1
+declare half @llvm.fabs.f16(half) #1
+declare half @llvm.minnum.f16(half, half) #1
+declare half @llvm.maxnum.f16(half, half) #1
+declare <2 x float> @llvm.minnum.v2f32(<2 x float>, <2 x float>) #1
+declare <2 x float> @llvm.maxnum.v2f32(<2 x float>, <2 x float>) #1
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind "target-features"="+fp32-denormals" }
+attributes #3 = { nounwind "target-features"="-fp64-fp16-denormals" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, isOptimized: true, runtimeVersion: 0, emissionKind: NoDebug)
+!1 = !DIFile(filename: "/tmp/foo.cl", directory: "/dev/null")
+!2 = !{i32 2, !"Dwarf Version", i32 4}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !DILocalVariable(name: "add", arg: 1, scope: !5, file: !1, line: 1)
+!5 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: true, unit: !0)
+!6 = !DISubroutineType(types: !7)
+!7 = !{null, !8}
+!8 = !DIBasicType(name: "float", size: 32, align: 32)
+!9 = !DIExpression()
+!10 = !DILocation(line: 1, column: 42, scope: !5)
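; The clamp-modifier.ll tests above cover folding the
; minnum(maxnum(x, 0.0), 1.0) idiom into the VOP3 clamp bit on the defining
; instruction. A minimal sketch of the matched pattern (mirroring
; v_clamp_add_src_f32):
;   %add = fadd float %a, 1.0
;   %max = call float @llvm.maxnum.f32(float %add, float 0.0)
;   %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
; becomes a single v_add_f32 with clamp set, while the multi-use case keeps
; the add and applies the clamp as a v_max_f32 of the value with itself.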
diff --git a/test/CodeGen/AMDGPU/clamp-omod-special-case.mir b/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
new file mode 100644
index 000000000000..fbfd0fbf9308
--- /dev/null
+++ b/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
@@ -0,0 +1,424 @@
+# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-fold-operands %s -o - | FileCheck -check-prefix=GCN %s
+--- |
+ define amdgpu_ps void @v_max_self_clamp_not_set_f32() #0 {
+ ret void
+ }
+
+ define amdgpu_ps void @v_clamp_omod_already_set_f32() #0 {
+ ret void
+ }
+
+ define amdgpu_ps void @v_omod_mul_omod_already_set_f32() #0 {
+ ret void
+ }
+
+ define amdgpu_ps void @v_omod_mul_clamp_already_set_f32() #0 {
+ ret void
+ }
+
+ define amdgpu_ps void @v_omod_add_omod_already_set_f32() #0 {
+ ret void
+ }
+
+ define amdgpu_ps void @v_omod_add_clamp_already_set_f32() #0 {
+ ret void
+ }
+
+ attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" }
+
+...
+---
+# GCN-LABEL: name: v_max_self_clamp_not_set_f32
+# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+# GCN-NEXT: %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 0, 0, implicit %exec
+
+name: v_max_self_clamp_not_set_f32
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: sgpr_64 }
+ - { id: 1, class: sreg_32_xm0 }
+ - { id: 2, class: sgpr_32 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_64_xexec }
+ - { id: 5, class: sreg_64_xexec }
+ - { id: 6, class: sreg_32 }
+ - { id: 7, class: sreg_32 }
+ - { id: 8, class: sreg_32_xm0 }
+ - { id: 9, class: sreg_64 }
+ - { id: 10, class: sreg_32_xm0 }
+ - { id: 11, class: sreg_32_xm0 }
+ - { id: 12, class: sgpr_64 }
+ - { id: 13, class: sgpr_128 }
+ - { id: 14, class: sreg_32_xm0 }
+ - { id: 15, class: sreg_64 }
+ - { id: 16, class: sgpr_128 }
+ - { id: 17, class: vgpr_32 }
+ - { id: 18, class: vreg_64 }
+ - { id: 19, class: vgpr_32 }
+ - { id: 20, class: vgpr_32 }
+ - { id: 21, class: vgpr_32 }
+ - { id: 22, class: vgpr_32 }
+ - { id: 23, class: vreg_64 }
+ - { id: 24, class: vgpr_32 }
+ - { id: 25, class: vreg_64 }
+ - { id: 26, class: vreg_64 }
+liveins:
+ - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+ - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %sgpr0_sgpr1, %vgpr0
+
+ %3 = COPY %vgpr0
+ %0 = COPY %sgpr0_sgpr1
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+ %25 = REG_SEQUENCE %3, 1, %24, 2
+ %10 = S_MOV_B32 61440
+ %11 = S_MOV_B32 0
+ %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
+ %13 = REG_SEQUENCE killed %5, 17, %12, 18
+ %14 = S_MOV_B32 2
+ %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+ %16 = REG_SEQUENCE killed %4, 17, %12, 18
+ %18 = COPY %26
+ %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
+ %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+ %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 0, 0, implicit %exec
+ BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+ S_ENDPGM
+
+...
+---
+# GCN-LABEL: name: v_clamp_omod_already_set_f32
+# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+# GCN: %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 1, 3, implicit %exec
+name: v_clamp_omod_already_set_f32
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: sgpr_64 }
+ - { id: 1, class: sreg_32_xm0 }
+ - { id: 2, class: sgpr_32 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_64_xexec }
+ - { id: 5, class: sreg_64_xexec }
+ - { id: 6, class: sreg_32 }
+ - { id: 7, class: sreg_32 }
+ - { id: 8, class: sreg_32_xm0 }
+ - { id: 9, class: sreg_64 }
+ - { id: 10, class: sreg_32_xm0 }
+ - { id: 11, class: sreg_32_xm0 }
+ - { id: 12, class: sgpr_64 }
+ - { id: 13, class: sgpr_128 }
+ - { id: 14, class: sreg_32_xm0 }
+ - { id: 15, class: sreg_64 }
+ - { id: 16, class: sgpr_128 }
+ - { id: 17, class: vgpr_32 }
+ - { id: 18, class: vreg_64 }
+ - { id: 19, class: vgpr_32 }
+ - { id: 20, class: vgpr_32 }
+ - { id: 21, class: vgpr_32 }
+ - { id: 22, class: vgpr_32 }
+ - { id: 23, class: vreg_64 }
+ - { id: 24, class: vgpr_32 }
+ - { id: 25, class: vreg_64 }
+ - { id: 26, class: vreg_64 }
+liveins:
+ - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+ - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %sgpr0_sgpr1, %vgpr0
+
+ %3 = COPY %vgpr0
+ %0 = COPY %sgpr0_sgpr1
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+ %25 = REG_SEQUENCE %3, 1, %24, 2
+ %10 = S_MOV_B32 61440
+ %11 = S_MOV_B32 0
+ %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
+ %13 = REG_SEQUENCE killed %5, 17, %12, 18
+ %14 = S_MOV_B32 2
+ %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+ %16 = REG_SEQUENCE killed %4, 17, %12, 18
+ %18 = COPY %26
+ %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
+ %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+ %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 1, 3, implicit %exec
+ BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+ S_ENDPGM
+...
+---
+# Don't fold a mul that looks like an omod if it already has omod set
+
+# GCN-LABEL: name: v_omod_mul_omod_already_set_f32
+# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+# GCN-NEXT: %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit %exec
+name: v_omod_mul_omod_already_set_f32
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: sgpr_64 }
+ - { id: 1, class: sreg_32_xm0 }
+ - { id: 2, class: sgpr_32 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_64_xexec }
+ - { id: 5, class: sreg_64_xexec }
+ - { id: 6, class: sreg_32 }
+ - { id: 7, class: sreg_32 }
+ - { id: 8, class: sreg_32_xm0 }
+ - { id: 9, class: sreg_64 }
+ - { id: 10, class: sreg_32_xm0 }
+ - { id: 11, class: sreg_32_xm0 }
+ - { id: 12, class: sgpr_64 }
+ - { id: 13, class: sgpr_128 }
+ - { id: 14, class: sreg_32_xm0 }
+ - { id: 15, class: sreg_64 }
+ - { id: 16, class: sgpr_128 }
+ - { id: 17, class: vgpr_32 }
+ - { id: 18, class: vreg_64 }
+ - { id: 19, class: vgpr_32 }
+ - { id: 20, class: vgpr_32 }
+ - { id: 21, class: vgpr_32 }
+ - { id: 22, class: vgpr_32 }
+ - { id: 23, class: vreg_64 }
+ - { id: 24, class: vgpr_32 }
+ - { id: 25, class: vreg_64 }
+ - { id: 26, class: vreg_64 }
+liveins:
+ - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+ - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %sgpr0_sgpr1, %vgpr0
+
+ %3 = COPY %vgpr0
+ %0 = COPY %sgpr0_sgpr1
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+ %25 = REG_SEQUENCE %3, 1, %24, 2
+ %10 = S_MOV_B32 61440
+ %11 = S_MOV_B32 0
+ %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
+ %13 = REG_SEQUENCE killed %5, 17, %12, 18
+ %14 = S_MOV_B32 2
+ %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+ %16 = REG_SEQUENCE killed %4, 17, %12, 18
+ %18 = COPY %26
+ %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
+ %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+ %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit %exec
+ BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+ S_ENDPGM
+
+...
+---
+# Don't fold a mul that looks like an omod if it already has clamp set
+# This might be OK, but would require folding the clamp at the same time.
+# GCN-LABEL: name: v_omod_mul_clamp_already_set_f32
+# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+# GCN-NEXT: %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit %exec
+
+name: v_omod_mul_clamp_already_set_f32
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: sgpr_64 }
+ - { id: 1, class: sreg_32_xm0 }
+ - { id: 2, class: sgpr_32 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_64_xexec }
+ - { id: 5, class: sreg_64_xexec }
+ - { id: 6, class: sreg_32 }
+ - { id: 7, class: sreg_32 }
+ - { id: 8, class: sreg_32_xm0 }
+ - { id: 9, class: sreg_64 }
+ - { id: 10, class: sreg_32_xm0 }
+ - { id: 11, class: sreg_32_xm0 }
+ - { id: 12, class: sgpr_64 }
+ - { id: 13, class: sgpr_128 }
+ - { id: 14, class: sreg_32_xm0 }
+ - { id: 15, class: sreg_64 }
+ - { id: 16, class: sgpr_128 }
+ - { id: 17, class: vgpr_32 }
+ - { id: 18, class: vreg_64 }
+ - { id: 19, class: vgpr_32 }
+ - { id: 20, class: vgpr_32 }
+ - { id: 21, class: vgpr_32 }
+ - { id: 22, class: vgpr_32 }
+ - { id: 23, class: vreg_64 }
+ - { id: 24, class: vgpr_32 }
+ - { id: 25, class: vreg_64 }
+ - { id: 26, class: vreg_64 }
+liveins:
+ - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+ - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %sgpr0_sgpr1, %vgpr0
+
+ %3 = COPY %vgpr0
+ %0 = COPY %sgpr0_sgpr1
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+ %25 = REG_SEQUENCE %3, 1, %24, 2
+ %10 = S_MOV_B32 61440
+ %11 = S_MOV_B32 0
+ %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
+ %13 = REG_SEQUENCE killed %5, 17, %12, 18
+ %14 = S_MOV_B32 2
+ %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+ %16 = REG_SEQUENCE killed %4, 17, %12, 18
+ %18 = COPY %26
+ %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
+ %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+ %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit %exec
+ BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+ S_ENDPGM
+
+...
+---
+# Don't fold an add that looks like an omod if it already has omod set
+
+# GCN-LABEL: name: v_omod_add_omod_already_set_f32
+# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+# GCN-NEXT: %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit %exec
+name: v_omod_add_omod_already_set_f32
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: sgpr_64 }
+ - { id: 1, class: sreg_32_xm0 }
+ - { id: 2, class: sgpr_32 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_64_xexec }
+ - { id: 5, class: sreg_64_xexec }
+ - { id: 6, class: sreg_32 }
+ - { id: 7, class: sreg_32 }
+ - { id: 8, class: sreg_32_xm0 }
+ - { id: 9, class: sreg_64 }
+ - { id: 10, class: sreg_32_xm0 }
+ - { id: 11, class: sreg_32_xm0 }
+ - { id: 12, class: sgpr_64 }
+ - { id: 13, class: sgpr_128 }
+ - { id: 14, class: sreg_32_xm0 }
+ - { id: 15, class: sreg_64 }
+ - { id: 16, class: sgpr_128 }
+ - { id: 17, class: vgpr_32 }
+ - { id: 18, class: vreg_64 }
+ - { id: 19, class: vgpr_32 }
+ - { id: 20, class: vgpr_32 }
+ - { id: 21, class: vgpr_32 }
+ - { id: 22, class: vgpr_32 }
+ - { id: 23, class: vreg_64 }
+ - { id: 24, class: vgpr_32 }
+ - { id: 25, class: vreg_64 }
+ - { id: 26, class: vreg_64 }
+liveins:
+ - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+ - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %sgpr0_sgpr1, %vgpr0
+
+ %3 = COPY %vgpr0
+ %0 = COPY %sgpr0_sgpr1
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+ %25 = REG_SEQUENCE %3, 1, %24, 2
+ %10 = S_MOV_B32 61440
+ %11 = S_MOV_B32 0
+ %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
+ %13 = REG_SEQUENCE killed %5, 17, %12, 18
+ %14 = S_MOV_B32 2
+ %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+ %16 = REG_SEQUENCE killed %4, 17, %12, 18
+ %18 = COPY %26
+ %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
+ %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+ %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit %exec
+ BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+ S_ENDPGM
+
+...
+---
+# Don't fold an add that looks like an omod if it already has clamp set
+# This might be OK, but would require folding the clamp at the same time.
+# GCN-LABEL: name: v_omod_add_clamp_already_set_f32
+# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+# GCN-NEXT: %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit %exec
+
+name: v_omod_add_clamp_already_set_f32
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: sgpr_64 }
+ - { id: 1, class: sreg_32_xm0 }
+ - { id: 2, class: sgpr_32 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_64_xexec }
+ - { id: 5, class: sreg_64_xexec }
+ - { id: 6, class: sreg_32 }
+ - { id: 7, class: sreg_32 }
+ - { id: 8, class: sreg_32_xm0 }
+ - { id: 9, class: sreg_64 }
+ - { id: 10, class: sreg_32_xm0 }
+ - { id: 11, class: sreg_32_xm0 }
+ - { id: 12, class: sgpr_64 }
+ - { id: 13, class: sgpr_128 }
+ - { id: 14, class: sreg_32_xm0 }
+ - { id: 15, class: sreg_64 }
+ - { id: 16, class: sgpr_128 }
+ - { id: 17, class: vgpr_32 }
+ - { id: 18, class: vreg_64 }
+ - { id: 19, class: vgpr_32 }
+ - { id: 20, class: vgpr_32 }
+ - { id: 21, class: vgpr_32 }
+ - { id: 22, class: vgpr_32 }
+ - { id: 23, class: vreg_64 }
+ - { id: 24, class: vgpr_32 }
+ - { id: 25, class: vreg_64 }
+ - { id: 26, class: vreg_64 }
+liveins:
+ - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+ - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %sgpr0_sgpr1, %vgpr0
+
+ %3 = COPY %vgpr0
+ %0 = COPY %sgpr0_sgpr1
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+ %25 = REG_SEQUENCE %3, 1, %24, 2
+ %10 = S_MOV_B32 61440
+ %11 = S_MOV_B32 0
+ %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
+ %13 = REG_SEQUENCE killed %5, 17, %12, 18
+ %14 = S_MOV_B32 2
+ %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+ %16 = REG_SEQUENCE killed %4, 17, %12, 18
+ %18 = COPY %26
+ %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
+ %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+ %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit %exec
+ BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+ S_ENDPGM
+
+...
diff --git a/test/CodeGen/AMDGPU/clamp.ll b/test/CodeGen/AMDGPU/clamp.ll
new file mode 100644
index 000000000000..9735c7074be2
--- /dev/null
+++ b/test/CodeGen/AMDGPU/clamp.ll
@@ -0,0 +1,529 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}v_clamp_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e64 v{{[0-9]+}}, [[A]], [[A]] clamp{{$}}
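+; The maxnum(a, 0.0) / minnum(, 1.0) pair below is the canonical
+; clamp-to-[0,1] idiom; the checks show it folding into the clamp modifier
+; on a v_max of the loaded value with itself.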
+define amdgpu_kernel void @v_clamp_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %max = call float @llvm.maxnum.f32(float %a, float 0.0)
+ %med = call float @llvm.minnum.f32(float %max, float 1.0)
+
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_neg_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e64 v{{[0-9]+}}, -[[A]], -[[A]] clamp{{$}}
+define amdgpu_kernel void @v_clamp_neg_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %fneg.a = fsub float -0.0, %a
+ %max = call float @llvm.maxnum.f32(float %fneg.a, float 0.0)
+ %med = call float @llvm.minnum.f32(float %max, float 1.0)
+
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_negabs_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e64 v{{[0-9]+}}, -|[[A]]|, -|[[A]]| clamp{{$}}
+define amdgpu_kernel void @v_clamp_negabs_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %fabs.a = call float @llvm.fabs.f32(float %a)
+ %fneg.fabs.a = fsub float -0.0, %fabs.a
+
+ %max = call float @llvm.maxnum.f32(float %fneg.fabs.a, float 0.0)
+ %med = call float @llvm.minnum.f32(float %max, float 1.0)
+
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_negzero_f32:
+; GCN-DAG: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN-DAG: v_bfrev_b32_e32 [[SIGNBIT:v[0-9]+]], 1
+; GCN: v_med3_f32 v{{[0-9]+}}, [[A]], [[SIGNBIT]], 1.0
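+; (A -0.0 lower bound does not match the plain clamp pattern, so a v_med3 is
+; emitted instead of the clamp modifier, with the sign-bit constant
+; 0x80000000 materialized by bit-reversing 1.)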
+define amdgpu_kernel void @v_clamp_negzero_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %max = call float @llvm.maxnum.f32(float %a, float -0.0)
+ %med = call float @llvm.minnum.f32(float %max, float 1.0)
+
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_multi_use_max_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e32 [[MAX:v[0-9]+]], 0, [[A]]
+; GCN: v_min_f32_e32 [[MIN:v[0-9]+]], 1.0, [[MAX]]
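+; (The volatile store below keeps %max live; folding the pair into a clamp
+; modifier would lose that intermediate value, so the max/min stay separate.)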
+define amdgpu_kernel void @v_clamp_multi_use_max_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %max = call float @llvm.maxnum.f32(float %a, float 0.0)
+ %med = call float @llvm.minnum.f32(float %max, float 1.0)
+
+ store float %med, float addrspace(1)* %out.gep
+ store volatile float %max, float addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_f16:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; VI: v_max_f16_e64 v{{[0-9]+}}, [[A]], [[A]] clamp{{$}}
+
+; SI: v_cvt_f32_f16_e64 [[CVT:v[0-9]+]], [[A]] clamp{{$}}
+; SI: v_cvt_f16_f32_e32 v{{[0-9]+}}, [[CVT]]
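+; (SI has no 16-bit VALU max, so the clamp is applied to the f16-to-f32
+; convert instead, as the SI checks above show.)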
+define amdgpu_kernel void @v_clamp_f16(half addrspace(1)* %out, half addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr half, half addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr half, half addrspace(1)* %out, i32 %tid
+ %a = load half, half addrspace(1)* %gep0
+ %max = call half @llvm.maxnum.f16(half %a, half 0.0)
+ %med = call half @llvm.minnum.f16(half %max, half 1.0)
+
+ store half %med, half addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_neg_f16:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; VI: v_max_f16_e64 v{{[0-9]+}}, -[[A]], -[[A]] clamp{{$}}
+
+; FIXME: Better to fold neg into max
+; SI: v_cvt_f32_f16_e64 [[CVT:v[0-9]+]], -[[A]] clamp{{$}}
+; SI: v_cvt_f16_f32_e32 v{{[0-9]+}}, [[CVT]]
+define amdgpu_kernel void @v_clamp_neg_f16(half addrspace(1)* %out, half addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr half, half addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr half, half addrspace(1)* %out, i32 %tid
+ %a = load half, half addrspace(1)* %gep0
+ %fneg.a = fsub half -0.0, %a
+ %max = call half @llvm.maxnum.f16(half %fneg.a, half 0.0)
+ %med = call half @llvm.minnum.f16(half %max, half 1.0)
+
+ store half %med, half addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_negabs_f16:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; VI: v_max_f16_e64 v{{[0-9]+}}, -|[[A]]|, -|[[A]]| clamp{{$}}
+
+; FIXME: Better to fold neg/abs into max
+
+; SI: v_cvt_f32_f16_e64 [[CVT:v[0-9]+]], -|[[A]]| clamp{{$}}
+; SI: v_cvt_f16_f32_e32 v{{[0-9]+}}, [[CVT]]
+define amdgpu_kernel void @v_clamp_negabs_f16(half addrspace(1)* %out, half addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr half, half addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr half, half addrspace(1)* %out, i32 %tid
+ %a = load half, half addrspace(1)* %gep0
+ %fabs.a = call half @llvm.fabs.f16(half %a)
+ %fneg.fabs.a = fsub half -0.0, %fabs.a
+
+ %max = call half @llvm.maxnum.f16(half %fneg.fabs.a, half 0.0)
+ %med = call half @llvm.minnum.f16(half %max, half 1.0)
+
+ store half %med, half addrspace(1)* %out.gep
+ ret void
+}
+
+; FIXME: Do f64 instructions support clamp?
+; GCN-LABEL: {{^}}v_clamp_f64:
+; GCN: {{buffer|flat}}_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]]
+; GCN: v_max_f64 v{{\[[0-9]+:[0-9]+\]}}, [[A]], [[A]] clamp{{$}}
+define amdgpu_kernel void @v_clamp_f64(double addrspace(1)* %out, double addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr double, double addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr double, double addrspace(1)* %out, i32 %tid
+ %a = load double, double addrspace(1)* %gep0
+ %max = call double @llvm.maxnum.f64(double %a, double 0.0)
+ %med = call double @llvm.minnum.f64(double %max, double 1.0)
+
+ store double %med, double addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_neg_f64:
+; GCN: {{buffer|flat}}_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]]
+; GCN: v_max_f64 v{{\[[0-9]+:[0-9]+\]}}, -[[A]], -[[A]] clamp{{$}}
+define amdgpu_kernel void @v_clamp_neg_f64(double addrspace(1)* %out, double addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr double, double addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr double, double addrspace(1)* %out, i32 %tid
+ %a = load double, double addrspace(1)* %gep0
+ %fneg.a = fsub double -0.0, %a
+ %max = call double @llvm.maxnum.f64(double %fneg.a, double 0.0)
+ %med = call double @llvm.minnum.f64(double %max, double 1.0)
+
+ store double %med, double addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_negabs_f64:
+; GCN: {{buffer|flat}}_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]]
+; GCN: v_max_f64 v{{\[[0-9]+:[0-9]+\]}}, -|[[A]]|, -|[[A]]| clamp{{$}}
+define amdgpu_kernel void @v_clamp_negabs_f64(double addrspace(1)* %out, double addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr double, double addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr double, double addrspace(1)* %out, i32 %tid
+ %a = load double, double addrspace(1)* %gep0
+ %fabs.a = call double @llvm.fabs.f64(double %a)
+ %fneg.fabs.a = fsub double -0.0, %fabs.a
+
+ %max = call double @llvm.maxnum.f64(double %fneg.fabs.a, double 0.0)
+ %med = call double @llvm.minnum.f64(double %max, double 1.0)
+
+ store double %med, double addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_med3_aby_negzero_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_med3_f32
+define amdgpu_kernel void @v_clamp_med3_aby_negzero_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %med = call float @llvm.amdgcn.fmed3.f32(float -0.0, float 1.0, float %a)
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_med3_aby_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e64 v{{[0-9]+}}, [[A]], [[A]] clamp{{$}}
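+; (fmed3 with the constants 0.0 and 1.0 is clamp(x) regardless of operand
+; order; the aby/bay/yab/yba/ayb/bya tests cover each permutation.)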
+define amdgpu_kernel void @v_clamp_med3_aby_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 1.0, float %a)
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_med3_bay_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e64 v{{[0-9]+}}, [[A]], [[A]] clamp{{$}}
+define amdgpu_kernel void @v_clamp_med3_bay_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %med = call float @llvm.amdgcn.fmed3.f32(float 1.0, float 0.0, float %a)
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_med3_yab_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e64 v{{[0-9]+}}, [[A]], [[A]] clamp{{$}}
+define amdgpu_kernel void @v_clamp_med3_yab_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %med = call float @llvm.amdgcn.fmed3.f32(float %a, float 0.0, float 1.0)
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_med3_yba_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e64 v{{[0-9]+}}, [[A]], [[A]] clamp{{$}}
+define amdgpu_kernel void @v_clamp_med3_yba_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %med = call float @llvm.amdgcn.fmed3.f32(float %a, float 1.0, float 0.0)
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_med3_ayb_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e64 v{{[0-9]+}}, [[A]], [[A]] clamp{{$}}
+define amdgpu_kernel void @v_clamp_med3_ayb_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float %a, float 1.0)
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_med3_bya_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e64 v{{[0-9]+}}, [[A]], [[A]] clamp{{$}}
+define amdgpu_kernel void @v_clamp_med3_bya_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %med = call float @llvm.amdgcn.fmed3.f32(float 1.0, float %a, float 0.0)
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_constants_to_one_f32:
+; GCN: v_mov_b32_e32 v{{[0-9]+}}, 1.0
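+; (An all-constant fmed3 folds at compile time: med3(0.0, 1.0, 4.0) is 1.0.)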
+define amdgpu_kernel void @v_clamp_constants_to_one_f32(float addrspace(1)* %out) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 1.0, float 4.0)
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_constants_to_zero_f32:
+; GCN: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}}
+define amdgpu_kernel void @v_clamp_constants_to_zero_f32(float addrspace(1)* %out) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 1.0, float -4.0)
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_constant_preserve_f32:
+; GCN: v_mov_b32_e32 v{{[0-9]+}}, 0.5
+define amdgpu_kernel void @v_clamp_constant_preserve_f32(float addrspace(1)* %out) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 1.0, float 0.5)
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_constant_preserve_denorm_f32:
+; GCN: v_mov_b32_e32 v{{[0-9]+}}, 0x7fffff{{$}}
+define amdgpu_kernel void @v_clamp_constant_preserve_denorm_f32(float addrspace(1)* %out) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 1.0, float bitcast (i32 8388607 to float))
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_constant_qnan_f32:
+; GCN: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}}
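+; (With the default dx10_clamp mode a NaN input clamps to 0.0, so the quiet
+; and signaling NaN constants in this test and the next fold to a plain 0.)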
+define amdgpu_kernel void @v_clamp_constant_qnan_f32(float addrspace(1)* %out) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 1.0, float 0x7FF8000000000000)
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_constant_snan_f32:
+; GCN: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}}
+define amdgpu_kernel void @v_clamp_constant_snan_f32(float addrspace(1)* %out) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 1.0, float bitcast (i32 2139095041 to float))
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; ---------------------------------------------------------------------
+; Test the non-default behaviors: enabling sNaNs and disabling dx10_clamp
+; ---------------------------------------------------------------------
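+; (Assumed semantics: with dx10_clamp the clamp modifier maps NaN inputs to
+; 0.0, matching the maxnum/minnum idiom, so the fold is always safe. Without
+; dx10_clamp a NaN passes through the clamp modifier unchanged, so the
+; compiler falls back to v_med3, and when signaling NaNs are also possible it
+; keeps the plain max/min pair unless the source is known nnan.)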
+
+; GCN-LABEL: {{^}}v_clamp_f32_no_dx10_clamp:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[A]], 0, 1.0
+define amdgpu_kernel void @v_clamp_f32_no_dx10_clamp(float addrspace(1)* %out, float addrspace(1)* %aptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %max = call float @llvm.maxnum.f32(float %a, float 0.0)
+ %med = call float @llvm.minnum.f32(float %max, float 1.0)
+
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_f32_snan_dx10clamp:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e64 v{{[0-9]+}}, [[A]], [[A]] clamp{{$}}
+define amdgpu_kernel void @v_clamp_f32_snan_dx10clamp(float addrspace(1)* %out, float addrspace(1)* %aptr) #3 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %max = call float @llvm.maxnum.f32(float %a, float 0.0)
+ %med = call float @llvm.minnum.f32(float %max, float 1.0)
+
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_f32_snan_no_dx10clamp:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e32 [[MAX:v[0-9]+]], 0, [[A]]
+; GCN: v_min_f32_e32 [[MIN:v[0-9]+]], 1.0, [[MAX]]
+define amdgpu_kernel void @v_clamp_f32_snan_no_dx10clamp(float addrspace(1)* %out, float addrspace(1)* %aptr) #4 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %max = call float @llvm.maxnum.f32(float %a, float 0.0)
+ %med = call float @llvm.minnum.f32(float %max, float 1.0)
+
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_f32_snan_no_dx10clamp_nnan_src:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[A]], 0, 1.0
+define amdgpu_kernel void @v_clamp_f32_snan_no_dx10clamp_nnan_src(float addrspace(1)* %out, float addrspace(1)* %aptr) #4 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %add = fadd nnan float %a, 1.0
+ %max = call float @llvm.maxnum.f32(float %add, float 0.0)
+ %med = call float @llvm.minnum.f32(float %max, float 1.0)
+
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_med3_aby_f32_no_dx10_clamp:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e64 v{{[0-9]+}}, [[A]], [[A]] clamp{{$}}
+define amdgpu_kernel void @v_clamp_med3_aby_f32_no_dx10_clamp(float addrspace(1)* %out, float addrspace(1)* %aptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 1.0, float %a)
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_med3_bay_f32_no_dx10_clamp:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e64 v{{[0-9]+}}, [[A]], [[A]] clamp{{$}}
+define amdgpu_kernel void @v_clamp_med3_bay_f32_no_dx10_clamp(float addrspace(1)* %out, float addrspace(1)* %aptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %med = call float @llvm.amdgcn.fmed3.f32(float 1.0, float 0.0, float %a)
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_med3_yab_f32_no_dx10_clamp:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[A]], 0, 1.0
+define amdgpu_kernel void @v_clamp_med3_yab_f32_no_dx10_clamp(float addrspace(1)* %out, float addrspace(1)* %aptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %med = call float @llvm.amdgcn.fmed3.f32(float %a, float 0.0, float 1.0)
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_med3_yba_f32_no_dx10_clamp:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[A]], 1.0, 0
+define amdgpu_kernel void @v_clamp_med3_yba_f32_no_dx10_clamp(float addrspace(1)* %out, float addrspace(1)* %aptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %med = call float @llvm.amdgcn.fmed3.f32(float %a, float 1.0, float 0.0)
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_med3_ayb_f32_no_dx10_clamp:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, 0, [[A]], 1.0
+define amdgpu_kernel void @v_clamp_med3_ayb_f32_no_dx10_clamp(float addrspace(1)* %out, float addrspace(1)* %aptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float %a, float 1.0)
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_med3_bya_f32_no_dx10_clamp:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, 1.0, [[A]], 0
+define amdgpu_kernel void @v_clamp_med3_bya_f32_no_dx10_clamp(float addrspace(1)* %out, float addrspace(1)* %aptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %med = call float @llvm.amdgcn.fmed3.f32(float 1.0, float %a, float 0.0)
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_constant_qnan_f32_no_dx10_clamp:
+; GCN: v_mov_b32_e32 v{{[0-9]+}}, 0x7fc00000
+define amdgpu_kernel void @v_clamp_constant_qnan_f32_no_dx10_clamp(float addrspace(1)* %out) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 1.0, float 0x7FF8000000000000)
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_clamp_constant_snan_f32_no_dx10_clamp:
+; GCN: v_mov_b32_e32 v{{[0-9]+}}, 0x7f800001
+define amdgpu_kernel void @v_clamp_constant_snan_f32_no_dx10_clamp(float addrspace(1)* %out) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %med = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 1.0, float bitcast (i32 2139095041 to float))
+ store float %med, float addrspace(1)* %out.gep
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+declare float @llvm.fabs.f32(float) #1
+declare float @llvm.minnum.f32(float, float) #1
+declare float @llvm.maxnum.f32(float, float) #1
+declare float @llvm.amdgcn.fmed3.f32(float, float, float) #1
+declare double @llvm.fabs.f64(double) #1
+declare double @llvm.minnum.f64(double, double) #1
+declare double @llvm.maxnum.f64(double, double) #1
+declare half @llvm.fabs.f16(half) #1
+declare half @llvm.minnum.f16(half, half) #1
+declare half @llvm.maxnum.f16(half, half) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind "target-features"="-dx10-clamp,-fp-exceptions" "no-nans-fp-math"="false" }
+attributes #3 = { nounwind "target-features"="+dx10-clamp,+fp-exceptions" "no-nans-fp-math"="false" }
+attributes #4 = { nounwind "target-features"="-dx10-clamp,+fp-exceptions" "no-nans-fp-math"="false" }
diff --git a/test/CodeGen/AMDGPU/cndmask-no-def-vcc.ll b/test/CodeGen/AMDGPU/cndmask-no-def-vcc.ll
index 9b4b61cf728a..208d97feb642 100644
--- a/test/CodeGen/AMDGPU/cndmask-no-def-vcc.ll
+++ b/test/CodeGen/AMDGPU/cndmask-no-def-vcc.ll
@@ -8,7 +8,7 @@ declare i1 @llvm.amdgcn.class.f32(float, i32)
; GCN: v_cmp_eq_u32_e64 vcc, s{{[0-9]+}}, 0{{$}}
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}, vcc
; GCN: v_cndmask_b32_e64 v0, 0, 1, s{{\[[0-9]+:[0-9]+\]}}
-define void @vcc_shrink_vcc_def(float %arg, i32 %arg1, float %arg2, i32 %arg3) {
+define amdgpu_kernel void @vcc_shrink_vcc_def(float %arg, i32 %arg1, float %arg2, i32 %arg3) {
bb0:
%tmp = icmp sgt i32 %arg1, 4
%c = icmp eq i32 %arg3, 0
@@ -35,7 +35,7 @@ bb2:
; GCN-NOT: vcc
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}, vcc
; GCN: v_cndmask_b32_e64 v0, 0, 1, s{{\[[0-9]+:[0-9]+\]}}
-define void @preserve_condition_undef_flag(float %arg, i32 %arg1, float %arg2) {
+define amdgpu_kernel void @preserve_condition_undef_flag(float %arg, i32 %arg1, float %arg2) {
bb0:
%tmp = icmp sgt i32 %arg1, 4
%undef = call i1 @llvm.amdgcn.class.f32(float undef, i32 undef)
diff --git a/test/CodeGen/AMDGPU/coalescer-subrange-crash.ll b/test/CodeGen/AMDGPU/coalescer-subrange-crash.ll
index 7ff133b86e72..ef1b3d25f883 100644
--- a/test/CodeGen/AMDGPU/coalescer-subrange-crash.ll
+++ b/test/CodeGen/AMDGPU/coalescer-subrange-crash.ll
@@ -1,5 +1,4 @@
-; RUN: llc -march=amdgcn < %s | FileCheck %s
-; REQUIRES: asserts
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
;
; This testcase used to cause the following crash:
;
@@ -18,14 +17,16 @@
;
; Test for a valid output:
; CHECK: image_sample_c_d_o
-
-target triple = "amdgcn--"
-
define amdgpu_ps <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @main([17 x <16 x i8>] addrspace(2)* byval dereferenceable(18446744073709551615) %arg, [16 x <16 x i8>] addrspace(2)* byval dereferenceable(18446744073709551615) %arg1, [32 x <8 x i32>] addrspace(2)* byval dereferenceable(18446744073709551615) %arg2, [16 x <8 x i32>] addrspace(2)* byval dereferenceable(18446744073709551615) %arg3, [16 x <4 x i32>] addrspace(2)* byval dereferenceable(18446744073709551615) %arg4, float inreg %arg5, i32 inreg %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <3 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, <2 x i32> %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, i32 %arg19, i32 %arg20, float %arg21, i32 %arg22) #0 {
main_body:
- %tmp = call float @llvm.SI.fs.interp(i32 3, i32 0, i32 %arg6, <2 x i32> %arg8)
- %tmp23 = fadd float %tmp, 0xBFA99999A0000000
- %tmp24 = fadd float %tmp, 0x3FA99999A0000000
+ %i.i = extractelement <2 x i32> %arg8, i32 0
+ %j.i = extractelement <2 x i32> %arg8, i32 1
+ %i.f.i = bitcast i32 %i.i to float
+ %j.f.i = bitcast i32 %j.i to float
+ %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 3, i32 0, i32 %arg6) #1
+ %p2.i = call float @llvm.amdgcn.interp.p2(float %p1.i, float %j.f.i, i32 3, i32 0, i32 %arg6) #1
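+ ; (The p1/p2 pair above is the two-stage interpolation that replaces the
+ ; old llvm.SI.fs.interp call; %arg8 carries the packed i/j coordinates.)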
+ %tmp23 = fadd float %p2.i, 0xBFA99999A0000000
+ %tmp24 = fadd float %p2.i, 0x3FA99999A0000000
%tmp25 = bitcast float %tmp23 to i32
%tmp26 = insertelement <16 x i32> <i32 212739, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>, i32 %tmp25, i32 1
%tmp27 = insertelement <16 x i32> %tmp26, i32 undef, i32 2
@@ -35,7 +36,8 @@ main_body:
%tmp31 = insertelement <16 x i32> %tmp30, i32 undef, i32 6
%tmp32 = insertelement <16 x i32> %tmp31, i32 undef, i32 7
%tmp33 = insertelement <16 x i32> %tmp32, i32 undef, i32 8
- %tmp34 = call <4 x float> @llvm.SI.image.sample.c.d.o.v16i32(<16 x i32> %tmp33, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %tmp33.bc = bitcast <16 x i32> %tmp33 to <16 x float>
+ %tmp34 = call <4 x float> @llvm.amdgcn.image.sample.c.d.o.v4f32.v16f32.v8i32(<16 x float> %tmp33.bc, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 true)
%tmp35 = extractelement <4 x float> %tmp34, i32 0
%tmp36 = bitcast float %tmp24 to i32
%tmp37 = insertelement <16 x i32> <i32 212739, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>, i32 %tmp36, i32 1
@@ -46,7 +48,8 @@ main_body:
%tmp42 = insertelement <16 x i32> %tmp41, i32 undef, i32 6
%tmp43 = insertelement <16 x i32> %tmp42, i32 undef, i32 7
%tmp44 = insertelement <16 x i32> %tmp43, i32 undef, i32 8
- %tmp45 = call <4 x float> @llvm.SI.image.sample.c.d.o.v16i32(<16 x i32> %tmp44, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %tmp44.bc = bitcast <16 x i32> %tmp44 to <16 x float>
+ %tmp45 = call <4 x float> @llvm.amdgcn.image.sample.c.d.o.v4f32.v16f32.v8i32(<16 x float> %tmp44.bc, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 true)
%tmp46 = extractelement <4 x float> %tmp45, i32 0
%tmp47 = fmul float %tmp35, %tmp46
%tmp48 = insertvalue <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> undef, float %tmp47, 14
@@ -54,9 +57,10 @@ main_body:
ret <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> %tmp49
}
-declare float @llvm.SI.fs.interp(i32, i32, i32, <2 x i32>) #1
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
-declare <4 x float> @llvm.SI.image.sample.c.d.o.v16i32(<16 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #1
+declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #1
+declare <4 x float> @llvm.amdgcn.image.sample.c.d.o.v4f32.v16f32.v8i32(<16 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
-attributes #0 = { "InitialPSInputAddr"="36983" "target-cpu"="tonga" }
+attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind readonly }
diff --git a/test/CodeGen/AMDGPU/coalescer_remat.ll b/test/CodeGen/AMDGPU/coalescer_remat.ll
index 4c7875c3a039..3e1b76a1df09 100644
--- a/test/CodeGen/AMDGPU/coalescer_remat.ll
+++ b/test/CodeGen/AMDGPU/coalescer_remat.ll
@@ -13,7 +13,7 @@ declare float @llvm.fma.f32(float, float, float)
; CHECK: v_mov_b32_e32 v{{[0-9]+}}, 0
; It's probably OK if this is slightly higher:
; CHECK: ; NumVgprs: 8
-define void @foobar(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in, i32 %flag) {
+define amdgpu_kernel void @foobar(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in, i32 %flag) {
entry:
%cmpflag = icmp eq i32 %flag, 1
br i1 %cmpflag, label %loop, label %exit
diff --git a/test/CodeGen/AMDGPU/code-object-metadata-deduce-ro-arg.ll b/test/CodeGen/AMDGPU/code-object-metadata-deduce-ro-arg.ll
new file mode 100644
index 000000000000..a33c3646e253
--- /dev/null
+++ b/test/CodeGen/AMDGPU/code-object-metadata-deduce-ro-arg.ll
@@ -0,0 +1,33 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -filetype=obj -o - < %s | llvm-readobj -amdgpu-code-object-metadata -elf-output-style=GNU -notes | FileCheck %s
+
+; CHECK: - Name: test_ro_arg
+; CHECK: Args:
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: GlobalBuffer
+; CHECK-NEXT: ValueType: F32
+; CHECK-NEXT: AccQual: ReadOnly
+; CHECK-NEXT: AddrSpaceQual: Global
+; CHECK-NEXT: IsConst: true
+; CHECK-NEXT: IsRestrict: true
+; CHECK-NEXT: TypeName: 'float*'
+
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: GlobalBuffer
+; CHECK-NEXT: ValueType: F32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Global
+; CHECK-NEXT: TypeName: 'float*'
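+; (Assumed deduction: the ReadOnly access qualifier for %in comes from its
+; noalias/readonly IR attributes, while IsConst/IsRestrict come from the
+; 'const restrict' entry in !3; %out has no such hints and gets defaults.)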
+
+define amdgpu_kernel void @test_ro_arg(float addrspace(1)* noalias readonly %in, float addrspace(1)* %out)
+ !kernel_arg_addr_space !0 !kernel_arg_access_qual !1 !kernel_arg_type !2
+ !kernel_arg_base_type !2 !kernel_arg_type_qual !3 {
+ ret void
+}
+
+!0 = !{i32 1, i32 1}
+!1 = !{!"none", !"none"}
+!2 = !{!"float*", !"float*"}
+!3 = !{!"const restrict", !""}
+
diff --git a/test/CodeGen/AMDGPU/code-object-metadata-from-llvm-ir-full.ll b/test/CodeGen/AMDGPU/code-object-metadata-from-llvm-ir-full.ll
new file mode 100644
index 000000000000..88ba310a92ca
--- /dev/null
+++ b/test/CodeGen/AMDGPU/code-object-metadata-from-llvm-ir-full.ll
@@ -0,0 +1,1260 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx700 -filetype=obj -o - < %s | llvm-readobj -amdgpu-code-object-metadata -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX700 --check-prefix=NOTES %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx800 -filetype=obj -o - < %s | llvm-readobj -amdgpu-code-object-metadata -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX800 --check-prefix=NOTES %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -filetype=obj -o - < %s | llvm-readobj -amdgpu-code-object-metadata -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX900 --check-prefix=NOTES %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx700 -amdgpu-dump-comd -amdgpu-verify-comd -filetype=obj -o - < %s 2>&1 | FileCheck --check-prefix=PARSER %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx800 -amdgpu-dump-comd -amdgpu-verify-comd -filetype=obj -o - < %s 2>&1 | FileCheck --check-prefix=PARSER %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -amdgpu-dump-comd -amdgpu-verify-comd -filetype=obj -o - < %s 2>&1 | FileCheck --check-prefix=PARSER %s
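+; Every kernel below also receives the hidden arguments the runtime expects
+; (HiddenGlobalOffsetX/Y/Z and, given the Printf entries checked below,
+; HiddenPrintfBuffer); they repeat verbatim in each Args list.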
+
+%struct.A = type { i8, float }
+%opencl.image1d_t = type opaque
+%opencl.image2d_t = type opaque
+%opencl.image3d_t = type opaque
+%opencl.queue_t = type opaque
+%opencl.pipe_t = type opaque
+%struct.B = type { i32 addrspace(1)*}
+%opencl.clk_event_t = type opaque
+
+; CHECK: ---
+; CHECK: Version: [ 1, 0 ]
+; CHECK: Printf: [ '1:1:4:%d\n', '2:1:8:%g\n' ]
+; CHECK: Kernels:
+
+; CHECK: - Name: test_char
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 1
+; CHECK-NEXT: Align: 1
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: char
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_char(i8 %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !9
+ !kernel_arg_base_type !9 !kernel_arg_type_qual !4 {
+ ret void
+}
+
+; CHECK: - Name: test_ushort2
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: U16
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: ushort2
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_ushort2(<2 x i16> %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !10
+ !kernel_arg_base_type !10 !kernel_arg_type_qual !4 {
+ ret void
+}
+
+; CHECK: - Name: test_int3
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 16
+; CHECK-NEXT: Align: 16
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: int3
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_int3(<3 x i32> %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !11
+ !kernel_arg_base_type !11 !kernel_arg_type_qual !4 {
+ ret void
+}
+
+; CHECK: - Name: test_ulong4
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 32
+; CHECK-NEXT: Align: 32
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: U64
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: ulong4
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_ulong4(<4 x i64> %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !12
+ !kernel_arg_base_type !12 !kernel_arg_type_qual !4 {
+ ret void
+}
+
+; CHECK: - Name: test_half8
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 16
+; CHECK-NEXT: Align: 16
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: F16
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: half8
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_half8(<8 x half> %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !13
+ !kernel_arg_base_type !13 !kernel_arg_type_qual !4 {
+ ret void
+}
+
+; CHECK: - Name: test_float16
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 64
+; CHECK-NEXT: Align: 64
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: F32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: float16
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_float16(<16 x float> %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !14
+ !kernel_arg_base_type !14 !kernel_arg_type_qual !4 {
+ ret void
+}
+
+; CHECK: - Name: test_double16
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 128
+; CHECK-NEXT: Align: 128
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: F64
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: double16
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_double16(<16 x double> %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !15
+ !kernel_arg_base_type !15 !kernel_arg_type_qual !4 {
+ ret void
+}
+
+; CHECK: - Name: test_pointer
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: GlobalBuffer
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Global
+; CHECK-NEXT: TypeName: 'int *'
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_pointer(i32 addrspace(1)* %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !16
+ !kernel_arg_base_type !16 !kernel_arg_type_qual !4 {
+ ret void
+}
+
+; CHECK: - Name: test_image
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: Image
+; CHECK-NEXT: ValueType: Struct
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Global
+; CHECK-NEXT: TypeName: image2d_t
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_image(%opencl.image2d_t addrspace(1)* %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !17
+ !kernel_arg_base_type !17 !kernel_arg_type_qual !4 {
+ ret void
+}
+
+; CHECK: - Name: test_sampler
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: Sampler
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: sampler_t
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_sampler(i32 %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !18
+ !kernel_arg_base_type !18 !kernel_arg_type_qual !4 {
+ ret void
+}
+
+; CHECK: - Name: test_queue
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: Queue
+; CHECK-NEXT: ValueType: Struct
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Global
+; CHECK-NEXT: TypeName: queue_t
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_queue(%opencl.queue_t addrspace(1)* %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !19
+ !kernel_arg_base_type !19 !kernel_arg_type_qual !4 {
+ ret void
+}
+
+; CHECK: - Name: test_struct
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: GlobalBuffer
+; CHECK-NEXT: ValueType: Struct
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Private
+; CHECK-NEXT: TypeName: struct A
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_struct(%struct.A* byval %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !20
+ !kernel_arg_base_type !20 !kernel_arg_type_qual !4 {
+ ret void
+}
+
+; CHECK: - Name: test_i128
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 16
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: Struct
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: i128
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_i128(i128 %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !21
+ !kernel_arg_base_type !21 !kernel_arg_type_qual !4 {
+ ret void
+}
+
+; CHECK: - Name: test_multi_arg
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: int
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: I16
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: short2
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: char3
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_multi_arg(i32 %a, <2 x i16> %b, <3 x i8> %c)
+ !kernel_arg_addr_space !22 !kernel_arg_access_qual !23 !kernel_arg_type !24
+ !kernel_arg_base_type !24 !kernel_arg_type_qual !25 {
+ ret void
+}
+
+; CHECK: - Name: test_addr_space
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: GlobalBuffer
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Global
+; CHECK-NEXT: TypeName: 'int *'
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: GlobalBuffer
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Constant
+; CHECK-NEXT: TypeName: 'int *'
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: DynamicSharedPointer
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: PointeeAlign: 4
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Local
+; CHECK-NEXT: TypeName: 'int *'
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_addr_space(i32 addrspace(1)* %g,
+ i32 addrspace(2)* %c,
+ i32 addrspace(3)* %l)
+ !kernel_arg_addr_space !50 !kernel_arg_access_qual !23 !kernel_arg_type !51
+ !kernel_arg_base_type !51 !kernel_arg_type_qual !25 {
+ ret void
+}
+
+; CHECK: - Name: test_type_qual
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: GlobalBuffer
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Global
+; CHECK-NEXT: IsVolatile: true
+; CHECK-NEXT: TypeName: 'int *'
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: GlobalBuffer
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Global
+; CHECK-NEXT: IsConst: true
+; CHECK-NEXT: IsRestrict: true
+; CHECK-NEXT: TypeName: 'int *'
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: Pipe
+; CHECK-NEXT: ValueType: Struct
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Global
+; CHECK-NEXT: IsPipe: true
+; CHECK-NEXT: TypeName: 'int *'
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_type_qual(i32 addrspace(1)* %a,
+ i32 addrspace(1)* %b,
+ %opencl.pipe_t addrspace(1)* %c)
+ !kernel_arg_addr_space !22 !kernel_arg_access_qual !23 !kernel_arg_type !51
+ !kernel_arg_base_type !51 !kernel_arg_type_qual !70 {
+ ret void
+}
+
+; CHECK: - Name: test_access_qual
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: Image
+; CHECK-NEXT: ValueType: Struct
+; CHECK-NEXT: AccQual: ReadOnly
+; CHECK-NEXT: AddrSpaceQual: Global
+; CHECK-NEXT: TypeName: image1d_t
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: Image
+; CHECK-NEXT: ValueType: Struct
+; CHECK-NEXT: AccQual: WriteOnly
+; CHECK-NEXT: AddrSpaceQual: Global
+; CHECK-NEXT: TypeName: image2d_t
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: Image
+; CHECK-NEXT: ValueType: Struct
+; CHECK-NEXT: AccQual: ReadWrite
+; CHECK-NEXT: AddrSpaceQual: Global
+; CHECK-NEXT: TypeName: image3d_t
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_access_qual(%opencl.image1d_t addrspace(1)* %ro,
+ %opencl.image2d_t addrspace(1)* %wo,
+ %opencl.image3d_t addrspace(1)* %rw)
+ !kernel_arg_addr_space !60 !kernel_arg_access_qual !61 !kernel_arg_type !62
+ !kernel_arg_base_type !62 !kernel_arg_type_qual !25 {
+ ret void
+}
+
+; CHECK: - Name: test_vec_type_hint_half
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Attrs:
+; CHECK-NEXT: VecTypeHint: half
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: int
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_vec_type_hint_half(i32 %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3
+ !kernel_arg_base_type !3 !kernel_arg_type_qual !4 !vec_type_hint !26 {
+ ret void
+}
+
+; CHECK: - Name: test_vec_type_hint_float
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Attrs:
+; CHECK-NEXT: VecTypeHint: float
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: int
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_vec_type_hint_float(i32 %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3
+ !kernel_arg_base_type !3 !kernel_arg_type_qual !4 !vec_type_hint !27 {
+ ret void
+}
+
+; CHECK: - Name: test_vec_type_hint_double
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Attrs:
+; CHECK-NEXT: VecTypeHint: double
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: int
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_vec_type_hint_double(i32 %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3
+ !kernel_arg_base_type !3 !kernel_arg_type_qual !4 !vec_type_hint !28 {
+ ret void
+}
+
+; CHECK: - Name: test_vec_type_hint_char
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Attrs:
+; CHECK-NEXT: VecTypeHint: char
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: int
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_vec_type_hint_char(i32 %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3
+ !kernel_arg_base_type !3 !kernel_arg_type_qual !4 !vec_type_hint !29 {
+ ret void
+}
+
+; CHECK: - Name: test_vec_type_hint_short
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Attrs:
+; CHECK-NEXT: VecTypeHint: short
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: int
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_vec_type_hint_short(i32 %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3
+ !kernel_arg_base_type !3 !kernel_arg_type_qual !4 !vec_type_hint !30 {
+ ret void
+}
+
+; CHECK: - Name: test_vec_type_hint_long
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Attrs:
+; CHECK-NEXT: VecTypeHint: long
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: int
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_vec_type_hint_long(i32 %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3
+ !kernel_arg_base_type !3 !kernel_arg_type_qual !4 !vec_type_hint !31 {
+ ret void
+}
+
+; CHECK: - Name: test_vec_type_hint_unknown
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Attrs:
+; CHECK-NEXT: VecTypeHint: unknown
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: int
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_vec_type_hint_unknown(i32 %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3
+ !kernel_arg_base_type !3 !kernel_arg_type_qual !4 !vec_type_hint !32 {
+ ret void
+}
+
+; CHECK: - Name: test_reqd_wgs_vec_type_hint
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Attrs:
+; CHECK-NEXT: ReqdWorkGroupSize: [ 1, 2, 4 ]
+; CHECK-NEXT: VecTypeHint: int
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: int
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_reqd_wgs_vec_type_hint(i32 %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3
+ !kernel_arg_base_type !3 !kernel_arg_type_qual !4 !vec_type_hint !5
+ !reqd_work_group_size !6 {
+ ret void
+}
+
+; CHECK: - Name: test_wgs_hint_vec_type_hint
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Attrs:
+; CHECK-NEXT: WorkGroupSizeHint: [ 8, 16, 32 ]
+; CHECK-NEXT: VecTypeHint: uint4
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: int
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_wgs_hint_vec_type_hint(i32 %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3
+ !kernel_arg_base_type !3 !kernel_arg_type_qual !4 !vec_type_hint !7
+ !work_group_size_hint !8 {
+ ret void
+}
+
+; CHECK: - Name: test_arg_ptr_to_ptr
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: GlobalBuffer
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Global
+; CHECK-NEXT: TypeName: 'int **'
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_arg_ptr_to_ptr(i32* addrspace(1)* %a)
+ !kernel_arg_addr_space !81 !kernel_arg_access_qual !2 !kernel_arg_type !80
+ !kernel_arg_base_type !80 !kernel_arg_type_qual !4 {
+ ret void
+}
+
+; CHECK: - Name: test_arg_struct_contains_ptr
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: GlobalBuffer
+; CHECK-NEXT: ValueType: Struct
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Private
+; CHECK-NEXT: TypeName: struct B
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_arg_struct_contains_ptr(%struct.B* byval %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !82
+ !kernel_arg_base_type !82 !kernel_arg_type_qual !4 {
+ ret void
+}
+
+; CHECK: - Name: test_arg_vector_of_ptr
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 16
+; CHECK-NEXT: Align: 16
+; CHECK-NEXT: ValueKind: ByValue
+; CHECK-NEXT: ValueType: I32
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: TypeName: 'global int* __attribute__((ext_vector_type(2)))'
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_arg_vector_of_ptr(<2 x i32 addrspace(1)*> %a)
+ !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !83
+ !kernel_arg_base_type !83 !kernel_arg_type_qual !4 {
+ ret void
+}
+
+; CHECK: - Name: test_arg_unknown_builtin_type
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: GlobalBuffer
+; CHECK-NEXT: ValueType: Struct
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Global
+; CHECK-NEXT: TypeName: clk_event_t
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_arg_unknown_builtin_type(
+ %opencl.clk_event_t addrspace(1)* %a)
+ !kernel_arg_addr_space !81 !kernel_arg_access_qual !2 !kernel_arg_type !84
+ !kernel_arg_base_type !84 !kernel_arg_type_qual !4 {
+ ret void
+}
+
+; CHECK: - Name: test_pointee_align
+; CHECK-NEXT: Language: OpenCL C
+; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: GlobalBuffer
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Global
+; CHECK-NEXT: TypeName: 'long *'
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: DynamicSharedPointer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: PointeeAlign: 1
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Local
+; CHECK-NEXT: TypeName: 'char *'
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: DynamicSharedPointer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: PointeeAlign: 2
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Local
+; CHECK-NEXT: TypeName: 'char2 *'
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: DynamicSharedPointer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: PointeeAlign: 4
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Local
+; CHECK-NEXT: TypeName: 'char3 *'
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: DynamicSharedPointer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: PointeeAlign: 4
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Local
+; CHECK-NEXT: TypeName: 'char4 *'
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: DynamicSharedPointer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: PointeeAlign: 8
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Local
+; CHECK-NEXT: TypeName: 'char8 *'
+; CHECK-NEXT: - Size: 4
+; CHECK-NEXT: Align: 4
+; CHECK-NEXT: ValueKind: DynamicSharedPointer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: PointeeAlign: 16
+; CHECK-NEXT: AccQual: Default
+; CHECK-NEXT: AddrSpaceQual: Local
+; CHECK-NEXT: TypeName: 'char16 *'
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: ValueType: I64
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenPrintfBuffer
+; CHECK-NEXT: ValueType: I8
+; CHECK-NEXT: AddrSpaceQual: Global
+define amdgpu_kernel void @test_pointee_align(i64 addrspace(1)* %a,
+ i8 addrspace(3)* %b,
+ <2 x i8> addrspace(3)* %c,
+ <3 x i8> addrspace(3)* %d,
+ <4 x i8> addrspace(3)* %e,
+ <8 x i8> addrspace(3)* %f,
+ <16 x i8> addrspace(3)* %g)
+ !kernel_arg_addr_space !91 !kernel_arg_access_qual !92 !kernel_arg_type !93
+ !kernel_arg_base_type !93 !kernel_arg_type_qual !94 {
+ ret void
+}
+
+!llvm.printf.fmts = !{!100, !101}
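+
+; Note (our reading of the output, not something the test itself asserts):
+; the HiddenPrintfBuffer argument emitted for every kernel above is tied to
+; the !llvm.printf.fmts module metadata declared here.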
+
+!1 = !{i32 0}
+!2 = !{!"none"}
+!3 = !{!"int"}
+!4 = !{!""}
+!5 = !{i32 undef, i32 1}
+!6 = !{i32 1, i32 2, i32 4}
+!7 = !{<4 x i32> undef, i32 0}
+!8 = !{i32 8, i32 16, i32 32}
+!9 = !{!"char"}
+!10 = !{!"ushort2"}
+!11 = !{!"int3"}
+!12 = !{!"ulong4"}
+!13 = !{!"half8"}
+!14 = !{!"float16"}
+!15 = !{!"double16"}
+!16 = !{!"int *"}
+!17 = !{!"image2d_t"}
+!18 = !{!"sampler_t"}
+!19 = !{!"queue_t"}
+!20 = !{!"struct A"}
+!21 = !{!"i128"}
+!22 = !{i32 0, i32 0, i32 0}
+!23 = !{!"none", !"none", !"none"}
+!24 = !{!"int", !"short2", !"char3"}
+!25 = !{!"", !"", !""}
+!26 = !{half undef, i32 1}
+!27 = !{float undef, i32 1}
+!28 = !{double undef, i32 1}
+!29 = !{i8 undef, i32 1}
+!30 = !{i16 undef, i32 1}
+!31 = !{i64 undef, i32 1}
+!32 = !{i32 *undef, i32 1}
+!50 = !{i32 1, i32 2, i32 3}
+!51 = !{!"int *", !"int *", !"int *"}
+!60 = !{i32 1, i32 1, i32 1}
+!61 = !{!"read_only", !"write_only", !"read_write"}
+!62 = !{!"image1d_t", !"image2d_t", !"image3d_t"}
+!70 = !{!"volatile", !"const restrict", !"pipe"}
+!80 = !{!"int **"}
+!81 = !{i32 1}
+!82 = !{!"struct B"}
+!83 = !{!"global int* __attribute__((ext_vector_type(2)))"}
+!84 = !{!"clk_event_t"}
+!opencl.ocl.version = !{!90}
+!90 = !{i32 2, i32 0}
+!91 = !{i32 0, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3}
+!92 = !{!"none", !"none", !"none", !"none", !"none", !"none", !"none"}
+!93 = !{!"long *", !"char *", !"char2 *", !"char3 *", !"char4 *", !"char8 *", !"char16 *"}
+!94 = !{!"", !"", !"", !"", !"", !"", !""}
+!100 = !{!"1:1:4:%d\5Cn"}
+!101 = !{!"2:1:8:%g\5Cn"}
+
+; NOTES: Displaying notes found at file offset 0x{{[0-9]+}}
+; NOTES-NEXT: Owner Data size Description
+; NOTES-NEXT: AMD 0x00000008 Unknown note type: (0x00000001)
+; NOTES-NEXT: AMD 0x0000001b Unknown note type: (0x00000003)
+; GFX700: AMD 0x00009171 Unknown note type: (0x0000000a)
+; GFX800: AMD 0x00009190 Unknown note type: (0x0000000a)
+; GFX900: AMD 0x00009171 Unknown note type: (0x0000000a)
+
+; PARSER: AMDGPU Code Object Metadata Parser Test: PASS
diff --git a/test/CodeGen/AMDGPU/code-object-metadata-invalid-ocl-version-1.ll b/test/CodeGen/AMDGPU/code-object-metadata-invalid-ocl-version-1.ll
new file mode 100644
index 000000000000..f41da9f92136
--- /dev/null
+++ b/test/CodeGen/AMDGPU/code-object-metadata-invalid-ocl-version-1.ll
@@ -0,0 +1,9 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -filetype=obj -o - < %s | llvm-readobj -amdgpu-code-object-metadata | FileCheck %s
+
+; Make sure llc does not crash on invalid OpenCL version metadata.
+
+; CHECK: ---
+; CHECK: Version: [ 1, 0 ]
+; CHECK: ...
+
+!opencl.ocl.version = !{}
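+; (Here the version tuple is empty; the -2 and -3 variants below cover an
+; empty metadata node and a single-operand node, respectively.)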
diff --git a/test/CodeGen/AMDGPU/code-object-metadata-invalid-ocl-version-2.ll b/test/CodeGen/AMDGPU/code-object-metadata-invalid-ocl-version-2.ll
new file mode 100644
index 000000000000..0509663d9849
--- /dev/null
+++ b/test/CodeGen/AMDGPU/code-object-metadata-invalid-ocl-version-2.ll
@@ -0,0 +1,10 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -filetype=obj -o - < %s | llvm-readobj -amdgpu-code-object-metadata | FileCheck %s
+
+; Make sure llc does not crash on invalid OpenCL version metadata.
+
+; CHECK: ---
+; CHECK: Version: [ 1, 0 ]
+; CHECK: ...
+
+!opencl.ocl.version = !{!0}
+!0 = !{}
diff --git a/test/CodeGen/AMDGPU/code-object-metadata-invalid-ocl-version-3.ll b/test/CodeGen/AMDGPU/code-object-metadata-invalid-ocl-version-3.ll
new file mode 100644
index 000000000000..7404cec5d78a
--- /dev/null
+++ b/test/CodeGen/AMDGPU/code-object-metadata-invalid-ocl-version-3.ll
@@ -0,0 +1,10 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -filetype=obj -o - < %s | llvm-readobj -amdgpu-code-object-metadata | FileCheck %s
+
+; Make sure llc does not crash on invalid OpenCL version metadata.
+
+; CHECK: ---
+; CHECK: Version: [ 1, 0 ]
+; CHECK: ...
+
+!opencl.ocl.version = !{!0}
+!0 = !{i32 1}
diff --git a/test/CodeGen/AMDGPU/code-object-metadata-kernel-code-props.ll b/test/CodeGen/AMDGPU/code-object-metadata-kernel-code-props.ll
new file mode 100644
index 000000000000..3b232e40cf25
--- /dev/null
+++ b/test/CodeGen/AMDGPU/code-object-metadata-kernel-code-props.ll
@@ -0,0 +1,32 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx700 -filetype=obj -o - < %s | llvm-readobj -amdgpu-code-object-metadata -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX700 --check-prefix=NOTES %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx800 -filetype=obj -o - < %s | llvm-readobj -amdgpu-code-object-metadata -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX800 --check-prefix=NOTES %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -filetype=obj -o - < %s | llvm-readobj -amdgpu-code-object-metadata -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX900 --check-prefix=NOTES %s
+
+; CHECK: ---
+; CHECK: Version: [ 1, 0 ]
+
+; CHECK: Kernels:
+; CHECK: - Name: test
+; CHECK: CodeProps:
+; CHECK: KernargSegmentSize: 24
+; GFX700: WavefrontNumSGPRs: 6
+; GFX800: WavefrontNumSGPRs: 96
+; GFX900: WavefrontNumSGPRs: 6
+; GFX700: WorkitemNumVGPRs: 4
+; GFX800: WorkitemNumVGPRs: 6
+; GFX900: WorkitemNumVGPRs: 6
+; CHECK: KernargSegmentAlign: 4
+; CHECK: GroupSegmentAlign: 4
+; CHECK: PrivateSegmentAlign: 4
+; CHECK: WavefrontSize: 6
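+; Note (assumption, not asserted by the test): the Align and WavefrontSize
+; values above appear to be log2-encoded at this metadata version, so
+; WavefrontSize: 6 would correspond to 2^6 = 64 lanes.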
+define amdgpu_kernel void @test(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fadd half %a.val, %b.val
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
diff --git a/test/CodeGen/AMDGPU/code-object-metadata-kernel-debug-props.ll b/test/CodeGen/AMDGPU/code-object-metadata-kernel-debug-props.ll
new file mode 100644
index 000000000000..801029be8cb9
--- /dev/null
+++ b/test/CodeGen/AMDGPU/code-object-metadata-kernel-debug-props.ll
@@ -0,0 +1,67 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx700 -filetype=obj -o - < %s | llvm-readobj -amdgpu-code-object-metadata -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX700 --check-prefix=NOTES %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx800 -filetype=obj -o - < %s | llvm-readobj -amdgpu-code-object-metadata -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX800 --check-prefix=NOTES %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -filetype=obj -o - < %s | llvm-readobj -amdgpu-code-object-metadata -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX900 --check-prefix=NOTES %s
+
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
+
+; CHECK: ---
+; CHECK: Version: [ 1, 0 ]
+
+; CHECK: Kernels:
+; CHECK: - Name: test
+; CHECK: DebugProps:
+; CHECK: DebuggerABIVersion: [ 1, 0 ]
+; CHECK: ReservedNumVGPRs: 4
+; CHECK: ReservedFirstVGPR: 11
+; CHECK: PrivateSegmentBufferSGPR: 0
+; CHECK: WavefrontPrivateSegmentOffsetSGPR: 11
+define amdgpu_kernel void @test(i32 addrspace(1)* %A) #0 !dbg !7 !kernel_arg_addr_space !12 !kernel_arg_access_qual !13 !kernel_arg_type !14 !kernel_arg_base_type !14 !kernel_arg_type_qual !15 {
+entry:
+ %A.addr = alloca i32 addrspace(1)*, align 4
+ store i32 addrspace(1)* %A, i32 addrspace(1)** %A.addr, align 4
+ call void @llvm.dbg.declare(metadata i32 addrspace(1)** %A.addr, metadata !16, metadata !17), !dbg !18
+ %0 = load i32 addrspace(1)*, i32 addrspace(1)** %A.addr, align 4, !dbg !19
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 0, !dbg !19
+ store i32 777, i32 addrspace(1)* %arrayidx, align 4, !dbg !20
+ %1 = load i32 addrspace(1)*, i32 addrspace(1)** %A.addr, align 4, !dbg !21
+ %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %1, i64 1, !dbg !21
+ store i32 888, i32 addrspace(1)* %arrayidx1, align 4, !dbg !22
+ %2 = load i32 addrspace(1)*, i32 addrspace(1)** %A.addr, align 4, !dbg !23
+ %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %2, i64 2, !dbg !23
+ store i32 999, i32 addrspace(1)* %arrayidx2, align 4, !dbg !24
+ ret void, !dbg !25
+}
+
+attributes #0 = { noinline nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="gfx800" "target-features"="+16-bit-insts,+amdgpu-debugger-emit-prologue,+amdgpu-debugger-insert-nops,+amdgpu-debugger-reserve-regs,+dpp,+fp64-fp16-denormals,+s-memrealtime,-fp32-denormals" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!opencl.ocl.version = !{!3}
+!llvm.module.flags = !{!4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "code-object-metadata-kernel-debug-props.cl", directory: "/some/random/directory")
+!2 = !{}
+!3 = !{i32 1, i32 0}
+!4 = !{i32 2, !"Dwarf Version", i32 2}
+!5 = !{i32 2, !"Debug Info Version", i32 3}
+!6 = !{!"clang version 5.0.0"}
+!7 = distinct !DISubprogram(name: "test", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!8 = !DISubroutineType(types: !9)
+!9 = !{null, !10}
+!10 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !11, size: 64)
+!11 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!12 = !{i32 1}
+!13 = !{!"none"}
+!14 = !{!"int*"}
+!15 = !{!""}
+!16 = !DILocalVariable(name: "A", arg: 1, scope: !7, file: !1, line: 1, type: !10)
+!17 = !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)
+!18 = !DILocation(line: 1, column: 30, scope: !7)
+!19 = !DILocation(line: 2, column: 3, scope: !7)
+!20 = !DILocation(line: 2, column: 8, scope: !7)
+!21 = !DILocation(line: 3, column: 3, scope: !7)
+!22 = !DILocation(line: 3, column: 8, scope: !7)
+!23 = !DILocation(line: 4, column: 3, scope: !7)
+!24 = !DILocation(line: 4, column: 8, scope: !7)
+!25 = !DILocation(line: 5, column: 1, scope: !7)
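+
+; Note (assumption, not asserted by this test): the DIExpression at !17
+; (DW_OP_constu 1, DW_OP_swap, DW_OP_xderef) directs the debugger to read
+; the variable through a dereference in target address space 1.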
diff --git a/test/CodeGen/AMDGPU/codegen-prepare-addrmode-sext.ll b/test/CodeGen/AMDGPU/codegen-prepare-addrmode-sext.ll
index 585172092676..155de5353bcb 100644
--- a/test/CodeGen/AMDGPU/codegen-prepare-addrmode-sext.ll
+++ b/test/CodeGen/AMDGPU/codegen-prepare-addrmode-sext.ll
@@ -8,7 +8,7 @@
; SI-LLC-LABEL: {{^}}test:
; SI-LLC: s_mul_i32
; SI-LLC-NOT: mul
-define void @test(i8 addrspace(1)* nocapture readonly %in, i32 %a, i8 %b) {
+define amdgpu_kernel void @test(i8 addrspace(1)* nocapture readonly %in, i32 %a, i8 %b) {
entry:
%0 = mul nsw i32 %a, 3
%1 = sext i32 %0 to i64
diff --git a/test/CodeGen/AMDGPU/combine_vloads.ll b/test/CodeGen/AMDGPU/combine_vloads.ll
index 01572afa6205..f8d4e01085c2 100644
--- a/test/CodeGen/AMDGPU/combine_vloads.ll
+++ b/test/CodeGen/AMDGPU/combine_vloads.ll
@@ -12,7 +12,7 @@
; EG-LABEL: {{^}}combine_vloads:
; EG: VTX_READ_128
; EG: VTX_READ_128
-define void @combine_vloads(<8 x i8> addrspace(1)* nocapture %src, <8 x i8> addrspace(1)* nocapture %result) nounwind {
+define amdgpu_kernel void @combine_vloads(<8 x i8> addrspace(1)* nocapture %src, <8 x i8> addrspace(1)* nocapture %result) nounwind {
entry:
br label %for.body
diff --git a/test/CodeGen/AMDGPU/commute-compares.ll b/test/CodeGen/AMDGPU/commute-compares.ll
index a4c51b233f41..973c4544d97a 100644
--- a/test/CodeGen/AMDGPU/commute-compares.ll
+++ b/test/CodeGen/AMDGPU/commute-compares.ll
@@ -8,7 +8,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() #0
; GCN-LABEL: {{^}}commute_eq_64_i32:
; GCN: v_cmp_eq_u32_e32 vcc, 64, v{{[0-9]+}}
-define void @commute_eq_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_eq_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -21,7 +21,7 @@ define void @commute_eq_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1
; GCN-LABEL: {{^}}commute_ne_64_i32:
; GCN: v_cmp_ne_u32_e32 vcc, 64, v{{[0-9]+}}
-define void @commute_ne_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ne_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -36,7 +36,7 @@ define void @commute_ne_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1
; GCN-LABEL: {{^}}commute_ne_litk_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x3039
; GCN: v_cmp_ne_u32_e32 vcc, [[K]], v{{[0-9]+}}
-define void @commute_ne_litk_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ne_litk_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -49,7 +49,7 @@ define void @commute_ne_litk_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
; GCN-LABEL: {{^}}commute_ugt_64_i32:
; GCN: v_cmp_lt_u32_e32 vcc, 64, v{{[0-9]+}}
-define void @commute_ugt_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ugt_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -62,7 +62,7 @@ define void @commute_ugt_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #
; GCN-LABEL: {{^}}commute_uge_64_i32:
; GCN: v_cmp_lt_u32_e32 vcc, 63, v{{[0-9]+}}
-define void @commute_uge_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_uge_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -75,7 +75,7 @@ define void @commute_uge_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #
; GCN-LABEL: {{^}}commute_ult_64_i32:
; GCN: v_cmp_gt_u32_e32 vcc, 64, v{{[0-9]+}}
-define void @commute_ult_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ult_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -88,7 +88,7 @@ define void @commute_ult_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #
; GCN-LABEL: {{^}}commute_ule_63_i32:
; GCN: v_cmp_gt_u32_e32 vcc, 64, v{{[0-9]+}}
-define void @commute_ule_63_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ule_63_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -104,7 +104,7 @@ define void @commute_ule_63_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #
; GCN-LABEL: {{^}}commute_ule_64_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x41{{$}}
; GCN: v_cmp_gt_u32_e32 vcc, [[K]], v{{[0-9]+}}
-define void @commute_ule_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ule_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -117,7 +117,7 @@ define void @commute_ule_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #
; GCN-LABEL: {{^}}commute_sgt_neg1_i32:
; GCN: v_cmp_lt_i32_e32 vcc, -1, v{{[0-9]+}}
-define void @commute_sgt_neg1_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_sgt_neg1_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -130,7 +130,7 @@ define void @commute_sgt_neg1_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
; GCN-LABEL: {{^}}commute_sge_neg2_i32:
; GCN: v_cmp_lt_i32_e32 vcc, -3, v{{[0-9]+}}
-define void @commute_sge_neg2_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_sge_neg2_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -143,7 +143,7 @@ define void @commute_sge_neg2_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
; GCN-LABEL: {{^}}commute_slt_neg16_i32:
; GCN: v_cmp_gt_i32_e32 vcc, -16, v{{[0-9]+}}
-define void @commute_slt_neg16_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_slt_neg16_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -156,7 +156,7 @@ define void @commute_slt_neg16_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in
; GCN-LABEL: {{^}}commute_sle_5_i32:
; GCN: v_cmp_gt_i32_e32 vcc, 6, v{{[0-9]+}}
-define void @commute_sle_5_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_sle_5_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -173,7 +173,7 @@ define void @commute_sle_5_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1
; GCN-LABEL: {{^}}commute_eq_64_i64:
; GCN: v_cmp_eq_u64_e32 vcc, 64, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_eq_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_eq_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -186,7 +186,7 @@ define void @commute_eq_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1
; GCN-LABEL: {{^}}commute_ne_64_i64:
; GCN: v_cmp_ne_u64_e32 vcc, 64, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_ne_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ne_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -199,7 +199,7 @@ define void @commute_ne_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1
; GCN-LABEL: {{^}}commute_ugt_64_i64:
; GCN: v_cmp_lt_u64_e32 vcc, 64, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_ugt_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ugt_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -212,7 +212,7 @@ define void @commute_ugt_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #
; GCN-LABEL: {{^}}commute_uge_64_i64:
; GCN: v_cmp_lt_u64_e32 vcc, 63, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_uge_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_uge_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -225,7 +225,7 @@ define void @commute_uge_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #
; GCN-LABEL: {{^}}commute_ult_64_i64:
; GCN: v_cmp_gt_u64_e32 vcc, 64, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_ult_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ult_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -238,7 +238,7 @@ define void @commute_ult_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #
; GCN-LABEL: {{^}}commute_ule_63_i64:
; GCN: v_cmp_gt_u64_e32 vcc, 64, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_ule_63_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ule_63_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -254,7 +254,7 @@ define void @commute_ule_63_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #
; GCN-LABEL: {{^}}commute_ule_64_i64:
; GCN-DAG: s_movk_i32 s[[KLO:[0-9]+]], 0x41{{$}}
; GCN: v_cmp_gt_u64_e32 vcc, s{{\[}}[[KLO]]:{{[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_ule_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ule_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -267,7 +267,7 @@ define void @commute_ule_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #
; GCN-LABEL: {{^}}commute_sgt_neg1_i64:
; GCN: v_cmp_lt_i64_e32 vcc, -1, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_sgt_neg1_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_sgt_neg1_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -280,7 +280,7 @@ define void @commute_sgt_neg1_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in)
; GCN-LABEL: {{^}}commute_sge_neg2_i64:
; GCN: v_cmp_lt_i64_e32 vcc, -3, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_sge_neg2_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_sge_neg2_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -293,7 +293,7 @@ define void @commute_sge_neg2_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in)
; GCN-LABEL: {{^}}commute_slt_neg16_i64:
; GCN: v_cmp_gt_i64_e32 vcc, -16, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_slt_neg16_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_slt_neg16_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -306,7 +306,7 @@ define void @commute_slt_neg16_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in
; GCN-LABEL: {{^}}commute_sle_5_i64:
; GCN: v_cmp_gt_i64_e32 vcc, 6, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_sle_5_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_sle_5_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -324,7 +324,7 @@ define void @commute_sle_5_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1
; GCN-LABEL: {{^}}commute_oeq_2.0_f32:
; GCN: v_cmp_eq_f32_e32 vcc, 2.0, v{{[0-9]+}}
-define void @commute_oeq_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_oeq_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -338,7 +338,7 @@ define void @commute_oeq_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in
; GCN-LABEL: {{^}}commute_ogt_2.0_f32:
; GCN: v_cmp_lt_f32_e32 vcc, 2.0, v{{[0-9]+}}
-define void @commute_ogt_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ogt_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -351,7 +351,7 @@ define void @commute_ogt_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in
; GCN-LABEL: {{^}}commute_oge_2.0_f32:
; GCN: v_cmp_le_f32_e32 vcc, 2.0, v{{[0-9]+}}
-define void @commute_oge_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_oge_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -364,7 +364,7 @@ define void @commute_oge_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in
; GCN-LABEL: {{^}}commute_olt_2.0_f32:
; GCN: v_cmp_gt_f32_e32 vcc, 2.0, v{{[0-9]+}}
-define void @commute_olt_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_olt_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -377,7 +377,7 @@ define void @commute_olt_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in
; GCN-LABEL: {{^}}commute_ole_2.0_f32:
; GCN: v_cmp_ge_f32_e32 vcc, 2.0, v{{[0-9]+}}
-define void @commute_ole_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ole_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -390,7 +390,7 @@ define void @commute_ole_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in
; GCN-LABEL: {{^}}commute_one_2.0_f32:
; GCN: v_cmp_lg_f32_e32 vcc, 2.0, v{{[0-9]+}}
-define void @commute_one_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_one_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -403,7 +403,7 @@ define void @commute_one_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in
; GCN-LABEL: {{^}}commute_ord_2.0_f32:
; GCN: v_cmp_o_f32_e32 vcc, [[REG:v[0-9]+]], [[REG]]
-define void @commute_ord_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ord_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -416,7 +416,7 @@ define void @commute_ord_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in
; GCN-LABEL: {{^}}commute_ueq_2.0_f32:
; GCN: v_cmp_nlg_f32_e32 vcc, 2.0, v{{[0-9]+}}
-define void @commute_ueq_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ueq_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -429,7 +429,7 @@ define void @commute_ueq_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in
; GCN-LABEL: {{^}}commute_ugt_2.0_f32:
; GCN: v_cmp_nge_f32_e32 vcc, 2.0, v{{[0-9]+}}
-define void @commute_ugt_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ugt_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -442,7 +442,7 @@ define void @commute_ugt_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in
; GCN-LABEL: {{^}}commute_uge_2.0_f32:
; GCN: v_cmp_ngt_f32_e32 vcc, 2.0, v{{[0-9]+}}
-define void @commute_uge_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_uge_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -455,7 +455,7 @@ define void @commute_uge_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in
; GCN-LABEL: {{^}}commute_ult_2.0_f32:
; GCN: v_cmp_nle_f32_e32 vcc, 2.0, v{{[0-9]+}}
-define void @commute_ult_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ult_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -468,7 +468,7 @@ define void @commute_ult_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in
; GCN-LABEL: {{^}}commute_ule_2.0_f32:
; GCN: v_cmp_nlt_f32_e32 vcc, 2.0, v{{[0-9]+}}
-define void @commute_ule_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ule_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -481,7 +481,7 @@ define void @commute_ule_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in
; GCN-LABEL: {{^}}commute_une_2.0_f32:
; GCN: v_cmp_neq_f32_e32 vcc, 2.0, v{{[0-9]+}}
-define void @commute_une_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_une_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -494,7 +494,7 @@ define void @commute_une_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in
; GCN-LABEL: {{^}}commute_uno_2.0_f32:
; GCN: v_cmp_u_f32_e32 vcc, [[REG:v[0-9]+]], [[REG]]
-define void @commute_uno_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_uno_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -512,7 +512,7 @@ define void @commute_uno_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in
; GCN-LABEL: {{^}}commute_oeq_2.0_f64:
; GCN: v_cmp_eq_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_oeq_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_oeq_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -526,7 +526,7 @@ define void @commute_oeq_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %i
; GCN-LABEL: {{^}}commute_ogt_2.0_f64:
; GCN: v_cmp_lt_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_ogt_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ogt_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -539,7 +539,7 @@ define void @commute_ogt_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %i
; GCN-LABEL: {{^}}commute_oge_2.0_f64:
; GCN: v_cmp_le_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_oge_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_oge_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -552,7 +552,7 @@ define void @commute_oge_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %i
; GCN-LABEL: {{^}}commute_olt_2.0_f64:
; GCN: v_cmp_gt_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_olt_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_olt_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -565,7 +565,7 @@ define void @commute_olt_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %i
; GCN-LABEL: {{^}}commute_ole_2.0_f64:
; GCN: v_cmp_ge_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_ole_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ole_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -578,7 +578,7 @@ define void @commute_ole_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %i
; GCN-LABEL: {{^}}commute_one_2.0_f64:
; GCN: v_cmp_lg_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_one_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_one_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -591,7 +591,7 @@ define void @commute_one_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %i
; GCN-LABEL: {{^}}commute_ord_2.0_f64:
; GCN: v_cmp_o_f64_e32 vcc, [[REG:v\[[0-9]+:[0-9]+\]]], [[REG]]
-define void @commute_ord_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ord_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -604,7 +604,7 @@ define void @commute_ord_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %i
; GCN-LABEL: {{^}}commute_ueq_2.0_f64:
; GCN: v_cmp_nlg_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_ueq_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ueq_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -617,7 +617,7 @@ define void @commute_ueq_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %i
; GCN-LABEL: {{^}}commute_ugt_2.0_f64:
; GCN: v_cmp_nge_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_ugt_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ugt_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -630,7 +630,7 @@ define void @commute_ugt_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %i
; GCN-LABEL: {{^}}commute_uge_2.0_f64:
; GCN: v_cmp_ngt_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_uge_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_uge_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -643,7 +643,7 @@ define void @commute_uge_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %i
; GCN-LABEL: {{^}}commute_ult_2.0_f64:
; GCN: v_cmp_nle_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_ult_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ult_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -656,7 +656,7 @@ define void @commute_ult_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %i
; GCN-LABEL: {{^}}commute_ule_2.0_f64:
; GCN: v_cmp_nlt_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_ule_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_ule_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -669,7 +669,7 @@ define void @commute_ule_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %i
; GCN-LABEL: {{^}}commute_une_2.0_f64:
; GCN: v_cmp_neq_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
-define void @commute_une_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_une_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -682,7 +682,7 @@ define void @commute_une_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %i
; GCN-LABEL: {{^}}commute_uno_2.0_f64:
; GCN: v_cmp_u_f64_e32 vcc, [[REG:v\[[0-9]+:[0-9]+\]]], [[REG]]
-define void @commute_uno_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
+define amdgpu_kernel void @commute_uno_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -701,9 +701,9 @@ define void @commute_uno_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %i
; GCN-LABEL: {{^}}commute_frameindex:
; XGCN: v_cmp_eq_u32_e32 vcc, 0, v{{[0-9]+}}
-; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 0{{$}}
+; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 4{{$}}
; GCN: v_cmp_eq_u32_e32 vcc, [[FI]], v{{[0-9]+}}
-define void @commute_frameindex(i32 addrspace(1)* nocapture %out) #0 {
+define amdgpu_kernel void @commute_frameindex(i32 addrspace(1)* nocapture %out) #0 {
entry:
%stack0 = alloca i32
%ptr0 = load volatile i32*, i32* addrspace(1)* undef
diff --git a/test/CodeGen/AMDGPU/commute-shifts.ll b/test/CodeGen/AMDGPU/commute-shifts.ll
index 862f236514ca..84d8bf2bd706 100644
--- a/test/CodeGen/AMDGPU/commute-shifts.ll
+++ b/test/CodeGen/AMDGPU/commute-shifts.ll
@@ -4,10 +4,10 @@
; GCN-LABEL: {{^}}main:
; SI: v_lshl_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
; VI: v_lshlrev_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 1
-define amdgpu_ps void @main(float %arg0, float %arg1) #0 {
+define amdgpu_ps float @main(float %arg0, float %arg1) #0 {
bb:
%tmp = fptosi float %arg0 to i32
- %tmp1 = call <4 x float> @llvm.SI.image.load.v4i32(<4 x i32> undef, <8 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp1 = call <4 x float> @llvm.amdgcn.image.load.v4f32.v4i32.v8i32(<4 x i32> undef, <8 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false)
%tmp2.f = extractelement <4 x float> %tmp1, i32 0
%tmp2 = bitcast float %tmp2.f to i32
%tmp3 = and i32 %tmp, 7
@@ -15,15 +15,14 @@ bb:
%tmp5 = and i32 %tmp2, %tmp4
%tmp6 = icmp eq i32 %tmp5, 0
%tmp7 = select i1 %tmp6, float 0.000000e+00, float %arg1
- %tmp8 = call i32 @llvm.SI.packf16(float undef, float %tmp7)
- %tmp9 = bitcast i32 %tmp8 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float undef, float %tmp9, float undef, float %tmp9)
- ret void
+ %tmp8 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float undef, float %tmp7)
+ %tmp9 = bitcast <2 x half> %tmp8 to float
+ ret float %tmp9
}
-declare <4 x float> @llvm.SI.image.load.v4i32(<4 x i32>, <8 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
-declare i32 @llvm.SI.packf16(float, float) #1
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
+declare <4 x float> @llvm.amdgcn.image.load.v4f32.v4i32.v8i32(<4 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #2
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind readonly }
diff --git a/test/CodeGen/AMDGPU/commute_modifiers.ll b/test/CodeGen/AMDGPU/commute_modifiers.ll
index ed4ec82eb3e3..8820e4fd80e5 100644
--- a/test/CodeGen/AMDGPU/commute_modifiers.ll
+++ b/test/CodeGen/AMDGPU/commute_modifiers.ll
@@ -8,7 +8,7 @@ declare float @llvm.fma.f32(float, float, float) nounwind readnone
; SI: buffer_load_dword [[X:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: v_add_f32_e64 [[REG:v[0-9]+]], |[[X]]|, 2.0
; SI: buffer_store_dword [[REG]]
-define void @commute_add_imm_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @commute_add_imm_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%x = load float, float addrspace(1)* %gep.0
@@ -22,7 +22,7 @@ define void @commute_add_imm_fabs_f32(float addrspace(1)* %out, float addrspace(
; SI: buffer_load_dword [[X:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: v_mul_f32_e64 [[REG:v[0-9]+]], |[[X]]|, -4.0
; SI: buffer_store_dword [[REG]]
-define void @commute_mul_imm_fneg_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @commute_mul_imm_fneg_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%x = load float, float addrspace(1)* %gep.0
@@ -37,7 +37,7 @@ define void @commute_mul_imm_fneg_fabs_f32(float addrspace(1)* %out, float addrs
; SI: buffer_load_dword [[X:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: v_mul_f32_e32 [[REG:v[0-9]+]], -4.0, [[X]]
; SI: buffer_store_dword [[REG]]
-define void @commute_mul_imm_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @commute_mul_imm_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%x = load float, float addrspace(1)* %gep.0
@@ -53,7 +53,7 @@ define void @commute_mul_imm_fneg_f32(float addrspace(1)* %out, float addrspace(
; SI: v_mov_b32_e32 [[K:v[0-9]+]], 0x44800000
; SI: v_add_f32_e64 [[REG:v[0-9]+]], [[K]], |[[X]]|
; SI: buffer_store_dword [[REG]]
-define void @commute_add_lit_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @commute_add_lit_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%x = load float, float addrspace(1)* %gep.0
@@ -68,7 +68,7 @@ define void @commute_add_lit_fabs_f32(float addrspace(1)* %out, float addrspace(
; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI: v_add_f32_e64 [[REG:v[0-9]+]], [[X]], |[[Y]]|
; SI: buffer_store_dword [[REG]]
-define void @commute_add_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @commute_add_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -85,7 +85,7 @@ define void @commute_add_fabs_f32(float addrspace(1)* %out, float addrspace(1)*
; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI: v_mul_f32_e64 [[REG:v[0-9]+]], [[X]], -[[Y]]
; SI: buffer_store_dword [[REG]]
-define void @commute_mul_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @commute_mul_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -102,7 +102,7 @@ define void @commute_mul_fneg_f32(float addrspace(1)* %out, float addrspace(1)*
; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI: v_mul_f32_e64 [[REG:v[0-9]+]], [[X]], -|[[Y]]|
; SI: buffer_store_dword [[REG]]
-define void @commute_mul_fabs_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @commute_mul_fabs_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -121,7 +121,7 @@ define void @commute_mul_fabs_fneg_f32(float addrspace(1)* %out, float addrspace
; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI: v_mul_f32_e64 [[REG:v[0-9]+]], |[[X]]|, |[[Y]]|
; SI: buffer_store_dword [[REG]]
-define void @commute_mul_fabs_x_fabs_y_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @commute_mul_fabs_x_fabs_y_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -139,7 +139,7 @@ define void @commute_mul_fabs_x_fabs_y_f32(float addrspace(1)* %out, float addrs
; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI: v_mul_f32_e64 [[REG:v[0-9]+]], |[[X]]|, -|[[Y]]|
; SI: buffer_store_dword [[REG]]
-define void @commute_mul_fabs_x_fneg_fabs_y_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @commute_mul_fabs_x_fneg_fabs_y_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -161,7 +161,7 @@ define void @commute_mul_fabs_x_fneg_fabs_y_f32(float addrspace(1)* %out, float
; SI-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], 2.0, |[[R2]]|
; SI: buffer_store_dword [[RESULT]]
-define void @fma_a_2.0_neg_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
+define amdgpu_kernel void @fma_a_2.0_neg_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
diff --git a/test/CodeGen/AMDGPU/concat_vectors.ll b/test/CodeGen/AMDGPU/concat_vectors.ll
index 2e6be5d10f09..7394842d156f 100644
--- a/test/CodeGen/AMDGPU/concat_vectors.ll
+++ b/test/CodeGen/AMDGPU/concat_vectors.ll
@@ -8,7 +8,7 @@
; value if we want to ensure scratch memory is not being used.
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v1i32(<2 x i32> addrspace(1)* %out, <1 x i32> %a, <1 x i32> %b) nounwind {
+define amdgpu_kernel void @test_concat_v1i32(<2 x i32> addrspace(1)* %out, <1 x i32> %a, <1 x i32> %b) nounwind {
%concat = shufflevector <1 x i32> %a, <1 x i32> %b, <2 x i32> <i32 0, i32 1>
store <2 x i32> %concat, <2 x i32> addrspace(1)* %out, align 8
ret void
@@ -17,7 +17,7 @@ define void @test_concat_v1i32(<2 x i32> addrspace(1)* %out, <1 x i32> %a, <1 x
; FUNC-LABEL: {{^}}test_concat_v2i32:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v2i32(<4 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) nounwind {
+define amdgpu_kernel void @test_concat_v2i32(<4 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) nounwind {
%concat = shufflevector <2 x i32> %a, <2 x i32> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
store <4 x i32> %concat, <4 x i32> addrspace(1)* %out, align 16
ret void
@@ -26,7 +26,7 @@ define void @test_concat_v2i32(<4 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x
; FUNC-LABEL: {{^}}test_concat_v4i32:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v4i32(<8 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) nounwind {
+define amdgpu_kernel void @test_concat_v4i32(<8 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) nounwind {
%concat = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
store <8 x i32> %concat, <8 x i32> addrspace(1)* %out, align 32
ret void
@@ -35,7 +35,7 @@ define void @test_concat_v4i32(<8 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x
; FUNC-LABEL: {{^}}test_concat_v8i32:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v8i32(<16 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b) nounwind {
+define amdgpu_kernel void @test_concat_v8i32(<16 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b) nounwind {
%concat = shufflevector <8 x i32> %a, <8 x i32> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
store <16 x i32> %concat, <16 x i32> addrspace(1)* %out, align 64
ret void
@@ -44,7 +44,7 @@ define void @test_concat_v8i32(<16 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x
; FUNC-LABEL: {{^}}test_concat_v16i32:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v16i32(<32 x i32> addrspace(1)* %out, <16 x i32> %a, <16 x i32> %b) nounwind {
+define amdgpu_kernel void @test_concat_v16i32(<32 x i32> addrspace(1)* %out, <16 x i32> %a, <16 x i32> %b) nounwind {
%concat = shufflevector <16 x i32> %a, <16 x i32> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
store <32 x i32> %concat, <32 x i32> addrspace(1)* %out, align 128
ret void
@@ -53,7 +53,7 @@ define void @test_concat_v16i32(<32 x i32> addrspace(1)* %out, <16 x i32> %a, <1
; FUNC-LABEL: {{^}}test_concat_v1f32:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v1f32(<2 x float> addrspace(1)* %out, <1 x float> %a, <1 x float> %b) nounwind {
+define amdgpu_kernel void @test_concat_v1f32(<2 x float> addrspace(1)* %out, <1 x float> %a, <1 x float> %b) nounwind {
%concat = shufflevector <1 x float> %a, <1 x float> %b, <2 x i32> <i32 0, i32 1>
store <2 x float> %concat, <2 x float> addrspace(1)* %out, align 8
ret void
@@ -62,7 +62,7 @@ define void @test_concat_v1f32(<2 x float> addrspace(1)* %out, <1 x float> %a, <
; FUNC-LABEL: {{^}}test_concat_v2f32:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v2f32(<4 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) nounwind {
+define amdgpu_kernel void @test_concat_v2f32(<4 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) nounwind {
%concat = shufflevector <2 x float> %a, <2 x float> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
store <4 x float> %concat, <4 x float> addrspace(1)* %out, align 16
ret void
@@ -71,7 +71,7 @@ define void @test_concat_v2f32(<4 x float> addrspace(1)* %out, <2 x float> %a, <
; FUNC-LABEL: {{^}}test_concat_v4f32:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v4f32(<8 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b) nounwind {
+define amdgpu_kernel void @test_concat_v4f32(<8 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b) nounwind {
%concat = shufflevector <4 x float> %a, <4 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
store <8 x float> %concat, <8 x float> addrspace(1)* %out, align 32
ret void
@@ -80,7 +80,7 @@ define void @test_concat_v4f32(<8 x float> addrspace(1)* %out, <4 x float> %a, <
; FUNC-LABEL: {{^}}test_concat_v8f32:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v8f32(<16 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b) nounwind {
+define amdgpu_kernel void @test_concat_v8f32(<16 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b) nounwind {
%concat = shufflevector <8 x float> %a, <8 x float> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
store <16 x float> %concat, <16 x float> addrspace(1)* %out, align 64
ret void
@@ -89,7 +89,7 @@ define void @test_concat_v8f32(<16 x float> addrspace(1)* %out, <8 x float> %a,
; FUNC-LABEL: {{^}}test_concat_v16f32:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v16f32(<32 x float> addrspace(1)* %out, <16 x float> %a, <16 x float> %b) nounwind {
+define amdgpu_kernel void @test_concat_v16f32(<32 x float> addrspace(1)* %out, <16 x float> %a, <16 x float> %b) nounwind {
%concat = shufflevector <16 x float> %a, <16 x float> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
store <32 x float> %concat, <32 x float> addrspace(1)* %out, align 128
ret void
@@ -98,7 +98,7 @@ define void @test_concat_v16f32(<32 x float> addrspace(1)* %out, <16 x float> %a
; FUNC-LABEL: {{^}}test_concat_v1i64:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v1i64(<2 x double> addrspace(1)* %out, <1 x double> %a, <1 x double> %b) nounwind {
+define amdgpu_kernel void @test_concat_v1i64(<2 x double> addrspace(1)* %out, <1 x double> %a, <1 x double> %b) nounwind {
%concat = shufflevector <1 x double> %a, <1 x double> %b, <2 x i32> <i32 0, i32 1>
store <2 x double> %concat, <2 x double> addrspace(1)* %out, align 16
ret void
@@ -107,7 +107,7 @@ define void @test_concat_v1i64(<2 x double> addrspace(1)* %out, <1 x double> %a,
; FUNC-LABEL: {{^}}test_concat_v2i64:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v2i64(<4 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b) nounwind {
+define amdgpu_kernel void @test_concat_v2i64(<4 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b) nounwind {
%concat = shufflevector <2 x double> %a, <2 x double> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
store <4 x double> %concat, <4 x double> addrspace(1)* %out, align 32
ret void
@@ -116,7 +116,7 @@ define void @test_concat_v2i64(<4 x double> addrspace(1)* %out, <2 x double> %a,
; FUNC-LABEL: {{^}}test_concat_v4i64:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v4i64(<8 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b) nounwind {
+define amdgpu_kernel void @test_concat_v4i64(<8 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b) nounwind {
%concat = shufflevector <4 x double> %a, <4 x double> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
store <8 x double> %concat, <8 x double> addrspace(1)* %out, align 64
ret void
@@ -125,7 +125,7 @@ define void @test_concat_v4i64(<8 x double> addrspace(1)* %out, <4 x double> %a,
; FUNC-LABEL: {{^}}test_concat_v8i64:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v8i64(<16 x double> addrspace(1)* %out, <8 x double> %a, <8 x double> %b) nounwind {
+define amdgpu_kernel void @test_concat_v8i64(<16 x double> addrspace(1)* %out, <8 x double> %a, <8 x double> %b) nounwind {
%concat = shufflevector <8 x double> %a, <8 x double> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
store <16 x double> %concat, <16 x double> addrspace(1)* %out, align 128
ret void
@@ -134,7 +134,7 @@ define void @test_concat_v8i64(<16 x double> addrspace(1)* %out, <8 x double> %a
; FUNC-LABEL: {{^}}test_concat_v16i64:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v16i64(<32 x double> addrspace(1)* %out, <16 x double> %a, <16 x double> %b) nounwind {
+define amdgpu_kernel void @test_concat_v16i64(<32 x double> addrspace(1)* %out, <16 x double> %a, <16 x double> %b) nounwind {
%concat = shufflevector <16 x double> %a, <16 x double> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
store <32 x double> %concat, <32 x double> addrspace(1)* %out, align 256
ret void
@@ -143,7 +143,7 @@ define void @test_concat_v16i64(<32 x double> addrspace(1)* %out, <16 x double>
; FUNC-LABEL: {{^}}test_concat_v1f64:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v1f64(<2 x double> addrspace(1)* %out, <1 x double> %a, <1 x double> %b) nounwind {
+define amdgpu_kernel void @test_concat_v1f64(<2 x double> addrspace(1)* %out, <1 x double> %a, <1 x double> %b) nounwind {
%concat = shufflevector <1 x double> %a, <1 x double> %b, <2 x i32> <i32 0, i32 1>
store <2 x double> %concat, <2 x double> addrspace(1)* %out, align 16
ret void
@@ -152,7 +152,7 @@ define void @test_concat_v1f64(<2 x double> addrspace(1)* %out, <1 x double> %a,
; FUNC-LABEL: {{^}}test_concat_v2f64:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v2f64(<4 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b) nounwind {
+define amdgpu_kernel void @test_concat_v2f64(<4 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b) nounwind {
%concat = shufflevector <2 x double> %a, <2 x double> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
store <4 x double> %concat, <4 x double> addrspace(1)* %out, align 32
ret void
@@ -161,7 +161,7 @@ define void @test_concat_v2f64(<4 x double> addrspace(1)* %out, <2 x double> %a,
; FUNC-LABEL: {{^}}test_concat_v4f64:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v4f64(<8 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b) nounwind {
+define amdgpu_kernel void @test_concat_v4f64(<8 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b) nounwind {
%concat = shufflevector <4 x double> %a, <4 x double> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
store <8 x double> %concat, <8 x double> addrspace(1)* %out, align 64
ret void
@@ -170,7 +170,7 @@ define void @test_concat_v4f64(<8 x double> addrspace(1)* %out, <4 x double> %a,
; FUNC-LABEL: {{^}}test_concat_v8f64:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v8f64(<16 x double> addrspace(1)* %out, <8 x double> %a, <8 x double> %b) nounwind {
+define amdgpu_kernel void @test_concat_v8f64(<16 x double> addrspace(1)* %out, <8 x double> %a, <8 x double> %b) nounwind {
%concat = shufflevector <8 x double> %a, <8 x double> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
store <16 x double> %concat, <16 x double> addrspace(1)* %out, align 128
ret void
@@ -179,7 +179,7 @@ define void @test_concat_v8f64(<16 x double> addrspace(1)* %out, <8 x double> %a
; FUNC-LABEL: {{^}}test_concat_v16f64:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v16f64(<32 x double> addrspace(1)* %out, <16 x double> %a, <16 x double> %b) nounwind {
+define amdgpu_kernel void @test_concat_v16f64(<32 x double> addrspace(1)* %out, <16 x double> %a, <16 x double> %b) nounwind {
%concat = shufflevector <16 x double> %a, <16 x double> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
store <32 x double> %concat, <32 x double> addrspace(1)* %out, align 256
ret void
@@ -188,7 +188,7 @@ define void @test_concat_v16f64(<32 x double> addrspace(1)* %out, <16 x double>
; FUNC-LABEL: {{^}}test_concat_v1i1:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v1i1(<2 x i1> addrspace(1)* %out, <1 x i1> %a, <1 x i1> %b) nounwind {
+define amdgpu_kernel void @test_concat_v1i1(<2 x i1> addrspace(1)* %out, <1 x i1> %a, <1 x i1> %b) nounwind {
%concat = shufflevector <1 x i1> %a, <1 x i1> %b, <2 x i32> <i32 0, i32 1>
store <2 x i1> %concat, <2 x i1> addrspace(1)* %out
ret void
@@ -197,7 +197,7 @@ define void @test_concat_v1i1(<2 x i1> addrspace(1)* %out, <1 x i1> %a, <1 x i1>
; FUNC-LABEL: {{^}}test_concat_v2i1:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v2i1(<4 x i1> addrspace(1)* %out, <2 x i1> %a, <2 x i1> %b) nounwind {
+define amdgpu_kernel void @test_concat_v2i1(<4 x i1> addrspace(1)* %out, <2 x i1> %a, <2 x i1> %b) nounwind {
%concat = shufflevector <2 x i1> %a, <2 x i1> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
store <4 x i1> %concat, <4 x i1> addrspace(1)* %out
ret void
@@ -206,7 +206,7 @@ define void @test_concat_v2i1(<4 x i1> addrspace(1)* %out, <2 x i1> %a, <2 x i1>
; FUNC-LABEL: {{^}}test_concat_v4i1:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v4i1(<8 x i1> addrspace(1)* %out, <4 x i1> %a, <4 x i1> %b) nounwind {
+define amdgpu_kernel void @test_concat_v4i1(<8 x i1> addrspace(1)* %out, <4 x i1> %a, <4 x i1> %b) nounwind {
%concat = shufflevector <4 x i1> %a, <4 x i1> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
store <8 x i1> %concat, <8 x i1> addrspace(1)* %out
ret void
@@ -215,7 +215,7 @@ define void @test_concat_v4i1(<8 x i1> addrspace(1)* %out, <4 x i1> %a, <4 x i1>
; FUNC-LABEL: {{^}}test_concat_v8i1:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v8i1(<16 x i1> addrspace(1)* %out, <8 x i1> %a, <8 x i1> %b) nounwind {
+define amdgpu_kernel void @test_concat_v8i1(<16 x i1> addrspace(1)* %out, <8 x i1> %a, <8 x i1> %b) nounwind {
%concat = shufflevector <8 x i1> %a, <8 x i1> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
store <16 x i1> %concat, <16 x i1> addrspace(1)* %out
ret void
@@ -224,7 +224,7 @@ define void @test_concat_v8i1(<16 x i1> addrspace(1)* %out, <8 x i1> %a, <8 x i1
; FUNC-LABEL: {{^}}test_concat_v16i1:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v16i1(<32 x i1> addrspace(1)* %out, <16 x i1> %a, <16 x i1> %b) nounwind {
+define amdgpu_kernel void @test_concat_v16i1(<32 x i1> addrspace(1)* %out, <16 x i1> %a, <16 x i1> %b) nounwind {
%concat = shufflevector <16 x i1> %a, <16 x i1> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
store <32 x i1> %concat, <32 x i1> addrspace(1)* %out
ret void
@@ -233,7 +233,7 @@ define void @test_concat_v16i1(<32 x i1> addrspace(1)* %out, <16 x i1> %a, <16 x
; FUNC-LABEL: {{^}}test_concat_v32i1:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v32i1(<64 x i1> addrspace(1)* %out, <32 x i1> %a, <32 x i1> %b) nounwind {
+define amdgpu_kernel void @test_concat_v32i1(<64 x i1> addrspace(1)* %out, <32 x i1> %a, <32 x i1> %b) nounwind {
%concat = shufflevector <32 x i1> %a, <32 x i1> %b, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
store <64 x i1> %concat, <64 x i1> addrspace(1)* %out
ret void
@@ -242,7 +242,7 @@ define void @test_concat_v32i1(<64 x i1> addrspace(1)* %out, <32 x i1> %a, <32 x
; FUNC-LABEL: {{^}}test_concat_v1i16:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v1i16(<2 x i16> addrspace(1)* %out, <1 x i16> %a, <1 x i16> %b) nounwind {
+define amdgpu_kernel void @test_concat_v1i16(<2 x i16> addrspace(1)* %out, <1 x i16> %a, <1 x i16> %b) nounwind {
%concat = shufflevector <1 x i16> %a, <1 x i16> %b, <2 x i32> <i32 0, i32 1>
store <2 x i16> %concat, <2 x i16> addrspace(1)* %out, align 4
ret void
@@ -251,7 +251,7 @@ define void @test_concat_v1i16(<2 x i16> addrspace(1)* %out, <1 x i16> %a, <1 x
; FUNC-LABEL: {{^}}test_concat_v2i16:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v2i16(<4 x i16> addrspace(1)* %out, <2 x i16> %a, <2 x i16> %b) nounwind {
+define amdgpu_kernel void @test_concat_v2i16(<4 x i16> addrspace(1)* %out, <2 x i16> %a, <2 x i16> %b) nounwind {
%concat = shufflevector <2 x i16> %a, <2 x i16> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
store <4 x i16> %concat, <4 x i16> addrspace(1)* %out, align 8
ret void
@@ -260,7 +260,7 @@ define void @test_concat_v2i16(<4 x i16> addrspace(1)* %out, <2 x i16> %a, <2 x
; FUNC-LABEL: {{^}}test_concat_v4i16:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v4i16(<8 x i16> addrspace(1)* %out, <4 x i16> %a, <4 x i16> %b) nounwind {
+define amdgpu_kernel void @test_concat_v4i16(<8 x i16> addrspace(1)* %out, <4 x i16> %a, <4 x i16> %b) nounwind {
%concat = shufflevector <4 x i16> %a, <4 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
store <8 x i16> %concat, <8 x i16> addrspace(1)* %out, align 16
ret void
@@ -269,7 +269,7 @@ define void @test_concat_v4i16(<8 x i16> addrspace(1)* %out, <4 x i16> %a, <4 x
; FUNC-LABEL: {{^}}test_concat_v8i16:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v8i16(<16 x i16> addrspace(1)* %out, <8 x i16> %a, <8 x i16> %b) nounwind {
+define amdgpu_kernel void @test_concat_v8i16(<16 x i16> addrspace(1)* %out, <8 x i16> %a, <8 x i16> %b) nounwind {
%concat = shufflevector <8 x i16> %a, <8 x i16> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
store <16 x i16> %concat, <16 x i16> addrspace(1)* %out, align 32
ret void
@@ -278,7 +278,7 @@ define void @test_concat_v8i16(<16 x i16> addrspace(1)* %out, <8 x i16> %a, <8 x
; FUNC-LABEL: {{^}}test_concat_v16i16:
; SI-NOT: s_mov_b32 s{{[0-9]}}, 0x80f000
; SI-NOT: movrel
-define void @test_concat_v16i16(<32 x i16> addrspace(1)* %out, <16 x i16> %a, <16 x i16> %b) nounwind {
+define amdgpu_kernel void @test_concat_v16i16(<32 x i16> addrspace(1)* %out, <16 x i16> %a, <16 x i16> %b) nounwind {
%concat = shufflevector <16 x i16> %a, <16 x i16> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
store <32 x i16> %concat, <32 x i16> addrspace(1)* %out, align 64
ret void
@@ -286,7 +286,7 @@ define void @test_concat_v16i16(<32 x i16> addrspace(1)* %out, <16 x i16> %a, <1
; FUNC-LABEL: {{^}}concat_vector_crash:
; SI: s_endpgm
-define void @concat_vector_crash(<8 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in) {
+define amdgpu_kernel void @concat_vector_crash(<8 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in) {
bb:
%tmp = load <2 x float>, <2 x float> addrspace(1)* %in, align 4
%tmp1 = shufflevector <2 x float> %tmp, <2 x float> undef, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
diff --git a/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir b/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
index 34bb2588ad62..62b47beb1251 100644
--- a/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
+++ b/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
@@ -1,12 +1,12 @@
# RUN: llc -mtriple=amdgcn--amdhsa -mcpu=hawaii -verify-machineinstrs -run-pass si-fold-operands,dead-mi-elimination -o - %s | FileCheck -check-prefix=GCN %s
--- |
- define void @s_fold_and_imm_regimm_32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+ define amdgpu_kernel void @s_fold_and_imm_regimm_32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%and = and i32 %a, 1234567
store volatile i32 %and, i32 addrspace(1)* %out
ret void
}
- define void @v_fold_and_imm_regimm_32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #0 {
+ define amdgpu_kernel void @v_fold_and_imm_regimm_32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%idxprom = sext i32 %tid to i64
%gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i64 %idxprom
@@ -17,13 +17,13 @@
ret void
}
- define void @s_fold_shl_imm_regimm_32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+ define amdgpu_kernel void @s_fold_shl_imm_regimm_32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%shl = shl i32 %a, 12
store volatile i32 %shl, i32 addrspace(1)* %out
ret void
}
- define void @v_fold_shl_imm_regimm_32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #0 {
+ define amdgpu_kernel void @v_fold_shl_imm_regimm_32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%idxprom = sext i32 %tid to i64
%gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i64 %idxprom
@@ -34,13 +34,13 @@
ret void
}
- define void @s_fold_ashr_imm_regimm_32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+ define amdgpu_kernel void @s_fold_ashr_imm_regimm_32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%ashr = ashr i32 %a, 12
store volatile i32 %ashr, i32 addrspace(1)* %out
ret void
}
- define void @v_fold_ashr_imm_regimm_32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #0 {
+ define amdgpu_kernel void @v_fold_ashr_imm_regimm_32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%idxprom = sext i32 %tid to i64
%gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i64 %idxprom
@@ -51,13 +51,13 @@
ret void
}
- define void @s_fold_lshr_imm_regimm_32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+ define amdgpu_kernel void @s_fold_lshr_imm_regimm_32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%lshr = lshr i32 %a, 12
store volatile i32 %lshr, i32 addrspace(1)* %out
ret void
}
- define void @v_fold_lshr_imm_regimm_32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #0 {
+ define amdgpu_kernel void @v_fold_lshr_imm_regimm_32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%idxprom = sext i32 %tid to i64
%gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i64 %idxprom
diff --git a/test/CodeGen/AMDGPU/constant-fold-mi-operands.ll b/test/CodeGen/AMDGPU/constant-fold-mi-operands.ll
index 0ff75ab58003..0831d250b9e7 100644
--- a/test/CodeGen/AMDGPU/constant-fold-mi-operands.ll
+++ b/test/CodeGen/AMDGPU/constant-fold-mi-operands.ll
@@ -5,7 +5,7 @@
; GCN: v_mov_b32_e32 [[RESULT:v[0-9]+]], 0{{$}}
; GCN-NOT: [[RESULT]]
; GCN: buffer_store_dword [[RESULT]]
-define void @fold_mi_v_and_0(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @fold_mi_v_and_0(i32 addrspace(1)* %out) {
%x = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
%size = call i32 @llvm.amdgcn.groupstaticsize()
%and = and i32 %size, %x
@@ -17,7 +17,7 @@ define void @fold_mi_v_and_0(i32 addrspace(1)* %out) {
; GCN: v_mov_b32_e32 [[RESULT:v[0-9]+]], 0{{$}}
; GCN-NOT: [[RESULT]]
; GCN: buffer_store_dword [[RESULT]]
-define void @fold_mi_s_and_0(i32 addrspace(1)* %out, i32 %x) #0 {
+define amdgpu_kernel void @fold_mi_s_and_0(i32 addrspace(1)* %out, i32 %x) #0 {
%size = call i32 @llvm.amdgcn.groupstaticsize()
%and = and i32 %size, %x
store i32 %and, i32 addrspace(1)* %out
@@ -28,7 +28,7 @@ define void @fold_mi_s_and_0(i32 addrspace(1)* %out, i32 %x) #0 {
; GCN: v_mbcnt_lo_u32_b32_e64 [[RESULT:v[0-9]+]]
; GCN-NOT: [[RESULT]]
; GCN: buffer_store_dword [[RESULT]]
-define void @fold_mi_v_or_0(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @fold_mi_v_or_0(i32 addrspace(1)* %out) {
%x = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
%size = call i32 @llvm.amdgcn.groupstaticsize()
%or = or i32 %size, %x
@@ -42,7 +42,7 @@ define void @fold_mi_v_or_0(i32 addrspace(1)* %out) {
; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[SVAL]]
; GCN-NOT: [[VVAL]]
; GCN: buffer_store_dword [[VVAL]]
-define void @fold_mi_s_or_0(i32 addrspace(1)* %out, i32 %x) #0 {
+define amdgpu_kernel void @fold_mi_s_or_0(i32 addrspace(1)* %out, i32 %x) #0 {
%size = call i32 @llvm.amdgcn.groupstaticsize()
%or = or i32 %size, %x
store i32 %or, i32 addrspace(1)* %out
@@ -53,7 +53,7 @@ define void @fold_mi_s_or_0(i32 addrspace(1)* %out, i32 %x) #0 {
; GCN: v_mbcnt_lo_u32_b32_e64 [[RESULT:v[0-9]+]]
; GCN-NOT: [[RESULT]]
; GCN: buffer_store_dword [[RESULT]]
-define void @fold_mi_v_xor_0(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @fold_mi_v_xor_0(i32 addrspace(1)* %out) {
%x = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
%size = call i32 @llvm.amdgcn.groupstaticsize()
%xor = xor i32 %size, %x
@@ -67,7 +67,7 @@ define void @fold_mi_v_xor_0(i32 addrspace(1)* %out) {
; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[SVAL]]
; GCN-NOT: [[VVAL]]
; GCN: buffer_store_dword [[VVAL]]
-define void @fold_mi_s_xor_0(i32 addrspace(1)* %out, i32 %x) #0 {
+define amdgpu_kernel void @fold_mi_s_xor_0(i32 addrspace(1)* %out, i32 %x) #0 {
%size = call i32 @llvm.amdgcn.groupstaticsize()
%xor = xor i32 %size, %x
store i32 %xor, i32 addrspace(1)* %out
@@ -78,7 +78,7 @@ define void @fold_mi_s_xor_0(i32 addrspace(1)* %out, i32 %x) #0 {
; GCN: v_mov_b32_e32 [[RESULT:v[0-9]+]], -1{{$}}
; GCN-NOT: [[RESULT]]
; GCN: buffer_store_dword [[RESULT]]
-define void @fold_mi_s_not_0(i32 addrspace(1)* %out, i32 %x) #0 {
+define amdgpu_kernel void @fold_mi_s_not_0(i32 addrspace(1)* %out, i32 %x) #0 {
%size = call i32 @llvm.amdgcn.groupstaticsize()
%xor = xor i32 %size, -1
store i32 %xor, i32 addrspace(1)* %out
@@ -91,7 +91,7 @@ define void @fold_mi_s_not_0(i32 addrspace(1)* %out, i32 %x) #0 {
; GCN-NEXT: v_not_b32_e32 v[[RESULT_LO]]
; GCN-NEXT: v_mov_b32_e32 v[[RESULT_HI:[0-9]+]], -1{{$}}
; GCN-NEXT: buffer_store_dwordx2 v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
-define void @fold_mi_v_not_0(i64 addrspace(1)* %out) {
+define amdgpu_kernel void @fold_mi_v_not_0(i64 addrspace(1)* %out) {
%vreg = load volatile i64, i64 addrspace(1)* undef
%ctpop = call i64 @llvm.ctpop.i64(i64 %vreg)
%xor = xor i64 %ctpop, -1
@@ -110,7 +110,7 @@ define void @fold_mi_v_not_0(i64 addrspace(1)* %out) {
; GCN-DAG: v_or_b32_e32 v[[RESULT_LO]], v[[VREG1_LO]], v[[RESULT_LO]]
; GCN-DAG: v_mov_b32_e32 v[[RESULT_HI:[0-9]+]], v[[VREG1_HI]]
; GCN: buffer_store_dwordx2 v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
-define void @fold_mi_or_neg1(i64 addrspace(1)* %out) {
+define amdgpu_kernel void @fold_mi_or_neg1(i64 addrspace(1)* %out) {
%vreg0 = load volatile i64, i64 addrspace(1)* undef
%vreg1 = load volatile i64, i64 addrspace(1)* undef
%ctpop = call i64 @llvm.ctpop.i64(i64 %vreg0)
@@ -126,7 +126,7 @@ define void @fold_mi_or_neg1(i64 addrspace(1)* %out) {
; GCN: v_not_b32
; GCN: v_and_b32
; GCN-NOT: v_and_b32
-define void @fold_mi_and_neg1(i64 addrspace(1)* %out) {
+define amdgpu_kernel void @fold_mi_and_neg1(i64 addrspace(1)* %out) {
%vreg0 = load volatile i64, i64 addrspace(1)* undef
%vreg1 = load volatile i64, i64 addrspace(1)* undef
%ctpop = call i64 @llvm.ctpop.i64(i64 %vreg0)
diff --git a/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll b/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
index 13383cbc1741..d3e6c11ef908 100644
--- a/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
+++ b/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
@@ -10,6 +10,8 @@
; GCN-LABEL: {{^}}divergent_if_endif:
+; VGPR: workitem_private_segment_byte_size = 12{{$}}
+
; GCN: {{^}}; BB#0:
; GCN: s_mov_b32 m0, -1
@@ -26,12 +28,13 @@
; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_LO:[0-9]+]], s[[SAVEEXEC_LO]]
-; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[0:3], s7 ; 4-byte Folded Spill
+; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[0:3], s7 offset:4 ; 4-byte Folded Spill
; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_HI:[0-9]+]], s[[SAVEEXEC_HI]]
-; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[0:3], s7 offset:4 ; 4-byte Folded Spill
+; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[0:3], s7 offset:8 ; 4-byte Folded Spill
; Spill load
; GCN: buffer_store_dword [[LOAD0]], off, s[0:3], s7 offset:[[LOAD0_OFFSET:[0-9]+]] ; 4-byte Folded Spill
+
; GCN: s_mov_b64 exec, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}
; GCN: s_waitcnt vmcnt(0) expcnt(0)
@@ -55,11 +58,11 @@
-; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+]], off, s[0:3], s7 ; 4-byte Folded Reload
+; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+]], off, s[0:3], s7 offset:4 ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], v[[V_RELOAD_SAVEEXEC_LO]]
-; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+]], off, s[0:3], s7 offset:4 ; 4-byte Folded Reload
+; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+]], off, s[0:3], s7 offset:8 ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], v[[V_RELOAD_SAVEEXEC_HI]]
@@ -69,7 +72,7 @@
; GCN: buffer_load_dword [[RELOAD_VAL:v[0-9]+]], off, s[0:3], s7 offset:[[VAL_OFFSET]] ; 4-byte Folded Reload
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RELOAD_VAL]]
-define void @divergent_if_endif(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @divergent_if_endif(i32 addrspace(1)* %out) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%load0 = load volatile i32, i32 addrspace(3)* undef
@@ -88,6 +91,8 @@ endif:
}
; GCN-LABEL: {{^}}divergent_loop:
+; VGPR: workitem_private_segment_byte_size = 16{{$}}
+
; GCN: {{^}}; BB#0:
; GCN: s_mov_b32 m0, -1
@@ -100,7 +105,7 @@ endif:
; GCN: s_xor_b64 s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}
; Spill load
-; GCN: buffer_store_dword [[LOAD0]], off, s[0:3], s7 ; 4-byte Folded Spill
+; GCN: buffer_store_dword [[LOAD0]], off, s[0:3], s7 offset:4 ; 4-byte Folded Spill
; Spill saved exec
; VGPR: v_writelane_b32 [[SPILL_VGPR:v[0-9]+]], s[[SAVEEXEC_LO]], [[SAVEEXEC_LO_LANE:[0-9]+]]
@@ -108,9 +113,9 @@ endif:
; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_LO:[0-9]+]], s[[SAVEEXEC_LO]]
-; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[0:3], s7 offset:16 ; 4-byte Folded Spill
+; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[0:3], s7 offset:20 ; 4-byte Folded Spill
; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_HI:[0-9]+]], s[[SAVEEXEC_HI]]
-; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[0:3], s7 offset:20 ; 4-byte Folded Spill
+; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[0:3], s7 offset:24 ; 4-byte Folded Spill
; GCN: s_mov_b64 exec, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}
@@ -120,7 +125,7 @@ endif:
; GCN: [[LOOP:BB[0-9]+_[0-9]+]]:
-; GCN: buffer_load_dword v[[VAL_LOOP_RELOAD:[0-9]+]], off, s[0:3], s7 ; 4-byte Folded Reload
+; GCN: buffer_load_dword v[[VAL_LOOP_RELOAD:[0-9]+]], off, s[0:3], s7 offset:4 ; 4-byte Folded Reload
; GCN: v_subrev_i32_e32 [[VAL_LOOP:v[0-9]+]], vcc, v{{[0-9]+}}, v[[VAL_LOOP_RELOAD]]
; GCN: v_cmp_ne_u32_e32 vcc,
; GCN: s_and_b64 vcc, exec, vcc
@@ -133,11 +138,11 @@ endif:
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_LO_LANE]]
; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_HI_LANE]]
-; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+]], off, s[0:3], s7 offset:16 ; 4-byte Folded Reload
+; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+]], off, s[0:3], s7 offset:20 ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], v[[V_RELOAD_SAVEEXEC_LO]]
-; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+]], off, s[0:3], s7 offset:20 ; 4-byte Folded Reload
+; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+]], off, s[0:3], s7 offset:24 ; 4-byte Folded Reload
; VMEM: s_waitcnt vmcnt(0)
; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], v[[V_RELOAD_SAVEEXEC_HI]]
@@ -145,7 +150,7 @@ endif:
; GCN: buffer_load_dword v[[VAL_END:[0-9]+]], off, s[0:3], s7 offset:[[VAL_SUB_OFFSET]] ; 4-byte Folded Reload
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[VAL_END]]
-define void @divergent_loop(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @divergent_loop(i32 addrspace(1)* %out) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%load0 = load volatile i32, i32 addrspace(3)* undef
@@ -180,7 +185,7 @@ end:
; GCN: s_xor_b64 s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}
; Spill load
-; GCN: buffer_store_dword [[LOAD0]], off, s[0:3], s7 ; 4-byte Folded Spill
+; GCN: buffer_store_dword [[LOAD0]], off, s[0:3], s7 offset:4 ; 4-byte Folded Spill
; Spill saved exec
; VGPR: v_writelane_b32 [[SPILL_VGPR:v[0-9]+]], s[[SAVEEXEC_LO]], [[SAVEEXEC_LO_LANE:[0-9]+]]
@@ -237,14 +242,14 @@ end:
; GCN: BB{{[0-9]+}}_2: ; %if
; GCN: ds_read_b32
-; GCN: buffer_load_dword v[[LOAD0_RELOAD:[0-9]+]], off, s[0:3], s7 ; 4-byte Folded Reload
+; GCN: buffer_load_dword v[[LOAD0_RELOAD:[0-9]+]], off, s[0:3], s7 offset:4 ; 4-byte Folded Reload
; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, v{{[0-9]+}}, v[[LOAD0_RELOAD]]
; GCN: buffer_store_dword [[ADD]], off, s[0:3], s7 offset:[[RESULT_OFFSET]] ; 4-byte Folded Spill
; GCN: s_waitcnt vmcnt(0) expcnt(0)
; GCN-NEXT: s_branch [[ENDIF:BB[0-9]+_[0-9]+]]
; GCN: [[ELSE]]: ; %else
-; GCN: buffer_load_dword v[[LOAD0_RELOAD:[0-9]+]], off, s[0:3], s7 ; 4-byte Folded Reload
+; GCN: buffer_load_dword v[[LOAD0_RELOAD:[0-9]+]], off, s[0:3], s7 offset:4 ; 4-byte Folded Reload
; GCN: v_subrev_i32_e32 [[SUB:v[0-9]+]], vcc, v{{[0-9]+}}, v[[LOAD0_RELOAD]]
; GCN: buffer_store_dword [[ADD]], off, s[0:3], s7 offset:[[FLOW_RESULT_OFFSET:[0-9]+]] ; 4-byte Folded Spill
; GCN: s_waitcnt vmcnt(0) expcnt(0)
@@ -267,7 +272,7 @@ end:
; GCN: buffer_load_dword v[[RESULT:[0-9]+]], off, s[0:3], s7 offset:[[RESULT_OFFSET]] ; 4-byte Folded Reload
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RESULT]]
-define void @divergent_if_else_endif(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @divergent_if_else_endif(i32 addrspace(1)* %out) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%load0 = load volatile i32, i32 addrspace(3)* undef
diff --git a/test/CodeGen/AMDGPU/convergent-inlineasm.ll b/test/CodeGen/AMDGPU/convergent-inlineasm.ll
index 755f439c6863..0074a41e44cf 100644
--- a/test/CodeGen/AMDGPU/convergent-inlineasm.ll
+++ b/test/CodeGen/AMDGPU/convergent-inlineasm.ll
@@ -6,7 +6,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() #0
; GCN: v_cmp_ne_u32_e64
; GCN: ; mask branch
; GCN: BB{{[0-9]+_[0-9]+}}:
-define void @convergent_inlineasm(i64 addrspace(1)* nocapture %arg) {
+define amdgpu_kernel void @convergent_inlineasm(i64 addrspace(1)* nocapture %arg) {
bb:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = tail call i64 asm "v_cmp_ne_u32_e64 $0, 0, $1", "=s,v"(i32 1) #1
@@ -29,7 +29,8 @@ bb5: ; preds = %bb3, %bb
; GCN: v_cmp_ne_u32_e64
; GCN: BB{{[0-9]+_[0-9]+}}:
-define void @nonconvergent_inlineasm(i64 addrspace(1)* nocapture %arg) {
+
+define amdgpu_kernel void @nonconvergent_inlineasm(i64 addrspace(1)* nocapture %arg) {
bb:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = tail call i64 asm "v_cmp_ne_u32_e64 $0, 0, $1", "=s,v"(i32 1)
diff --git a/test/CodeGen/AMDGPU/copy-illegal-type.ll b/test/CodeGen/AMDGPU/copy-illegal-type.ll
index 7434d745b259..026dd7ca6c87 100644
--- a/test/CodeGen/AMDGPU/copy-illegal-type.ll
+++ b/test/CodeGen/AMDGPU/copy-illegal-type.ll
@@ -8,7 +8,7 @@ declare i32 @llvm.amdgcn.workitem.id.y() nounwind readnone
; GCN: buffer_load_dword [[REG:v[0-9]+]]
; GCN: buffer_store_dword [[REG]]
; GCN: s_endpgm
-define void @test_copy_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_copy_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
ret void
@@ -19,7 +19,7 @@ define void @test_copy_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)*
; GCN: buffer_store_dword [[REG]]
; GCN: buffer_store_dword [[REG]]
; GCN: s_endpgm
-define void @test_copy_v4i8_x2(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_copy_v4i8_x2(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
@@ -32,7 +32,7 @@ define void @test_copy_v4i8_x2(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(
; GCN: buffer_store_dword [[REG]]
; GCN: buffer_store_dword [[REG]]
; GCN: s_endpgm
-define void @test_copy_v4i8_x3(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_copy_v4i8_x3(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
@@ -47,7 +47,7 @@ define void @test_copy_v4i8_x3(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(
; GCN: buffer_store_dword [[REG]]
; GCN: buffer_store_dword [[REG]]
; GCN: s_endpgm
-define void @test_copy_v4i8_x4(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %out3, <4 x i8> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_copy_v4i8_x4(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %out3, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
@@ -65,7 +65,7 @@ define void @test_copy_v4i8_x4(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(
; GCN-DAG: buffer_store_dword
; GCN: s_endpgm
-define void @test_copy_v4i8_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_copy_v4i8_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
%add = add <4 x i8> %val, <i8 9, i8 9, i8 9, i8 9>
store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
@@ -85,7 +85,7 @@ define void @test_copy_v4i8_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> add
; GCN: {{buffer|flat}}_store_dword
; GCN: {{buffer|flat}}_store_dword
; GCN: s_endpgm
-define void @test_copy_v4i8_x2_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_copy_v4i8_x2_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %in) nounwind {
%tid.x = call i32 @llvm.amdgcn.workitem.id.x()
%in.ptr = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid.x
%val = load <4 x i8>, <4 x i8> addrspace(1)* %in.ptr, align 4
@@ -101,7 +101,7 @@ define void @test_copy_v4i8_x2_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8>
; GCN-DAG: buffer_store_short v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:2{{$}}
; GCN: s_endpgm
-define void @test_copy_v3i8_align4(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_copy_v3i8_align4(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) nounwind {
%val = load <3 x i8>, <3 x i8> addrspace(1)* %in, align 4
store <3 x i8> %val, <3 x i8> addrspace(1)* %out, align 4
ret void
@@ -113,7 +113,7 @@ define void @test_copy_v3i8_align4(<3 x i8> addrspace(1)* %out, <3 x i8> addrspa
; GCN-DAG: buffer_store_short v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:2{{$}}
; GCN: s_endpgm
-define void @test_copy_v3i8_align2(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_copy_v3i8_align2(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) nounwind {
%val = load <3 x i8>, <3 x i8> addrspace(1)* %in, align 2
store <3 x i8> %val, <3 x i8> addrspace(1)* %out, align 2
ret void
@@ -128,7 +128,7 @@ define void @test_copy_v3i8_align2(<3 x i8> addrspace(1)* %out, <3 x i8> addrspa
; GCN: buffer_store_byte
; GCN: buffer_store_byte
; GCN: s_endpgm
-define void @test_copy_v3i8_align1(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_copy_v3i8_align1(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) nounwind {
%val = load <3 x i8>, <3 x i8> addrspace(1)* %in, align 1
store <3 x i8> %val, <3 x i8> addrspace(1)* %out, align 1
ret void
@@ -141,7 +141,7 @@ define void @test_copy_v3i8_align1(<3 x i8> addrspace(1)* %out, <3 x i8> addrspa
; GCN: buffer_load_ubyte
; GCN: buffer_store_dword
; GCN: s_endpgm
-define void @test_copy_v4i8_volatile_load(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_copy_v4i8_volatile_load(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
%val = load volatile <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
ret void
@@ -157,7 +157,7 @@ define void @test_copy_v4i8_volatile_load(<4 x i8> addrspace(1)* %out, <4 x i8>
; GCN: buffer_store_byte
; GCN: buffer_store_byte
; GCN: s_endpgm
-define void @test_copy_v4i8_volatile_store(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_copy_v4i8_volatile_store(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
store volatile <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/copy-to-reg.ll b/test/CodeGen/AMDGPU/copy-to-reg.ll
index 3422a889a520..f35b0706f3d3 100644
--- a/test/CodeGen/AMDGPU/copy-to-reg.ll
+++ b/test/CodeGen/AMDGPU/copy-to-reg.ll
@@ -6,7 +6,7 @@
; Make sure this doesn't crash
; CHECK-LABEL: {{^}}copy_to_reg_frameindex:
-define void @copy_to_reg_frameindex(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
+define amdgpu_kernel void @copy_to_reg_frameindex(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
entry:
%alloca = alloca [16 x i32]
br label %loop
diff --git a/test/CodeGen/AMDGPU/ctlz.ll b/test/CodeGen/AMDGPU/ctlz.ll
index 1a0027dd4a3c..e252971e3f42 100644
--- a/test/CodeGen/AMDGPU/ctlz.ll
+++ b/test/CodeGen/AMDGPU/ctlz.ll
@@ -27,7 +27,7 @@ declare i32 @llvm.r600.read.tidig.x() nounwind readnone
; EG: FFBH_UINT
; EG: CNDE_INT
-define void @s_ctlz_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
+define amdgpu_kernel void @s_ctlz_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 false) nounwind readnone
store i32 %ctlz, i32 addrspace(1)* %out, align 4
ret void
@@ -43,7 +43,7 @@ define void @s_ctlz_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
; EG: FFBH_UINT
; EG: CNDE_INT
-define void @v_ctlz_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_ctlz_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
%val = load i32, i32 addrspace(1)* %valptr, align 4
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 false) nounwind readnone
store i32 %ctlz, i32 addrspace(1)* %out, align 4
@@ -61,7 +61,7 @@ define void @v_ctlz_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalia
; EG: CNDE_INT
; EG: FFBH_UINT
; EG: CNDE_INT
-define void @v_ctlz_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_ctlz_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) nounwind {
%val = load <2 x i32>, <2 x i32> addrspace(1)* %valptr, align 8
%ctlz = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %val, i1 false) nounwind readnone
store <2 x i32> %ctlz, <2 x i32> addrspace(1)* %out, align 8
@@ -89,7 +89,7 @@ define void @v_ctlz_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrsp
; EG-DAG: FFBH_UINT
; EG-DAG: CNDE_INT
-define void @v_ctlz_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_ctlz_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %valptr) nounwind {
%val = load <4 x i32>, <4 x i32> addrspace(1)* %valptr, align 16
%ctlz = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %val, i1 false) nounwind readnone
store <4 x i32> %ctlz, <4 x i32> addrspace(1)* %out, align 16
@@ -98,10 +98,11 @@ define void @v_ctlz_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrsp
; FUNC-LABEL: {{^}}v_ctlz_i8:
; GCN: buffer_load_ubyte [[VAL:v[0-9]+]],
-; GCN-DAG: v_ffbh_u32_e32 [[RESULT:v[0-9]+]], [[VAL]]
+; SI-DAG: v_ffbh_u32_e32 [[RESULT:v[0-9]+]], [[VAL]]
+; VI-DAG: v_ffbh_u32_sdwa [[RESULT:v[0-9]+]], [[VAL]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GCN: buffer_store_byte [[RESULT]],
; GCN: s_endpgm
-define void @v_ctlz_i8(i8 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_ctlz_i8(i8 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %valptr) nounwind {
%val = load i8, i8 addrspace(1)* %valptr
%ctlz = call i8 @llvm.ctlz.i8(i8 %val, i1 false) nounwind readnone
store i8 %ctlz, i8 addrspace(1)* %out
@@ -119,14 +120,14 @@ define void @v_ctlz_i8(i8 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %
; GCN-DAG: v_cndmask_b32_e32 v[[CTLZ:[0-9]+]], [[VFFBH_HI]], [[VFFBH_LO]]
; GCN-DAG: v_mov_b32_e32 v[[CTLZ_HI:[0-9]+]], 0{{$}}
; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v{{\[}}[[CTLZ]]:[[CTLZ_HI]]{{\]}}
-define void @s_ctlz_i64(i64 addrspace(1)* noalias %out, i64 %val) nounwind {
+define amdgpu_kernel void @s_ctlz_i64(i64 addrspace(1)* noalias %out, i64 %val) nounwind {
%ctlz = call i64 @llvm.ctlz.i64(i64 %val, i1 false)
store i64 %ctlz, i64 addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}s_ctlz_i64_trunc:
-define void @s_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64 %val) nounwind {
+define amdgpu_kernel void @s_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64 %val) nounwind {
%ctlz = call i64 @llvm.ctlz.i64(i64 %val, i1 false)
%trunc = trunc i64 %ctlz to i32
store i32 %trunc, i32 addrspace(1)* %out
@@ -145,7 +146,7 @@ define void @s_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64 %val) nounwind
; GCN-DAG: v_cmp_ne_u32_e32 vcc, 0, [[OR]]
; GCN-DAG: v_cndmask_b32_e32 v[[CLTZ_LO:[0-9]+]], 64, v[[CTLZ:[0-9]+]], vcc
; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v{{\[}}[[CLTZ_LO]]:[[CTLZ_HI]]{{\]}}
-define void @v_ctlz_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @v_ctlz_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
@@ -156,7 +157,7 @@ define void @v_ctlz_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalia
}
; FUNC-LABEL: {{^}}v_ctlz_i64_trunc:
-define void @v_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @v_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -172,7 +173,7 @@ define void @v_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64 addrspace(1)*
; GCN: v_ffbh_u32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
- define void @v_ctlz_i32_sel_eq_neg1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+ define amdgpu_kernel void @v_ctlz_i32_sel_eq_neg1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
%val = load i32, i32 addrspace(1)* %valptr
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 false) nounwind readnone
%cmp = icmp eq i32 %val, 0
@@ -186,7 +187,7 @@ define void @v_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64 addrspace(1)*
; GCN: v_ffbh_u32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
-define void @v_ctlz_i32_sel_ne_neg1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_ctlz_i32_sel_ne_neg1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
%val = load i32, i32 addrspace(1)* %valptr
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 false) nounwind readnone
%cmp = icmp ne i32 %val, 0
@@ -202,7 +203,7 @@ define void @v_ctlz_i32_sel_ne_neg1(i32 addrspace(1)* noalias %out, i32 addrspac
; GCN: v_cmp
; GCN: v_cndmask
; GCN: s_endpgm
-define void @v_ctlz_i32_sel_eq_bitwidth(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_ctlz_i32_sel_eq_bitwidth(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
%val = load i32, i32 addrspace(1)* %valptr
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 false) nounwind readnone
%cmp = icmp eq i32 %ctlz, 32
@@ -217,7 +218,7 @@ define void @v_ctlz_i32_sel_eq_bitwidth(i32 addrspace(1)* noalias %out, i32 addr
; GCN: v_cmp
; GCN: v_cndmask
; GCN: s_endpgm
-define void @v_ctlz_i32_sel_ne_bitwidth(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_ctlz_i32_sel_ne_bitwidth(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
%val = load i32, i32 addrspace(1)* %valptr
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 false) nounwind readnone
%cmp = icmp ne i32 %ctlz, 32
@@ -230,7 +231,7 @@ define void @v_ctlz_i32_sel_ne_bitwidth(i32 addrspace(1)* noalias %out, i32 addr
; GCN: {{buffer|flat}}_load_ubyte [[VAL:v[0-9]+]],
; GCN: v_ffbh_u32_e32 [[FFBH:v[0-9]+]], [[VAL]]
; GCN: {{buffer|flat}}_store_byte [[FFBH]],
- define void @v_ctlz_i8_sel_eq_neg1(i8 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %valptr) nounwind {
+ define amdgpu_kernel void @v_ctlz_i8_sel_eq_neg1(i8 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %valptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x()
%valptr.gep = getelementptr i8, i8 addrspace(1)* %valptr, i32 %tid
%val = load i8, i8 addrspace(1)* %valptr.gep
@@ -245,7 +246,7 @@ define void @v_ctlz_i32_sel_ne_bitwidth(i32 addrspace(1)* noalias %out, i32 addr
; SI: buffer_load_ushort [[VAL:v[0-9]+]],
; SI: v_ffbh_u32_e32 [[FFBH:v[0-9]+]], [[VAL]]
; SI: buffer_store_short [[FFBH]],
- define void @v_ctlz_i16_sel_eq_neg1(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %valptr) nounwind {
+ define amdgpu_kernel void @v_ctlz_i16_sel_eq_neg1(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %valptr) nounwind {
%val = load i16, i16 addrspace(1)* %valptr
%ctlz = call i16 @llvm.ctlz.i16(i16 %val, i1 false) nounwind readnone
%cmp = icmp eq i16 %val, 0
@@ -260,7 +261,7 @@ define void @v_ctlz_i32_sel_ne_bitwidth(i32 addrspace(1)* noalias %out, i32 addr
; GCN: v_ffbh_u32_e32 [[FFBH:v[0-9]+]], [[VAL]]
; GCN: v_and_b32_e32 [[TRUNC:v[0-9]+]], 0x7f, [[FFBH]]
; GCN: {{buffer|flat}}_store_byte [[TRUNC]],
-define void @v_ctlz_i7_sel_eq_neg1(i7 addrspace(1)* noalias %out, i7 addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_ctlz_i7_sel_eq_neg1(i7 addrspace(1)* noalias %out, i7 addrspace(1)* noalias %valptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x()
%valptr.gep = getelementptr i7, i7 addrspace(1)* %valptr, i32 %tid
%val = load i7, i7 addrspace(1)* %valptr.gep
diff --git a/test/CodeGen/AMDGPU/ctlz_zero_undef.ll b/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
index d390f64deeab..87ba563a740f 100644
--- a/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
+++ b/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
@@ -22,7 +22,7 @@ declare i32 @llvm.r600.read.tidig.x() nounwind readnone
; GCN: s_endpgm
; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+\.[XYZW]]]
; EG: FFBH_UINT {{\*? *}}[[RESULT]]
-define void @s_ctlz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
+define amdgpu_kernel void @s_ctlz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
store i32 %ctlz, i32 addrspace(1)* %out, align 4
ret void
@@ -35,7 +35,7 @@ define void @s_ctlz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 %val) nou
; GCN: s_endpgm
; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+\.[XYZW]]]
; EG: FFBH_UINT {{\*? *}}[[RESULT]]
-define void @v_ctlz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_ctlz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
%val = load i32, i32 addrspace(1)* %valptr, align 4
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
store i32 %ctlz, i32 addrspace(1)* %out, align 4
@@ -51,7 +51,7 @@ define void @v_ctlz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 addrspace
; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+]]{{\.[XYZW]}}
; EG: FFBH_UINT {{\*? *}}[[RESULT]]
; EG: FFBH_UINT {{\*? *}}[[RESULT]]
-define void @v_ctlz_zero_undef_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_ctlz_zero_undef_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) nounwind {
%val = load <2 x i32>, <2 x i32> addrspace(1)* %valptr, align 8
%ctlz = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %val, i1 true) nounwind readnone
store <2 x i32> %ctlz, <2 x i32> addrspace(1)* %out, align 8
@@ -71,7 +71,7 @@ define void @v_ctlz_zero_undef_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x
; EG: FFBH_UINT {{\*? *}}[[RESULT]]
; EG: FFBH_UINT {{\*? *}}[[RESULT]]
; EG: FFBH_UINT {{\*? *}}[[RESULT]]
-define void @v_ctlz_zero_undef_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_ctlz_zero_undef_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %valptr) nounwind {
%val = load <4 x i32>, <4 x i32> addrspace(1)* %valptr, align 16
%ctlz = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %val, i1 true) nounwind readnone
store <4 x i32> %ctlz, <4 x i32> addrspace(1)* %out, align 16
@@ -82,7 +82,7 @@ define void @v_ctlz_zero_undef_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x
; GCN: buffer_load_ubyte [[VAL:v[0-9]+]],
; GCN: v_ffbh_u32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; GCN: buffer_store_byte [[RESULT]],
-define void @v_ctlz_zero_undef_i8(i8 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_ctlz_zero_undef_i8(i8 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %valptr) nounwind {
%val = load i8, i8 addrspace(1)* %valptr
%ctlz = call i8 @llvm.ctlz.i8(i8 %val, i1 true) nounwind readnone
store i8 %ctlz, i8 addrspace(1)* %out
@@ -100,14 +100,14 @@ define void @v_ctlz_zero_undef_i8(i8 addrspace(1)* noalias %out, i8 addrspace(1)
; GCN-DAG: v_cndmask_b32_e32 v[[CTLZ:[0-9]+]], [[VFFBH_HI]], [[VFFBH_LO]]
; GCN-DAG: v_mov_b32_e32 v[[CTLZ_HI:[0-9]+]], 0{{$}}
; GCN: {{buffer|flat}}_store_dwordx2 v{{\[}}[[CTLZ]]:[[CTLZ_HI]]{{\]}}
-define void @s_ctlz_zero_undef_i64(i64 addrspace(1)* noalias %out, i64 %val) nounwind {
+define amdgpu_kernel void @s_ctlz_zero_undef_i64(i64 addrspace(1)* noalias %out, i64 %val) nounwind {
%ctlz = call i64 @llvm.ctlz.i64(i64 %val, i1 true)
store i64 %ctlz, i64 addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}s_ctlz_zero_undef_i64_trunc:
-define void @s_ctlz_zero_undef_i64_trunc(i32 addrspace(1)* noalias %out, i64 %val) nounwind {
+define amdgpu_kernel void @s_ctlz_zero_undef_i64_trunc(i32 addrspace(1)* noalias %out, i64 %val) nounwind {
%ctlz = call i64 @llvm.ctlz.i64(i64 %val, i1 true)
%trunc = trunc i64 %ctlz to i32
store i32 %trunc, i32 addrspace(1)* %out
@@ -123,7 +123,7 @@ define void @s_ctlz_zero_undef_i64_trunc(i32 addrspace(1)* noalias %out, i64 %va
; GCN-DAG: v_cndmask_b32_e64 v[[CTLZ:[0-9]+]], [[FFBH_HI]], [[FFBH_LO]]
; GCN-DAG: v_mov_b32_e32 v[[CTLZ_HI:[0-9]+]], 0{{$}}
; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v{{\[}}[[CTLZ]]:[[CTLZ_HI]]{{\]}}
-define void @v_ctlz_zero_undef_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @v_ctlz_zero_undef_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
@@ -134,7 +134,7 @@ define void @v_ctlz_zero_undef_i64(i64 addrspace(1)* noalias %out, i64 addrspace
}
; FUNC-LABEL: {{^}}v_ctlz_zero_undef_i64_trunc:
-define void @v_ctlz_zero_undef_i64_trunc(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @v_ctlz_zero_undef_i64_trunc(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -149,7 +149,7 @@ define void @v_ctlz_zero_undef_i64_trunc(i32 addrspace(1)* noalias %out, i64 add
; GCN: buffer_load_dword [[VAL:v[0-9]+]],
; GCN: v_ffbh_u32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[RESULT]],
- define void @v_ctlz_zero_undef_i32_sel_eq_neg1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+ define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_eq_neg1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
%val = load i32, i32 addrspace(1)* %valptr
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
%cmp = icmp eq i32 %val, 0
@@ -162,7 +162,7 @@ define void @v_ctlz_zero_undef_i64_trunc(i32 addrspace(1)* noalias %out, i64 add
; GCN: buffer_load_dword [[VAL:v[0-9]+]],
; GCN: v_ffbh_u32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[RESULT]],
-define void @v_ctlz_zero_undef_i32_sel_ne_neg1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_ne_neg1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
%val = load i32, i32 addrspace(1)* %valptr
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
%cmp = icmp ne i32 %val, 0
@@ -175,7 +175,7 @@ define void @v_ctlz_zero_undef_i32_sel_ne_neg1(i32 addrspace(1)* noalias %out, i
; GCN: {{buffer|flat}}_load_ubyte [[VAL:v[0-9]+]],
; GCN: v_ffbh_u32_e32 [[FFBH:v[0-9]+]], [[VAL]]
; GCN: {{buffer|flat}}_store_byte [[FFBH]],
-define void @v_ctlz_zero_undef_i8_sel_eq_neg1(i8 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_ctlz_zero_undef_i8_sel_eq_neg1(i8 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %valptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x()
%valptr.gep = getelementptr i8, i8 addrspace(1)* %valptr, i32 %tid
%val = load i8, i8 addrspace(1)* %valptr.gep
@@ -194,7 +194,7 @@ define void @v_ctlz_zero_undef_i8_sel_eq_neg1(i8 addrspace(1)* noalias %out, i8
; GCN-DAG: buffer_store_dword [[RESULT0]]
; GCN-DAG: buffer_store_byte [[RESULT1]]
; GCN: s_endpgm
- define void @v_ctlz_zero_undef_i32_sel_eq_neg1_two_use(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+ define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_eq_neg1_two_use(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
%val = load i32, i32 addrspace(1)* %valptr
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
%cmp = icmp eq i32 %val, 0
@@ -211,7 +211,7 @@ define void @v_ctlz_zero_undef_i8_sel_eq_neg1(i8 addrspace(1)* noalias %out, i8
; GCN: v_cmp
; GCN: v_cndmask
; GCN: buffer_store_dword
- define void @v_ctlz_zero_undef_i32_sel_eq_0(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+ define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_eq_0(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
%val = load i32, i32 addrspace(1)* %valptr
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
%cmp = icmp eq i32 %val, 0
@@ -227,7 +227,7 @@ define void @v_ctlz_zero_undef_i8_sel_eq_neg1(i8 addrspace(1)* noalias %out, i8
; GCN: v_cmp
; GCN: v_cndmask
; GCN: buffer_store_dword
-define void @v_ctlz_zero_undef_i32_sel_ne_0(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_ne_0(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
%val = load i32, i32 addrspace(1)* %valptr
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
%cmp = icmp ne i32 %val, 0
@@ -243,7 +243,7 @@ define void @v_ctlz_zero_undef_i32_sel_ne_0(i32 addrspace(1)* noalias %out, i32
; GCN: v_cmp
; GCN: v_cndmask
; GCN: buffer_store_dword
- define void @v_ctlz_zero_undef_i32_sel_eq_cmp_non0(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+ define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_eq_cmp_non0(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
%val = load i32, i32 addrspace(1)* %valptr
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
%cmp = icmp eq i32 %val, 1
@@ -259,7 +259,7 @@ define void @v_ctlz_zero_undef_i32_sel_ne_0(i32 addrspace(1)* noalias %out, i32
; GCN: v_cmp
; GCN: v_cndmask
; GCN: buffer_store_dword
-define void @v_ctlz_zero_undef_i32_sel_ne_cmp_non0(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_ctlz_zero_undef_i32_sel_ne_cmp_non0(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
%val = load i32, i32 addrspace(1)* %valptr
%ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
%cmp = icmp ne i32 %val, 1
diff --git a/test/CodeGen/AMDGPU/ctpop.ll b/test/CodeGen/AMDGPU/ctpop.ll
index 9692236bb363..a29e72ea57cb 100644
--- a/test/CodeGen/AMDGPU/ctpop.ll
+++ b/test/CodeGen/AMDGPU/ctpop.ll
@@ -16,7 +16,7 @@ declare <16 x i32> @llvm.ctpop.v16i32(<16 x i32>) nounwind readnone
; GCN: s_endpgm
; EG: BCNT_INT
-define void @s_ctpop_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
+define amdgpu_kernel void @s_ctpop_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
%ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
store i32 %ctpop, i32 addrspace(1)* %out, align 4
ret void
@@ -30,7 +30,7 @@ define void @s_ctpop_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
; GCN: s_endpgm
; EG: BCNT_INT
-define void @v_ctpop_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @v_ctpop_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
%val = load i32, i32 addrspace(1)* %in, align 4
%ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
store i32 %ctpop, i32 addrspace(1)* %out, align 4
@@ -48,7 +48,7 @@ define void @v_ctpop_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noali
; EG: BCNT_INT
; EG: BCNT_INT
-define void @v_ctpop_add_chain_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in0, i32 addrspace(1)* noalias %in1) nounwind {
+define amdgpu_kernel void @v_ctpop_add_chain_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in0, i32 addrspace(1)* noalias %in1) nounwind {
%val0 = load i32, i32 addrspace(1)* %in0, align 4
%val1 = load i32, i32 addrspace(1)* %in1, align 4
%ctpop0 = call i32 @llvm.ctpop.i32(i32 %val0) nounwind readnone
@@ -64,7 +64,7 @@ define void @v_ctpop_add_chain_i32(i32 addrspace(1)* noalias %out, i32 addrspace
; GCN-NEXT: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL0]], s{{[0-9]+}}
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
-define void @v_ctpop_add_sgpr_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in0, i32 addrspace(1)* noalias %in1, i32 %sval) nounwind {
+define amdgpu_kernel void @v_ctpop_add_sgpr_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in0, i32 addrspace(1)* noalias %in1, i32 %sval) nounwind {
%val0 = load i32, i32 addrspace(1)* %in0, align 4
%ctpop0 = call i32 @llvm.ctpop.i32(i32 %val0) nounwind readnone
%add = add i32 %ctpop0, %sval
@@ -79,7 +79,7 @@ define void @v_ctpop_add_sgpr_i32(i32 addrspace(1)* noalias %out, i32 addrspace(
; EG: BCNT_INT
; EG: BCNT_INT
-define void @v_ctpop_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @v_ctpop_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %in) nounwind {
%val = load <2 x i32>, <2 x i32> addrspace(1)* %in, align 8
%ctpop = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %val) nounwind readnone
store <2 x i32> %ctpop, <2 x i32> addrspace(1)* %out, align 8
@@ -97,7 +97,7 @@ define void @v_ctpop_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrs
; EG: BCNT_INT
; EG: BCNT_INT
; EG: BCNT_INT
-define void @v_ctpop_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @v_ctpop_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %in) nounwind {
%val = load <4 x i32>, <4 x i32> addrspace(1)* %in, align 16
%ctpop = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %val) nounwind readnone
store <4 x i32> %ctpop, <4 x i32> addrspace(1)* %out, align 16
@@ -123,7 +123,7 @@ define void @v_ctpop_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrs
; EG: BCNT_INT
; EG: BCNT_INT
; EG: BCNT_INT
-define void @v_ctpop_v8i32(<8 x i32> addrspace(1)* noalias %out, <8 x i32> addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @v_ctpop_v8i32(<8 x i32> addrspace(1)* noalias %out, <8 x i32> addrspace(1)* noalias %in) nounwind {
%val = load <8 x i32>, <8 x i32> addrspace(1)* %in, align 32
%ctpop = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %val) nounwind readnone
store <8 x i32> %ctpop, <8 x i32> addrspace(1)* %out, align 32
@@ -165,7 +165,7 @@ define void @v_ctpop_v8i32(<8 x i32> addrspace(1)* noalias %out, <8 x i32> addrs
; EG: BCNT_INT
; EG: BCNT_INT
; EG: BCNT_INT
-define void @v_ctpop_v16i32(<16 x i32> addrspace(1)* noalias %out, <16 x i32> addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @v_ctpop_v16i32(<16 x i32> addrspace(1)* noalias %out, <16 x i32> addrspace(1)* noalias %in) nounwind {
%val = load <16 x i32>, <16 x i32> addrspace(1)* %in, align 32
%ctpop = call <16 x i32> @llvm.ctpop.v16i32(<16 x i32> %val) nounwind readnone
store <16 x i32> %ctpop, <16 x i32> addrspace(1)* %out, align 32
@@ -179,7 +179,7 @@ define void @v_ctpop_v16i32(<16 x i32> addrspace(1)* noalias %out, <16 x i32> ad
; GCN: s_endpgm
; EG: BCNT_INT
-define void @v_ctpop_i32_add_inline_constant(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @v_ctpop_i32_add_inline_constant(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
%val = load i32, i32 addrspace(1)* %in, align 4
%ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
%add = add i32 %ctpop, 4
@@ -194,7 +194,7 @@ define void @v_ctpop_i32_add_inline_constant(i32 addrspace(1)* noalias %out, i32
; GCN: s_endpgm
; EG: BCNT_INT
-define void @v_ctpop_i32_add_inline_constant_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @v_ctpop_i32_add_inline_constant_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
%val = load i32, i32 addrspace(1)* %in, align 4
%ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
%add = add i32 4, %ctpop
@@ -209,7 +209,7 @@ define void @v_ctpop_i32_add_inline_constant_inv(i32 addrspace(1)* noalias %out,
; VI: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], [[LIT]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
-define void @v_ctpop_i32_add_literal(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @v_ctpop_i32_add_literal(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
%val = load i32, i32 addrspace(1)* %in, align 4
%ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
%add = add i32 %ctpop, 99999
@@ -225,7 +225,7 @@ define void @v_ctpop_i32_add_literal(i32 addrspace(1)* noalias %out, i32 addrspa
; GCN: s_endpgm
; EG: BCNT_INT
-define void @v_ctpop_i32_add_var(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %const) nounwind {
+define amdgpu_kernel void @v_ctpop_i32_add_var(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %const) nounwind {
%val = load i32, i32 addrspace(1)* %in, align 4
%ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
%add = add i32 %ctpop, %const
@@ -241,7 +241,7 @@ define void @v_ctpop_i32_add_var(i32 addrspace(1)* noalias %out, i32 addrspace(1
; GCN: s_endpgm
; EG: BCNT_INT
-define void @v_ctpop_i32_add_var_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %const) nounwind {
+define amdgpu_kernel void @v_ctpop_i32_add_var_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %const) nounwind {
%val = load i32, i32 addrspace(1)* %in, align 4
%ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
%add = add i32 %const, %ctpop
@@ -258,7 +258,7 @@ define void @v_ctpop_i32_add_var_inv(i32 addrspace(1)* noalias %out, i32 addrspa
; GCN: s_endpgm
; EG: BCNT_INT
-define void @v_ctpop_i32_add_vvar_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 addrspace(1)* noalias %constptr) nounwind {
+define amdgpu_kernel void @v_ctpop_i32_add_vvar_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 addrspace(1)* noalias %constptr) nounwind {
%val = load i32, i32 addrspace(1)* %in, align 4
%ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
%gep = getelementptr i32, i32 addrspace(1)* %constptr, i32 4
@@ -279,7 +279,7 @@ define void @v_ctpop_i32_add_vvar_inv(i32 addrspace(1)* noalias %out, i32 addrsp
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
; EG: BCNT_INT
-define void @ctpop_i32_in_br(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %ctpop_arg, i32 %cond) {
+define amdgpu_kernel void @ctpop_i32_in_br(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %ctpop_arg, i32 %cond) {
entry:
%tmp0 = icmp eq i32 %cond, 0
br i1 %tmp0, label %if, label %else
diff --git a/test/CodeGen/AMDGPU/ctpop64.ll b/test/CodeGen/AMDGPU/ctpop64.ll
index cd5d805e5db3..2610684ad9ee 100644
--- a/test/CodeGen/AMDGPU/ctpop64.ll
+++ b/test/CodeGen/AMDGPU/ctpop64.ll
@@ -17,7 +17,7 @@ declare i128 @llvm.ctpop.i128(i128) nounwind readnone
; GCN: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
; GCN: buffer_store_dword [[VRESULT]],
; GCN: s_endpgm
-define void @s_ctpop_i64(i32 addrspace(1)* noalias %out, i64 %val) nounwind {
+define amdgpu_kernel void @s_ctpop_i64(i32 addrspace(1)* noalias %out, i64 %val) nounwind {
%ctpop = call i64 @llvm.ctpop.i64(i64 %val) nounwind readnone
%truncctpop = trunc i64 %ctpop to i32
store i32 %truncctpop, i32 addrspace(1)* %out, align 4
@@ -31,7 +31,7 @@ define void @s_ctpop_i64(i32 addrspace(1)* noalias %out, i64 %val) nounwind {
; VI-NEXT: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
-define void @v_ctpop_i64(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @v_ctpop_i64(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
%val = load i64, i64 addrspace(1)* %in, align 8
%ctpop = call i64 @llvm.ctpop.i64(i64 %val) nounwind readnone
%truncctpop = trunc i64 %ctpop to i32
@@ -48,7 +48,7 @@ define void @v_ctpop_i64(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noali
; GCN-DAG: v_mov_b32_e32 v[[RESULT_HI:[0-9]+]], s{{[0-9]+}}
; GCN: buffer_store_dwordx2 v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
; GCN: s_endpgm
-define void @v_ctpop_i64_user(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 %s.val) nounwind {
+define amdgpu_kernel void @v_ctpop_i64_user(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 %s.val) nounwind {
%val = load i64, i64 addrspace(1)* %in, align 8
%ctpop = call i64 @llvm.ctpop.i64(i64 %val) nounwind readnone
%or = or i64 %ctpop, %s.val
@@ -60,7 +60,7 @@ define void @v_ctpop_i64_user(i64 addrspace(1)* noalias %out, i64 addrspace(1)*
; GCN: s_bcnt1_i32_b64
; GCN: s_bcnt1_i32_b64
; GCN: s_endpgm
-define void @s_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> %val) nounwind {
+define amdgpu_kernel void @s_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> %val) nounwind {
%ctpop = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %val) nounwind readnone
%truncctpop = trunc <2 x i64> %ctpop to <2 x i32>
store <2 x i32> %truncctpop, <2 x i32> addrspace(1)* %out, align 8
@@ -73,7 +73,7 @@ define void @s_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> %val)
; GCN: s_bcnt1_i32_b64
; GCN: s_bcnt1_i32_b64
; GCN: s_endpgm
-define void @s_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> %val) nounwind {
+define amdgpu_kernel void @s_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> %val) nounwind {
%ctpop = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %val) nounwind readnone
%truncctpop = trunc <4 x i64> %ctpop to <4 x i32>
store <4 x i32> %truncctpop, <4 x i32> addrspace(1)* %out, align 16
@@ -86,7 +86,7 @@ define void @s_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> %val)
; GCN: v_bcnt_u32_b32
; GCN: v_bcnt_u32_b32
; GCN: s_endpgm
-define void @v_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @v_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in) nounwind {
%val = load <2 x i64>, <2 x i64> addrspace(1)* %in, align 16
%ctpop = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %val) nounwind readnone
%truncctpop = trunc <2 x i64> %ctpop to <2 x i32>
@@ -104,7 +104,7 @@ define void @v_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> addrs
; GCN: v_bcnt_u32_b32
; GCN: v_bcnt_u32_b32
; GCN: s_endpgm
-define void @v_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @v_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> addrspace(1)* noalias %in) nounwind {
%val = load <4 x i64>, <4 x i64> addrspace(1)* %in, align 32
%ctpop = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %val) nounwind readnone
%truncctpop = trunc <4 x i64> %ctpop to <4 x i32>
@@ -121,7 +121,7 @@ define void @v_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> addrs
; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], [[ZERO]]
; GCN: buffer_store_dwordx2 {{v\[}}[[VLO]]:[[VHI]]{{\]}}
; GCN: s_endpgm
-define void @ctpop_i64_in_br(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %ctpop_arg, i32 %cond) {
+define amdgpu_kernel void @ctpop_i64_in_br(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %ctpop_arg, i32 %cond) {
entry:
%tmp0 = icmp eq i32 %cond, 0
br i1 %tmp0, label %if, label %else
@@ -146,7 +146,7 @@ endif:
; GCN: s_bcnt1_i32_b64 [[SRESULT1:s[0-9]+]],
; GCN: s_add_i32 s{{[0-9]+}}, [[SRESULT1]], [[SRESULT0]]
; GCN: s_endpgm
-define void @s_ctpop_i128(i32 addrspace(1)* noalias %out, i128 %val) nounwind {
+define amdgpu_kernel void @s_ctpop_i128(i32 addrspace(1)* noalias %out, i128 %val) nounwind {
%ctpop = call i128 @llvm.ctpop.i128(i128 %val) nounwind readnone
%truncctpop = trunc i128 %ctpop to i32
store i32 %truncctpop, i32 addrspace(1)* %out, align 4
@@ -159,7 +159,7 @@ define void @s_ctpop_i128(i32 addrspace(1)* noalias %out, i128 %val) nounwind {
; GCN: s_bcnt1_i32_b64 [[REG1:s[0-9]+]],
; GCN: s_add_i32 {{s[0-9]+}}, [[REG0]], [[REG1]]
; GCN: s_endpgm
-define void @s_ctpop_i65(i32 addrspace(1)* noalias %out, i65 %val) nounwind {
+define amdgpu_kernel void @s_ctpop_i65(i32 addrspace(1)* noalias %out, i65 %val) nounwind {
%ctpop = call i65 @llvm.ctpop.i65(i65 %val) nounwind readnone
%truncctpop = trunc i65 %ctpop to i32
store i32 %truncctpop, i32 addrspace(1)* %out, align 4
@@ -181,7 +181,7 @@ define void @s_ctpop_i65(i32 addrspace(1)* noalias %out, i65 %val) nounwind {
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
-define void @v_ctpop_i128(i32 addrspace(1)* noalias %out, i128 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @v_ctpop_i128(i32 addrspace(1)* noalias %out, i128 addrspace(1)* noalias %in) nounwind {
%val = load i128, i128 addrspace(1)* %in, align 8
%ctpop = call i128 @llvm.ctpop.i128(i128 %val) nounwind readnone
%truncctpop = trunc i128 %ctpop to i32
diff --git a/test/CodeGen/AMDGPU/cttz_zero_undef.ll b/test/CodeGen/AMDGPU/cttz_zero_undef.ll
index e33cc18eb05f..1fa6407647eb 100644
--- a/test/CodeGen/AMDGPU/cttz_zero_undef.ll
+++ b/test/CodeGen/AMDGPU/cttz_zero_undef.ll
@@ -14,7 +14,7 @@ declare <4 x i32> @llvm.cttz.v4i32(<4 x i32>, i1) nounwind readnone
; SI: s_endpgm
; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+\.[XYZW]]]
; EG: FFBL_INT {{\*? *}}[[RESULT]]
-define void @s_cttz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
+define amdgpu_kernel void @s_cttz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
%cttz = call i32 @llvm.cttz.i32(i32 %val, i1 true) nounwind readnone
store i32 %cttz, i32 addrspace(1)* %out, align 4
ret void
@@ -27,7 +27,7 @@ define void @s_cttz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 %val) nou
; SI: s_endpgm
; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+\.[XYZW]]]
; EG: FFBL_INT {{\*? *}}[[RESULT]]
-define void @v_cttz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_cttz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
%val = load i32, i32 addrspace(1)* %valptr, align 4
%cttz = call i32 @llvm.cttz.i32(i32 %val, i1 true) nounwind readnone
store i32 %cttz, i32 addrspace(1)* %out, align 4
@@ -43,7 +43,7 @@ define void @v_cttz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 addrspace
; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+]]{{\.[XYZW]}}
; EG: FFBL_INT {{\*? *}}[[RESULT]]
; EG: FFBL_INT {{\*? *}}[[RESULT]]
-define void @v_cttz_zero_undef_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_cttz_zero_undef_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) nounwind {
%val = load <2 x i32>, <2 x i32> addrspace(1)* %valptr, align 8
%cttz = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %val, i1 true) nounwind readnone
store <2 x i32> %cttz, <2 x i32> addrspace(1)* %out, align 8
@@ -63,7 +63,7 @@ define void @v_cttz_zero_undef_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x
; EG: FFBL_INT {{\*? *}}[[RESULT]]
; EG: FFBL_INT {{\*? *}}[[RESULT]]
; EG: FFBL_INT {{\*? *}}[[RESULT]]
-define void @v_cttz_zero_undef_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %valptr) nounwind {
+define amdgpu_kernel void @v_cttz_zero_undef_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %valptr) nounwind {
%val = load <4 x i32>, <4 x i32> addrspace(1)* %valptr, align 16
%cttz = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %val, i1 true) nounwind readnone
store <4 x i32> %cttz, <4 x i32> addrspace(1)* %out, align 16
diff --git a/test/CodeGen/AMDGPU/cube.ll b/test/CodeGen/AMDGPU/cube.ll
index 9b512c439b0e..7b5f1aff7ea6 100644
--- a/test/CodeGen/AMDGPU/cube.ll
+++ b/test/CodeGen/AMDGPU/cube.ll
@@ -6,16 +6,13 @@ declare float @llvm.amdgcn.cubesc(float, float, float) #0
declare float @llvm.amdgcn.cubetc(float, float, float) #0
declare float @llvm.amdgcn.cubema(float, float, float) #0
-declare <4 x float> @llvm.AMDGPU.cube(<4 x float>) #0
-
-
; GCN-LABEL: {{^}}cube:
; GCN-DAG: v_cubeid_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN-DAG: v_cubesc_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN-DAG: v_cubetc_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN-DAG: v_cubema_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: _store_dwordx4
-define void @cube(<4 x float> addrspace(1)* %out, float %a, float %b, float %c) #1 {
+define amdgpu_kernel void @cube(<4 x float> addrspace(1)* %out, float %a, float %b, float %c) #1 {
%cubeid = call float @llvm.amdgcn.cubeid(float %a, float %b, float %c)
%cubesc = call float @llvm.amdgcn.cubesc(float %a, float %b, float %c)
%cubetc = call float @llvm.amdgcn.cubetc(float %a, float %b, float %c)
@@ -29,18 +26,5 @@ define void @cube(<4 x float> addrspace(1)* %out, float %a, float %b, float %c)
ret void
}
-; GCN-LABEL: {{^}}legacy_cube:
-; GCN-DAG: v_cubeid_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: v_cubesc_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: v_cubetc_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: v_cubema_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
-; GCN: _store_dwordx4
-define void @legacy_cube(<4 x float> addrspace(1)* %out, <4 x float> %abcx) #1 {
- %cube = call <4 x float> @llvm.AMDGPU.cube(<4 x float> %abcx)
- store <4 x float> %cube, <4 x float> addrspace(1)* %out
- ret void
-}
-
attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }
-
diff --git a/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll b/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
index 7baaa81fba59..e16daa6fad9d 100644
--- a/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
+++ b/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
@@ -10,7 +10,7 @@ declare i32 @llvm.amdgcn.workitem.id.y() nounwind readnone
; GCN-NOT: lshr
; GCN: v_cvt_f32_ubyte0_e32 [[CONV:v[0-9]+]], [[LOADREG]]
; GCN: buffer_store_dword [[CONV]],
-define void @load_i8_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @load_i8_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
%load = load i8, i8 addrspace(1)* %in, align 1
%cvt = uitofp i8 %load to float
store float %cvt, float addrspace(1)* %out, align 4
@@ -22,7 +22,7 @@ define void @load_i8_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* n
; GCN-DAG: v_cvt_f32_ubyte1_e32 v[[HIRESULT:[0-9]+]], [[LD]]
; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[LD]]
; GCN: buffer_store_dwordx2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
-define void @load_v2i8_to_v2f32(<2 x float> addrspace(1)* noalias %out, <2 x i8> addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @load_v2i8_to_v2f32(<2 x float> addrspace(1)* noalias %out, <2 x i8> addrspace(1)* noalias %in) nounwind {
%load = load <2 x i8>, <2 x i8> addrspace(1)* %in, align 2
%cvt = uitofp <2 x i8> %load to <2 x float>
store <2 x float> %cvt, <2 x float> addrspace(1)* %out, align 16
@@ -36,7 +36,7 @@ define void @load_v2i8_to_v2f32(<2 x float> addrspace(1)* noalias %out, <2 x i8>
; GCN-DAG: v_cvt_f32_ubyte1_e32 v[[HIRESULT:[0-9]+]], [[VAL]]
; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[VAL]]
; GCN: buffer_store_dwordx2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
-define void @load_v3i8_to_v3f32(<3 x float> addrspace(1)* noalias %out, <3 x i8> addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @load_v3i8_to_v3f32(<3 x float> addrspace(1)* noalias %out, <3 x i8> addrspace(1)* noalias %in) nounwind {
%load = load <3 x i8>, <3 x i8> addrspace(1)* %in, align 4
%cvt = uitofp <3 x i8> %load to <3 x float>
store <3 x float> %cvt, <3 x float> addrspace(1)* %out, align 16
@@ -52,7 +52,7 @@ define void @load_v3i8_to_v3f32(<3 x float> addrspace(1)* noalias %out, <3 x i8>
; GCN-DAG: v_cvt_f32_ubyte1_e32 v{{[0-9]+}}, [[LOADREG]]
; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[LOADREG]]
; GCN: buffer_store_dwordx4 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
-define void @load_v4i8_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @load_v4i8_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
%load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
%cvt = uitofp <4 x i8> %load to <4 x float>
store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
@@ -76,7 +76,7 @@ define void @load_v4i8_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8>
; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[HIRESULT:[0-9]+]]
; GCN: buffer_store_dwordx4
-define void @load_v4i8_to_v4f32_unaligned(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @load_v4i8_to_v4f32_unaligned(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
%load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 1
%cvt = uitofp <4 x i8> %load to <4 x float>
store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
@@ -110,7 +110,7 @@ define void @load_v4i8_to_v4f32_unaligned(<4 x float> addrspace(1)* noalias %out
; GCN: {{buffer|flat}}_store_dword
; GCN: s_endpgm
-define void @load_v4i8_to_v4f32_2_uses(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %out2, <4 x i8> addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @load_v4i8_to_v4f32_2_uses(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %out2, <4 x i8> addrspace(1)* noalias %in) nounwind {
%tid.x = call i32 @llvm.amdgcn.workitem.id.x()
%in.ptr = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid.x
%load = load <4 x i8>, <4 x i8> addrspace(1)* %in.ptr, align 4
@@ -124,7 +124,7 @@ define void @load_v4i8_to_v4f32_2_uses(<4 x float> addrspace(1)* noalias %out, <
; Make sure this doesn't crash.
; GCN-LABEL: {{^}}load_v7i8_to_v7f32:
; GCN: s_endpgm
-define void @load_v7i8_to_v7f32(<7 x float> addrspace(1)* noalias %out, <7 x i8> addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @load_v7i8_to_v7f32(<7 x float> addrspace(1)* noalias %out, <7 x i8> addrspace(1)* noalias %in) nounwind {
%load = load <7 x i8>, <7 x i8> addrspace(1)* %in, align 1
%cvt = uitofp <7 x i8> %load to <7 x float>
store <7 x float> %cvt, <7 x float> addrspace(1)* %out, align 16
@@ -147,7 +147,7 @@ define void @load_v7i8_to_v7f32(<7 x float> addrspace(1)* noalias %out, <7 x i8>
; GCN-NOT: lshr
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
-define void @load_v8i8_to_v8f32(<8 x float> addrspace(1)* noalias %out, <8 x i8> addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @load_v8i8_to_v8f32(<8 x float> addrspace(1)* noalias %out, <8 x i8> addrspace(1)* noalias %in) nounwind {
%load = load <8 x i8>, <8 x i8> addrspace(1)* %in, align 8
%cvt = uitofp <8 x i8> %load to <8 x float>
store <8 x float> %cvt, <8 x float> addrspace(1)* %out, align 16
@@ -159,7 +159,7 @@ define void @load_v8i8_to_v8f32(<8 x float> addrspace(1)* noalias %out, <8 x i8>
; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, 2, [[LOADREG]]
; GCN-NEXT: v_cvt_f32_ubyte0_e32 [[CONV:v[0-9]+]], [[ADD]]
; GCN: buffer_store_dword [[CONV]],
-define void @i8_zext_inreg_i32_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @i8_zext_inreg_i32_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
%load = load i32, i32 addrspace(1)* %in, align 4
%add = add i32 %load, 2
%inreg = and i32 %add, 255
@@ -169,7 +169,7 @@ define void @i8_zext_inreg_i32_to_f32(float addrspace(1)* noalias %out, i32 addr
}
; GCN-LABEL: {{^}}i8_zext_inreg_hi1_to_f32:
-define void @i8_zext_inreg_hi1_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @i8_zext_inreg_hi1_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
%load = load i32, i32 addrspace(1)* %in, align 4
%inreg = and i32 %load, 65280
%shr = lshr i32 %inreg, 8
@@ -181,7 +181,7 @@ define void @i8_zext_inreg_hi1_to_f32(float addrspace(1)* noalias %out, i32 addr
; We don't get these ones because of the zext, but instcombine removes
; them so it shouldn't really matter.
; GCN-LABEL: {{^}}i8_zext_i32_to_f32:
-define void @i8_zext_i32_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @i8_zext_i32_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
%load = load i8, i8 addrspace(1)* %in, align 1
%ext = zext i8 %load to i32
%cvt = uitofp i32 %ext to float
@@ -190,7 +190,7 @@ define void @i8_zext_i32_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1
}
; GCN-LABEL: {{^}}v4i8_zext_v4i32_to_v4f32:
-define void @v4i8_zext_v4i32_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @v4i8_zext_v4i32_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
%load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 1
%ext = zext <4 x i8> %load to <4 x i32>
%cvt = uitofp <4 x i32> %ext to <4 x float>
@@ -203,7 +203,7 @@ define void @v4i8_zext_v4i32_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4
; GCN-NOT: [[VAL]]
; GCN: v_cvt_f32_ubyte0_e32 [[CONV:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[CONV]]
-define void @extract_byte0_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @extract_byte0_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
%val = load i32, i32 addrspace(1)* %in
%and = and i32 %val, 255
%cvt = uitofp i32 %and to float
@@ -216,7 +216,7 @@ define void @extract_byte0_to_f32(float addrspace(1)* noalias %out, i32 addrspac
; GCN-NOT: [[VAL]]
; GCN: v_cvt_f32_ubyte1_e32 [[CONV:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[CONV]]
-define void @extract_byte1_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @extract_byte1_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
%val = load i32, i32 addrspace(1)* %in
%srl = lshr i32 %val, 8
%and = and i32 %srl, 255
@@ -230,7 +230,7 @@ define void @extract_byte1_to_f32(float addrspace(1)* noalias %out, i32 addrspac
; GCN-NOT: [[VAL]]
; GCN: v_cvt_f32_ubyte2_e32 [[CONV:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[CONV]]
-define void @extract_byte2_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @extract_byte2_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
%val = load i32, i32 addrspace(1)* %in
%srl = lshr i32 %val, 16
%and = and i32 %srl, 255
@@ -244,7 +244,7 @@ define void @extract_byte2_to_f32(float addrspace(1)* noalias %out, i32 addrspac
; GCN-NOT: [[VAL]]
; GCN: v_cvt_f32_ubyte3_e32 [[CONV:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[CONV]]
-define void @extract_byte3_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @extract_byte3_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
%val = load i32, i32 addrspace(1)* %in
%srl = lshr i32 %val, 24
%and = and i32 %srl, 255
diff --git a/test/CodeGen/AMDGPU/cvt_flr_i32_f32.ll b/test/CodeGen/AMDGPU/cvt_flr_i32_f32.ll
index e7773c6e2a4f..c10cf1a8a6f2 100644
--- a/test/CodeGen/AMDGPU/cvt_flr_i32_f32.ll
+++ b/test/CodeGen/AMDGPU/cvt_flr_i32_f32.ll
@@ -10,7 +10,7 @@ declare float @llvm.floor.f32(float) #1
; SI-NOT: add
; SI-NONAN: v_cvt_flr_i32_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; SI: s_endpgm
-define void @cvt_flr_i32_f32_0(i32 addrspace(1)* %out, float %x) #0 {
+define amdgpu_kernel void @cvt_flr_i32_f32_0(i32 addrspace(1)* %out, float %x) #0 {
%floor = call float @llvm.floor.f32(float %x) #1
%cvt = fptosi float %floor to i32
store i32 %cvt, i32 addrspace(1)* %out
@@ -22,7 +22,7 @@ define void @cvt_flr_i32_f32_0(i32 addrspace(1)* %out, float %x) #0 {
; SI-SAFE-NOT: v_cvt_flr_i32_f32
; SI-NONAN: v_cvt_flr_i32_f32_e32 v{{[0-9]+}}, [[TMP]]
; SI: s_endpgm
-define void @cvt_flr_i32_f32_1(i32 addrspace(1)* %out, float %x) #0 {
+define amdgpu_kernel void @cvt_flr_i32_f32_1(i32 addrspace(1)* %out, float %x) #0 {
%fadd = fadd float %x, 1.0
%floor = call float @llvm.floor.f32(float %fadd) #1
%cvt = fptosi float %floor to i32
@@ -35,7 +35,7 @@ define void @cvt_flr_i32_f32_1(i32 addrspace(1)* %out, float %x) #0 {
; SI-SAFE-NOT: v_cvt_flr_i32_f32
; SI-NONAN: v_cvt_flr_i32_f32_e64 v{{[0-9]+}}, |s{{[0-9]+}}|
; SI: s_endpgm
-define void @cvt_flr_i32_f32_fabs(i32 addrspace(1)* %out, float %x) #0 {
+define amdgpu_kernel void @cvt_flr_i32_f32_fabs(i32 addrspace(1)* %out, float %x) #0 {
%x.fabs = call float @llvm.fabs.f32(float %x) #1
%floor = call float @llvm.floor.f32(float %x.fabs) #1
%cvt = fptosi float %floor to i32
@@ -48,7 +48,7 @@ define void @cvt_flr_i32_f32_fabs(i32 addrspace(1)* %out, float %x) #0 {
; SI-SAFE-NOT: v_cvt_flr_i32_f32
; SI-NONAN: v_cvt_flr_i32_f32_e64 v{{[0-9]+}}, -s{{[0-9]+}}
; SI: s_endpgm
-define void @cvt_flr_i32_f32_fneg(i32 addrspace(1)* %out, float %x) #0 {
+define amdgpu_kernel void @cvt_flr_i32_f32_fneg(i32 addrspace(1)* %out, float %x) #0 {
%x.fneg = fsub float -0.000000e+00, %x
%floor = call float @llvm.floor.f32(float %x.fneg) #1
%cvt = fptosi float %floor to i32
@@ -61,7 +61,7 @@ define void @cvt_flr_i32_f32_fneg(i32 addrspace(1)* %out, float %x) #0 {
; SI-SAFE-NOT: v_cvt_flr_i32_f32
; SI-NONAN: v_cvt_flr_i32_f32_e64 v{{[0-9]+}}, -|s{{[0-9]+}}|
; SI: s_endpgm
-define void @cvt_flr_i32_f32_fabs_fneg(i32 addrspace(1)* %out, float %x) #0 {
+define amdgpu_kernel void @cvt_flr_i32_f32_fabs_fneg(i32 addrspace(1)* %out, float %x) #0 {
%x.fabs = call float @llvm.fabs.f32(float %x) #1
%x.fabs.fneg = fsub float -0.000000e+00, %x.fabs
%floor = call float @llvm.floor.f32(float %x.fabs.fneg) #1
@@ -75,7 +75,7 @@ define void @cvt_flr_i32_f32_fabs_fneg(i32 addrspace(1)* %out, float %x) #0 {
; SI: v_floor_f32
; SI: v_cvt_u32_f32_e32
; SI: s_endpgm
-define void @no_cvt_flr_i32_f32_0(i32 addrspace(1)* %out, float %x) #0 {
+define amdgpu_kernel void @no_cvt_flr_i32_f32_0(i32 addrspace(1)* %out, float %x) #0 {
%floor = call float @llvm.floor.f32(float %x) #1
%cvt = fptoui float %floor to i32
store i32 %cvt, i32 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/cvt_rpi_i32_f32.ll b/test/CodeGen/AMDGPU/cvt_rpi_i32_f32.ll
index d38411dcca61..9b771ebdf7b3 100644
--- a/test/CodeGen/AMDGPU/cvt_rpi_i32_f32.ll
+++ b/test/CodeGen/AMDGPU/cvt_rpi_i32_f32.ll
@@ -9,7 +9,7 @@ declare float @llvm.floor.f32(float) #1
; SI-SAFE-NOT: v_cvt_rpi_i32_f32
; SI-NONAN: v_cvt_rpi_i32_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; SI: s_endpgm
-define void @cvt_rpi_i32_f32(i32 addrspace(1)* %out, float %x) #0 {
+define amdgpu_kernel void @cvt_rpi_i32_f32(i32 addrspace(1)* %out, float %x) #0 {
%fadd = fadd float %x, 0.5
%floor = call float @llvm.floor.f32(float %fadd) #1
%cvt = fptosi float %floor to i32
@@ -21,7 +21,7 @@ define void @cvt_rpi_i32_f32(i32 addrspace(1)* %out, float %x) #0 {
; SI-SAFE-NOT: v_cvt_rpi_i32_f32
; SI-NONAN: v_cvt_rpi_i32_f32_e64 v{{[0-9]+}}, |s{{[0-9]+}}|{{$}}
; SI: s_endpgm
-define void @cvt_rpi_i32_f32_fabs(i32 addrspace(1)* %out, float %x) #0 {
+define amdgpu_kernel void @cvt_rpi_i32_f32_fabs(i32 addrspace(1)* %out, float %x) #0 {
%x.fabs = call float @llvm.fabs.f32(float %x) #1
%fadd = fadd float %x.fabs, 0.5
%floor = call float @llvm.floor.f32(float %fadd) #1
@@ -37,7 +37,7 @@ define void @cvt_rpi_i32_f32_fabs(i32 addrspace(1)* %out, float %x) #0 {
; SI-SAFE-NOT: v_cvt_flr_i32_f32
; SI-NONAN: v_cvt_flr_i32_f32_e32 {{v[0-9]+}}, [[TMP]]
; SI: s_endpgm
-define void @cvt_rpi_i32_f32_fneg(i32 addrspace(1)* %out, float %x) #0 {
+define amdgpu_kernel void @cvt_rpi_i32_f32_fneg(i32 addrspace(1)* %out, float %x) #0 {
%x.fneg = fsub float -0.000000e+00, %x
%fadd = fadd float %x.fneg, 0.5
%floor = call float @llvm.floor.f32(float %fadd) #1
@@ -55,7 +55,7 @@ define void @cvt_rpi_i32_f32_fneg(i32 addrspace(1)* %out, float %x) #0 {
; SI-SAFE-NOT: v_cvt_flr_i32_f32
; SI-NONAN: v_cvt_flr_i32_f32_e32 {{v[0-9]+}}, [[TMP]]
; SI: s_endpgm
-define void @cvt_rpi_i32_f32_fabs_fneg(i32 addrspace(1)* %out, float %x) #0 {
+define amdgpu_kernel void @cvt_rpi_i32_f32_fabs_fneg(i32 addrspace(1)* %out, float %x) #0 {
%x.fabs = call float @llvm.fabs.f32(float %x) #1
%x.fabs.fneg = fsub float -0.000000e+00, %x.fabs
%fadd = fadd float %x.fabs.fneg, 0.5
@@ -71,7 +71,7 @@ define void @cvt_rpi_i32_f32_fabs_fneg(i32 addrspace(1)* %out, float %x) #0 {
; SI: v_floor_f32
; SI: v_cvt_u32_f32
; SI: s_endpgm
-define void @no_cvt_rpi_i32_f32_0(i32 addrspace(1)* %out, float %x) #0 {
+define amdgpu_kernel void @no_cvt_rpi_i32_f32_0(i32 addrspace(1)* %out, float %x) #0 {
%fadd = fadd float %x, 0.5
%floor = call float @llvm.floor.f32(float %fadd) #1
%cvt = fptoui float %floor to i32
diff --git a/test/CodeGen/AMDGPU/dagcombine-reassociate-bug.ll b/test/CodeGen/AMDGPU/dagcombine-reassociate-bug.ll
index a32c16dfac38..11acbc274eb5 100644
--- a/test/CodeGen/AMDGPU/dagcombine-reassociate-bug.ll
+++ b/test/CodeGen/AMDGPU/dagcombine-reassociate-bug.ll
@@ -9,7 +9,7 @@
; CHECK: buffer_store_dword v{{[0-9]+}}, [[VADDR]], [[SADDR]]
; CHECK: buffer_store_dword v{{[0-9]+}}, [[VADDR]], [[SADDR]]
-define void @store_same_base_ptr(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @store_same_base_ptr(i32 addrspace(1)* %out) {
entry:
%id = call i32 @llvm.amdgcn.workitem.id.x() #0
%offset = sext i32 %id to i64
diff --git a/test/CodeGen/AMDGPU/dagcombiner-bug-illegal-vec4-int-to-fp.ll b/test/CodeGen/AMDGPU/dagcombiner-bug-illegal-vec4-int-to-fp.ll
index fb43ff4fbddd..ceff889b3a7e 100644
--- a/test/CodeGen/AMDGPU/dagcombiner-bug-illegal-vec4-int-to-fp.ll
+++ b/test/CodeGen/AMDGPU/dagcombiner-bug-illegal-vec4-int-to-fp.ll
@@ -10,7 +10,7 @@
; CHECK: {{^}}sint:
; CHECK: INT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @sint(<4 x float> addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @sint(<4 x float> addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%sint = load i32, i32 addrspace(1) * %in
@@ -24,7 +24,7 @@ entry:
;CHECK: {{^}}uint:
;CHECK: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @uint(<4 x float> addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @uint(<4 x float> addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%uint = load i32, i32 addrspace(1) * %in
diff --git a/test/CodeGen/AMDGPU/debug.ll b/test/CodeGen/AMDGPU/debug.ll
index a2e0e878b740..f149aaddb8ef 100644
--- a/test/CodeGen/AMDGPU/debug.ll
+++ b/test/CodeGen/AMDGPU/debug.ll
@@ -4,7 +4,7 @@
; Test for a crash in the custom assembly dump code.
; SI: s_endpgm
-define void @test(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @test(i32 addrspace(1)* %out) {
store i32 0, i32 addrspace(1)* %out
ret void
}
diff --git a/test/CodeGen/AMDGPU/debugger-emit-prologue.ll b/test/CodeGen/AMDGPU/debugger-emit-prologue.ll
index 49a7e722f29c..734905ba2b08 100644
--- a/test/CodeGen/AMDGPU/debugger-emit-prologue.ll
+++ b/test/CodeGen/AMDGPU/debugger-emit-prologue.ll
@@ -23,7 +23,7 @@
; NOATTR-NOT: DebuggerPrivateSegmentBufferSGPR
; Function Attrs: nounwind
-define void @test(i32 addrspace(1)* %A) #0 !dbg !12 {
+define amdgpu_kernel void @test(i32 addrspace(1)* %A) #0 !dbg !12 {
entry:
%A.addr = alloca i32 addrspace(1)*, align 4
store i32 addrspace(1)* %A, i32 addrspace(1)** %A.addr, align 4
diff --git a/test/CodeGen/AMDGPU/debugger-insert-nops.ll b/test/CodeGen/AMDGPU/debugger-insert-nops.ll
index 6638f4e25821..fcdbfb10a8ca 100644
--- a/test/CodeGen/AMDGPU/debugger-insert-nops.ll
+++ b/test/CodeGen/AMDGPU/debugger-insert-nops.ll
@@ -1,27 +1,35 @@
-; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=+amdgpu-debugger-insert-nops -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=+amdgpu-debugger-insert-nops -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK
+; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=+amdgpu-debugger-insert-nops -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECKNOP
-; CHECK: test01.cl:2:{{[0-9]+}}
-; CHECK-NEXT: s_nop 0
+; This test expects one instance of each source line, in some order, each followed by an "s_nop 0".
-; CHECK: test01.cl:3:{{[0-9]+}}
-; CHECK-NEXT: s_nop 0
+; Check that each line appears at least once
+; CHECK-DAG: test01.cl:2:3
+; CHECK-DAG: test01.cl:3:3
+; CHECK-DAG: test01.cl:4:3
-; CHECK: test01.cl:4:{{[0-9]+}}
-; CHECK-NEXT: s_nop 0
+
+; Check that each of the lines consists of the line output, followed by "s_nop 0".
+; CHECKNOP: test01.cl:{{[234]}}:3
+; CHECKNOP-NEXT: s_nop 0
+; CHECKNOP: test01.cl:{{[234]}}:3
+; CHECKNOP-NEXT: s_nop 0
+; CHECKNOP: test01.cl:{{[234]}}:3
+; CHECKNOP-NEXT: s_nop 0
; CHECK: test01.cl:5:{{[0-9]+}}
; CHECK-NEXT: s_nop 0
; CHECK-NEXT: s_endpgm
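+;
+; For illustration only (not checked by FileCheck): with -O0 and
+; +amdgpu-debugger-insert-nops, the emitted assembly is expected to look
+; roughly like
+;   test01.cl:2:3
+;   s_nop 0
+;   test01.cl:3:3
+;   s_nop 0
+;   test01.cl:4:3
+;   s_nop 0
+;   test01.cl:5:3
+;   s_nop 0
+;   s_endpgm
+; where the three stores (lines 2-4) may be reordered, but line 5 and its nop
+; must come last, immediately before s_endpgm.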
; Function Attrs: nounwind
-define void @test(i32 addrspace(1)* %A) #0 !dbg !12 {
+define amdgpu_kernel void @test(i32 addrspace(1)* %A) #0 !dbg !12 {
entry:
%A.addr = alloca i32 addrspace(1)*, align 4
store i32 addrspace(1)* %A, i32 addrspace(1)** %A.addr, align 4
call void @llvm.dbg.declare(metadata i32 addrspace(1)** %A.addr, metadata !17, metadata !18), !dbg !19
%0 = load i32 addrspace(1)*, i32 addrspace(1)** %A.addr, align 4, !dbg !20
%arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %0, i32 0, !dbg !20
- store i32 1, i32 addrspace(1)* %arrayidx, align 4, !dbg !21
+ store i32 1, i32 addrspace(1)* %arrayidx, align 4, !dbg !20
%1 = load i32 addrspace(1)*, i32 addrspace(1)** %A.addr, align 4, !dbg !22
%arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %1, i32 1, !dbg !22
store i32 2, i32 addrspace(1)* %arrayidx1, align 4, !dbg !23
diff --git a/test/CodeGen/AMDGPU/debugger-reserve-regs.ll b/test/CodeGen/AMDGPU/debugger-reserve-regs.ll
index d30bb20bb03a..764c60b12bf9 100644
--- a/test/CodeGen/AMDGPU/debugger-reserve-regs.ll
+++ b/test/CodeGen/AMDGPU/debugger-reserve-regs.ll
@@ -1,11 +1,12 @@
; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=+amdgpu-debugger-reserve-regs -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=gfx901 -mattr=+amdgpu-debugger-reserve-regs -verify-machineinstrs < %s | FileCheck %s
; CHECK: reserved_vgpr_first = {{[0-9]+}}
; CHECK-NEXT: reserved_vgpr_count = 4
; CHECK: ReservedVGPRFirst: {{[0-9]+}}
; CHECK-NEXT: ReservedVGPRCount: 4
; Function Attrs: nounwind
-define void @test(i32 addrspace(1)* %A) #0 !dbg !12 {
+define amdgpu_kernel void @test(i32 addrspace(1)* %A) #0 !dbg !12 {
entry:
%A.addr = alloca i32 addrspace(1)*, align 4
store i32 addrspace(1)* %A, i32 addrspace(1)** %A.addr, align 4
diff --git a/test/CodeGen/AMDGPU/default-fp-mode.ll b/test/CodeGen/AMDGPU/default-fp-mode.ll
index 28d065e3b32b..ad9111a28654 100644
--- a/test/CodeGen/AMDGPU/default-fp-mode.ll
+++ b/test/CodeGen/AMDGPU/default-fp-mode.ll
@@ -3,7 +3,7 @@
; GCN-LABEL: {{^}}test_default_si:
; GCN: FloatMode: 192
; GCN: IeeeMode: 1
-define void @test_default_si(float addrspace(1)* %out0, double addrspace(1)* %out1) #0 {
+define amdgpu_kernel void @test_default_si(float addrspace(1)* %out0, double addrspace(1)* %out1) #0 {
store float 0.0, float addrspace(1)* %out0
store double 0.0, double addrspace(1)* %out1
ret void
@@ -12,7 +12,7 @@ define void @test_default_si(float addrspace(1)* %out0, double addrspace(1)* %ou
; GCN-LABEL: {{^}}test_default_vi:
; GCN: FloatMode: 192
; GCN: IeeeMode: 1
-define void @test_default_vi(float addrspace(1)* %out0, double addrspace(1)* %out1) #1 {
+define amdgpu_kernel void @test_default_vi(float addrspace(1)* %out0, double addrspace(1)* %out1) #1 {
store float 0.0, float addrspace(1)* %out0
store double 0.0, double addrspace(1)* %out1
ret void
@@ -21,7 +21,7 @@ define void @test_default_vi(float addrspace(1)* %out0, double addrspace(1)* %ou
; GCN-LABEL: {{^}}test_f64_denormals:
; GCN: FloatMode: 192
; GCN: IeeeMode: 1
-define void @test_f64_denormals(float addrspace(1)* %out0, double addrspace(1)* %out1) #2 {
+define amdgpu_kernel void @test_f64_denormals(float addrspace(1)* %out0, double addrspace(1)* %out1) #2 {
store float 0.0, float addrspace(1)* %out0
store double 0.0, double addrspace(1)* %out1
ret void
@@ -30,7 +30,7 @@ define void @test_f64_denormals(float addrspace(1)* %out0, double addrspace(1)*
; GCN-LABEL: {{^}}test_f32_denormals:
; GCN: FloatMode: 48
; GCN: IeeeMode: 1
-define void @test_f32_denormals(float addrspace(1)* %out0, double addrspace(1)* %out1) #3 {
+define amdgpu_kernel void @test_f32_denormals(float addrspace(1)* %out0, double addrspace(1)* %out1) #3 {
store float 0.0, float addrspace(1)* %out0
store double 0.0, double addrspace(1)* %out1
ret void
@@ -39,7 +39,7 @@ define void @test_f32_denormals(float addrspace(1)* %out0, double addrspace(1)*
; GCN-LABEL: {{^}}test_f32_f64_denormals:
; GCN: FloatMode: 240
; GCN: IeeeMode: 1
-define void @test_f32_f64_denormals(float addrspace(1)* %out0, double addrspace(1)* %out1) #4 {
+define amdgpu_kernel void @test_f32_f64_denormals(float addrspace(1)* %out0, double addrspace(1)* %out1) #4 {
store float 0.0, float addrspace(1)* %out0
store double 0.0, double addrspace(1)* %out1
ret void
@@ -48,12 +48,40 @@ define void @test_f32_f64_denormals(float addrspace(1)* %out0, double addrspace(
; GCN-LABEL: {{^}}test_no_denormals:
; GCN: FloatMode: 0
; GCN: IeeeMode: 1
-define void @test_no_denormals(float addrspace(1)* %out0, double addrspace(1)* %out1) #5 {
+define amdgpu_kernel void @test_no_denormals(float addrspace(1)* %out0, double addrspace(1)* %out1) #5 {
store float 0.0, float addrspace(1)* %out0
store double 0.0, double addrspace(1)* %out1
ret void
}
+; GCN-LABEL: {{^}}test_f16_f64_denormals:
+; GCN: FloatMode: 192
+; GCN: IeeeMode: 1
+define amdgpu_kernel void @test_f16_f64_denormals(half addrspace(1)* %out0, double addrspace(1)* %out1) #6 {
+ store half 0.0, half addrspace(1)* %out0
+ store double 0.0, double addrspace(1)* %out1
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_no_f16_f64_denormals:
+; GCN: FloatMode: 0
+; GCN: IeeeMode: 1
+define amdgpu_kernel void @test_no_f16_f64_denormals(half addrspace(1)* %out0, double addrspace(1)* %out1) #7 {
+ store half 0.0, half addrspace(1)* %out0
+ store double 0.0, double addrspace(1)* %out1
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_f32_f16_f64_denormals:
+; GCN: FloatMode: 240
+; GCN: IeeeMode: 1
+define amdgpu_kernel void @test_f32_f16_f64_denormals(half addrspace(1)* %out0, float addrspace(1)* %out1, double addrspace(1)* %out2) #8 {
+ store half 0.0, half addrspace(1)* %out0
+ store float 0.0, float addrspace(1)* %out1
+ store double 0.0, double addrspace(1)* %out2
+ ret void
+}
+
; GCN-LABEL: {{^}}kill_gs_const:
; GCN: IeeeMode: 0
define amdgpu_gs void @kill_gs_const() {
@@ -69,22 +97,22 @@ main_body:
; GCN-LABEL: {{^}}kill_vcc_implicit_def:
; GCN: IeeeMode: 0
-define amdgpu_ps void @kill_vcc_implicit_def([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [17 x <4 x i32>] addrspace(2)* byval, [34 x <8 x i32>] addrspace(2)* byval, float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, i32, float, float) {
+define amdgpu_ps float @kill_vcc_implicit_def([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [17 x <4 x i32>] addrspace(2)* byval, [34 x <8 x i32>] addrspace(2)* byval, float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, i32, float, float) {
entry:
%tmp0 = fcmp olt float %13, 0.0
call void @llvm.AMDGPU.kill(float %14)
%tmp1 = select i1 %tmp0, float 1.0, float 0.0
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 1, i32 1, float %tmp1, float %tmp1, float %tmp1, float %tmp1)
- ret void
+ ret float %tmp1
}
-
declare void @llvm.AMDGPU.kill(float)
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
attributes #0 = { nounwind "target-cpu"="tahiti" }
attributes #1 = { nounwind "target-cpu"="fiji" }
attributes #2 = { nounwind "target-features"="+fp64-denormals" }
attributes #3 = { nounwind "target-features"="+fp32-denormals" }
attributes #4 = { nounwind "target-features"="+fp32-denormals,+fp64-denormals" }
-attributes #5 = { nounwind "target-features"="-fp32-denormals,-fp64-denormals" }
+attributes #5 = { nounwind "target-features"="-fp32-denormals,-fp64-fp16-denormals" }
+attributes #6 = { nounwind "target-features"="+fp64-fp16-denormals" }
+attributes #7 = { nounwind "target-features"="-fp64-fp16-denormals" }
+attributes #8 = { nounwind "target-features"="+fp32-denormals,+fp64-fp16-denormals" }
diff --git a/test/CodeGen/AMDGPU/detect-dead-lanes.mir b/test/CodeGen/AMDGPU/detect-dead-lanes.mir
index 9d70f67ef491..32e6f7cc0cdc 100644
--- a/test/CodeGen/AMDGPU/detect-dead-lanes.mir
+++ b/test/CodeGen/AMDGPU/detect-dead-lanes.mir
@@ -1,14 +1,14 @@
# RUN: llc -march=amdgcn -run-pass detect-dead-lanes -o - %s | FileCheck %s
--- |
- define void @test0() { ret void }
- define void @test1() { ret void }
- define void @test2() { ret void }
- define void @test3() { ret void }
- define void @test4() { ret void }
- define void @test5() { ret void }
- define void @loop0() { ret void }
- define void @loop1() { ret void }
- define void @loop2() { ret void }
+ define amdgpu_kernel void @test0() { ret void }
+ define amdgpu_kernel void @test1() { ret void }
+ define amdgpu_kernel void @test2() { ret void }
+ define amdgpu_kernel void @test3() { ret void }
+ define amdgpu_kernel void @test4() { ret void }
+ define amdgpu_kernel void @test5() { ret void }
+ define amdgpu_kernel void @loop0() { ret void }
+ define amdgpu_kernel void @loop1() { ret void }
+ define amdgpu_kernel void @loop2() { ret void }
...
---
# Combined use/def transfer check, the basics.
diff --git a/test/CodeGen/AMDGPU/disconnected-predset-break-bug.ll b/test/CodeGen/AMDGPU/disconnected-predset-break-bug.ll
index cdd2c0cd4f43..6dfe1294bb47 100644
--- a/test/CodeGen/AMDGPU/disconnected-predset-break-bug.ll
+++ b/test/CodeGen/AMDGPU/disconnected-predset-break-bug.ll
@@ -9,7 +9,7 @@
; CHECK: ALU_PUSH_BEFORE
; CHECK-NEXT: JUMP
; CHECK-NEXT: LOOP_BREAK
-define void @loop_ge(i32 addrspace(1)* nocapture %out, i32 %iterations) nounwind {
+define amdgpu_kernel void @loop_ge(i32 addrspace(1)* nocapture %out, i32 %iterations) nounwind {
entry:
%cmp5 = icmp sgt i32 %iterations, 0
br i1 %cmp5, label %for.body, label %for.end
diff --git a/test/CodeGen/AMDGPU/drop-mem-operand-move-smrd.ll b/test/CodeGen/AMDGPU/drop-mem-operand-move-smrd.ll
index 5e1ebfde3e10..878b5ebe9409 100644
--- a/test/CodeGen/AMDGPU/drop-mem-operand-move-smrd.ll
+++ b/test/CodeGen/AMDGPU/drop-mem-operand-move-smrd.ll
@@ -9,7 +9,7 @@
; GCN: buffer_load_dword
; GCN: ds_write2_b32
; GCN: s_endpgm
-define void @reschedule_global_load_lds_store(i32 addrspace(1)* noalias %gptr0, i32 addrspace(1)* noalias %gptr1, i32 addrspace(3)* noalias %lptr, i32 %c) #0 {
+define amdgpu_kernel void @reschedule_global_load_lds_store(i32 addrspace(1)* noalias %gptr0, i32 addrspace(1)* noalias %gptr1, i32 addrspace(3)* noalias %lptr, i32 %c) #0 {
entry:
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx = shl i32 %tid, 2
diff --git a/test/CodeGen/AMDGPU/ds-combine-large-stride.ll b/test/CodeGen/AMDGPU/ds-combine-large-stride.ll
new file mode 100644
index 000000000000..a723b0210ade
--- /dev/null
+++ b/test/CodeGen/AMDGPU/ds-combine-large-stride.ll
@@ -0,0 +1,412 @@
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 -check-prefix=GCN %s
+
+; GCN-LABEL: ds_read32_combine_stride_400:
+; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
+; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
+; GCN-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
+; GCN-DAG: v_add_i32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
+; GCN-DAG: v_add_i32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
+; GCN-DAG: ds_read2_b32 v[{{[0-9]+:[0-9]+}}], [[BASE]] offset1:100
+; GCN-DAG: ds_read2_b32 v[{{[0-9]+:[0-9]+}}], [[B1]] offset1:100
+; GCN-DAG: ds_read2_b32 v[{{[0-9]+:[0-9]+}}], [[B2]] offset1:100
+; GCN-DAG: ds_read2_b32 v[{{[0-9]+:[0-9]+}}], [[B3]] offset1:100
+define amdgpu_kernel void @ds_read32_combine_stride_400(float addrspace(3)* nocapture readonly %arg, float *nocapture %arg1) {
+bb:
+ %tmp = load float, float addrspace(3)* %arg, align 4
+ %tmp2 = fadd float %tmp, 0.000000e+00
+ %tmp3 = getelementptr inbounds float, float addrspace(3)* %arg, i32 100
+ %tmp4 = load float, float addrspace(3)* %tmp3, align 4
+ %tmp5 = fadd float %tmp2, %tmp4
+ %tmp6 = getelementptr inbounds float, float addrspace(3)* %arg, i32 200
+ %tmp7 = load float, float addrspace(3)* %tmp6, align 4
+ %tmp8 = fadd float %tmp5, %tmp7
+ %tmp9 = getelementptr inbounds float, float addrspace(3)* %arg, i32 300
+ %tmp10 = load float, float addrspace(3)* %tmp9, align 4
+ %tmp11 = fadd float %tmp8, %tmp10
+ %tmp12 = getelementptr inbounds float, float addrspace(3)* %arg, i32 400
+ %tmp13 = load float, float addrspace(3)* %tmp12, align 4
+ %tmp14 = fadd float %tmp11, %tmp13
+ %tmp15 = getelementptr inbounds float, float addrspace(3)* %arg, i32 500
+ %tmp16 = load float, float addrspace(3)* %tmp15, align 4
+ %tmp17 = fadd float %tmp14, %tmp16
+ %tmp18 = getelementptr inbounds float, float addrspace(3)* %arg, i32 600
+ %tmp19 = load float, float addrspace(3)* %tmp18, align 4
+ %tmp20 = fadd float %tmp17, %tmp19
+ %tmp21 = getelementptr inbounds float, float addrspace(3)* %arg, i32 700
+ %tmp22 = load float, float addrspace(3)* %tmp21, align 4
+ %tmp23 = fadd float %tmp20, %tmp22
+ store float %tmp23, float *%arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: ds_read32_combine_stride_400_back:
+; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
+; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
+; GCN-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
+; GCN-DAG: v_add_i32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
+; GCN-DAG: v_add_i32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
+; GCN-DAG: ds_read2_b32 v[{{[0-9]+:[0-9]+}}], [[BASE]] offset1:100
+; GCN-DAG: ds_read2_b32 v[{{[0-9]+:[0-9]+}}], [[B1]] offset1:100
+; GCN-DAG: ds_read2_b32 v[{{[0-9]+:[0-9]+}}], [[B2]] offset1:100
+; GCN-DAG: ds_read2_b32 v[{{[0-9]+:[0-9]+}}], [[B3]] offset1:100
+define amdgpu_kernel void @ds_read32_combine_stride_400_back(float addrspace(3)* nocapture readonly %arg, float *nocapture %arg1) {
+bb:
+ %tmp = getelementptr inbounds float, float addrspace(3)* %arg, i32 700
+ %tmp2 = load float, float addrspace(3)* %tmp, align 4
+ %tmp3 = fadd float %tmp2, 0.000000e+00
+ %tmp4 = getelementptr inbounds float, float addrspace(3)* %arg, i32 600
+ %tmp5 = load float, float addrspace(3)* %tmp4, align 4
+ %tmp6 = fadd float %tmp3, %tmp5
+ %tmp7 = getelementptr inbounds float, float addrspace(3)* %arg, i32 500
+ %tmp8 = load float, float addrspace(3)* %tmp7, align 4
+ %tmp9 = fadd float %tmp6, %tmp8
+ %tmp10 = getelementptr inbounds float, float addrspace(3)* %arg, i32 400
+ %tmp11 = load float, float addrspace(3)* %tmp10, align 4
+ %tmp12 = fadd float %tmp9, %tmp11
+ %tmp13 = getelementptr inbounds float, float addrspace(3)* %arg, i32 300
+ %tmp14 = load float, float addrspace(3)* %tmp13, align 4
+ %tmp15 = fadd float %tmp12, %tmp14
+ %tmp16 = getelementptr inbounds float, float addrspace(3)* %arg, i32 200
+ %tmp17 = load float, float addrspace(3)* %tmp16, align 4
+ %tmp18 = fadd float %tmp15, %tmp17
+ %tmp19 = getelementptr inbounds float, float addrspace(3)* %arg, i32 100
+ %tmp20 = load float, float addrspace(3)* %tmp19, align 4
+ %tmp21 = fadd float %tmp18, %tmp20
+ %tmp22 = load float, float addrspace(3)* %arg, align 4
+ %tmp23 = fadd float %tmp21, %tmp22
+ store float %tmp23, float *%arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: ds_read32_combine_stride_8192:
+; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
+; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
+; GCN-DAG: ds_read2st64_b32 v[{{[0-9]+:[0-9]+}}], [[BASE]] offset1:32
+; GCN-DAG: ds_read2st64_b32 v[{{[0-9]+:[0-9]+}}], [[BASE]] offset0:64 offset1:96
+; GCN-DAG: ds_read2st64_b32 v[{{[0-9]+:[0-9]+}}], [[BASE]] offset0:128 offset1:160
+; GCN-DAG: ds_read2st64_b32 v[{{[0-9]+:[0-9]+}}], [[BASE]] offset0:192 offset1:224
+define amdgpu_kernel void @ds_read32_combine_stride_8192(float addrspace(3)* nocapture readonly %arg, float *nocapture %arg1) {
+bb:
+ %tmp = load float, float addrspace(3)* %arg, align 4
+ %tmp2 = fadd float %tmp, 0.000000e+00
+ %tmp3 = getelementptr inbounds float, float addrspace(3)* %arg, i32 2048
+ %tmp4 = load float, float addrspace(3)* %tmp3, align 4
+ %tmp5 = fadd float %tmp2, %tmp4
+ %tmp6 = getelementptr inbounds float, float addrspace(3)* %arg, i32 4096
+ %tmp7 = load float, float addrspace(3)* %tmp6, align 4
+ %tmp8 = fadd float %tmp5, %tmp7
+ %tmp9 = getelementptr inbounds float, float addrspace(3)* %arg, i32 6144
+ %tmp10 = load float, float addrspace(3)* %tmp9, align 4
+ %tmp11 = fadd float %tmp8, %tmp10
+ %tmp12 = getelementptr inbounds float, float addrspace(3)* %arg, i32 8192
+ %tmp13 = load float, float addrspace(3)* %tmp12, align 4
+ %tmp14 = fadd float %tmp11, %tmp13
+ %tmp15 = getelementptr inbounds float, float addrspace(3)* %arg, i32 10240
+ %tmp16 = load float, float addrspace(3)* %tmp15, align 4
+ %tmp17 = fadd float %tmp14, %tmp16
+ %tmp18 = getelementptr inbounds float, float addrspace(3)* %arg, i32 12288
+ %tmp19 = load float, float addrspace(3)* %tmp18, align 4
+ %tmp20 = fadd float %tmp17, %tmp19
+ %tmp21 = getelementptr inbounds float, float addrspace(3)* %arg, i32 14336
+ %tmp22 = load float, float addrspace(3)* %tmp21, align 4
+ %tmp23 = fadd float %tmp20, %tmp22
+ store float %tmp23, float *%arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: ds_read32_combine_stride_8192_shifted:
+; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
+; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
+; GCN-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 8, [[BASE]]
+; GCN-DAG: v_add_i32_e32 [[B2:v[0-9]+]], vcc, 0x4008, [[BASE]]
+; GCN-DAG: v_add_i32_e32 [[B3:v[0-9]+]], vcc, 0x8008, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 8, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B2:v[0-9]+]], vcc, 0x4008, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B3:v[0-9]+]], vcc, 0x8008, [[BASE]]
+; GCN-DAG: ds_read2st64_b32 v[{{[0-9]+:[0-9]+}}], [[B1]] offset1:32
+; GCN-DAG: ds_read2st64_b32 v[{{[0-9]+:[0-9]+}}], [[B2]] offset1:32
+; GCN-DAG: ds_read2st64_b32 v[{{[0-9]+:[0-9]+}}], [[B3]] offset1:32
+define amdgpu_kernel void @ds_read32_combine_stride_8192_shifted(float addrspace(3)* nocapture readonly %arg, float *nocapture %arg1) {
+bb:
+ %tmp = getelementptr inbounds float, float addrspace(3)* %arg, i32 2
+ %tmp2 = load float, float addrspace(3)* %tmp, align 4
+ %tmp3 = fadd float %tmp2, 0.000000e+00
+ %tmp4 = getelementptr inbounds float, float addrspace(3)* %arg, i32 2050
+ %tmp5 = load float, float addrspace(3)* %tmp4, align 4
+ %tmp6 = fadd float %tmp3, %tmp5
+ %tmp7 = getelementptr inbounds float, float addrspace(3)* %arg, i32 4098
+ %tmp8 = load float, float addrspace(3)* %tmp7, align 4
+ %tmp9 = fadd float %tmp6, %tmp8
+ %tmp10 = getelementptr inbounds float, float addrspace(3)* %arg, i32 6146
+ %tmp11 = load float, float addrspace(3)* %tmp10, align 4
+ %tmp12 = fadd float %tmp9, %tmp11
+ %tmp13 = getelementptr inbounds float, float addrspace(3)* %arg, i32 8194
+ %tmp14 = load float, float addrspace(3)* %tmp13, align 4
+ %tmp15 = fadd float %tmp12, %tmp14
+ %tmp16 = getelementptr inbounds float, float addrspace(3)* %arg, i32 10242
+ %tmp17 = load float, float addrspace(3)* %tmp16, align 4
+ %tmp18 = fadd float %tmp15, %tmp17
+ store float %tmp18, float *%arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: ds_read64_combine_stride_400:
+; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
+; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
+; GCN-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 0x960, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 0x960, [[BASE]]
+; GCN-DAG: ds_read2_b64 v[{{[0-9]+:[0-9]+}}], [[BASE]] offset1:50
+; GCN-DAG: ds_read2_b64 v[{{[0-9]+:[0-9]+}}], [[BASE]] offset0:100 offset1:150
+; GCN-DAG: ds_read2_b64 v[{{[0-9]+:[0-9]+}}], [[BASE]] offset0:200 offset1:250
+; GCN-DAG: ds_read2_b64 v[{{[0-9]+:[0-9]+}}], [[B1]] offset1:50
+define amdgpu_kernel void @ds_read64_combine_stride_400(double addrspace(3)* nocapture readonly %arg, double *nocapture %arg1) {
+bb:
+ %tmp = load double, double addrspace(3)* %arg, align 8
+ %tmp2 = fadd double %tmp, 0.000000e+00
+ %tmp3 = getelementptr inbounds double, double addrspace(3)* %arg, i32 50
+ %tmp4 = load double, double addrspace(3)* %tmp3, align 8
+ %tmp5 = fadd double %tmp2, %tmp4
+ %tmp6 = getelementptr inbounds double, double addrspace(3)* %arg, i32 100
+ %tmp7 = load double, double addrspace(3)* %tmp6, align 8
+ %tmp8 = fadd double %tmp5, %tmp7
+ %tmp9 = getelementptr inbounds double, double addrspace(3)* %arg, i32 150
+ %tmp10 = load double, double addrspace(3)* %tmp9, align 8
+ %tmp11 = fadd double %tmp8, %tmp10
+ %tmp12 = getelementptr inbounds double, double addrspace(3)* %arg, i32 200
+ %tmp13 = load double, double addrspace(3)* %tmp12, align 8
+ %tmp14 = fadd double %tmp11, %tmp13
+ %tmp15 = getelementptr inbounds double, double addrspace(3)* %arg, i32 250
+ %tmp16 = load double, double addrspace(3)* %tmp15, align 8
+ %tmp17 = fadd double %tmp14, %tmp16
+ %tmp18 = getelementptr inbounds double, double addrspace(3)* %arg, i32 300
+ %tmp19 = load double, double addrspace(3)* %tmp18, align 8
+ %tmp20 = fadd double %tmp17, %tmp19
+ %tmp21 = getelementptr inbounds double, double addrspace(3)* %arg, i32 350
+ %tmp22 = load double, double addrspace(3)* %tmp21, align 8
+ %tmp23 = fadd double %tmp20, %tmp22
+ store double %tmp23, double *%arg1, align 8
+ ret void
+}
+
+; GCN-LABEL: ds_read64_combine_stride_8192_shifted:
+; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
+; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
+; GCN-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 8, [[BASE]]
+; GCN-DAG: v_add_i32_e32 [[B2:v[0-9]+]], vcc, 0x4008, [[BASE]]
+; GCN-DAG: v_add_i32_e32 [[B3:v[0-9]+]], vcc, 0x8008, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 8, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B2:v[0-9]+]], vcc, 0x4008, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B3:v[0-9]+]], vcc, 0x8008, [[BASE]]
+; GCN-DAG: ds_read2st64_b64 v[{{[0-9]+:[0-9]+}}], [[B1]] offset1:16
+; GCN-DAG: ds_read2st64_b64 v[{{[0-9]+:[0-9]+}}], [[B2]] offset1:16
+; GCN-DAG: ds_read2st64_b64 v[{{[0-9]+:[0-9]+}}], [[B3]] offset1:16
+define amdgpu_kernel void @ds_read64_combine_stride_8192_shifted(double addrspace(3)* nocapture readonly %arg, double *nocapture %arg1) {
+bb:
+ %tmp = getelementptr inbounds double, double addrspace(3)* %arg, i32 1
+ %tmp2 = load double, double addrspace(3)* %tmp, align 8
+ %tmp3 = fadd double %tmp2, 0.000000e+00
+ %tmp4 = getelementptr inbounds double, double addrspace(3)* %arg, i32 1025
+ %tmp5 = load double, double addrspace(3)* %tmp4, align 8
+ %tmp6 = fadd double %tmp3, %tmp5
+ %tmp7 = getelementptr inbounds double, double addrspace(3)* %arg, i32 2049
+ %tmp8 = load double, double addrspace(3)* %tmp7, align 8
+ %tmp9 = fadd double %tmp6, %tmp8
+ %tmp10 = getelementptr inbounds double, double addrspace(3)* %arg, i32 3073
+ %tmp11 = load double, double addrspace(3)* %tmp10, align 8
+ %tmp12 = fadd double %tmp9, %tmp11
+ %tmp13 = getelementptr inbounds double, double addrspace(3)* %arg, i32 4097
+ %tmp14 = load double, double addrspace(3)* %tmp13, align 8
+ %tmp15 = fadd double %tmp12, %tmp14
+ %tmp16 = getelementptr inbounds double, double addrspace(3)* %arg, i32 5121
+ %tmp17 = load double, double addrspace(3)* %tmp16, align 8
+ %tmp18 = fadd double %tmp15, %tmp17
+ store double %tmp18, double *%arg1, align 8
+ ret void
+}
+
+; GCN-LABEL: ds_write32_combine_stride_400:
+; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
+; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
+; GCN-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
+; GCN-DAG: v_add_i32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
+; GCN-DAG: v_add_i32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
+; GCN-DAG: ds_write2_b32 [[BASE]], v{{[0-9]+}}, v{{[0-9]+}} offset1:100
+; GCN-DAG: ds_write2_b32 [[B1]], v{{[0-9]+}}, v{{[0-9]+}} offset1:100
+; GCN-DAG: ds_write2_b32 [[B2]], v{{[0-9]+}}, v{{[0-9]+}} offset1:100
+; GCN-DAG: ds_write2_b32 [[B3]], v{{[0-9]+}}, v{{[0-9]+}} offset1:100
+define amdgpu_kernel void @ds_write32_combine_stride_400(float addrspace(3)* nocapture %arg) {
+bb:
+ store float 1.000000e+00, float addrspace(3)* %arg, align 4
+ %tmp = getelementptr inbounds float, float addrspace(3)* %arg, i32 100
+ store float 1.000000e+00, float addrspace(3)* %tmp, align 4
+ %tmp1 = getelementptr inbounds float, float addrspace(3)* %arg, i32 200
+ store float 1.000000e+00, float addrspace(3)* %tmp1, align 4
+ %tmp2 = getelementptr inbounds float, float addrspace(3)* %arg, i32 300
+ store float 1.000000e+00, float addrspace(3)* %tmp2, align 4
+ %tmp3 = getelementptr inbounds float, float addrspace(3)* %arg, i32 400
+ store float 1.000000e+00, float addrspace(3)* %tmp3, align 4
+ %tmp4 = getelementptr inbounds float, float addrspace(3)* %arg, i32 500
+ store float 1.000000e+00, float addrspace(3)* %tmp4, align 4
+ %tmp5 = getelementptr inbounds float, float addrspace(3)* %arg, i32 600
+ store float 1.000000e+00, float addrspace(3)* %tmp5, align 4
+ %tmp6 = getelementptr inbounds float, float addrspace(3)* %arg, i32 700
+ store float 1.000000e+00, float addrspace(3)* %tmp6, align 4
+ ret void
+}
+
+; GCN-LABEL: ds_write32_combine_stride_400_back:
+; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
+; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
+; GCN-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
+; GCN-DAG: v_add_i32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
+; GCN-DAG: v_add_i32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
+; GCN-DAG: ds_write2_b32 [[BASE]], v{{[0-9]+}}, v{{[0-9]+}} offset1:100
+; GCN-DAG: ds_write2_b32 [[B1]], v{{[0-9]+}}, v{{[0-9]+}} offset1:100
+; GCN-DAG: ds_write2_b32 [[B2]], v{{[0-9]+}}, v{{[0-9]+}} offset1:100
+; GCN-DAG: ds_write2_b32 [[B3]], v{{[0-9]+}}, v{{[0-9]+}} offset1:100
+define amdgpu_kernel void @ds_write32_combine_stride_400_back(float addrspace(3)* nocapture %arg) {
+bb:
+ %tmp = getelementptr inbounds float, float addrspace(3)* %arg, i32 700
+ store float 1.000000e+00, float addrspace(3)* %tmp, align 4
+ %tmp1 = getelementptr inbounds float, float addrspace(3)* %arg, i32 600
+ store float 1.000000e+00, float addrspace(3)* %tmp1, align 4
+ %tmp2 = getelementptr inbounds float, float addrspace(3)* %arg, i32 500
+ store float 1.000000e+00, float addrspace(3)* %tmp2, align 4
+ %tmp3 = getelementptr inbounds float, float addrspace(3)* %arg, i32 400
+ store float 1.000000e+00, float addrspace(3)* %tmp3, align 4
+ %tmp4 = getelementptr inbounds float, float addrspace(3)* %arg, i32 300
+ store float 1.000000e+00, float addrspace(3)* %tmp4, align 4
+ %tmp5 = getelementptr inbounds float, float addrspace(3)* %arg, i32 200
+ store float 1.000000e+00, float addrspace(3)* %tmp5, align 4
+ %tmp6 = getelementptr inbounds float, float addrspace(3)* %arg, i32 100
+ store float 1.000000e+00, float addrspace(3)* %tmp6, align 4
+ store float 1.000000e+00, float addrspace(3)* %arg, align 4
+ ret void
+}
+
+; GCN-LABEL: ds_write32_combine_stride_8192:
+; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
+; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
+; GCN-DAG: ds_write2st64_b32 [[BASE]], v{{[0-9]+}}, v{{[0-9]+}} offset1:32
+; GCN-DAG: ds_write2st64_b32 [[BASE]], v{{[0-9]+}}, v{{[0-9]+}} offset0:64 offset1:96
+; GCN-DAG: ds_write2st64_b32 [[BASE]], v{{[0-9]+}}, v{{[0-9]+}} offset0:128 offset1:160
+; GCN-DAG: ds_write2st64_b32 [[BASE]], v{{[0-9]+}}, v{{[0-9]+}} offset0:192 offset1:224
+define amdgpu_kernel void @ds_write32_combine_stride_8192(float addrspace(3)* nocapture %arg) {
+bb:
+ store float 1.000000e+00, float addrspace(3)* %arg, align 4
+ %tmp = getelementptr inbounds float, float addrspace(3)* %arg, i32 2048
+ store float 1.000000e+00, float addrspace(3)* %tmp, align 4
+ %tmp1 = getelementptr inbounds float, float addrspace(3)* %arg, i32 4096
+ store float 1.000000e+00, float addrspace(3)* %tmp1, align 4
+ %tmp2 = getelementptr inbounds float, float addrspace(3)* %arg, i32 6144
+ store float 1.000000e+00, float addrspace(3)* %tmp2, align 4
+ %tmp3 = getelementptr inbounds float, float addrspace(3)* %arg, i32 8192
+ store float 1.000000e+00, float addrspace(3)* %tmp3, align 4
+ %tmp4 = getelementptr inbounds float, float addrspace(3)* %arg, i32 10240
+ store float 1.000000e+00, float addrspace(3)* %tmp4, align 4
+ %tmp5 = getelementptr inbounds float, float addrspace(3)* %arg, i32 12288
+ store float 1.000000e+00, float addrspace(3)* %tmp5, align 4
+ %tmp6 = getelementptr inbounds float, float addrspace(3)* %arg, i32 14336
+ store float 1.000000e+00, float addrspace(3)* %tmp6, align 4
+ ret void
+}
+
+; GCN-LABEL: ds_write32_combine_stride_8192_shifted:
+; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
+; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
+; GCN-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 4, [[BASE]]
+; GCN-DAG: v_add_i32_e32 [[B2:v[0-9]+]], vcc, 0x4004, [[BASE]]
+; GCN-DAG: v_add_i32_e32 [[B3:v[0-9]+]], vcc, 0x8004, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 4, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B2:v[0-9]+]], vcc, 0x4004, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B3:v[0-9]+]], vcc, 0x8004, [[BASE]]
+; GCN-DAG: ds_write2st64_b32 [[B1]], v{{[0-9]+}}, v{{[0-9]+}} offset1:32
+; GCN-DAG: ds_write2st64_b32 [[B2]], v{{[0-9]+}}, v{{[0-9]+}} offset1:32
+; GCN-DAG: ds_write2st64_b32 [[B3]], v{{[0-9]+}}, v{{[0-9]+}} offset1:32
+define amdgpu_kernel void @ds_write32_combine_stride_8192_shifted(float addrspace(3)* nocapture %arg) {
+bb:
+ %tmp = getelementptr inbounds float, float addrspace(3)* %arg, i32 1
+ store float 1.000000e+00, float addrspace(3)* %tmp, align 4
+ %tmp1 = getelementptr inbounds float, float addrspace(3)* %arg, i32 2049
+ store float 1.000000e+00, float addrspace(3)* %tmp1, align 4
+ %tmp2 = getelementptr inbounds float, float addrspace(3)* %arg, i32 4097
+ store float 1.000000e+00, float addrspace(3)* %tmp2, align 4
+ %tmp3 = getelementptr inbounds float, float addrspace(3)* %arg, i32 6145
+ store float 1.000000e+00, float addrspace(3)* %tmp3, align 4
+ %tmp4 = getelementptr inbounds float, float addrspace(3)* %arg, i32 8193
+ store float 1.000000e+00, float addrspace(3)* %tmp4, align 4
+ %tmp5 = getelementptr inbounds float, float addrspace(3)* %arg, i32 10241
+ store float 1.000000e+00, float addrspace(3)* %tmp5, align 4
+ ret void
+}
+
+; GCN-LABEL: ds_write64_combine_stride_400:
+; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
+; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
+; GCN-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 0x960, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 0x960, [[BASE]]
+; GCN-DAG: ds_write2_b64 [[BASE]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] offset1:50
+; GCN-DAG: ds_write2_b64 [[BASE]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] offset0:100 offset1:150
+; GCN-DAG: ds_write2_b64 [[BASE]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] offset0:200 offset1:250
+; GCN-DAG: ds_write2_b64 [[B1]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] offset1:50
+define amdgpu_kernel void @ds_write64_combine_stride_400(double addrspace(3)* nocapture %arg) {
+bb:
+ store double 1.000000e+00, double addrspace(3)* %arg, align 8
+ %tmp = getelementptr inbounds double, double addrspace(3)* %arg, i32 50
+ store double 1.000000e+00, double addrspace(3)* %tmp, align 8
+ %tmp1 = getelementptr inbounds double, double addrspace(3)* %arg, i32 100
+ store double 1.000000e+00, double addrspace(3)* %tmp1, align 8
+ %tmp2 = getelementptr inbounds double, double addrspace(3)* %arg, i32 150
+ store double 1.000000e+00, double addrspace(3)* %tmp2, align 8
+ %tmp3 = getelementptr inbounds double, double addrspace(3)* %arg, i32 200
+ store double 1.000000e+00, double addrspace(3)* %tmp3, align 8
+ %tmp4 = getelementptr inbounds double, double addrspace(3)* %arg, i32 250
+ store double 1.000000e+00, double addrspace(3)* %tmp4, align 8
+ %tmp5 = getelementptr inbounds double, double addrspace(3)* %arg, i32 300
+ store double 1.000000e+00, double addrspace(3)* %tmp5, align 8
+ %tmp6 = getelementptr inbounds double, double addrspace(3)* %arg, i32 350
+ store double 1.000000e+00, double addrspace(3)* %tmp6, align 8
+ ret void
+}
+
+; GCN-LABEL: ds_write64_combine_stride_8192_shifted:
+; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
+; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
+; GCN-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 8, [[BASE]]
+; GCN-DAG: v_add_i32_e32 [[B2:v[0-9]+]], vcc, 0x4008, [[BASE]]
+; GCN-DAG: v_add_i32_e32 [[B3:v[0-9]+]], vcc, 0x8008, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B1:v[0-9]+]], vcc, 8, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B2:v[0-9]+]], vcc, 0x4008, [[BASE]]
+; GFX9-DAG: v_add_i32_e32 [[B3:v[0-9]+]], vcc, 0x8008, [[BASE]]
+; GCN-DAG: ds_write2st64_b64 [[B1]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] offset1:16
+; GCN-DAG: ds_write2st64_b64 [[B2]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] offset1:16
+; GCN-DAG: ds_write2st64_b64 [[B3]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] offset1:16
+define amdgpu_kernel void @ds_write64_combine_stride_8192_shifted(double addrspace(3)* nocapture %arg) {
+bb:
+ %tmp = getelementptr inbounds double, double addrspace(3)* %arg, i32 1
+ store double 1.000000e+00, double addrspace(3)* %tmp, align 8
+ %tmp1 = getelementptr inbounds double, double addrspace(3)* %arg, i32 1025
+ store double 1.000000e+00, double addrspace(3)* %tmp1, align 8
+ %tmp2 = getelementptr inbounds double, double addrspace(3)* %arg, i32 2049
+ store double 1.000000e+00, double addrspace(3)* %tmp2, align 8
+ %tmp3 = getelementptr inbounds double, double addrspace(3)* %arg, i32 3073
+ store double 1.000000e+00, double addrspace(3)* %tmp3, align 8
+ %tmp4 = getelementptr inbounds double, double addrspace(3)* %arg, i32 4097
+ store double 1.000000e+00, double addrspace(3)* %tmp4, align 8
+ %tmp5 = getelementptr inbounds double, double addrspace(3)* %arg, i32 5121
+ store double 1.000000e+00, double addrspace(3)* %tmp5, align 8
+ ret void
+}
diff --git a/test/CodeGen/AMDGPU/ds-negative-offset-addressing-mode-loop.ll b/test/CodeGen/AMDGPU/ds-negative-offset-addressing-mode-loop.ll
index f461d6978f13..5997e27fd815 100644
--- a/test/CodeGen/AMDGPU/ds-negative-offset-addressing-mode-loop.ll
+++ b/test/CodeGen/AMDGPU/ds-negative-offset-addressing-mode-loop.ll
@@ -23,7 +23,7 @@ declare void @llvm.amdgcn.s.barrier() #1
; CI-DAG: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[VADDR]] offset0:32 offset1:34
; CI-DAG: ds_read_b32 v{{[0-9]+}}, [[VADDR]] offset:256
; CHECK: s_endpgm
-define void @signed_ds_offset_addressing_loop(float addrspace(1)* noalias nocapture %out, float addrspace(3)* noalias nocapture readonly %lptr, i32 %n) #2 {
+define amdgpu_kernel void @signed_ds_offset_addressing_loop(float addrspace(1)* noalias nocapture %out, float addrspace(3)* noalias nocapture readonly %lptr, i32 %n) #2 {
entry:
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%mul = shl nsw i32 %x.i, 1
diff --git a/test/CodeGen/AMDGPU/ds-sub-offset.ll b/test/CodeGen/AMDGPU/ds-sub-offset.ll
index 16fb019ae0f3..d74bd5aa15ac 100644
--- a/test/CodeGen/AMDGPU/ds-sub-offset.ll
+++ b/test/CodeGen/AMDGPU/ds-sub-offset.ll
@@ -9,7 +9,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() #0
; GCN: v_sub_i32_e32 [[BASEPTR:v[0-9]+]], vcc, 0, [[SHL]]
; GCN: v_mov_b32_e32 [[VAL:v[0-9]+]], 0x7b
; GCN: ds_write_b32 [[BASEPTR]], [[VAL]] offset:12
-define void @write_ds_sub0_offset0_global() #0 {
+define amdgpu_kernel void @write_ds_sub0_offset0_global() #0 {
entry:
%x.i = call i32 @llvm.amdgcn.workitem.id.x() #1
%sub1 = sub i32 0, %x.i
@@ -24,7 +24,7 @@ entry:
; GCN-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SCALED]]
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 13
; GCN: ds_write_b8 [[NEG]], [[K]] offset:65535
-define void @add_x_shl_neg_to_sub_max_offset() #1 {
+define amdgpu_kernel void @add_x_shl_neg_to_sub_max_offset() #1 {
%x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
%neg = sub i32 0, %x.i
%shl = shl i32 %neg, 2
@@ -39,7 +39,7 @@ define void @add_x_shl_neg_to_sub_max_offset() #1 {
; GCN-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0x10000, [[SCALED]]
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 13
; GCN: ds_write_b8 [[NEG]], [[K]]{{$}}
-define void @add_x_shl_neg_to_sub_max_offset_p1() #1 {
+define amdgpu_kernel void @add_x_shl_neg_to_sub_max_offset_p1() #1 {
%x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
%neg = sub i32 0, %x.i
%shl = shl i32 %neg, 2
@@ -58,7 +58,7 @@ define void @add_x_shl_neg_to_sub_max_offset_p1() #1 {
; GCN-NOT: v_sub
; GCN: ds_write_b32 [[NEG]], [[K]] offset:456{{$}}
; GCN: s_endpgm
-define void @add_x_shl_neg_to_sub_multi_use() #1 {
+define amdgpu_kernel void @add_x_shl_neg_to_sub_multi_use() #1 {
%x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
%neg = sub i32 0, %x.i
%shl = shl i32 %neg, 2
@@ -80,7 +80,7 @@ define void @add_x_shl_neg_to_sub_multi_use() #1 {
; GCN-NOT: v_sub
; GCN: ds_write_b32 [[NEG]], [[K]] offset:123{{$}}
; GCN: s_endpgm
-define void @add_x_shl_neg_to_sub_multi_use_same_offset() #1 {
+define amdgpu_kernel void @add_x_shl_neg_to_sub_multi_use_same_offset() #1 {
%x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
%neg = sub i32 0, %x.i
%shl = shl i32 %neg, 2
@@ -95,7 +95,7 @@ define void @add_x_shl_neg_to_sub_multi_use_same_offset() #1 {
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED:v[0-9]+]], 2, v0
; GCN-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SCALED]]
; GCN: ds_write2_b32 [[NEG]], {{v[0-9]+}}, {{v[0-9]+}} offset0:254 offset1:255
-define void @add_x_shl_neg_to_sub_misaligned_i64_max_offset() #1 {
+define amdgpu_kernel void @add_x_shl_neg_to_sub_misaligned_i64_max_offset() #1 {
%x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
%neg = sub i32 0, %x.i
%shl = shl i32 %neg, 2
@@ -109,7 +109,7 @@ define void @add_x_shl_neg_to_sub_misaligned_i64_max_offset() #1 {
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED:v[0-9]+]], 2, v0
; GCN-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0x3fc, [[SCALED]]
; GCN: ds_write2_b32 [[NEG]], {{v[0-9]+}}, {{v[0-9]+}} offset1:1{{$}}
-define void @add_x_shl_neg_to_sub_misaligned_i64_max_offset_p1() #1 {
+define amdgpu_kernel void @add_x_shl_neg_to_sub_misaligned_i64_max_offset_p1() #1 {
%x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
%neg = sub i32 0, %x.i
%shl = shl i32 %neg, 2
diff --git a/test/CodeGen/AMDGPU/ds_read2.ll b/test/CodeGen/AMDGPU/ds_read2.ll
index 9a313230e303..2c474dbe7b08 100644
--- a/test/CodeGen/AMDGPU/ds_read2.ll
+++ b/test/CodeGen/AMDGPU/ds_read2.ll
@@ -12,7 +12,7 @@
; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], v[[HI_VREG]], v[[LO_VREG]]
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @simple_read2_f32(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_f32(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%val0 = load float, float addrspace(3)* %arrayidx0, align 4
@@ -31,7 +31,7 @@ define void @simple_read2_f32(float addrspace(1)* %out) #0 {
; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], v[[HI_VREG]], v[[LO_VREG]]
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @simple_read2_f32_max_offset(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_f32_max_offset(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%val0 = load float, float addrspace(3)* %arrayidx0, align 4
@@ -49,7 +49,7 @@ define void @simple_read2_f32_max_offset(float addrspace(1)* %out) #0 {
; SI: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}}
; SI: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:1028
; SI: s_endpgm
-define void @simple_read2_f32_too_far(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_f32_too_far(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%val0 = load float, float addrspace(3)* %arrayidx0, align 4
@@ -66,7 +66,7 @@ define void @simple_read2_f32_too_far(float addrspace(1)* %out) #0 {
; SI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[BASEADDR:v[0-9]+]] offset1:8
; SI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[BASEADDR]] offset0:11 offset1:27
; SI: s_endpgm
-define void @simple_read2_f32_x2(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_f32_x2(float addrspace(1)* %out) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 0
%arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
@@ -98,7 +98,7 @@ define void @simple_read2_f32_x2(float addrspace(1)* %out) #0 {
; SI: s_barrier
; SI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[BASEADDR]] offset0:11 offset1:27
; SI: s_endpgm
-define void @simple_read2_f32_x2_barrier(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_f32_x2_barrier(float addrspace(1)* %out) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 0
%arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
@@ -133,7 +133,7 @@ define void @simple_read2_f32_x2_barrier(float addrspace(1)* %out) #0 {
; SI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[BASEADDR:v[0-9]+]] offset0:2 offset1:8
; SI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[BASEADDR]] offset0:11 offset1:27
; SI: s_endpgm
-define void @simple_read2_f32_x2_nonzero_base(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_f32_x2_nonzero_base(float addrspace(1)* %out) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
@@ -170,7 +170,7 @@ define void @simple_read2_f32_x2_nonzero_base(float addrspace(1)* %out) #0 {
; SI: ds_read_b32
; SI: ds_read_b32
; SI: s_endpgm
-define void @read2_ptr_is_subreg_arg_f32(float addrspace(1)* %out, <2 x float addrspace(3)*> %lds.ptr) #0 {
+define amdgpu_kernel void @read2_ptr_is_subreg_arg_f32(float addrspace(1)* %out, <2 x float addrspace(3)*> %lds.ptr) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%index.0 = insertelement <2 x i32> undef, i32 %x.i, i32 0
%index.1 = insertelement <2 x i32> %index.0, i32 8, i32 0
@@ -196,7 +196,7 @@ define void @read2_ptr_is_subreg_arg_f32(float addrspace(1)* %out, <2 x float ad
; SI: ds_read_b32
; SI: ds_read_b32
; SI: s_endpgm
-define void @read2_ptr_is_subreg_arg_offset_f32(float addrspace(1)* %out, <2 x float addrspace(3)*> %lds.ptr) #0 {
+define amdgpu_kernel void @read2_ptr_is_subreg_arg_offset_f32(float addrspace(1)* %out, <2 x float addrspace(3)*> %lds.ptr) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%index.0 = insertelement <2 x i32> undef, i32 %x.i, i32 0
%index.1 = insertelement <2 x i32> %index.0, i32 8, i32 0
@@ -219,7 +219,7 @@ define void @read2_ptr_is_subreg_arg_offset_f32(float addrspace(1)* %out, <2 x f
; SI-LABEL: {{^}}read2_ptr_is_subreg_f32:
; SI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset1:8{{$}}
; SI: s_endpgm
-define void @read2_ptr_is_subreg_f32(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @read2_ptr_is_subreg_f32(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%ptr.0 = insertelement <2 x [512 x float] addrspace(3)*> undef, [512 x float] addrspace(3)* @lds, i32 0
%ptr.1 = insertelement <2 x [512 x float] addrspace(3)*> %ptr.0, [512 x float] addrspace(3)* @lds, i32 1
@@ -243,7 +243,7 @@ define void @read2_ptr_is_subreg_f32(float addrspace(1)* %out) #0 {
; SI: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}}
; SI: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:32
; SI: s_endpgm
-define void @simple_read2_f32_volatile_0(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_f32_volatile_0(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%val0 = load volatile float, float addrspace(3)* %arrayidx0, align 4
@@ -261,7 +261,7 @@ define void @simple_read2_f32_volatile_0(float addrspace(1)* %out) #0 {
; SI: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}}
; SI: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:32
; SI: s_endpgm
-define void @simple_read2_f32_volatile_1(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_f32_volatile_1(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%val0 = load float, float addrspace(3)* %arrayidx0, align 4
@@ -280,7 +280,7 @@ define void @simple_read2_f32_volatile_1(float addrspace(1)* %out) #0 {
; SI-LABEL: @unaligned_read2_f32
; SI-NOT: ds_read2_b32
; SI: s_endpgm
-define void @unaligned_read2_f32(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
+define amdgpu_kernel void @unaligned_read2_f32(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %x.i
%val0 = load float, float addrspace(3)* %arrayidx0, align 1
@@ -296,7 +296,7 @@ define void @unaligned_read2_f32(float addrspace(1)* %out, float addrspace(3)* %
; SI-LABEL: @misaligned_2_simple_read2_f32
; SI-NOT: ds_read2_b32
; SI: s_endpgm
-define void @misaligned_2_simple_read2_f32(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
+define amdgpu_kernel void @misaligned_2_simple_read2_f32(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %x.i
%val0 = load float, float addrspace(3)* %arrayidx0, align 2
@@ -315,7 +315,7 @@ define void @misaligned_2_simple_read2_f32(float addrspace(1)* %out, float addrs
; SI: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO_VREG]]:{{[0-9]+\]}}, v{{\[[0-9]+}}:[[HI_VREG]]{{\]}}
; SI: buffer_store_dwordx2 [[RESULT]]
; SI: s_endpgm
-define void @simple_read2_f64(double addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_f64(double addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
%val0 = load double, double addrspace(3)* %arrayidx0, align 8
@@ -331,7 +331,7 @@ define void @simple_read2_f64(double addrspace(1)* %out) #0 {
; SI-LABEL: @simple_read2_f64_max_offset
; SI: ds_read2_b64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset1:255
; SI: s_endpgm
-define void @simple_read2_f64_max_offset(double addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_f64_max_offset(double addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
%val0 = load double, double addrspace(3)* %arrayidx0, align 8
@@ -349,7 +349,7 @@ define void @simple_read2_f64_max_offset(double addrspace(1)* %out) #0 {
; SI: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}
; SI: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset:2056
; SI: s_endpgm
-define void @simple_read2_f64_too_far(double addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_f64_too_far(double addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
%val0 = load double, double addrspace(3)* %arrayidx0, align 8
@@ -367,7 +367,7 @@ define void @simple_read2_f64_too_far(double addrspace(1)* %out) #0 {
; SI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset1:1
; SI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset0:14 offset1:15
; SI: s_endpgm
-define void @misaligned_read2_f64(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
+define amdgpu_kernel void @misaligned_read2_f64(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %x.i
%val0 = load double, double addrspace(3)* %arrayidx0, align 4
@@ -385,7 +385,7 @@ define void @misaligned_read2_f64(double addrspace(1)* %out, double addrspace(3)
; SI-LABEL: @load_constant_adjacent_offsets
; SI: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
; SI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[ZERO]] offset1:1
-define void @load_constant_adjacent_offsets(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @load_constant_adjacent_offsets(i32 addrspace(1)* %out) {
%val0 = load i32, i32 addrspace(3)* getelementptr inbounds ([4 x i32], [4 x i32] addrspace(3)* @foo, i32 0, i32 0), align 4
%val1 = load i32, i32 addrspace(3)* getelementptr inbounds ([4 x i32], [4 x i32] addrspace(3)* @foo, i32 0, i32 1), align 4
%sum = add i32 %val0, %val1
@@ -396,7 +396,7 @@ define void @load_constant_adjacent_offsets(i32 addrspace(1)* %out) {
; SI-LABEL: @load_constant_disjoint_offsets
; SI: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
; SI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[ZERO]] offset1:2
-define void @load_constant_disjoint_offsets(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @load_constant_disjoint_offsets(i32 addrspace(1)* %out) {
%val0 = load i32, i32 addrspace(3)* getelementptr inbounds ([4 x i32], [4 x i32] addrspace(3)* @foo, i32 0, i32 0), align 4
%val1 = load i32, i32 addrspace(3)* getelementptr inbounds ([4 x i32], [4 x i32] addrspace(3)* @foo, i32 0, i32 2), align 4
%sum = add i32 %val0, %val1
@@ -410,7 +410,7 @@ define void @load_constant_disjoint_offsets(i32 addrspace(1)* %out) {
; SI: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
; SI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[ZERO]] offset1:1
; SI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[ZERO]] offset0:2 offset1:3
-define void @load_misaligned64_constant_offsets(i64 addrspace(1)* %out) {
+define amdgpu_kernel void @load_misaligned64_constant_offsets(i64 addrspace(1)* %out) {
%val0 = load i64, i64 addrspace(3)* getelementptr inbounds ([4 x i64], [4 x i64] addrspace(3)* @bar, i32 0, i32 0), align 4
%val1 = load i64, i64 addrspace(3)* getelementptr inbounds ([4 x i64], [4 x i64] addrspace(3)* @bar, i32 0, i32 1), align 4
%sum = add i64 %val0, %val1
@@ -426,7 +426,7 @@ define void @load_misaligned64_constant_offsets(i64 addrspace(1)* %out) {
; SI-DAG: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[BASE0]] offset1:1
; SI-DAG: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[BASE1]] offset1:1
; SI: s_endpgm
-define void @load_misaligned64_constant_large_offsets(i64 addrspace(1)* %out) {
+define amdgpu_kernel void @load_misaligned64_constant_large_offsets(i64 addrspace(1)* %out) {
%val0 = load i64, i64 addrspace(3)* getelementptr inbounds ([4096 x i64], [4096 x i64] addrspace(3)* @bar.large, i32 0, i32 2048), align 4
%val1 = load i64, i64 addrspace(3)* getelementptr inbounds ([4096 x i64], [4096 x i64] addrspace(3)* @bar.large, i32 0, i32 4095), align 4
%sum = add i64 %val0, %val1
@@ -437,7 +437,7 @@ define void @load_misaligned64_constant_large_offsets(i64 addrspace(1)* %out) {
@sgemm.lA = internal unnamed_addr addrspace(3) global [264 x float] undef, align 4
@sgemm.lB = internal unnamed_addr addrspace(3) global [776 x float] undef, align 4
-define void @sgemm_inner_loop_read2_sequence(float addrspace(1)* %C, i32 %lda, i32 %ldb) #0 {
+define amdgpu_kernel void @sgemm_inner_loop_read2_sequence(float addrspace(1)* %C, i32 %lda, i32 %ldb) #0 {
%x.i = tail call i32 @llvm.amdgcn.workgroup.id.x() #1
%y.i = tail call i32 @llvm.amdgcn.workitem.id.y() #1
%arrayidx44 = getelementptr inbounds [264 x float], [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %x.i
@@ -481,13 +481,13 @@ define void @sgemm_inner_loop_read2_sequence(float addrspace(1)* %C, i32 %lda, i
ret void
}
-define void @misaligned_read2_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @misaligned_read2_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(3)* %in) #0 {
%load = load <2 x i32>, <2 x i32> addrspace(3)* %in, align 4
store <2 x i32> %load, <2 x i32> addrspace(1)* %out, align 8
ret void
}
-define void @misaligned_read2_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @misaligned_read2_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %in) #0 {
%load = load i64, i64 addrspace(3)* %in, align 4
store i64 %load, i64 addrspace(1)* %out, align 8
ret void
diff --git a/test/CodeGen/AMDGPU/ds_read2_offset_order.ll b/test/CodeGen/AMDGPU/ds_read2_offset_order.ll
index 4a3f3fb99700..9668743cf128 100644
--- a/test/CodeGen/AMDGPU/ds_read2_offset_order.ll
+++ b/test/CodeGen/AMDGPU/ds_read2_offset_order.ll
@@ -10,7 +10,7 @@
; SI-DAG: ds_read2_b32 v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} offset0:2 offset1:3
; SI-DAG: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:56
; SI-DAG: ds_read2_b32 v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} offset0:11 offset1:12
-define void @offset_order(float addrspace(1)* %out) {
+define amdgpu_kernel void @offset_order(float addrspace(1)* %out) {
entry:
%ptr0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 0
%val0 = load float, float addrspace(3)* %ptr0
diff --git a/test/CodeGen/AMDGPU/ds_read2_superreg.ll b/test/CodeGen/AMDGPU/ds_read2_superreg.ll
index 9d8375d64037..3dfdaf3936a6 100644
--- a/test/CodeGen/AMDGPU/ds_read2_superreg.ll
+++ b/test/CodeGen/AMDGPU/ds_read2_superreg.ll
@@ -12,7 +12,7 @@
; CI: s_waitcnt lgkmcnt(0)
; CI: buffer_store_dwordx2 [[RESULT]]
; CI: s_endpgm
-define void @simple_read2_v2f32_superreg_align4(<2 x float> addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_v2f32_superreg_align4(<2 x float> addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x <2 x float>], [512 x <2 x float>] addrspace(3)* @lds.v2, i32 0, i32 %x.i
%val0 = load <2 x float>, <2 x float> addrspace(3)* %arrayidx0, align 4
@@ -26,7 +26,7 @@ define void @simple_read2_v2f32_superreg_align4(<2 x float> addrspace(1)* %out)
; CI: s_waitcnt lgkmcnt(0)
; CI: buffer_store_dwordx2 [[RESULT]]
; CI: s_endpgm
-define void @simple_read2_v2f32_superreg(<2 x float> addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_v2f32_superreg(<2 x float> addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x <2 x float>], [512 x <2 x float>] addrspace(3)* @lds.v2, i32 0, i32 %x.i
%val0 = load <2 x float>, <2 x float> addrspace(3)* %arrayidx0
@@ -43,7 +43,7 @@ define void @simple_read2_v2f32_superreg(<2 x float> addrspace(1)* %out) #0 {
; CI: v_add_f32_e32 v[[ADD2:[0-9]+]], v[[ADD1]], v[[ADD0]]
; CI: buffer_store_dword v[[ADD2]]
; CI: s_endpgm
-define void @simple_read2_v4f32_superreg_align4(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_v4f32_superreg_align4(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x <4 x float>], [512 x <4 x float>] addrspace(3)* @lds.v4, i32 0, i32 %x.i
%val0 = load <4 x float>, <4 x float> addrspace(3)* %arrayidx0, align 4
@@ -68,7 +68,7 @@ define void @simple_read2_v4f32_superreg_align4(float addrspace(1)* %out) #0 {
; CI-DAG: v_add_f32_e32 v[[ADD1:[0-9]+]], v[[REG_Y]], v[[ADD0]]
; CI: buffer_store_dword v[[ADD1]]
; CI: s_endpgm
-define void @simple_read2_v3f32_superreg_align4(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_v3f32_superreg_align4(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x <3 x float>], [512 x <3 x float>] addrspace(3)* @lds.v3, i32 0, i32 %x.i
%val0 = load <3 x float>, <3 x float> addrspace(3)* %arrayidx0, align 4
@@ -88,7 +88,7 @@ define void @simple_read2_v3f32_superreg_align4(float addrspace(1)* %out) #0 {
; CI: ds_read2_b64 [[REG_ZW:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}} offset1:1{{$}}
; CI: buffer_store_dwordx4 [[REG_ZW]]
; CI: s_endpgm
-define void @simple_read2_v4f32_superreg_align8(<4 x float> addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_v4f32_superreg_align8(<4 x float> addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x <4 x float>], [512 x <4 x float>] addrspace(3)* @lds.v4, i32 0, i32 %x.i
%val0 = load <4 x float>, <4 x float> addrspace(3)* %arrayidx0, align 8
@@ -101,7 +101,7 @@ define void @simple_read2_v4f32_superreg_align8(<4 x float> addrspace(1)* %out)
; CI-DAG: ds_read2_b64 [[REG_ZW:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}} offset1:1{{$}}
; CI: buffer_store_dwordx4 [[REG_ZW]]
; CI: s_endpgm
-define void @simple_read2_v4f32_superreg(<4 x float> addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_v4f32_superreg(<4 x float> addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x <4 x float>], [512 x <4 x float>] addrspace(3)* @lds.v4, i32 0, i32 %x.i
%val0 = load <4 x float>, <4 x float> addrspace(3)* %arrayidx0
@@ -117,7 +117,7 @@ define void @simple_read2_v4f32_superreg(<4 x float> addrspace(1)* %out) #0 {
; CI-DAG: buffer_store_dwordx4 [[VEC_HI]], v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:16
; CI-DAG: buffer_store_dwordx4 [[VEC_LO]], v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64{{$}}
; CI: s_endpgm
-define void @simple_read2_v8f32_superreg(<8 x float> addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_v8f32_superreg(<8 x float> addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x <8 x float>], [512 x <8 x float>] addrspace(3)* @lds.v8, i32 0, i32 %x.i
%val0 = load <8 x float>, <8 x float> addrspace(3)* %arrayidx0
@@ -138,7 +138,7 @@ define void @simple_read2_v8f32_superreg(<8 x float> addrspace(1)* %out) #0 {
; CI-DAG: buffer_store_dwordx4 [[VEC8_11]], v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:32
; CI-DAG: buffer_store_dwordx4 [[VEC12_15]], v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:48
; CI: s_endpgm
-define void @simple_read2_v16f32_superreg(<16 x float> addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_v16f32_superreg(<16 x float> addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x <16 x float>], [512 x <16 x float>] addrspace(3)* @lds.v16, i32 0, i32 %x.i
%val0 = load <16 x float>, <16 x float> addrspace(3)* %arrayidx0
@@ -150,10 +150,10 @@ define void @simple_read2_v16f32_superreg(<16 x float> addrspace(1)* %out) #0 {
; Do scalar loads into the super register we need.
; CI-LABEL: {{^}}simple_read2_v2f32_superreg_scalar_loads_align4:
; CI-DAG: ds_read2_b32 v{{\[}}[[REG_ELT0:[0-9]+]]:[[REG_ELT1:[0-9]+]]{{\]}}, v{{[0-9]+}} offset1:1{{$}}
-; CI-NOT: v_mov
+; CI-NOT: v_mov {{v[0-9]+}}, {{[sv][0-9]+}}
; CI: buffer_store_dwordx2 v{{\[}}[[REG_ELT0]]:[[REG_ELT1]]{{\]}}
; CI: s_endpgm
-define void @simple_read2_v2f32_superreg_scalar_loads_align4(<2 x float> addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_v2f32_superreg_scalar_loads_align4(<2 x float> addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%arrayidx1 = getelementptr inbounds float, float addrspace(3)* %arrayidx0, i32 1
@@ -173,10 +173,10 @@ define void @simple_read2_v2f32_superreg_scalar_loads_align4(<2 x float> addrspa
; CI-LABEL: {{^}}simple_read2_v4f32_superreg_scalar_loads_align4:
; CI-DAG: ds_read2_b32 v{{\[}}[[REG_ELT0:[0-9]+]]:[[REG_ELT1:[0-9]+]]{{\]}}, v{{[0-9]+}} offset1:1{{$}}
; CI-DAG: ds_read2_b32 v{{\[}}[[REG_ELT2:[0-9]+]]:[[REG_ELT3:[0-9]+]]{{\]}}, v{{[0-9]+}} offset0:2 offset1:3{{$}}
-; CI-NOT: v_mov
+; CI-NOT: v_mov {{v[0-9]+}}, {{[sv][0-9]+}}
; CI: buffer_store_dwordx4 v{{\[}}[[REG_ELT0]]:[[REG_ELT3]]{{\]}}
; CI: s_endpgm
-define void @simple_read2_v4f32_superreg_scalar_loads_align4(<4 x float> addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2_v4f32_superreg_scalar_loads_align4(<4 x float> addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%arrayidx1 = getelementptr inbounds float, float addrspace(3)* %arrayidx0, i32 1
diff --git a/test/CodeGen/AMDGPU/ds_read2st64.ll b/test/CodeGen/AMDGPU/ds_read2st64.ll
index 99f01b4f2622..81b35a46aa18 100644
--- a/test/CodeGen/AMDGPU/ds_read2st64.ll
+++ b/test/CodeGen/AMDGPU/ds_read2st64.ll
@@ -10,7 +10,7 @@
; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], v[[HI_VREG]], v[[LO_VREG]]
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @simple_read2st64_f32_0_1(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2st64_f32_0_1(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%val0 = load float, float addrspace(3)* %arrayidx0, align 4
@@ -29,7 +29,7 @@ define void @simple_read2st64_f32_0_1(float addrspace(1)* %out) #0 {
; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], v[[HI_VREG]], v[[LO_VREG]]
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @simple_read2st64_f32_1_2(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
+define amdgpu_kernel void @simple_read2st64_f32_1_2(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%add.x.0 = add nsw i32 %x.i, 64
%arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.0
@@ -49,7 +49,7 @@ define void @simple_read2st64_f32_1_2(float addrspace(1)* %out, float addrspace(
; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], v[[HI_VREG]], v[[LO_VREG]]
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @simple_read2st64_f32_max_offset(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
+define amdgpu_kernel void @simple_read2st64_f32_max_offset(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%add.x.0 = add nsw i32 %x.i, 64
%arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.0
@@ -69,7 +69,7 @@ define void @simple_read2st64_f32_max_offset(float addrspace(1)* %out, float add
; SI-DAG: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:256
; SI-DAG: ds_read_b32 {{v[0-9]+}}, [[BIGADD]]{{$}}
; SI: s_endpgm
-define void @simple_read2st64_f32_over_max_offset(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
+define amdgpu_kernel void @simple_read2st64_f32_over_max_offset(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%add.x.0 = add nsw i32 %x.i, 64
%arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.0
@@ -86,7 +86,7 @@ define void @simple_read2st64_f32_over_max_offset(float addrspace(1)* %out, floa
; SI-LABEL: @odd_invalid_read2st64_f32_0
; SI-NOT: ds_read2st64_b32
; SI: s_endpgm
-define void @odd_invalid_read2st64_f32_0(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @odd_invalid_read2st64_f32_0(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%val0 = load float, float addrspace(3)* %arrayidx0, align 4
@@ -102,7 +102,7 @@ define void @odd_invalid_read2st64_f32_0(float addrspace(1)* %out) #0 {
; SI-LABEL: @odd_invalid_read2st64_f32_1
; SI-NOT: ds_read2st64_b32
; SI: s_endpgm
-define void @odd_invalid_read2st64_f32_1(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @odd_invalid_read2st64_f32_1(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%add.x.0 = add nsw i32 %x.i, 64
%arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.0
@@ -122,7 +122,7 @@ define void @odd_invalid_read2st64_f32_1(float addrspace(1)* %out) #0 {
; SI: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO_VREG]]:{{[0-9]+\]}}, v{{\[[0-9]+}}:[[HI_VREG]]{{\]}}
; SI: buffer_store_dwordx2 [[RESULT]]
; SI: s_endpgm
-define void @simple_read2st64_f64_0_1(double addrspace(1)* %out) #0 {
+define amdgpu_kernel void @simple_read2st64_f64_0_1(double addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
%val0 = load double, double addrspace(3)* %arrayidx0, align 8
@@ -141,7 +141,7 @@ define void @simple_read2st64_f64_0_1(double addrspace(1)* %out) #0 {
; SI: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO_VREG]]:{{[0-9]+\]}}, v{{\[[0-9]+}}:[[HI_VREG]]{{\]}}
; SI: buffer_store_dwordx2 [[RESULT]]
; SI: s_endpgm
-define void @simple_read2st64_f64_1_2(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
+define amdgpu_kernel void @simple_read2st64_f64_1_2(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%add.x.0 = add nsw i32 %x.i, 64
%arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.0
@@ -161,7 +161,7 @@ define void @simple_read2st64_f64_1_2(double addrspace(1)* %out, double addrspac
; SI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset1:1
; SI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset0:128 offset1:129
; SI: s_endpgm
-define void @misaligned_read2st64_f64(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
+define amdgpu_kernel void @misaligned_read2st64_f64(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %x.i
%val0 = load double, double addrspace(3)* %arrayidx0, align 4
@@ -181,7 +181,7 @@ define void @misaligned_read2st64_f64(double addrspace(1)* %out, double addrspac
; SI: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO_VREG]]:{{[0-9]+\]}}, v{{\[[0-9]+}}:[[HI_VREG]]{{\]}}
; SI: buffer_store_dwordx2 [[RESULT]]
; SI: s_endpgm
-define void @simple_read2st64_f64_max_offset(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
+define amdgpu_kernel void @simple_read2st64_f64_max_offset(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%add.x.0 = add nsw i32 %x.i, 256
%arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.0
@@ -197,11 +197,11 @@ define void @simple_read2st64_f64_max_offset(double addrspace(1)* %out, double a
; SI-LABEL: @simple_read2st64_f64_over_max_offset
; SI-NOT: ds_read2st64_b64
-; SI: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset:512
-; SI: v_add_i32_e32 [[BIGADD:v[0-9]+]], vcc, 0x10000, {{v[0-9]+}}
+; SI-DAG: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset:512
+; SI-DAG: v_add_i32_e32 [[BIGADD:v[0-9]+]], vcc, 0x10000, {{v[0-9]+}}
; SI: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, [[BIGADD]]
; SI: s_endpgm
-define void @simple_read2st64_f64_over_max_offset(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
+define amdgpu_kernel void @simple_read2st64_f64_over_max_offset(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%add.x.0 = add nsw i32 %x.i, 64
%arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.0
@@ -218,7 +218,7 @@ define void @simple_read2st64_f64_over_max_offset(double addrspace(1)* %out, dou
; SI-LABEL: @invalid_read2st64_f64_odd_offset
; SI-NOT: ds_read2st64_b64
; SI: s_endpgm
-define void @invalid_read2st64_f64_odd_offset(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
+define amdgpu_kernel void @invalid_read2st64_f64_odd_offset(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%add.x.0 = add nsw i32 %x.i, 64
%arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.0
@@ -239,7 +239,7 @@ define void @invalid_read2st64_f64_odd_offset(double addrspace(1)* %out, double
; SI-NOT: ds_read2st_b64
; SI: ds_read2_b64 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset1:8
; SI: s_endpgm
-define void @byte_size_only_divisible_64_read2_f64(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
+define amdgpu_kernel void @byte_size_only_divisible_64_read2_f64(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %x.i
%val0 = load double, double addrspace(3)* %arrayidx0, align 8
diff --git a/test/CodeGen/AMDGPU/ds_write2.ll b/test/CodeGen/AMDGPU/ds_write2.ll
index ae230dac9378..ab1cf0ba25b5 100644
--- a/test/CodeGen/AMDGPU/ds_write2.ll
+++ b/test/CodeGen/AMDGPU/ds_write2.ll
@@ -9,7 +9,7 @@
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
; SI: ds_write2_b32 [[VPTR]], [[VAL]], [[VAL]] offset1:8
; SI: s_endpgm
-define void @simple_write2_one_val_f32(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @simple_write2_one_val_f32(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep = getelementptr float, float addrspace(1)* %in, i32 %x.i
%val = load float, float addrspace(1)* %in.gep, align 4
@@ -27,7 +27,7 @@ define void @simple_write2_one_val_f32(float addrspace(1)* %C, float addrspace(1
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
; SI: ds_write2_b32 [[VPTR]], [[VAL0]], [[VAL1]] offset1:8
; SI: s_endpgm
-define void @simple_write2_two_val_f32(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @simple_write2_two_val_f32(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %x.i
%in.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
@@ -46,7 +46,7 @@ define void @simple_write2_two_val_f32(float addrspace(1)* %C, float addrspace(1
; SI: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}}
; SI: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:32
; SI: s_endpgm
-define void @simple_write2_two_val_f32_volatile_0(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 {
+define amdgpu_kernel void @simple_write2_two_val_f32_volatile_0(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %x.i
%in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %x.i
@@ -65,7 +65,7 @@ define void @simple_write2_two_val_f32_volatile_0(float addrspace(1)* %C, float
; SI: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}}
; SI: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:32
; SI: s_endpgm
-define void @simple_write2_two_val_f32_volatile_1(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 {
+define amdgpu_kernel void @simple_write2_two_val_f32_volatile_1(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %x.i
%in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %x.i
@@ -86,7 +86,7 @@ define void @simple_write2_two_val_f32_volatile_1(float addrspace(1)* %C, float
; SI: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
; SI: ds_write2_b32 [[VPTR]], v[[VAL0]], v[[VAL1]] offset1:8
; SI: s_endpgm
-define void @simple_write2_two_val_subreg2_mixed_f32(float addrspace(1)* %C, <2 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @simple_write2_two_val_subreg2_mixed_f32(float addrspace(1)* %C, <2 x float> addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep.0 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in, i32 %x.i
%in.gep.1 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in.gep.0, i32 1
@@ -107,7 +107,7 @@ define void @simple_write2_two_val_subreg2_mixed_f32(float addrspace(1)* %C, <2
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
; SI: ds_write2_b32 [[VPTR]], v[[VAL0]], v[[VAL1]] offset1:8
; SI: s_endpgm
-define void @simple_write2_two_val_subreg2_f32(float addrspace(1)* %C, <2 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @simple_write2_two_val_subreg2_f32(float addrspace(1)* %C, <2 x float> addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep = getelementptr <2 x float>, <2 x float> addrspace(1)* %in, i32 %x.i
%val = load <2 x float>, <2 x float> addrspace(1)* %in.gep, align 8
@@ -126,7 +126,7 @@ define void @simple_write2_two_val_subreg2_f32(float addrspace(1)* %C, <2 x floa
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
; SI: ds_write2_b32 [[VPTR]], v[[VAL0]], v[[VAL1]] offset1:8
; SI: s_endpgm
-define void @simple_write2_two_val_subreg4_f32(float addrspace(1)* %C, <4 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @simple_write2_two_val_subreg4_f32(float addrspace(1)* %C, <4 x float> addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 %x.i
%val = load <4 x float>, <4 x float> addrspace(1)* %in.gep, align 16
@@ -146,7 +146,7 @@ define void @simple_write2_two_val_subreg4_f32(float addrspace(1)* %C, <4 x floa
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
; SI: ds_write2_b32 [[VPTR]], [[VAL0]], [[VAL1]] offset1:255
; SI: s_endpgm
-define void @simple_write2_two_val_max_offset_f32(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @simple_write2_two_val_max_offset_f32(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %x.i
%in.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
@@ -164,7 +164,7 @@ define void @simple_write2_two_val_max_offset_f32(float addrspace(1)* %C, float
; SI: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}}
; SI: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:1028
; SI: s_endpgm
-define void @simple_write2_two_val_too_far_f32(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 {
+define amdgpu_kernel void @simple_write2_two_val_too_far_f32(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %x.i
%in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %x.i
@@ -182,7 +182,7 @@ define void @simple_write2_two_val_too_far_f32(float addrspace(1)* %C, float add
; SI: ds_write2_b32 [[BASEADDR:v[0-9]+]], [[VAL0:v[0-9]+]], [[VAL1:v[0-9]+]] offset1:8
; SI: ds_write2_b32 [[BASEADDR:v[0-9]+]], [[VAL0]], [[VAL1]] offset0:11 offset1:27
; SI: s_endpgm
-define void @simple_write2_two_val_f32_x2(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 {
+define amdgpu_kernel void @simple_write2_two_val_f32_x2(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %tid.x
%in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %tid.x
@@ -212,7 +212,7 @@ define void @simple_write2_two_val_f32_x2(float addrspace(1)* %C, float addrspac
; SI: ds_write2_b32 [[BASEADDR:v[0-9]+]], [[VAL0:v[0-9]+]], [[VAL1:v[0-9]+]] offset0:3 offset1:8
; SI: ds_write2_b32 [[BASEADDR:v[0-9]+]], [[VAL0]], [[VAL1]] offset0:11 offset1:27
; SI: s_endpgm
-define void @simple_write2_two_val_f32_x2_nonzero_base(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 {
+define amdgpu_kernel void @simple_write2_two_val_f32_x2_nonzero_base(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %tid.x
%in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %tid.x
@@ -243,7 +243,7 @@ define void @simple_write2_two_val_f32_x2_nonzero_base(float addrspace(1)* %C, f
; SI: ds_write_b32
; SI: ds_write_b32
; SI: s_endpgm
-define void @write2_ptr_subreg_arg_two_val_f32(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1, <2 x float addrspace(3)*> %lds.ptr) #0 {
+define amdgpu_kernel void @write2_ptr_subreg_arg_two_val_f32(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1, <2 x float addrspace(3)*> %lds.ptr) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %x.i
%in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %x.i
@@ -270,7 +270,7 @@ define void @write2_ptr_subreg_arg_two_val_f32(float addrspace(1)* %C, float add
; SI: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 3, v{{[0-9]+}}
; SI: ds_write2_b64 [[VPTR]], [[VAL]], [[VAL]] offset1:8
; SI: s_endpgm
-define void @simple_write2_one_val_f64(double addrspace(1)* %C, double addrspace(1)* %in) #0 {
+define amdgpu_kernel void @simple_write2_one_val_f64(double addrspace(1)* %C, double addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep = getelementptr double, double addrspace(1)* %in, i32 %x.i
%val = load double, double addrspace(1)* %in.gep, align 8
@@ -288,7 +288,7 @@ define void @simple_write2_one_val_f64(double addrspace(1)* %C, double addrspace
; SI: ds_write2_b32 [[VPTR]], v[[VAL0]], v[[VAL1]] offset1:1
; SI: ds_write2_b32 [[VPTR]], v[[VAL0]], v[[VAL1]] offset0:14 offset1:15
; SI: s_endpgm
-define void @misaligned_simple_write2_one_val_f64(double addrspace(1)* %C, double addrspace(1)* %in, double addrspace(3)* %lds) #0 {
+define amdgpu_kernel void @misaligned_simple_write2_one_val_f64(double addrspace(1)* %C, double addrspace(1)* %in, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep = getelementptr double, double addrspace(1)* %in, i32 %x.i
%val = load double, double addrspace(1)* %in.gep, align 8
@@ -306,7 +306,7 @@ define void @misaligned_simple_write2_one_val_f64(double addrspace(1)* %C, doubl
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 3, v{{[0-9]+}}
; SI: ds_write2_b64 [[VPTR]], [[VAL0]], [[VAL1]] offset1:8
; SI: s_endpgm
-define void @simple_write2_two_val_f64(double addrspace(1)* %C, double addrspace(1)* %in) #0 {
+define amdgpu_kernel void @simple_write2_two_val_f64(double addrspace(1)* %C, double addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep.0 = getelementptr double, double addrspace(1)* %in, i32 %x.i
%in.gep.1 = getelementptr double, double addrspace(1)* %in.gep.0, i32 1
@@ -325,7 +325,7 @@ define void @simple_write2_two_val_f64(double addrspace(1)* %C, double addrspace
; SI-LABEL: @store_constant_adjacent_offsets
; SI: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
; SI: ds_write2_b32 [[ZERO]], v{{[0-9]+}}, v{{[0-9]+}} offset1:1
-define void @store_constant_adjacent_offsets() {
+define amdgpu_kernel void @store_constant_adjacent_offsets() {
store i32 123, i32 addrspace(3)* getelementptr inbounds ([4 x i32], [4 x i32] addrspace(3)* @foo, i32 0, i32 0), align 4
store i32 123, i32 addrspace(3)* getelementptr inbounds ([4 x i32], [4 x i32] addrspace(3)* @foo, i32 0, i32 1), align 4
ret void
@@ -335,7 +335,7 @@ define void @store_constant_adjacent_offsets() {
; SI-DAG: v_mov_b32_e32 [[VAL:v[0-9]+]], 0x7b{{$}}
; SI-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
; SI: ds_write2_b32 [[ZERO]], [[VAL]], [[VAL]] offset1:2
-define void @store_constant_disjoint_offsets() {
+define amdgpu_kernel void @store_constant_disjoint_offsets() {
store i32 123, i32 addrspace(3)* getelementptr inbounds ([4 x i32], [4 x i32] addrspace(3)* @foo, i32 0, i32 0), align 4
store i32 123, i32 addrspace(3)* getelementptr inbounds ([4 x i32], [4 x i32] addrspace(3)* @foo, i32 0, i32 2), align 4
ret void
@@ -348,7 +348,7 @@ define void @store_constant_disjoint_offsets() {
; SI-DAG: ds_write2_b32 [[ZERO]], v{{[0-9]+}}, v{{[0-9]+}} offset1:1
; SI-DAG: ds_write2_b32 [[ZERO]], v{{[0-9]+}}, v{{[0-9]+}} offset0:2 offset1:3
; SI: s_endpgm
-define void @store_misaligned64_constant_offsets() {
+define amdgpu_kernel void @store_misaligned64_constant_offsets() {
store i64 123, i64 addrspace(3)* getelementptr inbounds ([4 x i64], [4 x i64] addrspace(3)* @bar, i32 0, i32 0), align 4
store i64 123, i64 addrspace(3)* getelementptr inbounds ([4 x i64], [4 x i64] addrspace(3)* @bar, i32 0, i32 1), align 4
ret void
@@ -362,7 +362,7 @@ define void @store_misaligned64_constant_offsets() {
; SI-DAG: ds_write2_b32 [[BASE0]], v{{[0-9]+}}, v{{[0-9]+}} offset1:1
; SI-DAG: ds_write2_b32 [[BASE1]], v{{[0-9]+}}, v{{[0-9]+}} offset1:1
; SI: s_endpgm
-define void @store_misaligned64_constant_large_offsets() {
+define amdgpu_kernel void @store_misaligned64_constant_large_offsets() {
store i64 123, i64 addrspace(3)* getelementptr inbounds ([4096 x i64], [4096 x i64] addrspace(3)* @bar.large, i32 0, i32 2048), align 4
store i64 123, i64 addrspace(3)* getelementptr inbounds ([4096 x i64], [4096 x i64] addrspace(3)* @bar.large, i32 0, i32 4095), align 4
ret void
@@ -371,7 +371,7 @@ define void @store_misaligned64_constant_large_offsets() {
@sgemm.lA = internal unnamed_addr addrspace(3) global [264 x float] undef, align 4
@sgemm.lB = internal unnamed_addr addrspace(3) global [776 x float] undef, align 4
-define void @write2_sgemm_sequence(float addrspace(1)* %C, i32 %lda, i32 %ldb, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @write2_sgemm_sequence(float addrspace(1)* %C, i32 %lda, i32 %ldb, float addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.amdgcn.workgroup.id.x() #1
%y.i = tail call i32 @llvm.amdgcn.workitem.id.y() #1
%val = load float, float addrspace(1)* %in
@@ -410,7 +410,7 @@ define void @write2_sgemm_sequence(float addrspace(1)* %C, i32 %lda, i32 %ldb, f
; CI: ds_write2_b32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} offset0:3 offset1:2{{$}}
; CI: ds_write2_b32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} offset0:1{{$}}
; CI: s_endpgm
-define void @simple_write2_v4f32_superreg_align4(<4 x float> addrspace(3)* %out, <4 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @simple_write2_v4f32_superreg_align4(<4 x float> addrspace(3)* %out, <4 x float> addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep = getelementptr inbounds <4 x float>, <4 x float> addrspace(1)* %in
%val0 = load <4 x float>, <4 x float> addrspace(1)* %in.gep, align 4
diff --git a/test/CodeGen/AMDGPU/ds_write2st64.ll b/test/CodeGen/AMDGPU/ds_write2st64.ll
index 872e77361406..a395af34b67b 100644
--- a/test/CodeGen/AMDGPU/ds_write2st64.ll
+++ b/test/CodeGen/AMDGPU/ds_write2st64.ll
@@ -7,7 +7,7 @@
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
; SI: ds_write2st64_b32 [[VPTR]], [[VAL]], [[VAL]] offset1:1
; SI: s_endpgm
-define void @simple_write2st64_one_val_f32_0_1(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @simple_write2st64_one_val_f32_0_1(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep = getelementptr float, float addrspace(1)* %in, i32 %x.i
%val = load float, float addrspace(1)* %in.gep, align 4
@@ -25,7 +25,7 @@ define void @simple_write2st64_one_val_f32_0_1(float addrspace(1)* %C, float add
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
; SI: ds_write2st64_b32 [[VPTR]], [[VAL0]], [[VAL1]] offset0:2 offset1:5
; SI: s_endpgm
-define void @simple_write2st64_two_val_f32_2_5(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @simple_write2st64_two_val_f32_2_5(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %x.i
%in.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
@@ -46,7 +46,7 @@ define void @simple_write2st64_two_val_f32_2_5(float addrspace(1)* %C, float add
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
; SI: ds_write2st64_b32 [[VPTR]], [[VAL0]], [[VAL1]] offset1:255
; SI: s_endpgm
-define void @simple_write2st64_two_val_max_offset_f32(float addrspace(1)* %C, float addrspace(1)* %in, float addrspace(3)* %lds) #0 {
+define amdgpu_kernel void @simple_write2st64_two_val_max_offset_f32(float addrspace(1)* %C, float addrspace(1)* %in, float addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %x.i
%in.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
@@ -66,7 +66,7 @@ define void @simple_write2st64_two_val_max_offset_f32(float addrspace(1)* %C, fl
; SI-DAG: v_add_i32_e32 [[VPTR:v[0-9]+]],
; SI: ds_write2st64_b64 [[VPTR]], [[VAL0]], [[VAL1]] offset0:4 offset1:127
; SI: s_endpgm
-define void @simple_write2st64_two_val_max_offset_f64(double addrspace(1)* %C, double addrspace(1)* %in, double addrspace(3)* %lds) #0 {
+define amdgpu_kernel void @simple_write2st64_two_val_max_offset_f64(double addrspace(1)* %C, double addrspace(1)* %in, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep.0 = getelementptr double, double addrspace(1)* %in, i32 %x.i
%in.gep.1 = getelementptr double, double addrspace(1)* %in.gep.0, i32 1
@@ -85,7 +85,7 @@ define void @simple_write2st64_two_val_max_offset_f64(double addrspace(1)* %C, d
; SI-NOT: ds_write2st64_b64
; SI: ds_write2_b64 {{v[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}} offset1:8
; SI: s_endpgm
-define void @byte_size_only_divisible_64_write2st64_f64(double addrspace(1)* %C, double addrspace(1)* %in, double addrspace(3)* %lds) #0 {
+define amdgpu_kernel void @byte_size_only_divisible_64_write2st64_f64(double addrspace(1)* %C, double addrspace(1)* %in, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep = getelementptr double, double addrspace(1)* %in, i32 %x.i
%val = load double, double addrspace(1)* %in.gep, align 8
diff --git a/test/CodeGen/AMDGPU/dynamic_stackalloc.ll b/test/CodeGen/AMDGPU/dynamic_stackalloc.ll
index 580dc00f935e..b1107ea7fbcb 100644
--- a/test/CodeGen/AMDGPU/dynamic_stackalloc.ll
+++ b/test/CodeGen/AMDGPU/dynamic_stackalloc.ll
@@ -4,7 +4,7 @@
; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-define void @test_dynamic_stackalloc(i32 addrspace(1)* %out, i32 %n) {
+define amdgpu_kernel void @test_dynamic_stackalloc(i32 addrspace(1)* %out, i32 %n) {
%alloca = alloca i32, i32 %n
store volatile i32 0, i32* %alloca
ret void
diff --git a/test/CodeGen/AMDGPU/early-if-convert-cost.ll b/test/CodeGen/AMDGPU/early-if-convert-cost.ll
new file mode 100644
index 000000000000..ace01593808b
--- /dev/null
+++ b/test/CodeGen/AMDGPU/early-if-convert-cost.ll
@@ -0,0 +1,110 @@
+; RUN: llc -stress-early-ifcvt -amdgpu-early-ifcvt=1 -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; FIXME: Most of these cases don't trigger because of broken cost
+; heuristics. Should not need -stress-early-ifcvt.
+
+; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle64:
+; GCN: buffer_load_dwordx2 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
+; GCN: v_cmp_neq_f64_e32 vcc, 1.0, v{{\[}}[[VAL_LO]]:[[VAL_HI]]{{\]}}
+; GCN: v_add_f64 v{{\[}}[[ADD_LO:[0-9]+]]:[[ADD_HI:[0-9]+]]{{\]}}, v{{\[}}[[VAL_LO]]:[[VAL_HI]]{{\]}}, v{{\[}}[[VAL_LO]]:[[VAL_HI]]{{\]}}
+; GCN-DAG: v_cndmask_b32_e32 v[[RESULT_LO:[0-9]+]], v[[ADD_LO]], v[[VAL_LO]], vcc
+; GCN-DAG: v_cndmask_b32_e32 v[[RESULT_HI:[0-9]+]], v[[ADD_HI]], v[[VAL_HI]], vcc
+; GCN: buffer_store_dwordx2 v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
+define amdgpu_kernel void @test_vccnz_ifcvt_triangle64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+entry:
+ %v = load double, double addrspace(1)* %in
+ %cc = fcmp oeq double %v, 1.000000e+00
+ br i1 %cc, label %if, label %endif
+
+if:
+ %u = fadd double %v, %v
+ br label %endif
+
+endif:
+ %r = phi double [ %v, %entry ], [ %u, %if ]
+ store double %r, double addrspace(1)* %out
+ ret void
+}
+
+; vcc branch with SGPR inputs
+; GCN-LABEL: {{^}}test_vccnz_sgpr_ifcvt_triangle64:
+; GCN: v_cmp_neq_f64
+; GCN: v_add_f64
+; GCN: v_cndmask_b32_e32
+; GCN: v_cndmask_b32_e32
+define amdgpu_kernel void @test_vccnz_sgpr_ifcvt_triangle64(double addrspace(1)* %out, double addrspace(2)* %in) #0 {
+entry:
+ %v = load double, double addrspace(2)* %in
+ %cc = fcmp oeq double %v, 1.000000e+00
+ br i1 %cc, label %if, label %endif
+
+if:
+ %u = fadd double %v, %v
+ br label %endif
+
+endif:
+ %r = phi double [ %v, %entry ], [ %u, %if ]
+ store double %r, double addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle96:
+; GCN: v_cmp_neq_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 1.0
+
+; GCN: v_add_i32_e32
+; GCN: v_add_i32_e32
+; GCN: v_add_i32_e32
+; GCN: s_mov_b64 vcc, [[CMP]]
+
+; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
+; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
+; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
+
+; GCN-DAG: buffer_store_dword v
+; GCN-DAG: buffer_store_dwordx2
+define amdgpu_kernel void @test_vccnz_ifcvt_triangle96(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %in, float %cnd) #0 {
+entry:
+ %v = load <3 x i32>, <3 x i32> addrspace(1)* %in
+ %cc = fcmp oeq float %cnd, 1.000000e+00
+ br i1 %cc, label %if, label %endif
+
+if:
+ %u = add <3 x i32> %v, %v
+ br label %endif
+
+endif:
+ %r = phi <3 x i32> [ %v, %entry ], [ %u, %if ]
+ store <3 x i32> %r, <3 x i32> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle128:
+; GCN: v_cmp_neq_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 1.0
+
+; GCN: v_add_i32_e32
+; GCN: v_add_i32_e32
+; GCN: v_add_i32_e32
+; GCN: v_add_i32_e32
+; GCN: s_mov_b64 vcc, [[CMP]]
+
+; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
+; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
+; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
+; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
+
+; GCN: buffer_store_dwordx4
+define amdgpu_kernel void @test_vccnz_ifcvt_triangle128(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in, float %cnd) #0 {
+entry:
+ %v = load <4 x i32>, <4 x i32> addrspace(1)* %in
+ %cc = fcmp oeq float %cnd, 1.000000e+00
+ br i1 %cc, label %if, label %endif
+
+if:
+ %u = add <4 x i32> %v, %v
+ br label %endif
+
+endif:
+ %r = phi <4 x i32> [ %v, %entry ], [ %u, %if ]
+ store <4 x i32> %r, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/AMDGPU/early-if-convert.ll b/test/CodeGen/AMDGPU/early-if-convert.ll
new file mode 100644
index 000000000000..9439130deb9e
--- /dev/null
+++ b/test/CodeGen/AMDGPU/early-if-convert.ll
@@ -0,0 +1,454 @@
+; RUN: llc -march=amdgcn -mcpu=verde -amdgpu-early-ifcvt=1 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; XUN: llc -march=amdgcn -mcpu=tonga -amdgpu-early-ifcvt=1 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; FIXME: This leaves behind a now-unnecessary 'and' with exec.
+
+; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle:
+; GCN: buffer_load_dword [[VAL:v[0-9]+]]
+; GCN: v_cmp_neq_f32_e32 vcc, 1.0, [[VAL]]
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], [[VAL]], [[VAL]]
+; GCN: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], [[ADD]], [[VAL]], vcc
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @test_vccnz_ifcvt_triangle(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+entry:
+ %v = load float, float addrspace(1)* %in
+ %cc = fcmp oeq float %v, 1.000000e+00
+ br i1 %cc, label %if, label %endif
+
+if:
+ %u = fadd float %v, %v
+ br label %endif
+
+endif:
+ %r = phi float [ %v, %entry ], [ %u, %if ]
+ store float %r, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_vccnz_ifcvt_diamond:
+; GCN: buffer_load_dword [[VAL:v[0-9]+]]
+; GCN: v_cmp_neq_f32_e32 vcc, 1.0, [[VAL]]
+; GCN-DAG: v_add_f32_e32 [[ADD:v[0-9]+]], [[VAL]], [[VAL]]
+; GCN-DAG: v_mul_f32_e32 [[MUL:v[0-9]+]], [[VAL]], [[VAL]]
+; GCN: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], [[ADD]], [[MUL]], vcc
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @test_vccnz_ifcvt_diamond(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+entry:
+ %v = load float, float addrspace(1)* %in
+ %cc = fcmp oeq float %v, 1.000000e+00
+ br i1 %cc, label %if, label %else
+
+if:
+ %u0 = fadd float %v, %v
+ br label %endif
+
+else:
+ %u1 = fmul float %v, %v
+ br label %endif
+
+endif:
+ %r = phi float [ %u0, %if ], [ %u1, %else ]
+ store float %r, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle_vcc_clobber:
+; GCN: ; clobber vcc
+; GCN: v_cmp_neq_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 1.0
+; GCN: v_add_i32_e32 v{{[0-9]+}}, vcc
+; GCN: s_mov_b64 vcc, [[CMP]]
+; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
+define amdgpu_kernel void @test_vccnz_ifcvt_triangle_vcc_clobber(i32 addrspace(1)* %out, i32 addrspace(1)* %in, float %k) #0 {
+entry:
+ %v = load i32, i32 addrspace(1)* %in
+ %cc = fcmp oeq float %k, 1.000000e+00
+ br i1 %cc, label %if, label %endif
+
+if:
+ call void asm "; clobber $0", "~{VCC}"() #0
+ %u = add i32 %v, %v
+ br label %endif
+
+endif:
+ %r = phi i32 [ %v, %entry ], [ %u, %if ]
+ store i32 %r, i32 addrspace(1)* %out
+ ret void
+}
+
+; Longest chain of cheap instructions to convert
+; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle_max_cheap:
+; GCN: v_mul_f32
+; GCN: v_mul_f32
+; GCN: v_mul_f32
+; GCN: v_mul_f32
+; GCN: v_mul_f32
+; GCN: v_mul_f32
+; GCN: v_mul_f32
+; GCN: v_mul_f32
+; GCN: v_mul_f32
+; GCN: v_cndmask_b32_e32
+define amdgpu_kernel void @test_vccnz_ifcvt_triangle_max_cheap(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+entry:
+ %v = load float, float addrspace(1)* %in
+ %cc = fcmp oeq float %v, 1.000000e+00
+ br i1 %cc, label %if, label %endif
+
+if:
+ %u.0 = fmul float %v, %v
+ %u.1 = fmul float %v, %u.0
+ %u.2 = fmul float %v, %u.1
+ %u.3 = fmul float %v, %u.2
+ %u.4 = fmul float %v, %u.3
+ %u.5 = fmul float %v, %u.4
+ %u.6 = fmul float %v, %u.5
+ %u.7 = fmul float %v, %u.6
+ %u.8 = fmul float %v, %u.7
+ br label %endif
+
+endif:
+ %r = phi float [ %v, %entry ], [ %u.8, %if ]
+ store float %r, float addrspace(1)* %out
+ ret void
+}
+
+; Short chain of cheap instructions that should not be converted
+; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle_min_expensive:
+; GCN: s_cbranch_vccnz [[ENDIF:BB[0-9]+_[0-9]+]]
+
+; GCN: v_mul_f32
+; GCN: v_mul_f32
+; GCN: v_mul_f32
+; GCN: v_mul_f32
+; GCN: v_mul_f32
+; GCN: v_mul_f32
+; GCN: v_mul_f32
+; GCN: v_mul_f32
+; GCN: v_mul_f32
+; GCN: v_mul_f32
+
+; GCN: [[ENDIF]]:
+; GCN: buffer_store_dword
+define amdgpu_kernel void @test_vccnz_ifcvt_triangle_min_expensive(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+entry:
+ %v = load float, float addrspace(1)* %in
+ %cc = fcmp oeq float %v, 1.000000e+00
+ br i1 %cc, label %if, label %endif
+
+if:
+ %u.0 = fmul float %v, %v
+ %u.1 = fmul float %v, %u.0
+ %u.2 = fmul float %v, %u.1
+ %u.3 = fmul float %v, %u.2
+ %u.4 = fmul float %v, %u.3
+ %u.5 = fmul float %v, %u.4
+ %u.6 = fmul float %v, %u.5
+ %u.7 = fmul float %v, %u.6
+ %u.8 = fmul float %v, %u.7
+ %u.9 = fmul float %v, %u.8
+ br label %endif
+
+endif:
+ %r = phi float [ %v, %entry ], [ %u.9, %if ]
+ store float %r, float addrspace(1)* %out
+ ret void
+}
+
+; Should still branch over fdiv expansion
+; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle_expensive:
+; GCN: v_cmp_neq_f32_e32
+; GCN: s_cbranch_vccnz [[ENDIF:BB[0-9]+_[0-9]+]]
+
+; GCN: v_div_scale_f32
+
+; GCN: [[ENDIF]]:
+; GCN: buffer_store_dword
+define amdgpu_kernel void @test_vccnz_ifcvt_triangle_expensive(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+entry:
+ %v = load float, float addrspace(1)* %in
+ %cc = fcmp oeq float %v, 1.000000e+00
+ br i1 %cc, label %if, label %endif
+
+if:
+ %u = fdiv float %v, %v
+ br label %endif
+
+endif:
+ %r = phi float [ %v, %entry ], [ %u, %if ]
+ store float %r, float addrspace(1)* %out
+ ret void
+}
+
+; vcc branch with SGPR inputs
+; GCN-LABEL: {{^}}test_vccnz_sgpr_ifcvt_triangle:
+; GCN: v_cmp_neq_f32_e64
+; GCN: s_cbranch_vccnz [[ENDIF:BB[0-9]+_[0-9]+]]
+
+; GCN: s_add_i32
+
+; GCN: [[ENDIF]]:
+; GCN: buffer_store_dword
+define amdgpu_kernel void @test_vccnz_sgpr_ifcvt_triangle(i32 addrspace(1)* %out, i32 addrspace(2)* %in, float %cnd) #0 {
+entry:
+ %v = load i32, i32 addrspace(2)* %in
+ %cc = fcmp oeq float %cnd, 1.000000e+00
+ br i1 %cc, label %if, label %endif
+
+if:
+ %u = add i32 %v, %v
+ br label %endif
+
+endif:
+ %r = phi i32 [ %v, %entry ], [ %u, %if ]
+ store i32 %r, i32 addrspace(1)* %out
+ ret void
+
+}
+
+; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle_constant_load:
+; GCN: v_cndmask_b32
+define amdgpu_kernel void @test_vccnz_ifcvt_triangle_constant_load(float addrspace(1)* %out, float addrspace(2)* %in) #0 {
+entry:
+ %v = load float, float addrspace(2)* %in
+ %cc = fcmp oeq float %v, 1.000000e+00
+ br i1 %cc, label %if, label %endif
+
+if:
+ %u = fadd float %v, %v
+ br label %endif
+
+endif:
+ %r = phi float [ %v, %entry ], [ %u, %if ]
+ store float %r, float addrspace(1)* %out
+ ret void
+}
+
+; Due to a broken cost heuristic, this is not if-converted like
+; test_vccnz_ifcvt_triangle_constant_load even though it should be.
+
+; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle_argload:
+; GCN: v_cndmask_b32
+define amdgpu_kernel void @test_vccnz_ifcvt_triangle_argload(float addrspace(1)* %out, float %v) #0 {
+entry:
+ %cc = fcmp oeq float %v, 1.000000e+00
+ br i1 %cc, label %if, label %endif
+
+if:
+ %u = fadd float %v, %v
+ br label %endif
+
+endif:
+ %r = phi float [ %v, %entry ], [ %u, %if ]
+ store float %r, float addrspace(1)* %out
+ ret void
+}
+
+; Scalar branch and scalar inputs
+; GCN-LABEL: {{^}}test_scc1_sgpr_ifcvt_triangle:
+; GCN: s_load_dword [[VAL:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x0
+; GCN: s_add_i32 [[ADD:s[0-9]+]], [[VAL]], [[VAL]]
+; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 1
+; GCN-NEXT: s_cselect_b32 [[SELECT:s[0-9]+]], [[ADD]], [[VAL]]
+define amdgpu_kernel void @test_scc1_sgpr_ifcvt_triangle(i32 addrspace(2)* %in, i32 %cond) #0 {
+entry:
+ %v = load i32, i32 addrspace(2)* %in
+ %cc = icmp eq i32 %cond, 1
+ br i1 %cc, label %if, label %endif
+
+if:
+ %u = add i32 %v, %v
+ br label %endif
+
+endif:
+ %r = phi i32 [ %v, %entry ], [ %u, %if ]
+ call void asm sideeffect "; reg use $0", "s"(i32 %r) #0
+ ret void
+}
+
+; FIXME: Should be able to use VALU compare and select
+; Scalar branch but VGPR select operands
+; GCN-LABEL: {{^}}test_scc1_vgpr_ifcvt_triangle:
+; GCN: s_cmp_lg_u32
+; GCN: s_cbranch_scc1 [[ENDIF:BB[0-9]+_[0-9]+]]
+
+; GCN: v_add_f32_e32
+
+; GCN: [[ENDIF]]:
+; GCN: buffer_store_dword
+define amdgpu_kernel void @test_scc1_vgpr_ifcvt_triangle(float addrspace(1)* %out, float addrspace(1)* %in, i32 %cond) #0 {
+entry:
+ %v = load float, float addrspace(1)* %in
+ %cc = icmp eq i32 %cond, 1
+ br i1 %cc, label %if, label %endif
+
+if:
+ %u = fadd float %v, %v
+ br label %endif
+
+endif:
+ %r = phi float [ %v, %entry ], [ %u, %if ]
+ store float %r, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_scc1_sgpr_ifcvt_triangle64:
+; GCN: s_add_u32
+; GCN: s_addc_u32
+; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 1
+; GCN-NEXT: s_cselect_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}
+define amdgpu_kernel void @test_scc1_sgpr_ifcvt_triangle64(i64 addrspace(2)* %in, i32 %cond) #0 {
+entry:
+ %v = load i64, i64 addrspace(2)* %in
+ %cc = icmp eq i32 %cond, 1
+ br i1 %cc, label %if, label %endif
+
+if:
+ %u = add i64 %v, %v
+ br label %endif
+
+endif:
+ %r = phi i64 [ %v, %entry ], [ %u, %if ]
+ call void asm sideeffect "; reg use $0", "s"(i64 %r) #0
+ ret void
+}
+
+; TODO: Can do s_cselect_b64; s_cselect_b32
+; GCN-LABEL: {{^}}test_scc1_sgpr_ifcvt_triangle96:
+; GCN: s_add_i32
+; GCN: s_add_i32
+; GCN: s_add_i32
+; GCN: s_add_i32
+; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 1
+; GCN-NEXT: s_cselect_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}
+; GCN-NEXT: s_cselect_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}
+define amdgpu_kernel void @test_scc1_sgpr_ifcvt_triangle96(<3 x i32> addrspace(2)* %in, i32 %cond) #0 {
+entry:
+ %v = load <3 x i32>, <3 x i32> addrspace(2)* %in
+ %cc = icmp eq i32 %cond, 1
+ br i1 %cc, label %if, label %endif
+
+if:
+ %u = add <3 x i32> %v, %v
+ br label %endif
+
+endif:
+ %r = phi <3 x i32> [ %v, %entry ], [ %u, %if ]
+ %r.ext = shufflevector <3 x i32> %r, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ call void asm sideeffect "; reg use $0", "s"(<4 x i32> %r.ext) #0
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_scc1_sgpr_ifcvt_triangle128:
+; GCN: s_add_i32
+; GCN: s_add_i32
+; GCN: s_add_i32
+; GCN: s_add_i32
+; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 1
+; GCN-NEXT: s_cselect_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}
+; GCN-NEXT: s_cselect_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}
+define amdgpu_kernel void @test_scc1_sgpr_ifcvt_triangle128(<4 x i32> addrspace(2)* %in, i32 %cond) #0 {
+entry:
+ %v = load <4 x i32>, <4 x i32> addrspace(2)* %in
+ %cc = icmp eq i32 %cond, 1
+ br i1 %cc, label %if, label %endif
+
+if:
+ %u = add <4 x i32> %v, %v
+ br label %endif
+
+endif:
+ %r = phi <4 x i32> [ %v, %entry ], [ %u, %if ]
+ call void asm sideeffect "; reg use $0", "s"(<4 x i32> %r) #0
+ ret void
+}
+
+; GCN-LABEL: {{^}}uniform_if_swap_br_targets_scc_constant_select:
+; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 0
+; GCN: s_cselect_b32 s{{[0-9]+}}, 1, 0{{$}}
+define amdgpu_kernel void @uniform_if_swap_br_targets_scc_constant_select(i32 %cond, i32 addrspace(1)* %out) {
+entry:
+ %cmp0 = icmp eq i32 %cond, 0
+ br i1 %cmp0, label %else, label %if
+
+if:
+ br label %done
+
+else:
+ br label %done
+
+done:
+ %value = phi i32 [0, %if], [1, %else]
+ store i32 %value, i32 addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}ifcvt_undef_scc:
+; GCN: {{^}}; BB#0:
+; GCN-NEXT: s_load_dwordx2
+; GCN-NEXT: s_cselect_b32 s{{[0-9]+}}, 1, 0
+define amdgpu_kernel void @ifcvt_undef_scc(i32 %cond, i32 addrspace(1)* %out) {
+entry:
+ br i1 undef, label %else, label %if
+
+if:
+ br label %done
+
+else:
+ br label %done
+
+done:
+ %value = phi i32 [0, %if], [1, %else]
+ store i32 %value, i32 addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle256:
+; GCN: v_cmp_neq_f32
+; GCN: s_cbranch_vccnz [[ENDIF:BB[0-9]+_[0-9]+]]
+
+; GCN: v_add_i32
+; GCN: v_add_i32
+
+; GCN: [[ENDIF]]:
+; GCN: buffer_store_dword
+define amdgpu_kernel void @test_vccnz_ifcvt_triangle256(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(1)* %in, float %cnd) #0 {
+entry:
+ %v = load <8 x i32>, <8 x i32> addrspace(1)* %in
+ %cc = fcmp oeq float %cnd, 1.000000e+00
+ br i1 %cc, label %if, label %endif
+
+if:
+ %u = add <8 x i32> %v, %v
+ br label %endif
+
+endif:
+ %r = phi <8 x i32> [ %v, %entry ], [ %u, %if ]
+ store <8 x i32> %r, <8 x i32> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle512:
+; GCN: v_cmp_neq_f32
+; GCN: s_cbranch_vccnz [[ENDIF:BB[0-9]+_[0-9]+]]
+
+; GCN: v_add_i32
+; GCN: v_add_i32
+
+; GCN: [[ENDIF]]:
+; GCN: buffer_store_dword
+define amdgpu_kernel void @test_vccnz_ifcvt_triangle512(<16 x i32> addrspace(1)* %out, <16 x i32> addrspace(1)* %in, float %cnd) #0 {
+entry:
+ %v = load <16 x i32>, <16 x i32> addrspace(1)* %in
+ %cc = fcmp oeq float %cnd, 1.000000e+00
+ br i1 %cc, label %if, label %endif
+
+if:
+ %u = add <16 x i32> %v, %v
+ br label %endif
+
+endif:
+ %r = phi <16 x i32> [ %v, %entry ], [ %u, %if ]
+ store <16 x i32> %r, <16 x i32> addrspace(1)* %out
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/early-inline-alias.ll b/test/CodeGen/AMDGPU/early-inline-alias.ll
new file mode 100644
index 000000000000..42dfa4e7ab4f
--- /dev/null
+++ b/test/CodeGen/AMDGPU/early-inline-alias.ll
@@ -0,0 +1,12 @@
+; RUN: opt -mtriple=amdgcn-- -O1 -S -inline-threshold=1 %s | FileCheck %s
+
+; CHECK: @add1alias = alias i32 (i32), i32 (i32)* @add1
+; CHECK: @add1alias2 = alias i32 (i32), i32 (i32)* @add1
+
+@add1alias = alias i32 (i32), i32 (i32)* @add1
+@add1alias2 = alias i32 (i32), i32 (i32)* @add1
+
+define i32 @add1(i32) {
+ %2 = add nsw i32 %0, 1
+ ret i32 %2
+}
diff --git a/test/CodeGen/AMDGPU/early-inline.ll b/test/CodeGen/AMDGPU/early-inline.ll
new file mode 100644
index 000000000000..c871d54bec7e
--- /dev/null
+++ b/test/CodeGen/AMDGPU/early-inline.ll
@@ -0,0 +1,25 @@
+; RUN: opt -mtriple=amdgcn-- -O1 -S -inline-threshold=1 -amdgpu-early-inline-all %s | FileCheck %s
+
+; CHECK: @c_alias
+@c_alias = alias i32 (i32), i32 (i32)* @callee
+
+define i32 @callee(i32 %x) {
+entry:
+ %mul1 = mul i32 %x, %x
+ %mul2 = mul i32 %mul1, %x
+ %mul3 = mul i32 %mul1, %mul2
+ %mul4 = mul i32 %mul3, %mul2
+ %mul5 = mul i32 %mul4, %mul3
+ ret i32 %mul5
+}
+
+; CHECK-LABEL: @caller
+; CHECK: mul i32
+; CHECK-NOT: call i32
+
+define amdgpu_kernel void @caller(i32 %x) {
+entry:
+ %res = call i32 @callee(i32 %x)
+ store volatile i32 %res, i32 addrspace(1)* undef
+ ret void
+}
diff --git a/test/CodeGen/AMDGPU/elf.ll b/test/CodeGen/AMDGPU/elf.ll
index 628dd5ec839e..b22f8608d7e3 100644
--- a/test/CodeGen/AMDGPU/elf.ll
+++ b/test/CodeGen/AMDGPU/elf.ll
@@ -24,11 +24,13 @@
; TONGA-NEXT: .long 704
; CONFIG: .p2align 8
; CONFIG: test:
-define amdgpu_ps void @test(i32 %p) {
+define amdgpu_ps void @test(i32 %p) #0 {
%i = add i32 %p, 2
%r = bitcast i32 %i to float
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %r, float %r, float %r, float %r)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r, float %r, float %r, float %r, i1 true, i1 false)
ret void
}
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/elf.r600.ll b/test/CodeGen/AMDGPU/elf.r600.ll
index 51cd08500932..93c5e5575033 100644
--- a/test/CodeGen/AMDGPU/elf.r600.ll
+++ b/test/CodeGen/AMDGPU/elf.r600.ll
@@ -9,7 +9,7 @@
; CONFIG-NEXT: .long 2
; CONFIG-NEXT: .long 165900
; CONFIG-NEXT: .long 0
-define void @test(float addrspace(1)* %out, i32 %p) {
+define amdgpu_kernel void @test(float addrspace(1)* %out, i32 %p) {
%i = add i32 %p, 2
%r = bitcast i32 %i to float
store float %r, float addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/else.ll b/test/CodeGen/AMDGPU/else.ll
index ef1e64763d4a..22338e4f50e5 100644
--- a/test/CodeGen/AMDGPU/else.ll
+++ b/test/CodeGen/AMDGPU/else.ll
@@ -1,12 +1,12 @@
-; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
-; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s
; CHECK-LABEL: {{^}}else_no_execfix:
; CHECK: ; %Flow
; CHECK-NEXT: s_or_saveexec_b64 [[DST:s\[[0-9]+:[0-9]+\]]],
; CHECK-NEXT: s_xor_b64 exec, exec, [[DST]]
; CHECK-NEXT: ; mask branch
-define amdgpu_ps float @else_no_execfix(i32 %z, float %v) {
+define amdgpu_ps float @else_no_execfix(i32 %z, float %v) #0 {
main_body:
%cc = icmp sgt i32 %z, 5
br i1 %cc, label %if, label %else
@@ -33,7 +33,7 @@ end:
; CHECK-NEXT: s_and_b64 [[AND_INIT:s\[[0-9]+:[0-9]+\]]], exec, [[DST]]
; CHECK-NEXT: s_xor_b64 exec, exec, [[AND_INIT]]
; CHECK-NEXT: ; mask branch
-define amdgpu_ps void @else_execfix_leave_wqm(i32 %z, float %v) {
+define amdgpu_ps void @else_execfix_leave_wqm(i32 %z, float %v) #0 {
main_body:
%cc = icmp sgt i32 %z, 5
br i1 %cc, label %if, label %else
@@ -44,8 +44,7 @@ if:
else:
%c = fmul float %v, 3.0
- %c.i = bitcast float %c to i32
- %tex = call <4 x float> @llvm.SI.image.sample.i32(i32 %c.i, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.f32.v8i32(float %c, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%v.else = extractelement <4 x float> %tex, i32 0
br label %end
@@ -55,6 +54,9 @@ end:
ret void
}
-declare void @llvm.amdgcn.buffer.store.f32(float, <4 x i32>, i32, i32, i1, i1) nounwind
+declare void @llvm.amdgcn.buffer.store.f32(float, <4 x i32>, i32, i32, i1, i1) #1
+declare <4 x float> @llvm.amdgcn.image.sample.v4f32.f32.v8i32(float, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
-declare <4 x float> @llvm.SI.image.sample.i32(i32, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) nounwind readnone
+attributes #0 = { nounwind }
+attributes #1 = { nounwind writeonly }
+attributes #2 = { nounwind readonly }
diff --git a/test/CodeGen/AMDGPU/empty-function.ll b/test/CodeGen/AMDGPU/empty-function.ll
index a060900811ea..1231cb4d1de2 100644
--- a/test/CodeGen/AMDGPU/empty-function.ll
+++ b/test/CodeGen/AMDGPU/empty-function.ll
@@ -7,14 +7,14 @@
; SI-LABEL: {{^}}empty_function_ret:
; SI: s_endpgm
; SI: codeLenInByte = 4
-define void @empty_function_ret() #0 {
+define amdgpu_kernel void @empty_function_ret() #0 {
ret void
}
; SI: .text
; SI-LABEL: {{^}}empty_function_unreachable:
; SI: codeLenInByte = 0
-define void @empty_function_unreachable() #0 {
+define amdgpu_kernel void @empty_function_unreachable() #0 {
unreachable
}
diff --git a/test/CodeGen/AMDGPU/enable-no-signed-zeros-fp-math.ll b/test/CodeGen/AMDGPU/enable-no-signed-zeros-fp-math.ll
new file mode 100644
index 000000000000..6eb1fc1d0cc2
--- /dev/null
+++ b/test/CodeGen/AMDGPU/enable-no-signed-zeros-fp-math.ll
@@ -0,0 +1,22 @@
+; RUN: llc -march=amdgcn -enable-no-signed-zeros-fp-math=0 < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-SAFE %s
+; RUN: llc -march=amdgcn -enable-no-signed-zeros-fp-math=1 < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-UNSAFE %s
+; RUN: llc -march=amdgcn -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-UNSAFE %s
+
+; Test that the -enable-no-signed-zeros-fp-math flag works
+
+; GCN-LABEL: {{^}}fneg_fsub_f32:
+; GCN: v_subrev_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
+; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[SUB]]
+
+; GCN-UNSAFE-NOT: xor
+define amdgpu_kernel void @fneg_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+ %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
+ %a = load float, float addrspace(1)* %in, align 4
+ %b = load float, float addrspace(1)* %b_ptr, align 4
+ %result = fsub float %a, %b
+ %neg.result = fsub float -0.0, %result
+ store float %neg.result, float addrspace(1)* %out, align 4
+ ret void
+}
+
+attributes #0 = { nounwind }
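; Why signed zeros matter for this fold: with nsz the backend may rewrite
;   fsub float -0.0, (fsub float %a, %b)  ==>  fsub float %b, %a
; saving the xor, and the two forms differ only when %a == %b, where the
; left side yields -0.0 and the right side +0.0.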
diff --git a/test/CodeGen/AMDGPU/endcf-loop-header.ll b/test/CodeGen/AMDGPU/endcf-loop-header.ll
index c67095438ee5..bd861e0c663e 100644
--- a/test/CodeGen/AMDGPU/endcf-loop-header.ll
+++ b/test/CodeGen/AMDGPU/endcf-loop-header.ll
@@ -12,7 +12,7 @@
; CHECK: [[LOOP_LABEL:[0-9A-Za-z_]+]]: ; %loop{{$}}
; CHECK-NOT: s_or_b64 exec, exec
; CHECK: s_cbranch_execnz [[LOOP_LABEL]]
-define void @test(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @test(i32 addrspace(1)* %out) {
entry:
%cond = call i32 @llvm.r600.read.tidig.x() #0
%tmp0 = icmp eq i32 %cond, 0
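; The surrounding checks guard against sinking the exec-mask restore into
; the loop header: the end-of-control-flow s_or_b64 exec, exec, <saved>
; must not sit between the %loop label and the s_cbranch_execnz back-edge,
; or it would incorrectly re-enable lanes on every iteration.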
diff --git a/test/CodeGen/AMDGPU/env-amdgiz.ll b/test/CodeGen/AMDGPU/env-amdgiz.ll
new file mode 100644
index 000000000000..70e4fb30d3aa
--- /dev/null
+++ b/test/CodeGen/AMDGPU/env-amdgiz.ll
@@ -0,0 +1,11 @@
+; RUN: llc -march=amdgcn -mtriple=amdgcn-amd-amdhsa-amdgiz -verify-machineinstrs < %s
+; Just check that the target feature and data layout are accepted without error.
+
+target datalayout = "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5"
+target triple = "amdgcn-amd-amdhsa-amdgiz"
+
+define void @foo() {
+entry:
+ ret void
+}
+
diff --git a/test/CodeGen/AMDGPU/env-amdgizcl.ll b/test/CodeGen/AMDGPU/env-amdgizcl.ll
new file mode 100644
index 000000000000..feb213562c80
--- /dev/null
+++ b/test/CodeGen/AMDGPU/env-amdgizcl.ll
@@ -0,0 +1,11 @@
+; RUN: llc -march=amdgcn -mtriple=amdgcn-amd-amdhsa-amdgizcl -verify-machineinstrs < %s
+; Just check that the target feature and data layout are accepted without error.
+
+target datalayout = "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5"
+target triple = "amdgcn-amd-amdhsa-amdgizcl"
+
+define void @foo() {
+entry:
+ ret void
+}
+
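; For both env-amdgiz triples above: the amdgiz environments select the
; "generic address space is zero" mapping, and the trailing A5 component
; of the data layout places allocas in addrspace(5) (private), so e.g.
;   %p = alloca i32, addrspace(5)
; is the expected form of a stack slot under these triples.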
diff --git a/test/CodeGen/AMDGPU/exceed-max-sgprs.ll b/test/CodeGen/AMDGPU/exceed-max-sgprs.ll
index 8ef54b9e95d3..40d115bfc060 100644
--- a/test/CodeGen/AMDGPU/exceed-max-sgprs.ll
+++ b/test/CodeGen/AMDGPU/exceed-max-sgprs.ll
@@ -1,7 +1,7 @@
; RUN: not llc -march=amdgcn -verify-machineinstrs < %s 2>&1 | FileCheck -check-prefix=ERROR %s
; ERROR: error: scalar registers limit of 104 exceeded (106) in use_too_many_sgprs_tahiti
-define void @use_too_many_sgprs_tahiti() #0 {
+define amdgpu_kernel void @use_too_many_sgprs_tahiti() #0 {
call void asm sideeffect "", "~{SGPR0_SGPR1_SGPR2_SGPR3_SGPR4_SGPR5_SGPR6_SGPR7}" ()
call void asm sideeffect "", "~{SGPR8_SGPR9_SGPR10_SGPR11_SGPR12_SGPR13_SGPR14_SGPR15}" ()
call void asm sideeffect "", "~{SGPR16_SGPR17_SGPR18_SGPR19_SGPR20_SGPR21_SGPR22_SGPR23}" ()
@@ -20,7 +20,7 @@ define void @use_too_many_sgprs_tahiti() #0 {
}
; ERROR: error: scalar registers limit of 104 exceeded (106) in use_too_many_sgprs_bonaire
-define void @use_too_many_sgprs_bonaire() #1 {
+define amdgpu_kernel void @use_too_many_sgprs_bonaire() #1 {
call void asm sideeffect "", "~{SGPR0_SGPR1_SGPR2_SGPR3_SGPR4_SGPR5_SGPR6_SGPR7}" ()
call void asm sideeffect "", "~{SGPR8_SGPR9_SGPR10_SGPR11_SGPR12_SGPR13_SGPR14_SGPR15}" ()
call void asm sideeffect "", "~{SGPR16_SGPR17_SGPR18_SGPR19_SGPR20_SGPR21_SGPR22_SGPR23}" ()
@@ -39,7 +39,7 @@ define void @use_too_many_sgprs_bonaire() #1 {
}
; ERROR: error: scalar registers limit of 104 exceeded (106) in use_too_many_sgprs_bonaire_flat_scr
-define void @use_too_many_sgprs_bonaire_flat_scr() #1 {
+define amdgpu_kernel void @use_too_many_sgprs_bonaire_flat_scr() #1 {
call void asm sideeffect "", "~{SGPR0_SGPR1_SGPR2_SGPR3_SGPR4_SGPR5_SGPR6_SGPR7}" ()
call void asm sideeffect "", "~{SGPR8_SGPR9_SGPR10_SGPR11_SGPR12_SGPR13_SGPR14_SGPR15}" ()
call void asm sideeffect "", "~{SGPR16_SGPR17_SGPR18_SGPR19_SGPR20_SGPR21_SGPR22_SGPR23}" ()
@@ -59,7 +59,7 @@ define void @use_too_many_sgprs_bonaire_flat_scr() #1 {
}
; ERROR: error: scalar registers limit of 96 exceeded (98) in use_too_many_sgprs_iceland
-define void @use_too_many_sgprs_iceland() #2 {
+define amdgpu_kernel void @use_too_many_sgprs_iceland() #2 {
call void asm sideeffect "", "~{VCC}" ()
call void asm sideeffect "", "~{SGPR0_SGPR1_SGPR2_SGPR3_SGPR4_SGPR5_SGPR6_SGPR7}" ()
call void asm sideeffect "", "~{SGPR8_SGPR9_SGPR10_SGPR11_SGPR12_SGPR13_SGPR14_SGPR15}" ()
@@ -77,7 +77,7 @@ define void @use_too_many_sgprs_iceland() #2 {
}
; ERROR: error: addressable scalar registers limit of 102 exceeded (103) in use_too_many_sgprs_fiji
-define void @use_too_many_sgprs_fiji() #3 {
+define amdgpu_kernel void @use_too_many_sgprs_fiji() #3 {
call void asm sideeffect "", "~{SGPR0_SGPR1_SGPR2_SGPR3_SGPR4_SGPR5_SGPR6_SGPR7}" ()
call void asm sideeffect "", "~{SGPR8_SGPR9_SGPR10_SGPR11_SGPR12_SGPR13_SGPR14_SGPR15}" ()
call void asm sideeffect "", "~{SGPR16_SGPR17_SGPR18_SGPR19_SGPR20_SGPR21_SGPR22_SGPR23}" ()
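; The inline-asm clobbers above pin whole 8-wide SGPR tuples live, driving
; the allocator past each subtarget's addressable budget on purpose; the
; limits differ (104 vs 96/102) because some subtargets additionally
; reserve SGPRs, e.g. for VCC or flat scratch, as the _flat_scr variant
; exercises.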
diff --git a/test/CodeGen/AMDGPU/extend-bit-ops-i16.ll b/test/CodeGen/AMDGPU/extend-bit-ops-i16.ll
index cf384da2c5be..0fa06b87eba2 100644
--- a/test/CodeGen/AMDGPU/extend-bit-ops-i16.ll
+++ b/test/CodeGen/AMDGPU/extend-bit-ops-i16.ll
@@ -3,7 +3,7 @@
; GCN-LABEL: and_zext:
; GCN: v_and_b32_e32 [[VAL16:v[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_and_b32_e32 v{{[0-9]+}}, 0xffff, [[VAL16]]
-define void @and_zext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @and_zext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
%id = call i32 @llvm.amdgcn.workitem.id.x() #1
%ptr = getelementptr i16, i16 addrspace(1)* %in, i32 %id
%a = load i16, i16 addrspace(1)* %in
@@ -18,7 +18,7 @@ define void @and_zext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
; GCN-LABEL: or_zext:
; GCN: v_or_b32_e32 [[VAL16:v[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_and_b32_e32 v{{[0-9]+}}, 0xffff, [[VAL16]]
-define void @or_zext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @or_zext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
%id = call i32 @llvm.amdgcn.workitem.id.x() #1
%ptr = getelementptr i16, i16 addrspace(1)* %in, i32 %id
%a = load i16, i16 addrspace(1)* %in
@@ -33,7 +33,7 @@ define void @or_zext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
; GCN-LABEL: xor_zext:
; GCN: v_xor_b32_e32 [[VAL16:v[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_and_b32_e32 v{{[0-9]+}}, 0xffff, [[VAL16]]
-define void @xor_zext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @xor_zext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
%id = call i32 @llvm.amdgcn.workitem.id.x() #1
%ptr = getelementptr i16, i16 addrspace(1)* %in, i32 %id
%a = load i16, i16 addrspace(1)* %in
diff --git a/test/CodeGen/AMDGPU/extload-align.ll b/test/CodeGen/AMDGPU/extload-align.ll
index 9d2eb74c7ba9..4644800421d8 100644
--- a/test/CodeGen/AMDGPU/extload-align.ll
+++ b/test/CodeGen/AMDGPU/extload-align.ll
@@ -9,7 +9,7 @@
; DEBUG: mem:LD2[<unknown>]{{[^(]}}
; DEBUG: {{^}}# End machine code for function extload_align.
-define void @extload_align(i32* %out, i32 %index) #0 {
+define amdgpu_kernel void @extload_align(i32* %out, i32 %index) #0 {
%v0 = alloca [4 x i16]
%a1 = getelementptr inbounds [4 x i16], [4 x i16]* %v0, i32 0, i32 0
%a2 = getelementptr inbounds [4 x i16], [4 x i16]* %v0, i32 0, i32 1
diff --git a/test/CodeGen/AMDGPU/extload-private.ll b/test/CodeGen/AMDGPU/extload-private.ll
index 6cebe5f495c5..fd298b361d03 100644
--- a/test/CodeGen/AMDGPU/extload-private.ll
+++ b/test/CodeGen/AMDGPU/extload-private.ll
@@ -2,8 +2,8 @@
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-promote-alloca -amdgpu-sroa=0 -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}load_i8_sext_private:
-; SI: buffer_load_sbyte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+$}}
-define void @load_i8_sext_private(i32 addrspace(1)* %out) {
+; SI: buffer_load_sbyte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:4{{$}}
+define amdgpu_kernel void @load_i8_sext_private(i32 addrspace(1)* %out) {
entry:
%tmp0 = alloca i8
%tmp1 = load i8, i8* %tmp0
@@ -13,8 +13,8 @@ entry:
}
; FUNC-LABEL: {{^}}load_i8_zext_private:
-; SI: buffer_load_ubyte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+$}}
-define void @load_i8_zext_private(i32 addrspace(1)* %out) {
+; SI: buffer_load_ubyte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:4{{$}}
+define amdgpu_kernel void @load_i8_zext_private(i32 addrspace(1)* %out) {
entry:
%tmp0 = alloca i8
%tmp1 = load i8, i8* %tmp0
@@ -24,8 +24,8 @@ entry:
}
; FUNC-LABEL: {{^}}load_i16_sext_private:
-; SI: buffer_load_sshort v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+$}}
-define void @load_i16_sext_private(i32 addrspace(1)* %out) {
+; SI: buffer_load_sshort v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:4{{$}}
+define amdgpu_kernel void @load_i16_sext_private(i32 addrspace(1)* %out) {
entry:
%tmp0 = alloca i16
%tmp1 = load i16, i16* %tmp0
@@ -35,8 +35,8 @@ entry:
}
; FUNC-LABEL: {{^}}load_i16_zext_private:
-; SI: buffer_load_ushort v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+$}}
-define void @load_i16_zext_private(i32 addrspace(1)* %out) {
+; SI: buffer_load_ushort v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:4{{$}}
+define amdgpu_kernel void @load_i16_zext_private(i32 addrspace(1)* %out) {
entry:
%tmp0 = alloca i16
%tmp1 = load volatile i16, i16* %tmp0
diff --git a/test/CodeGen/AMDGPU/extload.ll b/test/CodeGen/AMDGPU/extload.ll
index 8b3e087d1f45..a7b8e86220aa 100644
--- a/test/CodeGen/AMDGPU/extload.ll
+++ b/test/CodeGen/AMDGPU/extload.ll
@@ -10,7 +10,7 @@
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+.[XYZW]]],
; EG: VTX_READ_32 [[VAL]]
-define void @global_anyext_load_i8(i8 addrspace(1)* nocapture noalias %out, i8 addrspace(1)* nocapture noalias %src) nounwind {
+define amdgpu_kernel void @global_anyext_load_i8(i8 addrspace(1)* nocapture noalias %out, i8 addrspace(1)* nocapture noalias %src) nounwind {
%cast = bitcast i8 addrspace(1)* %src to i32 addrspace(1)*
%load = load i32, i32 addrspace(1)* %cast
%x = bitcast i32 %load to <4 x i8>
@@ -25,7 +25,7 @@ define void @global_anyext_load_i8(i8 addrspace(1)* nocapture noalias %out, i8 a
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+.[XYZW]]],
; EG: VTX_READ_32 [[VAL]]
-define void @global_anyext_load_i16(i16 addrspace(1)* nocapture noalias %out, i16 addrspace(1)* nocapture noalias %src) nounwind {
+define amdgpu_kernel void @global_anyext_load_i16(i16 addrspace(1)* nocapture noalias %out, i16 addrspace(1)* nocapture noalias %src) nounwind {
%cast = bitcast i16 addrspace(1)* %src to i32 addrspace(1)*
%load = load i32, i32 addrspace(1)* %cast
%x = bitcast i32 %load to <2 x i16>
@@ -40,7 +40,7 @@ define void @global_anyext_load_i16(i16 addrspace(1)* nocapture noalias %out, i1
; EG: LDS_READ_RET {{.*}}, [[VAL:T[0-9]+.[XYZW]]]
; EG: LDS_WRITE * [[VAL]]
-define void @local_anyext_load_i8(i8 addrspace(3)* nocapture noalias %out, i8 addrspace(3)* nocapture noalias %src) nounwind {
+define amdgpu_kernel void @local_anyext_load_i8(i8 addrspace(3)* nocapture noalias %out, i8 addrspace(3)* nocapture noalias %src) nounwind {
%cast = bitcast i8 addrspace(3)* %src to i32 addrspace(3)*
%load = load i32, i32 addrspace(3)* %cast
%x = bitcast i32 %load to <4 x i8>
@@ -55,7 +55,7 @@ define void @local_anyext_load_i8(i8 addrspace(3)* nocapture noalias %out, i8 ad
; EG: LDS_READ_RET {{.*}}, [[VAL:T[0-9]+.[XYZW]]]
; EG: LDS_WRITE * [[VAL]]
-define void @local_anyext_load_i16(i16 addrspace(3)* nocapture noalias %out, i16 addrspace(3)* nocapture noalias %src) nounwind {
+define amdgpu_kernel void @local_anyext_load_i16(i16 addrspace(3)* nocapture noalias %out, i16 addrspace(3)* nocapture noalias %src) nounwind {
%cast = bitcast i16 addrspace(3)* %src to i32 addrspace(3)*
%load = load i32, i32 addrspace(3)* %cast
%x = bitcast i32 %load to <2 x i16>
diff --git a/test/CodeGen/AMDGPU/extract-vector-elt-build-vector-combine.ll b/test/CodeGen/AMDGPU/extract-vector-elt-build-vector-combine.ll
index 4edff152e66e..be85ca933c33 100644
--- a/test/CodeGen/AMDGPU/extract-vector-elt-build-vector-combine.ll
+++ b/test/CodeGen/AMDGPU/extract-vector-elt-build-vector-combine.ll
@@ -13,7 +13,7 @@
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
-define void @store_build_vector_multiple_uses_v4i32(<4 x i32> addrspace(1)* noalias %out0,
+define amdgpu_kernel void @store_build_vector_multiple_uses_v4i32(<4 x i32> addrspace(1)* noalias %out0,
<4 x i32> addrspace(1)* noalias %out1,
i32 addrspace(1)* noalias %out2,
i32 addrspace(1)* %in) {
@@ -55,7 +55,7 @@ define void @store_build_vector_multiple_uses_v4i32(<4 x i32> addrspace(1)* noal
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
-define void @store_build_vector_multiple_extract_uses_v4i32(<4 x i32> addrspace(1)* noalias %out0,
+define amdgpu_kernel void @store_build_vector_multiple_extract_uses_v4i32(<4 x i32> addrspace(1)* noalias %out0,
<4 x i32> addrspace(1)* noalias %out1,
i32 addrspace(1)* noalias %out2,
i32 addrspace(1)* %in) {
@@ -99,7 +99,7 @@ define void @store_build_vector_multiple_extract_uses_v4i32(<4 x i32> addrspace(
; GCN: buffer_store_dwordx2
; GCN: buffer_store_dwordx2
-define void @store_build_vector_multiple_uses_v4i32_bitcast_to_v2i64(<2 x i64> addrspace(1)* noalias %out0,
+define amdgpu_kernel void @store_build_vector_multiple_uses_v4i32_bitcast_to_v2i64(<2 x i64> addrspace(1)* noalias %out0,
<4 x i32> addrspace(1)* noalias %out1,
i64 addrspace(1)* noalias %out2,
i32 addrspace(1)* %in) {
diff --git a/test/CodeGen/AMDGPU/extract_vector_elt-f16.ll b/test/CodeGen/AMDGPU/extract_vector_elt-f16.ll
new file mode 100644
index 000000000000..1f567ae05081
--- /dev/null
+++ b/test/CodeGen/AMDGPU/extract_vector_elt-f16.ll
@@ -0,0 +1,128 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}extract_vector_elt_v2f16:
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+; GCN: s_lshr_b32 [[ELT1:s[0-9]+]], [[VEC]], 16
+; GCN-DAG: v_mov_b32_e32 [[VELT0:v[0-9]+]], [[VEC]]
+; GCN-DAG: v_mov_b32_e32 [[VELT1:v[0-9]+]], [[ELT1]]
+; GCN-DAG: buffer_store_short [[VELT0]]
+; GCN-DAG: buffer_store_short [[VELT1]]
+define amdgpu_kernel void @extract_vector_elt_v2f16(half addrspace(1)* %out, <2 x half> addrspace(2)* %vec.ptr) #0 {
+ %vec = load <2 x half>, <2 x half> addrspace(2)* %vec.ptr
+ %p0 = extractelement <2 x half> %vec, i32 0
+ %p1 = extractelement <2 x half> %vec, i32 1
+ %out1 = getelementptr half, half addrspace(1)* %out, i32 10
+ store half %p1, half addrspace(1)* %out, align 2
+ store half %p0, half addrspace(1)* %out1, align 2
+ ret void
+}
+
+; GCN-LABEL: {{^}}extract_vector_elt_v2f16_dynamic_sgpr:
+; GCN: s_load_dword [[IDX:s[0-9]+]]
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+; GCN: s_lshl_b32 [[IDX_SCALED:s[0-9]+]], [[IDX]], 16
+; GCN: s_lshr_b32 [[ELT1:s[0-9]+]], [[VEC]], [[IDX_SCALED]]
+; GCN: v_mov_b32_e32 [[VELT1:v[0-9]+]], [[ELT1]]
+; GCN: buffer_store_short [[VELT1]]
+; GCN: ScratchSize: 0
+define amdgpu_kernel void @extract_vector_elt_v2f16_dynamic_sgpr(half addrspace(1)* %out, <2 x half> addrspace(2)* %vec.ptr, i32 %idx) #0 {
+ %vec = load <2 x half>, <2 x half> addrspace(2)* %vec.ptr
+ %elt = extractelement <2 x half> %vec, i32 %idx
+ store half %elt, half addrspace(1)* %out, align 2
+ ret void
+}
+
+; GCN-LABEL: {{^}}extract_vector_elt_v2f16_dynamic_vgpr:
+; GCN-DAG: s_load_dword [[VEC:s[0-9]+]]
+; GCN-DAG: {{flat|buffer}}_load_dword [[IDX:v[0-9]+]]
+; GCN: v_lshlrev_b32_e32 [[IDX_SCALED:v[0-9]+]], 16, [[IDX]]
+
+; SI: v_lshr_b32_e32 [[ELT:v[0-9]+]], [[VEC]], [[IDX_SCALED]]
+; VI: v_lshrrev_b32_e64 [[ELT:v[0-9]+]], [[IDX_SCALED]], [[VEC]]
+
+
+; SI: buffer_store_short [[ELT]]
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[ELT]]
+; GCN: ScratchSize: 0{{$}}
+define amdgpu_kernel void @extract_vector_elt_v2f16_dynamic_vgpr(half addrspace(1)* %out, <2 x half> addrspace(2)* %vec.ptr, i32 addrspace(1)* %idx.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %idx.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds half, half addrspace(1)* %out, i64 %tid.ext
+ %vec = load <2 x half>, <2 x half> addrspace(2)* %vec.ptr
+ %idx = load i32, i32 addrspace(1)* %gep
+ %elt = extractelement <2 x half> %vec, i32 %idx
+ store half %elt, half addrspace(1)* %out.gep, align 2
+ ret void
+}
+
+; GCN-LABEL: {{^}}extract_vector_elt_v3f16:
+; GCN: buffer_load_ushort
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+define amdgpu_kernel void @extract_vector_elt_v3f16(half addrspace(1)* %out, <3 x half> %foo) #0 {
+ %p0 = extractelement <3 x half> %foo, i32 0
+ %p1 = extractelement <3 x half> %foo, i32 2
+ %out1 = getelementptr half, half addrspace(1)* %out, i32 1
+ store half %p1, half addrspace(1)* %out, align 2
+ store half %p0, half addrspace(1)* %out1, align 2
+ ret void
+}
+
+; GCN-LABEL: {{^}}extract_vector_elt_v4f16:
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+define amdgpu_kernel void @extract_vector_elt_v4f16(half addrspace(1)* %out, <4 x half> %foo) #0 {
+ %p0 = extractelement <4 x half> %foo, i32 0
+ %p1 = extractelement <4 x half> %foo, i32 2
+ %out1 = getelementptr half, half addrspace(1)* %out, i32 10
+ store half %p1, half addrspace(1)* %out, align 2
+ store half %p0, half addrspace(1)* %out1, align 2
+ ret void
+}
+
+; GCN-LABEL: {{^}}dynamic_extract_vector_elt_v3f16:
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+
+; GCN: buffer_load_ushort
+; GCN: buffer_store_short
+define amdgpu_kernel void @dynamic_extract_vector_elt_v3f16(half addrspace(1)* %out, <3 x half> %foo, i32 %idx) #0 {
+ %p0 = extractelement <3 x half> %foo, i32 %idx
+ %out1 = getelementptr half, half addrspace(1)* %out, i32 1
+ store half %p0, half addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}dynamic_extract_vector_elt_v4f16:
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+
+; GCN: buffer_load_ushort
+; GCN: buffer_store_short
+define amdgpu_kernel void @dynamic_extract_vector_elt_v4f16(half addrspace(1)* %out, <4 x half> %foo, i32 %idx) #0 {
+ %p0 = extractelement <4 x half> %foo, i32 %idx
+ %out1 = getelementptr half, half addrspace(1)* %out, i32 1
+ store half %p0, half addrspace(1)* %out
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
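; The ScratchSize: 0 checks are the interesting property here: a
; <2 x half> fits in one 32-bit register, so a dynamic extract lowers to a
; variable shift, roughly
;   %bits = bitcast <2 x half> %vec to i32
;   %shft = lshr i32 %bits, %bitoff   ; %bitoff = index scaled to bits
; instead of spilling to scratch, while the v3/v4 cases are handled
; element-by-element through buffer loads and stores.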
diff --git a/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll b/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll
index 4594379dae03..db5bf0b4e808 100644
--- a/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll
+++ b/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll
@@ -5,7 +5,7 @@
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx2
; GCN: buffer_store_dwordx2
-define void @extract_vector_elt_v3f64_2(double addrspace(1)* %out, <3 x double> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @extract_vector_elt_v3f64_2(double addrspace(1)* %out, <3 x double> addrspace(1)* %in) #0 {
%ld = load volatile <3 x double>, <3 x double> addrspace(1)* %in
%elt = extractelement <3 x double> %ld, i32 2
store volatile double %elt, double addrspace(1)* %out
@@ -13,14 +13,14 @@ define void @extract_vector_elt_v3f64_2(double addrspace(1)* %out, <3 x double>
}
; GCN-LABEL: {{^}}dyn_extract_vector_elt_v3f64:
-define void @dyn_extract_vector_elt_v3f64(double addrspace(1)* %out, <3 x double> %foo, i32 %elt) #0 {
+define amdgpu_kernel void @dyn_extract_vector_elt_v3f64(double addrspace(1)* %out, <3 x double> %foo, i32 %elt) #0 {
%dynelt = extractelement <3 x double> %foo, i32 %elt
store volatile double %dynelt, double addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}dyn_extract_vector_elt_v4f64:
-define void @dyn_extract_vector_elt_v4f64(double addrspace(1)* %out, <4 x double> %foo, i32 %elt) #0 {
+define amdgpu_kernel void @dyn_extract_vector_elt_v4f64(double addrspace(1)* %out, <4 x double> %foo, i32 %elt) #0 {
%dynelt = extractelement <4 x double> %foo, i32 %elt
store volatile double %dynelt, double addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll b/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll
index c407f0efffb4..9b117d48a980 100644
--- a/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll
+++ b/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll
@@ -1,25 +1,67 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SICIVI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=SICIVI %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
; GCN-LABEL: {{^}}extract_vector_elt_v2i16:
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_store_short
-; GCN: buffer_store_short
-define void @extract_vector_elt_v2i16(i16 addrspace(1)* %out, <2 x i16> %foo) #0 {
- %p0 = extractelement <2 x i16> %foo, i32 0
- %p1 = extractelement <2 x i16> %foo, i32 1
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+; GCN: s_lshr_b32 [[ELT1:s[0-9]+]], [[VEC]], 16
+; GCN-DAG: v_mov_b32_e32 [[VELT0:v[0-9]+]], [[VEC]]
+; GCN-DAG: v_mov_b32_e32 [[VELT1:v[0-9]+]], [[ELT1]]
+; GCN-DAG: buffer_store_short [[VELT0]]
+; GCN-DAG: buffer_store_short [[VELT1]]
+define amdgpu_kernel void @extract_vector_elt_v2i16(i16 addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr) #0 {
+ %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+ %p0 = extractelement <2 x i16> %vec, i32 0
+ %p1 = extractelement <2 x i16> %vec, i32 1
%out1 = getelementptr i16, i16 addrspace(1)* %out, i32 10
store i16 %p1, i16 addrspace(1)* %out, align 2
store i16 %p0, i16 addrspace(1)* %out1, align 2
ret void
}
+; GCN-LABEL: {{^}}extract_vector_elt_v2i16_dynamic_sgpr:
+; GCN: s_load_dword [[IDX:s[0-9]+]]
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+; GCN: s_lshl_b32 [[IDX_SCALED:s[0-9]+]], [[IDX]], 16
+; GCN: s_lshr_b32 [[ELT1:s[0-9]+]], [[VEC]], [[IDX_SCALED]]
+; GCN: v_mov_b32_e32 [[VELT1:v[0-9]+]], [[ELT1]]
+; GCN: buffer_store_short [[VELT1]]
+; GCN: ScratchSize: 0
+define amdgpu_kernel void @extract_vector_elt_v2i16_dynamic_sgpr(i16 addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr, i32 %idx) #0 {
+ %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+ %elt = extractelement <2 x i16> %vec, i32 %idx
+ store i16 %elt, i16 addrspace(1)* %out, align 2
+ ret void
+}
+
+; GCN-LABEL: {{^}}extract_vector_elt_v2i16_dynamic_vgpr:
+; GCN-DAG: s_load_dword [[VEC:s[0-9]+]]
+; GCN-DAG: {{flat|buffer}}_load_dword [[IDX:v[0-9]+]]
+; GCN: v_lshlrev_b32_e32 [[IDX_SCALED:v[0-9]+]], 16, [[IDX]]
+
+; SI: v_lshr_b32_e32 [[ELT:v[0-9]+]], [[VEC]], [[IDX_SCALED]]
+; VI: v_lshrrev_b32_e64 [[ELT:v[0-9]+]], [[IDX_SCALED]], [[VEC]]
+
+; SI: buffer_store_short [[ELT]]
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[ELT]]
+; GCN: ScratchSize: 0{{$}}
+define amdgpu_kernel void @extract_vector_elt_v2i16_dynamic_vgpr(i16 addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr, i32 addrspace(1)* %idx.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %idx.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds i16, i16 addrspace(1)* %out, i64 %tid.ext
+ %idx = load volatile i32, i32 addrspace(1)* %gep
+ %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+ %elt = extractelement <2 x i16> %vec, i32 %idx
+ store i16 %elt, i16 addrspace(1)* %out.gep, align 2
+ ret void
+}
+
; GCN-LABEL: {{^}}extract_vector_elt_v3i16:
; GCN: buffer_load_ushort
; GCN: buffer_store_short
; GCN: buffer_store_short
-define void @extract_vector_elt_v3i16(i16 addrspace(1)* %out, <3 x i16> %foo) #0 {
+define amdgpu_kernel void @extract_vector_elt_v3i16(i16 addrspace(1)* %out, <3 x i16> %foo) #0 {
%p0 = extractelement <3 x i16> %foo, i32 0
%p1 = extractelement <3 x i16> %foo, i32 2
%out1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
@@ -29,16 +71,23 @@ define void @extract_vector_elt_v3i16(i16 addrspace(1)* %out, <3 x i16> %foo) #0
}
; GCN-LABEL: {{^}}extract_vector_elt_v4i16:
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_store_short
-; GCN: buffer_store_short
-define void @extract_vector_elt_v4i16(i16 addrspace(1)* %out, <4 x i16> %foo) #0 {
+; SICIVI: buffer_load_ushort
+; SICIVI: buffer_load_ushort
+; SICIVI: buffer_store_short
+; SICIVI: buffer_store_short
+
+; GFX9-DAG: s_load_dword [[LOAD0:s[0-9]+]], s[0:1], 0x2c
+; GFX9-DAG: s_load_dword [[LOAD1:s[0-9]+]], s[0:1], 0x30
+; GFX9-DAG: v_mov_b32_e32 [[VLOAD0:v[0-9]+]], [[LOAD0]]
+; GFX9-DAG: buffer_store_short [[VLOAD0]], off
+; GFX9-DAG: v_mov_b32_e32 [[VLOAD1:v[0-9]+]], [[LOAD1]]
+; GFX9-DAG: buffer_store_short [[VLOAD1]], off
+define amdgpu_kernel void @extract_vector_elt_v4i16(i16 addrspace(1)* %out, <4 x i16> %foo) #0 {
%p0 = extractelement <4 x i16> %foo, i32 0
%p1 = extractelement <4 x i16> %foo, i32 2
%out1 = getelementptr i16, i16 addrspace(1)* %out, i32 10
- store i16 %p1, i16 addrspace(1)* %out, align 2
- store i16 %p0, i16 addrspace(1)* %out1, align 2
+ store volatile i16 %p1, i16 addrspace(1)* %out, align 2
+ store volatile i16 %p0, i16 addrspace(1)* %out1, align 2
ret void
}
@@ -47,13 +96,16 @@ define void @extract_vector_elt_v4i16(i16 addrspace(1)* %out, <4 x i16> %foo) #0
; GCN: buffer_load_ushort
; GCN: buffer_load_ushort
-; GCN: buffer_store_short
-; GCN: buffer_store_short
-; GCN: buffer_store_short
+; SICIVI: buffer_store_short
+; SICIVI: buffer_store_short
+; SICIVI: buffer_store_short
+
+; GFX9: buffer_store_dword
+; GFX9: buffer_store_dword
; GCN: buffer_load_ushort
; GCN: buffer_store_short
-define void @dynamic_extract_vector_elt_v3i16(i16 addrspace(1)* %out, <3 x i16> %foo, i32 %idx) #0 {
+define amdgpu_kernel void @dynamic_extract_vector_elt_v3i16(i16 addrspace(1)* %out, <3 x i16> %foo, i32 %idx) #0 {
%p0 = extractelement <3 x i16> %foo, i32 %idx
%out1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
store i16 %p0, i16 addrspace(1)* %out
@@ -61,23 +113,32 @@ define void @dynamic_extract_vector_elt_v3i16(i16 addrspace(1)* %out, <3 x i16>
}
; GCN-LABEL: {{^}}dynamic_extract_vector_elt_v4i16:
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
+; SICIVI: buffer_load_ushort
+; SICIVI: buffer_load_ushort
+; SICIVI: buffer_load_ushort
+; SICIVI: buffer_load_ushort
-; GCN: buffer_store_short
-; GCN: buffer_store_short
-; GCN: buffer_store_short
-; GCN: buffer_store_short
+; SICIVI: buffer_store_short
+; SICIVI: buffer_store_short
+; SICIVI: buffer_store_short
+; SICIVI: buffer_store_short
-; GCN: buffer_load_ushort
-; GCN: buffer_store_short
-define void @dynamic_extract_vector_elt_v4i16(i16 addrspace(1)* %out, <4 x i16> %foo, i32 %idx) #0 {
+; SICIVI: buffer_load_ushort
+; SICIVI: buffer_store_short
+
+; GFX9: s_load_dword
+; GFX9: buffer_store_dword
+; GFX9: buffer_store_dword
+; GFX9: buffer_load_ushort
+; GFX9: buffer_store_short
+define amdgpu_kernel void @dynamic_extract_vector_elt_v4i16(i16 addrspace(1)* %out, <4 x i16> %foo, i32 %idx) #0 {
%p0 = extractelement <4 x i16> %foo, i32 %idx
%out1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
store i16 %p0, i16 addrspace(1)* %out
ret void
}
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
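; The SICIVI/GFX9 split above reflects packed 16-bit registers: on gfx9 a
; <4 x i16> kernel argument arrives as two packed dwords (the s_load_dword
; at 0x2c and 0x30) and can be stored a dword at a time, while the older
; subtargets move each 16-bit element individually.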
diff --git a/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll b/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll
index 1df91c93329a..a8d127879a32 100644
--- a/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll
+++ b/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll
@@ -8,7 +8,7 @@
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dwordx2
-define void @extract_vector_elt_select_error(i32 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %val) #0 {
+define amdgpu_kernel void @extract_vector_elt_select_error(i32 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %val) #0 {
%vec = bitcast i64 %val to <2 x i32>
%elt0 = extractelement <2 x i32> %vec, i32 0
%elt1 = extractelement <2 x i32> %vec, i32 1
@@ -20,7 +20,7 @@ define void @extract_vector_elt_select_error(i32 addrspace(1)* %out, i64 addrspa
}
; GCN-LABEL: {{^}}extract_vector_elt_v2i64:
-define void @extract_vector_elt_v2i64(i64 addrspace(1)* %out, <2 x i64> %foo) #0 {
+define amdgpu_kernel void @extract_vector_elt_v2i64(i64 addrspace(1)* %out, <2 x i64> %foo) #0 {
%p0 = extractelement <2 x i64> %foo, i32 0
%p1 = extractelement <2 x i64> %foo, i32 1
%out1 = getelementptr i64, i64 addrspace(1)* %out, i32 1
@@ -30,14 +30,14 @@ define void @extract_vector_elt_v2i64(i64 addrspace(1)* %out, <2 x i64> %foo) #0
}
; GCN-LABEL: {{^}}dyn_extract_vector_elt_v2i64:
-define void @dyn_extract_vector_elt_v2i64(i64 addrspace(1)* %out, <2 x i64> %foo, i32 %elt) #0 {
+define amdgpu_kernel void @dyn_extract_vector_elt_v2i64(i64 addrspace(1)* %out, <2 x i64> %foo, i32 %elt) #0 {
%dynelt = extractelement <2 x i64> %foo, i32 %elt
store volatile i64 %dynelt, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}dyn_extract_vector_elt_v2i64_2:
-define void @dyn_extract_vector_elt_v2i64_2(i64 addrspace(1)* %out, <2 x i64> addrspace(1)* %foo, i32 %elt, <2 x i64> %arst) #0 {
+define amdgpu_kernel void @dyn_extract_vector_elt_v2i64_2(i64 addrspace(1)* %out, <2 x i64> addrspace(1)* %foo, i32 %elt, <2 x i64> %arst) #0 {
%load = load volatile <2 x i64>, <2 x i64> addrspace(1)* %foo
%or = or <2 x i64> %load, %arst
%dynelt = extractelement <2 x i64> %or, i32 %elt
@@ -46,14 +46,14 @@ define void @dyn_extract_vector_elt_v2i64_2(i64 addrspace(1)* %out, <2 x i64> ad
}
; GCN-LABEL: {{^}}dyn_extract_vector_elt_v3i64:
-define void @dyn_extract_vector_elt_v3i64(i64 addrspace(1)* %out, <3 x i64> %foo, i32 %elt) #0 {
+define amdgpu_kernel void @dyn_extract_vector_elt_v3i64(i64 addrspace(1)* %out, <3 x i64> %foo, i32 %elt) #0 {
%dynelt = extractelement <3 x i64> %foo, i32 %elt
store volatile i64 %dynelt, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}dyn_extract_vector_elt_v4i64:
-define void @dyn_extract_vector_elt_v4i64(i64 addrspace(1)* %out, <4 x i64> %foo, i32 %elt) #0 {
+define amdgpu_kernel void @dyn_extract_vector_elt_v4i64(i64 addrspace(1)* %out, <4 x i64> %foo, i32 %elt) #0 {
%dynelt = extractelement <4 x i64> %foo, i32 %elt
store volatile i64 %dynelt, i64 addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/extract_vector_elt-i8.ll b/test/CodeGen/AMDGPU/extract_vector_elt-i8.ll
index 6f4ae827f432..b7d768fd5525 100644
--- a/test/CodeGen/AMDGPU/extract_vector_elt-i8.ll
+++ b/test/CodeGen/AMDGPU/extract_vector_elt-i8.ll
@@ -4,7 +4,7 @@
; FUNC-LABEL: {{^}}extract_vector_elt_v1i8:
; GCN: buffer_load_ubyte
; GCN: buffer_store_byte
-define void @extract_vector_elt_v1i8(i8 addrspace(1)* %out, <1 x i8> %foo) #0 {
+define amdgpu_kernel void @extract_vector_elt_v1i8(i8 addrspace(1)* %out, <1 x i8> %foo) #0 {
%p0 = extractelement <1 x i8> %foo, i32 0
store i8 %p0, i8 addrspace(1)* %out
ret void
@@ -15,7 +15,7 @@ define void @extract_vector_elt_v1i8(i8 addrspace(1)* %out, <1 x i8> %foo) #0 {
; GCN: buffer_load_ubyte
; GCN: buffer_store_byte
; GCN: buffer_store_byte
-define void @extract_vector_elt_v2i8(i8 addrspace(1)* %out, <2 x i8> %foo) #0 {
+define amdgpu_kernel void @extract_vector_elt_v2i8(i8 addrspace(1)* %out, <2 x i8> %foo) #0 {
%p0 = extractelement <2 x i8> %foo, i32 0
%p1 = extractelement <2 x i8> %foo, i32 1
%out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
@@ -29,7 +29,7 @@ define void @extract_vector_elt_v2i8(i8 addrspace(1)* %out, <2 x i8> %foo) #0 {
; GCN: buffer_load_ubyte
; GCN: buffer_store_byte
; GCN: buffer_store_byte
-define void @extract_vector_elt_v3i8(i8 addrspace(1)* %out, <3 x i8> %foo) #0 {
+define amdgpu_kernel void @extract_vector_elt_v3i8(i8 addrspace(1)* %out, <3 x i8> %foo) #0 {
%p0 = extractelement <3 x i8> %foo, i32 0
%p1 = extractelement <3 x i8> %foo, i32 2
%out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
@@ -43,7 +43,7 @@ define void @extract_vector_elt_v3i8(i8 addrspace(1)* %out, <3 x i8> %foo) #0 {
; GCN: buffer_load_ubyte
; GCN: buffer_store_byte
; GCN: buffer_store_byte
-define void @extract_vector_elt_v4i8(i8 addrspace(1)* %out, <4 x i8> %foo) #0 {
+define amdgpu_kernel void @extract_vector_elt_v4i8(i8 addrspace(1)* %out, <4 x i8> %foo) #0 {
%p0 = extractelement <4 x i8> %foo, i32 0
%p1 = extractelement <4 x i8> %foo, i32 2
%out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
@@ -57,7 +57,7 @@ define void @extract_vector_elt_v4i8(i8 addrspace(1)* %out, <4 x i8> %foo) #0 {
; GCN: buffer_load_ubyte
; GCN: buffer_store_byte
; GCN: buffer_store_byte
-define void @extract_vector_elt_v8i8(i8 addrspace(1)* %out, <8 x i8> %foo) #0 {
+define amdgpu_kernel void @extract_vector_elt_v8i8(i8 addrspace(1)* %out, <8 x i8> %foo) #0 {
%p0 = extractelement <8 x i8> %foo, i32 0
%p1 = extractelement <8 x i8> %foo, i32 2
%out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
@@ -71,7 +71,7 @@ define void @extract_vector_elt_v8i8(i8 addrspace(1)* %out, <8 x i8> %foo) #0 {
; GCN: buffer_load_ubyte
; GCN: buffer_store_byte
; GCN: buffer_store_byte
-define void @extract_vector_elt_v16i8(i8 addrspace(1)* %out, <16 x i8> %foo) #0 {
+define amdgpu_kernel void @extract_vector_elt_v16i8(i8 addrspace(1)* %out, <16 x i8> %foo) #0 {
%p0 = extractelement <16 x i8> %foo, i32 0
%p1 = extractelement <16 x i8> %foo, i32 2
%out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
@@ -85,7 +85,7 @@ define void @extract_vector_elt_v16i8(i8 addrspace(1)* %out, <16 x i8> %foo) #0
; GCN: buffer_load_ubyte
; GCN: buffer_store_byte
; GCN: buffer_store_byte
-define void @extract_vector_elt_v32i8(i8 addrspace(1)* %out, <32 x i8> %foo) #0 {
+define amdgpu_kernel void @extract_vector_elt_v32i8(i8 addrspace(1)* %out, <32 x i8> %foo) #0 {
%p0 = extractelement <32 x i8> %foo, i32 0
%p1 = extractelement <32 x i8> %foo, i32 2
%out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
@@ -99,7 +99,7 @@ define void @extract_vector_elt_v32i8(i8 addrspace(1)* %out, <32 x i8> %foo) #0
; GCN: buffer_load_ubyte
; GCN: buffer_store_byte
; GCN: buffer_store_byte
-define void @extract_vector_elt_v64i8(i8 addrspace(1)* %out, <64 x i8> %foo) #0 {
+define amdgpu_kernel void @extract_vector_elt_v64i8(i8 addrspace(1)* %out, <64 x i8> %foo) #0 {
%p0 = extractelement <64 x i8> %foo, i32 0
%p1 = extractelement <64 x i8> %foo, i32 2
%out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
@@ -120,7 +120,7 @@ define void @extract_vector_elt_v64i8(i8 addrspace(1)* %out, <64 x i8> %foo) #0
; GCN: buffer_store_byte
; GCN: buffer_load_ubyte
; GCN: buffer_store_byte
-define void @dynamic_extract_vector_elt_v3i8(i8 addrspace(1)* %out, <3 x i8> %foo, i32 %idx) #0 {
+define amdgpu_kernel void @dynamic_extract_vector_elt_v3i8(i8 addrspace(1)* %out, <3 x i8> %foo, i32 %idx) #0 {
%p0 = extractelement <3 x i8> %foo, i32 %idx
%out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
store i8 %p0, i8 addrspace(1)* %out
@@ -141,7 +141,7 @@ define void @dynamic_extract_vector_elt_v3i8(i8 addrspace(1)* %out, <3 x i8> %fo
; GCN: buffer_store_byte
; GCN: buffer_load_ubyte
; GCN: buffer_store_byte
-define void @dynamic_extract_vector_elt_v4i8(i8 addrspace(1)* %out, <4 x i8> %foo, i32 %idx) #0 {
+define amdgpu_kernel void @dynamic_extract_vector_elt_v4i8(i8 addrspace(1)* %out, <4 x i8> %foo, i32 %idx) #0 {
%p0 = extractelement <4 x i8> %foo, i32 %idx
%out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
store i8 %p0, i8 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/extractelt-to-trunc.ll b/test/CodeGen/AMDGPU/extractelt-to-trunc.ll
index e160c20a03a0..34999fa3aea4 100644
--- a/test/CodeGen/AMDGPU/extractelt-to-trunc.ll
+++ b/test/CodeGen/AMDGPU/extractelt-to-trunc.ll
@@ -7,7 +7,7 @@
; GCN-DAG: buffer_load_dword [[A:v[0-9]+]]
; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, [[B]], [[A]]
; GCN: buffer_store_dword [[ADD]]
-define void @bitcast_int_to_vector_extract_0(i32 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %b) {
+define amdgpu_kernel void @bitcast_int_to_vector_extract_0(i32 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %b) {
%a = load i64, i64 addrspace(1)* %in
%add = add i64 %a, %b
%val.bc = bitcast i64 %add to <2 x i32>
@@ -20,7 +20,7 @@ define void @bitcast_int_to_vector_extract_0(i32 addrspace(1)* %out, i64 addrspa
; GCN: buffer_load_dwordx2
; GCN: v_add_f64
; GCN: buffer_store_dword v
-define void @bitcast_fp_to_vector_extract_0(i32 addrspace(1)* %out, double addrspace(1)* %in, double %b) {
+define amdgpu_kernel void @bitcast_fp_to_vector_extract_0(i32 addrspace(1)* %out, double addrspace(1)* %in, double %b) {
%a = load double, double addrspace(1)* %in
%add = fadd double %a, %b
%val.bc = bitcast double %add to <2 x i32>
@@ -33,7 +33,7 @@ define void @bitcast_fp_to_vector_extract_0(i32 addrspace(1)* %out, double addrs
; GCN: buffer_load_dwordx2
; GCN: v_add_i32
; GCN: buffer_store_dword
-define void @bitcast_int_to_fpvector_extract_0(float addrspace(1)* %out, i64 addrspace(1)* %in, i64 %b) {
+define amdgpu_kernel void @bitcast_int_to_fpvector_extract_0(float addrspace(1)* %out, i64 addrspace(1)* %in, i64 %b) {
%a = load i64, i64 addrspace(1)* %in
%add = add i64 %a, %b
%val.bc = bitcast i64 %add to <2 x float>
@@ -45,7 +45,7 @@ define void @bitcast_int_to_fpvector_extract_0(float addrspace(1)* %out, i64 add
; GCN-LABEL: {{^}}no_extract_volatile_load_extract0:
; GCN: buffer_load_dwordx4
; GCN: buffer_store_dword v
-define void @no_extract_volatile_load_extract0(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @no_extract_volatile_load_extract0(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
entry:
%vec = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
%elt0 = extractelement <4 x i32> %vec, i32 0
@@ -57,7 +57,7 @@ entry:
; GCN: buffer_load_dwordx4
; GCN: buffer_store_dword v
-define void @no_extract_volatile_load_extract2(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @no_extract_volatile_load_extract2(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
entry:
%vec = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
%elt2 = extractelement <4 x i32> %vec, i32 2
@@ -68,7 +68,7 @@ entry:
; GCN-LABEL: {{^}}no_extract_volatile_load_dynextract:
; GCN: buffer_load_dwordx4
; GCN: buffer_store_dword v
-define void @no_extract_volatile_load_dynextract(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
+define amdgpu_kernel void @no_extract_volatile_load_dynextract(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
entry:
%vec = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
%eltN = extractelement <4 x i32> %vec, i32 %idx
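; These tests pin the extractelt-to-trunc combine, i.e. folds of the form
;   extractelement (bitcast i64 %x to <2 x i32>), i32 0 -> trunc i64 %x to i32
; and the no_extract_volatile_* cases check that the fold stays off when
; the full-width volatile vector load must be preserved.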
diff --git a/test/CodeGen/AMDGPU/fabs.f16.ll b/test/CodeGen/AMDGPU/fabs.f16.ll
index c64aa6228c71..d4ef7124a334 100644
--- a/test/CodeGen/AMDGPU/fabs.f16.ll
+++ b/test/CodeGen/AMDGPU/fabs.f16.ll
@@ -1,69 +1,74 @@
; RUN: llc -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx901 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
; DAGCombiner will transform:
; (fabs (f16 bitcast (i16 a))) => (f16 bitcast (and (i16 a), 0x7FFF))
; unless isFabsFree returns true
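; In IR terms the combined form is, for example:
;   %and = and i16 %a, 32767         ; clear bit 15, the half's sign bit
;   %abs = bitcast i16 %and to half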
-; GCN-LABEL: {{^}}fabs_free_f16:
+; GCN-LABEL: {{^}}s_fabs_free_f16:
; GCN: flat_load_ushort [[VAL:v[0-9]+]],
; GCN: v_and_b32_e32 [[RESULT:v[0-9]+]], 0x7fff, [[VAL]]
; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fabs_free_f16(half addrspace(1)* %out, i16 %in) {
+define amdgpu_kernel void @s_fabs_free_f16(half addrspace(1)* %out, i16 %in) {
%bc= bitcast i16 %in to half
%fabs = call half @llvm.fabs.f16(half %bc)
store half %fabs, half addrspace(1)* %out
ret void
}
-; GCN-LABEL: {{^}}fabs_f16:
+; GCN-LABEL: {{^}}s_fabs_f16:
; CI: flat_load_ushort [[VAL:v[0-9]+]],
-; CI: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], [[VAL]]
-; CI: v_cvt_f16_f32_e64 [[RESULT:v[0-9]+]], |[[CVT0]]|
+; CI: v_and_b32_e32 [[RESULT:v[0-9]+]], 0x7fff, [[VAL]]
; CI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fabs_f16(half addrspace(1)* %out, half %in) {
+define amdgpu_kernel void @s_fabs_f16(half addrspace(1)* %out, half %in) {
%fabs = call half @llvm.fabs.f16(half %in)
store half %fabs, half addrspace(1)* %out
ret void
}
; FIXME: Should be able to use single and
-; GCN-LABEL: {{^}}fabs_v2f16:
-; CI: v_cvt_f32_f16_e32 v{{[0-9]+}},
-; CI: v_cvt_f32_f16_e32 v{{[0-9]+}},
-; CI: v_cvt_f16_f32_e64 v{{[0-9]+}}, |v{{[0-9]+}}|
-; CI: v_cvt_f16_f32_e64 v{{[0-9]+}}, |v{{[0-9]+}}|
+; GCN-LABEL: {{^}}s_fabs_v2f16:
+; CI: s_movk_i32 [[MASK:s[0-9]+]], 0x7fff
+; CI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]]
+; CI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16,
+; CI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]]
+; CI: v_or_b32_e32
-; VI: flat_load_ushort [[LO:v[0-9]+]]
; VI: flat_load_ushort [[HI:v[0-9]+]]
+; VI: flat_load_ushort [[LO:v[0-9]+]]
; VI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x7fff{{$}}
-; VI-DAG: v_and_b32_e32 [[FABS_LO:v[0-9]+]], [[MASK]], [[LO]]
; VI-DAG: v_and_b32_e32 [[FABS_LO:v[0-9]+]], [[MASK]], [[HI]]
-; VI-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 16,
-; VI-DAG: v_and_b32_e32 v{{[0-9]+}}, 0xffff,
-; VI: v_or_b32
+; VI-DAG: v_and_b32_sdwa [[FABS_HI:v[0-9]+]], [[MASK]], [[LO]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, [[FABS_HI]], [[FABS_LO]]
; VI: flat_store_dword
-define void @fabs_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %in) {
+
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: s_and_b32 s{{[0-9]+}}, [[VAL]], 0x7fff7fff
+define amdgpu_kernel void @s_fabs_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %in) {
%fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %in)
store <2 x half> %fabs, <2 x half> addrspace(1)* %out
ret void
}
-; GCN-LABEL: {{^}}fabs_v4f16:
-; CI: v_cvt_f16_f32_e64 v{{[0-9]+}}, |v{{[0-9]+}}|
-; CI: v_cvt_f16_f32_e64 v{{[0-9]+}}, |v{{[0-9]+}}|
-; CI: v_cvt_f16_f32_e64 v{{[0-9]+}}, |v{{[0-9]+}}|
-; CI: v_cvt_f16_f32_e64 v{{[0-9]+}}, |v{{[0-9]+}}|
+; GCN-LABEL: {{^}}s_fabs_v4f16:
+; CI: s_movk_i32 [[MASK:s[0-9]+]], 0x7fff
+; CI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]]
+; CI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]]
+; CI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]]
+; CI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]]
; VI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x7fff{{$}}
-; VI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], v{{[0-9]+}}
-; VI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], v{{[0-9]+}}
-; VI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], v{{[0-9]+}}
-; VI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], v{{[0-9]+}}
+; VI-DAG: v_and_b32_sdwa v{{[0-9]+}}, [[MASK]], v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: v_and_b32_sdwa v{{[0-9]+}}, [[MASK]], v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], v{{[0-9]+}}
+; VI-DAG: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], v{{[0-9]+}}
+; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: flat_store_dwordx2
-define void @fabs_v4f16(<4 x half> addrspace(1)* %out, <4 x half> %in) {
+define amdgpu_kernel void @s_fabs_v4f16(<4 x half> addrspace(1)* %out, <4 x half> %in) {
%fabs = call <4 x half> @llvm.fabs.v4f16(<4 x half> %in)
store <4 x half> %fabs, <4 x half> addrspace(1)* %out
ret void
@@ -72,22 +77,74 @@ define void @fabs_v4f16(<4 x half> addrspace(1)* %out, <4 x half> %in) {
; GCN-LABEL: {{^}}fabs_fold_f16:
; GCN: flat_load_ushort [[IN0:v[0-9]+]]
; GCN: flat_load_ushort [[IN1:v[0-9]+]]
+
; CI-DAG: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], [[IN0]]
-; CI-DAG: v_cvt_f32_f16_e32 [[CVT1:v[0-9]+]], [[IN1]]
-; CI: v_mul_f32_e64 [[RESULT:v[0-9]+]], |[[CVT1]]|, [[CVT0]]
+; CI-DAG: v_cvt_f32_f16_e64 [[ABS_CVT1:v[0-9]+]], |[[IN1]]|
+; CI: v_mul_f32_e32 [[RESULT:v[0-9]+]], [[CVT0]], [[ABS_CVT1]]
; CI: v_cvt_f16_f32_e32 [[CVTRESULT:v[0-9]+]], [[RESULT]]
; CI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[CVTRESULT]]
; VI-NOT: and
; VI: v_mul_f16_e64 [[RESULT:v[0-9]+]], |[[IN1]]|, [[IN0]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fabs_fold_f16(half addrspace(1)* %out, half %in0, half %in1) {
+define amdgpu_kernel void @fabs_fold_f16(half addrspace(1)* %out, half %in0, half %in1) {
%fabs = call half @llvm.fabs.f16(half %in0)
%fmul = fmul half %fabs, %in1
store half %fmul, half addrspace(1)* %out
ret void
}
-declare half @llvm.fabs.f16(half) readnone
-declare <2 x half> @llvm.fabs.v2f16(<2 x half>) readnone
-declare <4 x half> @llvm.fabs.v4f16(<4 x half>) readnone
+; GCN-LABEL: {{^}}v_fabs_v2f16:
+; GCN: flat_load_dword [[VAL:v[0-9]+]]
+; GCN: v_and_b32_e32 v{{[0-9]+}}, 0x7fff7fff, [[VAL]]
+define amdgpu_kernel void @v_fabs_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.in = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %in, i32 %tid
+ %val = load <2 x half>, <2 x half> addrspace(1)* %gep.in, align 2
+ %fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %val)
+ store <2 x half> %fabs, <2 x half> addrspace(1)* %gep.out
+ ret void
+}
+
+; GCN-LABEL: {{^}}fabs_free_v2f16:
+; GCN: s_load_dword [[VAL:s[0-9]+]]
+; GCN: s_and_b32 s{{[0-9]+}}, [[VAL]], 0x7fff7fff
+define amdgpu_kernel void @fabs_free_v2f16(<2 x half> addrspace(1)* %out, i32 %in) #0 {
+ %bc = bitcast i32 %in to <2 x half>
+ %fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %bc)
+ store <2 x half> %fabs, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_fabs_fold_v2f16:
+; GCN: flat_load_dword [[VAL:v[0-9]+]]
+
+; CI: v_cvt_f32_f16_e32
+; CI: v_cvt_f32_f16_e32
+; CI: v_mul_f32_e64 v{{[0-9]+}}, |v{{[0-9]+}}|, v{{[0-9]+}}
+; CI: v_cvt_f16_f32
+; CI: v_mul_f32_e64 v{{[0-9]+}}, |v{{[0-9]+}}|, v{{[0-9]+}}
+; CI: v_cvt_f16_f32
+
+; VI: v_lshrrev_b32_e32 v{{[0-9]+}}, 16,
+; VI: v_mul_f16_e64 v{{[0-9]+}}, |v{{[0-9]+}}|, v{{[0-9]+}}
+; VI: v_mul_f16_e64 v{{[0-9]+}}, |v{{[0-9]+}}|, v{{[0-9]+}}
+
+; GFX9: v_and_b32_e32 [[FABS:v[0-9]+]], 0x7fff7fff, [[VAL]]
+; GFX9: v_pk_mul_f16 v{{[0-9]+}}, [[FABS]], v{{[0-9]+$}}
+define amdgpu_kernel void @v_fabs_fold_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+ %val = load <2 x half>, <2 x half> addrspace(1)* %in
+ %fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %val)
+ %fmul = fmul <2 x half> %fabs, %val
+ store <2 x half> %fmul, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+declare half @llvm.fabs.f16(half) #1
+declare <2 x half> @llvm.fabs.v2f16(<2 x half>) #1
+declare <4 x half> @llvm.fabs.v4f16(<4 x half>) #1
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/fabs.f64.ll b/test/CodeGen/AMDGPU/fabs.f64.ll
index f7780b875ff5..998e02f7bdf8 100644
--- a/test/CodeGen/AMDGPU/fabs.f64.ll
+++ b/test/CodeGen/AMDGPU/fabs.f64.ll
@@ -10,7 +10,7 @@ declare <4 x double> @llvm.fabs.v4f64(<4 x double>) readnone
; FUNC-LABEL: {{^}}v_fabs_f64:
; SI: v_and_b32
; SI: s_endpgm
-define void @v_fabs_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
+define amdgpu_kernel void @v_fabs_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%tidext = sext i32 %tid to i64
%gep = getelementptr double, double addrspace(1)* %in, i64 %tidext
@@ -24,7 +24,7 @@ define void @v_fabs_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
; SI: v_and_b32
; SI-NOT: v_and_b32
; SI: s_endpgm
-define void @fabs_f64(double addrspace(1)* %out, double %in) {
+define amdgpu_kernel void @fabs_f64(double addrspace(1)* %out, double %in) {
%fabs = call double @llvm.fabs.f64(double %in)
store double %fabs, double addrspace(1)* %out
ret void
@@ -34,7 +34,7 @@ define void @fabs_f64(double addrspace(1)* %out, double %in) {
; SI: v_and_b32
; SI: v_and_b32
; SI: s_endpgm
-define void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
+define amdgpu_kernel void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
%fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %in)
store <2 x double> %fabs, <2 x double> addrspace(1)* %out
ret void
@@ -46,7 +46,7 @@ define void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
; SI: v_and_b32
; SI: v_and_b32
; SI: s_endpgm
-define void @fabs_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
+define amdgpu_kernel void @fabs_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
%fabs = call <4 x double> @llvm.fabs.v4f64(<4 x double> %in)
store <4 x double> %fabs, <4 x double> addrspace(1)* %out
ret void
@@ -57,7 +57,7 @@ define void @fabs_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
; SI-NOT: and
; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, |[[ABS_VALUE]]|
; SI: s_endpgm
-define void @fabs_fold_f64(double addrspace(1)* %out, double %in0, double %in1) {
+define amdgpu_kernel void @fabs_fold_f64(double addrspace(1)* %out, double %in0, double %in1) {
%fabs = call double @llvm.fabs.f64(double %in0)
%fmul = fmul double %fabs, %in1
store double %fmul, double addrspace(1)* %out
@@ -69,7 +69,7 @@ define void @fabs_fold_f64(double addrspace(1)* %out, double %in0, double %in1)
; SI-NOT: and
; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, |[[ABS_VALUE]]|
; SI: s_endpgm
-define void @fabs_fn_fold_f64(double addrspace(1)* %out, double %in0, double %in1) {
+define amdgpu_kernel void @fabs_fn_fold_f64(double addrspace(1)* %out, double %in0, double %in1) {
%fabs = call double @fabs(double %in0)
%fmul = fmul double %fabs, %in1
store double %fmul, double addrspace(1)* %out
@@ -79,7 +79,7 @@ define void @fabs_fn_fold_f64(double addrspace(1)* %out, double %in0, double %in
; FUNC-LABEL: {{^}}fabs_free_f64:
; SI: v_and_b32
; SI: s_endpgm
-define void @fabs_free_f64(double addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @fabs_free_f64(double addrspace(1)* %out, i64 %in) {
%bc= bitcast i64 %in to double
%fabs = call double @llvm.fabs.f64(double %bc)
store double %fabs, double addrspace(1)* %out
@@ -89,7 +89,7 @@ define void @fabs_free_f64(double addrspace(1)* %out, i64 %in) {
; FUNC-LABEL: {{^}}fabs_fn_free_f64:
; SI: v_and_b32
; SI: s_endpgm
-define void @fabs_fn_free_f64(double addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @fabs_fn_free_f64(double addrspace(1)* %out, i64 %in) {
%bc= bitcast i64 %in to double
%fabs = call double @fabs(double %bc)
store double %fabs, double addrspace(1)* %out
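; For f64 the sign lives in bit 63, so fabs needs exactly one v_and_b32 on
; the high dword of the register pair, e.g.
;   %hi.abs = and i32 %hi, 2147483647   ; 0x7fffffff: clear bit 31
; which is what the paired "SI: v_and_b32" / "SI-NOT: v_and_b32" lines
; pin down.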
diff --git a/test/CodeGen/AMDGPU/fabs.ll b/test/CodeGen/AMDGPU/fabs.ll
index 98e7f9e3e9ad..ac8fa3e45ef5 100644
--- a/test/CodeGen/AMDGPU/fabs.ll
+++ b/test/CodeGen/AMDGPU/fabs.ll
@@ -13,7 +13,7 @@
; GCN: v_and_b32
-define void @fabs_fn_free(float addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @fabs_fn_free(float addrspace(1)* %out, i32 %in) {
%bc= bitcast i32 %in to float
%fabs = call float @fabs(float %bc)
store float %fabs, float addrspace(1)* %out
@@ -26,7 +26,7 @@ define void @fabs_fn_free(float addrspace(1)* %out, i32 %in) {
; GCN: v_and_b32
-define void @fabs_free(float addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @fabs_free(float addrspace(1)* %out, i32 %in) {
%bc= bitcast i32 %in to float
%fabs = call float @llvm.fabs.f32(float %bc)
store float %fabs, float addrspace(1)* %out
@@ -37,7 +37,7 @@ define void @fabs_free(float addrspace(1)* %out, i32 %in) {
; R600: |{{(PV|T[0-9])\.[XYZW]}}|
; GCN: v_and_b32
-define void @fabs_f32(float addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fabs_f32(float addrspace(1)* %out, float %in) {
%fabs = call float @llvm.fabs.f32(float %in)
store float %fabs, float addrspace(1)* %out
ret void
@@ -49,7 +49,7 @@ define void @fabs_f32(float addrspace(1)* %out, float %in) {
; GCN: v_and_b32
; GCN: v_and_b32
-define void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
+define amdgpu_kernel void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
%fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %in)
store <2 x float> %fabs, <2 x float> addrspace(1)* %out
ret void
@@ -65,7 +65,7 @@ define void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
; GCN: v_and_b32
; GCN: v_and_b32
; GCN: v_and_b32
-define void @fabs_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
+define amdgpu_kernel void @fabs_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
%fabs = call <4 x float> @llvm.fabs.v4f32(<4 x float> %in)
store <4 x float> %fabs, <4 x float> addrspace(1)* %out
ret void
@@ -76,7 +76,7 @@ define void @fabs_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
; VI: s_load_dword [[ABS_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x2c
; GCN-NOT: and
; GCN: v_mul_f32_e64 v{{[0-9]+}}, v{{[0-9]+}}, |[[ABS_VALUE]]|
-define void @fabs_fn_fold(float addrspace(1)* %out, float %in0, float %in1) {
+define amdgpu_kernel void @fabs_fn_fold(float addrspace(1)* %out, float %in0, float %in1) {
%fabs = call float @fabs(float %in0)
%fmul = fmul float %fabs, %in1
store float %fmul, float addrspace(1)* %out
@@ -88,7 +88,7 @@ define void @fabs_fn_fold(float addrspace(1)* %out, float %in0, float %in1) {
; VI: s_load_dword [[ABS_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x2c
; GCN-NOT: and
; GCN: v_mul_f32_e64 v{{[0-9]+}}, v{{[0-9]+}}, |[[ABS_VALUE]]|
-define void @fabs_fold(float addrspace(1)* %out, float %in0, float %in1) {
+define amdgpu_kernel void @fabs_fold(float addrspace(1)* %out, float %in0, float %in1) {
%fabs = call float @llvm.fabs.f32(float %in0)
%fmul = fmul float %fabs, %in1
store float %fmul, float addrspace(1)* %out
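; The *_fold tests rely on VOP3 source modifiers: |[[ABS_VALUE]]| in
; v_mul_f32_e64 applies abs to the input operand for free, so no separate
; v_and_b32 is emitted (hence the GCN-NOT: and checks).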
diff --git a/test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll b/test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll
index b74bce76f79c..9edf55cbc69f 100644
--- a/test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll
+++ b/test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll
@@ -28,7 +28,7 @@
; GCN-SLOWFMA: v_mul_f32_e32
; GCN-SLOWFMA: v_add_f32_e32
; GCN-SLOWFMA: v_add_f32_e32
-define void @fast_add_fmuladd_fmul() #0 {
+define amdgpu_kernel void @fast_add_fmuladd_fmul() #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -55,7 +55,7 @@ define void @fast_add_fmuladd_fmul() #0 {
; GCN-FASTFMA: v_fma_f32 [[FMA0:v[0-9]+]], [[U]], [[V]], -[[Z]]
; GCN-FASTFMA: v_fma_f32 [[FMA1:v[0-9]+]], [[X]], [[Y]], [[FMA0]]
; GCN-FASTFMA: buffer_store_dword [[FMA1]]
-define void @fast_sub_fmuladd_fmul() #0 {
+define amdgpu_kernel void @fast_sub_fmuladd_fmul() #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -87,7 +87,7 @@ define void @fast_sub_fmuladd_fmul() #0 {
; GCN-SLOWFMA: v_mul_f32_e32
; GCN-SLOWFMA: v_add_f32_e32
; GCN-SLOWFMA: v_add_f32_e32
-define void @fast_add_fmuladd_fmul_multi_use_mul() #0 {
+define amdgpu_kernel void @fast_add_fmuladd_fmul_multi_use_mul() #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -120,7 +120,7 @@ define void @fast_add_fmuladd_fmul_multi_use_mul() #0 {
; GCN-SLOWFMA: v_mul_f32_e32
; GCN-SLOWFMA: v_add_f32_e32
; GCN-SLOWFMA: v_add_f32_e32
-define void @fast_add_fmuladd_fmul_multi_use_mul_commute() #0 {
+define amdgpu_kernel void @fast_add_fmuladd_fmul_multi_use_mul_commute() #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -145,7 +145,7 @@ define void @fast_add_fmuladd_fmul_multi_use_mul_commute() #0 {
; GCN-SLOWFMA: v_mul_f32_e32
; GCN-SLOWFMA: v_add_f32_e32
; GCN-SLOWFMA: v_add_f32_e32
-define void @fast_add_fmuladd_fmul_multi_use_fmuladd() #0 {
+define amdgpu_kernel void @fast_add_fmuladd_fmul_multi_use_fmuladd() #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -170,7 +170,7 @@ define void @fast_add_fmuladd_fmul_multi_use_fmuladd() #0 {
; GCN-SLOWFMA: v_mul_f32_e32
; GCN-SLOWFMA: v_add_f32_e32
; GCN-SLOWFMA: v_add_f32_e32
-define void @fast_add_fmuladd_fmul_multi_use_fmuladd_commute() #0 {
+define amdgpu_kernel void @fast_add_fmuladd_fmul_multi_use_fmuladd_commute() #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -205,7 +205,7 @@ define void @fast_add_fmuladd_fmul_multi_use_fmuladd_commute() #0 {
; GCN: buffer_store_dword [[MUL]]
; GCN: buffer_store_dword [[MAD]]
-define void @fast_sub_fmuladd_fmul_multi_use_mul() #0 {
+define amdgpu_kernel void @fast_sub_fmuladd_fmul_multi_use_mul() #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -241,7 +241,7 @@ define void @fast_sub_fmuladd_fmul_multi_use_mul() #0 {
; GCN-SLOWFMA-DAG: v_mul_f32_e32 v{{[0-9]+}}, [[Y]], [[X]]
; GCN-SLOWFMA: v_add_f32_e32
; GCN-SLOWFMA: v_subrev_f32_e32
-define void @fast_sub_fmuladd_fmul_multi_use_fmuladd() #0 {
+define amdgpu_kernel void @fast_sub_fmuladd_fmul_multi_use_fmuladd() #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
diff --git a/test/CodeGen/AMDGPU/fadd.f16.ll b/test/CodeGen/AMDGPU/fadd.f16.ll
index 9ca077564e2b..f76ecf58d905 100644
--- a/test/CodeGen/AMDGPU/fadd.f16.ll
+++ b/test/CodeGen/AMDGPU/fadd.f16.ll
@@ -11,7 +11,7 @@
; VI: v_add_f16_e32 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @fadd_f16(
+define amdgpu_kernel void @fadd_f16(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -25,14 +25,13 @@ entry:
; GCN-LABEL: {{^}}fadd_f16_imm_a
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], 0x3c00{{$}}
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
-; SI: v_add_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]], v[[B_F32]]
+; SI: v_add_f32_e32 v[[R_F32:[0-9]+]], 1.0, v[[B_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
; VI: v_add_f16_e32 v[[R_F16:[0-9]+]], 1.0, v[[B_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @fadd_f16_imm_a(
+define amdgpu_kernel void @fadd_f16_imm_a(
half addrspace(1)* %r,
half addrspace(1)* %b) {
entry:
@@ -44,14 +43,13 @@ entry:
; GCN-LABEL: {{^}}fadd_f16_imm_b
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], 0x4000{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
-; SI: v_add_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_add_f32_e32 v[[R_F32:[0-9]+]], 2.0, v[[A_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
; VI: v_add_f16_e32 v[[R_F16:[0-9]+]], 2.0, v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @fadd_f16_imm_b(
+define amdgpu_kernel void @fadd_f16_imm_b(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -61,27 +59,31 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fadd_v2f16
+; GCN-LABEL: {{^}}fadd_v2f16:
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+
+; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
; SI: v_add_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI: v_add_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_add_f16_e32 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
-; VI: v_add_f16_e32 v[[R_F16_1:[0-9]+]], v[[B_F16_1]], v[[A_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; SI: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; VI-DAG: v_add_f16_e32 v[[R_F16_LO:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
+; VI-DAG: v_add_f16_sdwa v[[R_F16_HI:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @fadd_v2f16(
+define amdgpu_kernel void @fadd_v2f16(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
@@ -93,25 +95,26 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fadd_v2f16_imm_a
+; GCN-LABEL: {{^}}fadd_v2f16_imm_a:
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], 0x3c00{{$}}
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], 0x4000{{$}}
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_add_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_add_f32_e32 v[[R_F32_0:[0-9]+]], 1.0, v[[B_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; SI: v_add_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]]
+; SI: v_add_f32_e32 v[[R_F32_1:[0-9]+]], 2.0, v[[B_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_add_f16_e32 v[[R_F16_0:[0-9]+]], 1.0, v[[B_V2_F16]]
-; VI: v_add_f16_e32 v[[R_F16_1:[0-9]+]], 2.0, v[[B_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; VI-DAG: v_add_f16_e32 v[[R_F16_1:[0-9]+]], 2.0, v[[B_F16_1]]
+; VI-DAG: v_add_f16_e32 v[[R_F16_0:[0-9]+]], 1.0, v[[B_V2_F16]]
+; VI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @fadd_v2f16_imm_a(
+define amdgpu_kernel void @fadd_v2f16_imm_a(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %b) {
entry:
@@ -121,25 +124,26 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fadd_v2f16_imm_b
+; GCN-LABEL: {{^}}fadd_v2f16_imm_b:
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], 0x4000{{$}}
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], 0x3c00{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_add_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
+; SI: v_add_f32_e32 v[[R_F32_0:[0-9]+]], 2.0, v[[A_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; SI: v_add_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_add_f32_e32 v[[R_F32_1:[0-9]+]], 1.0, v[[A_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_add_f16_e32 v[[R_F16_0:[0-9]+]], 2.0, v[[A_V2_F16]]
-; VI: v_add_f16_e32 v[[R_F16_1:[0-9]+]], 1.0, v[[A_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; VI-DAG: v_add_f16_e32 v[[R_F16_0:[0-9]+]], 1.0, v[[A_F16_1]]
+; VI-DAG: v_add_f16_e32 v[[R_F16_1:[0-9]+]], 2.0, v[[A_V2_F16]]
+; VI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_0]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_1]]
+
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @fadd_v2f16_imm_b(
+define amdgpu_kernel void @fadd_v2f16_imm_b(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
diff --git a/test/CodeGen/AMDGPU/fadd.ll b/test/CodeGen/AMDGPU/fadd.ll
index 0f683f7bfa23..621a0de281db 100644
--- a/test/CodeGen/AMDGPU/fadd.ll
+++ b/test/CodeGen/AMDGPU/fadd.ll
@@ -5,7 +5,7 @@
; FUNC-LABEL: {{^}}fadd_f32:
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].W
; SI: v_add_f32
-define void @fadd_f32(float addrspace(1)* %out, float %a, float %b) {
+define amdgpu_kernel void @fadd_f32(float addrspace(1)* %out, float %a, float %b) #0 {
%add = fadd float %a, %b
store float %add, float addrspace(1)* %out, align 4
ret void
@@ -16,7 +16,7 @@ define void @fadd_f32(float addrspace(1)* %out, float %a, float %b) {
; R600-DAG: ADD {{\** *}}T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
; SI: v_add_f32
; SI: v_add_f32
-define void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
+define amdgpu_kernel void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) #0 {
%add = fadd <2 x float> %a, %b
store <2 x float> %add, <2 x float> addrspace(1)* %out, align 8
ret void
@@ -31,7 +31,7 @@ define void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x flo
; SI: v_add_f32
; SI: v_add_f32
; SI: v_add_f32
-define void @fadd_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+define amdgpu_kernel void @fadd_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) #0 {
%b_ptr = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 1
%a = load <4 x float>, <4 x float> addrspace(1)* %in, align 16
%b = load <4 x float>, <4 x float> addrspace(1)* %b_ptr, align 16
@@ -57,8 +57,19 @@ define void @fadd_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)
; SI: v_add_f32
; SI: v_add_f32
; SI: v_add_f32
-define void @fadd_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b) {
+define amdgpu_kernel void @fadd_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b) #0 {
%add = fadd <8 x float> %a, %b
store <8 x float> %add, <8 x float> addrspace(1)* %out, align 32
ret void
}
+
+; FUNC-LABEL: {{^}}fadd_0_nsz_attr_f32:
+; SI-NOT: v_add_f32
+define amdgpu_kernel void @fadd_0_nsz_attr_f32(float addrspace(1)* %out, float %a) #1 {
+ %add = fadd float %a, 0.0
+ store float %add, float addrspace(1)* %out, align 4
+ ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" } \ No newline at end of file
diff --git a/test/CodeGen/AMDGPU/fadd64.ll b/test/CodeGen/AMDGPU/fadd64.ll
index 6f0c9de8ebaf..7eb7747de215 100644
--- a/test/CodeGen/AMDGPU/fadd64.ll
+++ b/test/CodeGen/AMDGPU/fadd64.ll
@@ -3,7 +3,7 @@
; CHECK-LABEL: {{^}}v_fadd_f64:
; CHECK: v_add_f64 {{v[[0-9]+:[0-9]+]}}, {{v[[0-9]+:[0-9]+]}}, {{v[[0-9]+:[0-9]+]}}
-define void @v_fadd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+define amdgpu_kernel void @v_fadd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double, double addrspace(1)* %in1
%r1 = load double, double addrspace(1)* %in2
@@ -14,7 +14,7 @@ define void @v_fadd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
; CHECK-LABEL: {{^}}s_fadd_f64:
; CHECK: v_add_f64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
-define void @s_fadd_f64(double addrspace(1)* %out, double %r0, double %r1) {
+define amdgpu_kernel void @s_fadd_f64(double addrspace(1)* %out, double %r0, double %r1) {
%r2 = fadd double %r0, %r1
store double %r2, double addrspace(1)* %out
ret void
@@ -24,7 +24,7 @@ define void @s_fadd_f64(double addrspace(1)* %out, double %r0, double %r1) {
; CHECK: v_add_f64
; CHECK: v_add_f64
; CHECK: _store_dwordx4
-define void @v_fadd_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
+define amdgpu_kernel void @v_fadd_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
<2 x double> addrspace(1)* %in2) {
%r0 = load <2 x double>, <2 x double> addrspace(1)* %in1
%r1 = load <2 x double>, <2 x double> addrspace(1)* %in2
@@ -37,7 +37,7 @@ define void @v_fadd_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspac
; CHECK: v_add_f64 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}
; CHECK: v_add_f64 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}
; CHECK: _store_dwordx4
-define void @s_fadd_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %r0, <2 x double> %r1) {
+define amdgpu_kernel void @s_fadd_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %r0, <2 x double> %r1) {
%r2 = fadd <2 x double> %r0, %r1
store <2 x double> %r2, <2 x double> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/fcanonicalize.f16.ll b/test/CodeGen/AMDGPU/fcanonicalize.f16.ll
index ad3992f4cd03..f2686a5582dc 100644
--- a/test/CodeGen/AMDGPU/fcanonicalize.f16.ll
+++ b/test/CodeGen/AMDGPU/fcanonicalize.f16.ll
@@ -1,11 +1,15 @@
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
+declare half @llvm.fabs.f16(half) #0
declare half @llvm.canonicalize.f16(half) #0
+declare <2 x half> @llvm.fabs.v2f16(<2 x half>) #0
+declare <2 x half> @llvm.canonicalize.v2f16(<2 x half>) #0
; GCN-LABEL: {{^}}v_test_canonicalize_var_f16:
; GCN: v_mul_f16_e32 [[REG:v[0-9]+]], 1.0, {{v[0-9]+}}
; GCN: buffer_store_short [[REG]]
-define void @v_test_canonicalize_var_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @v_test_canonicalize_var_f16(half addrspace(1)* %out) #1 {
%val = load half, half addrspace(1)* %out
%canonicalized = call half @llvm.canonicalize.f16(half %val)
store half %canonicalized, half addrspace(1)* %out
@@ -15,17 +19,51 @@ define void @v_test_canonicalize_var_f16(half addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}s_test_canonicalize_var_f16:
; GCN: v_mul_f16_e64 [[REG:v[0-9]+]], 1.0, {{s[0-9]+}}
; GCN: buffer_store_short [[REG]]
-define void @s_test_canonicalize_var_f16(half addrspace(1)* %out, i16 zeroext %val.arg) #1 {
+define amdgpu_kernel void @s_test_canonicalize_var_f16(half addrspace(1)* %out, i16 zeroext %val.arg) #1 {
%val = bitcast i16 %val.arg to half
%canonicalized = call half @llvm.canonicalize.f16(half %val)
store half %canonicalized, half addrspace(1)* %out
ret void
}
+; GCN-LABEL: {{^}}v_test_canonicalize_fabs_var_f16:
+; GCN: v_mul_f16_e64 [[REG:v[0-9]+]], 1.0, |{{v[0-9]+}}|
+; GCN: buffer_store_short [[REG]]
+define amdgpu_kernel void @v_test_canonicalize_fabs_var_f16(half addrspace(1)* %out) #1 {
+ %val = load half, half addrspace(1)* %out
+ %val.fabs = call half @llvm.fabs.f16(half %val)
+ %canonicalized = call half @llvm.canonicalize.f16(half %val.fabs)
+ store half %canonicalized, half addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_canonicalize_fneg_fabs_var_f16:
+; GCN: v_mul_f16_e64 [[REG:v[0-9]+]], 1.0, -|{{v[0-9]+}}|
+; GCN: buffer_store_short [[REG]]
+define amdgpu_kernel void @v_test_canonicalize_fneg_fabs_var_f16(half addrspace(1)* %out) #1 {
+ %val = load half, half addrspace(1)* %out
+ %val.fabs = call half @llvm.fabs.f16(half %val)
+ %val.fabs.fneg = fsub half -0.0, %val.fabs
+ %canonicalized = call half @llvm.canonicalize.f16(half %val.fabs.fneg)
+ store half %canonicalized, half addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_canonicalize_fneg_var_f16:
+; GCN: v_mul_f16_e64 [[REG:v[0-9]+]], 1.0, -{{v[0-9]+}}
+; GCN: buffer_store_short [[REG]]
+define amdgpu_kernel void @v_test_canonicalize_fneg_var_f16(half addrspace(1)* %out) #1 {
+ %val = load half, half addrspace(1)* %out
+ %val.fneg = fsub half -0.0, %val
+ %canonicalized = call half @llvm.canonicalize.f16(half %val.fneg)
+ store half %canonicalized, half addrspace(1)* %out
+ ret void
+}
+
; GCN-LABEL: {{^}}test_fold_canonicalize_p0_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_p0_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_p0_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 0.0)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -34,7 +72,7 @@ define void @test_fold_canonicalize_p0_f16(half addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_n0_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffff8000{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_n0_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_n0_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half -0.0)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -43,7 +81,7 @@ define void @test_fold_canonicalize_n0_f16(half addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_p1_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3c00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_p1_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_p1_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 1.0)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -52,7 +90,7 @@ define void @test_fold_canonicalize_p1_f16(half addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_n1_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffffbc00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_n1_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_n1_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half -1.0)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -61,16 +99,16 @@ define void @test_fold_canonicalize_n1_f16(half addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_literal_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x4c00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_literal_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_literal_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 16.0)
store half %canonicalized, half addrspace(1)* %out
ret void
}
-; GCN-LABEL: {{^}}test_no_denormals_fold_canonicalize_denormal0_f16:
-; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}}
+; GCN-LABEL: {{^}}test_default_denormals_fold_canonicalize_denormal0_f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3ff{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_no_denormals_fold_canonicalize_denormal0_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_default_denormals_fold_canonicalize_denormal0_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 0xH03FF)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -79,16 +117,16 @@ define void @test_no_denormals_fold_canonicalize_denormal0_f16(half addrspace(1)
; GCN-LABEL: {{^}}test_denormals_fold_canonicalize_denormal0_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3ff{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_denormals_fold_canonicalize_denormal0_f16(half addrspace(1)* %out) #3 {
+define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal0_f16(half addrspace(1)* %out) #3 {
%canonicalized = call half @llvm.canonicalize.f16(half 0xH03FF)
store half %canonicalized, half addrspace(1)* %out
ret void
}
-; GCN-LABEL: {{^}}test_no_denormals_fold_canonicalize_denormal1_f16:
-; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}}
+; GCN-LABEL: {{^}}test_default_denormals_fold_canonicalize_denormal1_f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffff83ff{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_no_denormals_fold_canonicalize_denormal1_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_default_denormals_fold_canonicalize_denormal1_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 0xH83FF)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -97,7 +135,7 @@ define void @test_no_denormals_fold_canonicalize_denormal1_f16(half addrspace(1)
; GCN-LABEL: {{^}}test_denormals_fold_canonicalize_denormal1_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffff83ff{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_denormals_fold_canonicalize_denormal1_f16(half addrspace(1)* %out) #3 {
+define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal1_f16(half addrspace(1)* %out) #3 {
%canonicalized = call half @llvm.canonicalize.f16(half 0xH83FF)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -106,7 +144,7 @@ define void @test_denormals_fold_canonicalize_denormal1_f16(half addrspace(1)* %
; GCN-LABEL: {{^}}test_fold_canonicalize_qnan_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7c00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_qnan_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_qnan_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 0xH7C00)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -115,7 +153,7 @@ define void @test_fold_canonicalize_qnan_f16(half addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_qnan_value_neg1_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_qnan_value_neg1_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg1_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half bitcast (i16 -1 to half))
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -124,7 +162,7 @@ define void @test_fold_canonicalize_qnan_value_neg1_f16(half addrspace(1)* %out)
; GCN-LABEL: {{^}}test_fold_canonicalize_qnan_value_neg2_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_qnan_value_neg2_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg2_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half bitcast (i16 -2 to half))
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -133,7 +171,7 @@ define void @test_fold_canonicalize_qnan_value_neg2_f16(half addrspace(1)* %out)
; GCN-LABEL: {{^}}test_fold_canonicalize_snan0_value_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_snan0_value_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan0_value_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 0xH7C01)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -142,7 +180,7 @@ define void @test_fold_canonicalize_snan0_value_f16(half addrspace(1)* %out) #1
; GCN-LABEL: {{^}}test_fold_canonicalize_snan1_value_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_snan1_value_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan1_value_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 0xH7DFF)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -151,7 +189,7 @@ define void @test_fold_canonicalize_snan1_value_f16(half addrspace(1)* %out) #1
; GCN-LABEL: {{^}}test_fold_canonicalize_snan2_value_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_snan2_value_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan2_value_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 0xHFDFF)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -160,13 +198,244 @@ define void @test_fold_canonicalize_snan2_value_f16(half addrspace(1)* %out) #1
; GCN-LABEL: {{^}}test_fold_canonicalize_snan3_value_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_snan3_value_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan3_value_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 0xHFC01)
store half %canonicalized, half addrspace(1)* %out
ret void
}
+; GCN-LABEL: {{^}}v_test_canonicalize_var_v2f16:
+; VI: v_mul_f16_e32 [[REG0:v[0-9]+]], 1.0, {{v[0-9]+}}
+; VI-DAG: v_mul_f16_e32 [[REG1:v[0-9]+]], 1.0, {{v[0-9]+}}
+; VI-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 16,
+; VI-NOT: v_and_b32
+
+; GFX9: v_pk_mul_f16 [[REG:v[0-9]+]], 1.0, {{v[0-9]+$}}
+; GFX9: buffer_store_dword [[REG]]
+define amdgpu_kernel void @v_test_canonicalize_var_v2f16(<2 x half> addrspace(1)* %out) #1 {
+ %val = load <2 x half>, <2 x half> addrspace(1)* %out
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %val)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; FIXME: Fold modifier
+; GCN-LABEL: {{^}}v_test_canonicalize_fabs_var_v2f16:
+; VI-DAG: v_bfe_u32
+; VI-DAG: v_and_b32_e32 v{{[0-9]+}}, 0x7fff7fff, v{{[0-9]+}}
+; VI: v_mul_f16_e32 [[REG0:v[0-9]+]], 1.0, v{{[0-9]+}}
+; VI: v_mul_f16_e32 [[REG1:v[0-9]+]], 1.0, v{{[0-9]+}}
+; VI-NOT: 0xffff
+; VI: v_or_b32
+
+; GFX9: v_and_b32_e32 [[ABS:v[0-9]+]], 0x7fff7fff, v{{[0-9]+}}
+; GFX9: v_pk_mul_f16 [[REG:v[0-9]+]], 1.0, [[ABS]]{{$}}
+; GCN: buffer_store_dword
+define amdgpu_kernel void @v_test_canonicalize_fabs_var_v2f16(<2 x half> addrspace(1)* %out) #1 {
+ %val = load <2 x half>, <2 x half> addrspace(1)* %out
+ %val.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %val)
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %val.fabs)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_canonicalize_fneg_fabs_var_v2f16:
+; VI: v_or_b32_e32 v{{[0-9]+}}, 0x80008000, v{{[0-9]+}}
+; VI: v_mul_f16_e32 [[REG0:v[0-9]+]], 1.0, v{{[0-9]+}}
+; VI: v_mul_f16_e32 [[REG1:v[0-9]+]], 1.0, v{{[0-9]+}}
+; VI: v_or_b32
+
+; GFX9: v_and_b32_e32 [[ABS:v[0-9]+]], 0x7fff7fff, v{{[0-9]+}}
+; GFX9: v_pk_mul_f16 [[REG:v[0-9]+]], 1.0, [[ABS]] neg_lo:[0,1] neg_hi:[0,1]{{$}}
+; GCN: buffer_store_dword
+define amdgpu_kernel void @v_test_canonicalize_fneg_fabs_var_v2f16(<2 x half> addrspace(1)* %out) #1 {
+ %val = load <2 x half>, <2 x half> addrspace(1)* %out
+ %val.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %val)
+ %val.fabs.fneg = fsub <2 x half> <half -0.0, half -0.0>, %val.fabs
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %val.fabs.fneg)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; FIXME: Fold modifier
+; GCN-LABEL: {{^}}v_test_canonicalize_fneg_var_v2f16:
+; VI: v_xor_b32_e32 [[FNEG:v[0-9]+]], 0x80008000, v{{[0-9]+}}
+; VI-DAG: v_lshrrev_b32_e32 [[FNEG_HI:v[0-9]+]], 16, [[FNEG]]
+; VI-DAG: v_mul_f16_e32 [[REG1:v[0-9]+]], 1.0, [[FNEG_HI]]
+; VI-DAG: v_mul_f16_e32 [[REG0:v[0-9]+]], 1.0, [[FNEG]]
+; VI-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 16,
+; VI-NOT: 0xffff
+
+; GFX9: v_pk_mul_f16 [[REG:v[0-9]+]], 1.0, {{v[0-9]+}} neg_lo:[0,1] neg_hi:[0,1]{{$}}
+; GFX9: buffer_store_dword [[REG]]
+define amdgpu_kernel void @v_test_canonicalize_fneg_var_v2f16(<2 x half> addrspace(1)* %out) #1 {
+ %val = load <2 x half>, <2 x half> addrspace(1)* %out
+ %fneg.val = fsub <2 x half> <half -0.0, half -0.0>, %val
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %fneg.val)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_test_canonicalize_var_v2f16:
+; VI: v_mul_f16_e64 [[REG0:v[0-9]+]], 1.0, {{s[0-9]+}}
+; VI-DAG: v_mul_f16_e64 [[REG1:v[0-9]+]], 1.0, {{s[0-9]+}}
+; VI-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 16,
+; VI-NOT: v_and_b32
+
+; GFX9: v_pk_mul_f16 [[REG:v[0-9]+]], 1.0, {{s[0-9]+$}}
+; GFX9: buffer_store_dword [[REG]]
+define amdgpu_kernel void @s_test_canonicalize_var_v2f16(<2 x half> addrspace(1)* %out, i32 zeroext %val.arg) #1 {
+ %val = bitcast i32 %val.arg to <2 x half>
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %val)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fold_canonicalize_p0_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @test_fold_canonicalize_p0_v2f16(<2 x half> addrspace(1)* %out) #1 {
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> zeroinitializer)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fold_canonicalize_n0_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x80008000{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @test_fold_canonicalize_n0_v2f16(<2 x half> addrspace(1)* %out) #1 {
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half -0.0, half -0.0>)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fold_canonicalize_p1_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3c003c00{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @test_fold_canonicalize_p1_v2f16(<2 x half> addrspace(1)* %out) #1 {
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 1.0, half 1.0>)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fold_canonicalize_n1_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0xbc00bc00{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @test_fold_canonicalize_n1_v2f16(<2 x half> addrspace(1)* %out) #1 {
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half -1.0, half -1.0>)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fold_canonicalize_literal_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x4c004c00{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @test_fold_canonicalize_literal_v2f16(<2 x half> addrspace(1)* %out) #1 {
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 16.0, half 16.0>)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_no_denormals_fold_canonicalize_denormal0_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3ff03ff{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @test_no_denormals_fold_canonicalize_denormal0_v2f16(<2 x half> addrspace(1)* %out) #1 {
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 0xH03FF, half 0xH03FF>)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_denormals_fold_canonicalize_denormal0_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3ff03ff{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal0_v2f16(<2 x half> addrspace(1)* %out) #3 {
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 0xH03FF, half 0xH03FF>)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_no_denormals_fold_canonicalize_denormal1_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x83ff83ff{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @test_no_denormals_fold_canonicalize_denormal1_v2f16(<2 x half> addrspace(1)* %out) #1 {
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 0xH83FF, half 0xH83FF>)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_denormals_fold_canonicalize_denormal1_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x83ff83ff{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal1_v2f16(<2 x half> addrspace(1)* %out) #3 {
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 0xH83FF, half 0xH83FF>)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fold_canonicalize_qnan_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7c007c00{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @test_fold_canonicalize_qnan_v2f16(<2 x half> addrspace(1)* %out) #1 {
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 0xH7C00, half 0xH7C00>)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fold_canonicalize_qnan_value_neg1_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e007e00{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg1_v2f16(<2 x half> addrspace(1)* %out) #1 {
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> bitcast (i32 -1 to <2 x half>))
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fold_canonicalize_qnan_value_neg2_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e007e00{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg2_v2f16(<2 x half> addrspace(1)* %out) #1 {
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half bitcast (i16 -2 to half), half bitcast (i16 -2 to half)>)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fold_canonicalize_snan0_value_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e007e00{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @test_fold_canonicalize_snan0_value_v2f16(<2 x half> addrspace(1)* %out) #1 {
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 0xH7C01, half 0xH7C01>)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fold_canonicalize_snan1_value_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e007e00{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @test_fold_canonicalize_snan1_value_v2f16(<2 x half> addrspace(1)* %out) #1 {
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 0xH7DFF, half 0xH7DFF>)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fold_canonicalize_snan2_value_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e007e00{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @test_fold_canonicalize_snan2_value_v2f16(<2 x half> addrspace(1)* %out) #1 {
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 0xHFDFF, half 0xHFDFF>)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fold_canonicalize_snan3_value_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e007e00{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @test_fold_canonicalize_snan3_value_v2f16(<2 x half> addrspace(1)* %out) #1 {
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 0xHFC01, half 0xHFC01>)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
attributes #0 = { nounwind readnone }
-attributes #1 = { nounwind "target-features"="-flat-for-global" }
-attributes #2 = { nounwind "target-features"="-flat-for-global,-fp16-denormals,-fp16-denormals" }
-attributes #3 = { nounwind "target-features"="-flat-for-global,+fp16-denormals,+fp64-denormals" }
+attributes #1 = { nounwind }
+attributes #2 = { nounwind "target-features"="-fp64-fp16-denormals" }
+attributes #3 = { nounwind "target-features"="+fp64-fp16-denormals" }
diff --git a/test/CodeGen/AMDGPU/fcanonicalize.ll b/test/CodeGen/AMDGPU/fcanonicalize.ll
index 981d88dfe94e..8c385f40b1c5 100644
--- a/test/CodeGen/AMDGPU/fcanonicalize.ll
+++ b/test/CodeGen/AMDGPU/fcanonicalize.ll
@@ -1,12 +1,14 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+declare float @llvm.fabs.f32(float) #0
declare float @llvm.canonicalize.f32(float) #0
+declare double @llvm.fabs.f64(double) #0
declare double @llvm.canonicalize.f64(double) #0
; GCN-LABEL: {{^}}v_test_canonicalize_var_f32:
; GCN: v_mul_f32_e32 [[REG:v[0-9]+]], 1.0, {{v[0-9]+}}
; GCN: buffer_store_dword [[REG]]
-define void @v_test_canonicalize_var_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @v_test_canonicalize_var_f32(float addrspace(1)* %out) #1 {
%val = load float, float addrspace(1)* %out
%canonicalized = call float @llvm.canonicalize.f32(float %val)
store float %canonicalized, float addrspace(1)* %out
@@ -16,16 +18,50 @@ define void @v_test_canonicalize_var_f32(float addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}s_test_canonicalize_var_f32:
; GCN: v_mul_f32_e64 [[REG:v[0-9]+]], 1.0, {{s[0-9]+}}
; GCN: buffer_store_dword [[REG]]
-define void @s_test_canonicalize_var_f32(float addrspace(1)* %out, float %val) #1 {
+define amdgpu_kernel void @s_test_canonicalize_var_f32(float addrspace(1)* %out, float %val) #1 {
%canonicalized = call float @llvm.canonicalize.f32(float %val)
store float %canonicalized, float addrspace(1)* %out
ret void
}
+; GCN-LABEL: {{^}}v_test_canonicalize_fabs_var_f32:
+; GCN: v_mul_f32_e64 [[REG:v[0-9]+]], 1.0, |{{v[0-9]+}}|
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @v_test_canonicalize_fabs_var_f32(float addrspace(1)* %out) #1 {
+ %val = load float, float addrspace(1)* %out
+ %val.fabs = call float @llvm.fabs.f32(float %val)
+ %canonicalized = call float @llvm.canonicalize.f32(float %val.fabs)
+ store float %canonicalized, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_canonicalize_fneg_fabs_var_f32:
+; GCN: v_mul_f32_e64 [[REG:v[0-9]+]], 1.0, -|{{v[0-9]+}}|
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @v_test_canonicalize_fneg_fabs_var_f32(float addrspace(1)* %out) #1 {
+ %val = load float, float addrspace(1)* %out
+ %val.fabs = call float @llvm.fabs.f32(float %val)
+ %val.fabs.fneg = fsub float -0.0, %val.fabs
+ %canonicalized = call float @llvm.canonicalize.f32(float %val.fabs.fneg)
+ store float %canonicalized, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_canonicalize_fneg_var_f32:
+; GCN: v_mul_f32_e64 [[REG:v[0-9]+]], 1.0, -{{v[0-9]+}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @v_test_canonicalize_fneg_var_f32(float addrspace(1)* %out) #1 {
+ %val = load float, float addrspace(1)* %out
+ %val.fneg = fsub float -0.0, %val
+ %canonicalized = call float @llvm.canonicalize.f32(float %val.fneg)
+ store float %canonicalized, float addrspace(1)* %out
+ ret void
+}
+
; GCN-LABEL: {{^}}test_fold_canonicalize_p0_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_p0_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_p0_f32(float addrspace(1)* %out) #1 {
%canonicalized = call float @llvm.canonicalize.f32(float 0.0)
store float %canonicalized, float addrspace(1)* %out
ret void
@@ -34,7 +70,7 @@ define void @test_fold_canonicalize_p0_f32(float addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_n0_f32:
; GCN: v_bfrev_b32_e32 [[REG:v[0-9]+]], 1{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_n0_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_n0_f32(float addrspace(1)* %out) #1 {
%canonicalized = call float @llvm.canonicalize.f32(float -0.0)
store float %canonicalized, float addrspace(1)* %out
ret void
@@ -43,7 +79,7 @@ define void @test_fold_canonicalize_n0_f32(float addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_p1_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 1.0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_p1_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_p1_f32(float addrspace(1)* %out) #1 {
%canonicalized = call float @llvm.canonicalize.f32(float 1.0)
store float %canonicalized, float addrspace(1)* %out
ret void
@@ -52,7 +88,7 @@ define void @test_fold_canonicalize_p1_f32(float addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_n1_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], -1.0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_n1_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_n1_f32(float addrspace(1)* %out) #1 {
%canonicalized = call float @llvm.canonicalize.f32(float -1.0)
store float %canonicalized, float addrspace(1)* %out
ret void
@@ -61,7 +97,7 @@ define void @test_fold_canonicalize_n1_f32(float addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_literal_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x41800000{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_literal_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_literal_f32(float addrspace(1)* %out) #1 {
%canonicalized = call float @llvm.canonicalize.f32(float 16.0)
store float %canonicalized, float addrspace(1)* %out
ret void
@@ -70,7 +106,7 @@ define void @test_fold_canonicalize_literal_f32(float addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_no_denormals_fold_canonicalize_denormal0_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_no_denormals_fold_canonicalize_denormal0_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_no_denormals_fold_canonicalize_denormal0_f32(float addrspace(1)* %out) #1 {
%canonicalized = call float @llvm.canonicalize.f32(float bitcast (i32 8388607 to float))
store float %canonicalized, float addrspace(1)* %out
ret void
@@ -79,7 +115,7 @@ define void @test_no_denormals_fold_canonicalize_denormal0_f32(float addrspace(1
; GCN-LABEL: {{^}}test_denormals_fold_canonicalize_denormal0_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7fffff{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_denormals_fold_canonicalize_denormal0_f32(float addrspace(1)* %out) #3 {
+define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal0_f32(float addrspace(1)* %out) #3 {
%canonicalized = call float @llvm.canonicalize.f32(float bitcast (i32 8388607 to float))
store float %canonicalized, float addrspace(1)* %out
ret void
@@ -88,7 +124,7 @@ define void @test_denormals_fold_canonicalize_denormal0_f32(float addrspace(1)*
; GCN-LABEL: {{^}}test_no_denormals_fold_canonicalize_denormal1_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_no_denormals_fold_canonicalize_denormal1_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_no_denormals_fold_canonicalize_denormal1_f32(float addrspace(1)* %out) #1 {
%canonicalized = call float @llvm.canonicalize.f32(float bitcast (i32 2155872255 to float))
store float %canonicalized, float addrspace(1)* %out
ret void
@@ -97,7 +133,7 @@ define void @test_no_denormals_fold_canonicalize_denormal1_f32(float addrspace(1
; GCN-LABEL: {{^}}test_denormals_fold_canonicalize_denormal1_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x807fffff{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_denormals_fold_canonicalize_denormal1_f32(float addrspace(1)* %out) #3 {
+define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal1_f32(float addrspace(1)* %out) #3 {
%canonicalized = call float @llvm.canonicalize.f32(float bitcast (i32 2155872255 to float))
store float %canonicalized, float addrspace(1)* %out
ret void
@@ -106,7 +142,7 @@ define void @test_denormals_fold_canonicalize_denormal1_f32(float addrspace(1)*
; GCN-LABEL: {{^}}test_fold_canonicalize_qnan_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7fc00000{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_qnan_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_qnan_f32(float addrspace(1)* %out) #1 {
%canonicalized = call float @llvm.canonicalize.f32(float 0x7FF8000000000000)
store float %canonicalized, float addrspace(1)* %out
ret void
@@ -115,7 +151,7 @@ define void @test_fold_canonicalize_qnan_f32(float addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_qnan_value_neg1_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7fc00000{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_qnan_value_neg1_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg1_f32(float addrspace(1)* %out) #1 {
%canonicalized = call float @llvm.canonicalize.f32(float bitcast (i32 -1 to float))
store float %canonicalized, float addrspace(1)* %out
ret void
@@ -124,7 +160,7 @@ define void @test_fold_canonicalize_qnan_value_neg1_f32(float addrspace(1)* %out
; GCN-LABEL: {{^}}test_fold_canonicalize_qnan_value_neg2_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7fc00000{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_qnan_value_neg2_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg2_f32(float addrspace(1)* %out) #1 {
%canonicalized = call float @llvm.canonicalize.f32(float bitcast (i32 -2 to float))
store float %canonicalized, float addrspace(1)* %out
ret void
@@ -133,7 +169,7 @@ define void @test_fold_canonicalize_qnan_value_neg2_f32(float addrspace(1)* %out
; GCN-LABEL: {{^}}test_fold_canonicalize_snan0_value_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7fc00000{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_snan0_value_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan0_value_f32(float addrspace(1)* %out) #1 {
%canonicalized = call float @llvm.canonicalize.f32(float bitcast (i32 2139095041 to float))
store float %canonicalized, float addrspace(1)* %out
ret void
@@ -142,7 +178,7 @@ define void @test_fold_canonicalize_snan0_value_f32(float addrspace(1)* %out) #1
; GCN-LABEL: {{^}}test_fold_canonicalize_snan1_value_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7fc00000{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_snan1_value_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan1_value_f32(float addrspace(1)* %out) #1 {
%canonicalized = call float @llvm.canonicalize.f32(float bitcast (i32 2143289343 to float))
store float %canonicalized, float addrspace(1)* %out
ret void
@@ -151,7 +187,7 @@ define void @test_fold_canonicalize_snan1_value_f32(float addrspace(1)* %out) #1
; GCN-LABEL: {{^}}test_fold_canonicalize_snan2_value_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7fc00000{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_snan2_value_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan2_value_f32(float addrspace(1)* %out) #1 {
%canonicalized = call float @llvm.canonicalize.f32(float bitcast (i32 4286578689 to float))
store float %canonicalized, float addrspace(1)* %out
ret void
@@ -160,7 +196,7 @@ define void @test_fold_canonicalize_snan2_value_f32(float addrspace(1)* %out) #1
; GCN-LABEL: {{^}}test_fold_canonicalize_snan3_value_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7fc00000{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_snan3_value_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan3_value_f32(float addrspace(1)* %out) #1 {
%canonicalized = call float @llvm.canonicalize.f32(float bitcast (i32 4290772991 to float))
store float %canonicalized, float addrspace(1)* %out
ret void
@@ -169,7 +205,7 @@ define void @test_fold_canonicalize_snan3_value_f32(float addrspace(1)* %out) #1
; GCN-LABEL: {{^}}v_test_canonicalize_var_f64:
; GCN: v_mul_f64 [[REG:v\[[0-9]+:[0-9]+\]]], 1.0, {{v\[[0-9]+:[0-9]+\]}}
; GCN: buffer_store_dwordx2 [[REG]]
-define void @v_test_canonicalize_var_f64(double addrspace(1)* %out) #1 {
+define amdgpu_kernel void @v_test_canonicalize_var_f64(double addrspace(1)* %out) #1 {
%val = load double, double addrspace(1)* %out
%canonicalized = call double @llvm.canonicalize.f64(double %val)
store double %canonicalized, double addrspace(1)* %out
@@ -179,17 +215,51 @@ define void @v_test_canonicalize_var_f64(double addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}s_test_canonicalize_var_f64:
; GCN: v_mul_f64 [[REG:v\[[0-9]+:[0-9]+\]]], 1.0, {{s\[[0-9]+:[0-9]+\]}}
; GCN: buffer_store_dwordx2 [[REG]]
-define void @s_test_canonicalize_var_f64(double addrspace(1)* %out, double %val) #1 {
+define amdgpu_kernel void @s_test_canonicalize_var_f64(double addrspace(1)* %out, double %val) #1 {
%canonicalized = call double @llvm.canonicalize.f64(double %val)
store double %canonicalized, double addrspace(1)* %out
ret void
}
+; GCN-LABEL: {{^}}v_test_canonicalize_fabs_var_f64:
+; GCN: v_mul_f64 [[REG:v\[[0-9]+:[0-9]+\]]], 1.0, |{{v\[[0-9]+:[0-9]+\]}}|
+; GCN: buffer_store_dwordx2 [[REG]]
+define amdgpu_kernel void @v_test_canonicalize_fabs_var_f64(double addrspace(1)* %out) #1 {
+ %val = load double, double addrspace(1)* %out
+ %val.fabs = call double @llvm.fabs.f64(double %val)
+ %canonicalized = call double @llvm.canonicalize.f64(double %val.fabs)
+ store double %canonicalized, double addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_canonicalize_fneg_fabs_var_f64:
+; GCN: v_mul_f64 [[REG:v\[[0-9]+:[0-9]+\]]], 1.0, -|{{v\[[0-9]+:[0-9]+\]}}|
+; GCN: buffer_store_dwordx2 [[REG]]
+define amdgpu_kernel void @v_test_canonicalize_fneg_fabs_var_f64(double addrspace(1)* %out) #1 {
+ %val = load double, double addrspace(1)* %out
+ %val.fabs = call double @llvm.fabs.f64(double %val)
+ %val.fabs.fneg = fsub double -0.0, %val.fabs
+ %canonicalized = call double @llvm.canonicalize.f64(double %val.fabs.fneg)
+ store double %canonicalized, double addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_canonicalize_fneg_var_f64:
+; GCN: v_mul_f64 [[REG:v\[[0-9]+:[0-9]+\]]], 1.0, -{{v\[[0-9]+:[0-9]+\]}}
+; GCN: buffer_store_dwordx2 [[REG]]
+define amdgpu_kernel void @v_test_canonicalize_fneg_var_f64(double addrspace(1)* %out) #1 {
+ %val = load double, double addrspace(1)* %out
+ %val.fneg = fsub double -0.0, %val
+ %canonicalized = call double @llvm.canonicalize.f64(double %val.fneg)
+ store double %canonicalized, double addrspace(1)* %out
+ ret void
+}
+
; GCN-LABEL: {{^}}test_fold_canonicalize_p0_f64:
; GCN: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: v_mov_b32_e32 v[[HI:[0-9]+]], v[[LO]]{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @test_fold_canonicalize_p0_f64(double addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_p0_f64(double addrspace(1)* %out) #1 {
%canonicalized = call double @llvm.canonicalize.f64(double 0.0)
store double %canonicalized, double addrspace(1)* %out
ret void
@@ -199,7 +269,7 @@ define void @test_fold_canonicalize_p0_f64(double addrspace(1)* %out) #1 {
; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN-DAG: v_bfrev_b32_e32 v[[HI:[0-9]+]], 1{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @test_fold_canonicalize_n0_f64(double addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_n0_f64(double addrspace(1)* %out) #1 {
%canonicalized = call double @llvm.canonicalize.f64(double -0.0)
store double %canonicalized, double addrspace(1)* %out
ret void
@@ -209,7 +279,7 @@ define void @test_fold_canonicalize_n0_f64(double addrspace(1)* %out) #1 {
; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0x3ff00000{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @test_fold_canonicalize_p1_f64(double addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_p1_f64(double addrspace(1)* %out) #1 {
%canonicalized = call double @llvm.canonicalize.f64(double 1.0)
store double %canonicalized, double addrspace(1)* %out
ret void
@@ -219,7 +289,7 @@ define void @test_fold_canonicalize_p1_f64(double addrspace(1)* %out) #1 {
; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0xbff00000{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @test_fold_canonicalize_n1_f64(double addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_n1_f64(double addrspace(1)* %out) #1 {
%canonicalized = call double @llvm.canonicalize.f64(double -1.0)
store double %canonicalized, double addrspace(1)* %out
ret void
@@ -229,7 +299,7 @@ define void @test_fold_canonicalize_n1_f64(double addrspace(1)* %out) #1 {
; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0x40300000{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @test_fold_canonicalize_literal_f64(double addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_literal_f64(double addrspace(1)* %out) #1 {
%canonicalized = call double @llvm.canonicalize.f64(double 16.0)
store double %canonicalized, double addrspace(1)* %out
ret void
@@ -239,7 +309,7 @@ define void @test_fold_canonicalize_literal_f64(double addrspace(1)* %out) #1 {
; GCN: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: v_mov_b32_e32 v[[HI:[0-9]+]], v[[LO]]{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @test_no_denormals_fold_canonicalize_denormal0_f64(double addrspace(1)* %out) #2 {
+define amdgpu_kernel void @test_no_denormals_fold_canonicalize_denormal0_f64(double addrspace(1)* %out) #2 {
%canonicalized = call double @llvm.canonicalize.f64(double bitcast (i64 4503599627370495 to double))
store double %canonicalized, double addrspace(1)* %out
ret void
@@ -249,7 +319,7 @@ define void @test_no_denormals_fold_canonicalize_denormal0_f64(double addrspace(
; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], -1{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0xfffff{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @test_denormals_fold_canonicalize_denormal0_f64(double addrspace(1)* %out) #3 {
+define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal0_f64(double addrspace(1)* %out) #3 {
%canonicalized = call double @llvm.canonicalize.f64(double bitcast (i64 4503599627370495 to double))
store double %canonicalized, double addrspace(1)* %out
ret void
@@ -259,7 +329,7 @@ define void @test_denormals_fold_canonicalize_denormal0_f64(double addrspace(1)*
; GCN: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: v_mov_b32_e32 v[[HI:[0-9]+]], v[[LO]]{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @test_no_denormals_fold_canonicalize_denormal1_f64(double addrspace(1)* %out) #2 {
+define amdgpu_kernel void @test_no_denormals_fold_canonicalize_denormal1_f64(double addrspace(1)* %out) #2 {
%canonicalized = call double @llvm.canonicalize.f64(double bitcast (i64 9227875636482146303 to double))
store double %canonicalized, double addrspace(1)* %out
ret void
@@ -269,7 +339,7 @@ define void @test_no_denormals_fold_canonicalize_denormal1_f64(double addrspace(
; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], -1{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0x800fffff{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @test_denormals_fold_canonicalize_denormal1_f64(double addrspace(1)* %out) #3 {
+define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal1_f64(double addrspace(1)* %out) #3 {
%canonicalized = call double @llvm.canonicalize.f64(double bitcast (i64 9227875636482146303 to double))
store double %canonicalized, double addrspace(1)* %out
ret void
@@ -279,7 +349,7 @@ define void @test_denormals_fold_canonicalize_denormal1_f64(double addrspace(1)*
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0x7ff80000{{$}}
; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @test_fold_canonicalize_qnan_f64(double addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_qnan_f64(double addrspace(1)* %out) #1 {
%canonicalized = call double @llvm.canonicalize.f64(double 0x7FF8000000000000)
store double %canonicalized, double addrspace(1)* %out
ret void
@@ -289,7 +359,7 @@ define void @test_fold_canonicalize_qnan_f64(double addrspace(1)* %out) #1 {
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0x7ff80000{{$}}
; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @test_fold_canonicalize_qnan_value_neg1_f64(double addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg1_f64(double addrspace(1)* %out) #1 {
%canonicalized = call double @llvm.canonicalize.f64(double bitcast (i64 -1 to double))
store double %canonicalized, double addrspace(1)* %out
ret void
@@ -299,7 +369,7 @@ define void @test_fold_canonicalize_qnan_value_neg1_f64(double addrspace(1)* %ou
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0x7ff80000{{$}}
; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @test_fold_canonicalize_qnan_value_neg2_f64(double addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg2_f64(double addrspace(1)* %out) #1 {
%canonicalized = call double @llvm.canonicalize.f64(double bitcast (i64 -2 to double))
store double %canonicalized, double addrspace(1)* %out
ret void
@@ -309,7 +379,7 @@ define void @test_fold_canonicalize_qnan_value_neg2_f64(double addrspace(1)* %ou
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0x7ff80000{{$}}
; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @test_fold_canonicalize_snan0_value_f64(double addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan0_value_f64(double addrspace(1)* %out) #1 {
%canonicalized = call double @llvm.canonicalize.f64(double bitcast (i64 9218868437227405313 to double))
store double %canonicalized, double addrspace(1)* %out
ret void
@@ -319,7 +389,7 @@ define void @test_fold_canonicalize_snan0_value_f64(double addrspace(1)* %out) #
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0x7ff80000{{$}}
; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @test_fold_canonicalize_snan1_value_f64(double addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan1_value_f64(double addrspace(1)* %out) #1 {
%canonicalized = call double @llvm.canonicalize.f64(double bitcast (i64 9223372036854775807 to double))
store double %canonicalized, double addrspace(1)* %out
ret void
@@ -329,7 +399,7 @@ define void @test_fold_canonicalize_snan1_value_f64(double addrspace(1)* %out) #
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0x7ff80000{{$}}
; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @test_fold_canonicalize_snan2_value_f64(double addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan2_value_f64(double addrspace(1)* %out) #1 {
%canonicalized = call double @llvm.canonicalize.f64(double bitcast (i64 18442240474082181121 to double))
store double %canonicalized, double addrspace(1)* %out
ret void
@@ -339,7 +409,7 @@ define void @test_fold_canonicalize_snan2_value_f64(double addrspace(1)* %out) #
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0x7ff80000{{$}}
; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @test_fold_canonicalize_snan3_value_f64(double addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan3_value_f64(double addrspace(1)* %out) #1 {
%canonicalized = call double @llvm.canonicalize.f64(double bitcast (i64 18446744073709551615 to double))
store double %canonicalized, double addrspace(1)* %out
ret void
@@ -347,5 +417,5 @@ define void @test_fold_canonicalize_snan3_value_f64(double addrspace(1)* %out) #
attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }
-attributes #2 = { nounwind "target-features"="-fp32-denormals,-fp64-denormals" }
-attributes #3 = { nounwind "target-features"="+fp32-denormals,+fp64-denormals" }
+attributes #2 = { nounwind "target-features"="-fp32-denormals,-fp64-fp16-denormals" }
+attributes #3 = { nounwind "target-features"="+fp32-denormals,+fp64-fp16-denormals" }
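The pattern running through these AMDGPU test updates is the calling convention: kernel entry points move from the default "define void" to "define amdgpu_kernel void", so codegen treats them as dispatchable kernels rather than ordinary callable functions. A minimal sketch of the new form (a hypothetical kernel, not one from this patch):

define amdgpu_kernel void @example_kernel(float addrspace(1)* %out) {
entry:
  store float 0.000000e+00, float addrspace(1)* %out
  ret void
}

The attribute hunk above also reflects a target-feature rename: the flag formerly spelled -fp64-denormals now appears as -fp64-fp16-denormals, a single feature covering both f64 and f16 denormal handling.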
diff --git a/test/CodeGen/AMDGPU/fceil.ll b/test/CodeGen/AMDGPU/fceil.ll
index efdda78f852b..0b913fda8580 100644
--- a/test/CodeGen/AMDGPU/fceil.ll
+++ b/test/CodeGen/AMDGPU/fceil.ll
@@ -13,7 +13,7 @@ declare <16 x float> @llvm.ceil.v16f32(<16 x float>) nounwind readnone
; SI: v_ceil_f32_e32
; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+\.[XYZW]]]
; EG: CEIL {{\*? *}}[[RESULT]]
-define void @fceil_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @fceil_f32(float addrspace(1)* %out, float %x) {
%y = call float @llvm.ceil.f32(float %x) nounwind readnone
store float %y, float addrspace(1)* %out
ret void
@@ -25,7 +25,7 @@ define void @fceil_f32(float addrspace(1)* %out, float %x) {
; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+]]{{\.[XYZW]}}
; EG: CEIL {{\*? *}}[[RESULT]]
; EG: CEIL {{\*? *}}[[RESULT]]
-define void @fceil_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %x) {
+define amdgpu_kernel void @fceil_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %x) {
%y = call <2 x float> @llvm.ceil.v2f32(<2 x float> %x) nounwind readnone
store <2 x float> %y, <2 x float> addrspace(1)* %out
ret void
@@ -41,7 +41,7 @@ define void @fceil_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %x) {
; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
-define void @fceil_v3f32(<3 x float> addrspace(1)* %out, <3 x float> %x) {
+define amdgpu_kernel void @fceil_v3f32(<3 x float> addrspace(1)* %out, <3 x float> %x) {
%y = call <3 x float> @llvm.ceil.v3f32(<3 x float> %x) nounwind readnone
store <3 x float> %y, <3 x float> addrspace(1)* %out
ret void
@@ -57,7 +57,7 @@ define void @fceil_v3f32(<3 x float> addrspace(1)* %out, <3 x float> %x) {
; EG: CEIL {{\*? *}}[[RESULT]]
; EG: CEIL {{\*? *}}[[RESULT]]
; EG: CEIL {{\*? *}}[[RESULT]]
-define void @fceil_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %x) {
+define amdgpu_kernel void @fceil_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %x) {
%y = call <4 x float> @llvm.ceil.v4f32(<4 x float> %x) nounwind readnone
store <4 x float> %y, <4 x float> addrspace(1)* %out
ret void
@@ -82,7 +82,7 @@ define void @fceil_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %x) {
; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
-define void @fceil_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %x) {
+define amdgpu_kernel void @fceil_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %x) {
%y = call <8 x float> @llvm.ceil.v8f32(<8 x float> %x) nounwind readnone
store <8 x float> %y, <8 x float> addrspace(1)* %out
ret void
@@ -125,7 +125,7 @@ define void @fceil_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %x) {
; EG-DAG: CEIL {{\*? *}}[[RESULT4]]
; EG-DAG: CEIL {{\*? *}}[[RESULT4]]
; EG-DAG: CEIL {{\*? *}}[[RESULT4]]
-define void @fceil_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %x) {
+define amdgpu_kernel void @fceil_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %x) {
%y = call <16 x float> @llvm.ceil.v16f32(<16 x float> %x) nounwind readnone
store <16 x float> %y, <16 x float> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/fceil64.ll b/test/CodeGen/AMDGPU/fceil64.ll
index 98448db5dd24..61572a855620 100644
--- a/test/CodeGen/AMDGPU/fceil64.ll
+++ b/test/CodeGen/AMDGPU/fceil64.ll
@@ -31,7 +31,7 @@ declare <16 x double> @llvm.ceil.v16f64(<16 x double>) nounwind readnone
; SI: v_cndmask_b32
; SI: v_add_f64
; SI: s_endpgm
-define void @fceil_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @fceil_f64(double addrspace(1)* %out, double %x) {
%y = call double @llvm.ceil.f64(double %x) nounwind readnone
store double %y, double addrspace(1)* %out
ret void
@@ -40,7 +40,7 @@ define void @fceil_f64(double addrspace(1)* %out, double %x) {
; FUNC-LABEL: {{^}}fceil_v2f64:
; CI: v_ceil_f64_e32
; CI: v_ceil_f64_e32
-define void @fceil_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
+define amdgpu_kernel void @fceil_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
%y = call <2 x double> @llvm.ceil.v2f64(<2 x double> %x) nounwind readnone
store <2 x double> %y, <2 x double> addrspace(1)* %out
ret void
@@ -50,7 +50,7 @@ define void @fceil_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
; FIXME-CI: v_ceil_f64_e32
; FIXME-CI: v_ceil_f64_e32
; FIXME-CI: v_ceil_f64_e32
-; define void @fceil_v3f64(<3 x double> addrspace(1)* %out, <3 x double> %x) {
+; define amdgpu_kernel void @fceil_v3f64(<3 x double> addrspace(1)* %out, <3 x double> %x) {
; %y = call <3 x double> @llvm.ceil.v3f64(<3 x double> %x) nounwind readnone
; store <3 x double> %y, <3 x double> addrspace(1)* %out
; ret void
@@ -61,7 +61,7 @@ define void @fceil_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
; CI: v_ceil_f64_e32
; CI: v_ceil_f64_e32
; CI: v_ceil_f64_e32
-define void @fceil_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
+define amdgpu_kernel void @fceil_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
%y = call <4 x double> @llvm.ceil.v4f64(<4 x double> %x) nounwind readnone
store <4 x double> %y, <4 x double> addrspace(1)* %out
ret void
@@ -76,7 +76,7 @@ define void @fceil_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
; CI: v_ceil_f64_e32
; CI: v_ceil_f64_e32
; CI: v_ceil_f64_e32
-define void @fceil_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
+define amdgpu_kernel void @fceil_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
%y = call <8 x double> @llvm.ceil.v8f64(<8 x double> %x) nounwind readnone
store <8 x double> %y, <8 x double> addrspace(1)* %out
ret void
@@ -99,7 +99,7 @@ define void @fceil_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
; CI: v_ceil_f64_e32
; CI: v_ceil_f64_e32
; CI: v_ceil_f64_e32
-define void @fceil_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %x) {
+define amdgpu_kernel void @fceil_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %x) {
%y = call <16 x double> @llvm.ceil.v16f64(<16 x double> %x) nounwind readnone
store <16 x double> %y, <16 x double> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/fcmp-cnd.ll b/test/CodeGen/AMDGPU/fcmp-cnd.ll
index 530274f920f0..7f8be804309e 100644
--- a/test/CodeGen/AMDGPU/fcmp-cnd.ll
+++ b/test/CodeGen/AMDGPU/fcmp-cnd.ll
@@ -4,7 +4,7 @@
;registers and literal.x depending on what the optimizer does.
;CHECK: CNDE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @test(i32 addrspace(1)* %out, float addrspace(1)* %in) {
+define amdgpu_kernel void @test(i32 addrspace(1)* %out, float addrspace(1)* %in) {
entry:
%0 = load float, float addrspace(1)* %in
%cmp = fcmp oeq float %0, 0.000000e+00
diff --git a/test/CodeGen/AMDGPU/fcmp-cnde-int-args.ll b/test/CodeGen/AMDGPU/fcmp-cnde-int-args.ll
index c402805feb39..2a848e80b81b 100644
--- a/test/CodeGen/AMDGPU/fcmp-cnde-int-args.ll
+++ b/test/CodeGen/AMDGPU/fcmp-cnde-int-args.ll
@@ -6,7 +6,7 @@
; CHECK: SET{{[A-Z]+}}_DX10
-define void @test(i32 addrspace(1)* %out, float addrspace(1)* %in) {
+define amdgpu_kernel void @test(i32 addrspace(1)* %out, float addrspace(1)* %in) {
entry:
%0 = load float, float addrspace(1)* %in
%cmp = fcmp oeq float %0, 0.000000e+00
diff --git a/test/CodeGen/AMDGPU/fcmp.f16.ll b/test/CodeGen/AMDGPU/fcmp.f16.ll
index a62726f7f068..7916226462f7 100644
--- a/test/CodeGen/AMDGPU/fcmp.f16.ll
+++ b/test/CodeGen/AMDGPU/fcmp.f16.ll
@@ -11,7 +11,7 @@
; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
-define void @fcmp_f16_lt(
+define amdgpu_kernel void @fcmp_f16_lt(
i32 addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -28,16 +28,16 @@ entry:
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cvt_f32_f16_e64 v[[A_F32:[0-9]+]], |v[[A_F16]]|
+; SI: v_cvt_f32_f16_e64 v[[B_F32:[0-9]+]], |v[[B_F16]]|
-; SI: v_cmp_lt_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, |v[[A_F32]]|, |v[[B_F32]]|
+; SI: v_cmp_lt_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
; VI: v_cmp_lt_f16_e64 s{{\[[0-9]+:[0-9]+\]}}, |v[[A_F16]]|, |v[[B_F16]]|
; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
-define void @fcmp_f16_lt_abs(
+define amdgpu_kernel void @fcmp_f16_lt_abs(
i32 addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -62,7 +62,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
-define void @fcmp_f16_eq(
+define amdgpu_kernel void @fcmp_f16_eq(
i32 addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -85,7 +85,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
-define void @fcmp_f16_le(
+define amdgpu_kernel void @fcmp_f16_le(
i32 addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -108,7 +108,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
-define void @fcmp_f16_gt(
+define amdgpu_kernel void @fcmp_f16_gt(
i32 addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -131,7 +131,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
-define void @fcmp_f16_lg(
+define amdgpu_kernel void @fcmp_f16_lg(
i32 addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -154,7 +154,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
-define void @fcmp_f16_ge(
+define amdgpu_kernel void @fcmp_f16_ge(
i32 addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -177,7 +177,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
-define void @fcmp_f16_o(
+define amdgpu_kernel void @fcmp_f16_o(
i32 addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -200,7 +200,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
-define void @fcmp_f16_u(
+define amdgpu_kernel void @fcmp_f16_u(
i32 addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -223,7 +223,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
-define void @fcmp_f16_nge(
+define amdgpu_kernel void @fcmp_f16_nge(
i32 addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -246,7 +246,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
-define void @fcmp_f16_nlg(
+define amdgpu_kernel void @fcmp_f16_nlg(
i32 addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -269,7 +269,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
-define void @fcmp_f16_ngt(
+define amdgpu_kernel void @fcmp_f16_ngt(
i32 addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -292,7 +292,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
-define void @fcmp_f16_nle(
+define amdgpu_kernel void @fcmp_f16_nle(
i32 addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -315,7 +315,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
-define void @fcmp_f16_neq(
+define amdgpu_kernel void @fcmp_f16_neq(
i32 addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -338,7 +338,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
-define void @fcmp_f16_nlt(
+define amdgpu_kernel void @fcmp_f16_nlt(
i32 addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -368,7 +368,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
; GCN: s_endpgm
-define void @fcmp_v2f16_lt(
+define amdgpu_kernel void @fcmp_v2f16_lt(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
@@ -398,7 +398,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
; GCN: s_endpgm
-define void @fcmp_v2f16_eq(
+define amdgpu_kernel void @fcmp_v2f16_eq(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
@@ -428,7 +428,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
; GCN: s_endpgm
-define void @fcmp_v2f16_le(
+define amdgpu_kernel void @fcmp_v2f16_le(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
@@ -458,7 +458,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
; GCN: s_endpgm
-define void @fcmp_v2f16_gt(
+define amdgpu_kernel void @fcmp_v2f16_gt(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
@@ -488,7 +488,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
; GCN: s_endpgm
-define void @fcmp_v2f16_lg(
+define amdgpu_kernel void @fcmp_v2f16_lg(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
@@ -518,7 +518,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
; GCN: s_endpgm
-define void @fcmp_v2f16_ge(
+define amdgpu_kernel void @fcmp_v2f16_ge(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
@@ -548,7 +548,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
; GCN: s_endpgm
-define void @fcmp_v2f16_o(
+define amdgpu_kernel void @fcmp_v2f16_o(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
@@ -578,7 +578,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
; GCN: s_endpgm
-define void @fcmp_v2f16_u(
+define amdgpu_kernel void @fcmp_v2f16_u(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
@@ -608,7 +608,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
; GCN: s_endpgm
-define void @fcmp_v2f16_nge(
+define amdgpu_kernel void @fcmp_v2f16_nge(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
@@ -638,7 +638,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
; GCN: s_endpgm
-define void @fcmp_v2f16_nlg(
+define amdgpu_kernel void @fcmp_v2f16_nlg(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
@@ -668,7 +668,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
; GCN: s_endpgm
-define void @fcmp_v2f16_ngt(
+define amdgpu_kernel void @fcmp_v2f16_ngt(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
@@ -698,7 +698,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
; GCN: s_endpgm
-define void @fcmp_v2f16_nle(
+define amdgpu_kernel void @fcmp_v2f16_nle(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
@@ -728,7 +728,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
; GCN: s_endpgm
-define void @fcmp_v2f16_neq(
+define amdgpu_kernel void @fcmp_v2f16_neq(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
@@ -758,7 +758,7 @@ entry:
; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
; GCN: s_endpgm
-define void @fcmp_v2f16_nlt(
+define amdgpu_kernel void @fcmp_v2f16_nlt(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
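Note the check changes in fcmp_f16_lt_abs above: on SI the fabs source modifier is now folded into the f16-to-f32 conversion (v_cvt_f32_f16_e64 with |v[...]| operands), so the following compare can use the plain VOP2 form writing vcc instead of v_cmp_lt_f32_e64 with abs modifiers. A sketch of the IR shape such a test exercises (assumed form with scalar arguments; the actual test loads its operands from buffers):

declare half @llvm.fabs.f16(half)

define amdgpu_kernel void @lt_abs_sketch(i32 addrspace(1)* %r, half %a, half %b) {
entry:
  %a.abs = call half @llvm.fabs.f16(half %a)   ; folded into the cvt on SI
  %b.abs = call half @llvm.fabs.f16(half %b)
  %cmp = fcmp olt half %a.abs, %b.abs
  %ext = sext i1 %cmp to i32                   ; selected via v_cndmask_b32 in the checks
  store i32 %ext, i32 addrspace(1)* %r
  ret void
}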
diff --git a/test/CodeGen/AMDGPU/fcmp.ll b/test/CodeGen/AMDGPU/fcmp.ll
index 97d954fcc3c2..b548670edb06 100644
--- a/test/CodeGen/AMDGPU/fcmp.ll
+++ b/test/CodeGen/AMDGPU/fcmp.ll
@@ -3,7 +3,7 @@
; CHECK: {{^}}fcmp_sext:
; CHECK: SETE_DX10 T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @fcmp_sext(i32 addrspace(1)* %out, float addrspace(1)* %in) {
+define amdgpu_kernel void @fcmp_sext(i32 addrspace(1)* %out, float addrspace(1)* %in) {
entry:
%0 = load float, float addrspace(1)* %in
%arrayidx1 = getelementptr inbounds float, float addrspace(1)* %in, i32 1
@@ -22,7 +22,7 @@ entry:
; CHECK: SET{{[N]*}}E_DX10 * T{{[0-9]+\.[XYZW],}}
; CHECK-NEXT: {{[0-9]+\(5.0}}
-define void @fcmp_br(i32 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fcmp_br(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp oeq float %in, 5.0
br i1 %0, label %IF, label %ENDIF
diff --git a/test/CodeGen/AMDGPU/fcmp64.ll b/test/CodeGen/AMDGPU/fcmp64.ll
index acce82fdfe53..b9e1921d4c45 100644
--- a/test/CodeGen/AMDGPU/fcmp64.ll
+++ b/test/CodeGen/AMDGPU/fcmp64.ll
@@ -3,7 +3,7 @@
; CHECK-LABEL: {{^}}flt_f64:
; CHECK: v_cmp_nge_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
-define void @flt_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
+define amdgpu_kernel void @flt_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double, double addrspace(1)* %in1
%r1 = load double, double addrspace(1)* %in2
@@ -15,7 +15,7 @@ define void @flt_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
; CHECK-LABEL: {{^}}fle_f64:
; CHECK: v_cmp_ngt_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
-define void @fle_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
+define amdgpu_kernel void @fle_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double, double addrspace(1)* %in1
%r1 = load double, double addrspace(1)* %in2
@@ -27,7 +27,7 @@ define void @fle_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
; CHECK-LABEL: {{^}}fgt_f64:
; CHECK: v_cmp_nle_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
-define void @fgt_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
+define amdgpu_kernel void @fgt_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double, double addrspace(1)* %in1
%r1 = load double, double addrspace(1)* %in2
@@ -39,7 +39,7 @@ define void @fgt_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
; CHECK-LABEL: {{^}}fge_f64:
; CHECK: v_cmp_nlt_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
-define void @fge_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
+define amdgpu_kernel void @fge_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double, double addrspace(1)* %in1
%r1 = load double, double addrspace(1)* %in2
@@ -51,7 +51,7 @@ define void @fge_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
; CHECK-LABEL: {{^}}fne_f64:
; CHECK: v_cmp_neq_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
-define void @fne_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+define amdgpu_kernel void @fne_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double, double addrspace(1)* %in1
%r1 = load double, double addrspace(1)* %in2
@@ -63,7 +63,7 @@ define void @fne_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
; CHECK-LABEL: {{^}}feq_f64:
; CHECK: v_cmp_nlg_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
-define void @feq_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+define amdgpu_kernel void @feq_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double, double addrspace(1)* %in1
%r1 = load double, double addrspace(1)* %in2
diff --git a/test/CodeGen/AMDGPU/fconst64.ll b/test/CodeGen/AMDGPU/fconst64.ll
index 89af37545c99..125597796245 100644
--- a/test/CodeGen/AMDGPU/fconst64.ll
+++ b/test/CodeGen/AMDGPU/fconst64.ll
@@ -5,7 +5,7 @@
; CHECK-DAG: s_mov_b32 {{s[0-9]+}}, 0x40140000
; CHECK-DAG: s_mov_b32 {{s[0-9]+}}, 0
-define void @fconst_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
+define amdgpu_kernel void @fconst_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
%r1 = load double, double addrspace(1)* %in
%r2 = fadd double %r1, 5.000000e+00
store double %r2, double addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/fcopysign.f16.ll b/test/CodeGen/AMDGPU/fcopysign.f16.ll
new file mode 100644
index 000000000000..4e2bf765cd95
--- /dev/null
+++ b/test/CodeGen/AMDGPU/fcopysign.f16.ll
@@ -0,0 +1,264 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=GFX89 -check-prefix=GFX8 %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=GFX89 -check-prefix=GFX9 %s
+
+declare half @llvm.copysign.f16(half, half)
+declare float @llvm.copysign.f32(float, float)
+declare double @llvm.copysign.f64(double, double)
+declare <2 x half> @llvm.copysign.v2f16(<2 x half>, <2 x half>)
+declare <3 x half> @llvm.copysign.v3f16(<3 x half>, <3 x half>)
+declare <4 x half> @llvm.copysign.v4f16(<4 x half>, <4 x half>)
+
+; GCN-LABEL: {{^}}test_copysign_f16:
+; SI: buffer_load_ushort v[[SIGN:[0-9]+]]
+; SI: buffer_load_ushort v[[MAG:[0-9]+]]
+; SI: s_brev_b32 s[[CONST:[0-9]+]], -2
+; SI-DAG: v_cvt_f32_f16_e32 v[[MAG_F32:[0-9]+]], v[[MAG]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[SIGN_F32:[0-9]+]], v[[SIGN]]
+; SI: v_bfi_b32 v[[OUT_F32:[0-9]+]], s[[CONST]], v[[MAG_F32]], v[[SIGN_F32]]
+; SI: v_cvt_f16_f32_e32 v[[OUT:[0-9]+]], v[[OUT_F32]]
+; GFX89: buffer_load_ushort v[[SIGN:[0-9]+]]
+; GFX89: buffer_load_ushort v[[MAG:[0-9]+]]
+; GFX89: s_movk_i32 s[[CONST:[0-9]+]], 0x7fff
+; GFX89: v_bfi_b32 v[[OUT:[0-9]+]], s[[CONST]], v[[MAG]], v[[SIGN]]
+; GCN: buffer_store_short v[[OUT]]
+; GCN: s_endpgm
+define amdgpu_kernel void @test_copysign_f16(
+ half addrspace(1)* %arg_out,
+ half addrspace(1)* %arg_mag,
+ half addrspace(1)* %arg_sign) {
+entry:
+ %mag = load half, half addrspace(1)* %arg_mag
+ %sign = load half, half addrspace(1)* %arg_sign
+ %out = call half @llvm.copysign.f16(half %mag, half %sign)
+ store half %out, half addrspace(1)* %arg_out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_copysign_out_f32_mag_f16_sign_f32:
+; GCN-DAG: buffer_load_ushort v[[MAG:[0-9]+]]
+; GCN-DAG: buffer_load_dword v[[SIGN:[0-9]+]]
+; GCN-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
+; GCN-DAG: v_cvt_f32_f16_e32 v[[MAG_EXT:[0-9]+]], v[[MAG]]
+; GCN: v_bfi_b32 v[[OUT:[0-9]+]], s[[CONST]], v[[MAG_EXT]], v[[SIGN]]
+; GCN: buffer_store_dword v[[OUT]]
+; GCN: s_endpgm
+define amdgpu_kernel void @test_copysign_out_f32_mag_f16_sign_f32(
+ float addrspace(1)* %arg_out,
+ half addrspace(1)* %arg_mag,
+ float addrspace(1)* %arg_sign) {
+entry:
+ %mag = load half, half addrspace(1)* %arg_mag
+ %mag.ext = fpext half %mag to float
+ %sign = load float, float addrspace(1)* %arg_sign
+ %out = call float @llvm.copysign.f32(float %mag.ext, float %sign)
+ store float %out, float addrspace(1)* %arg_out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_copysign_out_f64_mag_f16_sign_f64:
+; GCN-DAG: buffer_load_ushort v[[MAG:[0-9]+]]
+; GCN-DAG: buffer_load_dwordx2 v{{\[}}[[SIGN_LO:[0-9]+]]:[[SIGN_HI:[0-9]+]]{{\]}}
+; GCN-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
+; GCN-DAG: v_cvt_f32_f16_e32 v[[MAG_EXT:[0-9]+]], v[[MAG]]
+; GCN-DAG: v_cvt_f64_f32_e32 v{{\[}}[[MAG_EXT_LO:[0-9]+]]:[[MAG_EXT_HI:[0-9]+]]{{\]}}, v[[MAG_EXT]]
+; GCN: v_bfi_b32 v[[OUT_HI:[0-9]+]], s[[CONST]], v[[MAG_EXT_HI]], v[[SIGN_HI]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[MAG_EXT_LO]]:[[OUT_HI]]{{\]}}
+; GCN: s_endpgm
+define amdgpu_kernel void @test_copysign_out_f64_mag_f16_sign_f64(
+ double addrspace(1)* %arg_out,
+ half addrspace(1)* %arg_mag,
+ double addrspace(1)* %arg_sign) {
+entry:
+ %mag = load half, half addrspace(1)* %arg_mag
+ %mag.ext = fpext half %mag to double
+ %sign = load double, double addrspace(1)* %arg_sign
+ %out = call double @llvm.copysign.f64(double %mag.ext, double %sign)
+ store double %out, double addrspace(1)* %arg_out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_copysign_out_f32_mag_f32_sign_f16:
+; GCN-DAG: buffer_load_dword v[[MAG:[0-9]+]]
+; GCN-DAG: buffer_load_ushort v[[SIGN:[0-9]+]]
+; GCN-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
+; SI-DAG: v_cvt_f32_f16_e32 v[[SIGN_F32:[0-9]+]], v[[SIGN]]
+; SI: v_bfi_b32 v[[OUT:[0-9]+]], s[[CONST]], v[[MAG]], v[[SIGN_F32]]
+; GFX89-DAG: v_lshlrev_b32_e32 v[[SIGN_SHIFT:[0-9]+]], 16, v[[SIGN]]
+; GFX89: v_bfi_b32 v[[OUT:[0-9]+]], s[[CONST]], v[[MAG]], v[[SIGN_SHIFT]]
+; GCN: buffer_store_dword v[[OUT]]
+; GCN: s_endpgm
+define amdgpu_kernel void @test_copysign_out_f32_mag_f32_sign_f16(
+ float addrspace(1)* %arg_out,
+ float addrspace(1)* %arg_mag,
+ half addrspace(1)* %arg_sign) {
+entry:
+ %mag = load float, float addrspace(1)* %arg_mag
+ %sign = load half, half addrspace(1)* %arg_sign
+ %sign.ext = fpext half %sign to float
+ %out = call float @llvm.copysign.f32(float %mag, float %sign.ext)
+ store float %out, float addrspace(1)* %arg_out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_copysign_out_f64_mag_f64_sign_f16:
+; GCN-DAG: buffer_load_dwordx2 v{{\[}}[[MAG_LO:[0-9]+]]:[[MAG_HI:[0-9]+]]{{\]}}
+; GCN-DAG: buffer_load_ushort v[[SIGN:[0-9]+]]
+; GCN-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
+; SI-DAG: v_cvt_f32_f16_e32 v[[SIGN_F32:[0-9]+]], v[[SIGN]]
+; SI: v_bfi_b32 v[[OUT_HI:[0-9]+]], s[[CONST]], v[[MAG_HI]], v[[SIGN_F32]]
+; GFX89-DAG: v_lshlrev_b32_e32 v[[SIGN_SHIFT:[0-9]+]], 16, v[[SIGN]]
+; GFX89: v_bfi_b32 v[[OUT_HI:[0-9]+]], s[[CONST]], v[[MAG_HI]], v[[SIGN_SHIFT]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[MAG_LO]]:[[OUT_HI]]{{\]}}
+; GCN: s_endpgm
+define amdgpu_kernel void @test_copysign_out_f64_mag_f64_sign_f16(
+ double addrspace(1)* %arg_out,
+ double addrspace(1)* %arg_mag,
+ half addrspace(1)* %arg_sign) {
+entry:
+ %mag = load double, double addrspace(1)* %arg_mag
+ %sign = load half, half addrspace(1)* %arg_sign
+ %sign.ext = fpext half %sign to double
+ %out = call double @llvm.copysign.f64(double %mag, double %sign.ext)
+ store double %out, double addrspace(1)* %arg_out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_copysign_out_f16_mag_f16_sign_f32:
+; GCN-DAG: buffer_load_ushort v[[MAG:[0-9]+]]
+; GCN-DAG: buffer_load_dword v[[SIGN:[0-9]+]]
+; SI-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
+; SI-DAG: v_cvt_f32_f16_e32 v[[MAG_F32:[0-9]+]], v[[MAG]]
+; SI: v_bfi_b32 v[[OUT_F32:[0-9]+]], s[[CONST]], v[[MAG_F32]], v[[SIGN]]
+; SI: v_cvt_f16_f32_e32 v[[OUT:[0-9]+]], v[[OUT_F32]]
+; GFX89-DAG: s_movk_i32 s[[CONST:[0-9]+]], 0x7fff
+; GFX89-DAG: v_lshrrev_b32_e32 v[[SIGN_SHIFT:[0-9]+]], 16, v[[SIGN]]
+; GFX89: v_bfi_b32 v[[OUT:[0-9]+]], s[[CONST]], v[[MAG]], v[[SIGN_SHIFT]]
+; GCN: buffer_store_short v[[OUT]]
+; GCN: s_endpgm
+define amdgpu_kernel void @test_copysign_out_f16_mag_f16_sign_f32(
+ half addrspace(1)* %arg_out,
+ half addrspace(1)* %arg_mag,
+ float addrspace(1)* %arg_sign) {
+entry:
+ %mag = load half, half addrspace(1)* %arg_mag
+ %sign = load float, float addrspace(1)* %arg_sign
+ %sign.trunc = fptrunc float %sign to half
+ %out = call half @llvm.copysign.f16(half %mag, half %sign.trunc)
+ store half %out, half addrspace(1)* %arg_out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_copysign_out_f16_mag_f16_sign_f64:
+; GCN-DAG: buffer_load_ushort v[[MAG:[0-9]+]]
+; GCN-DAG: buffer_load_dwordx2 v{{\[}}[[SIGN_LO:[0-9]+]]:[[SIGN_HI:[0-9]+]]{{\]}}
+; SI-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
+; SI-DAG: v_cvt_f32_f16_e32 v[[MAG_F32:[0-9]+]], v[[MAG]]
+; SI: v_bfi_b32 v[[OUT_F32:[0-9]+]], s[[CONST]], v[[MAG_F32]], v[[SIGN_HI]]
+; SI: v_cvt_f16_f32_e32 v[[OUT:[0-9]+]], v[[OUT_F32]]
+; GFX89-DAG: s_movk_i32 s[[CONST:[0-9]+]], 0x7fff
+; GFX89-DAG: v_lshrrev_b32_e32 v[[SIGN_SHIFT:[0-9]+]], 16, v[[SIGN_HI]]
+; GFX89: v_bfi_b32 v[[OUT:[0-9]+]], s[[CONST]], v[[MAG]], v[[SIGN_SHIFT]]
+; GCN: buffer_store_short v[[OUT]]
+; GCN: s_endpgm
+define amdgpu_kernel void @test_copysign_out_f16_mag_f16_sign_f64(
+ half addrspace(1)* %arg_out,
+ half addrspace(1)* %arg_mag,
+ double addrspace(1)* %arg_sign) {
+entry:
+ %mag = load half, half addrspace(1)* %arg_mag
+ %sign = load double, double addrspace(1)* %arg_sign
+ %sign.trunc = fptrunc double %sign to half
+ %out = call half @llvm.copysign.f16(half %mag, half %sign.trunc)
+ store half %out, half addrspace(1)* %arg_out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_copysign_out_f16_mag_f32_sign_f16:
+; GCN-DAG: buffer_load_dword v[[MAG:[0-9]+]]
+; GCN-DAG: buffer_load_ushort v[[SIGN:[0-9]+]]
+; SI-DAG: s_brev_b32 s[[CONST:[0-9]+]], -2
+; SI-DAG: v_cvt_f16_f32_e32 v[[MAG_TRUNC:[0-9]+]], v[[MAG]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[SIGN_F32:[0-9]+]], v[[SIGN]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[MAG_F32:[0-9]+]], v[[MAG_TRUNC]]
+; SI: v_bfi_b32 v[[OUT_F32:[0-9]+]], s[[CONST]], v[[MAG_F32]], v[[SIGN_F32]]
+; SI: v_cvt_f16_f32_e32 v[[OUT:[0-9]+]], v[[OUT_F32]]
+; GFX89-DAG: s_movk_i32 s[[CONST:[0-9]+]], 0x7fff
+; GFX89-DAG: v_cvt_f16_f32_e32 v[[MAG_TRUNC:[0-9]+]], v[[MAG]]
+; GFX89: v_bfi_b32 v[[OUT:[0-9]+]], s[[CONST]], v[[MAG_TRUNC]], v[[SIGN]]
+; GCN: buffer_store_short v[[OUT]]
+; GCN: s_endpgm
+define amdgpu_kernel void @test_copysign_out_f16_mag_f32_sign_f16(
+ half addrspace(1)* %arg_out,
+ float addrspace(1)* %arg_mag,
+ half addrspace(1)* %arg_sign) {
+entry:
+ %mag = load float, float addrspace(1)* %arg_mag
+ %mag.trunc = fptrunc float %mag to half
+ %sign = load half, half addrspace(1)* %arg_sign
+ %out = call half @llvm.copysign.f16(half %mag.trunc, half %sign)
+ store half %out, half addrspace(1)* %arg_out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_copysign_out_f16_mag_f64_sign_f16:
+; GCN: v_bfi_b32
+; GCN: s_endpgm
+define amdgpu_kernel void @test_copysign_out_f16_mag_f64_sign_f16(
+ half addrspace(1)* %arg_out,
+ double addrspace(1)* %arg_mag,
+ half addrspace(1)* %arg_sign) {
+entry:
+ %mag = load double, double addrspace(1)* %arg_mag
+ %mag.trunc = fptrunc double %mag to half
+ %sign = load half, half addrspace(1)* %arg_sign
+ %out = call half @llvm.copysign.f16(half %mag.trunc, half %sign)
+ store half %out, half addrspace(1)* %arg_out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_copysign_v2f16:
+; GCN: v_bfi_b32
+; GCN: v_bfi_b32
+; VI: v_or_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GCN: s_endpgm
+define amdgpu_kernel void @test_copysign_v2f16(
+ <2 x half> addrspace(1)* %arg_out,
+ <2 x half> %arg_mag,
+ <2 x half> %arg_sign) {
+entry:
+ %out = call <2 x half> @llvm.copysign.v2f16(<2 x half> %arg_mag, <2 x half> %arg_sign)
+ store <2 x half> %out, <2 x half> addrspace(1)* %arg_out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_copysign_v3f16:
+; GCN: v_bfi_b32
+; GCN: v_bfi_b32
+; GCN: v_bfi_b32
+; GCN: s_endpgm
+define amdgpu_kernel void @test_copysign_v3f16(
+ <3 x half> addrspace(1)* %arg_out,
+ <3 x half> %arg_mag,
+ <3 x half> %arg_sign) {
+entry:
+ %out = call <3 x half> @llvm.copysign.v3f16(<3 x half> %arg_mag, <3 x half> %arg_sign)
+ store <3 x half> %out, <3 x half> addrspace(1)* %arg_out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_copysign_v4f16:
+; GCN: v_bfi_b32
+; GCN: v_bfi_b32
+; GCN: v_bfi_b32
+; GCN: v_bfi_b32
+; GCN: s_endpgm
+define amdgpu_kernel void @test_copysign_v4f16(
+ <4 x half> addrspace(1)* %arg_out,
+ <4 x half> %arg_mag,
+ <4 x half> %arg_sign) {
+entry:
+ %out = call <4 x half> @llvm.copysign.v4f16(<4 x half> %arg_mag, <4 x half> %arg_sign)
+ store <4 x half> %out, <4 x half> addrspace(1)* %arg_out
+ ret void
+}
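All of the copysign checks in this new file reduce to v_bfi_b32, a bitfield insert: with mask 0x7fffffff (materialized by s_brev_b32 -2, since bit-reversing 0xfffffffe gives 0x7fffffff) it takes the magnitude bits from one operand and the sign bit from the other; the f16 variants use the 16-bit mask 0x7fff via s_movk_i32. The equivalent integer math for the f32 case, as an illustration rather than anything in this patch:

define float @copysign_f32_bits(float %mag, float %sign) {
  %m = bitcast float %mag to i32
  %s = bitcast float %sign to i32
  %mag.bits = and i32 %m, 2147483647    ; 0x7fffffff keeps exponent and mantissa
  %sign.bit = and i32 %s, -2147483648   ; 0x80000000 keeps only the sign
  %combined = or i32 %mag.bits, %sign.bit
  %res = bitcast i32 %combined to float
  ret float %res
}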
diff --git a/test/CodeGen/AMDGPU/fcopysign.f32.ll b/test/CodeGen/AMDGPU/fcopysign.f32.ll
index 632de18dafcb..e5893e5995a3 100644
--- a/test/CodeGen/AMDGPU/fcopysign.f32.ll
+++ b/test/CodeGen/AMDGPU/fcopysign.f32.ll
@@ -20,7 +20,7 @@ declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>) nounwind read
; GCN: s_endpgm
; EG: BFI_INT
-define void @test_copysign_f32(float addrspace(1)* %out, float %mag, float %sign) nounwind {
+define amdgpu_kernel void @test_copysign_f32(float addrspace(1)* %out, float %mag, float %sign) nounwind {
%result = call float @llvm.copysign.f32(float %mag, float %sign)
store float %result, float addrspace(1)* %out, align 4
ret void
@@ -31,7 +31,7 @@ define void @test_copysign_f32(float addrspace(1)* %out, float %mag, float %sign
; EG: BFI_INT
; EG: BFI_INT
-define void @test_copysign_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %mag, <2 x float> %sign) nounwind {
+define amdgpu_kernel void @test_copysign_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %mag, <2 x float> %sign) nounwind {
%result = call <2 x float> @llvm.copysign.v2f32(<2 x float> %mag, <2 x float> %sign)
store <2 x float> %result, <2 x float> addrspace(1)* %out, align 8
ret void
@@ -44,7 +44,7 @@ define void @test_copysign_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %ma
; EG: BFI_INT
; EG: BFI_INT
; EG: BFI_INT
-define void @test_copysign_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %mag, <4 x float> %sign) nounwind {
+define amdgpu_kernel void @test_copysign_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %mag, <4 x float> %sign) nounwind {
%result = call <4 x float> @llvm.copysign.v4f32(<4 x float> %mag, <4 x float> %sign)
store <4 x float> %result, <4 x float> addrspace(1)* %out, align 16
ret void
diff --git a/test/CodeGen/AMDGPU/fcopysign.f64.ll b/test/CodeGen/AMDGPU/fcopysign.f64.ll
index 12c942beee6c..67779a8ff3b9 100644
--- a/test/CodeGen/AMDGPU/fcopysign.f64.ll
+++ b/test/CodeGen/AMDGPU/fcopysign.f64.ll
@@ -17,7 +17,7 @@ declare <4 x double> @llvm.copysign.v4f64(<4 x double>, <4 x double>) nounwind r
; GCN-DAG: v_mov_b32_e32 v[[VMAG_LO:[0-9]+]], s[[SMAG_LO]]
; GCN: buffer_store_dwordx2 v{{\[}}[[VMAG_LO]]:[[VRESULT_HI]]{{\]}}
; GCN: s_endpgm
-define void @test_copysign_f64(double addrspace(1)* %out, double %mag, double %sign) nounwind {
+define amdgpu_kernel void @test_copysign_f64(double addrspace(1)* %out, double %mag, double %sign) nounwind {
%result = call double @llvm.copysign.f64(double %mag, double %sign)
store double %result, double addrspace(1)* %out, align 8
ret void
@@ -32,7 +32,7 @@ define void @test_copysign_f64(double addrspace(1)* %out, double %mag, double %s
; GCN-DAG: v_bfi_b32 v[[VRESULT_HI:[0-9]+]], [[SCONST]], v[[VMAG_HI]], v[[VSIGN]]
; GCN-DAG: v_mov_b32_e32 v[[VMAG_LO:[0-9]+]], s[[SMAG_LO]]
; GCN: buffer_store_dwordx2 v{{\[}}[[VMAG_LO]]:[[VRESULT_HI]]{{\]}}
-define void @test_copysign_f64_f32(double addrspace(1)* %out, double %mag, float %sign) nounwind {
+define amdgpu_kernel void @test_copysign_f64_f32(double addrspace(1)* %out, double %mag, float %sign) nounwind {
%c = fpext float %sign to double
%result = call double @llvm.copysign.f64(double %mag, double %c)
store double %result, double addrspace(1)* %out, align 8
@@ -41,7 +41,7 @@ define void @test_copysign_f64_f32(double addrspace(1)* %out, double %mag, float
; FUNC-LABEL: {{^}}test_copysign_v2f64:
; GCN: s_endpgm
-define void @test_copysign_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %mag, <2 x double> %sign) nounwind {
+define amdgpu_kernel void @test_copysign_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %mag, <2 x double> %sign) nounwind {
%result = call <2 x double> @llvm.copysign.v2f64(<2 x double> %mag, <2 x double> %sign)
store <2 x double> %result, <2 x double> addrspace(1)* %out, align 8
ret void
@@ -49,7 +49,7 @@ define void @test_copysign_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %
; FUNC-LABEL: {{^}}test_copysign_v4f64:
; GCN: s_endpgm
-define void @test_copysign_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %mag, <4 x double> %sign) nounwind {
+define amdgpu_kernel void @test_copysign_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %mag, <4 x double> %sign) nounwind {
%result = call <4 x double> @llvm.copysign.v4f64(<4 x double> %mag, <4 x double> %sign)
store <4 x double> %result, <4 x double> addrspace(1)* %out, align 8
ret void
diff --git a/test/CodeGen/AMDGPU/fdiv.f16.ll b/test/CodeGen/AMDGPU/fdiv.f16.ll
index 70b70bdaaaa7..7f84e973c958 100644
--- a/test/CodeGen/AMDGPU/fdiv.f16.ll
+++ b/test/CodeGen/AMDGPU/fdiv.f16.ll
@@ -31,7 +31,7 @@
; VI: v_cvt_f16_f32_e32 [[CVT_BACK:v[0-9]+]], [[MUL]]
; VI: v_div_fixup_f16 [[RESULT:v[0-9]+]], [[CVT_BACK]], [[RHS]], [[LHS]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @v_fdiv_f16(
+define amdgpu_kernel void @v_fdiv_f16(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) #0 {
@@ -54,7 +54,7 @@ entry:
; VI: v_rcp_f16_e32 [[RESULT:v[0-9]+]], [[VAL]]
; VI-NOT: [[RESULT]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @v_rcp_f16(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
+define amdgpu_kernel void @v_rcp_f16(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -72,7 +72,7 @@ entry:
; VI: v_rcp_f16_e64 [[RESULT:v[0-9]+]], |[[VAL]]|
; VI-NOT: [RESULT]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @v_rcp_f16_abs(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
+define amdgpu_kernel void @v_rcp_f16_abs(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -91,7 +91,7 @@ entry:
; VI: v_rcp_f16_e32 [[RESULT:v[0-9]+]], [[VAL]]
; VI-NOT: [[RESULT]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @v_rcp_f16_arcp(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
+define amdgpu_kernel void @v_rcp_f16_arcp(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -109,7 +109,7 @@ entry:
; VI: v_rcp_f16_e64 [[RESULT:v[0-9]+]], -[[VAL]]
; VI-NOT: [RESULT]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @v_rcp_f16_neg(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
+define amdgpu_kernel void @v_rcp_f16_neg(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -127,7 +127,7 @@ entry:
; VI: v_rsq_f16_e32 [[RESULT:v[0-9]+]], [[VAL]]
; VI-NOT: [RESULT]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @v_rsq_f16(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
+define amdgpu_kernel void @v_rsq_f16(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -147,7 +147,7 @@ entry:
; VI-NEXT: v_rcp_f16_e64 [[RESULT:v[0-9]+]], -[[SQRT]]
; VI-NOT: [RESULT]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @v_rsq_f16_neg(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
+define amdgpu_kernel void @v_rsq_f16_neg(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -168,7 +168,7 @@ entry:
; VI: v_mul_f16_e32 [[RESULT:v[0-9]+]], [[RCP]], [[LHS]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @v_fdiv_f16_arcp(half addrspace(1)* %r, half addrspace(1)* %a, half addrspace(1)* %b) #0 {
+define amdgpu_kernel void @v_fdiv_f16_arcp(half addrspace(1)* %r, half addrspace(1)* %a, half addrspace(1)* %b) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -190,7 +190,7 @@ entry:
; VI: v_mul_f16_e32 [[RESULT:v[0-9]+]], [[RCP]], [[LHS]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @v_fdiv_f16_unsafe(half addrspace(1)* %r, half addrspace(1)* %a, half addrspace(1)* %b) #2 {
+define amdgpu_kernel void @v_fdiv_f16_unsafe(half addrspace(1)* %r, half addrspace(1)* %a, half addrspace(1)* %b) #2 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -204,6 +204,42 @@ entry:
ret void
}
+; FUNC-LABEL: {{^}}div_arcp_2_x_pat_f16:
+; SI: v_mul_f32_e32 v{{[0-9]+}}, 0.5, v{{[0-9]+}}
+
+; VI: v_mul_f16_e32 [[MUL:v[0-9]+]], 0.5, v{{[0-9]+}}
+; VI: buffer_store_short [[MUL]]
+define amdgpu_kernel void @div_arcp_2_x_pat_f16(half addrspace(1)* %out) #0 {
+ %x = load half, half addrspace(1)* undef
+ %rcp = fdiv arcp half %x, 2.0
+ store half %rcp, half addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}div_arcp_k_x_pat_f16:
+; SI: v_mul_f32_e32 v{{[0-9]+}}, 0x3dcccccd, v{{[0-9]+}}
+
+; VI: v_mul_f16_e32 [[MUL:v[0-9]+]], 0x2e66, v{{[0-9]+}}
+; VI: buffer_store_short [[MUL]]
+define amdgpu_kernel void @div_arcp_k_x_pat_f16(half addrspace(1)* %out) #0 {
+ %x = load half, half addrspace(1)* undef
+ %rcp = fdiv arcp half %x, 10.0
+ store half %rcp, half addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}div_arcp_neg_k_x_pat_f16:
+; SI: v_mul_f32_e32 v{{[0-9]+}}, 0xbdcccccd, v{{[0-9]+}}
+
+; VI: v_mul_f16_e32 [[MUL:v[0-9]+]], 0xae66, v{{[0-9]+}}
+; VI: buffer_store_short [[MUL]]
+define amdgpu_kernel void @div_arcp_neg_k_x_pat_f16(half addrspace(1)* %out) #0 {
+ %x = load half, half addrspace(1)* undef
+ %rcp = fdiv arcp half %x, -10.0
+ store half %rcp, half addrspace(1)* %out, align 4
+ ret void
+}
+
declare i32 @llvm.amdgcn.workitem.id.x() #1
declare half @llvm.sqrt.f16(half) #1
declare half @llvm.fabs.f16(half) #1
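The three new div_arcp_*_pat_f16 tests added above check that an arcp division by a constant is folded into a multiply by the reciprocal: x / 2.0 becomes a multiply by 0.5, and x / 10.0 becomes a multiply by the f16 constant 0x2e66 (the nearest half to 0.1; 0xae66 is the same value with the sign bit set, for the -10.0 case). On SI, which lacks 16-bit arithmetic, the same fold happens after promotion to f32, hence the 0x3dcccccd (~0.1f) constant in the SI checks. The folded form, sketched with a direct operand instead of the undef load used by the tests:

define amdgpu_kernel void @folded_rcp_f16_sketch(half addrspace(1)* %out, half %x) {
  %r = fmul arcp half %x, 0xH2E66     ; nearest half to 0.1, i.e. the folded x / 10.0
  store half %r, half addrspace(1)* %out
  ret void
}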
diff --git a/test/CodeGen/AMDGPU/fdiv.f64.ll b/test/CodeGen/AMDGPU/fdiv.f64.ll
index 20f9e4df07fd..d16bdf43ee26 100644
--- a/test/CodeGen/AMDGPU/fdiv.f64.ll
+++ b/test/CodeGen/AMDGPU/fdiv.f64.ll
@@ -1,11 +1,11 @@
-; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=COMMON %s
-; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=COMMON %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=COMMON %s
+; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=GCN %s
-; COMMON-LABEL: {{^}}fdiv_f64:
-; COMMON-DAG: buffer_load_dwordx2 [[NUM:v\[[0-9]+:[0-9]+\]]], off, {{s\[[0-9]+:[0-9]+\]}}, 0
-; COMMON-DAG: buffer_load_dwordx2 [[DEN:v\[[0-9]+:[0-9]+\]]], off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:8
+; GCN-LABEL: {{^}}fdiv_f64:
+; GCN-DAG: buffer_load_dwordx2 [[NUM:v\[[0-9]+:[0-9]+\]]], off, {{s\[[0-9]+:[0-9]+\]}}, 0
+; GCN-DAG: buffer_load_dwordx2 [[DEN:v\[[0-9]+:[0-9]+\]]], off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:8
; CI-DAG: v_div_scale_f64 [[SCALE0:v\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, [[DEN]], [[DEN]], [[NUM]]
; CI-DAG: v_div_scale_f64 [[SCALE1:v\[[0-9]+:[0-9]+\]]], vcc, [[NUM]], [[DEN]], [[NUM]]
@@ -13,23 +13,23 @@
; SI-DAG: v_div_scale_f64 [[SCALE0:v\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, [[DEN]], [[DEN]], [[NUM]]
; SI-DAG: v_div_scale_f64 [[SCALE1:v\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, [[NUM]], [[DEN]], [[NUM]]
-; COMMON-DAG: v_rcp_f64_e32 [[RCP_SCALE0:v\[[0-9]+:[0-9]+\]]], [[SCALE0]]
+; GCN-DAG: v_rcp_f64_e32 [[RCP_SCALE0:v\[[0-9]+:[0-9]+\]]], [[SCALE0]]
; SI-DAG: v_cmp_eq_u32_e32 vcc, {{v[0-9]+}}, {{v[0-9]+}}
; SI-DAG: v_cmp_eq_u32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], {{v[0-9]+}}, {{v[0-9]+}}
; SI-DAG: s_xor_b64 vcc, [[CMP0]], vcc
-; COMMON-DAG: v_fma_f64 [[FMA0:v\[[0-9]+:[0-9]+\]]], -[[SCALE0]], [[RCP_SCALE0]], 1.0
-; COMMON-DAG: v_fma_f64 [[FMA1:v\[[0-9]+:[0-9]+\]]], [[RCP_SCALE0]], [[FMA0]], [[RCP_SCALE0]]
-; COMMON-DAG: v_fma_f64 [[FMA2:v\[[0-9]+:[0-9]+\]]], -[[SCALE0]], [[FMA1]], 1.0
-; COMMON-DAG: v_fma_f64 [[FMA3:v\[[0-9]+:[0-9]+\]]], [[FMA1]], [[FMA2]], [[FMA1]]
-; COMMON-DAG: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], [[SCALE1]], [[FMA3]]
-; COMMON-DAG: v_fma_f64 [[FMA4:v\[[0-9]+:[0-9]+\]]], -[[SCALE0]], [[MUL]], [[SCALE1]]
-; COMMON: v_div_fmas_f64 [[FMAS:v\[[0-9]+:[0-9]+\]]], [[FMA4]], [[FMA3]], [[MUL]]
-; COMMON: v_div_fixup_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[FMAS]], [[DEN]], [[NUM]]
-; COMMON: buffer_store_dwordx2 [[RESULT]]
-; COMMON: s_endpgm
-define void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %in) nounwind {
+; GCN-DAG: v_fma_f64 [[FMA0:v\[[0-9]+:[0-9]+\]]], -[[SCALE0]], [[RCP_SCALE0]], 1.0
+; GCN-DAG: v_fma_f64 [[FMA1:v\[[0-9]+:[0-9]+\]]], [[RCP_SCALE0]], [[FMA0]], [[RCP_SCALE0]]
+; GCN-DAG: v_fma_f64 [[FMA2:v\[[0-9]+:[0-9]+\]]], -[[SCALE0]], [[FMA1]], 1.0
+; GCN-DAG: v_fma_f64 [[FMA3:v\[[0-9]+:[0-9]+\]]], [[FMA1]], [[FMA2]], [[FMA1]]
+; GCN-DAG: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], [[SCALE1]], [[FMA3]]
+; GCN-DAG: v_fma_f64 [[FMA4:v\[[0-9]+:[0-9]+\]]], -[[SCALE0]], [[MUL]], [[SCALE1]]
+; GCN: v_div_fmas_f64 [[FMAS:v\[[0-9]+:[0-9]+\]]], [[FMA4]], [[FMA3]], [[MUL]]
+; GCN: v_div_fixup_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[FMAS]], [[DEN]], [[NUM]]
+; GCN: buffer_store_dwordx2 [[RESULT]]
+; GCN: s_endpgm
+define amdgpu_kernel void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%gep.1 = getelementptr double, double addrspace(1)* %in, i32 1
%num = load volatile double, double addrspace(1)* %in
%den = load volatile double, double addrspace(1)* %gep.1
@@ -38,31 +38,31 @@ define void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %in) nounw
ret void
}
-; COMMON-LABEL: {{^}}fdiv_f64_s_v:
-define void @fdiv_f64_s_v(double addrspace(1)* %out, double addrspace(1)* %in, double %num) nounwind {
+; GCN-LABEL: {{^}}fdiv_f64_s_v:
+define amdgpu_kernel void @fdiv_f64_s_v(double addrspace(1)* %out, double addrspace(1)* %in, double %num) #0 {
%den = load double, double addrspace(1)* %in
%result = fdiv double %num, %den
store double %result, double addrspace(1)* %out
ret void
}
-; COMMON-LABEL: {{^}}fdiv_f64_v_s:
-define void @fdiv_f64_v_s(double addrspace(1)* %out, double addrspace(1)* %in, double %den) nounwind {
+; GCN-LABEL: {{^}}fdiv_f64_v_s:
+define amdgpu_kernel void @fdiv_f64_v_s(double addrspace(1)* %out, double addrspace(1)* %in, double %den) #0 {
%num = load double, double addrspace(1)* %in
%result = fdiv double %num, %den
store double %result, double addrspace(1)* %out
ret void
}
-; COMMON-LABEL: {{^}}fdiv_f64_s_s:
-define void @fdiv_f64_s_s(double addrspace(1)* %out, double %num, double %den) nounwind {
+; GCN-LABEL: {{^}}fdiv_f64_s_s:
+define amdgpu_kernel void @fdiv_f64_s_s(double addrspace(1)* %out, double %num, double %den) #0 {
%result = fdiv double %num, %den
store double %result, double addrspace(1)* %out
ret void
}
-; COMMON-LABEL: {{^}}v_fdiv_v2f64:
-define void @v_fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in) nounwind {
+; GCN-LABEL: {{^}}v_fdiv_v2f64:
+define amdgpu_kernel void @v_fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in) #0 {
%gep.1 = getelementptr <2 x double>, <2 x double> addrspace(1)* %in, i32 1
%num = load <2 x double>, <2 x double> addrspace(1)* %in
%den = load <2 x double>, <2 x double> addrspace(1)* %gep.1
@@ -71,15 +71,15 @@ define void @v_fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspac
ret void
}
-; COMMON-LABEL: {{^}}s_fdiv_v2f64:
-define void @s_fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %num, <2 x double> %den) {
+; GCN-LABEL: {{^}}s_fdiv_v2f64:
+define amdgpu_kernel void @s_fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %num, <2 x double> %den) {
%result = fdiv <2 x double> %num, %den
store <2 x double> %result, <2 x double> addrspace(1)* %out
ret void
}
-; COMMON-LABEL: {{^}}v_fdiv_v4f64:
-define void @v_fdiv_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in) nounwind {
+; GCN-LABEL: {{^}}v_fdiv_v4f64:
+define amdgpu_kernel void @v_fdiv_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in) #0 {
%gep.1 = getelementptr <4 x double>, <4 x double> addrspace(1)* %in, i32 1
%num = load <4 x double>, <4 x double> addrspace(1)* %in
%den = load <4 x double>, <4 x double> addrspace(1)* %gep.1
@@ -88,9 +88,46 @@ define void @v_fdiv_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspac
ret void
}
-; COMMON-LABEL: {{^}}s_fdiv_v4f64:
-define void @s_fdiv_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %num, <4 x double> %den) {
+; GCN-LABEL: {{^}}s_fdiv_v4f64:
+define amdgpu_kernel void @s_fdiv_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %num, <4 x double> %den) #0 {
%result = fdiv <4 x double> %num, %den
store <4 x double> %result, <4 x double> addrspace(1)* %out
ret void
}
+
+; GCN-LABEL: {{^}}div_fast_2_x_pat_f64:
+; GCN: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, 0.5
+; GCN: buffer_store_dwordx2 [[MUL]]
+define amdgpu_kernel void @div_fast_2_x_pat_f64(double addrspace(1)* %out) #1 {
+ %x = load double, double addrspace(1)* undef
+ %rcp = fdiv fast double %x, 2.0
+ store double %rcp, double addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}div_fast_k_x_pat_f64:
+; GCN-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0x9999999a
+; GCN-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 0x3fb99999
+; GCN: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}
+; GCN: buffer_store_dwordx2 [[MUL]]
+define amdgpu_kernel void @div_fast_k_x_pat_f64(double addrspace(1)* %out) #1 {
+ %x = load double, double addrspace(1)* undef
+ %rcp = fdiv fast double %x, 10.0
+ store double %rcp, double addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}div_fast_neg_k_x_pat_f64:
+; GCN-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0x9999999a
+; GCN-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 0xbfb99999
+; GCN: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}
+; GCN: buffer_store_dwordx2 [[MUL]]
+define amdgpu_kernel void @div_fast_neg_k_x_pat_f64(double addrspace(1)* %out) #1 {
+ %x = load double, double addrspace(1)* undef
+ %rcp = fdiv fast double %x, -10.0
+ store double %rcp, double addrspace(1)* %out, align 4
+ ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind "unsafe-fp-math"="true" }
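The new div_fast_*_pat_f64 tests apply the same reciprocal fold at f64 under fast-math. The constant is too wide for an inline operand, so the checks expect it split across two s_mov_b32: K_LO 0x9999999a and K_HI 0x3fb99999 together form 0x3FB999999999999A, the nearest double to 0.1, and the negated case only flips the top bit of K_HI (0xbfb99999). Sketched as IR:

define amdgpu_kernel void @folded_rcp_f64_sketch(double addrspace(1)* %out, double %x) {
  %r = fmul fast double %x, 0x3FB999999999999A  ; nearest double to 0.1
  store double %r, double addrspace(1)* %out
  ret void
}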
diff --git a/test/CodeGen/AMDGPU/fdiv.ll b/test/CodeGen/AMDGPU/fdiv.ll
index 0e95de9c555c..b3a2b6643720 100644
--- a/test/CodeGen/AMDGPU/fdiv.ll
+++ b/test/CodeGen/AMDGPU/fdiv.ll
@@ -27,7 +27,7 @@
; GCN: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; GCN: v_div_fmas_f32 [[FMAS:v[0-9]+]], [[F]], [[B]], [[E]]
; GCN: v_div_fixup_f32 v{{[0-9]+}}, [[FMAS]],
-define void @fdiv_f32(float addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @fdiv_f32(float addrspace(1)* %out, float %a, float %b) #0 {
entry:
%fdiv = fdiv float %a, %b
store float %fdiv, float addrspace(1)* %out
@@ -52,7 +52,7 @@ entry:
; GCN-NOT: s_setreg
; GCN: v_div_fmas_f32 [[FMAS:v[0-9]+]], [[F]], [[B]], [[E]]
; GCN: v_div_fixup_f32 v{{[0-9]+}}, [[FMAS]],
-define void @fdiv_f32_denormals(float addrspace(1)* %out, float %a, float %b) #2 {
+define amdgpu_kernel void @fdiv_f32_denormals(float addrspace(1)* %out, float %a, float %b) #2 {
entry:
%fdiv = fdiv float %a, %b
store float %fdiv, float addrspace(1)* %out
@@ -65,7 +65,7 @@ entry:
; GCN: v_rcp_f32
; GCN: v_mul_f32
; GCN: v_mul_f32
-define void @fdiv_25ulp_f32(float addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @fdiv_25ulp_f32(float addrspace(1)* %out, float %a, float %b) #0 {
entry:
%fdiv = fdiv float %a, %b, !fpmath !0
store float %fdiv, float addrspace(1)* %out
@@ -77,7 +77,7 @@ entry:
; GCN: v_fma_f32
; GCN: v_div_fmas_f32
; GCN: v_div_fixup_f32
-define void @fdiv_25ulp_denormals_f32(float addrspace(1)* %out, float %a, float %b) #2 {
+define amdgpu_kernel void @fdiv_25ulp_denormals_f32(float addrspace(1)* %out, float %a, float %b) #2 {
entry:
%fdiv = fdiv float %a, %b, !fpmath !0
store float %fdiv, float addrspace(1)* %out
@@ -89,7 +89,7 @@ entry:
; GCN: v_mul_f32_e32 [[RESULT:v[0-9]+]], s{{[0-9]+}}, [[RCP]]
; GCN-NOT: [[RESULT]]
; GCN: buffer_store_dword [[RESULT]]
-define void @fdiv_fast_denormals_f32(float addrspace(1)* %out, float %a, float %b) #2 {
+define amdgpu_kernel void @fdiv_fast_denormals_f32(float addrspace(1)* %out, float %a, float %b) #2 {
entry:
%fdiv = fdiv fast float %a, %b
store float %fdiv, float addrspace(1)* %out
@@ -104,7 +104,7 @@ entry:
; GCN: v_mul_f32_e32 [[RESULT:v[0-9]+]], s{{[0-9]+}}, [[RCP]]
; GCN-NOT: [[RESULT]]
; GCN: buffer_store_dword [[RESULT]]
-define void @fdiv_f32_fast_math(float addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @fdiv_f32_fast_math(float addrspace(1)* %out, float %a, float %b) #0 {
entry:
%fdiv = fdiv fast float %a, %b
store float %fdiv, float addrspace(1)* %out
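The two fast-math kernels above accept the cheap lowering: instead of the correctly rounded div_scale/div_fmas/div_fixup sequence, fdiv fast collapses to one v_rcp_f32 plus one v_mul_f32. In plain terms, a Python sketch of the transform (an illustration, not the backend code):

def fdiv_fast(a, b):
    # What the RCP/MUL pair computes: a * (1/b). The hardware rcp is only
    # about 1 ulp accurate, which 'fast' (and 'arcp' below) permits.
    rcp_b = 1.0 / b   # stands in for v_rcp_f32
    return a * rcp_b  # v_mul_f32

print(fdiv_fast(10.0, 4.0))  # 2.5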
@@ -119,7 +119,7 @@ entry:
; GCN: v_mul_f32_e32 [[RESULT:v[0-9]+]], s{{[0-9]+}}, [[RCP]]
; GCN-NOT: [[RESULT]]
; GCN: buffer_store_dword [[RESULT]]
-define void @fdiv_f32_arcp_math(float addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @fdiv_f32_arcp_math(float addrspace(1)* %out, float %a, float %b) #0 {
entry:
%fdiv = fdiv arcp float %a, %b
store float %fdiv, float addrspace(1)* %out
@@ -136,7 +136,7 @@ entry:
; GCN: v_div_scale_f32
; GCN: v_div_scale_f32
; GCN: v_div_scale_f32
-define void @fdiv_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) #0 {
+define amdgpu_kernel void @fdiv_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) #0 {
entry:
%fdiv = fdiv <2 x float> %a, %b
store <2 x float> %fdiv, <2 x float> addrspace(1)* %out
@@ -146,7 +146,7 @@ entry:
; FUNC-LABEL: {{^}}fdiv_ulp25_v2f32:
; GCN: v_cmp_gt_f32
; GCN: v_cmp_gt_f32
-define void @fdiv_ulp25_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) #0 {
+define amdgpu_kernel void @fdiv_ulp25_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) #0 {
entry:
%fdiv = fdiv arcp <2 x float> %a, %b, !fpmath !0
store <2 x float> %fdiv, <2 x float> addrspace(1)* %out
@@ -161,7 +161,7 @@ entry:
; GCN: v_rcp_f32
; GCN: v_rcp_f32
-define void @fdiv_v2f32_fast_math(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) #0 {
+define amdgpu_kernel void @fdiv_v2f32_fast_math(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) #0 {
entry:
%fdiv = fdiv fast <2 x float> %a, %b
store <2 x float> %fdiv, <2 x float> addrspace(1)* %out
@@ -176,7 +176,7 @@ entry:
; GCN: v_rcp_f32
; GCN: v_rcp_f32
-define void @fdiv_v2f32_arcp_math(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) #0 {
+define amdgpu_kernel void @fdiv_v2f32_arcp_math(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) #0 {
entry:
%fdiv = fdiv arcp <2 x float> %a, %b
store <2 x float> %fdiv, <2 x float> addrspace(1)* %out
@@ -197,7 +197,7 @@ entry:
; GCN: v_div_fixup_f32
; GCN: v_div_fixup_f32
; GCN: v_div_fixup_f32
-define void @fdiv_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fdiv_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) #0 {
%b_ptr = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 1
%a = load <4 x float>, <4 x float> addrspace(1) * %in
%b = load <4 x float>, <4 x float> addrspace(1) * %b_ptr
@@ -220,7 +220,7 @@ define void @fdiv_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)
; GCN: v_rcp_f32
; GCN: v_rcp_f32
; GCN: v_rcp_f32
-define void @fdiv_v4f32_fast_math(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fdiv_v4f32_fast_math(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) #0 {
%b_ptr = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 1
%a = load <4 x float>, <4 x float> addrspace(1) * %in
%b = load <4 x float>, <4 x float> addrspace(1) * %b_ptr
@@ -243,7 +243,7 @@ define void @fdiv_v4f32_fast_math(<4 x float> addrspace(1)* %out, <4 x float> ad
; GCN: v_rcp_f32
; GCN: v_rcp_f32
; GCN: v_rcp_f32
-define void @fdiv_v4f32_arcp_math(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fdiv_v4f32_arcp_math(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) #0 {
%b_ptr = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 1
%a = load <4 x float>, <4 x float> addrspace(1) * %in
%b = load <4 x float>, <4 x float> addrspace(1) * %b_ptr
diff --git a/test/CodeGen/AMDGPU/ffloor.f64.ll b/test/CodeGen/AMDGPU/ffloor.f64.ll
index 83ffbdfa23a5..407cccb8443e 100644
--- a/test/CodeGen/AMDGPU/ffloor.f64.ll
+++ b/test/CodeGen/AMDGPU/ffloor.f64.ll
@@ -19,7 +19,7 @@ declare <16 x double> @llvm.floor.v16f64(<16 x double>) nounwind readnone
; SI: v_cndmask_b32_e32
; SI: v_add_f64
; SI: s_endpgm
-define void @ffloor_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @ffloor_f64(double addrspace(1)* %out, double %x) {
%y = call double @llvm.floor.f64(double %x) nounwind readnone
store double %y, double addrspace(1)* %out
ret void
@@ -34,7 +34,7 @@ define void @ffloor_f64(double addrspace(1)* %out, double %x) {
; SI: v_cndmask_b32_e32
; SI: v_add_f64 {{v[[0-9]+:[0-9]+]}}, -[[INPUT]]
; SI: s_endpgm
-define void @ffloor_f64_neg(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @ffloor_f64_neg(double addrspace(1)* %out, double %x) {
%neg = fsub double 0.0, %x
%y = call double @llvm.floor.f64(double %neg) nounwind readnone
store double %y, double addrspace(1)* %out
@@ -50,7 +50,7 @@ define void @ffloor_f64_neg(double addrspace(1)* %out, double %x) {
; SI: v_cndmask_b32_e32
; SI: v_add_f64 {{v[[0-9]+:[0-9]+]}}, -|[[INPUT]]|
; SI: s_endpgm
-define void @ffloor_f64_neg_abs(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @ffloor_f64_neg_abs(double addrspace(1)* %out, double %x) {
%abs = call double @llvm.fabs.f64(double %x)
%neg = fsub double 0.0, %abs
%y = call double @llvm.floor.f64(double %neg) nounwind readnone
@@ -61,7 +61,7 @@ define void @ffloor_f64_neg_abs(double addrspace(1)* %out, double %x) {
; FUNC-LABEL: {{^}}ffloor_v2f64:
; CI: v_floor_f64_e32
; CI: v_floor_f64_e32
-define void @ffloor_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
+define amdgpu_kernel void @ffloor_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
%y = call <2 x double> @llvm.floor.v2f64(<2 x double> %x) nounwind readnone
store <2 x double> %y, <2 x double> addrspace(1)* %out
ret void
@@ -72,7 +72,7 @@ define void @ffloor_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
; CI: v_floor_f64_e32
; CI: v_floor_f64_e32
; CI-NOT: v_floor_f64_e32
-define void @ffloor_v3f64(<3 x double> addrspace(1)* %out, <3 x double> %x) {
+define amdgpu_kernel void @ffloor_v3f64(<3 x double> addrspace(1)* %out, <3 x double> %x) {
%y = call <3 x double> @llvm.floor.v3f64(<3 x double> %x) nounwind readnone
store <3 x double> %y, <3 x double> addrspace(1)* %out
ret void
@@ -83,7 +83,7 @@ define void @ffloor_v3f64(<3 x double> addrspace(1)* %out, <3 x double> %x) {
; CI: v_floor_f64_e32
; CI: v_floor_f64_e32
; CI: v_floor_f64_e32
-define void @ffloor_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
+define amdgpu_kernel void @ffloor_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
%y = call <4 x double> @llvm.floor.v4f64(<4 x double> %x) nounwind readnone
store <4 x double> %y, <4 x double> addrspace(1)* %out
ret void
@@ -98,7 +98,7 @@ define void @ffloor_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
; CI: v_floor_f64_e32
; CI: v_floor_f64_e32
; CI: v_floor_f64_e32
-define void @ffloor_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
+define amdgpu_kernel void @ffloor_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
%y = call <8 x double> @llvm.floor.v8f64(<8 x double> %x) nounwind readnone
store <8 x double> %y, <8 x double> addrspace(1)* %out
ret void
@@ -121,7 +121,7 @@ define void @ffloor_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
; CI: v_floor_f64_e32
; CI: v_floor_f64_e32
; CI: v_floor_f64_e32
-define void @ffloor_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %x) {
+define amdgpu_kernel void @ffloor_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %x) {
%y = call <16 x double> @llvm.floor.v16f64(<16 x double> %x) nounwind readnone
store <16 x double> %y, <16 x double> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/ffloor.ll b/test/CodeGen/AMDGPU/ffloor.ll
index d7f35a45075c..720fe7a45e3d 100644
--- a/test/CodeGen/AMDGPU/ffloor.ll
+++ b/test/CodeGen/AMDGPU/ffloor.ll
@@ -5,7 +5,7 @@
; FUNC-LABEL: {{^}}floor_f32:
; SI: v_floor_f32_e32
; R600: FLOOR
-define void @floor_f32(float addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @floor_f32(float addrspace(1)* %out, float %in) {
%tmp = call float @llvm.floor.f32(float %in) #0
store float %tmp, float addrspace(1)* %out
ret void
@@ -15,7 +15,7 @@ define void @floor_f32(float addrspace(1)* %out, float %in) {
; SI: v_floor_f32_e32
; SI: v_floor_f32_e32
-define void @floor_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
+define amdgpu_kernel void @floor_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
%tmp = call <2 x float> @llvm.floor.v2f32(<2 x float> %in) #0
store <2 x float> %tmp, <2 x float> addrspace(1)* %out
ret void
@@ -31,7 +31,7 @@ define void @floor_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
; R600: FLOOR
; R600: FLOOR
; R600: FLOOR
-define void @floor_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
+define amdgpu_kernel void @floor_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
%tmp = call <4 x float> @llvm.floor.v4f32(<4 x float> %in) #0
store <4 x float> %tmp, <4 x float> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/fix-vgpr-copies.mir b/test/CodeGen/AMDGPU/fix-vgpr-copies.mir
new file mode 100644
index 000000000000..4951e0df4d3e
--- /dev/null
+++ b/test/CodeGen/AMDGPU/fix-vgpr-copies.mir
@@ -0,0 +1,44 @@
+# RUN: llc -march=amdgcn -start-after=greedy -stop-after=si-optimize-exec-masking -o - %s | FileCheck %s
+# Check that all vector instructions are emitted first and exec is changed only afterwards
+# CHECK-DAG: COPY %vgpr10_vgpr11
+# CHECK-DAG: COPY %vgpr12_vgpr13
+# CHECK: %exec = COPY
+
+---
+name: main
+alignment: 0
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%sgpr4_sgpr5' }
+ - { reg: '%sgpr6' }
+ - { reg: '%vgpr0' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 4
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %vgpr3, %vgpr10_vgpr11, %vgpr12_vgpr13
+
+ %vcc = V_CMP_NE_U32_e64 0, killed %vgpr3, implicit %exec
+ %sgpr4_sgpr5 = COPY %exec, implicit-def %exec
+ %sgpr6_sgpr7 = S_AND_B64 %sgpr4_sgpr5, killed %vcc, implicit-def dead %scc
+ %sgpr4_sgpr5 = S_XOR_B64 %sgpr6_sgpr7, killed %sgpr4_sgpr5, implicit-def dead %scc
+ %vgpr61_vgpr62 = COPY %vgpr10_vgpr11
+ %vgpr155_vgpr156 = COPY %vgpr12_vgpr13
+ %exec = S_MOV_B64_term killed %sgpr6_sgpr7
+...
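The ordering this new test enforces matters because VGPR copies execute per lane under the current exec mask: if si-optimize-exec-masking hoisted the exec write above them, lanes cleared by the new mask would skip the copies. A toy lane model (plain Python, an illustration rather than the pass's actual logic):

def v_copy(dst, src, exec_mask):
    # A VALU copy only writes lanes whose exec bit is set.
    return [s if live else d for d, s, live in zip(dst, src, exec_mask)]

src, dst = [1, 2, 3, 4], [0, 0, 0, 0]
full, narrowed = [1, 1, 1, 1], [1, 0, 1, 0]

print(v_copy(dst, src, full))      # [1, 2, 3, 4]: copy before the exec change
print(v_copy(dst, src, narrowed))  # [1, 0, 3, 0]: lanes lost if exec changes first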
diff --git a/test/CodeGen/AMDGPU/flat-address-space.ll b/test/CodeGen/AMDGPU/flat-address-space.ll
index 55b5482d031f..c867e4fca229 100644
--- a/test/CodeGen/AMDGPU/flat-address-space.ll
+++ b/test/CodeGen/AMDGPU/flat-address-space.ll
@@ -17,43 +17,43 @@
; CHECK-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], s[[LO_SREG]]
; CHECK-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], s[[HI_SREG]]
; CHECK: flat_store_dword v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}, v[[DATA]]
-define void @store_flat_i32(i32 addrspace(1)* %gptr, i32 %x) #0 {
+define amdgpu_kernel void @store_flat_i32(i32 addrspace(1)* %gptr, i32 %x) #0 {
%fptr = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
- store i32 %x, i32 addrspace(4)* %fptr, align 4
+ store volatile i32 %x, i32 addrspace(4)* %fptr, align 4
ret void
}
; CHECK-LABEL: {{^}}store_flat_i64:
; CHECK: flat_store_dwordx2
-define void @store_flat_i64(i64 addrspace(1)* %gptr, i64 %x) #0 {
+define amdgpu_kernel void @store_flat_i64(i64 addrspace(1)* %gptr, i64 %x) #0 {
%fptr = addrspacecast i64 addrspace(1)* %gptr to i64 addrspace(4)*
- store i64 %x, i64 addrspace(4)* %fptr, align 8
+ store volatile i64 %x, i64 addrspace(4)* %fptr, align 8
ret void
}
; CHECK-LABEL: {{^}}store_flat_v4i32:
; CHECK: flat_store_dwordx4
-define void @store_flat_v4i32(<4 x i32> addrspace(1)* %gptr, <4 x i32> %x) #0 {
+define amdgpu_kernel void @store_flat_v4i32(<4 x i32> addrspace(1)* %gptr, <4 x i32> %x) #0 {
%fptr = addrspacecast <4 x i32> addrspace(1)* %gptr to <4 x i32> addrspace(4)*
- store <4 x i32> %x, <4 x i32> addrspace(4)* %fptr, align 16
+ store volatile <4 x i32> %x, <4 x i32> addrspace(4)* %fptr, align 16
ret void
}
; CHECK-LABEL: {{^}}store_flat_trunc_i16:
; CHECK: flat_store_short
-define void @store_flat_trunc_i16(i16 addrspace(1)* %gptr, i32 %x) #0 {
+define amdgpu_kernel void @store_flat_trunc_i16(i16 addrspace(1)* %gptr, i32 %x) #0 {
%fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
%y = trunc i32 %x to i16
- store i16 %y, i16 addrspace(4)* %fptr, align 2
+ store volatile i16 %y, i16 addrspace(4)* %fptr, align 2
ret void
}
; CHECK-LABEL: {{^}}store_flat_trunc_i8:
; CHECK: flat_store_byte
-define void @store_flat_trunc_i8(i8 addrspace(1)* %gptr, i32 %x) #0 {
+define amdgpu_kernel void @store_flat_trunc_i8(i8 addrspace(1)* %gptr, i32 %x) #0 {
%fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
%y = trunc i32 %x to i8
- store i8 %y, i8 addrspace(4)* %fptr, align 2
+ store volatile i8 %y, i8 addrspace(4)* %fptr, align 2
ret void
}
@@ -61,36 +61,36 @@ define void @store_flat_trunc_i8(i8 addrspace(1)* %gptr, i32 %x) #0 {
; CHECK-LABEL: load_flat_i32:
; CHECK: flat_load_dword
-define void @load_flat_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %gptr) #0 {
+define amdgpu_kernel void @load_flat_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %gptr) #0 {
%fptr = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
- %fload = load i32, i32 addrspace(4)* %fptr, align 4
+ %fload = load volatile i32, i32 addrspace(4)* %fptr, align 4
store i32 %fload, i32 addrspace(1)* %out, align 4
ret void
}
; CHECK-LABEL: load_flat_i64:
; CHECK: flat_load_dwordx2
-define void @load_flat_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %gptr) #0 {
+define amdgpu_kernel void @load_flat_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %gptr) #0 {
%fptr = addrspacecast i64 addrspace(1)* %gptr to i64 addrspace(4)*
- %fload = load i64, i64 addrspace(4)* %fptr, align 8
+ %fload = load volatile i64, i64 addrspace(4)* %fptr, align 8
store i64 %fload, i64 addrspace(1)* %out, align 8
ret void
}
; CHECK-LABEL: load_flat_v4i32:
; CHECK: flat_load_dwordx4
-define void @load_flat_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %gptr) #0 {
+define amdgpu_kernel void @load_flat_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %gptr) #0 {
%fptr = addrspacecast <4 x i32> addrspace(1)* %gptr to <4 x i32> addrspace(4)*
- %fload = load <4 x i32>, <4 x i32> addrspace(4)* %fptr, align 32
+ %fload = load volatile <4 x i32>, <4 x i32> addrspace(4)* %fptr, align 32
store <4 x i32> %fload, <4 x i32> addrspace(1)* %out, align 8
ret void
}
; CHECK-LABEL: sextload_flat_i8:
; CHECK: flat_load_sbyte
-define void @sextload_flat_i8(i32 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %gptr) #0 {
+define amdgpu_kernel void @sextload_flat_i8(i32 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %gptr) #0 {
%fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
- %fload = load i8, i8 addrspace(4)* %fptr, align 4
+ %fload = load volatile i8, i8 addrspace(4)* %fptr, align 4
%ext = sext i8 %fload to i32
store i32 %ext, i32 addrspace(1)* %out, align 4
ret void
@@ -98,9 +98,9 @@ define void @sextload_flat_i8(i32 addrspace(1)* noalias %out, i8 addrspace(1)* n
; CHECK-LABEL: zextload_flat_i8:
; CHECK: flat_load_ubyte
-define void @zextload_flat_i8(i32 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %gptr) #0 {
+define amdgpu_kernel void @zextload_flat_i8(i32 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %gptr) #0 {
%fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
- %fload = load i8, i8 addrspace(4)* %fptr, align 4
+ %fload = load volatile i8, i8 addrspace(4)* %fptr, align 4
%ext = zext i8 %fload to i32
store i32 %ext, i32 addrspace(1)* %out, align 4
ret void
@@ -108,9 +108,9 @@ define void @zextload_flat_i8(i32 addrspace(1)* noalias %out, i8 addrspace(1)* n
; CHECK-LABEL: sextload_flat_i16:
; CHECK: flat_load_sshort
-define void @sextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %gptr) #0 {
+define amdgpu_kernel void @sextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %gptr) #0 {
%fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
- %fload = load i16, i16 addrspace(4)* %fptr, align 4
+ %fload = load volatile i16, i16 addrspace(4)* %fptr, align 4
%ext = sext i16 %fload to i32
store i32 %ext, i32 addrspace(1)* %out, align 4
ret void
@@ -118,9 +118,9 @@ define void @sextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)*
; CHECK-LABEL: zextload_flat_i16:
; CHECK: flat_load_ushort
-define void @zextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %gptr) #0 {
+define amdgpu_kernel void @zextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %gptr) #0 {
%fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
- %fload = load i16, i16 addrspace(4)* %fptr, align 4
+ %fload = load volatile i16, i16 addrspace(4)* %fptr, align 4
%ext = zext i16 %fload to i32
store i32 %ext, i32 addrspace(1)* %out, align 4
ret void
@@ -131,7 +131,7 @@ define void @zextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)*
; CHECK: flat_load_ubyte
; CHECK: flat_load_ubyte
; CHECK: flat_load_ubyte
-define void @flat_scratch_unaligned_load() {
+define amdgpu_kernel void @flat_scratch_unaligned_load() {
%scratch = alloca i32
%fptr = addrspacecast i32* %scratch to i32 addrspace(4)*
%ld = load volatile i32, i32 addrspace(4)* %fptr, align 1
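The checks above require four flat_load_ubyte instructions for this align-1 i32: without a known 4-byte alignment, the access is split into byte loads and reassembled. Roughly (Python, little-endian as on AMDGPU, purely illustrative):

def load_dword_unaligned(mem, addr):
    # align 1 forces four flat_load_ubyte plus shifts to rebuild the dword.
    return sum(mem[addr + i] << (8 * i) for i in range(4))

mem = {0: 0x78, 1: 0x56, 2: 0x34, 3: 0x12}
print(hex(load_dword_unaligned(mem, 0)))  # 0x12345678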
@@ -143,7 +143,7 @@ define void @flat_scratch_unaligned_load() {
; CHECK: flat_store_byte
; CHECK: flat_store_byte
; CHECK: flat_store_byte
-define void @flat_scratch_unaligned_store() {
+define amdgpu_kernel void @flat_scratch_unaligned_store() {
%scratch = alloca i32
%fptr = addrspacecast i32* %scratch to i32 addrspace(4)*
store volatile i32 0, i32 addrspace(4)* %fptr, align 1
@@ -154,7 +154,7 @@ define void @flat_scratch_unaligned_store() {
; HSA: flat_load_dword
; HSA: flat_load_dword
; FIXME: These tests are broken for os = mesa3d, because it doesn't initialize flat_scr
-define void @flat_scratch_multidword_load() {
+define amdgpu_kernel void @flat_scratch_multidword_load() {
%scratch = alloca <2 x i32>
%fptr = addrspacecast <2 x i32>* %scratch to <2 x i32> addrspace(4)*
%ld = load volatile <2 x i32>, <2 x i32> addrspace(4)* %fptr
@@ -165,7 +165,7 @@ define void @flat_scratch_multidword_load() {
; HSA: flat_store_dword
; HSA: flat_store_dword
; FIXME: These tests are broken for os = mesa3d, because it doesn't initialize flat_scr
-define void @flat_scratch_multidword_store() {
+define amdgpu_kernel void @flat_scratch_multidword_store() {
%scratch = alloca <2 x i32>
%fptr = addrspacecast <2 x i32>* %scratch to <2 x i32> addrspace(4)*
store volatile <2 x i32> zeroinitializer, <2 x i32> addrspace(4)* %fptr
diff --git a/test/CodeGen/AMDGPU/flat-for-global-subtarget-feature.ll b/test/CodeGen/AMDGPU/flat-for-global-subtarget-feature.ll
index df9ba00c6974..dac1500cd46c 100644
--- a/test/CodeGen/AMDGPU/flat-for-global-subtarget-feature.ll
+++ b/test/CodeGen/AMDGPU/flat-for-global-subtarget-feature.ll
@@ -23,7 +23,7 @@
; NOHSA-DEFAULT: buffer_store_dword
; NOHSA-NODEFAULT: flat_store_dword
; NOHSA-NOADDR64: flat_store_dword
-define void @test(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @test(i32 addrspace(1)* %out) {
entry:
store i32 0, i32 addrspace(1)* %out
ret void
@@ -36,7 +36,7 @@ entry:
; NOHSA-DEFAULT: buffer_store_dword
; NOHSA-NODEFAULT: flat_store_dword
; NOHSA-NOADDR64: flat_store_dword
-define void @test_addr64(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @test_addr64(i32 addrspace(1)* %out) {
entry:
%out.addr = alloca i32 addrspace(1)*, align 4
diff --git a/test/CodeGen/AMDGPU/flat-scratch-reg.ll b/test/CodeGen/AMDGPU/flat-scratch-reg.ll
index b71c8bcb76c7..23f40daf3d23 100644
--- a/test/CodeGen/AMDGPU/flat-scratch-reg.ll
+++ b/test/CodeGen/AMDGPU/flat-scratch-reg.ll
@@ -19,7 +19,7 @@
; CI: ; NumSgprs: 8
; VI-NOXNACK: ; NumSgprs: 8
; VI-XNACK: ; NumSgprs: 12
-define void @no_vcc_no_flat() {
+define amdgpu_kernel void @no_vcc_no_flat() {
entry:
call void asm sideeffect "", "~{SGPR7}"()
ret void
@@ -33,7 +33,7 @@ entry:
; CI: ; NumSgprs: 10
; VI-NOXNACK: ; NumSgprs: 10
; VI-XNACK: ; NumSgprs: 12
-define void @vcc_no_flat() {
+define amdgpu_kernel void @vcc_no_flat() {
entry:
call void asm sideeffect "", "~{SGPR7},~{VCC}"()
ret void
@@ -50,7 +50,7 @@ entry:
; HSA-CI: ; NumSgprs: 8
; HSA-VI-NOXNACK: ; NumSgprs: 8
; HSA-VI-XNACK: ; NumSgprs: 12
-define void @no_vcc_flat() {
+define amdgpu_kernel void @no_vcc_flat() {
entry:
call void asm sideeffect "", "~{SGPR7},~{FLAT_SCR}"()
ret void
@@ -66,7 +66,7 @@ entry:
; HSA-CI: ; NumSgprs: 10
; HSA-VI-NOXNACK: ; NumSgprs: 10
; HSA-VI-XNACK: ; NumSgprs: 12
-define void @vcc_flat() {
+define amdgpu_kernel void @vcc_flat() {
entry:
call void asm sideeffect "", "~{SGPR7},~{VCC},~{FLAT_SCR}"()
ret void
diff --git a/test/CodeGen/AMDGPU/flat_atomics.ll b/test/CodeGen/AMDGPU/flat_atomics.ll
index 7400dbcf8909..cc95d80570e0 100644
--- a/test/CodeGen/AMDGPU/flat_atomics.ll
+++ b/test/CodeGen/AMDGPU/flat_atomics.ll
@@ -3,7 +3,7 @@
; GCN-LABEL: {{^}}atomic_add_i32_offset:
; GCN: flat_atomic_add v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}{{$}}
-define void @atomic_add_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_add_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -13,7 +13,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i32_ret_offset:
; GCN: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_add_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_add_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
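Throughout this file the checks come in two flavors per operation: a plain form, and a _ret form whose CHECK line requires the glc bit and a following flat_store_dword of [[RET]]. On GCN, glc on an atomic makes it return the pre-op memory value; roughly, a Python model for illustration (not the ISA definition):

def flat_atomic_add(mem, addr, val, glc=False):
    old = mem[addr]
    mem[addr] = old + val
    return old if glc else None  # 'glc' variants hand back the pre-op value

mem = {0: 7}
print(flat_atomic_add(mem, 0, 5, glc=True))  # 7 -> stored to %out2 in the _ret tests
print(mem[0])                                # 12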
@@ -23,7 +23,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i32_addr64_offset:
; GCN: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_add_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -34,7 +34,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i32_ret_addr64_offset:
; GCN: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_add_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -45,7 +45,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i32:
; GCN: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_add_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_add_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
%val = atomicrmw volatile add i32 addrspace(4)* %out, i32 %in seq_cst
ret void
@@ -54,7 +54,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i32_ret:
; GCN: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_add_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_add_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile add i32 addrspace(4)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(4)* %out2
@@ -63,7 +63,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i32_addr64:
; GCN: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_add_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile add i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -73,7 +73,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i32_ret_addr64:
; GCN: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_add_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile add i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -83,7 +83,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i32_offset:
; GCN: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_and_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_and_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile and i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -93,7 +93,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i32_ret_offset:
; GCN: flat_atomic_and [[RET:v[0-9]]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_and_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_and_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile and i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -103,7 +103,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i32_addr64_offset:
; GCN: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_and_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -114,7 +114,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i32_ret_addr64_offset:
; GCN: flat_atomic_and [[RET:v[0-9]]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_and_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -125,7 +125,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i32:
; GCN: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_and_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_and_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
%val = atomicrmw volatile and i32 addrspace(4)* %out, i32 %in seq_cst
ret void
@@ -134,7 +134,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i32_ret:
; GCN: flat_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_and_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_and_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile and i32 addrspace(4)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(4)* %out2
@@ -143,7 +143,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i32_addr64:
; GCN: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_and_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile and i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -153,7 +153,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i32_ret_addr64:
; GCN: flat_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_and_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile and i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -163,7 +163,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i32_offset:
; GCN: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_sub_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_sub_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile sub i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -173,7 +173,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i32_ret_offset:
; GCN: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_sub_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_sub_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile sub i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -183,7 +183,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i32_addr64_offset:
; GCN: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_sub_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -194,7 +194,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i32_ret_addr64_offset:
; GCN: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_sub_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -205,7 +205,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i32:
; GCN: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_sub_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_sub_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
%val = atomicrmw volatile sub i32 addrspace(4)* %out, i32 %in seq_cst
ret void
@@ -214,7 +214,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i32_ret:
; GCN: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_sub_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_sub_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile sub i32 addrspace(4)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(4)* %out2
@@ -223,7 +223,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i32_addr64:
; GCN: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_sub_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile sub i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -233,7 +233,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i32_ret_addr64:
; GCN: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_sub_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile sub i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -243,7 +243,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i32_offset:
; GCN: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_max_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_max_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile max i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -253,7 +253,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i32_ret_offset:
; GCN: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_max_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_max_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile max i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -263,7 +263,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i32_addr64_offset:
; GCN: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_max_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -274,7 +274,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i32_ret_addr64_offset:
; GCN: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_max_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -285,7 +285,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i32:
; GCN: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_max_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_max_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
%val = atomicrmw volatile max i32 addrspace(4)* %out, i32 %in seq_cst
ret void
@@ -294,7 +294,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i32_ret:
; GCN: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_max_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_max_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile max i32 addrspace(4)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(4)* %out2
@@ -303,7 +303,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i32_addr64:
; GCN: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_max_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile max i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -313,7 +313,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i32_ret_addr64:
; GCN: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_max_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile max i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -323,7 +323,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i32_offset:
; GCN: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_umax_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_umax_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile umax i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -333,7 +333,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i32_ret_offset:
; GCN: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_umax_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_umax_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile umax i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -343,7 +343,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i32_addr64_offset:
; GCN: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_umax_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -354,7 +354,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i32_ret_addr64_offset:
; GCN: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_umax_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -365,7 +365,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i32:
; GCN: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_umax_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_umax_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
%val = atomicrmw volatile umax i32 addrspace(4)* %out, i32 %in seq_cst
ret void
@@ -374,7 +374,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i32_ret:
; GCN: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_umax_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_umax_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile umax i32 addrspace(4)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(4)* %out2
@@ -383,7 +383,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i32_addr64:
; GCN: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_umax_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile umax i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -393,7 +393,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i32_ret_addr64:
; GCN: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_umax_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile umax i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -403,7 +403,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i32_offset:
; GCN: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_min_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_min_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile min i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -413,7 +413,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i32_ret_offset:
; GCN: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_min_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_min_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile min i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -423,7 +423,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i32_addr64_offset:
; GCN: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_min_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -434,7 +434,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i32_ret_addr64_offset:
; GCN: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_min_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -445,7 +445,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i32:
; GCN: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_min_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_min_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
%val = atomicrmw volatile min i32 addrspace(4)* %out, i32 %in seq_cst
ret void
@@ -454,7 +454,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i32_ret:
; GCN: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_min_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_min_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile min i32 addrspace(4)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(4)* %out2
@@ -463,7 +463,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i32_addr64:
; GCN: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_min_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile min i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -473,7 +473,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i32_ret_addr64:
; GCN: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_min_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile min i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -483,7 +483,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i32_offset:
; GCN: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_umin_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_umin_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile umin i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -493,7 +493,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i32_ret_offset:
; GCN: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_umin_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_umin_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile umin i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -503,7 +503,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i32_addr64_offset:
; GCN: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_umin_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -514,7 +514,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i32_ret_addr64_offset:
; GCN: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_umin_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -525,16 +525,16 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i32:
; GCN: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_umin_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_umin_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
%val = atomicrmw volatile umin i32 addrspace(4)* %out, i32 %in seq_cst
ret void
}
; GCN-LABEL: {{^}}atomic_umin_i32_ret:
-; GCN: flat_atomic_umin v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
+; GCN: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_umin_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_umin_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile umin i32 addrspace(4)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(4)* %out2
@@ -543,7 +543,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i32_addr64:
; GCN: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_umin_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile umin i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -553,7 +553,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i32_ret_addr64:
; GCN: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]{{$}}
- define void @atomic_umin_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile umin i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -563,7 +563,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i32_offset:
; GCN: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}{{$}}
-define void @atomic_or_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_or_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile or i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -573,7 +573,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i32_ret_offset:
; GCN: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_or_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_or_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile or i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -583,7 +583,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i32_addr64_offset:
; GCN: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}{{$}}
-define void @atomic_or_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -594,7 +594,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i32_ret_addr64_offset:
; GCN: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_or_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -605,7 +605,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i32:
; GCN: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_or_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_or_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
%val = atomicrmw volatile or i32 addrspace(4)* %out, i32 %in seq_cst
ret void
@@ -614,7 +614,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i32_ret:
; GCN: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_or_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_or_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile or i32 addrspace(4)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(4)* %out2
@@ -623,7 +623,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i32_addr64:
; GCN: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_or_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile or i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -633,7 +633,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i32_ret_addr64:
; GCN: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_or_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile or i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -643,7 +643,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i32_offset:
; GCN: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}{{$}}
-define void @atomic_xchg_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_xchg_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile xchg i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -653,7 +653,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i32_ret_offset:
; GCN: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_xchg_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_xchg_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile xchg i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -663,7 +663,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i32_addr64_offset:
; GCN: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}{{$}}
-define void @atomic_xchg_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -674,7 +674,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i32_ret_addr64_offset:
; GCN: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_xchg_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -685,7 +685,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i32:
; GCN: flat_atomic_swap v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}{{$}}
-define void @atomic_xchg_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_xchg_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
%val = atomicrmw volatile xchg i32 addrspace(4)* %out, i32 %in seq_cst
ret void
@@ -694,7 +694,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i32_ret:
; GCN: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_xchg_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_xchg_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile xchg i32 addrspace(4)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(4)* %out2
@@ -703,7 +703,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i32_addr64:
; GCN: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_xchg_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile xchg i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -713,7 +713,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i32_ret_addr64:
; GCN: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_xchg_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile xchg i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -725,7 +725,7 @@ entry:
; GCN-LABEL: {{^}}atomic_cmpxchg_i32_offset:
; GCN: flat_atomic_cmpswap v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
-define void @atomic_cmpxchg_i32_offset(i32 addrspace(4)* %out, i32 %in, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_offset(i32 addrspace(4)* %out, i32 %in, i32 %old) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = cmpxchg volatile i32 addrspace(4)* %gep, i32 %old, i32 %in seq_cst seq_cst
@@ -735,7 +735,7 @@ entry:
; GCN-LABEL: {{^}}atomic_cmpxchg_i32_ret_offset:
; GCN: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RET]]
-define void @atomic_cmpxchg_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i32 %old) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = cmpxchg volatile i32 addrspace(4)* %gep, i32 %old, i32 %in seq_cst seq_cst
@@ -746,7 +746,7 @@ entry:
; GCN-LABEL: {{^}}atomic_cmpxchg_i32_addr64_offset:
; GCN: flat_atomic_cmpswap v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
-define void @atomic_cmpxchg_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index, i32 %old) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -757,7 +757,7 @@ entry:
; GCN-LABEL: {{^}}atomic_cmpxchg_i32_ret_addr64_offset:
; GCN: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RET]]
-define void @atomic_cmpxchg_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index, i32 %old) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -769,7 +769,7 @@ entry:
; GCN-LABEL: {{^}}atomic_cmpxchg_i32:
; GCN: flat_atomic_cmpswap v[{{[0-9]+}}:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
-define void @atomic_cmpxchg_i32(i32 addrspace(4)* %out, i32 %in, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32(i32 addrspace(4)* %out, i32 %in, i32 %old) {
entry:
%val = cmpxchg volatile i32 addrspace(4)* %out, i32 %old, i32 %in seq_cst seq_cst
ret void
@@ -778,7 +778,7 @@ entry:
; GCN-LABEL: {{^}}atomic_cmpxchg_i32_ret:
; GCN: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}] glc
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RET]]
-define void @atomic_cmpxchg_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i32 %old) {
entry:
%val = cmpxchg volatile i32 addrspace(4)* %out, i32 %old, i32 %in seq_cst seq_cst
%flag = extractvalue { i32, i1 } %val, 0
@@ -788,7 +788,7 @@ entry:
; GCN-LABEL: {{^}}atomic_cmpxchg_i32_addr64:
; GCN: flat_atomic_cmpswap v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]{{$}}
-define void @atomic_cmpxchg_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index, i32 %old) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = cmpxchg volatile i32 addrspace(4)* %ptr, i32 %old, i32 %in seq_cst seq_cst
@@ -798,7 +798,7 @@ entry:
; GCN-LABEL: {{^}}atomic_cmpxchg_i32_ret_addr64:
; GCN: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RET]]
-define void @atomic_cmpxchg_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index, i32 %old) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = cmpxchg volatile i32 addrspace(4)* %ptr, i32 %old, i32 %in seq_cst seq_cst
@@ -809,7 +809,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i32_offset:
; GCN: flat_atomic_xor v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}{{$}}
-define void @atomic_xor_i32_offset(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_xor_i32_offset(i32 addrspace(4)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile xor i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -819,7 +819,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i32_ret_offset:
; GCN: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_xor_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_xor_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
%val = atomicrmw volatile xor i32 addrspace(4)* %gep, i32 %in seq_cst
@@ -829,7 +829,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i32_addr64_offset:
; GCN: flat_atomic_xor v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_xor_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -840,7 +840,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i32_ret_addr64_offset:
; GCN: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_xor_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i32_ret_addr64_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -851,7 +851,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i32:
; GCN: flat_atomic_xor v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}{{$}}
-define void @atomic_xor_i32(i32 addrspace(4)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_xor_i32(i32 addrspace(4)* %out, i32 %in) {
entry:
%val = atomicrmw volatile xor i32 addrspace(4)* %out, i32 %in seq_cst
ret void
@@ -860,7 +860,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i32_ret:
; GCN: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_xor_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_xor_i32_ret(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile xor i32 addrspace(4)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(4)* %out2
@@ -869,7 +869,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i32_addr64:
; GCN: flat_atomic_xor v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_xor_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i32_addr64(i32 addrspace(4)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile xor i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -879,7 +879,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i32_ret_addr64:
; GCN: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_xor_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i32_ret_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%val = atomicrmw volatile xor i32 addrspace(4)* %ptr, i32 %in seq_cst
@@ -890,7 +890,7 @@ entry:
; GCN-LABEL: {{^}}atomic_load_i32_offset:
; GCN: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_load_i32_offset(i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+define amdgpu_kernel void @atomic_load_i32_offset(i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %in, i32 4
%val = load atomic i32, i32 addrspace(4)* %gep seq_cst, align 4
@@ -901,7 +901,7 @@ entry:
; GCN-LABEL: {{^}}atomic_load_i32:
; GCN: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}] glc
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_load_i32(i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+define amdgpu_kernel void @atomic_load_i32(i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
entry:
%val = load atomic i32, i32 addrspace(4)* %in seq_cst, align 4
store i32 %val, i32 addrspace(4)* %out
@@ -911,7 +911,7 @@ entry:
; GCN-LABEL: {{^}}atomic_load_i32_addr64_offset:
; GCN: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_load_i32_addr64_offset(i32 addrspace(4)* %in, i32 addrspace(4)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_load_i32_addr64_offset(i32 addrspace(4)* %in, i32 addrspace(4)* %out, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %in, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -923,7 +923,7 @@ entry:
; GCN-LABEL: {{^}}atomic_load_i32_addr64:
; GCN: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_load_i32_addr64(i32 addrspace(4)* %in, i32 addrspace(4)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_load_i32_addr64(i32 addrspace(4)* %in, i32 addrspace(4)* %out, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %in, i64 %index
%val = load atomic i32, i32 addrspace(4)* %ptr seq_cst, align 4
@@ -933,7 +933,7 @@ entry:
; GCN-LABEL: {{^}}atomic_store_i32_offset:
; GCN: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}}
-define void @atomic_store_i32_offset(i32 %in, i32 addrspace(4)* %out) {
+define amdgpu_kernel void @atomic_store_i32_offset(i32 %in, i32 addrspace(4)* %out) {
entry:
%gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
store atomic i32 %in, i32 addrspace(4)* %gep seq_cst, align 4
@@ -942,7 +942,7 @@ entry:
; GCN-LABEL: {{^}}atomic_store_i32:
; GCN: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}}
-define void @atomic_store_i32(i32 %in, i32 addrspace(4)* %out) {
+define amdgpu_kernel void @atomic_store_i32(i32 %in, i32 addrspace(4)* %out) {
entry:
store atomic i32 %in, i32 addrspace(4)* %out seq_cst, align 4
ret void
@@ -950,7 +950,7 @@ entry:
; GCN-LABEL: {{^}}atomic_store_i32_addr64_offset:
; GCN: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}}
-define void @atomic_store_i32_addr64_offset(i32 %in, i32 addrspace(4)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_store_i32_addr64_offset(i32 %in, i32 addrspace(4)* %out, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
@@ -960,7 +960,7 @@ entry:
; GCN-LABEL: {{^}}atomic_store_i32_addr64:
; GCN: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}}
-define void @atomic_store_i32_addr64(i32 %in, i32 addrspace(4)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_store_i32_addr64(i32 %in, i32 addrspace(4)* %out, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
store atomic i32 %in, i32 addrspace(4)* %ptr seq_cst, align 4
diff --git a/test/CodeGen/AMDGPU/flat_atomics_i64.ll b/test/CodeGen/AMDGPU/flat_atomics_i64.ll
index 0bd6c2dd5b86..723dde9ab68f 100644
--- a/test/CodeGen/AMDGPU/flat_atomics_i64.ll
+++ b/test/CodeGen/AMDGPU/flat_atomics_i64.ll
@@ -3,7 +3,7 @@
; GCN-LABEL: {{^}}atomic_add_i64_offset:
; GCN: flat_atomic_add_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}
-define void @atomic_add_i64_offset(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_add_i64_offset(i64 addrspace(4)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -13,7 +13,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i64_ret_offset:
; GCN: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_add_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_add_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -23,7 +23,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i64_addr64_offset:
; GCN: flat_atomic_add_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}
-define void @atomic_add_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -34,7 +34,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i64_ret_addr64_offset:
; GCN: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_add_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -45,7 +45,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i64:
; GCN: flat_atomic_add_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_add_i64(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_add_i64(i64 addrspace(4)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile add i64 addrspace(4)* %out, i64 %in seq_cst
ret void
@@ -54,7 +54,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i64_ret:
; GCN: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_add_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_add_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile add i64 addrspace(4)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(4)* %out2
@@ -63,7 +63,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i64_addr64:
; GCN: flat_atomic_add_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_add_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile add i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -73,7 +73,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i64_ret_addr64:
; GCN: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_add_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile add i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -83,7 +83,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i64_offset:
; GCN: flat_atomic_and_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_and_i64_offset(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_and_i64_offset(i64 addrspace(4)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile and i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -93,7 +93,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i64_ret_offset:
; GCN: flat_atomic_and_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_and_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_and_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile and i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -103,7 +103,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i64_addr64_offset:
; GCN: flat_atomic_and_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_and_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -114,7 +114,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i64_ret_addr64_offset:
; GCN: flat_atomic_and_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_and_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -125,7 +125,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i64:
; GCN: flat_atomic_and_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_and_i64(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_and_i64(i64 addrspace(4)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile and i64 addrspace(4)* %out, i64 %in seq_cst
ret void
@@ -134,7 +134,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i64_ret:
; GCN: flat_atomic_and_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_and_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_and_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile and i64 addrspace(4)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(4)* %out2
@@ -143,7 +143,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i64_addr64:
; GCN: flat_atomic_and_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_and_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile and i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -153,7 +153,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i64_ret_addr64:
; GCN: flat_atomic_and_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_and_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile and i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -163,7 +163,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i64_offset:
; GCN: flat_atomic_sub_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_sub_i64_offset(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_sub_i64_offset(i64 addrspace(4)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile sub i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -173,7 +173,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i64_ret_offset:
; GCN: flat_atomic_sub_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_sub_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_sub_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile sub i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -183,7 +183,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i64_addr64_offset:
; GCN: flat_atomic_sub_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_sub_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -194,7 +194,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i64_ret_addr64_offset:
; GCN: flat_atomic_sub_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_sub_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -205,7 +205,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i64:
; GCN: flat_atomic_sub_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_sub_i64(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_sub_i64(i64 addrspace(4)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile sub i64 addrspace(4)* %out, i64 %in seq_cst
ret void
@@ -214,7 +214,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i64_ret:
; GCN: flat_atomic_sub_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_sub_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_sub_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile sub i64 addrspace(4)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(4)* %out2
@@ -223,7 +223,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i64_addr64:
; GCN: flat_atomic_sub_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_sub_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile sub i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -233,7 +233,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i64_ret_addr64:
; GCN: flat_atomic_sub_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_sub_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile sub i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -243,7 +243,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i64_offset:
; GCN: flat_atomic_smax_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_max_i64_offset(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_max_i64_offset(i64 addrspace(4)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile max i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -253,7 +253,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i64_ret_offset:
; GCN: flat_atomic_smax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_max_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_max_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile max i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -263,7 +263,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i64_addr64_offset:
; GCN: flat_atomic_smax_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_max_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -274,7 +274,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i64_ret_addr64_offset:
; GCN: flat_atomic_smax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_max_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -285,7 +285,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i64:
; GCN: flat_atomic_smax_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_max_i64(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_max_i64(i64 addrspace(4)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile max i64 addrspace(4)* %out, i64 %in seq_cst
ret void
@@ -294,7 +294,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i64_ret:
; GCN: flat_atomic_smax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_max_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_max_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile max i64 addrspace(4)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(4)* %out2
@@ -303,7 +303,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i64_addr64:
; GCN: flat_atomic_smax_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_max_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile max i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -313,7 +313,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i64_ret_addr64:
; GCN: flat_atomic_smax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_max_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile max i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -323,7 +323,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i64_offset:
; GCN: flat_atomic_umax_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_umax_i64_offset(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_umax_i64_offset(i64 addrspace(4)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile umax i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -333,7 +333,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i64_ret_offset:
; GCN: flat_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_umax_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_umax_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile umax i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -343,7 +343,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i64_addr64_offset:
; GCN: flat_atomic_umax_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_umax_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -354,7 +354,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i64_ret_addr64_offset:
; GCN: flat_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_umax_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -365,7 +365,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i64:
; GCN: flat_atomic_umax_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_umax_i64(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_umax_i64(i64 addrspace(4)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile umax i64 addrspace(4)* %out, i64 %in seq_cst
ret void
@@ -374,7 +374,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i64_ret:
; GCN: flat_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_umax_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_umax_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile umax i64 addrspace(4)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(4)* %out2
@@ -383,7 +383,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i64_addr64:
; GCN: flat_atomic_umax_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_umax_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile umax i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -393,7 +393,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i64_ret_addr64:
; GCN: flat_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_umax_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile umax i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -403,7 +403,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i64_offset:
; GCN: flat_atomic_smin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_min_i64_offset(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_min_i64_offset(i64 addrspace(4)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile min i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -413,7 +413,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i64_ret_offset:
; GCN: flat_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_min_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_min_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile min i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -423,7 +423,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i64_addr64_offset:
; GCN: flat_atomic_smin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_min_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -434,7 +434,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i64_ret_addr64_offset:
; GCN: flat_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_min_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -445,7 +445,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i64:
; GCN: flat_atomic_smin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_min_i64(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_min_i64(i64 addrspace(4)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile min i64 addrspace(4)* %out, i64 %in seq_cst
ret void
@@ -454,7 +454,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i64_ret:
; GCN: flat_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_min_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_min_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile min i64 addrspace(4)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(4)* %out2
@@ -463,7 +463,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i64_addr64:
; GCN: flat_atomic_smin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_min_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile min i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -473,7 +473,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i64_ret_addr64:
; GCN: flat_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_min_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile min i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -483,7 +483,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i64_offset:
; GCN: flat_atomic_umin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_umin_i64_offset(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_umin_i64_offset(i64 addrspace(4)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile umin i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -493,7 +493,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i64_ret_offset:
; GCN: flat_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_umin_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_umin_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile umin i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -503,7 +503,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i64_addr64_offset:
; GCN: flat_atomic_umin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_umin_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -514,7 +514,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i64_ret_addr64_offset:
; GCN: flat_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_umin_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -525,7 +525,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i64:
; GCN: flat_atomic_umin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_umin_i64(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_umin_i64(i64 addrspace(4)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile umin i64 addrspace(4)* %out, i64 %in seq_cst
ret void
@@ -534,7 +534,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i64_ret:
; GCN: flat_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_umin_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_umin_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile umin i64 addrspace(4)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(4)* %out2
@@ -543,7 +543,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i64_addr64:
; GCN: flat_atomic_umin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_umin_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile umin i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -553,7 +553,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i64_ret_addr64:
; GCN: flat_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_umin_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile umin i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -563,7 +563,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i64_offset:
; GCN: flat_atomic_or_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_or_i64_offset(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_or_i64_offset(i64 addrspace(4)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile or i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -573,7 +573,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i64_ret_offset:
; GCN: flat_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_or_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_or_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile or i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -583,7 +583,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i64_addr64_offset:
; GCN: flat_atomic_or_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_or_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -594,7 +594,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i64_ret_addr64_offset:
; GCN: flat_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_or_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -605,7 +605,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i64:
; GCN: flat_atomic_or_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_or_i64(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_or_i64(i64 addrspace(4)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile or i64 addrspace(4)* %out, i64 %in seq_cst
ret void
@@ -614,7 +614,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i64_ret:
; GCN: flat_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_or_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_or_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile or i64 addrspace(4)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(4)* %out2
@@ -623,7 +623,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i64_addr64:
; GCN: flat_atomic_or_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_or_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile or i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -633,7 +633,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i64_ret_addr64:
; GCN: flat_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_or_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile or i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -643,7 +643,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i64_offset:
; GCN: flat_atomic_swap_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_xchg_i64_offset(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_xchg_i64_offset(i64 addrspace(4)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile xchg i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -653,7 +653,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i64_ret_offset:
; GCN: flat_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_xchg_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_xchg_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile xchg i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -663,7 +663,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i64_addr64_offset:
; GCN: flat_atomic_swap_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_xchg_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -674,7 +674,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i64_ret_addr64_offset:
; GCN: flat_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_xchg_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -685,7 +685,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i64:
; GCN: flat_atomic_swap_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_xchg_i64(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_xchg_i64(i64 addrspace(4)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile xchg i64 addrspace(4)* %out, i64 %in seq_cst
ret void
@@ -694,7 +694,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i64_ret:
; GCN: flat_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_xchg_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_xchg_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile xchg i64 addrspace(4)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(4)* %out2
@@ -703,7 +703,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i64_addr64:
; GCN: flat_atomic_swap_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_xchg_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile xchg i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -713,7 +713,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i64_ret_addr64:
; GCN: flat_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_xchg_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile xchg i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -723,7 +723,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i64_offset:
; GCN: flat_atomic_xor_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_xor_i64_offset(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_xor_i64_offset(i64 addrspace(4)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile xor i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -733,7 +733,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i64_ret_offset:
; GCN: flat_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_xor_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_xor_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%tmp0 = atomicrmw volatile xor i64 addrspace(4)* %gep, i64 %in seq_cst
@@ -743,7 +743,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i64_addr64_offset:
; GCN: flat_atomic_xor_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_xor_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -754,7 +754,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i64_ret_addr64_offset:
; GCN: flat_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_xor_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -765,7 +765,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i64:
; GCN: flat_atomic_xor_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_xor_i64(i64 addrspace(4)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_xor_i64(i64 addrspace(4)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile xor i64 addrspace(4)* %out, i64 %in seq_cst
ret void
@@ -774,7 +774,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i64_ret:
; GCN: flat_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_xor_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_xor_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile xor i64 addrspace(4)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(4)* %out2
@@ -783,7 +783,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i64_addr64:
; GCN: flat_atomic_xor_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_xor_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile xor i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -793,7 +793,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i64_ret_addr64:
; GCN: flat_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_xor_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%tmp0 = atomicrmw volatile xor i64 addrspace(4)* %ptr, i64 %in seq_cst
@@ -804,7 +804,7 @@ entry:
; GCN-LABEL: {{^}}atomic_load_i64_offset:
; GCN: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_load_i64_offset(i64 addrspace(4)* %in, i64 addrspace(4)* %out) {
+define amdgpu_kernel void @atomic_load_i64_offset(i64 addrspace(4)* %in, i64 addrspace(4)* %out) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %in, i64 4
%val = load atomic i64, i64 addrspace(4)* %gep seq_cst, align 8
@@ -815,7 +815,7 @@ entry:
; GCN-LABEL: {{^}}atomic_load_i64:
; GCN: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_load_i64(i64 addrspace(4)* %in, i64 addrspace(4)* %out) {
+define amdgpu_kernel void @atomic_load_i64(i64 addrspace(4)* %in, i64 addrspace(4)* %out) {
entry:
%val = load atomic i64, i64 addrspace(4)* %in seq_cst, align 8
store i64 %val, i64 addrspace(4)* %out
@@ -825,7 +825,7 @@ entry:
; GCN-LABEL: {{^}}atomic_load_i64_addr64_offset:
; GCN: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_load_i64_addr64_offset(i64 addrspace(4)* %in, i64 addrspace(4)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_load_i64_addr64_offset(i64 addrspace(4)* %in, i64 addrspace(4)* %out, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %in, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -837,7 +837,7 @@ entry:
; GCN-LABEL: {{^}}atomic_load_i64_addr64:
; GCN: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
-define void @atomic_load_i64_addr64(i64 addrspace(4)* %in, i64 addrspace(4)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_load_i64_addr64(i64 addrspace(4)* %in, i64 addrspace(4)* %out, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %in, i64 %index
%val = load atomic i64, i64 addrspace(4)* %ptr seq_cst, align 8
@@ -847,7 +847,7 @@ entry:
; GCN-LABEL: {{^}}atomic_store_i64_offset:
; GCN: flat_store_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
-define void @atomic_store_i64_offset(i64 %in, i64 addrspace(4)* %out) {
+define amdgpu_kernel void @atomic_store_i64_offset(i64 %in, i64 addrspace(4)* %out) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
store atomic i64 %in, i64 addrspace(4)* %gep seq_cst, align 8
@@ -856,7 +856,7 @@ entry:
; GCN-LABEL: {{^}}atomic_store_i64:
; GCN: flat_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, v[{{[0-9]+}}:{{[0-9]+}}] glc
-define void @atomic_store_i64(i64 %in, i64 addrspace(4)* %out) {
+define amdgpu_kernel void @atomic_store_i64(i64 %in, i64 addrspace(4)* %out) {
entry:
store atomic i64 %in, i64 addrspace(4)* %out seq_cst, align 8
ret void
@@ -864,7 +864,7 @@ entry:
; GCN-LABEL: {{^}}atomic_store_i64_addr64_offset:
; GCN: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}] glc{{$}}
-define void @atomic_store_i64_addr64_offset(i64 %in, i64 addrspace(4)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_store_i64_addr64_offset(i64 %in, i64 addrspace(4)* %out, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -874,7 +874,7 @@ entry:
; GCN-LABEL: {{^}}atomic_store_i64_addr64:
; GCN: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}] glc{{$}}
-define void @atomic_store_i64_addr64(i64 %in, i64 addrspace(4)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_store_i64_addr64(i64 %in, i64 addrspace(4)* %out, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
store atomic i64 %in, i64 addrspace(4)* %ptr seq_cst, align 8
@@ -883,7 +883,7 @@ entry:
; GCN-LABEL: {{^}}atomic_cmpxchg_i64_offset:
; GCN: flat_atomic_cmpswap_x2 v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
-define void @atomic_cmpxchg_i64_offset(i64 addrspace(4)* %out, i64 %in, i64 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i64_offset(i64 addrspace(4)* %out, i64 %in, i64 %old) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%val = cmpxchg volatile i64 addrspace(4)* %gep, i64 %old, i64 %in seq_cst seq_cst
@@ -892,7 +892,7 @@ entry:
; GCN-LABEL: {{^}}atomic_cmpxchg_i64_soffset:
; GCN: flat_atomic_cmpswap_x2 v[{{[0-9]+}}:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
-define void @atomic_cmpxchg_i64_soffset(i64 addrspace(4)* %out, i64 %in, i64 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i64_soffset(i64 addrspace(4)* %out, i64 %in, i64 %old) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 9000
%val = cmpxchg volatile i64 addrspace(4)* %gep, i64 %old, i64 %in seq_cst seq_cst
@@ -902,7 +902,7 @@ entry:
; GCN-LABEL: {{^}}atomic_cmpxchg_i64_ret_offset:
; GCN: flat_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]{{:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[RET]]:
-define void @atomic_cmpxchg_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %old) {
entry:
%gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
%val = cmpxchg volatile i64 addrspace(4)* %gep, i64 %old, i64 %in seq_cst seq_cst
@@ -913,7 +913,7 @@ entry:
; GCN-LABEL: {{^}}atomic_cmpxchg_i64_addr64_offset:
; GCN: flat_atomic_cmpswap_x2 v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
-define void @atomic_cmpxchg_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index, i64 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index, i64 %old) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -924,7 +924,7 @@ entry:
; GCN-LABEL: {{^}}atomic_cmpxchg_i64_ret_addr64_offset:
; GCN: flat_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[RET]]:
-define void @atomic_cmpxchg_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index, i64 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index, i64 %old) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
@@ -936,7 +936,7 @@ entry:
; GCN-LABEL: {{^}}atomic_cmpxchg_i64:
; GCN: flat_atomic_cmpswap_x2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]{{$}}
-define void @atomic_cmpxchg_i64(i64 addrspace(4)* %out, i64 %in, i64 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i64(i64 addrspace(4)* %out, i64 %in, i64 %old) {
entry:
%val = cmpxchg volatile i64 addrspace(4)* %out, i64 %old, i64 %in seq_cst seq_cst
ret void
@@ -945,7 +945,7 @@ entry:
; GCN-LABEL: {{^}}atomic_cmpxchg_i64_ret:
; GCN: flat_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v{{\[}}[[RET]]:
-define void @atomic_cmpxchg_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i64_ret(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %old) {
entry:
%val = cmpxchg volatile i64 addrspace(4)* %out, i64 %old, i64 %in seq_cst seq_cst
%extract0 = extractvalue { i64, i1 } %val, 0
@@ -955,7 +955,7 @@ entry:
; GCN-LABEL: {{^}}atomic_cmpxchg_i64_addr64:
; GCN: flat_atomic_cmpswap_x2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]{{$}}
-define void @atomic_cmpxchg_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index, i64 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i64_addr64(i64 addrspace(4)* %out, i64 %in, i64 %index, i64 %old) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%val = cmpxchg volatile i64 addrspace(4)* %ptr, i64 %old, i64 %in seq_cst seq_cst
@@ -965,7 +965,7 @@ entry:
; GCN-LABEL: {{^}}atomic_cmpxchg_i64_ret_addr64:
; GCN: flat_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v{{\[}}[[RET]]:
-define void @atomic_cmpxchg_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index, i64 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in, i64 %index, i64 %old) {
entry:
%ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
%val = cmpxchg volatile i64 addrspace(4)* %ptr, i64 %old, i64 %in seq_cst seq_cst
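
The change repeated throughout these hunks is mechanical: each test entry point declared with the default calling convention becomes amdgpu_kernel, explicitly marking it as a kernel entry point whose arguments are passed through the kernarg segment rather than as ordinary call arguments. A minimal before/after sketch, using a hypothetical @example kernel rather than any function from the diff:

; before: default calling convention (treated as a callable device function)
define void @example(i32 addrspace(1)* %out) {
  store i32 7, i32 addrspace(1)* %out
  ret void
}

; after: an explicit kernel entry point; %out now arrives via kernarg
define amdgpu_kernel void @example(i32 addrspace(1)* %out) {
  store i32 7, i32 addrspace(1)* %out
  ret void
}
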
diff --git a/test/CodeGen/AMDGPU/fma-combine.ll b/test/CodeGen/AMDGPU/fma-combine.ll
index 50c5a5abf7fa..4113ba8dc1f0 100644
--- a/test/CodeGen/AMDGPU/fma-combine.ll
+++ b/test/CodeGen/AMDGPU/fma-combine.ll
@@ -18,7 +18,7 @@ declare float @llvm.fma.f32(float, float, float) #0
; SI-DAG: buffer_load_dwordx2 [[C:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
; SI: v_fma_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[A]], [[B]], [[C]]
; SI: buffer_store_dwordx2 [[RESULT]]
-define void @combine_to_fma_f64_0(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_fma_f64_0(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -46,7 +46,7 @@ define void @combine_to_fma_f64_0(double addrspace(1)* noalias %out, double addr
; SI-DAG: buffer_store_dwordx2 [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: buffer_store_dwordx2 [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
; SI: s_endpgm
-define void @combine_to_fma_f64_0_2use(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_fma_f64_0_2use(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -75,7 +75,7 @@ define void @combine_to_fma_f64_0_2use(double addrspace(1)* noalias %out, double
; SI-DAG: buffer_load_dwordx2 [[C:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
; SI: v_fma_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[A]], [[B]], [[C]]
; SI: buffer_store_dwordx2 [[RESULT]]
-define void @combine_to_fma_f64_1(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_fma_f64_1(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -99,7 +99,7 @@ define void @combine_to_fma_f64_1(double addrspace(1)* noalias %out, double addr
; SI-DAG: buffer_load_dwordx2 [[C:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
; SI: v_fma_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[A]], [[B]], -[[C]]
; SI: buffer_store_dwordx2 [[RESULT]]
-define void @combine_to_fma_fsub_0_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_fma_fsub_0_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -127,7 +127,7 @@ define void @combine_to_fma_fsub_0_f64(double addrspace(1)* noalias %out, double
; SI-DAG: buffer_store_dwordx2 [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: buffer_store_dwordx2 [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
; SI: s_endpgm
-define void @combine_to_fma_fsub_f64_0_2use(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_fma_fsub_f64_0_2use(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -156,7 +156,7 @@ define void @combine_to_fma_fsub_f64_0_2use(double addrspace(1)* noalias %out, d
; SI-DAG: buffer_load_dwordx2 [[C:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
; SI: v_fma_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], -[[A]], [[B]], [[C]]
; SI: buffer_store_dwordx2 [[RESULT]]
-define void @combine_to_fma_fsub_1_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_fma_fsub_1_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -184,7 +184,7 @@ define void @combine_to_fma_fsub_1_f64(double addrspace(1)* noalias %out, double
; SI-DAG: buffer_store_dwordx2 [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: buffer_store_dwordx2 [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
; SI: s_endpgm
-define void @combine_to_fma_fsub_1_f64_2use(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_fma_fsub_1_f64_2use(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -213,7 +213,7 @@ define void @combine_to_fma_fsub_1_f64_2use(double addrspace(1)* noalias %out, d
; SI-DAG: buffer_load_dwordx2 [[C:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
; SI: v_fma_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], -[[A]], [[B]], -[[C]]
; SI: buffer_store_dwordx2 [[RESULT]]
-define void @combine_to_fma_fsub_2_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_fma_fsub_2_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -242,7 +242,7 @@ define void @combine_to_fma_fsub_2_f64(double addrspace(1)* noalias %out, double
; SI-DAG: buffer_store_dwordx2 [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: buffer_store_dwordx2 [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
; SI: s_endpgm
-define void @combine_to_fma_fsub_2_f64_2uses_neg(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_fma_fsub_2_f64_2uses_neg(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -276,7 +276,7 @@ define void @combine_to_fma_fsub_2_f64_2uses_neg(double addrspace(1)* noalias %o
; SI-DAG: buffer_store_dwordx2 [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: buffer_store_dwordx2 [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
; SI: s_endpgm
-define void @combine_to_fma_fsub_2_f64_2uses_mul(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_fma_fsub_2_f64_2uses_mul(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -317,7 +317,7 @@ define void @combine_to_fma_fsub_2_f64_2uses_mul(double addrspace(1)* noalias %o
; SI-UNSAFE: v_fma_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[X]], [[Y]], [[FMA0]]
; SI: buffer_store_dwordx2 [[RESULT]]
-define void @aggressive_combine_to_fma_fsub_0_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @aggressive_combine_to_fma_fsub_0_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -358,7 +358,7 @@ define void @aggressive_combine_to_fma_fsub_0_f64(double addrspace(1)* noalias %
; SI-UNSAFE: v_fma_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], -[[Y]], [[Z]], [[FMA0]]
; SI: buffer_store_dwordx2 [[RESULT]]
-define void @aggressive_combine_to_fma_fsub_1_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @aggressive_combine_to_fma_fsub_1_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -390,7 +390,7 @@ define void @aggressive_combine_to_fma_fsub_1_f64(double addrspace(1)* noalias %
; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], [[VY:v[0-9]]]
-define void @test_f32_mul_add_x_one_y(float addrspace(1)* %out,
+define amdgpu_kernel void @test_f32_mul_add_x_one_y(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
%x = load volatile float, float addrspace(1)* %in1
@@ -406,7 +406,7 @@ define void @test_f32_mul_add_x_one_y(float addrspace(1)* %out,
; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], [[VY:v[0-9]]]
-define void @test_f32_mul_y_add_x_one(float addrspace(1)* %out,
+define amdgpu_kernel void @test_f32_mul_y_add_x_one(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
%x = load volatile float, float addrspace(1)* %in1
@@ -422,7 +422,7 @@ define void @test_f32_mul_y_add_x_one(float addrspace(1)* %out,
; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], -[[VY:v[0-9]]]
-define void @test_f32_mul_add_x_negone_y(float addrspace(1)* %out,
+define amdgpu_kernel void @test_f32_mul_add_x_negone_y(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
%x = load float, float addrspace(1)* %in1
@@ -438,7 +438,7 @@ define void @test_f32_mul_add_x_negone_y(float addrspace(1)* %out,
; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], -[[VY:v[0-9]]]
-define void @test_f32_mul_y_add_x_negone(float addrspace(1)* %out,
+define amdgpu_kernel void @test_f32_mul_y_add_x_negone(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
%x = load float, float addrspace(1)* %in1
@@ -454,7 +454,7 @@ define void @test_f32_mul_y_add_x_negone(float addrspace(1)* %out,
; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, -[[VX:v[0-9]]], [[VY:v[0-9]]], [[VY:v[0-9]]]
-define void @test_f32_mul_sub_one_x_y(float addrspace(1)* %out,
+define amdgpu_kernel void @test_f32_mul_sub_one_x_y(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
%x = load float, float addrspace(1)* %in1
@@ -470,7 +470,7 @@ define void @test_f32_mul_sub_one_x_y(float addrspace(1)* %out,
; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, -[[VX:v[0-9]]], [[VY:v[0-9]]], [[VY:v[0-9]]]
-define void @test_f32_mul_y_sub_one_x(float addrspace(1)* %out,
+define amdgpu_kernel void @test_f32_mul_y_sub_one_x(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
%x = load float, float addrspace(1)* %in1
@@ -486,7 +486,7 @@ define void @test_f32_mul_y_sub_one_x(float addrspace(1)* %out,
; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, -[[VX:v[0-9]]], [[VY:v[0-9]]], -[[VY:v[0-9]]]
-define void @test_f32_mul_sub_negone_x_y(float addrspace(1)* %out,
+define amdgpu_kernel void @test_f32_mul_sub_negone_x_y(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
%x = load float, float addrspace(1)* %in1
@@ -502,7 +502,7 @@ define void @test_f32_mul_sub_negone_x_y(float addrspace(1)* %out,
; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, -[[VX:v[0-9]]], [[VY:v[0-9]]], -[[VY:v[0-9]]]
-define void @test_f32_mul_y_sub_negone_x(float addrspace(1)* %out,
+define amdgpu_kernel void @test_f32_mul_y_sub_negone_x(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
%x = load float, float addrspace(1)* %in1
@@ -518,7 +518,7 @@ define void @test_f32_mul_y_sub_negone_x(float addrspace(1)* %out,
; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], -[[VY:v[0-9]]]
-define void @test_f32_mul_sub_x_one_y(float addrspace(1)* %out,
+define amdgpu_kernel void @test_f32_mul_sub_x_one_y(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
%x = load float, float addrspace(1)* %in1
@@ -534,7 +534,7 @@ define void @test_f32_mul_sub_x_one_y(float addrspace(1)* %out,
; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], -[[VY:v[0-9]]]
-define void @test_f32_mul_y_sub_x_one(float addrspace(1)* %out,
+define amdgpu_kernel void @test_f32_mul_y_sub_x_one(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
%x = load float, float addrspace(1)* %in1
@@ -550,7 +550,7 @@ define void @test_f32_mul_y_sub_x_one(float addrspace(1)* %out,
; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], [[VY:v[0-9]]]
-define void @test_f32_mul_sub_x_negone_y(float addrspace(1)* %out,
+define amdgpu_kernel void @test_f32_mul_sub_x_negone_y(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
%x = load float, float addrspace(1)* %in1
@@ -566,7 +566,7 @@ define void @test_f32_mul_sub_x_negone_y(float addrspace(1)* %out,
; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
;
; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], [[VY:v[0-9]]]
-define void @test_f32_mul_y_sub_x_negone(float addrspace(1)* %out,
+define amdgpu_kernel void @test_f32_mul_y_sub_x_negone(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
%x = load float, float addrspace(1)* %in1
@@ -588,7 +588,7 @@ define void @test_f32_mul_y_sub_x_negone(float addrspace(1)* %out,
;
; SI-FMA: v_fma_f32 [[VR:v[0-9]]], -[[VT:v[0-9]]], [[VY:v[0-9]]], [[VY]]
; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VT]], [[VR]]
-define void @test_f32_interp(float addrspace(1)* %out,
+define amdgpu_kernel void @test_f32_interp(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2,
float addrspace(1)* %in3) {
@@ -610,7 +610,7 @@ define void @test_f32_interp(float addrspace(1)* %out,
;
; SI-FMA: v_fma_f64 [[VR:v\[[0-9]+:[0-9]+\]]], -[[VT:v\[[0-9]+:[0-9]+\]]], [[VY:v\[[0-9]+:[0-9]+\]]], [[VY]]
; SI-FMA: v_fma_f64 v{{\[[0-9]+:[0-9]+\]}}, [[VX:v\[[0-9]+:[0-9]+\]]], [[VT]], [[VR]]
-define void @test_f64_interp(double addrspace(1)* %out,
+define amdgpu_kernel void @test_f64_interp(double addrspace(1)* %out,
double addrspace(1)* %in1,
double addrspace(1)* %in2,
double addrspace(1)* %in3) {
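
The fma-combine.ll hunks above only touch the calling convention; the behavior under test is the DAG combine that fuses a multiply feeding an add or subtract into a single v_fma_f64/v_fma_f32 when FMA is as fast as a multiply on the target (the aggressive fsub cases additionally need the unsafe-math mode that the SI-UNSAFE prefix selects). A sketch of the matched IR shape, with hypothetical names:

; fmul feeding fadd contracts to one fused multiply-add,
; i.e. the v_fma_f64 the CHECK lines expect
define amdgpu_kernel void @mul_add_to_fma(double addrspace(1)* %out, double %a, double %b, double %c) {
  %mul = fmul double %a, %b
  %add = fadd double %mul, %c      ; -> llvm.fma.f64(%a, %b, %c)
  store double %add, double addrspace(1)* %out
  ret void
}
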
diff --git a/test/CodeGen/AMDGPU/fma.f64.ll b/test/CodeGen/AMDGPU/fma.f64.ll
index cf6d7d824992..4d3f3712621e 100644
--- a/test/CodeGen/AMDGPU/fma.f64.ll
+++ b/test/CodeGen/AMDGPU/fma.f64.ll
@@ -8,7 +8,7 @@ declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>) n
; FUNC-LABEL: {{^}}fma_f64:
; SI: v_fma_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
-define void @fma_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+define amdgpu_kernel void @fma_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2, double addrspace(1)* %in3) {
%r0 = load double, double addrspace(1)* %in1
%r1 = load double, double addrspace(1)* %in2
@@ -21,7 +21,7 @@ define void @fma_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
; FUNC-LABEL: {{^}}fma_v2f64:
; SI: v_fma_f64
; SI: v_fma_f64
-define void @fma_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
+define amdgpu_kernel void @fma_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
<2 x double> addrspace(1)* %in2, <2 x double> addrspace(1)* %in3) {
%r0 = load <2 x double>, <2 x double> addrspace(1)* %in1
%r1 = load <2 x double>, <2 x double> addrspace(1)* %in2
@@ -36,7 +36,7 @@ define void @fma_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1
; SI: v_fma_f64
; SI: v_fma_f64
; SI: v_fma_f64
-define void @fma_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in1,
+define amdgpu_kernel void @fma_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in1,
<4 x double> addrspace(1)* %in2, <4 x double> addrspace(1)* %in3) {
%r0 = load <4 x double>, <4 x double> addrspace(1)* %in1
%r1 = load <4 x double>, <4 x double> addrspace(1)* %in2
diff --git a/test/CodeGen/AMDGPU/fma.ll b/test/CodeGen/AMDGPU/fma.ll
index d04a5946b98c..659cecb59ebf 100644
--- a/test/CodeGen/AMDGPU/fma.ll
+++ b/test/CodeGen/AMDGPU/fma.ll
@@ -12,7 +12,7 @@ declare i32 @llvm.r600.read.tidig.x() nounwind readnone
; EG: MEM_RAT_{{.*}} STORE_{{.*}} [[RES:T[0-9]\.[XYZW]]], {{T[0-9]\.[XYZW]}},
; EG: FMA {{\*? *}}[[RES]]
-define void @fma_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
+define amdgpu_kernel void @fma_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
float addrspace(1)* %in2, float addrspace(1)* %in3) {
%r0 = load float, float addrspace(1)* %in1
%r1 = load float, float addrspace(1)* %in2
@@ -29,7 +29,7 @@ define void @fma_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
; EG: MEM_RAT_{{.*}} STORE_{{.*}} [[RES:T[0-9]]].[[CHLO:[XYZW]]][[CHHI:[XYZW]]], {{T[0-9]\.[XYZW]}},
; EG-DAG: FMA {{\*? *}}[[RES]].[[CHLO]]
; EG-DAG: FMA {{\*? *}}[[RES]].[[CHHI]]
-define void @fma_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in1,
+define amdgpu_kernel void @fma_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in1,
<2 x float> addrspace(1)* %in2, <2 x float> addrspace(1)* %in3) {
%r0 = load <2 x float>, <2 x float> addrspace(1)* %in1
%r1 = load <2 x float>, <2 x float> addrspace(1)* %in2
@@ -50,7 +50,7 @@ define void @fma_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)*
; EG-DAG: FMA {{\*? *}}[[RES]].Y
; EG-DAG: FMA {{\*? *}}[[RES]].Z
; EG-DAG: FMA {{\*? *}}[[RES]].W
-define void @fma_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in1,
+define amdgpu_kernel void @fma_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in1,
<4 x float> addrspace(1)* %in2, <4 x float> addrspace(1)* %in3) {
%r0 = load <4 x float>, <4 x float> addrspace(1)* %in1
%r1 = load <4 x float>, <4 x float> addrspace(1)* %in2
@@ -62,7 +62,7 @@ define void @fma_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)*
; FUNC-LABEL: @fma_commute_mul_inline_imm_f32
; SI: v_fma_f32 {{v[0-9]+}}, {{v[0-9]+}}, 2.0, {{v[0-9]+}}
-define void @fma_commute_mul_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
+define amdgpu_kernel void @fma_commute_mul_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
%in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
%in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
@@ -77,7 +77,7 @@ define void @fma_commute_mul_inline_imm_f32(float addrspace(1)* noalias %out, fl
}
; FUNC-LABEL: @fma_commute_mul_s_f32
-define void @fma_commute_mul_s_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b, float %b) nounwind {
+define amdgpu_kernel void @fma_commute_mul_s_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b, float %b) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
%in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
%in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
diff --git a/test/CodeGen/AMDGPU/fmax3.f64.ll b/test/CodeGen/AMDGPU/fmax3.f64.ll
index 4d42a4630e22..8b9104b79e7f 100644
--- a/test/CodeGen/AMDGPU/fmax3.f64.ll
+++ b/test/CodeGen/AMDGPU/fmax3.f64.ll
@@ -11,7 +11,7 @@ declare double @llvm.maxnum.f64(double, double) nounwind readnone
; SI: v_max_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[REGA]], [[REGC]]
; SI: buffer_store_dwordx2 [[RESULT]],
; SI: s_endpgm
-define void @test_fmax3_f64(double addrspace(1)* %out, double addrspace(1)* %aptr) nounwind {
+define amdgpu_kernel void @test_fmax3_f64(double addrspace(1)* %out, double addrspace(1)* %aptr) nounwind {
%bptr = getelementptr double, double addrspace(1)* %aptr, i32 1
%cptr = getelementptr double, double addrspace(1)* %aptr, i32 2
%a = load volatile double, double addrspace(1)* %aptr, align 8
diff --git a/test/CodeGen/AMDGPU/fmax3.ll b/test/CodeGen/AMDGPU/fmax3.ll
index 7c01ca85f6b9..a96eb5db9e2a 100644
--- a/test/CodeGen/AMDGPU/fmax3.ll
+++ b/test/CodeGen/AMDGPU/fmax3.ll
@@ -10,7 +10,7 @@ declare float @llvm.maxnum.f32(float, float) nounwind readnone
; SI: v_max3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
-define void @test_fmax3_olt_0(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) nounwind {
+define amdgpu_kernel void @test_fmax3_olt_0(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) nounwind {
%a = load volatile float, float addrspace(1)* %aptr, align 4
%b = load volatile float, float addrspace(1)* %bptr, align 4
%c = load volatile float, float addrspace(1)* %cptr, align 4
@@ -28,7 +28,7 @@ define void @test_fmax3_olt_0(float addrspace(1)* %out, float addrspace(1)* %apt
; SI: v_max3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
-define void @test_fmax3_olt_1(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) nounwind {
+define amdgpu_kernel void @test_fmax3_olt_1(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) nounwind {
%a = load volatile float, float addrspace(1)* %aptr, align 4
%b = load volatile float, float addrspace(1)* %bptr, align 4
%c = load volatile float, float addrspace(1)* %cptr, align 4
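
For the two fmax3 files above: a pair of chained llvm.maxnum.f32 calls selects to the single three-source v_max3_f32 instruction, while the f64 variant stays as two v_max_f64 operations because GCN has no 64-bit max3. The shape being matched, sketched with hypothetical names:

declare float @llvm.maxnum.f32(float, float)

; maxnum(maxnum(a, b), c) -> one v_max3_f32
define amdgpu_kernel void @max3_sketch(float addrspace(1)* %out, float %a, float %b, float %c) {
  %m0 = call float @llvm.maxnum.f32(float %a, float %b)
  %m1 = call float @llvm.maxnum.f32(float %m0, float %c)
  store float %m1, float addrspace(1)* %out
  ret void
}
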
diff --git a/test/CodeGen/AMDGPU/fmax_legacy.f64.ll b/test/CodeGen/AMDGPU/fmax_legacy.f64.ll
index da498caa6b54..083346e9d1cb 100644
--- a/test/CodeGen/AMDGPU/fmax_legacy.f64.ll
+++ b/test/CodeGen/AMDGPU/fmax_legacy.f64.ll
@@ -4,7 +4,7 @@
declare i32 @llvm.amdgcn.workitem.id.x() #1
; FUNC-LABEL: @test_fmax_legacy_uge_f64
-define void @test_fmax_legacy_uge_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmax_legacy_uge_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -19,7 +19,7 @@ define void @test_fmax_legacy_uge_f64(double addrspace(1)* %out, double addrspac
}
; FUNC-LABEL: @test_fmax_legacy_oge_f64
-define void @test_fmax_legacy_oge_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmax_legacy_oge_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -34,7 +34,7 @@ define void @test_fmax_legacy_oge_f64(double addrspace(1)* %out, double addrspac
}
; FUNC-LABEL: @test_fmax_legacy_ugt_f64
-define void @test_fmax_legacy_ugt_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmax_legacy_ugt_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -49,7 +49,7 @@ define void @test_fmax_legacy_ugt_f64(double addrspace(1)* %out, double addrspac
}
; FUNC-LABEL: @test_fmax_legacy_ogt_f64
-define void @test_fmax_legacy_ogt_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmax_legacy_ogt_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
diff --git a/test/CodeGen/AMDGPU/fmax_legacy.ll b/test/CodeGen/AMDGPU/fmax_legacy.ll
index 4a4c92a38a35..7643c3ea533c 100644
--- a/test/CodeGen/AMDGPU/fmax_legacy.ll
+++ b/test/CodeGen/AMDGPU/fmax_legacy.ll
@@ -13,7 +13,7 @@ declare i32 @llvm.r600.read.tidig.x() #1
; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; EG: MAX
-define void @test_fmax_legacy_uge_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmax_legacy_uge_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -33,7 +33,7 @@ define void @test_fmax_legacy_uge_f32(float addrspace(1)* %out, float addrspace(
; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; EG: MAX
-define void @test_fmax_legacy_oge_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmax_legacy_oge_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -53,7 +53,7 @@ define void @test_fmax_legacy_oge_f32(float addrspace(1)* %out, float addrspace(
; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; EG: MAX
-define void @test_fmax_legacy_ugt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmax_legacy_ugt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -73,7 +73,7 @@ define void @test_fmax_legacy_ugt_f32(float addrspace(1)* %out, float addrspace(
; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; EG: MAX
-define void @test_fmax_legacy_ogt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmax_legacy_ogt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -93,7 +93,7 @@ define void @test_fmax_legacy_ogt_f32(float addrspace(1)* %out, float addrspace(
; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; EG: MAX
-define void @test_fmax_legacy_ogt_v1f32(<1 x float> addrspace(1)* %out, <1 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmax_legacy_ogt_v1f32(<1 x float> addrspace(1)* %out, <1 x float> addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr <1 x float>, <1 x float> addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr <1 x float>, <1 x float> addrspace(1)* %gep.0, i32 1
@@ -114,7 +114,7 @@ define void @test_fmax_legacy_ogt_v1f32(<1 x float> addrspace(1)* %out, <1 x flo
; SI-NONAN: v_max_f32_e32
; SI-NONAN: v_max_f32_e32
; SI-NONAN: v_max_f32_e32
-define void @test_fmax_legacy_ogt_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmax_legacy_ogt_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr <3 x float>, <3 x float> addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr <3 x float>, <3 x float> addrspace(1)* %gep.0, i32 1
@@ -137,7 +137,7 @@ define void @test_fmax_legacy_ogt_v3f32(<3 x float> addrspace(1)* %out, <3 x flo
; SI-NOT: v_max_
; EG: MAX
-define void @test_fmax_legacy_ogt_f32_multi_use(float addrspace(1)* %out0, i1 addrspace(1)* %out1, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmax_legacy_ogt_f32_multi_use(float addrspace(1)* %out0, i1 addrspace(1)* %out1, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
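
Note that the fmax_legacy tests above contain no max intrinsic at all: the fcmp-plus-select idiom is what the backend pattern-matches, producing v_max_legacy_f32 when the select's NaN behavior must be preserved (SI-SAFE) and plain v_max_f32 when NaNs are assumed absent (SI-NONAN). The minimal form, with hypothetical names:

; select(fcmp ugt a, b) -> v_max_legacy_f32 (SI-SAFE)
; or v_max_f32 under no-nans-fp-math (SI-NONAN)
define amdgpu_kernel void @max_legacy_sketch(float addrspace(1)* %out, float %a, float %b) {
  %cmp = fcmp ugt float %a, %b
  %sel = select i1 %cmp, float %a, float %b
  store float %sel, float addrspace(1)* %out
  ret void
}
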
diff --git a/test/CodeGen/AMDGPU/fmaxnum.f64.ll b/test/CodeGen/AMDGPU/fmaxnum.f64.ll
index fec3a358a4fa..20af278bf98c 100644
--- a/test/CodeGen/AMDGPU/fmaxnum.f64.ll
+++ b/test/CodeGen/AMDGPU/fmaxnum.f64.ll
@@ -9,7 +9,7 @@ declare <16 x double> @llvm.maxnum.v16f64(<16 x double>, <16 x double>) #0
; FUNC-LABEL: @test_fmax_f64
; SI: v_max_f64
-define void @test_fmax_f64(double addrspace(1)* %out, double %a, double %b) nounwind {
+define amdgpu_kernel void @test_fmax_f64(double addrspace(1)* %out, double %a, double %b) nounwind {
%val = call double @llvm.maxnum.f64(double %a, double %b) #0
store double %val, double addrspace(1)* %out, align 8
ret void
@@ -18,7 +18,7 @@ define void @test_fmax_f64(double addrspace(1)* %out, double %a, double %b) noun
; FUNC-LABEL: @test_fmax_v2f64
; SI: v_max_f64
; SI: v_max_f64
-define void @test_fmax_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b) nounwind {
+define amdgpu_kernel void @test_fmax_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b) nounwind {
%val = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %a, <2 x double> %b) #0
store <2 x double> %val, <2 x double> addrspace(1)* %out, align 16
ret void
@@ -29,7 +29,7 @@ define void @test_fmax_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <
; SI: v_max_f64
; SI: v_max_f64
; SI: v_max_f64
-define void @test_fmax_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b) nounwind {
+define amdgpu_kernel void @test_fmax_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b) nounwind {
%val = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %a, <4 x double> %b) #0
store <4 x double> %val, <4 x double> addrspace(1)* %out, align 32
ret void
@@ -44,7 +44,7 @@ define void @test_fmax_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, <
; SI: v_max_f64
; SI: v_max_f64
; SI: v_max_f64
-define void @test_fmax_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, <8 x double> %b) nounwind {
+define amdgpu_kernel void @test_fmax_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, <8 x double> %b) nounwind {
%val = call <8 x double> @llvm.maxnum.v8f64(<8 x double> %a, <8 x double> %b) #0
store <8 x double> %val, <8 x double> addrspace(1)* %out, align 64
ret void
@@ -67,7 +67,7 @@ define void @test_fmax_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, <
; SI: v_max_f64
; SI: v_max_f64
; SI: v_max_f64
-define void @test_fmax_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %a, <16 x double> %b) nounwind {
+define amdgpu_kernel void @test_fmax_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %a, <16 x double> %b) nounwind {
%val = call <16 x double> @llvm.maxnum.v16f64(<16 x double> %a, <16 x double> %b) #0
store <16 x double> %val, <16 x double> addrspace(1)* %out, align 128
ret void
diff --git a/test/CodeGen/AMDGPU/fmaxnum.ll b/test/CodeGen/AMDGPU/fmaxnum.ll
index 4058247a6da9..277b8ce04c4e 100644
--- a/test/CodeGen/AMDGPU/fmaxnum.ll
+++ b/test/CodeGen/AMDGPU/fmaxnum.ll
@@ -14,7 +14,7 @@ declare double @llvm.maxnum.f64(double, double)
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG: MAX_DX10 {{.*}}[[OUT]]
-define void @test_fmax_f32(float addrspace(1)* %out, float %a, float %b) nounwind {
+define amdgpu_kernel void @test_fmax_f32(float addrspace(1)* %out, float %a, float %b) nounwind {
%val = call float @llvm.maxnum.f32(float %a, float %b) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -27,7 +27,7 @@ define void @test_fmax_f32(float addrspace(1)* %out, float %a, float %b) nounwin
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+]]
; EG: MAX_DX10 {{.*}}[[OUT]]
; EG: MAX_DX10 {{.*}}[[OUT]]
-define void @test_fmax_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) nounwind {
+define amdgpu_kernel void @test_fmax_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) nounwind {
%val = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %a, <2 x float> %b) #0
store <2 x float> %val, <2 x float> addrspace(1)* %out, align 8
ret void
@@ -44,7 +44,7 @@ define void @test_fmax_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2
; EG: MAX_DX10 {{.*}}[[OUT]]
; EG: MAX_DX10 {{.*}}[[OUT]]
; EG: MAX_DX10 {{.*}}[[OUT]]
-define void @test_fmax_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b) nounwind {
+define amdgpu_kernel void @test_fmax_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b) nounwind {
%val = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %a, <4 x float> %b) #0
store <4 x float> %val, <4 x float> addrspace(1)* %out, align 16
ret void
@@ -70,7 +70,7 @@ define void @test_fmax_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4
; EG-DAG: MAX_DX10 {{.*}}[[OUT2]].Y
; EG-DAG: MAX_DX10 {{.*}}[[OUT2]].Z
; EG-DAG: MAX_DX10 {{.*}}[[OUT2]].W
-define void @test_fmax_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b) nounwind {
+define amdgpu_kernel void @test_fmax_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b) nounwind {
%val = call <8 x float> @llvm.maxnum.v8f32(<8 x float> %a, <8 x float> %b) #0
store <8 x float> %val, <8 x float> addrspace(1)* %out, align 32
ret void
@@ -114,7 +114,7 @@ define void @test_fmax_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8
; EG-DAG: MAX_DX10 {{.*}}[[OUT4]].Y
; EG-DAG: MAX_DX10 {{.*}}[[OUT4]].Z
; EG-DAG: MAX_DX10 {{.*}}[[OUT4]].W
-define void @test_fmax_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %a, <16 x float> %b) nounwind {
+define amdgpu_kernel void @test_fmax_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %a, <16 x float> %b) nounwind {
%val = call <16 x float> @llvm.maxnum.v16f32(<16 x float> %a, <16 x float> %b) #0
store <16 x float> %val, <16 x float> addrspace(1)* %out, align 64
ret void
@@ -128,7 +128,7 @@ define void @test_fmax_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %a,
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG-NOT: MAX_DX10
; EG: MOV {{.*}}[[OUT]], literal.{{[xy]}}
-define void @constant_fold_fmax_f32(float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @constant_fold_fmax_f32(float addrspace(1)* %out) nounwind {
%val = call float @llvm.maxnum.f32(float 1.0, float 2.0) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -143,7 +143,7 @@ define void @constant_fold_fmax_f32(float addrspace(1)* %out) nounwind {
; EG-NOT: MAX_DX10
; EG: MOV {{.*}}[[OUT]], literal.{{[xy]}}
; EG: 2143289344(nan)
-define void @constant_fold_fmax_f32_nan_nan(float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @constant_fold_fmax_f32_nan_nan(float addrspace(1)* %out) nounwind {
%val = call float @llvm.maxnum.f32(float 0x7FF8000000000000, float 0x7FF8000000000000) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -157,7 +157,7 @@ define void @constant_fold_fmax_f32_nan_nan(float addrspace(1)* %out) nounwind {
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG-NOT: MAX_DX10
; EG: MOV {{.*}}[[OUT]], literal.{{[xy]}}
-define void @constant_fold_fmax_f32_val_nan(float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @constant_fold_fmax_f32_val_nan(float addrspace(1)* %out) nounwind {
%val = call float @llvm.maxnum.f32(float 1.0, float 0x7FF8000000000000) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -171,7 +171,7 @@ define void @constant_fold_fmax_f32_val_nan(float addrspace(1)* %out) nounwind {
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG-NOT: MAX_DX10
; EG: MOV {{.*}}[[OUT]], literal.{{[xy]}}
-define void @constant_fold_fmax_f32_nan_val(float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @constant_fold_fmax_f32_nan_val(float addrspace(1)* %out) nounwind {
%val = call float @llvm.maxnum.f32(float 0x7FF8000000000000, float 1.0) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -185,7 +185,7 @@ define void @constant_fold_fmax_f32_nan_val(float addrspace(1)* %out) nounwind {
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG-NOT: MAX_DX10
; EG: MOV {{.*}}[[OUT]], literal.{{[xy]}}
-define void @constant_fold_fmax_f32_p0_p0(float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @constant_fold_fmax_f32_p0_p0(float addrspace(1)* %out) nounwind {
%val = call float @llvm.maxnum.f32(float 0.0, float 0.0) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -199,7 +199,7 @@ define void @constant_fold_fmax_f32_p0_p0(float addrspace(1)* %out) nounwind {
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG-NOT: MAX_DX10
; EG: MOV {{.*}}[[OUT]], literal.{{[xy]}}
-define void @constant_fold_fmax_f32_p0_n0(float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @constant_fold_fmax_f32_p0_n0(float addrspace(1)* %out) nounwind {
%val = call float @llvm.maxnum.f32(float 0.0, float -0.0) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -213,7 +213,7 @@ define void @constant_fold_fmax_f32_p0_n0(float addrspace(1)* %out) nounwind {
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG-NOT: MAX_DX10
; EG: MOV {{.*}}[[OUT]], literal.{{[xy]}}
-define void @constant_fold_fmax_f32_n0_p0(float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @constant_fold_fmax_f32_n0_p0(float addrspace(1)* %out) nounwind {
%val = call float @llvm.maxnum.f32(float -0.0, float 0.0) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -227,7 +227,7 @@ define void @constant_fold_fmax_f32_n0_p0(float addrspace(1)* %out) nounwind {
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG-NOT: MAX_DX10
; EG: MOV {{.*}}[[OUT]], literal.{{[xy]}}
-define void @constant_fold_fmax_f32_n0_n0(float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @constant_fold_fmax_f32_n0_n0(float addrspace(1)* %out) nounwind {
%val = call float @llvm.maxnum.f32(float -0.0, float -0.0) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -239,7 +239,7 @@ define void @constant_fold_fmax_f32_n0_n0(float addrspace(1)* %out) nounwind {
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG-NOT: MAX_DX10
; EG: MOV {{.*}}[[OUT]], literal.{{[xy]}}
-define void @fmax_var_immediate_f32(float addrspace(1)* %out, float %a) nounwind {
+define amdgpu_kernel void @fmax_var_immediate_f32(float addrspace(1)* %out, float %a) nounwind {
%val = call float @llvm.maxnum.f32(float %a, float 2.0) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -250,7 +250,7 @@ define void @fmax_var_immediate_f32(float addrspace(1)* %out, float %a) nounwind
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG: MAX_DX10 {{.*}}[[OUT]], {{KC0\[[0-9]\].[XYZW]}}, literal.{{[xy]}}
-define void @fmax_immediate_var_f32(float addrspace(1)* %out, float %a) nounwind {
+define amdgpu_kernel void @fmax_immediate_var_f32(float addrspace(1)* %out, float %a) nounwind {
%val = call float @llvm.maxnum.f32(float 2.0, float %a) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -262,7 +262,7 @@ define void @fmax_immediate_var_f32(float addrspace(1)* %out, float %a) nounwind
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG: MAX_DX10 {{.*}}[[OUT]], {{KC0\[[0-9]\].[XYZW]}}, literal.{{[xy]}}
-define void @fmax_var_literal_f32(float addrspace(1)* %out, float %a) nounwind {
+define amdgpu_kernel void @fmax_var_literal_f32(float addrspace(1)* %out, float %a) nounwind {
%val = call float @llvm.maxnum.f32(float %a, float 99.0) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -274,7 +274,7 @@ define void @fmax_var_literal_f32(float addrspace(1)* %out, float %a) nounwind {
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG: MAX_DX10 {{.*}}[[OUT]], {{KC0\[[0-9]\].[XYZW]}}, literal.{{[xy]}}
-define void @fmax_literal_var_f32(float addrspace(1)* %out, float %a) nounwind {
+define amdgpu_kernel void @fmax_literal_var_f32(float addrspace(1)* %out, float %a) nounwind {
%val = call float @llvm.maxnum.f32(float 99.0, float %a) #0
store float %val, float addrspace(1)* %out, align 4
ret void
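
The constant_fold_fmax cases above pin down llvm.maxnum's IEEE maxNum semantics: a quiet NaN operand is discarded in favor of the numeric one, so NaN survives only when both inputs are NaN, which is why the nan_nan test checks for the literal 2143289344 (0x7FC00000). Worked out as a sketch, with a hypothetical function name:

declare float @llvm.maxnum.f32(float, float)

; all of these fold at compile time, so no MAX instruction is emitted
; (matching the EG-NOT: MAX_DX10 checks above):
;   maxnum(1.0, 2.0) -> 2.0
;   maxnum(nan, nan) -> nan
;   maxnum(1.0, nan) -> 1.0
;   maxnum(nan, 1.0) -> 1.0
define amdgpu_kernel void @maxnum_fold_sketch(float addrspace(1)* %out) {
  %v = call float @llvm.maxnum.f32(float 0x7FF8000000000000, float 1.0)
  store float %v, float addrspace(1)* %out   ; stores 1.0
  ret void
}
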
diff --git a/test/CodeGen/AMDGPU/fmed3.ll b/test/CodeGen/AMDGPU/fmed3.ll
index e66678069130..d2cfc713ed37 100644
--- a/test/CodeGen/AMDGPU/fmed3.ll
+++ b/test/CodeGen/AMDGPU/fmed3.ll
@@ -1,18 +1,33 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=NOSNAN -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mattr=+fp-exceptions -verify-machineinstrs < %s | FileCheck -check-prefix=SNAN -check-prefix=GCN %s
-declare i32 @llvm.amdgcn.workitem.id.x() #0
-declare float @llvm.minnum.f32(float, float) #0
-declare float @llvm.maxnum.f32(float, float) #0
-declare double @llvm.minnum.f64(double, double) #0
-declare double @llvm.maxnum.f64(double, double) #0
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=NOSNAN -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mattr=+fp-exceptions -verify-machineinstrs < %s | FileCheck -check-prefix=SNAN -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=NOSNAN -check-prefix=GCN -check-prefix=VI -check-prefix=GFX89 %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=+fp-exceptions -verify-machineinstrs < %s | FileCheck -check-prefix=SNAN -check-prefix=GCN -check-prefix=VI -check-prefix=GFX89 %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -verify-machineinstrs < %s | FileCheck -check-prefix=NOSNAN -check-prefix=GCN -check-prefix=GFX9 -check-prefix=GFX89 %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=+fp-exceptions -verify-machineinstrs < %s | FileCheck -check-prefix=SNAN -check-prefix=GCN -check-prefix=GFX9 -check-prefix=GFX89 %s
+
+
+; GCN-LABEL: {{^}}v_test_nnan_input_fmed3_r_i_i_f32:
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v{{[0-9]+}}
+; GCN: v_med3_f32 v{{[0-9]+}}, [[ADD]], 2.0, 4.0
+define amdgpu_kernel void @v_test_nnan_input_fmed3_r_i_i_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %a.add = fadd nnan float %a, 1.0
+ %max = call float @llvm.maxnum.f32(float %a.add, float 2.0)
+ %med = call float @llvm.minnum.f32(float %max, float 4.0)
+ store float %med, float addrspace(1)* %outgep
+ ret void
+}
; GCN-LABEL: {{^}}v_test_fmed3_r_i_i_f32:
; NOSNAN: v_med3_f32 v{{[0-9]+}}, v{{[0-9]+}}, 2.0, 4.0
; SNAN: v_max_f32_e32 v{{[0-9]+}}, 2.0, v{{[0-9]+}}
; SNAN: v_min_f32_e32 v{{[0-9]+}}, 4.0, v{{[0-9]+}}
-define void @v_test_fmed3_r_i_i_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #1 {
+define amdgpu_kernel void @v_test_fmed3_r_i_i_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -30,7 +45,7 @@ define void @v_test_fmed3_r_i_i_f32(float addrspace(1)* %out, float addrspace(1)
; SNAN: v_max_f32_e32 v{{[0-9]+}}, 2.0, v{{[0-9]+}}
; SNAN: v_min_f32_e32 v{{[0-9]+}}, 4.0, v{{[0-9]+}}
-define void @v_test_fmed3_r_i_i_commute0_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #1 {
+define amdgpu_kernel void @v_test_fmed3_r_i_i_commute0_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -48,7 +63,7 @@ define void @v_test_fmed3_r_i_i_commute0_f32(float addrspace(1)* %out, float add
; SNAN: v_max_f32_e32 v{{[0-9]+}}, 2.0, v{{[0-9]+}}
; SNAN: v_min_f32_e32 v{{[0-9]+}}, 4.0, v{{[0-9]+}}
-define void @v_test_fmed3_r_i_i_commute1_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #1 {
+define amdgpu_kernel void @v_test_fmed3_r_i_i_commute1_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -64,7 +79,7 @@ define void @v_test_fmed3_r_i_i_commute1_f32(float addrspace(1)* %out, float add
; GCN-LABEL: {{^}}v_test_fmed3_r_i_i_constant_order_f32:
; GCN: v_max_f32_e32 v{{[0-9]+}}, 4.0, v{{[0-9]+}}
; GCN: v_min_f32_e32 v{{[0-9]+}}, 2.0, v{{[0-9]+}}
-define void @v_test_fmed3_r_i_i_constant_order_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #1 {
+define amdgpu_kernel void @v_test_fmed3_r_i_i_constant_order_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -81,7 +96,7 @@ define void @v_test_fmed3_r_i_i_constant_order_f32(float addrspace(1)* %out, flo
; GCN-LABEL: {{^}}v_test_fmed3_r_i_i_multi_use_f32:
; GCN: v_max_f32_e32 v{{[0-9]+}}, 2.0, v{{[0-9]+}}
; GCN: v_min_f32_e32 v{{[0-9]+}}, 4.0, v{{[0-9]+}}
-define void @v_test_fmed3_r_i_i_multi_use_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #1 {
+define amdgpu_kernel void @v_test_fmed3_r_i_i_multi_use_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -98,7 +113,7 @@ define void @v_test_fmed3_r_i_i_multi_use_f32(float addrspace(1)* %out, float ad
; GCN-LABEL: {{^}}v_test_fmed3_r_i_i_f64:
; GCN: v_max_f64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, 2.0
; GCN: v_min_f64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, 4.0
-define void @v_test_fmed3_r_i_i_f64(double addrspace(1)* %out, double addrspace(1)* %aptr) #1 {
+define amdgpu_kernel void @v_test_fmed3_r_i_i_f64(double addrspace(1)* %out, double addrspace(1)* %aptr) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr double, double addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr double, double addrspace(1)* %out, i32 %tid
@@ -113,7 +128,7 @@ define void @v_test_fmed3_r_i_i_f64(double addrspace(1)* %out, double addrspace(
; GCN-LABEL: {{^}}v_test_fmed3_r_i_i_no_nans_f32:
; GCN: v_med3_f32 v{{[0-9]+}}, v{{[0-9]+}}, 2.0, 4.0
-define void @v_test_fmed3_r_i_i_no_nans_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #2 {
+define amdgpu_kernel void @v_test_fmed3_r_i_i_no_nans_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #2 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -131,7 +146,7 @@ define void @v_test_fmed3_r_i_i_no_nans_f32(float addrspace(1)* %out, float addr
; SNAN: v_max_f32_e32 v{{[0-9]+}}, 2.0, v{{[0-9]+}}
; SNAN: v_min_f32_e32 v{{[0-9]+}}, 4.0, v{{[0-9]+}}
-define void @v_test_legacy_fmed3_r_i_i_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #1 {
+define amdgpu_kernel void @v_test_legacy_fmed3_r_i_i_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -149,6 +164,812 @@ define void @v_test_legacy_fmed3_r_i_i_f32(float addrspace(1)* %out, float addrs
ret void
}
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat0_srcmod0:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, -[[A]], [[B]], [[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat0_srcmod0(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %a.fneg = fsub float -0.0, %a
+ %tmp0 = call float @llvm.minnum.f32(float %a.fneg, float %b)
+ %tmp1 = call float @llvm.maxnum.f32(float %a.fneg, float %b)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat0_srcmod1:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[A]], -[[B]], [[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat0_srcmod1(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %b.fneg = fsub float -0.0, %b
+ %tmp0 = call float @llvm.minnum.f32(float %a, float %b.fneg)
+ %tmp1 = call float @llvm.maxnum.f32(float %a, float %b.fneg)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat0_srcmod2:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[A]], [[B]], -[[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat0_srcmod2(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %c.fneg = fsub float -0.0, %c
+ %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
+ %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c.fneg)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat0_srcmod012:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, -[[A]], |[[B]]|, -|[[C]]|
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat0_srcmod012(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+
+ %a.fneg = fsub float -0.0, %a
+ %b.fabs = call float @llvm.fabs.f32(float %b)
+ %c.fabs = call float @llvm.fabs.f32(float %c)
+ %c.fabs.fneg = fsub float -0.0, %c.fabs
+
+ %tmp0 = call float @llvm.minnum.f32(float %a.fneg, float %b.fabs)
+ %tmp1 = call float @llvm.maxnum.f32(float %a.fneg, float %b.fabs)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c.fabs.fneg)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat0_negabs012:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, -|[[A]]|, -|[[B]]|, -|[[C]]|
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat0_negabs012(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+
+ %a.fabs = call float @llvm.fabs.f32(float %a)
+ %a.fabs.fneg = fsub float -0.0, %a.fabs
+ %b.fabs = call float @llvm.fabs.f32(float %b)
+ %b.fabs.fneg = fsub float -0.0, %b.fabs
+ %c.fabs = call float @llvm.fabs.f32(float %c)
+ %c.fabs.fneg = fsub float -0.0, %c.fabs
+
+ %tmp0 = call float @llvm.minnum.f32(float %a.fabs.fneg, float %b.fabs.fneg)
+ %tmp1 = call float @llvm.maxnum.f32(float %a.fabs.fneg, float %b.fabs.fneg)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c.fabs.fneg)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_nnan_inputs_med3_f32_pat0:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN-DAG: v_add_f32_e32 [[A_ADD:v[0-9]+]], 1.0, [[A]]
+; GCN-DAG: v_add_f32_e32 [[B_ADD:v[0-9]+]], 2.0, [[B]]
+; GCN-DAG: v_add_f32_e32 [[C_ADD:v[0-9]+]], 4.0, [[C]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[A_ADD]], [[B_ADD]], [[C_ADD]]
+define amdgpu_kernel void @v_nnan_inputs_med3_f32_pat0(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+
+ %a.nnan = fadd nnan float %a, 1.0
+ %b.nnan = fadd nnan float %b, 2.0
+ %c.nnan = fadd nnan float %c, 4.0
+
+ %tmp0 = call float @llvm.minnum.f32(float %a.nnan, float %b.nnan)
+ %tmp1 = call float @llvm.maxnum.f32(float %a.nnan, float %b.nnan)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c.nnan)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; 16 combinations
+
+; 0: max(min(x, y), min(max(x, y), z))
+; 1: max(min(x, y), min(max(y, x), z))
+; 2: max(min(x, y), min(z, max(x, y)))
+; 3: max(min(x, y), min(z, max(y, x)))
+; 4: max(min(y, x), min(max(x, y), z))
+; 5: max(min(y, x), min(max(y, x), z))
+; 6: max(min(y, x), min(z, max(x, y)))
+; 7: max(min(y, x), min(z, max(y, x)))
+;
+; 8-15: as above, with the operands of the outermost max commuted
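+;
+; A sketch of why pattern 0 yields the median (assuming no NaNs, so that
+; minnum/maxnum act as plain min/max): take x <= y, so min(x, y) = x and
+; max(x, y) = y, and the pattern reduces to max(x, min(y, z)). Then:
+;   z <= x:      min(y, z) = z and max(x, z) = x -> the median x
+;   x <= z <= y: min(y, z) = z and max(x, z) = z -> the median z
+;   y <= z:      min(y, z) = y and max(x, y) = y -> the median y
+; The x > y case follows by symmetry; the remaining patterns only commute
+; commutative operands.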
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat0:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[A]], [[B]], [[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat0(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
+ %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat1:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[A]], [[B]], [[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat1(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
+ %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat2:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[A]], [[B]], [[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat2(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
+ %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
+ %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat3:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[A]], [[B]], [[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat3(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
+ %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
+ %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat4:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[B]], [[A]], [[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat4(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %b, float %a)
+ %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
+ %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat5:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[B]], [[A]], [[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat5(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %b, float %a)
+ %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat6:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[B]], [[A]], [[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat6(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %b, float %a)
+ %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
+ %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat7:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[B]], [[A]], [[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat7(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %b, float %a)
+ %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
+ %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat8:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[A]], [[B]], [[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat8(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
+ %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
+ %med3 = call float @llvm.maxnum.f32(float %tmp2, float %tmp0)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat9:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[B]], [[A]], [[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat9(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
+ %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
+ %med3 = call float @llvm.maxnum.f32(float %tmp2, float %tmp0)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat10:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[A]], [[B]], [[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat10(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
+ %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
+ %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
+ %med3 = call float @llvm.maxnum.f32(float %tmp2, float %tmp0)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat11:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[B]], [[A]], [[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat11(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
+ %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
+ %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
+ %med3 = call float @llvm.maxnum.f32(float %tmp2, float %tmp0)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat12:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[B]], [[A]], [[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat12(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %b, float %a)
+ %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
+ %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
+ %med3 = call float @llvm.maxnum.f32(float %tmp2, float %tmp0)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat13:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[B]], [[A]], [[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat13(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %b, float %a)
+ %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
+ %med3 = call float @llvm.maxnum.f32(float %tmp2, float %tmp0)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat14:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[A]], [[B]], [[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat14(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %b, float %a)
+ %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
+ %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
+ %med3 = call float @llvm.maxnum.f32(float %tmp2, float %tmp0)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat15:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_med3_f32 v{{[0-9]+}}, [[B]], [[A]], [[C]]
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat15(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %b, float %a)
+ %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
+ %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
+ %med3 = call float @llvm.maxnum.f32(float %tmp2, float %tmp0)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; ---------------------------------------------------------------------
+; Negative patterns
+; ---------------------------------------------------------------------
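+;
+; These should not form v_med3_f32: either an intermediate min/max has an
+; extra use (so the fold would not eliminate it), NaNs are possible, or
+; the source modifiers on the repeated operand do not match.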
+
+; GCN-LABEL: {{^}}v_test_safe_med3_f32_pat0_multi_use0:
+; GCN-DAG: v_min_f32
+; GCN-DAG: v_max_f32
+; GCN: v_min_f32
+; GCN: v_max_f32
+define amdgpu_kernel void @v_test_safe_med3_f32_pat0_multi_use0(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
+ store volatile float %tmp0, float addrspace(1)* undef
+ %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_safe_med3_f32_pat0_multi_use1:
+define amdgpu_kernel void @v_test_safe_med3_f32_pat0_multi_use1(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
+ %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
+ store volatile float %tmp1, float addrspace(1)* undef
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_safe_med3_f32_pat0_multi_use2:
+define amdgpu_kernel void @v_test_safe_med3_f32_pat0_multi_use2(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
+ %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
+ store volatile float %tmp2, float addrspace(1)* undef
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_safe_med3_f32_pat0:
+define amdgpu_kernel void @v_test_safe_med3_f32_pat0(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
+ %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_nnan_inputs_missing0_med3_f32_pat0:
+define amdgpu_kernel void @v_nnan_inputs_missing0_med3_f32_pat0(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+
+ %a.nnan = fadd float %a, 1.0
+ %b.nnan = fadd nnan float %b, 2.0
+ %c.nnan = fadd nnan float %c, 4.0
+
+ %tmp0 = call float @llvm.minnum.f32(float %a.nnan, float %b.nnan)
+ %tmp1 = call float @llvm.maxnum.f32(float %a.nnan, float %b.nnan)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c.nnan)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_nnan_inputs_missing1_med3_f32_pat0:
+define amdgpu_kernel void @v_nnan_inputs_missing1_med3_f32_pat0(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+
+ %a.nnan = fadd nnan float %a, 1.0
+ %b.nnan = fadd float %b, 2.0
+ %c.nnan = fadd nnan float %c, 4.0
+
+ %tmp0 = call float @llvm.minnum.f32(float %a.nnan, float %b.nnan)
+ %tmp1 = call float @llvm.maxnum.f32(float %a.nnan, float %b.nnan)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c.nnan)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_nnan_inputs_missing2_med3_f32_pat0:
+define amdgpu_kernel void @v_nnan_inputs_missing2_med3_f32_pat0(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+
+ %a.nnan = fadd nnan float %a, 1.0
+ %b.nnan = fadd nnan float %b, 2.0
+ %c.nnan = fadd float %c, 4.0
+
+ %tmp0 = call float @llvm.minnum.f32(float %a.nnan, float %b.nnan)
+ %tmp1 = call float @llvm.maxnum.f32(float %a.nnan, float %b.nnan)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c.nnan)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_global_nnans_med3_f32_pat0_srcmod0_mismatch:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_min_f32
+; GCN: v_max_f32
+; GCN: v_min_f32
+; GCN: v_max_f32
+define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat0_srcmod0_mismatch(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %a.fneg = fsub float -0.0, %a
+ %tmp0 = call float @llvm.minnum.f32(float %a.fneg, float %b)
+ %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
+ %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
+ %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ store float %med3, float addrspace(1)* %outgep
+ ret void
+}
+
+; A simple min and max alone is not sufficient to form a med3; the
+; min(a, b) term of the median pattern is missing.
+; GCN-LABEL: {{^}}v_test_global_nnans_min_max_f32:
+; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]]
+; GCN: v_max_f32_e32 [[MAX:v[0-9]+]], [[B]], [[A]]
+; GCN: v_min_f32_e32 v{{[0-9]+}}, [[C]], [[MAX]]
+define amdgpu_kernel void @v_test_global_nnans_min_max_f32(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr float, float addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load volatile float, float addrspace(1)* %gep0
+ %b = load volatile float, float addrspace(1)* %gep1
+ %c = load volatile float, float addrspace(1)* %gep2
+ %max = call float @llvm.maxnum.f32(float %a, float %b)
+ %minmax = call float @llvm.minnum.f32(float %max, float %c)
+ store float %minmax, float addrspace(1)* %outgep
+ ret void
+}
+
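+; A single nnan input suffices here: the other two med3 operands are the
+; constants 2.0 and 4.0, which can never be NaN.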
+; GCN-LABEL: {{^}}v_test_nnan_input_fmed3_r_i_i_f16:
+; SI: v_cvt_f32_f16
+; SI: v_add_f32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}
+; SI: v_med3_f32 v{{[0-9]+}}, v{{[0-9]+}}, 2.0, 4.0
+; SI: v_cvt_f16_f32
+
+; VI: v_add_f16_e32 v{{[0-9]+}}, 1.0
+; VI: v_max_f16_e32 v{{[0-9]+}}, 2.0
+; VI: v_min_f16_e32 v{{[0-9]+}}, 4.0
+
+; GFX9: v_add_f16_e32 [[ADD:v[0-9]+]], 1.0
+; GFX9: v_med3_f16 v{{[0-9]+}}, [[ADD]], 2.0, 4.0
+define amdgpu_kernel void @v_test_nnan_input_fmed3_r_i_i_f16(half addrspace(1)* %out, half addrspace(1)* %aptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr half, half addrspace(1)* %aptr, i32 %tid
+ %outgep = getelementptr half, half addrspace(1)* %out, i32 %tid
+ %a = load half, half addrspace(1)* %gep0
+ %a.add = fadd nnan half %a, 1.0
+ %max = call half @llvm.maxnum.f16(half %a.add, half 2.0)
+ %med = call half @llvm.minnum.f16(half %max, half 4.0)
+
+ store half %med, half addrspace(1)* %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_nnan_inputs_med3_f16_pat0:
+; GCN: {{buffer_|flat_}}load_ushort [[A:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_ushort [[B:v[0-9]+]]
+; GCN: {{buffer_|flat_}}load_ushort [[C:v[0-9]+]]
+
+; SI: v_cvt_f32_f16
+; SI: v_cvt_f32_f16
+; SI: v_add_f32_e32
+; SI: v_add_f32_e32
+; SI: v_add_f32_e32
+; SI: v_med3_f32
+; SI: v_cvt_f16_f32_e32
+
+; GFX89-DAG: v_add_f16_e32 [[A_ADD:v[0-9]+]], 1.0, [[A]]
+; GFX89-DAG: v_add_f16_e32 [[B_ADD:v[0-9]+]], 2.0, [[B]]
+; GFX89-DAG: v_add_f16_e32 [[C_ADD:v[0-9]+]], 4.0, [[C]]
+
+; VI-DAG: v_min_f16
+; VI-DAG: v_max_f16
+; VI: v_min_f16
+; VI: v_max_f16
+
+; GFX9: v_med3_f16 v{{[0-9]+}}, [[A_ADD]], [[B_ADD]], [[C_ADD]]
+define amdgpu_kernel void @v_nnan_inputs_med3_f16_pat0(half addrspace(1)* %out, half addrspace(1)* %aptr, half addrspace(1)* %bptr, half addrspace(1)* %cptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr half, half addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr half, half addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr half, half addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr half, half addrspace(1)* %out, i32 %tid
+ %a = load volatile half, half addrspace(1)* %gep0
+ %b = load volatile half, half addrspace(1)* %gep1
+ %c = load volatile half, half addrspace(1)* %gep2
+
+ %a.nnan = fadd nnan half %a, 1.0
+ %b.nnan = fadd nnan half %b, 2.0
+ %c.nnan = fadd nnan half %c, 4.0
+
+ %tmp0 = call half @llvm.minnum.f16(half %a.nnan, half %b.nnan)
+ %tmp1 = call half @llvm.maxnum.f16(half %a.nnan, half %b.nnan)
+ %tmp2 = call half @llvm.minnum.f16(half %tmp1, half %c.nnan)
+ %med3 = call half @llvm.maxnum.f16(half %tmp0, half %tmp2)
+ store half %med3, half addrspace(1)* %outgep
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+declare float @llvm.fabs.f32(float) #0
+declare float @llvm.minnum.f32(float, float) #0
+declare float @llvm.maxnum.f32(float, float) #0
+declare double @llvm.minnum.f64(double, double) #0
+declare double @llvm.maxnum.f64(double, double) #0
+declare half @llvm.fabs.f16(half) #0
+declare half @llvm.minnum.f16(half, half) #0
+declare half @llvm.maxnum.f16(half, half) #0
+
attributes #0 = { nounwind readnone }
attributes #1 = { nounwind "unsafe-fp-math"="false" "no-nans-fp-math"="false" }
attributes #2 = { nounwind "unsafe-fp-math"="false" "no-nans-fp-math"="true" }
diff --git a/test/CodeGen/AMDGPU/fmin3.ll b/test/CodeGen/AMDGPU/fmin3.ll
index 3102ffdbdd28..3183f77f090b 100644
--- a/test/CodeGen/AMDGPU/fmin3.ll
+++ b/test/CodeGen/AMDGPU/fmin3.ll
@@ -11,7 +11,7 @@ declare float @llvm.minnum.f32(float, float) nounwind readnone
; SI: v_min3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
-define void @test_fmin3_olt_0(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) nounwind {
+define amdgpu_kernel void @test_fmin3_olt_0(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) nounwind {
%a = load volatile float, float addrspace(1)* %aptr, align 4
%b = load volatile float, float addrspace(1)* %bptr, align 4
%c = load volatile float, float addrspace(1)* %cptr, align 4
@@ -29,7 +29,7 @@ define void @test_fmin3_olt_0(float addrspace(1)* %out, float addrspace(1)* %apt
; SI: v_min3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
-define void @test_fmin3_olt_1(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) nounwind {
+define amdgpu_kernel void @test_fmin3_olt_1(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) nounwind {
%a = load volatile float, float addrspace(1)* %aptr, align 4
%b = load volatile float, float addrspace(1)* %bptr, align 4
%c = load volatile float, float addrspace(1)* %cptr, align 4
diff --git a/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll b/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll
new file mode 100644
index 000000000000..fdfe533b3d0c
--- /dev/null
+++ b/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll
@@ -0,0 +1,47 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-SAFE -check-prefix=GCN %s
+; RUN: llc -enable-no-nans-fp-math -enable-unsafe-fp-math -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-NONAN -check-prefix=GCN %s
+
+; FIXME: Should replace unsafe-fp-math with no signed zeros.
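+; Presumably only the no-signed-zeros guarantee matters for these folds:
+; select returns a specific operand when the values compare equal (e.g.
+; +0.0 vs -0.0), while min/max may return either, so the fcmp+select ->
+; min/max fold should not need the rest of unsafe-fp-math.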
+
+; GCN-LABEL: {{^}}min_fneg_select_regression_0:
+; GCN-SAFE: v_max_legacy_f32_e64 [[MIN:v[0-9]+]], -1.0, -v0
+; GCN-NONAN: v_max_f32_e64 v{{[0-9]+}}, -v0, -1.0
+define amdgpu_ps float @min_fneg_select_regression_0(float %a, float %b) #0 {
+ %fneg.a = fsub float -0.0, %a
+ %cmp.a = fcmp ult float %a, 1.0
+ %min.a = select i1 %cmp.a, float %fneg.a, float -1.0
+ ret float %min.a
+}
+
+; GCN-LABEL: {{^}}min_fneg_select_regression_posk_0:
+; GCN-SAFE: v_max_legacy_f32_e64 [[MIN:v[0-9]+]], 1.0, -v0
+; GCN-NONAN: v_max_f32_e64 v{{[0-9]+}}, -v0, 1.0
+define amdgpu_ps float @min_fneg_select_regression_posk_0(float %a, float %b) #0 {
+ %fneg.a = fsub float -0.0, %a
+ %cmp.a = fcmp ult float %a, -1.0
+ %min.a = select i1 %cmp.a, float %fneg.a, float 1.0
+ ret float %min.a
+}
+
+; GCN-LABEL: {{^}}max_fneg_select_regression_0:
+; GCN-SAFE: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], -1.0, -v0
+; GCN-NONAN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, -1.0
+define amdgpu_ps float @max_fneg_select_regression_0(float %a, float %b) #0 {
+ %fneg.a = fsub float -0.0, %a
+ %cmp.a = fcmp ugt float %a, 1.0
+ %min.a = select i1 %cmp.a, float %fneg.a, float -1.0
+ ret float %min.a
+}
+
+; GCN-LABEL: {{^}}max_fneg_select_regression_posk_0:
+; GCN-SAFE: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], 1.0, -v0
+; GCN-NONAN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, 1.0
+define amdgpu_ps float @max_fneg_select_regression_posk_0(float %a, float %b) #0 {
+ %fneg.a = fsub float -0.0, %a
+ %cmp.a = fcmp ugt float %a, -1.0
+ %min.a = select i1 %cmp.a, float %fneg.a, float 1.0
+ ret float %min.a
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/fmin_legacy.f64.ll b/test/CodeGen/AMDGPU/fmin_legacy.f64.ll
index 6982ee0c0cb3..99bc114831ca 100644
--- a/test/CodeGen/AMDGPU/fmin_legacy.f64.ll
+++ b/test/CodeGen/AMDGPU/fmin_legacy.f64.ll
@@ -3,7 +3,7 @@
declare i32 @llvm.amdgcn.workitem.id.x() #1
; FUNC-LABEL: @test_fmin_legacy_f64
-define void @test_fmin_legacy_f64(<4 x double> addrspace(1)* %out, <4 x double> inreg %reg0) #0 {
+define amdgpu_kernel void @test_fmin_legacy_f64(<4 x double> addrspace(1)* %out, <4 x double> inreg %reg0) #0 {
%r0 = extractelement <4 x double> %reg0, i32 0
%r1 = extractelement <4 x double> %reg0, i32 1
%r2 = fcmp uge double %r0, %r1
@@ -14,7 +14,7 @@ define void @test_fmin_legacy_f64(<4 x double> addrspace(1)* %out, <4 x double>
}
; FUNC-LABEL: @test_fmin_legacy_ule_f64
-define void @test_fmin_legacy_ule_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmin_legacy_ule_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -29,7 +29,7 @@ define void @test_fmin_legacy_ule_f64(double addrspace(1)* %out, double addrspac
}
; FUNC-LABEL: @test_fmin_legacy_ole_f64
-define void @test_fmin_legacy_ole_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmin_legacy_ole_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -44,7 +44,7 @@ define void @test_fmin_legacy_ole_f64(double addrspace(1)* %out, double addrspac
}
; FUNC-LABEL: @test_fmin_legacy_olt_f64
-define void @test_fmin_legacy_olt_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmin_legacy_olt_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -59,7 +59,7 @@ define void @test_fmin_legacy_olt_f64(double addrspace(1)* %out, double addrspac
}
; FUNC-LABEL: @test_fmin_legacy_ult_f64
-define void @test_fmin_legacy_ult_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmin_legacy_ult_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
diff --git a/test/CodeGen/AMDGPU/fmin_legacy.ll b/test/CodeGen/AMDGPU/fmin_legacy.ll
index 79acd02e6d1f..52336f95a909 100644
--- a/test/CodeGen/AMDGPU/fmin_legacy.ll
+++ b/test/CodeGen/AMDGPU/fmin_legacy.ll
@@ -14,7 +14,7 @@ declare i32 @llvm.r600.read.tidig.x() #1
; EG: MIN *
; SI-SAFE: v_min_legacy_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
; SI-NONAN: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_fmin_legacy_subreg_inputs_f32(<4 x float> addrspace(1)* %out, <4 x float> inreg %reg0) #0 {
+define amdgpu_kernel void @s_test_fmin_legacy_subreg_inputs_f32(<4 x float> addrspace(1)* %out, <4 x float> inreg %reg0) #0 {
%r0 = extractelement <4 x float> %reg0, i32 0
%r1 = extractelement <4 x float> %reg0, i32 1
%r2 = fcmp uge float %r0, %r1
@@ -34,7 +34,7 @@ define void @s_test_fmin_legacy_subreg_inputs_f32(<4 x float> addrspace(1)* %out
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[VA]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[VB]]
-define void @s_test_fmin_legacy_ule_f32(float addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(float addrspace(1)* %out, float %a, float %b) #0 {
%cmp = fcmp ule float %a, %b
%val = select i1 %cmp, float %a, float %b
store float %val, float addrspace(1)* %out, align 4
@@ -46,7 +46,7 @@ define void @s_test_fmin_legacy_ule_f32(float addrspace(1)* %out, float %a, floa
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
-define void @test_fmin_legacy_ule_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmin_legacy_ule_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -65,7 +65,7 @@ define void @test_fmin_legacy_ule_f32(float addrspace(1)* %out, float addrspace(
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
-define void @test_fmin_legacy_ole_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmin_legacy_ole_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -84,7 +84,7 @@ define void @test_fmin_legacy_ole_f32(float addrspace(1)* %out, float addrspace(
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
-define void @test_fmin_legacy_olt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmin_legacy_olt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -103,7 +103,7 @@ define void @test_fmin_legacy_olt_f32(float addrspace(1)* %out, float addrspace(
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
-define void @test_fmin_legacy_ult_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmin_legacy_ult_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -122,7 +122,7 @@ define void @test_fmin_legacy_ult_f32(float addrspace(1)* %out, float addrspace(
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
-define void @test_fmin_legacy_ult_v1f32(<1 x float> addrspace(1)* %out, <1 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmin_legacy_ult_v1f32(<1 x float> addrspace(1)* %out, <1 x float> addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr <1 x float>, <1 x float> addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr <1 x float>, <1 x float> addrspace(1)* %gep.0, i32 1
@@ -144,7 +144,7 @@ define void @test_fmin_legacy_ult_v1f32(<1 x float> addrspace(1)* %out, <1 x flo
; SI-NONAN: v_min_f32_e32
; SI-NONAN: v_min_f32_e32
-define void @test_fmin_legacy_ult_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmin_legacy_ult_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr <2 x float>, <2 x float> addrspace(1)* %gep.0, i32 1
@@ -166,7 +166,7 @@ define void @test_fmin_legacy_ult_v2f32(<2 x float> addrspace(1)* %out, <2 x flo
; SI-NONAN: v_min_f32_e32
; SI-NONAN: v_min_f32_e32
; SI-NONAN: v_min_f32_e32
-define void @test_fmin_legacy_ult_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmin_legacy_ult_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr <3 x float>, <3 x float> addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr <3 x float>, <3 x float> addrspace(1)* %gep.0, i32 1
@@ -188,7 +188,7 @@ define void @test_fmin_legacy_ult_v3f32(<3 x float> addrspace(1)* %out, <3 x flo
; SI-NEXT: v_cndmask_b32
; SI-NOT: v_min
; SI: s_endpgm
-define void @test_fmin_legacy_ole_f32_multi_use(float addrspace(1)* %out0, i1 addrspace(1)* %out1, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fmin_legacy_ole_f32_multi_use(float addrspace(1)* %out0, i1 addrspace(1)* %out1, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
diff --git a/test/CodeGen/AMDGPU/fminnum.f64.ll b/test/CodeGen/AMDGPU/fminnum.f64.ll
index 0f929d6a81f0..01b267411212 100644
--- a/test/CodeGen/AMDGPU/fminnum.f64.ll
+++ b/test/CodeGen/AMDGPU/fminnum.f64.ll
@@ -9,7 +9,7 @@ declare <16 x double> @llvm.minnum.v16f64(<16 x double>, <16 x double>) #0
; FUNC-LABEL: @test_fmin_f64
; SI: v_min_f64
-define void @test_fmin_f64(double addrspace(1)* %out, double %a, double %b) nounwind {
+define amdgpu_kernel void @test_fmin_f64(double addrspace(1)* %out, double %a, double %b) nounwind {
%val = call double @llvm.minnum.f64(double %a, double %b) #0
store double %val, double addrspace(1)* %out, align 8
ret void
@@ -18,7 +18,7 @@ define void @test_fmin_f64(double addrspace(1)* %out, double %a, double %b) noun
; FUNC-LABEL: @test_fmin_v2f64
; SI: v_min_f64
; SI: v_min_f64
-define void @test_fmin_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b) nounwind {
+define amdgpu_kernel void @test_fmin_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b) nounwind {
%val = call <2 x double> @llvm.minnum.v2f64(<2 x double> %a, <2 x double> %b) #0
store <2 x double> %val, <2 x double> addrspace(1)* %out, align 16
ret void
@@ -29,7 +29,7 @@ define void @test_fmin_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <
; SI: v_min_f64
; SI: v_min_f64
; SI: v_min_f64
-define void @test_fmin_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b) nounwind {
+define amdgpu_kernel void @test_fmin_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b) nounwind {
%val = call <4 x double> @llvm.minnum.v4f64(<4 x double> %a, <4 x double> %b) #0
store <4 x double> %val, <4 x double> addrspace(1)* %out, align 32
ret void
@@ -44,7 +44,7 @@ define void @test_fmin_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, <
; SI: v_min_f64
; SI: v_min_f64
; SI: v_min_f64
-define void @test_fmin_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, <8 x double> %b) nounwind {
+define amdgpu_kernel void @test_fmin_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, <8 x double> %b) nounwind {
%val = call <8 x double> @llvm.minnum.v8f64(<8 x double> %a, <8 x double> %b) #0
store <8 x double> %val, <8 x double> addrspace(1)* %out, align 64
ret void
@@ -67,7 +67,7 @@ define void @test_fmin_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, <
; SI: v_min_f64
; SI: v_min_f64
; SI: v_min_f64
-define void @test_fmin_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %a, <16 x double> %b) nounwind {
+define amdgpu_kernel void @test_fmin_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %a, <16 x double> %b) nounwind {
%val = call <16 x double> @llvm.minnum.v16f64(<16 x double> %a, <16 x double> %b) #0
store <16 x double> %val, <16 x double> addrspace(1)* %out, align 128
ret void
diff --git a/test/CodeGen/AMDGPU/fminnum.ll b/test/CodeGen/AMDGPU/fminnum.ll
index abd2b9d3e4d1..9e997c7a1045 100644
--- a/test/CodeGen/AMDGPU/fminnum.ll
+++ b/test/CodeGen/AMDGPU/fminnum.ll
@@ -13,7 +13,7 @@ declare <16 x float> @llvm.minnum.v16f32(<16 x float>, <16 x float>) #0
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG: MIN_DX10 {{.*}}[[OUT]]
-define void @test_fmin_f32(float addrspace(1)* %out, float %a, float %b) nounwind {
+define amdgpu_kernel void @test_fmin_f32(float addrspace(1)* %out, float %a, float %b) nounwind {
%val = call float @llvm.minnum.f32(float %a, float %b) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -26,7 +26,7 @@ define void @test_fmin_f32(float addrspace(1)* %out, float %a, float %b) nounwin
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+]]
; EG: MIN_DX10 {{.*}}[[OUT]]
; EG: MIN_DX10 {{.*}}[[OUT]]
-define void @test_fmin_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) nounwind {
+define amdgpu_kernel void @test_fmin_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) nounwind {
%val = call <2 x float> @llvm.minnum.v2f32(<2 x float> %a, <2 x float> %b) #0
store <2 x float> %val, <2 x float> addrspace(1)* %out, align 8
ret void
@@ -43,7 +43,7 @@ define void @test_fmin_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2
; EG: MIN_DX10 {{.*}}[[OUT]]
; EG: MIN_DX10 {{.*}}[[OUT]]
; EG: MIN_DX10 {{.*}}[[OUT]]
-define void @test_fmin_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b) nounwind {
+define amdgpu_kernel void @test_fmin_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b) nounwind {
%val = call <4 x float> @llvm.minnum.v4f32(<4 x float> %a, <4 x float> %b) #0
store <4 x float> %val, <4 x float> addrspace(1)* %out, align 16
ret void
@@ -69,7 +69,7 @@ define void @test_fmin_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4
; EG-DAG: MIN_DX10 {{.*}}[[OUT2]].Y
; EG-DAG: MIN_DX10 {{.*}}[[OUT2]].Z
; EG-DAG: MIN_DX10 {{.*}}[[OUT2]].W
-define void @test_fmin_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b) nounwind {
+define amdgpu_kernel void @test_fmin_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b) nounwind {
%val = call <8 x float> @llvm.minnum.v8f32(<8 x float> %a, <8 x float> %b) #0
store <8 x float> %val, <8 x float> addrspace(1)* %out, align 32
ret void
@@ -113,7 +113,7 @@ define void @test_fmin_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8
; EG-DAG: MIN_DX10 {{.*}}[[OUT4]].Y
; EG-DAG: MIN_DX10 {{.*}}[[OUT4]].Z
; EG-DAG: MIN_DX10 {{.*}}[[OUT4]].W
-define void @test_fmin_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %a, <16 x float> %b) nounwind {
+define amdgpu_kernel void @test_fmin_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %a, <16 x float> %b) nounwind {
%val = call <16 x float> @llvm.minnum.v16f32(<16 x float> %a, <16 x float> %b) #0
store <16 x float> %val, <16 x float> addrspace(1)* %out, align 64
ret void
@@ -127,7 +127,7 @@ define void @test_fmin_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %a,
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG-NOT: MIN_DX10
; EG: MOV {{.*}}[[OUT]], literal.{{[xy]}}
-define void @constant_fold_fmin_f32(float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @constant_fold_fmin_f32(float addrspace(1)* %out) nounwind {
%val = call float @llvm.minnum.f32(float 1.0, float 2.0) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -142,7 +142,7 @@ define void @constant_fold_fmin_f32(float addrspace(1)* %out) nounwind {
; EG-NOT: MIN_DX10
; EG: MOV {{.*}}[[OUT]], literal.{{[xy]}}
; EG: 2143289344({{nan|1\.#QNAN0e\+00}})
-define void @constant_fold_fmin_f32_nan_nan(float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @constant_fold_fmin_f32_nan_nan(float addrspace(1)* %out) nounwind {
%val = call float @llvm.minnum.f32(float 0x7FF8000000000000, float 0x7FF8000000000000) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -156,7 +156,7 @@ define void @constant_fold_fmin_f32_nan_nan(float addrspace(1)* %out) nounwind {
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG-NOT: MIN_DX10
; EG: MOV {{.*}}[[OUT]], literal.{{[xy]}}
-define void @constant_fold_fmin_f32_val_nan(float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @constant_fold_fmin_f32_val_nan(float addrspace(1)* %out) nounwind {
%val = call float @llvm.minnum.f32(float 1.0, float 0x7FF8000000000000) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -170,7 +170,7 @@ define void @constant_fold_fmin_f32_val_nan(float addrspace(1)* %out) nounwind {
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG-NOT: MIN_DX10
; EG: MOV {{.*}}[[OUT]], literal.{{[xy]}}
-define void @constant_fold_fmin_f32_nan_val(float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @constant_fold_fmin_f32_nan_val(float addrspace(1)* %out) nounwind {
%val = call float @llvm.minnum.f32(float 0x7FF8000000000000, float 1.0) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -184,7 +184,7 @@ define void @constant_fold_fmin_f32_nan_val(float addrspace(1)* %out) nounwind {
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG-NOT: MIN_DX10
; EG: MOV {{.*}}[[OUT]], literal.{{[xy]}}
-define void @constant_fold_fmin_f32_p0_p0(float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @constant_fold_fmin_f32_p0_p0(float addrspace(1)* %out) nounwind {
%val = call float @llvm.minnum.f32(float 0.0, float 0.0) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -198,7 +198,7 @@ define void @constant_fold_fmin_f32_p0_p0(float addrspace(1)* %out) nounwind {
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG-NOT: MIN_DX10
; EG: MOV {{.*}}[[OUT]], literal.{{[xy]}}
-define void @constant_fold_fmin_f32_p0_n0(float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @constant_fold_fmin_f32_p0_n0(float addrspace(1)* %out) nounwind {
%val = call float @llvm.minnum.f32(float 0.0, float -0.0) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -212,7 +212,7 @@ define void @constant_fold_fmin_f32_p0_n0(float addrspace(1)* %out) nounwind {
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG-NOT: MIN_DX10
; EG: MOV {{.*}}[[OUT]], literal.{{[xy]}}
-define void @constant_fold_fmin_f32_n0_p0(float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @constant_fold_fmin_f32_n0_p0(float addrspace(1)* %out) nounwind {
%val = call float @llvm.minnum.f32(float -0.0, float 0.0) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -226,7 +226,7 @@ define void @constant_fold_fmin_f32_n0_p0(float addrspace(1)* %out) nounwind {
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG-NOT: MIN_DX10
; EG: MOV {{.*}}[[OUT]], literal.{{[xy]}}
-define void @constant_fold_fmin_f32_n0_n0(float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @constant_fold_fmin_f32_n0_n0(float addrspace(1)* %out) nounwind {
%val = call float @llvm.minnum.f32(float -0.0, float -0.0) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -237,7 +237,7 @@ define void @constant_fold_fmin_f32_n0_n0(float addrspace(1)* %out) nounwind {
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG: MIN_DX10 {{.*}}[[OUT]], {{KC0\[[0-9]\].[XYZW]}}, literal.{{[xy]}}
-define void @fmin_var_immediate_f32(float addrspace(1)* %out, float %a) nounwind {
+define amdgpu_kernel void @fmin_var_immediate_f32(float addrspace(1)* %out, float %a) nounwind {
%val = call float @llvm.minnum.f32(float %a, float 2.0) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -248,7 +248,7 @@ define void @fmin_var_immediate_f32(float addrspace(1)* %out, float %a) nounwind
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG: MIN_DX10 {{.*}}[[OUT]], {{KC0\[[0-9]\].[XYZW]}}, literal.{{[xy]}}
-define void @fmin_immediate_var_f32(float addrspace(1)* %out, float %a) nounwind {
+define amdgpu_kernel void @fmin_immediate_var_f32(float addrspace(1)* %out, float %a) nounwind {
%val = call float @llvm.minnum.f32(float 2.0, float %a) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -260,7 +260,7 @@ define void @fmin_immediate_var_f32(float addrspace(1)* %out, float %a) nounwind
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG: MIN_DX10 {{.*}}[[OUT]], {{KC0\[[0-9]\].[XYZW]}}, literal.{{[xy]}}
-define void @fmin_var_literal_f32(float addrspace(1)* %out, float %a) nounwind {
+define amdgpu_kernel void @fmin_var_literal_f32(float addrspace(1)* %out, float %a) nounwind {
%val = call float @llvm.minnum.f32(float %a, float 99.0) #0
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -272,7 +272,7 @@ define void @fmin_var_literal_f32(float addrspace(1)* %out, float %a) nounwind {
; EG: MEM_RAT_CACHELESS STORE_RAW [[OUT:T[0-9]+\.[XYZW]]]
; EG: MIN_DX10 {{.*}}[[OUT]], {{KC0\[[0-9]\].[XYZW]}}, literal.{{[xy]}}
-define void @fmin_literal_var_f32(float addrspace(1)* %out, float %a) nounwind {
+define amdgpu_kernel void @fmin_literal_var_f32(float addrspace(1)* %out, float %a) nounwind {
%val = call float @llvm.minnum.f32(float 99.0, float %a) #0
store float %val, float addrspace(1)* %out, align 4
ret void
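
The constant_fold_* and immediate tests above all exercise the IEEE-754 minNum semantics of llvm.minnum, which is what the EG "MOV ..., literal" checks encode. Summarized, with the signed-zero cases being the one place either answer is acceptable:

; minnum(1.0, 2.0)   -> 1.0
; minnum(qNaN, qNaN) -> qNaN  (2143289344 is the 0x7FC00000 quiet-NaN pattern)
; minnum(1.0, qNaN)  -> 1.0   (a NaN operand is ignored in favor of the number)
; minnum(qNaN, 1.0)  -> 1.0
; minnum(+0.0, -0.0) -> +0.0 or -0.0 (the sign of zero is unspecified)
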
diff --git a/test/CodeGen/AMDGPU/fmul-2-combine-multi-use.ll b/test/CodeGen/AMDGPU/fmul-2-combine-multi-use.ll
index 8663a2129fc0..4002712ab169 100644
--- a/test/CodeGen/AMDGPU/fmul-2-combine-multi-use.ll
+++ b/test/CodeGen/AMDGPU/fmul-2-combine-multi-use.ll
@@ -1,5 +1,7 @@
; XUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=+fp64-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=VI-DENORM %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-fp64-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=VI-FLUSH %s
+
; Make sure (fmul (fadd x, x), c) -> (fmul x, (fmul 2.0, c)) doesn't
; make add an instruction if the fadd has more than one use.
@@ -19,9 +21,9 @@ declare float @llvm.fabs.f32(float) #1
; VI: v_cmp_gt_f32_e64 vcc, |v{{[0-9]+}}|, |v{{[0-9]+}}|
; VI: v_cndmask_b32_e32
; VI: v_add_f32_e64 v{{[0-9]+}}, |v{{[0-9]+}}|, |v{{[0-9]+}}|
-; VI: v_mul_f32_e64 v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
-; VI: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 1.0
-define void @multiple_fadd_use_test_f32(float addrspace(1)* %out, float %x, float %y, float %z) #0 {
+; VI: v_mul_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, 1.0
+define amdgpu_kernel void @multiple_fadd_use_test_f32(float addrspace(1)* %out, float %x, float %y, float %z) #0 {
%a11 = fadd fast float %y, -1.0
%a12 = call float @llvm.fabs.f32(float %a11)
%a13 = fadd fast float %x, -1.0
@@ -42,7 +44,7 @@ define void @multiple_fadd_use_test_f32(float addrspace(1)* %out, float %x, floa
; GCN-DAG: buffer_store_dword [[MUL2]]
; GCN-DAG: buffer_store_dword [[MAD]]
; GCN: s_endpgm
-define void @multiple_use_fadd_fmac_f32(float addrspace(1)* %out, float %x, float %y) #0 {
+define amdgpu_kernel void @multiple_use_fadd_fmac_f32(float addrspace(1)* %out, float %x, float %y) #0 {
%out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
%mul2 = fmul fast float %x, 2.0
%mad = fadd fast float %mul2, %y
@@ -57,7 +59,7 @@ define void @multiple_use_fadd_fmac_f32(float addrspace(1)* %out, float %x, floa
; GCN-DAG: buffer_store_dword [[MUL2]]
; GCN-DAG: buffer_store_dword [[MAD]]
; GCN: s_endpgm
-define void @multiple_use_fadd_fmad_f32(float addrspace(1)* %out, float %x, float %y) #0 {
+define amdgpu_kernel void @multiple_use_fadd_fmad_f32(float addrspace(1)* %out, float %x, float %y) #0 {
%out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
%x.abs = call float @llvm.fabs.f32(float %x)
%mul2 = fmul fast float %x.abs, 2.0
@@ -70,7 +72,7 @@ define void @multiple_use_fadd_fmad_f32(float addrspace(1)* %out, float %x, floa
; GCN-LABEL: {{^}}multiple_use_fadd_multi_fmad_f32:
; GCN: v_mad_f32 {{v[0-9]+}}, |[[X:s[0-9]+]]|, 2.0, v{{[0-9]+}}
; GCN: v_mad_f32 {{v[0-9]+}}, |[[X]]|, 2.0, v{{[0-9]+}}
-define void @multiple_use_fadd_multi_fmad_f32(float addrspace(1)* %out, float %x, float %y, float %z) #0 {
+define amdgpu_kernel void @multiple_use_fadd_multi_fmad_f32(float addrspace(1)* %out, float %x, float %y, float %z) #0 {
%out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
%x.abs = call float @llvm.fabs.f32(float %x)
%mul2 = fmul fast float %x.abs, 2.0
@@ -85,7 +87,7 @@ define void @multiple_use_fadd_multi_fmad_f32(float addrspace(1)* %out, float %x
; GCN: v_mul_f32_e64 [[TMP0:v[0-9]+]], [[X:s[0-9]+]], -4.0
; GCN: v_mul_f32_e32 [[RESULT:v[0-9]+]], [[X]], [[TMP0]]
; GCN: buffer_store_dword [[RESULT]]
-define void @fmul_x2_xn2_f32(float addrspace(1)* %out, float %x, float %y) #0 {
+define amdgpu_kernel void @fmul_x2_xn2_f32(float addrspace(1)* %out, float %x, float %y) #0 {
%out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
%mul2 = fmul fast float %x, 2.0
%muln2 = fmul fast float %x, -2.0
@@ -99,7 +101,7 @@ define void @fmul_x2_xn2_f32(float addrspace(1)* %out, float %x, float %y) #0 {
; GCN: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[X:s[0-9]+]], [[K]]
; GCN: v_mul_f32_e32 [[RESULT:v[0-9]+]], [[X]], [[TMP0]]
; GCN: buffer_store_dword [[RESULT]]
-define void @fmul_x2_xn3_f32(float addrspace(1)* %out, float %x, float %y) #0 {
+define amdgpu_kernel void @fmul_x2_xn3_f32(float addrspace(1)* %out, float %x, float %y) #0 {
%out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
%mul2 = fmul fast float %x, 2.0
%muln2 = fmul fast float %x, -3.0
@@ -114,9 +116,10 @@ define void @fmul_x2_xn3_f32(float addrspace(1)* %out, float %x, float %y) #0 {
; VI: v_cmp_gt_f16_e64 vcc, |v{{[0-9]+}}|, |v{{[0-9]+}}|
; VI: v_cndmask_b32_e32
; VI: v_add_f16_e64 v{{[0-9]+}}, |v{{[0-9]+}}|, |v{{[0-9]+}}|
-; VI: v_mul_f16_e64 v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
-; VI: v_mad_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 1.0
-define void @multiple_fadd_use_test_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 zeroext %y.arg, i16 zeroext %z.arg) #0 {
+; VI: v_mul_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI-FLUSH: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, 1.0
+; VI-DENORM: v_fma_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, 1.0
+define amdgpu_kernel void @multiple_fadd_use_test_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 zeroext %y.arg, i16 zeroext %z.arg) #0 {
%x = bitcast i16 %x.arg to half
%y = bitcast i16 %y.arg to half
%z = bitcast i16 %z.arg to half
@@ -136,11 +139,14 @@ define void @multiple_fadd_use_test_f16(half addrspace(1)* %out, i16 zeroext %x.
; GCN-LABEL: {{^}}multiple_use_fadd_fmac_f16:
; GCN-DAG: v_add_f16_e64 [[MUL2:v[0-9]+]], [[X:s[0-9]+]], s{{[0-9]+}}
-; GCN-DAG: v_mac_f16_e64 [[MAD:v[0-9]+]], [[X]], 2.0
+
+; VI-FLUSH-DAG: v_mac_f16_e64 [[MAD:v[0-9]+]], [[X]], 2.0
+; VI-DENORM-DAG: v_fma_f16 [[MAD:v[0-9]+]], [[X]], 2.0, v{{[0-9]+}}
+
; GCN-DAG: buffer_store_short [[MUL2]]
; GCN-DAG: buffer_store_short [[MAD]]
; GCN: s_endpgm
-define void @multiple_use_fadd_fmac_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 zeroext %y.arg) #0 {
+define amdgpu_kernel void @multiple_use_fadd_fmac_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 zeroext %y.arg) #0 {
%x = bitcast i16 %x.arg to half
%y = bitcast i16 %y.arg to half
%out.gep.1 = getelementptr half, half addrspace(1)* %out, i32 1
@@ -153,11 +159,14 @@ define void @multiple_use_fadd_fmac_f16(half addrspace(1)* %out, i16 zeroext %x.
; GCN-LABEL: {{^}}multiple_use_fadd_fmad_f16:
; GCN-DAG: v_add_f16_e64 [[MUL2:v[0-9]+]], |[[X:s[0-9]+]]|, |s{{[0-9]+}}|
-; GCN-DAG: v_mad_f16 [[MAD:v[0-9]+]], |[[X]]|, 2.0, v{{[0-9]+}}
+
+; VI-FLUSH-DAG: v_mad_f16 [[MAD:v[0-9]+]], |[[X]]|, 2.0, v{{[0-9]+}}
+; VI-DENORM-DAG: v_fma_f16 [[MAD:v[0-9]+]], |[[X]]|, 2.0, v{{[0-9]+}}
+
; GCN-DAG: buffer_store_short [[MUL2]]
; GCN-DAG: buffer_store_short [[MAD]]
; GCN: s_endpgm
-define void @multiple_use_fadd_fmad_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 zeroext %y.arg) #0 {
+define amdgpu_kernel void @multiple_use_fadd_fmad_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 zeroext %y.arg) #0 {
%x = bitcast i16 %x.arg to half
%y = bitcast i16 %y.arg to half
%out.gep.1 = getelementptr half, half addrspace(1)* %out, i32 1
@@ -170,9 +179,13 @@ define void @multiple_use_fadd_fmad_f16(half addrspace(1)* %out, i16 zeroext %x.
}
; GCN-LABEL: {{^}}multiple_use_fadd_multi_fmad_f16:
-; GCN: v_mad_f16 {{v[0-9]+}}, |[[X:s[0-9]+]]|, 2.0, v{{[0-9]+}}
-; GCN: v_mad_f16 {{v[0-9]+}}, |[[X]]|, 2.0, v{{[0-9]+}}
-define void @multiple_use_fadd_multi_fmad_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 zeroext %y.arg, i16 zeroext %z.arg) #0 {
+; VI-FLUSH: v_mad_f16 {{v[0-9]+}}, |[[X:s[0-9]+]]|, 2.0, v{{[0-9]+}}
+; VI-FLUSH: v_mad_f16 {{v[0-9]+}}, |[[X]]|, 2.0, v{{[0-9]+}}
+
+; VI-DENORM: v_fma_f16 {{v[0-9]+}}, |[[X:s[0-9]+]]|, 2.0, v{{[0-9]+}}
+; VI-DENORM: v_fma_f16 {{v[0-9]+}}, |[[X]]|, 2.0, v{{[0-9]+}}
+
+define amdgpu_kernel void @multiple_use_fadd_multi_fmad_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 zeroext %y.arg, i16 zeroext %z.arg) #0 {
%x = bitcast i16 %x.arg to half
%y = bitcast i16 %y.arg to half
%z = bitcast i16 %z.arg to half
@@ -190,7 +203,7 @@ define void @multiple_use_fadd_multi_fmad_f16(half addrspace(1)* %out, i16 zeroe
; GCN: v_mul_f16_e64 [[TMP0:v[0-9]+]], [[X:s[0-9]+]], -4.0
; GCN: v_mul_f16_e32 [[RESULT:v[0-9]+]], [[X]], [[TMP0]]
; GCN: buffer_store_short [[RESULT]]
-define void @fmul_x2_xn2_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 zeroext %y.arg) #0 {
+define amdgpu_kernel void @fmul_x2_xn2_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 zeroext %y.arg) #0 {
%x = bitcast i16 %x.arg to half
%y = bitcast i16 %y.arg to half
%out.gep.1 = getelementptr half, half addrspace(1)* %out, i32 1
@@ -206,7 +219,7 @@ define void @fmul_x2_xn2_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 ze
; GCN: v_mul_f16_e32 [[TMP0:v[0-9]+]], [[X:s[0-9]+]], [[K]]
; GCN: v_mul_f16_e32 [[RESULT:v[0-9]+]], [[X]], [[TMP0]]
; GCN: buffer_store_short [[RESULT]]
-define void @fmul_x2_xn3_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 zeroext %y.arg) #0 {
+define amdgpu_kernel void @fmul_x2_xn3_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 zeroext %y.arg) #0 {
%x = bitcast i16 %x.arg to half
%y = bitcast i16 %y.arg to half
%out.gep.1 = getelementptr half, half addrspace(1)* %out, i32 1
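
The VI-FLUSH/VI-DENORM prefix split introduced above exists because v_mad_f16 flushes fp16 denormals while v_fma_f16 preserves them, so running with +fp64-fp16-denormals forces the fma form. A reduced sketch of an input that selects one or the other (illustrative kernel, not part of the patch):

; VI-FLUSH:  v_mad_f16
; VI-DENORM: v_fma_f16
define amdgpu_kernel void @mad_or_fma_sketch(half addrspace(1)* %out, half %x, half %y) {
  %mul = fmul fast half %x, 2.0
  %add = fadd fast half %mul, %y
  store half %add, half addrspace(1)* %out
  ret void
}
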
diff --git a/test/CodeGen/AMDGPU/fmul.f16.ll b/test/CodeGen/AMDGPU/fmul.f16.ll
index 4f47d2c8e755..4e96091ae256 100644
--- a/test/CodeGen/AMDGPU/fmul.f16.ll
+++ b/test/CodeGen/AMDGPU/fmul.f16.ll
@@ -11,7 +11,7 @@
; VI: v_mul_f16_e32 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @fmul_f16(
+define amdgpu_kernel void @fmul_f16(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -25,14 +25,13 @@ entry:
; GCN-LABEL: {{^}}fmul_f16_imm_a
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], 0x4200{{$}}
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
-; SI: v_mul_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]], v[[B_F32]]
+; SI: v_mul_f32_e32 v[[R_F32:[0-9]+]], 0x40400000, v[[B_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
; VI: v_mul_f16_e32 v[[R_F16:[0-9]+]], 0x4200, v[[B_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @fmul_f16_imm_a(
+define amdgpu_kernel void @fmul_f16_imm_a(
half addrspace(1)* %r,
half addrspace(1)* %b) {
entry:
@@ -44,14 +43,14 @@ entry:
; GCN-LABEL: {{^}}fmul_f16_imm_b
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], 0x4400{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
-; SI: v_mul_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_mul_f32_e32 v[[R_F32:[0-9]+]], 4.0, v[[A_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+
; VI: v_mul_f16_e32 v[[R_F16:[0-9]+]], 4.0, v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @fmul_f16_imm_b(
+define amdgpu_kernel void @fmul_f16_imm_b(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -61,27 +60,30 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fmul_v2f16
+; GCN-LABEL: {{^}}fmul_v2f16:
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
; SI: v_mul_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI: v_mul_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_mul_f16_e32 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
-; VI: v_mul_f16_e32 v[[R_F16_1:[0-9]+]], v[[B_F16_1]], v[[A_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; VI-DAG: v_mul_f16_e32 v[[R_F16_LO:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
+; VI-DAG: v_mul_f16_sdwa v[[R_F16_HI:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @fmul_v2f16(
+define amdgpu_kernel void @fmul_v2f16(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
@@ -93,25 +95,22 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fmul_v2f16_imm_a
+; GCN-LABEL: {{^}}fmul_v2f16_imm_a:
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], 0x4200{{$}}
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], 0x4400{{$}}
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_mul_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_mul_f32_e32 v[[R_F32_0:[0-9]+]], 0x40400000, v[[B_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; SI: v_mul_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]]
+; SI: v_mul_f32_e32 v[[R_F32_1:[0-9]+]], 4.0, v[[B_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_mul_f16_e32 v[[R_F16_0:[0-9]+]], 0x4200, v[[B_V2_F16]]
-; VI: v_mul_f16_e32 v[[R_F16_1:[0-9]+]], 4.0, v[[B_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; VI-DAG: v_mul_f16_e32 v[[R_F16_1:[0-9]+]], 4.0, v[[B_F16_1]]
+; VI-DAG: v_mul_f16_e32 v[[R_F16_0:[0-9]+]], 0x4200, v[[B_V2_F16]]
+; GCN-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @fmul_v2f16_imm_a(
+define amdgpu_kernel void @fmul_v2f16_imm_a(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %b) {
entry:
@@ -121,25 +120,22 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fmul_v2f16_imm_b
+; GCN-LABEL: {{^}}fmul_v2f16_imm_b:
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], 0x4400{{$}}
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], 0x4200{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_mul_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
+; SI: v_mul_f32_e32 v[[R_F32_0:[0-9]+]], 4.0, v[[A_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; SI: v_mul_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_mul_f32_e32 v[[R_F32_1:[0-9]+]], 0x40400000, v[[A_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_mul_f16_e32 v[[R_F16_0:[0-9]+]], 4.0, v[[A_V2_F16]]
-; VI: v_mul_f16_e32 v[[R_F16_1:[0-9]+]], 0x4200, v[[A_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; VI-DAG: v_mul_f16_e32 v[[R_F16_1:[0-9]+]], 0x4200, v[[A_F16_1]]
+; VI-DAG: v_mul_f16_e32 v[[R_F16_0:[0-9]+]], 4.0, v[[A_V2_F16]]
+; GCN-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @fmul_v2f16_imm_b(
+define amdgpu_kernel void @fmul_v2f16_imm_b(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
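
The reshuffled SI/VI checks in this file track two different lowerings of half arithmetic. SI has no native f16 ALU: each element is widened with v_cvt_f32_f16, multiplied in f32, and truncated back, after which the two halves are repacked with a shift and an or. VI multiplies natively, and after this patch handles the high half of a <2 x half> with v_mul_f16_sdwa (WORD_1 source/destination selects) instead of explicit unpack/repack code. Roughly:

; SI: v_cvt_f32_f16 per element -> v_mul_f32 -> v_cvt_f16_f32, then v_lshlrev/v_or repack
; VI: v_mul_f16_e32 (low half) + v_mul_f16_sdwa WORD_1 (high half), then one v_or_b32
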
diff --git a/test/CodeGen/AMDGPU/fmul.ll b/test/CodeGen/AMDGPU/fmul.ll
index d0c39b539456..125de7aabfd4 100644
--- a/test/CodeGen/AMDGPU/fmul.ll
+++ b/test/CodeGen/AMDGPU/fmul.ll
@@ -6,24 +6,20 @@
; GCN: v_mul_f32
; R600: MUL_IEEE {{\** *}}{{T[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].W
-define void @fmul_f32(float addrspace(1)* %out, float %a, float %b) {
+define amdgpu_kernel void @fmul_f32(float addrspace(1)* %out, float %a, float %b) {
entry:
%0 = fmul float %a, %b
store float %0, float addrspace(1)* %out
ret void
}
-declare float @llvm.r600.load.input(i32) readnone
-
-declare void @llvm.AMDGPU.store.output(float, i32)
-
; FUNC-LABEL: {{^}}fmul_v2f32:
; GCN: v_mul_f32
; GCN: v_mul_f32
; R600: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW]}}
; R600: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW]}}
-define void @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
+define amdgpu_kernel void @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
entry:
%0 = fmul <2 x float> %a, %b
store <2 x float> %0, <2 x float> addrspace(1)* %out
@@ -40,7 +36,7 @@ entry:
; R600: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @fmul_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+define amdgpu_kernel void @fmul_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 1
%a = load <4 x float>, <4 x float> addrspace(1) * %in
%b = load <4 x float>, <4 x float> addrspace(1) * %b_ptr
@@ -53,7 +49,7 @@ define void @fmul_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)
; GCN: v_mul_f32
; GCN-NOT: v_mul_f32
; GCN: s_endpgm
-define void @test_mul_2_k(float addrspace(1)* %out, float %x) #0 {
+define amdgpu_kernel void @test_mul_2_k(float addrspace(1)* %out, float %x) #0 {
%y = fmul float %x, 2.0
%z = fmul float %y, 3.0
store float %z, float addrspace(1)* %out
@@ -65,7 +61,7 @@ define void @test_mul_2_k(float addrspace(1)* %out, float %x) #0 {
; GCN-NOT: v_mul_f32
; GCN-NOT: v_mad_f32
; GCN: s_endpgm
-define void @test_mul_2_k_inv(float addrspace(1)* %out, float %x) #0 {
+define amdgpu_kernel void @test_mul_2_k_inv(float addrspace(1)* %out, float %x) #0 {
%y = fmul float %x, 3.0
%z = fmul float %y, 2.0
store float %z, float addrspace(1)* %out
@@ -79,7 +75,7 @@ define void @test_mul_2_k_inv(float addrspace(1)* %out, float %x) #0 {
; GCN: v_mul_f32
; GCN: v_mul_f32
; GCN-NOT: v_mul_f32
-define void @test_mul_twouse(float addrspace(1)* %out, float %x, float %y) #0 {
+define amdgpu_kernel void @test_mul_twouse(float addrspace(1)* %out, float %x, float %y) #0 {
%a = fmul float %x, 5.0
%b = fsub float -0.0, %a
%c = fmul float %b, %y
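
test_mul_2_k and test_mul_2_k_inv check that two multiplies by constants collapse into one under the function's fast-math attributes, which is why exactly one v_mul_f32 (and no v_mad_f32) may appear. The fold, sketched:

  %y = fmul float %x, 2.0
  %z = fmul float %y, 3.0
  ; reassociates to a single multiply:
  %z = fmul float %x, 6.0
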
diff --git a/test/CodeGen/AMDGPU/fmul64.ll b/test/CodeGen/AMDGPU/fmul64.ll
index 3c222eaba89d..f14233f267b2 100644
--- a/test/CodeGen/AMDGPU/fmul64.ll
+++ b/test/CodeGen/AMDGPU/fmul64.ll
@@ -3,7 +3,7 @@
; FUNC-LABEL: {{^}}fmul_f64:
; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
-define void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+define amdgpu_kernel void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double, double addrspace(1)* %in1
%r1 = load double, double addrspace(1)* %in2
@@ -15,7 +15,7 @@ define void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
; FUNC-LABEL: {{^}}fmul_v2f64:
; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
-define void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
+define amdgpu_kernel void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
<2 x double> addrspace(1)* %in2) {
%r0 = load <2 x double>, <2 x double> addrspace(1)* %in1
%r1 = load <2 x double>, <2 x double> addrspace(1)* %in2
@@ -29,7 +29,7 @@ define void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(
; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
-define void @fmul_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in1,
+define amdgpu_kernel void @fmul_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in1,
<4 x double> addrspace(1)* %in2) {
%r0 = load <4 x double>, <4 x double> addrspace(1)* %in1
%r1 = load <4 x double>, <4 x double> addrspace(1)* %in2
diff --git a/test/CodeGen/AMDGPU/fmuladd.f16.ll b/test/CodeGen/AMDGPU/fmuladd.f16.ll
index 500b00bdcf87..9b713419e747 100644
--- a/test/CodeGen/AMDGPU/fmuladd.f16.ll
+++ b/test/CodeGen/AMDGPU/fmuladd.f16.ll
@@ -1,12 +1,12 @@
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-fp16-denormals -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,VI-FLUSH,VI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-fp16-denormals -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,VI-FLUSH,VI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-fp16-denormals -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,VI-FLUSH,VI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-fp16-denormals -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,VI-FLUSH,VI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-fp64-fp16-denormals -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,VI-FLUSH,VI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-fp64-fp16-denormals -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,VI-FLUSH,VI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-fp64-fp16-denormals -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,VI-FLUSH,VI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-fp64-fp16-denormals -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,VI-FLUSH,VI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+fp16-denormals -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,VI-DENORM-STRICT,VI-DENORM,VI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+fp16-denormals -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,VI-DENORM-STRICT,VI-DENORM,VI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+fp16-denormals -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,VI-DENORM-CONTRACT,VI-DENORM,VI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+fp16-denormals -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,VI-DENORM-CONTRACT,VI-DENORM,VI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+fp64-fp16-denormals -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,VI-DENORM-STRICT,VI-DENORM,VI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+fp64-fp16-denormals -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,VI-DENORM-STRICT,VI-DENORM,VI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+fp64-fp16-denormals -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,VI-DENORM-CONTRACT,VI-DENORM,VI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+fp64-fp16-denormals -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,VI-DENORM-CONTRACT,VI-DENORM,VI %s
declare i32 @llvm.amdgcn.workitem.id.x() #1
declare half @llvm.fmuladd.f16(half, half, half) #1
@@ -16,7 +16,7 @@ declare half @llvm.fabs.f16(half) #1
; VI-FLUSH: v_mac_f16_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
; VI-DENORM: v_fma_f16 {{v[0-9]+, v[0-9]+, v[0-9]+}}
-define void @fmuladd_f16(half addrspace(1)* %out, half addrspace(1)* %in1,
+define amdgpu_kernel void @fmuladd_f16(half addrspace(1)* %out, half addrspace(1)* %in1,
half addrspace(1)* %in2, half addrspace(1)* %in3) #0 {
%r0 = load half, half addrspace(1)* %in1
%r1 = load half, half addrspace(1)* %in2
@@ -34,7 +34,7 @@ define void @fmuladd_f16(half addrspace(1)* %out, half addrspace(1)* %in1,
; VI-DENORM: v_fma_f16 [[RESULT:v[0-9]+]], [[R1]], 2.0, [[R2]]
; VI-DENORM: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fmuladd_2.0_a_b_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fmuladd_2.0_a_b_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.0 = getelementptr half, half addrspace(1)* %out, i32 %tid
%gep.1 = getelementptr half, half addrspace(1)* %gep.0, i32 1
@@ -56,7 +56,7 @@ define void @fmuladd_2.0_a_b_f16(half addrspace(1)* %out, half addrspace(1)* %in
; VI-DENORM: v_fma_f16 [[RESULT:v[0-9]+]], [[R1]], 2.0, [[R2]]
; VI-DENORM: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fmuladd_a_2.0_b_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fmuladd_a_2.0_b_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.0 = getelementptr half, half addrspace(1)* %out, i32 %tid
%gep.1 = getelementptr half, half addrspace(1)* %gep.0, i32 1
@@ -82,7 +82,7 @@ define void @fmuladd_a_2.0_b_f16(half addrspace(1)* %out, half addrspace(1)* %in
; VI-DENORM-STRICT: v_add_f16_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
; VI-DENORM: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fadd_a_a_b_f16(half addrspace(1)* %out,
+define amdgpu_kernel void @fadd_a_a_b_f16(half addrspace(1)* %out,
half addrspace(1)* %in1,
half addrspace(1)* %in2) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -111,7 +111,7 @@ define void @fadd_a_a_b_f16(half addrspace(1)* %out,
; VI-DENORM-STRICT: v_add_f16_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
; VI-DENORM: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fadd_b_a_a_f16(half addrspace(1)* %out,
+define amdgpu_kernel void @fadd_b_a_a_f16(half addrspace(1)* %out,
half addrspace(1)* %in1,
half addrspace(1)* %in2) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -134,7 +134,7 @@ define void @fadd_b_a_a_f16(half addrspace(1)* %out,
; VI-FLUSH: v_mac_f16_e32 [[R2]], -2.0, [[R1]]
; VI-DENORM: v_fma_f16 [[R2:v[0-9]+]], [[R1]], -2.0, [[R2]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[R2]]
-define void @fmuladd_neg_2.0_a_b_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fmuladd_neg_2.0_a_b_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.0 = getelementptr half, half addrspace(1)* %out, i32 %tid
%gep.1 = getelementptr half, half addrspace(1)* %gep.0, i32 1
@@ -156,7 +156,7 @@ define void @fmuladd_neg_2.0_a_b_f16(half addrspace(1)* %out, half addrspace(1)*
; VI-DENORM: v_fma_f16 [[RESULT:v[0-9]+]], -[[R1]], -2.0, [[R2]]
; VI-DENORM: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fmuladd_neg_2.0_neg_a_b_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fmuladd_neg_2.0_neg_a_b_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.0 = getelementptr half, half addrspace(1)* %out, i32 %tid
%gep.1 = getelementptr half, half addrspace(1)* %gep.0, i32 1
@@ -180,7 +180,7 @@ define void @fmuladd_neg_2.0_neg_a_b_f16(half addrspace(1)* %out, half addrspace
; VI-DENORM: v_fma_f16 [[RESULT:v[0-9]+]], -[[R1]], 2.0, [[R2]]
; VI-DENORM: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fmuladd_2.0_neg_a_b_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fmuladd_2.0_neg_a_b_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.0 = getelementptr half, half addrspace(1)* %out, i32 %tid
%gep.1 = getelementptr half, half addrspace(1)* %gep.0, i32 1
@@ -202,7 +202,7 @@ define void @fmuladd_2.0_neg_a_b_f16(half addrspace(1)* %out, half addrspace(1)*
; VI-FLUSH: v_mad_f16 [[RESULT:v[0-9]+]], [[R1]], 2.0, -[[R2]]
; VI-DENORM: v_fma_f16 [[RESULT:v[0-9]+]], [[R1]], 2.0, -[[R2]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fmuladd_2.0_a_neg_b_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fmuladd_2.0_a_neg_b_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.0 = getelementptr half, half addrspace(1)* %out, i32 %tid
%gep.1 = getelementptr half, half addrspace(1)* %gep.0, i32 1
@@ -231,7 +231,7 @@ define void @fmuladd_2.0_a_neg_b_f16(half addrspace(1)* %out, half addrspace(1)*
; VI-DENORM-STRICT: v_subrev_f16_e32 [[RESULT:v[0-9]+]], [[REGC]], [[TMP]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @mad_sub_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
+define amdgpu_kernel void @mad_sub_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%tid.ext = sext i32 %tid to i64
%gep0 = getelementptr half, half addrspace(1)* %ptr, i64 %tid.ext
@@ -261,7 +261,7 @@ define void @mad_sub_f16(half addrspace(1)* noalias nocapture %out, half addrspa
; VI-DENORM-STRICT: v_subrev_f16_e32 [[RESULT:v[0-9]+]], [[TMP]], [[REGC]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @mad_sub_inv_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
+define amdgpu_kernel void @mad_sub_inv_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%tid.ext = sext i32 %tid to i64
%gep0 = getelementptr half, half addrspace(1)* %ptr, i64 %tid.ext
@@ -291,7 +291,7 @@ define void @mad_sub_inv_f16(half addrspace(1)* noalias nocapture %out, half add
; VI-DENORM-STRICT: v_sub_f16_e64 [[RESULT:v[0-9]+]], [[TMP]], |[[REGC]]|
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @mad_sub_fabs_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
+define amdgpu_kernel void @mad_sub_fabs_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%tid.ext = sext i32 %tid to i64
%gep0 = getelementptr half, half addrspace(1)* %ptr, i64 %tid.ext
@@ -323,7 +323,7 @@ define void @mad_sub_fabs_f16(half addrspace(1)* noalias nocapture %out, half ad
; VI-DENORM-STRICT: v_sub_f16_e64 [[RESULT:v[0-9]+]], |[[REGC]]|, [[TMP]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @mad_sub_fabs_inv_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
+define amdgpu_kernel void @mad_sub_fabs_inv_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%tid.ext = sext i32 %tid to i64
%gep0 = getelementptr half, half addrspace(1)* %ptr, i64 %tid.ext
@@ -355,7 +355,7 @@ define void @mad_sub_fabs_inv_f16(half addrspace(1)* noalias nocapture %out, hal
; VI-DENORM-STRICT: v_mul_f16_e32 [[TMP:v[0-9]+]], [[REGB]], [[REGA]]
; VI-DENORM-STRICT: v_add_f16_e32 [[RESULT:v[0-9]+]], [[TMP]], [[REGC]]
; VI-DENORM: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @neg_neg_mad_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
+define amdgpu_kernel void @neg_neg_mad_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%tid.ext = sext i32 %tid to i64
%gep0 = getelementptr half, half addrspace(1)* %ptr, i64 %tid.ext
@@ -388,7 +388,7 @@ define void @neg_neg_mad_f16(half addrspace(1)* noalias nocapture %out, half add
; VI-DENORM-STRICT: v_subrev_f16_e32 [[RESULT:v[0-9]+]], [[REGC]], [[TMP]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @mad_fabs_sub_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
+define amdgpu_kernel void @mad_fabs_sub_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%tid.ext = sext i32 %tid to i64
%gep0 = getelementptr half, half addrspace(1)* %ptr, i64 %tid.ext
@@ -419,7 +419,7 @@ define void @mad_fabs_sub_f16(half addrspace(1)* noalias nocapture %out, half ad
; VI-DENORM-STRICT: v_subrev_f16_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
; VI-DENORM: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fsub_c_fadd_a_a_f16(half addrspace(1)* %out, half addrspace(1)* %in) {
+define amdgpu_kernel void @fsub_c_fadd_a_a_f16(half addrspace(1)* %out, half addrspace(1)* %in) {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.0 = getelementptr half, half addrspace(1)* %out, i32 %tid
%gep.1 = getelementptr half, half addrspace(1)* %gep.0, i32 1
@@ -447,7 +447,7 @@ define void @fsub_c_fadd_a_a_f16(half addrspace(1)* %out, half addrspace(1)* %in
; VI-DENORM-STRICT: v_subrev_f16_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fsub_fadd_a_a_c_f16(half addrspace(1)* %out, half addrspace(1)* %in) {
+define amdgpu_kernel void @fsub_fadd_a_a_c_f16(half addrspace(1)* %out, half addrspace(1)* %in) {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.0 = getelementptr half, half addrspace(1)* %out, i32 %tid
%gep.1 = getelementptr half, half addrspace(1)* %gep.0, i32 1
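
The f16 checks above all hinge on the latitude llvm.fmuladd grants the backend: it may emit a fused operation or a separate mul/add. The RUN matrix (fp-contract=on vs fast, +/-fp64-fp16-denormals) pins down which form each configuration picks. A minimal illustrative kernel (not from the patch; the intrinsic declaration matches the one above):

; VI-FLUSH expects v_mac_f16/v_mad_f16 and VI-DENORM expects v_fma_f16 for this call;
; separate fmul+fadd pairs additionally need fp-contract=fast (GCN-CONTRACT) to fuse.
define amdgpu_kernel void @fmuladd_sketch(half addrspace(1)* %out, half %a, half %b, half %c) {
  %r = call half @llvm.fmuladd.f16(half %a, half %b, half %c)
  store half %r, half addrspace(1)* %out
  ret void
}
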
diff --git a/test/CodeGen/AMDGPU/fmuladd.f32.ll b/test/CodeGen/AMDGPU/fmuladd.f32.ll
index e4b1053ff25c..fb605dd2e4bd 100644
--- a/test/CodeGen/AMDGPU/fmuladd.f32.ll
+++ b/test/CodeGen/AMDGPU/fmuladd.f32.ll
@@ -25,7 +25,7 @@ declare float @llvm.fabs.f32(float) #1
; GCN-DENORM-SLOWFMA: v_mul_f32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
; GCN-DENORM-SLOWFMA: v_add_f32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
-define void @fmuladd_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
+define amdgpu_kernel void @fmuladd_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
float addrspace(1)* %in2, float addrspace(1)* %in3) #0 {
%r0 = load float, float addrspace(1)* %in1
%r1 = load float, float addrspace(1)* %in2
@@ -45,7 +45,7 @@ define void @fmuladd_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
; GCN-DENORM-STRICT: v_mul_f32_e32
; GCN-DENORM-STRICT: v_add_f32_e32
-define void @fmul_fadd_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
+define amdgpu_kernel void @fmul_fadd_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
float addrspace(1)* %in2, float addrspace(1)* %in3) #0 {
%r0 = load volatile float, float addrspace(1)* %in1
%r1 = load volatile float, float addrspace(1)* %in2
@@ -71,7 +71,7 @@ define void @fmul_fadd_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fmuladd_2.0_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fmuladd_2.0_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -100,7 +100,7 @@ define void @fmuladd_2.0_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fmuladd_a_2.0_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fmuladd_a_2.0_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -132,7 +132,7 @@ define void @fmuladd_a_2.0_b_f32(float addrspace(1)* %out, float addrspace(1)* %
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fadd_a_a_b_f32(float addrspace(1)* %out,
+define amdgpu_kernel void @fadd_a_a_b_f32(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -167,7 +167,7 @@ define void @fadd_a_a_b_f32(float addrspace(1)* %out,
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fadd_b_a_a_f32(float addrspace(1)* %out,
+define amdgpu_kernel void @fadd_b_a_a_f32(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -196,7 +196,7 @@ define void @fadd_b_a_a_f32(float addrspace(1)* %out,
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fmuladd_neg_2.0_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fmuladd_neg_2.0_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -225,7 +225,7 @@ define void @fmuladd_neg_2.0_a_b_f32(float addrspace(1)* %out, float addrspace(1
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fmuladd_neg_2.0_neg_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fmuladd_neg_2.0_neg_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -256,7 +256,7 @@ define void @fmuladd_neg_2.0_neg_a_b_f32(float addrspace(1)* %out, float addrspa
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fmuladd_2.0_neg_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fmuladd_2.0_neg_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -286,7 +286,7 @@ define void @fmuladd_2.0_neg_a_b_f32(float addrspace(1)* %out, float addrspace(1
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fmuladd_2.0_a_neg_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fmuladd_2.0_a_neg_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -318,7 +318,7 @@ define void @fmuladd_2.0_a_neg_b_f32(float addrspace(1)* %out, float addrspace(1
; SI: buffer_store_dword [[RESULT]]
; VI: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @mad_sub_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #0 {
+define amdgpu_kernel void @mad_sub_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #0 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%tid.ext = sext i32 %tid to i64
%gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext
@@ -353,7 +353,7 @@ define void @mad_sub_f32(float addrspace(1)* noalias nocapture %out, float addrs
; SI: buffer_store_dword [[RESULT]]
; VI: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @mad_sub_inv_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #0 {
+define amdgpu_kernel void @mad_sub_inv_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #0 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%tid.ext = sext i32 %tid to i64
%gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext
@@ -387,7 +387,7 @@ define void @mad_sub_inv_f32(float addrspace(1)* noalias nocapture %out, float a
; SI: buffer_store_dword [[RESULT]]
; VI: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @mad_sub_fabs_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #0 {
+define amdgpu_kernel void @mad_sub_fabs_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #0 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%tid.ext = sext i32 %tid to i64
%gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext
@@ -422,7 +422,7 @@ define void @mad_sub_fabs_f32(float addrspace(1)* noalias nocapture %out, float
; SI: buffer_store_dword [[RESULT]]
; VI: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @mad_sub_fabs_inv_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #0 {
+define amdgpu_kernel void @mad_sub_fabs_inv_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #0 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%tid.ext = sext i32 %tid to i64
%gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext
@@ -460,7 +460,7 @@ define void @mad_sub_fabs_inv_f32(float addrspace(1)* noalias nocapture %out, fl
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @neg_neg_mad_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #0 {
+define amdgpu_kernel void @neg_neg_mad_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #0 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%tid.ext = sext i32 %tid to i64
%gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext
@@ -496,7 +496,7 @@ define void @neg_neg_mad_f32(float addrspace(1)* noalias nocapture %out, float a
; SI: buffer_store_dword [[RESULT]]
; VI: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @mad_fabs_sub_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #0 {
+define amdgpu_kernel void @mad_fabs_sub_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #0 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%tid.ext = sext i32 %tid to i64
%gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext
@@ -532,7 +532,7 @@ define void @mad_fabs_sub_f32(float addrspace(1)* noalias nocapture %out, float
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fsub_c_fadd_a_a_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fsub_c_fadd_a_a_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -563,7 +563,7 @@ define void @fsub_c_fadd_a_a_f32(float addrspace(1)* %out, float addrspace(1)* %
; SI: buffer_store_dword [[RESULT]]
; VI: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fsub_fadd_a_a_c_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fsub_fadd_a_a_c_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
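
The mad_sub_* family above reduces to (a*b) - c plus sign/abs variants; when contraction is allowed this becomes a single mad/fma with a negated or |.|-wrapped third operand, which is what the -[[REGC]] and |[[REGC]]| check operands encode. Roughly, for the base case:

  %mul = fmul fast float %a, %b
  %sub = fsub fast float %mul, %c
  ; contracts to v_mad_f32 dst, a, b, -c (v_fma_f32 when denormals are enabled)
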
diff --git a/test/CodeGen/AMDGPU/fmuladd.f64.ll b/test/CodeGen/AMDGPU/fmuladd.f64.ll
index f5e64b3c5941..86e91e04b0fc 100644
--- a/test/CodeGen/AMDGPU/fmuladd.f64.ll
+++ b/test/CodeGen/AMDGPU/fmuladd.f64.ll
@@ -7,7 +7,7 @@
; GCN-LABEL: {{^}}fmuladd_f64:
; GCN: v_fma_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
-define void @fmuladd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+define amdgpu_kernel void @fmuladd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2, double addrspace(1)* %in3) #0 {
%r0 = load double, double addrspace(1)* %in1
%r1 = load double, double addrspace(1)* %in2
@@ -22,7 +22,7 @@ define void @fmuladd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
; GCN-STRICT: v_mul_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
; GCN-STRICT: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
-define void @fmul_fadd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+define amdgpu_kernel void @fmul_fadd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2, double addrspace(1)* %in3) #0 {
%r0 = load double, double addrspace(1)* %in1
%r1 = load double, double addrspace(1)* %in2
@@ -44,7 +44,7 @@ define void @fmul_fadd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
; SI: buffer_store_dwordx2 [[RESULT]]
; VI: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fadd_a_a_b_f64(double addrspace(1)* %out,
+define amdgpu_kernel void @fadd_a_a_b_f64(double addrspace(1)* %out,
double addrspace(1)* %in1,
double addrspace(1)* %in2) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
@@ -72,7 +72,7 @@ define void @fadd_a_a_b_f64(double addrspace(1)* %out,
; SI: buffer_store_dwordx2 [[RESULT]]
; VI: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-define void @fadd_b_a_a_f64(double addrspace(1)* %out,
+define amdgpu_kernel void @fadd_b_a_a_f64(double addrspace(1)* %out,
double addrspace(1)* %in1,
double addrspace(1)* %in2) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
@@ -94,7 +94,7 @@ define void @fadd_b_a_a_f64(double addrspace(1)* %out,
; GCN-STRICT: v_add_f64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+:[0-9]+\]}}
; GCN-CONTRACT: v_fma_f64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+:[0-9]+\]}}
-define void @mad_sub_f64(double addrspace(1)* noalias nocapture %out, double addrspace(1)* noalias nocapture readonly %ptr) #1 {
+define amdgpu_kernel void @mad_sub_f64(double addrspace(1)* noalias nocapture %out, double addrspace(1)* noalias nocapture readonly %ptr) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%tid.ext = sext i32 %tid to i64
%gep0 = getelementptr double, double addrspace(1)* %ptr, i64 %tid.ext
@@ -117,7 +117,7 @@ define void @mad_sub_f64(double addrspace(1)* noalias nocapture %out, double add
; GCN-STRICT: v_add_f64
; GCN-CONTRACT: v_fma_f64
-define void @fadd_a_a_b_f64_fast_add0(double addrspace(1)* %out,
+define amdgpu_kernel void @fadd_a_a_b_f64_fast_add0(double addrspace(1)* %out,
double addrspace(1)* %in1,
double addrspace(1)* %in2) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
@@ -139,7 +139,7 @@ define void @fadd_a_a_b_f64_fast_add0(double addrspace(1)* %out,
; GCN-STRICT: v_add_f64
; GCN-CONTRACT: v_fma_f64
-define void @fadd_a_a_b_f64_fast_add1(double addrspace(1)* %out,
+define amdgpu_kernel void @fadd_a_a_b_f64_fast_add1(double addrspace(1)* %out,
double addrspace(1)* %in1,
double addrspace(1)* %in2) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
@@ -158,7 +158,7 @@ define void @fadd_a_a_b_f64_fast_add1(double addrspace(1)* %out,
; GCN-LABEL: {{^}}fadd_a_a_b_f64_fast:
; GCN: v_fma_f64
-define void @fadd_a_a_b_f64_fast(double addrspace(1)* %out,
+define amdgpu_kernel void @fadd_a_a_b_f64_fast(double addrspace(1)* %out,
double addrspace(1)* %in1,
double addrspace(1)* %in2) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
diff --git a/test/CodeGen/AMDGPU/fmuladd.v2f16.ll b/test/CodeGen/AMDGPU/fmuladd.v2f16.ll
new file mode 100644
index 000000000000..bdd3c04fd318
--- /dev/null
+++ b/test/CodeGen/AMDGPU/fmuladd.v2f16.ll
@@ -0,0 +1,107 @@
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-fp64-fp16-denormals -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,GFX9-FLUSH,GFX9 %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-fp64-fp16-denormals -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,GFX9-FLUSH,GFX9 %s
+
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=+fp64-fp16-denormals -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,GFX9-DENORM-STRICT,GFX9-DENORM,GFX9 %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=+fp64-fp16-denormals -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,GFX9-DENORM-CONTRACT,GFX9-DENORM,GFX9 %s
+
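+; The RUN lines cover both fp16 denormal modes (flushed and preserved)
+; crossed with -fp-contract=on and -fp-contract=fast; the flush configs
+; expect separate v_pk_mul_f16/v_pk_add_f16, the denormal configs v_pk_fma_f16.
+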
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+declare <2 x half> @llvm.fmuladd.v2f16(<2 x half>, <2 x half>, <2 x half>) #1
+declare <2 x half> @llvm.fabs.v2f16(<2 x half>) #1
+
+; GCN-LABEL: {{^}}fmuladd_v2f16:
+; GFX9-FLUSH: v_pk_mul_f16 {{v[0-9]+, v[0-9]+, v[0-9]+}}
+; GFX9-FLUSH: v_pk_add_f16 {{v[0-9]+, v[0-9]+, v[0-9]+}}
+
+; GFX9-DENORM: v_pk_fma_f16 {{v[0-9]+, v[0-9]+, v[0-9]+}}
+define amdgpu_kernel void @fmuladd_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in1,
+ <2 x half> addrspace(1)* %in2, <2 x half> addrspace(1)* %in3) #0 {
+ %r0 = load <2 x half>, <2 x half> addrspace(1)* %in1
+ %r1 = load <2 x half>, <2 x half> addrspace(1)* %in2
+ %r2 = load <2 x half>, <2 x half> addrspace(1)* %in3
+ %r3 = tail call <2 x half> @llvm.fmuladd.v2f16(<2 x half> %r0, <2 x half> %r1, <2 x half> %r2)
+ store <2 x half> %r3, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}fmuladd_2.0_a_b_v2f16:
+; GCN: {{buffer|flat}}_load_dword [[R1:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_dword [[R2:v[0-9]+]],
+; GFX9-FLUSH: v_pk_add_f16 [[ADD0:v[0-9]+]], [[R1]], [[R1]]
+; GFX9-FLUSH: v_pk_add_f16 [[RESULT:v[0-9]+]], [[ADD0]], [[R2]]
+
+; GFX9-FLUSH: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+
+; GFX9-DENORM: v_pk_fma_f16 [[RESULT:v[0-9]+]], [[R1]], 2.0, [[R2]]
+; GFX9-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define amdgpu_kernel void @fmuladd_2.0_a_b_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.0 = getelementptr <2 x half>, <2 x half> addrspace(1)* %out, i32 %tid
+ %gep.1 = getelementptr <2 x half>, <2 x half> addrspace(1)* %gep.0, i32 1
+ %gep.out = getelementptr <2 x half>, <2 x half> addrspace(1)* %out, i32 %tid
+
+ %r1 = load volatile <2 x half>, <2 x half> addrspace(1)* %gep.0
+ %r2 = load volatile <2 x half>, <2 x half> addrspace(1)* %gep.1
+
+ %r3 = tail call <2 x half> @llvm.fmuladd.v2f16(<2 x half> <half 2.0, half 2.0>, <2 x half> %r1, <2 x half> %r2)
+ store <2 x half> %r3, <2 x half> addrspace(1)* %gep.out
+ ret void
+}
+
+; GCN-LABEL: {{^}}fmuladd_a_2.0_b_v2f16:
+; GCN: {{buffer|flat}}_load_dword [[R1:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_dword [[R2:v[0-9]+]],
+; GFX9-FLUSH: v_pk_add_f16 [[ADD0:v[0-9]+]], [[R1]], [[R1]]
+; GFX9-FLUSH: v_pk_add_f16 [[RESULT:v[0-9]+]], [[ADD0]], [[R2]]
+
+; GFX9-FLUSH: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+
+; GFX9-DENORM: v_pk_fma_f16 [[RESULT:v[0-9]+]], [[R1]], 2.0, [[R2]]
+; GFX9-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define amdgpu_kernel void @fmuladd_a_2.0_b_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.0 = getelementptr <2 x half>, <2 x half> addrspace(1)* %out, i32 %tid
+ %gep.1 = getelementptr <2 x half>, <2 x half> addrspace(1)* %gep.0, i32 1
+ %gep.out = getelementptr <2 x half>, <2 x half> addrspace(1)* %out, i32 %tid
+
+ %r1 = load volatile <2 x half>, <2 x half> addrspace(1)* %gep.0
+ %r2 = load volatile <2 x half>, <2 x half> addrspace(1)* %gep.1
+
+ %r3 = tail call <2 x half> @llvm.fmuladd.v2f16(<2 x half> %r1, <2 x half> <half 2.0, half 2.0>, <2 x half> %r2)
+ store <2 x half> %r3, <2 x half> addrspace(1)* %gep.out
+ ret void
+}
+
+; GCN-LABEL: {{^}}fadd_a_a_b_v2f16:
+; GCN: {{buffer|flat}}_load_dword [[R1:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_dword [[R2:v[0-9]+]],
+; GFX9-FLUSH: v_pk_add_f16 [[ADD0:v[0-9]+]], [[R1]], [[R1]]
+; GFX9-FLUSH: v_pk_add_f16 [[RESULT:v[0-9]+]], [[ADD0]], [[R2]]
+
+; GFX9-DENORM-STRICT: v_pk_add_f16 [[ADD0:v[0-9]+]], [[R1]], [[R1]]
+; GFX9-DENORM-STRICT: v_pk_add_f16 [[RESULT:v[0-9]+]], [[ADD0]], [[R2]]
+
+; GFX9-DENORM-CONTRACT: v_pk_fma_f16 [[RESULT:v[0-9]+]], [[R1]], 2.0, [[R2]]
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define amdgpu_kernel void @fadd_a_a_b_v2f16(<2 x half> addrspace(1)* %out,
+ <2 x half> addrspace(1)* %in1,
+ <2 x half> addrspace(1)* %in2) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.0 = getelementptr <2 x half>, <2 x half> addrspace(1)* %out, i32 %tid
+ %gep.1 = getelementptr <2 x half>, <2 x half> addrspace(1)* %gep.0, i32 1
+ %gep.out = getelementptr <2 x half>, <2 x half> addrspace(1)* %out, i32 %tid
+
+ %r0 = load volatile <2 x half>, <2 x half> addrspace(1)* %gep.0
+ %r1 = load volatile <2 x half>, <2 x half> addrspace(1)* %gep.1
+
+ %add.0 = fadd <2 x half> %r0, %r0
+ %add.1 = fadd <2 x half> %add.0, %r1
+ store <2 x half> %add.1, <2 x half> addrspace(1)* %gep.out
+ ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/fnearbyint.ll b/test/CodeGen/AMDGPU/fnearbyint.ll
index 5423fadf81e2..4ff3bbbcbc3e 100644
--- a/test/CodeGen/AMDGPU/fnearbyint.ll
+++ b/test/CodeGen/AMDGPU/fnearbyint.ll
@@ -13,41 +13,41 @@ declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>) #0
declare <4 x double> @llvm.nearbyint.v4f64(<4 x double>) #0
-define void @fnearbyint_f32(float addrspace(1)* %out, float %in) #1 {
+define amdgpu_kernel void @fnearbyint_f32(float addrspace(1)* %out, float %in) #1 {
entry:
%0 = call float @llvm.nearbyint.f32(float %in)
store float %0, float addrspace(1)* %out
ret void
}
-define void @fnearbyint_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) #1 {
+define amdgpu_kernel void @fnearbyint_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) #1 {
entry:
%0 = call <2 x float> @llvm.nearbyint.v2f32(<2 x float> %in)
store <2 x float> %0, <2 x float> addrspace(1)* %out
ret void
}
-define void @fnearbyint_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) #1 {
+define amdgpu_kernel void @fnearbyint_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) #1 {
entry:
%0 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %in)
store <4 x float> %0, <4 x float> addrspace(1)* %out
ret void
}
-define void @nearbyint_f64(double addrspace(1)* %out, double %in) {
+define amdgpu_kernel void @nearbyint_f64(double addrspace(1)* %out, double %in) {
entry:
%0 = call double @llvm.nearbyint.f64(double %in)
store double %0, double addrspace(1)* %out
ret void
}
-define void @nearbyint_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
+define amdgpu_kernel void @nearbyint_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
entry:
%0 = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %in)
store <2 x double> %0, <2 x double> addrspace(1)* %out
ret void
}
-define void @nearbyint_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
+define amdgpu_kernel void @nearbyint_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
entry:
%0 = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %in)
store <4 x double> %0, <4 x double> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/fneg-combines.ll b/test/CodeGen/AMDGPU/fneg-combines.ll
index 3f9928c2b623..1c0e9a2f13ce 100644
--- a/test/CodeGen/AMDGPU/fneg-combines.ll
+++ b/test/CodeGen/AMDGPU/fneg-combines.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-SAFE -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -enable-unsafe-fp-math -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NSZ -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tahiti -start-after=sink -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-SAFE -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -enable-no-signed-zeros-fp-math -march=amdgcn -mcpu=tahiti -start-after=sink -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NSZ -check-prefix=SI -check-prefix=FUNC %s
; --------------------------------------------------------------------------------
; fadd tests
@@ -14,7 +14,7 @@
; GCN-NSZ: v_sub_f32_e64 [[RESULT:v[0-9]+]], -[[A]], [[B]]
; GCN-NSZ-NEXT: buffer_store_dword [[RESULT]]
-define void @v_fneg_add_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_add_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -35,7 +35,7 @@ define void @v_fneg_add_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr
; GCN-DAG: v_xor_b32_e32 [[NEG_ADD:v[0-9]+]], 0x80000000, [[ADD]]
; GCN-NEXT: buffer_store_dword [[NEG_ADD]]
; GCN-NEXT: buffer_store_dword [[ADD]]
-define void @v_fneg_add_store_use_add_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_add_store_use_add_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -53,12 +53,16 @@ define void @v_fneg_add_store_use_add_f32(float addrspace(1)* %out, float addrsp
; GCN-LABEL: {{^}}v_fneg_add_multi_use_add_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_add_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
-; GCN-DAG: v_xor_b32_e32 [[NEG_ADD:v[0-9]+]], 0x80000000, [[ADD]]
-; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], 4.0, [[ADD]]
-; GCN-NEXT: buffer_store_dword [[NEG_ADD]]
+
+; GCN-SAFE: v_add_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
+; GCN-SAFE: v_xor_b32_e32 [[NEG_ADD:v[0-9]+]], 0x80000000, [[ADD]]
+; GCN-SAFE: v_mul_f32_e32 [[MUL:v[0-9]+]], 4.0, [[ADD]]
+
+; GCN-NSZ: v_sub_f32_e64 [[NEG_ADD:v[0-9]+]], -[[A]], [[B]]
+; GCN-NSZ-NEXT: v_mul_f32_e32 [[MUL:v[0-9]+]], -4.0, [[NEG_ADD]]
+; GCN: buffer_store_dword [[NEG_ADD]]
; GCN-NEXT: buffer_store_dword [[MUL]]
-define void @v_fneg_add_multi_use_add_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_add_multi_use_add_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -83,7 +87,7 @@ define void @v_fneg_add_multi_use_add_f32(float addrspace(1)* %out, float addrsp
; GCN-NSZ: v_subrev_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
; GCN-NSZ-NEXT: buffer_store_dword [[ADD]]
-define void @v_fneg_add_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_add_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -107,7 +111,7 @@ define void @v_fneg_add_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)*
; GCN-NSZ: v_subrev_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
; GCN-NSZ-NEXT: buffer_store_dword [[ADD]]
-define void @v_fneg_add_x_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_add_x_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -131,7 +135,7 @@ define void @v_fneg_add_x_fneg_f32(float addrspace(1)* %out, float addrspace(1)*
; GCN-NSZ: v_add_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
; GCN-NSZ-NEXT: buffer_store_dword [[ADD]]
-define void @v_fneg_add_fneg_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_add_fneg_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -160,7 +164,7 @@ define void @v_fneg_add_fneg_fneg_f32(float addrspace(1)* %out, float addrspace(
; GCN-NSZ-DAG: v_subrev_f32_e32 [[NEG_ADD:v[0-9]+]], [[B]], [[A]]
; GCN-NSZ-NEXT: buffer_store_dword [[NEG_ADD]]
; GCN-NSZ-NEXT: buffer_store_dword [[NEG_A]]
-define void @v_fneg_add_store_use_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_add_store_use_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -188,7 +192,7 @@ define void @v_fneg_add_store_use_fneg_x_f32(float addrspace(1)* %out, float add
; GCN-NSZ-DAG: v_mul_f32_e64 [[MUL:v[0-9]+]], -[[A]], s{{[0-9]+}}
; GCN-NSZ-NEXT: buffer_store_dword [[NEG_ADD]]
; GCN-NSZ-NEXT: buffer_store_dword [[MUL]]
-define void @v_fneg_add_multi_use_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float %c) #0 {
+define amdgpu_kernel void @v_fneg_add_multi_use_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float %c) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -214,7 +218,7 @@ define void @v_fneg_add_multi_use_fneg_x_f32(float addrspace(1)* %out, float add
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], [[A]], -[[B]]
; GCN-NEXT: buffer_store_dword [[RESULT]]
-define void @v_fneg_mul_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_mul_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -235,7 +239,7 @@ define void @v_fneg_mul_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr
; GCN-DAG: v_xor_b32_e32 [[NEG_MUL:v[0-9]+]], 0x80000000, [[ADD]]
; GCN-NEXT: buffer_store_dword [[NEG_MUL]]
; GCN: buffer_store_dword [[ADD]]
-define void @v_fneg_mul_store_use_mul_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_mul_store_use_mul_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -253,12 +257,11 @@ define void @v_fneg_mul_store_use_mul_f32(float addrspace(1)* %out, float addrsp
; GCN-LABEL: {{^}}v_fneg_mul_multi_use_mul_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_mul_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
-; GCN-DAG: v_xor_b32_e32 [[NEG_MUL:v[0-9]+]], 0x80000000, [[ADD]]
-; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], 4.0, [[ADD]]
-; GCN-NEXT: buffer_store_dword [[NEG_MUL]]
-; GCN: buffer_store_dword [[MUL]]
-define void @v_fneg_mul_multi_use_mul_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+; GCN: v_mul_f32_e64 [[MUL0:v[0-9]+]], [[A]], -[[B]]
+; GCN-NEXT: v_mul_f32_e32 [[MUL1:v[0-9]+]], -4.0, [[MUL0]]
+; GCN-NEXT: buffer_store_dword [[MUL0]]
+; GCN-NEXT: buffer_store_dword [[MUL1]]
+define amdgpu_kernel void @v_fneg_mul_multi_use_mul_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -279,7 +282,7 @@ define void @v_fneg_mul_multi_use_mul_f32(float addrspace(1)* %out, float addrsp
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; GCN: v_mul_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
; GCN-NEXT: buffer_store_dword [[ADD]]
-define void @v_fneg_mul_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_mul_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -299,7 +302,7 @@ define void @v_fneg_mul_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)*
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; GCN: v_mul_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
; GCN-NEXT: buffer_store_dword [[ADD]]
-define void @v_fneg_mul_x_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_mul_x_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -319,7 +322,7 @@ define void @v_fneg_mul_x_fneg_f32(float addrspace(1)* %out, float addrspace(1)*
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; GCN: v_mul_f32_e64 [[ADD:v[0-9]+]], [[A]], -[[B]]
; GCN-NEXT: buffer_store_dword [[ADD]]
-define void @v_fneg_mul_fneg_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_mul_fneg_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -342,7 +345,7 @@ define void @v_fneg_mul_fneg_fneg_f32(float addrspace(1)* %out, float addrspace(
; GCN-DAG: v_mul_f32_e32 [[NEG_MUL:v[0-9]+]], [[B]], [[A]]
; GCN-NEXT: buffer_store_dword [[NEG_MUL]]
; GCN: buffer_store_dword [[NEG_A]]
-define void @v_fneg_mul_store_use_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_mul_store_use_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -365,7 +368,7 @@ define void @v_fneg_mul_store_use_fneg_x_f32(float addrspace(1)* %out, float add
; GCN-DAG: v_mul_f32_e64 [[MUL:v[0-9]+]], -[[A]], s{{[0-9]+}}
; GCN-NEXT: buffer_store_dword [[NEG_MUL]]
; GCN: buffer_store_dword [[MUL]]
-define void @v_fneg_mul_multi_use_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float %c) #0 {
+define amdgpu_kernel void @v_fneg_mul_multi_use_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float %c) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -383,6 +386,300 @@ define void @v_fneg_mul_multi_use_fneg_x_f32(float addrspace(1)* %out, float add
}
; --------------------------------------------------------------------------------
+; fminnum tests
+; --------------------------------------------------------------------------------
+
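+; These tests expect fneg(minnum(a, b)) to fold to maxnum(-a, -b), i.e.
+; a single v_max_f32 with negated source modifiers instead of a min
+; followed by an xor of the sign bit.
+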
+; GCN-LABEL: {{^}}v_fneg_minnum_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: v_max_f32_e64 [[RESULT:v[0-9]+]], -[[A]], -[[B]]
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @v_fneg_minnum_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %min = call float @llvm.minnum.f32(float %a, float %b)
+ %fneg = fsub float -0.000000e+00, %min
+ store float %fneg, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_self_minnum_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e64 [[RESULT:v[0-9]+]], -[[A]], -[[A]]
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @v_fneg_self_minnum_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %min = call float @llvm.minnum.f32(float %a, float %a)
+ %min.fneg = fsub float -0.0, %min
+ store float %min.fneg, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_posk_minnum_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e64 [[RESULT:v[0-9]+]], -[[A]], -4.0
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @v_fneg_posk_minnum_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %min = call float @llvm.minnum.f32(float 4.0, float %a)
+ %fneg = fsub float -0.000000e+00, %min
+ store float %fneg, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_negk_minnum_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e64 [[RESULT:v[0-9]+]], -[[A]], 4.0
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @v_fneg_negk_minnum_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %min = call float @llvm.minnum.f32(float -4.0, float %a)
+ %fneg = fsub float -0.000000e+00, %min
+ store float %fneg, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_0_minnum_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], 0, [[A]]
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @v_fneg_0_minnum_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %min = call float @llvm.minnum.f32(float 0.0, float %a)
+ %fneg = fsub float -0.000000e+00, %min
+ store float %fneg, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_neg0_minnum_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e64 [[RESULT:v[0-9]+]], -[[A]], 0
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @v_fneg_neg0_minnum_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %min = call float @llvm.minnum.f32(float -0.0, float %a)
+ %fneg = fsub float -0.000000e+00, %min
+ store float %fneg, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_0_minnum_foldable_use_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: v_min_f32_e32 [[MIN:v[0-9]+]], 0, [[A]]
+; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MIN]], [[B]]
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @v_fneg_0_minnum_foldable_use_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %min = call float @llvm.minnum.f32(float 0.0, float %a)
+ %fneg = fsub float -0.000000e+00, %min
+ %mul = fmul float %fneg, %b
+ store float %mul, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_minnum_multi_use_minnum_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: v_max_f32_e64 [[MAX0:v[0-9]+]], -[[A]], -[[B]]
+; GCN-NEXT: v_mul_f32_e32 [[MUL1:v[0-9]+]], -4.0, [[MAX0]]
+; GCN-NEXT: buffer_store_dword [[MAX0]]
+; GCN-NEXT: buffer_store_dword [[MUL1]]
+define amdgpu_kernel void @v_fneg_minnum_multi_use_minnum_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %min = call float @llvm.minnum.f32(float %a, float %b)
+ %fneg = fsub float -0.000000e+00, %min
+ %use1 = fmul float %min, 4.0
+ store volatile float %fneg, float addrspace(1)* %out
+ store volatile float %use1, float addrspace(1)* %out
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; fmaxnum tests
+; --------------------------------------------------------------------------------
+
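+; Mirror of the fminnum cases above: fneg(maxnum(a, b)) folds to
+; minnum(-a, -b), matched as v_min_f32 with negated source operands.
+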
+; GCN-LABEL: {{^}}v_fneg_maxnum_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: v_min_f32_e64 [[RESULT:v[0-9]+]], -[[A]], -[[B]]
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @v_fneg_maxnum_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %min = call float @llvm.maxnum.f32(float %a, float %b)
+ %fneg = fsub float -0.000000e+00, %min
+ store float %fneg, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_self_maxnum_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_min_f32_e64 [[RESULT:v[0-9]+]], -[[A]], -[[A]]
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @v_fneg_self_maxnum_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %min = call float @llvm.maxnum.f32(float %a, float %a)
+ %min.fneg = fsub float -0.0, %min
+ store float %min.fneg, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_posk_maxnum_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_min_f32_e64 [[RESULT:v[0-9]+]], -[[A]], -4.0
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @v_fneg_posk_maxnum_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %min = call float @llvm.maxnum.f32(float 4.0, float %a)
+ %fneg = fsub float -0.000000e+00, %min
+ store float %fneg, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_negk_maxnum_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_min_f32_e64 [[RESULT:v[0-9]+]], -[[A]], 4.0
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @v_fneg_negk_maxnum_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %min = call float @llvm.maxnum.f32(float -4.0, float %a)
+ %fneg = fsub float -0.000000e+00, %min
+ store float %fneg, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_0_maxnum_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], 0, [[A]]
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @v_fneg_0_maxnum_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %max = call float @llvm.maxnum.f32(float 0.0, float %a)
+ %fneg = fsub float -0.000000e+00, %max
+ store float %fneg, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_neg0_maxnum_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_min_f32_e64 [[RESULT:v[0-9]+]], -[[A]], 0
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @v_fneg_neg0_maxnum_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %max = call float @llvm.maxnum.f32(float -0.0, float %a)
+ %fneg = fsub float -0.000000e+00, %max
+ store float %fneg, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_0_maxnum_foldable_use_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: v_max_f32_e32 [[MAX:v[0-9]+]], 0, [[A]]
+; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MAX]], [[B]]
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @v_fneg_0_maxnum_foldable_use_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %max = call float @llvm.maxnum.f32(float 0.0, float %a)
+ %fneg = fsub float -0.000000e+00, %max
+ %mul = fmul float %fneg, %b
+ store float %mul, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_maxnum_multi_use_maxnum_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: v_min_f32_e64 [[MAX0:v[0-9]+]], -[[A]], -[[B]]
+; GCN-NEXT: v_mul_f32_e32 [[MUL1:v[0-9]+]], -4.0, [[MAX0]]
+; GCN-NEXT: buffer_store_dword [[MAX0]]
+; GCN-NEXT: buffer_store_dword [[MUL1]]
+define amdgpu_kernel void @v_fneg_maxnum_multi_use_maxnum_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %min = call float @llvm.maxnum.f32(float %a, float %b)
+ %fneg = fsub float -0.000000e+00, %min
+ %use1 = fmul float %min, 4.0
+ store volatile float %fneg, float addrspace(1)* %out
+ store volatile float %use1, float addrspace(1)* %out
+ ret void
+}
+
+; --------------------------------------------------------------------------------
; fma tests
; --------------------------------------------------------------------------------
@@ -396,7 +693,7 @@ define void @v_fneg_mul_multi_use_fneg_x_f32(float addrspace(1)* %out, float add
; GCN-NSZ: v_fma_f32 [[RESULT:v[0-9]+]], [[A]], -[[B]], -[[C]]
; GCN-NSZ-NEXT: buffer_store_dword [[RESULT]]
-define void @v_fneg_fma_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fma_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -420,7 +717,7 @@ define void @v_fneg_fma_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr
; GCN-DAG: v_xor_b32_e32 [[NEG_FMA:v[0-9]+]], 0x80000000, [[FMA]]
; GCN-NEXT: buffer_store_dword [[NEG_FMA]]
; GCN-NEXT: buffer_store_dword [[FMA]]
-define void @v_fneg_fma_store_use_fma_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fma_store_use_fma_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -441,12 +738,17 @@ define void @v_fneg_fma_store_use_fma_f32(float addrspace(1)* %out, float addrsp
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-; GCN-DAG: v_fma_f32 [[FMA:v[0-9]+]], [[A]], [[B]], [[C]]
-; GCN-DAG: v_xor_b32_e32 [[NEG_FMA:v[0-9]+]], 0x80000000, [[FMA]]
-; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], 4.0, [[FMA]]
+
+; GCN-SAFE: v_fma_f32 [[FMA:v[0-9]+]], [[A]], [[B]], [[C]]
+; GCN-SAFE: v_xor_b32_e32 [[NEG_FMA:v[0-9]+]], 0x80000000, [[FMA]]
+; GCN-SAFE: v_mul_f32_e32 [[MUL:v[0-9]+]], 4.0, [[FMA]]
+
+; GCN-NSZ: v_fma_f32 [[NEG_FMA:v[0-9]+]], [[A]], -[[B]], -[[C]]
+; GCN-NSZ-NEXT: v_mul_f32_e32 [[MUL:v[0-9]+]], -4.0, [[NEG_FMA]]
+
; GCN-NEXT: buffer_store_dword [[NEG_FMA]]
; GCN-NEXT: buffer_store_dword [[MUL]]
-define void @v_fneg_fma_multi_use_fma_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fma_multi_use_fma_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -474,7 +776,7 @@ define void @v_fneg_fma_multi_use_fma_f32(float addrspace(1)* %out, float addrsp
; GCN-NSZ: v_fma_f32 [[FMA:v[0-9]+]], [[A]], [[B]], -[[C]]
; GCN-NSZ-NEXT: buffer_store_dword [[FMA]]
-define void @v_fneg_fma_fneg_x_y_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fma_fneg_x_y_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -501,7 +803,7 @@ define void @v_fneg_fma_fneg_x_y_f32(float addrspace(1)* %out, float addrspace(1
; GCN-NSZ: v_fma_f32 [[FMA:v[0-9]+]], [[A]], [[B]], -[[C]]
; GCN-NSZ-NEXT: buffer_store_dword [[FMA]]
-define void @v_fneg_fma_x_fneg_y_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fma_x_fneg_y_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -528,7 +830,7 @@ define void @v_fneg_fma_x_fneg_y_f32(float addrspace(1)* %out, float addrspace(1
; GCN-NSZ: v_fma_f32 [[FMA:v[0-9]+]], [[A]], -[[B]], -[[C]]
; GCN-NSZ-NEXT: buffer_store_dword [[FMA]]
-define void @v_fneg_fma_fneg_fneg_y_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fma_fneg_fneg_y_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -556,7 +858,7 @@ define void @v_fneg_fma_fneg_fneg_y_f32(float addrspace(1)* %out, float addrspac
; GCN-NSZ: v_fma_f32 [[FMA:v[0-9]+]], [[A]], [[B]], [[C]]
; GCN-NSZ-NEXT: buffer_store_dword [[FMA]]
-define void @v_fneg_fma_fneg_x_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fma_fneg_x_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -584,7 +886,7 @@ define void @v_fneg_fma_fneg_x_fneg_f32(float addrspace(1)* %out, float addrspac
; GCN-NSZ: v_fma_f32 [[FMA:v[0-9]+]], [[A]], -[[B]], [[C]]
; GCN-NSZ-NEXT: buffer_store_dword [[FMA]]
-define void @v_fneg_fma_x_y_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fma_x_y_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -614,7 +916,7 @@ define void @v_fneg_fma_x_y_fneg_f32(float addrspace(1)* %out, float addrspace(1
; GCN-NSZ-DAG: v_fma_f32 [[FMA:v[0-9]+]], [[A]], [[B]], -[[C]]
; GCN-NSZ-NEXT: buffer_store_dword [[FMA]]
; GCN-NSZ-NEXT: buffer_store_dword [[NEG_A]]
-define void @v_fneg_fma_store_use_fneg_x_y_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fma_store_use_fneg_x_y_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -644,7 +946,7 @@ define void @v_fneg_fma_store_use_fneg_x_y_f32(float addrspace(1)* %out, float a
; GCN-NSZ-DAG: v_fma_f32 [[NEG_FMA:v[0-9]+]], [[A]], [[B]], -[[C]]
; GCN-NSZ-NEXT: buffer_store_dword [[NEG_FMA]]
; GCN-NSZ-NEXT: buffer_store_dword [[MUL]]
-define void @v_fneg_fma_multi_use_fneg_x_y_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr, float %d) #0 {
+define amdgpu_kernel void @v_fneg_fma_multi_use_fneg_x_y_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr, float %d) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -677,7 +979,7 @@ define void @v_fneg_fma_multi_use_fneg_x_y_f32(float addrspace(1)* %out, float a
; GCN-NSZ: v_mad_f32 [[RESULT:v[0-9]+]], [[A]], -[[B]], -[[C]]
; GCN-NSZ-NEXT: buffer_store_dword [[RESULT]]
-define void @v_fneg_fmad_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fmad_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -697,12 +999,17 @@ define void @v_fneg_fmad_f32(float addrspace(1)* %out, float addrspace(1)* %a.pt
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-; GCN-DAG: v_mac_f32_e32 [[C]], [[B]], [[A]]
-; GCN-DAG: v_xor_b32_e32 [[NEG_C:v[0-9]+]], 0x80000000, [[C]]
-; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], 4.0, [[C]]
-; GCN-NEXT: buffer_store_dword [[NEG_C]]
+
+; GCN-SAFE: v_mac_f32_e32 [[C]], [[B]], [[A]]
+; GCN-SAFE: v_xor_b32_e32 [[NEG_MAD:v[0-9]+]], 0x80000000, [[C]]
+; GCN-SAFE-NEXT: v_mul_f32_e32 [[MUL:v[0-9]+]], 4.0, [[C]]
+
+; GCN-NSZ: v_mad_f32 [[NEG_MAD:v[0-9]+]], -[[A]], [[B]], -[[C]]
+; GCN-NSZ-NEXT: v_mul_f32_e32 [[MUL:v[0-9]+]], -4.0, [[NEG_MAD]]
+
+; GCN: buffer_store_dword [[NEG_MAD]]
; GCN-NEXT: buffer_store_dword [[MUL]]
-define void @v_fneg_fmad_multi_use_fmad_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fmad_multi_use_fmad_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -728,7 +1035,7 @@ define void @v_fneg_fmad_multi_use_fmad_f32(float addrspace(1)* %out, float addr
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: v_cvt_f64_f32_e64 [[RESULT:v\[[0-9]+:[0-9]+\]]], -[[A]]
; GCN: buffer_store_dwordx2 [[RESULT]]
-define void @v_fneg_fp_extend_f32_to_f64(double addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fp_extend_f32_to_f64(double addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -744,7 +1051,7 @@ define void @v_fneg_fp_extend_f32_to_f64(double addrspace(1)* %out, float addrsp
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: v_cvt_f64_f32_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[A]]
; GCN: buffer_store_dwordx2 [[RESULT]]
-define void @v_fneg_fp_extend_fneg_f32_to_f64(double addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fp_extend_fneg_f32_to_f64(double addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -763,7 +1070,7 @@ define void @v_fneg_fp_extend_fneg_f32_to_f64(double addrspace(1)* %out, float a
; GCN-DAG: v_xor_b32_e32 [[FNEG_A:v[0-9]+]], 0x80000000, [[A]]
; GCN: buffer_store_dwordx2 [[RESULT]]
; GCN: buffer_store_dword [[FNEG_A]]
-define void @v_fneg_fp_extend_store_use_fneg_f32_to_f64(double addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fp_extend_store_use_fneg_f32_to_f64(double addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -783,7 +1090,7 @@ define void @v_fneg_fp_extend_store_use_fneg_f32_to_f64(double addrspace(1)* %ou
; GCN-DAG: v_xor_b32_e32 v[[FNEG_A:[0-9]+]], 0x80000000, v[[CVT_HI]]
; GCN: buffer_store_dwordx2 v{{\[[0-9]+}}:[[FNEG_A]]{{\]}}
; GCN: buffer_store_dwordx2 v{{\[}}[[CVT_LO]]:[[CVT_HI]]{{\]}}
-define void @v_fneg_multi_use_fp_extend_fneg_f32_to_f64(double addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_multi_use_fp_extend_fneg_f32_to_f64(double addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -803,7 +1110,7 @@ define void @v_fneg_multi_use_fp_extend_fneg_f32_to_f64(double addrspace(1)* %ou
; GCN-DAG: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], v{{\[}}[[CVT_LO]]:[[CVT_HI]]{{\]}}, 4.0
; GCN: buffer_store_dwordx2 v{{\[[0-9]+}}:[[FNEG_A]]{{\]}}
; GCN: buffer_store_dwordx2 [[MUL]]
-define void @v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64(double addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64(double addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -819,7 +1126,7 @@ define void @v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64(double addrspac
; FIXME: Source modifiers not folded for f16->f32
; GCN-LABEL: {{^}}v_fneg_multi_use_fp_extend_fneg_f16_to_f32:
-define void @v_fneg_multi_use_fp_extend_fneg_f16_to_f32(float addrspace(1)* %out, half addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_multi_use_fp_extend_fneg_f16_to_f32(float addrspace(1)* %out, half addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds half, half addrspace(1)* %a.ptr, i64 %tid.ext
@@ -833,7 +1140,7 @@ define void @v_fneg_multi_use_fp_extend_fneg_f16_to_f32(float addrspace(1)* %out
}
; GCN-LABEL: {{^}}v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32:
-define void @v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32(float addrspace(1)* %out, half addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32(float addrspace(1)* %out, half addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds half, half addrspace(1)* %a.ptr, i64 %tid.ext
@@ -855,7 +1162,7 @@ define void @v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32(float addrspace
; GCN: {{buffer|flat}}_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]]
; GCN: v_cvt_f32_f64_e64 [[RESULT:v[0-9]+]], -[[A]]
; GCN: buffer_store_dword [[RESULT]]
-define void @v_fneg_fp_round_f64_to_f32(float addrspace(1)* %out, double addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fp_round_f64_to_f32(float addrspace(1)* %out, double addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds double, double addrspace(1)* %a.ptr, i64 %tid.ext
@@ -871,7 +1178,7 @@ define void @v_fneg_fp_round_f64_to_f32(float addrspace(1)* %out, double addrspa
; GCN: {{buffer|flat}}_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]]
; GCN: v_cvt_f32_f64_e32 [[RESULT:v[0-9]+]], [[A]]
; GCN: buffer_store_dword [[RESULT]]
-define void @v_fneg_fp_round_fneg_f64_to_f32(float addrspace(1)* %out, double addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fp_round_fneg_f64_to_f32(float addrspace(1)* %out, double addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds double, double addrspace(1)* %a.ptr, i64 %tid.ext
@@ -888,10 +1195,9 @@ define void @v_fneg_fp_round_fneg_f64_to_f32(float addrspace(1)* %out, double ad
; GCN: {{buffer|flat}}_load_dwordx2 v{{\[}}[[A_LO:[0-9]+]]:[[A_HI:[0-9]+]]{{\]}}
; GCN-DAG: v_cvt_f32_f64_e32 [[RESULT:v[0-9]+]], v{{\[}}[[A_LO]]:[[A_HI]]{{\]}}
; GCN-DAG: v_xor_b32_e32 v[[NEG_A_HI:[0-9]+]], 0x80000000, v[[A_HI]]
-; GCN-DAG: v_mov_b32_e32 v[[NEG_A_LO:[0-9]+]], v[[A_LO]]
; GCN: buffer_store_dword [[RESULT]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[NEG_A_LO]]:[[NEG_A_HI]]{{\]}}
-define void @v_fneg_fp_round_store_use_fneg_f64_to_f32(float addrspace(1)* %out, double addrspace(1)* %a.ptr) #0 {
+; GCN: buffer_store_dwordx2 v{{\[}}[[A_LO]]:[[NEG_A_HI]]{{\]}}
+define amdgpu_kernel void @v_fneg_fp_round_store_use_fneg_f64_to_f32(float addrspace(1)* %out, double addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds double, double addrspace(1)* %a.ptr, i64 %tid.ext
@@ -911,7 +1217,7 @@ define void @v_fneg_fp_round_store_use_fneg_f64_to_f32(float addrspace(1)* %out,
; GCN-DAG: v_mul_f64 [[USE1:v\[[0-9]+:[0-9]+\]]], -[[A]], s{{\[}}
; GCN: buffer_store_dword [[RESULT]]
; GCN: buffer_store_dwordx2 [[USE1]]
-define void @v_fneg_fp_round_multi_use_fneg_f64_to_f32(float addrspace(1)* %out, double addrspace(1)* %a.ptr, double %c) #0 {
+define amdgpu_kernel void @v_fneg_fp_round_multi_use_fneg_f64_to_f32(float addrspace(1)* %out, double addrspace(1)* %a.ptr, double %c) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds double, double addrspace(1)* %a.ptr, i64 %tid.ext
@@ -930,7 +1236,7 @@ define void @v_fneg_fp_round_multi_use_fneg_f64_to_f32(float addrspace(1)* %out,
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: v_cvt_f16_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
; GCN: buffer_store_short [[RESULT]]
-define void @v_fneg_fp_round_f32_to_f16(half addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fp_round_f32_to_f16(half addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -946,7 +1252,7 @@ define void @v_fneg_fp_round_f32_to_f16(half addrspace(1)* %out, float addrspace
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[A]]
; GCN: buffer_store_short [[RESULT]]
-define void @v_fneg_fp_round_fneg_f32_to_f16(half addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fp_round_fneg_f32_to_f16(half addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -965,7 +1271,7 @@ define void @v_fneg_fp_round_fneg_f32_to_f16(half addrspace(1)* %out, float addr
; GCN-DAG: v_xor_b32_e32 [[NEG:v[0-9]+]], 0x80000000, [[CVT]]
; GCN: buffer_store_dword [[NEG]]
; GCN: buffer_store_dword [[CVT]]
-define void @v_fneg_multi_use_fp_round_fneg_f64_to_f32(float addrspace(1)* %out, double addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_multi_use_fp_round_fneg_f64_to_f32(float addrspace(1)* %out, double addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds double, double addrspace(1)* %a.ptr, i64 %tid.ext
@@ -984,7 +1290,7 @@ define void @v_fneg_multi_use_fp_round_fneg_f64_to_f32(float addrspace(1)* %out,
; GCN-DAG: v_xor_b32_e32 [[NEG_A:v[0-9]+]], 0x80000000, [[A]]
; GCN: buffer_store_short [[RESULT]]
; GCN: buffer_store_dword [[NEG_A]]
-define void @v_fneg_fp_round_store_use_fneg_f32_to_f16(half addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_fp_round_store_use_fneg_f32_to_f16(half addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -1004,7 +1310,7 @@ define void @v_fneg_fp_round_store_use_fneg_f32_to_f16(half addrspace(1)* %out,
; GCN-DAG: v_mul_f32_e64 [[USE1:v[0-9]+]], -[[A]], s
; GCN: buffer_store_short [[RESULT]]
; GCN: buffer_store_dword [[USE1]]
-define void @v_fneg_fp_round_multi_use_fneg_f32_to_f16(half addrspace(1)* %out, float addrspace(1)* %a.ptr, float %c) #0 {
+define amdgpu_kernel void @v_fneg_fp_round_multi_use_fneg_f32_to_f16(half addrspace(1)* %out, float addrspace(1)* %a.ptr, float %c) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -1027,7 +1333,7 @@ define void @v_fneg_fp_round_multi_use_fneg_f32_to_f16(half addrspace(1)* %out,
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: v_rcp_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
; GCN: buffer_store_dword [[RESULT]]
-define void @v_fneg_rcp_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_rcp_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -1043,7 +1349,7 @@ define void @v_fneg_rcp_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: v_rcp_f32_e32 [[RESULT:v[0-9]+]], [[A]]
; GCN: buffer_store_dword [[RESULT]]
-define void @v_fneg_rcp_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_rcp_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -1062,7 +1368,7 @@ define void @v_fneg_rcp_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %
; GCN-DAG: v_xor_b32_e32 [[NEG_A:v[0-9]+]], 0x80000000, [[A]]
; GCN: buffer_store_dword [[RESULT]]
; GCN: buffer_store_dword [[NEG_A]]
-define void @v_fneg_rcp_store_use_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_rcp_store_use_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -1082,7 +1388,7 @@ define void @v_fneg_rcp_store_use_fneg_f32(float addrspace(1)* %out, float addrs
; GCN-DAG: v_mul_f32_e64 [[MUL:v[0-9]+]], -[[A]], s{{[0-9]+}}
; GCN: buffer_store_dword [[RESULT]]
; GCN: buffer_store_dword [[MUL]]
-define void @v_fneg_rcp_multi_use_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float %c) #0 {
+define amdgpu_kernel void @v_fneg_rcp_multi_use_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float %c) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -1105,7 +1411,7 @@ define void @v_fneg_rcp_multi_use_fneg_f32(float addrspace(1)* %out, float addrs
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: v_rcp_legacy_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
; GCN: buffer_store_dword [[RESULT]]
-define void @v_fneg_rcp_legacy_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_rcp_legacy_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -1126,7 +1432,7 @@ define void @v_fneg_rcp_legacy_f32(float addrspace(1)* %out, float addrspace(1)*
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; GCN: v_mul_legacy_f32_e64 [[RESULT:v[0-9]+]], [[A]], -[[B]]
; GCN-NEXT: buffer_store_dword [[RESULT]]
-define void @v_fneg_mul_legacy_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_mul_legacy_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -1147,7 +1453,7 @@ define void @v_fneg_mul_legacy_f32(float addrspace(1)* %out, float addrspace(1)*
; GCN-DAG: v_xor_b32_e32 [[NEG_MUL_LEGACY:v[0-9]+]], 0x80000000, [[ADD]]
; GCN-NEXT: buffer_store_dword [[NEG_MUL_LEGACY]]
; GCN: buffer_store_dword [[ADD]]
-define void @v_fneg_mul_legacy_store_use_mul_legacy_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_mul_legacy_store_use_mul_legacy_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -1170,7 +1476,7 @@ define void @v_fneg_mul_legacy_store_use_mul_legacy_f32(float addrspace(1)* %out
; GCN: v_mul_legacy_f32_e32 [[MUL:v[0-9]+]], 4.0, [[ADD]]
; GCN-NEXT: buffer_store_dword [[NEG_MUL_LEGACY]]
; GCN: buffer_store_dword [[MUL]]
-define void @v_fneg_mul_legacy_multi_use_mul_legacy_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_mul_legacy_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -1191,7 +1497,7 @@ define void @v_fneg_mul_legacy_multi_use_mul_legacy_f32(float addrspace(1)* %out
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; GCN: v_mul_legacy_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
; GCN-NEXT: buffer_store_dword [[ADD]]
-define void @v_fneg_mul_legacy_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_mul_legacy_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -1211,7 +1517,7 @@ define void @v_fneg_mul_legacy_fneg_x_f32(float addrspace(1)* %out, float addrsp
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; GCN: v_mul_legacy_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
; GCN-NEXT: buffer_store_dword [[ADD]]
-define void @v_fneg_mul_legacy_x_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_mul_legacy_x_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -1231,7 +1537,7 @@ define void @v_fneg_mul_legacy_x_fneg_f32(float addrspace(1)* %out, float addrsp
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; GCN: v_mul_legacy_f32_e64 [[ADD:v[0-9]+]], [[A]], -[[B]]
; GCN-NEXT: buffer_store_dword [[ADD]]
-define void @v_fneg_mul_legacy_fneg_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_mul_legacy_fneg_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -1254,7 +1560,7 @@ define void @v_fneg_mul_legacy_fneg_fneg_f32(float addrspace(1)* %out, float add
; GCN-DAG: v_mul_legacy_f32_e32 [[NEG_MUL_LEGACY:v[0-9]+]], [[B]], [[A]]
; GCN-NEXT: buffer_store_dword [[NEG_MUL_LEGACY]]
; GCN: buffer_store_dword [[NEG_A]]
-define void @v_fneg_mul_legacy_store_use_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @v_fneg_mul_legacy_store_use_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -1277,7 +1583,7 @@ define void @v_fneg_mul_legacy_store_use_fneg_x_f32(float addrspace(1)* %out, fl
; GCN-DAG: v_mul_legacy_f32_e64 [[MUL:v[0-9]+]], -[[A]], s{{[0-9]+}}
; GCN-NEXT: buffer_store_dword [[NEG_MUL_LEGACY]]
; GCN: buffer_store_dword [[MUL]]
-define void @v_fneg_mul_legacy_multi_use_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float %c) #0 {
+define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_fneg_x_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float %c) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -1300,12 +1606,11 @@ define void @v_fneg_mul_legacy_multi_use_fneg_x_f32(float addrspace(1)* %out, fl
; GCN-LABEL: {{^}}v_fneg_sin_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e22f983
-; GCN: v_mul_f32_e64 [[MUL:v[0-9]+]], [[K]], -[[A]]
+; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], 0xbe22f983, [[A]]
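+; 0xbe22f983 is -1/(2*pi); the fneg has been folded into the constant operand
+; of the sin range-reduction multiply.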
; GCN: v_fract_f32_e32 [[FRACT:v[0-9]+]], [[MUL]]
; GCN: v_sin_f32_e32 [[RESULT:v[0-9]+]], [[FRACT]]
; GCN: buffer_store_dword [[RESULT]]
-define void @v_fneg_sin_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_sin_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
@@ -1321,27 +1626,509 @@ define void @v_fneg_sin_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: v_sin_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
; GCN: buffer_store_dword [[RESULT]]
-define void @v_fneg_amdgcn_sin_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+define amdgpu_kernel void @v_fneg_amdgcn_sin_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
%out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
%a = load volatile float, float addrspace(1)* %a.gep
%sin = call float @llvm.amdgcn.sin.f32(float %a)
- %fneg = fsub float -0.000000e+00, %sin
+ %fneg = fsub float -0.0, %sin
store float %fneg, float addrspace(1)* %out.gep
ret void
}
+; --------------------------------------------------------------------------------
+; ftrunc tests
+; --------------------------------------------------------------------------------
+
+; GCN-LABEL: {{^}}v_fneg_trunc_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_trunc_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @v_fneg_trunc_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %trunc = call float @llvm.trunc.f32(float %a)
+ %fneg = fsub float -0.0, %trunc
+ store float %fneg, float addrspace(1)* %out.gep
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; fround tests
+; --------------------------------------------------------------------------------
+
+; GCN-LABEL: {{^}}v_fneg_round_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_trunc_f32_e32
+; GCN: v_subrev_f32_e32
+; GCN: v_cndmask_b32
+
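+; With signed zeros honored (SAFE), the fneg of the round result stays a
+; separate xor; with nsz it folds into the final sub as a source modifier.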
+; GCN-SAFE: v_add_f32_e32 [[ADD:v[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-SAFE: v_xor_b32_e32 [[RESULT:v[0-9]+]], 0x80000000, [[ADD]]
+
+; GCN-NSZ: v_sub_f32_e64 [[RESULT:v[0-9]+]], -v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @v_fneg_round_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %round = call float @llvm.round.f32(float %a)
+ %fneg = fsub float -0.0, %round
+ store float %fneg, float addrspace(1)* %out.gep
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; rint tests
+; --------------------------------------------------------------------------------
+
+; GCN-LABEL: {{^}}v_fneg_rint_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_rndne_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @v_fneg_rint_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %rint = call float @llvm.rint.f32(float %a)
+ %fneg = fsub float -0.0, %rint
+ store float %fneg, float addrspace(1)* %out.gep
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; nearbyint tests
+; --------------------------------------------------------------------------------
+
+; GCN-LABEL: {{^}}v_fneg_nearbyint_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_rndne_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
+; GCN: buffer_store_dword [[RESULT]]
+define amdgpu_kernel void @v_fneg_nearbyint_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %nearbyint = call float @llvm.nearbyint.f32(float %a)
+ %fneg = fsub float -0.0, %nearbyint
+ store float %fneg, float addrspace(1)* %out.gep
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; vintrp tests
+; --------------------------------------------------------------------------------
+
+; GCN-LABEL: {{^}}v_fneg_interp_p1_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
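+; v_interp has no source modifiers, so the fneg is folded back into the mul
+; that produces its input.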
+; GCN: v_mul_f32_e64 [[MUL:v[0-9]+]], [[A]], -[[B]]
+; GCN: v_interp_p1_f32 v{{[0-9]+}}, [[MUL]]
+; GCN: v_interp_p1_f32 v{{[0-9]+}}, [[MUL]]
+define amdgpu_kernel void @v_fneg_interp_p1_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %mul = fmul float %a, %b
+ %fneg = fsub float -0.0, %mul
+ %intrp0 = call float @llvm.amdgcn.interp.p1(float %fneg, i32 0, i32 0, i32 0)
+ %intrp1 = call float @llvm.amdgcn.interp.p1(float %fneg, i32 1, i32 0, i32 0)
+ store volatile float %intrp0, float addrspace(1)* %out.gep
+ store volatile float %intrp1, float addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_interp_p2_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: v_mul_f32_e64 [[MUL:v[0-9]+]], [[A]], -[[B]]
+; GCN: v_interp_p2_f32 v{{[0-9]+}}, [[MUL]]
+; GCN: v_interp_p2_f32 v{{[0-9]+}}, [[MUL]]
+define amdgpu_kernel void @v_fneg_interp_p2_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %mul = fmul float %a, %b
+ %fneg = fsub float -0.0, %mul
+ %intrp0 = call float @llvm.amdgcn.interp.p2(float 4.0, float %fneg, i32 0, i32 0, i32 0)
+ %intrp1 = call float @llvm.amdgcn.interp.p2(float 4.0, float %fneg, i32 1, i32 0, i32 0)
+ store volatile float %intrp0, float addrspace(1)* %out.gep
+ store volatile float %intrp1, float addrspace(1)* %out.gep
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; CopyToReg tests
+; --------------------------------------------------------------------------------
+
+; GCN-LABEL: {{^}}v_fneg_copytoreg_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
+; GCN: v_mul_f32_e32 [[MUL0:v[0-9]+]], [[B]], [[A]]
+; GCN: s_cbranch_scc1
+
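+; %mul is also used un-negated after the branch, so the fneg is materialized
+; as an xor inside the conditional block.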
+; GCN: v_xor_b32_e32 [[XOR:v[0-9]+]], 0x80000000, [[MUL0]]
+; GCN: v_mul_f32_e32 [[MUL1:v[0-9]+]], [[C]], [[XOR]]
+; GCN: buffer_store_dword [[MUL1]]
+
+; GCN: buffer_store_dword [[MUL0]]
+define amdgpu_kernel void @v_fneg_copytoreg_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr, i32 %d) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %c.gep = getelementptr inbounds float, float addrspace(1)* %c.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %c = load volatile float, float addrspace(1)* %c.gep
+ %mul = fmul float %a, %b
+ %fneg = fsub float -0.0, %mul
+ %cmp0 = icmp eq i32 %d, 0
+ br i1 %cmp0, label %if, label %endif
+
+if:
+ %mul1 = fmul float %fneg, %c
+ store volatile float %mul1, float addrspace(1)* %out.gep
+ br label %endif
+
+endif:
+ store volatile float %mul, float addrspace(1)* %out.gep
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; inlineasm tests
+; --------------------------------------------------------------------------------
+
+; Can't fold into use, so should fold into source
+; GCN-LABEL: {{^}}v_fneg_inlineasm_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: v_mul_f32_e64 [[MUL:v[0-9]+]], [[A]], -[[B]]
+; GCN: ; use [[MUL]]
+; GCN: buffer_store_dword [[MUL]]
+define amdgpu_kernel void @v_fneg_inlineasm_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr, i32 %d) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %c.gep = getelementptr inbounds float, float addrspace(1)* %c.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %c = load volatile float, float addrspace(1)* %c.gep
+ %mul = fmul float %a, %b
+ %fneg = fsub float -0.0, %mul
+ call void asm sideeffect "; use $0", "v"(float %fneg) #0
+ store volatile float %fneg, float addrspace(1)* %out.gep
+ ret void
+}
+
+; Can't fold into the asm use, and the mul result is also stored, so the fneg
+; must be materialized.
+; GCN-LABEL: {{^}}v_fneg_inlineasm_multi_use_src_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], [[B]], [[A]]
+; GCN: v_xor_b32_e32 [[NEG:v[0-9]+]], 0x80000000, [[MUL]]
+; GCN: ; use [[NEG]]
+; GCN: buffer_store_dword [[MUL]]
+define amdgpu_kernel void @v_fneg_inlineasm_multi_use_src_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr, i32 %d) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %c.gep = getelementptr inbounds float, float addrspace(1)* %c.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %c = load volatile float, float addrspace(1)* %c.gep
+ %mul = fmul float %a, %b
+ %fneg = fsub float -0.0, %mul
+ call void asm sideeffect "; use $0", "v"(float %fneg) #0
+ store volatile float %mul, float addrspace(1)* %out.gep
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; code size regression tests
+; --------------------------------------------------------------------------------
+
+; Both users of the fneg are already VOP3 instructions, so folding the
+; source modifier into each of them carries no encoding-size penalty.
+; GCN-LABEL: {{^}}multiuse_fneg_2_vop3_users_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
+
+; GCN: v_fma_f32 [[FMA0:v[0-9]+]], -[[A]], [[B]], [[C]]
+; GCN-NEXT: v_fma_f32 [[FMA1:v[0-9]+]], -[[A]], [[C]], 2.0
+; GCN-NEXT: buffer_store_dword [[FMA0]]
+; GCN-NEXT: buffer_store_dword [[FMA1]]
+define amdgpu_kernel void @multiuse_fneg_2_vop3_users_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %c.gep = getelementptr inbounds float, float addrspace(1)* %c.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %c = load volatile float, float addrspace(1)* %c.gep
+
+ %fneg.a = fsub float -0.0, %a
+ %fma0 = call float @llvm.fma.f32(float %fneg.a, float %b, float %c)
+ %fma1 = call float @llvm.fma.f32(float %fneg.a, float %c, float 2.0)
+
+ store volatile float %fma0, float addrspace(1)* %out
+ store volatile float %fma1, float addrspace(1)* %out
+ ret void
+}
+
+; There are multiple users, but both require using a larger encoding
+; for the modifier.
+
+; GCN-LABEL: {{^}}multiuse_fneg_2_vop2_users_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
+
+; GCN: v_mul_f32_e64 [[MUL0:v[0-9]+]], -[[A]], [[B]]
+; GCN: v_mul_f32_e64 [[MUL1:v[0-9]+]], -[[A]], [[C]]
+; GCN-NEXT: buffer_store_dword [[MUL0]]
+; GCN-NEXT: buffer_store_dword [[MUL1]]
+define amdgpu_kernel void @multiuse_fneg_2_vop2_users_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %c.gep = getelementptr inbounds float, float addrspace(1)* %c.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %c = load volatile float, float addrspace(1)* %c.gep
+
+ %fneg.a = fsub float -0.0, %a
+ %mul0 = fmul float %fneg.a, %b
+ %mul1 = fmul float %fneg.a, %c
+
+ store volatile float %mul0, float addrspace(1)* %out
+ store volatile float %mul1, float addrspace(1)* %out
+ ret void
+}
+
+; One user is VOP3, so folding the modifier into it is free; the other user
+; needs the larger encoding.
+; GCN-LABEL: {{^}}multiuse_fneg_vop2_vop3_users_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
+
+; GCN: v_fma_f32 [[FMA0:v[0-9]+]], -[[A]], [[B]], 2.0
+; GCN: v_mul_f32_e64 [[MUL1:v[0-9]+]], -[[A]], [[C]]
+
+; GCN: buffer_store_dword [[FMA0]]
+; GCN-NEXT: buffer_store_dword [[MUL1]]
+define amdgpu_kernel void @multiuse_fneg_vop2_vop3_users_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %c.gep = getelementptr inbounds float, float addrspace(1)* %c.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %c = load volatile float, float addrspace(1)* %c.gep
+
+ %fneg.a = fsub float -0.0, %a
+ %fma0 = call float @llvm.fma.f32(float %fneg.a, float %b, float 2.0)
+ %mul1 = fmul float %fneg.a, %c
+
+ store volatile float %fma0, float addrspace(1)* %out
+ store volatile float %mul1, float addrspace(1)* %out
+ ret void
+}
+
+; The use of the fneg requires a code size increase, but folding into
+; the source does not
+
+; GCN-LABEL: {{^}}free_fold_src_code_size_cost_use_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[D:v[0-9]+]]
+
+; GCN-SAFE: v_fma_f32 [[FMA0:v[0-9]+]], [[A]], [[B]], 2.0
+; GCN-SAFE-DAG: v_mul_f32_e64 [[MUL1:v[0-9]+]], -[[FMA0]], [[C]]
+; GCN-SAFE-DAG: v_mul_f32_e64 [[MUL2:v[0-9]+]], -[[FMA0]], [[D]]
+
+; GCN-NSZ: v_fma_f32 [[FMA0:v[0-9]+]], [[A]], -[[B]], -2.0
+; GCN-NSZ-DAG: v_mul_f32_e32 [[MUL1:v[0-9]+]], [[C]], [[FMA0]]
+; GCN-NSZ-DAG: v_mul_f32_e32 [[MUL2:v[0-9]+]], [[D]], [[FMA0]]
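+; Under nsz the fneg is pushed into the fma (negating [[B]] and the 2.0), so
+; both muls can use the result with no modifiers.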
+
+; GCN: buffer_store_dword [[MUL1]]
+; GCN-NEXT: buffer_store_dword [[MUL2]]
+define amdgpu_kernel void @free_fold_src_code_size_cost_use_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr, float addrspace(1)* %d.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %c.gep = getelementptr inbounds float, float addrspace(1)* %c.ptr, i64 %tid.ext
+ %d.gep = getelementptr inbounds float, float addrspace(1)* %d.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %c = load volatile float, float addrspace(1)* %c.gep
+ %d = load volatile float, float addrspace(1)* %d.gep
+
+ %fma0 = call float @llvm.fma.f32(float %a, float %b, float 2.0)
+ %fneg.fma0 = fsub float -0.0, %fma0
+ %mul1 = fmul float %fneg.fma0, %c
+ %mul2 = fmul float %fneg.fma0, %d
+
+ store volatile float %mul1, float addrspace(1)* %out
+ store volatile float %mul2, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}free_fold_src_code_size_cost_use_f64:
+; GCN: {{buffer|flat}}_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]]
+; GCN: {{buffer|flat}}_load_dwordx2 [[B:v\[[0-9]+:[0-9]+\]]]
+; GCN: {{buffer|flat}}_load_dwordx2 [[C:v\[[0-9]+:[0-9]+\]]]
+; GCN: {{buffer|flat}}_load_dwordx2 [[D:v\[[0-9]+:[0-9]+\]]]
+
+; GCN: v_fma_f64 [[FMA0:v\[[0-9]+:[0-9]+\]]], [[A]], [[B]], 2.0
+; GCN-DAG: v_mul_f64 [[MUL0:v\[[0-9]+:[0-9]+\]]], -[[FMA0]], [[C]]
+; GCN-DAG: v_mul_f64 [[MUL1:v\[[0-9]+:[0-9]+\]]], -[[FMA0]], [[D]]
+
+; GCN: buffer_store_dwordx2 [[MUL0]]
+; GCN: buffer_store_dwordx2 [[MUL1]]
+define amdgpu_kernel void @free_fold_src_code_size_cost_use_f64(double addrspace(1)* %out, double addrspace(1)* %a.ptr, double addrspace(1)* %b.ptr, double addrspace(1)* %c.ptr, double addrspace(1)* %d.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds double, double addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds double, double addrspace(1)* %b.ptr, i64 %tid.ext
+ %c.gep = getelementptr inbounds double, double addrspace(1)* %c.ptr, i64 %tid.ext
+ %d.gep = getelementptr inbounds double, double addrspace(1)* %d.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile double, double addrspace(1)* %a.gep
+ %b = load volatile double, double addrspace(1)* %b.gep
+ %c = load volatile double, double addrspace(1)* %c.gep
+ %d = load volatile double, double addrspace(1)* %d.gep
+
+ %fma0 = call double @llvm.fma.f64(double %a, double %b, double 2.0)
+ %fneg.fma0 = fsub double -0.0, %fma0
+ %mul1 = fmul double %fneg.fma0, %c
+ %mul2 = fmul double %fneg.fma0, %d
+
+ store volatile double %mul1, double addrspace(1)* %out
+ store volatile double %mul2, double addrspace(1)* %out
+ ret void
+}
+
+; %trunc.a has one fneg use, but folding the fneg there would require a code
+; size increase; instead the fneg can be folded for free into the fma.
+
+; GCN-LABEL: {{^}}one_use_cost_to_fold_into_src_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
+; GCN: v_trunc_f32_e32 [[TRUNC_A:v[0-9]+]], [[A]]
+; GCN: v_fma_f32 [[FMA0:v[0-9]+]], -[[TRUNC_A]], [[B]], [[C]]
+; GCN: buffer_store_dword [[FMA0]]
+define amdgpu_kernel void @one_use_cost_to_fold_into_src_f32(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr, float addrspace(1)* %d.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %c.gep = getelementptr inbounds float, float addrspace(1)* %c.ptr, i64 %tid.ext
+ %d.gep = getelementptr inbounds float, float addrspace(1)* %d.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %c = load volatile float, float addrspace(1)* %c.gep
+ %d = load volatile float, float addrspace(1)* %d.gep
+
+ %trunc.a = call float @llvm.trunc.f32(float %a)
+ %trunc.fneg.a = fsub float -0.0, %trunc.a
+ %fma0 = call float @llvm.fma.f32(float %trunc.fneg.a, float %b, float %c)
+ store volatile float %fma0, float addrspace(1)* %out
+ ret void
+}
+
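+; Here %trunc.a has a second, non-negated use, so the fneg stays as a source
+; modifier on the fma.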
+; GCN-LABEL: {{^}}multi_use_cost_to_fold_into_src:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[D:v[0-9]+]]
+; GCN: v_trunc_f32_e32 [[TRUNC_A:v[0-9]+]], [[A]]
+; GCN-DAG: v_fma_f32 [[FMA0:v[0-9]+]], -[[TRUNC_A]], [[B]], [[C]]
+; GCN-DAG: v_mul_f32_e32 [[MUL1:v[0-9]+]], [[D]], [[TRUNC_A]]
+; GCN: buffer_store_dword [[FMA0]]
+; GCN: buffer_store_dword [[MUL1]]
+define amdgpu_kernel void @multi_use_cost_to_fold_into_src(float addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr, float addrspace(1)* %c.ptr, float addrspace(1)* %d.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %c.gep = getelementptr inbounds float, float addrspace(1)* %c.ptr, i64 %tid.ext
+ %d.gep = getelementptr inbounds float, float addrspace(1)* %d.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %c = load volatile float, float addrspace(1)* %c.gep
+ %d = load volatile float, float addrspace(1)* %d.gep
+
+ %trunc.a = call float @llvm.trunc.f32(float %a)
+ %trunc.fneg.a = fsub float -0.0, %trunc.a
+ %fma0 = call float @llvm.fma.f32(float %trunc.fneg.a, float %b, float %c)
+ %mul1 = fmul float %trunc.a, %d
+ store volatile float %fma0, float addrspace(1)* %out
+ store volatile float %mul1, float addrspace(1)* %out
+ ret void
+}
+
declare i32 @llvm.amdgcn.workitem.id.x() #1
declare float @llvm.fma.f32(float, float, float) #1
declare float @llvm.fmuladd.f32(float, float, float) #1
declare float @llvm.sin.f32(float) #1
+declare float @llvm.trunc.f32(float) #1
+declare float @llvm.round.f32(float) #1
+declare float @llvm.rint.f32(float) #1
+declare float @llvm.nearbyint.f32(float) #1
+declare float @llvm.minnum.f32(float, float) #1
+declare float @llvm.maxnum.f32(float, float) #1
+
+declare double @llvm.fma.f64(double, double, double) #1
declare float @llvm.amdgcn.sin.f32(float) #1
declare float @llvm.amdgcn.rcp.f32(float) #1
declare float @llvm.amdgcn.rcp.legacy(float) #1
declare float @llvm.amdgcn.fmul.legacy(float, float) #1
+declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #0
+declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #0
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/fneg-fabs.f16.ll b/test/CodeGen/AMDGPU/fneg-fabs.f16.ll
index d7d21311c1b9..555764c15519 100644
--- a/test/CodeGen/AMDGPU/fneg-fabs.f16.ll
+++ b/test/CodeGen/AMDGPU/fneg-fabs.f16.ll
@@ -1,33 +1,35 @@
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=FUNC %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=CIVI %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GFX89 -check-prefix=GCN -check-prefix=CIVI %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx901 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX89 -check-prefix=GFX9 -check-prefix=GCN %s
; GCN-LABEL: {{^}}fneg_fabs_fadd_f16:
; CI: v_cvt_f32_f16_e32
-; CI: v_cvt_f32_f16_e32
-; CI: v_sub_f32_e64 v{{[0-9]+}}, v{{[0-9]+}}, |v{{[0-9]+}}|
+; CI: v_cvt_f32_f16_e64 [[CVT_ABS_X:v[0-9]+]], |v{{[0-9]+}}|
+; CI: v_subrev_f32_e32 v{{[0-9]+}}, [[CVT_ABS_X]], v{{[0-9]+}}
-; VI-NOT: and
-; VI: v_sub_f16_e64 {{v[0-9]+}}, {{v[0-9]+}}, |{{v[0-9]+}}|
-define void @fneg_fabs_fadd_f16(half addrspace(1)* %out, half %x, half %y) {
+; GFX89-NOT: _and
+; GFX89: v_sub_f16_e64 {{v[0-9]+}}, {{v[0-9]+}}, |{{v[0-9]+}}|
+define amdgpu_kernel void @fneg_fabs_fadd_f16(half addrspace(1)* %out, half %x, half %y) {
%fabs = call half @llvm.fabs.f16(half %x)
- %fsub = fsub half -0.000000e+00, %fabs
+ %fsub = fsub half -0.0, %fabs
%fadd = fadd half %y, %fsub
store half %fadd, half addrspace(1)* %out, align 2
ret void
}
; GCN-LABEL: {{^}}fneg_fabs_fmul_f16:
-; CI: v_cvt_f32_f16_e32
-; CI: v_cvt_f32_f16_e32
-; CI: v_mul_f32_e64 {{v[0-9]+}}, {{v[0-9]+}}, -|{{v[0-9]+}}|
+; CI-DAG: v_cvt_f32_f16_e32
+; CI-DAG: v_cvt_f32_f16_e64 [[CVT_NEG_ABS_X:v[0-9]+]], -|{{v[0-9]+}}|
+; CI: v_mul_f32_e32 {{v[0-9]+}}, [[CVT_NEG_ABS_X]], {{v[0-9]+}}
; CI: v_cvt_f16_f32_e32
-; VI-NOT: and
-; VI: v_mul_f16_e64 {{v[0-9]+}}, {{v[0-9]+}}, -|{{v[0-9]+}}|
-; VI-NOT: and
-define void @fneg_fabs_fmul_f16(half addrspace(1)* %out, half %x, half %y) {
+; GFX89-NOT: _and
+; GFX89: v_mul_f16_e64 [[MUL:v[0-9]+]], {{v[0-9]+}}, -|{{v[0-9]+}}|
+; GFX89-NOT: [[MUL]]
+; GFX89: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
+define amdgpu_kernel void @fneg_fabs_fmul_f16(half addrspace(1)* %out, half %x, half %y) {
%fabs = call half @llvm.fabs.f16(half %x)
- %fsub = fsub half -0.000000e+00, %fabs
+ %fsub = fsub half -0.0, %fabs
%fmul = fmul half %y, %fsub
store half %fmul, half addrspace(1)* %out, align 2
ret void
@@ -39,75 +41,113 @@ define void @fneg_fabs_fmul_f16(half addrspace(1)* %out, half %x, half %y) {
; GCN-LABEL: {{^}}fneg_fabs_free_f16:
; GCN: v_or_b32_e32 v{{[0-9]+}}, 0x8000, v{{[0-9]+}}
-define void @fneg_fabs_free_f16(half addrspace(1)* %out, i16 %in) {
+define amdgpu_kernel void @fneg_fabs_free_f16(half addrspace(1)* %out, i16 %in) {
%bc = bitcast i16 %in to half
%fabs = call half @llvm.fabs.f16(half %bc)
- %fsub = fsub half -0.000000e+00, %fabs
+ %fsub = fsub half -0.0, %fabs
store half %fsub, half addrspace(1)* %out
ret void
}
-; FIXME: Should use or
; GCN-LABEL: {{^}}fneg_fabs_f16:
-; CI: v_cvt_f32_f16_e32 v{{[0-9]+}},
-; CI: v_cvt_f16_f32_e64 v{{[0-9]+}}, -|v{{[0-9]+}}|
-
-; VI: v_or_b32_e32 v{{[0-9]+}}, 0x8000, v{{[0-9]+}}
-define void @fneg_fabs_f16(half addrspace(1)* %out, half %in) {
+; GCN: v_or_b32_e32 v{{[0-9]+}}, 0x8000, v{{[0-9]+}}
+define amdgpu_kernel void @fneg_fabs_f16(half addrspace(1)* %out, half %in) {
%fabs = call half @llvm.fabs.f16(half %in)
- %fsub = fsub half -0.000000e+00, %fabs
+ %fsub = fsub half -0.0, %fabs
store half %fsub, half addrspace(1)* %out, align 2
ret void
}
; GCN-LABEL: {{^}}v_fneg_fabs_f16:
-; CI: v_cvt_f32_f16_e32 v{{[0-9]+}},
-; CI: v_cvt_f16_f32_e64 v{{[0-9]+}}, -|v{{[0-9]+}}|
-
-; VI: v_or_b32_e32 v{{[0-9]+}}, 0x8000, v{{[0-9]+}}
-define void @v_fneg_fabs_f16(half addrspace(1)* %out, half addrspace(1)* %in) {
+; GCN: v_or_b32_e32 v{{[0-9]+}}, 0x8000, v{{[0-9]+}}
+define amdgpu_kernel void @v_fneg_fabs_f16(half addrspace(1)* %out, half addrspace(1)* %in) {
%val = load half, half addrspace(1)* %in, align 2
%fabs = call half @llvm.fabs.f16(half %val)
- %fsub = fsub half -0.000000e+00, %fabs
+ %fsub = fsub half -0.0, %fabs
store half %fsub, half addrspace(1)* %out, align 2
ret void
}
; FIXME: single bit op
-; GCN-LABEL: {{^}}fneg_fabs_v2f16:
-; CI: v_cvt_f16_f32_e64 v{{[0-9]+}}, -|v{{[0-9]+}}|
-; CI: v_cvt_f16_f32_e64 v{{[0-9]+}}, -|v{{[0-9]+}}|
-
-; VI: s_mov_b32 [[MASK:s[0-9]+]], 0x8000{{$}}
-; VI: v_or_b32_e32 v{{[0-9]+}}, [[MASK]],
-; VI: v_or_b32_e32 v{{[0-9]+}}, [[MASK]],
-; VI: flat_store_dword
-define void @fneg_fabs_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %in) {
+; GCN-LABEL: {{^}}s_fneg_fabs_v2f16:
+; CIVI: s_mov_b32 [[MASK:s[0-9]+]], 0x8000{{$}}
+; CIVI: v_or_b32_e32 v{{[0-9]+}}, [[MASK]],
+; CIVI: v_or_b32_e32 v{{[0-9]+}}, [[MASK]],
+; CIVI: flat_store_dword
+
+; GFX9: s_or_b32 s{{[0-9]+}}, 0x80008000, s{{[0-9]+}}
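+; 0x80008000 sets the sign bit of both packed half elements in one scalar op.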
+define amdgpu_kernel void @s_fneg_fabs_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %in) {
%fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %in)
- %fsub = fsub <2 x half> <half -0.000000e+00, half -0.000000e+00>, %fabs
- store <2 x half> %fsub, <2 x half> addrspace(1)* %out
+ %fneg.fabs = fsub <2 x half> <half -0.0, half -0.0>, %fabs
+ store <2 x half> %fneg.fabs, <2 x half> addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}fneg_fabs_v4f16:
-; CI: v_cvt_f16_f32_e64 v{{[0-9]+}}, -|v{{[0-9]+}}|
-; CI: v_cvt_f16_f32_e64 v{{[0-9]+}}, -|v{{[0-9]+}}|
-; CI: v_cvt_f16_f32_e64 v{{[0-9]+}}, -|v{{[0-9]+}}|
-; CI: v_cvt_f16_f32_e64 v{{[0-9]+}}, -|v{{[0-9]+}}|
-
-; VI: s_mov_b32 [[MASK:s[0-9]+]], 0x8000{{$}}
-; VI: v_or_b32_e32 v{{[0-9]+}}, [[MASK]],
-; VI: v_or_b32_e32 v{{[0-9]+}}, [[MASK]],
-; VI: v_or_b32_e32 v{{[0-9]+}}, [[MASK]],
-; VI: v_or_b32_e32 v{{[0-9]+}}, [[MASK]],
-; VI: flat_store_dwordx2
-define void @fneg_fabs_v4f16(<4 x half> addrspace(1)* %out, <4 x half> %in) {
+; CIVI: s_mov_b32 [[MASK:s[0-9]+]], 0x8000{{$}}
+; CIVI: v_or_b32_e32 v{{[0-9]+}}, [[MASK]],
+; CIVI: v_or_b32_e32 v{{[0-9]+}}, [[MASK]],
+; CIVI: v_or_b32_e32 v{{[0-9]+}}, [[MASK]],
+; CIVI: v_or_b32_e32 v{{[0-9]+}}, [[MASK]],
+
+; GFX9: s_mov_b32 [[MASK:s[0-9]+]], 0x80008000
+; GFX9: s_or_b32 s{{[0-9]+}}, [[MASK]], s{{[0-9]+}}
+; GFX9: s_or_b32 s{{[0-9]+}}, [[MASK]], s{{[0-9]+}}
+
+; GCN: flat_store_dwordx2
+define amdgpu_kernel void @fneg_fabs_v4f16(<4 x half> addrspace(1)* %out, <4 x half> %in) {
%fabs = call <4 x half> @llvm.fabs.v4f16(<4 x half> %in)
- %fsub = fsub <4 x half> <half -0.000000e+00, half -0.000000e+00, half -0.000000e+00, half -0.000000e+00>, %fabs
+ %fsub = fsub <4 x half> <half -0.0, half -0.0, half -0.0, half -0.0>, %fabs
store <4 x half> %fsub, <4 x half> addrspace(1)* %out
ret void
}
-declare half @llvm.fabs.f16(half) readnone
-declare <2 x half> @llvm.fabs.v2f16(<2 x half>) readnone
-declare <4 x half> @llvm.fabs.v4f16(<4 x half>) readnone
+; GCN-LABEL: {{^}}fold_user_fneg_fabs_v2f16:
+; CI: v_cvt_f32_f16_e64 v{{[0-9]+}}, -|v{{[0-9]+}}|
+; CI: v_cvt_f32_f16_e64 v{{[0-9]+}}, -|v{{[0-9]+}}|
+; CI: v_mul_f32_e32 v{{[0-9]+}}, 4.0, v{{[0-9]+}}
+; CI: v_mul_f32_e32 v{{[0-9]+}}, 4.0, v{{[0-9]+}}
+
+; VI: v_mul_f16_e64 v{{[0-9]+}}, -|v{{[0-9]+}}|, 4.0
+; VI: v_mul_f16_e64 v{{[0-9]+}}, -|v{{[0-9]+}}|, 4.0
+
+; GFX9: s_and_b32 [[ABS:s[0-9]+]], s{{[0-9]+}}, 0x7fff7fff
+; GFX9: v_pk_mul_f16 v{{[0-9]+}}, [[ABS]], 4.0 neg_lo:[1,0] neg_hi:[1,0]
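+; The s_and with 0x7fff7fff is the packed fabs; neg_lo/neg_hi then re-apply
+; the negation to both halves.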
+define amdgpu_kernel void @fold_user_fneg_fabs_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %in) #0 {
+ %fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %in)
+ %fneg.fabs = fsub <2 x half> <half -0.0, half -0.0>, %fabs
+ %mul = fmul <2 x half> %fneg.fabs, <half 4.0, half 4.0>
+ store <2 x half> %mul, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_fneg_multi_use_fabs_v2f16:
+; GFX9: s_and_b32 [[ABS:s[0-9]+]], s{{[0-9]+}}, 0x7fff7fff
+; GFX9: v_mov_b32_e32 [[VABS:v[0-9]+]], [[ABS]]
+; GFX9: v_xor_b32_e32 [[NEG:v[0-9]+]], 0x80008000, [[VABS]]
+define amdgpu_kernel void @s_fneg_multi_use_fabs_v2f16(<2 x half> addrspace(1)* %out0, <2 x half> addrspace(1)* %out1, <2 x half> %in) {
+ %fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %in)
+ %fneg = fsub <2 x half> <half -0.0, half -0.0>, %fabs
+ store <2 x half> %fabs, <2 x half> addrspace(1)* %out0
+ store <2 x half> %fneg, <2 x half> addrspace(1)* %out1
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_fneg_multi_use_fabs_foldable_neg_v2f16:
+; GFX9: s_and_b32 [[ABS:s[0-9]+]], s{{[0-9]+}}, 0x7fff7fff
+; GFX9: v_pk_mul_f16 v{{[0-9]+}}, [[ABS]], 4.0 neg_lo:[1,0] neg_hi:[1,0]
+define amdgpu_kernel void @s_fneg_multi_use_fabs_foldable_neg_v2f16(<2 x half> addrspace(1)* %out0, <2 x half> addrspace(1)* %out1, <2 x half> %in) {
+ %fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %in)
+ %fneg = fsub <2 x half> <half -0.0, half -0.0>, %fabs
+ %mul = fmul <2 x half> %fneg, <half 4.0, half 4.0>
+ store <2 x half> %fabs, <2 x half> addrspace(1)* %out0
+ store <2 x half> %mul, <2 x half> addrspace(1)* %out1
+ ret void
+}
+
+declare half @llvm.fabs.f16(half) #1
+declare <2 x half> @llvm.fabs.v2f16(<2 x half>) #1
+declare <4 x half> @llvm.fabs.v4f16(<4 x half>) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/fneg-fabs.f64.ll b/test/CodeGen/AMDGPU/fneg-fabs.f64.ll
index d16e83fd4d5b..85f544032171 100644
--- a/test/CodeGen/AMDGPU/fneg-fabs.f64.ll
+++ b/test/CodeGen/AMDGPU/fneg-fabs.f64.ll
@@ -6,7 +6,7 @@
; GCN-LABEL: {{^}}fneg_fabs_fadd_f64:
; GCN: v_add_f64 {{v\[[0-9]+:[0-9]+\]}}, -|v{{\[[0-9]+:[0-9]+\]}}|, {{s\[[0-9]+:[0-9]+\]}}
-define void @fneg_fabs_fadd_f64(double addrspace(1)* %out, double %x, double %y) {
+define amdgpu_kernel void @fneg_fabs_fadd_f64(double addrspace(1)* %out, double %x, double %y) {
%fabs = call double @llvm.fabs.f64(double %x)
%fsub = fsub double -0.000000e+00, %fabs
%fadd = fadd double %y, %fsub
@@ -14,7 +14,7 @@ define void @fneg_fabs_fadd_f64(double addrspace(1)* %out, double %x, double %y)
ret void
}
-define void @v_fneg_fabs_fadd_f64(double addrspace(1)* %out, double addrspace(1)* %xptr, double addrspace(1)* %yptr) {
+define amdgpu_kernel void @v_fneg_fabs_fadd_f64(double addrspace(1)* %out, double addrspace(1)* %xptr, double addrspace(1)* %yptr) {
%x = load double, double addrspace(1)* %xptr, align 8
%y = load double, double addrspace(1)* %yptr, align 8
%fabs = call double @llvm.fabs.f64(double %x)
@@ -26,7 +26,7 @@ define void @v_fneg_fabs_fadd_f64(double addrspace(1)* %out, double addrspace(1)
; GCN-LABEL: {{^}}fneg_fabs_fmul_f64:
; GCN: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, -|{{v\[[0-9]+:[0-9]+\]}}|, {{s\[[0-9]+:[0-9]+\]}}
-define void @fneg_fabs_fmul_f64(double addrspace(1)* %out, double %x, double %y) {
+define amdgpu_kernel void @fneg_fabs_fmul_f64(double addrspace(1)* %out, double %x, double %y) {
%fabs = call double @llvm.fabs.f64(double %x)
%fsub = fsub double -0.000000e+00, %fabs
%fmul = fmul double %y, %fsub
@@ -35,7 +35,7 @@ define void @fneg_fabs_fmul_f64(double addrspace(1)* %out, double %x, double %y)
}
; GCN-LABEL: {{^}}fneg_fabs_free_f64:
-define void @fneg_fabs_free_f64(double addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @fneg_fabs_free_f64(double addrspace(1)* %out, i64 %in) {
%bc = bitcast i64 %in to double
%fabs = call double @llvm.fabs.f64(double %bc)
%fsub = fsub double -0.000000e+00, %fabs
@@ -46,7 +46,7 @@ define void @fneg_fabs_free_f64(double addrspace(1)* %out, i64 %in) {
; GCN-LABEL: {{^}}fneg_fabs_fn_free_f64:
; GCN: v_bfrev_b32_e32 [[IMMREG:v[0-9]+]], 1{{$}}
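; v_bfrev_b32 of 1 materializes the 0x80000000 sign-bit mask.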
; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
-define void @fneg_fabs_fn_free_f64(double addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @fneg_fabs_fn_free_f64(double addrspace(1)* %out, i64 %in) {
%bc = bitcast i64 %in to double
%fabs = call double @fabs(double %bc)
%fsub = fsub double -0.000000e+00, %fabs
@@ -62,7 +62,7 @@ define void @fneg_fabs_fn_free_f64(double addrspace(1)* %out, i64 %in) {
; GCN-DAG: v_or_b32_e32 v[[HI_V:[0-9]+]], s[[HI_X]], [[IMMREG]]
; GCN-DAG: v_mov_b32_e32 v[[LO_V:[0-9]+]], s[[LO_X]]
; GCN: buffer_store_dwordx2 v{{\[}}[[LO_V]]:[[HI_V]]{{\]}}
-define void @fneg_fabs_f64(double addrspace(1)* %out, double %in) {
+define amdgpu_kernel void @fneg_fabs_f64(double addrspace(1)* %out, double %in) {
%fabs = call double @llvm.fabs.f64(double %in)
%fsub = fsub double -0.000000e+00, %fabs
store double %fsub, double addrspace(1)* %out, align 8
@@ -74,7 +74,7 @@ define void @fneg_fabs_f64(double addrspace(1)* %out, double %in) {
; GCN-NOT: 0x80000000
; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
-define void @fneg_fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
+define amdgpu_kernel void @fneg_fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
%fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %in)
%fsub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %fabs
store <2 x double> %fsub, <2 x double> addrspace(1)* %out
@@ -88,7 +88,7 @@ define void @fneg_fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in)
; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
-define void @fneg_fabs_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
+define amdgpu_kernel void @fneg_fabs_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
%fabs = call <4 x double> @llvm.fabs.v4f64(<4 x double> %in)
%fsub = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %fabs
store <4 x double> %fsub, <4 x double> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/fneg-fabs.ll b/test/CodeGen/AMDGPU/fneg-fabs.ll
index 9ee1171306c7..a0cf37b159db 100644
--- a/test/CodeGen/AMDGPU/fneg-fabs.ll
+++ b/test/CodeGen/AMDGPU/fneg-fabs.ll
@@ -5,7 +5,7 @@
; FUNC-LABEL: {{^}}fneg_fabs_fadd_f32:
; SI-NOT: and
; SI: v_subrev_f32_e64 {{v[0-9]+}}, |{{v[0-9]+}}|, {{s[0-9]+}}
-define void @fneg_fabs_fadd_f32(float addrspace(1)* %out, float %x, float %y) {
+define amdgpu_kernel void @fneg_fabs_fadd_f32(float addrspace(1)* %out, float %x, float %y) {
%fabs = call float @llvm.fabs.f32(float %x)
%fsub = fsub float -0.000000e+00, %fabs
%fadd = fadd float %y, %fsub
@@ -17,7 +17,7 @@ define void @fneg_fabs_fadd_f32(float addrspace(1)* %out, float %x, float %y) {
; SI-NOT: and
; SI: v_mul_f32_e64 {{v[0-9]+}}, -|{{v[0-9]+}}|, {{s[0-9]+}}
; SI-NOT: and
-define void @fneg_fabs_fmul_f32(float addrspace(1)* %out, float %x, float %y) {
+define amdgpu_kernel void @fneg_fabs_fmul_f32(float addrspace(1)* %out, float %x, float %y) {
%fabs = call float @llvm.fabs.f32(float %x)
%fsub = fsub float -0.000000e+00, %fabs
%fmul = fmul float %y, %fsub
@@ -35,7 +35,7 @@ define void @fneg_fabs_fmul_f32(float addrspace(1)* %out, float %x, float %y) {
; R600: -PV
; SI: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000000
-define void @fneg_fabs_free_f32(float addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @fneg_fabs_free_f32(float addrspace(1)* %out, i32 %in) {
%bc = bitcast i32 %in to float
%fabs = call float @llvm.fabs.f32(float %bc)
%fsub = fsub float -0.000000e+00, %fabs
@@ -49,7 +49,7 @@ define void @fneg_fabs_free_f32(float addrspace(1)* %out, i32 %in) {
; R600: -PV
; SI: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000000
-define void @fneg_fabs_fn_free_f32(float addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @fneg_fabs_fn_free_f32(float addrspace(1)* %out, i32 %in) {
%bc = bitcast i32 %in to float
%fabs = call float @fabs(float %bc)
%fsub = fsub float -0.000000e+00, %fabs
@@ -59,7 +59,7 @@ define void @fneg_fabs_fn_free_f32(float addrspace(1)* %out, i32 %in) {
; FUNC-LABEL: {{^}}fneg_fabs_f32:
; SI: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000000
-define void @fneg_fabs_f32(float addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fneg_fabs_f32(float addrspace(1)* %out, float %in) {
%fabs = call float @llvm.fabs.f32(float %in)
%fsub = fsub float -0.000000e+00, %fabs
store float %fsub, float addrspace(1)* %out, align 4
@@ -68,7 +68,7 @@ define void @fneg_fabs_f32(float addrspace(1)* %out, float %in) {
; FUNC-LABEL: {{^}}v_fneg_fabs_f32:
; SI: v_or_b32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
-define void @v_fneg_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
+define amdgpu_kernel void @v_fneg_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%val = load float, float addrspace(1)* %in, align 4
%fabs = call float @llvm.fabs.f32(float %val)
%fsub = fsub float -0.000000e+00, %fabs
@@ -86,7 +86,7 @@ define void @v_fneg_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in)
; SI: s_brev_b32 [[SIGNBITK:s[0-9]+]], 1{{$}}
; SI: v_or_b32_e32 v{{[0-9]+}}, [[SIGNBITK]], v{{[0-9]+}}
; SI: v_or_b32_e32 v{{[0-9]+}}, [[SIGNBITK]], v{{[0-9]+}}
-define void @fneg_fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
+define amdgpu_kernel void @fneg_fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
%fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %in)
%fsub = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %fabs
store <2 x float> %fsub, <2 x float> addrspace(1)* %out
@@ -99,7 +99,7 @@ define void @fneg_fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
; SI: v_or_b32_e32 v{{[0-9]+}}, [[SIGNBITK]], v{{[0-9]+}}
; SI: v_or_b32_e32 v{{[0-9]+}}, [[SIGNBITK]], v{{[0-9]+}}
; SI: v_or_b32_e32 v{{[0-9]+}}, [[SIGNBITK]], v{{[0-9]+}}
-define void @fneg_fabs_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
+define amdgpu_kernel void @fneg_fabs_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
%fabs = call <4 x float> @llvm.fabs.v4f32(<4 x float> %in)
%fsub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %fabs
store <4 x float> %fsub, <4 x float> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/fneg.f16.ll b/test/CodeGen/AMDGPU/fneg.f16.ll
index e3dfd9201a24..626a0b50cce8 100644
--- a/test/CodeGen/AMDGPU/fneg.f16.ll
+++ b/test/CodeGen/AMDGPU/fneg.f16.ll
@@ -1,11 +1,11 @@
-; RUN: llc -march=amdgcn -mcpu=kaveri -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=kaveri -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=CIVI -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=CIVI -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 -check-prefix=GCN %s
; FIXME: Should be able to do scalar op
-; FUNC-LABEL: {{^}}s_fneg_f16:
-
-define void @s_fneg_f16(half addrspace(1)* %out, half %in) {
- %fneg = fsub half -0.000000e+00, %in
+; GCN-LABEL: {{^}}s_fneg_f16:
+define amdgpu_kernel void @s_fneg_f16(half addrspace(1)* %out, half %in) #0 {
+ %fneg = fsub half -0.0, %in
store half %fneg, half addrspace(1)* %out
ret void
}
@@ -13,49 +13,123 @@ define void @s_fneg_f16(half addrspace(1)* %out, half %in) {
; FIXME: Should be able to use bit operations when the type is illegal as well.
-; FUNC-LABEL: {{^}}v_fneg_f16:
+; GCN-LABEL: {{^}}v_fneg_f16:
; GCN: flat_load_ushort [[VAL:v[0-9]+]],
-
-; CI: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], [[VAL]]
-; CI: v_cvt_f16_f32_e64 [[CVT1:v[0-9]+]], -[[CVT0]]
-; CI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[CVT1]]
-
-; VI: v_xor_b32_e32 [[XOR:v[0-9]+]], 0x8000, [[VAL]]
+; GCN: v_xor_b32_e32 [[XOR:v[0-9]+]], 0x8000, [[VAL]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[XOR]]
-define void @v_fneg_f16(half addrspace(1)* %out, half addrspace(1)* %in) {
- %val = load half, half addrspace(1)* %in, align 2
- %fneg = fsub half -0.000000e+00, %val
- store half %fneg, half addrspace(1)* %out
+define amdgpu_kernel void @v_fneg_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.in = getelementptr inbounds half, half addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr inbounds half, half addrspace(1)* %out, i32 %tid
+ %val = load half, half addrspace(1)* %gep.in, align 2
+ %fneg = fsub half -0.0, %val
+ store half %fneg, half addrspace(1)* %gep.out
ret void
}
-; FUNC-LABEL: {{^}}fneg_free_f16:
+; GCN-LABEL: {{^}}fneg_free_f16:
; GCN: flat_load_ushort [[NEG_VALUE:v[0-9]+]],
; XCI: s_xor_b32 [[XOR:s[0-9]+]], [[NEG_VALUE]], 0x8000{{$}}
; CI: v_xor_b32_e32 [[XOR:v[0-9]+]], 0x8000, [[NEG_VALUE]]
; CI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[XOR]]
-define void @fneg_free_f16(half addrspace(1)* %out, i16 %in) {
+define amdgpu_kernel void @fneg_free_f16(half addrspace(1)* %out, i16 %in) #0 {
%bc = bitcast i16 %in to half
%fsub = fsub half -0.0, %bc
store half %fsub, half addrspace(1)* %out
ret void
}
-; FUNC-LABEL: {{^}}v_fneg_fold_f16:
+; GCN-LABEL: {{^}}v_fneg_fold_f16:
; GCN: flat_load_ushort [[NEG_VALUE:v[0-9]+]]
-; CI: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], [[CVT0]]
-; CI: v_mul_f32_e64 [[MUL:v[0-9]+]], -[[CVT0]], [[CVT0]]
+; CI-DAG: v_cvt_f32_f16_e32 [[CVT_VAL:v[0-9]+]], [[NEG_VALUE]]
+; CI-DAG: v_cvt_f32_f16_e64 [[NEG_CVT0:v[0-9]+]], -[[NEG_VALUE]]
+; CI: v_mul_f32_e32 [[MUL:v[0-9]+]], [[CVT_VAL]], [[NEG_CVT0]]
; CI: v_cvt_f16_f32_e32 [[CVT1:v[0-9]+]], [[MUL]]
; CI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[CVT1]]
; VI-NOT: [[NEG_VALUE]]
; VI: v_mul_f16_e64 v{{[0-9]+}}, -[[NEG_VALUE]], [[NEG_VALUE]]
-define void @v_fneg_fold_f16(half addrspace(1)* %out, half addrspace(1)* %in) {
+define amdgpu_kernel void @v_fneg_fold_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
%val = load half, half addrspace(1)* %in
%fsub = fsub half -0.0, %val
%fmul = fmul half %fsub, %val
store half %fmul, half addrspace(1)* %out
ret void
}
+
+; FIXME: Terrible code with VI and even worse with SI/CI
+; GCN-LABEL: {{^}}s_fneg_v2f16:
+; CI: s_mov_b32 [[MASK:s[0-9]+]], 0x8000{{$}}
+; CI: v_xor_b32_e32 v{{[0-9]+}}, [[MASK]], v{{[0-9]+}}
+; CI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; CI: v_xor_b32_e32 v{{[0-9]+}}, [[MASK]], v{{[0-9]+}}
+; CI: v_or_b32_e32
+
+; VI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x8000{{$}}
+; VI-DAG: v_xor_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, [[MASK]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: v_xor_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[MASK]]
+
+; GFX9: v_xor_b32_e32 v{{[0-9]+}}, 0x80008000, v{{[0-9]+}}
+
+define amdgpu_kernel void @s_fneg_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %in) #0 {
+ %fneg = fsub <2 x half> <half -0.0, half -0.0>, %in
+ store <2 x half> %fneg, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_v2f16:
+; GCN: flat_load_dword [[VAL:v[0-9]+]]
+; GCN: v_xor_b32_e32 v{{[0-9]+}}, 0x80008000, [[VAL]]
+define amdgpu_kernel void @v_fneg_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.in = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i32 %tid
+ %val = load <2 x half>, <2 x half> addrspace(1)* %gep.in, align 2
+ %fneg = fsub <2 x half> <half -0.0, half -0.0>, %val
+ store <2 x half> %fneg, <2 x half> addrspace(1)* %gep.out
+ ret void
+}
+
+; GCN-LABEL: {{^}}fneg_free_v2f16:
+; GCN: s_load_dword [[VAL:s[0-9]+]]
+; CIVI: s_xor_b32 s{{[0-9]+}}, [[VAL]], 0x80008000
+
+; GFX9: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
+; GFX9: v_xor_b32_e32 v{{[0-9]+}}, 0x80008000, [[VVAL]]
+define amdgpu_kernel void @fneg_free_v2f16(<2 x half> addrspace(1)* %out, i32 %in) #0 {
+ %bc = bitcast i32 %in to <2 x half>
+ %fsub = fsub <2 x half> <half -0.0, half -0.0>, %bc
+ store <2 x half> %fsub, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_fneg_fold_v2f16:
+; GCN: flat_load_dword [[VAL:v[0-9]+]]
+
+; CI: v_cvt_f32_f16_e64 v{{[0-9]+}}, -v{{[0-9]+}}
+; CI: v_cvt_f32_f16_e64 v{{[0-9]+}}, -v{{[0-9]+}}
+; CI: v_mul_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; CI: v_cvt_f16_f32
+; CI: v_mul_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; CI: v_cvt_f16_f32
+
+; VI: v_lshrrev_b32_e32 v{{[0-9]+}}, 16,
+; VI: v_mul_f16_e64 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_mul_f16_e64 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}
+
+; GFX9: v_pk_mul_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} neg_lo:[1,0] neg_hi:[1,0]{{$}}
+define amdgpu_kernel void @v_fneg_fold_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+ %val = load <2 x half>, <2 x half> addrspace(1)* %in
+ %fsub = fsub <2 x half> <half -0.0, half -0.0>, %val
+ %fmul = fmul <2 x half> %fsub, %val
+ store <2 x half> %fmul, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/fneg.f64.ll b/test/CodeGen/AMDGPU/fneg.f64.ll
index b7080f4622a3..9b4b4d6e942a 100644
--- a/test/CodeGen/AMDGPU/fneg.f64.ll
+++ b/test/CodeGen/AMDGPU/fneg.f64.ll
@@ -3,7 +3,7 @@
; FUNC-LABEL: {{^}}fneg_f64:
; GCN: v_xor_b32
-define void @fneg_f64(double addrspace(1)* %out, double %in) {
+define amdgpu_kernel void @fneg_f64(double addrspace(1)* %out, double %in) {
%fneg = fsub double -0.000000e+00, %in
store double %fneg, double addrspace(1)* %out
ret void
@@ -12,7 +12,7 @@ define void @fneg_f64(double addrspace(1)* %out, double %in) {
; FUNC-LABEL: {{^}}fneg_v2f64:
; GCN: v_xor_b32
; GCN: v_xor_b32
-define void @fneg_v2f64(<2 x double> addrspace(1)* nocapture %out, <2 x double> %in) {
+define amdgpu_kernel void @fneg_v2f64(<2 x double> addrspace(1)* nocapture %out, <2 x double> %in) {
%fneg = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %in
store <2 x double> %fneg, <2 x double> addrspace(1)* %out
ret void
@@ -28,7 +28,7 @@ define void @fneg_v2f64(<2 x double> addrspace(1)* nocapture %out, <2 x double>
; GCN: v_xor_b32
; GCN: v_xor_b32
; GCN: v_xor_b32
-define void @fneg_v4f64(<4 x double> addrspace(1)* nocapture %out, <4 x double> %in) {
+define amdgpu_kernel void @fneg_v4f64(<4 x double> addrspace(1)* nocapture %out, <4 x double> %in) {
%fneg = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %in
store <4 x double> %fneg, <4 x double> addrspace(1)* %out
ret void
@@ -40,7 +40,7 @@ define void @fneg_v4f64(<4 x double> addrspace(1)* nocapture %out, <4 x double>
; FUNC-LABEL: {{^}}fneg_free_f64:
; GCN: v_add_f64 {{v\[[0-9]+:[0-9]+\]}}, -{{s\[[0-9]+:[0-9]+\]}}, 0{{$}}
-define void @fneg_free_f64(double addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @fneg_free_f64(double addrspace(1)* %out, i64 %in) {
%bc = bitcast i64 %in to double
%fsub = fsub double 0.0, %bc
store double %fsub, double addrspace(1)* %out
@@ -52,7 +52,7 @@ define void @fneg_free_f64(double addrspace(1)* %out, i64 %in) {
; VI: s_load_dwordx2 [[NEG_VALUE:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN-NOT: xor
; GCN: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, -[[NEG_VALUE]], [[NEG_VALUE]]
-define void @fneg_fold_f64(double addrspace(1)* %out, double %in) {
+define amdgpu_kernel void @fneg_fold_f64(double addrspace(1)* %out, double %in) {
%fsub = fsub double -0.0, %in
%fmul = fmul double %fsub, %in
store double %fmul, double addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/fneg.ll b/test/CodeGen/AMDGPU/fneg.ll
index 007c6dcadd9e..d1eabfb13c9a 100644
--- a/test/CodeGen/AMDGPU/fneg.ll
+++ b/test/CodeGen/AMDGPU/fneg.ll
@@ -6,7 +6,7 @@
; R600: -PV
; GCN: v_xor_b32
-define void @s_fneg_f32(float addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @s_fneg_f32(float addrspace(1)* %out, float %in) {
%fneg = fsub float -0.000000e+00, %in
store float %fneg, float addrspace(1)* %out
ret void
@@ -18,7 +18,7 @@ define void @s_fneg_f32(float addrspace(1)* %out, float %in) {
; GCN: v_xor_b32
; GCN: v_xor_b32
-define void @s_fneg_v2f32(<2 x float> addrspace(1)* nocapture %out, <2 x float> %in) {
+define amdgpu_kernel void @s_fneg_v2f32(<2 x float> addrspace(1)* nocapture %out, <2 x float> %in) {
%fneg = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %in
store <2 x float> %fneg, <2 x float> addrspace(1)* %out
ret void
@@ -34,7 +34,7 @@ define void @s_fneg_v2f32(<2 x float> addrspace(1)* nocapture %out, <2 x float>
; GCN: v_xor_b32
; GCN: v_xor_b32
; GCN: v_xor_b32
-define void @s_fneg_v4f32(<4 x float> addrspace(1)* nocapture %out, <4 x float> %in) {
+define amdgpu_kernel void @s_fneg_v4f32(<4 x float> addrspace(1)* nocapture %out, <4 x float> %in) {
%fneg = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %in
store <4 x float> %fneg, <4 x float> addrspace(1)* %out
ret void
@@ -50,7 +50,7 @@ define void @s_fneg_v4f32(<4 x float> addrspace(1)* nocapture %out, <4 x float>
; R600-NOT: XOR
; R600: -KC0[2].Z
-define void @fsub0_f32(float addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @fsub0_f32(float addrspace(1)* %out, i32 %in) {
%bc = bitcast i32 %in to float
%fsub = fsub float 0.0, %bc
store float %fsub, float addrspace(1)* %out
@@ -66,7 +66,7 @@ define void @fsub0_f32(float addrspace(1)* %out, i32 %in) {
; R600-NOT: XOR
; R600: -PV.W
-define void @fneg_free_f32(float addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @fneg_free_f32(float addrspace(1)* %out, i32 %in) {
%bc = bitcast i32 %in to float
%fsub = fsub float -0.0, %bc
store float %fsub, float addrspace(1)* %out
@@ -78,7 +78,7 @@ define void @fneg_free_f32(float addrspace(1)* %out, i32 %in) {
; VI: s_load_dword [[NEG_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x2c
; GCN-NOT: xor
; GCN: v_mul_f32_e64 v{{[0-9]+}}, -[[NEG_VALUE]], [[NEG_VALUE]]
-define void @fneg_fold_f32(float addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fneg_fold_f32(float addrspace(1)* %out, float %in) {
%fsub = fsub float -0.0, %in
%fmul = fmul float %fsub, %in
store float %fmul, float addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/fold-cndmask.mir b/test/CodeGen/AMDGPU/fold-cndmask.mir
new file mode 100644
index 000000000000..8dfec9166303
--- /dev/null
+++ b/test/CodeGen/AMDGPU/fold-cndmask.mir
@@ -0,0 +1,34 @@
+# RUN: llc -march=amdgcn -run-pass si-fold-operands -verify-machineinstrs -o - %s | FileCheck %s
+
+# CHECK: %1 = V_MOV_B32_e32 0, implicit %exec
+# CHECK: %2 = V_MOV_B32_e32 0, implicit %exec
+# CHECK: %4 = COPY %3
+# CHECK: %5 = V_MOV_B32_e32 0, implicit %exec
+# CHECK: %6 = V_MOV_B32_e32 0, implicit %exec
+# CHECK: %7 = COPY %3
+
+---
+name: fold_cndmask
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: sgpr_64 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: vgpr_32 }
+ - { id: 5, class: vgpr_32 }
+ - { id: 6, class: vgpr_32 }
+ - { id: 7, class: vgpr_32 }
+body: |
+ bb.0.entry:
+ %0 = IMPLICIT_DEF
+ %1 = V_CNDMASK_B32_e64 0, 0, %0, implicit %exec
+ %2 = V_CNDMASK_B32_e64 %1, %1, %0, implicit %exec
+ %3 = IMPLICIT_DEF
+ %4 = V_CNDMASK_B32_e64 %3, %3, %0, implicit %exec
+ %5 = COPY %1
+ %6 = V_CNDMASK_B32_e64 %5, 0, %0, implicit %exec
+ %vcc = IMPLICIT_DEF
+ %7 = V_CNDMASK_B32_e32 %3, %3, implicit %exec, implicit %vcc
+
+...
diff --git a/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir b/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir
new file mode 100644
index 000000000000..986c6b296c96
--- /dev/null
+++ b/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir
@@ -0,0 +1,306 @@
+# RUN: llc -march=amdgcn -run-pass peephole-opt -verify-machineinstrs %s -o - | FileCheck -check-prefix=GCN %s
+
+--- |
+ define amdgpu_kernel void @no_fold_imm_madak_mac_clamp_f32() #0 {
+ ret void
+ }
+
+ define amdgpu_kernel void @no_fold_imm_madak_mac_omod_f32() #0 {
+ ret void
+ }
+
+ define amdgpu_kernel void @no_fold_imm_madak_mad_clamp_f32() #0 {
+ ret void
+ }
+
+ define amdgpu_kernel void @no_fold_imm_madak_mad_omod_f32() #0 {
+ ret void
+ }
+
+ attributes #0 = { nounwind }
+
+...
+---
+# GCN-LABEL: name: no_fold_imm_madak_mac_clamp_f32
+# GCN: %23 = V_MOV_B32_e32 1090519040, implicit %exec
+# GCN-NEXT: %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit %exec
+
+name: no_fold_imm_madak_mac_clamp_f32
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: sgpr_64 }
+ - { id: 1, class: sreg_32_xm0 }
+ - { id: 2, class: sgpr_32 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_64_xexec }
+ - { id: 5, class: sreg_64_xexec }
+ - { id: 6, class: sreg_64_xexec }
+ - { id: 7, class: sreg_32 }
+ - { id: 8, class: sreg_32 }
+ - { id: 9, class: sreg_32_xm0 }
+ - { id: 10, class: sreg_64 }
+ - { id: 11, class: sreg_32_xm0 }
+ - { id: 12, class: sreg_32_xm0 }
+ - { id: 13, class: sgpr_64 }
+ - { id: 14, class: sgpr_128 }
+ - { id: 15, class: sreg_32_xm0 }
+ - { id: 16, class: sreg_64 }
+ - { id: 17, class: sgpr_128 }
+ - { id: 18, class: sgpr_128 }
+ - { id: 19, class: vgpr_32 }
+ - { id: 20, class: vreg_64 }
+ - { id: 21, class: vgpr_32 }
+ - { id: 22, class: vreg_64 }
+ - { id: 23, class: vgpr_32 }
+ - { id: 24, class: vgpr_32 }
+ - { id: 25, class: vgpr_32 }
+ - { id: 26, class: vreg_64 }
+ - { id: 27, class: vgpr_32 }
+ - { id: 28, class: vreg_64 }
+ - { id: 29, class: vreg_64 }
+liveins:
+ - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+ - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %sgpr0_sgpr1, %vgpr0
+
+ %3 = COPY %vgpr0
+ %0 = COPY %sgpr0_sgpr1
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %6 = S_LOAD_DWORDX2_IMM %0, 13, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+ %28 = REG_SEQUENCE %3, 1, %27, 2
+ %11 = S_MOV_B32 61440
+ %12 = S_MOV_B32 0
+ %13 = REG_SEQUENCE killed %12, 1, killed %11, 2
+ %14 = REG_SEQUENCE killed %5, 17, %13, 18
+ %15 = S_MOV_B32 2
+ %29 = V_LSHL_B64 killed %28, killed %15, implicit %exec
+ %17 = REG_SEQUENCE killed %6, 17, %13, 18
+ %18 = REG_SEQUENCE killed %4, 17, %13, 18
+ %20 = COPY %29
+ %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit %exec
+ %22 = COPY %29
+ %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit %exec
+ %23 = V_MOV_B32_e32 1090519040, implicit %exec
+ %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit %exec
+ %26 = COPY %29
+ BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit %exec
+ S_ENDPGM
+
+...
+---
+# GCN-LABEL: name: no_fold_imm_madak_mac_omod_f32
+# GCN: %23 = V_MOV_B32_e32 1090519040, implicit %exec
+# GCN: %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 0, 2, implicit %exec
+
+name: no_fold_imm_madak_mac_omod_f32
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: sgpr_64 }
+ - { id: 1, class: sreg_32_xm0 }
+ - { id: 2, class: sgpr_32 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_64_xexec }
+ - { id: 5, class: sreg_64_xexec }
+ - { id: 6, class: sreg_64_xexec }
+ - { id: 7, class: sreg_32 }
+ - { id: 8, class: sreg_32 }
+ - { id: 9, class: sreg_32_xm0 }
+ - { id: 10, class: sreg_64 }
+ - { id: 11, class: sreg_32_xm0 }
+ - { id: 12, class: sreg_32_xm0 }
+ - { id: 13, class: sgpr_64 }
+ - { id: 14, class: sgpr_128 }
+ - { id: 15, class: sreg_32_xm0 }
+ - { id: 16, class: sreg_64 }
+ - { id: 17, class: sgpr_128 }
+ - { id: 18, class: sgpr_128 }
+ - { id: 19, class: vgpr_32 }
+ - { id: 20, class: vreg_64 }
+ - { id: 21, class: vgpr_32 }
+ - { id: 22, class: vreg_64 }
+ - { id: 23, class: vgpr_32 }
+ - { id: 24, class: vgpr_32 }
+ - { id: 25, class: vgpr_32 }
+ - { id: 26, class: vreg_64 }
+ - { id: 27, class: vgpr_32 }
+ - { id: 28, class: vreg_64 }
+ - { id: 29, class: vreg_64 }
+liveins:
+ - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+ - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %sgpr0_sgpr1, %vgpr0
+
+ %3 = COPY %vgpr0
+ %0 = COPY %sgpr0_sgpr1
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %6 = S_LOAD_DWORDX2_IMM %0, 13, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+ %28 = REG_SEQUENCE %3, 1, %27, 2
+ %11 = S_MOV_B32 61440
+ %12 = S_MOV_B32 0
+ %13 = REG_SEQUENCE killed %12, 1, killed %11, 2
+ %14 = REG_SEQUENCE killed %5, 17, %13, 18
+ %15 = S_MOV_B32 2
+ %29 = V_LSHL_B64 killed %28, killed %15, implicit %exec
+ %17 = REG_SEQUENCE killed %6, 17, %13, 18
+ %18 = REG_SEQUENCE killed %4, 17, %13, 18
+ %20 = COPY %29
+ %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit %exec
+ %22 = COPY %29
+ %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit %exec
+ %23 = V_MOV_B32_e32 1090519040, implicit %exec
+ %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 0, 2, implicit %exec
+ %26 = COPY %29
+ BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit %exec
+ S_ENDPGM
+
+...
+---
+# GCN-LABEL: name: no_fold_imm_madak_mad_clamp_f32
+# GCN: %23 = V_MOV_B32_e32 1090519040, implicit %exec
+# GCN: %24 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit %exec
+
+name: no_fold_imm_madak_mad_clamp_f32
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: sgpr_64 }
+ - { id: 1, class: sreg_32_xm0 }
+ - { id: 2, class: sgpr_32 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_64_xexec }
+ - { id: 5, class: sreg_64_xexec }
+ - { id: 6, class: sreg_64_xexec }
+ - { id: 7, class: sreg_32 }
+ - { id: 8, class: sreg_32 }
+ - { id: 9, class: sreg_32_xm0 }
+ - { id: 10, class: sreg_64 }
+ - { id: 11, class: sreg_32_xm0 }
+ - { id: 12, class: sreg_32_xm0 }
+ - { id: 13, class: sgpr_64 }
+ - { id: 14, class: sgpr_128 }
+ - { id: 15, class: sreg_32_xm0 }
+ - { id: 16, class: sreg_64 }
+ - { id: 17, class: sgpr_128 }
+ - { id: 18, class: sgpr_128 }
+ - { id: 19, class: vgpr_32 }
+ - { id: 20, class: vreg_64 }
+ - { id: 21, class: vgpr_32 }
+ - { id: 22, class: vreg_64 }
+ - { id: 23, class: vgpr_32 }
+ - { id: 24, class: vgpr_32 }
+ - { id: 25, class: vgpr_32 }
+ - { id: 26, class: vreg_64 }
+ - { id: 27, class: vgpr_32 }
+ - { id: 28, class: vreg_64 }
+ - { id: 29, class: vreg_64 }
+liveins:
+ - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+ - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %sgpr0_sgpr1, %vgpr0
+
+ %3 = COPY %vgpr0
+ %0 = COPY %sgpr0_sgpr1
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %6 = S_LOAD_DWORDX2_IMM %0, 13, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+ %28 = REG_SEQUENCE %3, 1, %27, 2
+ %11 = S_MOV_B32 61440
+ %12 = S_MOV_B32 0
+ %13 = REG_SEQUENCE killed %12, 1, killed %11, 2
+ %14 = REG_SEQUENCE killed %5, 17, %13, 18
+ %15 = S_MOV_B32 2
+ %29 = V_LSHL_B64 killed %28, killed %15, implicit %exec
+ %17 = REG_SEQUENCE killed %6, 17, %13, 18
+ %18 = REG_SEQUENCE killed %4, 17, %13, 18
+ %20 = COPY %29
+ %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit %exec
+ %22 = COPY %29
+ %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit %exec
+ %23 = V_MOV_B32_e32 1090519040, implicit %exec
+ %24 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit %exec
+ %26 = COPY %29
+ BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit %exec
+ S_ENDPGM
+
+...
+---
+# GCN-LABEL: name: no_fold_imm_madak_mad_omod_f32
+# GCN: %23 = V_MOV_B32_e32 1090519040, implicit %exec
+# GCN: %24 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 0, 1, implicit %exec
+
+name: no_fold_imm_madak_mad_omod_f32
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: sgpr_64 }
+ - { id: 1, class: sreg_32_xm0 }
+ - { id: 2, class: sgpr_32 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_64_xexec }
+ - { id: 5, class: sreg_64_xexec }
+ - { id: 6, class: sreg_64_xexec }
+ - { id: 7, class: sreg_32 }
+ - { id: 8, class: sreg_32 }
+ - { id: 9, class: sreg_32_xm0 }
+ - { id: 10, class: sreg_64 }
+ - { id: 11, class: sreg_32_xm0 }
+ - { id: 12, class: sreg_32_xm0 }
+ - { id: 13, class: sgpr_64 }
+ - { id: 14, class: sgpr_128 }
+ - { id: 15, class: sreg_32_xm0 }
+ - { id: 16, class: sreg_64 }
+ - { id: 17, class: sgpr_128 }
+ - { id: 18, class: sgpr_128 }
+ - { id: 19, class: vgpr_32 }
+ - { id: 20, class: vreg_64 }
+ - { id: 21, class: vgpr_32 }
+ - { id: 22, class: vreg_64 }
+ - { id: 23, class: vgpr_32 }
+ - { id: 24, class: vgpr_32 }
+ - { id: 25, class: vgpr_32 }
+ - { id: 26, class: vreg_64 }
+ - { id: 27, class: vgpr_32 }
+ - { id: 28, class: vreg_64 }
+ - { id: 29, class: vreg_64 }
+liveins:
+ - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+ - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %sgpr0_sgpr1, %vgpr0
+
+ %3 = COPY %vgpr0
+ %0 = COPY %sgpr0_sgpr1
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %6 = S_LOAD_DWORDX2_IMM %0, 13, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+ %28 = REG_SEQUENCE %3, 1, %27, 2
+ %11 = S_MOV_B32 61440
+ %12 = S_MOV_B32 0
+ %13 = REG_SEQUENCE killed %12, 1, killed %11, 2
+ %14 = REG_SEQUENCE killed %5, 17, %13, 18
+ %15 = S_MOV_B32 2
+ %29 = V_LSHL_B64 killed %28, killed %15, implicit %exec
+ %17 = REG_SEQUENCE killed %6, 17, %13, 18
+ %18 = REG_SEQUENCE killed %4, 17, %13, 18
+ %20 = COPY %29
+ %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit %exec
+ %22 = COPY %29
+ %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit %exec
+ %23 = V_MOV_B32_e32 1090519040, implicit %exec
+ %24 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 0, 1, implicit %exec
+ %26 = COPY %29
+ BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit %exec
+ S_ENDPGM
+
+...
diff --git a/test/CodeGen/AMDGPU/fp-classify.ll b/test/CodeGen/AMDGPU/fp-classify.ll
index b7ffaed70c5a..cbc42979f2ee 100644
--- a/test/CodeGen/AMDGPU/fp-classify.ll
+++ b/test/CodeGen/AMDGPU/fp-classify.ll
@@ -9,7 +9,7 @@ declare double @llvm.fabs.f64(double) #1
; SI: v_cmp_class_f32_e32 vcc, s{{[0-9]+}}, [[MASK]]
; SI-NOT: v_cmp
; SI: s_endpgm
-define void @test_isinf_pattern(i32 addrspace(1)* nocapture %out, float %x) #0 {
+define amdgpu_kernel void @test_isinf_pattern(i32 addrspace(1)* nocapture %out, float %x) #0 {
%fabs = tail call float @llvm.fabs.f32(float %x) #1
%cmp = fcmp oeq float %fabs, 0x7FF0000000000000
%ext = zext i1 %cmp to i32
@@ -20,7 +20,7 @@ define void @test_isinf_pattern(i32 addrspace(1)* nocapture %out, float %x) #0 {
; SI-LABEL: {{^}}test_not_isinf_pattern_0:
; SI-NOT: v_cmp_class
; SI: s_endpgm
-define void @test_not_isinf_pattern_0(i32 addrspace(1)* nocapture %out, float %x) #0 {
+define amdgpu_kernel void @test_not_isinf_pattern_0(i32 addrspace(1)* nocapture %out, float %x) #0 {
%fabs = tail call float @llvm.fabs.f32(float %x) #1
%cmp = fcmp ueq float %fabs, 0x7FF0000000000000
%ext = zext i1 %cmp to i32
@@ -31,7 +31,7 @@ define void @test_not_isinf_pattern_0(i32 addrspace(1)* nocapture %out, float %x
; SI-LABEL: {{^}}test_not_isinf_pattern_1:
; SI-NOT: v_cmp_class
; SI: s_endpgm
-define void @test_not_isinf_pattern_1(i32 addrspace(1)* nocapture %out, float %x) #0 {
+define amdgpu_kernel void @test_not_isinf_pattern_1(i32 addrspace(1)* nocapture %out, float %x) #0 {
%fabs = tail call float @llvm.fabs.f32(float %x) #1
%cmp = fcmp oeq float %fabs, 0xFFF0000000000000
%ext = zext i1 %cmp to i32
@@ -45,7 +45,7 @@ define void @test_not_isinf_pattern_1(i32 addrspace(1)* nocapture %out, float %x
; SI: v_cmp_class_f32_e32 vcc, s{{[0-9]+}}, [[MASK]]
; SI-NOT: v_cmp
; SI: s_endpgm
-define void @test_isfinite_pattern_0(i32 addrspace(1)* nocapture %out, float %x) #0 {
+define amdgpu_kernel void @test_isfinite_pattern_0(i32 addrspace(1)* nocapture %out, float %x) #0 {
%ord = fcmp ord float %x, 0.000000e+00
%x.fabs = tail call float @llvm.fabs.f32(float %x) #1
%ninf = fcmp une float %x.fabs, 0x7FF0000000000000
@@ -59,7 +59,7 @@ define void @test_isfinite_pattern_0(i32 addrspace(1)* nocapture %out, float %x)
; SI-LABEL: {{^}}test_isfinite_not_pattern_0:
; SI-NOT: v_cmp_class_f32
; SI: s_endpgm
-define void @test_isfinite_not_pattern_0(i32 addrspace(1)* nocapture %out, float %x) #0 {
+define amdgpu_kernel void @test_isfinite_not_pattern_0(i32 addrspace(1)* nocapture %out, float %x) #0 {
%ord = fcmp ord float %x, 0.000000e+00
%x.fabs = tail call float @llvm.fabs.f32(float %x) #1
%ninf = fcmp une float %x.fabs, 0xFFF0000000000000
@@ -73,7 +73,7 @@ define void @test_isfinite_not_pattern_0(i32 addrspace(1)* nocapture %out, float
; SI-LABEL: {{^}}test_isfinite_not_pattern_1:
; SI-NOT: v_cmp_class_f32
; SI: s_endpgm
-define void @test_isfinite_not_pattern_1(i32 addrspace(1)* nocapture %out, float %x) #0 {
+define amdgpu_kernel void @test_isfinite_not_pattern_1(i32 addrspace(1)* nocapture %out, float %x) #0 {
%ord = fcmp ord float %x, 0.000000e+00
%ninf = fcmp une float %x, 0x7FF0000000000000
%and = and i1 %ord, %ninf
@@ -86,7 +86,7 @@ define void @test_isfinite_not_pattern_1(i32 addrspace(1)* nocapture %out, float
; SI-LABEL: {{^}}test_isfinite_not_pattern_2:
; SI-NOT: v_cmp_class_f32
; SI: s_endpgm
-define void @test_isfinite_not_pattern_2(i32 addrspace(1)* nocapture %out, float %x, float %y) #0 {
+define amdgpu_kernel void @test_isfinite_not_pattern_2(i32 addrspace(1)* nocapture %out, float %x, float %y) #0 {
%ord = fcmp ord float %x, 0.000000e+00
%x.fabs = tail call float @llvm.fabs.f32(float %y) #1
%ninf = fcmp une float %x.fabs, 0x7FF0000000000000
@@ -100,7 +100,7 @@ define void @test_isfinite_not_pattern_2(i32 addrspace(1)* nocapture %out, float
; SI-LABEL: {{^}}test_isfinite_not_pattern_3:
; SI-NOT: v_cmp_class_f32
; SI: s_endpgm
-define void @test_isfinite_not_pattern_3(i32 addrspace(1)* nocapture %out, float %x) #0 {
+define amdgpu_kernel void @test_isfinite_not_pattern_3(i32 addrspace(1)* nocapture %out, float %x) #0 {
%ord = fcmp uno float %x, 0.000000e+00
%x.fabs = tail call float @llvm.fabs.f32(float %x) #1
%ninf = fcmp une float %x.fabs, 0x7FF0000000000000
@@ -114,7 +114,7 @@ define void @test_isfinite_not_pattern_3(i32 addrspace(1)* nocapture %out, float
; SI-LABEL: {{^}}test_isfinite_not_pattern_4:
; SI-NOT: v_cmp_class_f32
; SI: s_endpgm
-define void @test_isfinite_not_pattern_4(i32 addrspace(1)* nocapture %out, float %x) #0 {
+define amdgpu_kernel void @test_isfinite_not_pattern_4(i32 addrspace(1)* nocapture %out, float %x) #0 {
%ord = fcmp ord float %x, 0.000000e+00
%x.fabs = tail call float @llvm.fabs.f32(float %x) #1
%ninf = fcmp one float %x.fabs, 0x7FF0000000000000
diff --git a/test/CodeGen/AMDGPU/fp16_to_fp32.ll b/test/CodeGen/AMDGPU/fp16_to_fp32.ll
index 01bc53ff35a5..ce041364b76d 100644
--- a/test/CodeGen/AMDGPU/fp16_to_fp32.ll
+++ b/test/CodeGen/AMDGPU/fp16_to_fp32.ll
@@ -14,7 +14,7 @@ declare float @llvm.convert.from.fp16.f32(i16) nounwind readnone
; CM: MEM_RAT_CACHELESS STORE_DWORD [[RES:T[0-9]+\.[XYZW]]]
; EGCM: VTX_READ_16 [[VAL:T[0-9]+\.[XYZW]]]
; EGCM: FLT16_TO_FLT32{{[ *]*}}[[RES]], [[VAL]]
-define void @test_convert_fp16_to_fp32(float addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @test_convert_fp16_to_fp32(float addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in) nounwind {
%val = load i16, i16 addrspace(1)* %in, align 2
%cvt = call float @llvm.convert.from.fp16.f32(i16 %val) nounwind readnone
store float %cvt, float addrspace(1)* %out, align 4
diff --git a/test/CodeGen/AMDGPU/fp16_to_fp64.ll b/test/CodeGen/AMDGPU/fp16_to_fp64.ll
index a9f493bf0ccd..70f0c0c1afdb 100644
--- a/test/CodeGen/AMDGPU/fp16_to_fp64.ll
+++ b/test/CodeGen/AMDGPU/fp16_to_fp64.ll
@@ -8,7 +8,7 @@ declare double @llvm.convert.from.fp16.f64(i16) nounwind readnone
; GCN: v_cvt_f32_f16_e32 [[RESULT32:v[0-9]+]], [[VAL]]
; GCN: v_cvt_f64_f32_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[RESULT32]]
; GCN: buffer_store_dwordx2 [[RESULT]]
-define void @test_convert_fp16_to_fp64(double addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @test_convert_fp16_to_fp64(double addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in) nounwind {
%val = load i16, i16 addrspace(1)* %in, align 2
%cvt = call double @llvm.convert.from.fp16.f64(i16 %val) nounwind readnone
store double %cvt, double addrspace(1)* %out, align 4
diff --git a/test/CodeGen/AMDGPU/fp32_to_fp16.ll b/test/CodeGen/AMDGPU/fp32_to_fp16.ll
index 3e426e3e94b1..2c6b1cb18f7e 100644
--- a/test/CodeGen/AMDGPU/fp32_to_fp16.ll
+++ b/test/CodeGen/AMDGPU/fp32_to_fp16.ll
@@ -12,7 +12,7 @@ declare i16 @llvm.convert.to.fp16.f32(float) nounwind readnone
; EG: MEM_RAT MSKOR
; EG: VTX_READ_32
; EG: FLT32_TO_FLT16
-define void @test_convert_fp32_to_fp16(i16 addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @test_convert_fp32_to_fp16(i16 addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%val = load float, float addrspace(1)* %in, align 4
%cvt = call i16 @llvm.convert.to.fp16.f32(float %val) nounwind readnone
store i16 %cvt, i16 addrspace(1)* %out, align 2
diff --git a/test/CodeGen/AMDGPU/fp_to_sint.f64.ll b/test/CodeGen/AMDGPU/fp_to_sint.f64.ll
index 1537d67cadcc..a7cddd09b762 100644
--- a/test/CodeGen/AMDGPU/fp_to_sint.f64.ll
+++ b/test/CodeGen/AMDGPU/fp_to_sint.f64.ll
@@ -6,7 +6,7 @@ declare double @llvm.fabs.f64(double) #1
; FUNC-LABEL: @fp_to_sint_f64_i32
; SI: v_cvt_i32_f64_e32
-define void @fp_to_sint_f64_i32(i32 addrspace(1)* %out, double %in) {
+define amdgpu_kernel void @fp_to_sint_f64_i32(i32 addrspace(1)* %out, double %in) {
%result = fptosi double %in to i32
store i32 %result, i32 addrspace(1)* %out
ret void
@@ -15,7 +15,7 @@ define void @fp_to_sint_f64_i32(i32 addrspace(1)* %out, double %in) {
; FUNC-LABEL: @fp_to_sint_v2f64_v2i32
; SI: v_cvt_i32_f64_e32
; SI: v_cvt_i32_f64_e32
-define void @fp_to_sint_v2f64_v2i32(<2 x i32> addrspace(1)* %out, <2 x double> %in) {
+define amdgpu_kernel void @fp_to_sint_v2f64_v2i32(<2 x i32> addrspace(1)* %out, <2 x double> %in) {
%result = fptosi <2 x double> %in to <2 x i32>
store <2 x i32> %result, <2 x i32> addrspace(1)* %out
ret void
@@ -26,7 +26,7 @@ define void @fp_to_sint_v2f64_v2i32(<2 x i32> addrspace(1)* %out, <2 x double> %
; SI: v_cvt_i32_f64_e32
; SI: v_cvt_i32_f64_e32
; SI: v_cvt_i32_f64_e32
-define void @fp_to_sint_v4f64_v4i32(<4 x i32> addrspace(1)* %out, <4 x double> %in) {
+define amdgpu_kernel void @fp_to_sint_v4f64_v4i32(<4 x i32> addrspace(1)* %out, <4 x double> %in) {
%result = fptosi <4 x double> %in to <4 x i32>
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
ret void
@@ -47,7 +47,7 @@ define void @fp_to_sint_v4f64_v4i32(<4 x i32> addrspace(1)* %out, <4 x double> %
; CI-DAG: v_cvt_u32_f64_e32 v[[LO:[0-9]+]], [[FMA]]
; CI-DAG: v_cvt_i32_f64_e32 v[[HI:[0-9]+]], [[FLOOR]]
; CI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @fp_to_sint_i64_f64(i64 addrspace(1)* %out, double addrspace(1)* %in) {
+define amdgpu_kernel void @fp_to_sint_i64_f64(i64 addrspace(1)* %out, double addrspace(1)* %in) {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep = getelementptr double, double addrspace(1)* %in, i32 %tid
%val = load double, double addrspace(1)* %gep, align 8
@@ -58,7 +58,7 @@ define void @fp_to_sint_i64_f64(i64 addrspace(1)* %out, double addrspace(1)* %in
; FUNC-LABEL: {{^}}fp_to_sint_f64_to_i1:
; SI: v_cmp_eq_f64_e64 s{{\[[0-9]+:[0-9]+\]}}, -1.0, s{{\[[0-9]+:[0-9]+\]}}
-define void @fp_to_sint_f64_to_i1(i1 addrspace(1)* %out, double %in) #0 {
+define amdgpu_kernel void @fp_to_sint_f64_to_i1(i1 addrspace(1)* %out, double %in) #0 {
%conv = fptosi double %in to i1
store i1 %conv, i1 addrspace(1)* %out
ret void
@@ -66,7 +66,7 @@ define void @fp_to_sint_f64_to_i1(i1 addrspace(1)* %out, double %in) #0 {
; FUNC-LABEL: {{^}}fp_to_sint_fabs_f64_to_i1:
; SI: v_cmp_eq_f64_e64 s{{\[[0-9]+:[0-9]+\]}}, -1.0, |s{{\[[0-9]+:[0-9]+\]}}|
-define void @fp_to_sint_fabs_f64_to_i1(i1 addrspace(1)* %out, double %in) #0 {
+define amdgpu_kernel void @fp_to_sint_fabs_f64_to_i1(i1 addrspace(1)* %out, double %in) #0 {
%in.fabs = call double @llvm.fabs.f64(double %in)
%conv = fptosi double %in.fabs to i1
store i1 %conv, i1 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/fp_to_sint.ll b/test/CodeGen/AMDGPU/fp_to_sint.ll
index a2fa7a190745..630a7186e101 100644
--- a/test/CodeGen/AMDGPU/fp_to_sint.ll
+++ b/test/CodeGen/AMDGPU/fp_to_sint.ll
@@ -8,7 +8,7 @@ declare float @llvm.fabs.f32(float) #1
; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
; SI: v_cvt_i32_f32_e32
; SI: s_endpgm
-define void @fp_to_sint_i32(i32 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fp_to_sint_i32(i32 addrspace(1)* %out, float %in) {
%conv = fptosi float %in to i32
store i32 %conv, i32 addrspace(1)* %out
ret void
@@ -16,7 +16,7 @@ define void @fp_to_sint_i32(i32 addrspace(1)* %out, float %in) {
; FUNC-LABEL: {{^}}fp_to_sint_i32_fabs:
; SI: v_cvt_i32_f32_e64 v{{[0-9]+}}, |s{{[0-9]+}}|{{$}}
-define void @fp_to_sint_i32_fabs(i32 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fp_to_sint_i32_fabs(i32 addrspace(1)* %out, float %in) {
%in.fabs = call float @llvm.fabs.f32(float %in)
%conv = fptosi float %in.fabs to i32
store i32 %conv, i32 addrspace(1)* %out
@@ -28,7 +28,7 @@ define void @fp_to_sint_i32_fabs(i32 addrspace(1)* %out, float %in) {
; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
; SI: v_cvt_i32_f32_e32
; SI: v_cvt_i32_f32_e32
-define void @fp_to_sint_v2i32(<2 x i32> addrspace(1)* %out, <2 x float> %in) {
+define amdgpu_kernel void @fp_to_sint_v2i32(<2 x i32> addrspace(1)* %out, <2 x float> %in) {
%result = fptosi <2 x float> %in to <2 x i32>
store <2 x i32> %result, <2 x i32> addrspace(1)* %out
ret void
@@ -43,7 +43,7 @@ define void @fp_to_sint_v2i32(<2 x i32> addrspace(1)* %out, <2 x float> %in) {
; SI: v_cvt_i32_f32_e32
; SI: v_cvt_i32_f32_e32
; SI: v_cvt_i32_f32_e32
-define void @fp_to_sint_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+define amdgpu_kernel void @fp_to_sint_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
%value = load <4 x float>, <4 x float> addrspace(1) * %in
%result = fptosi <4 x float> %value to <4 x i32>
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
@@ -76,7 +76,7 @@ define void @fp_to_sint_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspac
; Check that the compiler doesn't crash with a "cannot select" error
; SI: s_endpgm
-define void @fp_to_sint_i64 (i64 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fp_to_sint_i64 (i64 addrspace(1)* %out, float %in) {
entry:
%0 = fptosi float %in to i64
store i64 %0, i64 addrspace(1)* %out
@@ -128,7 +128,7 @@ entry:
; EG-DAG: CNDE_INT
; SI: s_endpgm
-define void @fp_to_sint_v2i64(<2 x i64> addrspace(1)* %out, <2 x float> %x) {
+define amdgpu_kernel void @fp_to_sint_v2i64(<2 x i64> addrspace(1)* %out, <2 x float> %x) {
%conv = fptosi <2 x float> %x to <2 x i64>
store <2 x i64> %conv, <2 x i64> addrspace(1)* %out
ret void
@@ -221,7 +221,7 @@ define void @fp_to_sint_v2i64(<2 x i64> addrspace(1)* %out, <2 x float> %x) {
; EG-DAG: CNDE_INT
; SI: s_endpgm
-define void @fp_to_sint_v4i64(<4 x i64> addrspace(1)* %out, <4 x float> %x) {
+define amdgpu_kernel void @fp_to_sint_v4i64(<4 x i64> addrspace(1)* %out, <4 x float> %x) {
%conv = fptosi <4 x float> %x to <4 x i64>
store <4 x i64> %conv, <4 x i64> addrspace(1)* %out
ret void
@@ -233,7 +233,7 @@ define void @fp_to_sint_v4i64(<4 x i64> addrspace(1)* %out, <4 x float> %x) {
; EG: AND_INT
; EG: SETE_DX10 {{[*]?}} T{{[0-9]+}}.{{[XYZW]}}, KC0[2].Z, literal.y,
; EG-NEXT: -1082130432(-1.000000e+00)
-define void @fp_to_uint_f32_to_i1(i1 addrspace(1)* %out, float %in) #0 {
+define amdgpu_kernel void @fp_to_uint_f32_to_i1(i1 addrspace(1)* %out, float %in) #0 {
%conv = fptosi float %in to i1
store i1 %conv, i1 addrspace(1)* %out
ret void
@@ -241,7 +241,7 @@ define void @fp_to_uint_f32_to_i1(i1 addrspace(1)* %out, float %in) #0 {
; FUNC-LABEL: {{^}}fp_to_uint_fabs_f32_to_i1:
; SI: v_cmp_eq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, -1.0, |s{{[0-9]+}}|
-define void @fp_to_uint_fabs_f32_to_i1(i1 addrspace(1)* %out, float %in) #0 {
+define amdgpu_kernel void @fp_to_uint_fabs_f32_to_i1(i1 addrspace(1)* %out, float %in) #0 {
%in.fabs = call float @llvm.fabs.f32(float %in)
%conv = fptosi float %in.fabs to i1
store i1 %conv, i1 addrspace(1)* %out
@@ -251,7 +251,7 @@ define void @fp_to_uint_fabs_f32_to_i1(i1 addrspace(1)* %out, float %in) #0 {
; FUNC-LABEL: {{^}}fp_to_sint_f32_i16:
; GCN: v_cvt_i32_f32_e32 [[VAL:v[0-9]+]], s{{[0-9]+}}
; GCN: buffer_store_short [[VAL]]
-define void @fp_to_sint_f32_i16(i16 addrspace(1)* %out, float %in) #0 {
+define amdgpu_kernel void @fp_to_sint_f32_i16(i16 addrspace(1)* %out, float %in) #0 {
%sint = fptosi float %in to i16
store i16 %sint, i16 addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/fp_to_uint.f64.ll b/test/CodeGen/AMDGPU/fp_to_uint.f64.ll
index d5bc416434df..4f597eb3f32c 100644
--- a/test/CodeGen/AMDGPU/fp_to_uint.f64.ll
+++ b/test/CodeGen/AMDGPU/fp_to_uint.f64.ll
@@ -6,7 +6,7 @@ declare double @llvm.fabs.f64(double) #1
; SI-LABEL: {{^}}fp_to_uint_i32_f64:
; SI: v_cvt_u32_f64_e32
-define void @fp_to_uint_i32_f64(i32 addrspace(1)* %out, double %in) {
+define amdgpu_kernel void @fp_to_uint_i32_f64(i32 addrspace(1)* %out, double %in) {
%cast = fptoui double %in to i32
store i32 %cast, i32 addrspace(1)* %out, align 4
ret void
@@ -15,7 +15,7 @@ define void @fp_to_uint_i32_f64(i32 addrspace(1)* %out, double %in) {
; SI-LABEL: @fp_to_uint_v2i32_v2f64
; SI: v_cvt_u32_f64_e32
; SI: v_cvt_u32_f64_e32
-define void @fp_to_uint_v2i32_v2f64(<2 x i32> addrspace(1)* %out, <2 x double> %in) {
+define amdgpu_kernel void @fp_to_uint_v2i32_v2f64(<2 x i32> addrspace(1)* %out, <2 x double> %in) {
%cast = fptoui <2 x double> %in to <2 x i32>
store <2 x i32> %cast, <2 x i32> addrspace(1)* %out, align 8
ret void
@@ -26,7 +26,7 @@ define void @fp_to_uint_v2i32_v2f64(<2 x i32> addrspace(1)* %out, <2 x double> %
; SI: v_cvt_u32_f64_e32
; SI: v_cvt_u32_f64_e32
; SI: v_cvt_u32_f64_e32
-define void @fp_to_uint_v4i32_v4f64(<4 x i32> addrspace(1)* %out, <4 x double> %in) {
+define amdgpu_kernel void @fp_to_uint_v4i32_v4f64(<4 x i32> addrspace(1)* %out, <4 x double> %in) {
%cast = fptoui <4 x double> %in to <4 x i32>
store <4 x i32> %cast, <4 x i32> addrspace(1)* %out, align 8
ret void
@@ -47,7 +47,7 @@ define void @fp_to_uint_v4i32_v4f64(<4 x i32> addrspace(1)* %out, <4 x double> %
; CI-DAG: v_cvt_u32_f64_e32 v[[LO:[0-9]+]], [[FMA]]
; CI-DAG: v_cvt_u32_f64_e32 v[[HI:[0-9]+]], [[FLOOR]]
; CI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @fp_to_uint_i64_f64(i64 addrspace(1)* %out, double addrspace(1)* %in) {
+define amdgpu_kernel void @fp_to_uint_i64_f64(i64 addrspace(1)* %out, double addrspace(1)* %in) {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep = getelementptr double, double addrspace(1)* %in, i32 %tid
%val = load double, double addrspace(1)* %gep, align 8
@@ -57,14 +57,14 @@ define void @fp_to_uint_i64_f64(i64 addrspace(1)* %out, double addrspace(1)* %in
}
; SI-LABEL: @fp_to_uint_v2i64_v2f64
-define void @fp_to_uint_v2i64_v2f64(<2 x i64> addrspace(1)* %out, <2 x double> %in) {
+define amdgpu_kernel void @fp_to_uint_v2i64_v2f64(<2 x i64> addrspace(1)* %out, <2 x double> %in) {
%cast = fptoui <2 x double> %in to <2 x i64>
store <2 x i64> %cast, <2 x i64> addrspace(1)* %out, align 16
ret void
}
; SI-LABEL: @fp_to_uint_v4i64_v4f64
-define void @fp_to_uint_v4i64_v4f64(<4 x i64> addrspace(1)* %out, <4 x double> %in) {
+define amdgpu_kernel void @fp_to_uint_v4i64_v4f64(<4 x i64> addrspace(1)* %out, <4 x double> %in) {
%cast = fptoui <4 x double> %in to <4 x i64>
store <4 x i64> %cast, <4 x i64> addrspace(1)* %out, align 32
ret void
@@ -72,7 +72,7 @@ define void @fp_to_uint_v4i64_v4f64(<4 x i64> addrspace(1)* %out, <4 x double> %
; FUNC-LABEL: {{^}}fp_to_uint_f64_to_i1:
; SI: v_cmp_eq_f64_e64 s{{\[[0-9]+:[0-9]+\]}}, 1.0, s{{\[[0-9]+:[0-9]+\]}}
-define void @fp_to_uint_f64_to_i1(i1 addrspace(1)* %out, double %in) #0 {
+define amdgpu_kernel void @fp_to_uint_f64_to_i1(i1 addrspace(1)* %out, double %in) #0 {
%conv = fptoui double %in to i1
store i1 %conv, i1 addrspace(1)* %out
ret void
@@ -80,7 +80,7 @@ define void @fp_to_uint_f64_to_i1(i1 addrspace(1)* %out, double %in) #0 {
; FUNC-LABEL: {{^}}fp_to_uint_fabs_f64_to_i1:
; SI: v_cmp_eq_f64_e64 s{{\[[0-9]+:[0-9]+\]}}, 1.0, |s{{\[[0-9]+:[0-9]+\]}}|
-define void @fp_to_uint_fabs_f64_to_i1(i1 addrspace(1)* %out, double %in) #0 {
+define amdgpu_kernel void @fp_to_uint_fabs_f64_to_i1(i1 addrspace(1)* %out, double %in) #0 {
%in.fabs = call double @llvm.fabs.f64(double %in)
%conv = fptoui double %in.fabs to i1
store i1 %conv, i1 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/fp_to_uint.ll b/test/CodeGen/AMDGPU/fp_to_uint.ll
index cbff9f22b073..fdb15801dc4e 100644
--- a/test/CodeGen/AMDGPU/fp_to_uint.ll
+++ b/test/CodeGen/AMDGPU/fp_to_uint.ll
@@ -9,7 +9,7 @@ declare float @llvm.fabs.f32(float) #1
; GCN: v_cvt_u32_f32_e32
; GCN: s_endpgm
-define void @fp_to_uint_f32_to_i32 (i32 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fp_to_uint_f32_to_i32 (i32 addrspace(1)* %out, float %in) {
%conv = fptoui float %in to i32
store i32 %conv, i32 addrspace(1)* %out
ret void
@@ -21,7 +21,7 @@ define void @fp_to_uint_f32_to_i32 (i32 addrspace(1)* %out, float %in) {
; GCN: v_cvt_u32_f32_e32
; GCN: v_cvt_u32_f32_e32
-define void @fp_to_uint_v2f32_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x float> %in) {
+define amdgpu_kernel void @fp_to_uint_v2f32_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x float> %in) {
%result = fptoui <2 x float> %in to <2 x i32>
store <2 x i32> %result, <2 x i32> addrspace(1)* %out
ret void
@@ -37,7 +37,7 @@ define void @fp_to_uint_v2f32_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x float>
; GCN: v_cvt_u32_f32_e32
; GCN: v_cvt_u32_f32_e32
-define void @fp_to_uint_v4f32_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+define amdgpu_kernel void @fp_to_uint_v4f32_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
%value = load <4 x float>, <4 x float> addrspace(1) * %in
%result = fptoui <4 x float> %value to <4 x i32>
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
@@ -68,7 +68,7 @@ define void @fp_to_uint_v4f32_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x float>
; EG-DAG: CNDE_INT
; GCN: s_endpgm
-define void @fp_to_uint_f32_to_i64(i64 addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @fp_to_uint_f32_to_i64(i64 addrspace(1)* %out, float %x) {
%conv = fptoui float %x to i64
store i64 %conv, i64 addrspace(1)* %out
ret void
@@ -119,7 +119,7 @@ define void @fp_to_uint_f32_to_i64(i64 addrspace(1)* %out, float %x) {
; EG-DAG: CNDE_INT
; GCN: s_endpgm
-define void @fp_to_uint_v2f32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x float> %x) {
+define amdgpu_kernel void @fp_to_uint_v2f32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x float> %x) {
%conv = fptoui <2 x float> %x to <2 x i64>
store <2 x i64> %conv, <2 x i64> addrspace(1)* %out
ret void
@@ -212,7 +212,7 @@ define void @fp_to_uint_v2f32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x float>
; EG-DAG: CNDE_INT
; GCN: s_endpgm
-define void @fp_to_uint_v4f32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x float> %x) {
+define amdgpu_kernel void @fp_to_uint_v4f32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x float> %x) {
%conv = fptoui <4 x float> %x to <4 x i64>
store <4 x i64> %conv, <4 x i64> addrspace(1)* %out
ret void
@@ -224,7 +224,7 @@ define void @fp_to_uint_v4f32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x float>
; EG: AND_INT
; EG: SETE_DX10 {{[*]?}} T{{[0-9]+}}.{{[XYZW]}}, KC0[2].Z, 1.0,
-define void @fp_to_uint_f32_to_i1(i1 addrspace(1)* %out, float %in) #0 {
+define amdgpu_kernel void @fp_to_uint_f32_to_i1(i1 addrspace(1)* %out, float %in) #0 {
%conv = fptoui float %in to i1
store i1 %conv, i1 addrspace(1)* %out
ret void
@@ -232,7 +232,7 @@ define void @fp_to_uint_f32_to_i1(i1 addrspace(1)* %out, float %in) #0 {
; FUNC-LABEL: {{^}}fp_to_uint_fabs_f32_to_i1:
; GCN: v_cmp_eq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, 1.0, |s{{[0-9]+}}|
-define void @fp_to_uint_fabs_f32_to_i1(i1 addrspace(1)* %out, float %in) #0 {
+define amdgpu_kernel void @fp_to_uint_fabs_f32_to_i1(i1 addrspace(1)* %out, float %in) #0 {
%in.fabs = call float @llvm.fabs.f32(float %in)
%conv = fptoui float %in.fabs to i1
store i1 %conv, i1 addrspace(1)* %out
@@ -246,7 +246,7 @@ define void @fp_to_uint_fabs_f32_to_i1(i1 addrspace(1)* %out, float %in) #0 {
; SI: v_cvt_u32_f32_e32 [[VAL:v[0-9]+]], s{{[0-9]+}}
; VI: v_cvt_i32_f32_e32 [[VAL:v[0-9]+]], s{{[0-9]+}}
; GCN: buffer_store_short [[VAL]]
-define void @fp_to_uint_f32_to_i16(i16 addrspace(1)* %out, float %in) #0 {
+define amdgpu_kernel void @fp_to_uint_f32_to_i16(i16 addrspace(1)* %out, float %in) #0 {
%uint = fptoui float %in to i16
store i16 %uint, i16 addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/fpext.f16.ll b/test/CodeGen/AMDGPU/fpext.f16.ll
index c4f5d7cdfb5d..03657176c383 100644
--- a/test/CodeGen/AMDGPU/fpext.f16.ll
+++ b/test/CodeGen/AMDGPU/fpext.f16.ll
@@ -1,14 +1,15 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SIVI -check-prefix=SIGFX9 %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=GFX89 %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -check-prefix=GFX89 -check-prefix=SIGFX9 %s
; GCN-LABEL: {{^}}fpext_f16_to_f32
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: v_cvt_f32_f16_e32 v[[R_F32:[0-9]+]], v[[A_F16]]
; GCN: buffer_store_dword v[[R_F32]]
; GCN: s_endpgm
-define void @fpext_f16_to_f32(
+define amdgpu_kernel void @fpext_f16_to_f32(
float addrspace(1)* %r,
- half addrspace(1)* %a) {
+ half addrspace(1)* %a) #0 {
entry:
%a.val = load half, half addrspace(1)* %a
%r.val = fpext half %a.val to float
@@ -22,9 +23,9 @@ entry:
; GCN: v_cvt_f64_f32_e32 v{{\[}}[[R_F64_0:[0-9]+]]:[[R_F64_1:[0-9]+]]{{\]}}, v[[A_F32]]
; GCN: buffer_store_dwordx2 v{{\[}}[[R_F64_0]]:[[R_F64_1]]{{\]}}
; GCN: s_endpgm
-define void @fpext_f16_to_f64(
+define amdgpu_kernel void @fpext_f16_to_f64(
double addrspace(1)* %r,
- half addrspace(1)* %a) {
+ half addrspace(1)* %a) #0 {
entry:
%a.val = load half, half addrspace(1)* %a
%r.val = fpext half %a.val to double
@@ -34,15 +35,17 @@ entry:
; GCN-LABEL: {{^}}fpext_v2f16_to_v2f32
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; VI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; GCN: v_cvt_f32_f16_e32 v[[R_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GFX9-DAG: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; GCN-DAG: v_cvt_f32_f16_e32 v[[R_F32_0:[0-9]+]], v[[A_V2_F16]]
; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; GCN: v_cvt_f32_f16_e32 v[[R_F32_1:[0-9]+]], v[[A_F16_1]]
+; SIGFX9: v_cvt_f32_f16_e32 v[[R_F32_1:[0-9]+]], v[[A_F16_1]]
+; VI: v_cvt_f32_f16_sdwa v[[R_F32_1:[0-9]+]], v[[A_V2_F16]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; GCN: buffer_store_dwordx2 v{{\[}}[[R_F32_0]]:[[R_F32_1]]{{\]}}
; GCN: s_endpgm
-define void @fpext_v2f16_to_v2f32(
+
+define amdgpu_kernel void @fpext_v2f16_to_v2f32(
<2 x float> addrspace(1)* %r,
- <2 x half> addrspace(1)* %a) {
+ <2 x half> addrspace(1)* %a) #0 {
entry:
%a.val = load <2 x half>, <2 x half> addrspace(1)* %a
%r.val = fpext <2 x half> %a.val to <2 x float>
@@ -51,15 +54,18 @@ entry:
}
; GCN-LABEL: {{^}}fpext_v2f16_to_v2f64
-; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; GCN: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; GCN: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_cvt_f64_f32_e32 v{{\[}}{{[0-9]+}}:[[R_F64_3:[0-9]+]]{{\]}}, v[[A_F32_1]]
-; GCN: v_cvt_f64_f32_e32 v{{\[}}[[R_F64_0:[0-9]+]]:{{[0-9]+}}{{\]}}, v[[A_F32_0]]
-; GCN: buffer_store_dwordx4 v{{\[}}[[R_F64_0]]:[[R_F64_3]]{{\]}}
+; GCN: buffer_load_dword
+; SIGFX9-DAG: v_lshrrev_b32_e32
+; SIGFX9-DAG: v_cvt_f32_f16_e32
+; VI: v_cvt_f32_f16_sdwa
+; GCN: v_cvt_f32_f16_e32
+
+; GCN: v_cvt_f64_f32_e32
+; GCN: v_cvt_f64_f32_e32
+; GCN: buffer_store_dwordx4
; GCN: s_endpgm
-define void @fpext_v2f16_to_v2f64(
+
+define amdgpu_kernel void @fpext_v2f16_to_v2f64(
<2 x double> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
@@ -68,3 +74,202 @@ entry:
store <2 x double> %r.val, <2 x double> addrspace(1)* %r
ret void
}
+
+; GCN-LABEL: {{^}}s_fneg_fpext_f16_to_f32:
+; GCN: v_cvt_f32_f16_e32 v{{[0-9]+}}, s{{[0-9]+}}
+define amdgpu_kernel void @s_fneg_fpext_f16_to_f32(float addrspace(1)* %r, i32 %a) {
+entry:
+ %a.trunc = trunc i32 %a to i16
+ %a.val = bitcast i16 %a.trunc to half
+ %r.val = fpext half %a.val to float
+ store float %r.val, float addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fneg_fpext_f16_to_f32:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; GCN: v_cvt_f32_f16_e64 v{{[0-9]+}}, -[[A]]
+define amdgpu_kernel void @fneg_fpext_f16_to_f32(
+ float addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %a.neg = fsub half -0.0, %a.val
+ %r.val = fpext half %a.neg to float
+ store float %r.val, float addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fabs_fpext_f16_to_f32:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; GCN: v_cvt_f32_f16_e64 v{{[0-9]+}}, |[[A]]|
+define amdgpu_kernel void @fabs_fpext_f16_to_f32(
+ float addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %a.fabs = call half @llvm.fabs.f16(half %a.val)
+ %r.val = fpext half %a.fabs to float
+ store float %r.val, float addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fneg_fabs_fpext_f16_to_f32:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; GCN: v_cvt_f32_f16_e64 v{{[0-9]+}}, -|[[A]]|
+define amdgpu_kernel void @fneg_fabs_fpext_f16_to_f32(
+ float addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %a.fabs = call half @llvm.fabs.f16(half %a.val)
+ %a.fneg.fabs = fsub half -0.0, %a.fabs
+ %r.val = fpext half %a.fneg.fabs to float
+ store float %r.val, float addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fneg_multi_use_fpext_f16_to_f32:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; GCN-DAG: v_xor_b32_e32 [[XOR:v[0-9]+]], 0x8000, [[A]]
+
+; FIXME: Using the source modifier here only wastes code size
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], [[A]]
+; GFX89-DAG: v_cvt_f32_f16_e64 [[CVT:v[0-9]+]], -[[A]]
+
+; GCN: store_dword [[CVT]]
+; GCN: store_short [[XOR]]
+define amdgpu_kernel void @fneg_multi_use_fpext_f16_to_f32(
+ float addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %a.neg = fsub half -0.0, %a.val
+ %r.val = fpext half %a.neg to float
+ store volatile float %r.val, float addrspace(1)* %r
+ store volatile half %a.neg, half addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}fneg_multi_foldable_use_fpext_f16_to_f32:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; GCN-DAG: v_cvt_f32_f16_e64 [[CVTA_NEG:v[0-9]+]], -[[A]]
+; SI-DAG: v_cvt_f32_f16_e32 [[CVTA:v[0-9]+]], [[A]]
+; SI: v_mul_f32_e32 [[MUL_F32:v[0-9]+]], [[CVTA]], [[CVTA_NEG]]
+; SI: v_cvt_f16_f32_e32 [[MUL:v[0-9]+]], [[MUL_F32]]
+
+; GFX89-DAG: v_cvt_f32_f16_e64 [[CVT_NEGA:v[0-9]+]], -[[A]]
+; GFX89: v_mul_f16_e64 [[MUL:v[0-9]+]], -[[A]], [[A]]
+
+; GCN: buffer_store_dword [[CVTA_NEG]]
+; GCN: buffer_store_short [[MUL]]
+define amdgpu_kernel void @fneg_multi_foldable_use_fpext_f16_to_f32(
+ float addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %a.neg = fsub half -0.0, %a.val
+ %r.val = fpext half %a.neg to float
+ %mul = fmul half %a.neg, %a.val
+ store volatile float %r.val, float addrspace(1)* %r
+ store volatile half %mul, half addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}fabs_multi_use_fpext_f16_to_f32:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; GCN-DAG: v_and_b32_e32 [[XOR:v[0-9]+]], 0x7fff, [[A]]
+
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], [[A]]
+; VI-DAG: v_cvt_f32_f16_e64 [[CVT:v[0-9]+]], |[[A]]|
+
+; GCN: store_dword [[CVT]]
+; GCN: store_short [[XOR]]
+define amdgpu_kernel void @fabs_multi_use_fpext_f16_to_f32(
+ float addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %a.fabs = call half @llvm.fabs.f16(half %a.val)
+ %r.val = fpext half %a.fabs to float
+ store volatile float %r.val, float addrspace(1)* %r
+ store volatile half %a.fabs, half addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}fabs_multi_foldable_use_fpext_f16_to_f32:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; SI: v_cvt_f32_f16_e32 [[CVTA:v[0-9]+]], [[A]]
+; SI: v_mul_f32_e64 [[MUL_F32:v[0-9]+]], |[[CVTA]]|, [[CVTA]]
+; SI: v_cvt_f16_f32_e32 [[MUL:v[0-9]+]], [[MUL_F32]]
+; SI: v_and_b32_e32 [[ABS_A:v[0-9]+]], 0x7fffffff, [[CVTA]]
+
+; GFX89-DAG: v_cvt_f32_f16_e64 [[ABS_A:v[0-9]+]], |[[A]]|
+; GFX89: v_mul_f16_e64 [[MUL:v[0-9]+]], |[[A]]|, [[A]]
+
+; GCN: buffer_store_dword [[ABS_A]]
+; GCN: buffer_store_short [[MUL]]
+define amdgpu_kernel void @fabs_multi_foldable_use_fpext_f16_to_f32(
+ float addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %a.fabs = call half @llvm.fabs.f16(half %a.val)
+ %r.val = fpext half %a.fabs to float
+ %mul = fmul half %a.fabs, %a.val
+ store volatile float %r.val, float addrspace(1)* %r
+ store volatile half %mul, half addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}fabs_fneg_multi_use_fpext_f16_to_f32:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; GCN-DAG: v_or_b32_e32 [[OR:v[0-9]+]], 0x8000, [[A]]
+
+; SI: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], [[OR]]
+; VI-DAG: v_cvt_f32_f16_e64 [[CVT:v[0-9]+]], -|[[OR]]|
+
+; GCN: buffer_store_dword [[CVT]]
+; GCN: buffer_store_short [[OR]]
+define amdgpu_kernel void @fabs_fneg_multi_use_fpext_f16_to_f32(
+ float addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %a.fabs = call half @llvm.fabs.f16(half %a.val)
+ %a.fneg.fabs = fsub half -0.0, %a.fabs
+ %r.val = fpext half %a.fneg.fabs to float
+ store volatile float %r.val, float addrspace(1)* %r
+ store volatile half %a.fneg.fabs, half addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}fabs_fneg_multi_foldable_use_fpext_f16_to_f32:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; SI: v_cvt_f32_f16_e32 [[CVTA:v[0-9]+]], [[A]]
+; SI: v_mul_f32_e64 [[MUL_F32:v[0-9]+]], -|[[CVTA]]|, [[CVTA]]
+; SI: v_cvt_f16_f32_e32 [[MUL:v[0-9]+]], [[MUL_F32]]
+; SI: v_or_b32_e32 [[FABS_FNEG:v[0-9]+]], 0x80000000, [[CVTA]]
+
+; GFX89-DAG: v_cvt_f32_f16_e64 [[FABS_FNEG:v[0-9]+]], -|[[A]]|
+; GFX89-DAG: v_mul_f16_e64 [[MUL:v[0-9]+]], -|[[A]]|, [[A]]
+
+; GCN: buffer_store_dword [[FABS_FNEG]]
+; GCN: buffer_store_short [[MUL]]
+define amdgpu_kernel void @fabs_fneg_multi_foldable_use_fpext_f16_to_f32(
+ float addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %a.fabs = call half @llvm.fabs.f16(half %a.val)
+ %a.fneg.fabs = fsub half -0.0, %a.fabs
+ %r.val = fpext half %a.fneg.fabs to float
+ %mul = fmul half %a.fneg.fabs, %a.val
+ store volatile float %r.val, float addrspace(1)* %r
+ store volatile half %mul, half addrspace(1)* undef
+ ret void
+}
+
+declare half @llvm.fabs.f16(half) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/fpext.ll b/test/CodeGen/AMDGPU/fpext.ll
index 6dc84b01d734..b11e2ea056c3 100644
--- a/test/CodeGen/AMDGPU/fpext.ll
+++ b/test/CodeGen/AMDGPU/fpext.ll
@@ -3,7 +3,7 @@
; FUNC-LABEL: {{^}}fpext_f32_to_f64:
; SI: v_cvt_f64_f32_e32 {{v\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
-define void @fpext_f32_to_f64(double addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fpext_f32_to_f64(double addrspace(1)* %out, float %in) {
%result = fpext float %in to double
store double %result, double addrspace(1)* %out
ret void
@@ -12,7 +12,7 @@ define void @fpext_f32_to_f64(double addrspace(1)* %out, float %in) {
; FUNC-LABEL: {{^}}fpext_v2f32_to_v2f64:
; SI: v_cvt_f64_f32_e32
; SI: v_cvt_f64_f32_e32
-define void @fpext_v2f32_to_v2f64(<2 x double> addrspace(1)* %out, <2 x float> %in) {
+define amdgpu_kernel void @fpext_v2f32_to_v2f64(<2 x double> addrspace(1)* %out, <2 x float> %in) {
%result = fpext <2 x float> %in to <2 x double>
store <2 x double> %result, <2 x double> addrspace(1)* %out
ret void
@@ -22,7 +22,7 @@ define void @fpext_v2f32_to_v2f64(<2 x double> addrspace(1)* %out, <2 x float> %
; SI: v_cvt_f64_f32_e32
; SI: v_cvt_f64_f32_e32
; SI: v_cvt_f64_f32_e32
-define void @fpext_v3f32_to_v3f64(<3 x double> addrspace(1)* %out, <3 x float> %in) {
+define amdgpu_kernel void @fpext_v3f32_to_v3f64(<3 x double> addrspace(1)* %out, <3 x float> %in) {
%result = fpext <3 x float> %in to <3 x double>
store <3 x double> %result, <3 x double> addrspace(1)* %out
ret void
@@ -33,7 +33,7 @@ define void @fpext_v3f32_to_v3f64(<3 x double> addrspace(1)* %out, <3 x float> %
; SI: v_cvt_f64_f32_e32
; SI: v_cvt_f64_f32_e32
; SI: v_cvt_f64_f32_e32
-define void @fpext_v4f32_to_v4f64(<4 x double> addrspace(1)* %out, <4 x float> %in) {
+define amdgpu_kernel void @fpext_v4f32_to_v4f64(<4 x double> addrspace(1)* %out, <4 x float> %in) {
%result = fpext <4 x float> %in to <4 x double>
store <4 x double> %result, <4 x double> addrspace(1)* %out
ret void
@@ -48,7 +48,7 @@ define void @fpext_v4f32_to_v4f64(<4 x double> addrspace(1)* %out, <4 x float> %
; SI: v_cvt_f64_f32_e32
; SI: v_cvt_f64_f32_e32
; SI: v_cvt_f64_f32_e32
-define void @fpext_v8f32_to_v8f64(<8 x double> addrspace(1)* %out, <8 x float> %in) {
+define amdgpu_kernel void @fpext_v8f32_to_v8f64(<8 x double> addrspace(1)* %out, <8 x float> %in) {
%result = fpext <8 x float> %in to <8 x double>
store <8 x double> %result, <8 x double> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/fptosi.f16.ll b/test/CodeGen/AMDGPU/fptosi.f16.ll
index 71f56d730e96..50e56e08416a 100644
--- a/test/CodeGen/AMDGPU/fptosi.f16.ll
+++ b/test/CodeGen/AMDGPU/fptosi.f16.ll
@@ -7,7 +7,7 @@
; GCN: v_cvt_i32_f32_e32 v[[R_I16:[0-9]+]], v[[A_F32]]
; GCN: buffer_store_short v[[R_I16]]
; GCN: s_endpgm
-define void @fptosi_f16_to_i16(
+define amdgpu_kernel void @fptosi_f16_to_i16(
i16 addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -23,7 +23,7 @@ entry:
; GCN: v_cvt_i32_f32_e32 v[[R_I32:[0-9]+]], v[[A_F32]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
-define void @fptosi_f16_to_i32(
+define amdgpu_kernel void @fptosi_f16_to_i32(
i32 addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -40,7 +40,7 @@ entry:
; GCN: buffer_load_ushort
; GCN: v_cvt_f32_f16_e32
; GCN: s_endpgm
-define void @fptosi_f16_to_i64(
+define amdgpu_kernel void @fptosi_f16_to_i64(
i64 addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -52,17 +52,26 @@ entry:
; GCN-LABEL: {{^}}fptosi_v2f16_to_v2i16
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; GCN: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; GCN: v_cvt_i32_f32_e32 v[[R_I16_0:[0-9]+]], v[[A_F32_0]]
-; GCN: v_cvt_i32_f32_e32 v[[R_I16_1:[0-9]+]], v[[A_F32_1]]
-; GCN: v_and_b32_e32 v[[R_I16_LO:[0-9]+]], 0xffff, v[[R_I16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_I16_HI:[0-9]+]], 16, v[[R_I16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_I16:[0-9]+]], v[[R_I16_HI]], v[[R_I16_LO]]
+
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_i32_f32_e32 v[[R_I16_0:[0-9]+]], v[[A_F32_0]]
+; SI: v_cvt_i32_f32_e32 v[[R_I16_1:[0-9]+]], v[[A_F32_1]]
+; SI: v_and_b32_e32 v[[R_I16_LO:[0-9]+]], 0xffff, v[[R_I16_0]]
+; SI: v_lshlrev_b32_e32 v[[R_I16_HI:[0-9]+]], 16, v[[R_I16_1]]
+; SI: v_or_b32_e32 v[[R_V2_I16:[0-9]+]], v[[R_I16_HI]], v[[R_I16_LO]]
+
+; VI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; VI: v_cvt_f32_f16_sdwa v[[A_F32_1:[0-9]+]], v[[A_V2_F16]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; VI: v_cvt_i32_f32_e32 v[[R_I16_0:[0-9]+]], v[[A_F32_0]]
+; VI: v_cvt_i32_f32_sdwa v[[R_I16_1:[0-9]+]], v[[A_F32_1]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; VI: v_or_b32_sdwa v[[R_V2_I16:[0-9]+]], v[[R_I16_1]], v[[R_I16_0]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+
; GCN: buffer_store_dword v[[R_V2_I16]]
; GCN: s_endpgm
-define void @fptosi_v2f16_to_v2i16(
+
+define amdgpu_kernel void @fptosi_v2f16_to_v2i16(
<2 x i16> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
@@ -75,12 +84,13 @@ entry:
; GCN-LABEL: {{^}}fptosi_v2f16_to_v2i32
; GCN: buffer_load_dword
; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; VI: v_cvt_f32_f16_sdwa
; GCN: v_cvt_i32_f32_e32
; GCN: v_cvt_i32_f32_e32
; GCN: buffer_store_dwordx2
; GCN: s_endpgm
-define void @fptosi_v2f16_to_v2i32(
+define amdgpu_kernel void @fptosi_v2f16_to_v2i32(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
@@ -96,9 +106,10 @@ entry:
; GCN-LABEL: {{^}}fptosi_v2f16_to_v2i64
; GCN: buffer_load_dword
; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; VI: v_cvt_f32_f16_sdwa
; GCN: s_endpgm
-define void @fptosi_v2f16_to_v2i64(
+define amdgpu_kernel void @fptosi_v2f16_to_v2i64(
<2 x i64> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
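
Note: the VI check lines above replace shift/mask sequences with SDWA
(sub-dword addressing) forms, which read or write a 16-bit half of a 32-bit
VGPR directly through the dst_sel/src0_sel operands. A minimal sketch of the
idea for unpacking a <2 x half> register (register numbers illustrative):

  v_cvt_f32_f16_e32  v1, v0                                                      ; convert bits 15:0 of v0
  v_cvt_f32_f16_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1  ; convert bits 31:16 of v0

With SDWA, the v_lshrrev_b32/v_and_b32 unpack-and-repack code that the SI
checks still expect is no longer needed.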
diff --git a/test/CodeGen/AMDGPU/fptoui.f16.ll b/test/CodeGen/AMDGPU/fptoui.f16.ll
index a6876624a0c6..2afa6111cf17 100644
--- a/test/CodeGen/AMDGPU/fptoui.f16.ll
+++ b/test/CodeGen/AMDGPU/fptoui.f16.ll
@@ -8,7 +8,7 @@
; VI: v_cvt_i32_f32_e32 v[[R_I16:[0-9]+]], v[[A_F32]]
; GCN: buffer_store_short v[[R_I16]]
; GCN: s_endpgm
-define void @fptoui_f16_to_i16(
+define amdgpu_kernel void @fptoui_f16_to_i16(
i16 addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -24,7 +24,7 @@ entry:
; GCN: v_cvt_u32_f32_e32 v[[R_I32:[0-9]+]], v[[A_F32]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
-define void @fptoui_f16_to_i32(
+define amdgpu_kernel void @fptoui_f16_to_i32(
i32 addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -41,7 +41,7 @@ entry:
; GCN: buffer_load_ushort
; GCN: v_cvt_f32_f16_e32
; GCN: s_endpgm
-define void @fptoui_f16_to_i64(
+define amdgpu_kernel void @fptoui_f16_to_i64(
i64 addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -53,18 +53,25 @@ entry:
; GCN-LABEL: {{^}}fptoui_v2f16_to_v2i16
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; GCN-DAG: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; GCN-DAG: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
; SI: v_cvt_u32_f32_e32 v[[R_I16_1:[0-9]+]], v[[A_F32_1]]
; SI: v_cvt_u32_f32_e32 v[[R_I16_0:[0-9]+]], v[[A_F32_0]]
-; VI: v_cvt_i32_f32_e32 v[[R_I16_0:[0-9]+]], v[[A_F32_0]]
+; SI: v_lshlrev_b32_e32 v[[R_I16_HI:[0-9]+]], 16, v[[R_I16_1]]
+; SI: v_or_b32_e32 v[[R_V2_I16:[0-9]+]], v[[R_I16_HI]], v[[R_I16_0]]
+
+; VI-DAG: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_V2_F16]]
+; VI-DAG: v_cvt_f32_f16_sdwa v[[A_F32_0:[0-9]+]], v[[A_V2_F16]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI: v_cvt_i32_f32_e32 v[[R_I16_1:[0-9]+]], v[[A_F32_1]]
-; GCN: v_lshlrev_b32_e32 v[[R_I16_HI:[0-9]+]], 16, v[[R_I16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_I16:[0-9]+]], v[[R_I16_HI]], v[[R_I16_0]]
+; VI: v_cvt_i32_f32_sdwa v[[R_I16_0:[0-9]+]], v[[A_F32_0]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; VI: v_or_b32_sdwa v[[R_V2_I16:[0-9]+]], v[[R_I16_0]], v[[R_I16_1]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+
; GCN: buffer_store_dword v[[R_V2_I16]]
; GCN: s_endpgm
-define void @fptoui_v2f16_to_v2i16(
+
+define amdgpu_kernel void @fptoui_v2f16_to_v2i16(
<2 x i16> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
@@ -77,12 +84,13 @@ entry:
; GCN-LABEL: {{^}}fptoui_v2f16_to_v2i32
; GCN: buffer_load_dword
; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; VI: v_cvt_f32_f16_sdwa
; GCN: v_cvt_u32_f32_e32
; GCN: v_cvt_u32_f32_e32
; GCN: buffer_store_dwordx2
; GCN: s_endpgm
-define void @fptoui_v2f16_to_v2i32(
+define amdgpu_kernel void @fptoui_v2f16_to_v2i32(
<2 x i32> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
@@ -98,9 +106,10 @@ entry:
; GCN-LABEL: {{^}}fptoui_v2f16_to_v2i64
; GCN: buffer_load_dword
; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; VI: v_cvt_f32_f16_sdwa
; GCN: s_endpgm
-define void @fptoui_v2f16_to_v2i64(
+define amdgpu_kernel void @fptoui_v2f16_to_v2i64(
<2 x i64> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
diff --git a/test/CodeGen/AMDGPU/fptrunc.f16.ll b/test/CodeGen/AMDGPU/fptrunc.f16.ll
index 284fc53c8240..bc72f4424c98 100644
--- a/test/CodeGen/AMDGPU/fptrunc.f16.ll
+++ b/test/CodeGen/AMDGPU/fptrunc.f16.ll
@@ -1,12 +1,13 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SIVI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=SIVI %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global,-fp64-fp16-denormals -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
-; GCN-LABEL: {{^}}fptrunc_f32_to_f16
+; GCN-LABEL: {{^}}fptrunc_f32_to_f16:
; GCN: buffer_load_dword v[[A_F32:[0-9]+]]
; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[A_F32]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @fptrunc_f32_to_f16(
+define amdgpu_kernel void @fptrunc_f32_to_f16(
half addrspace(1)* %r,
float addrspace(1)* %a) {
entry:
@@ -16,13 +17,13 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fptrunc_f64_to_f16
+; GCN-LABEL: {{^}}fptrunc_f64_to_f16:
; GCN: buffer_load_dwordx2 v{{\[}}[[A_F64_0:[0-9]+]]:[[A_F64_1:[0-9]+]]{{\]}}
; GCN: v_cvt_f32_f64_e32 v[[A_F32:[0-9]+]], v{{\[}}[[A_F64_0]]:[[A_F64_1]]{{\]}}
; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[A_F32]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @fptrunc_f64_to_f16(
+define amdgpu_kernel void @fptrunc_f64_to_f16(
half addrspace(1)* %r,
double addrspace(1)* %a) {
entry:
@@ -32,16 +33,24 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fptrunc_v2f32_to_v2f16
+; GCN-LABEL: {{^}}fptrunc_v2f32_to_v2f16:
; GCN: buffer_load_dwordx2 v{{\[}}[[A_F32_0:[0-9]+]]:[[A_F32_1:[0-9]+]]{{\]}}
; GCN-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[A_F32_0]]
-; GCN-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[A_F32_1]]
-; GCN-DAG: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[A_F32_1]]
+; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; VI-DAG: v_cvt_f16_f32_sdwa v[[R_F16_1:[0-9]+]], v[[A_F32_1]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+
+; GFX9-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[A_F32_1]]
+; GFX9: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GFX9: v_lshl_or_b32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], 16, v[[R_F16_LO]]
+
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @fptrunc_v2f32_to_v2f16(
+
+define amdgpu_kernel void @fptrunc_v2f32_to_v2f16(
<2 x half> addrspace(1)* %r,
<2 x float> addrspace(1)* %a) {
entry:
@@ -51,17 +60,23 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fptrunc_v2f64_to_v2f16
+; GCN-LABEL: {{^}}fptrunc_v2f64_to_v2f16:
; GCN: buffer_load_dwordx4 v{{\[}}[[A_F64_0:[0-9]+]]:[[A_F64_3:[0-9]+]]{{\]}}
-; GCN: v_cvt_f32_f64_e32 v[[A_F32_0:[0-9]+]], v{{\[}}[[A_F64_0]]:{{[0-9]+}}{{\]}}
-; GCN: v_cvt_f32_f64_e32 v[[A_F32_1:[0-9]+]], v{{\[}}{{[0-9]+}}:[[A_F64_3]]{{\]}}
-; GCN: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[A_F32_0]]
-; GCN: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[A_F32_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN-DAG: v_cvt_f32_f64_e32 v[[A_F32_0:[0-9]+]], v{{\[}}[[A_F64_0]]:{{[0-9]+}}{{\]}}
+; GCN-DAG: v_cvt_f32_f64_e32 v[[A_F32_1:[0-9]+]], v{{\[}}{{[0-9]+}}:[[A_F64_3]]{{\]}}
+; GCN-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[A_F32_0]]
+
+; VI: v_cvt_f16_f32_sdwa v[[R_F16_HI:[0-9]+]], v[[A_F32_1]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+
+; SIVI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; GFX9-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[A_F32_1]]
+; GFX9: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GFX9: v_lshl_or_b32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], 16, v[[R_F16_LO]]
+
; GCN: buffer_store_dword v[[R_V2_F16]]
-define void @fptrunc_v2f64_to_v2f16(
+
+define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
<2 x half> addrspace(1)* %r,
<2 x double> addrspace(1)* %a) {
entry:
@@ -70,3 +85,109 @@ entry:
store <2 x half> %r.val, <2 x half> addrspace(1)* %r
ret void
}
+
+; GCN-LABEL: {{^}}fneg_fptrunc_f32_to_f16:
+; GCN: buffer_load_dword v[[A_F32:[0-9]+]]
+; GCN: v_cvt_f16_f32_e64 v[[R_F16:[0-9]+]], -v[[A_F32]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define amdgpu_kernel void @fneg_fptrunc_f32_to_f16(
+ half addrspace(1)* %r,
+ float addrspace(1)* %a) {
+entry:
+ %a.val = load float, float addrspace(1)* %a
+ %a.fneg = fsub float -0.0, %a.val
+ %r.val = fptrunc float %a.fneg to half
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fabs_fptrunc_f32_to_f16:
+; GCN: buffer_load_dword v[[A_F32:[0-9]+]]
+; GCN: v_cvt_f16_f32_e64 v[[R_F16:[0-9]+]], |v[[A_F32]]|
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define amdgpu_kernel void @fabs_fptrunc_f32_to_f16(
+ half addrspace(1)* %r,
+ float addrspace(1)* %a) {
+entry:
+ %a.val = load float, float addrspace(1)* %a
+ %a.fabs = call float @llvm.fabs.f32(float %a.val)
+ %r.val = fptrunc float %a.fabs to half
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fneg_fabs_fptrunc_f32_to_f16:
+; GCN: buffer_load_dword v[[A_F32:[0-9]+]]
+; GCN: v_cvt_f16_f32_e64 v[[R_F16:[0-9]+]], -|v[[A_F32]]|
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define amdgpu_kernel void @fneg_fabs_fptrunc_f32_to_f16(
+ half addrspace(1)* %r,
+ float addrspace(1)* %a) #0 {
+entry:
+ %a.val = load float, float addrspace(1)* %a
+ %a.fabs = call float @llvm.fabs.f32(float %a.val)
+ %a.fneg.fabs = fsub float -0.0, %a.fabs
+ %r.val = fptrunc float %a.fneg.fabs to half
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fptrunc_f32_to_f16_zext_i32:
+; GCN: buffer_load_dword v[[A_F32:[0-9]+]]
+; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[A_F32]]
+; GCN-NOT: v[[R_F16]]
+; GCN: buffer_store_dword v[[R_F16]]
+define amdgpu_kernel void @fptrunc_f32_to_f16_zext_i32(
+ i32 addrspace(1)* %r,
+ float addrspace(1)* %a) #0 {
+entry:
+ %a.val = load float, float addrspace(1)* %a
+ %r.val = fptrunc float %a.val to half
+ %r.i16 = bitcast half %r.val to i16
+ %zext = zext i16 %r.i16 to i32
+ store i32 %zext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fptrunc_fabs_f32_to_f16_zext_i32:
+; GCN: buffer_load_dword v[[A_F32:[0-9]+]]
+; GCN: v_cvt_f16_f32_e64 v[[R_F16:[0-9]+]], |v[[A_F32]]|
+; GCN-NOT: v[[R_F16]]
+; GCN: buffer_store_dword v[[R_F16]]
+define amdgpu_kernel void @fptrunc_fabs_f32_to_f16_zext_i32(
+ i32 addrspace(1)* %r,
+ float addrspace(1)* %a) #0 {
+entry:
+ %a.val = load float, float addrspace(1)* %a
+ %a.fabs = call float @llvm.fabs.f32(float %a.val)
+ %r.val = fptrunc float %a.fabs to half
+ %r.i16 = bitcast half %r.val to i16
+ %zext = zext i16 %r.i16 to i32
+ store i32 %zext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fptrunc_f32_to_f16_sext_i32:
+; GCN: buffer_load_dword v[[A_F32:[0-9]+]]
+; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[A_F32]]
+; GCN: v_bfe_i32 v[[R_F16_SEXT:[0-9]+]], v[[R_F16]], 0, 16
+; GCN: buffer_store_dword v[[R_F16_SEXT]]
+define amdgpu_kernel void @fptrunc_f32_to_f16_sext_i32(
+ i32 addrspace(1)* %r,
+ float addrspace(1)* %a) #0 {
+entry:
+ %a.val = load float, float addrspace(1)* %a
+ %r.val = fptrunc float %a.val to half
+ %r.i16 = bitcast half %r.val to i16
+ %sext = sext i16 %r.i16 to i32
+ store i32 %sext, i32 addrspace(1)* %r
+ ret void
+}
+
+declare float @llvm.fabs.f32(float) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
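
Note: the fneg/fabs tests added above check that the sign operations fold
into the conversion's VOP3 source modifiers (-v, |v|, -|v|) instead of being
emitted as separate v_xor_b32/v_and_b32 instructions. A minimal IR sketch of
the folded pattern (hypothetical kernel name; same intrinsic as the tests):

  define amdgpu_kernel void @fneg_fabs_sketch(half addrspace(1)* %r, float %x) {
    %fabs = call float @llvm.fabs.f32(float %x)  ; becomes the |v| modifier
    %fneg = fsub float -0.0, %fabs               ; becomes the - modifier
    %cvt = fptrunc float %fneg to half           ; selects v_cvt_f16_f32_e64 vM, -|vN|
    store half %cvt, half addrspace(1)* %r
    ret void
  }
  declare float @llvm.fabs.f32(float)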
diff --git a/test/CodeGen/AMDGPU/fptrunc.ll b/test/CodeGen/AMDGPU/fptrunc.ll
index 0c7b67406a89..d9c5b7e6f359 100644
--- a/test/CodeGen/AMDGPU/fptrunc.ll
+++ b/test/CodeGen/AMDGPU/fptrunc.ll
@@ -4,7 +4,7 @@
; FUNC-LABEL: {{^}}fptrunc_f64_to_f32:
; GCN: v_cvt_f32_f64_e32 {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}
-define void @fptrunc_f64_to_f32(float addrspace(1)* %out, double %in) {
+define amdgpu_kernel void @fptrunc_f64_to_f32(float addrspace(1)* %out, double %in) {
%result = fptrunc double %in to float
store float %result, float addrspace(1)* %out
ret void
@@ -14,7 +14,7 @@ define void @fptrunc_f64_to_f32(float addrspace(1)* %out, double %in) {
; GCN-NOT: v_cvt
; GCN-UNSAFE: v_cvt_f32_f64_e32 [[F32:v[0-9]+]]
; GCN-UNSAFE: v_cvt_f16_f32_e32 v{{[0-9]+}}, [[F32]]
-define void @fptrunc_f64_to_f16(i16 addrspace(1)* %out, double %in) {
+define amdgpu_kernel void @fptrunc_f64_to_f16(i16 addrspace(1)* %out, double %in) {
%result = fptrunc double %in to half
%result_i16 = bitcast half %result to i16
store i16 %result_i16, i16 addrspace(1)* %out
@@ -24,7 +24,7 @@ define void @fptrunc_f64_to_f16(i16 addrspace(1)* %out, double %in) {
; FUNC-LABEL: {{^}}fptrunc_v2f64_to_v2f32:
; GCN: v_cvt_f32_f64_e32
; GCN: v_cvt_f32_f64_e32
-define void @fptrunc_v2f64_to_v2f32(<2 x float> addrspace(1)* %out, <2 x double> %in) {
+define amdgpu_kernel void @fptrunc_v2f64_to_v2f32(<2 x float> addrspace(1)* %out, <2 x double> %in) {
%result = fptrunc <2 x double> %in to <2 x float>
store <2 x float> %result, <2 x float> addrspace(1)* %out
ret void
@@ -35,7 +35,7 @@ define void @fptrunc_v2f64_to_v2f32(<2 x float> addrspace(1)* %out, <2 x double>
; GCN: v_cvt_f32_f64_e32
; GCN: v_cvt_f32_f64_e32
; GCN: v_cvt_f32_f64_e32
-define void @fptrunc_v4f64_to_v4f32(<4 x float> addrspace(1)* %out, <4 x double> %in) {
+define amdgpu_kernel void @fptrunc_v4f64_to_v4f32(<4 x float> addrspace(1)* %out, <4 x double> %in) {
%result = fptrunc <4 x double> %in to <4 x float>
store <4 x float> %result, <4 x float> addrspace(1)* %out
ret void
@@ -50,7 +50,7 @@ define void @fptrunc_v4f64_to_v4f32(<4 x float> addrspace(1)* %out, <4 x double>
; GCN: v_cvt_f32_f64_e32
; GCN: v_cvt_f32_f64_e32
; GCN: v_cvt_f32_f64_e32
-define void @fptrunc_v8f64_to_v8f32(<8 x float> addrspace(1)* %out, <8 x double> %in) {
+define amdgpu_kernel void @fptrunc_v8f64_to_v8f32(<8 x float> addrspace(1)* %out, <8 x double> %in) {
%result = fptrunc <8 x double> %in to <8 x float>
store <8 x float> %result, <8 x float> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/fract.f64.ll b/test/CodeGen/AMDGPU/fract.f64.ll
index 0651dce8d95c..7a5bcfffa3f3 100644
--- a/test/CodeGen/AMDGPU/fract.f64.ll
+++ b/test/CodeGen/AMDGPU/fract.f64.ll
@@ -27,7 +27,7 @@ declare double @llvm.floor.f64(double) #0
; GCN-UNSAFE: v_fract_f64_e32 [[FRACT:v\[[0-9]+:[0-9]+\]]], [[X]]
; GCN: buffer_store_dwordx2 [[FRACT]]
-define void @fract_f64(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
+define amdgpu_kernel void @fract_f64(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
%x = load double, double addrspace(1)* %src
%floor.x = call double @llvm.floor.f64(double %x)
%fract = fsub double %x, %floor.x
@@ -54,7 +54,7 @@ define void @fract_f64(double addrspace(1)* %out, double addrspace(1)* %src) #1
; GCN-UNSAFE: v_fract_f64_e64 [[FRACT:v\[[0-9]+:[0-9]+\]]], -[[X]]
; GCN: buffer_store_dwordx2 [[FRACT]]
-define void @fract_f64_neg(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
+define amdgpu_kernel void @fract_f64_neg(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
%x = load double, double addrspace(1)* %src
%neg.x = fsub double -0.0, %x
%floor.neg.x = call double @llvm.floor.f64(double %neg.x)
@@ -82,7 +82,7 @@ define void @fract_f64_neg(double addrspace(1)* %out, double addrspace(1)* %src)
; GCN-UNSAFE: v_fract_f64_e64 [[FRACT:v\[[0-9]+:[0-9]+\]]], -|[[X]]|
; GCN: buffer_store_dwordx2 [[FRACT]]
-define void @fract_f64_neg_abs(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
+define amdgpu_kernel void @fract_f64_neg_abs(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
%x = load double, double addrspace(1)* %src
%abs.x = call double @llvm.fabs.f64(double %x)
%neg.abs.x = fsub double -0.0, %abs.x
@@ -98,7 +98,7 @@ define void @fract_f64_neg_abs(double addrspace(1)* %out, double addrspace(1)* %
; VI-UNSAFE-DAG: v_fract_f64_e32 [[FRACT:v\[[0-9]+:[0-9]+\]]], [[X]]
; VI-UNSAFE: buffer_store_dwordx2 [[FLOOR]]
; VI-UNSAFE: buffer_store_dwordx2 [[FRACT]]
-define void @multi_use_floor_fract_f64(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
+define amdgpu_kernel void @multi_use_floor_fract_f64(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
%x = load double, double addrspace(1)* %src
%floor.x = call double @llvm.floor.f64(double %x)
%fract = fsub double %x, %floor.x
diff --git a/test/CodeGen/AMDGPU/fract.ll b/test/CodeGen/AMDGPU/fract.ll
index 4e1a503b1298..207fe280c9a6 100644
--- a/test/CodeGen/AMDGPU/fract.ll
+++ b/test/CodeGen/AMDGPU/fract.ll
@@ -14,7 +14,7 @@ declare float @llvm.floor.f32(float) #0
; GCN-UNSAFE: v_fract_f32_e32 [[RESULT:v[0-9]+]], [[INPUT:v[0-9]+]]
; GCN: buffer_store_dword [[RESULT]]
-define void @fract_f32(float addrspace(1)* %out, float addrspace(1)* %src) #1 {
+define amdgpu_kernel void @fract_f32(float addrspace(1)* %out, float addrspace(1)* %src) #1 {
%x = load float, float addrspace(1)* %src
%floor.x = call float @llvm.floor.f32(float %x)
%fract = fsub float %x, %floor.x
@@ -29,7 +29,7 @@ define void @fract_f32(float addrspace(1)* %out, float addrspace(1)* %src) #1 {
; GCN-UNSAFE: v_fract_f32_e64 [[RESULT:v[0-9]+]], -[[INPUT:v[0-9]+]]
; GCN: buffer_store_dword [[RESULT]]
-define void @fract_f32_neg(float addrspace(1)* %out, float addrspace(1)* %src) #1 {
+define amdgpu_kernel void @fract_f32_neg(float addrspace(1)* %out, float addrspace(1)* %src) #1 {
%x = load float, float addrspace(1)* %src
%x.neg = fsub float -0.0, %x
%floor.x.neg = call float @llvm.floor.f32(float %x.neg)
@@ -45,7 +45,7 @@ define void @fract_f32_neg(float addrspace(1)* %out, float addrspace(1)* %src) #
; GCN-UNSAFE: v_fract_f32_e64 [[RESULT:v[0-9]+]], -|[[INPUT:v[0-9]+]]|
; GCN: buffer_store_dword [[RESULT]]
-define void @fract_f32_neg_abs(float addrspace(1)* %out, float addrspace(1)* %src) #1 {
+define amdgpu_kernel void @fract_f32_neg_abs(float addrspace(1)* %out, float addrspace(1)* %src) #1 {
%x = load float, float addrspace(1)* %src
%abs.x = call float @llvm.fabs.f32(float %x)
%neg.abs.x = fsub float -0.0, %abs.x
@@ -61,7 +61,7 @@ define void @fract_f32_neg_abs(float addrspace(1)* %out, float addrspace(1)* %sr
; GCN-UNSAFE: buffer_store_dword [[FLOOR]]
; GCN-UNSAFE: buffer_store_dword [[FRACT]]
-define void @multi_use_floor_fract_f32(float addrspace(1)* %out, float addrspace(1)* %src) #1 {
+define amdgpu_kernel void @multi_use_floor_fract_f32(float addrspace(1)* %out, float addrspace(1)* %src) #1 {
%x = load float, float addrspace(1)* %src
%floor.x = call float @llvm.floor.f32(float %x)
%fract = fsub float %x, %floor.x
diff --git a/test/CodeGen/AMDGPU/frem.ll b/test/CodeGen/AMDGPU/frem.ll
index 039623c02194..9778069d0477 100644
--- a/test/CodeGen/AMDGPU/frem.ll
+++ b/test/CodeGen/AMDGPU/frem.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -enable-misched < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -enable-misched < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -enable-misched < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}frem_f32:
; GCN-DAG: buffer_load_dword [[X:v[0-9]+]], {{.*$}}
@@ -12,10 +12,10 @@
; GCN: v_mul_f32_e32
; GCN: v_div_fmas_f32
; GCN: v_div_fixup_f32
-; GCN: v_trunc_f32_e32
-; GCN: v_mad_f32
+; GCN: v_trunc_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: s_endpgm
-define void @frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
+define amdgpu_kernel void @frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
float addrspace(1)* %in2) #0 {
%gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
%r0 = load float, float addrspace(1)* %in1, align 4
@@ -33,8 +33,7 @@ define void @frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
; GCN: v_trunc_f32_e32 [[TRUNC:v[0-9]+]], [[DIV]]
; GCN: v_mad_f32 [[RESULT:v[0-9]+]], -[[TRUNC]], [[Y]], [[X]]
; GCN: buffer_store_dword [[RESULT]]
-; GCN: s_endpgm
-define void @unsafe_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
+define amdgpu_kernel void @unsafe_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
float addrspace(1)* %in2) #1 {
%gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
%r0 = load float, float addrspace(1)* %in1, align 4
@@ -55,7 +54,7 @@ define void @unsafe_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
; GCN: v_add_f64
; GCN: buffer_store_dwordx2
; GCN: s_endpgm
-define void @frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+define amdgpu_kernel void @frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) #0 {
%r0 = load double, double addrspace(1)* %in1, align 8
%r1 = load double, double addrspace(1)* %in2, align 8
@@ -71,7 +70,7 @@ define void @frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
; CI: v_trunc_f64_e32
; GCN: v_fma_f64
; GCN: s_endpgm
-define void @unsafe_frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+define amdgpu_kernel void @unsafe_frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) #1 {
%r0 = load double, double addrspace(1)* %in1, align 8
%r1 = load double, double addrspace(1)* %in2, align 8
@@ -80,7 +79,7 @@ define void @unsafe_frem_f64(double addrspace(1)* %out, double addrspace(1)* %in
ret void
}
-define void @frem_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in1,
+define amdgpu_kernel void @frem_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in1,
<2 x float> addrspace(1)* %in2) #0 {
%gep2 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in2, i32 4
%r0 = load <2 x float>, <2 x float> addrspace(1)* %in1, align 8
@@ -90,7 +89,7 @@ define void @frem_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)
ret void
}
-define void @frem_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in1,
+define amdgpu_kernel void @frem_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in1,
<4 x float> addrspace(1)* %in2) #0 {
%gep2 = getelementptr <4 x float>, <4 x float> addrspace(1)* %in2, i32 4
%r0 = load <4 x float>, <4 x float> addrspace(1)* %in1, align 16
@@ -100,7 +99,7 @@ define void @frem_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)
ret void
}
-define void @frem_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
+define amdgpu_kernel void @frem_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
<2 x double> addrspace(1)* %in2) #0 {
%gep2 = getelementptr <2 x double>, <2 x double> addrspace(1)* %in2, i32 4
%r0 = load <2 x double>, <2 x double> addrspace(1)* %in1, align 16
diff --git a/test/CodeGen/AMDGPU/fsqrt.f64.ll b/test/CodeGen/AMDGPU/fsqrt.f64.ll
index ed040436a61a..453d8fb37f2f 100644
--- a/test/CodeGen/AMDGPU/fsqrt.f64.ll
+++ b/test/CodeGen/AMDGPU/fsqrt.f64.ll
@@ -3,7 +3,7 @@
; FUNC-LABEL: {{^}}v_safe_fsqrt_f64:
; GCN: v_sqrt_f64_e32 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
-define void @v_safe_fsqrt_f64(double addrspace(1)* %out, double addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_safe_fsqrt_f64(double addrspace(1)* %out, double addrspace(1)* %in) #1 {
%r0 = load double, double addrspace(1)* %in
%r1 = call double @llvm.sqrt.f64(double %r0)
store double %r1, double addrspace(1)* %out
@@ -12,7 +12,7 @@ define void @v_safe_fsqrt_f64(double addrspace(1)* %out, double addrspace(1)* %i
; FUNC-LABEL: {{^}}v_unsafe_fsqrt_f64:
; GCN: v_sqrt_f64_e32 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
-define void @v_unsafe_fsqrt_f64(double addrspace(1)* %out, double addrspace(1)* %in) #2 {
+define amdgpu_kernel void @v_unsafe_fsqrt_f64(double addrspace(1)* %out, double addrspace(1)* %in) #2 {
%r0 = load double, double addrspace(1)* %in
%r1 = call double @llvm.sqrt.f64(double %r0)
store double %r1, double addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/fsqrt.ll b/test/CodeGen/AMDGPU/fsqrt.ll
index b6526b8e0787..a0fd3411ca05 100644
--- a/test/CodeGen/AMDGPU/fsqrt.ll
+++ b/test/CodeGen/AMDGPU/fsqrt.ll
@@ -7,7 +7,7 @@
; FUNC-LABEL: {{^}}v_safe_fsqrt_f32:
; GCN: v_sqrt_f32_e32 {{v[0-9]+, v[0-9]+}}
-define void @v_safe_fsqrt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_safe_fsqrt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #1 {
%r0 = load float, float addrspace(1)* %in
%r1 = call float @llvm.sqrt.f32(float %r0)
store float %r1, float addrspace(1)* %out
@@ -16,7 +16,7 @@ define void @v_safe_fsqrt_f32(float addrspace(1)* %out, float addrspace(1)* %in)
; FUNC-LABEL: {{^}}v_unsafe_fsqrt_f32:
; GCN: v_sqrt_f32_e32 {{v[0-9]+, v[0-9]+}}
-define void @v_unsafe_fsqrt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #2 {
+define amdgpu_kernel void @v_unsafe_fsqrt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #2 {
%r0 = load float, float addrspace(1)* %in
%r1 = call float @llvm.sqrt.f32(float %r0)
store float %r1, float addrspace(1)* %out
@@ -29,7 +29,7 @@ define void @v_unsafe_fsqrt_f32(float addrspace(1)* %out, float addrspace(1)* %i
; R600: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[2].Z
; R600: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[2].Z, PS
-define void @s_sqrt_f32(float addrspace(1)* %out, float %in) #1 {
+define amdgpu_kernel void @s_sqrt_f32(float addrspace(1)* %out, float %in) #1 {
entry:
%fdiv = call float @llvm.sqrt.f32(float %in)
store float %fdiv, float addrspace(1)* %out
@@ -44,7 +44,7 @@ entry:
; R600-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[2].W, PS
; R600-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[3].X
; R600-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[3].X, PS
-define void @s_sqrt_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) #1 {
+define amdgpu_kernel void @s_sqrt_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) #1 {
entry:
%fdiv = call <2 x float> @llvm.sqrt.v2f32(<2 x float> %in)
store <2 x float> %fdiv, <2 x float> addrspace(1)* %out
@@ -65,7 +65,7 @@ entry:
; R600-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[3].W, PS
; R600-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[4].X
; R600-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[4].X, PS
-define void @s_sqrt_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) #1 {
+define amdgpu_kernel void @s_sqrt_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) #1 {
entry:
%fdiv = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %in)
store <4 x float> %fdiv, <4 x float> addrspace(1)* %out
@@ -75,7 +75,7 @@ entry:
; FUNC-LABEL: {{^}}elim_redun_check_neg0:
; GCN: v_sqrt_f32_e32
; GCN-NOT: v_cndmask
-define void @elim_redun_check_neg0(float addrspace(1)* %out, float %in) #1 {
+define amdgpu_kernel void @elim_redun_check_neg0(float addrspace(1)* %out, float %in) #1 {
entry:
%sqrt = call float @llvm.sqrt.f32(float %in)
%cmp = fcmp olt float %in, -0.000000e+00
@@ -87,7 +87,7 @@ entry:
; FUNC-LABEL: {{^}}elim_redun_check_pos0:
; GCN: v_sqrt_f32_e32
; GCN-NOT: v_cndmask
-define void @elim_redun_check_pos0(float addrspace(1)* %out, float %in) #1 {
+define amdgpu_kernel void @elim_redun_check_pos0(float addrspace(1)* %out, float %in) #1 {
entry:
%sqrt = call float @llvm.sqrt.f32(float %in)
%cmp = fcmp olt float %in, 0.000000e+00
@@ -99,7 +99,7 @@ entry:
; FUNC-LABEL: {{^}}elim_redun_check_ult:
; GCN: v_sqrt_f32_e32
; GCN-NOT: v_cndmask
-define void @elim_redun_check_ult(float addrspace(1)* %out, float %in) #1 {
+define amdgpu_kernel void @elim_redun_check_ult(float addrspace(1)* %out, float %in) #1 {
entry:
%sqrt = call float @llvm.sqrt.f32(float %in)
%cmp = fcmp ult float %in, -0.000000e+00
@@ -112,7 +112,7 @@ entry:
; GCN: v_sqrt_f32_e32
; GCN: v_sqrt_f32_e32
; GCN-NOT: v_cndmask
-define void @elim_redun_check_v2(<2 x float> addrspace(1)* %out, <2 x float> %in) #1 {
+define amdgpu_kernel void @elim_redun_check_v2(<2 x float> addrspace(1)* %out, <2 x float> %in) #1 {
entry:
%sqrt = call <2 x float> @llvm.sqrt.v2f32(<2 x float> %in)
%cmp = fcmp olt <2 x float> %in, <float -0.000000e+00, float -0.000000e+00>
@@ -125,7 +125,7 @@ entry:
; GCN: v_sqrt_f32_e32
; GCN: v_sqrt_f32_e32
; GCN-NOT: v_cndmask
-define void @elim_redun_check_v2_ult(<2 x float> addrspace(1)* %out, <2 x float> %in) #1 {
+define amdgpu_kernel void @elim_redun_check_v2_ult(<2 x float> addrspace(1)* %out, <2 x float> %in) #1 {
entry:
%sqrt = call <2 x float> @llvm.sqrt.v2f32(<2 x float> %in)
%cmp = fcmp ult <2 x float> %in, <float -0.000000e+00, float -0.000000e+00>
diff --git a/test/CodeGen/AMDGPU/fsub.f16.ll b/test/CodeGen/AMDGPU/fsub.f16.ll
index 0b3c8ac2503d..d3c5df317771 100644
--- a/test/CodeGen/AMDGPU/fsub.f16.ll
+++ b/test/CodeGen/AMDGPU/fsub.f16.ll
@@ -1,17 +1,18 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SIVI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX89 -check-prefix=VI -check-prefix=SIVI %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX89 -check-prefix=GFX9 %s
-; GCN-LABEL: {{^}}fsub_f16
+; GCN-LABEL: {{^}}fsub_f16:
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
; SI: v_subrev_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
-; VI: v_subrev_f16_e32 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]]
+; GFX89: v_subrev_f16_e32 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @fsub_f16(
+define amdgpu_kernel void @fsub_f16(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -23,16 +24,15 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fsub_f16_imm_a
+; GCN-LABEL: {{^}}fsub_f16_imm_a:
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], 0x3c00{{$}}
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
-; SI: v_subrev_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_sub_f32_e32 v[[R_F32:[0-9]+]], 1.0, v[[B_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
-; VI: v_sub_f16_e32 v[[R_F16:[0-9]+]], 1.0, v[[B_F16]]
+; GFX89: v_sub_f16_e32 v[[R_F16:[0-9]+]], 1.0, v[[B_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @fsub_f16_imm_a(
+define amdgpu_kernel void @fsub_f16_imm_a(
half addrspace(1)* %r,
half addrspace(1)* %b) {
entry:
@@ -42,16 +42,15 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fsub_f16_imm_b
+; GCN-LABEL: {{^}}fsub_f16_imm_b:
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], 0xc000{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
-; SI: v_add_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_add_f32_e32 v[[R_F32:[0-9]+]], -2.0, v[[A_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
-; VI: v_add_f16_e32 v[[R_F16:[0-9]+]], -2.0, v[[A_F16]]
+; GFX89: v_add_f16_e32 v[[R_F16:[0-9]+]], -2.0, v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @fsub_f16_imm_b(
+define amdgpu_kernel void @fsub_f16_imm_b(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -61,27 +60,33 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fsub_v2f16
+; GCN-LABEL: {{^}}fsub_v2f16:
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+
+; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
; SI: v_subrev_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI: v_subrev_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_subrev_f16_e32 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
-; VI: v_subrev_f16_e32 v[[R_F16_1:[0-9]+]], v[[B_F16_1]], v[[A_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; VI-DAG: v_subrev_f16_e32 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
+; VI-DAG: v_subrev_f16_sdwa v[[R_F16_HI:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; GFX9: v_pk_add_f16 v[[R_V2_F16:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]] neg_lo:[0,1] neg_hi:[0,1]
+
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @fsub_v2f16(
+
+define amdgpu_kernel void @fsub_v2f16(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
@@ -93,25 +98,32 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fsub_v2f16_imm_a
+; GCN-LABEL: {{^}}fsub_v2f16_imm_a:
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], 0x3c00{{$}}
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], 0x4000{{$}}
+
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_subrev_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; SI: v_subrev_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_sub_f16_e32 v[[R_F16_0:[0-9]+]], 1.0, v[[B_V2_F16]]
-; VI: v_sub_f16_e32 v[[R_F16_1:[0-9]+]], 2.0, v[[B_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_sub_f32_e32 v[[R_F32_0:[0-9]+]], 1.0, v[[B_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_sub_f32_e32 v[[R_F32_1:[0-9]+]], 2.0, v[[B_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; VI: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; VI-DAG: v_sub_f16_e32 v[[R_F16_1:[0-9]+]], 2.0, v[[B_F16_1]]
+; VI-DAG: v_sub_f16_e32 v[[R_F16_0:[0-9]+]], 1.0, v[[B_V2_F16]]
+; VI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; GFX9: s_mov_b32 [[K:s[0-9]+]], 0x40003c00
+; GFX9: v_pk_add_f16 v[[R_V2_F16:[0-9]+]], [[K]], v[[B_V2_F16]] neg_lo:[0,1] neg_hi:[0,1]
+
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @fsub_v2f16_imm_a(
+
+define amdgpu_kernel void @fsub_v2f16_imm_a(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %b) {
entry:
@@ -121,25 +133,32 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}fsub_v2f16_imm_b
+; GCN-LABEL: {{^}}fsub_v2f16_imm_b:
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], 0x4000{{$}}
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], 0x3c00{{$}}
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_subrev_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; SI: v_subrev_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_add_f16_e32 v[[R_F16_0:[0-9]+]], -2.0, v[[A_V2_F16]]
-; VI: v_add_f16_e32 v[[R_F16_1:[0-9]+]], -1.0, v[[A_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_add_f32_e32 v[[R_F32_0:[0-9]+]], -2.0, v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_add_f32_e32 v[[R_F32_1:[0-9]+]], -1.0, v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; VI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; VI-DAG: v_add_f16_e32 v[[R_F16_1:[0-9]+]], -1.0, v[[A_F16_1]]
+; VI-DAG: v_add_f16_e32 v[[R_F16_0:[0-9]+]], -2.0, v[[A_V2_F16]]
+; VI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; GFX9: s_mov_b32 [[K:s[0-9]+]], 0xbc00c000
+; GFX9: v_pk_add_f16 v[[R_V2_F16:[0-9]+]], [[K]], v[[A_V2_F16]]{{$}}
+
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @fsub_v2f16_imm_b(
+
+define amdgpu_kernel void @fsub_v2f16_imm_b(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
diff --git a/test/CodeGen/AMDGPU/fsub.ll b/test/CodeGen/AMDGPU/fsub.ll
index 3429df33c015..e7a92d95d485 100644
--- a/test/CodeGen/AMDGPU/fsub.ll
+++ b/test/CodeGen/AMDGPU/fsub.ll
@@ -4,7 +4,7 @@
; FUNC-LABEL: {{^}}v_fsub_f32:
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-define void @v_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
+define amdgpu_kernel void @v_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
%a = load float, float addrspace(1)* %in, align 4
%b = load float, float addrspace(1)* %b_ptr, align 4
@@ -17,23 +17,19 @@ define void @v_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, -KC0[2].W
; SI: v_sub_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
-define void @s_fsub_f32(float addrspace(1)* %out, float %a, float %b) {
+define amdgpu_kernel void @s_fsub_f32(float addrspace(1)* %out, float %a, float %b) {
%sub = fsub float %a, %b
store float %sub, float addrspace(1)* %out, align 4
ret void
}
-declare float @llvm.r600.load.input(i32) readnone
-
-declare void @llvm.AMDGPU.store.output(float, i32)
-
; FUNC-LABEL: {{^}}fsub_v2f32:
; R600-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[3].X, -KC0[3].Z
; R600-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].W, -KC0[3].Y
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
-define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
+define amdgpu_kernel void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
%sub = fsub <2 x float> %a, %b
store <2 x float> %sub, <2 x float> addrspace(1)* %out, align 8
ret void
@@ -49,7 +45,7 @@ define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x flo
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-define void @v_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+define amdgpu_kernel void @v_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 1
%a = load <4 x float>, <4 x float> addrspace(1)* %in, align 16
%b = load <4 x float>, <4 x float> addrspace(1)* %b_ptr, align 16
@@ -64,8 +60,75 @@ define void @v_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; SI: s_endpgm
-define void @s_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b) {
+define amdgpu_kernel void @s_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b) {
%result = fsub <4 x float> %a, %b
store <4 x float> %result, <4 x float> addrspace(1)* %out, align 16
ret void
}
+
+; FUNC-LABEL: {{^}}v_fneg_fsub_f32:
+; SI: v_subrev_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
+; SI: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[SUB]]
+define amdgpu_kernel void @v_fneg_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
+ %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
+ %a = load float, float addrspace(1)* %in, align 4
+ %b = load float, float addrspace(1)* %b_ptr, align 4
+ %result = fsub float %a, %b
+ %neg.result = fsub float -0.0, %result
+ store float %neg.result, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_fneg_fsub_nsz_f32:
+; SI: v_subrev_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
+; SI-NOT: xor
+define amdgpu_kernel void @v_fneg_fsub_nsz_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
+ %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
+ %a = load float, float addrspace(1)* %in, align 4
+ %b = load float, float addrspace(1)* %b_ptr, align 4
+ %result = fsub nsz float %a, %b
+ %neg.result = fsub float -0.0, %result
+ store float %neg.result, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_fneg_fsub_nsz_attribute_f32:
+; SI: v_subrev_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
+; SI-NOT: xor
+define amdgpu_kernel void @v_fneg_fsub_nsz_attribute_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+ %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
+ %a = load float, float addrspace(1)* %in, align 4
+ %b = load float, float addrspace(1)* %b_ptr, align 4
+ %result = fsub float %a, %b
+ %neg.result = fsub float -0.0, %result
+ store float %neg.result, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; The attribute value is the string "true" or "false" rather than a bool,
+; so make sure the fold stays disabled and the fneg is not folded unless
+; the value is exactly "true" (see the sketch after this file's diff).
+; FUNC-LABEL: {{^}}v_fneg_fsub_nsz_false_attribute_f32:
+; SI: v_subrev_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
+; SI: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[SUB]]
+define amdgpu_kernel void @v_fneg_fsub_nsz_false_attribute_f32(float addrspace(1)* %out, float addrspace(1)* %in) #1 {
+ %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
+ %a = load float, float addrspace(1)* %in, align 4
+ %b = load float, float addrspace(1)* %b_ptr, align 4
+ %result = fsub float %a, %b
+ %neg.result = fsub float -0.0, %result
+ store float %neg.result, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_fsub_0_nsz_attribute_f32:
+; SI-NOT: v_sub
+define amdgpu_kernel void @v_fsub_0_nsz_attribute_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+ %a = load float, float addrspace(1)* %in, align 4
+ %result = fsub float %a, 0.0
+ store float %result, float addrspace(1)* %out, align 4
+ ret void
+}
+
+attributes #0 = { nounwind "no-signed-zeros-fp-math"="true" }
+attributes #1 = { nounwind "no-signed-zeros-fp-math"="false" }
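
Note: a minimal sketch of the attribute check (hypothetical kernel name; the
attribute spelling matches the lines above). With
"no-signed-zeros-fp-math"="true" the trailing fneg may fold into the subtract
by swapping its operands; with "false" an explicit sign flip (v_xor_b32 with
0x80000000) must remain:

  define amdgpu_kernel void @nsz_sketch(float addrspace(1)* %out, float %a, float %b) #0 {
    %sub = fsub float %a, %b
    %neg = fsub float -0.0, %sub   ; with nsz, foldable to fsub %b, %a
    store float %neg, float addrspace(1)* %out
    ret void
  }
  attributes #0 = { nounwind "no-signed-zeros-fp-math"="true" }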
diff --git a/test/CodeGen/AMDGPU/fsub64.ll b/test/CodeGen/AMDGPU/fsub64.ll
index 4c9c5ddd4c6e..1b0879d098ee 100644
--- a/test/CodeGen/AMDGPU/fsub64.ll
+++ b/test/CodeGen/AMDGPU/fsub64.ll
@@ -5,7 +5,7 @@ declare double @llvm.fabs.f64(double) #0
; SI-LABEL: {{^}}fsub_f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
-define void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+define amdgpu_kernel void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double, double addrspace(1)* %in1
%r1 = load double, double addrspace(1)* %in2
@@ -16,7 +16,7 @@ define void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
; SI-LABEL: {{^}}fsub_fabs_f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -\|v\[[0-9]+:[0-9]+\]\|}}
-define void @fsub_fabs_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+define amdgpu_kernel void @fsub_fabs_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double, double addrspace(1)* %in1
%r1 = load double, double addrspace(1)* %in2
@@ -28,7 +28,7 @@ define void @fsub_fabs_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
; SI-LABEL: {{^}}fsub_fabs_inv_f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], |v\[[0-9]+:[0-9]+\]|, -v\[[0-9]+:[0-9]+\]}}
-define void @fsub_fabs_inv_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+define amdgpu_kernel void @fsub_fabs_inv_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double, double addrspace(1)* %in1
%r1 = load double, double addrspace(1)* %in2
@@ -40,7 +40,7 @@ define void @fsub_fabs_inv_f64(double addrspace(1)* %out, double addrspace(1)* %
; SI-LABEL: {{^}}s_fsub_f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\]}}
-define void @s_fsub_f64(double addrspace(1)* %out, double %a, double %b) {
+define amdgpu_kernel void @s_fsub_f64(double addrspace(1)* %out, double %a, double %b) {
%sub = fsub double %a, %b
store double %sub, double addrspace(1)* %out
ret void
@@ -48,7 +48,7 @@ define void @s_fsub_f64(double addrspace(1)* %out, double %a, double %b) {
; SI-LABEL: {{^}}s_fsub_imm_f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], -s\[[0-9]+:[0-9]+\]}}, 4.0
-define void @s_fsub_imm_f64(double addrspace(1)* %out, double %a, double %b) {
+define amdgpu_kernel void @s_fsub_imm_f64(double addrspace(1)* %out, double %a, double %b) {
%sub = fsub double 4.0, %a
store double %sub, double addrspace(1)* %out
ret void
@@ -56,7 +56,7 @@ define void @s_fsub_imm_f64(double addrspace(1)* %out, double %a, double %b) {
; SI-LABEL: {{^}}s_fsub_imm_inv_f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\]}}, -4.0
-define void @s_fsub_imm_inv_f64(double addrspace(1)* %out, double %a, double %b) {
+define amdgpu_kernel void @s_fsub_imm_inv_f64(double addrspace(1)* %out, double %a, double %b) {
%sub = fsub double %a, 4.0
store double %sub, double addrspace(1)* %out
ret void
@@ -64,7 +64,7 @@ define void @s_fsub_imm_inv_f64(double addrspace(1)* %out, double %a, double %b)
; SI-LABEL: {{^}}s_fsub_self_f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -s\[[0-9]+:[0-9]+\]}}
-define void @s_fsub_self_f64(double addrspace(1)* %out, double %a) {
+define amdgpu_kernel void @s_fsub_self_f64(double addrspace(1)* %out, double %a) {
%sub = fsub double %a, %a
store double %sub, double addrspace(1)* %out
ret void
@@ -73,7 +73,7 @@ define void @s_fsub_self_f64(double addrspace(1)* %out, double %a) {
; SI-LABEL: {{^}}fsub_v2f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
-define void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b) {
+define amdgpu_kernel void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b) {
%sub = fsub <2 x double> %a, %b
store <2 x double> %sub, <2 x double> addrspace(1)* %out
ret void
@@ -84,7 +84,7 @@ define void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <2 x d
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
-define void @fsub_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in) {
+define amdgpu_kernel void @fsub_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x double>, <4 x double> addrspace(1)* %in, i32 1
%a = load <4 x double>, <4 x double> addrspace(1)* %in
%b = load <4 x double>, <4 x double> addrspace(1)* %b_ptr
@@ -98,7 +98,7 @@ define void @fsub_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
-define void @s_fsub_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b) {
+define amdgpu_kernel void @s_fsub_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b) {
%result = fsub <4 x double> %a, %b
store <4 x double> %result, <4 x double> addrspace(1)* %out, align 16
ret void
diff --git a/test/CodeGen/AMDGPU/ftrunc.f64.ll b/test/CodeGen/AMDGPU/ftrunc.f64.ll
index c4138ad79c28..1f72ec65588e 100644
--- a/test/CodeGen/AMDGPU/ftrunc.f64.ll
+++ b/test/CodeGen/AMDGPU/ftrunc.f64.ll
@@ -13,7 +13,7 @@ declare <16 x double> @llvm.trunc.v16f64(<16 x double>) nounwind readnone
; CI: v_trunc_f64
; SI: v_bfe_u32 {{v[0-9]+}}, {{v[0-9]+}}, 20, 11
; SI: s_endpgm
-define void @v_ftrunc_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
+define amdgpu_kernel void @v_ftrunc_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
%x = load double, double addrspace(1)* %in, align 8
%y = call double @llvm.trunc.f64(double %x) nounwind readnone
store double %y, double addrspace(1)* %out, align 8
@@ -36,7 +36,7 @@ define void @v_ftrunc_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
; SI-DAG: cndmask_b32
; SI-DAG: cndmask_b32
; SI: s_endpgm
-define void @ftrunc_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @ftrunc_f64(double addrspace(1)* %out, double %x) {
%y = call double @llvm.trunc.f64(double %x) nounwind readnone
store double %y, double addrspace(1)* %out
ret void
@@ -45,7 +45,7 @@ define void @ftrunc_f64(double addrspace(1)* %out, double %x) {
; FUNC-LABEL: {{^}}ftrunc_v2f64:
; CI: v_trunc_f64_e32
; CI: v_trunc_f64_e32
-define void @ftrunc_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
+define amdgpu_kernel void @ftrunc_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
%y = call <2 x double> @llvm.trunc.v2f64(<2 x double> %x) nounwind readnone
store <2 x double> %y, <2 x double> addrspace(1)* %out
ret void
@@ -55,7 +55,7 @@ define void @ftrunc_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
; FIXME-CI: v_trunc_f64_e32
; FIXME-CI: v_trunc_f64_e32
; FIXME-CI: v_trunc_f64_e32
-; define void @ftrunc_v3f64(<3 x double> addrspace(1)* %out, <3 x double> %x) {
+; define amdgpu_kernel void @ftrunc_v3f64(<3 x double> addrspace(1)* %out, <3 x double> %x) {
; %y = call <3 x double> @llvm.trunc.v3f64(<3 x double> %x) nounwind readnone
; store <3 x double> %y, <3 x double> addrspace(1)* %out
; ret void
@@ -66,7 +66,7 @@ define void @ftrunc_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
; CI: v_trunc_f64_e32
; CI: v_trunc_f64_e32
; CI: v_trunc_f64_e32
-define void @ftrunc_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
+define amdgpu_kernel void @ftrunc_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
%y = call <4 x double> @llvm.trunc.v4f64(<4 x double> %x) nounwind readnone
store <4 x double> %y, <4 x double> addrspace(1)* %out
ret void
@@ -81,7 +81,7 @@ define void @ftrunc_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
; CI: v_trunc_f64_e32
; CI: v_trunc_f64_e32
; CI: v_trunc_f64_e32
-define void @ftrunc_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
+define amdgpu_kernel void @ftrunc_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
%y = call <8 x double> @llvm.trunc.v8f64(<8 x double> %x) nounwind readnone
store <8 x double> %y, <8 x double> addrspace(1)* %out
ret void
@@ -104,7 +104,7 @@ define void @ftrunc_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
; CI: v_trunc_f64_e32
; CI: v_trunc_f64_e32
; CI: v_trunc_f64_e32
-define void @ftrunc_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %x) {
+define amdgpu_kernel void @ftrunc_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %x) {
%y = call <16 x double> @llvm.trunc.v16f64(<16 x double> %x) nounwind readnone
store <16 x double> %y, <16 x double> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/ftrunc.ll b/test/CodeGen/AMDGPU/ftrunc.ll
index d0718394e7f1..b5ad01eaeaf0 100644
--- a/test/CodeGen/AMDGPU/ftrunc.ll
+++ b/test/CodeGen/AMDGPU/ftrunc.ll
@@ -12,7 +12,7 @@ declare <16 x float> @llvm.trunc.v16f32(<16 x float>) nounwind readnone
; FUNC-LABEL: {{^}}ftrunc_f32:
; EG: TRUNC
; SI: v_trunc_f32_e32
-define void @ftrunc_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @ftrunc_f32(float addrspace(1)* %out, float %x) {
%y = call float @llvm.trunc.f32(float %x) nounwind readnone
store float %y, float addrspace(1)* %out
ret void
@@ -23,7 +23,7 @@ define void @ftrunc_f32(float addrspace(1)* %out, float %x) {
; EG: TRUNC
; SI: v_trunc_f32_e32
; SI: v_trunc_f32_e32
-define void @ftrunc_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %x) {
+define amdgpu_kernel void @ftrunc_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %x) {
%y = call <2 x float> @llvm.trunc.v2f32(<2 x float> %x) nounwind readnone
store <2 x float> %y, <2 x float> addrspace(1)* %out
ret void
@@ -36,7 +36,7 @@ define void @ftrunc_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %x) {
; FIXME-SI: v_trunc_f32_e32
; FIXME-SI: v_trunc_f32_e32
; FIXME-SI: v_trunc_f32_e32
-; define void @ftrunc_v3f32(<3 x float> addrspace(1)* %out, <3 x float> %x) {
+; define amdgpu_kernel void @ftrunc_v3f32(<3 x float> addrspace(1)* %out, <3 x float> %x) {
; %y = call <3 x float> @llvm.trunc.v3f32(<3 x float> %x) nounwind readnone
; store <3 x float> %y, <3 x float> addrspace(1)* %out
; ret void
@@ -51,7 +51,7 @@ define void @ftrunc_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %x) {
; SI: v_trunc_f32_e32
; SI: v_trunc_f32_e32
; SI: v_trunc_f32_e32
-define void @ftrunc_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %x) {
+define amdgpu_kernel void @ftrunc_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %x) {
%y = call <4 x float> @llvm.trunc.v4f32(<4 x float> %x) nounwind readnone
store <4 x float> %y, <4 x float> addrspace(1)* %out
ret void
@@ -74,7 +74,7 @@ define void @ftrunc_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %x) {
; SI: v_trunc_f32_e32
; SI: v_trunc_f32_e32
; SI: v_trunc_f32_e32
-define void @ftrunc_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %x) {
+define amdgpu_kernel void @ftrunc_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %x) {
%y = call <8 x float> @llvm.trunc.v8f32(<8 x float> %x) nounwind readnone
store <8 x float> %y, <8 x float> addrspace(1)* %out
ret void
@@ -113,7 +113,7 @@ define void @ftrunc_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %x) {
; SI: v_trunc_f32_e32
; SI: v_trunc_f32_e32
; SI: v_trunc_f32_e32
-define void @ftrunc_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %x) {
+define amdgpu_kernel void @ftrunc_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %x) {
%y = call <16 x float> @llvm.trunc.v16f32(<16 x float> %x) nounwind readnone
store <16 x float> %y, <16 x float> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/gep-address-space.ll b/test/CodeGen/AMDGPU/gep-address-space.ll
index f96463613e8e..7fb47e08ea58 100644
--- a/test/CodeGen/AMDGPU/gep-address-space.ll
+++ b/test/CodeGen/AMDGPU/gep-address-space.ll
@@ -2,7 +2,7 @@
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs< %s | FileCheck --check-prefix=CI --check-prefix=CHECK %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck --check-prefix=CI --check-prefix=CHECK %s
-define void @use_gep_address_space([1024 x i32] addrspace(3)* %array) nounwind {
+define amdgpu_kernel void @use_gep_address_space([1024 x i32] addrspace(3)* %array) nounwind {
; CHECK-LABEL: {{^}}use_gep_address_space:
; CHECK: v_mov_b32_e32 [[PTR:v[0-9]+]], s{{[0-9]+}}
; CHECK: ds_write_b32 [[PTR]], v{{[0-9]+}} offset:64
@@ -17,7 +17,7 @@ define void @use_gep_address_space([1024 x i32] addrspace(3)* %array) nounwind {
; SI: s_or_b32
; CI: s_add_i32
; CHECK: ds_write_b32
-define void @use_gep_address_space_large_offset([1024 x i32] addrspace(3)* %array) nounwind {
+define amdgpu_kernel void @use_gep_address_space_large_offset([1024 x i32] addrspace(3)* %array) nounwind {
%p = getelementptr [1024 x i32], [1024 x i32] addrspace(3)* %array, i16 0, i16 16384
store i32 99, i32 addrspace(3)* %p
ret void
@@ -39,7 +39,7 @@ define void @use_gep_address_space_large_offset([1024 x i32] addrspace(3)* %arra
; CI-DAG: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:64
; CI-DAG: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:64
; CHECK: s_endpgm
-define void @gep_as_vector_v4(<4 x [1024 x i32] addrspace(3)*> %array) nounwind {
+define amdgpu_kernel void @gep_as_vector_v4(<4 x [1024 x i32] addrspace(3)*> %array) nounwind {
%p = getelementptr [1024 x i32], <4 x [1024 x i32] addrspace(3)*> %array, <4 x i16> zeroinitializer, <4 x i16> <i16 16, i16 16, i16 16, i16 16>
%p0 = extractelement <4 x i32 addrspace(3)*> %p, i32 0
%p1 = extractelement <4 x i32 addrspace(3)*> %p, i32 1
@@ -60,7 +60,7 @@ define void @gep_as_vector_v4(<4 x [1024 x i32] addrspace(3)*> %array) nounwind
; CI-DAG: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:64
; CI-DAG: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:64
; CHECK: s_endpgm
-define void @gep_as_vector_v2(<2 x [1024 x i32] addrspace(3)*> %array) nounwind {
+define amdgpu_kernel void @gep_as_vector_v2(<2 x [1024 x i32] addrspace(3)*> %array) nounwind {
%p = getelementptr [1024 x i32], <2 x [1024 x i32] addrspace(3)*> %array, <2 x i16> zeroinitializer, <2 x i16> <i16 16, i16 16>
%p0 = extractelement <2 x i32 addrspace(3)*> %p, i32 0
%p1 = extractelement <2 x i32 addrspace(3)*> %p, i32 1
diff --git a/test/CodeGen/AMDGPU/global-constant.ll b/test/CodeGen/AMDGPU/global-constant.ll
index 5a18d425d506..80acfcca7082 100644
--- a/test/CodeGen/AMDGPU/global-constant.ll
+++ b/test/CodeGen/AMDGPU/global-constant.ll
@@ -26,7 +26,7 @@
; HSA: s_getpc_b64 s{{\[}}[[PC1_LO:[0-9]+]]:[[PC1_HI:[0-9]+]]{{\]}}
; HSA: s_add_u32 s{{[0-9]+}}, s[[PC1_LO]], private2@rel32@lo+4
; HSA: s_addc_u32 s{{[0-9]+}}, s[[PC1_HI]], private2@rel32@hi+4
-define void @private_test(i32 %index, float addrspace(1)* %out) {
+define amdgpu_kernel void @private_test(i32 %index, float addrspace(1)* %out) {
%ptr = getelementptr [4 x float], [4 x float] addrspace(2) * @private1, i32 0, i32 %index
%val = load float, float addrspace(2)* %ptr
store float %val, float addrspace(1)* %out
@@ -40,7 +40,7 @@ define void @private_test(i32 %index, float addrspace(1)* %out) {
; HSA: s_getpc_b64 s{{\[}}[[PC0_LO:[0-9]+]]:[[PC0_HI:[0-9]+]]{{\]}}
; HSA: s_add_u32 s{{[0-9]+}}, s[[PC0_LO]], available_externally@gotpcrel32@lo+4
; HSA: s_addc_u32 s{{[0-9]+}}, s[[PC0_HI]], available_externally@gotpcrel32@hi+4
-define void @available_externally_test(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @available_externally_test(i32 addrspace(1)* %out) {
%ptr = getelementptr [256 x i32], [256 x i32] addrspace(2)* @available_externally, i32 0, i32 1
%val = load i32, i32 addrspace(2)* %ptr
store i32 %val, i32 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/global-directive.ll b/test/CodeGen/AMDGPU/global-directive.ll
index 450b7d367429..ce89e390eac1 100644
--- a/test/CodeGen/AMDGPU/global-directive.ll
+++ b/test/CodeGen/AMDGPU/global-directive.ll
@@ -5,7 +5,7 @@
; SI: .globl foo
; SI: {{^}}foo:
-define void @foo(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @foo(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
%b = load i32, i32 addrspace(1)* %b_ptr
diff --git a/test/CodeGen/AMDGPU/global-extload-i16.ll b/test/CodeGen/AMDGPU/global-extload-i16.ll
index 2c7c02de1673..19e592f50bea 100644
--- a/test/CodeGen/AMDGPU/global-extload-i16.ll
+++ b/test/CodeGen/AMDGPU/global-extload-i16.ll
@@ -7,7 +7,7 @@
; SI: buffer_load_ushort
; SI: buffer_store_dword
; SI: s_endpgm
-define void @zextload_global_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @zextload_global_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
%a = load i16, i16 addrspace(1)* %in
%ext = zext i16 %a to i32
store i32 %ext, i32 addrspace(1)* %out
@@ -18,7 +18,7 @@ define void @zextload_global_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)
; SI: buffer_load_sshort
; SI: buffer_store_dword
; SI: s_endpgm
-define void @sextload_global_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @sextload_global_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
%a = load i16, i16 addrspace(1)* %in
%ext = sext i16 %a to i32
store i32 %ext, i32 addrspace(1)* %out
@@ -28,7 +28,7 @@ define void @sextload_global_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)
; FUNC-LABEL: {{^}}zextload_global_v1i16_to_v1i32:
; SI: buffer_load_ushort
; SI: s_endpgm
-define void @zextload_global_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @zextload_global_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <1 x i16>, <1 x i16> addrspace(1)* %in
%ext = zext <1 x i16> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
@@ -38,7 +38,7 @@ define void @zextload_global_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i
; FUNC-LABEL: {{^}}sextload_global_v1i16_to_v1i32:
; SI: buffer_load_sshort
; SI: s_endpgm
-define void @sextload_global_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @sextload_global_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <1 x i16>, <1 x i16> addrspace(1)* %in
%ext = sext <1 x i16> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
@@ -47,7 +47,7 @@ define void @sextload_global_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i
; FUNC-LABEL: {{^}}zextload_global_v2i16_to_v2i32:
; SI: s_endpgm
-define void @zextload_global_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @zextload_global_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <2 x i16>, <2 x i16> addrspace(1)* %in
%ext = zext <2 x i16> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
@@ -56,7 +56,7 @@ define void @zextload_global_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i
; FUNC-LABEL: {{^}}sextload_global_v2i16_to_v2i32:
; SI: s_endpgm
-define void @sextload_global_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @sextload_global_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <2 x i16>, <2 x i16> addrspace(1)* %in
%ext = sext <2 x i16> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
@@ -65,7 +65,7 @@ define void @sextload_global_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i
; FUNC-LABEL: {{^}}zextload_global_v4i16_to_v4i32:
; SI: s_endpgm
-define void @zextload_global_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @zextload_global_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <4 x i16>, <4 x i16> addrspace(1)* %in
%ext = zext <4 x i16> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
@@ -74,7 +74,7 @@ define void @zextload_global_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i
; FUNC-LABEL: {{^}}sextload_global_v4i16_to_v4i32:
; SI: s_endpgm
-define void @sextload_global_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @sextload_global_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <4 x i16>, <4 x i16> addrspace(1)* %in
%ext = sext <4 x i16> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
@@ -83,7 +83,7 @@ define void @sextload_global_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i
; FUNC-LABEL: {{^}}zextload_global_v8i16_to_v8i32:
; SI: s_endpgm
-define void @zextload_global_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @zextload_global_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <8 x i16>, <8 x i16> addrspace(1)* %in
%ext = zext <8 x i16> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
@@ -92,7 +92,7 @@ define void @zextload_global_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i
; FUNC-LABEL: {{^}}sextload_global_v8i16_to_v8i32:
; SI: s_endpgm
-define void @sextload_global_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @sextload_global_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <8 x i16>, <8 x i16> addrspace(1)* %in
%ext = sext <8 x i16> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
@@ -101,7 +101,7 @@ define void @sextload_global_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i
; FUNC-LABEL: {{^}}zextload_global_v16i16_to_v16i32:
; SI: s_endpgm
-define void @zextload_global_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @zextload_global_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <16 x i16>, <16 x i16> addrspace(1)* %in
%ext = zext <16 x i16> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
@@ -110,7 +110,7 @@ define void @zextload_global_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16
; FUNC-LABEL: {{^}}sextload_global_v16i16_to_v16i32:
; SI: s_endpgm
-define void @sextload_global_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @sextload_global_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <16 x i16>, <16 x i16> addrspace(1)* %in
%ext = sext <16 x i16> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
@@ -119,7 +119,7 @@ define void @sextload_global_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16
; FUNC-LABEL: {{^}}zextload_global_v32i16_to_v32i32:
; SI: s_endpgm
-define void @zextload_global_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @zextload_global_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <32 x i16>, <32 x i16> addrspace(1)* %in
%ext = zext <32 x i16> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
@@ -128,7 +128,7 @@ define void @zextload_global_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32
; FUNC-LABEL: {{^}}sextload_global_v32i16_to_v32i32:
; SI: s_endpgm
-define void @sextload_global_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @sextload_global_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <32 x i16>, <32 x i16> addrspace(1)* %in
%ext = sext <32 x i16> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
@@ -137,7 +137,7 @@ define void @sextload_global_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32
; FUNC-LABEL: {{^}}zextload_global_v64i16_to_v64i32:
; SI: s_endpgm
-define void @zextload_global_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @zextload_global_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <64 x i16>, <64 x i16> addrspace(1)* %in
%ext = zext <64 x i16> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
@@ -146,7 +146,7 @@ define void @zextload_global_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64
; FUNC-LABEL: {{^}}sextload_global_v64i16_to_v64i32:
; SI: s_endpgm
-define void @sextload_global_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @sextload_global_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <64 x i16>, <64 x i16> addrspace(1)* %in
%ext = sext <64 x i16> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
@@ -157,7 +157,7 @@ define void @sextload_global_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64
; SI-DAG: buffer_load_ushort v[[LO:[0-9]+]],
; SI-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; SI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]]
-define void @zextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @zextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
%a = load i16, i16 addrspace(1)* %in
%ext = zext i16 %a to i64
store i64 %ext, i64 addrspace(1)* %out
@@ -168,7 +168,7 @@ define void @zextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)
; VI: buffer_load_ushort [[LOAD:v[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0
; VI: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, [[LOAD]]
; VI: buffer_store_dwordx2 v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0
-define void @sextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @sextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
%a = load i16, i16 addrspace(1)* %in
%ext = sext i16 %a to i64
store i64 %ext, i64 addrspace(1)* %out
@@ -177,7 +177,7 @@ define void @sextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)
; FUNC-LABEL: {{^}}zextload_global_v1i16_to_v1i64:
; SI: s_endpgm
-define void @zextload_global_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @zextload_global_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <1 x i16>, <1 x i16> addrspace(1)* %in
%ext = zext <1 x i16> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
@@ -186,7 +186,7 @@ define void @zextload_global_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i
; FUNC-LABEL: {{^}}sextload_global_v1i16_to_v1i64:
; SI: s_endpgm
-define void @sextload_global_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @sextload_global_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <1 x i16>, <1 x i16> addrspace(1)* %in
%ext = sext <1 x i16> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
@@ -195,7 +195,7 @@ define void @sextload_global_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i
; FUNC-LABEL: {{^}}zextload_global_v2i16_to_v2i64:
; SI: s_endpgm
-define void @zextload_global_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @zextload_global_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <2 x i16>, <2 x i16> addrspace(1)* %in
%ext = zext <2 x i16> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
@@ -204,7 +204,7 @@ define void @zextload_global_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i
; FUNC-LABEL: {{^}}sextload_global_v2i16_to_v2i64:
; SI: s_endpgm
-define void @sextload_global_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @sextload_global_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <2 x i16>, <2 x i16> addrspace(1)* %in
%ext = sext <2 x i16> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
@@ -213,7 +213,7 @@ define void @sextload_global_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i
; FUNC-LABEL: {{^}}zextload_global_v4i16_to_v4i64:
; SI: s_endpgm
-define void @zextload_global_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @zextload_global_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <4 x i16>, <4 x i16> addrspace(1)* %in
%ext = zext <4 x i16> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
@@ -222,7 +222,7 @@ define void @zextload_global_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i
; FUNC-LABEL: {{^}}sextload_global_v4i16_to_v4i64:
; SI: s_endpgm
-define void @sextload_global_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @sextload_global_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <4 x i16>, <4 x i16> addrspace(1)* %in
%ext = sext <4 x i16> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
@@ -231,7 +231,7 @@ define void @sextload_global_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i
; FUNC-LABEL: {{^}}zextload_global_v8i16_to_v8i64:
; SI: s_endpgm
-define void @zextload_global_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @zextload_global_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <8 x i16>, <8 x i16> addrspace(1)* %in
%ext = zext <8 x i16> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
@@ -240,7 +240,7 @@ define void @zextload_global_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i
; FUNC-LABEL: {{^}}sextload_global_v8i16_to_v8i64:
; SI: s_endpgm
-define void @sextload_global_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @sextload_global_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <8 x i16>, <8 x i16> addrspace(1)* %in
%ext = sext <8 x i16> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
@@ -249,7 +249,7 @@ define void @sextload_global_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i
; FUNC-LABEL: {{^}}zextload_global_v16i16_to_v16i64:
; SI: s_endpgm
-define void @zextload_global_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @zextload_global_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <16 x i16>, <16 x i16> addrspace(1)* %in
%ext = zext <16 x i16> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
@@ -258,7 +258,7 @@ define void @zextload_global_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16
; FUNC-LABEL: {{^}}sextload_global_v16i16_to_v16i64:
; SI: s_endpgm
-define void @sextload_global_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @sextload_global_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <16 x i16>, <16 x i16> addrspace(1)* %in
%ext = sext <16 x i16> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
@@ -267,7 +267,7 @@ define void @sextload_global_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16
; FUNC-LABEL: {{^}}zextload_global_v32i16_to_v32i64:
; SI: s_endpgm
-define void @zextload_global_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @zextload_global_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <32 x i16>, <32 x i16> addrspace(1)* %in
%ext = zext <32 x i16> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
@@ -276,7 +276,7 @@ define void @zextload_global_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32
; FUNC-LABEL: {{^}}sextload_global_v32i16_to_v32i64:
; SI: s_endpgm
-define void @sextload_global_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @sextload_global_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <32 x i16>, <32 x i16> addrspace(1)* %in
%ext = sext <32 x i16> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
@@ -285,7 +285,7 @@ define void @sextload_global_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32
; FUNC-LABEL: {{^}}zextload_global_v64i16_to_v64i64:
; SI: s_endpgm
-define void @zextload_global_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @zextload_global_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <64 x i16>, <64 x i16> addrspace(1)* %in
%ext = zext <64 x i16> %load to <64 x i64>
store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
@@ -294,7 +294,7 @@ define void @zextload_global_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64
; FUNC-LABEL: {{^}}sextload_global_v64i16_to_v64i64:
; SI: s_endpgm
-define void @sextload_global_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
+define amdgpu_kernel void @sextload_global_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
%load = load <64 x i16>, <64 x i16> addrspace(1)* %in
%ext = sext <64 x i16> %load to <64 x i64>
store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/global-variable-relocs.ll b/test/CodeGen/AMDGPU/global-variable-relocs.ll
index 00be6e4d5c15..ae6dd54fec6c 100644
--- a/test/CodeGen/AMDGPU/global-variable-relocs.ll
+++ b/test/CodeGen/AMDGPU/global-variable-relocs.ll
@@ -19,7 +19,7 @@
; CHECK-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[ADDR_LO]]
; CHECK-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[ADDR_HI]]
; CHECK: flat_load_dword v{{[0-9]+}}, v{{\[}}[[V_LO]]:[[V_HI]]{{\]}}
-define void @private_test(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @private_test(i32 addrspace(1)* %out) {
%ptr = getelementptr [256 x i32], [256 x i32] addrspace(1)* @private, i32 0, i32 1
%val = load i32, i32 addrspace(1)* %ptr
store i32 %val, i32 addrspace(1)* %out
@@ -33,7 +33,7 @@ define void @private_test(i32 addrspace(1)* %out) {
; CHECK-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[ADDR_LO]]
; CHECK-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[ADDR_HI]]
; CHECK: flat_load_dword v{{[0-9]+}}, v{{\[}}[[V_LO]]:[[V_HI]]{{\]}}
-define void @internal_test(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @internal_test(i32 addrspace(1)* %out) {
%ptr = getelementptr [256 x i32], [256 x i32] addrspace(1)* @internal, i32 0, i32 1
%val = load i32, i32 addrspace(1)* %ptr
store i32 %val, i32 addrspace(1)* %out
@@ -50,7 +50,7 @@ define void @internal_test(i32 addrspace(1)* %out) {
; CHECK-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[GEP_LO]]
; CHECK-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[GEP_HI]]
; CHECK: flat_load_dword v{{[0-9]+}}, v{{\[}}[[V_LO]]:[[V_HI]]{{\]}}
-define void @available_externally_test(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @available_externally_test(i32 addrspace(1)* %out) {
%ptr = getelementptr [256 x i32], [256 x i32] addrspace(1)* @available_externally, i32 0, i32 1
%val = load i32, i32 addrspace(1)* %ptr
store i32 %val, i32 addrspace(1)* %out
@@ -67,7 +67,7 @@ define void @available_externally_test(i32 addrspace(1)* %out) {
; CHECK-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[GEP_LO]]
; CHECK-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[GEP_HI]]
; CHECK: flat_load_dword v{{[0-9]+}}, v{{\[}}[[V_LO]]:[[V_HI]]{{\]}}
-define void @linkonce_test(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @linkonce_test(i32 addrspace(1)* %out) {
%ptr = getelementptr [256 x i32], [256 x i32] addrspace(1)* @linkonce, i32 0, i32 1
%val = load i32, i32 addrspace(1)* %ptr
store i32 %val, i32 addrspace(1)* %out
@@ -84,7 +84,7 @@ define void @linkonce_test(i32 addrspace(1)* %out) {
; CHECK-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[GEP_LO]]
; CHECK-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[GEP_HI]]
; CHECK: flat_load_dword v{{[0-9]+}}, v{{\[}}[[V_LO]]:[[V_HI]]{{\]}}
-define void @weak_test(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @weak_test(i32 addrspace(1)* %out) {
%ptr = getelementptr [256 x i32], [256 x i32] addrspace(1)* @weak, i32 0, i32 1
%val = load i32, i32 addrspace(1)* %ptr
store i32 %val, i32 addrspace(1)* %out
@@ -101,7 +101,7 @@ define void @weak_test(i32 addrspace(1)* %out) {
; CHECK-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[GEP_LO]]
; CHECK-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[GEP_HI]]
; CHECK: flat_load_dword v{{[0-9]+}}, v{{\[}}[[V_LO]]:[[V_HI]]{{\]}}
-define void @common_test(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @common_test(i32 addrspace(1)* %out) {
%ptr = getelementptr [256 x i32], [256 x i32] addrspace(1)* @common, i32 0, i32 1
%val = load i32, i32 addrspace(1)* %ptr
store i32 %val, i32 addrspace(1)* %out
@@ -118,7 +118,7 @@ define void @common_test(i32 addrspace(1)* %out) {
; CHECK-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[GEP_LO]]
; CHECK-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[GEP_HI]]
; CHECK: flat_load_dword v{{[0-9]+}}, v{{\[}}[[V_LO]]:[[V_HI]]{{\]}}
-define void @extern_weak_test(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @extern_weak_test(i32 addrspace(1)* %out) {
%ptr = getelementptr [256 x i32], [256 x i32] addrspace(1)* @extern_weak, i32 0, i32 1
%val = load i32, i32 addrspace(1)* %ptr
store i32 %val, i32 addrspace(1)* %out
@@ -135,7 +135,7 @@ define void @extern_weak_test(i32 addrspace(1)* %out) {
; CHECK-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[GEP_LO]]
; CHECK-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[GEP_HI]]
; CHECK: flat_load_dword v{{[0-9]+}}, v{{\[}}[[V_LO]]:[[V_HI]]{{\]}}
-define void @linkonce_odr_test(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @linkonce_odr_test(i32 addrspace(1)* %out) {
%ptr = getelementptr [256 x i32], [256 x i32] addrspace(1)* @linkonce_odr, i32 0, i32 1
%val = load i32, i32 addrspace(1)* %ptr
store i32 %val, i32 addrspace(1)* %out
@@ -152,7 +152,7 @@ define void @linkonce_odr_test(i32 addrspace(1)* %out) {
; CHECK-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[GEP_LO]]
; CHECK-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[GEP_HI]]
; CHECK: flat_load_dword v{{[0-9]+}}, v{{\[}}[[V_LO]]:[[V_HI]]{{\]}}
-define void @weak_odr_test(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @weak_odr_test(i32 addrspace(1)* %out) {
%ptr = getelementptr [256 x i32], [256 x i32] addrspace(1)* @weak_odr, i32 0, i32 1
%val = load i32, i32 addrspace(1)* %ptr
store i32 %val, i32 addrspace(1)* %out
@@ -169,7 +169,7 @@ define void @weak_odr_test(i32 addrspace(1)* %out) {
; CHECK-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[GEP_LO]]
; CHECK-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[GEP_HI]]
; CHECK: flat_load_dword v{{[0-9]+}}, v{{\[}}[[V_LO]]:[[V_HI]]{{\]}}
-define void @external_test(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @external_test(i32 addrspace(1)* %out) {
%ptr = getelementptr [256 x i32], [256 x i32] addrspace(1)* @external, i32 0, i32 1
%val = load i32, i32 addrspace(1)* %ptr
store i32 %val, i32 addrspace(1)* %out
@@ -186,7 +186,7 @@ define void @external_test(i32 addrspace(1)* %out) {
; CHECK-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[GEP_LO]]
; CHECK-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[GEP_HI]]
; CHECK: flat_load_dword v{{[0-9]+}}, v{{\[}}[[V_LO]]:[[V_HI]]{{\]}}
-define void @external_w_init_test(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @external_w_init_test(i32 addrspace(1)* %out) {
%ptr = getelementptr [256 x i32], [256 x i32] addrspace(1)* @external_w_init, i32 0, i32 1
%val = load i32, i32 addrspace(1)* %ptr
store i32 %val, i32 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/global_atomics.ll b/test/CodeGen/AMDGPU/global_atomics.ll
index 909ceb5546c6..6928bede547e 100644
--- a/test/CodeGen/AMDGPU/global_atomics.ll
+++ b/test/CodeGen/AMDGPU/global_atomics.ll
@@ -3,7 +3,7 @@
; FUNC-LABEL: {{^}}atomic_add_i32_offset:
; GCN: buffer_atomic_add v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
-define void @atomic_add_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_add_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -13,7 +13,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_add_i32_soffset:
; GCN: s_mov_b32 [[SREG:s[0-9]+]], 0x8ca0
; GCN: buffer_atomic_add v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], [[SREG]]{{$}}
-define void @atomic_add_i32_soffset(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_add_i32_soffset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 9000
%val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -25,7 +25,7 @@ entry:
; SI-DAG: v_mov_b32_e32 v[[PTRHI:[0-9]+]], 0xabcd
; SI: buffer_atomic_add v{{[0-9]+}}, v{{\[}}[[PTRLO]]:[[PTRHI]]{{\]}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_add
-define void @atomic_add_i32_huge_offset(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_add_i32_huge_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 47224239175595
@@ -36,7 +36,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_add_i32_ret_offset:
; GCN: buffer_atomic_add [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_add_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_add_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -47,7 +47,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_add_i32_addr64_offset:
; SI: buffer_atomic_add v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_add_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -59,7 +59,7 @@ entry:
; SI: buffer_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_add_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -70,7 +70,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_add_i32:
; GCN: buffer_atomic_add v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_add_i32(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_add_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
%val = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
ret void
@@ -79,7 +79,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_add_i32_ret:
; GCN: buffer_atomic_add [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dword [[RET]]
-define void @atomic_add_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_add_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(1)* %out2
@@ -89,7 +89,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_add_i32_addr64:
; SI: buffer_atomic_add v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_add_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -100,7 +100,7 @@ entry:
; SI: buffer_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_add_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -110,7 +110,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_and_i32_offset:
; GCN: buffer_atomic_and v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
-define void @atomic_and_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_and_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -120,7 +120,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_and_i32_ret_offset:
; GCN: buffer_atomic_and [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_and_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_and_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -131,7 +131,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_and_i32_addr64_offset:
; SI: buffer_atomic_and v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_and_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -143,7 +143,7 @@ entry:
; SI: buffer_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_and [[RET:v[0-9]]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_and_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -154,7 +154,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_and_i32:
; GCN: buffer_atomic_and v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_and_i32(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_and_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
%val = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
ret void
@@ -163,7 +163,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_and_i32_ret:
; GCN: buffer_atomic_and [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dword [[RET]]
-define void @atomic_and_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_and_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(1)* %out2
@@ -173,7 +173,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_and_i32_addr64:
; SI: buffer_atomic_and v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_and_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -184,7 +184,7 @@ entry:
; SI: buffer_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_and_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -194,7 +194,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_sub_i32_offset:
; GCN: buffer_atomic_sub v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
-define void @atomic_sub_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_sub_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -204,7 +204,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_sub_i32_ret_offset:
; GCN: buffer_atomic_sub [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_sub_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_sub_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -215,7 +215,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_sub_i32_addr64_offset:
; SI: buffer_atomic_sub v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_sub_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -227,7 +227,7 @@ entry:
; SI: buffer_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_sub_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -238,7 +238,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_sub_i32:
; GCN: buffer_atomic_sub v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_sub_i32(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_sub_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
%val = atomicrmw volatile sub i32 addrspace(1)* %out, i32 %in seq_cst
ret void
@@ -247,7 +247,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_sub_i32_ret:
; GCN: buffer_atomic_sub [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dword [[RET]]
-define void @atomic_sub_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_sub_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile sub i32 addrspace(1)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(1)* %out2
@@ -257,7 +257,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_sub_i32_addr64:
; SI: buffer_atomic_sub v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_sub_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -268,7 +268,7 @@ entry:
; SI: buffer_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_sub_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -278,7 +278,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_max_i32_offset:
; GCN: buffer_atomic_smax v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
-define void @atomic_max_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_max_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -288,7 +288,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_max_i32_ret_offset:
; GCN: buffer_atomic_smax [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_max_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_max_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -299,7 +299,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_max_i32_addr64_offset:
; SI: buffer_atomic_smax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_max_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -311,7 +311,7 @@ entry:
; SI: buffer_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_max_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -322,7 +322,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_max_i32:
; GCN: buffer_atomic_smax v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_max_i32(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_max_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
%val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
ret void
@@ -331,7 +331,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_max_i32_ret:
; GCN: buffer_atomic_smax [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dword [[RET]]
-define void @atomic_max_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_max_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(1)* %out2
@@ -341,7 +341,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_max_i32_addr64:
; SI: buffer_atomic_smax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_max_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -352,7 +352,7 @@ entry:
; SI: buffer_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_max_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -362,7 +362,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_umax_i32_offset:
; GCN: buffer_atomic_umax v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
-define void @atomic_umax_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_umax_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -372,7 +372,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_umax_i32_ret_offset:
; GCN: buffer_atomic_umax [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_umax_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_umax_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -383,7 +383,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_umax_i32_addr64_offset:
; SI: buffer_atomic_umax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_umax_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -395,7 +395,7 @@ entry:
; SI: buffer_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_umax_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -406,7 +406,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_umax_i32:
; GCN: buffer_atomic_umax v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_umax_i32(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_umax_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
%val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
ret void
@@ -415,7 +415,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_umax_i32_ret:
; GCN: buffer_atomic_umax [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dword [[RET]]
-define void @atomic_umax_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_umax_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(1)* %out2
@@ -425,7 +425,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_umax_i32_addr64:
; SI: buffer_atomic_umax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_umax_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -436,7 +436,7 @@ entry:
; SI: buffer_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_umax_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -446,7 +446,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_min_i32_offset:
; GCN: buffer_atomic_smin v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
-define void @atomic_min_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_min_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -456,7 +456,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_min_i32_ret_offset:
; GCN: buffer_atomic_smin [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_min_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_min_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -467,7 +467,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_min_i32_addr64_offset:
; SI: buffer_atomic_smin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_min_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -479,7 +479,7 @@ entry:
; SI: buffer_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_min_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -490,7 +490,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_min_i32:
; GCN: buffer_atomic_smin v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_min_i32(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_min_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
%val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
ret void
@@ -499,7 +499,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_min_i32_ret:
; GCN: buffer_atomic_smin [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dword [[RET]]
-define void @atomic_min_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_min_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(1)* %out2
@@ -509,7 +509,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_min_i32_addr64:
; SI: buffer_atomic_smin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_min_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -520,7 +520,7 @@ entry:
; SI: buffer_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_min_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -530,7 +530,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_umin_i32_offset:
; GCN: buffer_atomic_umin v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
-define void @atomic_umin_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_umin_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -540,7 +540,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_umin_i32_ret_offset:
; GCN: buffer_atomic_umin [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_umin_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_umin_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -551,7 +551,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_umin_i32_addr64_offset:
; SI: buffer_atomic_umin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_umin_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -563,7 +563,7 @@ entry:
; SI: buffer_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_umin_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -574,7 +574,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_umin_i32:
; GCN: buffer_atomic_umin v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_umin_i32(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_umin_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
%val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
ret void
@@ -583,7 +583,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_umin_i32_ret:
; SI: buffer_atomic_umin [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dword [[RET]]
-define void @atomic_umin_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_umin_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(1)* %out2
@@ -593,7 +593,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_umin_i32_addr64:
; SI: buffer_atomic_umin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_umin_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -604,7 +604,7 @@ entry:
; SI: buffer_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_umin_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -614,7 +614,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_or_i32_offset:
; GCN: buffer_atomic_or v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
-define void @atomic_or_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_or_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -624,7 +624,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_or_i32_ret_offset:
; GCN: buffer_atomic_or [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_or_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_or_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -635,7 +635,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_or_i32_addr64_offset:
; SI: buffer_atomic_or v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_or_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -647,7 +647,7 @@ entry:
; SI: buffer_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_or_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -658,7 +658,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_or_i32:
; GCN: buffer_atomic_or v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_or_i32(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_or_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
%val = atomicrmw volatile or i32 addrspace(1)* %out, i32 %in seq_cst
ret void
@@ -667,7 +667,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_or_i32_ret:
; GCN: buffer_atomic_or [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dword [[RET]]
-define void @atomic_or_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_or_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile or i32 addrspace(1)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(1)* %out2
@@ -677,7 +677,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_or_i32_addr64:
; SI: buffer_atomic_or v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_or_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -688,7 +688,7 @@ entry:
; SI: buffer_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_or_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -698,7 +698,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_xchg_i32_offset:
; GCN: buffer_atomic_swap v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
-define void @atomic_xchg_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_xchg_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -708,7 +708,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_xchg_i32_ret_offset:
; GCN: buffer_atomic_swap [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_xchg_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_xchg_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -720,7 +720,7 @@ entry:
; SI: buffer_atomic_swap v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}{{$}}
-define void @atomic_xchg_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -733,7 +733,7 @@ entry:
; VI: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_xchg_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -744,7 +744,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_xchg_i32:
; GCN: buffer_atomic_swap v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_xchg_i32(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_xchg_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
%val = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
ret void
@@ -753,7 +753,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_xchg_i32_ret:
; GCN: buffer_atomic_swap [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dword [[RET]]
-define void @atomic_xchg_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_xchg_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(1)* %out2
@@ -763,7 +763,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_xchg_i32_addr64:
; SI: buffer_atomic_swap v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_xchg_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -774,7 +774,7 @@ entry:
; SI: buffer_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_xchg_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -784,7 +784,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_cmpxchg_i32_offset:
; GCN: buffer_atomic_cmpswap v[{{[0-9]+}}:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
-define void @atomic_cmpxchg_i32_offset(i32 addrspace(1)* %out, i32 %in, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_offset(i32 addrspace(1)* %out, i32 %in, i32 %old) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
@@ -794,7 +794,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_cmpxchg_i32_ret_offset:
; GCN: buffer_atomic_cmpswap v{{\[}}[[RET:[0-9]+]]{{:[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
; GCN: buffer_store_dword v[[RET]]
-define void @atomic_cmpxchg_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i32 %old) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
@@ -807,7 +807,7 @@ entry:
; SI: buffer_atomic_cmpswap v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_cmpswap v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
-define void @atomic_cmpxchg_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index, i32 %old) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -819,7 +819,7 @@ entry:
; SI: buffer_atomic_cmpswap v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: buffer_store_dword v[[RET]]
-define void @atomic_cmpxchg_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index, i32 %old) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -831,7 +831,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_cmpxchg_i32:
; GCN: buffer_atomic_cmpswap v[{{[0-9]+:[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_cmpxchg_i32(i32 addrspace(1)* %out, i32 %in, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32(i32 addrspace(1)* %out, i32 %in, i32 %old) {
entry:
%val = cmpxchg volatile i32 addrspace(1)* %out, i32 %old, i32 %in seq_cst seq_cst
ret void
@@ -840,7 +840,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_cmpxchg_i32_ret:
; GCN: buffer_atomic_cmpswap v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dword v[[RET]]
-define void @atomic_cmpxchg_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i32 %old) {
entry:
%val = cmpxchg volatile i32 addrspace(1)* %out, i32 %old, i32 %in seq_cst seq_cst
%extract0 = extractvalue { i32, i1 } %val, 0
@@ -851,7 +851,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_cmpxchg_i32_addr64:
; SI: buffer_atomic_cmpswap v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_cmpswap v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]{{$}}
-define void @atomic_cmpxchg_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index, i32 %old) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = cmpxchg volatile i32 addrspace(1)* %ptr, i32 %old, i32 %in seq_cst seq_cst
@@ -862,7 +862,7 @@ entry:
; SI: buffer_atomic_cmpswap v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: buffer_store_dword v[[RET]]
-define void @atomic_cmpxchg_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index, i32 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index, i32 %old) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = cmpxchg volatile i32 addrspace(1)* %ptr, i32 %old, i32 %in seq_cst seq_cst
@@ -873,7 +873,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_xor_i32_offset:
; GCN: buffer_atomic_xor v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
-define void @atomic_xor_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_xor_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -883,7 +883,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_xor_i32_ret_offset:
; GCN: buffer_atomic_xor [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_xor_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_xor_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
%val = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
@@ -894,7 +894,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_xor_i32_addr64_offset:
; SI: buffer_atomic_xor v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_xor v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_xor_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -906,7 +906,7 @@ entry:
; SI: buffer_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_xor_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -917,7 +917,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_xor_i32:
; GCN: buffer_atomic_xor v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_xor_i32(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @atomic_xor_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
%val = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst
ret void
@@ -926,7 +926,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_xor_i32_ret:
; GCN: buffer_atomic_xor [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dword [[RET]]
-define void @atomic_xor_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+define amdgpu_kernel void @atomic_xor_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
%val = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst
store i32 %val, i32 addrspace(1)* %out2
@@ -936,7 +936,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_xor_i32_addr64:
; SI: buffer_atomic_xor v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_xor v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-define void @atomic_xor_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -947,7 +947,7 @@ entry:
; SI: buffer_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_xor_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%val = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
@@ -959,7 +959,7 @@ entry:
; SI: buffer_load_dword [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
; VI: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_load_i32_offset(i32 addrspace(1)* %in, i32 addrspace(1)* %out) {
+define amdgpu_kernel void @atomic_load_i32_offset(i32 addrspace(1)* %in, i32 addrspace(1)* %out) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %in, i64 4
%val = load atomic i32, i32 addrspace(1)* %gep seq_cst, align 4
@@ -971,7 +971,7 @@ entry:
; SI: buffer_load_dword [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; VI: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}] glc
; GCN: buffer_store_dword [[RET]]
-define void @atomic_load_i32(i32 addrspace(1)* %in, i32 addrspace(1)* %out) {
+define amdgpu_kernel void @atomic_load_i32(i32 addrspace(1)* %in, i32 addrspace(1)* %out) {
entry:
%val = load atomic i32, i32 addrspace(1)* %in seq_cst, align 4
store i32 %val, i32 addrspace(1)* %out
@@ -982,7 +982,7 @@ entry:
; SI: buffer_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_load_i32_addr64_offset(i32 addrspace(1)* %in, i32 addrspace(1)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_load_i32_addr64_offset(i32 addrspace(1)* %in, i32 addrspace(1)* %out, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %in, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -995,7 +995,7 @@ entry:
; SI: buffer_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: buffer_store_dword [[RET]]
-define void @atomic_load_i32_addr64(i32 addrspace(1)* %in, i32 addrspace(1)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_load_i32_addr64(i32 addrspace(1)* %in, i32 addrspace(1)* %out, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %in, i64 %index
%val = load atomic i32, i32 addrspace(1)* %ptr seq_cst, align 4
@@ -1006,7 +1006,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_store_i32_offset:
; SI: buffer_store_dword {{v[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
; VI: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}}
-define void @atomic_store_i32_offset(i32 %in, i32 addrspace(1)* %out) {
+define amdgpu_kernel void @atomic_store_i32_offset(i32 %in, i32 addrspace(1)* %out) {
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
store atomic i32 %in, i32 addrspace(1)* %gep seq_cst, align 4
@@ -1016,7 +1016,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_store_i32:
; SI: buffer_store_dword {{v[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc{{$}}
; VI: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}}
-define void @atomic_store_i32(i32 %in, i32 addrspace(1)* %out) {
+define amdgpu_kernel void @atomic_store_i32(i32 %in, i32 addrspace(1)* %out) {
entry:
store atomic i32 %in, i32 addrspace(1)* %out seq_cst, align 4
ret void
@@ -1025,7 +1025,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_store_i32_addr64_offset:
; SI: buffer_store_dword {{v[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}}
-define void @atomic_store_i32_addr64_offset(i32 %in, i32 addrspace(1)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_store_i32_addr64_offset(i32 %in, i32 addrspace(1)* %out, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
@@ -1036,7 +1036,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_store_i32_addr64:
; SI: buffer_store_dword {{v[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}}
-define void @atomic_store_i32_addr64(i32 %in, i32 addrspace(1)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_store_i32_addr64(i32 %in, i32 addrspace(1)* %out, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
store atomic i32 %in, i32 addrspace(1)* %ptr seq_cst, align 4
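
Every hunk in global_atomics.ll above makes the same mechanical change: the test function gains the amdgpu_kernel calling convention, so it is code-generated as a GPU kernel entry point rather than a callable function, which is what the buffer_atomic_*/flat_atomic_* check lines assume. A minimal before/after sketch of the transformation, using a hypothetical @example function that is not taken from this diff:

; Before: a default-convention function.
define void @example(i32 addrspace(1)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; After: only the calling convention changes; the body is untouched.
define amdgpu_kernel void @example(i32 addrspace(1)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

The same one-line substitution repeats in global_atomics_i64.ll below for the 64-bit (_x2) variants of the same tests.
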
diff --git a/test/CodeGen/AMDGPU/global_atomics_i64.ll b/test/CodeGen/AMDGPU/global_atomics_i64.ll
index f66c6c7b531a..56520b787ead 100644
--- a/test/CodeGen/AMDGPU/global_atomics_i64.ll
+++ b/test/CodeGen/AMDGPU/global_atomics_i64.ll
@@ -3,7 +3,7 @@
; GCN-LABEL: {{^}}atomic_add_i64_offset:
; GCN: buffer_atomic_add_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
-define void @atomic_add_i64_offset(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_add_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -13,7 +13,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i64_ret_offset:
; GCN: buffer_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_add_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_add_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -24,7 +24,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i64_addr64_offset:
; CI: buffer_atomic_add_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_add_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}
-define void @atomic_add_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -36,7 +36,7 @@ entry:
; CI: buffer_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_add_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -47,7 +47,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i64:
; GCN: buffer_atomic_add_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_add_i64(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_add_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst
ret void
@@ -56,7 +56,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i64_ret:
; GCN: buffer_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_add_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_add_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
@@ -66,7 +66,7 @@ entry:
; GCN-LABEL: {{^}}atomic_add_i64_addr64:
; CI: buffer_atomic_add_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_add_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_add_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -77,7 +77,7 @@ entry:
; CI: buffer_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_add_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_add_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_add_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -87,7 +87,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i64_offset:
; GCN: buffer_atomic_and_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
-define void @atomic_and_i64_offset(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_and_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -97,7 +97,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i64_ret_offset:
; GCN: buffer_atomic_and_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_and_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_and_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -108,7 +108,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i64_addr64_offset:
; CI: buffer_atomic_and_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_and_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_and_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -120,7 +120,7 @@ entry:
; CI: buffer_atomic_and_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_and_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_and_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -131,7 +131,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i64:
; GCN: buffer_atomic_and_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_and_i64(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_and_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile and i64 addrspace(1)* %out, i64 %in seq_cst
ret void
@@ -140,7 +140,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i64_ret:
; GCN: buffer_atomic_and_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_and_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_and_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile and i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
@@ -150,7 +150,7 @@ entry:
; GCN-LABEL: {{^}}atomic_and_i64_addr64:
; CI: buffer_atomic_and_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_and_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_and_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile and i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -161,7 +161,7 @@ entry:
; CI: buffer_atomic_and_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_and_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_and_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_and_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile and i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -171,7 +171,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i64_offset:
; GCN: buffer_atomic_sub_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
-define void @atomic_sub_i64_offset(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_sub_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -181,7 +181,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i64_ret_offset:
; GCN: buffer_atomic_sub_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_sub_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_sub_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -192,7 +192,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i64_addr64_offset:
; CI: buffer_atomic_sub_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_sub_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_sub_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -204,7 +204,7 @@ entry:
; CI: buffer_atomic_sub_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_sub_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_sub_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -215,7 +215,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i64:
; GCN: buffer_atomic_sub_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_sub_i64(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_sub_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile sub i64 addrspace(1)* %out, i64 %in seq_cst
ret void
@@ -224,7 +224,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i64_ret:
; GCN: buffer_atomic_sub_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_sub_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_sub_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile sub i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
@@ -234,7 +234,7 @@ entry:
; GCN-LABEL: {{^}}atomic_sub_i64_addr64:
; CI: buffer_atomic_sub_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_sub_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_sub_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile sub i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -245,7 +245,7 @@ entry:
; CI: buffer_atomic_sub_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_sub_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_sub_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_sub_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile sub i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -255,7 +255,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i64_offset:
; GCN: buffer_atomic_smax_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
-define void @atomic_max_i64_offset(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_max_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -265,7 +265,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i64_ret_offset:
; GCN: buffer_atomic_smax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_max_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_max_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -276,7 +276,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i64_addr64_offset:
; CI: buffer_atomic_smax_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_smax_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_max_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -288,7 +288,7 @@ entry:
; CI: buffer_atomic_smax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_smax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_max_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -299,7 +299,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i64:
; GCN: buffer_atomic_smax_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_max_i64(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_max_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst
ret void
@@ -308,7 +308,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i64_ret:
; GCN: buffer_atomic_smax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_max_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_max_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
@@ -318,7 +318,7 @@ entry:
; GCN-LABEL: {{^}}atomic_max_i64_addr64:
; CI: buffer_atomic_smax_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_smax_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_max_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -329,7 +329,7 @@ entry:
; CI: buffer_atomic_smax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_smax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_max_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_max_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -339,7 +339,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i64_offset:
; GCN: buffer_atomic_umax_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
-define void @atomic_umax_i64_offset(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_umax_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -349,7 +349,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i64_ret_offset:
; GCN: buffer_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_umax_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_umax_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -360,7 +360,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i64_addr64_offset:
; CI: buffer_atomic_umax_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_umax_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_umax_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -372,7 +372,7 @@ entry:
; CI: buffer_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_umax_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -383,7 +383,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i64:
; GCN: buffer_atomic_umax_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_umax_i64(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_umax_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
ret void
@@ -392,7 +392,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i64_ret:
; GCN: buffer_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_umax_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_umax_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
@@ -402,7 +402,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umax_i64_addr64:
; CI: buffer_atomic_umax_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_umax_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_umax_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -413,7 +413,7 @@ entry:
; CI: buffer_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_umax_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umax_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -423,7 +423,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i64_offset:
; GCN: buffer_atomic_smin_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
-define void @atomic_min_i64_offset(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_min_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -433,7 +433,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i64_ret_offset:
; GCN: buffer_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_min_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_min_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -444,7 +444,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i64_addr64_offset:
; CI: buffer_atomic_smin_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_smin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_min_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -456,7 +456,7 @@ entry:
; CI: buffer_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_min_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -467,7 +467,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i64:
; GCN: buffer_atomic_smin_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_min_i64(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_min_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst
ret void
@@ -476,7 +476,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i64_ret:
; GCN: buffer_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_min_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_min_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
@@ -486,7 +486,7 @@ entry:
; GCN-LABEL: {{^}}atomic_min_i64_addr64:
; CI: buffer_atomic_smin_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_smin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_min_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -497,7 +497,7 @@ entry:
; CI: buffer_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_min_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_min_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -507,7 +507,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i64_offset:
; GCN: buffer_atomic_umin_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
-define void @atomic_umin_i64_offset(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_umin_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -517,7 +517,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i64_ret_offset:
; GCN: buffer_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_umin_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_umin_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -528,7 +528,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i64_addr64_offset:
; CI: buffer_atomic_umin_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_umin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_umin_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -540,7 +540,7 @@ entry:
; CI: buffer_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_umin_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -551,7 +551,7 @@ entry:

; GCN-LABEL: {{^}}atomic_umin_i64:
; GCN: buffer_atomic_umin_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_umin_i64(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_umin_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
ret void
@@ -560,7 +560,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i64_ret:
; CI: buffer_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_umin_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_umin_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
@@ -570,7 +570,7 @@ entry:
; GCN-LABEL: {{^}}atomic_umin_i64_addr64:
; CI: buffer_atomic_umin_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_umin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_umin_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -581,7 +581,7 @@ entry:
; CI: buffer_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_umin_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_umin_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -591,7 +591,7 @@ entry:

; GCN-LABEL: {{^}}atomic_or_i64_offset:
; GCN: buffer_atomic_or_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
-define void @atomic_or_i64_offset(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_or_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -601,7 +601,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i64_ret_offset:
; GCN: buffer_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_or_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_or_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -612,7 +612,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i64_addr64_offset:
; CI: buffer_atomic_or_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_or_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_or_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -624,7 +624,7 @@ entry:
; CI: buffer_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_or_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -635,7 +635,7 @@ entry:

; GCN-LABEL: {{^}}atomic_or_i64:
; GCN: buffer_atomic_or_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_or_i64(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_or_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile or i64 addrspace(1)* %out, i64 %in seq_cst
ret void
@@ -644,7 +644,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i64_ret:
; GCN: buffer_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_or_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_or_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile or i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
@@ -654,7 +654,7 @@ entry:
; GCN-LABEL: {{^}}atomic_or_i64_addr64:
; CI: buffer_atomic_or_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_or_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_or_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile or i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -665,7 +665,7 @@ entry:
; CI: buffer_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_or_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_or_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile or i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -675,7 +675,7 @@ entry:

; GCN-LABEL: {{^}}atomic_xchg_i64_offset:
; GCN: buffer_atomic_swap_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
-define void @atomic_xchg_i64_offset(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_xchg_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -685,7 +685,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i64_ret_offset:
; GCN: buffer_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_xchg_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_xchg_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -696,7 +696,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i64_addr64_offset:
; CI: buffer_atomic_swap_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_swap_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}
-define void @atomic_xchg_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -708,7 +708,7 @@ entry:
; CI: buffer_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_xchg_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -719,7 +719,7 @@ entry:

; GCN-LABEL: {{^}}atomic_xchg_i64:
; GCN: buffer_atomic_swap_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_xchg_i64(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_xchg_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %out, i64 %in seq_cst
ret void
@@ -728,7 +728,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i64_ret:
; GCN: buffer_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_xchg_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_xchg_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
@@ -738,7 +738,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xchg_i64_addr64:
; CI: buffer_atomic_swap_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_swap_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_xchg_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -749,7 +749,7 @@ entry:
; CI: buffer_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_xchg_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xchg_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -759,7 +759,7 @@ entry:

; GCN-LABEL: {{^}}atomic_xor_i64_offset:
; GCN: buffer_atomic_xor_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
-define void @atomic_xor_i64_offset(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_xor_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -769,7 +769,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i64_ret_offset:
; GCN: buffer_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_xor_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_xor_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
@@ -780,7 +780,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i64_addr64_offset:
; CI: buffer_atomic_xor_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_xor_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_xor_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -792,7 +792,7 @@ entry:
; CI: buffer_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_xor_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -803,7 +803,7 @@ entry:

; GCN-LABEL: {{^}}atomic_xor_i64:
; GCN: buffer_atomic_xor_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_xor_i64(i64 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @atomic_xor_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile xor i64 addrspace(1)* %out, i64 %in seq_cst
ret void
@@ -812,7 +812,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i64_ret:
; GCN: buffer_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_xor_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+define amdgpu_kernel void @atomic_xor_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile xor i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
@@ -822,7 +822,7 @@ entry:
; GCN-LABEL: {{^}}atomic_xor_i64_addr64:
; CI: buffer_atomic_xor_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_xor_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]$}}
-define void @atomic_xor_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile xor i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -833,7 +833,7 @@ entry:
; CI: buffer_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_xor_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+define amdgpu_kernel void @atomic_xor_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile xor i64 addrspace(1)* %ptr, i64 %in seq_cst
@@ -851,7 +851,7 @@ entry:

; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_offset:
; GCN: buffer_atomic_cmpswap_x2 v[{{[0-9]+}}:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
-define void @atomic_cmpxchg_i64_offset(i64 addrspace(1)* %out, i64 %in, i64 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i64_offset(i64 addrspace(1)* %out, i64 %in, i64 %old) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
@@ -861,7 +861,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_soffset:
; GCN: s_mov_b32 [[SREG:s[0-9]+]], 0x11940
; GCN: buffer_atomic_cmpswap_x2 v[{{[0-9]+}}:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], [[SREG]]{{$}}
-define void @atomic_cmpxchg_i64_soffset(i64 addrspace(1)* %out, i64 %in, i64 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i64_soffset(i64 addrspace(1)* %out, i64 %in, i64 %old) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 9000
%val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
@@ -871,7 +871,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_ret_offset:
; GCN: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]{{:[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
-define void @atomic_cmpxchg_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %old) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
@@ -884,7 +884,7 @@ entry:
; CI: buffer_atomic_cmpswap_x2 v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_cmpswap_x2 v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
-define void @atomic_cmpxchg_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index, i64 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index, i64 %old) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -896,7 +896,7 @@ entry:
; CI: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
-define void @atomic_cmpxchg_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index, i64 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index, i64 %old) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -908,7 +908,7 @@ entry:

; FUNC-LABEL: {{^}}atomic_cmpxchg_i64:
; GCN: buffer_atomic_cmpswap_x2 v[{{[0-9]+:[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
-define void @atomic_cmpxchg_i64(i64 addrspace(1)* %out, i64 %in, i64 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i64(i64 addrspace(1)* %out, i64 %in, i64 %old) {
entry:
%val = cmpxchg volatile i64 addrspace(1)* %out, i64 %old, i64 %in seq_cst seq_cst
ret void
@@ -917,7 +917,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_ret:
; GCN: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
-define void @atomic_cmpxchg_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %old) {
entry:
%val = cmpxchg volatile i64 addrspace(1)* %out, i64 %old, i64 %in seq_cst seq_cst
%extract0 = extractvalue { i64, i1 } %val, 0
@@ -928,7 +928,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_addr64:
; CI: buffer_atomic_cmpswap_x2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_cmpswap_x2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]{{$}}
-define void @atomic_cmpxchg_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index, i64 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index, i64 %old) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%val = cmpxchg volatile i64 addrspace(1)* %ptr, i64 %old, i64 %in seq_cst seq_cst
@@ -939,7 +939,7 @@ entry:
; CI: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
-define void @atomic_cmpxchg_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index, i64 %old) {
+define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index, i64 %old) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%val = cmpxchg volatile i64 addrspace(1)* %ptr, i64 %old, i64 %in seq_cst seq_cst
@@ -952,7 +952,7 @@ entry:
; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_load_i64_offset(i64 addrspace(1)* %in, i64 addrspace(1)* %out) {
+define amdgpu_kernel void @atomic_load_i64_offset(i64 addrspace(1)* %in, i64 addrspace(1)* %out) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %in, i64 4
%val = load atomic i64, i64 addrspace(1)* %gep seq_cst, align 8
@@ -964,7 +964,7 @@ entry:
; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_load_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %out) {
+define amdgpu_kernel void @atomic_load_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %out) {
entry:
%val = load atomic i64, i64 addrspace(1)* %in seq_cst, align 8
store i64 %val, i64 addrspace(1)* %out
@@ -975,7 +975,7 @@ entry:
; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_load_i64_addr64_offset(i64 addrspace(1)* %in, i64 addrspace(1)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_load_i64_addr64_offset(i64 addrspace(1)* %in, i64 addrspace(1)* %out, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %in, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -988,7 +988,7 @@ entry:
; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_load_i64_addr64(i64 addrspace(1)* %in, i64 addrspace(1)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_load_i64_addr64(i64 addrspace(1)* %in, i64 addrspace(1)* %out, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %in, i64 %index
%val = load atomic i64, i64 addrspace(1)* %ptr seq_cst, align 8
@@ -999,7 +999,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_store_i64_offset:
; CI: buffer_store_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; VI: flat_store_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
-define void @atomic_store_i64_offset(i64 %in, i64 addrspace(1)* %out) {
+define amdgpu_kernel void @atomic_store_i64_offset(i64 %in, i64 addrspace(1)* %out) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
store atomic i64 %in, i64 addrspace(1)* %gep seq_cst, align 8
@@ -1009,7 +1009,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_store_i64:
; CI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, v[{{[0-9]+}}:{{[0-9]+}}] glc
-define void @atomic_store_i64(i64 %in, i64 addrspace(1)* %out) {
+define amdgpu_kernel void @atomic_store_i64(i64 %in, i64 addrspace(1)* %out) {
entry:
store atomic i64 %in, i64 addrspace(1)* %out seq_cst, align 8
ret void
@@ -1018,7 +1018,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_store_i64_addr64_offset:
; CI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}] glc{{$}}
-define void @atomic_store_i64_addr64_offset(i64 %in, i64 addrspace(1)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_store_i64_addr64_offset(i64 %in, i64 addrspace(1)* %out, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
@@ -1029,7 +1029,7 @@ entry:
; FUNC-LABEL: {{^}}atomic_store_i64_addr64:
; CI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}] glc{{$}}
-define void @atomic_store_i64_addr64(i64 %in, i64 addrspace(1)* %out, i64 %index) {
+define amdgpu_kernel void @atomic_store_i64_addr64(i64 %in, i64 addrspace(1)* %out, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
store atomic i64 %in, i64 addrspace(1)* %ptr seq_cst, align 8
diff --git a/test/CodeGen/AMDGPU/gv-const-addrspace.ll b/test/CodeGen/AMDGPU/gv-const-addrspace.ll
index d07843e9dd27..0903542bac4f 100644
--- a/test/CodeGen/AMDGPU/gv-const-addrspace.ll
+++ b/test/CodeGen/AMDGPU/gv-const-addrspace.ll
@@ -15,7 +15,7 @@
; EG: @float_gv
; EG-NOT: MOVA_INT
; EG-NOT: MOV
-define void @float(float addrspace(1)* %out, i32 %index) {
+define amdgpu_kernel void @float(float addrspace(1)* %out, i32 %index) {
entry:
%0 = getelementptr inbounds [5 x float], [5 x float] addrspace(2)* @float_gv, i32 0, i32 %index
%1 = load float, float addrspace(2)* %0
@@ -33,7 +33,7 @@ entry:
; EG: @i32_gv
; EG-NOT: MOVA_INT
; EG-NOT: MOV
-define void @i32(i32 addrspace(1)* %out, i32 %index) {
+define amdgpu_kernel void @i32(i32 addrspace(1)* %out, i32 %index) {
entry:
%0 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(2)* @i32_gv, i32 0, i32 %index
%1 = load i32, i32 addrspace(2)* %0
@@ -53,7 +53,7 @@ entry:
; EG: @struct_foo_gv
; EG-NOT: MOVA_INT
; EG-NOT: MOV
-define void @struct_foo_gv_load(i32 addrspace(1)* %out, i32 %index) {
+define amdgpu_kernel void @struct_foo_gv_load(i32 addrspace(1)* %out, i32 %index) {
%gep = getelementptr inbounds [1 x %struct.foo], [1 x %struct.foo] addrspace(2)* @struct_foo_gv, i32 0, i32 0, i32 1, i32 %index
%load = load i32, i32 addrspace(2)* %gep, align 4
store i32 %load, i32 addrspace(1)* %out, align 4
@@ -72,7 +72,7 @@ define void @struct_foo_gv_load(i32 addrspace(1)* %out, i32 %index) {
; EG: @array_v1_gv
; EG-NOT: MOVA_INT
; EG-NOT: MOV
-define void @array_v1_gv_load(<1 x i32> addrspace(1)* %out, i32 %index) {
+define amdgpu_kernel void @array_v1_gv_load(<1 x i32> addrspace(1)* %out, i32 %index) {
%gep = getelementptr inbounds [4 x <1 x i32>], [4 x <1 x i32>] addrspace(2)* @array_v1_gv, i32 0, i32 %index
%load = load <1 x i32>, <1 x i32> addrspace(2)* %gep, align 4
store <1 x i32> %load, <1 x i32> addrspace(1)* %out, align 4
@@ -84,7 +84,7 @@ define void @array_v1_gv_load(<1 x i32> addrspace(1)* %out, i32 %index) {
; EG: VTX_READ_32
; EG: @float_gv
; EG-NOT: MOVA_INT
-define void @gv_addressing_in_branch(float addrspace(1)* %out, i32 %index, i32 %a) {
+define amdgpu_kernel void @gv_addressing_in_branch(float addrspace(1)* %out, i32 %index, i32 %a) {
entry:
%0 = icmp eq i32 0, %a
br i1 %0, label %if, label %else
diff --git a/test/CodeGen/AMDGPU/gv-offset-folding.ll b/test/CodeGen/AMDGPU/gv-offset-folding.ll
index af5ee8e66750..e641d7266a79 100644
--- a/test/CodeGen/AMDGPU/gv-offset-folding.ll
+++ b/test/CodeGen/AMDGPU/gv-offset-folding.ll
@@ -12,8 +12,8 @@
; for local memory globals.

; CHECK-LABEL: lds_no_offset:
-; CHECK ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:4
-define void @lds_no_offset() {
+; CHECK: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:4
+define amdgpu_kernel void @lds_no_offset() {
entry:
%ptr = getelementptr [4 x i32], [4 x i32] addrspace(3)* @lds, i32 0, i32 1
store i32 0, i32 addrspace(3)* %ptr
diff --git a/test/CodeGen/AMDGPU/half.ll b/test/CodeGen/AMDGPU/half.ll
index aa22e83fade2..41ae5a4a0b00 100644
--- a/test/CodeGen/AMDGPU/half.ll
+++ b/test/CodeGen/AMDGPU/half.ll
@@ -8,7 +8,7 @@
; SI: v_cvt_f16_f32_e32 [[CVT:v[0-9]+]], [[ARG]]
; VI: v_trunc_f16_e32 [[CVT:v[0-9]+]], [[ARG]]
; GCN: buffer_store_short [[CVT]]
-define void @load_f16_arg(half addrspace(1)* %out, half %arg) #0 {
+define amdgpu_kernel void @load_f16_arg(half addrspace(1)* %out, half %arg) #0 {
store half %arg, half addrspace(1)* %out
ret void
}
@@ -20,7 +20,7 @@ define void @load_f16_arg(half addrspace(1)* %out, half %arg) #0 {
; GCN: v_or_b32_e32 [[PACKED:v[0-9]+]], [[HI]], [[V0]]
; GCN: buffer_store_dword [[PACKED]], off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
; GCN: s_endpgm
-define void @load_v2f16_arg(<2 x half> addrspace(1)* %out, <2 x half> %arg) #0 {
+define amdgpu_kernel void @load_v2f16_arg(<2 x half> addrspace(1)* %out, <2 x half> %arg) #0 {
store <2 x half> %arg, <2 x half> addrspace(1)* %out
ret void
}
@@ -34,7 +34,7 @@ define void @load_v2f16_arg(<2 x half> addrspace(1)* %out, <2 x half> %arg) #0 {
; GCN-DAG: buffer_store_short
; GCN-NOT: buffer_store
; GCN: s_endpgm
-define void @load_v3f16_arg(<3 x half> addrspace(1)* %out, <3 x half> %arg) #0 {
+define amdgpu_kernel void @load_v3f16_arg(<3 x half> addrspace(1)* %out, <3 x half> %arg) #0 {
store <3 x half> %arg, <3 x half> addrspace(1)* %out
ret void
}
@@ -46,33 +46,33 @@ define void @load_v3f16_arg(<3 x half> addrspace(1)* %out, <3 x half> %arg) #0 {
; GCN: buffer_load_ushort
; GCN: buffer_store_dwordx2
; GCN: s_endpgm
-define void @load_v4f16_arg(<4 x half> addrspace(1)* %out, <4 x half> %arg) #0 {
+define amdgpu_kernel void @load_v4f16_arg(<4 x half> addrspace(1)* %out, <4 x half> %arg) #0 {
store <4 x half> %arg, <4 x half> addrspace(1)* %out
ret void
}

; GCN-LABEL: {{^}}load_v8f16_arg:
-define void @load_v8f16_arg(<8 x half> addrspace(1)* %out, <8 x half> %arg) #0 {
+define amdgpu_kernel void @load_v8f16_arg(<8 x half> addrspace(1)* %out, <8 x half> %arg) #0 {
store <8 x half> %arg, <8 x half> addrspace(1)* %out
ret void
}

; GCN-LABEL: {{^}}extload_v2f16_arg:
-define void @extload_v2f16_arg(<2 x float> addrspace(1)* %out, <2 x half> %in) #0 {
+define amdgpu_kernel void @extload_v2f16_arg(<2 x float> addrspace(1)* %out, <2 x half> %in) #0 {
%fpext = fpext <2 x half> %in to <2 x float>
store <2 x float> %fpext, <2 x float> addrspace(1)* %out
ret void
}

; GCN-LABEL: {{^}}extload_f16_to_f32_arg:
-define void @extload_f16_to_f32_arg(float addrspace(1)* %out, half %arg) #0 {
+define amdgpu_kernel void @extload_f16_to_f32_arg(float addrspace(1)* %out, half %arg) #0 {
%ext = fpext half %arg to float
store float %ext, float addrspace(1)* %out
ret void
}

; GCN-LABEL: {{^}}extload_v2f16_to_v2f32_arg:
-define void @extload_v2f16_to_v2f32_arg(<2 x float> addrspace(1)* %out, <2 x half> %arg) #0 {
+define amdgpu_kernel void @extload_v2f16_to_v2f32_arg(<2 x float> addrspace(1)* %out, <2 x half> %arg) #0 {
%ext = fpext <2 x half> %arg to <2 x float>
store <2 x float> %ext, <2 x float> addrspace(1)* %out
ret void
@@ -90,14 +90,14 @@ define void @extload_v2f16_to_v2f32_arg(<2 x float> addrspace(1)* %out, <2 x hal
; GCN-DAG: buffer_store_dword
; GCN-DAG: buffer_store_dwordx2
; GCN: s_endpgm
-define void @extload_v3f16_to_v3f32_arg(<3 x float> addrspace(1)* %out, <3 x half> %arg) #0 {
+define amdgpu_kernel void @extload_v3f16_to_v3f32_arg(<3 x float> addrspace(1)* %out, <3 x half> %arg) #0 {
%ext = fpext <3 x half> %arg to <3 x float>
store <3 x float> %ext, <3 x float> addrspace(1)* %out
ret void
}

; GCN-LABEL: {{^}}extload_v4f16_to_v4f32_arg:
-define void @extload_v4f16_to_v4f32_arg(<4 x float> addrspace(1)* %out, <4 x half> %arg) #0 {
+define amdgpu_kernel void @extload_v4f16_to_v4f32_arg(<4 x float> addrspace(1)* %out, <4 x half> %arg) #0 {
%ext = fpext <4 x half> %arg to <4 x float>
store <4 x float> %ext, <4 x float> addrspace(1)* %out
ret void
@@ -124,7 +124,7 @@ define void @extload_v4f16_to_v4f32_arg(<4 x float> addrspace(1)* %out, <4 x hal

; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
-define void @extload_v8f16_to_v8f32_arg(<8 x float> addrspace(1)* %out, <8 x half> %arg) #0 {
+define amdgpu_kernel void @extload_v8f16_to_v8f32_arg(<8 x float> addrspace(1)* %out, <8 x half> %arg) #0 {
%ext = fpext <8 x half> %arg to <8 x float>
store <8 x float> %ext, <8 x float> addrspace(1)* %out
ret void
@@ -138,7 +138,7 @@ define void @extload_v8f16_to_v8f32_arg(<8 x float> addrspace(1)* %out, <8 x hal
; VI: v_cvt_f32_f16_e32 v[[VARG_F32:[0-9]+]], v[[VARG]]
; VI: v_cvt_f64_f32_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], v[[VARG_F32]]
; GCN: buffer_store_dwordx2 [[RESULT]]
-define void @extload_f16_to_f64_arg(double addrspace(1)* %out, half %arg) #0 {
+define amdgpu_kernel void @extload_f16_to_f64_arg(double addrspace(1)* %out, half %arg) #0 {
%ext = fpext half %arg to double
store double %ext, double addrspace(1)* %out
ret void
@@ -152,7 +152,7 @@ define void @extload_f16_to_f64_arg(double addrspace(1)* %out, half %arg) #0 {
; GCN-DAG: v_cvt_f64_f32_e32
; GCN-DAG: v_cvt_f64_f32_e32
; GCN: s_endpgm
-define void @extload_v2f16_to_v2f64_arg(<2 x double> addrspace(1)* %out, <2 x half> %arg) #0 {
+define amdgpu_kernel void @extload_v2f16_to_v2f64_arg(<2 x double> addrspace(1)* %out, <2 x half> %arg) #0 {
%ext = fpext <2 x half> %arg to <2 x double>
store <2 x double> %ext, <2 x double> addrspace(1)* %out
ret void
@@ -169,7 +169,7 @@ define void @extload_v2f16_to_v2f64_arg(<2 x double> addrspace(1)* %out, <2 x ha
; GCN-DAG: v_cvt_f64_f32_e32
; GCN-DAG: v_cvt_f64_f32_e32
; GCN: s_endpgm
-define void @extload_v3f16_to_v3f64_arg(<3 x double> addrspace(1)* %out, <3 x half> %arg) #0 {
+define amdgpu_kernel void @extload_v3f16_to_v3f64_arg(<3 x double> addrspace(1)* %out, <3 x half> %arg) #0 {
%ext = fpext <3 x half> %arg to <3 x double>
store <3 x double> %ext, <3 x double> addrspace(1)* %out
ret void
@@ -189,7 +189,7 @@ define void @extload_v3f16_to_v3f64_arg(<3 x double> addrspace(1)* %out, <3 x ha
; GCN-DAG: v_cvt_f64_f32_e32
; GCN-DAG: v_cvt_f64_f32_e32
; GCN: s_endpgm
-define void @extload_v4f16_to_v4f64_arg(<4 x double> addrspace(1)* %out, <4 x half> %arg) #0 {
+define amdgpu_kernel void @extload_v4f16_to_v4f64_arg(<4 x double> addrspace(1)* %out, <4 x half> %arg) #0 {
%ext = fpext <4 x half> %arg to <4 x double>
store <4 x double> %ext, <4 x double> addrspace(1)* %out
ret void
@@ -227,7 +227,7 @@ define void @extload_v4f16_to_v4f64_arg(<4 x double> addrspace(1)* %out, <4 x ha

; GCN-DAG: v_cvt_f64_f32_e32
; GCN: s_endpgm
-define void @extload_v8f16_to_v8f64_arg(<8 x double> addrspace(1)* %out, <8 x half> %arg) #0 {
+define amdgpu_kernel void @extload_v8f16_to_v8f64_arg(<8 x double> addrspace(1)* %out, <8 x half> %arg) #0 {
%ext = fpext <8 x half> %arg to <8 x double>
store <8 x double> %ext, <8 x double> addrspace(1)* %out
ret void
@@ -236,7 +236,7 @@ define void @extload_v8f16_to_v8f64_arg(<8 x double> addrspace(1)* %out, <8 x ha
; GCN-LABEL: {{^}}global_load_store_f16:
; GCN: buffer_load_ushort [[TMP:v[0-9]+]]
; GCN: buffer_store_short [[TMP]]
-define void @global_load_store_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_store_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
%val = load half, half addrspace(1)* %in
store half %val, half addrspace(1)* %out
ret void
@@ -245,7 +245,7 @@ define void @global_load_store_f16(half addrspace(1)* %out, half addrspace(1)* %
; GCN-LABEL: {{^}}global_load_store_v2f16:
; GCN: buffer_load_dword [[TMP:v[0-9]+]]
; GCN: buffer_store_dword [[TMP]]
-define void @global_load_store_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_store_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
%val = load <2 x half>, <2 x half> addrspace(1)* %in
store <2 x half> %val, <2 x half> addrspace(1)* %out
ret void
@@ -254,7 +254,7 @@ define void @global_load_store_v2f16(<2 x half> addrspace(1)* %out, <2 x half> a
; GCN-LABEL: {{^}}global_load_store_v4f16:
; GCN: buffer_load_dwordx2 [[TMP:v\[[0-9]+:[0-9]+\]]]
; GCN: buffer_store_dwordx2 [[TMP]]
-define void @global_load_store_v4f16(<4 x half> addrspace(1)* %in, <4 x half> addrspace(1)* %out) #0 {
+define amdgpu_kernel void @global_load_store_v4f16(<4 x half> addrspace(1)* %in, <4 x half> addrspace(1)* %out) #0 {
%val = load <4 x half>, <4 x half> addrspace(1)* %in
store <4 x half> %val, <4 x half> addrspace(1)* %out
ret void
@@ -264,7 +264,7 @@ define void @global_load_store_v4f16(<4 x half> addrspace(1)* %in, <4 x half> ad
; GCN: buffer_load_dwordx4 [[TMP:v\[[0-9]+:[0-9]+\]]]
; GCN: buffer_store_dwordx4 [[TMP:v\[[0-9]+:[0-9]+\]]]
; GCN: s_endpgm
-define void @global_load_store_v8f16(<8 x half> addrspace(1)* %out, <8 x half> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_store_v8f16(<8 x half> addrspace(1)* %out, <8 x half> addrspace(1)* %in) #0 {
%val = load <8 x half>, <8 x half> addrspace(1)* %in
store <8 x half> %val, <8 x half> addrspace(1)* %out
ret void
@@ -274,7 +274,7 @@ define void @global_load_store_v8f16(<8 x half> addrspace(1)* %out, <8 x half> a
; GCN: buffer_load_ushort [[LOAD:v[0-9]+]]
; GCN: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], [[LOAD]]
; GCN: buffer_store_dword [[CVT]]
-define void @global_extload_f16_to_f32(float addrspace(1)* %out, half addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_extload_f16_to_f32(float addrspace(1)* %out, half addrspace(1)* %in) #0 {
%val = load half, half addrspace(1)* %in
%cvt = fpext half %val to float
store float %cvt, float addrspace(1)* %out
@@ -283,13 +283,13 @@ define void @global_extload_f16_to_f32(float addrspace(1)* %out, half addrspace(

; GCN-LABEL: {{^}}global_extload_v2f16_to_v2f32:
; GCN: buffer_load_dword [[LOAD:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
-; VI: v_lshrrev_b32_e32 [[HI:v[0-9]+]], 16, [[LOAD]]
; GCN: v_cvt_f32_f16_e32 v[[CVT0:[0-9]+]], [[LOAD]]
-; SI: v_lshrrev_b32_e32 [[HI:v[0-9]+]], 16, [[LOAD]]
-; GCN: v_cvt_f32_f16_e32 v[[CVT1:[0-9]+]], [[HI]]
+; SI: v_lshrrev_b32_e32 [[HI:v[0-9]+]], 16, [[LOAD]]
+; SI: v_cvt_f32_f16_e32 v[[CVT1:[0-9]+]], [[HI]]
+; VI: v_cvt_f32_f16_sdwa v[[CVT1:[0-9]+]], [[LOAD]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; GCN: buffer_store_dwordx2 v{{\[}}[[CVT0]]:[[CVT1]]{{\]}}
; GCN: s_endpgm
-define void @global_extload_v2f16_to_v2f32(<2 x float> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_extload_v2f16_to_v2f32(<2 x float> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
%val = load <2 x half>, <2 x half> addrspace(1)* %in
%cvt = fpext <2 x half> %val to <2 x float>
store <2 x float> %cvt, <2 x float> addrspace(1)* %out
@@ -297,7 +297,7 @@ define void @global_extload_v2f16_to_v2f32(<2 x float> addrspace(1)* %out, <2 x
}

; GCN-LABEL: {{^}}global_extload_v3f16_to_v3f32:
-define void @global_extload_v3f16_to_v3f32(<3 x float> addrspace(1)* %out, <3 x half> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_extload_v3f16_to_v3f32(<3 x float> addrspace(1)* %out, <3 x half> addrspace(1)* %in) #0 {
%val = load <3 x half>, <3 x half> addrspace(1)* %in
%cvt = fpext <3 x half> %val to <3 x float>
store <3 x float> %cvt, <3 x float> addrspace(1)* %out
@@ -305,7 +305,7 @@ define void @global_extload_v3f16_to_v3f32(<3 x float> addrspace(1)* %out, <3 x
}

; GCN-LABEL: {{^}}global_extload_v4f16_to_v4f32:
-define void @global_extload_v4f16_to_v4f32(<4 x float> addrspace(1)* %out, <4 x half> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_extload_v4f16_to_v4f32(<4 x float> addrspace(1)* %out, <4 x half> addrspace(1)* %in) #0 {
%val = load <4 x half>, <4 x half> addrspace(1)* %in
%cvt = fpext <4 x half> %val to <4 x float>
store <4 x float> %cvt, <4 x float> addrspace(1)* %out
@@ -313,7 +313,7 @@ define void @global_extload_v4f16_to_v4f32(<4 x float> addrspace(1)* %out, <4 x
}

; GCN-LABEL: {{^}}global_extload_v8f16_to_v8f32:
-define void @global_extload_v8f16_to_v8f32(<8 x float> addrspace(1)* %out, <8 x half> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_extload_v8f16_to_v8f32(<8 x float> addrspace(1)* %out, <8 x half> addrspace(1)* %in) #0 {
%val = load <8 x half>, <8 x half> addrspace(1)* %in
%cvt = fpext <8 x half> %val to <8 x float>
store <8 x float> %cvt, <8 x float> addrspace(1)* %out
@@ -324,22 +324,26 @@ define void @global_extload_v8f16_to_v8f32(<8 x float> addrspace(1)* %out, <8 x
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx4

-; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
-; GCN: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+
+; VI: v_cvt_f32_f16_e32
+; VI: v_cvt_f32_f16_sdwa
+; ...

; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
@@ -347,7 +351,7 @@ define void @global_extload_v8f16_to_v8f32(<8 x float> addrspace(1)* %out, <8 x

; GCN: buffer_store_dwordx4
; GCN: s_endpgm
-define void @global_extload_v16f16_to_v16f32(<16 x float> addrspace(1)* %out, <16 x half> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_extload_v16f16_to_v16f32(<16 x float> addrspace(1)* %out, <16 x half> addrspace(1)* %in) #0 {
%val = load <16 x half>, <16 x half> addrspace(1)* %in
%cvt = fpext <16 x half> %val to <16 x float>
store <16 x float> %cvt, <16 x float> addrspace(1)* %out
@@ -359,7 +363,7 @@ define void @global_extload_v16f16_to_v16f32(<16 x float> addrspace(1)* %out, <1
; GCN: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], [[LOAD]]
; GCN: v_cvt_f64_f32_e32 [[CVT1:v\[[0-9]+:[0-9]+\]]], [[CVT0]]
; GCN: buffer_store_dwordx2 [[CVT1]]
-define void @global_extload_f16_to_f64(double addrspace(1)* %out, half addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_extload_f16_to_f64(double addrspace(1)* %out, half addrspace(1)* %in) #0 {
%val = load half, half addrspace(1)* %in
%cvt = fpext half %val to double
store double %cvt, double addrspace(1)* %out
@@ -368,14 +372,21 @@ define void @global_extload_f16_to_f64(double addrspace(1)* %out, half addrspace

; GCN-LABEL: {{^}}global_extload_v2f16_to_v2f64:
; GCN-DAG: buffer_load_dword [[LOAD:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
-; GCN-DAG: v_lshrrev_b32_e32 [[HI:v[0-9]+]], 16, [[LOAD]]
-; GCN-DAG: v_cvt_f32_f16_e32 v[[CVT0:[0-9]+]], [[LOAD]]
-; GCN-DAG: v_cvt_f32_f16_e32 v[[CVT1:[0-9]+]], [[HI]]
-; GCN-DAG: v_cvt_f64_f32_e32 v{{\[}}[[CVT2_LO:[0-9]+]]:[[CVT2_HI:[0-9]+]]{{\]}}, v[[CVT0]]
-; GCN-DAG: v_cvt_f64_f32_e32 v{{\[}}[[CVT3_LO:[0-9]+]]:[[CVT3_HI:[0-9]+]]{{\]}}, v[[CVT1]]
+
+; SI-DAG: v_lshrrev_b32_e32 [[HI:v[0-9]+]], 16, [[LOAD]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[CVT0:[0-9]+]], [[LOAD]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[CVT1:[0-9]+]], [[HI]]
+; SI-DAG: v_cvt_f64_f32_e32 v{{\[}}[[CVT2_LO:[0-9]+]]:[[CVT2_HI:[0-9]+]]{{\]}}, v[[CVT0]]
+; SI-DAG: v_cvt_f64_f32_e32 v{{\[}}[[CVT3_LO:[0-9]+]]:[[CVT3_HI:[0-9]+]]{{\]}}, v[[CVT1]]
+
+; VI-DAG: v_cvt_f32_f16_sdwa v[[CVT0:[0-9]+]], [[LOAD]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; VI-DAG: v_cvt_f32_f16_e32 v[[CVT1:[0-9]+]], [[LOAD]]
+; VI-DAG: v_cvt_f64_f32_e32 v{{\[}}[[CVT3_LO:[0-9]+]]:[[CVT3_HI:[0-9]+]]{{\]}}, v[[CVT0]]
+; VI-DAG: v_cvt_f64_f32_e32 v{{\[}}[[CVT2_LO:[0-9]+]]:[[CVT2_HI:[0-9]+]]{{\]}}, v[[CVT1]]
+
; GCN-DAG: buffer_store_dwordx4 v{{\[}}[[CVT2_LO]]:[[CVT3_HI]]{{\]}}
; GCN: s_endpgm
-define void @global_extload_v2f16_to_v2f64(<2 x double> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_extload_v2f16_to_v2f64(<2 x double> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
%val = load <2 x half>, <2 x half> addrspace(1)* %in
%cvt = fpext <2 x half> %val to <2 x double>
store <2 x double> %cvt, <2 x double> addrspace(1)* %out
@@ -392,28 +403,27 @@ define void @global_extload_v2f16_to_v2f64(<2 x double> addrspace(1)* %out, <2 x
; XSI-NOT: v_cvt_f32_f16

; XVI: buffer_load_dwordx2 [[LOAD:v\[[0-9]+:[0-9]+\]]]
-; XVI-DAG: v_lshrrev_b32_e32 {{v[0-9]+}}, 16, {{v[0-9]+}}
-; XVI: v_cvt_f32_f16_e32
; XVI: v_cvt_f32_f16_e32
; XVI: v_cvt_f32_f16_e32
+; XVI: v_cvt_f32_f16_sdwa
; XVI-NOT: v_cvt_f32_f16

; GCN: buffer_load_dwordx2 v{{\[}}[[IN_LO:[0-9]+]]:[[IN_HI:[0-9]+]]
-; VI: v_lshrrev_b32_e32 [[Y16:v[0-9]+]], 16, v[[IN_LO]]
-; GCN: v_cvt_f32_f16_e32 [[Z32:v[0-9]+]], v[[IN_HI]]
-; GCN: v_cvt_f32_f16_e32 [[X32:v[0-9]+]], v[[IN_LO]]
-; SI: v_lshrrev_b32_e32 [[Y16:v[0-9]+]], 16, v[[IN_LO]]
-; GCN: v_cvt_f32_f16_e32 [[Y32:v[0-9]+]], [[Y16]]
-
-; GCN: v_cvt_f64_f32_e32 [[Z:v\[[0-9]+:[0-9]+\]]], [[Z32]]
-; GCN: v_cvt_f64_f32_e32 v{{\[}}[[XLO:[0-9]+]]:{{[0-9]+}}], [[X32]]
-; GCN: v_cvt_f64_f32_e32 v[{{[0-9]+}}:[[YHI:[0-9]+]]{{\]}}, [[Y32]]
+; GCN-DAG: v_cvt_f32_f16_e32 [[Z32:v[0-9]+]], v[[IN_HI]]
+; GCN-DAG: v_cvt_f32_f16_e32 [[X32:v[0-9]+]], v[[IN_LO]]
+; SI: v_lshrrev_b32_e32 [[Y16:v[0-9]+]], 16, v[[IN_LO]]
+; SI-DAG: v_cvt_f32_f16_e32 [[Y32:v[0-9]+]], [[Y16]]
+; VI-DAG: v_cvt_f32_f16_sdwa [[Y32:v[0-9]+]], v[[IN_LO]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+
+; GCN-DAG: v_cvt_f64_f32_e32 [[Z:v\[[0-9]+:[0-9]+\]]], [[Z32]]
+; GCN-DAG: v_cvt_f64_f32_e32 v{{\[}}[[XLO:[0-9]+]]:{{[0-9]+}}], [[X32]]
+; GCN-DAG: v_cvt_f64_f32_e32 v[{{[0-9]+}}:[[YHI:[0-9]+]]{{\]}}, [[Y32]]

; GCN-NOT: v_cvt_f64_f32_e32
; GCN-DAG: buffer_store_dwordx4 v{{\[}}[[XLO]]:[[YHI]]{{\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
; GCN-DAG: buffer_store_dwordx2 [[Z]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:16
; GCN: s_endpgm
-define void @global_extload_v3f16_to_v3f64(<3 x double> addrspace(1)* %out, <3 x half> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_extload_v3f16_to_v3f64(<3 x double> addrspace(1)* %out, <3 x half> addrspace(1)* %in) #0 {
%val = load <3 x half>, <3 x half> addrspace(1)* %in
%cvt = fpext <3 x half> %val to <3 x double>
store <3 x double> %cvt, <3 x double> addrspace(1)* %out
@@ -421,7 +431,7 @@ define void @global_extload_v3f16_to_v3f64(<3 x double> addrspace(1)* %out, <3 x
}

; GCN-LABEL: {{^}}global_extload_v4f16_to_v4f64:
-define void @global_extload_v4f16_to_v4f64(<4 x double> addrspace(1)* %out, <4 x half> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_extload_v4f16_to_v4f64(<4 x double> addrspace(1)* %out, <4 x half> addrspace(1)* %in) #0 {
%val = load <4 x half>, <4 x half> addrspace(1)* %in
%cvt = fpext <4 x half> %val to <4 x double>
store <4 x double> %cvt, <4 x double> addrspace(1)* %out
@@ -429,7 +439,7 @@ define void @global_extload_v4f16_to_v4f64(<4 x double> addrspace(1)* %out, <4 x
}

; GCN-LABEL: {{^}}global_extload_v8f16_to_v8f64:
-define void @global_extload_v8f16_to_v8f64(<8 x double> addrspace(1)* %out, <8 x half> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_extload_v8f16_to_v8f64(<8 x double> addrspace(1)* %out, <8 x half> addrspace(1)* %in) #0 {
%val = load <8 x half>, <8 x half> addrspace(1)* %in
%cvt = fpext <8 x half> %val to <8 x double>
store <8 x double> %cvt, <8 x double> addrspace(1)* %out
@@ -437,7 +447,7 @@ define void @global_extload_v8f16_to_v8f64(<8 x double> addrspace(1)* %out, <8 x
}

; GCN-LABEL: {{^}}global_extload_v16f16_to_v16f64:
-define void @global_extload_v16f16_to_v16f64(<16 x double> addrspace(1)* %out, <16 x half> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_extload_v16f16_to_v16f64(<16 x double> addrspace(1)* %out, <16 x half> addrspace(1)* %in) #0 {
%val = load <16 x half>, <16 x half> addrspace(1)* %in
%cvt = fpext <16 x half> %val to <16 x double>
store <16 x double> %cvt, <16 x double> addrspace(1)* %out
@@ -448,7 +458,7 @@ define void @global_extload_v16f16_to_v16f64(<16 x double> addrspace(1)* %out, <
; GCN: buffer_load_dword [[LOAD:v[0-9]+]]
; GCN: v_cvt_f16_f32_e32 [[CVT:v[0-9]+]], [[LOAD]]
; GCN: buffer_store_short [[CVT]]
-define void @global_truncstore_f32_to_f16(half addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_truncstore_f32_to_f16(half addrspace(1)* %out, float addrspace(1)* %in) #0 {
%val = load float, float addrspace(1)* %in
%cvt = fptrunc float %val to half
store half %cvt, half addrspace(1)* %out
@@ -458,12 +468,17 @@ define void @global_truncstore_f32_to_f16(half addrspace(1)* %out, float addrspa
; GCN-LABEL: {{^}}global_truncstore_v2f32_to_v2f16:
; GCN: buffer_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
; GCN-DAG: v_cvt_f16_f32_e32 [[CVT0:v[0-9]+]], v[[LO]]
-; GCN-DAG: v_cvt_f16_f32_e32 [[CVT1:v[0-9]+]], v[[HI]]
-; GCN-DAG: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 16, [[CVT1]]
-; GCN-DAG: v_or_b32_e32 [[PACKED:v[0-9]+]], [[SHL]], [[CVT0]]
+
+; SI-DAG: v_cvt_f16_f32_e32 [[CVT1:v[0-9]+]], v[[HI]]
+; SI-DAG: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 16, [[CVT1]]
+; SI: v_or_b32_e32 [[PACKED:v[0-9]+]], [[SHL]], [[CVT0]]
+
+; VI-DAG: v_cvt_f16_f32_sdwa [[CVT1:v[0-9]+]], v[[HI]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; VI: v_or_b32_e32 [[PACKED:v[0-9]+]], [[CVT1]], [[CVT0]]
+
; GCN-DAG: buffer_store_dword [[PACKED]]
; GCN: s_endpgm
-define void @global_truncstore_v2f32_to_v2f16(<2 x half> addrspace(1)* %out, <2 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_truncstore_v2f32_to_v2f16(<2 x half> addrspace(1)* %out, <2 x float> addrspace(1)* %in) #0 {
%val = load <2 x float>, <2 x float> addrspace(1)* %in
%cvt = fptrunc <2 x float> %val to <2 x half>
store <2 x half> %cvt, <2 x half> addrspace(1)* %out
@@ -472,14 +487,14 @@ define void @global_truncstore_v2f32_to_v2f16(<2 x half> addrspace(1)* %out, <2

; GCN-LABEL: {{^}}global_truncstore_v3f32_to_v3f16:
; GCN: buffer_load_dwordx4
-; GCN: v_cvt_f16_f32_e32
-; GCN: v_cvt_f16_f32_e32
-; GCN: v_cvt_f16_f32_e32
-; GCN-NOT: v_cvt_f16_f32_e32
+; GCN-DAG: v_cvt_f16_f32_e32
+; SI-DAG: v_cvt_f16_f32_e32
+; VI-DAG: v_cvt_f16_f32_sdwa
+; GCN-DAG: v_cvt_f16_f32_e32
; GCN: buffer_store_short
; GCN: buffer_store_dword
; GCN: s_endpgm
-define void @global_truncstore_v3f32_to_v3f16(<3 x half> addrspace(1)* %out, <3 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_truncstore_v3f32_to_v3f16(<3 x half> addrspace(1)* %out, <3 x float> addrspace(1)* %in) #0 {
%val = load <3 x float>, <3 x float> addrspace(1)* %in
%cvt = fptrunc <3 x float> %val to <3 x half>
store <3 x half> %cvt, <3 x half> addrspace(1)* %out
@@ -488,13 +503,15 @@ define void @global_truncstore_v3f32_to_v3f16(<3 x half> addrspace(1)* %out, <3

; GCN-LABEL: {{^}}global_truncstore_v4f32_to_v4f16:
; GCN: buffer_load_dwordx4
-; GCN: v_cvt_f16_f32_e32
-; GCN: v_cvt_f16_f32_e32
-; GCN: v_cvt_f16_f32_e32
-; GCN: v_cvt_f16_f32_e32
+; GCN-DAG: v_cvt_f16_f32_e32
+; SI-DAG: v_cvt_f16_f32_e32
+; SI-DAG: v_cvt_f16_f32_e32
+; VI-DAG: v_cvt_f16_f32_sdwa
+; VI-DAG: v_cvt_f16_f32_sdwa
+; GCN-DAG: v_cvt_f16_f32_e32
; GCN: buffer_store_dwordx2
; GCN: s_endpgm
-define void @global_truncstore_v4f32_to_v4f16(<4 x half> addrspace(1)* %out, <4 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_truncstore_v4f32_to_v4f16(<4 x half> addrspace(1)* %out, <4 x float> addrspace(1)* %in) #0 {
%val = load <4 x float>, <4 x float> addrspace(1)* %in
%cvt = fptrunc <4 x float> %val to <4 x half>
store <4 x half> %cvt, <4 x half> addrspace(1)* %out
@@ -504,17 +521,25 @@ define void @global_truncstore_v4f32_to_v4f16(<4 x half> addrspace(1)* %out, <4
; GCN-LABEL: {{^}}global_truncstore_v8f32_to_v8f16:
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx4
-; GCN: v_cvt_f16_f32_e32
-; GCN: v_cvt_f16_f32_e32
-; GCN: v_cvt_f16_f32_e32
-; GCN: v_cvt_f16_f32_e32
-; GCN: v_cvt_f16_f32_e32
-; GCN: v_cvt_f16_f32_e32
-; GCN: v_cvt_f16_f32_e32
-; GCN: v_cvt_f16_f32_e32
+; SI: v_cvt_f16_f32_e32
+; SI: v_cvt_f16_f32_e32
+; SI: v_cvt_f16_f32_e32
+; SI: v_cvt_f16_f32_e32
+; SI: v_cvt_f16_f32_e32
+; SI: v_cvt_f16_f32_e32
+; SI: v_cvt_f16_f32_e32
+; SI: v_cvt_f16_f32_e32
+; VI-DAG: v_cvt_f16_f32_e32
+; VI-DAG: v_cvt_f16_f32_e32
+; VI-DAG: v_cvt_f16_f32_e32
+; VI-DAG: v_cvt_f16_f32_e32
+; VI-DAG: v_cvt_f16_f32_sdwa
+; VI-DAG: v_cvt_f16_f32_sdwa
+; VI-DAG: v_cvt_f16_f32_sdwa
+; VI-DAG: v_cvt_f16_f32_sdwa
; GCN: buffer_store_dwordx4
; GCN: s_endpgm
-define void @global_truncstore_v8f32_to_v8f16(<8 x half> addrspace(1)* %out, <8 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_truncstore_v8f32_to_v8f16(<8 x half> addrspace(1)* %out, <8 x float> addrspace(1)* %in) #0 {
%val = load <8 x float>, <8 x float> addrspace(1)* %in
%cvt = fptrunc <8 x float> %val to <8 x half>
store <8 x half> %cvt, <8 x half> addrspace(1)* %out
@@ -545,7 +570,7 @@ define void @global_truncstore_v8f32_to_v8f16(<8 x half> addrspace(1)* %out, <8
; GCN-DAG: buffer_store_dwordx4
; GCN-DAG: buffer_store_dwordx4
; GCN: s_endpgm
-define void @global_truncstore_v16f32_to_v16f16(<16 x half> addrspace(1)* %out, <16 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_truncstore_v16f32_to_v16f16(<16 x half> addrspace(1)* %out, <16 x float> addrspace(1)* %in) #0 {
%val = load <16 x float>, <16 x float> addrspace(1)* %in
%cvt = fptrunc <16 x float> %val to <16 x half>
store <16 x half> %cvt, <16 x half> addrspace(1)* %out
@@ -560,7 +585,7 @@ define void @global_truncstore_v16f32_to_v16f16(<16 x half> addrspace(1)* %out,
; SI-DAG: v_cvt_f32_f16_e32 v{{[0-9]+}},
; SI: v_add_f32
; GCN: s_endpgm
-define void @fadd_f16(half addrspace(1)* %out, half %a, half %b) #0 {
+define amdgpu_kernel void @fadd_f16(half addrspace(1)* %out, half %a, half %b) #0 {
%add = fadd half %a, %b
store half %add, half addrspace(1)* %out, align 4
ret void
@@ -570,7 +595,7 @@ define void @fadd_f16(half addrspace(1)* %out, half %a, half %b) #0 {
; SI: v_add_f32
; SI: v_add_f32
; GCN: s_endpgm
-define void @fadd_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %a, <2 x half> %b) #0 {
+define amdgpu_kernel void @fadd_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %a, <2 x half> %b) #0 {
%add = fadd <2 x half> %a, %b
store <2 x half> %add, <2 x half> addrspace(1)* %out, align 8
ret void
@@ -582,7 +607,7 @@ define void @fadd_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %a, <2 x half>
; SI: v_add_f32
; SI: v_add_f32
; GCN: s_endpgm
-define void @fadd_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fadd_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %in) #0 {
%b_ptr = getelementptr <4 x half>, <4 x half> addrspace(1)* %in, i32 1
%a = load <4 x half>, <4 x half> addrspace(1)* %in, align 16
%b = load <4 x half>, <4 x half> addrspace(1)* %b_ptr, align 16
@@ -601,7 +626,7 @@ define void @fadd_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)*
; SI: v_add_f32
; SI: v_add_f32
; GCN: s_endpgm
-define void @fadd_v8f16(<8 x half> addrspace(1)* %out, <8 x half> %a, <8 x half> %b) #0 {
+define amdgpu_kernel void @fadd_v8f16(<8 x half> addrspace(1)* %out, <8 x half> %a, <8 x half> %b) #0 {
%add = fadd <8 x half> %a, %b
store <8 x half> %add, <8 x half> addrspace(1)* %out, align 32
ret void
@@ -610,7 +635,7 @@ define void @fadd_v8f16(<8 x half> addrspace(1)* %out, <8 x half> %a, <8 x half>
; GCN-LABEL: {{^}}test_bitcast_from_half:
; GCN: buffer_load_ushort [[TMP:v[0-9]+]]
; GCN: buffer_store_short [[TMP]]
-define void @test_bitcast_from_half(half addrspace(1)* %in, i16 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @test_bitcast_from_half(half addrspace(1)* %in, i16 addrspace(1)* %out) #0 {
%val = load half, half addrspace(1)* %in
%val_int = bitcast half %val to i16
store i16 %val_int, i16 addrspace(1)* %out
@@ -620,7 +645,7 @@ define void @test_bitcast_from_half(half addrspace(1)* %in, i16 addrspace(1)* %o
; GCN-LABEL: {{^}}test_bitcast_to_half:
; GCN: buffer_load_ushort [[TMP:v[0-9]+]]
; GCN: buffer_store_short [[TMP]]
-define void @test_bitcast_to_half(half addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_bitcast_to_half(half addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
%val = load i16, i16 addrspace(1)* %in
%val_fp = bitcast i16 %val to half
store half %val_fp, half addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/hsa-default-device.ll b/test/CodeGen/AMDGPU/hsa-default-device.ll
index 631d6def4442..45efe9b86557 100644
--- a/test/CodeGen/AMDGPU/hsa-default-device.ll
+++ b/test/CodeGen/AMDGPU/hsa-default-device.ll
@@ -4,7 +4,7 @@
; unsupported device.
; CHECK: .hsa_code_object_isa 7,0,0,"AMD","AMDGPU"
-define void @test_kernel(float addrspace(1)* %out0, double addrspace(1)* %out1) nounwind {
+define amdgpu_kernel void @test_kernel(float addrspace(1)* %out0, double addrspace(1)* %out1) nounwind {
store float 0.0, float addrspace(1)* %out0
ret void
}
diff --git a/test/CodeGen/AMDGPU/hsa-fp-mode.ll b/test/CodeGen/AMDGPU/hsa-fp-mode.ll
index 51d6aee25f45..b1901cf894b0 100644
--- a/test/CodeGen/AMDGPU/hsa-fp-mode.ll
+++ b/test/CodeGen/AMDGPU/hsa-fp-mode.ll
@@ -4,7 +4,7 @@
; GCN: float_mode = 192
; GCN: enable_dx10_clamp = 1
; GCN: enable_ieee_mode = 1
-define void @test_default_ci(float addrspace(1)* %out0, double addrspace(1)* %out1) #0 {
+define amdgpu_kernel void @test_default_ci(float addrspace(1)* %out0, double addrspace(1)* %out1) #0 {
store float 0.0, float addrspace(1)* %out0
store double 0.0, double addrspace(1)* %out1
ret void
@@ -14,7 +14,7 @@ define void @test_default_ci(float addrspace(1)* %out0, double addrspace(1)* %ou
; GCN: float_mode = 192
; GCN: enable_dx10_clamp = 1
; GCN: enable_ieee_mode = 1
-define void @test_default_vi(float addrspace(1)* %out0, double addrspace(1)* %out1) #1 {
+define amdgpu_kernel void @test_default_vi(float addrspace(1)* %out0, double addrspace(1)* %out1) #1 {
store float 0.0, float addrspace(1)* %out0
store double 0.0, double addrspace(1)* %out1
ret void
@@ -24,7 +24,7 @@ define void @test_default_vi(float addrspace(1)* %out0, double addrspace(1)* %ou
; GCN: float_mode = 192
; GCN: enable_dx10_clamp = 1
; GCN: enable_ieee_mode = 1
-define void @test_f64_denormals(float addrspace(1)* %out0, double addrspace(1)* %out1) #2 {
+define amdgpu_kernel void @test_f64_denormals(float addrspace(1)* %out0, double addrspace(1)* %out1) #2 {
store float 0.0, float addrspace(1)* %out0
store double 0.0, double addrspace(1)* %out1
ret void
@@ -34,7 +34,7 @@ define void @test_f64_denormals(float addrspace(1)* %out0, double addrspace(1)*
; GCN: float_mode = 48
; GCN: enable_dx10_clamp = 1
; GCN: enable_ieee_mode = 1
-define void @test_f32_denormals(float addrspace(1)* %out0, double addrspace(1)* %out1) #3 {
+define amdgpu_kernel void @test_f32_denormals(float addrspace(1)* %out0, double addrspace(1)* %out1) #3 {
store float 0.0, float addrspace(1)* %out0
store double 0.0, double addrspace(1)* %out1
ret void
@@ -44,7 +44,7 @@ define void @test_f32_denormals(float addrspace(1)* %out0, double addrspace(1)*
; GCN: float_mode = 240
; GCN: enable_dx10_clamp = 1
; GCN: enable_ieee_mode = 1
-define void @test_f32_f64_denormals(float addrspace(1)* %out0, double addrspace(1)* %out1) #4 {
+define amdgpu_kernel void @test_f32_f64_denormals(float addrspace(1)* %out0, double addrspace(1)* %out1) #4 {
store float 0.0, float addrspace(1)* %out0
store double 0.0, double addrspace(1)* %out1
ret void
@@ -54,7 +54,17 @@ define void @test_f32_f64_denormals(float addrspace(1)* %out0, double addrspace(
; GCN: float_mode = 0
; GCN: enable_dx10_clamp = 1
; GCN: enable_ieee_mode = 1
-define void @test_no_denormals(float addrspace(1)* %out0, double addrspace(1)* %out1) #5 {
+define amdgpu_kernel void @test_no_denormals(float addrspace(1)* %out0, double addrspace(1)* %out1) #5 {
+ store float 0.0, float addrspace(1)* %out0
+ store double 0.0, double addrspace(1)* %out1
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_no_dx10_clamp_vi:
+; GCN: float_mode = 192
+; GCN: enable_dx10_clamp = 0
+; GCN: enable_ieee_mode = 1
+define amdgpu_kernel void @test_no_dx10_clamp_vi(float addrspace(1)* %out0, double addrspace(1)* %out1) #6 {
store float 0.0, float addrspace(1)* %out0
store double 0.0, double addrspace(1)* %out1
ret void
@@ -62,7 +72,8 @@ define void @test_no_denormals(float addrspace(1)* %out0, double addrspace(1)* %
attributes #0 = { nounwind "target-cpu"="kaveri" }
attributes #1 = { nounwind "target-cpu"="fiji" }
-attributes #2 = { nounwind "target-features"="-fp32-denormals,+fp64-denormals" }
-attributes #3 = { nounwind "target-features"="+fp32-denormals,-fp64-denormals" }
-attributes #4 = { nounwind "target-features"="+fp32-denormals,+fp64-denormals" }
-attributes #5 = { nounwind "target-features"="-fp32-denormals,-fp64-denormals" }
+attributes #2 = { nounwind "target-features"="-fp32-denormals,+fp64-fp16-denormals" }
+attributes #3 = { nounwind "target-features"="+fp32-denormals,-fp64-fp16-denormals" }
+attributes #4 = { nounwind "target-features"="+fp32-denormals,+fp64-fp16-denormals" }
+attributes #5 = { nounwind "target-features"="-fp32-denormals,-fp64-fp16-denormals" }
+attributes #6 = { nounwind "target-cpu"="fiji" "target-features"="-dx10-clamp" }
diff --git a/test/CodeGen/AMDGPU/hsa-func.ll b/test/CodeGen/AMDGPU/hsa-func.ll
index d9662b69b126..b4cdd4030d86 100644
--- a/test/CodeGen/AMDGPU/hsa-func.ll
+++ b/test/CodeGen/AMDGPU/hsa-func.ll
@@ -26,7 +26,7 @@
; ELF: Symbol {
; ELF: Name: simple
-; ELF: Size: 288
+; ELF: Size: 292
; ELF: Type: Function (0x2)
; ELF: }
diff --git a/test/CodeGen/AMDGPU/hsa-globals.ll b/test/CodeGen/AMDGPU/hsa-globals.ll
index 2820b308edb8..2ec57a40f0a2 100644
--- a/test/CodeGen/AMDGPU/hsa-globals.ll
+++ b/test/CodeGen/AMDGPU/hsa-globals.ll
@@ -9,7 +9,7 @@
@internal_readonly = internal unnamed_addr addrspace(2) constant i32 0
@external_readonly = unnamed_addr addrspace(2) constant i32 0
-define void @test() {
+define amdgpu_kernel void @test() {
ret void
}
diff --git a/test/CodeGen/AMDGPU/hsa-group-segment.ll b/test/CodeGen/AMDGPU/hsa-group-segment.ll
index 1999dc38a6b0..600793810e59 100644
--- a/test/CodeGen/AMDGPU/hsa-group-segment.ll
+++ b/test/CodeGen/AMDGPU/hsa-group-segment.ll
@@ -3,7 +3,7 @@
@internal_group = internal addrspace(3) global i32 undef
@external_group = addrspace(3) global i32 undef
-define void @test() {
+define amdgpu_kernel void @test() {
entry:
store i32 0, i32 addrspace(3)* @internal_group
store i32 0, i32 addrspace(3)* @external_group
diff --git a/test/CodeGen/AMDGPU/hsa-note-no-func.ll b/test/CodeGen/AMDGPU/hsa-note-no-func.ll
index a4e599230b74..af63a4f8df76 100644
--- a/test/CodeGen/AMDGPU/hsa-note-no-func.ll
+++ b/test/CodeGen/AMDGPU/hsa-note-no-func.ll
@@ -13,6 +13,8 @@
; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=gfx803 | FileCheck --check-prefix=HSA --check-prefix=HSA-VI803 %s
; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=gfx804 | FileCheck --check-prefix=HSA --check-prefix=HSA-VI804 %s
; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=gfx810 | FileCheck --check-prefix=HSA --check-prefix=HSA-VI810 %s
+; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=gfx900 | FileCheck --check-prefix=HSA --check-prefix=HSA-GFX900 %s
+; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=gfx901 | FileCheck --check-prefix=HSA --check-prefix=HSA-GFX901 %s
; HSA: .hsa_code_object_version 2,1
; HSA-CI700: .hsa_code_object_isa 7,0,0,"AMD","AMDGPU"
@@ -24,3 +26,5 @@
; HSA-VI803: .hsa_code_object_isa 8,0,3,"AMD","AMDGPU"
; HSA-VI804: .hsa_code_object_isa 8,0,4,"AMD","AMDGPU"
; HSA-VI810: .hsa_code_object_isa 8,1,0,"AMD","AMDGPU"
+; HSA-GFX900: .hsa_code_object_isa 9,0,0,"AMD","AMDGPU"
+; HSA-GFX901: .hsa_code_object_isa 9,0,1,"AMD","AMDGPU"
diff --git a/test/CodeGen/AMDGPU/hsa.ll b/test/CodeGen/AMDGPU/hsa.ll
index 12c15441c0f5..972fbd66ef37 100644
--- a/test/CodeGen/AMDGPU/hsa.ll
+++ b/test/CodeGen/AMDGPU/hsa.ll
@@ -45,6 +45,8 @@
; HSA: .amd_kernel_code_t
; HSA: enable_sgpr_private_segment_buffer = 1
; HSA: enable_sgpr_kernarg_segment_ptr = 1
+; HSA: wavefront_size = 6
+; HSA: call_convention = -1
; HSA: .end_amd_kernel_code_t
; HSA: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x0
diff --git a/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll b/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll
index e85db65e7429..f6bf0b09486e 100644
--- a/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll
+++ b/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll
@@ -5,7 +5,7 @@
; SI-LABEL: {{^}}br_implicit_def:
; SI: BB#0:
; SI-NEXT: s_cbranch_scc1
-define void @br_implicit_def(i32 addrspace(1)* %out, i32 %arg) #0 {
+define amdgpu_kernel void @br_implicit_def(i32 addrspace(1)* %out, i32 %arg) #0 {
bb:
br i1 undef, label %bb1, label %bb2
diff --git a/test/CodeGen/AMDGPU/i1-copy-phi.ll b/test/CodeGen/AMDGPU/i1-copy-phi.ll
index d4912776debd..b160af86a2b6 100644
--- a/test/CodeGen/AMDGPU/i1-copy-phi.ll
+++ b/test/CodeGen/AMDGPU/i1-copy-phi.ll
@@ -10,7 +10,7 @@
; SI: s_and_saveexec_b64
; SI: s_xor_b64
; SI: s_endpgm
-define void @br_i1_phi(i32 %arg) {
+define amdgpu_kernel void @br_i1_phi(i32 %arg) {
bb:
%tidig = call i32 @llvm.r600.read.tidig.x() #0
%cmp = trunc i32 %tidig to i1
diff --git a/test/CodeGen/AMDGPU/i8-to-double-to-float.ll b/test/CodeGen/AMDGPU/i8-to-double-to-float.ll
index c218e1918bb0..d501be5c8bf0 100644
--- a/test/CodeGen/AMDGPU/i8-to-double-to-float.ll
+++ b/test/CodeGen/AMDGPU/i8-to-double-to-float.ll
@@ -2,7 +2,7 @@
;CHECK: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @test(float addrspace(1)* %out, i8 addrspace(1)* %in) {
+define amdgpu_kernel void @test(float addrspace(1)* %out, i8 addrspace(1)* %in) {
%1 = load i8, i8 addrspace(1)* %in
%2 = uitofp i8 %1 to double
%3 = fptrunc double %2 to float
diff --git a/test/CodeGen/AMDGPU/icmp-select-sete-reverse-args.ll b/test/CodeGen/AMDGPU/icmp-select-sete-reverse-args.ll
index 60e59a5a5286..12cc440e48d9 100644
--- a/test/CodeGen/AMDGPU/icmp-select-sete-reverse-args.ll
+++ b/test/CodeGen/AMDGPU/icmp-select-sete-reverse-args.ll
@@ -6,7 +6,7 @@
;CHECK: SETNE_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;CHECK-NOT: SETNE_INT
-define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
%0 = load i32, i32 addrspace(1)* %in
%arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
diff --git a/test/CodeGen/AMDGPU/icmp.i16.ll b/test/CodeGen/AMDGPU/icmp.i16.ll
index c3dad2d32033..99c2138bbe64 100644
--- a/test/CodeGen/AMDGPU/icmp.i16.ll
+++ b/test/CodeGen/AMDGPU/icmp.i16.ll
@@ -8,7 +8,7 @@
; GCN-LABEL: {{^}}i16_eq:
; VI: v_cmp_eq_u16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_eq_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_eq(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @i16_eq(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -26,7 +26,7 @@ entry:
; GCN-LABEL: {{^}}i16_ne:
; VI: v_cmp_ne_u16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_ne_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_ne(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @i16_ne(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -44,7 +44,7 @@ entry:
; GCN-LABEL: {{^}}i16_ugt:
; VI: v_cmp_gt_u16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_gt_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_ugt(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @i16_ugt(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -62,7 +62,7 @@ entry:
; GCN-LABEL: {{^}}i16_uge:
; VI: v_cmp_ge_u16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_ge_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_uge(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @i16_uge(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -80,7 +80,7 @@ entry:
; GCN-LABEL: {{^}}i16_ult:
; VI: v_cmp_lt_u16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_lt_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_ult(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @i16_ult(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -98,7 +98,7 @@ entry:
; GCN-LABEL: {{^}}i16_ule:
; VI: v_cmp_le_u16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_le_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_ule(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @i16_ule(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -117,7 +117,7 @@ entry:
; GCN-LABEL: {{^}}i16_sgt:
; VI: v_cmp_gt_i16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_gt_i32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_sgt(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @i16_sgt(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -135,7 +135,7 @@ entry:
; GCN-LABEL: {{^}}i16_sge:
; VI: v_cmp_ge_i16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_ge_i32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_sge(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @i16_sge(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -153,7 +153,7 @@ entry:
; GCN-LABEL: {{^}}i16_slt:
; VI: v_cmp_lt_i16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_lt_i32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_slt(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @i16_slt(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -171,7 +171,7 @@ entry:
; GCN-LABEL: {{^}}i16_sle:
; VI: v_cmp_le_i16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_le_i32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_sle(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
+define amdgpu_kernel void @i16_sle(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -190,7 +190,7 @@ entry:
; GCN-LABEL: {{^}}i16_eq_v_s:
; VI: v_cmp_eq_u16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_eq_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_eq_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
+define amdgpu_kernel void @i16_eq_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -206,7 +206,7 @@ entry:
; GCN-LABEL: {{^}}i16_ne_v_s:
; VI: v_cmp_ne_u16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_ne_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_ne_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
+define amdgpu_kernel void @i16_ne_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -222,7 +222,7 @@ entry:
; GCN-LABEL: {{^}}i16_ugt_v_s:
; VI: v_cmp_lt_u16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_lt_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_ugt_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
+define amdgpu_kernel void @i16_ugt_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -238,7 +238,7 @@ entry:
; GCN-LABEL: {{^}}i16_uge_v_s:
; VI: v_cmp_le_u16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_le_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_uge_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
+define amdgpu_kernel void @i16_uge_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -254,7 +254,7 @@ entry:
; GCN-LABEL: {{^}}i16_ult_v_s:
; VI: v_cmp_gt_u16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_gt_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_ult_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
+define amdgpu_kernel void @i16_ult_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -270,7 +270,7 @@ entry:
; GCN-LABEL: {{^}}i16_ule_v_s:
; VI: v_cmp_ge_u16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_ge_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_ule_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
+define amdgpu_kernel void @i16_ule_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -286,7 +286,7 @@ entry:
; GCN-LABEL: {{^}}i16_sgt_v_s:
; VI: v_cmp_lt_i16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_lt_i32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_sgt_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
+define amdgpu_kernel void @i16_sgt_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -302,7 +302,7 @@ entry:
; GCN-LABEL: {{^}}i16_sge_v_s:
; VI: v_cmp_le_i16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_le_i32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_sge_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
+define amdgpu_kernel void @i16_sge_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -318,7 +318,7 @@ entry:
; GCN-LABEL: {{^}}i16_slt_v_s:
; VI: v_cmp_gt_i16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_gt_i32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_slt_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
+define amdgpu_kernel void @i16_slt_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -334,7 +334,7 @@ entry:
; GCN-LABEL: {{^}}i16_sle_v_s:
; VI: v_cmp_ge_i16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_ge_i32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-define void @i16_sle_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
+define amdgpu_kernel void @i16_sle_v_s(i32 addrspace(1)* %out, i16 addrspace(1)* %a.ptr, i16 %b) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
diff --git a/test/CodeGen/AMDGPU/icmp64.ll b/test/CodeGen/AMDGPU/icmp64.ll
index 33ad0c9199b9..3af74277df12 100644
--- a/test/CodeGen/AMDGPU/icmp64.ll
+++ b/test/CodeGen/AMDGPU/icmp64.ll
@@ -3,7 +3,7 @@
; SI-LABEL: {{^}}test_i64_eq:
; SI: v_cmp_eq_u64
-define void @test_i64_eq(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @test_i64_eq(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp eq i64 %a, %b
%result = sext i1 %cmp to i32
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -12,7 +12,7 @@ define void @test_i64_eq(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
; SI-LABEL: {{^}}test_i64_ne:
; SI: v_cmp_ne_u64
-define void @test_i64_ne(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @test_i64_ne(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp ne i64 %a, %b
%result = sext i1 %cmp to i32
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -21,7 +21,7 @@ define void @test_i64_ne(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
; SI-LABEL: {{^}}test_i64_slt:
; SI: v_cmp_lt_i64
-define void @test_i64_slt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @test_i64_slt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp slt i64 %a, %b
%result = sext i1 %cmp to i32
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -30,7 +30,7 @@ define void @test_i64_slt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
; SI-LABEL: {{^}}test_i64_ult:
; SI: v_cmp_lt_u64
-define void @test_i64_ult(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @test_i64_ult(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp ult i64 %a, %b
%result = sext i1 %cmp to i32
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -39,7 +39,7 @@ define void @test_i64_ult(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
; SI-LABEL: {{^}}test_i64_sle:
; SI: v_cmp_le_i64
-define void @test_i64_sle(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @test_i64_sle(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp sle i64 %a, %b
%result = sext i1 %cmp to i32
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -48,7 +48,7 @@ define void @test_i64_sle(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
; SI-LABEL: {{^}}test_i64_ule:
; SI: v_cmp_le_u64
-define void @test_i64_ule(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @test_i64_ule(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp ule i64 %a, %b
%result = sext i1 %cmp to i32
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -57,7 +57,7 @@ define void @test_i64_ule(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
; SI-LABEL: {{^}}test_i64_sgt:
; SI: v_cmp_gt_i64
-define void @test_i64_sgt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @test_i64_sgt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp sgt i64 %a, %b
%result = sext i1 %cmp to i32
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -66,7 +66,7 @@ define void @test_i64_sgt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
; SI-LABEL: {{^}}test_i64_ugt:
; SI: v_cmp_gt_u64
-define void @test_i64_ugt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @test_i64_ugt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp ugt i64 %a, %b
%result = sext i1 %cmp to i32
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -75,7 +75,7 @@ define void @test_i64_ugt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
; SI-LABEL: {{^}}test_i64_sge:
; SI: v_cmp_ge_i64
-define void @test_i64_sge(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @test_i64_sge(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp sge i64 %a, %b
%result = sext i1 %cmp to i32
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -84,7 +84,7 @@ define void @test_i64_sge(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
; SI-LABEL: {{^}}test_i64_uge:
; SI: v_cmp_ge_u64
-define void @test_i64_uge(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @test_i64_uge(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%cmp = icmp uge i64 %a, %b
%result = sext i1 %cmp to i32
store i32 %result, i32 addrspace(1)* %out, align 4
diff --git a/test/CodeGen/AMDGPU/illegal-sgpr-to-vgpr-copy.ll b/test/CodeGen/AMDGPU/illegal-sgpr-to-vgpr-copy.ll
new file mode 100644
index 000000000000..6e411ce5e017
--- /dev/null
+++ b/test/CodeGen/AMDGPU/illegal-sgpr-to-vgpr-copy.ll
@@ -0,0 +1,45 @@
+; RUN: not llc -march=amdgcn < %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -march=amdgcn < %s | FileCheck -check-prefix=GCN %s
+
+; ERR: error: <unknown>:0:0: in function illegal_vgpr_to_sgpr_copy_i32 void (): illegal SGPR to VGPR copy
+; GCN: ; illegal copy v1 to s9
+
+define amdgpu_kernel void @illegal_vgpr_to_sgpr_copy_i32() #0 {
+ %vgpr = call i32 asm sideeffect "; def $0", "=${VGPR1}"()
+ call void asm sideeffect "; use $0", "${SGPR9}"(i32 %vgpr)
+ ret void
+}
+
+; ERR: error: <unknown>:0:0: in function illegal_vgpr_to_sgpr_copy_v2i32 void (): illegal SGPR to VGPR copy
+; GCN: ; illegal copy v[0:1] to s[10:11]
+define amdgpu_kernel void @illegal_vgpr_to_sgpr_copy_v2i32() #0 {
+ %vgpr = call <2 x i32> asm sideeffect "; def $0", "=${VGPR0_VGPR1}"()
+ call void asm sideeffect "; use $0", "${SGPR10_SGPR11}"(<2 x i32> %vgpr)
+ ret void
+}
+
+; ERR: error: <unknown>:0:0: in function illegal_vgpr_to_sgpr_copy_v4i32 void (): illegal SGPR to VGPR copy
+; GCN: ; illegal copy v[0:3] to s[8:11]
+define amdgpu_kernel void @illegal_vgpr_to_sgpr_copy_v4i32() #0 {
+ %vgpr = call <4 x i32> asm sideeffect "; def $0", "=${VGPR0_VGPR1_VGPR2_VGPR3}"()
+ call void asm sideeffect "; use $0", "${SGPR8_SGPR9_SGPR10_SGPR11}"(<4 x i32> %vgpr)
+ ret void
+}
+
+; ERR: error: <unknown>:0:0: in function illegal_vgpr_to_sgpr_copy_v8i32 void (): illegal SGPR to VGPR copy
+; GCN: ; illegal copy v[0:7] to s[8:15]
+define amdgpu_kernel void @illegal_vgpr_to_sgpr_copy_v8i32() #0 {
+ %vgpr = call <8 x i32> asm sideeffect "; def $0", "=${VGPR0_VGPR1_VGPR2_VGPR3_VGPR4_VGPR5_VGPR6_VGPR7}"()
+ call void asm sideeffect "; use $0", "${SGPR8_SGPR9_SGPR10_SGPR11_SGPR12_SGPR13_SGPR14_SGPR15}"(<8 x i32> %vgpr)
+ ret void
+}
+
+; ERR: error: <unknown>:0:0: in function illegal_vgpr_to_sgpr_copy_v16i32 void (): illegal SGPR to VGPR copy
+; GCN: ; illegal copy v[0:15] to s[16:31]
+define amdgpu_kernel void @illegal_vgpr_to_sgpr_copy_v16i32() #0 {
+ %vgpr = call <16 x i32> asm sideeffect "; def $0", "=${VGPR0_VGPR1_VGPR2_VGPR3_VGPR4_VGPR5_VGPR6_VGPR7_VGPR8_VGPR9_VGPR10_VGPR11_VGPR12_VGPR13_VGPR14_VGPR15}"()
+ call void asm sideeffect "; use $0", "${SGPR16_SGPR17_SGPR18_SGPR19_SGPR20_SGPR21_SGPR22_SGPR23_SGPR24_SGPR25_SGPR26_SGPR27_SGPR28_SGPR29_SGPR30_SGPR31}"(<16 x i32> %vgpr)
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/image-attributes.ll b/test/CodeGen/AMDGPU/image-attributes.ll
index 5906b2f15709..53d61e66c6ba 100644
--- a/test/CodeGen/AMDGPU/image-attributes.ll
+++ b/test/CodeGen/AMDGPU/image-attributes.ll
@@ -7,7 +7,7 @@
; FUNC-LABEL: {{^}}width_2d:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV * [[VAL]], KC0[2].Z
-define void @width_2d (%opencl.image2d_t addrspace(1)* %in,
+define amdgpu_kernel void @width_2d (%opencl.image2d_t addrspace(1)* %in,
i32 addrspace(1)* %out) {
entry:
%0 = call [3 x i32] @llvm.OpenCL.image.get.size.2d(
@@ -20,7 +20,7 @@ entry:
; FUNC-LABEL: {{^}}width_3d:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV * [[VAL]], KC0[2].Z
-define void @width_3d (%opencl.image3d_t addrspace(1)* %in,
+define amdgpu_kernel void @width_3d (%opencl.image3d_t addrspace(1)* %in,
i32 addrspace(1)* %out) {
entry:
%0 = call [3 x i32] @llvm.OpenCL.image.get.size.3d(
@@ -37,7 +37,7 @@ entry:
; FUNC-LABEL: {{^}}height_2d:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV * [[VAL]], KC0[2].W
-define void @height_2d (%opencl.image2d_t addrspace(1)* %in,
+define amdgpu_kernel void @height_2d (%opencl.image2d_t addrspace(1)* %in,
i32 addrspace(1)* %out) {
entry:
%0 = call [3 x i32] @llvm.OpenCL.image.get.size.2d(
@@ -50,7 +50,7 @@ entry:
; FUNC-LABEL: {{^}}height_3d:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV * [[VAL]], KC0[2].W
-define void @height_3d (%opencl.image3d_t addrspace(1)* %in,
+define amdgpu_kernel void @height_3d (%opencl.image3d_t addrspace(1)* %in,
i32 addrspace(1)* %out) {
entry:
%0 = call [3 x i32] @llvm.OpenCL.image.get.size.3d(
@@ -67,7 +67,7 @@ entry:
; FUNC-LABEL: {{^}}depth_3d:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV * [[VAL]], KC0[3].X
-define void @depth_3d (%opencl.image3d_t addrspace(1)* %in,
+define amdgpu_kernel void @depth_3d (%opencl.image3d_t addrspace(1)* %in,
i32 addrspace(1)* %out) {
entry:
%0 = call [3 x i32] @llvm.OpenCL.image.get.size.3d(
@@ -84,7 +84,7 @@ entry:
; FUNC-LABEL: {{^}}data_type_2d:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV * [[VAL]], KC0[3].Y
-define void @data_type_2d (%opencl.image2d_t addrspace(1)* %in,
+define amdgpu_kernel void @data_type_2d (%opencl.image2d_t addrspace(1)* %in,
i32 addrspace(1)* %out) {
entry:
%0 = call [2 x i32] @llvm.OpenCL.image.get.format.2d(
@@ -97,7 +97,7 @@ entry:
; FUNC-LABEL: {{^}}data_type_3d:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV * [[VAL]], KC0[3].Y
-define void @data_type_3d (%opencl.image3d_t addrspace(1)* %in,
+define amdgpu_kernel void @data_type_3d (%opencl.image3d_t addrspace(1)* %in,
i32 addrspace(1)* %out) {
entry:
%0 = call [2 x i32] @llvm.OpenCL.image.get.format.3d(
@@ -114,7 +114,7 @@ entry:
; FUNC-LABEL: {{^}}channel_order_2d:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV * [[VAL]], KC0[3].Z
-define void @channel_order_2d (%opencl.image2d_t addrspace(1)* %in,
+define amdgpu_kernel void @channel_order_2d (%opencl.image2d_t addrspace(1)* %in,
i32 addrspace(1)* %out) {
entry:
%0 = call [2 x i32] @llvm.OpenCL.image.get.format.2d(
@@ -127,7 +127,7 @@ entry:
; FUNC-LABEL: {{^}}channel_order_3d:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV * [[VAL]], KC0[3].Z
-define void @channel_order_3d (%opencl.image3d_t addrspace(1)* %in,
+define amdgpu_kernel void @channel_order_3d (%opencl.image3d_t addrspace(1)* %in,
i32 addrspace(1)* %out) {
entry:
%0 = call [2 x i32] @llvm.OpenCL.image.get.format.3d(
@@ -146,7 +146,7 @@ entry:
; FUNC-LABEL: {{^}}image_arg_2nd:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV * [[VAL]], KC0[4].Z
-define void @image_arg_2nd (%opencl.image3d_t addrspace(1)* %in1,
+define amdgpu_kernel void @image_arg_2nd (%opencl.image3d_t addrspace(1)* %in1,
i32 %x,
%opencl.image2d_t addrspace(1)* %in2,
i32 addrspace(1)* %out) {
diff --git a/test/CodeGen/AMDGPU/image-resource-id.ll b/test/CodeGen/AMDGPU/image-resource-id.ll
index d4cf34944240..dac7c7ddaeac 100644
--- a/test/CodeGen/AMDGPU/image-resource-id.ll
+++ b/test/CodeGen/AMDGPU/image-resource-id.ll
@@ -7,7 +7,7 @@
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 0(
-define void @test_2d_rd_1_0(%opencl.image2d_t addrspace(1)* %in, ; read_only
+define amdgpu_kernel void @test_2d_rd_1_0(%opencl.image2d_t addrspace(1)* %in, ; read_only
i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.OpenCL.image.get.resource.id.2d(
@@ -21,7 +21,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 0(
-define void @test_3d_rd_1_0(%opencl.image3d_t addrspace(1)* %in, ; read_only
+define amdgpu_kernel void @test_3d_rd_1_0(%opencl.image3d_t addrspace(1)* %in, ; read_only
i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.OpenCL.image.get.resource.id.3d(
@@ -37,7 +37,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 0(
-define void @test_2d_wr_1_0(%opencl.image2d_t addrspace(1)* %in, ; write_only
+define amdgpu_kernel void @test_2d_wr_1_0(%opencl.image2d_t addrspace(1)* %in, ; write_only
i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.OpenCL.image.get.resource.id.2d(
@@ -51,7 +51,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 0(
-define void @test_3d_wr_1_0(%opencl.image3d_t addrspace(1)* %in, ; write_only
+define amdgpu_kernel void @test_3d_wr_1_0(%opencl.image3d_t addrspace(1)* %in, ; write_only
i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.OpenCL.image.get.resource.id.3d(
@@ -67,7 +67,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 0(
-define void @test_2d_rd_2_0(%opencl.image2d_t addrspace(1)* %in1, ; read_only
+define amdgpu_kernel void @test_2d_rd_2_0(%opencl.image2d_t addrspace(1)* %in1, ; read_only
%opencl.image2d_t addrspace(1)* %in2, ; read_only
i32 addrspace(1)* %out) {
entry:
@@ -82,7 +82,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 1(
-define void @test_2d_rd_2_1(%opencl.image2d_t addrspace(1)* %in1, ; read_only
+define amdgpu_kernel void @test_2d_rd_2_1(%opencl.image2d_t addrspace(1)* %in1, ; read_only
%opencl.image2d_t addrspace(1)* %in2, ; read_only
i32 addrspace(1)* %out) {
entry:
@@ -97,7 +97,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 0(
-define void @test_3d_rd_2_0(%opencl.image3d_t addrspace(1)* %in1, ; read_only
+define amdgpu_kernel void @test_3d_rd_2_0(%opencl.image3d_t addrspace(1)* %in1, ; read_only
%opencl.image3d_t addrspace(1)* %in2, ; read_only
i32 addrspace(1)* %out) {
entry:
@@ -112,7 +112,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 1(
-define void @test_3d_rd_2_1(%opencl.image3d_t addrspace(1)* %in1, ; read_only
+define amdgpu_kernel void @test_3d_rd_2_1(%opencl.image3d_t addrspace(1)* %in1, ; read_only
%opencl.image3d_t addrspace(1)* %in2, ; read_only
i32 addrspace(1)* %out) {
entry:
@@ -129,7 +129,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 0(
-define void @test_2d_wr_2_0(%opencl.image2d_t addrspace(1)* %in1, ; write_only
+define amdgpu_kernel void @test_2d_wr_2_0(%opencl.image2d_t addrspace(1)* %in1, ; write_only
%opencl.image2d_t addrspace(1)* %in2, ; write_only
i32 addrspace(1)* %out) {
entry:
@@ -144,7 +144,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 1(
-define void @test_2d_wr_2_1(%opencl.image2d_t addrspace(1)* %in1, ; write_only
+define amdgpu_kernel void @test_2d_wr_2_1(%opencl.image2d_t addrspace(1)* %in1, ; write_only
%opencl.image2d_t addrspace(1)* %in2, ; write_only
i32 addrspace(1)* %out) {
entry:
@@ -159,7 +159,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 0(
-define void @test_3d_wr_2_0(%opencl.image3d_t addrspace(1)* %in1, ; write_only
+define amdgpu_kernel void @test_3d_wr_2_0(%opencl.image3d_t addrspace(1)* %in1, ; write_only
%opencl.image3d_t addrspace(1)* %in2, ; write_only
i32 addrspace(1)* %out) {
entry:
@@ -174,7 +174,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 1(
-define void @test_3d_wr_2_1(%opencl.image3d_t addrspace(1)* %in1, ; write_only
+define amdgpu_kernel void @test_3d_wr_2_1(%opencl.image3d_t addrspace(1)* %in1, ; write_only
%opencl.image3d_t addrspace(1)* %in2, ; write_only
i32 addrspace(1)* %out) {
entry:
@@ -191,7 +191,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 2(
-define void @test_2d_rd_3_0(%opencl.image2d_t addrspace(1)* %in1, ; read_only
+define amdgpu_kernel void @test_2d_rd_3_0(%opencl.image2d_t addrspace(1)* %in1, ; read_only
%opencl.image3d_t addrspace(1)* %in2, ; read_only
%opencl.image2d_t addrspace(1)* %in3, ; read_only
i32 addrspace(1)* %out) {
@@ -208,7 +208,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 2(
-define void @test_3d_rd_3_0(%opencl.image3d_t addrspace(1)* %in1, ; read_only
+define amdgpu_kernel void @test_3d_rd_3_0(%opencl.image3d_t addrspace(1)* %in1, ; read_only
%opencl.image2d_t addrspace(1)* %in2, ; read_only
%opencl.image3d_t addrspace(1)* %in3, ; read_only
i32 addrspace(1)* %out) {
@@ -226,7 +226,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 2(
-define void @test_2d_wr_3_0(%opencl.image2d_t addrspace(1)* %in1, ; write_only
+define amdgpu_kernel void @test_2d_wr_3_0(%opencl.image2d_t addrspace(1)* %in1, ; write_only
%opencl.image3d_t addrspace(1)* %in2, ; write_only
%opencl.image2d_t addrspace(1)* %in3, ; write_only
i32 addrspace(1)* %out) {
@@ -243,7 +243,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 2(
-define void @test_3d_wr_3_0(%opencl.image3d_t addrspace(1)* %in1, ; write_only
+define amdgpu_kernel void @test_3d_wr_3_0(%opencl.image3d_t addrspace(1)* %in1, ; write_only
%opencl.image2d_t addrspace(1)* %in2, ; write_only
%opencl.image3d_t addrspace(1)* %in3, ; write_only
i32 addrspace(1)* %out) {
@@ -261,7 +261,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 1(
-define void @test_2d_mix_3_0(%opencl.image2d_t addrspace(1)* %in1, ; write_only
+define amdgpu_kernel void @test_2d_mix_3_0(%opencl.image2d_t addrspace(1)* %in1, ; write_only
%opencl.image3d_t addrspace(1)* %in2, ; read_only
%opencl.image2d_t addrspace(1)* %in3, ; read_only
i32 addrspace(1)* %out) {
@@ -277,7 +277,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 1(
-define void @test_3d_mix_3_0(%opencl.image3d_t addrspace(1)* %in1, ; write_only
+define amdgpu_kernel void @test_3d_mix_3_0(%opencl.image3d_t addrspace(1)* %in1, ; write_only
%opencl.image2d_t addrspace(1)* %in2, ; read_only
%opencl.image3d_t addrspace(1)* %in3, ; read_only
i32 addrspace(1)* %out) {
@@ -293,7 +293,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 1(
-define void @test_2d_mix_3_1(%opencl.image2d_t addrspace(1)* %in1, ; write_only
+define amdgpu_kernel void @test_2d_mix_3_1(%opencl.image2d_t addrspace(1)* %in1, ; write_only
%opencl.image3d_t addrspace(1)* %in2, ; read_only
%opencl.image2d_t addrspace(1)* %in3, ; write_only
i32 addrspace(1)* %out) {
@@ -309,7 +309,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 1(
-define void @test_3d_mix_3_1(%opencl.image3d_t addrspace(1)* %in1, ; write_only
+define amdgpu_kernel void @test_3d_mix_3_1(%opencl.image3d_t addrspace(1)* %in1, ; write_only
%opencl.image2d_t addrspace(1)* %in2, ; read_only
%opencl.image3d_t addrspace(1)* %in3, ; write_only
i32 addrspace(1)* %out) {
diff --git a/test/CodeGen/AMDGPU/imm.ll b/test/CodeGen/AMDGPU/imm.ll
index ef6008aa5fde..c2668a077b09 100644
--- a/test/CodeGen/AMDGPU/imm.ll
+++ b/test/CodeGen/AMDGPU/imm.ll
@@ -5,7 +5,7 @@
; GCN-LABEL: {{^}}i64_imm_inline_lo:
; GCN: v_mov_b32_e32 v[[LO_VGPR:[0-9]+]], 5
; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VGPR]]:
-define void @i64_imm_inline_lo(i64 addrspace(1) *%out) {
+define amdgpu_kernel void @i64_imm_inline_lo(i64 addrspace(1) *%out) {
entry:
store i64 1311768464867721221, i64 addrspace(1) *%out ; 0x1234567800000005
ret void
@@ -15,7 +15,7 @@ entry:
; GCN-LABEL: {{^}}i64_imm_inline_hi:
; GCN: v_mov_b32_e32 v[[HI_VGPR:[0-9]+]], 5
; GCN: buffer_store_dwordx2 v{{\[[0-9]+:}}[[HI_VGPR]]
-define void @i64_imm_inline_hi(i64 addrspace(1) *%out) {
+define amdgpu_kernel void @i64_imm_inline_hi(i64 addrspace(1) *%out) {
entry:
store i64 21780256376, i64 addrspace(1) *%out ; 0x0000000512345678
ret void
@@ -25,7 +25,7 @@ entry:
; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
; GCN-DAG: v_bfrev_b32_e32 v[[HI_VREG:[0-9]+]], 1{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_imm_neg_0.0_i64(i64 addrspace(1) *%out) {
+define amdgpu_kernel void @store_imm_neg_0.0_i64(i64 addrspace(1) *%out) {
store i64 -9223372036854775808, i64 addrspace(1) *%out
ret void
}
@@ -33,7 +33,7 @@ define void @store_imm_neg_0.0_i64(i64 addrspace(1) *%out) {
; GCN-LABEL: {{^}}store_inline_imm_neg_0.0_i32:
; GCN: v_bfrev_b32_e32 [[REG:v[0-9]+]], 1{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_neg_0.0_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_neg_0.0_i32(i32 addrspace(1)* %out) {
store i32 -2147483648, i32 addrspace(1)* %out
ret void
}
@@ -41,7 +41,7 @@ define void @store_inline_imm_neg_0.0_i32(i32 addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_inline_imm_0.0_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_0.0_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_0.0_f32(float addrspace(1)* %out) {
store float 0.0, float addrspace(1)* %out
ret void
}
@@ -49,7 +49,7 @@ define void @store_inline_imm_0.0_f32(float addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_imm_neg_0.0_f32:
; GCN: v_bfrev_b32_e32 [[REG:v[0-9]+]], 1{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @store_imm_neg_0.0_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_imm_neg_0.0_f32(float addrspace(1)* %out) {
store float -0.0, float addrspace(1)* %out
ret void
}
@@ -57,7 +57,7 @@ define void @store_imm_neg_0.0_f32(float addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_inline_imm_0.5_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0.5{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_0.5_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_0.5_f32(float addrspace(1)* %out) {
store float 0.5, float addrspace(1)* %out
ret void
}
@@ -65,7 +65,7 @@ define void @store_inline_imm_0.5_f32(float addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_inline_imm_m_0.5_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], -0.5{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_m_0.5_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_0.5_f32(float addrspace(1)* %out) {
store float -0.5, float addrspace(1)* %out
ret void
}
@@ -73,7 +73,7 @@ define void @store_inline_imm_m_0.5_f32(float addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_inline_imm_1.0_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 1.0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_1.0_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_1.0_f32(float addrspace(1)* %out) {
store float 1.0, float addrspace(1)* %out
ret void
}
@@ -81,7 +81,7 @@ define void @store_inline_imm_1.0_f32(float addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_inline_imm_m_1.0_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], -1.0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_m_1.0_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_1.0_f32(float addrspace(1)* %out) {
store float -1.0, float addrspace(1)* %out
ret void
}
@@ -89,7 +89,7 @@ define void @store_inline_imm_m_1.0_f32(float addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_inline_imm_2.0_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 2.0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_2.0_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_2.0_f32(float addrspace(1)* %out) {
store float 2.0, float addrspace(1)* %out
ret void
}
@@ -97,7 +97,7 @@ define void @store_inline_imm_2.0_f32(float addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_inline_imm_m_2.0_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], -2.0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_m_2.0_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_2.0_f32(float addrspace(1)* %out) {
store float -2.0, float addrspace(1)* %out
ret void
}
@@ -105,7 +105,7 @@ define void @store_inline_imm_m_2.0_f32(float addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_inline_imm_4.0_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 4.0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_4.0_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_4.0_f32(float addrspace(1)* %out) {
store float 4.0, float addrspace(1)* %out
ret void
}
@@ -113,7 +113,7 @@ define void @store_inline_imm_4.0_f32(float addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_inline_imm_m_4.0_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], -4.0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_m_4.0_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_4.0_f32(float addrspace(1)* %out) {
store float -4.0, float addrspace(1)* %out
ret void
}
@@ -123,7 +123,7 @@ define void @store_inline_imm_m_4.0_f32(float addrspace(1)* %out) {
; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3e22f983{{$}}
; VI: v_mov_b32_e32 [[REG:v[0-9]+]], 0.15915494{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_inv_2pi_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_inv_2pi_f32(float addrspace(1)* %out) {
store float 0x3FC45F3060000000, float addrspace(1)* %out
ret void
}
@@ -131,7 +131,7 @@ define void @store_inline_imm_inv_2pi_f32(float addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_inline_imm_m_inv_2pi_f32:
; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xbe22f983{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_m_inv_2pi_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_inv_2pi_f32(float addrspace(1)* %out) {
store float 0xBFC45F3060000000, float addrspace(1)* %out
ret void
}
@@ -139,7 +139,7 @@ define void @store_inline_imm_m_inv_2pi_f32(float addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_literal_imm_f32:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x45800000
; GCN: buffer_store_dword [[REG]]
-define void @store_literal_imm_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_literal_imm_f32(float addrspace(1)* %out) {
store float 4096.0, float addrspace(1)* %out
ret void
}
@@ -148,7 +148,7 @@ define void @store_literal_imm_f32(float addrspace(1)* %out) {
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_0.0_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_0.0_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, 0.0
store float %y, float addrspace(1)* %out
ret void
@@ -158,7 +158,7 @@ define void @add_inline_imm_0.0_f32(float addrspace(1)* %out, float %x) {
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 0.5{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_0.5_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_0.5_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, 0.5
store float %y, float addrspace(1)* %out
ret void
@@ -168,7 +168,7 @@ define void @add_inline_imm_0.5_f32(float addrspace(1)* %out, float %x) {
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], -0.5{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_neg_0.5_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_neg_0.5_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, -0.5
store float %y, float addrspace(1)* %out
ret void
@@ -178,7 +178,7 @@ define void @add_inline_imm_neg_0.5_f32(float addrspace(1)* %out, float %x) {
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 1.0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_1.0_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_1.0_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, 1.0
store float %y, float addrspace(1)* %out
ret void
@@ -188,7 +188,7 @@ define void @add_inline_imm_1.0_f32(float addrspace(1)* %out, float %x) {
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], -1.0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_neg_1.0_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_neg_1.0_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, -1.0
store float %y, float addrspace(1)* %out
ret void
@@ -198,7 +198,7 @@ define void @add_inline_imm_neg_1.0_f32(float addrspace(1)* %out, float %x) {
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 2.0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_2.0_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_2.0_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, 2.0
store float %y, float addrspace(1)* %out
ret void
@@ -208,7 +208,7 @@ define void @add_inline_imm_2.0_f32(float addrspace(1)* %out, float %x) {
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], -2.0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_neg_2.0_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_neg_2.0_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, -2.0
store float %y, float addrspace(1)* %out
ret void
@@ -218,7 +218,7 @@ define void @add_inline_imm_neg_2.0_f32(float addrspace(1)* %out, float %x) {
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 4.0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_4.0_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_4.0_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, 4.0
store float %y, float addrspace(1)* %out
ret void
@@ -228,7 +228,7 @@ define void @add_inline_imm_4.0_f32(float addrspace(1)* %out, float %x) {
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], -4.0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_neg_4.0_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_neg_4.0_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, -4.0
store float %y, float addrspace(1)* %out
ret void
@@ -238,7 +238,7 @@ define void @add_inline_imm_neg_4.0_f32(float addrspace(1)* %out, float %x) {
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_add_f32_e32 [[REG:v[0-9]+]], 0.5, [[VAL]]
; GCN: buffer_store_dword [[REG]]
-define void @commute_add_inline_imm_0.5_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
+define amdgpu_kernel void @commute_add_inline_imm_0.5_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%x = load float, float addrspace(1)* %in
%y = fadd float %x, 0.5
store float %y, float addrspace(1)* %out
@@ -249,7 +249,7 @@ define void @commute_add_inline_imm_0.5_f32(float addrspace(1)* %out, float addr
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_add_f32_e32 [[REG:v[0-9]+]], 0x44800000, [[VAL]]
; GCN: buffer_store_dword [[REG]]
-define void @commute_add_literal_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
+define amdgpu_kernel void @commute_add_literal_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%x = load float, float addrspace(1)* %in
%y = fadd float %x, 1024.0
store float %y, float addrspace(1)* %out
@@ -260,7 +260,7 @@ define void @commute_add_literal_f32(float addrspace(1)* %out, float addrspace(1
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 1{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_1_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_1_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, 0x36a0000000000000
store float %y, float addrspace(1)* %out
ret void
@@ -270,7 +270,7 @@ define void @add_inline_imm_1_f32(float addrspace(1)* %out, float %x) {
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 2{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_2_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_2_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, 0x36b0000000000000
store float %y, float addrspace(1)* %out
ret void
@@ -280,7 +280,7 @@ define void @add_inline_imm_2_f32(float addrspace(1)* %out, float %x) {
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 16
; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_16_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_16_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, 0x36e0000000000000
store float %y, float addrspace(1)* %out
ret void
@@ -290,7 +290,7 @@ define void @add_inline_imm_16_f32(float addrspace(1)* %out, float %x) {
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], -1{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_neg_1_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_neg_1_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, 0xffffffffe0000000
store float %y, float addrspace(1)* %out
ret void
@@ -300,7 +300,7 @@ define void @add_inline_imm_neg_1_f32(float addrspace(1)* %out, float %x) {
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], -2{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_neg_2_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_neg_2_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, 0xffffffffc0000000
store float %y, float addrspace(1)* %out
ret void
@@ -310,7 +310,7 @@ define void @add_inline_imm_neg_2_f32(float addrspace(1)* %out, float %x) {
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], -16
; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_neg_16_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_neg_16_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, 0xfffffffe00000000
store float %y, float addrspace(1)* %out
ret void
@@ -320,7 +320,7 @@ define void @add_inline_imm_neg_16_f32(float addrspace(1)* %out, float %x) {
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 63
; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_63_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_63_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, 0x36ff800000000000
store float %y, float addrspace(1)* %out
ret void
@@ -330,7 +330,7 @@ define void @add_inline_imm_63_f32(float addrspace(1)* %out, float %x) {
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 64
; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_64_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_64_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, 0x3700000000000000
store float %y, float addrspace(1)* %out
ret void
@@ -342,7 +342,7 @@ define void @add_inline_imm_64_f32(float addrspace(1)* %out, float %x) {
; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 0{{$}}
; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_0.0_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_0.0_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, 0.0
store double %y, double addrspace(1)* %out
ret void
@@ -353,7 +353,7 @@ define void @add_inline_imm_0.0_f64(double addrspace(1)* %out, double %x) {
; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 0.5
; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_0.5_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_0.5_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, 0.5
store double %y, double addrspace(1)* %out
ret void
@@ -364,7 +364,7 @@ define void @add_inline_imm_0.5_f64(double addrspace(1)* %out, double %x) {
; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], -0.5
; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_neg_0.5_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_neg_0.5_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, -0.5
store double %y, double addrspace(1)* %out
ret void
@@ -375,7 +375,7 @@ define void @add_inline_imm_neg_0.5_f64(double addrspace(1)* %out, double %x) {
; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 1.0
; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_1.0_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_1.0_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, 1.0
store double %y, double addrspace(1)* %out
ret void
@@ -386,7 +386,7 @@ define void @add_inline_imm_1.0_f64(double addrspace(1)* %out, double %x) {
; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], -1.0
; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_neg_1.0_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_neg_1.0_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, -1.0
store double %y, double addrspace(1)* %out
ret void
@@ -397,7 +397,7 @@ define void @add_inline_imm_neg_1.0_f64(double addrspace(1)* %out, double %x) {
; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 2.0
; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_2.0_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_2.0_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, 2.0
store double %y, double addrspace(1)* %out
ret void
@@ -408,7 +408,7 @@ define void @add_inline_imm_2.0_f64(double addrspace(1)* %out, double %x) {
; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], -2.0
; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_neg_2.0_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_neg_2.0_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, -2.0
store double %y, double addrspace(1)* %out
ret void
@@ -419,7 +419,7 @@ define void @add_inline_imm_neg_2.0_f64(double addrspace(1)* %out, double %x) {
; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 4.0
; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_4.0_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_4.0_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, 4.0
store double %y, double addrspace(1)* %out
ret void
@@ -430,7 +430,7 @@ define void @add_inline_imm_4.0_f64(double addrspace(1)* %out, double %x) {
; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], -4.0
; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_neg_4.0_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_neg_4.0_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, -4.0
store double %y, double addrspace(1)* %out
ret void
@@ -445,7 +445,7 @@ define void @add_inline_imm_neg_4.0_f64(double addrspace(1)* %out, double %x) {
; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; VI: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 0.15915494{{$}}
; VI: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_inv_2pi_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_inv_2pi_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, 0x3fc45f306dc9c882
store double %y, double addrspace(1)* %out
ret void
@@ -455,7 +455,7 @@ define void @add_inline_imm_inv_2pi_f64(double addrspace(1)* %out, double %x) {
; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0x6dc9c882
; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0xbfc45f30
; GCN: v_add_f64 v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @add_m_inv_2pi_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_m_inv_2pi_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, 0xbfc45f306dc9c882
store double %y, double addrspace(1)* %out
ret void
@@ -466,7 +466,7 @@ define void @add_m_inv_2pi_f64(double addrspace(1)* %out, double %x) {
; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 1{{$}}
; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_1_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_1_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, 0x0000000000000001
store double %y, double addrspace(1)* %out
ret void
@@ -477,7 +477,7 @@ define void @add_inline_imm_1_f64(double addrspace(1)* %out, double %x) {
; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 2{{$}}
; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_2_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_2_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, 0x0000000000000002
store double %y, double addrspace(1)* %out
ret void
@@ -488,7 +488,7 @@ define void @add_inline_imm_2_f64(double addrspace(1)* %out, double %x) {
; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 16
; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_16_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_16_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, 0x0000000000000010
store double %y, double addrspace(1)* %out
ret void
@@ -499,7 +499,7 @@ define void @add_inline_imm_16_f64(double addrspace(1)* %out, double %x) {
; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], -1
; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_neg_1_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_neg_1_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, 0xffffffffffffffff
store double %y, double addrspace(1)* %out
ret void
@@ -510,7 +510,7 @@ define void @add_inline_imm_neg_1_f64(double addrspace(1)* %out, double %x) {
; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], -2
; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_neg_2_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_neg_2_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, 0xfffffffffffffffe
store double %y, double addrspace(1)* %out
ret void
@@ -521,7 +521,7 @@ define void @add_inline_imm_neg_2_f64(double addrspace(1)* %out, double %x) {
; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], -16
; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_neg_16_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_neg_16_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, 0xfffffffffffffff0
store double %y, double addrspace(1)* %out
ret void
@@ -532,7 +532,7 @@ define void @add_inline_imm_neg_16_f64(double addrspace(1)* %out, double %x) {
; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 63
; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_63_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_63_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, 0x000000000000003F
store double %y, double addrspace(1)* %out
ret void
@@ -543,7 +543,7 @@ define void @add_inline_imm_63_f64(double addrspace(1)* %out, double %x) {
; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 64
; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_64_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_64_f64(double addrspace(1)* %out, double %x) {
%y = fadd double %x, 0x0000000000000040
store double %y, double addrspace(1)* %out
ret void
@@ -554,7 +554,7 @@ define void @add_inline_imm_64_f64(double addrspace(1)* %out, double %x) {
; GCN: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0
; GCN: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], v[[LO_VREG]]{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_0.0_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_0.0_f64(double addrspace(1)* %out) {
store double 0.0, double addrspace(1)* %out
ret void
}
@@ -564,7 +564,7 @@ define void @store_inline_imm_0.0_f64(double addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
; GCN-DAG: v_bfrev_b32_e32 v[[HI_VREG:[0-9]+]], 1{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_literal_imm_neg_0.0_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_literal_imm_neg_0.0_f64(double addrspace(1)* %out) {
store double -0.0, double addrspace(1)* %out
ret void
}
@@ -573,7 +573,7 @@ define void @store_literal_imm_neg_0.0_f64(double addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0x3fe00000
; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_0.5_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_0.5_f64(double addrspace(1)* %out) {
store double 0.5, double addrspace(1)* %out
ret void
}
@@ -582,7 +582,7 @@ define void @store_inline_imm_0.5_f64(double addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0xbfe00000
; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_m_0.5_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_0.5_f64(double addrspace(1)* %out) {
store double -0.5, double addrspace(1)* %out
ret void
}
@@ -591,7 +591,7 @@ define void @store_inline_imm_m_0.5_f64(double addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0x3ff00000
; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_1.0_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_1.0_f64(double addrspace(1)* %out) {
store double 1.0, double addrspace(1)* %out
ret void
}
@@ -600,7 +600,7 @@ define void @store_inline_imm_1.0_f64(double addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0xbff00000
; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_m_1.0_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_1.0_f64(double addrspace(1)* %out) {
store double -1.0, double addrspace(1)* %out
ret void
}
@@ -609,7 +609,7 @@ define void @store_inline_imm_m_1.0_f64(double addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 2.0
; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_2.0_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_2.0_f64(double addrspace(1)* %out) {
store double 2.0, double addrspace(1)* %out
ret void
}
@@ -618,7 +618,7 @@ define void @store_inline_imm_2.0_f64(double addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], -2.0
; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_m_2.0_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_2.0_f64(double addrspace(1)* %out) {
store double -2.0, double addrspace(1)* %out
ret void
}
@@ -627,7 +627,7 @@ define void @store_inline_imm_m_2.0_f64(double addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0x40100000
; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_4.0_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_4.0_f64(double addrspace(1)* %out) {
store double 4.0, double addrspace(1)* %out
ret void
}
@@ -636,7 +636,7 @@ define void @store_inline_imm_4.0_f64(double addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0xc0100000
; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_m_4.0_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_4.0_f64(double addrspace(1)* %out) {
store double -4.0, double addrspace(1)* %out
ret void
}
@@ -645,7 +645,7 @@ define void @store_inline_imm_m_4.0_f64(double addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0x6dc9c882
; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0x3fc45f30
; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inv_2pi_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inv_2pi_f64(double addrspace(1)* %out) {
store double 0x3fc45f306dc9c882, double addrspace(1)* %out
ret void
}
@@ -654,7 +654,7 @@ define void @store_inv_2pi_f64(double addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0x6dc9c882
; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0xbfc45f30
; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_m_inv_2pi_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_inv_2pi_f64(double addrspace(1)* %out) {
store double 0xbfc45f306dc9c882, double addrspace(1)* %out
ret void
}
@@ -663,7 +663,22 @@ define void @store_inline_imm_m_inv_2pi_f64(double addrspace(1)* %out) {
; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0x40b00000
; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_literal_imm_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_literal_imm_f64(double addrspace(1)* %out) {
store double 4096.0, double addrspace(1)* %out
ret void
}
+
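+; Note: the float constants below are written in LLVM's 64-bit hex
+; notation; 0x3FE86A7F00000000 is exactly the f32 value with bit pattern
+; 0x3f4353f8. Neither it nor its negation has an inline-constant
+; encoding, so the checks require both to be emitted as 32-bit literal
+; operands of the multiplies.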
+; GCN-LABEL: {{^}}literal_folding:
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0x3f4353f8, v{{[0-9]+}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0xbf4353f8, v{{[0-9]+}}
+define amdgpu_vs void @literal_folding(float %arg) {
+main_body:
+ %tmp = fmul float %arg, 0x3FE86A7F00000000
+ %tmp1 = fmul float %arg, 0xBFE86A7F00000000
+ call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float %tmp, float %tmp, float %tmp1, float %tmp1, i1 true, i1 false) #0
+ ret void
+}
+
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/imm16.ll b/test/CodeGen/AMDGPU/imm16.ll
index 2e73eb06502f..e42d58791890 100644
--- a/test/CodeGen/AMDGPU/imm16.ll
+++ b/test/CodeGen/AMDGPU/imm16.ll
@@ -7,7 +7,7 @@
; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0x8000{{$}}
; VI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffff8000{{$}}
; GCN: buffer_store_short [[REG]]
-define void @store_inline_imm_neg_0.0_i16(i16 addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_neg_0.0_i16(i16 addrspace(1)* %out) {
store volatile i16 -32768, i16 addrspace(1)* %out
ret void
}
@@ -15,7 +15,7 @@ define void @store_inline_imm_neg_0.0_i16(i16 addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_inline_imm_0.0_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}}
; GCN: buffer_store_short [[REG]]
-define void @store_inline_imm_0.0_f16(half addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_0.0_f16(half addrspace(1)* %out) {
store half 0.0, half addrspace(1)* %out
ret void
}
@@ -24,7 +24,7 @@ define void @store_inline_imm_0.0_f16(half addrspace(1)* %out) {
; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0x8000{{$}}
; VI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffff8000{{$}}
; GCN: buffer_store_short [[REG]]
-define void @store_imm_neg_0.0_f16(half addrspace(1)* %out) {
+define amdgpu_kernel void @store_imm_neg_0.0_f16(half addrspace(1)* %out) {
store half -0.0, half addrspace(1)* %out
ret void
}
@@ -32,7 +32,7 @@ define void @store_imm_neg_0.0_f16(half addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_inline_imm_0.5_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3800{{$}}
; GCN: buffer_store_short [[REG]]
-define void @store_inline_imm_0.5_f16(half addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_0.5_f16(half addrspace(1)* %out) {
store half 0.5, half addrspace(1)* %out
ret void
}
@@ -41,7 +41,7 @@ define void @store_inline_imm_0.5_f16(half addrspace(1)* %out) {
; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xb800{{$}}
; VI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffffb800{{$}}
; GCN: buffer_store_short [[REG]]
-define void @store_inline_imm_m_0.5_f16(half addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_0.5_f16(half addrspace(1)* %out) {
store half -0.5, half addrspace(1)* %out
ret void
}
@@ -49,7 +49,7 @@ define void @store_inline_imm_m_0.5_f16(half addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_inline_imm_1.0_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3c00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @store_inline_imm_1.0_f16(half addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_1.0_f16(half addrspace(1)* %out) {
store half 1.0, half addrspace(1)* %out
ret void
}
@@ -58,7 +58,7 @@ define void @store_inline_imm_1.0_f16(half addrspace(1)* %out) {
; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xbc00{{$}}
; VI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffffbc00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @store_inline_imm_m_1.0_f16(half addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_1.0_f16(half addrspace(1)* %out) {
store half -1.0, half addrspace(1)* %out
ret void
}
@@ -66,7 +66,7 @@ define void @store_inline_imm_m_1.0_f16(half addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_inline_imm_2.0_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x4000{{$}}
; GCN: buffer_store_short [[REG]]
-define void @store_inline_imm_2.0_f16(half addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_2.0_f16(half addrspace(1)* %out) {
store half 2.0, half addrspace(1)* %out
ret void
}
@@ -75,7 +75,7 @@ define void @store_inline_imm_2.0_f16(half addrspace(1)* %out) {
; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xc000{{$}}
; VI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffffc000{{$}}
; GCN: buffer_store_short [[REG]]
-define void @store_inline_imm_m_2.0_f16(half addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_2.0_f16(half addrspace(1)* %out) {
store half -2.0, half addrspace(1)* %out
ret void
}
@@ -83,7 +83,7 @@ define void @store_inline_imm_m_2.0_f16(half addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_inline_imm_4.0_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x4400{{$}}
; GCN: buffer_store_short [[REG]]
-define void @store_inline_imm_4.0_f16(half addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_4.0_f16(half addrspace(1)* %out) {
store half 4.0, half addrspace(1)* %out
ret void
}
@@ -92,7 +92,7 @@ define void @store_inline_imm_4.0_f16(half addrspace(1)* %out) {
; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xc400{{$}}
; VI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffffc400{{$}}
; GCN: buffer_store_short [[REG]]
-define void @store_inline_imm_m_4.0_f16(half addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_4.0_f16(half addrspace(1)* %out) {
store half -4.0, half addrspace(1)* %out
ret void
}
@@ -101,7 +101,7 @@ define void @store_inline_imm_m_4.0_f16(half addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_inline_imm_inv_2pi_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3118{{$}}
; GCN: buffer_store_short [[REG]]
-define void @store_inline_imm_inv_2pi_f16(half addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_inv_2pi_f16(half addrspace(1)* %out) {
store half 0xH3118, half addrspace(1)* %out
ret void
}
@@ -110,7 +110,7 @@ define void @store_inline_imm_inv_2pi_f16(half addrspace(1)* %out) {
; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xb118{{$}}
; VI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffffb118{{$}}
; GCN: buffer_store_short [[REG]]
-define void @store_inline_imm_m_inv_2pi_f16(half addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_inv_2pi_f16(half addrspace(1)* %out) {
store half 0xHB118, half addrspace(1)* %out
ret void
}
@@ -118,7 +118,7 @@ define void @store_inline_imm_m_inv_2pi_f16(half addrspace(1)* %out) {
; GCN-LABEL: {{^}}store_literal_imm_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x6c00
; GCN: buffer_store_short [[REG]]
-define void @store_literal_imm_f16(half addrspace(1)* %out) {
+define amdgpu_kernel void @store_literal_imm_f16(half addrspace(1)* %out) {
store half 4096.0, half addrspace(1)* %out
ret void
}
@@ -127,7 +127,7 @@ define void @store_literal_imm_f16(half addrspace(1)* %out) {
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], 0, [[VAL]]{{$}}
; VI: buffer_store_short [[REG]]
-define void @add_inline_imm_0.0_f16(half addrspace(1)* %out, half %x) {
+define amdgpu_kernel void @add_inline_imm_0.0_f16(half addrspace(1)* %out, half %x) {
%y = fadd half %x, 0.0
store half %y, half addrspace(1)* %out
ret void
@@ -137,7 +137,7 @@ define void @add_inline_imm_0.0_f16(half addrspace(1)* %out, half %x) {
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], 0.5, [[VAL]]{{$}}
; VI: buffer_store_short [[REG]]
-define void @add_inline_imm_0.5_f16(half addrspace(1)* %out, half %x) {
+define amdgpu_kernel void @add_inline_imm_0.5_f16(half addrspace(1)* %out, half %x) {
%y = fadd half %x, 0.5
store half %y, half addrspace(1)* %out
ret void
@@ -147,7 +147,7 @@ define void @add_inline_imm_0.5_f16(half addrspace(1)* %out, half %x) {
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], -0.5, [[VAL]]{{$}}
; VI: buffer_store_short [[REG]]
-define void @add_inline_imm_neg_0.5_f16(half addrspace(1)* %out, half %x) {
+define amdgpu_kernel void @add_inline_imm_neg_0.5_f16(half addrspace(1)* %out, half %x) {
%y = fadd half %x, -0.5
store half %y, half addrspace(1)* %out
ret void
@@ -157,7 +157,7 @@ define void @add_inline_imm_neg_0.5_f16(half addrspace(1)* %out, half %x) {
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], 1.0, [[VAL]]{{$}}
; VI: buffer_store_short [[REG]]
-define void @add_inline_imm_1.0_f16(half addrspace(1)* %out, half %x) {
+define amdgpu_kernel void @add_inline_imm_1.0_f16(half addrspace(1)* %out, half %x) {
%y = fadd half %x, 1.0
store half %y, half addrspace(1)* %out
ret void
@@ -167,7 +167,7 @@ define void @add_inline_imm_1.0_f16(half addrspace(1)* %out, half %x) {
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], -1.0, [[VAL]]{{$}}
; VI: buffer_store_short [[REG]]
-define void @add_inline_imm_neg_1.0_f16(half addrspace(1)* %out, half %x) {
+define amdgpu_kernel void @add_inline_imm_neg_1.0_f16(half addrspace(1)* %out, half %x) {
%y = fadd half %x, -1.0
store half %y, half addrspace(1)* %out
ret void
@@ -177,7 +177,7 @@ define void @add_inline_imm_neg_1.0_f16(half addrspace(1)* %out, half %x) {
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], 2.0, [[VAL]]{{$}}
; VI: buffer_store_short [[REG]]
-define void @add_inline_imm_2.0_f16(half addrspace(1)* %out, half %x) {
+define amdgpu_kernel void @add_inline_imm_2.0_f16(half addrspace(1)* %out, half %x) {
%y = fadd half %x, 2.0
store half %y, half addrspace(1)* %out
ret void
@@ -187,7 +187,7 @@ define void @add_inline_imm_2.0_f16(half addrspace(1)* %out, half %x) {
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], -2.0, [[VAL]]{{$}}
; VI: buffer_store_short [[REG]]
-define void @add_inline_imm_neg_2.0_f16(half addrspace(1)* %out, half %x) {
+define amdgpu_kernel void @add_inline_imm_neg_2.0_f16(half addrspace(1)* %out, half %x) {
%y = fadd half %x, -2.0
store half %y, half addrspace(1)* %out
ret void
@@ -197,7 +197,7 @@ define void @add_inline_imm_neg_2.0_f16(half addrspace(1)* %out, half %x) {
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], 4.0, [[VAL]]{{$}}
; VI: buffer_store_short [[REG]]
-define void @add_inline_imm_4.0_f16(half addrspace(1)* %out, half %x) {
+define amdgpu_kernel void @add_inline_imm_4.0_f16(half addrspace(1)* %out, half %x) {
%y = fadd half %x, 4.0
store half %y, half addrspace(1)* %out
ret void
@@ -207,7 +207,7 @@ define void @add_inline_imm_4.0_f16(half addrspace(1)* %out, half %x) {
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], -4.0, [[VAL]]{{$}}
; VI: buffer_store_short [[REG]]
-define void @add_inline_imm_neg_4.0_f16(half addrspace(1)* %out, half %x) {
+define amdgpu_kernel void @add_inline_imm_neg_4.0_f16(half addrspace(1)* %out, half %x) {
%y = fadd half %x, -4.0
store half %y, half addrspace(1)* %out
ret void
@@ -217,7 +217,7 @@ define void @add_inline_imm_neg_4.0_f16(half addrspace(1)* %out, half %x) {
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], 0.5, [[VAL]]
; VI: buffer_store_short [[REG]]
-define void @commute_add_inline_imm_0.5_f16(half addrspace(1)* %out, half addrspace(1)* %in) {
+define amdgpu_kernel void @commute_add_inline_imm_0.5_f16(half addrspace(1)* %out, half addrspace(1)* %in) {
%x = load half, half addrspace(1)* %in
%y = fadd half %x, 0.5
store half %y, half addrspace(1)* %out
@@ -228,7 +228,7 @@ define void @commute_add_inline_imm_0.5_f16(half addrspace(1)* %out, half addrsp
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], 0x6400, [[VAL]]
; VI: buffer_store_short [[REG]]
-define void @commute_add_literal_f16(half addrspace(1)* %out, half addrspace(1)* %in) {
+define amdgpu_kernel void @commute_add_literal_f16(half addrspace(1)* %out, half addrspace(1)* %in) {
%x = load half, half addrspace(1)* %in
%y = fadd half %x, 1024.0
store half %y, half addrspace(1)* %out
@@ -239,7 +239,7 @@ define void @commute_add_literal_f16(half addrspace(1)* %out, half addrspace(1)*
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], 1, [[VAL]]{{$}}
; VI: buffer_store_short [[REG]]
-define void @add_inline_imm_1_f16(half addrspace(1)* %out, half %x) {
+define amdgpu_kernel void @add_inline_imm_1_f16(half addrspace(1)* %out, half %x) {
%y = fadd half %x, 0xH0001
store half %y, half addrspace(1)* %out
ret void
@@ -249,7 +249,7 @@ define void @add_inline_imm_1_f16(half addrspace(1)* %out, half %x) {
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], 2, [[VAL]]{{$}}
; VI: buffer_store_short [[REG]]
-define void @add_inline_imm_2_f16(half addrspace(1)* %out, half %x) {
+define amdgpu_kernel void @add_inline_imm_2_f16(half addrspace(1)* %out, half %x) {
%y = fadd half %x, 0xH0002
store half %y, half addrspace(1)* %out
ret void
@@ -259,7 +259,7 @@ define void @add_inline_imm_2_f16(half addrspace(1)* %out, half %x) {
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], 16, [[VAL]]{{$}}
; VI: buffer_store_short [[REG]]
-define void @add_inline_imm_16_f16(half addrspace(1)* %out, half %x) {
+define amdgpu_kernel void @add_inline_imm_16_f16(half addrspace(1)* %out, half %x) {
%y = fadd half %x, 0xH0010
store half %y, half addrspace(1)* %out
ret void
@@ -269,7 +269,7 @@ define void @add_inline_imm_16_f16(half addrspace(1)* %out, half %x) {
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], -1, [[VAL]]{{$}}
; VI: buffer_store_short [[REG]]
-define void @add_inline_imm_neg_1_f16(half addrspace(1)* %out, half %x) {
+define amdgpu_kernel void @add_inline_imm_neg_1_f16(half addrspace(1)* %out, half %x) {
%y = fadd half %x, 0xHFFFF
store half %y, half addrspace(1)* %out
ret void
@@ -279,7 +279,7 @@ define void @add_inline_imm_neg_1_f16(half addrspace(1)* %out, half %x) {
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], -2, [[VAL]]{{$}}
; VI: buffer_store_short [[REG]]
-define void @add_inline_imm_neg_2_f16(half addrspace(1)* %out, half %x) {
+define amdgpu_kernel void @add_inline_imm_neg_2_f16(half addrspace(1)* %out, half %x) {
%y = fadd half %x, 0xHFFFE
store half %y, half addrspace(1)* %out
ret void
@@ -289,7 +289,7 @@ define void @add_inline_imm_neg_2_f16(half addrspace(1)* %out, half %x) {
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], -16, [[VAL]]{{$}}
; VI: buffer_store_short [[REG]]
-define void @add_inline_imm_neg_16_f16(half addrspace(1)* %out, half %x) {
+define amdgpu_kernel void @add_inline_imm_neg_16_f16(half addrspace(1)* %out, half %x) {
%y = fadd half %x, 0xHFFF0
store half %y, half addrspace(1)* %out
ret void
@@ -299,7 +299,7 @@ define void @add_inline_imm_neg_16_f16(half addrspace(1)* %out, half %x) {
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], 63, [[VAL]]
; VI: buffer_store_short [[REG]]
-define void @add_inline_imm_63_f16(half addrspace(1)* %out, half %x) {
+define amdgpu_kernel void @add_inline_imm_63_f16(half addrspace(1)* %out, half %x) {
%y = fadd half %x, 0xH003F
store half %y, half addrspace(1)* %out
ret void
@@ -309,7 +309,7 @@ define void @add_inline_imm_63_f16(half addrspace(1)* %out, half %x) {
; VI: buffer_load_ushort [[VAL:v[0-9]+]]
; VI: v_add_f16_e32 [[REG:v[0-9]+]], 64, [[VAL]]
; VI: buffer_store_short [[REG]]
-define void @add_inline_imm_64_f16(half addrspace(1)* %out, half %x) {
+define amdgpu_kernel void @add_inline_imm_64_f16(half addrspace(1)* %out, half %x) {
%y = fadd half %x, 0xH0040
store half %y, half addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/immv216.ll b/test/CodeGen/AMDGPU/immv216.ll
new file mode 100644
index 000000000000..85ad365d02a8
--- /dev/null
+++ b/test/CodeGen/AMDGPU/immv216.ll
@@ -0,0 +1,446 @@
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
+; FIXME: Merge into imm.ll
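+; Note: a <2 x half> constant with identical elements packs into one
+; repeated 16-bit pattern (e.g. 0.5 is 0xH3800, packed 0x38003800), so
+; GFX9 can fold it directly into v_pk_add_f16 while VI operates on the
+; two halves with separate v_add_f16 instructions, as the checks below
+; expect.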
+
+; GCN-LABEL: {{^}}store_inline_imm_neg_0.0_v2i16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x80008000{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @store_inline_imm_neg_0.0_v2i16(<2 x i16> addrspace(1)* %out) #0 {
+ store <2 x i16> <i16 -32768, i16 -32768>, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}store_inline_imm_0.0_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @store_inline_imm_0.0_v2f16(<2 x half> addrspace(1)* %out) #0 {
+ store <2 x half> <half 0.0, half 0.0>, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}store_imm_neg_0.0_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x80008000{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @store_imm_neg_0.0_v2f16(<2 x half> addrspace(1)* %out) #0 {
+ store <2 x half> <half -0.0, half -0.0>, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}store_inline_imm_0.5_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x38003800{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @store_inline_imm_0.5_v2f16(<2 x half> addrspace(1)* %out) #0 {
+ store <2 x half> <half 0.5, half 0.5>, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}store_inline_imm_m_0.5_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0xb800b800{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @store_inline_imm_m_0.5_v2f16(<2 x half> addrspace(1)* %out) #0 {
+ store <2 x half> <half -0.5, half -0.5>, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}store_inline_imm_1.0_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3c003c00{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @store_inline_imm_1.0_v2f16(<2 x half> addrspace(1)* %out) #0 {
+ store <2 x half> <half 1.0, half 1.0>, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}store_inline_imm_m_1.0_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0xbc00bc00{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @store_inline_imm_m_1.0_v2f16(<2 x half> addrspace(1)* %out) #0 {
+ store <2 x half> <half -1.0, half -1.0>, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}store_inline_imm_2.0_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x40004000{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @store_inline_imm_2.0_v2f16(<2 x half> addrspace(1)* %out) #0 {
+ store <2 x half> <half 2.0, half 2.0>, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}store_inline_imm_m_2.0_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0xc000c000{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @store_inline_imm_m_2.0_v2f16(<2 x half> addrspace(1)* %out) #0 {
+ store <2 x half> <half -2.0, half -2.0>, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}store_inline_imm_4.0_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x44004400{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @store_inline_imm_4.0_v2f16(<2 x half> addrspace(1)* %out) #0 {
+ store <2 x half> <half 4.0, half 4.0>, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}store_inline_imm_m_4.0_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0xc400c400{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @store_inline_imm_m_4.0_v2f16(<2 x half> addrspace(1)* %out) #0 {
+ store <2 x half> <half -4.0, half -4.0>, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}store_inline_imm_inv_2pi_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x31183118{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @store_inline_imm_inv_2pi_v2f16(<2 x half> addrspace(1)* %out) #0 {
+ store <2 x half> <half 0xH3118, half 0xH3118>, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}store_inline_imm_m_inv_2pi_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0xb118b118{{$}}
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @store_inline_imm_m_inv_2pi_v2f16(<2 x half> addrspace(1)* %out) #0 {
+ store <2 x half> <half 0xHB118, half 0xHB118>, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}store_literal_imm_v2f16:
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x6c006c00
+; GCN: buffer_store_dword [[REG]]
+define amdgpu_kernel void @store_literal_imm_v2f16(<2 x half> addrspace(1)* %out) #0 {
+ store <2 x half> <half 4096.0, half 4096.0>, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}add_inline_imm_0.0_v2f16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 0{{$}}
+; GFX9: buffer_store_dword [[REG]]
+
+; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
+; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 0, [[VAL0]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 0, [[VAL1]]
+; VI: v_or_b32
+; VI: buffer_store_dword
+define amdgpu_kernel void @add_inline_imm_0.0_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
+ %y = fadd <2 x half> %x, <half 0.0, half 0.0>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}add_inline_imm_0.5_v2f16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 0.5{{$}}
+; GFX9: buffer_store_dword [[REG]]
+
+; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
+; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 0.5, [[VAL0]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 0.5, [[VAL1]]
+; VI: v_or_b32
+; VI: buffer_store_dword
+define amdgpu_kernel void @add_inline_imm_0.5_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
+ %y = fadd <2 x half> %x, <half 0.5, half 0.5>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}add_inline_imm_neg_0.5_v2f16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], -0.5{{$}}
+; GFX9: buffer_store_dword [[REG]]
+
+; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
+; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, -0.5, [[VAL0]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, -0.5, [[VAL1]]
+; VI: v_or_b32
+; VI: buffer_store_dword
+define amdgpu_kernel void @add_inline_imm_neg_0.5_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
+ %y = fadd <2 x half> %x, <half -0.5, half -0.5>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}add_inline_imm_1.0_v2f16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 1.0{{$}}
+; GFX9: buffer_store_dword [[REG]]
+
+; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
+; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 1.0, [[VAL0]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 1.0, [[VAL1]]
+; VI: v_or_b32
+; VI: buffer_store_dword
+define amdgpu_kernel void @add_inline_imm_1.0_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
+ %y = fadd <2 x half> %x, <half 1.0, half 1.0>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}add_inline_imm_neg_1.0_v2f16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], -1.0{{$}}
+; GFX9: buffer_store_dword [[REG]]
+
+; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
+; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, -1.0, [[VAL0]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, -1.0, [[VAL1]]
+; VI: v_or_b32
+; VI: buffer_store_dword
+define amdgpu_kernel void @add_inline_imm_neg_1.0_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
+ %y = fadd <2 x half> %x, <half -1.0, half -1.0>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}add_inline_imm_2.0_v2f16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 2.0{{$}}
+; GFX9: buffer_store_dword [[REG]]
+
+; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
+; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 2.0, [[VAL0]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 2.0, [[VAL1]]
+; VI: v_or_b32
+; VI: buffer_store_dword
+define amdgpu_kernel void @add_inline_imm_2.0_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
+ %y = fadd <2 x half> %x, <half 2.0, half 2.0>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}add_inline_imm_neg_2.0_v2f16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], -2.0{{$}}
+; GFX9: buffer_store_dword [[REG]]
+
+; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
+; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, -2.0, [[VAL0]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, -2.0, [[VAL1]]
+; VI: v_or_b32
+; VI: buffer_store_dword
+define amdgpu_kernel void @add_inline_imm_neg_2.0_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
+ %y = fadd <2 x half> %x, <half -2.0, half -2.0>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}add_inline_imm_4.0_v2f16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 4.0{{$}}
+; GFX9: buffer_store_dword [[REG]]
+
+; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
+; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 4.0, [[VAL0]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 4.0, [[VAL1]]
+; VI: v_or_b32
+; VI: buffer_store_dword
+define amdgpu_kernel void @add_inline_imm_4.0_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
+ %y = fadd <2 x half> %x, <half 4.0, half 4.0>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}add_inline_imm_neg_4.0_v2f16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], -4.0{{$}}
+; GFX9: buffer_store_dword [[REG]]
+
+; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
+; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, -4.0, [[VAL0]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, -4.0, [[VAL1]]
+; VI: v_or_b32
+; VI: buffer_store_dword
+define amdgpu_kernel void @add_inline_imm_neg_4.0_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
+ %y = fadd <2 x half> %x, <half -4.0, half -4.0>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}commute_add_inline_imm_0.5_v2f16:
+; GFX9: buffer_load_dword [[VAL:v[0-9]+]]
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 0.5
+; GFX9: buffer_store_dword [[REG]]
+
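+; VI has no packed f16 add, so it splits the loaded dword: the high half
+; comes from a 16-bit shift rather than an 'and' mask (hence the VI-NOT
+; check), each half gets its own v_add_f16, and the halves are
+; recombined with v_or_b32.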
+; VI: buffer_load_dword
+; VI-NOT: and
+; VI: v_lshrrev_b32_e32 {{v[0-9]+}}, 16,
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 0.5, v{{[0-9]+}}
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 0.5, v{{[0-9]+}}
+; VI: v_or_b32
+; VI: buffer_store_dword
+define amdgpu_kernel void @commute_add_inline_imm_0.5_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+ %x = load <2 x half>, <2 x half> addrspace(1)* %in
+ %y = fadd <2 x half> %x, <half 0.5, half 0.5>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}commute_add_literal_v2f16:
+; GFX9: buffer_load_dword [[VAL:v[0-9]+]]
+; GFX9: s_mov_b32 [[K:s[0-9]+]], 0x64006400
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[K]], [[VAL]]
+; GFX9: buffer_store_dword [[REG]]
+
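+; On VI the 0x6400 literal is materialized once in a VGPR and reused for
+; both halves: a plain v_add_f16 for the low word and v_add_f16_sdwa
+; selecting WORD_1 for the high word, again with no 'and' masking.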
+; VI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x6400{{$}}
+; VI-DAG: buffer_load_dword
+; VI-NOT: and
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, [[K]], v{{[0-9]+}}
+; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[K]], v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: buffer_store_dword
+define amdgpu_kernel void @commute_add_literal_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+ %x = load <2 x half>, <2 x half> addrspace(1)* %in
+ %y = fadd <2 x half> %x, <half 1024.0, half 1024.0>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}add_inline_imm_1_v2f16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 1{{$}}
+; GFX9: buffer_store_dword [[REG]]
+
+; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
+; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 1, [[VAL0]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 1, [[VAL1]]
+; VI: v_or_b32
+; VI: buffer_store_dword
+define amdgpu_kernel void @add_inline_imm_1_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
+ %y = fadd <2 x half> %x, <half 0xH0001, half 0xH0001>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}add_inline_imm_2_v2f16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 2{{$}}
+; GFX9: buffer_store_dword [[REG]]
+
+; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
+; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 2, [[VAL0]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 2, [[VAL1]]
+; VI: v_or_b32
+; VI: buffer_store_dword
+define amdgpu_kernel void @add_inline_imm_2_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
+ %y = fadd <2 x half> %x, <half 0xH0002, half 0xH0002>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}add_inline_imm_16_v2f16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 16{{$}}
+; GFX9: buffer_store_dword [[REG]]
+
+; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
+; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 16, [[VAL0]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 16, [[VAL1]]
+; VI: v_or_b32
+; VI: buffer_store_dword
+define amdgpu_kernel void @add_inline_imm_16_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
+ %y = fadd <2 x half> %x, <half 0xH0010, half 0xH0010>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}add_inline_imm_neg_1_v2f16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], -1{{$}}
+; GFX9: buffer_store_dword [[REG]]
+
+; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
+; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, -1, [[VAL0]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, -1, [[VAL1]]
+; VI: v_or_b32
+; VI: buffer_store_dword
+define amdgpu_kernel void @add_inline_imm_neg_1_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
+ %y = fadd <2 x half> %x, <half 0xHFFFF, half 0xHFFFF>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}add_inline_imm_neg_2_v2f16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], -2{{$}}
+; GFX9: buffer_store_dword [[REG]]
+
+; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
+; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, -2, [[VAL0]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, -2, [[VAL1]]
+; VI: v_or_b32
+; VI: buffer_store_dword
+define amdgpu_kernel void @add_inline_imm_neg_2_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
+ %y = fadd <2 x half> %x, <half 0xHFFFE, half 0xHFFFE>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}add_inline_imm_neg_16_v2f16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], -16{{$}}
+; GFX9: buffer_store_dword [[REG]]
+
+; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
+; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, -16, [[VAL0]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, -16, [[VAL1]]
+; VI: v_or_b32
+; VI: buffer_store_dword
+define amdgpu_kernel void @add_inline_imm_neg_16_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
+ %y = fadd <2 x half> %x, <half 0xHFFF0, half 0xHFFF0>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}add_inline_imm_63_v2f16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 63
+; GFX9: buffer_store_dword [[REG]]
+
+; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
+; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 63, [[VAL0]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 63, [[VAL1]]
+; VI: v_or_b32
+; VI: buffer_store_dword
+define amdgpu_kernel void @add_inline_imm_63_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
+ %y = fadd <2 x half> %x, <half 0xH003F, half 0xH003F>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}add_inline_imm_64_v2f16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 64
+; GFX9: buffer_store_dword [[REG]]
+
+; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
+; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 64, [[VAL0]]
+; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 64, [[VAL1]]
+; VI: v_or_b32
+; VI: buffer_store_dword
+define amdgpu_kernel void @add_inline_imm_64_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
+ %y = fadd <2 x half> %x, <half 0xH0040, half 0xH0040>
+ store <2 x half> %y, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/indirect-addressing-si-noopt.ll b/test/CodeGen/AMDGPU/indirect-addressing-si-noopt.ll
index 877956be3088..8e207a38c847 100644
--- a/test/CodeGen/AMDGPU/indirect-addressing-si-noopt.ll
+++ b/test/CodeGen/AMDGPU/indirect-addressing-si-noopt.ll
@@ -10,7 +10,7 @@
; CHECK: s_mov_b32 m0, [[IN]]
; CHECK: v_movreld_b32_e32 v[[ELT0:[0-9]+]]
; CHECK-NEXT: buffer_store_dwordx4 v{{\[}}[[ELT0]]:
-define void @insert_wo_offset(<4 x float> addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @insert_wo_offset(<4 x float> addrspace(1)* %out, i32 %in) {
entry:
%ins = insertelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, float 5.0, i32 %in
store <4 x float> %ins, <4 x float> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/indirect-addressing-si.ll b/test/CodeGen/AMDGPU/indirect-addressing-si.ll
index 208e55c143ac..b18ae353ca4c 100644
--- a/test/CodeGen/AMDGPU/indirect-addressing-si.ll
+++ b/test/CodeGen/AMDGPU/indirect-addressing-si.ll
@@ -1,6 +1,7 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=MOVREL %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=MOVREL %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -amdgpu-vgpr-index-mode -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=IDXMODE %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=IDXMODE %s

; Tests for indirect addressing on SI, which is implemented using dynamic
; indexing of vectors.
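; The MOVREL prefix checks the m0-based v_movrel* path; IDXMODE checks
; the s_set_gpr_idx_on/s_set_gpr_idx_off path, requested explicitly with
; -amdgpu-vgpr-index-mode on tonga and expected by default for gfx900.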
@@ -18,7 +19,7 @@
; IDXMODE: s_set_gpr_idx_on [[IN]], src0{{$}}
; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, [[BASEREG]]
; IDXMODE-NEXT: s_set_gpr_idx_off
-define void @extract_w_offset(float addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @extract_w_offset(float addrspace(1)* %out, i32 %in) {
entry:
%idx = add i32 %in, 1
%elt = extractelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, i32 %idx
@@ -43,7 +44,7 @@ entry:
; IDXMODE: s_set_gpr_idx_on s{{[0-9]+}}, src0{{$}}
; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; IDXMODE-NEXT: s_set_gpr_idx_off
-define void @extract_w_offset_salu_use_vector(i32 addrspace(1)* %out, i32 %in, <4 x i32> %or.val) {
+define amdgpu_kernel void @extract_w_offset_salu_use_vector(i32 addrspace(1)* %out, i32 %in, <4 x i32> %or.val) {
entry:
%idx = add i32 %in, 1
%vec = or <4 x i32> %or.val, <i32 1, i32 2, i32 3, i32 4>
@@ -65,7 +66,7 @@ entry:
; IDXMODE: s_set_gpr_idx_on [[IN]], src0{{$}}
; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, [[BASEREG]]
; IDXMODE-NEXT: s_set_gpr_idx_off
-define void @extract_wo_offset(float addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @extract_wo_offset(float addrspace(1)* %out, i32 %in) {
entry:
%elt = extractelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, i32 %in
store float %elt, float addrspace(1)* %out
@@ -83,7 +84,7 @@ entry:
; IDXMODE-NEXT: s_set_gpr_idx_on [[ADD_IDX]], src0{{$}}
; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; IDXMODE-NEXT: s_set_gpr_idx_off
-define void @extract_neg_offset_sgpr(i32 addrspace(1)* %out, i32 %offset) {
+define amdgpu_kernel void @extract_neg_offset_sgpr(i32 addrspace(1)* %out, i32 %offset) {
entry:
%index = add i32 %offset, -512
%value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
@@ -104,7 +105,7 @@ entry:
; IDXMODE-NEXT: s_set_gpr_idx_on [[ADD_IDX]], src0{{$}}
; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; IDXMODE-NEXT: s_set_gpr_idx_off
-define void @extract_neg_offset_sgpr_loaded(i32 addrspace(1)* %out, <4 x i32> %vec0, <4 x i32> %vec1, i32 %offset) {
+define amdgpu_kernel void @extract_neg_offset_sgpr_loaded(i32 addrspace(1)* %out, <4 x i32> %vec0, <4 x i32> %vec1, i32 %offset) {
entry:
%index = add i32 %offset, -512
%or = or <4 x i32> %vec0, %vec1
@@ -136,7 +137,7 @@ entry:
; IDXMODE: s_set_gpr_idx_off

; GCN: buffer_store_dword [[RESULT]]
-define void @extract_neg_offset_vgpr(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @extract_neg_offset_vgpr(i32 addrspace(1)* %out) {
entry:
%id = call i32 @llvm.amdgcn.workitem.id.x() #1
%index = add i32 %id, -512
@@ -146,7 +147,7 @@ entry:
}

; GCN-LABEL: {{^}}extract_undef_offset_sgpr:
-define void @extract_undef_offset_sgpr(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @extract_undef_offset_sgpr(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
entry:
%ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
%value = extractelement <4 x i32> %ld, i32 undef
@@ -158,7 +159,7 @@ entry:
; GCN-DAG: buffer_load_dwordx4
; MOVREL-DAG: s_mov_b32 m0,
; MOVREL: v_movreld_b32
-define void @insert_undef_offset_sgpr_vector_src(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @insert_undef_offset_sgpr_vector_src(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
entry:
%ld = load <4 x i32>, <4 x i32> addrspace(1)* %in
%value = insertelement <4 x i32> %ld, i32 5, i32 undef
@@ -177,7 +178,7 @@ entry:

; MOVREL: v_movreld_b32_e32 v[[ELT1]], v[[INS]]
; MOVREL: buffer_store_dwordx4 v{{\[}}[[ELT0]]:[[ELT3]]{{\]}}
-define void @insert_w_offset(<4 x float> addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @insert_w_offset(<4 x float> addrspace(1)* %out, i32 %in) {
entry:
%0 = add i32 %in, 1
%1 = insertelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, float 5.0, i32 %0
@@ -196,7 +197,7 @@ entry:
; IDXMODE-NEXT: s_set_gpr_idx_off

; GCN: buffer_store_dwordx4 v{{\[}}[[ELT0]]:
-define void @insert_wo_offset(<4 x float> addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @insert_wo_offset(<4 x float> addrspace(1)* %out, i32 %in) {
entry:
%0 = insertelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, float 5.0, i32 %in
store <4 x float> %0, <4 x float> addrspace(1)* %out
@@ -212,7 +213,7 @@ entry:
; IDXMODE: s_set_gpr_idx_on [[ADD_IDX]], dst
; IDXMODE-NEXT: v_mov_b32_e32 v0, 5
; IDXMODE-NEXT: s_set_gpr_idx_off
-define void @insert_neg_offset_sgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out, i32 %offset) {
+define amdgpu_kernel void @insert_neg_offset_sgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out, i32 %offset) {
entry:
%index = add i32 %offset, -512
%value = insertelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 5, i32 %index
@@ -232,7 +233,7 @@ entry:
; IDXMODE: s_set_gpr_idx_on [[ADD_IDX]], dst
; IDXMODE-NEXT: v_mov_b32_e32 v0, 5
; IDXMODE-NEXT: s_set_gpr_idx_off
-define void @insert_neg_offset_sgpr_loadreg(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out, <4 x i32> %vec, i32 %offset) {
+define amdgpu_kernel void @insert_neg_offset_sgpr_loadreg(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out, <4 x i32> %vec, i32 %offset) {
entry:
%index = add i32 %offset, -512
%value = insertelement <4 x i32> %vec, i32 5, i32 %index
@@ -269,7 +270,7 @@ entry:
; IDXMODE: s_set_gpr_idx_off

; GCN: buffer_store_dword
-define void @insert_neg_offset_vgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out) {
+define amdgpu_kernel void @insert_neg_offset_vgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out) {
entry:
%id = call i32 @llvm.amdgcn.workitem.id.x() #1
%index = add i32 %id, -512
@@ -304,7 +305,7 @@ entry:
; GCN: s_cbranch_execnz
; IDXMODE: s_set_gpr_idx_off

-define void @insert_neg_inline_offset_vgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out) {
+define amdgpu_kernel void @insert_neg_inline_offset_vgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out) {
entry:
%id = call i32 @llvm.amdgcn.workitem.id.x() #1
%index = add i32 %id, -16
@@ -374,7 +375,7 @@ entry:

; GCN: buffer_store_dword [[MOVREL0]]
; GCN: buffer_store_dword [[MOVREL1]]
-define void @extract_vgpr_offset_multiple_in_block(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @extract_vgpr_offset_multiple_in_block(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %in) #0 {
entry:
%id = call i32 @llvm.amdgcn.workitem.id.x() #1
%id.ext = zext i32 %id to i64
@@ -449,7 +450,7 @@ bb2:

; GCN: buffer_store_dwordx4 v{{\[}}[[VEC_ELT0]]:
; GCN: buffer_store_dword [[INS0]]
-define void @insert_vgpr_offset_multiple_in_block(<4 x i32> addrspace(1)* %out0, <4 x i32> addrspace(1)* %out1, i32 addrspace(1)* %in, <4 x i32> %vec0) #0 {
+define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(<4 x i32> addrspace(1)* %out0, <4 x i32> addrspace(1)* %out1, i32 addrspace(1)* %in, <4 x i32> %vec0) #0 {
entry:
%id = call i32 @llvm.amdgcn.workitem.id.x() #1
%id.ext = zext i32 %id to i64
@@ -498,7 +499,7 @@ bb2:
; GCN: [[ENDBB]]:
; GCN: buffer_store_dword
; GCN: s_endpgm
-define void @extract_adjacent_blocks(i32 %arg) #0 {
+define amdgpu_kernel void @extract_adjacent_blocks(i32 %arg) #0 {
bb:
%tmp = icmp eq i32 %arg, 0
br i1 %tmp, label %bb1, label %bb4
@@ -548,7 +549,7 @@ bb7:
; GCN: [[ENDBB]]:
; GCN: buffer_store_dword
; GCN: s_endpgm
-define void @insert_adjacent_blocks(i32 %arg, float %val0) #0 {
+define amdgpu_kernel void @insert_adjacent_blocks(i32 %arg, float %val0) #0 {
bb:
%tmp = icmp eq i32 %arg, 0
br i1 %tmp, label %bb1, label %bb4
@@ -609,7 +610,7 @@ bb7: ; preds = %bb4, %bb1
; GCN: ds_write_b32
; GCN: ds_write_b32
; GCN: s_endpgm
-define void @multi_same_block(i32 %arg) #0 {
+define amdgpu_kernel void @multi_same_block(i32 %arg) #0 {
bb:
%tmp1 = add i32 %arg, -16
%tmp2 = insertelement <6 x float> <float 1.700000e+01, float 1.800000e+01, float 1.900000e+01, float 2.000000e+01, float 2.100000e+01, float 2.200000e+01>, float 4.000000e+00, i32 %tmp1
@@ -636,7 +637,7 @@ bb:
; IDXMODE: s_set_gpr_idx_off

; GCN: buffer_store_dword [[EXTRACT]]
-define void @extract_largest_inbounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
+define amdgpu_kernel void @extract_largest_inbounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
entry:
%ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
%offset = add i32 %idx, 3
@@ -657,7 +658,7 @@ entry:
; IDXMODE: s_set_gpr_idx_off

; GCN: buffer_store_dword [[EXTRACT]]
-define void @extract_out_of_bounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
+define amdgpu_kernel void @extract_out_of_bounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
entry:
%ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
%offset = add i32 %idx, 4
@@ -680,7 +681,7 @@ entry:
; IDXMODE: s_set_gpr_idx_on [[IDX_SHL]], src0
; IDXMODE: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; IDXMODE: s_set_gpr_idx_off
-define void @extractelement_v4i32_or_index(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx.in) {
+define amdgpu_kernel void @extractelement_v4i32_or_index(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx.in) {
entry:
%ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
%idx.shl = shl i32 %idx.in, 2
@@ -701,7 +702,7 @@ entry:
; IDXMODE: s_set_gpr_idx_on [[IDX_SHL]], dst
; IDXMODE: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; IDXMODE: s_set_gpr_idx_off
-define void @insertelement_v4f32_or_index(<4 x float> addrspace(1)* %out, <4 x float> %a, i32 %idx.in) nounwind {
+define amdgpu_kernel void @insertelement_v4f32_or_index(<4 x float> addrspace(1)* %out, <4 x float> %a, i32 %idx.in) nounwind {
%idx.shl = shl i32 %idx.in, 2
%idx = or i32 %idx.shl, 1
%vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 %idx
@@ -728,7 +729,7 @@ define void @insertelement_v4f32_or_index(<4 x float> addrspace(1)* %out, <4 x f
; IDXMODE: s_set_gpr_idx_idx
; IDXMODE: v_mov_b32_e32
; GCN: s_cbranch_execnz [[REGLOOP]]
-define void @broken_phi_bb(i32 %arg, i32 %arg1) #0 {
+define amdgpu_kernel void @broken_phi_bb(i32 %arg, i32 %arg1) #0 {
bb:
br label %bb2
diff --git a/test/CodeGen/AMDGPU/indirect-private-64.ll b/test/CodeGen/AMDGPU/indirect-private-64.ll
index 4db87c3c1b64..7f08a89d149e 100644
--- a/test/CodeGen/AMDGPU/indirect-private-64.ll
+++ b/test/CodeGen/AMDGPU/indirect-private-64.ll
@@ -20,10 +20,10 @@ declare void @llvm.amdgcn.s.barrier() #0
; SI-PROMOTE: ds_read_b64
; CI-PROMOTE: ds_write_b64
; CI-PROMOTE: ds_read_b64
-define void @private_access_f64_alloca(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in, i32 %b) #1 {
+define amdgpu_kernel void @private_access_f64_alloca(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in, i32 %b) #1 {
%val = load double, double addrspace(1)* %in, align 8
- %array = alloca [16 x double], align 8
- %ptr = getelementptr inbounds [16 x double], [16 x double]* %array, i32 0, i32 %b
+ %array = alloca [8 x double], align 8
+ %ptr = getelementptr inbounds [8 x double], [8 x double]* %array, i32 0, i32 %b
store double %val, double* %ptr, align 8
call void @llvm.amdgcn.s.barrier()
%result = load double, double* %ptr, align 8
@@ -51,10 +51,10 @@ define void @private_access_f64_alloca(double addrspace(1)* noalias %out, double
; SI-PROMOTE: ds_read_b64
; CI-PROMOTE: ds_write2_b64
; CI-PROMOTE: ds_read2_b64
-define void @private_access_v2f64_alloca(<2 x double> addrspace(1)* noalias %out, <2 x double> addrspace(1)* noalias %in, i32 %b) #1 {
+define amdgpu_kernel void @private_access_v2f64_alloca(<2 x double> addrspace(1)* noalias %out, <2 x double> addrspace(1)* noalias %in, i32 %b) #1 {
%val = load <2 x double>, <2 x double> addrspace(1)* %in, align 16
- %array = alloca [8 x <2 x double>], align 16
- %ptr = getelementptr inbounds [8 x <2 x double>], [8 x <2 x double>]* %array, i32 0, i32 %b
+ %array = alloca [4 x <2 x double>], align 16
+ %ptr = getelementptr inbounds [4 x <2 x double>], [4 x <2 x double>]* %array, i32 0, i32 %b
store <2 x double> %val, <2 x double>* %ptr, align 16
call void @llvm.amdgcn.s.barrier()
%result = load <2 x double>, <2 x double>* %ptr, align 16
@@ -77,7 +77,7 @@ define void @private_access_v2f64_alloca(<2 x double> addrspace(1)* noalias %out
; SI-PROMOTE: ds_read_b64
; CI-PROMOTE: ds_write_b64
; CI-PROMOTE: ds_read_b64
-define void @private_access_i64_alloca(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i32 %b) #1 {
+define amdgpu_kernel void @private_access_i64_alloca(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i32 %b) #1 {
%val = load i64, i64 addrspace(1)* %in, align 8
%array = alloca [8 x i64], align 8
%ptr = getelementptr inbounds [8 x i64], [8 x i64]* %array, i32 0, i32 %b
@@ -109,10 +109,10 @@ define void @private_access_i64_alloca(i64 addrspace(1)* noalias %out, i64 addrs
; SI-PROMOTE: ds_read_b64
; CI-PROMOTE: ds_write2_b64
; CI-PROMOTE: ds_read2_b64
-define void @private_access_v2i64_alloca(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in, i32 %b) #1 {
+define amdgpu_kernel void @private_access_v2i64_alloca(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in, i32 %b) #1 {
%val = load <2 x i64>, <2 x i64> addrspace(1)* %in, align 16
- %array = alloca [8 x <2 x i64>], align 16
- %ptr = getelementptr inbounds [8 x <2 x i64>], [8 x <2 x i64>]* %array, i32 0, i32 %b
+ %array = alloca [4 x <2 x i64>], align 16
+ %ptr = getelementptr inbounds [4 x <2 x i64>], [4 x <2 x i64>]* %array, i32 0, i32 %b
store <2 x i64> %val, <2 x i64>* %ptr, align 16
call void @llvm.amdgcn.s.barrier()
%result = load <2 x i64>, <2 x i64>* %ptr, align 16
@@ -121,4 +121,4 @@ define void @private_access_v2i64_alloca(<2 x i64> addrspace(1)* noalias %out, <
}
attributes #0 = { convergent nounwind }
-attributes #1 = { nounwind "amdgpu-waves-per-eu"="1,2" "amdgpu-flat-work-group-size"="64,64" }
+attributes #1 = { nounwind "amdgpu-waves-per-eu"="1,2" "amdgpu-flat-work-group-size"="64,128" }
diff --git a/test/CodeGen/AMDGPU/infinite-loop-evergreen.ll b/test/CodeGen/AMDGPU/infinite-loop-evergreen.ll
index 990f33518ab9..7cee8a41c120 100644
--- a/test/CodeGen/AMDGPU/infinite-loop-evergreen.ll
+++ b/test/CodeGen/AMDGPU/infinite-loop-evergreen.ll
@@ -2,7 +2,7 @@
; REQUIRES: asserts
; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck %s
-define void @inf_loop_irreducible_cfg() nounwind {
+define amdgpu_kernel void @inf_loop_irreducible_cfg() nounwind {
entry:
br label %block
diff --git a/test/CodeGen/AMDGPU/infinite-loop.ll b/test/CodeGen/AMDGPU/infinite-loop.ll
index 3e0b695934c7..73482756b8c8 100644
--- a/test/CodeGen/AMDGPU/infinite-loop.ll
+++ b/test/CodeGen/AMDGPU/infinite-loop.ll
@@ -7,7 +7,7 @@
; SI: buffer_store_dword [[REG]]
; SI: s_waitcnt vmcnt(0) expcnt(0)
; SI: s_branch BB0_1
-define void @infinite_loop(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @infinite_loop(i32 addrspace(1)* %out) {
entry:
br label %for.body
diff --git a/test/CodeGen/AMDGPU/inline-asm.ll b/test/CodeGen/AMDGPU/inline-asm.ll
index db1a0c67436d..0d7e07b9a624 100644
--- a/test/CodeGen/AMDGPU/inline-asm.ll
+++ b/test/CodeGen/AMDGPU/inline-asm.ll
@@ -4,7 +4,7 @@
; CHECK-LABEL: {{^}}inline_asm:
; CHECK: s_endpgm
; CHECK: s_endpgm
-define void @inline_asm(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @inline_asm(i32 addrspace(1)* %out) {
entry:
store i32 5, i32 addrspace(1)* %out
call void asm sideeffect "s_endpgm", ""()
@@ -25,7 +25,7 @@ entry:
; Make sure inline assembly is treated as divergent.
; CHECK: s_mov_b32 s{{[0-9]+}}, 0
; CHECK: s_and_saveexec_b64
-define void @branch_on_asm(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @branch_on_asm(i32 addrspace(1)* %out) {
%zero = call i32 asm "s_mov_b32 $0, 0", "=s"()
%cmp = icmp eq i32 %zero, 0
br i1 %cmp, label %if, label %endif
@@ -44,7 +44,7 @@ endif:
; CHECK-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[MASK_LO]]
; CHECK-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[MASK_HI]]
; CHECK: buffer_store_dwordx2 v{{\[}}[[V_LO]]:[[V_HI]]{{\]}}
-define void @v_cmp_asm(i64 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @v_cmp_asm(i64 addrspace(1)* %out, i32 %in) {
%sgpr = tail call i64 asm "v_cmp_ne_u32_e64 $0, 0, $1", "=s,v"(i32 %in)
store i64 %sgpr, i64 addrspace(1)* %out
ret void
@@ -52,7 +52,7 @@ define void @v_cmp_asm(i64 addrspace(1)* %out, i32 %in) {
; CHECK-LABEL: {{^}}code_size_inline_asm:
; CHECK: codeLenInByte = 12
-define void @code_size_inline_asm(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @code_size_inline_asm(i32 addrspace(1)* %out) {
entry:
call void asm sideeffect "v_nop_e64", ""()
ret void
@@ -61,7 +61,7 @@ entry:
; All inlineasm instructions are assumed to be the maximum size
; CHECK-LABEL: {{^}}code_size_inline_asm_small_inst:
; CHECK: codeLenInByte = 12
-define void @code_size_inline_asm_small_inst(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @code_size_inline_asm_small_inst(i32 addrspace(1)* %out) {
entry:
call void asm sideeffect "v_nop_e32", ""()
ret void
@@ -69,7 +69,7 @@ entry:
; CHECK-LABEL: {{^}}code_size_inline_asm_2_inst:
; CHECK: codeLenInByte = 20
-define void @code_size_inline_asm_2_inst(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @code_size_inline_asm_2_inst(i32 addrspace(1)* %out) {
entry:
call void asm sideeffect "
v_nop_e64
@@ -80,7 +80,7 @@ entry:
; CHECK-LABEL: {{^}}code_size_inline_asm_2_inst_extra_newline:
; CHECK: codeLenInByte = 20
-define void @code_size_inline_asm_2_inst_extra_newline(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @code_size_inline_asm_2_inst_extra_newline(i32 addrspace(1)* %out) {
entry:
call void asm sideeffect "
v_nop_e64
@@ -92,7 +92,7 @@ entry:
; CHECK-LABEL: {{^}}code_size_inline_asm_0_inst:
; CHECK: codeLenInByte = 4
-define void @code_size_inline_asm_0_inst(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @code_size_inline_asm_0_inst(i32 addrspace(1)* %out) {
entry:
call void asm sideeffect "", ""()
ret void
@@ -100,7 +100,7 @@ entry:
; CHECK-LABEL: {{^}}code_size_inline_asm_1_comment:
; CHECK: codeLenInByte = 4
-define void @code_size_inline_asm_1_comment(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @code_size_inline_asm_1_comment(i32 addrspace(1)* %out) {
entry:
call void asm sideeffect "; comment", ""()
ret void
@@ -108,7 +108,7 @@ entry:
; CHECK-LABEL: {{^}}code_size_inline_asm_newline_1_comment:
; CHECK: codeLenInByte = 4
-define void @code_size_inline_asm_newline_1_comment(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @code_size_inline_asm_newline_1_comment(i32 addrspace(1)* %out) {
entry:
call void asm sideeffect "
; comment", ""()
@@ -117,7 +117,7 @@ entry:
; CHECK-LABEL: {{^}}code_size_inline_asm_1_comment_newline:
; CHECK: codeLenInByte = 4
-define void @code_size_inline_asm_1_comment_newline(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @code_size_inline_asm_1_comment_newline(i32 addrspace(1)* %out) {
entry:
call void asm sideeffect "; comment
", ""()
@@ -126,7 +126,7 @@ entry:
; CHECK-LABEL: {{^}}code_size_inline_asm_2_comments_line:
; CHECK: codeLenInByte = 4
-define void @code_size_inline_asm_2_comments_line(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @code_size_inline_asm_2_comments_line(i32 addrspace(1)* %out) {
entry:
call void asm sideeffect "; first comment ; second comment", ""()
ret void
@@ -134,7 +134,7 @@ entry:
; CHECK-LABEL: {{^}}code_size_inline_asm_2_comments_line_nospace:
; CHECK: codeLenInByte = 4
-define void @code_size_inline_asm_2_comments_line_nospace(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @code_size_inline_asm_2_comments_line_nospace(i32 addrspace(1)* %out) {
entry:
call void asm sideeffect "; first comment;second comment", ""()
ret void
@@ -142,7 +142,7 @@ entry:
; CHECK-LABEL: {{^}}code_size_inline_asm_mixed_comments0:
; CHECK: codeLenInByte = 20
-define void @code_size_inline_asm_mixed_comments0(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @code_size_inline_asm_mixed_comments0(i32 addrspace(1)* %out) {
entry:
call void asm sideeffect "; comment
v_nop_e64 ; inline comment
@@ -157,7 +157,7 @@ entry:
; CHECK-LABEL: {{^}}code_size_inline_asm_mixed_comments1:
; CHECK: codeLenInByte = 20
-define void @code_size_inline_asm_mixed_comments1(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @code_size_inline_asm_mixed_comments1(i32 addrspace(1)* %out) {
entry:
call void asm sideeffect "v_nop_e64 ; inline comment
; separate comment
@@ -171,7 +171,7 @@ entry:
; CHECK-LABEL: {{^}}code_size_inline_asm_mixed_comments_operands:
; CHECK: codeLenInByte = 20
-define void @code_size_inline_asm_mixed_comments_operands(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @code_size_inline_asm_mixed_comments_operands(i32 addrspace(1)* %out) {
entry:
call void asm sideeffect "; comment
v_add_i32_e32 v0, vcc, v1, v2 ; inline comment
@@ -183,3 +183,52 @@ entry:
", ""()
ret void
}
+
+; FIXME: Should not have intermediate sgprs
+; CHECK-LABEL: {{^}}i64_imm_input_phys_vgpr:
+; CHECK: s_mov_b32 s1, 0
+; CHECK: s_mov_b32 s0, 0x1e240
+; CHECK: v_mov_b32_e32 v0, s0
+; CHECK: v_mov_b32_e32 v1, s1
+; CHECK: use v[0:1]
+define amdgpu_kernel void @i64_imm_input_phys_vgpr() {
+entry:
+ call void asm sideeffect "; use $0 ", "{VGPR0_VGPR1}"(i64 123456)
+ ret void
+}
+
+; CHECK-LABEL: {{^}}i1_imm_input_phys_vgpr:
+; CHECK: v_mov_b32_e32 v0, -1{{$}}
+; CHECK: ; use v0
+define amdgpu_kernel void @i1_imm_input_phys_vgpr() {
+entry:
+ call void asm sideeffect "; use $0 ", "{VGPR0}"(i1 true)
+ ret void
+}
+
+; CHECK-LABEL: {{^}}i1_input_phys_vgpr:
+; CHECK: {{buffer|flat}}_load_ubyte [[LOAD:v[0-9]+]]
+; CHECK: v_and_b32_e32 [[LOAD]], 1, [[LOAD]]
+; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, [[LOAD]]
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK: ; use v0
+define amdgpu_kernel void @i1_input_phys_vgpr() {
+entry:
+ %val = load i1, i1 addrspace(1)* undef
+ call void asm sideeffect "; use $0 ", "{VGPR0}"(i1 %val)
+ ret void
+}
+
+; FIXME: Should be scheduled to shrink vcc
+; CHECK-LABEL: {{^}}i1_input_phys_vgpr_x2:
+; CHECK: v_cmp_eq_u32_e32 vcc, 1, v0
+; CHECK: v_cmp_eq_u32_e64 s[0:1], 1, v1
+; CHECK: v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK: v_cndmask_b32_e64 v1, 0, -1, s[0:1]
+define amdgpu_kernel void @i1_input_phys_vgpr_x2() {
+entry:
+ %val0 = load volatile i1, i1 addrspace(1)* undef
+ %val1 = load volatile i1, i1 addrspace(1)* undef
+ call void asm sideeffect "; use $0 $1 ", "{VGPR0}, {VGPR1}"(i1 %val0, i1 %val1)
+ ret void
+}
diff --git a/test/CodeGen/AMDGPU/inline-calls.ll b/test/CodeGen/AMDGPU/inline-calls.ll
index 4541a902c1b8..f8821f319893 100644
--- a/test/CodeGen/AMDGPU/inline-calls.ll
+++ b/test/CodeGen/AMDGPU/inline-calls.ll
@@ -11,7 +11,7 @@ entry:
; CHECK: {{^}}kernel:
; CHECK-NOT: call
-define void @kernel(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @kernel(i32 addrspace(1)* %out) {
entry:
%tmp0 = call i32 @func(i32 1)
store i32 %tmp0, i32 addrspace(1)* %out
@@ -20,7 +20,7 @@ entry:
; CHECK: {{^}}kernel2:
; CHECK-NOT: call
-define void @kernel2(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @kernel2(i32 addrspace(1)* %out) {
entry:
call void @kernel(i32 addrspace(1)* %out)
ret void
@@ -31,7 +31,7 @@ entry:
; CHECK: {{^}}kernel3:
; CHECK-NOT: call
-define void @kernel3(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @kernel3(i32 addrspace(1)* %out) {
entry:
%tmp0 = call i32 @func_alias(i32 1)
store i32 %tmp0, i32 addrspace(1)* %out
@@ -43,7 +43,7 @@ entry:
; CHECK: {{^}}kernel4:
; CHECK-NOT: call
-define void @kernel4(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @kernel4(i32 addrspace(1)* %out) {
entry:
call void @kernel_alias(i32 addrspace(1)* %out)
ret void
diff --git a/test/CodeGen/AMDGPU/inline-constraints.ll b/test/CodeGen/AMDGPU/inline-constraints.ll
index 1bcbd14009ce..941a1b90dcc1 100644
--- a/test/CodeGen/AMDGPU/inline-constraints.ll
+++ b/test/CodeGen/AMDGPU/inline-constraints.ll
@@ -10,7 +10,7 @@
; GCN: s_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
; GCN: s_load_dwordx8 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
-define void @inline_reg_constraints(i32 addrspace(1)* %ptr) {
+define amdgpu_kernel void @inline_reg_constraints(i32 addrspace(1)* %ptr) {
entry:
%v32 = tail call i32 asm sideeffect "flat_load_dword $0, $1", "=v,v"(i32 addrspace(1)* %ptr)
%v64 = tail call <2 x i32> asm sideeffect "flat_load_dwordx2 $0, $1", "=v,v"(i32 addrspace(1)* %ptr)
@@ -27,7 +27,7 @@ entry:
; GCN: s_mov_b32 m0, -1
; GCN: s_mov_b32 [[COPY_M0:s[0-9]+]], m0
; GCN: ; use [[COPY_M0]]
-define void @inline_sreg_constraint_m0() {
+define amdgpu_kernel void @inline_sreg_constraint_m0() {
%m0 = tail call i32 asm sideeffect "s_mov_b32 m0, -1", "={M0}"()
tail call void asm sideeffect "; use $0", "s"(i32 %m0)
ret void
@@ -36,7 +36,7 @@ define void @inline_sreg_constraint_m0() {
; GCN-LABEL: {{^}}inline_sreg_constraint_imm_i32:
; GCN: s_mov_b32 [[REG:s[0-9]+]], 32
; GCN: ; use [[REG]]
-define void @inline_sreg_constraint_imm_i32() {
+define amdgpu_kernel void @inline_sreg_constraint_imm_i32() {
tail call void asm sideeffect "; use $0", "s"(i32 32)
ret void
}
@@ -44,7 +44,7 @@ define void @inline_sreg_constraint_imm_i32() {
; GCN-LABEL: {{^}}inline_sreg_constraint_imm_f32:
; GCN: s_mov_b32 [[REG:s[0-9]+]], 1.0
; GCN: ; use [[REG]]
-define void @inline_sreg_constraint_imm_f32() {
+define amdgpu_kernel void @inline_sreg_constraint_imm_f32() {
tail call void asm sideeffect "; use $0", "s"(float 1.0)
ret void
}
@@ -54,7 +54,7 @@ define void @inline_sreg_constraint_imm_f32() {
; GCN-DAG: s_mov_b32 s[[REG_LO:[0-9]+]], -4{{$}}
; GCN-DAG: s_mov_b32 s[[REG_HI:[0-9]+]], -1{{$}}
; GCN: ; use s{{\[}}[[REG_LO]]:[[REG_HI]]{{\]}}
-define void @inline_sreg_constraint_imm_i64() {
+define amdgpu_kernel void @inline_sreg_constraint_imm_i64() {
tail call void asm sideeffect "; use $0", "s"(i64 -4)
ret void
}
@@ -63,7 +63,7 @@ define void @inline_sreg_constraint_imm_i64() {
; GCN-DAG: s_mov_b32 s[[REG_LO:[0-9]+]], 0{{$}}
; GCN-DAG: s_mov_b32 s[[REG_HI:[0-9]+]], 0x3ff00000{{$}}
; GCN: ; use s{{\[}}[[REG_LO]]:[[REG_HI]]{{\]}}
-define void @inline_sreg_constraint_imm_f64() {
+define amdgpu_kernel void @inline_sreg_constraint_imm_f64() {
tail call void asm sideeffect "; use $0", "s"(double 1.0)
ret void
}
diff --git a/test/CodeGen/AMDGPU/inlineasm-16.ll b/test/CodeGen/AMDGPU/inlineasm-16.ll
index 75f3158937dc..15e57fe6bffb 100644
--- a/test/CodeGen/AMDGPU/inlineasm-16.ll
+++ b/test/CodeGen/AMDGPU/inlineasm-16.ll
@@ -5,7 +5,7 @@
; GCN-LABEL: {{^}}s_input_output_i16:
; SICI: error: couldn't allocate output register for constraint 's'
; SICI: error: couldn't allocate input reg for constraint 's'
-define void @s_input_output_i16() #0 {
+define amdgpu_kernel void @s_input_output_i16() #0 {
%v = tail call i16 asm sideeffect "s_mov_b32 $0, -1", "=s"()
tail call void asm sideeffect "; use $0", "s"(i16 %v) #0
ret void
@@ -14,7 +14,7 @@ define void @s_input_output_i16() #0 {
; GCN-LABEL: {{^}}v_input_output_i16:
; SICI: error: couldn't allocate output register for constraint 'v'
; SICI: error: couldn't allocate input reg for constraint 'v'
-define void @v_input_output_i16() #0 {
+define amdgpu_kernel void @v_input_output_i16() #0 {
%v = tail call i16 asm sideeffect "v_mov_b32 $0, -1", "=v"() #0
tail call void asm sideeffect "; use $0", "v"(i16 %v)
ret void
@@ -23,7 +23,7 @@ define void @v_input_output_i16() #0 {
; GCN-LABEL: {{^}}s_input_output_f16:
; SICI: error: couldn't allocate output register for constraint 's'
; SICI: error: couldn't allocate input reg for constraint 's'
-define void @s_input_output_f16() #0 {
+define amdgpu_kernel void @s_input_output_f16() #0 {
%v = tail call half asm sideeffect "s_mov_b32 $0, -1", "=s"() #0
tail call void asm sideeffect "; use $0", "s"(half %v)
ret void
@@ -32,7 +32,7 @@ define void @s_input_output_f16() #0 {
; GCN-LABEL: {{^}}v_input_output_f16:
; SICI: error: couldn't allocate output register for constraint 'v'
; SICI: error: couldn't allocate input reg for constraint 'v'
-define void @v_input_output_f16() #0 {
+define amdgpu_kernel void @v_input_output_f16() #0 {
%v = tail call half asm sideeffect "v_mov_b32 $0, -1", "=v"() #0
tail call void asm sideeffect "; use $0", "v"(half %v)
ret void
diff --git a/test/CodeGen/AMDGPU/inlineasm-illegal-type.ll b/test/CodeGen/AMDGPU/inlineasm-illegal-type.ll
index 2eb21f07e0ec..c1d67ba614c6 100644
--- a/test/CodeGen/AMDGPU/inlineasm-illegal-type.ll
+++ b/test/CodeGen/AMDGPU/inlineasm-illegal-type.ll
@@ -3,7 +3,7 @@
; GCN: error: couldn't allocate output register for constraint 's'
; GCN: error: couldn't allocate input reg for constraint 's'
-define void @s_input_output_i8() {
+define amdgpu_kernel void @s_input_output_i8() {
%v = tail call i8 asm sideeffect "s_mov_b32 $0, -1", "=s"()
tail call void asm sideeffect "; use $0", "s"(i8 %v)
ret void
@@ -11,7 +11,7 @@ define void @s_input_output_i8() {
; GCN: error: couldn't allocate output register for constraint 'v'
; GCN: error: couldn't allocate input reg for constraint 'v'
-define void @v_input_output_i8() {
+define amdgpu_kernel void @v_input_output_i8() {
%v = tail call i8 asm sideeffect "v_mov_b32 $0, -1", "=v"()
tail call void asm sideeffect "; use $0", "v"(i8 %v)
ret void
@@ -19,7 +19,7 @@ define void @v_input_output_i8() {
; GCN: error: couldn't allocate output register for constraint 's'
; GCN: error: couldn't allocate input reg for constraint 's'
-define void @s_input_output_i128() {
+define amdgpu_kernel void @s_input_output_i128() {
%v = tail call i128 asm sideeffect "s_mov_b32 $0, -1", "=s"()
tail call void asm sideeffect "; use $0", "s"(i128 %v)
ret void
@@ -27,7 +27,7 @@ define void @s_input_output_i128() {
; GCN: error: couldn't allocate output register for constraint 's'
; GCN: error: couldn't allocate input reg for constraint 's'
-define void @s_input_output_v8f16() {
+define amdgpu_kernel void @s_input_output_v8f16() {
%v = tail call <8 x half> asm sideeffect "s_mov_b32 $0, -1", "=s"()
tail call void asm sideeffect "; use $0", "s"(<8 x half> %v)
ret void
@@ -36,7 +36,7 @@ define void @s_input_output_v8f16() {
; CI: error: couldn't allocate output register for constraint 's'
; CI: error: couldn't allocate input reg for constraint 's'
; VI-NOT: error
-define void @s_input_output_f16() {
+define amdgpu_kernel void @s_input_output_f16() {
%v = tail call half asm sideeffect "s_mov_b32 $0, -1", "=s"()
tail call void asm sideeffect "; use $0", "s"(half %v)
ret void
@@ -44,7 +44,7 @@ define void @s_input_output_f16() {
; GCN: error: couldn't allocate output register for constraint 's'
; GCN: error: couldn't allocate input reg for constraint 's'
-define void @s_input_output_v2f16() {
+define amdgpu_kernel void @s_input_output_v2f16() {
%v = tail call <2 x half> asm sideeffect "s_mov_b32 $0, -1", "=s"()
tail call void asm sideeffect "; use $0", "s"(<2 x half> %v)
ret void
@@ -52,7 +52,7 @@ define void @s_input_output_v2f16() {
; GCN: error: couldn't allocate output register for constraint 'v'
; GCN: error: couldn't allocate input reg for constraint 'v'
-define void @v_input_output_v2f16() {
+define amdgpu_kernel void @v_input_output_v2f16() {
%v = tail call <2 x half> asm sideeffect "v_mov_b32 $0, -1", "=v"()
tail call void asm sideeffect "; use $0", "v"(<2 x half> %v)
ret void
@@ -61,7 +61,7 @@ define void @v_input_output_v2f16() {
; CI: error: couldn't allocate output register for constraint 's'
; CI: error: couldn't allocate input reg for constraint 's'
; VI-NOT: error
-define void @s_input_output_i16() {
+define amdgpu_kernel void @s_input_output_i16() {
%v = tail call i16 asm sideeffect "s_mov_b32 $0, -1", "=s"()
tail call void asm sideeffect "; use $0", "s"(i16 %v)
ret void
@@ -69,14 +69,14 @@ define void @s_input_output_i16() {
; GCN: error: couldn't allocate output register for constraint 's'
; GCN: error: couldn't allocate input reg for constraint 's'
-define void @s_input_output_v2i16() {
+define amdgpu_kernel void @s_input_output_v2i16() {
%v = tail call <2 x i16> asm sideeffect "s_mov_b32 $0, -1", "=s"()
tail call void asm sideeffect "; use $0", "s"(<2 x i16> %v)
ret void
}
; FIXME: Crash in codegen prepare
-; define void @s_input_output_i3() {
+; define amdgpu_kernel void @s_input_output_i3() {
; %v = tail call i3 asm sideeffect "s_mov_b32 $0, -1", "=s"()
; tail call void asm sideeffect "; use $0", "s"(i3 %v)
; ret void
diff --git a/test/CodeGen/AMDGPU/inlineasm-packed.ll b/test/CodeGen/AMDGPU/inlineasm-packed.ll
new file mode 100644
index 000000000000..3c6c7e1d1b42
--- /dev/null
+++ b/test/CodeGen/AMDGPU/inlineasm-packed.ll
@@ -0,0 +1,57 @@
+; RUN: llc -march=amdgcn -mcpu=gfx901 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}inline_asm_input_v2i16:
+; GCN: s_mov_b32 s{{[0-9]+}}, s{{[0-9]+}}
+define amdgpu_kernel void @inline_asm_input_v2i16(i32 addrspace(1)* %out, <2 x i16> %in) #0 {
+entry:
+ %val = call i32 asm "s_mov_b32 $0, $1", "=r,r"(<2 x i16> %in) #0
+ store i32 %val, i32 addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}inline_asm_input_v2f16:
+; GCN: s_mov_b32 s0, s{{[0-9]+}}
+define amdgpu_kernel void @inline_asm_input_v2f16(i32 addrspace(1)* %out, <2 x half> %in) #0 {
+entry:
+ %val = call i32 asm "s_mov_b32 $0, $1", "=r,r"(<2 x half> %in) #0
+ store i32 %val, i32 addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}inline_asm_output_v2i16:
+; GCN: s_mov_b32 s{{[0-9]+}}, s{{[0-9]+}}
+define amdgpu_kernel void @inline_asm_output_v2i16(<2 x i16> addrspace(1)* %out, i32 %in) #0 {
+entry:
+ %val = call <2 x i16> asm "s_mov_b32 $0, $1", "=r,r"(i32 %in) #0
+ store <2 x i16> %val, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}inline_asm_output_v2f16:
+; GCN: v_mov_b32 v{{[0-9]+}}, s{{[0-9]+}}
+define amdgpu_kernel void @inline_asm_output_v2f16(<2 x half> addrspace(1)* %out, i32 %in) #0 {
+entry:
+ %val = call <2 x half> asm "v_mov_b32 $0, $1", "=v,r"(i32 %in) #0
+ store <2 x half> %val, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}inline_asm_packed_v2i16:
+; GCN: v_pk_add_u16 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @inline_asm_packed_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %in0, <2 x i16> %in1) #0 {
+entry:
+ %val = call <2 x i16> asm "v_pk_add_u16 $0, $1, $2", "=v,r,v"(<2 x i16> %in0, <2 x i16> %in1) #0
+ store <2 x i16> %val, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}inline_asm_packed_v2f16:
+; GCN: v_pk_add_f16 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @inline_asm_packed_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %in0, <2 x half> %in1) #0 {
+entry:
+ %val = call <2 x half> asm "v_pk_add_f16 $0, $1, $2", "=v,r,v"(<2 x half> %in0, <2 x half> %in1) #0
+ store <2 x half> %val, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/insert-skips-kill-uncond.mir b/test/CodeGen/AMDGPU/insert-skips-kill-uncond.mir
new file mode 100644
index 000000000000..bd5f296affb5
--- /dev/null
+++ b/test/CodeGen/AMDGPU/insert-skips-kill-uncond.mir
@@ -0,0 +1,40 @@
+# RUN: llc -march=amdgcn -mcpu=polaris10 -run-pass si-insert-skips -amdgpu-skip-threshold=1 %s -o - | FileCheck %s
+# https://bugs.freedesktop.org/show_bug.cgi?id=99019
+--- |
+ define amdgpu_ps void @kill_uncond_branch() {
+ ret void
+ }
+...
+---
+
+# CHECK-LABEL: name: kill_uncond_branch
+
+# CHECK: bb.0:
+# CHECK: S_CBRANCH_VCCNZ %bb.1, implicit %vcc
+
+# CHECK: bb.1:
+# CHECK: V_CMPX_LE_F32_e32
+# CHECK-NEXT: S_CBRANCH_EXECNZ %bb.2, implicit %exec
+
+# CHECK: bb.3:
+# CHECK-NEXT: EXP_DONE
+# CHECK: S_ENDPGM
+
+# CHECK: bb.2:
+# CHECK: S_ENDPGM
+
+name: kill_uncond_branch
+
+body: |
+ bb.0:
+ successors: %bb.1
+ S_CBRANCH_VCCNZ %bb.1, implicit %vcc
+
+ bb.1:
+ successors: %bb.2
+ %vgpr0 = V_MOV_B32_e32 0, implicit %exec
+ SI_KILL_TERMINATOR %vgpr0, implicit-def %exec, implicit-def %vcc, implicit %exec
+ S_BRANCH %bb.2
+
+ bb.2:
+ S_ENDPGM
diff --git a/test/CodeGen/AMDGPU/insert-waits-callee.mir b/test/CodeGen/AMDGPU/insert-waits-callee.mir
new file mode 100644
index 000000000000..ad7cd0cc8abf
--- /dev/null
+++ b/test/CodeGen/AMDGPU/insert-waits-callee.mir
@@ -0,0 +1,25 @@
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs -run-pass si-insert-waits -o - %s | FileCheck %s
+--- |
+ define float @entry_callee_wait(float %arg) #0 {
+ ret float %arg
+ }
+
+ attributes #0 = { nounwind }
+...
+---
+# CHECK-LABEL: name: entry_callee_wait{{$}}
+# CHECK: bb.0:
+# CHECK-NEXT: S_WAITCNT 0{{$}}
+# CHECK-NEXT: V_ADD_F32
+# CHECK-NEXT: S_SETPC_B64
+liveins:
+ - { reg: '%sgpr0_sgpr1' }
+ - { reg: '%vgpr0' }
+
+name: entry_callee_wait
+body: |
+ bb.0:
+ %vgpr0 = V_ADD_F32_e32 %vgpr0, %vgpr0, implicit %exec
+ S_SETPC_B64 killed %sgpr0_sgpr1
+
+...
diff --git a/test/CodeGen/AMDGPU/insert-waits-exp.mir b/test/CodeGen/AMDGPU/insert-waits-exp.mir
index 9aaa374ed28e..1055201ce3dd 100644
--- a/test/CodeGen/AMDGPU/insert-waits-exp.mir
+++ b/test/CodeGen/AMDGPU/insert-waits-exp.mir
@@ -1,18 +1,18 @@
# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-insert-waits -o - %s | FileCheck %s
--- |
- define amdgpu_ps <4 x float> @exp_done_waitcnt(<4 x i32> inreg, <4 x i32> inreg, i32 inreg %w, float %v) {
+ define amdgpu_ps <4 x float> @exp_done_waitcnt(<4 x i32> inreg, <4 x
+ i32> inreg, i32 inreg %w, float %v) #0 {
%a = load volatile float, float addrspace(1)* undef
%b = load volatile float, float addrspace(1)* undef
%c = load volatile float, float addrspace(1)* undef
%d = load volatile float, float addrspace(1)* undef
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %a, float %b, float %c, float %d)
+ call void @llvm.amdgcn.exp.f32(i32 15, i32 1, float %a, float %b, float %c, float %d, i1 true, i1 false)
ret <4 x float> <float 5.000000e-01, float 1.000000e+00, float 2.000000e+00, float 4.000000e+00>
}
- declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+ declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
- attributes #0 = { readnone }
- attributes #1 = { nounwind }
+ attributes #0 = { nounwind }
...
---
@@ -58,6 +58,6 @@ body: |
%vgpr1 = V_MOV_B32_e32 1065353216, implicit %exec
%vgpr2 = V_MOV_B32_e32 1073741824, implicit %exec
%vgpr3 = V_MOV_B32_e32 1082130432, implicit %exec
- SI_RETURN killed %vgpr0, killed %vgpr1, killed %vgpr2, killed %vgpr3
+ SI_RETURN_TO_EPILOG killed %vgpr0, killed %vgpr1, killed %vgpr2, killed %vgpr3
...
diff --git a/test/CodeGen/AMDGPU/insert_subreg.ll b/test/CodeGen/AMDGPU/insert_subreg.ll
index 4a5e8869c2df..e895f27c886d 100644
--- a/test/CodeGen/AMDGPU/insert_subreg.ll
+++ b/test/CodeGen/AMDGPU/insert_subreg.ll
@@ -6,7 +6,7 @@
; Make sure this doesn't crash
; CHECK-LABEL: test:
-define void @test(i64 addrspace(1)* %out) {
+define amdgpu_kernel void @test(i64 addrspace(1)* %out) {
entry:
%tmp0 = alloca [16 x i32]
%tmp1 = ptrtoint [16 x i32]* %tmp0 to i32
diff --git a/test/CodeGen/AMDGPU/insert_vector_elt.ll b/test/CodeGen/AMDGPU/insert_vector_elt.ll
index 65ac693a4f44..6391b6b5407b 100644
--- a/test/CodeGen/AMDGPU/insert_vector_elt.ll
+++ b/test/CodeGen/AMDGPU/insert_vector_elt.ll
@@ -1,5 +1,5 @@
-; RUN: llc -verify-machineinstrs -march=amdgcn -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=GCN-NO-TONGA %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=GCN-TONGA %s
; FIXME: Broken on evergreen
; FIXME: For some reason the 8 and 16 vectors are being stored as
@@ -18,56 +18,56 @@
; GCN-DAG: s_mov_b32 [[CONSTREG:s[0-9]+]], 0x40a00000
; GCN-DAG: v_mov_b32_e32 v[[LOW_REG:[0-9]+]], [[CONSTREG]]
; GCN: buffer_store_dwordx4 v{{\[}}[[LOW_REG]]:
-define void @insertelement_v4f32_0(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
+define amdgpu_kernel void @insertelement_v4f32_0(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
%vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 0
store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
ret void
}
; GCN-LABEL: {{^}}insertelement_v4f32_1:
-define void @insertelement_v4f32_1(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
+define amdgpu_kernel void @insertelement_v4f32_1(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
%vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 1
store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
ret void
}
; GCN-LABEL: {{^}}insertelement_v4f32_2:
-define void @insertelement_v4f32_2(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
+define amdgpu_kernel void @insertelement_v4f32_2(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
%vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 2
store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
ret void
}
; GCN-LABEL: {{^}}insertelement_v4f32_3:
-define void @insertelement_v4f32_3(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
+define amdgpu_kernel void @insertelement_v4f32_3(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
%vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 3
store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
ret void
}
; GCN-LABEL: {{^}}insertelement_v4i32_0:
-define void @insertelement_v4i32_0(<4 x i32> addrspace(1)* %out, <4 x i32> %a) nounwind {
+define amdgpu_kernel void @insertelement_v4i32_0(<4 x i32> addrspace(1)* %out, <4 x i32> %a) nounwind {
%vecins = insertelement <4 x i32> %a, i32 999, i32 0
store <4 x i32> %vecins, <4 x i32> addrspace(1)* %out, align 16
ret void
}
; GCN-LABEL: {{^}}insertelement_v3f32_1:
-define void @insertelement_v3f32_1(<3 x float> addrspace(1)* %out, <3 x float> %a) nounwind {
+define amdgpu_kernel void @insertelement_v3f32_1(<3 x float> addrspace(1)* %out, <3 x float> %a) nounwind {
%vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 1
store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16
ret void
}
; GCN-LABEL: {{^}}insertelement_v3f32_2:
-define void @insertelement_v3f32_2(<3 x float> addrspace(1)* %out, <3 x float> %a) nounwind {
+define amdgpu_kernel void @insertelement_v3f32_2(<3 x float> addrspace(1)* %out, <3 x float> %a) nounwind {
%vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 2
store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16
ret void
}
; GCN-LABEL: {{^}}insertelement_v3f32_3:
-define void @insertelement_v3f32_3(<3 x float> addrspace(1)* %out, <3 x float> %a) nounwind {
+define amdgpu_kernel void @insertelement_v3f32_3(<3 x float> addrspace(1)* %out, <3 x float> %a) nounwind {
%vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 3
store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16
ret void
@@ -78,7 +78,7 @@ define void @insertelement_v3f32_3(<3 x float> addrspace(1)* %out, <3 x float> %
define amdgpu_ps <4 x float> @insertelement_to_sgpr() nounwind {
%tmp = load <4 x i32>, <4 x i32> addrspace(2)* undef
%tmp1 = insertelement <4 x i32> %tmp, i32 0, i32 0
- %tmp2 = call <4 x float> @llvm.SI.gather4.lz.v2i32(<2 x i32> undef, <8 x i32> undef, <4 x i32> %tmp1, i32 8, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp2 = call <4 x float> @llvm.amdgcn.image.gather4.lz.v4f32.v2f32.v8i32(<2 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 false, i1 false, i1 false, i1 false, i1 true)
ret <4 x float> %tmp2
}
@@ -86,7 +86,7 @@ define amdgpu_ps <4 x float> @insertelement_to_sgpr() nounwind {
; GCN: v_mov_b32_e32 [[CONST:v[0-9]+]], 0x40a00000
; GCN: v_movreld_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]]
; GCN: buffer_store_dwordx2 {{v\[}}[[LOW_RESULT_REG]]:
-define void @dynamic_insertelement_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, i32 %b) nounwind {
%vecins = insertelement <2 x float> %a, float 5.000000e+00, i32 %b
store <2 x float> %vecins, <2 x float> addrspace(1)* %out, align 8
ret void
@@ -97,7 +97,7 @@ define void @dynamic_insertelement_v2f32(<2 x float> addrspace(1)* %out, <2 x fl
; GCN: v_movreld_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]]
; GCN-DAG: buffer_store_dwordx2 {{v\[}}[[LOW_RESULT_REG]]:
; GCN-DAG: buffer_store_dword v
-define void @dynamic_insertelement_v3f32(<3 x float> addrspace(1)* %out, <3 x float> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v3f32(<3 x float> addrspace(1)* %out, <3 x float> %a, i32 %b) nounwind {
%vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 %b
store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16
ret void
@@ -107,7 +107,7 @@ define void @dynamic_insertelement_v3f32(<3 x float> addrspace(1)* %out, <3 x fl
; GCN: v_mov_b32_e32 [[CONST:v[0-9]+]], 0x40a00000
; GCN: v_movreld_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]]
; GCN: buffer_store_dwordx4 {{v\[}}[[LOW_RESULT_REG]]:
-define void @dynamic_insertelement_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, i32 %b) nounwind {
%vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 %b
store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
ret void
@@ -117,7 +117,7 @@ define void @dynamic_insertelement_v4f32(<4 x float> addrspace(1)* %out, <4 x fl
; GCN: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
-define void @dynamic_insertelement_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, i32 %b) nounwind {
%vecins = insertelement <8 x float> %a, float 5.000000e+00, i32 %b
store <8 x float> %vecins, <8 x float> addrspace(1)* %out, align 32
ret void
@@ -129,7 +129,7 @@ define void @dynamic_insertelement_v8f32(<8 x float> addrspace(1)* %out, <8 x fl
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
-define void @dynamic_insertelement_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %a, i32 %b) nounwind {
%vecins = insertelement <16 x float> %a, float 5.000000e+00, i32 %b
store <16 x float> %vecins, <16 x float> addrspace(1)* %out, align 64
ret void
@@ -138,7 +138,7 @@ define void @dynamic_insertelement_v16f32(<16 x float> addrspace(1)* %out, <16 x
; GCN-LABEL: {{^}}dynamic_insertelement_v2i32:
; GCN: v_movreld_b32
; GCN: buffer_store_dwordx2
-define void @dynamic_insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, i32 %b) nounwind {
%vecins = insertelement <2 x i32> %a, i32 5, i32 %b
store <2 x i32> %vecins, <2 x i32> addrspace(1)* %out, align 8
ret void
@@ -148,7 +148,7 @@ define void @dynamic_insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32>
; GCN: v_movreld_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], 5
; GCN-DAG: buffer_store_dwordx2 {{v\[}}[[LOW_RESULT_REG]]:
; GCN-DAG: buffer_store_dword v
-define void @dynamic_insertelement_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> %a, i32 %b) nounwind {
%vecins = insertelement <3 x i32> %a, i32 5, i32 %b
store <3 x i32> %vecins, <3 x i32> addrspace(1)* %out, align 16
ret void
@@ -159,7 +159,7 @@ define void @dynamic_insertelement_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32>
; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[SVAL]]
; GCN: v_movreld_b32_e32 v{{[0-9]+}}, [[VVAL]]
; GCN: buffer_store_dwordx4
-define void @dynamic_insertelement_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, i32 %b, i32 %val) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, i32 %b, i32 %val) nounwind {
%vecins = insertelement <4 x i32> %a, i32 %val, i32 %b
store <4 x i32> %vecins, <4 x i32> addrspace(1)* %out, align 16
ret void
@@ -169,7 +169,7 @@ define void @dynamic_insertelement_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32>
; GCN: v_movreld_b32
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
-define void @dynamic_insertelement_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, i32 %b) nounwind {
%vecins = insertelement <8 x i32> %a, i32 5, i32 %b
store <8 x i32> %vecins, <8 x i32> addrspace(1)* %out, align 32
ret void
@@ -181,21 +181,21 @@ define void @dynamic_insertelement_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32>
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
-define void @dynamic_insertelement_v16i32(<16 x i32> addrspace(1)* %out, <16 x i32> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v16i32(<16 x i32> addrspace(1)* %out, <16 x i32> %a, i32 %b) nounwind {
%vecins = insertelement <16 x i32> %a, i32 5, i32 %b
store <16 x i32> %vecins, <16 x i32> addrspace(1)* %out, align 64
ret void
}
; GCN-LABEL: {{^}}dynamic_insertelement_v2i16:
-define void @dynamic_insertelement_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %a, i32 %b) nounwind {
%vecins = insertelement <2 x i16> %a, i16 5, i32 %b
store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out, align 8
ret void
}
; GCN-LABEL: {{^}}dynamic_insertelement_v3i16:
-define void @dynamic_insertelement_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %a, i32 %b) nounwind {
%vecins = insertelement <3 x i16> %a, i16 5, i32 %b
store <3 x i16> %vecins, <3 x i16> addrspace(1)* %out, align 8
ret void
@@ -207,25 +207,22 @@ define void @dynamic_insertelement_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16>
; GCN: buffer_load_ushort v{{[0-9]+}}, off
; GCN: buffer_load_ushort v{{[0-9]+}}, off
-; GCN-DAG: v_mov_b32_e32 [[BASE_FI:v[0-9]+]], 0{{$}}
+; GCN-DAG: v_mov_b32_e32 [[BASE_FI:v[0-9]+]], 8{{$}}
; GCN-DAG: s_and_b32 [[MASK_IDX:s[0-9]+]], s{{[0-9]+}}, 3{{$}}
; GCN-DAG: v_or_b32_e32 [[IDX:v[0-9]+]], [[MASK_IDX]], [[BASE_FI]]{{$}}
-; GCN-DAG: buffer_store_short v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:6
-; GCN-DAG: buffer_store_short v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4
-; GCN-DAG: buffer_store_short v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:2
-; GCN-DAG: buffer_store_short v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+$}}
+; GCN-DAG: buffer_store_short v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:14
+; GCN-DAG: buffer_store_short v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:12
+; GCN-DAG: buffer_store_short v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:10
+; GCN-DAG: buffer_store_short v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:8
; GCN: buffer_store_short v{{[0-9]+}}, [[IDX]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
; GCN: s_waitcnt
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
+; GCN: buffer_load_dwordx2
; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off
-define void @dynamic_insertelement_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, i32 %b) nounwind {
%vecins = insertelement <4 x i16> %a, i16 5, i32 %b
store <4 x i16> %vecins, <4 x i16> addrspace(1)* %out, align 8
ret void
@@ -235,16 +232,17 @@ define void @dynamic_insertelement_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16>
; GCN: buffer_load_ubyte v{{[0-9]+}}, off
; GCN: buffer_load_ubyte v{{[0-9]+}}, off
-; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:1
-; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}{{$}}
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:5
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4
; GCN: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
+; GCN-NO-TONGA: buffer_load_ubyte
+; GCN-NO-TONGA: buffer_load_ubyte
+; GCN-TONGA: buffer_load_ushort
; GCN: buffer_store_short v{{[0-9]+}}, off
-define void @dynamic_insertelement_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> %a, i32 %b) nounwind {
%vecins = insertelement <2 x i8> %a, i8 5, i32 %b
store <2 x i8> %vecins, <2 x i8> addrspace(1)* %out, align 8
ret void
@@ -255,19 +253,19 @@ define void @dynamic_insertelement_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> %a
; GCN: buffer_load_ubyte v{{[0-9]+}}, off
; GCN: buffer_load_ubyte v{{[0-9]+}}, off
-; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:2
-; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:1
-; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}{{$}}
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:5
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:6
-; GCN: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
-
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
+; GCN-NO-TONGA: buffer_load_ubyte
+; GCN-NO-TONGA: buffer_load_ubyte
+; GCN-NO-TONGA: buffer_load_ubyte
+; GCN-TONGA: buffer_load_ushort
+; GCN-TONGA: buffer_load_ubyte
; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off
; GCN-DAG: buffer_store_short v{{[0-9]+}}, off
-define void @dynamic_insertelement_v3i8(<3 x i8> addrspace(1)* %out, <3 x i8> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v3i8(<3 x i8> addrspace(1)* %out, <3 x i8> %a, i32 %b) nounwind {
%vecins = insertelement <3 x i8> %a, i8 5, i32 %b
store <3 x i8> %vecins, <3 x i8> addrspace(1)* %out, align 4
ret void
@@ -279,34 +277,35 @@ define void @dynamic_insertelement_v3i8(<3 x i8> addrspace(1)* %out, <3 x i8> %a
; GCN: buffer_load_ubyte v{{[0-9]+}}, off
; GCN: buffer_load_ubyte v{{[0-9]+}}, off
-; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:3
-; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:2
-; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:1
-; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}{{$}}
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:7
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:6
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:5
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4
; GCN: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
+; GCN-NO-TONGA: buffer_load_ubyte
+; GCN-NO-TONGA: buffer_load_ubyte
+; GCN-NO-TONGA: buffer_load_ubyte
+; GCN-NO-TONGA: buffer_load_ubyte
+; GCN-TONGA: buffer_load_dword
; GCN: buffer_store_dword v{{[0-9]+}}, off
-define void @dynamic_insertelement_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, i32 %b) nounwind {
%vecins = insertelement <4 x i8> %a, i8 5, i32 %b
store <4 x i8> %vecins, <4 x i8> addrspace(1)* %out, align 4
ret void
}
; GCN-LABEL: {{^}}dynamic_insertelement_v8i8:
-define void @dynamic_insertelement_v8i8(<8 x i8> addrspace(1)* %out, <8 x i8> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v8i8(<8 x i8> addrspace(1)* %out, <8 x i8> %a, i32 %b) nounwind {
%vecins = insertelement <8 x i8> %a, i8 5, i32 %b
store <8 x i8> %vecins, <8 x i8> addrspace(1)* %out, align 8
ret void
}
; GCN-LABEL: {{^}}dynamic_insertelement_v16i8:
-define void @dynamic_insertelement_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8> %a, i32 %b) nounwind {
%vecins = insertelement <16 x i8> %a, i8 5, i32 %b
store <16 x i8> %vecins, <16 x i8> addrspace(1)* %out, align 16
ret void
@@ -315,7 +314,7 @@ define void @dynamic_insertelement_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8>
; This test requires handling INSERT_SUBREG in SIFixSGPRCopies. Check that
; the compiler doesn't crash.
; GCN-LABEL: {{^}}insert_split_bb:
-define void @insert_split_bb(<2 x i32> addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a, i32 %b) {
+define amdgpu_kernel void @insert_split_bb(<2 x i32> addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a, i32 %b) {
entry:
%0 = insertelement <2 x i32> undef, i32 %a, i32 0
%1 = icmp eq i32 %a, 0
@@ -362,7 +361,7 @@ endif:
; GCN: buffer_store_dwordx4
; GCN: s_endpgm
-define void @dynamic_insertelement_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, i32 %b) nounwind {
%vecins = insertelement <2 x double> %a, double 8.0, i32 %b
store <2 x double> %vecins, <2 x double> addrspace(1)* %out, align 16
ret void
@@ -375,14 +374,14 @@ define void @dynamic_insertelement_v2f64(<2 x double> addrspace(1)* %out, <2 x d
; GCN: buffer_store_dwordx4
; GCN: s_endpgm
-define void @dynamic_insertelement_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> %a, i32 %b) nounwind {
%vecins = insertelement <2 x i64> %a, i64 5, i32 %b
store <2 x i64> %vecins, <2 x i64> addrspace(1)* %out, align 8
ret void
}
; GCN-LABEL: {{^}}dynamic_insertelement_v3i64:
-define void @dynamic_insertelement_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> %a, i32 %b) nounwind {
%vecins = insertelement <3 x i64> %a, i64 5, i32 %b
store <3 x i64> %vecins, <3 x i64> addrspace(1)* %out, align 32
ret void
@@ -396,15 +395,15 @@ define void @dynamic_insertelement_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64>
; Stack store
-; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}}{{$}}
-; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:16{{$}}
+; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:32{{$}}
+; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:48{{$}}
; Write element
; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
; Stack reload
-; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:16{{$}}
-; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}}{{$}}
+; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:32{{$}}
+; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:48{{$}}
; Store result
; GCN: buffer_store_dwordx4
@@ -412,7 +411,7 @@ define void @dynamic_insertelement_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64>
; GCN: s_endpgm
; GCN: ScratchSize: 64
-define void @dynamic_insertelement_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, i32 %b) nounwind {
%vecins = insertelement <4 x double> %a, double 8.0, i32 %b
store <4 x double> %vecins, <4 x double> addrspace(1)* %out, align 16
ret void
@@ -421,17 +420,17 @@ define void @dynamic_insertelement_v4f64(<4 x double> addrspace(1)* %out, <4 x d
; GCN-LABEL: {{^}}dynamic_insertelement_v8f64:
; GCN-DAG: SCRATCH_RSRC_DWORD
-; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:16{{$}}
-; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:32{{$}}
-; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:48{{$}}
-; GCN: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}}{{$}}
+; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:64{{$}}
+; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:80{{$}}
+; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:96{{$}}
+; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:112{{$}}
; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
-; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:48{{$}}
-; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:32{{$}}
-; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:16{{$}}
-; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}}{{$}}
+; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:64{{$}}
+; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:80{{$}}
+; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:96{{$}}
+; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:112{{$}}
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
@@ -439,10 +438,13 @@ define void @dynamic_insertelement_v4f64(<4 x double> addrspace(1)* %out, <4 x d
; GCN: buffer_store_dwordx4
; GCN: s_endpgm
; GCN: ScratchSize: 128
-define void @dynamic_insertelement_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, i32 %b) nounwind {
+define amdgpu_kernel void @dynamic_insertelement_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, i32 %b) #0 {
%vecins = insertelement <8 x double> %a, double 8.0, i32 %b
store <8 x double> %vecins, <8 x double> addrspace(1)* %out, align 16
ret void
}
-declare <4 x float> @llvm.SI.gather4.lz.v2i32(<2 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) nounwind readnone
+declare <4 x float> @llvm.amdgcn.image.gather4.lz.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll b/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
new file mode 100644
index 000000000000..a3f82b8a0117
--- /dev/null
+++ b/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
@@ -0,0 +1,470 @@
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=gfx901 -enable-amdgpu-aa=0 -mattr=+flat-for-global,-fp64-fp16-denormals < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -check-prefix=GFX89 %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=fiji -enable-amdgpu-aa=0 -mattr=+flat-for-global < %s | FileCheck -check-prefix=GCN -check-prefix=CIVI -check-prefix=VI -check-prefix=GFX89 %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=hawaii -enable-amdgpu-aa=0 -mattr=+flat-for-global < %s | FileCheck -check-prefix=GCN -check-prefix=CIVI -check-prefix=CI %s
+
+; GCN-LABEL: {{^}}s_insertelement_v2i16_0:
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+
+; CIVI: s_and_b32 [[ELT1:s[0-9]+]], [[VEC]], 0xffff0000{{$}}
+; CIVI: s_or_b32 s{{[0-9]+}}, [[ELT1]], 0x3e7{{$}}
+
+; GFX9-NOT: lshr
+; GFX9: s_pack_lh_b32_b16 s{{[0-9]+}}, 0x3e7, [[VEC]]
+define amdgpu_kernel void @s_insertelement_v2i16_0(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr) #0 {
+ %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+ %vecins = insertelement <2 x i16> %vec, i16 999, i32 0
+ store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_insertelement_v2i16_0_reg:
+; GCN: s_load_dword [[ELT0:s[0-9]+]]
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+
+; CIVI-DAG: s_and_b32 [[ELT0]], [[ELT0]], 0xffff{{$}}
+; CIVI-DAG: s_and_b32 [[ELT1:s[0-9]+]], [[VEC]], 0xffff0000{{$}}
+; CIVI: s_or_b32 s{{[0-9]+}}, [[ELT0]], [[ELT1]]
+
+; GFX9-NOT: [[ELT0]]
+; GFX9-NOT: [[VEC]]
+; GFX9: s_pack_lh_b32_b16 s{{[0-9]+}}, [[ELT0]], [[VEC]]
+define amdgpu_kernel void @s_insertelement_v2i16_0_reg(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr, i16 %elt) #0 {
+ %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+ %vecins = insertelement <2 x i16> %vec, i16 %elt, i32 0
+ store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_insertelement_v2i16_0_multi_use_hi_reg:
+; GCN: s_load_dword [[ELT0:s[0-9]+]]
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+
+; CIVI-DAG: s_and_b32 [[ELT0]], [[ELT0]], 0xffff{{$}}
+; CIVI: s_lshr_b32 [[SHR:s[0-9]+]], [[VEC]], 16
+; CIVI: s_lshl_b32 [[ELT1:s[0-9]+]], [[SHR]], 16
+; CIVI-DAG: s_or_b32 s{{[0-9]+}}, [[ELT0]], [[ELT1]]
+; CIVI-DAG: ; use [[SHR]]
+
+; GFX9: s_lshr_b32 [[ELT1:s[0-9]+]], [[VEC]], 16
+; GFX9-DAG: s_pack_ll_b32_b16 s{{[0-9]+}}, [[ELT0]], [[ELT1]]
+; GFX9-DAG: ; use [[ELT1]]
+define amdgpu_kernel void @s_insertelement_v2i16_0_multi_use_hi_reg(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr, i16 %elt) #0 {
+ %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+ %elt1 = extractelement <2 x i16> %vec, i32 1
+ %vecins = insertelement <2 x i16> %vec, i16 %elt, i32 0
+ store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out
+ %use1 = zext i16 %elt1 to i32
+ call void asm sideeffect "; use $0", "s"(i32 %use1) #0
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_insertelement_v2i16_0_reghi:
+; GCN: s_load_dword [[ELT_ARG:s[0-9]+]], s[0:1]
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+
+; CIVI-DAG: s_and_b32 [[ELT1:s[0-9]+]], [[VEC]], 0xffff0000{{$}}
+; CIVI: s_or_b32 s{{[0-9]+}}, [[ELT_ARG]], [[ELT1]]
+
+; GFX9-NOT: [[ELT_ARG]]
+; GFX9-NOT: [[VEC]]
+; GFX9: s_pack_hh_b32_b16 s{{[0-9]+}}, [[ELT_ARG]], [[VEC]]
+define amdgpu_kernel void @s_insertelement_v2i16_0_reghi(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr, i32 %elt.arg) #0 {
+ %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+ %elt.hi = lshr i32 %elt.arg, 16
+ %elt = trunc i32 %elt.hi to i16
+ %vecins = insertelement <2 x i16> %vec, i16 %elt, i32 0
+ store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_insertelement_v2i16_0_reghi_multi_use_1:
+; GCN: s_load_dword [[ELT_ARG:s[0-9]+]], s[0:1]
+; GCN: s_load_dword [[VEC:s[0-9]+]],
+
+; CIVI-DAG: s_and_b32 [[ELT1:s[0-9]+]], [[VEC]], 0xffff0000{{$}}
+; CIVI: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, [[ELT1]]
+
+; GFX9: s_lshr_b32 [[ELT1:s[0-9]+]], [[ELT_ARG]], 16
+; GFX9: s_pack_lh_b32_b16 s{{[0-9]+}}, [[ELT1]], [[VEC]]
+; GFX9: ; use [[ELT1]]
+define amdgpu_kernel void @s_insertelement_v2i16_0_reghi_multi_use_1(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr, i32 %elt.arg) #0 {
+ %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+ %elt.hi = lshr i32 %elt.arg, 16
+ %elt = trunc i32 %elt.hi to i16
+ %vecins = insertelement <2 x i16> %vec, i16 %elt, i32 0
+ store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out
+ %use1 = zext i16 %elt to i32
+ call void asm sideeffect "; use $0", "s"(i32 %use1) #0
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_insertelement_v2i16_0_reghi_both_multi_use_1:
+; GCN: s_load_dword [[ELT_ARG:s[0-9]+]], s[0:1]
+; GCN: s_load_dword [[VEC:s[0-9]+]],
+
+; CIVI-DAG: s_lshr_b32 [[ELT_HI:s[0-9]+]], [[ELT_ARG]], 16
+; CIVI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VEC]], 16
+; CIVI-DAG: s_lshl_b32 [[VEC_HI:s[0-9]+]], [[SHR]], 16
+; CIVI: s_or_b32 s{{[0-9]+}}, [[ELT_HI]], [[VEC_HI]]
+
+; GFX9-DAG: s_lshr_b32 [[ELT_HI:s[0-9]+]], [[ELT_ARG]], 16
+; GFX9-DAG: s_lshr_b32 [[VEC_HI:s[0-9]+]], [[VEC]], 16
+; GFX9: s_pack_ll_b32_b16 s{{[0-9]+}}, [[ELT_HI]], [[VEC_HI]]
+; GFX9: ; use [[ELT_HI]]
+; GFX9: ; use [[VEC_HI]]
+define amdgpu_kernel void @s_insertelement_v2i16_0_reghi_both_multi_use_1(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr, i32 %elt.arg) #0 {
+ %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+ %elt.hi = lshr i32 %elt.arg, 16
+ %elt = trunc i32 %elt.hi to i16
+ %vec.hi = extractelement <2 x i16> %vec, i32 1
+ %vecins = insertelement <2 x i16> %vec, i16 %elt, i32 0
+ store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out
+ %use1 = zext i16 %elt to i32
+ %vec.hi.use1 = zext i16 %vec.hi to i32
+
+ call void asm sideeffect "; use $0", "s"(i32 %use1) #0
+ call void asm sideeffect "; use $0", "s"(i32 %vec.hi.use1) #0
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_insertelement_v2i16_1:
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+
+; GCN-NOT: s_lshr
+
+; CIVI: s_and_b32 [[ELT0:s[0-9]+]], [[VEC]], 0xffff{{$}}
+; CIVI: s_or_b32 [[INS:s[0-9]+]], [[ELT0]], 0x3e70000
+
+; GFX9: s_pack_ll_b32_b16 s{{[0-9]+}}, [[VEC]], 0x3e7
+define amdgpu_kernel void @s_insertelement_v2i16_1(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr) #0 {
+ %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+ %vecins = insertelement <2 x i16> %vec, i16 999, i32 1
+ store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_insertelement_v2i16_1_reg:
+; GCN: s_load_dword [[ELT1:s[0-9]+]]
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+
+; CIVI: s_and_b32 [[ELT0:s[0-9]+]], [[VEC]], 0xffff{{$}}
+; CIVI: s_or_b32 s{{[0-9]+}}, [[ELT0]], [[ELT1]]
+
+; GCN-NOT: s_lshr
+; GFX9: s_pack_ll_b32_b16 s{{[0-9]+}}, [[VEC]], [[ELT1]]
+define amdgpu_kernel void @s_insertelement_v2i16_1_reg(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr, i16 %elt) #0 {
+ %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+ %vecins = insertelement <2 x i16> %vec, i16 %elt, i32 1
+ store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_insertelement_v2f16_0:
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+; CIVI: s_and_b32 [[ELT1:s[0-9]+]], [[VEC:s[0-9]+]], 0xffff0000
+; CIVI: s_or_b32 s{{[0-9]+}}, [[ELT1]], 0x4500
+
+; GFX9: s_lshr_b32 [[ELT1:s[0-9]+]], [[VEC]], 16
+; GFX9: s_pack_ll_b32_b16 s{{[0-9]+}}, 0x4500, [[ELT1]]
+define amdgpu_kernel void @s_insertelement_v2f16_0(<2 x half> addrspace(1)* %out, <2 x half> addrspace(2)* %vec.ptr) #0 {
+ %vec = load <2 x half>, <2 x half> addrspace(2)* %vec.ptr
+ %vecins = insertelement <2 x half> %vec, half 5.000000e+00, i32 0
+ store <2 x half> %vecins, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_insertelement_v2f16_1:
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+; GCN-NOT: s_lshr
+
+; CIVI: s_and_b32 [[ELT0:s[0-9]+]], [[VEC]], 0xffff{{$}}
+; CIVI: s_or_b32 [[INS:s[0-9]+]], [[ELT0]], 0x45000000
+
+; GFX9: s_pack_ll_b32_b16 s{{[0-9]+}}, [[VEC]], 0x4500
+define amdgpu_kernel void @s_insertelement_v2f16_1(<2 x half> addrspace(1)* %out, <2 x half> addrspace(2)* %vec.ptr) #0 {
+ %vec = load <2 x half>, <2 x half> addrspace(2)* %vec.ptr
+ %vecins = insertelement <2 x half> %vec, half 5.000000e+00, i32 1
+ store <2 x half> %vecins, <2 x half> addrspace(1)* %out
+ ret void
+}
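+
+; Bit pattern used by the f16 tests in this file (standard IEEE half
+; encoding, shown only as a reading aid): 5.0 is 1.25 * 2^2, i.e. sign 0,
+; biased exponent 17 (10001b), mantissa 0100000000b, which is 0x4500.
+; Inserting it into element 1 on CI/VI is then a single or with the
+; pre-shifted constant:
+;   result = (vec & 0xffff) | (0x4500 << 16)   ; = 0x45000000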
+
+; GCN-LABEL: {{^}}v_insertelement_v2i16_0:
+; GCN-DAG: flat_load_dword [[VEC:v[0-9]+]]
+; CIVI: v_and_b32_e32 [[ELT1:v[0-9]+]], 0xffff0000, [[VEC]]
+; CIVI: v_or_b32_e32 [[RES:v[0-9]+]], 0x3e7, [[ELT1]]
+
+; GFX9-DAG: s_movk_i32 [[ELT0:s[0-9]+]], 0x3e7{{$}}
+; GFX9-DAG: v_mov_b32_e32 [[MASK:v[0-9]+]], 0xffff{{$}}
+; GFX9: v_bfi_b32 [[RES:v[0-9]+]], [[MASK]], [[ELT0]], [[VEC]]
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+define amdgpu_kernel void @v_insertelement_v2i16_0(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vec = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %vecins = insertelement <2 x i16> %vec, i16 999, i32 0
+ store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2i16_0_reghi:
+; GCN-DAG: flat_load_dword [[VEC:v[0-9]+]]
+; GCN-DAG: s_load_dword [[ELT0:s[0-9]+]]
+
+; CIVI-DAG: s_lshr_b32 [[ELT0_SHIFT:s[0-9]+]], [[ELT0]], 16
+; CIVI-DAG: v_and_b32_e32 [[ELT1:v[0-9]+]], 0xffff0000, [[VEC]]
+; CIVI: v_or_b32_e32 [[RES:v[0-9]+]], [[ELT0_SHIFT]], [[ELT1]]
+
+; GFX9-DAG: v_mov_b32_e32 [[MASK:v[0-9]+]], 0xffff{{$}}
+; GFX9-DAG: v_lshrrev_b32_e64 [[ELT0_SHIFT:v[0-9]+]], 16, [[ELT0]]
+; GFX9: v_and_or_b32 [[RES:v[0-9]+]], [[VEC]], [[MASK]], [[ELT0_SHIFT]]
+
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+define amdgpu_kernel void @v_insertelement_v2i16_0_reghi(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in, i32 %elt.arg) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vec = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %elt.hi = lshr i32 %elt.arg, 16
+ %elt = trunc i32 %elt.hi to i16
+ %vecins = insertelement <2 x i16> %vec, i16 %elt, i32 0
+ store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2i16_0_inlineimm:
+; GCN-DAG: flat_load_dword [[VEC:v[0-9]+]]
+
+; CIVI: v_and_b32_e32 [[ELT1:v[0-9]+]], 0xffff0000, [[VEC]]
+; CIVI: v_or_b32_e32 [[RES:v[0-9]+]], 53, [[ELT1]]
+
+; GFX9-DAG: v_mov_b32_e32 [[MASK:v[0-9]+]], 0xffff{{$}}
+; GFX9: v_bfi_b32 [[RES:v[0-9]+]], [[MASK]], 53, [[VEC]]
+
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+define amdgpu_kernel void @v_insertelement_v2i16_0_inlineimm(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vec = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %vecins = insertelement <2 x i16> %vec, i16 53, i32 0
+ store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
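+
+; v_bfi_b32 is a bitfield insert; assuming the usual operand semantics
+; dst = (s0 & s1) | (~s0 & s2), the mask 0xffff selects the new element
+; for bits [15:0] and keeps the old vector elsewhere. For the inline
+; immediate 53 above:
+;   v_bfi_b32(0xffff, 53, vec) = 53 | (vec & 0xffff0000)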
+
+; FIXME: fold lshl_or c0, c1, v0 -> or (c0 << c1), v0
+
+; GCN-LABEL: {{^}}v_insertelement_v2i16_1:
+; GCN-DAG: flat_load_dword [[VEC:v[0-9]+]]
+; CIVI: v_or_b32_e32 [[RES:v[0-9]+]], 0x3e70000, [[VEC]]
+
+; GFX9-DAG: s_movk_i32 [[K:s[0-9]+]], 0x3e7
+; GFX9-DAG: v_and_b32_e32 [[ELT0:v[0-9]+]], 0xffff, [[VEC]]
+; GFX9: v_lshl_or_b32 [[RES:v[0-9]+]], [[K]], 16, [[ELT0]]
+
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+define amdgpu_kernel void @v_insertelement_v2i16_1(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vec = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %vecins = insertelement <2 x i16> %vec, i16 999, i32 1
+ store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
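+
+; v_lshl_or_b32 computes (s0 << s1) | s2, so the GFX9 sequence above
+; produces (0x3e7 << 16) | (vec & 0xffff), i.e. 0x3e70000 | lo16(vec),
+; the same value the CI/VI v_or_b32 forms directly. That single-or form
+; is what the FIXME above about folding a constant lshl_or refers to.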
+
+; GCN-LABEL: {{^}}v_insertelement_v2i16_1_inlineimm:
+; GCN: flat_load_dword [[VEC:v[0-9]+]]
+; GCN: v_and_b32_e32 [[ELT0:v[0-9]+]], 0xffff, [[VEC]]
+; CIVI: v_or_b32_e32 [[RES:v[0-9]+]], 0xfff10000, [[ELT0]]
+; GFX9: v_lshl_or_b32 [[RES:v[0-9]+]], -15, 16, [[ELT0]]
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+define amdgpu_kernel void @v_insertelement_v2i16_1_inlineimm(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vec = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %vecins = insertelement <2 x i16> %vec, i16 -15, i32 1
+ store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2f16_0:
+; GCN-DAG: flat_load_dword [[VEC:v[0-9]+]]
+
+; CIVI: v_and_b32_e32 [[ELT1:v[0-9]+]], 0xffff0000, [[VEC]]
+; CIVI: v_or_b32_e32 [[RES:v[0-9]+]], 0x4500, [[ELT1]]
+
+; GFX9-DAG: v_mov_b32_e32 [[ELT0:v[0-9]+]], 0x4500{{$}}
+; GFX9-DAG: v_lshrrev_b32_e32 [[ELT1:v[0-9]+]], 16, [[VEC]]
+; GFX9: v_lshl_or_b32 [[RES:v[0-9]+]], [[ELT1]], 16, [[ELT0]]
+
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+define amdgpu_kernel void @v_insertelement_v2f16_0(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i64 %tid.ext
+ %vec = load <2 x half>, <2 x half> addrspace(1)* %in.gep
+ %vecins = insertelement <2 x half> %vec, half 5.000000e+00, i32 0
+ store <2 x half> %vecins, <2 x half> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2f16_0_inlineimm:
+; GCN: flat_load_dword [[VEC:v[0-9]+]]
+
+; CIVI: v_and_b32_e32 [[ELT1:v[0-9]+]], 0xffff0000, [[VEC]]
+; CIVI: v_or_b32_e32 [[RES:v[0-9]+]], 53, [[ELT1]]
+
+; GFX9: v_lshrrev_b32_e32 [[ELT1:v[0-9]+]], 16, [[VEC]]
+; GFX9: v_lshl_or_b32 [[RES:v[0-9]+]], [[ELT1]], 16, 53
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+define amdgpu_kernel void @v_insertelement_v2f16_0_inlineimm(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i64 %tid.ext
+ %vec = load <2 x half>, <2 x half> addrspace(1)* %in.gep
+ %vecins = insertelement <2 x half> %vec, half 0xH0035, i32 0
+ store <2 x half> %vecins, <2 x half> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2f16_1:
+; GCN-DAG: flat_load_dword [[VEC:v[0-9]+]]
+; CIVI: v_or_b32_e32 [[RES:v[0-9]+]], 0x45000000, [[VEC]]
+
+; GFX9-DAG: s_movk_i32 [[K:s[0-9]+]], 0x4500
+; GFX9-DAG: v_and_b32_e32 [[ELT0:v[0-9]+]], 0xffff, [[VEC]]
+; GFX9: v_lshl_or_b32 [[RES:v[0-9]+]], [[K]], 16, [[ELT0]]
+
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+define amdgpu_kernel void @v_insertelement_v2f16_1(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i64 %tid.ext
+ %vec = load <2 x half>, <2 x half> addrspace(1)* %in.gep
+ %vecins = insertelement <2 x half> %vec, half 5.000000e+00, i32 1
+ store <2 x half> %vecins, <2 x half> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2f16_1_inlineimm:
+; GCN: flat_load_dword [[VEC:v[0-9]+]]
+; GCN: v_and_b32_e32 [[ELT0:v[0-9]+]], 0xffff, [[VEC]]
+; CIVI: v_or_b32_e32 [[RES:v[0-9]+]], 0x230000, [[ELT0]]
+; GFX9: v_lshl_or_b32 [[RES:v[0-9]+]], 35, 16, [[ELT0]]
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+define amdgpu_kernel void @v_insertelement_v2f16_1_inlineimm(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i64 %tid.ext
+ %vec = load <2 x half>, <2 x half> addrspace(1)* %in.gep
+ %vecins = insertelement <2 x half> %vec, half 0xH0023, i32 1
+ store <2 x half> %vecins, <2 x half> addrspace(1)* %out.gep
+ ret void
+}
+
+; FIXME: Enable for other cases when the argument load is not split
+; GCN-LABEL: {{^}}s_insertelement_v2i16_dynamic:
+; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e7
+; GCN: s_load_dword [[IDX:s[0-9]+]]
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 [[VVEC:v[0-9]+]], [[VEC]]
+; GCN-DAG: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 16
+; GCN-DAG: s_lshl_b32 [[MASK:s[0-9]+]], 0xffff, [[SCALED_IDX]]
+; GCN: v_bfi_b32 [[RESULT:v[0-9]+]], [[MASK]], [[K]], [[VVEC]]
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define amdgpu_kernel void @s_insertelement_v2i16_dynamic(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr, i32 addrspace(2)* %idx.ptr) #0 {
+ %idx = load volatile i32, i32 addrspace(2)* %idx.ptr
+ %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+ %vecins = insertelement <2 x i16> %vec, i16 999, i32 %idx
+ store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out
+ ret void
+}
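+
+; Sketch of the dynamic-index scheme checked above: a 0xffff mask is
+; shifted to the selected 16-bit half and v_bfi_b32 (assumed semantics
+; dst = (s0 & s1) | (~s0 & s2)) merges the constant in while keeping
+; the other half. For index 0:
+;   v_bfi_b32(0x0000ffff, 0x3e7, vec) = 0x3e7 | (vec & 0xffff0000)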
+
+; GCN-LABEL: {{^}}v_insertelement_v2i16_dynamic_sgpr:
+; GCN-DAG: flat_load_dword [[VEC:v[0-9]+]]
+; GCN-DAG: s_load_dword [[IDX:s[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e7
+; GCN-DAG: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 16
+; GCN-DAG: s_lshl_b32 [[MASK:s[0-9]+]], 0xffff, [[SCALED_IDX]]
+; GCN: v_bfi_b32 [[RESULT:v[0-9]+]], [[MASK]], [[K]], [[VEC]]
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define amdgpu_kernel void @v_insertelement_v2i16_dynamic_sgpr(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in, i32 %idx) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vec = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %vecins = insertelement <2 x i16> %vec, i16 999, i32 %idx
+ store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2i16_dynamic_vgpr:
+; GCN: flat_load_dword [[IDX:v[0-9]+]]
+; GCN: flat_load_dword [[VEC:v[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e7
+
+; GFX89-DAG: s_mov_b32 [[MASKK:s[0-9]+]], 0xffff{{$}}
+; GFX89-DAG: v_lshlrev_b32_e32 [[SCALED_IDX:v[0-9]+]], 16, [[IDX]]
+; GFX89-DAG: v_lshlrev_b32_e64 [[MASK:v[0-9]+]], [[SCALED_IDX]], [[MASKK]]
+
+; CI-DAG: v_lshlrev_b32_e32 [[SCALED_IDX:v[0-9]+]], 16, [[IDX]]
+; CI-DAG: v_lshl_b32_e32 [[MASK:v[0-9]+]], 0xffff, [[SCALED_IDX]]
+
+; GCN: v_bfi_b32 [[RESULT:v[0-9]+]], [[MASK]], [[K]], [[VEC]]
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define amdgpu_kernel void @v_insertelement_v2i16_dynamic_vgpr(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in, i32 addrspace(1)* %idx.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %idx.gep = getelementptr inbounds i32, i32 addrspace(1)* %idx.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %idx = load i32, i32 addrspace(1)* %idx.gep
+ %vec = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %vecins = insertelement <2 x i16> %vec, i16 999, i32 %idx
+ store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2f16_dynamic_vgpr:
+; GCN: flat_load_dword [[IDX:v[0-9]+]]
+; GCN: flat_load_dword [[VEC:v[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x1234
+
+; GFX89-DAG: s_mov_b32 [[MASKK:s[0-9]+]], 0xffff{{$}}
+; GFX89-DAG: v_lshlrev_b32_e32 [[SCALED_IDX:v[0-9]+]], 16, [[IDX]]
+; GFX89-DAG: v_lshlrev_b32_e64 [[MASK:v[0-9]+]], [[SCALED_IDX]], [[MASKK]]
+
+; CI-DAG: v_lshlrev_b32_e32 [[SCALED_IDX:v[0-9]+]], 16, [[IDX]]
+; CI-DAG: v_lshl_b32_e32 [[MASK:v[0-9]+]], 0xffff, [[SCALED_IDX]]
+
+; GCN: v_bfi_b32 [[RESULT:v[0-9]+]], [[MASK]], [[K]], [[VEC]]
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define amdgpu_kernel void @v_insertelement_v2f16_dynamic_vgpr(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in, i32 addrspace(1)* %idx.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %in, i64 %tid.ext
+ %idx.gep = getelementptr inbounds i32, i32 addrspace(1)* %idx.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i64 %tid.ext
+ %idx = load i32, i32 addrspace(1)* %idx.gep
+ %vec = load <2 x half>, <2 x half> addrspace(1)* %in.gep
+ %vecins = insertelement <2 x half> %vec, half 0xH1234, i32 %idx
+ store <2 x half> %vecins, <2 x half> addrspace(1)* %out.gep
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/inserted-wait-states.mir b/test/CodeGen/AMDGPU/inserted-wait-states.mir
index 85cd903a405d..1479303712d0 100644
--- a/test/CodeGen/AMDGPU/inserted-wait-states.mir
+++ b/test/CodeGen/AMDGPU/inserted-wait-states.mir
@@ -1,14 +1,46 @@
# RUN: llc -march=amdgcn -mcpu=tahiti -run-pass post-RA-hazard-rec %s -o - | FileCheck %s -check-prefixes=GCN
# RUN: llc -march=amdgcn -mcpu=hawaii -run-pass post-RA-hazard-rec %s -o - | FileCheck %s -check-prefixes=GCN,CIVI
# RUN: llc -march=amdgcn -mcpu=fiji -run-pass post-RA-hazard-rec %s -o - | FileCheck %s -check-prefixes=GCN,CIVI,VI
+# RUN: llc -march=amdgcn -mcpu=gfx900 -run-pass post-RA-hazard-rec %s -o - | FileCheck %s -check-prefixes=GCN,CIVI,VI,GFX9
--- |
- define void @div_fmas() { ret void }
- define void @s_getreg() { ret void }
- define void @s_setreg() { ret void }
- define void @vmem_gt_8dw_store() { ret void }
- define void @readwrite_lane() { ret void }
- define void @rfe() { ret void }
+ define amdgpu_kernel void @div_fmas() { ret void }
+ define amdgpu_kernel void @s_getreg() { ret void }
+ define amdgpu_kernel void @s_setreg() { ret void }
+ define amdgpu_kernel void @vmem_gt_8dw_store() { ret void }
+ define amdgpu_kernel void @readwrite_lane() { ret void }
+ define amdgpu_kernel void @rfe() { ret void }
+ define amdgpu_kernel void @s_mov_fed_b32() { ret void }
+ define amdgpu_kernel void @s_movrel() { ret void }
+ define amdgpu_kernel void @v_interp() { ret void }
+
+ define amdgpu_kernel void @mov_fed_hazard_crash_on_dbg_value(i32 addrspace(1)* %A) {
+ entry:
+ %A.addr = alloca i32 addrspace(1)*, align 4
+ store i32 addrspace(1)* %A, i32 addrspace(1)** %A.addr, align 4
+ call void @llvm.dbg.declare(metadata i32 addrspace(1)** %A.addr, metadata !5, metadata !11), !dbg !12
+ ret void
+ }
+
+ declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+ !llvm.dbg.cu = !{!0}
+ !llvm.module.flags = !{!3, !4}
+
+ !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.9.0 (trunk 268929)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+ !1 = !DIFile(filename: "test01.cl", directory: "/dev/null")
+ !2 = !{}
+ !3 = !{i32 2, !"Dwarf Version", i32 2}
+ !4 = !{i32 2, !"Debug Info Version", i32 3}
+ !5 = !DILocalVariable(name: "A", arg: 1, scope: !6, file: !1, line: 1, type: !9)
+ !6 = distinct !DISubprogram(name: "test", scope: !1, file: !1, line: 1, type: !7, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+ !7 = !DISubroutineType(types: !8)
+ !8 = !{null, !9}
+ !9 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !10, size: 64, align: 32)
+ !10 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
+ !11 = !DIExpression()
+ !12 = !DILocation(line: 1, column: 30, scope: !6)
+
...
---
# GCN-LABEL: name: div_fmas
@@ -331,3 +363,185 @@ body: |
S_ENDPGM
...
+
+...
+---
+
+# GCN-LABEL: name: s_mov_fed_b32
+
+# GCN-LABEL: bb.0:
+# GCN: S_MOV_FED_B32
+# GFX9: S_NOP
+# GCN-NEXT: S_MOV_B32
+
+# GCN-LABEL: bb.1:
+# GCN: S_MOV_FED_B32
+# GFX9: S_NOP
+# GCN-NEXT: V_MOV_B32
+name: s_mov_fed_b32
+
+body: |
+ bb.0:
+ successors: %bb.1
+ %sgpr0 = S_MOV_FED_B32 %sgpr0
+ %sgpr0 = S_MOV_B32 %sgpr0
+ S_BRANCH %bb.1
+
+ bb.1:
+ %sgpr0 = S_MOV_FED_B32 %sgpr0
+ %vgpr0 = V_MOV_B32_e32 %sgpr0, implicit %exec
+ S_ENDPGM
+
+...
+
+...
+---
+
+# GCN-LABEL: name: s_movrel
+
+# GCN-LABEL: bb.0:
+# GCN: S_MOV_B32
+# GFX9: S_NOP
+# GCN-NEXT: S_MOVRELS_B32
+
+# GCN-LABEL: bb.1:
+# GCN: S_MOV_B32
+# GFX9: S_NOP
+# GCN-NEXT: S_MOVRELS_B64
+
+# GCN-LABEL: bb.2:
+# GCN: S_MOV_B32
+# GFX9: S_NOP
+# GCN-NEXT: S_MOVRELD_B32
+
+# GCN-LABEL: bb.3:
+# GCN: S_MOV_B32
+# GFX9: S_NOP
+# GCN-NEXT: S_MOVRELD_B64
+
+name: s_movrel
+
+body: |
+ bb.0:
+ successors: %bb.1
+ %m0 = S_MOV_B32 0
+ %sgpr0 = S_MOVRELS_B32 %sgpr0, implicit %m0
+ S_BRANCH %bb.1
+
+ bb.1:
+ successors: %bb.2
+ %m0 = S_MOV_B32 0
+ %sgpr0_sgpr1 = S_MOVRELS_B64 %sgpr0_sgpr1, implicit %m0
+ S_BRANCH %bb.2
+
+ bb.2:
+ successors: %bb.3
+ %m0 = S_MOV_B32 0
+ %sgpr0 = S_MOVRELD_B32 %sgpr0, implicit %m0
+ S_BRANCH %bb.3
+
+ bb.3:
+ %m0 = S_MOV_B32 0
+ %sgpr0_sgpr1 = S_MOVRELD_B64 %sgpr0_sgpr1, implicit %m0
+ S_ENDPGM
+...
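+
+# The s_movrel blocks above all share one shape: an S_MOV_B32 writes m0
+# and the very next instruction reads it (S_MOVRELS/S_MOVRELD here). The
+# checks expect the post-RA hazard recognizer to break that pair on GFX9
+# by inserting an S_NOP, and to leave older targets untouched; roughly:
+#   s_mov_b32 m0, 0
+#   s_nop 0              (expected on GFX9 only)
+#   s_movrels_b32 s0, s0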
+
+...
+---
+
+# GCN-LABEL: name: v_interp
+
+# GCN-LABEL: bb.0:
+# GCN: S_MOV_B32
+# GFX9: S_NOP
+# GCN-NEXT: V_INTERP_P1_F32
+
+# GCN-LABEL: bb.1:
+# GCN: S_MOV_B32
+# GFX9: S_NOP
+# GCN-NEXT: V_INTERP_P2_F32
+
+# GCN-LABEL: bb.2:
+# GCN: S_MOV_B32
+# GFX9: S_NOP
+# GCN-NEXT: V_INTERP_P1_F32_16bank
+
+# GCN-LABEL: bb.3:
+# GCN: S_MOV_B32
+# GFX9: S_NOP
+# GCN-NEXT: V_INTERP_MOV_F32
+
+name: v_interp
+
+body: |
+ bb.0:
+ successors: %bb.1
+ %m0 = S_MOV_B32 0
+ %vgpr0 = V_INTERP_P1_F32 %vgpr0, 0, 0, implicit %m0, implicit %exec
+ S_BRANCH %bb.1
+
+ bb.1:
+ successors: %bb.2
+ %m0 = S_MOV_B32 0
+ %vgpr0 = V_INTERP_P2_F32 %vgpr0, %vgpr1, 0, 0, implicit %m0, implicit %exec
+ S_BRANCH %bb.2
+
+ bb.2:
+ successors: %bb.3
+ %m0 = S_MOV_B32 0
+ %vgpr0 = V_INTERP_P1_F32_16bank %vgpr0, 0, 0, implicit %m0, implicit %exec
+ S_BRANCH %bb.3
+
+ bb.3:
+ %m0 = S_MOV_B32 0
+ %vgpr0 = V_INTERP_MOV_F32 0, 0, 0, implicit %m0, implicit %exec
+ S_ENDPGM
+...
+---
+name: mov_fed_hazard_crash_on_dbg_value
+alignment: 0
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%sgpr4_sgpr5' }
+ - { reg: '%sgpr6_sgpr7' }
+ - { reg: '%sgpr9' }
+ - { reg: '%sgpr0_sgpr1_sgpr2_sgpr3' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 16
+ offsetAdjustment: 0
+ maxAlignment: 8
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+stack:
+ - { id: 0, name: A.addr, offset: 0, size: 8, alignment: 8, local-offset: 0 }
+ - { id: 1, offset: 8, size: 4, alignment: 4 }
+body: |
+ bb.0.entry:
+ liveins: %sgpr4_sgpr5, %sgpr6_sgpr7, %sgpr9, %sgpr0_sgpr1_sgpr2_sgpr3
+
+ %flat_scr_lo = S_ADD_U32 %sgpr6, %sgpr9, implicit-def %scc
+ %flat_scr_hi = S_ADDC_U32 %sgpr7, 0, implicit-def %scc, implicit %scc
+ DBG_VALUE _, 2, !5, !11, debug-location !12
+ %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed %sgpr4_sgpr5, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ dead %sgpr6_sgpr7 = KILL %sgpr4_sgpr5
+ %sgpr8 = S_MOV_B32 %sgpr5
+ %vgpr0 = V_MOV_B32_e32 killed %sgpr8, implicit %exec
+ BUFFER_STORE_DWORD_OFFSET %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr9, 4, 0, 0, 0, implicit %exec :: (store 4 into %ir.A.addr + 4)
+ %sgpr8 = S_MOV_B32 %sgpr4, implicit killed %sgpr4_sgpr5
+ %vgpr0 = V_MOV_B32_e32 killed %sgpr8, implicit %exec
+ BUFFER_STORE_DWORD_OFFSET %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr9, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.A.addr)
+ S_ENDPGM
+
+...
diff --git a/test/CodeGen/AMDGPU/internalize.ll b/test/CodeGen/AMDGPU/internalize.ll
new file mode 100644
index 000000000000..968b1d326a76
--- /dev/null
+++ b/test/CodeGen/AMDGPU/internalize.ll
@@ -0,0 +1,35 @@
+; RUN: opt -O1 -S -mtriple=amdgcn-unknown-amdhsa -amdgpu-internalize-symbols < %s | FileCheck %s
+; CHECK-NOT: unused
+; CHECK-NOT: foo_used
+; CHECK: gvar_used
+; CHECK: main_kernel
+
+@gvar_unused = addrspace(1) global i32 undef, align 4
+@gvar_used = addrspace(1) global i32 undef, align 4
+
+; Function Attrs: alwaysinline nounwind
+define amdgpu_kernel void @foo_unused(i32 addrspace(1)* %out) local_unnamed_addr #1 {
+entry:
+ store i32 1, i32 addrspace(1)* %out
+ ret void
+}
+
+; Function Attrs: alwaysinline nounwind
+define amdgpu_kernel void @foo_used(i32 addrspace(1)* %out, i32 %tid) local_unnamed_addr #1 {
+entry:
+ store i32 %tid, i32 addrspace(1)* %out
+ ret void
+}
+
+define amdgpu_kernel void @main_kernel() {
+entry:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ tail call void @foo_used(i32 addrspace(1)* @gvar_used, i32 %tid) nounwind
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+attributes #0 = { nounwind readnone }
+
+attributes #1 = { alwaysinline nounwind }
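+
+; What this test exercises, in rough terms: with
+; -amdgpu-internalize-symbols, non-kernel symbols are given internal
+; linkage, so under -O1 foo_unused and gvar_unused have no users and are
+; deleted, foo_used is inlined into main_kernel (alwaysinline) and then
+; dropped as well, and only gvar_used and the kernel itself survive,
+; which is what the checks at the top assert.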
diff --git a/test/CodeGen/AMDGPU/invalid-addrspacecast.ll b/test/CodeGen/AMDGPU/invalid-addrspacecast.ll
index c29434f5eca2..31f2fbc919aa 100644
--- a/test/CodeGen/AMDGPU/invalid-addrspacecast.ll
+++ b/test/CodeGen/AMDGPU/invalid-addrspacecast.ll
@@ -1,7 +1,7 @@
; RUN: not llc -march=amdgcn -mcpu=bonaire -mattr=-promote-alloca < %s 2>&1 | FileCheck -check-prefix=ERROR %s
; ERROR: error: <unknown>:0:0: in function use_group_to_global_addrspacecast void (i32 addrspace(3)*): invalid addrspacecast
-define void @use_group_to_global_addrspacecast(i32 addrspace(3)* %ptr) {
+define amdgpu_kernel void @use_group_to_global_addrspacecast(i32 addrspace(3)* %ptr) {
%stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(1)*
store volatile i32 0, i32 addrspace(1)* %stof
ret void
diff --git a/test/CodeGen/AMDGPU/invalid-opencl-version-metadata1.ll b/test/CodeGen/AMDGPU/invalid-opencl-version-metadata1.ll
deleted file mode 100644
index 49c314fbc5d0..000000000000
--- a/test/CodeGen/AMDGPU/invalid-opencl-version-metadata1.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc -mtriple=amdgcn--amdhsa -filetype=obj -o - < %s | llvm-readobj -amdgpu-runtime-metadata | FileCheck %s
-; check llc does not crash for invalid opencl version metadata
-
-; CHECK: { amd.MDVersion: [ 2, 0 ] }
-
-!opencl.ocl.version = !{}
diff --git a/test/CodeGen/AMDGPU/invalid-opencl-version-metadata2.ll b/test/CodeGen/AMDGPU/invalid-opencl-version-metadata2.ll
deleted file mode 100644
index 1f5e8be531dc..000000000000
--- a/test/CodeGen/AMDGPU/invalid-opencl-version-metadata2.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc -mtriple=amdgcn--amdhsa -filetype=obj -o - < %s | llvm-readobj -amdgpu-runtime-metadata | FileCheck %s
-; check llc does not crash for invalid opencl version metadata
-
-; CHECK: { amd.MDVersion: [ 2, 0 ] }
-
-!opencl.ocl.version = !{!0}
-!0 = !{}
diff --git a/test/CodeGen/AMDGPU/invalid-opencl-version-metadata3.ll b/test/CodeGen/AMDGPU/invalid-opencl-version-metadata3.ll
deleted file mode 100644
index b77551e268a0..000000000000
--- a/test/CodeGen/AMDGPU/invalid-opencl-version-metadata3.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc -mtriple=amdgcn--amdhsa -filetype=obj -o - < %s | llvm-readobj -amdgpu-runtime-metadata | FileCheck %s
-; check llc does not crash for invalid opencl version metadata
-
-; CHECK: { amd.MDVersion: [ 2, 0 ] }
-
-!opencl.ocl.version = !{!0}
-!0 = !{i32 1}
diff --git a/test/CodeGen/AMDGPU/invariant-load-no-alias-store.ll b/test/CodeGen/AMDGPU/invariant-load-no-alias-store.ll
index 45a061067cfc..5cd965d2fa9c 100644
--- a/test/CodeGen/AMDGPU/invariant-load-no-alias-store.ll
+++ b/test/CodeGen/AMDGPU/invariant-load-no-alias-store.ll
@@ -10,7 +10,7 @@
; GCN-DAG: buffer_load_dwordx2 [[PTR:v\[[0-9]+:[0-9]+\]]],
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x1c8007b
; GCN: buffer_store_dword [[K]], [[PTR]]
-define void @test_merge_store_constant_i16_invariant_global_pointer_load(i16 addrspace(1)* addrspace(1)* dereferenceable(4096) nonnull %in) #0 {
+define amdgpu_kernel void @test_merge_store_constant_i16_invariant_global_pointer_load(i16 addrspace(1)* addrspace(1)* dereferenceable(4096) nonnull %in) #0 {
%ptr = load i16 addrspace(1)*, i16 addrspace(1)* addrspace(1)* %in, !invariant.load !0
%ptr.1 = getelementptr i16, i16 addrspace(1)* %ptr, i64 1
store i16 123, i16 addrspace(1)* %ptr, align 4
@@ -22,7 +22,7 @@ define void @test_merge_store_constant_i16_invariant_global_pointer_load(i16 add
; GCN: s_load_dwordx2 s{{\[}}[[SPTR_LO:[0-9]+]]:[[SPTR_HI:[0-9]+]]{{\]}}
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x1c8007b
; GCN: buffer_store_dword [[K]], off, s{{\[}}[[SPTR_LO]]:
-define void @test_merge_store_constant_i16_invariant_constant_pointer_load(i16 addrspace(1)* addrspace(2)* dereferenceable(4096) nonnull %in) #0 {
+define amdgpu_kernel void @test_merge_store_constant_i16_invariant_constant_pointer_load(i16 addrspace(1)* addrspace(2)* dereferenceable(4096) nonnull %in) #0 {
%ptr = load i16 addrspace(1)*, i16 addrspace(1)* addrspace(2)* %in, !invariant.load !0
%ptr.1 = getelementptr i16, i16 addrspace(1)* %ptr, i64 1
store i16 123, i16 addrspace(1)* %ptr, align 4
diff --git a/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir b/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
index 66182d092895..bc1dafe0ea1e 100644
--- a/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
+++ b/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
@@ -1,7 +1,7 @@
# RUN: llc -run-pass block-placement -march=amdgcn -verify-machineinstrs -o - %s | FileCheck %s
--- |
- define void @invert_br_undef_vcc(float %cond, i32 addrspace(1)* %out) #0 {
+ define amdgpu_kernel void @invert_br_undef_vcc(float %cond, i32 addrspace(1)* %out) #0 {
entry:
br i1 undef, label %if, label %else, !structurizecfg.uniform !0, !amdgpu.uniform !0
diff --git a/test/CodeGen/AMDGPU/kcache-fold.ll b/test/CodeGen/AMDGPU/kcache-fold.ll
index 43448fbd7b33..37dd977ae216 100644
--- a/test/CodeGen/AMDGPU/kcache-fold.ll
+++ b/test/CodeGen/AMDGPU/kcache-fold.ll
@@ -1,100 +1,112 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s
; CHECK: {{^}}main1:
; CHECK: MOV * T{{[0-9]+\.[XYZW], KC0}}
-define void @main1() {
+define amdgpu_kernel void @main1() #0 {
main_body:
- %0 = load <4 x float>, <4 x float> addrspace(8)* null
- %1 = extractelement <4 x float> %0, i32 0
- %2 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
- %3 = extractelement <4 x float> %2, i32 0
- %4 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
- %5 = extractelement <4 x float> %4, i32 0
- %6 = fcmp ogt float %1, 0.000000e+00
- %7 = select i1 %6, float %3, float %5
- %8 = load <4 x float>, <4 x float> addrspace(8)* null
- %9 = extractelement <4 x float> %8, i32 1
- %10 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
- %11 = extractelement <4 x float> %10, i32 1
- %12 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
- %13 = extractelement <4 x float> %12, i32 1
- %14 = fcmp ogt float %9, 0.000000e+00
- %15 = select i1 %14, float %11, float %13
- %16 = load <4 x float>, <4 x float> addrspace(8)* null
- %17 = extractelement <4 x float> %16, i32 2
- %18 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
- %19 = extractelement <4 x float> %18, i32 2
- %20 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
- %21 = extractelement <4 x float> %20, i32 2
- %22 = fcmp ogt float %17, 0.000000e+00
- %23 = select i1 %22, float %19, float %21
- %24 = load <4 x float>, <4 x float> addrspace(8)* null
- %25 = extractelement <4 x float> %24, i32 3
- %26 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
- %27 = extractelement <4 x float> %26, i32 3
- %28 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
- %29 = extractelement <4 x float> %28, i32 3
- %30 = fcmp ogt float %25, 0.000000e+00
- %31 = select i1 %30, float %27, float %29
- %32 = call float @llvm.AMDGPU.clamp.f32(float %7, float 0.000000e+00, float 1.000000e+00)
- %33 = call float @llvm.AMDGPU.clamp.f32(float %15, float 0.000000e+00, float 1.000000e+00)
- %34 = call float @llvm.AMDGPU.clamp.f32(float %23, float 0.000000e+00, float 1.000000e+00)
- %35 = call float @llvm.AMDGPU.clamp.f32(float %31, float 0.000000e+00, float 1.000000e+00)
- %36 = insertelement <4 x float> undef, float %32, i32 0
- %37 = insertelement <4 x float> %36, float %33, i32 1
- %38 = insertelement <4 x float> %37, float %34, i32 2
- %39 = insertelement <4 x float> %38, float %35, i32 3
- call void @llvm.r600.store.swizzle(<4 x float> %39, i32 0, i32 0)
+ %tmp = load <4 x float>, <4 x float> addrspace(8)* null
+ %tmp7 = extractelement <4 x float> %tmp, i32 0
+ %tmp8 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %tmp9 = extractelement <4 x float> %tmp8, i32 0
+ %tmp10 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %tmp11 = extractelement <4 x float> %tmp10, i32 0
+ %tmp12 = fcmp ogt float %tmp7, 0.000000e+00
+ %tmp13 = select i1 %tmp12, float %tmp9, float %tmp11
+ %tmp14 = load <4 x float>, <4 x float> addrspace(8)* null
+ %tmp15 = extractelement <4 x float> %tmp14, i32 1
+ %tmp16 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %tmp17 = extractelement <4 x float> %tmp16, i32 1
+ %tmp18 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %tmp19 = extractelement <4 x float> %tmp18, i32 1
+ %tmp20 = fcmp ogt float %tmp15, 0.000000e+00
+ %tmp21 = select i1 %tmp20, float %tmp17, float %tmp19
+ %tmp22 = load <4 x float>, <4 x float> addrspace(8)* null
+ %tmp23 = extractelement <4 x float> %tmp22, i32 2
+ %tmp24 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %tmp25 = extractelement <4 x float> %tmp24, i32 2
+ %tmp26 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %tmp27 = extractelement <4 x float> %tmp26, i32 2
+ %tmp28 = fcmp ogt float %tmp23, 0.000000e+00
+ %tmp29 = select i1 %tmp28, float %tmp25, float %tmp27
+ %tmp30 = load <4 x float>, <4 x float> addrspace(8)* null
+ %tmp31 = extractelement <4 x float> %tmp30, i32 3
+ %tmp32 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %tmp33 = extractelement <4 x float> %tmp32, i32 3
+ %tmp34 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %tmp35 = extractelement <4 x float> %tmp34, i32 3
+ %tmp36 = fcmp ogt float %tmp31, 0.000000e+00
+ %tmp37 = select i1 %tmp36, float %tmp33, float %tmp35
+ %max.0.i = call float @llvm.maxnum.f32(float %tmp13, float 0.000000e+00)
+ %clamp.i = call float @llvm.minnum.f32(float %max.0.i, float 1.000000e+00)
+ %max.0.i5 = call float @llvm.maxnum.f32(float %tmp21, float 0.000000e+00)
+ %clamp.i6 = call float @llvm.minnum.f32(float %max.0.i5, float 1.000000e+00)
+ %max.0.i3 = call float @llvm.maxnum.f32(float %tmp29, float 0.000000e+00)
+ %clamp.i4 = call float @llvm.minnum.f32(float %max.0.i3, float 1.000000e+00)
+ %max.0.i1 = call float @llvm.maxnum.f32(float %tmp37, float 0.000000e+00)
+ %clamp.i2 = call float @llvm.minnum.f32(float %max.0.i1, float 1.000000e+00)
+ %tmp38 = insertelement <4 x float> undef, float %clamp.i, i32 0
+ %tmp39 = insertelement <4 x float> %tmp38, float %clamp.i6, i32 1
+ %tmp40 = insertelement <4 x float> %tmp39, float %clamp.i4, i32 2
+ %tmp41 = insertelement <4 x float> %tmp40, float %clamp.i2, i32 3
+ call void @llvm.r600.store.swizzle(<4 x float> %tmp41, i32 0, i32 0)
ret void
}
; CHECK: {{^}}main2:
; CHECK-NOT: MOV
-define void @main2() {
+define amdgpu_kernel void @main2() #0 {
main_body:
- %0 = load <4 x float>, <4 x float> addrspace(8)* null
- %1 = extractelement <4 x float> %0, i32 0
- %2 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
- %3 = extractelement <4 x float> %2, i32 0
- %4 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
- %5 = extractelement <4 x float> %4, i32 1
- %6 = fcmp ogt float %1, 0.000000e+00
- %7 = select i1 %6, float %3, float %5
- %8 = load <4 x float>, <4 x float> addrspace(8)* null
- %9 = extractelement <4 x float> %8, i32 1
- %10 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
- %11 = extractelement <4 x float> %10, i32 0
- %12 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
- %13 = extractelement <4 x float> %12, i32 1
- %14 = fcmp ogt float %9, 0.000000e+00
- %15 = select i1 %14, float %11, float %13
- %16 = load <4 x float>, <4 x float> addrspace(8)* null
- %17 = extractelement <4 x float> %16, i32 2
- %18 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
- %19 = extractelement <4 x float> %18, i32 3
- %20 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
- %21 = extractelement <4 x float> %20, i32 2
- %22 = fcmp ogt float %17, 0.000000e+00
- %23 = select i1 %22, float %19, float %21
- %24 = load <4 x float>, <4 x float> addrspace(8)* null
- %25 = extractelement <4 x float> %24, i32 3
- %26 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
- %27 = extractelement <4 x float> %26, i32 3
- %28 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
- %29 = extractelement <4 x float> %28, i32 2
- %30 = fcmp ogt float %25, 0.000000e+00
- %31 = select i1 %30, float %27, float %29
- %32 = call float @llvm.AMDGPU.clamp.f32(float %7, float 0.000000e+00, float 1.000000e+00)
- %33 = call float @llvm.AMDGPU.clamp.f32(float %15, float 0.000000e+00, float 1.000000e+00)
- %34 = call float @llvm.AMDGPU.clamp.f32(float %23, float 0.000000e+00, float 1.000000e+00)
- %35 = call float @llvm.AMDGPU.clamp.f32(float %31, float 0.000000e+00, float 1.000000e+00)
- %36 = insertelement <4 x float> undef, float %32, i32 0
- %37 = insertelement <4 x float> %36, float %33, i32 1
- %38 = insertelement <4 x float> %37, float %34, i32 2
- %39 = insertelement <4 x float> %38, float %35, i32 3
- call void @llvm.r600.store.swizzle(<4 x float> %39, i32 0, i32 0)
+ %tmp = load <4 x float>, <4 x float> addrspace(8)* null
+ %tmp7 = extractelement <4 x float> %tmp, i32 0
+ %tmp8 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %tmp9 = extractelement <4 x float> %tmp8, i32 0
+ %tmp10 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %tmp11 = extractelement <4 x float> %tmp10, i32 1
+ %tmp12 = fcmp ogt float %tmp7, 0.000000e+00
+ %tmp13 = select i1 %tmp12, float %tmp9, float %tmp11
+ %tmp14 = load <4 x float>, <4 x float> addrspace(8)* null
+ %tmp15 = extractelement <4 x float> %tmp14, i32 1
+ %tmp16 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %tmp17 = extractelement <4 x float> %tmp16, i32 0
+ %tmp18 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %tmp19 = extractelement <4 x float> %tmp18, i32 1
+ %tmp20 = fcmp ogt float %tmp15, 0.000000e+00
+ %tmp21 = select i1 %tmp20, float %tmp17, float %tmp19
+ %tmp22 = load <4 x float>, <4 x float> addrspace(8)* null
+ %tmp23 = extractelement <4 x float> %tmp22, i32 2
+ %tmp24 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %tmp25 = extractelement <4 x float> %tmp24, i32 3
+ %tmp26 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %tmp27 = extractelement <4 x float> %tmp26, i32 2
+ %tmp28 = fcmp ogt float %tmp23, 0.000000e+00
+ %tmp29 = select i1 %tmp28, float %tmp25, float %tmp27
+ %tmp30 = load <4 x float>, <4 x float> addrspace(8)* null
+ %tmp31 = extractelement <4 x float> %tmp30, i32 3
+ %tmp32 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %tmp33 = extractelement <4 x float> %tmp32, i32 3
+ %tmp34 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %tmp35 = extractelement <4 x float> %tmp34, i32 2
+ %tmp36 = fcmp ogt float %tmp31, 0.000000e+00
+ %tmp37 = select i1 %tmp36, float %tmp33, float %tmp35
+ %max.0.i = call float @llvm.maxnum.f32(float %tmp13, float 0.000000e+00)
+ %clamp.i = call float @llvm.minnum.f32(float %max.0.i, float 1.000000e+00)
+ %max.0.i5 = call float @llvm.maxnum.f32(float %tmp21, float 0.000000e+00)
+ %clamp.i6 = call float @llvm.minnum.f32(float %max.0.i5, float 1.000000e+00)
+ %max.0.i3 = call float @llvm.maxnum.f32(float %tmp29, float 0.000000e+00)
+ %clamp.i4 = call float @llvm.minnum.f32(float %max.0.i3, float 1.000000e+00)
+ %max.0.i1 = call float @llvm.maxnum.f32(float %tmp37, float 0.000000e+00)
+ %clamp.i2 = call float @llvm.minnum.f32(float %max.0.i1, float 1.000000e+00)
+ %tmp38 = insertelement <4 x float> undef, float %clamp.i, i32 0
+ %tmp39 = insertelement <4 x float> %tmp38, float %clamp.i6, i32 1
+ %tmp40 = insertelement <4 x float> %tmp39, float %clamp.i4, i32 2
+ %tmp41 = insertelement <4 x float> %tmp40, float %clamp.i2, i32 3
+ call void @llvm.r600.store.swizzle(<4 x float> %tmp41, i32 0, i32 0)
ret void
}
-declare float @llvm.AMDGPU.clamp.f32(float, float, float) readnone
-declare void @llvm.r600.store.swizzle(<4 x float>, i32, i32)
+declare void @llvm.r600.store.swizzle(<4 x float>, i32, i32) #0
+declare float @llvm.minnum.f32(float, float) #1
+declare float @llvm.maxnum.f32(float, float) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
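+
+; The rewrite above replaces the removed llvm.AMDGPU.clamp.f32 intrinsic
+; using the identity clamp(x, 0.0, 1.0) == minnum(maxnum(x, 0.0), 1.0);
+; each %clamp.i* value is built from its select result exactly that way,
+; so the codegen the file checks for should be unchanged.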
diff --git a/test/CodeGen/AMDGPU/kernarg-stack-alignment.ll b/test/CodeGen/AMDGPU/kernarg-stack-alignment.ll
index 21c92dbc9098..8e358ef2804f 100644
--- a/test/CodeGen/AMDGPU/kernarg-stack-alignment.ll
+++ b/test/CodeGen/AMDGPU/kernarg-stack-alignment.ll
@@ -4,40 +4,40 @@
; alignment of the stack
; CHECK-LABEL: {{^}}no_args:
-; CHECK: ScratchSize: 8{{$}}
-define void @no_args() {
+; CHECK: ScratchSize: 5{{$}}
+define amdgpu_kernel void @no_args() {
%alloca = alloca i8
store volatile i8 0, i8* %alloca
ret void
}
; CHECK-LABEL: {{^}}force_align32:
-; CHECK: ScratchSize: 8{{$}}
-define void @force_align32(<8 x i32>) {
+; CHECK: ScratchSize: 5{{$}}
+define amdgpu_kernel void @force_align32(<8 x i32>) {
%alloca = alloca i8
store volatile i8 0, i8* %alloca
ret void
}
; CHECK-LABEL: {{^}}force_align64:
-; CHECK: ScratchSize: 8{{$}}
-define void @force_align64(<16 x i32>) {
+; CHECK: ScratchSize: 5{{$}}
+define amdgpu_kernel void @force_align64(<16 x i32>) {
%alloca = alloca i8
store volatile i8 0, i8* %alloca
ret void
}
; CHECK-LABEL: {{^}}force_align128:
-; CHECK: ScratchSize: 8{{$}}
-define void @force_align128(<32 x i32>) {
+; CHECK: ScratchSize: 5{{$}}
+define amdgpu_kernel void @force_align128(<32 x i32>) {
%alloca = alloca i8
store volatile i8 0, i8* %alloca
ret void
}
; CHECK-LABEL: {{^}}force_align256:
-; CHECK: ScratchSize: 8{{$}}
-define void @force_align256(<64 x i32>) {
+; CHECK: ScratchSize: 5{{$}}
+define amdgpu_kernel void @force_align256(<64 x i32>) {
%alloca = alloca i8
store volatile i8 0, i8* %alloca
ret void
diff --git a/test/CodeGen/AMDGPU/kernel-args.ll b/test/CodeGen/AMDGPU/kernel-args.ll
index 95a68319f8af..6fa26cb38793 100644
--- a/test/CodeGen/AMDGPU/kernel-args.ll
+++ b/test/CodeGen/AMDGPU/kernel-args.ll
@@ -17,7 +17,7 @@
; FIXME: Should be using s_load_dword
; HSA-VI: flat_load_ubyte v{{[0-9]+}}, v{{\[}}[[VPTR_LO]]:[[VPTR_HI]]]
-define void @i8_arg(i32 addrspace(1)* nocapture %out, i8 %in) nounwind {
+define amdgpu_kernel void @i8_arg(i32 addrspace(1)* nocapture %out, i8 %in) nounwind {
entry:
%0 = zext i8 %in to i32
store i32 %0, i32 addrspace(1)* %out, align 4
@@ -36,7 +36,7 @@ entry:
; FIXME: Should be using s_load_dword
; HSA-VI: flat_load_ubyte v{{[0-9]+}}, v{{\[}}[[VPTR_LO]]:[[VPTR_HI]]]
-define void @i8_zext_arg(i32 addrspace(1)* nocapture %out, i8 zeroext %in) nounwind {
+define amdgpu_kernel void @i8_zext_arg(i32 addrspace(1)* nocapture %out, i8 zeroext %in) nounwind {
entry:
%0 = zext i8 %in to i32
store i32 %0, i32 addrspace(1)* %out, align 4
@@ -55,7 +55,7 @@ entry:
; FIXME: Should be using s_load_dword
; HSA-VI: flat_load_sbyte v{{[0-9]+}}, v{{\[}}[[VPTR_LO]]:[[VPTR_HI]]]
-define void @i8_sext_arg(i32 addrspace(1)* nocapture %out, i8 signext %in) nounwind {
+define amdgpu_kernel void @i8_sext_arg(i32 addrspace(1)* nocapture %out, i8 signext %in) nounwind {
entry:
%0 = sext i8 %in to i32
store i32 %0, i32 addrspace(1)* %out, align 4
@@ -75,7 +75,7 @@ entry:
; FIXME: Should be using s_load_dword
; HSA-VI: flat_load_ushort v{{[0-9]+}}, v{{\[}}[[VPTR_LO]]:[[VPTR_HI]]]
-define void @i16_arg(i32 addrspace(1)* nocapture %out, i16 %in) nounwind {
+define amdgpu_kernel void @i16_arg(i32 addrspace(1)* nocapture %out, i16 %in) nounwind {
entry:
%0 = zext i16 %in to i32
store i32 %0, i32 addrspace(1)* %out, align 4
@@ -94,7 +94,7 @@ entry:
; FIXME: Should be using s_load_dword
; HSA-VI: flat_load_ushort v{{[0-9]+}}, v{{\[}}[[VPTR_LO]]:[[VPTR_HI]]]
-define void @i16_zext_arg(i32 addrspace(1)* nocapture %out, i16 zeroext %in) nounwind {
+define amdgpu_kernel void @i16_zext_arg(i32 addrspace(1)* nocapture %out, i16 zeroext %in) nounwind {
entry:
%0 = zext i16 %in to i32
store i32 %0, i32 addrspace(1)* %out, align 4
@@ -113,7 +113,7 @@ entry:
; FIXME: Should be using s_load_dword
; HSA-VI: flat_load_sshort v{{[0-9]+}}, v{{\[}}[[VPTR_LO]]:[[VPTR_HI]]]
-define void @i16_sext_arg(i32 addrspace(1)* nocapture %out, i16 signext %in) nounwind {
+define amdgpu_kernel void @i16_sext_arg(i32 addrspace(1)* nocapture %out, i16 signext %in) nounwind {
entry:
%0 = sext i16 %in to i32
store i32 %0, i32 addrspace(1)* %out, align 4
@@ -126,7 +126,7 @@ entry:
; SI: s_load_dword s{{[0-9]}}, s[0:1], 0xb
; MESA-VI: s_load_dword s{{[0-9]}}, s[0:1], 0x2c
; HSA-VI: s_load_dword s{{[0-9]}}, s[4:5], 0x8
-define void @i32_arg(i32 addrspace(1)* nocapture %out, i32 %in) nounwind {
+define amdgpu_kernel void @i32_arg(i32 addrspace(1)* nocapture %out, i32 %in) nounwind {
entry:
store i32 %in, i32 addrspace(1)* %out, align 4
ret void
@@ -138,7 +138,7 @@ entry:
; SI: s_load_dword s{{[0-9]}}, s[0:1], 0xb
; MESA-VI: s_load_dword s{{[0-9]}}, s[0:1], 0x2c
; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x8
-define void @f32_arg(float addrspace(1)* nocapture %out, float %in) nounwind {
+define amdgpu_kernel void @f32_arg(float addrspace(1)* nocapture %out, float %in) nounwind {
entry:
store float %in, float addrspace(1)* %out, align 4
ret void
@@ -152,7 +152,7 @@ entry:
; MESA-GCN: buffer_load_ubyte
; HSA-VI: flat_load_ubyte
; HSA-VI: flat_load_ubyte
-define void @v2i8_arg(<2 x i8> addrspace(1)* %out, <2 x i8> %in) {
+define amdgpu_kernel void @v2i8_arg(<2 x i8> addrspace(1)* %out, <2 x i8> %in) {
entry:
store <2 x i8> %in, <2 x i8> addrspace(1)* %out
ret void
@@ -166,7 +166,7 @@ entry:
; MESA-GCN: buffer_load_ushort
; HSA-VI: flat_load_ushort
; HSA-VI: flat_load_ushort
-define void @v2i16_arg(<2 x i16> addrspace(1)* %out, <2 x i16> %in) {
+define amdgpu_kernel void @v2i16_arg(<2 x i16> addrspace(1)* %out, <2 x i16> %in) {
entry:
store <2 x i16> %in, <2 x i16> addrspace(1)* %out
ret void
@@ -179,7 +179,7 @@ entry:
; SI: s_load_dwordx2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xb
; MESA-VI: s_load_dwordx2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0x2c
; HSA-VI: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x8
-define void @v2i32_arg(<2 x i32> addrspace(1)* nocapture %out, <2 x i32> %in) nounwind {
+define amdgpu_kernel void @v2i32_arg(<2 x i32> addrspace(1)* nocapture %out, <2 x i32> %in) nounwind {
entry:
store <2 x i32> %in, <2 x i32> addrspace(1)* %out, align 4
ret void
@@ -192,7 +192,7 @@ entry:
; SI: s_load_dwordx2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xb
; MESA-VI: s_load_dwordx2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0x2c
; HSA-VI: s_load_dwordx2 s{{\[[0-9]:[0-9]\]}}, s[4:5], 0x8
-define void @v2f32_arg(<2 x float> addrspace(1)* nocapture %out, <2 x float> %in) nounwind {
+define amdgpu_kernel void @v2f32_arg(<2 x float> addrspace(1)* nocapture %out, <2 x float> %in) nounwind {
entry:
store <2 x float> %in, <2 x float> addrspace(1)* %out, align 4
ret void
@@ -209,7 +209,7 @@ entry:
; HSA-VI: flat_load_ubyte
; HSA-VI: flat_load_ubyte
; HSA-VI: flat_load_ubyte
-define void @v3i8_arg(<3 x i8> addrspace(1)* nocapture %out, <3 x i8> %in) nounwind {
+define amdgpu_kernel void @v3i8_arg(<3 x i8> addrspace(1)* nocapture %out, <3 x i8> %in) nounwind {
entry:
store <3 x i8> %in, <3 x i8> addrspace(1)* %out, align 4
ret void
@@ -226,7 +226,7 @@ entry:
; HSA-VI: flat_load_ushort
; HSA-VI: flat_load_ushort
; HSA-VI: flat_load_ushort
-define void @v3i16_arg(<3 x i16> addrspace(1)* nocapture %out, <3 x i16> %in) nounwind {
+define amdgpu_kernel void @v3i16_arg(<3 x i16> addrspace(1)* nocapture %out, <3 x i16> %in) nounwind {
entry:
store <3 x i16> %in, <3 x i16> addrspace(1)* %out, align 4
ret void
@@ -239,7 +239,7 @@ entry:
; SI: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0xd
; MESA-VI: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x34
; HSA-VI: s_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x10
-define void @v3i32_arg(<3 x i32> addrspace(1)* nocapture %out, <3 x i32> %in) nounwind {
+define amdgpu_kernel void @v3i32_arg(<3 x i32> addrspace(1)* nocapture %out, <3 x i32> %in) nounwind {
entry:
store <3 x i32> %in, <3 x i32> addrspace(1)* %out, align 4
ret void
@@ -253,7 +253,7 @@ entry:
; SI: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0xd
; MESA-VI: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x34
; HSA-VI: s_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x10
-define void @v3f32_arg(<3 x float> addrspace(1)* nocapture %out, <3 x float> %in) nounwind {
+define amdgpu_kernel void @v3f32_arg(<3 x float> addrspace(1)* nocapture %out, <3 x float> %in) nounwind {
entry:
store <3 x float> %in, <3 x float> addrspace(1)* %out, align 4
ret void
@@ -273,7 +273,7 @@ entry:
; HSA-VI: flat_load_ubyte
; HSA-VI: flat_load_ubyte
; HSA-VI: flat_load_ubyte
-define void @v4i8_arg(<4 x i8> addrspace(1)* %out, <4 x i8> %in) {
+define amdgpu_kernel void @v4i8_arg(<4 x i8> addrspace(1)* %out, <4 x i8> %in) {
entry:
store <4 x i8> %in, <4 x i8> addrspace(1)* %out
ret void
@@ -293,7 +293,7 @@ entry:
; HSA-GCN: flat_load_ushort
; HSA-GCN: flat_load_ushort
; HSA-GCN: flat_load_ushort
-define void @v4i16_arg(<4 x i16> addrspace(1)* %out, <4 x i16> %in) {
+define amdgpu_kernel void @v4i16_arg(<4 x i16> addrspace(1)* %out, <4 x i16> %in) {
entry:
store <4 x i16> %in, <4 x i16> addrspace(1)* %out
ret void
@@ -308,7 +308,7 @@ entry:
; SI: s_load_dwordx4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xd
; MESA-VI: s_load_dwordx4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0x34
; HSA-VI: s_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x10
-define void @v4i32_arg(<4 x i32> addrspace(1)* nocapture %out, <4 x i32> %in) nounwind {
+define amdgpu_kernel void @v4i32_arg(<4 x i32> addrspace(1)* nocapture %out, <4 x i32> %in) nounwind {
entry:
store <4 x i32> %in, <4 x i32> addrspace(1)* %out, align 4
ret void
@@ -323,7 +323,7 @@ entry:
; SI: s_load_dwordx4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xd
; MESA-VI: s_load_dwordx4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0x34
; HSA-VI: s_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x10
-define void @v4f32_arg(<4 x float> addrspace(1)* nocapture %out, <4 x float> %in) nounwind {
+define amdgpu_kernel void @v4f32_arg(<4 x float> addrspace(1)* nocapture %out, <4 x float> %in) nounwind {
entry:
store <4 x float> %in, <4 x float> addrspace(1)* %out, align 4
ret void
@@ -354,7 +354,7 @@ entry:
; HSA-GCN: flat_load_ubyte
; HSA-GCN: flat_load_ubyte
; HSA-GCN: flat_load_ubyte
-define void @v8i8_arg(<8 x i8> addrspace(1)* %out, <8 x i8> %in) {
+define amdgpu_kernel void @v8i8_arg(<8 x i8> addrspace(1)* %out, <8 x i8> %in) {
entry:
store <8 x i8> %in, <8 x i8> addrspace(1)* %out
ret void
@@ -386,7 +386,7 @@ entry:
; HSA-VI: flat_load_ushort
; HSA-VI: flat_load_ushort
; HSA-VI: flat_load_ushort
-define void @v8i16_arg(<8 x i16> addrspace(1)* %out, <8 x i16> %in) {
+define amdgpu_kernel void @v8i16_arg(<8 x i16> addrspace(1)* %out, <8 x i16> %in) {
entry:
store <8 x i16> %in, <8 x i16> addrspace(1)* %out
ret void
@@ -405,7 +405,7 @@ entry:
; SI: s_load_dwordx8 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x11
; MESA-VI: s_load_dwordx8 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x44
; HSA-VI: s_load_dwordx8 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x20
-define void @v8i32_arg(<8 x i32> addrspace(1)* nocapture %out, <8 x i32> %in) nounwind {
+define amdgpu_kernel void @v8i32_arg(<8 x i32> addrspace(1)* nocapture %out, <8 x i32> %in) nounwind {
entry:
store <8 x i32> %in, <8 x i32> addrspace(1)* %out, align 4
ret void
@@ -422,7 +422,7 @@ entry:
; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].W
; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].X
; SI: s_load_dwordx8 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x11
-define void @v8f32_arg(<8 x float> addrspace(1)* nocapture %out, <8 x float> %in) nounwind {
+define amdgpu_kernel void @v8f32_arg(<8 x float> addrspace(1)* nocapture %out, <8 x float> %in) nounwind {
entry:
store <8 x float> %in, <8 x float> addrspace(1)* %out, align 4
ret void
@@ -478,7 +478,7 @@ entry:
; HSA-VI: flat_load_ubyte
; HSA-VI: flat_load_ubyte
; HSA-VI: flat_load_ubyte
-define void @v16i8_arg(<16 x i8> addrspace(1)* %out, <16 x i8> %in) {
+define amdgpu_kernel void @v16i8_arg(<16 x i8> addrspace(1)* %out, <16 x i8> %in) {
entry:
store <16 x i8> %in, <16 x i8> addrspace(1)* %out
ret void
@@ -534,7 +534,7 @@ entry:
; HSA-VI: flat_load_ushort
; HSA-VI: flat_load_ushort
; HSA-VI: flat_load_ushort
-define void @v16i16_arg(<16 x i16> addrspace(1)* %out, <16 x i16> %in) {
+define amdgpu_kernel void @v16i16_arg(<16 x i16> addrspace(1)* %out, <16 x i16> %in) {
entry:
store <16 x i16> %in, <16 x i16> addrspace(1)* %out
ret void
@@ -561,7 +561,7 @@ entry:
; SI: s_load_dwordx16 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x19
; MESA-VI: s_load_dwordx16 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x64
; HSA-VI: s_load_dwordx16 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x40
-define void @v16i32_arg(<16 x i32> addrspace(1)* nocapture %out, <16 x i32> %in) nounwind {
+define amdgpu_kernel void @v16i32_arg(<16 x i32> addrspace(1)* nocapture %out, <16 x i32> %in) nounwind {
entry:
store <16 x i32> %in, <16 x i32> addrspace(1)* %out, align 4
ret void
@@ -588,7 +588,7 @@ entry:
; SI: s_load_dwordx16 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x19
; MESA-VI: s_load_dwordx16 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x64
; HSA-VI: s_load_dwordx16 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x40
-define void @v16f32_arg(<16 x float> addrspace(1)* nocapture %out, <16 x float> %in) nounwind {
+define amdgpu_kernel void @v16f32_arg(<16 x float> addrspace(1)* nocapture %out, <16 x float> %in) nounwind {
entry:
store <16 x float> %in, <16 x float> addrspace(1)* %out, align 4
ret void
@@ -599,7 +599,7 @@ entry:
; MESA-GCN: s_load_dwordx2
; MESA-GCN: buffer_store_dwordx2
; HSA-VI: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x8
-define void @kernel_arg_i64(i64 addrspace(1)* %out, i64 %a) nounwind {
+define amdgpu_kernel void @kernel_arg_i64(i64 addrspace(1)* %out, i64 %a) nounwind {
store i64 %a, i64 addrspace(1)* %out, align 8
ret void
}
@@ -611,7 +611,7 @@ define void @kernel_arg_i64(i64 addrspace(1)* %out, i64 %a) nounwind {
; MESA-VI-DAG: s_load_dwordx2 s[{{[0-9]:[0-9]}}], s[0:1], 0x2c
; MESA-GCN: buffer_store_dwordx2
; HSA-VI: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x8
-define void @f64_kernel_arg(double addrspace(1)* %out, double %in) {
+define amdgpu_kernel void @f64_kernel_arg(double addrspace(1)* %out, double %in) {
entry:
store double %in, double addrspace(1)* %out
ret void
@@ -621,7 +621,7 @@ entry:
; XGCN: s_load_dwordx2
; XGCN: s_load_dwordx2
; XGCN: buffer_store_dwordx2
-; define void @kernel_arg_v1i64(<1 x i64> addrspace(1)* %out, <1 x i64> %a) nounwind {
+; define amdgpu_kernel void @kernel_arg_v1i64(<1 x i64> addrspace(1)* %out, <1 x i64> %a) nounwind {
; store <1 x i64> %a, <1 x i64> addrspace(1)* %out, align 8
; ret void
; }
@@ -631,7 +631,7 @@ entry:
; SI: v_and_b32_e32
; SI: buffer_store_byte
; SI: s_endpgm
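; Only the low bit of the incoming i1 is defined, so v_and with 1 masks the
; byte before the buffer_store_byte.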
-define void @i1_arg(i1 addrspace(1)* %out, i1 %x) nounwind {
+define amdgpu_kernel void @i1_arg(i1 addrspace(1)* %out, i1 %x) nounwind {
store i1 %x, i1 addrspace(1)* %out, align 1
ret void
}
@@ -640,7 +640,7 @@ define void @i1_arg(i1 addrspace(1)* %out, i1 %x) nounwind {
; SI: buffer_load_ubyte
; SI: buffer_store_dword
; SI: s_endpgm
-define void @i1_arg_zext_i32(i32 addrspace(1)* %out, i1 %x) nounwind {
+define amdgpu_kernel void @i1_arg_zext_i32(i32 addrspace(1)* %out, i1 %x) nounwind {
%ext = zext i1 %x to i32
store i32 %ext, i32 addrspace(1)* %out, align 4
ret void
@@ -650,7 +650,7 @@ define void @i1_arg_zext_i32(i32 addrspace(1)* %out, i1 %x) nounwind {
; SI: buffer_load_ubyte
; SI: buffer_store_dwordx2
; SI: s_endpgm
-define void @i1_arg_zext_i64(i64 addrspace(1)* %out, i1 %x) nounwind {
+define amdgpu_kernel void @i1_arg_zext_i64(i64 addrspace(1)* %out, i1 %x) nounwind {
%ext = zext i1 %x to i64
store i64 %ext, i64 addrspace(1)* %out, align 8
ret void
@@ -660,7 +660,7 @@ define void @i1_arg_zext_i64(i64 addrspace(1)* %out, i1 %x) nounwind {
; SI: buffer_load_ubyte
; SI: buffer_store_dword
; SI: s_endpgm
-define void @i1_arg_sext_i32(i32 addrspace(1)* %out, i1 %x) nounwind {
+define amdgpu_kernel void @i1_arg_sext_i32(i32 addrspace(1)* %out, i1 %x) nounwind {
%ext = sext i1 %x to i32
store i32 %ext, i32 addrspace(1)* %out, align 4
ret void
@@ -672,7 +672,7 @@ define void @i1_arg_sext_i32(i32 addrspace(1)* %out, i1 %x) nounwind {
; SI: v_ashrrev_i32
; SI: buffer_store_dwordx2
; SI: s_endpgm
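; The ashr replicates the sign bit to produce the high dword of the
; sign-extended i64.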
-define void @i1_arg_sext_i64(i64 addrspace(1)* %out, i1 %x) nounwind {
+define amdgpu_kernel void @i1_arg_sext_i64(i64 addrspace(1)* %out, i1 %x) nounwind {
%ext = sext i1 %x to i64
store i64 %ext, i64 addrspace(1)* %out, align 8
ret void
diff --git a/test/CodeGen/AMDGPU/large-alloca-compute.ll b/test/CodeGen/AMDGPU/large-alloca-compute.ll
index 4f6dbf9dc2bf..4af37d8da966 100644
--- a/test/CodeGen/AMDGPU/large-alloca-compute.ll
+++ b/test/CodeGen/AMDGPU/large-alloca-compute.ll
@@ -1,5 +1,6 @@
; RUN: llc -march=amdgcn -mcpu=bonaire -show-mc-encoding < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=ALL %s
; RUN: llc -march=amdgcn -mcpu=carrizo --show-mc-encoding < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=ALL %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 --show-mc-encoding < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -check-prefix=ALL %s
; RUN: llc -march=amdgcn -mcpu=bonaire -mtriple=amdgcn-unknown-amdhsa < %s -mattr=-flat-for-global | FileCheck -check-prefix=GCNHSA -check-prefix=CIHSA -check-prefix=ALL %s
; RUN: llc -march=amdgcn -mcpu=carrizo -mtriple=amdgcn-unknown-amdhsa -mattr=-flat-for-global < %s | FileCheck -check-prefix=GCNHSA -check-prefix=VIHSA -check-prefix=ALL %s
@@ -14,6 +15,7 @@
; GCN-DAG: s_mov_b32 s{{[0-9]+}}, -1
; CI-DAG: s_mov_b32 s{{[0-9]+}}, 0xe8f000
; VI-DAG: s_mov_b32 s{{[0-9]+}}, 0xe80000
+; GFX9-DAG: s_mov_b32 s{{[0-9]+}}, 0xe00000
; GCNHSA: .amd_kernel_code_t
@@ -46,7 +48,7 @@
; Scratch size = alloca size + emergency stack slot
; ALL: ; ScratchSize: 32772
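; 8192 x i32 = 32768 bytes of alloca + 4 bytes for the emergency slot = 32772.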
-define void @large_alloca_compute_shader(i32 %x, i32 %y) #0 {
+define amdgpu_kernel void @large_alloca_compute_shader(i32 %x, i32 %y) #0 {
%large = alloca [8192 x i32], align 4
%gep = getelementptr [8192 x i32], [8192 x i32]* %large, i32 0, i32 8191
store volatile i32 %x, i32* %gep
diff --git a/test/CodeGen/AMDGPU/large-alloca-graphics.ll b/test/CodeGen/AMDGPU/large-alloca-graphics.ll
index ea9754a390b6..28b819a6374b 100644
--- a/test/CodeGen/AMDGPU/large-alloca-graphics.ll
+++ b/test/CodeGen/AMDGPU/large-alloca-graphics.ll
@@ -1,5 +1,6 @@
; RUN: llc -march=amdgcn -mcpu=bonaire < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=ALL %s
; RUN: llc -march=amdgcn -mcpu=carrizo -mattr=-flat-for-global < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=ALL %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -check-prefix=ALL %s
; ALL-LABEL: {{^}}large_alloca_pixel_shader:
; GCN-DAG: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
@@ -7,6 +8,7 @@
; GCN-DAG: s_mov_b32 s10, -1
; CI-DAG: s_mov_b32 s11, 0xe8f000
; VI-DAG: s_mov_b32 s11, 0xe80000
+; GFX9-DAG: s_mov_b32 s11, 0xe00000
; GCN: buffer_store_dword {{v[0-9]+}}, {{v[0-9]+}}, s[8:11], s0 offen
; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, s[8:11], s0 offen
@@ -28,6 +30,7 @@ define amdgpu_ps void @large_alloca_pixel_shader(i32 %x, i32 %y) #0 {
; GCN-DAG: s_mov_b32 s10, -1
; CI-DAG: s_mov_b32 s11, 0xe8f000
; VI-DAG: s_mov_b32 s11, 0xe80000
+; GFX9-DAG: s_mov_b32 s11, 0xe00000
; GCN: buffer_store_dword {{v[0-9]+}}, {{v[0-9]+}}, s[8:11], s2 offen
; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, s[8:11], s2 offen
diff --git a/test/CodeGen/AMDGPU/large-constant-initializer.ll b/test/CodeGen/AMDGPU/large-constant-initializer.ll
index 9975b1b7f5cc..c46d68e38ade 100644
--- a/test/CodeGen/AMDGPU/large-constant-initializer.ll
+++ b/test/CodeGen/AMDGPU/large-constant-initializer.ll
@@ -4,7 +4,7 @@
@gv = external unnamed_addr addrspace(2) constant [239 x i32], align 4
-define void @opencv_cvtfloat_crash(i32 addrspace(1)* %out, i32 %x) nounwind {
+define amdgpu_kernel void @opencv_cvtfloat_crash(i32 addrspace(1)* %out, i32 %x) nounwind {
%val = load i32, i32 addrspace(2)* getelementptr ([239 x i32], [239 x i32] addrspace(2)* @gv, i64 0, i64 239), align 4
%mul12 = mul nsw i32 %val, 7
br i1 undef, label %exit, label %bb
diff --git a/test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll b/test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll
index 906a688febd2..13dd7058c50a 100644
--- a/test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll
+++ b/test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll
@@ -1,8 +1,10 @@
-; RUN: opt -S -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca < %s | FileCheck %s
+; RUN: opt -S -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca < %s | FileCheck --check-prefix=SI --check-prefix=ALL %s
+; RUN: opt -S -mcpu=tonga -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca < %s | FileCheck --check-prefix=CI --check-prefix=ALL %s
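+; tonga reports 64k of LDS where the default SI target reports 32k, so the
+; second run has roughly twice the promotion budget per workgroup.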
-; CHECK: @promote_alloca_size_63.stack = internal unnamed_addr addrspace(3) global [63 x [5 x i32]] undef, align 4
+; SI-NOT: @promote_alloca_size_63.stack = internal unnamed_addr addrspace(3) global [63 x [5 x i32]] undef, align 4
+; CI: @promote_alloca_size_63.stack = internal unnamed_addr addrspace(3) global [63 x [5 x i32]] undef, align 4
-define void @promote_alloca_size_63(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
+define amdgpu_kernel void @promote_alloca_size_63(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
entry:
%stack = alloca [5 x i32], align 4
%0 = load i32, i32 addrspace(1)* %in, align 4
@@ -22,9 +24,9 @@ entry:
ret void
}
-; CHECK: @promote_alloca_size_256.stack = internal unnamed_addr addrspace(3) global [256 x [5 x i32]] undef, align 4
+; ALL: @promote_alloca_size_256.stack = internal unnamed_addr addrspace(3) global [256 x [5 x i32]] undef, align 4
-define void @promote_alloca_size_256(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #1 {
+define amdgpu_kernel void @promote_alloca_size_256(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #1 {
entry:
%stack = alloca [5 x i32], align 4
%0 = load i32, i32 addrspace(1)* %in, align 4
@@ -44,9 +46,9 @@ entry:
ret void
}
-; CHECK: @promote_alloca_size_1600.stack = internal unnamed_addr addrspace(3) global [1600 x [5 x i32]] undef, align 4
+; ALL: @promote_alloca_size_1600.stack = internal unnamed_addr addrspace(3) global [1600 x [5 x i32]] undef, align 4
-define void @promote_alloca_size_1600(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #2 {
+define amdgpu_kernel void @promote_alloca_size_1600(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #2 {
entry:
%stack = alloca [5 x i32], align 4
%0 = load i32, i32 addrspace(1)* %in, align 4
@@ -66,9 +68,10 @@ entry:
ret void
}
-; CHECK-LABEL: @occupancy_0(
-; CHECK: alloca [5 x i32]
-define void @occupancy_0(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #3 {
+; ALL-LABEL: @occupancy_0(
+; CI-NOT: alloca [5 x i32]
+; SI: alloca [5 x i32]
+define amdgpu_kernel void @occupancy_0(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #3 {
entry:
%stack = alloca [5 x i32], align 4
%0 = load i32, i32 addrspace(1)* %in, align 4
@@ -88,9 +91,10 @@ entry:
ret void
}
-; CHECK-LABEL: @occupancy_max(
-; CHECK: alloca [5 x i32]
-define void @occupancy_max(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #4 {
+; ALL-LABEL: @occupancy_max(
+; CI-NOT: alloca [5 x i32]
+; SI: alloca [5 x i32]
+define amdgpu_kernel void @occupancy_max(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #4 {
entry:
%stack = alloca [5 x i32], align 4
%0 = load i32, i32 addrspace(1)* %in, align 4
@@ -110,9 +114,11 @@ entry:
ret void
}
-; CHECK-LABEL: @occupancy_6(
-; CHECK-NOT: alloca
-define void @occupancy_6(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #5 {
+; SI-LABEL: @occupancy_6(
+; CI-LABEL: @occupancy_6(
+; SI: alloca
+; CI-NOT: alloca
+define amdgpu_kernel void @occupancy_6(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #5 {
entry:
%stack = alloca [42 x i8], align 4
%tmp = load i8, i8 addrspace(1)* %in, align 1
@@ -134,9 +140,9 @@ entry:
ret void
}
-; CHECK-LABEL: @occupancy_6_over(
-; CHECK: alloca [43 x i8]
-define void @occupancy_6_over(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #5 {
+; ALL-LABEL: @occupancy_6_over(
+; ALL: alloca [43 x i8]
+define amdgpu_kernel void @occupancy_6_over(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #5 {
entry:
%stack = alloca [43 x i8], align 4
%tmp = load i8, i8 addrspace(1)* %in, align 1
@@ -158,9 +164,11 @@ entry:
ret void
}
-; CHECK-LABEL: @occupancy_8(
-; CHECK-NOT: alloca
-define void @occupancy_8(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #6 {
+; SI-LABEL: @occupancy_8(
+; CI-LABEL: @occupancy_8(
+; SI: alloca
+; CI-NOT: alloca
+define amdgpu_kernel void @occupancy_8(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #6 {
entry:
%stack = alloca [32 x i8], align 4
%tmp = load i8, i8 addrspace(1)* %in, align 1
@@ -182,9 +190,9 @@ entry:
ret void
}
-; CHECK-LABEL: @occupancy_8_over(
-; CHECK: alloca [33 x i8]
-define void @occupancy_8_over(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #6 {
+; ALL-LABEL: @occupancy_8_over(
+; ALL: alloca [33 x i8]
+define amdgpu_kernel void @occupancy_8_over(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #6 {
entry:
%stack = alloca [33 x i8], align 4
%tmp = load i8, i8 addrspace(1)* %in, align 1
@@ -206,9 +214,11 @@ entry:
ret void
}
-; CHECK-LABEL: @occupancy_9(
-; CHECK-NOT: alloca
-define void @occupancy_9(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #7 {
+; SI-LABEL: @occupancy_9(
+; CI-LABEL: @occupancy_9(
+; SI: alloca
+; CI-NOT: alloca
+define amdgpu_kernel void @occupancy_9(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #7 {
entry:
%stack = alloca [28 x i8], align 4
%tmp = load i8, i8 addrspace(1)* %in, align 1
@@ -230,9 +240,9 @@ entry:
ret void
}
-; CHECK-LABEL: @occupancy_9_over(
-; CHECK: alloca [29 x i8]
-define void @occupancy_9_over(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #7 {
+; ALL-LABEL: @occupancy_9_over(
+; ALL: alloca [29 x i8]
+define amdgpu_kernel void @occupancy_9_over(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #7 {
entry:
%stack = alloca [29 x i8], align 4
%tmp = load i8, i8 addrspace(1)* %in, align 1
diff --git a/test/CodeGen/AMDGPU/lds-alignment.ll b/test/CodeGen/AMDGPU/lds-alignment.ll
index 99334585e589..c23dea2b6b76 100644
--- a/test/CodeGen/AMDGPU/lds-alignment.ll
+++ b/test/CodeGen/AMDGPU/lds-alignment.ll
@@ -15,7 +15,7 @@ declare void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* nocapture, i8 addrspace
; HSA-LABEL: {{^}}test_no_round_size_1:
; HSA: workgroup_group_segment_byte_size = 38
-define void @test_no_round_size_1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @test_no_round_size_1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
%lds.align16.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align16.0 to i8 addrspace(3)*
call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align16.0.bc, i8 addrspace(1)* %in, i32 38, i32 4, i1 false)
call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align16.0.bc, i32 38, i32 4, i1 false)
@@ -34,7 +34,7 @@ define void @test_no_round_size_1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #
; HSA-LABEL: {{^}}test_round_size_2:
; HSA: workgroup_group_segment_byte_size = 86
; HSA: group_segment_alignment = 4
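; 38 rounded up to the next 16-byte boundary is 48; 48 + 38 = 86.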
-define void @test_round_size_2(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @test_round_size_2(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
%lds.align16.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align16.0 to i8 addrspace(3)*
call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align16.0.bc, i8 addrspace(1)* %in, i32 38, i32 4, i1 false)
call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align16.0.bc, i32 38, i32 4, i1 false)
@@ -50,7 +50,7 @@ define void @test_round_size_2(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
; HSA-LABEL: {{^}}test_round_size_2_align_8:
; HSA: workgroup_group_segment_byte_size = 86
; HSA: group_segment_alignment = 4
-define void @test_round_size_2_align_8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @test_round_size_2_align_8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
%lds.align16.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align16.0 to i8 addrspace(3)*
call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align16.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false)
call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align16.0.bc, i32 38, i32 8, i1 false)
@@ -65,7 +65,7 @@ define void @test_round_size_2_align_8(i8 addrspace(1)* %out, i8 addrspace(1)* %
; HSA-LABEL: {{^}}test_round_local_lds_and_arg:
; HSA: workgroup_group_segment_byte_size = 38
; HSA: group_segment_alignment = 4
-define void @test_round_local_lds_and_arg(i8 addrspace(1)* %out, i8 addrspace(1)* %in, i8 addrspace(3)* %lds.arg) #1 {
+define amdgpu_kernel void @test_round_local_lds_and_arg(i8 addrspace(1)* %out, i8 addrspace(1)* %in, i8 addrspace(3)* %lds.arg) #1 {
%lds.align16.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align16.0 to i8 addrspace(3)*
call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align16.0.bc, i8 addrspace(1)* %in, i32 38, i32 4, i1 false)
@@ -78,7 +78,7 @@ define void @test_round_local_lds_and_arg(i8 addrspace(1)* %out, i8 addrspace(1)
; HSA-LABEL: {{^}}test_round_lds_arg:
; HSA: workgroup_group_segment_byte_size = 0
; HSA: group_segment_alignment = 4
-define void @test_round_lds_arg(i8 addrspace(1)* %out, i8 addrspace(1)* %in, i8 addrspace(3)* %lds.arg) #1 {
+define amdgpu_kernel void @test_round_lds_arg(i8 addrspace(1)* %out, i8 addrspace(1)* %in, i8 addrspace(3)* %lds.arg) #1 {
call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.arg, i8 addrspace(1)* %in, i32 38, i32 4, i1 false)
call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.arg, i32 38, i32 4, i1 false)
ret void
@@ -88,7 +88,7 @@ define void @test_round_lds_arg(i8 addrspace(1)* %out, i8 addrspace(1)* %in, i8
; HSA-LABEL: {{^}}test_high_align_lds_arg:
; HSA: workgroup_group_segment_byte_size = 0
; HSA: group_segment_alignment = 4
-define void @test_high_align_lds_arg(i8 addrspace(1)* %out, i8 addrspace(1)* %in, i8 addrspace(3)* align 64 %lds.arg) #1 {
+define amdgpu_kernel void @test_high_align_lds_arg(i8 addrspace(1)* %out, i8 addrspace(1)* %in, i8 addrspace(3)* align 64 %lds.arg) #1 {
call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.arg, i8 addrspace(1)* %in, i32 38, i32 64, i1 false)
call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.arg, i32 38, i32 64, i1 false)
ret void
@@ -98,7 +98,7 @@ define void @test_high_align_lds_arg(i8 addrspace(1)* %out, i8 addrspace(1)* %in
; HSA-LABEL: {{^}}test_missing_alignment_size_2_order0:
; HSA: workgroup_group_segment_byte_size = 212
; HSA: group_segment_alignment = 4
-define void @test_missing_alignment_size_2_order0(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @test_missing_alignment_size_2_order0(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
%lds.missing.align.0.bc = bitcast [39 x i32] addrspace(3)* @lds.missing.align.0 to i8 addrspace(3)*
call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.missing.align.0.bc, i8 addrspace(1)* %in, i32 160, i32 4, i1 false)
call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.missing.align.0.bc, i32 160, i32 4, i1 false)
@@ -114,7 +114,7 @@ define void @test_missing_alignment_size_2_order0(i8 addrspace(1)* %out, i8 addr
; HSA-LABEL: {{^}}test_missing_alignment_size_2_order1:
; HSA: workgroup_group_segment_byte_size = 216
; HSA: group_segment_alignment = 4
-define void @test_missing_alignment_size_2_order1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @test_missing_alignment_size_2_order1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
%lds.missing.align.1.bc = bitcast [7 x i64] addrspace(3)* @lds.missing.align.1 to i8 addrspace(3)*
call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.missing.align.1.bc, i8 addrspace(1)* %in, i32 56, i32 8, i1 false)
call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.missing.align.1.bc, i32 56, i32 8, i1 false)
@@ -142,7 +142,7 @@ define void @test_missing_alignment_size_2_order1(i8 addrspace(1)* %out, i8 addr
; HSA-LABEL: {{^}}test_round_size_3_order0:
; HSA: workgroup_group_segment_byte_size = 134
; HSA: group_segment_alignment = 4
-define void @test_round_size_3_order0(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @test_round_size_3_order0(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
%lds.align32.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align32.0 to i8 addrspace(3)*
call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align32.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false)
call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align32.0.bc, i32 38, i32 8, i1 false)
@@ -163,7 +163,7 @@ define void @test_round_size_3_order0(i8 addrspace(1)* %out, i8 addrspace(1)* %i
; HSA-LABEL: {{^}}test_round_size_3_order1:
; HSA: workgroup_group_segment_byte_size = 134
; HSA: group_segment_alignment = 4
-define void @test_round_size_3_order1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @test_round_size_3_order1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
%lds.align32.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align32.0 to i8 addrspace(3)*
call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align32.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false)
call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align32.0.bc, i32 38, i32 8, i1 false)
@@ -184,7 +184,7 @@ define void @test_round_size_3_order1(i8 addrspace(1)* %out, i8 addrspace(1)* %i
; HSA-LABEL: {{^}}test_round_size_3_order2:
; HSA: workgroup_group_segment_byte_size = 150
; HSA: group_segment_alignment = 4
-define void @test_round_size_3_order2(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @test_round_size_3_order2(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
%lds.align16.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align16.0 to i8 addrspace(3)*
call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align16.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false)
call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align16.0.bc, i32 38, i32 8, i1 false)
@@ -205,7 +205,7 @@ define void @test_round_size_3_order2(i8 addrspace(1)* %out, i8 addrspace(1)* %i
; HSA-LABEL: {{^}}test_round_size_3_order3:
; HSA: workgroup_group_segment_byte_size = 118
; HSA: group_segment_alignment = 4
-define void @test_round_size_3_order3(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @test_round_size_3_order3(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
%lds.align16.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align16.0 to i8 addrspace(3)*
call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align16.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false)
call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align16.0.bc, i32 38, i32 8, i1 false)
@@ -226,7 +226,7 @@ define void @test_round_size_3_order3(i8 addrspace(1)* %out, i8 addrspace(1)* %i
; HSA-LABEL: {{^}}test_round_size_3_order4:
; HSA: workgroup_group_segment_byte_size = 142
; HSA: group_segment_alignment = 4
-define void @test_round_size_3_order4(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @test_round_size_3_order4(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
%lds.align8.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align8.0 to i8 addrspace(3)*
call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align8.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false)
call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align8.0.bc, i32 38, i32 8, i1 false)
@@ -247,7 +247,7 @@ define void @test_round_size_3_order4(i8 addrspace(1)* %out, i8 addrspace(1)* %i
; HSA-LABEL: {{^}}test_round_size_3_order5:
; HSA: workgroup_group_segment_byte_size = 126
; HSA: group_segment_alignment = 4
-define void @test_round_size_3_order5(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @test_round_size_3_order5(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
%lds.align8.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align8.0 to i8 addrspace(3)*
call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align8.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false)
call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align8.0.bc, i32 38, i32 8, i1 false)
diff --git a/test/CodeGen/AMDGPU/lds-initializer.ll b/test/CodeGen/AMDGPU/lds-initializer.ll
index 9875814b03d3..254673d8a1e4 100644
--- a/test/CodeGen/AMDGPU/lds-initializer.ll
+++ b/test/CodeGen/AMDGPU/lds-initializer.ll
@@ -5,7 +5,7 @@
@lds = addrspace(3) global [8 x i32] [i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8]
-define void @load_init_lds_global(i32 addrspace(1)* %out, i1 %p) {
+define amdgpu_kernel void @load_init_lds_global(i32 addrspace(1)* %out, i1 %p) {
%gep = getelementptr [8 x i32], [8 x i32] addrspace(3)* @lds, i32 0, i32 10
%ld = load i32, i32 addrspace(3)* %gep
store i32 %ld, i32 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/lds-m0-init-in-loop.ll b/test/CodeGen/AMDGPU/lds-m0-init-in-loop.ll
index 078d6330ce04..1b3eeed3005c 100644
--- a/test/CodeGen/AMDGPU/lds-m0-init-in-loop.ll
+++ b/test/CodeGen/AMDGPU/lds-m0-init-in-loop.ll
@@ -18,7 +18,7 @@
; GCN: BB0_3:
; GCN-NEXT: s_endpgm
-define void @copy_local_to_global_loop_m0_init(i32 addrspace(1)* noalias nocapture %out, i32 addrspace(3)* noalias nocapture readonly %in, i32 %n) #0 {
+define amdgpu_kernel void @copy_local_to_global_loop_m0_init(i32 addrspace(1)* noalias nocapture %out, i32 addrspace(3)* noalias nocapture readonly %in, i32 %n) #0 {
bb:
%tmp = icmp sgt i32 %n, 0
br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
diff --git a/test/CodeGen/AMDGPU/lds-oqap-crash.ll b/test/CodeGen/AMDGPU/lds-oqap-crash.ll
index 6ff6fc3d7afc..fff2a9200729 100644
--- a/test/CodeGen/AMDGPU/lds-oqap-crash.ll
+++ b/test/CodeGen/AMDGPU/lds-oqap-crash.ll
@@ -10,7 +10,7 @@
; reads and writes are bundled together in the same instruction.
; CHECK: {{^}}lds_crash:
-define void @lds_crash(i32 addrspace(1)* %out, i32 addrspace(3)* %in, i32 %a, i32 %b, i32 %c) {
+define amdgpu_kernel void @lds_crash(i32 addrspace(1)* %out, i32 addrspace(3)* %in, i32 %a, i32 %b, i32 %c) {
entry:
%0 = load i32, i32 addrspace(3)* %in
; This block needs to be > 115 ISA instructions to hit the bug,
diff --git a/test/CodeGen/AMDGPU/lds-output-queue.ll b/test/CodeGen/AMDGPU/lds-output-queue.ll
index abe472e423fc..8b7e9e6d6aa8 100644
--- a/test/CodeGen/AMDGPU/lds-output-queue.ll
+++ b/test/CodeGen/AMDGPU/lds-output-queue.ll
@@ -10,7 +10,7 @@
@local_mem = internal unnamed_addr addrspace(3) global [2 x i32] undef, align 4
-define void @lds_input_queue(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %index) {
+define amdgpu_kernel void @lds_input_queue(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %index) {
entry:
%0 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(3)* @local_mem, i32 0, i32 %index
%1 = load i32, i32 addrspace(3)* %0
@@ -88,7 +88,7 @@ declare void @llvm.r600.group.barrier() nounwind convergent
; CHECK: LDS_READ_RET
; CHECK-NOT: ALU clause
; CHECK: MOV * T{{[0-9]\.[XYZW]}}, OQAP
-define void @local_global_alias(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @local_global_alias(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
%0 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(3)* @local_mem, i32 0, i32 0
%1 = load i32, i32 addrspace(3)* %0
diff --git a/test/CodeGen/AMDGPU/lds-size.ll b/test/CodeGen/AMDGPU/lds-size.ll
index 1607713090e3..c65817abd489 100644
--- a/test/CodeGen/AMDGPU/lds-size.ll
+++ b/test/CodeGen/AMDGPU/lds-size.ll
@@ -14,7 +14,7 @@
; GCN: ; LDSByteSize: 4 bytes/workgroup (compile time only)
@lds = internal unnamed_addr addrspace(3) global i32 undef, align 4
-define void @test(i32 addrspace(1)* %out, i32 %cond) {
+define amdgpu_kernel void @test(i32 addrspace(1)* %out, i32 %cond) {
entry:
%0 = icmp eq i32 %cond, 0
br i1 %0, label %if, label %else
diff --git a/test/CodeGen/AMDGPU/lds-zero-initializer.ll b/test/CodeGen/AMDGPU/lds-zero-initializer.ll
index cb5d73fb0d8b..53c1c727a19d 100644
--- a/test/CodeGen/AMDGPU/lds-zero-initializer.ll
+++ b/test/CodeGen/AMDGPU/lds-zero-initializer.ll
@@ -5,7 +5,7 @@
@lds = addrspace(3) global [256 x i32] zeroinitializer
-define void @load_zeroinit_lds_global(i32 addrspace(1)* %out, i1 %p) {
+define amdgpu_kernel void @load_zeroinit_lds_global(i32 addrspace(1)* %out, i1 %p) {
%gep = getelementptr [256 x i32], [256 x i32] addrspace(3)* @lds, i32 0, i32 10
%ld = load i32, i32 addrspace(3)* %gep
store i32 %ld, i32 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/legalizedag-bug-expand-setcc.ll b/test/CodeGen/AMDGPU/legalizedag-bug-expand-setcc.ll
index 4244c48d240e..e85a1b690af6 100644
--- a/test/CodeGen/AMDGPU/legalizedag-bug-expand-setcc.ll
+++ b/test/CodeGen/AMDGPU/legalizedag-bug-expand-setcc.ll
@@ -11,7 +11,7 @@
; CHECK: {{^}}setcc_expand:
; CHECK: SET
; CHECK-NOT: CND
-define void @setcc_expand(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @setcc_expand(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = icmp eq i32 %in, 5
br i1 %0, label %IF, label %ENDIF
diff --git a/test/CodeGen/AMDGPU/limit-coalesce.mir b/test/CodeGen/AMDGPU/limit-coalesce.mir
new file mode 100644
index 000000000000..106a96e32dc3
--- /dev/null
+++ b/test/CodeGen/AMDGPU/limit-coalesce.mir
@@ -0,0 +1,71 @@
+# RUN: llc -march=amdgcn -run-pass simple-register-coalescing -o - %s | FileCheck %s
+
+# Check that the coalescer does not create a wider register tuple than the source uses
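+# (e.g. the 64-bit value stored by FLAT_STORE_DWORDX2 below must stay in a
+# vreg_64 rather than being widened to vreg_96 or vreg_128).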
+
+# CHECK: - { id: 2, class: vreg_64 }
+# CHECK: - { id: 3, class: vreg_64 }
+# CHECK: - { id: 4, class: vreg_64 }
+# CHECK: - { id: 5, class: vreg_96 }
+# CHECK: - { id: 6, class: vreg_96 }
+# CHECK: - { id: 7, class: vreg_128 }
+# CHECK: - { id: 8, class: vreg_128 }
+# No further virtual registers should be defined
+# CHECK-NEXT: liveins:
+# CHECK: FLAT_STORE_DWORDX2 %vgpr0_vgpr1, %4,
+# CHECK: FLAT_STORE_DWORDX3 %vgpr0_vgpr1, %6,
+
+---
+name: main
+alignment: 0
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 1, class: sreg_32_xm0, preferred-register: '%1' }
+ - { id: 2, class: vreg_64, preferred-register: '%2' }
+ - { id: 3, class: vreg_64 }
+ - { id: 4, class: vreg_64 }
+ - { id: 5, class: vreg_64 }
+ - { id: 6, class: vreg_96 }
+ - { id: 7, class: vreg_96 }
+ - { id: 8, class: vreg_128 }
+ - { id: 9, class: vreg_128 }
+liveins:
+ - { reg: '%sgpr6', virtual-reg: '%1' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %sgpr0, %vgpr0_vgpr1
+
+ %3 = IMPLICIT_DEF
+ undef %4.sub0 = COPY %sgpr0
+ %4.sub1 = COPY %3.sub0
+ undef %5.sub0 = COPY %4.sub1
+ %5.sub1 = COPY %4.sub0
+ FLAT_STORE_DWORDX2 %vgpr0_vgpr1, killed %5, 0, 0, 0, implicit %exec, implicit %flat_scr
+
+ %6 = IMPLICIT_DEF
+ undef %7.sub0_sub1 = COPY %6
+ %7.sub2 = COPY %3.sub0
+ FLAT_STORE_DWORDX3 %vgpr0_vgpr1, killed %7, 0, 0, 0, implicit %exec, implicit %flat_scr
+
+ %8 = IMPLICIT_DEF
+ undef %9.sub0_sub1_sub2 = COPY %8
+ %9.sub3 = COPY %3.sub0
+ FLAT_STORE_DWORDX4 %vgpr0_vgpr1, killed %9, 0, 0, 0, implicit %exec, implicit %flat_scr
+...
diff --git a/test/CodeGen/AMDGPU/literals.ll b/test/CodeGen/AMDGPU/literals.ll
index 82fbb7f46186..1c546ba9f74b 100644
--- a/test/CodeGen/AMDGPU/literals.ll
+++ b/test/CodeGen/AMDGPU/literals.ll
@@ -10,7 +10,7 @@
; CHECK: LSHR
; CHECK-NEXT: ADD_INT * {{\** *}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, literal.y
; CHECK-NEXT: 5
-define void @i32_literal(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @i32_literal(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = add i32 5, %in
store i32 %0, i32 addrspace(1)* %out
@@ -27,7 +27,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: ADD * {{\** *}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, literal.y
; CHECK-NEXT: 1084227584(5.0
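; (1084227584 is 0x40a00000, the IEEE-754 single-precision encoding of 5.0.)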
-define void @float_literal(float addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @float_literal(float addrspace(1)* %out, float %in) {
entry:
%0 = fadd float 5.0, %in
store float %0, float addrspace(1)* %out
@@ -41,7 +41,7 @@ entry:
; CHECK-NEXT: MOV {{\** *}}T[[GPR]].Z, 0.0
; CHECK-NEXT: MOV {{\** *}}T[[GPR]].W, 0.0
-define void @inline_literal_reg_sequence(<4 x i32> addrspace(1)* %out) {
+define amdgpu_kernel void @inline_literal_reg_sequence(<4 x i32> addrspace(1)* %out) {
entry:
store <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> addrspace(1)* %out
ret void
@@ -52,7 +52,7 @@ entry:
; CHECK-NEXT: DOT4 T[[GPR]].Y (MASKED), 1.0
; CHECK-NEXT: DOT4 T[[GPR]].Z (MASKED), 1.0
; CHECK-NEXT: DOT4 * T[[GPR]].W (MASKED), 1.0
-define void @inline_literal_dot4(float addrspace(1)* %out) {
+define amdgpu_kernel void @inline_literal_dot4(float addrspace(1)* %out) {
entry:
%0 = call float @llvm.r600.dot4(<4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>)
store float %0, float addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/liveness.mir b/test/CodeGen/AMDGPU/liveness.mir
index 112c3f8e69a6..48762e3f2ab4 100644
--- a/test/CodeGen/AMDGPU/liveness.mir
+++ b/test/CodeGen/AMDGPU/liveness.mir
@@ -8,7 +8,7 @@
# Should see three distinct value numbers:
# CHECK: %vreg0 [{{.*}}:0)[{{.*}}:1)[{{.*}}:2) 0@{{[0-9]+[Berd]}} 1@{{[0-9]+[Berd]}} 2@{{[0-9]+B-phi}}
--- |
- define void @test0() { ret void }
+ define amdgpu_kernel void @test0() { ret void }
...
---
name: test0
diff --git a/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.i32.ll b/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.i32.ll
deleted file mode 100644
index 77dd4b134982..000000000000
--- a/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.i32.ll
+++ /dev/null
@@ -1,437 +0,0 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood -show-mc-encoding -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-
-declare i32 @llvm.AMDGPU.bfe.i32(i32, i32, i32) nounwind readnone
-
-; FUNC-LABEL: {{^}}bfe_i32_arg_arg_arg:
-; SI: v_bfe_i32
-; EG: BFE_INT
-; EG: encoding: [{{[x0-9a-f]+,[x0-9a-f]+,[x0-9a-f]+,[x0-9a-f]+,[x0-9a-f]+}},0xac
-define void @bfe_i32_arg_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %src0, i32 %src1, i32 %src1) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_arg_arg_imm:
-; SI: v_bfe_i32
-; EG: BFE_INT
-define void @bfe_i32_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %src0, i32 %src1, i32 123) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_arg_imm_arg:
-; SI: v_bfe_i32
-; EG: BFE_INT
-define void @bfe_i32_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %src0, i32 123, i32 %src2) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_imm_arg_arg:
-; SI: v_bfe_i32
-; EG: BFE_INT
-define void @bfe_i32_imm_arg_arg(i32 addrspace(1)* %out, i32 %src1, i32 %src2) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 123, i32 %src1, i32 %src2) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}v_bfe_print_arg:
-; SI: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 2, 8
-define void @v_bfe_print_arg(i32 addrspace(1)* %out, i32 addrspace(1)* %src0) nounwind {
- %load = load i32, i32 addrspace(1)* %src0, align 4
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %load, i32 2, i32 8) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_arg_0_width_reg_offset:
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_arg_0_width_reg_offset(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %src0, i32 %src1, i32 0) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_arg_0_width_imm_offset:
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_arg_0_width_imm_offset(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %src0, i32 8, i32 0) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_test_6:
-; SI: v_lshlrev_b32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
-; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
-; SI: s_endpgm
-define void @bfe_i32_test_6(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %shl = shl i32 %x, 31
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 1, i32 31)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_test_7:
-; SI-NOT: shl
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-define void @bfe_i32_test_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %shl = shl i32 %x, 31
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 0, i32 31)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_test_8:
-; SI: buffer_load_dword
-; SI: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 1
-; SI: s_endpgm
-define void @bfe_i32_test_8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %shl = shl i32 %x, 31
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 31, i32 1)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_test_9:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-define void @bfe_i32_test_9(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 31, i32 1)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_test_10:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-define void @bfe_i32_test_10(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 1, i32 31)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_test_11:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-define void @bfe_i32_test_11(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 8, i32 24)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_test_12:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 24, v{{[0-9]+}}
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-define void @bfe_i32_test_12(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 24, i32 8)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_test_13:
-; SI: v_ashrrev_i32_e32 {{v[0-9]+}}, 31, {{v[0-9]+}}
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-define void @bfe_i32_test_13(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %shl = ashr i32 %x, 31
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 31, i32 1)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_test_14:
-; SI-NOT: lshr
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-define void @bfe_i32_test_14(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %shl = lshr i32 %x, 31
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 31, i32 1)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_0:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_0(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 0, i32 0, i32 0) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_1:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_1(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 12334, i32 0, i32 0) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_2:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_2(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 0, i32 0, i32 1) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_3:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], -1
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_3(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 1, i32 0, i32 1) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_4:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], -1
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_4(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 4294967295, i32 0, i32 1) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_5:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], -1
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_5(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 128, i32 7, i32 1) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_6:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0xffffff80
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_6(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 128, i32 0, i32 8) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_7:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0x7f
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_7(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 127, i32 0, i32 8) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_8:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 1
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_8(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 127, i32 6, i32 8) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_9:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 1
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_9(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 65536, i32 16, i32 8) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_10:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_10(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 65535, i32 16, i32 16) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_11:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], -6
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_11(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 160, i32 4, i32 4) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_12:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_12(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 160, i32 31, i32 1) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_13:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 1
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_13(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 131070, i32 16, i32 16) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_14:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 40
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_14(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 160, i32 2, i32 30) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_15:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 10
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_15(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 160, i32 4, i32 28) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_16:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], -1
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_16(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 4294967295, i32 1, i32 7) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_17:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0x7f
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_17(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 255, i32 1, i32 31) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_i32_constant_fold_test_18:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_18(i32 addrspace(1)* %out) nounwind {
- %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 255, i32 31, i32 1) nounwind readnone
- store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_sext_in_reg_i24:
-; SI: buffer_load_dword [[LOAD:v[0-9]+]],
-; SI-NOT: v_lshl
-; SI-NOT: v_ashr
-; SI: v_bfe_i32 [[BFE:v[0-9]+]], [[LOAD]], 0, 24
-; SI: buffer_store_dword [[BFE]],
-define void @bfe_sext_in_reg_i24(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 0, i32 24)
- %shl = shl i32 %bfe, 8
- %ashr = ashr i32 %shl, 8
- store i32 %ashr, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: @simplify_demanded_bfe_sdiv
-; SI: buffer_load_dword [[LOAD:v[0-9]+]]
-; SI: v_bfe_i32 [[BFE:v[0-9]+]], [[LOAD]], 1, 16
-; SI: v_lshrrev_b32_e32 [[TMP0:v[0-9]+]], 31, [[BFE]]
-; SI: v_add_i32_e32 [[TMP1:v[0-9]+]], vcc, [[TMP0]], [[BFE]]
-; SI: v_ashrrev_i32_e32 [[TMP2:v[0-9]+]], 1, [[TMP1]]
-; SI: buffer_store_dword [[TMP2]]
-define void @simplify_demanded_bfe_sdiv(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %src = load i32, i32 addrspace(1)* %in, align 4
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %src, i32 1, i32 16) nounwind readnone
- %div = sdiv i32 %bfe, 2
- store i32 %div, i32 addrspace(1)* %out, align 4
- ret void
-}
diff --git a/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.u32.ll b/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.u32.ll
deleted file mode 100644
index ee47b14c496d..000000000000
--- a/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.u32.ll
+++ /dev/null
@@ -1,631 +0,0 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=FUNC -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=FUNC -check-prefix=GCN %s
-; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-
-declare i32 @llvm.AMDGPU.bfe.u32(i32, i32, i32) nounwind readnone
-
-; FUNC-LABEL: {{^}}bfe_u32_arg_arg_arg:
-; SI: v_bfe_u32
-; EG: BFE_UINT
-define void @bfe_u32_arg_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 %src0, i32 %src1, i32 %src1) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_arg_arg_imm:
-; SI: v_bfe_u32
-; EG: BFE_UINT
-define void @bfe_u32_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 %src0, i32 %src1, i32 123) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_arg_imm_arg:
-; SI: v_bfe_u32
-; EG: BFE_UINT
-define void @bfe_u32_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 %src0, i32 123, i32 %src2) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_imm_arg_arg:
-; SI: v_bfe_u32
-; EG: BFE_UINT
-define void @bfe_u32_imm_arg_arg(i32 addrspace(1)* %out, i32 %src1, i32 %src2) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 123, i32 %src1, i32 %src2) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_arg_0_width_reg_offset:
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_arg_0_width_reg_offset(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 %src0, i32 %src1, i32 0) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_arg_0_width_imm_offset:
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_arg_0_width_imm_offset(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 %src0, i32 8, i32 0) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_zextload_i8:
-; SI: buffer_load_ubyte
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-define void @bfe_u32_zextload_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
- %load = load i8, i8 addrspace(1)* %in
- %ext = zext i8 %load to i32
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 0, i32 8)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_zext_in_reg_i8:
-; GCN: buffer_load_dword
-; SI: v_add_i32
-; SI-NEXT: v_and_b32_e32
-; FIXME: Should be using s_add_i32
-; VI: v_add_i32
-; VI-NEXT: v_and_b32_e32
-; SI-NOT: {{[^@]}}bfe
-; GCN: s_endpgm
-define void @bfe_u32_zext_in_reg_i8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %load = load i32, i32 addrspace(1)* %in, align 4
- %add = add i32 %load, 1
- %ext = and i32 %add, 255
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 0, i32 8)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_zext_in_reg_i16:
-; SI: buffer_load_dword
-; SI: v_add_i32
-; SI-NEXT: v_and_b32_e32
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-define void @bfe_u32_zext_in_reg_i16(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %load = load i32, i32 addrspace(1)* %in, align 4
- %add = add i32 %load, 1
- %ext = and i32 %add, 65535
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 0, i32 16)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_zext_in_reg_i8_offset_1:
-; SI: buffer_load_dword
-; SI: v_add_i32
-; SI: bfe
-; SI: s_endpgm
-define void @bfe_u32_zext_in_reg_i8_offset_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %load = load i32, i32 addrspace(1)* %in, align 4
- %add = add i32 %load, 1
- %ext = and i32 %add, 255
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 1, i32 8)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_zext_in_reg_i8_offset_3:
-; SI: buffer_load_dword
-; SI: v_add_i32
-; SI-NEXT: v_and_b32_e32 {{v[0-9]+}}, 0xf8
-; SI-NEXT: bfe
-; SI: s_endpgm
-define void @bfe_u32_zext_in_reg_i8_offset_3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %load = load i32, i32 addrspace(1)* %in, align 4
- %add = add i32 %load, 1
- %ext = and i32 %add, 255
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 3, i32 8)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_zext_in_reg_i8_offset_7:
-; SI: buffer_load_dword
-; SI: v_add_i32
-; SI-NEXT: v_and_b32_e32 {{v[0-9]+}}, 0x80
-; SI-NEXT: bfe
-; SI: s_endpgm
-define void @bfe_u32_zext_in_reg_i8_offset_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %load = load i32, i32 addrspace(1)* %in, align 4
- %add = add i32 %load, 1
- %ext = and i32 %add, 255
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 7, i32 8)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_zext_in_reg_i16_offset_8:
-; SI: buffer_load_dword
-; SI: v_add_i32
-; SI-NEXT: bfe
-; SI: s_endpgm
-define void @bfe_u32_zext_in_reg_i16_offset_8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %load = load i32, i32 addrspace(1)* %in, align 4
- %add = add i32 %load, 1
- %ext = and i32 %add, 65535
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 8, i32 8)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_test_1:
-; SI: buffer_load_dword
-; SI: v_and_b32_e32 {{v[0-9]+}}, 1, {{v[0-9]+}}
-; SI: s_endpgm
-; EG: AND_INT T{{[0-9]\.[XYZW]}}, T{{[0-9]\.[XYZW]}}, 1,
-define void @bfe_u32_test_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %x, i32 0, i32 1)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-define void @bfe_u32_test_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %shl = shl i32 %x, 31
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 0, i32 8)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-define void @bfe_u32_test_3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %shl = shl i32 %x, 31
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 0, i32 1)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_test_4:
-; SI-NOT: lshl
-; SI-NOT: shr
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-define void @bfe_u32_test_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %shl = shl i32 %x, 31
- %shr = lshr i32 %shl, 31
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shr, i32 31, i32 1)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
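
The constant 0 pinned by test_4's checks follows directly from the IR: (%x << 31) >> 31 with a logical shift is always 0 or 1, so bit 31, the single bit this bfe reads, is always 0, and the whole chain folds to a stored 0.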
-
-; FUNC-LABEL: {{^}}bfe_u32_test_5:
-; SI: buffer_load_dword
-; SI-NOT: lshl
-; SI-NOT: shr
-; SI: v_bfe_i32 {{v[0-9]+}}, {{v[0-9]+}}, 0, 1
-; SI: s_endpgm
-define void @bfe_u32_test_5(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %shl = shl i32 %x, 31
- %shr = ashr i32 %shl, 31
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shr, i32 0, i32 1)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_test_6:
-; SI: v_lshlrev_b32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
-; SI: v_lshrrev_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
-; SI: s_endpgm
-define void @bfe_u32_test_6(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %shl = shl i32 %x, 31
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 1, i32 31)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_test_7:
-; SI: v_lshlrev_b32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-define void @bfe_u32_test_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %shl = shl i32 %x, 31
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 0, i32 31)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_test_8:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_and_b32_e32 {{v[0-9]+}}, 1, {{v[0-9]+}}
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-define void @bfe_u32_test_8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %shl = shl i32 %x, 31
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 31, i32 1)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_test_9:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_lshrrev_b32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-define void @bfe_u32_test_9(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %x, i32 31, i32 1)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_test_10:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_lshrrev_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-define void @bfe_u32_test_10(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %x, i32 1, i32 31)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_test_11:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_lshrrev_b32_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-define void @bfe_u32_test_11(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %x, i32 8, i32 24)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_test_12:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_lshrrev_b32_e32 v{{[0-9]+}}, 24, v{{[0-9]+}}
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-define void @bfe_u32_test_12(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %x, i32 24, i32 8)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_test_13:
-; V_ASHRREV_U32_e32 {{v[0-9]+}}, 31, {{v[0-9]+}}
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-define void @bfe_u32_test_13(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %shl = ashr i32 %x, 31
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 31, i32 1)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_test_14:
-; SI-NOT: lshr
-; SI-NOT: {{[^@]}}bfe
-; SI: s_endpgm
-define void @bfe_u32_test_14(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %shl = lshr i32 %x, 31
- %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 31, i32 1)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_0:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_0(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 0, i32 0, i32 0) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
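
A reading aid for the constant-fold block this test opens (inferred from the expected v_mov_b32 immediates below, not restated from the backend source): for a non-zero width, llvm.AMDGPU.bfe.u32(src, offset, width) extracts width bits starting at bit offset, i.e. (src >> offset) & ((1 << width) - 1), and a zero width yields 0, which is why tests 0 through 2 all store 0. A minimal reference expansion in IR, assuming 0 <= offset < 32 and 0 < width < 32 (the shifts are poison outside that range):

; Hypothetical reference expansion, not part of the deleted test file.
define i32 @bfe_u32_ref(i32 %src, i32 %offset, i32 %width) {
  %shr  = lshr i32 %src, %offset   ; drop the low 'offset' bits
  %ones = shl i32 1, %width
  %mask = sub i32 %ones, 1         ; (1 << width) - 1
  %res  = and i32 %shr, %mask      ; keep the low 'width' bits
  ret i32 %res
}

This reproduces the expected constants of most of the folds that follow, e.g. test_5: (128 >> 7) & 1 = 1, and test_7: 127 & 0xff = 0x7f.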
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_1:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_1(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 12334, i32 0, i32 0) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_2:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_2(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 0, i32 0, i32 1) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_3:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 1
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_3(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 1, i32 0, i32 1) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_4:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], -1
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_4(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 4294967295, i32 0, i32 1) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_5:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 1
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_5(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 128, i32 7, i32 1) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_6:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0x80
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_6(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 128, i32 0, i32 8) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_7:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0x7f
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_7(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 127, i32 0, i32 8) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_8:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 1
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_8(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 127, i32 6, i32 8) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_9:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 1
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_9(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 65536, i32 16, i32 8) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_10:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_10(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 65535, i32 16, i32 16) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_11:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 10
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_11(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 160, i32 4, i32 4) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_12:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_12(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 160, i32 31, i32 1) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_13:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 1
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_13(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 131070, i32 16, i32 16) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_14:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 40
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_14(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 160, i32 2, i32 30) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_15:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 10
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_15(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 160, i32 4, i32 28) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
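
Worked values for the three 160-based folds above: 160 is 0xA0 (0b1010_0000). test_11: (160 >> 4) & 0xF = 0b1010 = 10. test_14: 160 >> 2 = 40, and the 30-bit width masks nothing away. test_15: 160 >> 4 = 10, again untouched by the 28-bit mask, matching the expected 10, 40 and 10 immediates.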
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_16:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0x7f
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_16(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 4294967295, i32 1, i32 7) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_17:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0x7f
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_17(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 255, i32 1, i32 31) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_u32_constant_fold_test_18:
-; SI-NOT: {{[^@]}}bfe
-; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
-; SI: buffer_store_dword [[VREG]],
-; SI: s_endpgm
-; EG-NOT: BFE
-define void @bfe_u32_constant_fold_test_18(i32 addrspace(1)* %out) nounwind {
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 255, i32 31, i32 1) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; Make sure that SimplifyDemandedBits doesn't cause the and to be
-; reduced to the bits demanded by the bfe.
-
-; XXX: The operand to v_bfe_u32 could also just directly be the load register.
-; FUNC-LABEL: {{^}}simplify_bfe_u32_multi_use_arg:
-; SI: buffer_load_dword [[ARG:v[0-9]+]]
-; SI: v_and_b32_e32 [[AND:v[0-9]+]], 63, [[ARG]]
-; SI: v_bfe_u32 [[BFE:v[0-9]+]], [[AND]], 2, 2
-; SI-DAG: buffer_store_dword [[AND]]
-; SI-DAG: buffer_store_dword [[BFE]]
-; SI: s_endpgm
-define void @simplify_bfe_u32_multi_use_arg(i32 addrspace(1)* %out0,
- i32 addrspace(1)* %out1,
- i32 addrspace(1)* %in) nounwind {
- %src = load i32, i32 addrspace(1)* %in, align 4
- %and = and i32 %src, 63
- %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 %and, i32 2, i32 2) nounwind readnone
- store i32 %bfe_u32, i32 addrspace(1)* %out0, align 4
- store i32 %and, i32 addrspace(1)* %out1, align 4
- ret void
-}
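
The bfe here reads only bits 2 and 3 of %and (demanded mask 0xC), so an over-eager demanded-bits shrink would rewrite the test as follows (hypothetical, for illustration only):

  %and = and i32 %src, 12                                            ; was 63
  %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 %and, i32 2, i32 2)

The bfe result would be unchanged, but %and is also stored to %out1, so the shrink would corrupt that second use whenever %src has any of bits 0, 1, 4 or 5 set. That is what the comment above the test is guarding against.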
-
-; FUNC-LABEL: {{^}}lshr_and:
-; SI: s_bfe_u32 {{s[0-9]+}}, {{s[0-9]+}}, 0x30006
-; SI: buffer_store_dword
-define void @lshr_and(i32 addrspace(1)* %out, i32 %a) nounwind {
- %b = lshr i32 %a, 6
- %c = and i32 %b, 7
- store i32 %c, i32 addrspace(1)* %out, align 8
- ret void
-}
-
-; FUNC-LABEL: {{^}}v_lshr_and:
-; SI: v_bfe_u32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}, 3
-; SI: buffer_store_dword
-define void @v_lshr_and(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
- %c = lshr i32 %a, %b
- %d = and i32 %c, 7
- store i32 %d, i32 addrspace(1)* %out, align 8
- ret void
-}
-
-; FUNC-LABEL: {{^}}and_lshr:
-; SI: s_bfe_u32 {{s[0-9]+}}, {{s[0-9]+}}, 0x30006
-; SI: buffer_store_dword
-define void @and_lshr(i32 addrspace(1)* %out, i32 %a) nounwind {
- %b = and i32 %a, 448
- %c = lshr i32 %b, 6
- store i32 %c, i32 addrspace(1)* %out, align 8
- ret void
-}
-
-; FUNC-LABEL: {{^}}and_lshr2:
-; SI: s_bfe_u32 {{s[0-9]+}}, {{s[0-9]+}}, 0x30006
-; SI: buffer_store_dword
-define void @and_lshr2(i32 addrspace(1)* %out, i32 %a) nounwind {
- %b = and i32 %a, 511
- %c = lshr i32 %b, 6
- store i32 %c, i32 addrspace(1)* %out, align 8
- ret void
-}
-
-; FUNC-LABEL: {{^}}shl_lshr:
-; SI: s_bfe_u32 {{s[0-9]+}}, {{s[0-9]+}}, 0x150002
-; SI: buffer_store_dword
-define void @shl_lshr(i32 addrspace(1)* %out, i32 %a) nounwind {
- %b = shl i32 %a, 9
- %c = lshr i32 %b, 11
- store i32 %c, i32 addrspace(1)* %out, align 8
- ret void
-}
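
A note on the s_bfe_u32 immediates in these last four tests (stated as a reading aid, assuming the usual packed operand layout of width << 16 | offset rather than quoting the ISA docs): 0x30006 = (3 << 16) | 6, i.e. extract 3 bits at offset 6, which is exactly (%a >> 6) & 7 in lshr_and and (%a & 448) >> 6 in and_lshr (448 = 0b1_1100_0000, bits 6..8). For shl_lshr, 0x150002 = (21 << 16) | 2: (%a << 9) >> 11 keeps bits 2..22 of %a, a 21-bit field at offset 2.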
diff --git a/test/CodeGen/AMDGPU/llvm.AMDGPU.clamp.ll b/test/CodeGen/AMDGPU/llvm.AMDGPU.clamp.ll
deleted file mode 100644
index 2336109f4dad..000000000000
--- a/test/CodeGen/AMDGPU/llvm.AMDGPU.clamp.ll
+++ /dev/null
@@ -1,56 +0,0 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-
-declare float @llvm.fabs.f32(float) nounwind readnone
-declare float @llvm.AMDGPU.clamp.f32(float, float, float) nounwind readnone
-
-; FUNC-LABEL: {{^}}clamp_0_1_f32:
-; SI: s_load_dword [[ARG:s[0-9]+]],
-; SI: v_add_f32_e64 [[RESULT:v[0-9]+]], [[ARG]], 0 clamp{{$}}
-; SI: buffer_store_dword [[RESULT]]
-; SI: s_endpgm
-
-; EG: MOV_SAT
-define void @clamp_0_1_f32(float addrspace(1)* %out, float %src) nounwind {
- %clamp = call float @llvm.AMDGPU.clamp.f32(float %src, float 0.0, float 1.0) nounwind readnone
- store float %clamp, float addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}clamp_fabs_0_1_f32:
-; SI: s_load_dword [[ARG:s[0-9]+]],
-; SI: v_add_f32_e64 [[RESULT:v[0-9]+]], |[[ARG]]|, 0 clamp{{$}}
-; SI: buffer_store_dword [[RESULT]]
-; SI: s_endpgm
-define void @clamp_fabs_0_1_f32(float addrspace(1)* %out, float %src) nounwind {
- %src.fabs = call float @llvm.fabs.f32(float %src) nounwind readnone
- %clamp = call float @llvm.AMDGPU.clamp.f32(float %src.fabs, float 0.0, float 1.0) nounwind readnone
- store float %clamp, float addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}clamp_fneg_0_1_f32:
-; SI: s_load_dword [[ARG:s[0-9]+]],
-; SI: v_add_f32_e64 [[RESULT:v[0-9]+]], -[[ARG]], 0 clamp{{$}}
-; SI: buffer_store_dword [[RESULT]]
-; SI: s_endpgm
-define void @clamp_fneg_0_1_f32(float addrspace(1)* %out, float %src) nounwind {
- %src.fneg = fsub float -0.0, %src
- %clamp = call float @llvm.AMDGPU.clamp.f32(float %src.fneg, float 0.0, float 1.0) nounwind readnone
- store float %clamp, float addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}clamp_fneg_fabs_0_1_f32:
-; SI: s_load_dword [[ARG:s[0-9]+]],
-; SI: v_add_f32_e64 [[RESULT:v[0-9]+]], -|[[ARG]]|, 0 clamp{{$}}
-; SI: buffer_store_dword [[RESULT]]
-; SI: s_endpgm
-define void @clamp_fneg_fabs_0_1_f32(float addrspace(1)* %out, float %src) nounwind {
- %src.fabs = call float @llvm.fabs.f32(float %src) nounwind readnone
- %src.fneg.fabs = fsub float -0.0, %src.fabs
- %clamp = call float @llvm.AMDGPU.clamp.f32(float %src.fneg.fabs, float 0.0, float 1.0) nounwind readnone
- store float %clamp, float addrspace(1)* %out, align 4
- ret void
-}
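
A reading note for the SI checks in this deleted file: the 0..1 clamp is not a separate instruction. It lowers to v_add_f32_e64 dst, src, 0 clamp, an add with zero whose output-clamp modifier does the saturation, and the fabs/fneg variants fold into the |src| and -src source modifiers visible in the check lines rather than being emitted as separate instructions.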
diff --git a/test/CodeGen/AMDGPU/llvm.AMDGPU.kill.ll b/test/CodeGen/AMDGPU/llvm.AMDGPU.kill.ll
index 59997d27683d..595f632b493d 100644
--- a/test/CodeGen/AMDGPU/llvm.AMDGPU.kill.ll
+++ b/test/CodeGen/AMDGPU/llvm.AMDGPU.kill.ll
@@ -4,15 +4,14 @@
; SI-LABEL: {{^}}kill_gs_const:
; SI-NOT: v_cmpx_le_f32
; SI: s_mov_b64 exec, 0
-
define amdgpu_gs void @kill_gs_const() {
main_body:
- %0 = icmp ule i32 0, 3
- %1 = select i1 %0, float 1.000000e+00, float -1.000000e+00
- call void @llvm.AMDGPU.kill(float %1)
- %2 = icmp ule i32 3, 0
- %3 = select i1 %2, float 1.000000e+00, float -1.000000e+00
- call void @llvm.AMDGPU.kill(float %3)
+ %tmp = icmp ule i32 0, 3
+ %tmp1 = select i1 %tmp, float 1.000000e+00, float -1.000000e+00
+ call void @llvm.AMDGPU.kill(float %tmp1)
+ %tmp2 = icmp ule i32 3, 0
+ %tmp3 = select i1 %tmp2, float 1.000000e+00, float -1.000000e+00
+ call void @llvm.AMDGPU.kill(float %tmp3)
ret void
}
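
Both kill conditions in this test fold at compile time: icmp ule 0, 3 is true, selecting 1.0 (no kill), while icmp ule 3, 0 is false, selecting -1.0, an unconditional kill, assuming the usual kill semantics of discarding the lane when the operand is negative. That is why the checks demand s_mov_b64 exec, 0 and forbid a v_cmpx_le_f32 comparison.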
@@ -21,16 +20,16 @@ main_body:
; SI: v_cmp_gt_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], 0, v{{[0-9]+}}
; SI: v_cmpx_le_f32_e32 vcc, 0, v{{[0-9]+}}
; SI: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1.0, [[CMP]]
-define amdgpu_ps void @kill_vcc_implicit_def([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [17 x <4 x i32>] addrspace(2)* byval, [34 x <8 x i32>] addrspace(2)* byval, float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, i32, float, float) {
+define amdgpu_ps void @kill_vcc_implicit_def([6 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [17 x <4 x i32>] addrspace(2)* byval %arg2, [34 x <8 x i32>] addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, i32 %arg19, float %arg20, float %arg21) {
entry:
- %tmp0 = fcmp olt float %13, 0.0
- call void @llvm.AMDGPU.kill(float %14)
- %tmp1 = select i1 %tmp0, float 1.0, float 0.0
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 1, i32 1, float %tmp1, float %tmp1, float %tmp1, float %tmp1)
+ %tmp0 = fcmp olt float %arg13, 0.000000e+00
+ call void @llvm.AMDGPU.kill(float %arg14)
+ %tmp1 = select i1 %tmp0, float 1.000000e+00, float 0.000000e+00
+ call void @llvm.amdgcn.exp.f32(i32 1, i32 15, float %tmp1, float %tmp1, float %tmp1, float %tmp1, i1 true, i1 true) #0
ret void
}
-declare void @llvm.AMDGPU.kill(float)
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+declare void @llvm.AMDGPU.kill(float) #0
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
-!0 = !{!"const", null, i32 1}
+attributes #0 = { nounwind }
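
A reading aid for this hunk, with operand roles inferred from the calls in this diff rather than restated from the intrinsic docs: the legacy export packed its flags up front, while the replacement takes the target first and trails the booleans, roughly

  declare void @llvm.SI.export(i32 %en, i32 %vm, i32 %done, i32 %tgt, i32 %compr, float, float, float, float)
  declare void @llvm.amdgcn.exp.f32(i32 %tgt, i32 %en, float, float, float, float, i1 %done, i1 %vm)

so the old (i32 15, i32 1, i32 1, i32 1, i32 1, ...) call becomes the (i32 1, i32 15, ..., i1 true, i1 true) call above.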
diff --git a/test/CodeGen/AMDGPU/llvm.SI.export.ll b/test/CodeGen/AMDGPU/llvm.SI.export.ll
deleted file mode 100644
index 23a32dcfd943..000000000000
--- a/test/CodeGen/AMDGPU/llvm.SI.export.ll
+++ /dev/null
@@ -1,237 +0,0 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=GCN %s
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float) #0
-
-; GCN-LABEL: {{^}}test_export_zeroes:
-; GCN: exp mrt0 off, off, off, off{{$}}
-; GCN: exp mrt0 off, off, off, off done{{$}}
-define void @test_export_zeroes() #0 {
-
- call void @llvm.SI.export(i32 0, i32 0, i32 0, i32 0, i32 0, float 0.0, float 0.0, float 0.0, float 0.0)
- call void @llvm.SI.export(i32 0, i32 0, i32 1, i32 0, i32 0, float 0.0, float 0.0, float 0.0, float 0.0)
- ret void
-}
-
-; FIXME: Should not set up registers for the unused source registers.
-
-; GCN-LABEL: {{^}}test_export_en_src0:
-; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
-; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
-; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
-; GCN: exp mrt0 [[SRC0]], off, off, off done{{$}}
-define void @test_export_en_src0() #0 {
- call void @llvm.SI.export(i32 1, i32 0, i32 1, i32 0, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- ret void
-}
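
The FIXME above is about the four GCN-DAG v_mov_b32 checks: with an enable mask of 1 only SRC0 is actually exported (exp mrt0 [[SRC0]], off, off, off), yet 2.0, 0.5 and 4.0 are still materialized into registers, so the checks pin the current wasteful selection rather than ideal code. The same pattern repeats through the single-source tests below.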
-
-; GCN-LABEL: {{^}}test_export_en_src1:
-; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
-; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
-; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
-; GCN: exp mrt0 off, [[SRC1]], off, off done{{$}}
-define void @test_export_en_src1() #0 {
- call void @llvm.SI.export(i32 2, i32 0, i32 1, i32 0, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- ret void
-}
-
-; GCN-LABEL: {{^}}test_export_en_src2:
-; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
-; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
-; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
-; GCN: exp mrt0 off, off, [[SRC2]], off done{{$}}
-define void @test_export_en_src2() #0 {
- call void @llvm.SI.export(i32 4, i32 0, i32 1, i32 0, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- ret void
-}
-
-; GCN-LABEL: {{^}}test_export_en_src3:
-; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
-; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
-; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
-; GCN: exp mrt0 off, off, off, [[SRC3]] done{{$}}
-define void @test_export_en_src3() #0 {
- call void @llvm.SI.export(i32 8, i32 0, i32 1, i32 0, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- ret void
-}
-
-; GCN-LABEL: {{^}}test_export_en_src0_src1:
-; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
-; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
-; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
-; GCN: exp mrt0 [[SRC0]], [[SRC1]], off, off done{{$}}
-define void @test_export_en_src0_src1() #0 {
- call void @llvm.SI.export(i32 3, i32 0, i32 1, i32 0, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- ret void
-}
-
-; GCN-LABEL: {{^}}test_export_en_src0_src2:
-; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
-; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
-; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
-; GCN: exp mrt0 [[SRC0]], off, [[SRC2]], off done{{$}}
-define void @test_export_en_src0_src2() #0 {
- call void @llvm.SI.export(i32 5, i32 0, i32 1, i32 0, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- ret void
-}
-
-; GCN-LABEL: {{^}}test_export_en_src0_src3:
-; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
-; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
-; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
-; GCN: exp mrt0 [[SRC0]], off, off, [[SRC3]]{{$}}
-; GCN: exp mrt0 [[SRC0]], off, off, [[SRC3]] done{{$}}
-define void @test_export_en_src0_src3() #0 {
- call void @llvm.SI.export(i32 9, i32 0, i32 0, i32 0, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- call void @llvm.SI.export(i32 9, i32 0, i32 1, i32 0, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- ret void
-}
-
-; GCN-LABEL: {{^}}test_export_en_src0_src1_src2_src3:
-; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
-; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
-; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
-; GCN: exp mrt0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
-; GCN: exp mrt0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
-define void @test_export_en_src0_src1_src2_src3() #0 {
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 0, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 0, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- ret void
-}
-
-; GCN-LABEL: {{^}}test_export_mrt7:
-; GCN-DAG: v_mov_b32_e32 [[VHALF:v[0-9]+]], 0.5
-; GCN: exp mrt7 [[VHALF]], [[VHALF]], [[VHALF]], [[VHALF]]{{$}}
-; GCN: exp mrt7 [[VHALF]], [[VHALF]], [[VHALF]], [[VHALF]] done{{$}}
-define void @test_export_mrt7() #0 {
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 7, i32 0, float 0.5, float 0.5, float 0.5, float 0.5)
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 7, i32 0, float 0.5, float 0.5, float 0.5, float 0.5)
- ret void
-}
-
-; GCN-LABEL: {{^}}test_export_z:
-; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
-; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
-; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
-; GCN: exp mrtz [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
-; GCN: exp mrtz [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
-define void @test_export_z() #0 {
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 8, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 8, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- ret void
-}
-
-; GCN-LABEL: {{^}}test_export_null:
-; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
-; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
-; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
-; GCN: exp null [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
-; GCN: exp null [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
-define void @test_export_null() #0 {
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 9, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 9, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- ret void
-}
-
-; GCN-LABEL: {{^}}test_export_reserved10:
-; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
-; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
-; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
-; GCN: exp invalid_target_10 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
-; GCN: exp invalid_target_10 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
-define void @test_export_reserved10() #0 {
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 10, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 10, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- ret void
-}
-
-; GCN-LABEL: {{^}}test_export_reserved11:
-; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
-; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
-; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
-; GCN: exp invalid_target_11 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
-; GCN: exp invalid_target_11 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
-define void @test_export_reserved11() #0 {
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 11, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 11, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- ret void
-}
-
-; GCN-LABEL: {{^}}test_export_pos0:
-; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
-; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
-; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
-; GCN: exp pos0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
-; GCN: exp pos0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
-define void @test_export_pos0() #0 {
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 12, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- ret void
-}
-
-; GCN-LABEL: {{^}}test_export_pos3:
-; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
-; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
-; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
-; GCN: exp pos3 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
-; GCN: exp pos3 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
-define void @test_export_pos3() #0 {
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 15, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 15, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- ret void
-}
-
-; GCN-LABEL: {{^}}test_export_param0:
-; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
-; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
-; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
-; GCN: exp param0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
-; GCN: exp param0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
-define void @test_export_param0() #0 {
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 32, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 32, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- ret void
-}
-
-; GCN-LABEL: {{^}}test_export_param31:
-; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
-; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
-; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
-; GCN: exp param31 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
-; GCN: exp param31 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
-define void @test_export_param31() #0 {
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 63, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 63, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- ret void
-}
-
-; GCN-LABEL: {{^}}test_export_vm:
-; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
-; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
-; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
-; GCN: exp mrt0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] vm{{$}}
-; GCN: exp mrt0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done vm{{$}}
-define void @test_export_vm() #0 {
- call void @llvm.SI.export(i32 15, i32 1, i32 0, i32 0, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float 1.0, float 2.0, float 0.5, float 4.0)
- ret void
-}
-
-attributes #0 = { nounwind "ShaderType"="0" }
diff --git a/test/CodeGen/AMDGPU/llvm.SI.fs.interp.ll b/test/CodeGen/AMDGPU/llvm.SI.fs.interp.ll
deleted file mode 100644
index 9e7c3c2e6201..000000000000
--- a/test/CodeGen/AMDGPU/llvm.SI.fs.interp.ll
+++ /dev/null
@@ -1,59 +0,0 @@
-;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=GCN %s
-;RUN: llc < %s -march=amdgcn -mcpu=kabini -verify-machineinstrs | FileCheck --check-prefix=GCN --check-prefix=16BANK %s
-;RUN: llc < %s -march=amdgcn -mcpu=stoney -verify-machineinstrs | FileCheck --check-prefix=GCN --check-prefix=16BANK %s
-;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=GCN %s
-
-;GCN-LABEL: {{^}}main:
-;GCN-NOT: s_wqm
-;GCN: s_mov_b32 m0
-;GCN-DAG: v_interp_mov_f32
-;GCN-DAG: v_interp_p1_f32
-;GCN-DAG: v_interp_p2_f32
-
-define amdgpu_ps void @main(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>) {
-main_body:
- %5 = call float @llvm.SI.fs.constant(i32 0, i32 0, i32 %3)
- %6 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %3, <2 x i32> %4)
- %7 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %3, <2 x i32> %4)
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %5, float %6, float %7, float %7)
- ret void
-}
-
-; Test that v_interp_p1 uses different source and destination registers
-; on 16-bank LDS chips.
-
-; 16BANK-LABEL: {{^}}v_interp_p1_bank16_bug:
-; 16BANK-NOT: v_interp_p1_f32 [[DST:v[0-9]+]], [[DST]]
-
-define amdgpu_ps void @v_interp_p1_bank16_bug([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [17 x <4 x i32>] addrspace(2)* byval, [34 x <8 x i32>] addrspace(2)* byval, float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, i32, float, float) {
-main_body:
- %22 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %5, <2 x i32> %7)
- %23 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %5, <2 x i32> %7)
- %24 = call float @llvm.SI.fs.interp(i32 2, i32 0, i32 %5, <2 x i32> %7)
- %25 = call float @fabs(float %22)
- %26 = call float @fabs(float %23)
- %27 = call float @fabs(float %24)
- %28 = call i32 @llvm.SI.packf16(float %25, float %26)
- %29 = bitcast i32 %28 to float
- %30 = call i32 @llvm.SI.packf16(float %27, float 1.000000e+00)
- %31 = bitcast i32 %30 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %29, float %31, float %29, float %31)
- ret void
-}
-
-; Function Attrs: readnone
-declare float @fabs(float) #1
-
-; Function Attrs: nounwind readnone
-declare i32 @llvm.SI.packf16(float, float) #0
-
-; Function Attrs: nounwind readnone
-declare float @llvm.SI.fs.constant(i32, i32, i32) #0
-
-; Function Attrs: nounwind readnone
-declare float @llvm.SI.fs.interp(i32, i32, i32, <2 x i32>) #0
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-
-attributes #0 = { nounwind readnone }
-attributes #1 = { readnone }
diff --git a/test/CodeGen/AMDGPU/llvm.SI.gather4.ll b/test/CodeGen/AMDGPU/llvm.SI.gather4.ll
deleted file mode 100644
index aef9f660436e..000000000000
--- a/test/CodeGen/AMDGPU/llvm.SI.gather4.ll
+++ /dev/null
@@ -1,525 +0,0 @@
-;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
-;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
-
-;CHECK-LABEL: {{^}}gather4_v2:
-;CHECK: image_gather4 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_v2() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.v2i32(<2 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4:
-;CHECK: image_gather4 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
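
The rest of this deleted file repeats one shape per sampling variant: call the corresponding llvm.SI.gather4.* intrinsic on undef coordinate, resource and sampler operands, export the four extracted channels, and check only that the matching image_gather4* opcode is selected with dmask:0x1 da.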
-
-;CHECK-LABEL: {{^}}gather4_cl:
-;CHECK: image_gather4_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_l:
-;CHECK: image_gather4_l {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_l() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.l.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_b:
-;CHECK: image_gather4_b {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_b() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.b.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_b_cl:
-;CHECK: image_gather4_b_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_b_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.b.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_b_cl_v8:
-;CHECK: image_gather4_b_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_b_cl_v8() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.b.cl.v8i32(<8 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_lz_v2:
-;CHECK: image_gather4_lz {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_lz_v2() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.lz.v2i32(<2 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_lz:
-;CHECK: image_gather4_lz {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_lz() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.lz.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-
-
-;CHECK-LABEL: {{^}}gather4_o:
-;CHECK: image_gather4_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_o() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_cl_o:
-;CHECK: image_gather4_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_cl_o() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_cl_o_v8:
-;CHECK: image_gather4_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_cl_o_v8() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.cl.o.v8i32(<8 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_l_o:
-;CHECK: image_gather4_l_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_l_o() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.l.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_l_o_v8:
-;CHECK: image_gather4_l_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_l_o_v8() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.l.o.v8i32(<8 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_b_o:
-;CHECK: image_gather4_b_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_b_o() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.b.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_b_o_v8:
-;CHECK: image_gather4_b_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_b_o_v8() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.b.o.v8i32(<8 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_b_cl_o:
-;CHECK: image_gather4_b_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_b_cl_o() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.b.cl.o.v8i32(<8 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_lz_o:
-;CHECK: image_gather4_lz_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_lz_o() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.lz.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-
-
-;CHECK-LABEL: {{^}}gather4_c:
-;CHECK: image_gather4_c {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_c() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.c.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_c_cl:
-;CHECK: image_gather4_c_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_c_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.c.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_c_cl_v8:
-;CHECK: image_gather4_c_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_c_cl_v8() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.c.cl.v8i32(<8 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_c_l:
-;CHECK: image_gather4_c_l {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_c_l() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.c.l.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_c_l_v8:
-;CHECK: image_gather4_c_l {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_c_l_v8() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.c.l.v8i32(<8 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_c_b:
-;CHECK: image_gather4_c_b {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_c_b() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.c.b.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_c_b_v8:
-;CHECK: image_gather4_c_b {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_c_b_v8() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.c.b.v8i32(<8 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_c_b_cl:
-;CHECK: image_gather4_c_b_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_c_b_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.c.b.cl.v8i32(<8 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_c_lz:
-;CHECK: image_gather4_c_lz {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_c_lz() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.c.lz.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-
-
-;CHECK-LABEL: {{^}}gather4_c_o:
-;CHECK: image_gather4_c_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_c_o() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.c.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_c_o_v8:
-;CHECK: image_gather4_c_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_c_o_v8() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.c.o.v8i32(<8 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_c_cl_o:
-;CHECK: image_gather4_c_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_c_cl_o() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.c.cl.o.v8i32(<8 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_c_l_o:
-;CHECK: image_gather4_c_l_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_c_l_o() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.c.l.o.v8i32(<8 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_c_b_o:
-;CHECK: image_gather4_c_b_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_c_b_o() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.c.b.o.v8i32(<8 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_c_b_cl_o:
-;CHECK: image_gather4_c_b_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_c_b_cl_o() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.c.b.cl.o.v8i32(<8 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_c_lz_o:
-;CHECK: image_gather4_c_lz_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_c_lz_o() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.c.lz.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_c_lz_o_v8:
-;CHECK: image_gather4_c_lz_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define amdgpu_ps void @gather4_c_lz_o_v8() {
-main_body:
- %r = call <4 x float> @llvm.SI.gather4.c.lz.o.v8i32(<8 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}gather4_sgpr_bug:
-;
-; This used to crash due to a bug in FixSGPRCopies. Derived from the
-; report in https://bugs.freedesktop.org/show_bug.cgi?id=96877
-;
-;CHECK: s_load_dwordx4 s{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]], {{s\[[0-9]+:[0-9]+\]}}, 0x0
-;CHECK: s_waitcnt lgkmcnt(0)
-;CHECK: s_mov_b32 s[[LO]], 0
-;CHECK: image_gather4_lz {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, s{{\[}}[[LO]]:[[HI]]] dmask:0x8
-define amdgpu_ps float @gather4_sgpr_bug() {
-main_body:
- %tmp = load <4 x i32>, <4 x i32> addrspace(2)* undef, align 16
- %tmp1 = insertelement <4 x i32> %tmp, i32 0, i32 0
- %tmp2 = call <4 x float> @llvm.SI.gather4.lz.v2i32(<2 x i32> undef, <8 x i32> undef, <4 x i32> %tmp1, i32 8, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %tmp4 = extractelement <4 x float> %tmp2, i32 1
- %tmp9 = fadd float undef, %tmp4
- ret float %tmp9
-}
-
-declare <4 x float> @llvm.SI.gather4.v2i32(<2 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.l.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.b.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.b.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.b.cl.v8i32(<8 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.lz.v2i32(<2 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.lz.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-
-declare <4 x float> @llvm.SI.gather4.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.cl.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.cl.o.v8i32(<8 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.l.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.l.o.v8i32(<8 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.b.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.b.o.v8i32(<8 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.b.cl.o.v8i32(<8 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.lz.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-
-declare <4 x float> @llvm.SI.gather4.c.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.c.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.c.cl.v8i32(<8 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.c.l.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.c.l.v8i32(<8 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.c.b.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.c.b.v8i32(<8 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.c.b.cl.v8i32(<8 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.c.lz.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-
-declare <4 x float> @llvm.SI.gather4.c.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.c.o.v8i32(<8 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.c.cl.o.v8i32(<8 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.c.l.o.v8i32(<8 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.c.b.o.v8i32(<8 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.c.b.cl.o.v8i32(<8 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.c.lz.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.gather4.c.lz.o.v8i32(<8 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-
-attributes #0 = { nounwind readnone }
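The deleted gather4 file above is almost entirely mechanical: one test per MIMG opcode variant, where the intrinsic name itself spells out the modifiers (following the hardware opcode suffixes) and the trailing .vNi32 names the coordinate operand type. One declare from the list above, decomposed as a reading aid (the suffix glosses follow the ISA opcode naming, not anything stated in the file itself):

; llvm.SI.gather4 . c . b . cl . o . v8i32
;   c     = depth compare (shadow)
;   b     = LOD bias
;   cl    = LOD clamp
;   o     = texel offsets
;   v8i32 = coordinate operand type (<8 x i32>)
declare <4 x float> @llvm.SI.gather4.c.b.cl.o.v8i32(<8 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32)

The one functional regression test is gather4_sgpr_bug, which pins the s_mov_b32 rewrite of a descriptor lane loaded into SGPRs, the pattern that used to crash FixSGPRCopies.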
diff --git a/test/CodeGen/AMDGPU/llvm.SI.getlod.ll b/test/CodeGen/AMDGPU/llvm.SI.getlod.ll
deleted file mode 100644
index ac34d31b97c1..000000000000
--- a/test/CodeGen/AMDGPU/llvm.SI.getlod.ll
+++ /dev/null
@@ -1,44 +0,0 @@
-;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
-;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
-
-;CHECK-LABEL: {{^}}getlod:
-;CHECK: image_get_lod {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x3 da
-define amdgpu_ps void @getlod() {
-main_body:
- %r = call <4 x float> @llvm.SI.getlod.i32(i32 undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r0, float %r1)
- ret void
-}
-
-;CHECK-LABEL: {{^}}getlod_v2:
-;CHECK: image_get_lod {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x3 da
-define amdgpu_ps void @getlod_v2() {
-main_body:
- %r = call <4 x float> @llvm.SI.getlod.v2i32(<2 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r0, float %r1)
- ret void
-}
-
-;CHECK-LABEL: {{^}}getlod_v4:
-;CHECK: image_get_lod {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x3 da
-define amdgpu_ps void @getlod_v4() {
-main_body:
- %r = call <4 x float> @llvm.SI.getlod.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r0, float %r1)
- ret void
-}
-
-
-declare <4 x float> @llvm.SI.getlod.i32(i32, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.getlod.v2i32(<2 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.getlod.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-
-attributes #0 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/llvm.SI.image.ll b/test/CodeGen/AMDGPU/llvm.SI.image.ll
deleted file mode 100644
index 50341e3e207f..000000000000
--- a/test/CodeGen/AMDGPU/llvm.SI.image.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
-;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
-
-;CHECK-LABEL: {{^}}image_load:
-;CHECK: image_load {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @image_load() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.load.v4i32(<4 x i32> undef, <8 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}image_load_mip:
-;CHECK: image_load_mip {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @image_load_mip() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.load.mip.v4i32(<4 x i32> undef, <8 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}getresinfo:
-;CHECK: image_get_resinfo {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @getresinfo() {
-main_body:
- %r = call <4 x float> @llvm.SI.getresinfo.i32(i32 undef, <8 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-declare <4 x float> @llvm.SI.image.load.v4i32(<4 x i32>, <8 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.load.mip.v4i32(<4 x i32>, <8 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.getresinfo.i32(i32, <8 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-
-attributes #0 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/llvm.SI.image.sample-masked.ll b/test/CodeGen/AMDGPU/llvm.SI.image.sample-masked.ll
deleted file mode 100644
index 7cdd9559994e..000000000000
--- a/test/CodeGen/AMDGPU/llvm.SI.image.sample-masked.ll
+++ /dev/null
@@ -1,94 +0,0 @@
-;RUN: llc < %s -march=amdgcn -mcpu=verde | FileCheck %s
-;RUN: llc < %s -march=amdgcn -mcpu=tonga | FileCheck %s
-
-; CHECK-LABEL: {{^}}v1:
-; CHECK: image_sample {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xd
-define amdgpu_ps void @v1(i32 %a1) {
-entry:
- %0 = insertelement <1 x i32> undef, i32 %a1, i32 0
- %1 = call <4 x float> @llvm.SI.image.sample.v1i32(<1 x i32> %0, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %2 = extractelement <4 x float> %1, i32 0
- %3 = extractelement <4 x float> %1, i32 2
- %4 = extractelement <4 x float> %1, i32 3
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %2, float %3, float %4, float %4)
- ret void
-}
-
-; CHECK-LABEL: {{^}}v2:
-; CHECK: image_sample {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xb
-define amdgpu_ps void @v2(i32 %a1) {
-entry:
- %0 = insertelement <1 x i32> undef, i32 %a1, i32 0
- %1 = call <4 x float> @llvm.SI.image.sample.v1i32(<1 x i32> %0, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %2 = extractelement <4 x float> %1, i32 0
- %3 = extractelement <4 x float> %1, i32 1
- %4 = extractelement <4 x float> %1, i32 3
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %2, float %3, float %4, float %4)
- ret void
-}
-
-; CHECK-LABEL: {{^}}v3:
-; CHECK: image_sample {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xe
-define amdgpu_ps void @v3(i32 %a1) {
-entry:
- %0 = insertelement <1 x i32> undef, i32 %a1, i32 0
- %1 = call <4 x float> @llvm.SI.image.sample.v1i32(<1 x i32> %0, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %2 = extractelement <4 x float> %1, i32 1
- %3 = extractelement <4 x float> %1, i32 2
- %4 = extractelement <4 x float> %1, i32 3
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %2, float %3, float %4, float %4)
- ret void
-}
-
-; CHECK-LABEL: {{^}}v4:
-; CHECK: image_sample {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x7
-define amdgpu_ps void @v4(i32 %a1) {
-entry:
- %0 = insertelement <1 x i32> undef, i32 %a1, i32 0
- %1 = call <4 x float> @llvm.SI.image.sample.v1i32(<1 x i32> %0, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %2 = extractelement <4 x float> %1, i32 0
- %3 = extractelement <4 x float> %1, i32 1
- %4 = extractelement <4 x float> %1, i32 2
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %2, float %3, float %4, float %4)
- ret void
-}
-
-; CHECK-LABEL: {{^}}v5:
-; CHECK: image_sample {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xa
-define amdgpu_ps void @v5(i32 %a1) {
-entry:
- %0 = insertelement <1 x i32> undef, i32 %a1, i32 0
- %1 = call <4 x float> @llvm.SI.image.sample.v1i32(<1 x i32> %0, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %2 = extractelement <4 x float> %1, i32 1
- %3 = extractelement <4 x float> %1, i32 3
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %2, float %3, float %3, float %3)
- ret void
-}
-
-; CHECK-LABEL: {{^}}v6:
-; CHECK: image_sample {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x6
-define amdgpu_ps void @v6(i32 %a1) {
-entry:
- %0 = insertelement <1 x i32> undef, i32 %a1, i32 0
- %1 = call <4 x float> @llvm.SI.image.sample.v1i32(<1 x i32> %0, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %2 = extractelement <4 x float> %1, i32 1
- %3 = extractelement <4 x float> %1, i32 2
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %2, float %3, float %3, float %3)
- ret void
-}
-
-; CHECK-LABEL: {{^}}v7:
-; CHECK: image_sample {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x9
-define amdgpu_ps void @v7(i32 %a1) {
-entry:
- %0 = insertelement <1 x i32> undef, i32 %a1, i32 0
- %1 = call <4 x float> @llvm.SI.image.sample.v1i32(<1 x i32> %0, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %2 = extractelement <4 x float> %1, i32 0
- %3 = extractelement <4 x float> %1, i32 3
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %2, float %3, float %3, float %3)
- ret void
-}
-
-declare <4 x float> @llvm.SI.image.sample.v1i32(<1 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) readnone
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
diff --git a/test/CodeGen/AMDGPU/llvm.SI.image.sample.ll b/test/CodeGen/AMDGPU/llvm.SI.image.sample.ll
deleted file mode 100644
index 60077dc218fd..000000000000
--- a/test/CodeGen/AMDGPU/llvm.SI.image.sample.ll
+++ /dev/null
@@ -1,309 +0,0 @@
-;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
-;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
-
-;CHECK-LABEL: {{^}}sample:
-;CHECK: s_wqm
-;CHECK: image_sample {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_cl:
-;CHECK: s_wqm
-;CHECK: image_sample_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_d:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_d {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_d() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.d.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_d_cl:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_d_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_d_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.d.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_l:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_l {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_l() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.l.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_b:
-;CHECK: s_wqm
-;CHECK: image_sample_b {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_b() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.b.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_b_cl:
-;CHECK: s_wqm
-;CHECK: image_sample_b_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_b_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.b.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_lz:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_lz {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_lz() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.lz.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_cd:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_cd {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_cd() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.cd.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_cd_cl:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_cd_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_cd_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.cd.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c:
-;CHECK: s_wqm
-;CHECK: image_sample_c {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c_cl:
-;CHECK: s_wqm
-;CHECK: image_sample_c_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c_d:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_c_d {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c_d() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.d.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c_d_cl:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_c_d_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c_d_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.d.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c_l:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_c_l {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c_l() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.l.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c_b:
-;CHECK: s_wqm
-;CHECK: image_sample_c_b {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c_b() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.b.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c_b_cl:
-;CHECK: s_wqm
-;CHECK: image_sample_c_b_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c_b_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.b.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c_lz:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_c_lz {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c_lz() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.lz.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c_cd:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_c_cd {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c_cd() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.cd.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c_cd_cl:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_c_cd_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c_cd_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.cd.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-
-declare <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.d.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.d.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.l.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.b.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.b.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.lz.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.cd.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.cd.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-
-declare <4 x float> @llvm.SI.image.sample.c.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.c.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.c.d.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.c.d.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.c.l.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.c.b.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.c.b.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.c.lz.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.c.cd.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.c.cd.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-
-attributes #0 = { nounwind readnone }
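Every test in the deleted file above checks the same two properties: the expected image_sample_* opcode, and whether s_wqm is emitted first. The CHECK/CHECK-NOT: s_wqm lines encode one rule: variants that sample with implicit pixel-quad derivatives (sample, sample_cl, the _b bias forms, and their _c depth-compare counterparts) require whole quad mode, while variants with explicit derivatives or an explicit LOD (_d, _d_cl, _l, _lz, _cd, _cd_cl and the matching _c forms) must not enable it. The offset (_o) file deleted next repeats the same matrix. A minimal contrasting pair reusing two declares from the file, a sketch of the rule rather than a full FileCheck test:

declare <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32)
declare <4 x float> @llvm.SI.image.sample.lz.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32)

define amdgpu_ps <4 x float> @implicit_derivatives() {
  ; plain sample computes its LOD from pixel-quad derivatives, so s_wqm is required
  %r = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
  ret <4 x float> %r
}

define amdgpu_ps <4 x float> @explicit_lod_zero() {
  ; sample_lz forces LOD 0, needs no derivatives, and so must not enable WQM
  %r = call <4 x float> @llvm.SI.image.sample.lz.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
  ret <4 x float> %r
}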
diff --git a/test/CodeGen/AMDGPU/llvm.SI.image.sample.o.ll b/test/CodeGen/AMDGPU/llvm.SI.image.sample.o.ll
deleted file mode 100644
index 34d4f6825690..000000000000
--- a/test/CodeGen/AMDGPU/llvm.SI.image.sample.o.ll
+++ /dev/null
@@ -1,309 +0,0 @@
-;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
-;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
-
-;CHECK-LABEL: {{^}}sample:
-;CHECK: s_wqm
-;CHECK: image_sample_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_cl:
-;CHECK: s_wqm
-;CHECK: image_sample_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_d:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_d_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_d() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.d.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_d_cl:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_d_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_d_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.d.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_l:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_l_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_l() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.l.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_b:
-;CHECK: s_wqm
-;CHECK: image_sample_b_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_b() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.b.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_b_cl:
-;CHECK: s_wqm
-;CHECK: image_sample_b_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_b_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.b.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_lz:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_lz_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_lz() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.lz.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_cd:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_cd_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_cd() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.cd.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_cd_cl:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_cd_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_cd_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.cd.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c:
-;CHECK: s_wqm
-;CHECK: image_sample_c_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c_cl:
-;CHECK: s_wqm
-;CHECK: image_sample_c_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c_d:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_c_d_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c_d() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.d.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c_d_cl:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_c_d_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c_d_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.d.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c_l:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_c_l_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c_l() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.l.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c_b:
-;CHECK: s_wqm
-;CHECK: image_sample_c_b_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c_b() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.b.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c_b_cl:
-;CHECK: s_wqm
-;CHECK: image_sample_c_b_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c_b_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.b.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c_lz:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_c_lz_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c_lz() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.lz.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c_cd:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_c_cd_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c_cd() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.cd.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-;CHECK-LABEL: {{^}}sample_c_cd_cl:
-;CHECK-NOT: s_wqm
-;CHECK: image_sample_c_cd_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @sample_c_cd_cl() {
-main_body:
- %r = call <4 x float> @llvm.SI.image.sample.c.cd.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %r0 = extractelement <4 x float> %r, i32 0
- %r1 = extractelement <4 x float> %r, i32 1
- %r2 = extractelement <4 x float> %r, i32 2
- %r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
- ret void
-}
-
-
-declare <4 x float> @llvm.SI.image.sample.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.cl.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.d.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.d.cl.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.l.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.b.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.b.cl.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.lz.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.cd.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.cd.cl.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-
-declare <4 x float> @llvm.SI.image.sample.c.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.c.cl.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.c.d.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.c.d.cl.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.c.l.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.c.b.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.c.b.cl.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.c.lz.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.c.cd.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-declare <4 x float> @llvm.SI.image.sample.c.cd.cl.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-
-attributes #0 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/llvm.SI.load.dword.ll b/test/CodeGen/AMDGPU/llvm.SI.load.dword.ll
index ee0a41f2210f..51f564d96909 100644
--- a/test/CodeGen/AMDGPU/llvm.SI.load.dword.ll
+++ b/test/CodeGen/AMDGPU/llvm.SI.load.dword.ll
@@ -34,8 +34,8 @@ main_body:
%tmp22 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8> %tmp10, <2 x i32> zeroinitializer, i32 1234, i32 65535, i32 1, i32 1, i32 1, i32 1, i32 0)
%tmp23 = bitcast i32 %tmp22 to float
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %tmp13, float %tmp15, float %tmp17, float %tmp19)
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %tmp21, float %tmp23, float %tmp23, float %tmp23)
+ call void @llvm.amdgcn.exp.f32(i32 15, i32 12, float %tmp13, float %tmp15, float %tmp17, float %tmp19, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 15, i32 12, float %tmp21, float %tmp23, float %tmp23, float %tmp23, i1 true, i1 false)
ret void
}
@@ -45,9 +45,10 @@ declare i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8>, i32, i32, i32, i32, i3
; Function Attrs: nounwind readonly
declare i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8>, <2 x i32>, i32, i32, i32, i32, i32, i32, i32) #0
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #1
attributes #0 = { nounwind readonly }
+attributes #1 = { nounwind inaccessiblememonly }
!0 = !{!"const", !1, i32 1}
!1 = !{!"tbaa root"}
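Unlike the deleted files above, llvm.SI.load.dword.ll is updated in place: its llvm.SI.export calls are rewritten to llvm.amdgcn.exp.f32, whose declare appears in the hunk. The four float payload operands carry over unchanged; the old i32 controls are folded into two leading i32 operands (export target and channel-enable mask) plus a trailing i1 pair, of which the first is the done flag (only the final export in the converted pair sets it true) and the second is presumably the valid-mask flag. A self-contained sketch that reuses the literal operand values from the converted calls above rather than re-deriving them:

declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1)

define amdgpu_ps void @export_two(float %a, float %b, float %c, float %d) {
  ; intermediate export: done = false
  call void @llvm.amdgcn.exp.f32(i32 15, i32 12, float %a, float %b, float %c, float %d, i1 false, i1 false)
  ; last export of the program: done = true
  call void @llvm.amdgcn.exp.f32(i32 15, i32 12, float %a, float %b, float %c, float %d, i1 true, i1 false)
  ret void
}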
diff --git a/test/CodeGen/AMDGPU/llvm.SI.packf16.ll b/test/CodeGen/AMDGPU/llvm.SI.packf16.ll
deleted file mode 100644
index 6984b4cf488a..000000000000
--- a/test/CodeGen/AMDGPU/llvm.SI.packf16.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
-
-; GCN-LABEL: {{^}}main:
-; GCN: v_cvt_pkrtz_f16_f32
-; GCN: v_cvt_pkrtz_f16_f32
-; GCN-NOT: v_cvt_pkrtz_f16_f32
-
-define amdgpu_ps void @main(float %src) {
-main_body:
- %p1 = call i32 @llvm.SI.packf16(float undef, float %src)
- %p2 = call i32 @llvm.SI.packf16(float %src, float undef)
- %p3 = call i32 @llvm.SI.packf16(float undef, float undef)
- %f1 = bitcast i32 %p1 to float
- %f2 = bitcast i32 %p2 to float
- %f3 = bitcast i32 %p3 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 0, i32 0, i32 1, float undef, float %f1, float undef, float %f1)
- call void @llvm.SI.export(i32 15, i32 1, i32 0, i32 0, i32 1, float undef, float %f2, float undef, float %f2)
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float undef, float %f3, float undef, float %f2)
- ret void
-}
-
-; Function Attrs: nounwind readnone
-declare i32 @llvm.SI.packf16(float, float) #0
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-
-attributes #0 = { nounwind readnone }
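The llvm.SI.packf16 test above is deleted outright along with the intrinsic.
A minimal sketch of the presumed modern equivalent, assuming (this diff does
not say so) that the v_cvt_pkrtz_f16_f32 pattern is now exercised through
llvm.amdgcn.cvt.pkrtz; @pkrtz_example is a hypothetical name:

declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #0
define amdgpu_ps <2 x half> @pkrtz_example(float %a, float %b) {
  %v = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %a, float %b)
  ret <2 x half> %v
}
attributes #0 = { nounwind readnone }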
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.dec.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.dec.ll
index 9c845e84bc12..56966a19cf7b 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.dec.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.dec.ll
@@ -1,21 +1,45 @@
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
-declare i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* nocapture, i32) #2
-declare i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* nocapture, i32) #2
-declare i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* nocapture, i32) #2
+declare i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* nocapture, i32, i32, i32, i1) #2
+declare i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* nocapture, i32, i32, i32, i1) #2
+declare i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* nocapture, i32, i32, i32, i1) #2
-declare i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* nocapture, i64) #2
-declare i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* nocapture, i64) #2
-declare i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* nocapture, i64) #2
+declare i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* nocapture, i64, i32, i32, i1) #2
+declare i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* nocapture, i64, i32, i32, i1) #2
+declare i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* nocapture, i64, i32, i32, i1) #2
declare i32 @llvm.amdgcn.workitem.id.x() #1
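+
+; Sketch annotating the three new trailing operands; the operand names are
+; assumptions inferred from the invalid_variable_* test names added below:
+;   declare i32 @llvm.amdgcn.atomic.dec.i32.p3i32(
+;     i32 addrspace(3)* nocapture,  ; pointer operand
+;     i32,                          ; value operand
+;     i32,                          ; ordering (expected to be constant)
+;     i32,                          ; scope (expected to be constant)
+;     i1)                           ; volatile flag (expected to be constant)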
+; Make sure there is no crash on an invalid non-constant ordering operand
+; GCN-LABEL: {{^}}invalid_variable_order_lds_atomic_dec_ret_i32:
+define amdgpu_kernel void @invalid_variable_order_lds_atomic_dec_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %order.var) #0 {
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %ptr, i32 42, i32 %order.var, i32 0, i1 false)
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; Make sure there is no crash on an invalid non-constant scope operand
+; GCN-LABEL: {{^}}invalid_variable_scope_lds_atomic_dec_ret_i32:
+define amdgpu_kernel void @invalid_variable_scope_lds_atomic_dec_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %scope.var) #0 {
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %ptr, i32 42, i32 0, i32 %scope.var, i1 false)
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; Make sure there is no crash on an invalid non-constant volatile operand
+; GCN-LABEL: {{^}}invalid_variable_volatile_lds_atomic_dec_ret_i32:
+define amdgpu_kernel void @invalid_variable_volatile_lds_atomic_dec_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i1 %volatile.var) #0 {
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %ptr, i32 42, i32 0, i32 0, i1 %volatile.var)
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
; GCN-LABEL: {{^}}lds_atomic_dec_ret_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: ds_dec_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[K]]
-define void @lds_atomic_dec_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) #0 {
- %result = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %ptr, i32 42)
+define amdgpu_kernel void @lds_atomic_dec_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) #0 {
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %ptr, i32 42, i32 0, i32 0, i1 false)
store i32 %result, i32 addrspace(1)* %out
ret void
}
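Throughout these files the test kernels also switch from the default calling
convention to the amdgpu_kernel one (define void -> define amdgpu_kernel
void), marking them as kernel entry points. A minimal sketch of the new
form; @k is a hypothetical name:

define amdgpu_kernel void @k(i32 addrspace(1)* %out) {
  store i32 0, i32 addrspace(1)* %out
  ret void
}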
@@ -23,9 +47,9 @@ define void @lds_atomic_dec_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
; GCN-LABEL: {{^}}lds_atomic_dec_ret_i32_offset:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: ds_dec_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[K]] offset:16
-define void @lds_atomic_dec_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) #0 {
+define amdgpu_kernel void @lds_atomic_dec_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) #0 {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
- %result = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %gep, i32 42, i32 0, i32 0, i1 false)
store i32 %result, i32 addrspace(1)* %out
ret void
}
@@ -35,25 +59,25 @@ define void @lds_atomic_dec_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
; GCN: v_mov_b32_e32 [[DATA:v[0-9]+]], 4
; GCN: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
; GCN: ds_dec_u32 [[VPTR]], [[DATA]]
-define void @lds_atomic_dec_noret_i32(i32 addrspace(3)* %ptr) nounwind {
- %result = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %ptr, i32 42)
+define amdgpu_kernel void @lds_atomic_dec_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %ptr, i32 42, i32 0, i32 0, i1 false)
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_dec_noret_i32_offset:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: ds_dec_u32 v{{[0-9]+}}, [[K]] offset:16
-define void @lds_atomic_dec_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_dec_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
- %result = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %gep, i32 42, i32 0, i32 0, i1 false)
ret void
}
; GCN-LABEL: {{^}}global_atomic_dec_ret_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: buffer_atomic_dec [[K]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 glc{{$}}
-define void @global_atomic_dec_ret_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
- %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %ptr, i32 42)
+define amdgpu_kernel void @global_atomic_dec_ret_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %ptr, i32 42, i32 0, i32 0, i1 false)
store i32 %result, i32 addrspace(1)* %out
ret void
}
@@ -61,26 +85,26 @@ define void @global_atomic_dec_ret_i32(i32 addrspace(1)* %out, i32 addrspace(1)*
; GCN-LABEL: {{^}}global_atomic_dec_ret_i32_offset:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: buffer_atomic_dec [[K]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:16 glc{{$}}
-define void @global_atomic_dec_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @global_atomic_dec_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %gep, i32 42, i32 0, i32 0, i1 false)
store i32 %result, i32 addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}global_atomic_dec_noret_i32:
; GCN: buffer_atomic_dec [[K]], off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
-define void @global_atomic_dec_noret_i32(i32 addrspace(1)* %ptr) nounwind {
- %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %ptr, i32 42)
+define amdgpu_kernel void @global_atomic_dec_noret_i32(i32 addrspace(1)* %ptr) nounwind {
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %ptr, i32 42, i32 0, i32 0, i1 false)
ret void
}
; FUNC-LABEL: {{^}}global_atomic_dec_noret_i32_offset:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: buffer_atomic_dec [[K]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:16{{$}}
-define void @global_atomic_dec_noret_i32_offset(i32 addrspace(1)* %ptr) nounwind {
+define amdgpu_kernel void @global_atomic_dec_noret_i32_offset(i32 addrspace(1)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %gep, i32 42, i32 0, i32 0, i1 false)
ret void
}
@@ -88,12 +112,12 @@ define void @global_atomic_dec_noret_i32_offset(i32 addrspace(1)* %ptr) nounwind
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; CI: buffer_atomic_dec [[K]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:20 glc{{$}}
; VI: flat_atomic_dec v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] glc{{$}}
-define void @global_atomic_dec_ret_i32_offset_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @global_atomic_dec_ret_i32_offset_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i32, i32 addrspace(1)* %ptr, i32 %id
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id
%gep = getelementptr i32, i32 addrspace(1)* %gep.tid, i32 5
- %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %gep, i32 42, i32 0, i32 0, i1 false)
store i32 %result, i32 addrspace(1)* %out.gep
ret void
}
@@ -102,19 +126,19 @@ define void @global_atomic_dec_ret_i32_offset_addr64(i32 addrspace(1)* %out, i32
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; CI: buffer_atomic_dec [[K]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:20{{$}}
; VI: flat_atomic_dec v{{\[[0-9]+:[0-9]+\]}}, [[K]]{{$}}
-define void @global_atomic_dec_noret_i32_offset_addr64(i32 addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @global_atomic_dec_noret_i32_offset_addr64(i32 addrspace(1)* %ptr) #0 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i32, i32 addrspace(1)* %ptr, i32 %id
%gep = getelementptr i32, i32 addrspace(1)* %gep.tid, i32 5
- %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %gep, i32 42, i32 0, i32 0, i1 false)
ret void
}
; GCN-LABEL: {{^}}flat_atomic_dec_ret_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: flat_atomic_dec v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] glc{{$}}
-define void @flat_atomic_dec_ret_i32(i32 addrspace(4)* %out, i32 addrspace(4)* %ptr) #0 {
- %result = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %ptr, i32 42)
+define amdgpu_kernel void @flat_atomic_dec_ret_i32(i32 addrspace(4)* %out, i32 addrspace(4)* %ptr) #0 {
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %ptr, i32 42, i32 0, i32 0, i1 false)
store i32 %result, i32 addrspace(4)* %out
ret void
}
@@ -122,38 +146,38 @@ define void @flat_atomic_dec_ret_i32(i32 addrspace(4)* %out, i32 addrspace(4)* %
; GCN-LABEL: {{^}}flat_atomic_dec_ret_i32_offset:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: flat_atomic_dec v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] glc{{$}}
-define void @flat_atomic_dec_ret_i32_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %ptr) #0 {
+define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %ptr) #0 {
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
- %result = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %gep, i32 42, i32 0, i32 0, i1 false)
store i32 %result, i32 addrspace(4)* %out
ret void
}
; FUNC-LABEL: {{^}}flat_atomic_dec_noret_i32:
; GCN: flat_atomic_dec v{{\[[0-9]+:[0-9]+\]}}, [[K]]{{$}}
-define void @flat_atomic_dec_noret_i32(i32 addrspace(4)* %ptr) nounwind {
- %result = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %ptr, i32 42)
+define amdgpu_kernel void @flat_atomic_dec_noret_i32(i32 addrspace(4)* %ptr) nounwind {
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %ptr, i32 42, i32 0, i32 0, i1 false)
ret void
}
; FUNC-LABEL: {{^}}flat_atomic_dec_noret_i32_offset:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: flat_atomic_dec v{{\[[0-9]+:[0-9]+\]}}, [[K]]{{$}}
-define void @flat_atomic_dec_noret_i32_offset(i32 addrspace(4)* %ptr) nounwind {
+define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset(i32 addrspace(4)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
- %result = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %gep, i32 42, i32 0, i32 0, i1 false)
ret void
}
; GCN-LABEL: {{^}}flat_atomic_dec_ret_i32_offset_addr64:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: flat_atomic_dec v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] glc{{$}}
-define void @flat_atomic_dec_ret_i32_offset_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %ptr) #0 {
+define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %ptr) #0 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i32, i32 addrspace(4)* %ptr, i32 %id
%out.gep = getelementptr i32, i32 addrspace(4)* %out, i32 %id
%gep = getelementptr i32, i32 addrspace(4)* %gep.tid, i32 5
- %result = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %gep, i32 42, i32 0, i32 0, i1 false)
store i32 %result, i32 addrspace(4)* %out.gep
ret void
}
@@ -161,11 +185,11 @@ define void @flat_atomic_dec_ret_i32_offset_addr64(i32 addrspace(4)* %out, i32 a
; GCN-LABEL: {{^}}flat_atomic_dec_noret_i32_offset_addr64:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: flat_atomic_dec v{{\[[0-9]+:[0-9]+\]}}, [[K]]{{$}}
-define void @flat_atomic_dec_noret_i32_offset_addr64(i32 addrspace(4)* %ptr) #0 {
+define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset_addr64(i32 addrspace(4)* %ptr) #0 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i32, i32 addrspace(4)* %ptr, i32 %id
%gep = getelementptr i32, i32 addrspace(4)* %gep.tid, i32 5
- %result = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %gep, i32 42, i32 0, i32 0, i1 false)
ret void
}
@@ -173,8 +197,8 @@ define void @flat_atomic_dec_noret_i32_offset_addr64(i32 addrspace(4)* %ptr) #0
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: flat_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} glc{{$}}
-define void @flat_atomic_dec_ret_i64(i64 addrspace(4)* %out, i64 addrspace(4)* %ptr) #0 {
- %result = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %ptr, i64 42)
+define amdgpu_kernel void @flat_atomic_dec_ret_i64(i64 addrspace(4)* %out, i64 addrspace(4)* %ptr) #0 {
+ %result = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %ptr, i64 42, i32 0, i32 0, i1 false)
store i64 %result, i64 addrspace(4)* %out
ret void
}
@@ -183,9 +207,9 @@ define void @flat_atomic_dec_ret_i64(i64 addrspace(4)* %out, i64 addrspace(4)* %
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: flat_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} glc{{$}}
-define void @flat_atomic_dec_ret_i64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %ptr) #0 {
+define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %ptr) #0 {
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i32 4
- %result = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %gep, i64 42, i32 0, i32 0, i1 false)
store i64 %result, i64 addrspace(4)* %out
ret void
}
@@ -194,8 +218,8 @@ define void @flat_atomic_dec_ret_i64_offset(i64 addrspace(4)* %out, i64 addrspac
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: flat_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]$}}
-define void @flat_atomic_dec_noret_i64(i64 addrspace(4)* %ptr) nounwind {
- %result = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %ptr, i64 42)
+define amdgpu_kernel void @flat_atomic_dec_noret_i64(i64 addrspace(4)* %ptr) nounwind {
+ %result = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %ptr, i64 42, i32 0, i32 0, i1 false)
ret void
}
@@ -203,35 +227,35 @@ define void @flat_atomic_dec_noret_i64(i64 addrspace(4)* %ptr) nounwind {
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: flat_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]$}}
-define void @flat_atomic_dec_noret_i64_offset(i64 addrspace(4)* %ptr) nounwind {
+define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset(i64 addrspace(4)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i32 4
- %result = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %gep, i64 42, i32 0, i32 0, i1 false)
ret void
}
; GCN-LABEL: {{^}}flat_atomic_dec_ret_i64_offset_addr64:
-; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
-; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
+; GCN: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
+; GCN: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: flat_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} glc{{$}}
-define void @flat_atomic_dec_ret_i64_offset_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %ptr) #0 {
+define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %ptr) #0 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, i64 addrspace(4)* %ptr, i32 %id
%out.gep = getelementptr i64, i64 addrspace(4)* %out, i32 %id
%gep = getelementptr i64, i64 addrspace(4)* %gep.tid, i32 5
- %result = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %gep, i64 42, i32 0, i32 0, i1 false)
store i64 %result, i64 addrspace(4)* %out.gep
ret void
}
; GCN-LABEL: {{^}}flat_atomic_dec_noret_i64_offset_addr64:
-; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
-; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
+; GCN: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
+; GCN: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: flat_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]$}}
-define void @flat_atomic_dec_noret_i64_offset_addr64(i64 addrspace(4)* %ptr) #0 {
+define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_addr64(i64 addrspace(4)* %ptr) #0 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, i64 addrspace(4)* %ptr, i32 %id
%gep = getelementptr i64, i64 addrspace(4)* %gep.tid, i32 5
- %result = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %gep, i64 42, i32 0, i32 0, i1 false)
ret void
}
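The GCN-DAG -> GCN change in the addr64 tests above tightens the checks:
FileCheck matches a group of CHECK-DAG lines against the output in any
order, while plain CHECK lines must match in sequence. A minimal sketch of
the distinction, in the same FileCheck-comment style as these tests:

; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42   ; either of these two lines
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0    ; may match first
; GCN:     v_mov_b32_e32 v[[KLO:[0-9]+]], 42   ; these two must match in
; GCN:     v_mov_b32_e32 v[[KHI:[0-9]+]], 0    ; exactly this order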
@@ -240,11 +264,11 @@ define void @flat_atomic_dec_noret_i64_offset_addr64(i64 addrspace(4)* %ptr) #0
; SI-LABEL: {{^}}atomic_dec_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_dec_rtn_u32 {{v[0-9]+}}, [[PTR]] offset:8
-define void @atomic_dec_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+define amdgpu_kernel void @atomic_dec_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds0, i32 0, i32 %idx.0
- %val0 = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %arrayidx0, i32 9)
+ %val0 = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %arrayidx0, i32 9, i32 0, i32 0, i1 false)
store i32 %idx.0, i32 addrspace(1)* %add_use
store i32 %val0, i32 addrspace(1)* %out
ret void
@@ -254,8 +278,8 @@ define void @atomic_dec_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)*
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: ds_dec_rtn_u64 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}}{{$}}
-define void @lds_atomic_dec_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) #0 {
- %result = call i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* %ptr, i64 42)
+define amdgpu_kernel void @lds_atomic_dec_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) #0 {
+ %result = call i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* %ptr, i64 42, i32 0, i32 0, i1 false)
store i64 %result, i64 addrspace(1)* %out
ret void
}
@@ -264,9 +288,9 @@ define void @lds_atomic_dec_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: ds_dec_rtn_u64 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} offset:32
-define void @lds_atomic_dec_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) #0 {
+define amdgpu_kernel void @lds_atomic_dec_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) #0 {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
- %result = call i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* %gep, i64 42, i32 0, i32 0, i1 false)
store i64 %result, i64 addrspace(1)* %out
ret void
}
@@ -275,8 +299,8 @@ define void @lds_atomic_dec_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: ds_dec_u64 v{{[0-9]+}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}}{{$}}
-define void @lds_atomic_dec_noret_i64(i64 addrspace(3)* %ptr) nounwind {
- %result = call i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* %ptr, i64 42)
+define amdgpu_kernel void @lds_atomic_dec_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+ %result = call i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* %ptr, i64 42, i32 0, i32 0, i1 false)
ret void
}
@@ -284,9 +308,9 @@ define void @lds_atomic_dec_noret_i64(i64 addrspace(3)* %ptr) nounwind {
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: ds_dec_u64 v{{[0-9]+}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} offset:32{{$}}
-define void @lds_atomic_dec_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_dec_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
- %result = call i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* %gep, i64 42, i32 0, i32 0, i1 false)
ret void
}
@@ -294,8 +318,8 @@ define void @lds_atomic_dec_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: buffer_atomic_dec_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 glc{{$}}
-define void @global_atomic_dec_ret_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %ptr) #0 {
- %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %ptr, i64 42)
+define amdgpu_kernel void @global_atomic_dec_ret_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %ptr) #0 {
+ %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %ptr, i64 42, i32 0, i32 0, i1 false)
store i64 %result, i64 addrspace(1)* %out
ret void
}
@@ -304,9 +328,9 @@ define void @global_atomic_dec_ret_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: buffer_atomic_dec_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:32 glc{{$}}
-define void @global_atomic_dec_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @global_atomic_dec_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %ptr) #0 {
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i32 4
- %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %gep, i64 42, i32 0, i32 0, i1 false)
store i64 %result, i64 addrspace(1)* %out
ret void
}
@@ -315,8 +339,8 @@ define void @global_atomic_dec_ret_i64_offset(i64 addrspace(1)* %out, i64 addrsp
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: buffer_atomic_dec_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
-define void @global_atomic_dec_noret_i64(i64 addrspace(1)* %ptr) nounwind {
- %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %ptr, i64 42)
+define amdgpu_kernel void @global_atomic_dec_noret_i64(i64 addrspace(1)* %ptr) nounwind {
+ %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %ptr, i64 42, i32 0, i32 0, i1 false)
ret void
}
@@ -324,37 +348,37 @@ define void @global_atomic_dec_noret_i64(i64 addrspace(1)* %ptr) nounwind {
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: buffer_atomic_dec_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:32{{$}}
-define void @global_atomic_dec_noret_i64_offset(i64 addrspace(1)* %ptr) nounwind {
+define amdgpu_kernel void @global_atomic_dec_noret_i64_offset(i64 addrspace(1)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i32 4
- %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %gep, i64 42, i32 0, i32 0, i1 false)
ret void
}
; GCN-LABEL: {{^}}global_atomic_dec_ret_i64_offset_addr64:
-; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
-; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
+; GCN: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
+; GCN: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; CI: buffer_atomic_dec_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:40 glc{{$}}
; VI: flat_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} glc{{$}}
-define void @global_atomic_dec_ret_i64_offset_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @global_atomic_dec_ret_i64_offset_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %ptr) #0 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, i64 addrspace(1)* %ptr, i32 %id
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id
%gep = getelementptr i64, i64 addrspace(1)* %gep.tid, i32 5
- %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %gep, i64 42, i32 0, i32 0, i1 false)
store i64 %result, i64 addrspace(1)* %out.gep
ret void
}
; GCN-LABEL: {{^}}global_atomic_dec_noret_i64_offset_addr64:
-; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
-; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
+; GCN: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
+; GCN: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; CI: buffer_atomic_dec_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:40{{$}}
; VI: flat_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}}{{$}}
-define void @global_atomic_dec_noret_i64_offset_addr64(i64 addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @global_atomic_dec_noret_i64_offset_addr64(i64 addrspace(1)* %ptr) #0 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, i64 addrspace(1)* %ptr, i32 %id
%gep = getelementptr i64, i64 addrspace(1)* %gep.tid, i32 5
- %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %gep, i64 42, i32 0, i32 0, i1 false)
ret void
}
@@ -363,11 +387,11 @@ define void @global_atomic_dec_noret_i64_offset_addr64(i64 addrspace(1)* %ptr) #
; GCN-LABEL: {{^}}atomic_dec_shl_base_lds_0_i64:
; GCN: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 3, {{v[0-9]+}}
; GCN: ds_dec_rtn_u64 v{{\[[0-9]+:[0-9]+\]}}, [[PTR]], v{{\[[0-9]+:[0-9]+\]}} offset:16
-define void @atomic_dec_shl_base_lds_0_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+define amdgpu_kernel void @atomic_dec_shl_base_lds_0_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i64], [512 x i64] addrspace(3)* @lds1, i32 0, i32 %idx.0
- %val0 = call i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* %arrayidx0, i64 9)
+ %val0 = call i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* %arrayidx0, i64 9, i32 0, i32 0, i1 false)
store i32 %idx.0, i32 addrspace(1)* %add_use
store i64 %val0, i64 addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.inc.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.inc.ll
index 22097418eec4..3d64f93db2e4 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.inc.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.inc.ll
@@ -1,21 +1,21 @@
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
-declare i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* nocapture, i32) #2
-declare i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* nocapture, i32) #2
-declare i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* nocapture, i32) #2
+declare i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* nocapture, i32, i32, i32, i1) #2
+declare i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* nocapture, i32, i32, i32, i1) #2
+declare i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* nocapture, i32, i32, i32, i1) #2
-declare i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* nocapture, i64) #2
-declare i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* nocapture, i64) #2
-declare i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* nocapture, i64) #2
+declare i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* nocapture, i64, i32, i32, i1) #2
+declare i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* nocapture, i64, i32, i32, i1) #2
+declare i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* nocapture, i64, i32, i32, i1) #2
declare i32 @llvm.amdgcn.workitem.id.x() #1
; GCN-LABEL: {{^}}lds_atomic_inc_ret_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: ds_inc_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[K]]
-define void @lds_atomic_inc_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) #0 {
- %result = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %ptr, i32 42)
+define amdgpu_kernel void @lds_atomic_inc_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) #0 {
+ %result = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %ptr, i32 42, i32 0, i32 0, i1 false)
store i32 %result, i32 addrspace(1)* %out
ret void
}
@@ -23,9 +23,9 @@ define void @lds_atomic_inc_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
; GCN-LABEL: {{^}}lds_atomic_inc_ret_i32_offset:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: ds_inc_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[K]] offset:16
-define void @lds_atomic_inc_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) #0 {
+define amdgpu_kernel void @lds_atomic_inc_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) #0 {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
- %result = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %gep, i32 42, i32 0, i32 0, i1 false)
store i32 %result, i32 addrspace(1)* %out
ret void
}
@@ -35,25 +35,25 @@ define void @lds_atomic_inc_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
; GCN: v_mov_b32_e32 [[DATA:v[0-9]+]], 4
; GCN: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
; GCN: ds_inc_u32 [[VPTR]], [[DATA]]
-define void @lds_atomic_inc_noret_i32(i32 addrspace(3)* %ptr) nounwind {
- %result = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %ptr, i32 42)
+define amdgpu_kernel void @lds_atomic_inc_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+ %result = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %ptr, i32 42, i32 0, i32 0, i1 false)
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_inc_noret_i32_offset:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: ds_inc_u32 v{{[0-9]+}}, [[K]] offset:16
-define void @lds_atomic_inc_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_inc_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
- %result = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %gep, i32 42, i32 0, i32 0, i1 false)
ret void
}
; GCN-LABEL: {{^}}global_atomic_inc_ret_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: buffer_atomic_inc [[K]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 glc{{$}}
-define void @global_atomic_inc_ret_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
- %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %ptr, i32 42)
+define amdgpu_kernel void @global_atomic_inc_ret_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
+ %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %ptr, i32 42, i32 0, i32 0, i1 false)
store i32 %result, i32 addrspace(1)* %out
ret void
}
@@ -61,26 +61,26 @@ define void @global_atomic_inc_ret_i32(i32 addrspace(1)* %out, i32 addrspace(1)*
; GCN-LABEL: {{^}}global_atomic_inc_ret_i32_offset:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: buffer_atomic_inc [[K]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:16 glc{{$}}
-define void @global_atomic_inc_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @global_atomic_inc_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %gep, i32 42, i32 0, i32 0, i1 false)
store i32 %result, i32 addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}global_atomic_inc_noret_i32:
; GCN: buffer_atomic_inc [[K]], off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
-define void @global_atomic_inc_noret_i32(i32 addrspace(1)* %ptr) nounwind {
- %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %ptr, i32 42)
+define amdgpu_kernel void @global_atomic_inc_noret_i32(i32 addrspace(1)* %ptr) nounwind {
+ %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %ptr, i32 42, i32 0, i32 0, i1 false)
ret void
}
; FUNC-LABEL: {{^}}global_atomic_inc_noret_i32_offset:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: buffer_atomic_inc [[K]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:16{{$}}
-define void @global_atomic_inc_noret_i32_offset(i32 addrspace(1)* %ptr) nounwind {
+define amdgpu_kernel void @global_atomic_inc_noret_i32_offset(i32 addrspace(1)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %gep, i32 42, i32 0, i32 0, i1 false)
ret void
}
@@ -88,12 +88,12 @@ define void @global_atomic_inc_noret_i32_offset(i32 addrspace(1)* %ptr) nounwind
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; CI: buffer_atomic_inc [[K]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:20 glc{{$}}
; VI: flat_atomic_inc v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] glc{{$}}
-define void @global_atomic_inc_ret_i32_offset_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @global_atomic_inc_ret_i32_offset_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i32, i32 addrspace(1)* %ptr, i32 %id
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id
%gep = getelementptr i32, i32 addrspace(1)* %gep.tid, i32 5
- %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %gep, i32 42, i32 0, i32 0, i1 false)
store i32 %result, i32 addrspace(1)* %out.gep
ret void
}
@@ -102,11 +102,11 @@ define void @global_atomic_inc_ret_i32_offset_addr64(i32 addrspace(1)* %out, i32
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; CI: buffer_atomic_inc [[K]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:20{{$}}
; VI: flat_atomic_inc v{{\[[0-9]+:[0-9]+\]}}, [[K]]{{$}}
-define void @global_atomic_inc_noret_i32_offset_addr64(i32 addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @global_atomic_inc_noret_i32_offset_addr64(i32 addrspace(1)* %ptr) #0 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i32, i32 addrspace(1)* %ptr, i32 %id
%gep = getelementptr i32, i32 addrspace(1)* %gep.tid, i32 5
- %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %gep, i32 42, i32 0, i32 0, i1 false)
ret void
}
@@ -115,11 +115,11 @@ define void @global_atomic_inc_noret_i32_offset_addr64(i32 addrspace(1)* %ptr) #
; GCN-LABEL: {{^}}atomic_inc_shl_base_lds_0_i32:
; GCN: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; GCN: ds_inc_rtn_u32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
-define void @atomic_inc_shl_base_lds_0_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+define amdgpu_kernel void @atomic_inc_shl_base_lds_0_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds0, i32 0, i32 %idx.0
- %val0 = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %arrayidx0, i32 9)
+ %val0 = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %arrayidx0, i32 9, i32 0, i32 0, i1 false)
store i32 %idx.0, i32 addrspace(1)* %add_use
store i32 %val0, i32 addrspace(1)* %out
ret void
@@ -129,8 +129,8 @@ define void @atomic_inc_shl_base_lds_0_i32(i32 addrspace(1)* %out, i32 addrspace
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: ds_inc_rtn_u64 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}}{{$}}
-define void @lds_atomic_inc_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) #0 {
- %result = call i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* %ptr, i64 42)
+define amdgpu_kernel void @lds_atomic_inc_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) #0 {
+ %result = call i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* %ptr, i64 42, i32 0, i32 0, i1 false)
store i64 %result, i64 addrspace(1)* %out
ret void
}
@@ -139,9 +139,9 @@ define void @lds_atomic_inc_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: ds_inc_rtn_u64 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} offset:32
-define void @lds_atomic_inc_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) #0 {
+define amdgpu_kernel void @lds_atomic_inc_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) #0 {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
- %result = call i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* %gep, i64 42, i32 0, i32 0, i1 false)
store i64 %result, i64 addrspace(1)* %out
ret void
}
@@ -150,8 +150,8 @@ define void @lds_atomic_inc_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: ds_inc_u64 v{{[0-9]+}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}}{{$}}
-define void @lds_atomic_inc_noret_i64(i64 addrspace(3)* %ptr) nounwind {
- %result = call i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* %ptr, i64 42)
+define amdgpu_kernel void @lds_atomic_inc_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+ %result = call i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* %ptr, i64 42, i32 0, i32 0, i1 false)
ret void
}
@@ -159,9 +159,9 @@ define void @lds_atomic_inc_noret_i64(i64 addrspace(3)* %ptr) nounwind {
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: ds_inc_u64 v{{[0-9]+}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} offset:32{{$}}
-define void @lds_atomic_inc_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_inc_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
- %result = call i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* %gep, i64 42, i32 0, i32 0, i1 false)
ret void
}
@@ -169,8 +169,8 @@ define void @lds_atomic_inc_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: buffer_atomic_inc_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 glc{{$}}
-define void @global_atomic_inc_ret_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %ptr) #0 {
- %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %ptr, i64 42)
+define amdgpu_kernel void @global_atomic_inc_ret_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %ptr) #0 {
+ %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %ptr, i64 42, i32 0, i32 0, i1 false)
store i64 %result, i64 addrspace(1)* %out
ret void
}
@@ -179,9 +179,9 @@ define void @global_atomic_inc_ret_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: buffer_atomic_inc_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:32 glc{{$}}
-define void @global_atomic_inc_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @global_atomic_inc_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %ptr) #0 {
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i32 4
- %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %gep, i64 42, i32 0, i32 0, i1 false)
store i64 %result, i64 addrspace(1)* %out
ret void
}
@@ -190,8 +190,8 @@ define void @global_atomic_inc_ret_i64_offset(i64 addrspace(1)* %out, i64 addrsp
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: buffer_atomic_inc_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
-define void @global_atomic_inc_noret_i64(i64 addrspace(1)* %ptr) nounwind {
- %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %ptr, i64 42)
+define amdgpu_kernel void @global_atomic_inc_noret_i64(i64 addrspace(1)* %ptr) nounwind {
+ %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %ptr, i64 42, i32 0, i32 0, i1 false)
ret void
}
@@ -199,45 +199,45 @@ define void @global_atomic_inc_noret_i64(i64 addrspace(1)* %ptr) nounwind {
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: buffer_atomic_inc_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:32{{$}}
-define void @global_atomic_inc_noret_i64_offset(i64 addrspace(1)* %ptr) nounwind {
+define amdgpu_kernel void @global_atomic_inc_noret_i64_offset(i64 addrspace(1)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i32 4
- %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %gep, i64 42, i32 0, i32 0, i1 false)
ret void
}
; GCN-LABEL: {{^}}global_atomic_inc_ret_i64_offset_addr64:
-; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
-; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
+; GCN: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
+; GCN: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; CI: buffer_atomic_inc_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:40 glc{{$}}
; VI: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} glc{{$}}
-define void @global_atomic_inc_ret_i64_offset_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @global_atomic_inc_ret_i64_offset_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %ptr) #0 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, i64 addrspace(1)* %ptr, i32 %id
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id
%gep = getelementptr i64, i64 addrspace(1)* %gep.tid, i32 5
- %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %gep, i64 42, i32 0, i32 0, i1 false)
store i64 %result, i64 addrspace(1)* %out.gep
ret void
}
; GCN-LABEL: {{^}}global_atomic_inc_noret_i64_offset_addr64:
-; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
-; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
+; GCN: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
+; GCN: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; CI: buffer_atomic_inc_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:40{{$}}
; VI: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}}{{$}}
-define void @global_atomic_inc_noret_i64_offset_addr64(i64 addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @global_atomic_inc_noret_i64_offset_addr64(i64 addrspace(1)* %ptr) #0 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, i64 addrspace(1)* %ptr, i32 %id
%gep = getelementptr i64, i64 addrspace(1)* %gep.tid, i32 5
- %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %gep, i64 42, i32 0, i32 0, i1 false)
ret void
}
; GCN-LABEL: {{^}}flat_atomic_inc_ret_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: flat_atomic_inc v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] glc{{$}}
-define void @flat_atomic_inc_ret_i32(i32 addrspace(4)* %out, i32 addrspace(4)* %ptr) #0 {
- %result = call i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* %ptr, i32 42)
+define amdgpu_kernel void @flat_atomic_inc_ret_i32(i32 addrspace(4)* %out, i32 addrspace(4)* %ptr) #0 {
+ %result = call i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* %ptr, i32 42, i32 0, i32 0, i1 false)
store i32 %result, i32 addrspace(4)* %out
ret void
}
@@ -245,38 +245,38 @@ define void @flat_atomic_inc_ret_i32(i32 addrspace(4)* %out, i32 addrspace(4)* %
; GCN-LABEL: {{^}}flat_atomic_inc_ret_i32_offset:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: flat_atomic_inc v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] glc{{$}}
-define void @flat_atomic_inc_ret_i32_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %ptr) #0 {
+define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %ptr) #0 {
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
- %result = call i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* %gep, i32 42, i32 0, i32 0, i1 false)
store i32 %result, i32 addrspace(4)* %out
ret void
}
; FUNC-LABEL: {{^}}flat_atomic_inc_noret_i32:
; GCN: flat_atomic_inc v{{\[[0-9]+:[0-9]+\]}}, [[K]]{{$}}
-define void @flat_atomic_inc_noret_i32(i32 addrspace(4)* %ptr) nounwind {
- %result = call i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* %ptr, i32 42)
+define amdgpu_kernel void @flat_atomic_inc_noret_i32(i32 addrspace(4)* %ptr) nounwind {
+ %result = call i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* %ptr, i32 42, i32 0, i32 0, i1 false)
ret void
}
; FUNC-LABEL: {{^}}flat_atomic_inc_noret_i32_offset:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: flat_atomic_inc v{{\[[0-9]+:[0-9]+\]}}, [[K]]{{$}}
-define void @flat_atomic_inc_noret_i32_offset(i32 addrspace(4)* %ptr) nounwind {
+define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset(i32 addrspace(4)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
- %result = call i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* %gep, i32 42, i32 0, i32 0, i1 false)
ret void
}
; GCN-LABEL: {{^}}flat_atomic_inc_ret_i32_offset_addr64:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: flat_atomic_inc v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] glc{{$}}
-define void @flat_atomic_inc_ret_i32_offset_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %ptr) #0 {
+define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset_addr64(i32 addrspace(4)* %out, i32 addrspace(4)* %ptr) #0 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i32, i32 addrspace(4)* %ptr, i32 %id
%out.gep = getelementptr i32, i32 addrspace(4)* %out, i32 %id
%gep = getelementptr i32, i32 addrspace(4)* %gep.tid, i32 5
- %result = call i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* %gep, i32 42, i32 0, i32 0, i1 false)
store i32 %result, i32 addrspace(4)* %out.gep
ret void
}
@@ -284,11 +284,11 @@ define void @flat_atomic_inc_ret_i32_offset_addr64(i32 addrspace(4)* %out, i32 a
; GCN-LABEL: {{^}}flat_atomic_inc_noret_i32_offset_addr64:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: flat_atomic_inc v{{\[[0-9]+:[0-9]+\]}}, [[K]]{{$}}
-define void @flat_atomic_inc_noret_i32_offset_addr64(i32 addrspace(4)* %ptr) #0 {
+define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset_addr64(i32 addrspace(4)* %ptr) #0 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i32, i32 addrspace(4)* %ptr, i32 %id
%gep = getelementptr i32, i32 addrspace(4)* %gep.tid, i32 5
- %result = call i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* %gep, i32 42)
+ %result = call i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* %gep, i32 42, i32 0, i32 0, i1 false)
ret void
}
@@ -297,31 +297,22 @@ define void @flat_atomic_inc_noret_i32_offset_addr64(i32 addrspace(4)* %ptr) #0
; GCN-LABEL: {{^}}atomic_inc_shl_base_lds_0_i64:
; GCN: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 3, {{v[0-9]+}}
; GCN: ds_inc_rtn_u64 v{{\[[0-9]+:[0-9]+\]}}, [[PTR]], v{{\[[0-9]+:[0-9]+\]}} offset:16
-define void @atomic_inc_shl_base_lds_0_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+define amdgpu_kernel void @atomic_inc_shl_base_lds_0_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i64], [512 x i64] addrspace(3)* @lds1, i32 0, i32 %idx.0
- %val0 = call i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* %arrayidx0, i64 9)
+ %val0 = call i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* %arrayidx0, i64 9, i32 0, i32 0, i1 false)
store i32 %idx.0, i32 addrspace(1)* %add_use
store i64 %val0, i64 addrspace(1)* %out
ret void
}
-attributes #0 = { nounwind }
-attributes #1 = { nounwind readnone }
-attributes #2 = { nounwind argmemonly }
-
-
-
-
-
-
; GCN-LABEL: {{^}}flat_atomic_inc_ret_i64:
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} glc{{$}}
-define void @flat_atomic_inc_ret_i64(i64 addrspace(4)* %out, i64 addrspace(4)* %ptr) #0 {
- %result = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %ptr, i64 42)
+define amdgpu_kernel void @flat_atomic_inc_ret_i64(i64 addrspace(4)* %out, i64 addrspace(4)* %ptr) #0 {
+ %result = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %ptr, i64 42, i32 0, i32 0, i1 false)
store i64 %result, i64 addrspace(4)* %out
ret void
}
@@ -330,9 +321,9 @@ define void @flat_atomic_inc_ret_i64(i64 addrspace(4)* %out, i64 addrspace(4)* %
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} glc{{$}}
-define void @flat_atomic_inc_ret_i64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %ptr) #0 {
+define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %ptr) #0 {
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i32 4
- %result = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %gep, i64 42, i32 0, i32 0, i1 false)
store i64 %result, i64 addrspace(4)* %out
ret void
}
@@ -341,8 +332,8 @@ define void @flat_atomic_inc_ret_i64_offset(i64 addrspace(4)* %out, i64 addrspac
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]$}}
-define void @flat_atomic_inc_noret_i64(i64 addrspace(4)* %ptr) nounwind {
- %result = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %ptr, i64 42)
+define amdgpu_kernel void @flat_atomic_inc_noret_i64(i64 addrspace(4)* %ptr) nounwind {
+ %result = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %ptr, i64 42, i32 0, i32 0, i1 false)
ret void
}
@@ -350,34 +341,38 @@ define void @flat_atomic_inc_noret_i64(i64 addrspace(4)* %ptr) nounwind {
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]$}}
-define void @flat_atomic_inc_noret_i64_offset(i64 addrspace(4)* %ptr) nounwind {
+define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset(i64 addrspace(4)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(4)* %ptr, i32 4
- %result = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %gep, i64 42, i32 0, i32 0, i1 false)
ret void
}
; GCN-LABEL: {{^}}flat_atomic_inc_ret_i64_offset_addr64:
-; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
-; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
+; GCN: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
+; GCN: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} glc{{$}}
-define void @flat_atomic_inc_ret_i64_offset_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %ptr) #0 {
+define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset_addr64(i64 addrspace(4)* %out, i64 addrspace(4)* %ptr) #0 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, i64 addrspace(4)* %ptr, i32 %id
%out.gep = getelementptr i64, i64 addrspace(4)* %out, i32 %id
%gep = getelementptr i64, i64 addrspace(4)* %gep.tid, i32 5
- %result = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %gep, i64 42, i32 0, i32 0, i1 false)
store i64 %result, i64 addrspace(4)* %out.gep
ret void
}
; GCN-LABEL: {{^}}flat_atomic_inc_noret_i64_offset_addr64:
-; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
-; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
+; GCN: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
+; GCN: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]$}}
-define void @flat_atomic_inc_noret_i64_offset_addr64(i64 addrspace(4)* %ptr) #0 {
+define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset_addr64(i64 addrspace(4)* %ptr) #0 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, i64 addrspace(4)* %ptr, i32 %id
%gep = getelementptr i64, i64 addrspace(4)* %gep.tid, i32 5
- %result = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %gep, i64 42)
+ %result = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %gep, i64 42, i32 0, i32 0, i1 false)
ret void
}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind argmemonly }
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll
index 6d9db65e7d93..10bea8ea63b0 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll
@@ -8,7 +8,7 @@ declare void @llvm.amdgcn.buffer.wbinvl1() #0
; SI-NEXT: buffer_wbinvl1 ; encoding: [0x00,0x00,0xc4,0xe1,0x00,0x00,0x00,0x00]
; VI-NEXT: buffer_wbinvl1 ; encoding: [0x00,0x00,0xf8,0xe0,0x00,0x00,0x00,0x00]
; GCN-NEXT: s_endpgm
-define void @test_buffer_wbinvl1() #0 {
+define amdgpu_kernel void @test_buffer_wbinvl1() #0 {
call void @llvm.amdgcn.buffer.wbinvl1()
ret void
}
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll
index 746298465e58..fe60d16d90f7 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll
@@ -6,7 +6,7 @@ declare void @llvm.amdgcn.buffer.wbinvl1.sc() #0
; SI-NEXT: ; BB#0:
; SI-NEXT: buffer_wbinvl1_sc ; encoding: [0x00,0x00,0xc0,0xe1,0x00,0x00,0x00,0x00]
; SI-NEXT: s_endpgm
-define void @test_buffer_wbinvl1_sc() #0 {
+define amdgpu_kernel void @test_buffer_wbinvl1_sc() #0 {
call void @llvm.amdgcn.buffer.wbinvl1.sc()
ret void
}
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll
index 4e0f3c37f214..061c1469ed4d 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll
@@ -8,7 +8,7 @@ declare void @llvm.amdgcn.buffer.wbinvl1.vol() #0
; CI-NEXT: buffer_wbinvl1_vol ; encoding: [0x00,0x00,0xc0,0xe1,0x00,0x00,0x00,0x00]
; VI-NEXT: buffer_wbinvl1_vol ; encoding: [0x00,0x00,0xfc,0xe0,0x00,0x00,0x00,0x00]
; GCN: s_endpgm
-define void @test_buffer_wbinvl1_vol() #0 {
+define amdgpu_kernel void @test_buffer_wbinvl1_vol() #0 {
call void @llvm.amdgcn.buffer.wbinvl1.vol()
; This used to crash in hazard recognizer
store i8 0, i8 addrspace(1)* undef, align 1
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.class.f16.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.class.f16.ll
index 011a0fdbd219..f08d4b6c7915 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.class.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.class.f16.ll
@@ -10,7 +10,7 @@ declare i1 @llvm.amdgcn.class.f16(half %a, i32 %b)
; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
; GCN: buffer_store_dword v[[R_I32]]
; GCN: s_endpgm
-define void @class_f16(
+define amdgpu_kernel void @class_f16(
i32 addrspace(1)* %r,
half addrspace(1)* %a,
i32 addrspace(1)* %b) {
@@ -31,7 +31,7 @@ entry:
; VI: v_cndmask_b32_e64 v[[VR_I32:[0-9]+]], 0, -1, [[CMP]]
; GCN: buffer_store_dword v[[VR_I32]]
; GCN: s_endpgm
-define void @class_f16_fabs(
+define amdgpu_kernel void @class_f16_fabs(
i32 addrspace(1)* %r,
half %a.val,
i32 %b.val) {
@@ -46,12 +46,12 @@ entry:
; GCN-LABEL: {{^}}class_f16_fneg
; GCN: s_load_dword s[[SA_F16:[0-9]+]]
; GCN: s_load_dword s[[SB_I32:[0-9]+]]
-; VI: v_trunc_f16_e32 v[[VA_F16:[0-9]+]], s[[SA_F16]]
-; VI: v_cmp_class_f16_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -v[[VA_F16]], s[[SB_I32]]
+; VI: v_trunc_f16_e64 v[[VA_F16:[0-9]+]], -s[[SA_F16]]
+; VI: v_cmp_class_f16_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], v[[VA_F16]], s[[SB_I32]]
; VI: v_cndmask_b32_e64 v[[VR_I32:[0-9]+]], 0, -1, [[CMP]]
; GCN: buffer_store_dword v[[VR_I32]]
; GCN: s_endpgm
-define void @class_f16_fneg(
+define amdgpu_kernel void @class_f16_fneg(
i32 addrspace(1)* %r,
half %a.val,
i32 %b.val) {
@@ -71,7 +71,7 @@ entry:
; VI: v_cndmask_b32_e64 v[[VR_I32:[0-9]+]], 0, -1, [[CMP]]
; GCN: buffer_store_dword v[[VR_I32]]
; GCN: s_endpgm
-define void @class_f16_fabs_fneg(
+define amdgpu_kernel void @class_f16_fabs_fneg(
i32 addrspace(1)* %r,
half %a.val,
i32 %b.val) {
@@ -91,7 +91,7 @@ entry:
; VI: v_cndmask_b32_e64 v[[VR_I32:[0-9]+]], 0, -1, [[CMP]]
; GCN: buffer_store_dword v[[VR_I32]]
; GCN: s_endpgm
-define void @class_f16_1(
+define amdgpu_kernel void @class_f16_1(
i32 addrspace(1)* %r,
half %a.val) {
entry:
@@ -108,7 +108,7 @@ entry:
; VI: v_cndmask_b32_e64 v[[VR_I32:[0-9]+]], 0, -1, [[CMP]]
; GCN: buffer_store_dword v[[VR_I32]]
; GCN: s_endpgm
-define void @class_f16_64(
+define amdgpu_kernel void @class_f16_64(
i32 addrspace(1)* %r,
half %a.val) {
entry:
@@ -126,7 +126,7 @@ entry:
; VI: v_cndmask_b32_e64 v[[VR_I32:[0-9]+]], 0, -1, vcc
; GCN: buffer_store_dword v[[VR_I32]]
; GCN: s_endpgm
-define void @class_f16_full_mask(
+define amdgpu_kernel void @class_f16_full_mask(
i32 addrspace(1)* %r,
half %a.val) {
entry:
@@ -144,7 +144,7 @@ entry:
; VI: v_cndmask_b32_e64 v[[VR_I32:[0-9]+]], 0, -1, vcc
; GCN: buffer_store_dword v[[VR_I32]]
; GCN: s_endpgm
-define void @class_f16_nine_bit_mask(
+define amdgpu_kernel void @class_f16_nine_bit_mask(
i32 addrspace(1)* %r,
half %a.val) {
entry:
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.class.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.class.ll
index 668c669e41e8..1fcdac537fba 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.class.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.class.ll
@@ -14,7 +14,7 @@ declare double @llvm.fabs.f64(double) #1
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @test_class_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
+define amdgpu_kernel void @test_class_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
%result = call i1 @llvm.amdgcn.class.f32(float %a, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
@@ -29,7 +29,7 @@ define void @test_class_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @test_class_fabs_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
+define amdgpu_kernel void @test_class_fabs_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
%a.fabs = call float @llvm.fabs.f32(float %a) #1
%result = call i1 @llvm.amdgcn.class.f32(float %a.fabs, i32 %b) #1
%sext = sext i1 %result to i32
@@ -45,7 +45,7 @@ define void @test_class_fabs_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @test_class_fneg_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
+define amdgpu_kernel void @test_class_fneg_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
%a.fneg = fsub float -0.0, %a
%result = call i1 @llvm.amdgcn.class.f32(float %a.fneg, i32 %b) #1
%sext = sext i1 %result to i32
@@ -61,7 +61,7 @@ define void @test_class_fneg_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @test_class_fneg_fabs_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
+define amdgpu_kernel void @test_class_fneg_fabs_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
%a.fabs = call float @llvm.fabs.f32(float %a) #1
%a.fneg.fabs = fsub float -0.0, %a.fabs
%result = call i1 @llvm.amdgcn.class.f32(float %a.fneg.fabs, i32 %b) #1
@@ -76,7 +76,7 @@ define void @test_class_fneg_fabs_f32(i32 addrspace(1)* %out, float %a, i32 %b)
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[COND]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @test_class_1_f32(i32 addrspace(1)* %out, float %a) #0 {
+define amdgpu_kernel void @test_class_1_f32(i32 addrspace(1)* %out, float %a) #0 {
%result = call i1 @llvm.amdgcn.class.f32(float %a, i32 1) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
@@ -89,7 +89,7 @@ define void @test_class_1_f32(i32 addrspace(1)* %out, float %a) #0 {
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[COND]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @test_class_64_f32(i32 addrspace(1)* %out, float %a) #0 {
+define amdgpu_kernel void @test_class_64_f32(i32 addrspace(1)* %out, float %a) #0 {
%result = call i1 @llvm.amdgcn.class.f32(float %a, i32 64) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
@@ -104,7 +104,7 @@ define void @test_class_64_f32(i32 addrspace(1)* %out, float %a) #0 {
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @test_class_full_mask_f32(i32 addrspace(1)* %out, float %a) #0 {
+define amdgpu_kernel void @test_class_full_mask_f32(i32 addrspace(1)* %out, float %a) #0 {
%result = call i1 @llvm.amdgcn.class.f32(float %a, i32 1023) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
@@ -118,7 +118,7 @@ define void @test_class_full_mask_f32(i32 addrspace(1)* %out, float %a) #0 {
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @test_class_9bit_mask_f32(i32 addrspace(1)* %out, float %a) #0 {
+define amdgpu_kernel void @test_class_9bit_mask_f32(i32 addrspace(1)* %out, float %a) #0 {
%result = call i1 @llvm.amdgcn.class.f32(float %a, i32 511) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
@@ -132,7 +132,7 @@ define void @test_class_9bit_mask_f32(i32 addrspace(1)* %out, float %a) #0 {
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @v_test_class_full_mask_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_test_class_full_mask_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -150,7 +150,7 @@ define void @v_test_class_full_mask_f32(i32 addrspace(1)* %out, float addrspace(
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @test_class_inline_imm_constant_dynamic_mask_f32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_class_inline_imm_constant_dynamic_mask_f32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -170,7 +170,7 @@ define void @test_class_inline_imm_constant_dynamic_mask_f32(i32 addrspace(1)* %
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @test_class_lit_constant_dynamic_mask_f32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_class_lit_constant_dynamic_mask_f32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -190,7 +190,7 @@ define void @test_class_lit_constant_dynamic_mask_f32(i32 addrspace(1)* %out, i3
; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @test_class_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
+define amdgpu_kernel void @test_class_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
%result = call i1 @llvm.amdgcn.class.f64(double %a, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
@@ -205,7 +205,7 @@ define void @test_class_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @test_class_fabs_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
+define amdgpu_kernel void @test_class_fabs_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
%a.fabs = call double @llvm.fabs.f64(double %a) #1
%result = call i1 @llvm.amdgcn.class.f64(double %a.fabs, i32 %b) #1
%sext = sext i1 %result to i32
@@ -221,7 +221,7 @@ define void @test_class_fabs_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @test_class_fneg_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
+define amdgpu_kernel void @test_class_fneg_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
%a.fneg = fsub double -0.0, %a
%result = call i1 @llvm.amdgcn.class.f64(double %a.fneg, i32 %b) #1
%sext = sext i1 %result to i32
@@ -237,7 +237,7 @@ define void @test_class_fneg_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @test_class_fneg_fabs_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
+define amdgpu_kernel void @test_class_fneg_fabs_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
%a.fabs = call double @llvm.fabs.f64(double %a) #1
%a.fneg.fabs = fsub double -0.0, %a.fabs
%result = call i1 @llvm.amdgcn.class.f64(double %a.fneg.fabs, i32 %b) #1
@@ -249,7 +249,7 @@ define void @test_class_fneg_fabs_f64(i32 addrspace(1)* %out, double %a, i32 %b)
; SI-LABEL: {{^}}test_class_1_f64:
; SI: v_cmp_class_f64_e64 {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 1{{$}}
; SI: s_endpgm
-define void @test_class_1_f64(i32 addrspace(1)* %out, double %a) #0 {
+define amdgpu_kernel void @test_class_1_f64(i32 addrspace(1)* %out, double %a) #0 {
%result = call i1 @llvm.amdgcn.class.f64(double %a, i32 1) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
@@ -259,7 +259,7 @@ define void @test_class_1_f64(i32 addrspace(1)* %out, double %a) #0 {
; SI-LABEL: {{^}}test_class_64_f64:
; SI: v_cmp_class_f64_e64 {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 64{{$}}
; SI: s_endpgm
-define void @test_class_64_f64(i32 addrspace(1)* %out, double %a) #0 {
+define amdgpu_kernel void @test_class_64_f64(i32 addrspace(1)* %out, double %a) #0 {
%result = call i1 @llvm.amdgcn.class.f64(double %a, i32 64) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
@@ -275,7 +275,7 @@ define void @test_class_64_f64(i32 addrspace(1)* %out, double %a) #0 {
; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @test_class_full_mask_f64(i32 addrspace(1)* %out, double %a) #0 {
+define amdgpu_kernel void @test_class_full_mask_f64(i32 addrspace(1)* %out, double %a) #0 {
%result = call i1 @llvm.amdgcn.class.f64(double %a, i32 511) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
@@ -290,7 +290,7 @@ define void @test_class_full_mask_f64(i32 addrspace(1)* %out, double %a) #0 {
; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @v_test_class_full_mask_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_test_class_full_mask_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -306,7 +306,7 @@ define void @v_test_class_full_mask_f64(i32 addrspace(1)* %out, double addrspace
; XSI: v_cmp_class_f64_e32 vcc, 1.0,
; SI: v_cmp_class_f64_e32 vcc,
; SI: s_endpgm
-define void @test_class_inline_imm_constant_dynamic_mask_f64(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_class_inline_imm_constant_dynamic_mask_f64(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -321,7 +321,7 @@ define void @test_class_inline_imm_constant_dynamic_mask_f64(i32 addrspace(1)* %
; SI-LABEL: {{^}}test_class_lit_constant_dynamic_mask_f64:
; SI: v_cmp_class_f64_e32 vcc, s{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}
; SI: s_endpgm
-define void @test_class_lit_constant_dynamic_mask_f64(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_class_lit_constant_dynamic_mask_f64(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -338,7 +338,7 @@ define void @test_class_lit_constant_dynamic_mask_f64(i32 addrspace(1)* %out, i3
; SI: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 3{{$}}
; SI-NOT: v_cmp_class
; SI: s_endpgm
-define void @test_fold_or_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fold_or_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -358,7 +358,7 @@ define void @test_fold_or_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)
; SI: v_cmp_class_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 7{{$}}
; SI-NOT: v_cmp_class
; SI: s_endpgm
-define void @test_fold_or3_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fold_or3_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -381,7 +381,7 @@ define void @test_fold_or3_class_f32_0(i32 addrspace(1)* %out, float addrspace(1
; SI: v_cmp_class_f32_e32 vcc, v{{[0-9]+}}, [[MASK]]{{$}}
; SI-NOT: v_cmp_class
; SI: s_endpgm
-define void @test_fold_or_all_tests_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fold_or_all_tests_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -416,7 +416,7 @@ define void @test_fold_or_all_tests_class_f32_0(i32 addrspace(1)* %out, float ad
; SI: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 12{{$}}
; SI-NOT: v_cmp_class
; SI: s_endpgm
-define void @test_fold_or_class_f32_1(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fold_or_class_f32_1(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -436,7 +436,7 @@ define void @test_fold_or_class_f32_1(i32 addrspace(1)* %out, float addrspace(1)
; SI: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 7{{$}}
; SI-NOT: v_cmp_class
; SI: s_endpgm
-define void @test_fold_or_class_f32_2(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_fold_or_class_f32_2(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -456,7 +456,7 @@ define void @test_fold_or_class_f32_2(i32 addrspace(1)* %out, float addrspace(1)
; SI-DAG: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}, 8{{$}}
; SI: s_or_b64
; SI: s_endpgm
-define void @test_no_fold_or_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in, float %b) #0 {
+define amdgpu_kernel void @test_no_fold_or_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in, float %b) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -476,7 +476,7 @@ define void @test_no_fold_or_class_f32_0(i32 addrspace(1)* %out, float addrspace
; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 0{{$}}
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @test_class_0_f32(i32 addrspace(1)* %out, float %a) #0 {
+define amdgpu_kernel void @test_class_0_f32(i32 addrspace(1)* %out, float %a) #0 {
%result = call i1 @llvm.amdgcn.class.f32(float %a, i32 0) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
@@ -488,7 +488,7 @@ define void @test_class_0_f32(i32 addrspace(1)* %out, float %a) #0 {
; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 0{{$}}
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @test_class_0_f64(i32 addrspace(1)* %out, double %a) #0 {
+define amdgpu_kernel void @test_class_0_f64(i32 addrspace(1)* %out, double %a) #0 {
%result = call i1 @llvm.amdgcn.class.f64(double %a, i32 0) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
@@ -500,7 +500,7 @@ define void @test_class_0_f64(i32 addrspace(1)* %out, double %a) #0 {
; SI-NOT: v_cmp_class
; SI: v_cndmask_b32_e64 v{{[0-9]+}}, 0, -1,
; SI: buffer_store_dword
-define void @test_class_undef_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
+define amdgpu_kernel void @test_class_undef_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
%result = call i1 @llvm.amdgcn.class.f32(float undef, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.cos.f16.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.cos.f16.ll
index 410ac59279a5..054388607293 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.cos.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.cos.f16.ll
@@ -7,7 +7,7 @@ declare half @llvm.amdgcn.cos.f16(half %a)
; VI: v_cos_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @cos_f16(
+define amdgpu_kernel void @cos_f16(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.cos.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.cos.ll
index f6495d8155f7..5b9c83c11cf4 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.cos.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.cos.ll
@@ -5,7 +5,7 @@ declare float @llvm.amdgcn.cos.f32(float) #0
; GCN-LABEL: {{^}}v_cos_f32:
; GCN: v_cos_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}
-define void @v_cos_f32(float addrspace(1)* %out, float %src) #1 {
+define amdgpu_kernel void @v_cos_f32(float addrspace(1)* %out, float %src) #1 {
%cos = call float @llvm.amdgcn.cos.f32(float %src) #0
store float %cos, float addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.cubeid.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.cubeid.ll
index 22bed45ee30f..dadb070bdcf8 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.cubeid.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.cubeid.ll
@@ -5,7 +5,7 @@ declare float @llvm.amdgcn.cubeid(float, float, float) #0
; GCN-LABEL: {{^}}test_cubeid:
; GCN: v_cubeid_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @test_cubeid(float addrspace(1)* %out, float %a, float %b, float %c) #1 {
+define amdgpu_kernel void @test_cubeid(float addrspace(1)* %out, float %a, float %b, float %c) #1 {
%result = call float @llvm.amdgcn.cubeid(float %a, float %b, float %c)
store float %result, float addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.cubema.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.cubema.ll
index 565f22c5d5b6..60c4618a011b 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.cubema.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.cubema.ll
@@ -5,7 +5,7 @@ declare float @llvm.amdgcn.cubema(float, float, float) #0
; GCN-LABEL: {{^}}test_cubema:
; GCN: v_cubema_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @test_cubema(float addrspace(1)* %out, float %a, float %b, float %c) #1 {
+define amdgpu_kernel void @test_cubema(float addrspace(1)* %out, float %a, float %b, float %c) #1 {
%result = call float @llvm.amdgcn.cubema(float %a, float %b, float %c)
store float %result, float addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.cubesc.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.cubesc.ll
index a3ba32745814..10669cf99138 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.cubesc.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.cubesc.ll
@@ -5,7 +5,7 @@ declare float @llvm.amdgcn.cubesc(float, float, float) #0
; GCN-LABEL: {{^}}test_cubesc:
; GCN: v_cubesc_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @test_cubesc(float addrspace(1)* %out, float %a, float %b, float %c) #1 {
+define amdgpu_kernel void @test_cubesc(float addrspace(1)* %out, float %a, float %b, float %c) #1 {
%result = call float @llvm.amdgcn.cubesc(float %a, float %b, float %c)
store float %result, float addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.cubetc.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.cubetc.ll
index d3c0f2851ead..b2770308c170 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.cubetc.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.cubetc.ll
@@ -5,7 +5,7 @@ declare float @llvm.amdgcn.cubetc(float, float, float) #0
; GCN-LABEL: {{^}}test_cubetc:
; GCN: v_cubetc_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @test_cubetc(float addrspace(1)* %out, float %a, float %b, float %c) #1 {
+define amdgpu_kernel void @test_cubetc(float addrspace(1)* %out, float %a, float %b, float %c) #1 {
%result = call float @llvm.amdgcn.cubetc(float %a, float %b, float %c)
store float %result, float addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll
new file mode 100644
index 000000000000..b92eb34750d9
--- /dev/null
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll
@@ -0,0 +1,166 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX89 -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX89 -check-prefix=GFX9 %s
+
+; GCN-LABEL: {{^}}s_cvt_pkrtz_v2f16_f32:
+; GCN-DAG: s_load_dword [[X:s[0-9]+]], s[0:1], 0x{{b|2c}}
+; GCN-DAG: s_load_dword [[SY:s[0-9]+]], s[0:1], 0x{{c|30}}
+; GCN: v_mov_b32_e32 [[VY:v[0-9]+]], [[SY]]
+; SI: v_cvt_pkrtz_f16_f32_e32 v{{[0-9]+}}, [[X]], [[VY]]
+; GFX89: v_cvt_pkrtz_f16_f32_e64 v{{[0-9]+}}, [[X]], [[VY]]
+define amdgpu_kernel void @s_cvt_pkrtz_v2f16_f32(<2 x half> addrspace(1)* %out, float %x, float %y) #0 {
+ %result = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %x, float %y)
+ store <2 x half> %result, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_cvt_pkrtz_samereg_v2f16_f32:
+; GCN: s_load_dword [[X:s[0-9]+]]
+; GCN: v_cvt_pkrtz_f16_f32_e64 v{{[0-9]+}}, [[X]], [[X]]
+define amdgpu_kernel void @s_cvt_pkrtz_samereg_v2f16_f32(<2 x half> addrspace(1)* %out, float %x) #0 {
+ %result = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %x, float %x)
+ store <2 x half> %result, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; FIXME: Folds to 0 on gfx9
+; GCN-LABEL: {{^}}s_cvt_pkrtz_undef_undef:
+; GCN-NEXT: ; BB#0
+; SI-NEXT: s_endpgm
+; VI-NEXT: s_endpgm
+; GFX9: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}}
+define amdgpu_kernel void @s_cvt_pkrtz_undef_undef(<2 x half> addrspace(1)* %out) #0 {
+ %result = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float undef, float undef)
+ store <2 x half> %result, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_cvt_pkrtz_v2f16_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; SI: v_cvt_pkrtz_f16_f32_e32 v{{[0-9]+}}, [[A]], [[B]]
+; GFX89: v_cvt_pkrtz_f16_f32_e64 v{{[0-9]+}}, [[A]], [[B]]
+define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32(<2 x half> addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %a, float %b)
+ store <2 x half> %cvt, <2 x half> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_cvt_pkrtz_v2f16_f32_reg_imm:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_cvt_pkrtz_f16_f32_e64 v{{[0-9]+}}, [[A]], 1.0
+define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32_reg_imm(<2 x half> addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %a, float 1.0)
+ store <2 x half> %cvt, <2 x half> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_cvt_pkrtz_v2f16_f32_imm_reg:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; SI: v_cvt_pkrtz_f16_f32_e32 v{{[0-9]+}}, 1.0, [[A]]
+; GFX89: v_cvt_pkrtz_f16_f32_e64 v{{[0-9]+}}, 1.0, [[A]]
+define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32_imm_reg(<2 x half> addrspace(1)* %out, float addrspace(1)* %a.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float 1.0, float %a)
+ store <2 x half> %cvt, <2 x half> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_cvt_pkrtz_v2f16_f32_fneg_lo:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: v_cvt_pkrtz_f16_f32_e64 v{{[0-9]+}}, -[[A]], [[B]]
+define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32_fneg_lo(<2 x half> addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %neg.a = fsub float -0.0, %a
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %neg.a, float %b)
+ store <2 x half> %cvt, <2 x half> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_cvt_pkrtz_v2f16_f32_fneg_hi:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: v_cvt_pkrtz_f16_f32_e64 v{{[0-9]+}}, [[A]], -[[B]]
+define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32_fneg_hi(<2 x half> addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %neg.b = fsub float -0.0, %b
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %a, float %neg.b)
+ store <2 x half> %cvt, <2 x half> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_cvt_pkrtz_v2f16_f32_fneg_lo_hi:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: v_cvt_pkrtz_f16_f32_e64 v{{[0-9]+}}, -[[A]], -[[B]]
+define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32_fneg_lo_hi(<2 x half> addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %neg.a = fsub float -0.0, %a
+ %neg.b = fsub float -0.0, %b
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %neg.a, float %neg.b)
+ store <2 x half> %cvt, <2 x half> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_cvt_pkrtz_v2f16_f32_fneg_fabs_lo_fneg_hi:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN: v_cvt_pkrtz_f16_f32_e64 v{{[0-9]+}}, -|[[A]]|, -[[B]]
+define amdgpu_kernel void @v_cvt_pkrtz_v2f16_f32_fneg_fabs_lo_fneg_hi(<2 x half> addrspace(1)* %out, float addrspace(1)* %a.ptr, float addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds float, float addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds float, float addrspace(1)* %b.ptr, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i64 %tid.ext
+ %a = load volatile float, float addrspace(1)* %a.gep
+ %b = load volatile float, float addrspace(1)* %b.gep
+ %fabs.a = call float @llvm.fabs.f32(float %a)
+ %neg.fabs.a = fsub float -0.0, %fabs.a
+ %neg.b = fsub float -0.0, %b
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %neg.fabs.a, float %neg.b)
+ store <2 x half> %cvt, <2 x half> addrspace(1)* %out.gep
+ ret void
+}
+
+declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
+declare float @llvm.fabs.f32(float) #1
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.dispatch.id.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.dispatch.id.ll
index 6c09aa592447..58250de2f891 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.dispatch.id.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.dispatch.id.ll
@@ -9,7 +9,7 @@ declare i64 @llvm.amdgcn.dispatch.id() #1
; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], s6
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], s7
; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @dispatch_id(i64 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @dispatch_id(i64 addrspace(1)* %out) #0 {
%tmp0 = call i64 @llvm.amdgcn.dispatch.id()
store i64 %tmp0, i64 addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.dispatch.ptr.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.dispatch.ptr.ll
index 2e8625256f13..92208e7fe17c 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.dispatch.ptr.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.dispatch.ptr.ll
@@ -6,7 +6,7 @@
; GCN-LABEL: {{^}}test:
; GCN: enable_sgpr_dispatch_ptr = 1
; GCN: s_load_dword s{{[0-9]+}}, s[4:5], 0x0
-define void @test(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @test(i32 addrspace(1)* %out) {
%dispatch_ptr = call noalias i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr() #0
%header_ptr = bitcast i8 addrspace(2)* %dispatch_ptr to i32 addrspace(2)*
%value = load i32, i32 addrspace(2)* %header_ptr
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.div.fixup.f16.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.div.fixup.f16.ll
index 6d262cf497ac..e04d9e662cea 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.div.fixup.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.div.fixup.f16.ll
@@ -9,7 +9,7 @@ declare half @llvm.amdgcn.div.fixup.f16(half %a, half %b, half %c)
; VI: v_div_fixup_f16 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_F16]], v[[C_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @div_fixup_f16(
+define amdgpu_kernel void @div_fixup_f16(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b,
@@ -30,7 +30,7 @@ entry:
; VI: v_div_fixup_f16 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]], v[[C_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @div_fixup_f16_imm_a(
+define amdgpu_kernel void @div_fixup_f16_imm_a(
half addrspace(1)* %r,
half addrspace(1)* %b,
half addrspace(1)* %c) {
@@ -49,7 +49,7 @@ entry:
; VI: v_div_fixup_f16 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]], v[[C_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @div_fixup_f16_imm_b(
+define amdgpu_kernel void @div_fixup_f16_imm_b(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %c) {
@@ -68,7 +68,7 @@ entry:
; VI: v_div_fixup_f16 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]], v[[C_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @div_fixup_f16_imm_c(
+define amdgpu_kernel void @div_fixup_f16_imm_c(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -86,7 +86,7 @@ entry:
; VI: v_div_fixup_f16 v[[R_F16:[0-9]+]], v[[AB_F16]], v[[AB_F16]], v[[C_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @div_fixup_f16_imm_a_imm_b(
+define amdgpu_kernel void @div_fixup_f16_imm_a_imm_b(
half addrspace(1)* %r,
half addrspace(1)* %c) {
entry:
@@ -102,7 +102,7 @@ entry:
; VI: v_div_fixup_f16 v[[R_F16:[0-9]+]], v[[A_F16]], v[[BC_F16]], v[[BC_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @div_fixup_f16_imm_b_imm_c(
+define amdgpu_kernel void @div_fixup_f16_imm_b_imm_c(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -118,7 +118,7 @@ entry:
; VI: v_div_fixup_f16 v[[R_F16:[0-9]+]], v[[AC_F16]], v[[B_F16]], v[[AC_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @div_fixup_f16_imm_a_imm_c(
+define amdgpu_kernel void @div_fixup_f16_imm_a_imm_c(
half addrspace(1)* %r,
half addrspace(1)* %b) {
entry:
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.div.fixup.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.div.fixup.ll
index cc1504f2bc8d..b8fcacf46bba 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.div.fixup.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.div.fixup.ll
@@ -16,7 +16,7 @@ declare double @llvm.amdgcn.div.fixup.f64(double, double, double) nounwind readn
; GCN: v_div_fixup_f32 [[RESULT:v[0-9]+]], [[SA]], [[VB]], [[VC]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
-define void @test_div_fixup_f32(float addrspace(1)* %out, float %a, float %b, float %c) nounwind {
+define amdgpu_kernel void @test_div_fixup_f32(float addrspace(1)* %out, float %a, float %b, float %c) nounwind {
%result = call float @llvm.amdgcn.div.fixup.f32(float %a, float %b, float %c) nounwind readnone
store float %result, float addrspace(1)* %out, align 4
ret void
@@ -24,7 +24,7 @@ define void @test_div_fixup_f32(float addrspace(1)* %out, float %a, float %b, fl
; GCN-LABEL: {{^}}test_div_fixup_f64:
; GCN: v_div_fixup_f64
-define void @test_div_fixup_f64(double addrspace(1)* %out, double %a, double %b, double %c) nounwind {
+define amdgpu_kernel void @test_div_fixup_f64(double addrspace(1)* %out, double %a, double %b, double %c) nounwind {
%result = call double @llvm.amdgcn.div.fixup.f64(double %a, double %b, double %c) nounwind readnone
store double %result, double addrspace(1)* %out, align 8
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.div.fmas.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.div.fmas.ll
index d408fe9f87f6..a86468b07a27 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.div.fmas.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.div.fmas.ll
@@ -20,7 +20,7 @@ declare double @llvm.amdgcn.div.fmas.f64(double, double, double, i1) nounwind re
; GCN: v_div_fmas_f32 [[RESULT:v[0-9]+]], [[VB]], [[VA]], [[VC]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
-define void @test_div_fmas_f32(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
+define amdgpu_kernel void @test_div_fmas_f32(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
%result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %d) nounwind readnone
store float %result, float addrspace(1)* %out, align 4
ret void
@@ -34,7 +34,7 @@ define void @test_div_fmas_f32(float addrspace(1)* %out, float %a, float %b, flo
; SI: v_div_fmas_f32 [[RESULT:v[0-9]+]], 1.0, [[VB]], [[VC]]
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
-define void @test_div_fmas_f32_inline_imm_0(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
+define amdgpu_kernel void @test_div_fmas_f32_inline_imm_0(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
%result = call float @llvm.amdgcn.div.fmas.f32(float 1.0, float %b, float %c, i1 %d) nounwind readnone
store float %result, float addrspace(1)* %out, align 4
ret void
@@ -48,7 +48,7 @@ define void @test_div_fmas_f32_inline_imm_0(float addrspace(1)* %out, float %a,
; SI: v_div_fmas_f32 [[RESULT:v[0-9]+]], [[VA]], 1.0, [[VC]]
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
-define void @test_div_fmas_f32_inline_imm_1(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
+define amdgpu_kernel void @test_div_fmas_f32_inline_imm_1(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
%result = call float @llvm.amdgcn.div.fmas.f32(float %a, float 1.0, float %c, i1 %d) nounwind readnone
store float %result, float addrspace(1)* %out, align 4
ret void
@@ -62,7 +62,7 @@ define void @test_div_fmas_f32_inline_imm_1(float addrspace(1)* %out, float %a,
; SI: v_div_fmas_f32 [[RESULT:v[0-9]+]], [[VA]], [[VB]], 1.0
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
-define void @test_div_fmas_f32_inline_imm_2(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
+define amdgpu_kernel void @test_div_fmas_f32_inline_imm_2(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
%result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float 1.0, i1 %d) nounwind readnone
store float %result, float addrspace(1)* %out, align 4
ret void
@@ -70,7 +70,7 @@ define void @test_div_fmas_f32_inline_imm_2(float addrspace(1)* %out, float %a,
; GCN-LABEL: {{^}}test_div_fmas_f64:
; GCN: v_div_fmas_f64
-define void @test_div_fmas_f64(double addrspace(1)* %out, double %a, double %b, double %c, i1 %d) nounwind {
+define amdgpu_kernel void @test_div_fmas_f64(double addrspace(1)* %out, double %a, double %b, double %c, i1 %d) nounwind {
%result = call double @llvm.amdgcn.div.fmas.f64(double %a, double %b, double %c, i1 %d) nounwind readnone
store double %result, double addrspace(1)* %out, align 8
ret void
@@ -79,7 +79,7 @@ define void @test_div_fmas_f64(double addrspace(1)* %out, double %a, double %b,
; GCN-LABEL: {{^}}test_div_fmas_f32_cond_to_vcc:
; SI: v_cmp_eq_u32_e64 vcc, s{{[0-9]+}}, 0{{$}}
; SI: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-define void @test_div_fmas_f32_cond_to_vcc(float addrspace(1)* %out, float %a, float %b, float %c, i32 %i) nounwind {
+define amdgpu_kernel void @test_div_fmas_f32_cond_to_vcc(float addrspace(1)* %out, float %a, float %b, float %c, i32 %i) nounwind {
%cmp = icmp eq i32 %i, 0
%result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %cmp) nounwind readnone
store float %result, float addrspace(1)* %out, align 4
@@ -89,7 +89,7 @@ define void @test_div_fmas_f32_cond_to_vcc(float addrspace(1)* %out, float %a, f
; GCN-LABEL: {{^}}test_div_fmas_f32_imm_false_cond_to_vcc:
; SI: s_mov_b64 vcc, 0
; SI: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-define void @test_div_fmas_f32_imm_false_cond_to_vcc(float addrspace(1)* %out, float %a, float %b, float %c) nounwind {
+define amdgpu_kernel void @test_div_fmas_f32_imm_false_cond_to_vcc(float addrspace(1)* %out, float %a, float %b, float %c) nounwind {
%result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 false) nounwind readnone
store float %result, float addrspace(1)* %out, align 4
ret void
@@ -98,7 +98,7 @@ define void @test_div_fmas_f32_imm_false_cond_to_vcc(float addrspace(1)* %out, f
; GCN-LABEL: {{^}}test_div_fmas_f32_imm_true_cond_to_vcc:
; SI: s_mov_b64 vcc, -1
; SI: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-define void @test_div_fmas_f32_imm_true_cond_to_vcc(float addrspace(1)* %out, float %a, float %b, float %c) nounwind {
+define amdgpu_kernel void @test_div_fmas_f32_imm_true_cond_to_vcc(float addrspace(1)* %out, float %a, float %b, float %c) nounwind {
%result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 true) nounwind readnone
store float %result, float addrspace(1)* %out, align 4
ret void
@@ -114,7 +114,7 @@ define void @test_div_fmas_f32_imm_true_cond_to_vcc(float addrspace(1)* %out, fl
; SI: s_and_b64 vcc, [[CMP0]], [[CMP1]]
; SI: v_div_fmas_f32 {{v[0-9]+}}, [[A]], [[B]], [[C]]
; SI: s_endpgm
-define void @test_div_fmas_f32_logical_cond_to_vcc(float addrspace(1)* %out, float addrspace(1)* %in, i32 %d) nounwind {
+define amdgpu_kernel void @test_div_fmas_f32_logical_cond_to_vcc(float addrspace(1)* %out, float addrspace(1)* %in, i32 %d) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.a = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.b = getelementptr float, float addrspace(1)* %gep.a, i32 1
@@ -150,7 +150,7 @@ define void @test_div_fmas_f32_logical_cond_to_vcc(float addrspace(1)* %out, flo
; SI: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: buffer_store_dword
; SI: s_endpgm
-define void @test_div_fmas_f32_i1_phi_vcc(float addrspace(1)* %out, float addrspace(1)* %in, i32 addrspace(1)* %dummy) nounwind {
+define amdgpu_kernel void @test_div_fmas_f32_i1_phi_vcc(float addrspace(1)* %out, float addrspace(1)* %in, i32 addrspace(1)* %dummy) nounwind {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.out = getelementptr float, float addrspace(1)* %out, i32 2
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.div.scale.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.div.scale.ll
index 8e5c62c31db5..0b4f09ac6517 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.div.scale.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.div.scale.ll
@@ -11,7 +11,7 @@ declare float @llvm.fabs.f32(float) nounwind readnone
; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[B]], [[B]], [[A]]
; SI: buffer_store_dword [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f32_1(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_div_scale_f32_1(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -31,7 +31,7 @@ define void @test_div_scale_f32_1(float addrspace(1)* %out, float addrspace(1)*
; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[A]], [[B]], [[A]]
; SI: buffer_store_dword [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f32_2(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_div_scale_f32_2(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -51,7 +51,7 @@ define void @test_div_scale_f32_2(float addrspace(1)* %out, float addrspace(1)*
; SI: v_div_scale_f64 [[RESULT0:v\[[0-9]+:[0-9]+\]]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[B]], [[B]], [[A]]
; SI: buffer_store_dwordx2 [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f64_1(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_div_scale_f64_1(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -71,7 +71,7 @@ define void @test_div_scale_f64_1(double addrspace(1)* %out, double addrspace(1)
; SI: v_div_scale_f64 [[RESULT0:v\[[0-9]+:[0-9]+\]]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[A]], [[B]], [[A]]
; SI: buffer_store_dwordx2 [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f64_2(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_div_scale_f64_2(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
@@ -91,7 +91,7 @@ define void @test_div_scale_f64_2(double addrspace(1)* %out, double addrspace(1)
; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[B]], [[B]], [[A]]
; SI: buffer_store_dword [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f32_scalar_num_1(float addrspace(1)* %out, float addrspace(1)* %in, float %a) nounwind {
+define amdgpu_kernel void @test_div_scale_f32_scalar_num_1(float addrspace(1)* %out, float addrspace(1)* %in, float %a) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep = getelementptr float, float addrspace(1)* %in, i32 %tid
@@ -109,7 +109,7 @@ define void @test_div_scale_f32_scalar_num_1(float addrspace(1)* %out, float add
; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[A]], [[B]], [[A]]
; SI: buffer_store_dword [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f32_scalar_num_2(float addrspace(1)* %out, float addrspace(1)* %in, float %a) nounwind {
+define amdgpu_kernel void @test_div_scale_f32_scalar_num_2(float addrspace(1)* %out, float addrspace(1)* %in, float %a) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep = getelementptr float, float addrspace(1)* %in, i32 %tid
@@ -127,7 +127,7 @@ define void @test_div_scale_f32_scalar_num_2(float addrspace(1)* %out, float add
; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[B]], [[B]], [[A]]
; SI: buffer_store_dword [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f32_scalar_den_1(float addrspace(1)* %out, float addrspace(1)* %in, float %b) nounwind {
+define amdgpu_kernel void @test_div_scale_f32_scalar_den_1(float addrspace(1)* %out, float addrspace(1)* %in, float %b) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep = getelementptr float, float addrspace(1)* %in, i32 %tid
@@ -145,7 +145,7 @@ define void @test_div_scale_f32_scalar_den_1(float addrspace(1)* %out, float add
; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[A]], [[B]], [[A]]
; SI: buffer_store_dword [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f32_scalar_den_2(float addrspace(1)* %out, float addrspace(1)* %in, float %b) nounwind {
+define amdgpu_kernel void @test_div_scale_f32_scalar_den_2(float addrspace(1)* %out, float addrspace(1)* %in, float %b) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep = getelementptr float, float addrspace(1)* %in, i32 %tid
@@ -163,7 +163,7 @@ define void @test_div_scale_f32_scalar_den_2(float addrspace(1)* %out, float add
; SI: v_div_scale_f64 [[RESULT0:v\[[0-9]+:[0-9]+\]]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[B]], [[B]], [[A]]
; SI: buffer_store_dwordx2 [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f64_scalar_num_1(double addrspace(1)* %out, double addrspace(1)* %in, double %a) nounwind {
+define amdgpu_kernel void @test_div_scale_f64_scalar_num_1(double addrspace(1)* %out, double addrspace(1)* %in, double %a) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep = getelementptr double, double addrspace(1)* %in, i32 %tid
@@ -181,7 +181,7 @@ define void @test_div_scale_f64_scalar_num_1(double addrspace(1)* %out, double a
; SI: v_div_scale_f64 [[RESULT0:v\[[0-9]+:[0-9]+\]]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[A]], [[B]], [[A]]
; SI: buffer_store_dwordx2 [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f64_scalar_num_2(double addrspace(1)* %out, double addrspace(1)* %in, double %a) nounwind {
+define amdgpu_kernel void @test_div_scale_f64_scalar_num_2(double addrspace(1)* %out, double addrspace(1)* %in, double %a) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep = getelementptr double, double addrspace(1)* %in, i32 %tid
@@ -199,7 +199,7 @@ define void @test_div_scale_f64_scalar_num_2(double addrspace(1)* %out, double a
; SI: v_div_scale_f64 [[RESULT0:v\[[0-9]+:[0-9]+\]]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[B]], [[B]], [[A]]
; SI: buffer_store_dwordx2 [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f64_scalar_den_1(double addrspace(1)* %out, double addrspace(1)* %in, double %b) nounwind {
+define amdgpu_kernel void @test_div_scale_f64_scalar_den_1(double addrspace(1)* %out, double addrspace(1)* %in, double %b) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep = getelementptr double, double addrspace(1)* %in, i32 %tid
@@ -217,7 +217,7 @@ define void @test_div_scale_f64_scalar_den_1(double addrspace(1)* %out, double a
; SI: v_div_scale_f64 [[RESULT0:v\[[0-9]+:[0-9]+\]]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[A]], [[B]], [[A]]
; SI: buffer_store_dwordx2 [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f64_scalar_den_2(double addrspace(1)* %out, double addrspace(1)* %in, double %b) nounwind {
+define amdgpu_kernel void @test_div_scale_f64_scalar_den_2(double addrspace(1)* %out, double addrspace(1)* %in, double %b) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep = getelementptr double, double addrspace(1)* %in, i32 %tid
@@ -236,7 +236,7 @@ define void @test_div_scale_f64_scalar_den_2(double addrspace(1)* %out, double a
; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[B]], [[B]], [[VA]]
; SI: buffer_store_dword [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f32_all_scalar_1(float addrspace(1)* %out, float %a, float %b) nounwind {
+define amdgpu_kernel void @test_div_scale_f32_all_scalar_1(float addrspace(1)* %out, float %a, float %b) nounwind {
%result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 false) nounwind readnone
%result0 = extractvalue { float, i1 } %result, 0
store float %result0, float addrspace(1)* %out, align 4
@@ -250,7 +250,7 @@ define void @test_div_scale_f32_all_scalar_1(float addrspace(1)* %out, float %a,
; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[A]], [[VB]], [[A]]
; SI: buffer_store_dword [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f32_all_scalar_2(float addrspace(1)* %out, float %a, float %b) nounwind {
+define amdgpu_kernel void @test_div_scale_f32_all_scalar_2(float addrspace(1)* %out, float %a, float %b) nounwind {
%result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 true) nounwind readnone
%result0 = extractvalue { float, i1 } %result, 0
store float %result0, float addrspace(1)* %out, align 4
@@ -265,7 +265,7 @@ define void @test_div_scale_f32_all_scalar_2(float addrspace(1)* %out, float %a,
; SI: v_div_scale_f64 [[RESULT0:v\[[0-9]+:[0-9]+\]]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[B]], [[B]], v{{\[}}[[VA_LO]]:[[VA_HI]]{{\]}}
; SI: buffer_store_dwordx2 [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f64_all_scalar_1(double addrspace(1)* %out, double %a, double %b) nounwind {
+define amdgpu_kernel void @test_div_scale_f64_all_scalar_1(double addrspace(1)* %out, double %a, double %b) nounwind {
%result = call { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 false) nounwind readnone
%result0 = extractvalue { double, i1 } %result, 0
store double %result0, double addrspace(1)* %out, align 8
@@ -280,7 +280,7 @@ define void @test_div_scale_f64_all_scalar_1(double addrspace(1)* %out, double %
; SI: v_div_scale_f64 [[RESULT0:v\[[0-9]+:[0-9]+\]]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[A]], v{{\[}}[[VB_LO]]:[[VB_HI]]{{\]}}, [[A]]
; SI: buffer_store_dwordx2 [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f64_all_scalar_2(double addrspace(1)* %out, double %a, double %b) nounwind {
+define amdgpu_kernel void @test_div_scale_f64_all_scalar_2(double addrspace(1)* %out, double %a, double %b) nounwind {
%result = call { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 true) nounwind readnone
%result0 = extractvalue { double, i1 } %result, 0
store double %result0, double addrspace(1)* %out, align 8
@@ -292,7 +292,7 @@ define void @test_div_scale_f64_all_scalar_2(double addrspace(1)* %out, double %
; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[A]], [[A]], 1.0
; SI: buffer_store_dword [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f32_inline_imm_num(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_div_scale_f32_inline_imm_num(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%a = load float, float addrspace(1)* %gep.0, align 4
@@ -308,7 +308,7 @@ define void @test_div_scale_f32_inline_imm_num(float addrspace(1)* %out, float a
; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], 2.0, 2.0, [[A]]
; SI: buffer_store_dword [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f32_inline_imm_den(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_div_scale_f32_inline_imm_den(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%a = load float, float addrspace(1)* %gep.0, align 4
@@ -326,7 +326,7 @@ define void @test_div_scale_f32_inline_imm_den(float addrspace(1)* %out, float a
; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[B]], [[B]], [[ABS_A]]
; SI: buffer_store_dword [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f32_fabs_num(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_div_scale_f32_fabs_num(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -349,7 +349,7 @@ define void @test_div_scale_f32_fabs_num(float addrspace(1)* %out, float addrspa
; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[ABS_B]], [[ABS_B]], [[A]]
; SI: buffer_store_dword [[RESULT0]]
; SI: s_endpgm
-define void @test_div_scale_f32_fabs_den(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @test_div_scale_f32_fabs_den(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
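
The div.scale checks above encode the intrinsic's operand-selection convention: the trailing i1 flag picks which operand is being scaled (true appears to select the numerator, false the denominator, judging from the _all_scalar_1/_2 pairs), and the i1 in the result struct is consumed by v_div_fmas later in the full fdiv expansion. A minimal standalone sketch under that reading (the @sketch_* name is illustrative, not part of the patch):

declare { float, i1 } @llvm.amdgcn.div.scale.f32(float, float, i1)

define amdgpu_kernel void @sketch_div_scale_num(float addrspace(1)* %out, float %num, float %den) {
  ; i1 true: scale the numerator (compare test_div_scale_f32_all_scalar_2)
  %s = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %num, float %den, i1 true)
  %v = extractvalue { float, i1 } %s, 0
  store float %v, float addrspace(1)* %out
  ret void
}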
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.ds.bpermute.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.ds.bpermute.ll
index 92d3fc8b107e..08f286a7f510 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.ds.bpermute.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.ds.bpermute.ll
@@ -4,8 +4,7 @@ declare i32 @llvm.amdgcn.ds.bpermute(i32, i32) #0
; FUNC-LABEL: {{^}}ds_bpermute:
; CHECK: ds_bpermute_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; CHECK: s_waitcnt lgkmcnt
-define void @ds_bpermute(i32 addrspace(1)* %out, i32 %index, i32 %src) nounwind {
+define amdgpu_kernel void @ds_bpermute(i32 addrspace(1)* %out, i32 %index, i32 %src) nounwind {
%bpermute = call i32 @llvm.amdgcn.ds.bpermute(i32 %index, i32 %src) #0
store i32 %bpermute, i32 addrspace(1)* %out, align 4
ret void
@@ -13,8 +12,7 @@ define void @ds_bpermute(i32 addrspace(1)* %out, i32 %index, i32 %src) nounwind
; CHECK-LABEL: {{^}}ds_bpermute_imm_offset:
; CHECK: ds_bpermute_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:4
-; CHECK: s_waitcnt lgkmcnt
-define void @ds_bpermute_imm_offset(i32 addrspace(1)* %out, i32 %base_index, i32 %src) nounwind {
+define amdgpu_kernel void @ds_bpermute_imm_offset(i32 addrspace(1)* %out, i32 %base_index, i32 %src) nounwind {
%index = add i32 %base_index, 4
%bpermute = call i32 @llvm.amdgcn.ds.bpermute(i32 %index, i32 %src) #0
store i32 %bpermute, i32 addrspace(1)* %out, align 4
@@ -23,8 +21,7 @@ define void @ds_bpermute_imm_offset(i32 addrspace(1)* %out, i32 %base_index, i32
; CHECK-LABEL: {{^}}ds_bpermute_imm_index:
; CHECK: ds_bpermute_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:64
-; CHECK: s_waitcnt lgkmcnt
-define void @ds_bpermute_imm_index(i32 addrspace(1)* %out, i32 %base_index, i32 %src) nounwind {
+define amdgpu_kernel void @ds_bpermute_imm_index(i32 addrspace(1)* %out, i32 %base_index, i32 %src) nounwind {
%bpermute = call i32 @llvm.amdgcn.ds.bpermute(i32 64, i32 %src) #0
store i32 %bpermute, i32 addrspace(1)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.ds.permute.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.ds.permute.ll
index 6d9c94191535..63618c3aed77 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.ds.permute.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.ds.permute.ll
@@ -4,8 +4,7 @@ declare i32 @llvm.amdgcn.ds.permute(i32, i32) #0
; CHECK-LABEL: {{^}}ds_permute:
; CHECK: ds_permute_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; CHECK: s_waitcnt lgkmcnt
-define void @ds_permute(i32 addrspace(1)* %out, i32 %index, i32 %src) nounwind {
+define amdgpu_kernel void @ds_permute(i32 addrspace(1)* %out, i32 %index, i32 %src) nounwind {
%bpermute = call i32 @llvm.amdgcn.ds.permute(i32 %index, i32 %src) #0
store i32 %bpermute, i32 addrspace(1)* %out, align 4
ret void
@@ -13,8 +12,7 @@ define void @ds_permute(i32 addrspace(1)* %out, i32 %index, i32 %src) nounwind {
; CHECK-LABEL: {{^}}ds_permute_imm_offset:
; CHECK: ds_permute_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:4
-; CHECK: s_waitcnt lgkmcnt
-define void @ds_permute_imm_offset(i32 addrspace(1)* %out, i32 %base_index, i32 %src) nounwind {
+define amdgpu_kernel void @ds_permute_imm_offset(i32 addrspace(1)* %out, i32 %base_index, i32 %src) nounwind {
%index = add i32 %base_index, 4
%bpermute = call i32 @llvm.amdgcn.ds.permute(i32 %index, i32 %src) #0
store i32 %bpermute, i32 addrspace(1)* %out, align 4
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.ds.swizzle.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.ds.swizzle.ll
index ef3cb00024bb..a3a78d326a62 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.ds.swizzle.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.ds.swizzle.ll
@@ -6,7 +6,7 @@ declare i32 @llvm.amdgcn.ds.swizzle(i32, i32) #0
; FUNC-LABEL: {{^}}ds_swizzle:
; CHECK: ds_swizzle_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:100
; CHECK: s_waitcnt lgkmcnt
-define void @ds_swizzle(i32 addrspace(1)* %out, i32 %src) nounwind {
+define amdgpu_kernel void @ds_swizzle(i32 addrspace(1)* %out, i32 %src) nounwind {
%swizzle = call i32 @llvm.amdgcn.ds.swizzle(i32 %src, i32 100) #0
store i32 %swizzle, i32 addrspace(1)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.exp.compr.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.exp.compr.ll
new file mode 100644
index 000000000000..b972ddb8cb77
--- /dev/null
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.exp.compr.ll
@@ -0,0 +1,162 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=GCN %s
+
+declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
+declare void @llvm.amdgcn.exp.compr.v2i16(i32, i32, <2 x i16>, <2 x i16>, i1, i1) #0
+
+; GCN-LABEL: {{^}}test_export_compr_zeroes_v2f16:
+; GCN: exp mrt0 off, off, off, off compr{{$}}
+; GCN: exp mrt0 off, off, off, off done compr{{$}}
+define amdgpu_kernel void @test_export_compr_zeroes_v2f16() #0 {
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 0, <2 x half> zeroinitializer, <2 x half> zeroinitializer, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 0, <2 x half> zeroinitializer, <2 x half> zeroinitializer, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_compr_en_src0_v2f16:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 0x40003c00
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 0x44003800
+; GCN: exp mrt0 [[SRC0]], [[SRC0]], off, off done compr{{$}}
+define amdgpu_kernel void @test_export_compr_en_src0_v2f16() #0 {
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 3, <2 x half> <half 1.0, half 2.0>, <2 x half> <half 0.5, half 4.0>, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_compr_en_src1_v2f16:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 0x40003c00
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 0x44003800
+; GCN: exp mrt0 off, off, [[SRC1]], [[SRC1]] done compr{{$}}
+define amdgpu_kernel void @test_export_compr_en_src1_v2f16() #0 {
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 12, <2 x half> <half 1.0, half 2.0>, <2 x half> <half 0.5, half 4.0>, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_compr_en_src0_src1_v2f16:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 0x40003c00
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 0x44003800
+; GCN: exp mrt0 [[SRC0]], [[SRC0]], [[SRC1]], [[SRC1]] done compr{{$}}
+define amdgpu_kernel void @test_export_compr_en_src0_src1_v2f16() #0 {
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> <half 1.0, half 2.0>, <2 x half> <half 0.5, half 4.0>, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_compr_en_invalid2_v2f16:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 0x40003c00
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 0x44003800
+; GCN: exp mrt0 off, [[SRC0]], off, off done compr{{$}}
+define amdgpu_kernel void @test_export_compr_en_invalid2_v2f16() #0 {
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 2, <2 x half> <half 1.0, half 2.0>, <2 x half> <half 0.5, half 4.0>, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_compr_en_invalid10_v2f16:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 0x40003c00
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 0x44003800
+; GCN: exp mrt0 off, [[SRC0]], off, [[SRC1]] done compr{{$}}
+define amdgpu_kernel void @test_export_compr_en_invalid10_v2f16() #0 {
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 10, <2 x half> <half 1.0, half 2.0>, <2 x half> <half 0.5, half 4.0>, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_compr_mrt7_v2f16:
+; GCN-DAG: v_mov_b32_e32 [[VHALF:v[0-9]+]], 0x38003800
+; GCN: exp mrt7 [[VHALF]], [[VHALF]], [[VHALF]], [[VHALF]] compr{{$}}
+; GCN: exp mrt7 [[VHALF]], [[VHALF]], [[VHALF]], [[VHALF]] done compr{{$}}
+define amdgpu_kernel void @test_export_compr_mrt7_v2f16() #0 {
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 7, i32 15, <2 x half> <half 0.5, half 0.5>, <2 x half> <half 0.5, half 0.5>, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 7, i32 15, <2 x half> <half 0.5, half 0.5>, <2 x half> <half 0.5, half 0.5>, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_compr_z_v2f16:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 0x40003c00
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 0x44003800
+; GCN: exp mrtz [[SRC0]], [[SRC0]], [[SRC1]], [[SRC1]] compr{{$}}
+; GCN: exp mrtz [[SRC0]], [[SRC0]], [[SRC1]], [[SRC1]] done compr{{$}}
+define amdgpu_kernel void @test_export_compr_z_v2f16() #0 {
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 8, i32 15, <2 x half> <half 1.0, half 2.0>, <2 x half> <half 0.5, half 4.0>, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 8, i32 15, <2 x half> <half 1.0, half 2.0>, <2 x half> <half 0.5, half 4.0>, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_compr_vm_v2f16:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 0x40003c00
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 0x44003800
+; GCN: exp mrt0 [[SRC0]], [[SRC0]], [[SRC1]], [[SRC1]] compr vm{{$}}
+; GCN: exp mrt0 [[SRC0]], [[SRC0]], [[SRC1]], [[SRC1]] done compr vm{{$}}
+define amdgpu_kernel void @test_export_compr_vm_v2f16() #0 {
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> <half 1.0, half 2.0>, <2 x half> <half 0.5, half 4.0>, i1 false, i1 true)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> <half 1.0, half 2.0>, <2 x half> <half 0.5, half 4.0>, i1 true, i1 true)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_compr_zeroes_v2i16:
+; GCN: exp mrt0 off, off, off, off compr{{$}}
+; GCN: exp mrt0 off, off, off, off done compr{{$}}
+define amdgpu_kernel void @test_export_compr_zeroes_v2i16() #0 {
+ call void @llvm.amdgcn.exp.compr.v2i16(i32 0, i32 0, <2 x i16> zeroinitializer, <2 x i16> zeroinitializer, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.compr.v2i16(i32 0, i32 0, <2 x i16> zeroinitializer, <2 x i16> zeroinitializer, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_compr_en_src0_v2i16:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 0x20001
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 0x40005
+; GCN: exp mrt0 [[SRC0]], off, off, off done compr{{$}}
+define amdgpu_kernel void @test_export_compr_en_src0_v2i16() #0 {
+ call void @llvm.amdgcn.exp.compr.v2i16(i32 0, i32 1, <2 x i16> <i16 1, i16 2>, <2 x i16> <i16 5, i16 4>, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_compr_en_src1_v2i16:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 0x20001
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 0x40005
+; GCN: exp mrt0 off, off, [[SRC1]], [[SRC1]] done compr{{$}}
+define amdgpu_kernel void @test_export_compr_en_src1_v2i16() #0 {
+ call void @llvm.amdgcn.exp.compr.v2i16(i32 0, i32 12, <2 x i16> <i16 1, i16 2>, <2 x i16> <i16 5, i16 4>, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_compr_en_src0_src1_v2i16:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 0x20001
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 0x40005
+; GCN: exp mrt0 [[SRC0]], [[SRC0]], [[SRC1]], [[SRC1]] done compr{{$}}
+define amdgpu_kernel void @test_export_compr_en_src0_src1_v2i16() #0 {
+ call void @llvm.amdgcn.exp.compr.v2i16(i32 0, i32 15, <2 x i16> <i16 1, i16 2>, <2 x i16> <i16 5, i16 4>, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_compr_mrt7_v2i16:
+; GCN-DAG: v_mov_b32_e32 [[VI16:v[0-9]+]], 0x50005
+; GCN: exp mrt7 [[VI16]], [[VI16]], [[VI16]], [[VI16]] compr{{$}}
+; GCN: exp mrt7 [[VI16]], [[VI16]], [[VI16]], [[VI16]] done compr{{$}}
+define amdgpu_kernel void @test_export_compr_mrt7_v2i16() #0 {
+ call void @llvm.amdgcn.exp.compr.v2i16(i32 7, i32 15, <2 x i16> <i16 5, i16 5>, <2 x i16> <i16 5, i16 5>, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.compr.v2i16(i32 7, i32 15, <2 x i16> <i16 5, i16 5>, <2 x i16> <i16 5, i16 5>, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_compr_z_v2i16:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 0x20001
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 0x40005
+; GCN: exp mrtz [[SRC0]], [[SRC0]], [[SRC1]], [[SRC1]] compr{{$}}
+; GCN: exp mrtz [[SRC0]], [[SRC0]], [[SRC1]], [[SRC1]] done compr{{$}}
+define amdgpu_kernel void @test_export_compr_z_v2i16() #0 {
+ call void @llvm.amdgcn.exp.compr.v2i16(i32 8, i32 15, <2 x i16> <i16 1, i16 2>, <2 x i16> <i16 5, i16 4>, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.compr.v2i16(i32 8, i32 15, <2 x i16> <i16 1, i16 2>, <2 x i16> <i16 5, i16 4>, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_compr_vm_v2i16:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 0x20001
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 0x40005
+; GCN: exp mrt0 [[SRC0]], [[SRC0]], [[SRC1]], [[SRC1]] compr vm{{$}}
+; GCN: exp mrt0 [[SRC0]], [[SRC0]], [[SRC1]], [[SRC1]] done compr vm{{$}}
+define amdgpu_kernel void @test_export_compr_vm_v2i16() #0 {
+ call void @llvm.amdgcn.exp.compr.v2i16(i32 0, i32 15, <2 x i16> <i16 1, i16 2>, <2 x i16> <i16 5, i16 4>, i1 false, i1 true)
+ call void @llvm.amdgcn.exp.compr.v2i16(i32 0, i32 15, <2 x i16> <i16 1, i16 2>, <2 x i16> <i16 5, i16 4>, i1 true, i1 true)
+ ret void
+}
+
+attributes #0 = { nounwind }
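
Read together, the compr tests pin down the enable-mask convention they exercise: a compressed export packs two 16-bit channels per source register, so en bits 0-1 gate the halves of src0 and bits 2-3 the halves of src1 (en=3 fills the first two slots from src0, en=12 the last two from src1, en=15 all four). A minimal sketch of a full compressed export under that reading (not part of the patch; names are illustrative):

declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1)

define amdgpu_kernel void @sketch_export_compr() {
  ; tgt=0 (mrt0), en=15 (both halves of both sources), done=true, vm=false
  call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> <half 1.0, half 2.0>, <2 x half> <half 0.5, half 4.0>, i1 true, i1 false)
  ret void
}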
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.exp.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.exp.ll
new file mode 100644
index 000000000000..6d2de108829d
--- /dev/null
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.exp.ll
@@ -0,0 +1,484 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=GCN %s
+
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #1
+declare void @llvm.amdgcn.exp.i32(i32, i32, i32, i32, i32, i32, i1, i1) #1
+
+; GCN-LABEL: {{^}}test_export_zeroes_f32:
+; GCN: exp mrt0 off, off, off, off{{$}}
+; GCN: exp mrt0 off, off, off, off done{{$}}
+define amdgpu_kernel void @test_export_zeroes_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 0, float 0.0, float 0.0, float 0.0, float 0.0, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 0, float 0.0, float 0.0, float 0.0, float 0.0, i1 true, i1 false)
+ ret void
+}
+
+; FIXME: Should not set up registers for the unused source registers.
+
+; GCN-LABEL: {{^}}test_export_en_src0_f32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
+; GCN: exp mrt0 [[SRC0]], off, off, off done{{$}}
+define amdgpu_kernel void @test_export_en_src0_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 1, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_en_src1_f32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
+; GCN: exp mrt0 off, [[SRC1]], off, off done{{$}}
+define amdgpu_kernel void @test_export_en_src1_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 2, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_en_src2_f32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
+; GCN: exp mrt0 off, off, [[SRC2]], off done{{$}}
+define amdgpu_kernel void @test_export_en_src2_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 4, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_en_src3_f32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
+; GCN: exp mrt0 off, off, off, [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_en_src3_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 8, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_en_src0_src1_f32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
+; GCN: exp mrt0 [[SRC0]], [[SRC1]], off, off done{{$}}
+define amdgpu_kernel void @test_export_en_src0_src1_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 3, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_en_src0_src2_f32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
+; GCN: exp mrt0 [[SRC0]], off, [[SRC2]], off done{{$}}
+define amdgpu_kernel void @test_export_en_src0_src2_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 5, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_en_src0_src3_f32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
+; GCN: exp mrt0 [[SRC0]], off, off, [[SRC3]]{{$}}
+; GCN: exp mrt0 [[SRC0]], off, off, [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_en_src0_src3_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 9, float 1.0, float 2.0, float 0.5, float 4.0, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 9, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_en_src0_src1_src2_src3_f32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
+; GCN: exp mrt0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
+; GCN: exp mrt0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_en_src0_src1_src2_src3_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_mrt7_f32:
+; GCN-DAG: v_mov_b32_e32 [[VHALF:v[0-9]+]], 0.5
+; GCN: exp mrt7 [[VHALF]], [[VHALF]], [[VHALF]], [[VHALF]]{{$}}
+; GCN: exp mrt7 [[VHALF]], [[VHALF]], [[VHALF]], [[VHALF]] done{{$}}
+define amdgpu_kernel void @test_export_mrt7_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 7, i32 15, float 0.5, float 0.5, float 0.5, float 0.5, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 7, i32 15, float 0.5, float 0.5, float 0.5, float 0.5, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_z_f32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
+; GCN: exp mrtz [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
+; GCN: exp mrtz [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_z_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 8, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 8, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_null_f32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
+; GCN: exp null [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
+; GCN: exp null [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_null_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 9, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 9, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_reserved10_f32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
+; GCN: exp invalid_target_10 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
+; GCN: exp invalid_target_10 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_reserved10_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 10, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 10, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_reserved11_f32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
+; GCN: exp invalid_target_11 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
+; GCN: exp invalid_target_11 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_reserved11_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 11, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 11, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_pos0_f32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
+; GCN: exp pos0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
+; GCN: exp pos0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_pos0_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_pos3_f32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
+; GCN: exp pos3 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
+; GCN: exp pos3 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_pos3_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 15, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 15, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_param0_f32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
+; GCN: exp param0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
+; GCN: exp param0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_param0_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_param31_f32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
+; GCN: exp param31 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
+; GCN: exp param31 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_param31_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 63, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 63, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_vm_f32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 0.5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4.0
+; GCN: exp mrt0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] vm{{$}}
+; GCN: exp mrt0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done vm{{$}}
+define amdgpu_kernel void @test_export_vm_f32() #0 {
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 false, i1 true)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 true)
+ ret void
+}
+
+
+; GCN-LABEL: {{^}}test_export_zeroes_i32:
+; GCN: exp mrt0 off, off, off, off{{$}}
+; GCN: exp mrt0 off, off, off, off done{{$}}
+define amdgpu_kernel void @test_export_zeroes_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.i32(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i1 true, i1 false)
+ ret void
+}
+
+; FIXME: Should not set up registers for the unused source registers.
+
+; GCN-LABEL: {{^}}test_export_en_src0_i32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4
+; GCN: exp mrt0 [[SRC0]], off, off, off done{{$}}
+define amdgpu_kernel void @test_export_en_src0_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 0, i32 1, i32 1, i32 2, i32 5, i32 4, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_en_src1_i32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4
+; GCN: exp mrt0 off, [[SRC1]], off, off done{{$}}
+define amdgpu_kernel void @test_export_en_src1_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 0, i32 2, i32 1, i32 2, i32 5, i32 4, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_en_src2_i32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4
+; GCN: exp mrt0 off, off, [[SRC2]], off done{{$}}
+define amdgpu_kernel void @test_export_en_src2_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 0, i32 4, i32 1, i32 2, i32 5, i32 4, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_en_src3_i32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4
+; GCN: exp mrt0 off, off, off, [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_en_src3_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 0, i32 8, i32 1, i32 2, i32 5, i32 4, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_en_src0_src1_i32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4
+; GCN: exp mrt0 [[SRC0]], [[SRC1]], off, off done{{$}}
+define amdgpu_kernel void @test_export_en_src0_src1_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 0, i32 3, i32 1, i32 2, i32 5, i32 4, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_en_src0_src2_i32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4
+; GCN: exp mrt0 [[SRC0]], off, [[SRC2]], off done{{$}}
+define amdgpu_kernel void @test_export_en_src0_src2_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 0, i32 5, i32 1, i32 2, i32 5, i32 4, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_en_src0_src3_i32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4
+; GCN: exp mrt0 [[SRC0]], off, off, [[SRC3]]{{$}}
+; GCN: exp mrt0 [[SRC0]], off, off, [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_en_src0_src3_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 0, i32 9, i32 1, i32 2, i32 5, i32 4, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.i32(i32 0, i32 9, i32 1, i32 2, i32 5, i32 4, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_en_src0_src1_src2_src3_i32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4
+; GCN: exp mrt0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
+; GCN: exp mrt0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_en_src0_src1_src2_src3_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 0, i32 15, i32 1, i32 2, i32 5, i32 4, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.i32(i32 0, i32 15, i32 1, i32 2, i32 5, i32 4, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_mrt7_i32:
+; GCN-DAG: v_mov_b32_e32 [[VHALF:v[0-9]+]], 5
+; GCN: exp mrt7 [[VHALF]], [[VHALF]], [[VHALF]], [[VHALF]]{{$}}
+; GCN: exp mrt7 [[VHALF]], [[VHALF]], [[VHALF]], [[VHALF]] done{{$}}
+define amdgpu_kernel void @test_export_mrt7_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 7, i32 15, i32 5, i32 5, i32 5, i32 5, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.i32(i32 7, i32 15, i32 5, i32 5, i32 5, i32 5, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_z_i32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4
+; GCN: exp mrtz [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
+; GCN: exp mrtz [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_z_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 8, i32 15, i32 1, i32 2, i32 5, i32 4, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.i32(i32 8, i32 15, i32 1, i32 2, i32 5, i32 4, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_null_i32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4
+; GCN: exp null [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
+; GCN: exp null [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_null_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 9, i32 15, i32 1, i32 2, i32 5, i32 4, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.i32(i32 9, i32 15, i32 1, i32 2, i32 5, i32 4, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_reserved10_i32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4
+; GCN: exp invalid_target_10 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
+; GCN: exp invalid_target_10 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_reserved10_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 10, i32 15, i32 1, i32 2, i32 5, i32 4, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.i32(i32 10, i32 15, i32 1, i32 2, i32 5, i32 4, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_reserved11_i32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4
+; GCN: exp invalid_target_11 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
+; GCN: exp invalid_target_11 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_reserved11_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 11, i32 15, i32 1, i32 2, i32 5, i32 4, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.i32(i32 11, i32 15, i32 1, i32 2, i32 5, i32 4, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_pos0_i32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4
+; GCN: exp pos0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
+; GCN: exp pos0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_pos0_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 12, i32 15, i32 1, i32 2, i32 5, i32 4, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.i32(i32 12, i32 15, i32 1, i32 2, i32 5, i32 4, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_pos3_i32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4
+; GCN: exp pos3 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
+; GCN: exp pos3 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_pos3_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 15, i32 15, i32 1, i32 2, i32 5, i32 4, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.i32(i32 15, i32 15, i32 1, i32 2, i32 5, i32 4, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_param0_i32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4
+; GCN: exp param0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
+; GCN: exp param0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_param0_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 32, i32 15, i32 1, i32 2, i32 5, i32 4, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.i32(i32 32, i32 15, i32 1, i32 2, i32 5, i32 4, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_param31_i32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4
+; GCN: exp param31 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]]{{$}}
+; GCN: exp param31 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done{{$}}
+define amdgpu_kernel void @test_export_param31_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 63, i32 15, i32 1, i32 2, i32 5, i32 4, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.i32(i32 63, i32 15, i32 1, i32 2, i32 5, i32 4, i1 true, i1 false)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_export_vm_i32:
+; GCN-DAG: v_mov_b32_e32 [[SRC0:v[0-9]+]], 1
+; GCN-DAG: v_mov_b32_e32 [[SRC1:v[0-9]+]], 2
+; GCN-DAG: v_mov_b32_e32 [[SRC2:v[0-9]+]], 5
+; GCN-DAG: v_mov_b32_e32 [[SRC3:v[0-9]+]], 4
+; GCN: exp mrt0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] vm{{$}}
+; GCN: exp mrt0 [[SRC0]], [[SRC1]], [[SRC2]], [[SRC3]] done vm{{$}}
+define amdgpu_kernel void @test_export_vm_i32() #0 {
+ call void @llvm.amdgcn.exp.i32(i32 0, i32 15, i32 1, i32 2, i32 5, i32 4, i1 false, i1 true)
+ call void @llvm.amdgcn.exp.i32(i32 0, i32 15, i32 1, i32 2, i32 5, i32 4, i1 true, i1 true)
+ ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind inaccessiblememonly }
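
For the uncompressed form the tests show a one-bit-per-source mask: en bit i enables src-i, so en=1 exports only src0, en=9 src0 and src3, en=15 all four; done=true marks the final export of the wave and vm sets the valid-mask flag. A minimal sketch following the same pattern as the tests (not part of the patch):

declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1)

define amdgpu_kernel void @sketch_export_pos0() {
  ; tgt=12 (pos0), en=15 (all four sources), done=true, vm=false
  call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
  ret void
}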
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.fcmp.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.fcmp.ll
index 427ad5ef553d..c9993ee88369 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.fcmp.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.fcmp.ll
@@ -5,9 +5,17 @@ declare i64 @llvm.amdgcn.fcmp.f32(float, float, i32) #0
declare i64 @llvm.amdgcn.fcmp.f64(double, double, i32) #0
declare float @llvm.fabs.f32(float) #0
+; GCN-LABEL: {{^}}v_fcmp_f32_dynamic_cc:
+; GCN: s_endpgm
+define amdgpu_kernel void @v_fcmp_f32_dynamic_cc(i64 addrspace(1)* %out, float %src0, float %src1, i32 %cc) {
+ %result = call i64 @llvm.amdgcn.fcmp.f32(float %src0, float %src1, i32 %cc)
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
; GCN-LABEL: {{^}}v_fcmp_f32_oeq_with_fabs:
; GCN: v_cmp_eq_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, |{{v[0-9]+}}|, {{s[0-9]+}}
-define void @v_fcmp_f32_oeq_with_fabs(i64 addrspace(1)* %out, float %src, float %a) {
+define amdgpu_kernel void @v_fcmp_f32_oeq_with_fabs(i64 addrspace(1)* %out, float %src, float %a) {
%temp = call float @llvm.fabs.f32(float %a)
%result = call i64 @llvm.amdgcn.fcmp.f32(float %src, float %temp, i32 1)
store i64 %result, i64 addrspace(1)* %out
@@ -16,7 +24,7 @@ define void @v_fcmp_f32_oeq_with_fabs(i64 addrspace(1)* %out, float %src, float
; GCN-LABEL: {{^}}v_fcmp_f32_oeq_both_operands_with_fabs:
; GCN: v_cmp_eq_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, |{{v[0-9]+}}|, |{{s[0-9]+}}|
-define void @v_fcmp_f32_oeq_both_operands_with_fabs(i64 addrspace(1)* %out, float %src, float %a) {
+define amdgpu_kernel void @v_fcmp_f32_oeq_both_operands_with_fabs(i64 addrspace(1)* %out, float %src, float %a) {
%temp = call float @llvm.fabs.f32(float %a)
%src_input = call float @llvm.fabs.f32(float %src)
%result = call i64 @llvm.amdgcn.fcmp.f32(float %src_input, float %temp, i32 1)
@@ -26,7 +34,7 @@ define void @v_fcmp_f32_oeq_both_operands_with_fabs(i64 addrspace(1)* %out, floa
; GCN-LABEL: {{^}}v_fcmp:
; GCN-NOT: v_cmp_eq_f32_e64
-define void @v_fcmp(i64 addrspace(1)* %out, float %src) {
+define amdgpu_kernel void @v_fcmp(i64 addrspace(1)* %out, float %src) {
%result = call i64 @llvm.amdgcn.fcmp.f32(float %src, float 100.00, i32 -1)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -34,7 +42,7 @@ define void @v_fcmp(i64 addrspace(1)* %out, float %src) {
; GCN-LABEL: {{^}}v_fcmp_f32_oeq:
; GCN: v_cmp_eq_f32_e64
-define void @v_fcmp_f32_oeq(i64 addrspace(1)* %out, float %src) {
+define amdgpu_kernel void @v_fcmp_f32_oeq(i64 addrspace(1)* %out, float %src) {
%result = call i64 @llvm.amdgcn.fcmp.f32(float %src, float 100.00, i32 1)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -42,7 +50,7 @@ define void @v_fcmp_f32_oeq(i64 addrspace(1)* %out, float %src) {
; GCN-LABEL: {{^}}v_fcmp_f32_one:
; GCN: v_cmp_neq_f32_e64
-define void @v_fcmp_f32_one(i64 addrspace(1)* %out, float %src) {
+define amdgpu_kernel void @v_fcmp_f32_one(i64 addrspace(1)* %out, float %src) {
%result = call i64 @llvm.amdgcn.fcmp.f32(float %src, float 100.00, i32 6)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -50,7 +58,7 @@ define void @v_fcmp_f32_one(i64 addrspace(1)* %out, float %src) {
; GCN-LABEL: {{^}}v_fcmp_f32_ogt:
; GCN: v_cmp_gt_f32_e64
-define void @v_fcmp_f32_ogt(i64 addrspace(1)* %out, float %src) {
+define amdgpu_kernel void @v_fcmp_f32_ogt(i64 addrspace(1)* %out, float %src) {
%result = call i64 @llvm.amdgcn.fcmp.f32(float %src, float 100.00, i32 2)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -58,7 +66,7 @@ define void @v_fcmp_f32_ogt(i64 addrspace(1)* %out, float %src) {
; GCN-LABEL: {{^}}v_fcmp_f32_oge:
; GCN: v_cmp_ge_f32_e64
-define void @v_fcmp_f32_oge(i64 addrspace(1)* %out, float %src) {
+define amdgpu_kernel void @v_fcmp_f32_oge(i64 addrspace(1)* %out, float %src) {
%result = call i64 @llvm.amdgcn.fcmp.f32(float %src, float 100.00, i32 3)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -66,7 +74,7 @@ define void @v_fcmp_f32_oge(i64 addrspace(1)* %out, float %src) {
; GCN-LABEL: {{^}}v_fcmp_f32_olt:
; GCN: v_cmp_lt_f32_e64
-define void @v_fcmp_f32_olt(i64 addrspace(1)* %out, float %src) {
+define amdgpu_kernel void @v_fcmp_f32_olt(i64 addrspace(1)* %out, float %src) {
%result = call i64 @llvm.amdgcn.fcmp.f32(float %src, float 100.00, i32 4)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -74,7 +82,7 @@ define void @v_fcmp_f32_olt(i64 addrspace(1)* %out, float %src) {
; GCN-LABEL: {{^}}v_fcmp_f32_ole:
; GCN: v_cmp_le_f32_e64
-define void @v_fcmp_f32_ole(i64 addrspace(1)* %out, float %src) {
+define amdgpu_kernel void @v_fcmp_f32_ole(i64 addrspace(1)* %out, float %src) {
%result = call i64 @llvm.amdgcn.fcmp.f32(float %src, float 100.00, i32 5)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -83,7 +91,7 @@ define void @v_fcmp_f32_ole(i64 addrspace(1)* %out, float %src) {
; GCN-LABEL: {{^}}v_fcmp_f32_ueq:
; GCN: v_cmp_nlg_f32_e64
-define void @v_fcmp_f32_ueq(i64 addrspace(1)* %out, float %src) {
+define amdgpu_kernel void @v_fcmp_f32_ueq(i64 addrspace(1)* %out, float %src) {
%result = call i64 @llvm.amdgcn.fcmp.f32(float %src, float 100.00, i32 9)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -91,7 +99,7 @@ define void @v_fcmp_f32_ueq(i64 addrspace(1)* %out, float %src) {
; GCN-LABEL: {{^}}v_fcmp_f32_une:
; GCN: v_cmp_neq_f32_e64
-define void @v_fcmp_f32_une(i64 addrspace(1)* %out, float %src) {
+define amdgpu_kernel void @v_fcmp_f32_une(i64 addrspace(1)* %out, float %src) {
%result = call i64 @llvm.amdgcn.fcmp.f32(float %src, float 100.00, i32 14)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -99,7 +107,7 @@ define void @v_fcmp_f32_une(i64 addrspace(1)* %out, float %src) {
; GCN-LABEL: {{^}}v_fcmp_f32_ugt:
; GCN: v_cmp_nle_f32_e64
-define void @v_fcmp_f32_ugt(i64 addrspace(1)* %out, float %src) {
+define amdgpu_kernel void @v_fcmp_f32_ugt(i64 addrspace(1)* %out, float %src) {
%result = call i64 @llvm.amdgcn.fcmp.f32(float %src, float 100.00, i32 10)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -107,7 +115,7 @@ define void @v_fcmp_f32_ugt(i64 addrspace(1)* %out, float %src) {
; GCN-LABEL: {{^}}v_fcmp_f32_uge:
; GCN: v_cmp_nlt_f32_e64
-define void @v_fcmp_f32_uge(i64 addrspace(1)* %out, float %src) {
+define amdgpu_kernel void @v_fcmp_f32_uge(i64 addrspace(1)* %out, float %src) {
%result = call i64 @llvm.amdgcn.fcmp.f32(float %src, float 100.00, i32 11)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -115,7 +123,7 @@ define void @v_fcmp_f32_uge(i64 addrspace(1)* %out, float %src) {
; GCN-LABEL: {{^}}v_fcmp_f32_ult:
; GCN: v_cmp_nge_f32_e64
-define void @v_fcmp_f32_ult(i64 addrspace(1)* %out, float %src) {
+define amdgpu_kernel void @v_fcmp_f32_ult(i64 addrspace(1)* %out, float %src) {
%result = call i64 @llvm.amdgcn.fcmp.f32(float %src, float 100.00, i32 12)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -123,7 +131,7 @@ define void @v_fcmp_f32_ult(i64 addrspace(1)* %out, float %src) {
; GCN-LABEL: {{^}}v_fcmp_f32_ule:
; GCN: v_cmp_ngt_f32_e64
-define void @v_fcmp_f32_ule(i64 addrspace(1)* %out, float %src) {
+define amdgpu_kernel void @v_fcmp_f32_ule(i64 addrspace(1)* %out, float %src) {
%result = call i64 @llvm.amdgcn.fcmp.f32(float %src, float 100.00, i32 13)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -131,7 +139,7 @@ define void @v_fcmp_f32_ule(i64 addrspace(1)* %out, float %src) {
; GCN-LABEL: {{^}}v_fcmp_f64_oeq:
; GCN: v_cmp_eq_f64_e64
-define void @v_fcmp_f64_oeq(i64 addrspace(1)* %out, double %src) {
+define amdgpu_kernel void @v_fcmp_f64_oeq(i64 addrspace(1)* %out, double %src) {
%result = call i64 @llvm.amdgcn.fcmp.f64(double %src, double 100.00, i32 1)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -139,7 +147,7 @@ define void @v_fcmp_f64_oeq(i64 addrspace(1)* %out, double %src) {
; GCN-LABEL: {{^}}v_fcmp_f64_one:
; GCN: v_cmp_neq_f64_e64
-define void @v_fcmp_f64_one(i64 addrspace(1)* %out, double %src) {
+define amdgpu_kernel void @v_fcmp_f64_one(i64 addrspace(1)* %out, double %src) {
%result = call i64 @llvm.amdgcn.fcmp.f64(double %src, double 100.00, i32 6)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -147,7 +155,7 @@ define void @v_fcmp_f64_one(i64 addrspace(1)* %out, double %src) {
; GCN-LABEL: {{^}}v_fcmp_f64_ogt:
; GCN: v_cmp_gt_f64_e64
-define void @v_fcmp_f64_ogt(i64 addrspace(1)* %out, double %src) {
+define amdgpu_kernel void @v_fcmp_f64_ogt(i64 addrspace(1)* %out, double %src) {
%result = call i64 @llvm.amdgcn.fcmp.f64(double %src, double 100.00, i32 2)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -155,7 +163,7 @@ define void @v_fcmp_f64_ogt(i64 addrspace(1)* %out, double %src) {
; GCN-LABEL: {{^}}v_fcmp_f64_oge:
; GCN: v_cmp_ge_f64_e64
-define void @v_fcmp_f64_oge(i64 addrspace(1)* %out, double %src) {
+define amdgpu_kernel void @v_fcmp_f64_oge(i64 addrspace(1)* %out, double %src) {
%result = call i64 @llvm.amdgcn.fcmp.f64(double %src, double 100.00, i32 3)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -163,7 +171,7 @@ define void @v_fcmp_f64_oge(i64 addrspace(1)* %out, double %src) {
; GCN-LABEL: {{^}}v_fcmp_f64_olt:
; GCN: v_cmp_lt_f64_e64
-define void @v_fcmp_f64_olt(i64 addrspace(1)* %out, double %src) {
+define amdgpu_kernel void @v_fcmp_f64_olt(i64 addrspace(1)* %out, double %src) {
%result = call i64 @llvm.amdgcn.fcmp.f64(double %src, double 100.00, i32 4)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -171,7 +179,7 @@ define void @v_fcmp_f64_olt(i64 addrspace(1)* %out, double %src) {
; GCN-LABEL: {{^}}v_fcmp_f64_ole:
; GCN: v_cmp_le_f64_e64
-define void @v_fcmp_f64_ole(i64 addrspace(1)* %out, double %src) {
+define amdgpu_kernel void @v_fcmp_f64_ole(i64 addrspace(1)* %out, double %src) {
%result = call i64 @llvm.amdgcn.fcmp.f64(double %src, double 100.00, i32 5)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -179,7 +187,7 @@ define void @v_fcmp_f64_ole(i64 addrspace(1)* %out, double %src) {
; GCN-LABEL: {{^}}v_fcmp_f64_ueq:
; GCN: v_cmp_nlg_f64_e64
-define void @v_fcmp_f64_ueq(i64 addrspace(1)* %out, double %src) {
+define amdgpu_kernel void @v_fcmp_f64_ueq(i64 addrspace(1)* %out, double %src) {
%result = call i64 @llvm.amdgcn.fcmp.f64(double %src, double 100.00, i32 9)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -187,7 +195,7 @@ define void @v_fcmp_f64_ueq(i64 addrspace(1)* %out, double %src) {
; GCN-LABEL: {{^}}v_fcmp_f64_une:
; GCN: v_cmp_neq_f64_e64
-define void @v_fcmp_f64_une(i64 addrspace(1)* %out, double %src) {
+define amdgpu_kernel void @v_fcmp_f64_une(i64 addrspace(1)* %out, double %src) {
%result = call i64 @llvm.amdgcn.fcmp.f64(double %src, double 100.00, i32 14)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -195,7 +203,7 @@ define void @v_fcmp_f64_une(i64 addrspace(1)* %out, double %src) {
; GCN-LABEL: {{^}}v_fcmp_f64_ugt:
; GCN: v_cmp_nle_f64_e64
-define void @v_fcmp_f64_ugt(i64 addrspace(1)* %out, double %src) {
+define amdgpu_kernel void @v_fcmp_f64_ugt(i64 addrspace(1)* %out, double %src) {
%result = call i64 @llvm.amdgcn.fcmp.f64(double %src, double 100.00, i32 10)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -203,7 +211,7 @@ define void @v_fcmp_f64_ugt(i64 addrspace(1)* %out, double %src) {
; GCN-LABEL: {{^}}v_fcmp_f64_uge:
; GCN: v_cmp_nlt_f64_e64
-define void @v_fcmp_f64_uge(i64 addrspace(1)* %out, double %src) {
+define amdgpu_kernel void @v_fcmp_f64_uge(i64 addrspace(1)* %out, double %src) {
%result = call i64 @llvm.amdgcn.fcmp.f64(double %src, double 100.00, i32 11)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -211,7 +219,7 @@ define void @v_fcmp_f64_uge(i64 addrspace(1)* %out, double %src) {
; GCN-LABEL: {{^}}v_fcmp_f64_ult:
; GCN: v_cmp_nge_f64_e64
-define void @v_fcmp_f64_ult(i64 addrspace(1)* %out, double %src) {
+define amdgpu_kernel void @v_fcmp_f64_ult(i64 addrspace(1)* %out, double %src) {
%result = call i64 @llvm.amdgcn.fcmp.f64(double %src, double 100.00, i32 12)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -219,7 +227,7 @@ define void @v_fcmp_f64_ult(i64 addrspace(1)* %out, double %src) {
; GCN-LABEL: {{^}}v_fcmp_f64_ule:
; GCN: v_cmp_ngt_f64_e64
-define void @v_fcmp_f64_ule(i64 addrspace(1)* %out, double %src) {
+define amdgpu_kernel void @v_fcmp_f64_ule(i64 addrspace(1)* %out, double %src) {
%result = call i64 @llvm.amdgcn.fcmp.f64(double %src, double 100.00, i32 13)
store i64 %result, i64 addrspace(1)* %out
ret void
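
The integer condition operand in these tests tracks LLVM's fcmp predicate numbering (1=oeq, 2=ogt, 3=oge, 4=olt, 5=ole, 6=one, 9=ueq, 10=ugt, 11=uge, 12=ult, 13=ule, 14=une), and the v_fcmp test above shows an out-of-range value (-1) folding away rather than emitting a compare. A minimal sketch assuming that numbering (not part of the patch):

declare i64 @llvm.amdgcn.fcmp.f32(float, float, i32)

define amdgpu_kernel void @sketch_fcmp_ogt(i64 addrspace(1)* %out, float %x) {
  ; cc=2 selects ordered greater-than (lowers to v_cmp_gt_f32)
  %m = call i64 @llvm.amdgcn.fcmp.f32(float %x, float 0.0, i32 2)
  store i64 %m, i64 addrspace(1)* %out
  ret void
}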
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.fdiv.fast.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.fdiv.fast.ll
index 54d7848da3bf..248ee9904da0 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.fdiv.fast.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.fdiv.fast.ll
@@ -8,7 +8,7 @@ declare float @llvm.amdgcn.fdiv.fast(float, float) #0
; CHECK: v_rcp_f32_e32
; CHECK: v_mul_f32_e32
; CHECK: v_mul_f32_e32
-define void @test_fdiv_fast(float addrspace(1)* %out, float %a, float %b) #1 {
+define amdgpu_kernel void @test_fdiv_fast(float addrspace(1)* %out, float %a, float %b) #1 {
%fdiv = call float @llvm.amdgcn.fdiv.fast(float %a, float %b)
store float %fdiv, float addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.fmed3.f16.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.fmed3.f16.ll
new file mode 100644
index 000000000000..a4ae37b23c5f
--- /dev/null
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.fmed3.f16.ll
@@ -0,0 +1,39 @@
+; RUN: llc -march=amdgcn -mcpu=gfx901 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}test_fmed3_f16:
+; GCN: v_med3_f16 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @test_fmed3_f16(half addrspace(1)* %out, i32 %src0.arg, i32 %src1.arg, i32 %src2.arg) #1 {
+ %src0.f16 = trunc i32 %src0.arg to i16
+ %src0 = bitcast i16 %src0.f16 to half
+ %src1.f16 = trunc i32 %src1.arg to i16
+ %src1 = bitcast i16 %src1.f16 to half
+ %src2.f16 = trunc i32 %src2.arg to i16
+ %src2 = bitcast i16 %src2.f16 to half
+ %mad = call half @llvm.amdgcn.fmed3.f16(half %src0, half %src1, half %src2)
+ store half %mad, half addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fmed3_srcmods_f16:
+; GCN: v_med3_f16 v{{[0-9]+}}, -s{{[0-9]+}}, |v{{[0-9]+}}|, -|v{{[0-9]+}}|
+define amdgpu_kernel void @test_fmed3_srcmods_f16(half addrspace(1)* %out, i32 %src0.arg, i32 %src1.arg, i32 %src2.arg) #1 {
+ %src0.f16 = trunc i32 %src0.arg to i16
+ %src0 = bitcast i16 %src0.f16 to half
+ %src1.f16 = trunc i32 %src1.arg to i16
+ %src1 = bitcast i16 %src1.f16 to half
+ %src2.f16 = trunc i32 %src2.arg to i16
+ %src2 = bitcast i16 %src2.f16 to half
+ %src0.fneg = fsub half -0.0, %src0
+ %src1.fabs = call half @llvm.fabs.f16(half %src1)
+ %src2.fabs = call half @llvm.fabs.f16(half %src2)
+ %src2.fneg.fabs = fsub half -0.0, %src2.fabs
+ %mad = call half @llvm.amdgcn.fmed3.f16(half %src0.fneg, half %src1.fabs, half %src2.fneg.fabs)
+ store half %mad, half addrspace(1)* %out
+ ret void
+}
+
+declare half @llvm.amdgcn.fmed3.f16(half, half, half) #0
+declare half @llvm.fabs.f16(half) #0
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.fmed3.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.fmed3.ll
new file mode 100644
index 000000000000..230e625ad45b
--- /dev/null
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.fmed3.ll
@@ -0,0 +1,28 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}test_fmed3:
+; GCN: v_med3_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @test_fmed3(float addrspace(1)* %out, float %src0, float %src1, float %src2) #1 {
+ %mad = call float @llvm.amdgcn.fmed3.f32(float %src0, float %src1, float %src2)
+ store float %mad, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_fmed3_srcmods:
+; GCN: v_med3_f32 v{{[0-9]+}}, -s{{[0-9]+}}, |v{{[0-9]+}}|, -|v{{[0-9]+}}|
+define amdgpu_kernel void @test_fmed3_srcmods(float addrspace(1)* %out, float %src0, float %src1, float %src2) #1 {
+ %src0.fneg = fsub float -0.0, %src0
+ %src1.fabs = call float @llvm.fabs.f32(float %src1)
+ %src2.fabs = call float @llvm.fabs.f32(float %src2)
+ %src2.fneg.fabs = fsub float -0.0, %src2.fabs
+ %mad = call float @llvm.amdgcn.fmed3.f32(float %src0.fneg, float %src1.fabs, float %src2.fneg.fabs)
+ store float %mad, float addrspace(1)* %out
+ ret void
+}
+
+declare float @llvm.amdgcn.fmed3.f32(float, float, float) #0
+declare float @llvm.fabs.f32(float) #0
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
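
Since v_med3 selects the median of its three operands, a common use of the intrinsic is a branch-free clamp; the sketch below illustrates that framing (it is an illustration only, not something the patch itself tests):

declare float @llvm.amdgcn.fmed3.f32(float, float, float)

define amdgpu_kernel void @sketch_clamp01(float addrspace(1)* %out, float %x) {
  ; med3(x, 0.0, 1.0) == clamp(x, 0.0, 1.0) for non-NaN x
  %c = call float @llvm.amdgcn.fmed3.f32(float %x, float 0.0, float 1.0)
  store float %c, float addrspace(1)* %out
  ret void
}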
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.fmul.legacy.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.fmul.legacy.ll
index d5c1c0a0969b..b47d2dbc744d 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.fmul.legacy.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.fmul.legacy.ll
@@ -4,7 +4,7 @@
; GCN-LABEL: {{^}}test_mul_legacy_f32:
; GCN: v_mul_legacy_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
-define void @test_mul_legacy_f32(float addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @test_mul_legacy_f32(float addrspace(1)* %out, float %a, float %b) #0 {
%result = call float @llvm.amdgcn.fmul.legacy(float %a, float %b)
store float %result, float addrspace(1)* %out, align 4
ret void
@@ -12,7 +12,7 @@ define void @test_mul_legacy_f32(float addrspace(1)* %out, float %a, float %b) #
; GCN-LABEL: {{^}}test_mul_legacy_undef0_f32:
; GCN: v_mul_legacy_f32_e32
-define void @test_mul_legacy_undef0_f32(float addrspace(1)* %out, float %a) #0 {
+define amdgpu_kernel void @test_mul_legacy_undef0_f32(float addrspace(1)* %out, float %a) #0 {
%result = call float @llvm.amdgcn.fmul.legacy(float undef, float %a)
store float %result, float addrspace(1)* %out, align 4
ret void
@@ -20,7 +20,7 @@ define void @test_mul_legacy_undef0_f32(float addrspace(1)* %out, float %a) #0 {
; GCN-LABEL: {{^}}test_mul_legacy_undef1_f32:
; GCN: v_mul_legacy_f32_e32
-define void @test_mul_legacy_undef1_f32(float addrspace(1)* %out, float %a) #0 {
+define amdgpu_kernel void @test_mul_legacy_undef1_f32(float addrspace(1)* %out, float %a) #0 {
%result = call float @llvm.amdgcn.fmul.legacy(float %a, float undef)
store float %result, float addrspace(1)* %out, align 4
ret void
@@ -28,7 +28,7 @@ define void @test_mul_legacy_undef1_f32(float addrspace(1)* %out, float %a) #0 {
; GCN-LABEL: {{^}}test_mul_legacy_fabs_f32:
; GCN: v_mul_legacy_f32_e64 v{{[0-9]+}}, |v{{[0-9]+}}|, |s{{[0-9]+}}|
-define void @test_mul_legacy_fabs_f32(float addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @test_mul_legacy_fabs_f32(float addrspace(1)* %out, float %a, float %b) #0 {
%a.fabs = call float @llvm.fabs.f32(float %a)
%b.fabs = call float @llvm.fabs.f32(float %b)
%result = call float @llvm.amdgcn.fmul.legacy(float %a.fabs, float %b.fabs)
@@ -40,7 +40,7 @@ define void @test_mul_legacy_fabs_f32(float addrspace(1)* %out, float %a, float
; GCN-LABEL: {{^}}test_mad_legacy_f32:
; GCN: v_mul_legacy_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_add_f32_e32
-define void @test_mad_legacy_f32(float addrspace(1)* %out, float %a, float %b, float %c) #0 {
+define amdgpu_kernel void @test_mad_legacy_f32(float addrspace(1)* %out, float %a, float %b, float %c) #0 {
%mul = call float @llvm.amdgcn.fmul.legacy(float %a, float %b)
%add = fadd float %mul, %c
store float %add, float addrspace(1)* %out, align 4
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.fract.f16.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.fract.f16.ll
index d8c1af036a34..026f6901fc7f 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.fract.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.fract.f16.ll
@@ -7,7 +7,7 @@ declare half @llvm.amdgcn.fract.f16(half %a)
; VI: v_fract_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @fract_f16(
+define amdgpu_kernel void @fract_f16(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.fract.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.fract.ll
index a75267b8d693..d4f1c5fd9be7 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.fract.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.fract.ll
@@ -6,7 +6,7 @@ declare double @llvm.amdgcn.fract.f64(double) #0
; GCN-LABEL: {{^}}v_fract_f32:
; GCN: v_fract_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}
-define void @v_fract_f32(float addrspace(1)* %out, float %src) #1 {
+define amdgpu_kernel void @v_fract_f32(float addrspace(1)* %out, float %src) #1 {
%fract = call float @llvm.amdgcn.fract.f32(float %src)
store float %fract, float addrspace(1)* %out
ret void
@@ -14,7 +14,7 @@ define void @v_fract_f32(float addrspace(1)* %out, float %src) #1 {
; GCN-LABEL: {{^}}v_fract_f64:
; GCN: v_fract_f64_e32 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
-define void @v_fract_f64(double addrspace(1)* %out, double %src) #1 {
+define amdgpu_kernel void @v_fract_f64(double addrspace(1)* %out, double %src) #1 {
%fract = call double @llvm.amdgcn.fract.f64(double %src)
store double %fract, double addrspace(1)* %out
ret void
@@ -22,9 +22,8 @@ define void @v_fract_f64(double addrspace(1)* %out, double %src) #1 {
; GCN-LABEL: {{^}}v_fract_undef_f32:
; GCN-NOT: v_fract_f32
-; GCN-NOT: v0
-; GCN: buffer_store_dword v0
-define void @v_fract_undef_f32(float addrspace(1)* %out) #1 {
+; GCN-NOT: store_dword
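+; fract(undef) is presumably folded away to undef and the store of undef
+; dropped, so the updated check requires that no store is emitted at all.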
+define amdgpu_kernel void @v_fract_undef_f32(float addrspace(1)* %out) #1 {
%fract = call float @llvm.amdgcn.fract.f32(float undef)
store float %fract, float addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.f16.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.f16.ll
index 7521224058f3..dc3eb4ce191e 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.f16.ll
@@ -6,7 +6,7 @@ declare i16 @llvm.amdgcn.frexp.exp.i16.f16(half %a)
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; VI: v_frexp_exp_i16_f16_e32 v[[R_I16:[0-9]+]], v[[A_F16]]
; GCN: buffer_store_short v[[R_I16]]
-define void @frexp_exp_f16(
+define amdgpu_kernel void @frexp_exp_f16(
i16 addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -21,7 +21,7 @@ entry:
; VI: v_frexp_exp_i16_f16_e32 v[[R_I16:[0-9]+]], v[[A_F16]]
; VI: v_bfe_i32 v[[R_I32:[0-9]+]], v[[R_I16]], 0, 16{{$}}
; GCN: buffer_store_dword v[[R_I32]]
-define void @frexp_exp_f16_sext(
+define amdgpu_kernel void @frexp_exp_f16_sext(
i32 addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -37,7 +37,7 @@ entry:
; VI: v_frexp_exp_i16_f16_e32 v[[R_I16:[0-9]+]], v[[A_F16]]
; VI: v_and_b32_e32 v[[R_I32:[0-9]+]], 0xffff, v[[R_I16]]
; GCN: buffer_store_dword v[[R_I32]]
-define void @frexp_exp_f16_zext(
+define amdgpu_kernel void @frexp_exp_f16_zext(
i32 addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.ll
index 9c49f175f2b5..0d686147caf8 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.ll
@@ -8,7 +8,7 @@ declare i32 @llvm.amdgcn.frexp.exp.i32.f64(double) #0
; GCN-LABEL: {{^}}s_test_frexp_exp_f32:
; GCN: v_frexp_exp_i32_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}
-define void @s_test_frexp_exp_f32(i32 addrspace(1)* %out, float %src) #1 {
+define amdgpu_kernel void @s_test_frexp_exp_f32(i32 addrspace(1)* %out, float %src) #1 {
%frexp.exp = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float %src)
store i32 %frexp.exp, i32 addrspace(1)* %out
ret void
@@ -16,7 +16,7 @@ define void @s_test_frexp_exp_f32(i32 addrspace(1)* %out, float %src) #1 {
; GCN-LABEL: {{^}}s_test_fabs_frexp_exp_f32:
; GCN: v_frexp_exp_i32_f32_e64 {{v[0-9]+}}, |{{s[0-9]+}}|
-define void @s_test_fabs_frexp_exp_f32(i32 addrspace(1)* %out, float %src) #1 {
+define amdgpu_kernel void @s_test_fabs_frexp_exp_f32(i32 addrspace(1)* %out, float %src) #1 {
%fabs.src = call float @llvm.fabs.f32(float %src)
%frexp.exp = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float %fabs.src)
store i32 %frexp.exp, i32 addrspace(1)* %out
@@ -25,7 +25,7 @@ define void @s_test_fabs_frexp_exp_f32(i32 addrspace(1)* %out, float %src) #1 {
; GCN-LABEL: {{^}}s_test_fneg_fabs_frexp_exp_f32:
; GCN: v_frexp_exp_i32_f32_e64 {{v[0-9]+}}, -|{{s[0-9]+}}|
-define void @s_test_fneg_fabs_frexp_exp_f32(i32 addrspace(1)* %out, float %src) #1 {
+define amdgpu_kernel void @s_test_fneg_fabs_frexp_exp_f32(i32 addrspace(1)* %out, float %src) #1 {
%fabs.src = call float @llvm.fabs.f32(float %src)
%fneg.fabs.src = fsub float -0.0, %fabs.src
%frexp.exp = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float %fneg.fabs.src)
@@ -35,7 +35,7 @@ define void @s_test_fneg_fabs_frexp_exp_f32(i32 addrspace(1)* %out, float %src)
; GCN-LABEL: {{^}}s_test_frexp_exp_f64:
; GCN: v_frexp_exp_i32_f64_e32 {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}
-define void @s_test_frexp_exp_f64(i32 addrspace(1)* %out, double %src) #1 {
+define amdgpu_kernel void @s_test_frexp_exp_f64(i32 addrspace(1)* %out, double %src) #1 {
%frexp.exp = call i32 @llvm.amdgcn.frexp.exp.i32.f64(double %src)
store i32 %frexp.exp, i32 addrspace(1)* %out
ret void
@@ -43,7 +43,7 @@ define void @s_test_frexp_exp_f64(i32 addrspace(1)* %out, double %src) #1 {
; GCN-LABEL: {{^}}s_test_fabs_frexp_exp_f64:
; GCN: v_frexp_exp_i32_f64_e64 {{v[0-9]+}}, |{{s\[[0-9]+:[0-9]+\]}}|
-define void @s_test_fabs_frexp_exp_f64(i32 addrspace(1)* %out, double %src) #1 {
+define amdgpu_kernel void @s_test_fabs_frexp_exp_f64(i32 addrspace(1)* %out, double %src) #1 {
%fabs.src = call double @llvm.fabs.f64(double %src)
%frexp.exp = call i32 @llvm.amdgcn.frexp.exp.i32.f64(double %fabs.src)
store i32 %frexp.exp, i32 addrspace(1)* %out
@@ -52,7 +52,7 @@ define void @s_test_fabs_frexp_exp_f64(i32 addrspace(1)* %out, double %src) #1 {
; GCN-LABEL: {{^}}s_test_fneg_fabs_frexp_exp_f64:
; GCN: v_frexp_exp_i32_f64_e64 {{v[0-9]+}}, -|{{s\[[0-9]+:[0-9]+\]}}|
-define void @s_test_fneg_fabs_frexp_exp_f64(i32 addrspace(1)* %out, double %src) #1 {
+define amdgpu_kernel void @s_test_fneg_fabs_frexp_exp_f64(i32 addrspace(1)* %out, double %src) #1 {
%fabs.src = call double @llvm.fabs.f64(double %src)
%fneg.fabs.src = fsub double -0.0, %fabs.src
%frexp.exp = call i32 @llvm.amdgcn.frexp.exp.i32.f64(double %fneg.fabs.src)
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.f16.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.f16.ll
index 706537d7e21c..722cd44e99fb 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.f16.ll
@@ -7,7 +7,7 @@ declare half @llvm.amdgcn.frexp.mant.f16(half %a)
; VI: v_frexp_mant_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @frexp_mant_f16(
+define amdgpu_kernel void @frexp_mant_f16(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.ll
index b8d63defffed..605dc3db2b98 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.ll
@@ -8,7 +8,7 @@ declare double @llvm.amdgcn.frexp.mant.f64(double) #0
; GCN-LABEL: {{^}}s_test_frexp_mant_f32:
; GCN: v_frexp_mant_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}
-define void @s_test_frexp_mant_f32(float addrspace(1)* %out, float %src) #1 {
+define amdgpu_kernel void @s_test_frexp_mant_f32(float addrspace(1)* %out, float %src) #1 {
%frexp.mant = call float @llvm.amdgcn.frexp.mant.f32(float %src)
store float %frexp.mant, float addrspace(1)* %out
ret void
@@ -16,7 +16,7 @@ define void @s_test_frexp_mant_f32(float addrspace(1)* %out, float %src) #1 {
; GCN-LABEL: {{^}}s_test_fabs_frexp_mant_f32:
; GCN: v_frexp_mant_f32_e64 {{v[0-9]+}}, |{{s[0-9]+}}|
-define void @s_test_fabs_frexp_mant_f32(float addrspace(1)* %out, float %src) #1 {
+define amdgpu_kernel void @s_test_fabs_frexp_mant_f32(float addrspace(1)* %out, float %src) #1 {
%fabs.src = call float @llvm.fabs.f32(float %src)
%frexp.mant = call float @llvm.amdgcn.frexp.mant.f32(float %fabs.src)
store float %frexp.mant, float addrspace(1)* %out
@@ -25,7 +25,7 @@ define void @s_test_fabs_frexp_mant_f32(float addrspace(1)* %out, float %src) #1
; GCN-LABEL: {{^}}s_test_fneg_fabs_frexp_mant_f32:
; GCN: v_frexp_mant_f32_e64 {{v[0-9]+}}, -|{{s[0-9]+}}|
-define void @s_test_fneg_fabs_frexp_mant_f32(float addrspace(1)* %out, float %src) #1 {
+define amdgpu_kernel void @s_test_fneg_fabs_frexp_mant_f32(float addrspace(1)* %out, float %src) #1 {
%fabs.src = call float @llvm.fabs.f32(float %src)
%fneg.fabs.src = fsub float -0.0, %fabs.src
%frexp.mant = call float @llvm.amdgcn.frexp.mant.f32(float %fneg.fabs.src)
@@ -35,7 +35,7 @@ define void @s_test_fneg_fabs_frexp_mant_f32(float addrspace(1)* %out, float %sr
; GCN-LABEL: {{^}}s_test_frexp_mant_f64:
; GCN: v_frexp_mant_f64_e32 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
-define void @s_test_frexp_mant_f64(double addrspace(1)* %out, double %src) #1 {
+define amdgpu_kernel void @s_test_frexp_mant_f64(double addrspace(1)* %out, double %src) #1 {
%frexp.mant = call double @llvm.amdgcn.frexp.mant.f64(double %src)
store double %frexp.mant, double addrspace(1)* %out
ret void
@@ -43,7 +43,7 @@ define void @s_test_frexp_mant_f64(double addrspace(1)* %out, double %src) #1 {
; GCN-LABEL: {{^}}s_test_fabs_frexp_mant_f64:
; GCN: v_frexp_mant_f64_e64 {{v\[[0-9]+:[0-9]+\]}}, |{{s\[[0-9]+:[0-9]+\]}}|
-define void @s_test_fabs_frexp_mant_f64(double addrspace(1)* %out, double %src) #1 {
+define amdgpu_kernel void @s_test_fabs_frexp_mant_f64(double addrspace(1)* %out, double %src) #1 {
%fabs.src = call double @llvm.fabs.f64(double %src)
%frexp.mant = call double @llvm.amdgcn.frexp.mant.f64(double %fabs.src)
store double %frexp.mant, double addrspace(1)* %out
@@ -52,7 +52,7 @@ define void @s_test_fabs_frexp_mant_f64(double addrspace(1)* %out, double %src)
; GCN-LABEL: {{^}}s_test_fneg_fabs_frexp_mant_f64:
; GCN: v_frexp_mant_f64_e64 {{v\[[0-9]+:[0-9]+\]}}, -|{{s\[[0-9]+:[0-9]+\]}}|
-define void @s_test_fneg_fabs_frexp_mant_f64(double addrspace(1)* %out, double %src) #1 {
+define amdgpu_kernel void @s_test_fneg_fabs_frexp_mant_f64(double addrspace(1)* %out, double %src) #1 {
%fabs.src = call double @llvm.fabs.f64(double %src)
%fneg.fabs.src = fsub double -0.0, %fabs.src
%frexp.mant = call double @llvm.amdgcn.frexp.mant.f64(double %fneg.fabs.src)
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.groupstaticsize.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.groupstaticsize.ll
index 6014e2ed85f8..d26fab4cebe1 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.groupstaticsize.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.groupstaticsize.ll
@@ -9,7 +9,7 @@
; CHECK-LABEL: {{^}}groupstaticsize_test0:
; CHECK: v_mov_b32_e32 v{{[0-9]+}}, 0x800{{$}}
-define void @groupstaticsize_test0(float addrspace(1)* %out, i32 addrspace(1)* %lds_size) #0 {
+define amdgpu_kernel void @groupstaticsize_test0(float addrspace(1)* %out, i32 addrspace(1)* %lds_size) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 64
%static_lds_size = call i32 @llvm.amdgcn.groupstaticsize() #1
@@ -23,7 +23,7 @@ define void @groupstaticsize_test0(float addrspace(1)* %out, i32 addrspace(1)* %
; CHECK-LABEL: {{^}}groupstaticsize_test1:
; CHECK: v_mov_b32_e32 v{{[0-9]+}}, 0xc00{{$}}
-define void @groupstaticsize_test1(float addrspace(1)* %out, i32 %cond, i32 addrspace(1)* %lds_size) {
+define amdgpu_kernel void @groupstaticsize_test1(float addrspace(1)* %out, i32 %cond, i32 addrspace(1)* %lds_size) {
entry:
%static_lds_size = call i32 @llvm.amdgcn.groupstaticsize() #1
store i32 %static_lds_size, i32 addrspace(1)* %lds_size, align 4
@@ -51,7 +51,7 @@ endif: ; preds = %else, %if
; Exceeds 16-bit simm limit of s_movk_i32
; CHECK-LABEL: {{^}}large_groupstaticsize:
; CHECK: v_mov_b32_e32 [[REG:v[0-9]+]], 0x4000{{$}}
-define void @large_groupstaticsize(i32 addrspace(1)* %size, i32 %idx) #0 {
+define amdgpu_kernel void @large_groupstaticsize(i32 addrspace(1)* %size, i32 %idx) #0 {
%gep = getelementptr inbounds [4096 x i32], [4096 x i32] addrspace(3)* @large, i32 0, i32 %idx
store volatile i32 0, i32 addrspace(3)* %gep
%static_lds_size = call i32 @llvm.amdgcn.groupstaticsize()
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll
index 6d0457bc6489..aa04af7a64a9 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll
@@ -4,9 +4,18 @@
declare i64 @llvm.amdgcn.icmp.i32(i32, i32, i32) #0
declare i64 @llvm.amdgcn.icmp.i64(i64, i64, i32) #0
+; Check that a non-constant (dynamic) condition-code operand does not crash codegen
+; GCN-LABEL: {{^}}v_icmp_i32_dynamic_cc:
+; GCN: s_endpgm
+define amdgpu_kernel void @v_icmp_i32_dynamic_cc(i64 addrspace(1)* %out, i32 %src, i32 %cc) {
+ %result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 %cc)
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
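+; Judging from the constant-operand tests below (32..41 map to eq, ne, ugt,
+; uge, ult, ule, sgt, sge, slt, sle), the third operand encodes the icmp
+; predicate and is normally a compile-time constant. A dynamic value cannot
+; select a specific v_cmp, so this test only requires that codegen still
+; completes (s_endpgm) without crashing.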
+
; GCN-LABEL: {{^}}v_icmp_i32_eq:
; GCN: v_cmp_eq_u32_e64
-define void @v_icmp_i32_eq(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32_eq(i64 addrspace(1)* %out, i32 %src) {
%result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 32)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -14,14 +23,14 @@ define void @v_icmp_i32_eq(i64 addrspace(1)* %out, i32 %src) {
; GCN-LABEL: {{^}}v_icmp:
; GCN-NOT: v_cmp_eq_u32_e64
-define void @v_icmp(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp(i64 addrspace(1)* %out, i32 %src) {
%result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 30)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_icmp_i32_ne:
; GCN: v_cmp_ne_u32_e64
-define void @v_icmp_i32_ne(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32_ne(i64 addrspace(1)* %out, i32 %src) {
%result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 33)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -29,7 +38,7 @@ define void @v_icmp_i32_ne(i64 addrspace(1)* %out, i32 %src) {
; GCN-LABEL: {{^}}v_icmp_u32_ugt:
; GCN: v_cmp_gt_u32_e64
-define void @v_icmp_u32_ugt(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_u32_ugt(i64 addrspace(1)* %out, i32 %src) {
%result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 34)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -37,7 +46,7 @@ define void @v_icmp_u32_ugt(i64 addrspace(1)* %out, i32 %src) {
; GCN-LABEL: {{^}}v_icmp_u32_uge:
; GCN: v_cmp_ge_u32_e64
-define void @v_icmp_u32_uge(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_u32_uge(i64 addrspace(1)* %out, i32 %src) {
%result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 35)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -45,7 +54,7 @@ define void @v_icmp_u32_uge(i64 addrspace(1)* %out, i32 %src) {
; GCN-LABEL: {{^}}v_icmp_u32_ult:
; GCN: v_cmp_lt_u32_e64
-define void @v_icmp_u32_ult(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_u32_ult(i64 addrspace(1)* %out, i32 %src) {
%result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 36)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -53,7 +62,7 @@ define void @v_icmp_u32_ult(i64 addrspace(1)* %out, i32 %src) {
; GCN-LABEL: {{^}}v_icmp_u32_ule:
; GCN: v_cmp_le_u32_e64
-define void @v_icmp_u32_ule(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_u32_ule(i64 addrspace(1)* %out, i32 %src) {
%result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 37)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -61,7 +70,7 @@ define void @v_icmp_u32_ule(i64 addrspace(1)* %out, i32 %src) {
; GCN-LABEL: {{^}}v_icmp_i32_sgt:
; GCN: v_cmp_gt_i32_e64
-define void @v_icmp_i32_sgt(i64 addrspace(1)* %out, i32 %src) #1 {
+define amdgpu_kernel void @v_icmp_i32_sgt(i64 addrspace(1)* %out, i32 %src) #1 {
%result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 38)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -69,7 +78,7 @@ define void @v_icmp_i32_sgt(i64 addrspace(1)* %out, i32 %src) #1 {
; GCN-LABEL: {{^}}v_icmp_i32_sge:
; GCN: v_cmp_ge_i32_e64
-define void @v_icmp_i32_sge(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32_sge(i64 addrspace(1)* %out, i32 %src) {
%result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 39)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -77,14 +86,14 @@ define void @v_icmp_i32_sge(i64 addrspace(1)* %out, i32 %src) {
; GCN-LABEL: {{^}}v_icmp_i32_slt:
; GCN: v_cmp_lt_i32_e64
-define void @v_icmp_i32_slt(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32_slt(i64 addrspace(1)* %out, i32 %src) {
%result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 40)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_icmp_i32_sle:
; GCN: v_cmp_le_i32_e64
-define void @v_icmp_i32_sle(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32_sle(i64 addrspace(1)* %out, i32 %src) {
%result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 41)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -92,7 +101,7 @@ define void @v_icmp_i32_sle(i64 addrspace(1)* %out, i32 %src) {
; GCN-LABEL: {{^}}v_icmp_i64_eq:
; GCN: v_cmp_eq_u64_e64
-define void @v_icmp_i64_eq(i64 addrspace(1)* %out, i64 %src) {
+define amdgpu_kernel void @v_icmp_i64_eq(i64 addrspace(1)* %out, i64 %src) {
%result = call i64 @llvm.amdgcn.icmp.i64(i64 %src, i64 100, i32 32)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -100,7 +109,7 @@ define void @v_icmp_i64_eq(i64 addrspace(1)* %out, i64 %src) {
; GCN-LABEL: {{^}}v_icmp_i64_ne:
; GCN: v_cmp_ne_u64_e64
-define void @v_icmp_i64_ne(i64 addrspace(1)* %out, i64 %src) {
+define amdgpu_kernel void @v_icmp_i64_ne(i64 addrspace(1)* %out, i64 %src) {
%result = call i64 @llvm.amdgcn.icmp.i64(i64 %src, i64 100, i32 33)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -108,7 +117,7 @@ define void @v_icmp_i64_ne(i64 addrspace(1)* %out, i64 %src) {
; GCN-LABEL: {{^}}v_icmp_u64_ugt:
; GCN: v_cmp_gt_u64_e64
-define void @v_icmp_u64_ugt(i64 addrspace(1)* %out, i64 %src) {
+define amdgpu_kernel void @v_icmp_u64_ugt(i64 addrspace(1)* %out, i64 %src) {
%result = call i64 @llvm.amdgcn.icmp.i64(i64 %src, i64 100, i32 34)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -116,7 +125,7 @@ define void @v_icmp_u64_ugt(i64 addrspace(1)* %out, i64 %src) {
; GCN-LABEL: {{^}}v_icmp_u64_uge:
; GCN: v_cmp_ge_u64_e64
-define void @v_icmp_u64_uge(i64 addrspace(1)* %out, i64 %src) {
+define amdgpu_kernel void @v_icmp_u64_uge(i64 addrspace(1)* %out, i64 %src) {
%result = call i64 @llvm.amdgcn.icmp.i64(i64 %src, i64 100, i32 35)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -124,7 +133,7 @@ define void @v_icmp_u64_uge(i64 addrspace(1)* %out, i64 %src) {
; GCN-LABEL: {{^}}v_icmp_u64_ult:
; GCN: v_cmp_lt_u64_e64
-define void @v_icmp_u64_ult(i64 addrspace(1)* %out, i64 %src) {
+define amdgpu_kernel void @v_icmp_u64_ult(i64 addrspace(1)* %out, i64 %src) {
%result = call i64 @llvm.amdgcn.icmp.i64(i64 %src, i64 100, i32 36)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -132,7 +141,7 @@ define void @v_icmp_u64_ult(i64 addrspace(1)* %out, i64 %src) {
; GCN-LABEL: {{^}}v_icmp_u64_ule:
; GCN: v_cmp_le_u64_e64
-define void @v_icmp_u64_ule(i64 addrspace(1)* %out, i64 %src) {
+define amdgpu_kernel void @v_icmp_u64_ule(i64 addrspace(1)* %out, i64 %src) {
%result = call i64 @llvm.amdgcn.icmp.i64(i64 %src, i64 100, i32 37)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -140,7 +149,7 @@ define void @v_icmp_u64_ule(i64 addrspace(1)* %out, i64 %src) {
; GCN-LABEL: {{^}}v_icmp_i64_sgt:
; GCN: v_cmp_gt_i64_e64
-define void @v_icmp_i64_sgt(i64 addrspace(1)* %out, i64 %src) {
+define amdgpu_kernel void @v_icmp_i64_sgt(i64 addrspace(1)* %out, i64 %src) {
%result = call i64 @llvm.amdgcn.icmp.i64(i64 %src, i64 100, i32 38)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -148,7 +157,7 @@ define void @v_icmp_i64_sgt(i64 addrspace(1)* %out, i64 %src) {
; GCN-LABEL: {{^}}v_icmp_i64_sge:
; GCN: v_cmp_ge_i64_e64
-define void @v_icmp_i64_sge(i64 addrspace(1)* %out, i64 %src) {
+define amdgpu_kernel void @v_icmp_i64_sge(i64 addrspace(1)* %out, i64 %src) {
%result = call i64 @llvm.amdgcn.icmp.i64(i64 %src, i64 100, i32 39)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -156,14 +165,14 @@ define void @v_icmp_i64_sge(i64 addrspace(1)* %out, i64 %src) {
; GCN-LABEL: {{^}}v_icmp_i64_slt:
; GCN: v_cmp_lt_i64_e64
-define void @v_icmp_i64_slt(i64 addrspace(1)* %out, i64 %src) {
+define amdgpu_kernel void @v_icmp_i64_slt(i64 addrspace(1)* %out, i64 %src) {
%result = call i64 @llvm.amdgcn.icmp.i64(i64 %src, i64 100, i32 40)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_icmp_i64_sle:
; GCN: v_cmp_le_i64_e64
-define void @v_icmp_i64_sle(i64 addrspace(1)* %out, i64 %src) {
+define amdgpu_kernel void @v_icmp_i64_sle(i64 addrspace(1)* %out, i64 %src) {
%result = call i64 @llvm.amdgcn.icmp.i64(i64 %src, i64 100, i32 41)
store i64 %result, i64 addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.image.gather4.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.image.gather4.ll
index a65f422742c9..a9351dbb27d2 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.image.gather4.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.image.gather4.ll
@@ -3,7 +3,7 @@
; GCN-LABEL: {{^}}gather4_v2:
; GCN: image_gather4 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_v2(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_v2(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.v4f32.v2f32.v8i32(<2 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -12,7 +12,7 @@ main_body:
; GCN-LABEL: {{^}}gather4:
; GCN: image_gather4 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -21,7 +21,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_cl:
; GCN: image_gather4_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.cl.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -30,7 +30,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_l:
; GCN: image_gather4_l {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_l(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_l(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.l.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -39,7 +39,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_b:
; GCN: image_gather4_b {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_b(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_b(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.b.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -48,7 +48,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_b_cl:
; GCN: image_gather4_b_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_b_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_b_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.b.cl.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -57,7 +57,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_b_cl_v8:
; GCN: image_gather4_b_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_b_cl_v8(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_b_cl_v8(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.b.cl.v4f32.v8f32.v8i32(<8 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -66,7 +66,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_lz_v2:
; GCN: image_gather4_lz {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_lz_v2(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_lz_v2(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.lz.v4f32.v2f32.v8i32(<2 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -75,7 +75,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_lz:
; GCN: image_gather4_lz {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_lz(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_lz(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.lz.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -86,7 +86,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_o:
; GCN: image_gather4_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_o(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_o(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -95,7 +95,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_cl_o:
; GCN: image_gather4_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_cl_o(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_cl_o(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.cl.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -104,7 +104,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_cl_o_v8:
; GCN: image_gather4_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_cl_o_v8(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_cl_o_v8(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.cl.o.v4f32.v8f32.v8i32(<8 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -113,7 +113,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_l_o:
; GCN: image_gather4_l_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_l_o(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_l_o(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.l.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -122,7 +122,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_l_o_v8:
; GCN: image_gather4_l_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_l_o_v8(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_l_o_v8(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.l.o.v4f32.v8f32.v8i32(<8 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -131,7 +131,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_b_o:
; GCN: image_gather4_b_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_b_o(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_b_o(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.b.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -140,7 +140,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_b_o_v8:
; GCN: image_gather4_b_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_b_o_v8(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_b_o_v8(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.b.o.v4f32.v8f32.v8i32(<8 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -149,7 +149,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_b_cl_o:
; GCN: image_gather4_b_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_b_cl_o(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_b_cl_o(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.b.cl.o.v4f32.v8f32.v8i32(<8 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -158,7 +158,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_lz_o:
; GCN: image_gather4_lz_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_lz_o(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_lz_o(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.lz.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -168,7 +168,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_c:
; GCN: image_gather4_c {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_c(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_c(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.c.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -177,7 +177,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_c_cl:
; GCN: image_gather4_c_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_c_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_c_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.c.cl.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -186,7 +186,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_c_cl_v8:
; GCN: image_gather4_c_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_c_cl_v8(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_c_cl_v8(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.c.cl.v4f32.v8f32.v8i32(<8 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -195,7 +195,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_c_l:
; GCN: image_gather4_c_l {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_c_l(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_c_l(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.c.l.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -204,7 +204,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_c_l_v8:
; GCN: image_gather4_c_l {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_c_l_v8(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_c_l_v8(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.c.l.v4f32.v8f32.v8i32(<8 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -213,7 +213,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_c_b:
; GCN: image_gather4_c_b {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_c_b(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_c_b(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.c.b.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -222,7 +222,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_c_b_v8:
; GCN: image_gather4_c_b {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_c_b_v8(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_c_b_v8(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.c.b.v4f32.v8f32.v8i32(<8 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -231,7 +231,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_c_b_cl:
; GCN: image_gather4_c_b_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_c_b_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_c_b_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.c.b.cl.v4f32.v8f32.v8i32(<8 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -240,7 +240,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_c_lz:
; GCN: image_gather4_c_lz {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_c_lz(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_c_lz(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.c.lz.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -250,7 +250,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_c_o:
; GCN: image_gather4_c_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_c_o(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_c_o(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.c.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -259,7 +259,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_c_o_v8:
; GCN: image_gather4_c_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_c_o_v8(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_c_o_v8(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.c.o.v4f32.v8f32.v8i32(<8 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -268,7 +268,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_c_cl_o:
; GCN: image_gather4_c_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_c_cl_o(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_c_cl_o(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.c.cl.o.v4f32.v8f32.v8i32(<8 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -277,7 +277,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_c_l_o:
; GCN: image_gather4_c_l_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_c_l_o(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_c_l_o(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.c.l.o.v4f32.v8f32.v8i32(<8 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -286,7 +286,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_c_b_o:
; GCN: image_gather4_c_b_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_c_b_o(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_c_b_o(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.c.b.o.v4f32.v8f32.v8i32(<8 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -295,7 +295,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_c_b_cl_o:
; GCN: image_gather4_c_b_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_c_b_cl_o(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_c_b_cl_o(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.c.b.cl.o.v4f32.v8f32.v8i32(<8 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -304,7 +304,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_c_lz_o:
; GCN: image_gather4_c_lz_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_c_lz_o(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_c_lz_o(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.c.lz.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -313,7 +313,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_c_lz_o_v8:
; GCN: image_gather4_c_lz_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_c_lz_o_v8(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_c_lz_o_v8(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.gather4.c.lz.o.v4f32.v8f32.v8i32(<8 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -322,7 +322,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_f32:
; GCN: image_gather4 {{v[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 da
-define void @gather4_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_f32(float addrspace(1)* %out) {
main_body:
%r = call float @llvm.amdgcn.image.gather4.f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 1)
store float %r, float addrspace(1)* %out
@@ -331,7 +331,7 @@ main_body:
; GCN-LABEL: {{^}}gather4_v2f32:
; GCN: image_gather4 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x3 da
-define void @gather4_v2f32(<2 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @gather4_v2f32(<2 x float> addrspace(1)* %out) {
main_body:
%r = call <2 x float> @llvm.amdgcn.image.gather4.v2f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 3, i1 0, i1 0, i1 0, i1 0, i1 1)
store <2 x float> %r, <2 x float> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.image.getlod.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.image.getlod.ll
index ef810a330017..2e78e2a4c6f5 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.image.getlod.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.image.getlod.ll
@@ -3,7 +3,7 @@
; GCN-LABEL: {{^}}getlod:
; GCN: image_get_lod {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf da
-define void @getlod(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @getlod(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.getlod.v4f32.f32.v8i32(float undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -12,7 +12,7 @@ main_body:
; GCN-LABEL: {{^}}getlod_v2:
; GCN: image_get_lod {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf da
-define void @getlod_v2(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @getlod_v2(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.getlod.v4f32.v2f32.v8i32(<2 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -21,13 +21,23 @@ main_body:
; GCN-LABEL: {{^}}getlod_v4:
; GCN: image_get_lod {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf da
-define void @getlod_v4(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @getlod_v4(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.getlod.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 1)
store <4 x float> %r, <4 x float> addrspace(1)* %out
ret void
}
+; GCN-LABEL: {{^}}adjust_writemask_getlod_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_getlod_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.getlod.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
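+; With a zero dmask no result channels are enabled, so the writemask
+; adjustment is presumably expected to delete the image_get_lod and,
+; transitively, the store of the unused result (hence the GCN-NOT checks).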
declare <4 x float> @llvm.amdgcn.image.getlod.v4f32.f32.v8i32(float, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #0
declare <4 x float> @llvm.amdgcn.image.getlod.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #0
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.image.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.image.ll
index 69c43ca3070a..c74c0fa15855 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.image.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.image.ll
@@ -1,146 +1,144 @@
-;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
-;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefixes=CHECK,VI %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI %s
-;CHECK-LABEL: {{^}}image_load_v4i32:
-;CHECK: image_load v[0:3], v[0:3], s[0:7] dmask:0xf unorm
-;CHECK: s_waitcnt vmcnt(0)
-define amdgpu_ps <4 x float> @image_load_v4i32(<8 x i32> inreg %rsrc, <4 x i32> %c) {
+; GCN-LABEL: {{^}}image_load_v4i32:
+; GCN: image_load v[0:3], v[0:3], s[0:7] dmask:0xf unorm
+; GCN: s_waitcnt vmcnt(0)
+define amdgpu_ps <4 x float> @image_load_v4i32(<8 x i32> inreg %rsrc, <4 x i32> %c) #0 {
main_body:
- %tex = call <4 x float> @llvm.amdgcn.image.load.v4f32.v4i32.v8i32(<4 x i32> %c, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+ %tex = call <4 x float> @llvm.amdgcn.image.load.v4f32.v4i32.v8i32(<4 x i32> %c, <8 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false)
ret <4 x float> %tex
}
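+; Note that the dmask immediate in the checks mirrors the i32 mask operand
+; of the intrinsic, one bit per enabled result channel (15 -> 0xf,
+; 3 -> 0x3, 1 -> 0x1).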
-;CHECK-LABEL: {{^}}image_load_v2i32:
-;CHECK: image_load v[0:3], v[0:1], s[0:7] dmask:0xf unorm
-;CHECK: s_waitcnt vmcnt(0)
-define amdgpu_ps <4 x float> @image_load_v2i32(<8 x i32> inreg %rsrc, <2 x i32> %c) {
+; GCN-LABEL: {{^}}image_load_v2i32:
+; GCN: image_load v[0:3], v[0:1], s[0:7] dmask:0xf unorm
+; GCN: s_waitcnt vmcnt(0)
+define amdgpu_ps <4 x float> @image_load_v2i32(<8 x i32> inreg %rsrc, <2 x i32> %c) #0 {
main_body:
- %tex = call <4 x float> @llvm.amdgcn.image.load.v4f32.v2i32.v8i32(<2 x i32> %c, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+ %tex = call <4 x float> @llvm.amdgcn.image.load.v4f32.v2i32.v8i32(<2 x i32> %c, <8 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false)
ret <4 x float> %tex
}
-;CHECK-LABEL: {{^}}image_load_i32:
-;CHECK: image_load v[0:3], v0, s[0:7] dmask:0xf unorm
-;CHECK: s_waitcnt vmcnt(0)
-define amdgpu_ps <4 x float> @image_load_i32(<8 x i32> inreg %rsrc, i32 %c) {
+; GCN-LABEL: {{^}}image_load_i32:
+; GCN: image_load v[0:3], v0, s[0:7] dmask:0xf unorm
+; GCN: s_waitcnt vmcnt(0)
+define amdgpu_ps <4 x float> @image_load_i32(<8 x i32> inreg %rsrc, i32 %c) #0 {
main_body:
- %tex = call <4 x float> @llvm.amdgcn.image.load.v4f32.i32.v8i32(i32 %c, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+ %tex = call <4 x float> @llvm.amdgcn.image.load.v4f32.i32.v8i32(i32 %c, <8 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false)
ret <4 x float> %tex
}
-;CHECK-LABEL: {{^}}image_load_mip:
-;CHECK: image_load_mip v[0:3], v[0:3], s[0:7] dmask:0xf unorm
-;CHECK: s_waitcnt vmcnt(0)
-define amdgpu_ps <4 x float> @image_load_mip(<8 x i32> inreg %rsrc, <4 x i32> %c) {
+; GCN-LABEL: {{^}}image_load_mip:
+; GCN: image_load_mip v[0:3], v[0:3], s[0:7] dmask:0xf unorm
+; GCN: s_waitcnt vmcnt(0)
+define amdgpu_ps <4 x float> @image_load_mip(<8 x i32> inreg %rsrc, <4 x i32> %c) #0 {
main_body:
- %tex = call <4 x float> @llvm.amdgcn.image.load.mip.v4f32.v4i32.v8i32(<4 x i32> %c, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+ %tex = call <4 x float> @llvm.amdgcn.image.load.mip.v4f32.v4i32.v8i32(<4 x i32> %c, <8 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false)
ret <4 x float> %tex
}
-;CHECK-LABEL: {{^}}image_load_1:
-;CHECK: image_load v0, v[0:3], s[0:7] dmask:0x1 unorm
-;CHECK: s_waitcnt vmcnt(0)
-define amdgpu_ps float @image_load_1(<8 x i32> inreg %rsrc, <4 x i32> %c) {
+; GCN-LABEL: {{^}}image_load_1:
+; GCN: image_load v0, v[0:3], s[0:7] dmask:0x1 unorm
+; GCN: s_waitcnt vmcnt(0)
+define amdgpu_ps float @image_load_1(<8 x i32> inreg %rsrc, <4 x i32> %c) #0 {
main_body:
- %tex = call <4 x float> @llvm.amdgcn.image.load.v4f32.v4i32.v8i32(<4 x i32> %c, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+ %tex = call <4 x float> @llvm.amdgcn.image.load.v4f32.v4i32.v8i32(<4 x i32> %c, <8 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false)
%elt = extractelement <4 x float> %tex, i32 0
-; Only first component used, test that dmask etc. is changed accordingly
ret float %elt
}
-;CHECK-LABEL: {{^}}image_load_f32_v2i32:
-;CHECK: image_load {{v[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 unorm
-;CHECK: s_waitcnt vmcnt(0)
-define amdgpu_ps float @image_load_f32_v2i32(<8 x i32> inreg %rsrc, <2 x i32> %c) {
+; GCN-LABEL: {{^}}image_load_f32_v2i32:
+; GCN: image_load {{v[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 unorm
+; GCN: s_waitcnt vmcnt(0)
+define amdgpu_ps float @image_load_f32_v2i32(<8 x i32> inreg %rsrc, <2 x i32> %c) #0 {
main_body:
- %tex = call float @llvm.amdgcn.image.load.f32.v2i32.v8i32(<2 x i32> %c, <8 x i32> %rsrc, i32 1, i1 0, i1 0, i1 0, i1 0)
+ %tex = call float @llvm.amdgcn.image.load.f32.v2i32.v8i32(<2 x i32> %c, <8 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false)
ret float %tex
}
-;CHECK-LABEL: {{^}}image_load_v2f32_v4i32:
-;CHECK: image_load {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x3 unorm
-;CHECK: s_waitcnt vmcnt(0)
-define amdgpu_ps <2 x float> @image_load_v2f32_v4i32(<8 x i32> inreg %rsrc, <4 x i32> %c) {
+; GCN-LABEL: {{^}}image_load_v2f32_v4i32:
+; GCN: image_load {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x3 unorm
+; GCN: s_waitcnt vmcnt(0)
+define amdgpu_ps <2 x float> @image_load_v2f32_v4i32(<8 x i32> inreg %rsrc, <4 x i32> %c) #0 {
main_body:
- %tex = call <2 x float> @llvm.amdgcn.image.load.v2f32.v4i32.v8i32(<4 x i32> %c, <8 x i32> %rsrc, i32 3, i1 0, i1 0, i1 0, i1 0)
+ %tex = call <2 x float> @llvm.amdgcn.image.load.v2f32.v4i32.v8i32(<4 x i32> %c, <8 x i32> %rsrc, i32 3, i1 false, i1 false, i1 false, i1 false)
ret <2 x float> %tex
}
-
-;CHECK-LABEL: {{^}}image_store_v4i32:
-;CHECK: image_store v[0:3], v[4:7], s[0:7] dmask:0xf unorm
-define amdgpu_ps void @image_store_v4i32(<8 x i32> inreg %rsrc, <4 x float> %data, <4 x i32> %coords) {
+; GCN-LABEL: {{^}}image_store_v4i32:
+; GCN: image_store v[0:3], v[4:7], s[0:7] dmask:0xf unorm
+define amdgpu_ps void @image_store_v4i32(<8 x i32> inreg %rsrc, <4 x float> %data, <4 x i32> %coords) #0 {
main_body:
- call void @llvm.amdgcn.image.store.v4f32.v4i32.v8i32(<4 x float> %data, <4 x i32> %coords, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+ call void @llvm.amdgcn.image.store.v4f32.v4i32.v8i32(<4 x float> %data, <4 x i32> %coords, <8 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false)
ret void
}
-;CHECK-LABEL: {{^}}image_store_v2i32:
-;CHECK: image_store v[0:3], v[4:5], s[0:7] dmask:0xf unorm
-define amdgpu_ps void @image_store_v2i32(<8 x i32> inreg %rsrc, <4 x float> %data, <2 x i32> %coords) {
+; GCN-LABEL: {{^}}image_store_v2i32:
+; GCN: image_store v[0:3], v[4:5], s[0:7] dmask:0xf unorm
+define amdgpu_ps void @image_store_v2i32(<8 x i32> inreg %rsrc, <4 x float> %data, <2 x i32> %coords) #0 {
main_body:
- call void @llvm.amdgcn.image.store.v4f32.v2i32.v8i32(<4 x float> %data, <2 x i32> %coords, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+ call void @llvm.amdgcn.image.store.v4f32.v2i32.v8i32(<4 x float> %data, <2 x i32> %coords, <8 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false)
ret void
}
-;CHECK-LABEL: {{^}}image_store_i32:
-;CHECK: image_store v[0:3], v4, s[0:7] dmask:0xf unorm
-define amdgpu_ps void @image_store_i32(<8 x i32> inreg %rsrc, <4 x float> %data, i32 %coords) {
+; GCN-LABEL: {{^}}image_store_i32:
+; GCN: image_store v[0:3], v4, s[0:7] dmask:0xf unorm
+define amdgpu_ps void @image_store_i32(<8 x i32> inreg %rsrc, <4 x float> %data, i32 %coords) #0 {
main_body:
- call void @llvm.amdgcn.image.store.v4f32.i32.v8i32(<4 x float> %data, i32 %coords, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+ call void @llvm.amdgcn.image.store.v4f32.i32.v8i32(<4 x float> %data, i32 %coords, <8 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false)
ret void
}
-;CHECK-LABEL: {{^}}image_store_f32_i32:
-;CHECK: image_store {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 unorm
-define amdgpu_ps void @image_store_f32_i32(<8 x i32> inreg %rsrc, float %data, i32 %coords) {
+; GCN-LABEL: {{^}}image_store_f32_i32:
+; GCN: image_store {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1 unorm
+define amdgpu_ps void @image_store_f32_i32(<8 x i32> inreg %rsrc, float %data, i32 %coords) #0 {
main_body:
- call void @llvm.amdgcn.image.store.f32.i32.v8i32(float %data, i32 %coords, <8 x i32> %rsrc, i32 1, i1 0, i1 0, i1 0, i1 0)
+ call void @llvm.amdgcn.image.store.f32.i32.v8i32(float %data, i32 %coords, <8 x i32> %rsrc, i32 1, i1 false, i1 false, i1 false, i1 false)
ret void
}
-;CHECK-LABEL: {{^}}image_store_v2f32_v4i32:
-;CHECK: image_store {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x3 unorm
-define amdgpu_ps void @image_store_v2f32_v4i32(<8 x i32> inreg %rsrc, <2 x float> %data, <4 x i32> %coords) {
+; GCN-LABEL: {{^}}image_store_v2f32_v4i32:
+; GCN: image_store {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x3 unorm
+define amdgpu_ps void @image_store_v2f32_v4i32(<8 x i32> inreg %rsrc, <2 x float> %data, <4 x i32> %coords) #0 {
main_body:
- call void @llvm.amdgcn.image.store.v2f32.v4i32.v8i32(<2 x float> %data, <4 x i32> %coords, <8 x i32> %rsrc, i32 3, i1 0, i1 0, i1 0, i1 0)
+ call void @llvm.amdgcn.image.store.v2f32.v4i32.v8i32(<2 x float> %data, <4 x i32> %coords, <8 x i32> %rsrc, i32 3, i1 false, i1 false, i1 false, i1 false)
ret void
}
-;CHECK-LABEL: {{^}}image_store_mip:
-;CHECK: image_store_mip v[0:3], v[4:7], s[0:7] dmask:0xf unorm
-define amdgpu_ps void @image_store_mip(<8 x i32> inreg %rsrc, <4 x float> %data, <4 x i32> %coords) {
+; GCN-LABEL: {{^}}image_store_mip:
+; GCN: image_store_mip v[0:3], v[4:7], s[0:7] dmask:0xf unorm
+define amdgpu_ps void @image_store_mip(<8 x i32> inreg %rsrc, <4 x float> %data, <4 x i32> %coords) #0 {
main_body:
- call void @llvm.amdgcn.image.store.mip.v4f32.v4i32.v8i32(<4 x float> %data, <4 x i32> %coords, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+ call void @llvm.amdgcn.image.store.mip.v4f32.v4i32.v8i32(<4 x float> %data, <4 x i32> %coords, <8 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false)
ret void
}
-;CHECK-LABEL: {{^}}getresinfo:
-;CHECK: image_get_resinfo {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define amdgpu_ps void @getresinfo() {
+; GCN-LABEL: {{^}}getresinfo:
+; GCN: image_get_resinfo {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
+define amdgpu_ps void @getresinfo() #0 {
main_body:
- %r = call <4 x float> @llvm.amdgcn.image.getresinfo.v4f32.i32.v8i32(i32 undef, <8 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0)
+ %r = call <4 x float> @llvm.amdgcn.image.getresinfo.v4f32.i32.v8i32(i32 undef, <8 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false)
%r0 = extractelement <4 x float> %r, i32 0
%r1 = extractelement <4 x float> %r, i32 1
%r2 = extractelement <4 x float> %r, i32 2
%r3 = extractelement <4 x float> %r, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r0, float %r1, float %r2, float %r3, i1 true, i1 true) #0
ret void
}
; Ideally, the register allocator would avoid the wait here
;
-;CHECK-LABEL: {{^}}image_store_wait:
-;CHECK: image_store v[0:3], v4, s[0:7] dmask:0xf unorm
-;CHECK: s_waitcnt vmcnt(0) expcnt(0)
-;CHECK: image_load v[0:3], v4, s[8:15] dmask:0xf unorm
-;CHECK: s_waitcnt vmcnt(0)
-;CHECK: image_store v[0:3], v4, s[16:23] dmask:0xf unorm
-define amdgpu_ps void @image_store_wait(<8 x i32> inreg, <8 x i32> inreg, <8 x i32> inreg, <4 x float>, i32) {
+; GCN-LABEL: {{^}}image_store_wait:
+; GCN: image_store v[0:3], v4, s[0:7] dmask:0xf unorm
+; GCN: s_waitcnt vmcnt(0) expcnt(0)
+; GCN: image_load v[0:3], v4, s[8:15] dmask:0xf unorm
+; GCN: s_waitcnt vmcnt(0)
+; GCN: image_store v[0:3], v4, s[16:23] dmask:0xf unorm
+define amdgpu_ps void @image_store_wait(<8 x i32> inreg %arg, <8 x i32> inreg %arg1, <8 x i32> inreg %arg2, <4 x float> %arg3, i32 %arg4) #0 {
main_body:
- call void @llvm.amdgcn.image.store.v4f32.i32.v8i32(<4 x float> %3, i32 %4, <8 x i32> %0, i32 15, i1 0, i1 0, i1 0, i1 0)
- %data = call <4 x float> @llvm.amdgcn.image.load.v4f32.i32.v8i32(i32 %4, <8 x i32> %1, i32 15, i1 0, i1 0, i1 0, i1 0)
- call void @llvm.amdgcn.image.store.v4f32.i32.v8i32(<4 x float> %data, i32 %4, <8 x i32> %2, i32 15, i1 0, i1 0, i1 0, i1 0)
+ call void @llvm.amdgcn.image.store.v4f32.i32.v8i32(<4 x float> %arg3, i32 %arg4, <8 x i32> %arg, i32 15, i1 false, i1 false, i1 false, i1 false)
+ %data = call <4 x float> @llvm.amdgcn.image.load.v4f32.i32.v8i32(i32 %arg4, <8 x i32> %arg1, i32 15, i1 false, i1 false, i1 false, i1 false)
+ call void @llvm.amdgcn.image.store.v4f32.i32.v8i32(<4 x float> %data, i32 %arg4, <8 x i32> %arg2, i32 15, i1 false, i1 false, i1 false, i1 false)
ret void
}
@@ -149,21 +147,22 @@ main_body:
; VI-LABEL: image_load_mmo
; VI: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0
; VI: ds_write2_b32 v{{[0-9]+}}, [[ZERO]], [[ZERO]] offset1:4
-define amdgpu_ps void @image_load_mmo(float addrspace(3)* %lds, <2 x i32> %c, <8 x i32> inreg %rsrc) {
- store float 0.0, float addrspace(3)* %lds
- %tex = call float @llvm.amdgcn.image.load.f32.v2i32.v8i32(<2 x i32> %c, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+define amdgpu_ps void @image_load_mmo(float addrspace(3)* %lds, <2 x i32> %c, <8 x i32> inreg %rsrc) #0 {
+bb:
+ store float 0.000000e+00, float addrspace(3)* %lds
+ %tex = call float @llvm.amdgcn.image.load.f32.v2i32.v8i32(<2 x i32> %c, <8 x i32> %rsrc, i32 15, i1 false, i1 false, i1 false, i1 false)
%tmp2 = getelementptr float, float addrspace(3)* %lds, i32 4
- store float 0.0, float addrspace(3)* %tmp2
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %tex, float %tex, float %tex, float %tex)
+ store float 0.000000e+00, float addrspace(3)* %tmp2
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tex, float %tex, float %tex, float %tex, i1 true, i1 true) #0
ret void
}
declare float @llvm.amdgcn.image.load.f32.v2i32.v8i32(<2 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #1
declare <2 x float> @llvm.amdgcn.image.load.v2f32.v4i32.v8i32(<4 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #1
declare void @llvm.amdgcn.image.store.f32.i32.v8i32(float, i32, <8 x i32>, i32, i1, i1, i1, i1) #0
-declare void @llvm.amdgcn.image.store.v2f32.v4i32.v8i32(<2 x float>, <4 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #0
+declare void @llvm.amdgcn.image.store.v2f32.v4i32.v8i32(<2 x float>, <4 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #0
declare void @llvm.amdgcn.image.store.v4f32.i32.v8i32(<4 x float>, i32, <8 x i32>, i32, i1, i1, i1, i1) #0
declare void @llvm.amdgcn.image.store.v4f32.v2i32.v8i32(<4 x float>, <2 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #0
declare void @llvm.amdgcn.image.store.v4f32.v4i32.v8i32(<4 x float>, <4 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #0
@@ -173,10 +172,9 @@ declare <4 x float> @llvm.amdgcn.image.load.v4f32.i32.v8i32(i32, <8 x i32>, i32,
declare <4 x float> @llvm.amdgcn.image.load.v4f32.v2i32.v8i32(<2 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #1
declare <4 x float> @llvm.amdgcn.image.load.v4f32.v4i32.v8i32(<4 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #1
declare <4 x float> @llvm.amdgcn.image.load.mip.v4f32.v4i32.v8i32(<4 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #1
+declare <4 x float> @llvm.amdgcn.image.getresinfo.v4f32.i32.v8i32(i32, <8 x i32>, i32, i1, i1, i1, i1) #1
-declare <4 x float> @llvm.amdgcn.image.getresinfo.v4f32.i32.v8i32(i32, <8 x i32>, i32, i1, i1, i1, i1) #0
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
attributes #0 = { nounwind }
attributes #1 = { nounwind readonly }
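The recurring change in the file above is the migration from the legacy @llvm.SI.export intrinsic to @llvm.amdgcn.exp.f32. A minimal before/after sketch of the two call forms; the operand annotations reflect my reading of the new intrinsic's tgt/en/done/vm layout and are not stated in the patch itself:

  ; legacy form, as removed above
  call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
  ; replacement, as added: target 0 (MRT0), en = 0xf enabling all four
  ; channels, and the trailing i1 pair setting the done/vm flags
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r0, float %r1, float %r2, float %r3, i1 true, i1 true)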
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.ll
index 752ec2d42fac..4f90b0a25eaa 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.ll
@@ -3,7 +3,7 @@
; GCN-LABEL: {{^}}sample:
; GCN: image_sample {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -12,7 +12,7 @@ main_body:
; GCN-LABEL: {{^}}sample_cl:
; GCN: image_sample_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.cl.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -21,7 +21,7 @@ main_body:
; GCN-LABEL: {{^}}sample_d:
; GCN: image_sample_d {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_d(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_d(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -30,7 +30,7 @@ main_body:
; GCN-LABEL: {{^}}sample_d_cl:
; GCN: image_sample_d_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_d_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_d_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.d.cl.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -39,7 +39,7 @@ main_body:
; GCN-LABEL: {{^}}sample_l:
; GCN: image_sample_l {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_l(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_l(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.l.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -48,7 +48,7 @@ main_body:
; GCN-LABEL: {{^}}sample_b:
; GCN: image_sample_b {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_b(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_b(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.b.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -57,7 +57,7 @@ main_body:
; GCN-LABEL: {{^}}sample_b_cl:
; GCN: image_sample_b_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_b_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_b_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.b.cl.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -66,7 +66,7 @@ main_body:
; GCN-LABEL: {{^}}sample_lz:
; GCN: image_sample_lz {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_lz(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_lz(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.lz.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -75,7 +75,7 @@ main_body:
; GCN-LABEL: {{^}}sample_cd:
; GCN: image_sample_cd {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_cd(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_cd(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.cd.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -84,7 +84,7 @@ main_body:
; GCN-LABEL: {{^}}sample_cd_cl:
; GCN: image_sample_cd_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_cd_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_cd_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.cd.cl.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -93,7 +93,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c:
; GCN: image_sample_c {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -102,7 +102,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c_cl:
; GCN: image_sample_c_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.cl.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -111,7 +111,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c_d:
; GCN: image_sample_c_d {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c_d(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c_d(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.d.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -120,7 +120,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c_d_cl:
; GCN: image_sample_c_d_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c_d_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c_d_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.d.cl.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -129,7 +129,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c_l:
; GCN: image_sample_c_l {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c_l(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c_l(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.l.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -138,7 +138,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c_b:
; GCN: image_sample_c_b {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c_b(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c_b(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.b.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -147,7 +147,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c_b_cl:
; GCN: image_sample_c_b_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c_b_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c_b_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.b.cl.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -156,7 +156,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c_lz:
; GCN: image_sample_c_lz {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c_lz(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c_lz(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.lz.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -165,7 +165,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c_cd:
; GCN: image_sample_c_cd {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c_cd(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c_cd(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.cd.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -174,7 +174,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c_cd_cl:
; GCN: image_sample_c_cd_cl {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c_cd_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c_cd_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.cd.cl.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -183,7 +183,7 @@ main_body:
; GCN-LABEL: {{^}}sample_f32:
; GCN: image_sample {{v[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x1
-define void @sample_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @sample_f32(float addrspace(1)* %out) {
main_body:
%r = call float @llvm.amdgcn.image.sample.f32.v2f32.v8i32(<2 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 1, i1 0, i1 0, i1 0, i1 0, i1 0)
store float %r, float addrspace(1)* %out
@@ -192,13 +192,221 @@ main_body:
; GCN-LABEL: {{^}}sample_v2f32:
; GCN: image_sample {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0x3
-define void @sample_v2f32(<2 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_v2f32(<2 x float> addrspace(1)* %out) {
main_body:
%r = call <2 x float> @llvm.amdgcn.image.sample.v2f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 3, i1 0, i1 0, i1 0, i1 0, i1 0)
store <2 x float> %r, <2 x float> addrspace(1)* %out
ret void
}
+; GCN-LABEL: {{^}}adjust_writemask_sample_0:
+; GCN: image_sample v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} dmask:0x1{{$}}
+define amdgpu_kernel void @adjust_writemask_sample_0(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_01:
+; GCN: image_sample v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} dmask:0x3{{$}}
+define amdgpu_kernel void @adjust_writemask_sample_01(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ %elt1 = extractelement <4 x float> %r, i32 1
+ store volatile float %elt0, float addrspace(1)* %out
+ store volatile float %elt1, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_012:
+; GCN: image_sample v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} dmask:0x7{{$}}
+define amdgpu_kernel void @adjust_writemask_sample_012(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ %elt1 = extractelement <4 x float> %r, i32 1
+ %elt2 = extractelement <4 x float> %r, i32 2
+ store volatile float %elt0, float addrspace(1)* %out
+ store volatile float %elt1, float addrspace(1)* %out
+ store volatile float %elt2, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_12:
+; GCN: image_sample v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} dmask:0x6{{$}}
+define amdgpu_kernel void @adjust_writemask_sample_12(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
+ %elt1 = extractelement <4 x float> %r, i32 1
+ %elt2 = extractelement <4 x float> %r, i32 2
+ store volatile float %elt1, float addrspace(1)* %out
+ store volatile float %elt2, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_03:
+; GCN: image_sample v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} dmask:0x9{{$}}
+define amdgpu_kernel void @adjust_writemask_sample_03(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ %elt3 = extractelement <4 x float> %r, i32 3
+ store volatile float %elt0, float addrspace(1)* %out
+ store volatile float %elt3, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_13:
+; GCN: image_sample v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} dmask:0xa{{$}}
+define amdgpu_kernel void @adjust_writemask_sample_13(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
+ %elt1 = extractelement <4 x float> %r, i32 1
+ %elt3 = extractelement <4 x float> %r, i32 3
+ store volatile float %elt1, float addrspace(1)* %out
+ store volatile float %elt3, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_123:
+; GCN: image_sample v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} dmask:0xe{{$}}
+define amdgpu_kernel void @adjust_writemask_sample_123(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
+ %elt1 = extractelement <4 x float> %r, i32 1
+ %elt2 = extractelement <4 x float> %r, i32 2
+ %elt3 = extractelement <4 x float> %r, i32 3
+ store volatile float %elt1, float addrspace(1)* %out
+ store volatile float %elt2, float addrspace(1)* %out
+ store volatile float %elt3, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_variable_dmask_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_variable_dmask_enabled(float addrspace(1)* %out, i32 %dmask) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 %dmask, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_cl_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_cl_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.cl.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_d_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_d_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_d_cl_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_d_cl_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.d.cl.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_l_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_l_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.l.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_b_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_b_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.b.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_b_cl_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_b_cl_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.b.cl.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_lz_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_lz_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.lz.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_cd_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_cd_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.cd.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_cd_cl_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_cd_cl_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.cd.cl.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #0
declare <4 x float> @llvm.amdgcn.image.sample.cl.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #0
declare <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #0
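The adjust_writemask_* tests added above all share one shape: the IR requests all four channels (dmask = 15), only some elements of the result are used, and the checks expect codegen to shrink the dmask to exactly the live lanes, one bit per channel (0x1 for element 0 alone, 0x6 for elements 1 and 2, 0x9 for 0 and 3, and so on). A minimal sketch of the pattern, lifted from adjust_writemask_sample_0:

  ; the IR asks for all four channels ...
  %r = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
  %elt0 = extractelement <4 x float> %r, i32 0
  ; ... but only element 0 is live, so the checks expect a single-channel
  ; image_sample with dmask:0x1. In the *_none_enabled and variable-dmask
  ; variants, both the image op and the store are expected to be deleted.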
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.o.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.o.ll
index d10fd0824692..42d7bc0e7778 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.o.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.o.ll
@@ -3,7 +3,7 @@
; GCN-LABEL: {{^}}sample:
; GCN: image_sample_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -12,7 +12,7 @@ main_body:
; GCN-LABEL: {{^}}sample_cl:
; GCN: image_sample_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.cl.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -21,7 +21,7 @@ main_body:
; GCN-LABEL: {{^}}sample_d:
; GCN: image_sample_d_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_d(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_d(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.d.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -30,7 +30,7 @@ main_body:
; GCN-LABEL: {{^}}sample_d_cl:
; GCN: image_sample_d_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_d_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_d_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.d.cl.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -39,7 +39,7 @@ main_body:
; GCN-LABEL: {{^}}sample_l:
; GCN: image_sample_l_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_l(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_l(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.l.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -48,7 +48,7 @@ main_body:
; GCN-LABEL: {{^}}sample_b:
; GCN: image_sample_b_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_b(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_b(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.b.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -57,7 +57,7 @@ main_body:
; GCN-LABEL: {{^}}sample_b_cl:
; GCN: image_sample_b_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_b_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_b_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.b.cl.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -66,7 +66,7 @@ main_body:
; GCN-LABEL: {{^}}sample_lz:
; GCN: image_sample_lz_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_lz(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_lz(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.lz.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -75,7 +75,7 @@ main_body:
; GCN-LABEL: {{^}}sample_cd:
; GCN: image_sample_cd_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_cd(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_cd(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.cd.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -84,7 +84,7 @@ main_body:
; GCN-LABEL: {{^}}sample_cd_cl:
; GCN: image_sample_cd_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_cd_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_cd_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.cd.cl.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -93,7 +93,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c:
; GCN: image_sample_c_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -102,7 +102,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c_cl:
; GCN: image_sample_c_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.cl.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -111,7 +111,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c_d:
; GCN: image_sample_c_d_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c_d(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c_d(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.d.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -120,7 +120,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c_d_cl:
; GCN: image_sample_c_d_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c_d_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c_d_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.d.cl.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -129,7 +129,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c_l:
; GCN: image_sample_c_l_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c_l(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c_l(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.l.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -138,7 +138,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c_b:
; GCN: image_sample_c_b_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c_b(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c_b(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.b.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -147,7 +147,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c_b_cl:
; GCN: image_sample_c_b_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c_b_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c_b_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.b.cl.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -156,7 +156,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c_lz:
; GCN: image_sample_c_lz_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c_lz(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c_lz(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.lz.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -165,7 +165,7 @@ main_body:
; GCN-LABEL: {{^}}sample_c_cd:
; GCN: image_sample_c_cd_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c_cd(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c_cd(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.cd.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
@@ -174,13 +174,232 @@ main_body:
; GCN-LABEL: {{^}}sample_c_cd_cl:
; GCN: image_sample_c_cd_cl_o {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}} dmask:0xf
-define void @sample_c_cd_cl(<4 x float> addrspace(1)* %out) {
+define amdgpu_kernel void @sample_c_cd_cl(<4 x float> addrspace(1)* %out) {
main_body:
%r = call <4 x float> @llvm.amdgcn.image.sample.c.cd.cl.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 0, i1 0, i1 0, i1 0, i1 0)
store <4 x float> %r, <4 x float> addrspace(1)* %out
ret void
}
+; GCN-LABEL: {{^}}adjust_writemask_sample_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_cl_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_cl_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.cl.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_d_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_d_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.d.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_d_cl_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_d_cl_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.d.cl.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_l_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_l_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.l.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_b_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_b_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.b.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_b_cl_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_b_cl_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.b.cl.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_lz_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_lz_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.lz.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_cd_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_cd_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.cd.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_cd_cl_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_cd_cl_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.cd.cl.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_c_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_c_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.c.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_c_cl_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_c_cl_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.c.cl.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_c_d_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_c_d_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.c.d.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_c_d_cl_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_c_d_cl_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.c.d.cl.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_c_l_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_c_l_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.c.l.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_c_b_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_c_b_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.c.b.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_c_b_cl_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_c_b_cl_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.c.b.cl.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_c_lz_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_c_lz_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.c.lz.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_c_cd_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_c_cd_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.c.cd.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}adjust_writemask_sample_c_cd_cl_o_none_enabled:
+; GCN-NOT: image
+; GCN-NOT: store
+define amdgpu_kernel void @adjust_writemask_sample_c_cd_cl_o_none_enabled(float addrspace(1)* %out) {
+main_body:
+ %r = call <4 x float> @llvm.amdgcn.image.sample.c.cd.cl.o.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 0, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %r, i32 0
+ store float %elt0, float addrspace(1)* %out
+ ret void
+}
declare <4 x float> @llvm.amdgcn.image.sample.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #0
declare <4 x float> @llvm.amdgcn.image.sample.cl.o.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #0
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.interp.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.interp.ll
index 9ba5c69a9a24..c4795a23cd5b 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.interp.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.interp.ll
@@ -1,5 +1,7 @@
-;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=GCN %s
-;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefixes=GCN,VI %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI %s
+; RUN: llc -march=amdgcn -mcpu=kabini -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,16BANK %s
+; RUN: llc -march=amdgcn -mcpu=stoney -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,16BANK %s
; GCN-LABEL: {{^}}v_interp:
; GCN-NOT: s_wqm
@@ -8,17 +10,17 @@
; GCN-DAG: v_interp_p1_f32 v{{[0-9]+}}, v{{[0-9]+}}, attr0.y{{$}}
; GCN-DAG: v_interp_p2_f32 v{{[0-9]+}}, v{{[0-9]+}}, attr0.y{{$}}
; GCN-DAG: v_interp_mov_f32 v{{[0-9]+}}, p0, attr0.x{{$}}
-define amdgpu_ps void @v_interp(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x float>) {
+define amdgpu_ps void @v_interp(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x float> %arg4) #0 {
main_body:
- %i = extractelement <2 x float> %4, i32 0
- %j = extractelement <2 x float> %4, i32 1
- %p0_0 = call float @llvm.amdgcn.interp.p1(float %i, i32 0, i32 0, i32 %3)
- %p1_0 = call float @llvm.amdgcn.interp.p2(float %p0_0, float %j, i32 0, i32 0, i32 %3)
- %p0_1 = call float @llvm.amdgcn.interp.p1(float %i, i32 1, i32 0, i32 %3)
- %p1_1 = call float @llvm.amdgcn.interp.p2(float %p0_1, float %j, i32 1, i32 0, i32 %3)
- %const = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %3)
+ %i = extractelement <2 x float> %arg4, i32 0
+ %j = extractelement <2 x float> %arg4, i32 1
+ %p0_0 = call float @llvm.amdgcn.interp.p1(float %i, i32 0, i32 0, i32 %arg3)
+ %p1_0 = call float @llvm.amdgcn.interp.p2(float %p0_0, float %j, i32 0, i32 0, i32 %arg3)
+ %p0_1 = call float @llvm.amdgcn.interp.p1(float %i, i32 1, i32 0, i32 %arg3)
+ %p1_1 = call float @llvm.amdgcn.interp.p2(float %p0_1, float %j, i32 1, i32 0, i32 %arg3)
+ %const = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %arg3)
%w = fadd float %p1_1, %const
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %p0_0, float %p0_0, float %p1_1, float %w)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %p0_0, float %p0_0, float %p1_1, float %w, i1 true, i1 true) #0
ret void
}
@@ -37,7 +39,8 @@ main_body:
; GCN-DAG: v_interp_p1_f32 v{{[0-9]+}}, v{{[0-9]+}}, attr63.w{{$}}
; GCN-DAG: v_interp_p1_f32 v{{[0-9]+}}, v{{[0-9]+}}, attr64.w{{$}}
; GCN-DAG: v_interp_p1_f32 v{{[0-9]+}}, v{{[0-9]+}}, attr64.x{{$}}
-define amdgpu_ps void @v_interp_p1(float %i) {
+define amdgpu_ps void @v_interp_p1(float %i) #0 {
+bb:
%p0_0 = call float @llvm.amdgcn.interp.p1(float %i, i32 0, i32 0, i32 256)
%p0_1 = call float @llvm.amdgcn.interp.p1(float %i, i32 1, i32 0, i32 256)
%p0_2 = call float @llvm.amdgcn.interp.p1(float %i, i32 2, i32 0, i32 256)
@@ -77,7 +80,8 @@ define amdgpu_ps void @v_interp_p1(float %i) {
; GCN-DAG: v_interp_p2_f32 v{{[0-9]+}}, v{{[0-9]+}}, attr63.x{{$}}
; GCN-DAG: v_interp_p2_f32 v{{[0-9]+}}, v{{[0-9]+}}, attr64.x{{$}}
; GCN-DAG: v_interp_p2_f32 v{{[0-9]+}}, v{{[0-9]+}}, attr64.x{{$}}
-define amdgpu_ps void @v_interp_p2(float %x, float %j) {
+define amdgpu_ps void @v_interp_p2(float %x, float %j) #0 {
+bb:
%p2_0 = call float @llvm.amdgcn.interp.p2(float %x, float %j, i32 0, i32 0, i32 256)
%p2_1 = call float @llvm.amdgcn.interp.p2(float %x, float %j, i32 1, i32 0, i32 256)
%p2_2 = call float @llvm.amdgcn.interp.p2(float %x, float %j, i32 2, i32 0, i32 256)
@@ -118,7 +122,8 @@ define amdgpu_ps void @v_interp_p2(float %x, float %j) {
; GCN-DAG: v_interp_mov_f32 v{{[0-9]+}}, p10, attr64.y{{$}}
; GCN-DAG: v_interp_mov_f32 v{{[0-9]+}}, invalid_param_3, attr64.y{{$}}
; GCN-DAG: v_interp_mov_f32 v{{[0-9]+}}, invalid_param_10, attr64.x{{$}}
-define amdgpu_ps void @v_interp_mov(float %x, float %j) {
+define amdgpu_ps void @v_interp_mov(float %x, float %j) #0 {
+bb:
%mov_0 = call float @llvm.amdgcn.interp.mov(i32 0, i32 0, i32 0, i32 256)
%mov_1 = call float @llvm.amdgcn.interp.mov(i32 1, i32 0, i32 0, i32 256)
%mov_2 = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 256)
@@ -161,23 +166,57 @@ define amdgpu_ps void @v_interp_mov(float %x, float %j) {
; VI-DAG: v_interp_mov_f32 v{{[0-9]+}}, p0, attr0.x{{$}}
; VI: s_mov_b32 m0, -1{{$}}
; VI: ds_write2_b32 v{{[0-9]+}}, [[ZERO]], [[ZERO]] offset1:4
-define amdgpu_ps void @v_interp_readnone(float addrspace(3)* %lds) {
- store float 0.0, float addrspace(3)* %lds
+define amdgpu_ps void @v_interp_readnone(float addrspace(3)* %lds) #0 {
+bb:
+ store float 0.000000e+00, float addrspace(3)* %lds
%tmp1 = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 0)
%tmp2 = getelementptr float, float addrspace(3)* %lds, i32 4
- store float 0.0, float addrspace(3)* %tmp2
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %tmp1, float %tmp1, float %tmp1, float %tmp1)
+ store float 0.000000e+00, float addrspace(3)* %tmp2
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp1, float %tmp1, float %tmp1, float %tmp1, i1 true, i1 true) #0
ret void
}
-; Function Attrs: nounwind readnone
-declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #0
+; Test that v_interp_p1 uses different source and destination registers
+; on 16-bank LDS chips.
-; Function Attrs: nounwind readnone
-declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #0
-
-declare float @llvm.amdgcn.interp.mov(i32, i32, i32, i32) #0
+; GCN-LABEL: {{^}}v_interp_p1_bank16_bug:
+; 16BANK-NOT: v_interp_p1_f32 [[DST:v[0-9]+]], [[DST]]
+define amdgpu_ps void @v_interp_p1_bank16_bug([6 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg13, [17 x <4 x i32>] addrspace(2)* byval %arg14, [34 x <8 x i32>] addrspace(2)* byval %arg15, float inreg %arg16, i32 inreg %arg17, <2 x i32> %arg18, <2 x i32> %arg19, <2 x i32> %arg20, <3 x i32> %arg21, <2 x i32> %arg22, <2 x i32> %arg23, <2 x i32> %arg24, float %arg25, float %arg26, float %arg27, float %arg28, float %arg29, float %arg30, i32 %arg31, float %arg32, float %arg33) #0 {
+main_body:
+ %i.i = extractelement <2 x i32> %arg19, i32 0
+ %j.i = extractelement <2 x i32> %arg19, i32 1
+ %i.f.i = bitcast i32 %i.i to float
+ %j.f.i = bitcast i32 %j.i to float
+ %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 0, i32 0, i32 %arg17) #0
+ %p2.i = call float @llvm.amdgcn.interp.p2(float %p1.i, float %j.f.i, i32 0, i32 0, i32 %arg17) #0
+ %i.i7 = extractelement <2 x i32> %arg19, i32 0
+ %j.i8 = extractelement <2 x i32> %arg19, i32 1
+ %i.f.i9 = bitcast i32 %i.i7 to float
+ %j.f.i10 = bitcast i32 %j.i8 to float
+ %p1.i11 = call float @llvm.amdgcn.interp.p1(float %i.f.i9, i32 1, i32 0, i32 %arg17) #0
+ %p2.i12 = call float @llvm.amdgcn.interp.p2(float %p1.i11, float %j.f.i10, i32 1, i32 0, i32 %arg17) #0
+ %i.i1 = extractelement <2 x i32> %arg19, i32 0
+ %j.i2 = extractelement <2 x i32> %arg19, i32 1
+ %i.f.i3 = bitcast i32 %i.i1 to float
+ %j.f.i4 = bitcast i32 %j.i2 to float
+ %p1.i5 = call float @llvm.amdgcn.interp.p1(float %i.f.i3, i32 2, i32 0, i32 %arg17) #0
+ %p2.i6 = call float @llvm.amdgcn.interp.p2(float %p1.i5, float %j.f.i4, i32 2, i32 0, i32 %arg17) #0
+ %tmp = call float @llvm.fabs.f32(float %p2.i)
+ %tmp34 = call float @llvm.fabs.f32(float %p2.i12)
+ %tmp35 = call float @llvm.fabs.f32(float %p2.i6)
+ %tmp36 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %tmp, float %tmp34)
+ %tmp38 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %tmp35, float 1.000000e+00)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp36, <2 x half> %tmp38, i1 true, i1 true) #0
+ ret void
+}
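+
+; FileCheck binds [[DST:v[0-9]+]] and reuses [[DST]] on the same -NOT line,
+; so the pattern only matches a v_interp_p1_f32 whose destination and first
+; source are the same VGPR, which is exactly the overlap this test rules out
+; on 16-bank LDS chips.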
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+declare float @llvm.fabs.f32(float) #1
+declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #1
+declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #1
+declare float @llvm.amdgcn.interp.mov(i32, i32, i32, i32) #1
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
+declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
+declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
-attributes #0 = { nounwind readnone }
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.kernarg.segment.ptr.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.kernarg.segment.ptr.ll
index 5d4d4cd7ee46..055dddbfa8af 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.kernarg.segment.ptr.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.kernarg.segment.ptr.ll
@@ -8,7 +8,7 @@
; CO-V2: s_load_dword s{{[0-9]+}}, s[4:5], 0xa
; OS-UNKNOWN: s_load_dword s{{[0-9]+}}, s[0:1], 0xa
-define void @test(i32 addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test(i32 addrspace(1)* %out) #1 {
%kernarg.segment.ptr = call noalias i8 addrspace(2)* @llvm.amdgcn.kernarg.segment.ptr()
%header.ptr = bitcast i8 addrspace(2)* %kernarg.segment.ptr to i32 addrspace(2)*
%gep = getelementptr i32, i32 addrspace(2)* %header.ptr, i64 10
@@ -20,7 +20,7 @@ define void @test(i32 addrspace(1)* %out) #1 {
; ALL-LABEL: {{^}}test_implicit:
; 10 + 9 (36 prepended implicit bytes) + 2 (out pointer) = 21 = 0x15
; OS-UNKNOWN: s_load_dword s{{[0-9]+}}, s[0:1], 0x15
-define void @test_implicit(i32 addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_implicit(i32 addrspace(1)* %out) #1 {
%implicitarg.ptr = call noalias i8 addrspace(2)* @llvm.amdgcn.implicitarg.ptr()
%header.ptr = bitcast i8 addrspace(2)* %implicitarg.ptr to i32 addrspace(2)*
%gep = getelementptr i32, i32 addrspace(2)* %header.ptr, i64 10
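
; As a sanity check of the 0x15 above: the gep skips 10 dwords, the 36
; prepended implicit bytes are 9 dwords, and the i32 addrspace(1)* %out
; argument takes 2 dwords, so the scalar load reads kernarg dword
; 10 + 9 + 2 = 21 = 0x15.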
@@ -39,7 +39,7 @@ define void @test_implicit(i32 addrspace(1)* %out) #1 {
; ALL: v_mov_b32_e32 [[V_VAL:v[0-9]+]], [[VAL]]
; MESA: buffer_store_dword [[V_VAL]]
; HSA: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[V_VAL]]
-define void @test_implicit_alignment(i32 addrspace(1)* %out, <2 x i8> %in) #1 {
+define amdgpu_kernel void @test_implicit_alignment(i32 addrspace(1)* %out, <2 x i8> %in) #1 {
%implicitarg.ptr = call noalias i8 addrspace(2)* @llvm.amdgcn.implicitarg.ptr()
%arg.ptr = bitcast i8 addrspace(2)* %implicitarg.ptr to i32 addrspace(2)*
%val = load i32, i32 addrspace(2)* %arg.ptr
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll
index 6720cbe9d8da..fe211d356070 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll
@@ -7,7 +7,7 @@ declare half @llvm.amdgcn.ldexp.f16(half %a, i32 %b)
; GCN: buffer_load_dword v[[B_I32:[0-9]+]]
; VI: v_ldexp_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_I32]]
; GCN: buffer_store_short v[[R_F16]]
-define void @ldexp_f16(
+define amdgpu_kernel void @ldexp_f16(
half addrspace(1)* %r,
half addrspace(1)* %a,
i32 addrspace(1)* %b) {
@@ -22,7 +22,7 @@ define void @ldexp_f16(
; GCN: buffer_load_dword v[[B_I32:[0-9]+]]
; VI: v_ldexp_f16_e32 v[[R_F16:[0-9]+]], 2.0, v[[B_I32]]
; GCN: buffer_store_short v[[R_F16]]
-define void @ldexp_f16_imm_a(
+define amdgpu_kernel void @ldexp_f16_imm_a(
half addrspace(1)* %r,
i32 addrspace(1)* %b) {
%b.val = load i32, i32 addrspace(1)* %b
@@ -35,7 +35,7 @@ define void @ldexp_f16_imm_a(
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; VI: v_ldexp_f16_e64 v[[R_F16:[0-9]+]], v[[A_F16]], 2{{$}}
; GCN: buffer_store_short v[[R_F16]]
-define void @ldexp_f16_imm_b(
+define amdgpu_kernel void @ldexp_f16_imm_b(
half addrspace(1)* %r,
half addrspace(1)* %a) {
%a.val = load half, half addrspace(1)* %a
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.ll
index a23defd742a8..1ab4e8b80630 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.ll
@@ -7,7 +7,7 @@ declare double @llvm.amdgcn.ldexp.f64(double, i32) nounwind readnone
; SI-LABEL: {{^}}test_ldexp_f32:
; SI: v_ldexp_f32
; SI: s_endpgm
-define void @test_ldexp_f32(float addrspace(1)* %out, float %a, i32 %b) nounwind {
+define amdgpu_kernel void @test_ldexp_f32(float addrspace(1)* %out, float %a, i32 %b) nounwind {
%result = call float @llvm.amdgcn.ldexp.f32(float %a, i32 %b) nounwind readnone
store float %result, float addrspace(1)* %out, align 4
ret void
@@ -16,7 +16,7 @@ define void @test_ldexp_f32(float addrspace(1)* %out, float %a, i32 %b) nounwind
; SI-LABEL: {{^}}test_ldexp_f64:
; SI: v_ldexp_f64
; SI: s_endpgm
-define void @test_ldexp_f64(double addrspace(1)* %out, double %a, i32 %b) nounwind {
+define amdgpu_kernel void @test_ldexp_f64(double addrspace(1)* %out, double %a, i32 %b) nounwind {
%result = call double @llvm.amdgcn.ldexp.f64(double %a, i32 %b) nounwind readnone
store double %result, double addrspace(1)* %out, align 8
ret void
@@ -24,7 +24,7 @@ define void @test_ldexp_f64(double addrspace(1)* %out, double %a, i32 %b) nounwi
; SI-LABEL: {{^}}test_ldexp_undef_f32:
; SI-NOT: v_ldexp_f32
-define void @test_ldexp_undef_f32(float addrspace(1)* %out, i32 %b) nounwind {
+define amdgpu_kernel void @test_ldexp_undef_f32(float addrspace(1)* %out, i32 %b) nounwind {
%result = call float @llvm.amdgcn.ldexp.f32(float undef, i32 %b) nounwind readnone
store float %result, float addrspace(1)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.lerp.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.lerp.ll
index 014369b45015..bc599897f82a 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.lerp.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.lerp.ll
@@ -5,7 +5,7 @@ declare i32 @llvm.amdgcn.lerp(i32, i32, i32) #0
; GCN-LABEL: {{^}}v_lerp:
; GCN: v_lerp_u8 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_lerp(i32 addrspace(1)* %out, i32 %src) nounwind {
+define amdgpu_kernel void @v_lerp(i32 addrspace(1)* %out, i32 %src) nounwind {
%result= call i32 @llvm.amdgcn.lerp(i32 %src, i32 100, i32 100) #0
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.log.clamp.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.log.clamp.ll
index f78257f1d226..feecd6c0e35d 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.log.clamp.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.log.clamp.ll
@@ -7,7 +7,7 @@ declare float @llvm.amdgcn.log.clamp.f32(float) #0
; GCN-LABEL: {{^}}v_log_clamp_f32:
; GCN: v_log_clamp_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}
-define void @v_log_clamp_f32(float addrspace(1)* %out, float %src) #1 {
+define amdgpu_kernel void @v_log_clamp_f32(float addrspace(1)* %out, float %src) #1 {
%log.clamp = call float @llvm.amdgcn.log.clamp.f32(float %src) #0
store float %log.clamp, float addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.mbcnt.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.mbcnt.ll
index 303446b63315..ab76c870796b 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.mbcnt.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.mbcnt.ll
@@ -1,24 +1,22 @@
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; GCN-LABEL: {{^}}mbcnt_intrinsics:
; GCN: v_mbcnt_lo_u32_b32_e64 [[LO:v[0-9]+]], -1, 0
; SI: v_mbcnt_hi_u32_b32_e32 {{v[0-9]+}}, -1, [[LO]]
; VI: v_mbcnt_hi_u32_b32_e64 {{v[0-9]+}}, -1, [[LO]]
-
-define amdgpu_ps void @mbcnt_intrinsics(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg) {
+define amdgpu_ps void @mbcnt_intrinsics(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3) {
main_body:
- %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #1
- %hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo) #1
- %4 = bitcast i32 %hi to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %4, float %4, float %4, float %4)
+ %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
+ %hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo) #0
+ %tmp = bitcast i32 %hi to float
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp, float %tmp, float %tmp, float %tmp, i1 true, i1 true) #1
ret void
}
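
; With a -1 mask and a 0 base, mbcnt.lo counts the lanes below this one in
; the low half of the wave and mbcnt.hi extends the count through the high
; half, so the pair yields the lane's index within the 64-wide wave; the
; checks only pin down the instruction selection, not the runtime value.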
-declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
-
-declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) #1
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) #0
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #1
-attributes #1 = { nounwind readnone }
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp.ll
index 35fdba8f34a3..8baaad190406 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp.ll
@@ -7,7 +7,7 @@
; VI: v_mov_b32_e32 v0, s{{[0-9]+}}
; VI: s_nop 1
; VI: v_mov_b32_dpp v0, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x01,0x08,0x11]
-define void @dpp_test(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @dpp_test(i32 addrspace(1)* %out, i32 %in) {
%tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %in, i32 1, i32 1, i32 1, i1 1) #0
store i32 %tmp0, i32 addrspace(1)* %out
ret void
@@ -19,7 +19,7 @@ define void @dpp_test(i32 addrspace(1)* %out, i32 %in) {
; VI: v_mov_b32_dpp [[VGPR1:v[0-9]+]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
; VI: s_nop 1
; VI: v_mov_b32_dpp v{{[0-9]+}}, [[VGPR1]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
-define void @dpp_wait_states(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @dpp_wait_states(i32 addrspace(1)* %out, i32 %in) {
%tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %in, i32 1, i32 1, i32 1, i1 1) #0
%tmp1 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %tmp0, i32 1, i32 1, i32 1, i1 1) #0
store i32 %tmp1, i32 addrspace(1)* %out
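
; The s_nop 1 expected between the DPP moves covers the wait states VI needs
; after a VALU write before v_mov_b32_dpp may read the same VGPR (s_nop N
; idles for N+1 cycles), which is the hazard dpp_wait_states exercises.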
@@ -36,7 +36,7 @@ define void @dpp_wait_states(i32 addrspace(1)* %out, i32 %in) {
; VI: v_mov_b32_dpp [[VGPR1:v[0-9]+]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
; VI: s_nop 1
; VI: v_mov_b32_dpp v{{[0-9]+}}, [[VGPR1]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
-define void @dpp_first_in_bb(float addrspace(1)* %out, float addrspace(1)* %in, float %cond, float %a, float %b) {
+define amdgpu_kernel void @dpp_first_in_bb(float addrspace(1)* %out, float addrspace(1)* %in, float %cond, float %a, float %b) {
%cmp = fcmp oeq float %cond, 0.0
br i1 %cmp, label %if, label %else
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.mqsad.pk.u16.u8.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.mqsad.pk.u16.u8.ll
index 7c2495e096ec..3a2b87cd87f3 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.mqsad.pk.u16.u8.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.mqsad.pk.u16.u8.ll
@@ -5,7 +5,7 @@ declare i64 @llvm.amdgcn.mqsad.pk.u16.u8(i64, i32, i64) #0
; GCN-LABEL: {{^}}v_mqsad_pk_u16_u8:
; GCN: v_mqsad_pk_u16_u8 v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
-define void @v_mqsad_pk_u16_u8(i64 addrspace(1)* %out, i64 %src) {
+define amdgpu_kernel void @v_mqsad_pk_u16_u8(i64 addrspace(1)* %out, i64 %src) {
%result= call i64 @llvm.amdgcn.mqsad.pk.u16.u8(i64 %src, i32 100, i64 100) #0
store i64 %result, i64 addrspace(1)* %out, align 4
ret void
@@ -13,7 +13,7 @@ define void @v_mqsad_pk_u16_u8(i64 addrspace(1)* %out, i64 %src) {
; GCN-LABEL: {{^}}v_mqsad_pk_u16_u8_non_immediate:
; GCN: v_mqsad_pk_u16_u8 v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
-define void @v_mqsad_pk_u16_u8_non_immediate(i64 addrspace(1)* %out, i64 %src, i32 %a, i64 %b) {
+define amdgpu_kernel void @v_mqsad_pk_u16_u8_non_immediate(i64 addrspace(1)* %out, i64 %src, i32 %a, i64 %b) {
%result= call i64 @llvm.amdgcn.mqsad.pk.u16.u8(i64 %src, i32 %a, i64 %b) #0
store i64 %result, i64 addrspace(1)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.mqsad.u32.u8.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.mqsad.u32.u8.ll
index 04bb97a9eb57..a8d03bf6bbac 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.mqsad.u32.u8.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.mqsad.u32.u8.ll
@@ -5,7 +5,7 @@ declare <4 x i32> @llvm.amdgcn.mqsad.u32.u8(i64, i32, <4 x i32>) #0
; GCN-LABEL: {{^}}v_mqsad_u32_u8_use_non_inline_constant:
; GCN: v_mqsad_u32_u8 v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
-define void @v_mqsad_u32_u8_use_non_inline_constant(<4 x i32> addrspace(1)* %out, i64 %src) {
+define amdgpu_kernel void @v_mqsad_u32_u8_use_non_inline_constant(<4 x i32> addrspace(1)* %out, i64 %src) {
%result = call <4 x i32> @llvm.amdgcn.mqsad.u32.u8(i64 %src, i32 100, <4 x i32> <i32 100, i32 100, i32 100, i32 100>) #0
store <4 x i32> %result, <4 x i32> addrspace(1)* %out, align 4
ret void
@@ -13,7 +13,7 @@ define void @v_mqsad_u32_u8_use_non_inline_constant(<4 x i32> addrspace(1)* %out
; GCN-LABEL: {{^}}v_mqsad_u32_u8_non_immediate:
; GCN: v_mqsad_u32_u8 v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
-define void @v_mqsad_u32_u8_non_immediate(<4 x i32> addrspace(1)* %out, i64 %src, i32 %a, <4 x i32> %b) {
+define amdgpu_kernel void @v_mqsad_u32_u8_non_immediate(<4 x i32> addrspace(1)* %out, i64 %src, i32 %a, <4 x i32> %b) {
%result = call <4 x i32> @llvm.amdgcn.mqsad.u32.u8(i64 %src, i32 %a, <4 x i32> %b) #0
store <4 x i32> %result, <4 x i32> addrspace(1)* %out, align 4
ret void
@@ -21,7 +21,7 @@ define void @v_mqsad_u32_u8_non_immediate(<4 x i32> addrspace(1)* %out, i64 %src
; GCN-LABEL: {{^}}v_mqsad_u32_u8_inline_integer_immediate:
; GCN: v_mqsad_u32_u8 v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
-define void @v_mqsad_u32_u8_inline_integer_immediate(<4 x i32> addrspace(1)* %out, i64 %src, i32 %a) {
+define amdgpu_kernel void @v_mqsad_u32_u8_inline_integer_immediate(<4 x i32> addrspace(1)* %out, i64 %src, i32 %a) {
%result = call <4 x i32> @llvm.amdgcn.mqsad.u32.u8(i64 %src, i32 %a, <4 x i32> <i32 10, i32 20, i32 30, i32 40>) #0
store <4 x i32> %result, <4 x i32> addrspace(1)* %out, align 4
ret void
@@ -29,7 +29,7 @@ define void @v_mqsad_u32_u8_inline_integer_immediate(<4 x i32> addrspace(1)* %ou
; GCN-LABEL: {{^}}v_mqsad_u32_u8_inline_fp_immediate:
; GCN: v_mqsad_u32_u8 v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
-define void @v_mqsad_u32_u8_inline_fp_immediate(<4 x i32> addrspace(1)* %out, i64 %src, i32 %a) {
+define amdgpu_kernel void @v_mqsad_u32_u8_inline_fp_immediate(<4 x i32> addrspace(1)* %out, i64 %src, i32 %a) {
%result = call <4 x i32> @llvm.amdgcn.mqsad.u32.u8(i64 %src, i32 %a, <4 x i32> <i32 1065353216, i32 0, i32 0, i32 0>) #0
store <4 x i32> %result, <4 x i32> addrspace(1)* %out, align 4
ret void
@@ -37,7 +37,7 @@ define void @v_mqsad_u32_u8_inline_fp_immediate(<4 x i32> addrspace(1)* %out, i6
; GCN-LABEL: {{^}}v_mqsad_u32_u8_use_sgpr_vgpr:
; GCN: v_mqsad_u32_u8 v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
-define void @v_mqsad_u32_u8_use_sgpr_vgpr(<4 x i32> addrspace(1)* %out, i64 %src, i32 %a, <4 x i32> addrspace(1)* %input) {
+define amdgpu_kernel void @v_mqsad_u32_u8_use_sgpr_vgpr(<4 x i32> addrspace(1)* %out, i64 %src, i32 %a, <4 x i32> addrspace(1)* %input) {
%in = load <4 x i32>, <4 x i32> addrspace(1) * %input
%result = call <4 x i32> @llvm.amdgcn.mqsad.u32.u8(i64 %src, i32 %a, <4 x i32> %in) #0
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.msad.u8.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.msad.u8.ll
index 83d13ab26846..dfaac042227c 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.msad.u8.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.msad.u8.ll
@@ -5,7 +5,7 @@ declare i32 @llvm.amdgcn.msad.u8(i32, i32, i32) #0
; GCN-LABEL: {{^}}v_msad_u8:
; GCN: v_msad_u8 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_msad_u8(i32 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_msad_u8(i32 addrspace(1)* %out, i32 %src) {
%result= call i32 @llvm.amdgcn.msad.u8(i32 %src, i32 100, i32 100) #0
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -13,7 +13,7 @@ define void @v_msad_u8(i32 addrspace(1)* %out, i32 %src) {
; GCN-LABEL: {{^}}v_msad_u8_non_immediate:
; GCN: v_msad_u8 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_msad_u8_non_immediate(i32 addrspace(1)* %out, i32 %src, i32 %a, i32 %b) {
+define amdgpu_kernel void @v_msad_u8_non_immediate(i32 addrspace(1)* %out, i32 %src, i32 %a, i32 %b) {
%result= call i32 @llvm.amdgcn.msad.u8(i32 %src, i32 %a, i32 %b) #0
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.ps.live.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.ps.live.ll
index fd1a463fd3e9..f0af876567b4 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.ps.live.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.ps.live.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=CHECK %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=CHECK %s
; CHECK-LABEL: {{^}}test1:
; CHECK: v_cndmask_b32_e64 v0, 0, 1, exec
@@ -7,7 +7,7 @@
; there is no WQM use and therefore llvm.amdgcn.ps.live is constant. However,
; the expectation is that the intrinsic will be used in non-trivial shaders,
; so such an optimization doesn't seem worth the effort.
-define amdgpu_ps float @test1() {
+define amdgpu_ps float @test1() #0 {
%live = call i1 @llvm.amdgcn.ps.live()
%live.32 = zext i1 %live to i32
%r = bitcast i32 %live.32 to float
@@ -19,12 +19,11 @@ define amdgpu_ps float @test1() {
; CHECK-DAG: s_wqm_b64 exec, exec
; CHECK-DAG: v_cndmask_b32_e64 [[VAR:v[0-9]+]], 0, 1, [[LIVE]]
; CHECK: image_sample v0, [[VAR]],
-define amdgpu_ps float @test2() {
+define amdgpu_ps float @test2() #0 {
%live = call i1 @llvm.amdgcn.ps.live()
%live.32 = zext i1 %live to i32
-
- %t = call <4 x float> @llvm.SI.image.sample.i32(i32 %live.32, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
-
+ %live.32.bc = bitcast i32 %live.32 to float
+ %t = call <4 x float> @llvm.amdgcn.image.sample.v4f32.f32.v8i32(float %live.32.bc, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%r = extractelement <4 x float> %t, i32 0
ret float %r
}
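
; Here the ps.live result feeds an image sample, so these operations run in
; whole quad mode: s_wqm_b64 re-enables the helper lanes, and the v_cndmask
; on the saved entry exec mask is what still produces 0 for them.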
@@ -35,7 +34,7 @@ define amdgpu_ps float @test2() {
; CHECK-DAG: s_xor_b64 [[HELPER:s\[[0-9]+:[0-9]+\]]], [[LIVE]], -1
; CHECK-DAG: s_and_saveexec_b64 [[SAVED:s\[[0-9]+:[0-9]+\]]], [[HELPER]]
; CHECK: ; %dead
-define amdgpu_ps float @test3(i32 %in) {
+define amdgpu_ps float @test3(i32 %in) #0 {
entry:
%live = call i1 @llvm.amdgcn.ps.live()
br i1 %live, label %end, label %dead
@@ -46,14 +45,15 @@ dead:
end:
%tc = phi i32 [ %in, %entry ], [ %tc.dead, %dead ]
- %t = call <4 x float> @llvm.SI.image.sample.i32(i32 %tc, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
-
+ %tc.bc = bitcast i32 %tc to float
+ %t = call <4 x float> @llvm.amdgcn.image.sample.v4f32.f32.v8i32(float %tc.bc, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
%r = extractelement <4 x float> %t, i32 0
ret float %r
}
-declare i1 @llvm.amdgcn.ps.live() #0
-
-declare <4 x float> @llvm.SI.image.sample.i32(i32, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
+declare i1 @llvm.amdgcn.ps.live() #1
+declare <4 x float> @llvm.amdgcn.image.sample.v4f32.f32.v8i32(float, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
-attributes #0 = { nounwind readnone }
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind readonly }
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.qsad.pk.u16.u8.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.qsad.pk.u16.u8.ll
index ece4224f6e67..be71225c5e06 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.qsad.pk.u16.u8.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.qsad.pk.u16.u8.ll
@@ -5,7 +5,7 @@ declare i64 @llvm.amdgcn.qsad.pk.u16.u8(i64, i32, i64) #0
; GCN-LABEL: {{^}}v_qsad_pk_u16_u8:
; GCN: v_qsad_pk_u16_u8 v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
-define void @v_qsad_pk_u16_u8(i64 addrspace(1)* %out, i64 %src) {
+define amdgpu_kernel void @v_qsad_pk_u16_u8(i64 addrspace(1)* %out, i64 %src) {
%result= call i64 @llvm.amdgcn.qsad.pk.u16.u8(i64 %src, i32 100, i64 100) #0
store i64 %result, i64 addrspace(1)* %out, align 4
ret void
@@ -13,7 +13,7 @@ define void @v_qsad_pk_u16_u8(i64 addrspace(1)* %out, i64 %src) {
; GCN-LABEL: {{^}}v_qsad_pk_u16_u8_non_immediate:
; GCN: v_qsad_pk_u16_u8 v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
-define void @v_qsad_pk_u16_u8_non_immediate(i64 addrspace(1)* %out, i64 %src, i32 %a, i64 %b) {
+define amdgpu_kernel void @v_qsad_pk_u16_u8_non_immediate(i64 addrspace(1)* %out, i64 %src, i32 %a, i64 %b) {
%result= call i64 @llvm.amdgcn.qsad.pk.u16.u8(i64 %src, i32 %a, i64 %b) #0
store i64 %result, i64 addrspace(1)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.queue.ptr.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.queue.ptr.ll
index 6bf871543ca2..9200fe7c67b1 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.queue.ptr.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.queue.ptr.ll
@@ -6,7 +6,7 @@
; GCN-LABEL: {{^}}test:
; GCN: enable_sgpr_queue_ptr = 1
; GCN: s_load_dword s{{[0-9]+}}, s[4:5], 0x0
-define void @test(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @test(i32 addrspace(1)* %out) {
%queue_ptr = call noalias i8 addrspace(2)* @llvm.amdgcn.queue.ptr() #0
%header_ptr = bitcast i8 addrspace(2)* %queue_ptr to i32 addrspace(2)*
%value = load i32, i32 addrspace(2)* %header_ptr
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.f16.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.f16.ll
index f0b8e2a0293f..0f1fa15f47cc 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.f16.ll
@@ -7,7 +7,7 @@ declare half @llvm.amdgcn.rcp.f16(half %a)
; VI: v_rcp_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @rcp_f16(
+define amdgpu_kernel void @rcp_f16(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.legacy.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.legacy.ll
index d53861456c78..71db76d902b7 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.legacy.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.legacy.ll
@@ -7,7 +7,7 @@ declare float @llvm.amdgcn.rcp.legacy(float) #0
; GCN-LABEL: {{^}}rcp_legacy_f32:
; GCN: v_rcp_legacy_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}
-define void @rcp_legacy_f32(float addrspace(1)* %out, float %src) #1 {
+define amdgpu_kernel void @rcp_legacy_f32(float addrspace(1)* %out, float %src) #1 {
%rcp = call float @llvm.amdgcn.rcp.legacy(float %src) #0
store float %rcp, float addrspace(1)* %out, align 4
ret void
@@ -16,7 +16,7 @@ define void @rcp_legacy_f32(float addrspace(1)* %out, float %src) #1 {
; TODO: Really these should be constant folded
; GCN-LABEL: {{^}}rcp_legacy_f32_constant_4.0
; GCN: v_rcp_legacy_f32_e32 {{v[0-9]+}}, 4.0
-define void @rcp_legacy_f32_constant_4.0(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @rcp_legacy_f32_constant_4.0(float addrspace(1)* %out) #1 {
%rcp = call float @llvm.amdgcn.rcp.legacy(float 4.0) #0
store float %rcp, float addrspace(1)* %out, align 4
ret void
@@ -24,7 +24,7 @@ define void @rcp_legacy_f32_constant_4.0(float addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}rcp_legacy_f32_constant_100.0
; GCN: v_rcp_legacy_f32_e32 {{v[0-9]+}}, 0x42c80000
-define void @rcp_legacy_f32_constant_100.0(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @rcp_legacy_f32_constant_100.0(float addrspace(1)* %out) #1 {
%rcp = call float @llvm.amdgcn.rcp.legacy(float 100.0) #0
store float %rcp, float addrspace(1)* %out, align 4
ret void
@@ -32,7 +32,7 @@ define void @rcp_legacy_f32_constant_100.0(float addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}rcp_legacy_undef_f32:
; GCN-NOT: v_rcp_legacy_f32
-define void @rcp_legacy_undef_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @rcp_legacy_undef_f32(float addrspace(1)* %out) #1 {
%rcp = call float @llvm.amdgcn.rcp.legacy(float undef)
store float %rcp, float addrspace(1)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll
index 825231bf8680..ad2d84b7911b 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll
@@ -8,17 +8,35 @@ declare float @llvm.sqrt.f32(float) #0
; FUNC-LABEL: {{^}}rcp_undef_f32:
; SI-NOT: v_rcp_f32
-define void @rcp_undef_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @rcp_undef_f32(float addrspace(1)* %out) #1 {
%rcp = call float @llvm.amdgcn.rcp.f32(float undef)
store float %rcp, float addrspace(1)* %out, align 4
ret void
}
+; FUNC-LABEL: {{^}}rcp_2_f32:
+; SI-NOT: v_rcp_f32
+; SI: v_mov_b32_e32 v{{[0-9]+}}, 0.5
+define amdgpu_kernel void @rcp_2_f32(float addrspace(1)* %out) #1 {
+ %rcp = call float @llvm.amdgcn.rcp.f32(float 2.0)
+ store float %rcp, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}rcp_10_f32:
+; SI-NOT: v_rcp_f32
+; SI: v_mov_b32_e32 v{{[0-9]+}}, 0x3dcccccd
+define amdgpu_kernel void @rcp_10_f32(float addrspace(1)* %out) #1 {
+ %rcp = call float @llvm.amdgcn.rcp.f32(float 10.0)
+ store float %rcp, float addrspace(1)* %out, align 4
+ ret void
+}
+
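+; Both folds above are exact: rcp(2.0) is 0.5, an inline constant, while
+; 1.0/10.0 rounds to the f32 0.1, bit pattern 0x3dcccccd, matching the two
+; v_mov_b32 immediates the checks expect.
+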
; FUNC-LABEL: {{^}}safe_no_fp32_denormals_rcp_f32:
; SI: v_rcp_f32_e32 [[RESULT:v[0-9]+]], s{{[0-9]+}}
; SI-NOT: [[RESULT]]
; SI: buffer_store_dword [[RESULT]]
-define void @safe_no_fp32_denormals_rcp_f32(float addrspace(1)* %out, float %src) #1 {
+define amdgpu_kernel void @safe_no_fp32_denormals_rcp_f32(float addrspace(1)* %out, float %src) #1 {
%rcp = fdiv float 1.0, %src
store float %rcp, float addrspace(1)* %out, align 4
ret void
@@ -28,7 +46,7 @@ define void @safe_no_fp32_denormals_rcp_f32(float addrspace(1)* %out, float %src
; SI: v_rcp_f32_e32 [[RESULT:v[0-9]+]], s{{[0-9]+}}
; SI-NOT: [[RESULT]]
; SI: buffer_store_dword [[RESULT]]
-define void @safe_f32_denormals_rcp_pat_f32(float addrspace(1)* %out, float %src) #4 {
+define amdgpu_kernel void @safe_f32_denormals_rcp_pat_f32(float addrspace(1)* %out, float %src) #4 {
%rcp = fdiv float 1.0, %src
store float %rcp, float addrspace(1)* %out, align 4
ret void
@@ -36,7 +54,7 @@ define void @safe_f32_denormals_rcp_pat_f32(float addrspace(1)* %out, float %src
; FUNC-LABEL: {{^}}unsafe_f32_denormals_rcp_pat_f32:
; SI: v_div_scale_f32
-define void @unsafe_f32_denormals_rcp_pat_f32(float addrspace(1)* %out, float %src) #3 {
+define amdgpu_kernel void @unsafe_f32_denormals_rcp_pat_f32(float addrspace(1)* %out, float %src) #3 {
%rcp = fdiv float 1.0, %src
store float %rcp, float addrspace(1)* %out, align 4
ret void
@@ -45,7 +63,7 @@ define void @unsafe_f32_denormals_rcp_pat_f32(float addrspace(1)* %out, float %s
; FUNC-LABEL: {{^}}safe_rsq_rcp_pat_f32:
; SI: v_sqrt_f32_e32
; SI: v_rcp_f32_e32
-define void @safe_rsq_rcp_pat_f32(float addrspace(1)* %out, float %src) #1 {
+define amdgpu_kernel void @safe_rsq_rcp_pat_f32(float addrspace(1)* %out, float %src) #1 {
%sqrt = call float @llvm.sqrt.f32(float %src)
%rcp = call float @llvm.amdgcn.rcp.f32(float %sqrt)
store float %rcp, float addrspace(1)* %out, align 4
@@ -54,7 +72,7 @@ define void @safe_rsq_rcp_pat_f32(float addrspace(1)* %out, float %src) #1 {
; FUNC-LABEL: {{^}}unsafe_rsq_rcp_pat_f32:
; SI: v_rsq_f32_e32
-define void @unsafe_rsq_rcp_pat_f32(float addrspace(1)* %out, float %src) #2 {
+define amdgpu_kernel void @unsafe_rsq_rcp_pat_f32(float addrspace(1)* %out, float %src) #2 {
%sqrt = call float @llvm.sqrt.f32(float %src)
%rcp = call float @llvm.amdgcn.rcp.f32(float %sqrt)
store float %rcp, float addrspace(1)* %out, align 4
@@ -65,7 +83,7 @@ define void @unsafe_rsq_rcp_pat_f32(float addrspace(1)* %out, float %src) #2 {
; SI: v_rcp_f64_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}
; SI-NOT: [[RESULT]]
; SI: buffer_store_dwordx2 [[RESULT]]
-define void @rcp_f64(double addrspace(1)* %out, double %src) #1 {
+define amdgpu_kernel void @rcp_f64(double addrspace(1)* %out, double %src) #1 {
%rcp = call double @llvm.amdgcn.rcp.f64(double %src)
store double %rcp, double addrspace(1)* %out, align 8
ret void
@@ -75,7 +93,7 @@ define void @rcp_f64(double addrspace(1)* %out, double %src) #1 {
; SI: v_rcp_f64_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}
; SI-NOT: [[RESULT]]
; SI: buffer_store_dwordx2 [[RESULT]]
-define void @unsafe_rcp_f64(double addrspace(1)* %out, double %src) #2 {
+define amdgpu_kernel void @unsafe_rcp_f64(double addrspace(1)* %out, double %src) #2 {
%rcp = call double @llvm.amdgcn.rcp.f64(double %src)
store double %rcp, double addrspace(1)* %out, align 8
ret void
@@ -83,7 +101,7 @@ define void @unsafe_rcp_f64(double addrspace(1)* %out, double %src) #2 {
; FUNC-LABEL: {{^}}rcp_pat_f64:
; SI: v_div_scale_f64
-define void @rcp_pat_f64(double addrspace(1)* %out, double %src) #1 {
+define amdgpu_kernel void @rcp_pat_f64(double addrspace(1)* %out, double %src) #1 {
%rcp = fdiv double 1.0, %src
store double %rcp, double addrspace(1)* %out, align 8
ret void
@@ -93,7 +111,7 @@ define void @rcp_pat_f64(double addrspace(1)* %out, double %src) #1 {
; SI: v_rcp_f64_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}
; SI-NOT: [[RESULT]]
; SI: buffer_store_dwordx2 [[RESULT]]
-define void @unsafe_rcp_pat_f64(double addrspace(1)* %out, double %src) #2 {
+define amdgpu_kernel void @unsafe_rcp_pat_f64(double addrspace(1)* %out, double %src) #2 {
%rcp = fdiv double 1.0, %src
store double %rcp, double addrspace(1)* %out, align 8
ret void
@@ -103,7 +121,7 @@ define void @unsafe_rcp_pat_f64(double addrspace(1)* %out, double %src) #2 {
; SI-NOT: v_rsq_f64_e32
; SI: v_sqrt_f64
; SI: v_rcp_f64
-define void @safe_rsq_rcp_pat_f64(double addrspace(1)* %out, double %src) #1 {
+define amdgpu_kernel void @safe_rsq_rcp_pat_f64(double addrspace(1)* %out, double %src) #1 {
%sqrt = call double @llvm.sqrt.f64(double %src)
%rcp = call double @llvm.amdgcn.rcp.f64(double %sqrt)
store double %rcp, double addrspace(1)* %out, align 8
@@ -114,7 +132,7 @@ define void @safe_rsq_rcp_pat_f64(double addrspace(1)* %out, double %src) #1 {
; SI: v_rsq_f64_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}
; SI-NOT: [[RESULT]]
; SI: buffer_store_dwordx2 [[RESULT]]
-define void @unsafe_rsq_rcp_pat_f64(double addrspace(1)* %out, double %src) #2 {
+define amdgpu_kernel void @unsafe_rsq_rcp_pat_f64(double addrspace(1)* %out, double %src) #2 {
%sqrt = call double @llvm.sqrt.f64(double %src)
%rcp = call double @llvm.amdgcn.rcp.f64(double %sqrt)
store double %rcp, double addrspace(1)* %out, align 8
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
index 2569108e7b18..9f5c809455ea 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
@@ -4,7 +4,7 @@ declare i32 @llvm.amdgcn.readfirstlane(i32) #0
; CHECK-LABEL: {{^}}test_readfirstlane:
; CHECK: v_readfirstlane_b32 s{{[0-9]+}}, v{{[0-9]+}}
-define void @test_readfirstlane(i32 addrspace(1)* %out, i32 %src) #1 {
+define amdgpu_kernel void @test_readfirstlane(i32 addrspace(1)* %out, i32 %src) #1 {
%readfirstlane = call i32 @llvm.amdgcn.readfirstlane(i32 %src)
store i32 %readfirstlane, i32 addrspace(1)* %out, align 4
ret void
@@ -13,7 +13,7 @@ define void @test_readfirstlane(i32 addrspace(1)* %out, i32 %src) #1 {
; CHECK-LABEL: {{^}}test_readfirstlane_imm:
; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], 32
; CHECK: v_readfirstlane_b32 s{{[0-9]+}}, [[VVAL]]
-define void @test_readfirstlane_imm(i32 addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_readfirstlane_imm(i32 addrspace(1)* %out) #1 {
%readfirstlane = call i32 @llvm.amdgcn.readfirstlane(i32 32)
store i32 %readfirstlane, i32 addrspace(1)* %out, align 4
ret void
@@ -25,7 +25,7 @@ define void @test_readfirstlane_imm(i32 addrspace(1)* %out) #1 {
; CHECK: s_mov_b32 [[COPY_M0:s[0-9]+]], m0
; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], [[COPY_M0]]
; CHECK: v_readfirstlane_b32 s{{[0-9]+}}, [[VVAL]]
-define void @test_readfirstlane_m0(i32 addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_readfirstlane_m0(i32 addrspace(1)* %out) #1 {
%m0 = call i32 asm "s_mov_b32 m0, -1", "={M0}"()
%readfirstlane = call i32 @llvm.amdgcn.readfirstlane(i32 %m0)
store i32 %readfirstlane, i32 addrspace(1)* %out, align 4
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
index 436ffff692c6..5e892fad3741 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
@@ -4,7 +4,7 @@ declare i32 @llvm.amdgcn.readlane(i32, i32) #0
; CHECK-LABEL: {{^}}test_readlane_sreg:
; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
-define void @test_readlane_sreg(i32 addrspace(1)* %out, i32 %src0, i32 %src1) #1 {
+define amdgpu_kernel void @test_readlane_sreg(i32 addrspace(1)* %out, i32 %src0, i32 %src1) #1 {
%readlane = call i32 @llvm.amdgcn.readlane(i32 %src0, i32 %src1)
store i32 %readlane, i32 addrspace(1)* %out, align 4
ret void
@@ -13,7 +13,7 @@ define void @test_readlane_sreg(i32 addrspace(1)* %out, i32 %src0, i32 %src1) #1
; CHECK-LABEL: {{^}}test_readlane_imm_sreg:
; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], 32
; CHECK: v_readlane_b32 s{{[0-9]+}}, [[VVAL]], s{{[0-9]+}}
-define void @test_readlane_imm_sreg(i32 addrspace(1)* %out, i32 %src1) #1 {
+define amdgpu_kernel void @test_readlane_imm_sreg(i32 addrspace(1)* %out, i32 %src1) #1 {
%readlane = call i32 @llvm.amdgcn.readlane(i32 32, i32 %src1)
store i32 %readlane, i32 addrspace(1)* %out, align 4
ret void
@@ -25,7 +25,7 @@ define void @test_readlane_imm_sreg(i32 addrspace(1)* %out, i32 %src1) #1 {
; CHECK: s_mov_b32 [[COPY_M0:s[0-9]+]], m0
; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], [[COPY_M0]]
; CHECK: v_readlane_b32 s{{[0-9]+}}, [[VVAL]], s{{[0-9]+}}
-define void @test_readlane_m0_sreg(i32 addrspace(1)* %out, i32 %src1) #1 {
+define amdgpu_kernel void @test_readlane_m0_sreg(i32 addrspace(1)* %out, i32 %src1) #1 {
%m0 = call i32 asm "s_mov_b32 m0, -1", "={M0}"()
%readlane = call i32 @llvm.amdgcn.readlane(i32 %m0, i32 %src1)
store i32 %readlane, i32 addrspace(1)* %out, align 4
@@ -34,7 +34,7 @@ define void @test_readlane_m0_sreg(i32 addrspace(1)* %out, i32 %src1) #1 {
; CHECK-LABEL: {{^}}test_readlane_imm:
; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 32
-define void @test_readlane_imm(i32 addrspace(1)* %out, i32 %src0) #1 {
+define amdgpu_kernel void @test_readlane_imm(i32 addrspace(1)* %out, i32 %src0) #1 {
%readlane = call i32 @llvm.amdgcn.readlane(i32 %src0, i32 32) #0
store i32 %readlane, i32 addrspace(1)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.clamp.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.clamp.ll
index 5f40e0d0986f..3611047f1277 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.clamp.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.clamp.ll
@@ -12,7 +12,7 @@ declare double @llvm.amdgcn.rsq.clamp.f64(double) #1
; VI-DAG: v_min_f32_e32 [[MIN:v[0-9]+]], 0x7f7fffff, [[RSQ]]
; VI: v_max_f32_e32 [[RESULT:v[0-9]+]], 0xff7fffff, [[MIN]]
; VI: buffer_store_dword [[RESULT]]
-define void @rsq_clamp_f32(float addrspace(1)* %out, float %src) #0 {
+define amdgpu_kernel void @rsq_clamp_f32(float addrspace(1)* %out, float %src) #0 {
%rsq_clamp = call float @llvm.amdgcn.rsq.clamp.f32(float %src)
store float %rsq_clamp, float addrspace(1)* %out
ret void
@@ -30,7 +30,7 @@ define void @rsq_clamp_f32(float addrspace(1)* %out, float %src) #0 {
; VI-DAG: v_rsq_f64_e32 [[RSQ:v\[[0-9]+:[0-9]+\]]], s[{{[0-9]+:[0-9]+}}
; VI-DAG: v_min_f64 v[0:1], [[RSQ]], s{{\[}}[[LOW1]]:[[HIGH1]]]
; VI-DAG: v_max_f64 v[0:1], v[0:1], s{{\[}}[[LOW1]]:[[HIGH2]]]
-define void @rsq_clamp_f64(double addrspace(1)* %out, double %src) #0 {
+define amdgpu_kernel void @rsq_clamp_f64(double addrspace(1)* %out, double %src) #0 {
%rsq_clamp = call double @llvm.amdgcn.rsq.clamp.f64(double %src)
store double %rsq_clamp, double addrspace(1)* %out
ret void
@@ -38,7 +38,7 @@ define void @rsq_clamp_f64(double addrspace(1)* %out, double %src) #0 {
; FUNC-LABEL: {{^}}rsq_clamp_undef_f32:
; SI-NOT: v_rsq_clamp_f32
-define void @rsq_clamp_undef_f32(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @rsq_clamp_undef_f32(float addrspace(1)* %out) #0 {
%rsq_clamp = call float @llvm.amdgcn.rsq.clamp.f32(float undef)
store float %rsq_clamp, float addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.f16.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.f16.ll
index 2022d0289862..fd4802140810 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.f16.ll
@@ -7,7 +7,7 @@ declare half @llvm.amdgcn.rsq.f16(half %a)
; VI: v_rsq_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @rsq_f16(
+define amdgpu_kernel void @rsq_f16(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.legacy.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.legacy.ll
index 47bd0d82b834..7f4c2cb19a32 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.legacy.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.legacy.ll
@@ -4,7 +4,7 @@ declare float @llvm.amdgcn.rsq.legacy(float) #0
; FUNC-LABEL: {{^}}rsq_legacy_f32:
; SI: v_rsq_legacy_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}
-define void @rsq_legacy_f32(float addrspace(1)* %out, float %src) #1 {
+define amdgpu_kernel void @rsq_legacy_f32(float addrspace(1)* %out, float %src) #1 {
%rsq = call float @llvm.amdgcn.rsq.legacy(float %src) #0
store float %rsq, float addrspace(1)* %out, align 4
ret void
@@ -13,7 +13,7 @@ define void @rsq_legacy_f32(float addrspace(1)* %out, float %src) #1 {
; TODO: Really these should be constant folded
; FUNC-LABEL: {{^}}rsq_legacy_f32_constant_4.0
; SI: v_rsq_legacy_f32_e32 {{v[0-9]+}}, 4.0
-define void @rsq_legacy_f32_constant_4.0(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @rsq_legacy_f32_constant_4.0(float addrspace(1)* %out) #1 {
%rsq = call float @llvm.amdgcn.rsq.legacy(float 4.0) #0
store float %rsq, float addrspace(1)* %out, align 4
ret void
@@ -21,7 +21,7 @@ define void @rsq_legacy_f32_constant_4.0(float addrspace(1)* %out) #1 {
; FUNC-LABEL: {{^}}rsq_legacy_f32_constant_100.0
; SI: v_rsq_legacy_f32_e32 {{v[0-9]+}}, 0x42c80000
-define void @rsq_legacy_f32_constant_100.0(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @rsq_legacy_f32_constant_100.0(float addrspace(1)* %out) #1 {
%rsq = call float @llvm.amdgcn.rsq.legacy(float 100.0) #0
store float %rsq, float addrspace(1)* %out, align 4
ret void
@@ -29,7 +29,7 @@ define void @rsq_legacy_f32_constant_100.0(float addrspace(1)* %out) #1 {
; FUNC-LABEL: {{^}}rsq_legacy_undef_f32:
; SI-NOT: v_rsq_legacy_f32
-define void @rsq_legacy_undef_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @rsq_legacy_undef_f32(float addrspace(1)* %out) #1 {
%rsq = call float @llvm.amdgcn.rsq.legacy(float undef)
store float %rsq, float addrspace(1)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.ll
index c644288977a3..0ce26d0fe876 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.ll
@@ -6,7 +6,7 @@ declare double @llvm.amdgcn.rsq.f64(double) #0
; FUNC-LABEL: {{^}}rsq_f32:
; SI: v_rsq_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}
-define void @rsq_f32(float addrspace(1)* %out, float %src) #1 {
+define amdgpu_kernel void @rsq_f32(float addrspace(1)* %out, float %src) #1 {
%rsq = call float @llvm.amdgcn.rsq.f32(float %src) #0
store float %rsq, float addrspace(1)* %out, align 4
ret void
@@ -15,7 +15,7 @@ define void @rsq_f32(float addrspace(1)* %out, float %src) #1 {
; TODO: Really these should be constant folded
; FUNC-LABEL: {{^}}rsq_f32_constant_4.0
; SI: v_rsq_f32_e32 {{v[0-9]+}}, 4.0
-define void @rsq_f32_constant_4.0(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @rsq_f32_constant_4.0(float addrspace(1)* %out) #1 {
%rsq = call float @llvm.amdgcn.rsq.f32(float 4.0) #0
store float %rsq, float addrspace(1)* %out, align 4
ret void
@@ -23,7 +23,7 @@ define void @rsq_f32_constant_4.0(float addrspace(1)* %out) #1 {
; FUNC-LABEL: {{^}}rsq_f32_constant_100.0
; SI: v_rsq_f32_e32 {{v[0-9]+}}, 0x42c80000
-define void @rsq_f32_constant_100.0(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @rsq_f32_constant_100.0(float addrspace(1)* %out) #1 {
%rsq = call float @llvm.amdgcn.rsq.f32(float 100.0) #0
store float %rsq, float addrspace(1)* %out, align 4
ret void
@@ -31,7 +31,7 @@ define void @rsq_f32_constant_100.0(float addrspace(1)* %out) #1 {
; FUNC-LABEL: {{^}}rsq_f64:
; SI: v_rsq_f64_e32 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
-define void @rsq_f64(double addrspace(1)* %out, double %src) #1 {
+define amdgpu_kernel void @rsq_f64(double addrspace(1)* %out, double %src) #1 {
%rsq = call double @llvm.amdgcn.rsq.f64(double %src) #0
store double %rsq, double addrspace(1)* %out, align 4
ret void
@@ -40,7 +40,7 @@ define void @rsq_f64(double addrspace(1)* %out, double %src) #1 {
; TODO: Really these should be constant folded
; FUNC-LABEL: {{^}}rsq_f64_constant_4.0
; SI: v_rsq_f64_e32 {{v\[[0-9]+:[0-9]+\]}}, 4.0
-define void @rsq_f64_constant_4.0(double addrspace(1)* %out) #1 {
+define amdgpu_kernel void @rsq_f64_constant_4.0(double addrspace(1)* %out) #1 {
%rsq = call double @llvm.amdgcn.rsq.f64(double 4.0) #0
store double %rsq, double addrspace(1)* %out, align 4
ret void
@@ -50,7 +50,7 @@ define void @rsq_f64_constant_4.0(double addrspace(1)* %out) #1 {
; SI-DAG: s_mov_b32 s{{[0-9]+}}, 0x40590000
; SI-DAG: s_mov_b32 s{{[0-9]+}}, 0{{$}}
; SI: v_rsq_f64_e32 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
-define void @rsq_f64_constant_100.0(double addrspace(1)* %out) #1 {
+define amdgpu_kernel void @rsq_f64_constant_100.0(double addrspace(1)* %out) #1 {
%rsq = call double @llvm.amdgcn.rsq.f64(double 100.0) #0
store double %rsq, double addrspace(1)* %out, align 4
ret void
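
; 100.0 as an f64 is 0x4059000000000000, hence the checks above: the constant
; materializes as a 0x40590000 high dword and a zero low dword feeding the
; v_rsq_f64.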
@@ -58,7 +58,7 @@ define void @rsq_f64_constant_100.0(double addrspace(1)* %out) #1 {
; FUNC-LABEL: {{^}}rsq_undef_f32:
; SI-NOT: v_rsq_f32
-define void @rsq_undef_f32(float addrspace(1)* %out) #1 {
+define amdgpu_kernel void @rsq_undef_f32(float addrspace(1)* %out) #1 {
%rsq = call float @llvm.amdgcn.rsq.f32(float undef)
store float %rsq, float addrspace(1)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll
index 132e476d5e29..5f8ca28ec5f0 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll
@@ -1,10 +1,13 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX8 %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
; GCN-LABEL: {{^}}test_barrier:
-; GCN: buffer_store_dword
-; GCN: s_waitcnt
+; GFX8: buffer_store_dword
+; GFX8: s_waitcnt
+; GFX9: flat_store_dword
+; GFX9-NOT: s_waitcnt
; GCN: s_barrier
-define void @test_barrier(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @test_barrier(i32 addrspace(1)* %out) #0 {
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, i32 addrspace(1)* %out, i32 %tmp
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll
index ecd4ac6824cc..b488565c6b3a 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll
@@ -9,7 +9,7 @@ declare void @llvm.amdgcn.s.waitcnt(i32) #0
; SI-NEXT: s_dcache_inv ; encoding: [0x00,0x00,0xc0,0xc7]
; VI-NEXT: s_dcache_inv ; encoding: [0x00,0x00,0x80,0xc0,0x00,0x00,0x00,0x00]
; GCN-NEXT: s_endpgm
-define void @test_s_dcache_inv() #0 {
+define amdgpu_kernel void @test_s_dcache_inv() #0 {
call void @llvm.amdgcn.s.dcache.inv()
ret void
}
@@ -18,7 +18,7 @@ define void @test_s_dcache_inv() #0 {
; GCN-NEXT: ; BB#0:
; GCN: s_dcache_inv
; GCN: s_waitcnt lgkmcnt(0) ; encoding
-define void @test_s_dcache_inv_insert_wait() #0 {
+define amdgpu_kernel void @test_s_dcache_inv_insert_wait() #0 {
call void @llvm.amdgcn.s.dcache.inv()
call void @llvm.amdgcn.s.waitcnt(i32 0)
br label %end
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll
index 097f35d42c4f..a3a5c329f411 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll
@@ -9,7 +9,7 @@ declare void @llvm.amdgcn.s.waitcnt(i32) #0
; CI-NEXT: s_dcache_inv_vol ; encoding: [0x00,0x00,0x40,0xc7]
; VI-NEXT: s_dcache_inv_vol ; encoding: [0x00,0x00,0x88,0xc0,0x00,0x00,0x00,0x00]
; GCN-NEXT: s_endpgm
-define void @test_s_dcache_inv_vol() #0 {
+define amdgpu_kernel void @test_s_dcache_inv_vol() #0 {
call void @llvm.amdgcn.s.dcache.inv.vol()
ret void
}
@@ -18,7 +18,7 @@ define void @test_s_dcache_inv_vol() #0 {
; GCN-NEXT: ; BB#0:
; GCN-NEXT: s_dcache_inv_vol
; GCN: s_waitcnt lgkmcnt(0) ; encoding
-define void @test_s_dcache_inv_vol_insert_wait() #0 {
+define amdgpu_kernel void @test_s_dcache_inv_vol_insert_wait() #0 {
call void @llvm.amdgcn.s.dcache.inv.vol()
call void @llvm.amdgcn.s.waitcnt(i32 0)
br label %end
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll
index 9ecce7463f6b..909a85dda3e8 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll
@@ -7,7 +7,7 @@ declare void @llvm.amdgcn.s.waitcnt(i32) #0
; VI-NEXT: ; BB#0:
; VI-NEXT: s_dcache_wb ; encoding: [0x00,0x00,0x84,0xc0,0x00,0x00,0x00,0x00]
; VI-NEXT: s_endpgm
-define void @test_s_dcache_wb() #0 {
+define amdgpu_kernel void @test_s_dcache_wb() #0 {
call void @llvm.amdgcn.s.dcache.wb()
ret void
}
@@ -16,7 +16,7 @@ define void @test_s_dcache_wb() #0 {
; VI-NEXT: ; BB#0:
; VI-NEXT: s_dcache_wb
; VI: s_waitcnt lgkmcnt(0) ; encoding
-define void @test_s_dcache_wb_insert_wait() #0 {
+define amdgpu_kernel void @test_s_dcache_wb_insert_wait() #0 {
call void @llvm.amdgcn.s.dcache.wb()
call void @llvm.amdgcn.s.waitcnt(i32 0)
br label %end
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll
index 943f8c67a2e3..217bf97c41a4 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll
@@ -7,7 +7,7 @@ declare void @llvm.amdgcn.s.waitcnt(i32) #0
; VI-NEXT: ; BB#0:
; VI-NEXT: s_dcache_wb_vol ; encoding: [0x00,0x00,0x8c,0xc0,0x00,0x00,0x00,0x00]
; VI-NEXT: s_endpgm
-define void @test_s_dcache_wb_vol() #0 {
+define amdgpu_kernel void @test_s_dcache_wb_vol() #0 {
call void @llvm.amdgcn.s.dcache.wb.vol()
ret void
}
@@ -16,7 +16,7 @@ define void @test_s_dcache_wb_vol() #0 {
; VI-NEXT: ; BB#0:
; VI-NEXT: s_dcache_wb_vol
; VI: s_waitcnt lgkmcnt(0) ; encoding
-define void @test_s_dcache_wb_vol_insert_wait() #0 {
+define amdgpu_kernel void @test_s_dcache_wb_vol_insert_wait() #0 {
call void @llvm.amdgcn.s.dcache.wb.vol()
call void @llvm.amdgcn.s.waitcnt(i32 0)
br label %end
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.decperflevel.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.decperflevel.ll
index 72513fc86f49..8f64c50b9c60 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.decperflevel.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.decperflevel.ll
@@ -20,7 +20,7 @@ declare void @llvm.amdgcn.s.decperflevel(i32) #0
; GCN: s_decperflevel 13{{$}}
; GCN: s_decperflevel 14{{$}}
; GCN: s_decperflevel 15{{$}}
-define void @test_s_decperflevel(i32 %x) #0 {
+define amdgpu_kernel void @test_s_decperflevel(i32 %x) #0 {
call void @llvm.amdgcn.s.decperflevel(i32 0)
call void @llvm.amdgcn.s.decperflevel(i32 1)
call void @llvm.amdgcn.s.decperflevel(i32 2)
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.getreg.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.getreg.ll
index 4304398182a6..906a8a3e05f4 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.getreg.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.getreg.ll
@@ -4,7 +4,7 @@
; GCN-LABEL: {{^}}s_getreg_test:
; GCN: s_getreg_b32 s{{[0-9]+}}, hwreg(HW_REG_LDS_ALLOC, 8, 23)
-define void @s_getreg_test(i32 addrspace(1)* %out) { ; simm16=45574 for lds size.
+define amdgpu_kernel void @s_getreg_test(i32 addrspace(1)* %out) { ; simm16=45574 for lds size.
%lds_size_64dwords = call i32 @llvm.amdgcn.s.getreg(i32 45574)
%lds_size_bytes = shl i32 %lds_size_64dwords, 8
store i32 %lds_size_bytes, i32 addrspace(1)* %out
@@ -14,7 +14,7 @@ define void @s_getreg_test(i32 addrspace(1)* %out) { ; simm16=45574 for lds size
; Call site has additional readnone knowledge.
; GCN-LABEL: {{^}}readnone_s_getreg_test:
; GCN: s_getreg_b32 s{{[0-9]+}}, hwreg(HW_REG_LDS_ALLOC, 8, 23)
-define void @readnone_s_getreg_test(i32 addrspace(1)* %out) { ; simm16=45574 for lds size.
+define amdgpu_kernel void @readnone_s_getreg_test(i32 addrspace(1)* %out) { ; simm16=45574 for lds size.
%lds_size_64dwords = call i32 @llvm.amdgcn.s.getreg(i32 45574) #1
%lds_size_bytes = shl i32 %lds_size_64dwords, 8
store i32 %lds_size_bytes, i32 addrspace(1)* %out
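
; Assuming the standard GCN hwreg packing (id in bits [5:0], offset in bits
; [10:6], width-1 in bits [15:11]) and HW_REG_LDS_ALLOC = 6, the immediate
; decodes as 6 + (8 << 6) + ((23 - 1) << 11) = 6 + 512 + 45056 = 45574,
; i.e. hwreg(HW_REG_LDS_ALLOC, 8, 23) as the checks print it.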
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.incperflevel.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.incperflevel.ll
index 2ae4fc473eaa..49e6e4257906 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.incperflevel.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.incperflevel.ll
@@ -20,7 +20,7 @@ declare void @llvm.amdgcn.s.incperflevel(i32) #0
; GCN: s_incperflevel 13{{$}}
; GCN: s_incperflevel 14{{$}}
; GCN: s_incperflevel 15{{$}}
-define void @test_s_incperflevel(i32 %x) #0 {
+define amdgpu_kernel void @test_s_incperflevel(i32 %x) #0 {
call void @llvm.amdgcn.s.incperflevel(i32 0)
call void @llvm.amdgcn.s.incperflevel(i32 1)
call void @llvm.amdgcn.s.incperflevel(i32 2)
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.memrealtime.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.memrealtime.ll
index d8eda10fdfd8..66041037168a 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.memrealtime.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.memrealtime.ll
@@ -10,7 +10,7 @@ declare i64 @llvm.amdgcn.s.memrealtime() #0
; GCN-NOT: lgkmcnt
; GCN: s_memrealtime s{{\[[0-9]+:[0-9]+\]}}
; GCN: _store_dwordx2
-define void @test_s_memrealtime(i64 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @test_s_memrealtime(i64 addrspace(1)* %out) #0 {
%cycle0 = call i64 @llvm.amdgcn.s.memrealtime()
store volatile i64 %cycle0, i64 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.memtime.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.memtime.ll
index ff9d74619788..6aef769bafad 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.memtime.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.memtime.ll
@@ -11,7 +11,7 @@ declare i64 @llvm.amdgcn.s.memtime() #0
; GCN-NOT: lgkmcnt
; GCN: s_memtime s{{\[[0-9]+:[0-9]+\]}}
; GCN: buffer_store_dwordx2
-define void @test_s_memtime(i64 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @test_s_memtime(i64 addrspace(1)* %out) #0 {
%cycle0 = call i64 @llvm.amdgcn.s.memtime()
store volatile i64 %cycle0, i64 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.sleep.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.sleep.ll
index 870aa48a3417..59c910c71c5a 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.sleep.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.sleep.ll
@@ -20,7 +20,7 @@ declare void @llvm.amdgcn.s.sleep(i32) #0
; GCN: s_sleep 13{{$}}
; GCN: s_sleep 14{{$}}
; GCN: s_sleep 15{{$}}
-define void @test_s_sleep(i32 %x) #0 {
+define amdgpu_kernel void @test_s_sleep(i32 %x) #0 {
call void @llvm.amdgcn.s.sleep(i32 0)
call void @llvm.amdgcn.s.sleep(i32 1)
call void @llvm.amdgcn.s.sleep(i32 2)
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.sad.hi.u8.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.sad.hi.u8.ll
index 3aaed9d53772..2a3705de2b44 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.sad.hi.u8.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.sad.hi.u8.ll
@@ -5,7 +5,7 @@ declare i32 @llvm.amdgcn.sad.hi.u8(i32, i32, i32) #0
; GCN-LABEL: {{^}}v_sad_hi_u8:
; GCN: v_sad_hi_u8 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_hi_u8(i32 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_sad_hi_u8(i32 addrspace(1)* %out, i32 %src) {
%result= call i32 @llvm.amdgcn.sad.hi.u8(i32 %src, i32 100, i32 100) #0
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -13,7 +13,7 @@ define void @v_sad_hi_u8(i32 addrspace(1)* %out, i32 %src) {
; GCN-LABEL: {{^}}v_sad_hi_u8_non_immediate:
; GCN: v_sad_hi_u8 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_hi_u8_non_immediate(i32 addrspace(1)* %out, i32 %src, i32 %a, i32 %b) {
+define amdgpu_kernel void @v_sad_hi_u8_non_immediate(i32 addrspace(1)* %out, i32 %src, i32 %a, i32 %b) {
%result= call i32 @llvm.amdgcn.sad.hi.u8(i32 %src, i32 %a, i32 %b) #0
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.sad.u16.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.sad.u16.ll
index 5438571c5821..c404531513e7 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.sad.u16.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.sad.u16.ll
@@ -5,7 +5,7 @@ declare i32 @llvm.amdgcn.sad.u16(i32, i32, i32) #0
; GCN-LABEL: {{^}}v_sad_u16:
; GCN: v_sad_u16 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_u16(i32 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_sad_u16(i32 addrspace(1)* %out, i32 %src) {
%result= call i32 @llvm.amdgcn.sad.u16(i32 %src, i32 100, i32 100) #0
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -13,7 +13,7 @@ define void @v_sad_u16(i32 addrspace(1)* %out, i32 %src) {
; GCN-LABEL: {{^}}v_sad_u16_non_immediate:
; GCN: v_sad_u16 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_u16_non_immediate(i32 addrspace(1)* %out, i32 %src, i32 %a, i32 %b) {
+define amdgpu_kernel void @v_sad_u16_non_immediate(i32 addrspace(1)* %out, i32 %src, i32 %a, i32 %b) {
%result= call i32 @llvm.amdgcn.sad.u16(i32 %src, i32 %a, i32 %b) #0
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.sad.u8.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.sad.u8.ll
index 9422d7620ca6..1ee876aa724e 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.sad.u8.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.sad.u8.ll
@@ -5,7 +5,7 @@ declare i32 @llvm.amdgcn.sad.u8(i32, i32, i32) #0
; GCN-LABEL: {{^}}v_sad_u8:
; GCN: v_sad_u8 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_u8(i32 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_sad_u8(i32 addrspace(1)* %out, i32 %src) {
%result= call i32 @llvm.amdgcn.sad.u8(i32 %src, i32 100, i32 100) #0
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -13,7 +13,7 @@ define void @v_sad_u8(i32 addrspace(1)* %out, i32 %src) {
; GCN-LABEL: {{^}}v_sad_u8_non_immediate:
; GCN: v_sad_u8 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_u8_non_immediate(i32 addrspace(1)* %out, i32 %src, i32 %a, i32 %b) {
+define amdgpu_kernel void @v_sad_u8_non_immediate(i32 addrspace(1)* %out, i32 %src, i32 %a, i32 %b) {
%result= call i32 @llvm.amdgcn.sad.u8(i32 %src, i32 %a, i32 %b) #0
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.sbfe.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.sbfe.ll
new file mode 100644
index 000000000000..593c95856811
--- /dev/null
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.sbfe.ll
@@ -0,0 +1,556 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}bfe_i32_arg_arg_arg:
+; GCN: v_bfe_i32
+define amdgpu_kernel void @bfe_i32_arg_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 %src0, i32 %src1, i32 %src2)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_arg_arg_imm:
+; GCN: v_bfe_i32
+define amdgpu_kernel void @bfe_i32_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 %src0, i32 %src1, i32 123)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_arg_imm_arg:
+; GCN: v_bfe_i32
+define amdgpu_kernel void @bfe_i32_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 %src0, i32 123, i32 %src2)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_imm_arg_arg:
+; GCN: v_bfe_i32
+define amdgpu_kernel void @bfe_i32_imm_arg_arg(i32 addrspace(1)* %out, i32 %src1, i32 %src2) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 123, i32 %src1, i32 %src2)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_bfe_print_arg:
+; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 2, 8
+define amdgpu_kernel void @v_bfe_print_arg(i32 addrspace(1)* %out, i32 addrspace(1)* %src0) #0 {
+ %load = load i32, i32 addrspace(1)* %src0, align 4
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 %load, i32 2, i32 8)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_arg_0_width_reg_offset:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_arg_0_width_reg_offset(i32 addrspace(1)* %out, i32 %src0, i32 %src1) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.sbfe.i32(i32 %src0, i32 %src1, i32 0)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_arg_0_width_imm_offset:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_arg_0_width_imm_offset(i32 addrspace(1)* %out, i32 %src0, i32 %src1) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.sbfe.i32(i32 %src0, i32 8, i32 0)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_test_6:
+; GCN: v_lshlrev_b32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
+; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_test_6(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %shl, i32 1, i32 31)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_test_7:
+; GCN-NOT: shl
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_test_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %shl, i32 0, i32 31)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_test_8:
+; GCN: buffer_load_dword
+; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 1
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_test_8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %shl, i32 31, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_test_9:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_test_9(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %x, i32 31, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_test_10:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_test_10(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %x, i32 1, i32 31)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_test_11:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_test_11(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %x, i32 8, i32 24)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_test_12:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 24, v{{[0-9]+}}
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_test_12(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %x, i32 24, i32 8)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_test_13:
+; GCN: v_ashrrev_i32_e32 {{v[0-9]+}}, 31, {{v[0-9]+}}
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_test_13(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %shl = ashr i32 %x, 31
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %shl, i32 31, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_test_14:
+; GCN-NOT: lshr
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_test_14(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %shl = lshr i32 %x, 31
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %shl, i32 31, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_0:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_0(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 0, i32 0, i32 0)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_1:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_1(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 12334, i32 0, i32 0)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_2:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_2(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 0, i32 0, i32 1)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_3:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], -1
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_3(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 1, i32 0, i32 1)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_4:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], -1
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_4(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 4294967295, i32 0, i32 1)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_5:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], -1
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_5(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 128, i32 7, i32 1)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_6:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0xffffff80
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_6(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 128, i32 0, i32 8)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_7:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0x7f
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_7(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 127, i32 0, i32 8)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_8:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 1
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_8(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 127, i32 6, i32 8)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_9:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 1
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_9(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 65536, i32 16, i32 8)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_10:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_10(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 65535, i32 16, i32 16)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_11:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], -6
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_11(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 160, i32 4, i32 4)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_12:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_12(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 160, i32 31, i32 1)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_13:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 1
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_13(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 131070, i32 16, i32 16)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_14:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 40
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_14(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 160, i32 2, i32 30)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_15:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 10
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_15(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 160, i32 4, i32 28)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_16:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], -1
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_16(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 4294967295, i32 1, i32 7)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_17:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0x7f
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_17(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 255, i32 1, i32 31)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_i32_constant_fold_test_18:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_i32_constant_fold_test_18(i32 addrspace(1)* %out) #0 {
+ %bfe_i32 = call i32 @llvm.amdgcn.sbfe.i32(i32 255, i32 31, i32 1)
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_sext_in_reg_i24:
+; GCN: buffer_load_dword [[LOAD:v[0-9]+]],
+; GCN-NOT: v_lshl
+; GCN-NOT: v_ashr
+; GCN: v_bfe_i32 [[BFE:v[0-9]+]], [[LOAD]], 0, 24
+; GCN: buffer_store_dword [[BFE]],
+define amdgpu_kernel void @bfe_sext_in_reg_i24(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %x, i32 0, i32 24)
+ %shl = shl i32 %bfe, 8
+ %ashr = ashr i32 %shl, 8
+ store i32 %ashr, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}simplify_demanded_bfe_sdiv:
+; GCN: buffer_load_dword [[LOAD:v[0-9]+]]
+; GCN: v_bfe_i32 [[BFE:v[0-9]+]], [[LOAD]], 1, 16
+; GCN: v_lshrrev_b32_e32 [[TMP0:v[0-9]+]], 31, [[BFE]]
+; GCN: v_add_i32_e32 [[TMP1:v[0-9]+]], vcc, [[TMP0]], [[BFE]]
+; GCN: v_ashrrev_i32_e32 [[TMP2:v[0-9]+]], 1, [[TMP1]]
+; GCN: buffer_store_dword [[TMP2]]
+define amdgpu_kernel void @simplify_demanded_bfe_sdiv(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %src = load i32, i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %src, i32 1, i32 16)
+ %div = sdiv i32 %bfe, 2
+ store i32 %div, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_0_width:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_0_width(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
+ %load = load i32, i32 addrspace(1)* %ptr, align 4
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %load, i32 8, i32 0)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_8_bfe_8:
+; GCN: v_bfe_i32
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_8_bfe_8(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
+ %load = load i32, i32 addrspace(1)* %ptr, align 4
+ %bfe0 = call i32 @llvm.amdgcn.sbfe.i32(i32 %load, i32 0, i32 8)
+ %bfe1 = call i32 @llvm.amdgcn.sbfe.i32(i32 %bfe0, i32 0, i32 8)
+ store i32 %bfe1, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_8_bfe_16:
+; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 8
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_8_bfe_16(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
+ %load = load i32, i32 addrspace(1)* %ptr, align 4
+ %bfe0 = call i32 @llvm.amdgcn.sbfe.i32(i32 %load, i32 0, i32 8)
+ %bfe1 = call i32 @llvm.amdgcn.sbfe.i32(i32 %bfe0, i32 0, i32 16)
+ store i32 %bfe1, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; This really should be folded into a single BFE.
+; GCN-LABEL: {{^}}bfe_16_bfe_8:
+; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 8
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_16_bfe_8(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
+ %load = load i32, i32 addrspace(1)* %ptr, align 4
+ %bfe0 = call i32 @llvm.amdgcn.sbfe.i32(i32 %load, i32 0, i32 16)
+ %bfe1 = call i32 @llvm.amdgcn.sbfe.i32(i32 %bfe0, i32 0, i32 8)
+ store i32 %bfe1, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; Make sure there isn't a redundant BFE
+; GCN-LABEL: {{^}}sext_in_reg_i8_to_i32_bfe:
+; GCN: s_sext_i32_i8 s{{[0-9]+}}, s{{[0-9]+}}
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @sext_in_reg_i8_to_i32_bfe(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+ %c = add i32 %a, %b ; add to prevent folding into extload
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %c, i32 0, i32 8)
+ %shl = shl i32 %bfe, 24
+ %ashr = ashr i32 %shl, 24
+ store i32 %ashr, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}sext_in_reg_i8_to_i32_bfe_wrong:
+define amdgpu_kernel void @sext_in_reg_i8_to_i32_bfe_wrong(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+ %c = add i32 %a, %b ; add to prevent folding into extload
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %c, i32 8, i32 0)
+ %shl = shl i32 %bfe, 24
+ %ashr = ashr i32 %shl, 24
+ store i32 %ashr, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}sextload_i8_to_i32_bfe:
+; GCN: buffer_load_sbyte
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @sextload_i8_to_i32_bfe(i32 addrspace(1)* %out, i8 addrspace(1)* %ptr) #0 {
+ %load = load i8, i8 addrspace(1)* %ptr, align 1
+ %sext = sext i8 %load to i32
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %sext, i32 0, i32 8)
+ %shl = shl i32 %bfe, 24
+ %ashr = ashr i32 %shl, 24
+ store i32 %ashr, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN: .text
+; GCN-LABEL: {{^}}sextload_i8_to_i32_bfe_0:{{.*$}}
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @sextload_i8_to_i32_bfe_0(i32 addrspace(1)* %out, i8 addrspace(1)* %ptr) #0 {
+ %load = load i8, i8 addrspace(1)* %ptr, align 1
+ %sext = sext i8 %load to i32
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %sext, i32 8, i32 0)
+ %shl = shl i32 %bfe, 24
+ %ashr = ashr i32 %shl, 24
+ store i32 %ashr, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}sext_in_reg_i1_bfe_offset_0:
+; GCN-NOT: shr
+; GCN-NOT: shl
+; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 1
+; GCN: s_endpgm
+define amdgpu_kernel void @sext_in_reg_i1_bfe_offset_0(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %shr = ashr i32 %shl, 31
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %shr, i32 0, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}sext_in_reg_i1_bfe_offset_1:
+; GCN: buffer_load_dword
+; GCN-NOT: shl
+; GCN-NOT: shr
+; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 1, 1
+; GCN: s_endpgm
+define amdgpu_kernel void @sext_in_reg_i1_bfe_offset_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 30
+ %shr = ashr i32 %shl, 30
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %shr, i32 1, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}sext_in_reg_i2_bfe_offset_1:
+; GCN: buffer_load_dword
+; GCN-NOT: v_lshl
+; GCN-NOT: v_ashr
+; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 2
+; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 1, 2
+; GCN: s_endpgm
+define amdgpu_kernel void @sext_in_reg_i2_bfe_offset_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 30
+ %shr = ashr i32 %shl, 30
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %shr, i32 1, i32 2)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+declare i32 @llvm.amdgcn.sbfe.i32(i32, i32, i32) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
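
The constant-fold expectations in this file follow directly from the signed bitfield-extract semantics of v_bfe_i32: take `width` bits starting at `offset` and sign-extend them, with a zero width producing zero. A minimal C reference model (a sketch of those semantics, not code from the compiler; the `sbfe` helper name is illustrative) reproduces the checked values:

    #include <assert.h>
    #include <stdint.h>

    /* Sketch of v_bfe_i32 / llvm.amdgcn.sbfe.i32: extract 'width' bits
       starting at 'offset' and sign-extend; width 0 yields 0. Offset and
       width are assumed to come from the low 5 bits of each operand. */
    static int32_t sbfe(uint32_t src, uint32_t offset, uint32_t width) {
        offset &= 31;
        width &= 31;
        if (width == 0)
            return 0;
        uint32_t field = (src >> offset) & ((1u << width) - 1u);
        uint32_t sign = 1u << (width - 1);
        return (int32_t)((field ^ sign) - sign); /* sign-extend the field */
    }

    int main(void) {
        assert(sbfe(128u, 7, 1) == -1);    /* bfe_i32_constant_fold_test_5 */
        assert(sbfe(128u, 0, 8) == -128);  /* test_6 checks 0xffffff80     */
        assert(sbfe(160u, 4, 4) == -6);    /* test_11                      */
        assert(sbfe(160u, 2, 30) == 40);   /* test_14                      */
        assert(sbfe(255u, 1, 31) == 0x7f); /* test_17                      */
        return 0;
    }
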
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.sendmsg.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.sendmsg.ll
new file mode 100644
index 000000000000..94aeb077ebef
--- /dev/null
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.sendmsg.ll
@@ -0,0 +1,127 @@
+;RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+;RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}test_interrupt:
+; GCN: s_mov_b32 m0, 0
+; GCN-NOT: s_mov_b32 m0
+; GCN: s_sendmsg sendmsg(MSG_INTERRUPT)
+define amdgpu_kernel void @test_interrupt() {
+body:
+ call void @llvm.amdgcn.s.sendmsg(i32 1, i32 0)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_gs_emit:
+; GCN: s_mov_b32 m0, 0
+; GCN-NOT: s_mov_b32 m0
+; GCN: s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT, 0)
+define amdgpu_kernel void @test_gs_emit() {
+body:
+ call void @llvm.amdgcn.s.sendmsg(i32 34, i32 0)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_gs_cut:
+; GCN: s_mov_b32 m0, 0
+; GCN-NOT: s_mov_b32 m0
+; GCN: s_sendmsg sendmsg(MSG_GS, GS_OP_CUT, 1)
+define amdgpu_kernel void @test_gs_cut() {
+body:
+ call void @llvm.amdgcn.s.sendmsg(i32 274, i32 0)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_gs_emit_cut:
+; GCN: s_mov_b32 m0, 0
+; GCN-NOT: s_mov_b32 m0
+; GCN: s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT_CUT, 2)
+define amdgpu_kernel void @test_gs_emit_cut() {
+body:
+ call void @llvm.amdgcn.s.sendmsg(i32 562, i32 0)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_gs_done:
+; GCN: s_mov_b32 m0, 0
+; GCN-NOT: s_mov_b32 m0
+; GCN: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+define amdgpu_kernel void @test_gs_done() {
+body:
+ call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
+ ret void
+}
+
+; GCN-LABEL: {{^}}sendmsg:
+; GCN: s_mov_b32 m0, s0
+; VI-NEXT: s_nop 0
+; GCN-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GCN-NEXT: s_endpgm
+define amdgpu_gs void @sendmsg(i32 inreg %a) #0 {
+ call void @llvm.amdgcn.s.sendmsg(i32 3, i32 %a)
+ ret void
+}
+
+; GCN-LABEL: {{^}}sendmsghalt:
+; GCN: s_mov_b32 m0, s0
+; VI-NEXT: s_nop 0
+; GCN-NEXT: s_sendmsghalt sendmsg(MSG_INTERRUPT)
+; GCN-NEXT: s_endpgm
+define amdgpu_gs void @sendmsghalt(i32 inreg %a) #0 {
+ call void @llvm.amdgcn.s.sendmsghalt(i32 1, i32 %a)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_interrupt_halt:
+; GCN: s_mov_b32 m0, 0
+; GCN-NOT: s_mov_b32 m0
+; GCN: s_sendmsghalt sendmsg(MSG_INTERRUPT)
+define amdgpu_kernel void @test_interrupt_halt() {
+body:
+ call void @llvm.amdgcn.s.sendmsghalt(i32 1, i32 0)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_gs_emit_halt:
+; GCN: s_mov_b32 m0, 0
+; GCN-NOT: s_mov_b32 m0
+; GCN: s_sendmsghalt sendmsg(MSG_GS, GS_OP_EMIT, 0)
+define amdgpu_kernel void @test_gs_emit_halt() {
+body:
+ call void @llvm.amdgcn.s.sendmsghalt(i32 34, i32 0)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_gs_cut_halt:
+; GCN: s_mov_b32 m0, 0
+; GCN-NOT: s_mov_b32 m0
+; GCN: s_sendmsghalt sendmsg(MSG_GS, GS_OP_CUT, 1)
+define amdgpu_kernel void @test_gs_cut_halt() {
+body:
+ call void @llvm.amdgcn.s.sendmsghalt(i32 274, i32 0)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_gs_emit_cut_halt:
+; GCN: s_mov_b32 m0, 0
+; GCN-NOT: s_mov_b32 m0
+; GCN: s_sendmsghalt sendmsg(MSG_GS, GS_OP_EMIT_CUT, 2)
+define amdgpu_kernel void @test_gs_emit_cut_halt() {
+body:
+ call void @llvm.amdgcn.s.sendmsghalt(i32 562, i32 0)
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_gs_done_halt:
+; GCN: s_mov_b32 m0, 0
+; GCN-NOT: s_mov_b32 m0
+; GCN: s_sendmsghalt sendmsg(MSG_GS_DONE, GS_OP_NOP)
+define amdgpu_kernel void @test_gs_done_halt() {
+body:
+ call void @llvm.amdgcn.s.sendmsghalt(i32 3, i32 0)
+ ret void
+}
+
+declare void @llvm.amdgcn.s.sendmsg(i32, i32) #0
+declare void @llvm.amdgcn.s.sendmsghalt(i32, i32) #0
+
+attributes #0 = { nounwind }
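
The immediates passed to llvm.amdgcn.s.sendmsg in these tests pack the fields of the s_sendmsg operand. A small decoder (a sketch assuming the SI/VI layout implied by the checks: message type in bits 3:0, GS opcode in bits 5:4, stream id in bits 9:8; not taken from the LLVM sources) maps each constant back to the string the assembler prints:

    #include <stdio.h>

    /* Sketch of the s_sendmsg immediate layout assumed above. */
    static void decode_sendmsg(unsigned imm) {
        static const char *msgs[] = {"?", "MSG_INTERRUPT", "MSG_GS", "MSG_GS_DONE"};
        static const char *ops[]  = {"GS_OP_NOP", "GS_OP_CUT", "GS_OP_EMIT", "GS_OP_EMIT_CUT"};
        unsigned msg = imm & 0xf;         /* message type */
        unsigned op  = (imm >> 4) & 0x3;  /* GS opcode    */
        unsigned sid = (imm >> 8) & 0x3;  /* stream id    */
        const char *m = msg < 4 ? msgs[msg] : "?";
        if (msg == 2)
            printf("sendmsg(%s, %s, %u)\n", m, ops[op], sid);
        else if (msg == 3)
            printf("sendmsg(%s, %s)\n", m, ops[op]);
        else
            printf("sendmsg(%s)\n", m);
    }

    int main(void) {
        decode_sendmsg(1);   /* sendmsg(MSG_INTERRUPT)             */
        decode_sendmsg(34);  /* sendmsg(MSG_GS, GS_OP_EMIT, 0)     */
        decode_sendmsg(274); /* sendmsg(MSG_GS, GS_OP_CUT, 1)      */
        decode_sendmsg(562); /* sendmsg(MSG_GS, GS_OP_EMIT_CUT, 2) */
        decode_sendmsg(3);   /* sendmsg(MSG_GS_DONE, GS_OP_NOP)    */
        return 0;
    }
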
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.sffbh.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.sffbh.ll
index d453d03cded8..495e36b09f8f 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.sffbh.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.sffbh.ll
@@ -2,14 +2,13 @@
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
declare i32 @llvm.amdgcn.sffbh.i32(i32) #1
-declare i32 @llvm.AMDGPU.flbit.i32(i32) #1
; FUNC-LABEL: {{^}}s_flbit:
; GCN: s_load_dword [[VAL:s[0-9]+]],
; GCN: s_flbit_i32 [[SRESULT:s[0-9]+]], [[VAL]]
; GCN: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
; GCN: buffer_store_dword [[VRESULT]],
-define void @s_flbit(i32 addrspace(1)* noalias %out, i32 %val) #0 {
+define amdgpu_kernel void @s_flbit(i32 addrspace(1)* noalias %out, i32 %val) #0 {
%r = call i32 @llvm.amdgcn.sffbh.i32(i32 %val)
store i32 %r, i32 addrspace(1)* %out, align 4
ret void
@@ -19,36 +18,12 @@ define void @s_flbit(i32 addrspace(1)* noalias %out, i32 %val) #0 {
; GCN: buffer_load_dword [[VAL:v[0-9]+]],
; GCN: v_ffbh_i32_e32 [[RESULT:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[RESULT]],
-define void @v_flbit(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) #0 {
+define amdgpu_kernel void @v_flbit(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) #0 {
%val = load i32, i32 addrspace(1)* %valptr, align 4
%r = call i32 @llvm.amdgcn.sffbh.i32(i32 %val)
store i32 %r, i32 addrspace(1)* %out, align 4
ret void
}
-; FUNC-LABEL: {{^}}legacy_s_flbit:
-; GCN: s_load_dword [[VAL:s[0-9]+]],
-; GCN: s_flbit_i32 [[SRESULT:s[0-9]+]], [[VAL]]
-; GCN: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
-; GCN: buffer_store_dword [[VRESULT]],
-; GCN: s_endpgm
-define void @legacy_s_flbit(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
- %r = call i32 @llvm.AMDGPU.flbit.i32(i32 %val) nounwind readnone
- store i32 %r, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}legacy_v_flbit:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]],
-; GCN: v_ffbh_i32_e32 [[RESULT:v[0-9]+]], [[VAL]]
-; GCN: buffer_store_dword [[RESULT]],
-; GCN: s_endpgm
-define void @legacy_v_flbit(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
- %val = load i32, i32 addrspace(1)* %valptr, align 4
- %r = call i32 @llvm.AMDGPU.flbit.i32(i32 %val) nounwind readnone
- store i32 %r, i32 addrspace(1)* %out, align 4
- ret void
-}
-
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.sin.f16.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.sin.f16.ll
index fac0e352614c..4b930bfa210c 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.sin.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.sin.f16.ll
@@ -7,7 +7,7 @@ declare half @llvm.amdgcn.sin.f16(half %a)
; VI: v_sin_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @sin_f16(
+define amdgpu_kernel void @sin_f16(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.sin.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.sin.ll
index e3692fc5906c..0b7064da23f9 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.sin.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.sin.ll
@@ -5,7 +5,7 @@ declare float @llvm.amdgcn.sin.f32(float) #0
; GCN-LABEL: {{^}}v_sin_f32:
; GCN: v_sin_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}
-define void @v_sin_f32(float addrspace(1)* %out, float %src) #1 {
+define amdgpu_kernel void @v_sin_f32(float addrspace(1)* %out, float %src) #1 {
%sin = call float @llvm.amdgcn.sin.f32(float %src) #0
store float %sin, float addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.trig.preop.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.trig.preop.ll
index caac6ddbeb80..e0cec2134e70 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.trig.preop.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.trig.preop.ll
@@ -9,7 +9,7 @@ declare double @llvm.amdgcn.trig.preop.f64(double, i32) nounwind readnone
; SI: v_trig_preop_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[SRC]], [[SEG]]
; SI: buffer_store_dwordx2 [[RESULT]],
; SI: s_endpgm
-define void @test_trig_preop_f64(double addrspace(1)* %out, double addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+define amdgpu_kernel void @test_trig_preop_f64(double addrspace(1)* %out, double addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%a = load double, double addrspace(1)* %aptr, align 8
%b = load i32, i32 addrspace(1)* %bptr, align 4
%result = call double @llvm.amdgcn.trig.preop.f64(double %a, i32 %b) nounwind readnone
@@ -22,7 +22,7 @@ define void @test_trig_preop_f64(double addrspace(1)* %out, double addrspace(1)*
; SI: v_trig_preop_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[SRC]], 7
; SI: buffer_store_dwordx2 [[RESULT]],
; SI: s_endpgm
-define void @test_trig_preop_f64_imm_segment(double addrspace(1)* %out, double addrspace(1)* %aptr) nounwind {
+define amdgpu_kernel void @test_trig_preop_f64_imm_segment(double addrspace(1)* %out, double addrspace(1)* %aptr) nounwind {
%a = load double, double addrspace(1)* %aptr, align 8
%result = call double @llvm.amdgcn.trig.preop.f64(double %a, i32 7) nounwind readnone
store double %result, double addrspace(1)* %out, align 8
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll
new file mode 100644
index 000000000000..92e3a1099da0
--- /dev/null
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll
@@ -0,0 +1,623 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}bfe_u32_arg_arg_arg:
+; GCN: v_bfe_u32
+define amdgpu_kernel void @bfe_u32_arg_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 %src0, i32 %src1, i32 %src2)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_arg_arg_imm:
+; GCN: v_bfe_u32
+define amdgpu_kernel void @bfe_u32_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 %src0, i32 %src1, i32 123)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_arg_imm_arg:
+; GCN: v_bfe_u32
+define amdgpu_kernel void @bfe_u32_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 %src0, i32 123, i32 %src2)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_imm_arg_arg:
+; GCN: v_bfe_u32
+define amdgpu_kernel void @bfe_u32_imm_arg_arg(i32 addrspace(1)* %out, i32 %src1, i32 %src2) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 123, i32 %src1, i32 %src2)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_arg_0_width_reg_offset:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_arg_0_width_reg_offset(i32 addrspace(1)* %out, i32 %src0, i32 %src1) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 %src0, i32 %src1, i32 0)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_arg_0_width_imm_offset:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_arg_0_width_imm_offset(i32 addrspace(1)* %out, i32 %src0, i32 %src1) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 %src0, i32 8, i32 0)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_zextload_i8:
+; GCN: buffer_load_ubyte
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_zextload_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
+ %load = load i8, i8 addrspace(1)* %in
+ %ext = zext i8 %load to i32
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %ext, i32 0, i32 8)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_zext_in_reg_i8:
+; GCN: buffer_load_dword
+; GCN: v_add_i32
+; GCN-NEXT: v_and_b32_e32
+; FIXME: Should be using s_add_i32
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_zext_in_reg_i8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %load = load i32, i32 addrspace(1)* %in, align 4
+ %add = add i32 %load, 1
+ %ext = and i32 %add, 255
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %ext, i32 0, i32 8)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_zext_in_reg_i16:
+; GCN: buffer_load_dword
+; GCN: v_add_i32
+; GCN-NEXT: v_and_b32_e32
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_zext_in_reg_i16(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %load = load i32, i32 addrspace(1)* %in, align 4
+ %add = add i32 %load, 1
+ %ext = and i32 %add, 65535
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %ext, i32 0, i32 16)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_zext_in_reg_i8_offset_1:
+; GCN: buffer_load_dword
+; GCN: v_add_i32
+; GCN: bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_zext_in_reg_i8_offset_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %load = load i32, i32 addrspace(1)* %in, align 4
+ %add = add i32 %load, 1
+ %ext = and i32 %add, 255
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %ext, i32 1, i32 8)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_zext_in_reg_i8_offset_3:
+; GCN: buffer_load_dword
+; GCN: v_add_i32
+; GCN-NEXT: v_and_b32_e32 {{v[0-9]+}}, 0xf8
+; GCN-NEXT: bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_zext_in_reg_i8_offset_3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %load = load i32, i32 addrspace(1)* %in, align 4
+ %add = add i32 %load, 1
+ %ext = and i32 %add, 255
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %ext, i32 3, i32 8)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_zext_in_reg_i8_offset_7:
+; GCN: buffer_load_dword
+; GCN: v_add_i32
+; GCN-NEXT: v_and_b32_e32 {{v[0-9]+}}, 0x80
+; GCN-NEXT: bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_zext_in_reg_i8_offset_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %load = load i32, i32 addrspace(1)* %in, align 4
+ %add = add i32 %load, 1
+ %ext = and i32 %add, 255
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %ext, i32 7, i32 8)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_zext_in_reg_i16_offset_8:
+; GCN: buffer_load_dword
+; GCN: v_add_i32
+; GCN-NEXT: bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_zext_in_reg_i16_offset_8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %load = load i32, i32 addrspace(1)* %in, align 4
+ %add = add i32 %load, 1
+ %ext = and i32 %add, 65535
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %ext, i32 8, i32 8)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_test_1:
+; GCN: buffer_load_dword
+; GCN: v_and_b32_e32 {{v[0-9]+}}, 1, {{v[0-9]+}}
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_test_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %x, i32 0, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_test_2:
+define amdgpu_kernel void @bfe_u32_test_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %shl, i32 0, i32 8)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_test_3:
+define amdgpu_kernel void @bfe_u32_test_3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %shl, i32 0, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_test_4:
+; GCN-NOT: lshl
+; GCN-NOT: shr
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_test_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %shr = lshr i32 %shl, 31
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %shr, i32 31, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_test_5:
+; GCN: buffer_load_dword
+; GCN-NOT: lshl
+; GCN-NOT: shr
+; GCN: v_bfe_i32 {{v[0-9]+}}, {{v[0-9]+}}, 0, 1
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_test_5(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %shr = ashr i32 %shl, 31
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %shr, i32 0, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_test_6:
+; GCN: v_lshlrev_b32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
+; GCN: v_lshrrev_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_test_6(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %shl, i32 1, i32 31)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_test_7:
+; GCN: v_lshlrev_b32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_test_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %shl, i32 0, i32 31)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_test_8:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_and_b32_e32 {{v[0-9]+}}, 1, {{v[0-9]+}}
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_test_8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %shl, i32 31, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_test_9:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_lshrrev_b32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_test_9(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %x, i32 31, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_test_10:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_lshrrev_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_test_10(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %x, i32 1, i32 31)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_test_11:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_lshrrev_b32_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_test_11(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %x, i32 8, i32 24)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_test_12:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_lshrrev_b32_e32 v{{[0-9]+}}, 24, v{{[0-9]+}}
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_test_12(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %x, i32 24, i32 8)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_test_13:
+; V_ASHRREV_U32_e32 {{v[0-9]+}}, 31, {{v[0-9]+}}
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_test_13(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %shl = ashr i32 %x, 31
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %shl, i32 31, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_test_14:
+; GCN-NOT: lshr
+; GCN-NOT: {{[^@]}}bfe
+; GCN: s_endpgm
+define amdgpu_kernel void @bfe_u32_test_14(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %x = load i32, i32 addrspace(1)* %in, align 4
+ %shl = lshr i32 %x, 31
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %shl, i32 31, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_0:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_0(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 0, i32 0, i32 0)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_1:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_1(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 12334, i32 0, i32 0)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_2:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_2(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 0, i32 0, i32 1)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_3:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 1
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_3(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 1, i32 0, i32 1)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_4:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], -1
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_4(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 4294967295, i32 0, i32 1)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_5:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 1
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_5(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 128, i32 7, i32 1)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_6:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0x80
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_6(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 128, i32 0, i32 8)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_7:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0x7f
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_7(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 127, i32 0, i32 8)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_8:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 1
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_8(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 127, i32 6, i32 8)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_9:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 1
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_9(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 65536, i32 16, i32 8)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_10:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_10(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 65535, i32 16, i32 16)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_11:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 10
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_11(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 160, i32 4, i32 4)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_12:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_12(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 160, i32 31, i32 1)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_13:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 1
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_13(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 131070, i32 16, i32 16)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_14:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 40
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_14(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 160, i32 2, i32 30)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_15:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 10
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_15(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 160, i32 4, i32 28)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_16:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0x7f
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_16(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 4294967295, i32 1, i32 7)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_17:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0x7f
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_17(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 255, i32 1, i32 31)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}bfe_u32_constant_fold_test_18:
+; GCN-NOT: {{[^@]}}bfe
+; GCN: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
+; GCN: buffer_store_dword [[VREG]],
+; GCN: s_endpgm
+; EG-NOT: BFE
+define amdgpu_kernel void @bfe_u32_constant_fold_test_18(i32 addrspace(1)* %out) #0 {
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 255, i32 31, i32 1)
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; Make sure that SimplifyDemandedBits doesn't cause the and to be
+; reduced to the bits demanded by the bfe.
+
+; XXX: The operand to v_bfe_u32 could also just directly be the load register.
+; GCN-LABEL: {{^}}simplify_bfe_u32_multi_use_arg:
+; GCN: buffer_load_dword [[ARG:v[0-9]+]]
+; GCN: v_and_b32_e32 [[AND:v[0-9]+]], 63, [[ARG]]
+; GCN: v_bfe_u32 [[BFE:v[0-9]+]], [[AND]], 2, 2
+; GCN-DAG: buffer_store_dword [[AND]]
+; GCN-DAG: buffer_store_dword [[BFE]]
+; GCN: s_endpgm
+define amdgpu_kernel void @simplify_bfe_u32_multi_use_arg(i32 addrspace(1)* %out0,
+ i32 addrspace(1)* %out1,
+ i32 addrspace(1)* %in) #0 {
+ %src = load i32, i32 addrspace(1)* %in, align 4
+ %and = and i32 %src, 63
+ %bfe_u32 = call i32 @llvm.amdgcn.ubfe.i32(i32 %and, i32 2, i32 2)
+ store i32 %bfe_u32, i32 addrspace(1)* %out0, align 4
+ store i32 %and, i32 addrspace(1)* %out1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}lshr_and:
+; GCN: s_bfe_u32 {{s[0-9]+}}, {{s[0-9]+}}, 0x30006
+; GCN: buffer_store_dword
+define amdgpu_kernel void @lshr_and(i32 addrspace(1)* %out, i32 %a) #0 {
+ %b = lshr i32 %a, 6
+ %c = and i32 %b, 7
+ store i32 %c, i32 addrspace(1)* %out, align 8
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_lshr_and:
+; GCN: v_bfe_u32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}, 3
+; GCN: buffer_store_dword
+define amdgpu_kernel void @v_lshr_and(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+ %c = lshr i32 %a, %b
+ %d = and i32 %c, 7
+ store i32 %d, i32 addrspace(1)* %out, align 8
+ ret void
+}
+
+; GCN-LABEL: {{^}}and_lshr:
+; GCN: s_bfe_u32 {{s[0-9]+}}, {{s[0-9]+}}, 0x30006
+; GCN: buffer_store_dword
+define amdgpu_kernel void @and_lshr(i32 addrspace(1)* %out, i32 %a) #0 {
+ %b = and i32 %a, 448
+ %c = lshr i32 %b, 6
+ store i32 %c, i32 addrspace(1)* %out, align 8
+ ret void
+}
+
+; GCN-LABEL: {{^}}and_lshr2:
+; GCN: s_bfe_u32 {{s[0-9]+}}, {{s[0-9]+}}, 0x30006
+; GCN: buffer_store_dword
+define amdgpu_kernel void @and_lshr2(i32 addrspace(1)* %out, i32 %a) #0 {
+ %b = and i32 %a, 511
+ %c = lshr i32 %b, 6
+ store i32 %c, i32 addrspace(1)* %out, align 8
+ ret void
+}
+
+; GCN-LABEL: {{^}}shl_lshr:
+; GCN: s_bfe_u32 {{s[0-9]+}}, {{s[0-9]+}}, 0x150002
+; GCN: buffer_store_dword
+define amdgpu_kernel void @shl_lshr(i32 addrspace(1)* %out, i32 %a) #0 {
+ %b = shl i32 %a, 9
+ %c = lshr i32 %b, 11
+ store i32 %c, i32 addrspace(1)* %out, align 8
+ ret void
+}
+
+declare i32 @llvm.amdgcn.ubfe.i32(i32, i32, i32) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
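
The 0x30006 and 0x150002 immediates checked in the last few tests are the packed s_bfe_u32 width/offset operand. Assuming the layout those checks imply (field offset in the low half, field width shifted left by 16), the shift-and-mask patterns in the IR encode as follows (the `s_bfe_operand` helper is illustrative):

    #include <assert.h>

    /* Sketch: pack the s_bfe_u32 src1 operand as width << 16 | offset,
       matching the immediates the checks above expect. */
    static unsigned s_bfe_operand(unsigned offset, unsigned width) {
        return (width << 16) | offset;
    }

    int main(void) {
        /* lshr_and, and_lshr, and_lshr2: a 3-bit field at bit offset 6.  */
        assert(s_bfe_operand(6, 3) == 0x30006);
        /* shl_lshr: shl 9 then lshr 11 leaves a 21-bit field at offset 2. */
        assert(s_bfe_operand(2, 21) == 0x150002);
        return 0;
    }
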
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.wave.barrier.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.wave.barrier.ll
index e85179755371..e305f8eff587 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.wave.barrier.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.wave.barrier.ll
@@ -4,7 +4,7 @@
; GCN-DAG: ; wave barrier
; GCN-NOT: s_barrier
-define void @test_wave_barrier() #0 {
+define amdgpu_kernel void @test_wave_barrier() #0 {
entry:
call void @llvm.amdgcn.wave.barrier() #1
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.workgroup.id.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.workgroup.id.ll
index 58529b874442..349e7f0f0e8d 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.workgroup.id.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.workgroup.id.ll
@@ -34,7 +34,7 @@ declare i32 @llvm.amdgcn.workgroup.id.z() #0
; ALL: COMPUTE_PGM_RSRC2:TGID_Y_EN: 0
; ALL: COMPUTE_PGM_RSRC2:TGID_Z_EN: 0
; ALL: COMPUTE_PGM_RSRC2:TIDIG_COMP_CNT: 0
-define void @test_workgroup_id_x(i32 addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_workgroup_id_x(i32 addrspace(1)* %out) #1 {
%id = call i32 @llvm.amdgcn.workgroup.id.x()
store i32 %id, i32 addrspace(1)* %out
ret void
@@ -61,7 +61,7 @@ define void @test_workgroup_id_x(i32 addrspace(1)* %out) #1 {
; ALL: COMPUTE_PGM_RSRC2:TGID_Y_EN: 1
; ALL: COMPUTE_PGM_RSRC2:TGID_Z_EN: 0
; ALL: COMPUTE_PGM_RSRC2:TIDIG_COMP_CNT: 0
-define void @test_workgroup_id_y(i32 addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_workgroup_id_y(i32 addrspace(1)* %out) #1 {
%id = call i32 @llvm.amdgcn.workgroup.id.y()
store i32 %id, i32 addrspace(1)* %out
ret void
@@ -96,7 +96,7 @@ define void @test_workgroup_id_y(i32 addrspace(1)* %out) #1 {
; ALL: COMPUTE_PGM_RSRC2:TGID_Y_EN: 0
; ALL: COMPUTE_PGM_RSRC2:TGID_Z_EN: 1
; ALL: COMPUTE_PGM_RSRC2:TIDIG_COMP_CNT: 0
-define void @test_workgroup_id_z(i32 addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_workgroup_id_z(i32 addrspace(1)* %out) #1 {
%id = call i32 @llvm.amdgcn.workgroup.id.z()
store i32 %id, i32 addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.workitem.id.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.workitem.id.ll
index 1f18173f40a4..8b80998cab6f 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.workitem.id.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.workitem.id.ll
@@ -18,7 +18,7 @@ declare i32 @llvm.amdgcn.workitem.id.z() #0
; ALL-NOT: v0
; ALL: {{buffer|flat}}_store_dword {{.*}}v0
-define void @test_workitem_id_x(i32 addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_workitem_id_x(i32 addrspace(1)* %out) #1 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
store i32 %id, i32 addrspace(1)* %out
ret void
@@ -33,7 +33,7 @@ define void @test_workitem_id_x(i32 addrspace(1)* %out) #1 {
; ALL-NOT: v1
; ALL: {{buffer|flat}}_store_dword {{.*}}v1
-define void @test_workitem_id_y(i32 addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_workitem_id_y(i32 addrspace(1)* %out) #1 {
%id = call i32 @llvm.amdgcn.workitem.id.y()
store i32 %id, i32 addrspace(1)* %out
ret void
@@ -48,7 +48,7 @@ define void @test_workitem_id_y(i32 addrspace(1)* %out) #1 {
; ALL-NOT: v2
; ALL: {{buffer|flat}}_store_dword {{.*}}v2
-define void @test_workitem_id_z(i32 addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_workitem_id_z(i32 addrspace(1)* %out) #1 {
%id = call i32 @llvm.amdgcn.workitem.id.z()
store i32 %id, i32 addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.ceil.f16.ll b/test/CodeGen/AMDGPU/llvm.ceil.f16.ll
index 112e29ed22a7..0604a49372a2 100644
--- a/test/CodeGen/AMDGPU/llvm.ceil.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.ceil.f16.ll
@@ -4,7 +4,7 @@
declare half @llvm.ceil.f16(half %a)
declare <2 x half> @llvm.ceil.v2f16(<2 x half> %a)
-; GCN-LABEL: {{^}}ceil_f16
+; GCN-LABEL: {{^}}ceil_f16:
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_ceil_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]]
@@ -12,7 +12,7 @@ declare <2 x half> @llvm.ceil.v2f16(<2 x half> %a)
; VI: v_ceil_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @ceil_f16(
+define amdgpu_kernel void @ceil_f16(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -22,23 +22,27 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}ceil_v2f16
+; GCN-LABEL: {{^}}ceil_v2f16:
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_ceil_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI: v_ceil_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_ceil_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
-; VI: v_ceil_f16_e32 v[[R_F16_1:[0-9]+]], v[[A_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI-NOT: and
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; VI-DAG: v_ceil_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
+; VI-DAG: v_ceil_f16_sdwa v[[R_F16_1:[0-9]+]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
+; VI-NOT: and
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @ceil_v2f16(
+define amdgpu_kernel void @ceil_v2f16(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
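A second recurring pattern shows up in the ceil hunks above: the v2f16 check block splits per target because VI now selects SDWA forms (v_ceil_f16_sdwa writes the high 16-bit word in place), so the lshr/lshl repacking survives only on SI and the masking "and" disappears on both paths. The -DAG suffixes tolerate scheduling differences and the -NOT lines pin down the dropped mask. A generic sketch of the idiom (opcodes and variable names are placeholders, not from the patch):

; CHECK-DAG: v_op_e32 v[[LO:[0-9]+]], {{.*}}   ; -DAG: either order is accepted
; CHECK-DAG: v_op_sdwa v[[HI:[0-9]+]], {{.*}}
; CHECK-NOT: and                               ; no mask between the pair and the or
; CHECK: v_or_b32_e32 v{{[0-9]+}}, v[[HI]], v[[LO]]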
diff --git a/test/CodeGen/AMDGPU/llvm.cos.f16.ll b/test/CodeGen/AMDGPU/llvm.cos.f16.ll
index ba354ed0b124..d836ea36ef63 100644
--- a/test/CodeGen/AMDGPU/llvm.cos.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.cos.f16.ll
@@ -13,7 +13,7 @@ declare <2 x half> @llvm.cos.v2f16(<2 x half> %a)
; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @cos_f16(
+define amdgpu_kernel void @cos_f16(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -25,26 +25,34 @@ entry:
; GCN-LABEL: {{^}}cos_v2f16
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; SI: v_mov_b32_e32 v[[HALF_PIE:[0-9]+]], 0x3e22f983{{$}}
-; GCN: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; GCN: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_mul_f32_e32 v[[M_F32_0:[0-9]+]], v[[HALF_PIE]], v[[A_F32_0]]
-; VI: v_mul_f32_e32 v[[M_F32_0:[0-9]+]], 0.15915494, v[[A_F32_0]]
-; GCN: v_fract_f32_e32 v[[F_F32_0:[0-9]+]], v[[M_F32_0]]
-; SI: v_mul_f32_e32 v[[M_F32_1:[0-9]+]], v[[HALF_PIE]], v[[A_F32_1]]
-; VI: v_mul_f32_e32 v[[M_F32_1:[0-9]+]], 0.15915494, v[[A_F32_1]]
-; GCN: v_fract_f32_e32 v[[F_F32_1:[0-9]+]], v[[M_F32_1]]
-; GCN: v_cos_f32_e32 v[[R_F32_0:[0-9]+]], v[[F_F32_0]]
-; GCN: v_cos_f32_e32 v[[R_F32_1:[0-9]+]], v[[F_F32_1]]
-; GCN: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; GCN: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI-DAG: v_mov_b32_e32 v[[HALF_PIE:[0-9]+]], 0x3e22f983{{$}}
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI-DAG: v_mul_f32_e32 v[[M_F32_0:[0-9]+]], v[[HALF_PIE]], v[[A_F32_0]]
+; SI-DAG: v_mul_f32_e32 v[[M_F32_1:[0-9]+]], v[[HALF_PIE]], v[[A_F32_1]]
+
+; VI-DAG: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; VI-DAG: v_cvt_f32_f16_sdwa v[[A_F32_1:[0-9]+]], v[[A_V2_F16]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; VI-DAG: v_mul_f32_e32 v[[M_F32_0:[0-9]+]], 0.15915494, v[[A_F32_0]]
+; VI-DAG: v_mul_f32_e32 v[[M_F32_1:[0-9]+]], 0.15915494, v[[A_F32_1]]
+
+; GCN-DAG: v_fract_f32_e32 v[[F_F32_0:[0-9]+]], v[[M_F32_0]]
+; GCN-DAG: v_fract_f32_e32 v[[F_F32_1:[0-9]+]], v[[M_F32_1]]
+; GCN-DAG: v_cos_f32_e32 v[[R_F32_0:[0-9]+]], v[[F_F32_0]]
+; GCN-DAG: v_cos_f32_e32 v[[R_F32_1:[0-9]+]], v[[F_F32_1]]
+
+; GCN-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI-DAG: v_cvt_f16_f32_sdwa v[[R_F16_1:[0-9]+]], v[[R_F32_1]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GCN-NOT: and
+
+; SI: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @cos_v2f16(
+define amdgpu_kernel void @cos_v2f16(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
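As a reading aid for the cos checks above: the lowering they encode scales the angle by 1/(2*pi), takes the fractional part, and hands that to v_cos, which consumes revolutions rather than radians. SI keeps the scale factor in a register while VI folds it as a literal operand; both spell the same constant:

;   0x3e22f983 ~= 0.15915494 = 1/(2*pi)
;   t = v_fract_f32(x * 0.15915494)   ; reduce to a single revolution
;   r = v_cos_f32(t)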
diff --git a/test/CodeGen/AMDGPU/llvm.cos.ll b/test/CodeGen/AMDGPU/llvm.cos.ll
index eb7dcbbf2346..bd89502d7b82 100644
--- a/test/CodeGen/AMDGPU/llvm.cos.ll
+++ b/test/CodeGen/AMDGPU/llvm.cos.ll
@@ -11,7 +11,7 @@
;SI: v_cos_f32
;SI-NOT: v_cos_f32
-define void @test(float addrspace(1)* %out, float %x) #1 {
+define amdgpu_kernel void @test(float addrspace(1)* %out, float %x) #1 {
%cos = call float @llvm.cos.f32(float %x)
store float %cos, float addrspace(1)* %out
ret void
@@ -29,7 +29,7 @@ define void @test(float addrspace(1)* %out, float %x) #1 {
;SI: v_cos_f32
;SI-NOT: v_cos_f32
-define void @testv(<4 x float> addrspace(1)* %out, <4 x float> inreg %vx) #1 {
+define amdgpu_kernel void @testv(<4 x float> addrspace(1)* %out, <4 x float> inreg %vx) #1 {
%cos = call <4 x float> @llvm.cos.v4f32(<4 x float> %vx)
store <4 x float> %cos, <4 x float> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.dbg.value.ll b/test/CodeGen/AMDGPU/llvm.dbg.value.ll
index 8b0854c2c2b5..c4a76de5989c 100644
--- a/test/CodeGen/AMDGPU/llvm.dbg.value.ll
+++ b/test/CodeGen/AMDGPU/llvm.dbg.value.ll
@@ -9,7 +9,7 @@
; CHECK: buffer_store_dword
; CHECK: s_endpgm
-define void @test_debug_value(i32 addrspace(1)* nocapture %globalptr_arg) #0 !dbg !4 {
+define amdgpu_kernel void @test_debug_value(i32 addrspace(1)* nocapture %globalptr_arg) #0 !dbg !4 {
entry:
tail call void @llvm.dbg.value(metadata i32 addrspace(1)* %globalptr_arg, i64 0, metadata !10, metadata !13), !dbg !14
store i32 123, i32 addrspace(1)* %globalptr_arg, align 4
diff --git a/test/CodeGen/AMDGPU/llvm.exp2.f16.ll b/test/CodeGen/AMDGPU/llvm.exp2.f16.ll
index 7fa56911efdc..5757142b9e95 100644
--- a/test/CodeGen/AMDGPU/llvm.exp2.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.exp2.f16.ll
@@ -12,7 +12,7 @@ declare <2 x half> @llvm.exp2.v2f16(<2 x half> %a)
; VI: v_exp_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @exp2_f16(
+define amdgpu_kernel void @exp2_f16(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -25,20 +25,24 @@ entry:
; GCN-LABEL: {{^}}exp2_v2f16
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_exp_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI: v_exp_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_exp_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
-; VI: v_exp_f16_e32 v[[R_F16_1:[0-9]+]], v[[A_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI-NOT: and
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; VI-DAG: v_exp_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
+; VI-DAG: v_exp_f16_sdwa v[[R_F16_1:[0-9]+]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
+; VI-NOT: and
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @exp2_v2f16(
+define amdgpu_kernel void @exp2_v2f16(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
diff --git a/test/CodeGen/AMDGPU/llvm.exp2.ll b/test/CodeGen/AMDGPU/llvm.exp2.ll
index 42698925aae4..387dc3b8566a 100644
--- a/test/CodeGen/AMDGPU/llvm.exp2.ll
+++ b/test/CodeGen/AMDGPU/llvm.exp2.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
;RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM --check-prefix=FUNC
-;RUN: llc < %s -march=amdgcn -mcpu=SI | FileCheck %s --check-prefix=SI --check-prefix=FUNC
+;RUN: llc < %s -march=amdgcn | FileCheck %s --check-prefix=SI --check-prefix=FUNC
;RUN: llc < %s -march=amdgcn -mcpu=tonga | FileCheck %s --check-prefix=SI --check-prefix=FUNC
;FUNC-LABEL: {{^}}test:
@@ -11,7 +11,7 @@
;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
;SI: v_exp_f32
-define void @test(float addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @test(float addrspace(1)* %out, float %in) {
entry:
%0 = call float @llvm.exp2.f32(float %in)
store float %0, float addrspace(1)* %out
@@ -34,7 +34,7 @@ entry:
;SI: v_exp_f32
;SI: v_exp_f32
-define void @testv2(<2 x float> addrspace(1)* %out, <2 x float> %in) {
+define amdgpu_kernel void @testv2(<2 x float> addrspace(1)* %out, <2 x float> %in) {
entry:
%0 = call <2 x float> @llvm.exp2.v2f32(<2 x float> %in)
store <2 x float> %0, <2 x float> addrspace(1)* %out
@@ -68,7 +68,7 @@ entry:
;SI: v_exp_f32
;SI: v_exp_f32
;SI: v_exp_f32
-define void @testv4(<4 x float> addrspace(1)* %out, <4 x float> %in) {
+define amdgpu_kernel void @testv4(<4 x float> addrspace(1)* %out, <4 x float> %in) {
entry:
%0 = call <4 x float> @llvm.exp2.v4f32(<4 x float> %in)
store <4 x float> %0, <4 x float> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/llvm.floor.f16.ll b/test/CodeGen/AMDGPU/llvm.floor.f16.ll
index 60dfd734ee73..6a18141d8035 100644
--- a/test/CodeGen/AMDGPU/llvm.floor.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.floor.f16.ll
@@ -12,7 +12,7 @@ declare <2 x half> @llvm.floor.v2f16(<2 x half> %a)
; VI: v_floor_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @floor_f16(
+define amdgpu_kernel void @floor_f16(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -25,20 +25,24 @@ entry:
; GCN-LABEL: {{^}}floor_v2f16
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_floor_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI: v_floor_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_floor_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
-; VI: v_floor_f16_e32 v[[R_F16_1:[0-9]+]], v[[A_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI-NOT: and
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; VI-DAG: v_floor_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
+; VI-DAG: v_floor_f16_sdwa v[[R_F16_1:[0-9]+]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
+; VI-NOT: and
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @floor_v2f16(
+define amdgpu_kernel void @floor_v2f16(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
diff --git a/test/CodeGen/AMDGPU/llvm.fma.f16.ll b/test/CodeGen/AMDGPU/llvm.fma.f16.ll
index 3431267e3943..518fe8baaa7a 100644
--- a/test/CodeGen/AMDGPU/llvm.fma.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.fma.f16.ll
@@ -16,7 +16,7 @@ declare <2 x half> @llvm.fma.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c)
; VI: v_fma_f16 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_F16]], v[[C_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @fma_f16(
+define amdgpu_kernel void @fma_f16(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b,
@@ -32,7 +32,8 @@ define void @fma_f16(
; GCN-LABEL: {{^}}fma_f16_imm_a
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], 0x4200{{$}}
+
+; SI: v_mov_b32_e32 v[[A_F32:[0-9]+]], 0x40400000{{$}}
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
; SI: v_fma_f32 v[[R_F32:[0-9]+]], v[[A_F32:[0-9]]], v[[B_F32:[0-9]]], v[[C_F32:[0-9]]]
@@ -41,7 +42,7 @@ define void @fma_f16(
; VI: v_fma_f16 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_F16]], v[[C_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @fma_f16_imm_a(
+define amdgpu_kernel void @fma_f16_imm_a(
half addrspace(1)* %r,
half addrspace(1)* %b,
half addrspace(1)* %c) {
@@ -55,7 +56,7 @@ define void @fma_f16_imm_a(
; GCN-LABEL: {{^}}fma_f16_imm_b
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], 0x4200{{$}}
+; SI: v_mov_b32_e32 v[[B_F32:[0-9]+]], 0x40400000{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
; SI: v_fma_f32 v[[R_F32:[0-9]+]], v[[A_F32:[0-9]]], v[[B_F32:[0-9]]], v[[C_F32:[0-9]]]
@@ -64,7 +65,7 @@ define void @fma_f16_imm_a(
; VI: v_fma_f16 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]], v[[C_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @fma_f16_imm_b(
+define amdgpu_kernel void @fma_f16_imm_b(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %c) {
@@ -78,7 +79,7 @@ define void @fma_f16_imm_b(
; GCN-LABEL: {{^}}fma_f16_imm_c
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], 0x4200{{$}}
+; SI: v_mov_b32_e32 v[[C_F32:[0-9]+]], 0x40400000{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
; SI: v_fma_f32 v[[R_F32:[0-9]+]], v[[A_F32:[0-9]]], v[[B_F32:[0-9]]], v[[C_F32:[0-9]]]
@@ -87,7 +88,7 @@ define void @fma_f16_imm_b(
; VI: v_fma_f16 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]], v[[C_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @fma_f16_imm_c(
+define amdgpu_kernel void @fma_f16_imm_c(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -102,27 +103,35 @@ define void @fma_f16_imm_c(
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
; GCN: buffer_load_dword v[[C_V2_F16:[0-9]+]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
+
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+
+; SI: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
+
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[C_F32_1:[0-9]+]], v[[C_F16_1]]
-; SI: v_fma_f32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]], v[[C_F32_0]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; SI: v_fma_f32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]], v[[C_F32_1]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_fma_f16 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]], v[[C_V2_F16]]
-; VI: v_fma_f16 v[[R_F16_1:[0-9]+]], v[[A_F16_1]], v[[B_F16_1]], v[[C_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI-DAG: v_fma_f32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]], v[[C_F32_0]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI-DAG: v_fma_f32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]], v[[C_F32_1]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+
+; VI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; VI: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; VI: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
+; VI-DAG: v_fma_f16 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]], v[[C_V2_F16]]
+; VI-DAG: v_fma_f16 v[[R_F16_1:[0-9]+]], v[[A_F16_1]], v[[B_F16_1]], v[[C_F16_1]]
+
+; GCN-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN-NOT: and
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @fma_v2f16(
+define amdgpu_kernel void @fma_v2f16(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b,
@@ -135,29 +144,33 @@ define void @fma_v2f16(
ret void
}
-; GCN-LABEL: {{^}}fma_v2f16_imm_a
+; GCN-LABEL: {{^}}fma_v2f16_imm_a:
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
; GCN: buffer_load_dword v[[C_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], 0x4200{{$}}
+
+; SI: v_mov_b32_e32 v[[A_F32:[0-9]+]], 0x40400000{{$}}
; VI: v_mov_b32_e32 v[[A_F16:[0-9]+]], 0x4200{{$}}
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
+; GCN-DAG: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; GCN-DAG: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
+
+; SI-DAG: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[C_F32_1:[0-9]+]], v[[C_F16_1]]
; SI: v_fma_f32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32]], v[[C_F32_0]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; SI: v_fma_f32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32]], v[[C_F32_1]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_fma_f16 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_F16]], v[[C_V2_F16]]
-; VI: v_fma_f16 v[[R_F16_1:[0-9]+]], v[[B_F16_1]], v[[A_F16]], v[[C_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI-DAG: v_fma_f32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32]], v[[C_F32_1]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+
+; VI-DAG: v_fma_f16 v[[R_F16_1:[0-9]+]], v[[C_F16_1]], v[[A_F16]], v[[B_F16_1]]
+; VI-DAG: v_fma_f16 v[[R_F16_0:[0-9]+]], v[[C_V2_F16]], v[[A_F16]], v[[B_V2_F16]]
+
+; GCN-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN-NOT: and
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @fma_v2f16_imm_a(
+define amdgpu_kernel void @fma_v2f16_imm_a(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %b,
<2 x half> addrspace(1)* %c) {
@@ -168,29 +181,39 @@ define void @fma_v2f16_imm_a(
ret void
}
-; GCN-LABEL: {{^}}fma_v2f16_imm_b
-; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: buffer_load_dword v[[C_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], 0x4200{{$}}
+; GCN-LABEL: {{^}}fma_v2f16_imm_b:
+; SI: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; SI: buffer_load_dword v[[C_V2_F16:[0-9]+]]
+
+; VI: buffer_load_dword v[[C_V2_F16:[0-9]+]]
+; VI: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+
+; SI: v_mov_b32_e32 v[[B_F32:[0-9]+]], 0x40400000{{$}}
; VI: v_mov_b32_e32 v[[B_F16:[0-9]+]], 0x4200{{$}}
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
+
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_cvt_f32_f16_e32 v[[C_F32_1:[0-9]+]], v[[C_F16_1]]
-; SI: v_fma_f32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32]], v[[C_F32_0]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; SI: v_fma_f32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32]], v[[C_F32_1]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_fma_f16 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]], v[[B_F16]], v[[C_V2_F16]]
-; VI: v_fma_f16 v[[R_F16_1:[0-9]+]], v[[A_F16_1]], v[[B_F16]], v[[C_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
+
+; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[C_F32_1:[0-9]+]], v[[C_F16_1]]
+; SI-DAG: v_fma_f32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32]], v[[C_F32_0]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI-DAG: v_fma_f32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32]], v[[C_F32_1]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+
+; VI-DAG: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; VI-DAG: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
+; VI-DAG: v_fma_f16 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]], v[[B_F16]], v[[C_V2_F16]]
+; VI-DAG: v_fma_f16 v[[R_F16_1:[0-9]+]], v[[A_F16_1]], v[[B_F16]], v[[C_F16_1]]
+
+; GCN-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN-NOT: and
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @fma_v2f16_imm_b(
+define amdgpu_kernel void @fma_v2f16_imm_b(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %c) {
@@ -201,29 +224,39 @@ define void @fma_v2f16_imm_b(
ret void
}
-; GCN-LABEL: {{^}}fma_v2f16_imm_c
-; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], 0x4200{{$}}
+; GCN-LABEL: {{^}}fma_v2f16_imm_c:
+; SI: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; SI: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+
+; VI: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; VI: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+
+; SI: v_mov_b32_e32 v[[C_F32:[0-9]+]], 0x40400000{{$}}
; VI: v_mov_b32_e32 v[[C_F16:[0-9]+]], 0x4200{{$}}
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
; SI: v_fma_f32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]], v[[C_F32]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; SI: v_fma_f32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]], v[[C_F32]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_fma_f16 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]], v[[C_F16]]
-; VI: v_fma_f16 v[[R_F16_1:[0-9]+]], v[[A_F16_1]], v[[B_F16_1]], v[[C_F16]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI-DAG: v_fma_f32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]], v[[C_F32]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+
+; VI-DAG: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; VI-DAG: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; VI-DAG: v_fma_f16 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]], v[[C_F16]]
+; VI-DAG: v_fma_f16 v[[R_F16_1:[0-9]+]], v[[A_F16_1]], v[[B_F16_1]], v[[C_F16]]
+
+; GCN-NOT: and
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @fma_v2f16_imm_c(
+define amdgpu_kernel void @fma_v2f16_imm_c(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
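The recurring literal change in the SI checks above is a re-materialization, not a value change: the f16 operand is promoted to f32 on SI, and the constant is now emitted as the single-precision bit pattern directly instead of being converted from the half pattern at run time. Worked out:

;   half  3.0 = 0x4200      (0 | 10000 | 1000000000)
;   float 3.0 = 0x40400000  (0 | 10000000 | 10000000000000000000000)
;   so  v_cvt_f32_f16_e32 vN, 0x4200  becomes  v_mov_b32_e32 vN, 0x40400000
;   4.0 (half 0x4400) needs no literal at all: it is a hardware inline constant.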
diff --git a/test/CodeGen/AMDGPU/llvm.fmuladd.f16.ll b/test/CodeGen/AMDGPU/llvm.fmuladd.f16.ll
index 3bc85bdc29ef..f30fd1d58204 100644
--- a/test/CodeGen/AMDGPU/llvm.fmuladd.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.fmuladd.f16.ll
@@ -1,7 +1,7 @@
-; RUN: llc -march=amdgcn -mattr=-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SI-FLUSH %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=VI-FLUSH %s
-; RUN: llc -march=amdgcn -mattr=+fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SI-DENORM %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=VI-DENORM %s
+; RUN: llc -march=amdgcn -mattr=-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SI-FLUSH %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-fp64-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=VI-FLUSH %s
+; RUN: llc -march=amdgcn -mattr=+fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SI-DENORM %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+fp64-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=VI-DENORM %s
declare half @llvm.fmuladd.f16(half %a, half %b, half %c)
declare <2 x half> @llvm.fmuladd.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c)
@@ -24,7 +24,7 @@ declare <2 x half> @llvm.fmuladd.v2f16(<2 x half> %a, <2 x half> %b, <2 x half>
; VI-DENORM: buffer_store_short [[RESULT]]
; GCN: s_endpgm
-define void @fmuladd_f16(
+define amdgpu_kernel void @fmuladd_f16(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b,
@@ -40,10 +40,9 @@ define void @fmuladd_f16(
; GCN-LABEL: {{^}}fmuladd_f16_imm_a
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], 0x4200{{$}}
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
-; SI: v_mac_f32_e32 v[[C_F32]], v[[A_F32]], v[[B_F32]]
+; SI: v_mac_f32_e32 v[[C_F32]], 0x40400000, v[[B_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[C_F32]]
; SI: buffer_store_short v[[R_F16]]
@@ -55,7 +54,7 @@ define void @fmuladd_f16(
; VI-DENORM: buffer_store_short [[RESULT]]
; GCN: s_endpgm
-define void @fmuladd_f16_imm_a(
+define amdgpu_kernel void @fmuladd_f16_imm_a(
half addrspace(1)* %r,
half addrspace(1)* %b,
half addrspace(1)* %c) {
@@ -69,10 +68,9 @@ define void @fmuladd_f16_imm_a(
; GCN-LABEL: {{^}}fmuladd_f16_imm_b
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], 0x4200{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
-; SI: v_mac_f32_e32 v[[C_F32]], v[[B_F32]], v[[A_F32]]
+; SI: v_mac_f32_e32 v[[C_F32]], 0x40400000, v[[A_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[C_F32]]
; SI: buffer_store_short v[[R_F16]]
@@ -85,7 +83,7 @@ define void @fmuladd_f16_imm_a(
; GCN: s_endpgm
-define void @fmuladd_f16_imm_b(
+define amdgpu_kernel void @fmuladd_f16_imm_b(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %c) {
@@ -100,38 +98,45 @@ define void @fmuladd_f16_imm_b(
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
; GCN: buffer_load_dword v[[C_V2_F16:[0-9]+]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
+
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
+; SI-DAG: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI-DAG: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
+
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[C_F32_1:[0-9]+]], v[[C_F16_1]]
; SI: v_mac_f32_e32 v[[C_F32_0]], v[[B_F32_0]], v[[A_F32_0]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[C_F32_0]]
; SI: v_mac_f32_e32 v[[C_F32_1]], v[[B_F32_1]], v[[A_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[C_F32_1]]
-; SI: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_LO:[0-9]+]], v[[C_F32_0]]
; SI: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-
-
-; FIXME: and should be unnecessary
-; VI-FLUSH: v_mac_f16_e32 v[[C_V2_F16]], v[[B_V2_F16]], v[[A_V2_F16]]
-; VI-FLUSH: v_mac_f16_e32 v[[C_F16_1]], v[[B_F16_1]], v[[A_F16_1]]
-; VI-FLUSH: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[C_V2_F16]]
-; VI-FLUSH: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[C_F16_1]]
-
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+
+; VI-FLUSH: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; VI-FLUSH-DAG: v_mac_f16_sdwa v[[A_F16_1]], v[[C_V2_F16]], v[[B_V2_F16]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-FLUSH-DAG: v_mac_f16_e32 v[[A_V2_F16]], v[[C_V2_F16]], v[[B_V2_F16]]
+; VI-FLUSH-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[A_F16_1]]
+; VI-FLUSH-NOT: v_and_b32
+; VI-FLUSH: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[A_V2_F16]]
+
+; VI-DENORM: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; VI-DENORM: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; VI-DENORM: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
; VI-DENORM-DAG: v_fma_f16 v[[RES0:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]], v[[C_V2_F16]]
; VI-DENORM-DAG: v_fma_f16 v[[RES1:[0-9]+]], v[[A_F16_1]], v[[B_F16_1]], v[[C_F16_1]]
-; VI-DENORM: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[RES0]]
-; VI-DENORM: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[RES1]]
+; VI-DENORM-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[RES1]]
+; VI-DENORM-NOT: v_and_b32
+; VI-DENORM: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[RES0]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @fmuladd_v2f16(
+
+define amdgpu_kernel void @fmuladd_v2f16(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b,
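Two independent updates meet in the fmuladd file: the RUN lines move to the merged fp64-fp16-denormals subtarget feature (f16 and f64 denormal modes share one control, so the old fp16-denormals name went away), and the FLUSH/DENORM check split reflects the lowering choice for fmuladd: when denormals may be flushed, the multiply-add can use v_mac_f16, but with denormals enabled it must use v_fma_f16, which preserves them. Schematically (a sketch, not patch content):

;   flush mode:   fmuladd(a, b, c)  ->  v_mac_f16 c, a, b      ; c += a * b
;   denorm mode:  fmuladd(a, b, c)  ->  v_fma_f16 r, a, b, c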
diff --git a/test/CodeGen/AMDGPU/llvm.log2.f16.ll b/test/CodeGen/AMDGPU/llvm.log2.f16.ll
index 8d1a8973cb4e..773eb55283e4 100644
--- a/test/CodeGen/AMDGPU/llvm.log2.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.log2.f16.ll
@@ -12,7 +12,7 @@ declare <2 x half> @llvm.log2.v2f16(<2 x half> %a)
; VI: v_log_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @log2_f16(
+define amdgpu_kernel void @log2_f16(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -24,21 +24,25 @@ entry:
; GCN-LABEL: {{^}}log2_v2f16
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_log_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]]
-; SI: v_log_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_log_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
-; VI: v_log_f16_e32 v[[R_F16_1:[0-9]+]], v[[A_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI-DAG: v_log_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]]
+; SI-DAG: v_log_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI-NOT: and
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; VI-DAG: v_log_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
+; VI-DAG: v_log_f16_sdwa v[[R_F16_1:[0-9]+]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
+; VI-NOT: and
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @log2_v2f16(
+define amdgpu_kernel void @log2_v2f16(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
diff --git a/test/CodeGen/AMDGPU/llvm.log2.ll b/test/CodeGen/AMDGPU/llvm.log2.ll
index c75e7850b353..b9d593e43f32 100644
--- a/test/CodeGen/AMDGPU/llvm.log2.ll
+++ b/test/CodeGen/AMDGPU/llvm.log2.ll
@@ -11,7 +11,7 @@
;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
;SI: v_log_f32
-define void @test(float addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @test(float addrspace(1)* %out, float %in) {
entry:
%0 = call float @llvm.log2.f32(float %in)
store float %0, float addrspace(1)* %out
@@ -34,7 +34,7 @@ entry:
;SI: v_log_f32
;SI: v_log_f32
-define void @testv2(<2 x float> addrspace(1)* %out, <2 x float> %in) {
+define amdgpu_kernel void @testv2(<2 x float> addrspace(1)* %out, <2 x float> %in) {
entry:
%0 = call <2 x float> @llvm.log2.v2f32(<2 x float> %in)
store <2 x float> %0, <2 x float> addrspace(1)* %out
@@ -68,7 +68,7 @@ entry:
;SI: v_log_f32
;SI: v_log_f32
;SI: v_log_f32
-define void @testv4(<4 x float> addrspace(1)* %out, <4 x float> %in) {
+define amdgpu_kernel void @testv4(<4 x float> addrspace(1)* %out, <4 x float> %in) {
entry:
%0 = call <4 x float> @llvm.log2.v4f32(<4 x float> %in)
store <4 x float> %0, <4 x float> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll b/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll
index 8adc01b7b8c7..4c8dff52509a 100644
--- a/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll
@@ -4,7 +4,7 @@
declare half @llvm.maxnum.f16(half %a, half %b)
declare <2 x half> @llvm.maxnum.v2f16(<2 x half> %a, <2 x half> %b)
-; GCN-LABEL: {{^}}maxnum_f16
+; GCN-LABEL: {{^}}maxnum_f16:
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
@@ -14,7 +14,7 @@ declare <2 x half> @llvm.maxnum.v2f16(<2 x half> %a, <2 x half> %b)
; VI: v_max_f16_e32 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @maxnum_f16(
+define amdgpu_kernel void @maxnum_f16(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -26,16 +26,15 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}maxnum_f16_imm_a
+; GCN-LABEL: {{^}}maxnum_f16_imm_a:
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], 0x4200{{$}}
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
-; SI: v_max_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]], v[[B_F32]]
+; SI: v_max_f32_e32 v[[R_F32:[0-9]+]], 0x40400000, v[[B_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
; VI: v_max_f16_e32 v[[R_F16:[0-9]+]], 0x4200, v[[B_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @maxnum_f16_imm_a(
+define amdgpu_kernel void @maxnum_f16_imm_a(
half addrspace(1)* %r,
half addrspace(1)* %b) {
entry:
@@ -45,16 +44,15 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}maxnum_f16_imm_b
+; GCN-LABEL: {{^}}maxnum_f16_imm_b:
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], 0x4400{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
-; SI: v_max_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_max_f32_e32 v[[R_F32:[0-9]+]], 4.0, v[[A_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
; VI: v_max_f16_e32 v[[R_F16:[0-9]+]], 4.0, v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @maxnum_f16_imm_b(
+define amdgpu_kernel void @maxnum_f16_imm_b(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -64,27 +62,33 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}maxnum_v2f16
+; GCN-LABEL: {{^}}maxnum_v2f16:
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
; SI: v_max_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; SI: v_max_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_max_f16_e32 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
-; VI: v_max_f16_e32 v[[R_F16_1:[0-9]+]], v[[B_F16_1]], v[[A_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI-DAG: v_max_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; SI: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI-NOT: and
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; VI-DAG: v_max_f16_e32 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
+; VI-DAG: v_max_f16_sdwa v[[R_F16_1:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NOT: and
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @maxnum_v2f16(
+define amdgpu_kernel void @maxnum_v2f16(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
@@ -96,25 +100,24 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}maxnum_v2f16_imm_a
+; GCN-LABEL: {{^}}maxnum_v2f16_imm_a:
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], 0x4200{{$}}
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], 0x4400{{$}}
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_max_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_max_f32_e32 v[[R_F32_0:[0-9]+]], 0x40400000, v[[B_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; SI: v_max_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]]
+; SI: v_max_f32_e32 v[[R_F32_1:[0-9]+]], 4.0, v[[B_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_max_f16_e32 v[[R_F16_0:[0-9]+]], 0x4200, v[[B_V2_F16]]
-; VI: v_max_f16_e32 v[[R_F16_1:[0-9]+]], 4.0, v[[B_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; VI-DAG: v_max_f16_e32 v[[R_F16_1:[0-9]+]], 4.0, v[[B_F16_1]]
+; VI-DAG: v_max_f16_e32 v[[R_F16_0:[0-9]+]], 0x4200, v[[B_V2_F16]]
+
+; GCN-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN-NOT: and
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @maxnum_v2f16_imm_a(
+define amdgpu_kernel void @maxnum_v2f16_imm_a(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %b) {
entry:
@@ -124,25 +127,24 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}maxnum_v2f16_imm_b
+; GCN-LABEL: {{^}}maxnum_v2f16_imm_b:
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], 0x4400{{$}}
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], 0x4200{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_max_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
+; SI: v_max_f32_e32 v[[R_F32_0:[0-9]+]], 4.0, v[[A_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; SI: v_max_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_max_f32_e32 v[[R_F32_1:[0-9]+]], 0x40400000, v[[A_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_max_f16_e32 v[[R_F16_0:[0-9]+]], 4.0, v[[A_V2_F16]]
-; VI: v_max_f16_e32 v[[R_F16_1:[0-9]+]], 0x4200, v[[A_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; VI-DAG: v_max_f16_e32 v[[R_F16_1:[0-9]+]], 0x4200, v[[A_F16_1]]
+; VI-DAG: v_max_f16_e32 v[[R_F16_0:[0-9]+]], 4.0, v[[A_V2_F16]]
+
+; GCN-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN-NOT: and
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @maxnum_v2f16_imm_b(
+define amdgpu_kernel void @maxnum_v2f16_imm_b(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
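The memcpy tests below pick up only the calling-convention change, but their check bodies map out how a fixed 32-byte llvm.memcpy is expanded by alignment: align 1 becomes byte-wide ds_write_b8/buffer_store_byte sequences, align 2 becomes 16-bit stores, and align 4 and up collapses into ds_write2_b32/ds_write2_b64 pairs or buffer_store_dwordx4. The source side of that pattern, as these tests write it (the old five-operand memcpy signature, where the fourth operand is the alignment):

  call void @llvm.memcpy.p3i8.p3i8.i32(i8 addrspace(3)* %dst, i8 addrspace(3)* %src, i32 32, i32 4, i1 false)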
diff --git a/test/CodeGen/AMDGPU/llvm.memcpy.ll b/test/CodeGen/AMDGPU/llvm.memcpy.ll
index 009338d273f5..7b4db55155eb 100644
--- a/test/CodeGen/AMDGPU/llvm.memcpy.ll
+++ b/test/CodeGen/AMDGPU/llvm.memcpy.ll
@@ -80,7 +80,7 @@ declare void @llvm.memcpy.p1i8.p2i8.i64(i8 addrspace(1)* nocapture, i8 addrspace
; SI-DAG: ds_write_b8
; SI: s_endpgm
-define void @test_small_memcpy_i64_lds_to_lds_align1(i64 addrspace(3)* noalias %out, i64 addrspace(3)* noalias %in) nounwind {
+define amdgpu_kernel void @test_small_memcpy_i64_lds_to_lds_align1(i64 addrspace(3)* noalias %out, i64 addrspace(3)* noalias %in) nounwind {
%bcin = bitcast i64 addrspace(3)* %in to i8 addrspace(3)*
%bcout = bitcast i64 addrspace(3)* %out to i8 addrspace(3)*
call void @llvm.memcpy.p3i8.p3i8.i32(i8 addrspace(3)* %bcout, i8 addrspace(3)* %bcin, i32 32, i32 1, i1 false) nounwind
@@ -125,7 +125,7 @@ define void @test_small_memcpy_i64_lds_to_lds_align1(i64 addrspace(3)* noalias %
; SI-DAG: ds_write_b16
; SI: s_endpgm
-define void @test_small_memcpy_i64_lds_to_lds_align2(i64 addrspace(3)* noalias %out, i64 addrspace(3)* noalias %in) nounwind {
+define amdgpu_kernel void @test_small_memcpy_i64_lds_to_lds_align2(i64 addrspace(3)* noalias %out, i64 addrspace(3)* noalias %in) nounwind {
%bcin = bitcast i64 addrspace(3)* %in to i8 addrspace(3)*
%bcout = bitcast i64 addrspace(3)* %out to i8 addrspace(3)*
call void @llvm.memcpy.p3i8.p3i8.i32(i8 addrspace(3)* %bcout, i8 addrspace(3)* %bcin, i32 32, i32 2, i1 false) nounwind
@@ -144,7 +144,7 @@ define void @test_small_memcpy_i64_lds_to_lds_align2(i64 addrspace(3)* noalias %
; SI: ds_write2_b32
; SI: s_endpgm
-define void @test_small_memcpy_i64_lds_to_lds_align4(i64 addrspace(3)* noalias %out, i64 addrspace(3)* noalias %in) nounwind {
+define amdgpu_kernel void @test_small_memcpy_i64_lds_to_lds_align4(i64 addrspace(3)* noalias %out, i64 addrspace(3)* noalias %in) nounwind {
%bcin = bitcast i64 addrspace(3)* %in to i8 addrspace(3)*
%bcout = bitcast i64 addrspace(3)* %out to i8 addrspace(3)*
call void @llvm.memcpy.p3i8.p3i8.i32(i8 addrspace(3)* %bcout, i8 addrspace(3)* %bcin, i32 32, i32 4, i1 false) nounwind
@@ -161,7 +161,7 @@ define void @test_small_memcpy_i64_lds_to_lds_align4(i64 addrspace(3)* noalias %
; SI: ds_write2_b64
; SI-DAG: s_endpgm
-define void @test_small_memcpy_i64_lds_to_lds_align8(i64 addrspace(3)* noalias %out, i64 addrspace(3)* noalias %in) nounwind {
+define amdgpu_kernel void @test_small_memcpy_i64_lds_to_lds_align8(i64 addrspace(3)* noalias %out, i64 addrspace(3)* noalias %in) nounwind {
%bcin = bitcast i64 addrspace(3)* %in to i8 addrspace(3)*
%bcout = bitcast i64 addrspace(3)* %out to i8 addrspace(3)*
call void @llvm.memcpy.p3i8.p3i8.i32(i8 addrspace(3)* %bcout, i8 addrspace(3)* %bcin, i32 32, i32 8, i1 false) nounwind
@@ -238,7 +238,7 @@ define void @test_small_memcpy_i64_lds_to_lds_align8(i64 addrspace(3)* noalias %
; SI-DAG: buffer_store_byte
; SI: s_endpgm
-define void @test_small_memcpy_i64_global_to_global_align1(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @test_small_memcpy_i64_global_to_global_align1(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
%bcin = bitcast i64 addrspace(1)* %in to i8 addrspace(1)*
%bcout = bitcast i64 addrspace(1)* %out to i8 addrspace(1)*
call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %bcout, i8 addrspace(1)* %bcin, i64 32, i32 1, i1 false) nounwind
@@ -281,7 +281,7 @@ define void @test_small_memcpy_i64_global_to_global_align1(i64 addrspace(1)* noa
; SI-DAG: buffer_store_short
; SI: s_endpgm
-define void @test_small_memcpy_i64_global_to_global_align2(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @test_small_memcpy_i64_global_to_global_align2(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
%bcin = bitcast i64 addrspace(1)* %in to i8 addrspace(1)*
%bcout = bitcast i64 addrspace(1)* %out to i8 addrspace(1)*
call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %bcout, i8 addrspace(1)* %bcin, i64 32, i32 2, i1 false) nounwind
@@ -294,7 +294,7 @@ define void @test_small_memcpy_i64_global_to_global_align2(i64 addrspace(1)* noa
; SI: buffer_store_dwordx4
; SI: buffer_store_dwordx4
; SI: s_endpgm
-define void @test_small_memcpy_i64_global_to_global_align4(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @test_small_memcpy_i64_global_to_global_align4(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
%bcin = bitcast i64 addrspace(1)* %in to i8 addrspace(1)*
%bcout = bitcast i64 addrspace(1)* %out to i8 addrspace(1)*
call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %bcout, i8 addrspace(1)* %bcin, i64 32, i32 4, i1 false) nounwind
@@ -307,7 +307,7 @@ define void @test_small_memcpy_i64_global_to_global_align4(i64 addrspace(1)* noa
; SI: buffer_store_dwordx4
; SI: buffer_store_dwordx4
; SI: s_endpgm
-define void @test_small_memcpy_i64_global_to_global_align8(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @test_small_memcpy_i64_global_to_global_align8(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
%bcin = bitcast i64 addrspace(1)* %in to i8 addrspace(1)*
%bcout = bitcast i64 addrspace(1)* %out to i8 addrspace(1)*
call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %bcout, i8 addrspace(1)* %bcin, i64 32, i32 8, i1 false) nounwind
@@ -320,7 +320,7 @@ define void @test_small_memcpy_i64_global_to_global_align8(i64 addrspace(1)* noa
; SI: buffer_store_dwordx4
; SI: buffer_store_dwordx4
; SI: s_endpgm
-define void @test_small_memcpy_i64_global_to_global_align16(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @test_small_memcpy_i64_global_to_global_align16(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
%bcin = bitcast i64 addrspace(1)* %in to i8 addrspace(1)*
%bcout = bitcast i64 addrspace(1)* %out to i8 addrspace(1)*
call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %bcout, i8 addrspace(1)* %bcin, i64 32, i32 16, i1 false) nounwind
@@ -340,7 +340,7 @@ define void @test_small_memcpy_i64_global_to_global_align16(i64 addrspace(1)* no
; SI-DAG: s_load_dwordx2
; SI-DAG: buffer_store_dwordx4
; SI-DAG: buffer_store_dwordx4
-define void @test_memcpy_const_string_align4(i8 addrspace(1)* noalias %out) nounwind {
+define amdgpu_kernel void @test_memcpy_const_string_align4(i8 addrspace(1)* noalias %out) nounwind {
%str = bitcast [16 x i8] addrspace(2)* @hello.align4 to i8 addrspace(2)*
call void @llvm.memcpy.p1i8.p2i8.i64(i8 addrspace(1)* %out, i8 addrspace(2)* %str, i64 32, i32 4, i1 false)
ret void
@@ -365,7 +365,7 @@ define void @test_memcpy_const_string_align4(i8 addrspace(1)* noalias %out) noun
; SI: buffer_store_byte
; SI: buffer_store_byte
; SI: buffer_store_byte
-define void @test_memcpy_const_string_align1(i8 addrspace(1)* noalias %out) nounwind {
+define amdgpu_kernel void @test_memcpy_const_string_align1(i8 addrspace(1)* noalias %out) nounwind {
%str = bitcast [16 x i8] addrspace(2)* @hello.align1 to i8 addrspace(2)*
call void @llvm.memcpy.p1i8.p2i8.i64(i8 addrspace(1)* %out, i8 addrspace(2)* %str, i64 32, i32 1, i1 false)
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.minnum.f16.ll b/test/CodeGen/AMDGPU/llvm.minnum.f16.ll
index 4cc1deb2095c..b8221356b664 100644
--- a/test/CodeGen/AMDGPU/llvm.minnum.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.minnum.f16.ll
@@ -4,7 +4,7 @@
declare half @llvm.minnum.f16(half %a, half %b)
declare <2 x half> @llvm.minnum.v2f16(<2 x half> %a, <2 x half> %b)
-; GCN-LABEL: {{^}}minnum_f16
+; GCN-LABEL: {{^}}minnum_f16:
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
@@ -14,7 +14,7 @@ declare <2 x half> @llvm.minnum.v2f16(<2 x half> %a, <2 x half> %b)
; VI: v_min_f16_e32 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @minnum_f16(
+define amdgpu_kernel void @minnum_f16(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -26,16 +26,15 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}minnum_f16_imm_a
+; GCN-LABEL: {{^}}minnum_f16_imm_a:
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], 0x4200{{$}}
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
-; SI: v_min_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]], v[[B_F32]]
+; SI: v_min_f32_e32 v[[R_F32:[0-9]+]], 0x40400000, v[[B_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
; VI: v_min_f16_e32 v[[R_F16:[0-9]+]], 0x4200, v[[B_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @minnum_f16_imm_a(
+define amdgpu_kernel void @minnum_f16_imm_a(
half addrspace(1)* %r,
half addrspace(1)* %b) {
entry:
@@ -45,16 +44,15 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}minnum_f16_imm_b
+; GCN-LABEL: {{^}}minnum_f16_imm_b:
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], 0x4400{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
-; SI: v_min_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_min_f32_e32 v[[R_F32:[0-9]+]], 4.0, v[[A_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
; VI: v_min_f16_e32 v[[R_F16:[0-9]+]], 4.0, v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @minnum_f16_imm_b(
+define amdgpu_kernel void @minnum_f16_imm_b(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -64,27 +62,32 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}minnum_v2f16
+; GCN-LABEL: {{^}}minnum_v2f16:
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
; SI: v_min_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; SI: v_min_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_min_f16_e32 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
-; VI: v_min_f16_e32 v[[R_F16_1:[0-9]+]], v[[B_F16_1]], v[[A_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI-DAG: v_min_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI-NOT: and
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; VI-DAG: v_min_f16_e32 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
+; VI-DAG: v_min_f16_sdwa v[[R_F16_1:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NOT: and
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @minnum_v2f16(
+define amdgpu_kernel void @minnum_v2f16(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b) {
@@ -96,25 +99,27 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}minnum_v2f16_imm_a
+; GCN-LABEL: {{^}}minnum_v2f16_imm_a:
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], 0x4200{{$}}
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], 0x4400{{$}}
+
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
-; SI: v_min_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; SI: v_min_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_min_f16_e32 v[[R_F16_0:[0-9]+]], 0x4200, v[[B_V2_F16]]
-; VI: v_min_f16_e32 v[[R_F16_1:[0-9]+]], 4.0, v[[B_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI: v_min_f32_e32 v[[R_F32_0:[0-9]+]], 0x40400000, v[[B_F32_0]]
+; SI-DAG: v_min_f32_e32 v[[R_F32_1:[0-9]+]], 4.0, v[[B_F32_1]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+
+; VI: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; VI-DAG: v_min_f16_e32 v[[R_F16_1:[0-9]+]], 4.0, v[[B_F16_1]]
+; VI-DAG: v_min_f16_e32 v[[R_F16_0:[0-9]+]], 0x4200, v[[B_V2_F16]]
+
+; GCN-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN-NOT: and
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @minnum_v2f16_imm_a(
+define amdgpu_kernel void @minnum_v2f16_imm_a(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %b) {
entry:
@@ -124,25 +129,24 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}minnum_v2f16_imm_b
+; GCN-LABEL: {{^}}minnum_v2f16_imm_b:
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], 0x4400{{$}}
-; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], 0x4200{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_min_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
+; SI: v_min_f32_e32 v[[R_F32_0:[0-9]+]], 4.0, v[[A_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; SI: v_min_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_min_f32_e32 v[[R_F32_1:[0-9]+]], 0x40400000, v[[A_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_min_f16_e32 v[[R_F16_0:[0-9]+]], 4.0, v[[A_V2_F16]]
-; VI: v_min_f16_e32 v[[R_F16_1:[0-9]+]], 0x4200, v[[A_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; VI-DAG: v_min_f16_e32 v[[R_F16_1:[0-9]+]], 0x4200, v[[A_F16_1]]
+; VI-DAG: v_min_f16_e32 v[[R_F16_0:[0-9]+]], 4.0, v[[A_V2_F16]]
+
+; GCN-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN-NOT: and
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @minnum_v2f16_imm_b(
+define amdgpu_kernel void @minnum_v2f16_imm_b(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
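
The minnum checks above end in a bare shift-and-or guarded by -NOT: and lines: each f16 result is assumed to already have a zeroed high half, so no mask is needed before packing. A minimal LLVM IR sketch of that repack (illustrative only, not part of the patch):

; Pack two f16 results, each held in the low 16 bits of a VGPR, into one
; dword. No v_and_b32 is required when the high halves are known zero.
define i32 @pack_two_f16(i32 %lo, i32 %hi) {
  %hi.shl = shl i32 %hi, 16     ; v_lshlrev_b32_e32 dst, 16, hi
  %packed = or i32 %hi.shl, %lo ; v_or_b32_e32 dst, hi.shl, lo
  ret i32 %packed
}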
diff --git a/test/CodeGen/AMDGPU/llvm.AMDGPU.cube.ll b/test/CodeGen/AMDGPU/llvm.r600.cube.ll
index 78b88122229b..b5a0de95acf5 100644
--- a/test/CodeGen/AMDGPU/llvm.AMDGPU.cube.ll
+++ b/test/CodeGen/AMDGPU/llvm.r600.cube.ll
@@ -22,7 +22,7 @@ main_body:
%tmp12 = insertelement <4 x float> %tmp11, float %tmp7, i32 1
%tmp13 = insertelement <4 x float> %tmp12, float %tmp10, i32 2
%tmp14 = insertelement <4 x float> %tmp13, float 1.000000e+00, i32 3
- %tmp15 = call <4 x float> @llvm.AMDGPU.cube(<4 x float> %tmp14)
+ %tmp15 = call <4 x float> @llvm.r600.cube(<4 x float> %tmp14)
%tmp16 = extractelement <4 x float> %tmp15, i32 0
%tmp17 = extractelement <4 x float> %tmp15, i32 1
%tmp18 = extractelement <4 x float> %tmp15, i32 2
@@ -44,7 +44,7 @@ main_body:
}
; Function Attrs: readnone
-declare <4 x float> @llvm.AMDGPU.cube(<4 x float>) #0
+declare <4 x float> @llvm.r600.cube(<4 x float>) #0
; Function Attrs: nounwind readnone
declare float @llvm.fabs.f32(float) #0
diff --git a/test/CodeGen/AMDGPU/llvm.r600.dot4.ll b/test/CodeGen/AMDGPU/llvm.r600.dot4.ll
index 4db29c58385e..de8a47741c94 100644
--- a/test/CodeGen/AMDGPU/llvm.r600.dot4.ll
+++ b/test/CodeGen/AMDGPU/llvm.r600.dot4.ll
@@ -2,7 +2,7 @@
declare float @llvm.r600.dot4(<4 x float>, <4 x float>) nounwind readnone
-define void @test_dp4(float addrspace(1)* %out, <4 x float> addrspace(1)* %a, <4 x float> addrspace(1)* %b) nounwind {
+define amdgpu_kernel void @test_dp4(float addrspace(1)* %out, <4 x float> addrspace(1)* %a, <4 x float> addrspace(1)* %b) nounwind {
%src0 = load <4 x float>, <4 x float> addrspace(1)* %a, align 16
%src1 = load <4 x float>, <4 x float> addrspace(1)* %b, align 16
%dp4 = call float @llvm.r600.dot4(<4 x float> %src0, <4 x float> %src1) nounwind readnone
diff --git a/test/CodeGen/AMDGPU/llvm.r600.group.barrier.ll b/test/CodeGen/AMDGPU/llvm.r600.group.barrier.ll
index e4e6dd8e1069..93caafbb9524 100644
--- a/test/CodeGen/AMDGPU/llvm.r600.group.barrier.ll
+++ b/test/CodeGen/AMDGPU/llvm.r600.group.barrier.ll
@@ -2,7 +2,7 @@
; EG-LABEL: {{^}}test_group_barrier:
; EG: GROUP_BARRIER
-define void @test_group_barrier(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @test_group_barrier(i32 addrspace(1)* %out) #0 {
entry:
%tmp = call i32 @llvm.r600.read.tidig.x()
%tmp1 = getelementptr i32, i32 addrspace(1)* %out, i32 %tmp
diff --git a/test/CodeGen/AMDGPU/llvm.r600.read.local.size.ll b/test/CodeGen/AMDGPU/llvm.r600.read.local.size.ll
index a5b07e072fa5..82c42601ef1e 100644
--- a/test/CodeGen/AMDGPU/llvm.r600.read.local.size.ll
+++ b/test/CodeGen/AMDGPU/llvm.r600.read.local.size.ll
@@ -14,7 +14,7 @@
; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[VVAL]]
-define void @local_size_x(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @local_size_x(i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.local.size.x() #0
store i32 %0, i32 addrspace(1)* %out
@@ -29,7 +29,7 @@ entry:
; VI-NOHSA: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x1c
; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[VVAL]]
-define void @local_size_y(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @local_size_y(i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.local.size.y() #0
store i32 %0, i32 addrspace(1)* %out
@@ -44,7 +44,7 @@ entry:
; VI-NOHSA: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x20
; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[VVAL]]
-define void @local_size_z(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @local_size_z(i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.local.size.z() #0
store i32 %0, i32 addrspace(1)* %out
@@ -59,7 +59,7 @@ entry:
; GCN-DAG: v_mov_b32_e32 [[VY:v[0-9]+]], [[Y]]
; GCN: v_mul_u32_u24_e32 [[VAL:v[0-9]+]], [[X]], [[VY]]
; GCN: buffer_store_dword [[VAL]]
-define void @local_size_xy(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @local_size_xy(i32 addrspace(1)* %out) {
entry:
%x = call i32 @llvm.r600.read.local.size.x() #0
%y = call i32 @llvm.r600.read.local.size.y() #0
@@ -78,7 +78,7 @@ entry:
; GCN-DAG: v_mov_b32_e32 [[VZ:v[0-9]+]], [[Z]]
; GCN: v_mul_u32_u24_e32 [[VAL:v[0-9]+]], [[X]], [[VZ]]
; GCN: buffer_store_dword [[VAL]]
-define void @local_size_xz(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @local_size_xz(i32 addrspace(1)* %out) {
entry:
%x = call i32 @llvm.r600.read.local.size.x() #0
%z = call i32 @llvm.r600.read.local.size.z() #0
@@ -98,7 +98,7 @@ entry:
; GCN-DAG: v_mov_b32_e32 [[VZ:v[0-9]+]], [[Z]]
; GCN: v_mul_u32_u24_e32 [[VAL:v[0-9]+]], [[Y]], [[VZ]]
; GCN: buffer_store_dword [[VAL]]
-define void @local_size_yz(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @local_size_yz(i32 addrspace(1)* %out) {
entry:
%y = call i32 @llvm.r600.read.local.size.y() #0
%z = call i32 @llvm.r600.read.local.size.z() #0
@@ -121,7 +121,7 @@ entry:
; GCN-DAG: v_mov_b32_e32 [[VZ:v[0-9]+]], [[Z]]
; GCN: v_mad_u32_u24 [[VAL:v[0-9]+]], [[X]], [[VY]], [[VZ]]
; GCN: buffer_store_dword [[VAL]]
-define void @local_size_xyz(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @local_size_xyz(i32 addrspace(1)* %out) {
entry:
%x = call i32 @llvm.r600.read.local.size.x() #0
%y = call i32 @llvm.r600.read.local.size.y() #0
@@ -138,7 +138,7 @@ entry:
; GCN-NOT: 0xffff
; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
; GCN-NEXT: buffer_store_dword [[VVAL]]
-define void @local_size_x_known_bits(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @local_size_x_known_bits(i32 addrspace(1)* %out) {
entry:
%size = call i32 @llvm.r600.read.local.size.x() #0
%shl = shl i32 %size, 16
@@ -153,7 +153,7 @@ entry:
; GCN-NOT: 0xffff
; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
; GCN-NEXT: buffer_store_dword [[VVAL]]
-define void @local_size_y_known_bits(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @local_size_y_known_bits(i32 addrspace(1)* %out) {
entry:
%size = call i32 @llvm.r600.read.local.size.y() #0
%shl = shl i32 %size, 16
@@ -168,7 +168,7 @@ entry:
; GCN-NOT: 0xffff
; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
; GCN-NEXT: buffer_store_dword [[VVAL]]
-define void @local_size_z_known_bits(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @local_size_z_known_bits(i32 addrspace(1)* %out) {
entry:
%size = call i32 @llvm.r600.read.local.size.z() #0
%shl = shl i32 %size, 16
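
The known-bits tests above (see the GCN-NOT: 0xffff lines) rely on the local size fitting in 16 bits, so a shl-by-16 whose result is later shifted back down needs no explicit mask. A minimal sketch of that folding (illustrative; the lshr half of the round trip is assumed, since the hunks elide it):

define i32 @known_bits_roundtrip(i32 %size) {
  %shl = shl i32 %size, 16
  %shr = lshr i32 %shl, 16   ; folds to %size when its top 16 bits are known zero
  ret i32 %shr
}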
diff --git a/test/CodeGen/AMDGPU/llvm.r600.recipsqrt.clamped.ll b/test/CodeGen/AMDGPU/llvm.r600.recipsqrt.clamped.ll
index 1c6e7950e9b7..90d076d4fb4d 100644
--- a/test/CodeGen/AMDGPU/llvm.r600.recipsqrt.clamped.ll
+++ b/test/CodeGen/AMDGPU/llvm.r600.recipsqrt.clamped.ll
@@ -4,7 +4,7 @@ declare float @llvm.r600.recipsqrt.clamped.f32(float) nounwind readnone
; EG-LABEL: {{^}}rsq_clamped_f32:
; EG: RECIPSQRT_CLAMPED
-define void @rsq_clamped_f32(float addrspace(1)* %out, float %src) nounwind {
+define amdgpu_kernel void @rsq_clamped_f32(float addrspace(1)* %out, float %src) nounwind {
%rsq_clamped = call float @llvm.r600.recipsqrt.clamped.f32(float %src)
store float %rsq_clamped, float addrspace(1)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.r600.recipsqrt.ieee.ll b/test/CodeGen/AMDGPU/llvm.r600.recipsqrt.ieee.ll
index 1d6bff01e662..d9177b39b8ac 100644
--- a/test/CodeGen/AMDGPU/llvm.r600.recipsqrt.ieee.ll
+++ b/test/CodeGen/AMDGPU/llvm.r600.recipsqrt.ieee.ll
@@ -4,7 +4,7 @@ declare float @llvm.r600.recipsqrt.ieee.f32(float) nounwind readnone
; EG-LABEL: {{^}}recipsqrt.ieee_f32:
; EG: RECIPSQRT_IEEE
-define void @recipsqrt.ieee_f32(float addrspace(1)* %out, float %src) nounwind {
+define amdgpu_kernel void @recipsqrt.ieee_f32(float addrspace(1)* %out, float %src) nounwind {
%recipsqrt.ieee = call float @llvm.r600.recipsqrt.ieee.f32(float %src) nounwind readnone
store float %recipsqrt.ieee, float addrspace(1)* %out, align 4
ret void
@@ -13,7 +13,7 @@ define void @recipsqrt.ieee_f32(float addrspace(1)* %out, float %src) nounwind {
; TODO: Really these should be constant folded
; EG-LABEL: {{^}}recipsqrt.ieee_f32_constant_4.0
; EG: RECIPSQRT_IEEE
-define void @recipsqrt.ieee_f32_constant_4.0(float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @recipsqrt.ieee_f32_constant_4.0(float addrspace(1)* %out) nounwind {
%recipsqrt.ieee = call float @llvm.r600.recipsqrt.ieee.f32(float 4.0) nounwind readnone
store float %recipsqrt.ieee, float addrspace(1)* %out, align 4
ret void
@@ -21,7 +21,7 @@ define void @recipsqrt.ieee_f32_constant_4.0(float addrspace(1)* %out) nounwind
; EG-LABEL: {{^}}recipsqrt.ieee_f32_constant_100.0
; EG: RECIPSQRT_IEEE
-define void @recipsqrt.ieee_f32_constant_100.0(float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @recipsqrt.ieee_f32_constant_100.0(float addrspace(1)* %out) nounwind {
%recipsqrt.ieee = call float @llvm.r600.recipsqrt.ieee.f32(float 100.0) nounwind readnone
store float %recipsqrt.ieee, float addrspace(1)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.r600.tex.ll b/test/CodeGen/AMDGPU/llvm.r600.tex.ll
index 409037f3e976..98044917e2b0 100644
--- a/test/CodeGen/AMDGPU/llvm.r600.tex.ll
+++ b/test/CodeGen/AMDGPU/llvm.r600.tex.ll
@@ -17,7 +17,7 @@
;CHECK: TEX_SAMPLE T{{[0-9]+\.XYZW, T[0-9]+\.XYZW}} RID:0 SID:0 CT:NNNN
;CHECK: TEX_SAMPLE T{{[0-9]+\.XYZW, T[0-9]+\.XYZW}} RID:0 SID:0 CT:NNUN
-define void @test(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+define amdgpu_kernel void @test(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
bb:
%addr = load <4 x float>, <4 x float> addrspace(1)* %in
%tmp = shufflevector <4 x float> %addr, <4 x float> %addr, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
diff --git a/test/CodeGen/AMDGPU/llvm.rint.f16.ll b/test/CodeGen/AMDGPU/llvm.rint.f16.ll
index 3657940f36fd..59e81a7acc0b 100644
--- a/test/CodeGen/AMDGPU/llvm.rint.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.rint.f16.ll
@@ -1,5 +1,6 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=SIVI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SIVI -check-prefix=VI -check-prefix=GFX89 %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX89 -check-prefix=GFX9 %s
declare half @llvm.rint.f16(half %a)
declare <2 x half> @llvm.rint.v2f16(<2 x half> %a)
@@ -9,10 +10,10 @@ declare <2 x half> @llvm.rint.v2f16(<2 x half> %a)
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_rndne_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
-; VI: v_rndne_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
+; GFX89: v_rndne_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @rint_f16(
+define amdgpu_kernel void @rint_f16(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -25,20 +26,30 @@ entry:
; GCN-LABEL: {{^}}rint_v2f16
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_rndne_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI: v_rndne_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_rndne_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
-; VI: v_rndne_f16_e32 v[[R_F16_1:[0-9]+]], v[[A_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI-NOT: v_and_b32
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; VI-DAG: v_rndne_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
+; VI-DAG: v_rndne_f16_sdwa v[[R_F16_1:[0-9]+]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
+; VI-NOT: v_and_b32
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+
+; GFX9: v_rndne_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
+; GFX9: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; GFX9: v_rndne_f16_e32 v[[R_F16_1:[0-9]+]], v[[A_F16_1]]
+; GFX9: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GFX9: v_lshl_or_b32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], 16, v[[R_F16_LO]]
+
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @rint_v2f16(
+define amdgpu_kernel void @rint_v2f16(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
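
On gfx900 the repack changes shape: the new GFX9 checks mask the low result with 0xffff and fuse the shift and or into a single v_lshl_or_b32. An illustrative IR equivalent of that pack:

; GFX9-style pack: (hi << 16) | (lo & 0xffff), with the shift and or fused
; into v_lshl_or_b32 dst, hi, 16, lo_masked.
define i32 @pack_two_f16_gfx9(i32 %lo, i32 %hi) {
  %lo.m = and i32 %lo, 65535     ; v_and_b32_e32 dst, 0xffff, lo
  %hi.shl = shl i32 %hi, 16
  %packed = or i32 %hi.shl, %lo.m
  ret i32 %packed
}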
diff --git a/test/CodeGen/AMDGPU/llvm.rint.f64.ll b/test/CodeGen/AMDGPU/llvm.rint.f64.ll
index c63fb1727940..30ce8ed83ff1 100644
--- a/test/CodeGen/AMDGPU/llvm.rint.f64.ll
+++ b/test/CodeGen/AMDGPU/llvm.rint.f64.ll
@@ -11,7 +11,7 @@
; SI: v_cndmask_b32
; SI: v_cndmask_b32
; SI: s_endpgm
-define void @rint_f64(double addrspace(1)* %out, double %in) {
+define amdgpu_kernel void @rint_f64(double addrspace(1)* %out, double %in) {
entry:
%0 = call double @llvm.rint.f64(double %in)
store double %0, double addrspace(1)* %out
@@ -21,7 +21,7 @@ entry:
; FUNC-LABEL: {{^}}rint_v2f64:
; CI: v_rndne_f64_e32
; CI: v_rndne_f64_e32
-define void @rint_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
+define amdgpu_kernel void @rint_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
entry:
%0 = call <2 x double> @llvm.rint.v2f64(<2 x double> %in)
store <2 x double> %0, <2 x double> addrspace(1)* %out
@@ -33,7 +33,7 @@ entry:
; CI: v_rndne_f64_e32
; CI: v_rndne_f64_e32
; CI: v_rndne_f64_e32
-define void @rint_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
+define amdgpu_kernel void @rint_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
entry:
%0 = call <4 x double> @llvm.rint.v4f64(<4 x double> %in)
store <4 x double> %0, <4 x double> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/llvm.rint.ll b/test/CodeGen/AMDGPU/llvm.rint.ll
index cf7c0e4c6fb6..4056bc39448d 100644
--- a/test/CodeGen/AMDGPU/llvm.rint.ll
+++ b/test/CodeGen/AMDGPU/llvm.rint.ll
@@ -6,7 +6,7 @@
; R600: RNDNE
; SI: v_rndne_f32_e32
-define void @rint_f32(float addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @rint_f32(float addrspace(1)* %out, float %in) {
entry:
%0 = call float @llvm.rint.f32(float %in) #0
store float %0, float addrspace(1)* %out
@@ -19,7 +19,7 @@ entry:
; SI: v_rndne_f32_e32
; SI: v_rndne_f32_e32
-define void @rint_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
+define amdgpu_kernel void @rint_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
entry:
%0 = call <2 x float> @llvm.rint.v2f32(<2 x float> %in) #0
store <2 x float> %0, <2 x float> addrspace(1)* %out
@@ -36,7 +36,7 @@ entry:
; SI: v_rndne_f32_e32
; SI: v_rndne_f32_e32
; SI: v_rndne_f32_e32
-define void @rint_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
+define amdgpu_kernel void @rint_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
entry:
%0 = call <4 x float> @llvm.rint.v4f32(<4 x float> %in) #0
store <4 x float> %0, <4 x float> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/llvm.round.f64.ll b/test/CodeGen/AMDGPU/llvm.round.f64.ll
index 3ea4551f0ee7..c58b9b4d9e94 100644
--- a/test/CodeGen/AMDGPU/llvm.round.f64.ll
+++ b/test/CodeGen/AMDGPU/llvm.round.f64.ll
@@ -2,7 +2,7 @@
; FUNC-LABEL: {{^}}round_f64:
; SI: s_endpgm
-define void @round_f64(double addrspace(1)* %out, double %x) #0 {
+define amdgpu_kernel void @round_f64(double addrspace(1)* %out, double %x) #0 {
%result = call double @llvm.round.f64(double %x) #1
store double %result, double addrspace(1)* %out
ret void
@@ -26,7 +26,7 @@ define void @round_f64(double addrspace(1)* %out, double %x) #0 {
; SI: buffer_store_dwordx2
; SI: s_endpgm
-define void @v_round_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_round_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep = getelementptr double, double addrspace(1)* %in, i32 %tid
%out.gep = getelementptr double, double addrspace(1)* %out, i32 %tid
@@ -38,7 +38,7 @@ define void @v_round_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0
; FUNC-LABEL: {{^}}round_v2f64:
; SI: s_endpgm
-define void @round_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) #0 {
+define amdgpu_kernel void @round_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) #0 {
%result = call <2 x double> @llvm.round.v2f64(<2 x double> %in) #1
store <2 x double> %result, <2 x double> addrspace(1)* %out
ret void
@@ -46,7 +46,7 @@ define void @round_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) #0 {
; FUNC-LABEL: {{^}}round_v4f64:
; SI: s_endpgm
-define void @round_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) #0 {
+define amdgpu_kernel void @round_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) #0 {
%result = call <4 x double> @llvm.round.v4f64(<4 x double> %in) #1
store <4 x double> %result, <4 x double> addrspace(1)* %out
ret void
@@ -54,7 +54,7 @@ define void @round_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) #0 {
; FUNC-LABEL: {{^}}round_v8f64:
; SI: s_endpgm
-define void @round_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %in) #0 {
+define amdgpu_kernel void @round_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %in) #0 {
%result = call <8 x double> @llvm.round.v8f64(<8 x double> %in) #1
store <8 x double> %result, <8 x double> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.round.ll b/test/CodeGen/AMDGPU/llvm.round.ll
index 7e8f8ff172e8..ffe87977870b 100644
--- a/test/CodeGen/AMDGPU/llvm.round.ll
+++ b/test/CodeGen/AMDGPU/llvm.round.ll
@@ -1,18 +1,19 @@
-; RUN: llc -march=amdgcn < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=GCN -check-prefix=GFX89 -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global < %s | FileCheck -check-prefix=GCN -check-prefix=GFX89 -check-prefix=GFX9 -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}round_f32:
-; SI-DAG: s_load_dword [[SX:s[0-9]+]]
-; SI-DAG: s_brev_b32 [[K:s[0-9]+]], -2{{$}}
-; SI-DAG: v_trunc_f32_e32 [[TRUNC:v[0-9]+]], [[SX]]
-; SI-DAG: v_sub_f32_e32 [[SUB:v[0-9]+]], [[SX]], [[TRUNC]]
-; SI-DAG: v_mov_b32_e32 [[VX:v[0-9]+]], [[SX]]
-; SI: v_bfi_b32 [[COPYSIGN:v[0-9]+]], [[K]], 1.0, [[VX]]
-; SI: v_cmp_ge_f32_e64 vcc, |[[SUB]]|, 0.5
-; SI: v_cndmask_b32_e32 [[SEL:v[0-9]+]], 0, [[VX]]
-; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], [[SEL]], [[TRUNC]]
-; SI: buffer_store_dword [[RESULT]]
+; GCN-DAG: s_load_dword [[SX:s[0-9]+]]
+; GCN-DAG: s_brev_b32 [[K:s[0-9]+]], -2{{$}}
+; GCN-DAG: v_trunc_f32_e32 [[TRUNC:v[0-9]+]], [[SX]]
+; GCN-DAG: v_sub_f32_e32 [[SUB:v[0-9]+]], [[SX]], [[TRUNC]]
+; GCN-DAG: v_mov_b32_e32 [[VX:v[0-9]+]], [[SX]]
+; GCN: v_bfi_b32 [[COPYSIGN:v[0-9]+]], [[K]], 1.0, [[VX]]
+; GCN: v_cmp_ge_f32_e64 vcc, |[[SUB]]|, 0.5
+; GCN: v_cndmask_b32_e32 [[SEL:v[0-9]+]], 0, [[VX]]
+; GCN: v_add_f32_e32 [[RESULT:v[0-9]+]], [[SEL]], [[TRUNC]]
+; GCN: buffer_store_dword [[RESULT]]
; R600: TRUNC {{.*}}, [[ARG:KC[0-9]\[[0-9]+\]\.[XYZW]]]
; R600-DAG: ADD {{.*}},
@@ -20,7 +21,7 @@
; R600-DAG: SETGE
; R600-DAG: CNDE
; R600-DAG: ADD
-define void @round_f32(float addrspace(1)* %out, float %x) #0 {
+define amdgpu_kernel void @round_f32(float addrspace(1)* %out, float %x) #0 {
%result = call float @llvm.round.f32(float %x) #1
store float %result, float addrspace(1)* %out
ret void
@@ -32,36 +33,78 @@ define void @round_f32(float addrspace(1)* %out, float %x) #0 {
; compiler doesn't crash.
; FUNC-LABEL: {{^}}round_v2f32:
-; SI: s_endpgm
+; GCN: s_endpgm
; R600: CF_END
-define void @round_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) #0 {
+define amdgpu_kernel void @round_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) #0 {
%result = call <2 x float> @llvm.round.v2f32(<2 x float> %in) #1
store <2 x float> %result, <2 x float> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}round_v4f32:
-; SI: s_endpgm
+; GCN: s_endpgm
; R600: CF_END
-define void @round_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) #0 {
+define amdgpu_kernel void @round_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) #0 {
%result = call <4 x float> @llvm.round.v4f32(<4 x float> %in) #1
store <4 x float> %result, <4 x float> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}round_v8f32:
-; SI: s_endpgm
+; GCN: s_endpgm
; R600: CF_END
-define void @round_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %in) #0 {
+define amdgpu_kernel void @round_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %in) #0 {
%result = call <8 x float> @llvm.round.v8f32(<8 x float> %in) #1
store <8 x float> %result, <8 x float> addrspace(1)* %out
ret void
}
+; FUNC-LABEL: {{^}}round_f16:
+; GFX89-DAG: s_load_dword [[SX:s[0-9]+]]
+; GFX89-DAG: s_movk_i32 [[K:s[0-9]+]], 0x7fff{{$}}
+; GFX89-DAG: v_mov_b32_e32 [[VX:v[0-9]+]], [[SX]]
+; GFX89-DAG: v_mov_b32_e32 [[BFI_K:v[0-9]+]], 0x3c00
+; GFX89: v_bfi_b32 [[COPYSIGN:v[0-9]+]], [[K]], [[BFI_K]], [[VX]]
+
+; GFX89: v_trunc_f16_e32 [[TRUNC:v[0-9]+]], [[SX]]
+; GFX89: v_sub_f16_e32 [[SUB:v[0-9]+]], [[SX]], [[TRUNC]]
+; GFX89: v_cmp_ge_f16_e64 vcc, |[[SUB]]|, 0.5
+; GFX89: v_cndmask_b32_e32 [[SEL:v[0-9]+]], 0, [[COPYSIGN]]
+; GFX89: v_add_f16_e32 [[RESULT:v[0-9]+]], [[SEL]], [[TRUNC]]
+; GFX89: buffer_store_short [[RESULT]]
+define amdgpu_kernel void @round_f16(half addrspace(1)* %out, i32 %x.arg) #0 {
+ %x.arg.trunc = trunc i32 %x.arg to i16
+ %x = bitcast i16 %x.arg.trunc to half
+ %result = call half @llvm.round.f16(half %x) #1
+ store half %result, half addrspace(1)* %out
+ ret void
+}
+
+; Should be scalarized
+; FUNC-LABEL: {{^}}round_v2f16:
+; GFX89-DAG: s_movk_i32 [[K:s[0-9]+]], 0x7fff{{$}}
+; GFX89-DAG: v_mov_b32_e32 [[BFI_K:v[0-9]+]], 0x3c00
+; GFX89: v_bfi_b32 [[COPYSIGN0:v[0-9]+]], [[K]], [[BFI_K]],
+; GFX89: v_bfi_b32 [[COPYSIGN1:v[0-9]+]], [[K]], [[BFI_K]],
+
+; GFX9: v_and_b32_e32
+; GFX9: v_lshl_or_b32
+define amdgpu_kernel void @round_v2f16(<2 x half> addrspace(1)* %out, i32 %in.arg) #0 {
+ %in = bitcast i32 %in.arg to <2 x half>
+ %result = call <2 x half> @llvm.round.v2f16(<2 x half> %in)
+ store <2 x half> %result, <2 x half> addrspace(1)* %out
+ ret void
+}
+
declare float @llvm.round.f32(float) #1
declare <2 x float> @llvm.round.v2f32(<2 x float>) #1
declare <4 x float> @llvm.round.v4f32(<4 x float>) #1
declare <8 x float> @llvm.round.v8f32(<8 x float>) #1
+declare half @llvm.round.f16(half) #1
+declare <2 x half> @llvm.round.v2f16(<2 x half>) #1
+declare <4 x half> @llvm.round.v4f16(<4 x half>) #1
+declare <8 x half> @llvm.round.v8f16(<8 x half>) #1
+
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
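
The round_f32 and round_f16 checks above spell out the open-coded expansion round(x) = trunc(x) + (|x - trunc(x)| >= 0.5 ? copysign(1.0, x) : 0.0), with the copysign built by v_bfi_b32 from a sign-bit mask (s_brev_b32 -2 for f32; 0x7fff plus the f16 constant 0x3c00 for f16). A scalar IR sketch of the same expansion (illustrative, not the backend's literal output):

define float @round_expanded(float %x) {
  %t = call float @llvm.trunc.f32(float %x)                   ; v_trunc_f32
  %d = fsub float %x, %t                                      ; v_sub_f32
  %ad = call float @llvm.fabs.f32(float %d)
  %ge = fcmp oge float %ad, 0.5                               ; v_cmp_ge_f32 |d|, 0.5
  %sign1 = call float @llvm.copysign.f32(float 1.0, float %x) ; v_bfi_b32 mask, 1.0, x
  %sel = select i1 %ge, float %sign1, float 0.0               ; v_cndmask_b32
  %r = fadd float %sel, %t                                    ; v_add_f32
  ret float %r
}
declare float @llvm.trunc.f32(float)
declare float @llvm.fabs.f32(float)
declare float @llvm.copysign.f32(float, float)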
diff --git a/test/CodeGen/AMDGPU/llvm.sin.f16.ll b/test/CodeGen/AMDGPU/llvm.sin.f16.ll
index b01932f69b06..eb1f32c981f8 100644
--- a/test/CodeGen/AMDGPU/llvm.sin.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.sin.f16.ll
@@ -13,7 +13,7 @@ declare <2 x half> @llvm.sin.v2f16(<2 x half> %a)
; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @sin_f16(
+define amdgpu_kernel void @sin_f16(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -26,25 +26,35 @@ entry:
; GCN-LABEL: {{^}}sin_v2f16
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; SI: v_mov_b32_e32 v[[HALF_PIE:[0-9]+]], 0x3e22f983{{$}}
-; GCN: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; GCN: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
-; SI: v_mul_f32_e32 v[[M_F32_0:[0-9]+]], v[[HALF_PIE]], v[[A_F32_0]]
-; VI: v_mul_f32_e32 v[[M_F32_0:[0-9]+]], 0.15915494, v[[A_F32_0]]
-; GCN: v_fract_f32_e32 v[[F_F32_0:[0-9]+]], v[[M_F32_0]]
-; SI: v_mul_f32_e32 v[[M_F32_1:[0-9]+]], v[[HALF_PIE]], v[[A_F32_1]]
-; VI: v_mul_f32_e32 v[[M_F32_1:[0-9]+]], 0.15915494, v[[A_F32_1]]
-; GCN: v_fract_f32_e32 v[[F_F32_1:[0-9]+]], v[[M_F32_1]]
-; GCN: v_sin_f32_e32 v[[R_F32_0:[0-9]+]], v[[F_F32_0]]
-; GCN: v_sin_f32_e32 v[[R_F32_1:[0-9]+]], v[[F_F32_1]]
-; GCN: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
-; GCN: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI-DAG: v_mul_f32_e32 v[[M_F32_0:[0-9]+]], v[[HALF_PIE]], v[[A_F32_0]]
+; SI-DAG: v_fract_f32_e32 v[[F_F32_0:[0-9]+]], v[[M_F32_0]]
+; SI-DAG: v_mul_f32_e32 v[[M_F32_1:[0-9]+]], v[[HALF_PIE]], v[[A_F32_1]]
+; SI-DAG: v_fract_f32_e32 v[[F_F32_1:[0-9]+]], v[[M_F32_1]]
+
+; VI-DAG: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; VI-DAG: v_cvt_f32_f16_sdwa v[[A_F32_1:[0-9]+]], v[[A_V2_F16]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; VI-DAG: v_mul_f32_e32 v[[M_F32_0:[0-9]+]], 0.15915494, v[[A_F32_0]]
+; VI-DAG: v_mul_f32_e32 v[[M_F32_1:[0-9]+]], 0.15915494, v[[A_F32_1]]
+; VI-DAG: v_fract_f32_e32 v[[F_F32_0:[0-9]+]], v[[M_F32_0]]
+; VI-DAG: v_fract_f32_e32 v[[F_F32_1:[0-9]+]], v[[M_F32_1]]
+
+; GCN-DAG: v_sin_f32_e32 v[[R_F32_0:[0-9]+]], v[[F_F32_0]]
+; GCN-DAG: v_sin_f32_e32 v[[R_F32_1:[0-9]+]], v[[F_F32_1]]
+; GCN-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; SI: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; VI-DAG: v_cvt_f16_f32_sdwa v[[R_F16_1:[0-9]+]], v[[R_F32_1]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @sin_v2f16(
+define amdgpu_kernel void @sin_v2f16(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
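
The sin_v2f16 checks above encode the scale-then-fract prelude: multiply by 1/(2*pi) (the 0x3e22f983 HALF_PIE constant, 0.15915494), take the fractional part with v_fract_f32, then issue v_sin_f32. A scalar sketch of the identity involved, under the assumption that the hardware v_sin_f32 evaluates sin(2*pi*f) for f in [0,1):

define float @sin_expanded(float %x) {
  %m = fmul float %x, 0x3FC45F3060000000    ; x * (1/(2*pi)); f32 bits 0x3e22f983
  %fl = call float @llvm.floor.f32(float %m)
  %fr = fsub float %m, %fl                  ; fract(m), matches v_fract_f32
  %rad = fmul float %fr, 0x401921FB60000000 ; back to radians for the IR-level sin;
                                            ; the hardware consumes %fr directly
  %r = call float @llvm.sin.f32(float %rad)
  ret float %r
}
declare float @llvm.floor.f32(float)
declare float @llvm.sin.f32(float)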
diff --git a/test/CodeGen/AMDGPU/llvm.sin.ll b/test/CodeGen/AMDGPU/llvm.sin.ll
index 04754396a0f7..2a17303267ba 100644
--- a/test/CodeGen/AMDGPU/llvm.sin.ll
+++ b/test/CodeGen/AMDGPU/llvm.sin.ll
@@ -12,7 +12,7 @@
; SI: v_fract_f32
; SI: v_sin_f32
; SI-NOT: v_sin_f32
-define void @sin_f32(float addrspace(1)* %out, float %x) #1 {
+define amdgpu_kernel void @sin_f32(float addrspace(1)* %out, float %x) #1 {
%sin = call float @llvm.sin.f32(float %x)
store float %sin, float addrspace(1)* %out
ret void
@@ -24,7 +24,7 @@ define void @sin_f32(float addrspace(1)* %out, float %x) #1 {
; SI: v_fract_f32
; SI: v_sin_f32
; SI-NOT: v_sin_f32
-define void @safe_sin_3x_f32(float addrspace(1)* %out, float %x) #1 {
+define amdgpu_kernel void @safe_sin_3x_f32(float addrspace(1)* %out, float %x) #1 {
%y = fmul float 3.0, %x
%sin = call float @llvm.sin.f32(float %y)
store float %sin, float addrspace(1)* %out
@@ -38,7 +38,7 @@ define void @safe_sin_3x_f32(float addrspace(1)* %out, float %x) #1 {
; SI: v_fract_f32
; SI: v_sin_f32
; SI-NOT: v_sin_f32
-define void @unsafe_sin_3x_f32(float addrspace(1)* %out, float %x) #2 {
+define amdgpu_kernel void @unsafe_sin_3x_f32(float addrspace(1)* %out, float %x) #2 {
%y = fmul float 3.0, %x
%sin = call float @llvm.sin.f32(float %y)
store float %sin, float addrspace(1)* %out
@@ -51,7 +51,7 @@ define void @unsafe_sin_3x_f32(float addrspace(1)* %out, float %x) #2 {
; SI: v_fract_f32
; SI: v_sin_f32
; SI-NOT: v_sin_f32
-define void @safe_sin_2x_f32(float addrspace(1)* %out, float %x) #1 {
+define amdgpu_kernel void @safe_sin_2x_f32(float addrspace(1)* %out, float %x) #1 {
%y = fmul float 2.0, %x
%sin = call float @llvm.sin.f32(float %y)
store float %sin, float addrspace(1)* %out
@@ -65,7 +65,7 @@ define void @safe_sin_2x_f32(float addrspace(1)* %out, float %x) #1 {
; SI: v_fract_f32
; SI: v_sin_f32
; SI-NOT: v_sin_f32
-define void @unsafe_sin_2x_f32(float addrspace(1)* %out, float %x) #2 {
+define amdgpu_kernel void @unsafe_sin_2x_f32(float addrspace(1)* %out, float %x) #2 {
%y = fmul float 2.0, %x
%sin = call float @llvm.sin.f32(float %y)
store float %sin, float addrspace(1)* %out
@@ -78,7 +78,7 @@ define void @unsafe_sin_2x_f32(float addrspace(1)* %out, float %x) #2 {
; SI: v_fract_f32
; SI: v_sin_f32
; SI-NOT: v_sin_f32
-define void @test_safe_2sin_f32(float addrspace(1)* %out, float %x) #1 {
+define amdgpu_kernel void @test_safe_2sin_f32(float addrspace(1)* %out, float %x) #1 {
%y = fmul float 2.0, %x
%sin = call float @llvm.sin.f32(float %y)
store float %sin, float addrspace(1)* %out
@@ -91,7 +91,7 @@ define void @test_safe_2sin_f32(float addrspace(1)* %out, float %x) #1 {
; SI: v_fract_f32
; SI: v_sin_f32
; SI-NOT: v_sin_f32
-define void @test_unsafe_2sin_f32(float addrspace(1)* %out, float %x) #2 {
+define amdgpu_kernel void @test_unsafe_2sin_f32(float addrspace(1)* %out, float %x) #2 {
%y = fmul float 2.0, %x
%sin = call float @llvm.sin.f32(float %y)
store float %sin, float addrspace(1)* %out
@@ -110,7 +110,7 @@ define void @test_unsafe_2sin_f32(float addrspace(1)* %out, float %x) #2 {
; SI: v_sin_f32
; SI: v_sin_f32
; SI-NOT: v_sin_f32
-define void @sin_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %vx) #1 {
+define amdgpu_kernel void @sin_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %vx) #1 {
%sin = call <4 x float> @llvm.sin.v4f32( <4 x float> %vx)
store <4 x float> %sin, <4 x float> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/llvm.sqrt.f16.ll b/test/CodeGen/AMDGPU/llvm.sqrt.f16.ll
index 69125b0bcfdc..46ee6526aca2 100644
--- a/test/CodeGen/AMDGPU/llvm.sqrt.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.sqrt.f16.ll
@@ -12,7 +12,7 @@ declare <2 x half> @llvm.sqrt.v2f16(<2 x half> %a)
; VI: v_sqrt_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @sqrt_f16(
+define amdgpu_kernel void @sqrt_f16(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -25,20 +25,24 @@ entry:
; GCN-LABEL: {{^}}sqrt_v2f16
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_sqrt_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI: v_sqrt_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_sqrt_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
-; VI: v_sqrt_f16_e32 v[[R_F16_1:[0-9]+]], v[[A_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI-NOT: v_and_b32
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; VI-DAG: v_sqrt_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
+; VI-DAG: v_sqrt_f16_sdwa v[[R_F16_1:[0-9]+]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
+; VI-NOT: v_and_b32
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @sqrt_v2f16(
+define amdgpu_kernel void @sqrt_v2f16(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
diff --git a/test/CodeGen/AMDGPU/llvm.trunc.f16.ll b/test/CodeGen/AMDGPU/llvm.trunc.f16.ll
index 9f84b432209d..dc7182aa0d89 100644
--- a/test/CodeGen/AMDGPU/llvm.trunc.f16.ll
+++ b/test/CodeGen/AMDGPU/llvm.trunc.f16.ll
@@ -12,7 +12,7 @@ declare <2 x half> @llvm.trunc.v2f16(<2 x half> %a)
; VI: v_trunc_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @trunc_f16(
+define amdgpu_kernel void @trunc_f16(
half addrspace(1)* %r,
half addrspace(1)* %a) {
entry:
@@ -25,20 +25,24 @@ entry:
; GCN-LABEL: {{^}}trunc_v2f16
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_trunc_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI: v_trunc_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
-; VI: v_trunc_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
-; VI: v_trunc_f16_e32 v[[R_F16_1:[0-9]+]], v[[A_F16_1]]
-; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
-; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; SI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI-NOT: v_and_b32
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_0]]
+
+; VI-DAG: v_trunc_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
+; VI-DAG: v_trunc_f16_sdwa v[[R_F16_1:[0-9]+]], v[[A_V2_F16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
+; VI-NOT: v_and_b32
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_1]], v[[R_F16_0]]
+
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @trunc_v2f16(
+define amdgpu_kernel void @trunc_v2f16(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a) {
entry:
diff --git a/test/CodeGen/AMDGPU/load-constant-f64.ll b/test/CodeGen/AMDGPU/load-constant-f64.ll
index 1b42a9e96e01..0050d1a4f874 100644
--- a/test/CodeGen/AMDGPU/load-constant-f64.ll
+++ b/test/CodeGen/AMDGPU/load-constant-f64.ll
@@ -6,7 +6,7 @@
; GCN: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}]
; GCN-NOHSA: buffer_store_dwordx2
; GCN-HSA: flat_store_dwordx2
-define void @constant_load_f64(double addrspace(1)* %out, double addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_f64(double addrspace(1)* %out, double addrspace(2)* %in) #0 {
%ld = load double, double addrspace(2)* %in
store double %ld, double addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/load-constant-i1.ll b/test/CodeGen/AMDGPU/load-constant-i1.ll
index 104af10036c1..c8abe5c77ee5 100644
--- a/test/CodeGen/AMDGPU/load-constant-i1.ll
+++ b/test/CodeGen/AMDGPU/load-constant-i1.ll
@@ -9,56 +9,56 @@
; EG: VTX_READ_8
; EG: AND_INT
-define void @constant_load_i1(i1 addrspace(1)* %out, i1 addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_load_i1(i1 addrspace(1)* %out, i1 addrspace(2)* nocapture %in) #0 {
%load = load i1, i1 addrspace(2)* %in
store i1 %load, i1 addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}constant_load_v2i1:
-define void @constant_load_v2i1(<2 x i1> addrspace(1)* %out, <2 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_load_v2i1(<2 x i1> addrspace(1)* %out, <2 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <2 x i1>, <2 x i1> addrspace(2)* %in
store <2 x i1> %load, <2 x i1> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}constant_load_v3i1:
-define void @constant_load_v3i1(<3 x i1> addrspace(1)* %out, <3 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_load_v3i1(<3 x i1> addrspace(1)* %out, <3 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <3 x i1>, <3 x i1> addrspace(2)* %in
store <3 x i1> %load, <3 x i1> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}constant_load_v4i1:
-define void @constant_load_v4i1(<4 x i1> addrspace(1)* %out, <4 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_load_v4i1(<4 x i1> addrspace(1)* %out, <4 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <4 x i1>, <4 x i1> addrspace(2)* %in
store <4 x i1> %load, <4 x i1> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}constant_load_v8i1:
-define void @constant_load_v8i1(<8 x i1> addrspace(1)* %out, <8 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_load_v8i1(<8 x i1> addrspace(1)* %out, <8 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <8 x i1>, <8 x i1> addrspace(2)* %in
store <8 x i1> %load, <8 x i1> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}constant_load_v16i1:
-define void @constant_load_v16i1(<16 x i1> addrspace(1)* %out, <16 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_load_v16i1(<16 x i1> addrspace(1)* %out, <16 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <16 x i1>, <16 x i1> addrspace(2)* %in
store <16 x i1> %load, <16 x i1> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}constant_load_v32i1:
-define void @constant_load_v32i1(<32 x i1> addrspace(1)* %out, <32 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_load_v32i1(<32 x i1> addrspace(1)* %out, <32 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <32 x i1>, <32 x i1> addrspace(2)* %in
store <32 x i1> %load, <32 x i1> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}constant_load_v64i1:
-define void @constant_load_v64i1(<64 x i1> addrspace(1)* %out, <64 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_load_v64i1(<64 x i1> addrspace(1)* %out, <64 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <64 x i1>, <64 x i1> addrspace(2)* %in
store <64 x i1> %load, <64 x i1> addrspace(1)* %out
ret void
@@ -67,7 +67,7 @@ define void @constant_load_v64i1(<64 x i1> addrspace(1)* %out, <64 x i1> addrspa
; FUNC-LABEL: {{^}}constant_zextload_i1_to_i32:
; GCN: buffer_load_ubyte
; GCN: buffer_store_dword
-define void @constant_zextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_zextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(2)* nocapture %in) #0 {
%a = load i1, i1 addrspace(2)* %in
%ext = zext i1 %a to i32
store i32 %ext, i32 addrspace(1)* %out
@@ -81,7 +81,7 @@ define void @constant_zextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(2)
; EG: VTX_READ_8
; EG: BFE_INT
-define void @constant_sextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_sextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(2)* nocapture %in) #0 {
%a = load i1, i1 addrspace(2)* %in
%ext = sext i1 %a to i32
store i32 %ext, i32 addrspace(1)* %out
@@ -89,7 +89,7 @@ define void @constant_sextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(2)
}
; FUNC-LABEL: {{^}}constant_zextload_v1i1_to_v1i32:
-define void @constant_zextload_v1i1_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_zextload_v1i1_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <1 x i1>, <1 x i1> addrspace(2)* %in
%ext = zext <1 x i1> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
@@ -97,7 +97,7 @@ define void @constant_zextload_v1i1_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x
}
; FUNC-LABEL: {{^}}constant_sextload_v1i1_to_v1i32:
-define void @constant_sextload_v1i1_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_sextload_v1i1_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <1 x i1>, <1 x i1> addrspace(2)* %in
%ext = sext <1 x i1> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
@@ -105,7 +105,7 @@ define void @constant_sextload_v1i1_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x
}
; FUNC-LABEL: {{^}}constant_zextload_v2i1_to_v2i32:
-define void @constant_zextload_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_zextload_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <2 x i1>, <2 x i1> addrspace(2)* %in
%ext = zext <2 x i1> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
@@ -113,7 +113,7 @@ define void @constant_zextload_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x
}
; FUNC-LABEL: {{^}}constant_sextload_v2i1_to_v2i32:
-define void @constant_sextload_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_sextload_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <2 x i1>, <2 x i1> addrspace(2)* %in
%ext = sext <2 x i1> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
@@ -121,7 +121,7 @@ define void @constant_sextload_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x
}
; FUNC-LABEL: {{^}}constant_zextload_v3i1_to_v3i32:
-define void @constant_zextload_v3i1_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_zextload_v3i1_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <3 x i1>, <3 x i1> addrspace(2)* %in
%ext = zext <3 x i1> %load to <3 x i32>
store <3 x i32> %ext, <3 x i32> addrspace(1)* %out
@@ -129,7 +129,7 @@ define void @constant_zextload_v3i1_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x
}
; FUNC-LABEL: {{^}}constant_sextload_v3i1_to_v3i32:
-define void @constant_sextload_v3i1_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_sextload_v3i1_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <3 x i1>, <3 x i1> addrspace(2)* %in
%ext = sext <3 x i1> %load to <3 x i32>
store <3 x i32> %ext, <3 x i32> addrspace(1)* %out
@@ -137,7 +137,7 @@ define void @constant_sextload_v3i1_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x
}
; FUNC-LABEL: {{^}}constant_zextload_v4i1_to_v4i32:
-define void @constant_zextload_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_zextload_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <4 x i1>, <4 x i1> addrspace(2)* %in
%ext = zext <4 x i1> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
@@ -145,7 +145,7 @@ define void @constant_zextload_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x
}
; FUNC-LABEL: {{^}}constant_sextload_v4i1_to_v4i32:
-define void @constant_sextload_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_sextload_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <4 x i1>, <4 x i1> addrspace(2)* %in
%ext = sext <4 x i1> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
@@ -153,7 +153,7 @@ define void @constant_sextload_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x
}
; FUNC-LABEL: {{^}}constant_zextload_v8i1_to_v8i32:
-define void @constant_zextload_v8i1_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_zextload_v8i1_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <8 x i1>, <8 x i1> addrspace(2)* %in
%ext = zext <8 x i1> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
@@ -161,7 +161,7 @@ define void @constant_zextload_v8i1_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x
}
; FUNC-LABEL: {{^}}constant_sextload_v8i1_to_v8i32:
-define void @constant_sextload_v8i1_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_sextload_v8i1_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <8 x i1>, <8 x i1> addrspace(2)* %in
%ext = sext <8 x i1> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
@@ -169,7 +169,7 @@ define void @constant_sextload_v8i1_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x
}
; FUNC-LABEL: {{^}}constant_zextload_v16i1_to_v16i32:
-define void @constant_zextload_v16i1_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_zextload_v16i1_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <16 x i1>, <16 x i1> addrspace(2)* %in
%ext = zext <16 x i1> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
@@ -177,7 +177,7 @@ define void @constant_zextload_v16i1_to_v16i32(<16 x i32> addrspace(1)* %out, <1
}
; FUNC-LABEL: {{^}}constant_sextload_v16i1_to_v16i32:
-define void @constant_sextload_v16i1_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_sextload_v16i1_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <16 x i1>, <16 x i1> addrspace(2)* %in
%ext = sext <16 x i1> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
@@ -185,7 +185,7 @@ define void @constant_sextload_v16i1_to_v16i32(<16 x i32> addrspace(1)* %out, <1
}
; FUNC-LABEL: {{^}}constant_zextload_v32i1_to_v32i32:
-define void @constant_zextload_v32i1_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_zextload_v32i1_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <32 x i1>, <32 x i1> addrspace(2)* %in
%ext = zext <32 x i1> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
@@ -193,7 +193,7 @@ define void @constant_zextload_v32i1_to_v32i32(<32 x i32> addrspace(1)* %out, <3
}
; FUNC-LABEL: {{^}}constant_sextload_v32i1_to_v32i32:
-define void @constant_sextload_v32i1_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_sextload_v32i1_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <32 x i1>, <32 x i1> addrspace(2)* %in
%ext = sext <32 x i1> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
@@ -201,7 +201,7 @@ define void @constant_sextload_v32i1_to_v32i32(<32 x i32> addrspace(1)* %out, <3
}
; FUNC-LABEL: {{^}}constant_zextload_v64i1_to_v64i32:
-define void @constant_zextload_v64i1_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_zextload_v64i1_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <64 x i1>, <64 x i1> addrspace(2)* %in
%ext = zext <64 x i1> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
@@ -209,7 +209,7 @@ define void @constant_zextload_v64i1_to_v64i32(<64 x i32> addrspace(1)* %out, <6
}
; FUNC-LABEL: {{^}}constant_sextload_v64i1_to_v64i32:
-define void @constant_sextload_v64i1_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_sextload_v64i1_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <64 x i1>, <64 x i1> addrspace(2)* %in
%ext = sext <64 x i1> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
@@ -221,7 +221,7 @@ define void @constant_sextload_v64i1_to_v64i32(<64 x i32> addrspace(1)* %out, <6
; GCN-DAG: v_mov_b32_e32 {{v[0-9]+}}, 0{{$}}
; GCN-DAG: v_and_b32_e32 {{v[0-9]+}}, 1, [[LOAD]]
; GCN: buffer_store_dwordx2
-define void @constant_zextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_zextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(2)* nocapture %in) #0 {
%a = load i1, i1 addrspace(2)* %in
%ext = zext i1 %a to i64
store i64 %ext, i64 addrspace(1)* %out
@@ -233,7 +233,7 @@ define void @constant_zextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(2)
; GCN: v_bfe_i32 [[BFE:v[0-9]+]], {{v[0-9]+}}, 0, 1{{$}}
; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, [[BFE]]
; GCN: buffer_store_dwordx2
-define void @constant_sextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_sextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(2)* nocapture %in) #0 {
%a = load i1, i1 addrspace(2)* %in
%ext = sext i1 %a to i64
store i64 %ext, i64 addrspace(1)* %out
@@ -241,7 +241,7 @@ define void @constant_sextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(2)
}
; FUNC-LABEL: {{^}}constant_zextload_v1i1_to_v1i64:
-define void @constant_zextload_v1i1_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_zextload_v1i1_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <1 x i1>, <1 x i1> addrspace(2)* %in
%ext = zext <1 x i1> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
@@ -249,7 +249,7 @@ define void @constant_zextload_v1i1_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x
}
; FUNC-LABEL: {{^}}constant_sextload_v1i1_to_v1i64:
-define void @constant_sextload_v1i1_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_sextload_v1i1_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <1 x i1>, <1 x i1> addrspace(2)* %in
%ext = sext <1 x i1> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
@@ -257,7 +257,7 @@ define void @constant_sextload_v1i1_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x
}
; FUNC-LABEL: {{^}}constant_zextload_v2i1_to_v2i64:
-define void @constant_zextload_v2i1_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_zextload_v2i1_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <2 x i1>, <2 x i1> addrspace(2)* %in
%ext = zext <2 x i1> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
@@ -265,7 +265,7 @@ define void @constant_zextload_v2i1_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x
}
; FUNC-LABEL: {{^}}constant_sextload_v2i1_to_v2i64:
-define void @constant_sextload_v2i1_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_sextload_v2i1_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <2 x i1>, <2 x i1> addrspace(2)* %in
%ext = sext <2 x i1> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
@@ -273,7 +273,7 @@ define void @constant_sextload_v2i1_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x
}
; FUNC-LABEL: {{^}}constant_zextload_v3i1_to_v3i64:
-define void @constant_zextload_v3i1_to_v3i64(<3 x i64> addrspace(1)* %out, <3 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_zextload_v3i1_to_v3i64(<3 x i64> addrspace(1)* %out, <3 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <3 x i1>, <3 x i1> addrspace(2)* %in
%ext = zext <3 x i1> %load to <3 x i64>
store <3 x i64> %ext, <3 x i64> addrspace(1)* %out
@@ -281,7 +281,7 @@ define void @constant_zextload_v3i1_to_v3i64(<3 x i64> addrspace(1)* %out, <3 x
}
; FUNC-LABEL: {{^}}constant_sextload_v3i1_to_v3i64:
-define void @constant_sextload_v3i1_to_v3i64(<3 x i64> addrspace(1)* %out, <3 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_sextload_v3i1_to_v3i64(<3 x i64> addrspace(1)* %out, <3 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <3 x i1>, <3 x i1> addrspace(2)* %in
%ext = sext <3 x i1> %load to <3 x i64>
store <3 x i64> %ext, <3 x i64> addrspace(1)* %out
@@ -289,7 +289,7 @@ define void @constant_sextload_v3i1_to_v3i64(<3 x i64> addrspace(1)* %out, <3 x
}
; FUNC-LABEL: {{^}}constant_zextload_v4i1_to_v4i64:
-define void @constant_zextload_v4i1_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_zextload_v4i1_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <4 x i1>, <4 x i1> addrspace(2)* %in
%ext = zext <4 x i1> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
@@ -297,7 +297,7 @@ define void @constant_zextload_v4i1_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x
}
; FUNC-LABEL: {{^}}constant_sextload_v4i1_to_v4i64:
-define void @constant_sextload_v4i1_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_sextload_v4i1_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <4 x i1>, <4 x i1> addrspace(2)* %in
%ext = sext <4 x i1> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
@@ -305,7 +305,7 @@ define void @constant_sextload_v4i1_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x
}
; FUNC-LABEL: {{^}}constant_zextload_v8i1_to_v8i64:
-define void @constant_zextload_v8i1_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_zextload_v8i1_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <8 x i1>, <8 x i1> addrspace(2)* %in
%ext = zext <8 x i1> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
@@ -313,7 +313,7 @@ define void @constant_zextload_v8i1_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x
}
; FUNC-LABEL: {{^}}constant_sextload_v8i1_to_v8i64:
-define void @constant_sextload_v8i1_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_sextload_v8i1_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <8 x i1>, <8 x i1> addrspace(2)* %in
%ext = sext <8 x i1> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
@@ -321,7 +321,7 @@ define void @constant_sextload_v8i1_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x
}
; FUNC-LABEL: {{^}}constant_zextload_v16i1_to_v16i64:
-define void @constant_zextload_v16i1_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_zextload_v16i1_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <16 x i1>, <16 x i1> addrspace(2)* %in
%ext = zext <16 x i1> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
@@ -329,7 +329,7 @@ define void @constant_zextload_v16i1_to_v16i64(<16 x i64> addrspace(1)* %out, <1
}
; FUNC-LABEL: {{^}}constant_sextload_v16i1_to_v16i64:
-define void @constant_sextload_v16i1_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_sextload_v16i1_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <16 x i1>, <16 x i1> addrspace(2)* %in
%ext = sext <16 x i1> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
@@ -337,7 +337,7 @@ define void @constant_sextload_v16i1_to_v16i64(<16 x i64> addrspace(1)* %out, <1
}
; FUNC-LABEL: {{^}}constant_zextload_v32i1_to_v32i64:
-define void @constant_zextload_v32i1_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_zextload_v32i1_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <32 x i1>, <32 x i1> addrspace(2)* %in
%ext = zext <32 x i1> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
@@ -345,7 +345,7 @@ define void @constant_zextload_v32i1_to_v32i64(<32 x i64> addrspace(1)* %out, <3
}
; FUNC-LABEL: {{^}}constant_sextload_v32i1_to_v32i64:
-define void @constant_sextload_v32i1_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_sextload_v32i1_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <32 x i1>, <32 x i1> addrspace(2)* %in
%ext = sext <32 x i1> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
@@ -353,7 +353,7 @@ define void @constant_sextload_v32i1_to_v32i64(<32 x i64> addrspace(1)* %out, <3
}
; FUNC-LABEL: {{^}}constant_zextload_v64i1_to_v64i64:
-define void @constant_zextload_v64i1_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_zextload_v64i1_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <64 x i1>, <64 x i1> addrspace(2)* %in
%ext = zext <64 x i1> %load to <64 x i64>
store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
@@ -361,7 +361,7 @@ define void @constant_zextload_v64i1_to_v64i64(<64 x i64> addrspace(1)* %out, <6
}
; FUNC-LABEL: {{^}}constant_sextload_v64i1_to_v64i64:
-define void @constant_sextload_v64i1_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i1> addrspace(2)* nocapture %in) #0 {
+define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i1> addrspace(2)* nocapture %in) #0 {
%load = load <64 x i1>, <64 x i1> addrspace(2)* %in
%ext = sext <64 x i1> %load to <64 x i64>
store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/load-constant-i16.ll b/test/CodeGen/AMDGPU/load-constant-i16.ll
index f7be1291040f..5dd2efdf6382 100644
--- a/test/CodeGen/AMDGPU/load-constant-i16.ll
+++ b/test/CodeGen/AMDGPU/load-constant-i16.ll
@@ -8,7 +8,7 @@
; GCN-HSA: flat_load_ushort
; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @constant_load_i16(i16 addrspace(1)* %out, i16 addrspace(2)* %in) {
+define amdgpu_kernel void @constant_load_i16(i16 addrspace(1)* %out, i16 addrspace(2)* %in) {
entry:
%ld = load i16, i16 addrspace(2)* %in
store i16 %ld, i16 addrspace(1)* %out
@@ -19,7 +19,7 @@ entry:
; GCN: s_load_dword s
; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @constant_load_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %in) {
+define amdgpu_kernel void @constant_load_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %in) {
entry:
%ld = load <2 x i16>, <2 x i16> addrspace(2)* %in
store <2 x i16> %ld, <2 x i16> addrspace(1)* %out
@@ -31,7 +31,7 @@ entry:
; EG-DAG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; EG-DAG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 4, #1
-define void @constant_load_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> addrspace(2)* %in) {
+define amdgpu_kernel void @constant_load_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> addrspace(2)* %in) {
entry:
%ld = load <3 x i16>, <3 x i16> addrspace(2)* %in
store <3 x i16> %ld, <3 x i16> addrspace(1)* %out
@@ -42,7 +42,7 @@ entry:
; GCN: s_load_dwordx2
; EG: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
-define void @constant_load_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(2)* %in) {
+define amdgpu_kernel void @constant_load_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(2)* %in) {
entry:
%ld = load <4 x i16>, <4 x i16> addrspace(2)* %in
store <4 x i16> %ld, <4 x i16> addrspace(1)* %out
@@ -53,7 +53,7 @@ entry:
; GCN: s_load_dwordx4
; EG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-define void @constant_load_v8i16(<8 x i16> addrspace(1)* %out, <8 x i16> addrspace(2)* %in) {
+define amdgpu_kernel void @constant_load_v8i16(<8 x i16> addrspace(1)* %out, <8 x i16> addrspace(2)* %in) {
entry:
%ld = load <8 x i16>, <8 x i16> addrspace(2)* %in
store <8 x i16> %ld, <8 x i16> addrspace(1)* %out
@@ -65,7 +65,7 @@ entry:
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
-define void @constant_load_v16i16(<16 x i16> addrspace(1)* %out, <16 x i16> addrspace(2)* %in) {
+define amdgpu_kernel void @constant_load_v16i16(<16 x i16> addrspace(1)* %out, <16 x i16> addrspace(2)* %in) {
entry:
%ld = load <16 x i16>, <16 x i16> addrspace(2)* %in
store <16 x i16> %ld, <16 x i16> addrspace(1)* %out
@@ -80,7 +80,7 @@ entry:
; GCN-HSA: flat_store_dword
; EG: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}, 0, #1
-define void @constant_zextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(2)* %in) #0 {
%a = load i16, i16 addrspace(2)* %in
%ext = zext i16 %a to i32
store i32 %ext, i32 addrspace(1)* %out
@@ -97,7 +97,7 @@ define void @constant_zextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(
; EG: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], [[DST]], 0, #1
; EG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], 0.0, literal
; EG: 16
-define void @constant_sextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(2)* %in) #0 {
%a = load i16, i16 addrspace(2)* %in
%ext = sext i16 %a to i32
store i32 %ext, i32 addrspace(1)* %out
@@ -109,7 +109,7 @@ define void @constant_sextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(
; GCN-HSA: flat_load_ushort
; EG: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}, 0, #1
-define void @constant_zextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(2)* %in) #0 {
%load = load <1 x i16>, <1 x i16> addrspace(2)* %in
%ext = zext <1 x i16> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
@@ -123,7 +123,7 @@ define void @constant_zextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x
; EG: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], [[DST]], 0, #1
; EG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], 0.0, literal
; EG: 16
-define void @constant_sextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(2)* %in) #0 {
%load = load <1 x i16>, <1 x i16> addrspace(2)* %in
%ext = sext <1 x i16> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
@@ -140,7 +140,7 @@ define void @constant_sextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x
; EG: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], literal
; EG: 16
; EG: 16
-define void @constant_zextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(2)* %in) #0 {
%load = load <2 x i16>, <2 x i16> addrspace(2)* %in
%ext = zext <2 x i16> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
@@ -160,7 +160,7 @@ define void @constant_zextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x
; EG-DAG: BFE_INT {{[* ]*}}[[ST]].Y, {{PV\.[XYZW]}}, 0.0, literal
; EG-DAG: 16
; EG-DAG: 16
-define void @constant_sextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(2)* %in) #0 {
%load = load <2 x i16>, <2 x i16> addrspace(2)* %in
%ext = sext <2 x i16> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
@@ -183,7 +183,7 @@ define void @constant_sextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x
; EG-DAG: AND_INT {{[* ]*}}[[ST_HI]].X, {{T[0-9]\.[XYZW]}}, literal
; EG-DAG: 65535
; EG-DAG: 65535
-define void @constant_zextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(2)* %in) {
+define amdgpu_kernel void @constant_zextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(2)* %in) {
entry:
%ld = load <3 x i16>, <3 x i16> addrspace(2)* %in
%ext = zext <3 x i16> %ld to <3 x i32>
@@ -204,7 +204,7 @@ entry:
; EG-DAG: BFE_INT {{[* ]*}}[[ST_HI]].X, {{T[0-9]\.[XYZW]}}, 0.0, literal
; EG-DAG: 16
; EG-DAG: 16
-define void @constant_sextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(2)* %in) {
+define amdgpu_kernel void @constant_sextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(2)* %in) {
entry:
%ld = load <3 x i16>, <3 x i16> addrspace(2)* %in
%ext = sext <3 x i16> %ld to <3 x i32>
@@ -229,7 +229,7 @@ entry:
; EG-DAG: AND_INT {{[* ]*}}[[ST]].Z, {{T[0-9]\.[XYZW]}}, literal
; EG-DAG: 65535
; EG-DAG: 65535
-define void @constant_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(2)* %in) #0 {
%load = load <4 x i16>, <4 x i16> addrspace(2)* %in
%ext = zext <4 x i16> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
@@ -254,7 +254,7 @@ define void @constant_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x
; EG-DAG: 16
; EG-DAG: 16
; EG-DAG: 16
-define void @constant_sextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(2)* %in) #0 {
%load = load <4 x i16>, <4 x i16> addrspace(2)* %in
%ext = sext <4 x i16> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
@@ -288,7 +288,7 @@ define void @constant_sextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x
; EG-DAG: 65535
; EG-DAG: 65535
; EG-DAG: 65535
-define void @constant_zextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(2)* %in) #0 {
%load = load <8 x i16>, <8 x i16> addrspace(2)* %in
%ext = zext <8 x i16> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
@@ -322,7 +322,7 @@ define void @constant_zextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x
; EG-DAG: 16
; EG-DAG: 16
; EG-DAG: 16
-define void @constant_sextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(2)* %in) #0 {
%load = load <8 x i16>, <8 x i16> addrspace(2)* %in
%ext = sext <8 x i16> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
@@ -337,7 +337,7 @@ define void @constant_sextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x
; v16i16 is naturally 32-byte aligned
; EG-DAG: VTX_READ_128 [[DST_HI:T[0-9]+\.XYZW]], {{T[0-9]+.[XYZW]}}, 0, #1
; EG-DAG: VTX_READ_128 [[DST_LO:T[0-9]+\.XYZW]], {{T[0-9]+.[XYZW]}}, 16, #1
-define void @constant_zextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(2)* %in) #0 {
%load = load <16 x i16>, <16 x i16> addrspace(2)* %in
%ext = zext <16 x i16> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
@@ -352,7 +352,7 @@ define void @constant_zextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <
; v16i16 is naturally 32-byte aligned
; EG-DAG: VTX_READ_128 [[DST_HI:T[0-9]+\.XYZW]], {{T[0-9]+\.[XYZW]}}, 0, #1
; EG-DAG: VTX_READ_128 [[DST_LO:T[0-9]+\.XYZW]], {{T[0-9]+\.[XYZW]}}, 16, #1
-define void @constant_sextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(2)* %in) #0 {
%load = load <16 x i16>, <16 x i16> addrspace(2)* %in
%ext = sext <16 x i16> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
@@ -369,7 +369,7 @@ define void @constant_sextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+\.[XYZW]}}, 16, #1
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+\.[XYZW]}}, 32, #1
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+\.[XYZW]}}, 48, #1
-define void @constant_zextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(2)* %in) #0 {
%load = load <32 x i16>, <32 x i16> addrspace(2)* %in
%ext = zext <32 x i16> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
@@ -385,7 +385,7 @@ define void @constant_zextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+\.[XYZW]}}, 16, #1
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+\.[XYZW]}}, 32, #1
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+\.[XYZW]}}, 48, #1
-define void @constant_sextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(2)* %in) #0 {
%load = load <32 x i16>, <32 x i16> addrspace(2)* %in
%ext = sext <32 x i16> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
@@ -404,7 +404,7 @@ define void @constant_sextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+\.[XYZW]}}, 80, #1
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+\.[XYZW]}}, 96, #1
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+\.[XYZW]}}, 112, #1
-define void @constant_zextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(2)* %in) #0 {
%load = load <64 x i16>, <64 x i16> addrspace(2)* %in
%ext = zext <64 x i16> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
@@ -421,7 +421,7 @@ define void @constant_zextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+\.[XYZW]}}, 80, #1
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+\.[XYZW]}}, 96, #1
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+\.[XYZW]}}, 112, #1
-define void @constant_sextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(2)* %in) #0 {
%load = load <64 x i16>, <64 x i16> addrspace(2)* %in
%ext = sext <64 x i16> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
@@ -438,7 +438,7 @@ define void @constant_sextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <
; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; EG: MOV {{.*}}, 0.0
-define void @constant_zextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(2)* %in) #0 {
%a = load i16, i16 addrspace(2)* %in
%ext = zext i16 %a to i64
store i64 %ext, i64 addrspace(1)* %out
@@ -464,7 +464,7 @@ define void @constant_zextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(
; EG: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal
; TODO: These could be expanded earlier using ASHR 15
; EG: 31
-define void @constant_sextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(2)* %in) #0 {
%a = load i16, i16 addrspace(2)* %in
%ext = sext i16 %a to i64
store i64 %ext, i64 addrspace(1)* %out
@@ -475,7 +475,7 @@ define void @constant_sextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(
; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; EG: MOV {{.*}}, 0.0
-define void @constant_zextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(2)* %in) #0 {
%load = load <1 x i16>, <1 x i16> addrspace(2)* %in
%ext = zext <1 x i16> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
@@ -488,7 +488,7 @@ define void @constant_zextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x
; EG: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal
; TODO: These could be expanded earlier using ASHR 15
; EG: 31
-define void @constant_sextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(2)* %in) #0 {
%load = load <1 x i16>, <1 x i16> addrspace(2)* %in
%ext = sext <1 x i16> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
@@ -498,7 +498,7 @@ define void @constant_sextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x
; FUNC-LABEL: {{^}}constant_zextload_v2i16_to_v2i64:
; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @constant_zextload_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(2)* %in) #0 {
%load = load <2 x i16>, <2 x i16> addrspace(2)* %in
%ext = zext <2 x i16> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
@@ -508,7 +508,7 @@ define void @constant_zextload_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x
; FUNC-LABEL: {{^}}constant_sextload_v2i16_to_v2i64:
; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @constant_sextload_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(2)* %in) #0 {
%load = load <2 x i16>, <2 x i16> addrspace(2)* %in
%ext = sext <2 x i16> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
@@ -518,7 +518,7 @@ define void @constant_sextload_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x
; FUNC-LABEL: {{^}}constant_zextload_v4i16_to_v4i64:
; EG: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
-define void @constant_zextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(2)* %in) #0 {
%load = load <4 x i16>, <4 x i16> addrspace(2)* %in
%ext = zext <4 x i16> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
@@ -528,7 +528,7 @@ define void @constant_zextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x
; FUNC-LABEL: {{^}}constant_sextload_v4i16_to_v4i64:
; EG: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
-define void @constant_sextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(2)* %in) #0 {
%load = load <4 x i16>, <4 x i16> addrspace(2)* %in
%ext = sext <4 x i16> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
@@ -538,7 +538,7 @@ define void @constant_sextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x
; FUNC-LABEL: {{^}}constant_zextload_v8i16_to_v8i64:
; EG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-define void @constant_zextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(2)* %in) #0 {
%load = load <8 x i16>, <8 x i16> addrspace(2)* %in
%ext = zext <8 x i16> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
@@ -548,7 +548,7 @@ define void @constant_zextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x
; FUNC-LABEL: {{^}}constant_sextload_v8i16_to_v8i64:
; EG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-define void @constant_sextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(2)* %in) #0 {
%load = load <8 x i16>, <8 x i16> addrspace(2)* %in
%ext = sext <8 x i16> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
@@ -559,7 +559,7 @@ define void @constant_sextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
-define void @constant_zextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(2)* %in) #0 {
%load = load <16 x i16>, <16 x i16> addrspace(2)* %in
%ext = zext <16 x i16> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
@@ -570,7 +570,7 @@ define void @constant_zextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
-define void @constant_sextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(2)* %in) #0 {
%load = load <16 x i16>, <16 x i16> addrspace(2)* %in
%ext = sext <16 x i16> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
@@ -583,7 +583,7 @@ define void @constant_sextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 32, #1
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 48, #1
-define void @constant_zextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(2)* %in) #0 {
%load = load <32 x i16>, <32 x i16> addrspace(2)* %in
%ext = zext <32 x i16> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
@@ -596,7 +596,7 @@ define void @constant_zextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 32, #1
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 48, #1
-define void @constant_sextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(2)* %in) #0 {
%load = load <32 x i16>, <32 x i16> addrspace(2)* %in
%ext = sext <32 x i16> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
@@ -606,7 +606,7 @@ define void @constant_sextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <
; These trigger machine verifier errors about undefined registers
; ; XFUNC-LABEL: {{^}}constant_zextload_v64i16_to_v64i64:
-; define void @constant_zextload_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i16> addrspace(2)* %in) #0 {
+; define amdgpu_kernel void @constant_zextload_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i16> addrspace(2)* %in) #0 {
; %load = load <64 x i16>, <64 x i16> addrspace(2)* %in
; %ext = zext <64 x i16> %load to <64 x i64>
; store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
@@ -614,7 +614,7 @@ define void @constant_sextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <
; }
; ; XFUNC-LABEL: {{^}}constant_sextload_v64i16_to_v64i64:
-; define void @constant_sextload_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i16> addrspace(2)* %in) #0 {
+; define amdgpu_kernel void @constant_sextload_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i16> addrspace(2)* %in) #0 {
; %load = load <64 x i16>, <64 x i16> addrspace(2)* %in
; %ext = sext <64 x i16> %load to <64 x i64>
; store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/load-constant-i32.ll b/test/CodeGen/AMDGPU/load-constant-i32.ll
index d1ff1c706c40..7370d45ca6b9 100644
--- a/test/CodeGen/AMDGPU/load-constant-i32.ll
+++ b/test/CodeGen/AMDGPU/load-constant-i32.ll
@@ -7,7 +7,7 @@
; GCN: s_load_dword s{{[0-9]+}}
; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
-define void @constant_load_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) #0 {
entry:
%ld = load i32, i32 addrspace(2)* %in
store i32 %ld, i32 addrspace(1)* %out
@@ -18,7 +18,7 @@ entry:
; GCN: s_load_dwordx2
; EG: VTX_READ_64
-define void @constant_load_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(2)* %in) #0 {
entry:
%ld = load <2 x i32>, <2 x i32> addrspace(2)* %in
store <2 x i32> %ld, <2 x i32> addrspace(1)* %out
@@ -29,7 +29,7 @@ entry:
; GCN: s_load_dwordx4
; EG: VTX_READ_128
-define void @constant_load_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(2)* %in) #0 {
entry:
%ld = load <3 x i32>, <3 x i32> addrspace(2)* %in
store <3 x i32> %ld, <3 x i32> addrspace(1)* %out
@@ -40,7 +40,7 @@ entry:
; GCN: s_load_dwordx4
; EG: VTX_READ_128
-define void @constant_load_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(2)* %in) #0 {
entry:
%ld = load <4 x i32>, <4 x i32> addrspace(2)* %in
store <4 x i32> %ld, <4 x i32> addrspace(1)* %out
@@ -52,7 +52,7 @@ entry:
; EG: VTX_READ_128
; EG: VTX_READ_128
-define void @constant_load_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(2)* %in) #0 {
entry:
%ld = load <8 x i32>, <8 x i32> addrspace(2)* %in
store <8 x i32> %ld, <8 x i32> addrspace(1)* %out
@@ -66,7 +66,7 @@ entry:
; EG: VTX_READ_128
; EG: VTX_READ_128
; EG: VTX_READ_128
-define void @constant_load_v16i32(<16 x i32> addrspace(1)* %out, <16 x i32> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_v16i32(<16 x i32> addrspace(1)* %out, <16 x i32> addrspace(2)* %in) #0 {
entry:
%ld = load <16 x i32>, <16 x i32> addrspace(2)* %in
store <16 x i32> %ld, <16 x i32> addrspace(1)* %out
@@ -81,7 +81,7 @@ entry:
; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
; EG: CF_END
; EG: VTX_READ_32
-define void @constant_zextload_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(2)* %in) #0 {
%ld = load i32, i32 addrspace(2)* %in
%ext = zext i32 %ld to i64
store i64 %ext, i64 addrspace(1)* %out
@@ -98,7 +98,7 @@ define void @constant_zextload_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(
; EG: VTX_READ_32
; EG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, T{{[0-9]\.[XYZW]}}, literal.
; EG: 31
-define void @constant_sextload_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(2)* %in) #0 {
%ld = load i32, i32 addrspace(2)* %in
%ext = sext i32 %ld to i64
store i64 %ext, i64 addrspace(1)* %out
@@ -108,7 +108,7 @@ define void @constant_sextload_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(
; FUNC-LABEL: {{^}}constant_zextload_v1i32_to_v1i64:
; GCN: s_load_dword
; GCN: store_dwordx2
-define void @constant_zextload_v1i32_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i32> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v1i32_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i32> addrspace(2)* %in) #0 {
%ld = load <1 x i32>, <1 x i32> addrspace(2)* %in
%ext = zext <1 x i32> %ld to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
@@ -119,7 +119,7 @@ define void @constant_zextload_v1i32_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x
; GCN: s_load_dword s[[LO:[0-9]+]]
; GCN: s_ashr_i32 s[[HI:[0-9]+]], s[[LO]], 31
; GCN: store_dwordx2
-define void @constant_sextload_v1i32_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i32> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v1i32_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i32> addrspace(2)* %in) #0 {
%ld = load <1 x i32>, <1 x i32> addrspace(2)* %in
%ext = sext <1 x i32> %ld to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
@@ -129,7 +129,7 @@ define void @constant_sextload_v1i32_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x
; FUNC-LABEL: {{^}}constant_zextload_v2i32_to_v2i64:
; GCN: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x0{{$}}
; GCN: store_dwordx4
-define void @constant_zextload_v2i32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i32> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v2i32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i32> addrspace(2)* %in) #0 {
%ld = load <2 x i32>, <2 x i32> addrspace(2)* %in
%ext = zext <2 x i32> %ld to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
@@ -143,7 +143,7 @@ define void @constant_zextload_v2i32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x
; GCN-DAG: s_ashr_i32
; GCN: store_dwordx4
-define void @constant_sextload_v2i32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i32> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v2i32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i32> addrspace(2)* %in) #0 {
%ld = load <2 x i32>, <2 x i32> addrspace(2)* %in
%ext = sext <2 x i32> %ld to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
@@ -155,7 +155,7 @@ define void @constant_sextload_v2i32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x
; GCN: store_dwordx4
; GCN: store_dwordx4
-define void @constant_zextload_v4i32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i32> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v4i32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i32> addrspace(2)* %in) #0 {
%ld = load <4 x i32>, <4 x i32> addrspace(2)* %in
%ext = zext <4 x i32> %ld to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
@@ -172,7 +172,7 @@ define void @constant_zextload_v4i32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x
; GCN: store_dwordx4
; GCN: store_dwordx4
-define void @constant_sextload_v4i32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i32> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v4i32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i32> addrspace(2)* %in) #0 {
%ld = load <4 x i32>, <4 x i32> addrspace(2)* %in
%ext = sext <4 x i32> %ld to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
@@ -191,7 +191,7 @@ define void @constant_sextload_v4i32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x
; GCN-HSA-DAG: flat_store_dwordx4
; GCN-HSA-DAG: flat_store_dwordx4
; GCN-HSA-DAG: flat_store_dwordx4
-define void @constant_zextload_v8i32_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i32> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v8i32_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i32> addrspace(2)* %in) #0 {
%ld = load <8 x i32>, <8 x i32> addrspace(2)* %in
%ext = zext <8 x i32> %ld to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
@@ -219,7 +219,7 @@ define void @constant_zextload_v8i32_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x
; GCN-HSA-DAG: flat_store_dwordx4
; GCN-HSA-DAG: flat_store_dwordx4
; GCN-HSA-DAG: flat_store_dwordx4
-define void @constant_sextload_v8i32_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i32> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v8i32_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i32> addrspace(2)* %in) #0 {
%ld = load <8 x i32>, <8 x i32> addrspace(2)* %in
%ext = sext <8 x i32> %ld to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
@@ -240,7 +240,7 @@ define void @constant_sextload_v8i32_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x
; GCN: store_dwordx4
; GCN: store_dwordx4
; GCN: store_dwordx4
-define void @constant_sextload_v16i32_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i32> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v16i32_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i32> addrspace(2)* %in) #0 {
%ld = load <16 x i32>, <16 x i32> addrspace(2)* %in
%ext = sext <16 x i32> %ld to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
@@ -267,7 +267,7 @@ define void @constant_sextload_v16i32_to_v16i64(<16 x i64> addrspace(1)* %out, <
; GCN-HSA: flat_store_dwordx4
; GCN-HSA: flat_store_dwordx4
; GCN-HSA: flat_store_dwordx4
-define void @constant_zextload_v16i32_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i32> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v16i32_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i32> addrspace(2)* %in) #0 {
%ld = load <16 x i32>, <16 x i32> addrspace(2)* %in
%ext = zext <16 x i32> %ld to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
@@ -319,7 +319,7 @@ define void @constant_zextload_v16i32_to_v16i64(<16 x i64> addrspace(1)* %out, <
; GCN-HSA-DAG: flat_store_dwordx4
; GCN-HSA-DAG: flat_store_dwordx4
-define void @constant_sextload_v32i32_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i32> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v32i32_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i32> addrspace(2)* %in) #0 {
%ld = load <32 x i32>, <32 x i32> addrspace(2)* %in
%ext = sext <32 x i32> %ld to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
@@ -370,7 +370,7 @@ define void @constant_sextload_v32i32_to_v32i64(<32 x i64> addrspace(1)* %out, <
; GCN-HSA-DAG: flat_store_dwordx4
; GCN-HSA-DAG: flat_store_dwordx4
; GCN-HSA-DAG: flat_store_dwordx4
-define void @constant_zextload_v32i32_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i32> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v32i32_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i32> addrspace(2)* %in) #0 {
%ld = load <32 x i32>, <32 x i32> addrspace(2)* %in
%ext = zext <32 x i32> %ld to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/load-constant-i64.ll b/test/CodeGen/AMDGPU/load-constant-i64.ll
index 0d071a10b49a..14e50ea4c3ca 100644
--- a/test/CodeGen/AMDGPU/load-constant-i64.ll
+++ b/test/CodeGen/AMDGPU/load-constant-i64.ll
@@ -7,7 +7,7 @@
; FUNC-LABEL: {{^}}constant_load_i64:
; GCN: s_load_dwordx2 {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0x0{{$}}
; EG: VTX_READ_64
-define void @constant_load_i64(i64 addrspace(1)* %out, i64 addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_i64(i64 addrspace(1)* %out, i64 addrspace(2)* %in) #0 {
%ld = load i64, i64 addrspace(2)* %in
store i64 %ld, i64 addrspace(1)* %out
ret void
@@ -17,7 +17,7 @@ define void @constant_load_i64(i64 addrspace(1)* %out, i64 addrspace(2)* %in) #0
; GCN: s_load_dwordx4
; EG: VTX_READ_128
-define void @constant_load_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(2)* %in) #0 {
entry:
%ld = load <2 x i64>, <2 x i64> addrspace(2)* %in
store <2 x i64> %ld, <2 x i64> addrspace(1)* %out
@@ -29,7 +29,7 @@ entry:
; EG-DAG: VTX_READ_128
; EG-DAG: VTX_READ_128
-define void @constant_load_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(2)* %in) #0 {
entry:
%ld = load <3 x i64>, <3 x i64> addrspace(2)* %in
store <3 x i64> %ld, <3 x i64> addrspace(1)* %out
@@ -41,7 +41,7 @@ entry:
; EG: VTX_READ_128
; EG: VTX_READ_128
-define void @constant_load_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(2)* %in) #0 {
entry:
%ld = load <4 x i64>, <4 x i64> addrspace(2)* %in
store <4 x i64> %ld, <4 x i64> addrspace(1)* %out
@@ -55,7 +55,7 @@ entry:
; EG: VTX_READ_128
; EG: VTX_READ_128
; EG: VTX_READ_128
-define void @constant_load_v8i64(<8 x i64> addrspace(1)* %out, <8 x i64> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_v8i64(<8 x i64> addrspace(1)* %out, <8 x i64> addrspace(2)* %in) #0 {
entry:
%ld = load <8 x i64>, <8 x i64> addrspace(2)* %in
store <8 x i64> %ld, <8 x i64> addrspace(1)* %out
@@ -74,7 +74,7 @@ entry:
; EG: VTX_READ_128
; EG: VTX_READ_128
; EG: VTX_READ_128
-define void @constant_load_v16i64(<16 x i64> addrspace(1)* %out, <16 x i64> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_v16i64(<16 x i64> addrspace(1)* %out, <16 x i64> addrspace(2)* %in) #0 {
entry:
%ld = load <16 x i64>, <16 x i64> addrspace(2)* %in
store <16 x i64> %ld, <16 x i64> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/load-constant-i8.ll b/test/CodeGen/AMDGPU/load-constant-i8.ll
index 9fdc4ebfd854..6e56b9f9b6d6 100644
--- a/test/CodeGen/AMDGPU/load-constant-i8.ll
+++ b/test/CodeGen/AMDGPU/load-constant-i8.ll
@@ -10,7 +10,7 @@
; EG: VTX_READ_8 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; TODO: NOT AND
-define void @constant_load_i8(i8 addrspace(1)* %out, i8 addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_i8(i8 addrspace(1)* %out, i8 addrspace(2)* %in) #0 {
entry:
%ld = load i8, i8 addrspace(2)* %in
store i8 %ld, i8 addrspace(1)* %out
@@ -22,7 +22,7 @@ entry:
; GCN-HSA: flat_load_ushort v
; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @constant_load_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(2)* %in) #0 {
entry:
%ld = load <2 x i8>, <2 x i8> addrspace(2)* %in
store <2 x i8> %ld, <2 x i8> addrspace(1)* %out
@@ -33,7 +33,7 @@ entry:
; GCN: s_load_dword s
; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @constant_load_v3i8(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_v3i8(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(2)* %in) #0 {
entry:
%ld = load <3 x i8>, <3 x i8> addrspace(2)* %in
store <3 x i8> %ld, <3 x i8> addrspace(1)* %out
@@ -44,7 +44,7 @@ entry:
; GCN: s_load_dword s
; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @constant_load_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(2)* %in) #0 {
entry:
%ld = load <4 x i8>, <4 x i8> addrspace(2)* %in
store <4 x i8> %ld, <4 x i8> addrspace(1)* %out
@@ -55,7 +55,7 @@ entry:
; GCN: s_load_dwordx2
; EG: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
-define void @constant_load_v8i8(<8 x i8> addrspace(1)* %out, <8 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_v8i8(<8 x i8> addrspace(1)* %out, <8 x i8> addrspace(2)* %in) #0 {
entry:
%ld = load <8 x i8>, <8 x i8> addrspace(2)* %in
store <8 x i8> %ld, <8 x i8> addrspace(1)* %out
@@ -66,7 +66,7 @@ entry:
; GCN: s_load_dwordx4
; EG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-define void @constant_load_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_load_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8> addrspace(2)* %in) #0 {
entry:
%ld = load <16 x i8>, <16 x i8> addrspace(2)* %in
store <16 x i8> %ld, <16 x i8> addrspace(1)* %out
@@ -78,7 +78,7 @@ entry:
; GCN-HSA: flat_load_ubyte
; EG: VTX_READ_8 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @constant_zextload_i8_to_i32(i32 addrspace(1)* %out, i8 addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_i8_to_i32(i32 addrspace(1)* %out, i8 addrspace(2)* %in) #0 {
%a = load i8, i8 addrspace(2)* %in
%ext = zext i8 %a to i32
store i32 %ext, i32 addrspace(1)* %out
@@ -92,7 +92,7 @@ define void @constant_zextload_i8_to_i32(i32 addrspace(1)* %out, i8 addrspace(2)
; EG: VTX_READ_8 [[DST:T[0-9]+\.X]], T{{[0-9]+}}.X, 0, #1
; EG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], 0.0, literal
; EG: 8
-define void @constant_sextload_i8_to_i32(i32 addrspace(1)* %out, i8 addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_i8_to_i32(i32 addrspace(1)* %out, i8 addrspace(2)* %in) #0 {
%ld = load i8, i8 addrspace(2)* %in
%ext = sext i8 %ld to i32
store i32 %ext, i32 addrspace(1)* %out
@@ -102,7 +102,7 @@ define void @constant_sextload_i8_to_i32(i32 addrspace(1)* %out, i8 addrspace(2)
; FUNC-LABEL: {{^}}constant_zextload_v1i8_to_v1i32:
; EG: VTX_READ_8 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @constant_zextload_v1i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v1i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i8> addrspace(2)* %in) #0 {
%load = load <1 x i8>, <1 x i8> addrspace(2)* %in
%ext = zext <1 x i8> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
@@ -114,7 +114,7 @@ define void @constant_zextload_v1i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x
; EG: VTX_READ_8 [[DST:T[0-9]+\.X]], T{{[0-9]+}}.X, 0, #1
; EG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], 0.0, literal
; EG: 8
-define void @constant_sextload_v1i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v1i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i8> addrspace(2)* %in) #0 {
%load = load <1 x i8>, <1 x i8> addrspace(2)* %in
%ext = sext <1 x i8> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
@@ -129,7 +129,7 @@ define void @constant_sextload_v1i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x
; TODO: This should use DST, but for some reason there are redundant MOVs
; EG: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, {{.*}}literal
; EG: 8
-define void @constant_zextload_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(2)* %in) #0 {
%load = load <2 x i8>, <2 x i8> addrspace(2)* %in
%ext = zext <2 x i8> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
@@ -150,7 +150,7 @@ define void @constant_zextload_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: 8
; EG-DAG: 8
-define void @constant_sextload_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(2)* %in) #0 {
%load = load <2 x i8>, <2 x i8> addrspace(2)* %in
%ext = sext <2 x i8> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
@@ -170,7 +170,7 @@ define void @constant_sextload_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x
; EG-DAG: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, {{.*}}literal
; EG-DAG: 8
; EG-DAG: 8
-define void @constant_zextload_v3i8_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v3i8_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i8> addrspace(2)* %in) #0 {
entry:
%ld = load <3 x i8>, <3 x i8> addrspace(2)* %in
%ext = zext <3 x i8> %ld to <3 x i32>
@@ -193,7 +193,7 @@ entry:
; EG-DAG: 8
; EG-DAG: 8
; EG-DAG: 8
-define void @constant_sextload_v3i8_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v3i8_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i8> addrspace(2)* %in) #0 {
entry:
%ld = load <3 x i8>, <3 x i8> addrspace(2)* %in
%ext = sext <3 x i8> %ld to <3 x i32>
@@ -214,7 +214,7 @@ entry:
; EG-DAG: 8
; EG-DAG: 8
; EG-DAG: 8
-define void @constant_zextload_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(2)* %in) #0 {
%load = load <4 x i8>, <4 x i8> addrspace(2)* %in
%ext = zext <4 x i8> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
@@ -236,7 +236,7 @@ define void @constant_zextload_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x
; EG-DAG: 8
; EG-DAG: 8
; EG-DAG: 8
-define void @constant_sextload_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(2)* %in) #0 {
%load = load <4 x i8>, <4 x i8> addrspace(2)* %in
%ext = sext <4 x i8> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
@@ -264,7 +264,7 @@ define void @constant_sextload_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x
; EG-DAG: 8
; EG-DAG: 8
; EG-DAG: 8
-define void @constant_zextload_v8i8_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v8i8_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i8> addrspace(2)* %in) #0 {
%load = load <8 x i8>, <8 x i8> addrspace(2)* %in
%ext = zext <8 x i8> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
@@ -294,7 +294,7 @@ define void @constant_zextload_v8i8_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x
; EG-DAG: 8
; EG-DAG: 8
; EG-DAG: 8
-define void @constant_sextload_v8i8_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v8i8_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i8> addrspace(2)* %in) #0 {
%load = load <8 x i8>, <8 x i8> addrspace(2)* %in
%ext = sext <8 x i8> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
@@ -335,7 +335,7 @@ define void @constant_sextload_v8i8_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x
; EG-DAG: 8
; EG-DAG: 8
; EG-DAG: 8
-define void @constant_zextload_v16i8_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v16i8_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i8> addrspace(2)* %in) #0 {
%load = load <16 x i8>, <16 x i8> addrspace(2)* %in
%ext = zext <16 x i8> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
@@ -378,7 +378,7 @@ define void @constant_zextload_v16i8_to_v16i32(<16 x i32> addrspace(1)* %out, <1
; EG-DAG: 8
; EG-DAG: 8
; EG-DAG: 8
-define void @constant_sextload_v16i8_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v16i8_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i8> addrspace(2)* %in) #0 {
%load = load <16 x i8>, <16 x i8> addrspace(2)* %in
%ext = sext <16 x i8> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
@@ -450,7 +450,7 @@ define void @constant_sextload_v16i8_to_v16i32(<16 x i32> addrspace(1)* %out, <1
; EG-DAG: 8
; EG-DAG: 8
; EG-DAG: 8
-define void @constant_zextload_v32i8_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v32i8_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i8> addrspace(2)* %in) #0 {
%load = load <32 x i8>, <32 x i8> addrspace(2)* %in
%ext = zext <32 x i8> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
@@ -526,7 +526,7 @@ define void @constant_zextload_v32i8_to_v32i32(<32 x i32> addrspace(1)* %out, <3
; EG-DAG: 8
; EG-DAG: 8
; EG-DAG: 8
-define void @constant_sextload_v32i8_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v32i8_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i8> addrspace(2)* %in) #0 {
%load = load <32 x i8>, <32 x i8> addrspace(2)* %in
%ext = sext <32 x i8> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
@@ -539,7 +539,7 @@ define void @constant_sextload_v32i8_to_v32i32(<32 x i32> addrspace(1)* %out, <3
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, T{{[0-9]+}}.X, 16, #1
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, T{{[0-9]+}}.X, 32, #1
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, T{{[0-9]+}}.X, 48, #1
-define void @constant_zextload_v64i8_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v64i8_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i8> addrspace(2)* %in) #0 {
%load = load <64 x i8>, <64 x i8> addrspace(2)* %in
%ext = zext <64 x i8> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
@@ -552,7 +552,7 @@ define void @constant_zextload_v64i8_to_v64i32(<64 x i32> addrspace(1)* %out, <6
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, T{{[0-9]+}}.X, 16, #1
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, T{{[0-9]+}}.X, 32, #1
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, T{{[0-9]+}}.X, 48, #1
-define void @constant_sextload_v64i8_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v64i8_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i8> addrspace(2)* %in) #0 {
%load = load <64 x i8>, <64 x i8> addrspace(2)* %in
%ext = sext <64 x i8> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
@@ -570,7 +570,7 @@ define void @constant_sextload_v64i8_to_v64i32(<64 x i32> addrspace(1)* %out, <6
; EG: VTX_READ_8 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; EG: MOV {{.*}}, 0.0
-define void @constant_zextload_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(2)* %in) #0 {
%a = load i8, i8 addrspace(2)* %in
%ext = zext i8 %a to i64
store i64 %ext, i64 addrspace(1)* %out
@@ -589,7 +589,7 @@ define void @constant_zextload_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(2)
; EG: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal
; TODO: Why not 7?
; EG: 31
-define void @constant_sextload_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(2)* %in) #0 {
%a = load i8, i8 addrspace(2)* %in
%ext = sext i8 %a to i64
store i64 %ext, i64 addrspace(1)* %out
@@ -600,7 +600,7 @@ define void @constant_sextload_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(2)
; EG: VTX_READ_8 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; EG: MOV {{.*}}, 0.0
-define void @constant_zextload_v1i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v1i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i8> addrspace(2)* %in) #0 {
%load = load <1 x i8>, <1 x i8> addrspace(2)* %in
%ext = zext <1 x i8> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
@@ -613,7 +613,7 @@ define void @constant_zextload_v1i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x
; EG: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal
; TODO: Why not 7?
; EG: 31
-define void @constant_sextload_v1i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v1i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i8> addrspace(2)* %in) #0 {
%load = load <1 x i8>, <1 x i8> addrspace(2)* %in
%ext = sext <1 x i8> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
@@ -623,7 +623,7 @@ define void @constant_sextload_v1i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x
; FUNC-LABEL: {{^}}constant_zextload_v2i8_to_v2i64:
; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @constant_zextload_v2i8_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v2i8_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i8> addrspace(2)* %in) #0 {
%load = load <2 x i8>, <2 x i8> addrspace(2)* %in
%ext = zext <2 x i8> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
@@ -633,7 +633,7 @@ define void @constant_zextload_v2i8_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x
; FUNC-LABEL: {{^}}constant_sextload_v2i8_to_v2i64:
; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @constant_sextload_v2i8_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v2i8_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i8> addrspace(2)* %in) #0 {
%load = load <2 x i8>, <2 x i8> addrspace(2)* %in
%ext = sext <2 x i8> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
@@ -643,7 +643,7 @@ define void @constant_sextload_v2i8_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x
; FUNC-LABEL: {{^}}constant_zextload_v4i8_to_v4i64:
; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @constant_zextload_v4i8_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v4i8_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i8> addrspace(2)* %in) #0 {
%load = load <4 x i8>, <4 x i8> addrspace(2)* %in
%ext = zext <4 x i8> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
@@ -653,7 +653,7 @@ define void @constant_zextload_v4i8_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x
; FUNC-LABEL: {{^}}constant_sextload_v4i8_to_v4i64:
; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @constant_sextload_v4i8_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v4i8_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i8> addrspace(2)* %in) #0 {
%load = load <4 x i8>, <4 x i8> addrspace(2)* %in
%ext = sext <4 x i8> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
@@ -663,7 +663,7 @@ define void @constant_sextload_v4i8_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x
; FUNC-LABEL: {{^}}constant_zextload_v8i8_to_v8i64:
; EG: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
-define void @constant_zextload_v8i8_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v8i8_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i8> addrspace(2)* %in) #0 {
%load = load <8 x i8>, <8 x i8> addrspace(2)* %in
%ext = zext <8 x i8> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
@@ -673,7 +673,7 @@ define void @constant_zextload_v8i8_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x
; FUNC-LABEL: {{^}}constant_sextload_v8i8_to_v8i64:
; EG: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
-define void @constant_sextload_v8i8_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i8> addrspace(2)* %in) #0 {
%load = load <8 x i8>, <8 x i8> addrspace(2)* %in
%ext = sext <8 x i8> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
@@ -683,7 +683,7 @@ define void @constant_sextload_v8i8_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x
; FUNC-LABEL: {{^}}constant_zextload_v16i8_to_v16i64:
; EG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-define void @constant_zextload_v16i8_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v16i8_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i8> addrspace(2)* %in) #0 {
%load = load <16 x i8>, <16 x i8> addrspace(2)* %in
%ext = zext <16 x i8> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
@@ -693,7 +693,7 @@ define void @constant_zextload_v16i8_to_v16i64(<16 x i64> addrspace(1)* %out, <1
; FUNC-LABEL: {{^}}constant_sextload_v16i8_to_v16i64:
; EG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-define void @constant_sextload_v16i8_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v16i8_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i8> addrspace(2)* %in) #0 {
%load = load <16 x i8>, <16 x i8> addrspace(2)* %in
%ext = sext <16 x i8> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
@@ -704,7 +704,7 @@ define void @constant_sextload_v16i8_to_v16i64(<16 x i64> addrspace(1)* %out, <1
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
-define void @constant_zextload_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i8> addrspace(2)* %in) #0 {
%load = load <32 x i8>, <32 x i8> addrspace(2)* %in
%ext = zext <32 x i8> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
@@ -715,7 +715,7 @@ define void @constant_zextload_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <3
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
-define void @constant_sextload_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i8> addrspace(2)* %in) #0 {
%load = load <32 x i8>, <32 x i8> addrspace(2)* %in
%ext = sext <32 x i8> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
@@ -723,7 +723,7 @@ define void @constant_sextload_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <3
}
; XFUNC-LABEL: {{^}}constant_zextload_v64i8_to_v64i64:
-; define void @constant_zextload_v64i8_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i8> addrspace(2)* %in) #0 {
+; define amdgpu_kernel void @constant_zextload_v64i8_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i8> addrspace(2)* %in) #0 {
; %load = load <64 x i8>, <64 x i8> addrspace(2)* %in
; %ext = zext <64 x i8> %load to <64 x i64>
; store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
@@ -731,7 +731,7 @@ define void @constant_sextload_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <3
; }
; XFUNC-LABEL: {{^}}constant_sextload_v64i8_to_v64i64:
-; define void @constant_sextload_v64i8_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i8> addrspace(2)* %in) #0 {
+; define amdgpu_kernel void @constant_sextload_v64i8_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i8> addrspace(2)* %in) #0 {
; %load = load <64 x i8>, <64 x i8> addrspace(2)* %in
; %ext = sext <64 x i8> %load to <64 x i64>
; store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
@@ -744,7 +744,7 @@ define void @constant_sextload_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <3
; GCN-HSA: flat_load_ubyte v[[VAL:[0-9]+]],
; GCN-HSA: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, v[[VAL]]
-define void @constant_zextload_i8_to_i16(i16 addrspace(1)* %out, i8 addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_i8_to_i16(i16 addrspace(1)* %out, i8 addrspace(2)* %in) #0 {
%a = load i8, i8 addrspace(2)* %in
%ext = zext i8 %a to i16
store i16 %ext, i16 addrspace(1)* %out
@@ -759,7 +759,7 @@ define void @constant_zextload_i8_to_i16(i16 addrspace(1)* %out, i8 addrspace(2)
; GCN-HSA: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, v[[VAL]]
; EG: VTX_READ_8 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @constant_sextload_i8_to_i16(i16 addrspace(1)* %out, i8 addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_i8_to_i16(i16 addrspace(1)* %out, i8 addrspace(2)* %in) #0 {
%a = load i8, i8 addrspace(2)* %in
%ext = sext i8 %a to i16
store i16 %ext, i16 addrspace(1)* %out
@@ -767,7 +767,7 @@ define void @constant_sextload_i8_to_i16(i16 addrspace(1)* %out, i8 addrspace(2)
}
; FUNC-LABEL: {{^}}constant_zextload_v1i8_to_v1i16:
-define void @constant_zextload_v1i8_to_v1i16(<1 x i16> addrspace(1)* %out, <1 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v1i8_to_v1i16(<1 x i16> addrspace(1)* %out, <1 x i8> addrspace(2)* %in) #0 {
%load = load <1 x i8>, <1 x i8> addrspace(2)* %in
%ext = zext <1 x i8> %load to <1 x i16>
store <1 x i16> %ext, <1 x i16> addrspace(1)* %out
@@ -778,7 +778,7 @@ define void @constant_zextload_v1i8_to_v1i16(<1 x i16> addrspace(1)* %out, <1 x
; EG: VTX_READ_8 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-define void @constant_sextload_v1i8_to_v1i16(<1 x i16> addrspace(1)* %out, <1 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v1i8_to_v1i16(<1 x i16> addrspace(1)* %out, <1 x i8> addrspace(2)* %in) #0 {
%load = load <1 x i8>, <1 x i8> addrspace(2)* %in
%ext = sext <1 x i8> %load to <1 x i16>
store <1 x i16> %ext, <1 x i16> addrspace(1)* %out
@@ -788,7 +788,7 @@ define void @constant_sextload_v1i8_to_v1i16(<1 x i16> addrspace(1)* %out, <1 x
; FUNC-LABEL: {{^}}constant_zextload_v2i8_to_v2i16:
; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @constant_zextload_v2i8_to_v2i16(<2 x i16> addrspace(1)* %out, <2 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v2i8_to_v2i16(<2 x i16> addrspace(1)* %out, <2 x i8> addrspace(2)* %in) #0 {
%load = load <2 x i8>, <2 x i8> addrspace(2)* %in
%ext = zext <2 x i8> %load to <2 x i16>
store <2 x i16> %ext, <2 x i16> addrspace(1)* %out
@@ -800,7 +800,7 @@ define void @constant_zextload_v2i8_to_v2i16(<2 x i16> addrspace(1)* %out, <2 x
; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-define void @constant_sextload_v2i8_to_v2i16(<2 x i16> addrspace(1)* %out, <2 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v2i8_to_v2i16(<2 x i16> addrspace(1)* %out, <2 x i8> addrspace(2)* %in) #0 {
%load = load <2 x i8>, <2 x i8> addrspace(2)* %in
%ext = sext <2 x i8> %load to <2 x i16>
store <2 x i16> %ext, <2 x i16> addrspace(1)* %out
@@ -810,7 +810,7 @@ define void @constant_sextload_v2i8_to_v2i16(<2 x i16> addrspace(1)* %out, <2 x
; FUNC-LABEL: {{^}}constant_zextload_v4i8_to_v4i16:
; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @constant_zextload_v4i8_to_v4i16(<4 x i16> addrspace(1)* %out, <4 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v4i8_to_v4i16(<4 x i16> addrspace(1)* %out, <4 x i8> addrspace(2)* %in) #0 {
%load = load <4 x i8>, <4 x i8> addrspace(2)* %in
%ext = zext <4 x i8> %load to <4 x i16>
store <4 x i16> %ext, <4 x i16> addrspace(1)* %out
@@ -824,7 +824,7 @@ define void @constant_zextload_v4i8_to_v4i16(<4 x i16> addrspace(1)* %out, <4 x
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-define void @constant_sextload_v4i8_to_v4i16(<4 x i16> addrspace(1)* %out, <4 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v4i8_to_v4i16(<4 x i16> addrspace(1)* %out, <4 x i8> addrspace(2)* %in) #0 {
%load = load <4 x i8>, <4 x i8> addrspace(2)* %in
%ext = sext <4 x i8> %load to <4 x i16>
store <4 x i16> %ext, <4 x i16> addrspace(1)* %out
@@ -834,7 +834,7 @@ define void @constant_sextload_v4i8_to_v4i16(<4 x i16> addrspace(1)* %out, <4 x
; FUNC-LABEL: {{^}}constant_zextload_v8i8_to_v8i16:
; EG: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
-define void @constant_zextload_v8i8_to_v8i16(<8 x i16> addrspace(1)* %out, <8 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v8i8_to_v8i16(<8 x i16> addrspace(1)* %out, <8 x i8> addrspace(2)* %in) #0 {
%load = load <8 x i8>, <8 x i8> addrspace(2)* %in
%ext = zext <8 x i8> %load to <8 x i16>
store <8 x i16> %ext, <8 x i16> addrspace(1)* %out
@@ -853,7 +853,7 @@ define void @constant_zextload_v8i8_to_v8i16(<8 x i16> addrspace(1)* %out, <8 x
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-define void @constant_sextload_v8i8_to_v8i16(<8 x i16> addrspace(1)* %out, <8 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v8i8_to_v8i16(<8 x i16> addrspace(1)* %out, <8 x i8> addrspace(2)* %in) #0 {
%load = load <8 x i8>, <8 x i8> addrspace(2)* %in
%ext = sext <8 x i8> %load to <8 x i16>
store <8 x i16> %ext, <8 x i16> addrspace(1)* %out
@@ -863,7 +863,7 @@ define void @constant_sextload_v8i8_to_v8i16(<8 x i16> addrspace(1)* %out, <8 x
; FUNC-LABEL: {{^}}constant_zextload_v16i8_to_v16i16:
; EG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-define void @constant_zextload_v16i8_to_v16i16(<16 x i16> addrspace(1)* %out, <16 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v16i8_to_v16i16(<16 x i16> addrspace(1)* %out, <16 x i8> addrspace(2)* %in) #0 {
%load = load <16 x i8>, <16 x i8> addrspace(2)* %in
%ext = zext <16 x i8> %load to <16 x i16>
store <16 x i16> %ext, <16 x i16> addrspace(1)* %out
@@ -889,7 +889,7 @@ define void @constant_zextload_v16i8_to_v16i16(<16 x i16> addrspace(1)* %out, <1
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-define void @constant_sextload_v16i8_to_v16i16(<16 x i16> addrspace(1)* %out, <16 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v16i8_to_v16i16(<16 x i16> addrspace(1)* %out, <16 x i8> addrspace(2)* %in) #0 {
%load = load <16 x i8>, <16 x i8> addrspace(2)* %in
%ext = sext <16 x i8> %load to <16 x i16>
store <16 x i16> %ext, <16 x i16> addrspace(1)* %out
@@ -900,7 +900,7 @@ define void @constant_sextload_v16i8_to_v16i16(<16 x i16> addrspace(1)* %out, <1
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
-define void @constant_zextload_v32i8_to_v32i16(<32 x i16> addrspace(1)* %out, <32 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_zextload_v32i8_to_v32i16(<32 x i16> addrspace(1)* %out, <32 x i8> addrspace(2)* %in) #0 {
%load = load <32 x i8>, <32 x i8> addrspace(2)* %in
%ext = zext <32 x i8> %load to <32 x i16>
store <32 x i16> %ext, <32 x i16> addrspace(1)* %out
@@ -943,7 +943,7 @@ define void @constant_zextload_v32i8_to_v32i16(<32 x i16> addrspace(1)* %out, <3
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-define void @constant_sextload_v32i8_to_v32i16(<32 x i16> addrspace(1)* %out, <32 x i8> addrspace(2)* %in) #0 {
+define amdgpu_kernel void @constant_sextload_v32i8_to_v32i16(<32 x i16> addrspace(1)* %out, <32 x i8> addrspace(2)* %in) #0 {
%load = load <32 x i8>, <32 x i8> addrspace(2)* %in
%ext = sext <32 x i8> %load to <32 x i16>
store <32 x i16> %ext, <32 x i16> addrspace(1)* %out
@@ -951,7 +951,7 @@ define void @constant_sextload_v32i8_to_v32i16(<32 x i16> addrspace(1)* %out, <3
}
; XFUNC-LABEL: {{^}}constant_zextload_v64i8_to_v64i16:
-; define void @constant_zextload_v64i8_to_v64i16(<64 x i16> addrspace(1)* %out, <64 x i8> addrspace(2)* %in) #0 {
+; define amdgpu_kernel void @constant_zextload_v64i8_to_v64i16(<64 x i16> addrspace(1)* %out, <64 x i8> addrspace(2)* %in) #0 {
; %load = load <64 x i8>, <64 x i8> addrspace(2)* %in
; %ext = zext <64 x i8> %load to <64 x i16>
; store <64 x i16> %ext, <64 x i16> addrspace(1)* %out
@@ -959,7 +959,7 @@ define void @constant_sextload_v32i8_to_v32i16(<32 x i16> addrspace(1)* %out, <3
; }
; XFUNC-LABEL: {{^}}constant_sextload_v64i8_to_v64i16:
-; define void @constant_sextload_v64i8_to_v64i16(<64 x i16> addrspace(1)* %out, <64 x i8> addrspace(2)* %in) #0 {
+; define amdgpu_kernel void @constant_sextload_v64i8_to_v64i16(<64 x i16> addrspace(1)* %out, <64 x i8> addrspace(2)* %in) #0 {
; %load = load <64 x i8>, <64 x i8> addrspace(2)* %in
; %ext = sext <64 x i8> %load to <64 x i16>
; store <64 x i16> %ext, <64 x i16> addrspace(1)* %out
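The hunks above apply one mechanical change to every kernel entry point in the file: the default calling convention is replaced by amdgpu_kernel, which marks the function as a dispatchable kernel for the AMDGPU backend rather than an ordinary device function. A minimal before/after sketch of the pattern, using a hypothetical kernel @example that is not part of this commit:

; Before: kernel declared with the default C calling convention.
define void @example(i32 addrspace(1)* %out) {
  store i32 0, i32 addrspace(1)* %out
  ret void
}

; After: amdgpu_kernel explicitly marks the kernel entry point.
define amdgpu_kernel void @example(i32 addrspace(1)* %out) {
  store i32 0, i32 addrspace(1)* %out
  ret void
}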
diff --git a/test/CodeGen/AMDGPU/load-global-f32.ll b/test/CodeGen/AMDGPU/load-global-f32.ll
index 805c0a7a39c7..bd6fea587b42 100644
--- a/test/CodeGen/AMDGPU/load-global-f32.ll
+++ b/test/CodeGen/AMDGPU/load-global-f32.ll
@@ -10,7 +10,7 @@
; GCN-HSA: flat_load_dword
; R600: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
-define void @global_load_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
entry:
%tmp0 = load float, float addrspace(1)* %in
store float %tmp0, float addrspace(1)* %out
@@ -22,7 +22,7 @@ entry:
; GCN-HSA: flat_load_dwordx2
; R600: VTX_READ_64
-define void @global_load_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in) #0 {
entry:
%tmp0 = load <2 x float>, <2 x float> addrspace(1)* %in
store <2 x float> %tmp0, <2 x float> addrspace(1)* %out
@@ -34,7 +34,7 @@ entry:
; GCN-HSA: flat_load_dwordx4
; R600: VTX_READ_128
-define void @global_load_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %in) #0 {
entry:
%tmp0 = load <3 x float>, <3 x float> addrspace(1)* %in
store <3 x float> %tmp0, <3 x float> addrspace(1)* %out
@@ -46,7 +46,7 @@ entry:
; GCN-HSA: flat_load_dwordx4
; R600: VTX_READ_128
-define void @global_load_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) #0 {
entry:
%tmp0 = load <4 x float>, <4 x float> addrspace(1)* %in
store <4 x float> %tmp0, <4 x float> addrspace(1)* %out
@@ -61,7 +61,7 @@ entry:
; R600: VTX_READ_128
; R600: VTX_READ_128
-define void @global_load_v8f32(<8 x float> addrspace(1)* %out, <8 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v8f32(<8 x float> addrspace(1)* %out, <8 x float> addrspace(1)* %in) #0 {
entry:
%tmp0 = load <8 x float>, <8 x float> addrspace(1)* %in
store <8 x float> %tmp0, <8 x float> addrspace(1)* %out
@@ -83,7 +83,7 @@ entry:
; R600: VTX_READ_128
; R600: VTX_READ_128
; R600: VTX_READ_128
-define void @global_load_v16f32(<16 x float> addrspace(1)* %out, <16 x float> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v16f32(<16 x float> addrspace(1)* %out, <16 x float> addrspace(1)* %in) #0 {
entry:
%tmp0 = load <16 x float>, <16 x float> addrspace(1)* %in
store <16 x float> %tmp0, <16 x float> addrspace(1)* %out
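One detail worth noting in the unchanged check lines: the FUNC-LABEL patterns are written as {{^}}name: so that FileCheck anchors the match at the start of the line. Illustrated with the first function above:

; FUNC-LABEL: {{^}}global_load_f32:
; Without the {{^}} anchor, the pattern could also match inside a longer
; label such as "other_global_load_f32:"; anchoring at column 0 pins the
; check to the intended function's label line.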
diff --git a/test/CodeGen/AMDGPU/load-global-f64.ll b/test/CodeGen/AMDGPU/load-global-f64.ll
index dc1a9432283e..5b772e1fe5ee 100644
--- a/test/CodeGen/AMDGPU/load-global-f64.ll
+++ b/test/CodeGen/AMDGPU/load-global-f64.ll
@@ -8,7 +8,7 @@
; GCN-HSA: flat_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
; GCN-HSA: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, [[VAL]]
-define void @global_load_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%ld = load double, double addrspace(1)* %in
store double %ld, double addrspace(1)* %out
ret void
@@ -17,7 +17,7 @@ define void @global_load_f64(double addrspace(1)* %out, double addrspace(1)* %in
; FUNC-LABEL: {{^}}global_load_v2f64:
; GCN-NOHSA: buffer_load_dwordx4
; GCN-HSA: flat_load_dwordx4
-define void @global_load_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in) #0 {
entry:
%ld = load <2 x double>, <2 x double> addrspace(1)* %in
store <2 x double> %ld, <2 x double> addrspace(1)* %out
@@ -29,7 +29,7 @@ entry:
; GCN-NOHSA: buffer_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
-define void @global_load_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %in) #0 {
entry:
%ld = load <3 x double>, <3 x double> addrspace(1)* %in
store <3 x double> %ld, <3 x double> addrspace(1)* %out
@@ -42,7 +42,7 @@ entry:
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
-define void @global_load_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in) #0 {
entry:
%ld = load <4 x double>, <4 x double> addrspace(1)* %in
store <4 x double> %ld, <4 x double> addrspace(1)* %out
@@ -59,7 +59,7 @@ entry:
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
-define void @global_load_v8f64(<8 x double> addrspace(1)* %out, <8 x double> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v8f64(<8 x double> addrspace(1)* %out, <8 x double> addrspace(1)* %in) #0 {
entry:
%ld = load <8 x double>, <8 x double> addrspace(1)* %in
store <8 x double> %ld, <8 x double> addrspace(1)* %out
@@ -84,7 +84,7 @@ entry:
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
-define void @global_load_v16f64(<16 x double> addrspace(1)* %out, <16 x double> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v16f64(<16 x double> addrspace(1)* %out, <16 x double> addrspace(1)* %in) #0 {
entry:
%ld = load <16 x double>, <16 x double> addrspace(1)* %in
store <16 x double> %ld, <16 x double> addrspace(1)* %out
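The GCN-NOHSA, GCN-HSA, and R600/EG prefixes in these hunks are selected by RUN lines at the top of each test file, which fall outside the hunks shown here. An illustrative pair, assuming typical flags for tests of this era rather than quoting the commit:

; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefixes=R600,FUNC %s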
diff --git a/test/CodeGen/AMDGPU/load-global-i1.ll b/test/CodeGen/AMDGPU/load-global-i1.ll
index e2e90cac8cc1..cb3536a0c128 100644
--- a/test/CodeGen/AMDGPU/load-global-i1.ll
+++ b/test/CodeGen/AMDGPU/load-global-i1.ll
@@ -9,56 +9,56 @@
; EG: VTX_READ_8
; EG: AND_INT
-define void @global_load_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) #0 {
%load = load i1, i1 addrspace(1)* %in
store i1 %load, i1 addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}global_load_v2i1:
-define void @global_load_v2i1(<2 x i1> addrspace(1)* %out, <2 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v2i1(<2 x i1> addrspace(1)* %out, <2 x i1> addrspace(1)* %in) #0 {
%load = load <2 x i1>, <2 x i1> addrspace(1)* %in
store <2 x i1> %load, <2 x i1> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}global_load_v3i1:
-define void @global_load_v3i1(<3 x i1> addrspace(1)* %out, <3 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v3i1(<3 x i1> addrspace(1)* %out, <3 x i1> addrspace(1)* %in) #0 {
%load = load <3 x i1>, <3 x i1> addrspace(1)* %in
store <3 x i1> %load, <3 x i1> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}global_load_v4i1:
-define void @global_load_v4i1(<4 x i1> addrspace(1)* %out, <4 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v4i1(<4 x i1> addrspace(1)* %out, <4 x i1> addrspace(1)* %in) #0 {
%load = load <4 x i1>, <4 x i1> addrspace(1)* %in
store <4 x i1> %load, <4 x i1> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}global_load_v8i1:
-define void @global_load_v8i1(<8 x i1> addrspace(1)* %out, <8 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v8i1(<8 x i1> addrspace(1)* %out, <8 x i1> addrspace(1)* %in) #0 {
%load = load <8 x i1>, <8 x i1> addrspace(1)* %in
store <8 x i1> %load, <8 x i1> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}global_load_v16i1:
-define void @global_load_v16i1(<16 x i1> addrspace(1)* %out, <16 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v16i1(<16 x i1> addrspace(1)* %out, <16 x i1> addrspace(1)* %in) #0 {
%load = load <16 x i1>, <16 x i1> addrspace(1)* %in
store <16 x i1> %load, <16 x i1> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}global_load_v32i1:
-define void @global_load_v32i1(<32 x i1> addrspace(1)* %out, <32 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v32i1(<32 x i1> addrspace(1)* %out, <32 x i1> addrspace(1)* %in) #0 {
%load = load <32 x i1>, <32 x i1> addrspace(1)* %in
store <32 x i1> %load, <32 x i1> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}global_load_v64i1:
-define void @global_load_v64i1(<64 x i1> addrspace(1)* %out, <64 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v64i1(<64 x i1> addrspace(1)* %out, <64 x i1> addrspace(1)* %in) #0 {
%load = load <64 x i1>, <64 x i1> addrspace(1)* %in
store <64 x i1> %load, <64 x i1> addrspace(1)* %out
ret void
@@ -67,7 +67,7 @@ define void @global_load_v64i1(<64 x i1> addrspace(1)* %out, <64 x i1> addrspace
; FUNC-LABEL: {{^}}global_zextload_i1_to_i32:
; GCN: buffer_load_ubyte
; GCN: buffer_store_dword
-define void @global_zextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %in) #0 {
%a = load i1, i1 addrspace(1)* %in
%ext = zext i1 %a to i32
store i32 %ext, i32 addrspace(1)* %out
@@ -81,7 +81,7 @@ define void @global_zextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
; EG: VTX_READ_8
; EG: BFE_INT
-define void @global_sextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %in) #0 {
%a = load i1, i1 addrspace(1)* %in
%ext = sext i1 %a to i32
store i32 %ext, i32 addrspace(1)* %out
@@ -89,7 +89,7 @@ define void @global_sextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
}
; FUNC-LABEL: {{^}}global_zextload_v1i1_to_v1i32:
-define void @global_zextload_v1i1_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v1i1_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i1> addrspace(1)* %in) #0 {
%load = load <1 x i1>, <1 x i1> addrspace(1)* %in
%ext = zext <1 x i1> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
@@ -97,7 +97,7 @@ define void @global_zextload_v1i1_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i1
}
; FUNC-LABEL: {{^}}global_sextload_v1i1_to_v1i32:
-define void @global_sextload_v1i1_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v1i1_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i1> addrspace(1)* %in) #0 {
%load = load <1 x i1>, <1 x i1> addrspace(1)* %in
%ext = sext <1 x i1> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
@@ -105,7 +105,7 @@ define void @global_sextload_v1i1_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i1
}
; FUNC-LABEL: {{^}}global_zextload_v2i1_to_v2i32:
-define void @global_zextload_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i1> addrspace(1)* %in) #0 {
%load = load <2 x i1>, <2 x i1> addrspace(1)* %in
%ext = zext <2 x i1> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
@@ -113,7 +113,7 @@ define void @global_zextload_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i1
}
; FUNC-LABEL: {{^}}global_sextload_v2i1_to_v2i32:
-define void @global_sextload_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i1> addrspace(1)* %in) #0 {
%load = load <2 x i1>, <2 x i1> addrspace(1)* %in
%ext = sext <2 x i1> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
@@ -121,7 +121,7 @@ define void @global_sextload_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i1
}
; FUNC-LABEL: {{^}}global_zextload_v3i1_to_v3i32:
-define void @global_zextload_v3i1_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v3i1_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i1> addrspace(1)* %in) #0 {
%load = load <3 x i1>, <3 x i1> addrspace(1)* %in
%ext = zext <3 x i1> %load to <3 x i32>
store <3 x i32> %ext, <3 x i32> addrspace(1)* %out
@@ -129,7 +129,7 @@ define void @global_zextload_v3i1_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i1
}
; FUNC-LABEL: {{^}}global_sextload_v3i1_to_v3i32:
-define void @global_sextload_v3i1_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v3i1_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i1> addrspace(1)* %in) #0 {
%load = load <3 x i1>, <3 x i1> addrspace(1)* %in
%ext = sext <3 x i1> %load to <3 x i32>
store <3 x i32> %ext, <3 x i32> addrspace(1)* %out
@@ -137,7 +137,7 @@ define void @global_sextload_v3i1_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i1
}
; FUNC-LABEL: {{^}}global_zextload_v4i1_to_v4i32:
-define void @global_zextload_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i1> addrspace(1)* %in) #0 {
%load = load <4 x i1>, <4 x i1> addrspace(1)* %in
%ext = zext <4 x i1> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
@@ -145,7 +145,7 @@ define void @global_zextload_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i1
}
; FUNC-LABEL: {{^}}global_sextload_v4i1_to_v4i32:
-define void @global_sextload_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i1> addrspace(1)* %in) #0 {
%load = load <4 x i1>, <4 x i1> addrspace(1)* %in
%ext = sext <4 x i1> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
@@ -153,7 +153,7 @@ define void @global_sextload_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i1
}
; FUNC-LABEL: {{^}}global_zextload_v8i1_to_v8i32:
-define void @global_zextload_v8i1_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v8i1_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i1> addrspace(1)* %in) #0 {
%load = load <8 x i1>, <8 x i1> addrspace(1)* %in
%ext = zext <8 x i1> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
@@ -161,7 +161,7 @@ define void @global_zextload_v8i1_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i1
}
; FUNC-LABEL: {{^}}global_sextload_v8i1_to_v8i32:
-define void @global_sextload_v8i1_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v8i1_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i1> addrspace(1)* %in) #0 {
%load = load <8 x i1>, <8 x i1> addrspace(1)* %in
%ext = sext <8 x i1> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
@@ -169,7 +169,7 @@ define void @global_sextload_v8i1_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i1
}
; FUNC-LABEL: {{^}}global_zextload_v16i1_to_v16i32:
-define void @global_zextload_v16i1_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v16i1_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i1> addrspace(1)* %in) #0 {
%load = load <16 x i1>, <16 x i1> addrspace(1)* %in
%ext = zext <16 x i1> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
@@ -177,7 +177,7 @@ define void @global_zextload_v16i1_to_v16i32(<16 x i32> addrspace(1)* %out, <16
}
; FUNC-LABEL: {{^}}global_sextload_v16i1_to_v16i32:
-define void @global_sextload_v16i1_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v16i1_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i1> addrspace(1)* %in) #0 {
%load = load <16 x i1>, <16 x i1> addrspace(1)* %in
%ext = sext <16 x i1> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
@@ -185,7 +185,7 @@ define void @global_sextload_v16i1_to_v16i32(<16 x i32> addrspace(1)* %out, <16
}
; FUNC-LABEL: {{^}}global_zextload_v32i1_to_v32i32:
-define void @global_zextload_v32i1_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v32i1_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i1> addrspace(1)* %in) #0 {
%load = load <32 x i1>, <32 x i1> addrspace(1)* %in
%ext = zext <32 x i1> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
@@ -193,7 +193,7 @@ define void @global_zextload_v32i1_to_v32i32(<32 x i32> addrspace(1)* %out, <32
}
; FUNC-LABEL: {{^}}global_sextload_v32i1_to_v32i32:
-define void @global_sextload_v32i1_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v32i1_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i1> addrspace(1)* %in) #0 {
%load = load <32 x i1>, <32 x i1> addrspace(1)* %in
%ext = sext <32 x i1> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
@@ -201,7 +201,7 @@ define void @global_sextload_v32i1_to_v32i32(<32 x i32> addrspace(1)* %out, <32
}
; FUNC-LABEL: {{^}}global_zextload_v64i1_to_v64i32:
-define void @global_zextload_v64i1_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v64i1_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i1> addrspace(1)* %in) #0 {
%load = load <64 x i1>, <64 x i1> addrspace(1)* %in
%ext = zext <64 x i1> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
@@ -209,7 +209,7 @@ define void @global_zextload_v64i1_to_v64i32(<64 x i32> addrspace(1)* %out, <64
}
; FUNC-LABEL: {{^}}global_sextload_v64i1_to_v64i32:
-define void @global_sextload_v64i1_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v64i1_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i1> addrspace(1)* %in) #0 {
%load = load <64 x i1>, <64 x i1> addrspace(1)* %in
%ext = sext <64 x i1> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
@@ -221,7 +221,7 @@ define void @global_sextload_v64i1_to_v64i32(<64 x i32> addrspace(1)* %out, <64
; GCN-DAG: v_mov_b32_e32 {{v[0-9]+}}, 0{{$}}
; GCN-DAG: v_and_b32_e32 {{v[0-9]+}}, 1, [[LOAD]]{{$}}
; GCN: buffer_store_dwordx2
-define void @global_zextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %in) #0 {
%a = load i1, i1 addrspace(1)* %in
%ext = zext i1 %a to i64
store i64 %ext, i64 addrspace(1)* %out
@@ -233,7 +233,7 @@ define void @global_zextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
; GCN: v_bfe_i32 [[BFE:v[0-9]+]], {{v[0-9]+}}, 0, 1{{$}}
; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, [[BFE]]
; GCN: buffer_store_dwordx2
-define void @global_sextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %in) #0 {
%a = load i1, i1 addrspace(1)* %in
%ext = sext i1 %a to i64
store i64 %ext, i64 addrspace(1)* %out
@@ -241,7 +241,7 @@ define void @global_sextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
}
; FUNC-LABEL: {{^}}global_zextload_v1i1_to_v1i64:
-define void @global_zextload_v1i1_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v1i1_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i1> addrspace(1)* %in) #0 {
%load = load <1 x i1>, <1 x i1> addrspace(1)* %in
%ext = zext <1 x i1> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
@@ -249,7 +249,7 @@ define void @global_zextload_v1i1_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i1
}
; FUNC-LABEL: {{^}}global_sextload_v1i1_to_v1i64:
-define void @global_sextload_v1i1_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v1i1_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i1> addrspace(1)* %in) #0 {
%load = load <1 x i1>, <1 x i1> addrspace(1)* %in
%ext = sext <1 x i1> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
@@ -257,7 +257,7 @@ define void @global_sextload_v1i1_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i1
}
; FUNC-LABEL: {{^}}global_zextload_v2i1_to_v2i64:
-define void @global_zextload_v2i1_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v2i1_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i1> addrspace(1)* %in) #0 {
%load = load <2 x i1>, <2 x i1> addrspace(1)* %in
%ext = zext <2 x i1> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
@@ -265,7 +265,7 @@ define void @global_zextload_v2i1_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i1
}
; FUNC-LABEL: {{^}}global_sextload_v2i1_to_v2i64:
-define void @global_sextload_v2i1_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v2i1_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i1> addrspace(1)* %in) #0 {
%load = load <2 x i1>, <2 x i1> addrspace(1)* %in
%ext = sext <2 x i1> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
@@ -273,7 +273,7 @@ define void @global_sextload_v2i1_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i1
}
; FUNC-LABEL: {{^}}global_zextload_v3i1_to_v3i64:
-define void @global_zextload_v3i1_to_v3i64(<3 x i64> addrspace(1)* %out, <3 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v3i1_to_v3i64(<3 x i64> addrspace(1)* %out, <3 x i1> addrspace(1)* %in) #0 {
%load = load <3 x i1>, <3 x i1> addrspace(1)* %in
%ext = zext <3 x i1> %load to <3 x i64>
store <3 x i64> %ext, <3 x i64> addrspace(1)* %out
@@ -281,7 +281,7 @@ define void @global_zextload_v3i1_to_v3i64(<3 x i64> addrspace(1)* %out, <3 x i1
}
; FUNC-LABEL: {{^}}global_sextload_v3i1_to_v3i64:
-define void @global_sextload_v3i1_to_v3i64(<3 x i64> addrspace(1)* %out, <3 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v3i1_to_v3i64(<3 x i64> addrspace(1)* %out, <3 x i1> addrspace(1)* %in) #0 {
%load = load <3 x i1>, <3 x i1> addrspace(1)* %in
%ext = sext <3 x i1> %load to <3 x i64>
store <3 x i64> %ext, <3 x i64> addrspace(1)* %out
@@ -289,7 +289,7 @@ define void @global_sextload_v3i1_to_v3i64(<3 x i64> addrspace(1)* %out, <3 x i1
}
; FUNC-LABEL: {{^}}global_zextload_v4i1_to_v4i64:
-define void @global_zextload_v4i1_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v4i1_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i1> addrspace(1)* %in) #0 {
%load = load <4 x i1>, <4 x i1> addrspace(1)* %in
%ext = zext <4 x i1> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
@@ -297,7 +297,7 @@ define void @global_zextload_v4i1_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i1
}
; FUNC-LABEL: {{^}}global_sextload_v4i1_to_v4i64:
-define void @global_sextload_v4i1_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v4i1_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i1> addrspace(1)* %in) #0 {
%load = load <4 x i1>, <4 x i1> addrspace(1)* %in
%ext = sext <4 x i1> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
@@ -305,7 +305,7 @@ define void @global_sextload_v4i1_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i1
}
; FUNC-LABEL: {{^}}global_zextload_v8i1_to_v8i64:
-define void @global_zextload_v8i1_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v8i1_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i1> addrspace(1)* %in) #0 {
%load = load <8 x i1>, <8 x i1> addrspace(1)* %in
%ext = zext <8 x i1> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
@@ -313,7 +313,7 @@ define void @global_zextload_v8i1_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i1
}
; FUNC-LABEL: {{^}}global_sextload_v8i1_to_v8i64:
-define void @global_sextload_v8i1_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v8i1_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i1> addrspace(1)* %in) #0 {
%load = load <8 x i1>, <8 x i1> addrspace(1)* %in
%ext = sext <8 x i1> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
@@ -321,7 +321,7 @@ define void @global_sextload_v8i1_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i1
}
; FUNC-LABEL: {{^}}global_zextload_v16i1_to_v16i64:
-define void @global_zextload_v16i1_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v16i1_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i1> addrspace(1)* %in) #0 {
%load = load <16 x i1>, <16 x i1> addrspace(1)* %in
%ext = zext <16 x i1> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
@@ -329,7 +329,7 @@ define void @global_zextload_v16i1_to_v16i64(<16 x i64> addrspace(1)* %out, <16
}
; FUNC-LABEL: {{^}}global_sextload_v16i1_to_v16i64:
-define void @global_sextload_v16i1_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v16i1_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i1> addrspace(1)* %in) #0 {
%load = load <16 x i1>, <16 x i1> addrspace(1)* %in
%ext = sext <16 x i1> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
@@ -337,7 +337,7 @@ define void @global_sextload_v16i1_to_v16i64(<16 x i64> addrspace(1)* %out, <16
}
; FUNC-LABEL: {{^}}global_zextload_v32i1_to_v32i64:
-define void @global_zextload_v32i1_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v32i1_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i1> addrspace(1)* %in) #0 {
%load = load <32 x i1>, <32 x i1> addrspace(1)* %in
%ext = zext <32 x i1> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
@@ -345,7 +345,7 @@ define void @global_zextload_v32i1_to_v32i64(<32 x i64> addrspace(1)* %out, <32
}
; FUNC-LABEL: {{^}}global_sextload_v32i1_to_v32i64:
-define void @global_sextload_v32i1_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v32i1_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i1> addrspace(1)* %in) #0 {
%load = load <32 x i1>, <32 x i1> addrspace(1)* %in
%ext = sext <32 x i1> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
@@ -353,7 +353,7 @@ define void @global_sextload_v32i1_to_v32i64(<32 x i64> addrspace(1)* %out, <32
}
; FUNC-LABEL: {{^}}global_zextload_v64i1_to_v64i64:
-define void @global_zextload_v64i1_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v64i1_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i1> addrspace(1)* %in) #0 {
%load = load <64 x i1>, <64 x i1> addrspace(1)* %in
%ext = zext <64 x i1> %load to <64 x i64>
store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
@@ -361,7 +361,7 @@ define void @global_zextload_v64i1_to_v64i64(<64 x i64> addrspace(1)* %out, <64
}
; FUNC-LABEL: {{^}}global_sextload_v64i1_to_v64i64:
-define void @global_sextload_v64i1_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i1> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v64i1_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i1> addrspace(1)* %in) #0 {
%load = load <64 x i1>, <64 x i1> addrspace(1)* %in
%ext = sext <64 x i1> %load to <64 x i64>
store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
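Throughout these tests the address-space numbering follows the pre-remap AMDGPU convention: addrspace(1) is global memory and addrspace(2) is constant memory, which is why the constant-load tests earlier in this diff read via VTX_READ/scalar paths while the global-load tests use buffer and flat instructions. A compact sketch with a hypothetical kernel name:

define amdgpu_kernel void @as_demo(i32 addrspace(1)* %out, i32 addrspace(2)* %cst) {
  %v = load i32, i32 addrspace(2)* %cst   ; constant address space: candidate for a uniform scalar load
  store i32 %v, i32 addrspace(1)* %out    ; global address space: buffer/flat store
  ret void
}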
diff --git a/test/CodeGen/AMDGPU/load-global-i16.ll b/test/CodeGen/AMDGPU/load-global-i16.ll
index 88d6b7b99d30..dcdd1a947cd4 100644
--- a/test/CodeGen/AMDGPU/load-global-i16.ll
+++ b/test/CodeGen/AMDGPU/load-global-i16.ll
@@ -11,7 +11,7 @@
; GCN-HSA: flat_load_ushort
; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @global_load_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @global_load_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
entry:
%ld = load i16, i16 addrspace(1)* %in
store i16 %ld, i16 addrspace(1)* %out
@@ -23,7 +23,7 @@ entry:
; GCN-HSA: flat_load_dword v
; EGCM: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @global_load_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
+define amdgpu_kernel void @global_load_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
entry:
%ld = load <2 x i16>, <2 x i16> addrspace(1)* %in
store <2 x i16> %ld, <2 x i16> addrspace(1)* %out
@@ -36,7 +36,7 @@ entry:
; EGCM-DAG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; EGCM-DAG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 4, #1
-define void @global_load_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> addrspace(1)* %in) {
+define amdgpu_kernel void @global_load_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> addrspace(1)* %in) {
entry:
%ld = load <3 x i16>, <3 x i16> addrspace(1)* %in
store <3 x i16> %ld, <3 x i16> addrspace(1)* %out
@@ -48,7 +48,7 @@ entry:
; GCN-HSA: flat_load_dwordx2
; EGCM: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
-define void @global_load_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
+define amdgpu_kernel void @global_load_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
entry:
%ld = load <4 x i16>, <4 x i16> addrspace(1)* %in
store <4 x i16> %ld, <4 x i16> addrspace(1)* %out
@@ -60,7 +60,7 @@ entry:
; GCN-HSA: flat_load_dwordx4
; EGCM: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-define void @global_load_v8i16(<8 x i16> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) {
+define amdgpu_kernel void @global_load_v8i16(<8 x i16> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) {
entry:
%ld = load <8 x i16>, <8 x i16> addrspace(1)* %in
store <8 x i16> %ld, <8 x i16> addrspace(1)* %out
@@ -76,7 +76,7 @@ entry:
; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
-define void @global_load_v16i16(<16 x i16> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) {
+define amdgpu_kernel void @global_load_v16i16(<16 x i16> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) {
entry:
%ld = load <16 x i16>, <16 x i16> addrspace(1)* %in
store <16 x i16> %ld, <16 x i16> addrspace(1)* %out
@@ -91,7 +91,7 @@ entry:
; GCN-HSA: flat_store_dword
; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @global_zextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
%a = load i16, i16 addrspace(1)* %in
%ext = zext i16 %a to i32
store i32 %ext, i32 addrspace(1)* %out
@@ -108,7 +108,7 @@ define void @global_zextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)
; EGCM: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], T{{[0-9]+}}.X, 0, #1
; EGCM: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], 0.0, literal
; EGCM: 16
-define void @global_sextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
%a = load i16, i16 addrspace(1)* %in
%ext = sext i16 %a to i32
store i32 %ext, i32 addrspace(1)* %out
@@ -120,7 +120,7 @@ define void @global_sextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)
; GCN-HSA: flat_load_ushort
; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @global_zextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(1)* %in) #0 {
%load = load <1 x i16>, <1 x i16> addrspace(1)* %in
%ext = zext <1 x i16> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
@@ -134,7 +134,7 @@ define void @global_zextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i
; EGCM: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], T{{[0-9]+}}.X, 0, #1
; EGCM: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], 0.0, literal
; EGCM: 16
-define void @global_sextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(1)* %in) #0 {
%load = load <1 x i16>, <1 x i16> addrspace(1)* %in
%ext = sext <1 x i16> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
@@ -148,7 +148,7 @@ define void @global_sextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i
; EGCM: VTX_READ_32 [[DST:T[0-9]\.[XYZW]]], [[DST]], 0, #1
; EGCM: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], literal
; EGCM: 16
-define void @global_zextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
%load = load <2 x i16>, <2 x i16> addrspace(1)* %in
%ext = zext <2 x i16> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
@@ -168,7 +168,7 @@ define void @global_zextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i
; EGCM-DAG: BFE_INT {{[* ]*}}[[ST]].Y, {{PV.[XYZW]}}, 0.0, literal
; EGCM-DAG: 16
; EGCM-DAG: 16
-define void @global_sextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
%load = load <2 x i16>, <2 x i16> addrspace(1)* %in
%ext = sext <2 x i16> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
@@ -190,7 +190,7 @@ define void @global_sextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i
; EGCM: 16
; EGCM: AND_INT {{[* ]*}}[[ST_LO]].X, {{T[0-9]\.[XYZW]}}, literal
; EGCM: AND_INT {{[* ]*}}[[ST_HI]].X, [[DST_HI]], literal
-define void @global_zextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(1)* %in) {
+define amdgpu_kernel void @global_zextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(1)* %in) {
entry:
%ld = load <3 x i16>, <3 x i16> addrspace(1)* %in
%ext = zext <3 x i16> %ld to <3 x i32>
@@ -214,7 +214,7 @@ entry:
; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_HI]].X, [[DST_HI]], 0.0, literal
; EGCM-DAG: 16
; EGCM-DAG: 16
-define void @global_sextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(1)* %in) {
+define amdgpu_kernel void @global_sextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(1)* %in) {
entry:
%ld = load <3 x i16>, <3 x i16> addrspace(1)* %in
%ext = sext <3 x i16> %ld to <3 x i32>
@@ -237,7 +237,7 @@ entry:
; EGCM-DAG: AND_INT {{[* ]*}}[[ST]].X, {{.*}}, literal
; EGCM-DAG: AND_INT {{[* ]*}}[[ST]].Z, {{.*}}, literal
; EGCM-DAG: 16
-define void @global_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
%load = load <4 x i16>, <4 x i16> addrspace(1)* %in
%ext = zext <4 x i16> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
@@ -262,7 +262,7 @@ define void @global_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i
; EGCM-DAG: 16
; EGCM-DAG: 16
; EGCM-DAG: 16
-define void @global_sextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
%load = load <4 x i16>, <4 x i16> addrspace(1)* %in
%ext = sext <4 x i16> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
@@ -296,7 +296,7 @@ define void @global_sextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i
; EGCM-DAG: 16
; EGCM-DAG: 16
; EGCM-DAG: 16
-define void @global_zextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) #0 {
%load = load <8 x i16>, <8 x i16> addrspace(1)* %in
%ext = zext <8 x i16> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
@@ -330,7 +330,7 @@ define void @global_zextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i
; EGCM-DAG: 16
; EGCM-DAG: 16
; EGCM-DAG: 16
-define void @global_sextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) #0 {
%load = load <8 x i16>, <8 x i16> addrspace(1)* %in
%ext = sext <8 x i16> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
@@ -346,7 +346,7 @@ define void @global_sextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i
; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1
; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1
-define void @global_zextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) #0 {
%load = load <16 x i16>, <16 x i16> addrspace(1)* %in
%ext = zext <16 x i16> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
@@ -357,7 +357,7 @@ define void @global_zextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16
; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1
; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1
-define void @global_sextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) #0 {
%load = load <16 x i16>, <16 x i16> addrspace(1)* %in
%ext = sext <16 x i16> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
@@ -379,7 +379,7 @@ define void @global_sextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16
; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1
; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 32, #1
; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 48, #1
-define void @global_zextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(1)* %in) #0 {
%load = load <32 x i16>, <32 x i16> addrspace(1)* %in
%ext = zext <32 x i16> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
@@ -401,7 +401,7 @@ define void @global_zextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32
; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1
; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 32, #1
; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 48, #1
-define void @global_sextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(1)* %in) #0 {
%load = load <32 x i16>, <32 x i16> addrspace(1)* %in
%ext = sext <32 x i16> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
@@ -435,7 +435,7 @@ define void @global_sextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32
; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 80, #1
; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 96, #1
; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 112, #1
-define void @global_zextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(1)* %in) #0 {
%load = load <64 x i16>, <64 x i16> addrspace(1)* %in
%ext = zext <64 x i16> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
@@ -452,7 +452,7 @@ define void @global_zextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64
; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 80, #1
; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 96, #1
; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 112, #1
-define void @global_sextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(1)* %in) #0 {
%load = load <64 x i16>, <64 x i16> addrspace(1)* %in
%ext = sext <64 x i16> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
@@ -469,7 +469,7 @@ define void @global_sextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64
; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; EGCM: MOV {{.*}}, 0.0
-define void @global_zextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
%a = load i16, i16 addrspace(1)* %in
%ext = zext i16 %a to i64
store i64 %ext, i64 addrspace(1)* %out
@@ -495,7 +495,7 @@ define void @global_zextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)
; EGCM: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal
; TODO: These could be expanded earlier using ASHR 15
; EGCM: 31
-define void @global_sextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
%a = load i16, i16 addrspace(1)* %in
%ext = sext i16 %a to i64
store i64 %ext, i64 addrspace(1)* %out
@@ -506,7 +506,7 @@ define void @global_sextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)
; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; EGCM: MOV {{.*}}, 0.0
-define void @global_zextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(1)* %in) #0 {
%load = load <1 x i16>, <1 x i16> addrspace(1)* %in
%ext = zext <1 x i16> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
@@ -519,7 +519,7 @@ define void @global_zextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i
; EGCM: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal
; TODO: These could be expanded earlier using ASHR 15
; EGCM: 31
-define void @global_sextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(1)* %in) #0 {
%load = load <1 x i16>, <1 x i16> addrspace(1)* %in
%ext = sext <1 x i16> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
@@ -527,7 +527,7 @@ define void @global_sextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i
}
; FUNC-LABEL: {{^}}global_zextload_v2i16_to_v2i64:
-define void @global_zextload_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
%load = load <2 x i16>, <2 x i16> addrspace(1)* %in
%ext = zext <2 x i16> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
@@ -537,7 +537,7 @@ define void @global_zextload_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i
; FUNC-LABEL: {{^}}global_sextload_v2i16_to_v2i64:
; EGCM: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @global_sextload_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
%load = load <2 x i16>, <2 x i16> addrspace(1)* %in
%ext = sext <2 x i16> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
@@ -547,7 +547,7 @@ define void @global_sextload_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i
; FUNC-LABEL: {{^}}global_zextload_v4i16_to_v4i64:
; EGCM: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
-define void @global_zextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
%load = load <4 x i16>, <4 x i16> addrspace(1)* %in
%ext = zext <4 x i16> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
@@ -557,7 +557,7 @@ define void @global_zextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i
; FUNC-LABEL: {{^}}global_sextload_v4i16_to_v4i64:
; EGCM: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
-define void @global_sextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
%load = load <4 x i16>, <4 x i16> addrspace(1)* %in
%ext = sext <4 x i16> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
@@ -567,7 +567,7 @@ define void @global_sextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i
; FUNC-LABEL: {{^}}global_zextload_v8i16_to_v8i64:
; EGCM: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-define void @global_zextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) #0 {
%load = load <8 x i16>, <8 x i16> addrspace(1)* %in
%ext = zext <8 x i16> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
@@ -577,7 +577,7 @@ define void @global_zextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i
; FUNC-LABEL: {{^}}global_sextload_v8i16_to_v8i64:
; EGCM: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-define void @global_sextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) #0 {
%load = load <8 x i16>, <8 x i16> addrspace(1)* %in
%ext = sext <8 x i16> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
@@ -588,7 +588,7 @@ define void @global_sextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i
; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
-define void @global_zextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) #0 {
%load = load <16 x i16>, <16 x i16> addrspace(1)* %in
%ext = zext <16 x i16> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
@@ -599,7 +599,7 @@ define void @global_zextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16
; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
-define void @global_sextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) #0 {
%load = load <16 x i16>, <16 x i16> addrspace(1)* %in
%ext = sext <16 x i16> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
@@ -612,7 +612,7 @@ define void @global_sextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16
; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 32, #1
; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 48, #1
-define void @global_zextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(1)* %in) #0 {
%load = load <32 x i16>, <32 x i16> addrspace(1)* %in
%ext = zext <32 x i16> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
@@ -625,7 +625,7 @@ define void @global_zextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32
; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 32, #1
; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 48, #1
-define void @global_sextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(1)* %in) #0 {
%load = load <32 x i16>, <32 x i16> addrspace(1)* %in
%ext = sext <32 x i16> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
@@ -633,7 +633,7 @@ define void @global_sextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32
}
; ; XFUNC-LABEL: {{^}}global_zextload_v64i16_to_v64i64:
-; define void @global_zextload_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i16> addrspace(1)* %in) #0 {
+; define amdgpu_kernel void @global_zextload_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i16> addrspace(1)* %in) #0 {
; %load = load <64 x i16>, <64 x i16> addrspace(1)* %in
; %ext = zext <64 x i16> %load to <64 x i64>
; store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
@@ -641,7 +641,7 @@ define void @global_sextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32
; }
; ; XFUNC-LABEL: {{^}}global_sextload_v64i16_to_v64i64:
-; define void @global_sextload_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i16> addrspace(1)* %in) #0 {
+; define amdgpu_kernel void @global_sextload_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i16> addrspace(1)* %in) #0 {
; %load = load <64 x i16>, <64 x i16> addrspace(1)* %in
; %ext = sext <64 x i16> %load to <64 x i64>
; store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/load-global-i32.ll b/test/CodeGen/AMDGPU/load-global-i32.ll
index e3335347a63f..5df32c1e3120 100644
--- a/test/CodeGen/AMDGPU/load-global-i32.ll
+++ b/test/CodeGen/AMDGPU/load-global-i32.ll
@@ -9,7 +9,7 @@
; GCN-HSA: flat_load_dword
; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
-define void @global_load_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
entry:
%ld = load i32, i32 addrspace(1)* %in
store i32 %ld, i32 addrspace(1)* %out
@@ -21,7 +21,7 @@ entry:
; GCN-HSA: flat_load_dwordx2
; EG: VTX_READ_64
-define void @global_load_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) #0 {
entry:
%ld = load <2 x i32>, <2 x i32> addrspace(1)* %in
store <2 x i32> %ld, <2 x i32> addrspace(1)* %out
@@ -33,7 +33,7 @@ entry:
; GCN-HSA: flat_load_dwordx4
; EG: VTX_READ_128
-define void @global_load_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %in) #0 {
entry:
%ld = load <3 x i32>, <3 x i32> addrspace(1)* %in
store <3 x i32> %ld, <3 x i32> addrspace(1)* %out
@@ -45,7 +45,7 @@ entry:
; GCN-HSA: flat_load_dwordx4
; EG: VTX_READ_128
-define void @global_load_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
entry:
%ld = load <4 x i32>, <4 x i32> addrspace(1)* %in
store <4 x i32> %ld, <4 x i32> addrspace(1)* %out
@@ -60,7 +60,7 @@ entry:
; EG: VTX_READ_128
; EG: VTX_READ_128
-define void @global_load_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(1)* %in) #0 {
entry:
%ld = load <8 x i32>, <8 x i32> addrspace(1)* %in
store <8 x i32> %ld, <8 x i32> addrspace(1)* %out
@@ -82,7 +82,7 @@ entry:
; EG: VTX_READ_128
; EG: VTX_READ_128
; EG: VTX_READ_128
-define void @global_load_v16i32(<16 x i32> addrspace(1)* %out, <16 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v16i32(<16 x i32> addrspace(1)* %out, <16 x i32> addrspace(1)* %in) #0 {
entry:
%ld = load <16 x i32>, <16 x i32> addrspace(1)* %in
store <16 x i32> %ld, <16 x i32> addrspace(1)* %out
@@ -98,7 +98,7 @@ entry:
; GCN-HSA: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[HI]]]
; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
-define void @global_zextload_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%ld = load i32, i32 addrspace(1)* %in
%ext = zext i32 %ld to i64
store i64 %ext, i64 addrspace(1)* %out
@@ -117,7 +117,7 @@ define void @global_zextload_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)
; EG: VTX_READ_32
; EG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, T{{[0-9]\.[XYZW]}}, literal.
; EG: 31
-define void @global_sextload_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%ld = load i32, i32 addrspace(1)* %in
%ext = sext i32 %ld to i64
store i64 %ext, i64 addrspace(1)* %out
@@ -130,7 +130,7 @@ define void @global_sextload_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)
; GCN-HSA: flat_load_dword
; GCN-HSA: flat_store_dwordx2
-define void @global_zextload_v1i32_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v1i32_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i32> addrspace(1)* %in) #0 {
%ld = load <1 x i32>, <1 x i32> addrspace(1)* %in
%ext = zext <1 x i32> %ld to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
@@ -143,7 +143,7 @@ define void @global_zextload_v1i32_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i
; GCN: v_ashrrev_i32_e32 v[[HI:[0-9]+]], 31, v[[LO]]
; GCN-NOHSA: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
; GCN-HSA: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @global_sextload_v1i32_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v1i32_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i32> addrspace(1)* %in) #0 {
%ld = load <1 x i32>, <1 x i32> addrspace(1)* %in
%ext = sext <1 x i32> %ld to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
@@ -156,7 +156,7 @@ define void @global_sextload_v1i32_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i
; GCN-HSA: flat_load_dwordx2
; GCN-HSA: flat_store_dwordx4
-define void @global_zextload_v2i32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v2i32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) #0 {
%ld = load <2 x i32>, <2 x i32> addrspace(1)* %in
%ext = zext <2 x i32> %ld to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
@@ -172,7 +172,7 @@ define void @global_zextload_v2i32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i
; GCN-NOHSA-DAG: buffer_store_dwordx4
; GCN-HSA-DAG: flat_store_dwordx4
-define void @global_sextload_v2i32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v2i32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) #0 {
%ld = load <2 x i32>, <2 x i32> addrspace(1)* %in
%ext = sext <2 x i32> %ld to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
@@ -187,7 +187,7 @@ define void @global_sextload_v2i32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_store_dwordx4
; GCN-HSA: flat_store_dwordx4
-define void @global_zextload_v4i32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v4i32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
%ld = load <4 x i32>, <4 x i32> addrspace(1)* %in
%ext = zext <4 x i32> %ld to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
@@ -208,7 +208,7 @@ define void @global_zextload_v4i32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i
; GCN-HSA-DAG: flat_store_dwordx4
; GCN-HSA-DAG: flat_store_dwordx4
-define void @global_sextload_v4i32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v4i32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
%ld = load <4 x i32>, <4 x i32> addrspace(1)* %in
%ext = sext <4 x i32> %ld to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
@@ -231,7 +231,7 @@ define void @global_sextload_v4i32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i
; GCN-HSA-DAG: flat_store_dwordx4
; GCN-HSA-DAG: flat_store_dwordx4
; GCN-HSA-DAG: flat_store_dwordx4
-define void @global_zextload_v8i32_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v8i32_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i32> addrspace(1)* %in) #0 {
%ld = load <8 x i32>, <8 x i32> addrspace(1)* %in
%ext = zext <8 x i32> %ld to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
@@ -263,7 +263,7 @@ define void @global_zextload_v8i32_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i
; GCN-HSA-DAG: flat_store_dwordx4
; GCN-HSA-DAG: flat_store_dwordx4
; GCN-HSA-DAG: flat_store_dwordx4
-define void @global_sextload_v8i32_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v8i32_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i32> addrspace(1)* %in) #0 {
%ld = load <8 x i32>, <8 x i32> addrspace(1)* %in
%ext = sext <8 x i32> %ld to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
@@ -309,7 +309,7 @@ define void @global_sextload_v8i32_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i
; GCN-DAG: v_ashrrev_i32
; GCN-NOHSA-DAG: buffer_store_dwordx4
; GCN-HSA-DAG: flat_store_dwordx4
-define void @global_sextload_v16i32_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v16i32_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i32> addrspace(1)* %in) #0 {
%ld = load <16 x i32>, <16 x i32> addrspace(1)* %in
%ext = sext <16 x i32> %ld to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
@@ -344,7 +344,7 @@ define void @global_sextload_v16i32_to_v16i64(<16 x i64> addrspace(1)* %out, <16
; GCN-HSA: flat_store_dwordx4
; GCN-HSA: flat_store_dwordx4
; GCN-HSA: flat_store_dwordx4
-define void @global_zextload_v16i32_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v16i32_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i32> addrspace(1)* %in) #0 {
%ld = load <16 x i32>, <16 x i32> addrspace(1)* %in
%ext = zext <16 x i32> %ld to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
@@ -444,7 +444,7 @@ define void @global_zextload_v16i32_to_v16i64(<16 x i64> addrspace(1)* %out, <16
; GCN-HSA: flat_store_dwordx4
; GCN-HSA: flat_store_dwordx4
-define void @global_sextload_v32i32_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i32> addrspace(1)* %in) #0 {
%ld = load <32 x i32>, <32 x i32> addrspace(1)* %in
%ext = sext <32 x i32> %ld to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
@@ -511,7 +511,7 @@ define void @global_sextload_v32i32_to_v32i64(<32 x i64> addrspace(1)* %out, <32
; GCN-HSA-DAG: flat_store_dwordx4
; GCN-HSA-DAG: flat_store_dwordx4
; GCN-HSA-DAG: flat_store_dwordx4
-define void @global_zextload_v32i32_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v32i32_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i32> addrspace(1)* %in) #0 {
%ld = load <32 x i32>, <32 x i32> addrspace(1)* %in
%ext = zext <32 x i32> %ld to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/load-global-i64.ll b/test/CodeGen/AMDGPU/load-global-i64.ll
index dd4ce2c10ebd..de16b6c8997e 100644
--- a/test/CodeGen/AMDGPU/load-global-i64.ll
+++ b/test/CodeGen/AMDGPU/load-global-i64.ll
@@ -13,7 +13,7 @@
; GCN-HSA: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, [[VAL]]
; EG: VTX_READ_64
-define void @global_load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
%ld = load i64, i64 addrspace(1)* %in
store i64 %ld, i64 addrspace(1)* %out
ret void
@@ -24,7 +24,7 @@ define void @global_load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
; GCN-HSA: flat_load_dwordx4
; EG: VTX_READ_128
-define void @global_load_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) #0 {
entry:
%ld = load <2 x i64>, <2 x i64> addrspace(1)* %in
store <2 x i64> %ld, <2 x i64> addrspace(1)* %out
@@ -40,7 +40,7 @@ entry:
; EG: VTX_READ_128
; EG: VTX_READ_128
-define void @global_load_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %in) #0 {
entry:
%ld = load <3 x i64>, <3 x i64> addrspace(1)* %in
store <3 x i64> %ld, <3 x i64> addrspace(1)* %out
@@ -56,7 +56,7 @@ entry:
; EG: VTX_READ_128
; EG: VTX_READ_128
-define void @global_load_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
entry:
%ld = load <4 x i64>, <4 x i64> addrspace(1)* %in
store <4 x i64> %ld, <4 x i64> addrspace(1)* %out
@@ -78,7 +78,7 @@ entry:
; EG: VTX_READ_128
; EG: VTX_READ_128
; EG: VTX_READ_128
-define void @global_load_v8i64(<8 x i64> addrspace(1)* %out, <8 x i64> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v8i64(<8 x i64> addrspace(1)* %out, <8 x i64> addrspace(1)* %in) #0 {
entry:
%ld = load <8 x i64>, <8 x i64> addrspace(1)* %in
store <8 x i64> %ld, <8 x i64> addrspace(1)* %out
@@ -112,7 +112,7 @@ entry:
; EG: VTX_READ_128
; EG: VTX_READ_128
; EG: VTX_READ_128
-define void @global_load_v16i64(<16 x i64> addrspace(1)* %out, <16 x i64> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v16i64(<16 x i64> addrspace(1)* %out, <16 x i64> addrspace(1)* %in) #0 {
entry:
%ld = load <16 x i64>, <16 x i64> addrspace(1)* %in
store <16 x i64> %ld, <16 x i64> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/load-global-i8.ll b/test/CodeGen/AMDGPU/load-global-i8.ll
index c880700f347b..71adf090532f 100644
--- a/test/CodeGen/AMDGPU/load-global-i8.ll
+++ b/test/CodeGen/AMDGPU/load-global-i8.ll
@@ -11,7 +11,7 @@
; EG: VTX_READ_8 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; TODO: NOT AND
-define void @global_load_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
entry:
%ld = load i8, i8 addrspace(1)* %in
store i8 %ld, i8 addrspace(1)* %out
@@ -23,7 +23,7 @@ entry:
; GCN-HSA: flat_load_ushort v
; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @global_load_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) #0 {
entry:
%ld = load <2 x i8>, <2 x i8> addrspace(1)* %in
store <2 x i8> %ld, <2 x i8> addrspace(1)* %out
@@ -35,7 +35,7 @@ entry:
; GCN-HSA: flat_load_dword v
; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @global_load_v3i8(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v3i8(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) #0 {
entry:
%ld = load <3 x i8>, <3 x i8> addrspace(1)* %in
store <3 x i8> %ld, <3 x i8> addrspace(1)* %out
@@ -47,7 +47,7 @@ entry:
; GCN-HSA: flat_load_dword v
; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @global_load_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) #0 {
entry:
%ld = load <4 x i8>, <4 x i8> addrspace(1)* %in
store <4 x i8> %ld, <4 x i8> addrspace(1)* %out
@@ -59,7 +59,7 @@ entry:
; GCN-HSA: flat_load_dwordx2
; EG: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
-define void @global_load_v8i8(<8 x i8> addrspace(1)* %out, <8 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v8i8(<8 x i8> addrspace(1)* %out, <8 x i8> addrspace(1)* %in) #0 {
entry:
%ld = load <8 x i8>, <8 x i8> addrspace(1)* %in
store <8 x i8> %ld, <8 x i8> addrspace(1)* %out
@@ -72,7 +72,7 @@ entry:
; GCN-HSA: flat_load_dwordx4
; EG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-define void @global_load_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_load_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8> addrspace(1)* %in) #0 {
entry:
%ld = load <16 x i8>, <16 x i8> addrspace(1)* %in
store <16 x i8> %ld, <16 x i8> addrspace(1)* %out
@@ -84,7 +84,7 @@ entry:
; GCN-HSA: flat_load_ubyte
; EG: VTX_READ_8 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @global_zextload_i8_to_i32(i32 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_i8_to_i32(i32 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
%a = load i8, i8 addrspace(1)* %in
%ext = zext i8 %a to i32
store i32 %ext, i32 addrspace(1)* %out
@@ -98,7 +98,7 @@ define void @global_zextload_i8_to_i32(i32 addrspace(1)* %out, i8 addrspace(1)*
; EG: VTX_READ_8 [[DST:T[0-9]\.[XYZW]]], [[DST]], 0, #1
; EG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], 0.0, literal
; EG: 8
-define void @global_sextload_i8_to_i32(i32 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_i8_to_i32(i32 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
%ld = load i8, i8 addrspace(1)* %in
%ext = sext i8 %ld to i32
store i32 %ext, i32 addrspace(1)* %out
@@ -108,7 +108,7 @@ define void @global_sextload_i8_to_i32(i32 addrspace(1)* %out, i8 addrspace(1)*
; FUNC-LABEL: {{^}}global_zextload_v1i8_to_v1i32:
; EG: VTX_READ_8 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @global_zextload_v1i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v1i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i8> addrspace(1)* %in) #0 {
%load = load <1 x i8>, <1 x i8> addrspace(1)* %in
%ext = zext <1 x i8> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
@@ -120,7 +120,7 @@ define void @global_zextload_v1i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i8
; EG: VTX_READ_8 [[DST:T[0-9]\.[XYZW]]], [[DST]], 0, #1
; EG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], 0.0, literal
; EG: 8
-define void @global_sextload_v1i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v1i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i8> addrspace(1)* %in) #0 {
%load = load <1 x i8>, <1 x i8> addrspace(1)* %in
%ext = sext <1 x i8> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
@@ -135,7 +135,7 @@ define void @global_sextload_v1i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i8
; TODO: These should use DST, but for some reason there are redundant MOVs
; EG-DAG: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
; EG-DAG: 8
-define void @global_zextload_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) #0 {
%load = load <2 x i8>, <2 x i8> addrspace(1)* %in
%ext = zext <2 x i8> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
@@ -152,7 +152,7 @@ define void @global_zextload_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i8
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: 8
; EG-DAG: 8
-define void @global_sextload_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) #0 {
%load = load <2 x i8>, <2 x i8> addrspace(1)* %in
%ext = sext <2 x i8> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
@@ -174,7 +174,7 @@ define void @global_sextload_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i8
; EG-DAG: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, {{.*}}literal
; EG-DAG: 8
; EG-DAG: 8
-define void @global_zextload_v3i8_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v3i8_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) #0 {
entry:
%ld = load <3 x i8>, <3 x i8> addrspace(1)* %in
%ext = zext <3 x i8> %ld to <3 x i32>
@@ -207,7 +207,7 @@ entry:
; EG-DAG: 8
; EG-DAG: 8
; EG-DAG: 8
-define void @global_sextload_v3i8_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v3i8_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) #0 {
entry:
%ld = load <3 x i8>, <3 x i8> addrspace(1)* %in
%ext = sext <3 x i8> %ld to <3 x i32>
@@ -227,7 +227,7 @@ entry:
; EG-DAG: 8
; EG-DAG: 8
; EG-DAG: 8
-define void @global_zextload_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) #0 {
%load = load <4 x i8>, <4 x i8> addrspace(1)* %in
%ext = zext <4 x i8> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
@@ -248,7 +248,7 @@ define void @global_zextload_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i8
; EG-DAG: 8
; EG-DAG: 8
; EG-DAG: 8
-define void @global_sextload_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) #0 {
%load = load <4 x i8>, <4 x i8> addrspace(1)* %in
%ext = sext <4 x i8> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
@@ -273,7 +273,7 @@ define void @global_sextload_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i8
; EG-DAG: 8
; EG-DAG: 8
; EG-DAG: 8
-define void @global_zextload_v8i8_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v8i8_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i8> addrspace(1)* %in) #0 {
%load = load <8 x i8>, <8 x i8> addrspace(1)* %in
%ext = zext <8 x i8> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
@@ -300,7 +300,7 @@ define void @global_zextload_v8i8_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i8
; EG-DAG: 8
; EG-DAG: 8
; EG-DAG: 8
-define void @global_sextload_v8i8_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v8i8_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i8> addrspace(1)* %in) #0 {
%load = load <8 x i8>, <8 x i8> addrspace(1)* %in
%ext = sext <8 x i8> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
@@ -341,7 +341,7 @@ define void @global_sextload_v8i8_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i8
; EG-DAG: 8
; EG-DAG: 8
; EG-DAG: 8
-define void @global_zextload_v16i8_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v16i8_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i8> addrspace(1)* %in) #0 {
%load = load <16 x i8>, <16 x i8> addrspace(1)* %in
%ext = zext <16 x i8> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
@@ -384,7 +384,7 @@ define void @global_zextload_v16i8_to_v16i32(<16 x i32> addrspace(1)* %out, <16
; EG-DAG: 8
; EG-DAG: 8
; EG-DAG: 8
-define void @global_sextload_v16i8_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v16i8_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i8> addrspace(1)* %in) #0 {
%load = load <16 x i8>, <16 x i8> addrspace(1)* %in
%ext = sext <16 x i8> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
@@ -456,7 +456,7 @@ define void @global_sextload_v16i8_to_v16i32(<16 x i32> addrspace(1)* %out, <16
; EG-DAG: 8
; EG-DAG: 8
; EG-DAG: 8
-define void @global_zextload_v32i8_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v32i8_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i8> addrspace(1)* %in) #0 {
%load = load <32 x i8>, <32 x i8> addrspace(1)* %in
%ext = zext <32 x i8> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
@@ -532,7 +532,7 @@ define void @global_zextload_v32i8_to_v32i32(<32 x i32> addrspace(1)* %out, <32
; EG-DAG: 8
; EG-DAG: 8
; EG-DAG: 8
-define void @global_sextload_v32i8_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v32i8_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i8> addrspace(1)* %in) #0 {
%load = load <32 x i8>, <32 x i8> addrspace(1)* %in
%ext = sext <32 x i8> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
@@ -545,7 +545,7 @@ define void @global_sextload_v32i8_to_v32i32(<32 x i32> addrspace(1)* %out, <32
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, T{{[0-9]+}}.X, 16, #1
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, T{{[0-9]+}}.X, 32, #1
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, T{{[0-9]+}}.X, 48, #1
-define void @global_zextload_v64i8_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v64i8_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i8> addrspace(1)* %in) #0 {
%load = load <64 x i8>, <64 x i8> addrspace(1)* %in
%ext = zext <64 x i8> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
@@ -558,7 +558,7 @@ define void @global_zextload_v64i8_to_v64i32(<64 x i32> addrspace(1)* %out, <64
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, T{{[0-9]+}}.X, 16, #1
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, T{{[0-9]+}}.X, 32, #1
; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, T{{[0-9]+}}.X, 48, #1
-define void @global_sextload_v64i8_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v64i8_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i8> addrspace(1)* %in) #0 {
%load = load <64 x i8>, <64 x i8> addrspace(1)* %in
%ext = sext <64 x i8> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
@@ -576,7 +576,7 @@ define void @global_sextload_v64i8_to_v64i32(<64 x i32> addrspace(1)* %out, <64
; EG: VTX_READ_8 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; EG: MOV {{.*}}, 0.0
-define void @global_zextload_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
%a = load i8, i8 addrspace(1)* %in
%ext = zext i8 %a to i64
store i64 %ext, i64 addrspace(1)* %out
@@ -595,7 +595,7 @@ define void @global_zextload_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(1)*
; EG: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal
; TODO: Why not 7?
; EG: 31
-define void @global_sextload_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
%a = load i8, i8 addrspace(1)* %in
%ext = sext i8 %a to i64
store i64 %ext, i64 addrspace(1)* %out
@@ -606,7 +606,7 @@ define void @global_sextload_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(1)*
; EG: VTX_READ_8 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; EG: MOV {{.*}}, 0.0
-define void @global_zextload_v1i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v1i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i8> addrspace(1)* %in) #0 {
%load = load <1 x i8>, <1 x i8> addrspace(1)* %in
%ext = zext <1 x i8> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
@@ -619,7 +619,7 @@ define void @global_zextload_v1i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i8
; EG: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal
; TODO: Why not 7?
; EG: 31
-define void @global_sextload_v1i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v1i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i8> addrspace(1)* %in) #0 {
%load = load <1 x i8>, <1 x i8> addrspace(1)* %in
%ext = sext <1 x i8> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
@@ -629,7 +629,7 @@ define void @global_sextload_v1i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i8
; FUNC-LABEL: {{^}}global_zextload_v2i8_to_v2i64:
; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @global_zextload_v2i8_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v2i8_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) #0 {
%load = load <2 x i8>, <2 x i8> addrspace(1)* %in
%ext = zext <2 x i8> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
@@ -639,7 +639,7 @@ define void @global_zextload_v2i8_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i8
; FUNC-LABEL: {{^}}global_sextload_v2i8_to_v2i64:
; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @global_sextload_v2i8_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v2i8_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) #0 {
%load = load <2 x i8>, <2 x i8> addrspace(1)* %in
%ext = sext <2 x i8> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
@@ -649,7 +649,7 @@ define void @global_sextload_v2i8_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i8
; FUNC-LABEL: {{^}}global_zextload_v4i8_to_v4i64:
; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @global_zextload_v4i8_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v4i8_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) #0 {
%load = load <4 x i8>, <4 x i8> addrspace(1)* %in
%ext = zext <4 x i8> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
@@ -659,7 +659,7 @@ define void @global_zextload_v4i8_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i8
; FUNC-LABEL: {{^}}global_sextload_v4i8_to_v4i64:
; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @global_sextload_v4i8_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v4i8_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) #0 {
%load = load <4 x i8>, <4 x i8> addrspace(1)* %in
%ext = sext <4 x i8> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
@@ -669,7 +669,7 @@ define void @global_sextload_v4i8_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i8
; FUNC-LABEL: {{^}}global_zextload_v8i8_to_v8i64:
; EG: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
-define void @global_zextload_v8i8_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v8i8_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i8> addrspace(1)* %in) #0 {
%load = load <8 x i8>, <8 x i8> addrspace(1)* %in
%ext = zext <8 x i8> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
@@ -679,7 +679,7 @@ define void @global_zextload_v8i8_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i8
; FUNC-LABEL: {{^}}global_sextload_v8i8_to_v8i64:
; EG: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
-define void @global_sextload_v8i8_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v8i8_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i8> addrspace(1)* %in) #0 {
%load = load <8 x i8>, <8 x i8> addrspace(1)* %in
%ext = sext <8 x i8> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
@@ -689,7 +689,7 @@ define void @global_sextload_v8i8_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i8
; FUNC-LABEL: {{^}}global_zextload_v16i8_to_v16i64:
; EG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-define void @global_zextload_v16i8_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v16i8_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i8> addrspace(1)* %in) #0 {
%load = load <16 x i8>, <16 x i8> addrspace(1)* %in
%ext = zext <16 x i8> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
@@ -699,7 +699,7 @@ define void @global_zextload_v16i8_to_v16i64(<16 x i64> addrspace(1)* %out, <16
; FUNC-LABEL: {{^}}global_sextload_v16i8_to_v16i64:
; EG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-define void @global_sextload_v16i8_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v16i8_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i8> addrspace(1)* %in) #0 {
%load = load <16 x i8>, <16 x i8> addrspace(1)* %in
%ext = sext <16 x i8> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
@@ -710,7 +710,7 @@ define void @global_sextload_v16i8_to_v16i64(<16 x i64> addrspace(1)* %out, <16
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
-define void @global_zextload_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i8> addrspace(1)* %in) #0 {
%load = load <32 x i8>, <32 x i8> addrspace(1)* %in
%ext = zext <32 x i8> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
@@ -721,7 +721,7 @@ define void @global_zextload_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <32
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
-define void @global_sextload_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i8> addrspace(1)* %in) #0 {
%load = load <32 x i8>, <32 x i8> addrspace(1)* %in
%ext = sext <32 x i8> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
@@ -729,7 +729,7 @@ define void @global_sextload_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <32
}
; XFUNC-LABEL: {{^}}global_zextload_v64i8_to_v64i64:
-; define void @global_zextload_v64i8_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i8> addrspace(1)* %in) #0 {
+; define amdgpu_kernel void @global_zextload_v64i8_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i8> addrspace(1)* %in) #0 {
; %load = load <64 x i8>, <64 x i8> addrspace(1)* %in
; %ext = zext <64 x i8> %load to <64 x i64>
; store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
@@ -737,7 +737,7 @@ define void @global_sextload_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <32
; }
; XFUNC-LABEL: {{^}}global_sextload_v64i8_to_v64i64:
-; define void @global_sextload_v64i8_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i8> addrspace(1)* %in) #0 {
+; define amdgpu_kernel void @global_sextload_v64i8_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i8> addrspace(1)* %in) #0 {
; %load = load <64 x i8>, <64 x i8> addrspace(1)* %in
; %ext = sext <64 x i8> %load to <64 x i64>
; store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
@@ -752,7 +752,7 @@ define void @global_sextload_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <32
; GCN-HSA: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, v[[VAL]]
; EG: VTX_READ_8 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @global_zextload_i8_to_i16(i16 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_i8_to_i16(i16 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
%a = load i8, i8 addrspace(1)* %in
%ext = zext i8 %a to i16
store i16 %ext, i16 addrspace(1)* %out
@@ -768,7 +768,7 @@ define void @global_zextload_i8_to_i16(i16 addrspace(1)* %out, i8 addrspace(1)*
; EG: VTX_READ_8 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-define void @global_sextload_i8_to_i16(i16 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_i8_to_i16(i16 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
%a = load i8, i8 addrspace(1)* %in
%ext = sext i8 %a to i16
store i16 %ext, i16 addrspace(1)* %out
@@ -778,7 +778,7 @@ define void @global_sextload_i8_to_i16(i16 addrspace(1)* %out, i8 addrspace(1)*
; FUNC-LABEL: {{^}}global_zextload_v1i8_to_v1i16:
; EG: VTX_READ_8 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @global_zextload_v1i8_to_v1i16(<1 x i16> addrspace(1)* %out, <1 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v1i8_to_v1i16(<1 x i16> addrspace(1)* %out, <1 x i8> addrspace(1)* %in) #0 {
%load = load <1 x i8>, <1 x i8> addrspace(1)* %in
%ext = zext <1 x i8> %load to <1 x i16>
store <1 x i16> %ext, <1 x i16> addrspace(1)* %out
@@ -789,7 +789,7 @@ define void @global_zextload_v1i8_to_v1i16(<1 x i16> addrspace(1)* %out, <1 x i8
; EG: VTX_READ_8 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-define void @global_sextload_v1i8_to_v1i16(<1 x i16> addrspace(1)* %out, <1 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v1i8_to_v1i16(<1 x i16> addrspace(1)* %out, <1 x i8> addrspace(1)* %in) #0 {
%load = load <1 x i8>, <1 x i8> addrspace(1)* %in
%ext = sext <1 x i8> %load to <1 x i16>
store <1 x i16> %ext, <1 x i16> addrspace(1)* %out
@@ -799,7 +799,7 @@ define void @global_sextload_v1i8_to_v1i16(<1 x i16> addrspace(1)* %out, <1 x i8
; FUNC-LABEL: {{^}}global_zextload_v2i8_to_v2i16:
; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @global_zextload_v2i8_to_v2i16(<2 x i16> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v2i8_to_v2i16(<2 x i16> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) #0 {
%load = load <2 x i8>, <2 x i8> addrspace(1)* %in
%ext = zext <2 x i8> %load to <2 x i16>
store <2 x i16> %ext, <2 x i16> addrspace(1)* %out
@@ -811,7 +811,7 @@ define void @global_zextload_v2i8_to_v2i16(<2 x i16> addrspace(1)* %out, <2 x i8
; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-define void @global_sextload_v2i8_to_v2i16(<2 x i16> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v2i8_to_v2i16(<2 x i16> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) #0 {
%load = load <2 x i8>, <2 x i8> addrspace(1)* %in
%ext = sext <2 x i8> %load to <2 x i16>
store <2 x i16> %ext, <2 x i16> addrspace(1)* %out
@@ -821,7 +821,7 @@ define void @global_sextload_v2i8_to_v2i16(<2 x i16> addrspace(1)* %out, <2 x i8
; FUNC-LABEL: {{^}}global_zextload_v4i8_to_v4i16:
; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-define void @global_zextload_v4i8_to_v4i16(<4 x i16> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v4i8_to_v4i16(<4 x i16> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) #0 {
%load = load <4 x i8>, <4 x i8> addrspace(1)* %in
%ext = zext <4 x i8> %load to <4 x i16>
store <4 x i16> %ext, <4 x i16> addrspace(1)* %out
@@ -835,7 +835,7 @@ define void @global_zextload_v4i8_to_v4i16(<4 x i16> addrspace(1)* %out, <4 x i8
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-define void @global_sextload_v4i8_to_v4i16(<4 x i16> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v4i8_to_v4i16(<4 x i16> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) #0 {
%load = load <4 x i8>, <4 x i8> addrspace(1)* %in
%ext = sext <4 x i8> %load to <4 x i16>
store <4 x i16> %ext, <4 x i16> addrspace(1)* %out
@@ -845,7 +845,7 @@ define void @global_sextload_v4i8_to_v4i16(<4 x i16> addrspace(1)* %out, <4 x i8
; FUNC-LABEL: {{^}}global_zextload_v8i8_to_v8i16:
; EG: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
-define void @global_zextload_v8i8_to_v8i16(<8 x i16> addrspace(1)* %out, <8 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v8i8_to_v8i16(<8 x i16> addrspace(1)* %out, <8 x i8> addrspace(1)* %in) #0 {
%load = load <8 x i8>, <8 x i8> addrspace(1)* %in
%ext = zext <8 x i8> %load to <8 x i16>
store <8 x i16> %ext, <8 x i16> addrspace(1)* %out
@@ -863,7 +863,7 @@ define void @global_zextload_v8i8_to_v8i16(<8 x i16> addrspace(1)* %out, <8 x i8
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-define void @global_sextload_v8i8_to_v8i16(<8 x i16> addrspace(1)* %out, <8 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v8i8_to_v8i16(<8 x i16> addrspace(1)* %out, <8 x i8> addrspace(1)* %in) #0 {
%load = load <8 x i8>, <8 x i8> addrspace(1)* %in
%ext = sext <8 x i8> %load to <8 x i16>
store <8 x i16> %ext, <8 x i16> addrspace(1)* %out
@@ -873,7 +873,7 @@ define void @global_sextload_v8i8_to_v8i16(<8 x i16> addrspace(1)* %out, <8 x i8
; FUNC-LABEL: {{^}}global_zextload_v16i8_to_v16i16:
; EG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-define void @global_zextload_v16i8_to_v16i16(<16 x i16> addrspace(1)* %out, <16 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v16i8_to_v16i16(<16 x i16> addrspace(1)* %out, <16 x i8> addrspace(1)* %in) #0 {
%load = load <16 x i8>, <16 x i8> addrspace(1)* %in
%ext = zext <16 x i8> %load to <16 x i16>
store <16 x i16> %ext, <16 x i16> addrspace(1)* %out
@@ -899,7 +899,7 @@ define void @global_zextload_v16i8_to_v16i16(<16 x i16> addrspace(1)* %out, <16
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-define void @global_sextload_v16i8_to_v16i16(<16 x i16> addrspace(1)* %out, <16 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v16i8_to_v16i16(<16 x i16> addrspace(1)* %out, <16 x i8> addrspace(1)* %in) #0 {
%load = load <16 x i8>, <16 x i8> addrspace(1)* %in
%ext = sext <16 x i8> %load to <16 x i16>
store <16 x i16> %ext, <16 x i16> addrspace(1)* %out
@@ -910,7 +910,7 @@ define void @global_sextload_v16i8_to_v16i16(<16 x i16> addrspace(1)* %out, <16
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
-define void @global_zextload_v32i8_to_v32i16(<32 x i16> addrspace(1)* %out, <32 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_zextload_v32i8_to_v32i16(<32 x i16> addrspace(1)* %out, <32 x i8> addrspace(1)* %in) #0 {
%load = load <32 x i8>, <32 x i8> addrspace(1)* %in
%ext = zext <32 x i8> %load to <32 x i16>
store <32 x i16> %ext, <32 x i16> addrspace(1)* %out
@@ -953,7 +953,7 @@ define void @global_zextload_v32i8_to_v32i16(<32 x i16> addrspace(1)* %out, <32
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-define void @global_sextload_v32i8_to_v32i16(<32 x i16> addrspace(1)* %out, <32 x i8> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_sextload_v32i8_to_v32i16(<32 x i16> addrspace(1)* %out, <32 x i8> addrspace(1)* %in) #0 {
%load = load <32 x i8>, <32 x i8> addrspace(1)* %in
%ext = sext <32 x i8> %load to <32 x i16>
store <32 x i16> %ext, <32 x i16> addrspace(1)* %out
@@ -961,7 +961,7 @@ define void @global_sextload_v32i8_to_v32i16(<32 x i16> addrspace(1)* %out, <32
}
; XFUNC-LABEL: {{^}}global_zextload_v64i8_to_v64i16:
-; define void @global_zextload_v64i8_to_v64i16(<64 x i16> addrspace(1)* %out, <64 x i8> addrspace(1)* %in) #0 {
+; define amdgpu_kernel void @global_zextload_v64i8_to_v64i16(<64 x i16> addrspace(1)* %out, <64 x i8> addrspace(1)* %in) #0 {
; %load = load <64 x i8>, <64 x i8> addrspace(1)* %in
; %ext = zext <64 x i8> %load to <64 x i16>
; store <64 x i16> %ext, <64 x i16> addrspace(1)* %out
@@ -969,7 +969,7 @@ define void @global_sextload_v32i8_to_v32i16(<32 x i16> addrspace(1)* %out, <32
; }
; XFUNC-LABEL: {{^}}global_sextload_v64i8_to_v64i16:
-; define void @global_sextload_v64i8_to_v64i16(<64 x i16> addrspace(1)* %out, <64 x i8> addrspace(1)* %in) #0 {
+; define amdgpu_kernel void @global_sextload_v64i8_to_v64i16(<64 x i16> addrspace(1)* %out, <64 x i8> addrspace(1)* %in) #0 {
; %load = load <64 x i8>, <64 x i8> addrspace(1)* %in
; %ext = sext <64 x i8> %load to <64 x i16>
; store <64 x i16> %ext, <64 x i16> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/load-input-fold.ll b/test/CodeGen/AMDGPU/load-input-fold.ll
index b1899a45bf56..0724e09d7ad0 100644
--- a/test/CodeGen/AMDGPU/load-input-fold.ll
+++ b/test/CodeGen/AMDGPU/load-input-fold.ll
@@ -97,15 +97,6 @@ main_body:
; Function Attrs: readnone
declare float @llvm.r600.dot4(<4 x float>, <4 x float>) #1
-; Function Attrs: readonly
-declare float @fabs(float) #2
-
-; Function Attrs: readnone
-declare float @llvm.AMDGPU.rsq(float) #1
-
-; Function Attrs: readnone
-declare float @llvm.AMDGPU.clamp.f32(float, float, float) #1
-
; Function Attrs: nounwind readonly
declare float @llvm.pow.f32(float, float) #3
diff --git a/test/CodeGen/AMDGPU/load-local-f32.ll b/test/CodeGen/AMDGPU/load-local-f32.ll
index 77b5e3cf3aed..09d7145424de 100644
--- a/test/CodeGen/AMDGPU/load-local-f32.ll
+++ b/test/CodeGen/AMDGPU/load-local-f32.ll
@@ -7,7 +7,7 @@
; GCN: ds_read_b32
; EG: LDS_READ_RET
-define void @load_f32_local(float addrspace(1)* %out, float addrspace(3)* %in) #0 {
+define amdgpu_kernel void @load_f32_local(float addrspace(1)* %out, float addrspace(3)* %in) #0 {
entry:
%tmp0 = load float, float addrspace(3)* %in
store float %tmp0, float addrspace(1)* %out
@@ -20,7 +20,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @load_v2f32_local(<2 x float> addrspace(1)* %out, <2 x float> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @load_v2f32_local(<2 x float> addrspace(1)* %out, <2 x float> addrspace(3)* %in) #0 {
entry:
%tmp0 = load <2 x float>, <2 x float> addrspace(3)* %in
store <2 x float> %tmp0, <2 x float> addrspace(1)* %out
@@ -38,7 +38,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v3f32(<3 x float> addrspace(3)* %out, <3 x float> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v3f32(<3 x float> addrspace(3)* %out, <3 x float> addrspace(3)* %in) #0 {
entry:
%tmp0 = load <3 x float>, <3 x float> addrspace(3)* %in
store <3 x float> %tmp0, <3 x float> addrspace(3)* %out
@@ -52,7 +52,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v4f32(<4 x float> addrspace(3)* %out, <4 x float> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v4f32(<4 x float> addrspace(3)* %out, <4 x float> addrspace(3)* %in) #0 {
entry:
%tmp0 = load <4 x float>, <4 x float> addrspace(3)* %in
store <4 x float> %tmp0, <4 x float> addrspace(3)* %out
@@ -71,7 +71,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v8f32(<8 x float> addrspace(3)* %out, <8 x float> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v8f32(<8 x float> addrspace(3)* %out, <8 x float> addrspace(3)* %in) #0 {
entry:
%tmp0 = load <8 x float>, <8 x float> addrspace(3)* %in
store <8 x float> %tmp0, <8 x float> addrspace(3)* %out
@@ -100,7 +100,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v16f32(<16 x float> addrspace(3)* %out, <16 x float> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v16f32(<16 x float> addrspace(3)* %out, <16 x float> addrspace(3)* %in) #0 {
entry:
%tmp0 = load <16 x float>, <16 x float> addrspace(3)* %in
store <16 x float> %tmp0, <16 x float> addrspace(3)* %out
diff --git a/test/CodeGen/AMDGPU/load-local-f64.ll b/test/CodeGen/AMDGPU/load-local-f64.ll
index 27d39b7e9d7d..9ad6c087bf2e 100644
--- a/test/CodeGen/AMDGPU/load-local-f64.ll
+++ b/test/CodeGen/AMDGPU/load-local-f64.ll
@@ -9,7 +9,7 @@
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_f64(double addrspace(3)* %out, double addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_f64(double addrspace(3)* %out, double addrspace(3)* %in) #0 {
%ld = load double, double addrspace(3)* %in
store double %ld, double addrspace(3)* %out
ret void
@@ -22,7 +22,7 @@ define void @local_load_f64(double addrspace(3)* %out, double addrspace(3)* %in)
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v2f64(<2 x double> addrspace(3)* %out, <2 x double> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v2f64(<2 x double> addrspace(3)* %out, <2 x double> addrspace(3)* %in) #0 {
entry:
%ld = load <2 x double>, <2 x double> addrspace(3)* %in
store <2 x double> %ld, <2 x double> addrspace(3)* %out
@@ -39,7 +39,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v3f64(<3 x double> addrspace(3)* %out, <3 x double> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v3f64(<3 x double> addrspace(3)* %out, <3 x double> addrspace(3)* %in) #0 {
entry:
%ld = load <3 x double>, <3 x double> addrspace(3)* %in
store <3 x double> %ld, <3 x double> addrspace(3)* %out
@@ -59,7 +59,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v4f64(<4 x double> addrspace(3)* %out, <4 x double> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v4f64(<4 x double> addrspace(3)* %out, <4 x double> addrspace(3)* %in) #0 {
entry:
%ld = load <4 x double>, <4 x double> addrspace(3)* %in
store <4 x double> %ld, <4 x double> addrspace(3)* %out
@@ -88,7 +88,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v8f64(<8 x double> addrspace(3)* %out, <8 x double> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v8f64(<8 x double> addrspace(3)* %out, <8 x double> addrspace(3)* %in) #0 {
entry:
%ld = load <8 x double>, <8 x double> addrspace(3)* %in
store <8 x double> %ld, <8 x double> addrspace(3)* %out
@@ -144,7 +144,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v16f64(<16 x double> addrspace(3)* %out, <16 x double> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v16f64(<16 x double> addrspace(3)* %out, <16 x double> addrspace(3)* %in) #0 {
entry:
%ld = load <16 x double>, <16 x double> addrspace(3)* %in
store <16 x double> %ld, <16 x double> addrspace(3)* %out
diff --git a/test/CodeGen/AMDGPU/load-local-i1.ll b/test/CodeGen/AMDGPU/load-local-i1.ll
index 2eed9917b5e5..e8f134b1fb2e 100644
--- a/test/CodeGen/AMDGPU/load-local-i1.ll
+++ b/test/CodeGen/AMDGPU/load-local-i1.ll
@@ -10,56 +10,56 @@
; EG: LDS_UBYTE_READ_RET
; EG: AND_INT
; EG: LDS_BYTE_WRITE
-define void @local_load_i1(i1 addrspace(3)* %out, i1 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_i1(i1 addrspace(3)* %out, i1 addrspace(3)* %in) #0 {
%load = load i1, i1 addrspace(3)* %in
store i1 %load, i1 addrspace(3)* %out
ret void
}
; FUNC-LABEL: {{^}}local_load_v2i1:
-define void @local_load_v2i1(<2 x i1> addrspace(3)* %out, <2 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v2i1(<2 x i1> addrspace(3)* %out, <2 x i1> addrspace(3)* %in) #0 {
%load = load <2 x i1>, <2 x i1> addrspace(3)* %in
store <2 x i1> %load, <2 x i1> addrspace(3)* %out
ret void
}
; FUNC-LABEL: {{^}}local_load_v3i1:
-define void @local_load_v3i1(<3 x i1> addrspace(3)* %out, <3 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v3i1(<3 x i1> addrspace(3)* %out, <3 x i1> addrspace(3)* %in) #0 {
%load = load <3 x i1>, <3 x i1> addrspace(3)* %in
store <3 x i1> %load, <3 x i1> addrspace(3)* %out
ret void
}
; FUNC-LABEL: {{^}}local_load_v4i1:
-define void @local_load_v4i1(<4 x i1> addrspace(3)* %out, <4 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v4i1(<4 x i1> addrspace(3)* %out, <4 x i1> addrspace(3)* %in) #0 {
%load = load <4 x i1>, <4 x i1> addrspace(3)* %in
store <4 x i1> %load, <4 x i1> addrspace(3)* %out
ret void
}
; FUNC-LABEL: {{^}}local_load_v8i1:
-define void @local_load_v8i1(<8 x i1> addrspace(3)* %out, <8 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v8i1(<8 x i1> addrspace(3)* %out, <8 x i1> addrspace(3)* %in) #0 {
%load = load <8 x i1>, <8 x i1> addrspace(3)* %in
store <8 x i1> %load, <8 x i1> addrspace(3)* %out
ret void
}
; FUNC-LABEL: {{^}}local_load_v16i1:
-define void @local_load_v16i1(<16 x i1> addrspace(3)* %out, <16 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v16i1(<16 x i1> addrspace(3)* %out, <16 x i1> addrspace(3)* %in) #0 {
%load = load <16 x i1>, <16 x i1> addrspace(3)* %in
store <16 x i1> %load, <16 x i1> addrspace(3)* %out
ret void
}
; FUNC-LABEL: {{^}}local_load_v32i1:
-define void @local_load_v32i1(<32 x i1> addrspace(3)* %out, <32 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v32i1(<32 x i1> addrspace(3)* %out, <32 x i1> addrspace(3)* %in) #0 {
%load = load <32 x i1>, <32 x i1> addrspace(3)* %in
store <32 x i1> %load, <32 x i1> addrspace(3)* %out
ret void
}
; FUNC-LABEL: {{^}}local_load_v64i1:
-define void @local_load_v64i1(<64 x i1> addrspace(3)* %out, <64 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v64i1(<64 x i1> addrspace(3)* %out, <64 x i1> addrspace(3)* %in) #0 {
%load = load <64 x i1>, <64 x i1> addrspace(3)* %in
store <64 x i1> %load, <64 x i1> addrspace(3)* %out
ret void
@@ -68,7 +68,7 @@ define void @local_load_v64i1(<64 x i1> addrspace(3)* %out, <64 x i1> addrspace(
; FUNC-LABEL: {{^}}local_zextload_i1_to_i32:
; GCN: ds_read_u8
; GCN: ds_write_b32
-define void @local_zextload_i1_to_i32(i32 addrspace(3)* %out, i1 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_i1_to_i32(i32 addrspace(3)* %out, i1 addrspace(3)* %in) #0 {
%a = load i1, i1 addrspace(3)* %in
%ext = zext i1 %a to i32
store i32 %ext, i32 addrspace(3)* %out
@@ -82,7 +82,7 @@ define void @local_zextload_i1_to_i32(i32 addrspace(3)* %out, i1 addrspace(3)* %
; EG: LDS_UBYTE_READ_RET
; EG: BFE_INT
-define void @local_sextload_i1_to_i32(i32 addrspace(3)* %out, i1 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_i1_to_i32(i32 addrspace(3)* %out, i1 addrspace(3)* %in) #0 {
%a = load i1, i1 addrspace(3)* %in
%ext = sext i1 %a to i32
store i32 %ext, i32 addrspace(3)* %out
@@ -90,7 +90,7 @@ define void @local_sextload_i1_to_i32(i32 addrspace(3)* %out, i1 addrspace(3)* %
}
; FUNC-LABEL: {{^}}local_zextload_v1i1_to_v1i32:
-define void @local_zextload_v1i1_to_v1i32(<1 x i32> addrspace(3)* %out, <1 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v1i1_to_v1i32(<1 x i32> addrspace(3)* %out, <1 x i1> addrspace(3)* %in) #0 {
%load = load <1 x i1>, <1 x i1> addrspace(3)* %in
%ext = zext <1 x i1> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(3)* %out
@@ -98,7 +98,7 @@ define void @local_zextload_v1i1_to_v1i32(<1 x i32> addrspace(3)* %out, <1 x i1>
}
; FUNC-LABEL: {{^}}local_sextload_v1i1_to_v1i32:
-define void @local_sextload_v1i1_to_v1i32(<1 x i32> addrspace(3)* %out, <1 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v1i1_to_v1i32(<1 x i32> addrspace(3)* %out, <1 x i1> addrspace(3)* %in) #0 {
%load = load <1 x i1>, <1 x i1> addrspace(3)* %in
%ext = sext <1 x i1> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(3)* %out
@@ -106,7 +106,7 @@ define void @local_sextload_v1i1_to_v1i32(<1 x i32> addrspace(3)* %out, <1 x i1>
}
; FUNC-LABEL: {{^}}local_zextload_v2i1_to_v2i32:
-define void @local_zextload_v2i1_to_v2i32(<2 x i32> addrspace(3)* %out, <2 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v2i1_to_v2i32(<2 x i32> addrspace(3)* %out, <2 x i1> addrspace(3)* %in) #0 {
%load = load <2 x i1>, <2 x i1> addrspace(3)* %in
%ext = zext <2 x i1> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(3)* %out
@@ -114,7 +114,7 @@ define void @local_zextload_v2i1_to_v2i32(<2 x i32> addrspace(3)* %out, <2 x i1>
}
; FUNC-LABEL: {{^}}local_sextload_v2i1_to_v2i32:
-define void @local_sextload_v2i1_to_v2i32(<2 x i32> addrspace(3)* %out, <2 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v2i1_to_v2i32(<2 x i32> addrspace(3)* %out, <2 x i1> addrspace(3)* %in) #0 {
%load = load <2 x i1>, <2 x i1> addrspace(3)* %in
%ext = sext <2 x i1> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(3)* %out
@@ -122,7 +122,7 @@ define void @local_sextload_v2i1_to_v2i32(<2 x i32> addrspace(3)* %out, <2 x i1>
}
; FUNC-LABEL: {{^}}local_zextload_v3i1_to_v3i32:
-define void @local_zextload_v3i1_to_v3i32(<3 x i32> addrspace(3)* %out, <3 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v3i1_to_v3i32(<3 x i32> addrspace(3)* %out, <3 x i1> addrspace(3)* %in) #0 {
%load = load <3 x i1>, <3 x i1> addrspace(3)* %in
%ext = zext <3 x i1> %load to <3 x i32>
store <3 x i32> %ext, <3 x i32> addrspace(3)* %out
@@ -130,7 +130,7 @@ define void @local_zextload_v3i1_to_v3i32(<3 x i32> addrspace(3)* %out, <3 x i1>
}
; FUNC-LABEL: {{^}}local_sextload_v3i1_to_v3i32:
-define void @local_sextload_v3i1_to_v3i32(<3 x i32> addrspace(3)* %out, <3 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v3i1_to_v3i32(<3 x i32> addrspace(3)* %out, <3 x i1> addrspace(3)* %in) #0 {
%load = load <3 x i1>, <3 x i1> addrspace(3)* %in
%ext = sext <3 x i1> %load to <3 x i32>
store <3 x i32> %ext, <3 x i32> addrspace(3)* %out
@@ -138,7 +138,7 @@ define void @local_sextload_v3i1_to_v3i32(<3 x i32> addrspace(3)* %out, <3 x i1>
}
; FUNC-LABEL: {{^}}local_zextload_v4i1_to_v4i32:
-define void @local_zextload_v4i1_to_v4i32(<4 x i32> addrspace(3)* %out, <4 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v4i1_to_v4i32(<4 x i32> addrspace(3)* %out, <4 x i1> addrspace(3)* %in) #0 {
%load = load <4 x i1>, <4 x i1> addrspace(3)* %in
%ext = zext <4 x i1> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(3)* %out
@@ -146,7 +146,7 @@ define void @local_zextload_v4i1_to_v4i32(<4 x i32> addrspace(3)* %out, <4 x i1>
}
; FUNC-LABEL: {{^}}local_sextload_v4i1_to_v4i32:
-define void @local_sextload_v4i1_to_v4i32(<4 x i32> addrspace(3)* %out, <4 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v4i1_to_v4i32(<4 x i32> addrspace(3)* %out, <4 x i1> addrspace(3)* %in) #0 {
%load = load <4 x i1>, <4 x i1> addrspace(3)* %in
%ext = sext <4 x i1> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(3)* %out
@@ -154,7 +154,7 @@ define void @local_sextload_v4i1_to_v4i32(<4 x i32> addrspace(3)* %out, <4 x i1>
}
; FUNC-LABEL: {{^}}local_zextload_v8i1_to_v8i32:
-define void @local_zextload_v8i1_to_v8i32(<8 x i32> addrspace(3)* %out, <8 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v8i1_to_v8i32(<8 x i32> addrspace(3)* %out, <8 x i1> addrspace(3)* %in) #0 {
%load = load <8 x i1>, <8 x i1> addrspace(3)* %in
%ext = zext <8 x i1> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(3)* %out
@@ -162,7 +162,7 @@ define void @local_zextload_v8i1_to_v8i32(<8 x i32> addrspace(3)* %out, <8 x i1>
}
; FUNC-LABEL: {{^}}local_sextload_v8i1_to_v8i32:
-define void @local_sextload_v8i1_to_v8i32(<8 x i32> addrspace(3)* %out, <8 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v8i1_to_v8i32(<8 x i32> addrspace(3)* %out, <8 x i1> addrspace(3)* %in) #0 {
%load = load <8 x i1>, <8 x i1> addrspace(3)* %in
%ext = sext <8 x i1> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(3)* %out
@@ -170,7 +170,7 @@ define void @local_sextload_v8i1_to_v8i32(<8 x i32> addrspace(3)* %out, <8 x i1>
}
; FUNC-LABEL: {{^}}local_zextload_v16i1_to_v16i32:
-define void @local_zextload_v16i1_to_v16i32(<16 x i32> addrspace(3)* %out, <16 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v16i1_to_v16i32(<16 x i32> addrspace(3)* %out, <16 x i1> addrspace(3)* %in) #0 {
%load = load <16 x i1>, <16 x i1> addrspace(3)* %in
%ext = zext <16 x i1> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(3)* %out
@@ -178,7 +178,7 @@ define void @local_zextload_v16i1_to_v16i32(<16 x i32> addrspace(3)* %out, <16 x
}
; FUNC-LABEL: {{^}}local_sextload_v16i1_to_v16i32:
-define void @local_sextload_v16i1_to_v16i32(<16 x i32> addrspace(3)* %out, <16 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v16i1_to_v16i32(<16 x i32> addrspace(3)* %out, <16 x i1> addrspace(3)* %in) #0 {
%load = load <16 x i1>, <16 x i1> addrspace(3)* %in
%ext = sext <16 x i1> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(3)* %out
@@ -186,7 +186,7 @@ define void @local_sextload_v16i1_to_v16i32(<16 x i32> addrspace(3)* %out, <16 x
}
; FUNC-LABEL: {{^}}local_zextload_v32i1_to_v32i32:
-define void @local_zextload_v32i1_to_v32i32(<32 x i32> addrspace(3)* %out, <32 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v32i1_to_v32i32(<32 x i32> addrspace(3)* %out, <32 x i1> addrspace(3)* %in) #0 {
%load = load <32 x i1>, <32 x i1> addrspace(3)* %in
%ext = zext <32 x i1> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(3)* %out
@@ -194,7 +194,7 @@ define void @local_zextload_v32i1_to_v32i32(<32 x i32> addrspace(3)* %out, <32 x
}
; FUNC-LABEL: {{^}}local_sextload_v32i1_to_v32i32:
-define void @local_sextload_v32i1_to_v32i32(<32 x i32> addrspace(3)* %out, <32 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v32i1_to_v32i32(<32 x i32> addrspace(3)* %out, <32 x i1> addrspace(3)* %in) #0 {
%load = load <32 x i1>, <32 x i1> addrspace(3)* %in
%ext = sext <32 x i1> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(3)* %out
@@ -202,7 +202,7 @@ define void @local_sextload_v32i1_to_v32i32(<32 x i32> addrspace(3)* %out, <32 x
}
; FUNC-LABEL: {{^}}local_zextload_v64i1_to_v64i32:
-define void @local_zextload_v64i1_to_v64i32(<64 x i32> addrspace(3)* %out, <64 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v64i1_to_v64i32(<64 x i32> addrspace(3)* %out, <64 x i1> addrspace(3)* %in) #0 {
%load = load <64 x i1>, <64 x i1> addrspace(3)* %in
%ext = zext <64 x i1> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(3)* %out
@@ -210,7 +210,7 @@ define void @local_zextload_v64i1_to_v64i32(<64 x i32> addrspace(3)* %out, <64 x
}
; FUNC-LABEL: {{^}}local_sextload_v64i1_to_v64i32:
-define void @local_sextload_v64i1_to_v64i32(<64 x i32> addrspace(3)* %out, <64 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v64i1_to_v64i32(<64 x i32> addrspace(3)* %out, <64 x i1> addrspace(3)* %in) #0 {
%load = load <64 x i1>, <64 x i1> addrspace(3)* %in
%ext = sext <64 x i1> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(3)* %out
@@ -221,7 +221,7 @@ define void @local_sextload_v64i1_to_v64i32(<64 x i32> addrspace(3)* %out, <64 x
; GCN-DAG: ds_read_u8 [[LOAD:v[0-9]+]],
; GCN-DAG: v_mov_b32_e32 {{v[0-9]+}}, 0{{$}}
; GCN: ds_write_b64
-define void @local_zextload_i1_to_i64(i64 addrspace(3)* %out, i1 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_i1_to_i64(i64 addrspace(3)* %out, i1 addrspace(3)* %in) #0 {
%a = load i1, i1 addrspace(3)* %in
%ext = zext i1 %a to i64
store i64 %ext, i64 addrspace(3)* %out
@@ -233,7 +233,7 @@ define void @local_zextload_i1_to_i64(i64 addrspace(3)* %out, i1 addrspace(3)* %
; GCN: v_bfe_i32 [[BFE:v[0-9]+]], {{v[0-9]+}}, 0, 1{{$}}
; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, [[BFE]]
; GCN: ds_write_b64
-define void @local_sextload_i1_to_i64(i64 addrspace(3)* %out, i1 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_i1_to_i64(i64 addrspace(3)* %out, i1 addrspace(3)* %in) #0 {
%a = load i1, i1 addrspace(3)* %in
%ext = sext i1 %a to i64
store i64 %ext, i64 addrspace(3)* %out
@@ -241,7 +241,7 @@ define void @local_sextload_i1_to_i64(i64 addrspace(3)* %out, i1 addrspace(3)* %
}
; FUNC-LABEL: {{^}}local_zextload_v1i1_to_v1i64:
-define void @local_zextload_v1i1_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v1i1_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i1> addrspace(3)* %in) #0 {
%load = load <1 x i1>, <1 x i1> addrspace(3)* %in
%ext = zext <1 x i1> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(3)* %out
@@ -249,7 +249,7 @@ define void @local_zextload_v1i1_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i1>
}
; FUNC-LABEL: {{^}}local_sextload_v1i1_to_v1i64:
-define void @local_sextload_v1i1_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v1i1_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i1> addrspace(3)* %in) #0 {
%load = load <1 x i1>, <1 x i1> addrspace(3)* %in
%ext = sext <1 x i1> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(3)* %out
@@ -257,7 +257,7 @@ define void @local_sextload_v1i1_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i1>
}
; FUNC-LABEL: {{^}}local_zextload_v2i1_to_v2i64:
-define void @local_zextload_v2i1_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v2i1_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i1> addrspace(3)* %in) #0 {
%load = load <2 x i1>, <2 x i1> addrspace(3)* %in
%ext = zext <2 x i1> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(3)* %out
@@ -265,7 +265,7 @@ define void @local_zextload_v2i1_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i1>
}
; FUNC-LABEL: {{^}}local_sextload_v2i1_to_v2i64:
-define void @local_sextload_v2i1_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v2i1_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i1> addrspace(3)* %in) #0 {
%load = load <2 x i1>, <2 x i1> addrspace(3)* %in
%ext = sext <2 x i1> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(3)* %out
@@ -273,7 +273,7 @@ define void @local_sextload_v2i1_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i1>
}
; FUNC-LABEL: {{^}}local_zextload_v3i1_to_v3i64:
-define void @local_zextload_v3i1_to_v3i64(<3 x i64> addrspace(3)* %out, <3 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v3i1_to_v3i64(<3 x i64> addrspace(3)* %out, <3 x i1> addrspace(3)* %in) #0 {
%load = load <3 x i1>, <3 x i1> addrspace(3)* %in
%ext = zext <3 x i1> %load to <3 x i64>
store <3 x i64> %ext, <3 x i64> addrspace(3)* %out
@@ -281,7 +281,7 @@ define void @local_zextload_v3i1_to_v3i64(<3 x i64> addrspace(3)* %out, <3 x i1>
}
; FUNC-LABEL: {{^}}local_sextload_v3i1_to_v3i64:
-define void @local_sextload_v3i1_to_v3i64(<3 x i64> addrspace(3)* %out, <3 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v3i1_to_v3i64(<3 x i64> addrspace(3)* %out, <3 x i1> addrspace(3)* %in) #0 {
%load = load <3 x i1>, <3 x i1> addrspace(3)* %in
%ext = sext <3 x i1> %load to <3 x i64>
store <3 x i64> %ext, <3 x i64> addrspace(3)* %out
@@ -289,7 +289,7 @@ define void @local_sextload_v3i1_to_v3i64(<3 x i64> addrspace(3)* %out, <3 x i1>
}
; FUNC-LABEL: {{^}}local_zextload_v4i1_to_v4i64:
-define void @local_zextload_v4i1_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v4i1_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i1> addrspace(3)* %in) #0 {
%load = load <4 x i1>, <4 x i1> addrspace(3)* %in
%ext = zext <4 x i1> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(3)* %out
@@ -297,7 +297,7 @@ define void @local_zextload_v4i1_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i1>
}
; FUNC-LABEL: {{^}}local_sextload_v4i1_to_v4i64:
-define void @local_sextload_v4i1_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v4i1_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i1> addrspace(3)* %in) #0 {
%load = load <4 x i1>, <4 x i1> addrspace(3)* %in
%ext = sext <4 x i1> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(3)* %out
@@ -305,7 +305,7 @@ define void @local_sextload_v4i1_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i1>
}
; FUNC-LABEL: {{^}}local_zextload_v8i1_to_v8i64:
-define void @local_zextload_v8i1_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v8i1_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i1> addrspace(3)* %in) #0 {
%load = load <8 x i1>, <8 x i1> addrspace(3)* %in
%ext = zext <8 x i1> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(3)* %out
@@ -313,7 +313,7 @@ define void @local_zextload_v8i1_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i1>
}
; FUNC-LABEL: {{^}}local_sextload_v8i1_to_v8i64:
-define void @local_sextload_v8i1_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v8i1_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i1> addrspace(3)* %in) #0 {
%load = load <8 x i1>, <8 x i1> addrspace(3)* %in
%ext = sext <8 x i1> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(3)* %out
@@ -321,7 +321,7 @@ define void @local_sextload_v8i1_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i1>
}
; FUNC-LABEL: {{^}}local_zextload_v16i1_to_v16i64:
-define void @local_zextload_v16i1_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v16i1_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x i1> addrspace(3)* %in) #0 {
%load = load <16 x i1>, <16 x i1> addrspace(3)* %in
%ext = zext <16 x i1> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(3)* %out
@@ -329,7 +329,7 @@ define void @local_zextload_v16i1_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x
}
; FUNC-LABEL: {{^}}local_sextload_v16i1_to_v16i64:
-define void @local_sextload_v16i1_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v16i1_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x i1> addrspace(3)* %in) #0 {
%load = load <16 x i1>, <16 x i1> addrspace(3)* %in
%ext = sext <16 x i1> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(3)* %out
@@ -337,7 +337,7 @@ define void @local_sextload_v16i1_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x
}
; FUNC-LABEL: {{^}}local_zextload_v32i1_to_v32i64:
-define void @local_zextload_v32i1_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v32i1_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x i1> addrspace(3)* %in) #0 {
%load = load <32 x i1>, <32 x i1> addrspace(3)* %in
%ext = zext <32 x i1> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(3)* %out
@@ -345,7 +345,7 @@ define void @local_zextload_v32i1_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x
}
; FUNC-LABEL: {{^}}local_sextload_v32i1_to_v32i64:
-define void @local_sextload_v32i1_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v32i1_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x i1> addrspace(3)* %in) #0 {
%load = load <32 x i1>, <32 x i1> addrspace(3)* %in
%ext = sext <32 x i1> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(3)* %out
@@ -353,7 +353,7 @@ define void @local_sextload_v32i1_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x
}
; FUNC-LABEL: {{^}}local_zextload_v64i1_to_v64i64:
-define void @local_zextload_v64i1_to_v64i64(<64 x i64> addrspace(3)* %out, <64 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v64i1_to_v64i64(<64 x i64> addrspace(3)* %out, <64 x i1> addrspace(3)* %in) #0 {
%load = load <64 x i1>, <64 x i1> addrspace(3)* %in
%ext = zext <64 x i1> %load to <64 x i64>
store <64 x i64> %ext, <64 x i64> addrspace(3)* %out
@@ -361,7 +361,7 @@ define void @local_zextload_v64i1_to_v64i64(<64 x i64> addrspace(3)* %out, <64 x
}
; FUNC-LABEL: {{^}}local_sextload_v64i1_to_v64i64:
-define void @local_sextload_v64i1_to_v64i64(<64 x i64> addrspace(3)* %out, <64 x i1> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v64i1_to_v64i64(<64 x i64> addrspace(3)* %out, <64 x i1> addrspace(3)* %in) #0 {
%load = load <64 x i1>, <64 x i1> addrspace(3)* %in
%ext = sext <64 x i1> %load to <64 x i64>
store <64 x i64> %ext, <64 x i64> addrspace(3)* %out
diff --git a/test/CodeGen/AMDGPU/load-local-i16.ll b/test/CodeGen/AMDGPU/load-local-i16.ll
index d4e86de66aff..bbbb34e8d333 100644
--- a/test/CodeGen/AMDGPU/load-local-i16.ll
+++ b/test/CodeGen/AMDGPU/load-local-i16.ll
@@ -10,7 +10,7 @@
; EG-DAG: MOV {{[* ]*}}[[DATA:T[0-9]+\.[XYZW]]], OQAP
; EG-DAG: MOV {{[* ]*}}[[TO:T[0-9]+\.[XYZW]]], KC0[2].Y
; EG: LDS_SHORT_WRITE {{\*?}} [[TO]], [[DATA]]
-define void @local_load_i16(i16 addrspace(3)* %out, i16 addrspace(3)* %in) {
+define amdgpu_kernel void @local_load_i16(i16 addrspace(3)* %out, i16 addrspace(3)* %in) {
entry:
%ld = load i16, i16 addrspace(3)* %in
store i16 %ld, i16 addrspace(3)* %out
@@ -25,7 +25,7 @@ entry:
; EG-DAG: MOV {{[* ]*}}[[DATA:T[0-9]+\.[XYZW]]], OQAP
; EG-DAG: MOV {{[* ]*}}[[TO:T[0-9]+\.[XYZW]]], KC0[2].Y
; EG: LDS_WRITE {{\*?}} [[TO]], [[DATA]]
-define void @local_load_v2i16(<2 x i16> addrspace(3)* %out, <2 x i16> addrspace(3)* %in) {
+define amdgpu_kernel void @local_load_v2i16(<2 x i16> addrspace(3)* %out, <2 x i16> addrspace(3)* %in) {
entry:
%ld = load <2 x i16>, <2 x i16> addrspace(3)* %in
store <2 x i16> %ld, <2 x i16> addrspace(3)* %out
@@ -39,7 +39,7 @@ entry:
; EG-DAG: LDS_USHORT_READ_RET
; EG-DAG: LDS_READ_RET
-define void @local_load_v3i16(<3 x i16> addrspace(3)* %out, <3 x i16> addrspace(3)* %in) {
+define amdgpu_kernel void @local_load_v3i16(<3 x i16> addrspace(3)* %out, <3 x i16> addrspace(3)* %in) {
entry:
%ld = load <3 x i16>, <3 x i16> addrspace(3)* %in
store <3 x i16> %ld, <3 x i16> addrspace(3)* %out
@@ -51,7 +51,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v4i16(<4 x i16> addrspace(3)* %out, <4 x i16> addrspace(3)* %in) {
+define amdgpu_kernel void @local_load_v4i16(<4 x i16> addrspace(3)* %out, <4 x i16> addrspace(3)* %in) {
entry:
%ld = load <4 x i16>, <4 x i16> addrspace(3)* %in
store <4 x i16> %ld, <4 x i16> addrspace(3)* %out
@@ -65,7 +65,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v8i16(<8 x i16> addrspace(3)* %out, <8 x i16> addrspace(3)* %in) {
+define amdgpu_kernel void @local_load_v8i16(<8 x i16> addrspace(3)* %out, <8 x i16> addrspace(3)* %in) {
entry:
%ld = load <8 x i16>, <8 x i16> addrspace(3)* %in
store <8 x i16> %ld, <8 x i16> addrspace(3)* %out
@@ -86,7 +86,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v16i16(<16 x i16> addrspace(3)* %out, <16 x i16> addrspace(3)* %in) {
+define amdgpu_kernel void @local_load_v16i16(<16 x i16> addrspace(3)* %out, <16 x i16> addrspace(3)* %in) {
entry:
%ld = load <16 x i16>, <16 x i16> addrspace(3)* %in
store <16 x i16> %ld, <16 x i16> addrspace(3)* %out
@@ -102,7 +102,7 @@ entry:
; EG-DAG: MOV {{[* ]*}}[[DATA:T[0-9]+\.[XYZW]]], OQAP
; EG-DAG: MOV {{[* ]*}}[[TO:T[0-9]+\.[XYZW]]], KC0[2].Y
; EG: LDS_WRITE {{\*?}} [[TO]], [[DATA]]
-define void @local_zextload_i16_to_i32(i32 addrspace(3)* %out, i16 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_i16_to_i32(i32 addrspace(3)* %out, i16 addrspace(3)* %in) #0 {
%a = load i16, i16 addrspace(3)* %in
%ext = zext i16 %a to i32
store i32 %ext, i32 addrspace(3)* %out
@@ -121,7 +121,7 @@ define void @local_zextload_i16_to_i32(i32 addrspace(3)* %out, i16 addrspace(3)*
; EG-DAG: BFE_INT {{[* ]*}}[[DATA:T[0-9]+\.[XYZW]]], {{.*}}, 0.0, literal
; EG: 16
; EG: LDS_WRITE {{\*?}} [[TO]], [[DATA]]
-define void @local_sextload_i16_to_i32(i32 addrspace(3)* %out, i16 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_i16_to_i32(i32 addrspace(3)* %out, i16 addrspace(3)* %in) #0 {
%a = load i16, i16 addrspace(3)* %in
%ext = sext i16 %a to i32
store i32 %ext, i32 addrspace(3)* %out
@@ -136,7 +136,7 @@ define void @local_sextload_i16_to_i32(i32 addrspace(3)* %out, i16 addrspace(3)*
; EG-DAG: MOV {{[* ]*}}[[DATA:T[0-9]+\.[XYZW]]], OQAP
; EG-DAG: MOV {{[* ]*}}[[TO:T[0-9]+\.[XYZW]]], KC0[2].Y
; EG: LDS_WRITE {{\*?}} [[TO]], [[DATA]]
-define void @local_zextload_v1i16_to_v1i32(<1 x i32> addrspace(3)* %out, <1 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v1i16_to_v1i32(<1 x i32> addrspace(3)* %out, <1 x i16> addrspace(3)* %in) #0 {
%load = load <1 x i16>, <1 x i16> addrspace(3)* %in
%ext = zext <1 x i16> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(3)* %out
@@ -153,7 +153,7 @@ define void @local_zextload_v1i16_to_v1i32(<1 x i32> addrspace(3)* %out, <1 x i1
; EG-DAG: BFE_INT {{[* ]*}}[[DATA:T[0-9]+\.[XYZW]]], {{.*}}, 0.0, literal
; EG: 16
; EG: LDS_WRITE {{\*?}} [[TO]], [[DATA]]
-define void @local_sextload_v1i16_to_v1i32(<1 x i32> addrspace(3)* %out, <1 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v1i16_to_v1i32(<1 x i32> addrspace(3)* %out, <1 x i16> addrspace(3)* %in) #0 {
%load = load <1 x i16>, <1 x i16> addrspace(3)* %in
%ext = sext <1 x i16> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(3)* %out
@@ -166,7 +166,7 @@ define void @local_sextload_v1i16_to_v1i32(<1 x i32> addrspace(3)* %out, <1 x i1
; GCN: ds_read_b32
; EG: LDS_READ_RET
-define void @local_zextload_v2i16_to_v2i32(<2 x i32> addrspace(3)* %out, <2 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v2i16_to_v2i32(<2 x i32> addrspace(3)* %out, <2 x i16> addrspace(3)* %in) #0 {
%load = load <2 x i16>, <2 x i16> addrspace(3)* %in
%ext = zext <2 x i16> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(3)* %out
@@ -181,7 +181,7 @@ define void @local_zextload_v2i16_to_v2i32(<2 x i32> addrspace(3)* %out, <2 x i1
; EG: LDS_READ_RET
; EG: BFE_INT
; EG: BFE_INT
-define void @local_sextload_v2i16_to_v2i32(<2 x i32> addrspace(3)* %out, <2 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v2i16_to_v2i32(<2 x i32> addrspace(3)* %out, <2 x i16> addrspace(3)* %in) #0 {
%load = load <2 x i16>, <2 x i16> addrspace(3)* %in
%ext = sext <2 x i16> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(3)* %out
@@ -194,7 +194,7 @@ define void @local_sextload_v2i16_to_v2i32(<2 x i32> addrspace(3)* %out, <2 x i1
; GCN-DAG: ds_write_b64
; EG: LDS_READ_RET
-define void @local_local_zextload_v3i16_to_v3i32(<3 x i32> addrspace(3)* %out, <3 x i16> addrspace(3)* %in) {
+define amdgpu_kernel void @local_local_zextload_v3i16_to_v3i32(<3 x i32> addrspace(3)* %out, <3 x i16> addrspace(3)* %in) {
entry:
%ld = load <3 x i16>, <3 x i16> addrspace(3)* %in
%ext = zext <3 x i16> %ld to <3 x i32>
@@ -211,7 +211,7 @@ entry:
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
-define void @local_local_sextload_v3i16_to_v3i32(<3 x i32> addrspace(3)* %out, <3 x i16> addrspace(3)* %in) {
+define amdgpu_kernel void @local_local_sextload_v3i16_to_v3i32(<3 x i32> addrspace(3)* %out, <3 x i16> addrspace(3)* %in) {
entry:
%ld = load <3 x i16>, <3 x i16> addrspace(3)* %in
%ext = sext <3 x i16> %ld to <3 x i32>
@@ -226,7 +226,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_local_zextload_v4i16_to_v4i32(<4 x i32> addrspace(3)* %out, <4 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_local_zextload_v4i16_to_v4i32(<4 x i32> addrspace(3)* %out, <4 x i16> addrspace(3)* %in) #0 {
%load = load <4 x i16>, <4 x i16> addrspace(3)* %in
%ext = zext <4 x i16> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(3)* %out
@@ -244,7 +244,7 @@ define void @local_local_zextload_v4i16_to_v4i32(<4 x i32> addrspace(3)* %out, <
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
-define void @local_sextload_v4i16_to_v4i32(<4 x i32> addrspace(3)* %out, <4 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v4i16_to_v4i32(<4 x i32> addrspace(3)* %out, <4 x i16> addrspace(3)* %in) #0 {
%load = load <4 x i16>, <4 x i16> addrspace(3)* %in
%ext = sext <4 x i16> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(3)* %out
@@ -258,7 +258,7 @@ define void @local_sextload_v4i16_to_v4i32(<4 x i32> addrspace(3)* %out, <4 x i1
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_zextload_v8i16_to_v8i32(<8 x i32> addrspace(3)* %out, <8 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v8i16_to_v8i32(<8 x i32> addrspace(3)* %out, <8 x i16> addrspace(3)* %in) #0 {
%load = load <8 x i16>, <8 x i16> addrspace(3)* %in
%ext = zext <8 x i16> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(3)* %out
@@ -280,7 +280,7 @@ define void @local_zextload_v8i16_to_v8i32(<8 x i32> addrspace(3)* %out, <8 x i1
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
-define void @local_sextload_v8i16_to_v8i32(<8 x i32> addrspace(3)* %out, <8 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v8i16_to_v8i32(<8 x i32> addrspace(3)* %out, <8 x i16> addrspace(3)* %in) #0 {
%load = load <8 x i16>, <8 x i16> addrspace(3)* %in
%ext = sext <8 x i16> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(3)* %out
@@ -304,7 +304,7 @@ define void @local_sextload_v8i16_to_v8i32(<8 x i32> addrspace(3)* %out, <8 x i1
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_zextload_v16i16_to_v16i32(<16 x i32> addrspace(3)* %out, <16 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v16i16_to_v16i32(<16 x i32> addrspace(3)* %out, <16 x i16> addrspace(3)* %in) #0 {
%load = load <16 x i16>, <16 x i16> addrspace(3)* %in
%ext = zext <16 x i16> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(3)* %out
@@ -340,7 +340,7 @@ define void @local_zextload_v16i16_to_v16i32(<16 x i32> addrspace(3)* %out, <16
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
-define void @local_sextload_v16i16_to_v16i32(<16 x i32> addrspace(3)* %out, <16 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v16i16_to_v16i32(<16 x i32> addrspace(3)* %out, <16 x i16> addrspace(3)* %in) #0 {
%load = load <16 x i16>, <16 x i16> addrspace(3)* %in
%ext = sext <16 x i16> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(3)* %out
@@ -369,7 +369,7 @@ define void @local_sextload_v16i16_to_v16i32(<16 x i32> addrspace(3)* %out, <16
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_zextload_v32i16_to_v32i32(<32 x i32> addrspace(3)* %out, <32 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v32i16_to_v32i32(<32 x i32> addrspace(3)* %out, <32 x i16> addrspace(3)* %in) #0 {
%load = load <32 x i16>, <32 x i16> addrspace(3)* %in
%ext = zext <32 x i16> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(3)* %out
@@ -406,7 +406,7 @@ define void @local_zextload_v32i16_to_v32i32(<32 x i32> addrspace(3)* %out, <32
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_sextload_v32i16_to_v32i32(<32 x i32> addrspace(3)* %out, <32 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v32i16_to_v32i32(<32 x i32> addrspace(3)* %out, <32 x i16> addrspace(3)* %in) #0 {
%load = load <32 x i16>, <32 x i16> addrspace(3)* %in
%ext = sext <32 x i16> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(3)* %out
@@ -471,7 +471,7 @@ define void @local_sextload_v32i16_to_v32i32(<32 x i32> addrspace(3)* %out, <32
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_zextload_v64i16_to_v64i32(<64 x i32> addrspace(3)* %out, <64 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(<64 x i32> addrspace(3)* %out, <64 x i16> addrspace(3)* %in) #0 {
%load = load <64 x i16>, <64 x i16> addrspace(3)* %in
%ext = zext <64 x i16> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(3)* %out
@@ -512,7 +512,7 @@ define void @local_zextload_v64i16_to_v64i32(<64 x i32> addrspace(3)* %out, <64
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_sextload_v64i16_to_v64i32(<64 x i32> addrspace(3)* %out, <64 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(<64 x i32> addrspace(3)* %out, <64 x i16> addrspace(3)* %in) #0 {
%load = load <64 x i16>, <64 x i16> addrspace(3)* %in
%ext = sext <64 x i16> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(3)* %out
@@ -531,7 +531,7 @@ define void @local_sextload_v64i16_to_v64i32(<64 x i32> addrspace(3)* %out, <64
; EG-DAG: MOV {{[* ]*}}[[TO:T[0-9]+\.[XYZW]]], KC0[2].Y
; EG-DAG: LDS_WRITE
; EG: LDS_WRITE {{\*?}} [[TO]], [[DATA]]
-define void @local_zextload_i16_to_i64(i64 addrspace(3)* %out, i16 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_i16_to_i64(i64 addrspace(3)* %out, i16 addrspace(3)* %in) #0 {
%a = load i16, i16 addrspace(3)* %in
%ext = zext i16 %a to i64
store i64 %ext, i64 addrspace(3)* %out
@@ -558,7 +558,7 @@ define void @local_zextload_i16_to_i64(i64 addrspace(3)* %out, i16 addrspace(3)*
; EG-DAG: LDS_WRITE
; EG-DAG: 16
; EG: LDS_WRITE {{\*?}} [[TO]], [[DATA]]
-define void @local_sextload_i16_to_i64(i64 addrspace(3)* %out, i16 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_i16_to_i64(i64 addrspace(3)* %out, i16 addrspace(3)* %in) #0 {
%a = load i16, i16 addrspace(3)* %in
%ext = sext i16 %a to i64
store i64 %ext, i64 addrspace(3)* %out
@@ -573,7 +573,7 @@ define void @local_sextload_i16_to_i64(i64 addrspace(3)* %out, i16 addrspace(3)*
; EG-DAG: MOV {{[* ]*}}[[TO:T[0-9]+\.[XYZW]]], KC0[2].Y
; EG-DAG: LDS_WRITE
; EG: LDS_WRITE {{\*?}} [[TO]], [[DATA]]
-define void @local_zextload_v1i16_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v1i16_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i16> addrspace(3)* %in) #0 {
%load = load <1 x i16>, <1 x i16> addrspace(3)* %in
%ext = zext <1 x i16> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(3)* %out
@@ -590,7 +590,7 @@ define void @local_zextload_v1i16_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i1
; EG-DAG: LDS_WRITE
; EG-DAG: 16
; EG: LDS_WRITE {{\*?}} [[TO]], [[DATA]]
-define void @local_sextload_v1i16_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v1i16_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i16> addrspace(3)* %in) #0 {
%load = load <1 x i16>, <1 x i16> addrspace(3)* %in
%ext = sext <1 x i16> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(3)* %out
@@ -600,7 +600,7 @@ define void @local_sextload_v1i16_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i1
; FUNC-LABEL: {{^}}local_zextload_v2i16_to_v2i64:
; EG: LDS_READ_RET
-define void @local_zextload_v2i16_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v2i16_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i16> addrspace(3)* %in) #0 {
%load = load <2 x i16>, <2 x i16> addrspace(3)* %in
%ext = zext <2 x i16> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(3)* %out
@@ -612,7 +612,7 @@ define void @local_zextload_v2i16_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i1
; EG: LDS_READ_RET
; EG-DAG: BFE_INT
; EG-DAG: ASHR
-define void @local_sextload_v2i16_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v2i16_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i16> addrspace(3)* %in) #0 {
%load = load <2 x i16>, <2 x i16> addrspace(3)* %in
%ext = sext <2 x i16> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(3)* %out
@@ -623,7 +623,7 @@ define void @local_sextload_v2i16_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i1
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_zextload_v4i16_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v4i16_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i16> addrspace(3)* %in) #0 {
%load = load <4 x i16>, <4 x i16> addrspace(3)* %in
%ext = zext <4 x i16> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(3)* %out
@@ -638,7 +638,7 @@ define void @local_zextload_v4i16_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i1
; EG-DAG: BFE_INT
; EG-DAG: ASHR
; EG-DAG: ASHR
-define void @local_sextload_v4i16_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v4i16_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i16> addrspace(3)* %in) #0 {
%load = load <4 x i16>, <4 x i16> addrspace(3)* %in
%ext = sext <4 x i16> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(3)* %out
@@ -651,7 +651,7 @@ define void @local_sextload_v4i16_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i1
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_zextload_v8i16_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v8i16_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i16> addrspace(3)* %in) #0 {
%load = load <8 x i16>, <8 x i16> addrspace(3)* %in
%ext = zext <8 x i16> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(3)* %out
@@ -672,7 +672,7 @@ define void @local_zextload_v8i16_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i1
; EG-DAG: BFE_INT
; EG-DAG: ASHR
; EG-DAG: ASHR
-define void @local_sextload_v8i16_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v8i16_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i16> addrspace(3)* %in) #0 {
%load = load <8 x i16>, <8 x i16> addrspace(3)* %in
%ext = sext <8 x i16> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(3)* %out
@@ -689,7 +689,7 @@ define void @local_sextload_v8i16_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i1
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_zextload_v16i16_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v16i16_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x i16> addrspace(3)* %in) #0 {
%load = load <16 x i16>, <16 x i16> addrspace(3)* %in
%ext = zext <16 x i16> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(3)* %out
@@ -722,7 +722,7 @@ define void @local_zextload_v16i16_to_v16i64(<16 x i64> addrspace(3)* %out, <16
; EG-DAG: BFE_INT
; EG-DAG: ASHR
; EG-DAG: ASHR
-define void @local_sextload_v16i16_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v16i16_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x i16> addrspace(3)* %in) #0 {
%load = load <16 x i16>, <16 x i16> addrspace(3)* %in
%ext = sext <16 x i16> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(3)* %out
@@ -747,7 +747,7 @@ define void @local_sextload_v16i16_to_v16i64(<16 x i64> addrspace(3)* %out, <16
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_zextload_v32i16_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v32i16_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x i16> addrspace(3)* %in) #0 {
%load = load <32 x i16>, <32 x i16> addrspace(3)* %in
%ext = zext <32 x i16> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(3)* %out
@@ -804,7 +804,7 @@ define void @local_zextload_v32i16_to_v32i64(<32 x i64> addrspace(3)* %out, <32
; EG-DAG: BFE_INT
; EG-DAG: ASHR
; EG-DAG: ASHR
-define void @local_sextload_v32i16_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x i16> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v32i16_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x i16> addrspace(3)* %in) #0 {
%load = load <32 x i16>, <32 x i16> addrspace(3)* %in
%ext = sext <32 x i16> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(3)* %out
@@ -812,7 +812,7 @@ define void @local_sextload_v32i16_to_v32i64(<32 x i64> addrspace(3)* %out, <32
}
; ; XFUNC-LABEL: {{^}}local_zextload_v64i16_to_v64i64:
-; define void @local_zextload_v64i16_to_v64i64(<64 x i64> addrspace(3)* %out, <64 x i16> addrspace(3)* %in) #0 {
+; define amdgpu_kernel void @local_zextload_v64i16_to_v64i64(<64 x i64> addrspace(3)* %out, <64 x i16> addrspace(3)* %in) #0 {
; %load = load <64 x i16>, <64 x i16> addrspace(3)* %in
; %ext = zext <64 x i16> %load to <64 x i64>
; store <64 x i64> %ext, <64 x i64> addrspace(3)* %out
@@ -820,7 +820,7 @@ define void @local_sextload_v32i16_to_v32i64(<32 x i64> addrspace(3)* %out, <32
; }
; ; XFUNC-LABEL: {{^}}local_sextload_v64i16_to_v64i64:
-; define void @local_sextload_v64i16_to_v64i64(<64 x i64> addrspace(3)* %out, <64 x i16> addrspace(3)* %in) #0 {
+; define amdgpu_kernel void @local_sextload_v64i16_to_v64i64(<64 x i64> addrspace(3)* %out, <64 x i16> addrspace(3)* %in) #0 {
; %load = load <64 x i16>, <64 x i16> addrspace(3)* %in
; %ext = sext <64 x i16> %load to <64 x i64>
; store <64 x i64> %ext, <64 x i64> addrspace(3)* %out
diff --git a/test/CodeGen/AMDGPU/load-local-i32.ll b/test/CodeGen/AMDGPU/load-local-i32.ll
index 280f9658ef8d..86055413d2cf 100644
--- a/test/CodeGen/AMDGPU/load-local-i32.ll
+++ b/test/CodeGen/AMDGPU/load-local-i32.ll
@@ -9,7 +9,7 @@
; GCN: ds_read_b32
; EG: LDS_READ_RET
-define void @local_load_i32(i32 addrspace(3)* %out, i32 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_i32(i32 addrspace(3)* %out, i32 addrspace(3)* %in) #0 {
entry:
%ld = load i32, i32 addrspace(3)* %in
store i32 %ld, i32 addrspace(3)* %out
@@ -18,7 +18,7 @@ entry:
; FUNC-LABEL: {{^}}local_load_v2i32:
; GCN: ds_read_b64
-define void @local_load_v2i32(<2 x i32> addrspace(3)* %out, <2 x i32> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v2i32(<2 x i32> addrspace(3)* %out, <2 x i32> addrspace(3)* %in) #0 {
entry:
%ld = load <2 x i32>, <2 x i32> addrspace(3)* %in
store <2 x i32> %ld, <2 x i32> addrspace(3)* %out
@@ -28,7 +28,7 @@ entry:
; FUNC-LABEL: {{^}}local_load_v3i32:
; GCN-DAG: ds_read_b64
; GCN-DAG: ds_read_b32
-define void @local_load_v3i32(<3 x i32> addrspace(3)* %out, <3 x i32> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v3i32(<3 x i32> addrspace(3)* %out, <3 x i32> addrspace(3)* %in) #0 {
entry:
%ld = load <3 x i32>, <3 x i32> addrspace(3)* %in
store <3 x i32> %ld, <3 x i32> addrspace(3)* %out
@@ -38,7 +38,7 @@ entry:
; FUNC-LABEL: {{^}}local_load_v4i32:
; GCN: ds_read2_b64 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset1:1{{$}}
-define void @local_load_v4i32(<4 x i32> addrspace(3)* %out, <4 x i32> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v4i32(<4 x i32> addrspace(3)* %out, <4 x i32> addrspace(3)* %in) #0 {
entry:
%ld = load <4 x i32>, <4 x i32> addrspace(3)* %in
store <4 x i32> %ld, <4 x i32> addrspace(3)* %out
@@ -48,7 +48,7 @@ entry:
; FUNC-LABEL: {{^}}local_load_v8i32:
; GCN-DAG: ds_read2_b64 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset0:2 offset1:3{{$}}
; GCN-DAG: ds_read2_b64 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset1:1{{$}}
-define void @local_load_v8i32(<8 x i32> addrspace(3)* %out, <8 x i32> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v8i32(<8 x i32> addrspace(3)* %out, <8 x i32> addrspace(3)* %in) #0 {
entry:
%ld = load <8 x i32>, <8 x i32> addrspace(3)* %in
store <8 x i32> %ld, <8 x i32> addrspace(3)* %out
@@ -64,7 +64,7 @@ entry:
; GCN-DAG: ds_write2_b64 v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}} offset0:4 offset1:5
; GCN-DAG: ds_write2_b64 v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}} offset0:2 offset1:3
; GCN-DAG: ds_write2_b64 v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}} offset1:1
-define void @local_load_v16i32(<16 x i32> addrspace(3)* %out, <16 x i32> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v16i32(<16 x i32> addrspace(3)* %out, <16 x i32> addrspace(3)* %in) #0 {
entry:
%ld = load <16 x i32>, <16 x i32> addrspace(3)* %in
store <16 x i32> %ld, <16 x i32> addrspace(3)* %out
@@ -72,7 +72,7 @@ entry:
}
; FUNC-LABEL: {{^}}local_zextload_i32_to_i64:
-define void @local_zextload_i32_to_i64(i64 addrspace(3)* %out, i32 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_i32_to_i64(i64 addrspace(3)* %out, i32 addrspace(3)* %in) #0 {
%ld = load i32, i32 addrspace(3)* %in
%ext = zext i32 %ld to i64
store i64 %ext, i64 addrspace(3)* %out
@@ -80,7 +80,7 @@ define void @local_zextload_i32_to_i64(i64 addrspace(3)* %out, i32 addrspace(3)*
}
; FUNC-LABEL: {{^}}local_sextload_i32_to_i64:
-define void @local_sextload_i32_to_i64(i64 addrspace(3)* %out, i32 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_i32_to_i64(i64 addrspace(3)* %out, i32 addrspace(3)* %in) #0 {
%ld = load i32, i32 addrspace(3)* %in
%ext = sext i32 %ld to i64
store i64 %ext, i64 addrspace(3)* %out
@@ -88,7 +88,7 @@ define void @local_sextload_i32_to_i64(i64 addrspace(3)* %out, i32 addrspace(3)*
}
; FUNC-LABEL: {{^}}local_zextload_v1i32_to_v1i64:
-define void @local_zextload_v1i32_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i32> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v1i32_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i32> addrspace(3)* %in) #0 {
%ld = load <1 x i32>, <1 x i32> addrspace(3)* %in
%ext = zext <1 x i32> %ld to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(3)* %out
@@ -96,7 +96,7 @@ define void @local_zextload_v1i32_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i3
}
; FUNC-LABEL: {{^}}local_sextload_v1i32_to_v1i64:
-define void @local_sextload_v1i32_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i32> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v1i32_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i32> addrspace(3)* %in) #0 {
%ld = load <1 x i32>, <1 x i32> addrspace(3)* %in
%ext = sext <1 x i32> %ld to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(3)* %out
@@ -104,7 +104,7 @@ define void @local_sextload_v1i32_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i3
}
; FUNC-LABEL: {{^}}local_zextload_v2i32_to_v2i64:
-define void @local_zextload_v2i32_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i32> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v2i32_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i32> addrspace(3)* %in) #0 {
%ld = load <2 x i32>, <2 x i32> addrspace(3)* %in
%ext = zext <2 x i32> %ld to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(3)* %out
@@ -112,7 +112,7 @@ define void @local_zextload_v2i32_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i3
}
; FUNC-LABEL: {{^}}local_sextload_v2i32_to_v2i64:
-define void @local_sextload_v2i32_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i32> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v2i32_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i32> addrspace(3)* %in) #0 {
%ld = load <2 x i32>, <2 x i32> addrspace(3)* %in
%ext = sext <2 x i32> %ld to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(3)* %out
@@ -120,7 +120,7 @@ define void @local_sextload_v2i32_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i3
}
; FUNC-LABEL: {{^}}local_zextload_v4i32_to_v4i64:
-define void @local_zextload_v4i32_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i32> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v4i32_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i32> addrspace(3)* %in) #0 {
%ld = load <4 x i32>, <4 x i32> addrspace(3)* %in
%ext = zext <4 x i32> %ld to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(3)* %out
@@ -128,7 +128,7 @@ define void @local_zextload_v4i32_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i3
}
; FUNC-LABEL: {{^}}local_sextload_v4i32_to_v4i64:
-define void @local_sextload_v4i32_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i32> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v4i32_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i32> addrspace(3)* %in) #0 {
%ld = load <4 x i32>, <4 x i32> addrspace(3)* %in
%ext = sext <4 x i32> %ld to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(3)* %out
@@ -136,7 +136,7 @@ define void @local_sextload_v4i32_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i3
}
; FUNC-LABEL: {{^}}local_zextload_v8i32_to_v8i64:
-define void @local_zextload_v8i32_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i32> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v8i32_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i32> addrspace(3)* %in) #0 {
%ld = load <8 x i32>, <8 x i32> addrspace(3)* %in
%ext = zext <8 x i32> %ld to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(3)* %out
@@ -144,7 +144,7 @@ define void @local_zextload_v8i32_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i3
}
; FUNC-LABEL: {{^}}local_sextload_v8i32_to_v8i64:
-define void @local_sextload_v8i32_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i32> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v8i32_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i32> addrspace(3)* %in) #0 {
%ld = load <8 x i32>, <8 x i32> addrspace(3)* %in
%ext = sext <8 x i32> %ld to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(3)* %out
@@ -152,7 +152,7 @@ define void @local_sextload_v8i32_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i3
}
; FUNC-LABEL: {{^}}local_sextload_v16i32_to_v16i64:
-define void @local_sextload_v16i32_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x i32> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v16i32_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x i32> addrspace(3)* %in) #0 {
%ld = load <16 x i32>, <16 x i32> addrspace(3)* %in
%ext = sext <16 x i32> %ld to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(3)* %out
@@ -160,7 +160,7 @@ define void @local_sextload_v16i32_to_v16i64(<16 x i64> addrspace(3)* %out, <16
}
; FUNC-LABEL: {{^}}local_zextload_v16i32_to_v16i64
-define void @local_zextload_v16i32_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x i32> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v16i32_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x i32> addrspace(3)* %in) #0 {
%ld = load <16 x i32>, <16 x i32> addrspace(3)* %in
%ext = zext <16 x i32> %ld to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(3)* %out
@@ -168,7 +168,7 @@ define void @local_zextload_v16i32_to_v16i64(<16 x i64> addrspace(3)* %out, <16
}
; FUNC-LABEL: {{^}}local_sextload_v32i32_to_v32i64:
-define void @local_sextload_v32i32_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x i32> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v32i32_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x i32> addrspace(3)* %in) #0 {
%ld = load <32 x i32>, <32 x i32> addrspace(3)* %in
%ext = sext <32 x i32> %ld to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(3)* %out
@@ -176,7 +176,7 @@ define void @local_sextload_v32i32_to_v32i64(<32 x i64> addrspace(3)* %out, <32
}
; FUNC-LABEL: {{^}}local_zextload_v32i32_to_v32i64:
-define void @local_zextload_v32i32_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x i32> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v32i32_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x i32> addrspace(3)* %in) #0 {
%ld = load <32 x i32>, <32 x i32> addrspace(3)* %in
%ext = zext <32 x i32> %ld to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(3)* %out
diff --git a/test/CodeGen/AMDGPU/load-local-i64.ll b/test/CodeGen/AMDGPU/load-local-i64.ll
index 180807df7b9a..0c719a9e0bf9 100644
--- a/test/CodeGen/AMDGPU/load-local-i64.ll
+++ b/test/CodeGen/AMDGPU/load-local-i64.ll
@@ -9,7 +9,7 @@
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_i64(i64 addrspace(3)* %out, i64 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_i64(i64 addrspace(3)* %out, i64 addrspace(3)* %in) #0 {
%ld = load i64, i64 addrspace(3)* %in
store i64 %ld, i64 addrspace(3)* %out
ret void
@@ -22,7 +22,7 @@ define void @local_load_i64(i64 addrspace(3)* %out, i64 addrspace(3)* %in) #0 {
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v2i64(<2 x i64> addrspace(3)* %out, <2 x i64> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v2i64(<2 x i64> addrspace(3)* %out, <2 x i64> addrspace(3)* %in) #0 {
entry:
%ld = load <2 x i64>, <2 x i64> addrspace(3)* %in
store <2 x i64> %ld, <2 x i64> addrspace(3)* %out
@@ -39,7 +39,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v3i64(<3 x i64> addrspace(3)* %out, <3 x i64> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v3i64(<3 x i64> addrspace(3)* %out, <3 x i64> addrspace(3)* %in) #0 {
entry:
%ld = load <3 x i64>, <3 x i64> addrspace(3)* %in
store <3 x i64> %ld, <3 x i64> addrspace(3)* %out
@@ -59,7 +59,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v4i64(<4 x i64> addrspace(3)* %out, <4 x i64> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v4i64(<4 x i64> addrspace(3)* %out, <4 x i64> addrspace(3)* %in) #0 {
entry:
%ld = load <4 x i64>, <4 x i64> addrspace(3)* %in
store <4 x i64> %ld, <4 x i64> addrspace(3)* %out
@@ -88,7 +88,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v8i64(<8 x i64> addrspace(3)* %out, <8 x i64> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v8i64(<8 x i64> addrspace(3)* %out, <8 x i64> addrspace(3)* %in) #0 {
entry:
%ld = load <8 x i64>, <8 x i64> addrspace(3)* %in
store <8 x i64> %ld, <8 x i64> addrspace(3)* %out
@@ -144,7 +144,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v16i64(<16 x i64> addrspace(3)* %out, <16 x i64> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v16i64(<16 x i64> addrspace(3)* %out, <16 x i64> addrspace(3)* %in) #0 {
entry:
%ld = load <16 x i64>, <16 x i64> addrspace(3)* %in
store <16 x i64> %ld, <16 x i64> addrspace(3)* %out
diff --git a/test/CodeGen/AMDGPU/load-local-i8.ll b/test/CodeGen/AMDGPU/load-local-i8.ll
index 9ffc74213dd5..731996ec6c45 100644
--- a/test/CodeGen/AMDGPU/load-local-i8.ll
+++ b/test/CodeGen/AMDGPU/load-local-i8.ll
@@ -9,7 +9,7 @@
; GCN: ds_read_u8
; EG: LDS_UBYTE_READ_RET
-define void @local_load_i8(i8 addrspace(3)* %out, i8 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_i8(i8 addrspace(3)* %out, i8 addrspace(3)* %in) #0 {
entry:
%ld = load i8, i8 addrspace(3)* %in
store i8 %ld, i8 addrspace(3)* %out
@@ -22,7 +22,7 @@ entry:
; GCN: ds_read_u16
; EG: LDS_USHORT_READ_RET
-define void @local_load_v2i8(<2 x i8> addrspace(3)* %out, <2 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v2i8(<2 x i8> addrspace(3)* %out, <2 x i8> addrspace(3)* %in) #0 {
entry:
%ld = load <2 x i8>, <2 x i8> addrspace(3)* %in
store <2 x i8> %ld, <2 x i8> addrspace(3)* %out
@@ -33,7 +33,7 @@ entry:
; GCN: ds_read_b32
; EG: DS_READ_RET
-define void @local_load_v3i8(<3 x i8> addrspace(3)* %out, <3 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v3i8(<3 x i8> addrspace(3)* %out, <3 x i8> addrspace(3)* %in) #0 {
entry:
%ld = load <3 x i8>, <3 x i8> addrspace(3)* %in
store <3 x i8> %ld, <3 x i8> addrspace(3)* %out
@@ -44,7 +44,7 @@ entry:
; GCN: ds_read_b32
; EG: LDS_READ_RET
-define void @local_load_v4i8(<4 x i8> addrspace(3)* %out, <4 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v4i8(<4 x i8> addrspace(3)* %out, <4 x i8> addrspace(3)* %in) #0 {
entry:
%ld = load <4 x i8>, <4 x i8> addrspace(3)* %in
store <4 x i8> %ld, <4 x i8> addrspace(3)* %out
@@ -56,7 +56,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v8i8(<8 x i8> addrspace(3)* %out, <8 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v8i8(<8 x i8> addrspace(3)* %out, <8 x i8> addrspace(3)* %in) #0 {
entry:
%ld = load <8 x i8>, <8 x i8> addrspace(3)* %in
store <8 x i8> %ld, <8 x i8> addrspace(3)* %out
@@ -71,7 +71,7 @@ entry:
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_load_v16i8(<16 x i8> addrspace(3)* %out, <16 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_v16i8(<16 x i8> addrspace(3)* %out, <16 x i8> addrspace(3)* %in) #0 {
entry:
%ld = load <16 x i8>, <16 x i8> addrspace(3)* %in
store <16 x i8> %ld, <16 x i8> addrspace(3)* %out
@@ -84,7 +84,7 @@ entry:
; GCN: ds_read_u8
; EG: LDS_UBYTE_READ_RET
-define void @local_zextload_i8_to_i32(i32 addrspace(3)* %out, i8 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_i8_to_i32(i32 addrspace(3)* %out, i8 addrspace(3)* %in) #0 {
%a = load i8, i8 addrspace(3)* %in
%ext = zext i8 %a to i32
store i32 %ext, i32 addrspace(3)* %out
@@ -98,7 +98,7 @@ define void @local_zextload_i8_to_i32(i32 addrspace(3)* %out, i8 addrspace(3)* %
; EG: LDS_UBYTE_READ_RET
; EG: BFE_INT
-define void @local_sextload_i8_to_i32(i32 addrspace(3)* %out, i8 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_i8_to_i32(i32 addrspace(3)* %out, i8 addrspace(3)* %in) #0 {
%ld = load i8, i8 addrspace(3)* %in
%ext = sext i8 %ld to i32
store i32 %ext, i32 addrspace(3)* %out
@@ -108,7 +108,7 @@ define void @local_sextload_i8_to_i32(i32 addrspace(3)* %out, i8 addrspace(3)* %
; FUNC-LABEL: {{^}}local_zextload_v1i8_to_v1i32:
; EG: LDS_UBYTE_READ_RET
-define void @local_zextload_v1i8_to_v1i32(<1 x i32> addrspace(3)* %out, <1 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v1i8_to_v1i32(<1 x i32> addrspace(3)* %out, <1 x i8> addrspace(3)* %in) #0 {
%load = load <1 x i8>, <1 x i8> addrspace(3)* %in
%ext = zext <1 x i8> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(3)* %out
@@ -119,7 +119,7 @@ define void @local_zextload_v1i8_to_v1i32(<1 x i32> addrspace(3)* %out, <1 x i8>
; EG: LDS_UBYTE_READ_RET
; EG: BFE_INT
-define void @local_sextload_v1i8_to_v1i32(<1 x i32> addrspace(3)* %out, <1 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v1i8_to_v1i32(<1 x i32> addrspace(3)* %out, <1 x i8> addrspace(3)* %in) #0 {
%load = load <1 x i8>, <1 x i8> addrspace(3)* %in
%ext = sext <1 x i8> %load to <1 x i32>
store <1 x i32> %ext, <1 x i32> addrspace(3)* %out
@@ -130,7 +130,7 @@ define void @local_sextload_v1i8_to_v1i32(<1 x i32> addrspace(3)* %out, <1 x i8>
; GCN: ds_read_u16
; EG: LDS_USHORT_READ_RET
-define void @local_zextload_v2i8_to_v2i32(<2 x i32> addrspace(3)* %out, <2 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v2i8_to_v2i32(<2 x i32> addrspace(3)* %out, <2 x i8> addrspace(3)* %in) #0 {
%load = load <2 x i8>, <2 x i8> addrspace(3)* %in
%ext = zext <2 x i8> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(3)* %out
@@ -156,7 +156,7 @@ define void @local_zextload_v2i8_to_v2i32(<2 x i32> addrspace(3)* %out, <2 x i8>
; EG: LDS_USHORT_READ_RET
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
-define void @local_sextload_v2i8_to_v2i32(<2 x i32> addrspace(3)* %out, <2 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v2i8_to_v2i32(<2 x i32> addrspace(3)* %out, <2 x i8> addrspace(3)* %in) #0 {
%load = load <2 x i8>, <2 x i8> addrspace(3)* %in
%ext = sext <2 x i8> %load to <2 x i32>
store <2 x i32> %ext, <2 x i32> addrspace(3)* %out
@@ -172,7 +172,7 @@ define void @local_sextload_v2i8_to_v2i32(<2 x i32> addrspace(3)* %out, <2 x i8>
; GCN-DAG: v_and_b32_e32 v{{[0-9]+}}, 0xff,
; EG: LDS_READ_RET
-define void @local_zextload_v3i8_to_v3i32(<3 x i32> addrspace(3)* %out, <3 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v3i8_to_v3i32(<3 x i32> addrspace(3)* %out, <3 x i8> addrspace(3)* %in) #0 {
entry:
%ld = load <3 x i8>, <3 x i8> addrspace(3)* %in
%ext = zext <3 x i8> %ld to <3 x i32>
@@ -197,7 +197,7 @@ entry:
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
-define void @local_sextload_v3i8_to_v3i32(<3 x i32> addrspace(3)* %out, <3 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v3i8_to_v3i32(<3 x i32> addrspace(3)* %out, <3 x i8> addrspace(3)* %in) #0 {
entry:
%ld = load <3 x i8>, <3 x i8> addrspace(3)* %in
%ext = sext <3 x i8> %ld to <3 x i32>
@@ -214,7 +214,7 @@ entry:
; EG-DAG: BFE_UINT
; EG-DAG: BFE_UINT
; EG-DAG: BFE_UINT
-define void @local_zextload_v4i8_to_v4i32(<4 x i32> addrspace(3)* %out, <4 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v4i8_to_v4i32(<4 x i32> addrspace(3)* %out, <4 x i8> addrspace(3)* %in) #0 {
%load = load <4 x i8>, <4 x i8> addrspace(3)* %in
%ext = zext <4 x i8> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(3)* %out
@@ -231,7 +231,7 @@ define void @local_zextload_v4i8_to_v4i32(<4 x i32> addrspace(3)* %out, <4 x i8>
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
-define void @local_sextload_v4i8_to_v4i32(<4 x i32> addrspace(3)* %out, <4 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v4i8_to_v4i32(<4 x i32> addrspace(3)* %out, <4 x i8> addrspace(3)* %in) #0 {
%load = load <4 x i8>, <4 x i8> addrspace(3)* %in
%ext = sext <4 x i8> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(3)* %out
@@ -248,7 +248,7 @@ define void @local_sextload_v4i8_to_v4i32(<4 x i32> addrspace(3)* %out, <4 x i8>
; EG-DAG: BFE_UINT
; EG-DAG: BFE_UINT
; EG-DAG: BFE_UINT
-define void @local_zextload_v8i8_to_v8i32(<8 x i32> addrspace(3)* %out, <8 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v8i8_to_v8i32(<8 x i32> addrspace(3)* %out, <8 x i8> addrspace(3)* %in) #0 {
%load = load <8 x i8>, <8 x i8> addrspace(3)* %in
%ext = zext <8 x i8> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(3)* %out
@@ -267,7 +267,7 @@ define void @local_zextload_v8i8_to_v8i32(<8 x i32> addrspace(3)* %out, <8 x i8>
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
-define void @local_sextload_v8i8_to_v8i32(<8 x i32> addrspace(3)* %out, <8 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v8i8_to_v8i32(<8 x i32> addrspace(3)* %out, <8 x i8> addrspace(3)* %in) #0 {
%load = load <8 x i8>, <8 x i8> addrspace(3)* %in
%ext = sext <8 x i8> %load to <8 x i32>
store <8 x i32> %ext, <8 x i32> addrspace(3)* %out
@@ -292,7 +292,7 @@ define void @local_sextload_v8i8_to_v8i32(<8 x i32> addrspace(3)* %out, <8 x i8>
; EG-DAG: BFE_UINT
; EG-DAG: BFE_UINT
; EG-DAG: BFE_UINT
-define void @local_zextload_v16i8_to_v16i32(<16 x i32> addrspace(3)* %out, <16 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v16i8_to_v16i32(<16 x i32> addrspace(3)* %out, <16 x i8> addrspace(3)* %in) #0 {
%load = load <16 x i8>, <16 x i8> addrspace(3)* %in
%ext = zext <16 x i8> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(3)* %out
@@ -321,7 +321,7 @@ define void @local_zextload_v16i8_to_v16i32(<16 x i32> addrspace(3)* %out, <16 x
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
-define void @local_sextload_v16i8_to_v16i32(<16 x i32> addrspace(3)* %out, <16 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v16i8_to_v16i32(<16 x i32> addrspace(3)* %out, <16 x i8> addrspace(3)* %in) #0 {
%load = load <16 x i8>, <16 x i8> addrspace(3)* %in
%ext = sext <16 x i8> %load to <16 x i32>
store <16 x i32> %ext, <16 x i32> addrspace(3)* %out
@@ -338,7 +338,7 @@ define void @local_sextload_v16i8_to_v16i32(<16 x i32> addrspace(3)* %out, <16 x
; EG-DAG: LDS_READ_RET
; EG-DAG: LDS_READ_RET
; EG-DAG: LDS_READ_RET
-define void @local_zextload_v32i8_to_v32i32(<32 x i32> addrspace(3)* %out, <32 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v32i8_to_v32i32(<32 x i32> addrspace(3)* %out, <32 x i8> addrspace(3)* %in) #0 {
%load = load <32 x i8>, <32 x i8> addrspace(3)* %in
%ext = zext <32 x i8> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(3)* %out
@@ -355,7 +355,7 @@ define void @local_zextload_v32i8_to_v32i32(<32 x i32> addrspace(3)* %out, <32 x
; EG-DAG: LDS_READ_RET
; EG-DAG: LDS_READ_RET
; EG-DAG: LDS_READ_RET
-define void @local_sextload_v32i8_to_v32i32(<32 x i32> addrspace(3)* %out, <32 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v32i8_to_v32i32(<32 x i32> addrspace(3)* %out, <32 x i8> addrspace(3)* %in) #0 {
%load = load <32 x i8>, <32 x i8> addrspace(3)* %in
%ext = sext <32 x i8> %load to <32 x i32>
store <32 x i32> %ext, <32 x i32> addrspace(3)* %out
@@ -380,7 +380,7 @@ define void @local_sextload_v32i8_to_v32i32(<32 x i32> addrspace(3)* %out, <32 x
; EG-DAG: LDS_READ_RET
; EG-DAG: LDS_READ_RET
; EG-DAG: LDS_READ_RET
-define void @local_zextload_v64i8_to_v64i32(<64 x i32> addrspace(3)* %out, <64 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v64i8_to_v64i32(<64 x i32> addrspace(3)* %out, <64 x i8> addrspace(3)* %in) #0 {
%load = load <64 x i8>, <64 x i8> addrspace(3)* %in
%ext = zext <64 x i8> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(3)* %out
@@ -405,7 +405,7 @@ define void @local_zextload_v64i8_to_v64i32(<64 x i32> addrspace(3)* %out, <64 x
; EG-DAG: LDS_READ_RET
; EG-DAG: LDS_READ_RET
; EG-DAG: LDS_READ_RET
-define void @local_sextload_v64i8_to_v64i32(<64 x i32> addrspace(3)* %out, <64 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v64i8_to_v64i32(<64 x i32> addrspace(3)* %out, <64 x i8> addrspace(3)* %in) #0 {
%load = load <64 x i8>, <64 x i8> addrspace(3)* %in
%ext = sext <64 x i8> %load to <64 x i32>
store <64 x i32> %ext, <64 x i32> addrspace(3)* %out
@@ -420,7 +420,7 @@ define void @local_sextload_v64i8_to_v64i32(<64 x i32> addrspace(3)* %out, <64 x
; EG: LDS_UBYTE_READ_RET
; EG: MOV {{.*}}, literal
; EG: 0.0
-define void @local_zextload_i8_to_i64(i64 addrspace(3)* %out, i8 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_i8_to_i64(i64 addrspace(3)* %out, i8 addrspace(3)* %in) #0 {
%a = load i8, i8 addrspace(3)* %in
%ext = zext i8 %a to i64
store i64 %ext, i64 addrspace(3)* %out
@@ -437,7 +437,7 @@ define void @local_zextload_i8_to_i64(i64 addrspace(3)* %out, i8 addrspace(3)* %
; EG: ASHR
; TODO: why not 7?
; EG: 31
-define void @local_sextload_i8_to_i64(i64 addrspace(3)* %out, i8 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_i8_to_i64(i64 addrspace(3)* %out, i8 addrspace(3)* %in) #0 {
%a = load i8, i8 addrspace(3)* %in
%ext = sext i8 %a to i64
store i64 %ext, i64 addrspace(3)* %out
@@ -450,7 +450,7 @@ define void @local_sextload_i8_to_i64(i64 addrspace(3)* %out, i8 addrspace(3)* %
; EG: MOV {{.*}}, literal
; TODO: merge?
; EG: 0.0
-define void @local_zextload_v1i8_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v1i8_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i8> addrspace(3)* %in) #0 {
%load = load <1 x i8>, <1 x i8> addrspace(3)* %in
%ext = zext <1 x i8> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(3)* %out
@@ -463,7 +463,7 @@ define void @local_zextload_v1i8_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i8>
; EG: ASHR
; TODO: why not 7?
; EG: 31
-define void @local_sextload_v1i8_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v1i8_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i8> addrspace(3)* %in) #0 {
%load = load <1 x i8>, <1 x i8> addrspace(3)* %in
%ext = sext <1 x i8> %load to <1 x i64>
store <1 x i64> %ext, <1 x i64> addrspace(3)* %out
@@ -473,7 +473,7 @@ define void @local_sextload_v1i8_to_v1i64(<1 x i64> addrspace(3)* %out, <1 x i8>
; FUNC-LABEL: {{^}}local_zextload_v2i8_to_v2i64:
; EG: LDS_USHORT_READ_RET
-define void @local_zextload_v2i8_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v2i8_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i8> addrspace(3)* %in) #0 {
%load = load <2 x i8>, <2 x i8> addrspace(3)* %in
%ext = zext <2 x i8> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(3)* %out
@@ -485,7 +485,7 @@ define void @local_zextload_v2i8_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i8>
; EG: LDS_USHORT_READ_RET
; EG: BFE_INT
; EG: BFE_INT
-define void @local_sextload_v2i8_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v2i8_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i8> addrspace(3)* %in) #0 {
%load = load <2 x i8>, <2 x i8> addrspace(3)* %in
%ext = sext <2 x i8> %load to <2 x i64>
store <2 x i64> %ext, <2 x i64> addrspace(3)* %out
@@ -495,7 +495,7 @@ define void @local_sextload_v2i8_to_v2i64(<2 x i64> addrspace(3)* %out, <2 x i8>
; FUNC-LABEL: {{^}}local_zextload_v4i8_to_v4i64:
; EG: LDS_READ_RET
-define void @local_zextload_v4i8_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v4i8_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i8> addrspace(3)* %in) #0 {
%load = load <4 x i8>, <4 x i8> addrspace(3)* %in
%ext = zext <4 x i8> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(3)* %out
@@ -505,7 +505,7 @@ define void @local_zextload_v4i8_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i8>
; FUNC-LABEL: {{^}}local_sextload_v4i8_to_v4i64:
; EG: LDS_READ_RET
-define void @local_sextload_v4i8_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v4i8_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i8> addrspace(3)* %in) #0 {
%load = load <4 x i8>, <4 x i8> addrspace(3)* %in
%ext = sext <4 x i8> %load to <4 x i64>
store <4 x i64> %ext, <4 x i64> addrspace(3)* %out
@@ -516,7 +516,7 @@ define void @local_sextload_v4i8_to_v4i64(<4 x i64> addrspace(3)* %out, <4 x i8>
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_zextload_v8i8_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v8i8_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i8> addrspace(3)* %in) #0 {
%load = load <8 x i8>, <8 x i8> addrspace(3)* %in
%ext = zext <8 x i8> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(3)* %out
@@ -536,7 +536,7 @@ define void @local_zextload_v8i8_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i8>
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
; EG-DAG: BFE_INT
-define void @local_sextload_v8i8_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v8i8_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i8> addrspace(3)* %in) #0 {
%load = load <8 x i8>, <8 x i8> addrspace(3)* %in
%ext = sext <8 x i8> %load to <8 x i64>
store <8 x i64> %ext, <8 x i64> addrspace(3)* %out
@@ -549,7 +549,7 @@ define void @local_sextload_v8i8_to_v8i64(<8 x i64> addrspace(3)* %out, <8 x i8>
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_zextload_v16i8_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v16i8_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x i8> addrspace(3)* %in) #0 {
%load = load <16 x i8>, <16 x i8> addrspace(3)* %in
%ext = zext <16 x i8> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(3)* %out
@@ -562,7 +562,7 @@ define void @local_zextload_v16i8_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_sextload_v16i8_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v16i8_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x i8> addrspace(3)* %in) #0 {
%load = load <16 x i8>, <16 x i8> addrspace(3)* %in
%ext = sext <16 x i8> %load to <16 x i64>
store <16 x i64> %ext, <16 x i64> addrspace(3)* %out
@@ -579,7 +579,7 @@ define void @local_sextload_v16i8_to_v16i64(<16 x i64> addrspace(3)* %out, <16 x
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_zextload_v32i8_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v32i8_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x i8> addrspace(3)* %in) #0 {
%load = load <32 x i8>, <32 x i8> addrspace(3)* %in
%ext = zext <32 x i8> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(3)* %out
@@ -596,7 +596,7 @@ define void @local_zextload_v32i8_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x
; EG: LDS_READ_RET
; EG: LDS_READ_RET
; EG: LDS_READ_RET
-define void @local_sextload_v32i8_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v32i8_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x i8> addrspace(3)* %in) #0 {
%load = load <32 x i8>, <32 x i8> addrspace(3)* %in
%ext = sext <32 x i8> %load to <32 x i64>
store <32 x i64> %ext, <32 x i64> addrspace(3)* %out
@@ -604,7 +604,7 @@ define void @local_sextload_v32i8_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x
}
; XFUNC-LABEL: {{^}}local_zextload_v64i8_to_v64i64:
-; define void @local_zextload_v64i8_to_v64i64(<64 x i64> addrspace(3)* %out, <64 x i8> addrspace(3)* %in) #0 {
+; define amdgpu_kernel void @local_zextload_v64i8_to_v64i64(<64 x i64> addrspace(3)* %out, <64 x i8> addrspace(3)* %in) #0 {
; %load = load <64 x i8>, <64 x i8> addrspace(3)* %in
; %ext = zext <64 x i8> %load to <64 x i64>
; store <64 x i64> %ext, <64 x i64> addrspace(3)* %out
@@ -612,7 +612,7 @@ define void @local_sextload_v32i8_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x
; }
; XFUNC-LABEL: {{^}}local_sextload_v64i8_to_v64i64:
-; define void @local_sextload_v64i8_to_v64i64(<64 x i64> addrspace(3)* %out, <64 x i8> addrspace(3)* %in) #0 {
+; define amdgpu_kernel void @local_sextload_v64i8_to_v64i64(<64 x i64> addrspace(3)* %out, <64 x i8> addrspace(3)* %in) #0 {
; %load = load <64 x i8>, <64 x i8> addrspace(3)* %in
; %ext = sext <64 x i8> %load to <64 x i64>
; store <64 x i64> %ext, <64 x i64> addrspace(3)* %out
@@ -625,7 +625,7 @@ define void @local_sextload_v32i8_to_v32i64(<32 x i64> addrspace(3)* %out, <32 x
; EG: LDS_UBYTE_READ_RET
; EG: LDS_SHORT_WRITE
-define void @local_zextload_i8_to_i16(i16 addrspace(3)* %out, i8 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_i8_to_i16(i16 addrspace(3)* %out, i8 addrspace(3)* %in) #0 {
%a = load i8, i8 addrspace(3)* %in
%ext = zext i8 %a to i16
store i16 %ext, i16 addrspace(3)* %out
@@ -639,7 +639,7 @@ define void @local_zextload_i8_to_i16(i16 addrspace(3)* %out, i8 addrspace(3)* %
; EG: LDS_UBYTE_READ_RET
; EG: BFE_INT
; EG: LDS_SHORT_WRITE
-define void @local_sextload_i8_to_i16(i16 addrspace(3)* %out, i8 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_i8_to_i16(i16 addrspace(3)* %out, i8 addrspace(3)* %in) #0 {
%a = load i8, i8 addrspace(3)* %in
%ext = sext i8 %a to i16
store i16 %ext, i16 addrspace(3)* %out
@@ -650,7 +650,7 @@ define void @local_sextload_i8_to_i16(i16 addrspace(3)* %out, i8 addrspace(3)* %
; EG: LDS_UBYTE_READ_RET
; EG: LDS_SHORT_WRITE
-define void @local_zextload_v1i8_to_v1i16(<1 x i16> addrspace(3)* %out, <1 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v1i8_to_v1i16(<1 x i16> addrspace(3)* %out, <1 x i8> addrspace(3)* %in) #0 {
%load = load <1 x i8>, <1 x i8> addrspace(3)* %in
%ext = zext <1 x i8> %load to <1 x i16>
store <1 x i16> %ext, <1 x i16> addrspace(3)* %out
@@ -662,7 +662,7 @@ define void @local_zextload_v1i8_to_v1i16(<1 x i16> addrspace(3)* %out, <1 x i8>
; EG: LDS_UBYTE_READ_RET
; EG: BFE_INT
; EG: LDS_SHORT_WRITE
-define void @local_sextload_v1i8_to_v1i16(<1 x i16> addrspace(3)* %out, <1 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v1i8_to_v1i16(<1 x i16> addrspace(3)* %out, <1 x i8> addrspace(3)* %in) #0 {
%load = load <1 x i8>, <1 x i8> addrspace(3)* %in
%ext = sext <1 x i8> %load to <1 x i16>
store <1 x i16> %ext, <1 x i16> addrspace(3)* %out
@@ -673,7 +673,7 @@ define void @local_sextload_v1i8_to_v1i16(<1 x i16> addrspace(3)* %out, <1 x i8>
; EG: LDS_USHORT_READ_RET
; EG: LDS_WRITE
-define void @local_zextload_v2i8_to_v2i16(<2 x i16> addrspace(3)* %out, <2 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v2i8_to_v2i16(<2 x i16> addrspace(3)* %out, <2 x i8> addrspace(3)* %in) #0 {
%load = load <2 x i8>, <2 x i8> addrspace(3)* %in
%ext = zext <2 x i8> %load to <2 x i16>
store <2 x i16> %ext, <2 x i16> addrspace(3)* %out
@@ -686,7 +686,7 @@ define void @local_zextload_v2i8_to_v2i16(<2 x i16> addrspace(3)* %out, <2 x i8>
; EG: BFE_INT
; EG: BFE_INT
; EG: LDS_WRITE
-define void @local_sextload_v2i8_to_v2i16(<2 x i16> addrspace(3)* %out, <2 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v2i8_to_v2i16(<2 x i16> addrspace(3)* %out, <2 x i8> addrspace(3)* %in) #0 {
%load = load <2 x i8>, <2 x i8> addrspace(3)* %in
%ext = sext <2 x i8> %load to <2 x i16>
store <2 x i16> %ext, <2 x i16> addrspace(3)* %out
@@ -698,7 +698,7 @@ define void @local_sextload_v2i8_to_v2i16(<2 x i16> addrspace(3)* %out, <2 x i8>
; EG: LDS_READ_RET
; EG: LDS_WRITE
; EG: LDS_WRITE
-define void @local_zextload_v4i8_to_v4i16(<4 x i16> addrspace(3)* %out, <4 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v4i8_to_v4i16(<4 x i16> addrspace(3)* %out, <4 x i8> addrspace(3)* %in) #0 {
%load = load <4 x i8>, <4 x i8> addrspace(3)* %in
%ext = zext <4 x i8> %load to <4 x i16>
store <4 x i16> %ext, <4 x i16> addrspace(3)* %out
@@ -715,7 +715,7 @@ define void @local_zextload_v4i8_to_v4i16(<4 x i16> addrspace(3)* %out, <4 x i8>
; EG-DAG: BFE_INT
; EG: LDS_WRITE
; EG: LDS_WRITE
-define void @local_sextload_v4i8_to_v4i16(<4 x i16> addrspace(3)* %out, <4 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v4i8_to_v4i16(<4 x i16> addrspace(3)* %out, <4 x i8> addrspace(3)* %in) #0 {
%load = load <4 x i8>, <4 x i8> addrspace(3)* %in
%ext = sext <4 x i8> %load to <4 x i16>
store <4 x i16> %ext, <4 x i16> addrspace(3)* %out
@@ -730,7 +730,7 @@ define void @local_sextload_v4i8_to_v4i16(<4 x i16> addrspace(3)* %out, <4 x i8>
; EG: LDS_WRITE
; EG: LDS_WRITE
; EG: LDS_WRITE
-define void @local_zextload_v8i8_to_v8i16(<8 x i16> addrspace(3)* %out, <8 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v8i8_to_v8i16(<8 x i16> addrspace(3)* %out, <8 x i8> addrspace(3)* %in) #0 {
%load = load <8 x i8>, <8 x i8> addrspace(3)* %in
%ext = zext <8 x i8> %load to <8 x i16>
store <8 x i16> %ext, <8 x i16> addrspace(3)* %out
@@ -754,7 +754,7 @@ define void @local_zextload_v8i8_to_v8i16(<8 x i16> addrspace(3)* %out, <8 x i8>
; EG: LDS_WRITE
; EG: LDS_WRITE
; EG: LDS_WRITE
-define void @local_sextload_v8i8_to_v8i16(<8 x i16> addrspace(3)* %out, <8 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v8i8_to_v8i16(<8 x i16> addrspace(3)* %out, <8 x i8> addrspace(3)* %in) #0 {
%load = load <8 x i8>, <8 x i8> addrspace(3)* %in
%ext = sext <8 x i8> %load to <8 x i16>
store <8 x i16> %ext, <8 x i16> addrspace(3)* %out
@@ -775,7 +775,7 @@ define void @local_sextload_v8i8_to_v8i16(<8 x i16> addrspace(3)* %out, <8 x i8>
; EG: LDS_WRITE
; EG: LDS_WRITE
; EG: LDS_WRITE
-define void @local_zextload_v16i8_to_v16i16(<16 x i16> addrspace(3)* %out, <16 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v16i8_to_v16i16(<16 x i16> addrspace(3)* %out, <16 x i8> addrspace(3)* %in) #0 {
%load = load <16 x i8>, <16 x i8> addrspace(3)* %in
%ext = zext <16 x i8> %load to <16 x i16>
store <16 x i16> %ext, <16 x i16> addrspace(3)* %out
@@ -813,7 +813,7 @@ define void @local_zextload_v16i8_to_v16i16(<16 x i16> addrspace(3)* %out, <16 x
; EG: LDS_WRITE
; EG: LDS_WRITE
; EG: LDS_WRITE
-define void @local_sextload_v16i8_to_v16i16(<16 x i16> addrspace(3)* %out, <16 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v16i8_to_v16i16(<16 x i16> addrspace(3)* %out, <16 x i8> addrspace(3)* %in) #0 {
%load = load <16 x i8>, <16 x i8> addrspace(3)* %in
%ext = sext <16 x i8> %load to <16 x i16>
store <16 x i16> %ext, <16 x i16> addrspace(3)* %out
@@ -846,7 +846,7 @@ define void @local_sextload_v16i8_to_v16i16(<16 x i16> addrspace(3)* %out, <16 x
; EG: LDS_WRITE
; EG: LDS_WRITE
; EG: LDS_WRITE
-define void @local_zextload_v32i8_to_v32i16(<32 x i16> addrspace(3)* %out, <32 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_zextload_v32i8_to_v32i16(<32 x i16> addrspace(3)* %out, <32 x i8> addrspace(3)* %in) #0 {
%load = load <32 x i8>, <32 x i8> addrspace(3)* %in
%ext = zext <32 x i8> %load to <32 x i16>
store <32 x i16> %ext, <32 x i16> addrspace(3)* %out
@@ -908,7 +908,7 @@ define void @local_zextload_v32i8_to_v32i16(<32 x i16> addrspace(3)* %out, <32 x
; EG: LDS_WRITE
; EG: LDS_WRITE
; EG: LDS_WRITE
-define void @local_sextload_v32i8_to_v32i16(<32 x i16> addrspace(3)* %out, <32 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_sextload_v32i8_to_v32i16(<32 x i16> addrspace(3)* %out, <32 x i8> addrspace(3)* %in) #0 {
%load = load <32 x i8>, <32 x i8> addrspace(3)* %in
%ext = sext <32 x i8> %load to <32 x i16>
store <32 x i16> %ext, <32 x i16> addrspace(3)* %out
@@ -916,7 +916,7 @@ define void @local_sextload_v32i8_to_v32i16(<32 x i16> addrspace(3)* %out, <32 x
}
; XFUNC-LABEL: {{^}}local_zextload_v64i8_to_v64i16:
-; define void @local_zextload_v64i8_to_v64i16(<64 x i16> addrspace(3)* %out, <64 x i8> addrspace(3)* %in) #0 {
+; define amdgpu_kernel void @local_zextload_v64i8_to_v64i16(<64 x i16> addrspace(3)* %out, <64 x i8> addrspace(3)* %in) #0 {
; %load = load <64 x i8>, <64 x i8> addrspace(3)* %in
; %ext = zext <64 x i8> %load to <64 x i16>
; store <64 x i16> %ext, <64 x i16> addrspace(3)* %out
@@ -924,7 +924,7 @@ define void @local_sextload_v32i8_to_v32i16(<32 x i16> addrspace(3)* %out, <32 x
; }
; XFUNC-LABEL: {{^}}local_sextload_v64i8_to_v64i16:
-; define void @local_sextload_v64i8_to_v64i16(<64 x i16> addrspace(3)* %out, <64 x i8> addrspace(3)* %in) #0 {
+; define amdgpu_kernel void @local_sextload_v64i8_to_v64i16(<64 x i16> addrspace(3)* %out, <64 x i8> addrspace(3)* %in) #0 {
; %load = load <64 x i8>, <64 x i8> addrspace(3)* %in
; %ext = sext <64 x i8> %load to <64 x i16>
; store <64 x i16> %ext, <64 x i16> addrspace(3)* %out
diff --git a/test/CodeGen/AMDGPU/load-weird-sizes.ll b/test/CodeGen/AMDGPU/load-weird-sizes.ll
index bc5e4945fb04..d6162c388b5b 100644
--- a/test/CodeGen/AMDGPU/load-weird-sizes.ll
+++ b/test/CodeGen/AMDGPU/load-weird-sizes.ll
@@ -8,7 +8,7 @@
; SI: {{flat|buffer}}_load_ubyte
; SI: {{flat|buffer}}_load_ushort
; SI: {{flat|buffer}}_store_dword
-define void @load_i24(i32 addrspace(1)* %out, i24 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @load_i24(i32 addrspace(1)* %out, i24 addrspace(1)* %in) #0 {
%1 = load i24, i24 addrspace(1)* %in
%2 = zext i24 %1 to i32
store i32 %2, i32 addrspace(1)* %out
@@ -21,7 +21,7 @@ define void @load_i24(i32 addrspace(1)* %out, i24 addrspace(1)* %in) #0 {
; CI-HSA: flat_load_dword [[VAL:v[0-9]+]]
; CI-HSA: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[VAL]]
-define void @load_i25(i32 addrspace(1)* %out, i25 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @load_i25(i32 addrspace(1)* %out, i25 addrspace(1)* %in) #0 {
%1 = load i25, i25 addrspace(1)* %in
%2 = zext i25 %1 to i32
store i32 %2, i32 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/local-64.ll b/test/CodeGen/AMDGPU/local-64.ll
index a7cee43187c1..bf4a93237bd4 100644
--- a/test/CodeGen/AMDGPU/local-64.ll
+++ b/test/CodeGen/AMDGPU/local-64.ll
@@ -5,7 +5,7 @@
; BOTH-LABEL: {{^}}local_i32_load
; BOTH: ds_read_b32 [[REG:v[0-9]+]], v{{[0-9]+}} offset:28
; BOTH: buffer_store_dword [[REG]],
-define void @local_i32_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounwind {
+define amdgpu_kernel void @local_i32_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %in, i32 7
%val = load i32, i32 addrspace(3)* %gep, align 4
store i32 %val, i32 addrspace(1)* %out, align 4
@@ -15,7 +15,7 @@ define void @local_i32_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounw
; BOTH-LABEL: {{^}}local_i32_load_0_offset
; BOTH: ds_read_b32 [[REG:v[0-9]+]], v{{[0-9]+}}
; BOTH: buffer_store_dword [[REG]],
-define void @local_i32_load_0_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounwind {
+define amdgpu_kernel void @local_i32_load_0_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounwind {
%val = load i32, i32 addrspace(3)* %in, align 4
store i32 %val, i32 addrspace(1)* %out, align 4
ret void
@@ -25,7 +25,7 @@ define void @local_i32_load_0_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %
; BOTH-NOT: ADD
; BOTH: ds_read_u8 [[REG:v[0-9]+]], {{v[0-9]+}} offset:65535
; BOTH: buffer_store_byte [[REG]],
-define void @local_i8_load_i16_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %in) nounwind {
+define amdgpu_kernel void @local_i8_load_i16_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %in) nounwind {
%gep = getelementptr i8, i8 addrspace(3)* %in, i32 65535
%val = load i8, i8 addrspace(3)* %gep, align 4
store i8 %val, i8 addrspace(1)* %out, align 4
@@ -40,7 +40,7 @@ define void @local_i8_load_i16_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)
; BOTH: v_mov_b32_e32 [[VREGADDR:v[0-9]+]], [[ADDR]]
; BOTH: ds_read_u8 [[REG:v[0-9]+]], [[VREGADDR]]
; BOTH: buffer_store_byte [[REG]],
-define void @local_i8_load_over_i16_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %in) nounwind {
+define amdgpu_kernel void @local_i8_load_over_i16_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %in) nounwind {
%gep = getelementptr i8, i8 addrspace(3)* %in, i32 65536
%val = load i8, i8 addrspace(3)* %gep, align 4
store i8 %val, i8 addrspace(1)* %out, align 4
@@ -51,7 +51,7 @@ define void @local_i8_load_over_i16_max_offset(i8 addrspace(1)* %out, i8 addrspa
; BOTH-NOT: ADD
; BOTH: ds_read_b64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}} offset:56
; BOTH: buffer_store_dwordx2 [[REG]],
-define void @local_i64_load(i64 addrspace(1)* %out, i64 addrspace(3)* %in) nounwind {
+define amdgpu_kernel void @local_i64_load(i64 addrspace(1)* %out, i64 addrspace(3)* %in) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %in, i32 7
%val = load i64, i64 addrspace(3)* %gep, align 8
store i64 %val, i64 addrspace(1)* %out, align 8
@@ -61,7 +61,7 @@ define void @local_i64_load(i64 addrspace(1)* %out, i64 addrspace(3)* %in) nounw
; BOTH-LABEL: {{^}}local_i64_load_0_offset
; BOTH: ds_read_b64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}
; BOTH: buffer_store_dwordx2 [[REG]],
-define void @local_i64_load_0_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %in) nounwind {
+define amdgpu_kernel void @local_i64_load_0_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %in) nounwind {
%val = load i64, i64 addrspace(3)* %in, align 8
store i64 %val, i64 addrspace(1)* %out, align 8
ret void
@@ -71,7 +71,7 @@ define void @local_i64_load_0_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %
; BOTH-NOT: ADD
; BOTH: ds_read_b64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}} offset:56
; BOTH: buffer_store_dwordx2 [[REG]],
-define void @local_f64_load(double addrspace(1)* %out, double addrspace(3)* %in) nounwind {
+define amdgpu_kernel void @local_f64_load(double addrspace(1)* %out, double addrspace(3)* %in) nounwind {
%gep = getelementptr double, double addrspace(3)* %in, i32 7
%val = load double, double addrspace(3)* %gep, align 8
store double %val, double addrspace(1)* %out, align 8
@@ -81,7 +81,7 @@ define void @local_f64_load(double addrspace(1)* %out, double addrspace(3)* %in)
; BOTH-LABEL: {{^}}local_f64_load_0_offset
; BOTH: ds_read_b64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}
; BOTH: buffer_store_dwordx2 [[REG]],
-define void @local_f64_load_0_offset(double addrspace(1)* %out, double addrspace(3)* %in) nounwind {
+define amdgpu_kernel void @local_f64_load_0_offset(double addrspace(1)* %out, double addrspace(3)* %in) nounwind {
%val = load double, double addrspace(3)* %in, align 8
store double %val, double addrspace(1)* %out, align 8
ret void
@@ -90,7 +90,7 @@ define void @local_f64_load_0_offset(double addrspace(1)* %out, double addrspace
; BOTH-LABEL: {{^}}local_i64_store:
; BOTH-NOT: ADD
; BOTH: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:56
-define void @local_i64_store(i64 addrspace(3)* %out) nounwind {
+define amdgpu_kernel void @local_i64_store(i64 addrspace(3)* %out) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %out, i32 7
store i64 5678, i64 addrspace(3)* %gep, align 8
ret void
@@ -99,7 +99,7 @@ define void @local_i64_store(i64 addrspace(3)* %out) nounwind {
; BOTH-LABEL: {{^}}local_i64_store_0_offset:
; BOTH-NOT: ADD
; BOTH: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}
-define void @local_i64_store_0_offset(i64 addrspace(3)* %out) nounwind {
+define amdgpu_kernel void @local_i64_store_0_offset(i64 addrspace(3)* %out) nounwind {
store i64 1234, i64 addrspace(3)* %out, align 8
ret void
}
@@ -107,7 +107,7 @@ define void @local_i64_store_0_offset(i64 addrspace(3)* %out) nounwind {
; BOTH-LABEL: {{^}}local_f64_store:
; BOTH-NOT: ADD
; BOTH: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:56
-define void @local_f64_store(double addrspace(3)* %out) nounwind {
+define amdgpu_kernel void @local_f64_store(double addrspace(3)* %out) nounwind {
%gep = getelementptr double, double addrspace(3)* %out, i32 7
store double 16.0, double addrspace(3)* %gep, align 8
ret void
@@ -115,7 +115,7 @@ define void @local_f64_store(double addrspace(3)* %out) nounwind {
; BOTH-LABEL: {{^}}local_f64_store_0_offset
; BOTH: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}
-define void @local_f64_store_0_offset(double addrspace(3)* %out) nounwind {
+define amdgpu_kernel void @local_f64_store_0_offset(double addrspace(3)* %out) nounwind {
store double 20.0, double addrspace(3)* %out, align 8
ret void
}
@@ -124,7 +124,7 @@ define void @local_f64_store_0_offset(double addrspace(3)* %out) nounwind {
; BOTH-NOT: ADD
; BOTH: ds_write2_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}} offset0:14 offset1:15
; BOTH: s_endpgm
-define void @local_v2i64_store(<2 x i64> addrspace(3)* %out) nounwind {
+define amdgpu_kernel void @local_v2i64_store(<2 x i64> addrspace(3)* %out) nounwind {
%gep = getelementptr <2 x i64>, <2 x i64> addrspace(3)* %out, i32 7
store <2 x i64> <i64 5678, i64 5678>, <2 x i64> addrspace(3)* %gep, align 16
ret void
@@ -134,7 +134,7 @@ define void @local_v2i64_store(<2 x i64> addrspace(3)* %out) nounwind {
; BOTH-NOT: ADD
; BOTH: ds_write2_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}} offset1:1
; BOTH: s_endpgm
-define void @local_v2i64_store_0_offset(<2 x i64> addrspace(3)* %out) nounwind {
+define amdgpu_kernel void @local_v2i64_store_0_offset(<2 x i64> addrspace(3)* %out) nounwind {
store <2 x i64> <i64 1234, i64 1234>, <2 x i64> addrspace(3)* %out, align 16
ret void
}
@@ -144,7 +144,7 @@ define void @local_v2i64_store_0_offset(<2 x i64> addrspace(3)* %out) nounwind {
; BOTH-DAG: ds_write2_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}} offset0:30 offset1:31
; BOTH-DAG: ds_write2_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}} offset0:28 offset1:29
; BOTH: s_endpgm
-define void @local_v4i64_store(<4 x i64> addrspace(3)* %out) nounwind {
+define amdgpu_kernel void @local_v4i64_store(<4 x i64> addrspace(3)* %out) nounwind {
%gep = getelementptr <4 x i64>, <4 x i64> addrspace(3)* %out, i32 7
store <4 x i64> <i64 5678, i64 5678, i64 5678, i64 5678>, <4 x i64> addrspace(3)* %gep, align 16
ret void
@@ -155,7 +155,7 @@ define void @local_v4i64_store(<4 x i64> addrspace(3)* %out) nounwind {
; BOTH-DAG: ds_write2_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}} offset0:2 offset1:3
; BOTH-DAG: ds_write2_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}} offset1:1
; BOTH: s_endpgm
-define void @local_v4i64_store_0_offset(<4 x i64> addrspace(3)* %out) nounwind {
+define amdgpu_kernel void @local_v4i64_store_0_offset(<4 x i64> addrspace(3)* %out) nounwind {
store <4 x i64> <i64 1234, i64 1234, i64 1234, i64 1234>, <4 x i64> addrspace(3)* %out, align 16
ret void
}
diff --git a/test/CodeGen/AMDGPU/local-atomics.ll b/test/CodeGen/AMDGPU/local-atomics.ll
index 6714a28aa43a..de029d964b0d 100644
--- a/test/CodeGen/AMDGPU/local-atomics.ll
+++ b/test/CodeGen/AMDGPU/local-atomics.ll
@@ -11,7 +11,7 @@
; GCN: ds_wrxchg_rtn_b32 [[RESULT:v[0-9]+]], [[VPTR]], [[DATA]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
-define void @lds_atomic_xchg_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_xchg_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw xchg i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -21,7 +21,7 @@ define void @lds_atomic_xchg_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %
; EG: LDS_WRXCHG_RET *
; GCN: ds_wrxchg_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_xchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_xchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw xchg i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -37,7 +37,7 @@ define void @lds_atomic_xchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspac
; GCN: ds_add_rtn_u32 [[RESULT:v[0-9]+]], [[VPTR]], [[DATA]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
-define void @lds_atomic_add_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_add_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw add i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -47,7 +47,7 @@ define void @lds_atomic_add_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
; EG: LDS_ADD_RET *
; GCN: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_add_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_add_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -59,7 +59,7 @@ define void @lds_atomic_add_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
; SI: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; CIVI: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_add_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @lds_atomic_add_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
%sub = sub i32 %a, %b
%add = add i32 %sub, 4
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 %add
@@ -73,7 +73,7 @@ define void @lds_atomic_add_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i32 ad
; GCN: v_mov_b32_e32 [[ONE:v[0-9]+]], 1{{$}}
; GCN: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[ONE]]
; GCN: s_endpgm
-define void @lds_atomic_add1_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_add1_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw add i32 addrspace(3)* %ptr, i32 1 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -84,7 +84,7 @@ define void @lds_atomic_add1_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %
; GCN: v_mov_b32_e32 [[ONE:v[0-9]+]], 1{{$}}
; GCN: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[ONE]] offset:16
; GCN: s_endpgm
-define void @lds_atomic_add1_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_add1_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -96,7 +96,7 @@ define void @lds_atomic_add1_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspac
; SI: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; CIVI: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_add1_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @lds_atomic_add1_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
%sub = sub i32 %a, %b
%add = add i32 %sub, 4
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 %add
@@ -109,7 +109,7 @@ define void @lds_atomic_add1_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i32 a
; EG: LDS_SUB_RET *
; GCN: ds_sub_rtn_u32
; GCN: s_endpgm
-define void @lds_atomic_sub_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_sub_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw sub i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -119,7 +119,7 @@ define void @lds_atomic_sub_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
; EG: LDS_SUB_RET *
; GCN: ds_sub_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_sub_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_sub_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -131,7 +131,7 @@ define void @lds_atomic_sub_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
; GCN: v_mov_b32_e32 [[ONE:v[0-9]+]], 1{{$}}
; GCN: ds_sub_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[ONE]]
; GCN: s_endpgm
-define void @lds_atomic_sub1_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_sub1_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw sub i32 addrspace(3)* %ptr, i32 1 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -142,7 +142,7 @@ define void @lds_atomic_sub1_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %
; GCN: v_mov_b32_e32 [[ONE:v[0-9]+]], 1{{$}}
; GCN: ds_sub_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[ONE]] offset:16
; GCN: s_endpgm
-define void @lds_atomic_sub1_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_sub1_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i32 addrspace(3)* %gep, i32 1 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -153,7 +153,7 @@ define void @lds_atomic_sub1_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspac
; EG: LDS_AND_RET *
; GCN: ds_and_rtn_b32
; GCN: s_endpgm
-define void @lds_atomic_and_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_and_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw and i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -163,7 +163,7 @@ define void @lds_atomic_and_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
; EG: LDS_AND_RET *
; GCN: ds_and_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_and_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_and_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw and i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -174,7 +174,7 @@ define void @lds_atomic_and_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
; EG: LDS_OR_RET *
; GCN: ds_or_rtn_b32
; GCN: s_endpgm
-define void @lds_atomic_or_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_or_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw or i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -184,7 +184,7 @@ define void @lds_atomic_or_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %pt
; EG: LDS_OR_RET *
; GCN: ds_or_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_or_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_or_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw or i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -195,7 +195,7 @@ define void @lds_atomic_or_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(
; EG: LDS_XOR_RET *
; GCN: ds_xor_rtn_b32
; GCN: s_endpgm
-define void @lds_atomic_xor_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_xor_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw xor i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -205,7 +205,7 @@ define void @lds_atomic_xor_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
; EG: LDS_XOR_RET *
; GCN: ds_xor_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_xor_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_xor_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw xor i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -214,7 +214,7 @@ define void @lds_atomic_xor_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
; FIXME: There is no atomic nand instruction, so we somehow need to expand this.
; XFUNC-LABEL: {{^}}lds_atomic_nand_ret_i32:
-; define void @lds_atomic_nand_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+; define amdgpu_kernel void @lds_atomic_nand_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
; %result = atomicrmw nand i32 addrspace(3)* %ptr, i32 4 seq_cst
; store i32 %result, i32 addrspace(1)* %out, align 4
; ret void
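; As a minimal sketch (hypothetical, not part of this change), atomicrmw nand
; could be expanded into a cmpxchg loop in the usual AtomicExpand style, where
; nand means ~(old & val); block and value names below are illustrative only:
;   entry:
;     %init = load i32, i32 addrspace(3)* %ptr
;     br label %loop
;   loop:
;     %old = phi i32 [ %init, %entry ], [ %loaded, %loop ]
;     %and = and i32 %old, 4
;     %nand = xor i32 %and, -1
;     %pair = cmpxchg i32 addrspace(3)* %ptr, i32 %old, i32 %nand seq_cst seq_cst
;     %loaded = extractvalue { i32, i1 } %pair, 0
;     %ok = extractvalue { i32, i1 } %pair, 1
;     br i1 %ok, label %done, label %loop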
@@ -224,7 +224,7 @@ define void @lds_atomic_xor_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
; EG: LDS_MIN_INT_RET *
; GCN: ds_min_rtn_i32
; GCN: s_endpgm
-define void @lds_atomic_min_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_min_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw min i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -234,7 +234,7 @@ define void @lds_atomic_min_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
; EG: LDS_MIN_INT_RET *
; GCN: ds_min_rtn_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_min_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_min_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw min i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -245,7 +245,7 @@ define void @lds_atomic_min_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
; EG: LDS_MAX_INT_RET *
; GCN: ds_max_rtn_i32
; GCN: s_endpgm
-define void @lds_atomic_max_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_max_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw max i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -255,7 +255,7 @@ define void @lds_atomic_max_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
; EG: LDS_MAX_INT_RET *
; GCN: ds_max_rtn_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_max_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_max_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw max i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -266,7 +266,7 @@ define void @lds_atomic_max_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
; EG: LDS_MIN_UINT_RET *
; GCN: ds_min_rtn_u32
; GCN: s_endpgm
-define void @lds_atomic_umin_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_umin_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw umin i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -276,7 +276,7 @@ define void @lds_atomic_umin_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %
; EG: LDS_MIN_UINT_RET *
; GCN: ds_min_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_umin_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_umin_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw umin i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -287,7 +287,7 @@ define void @lds_atomic_umin_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspac
; EG: LDS_MAX_UINT_RET *
; GCN: ds_max_rtn_u32
; GCN: s_endpgm
-define void @lds_atomic_umax_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_umax_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw umax i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -297,7 +297,7 @@ define void @lds_atomic_umax_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %
; EG: LDS_MAX_UINT_RET *
; GCN: ds_max_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_umax_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_umax_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw umax i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -310,7 +310,7 @@ define void @lds_atomic_umax_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspac
; GCN: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
; GCN: ds_wrxchg_rtn_b32 [[RESULT:v[0-9]+]], [[VPTR]], [[DATA]]
; GCN: s_endpgm
-define void @lds_atomic_xchg_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_xchg_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw xchg i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
@@ -318,7 +318,7 @@ define void @lds_atomic_xchg_noret_i32(i32 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_xchg_noret_i32_offset:
; GCN: ds_wrxchg_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_xchg_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_xchg_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw xchg i32 addrspace(3)* %gep, i32 4 seq_cst
ret void
@@ -330,7 +330,7 @@ define void @lds_atomic_xchg_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
; GCN: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
; GCN: ds_add_u32 [[VPTR]], [[DATA]]
; GCN: s_endpgm
-define void @lds_atomic_add_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_add_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw add i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
@@ -338,7 +338,7 @@ define void @lds_atomic_add_noret_i32(i32 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_add_noret_i32_offset:
; GCN: ds_add_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_add_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_add_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
ret void
@@ -348,7 +348,7 @@ define void @lds_atomic_add_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
; SI: ds_add_u32 v{{[0-9]+}}, v{{[0-9]+}}
; CIVI: ds_add_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_add_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @lds_atomic_add_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
%sub = sub i32 %a, %b
%add = add i32 %sub, 4
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 %add
@@ -360,7 +360,7 @@ define void @lds_atomic_add_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32
; GCN: v_mov_b32_e32 [[ONE:v[0-9]+]], 1{{$}}
; GCN: ds_add_u32 v{{[0-9]+}}, [[ONE]]
; GCN: s_endpgm
-define void @lds_atomic_add1_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_add1_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw add i32 addrspace(3)* %ptr, i32 1 seq_cst
ret void
}
@@ -369,7 +369,7 @@ define void @lds_atomic_add1_noret_i32(i32 addrspace(3)* %ptr) nounwind {
; GCN: v_mov_b32_e32 [[ONE:v[0-9]+]], 1{{$}}
; GCN: ds_add_u32 v{{[0-9]+}}, [[ONE]] offset:16
; GCN: s_endpgm
-define void @lds_atomic_add1_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_add1_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
ret void
@@ -379,7 +379,7 @@ define void @lds_atomic_add1_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
; SI: ds_add_u32 v{{[0-9]+}}, v{{[0-9]+}}
; CIVI: ds_add_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_add1_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @lds_atomic_add1_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
%sub = sub i32 %a, %b
%add = add i32 %sub, 4
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 %add
@@ -390,7 +390,7 @@ define void @lds_atomic_add1_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32
; FUNC-LABEL: {{^}}lds_atomic_sub_noret_i32:
; GCN: ds_sub_u32
; GCN: s_endpgm
-define void @lds_atomic_sub_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_sub_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw sub i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
@@ -398,7 +398,7 @@ define void @lds_atomic_sub_noret_i32(i32 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_sub_noret_i32_offset:
; GCN: ds_sub_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_sub_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_sub_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i32 addrspace(3)* %gep, i32 4 seq_cst
ret void
@@ -408,7 +408,7 @@ define void @lds_atomic_sub_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
; GCN: v_mov_b32_e32 [[ONE:v[0-9]+]], 1{{$}}
; GCN: ds_sub_u32 v{{[0-9]+}}, [[ONE]]
; GCN: s_endpgm
-define void @lds_atomic_sub1_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_sub1_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw sub i32 addrspace(3)* %ptr, i32 1 seq_cst
ret void
}
@@ -417,7 +417,7 @@ define void @lds_atomic_sub1_noret_i32(i32 addrspace(3)* %ptr) nounwind {
; GCN: v_mov_b32_e32 [[ONE:v[0-9]+]], 1{{$}}
; GCN: ds_sub_u32 v{{[0-9]+}}, [[ONE]] offset:16
; GCN: s_endpgm
-define void @lds_atomic_sub1_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_sub1_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i32 addrspace(3)* %gep, i32 1 seq_cst
ret void
@@ -426,7 +426,7 @@ define void @lds_atomic_sub1_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_and_noret_i32:
; GCN: ds_and_b32
; GCN: s_endpgm
-define void @lds_atomic_and_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_and_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw and i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
@@ -434,7 +434,7 @@ define void @lds_atomic_and_noret_i32(i32 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_and_noret_i32_offset:
; GCN: ds_and_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_and_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_and_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw and i32 addrspace(3)* %gep, i32 4 seq_cst
ret void
@@ -443,7 +443,7 @@ define void @lds_atomic_and_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_or_noret_i32:
; GCN: ds_or_b32
; GCN: s_endpgm
-define void @lds_atomic_or_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_or_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw or i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
@@ -451,7 +451,7 @@ define void @lds_atomic_or_noret_i32(i32 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_or_noret_i32_offset:
; GCN: ds_or_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_or_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_or_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw or i32 addrspace(3)* %gep, i32 4 seq_cst
ret void
@@ -460,7 +460,7 @@ define void @lds_atomic_or_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_xor_noret_i32:
; GCN: ds_xor_b32
; GCN: s_endpgm
-define void @lds_atomic_xor_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_xor_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw xor i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
@@ -468,7 +468,7 @@ define void @lds_atomic_xor_noret_i32(i32 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_xor_noret_i32_offset:
; GCN: ds_xor_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_xor_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_xor_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw xor i32 addrspace(3)* %gep, i32 4 seq_cst
ret void
@@ -476,7 +476,7 @@ define void @lds_atomic_xor_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
; FIXME: There is no atomic nand instruction, so we somehow need to expand this.
; XFUNC-LABEL: {{^}}lds_atomic_nand_noret_i32:
-; define void @lds_atomic_nand_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+; define amdgpu_kernel void @lds_atomic_nand_noret_i32(i32 addrspace(3)* %ptr) nounwind {
; %result = atomicrmw nand i32 addrspace(3)* %ptr, i32 4 seq_cst
; ret void
; }
@@ -484,7 +484,7 @@ define void @lds_atomic_xor_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_min_noret_i32:
; GCN: ds_min_i32
; GCN: s_endpgm
-define void @lds_atomic_min_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_min_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw min i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
@@ -492,7 +492,7 @@ define void @lds_atomic_min_noret_i32(i32 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_min_noret_i32_offset:
; GCN: ds_min_i32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_min_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_min_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw min i32 addrspace(3)* %gep, i32 4 seq_cst
ret void
@@ -501,7 +501,7 @@ define void @lds_atomic_min_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_max_noret_i32:
; GCN: ds_max_i32
; GCN: s_endpgm
-define void @lds_atomic_max_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_max_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw max i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
@@ -509,7 +509,7 @@ define void @lds_atomic_max_noret_i32(i32 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_max_noret_i32_offset:
; GCN: ds_max_i32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_max_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_max_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw max i32 addrspace(3)* %gep, i32 4 seq_cst
ret void
@@ -518,7 +518,7 @@ define void @lds_atomic_max_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_umin_noret_i32:
; GCN: ds_min_u32
; GCN: s_endpgm
-define void @lds_atomic_umin_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_umin_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw umin i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
@@ -526,7 +526,7 @@ define void @lds_atomic_umin_noret_i32(i32 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_umin_noret_i32_offset:
; GCN: ds_min_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_umin_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_umin_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw umin i32 addrspace(3)* %gep, i32 4 seq_cst
ret void
@@ -535,7 +535,7 @@ define void @lds_atomic_umin_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_umax_noret_i32:
; GCN: ds_max_u32
; GCN: s_endpgm
-define void @lds_atomic_umax_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_umax_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw umax i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
@@ -543,7 +543,7 @@ define void @lds_atomic_umax_noret_i32(i32 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_umax_noret_i32_offset:
; GCN: ds_max_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
-define void @lds_atomic_umax_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_umax_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw umax i32 addrspace(3)* %gep, i32 4 seq_cst
ret void
diff --git a/test/CodeGen/AMDGPU/local-atomics64.ll b/test/CodeGen/AMDGPU/local-atomics64.ll
index c88917812eda..6572a7bcd4fe 100644
--- a/test/CodeGen/AMDGPU/local-atomics64.ll
+++ b/test/CodeGen/AMDGPU/local-atomics64.ll
@@ -4,7 +4,7 @@
; GCN-LABEL: {{^}}lds_atomic_xchg_ret_i64:
; GCN: ds_wrxchg_rtn_b64
; GCN: s_endpgm
-define void @lds_atomic_xchg_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_xchg_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
@@ -13,7 +13,7 @@ define void @lds_atomic_xchg_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %
; GCN-LABEL: {{^}}lds_atomic_xchg_ret_i64_offset:
; GCN: ds_wrxchg_rtn_b64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_xchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_xchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -23,7 +23,7 @@ define void @lds_atomic_xchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspac
; GCN-LABEL: {{^}}lds_atomic_add_ret_i64:
; GCN: ds_add_rtn_u64
; GCN: s_endpgm
-define void @lds_atomic_add_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_add_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
@@ -38,7 +38,7 @@ define void @lds_atomic_add_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
; GCN: ds_add_rtn_u64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}} offset:32
; GCN: buffer_store_dwordx2 [[RESULT]],
; GCN: s_endpgm
-define void @lds_atomic_add_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_add_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i64 4
%result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -51,7 +51,7 @@ define void @lds_atomic_add_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
; GCN: ds_add_rtn_u64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}
; GCN: buffer_store_dwordx2 [[RESULT]],
; GCN: s_endpgm
-define void @lds_atomic_add1_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_add1_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
@@ -60,7 +60,7 @@ define void @lds_atomic_add1_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %
; GCN-LABEL: {{^}}lds_atomic_add1_ret_i64_offset:
; GCN: ds_add_rtn_u64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_add1_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_add1_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -70,7 +70,7 @@ define void @lds_atomic_add1_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspac
; GCN-LABEL: {{^}}lds_atomic_sub_ret_i64:
; GCN: ds_sub_rtn_u64
; GCN: s_endpgm
-define void @lds_atomic_sub_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_sub_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
@@ -79,7 +79,7 @@ define void @lds_atomic_sub_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
; GCN-LABEL: {{^}}lds_atomic_sub_ret_i64_offset:
; GCN: ds_sub_rtn_u64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_sub_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_sub_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -92,7 +92,7 @@ define void @lds_atomic_sub_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
; GCN: ds_sub_rtn_u64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}
; GCN: buffer_store_dwordx2 [[RESULT]],
; GCN: s_endpgm
-define void @lds_atomic_sub1_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_sub1_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
@@ -101,7 +101,7 @@ define void @lds_atomic_sub1_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %
; GCN-LABEL: {{^}}lds_atomic_sub1_ret_i64_offset:
; GCN: ds_sub_rtn_u64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_sub1_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_sub1_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -111,7 +111,7 @@ define void @lds_atomic_sub1_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspac
; GCN-LABEL: {{^}}lds_atomic_and_ret_i64:
; GCN: ds_and_rtn_b64
; GCN: s_endpgm
-define void @lds_atomic_and_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_and_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw and i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
@@ -120,7 +120,7 @@ define void @lds_atomic_and_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
; GCN-LABEL: {{^}}lds_atomic_and_ret_i64_offset:
; GCN: ds_and_rtn_b64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_and_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_and_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw and i64 addrspace(3)* %gep, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -130,7 +130,7 @@ define void @lds_atomic_and_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
; GCN-LABEL: {{^}}lds_atomic_or_ret_i64:
; GCN: ds_or_rtn_b64
; GCN: s_endpgm
-define void @lds_atomic_or_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_or_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw or i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
@@ -139,7 +139,7 @@ define void @lds_atomic_or_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %pt
; GCN-LABEL: {{^}}lds_atomic_or_ret_i64_offset:
; GCN: ds_or_rtn_b64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_or_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_or_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw or i64 addrspace(3)* %gep, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -149,7 +149,7 @@ define void @lds_atomic_or_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(
; GCN-LABEL: {{^}}lds_atomic_xor_ret_i64:
; GCN: ds_xor_rtn_b64
; GCN: s_endpgm
-define void @lds_atomic_xor_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_xor_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw xor i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
@@ -158,7 +158,7 @@ define void @lds_atomic_xor_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
; GCN-LABEL: {{^}}lds_atomic_xor_ret_i64_offset:
; GCN: ds_xor_rtn_b64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_xor_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_xor_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw xor i64 addrspace(3)* %gep, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -167,7 +167,7 @@ define void @lds_atomic_xor_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
; FIXME: There is no atomic nand instruction, so we somehow need to expand this.
; XGCN-LABEL: {{^}}lds_atomic_nand_ret_i64:
-; define void @lds_atomic_nand_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+; define amdgpu_kernel void @lds_atomic_nand_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
; %result = atomicrmw nand i64 addrspace(3)* %ptr, i64 4 seq_cst
; store i64 %result, i64 addrspace(1)* %out, align 8
; ret void
@@ -176,7 +176,7 @@ define void @lds_atomic_xor_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
; GCN-LABEL: {{^}}lds_atomic_min_ret_i64:
; GCN: ds_min_rtn_i64
; GCN: s_endpgm
-define void @lds_atomic_min_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_min_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw min i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
@@ -185,7 +185,7 @@ define void @lds_atomic_min_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
; GCN-LABEL: {{^}}lds_atomic_min_ret_i64_offset:
; GCN: ds_min_rtn_i64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_min_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_min_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw min i64 addrspace(3)* %gep, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -195,7 +195,7 @@ define void @lds_atomic_min_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
; GCN-LABEL: {{^}}lds_atomic_max_ret_i64:
; GCN: ds_max_rtn_i64
; GCN: s_endpgm
-define void @lds_atomic_max_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_max_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw max i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
@@ -204,7 +204,7 @@ define void @lds_atomic_max_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
; GCN-LABEL: {{^}}lds_atomic_max_ret_i64_offset:
; GCN: ds_max_rtn_i64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_max_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_max_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw max i64 addrspace(3)* %gep, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -214,7 +214,7 @@ define void @lds_atomic_max_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
; GCN-LABEL: {{^}}lds_atomic_umin_ret_i64:
; GCN: ds_min_rtn_u64
; GCN: s_endpgm
-define void @lds_atomic_umin_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_umin_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
@@ -223,7 +223,7 @@ define void @lds_atomic_umin_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %
; GCN-LABEL: {{^}}lds_atomic_umin_ret_i64_offset:
; GCN: ds_min_rtn_u64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_umin_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_umin_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -233,7 +233,7 @@ define void @lds_atomic_umin_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspac
; GCN-LABEL: {{^}}lds_atomic_umax_ret_i64:
; GCN: ds_max_rtn_u64
; GCN: s_endpgm
-define void @lds_atomic_umax_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_umax_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw umax i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
@@ -242,7 +242,7 @@ define void @lds_atomic_umax_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %
; GCN-LABEL: {{^}}lds_atomic_umax_ret_i64_offset:
; GCN: ds_max_rtn_u64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_umax_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_umax_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw umax i64 addrspace(3)* %gep, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -252,7 +252,7 @@ define void @lds_atomic_umax_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspac
; GCN-LABEL: {{^}}lds_atomic_xchg_noret_i64:
; GCN: ds_wrxchg_rtn_b64
; GCN: s_endpgm
-define void @lds_atomic_xchg_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_xchg_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
@@ -260,7 +260,7 @@ define void @lds_atomic_xchg_noret_i64(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_xchg_noret_i64_offset:
; GCN: ds_wrxchg_rtn_b64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_xchg_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_xchg_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
ret void
@@ -269,7 +269,7 @@ define void @lds_atomic_xchg_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_add_noret_i64:
; GCN: ds_add_u64
; GCN: s_endpgm
-define void @lds_atomic_add_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_add_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
@@ -282,7 +282,7 @@ define void @lds_atomic_add_noret_i64(i64 addrspace(3)* %ptr) nounwind {
; GCN: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
; GCN: ds_add_u64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_add_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_add_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i64 4
%result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
ret void
@@ -293,7 +293,7 @@ define void @lds_atomic_add_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
; GCN-DAG: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], 0{{$}}
; GCN: ds_add_u64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}
; GCN: s_endpgm
-define void @lds_atomic_add1_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_add1_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst
ret void
}
@@ -301,7 +301,7 @@ define void @lds_atomic_add1_noret_i64(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_add1_noret_i64_offset:
; GCN: ds_add_u64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_add1_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_add1_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
ret void
@@ -310,7 +310,7 @@ define void @lds_atomic_add1_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_sub_noret_i64:
; GCN: ds_sub_u64
; GCN: s_endpgm
-define void @lds_atomic_sub_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_sub_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
@@ -318,7 +318,7 @@ define void @lds_atomic_sub_noret_i64(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_sub_noret_i64_offset:
; GCN: ds_sub_u64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_sub_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_sub_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
ret void
@@ -329,7 +329,7 @@ define void @lds_atomic_sub_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
; GCN: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], 0{{$}}
; GCN: ds_sub_u64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}
; GCN: s_endpgm
-define void @lds_atomic_sub1_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_sub1_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst
ret void
}
@@ -337,7 +337,7 @@ define void @lds_atomic_sub1_noret_i64(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_sub1_noret_i64_offset:
; GCN: ds_sub_u64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_sub1_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_sub1_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
ret void
@@ -346,7 +346,7 @@ define void @lds_atomic_sub1_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_and_noret_i64:
; GCN: ds_and_b64
; GCN: s_endpgm
-define void @lds_atomic_and_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_and_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw and i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
@@ -354,7 +354,7 @@ define void @lds_atomic_and_noret_i64(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_and_noret_i64_offset:
; GCN: ds_and_b64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_and_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_and_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw and i64 addrspace(3)* %gep, i64 4 seq_cst
ret void
@@ -363,7 +363,7 @@ define void @lds_atomic_and_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_or_noret_i64:
; GCN: ds_or_b64
; GCN: s_endpgm
-define void @lds_atomic_or_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_or_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw or i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
@@ -371,7 +371,7 @@ define void @lds_atomic_or_noret_i64(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_or_noret_i64_offset:
; GCN: ds_or_b64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_or_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_or_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw or i64 addrspace(3)* %gep, i64 4 seq_cst
ret void
@@ -380,7 +380,7 @@ define void @lds_atomic_or_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_xor_noret_i64:
; GCN: ds_xor_b64
; GCN: s_endpgm
-define void @lds_atomic_xor_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_xor_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw xor i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
@@ -388,7 +388,7 @@ define void @lds_atomic_xor_noret_i64(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_xor_noret_i64_offset:
; GCN: ds_xor_b64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_xor_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_xor_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw xor i64 addrspace(3)* %gep, i64 4 seq_cst
ret void
@@ -396,7 +396,7 @@ define void @lds_atomic_xor_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
; FIXME: There is no atomic nand instruction, so we somehow need to expand this.
; XGCN-LABEL: {{^}}lds_atomic_nand_noret_i64:
-; define void @lds_atomic_nand_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+; define amdgpu_kernel void @lds_atomic_nand_noret_i64(i64 addrspace(3)* %ptr) nounwind {
; %result = atomicrmw nand i64 addrspace(3)* %ptr, i64 4 seq_cst
; ret void
; }
@@ -404,7 +404,7 @@ define void @lds_atomic_xor_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_min_noret_i64:
; GCN: ds_min_i64
; GCN: s_endpgm
-define void @lds_atomic_min_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_min_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw min i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
@@ -412,7 +412,7 @@ define void @lds_atomic_min_noret_i64(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_min_noret_i64_offset:
; GCN: ds_min_i64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_min_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_min_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw min i64 addrspace(3)* %gep, i64 4 seq_cst
ret void
@@ -421,7 +421,7 @@ define void @lds_atomic_min_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_max_noret_i64:
; GCN: ds_max_i64
; GCN: s_endpgm
-define void @lds_atomic_max_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_max_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw max i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
@@ -429,7 +429,7 @@ define void @lds_atomic_max_noret_i64(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_max_noret_i64_offset:
; GCN: ds_max_i64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_max_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_max_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw max i64 addrspace(3)* %gep, i64 4 seq_cst
ret void
@@ -438,7 +438,7 @@ define void @lds_atomic_max_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_umin_noret_i64:
; GCN: ds_min_u64
; GCN: s_endpgm
-define void @lds_atomic_umin_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_umin_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
@@ -446,7 +446,7 @@ define void @lds_atomic_umin_noret_i64(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_umin_noret_i64_offset:
; GCN: ds_min_u64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_umin_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_umin_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
ret void
@@ -455,7 +455,7 @@ define void @lds_atomic_umin_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_umax_noret_i64:
; GCN: ds_max_u64
; GCN: s_endpgm
-define void @lds_atomic_umax_noret_i64(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_umax_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw umax i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
@@ -463,7 +463,7 @@ define void @lds_atomic_umax_noret_i64(i64 addrspace(3)* %ptr) nounwind {
; GCN-LABEL: {{^}}lds_atomic_umax_noret_i64_offset:
; GCN: ds_max_u64 {{.*}} offset:32
; GCN: s_endpgm
-define void @lds_atomic_umax_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
+define amdgpu_kernel void @lds_atomic_umax_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw umax i64 addrspace(3)* %gep, i64 4 seq_cst
ret void
diff --git a/test/CodeGen/AMDGPU/local-memory.amdgcn.ll b/test/CodeGen/AMDGPU/local-memory.amdgcn.ll
index a57e4f595322..47b6558241b9 100644
--- a/test/CodeGen/AMDGPU/local-memory.amdgcn.ll
+++ b/test/CodeGen/AMDGPU/local-memory.amdgcn.ll
@@ -17,7 +17,7 @@
; GCN: s_barrier
; GCN: ds_read_b32 {{v[0-9]+}},
-define void @local_memory(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @local_memory(i32 addrspace(1)* %out) #0 {
entry:
%y.i = call i32 @llvm.amdgcn.workitem.id.x() #1
%arrayidx = getelementptr inbounds [128 x i32], [128 x i32] addrspace(3)* @local_memory.local_mem, i32 0, i32 %y.i
@@ -45,11 +45,7 @@ entry:
; GCN-LABEL: {{^}}local_memory_two_objects:
; GCN: v_lshlrev_b32_e32 [[ADDRW:v[0-9]+]], 2, v0
; CI-DAG: ds_write2_b32 [[ADDRW]], {{v[0-9]+}}, {{v[0-9]+}} offset1:4
-
-; SI: v_add_i32_e32 [[ADDRW_OFF:v[0-9]+]], vcc, 16, [[ADDRW]]
-
-; SI-DAG: ds_write_b32 [[ADDRW]],
-; SI-DAG: ds_write_b32 [[ADDRW_OFF]],
+; SI-DAG: ds_write2_b32 [[ADDRW]], {{v[0-9]+}}, {{v[0-9]+}} offset1:4
; GCN: s_barrier
@@ -61,7 +57,7 @@ entry:
; CI: v_sub_i32_e32 [[SUB:v[0-9]+]], vcc, 0, [[ADDRW]]
; CI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, [[SUB]] offset0:3 offset1:7
-define void @local_memory_two_objects(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @local_memory_two_objects(i32 addrspace(1)* %out) #0 {
entry:
%x.i = call i32 @llvm.amdgcn.workitem.id.x()
%arrayidx = getelementptr inbounds [4 x i32], [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem0, i32 0, i32 %x.i
diff --git a/test/CodeGen/AMDGPU/local-memory.ll b/test/CodeGen/AMDGPU/local-memory.ll
index 1a11332f865d..6124237d7638 100644
--- a/test/CodeGen/AMDGPU/local-memory.ll
+++ b/test/CodeGen/AMDGPU/local-memory.ll
@@ -14,7 +14,7 @@
; GCN: ds_read_b32 v{{[0-9]+}}, v[[ZERO]] offset:4
; R600: LDS_READ_RET
-define void @load_i32_local_const_ptr(i32 addrspace(1)* %out, i32 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @load_i32_local_const_ptr(i32 addrspace(1)* %out, i32 addrspace(3)* %in) #0 {
entry:
%tmp0 = getelementptr [512 x i32], [512 x i32] addrspace(3)* @lds, i32 0, i32 1
%tmp1 = load i32, i32 addrspace(3)* %tmp0
@@ -30,7 +30,7 @@ entry:
; R600: LDS_READ_RET
; GCN-DAG: ds_read_b32
; GCN-DAG: ds_read2_b32
-define void @load_i32_v2i32_local(<2 x i32> addrspace(1)* %out, i32 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @load_i32_v2i32_local(<2 x i32> addrspace(1)* %out, i32 addrspace(3)* %in) #0 {
%scalar = load i32, i32 addrspace(3)* %in
%tmp0 = bitcast i32 addrspace(3)* %in to <2 x i32> addrspace(3)*
%vec_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(3)* %tmp0, i32 2
diff --git a/test/CodeGen/AMDGPU/local-memory.r600.ll b/test/CodeGen/AMDGPU/local-memory.r600.ll
index 9841b8882b39..c8f4e4c986a7 100644
--- a/test/CodeGen/AMDGPU/local-memory.r600.ll
+++ b/test/CodeGen/AMDGPU/local-memory.r600.ll
@@ -15,7 +15,7 @@
; EG-NEXT: ALU clause
; EG: LDS_READ_RET
-define void @local_memory(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @local_memory(i32 addrspace(1)* %out) #0 {
entry:
%y.i = call i32 @llvm.r600.read.tidig.x() #1
%arrayidx = getelementptr inbounds [128 x i32], [128 x i32] addrspace(3)* @local_memory.local_mem, i32 0, i32 %y.i
@@ -57,7 +57,7 @@ entry:
; EG: LDS_READ_RET {{[*]*}} OQAP, {{PV|T}}[[ADDRR:[0-9]*\.[XYZW]]]
; EG-NOT: LDS_READ_RET {{[*]*}} OQAP, T[[ADDRR]]
-define void @local_memory_two_objects(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @local_memory_two_objects(i32 addrspace(1)* %out) #0 {
entry:
%x.i = call i32 @llvm.r600.read.tidig.x() #1
%arrayidx = getelementptr inbounds [4 x i32], [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem0, i32 0, i32 %x.i
diff --git a/test/CodeGen/AMDGPU/local-stack-slot-bug.ll b/test/CodeGen/AMDGPU/local-stack-slot-bug.ll
index dc43e8613ddf..d3e0f0be4b5f 100644
--- a/test/CodeGen/AMDGPU/local-stack-slot-bug.ll
+++ b/test/CodeGen/AMDGPU/local-stack-slot-bug.ll
@@ -8,13 +8,12 @@
; CHECK-LABEL: {{^}}main:
; CHECK-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x200
-; CHECK-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
+; CHECK-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0x400{{$}}
; CHECK-DAG: v_lshlrev_b32_e32 [[BYTES:v[0-9]+]], 2, v0
; CHECK-DAG: v_and_b32_e32 [[CLAMP_IDX:v[0-9]+]], 0x1fc, [[BYTES]]
-; TODO: add 0?
-; CHECK-DAG: v_or_b32_e32 [[LO_OFF:v[0-9]+]], [[CLAMP_IDX]], [[ZERO]]
-; CHECK-DAG: v_or_b32_e32 [[HI_OFF:v[0-9]+]], [[CLAMP_IDX]], [[K]]
+; CHECK-DAG: v_or_b32_e32 [[LO_OFF:v[0-9]+]], [[CLAMP_IDX]], [[K]]
+; CHECK-DAG: v_or_b32_e32 [[HI_OFF:v[0-9]+]], [[CLAMP_IDX]], [[ZERO]]
; CHECK: buffer_load_dword {{v[0-9]+}}, [[LO_OFF]], {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen
; CHECK: buffer_load_dword {{v[0-9]+}}, [[HI_OFF]], {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen
diff --git a/test/CodeGen/AMDGPU/loop-address.ll b/test/CodeGen/AMDGPU/loop-address.ll
index f60d574497de..e25d4f4b4f5f 100644
--- a/test/CodeGen/AMDGPU/loop-address.ll
+++ b/test/CodeGen/AMDGPU/loop-address.ll
@@ -5,7 +5,7 @@
;CHECK: LOOP_BREAK @10
;CHECK: POP @10
-define void @loop_ge(i32 addrspace(1)* nocapture %out, i32 %iterations) #0 {
+define amdgpu_kernel void @loop_ge(i32 addrspace(1)* nocapture %out, i32 %iterations) #0 {
entry:
%cmp5 = icmp sgt i32 %iterations, 0
br i1 %cmp5, label %for.body, label %for.end
diff --git a/test/CodeGen/AMDGPU/loop-idiom.ll b/test/CodeGen/AMDGPU/loop-idiom.ll
index 5fd9806813cd..23ddd6488af9 100644
--- a/test/CodeGen/AMDGPU/loop-idiom.ll
+++ b/test/CodeGen/AMDGPU/loop-idiom.ll
@@ -9,7 +9,7 @@
; FUNC: @no_memcpy
; R600-NOT: {{^}}llvm.memcpy
; SI-NOT: {{^}}llvm.memcpy
-define void @no_memcpy(i8 addrspace(3)* %in, i32 %size) {
+define amdgpu_kernel void @no_memcpy(i8 addrspace(3)* %in, i32 %size) {
entry:
%dest = alloca i8, i32 32
br label %for.body
@@ -33,7 +33,7 @@ for.end:
; R600-NOT: {{^}}memset_pattern16:
; SI-NOT: {{^}}llvm.memset
; SI-NOT: {{^}}memset_pattern16:
-define void @no_memset(i32 %size) {
+define amdgpu_kernel void @no_memset(i32 %size) {
entry:
%dest = alloca i8, i32 32
br label %for.body
diff --git a/test/CodeGen/AMDGPU/loop_break.ll b/test/CodeGen/AMDGPU/loop_break.ll
index 82564b8bb28d..b9df2cb779ad 100644
--- a/test/CodeGen/AMDGPU/loop_break.ll
+++ b/test/CodeGen/AMDGPU/loop_break.ll
@@ -27,8 +27,9 @@
; GCN: [[LOOP_ENTRY:BB[0-9]+_[0-9]+]]: ; %bb1
; GCN: s_or_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], exec, [[INITMASK]]
-; GCN: s_cmp_gt_i32 s{{[0-9]+}}, -1
-; GCN-NEXT: s_cbranch_scc1 [[FLOW:BB[0-9]+_[0-9]+]]
+; GCN: v_cmp_lt_i32_e32 vcc, -1
+; GCN: s_and_b64 vcc, exec, vcc
+; GCN-NEXT: s_cbranch_vccnz [[FLOW:BB[0-9]+_[0-9]+]]
; GCN: ; BB#2: ; %bb4
; GCN: buffer_load_dword
@@ -43,7 +44,7 @@
; GCN: ; BB#4: ; %bb9
; GCN-NEXT: s_or_b64 exec, exec, [[MASK]]
; GCN-NEXT: s_endpgm
-define void @break_loop(i32 %arg) #0 {
+define amdgpu_kernel void @break_loop(i32 %arg) #0 {
bb:
%id = call i32 @llvm.amdgcn.workitem.id.x()
%tmp = sub i32 %id, %arg
@@ -64,6 +65,264 @@ bb9:
ret void
}
+; OPT-LABEL: @undef_phi_cond_break_loop(
+; OPT: bb1:
+; OPT-NEXT: %phi.broken = phi i64 [ %loop.phi, %Flow ], [ 0, %bb ]
+; OPT-NEXT: %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
+; OPT: %0 = call i64 @llvm.amdgcn.if.break(i1 undef, i64 %phi.broken)
+; OPT-NEXT: br i1 %cmp0, label %bb4, label %Flow
+
+; OPT: bb4:
+; OPT-NEXT: %load = load volatile i32, i32 addrspace(1)* undef, align 4
+; OPT-NEXT: %cmp1 = icmp sge i32 %tmp, %load
+; OPT-NEXT: %1 = call i64 @llvm.amdgcn.if.break(i1 %cmp1, i64 %phi.broken)
+; OPT-NEXT: br label %Flow
+
+; OPT: Flow:
+; OPT-NEXT: %loop.phi = phi i64 [ %1, %bb4 ], [ %0, %bb1 ]
+; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
+; OPT-NEXT: %2 = call i1 @llvm.amdgcn.loop(i64 %loop.phi)
+; OPT-NEXT: br i1 %2, label %bb9, label %bb1
+
+; OPT: bb9: ; preds = %Flow
+; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %loop.phi)
+; OPT-NEXT: store volatile i32 7
+; OPT-NEXT: ret void
+define amdgpu_kernel void @undef_phi_cond_break_loop(i32 %arg) #0 {
+bb:
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %tmp = sub i32 %id, %arg
+ br label %bb1
+
+bb1: ; preds = %Flow, %bb
+ %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
+ %lsr.iv.next = add i32 %lsr.iv, 1
+ %cmp0 = icmp slt i32 %lsr.iv.next, 0
+ br i1 %cmp0, label %bb4, label %Flow
+
+bb4: ; preds = %bb1
+ %load = load volatile i32, i32 addrspace(1)* undef, align 4
+ %cmp1 = icmp sge i32 %tmp, %load
+ br label %Flow
+
+Flow: ; preds = %bb4, %bb1
+ %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
+ %tmp3 = phi i1 [ %cmp1, %bb4 ], [ undef, %bb1 ]
+ br i1 %tmp3, label %bb9, label %bb1
+
+bb9: ; preds = %Flow
+ store volatile i32 7, i32 addrspace(3)* undef
+ ret void
+}
+
+; FIXME: ConstantExpr compare of address to null folds away
+@lds = addrspace(3) global i32 undef
+
+; OPT-LABEL: @constexpr_phi_cond_break_loop(
+; OPT: bb1:
+; OPT-NEXT: %phi.broken = phi i64 [ %loop.phi, %Flow ], [ 0, %bb ]
+; OPT-NEXT: %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
+; OPT: %0 = call i64 @llvm.amdgcn.if.break(i1 icmp ne (i32 addrspace(3)* inttoptr (i32 4 to i32 addrspace(3)*), i32 addrspace(3)* @lds), i64 %phi.broken)
+; OPT-NEXT: br i1 %cmp0, label %bb4, label %Flow
+
+; OPT: bb4:
+; OPT-NEXT: %load = load volatile i32, i32 addrspace(1)* undef, align 4
+; OPT-NEXT: %cmp1 = icmp sge i32 %tmp, %load
+; OPT-NEXT: %1 = call i64 @llvm.amdgcn.if.break(i1 %cmp1, i64 %phi.broken)
+; OPT-NEXT: br label %Flow
+
+; OPT: Flow:
+; OPT-NEXT: %loop.phi = phi i64 [ %1, %bb4 ], [ %0, %bb1 ]
+; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
+; OPT-NEXT: %2 = call i1 @llvm.amdgcn.loop(i64 %loop.phi)
+; OPT-NEXT: br i1 %2, label %bb9, label %bb1
+
+; OPT: bb9: ; preds = %Flow
+; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %loop.phi)
+; OPT-NEXT: store volatile i32 7
+; OPT-NEXT: ret void
+define amdgpu_kernel void @constexpr_phi_cond_break_loop(i32 %arg) #0 {
+bb:
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %tmp = sub i32 %id, %arg
+ br label %bb1
+
+bb1: ; preds = %Flow, %bb
+ %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
+ %lsr.iv.next = add i32 %lsr.iv, 1
+ %cmp0 = icmp slt i32 %lsr.iv.next, 0
+ br i1 %cmp0, label %bb4, label %Flow
+
+bb4: ; preds = %bb1
+ %load = load volatile i32, i32 addrspace(1)* undef, align 4
+ %cmp1 = icmp sge i32 %tmp, %load
+ br label %Flow
+
+Flow: ; preds = %bb4, %bb1
+ %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
+ %tmp3 = phi i1 [ %cmp1, %bb4 ], [ icmp ne (i32 addrspace(3)* inttoptr (i32 4 to i32 addrspace(3)*), i32 addrspace(3)* @lds), %bb1 ]
+ br i1 %tmp3, label %bb9, label %bb1
+
+bb9: ; preds = %Flow
+ store volatile i32 7, i32 addrspace(3)* undef
+ ret void
+}
+
+; OPT-LABEL: @true_phi_cond_break_loop(
+; OPT: bb1:
+; OPT-NEXT: %phi.broken = phi i64 [ %loop.phi, %Flow ], [ 0, %bb ]
+; OPT-NEXT: %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
+; OPT: %0 = call i64 @llvm.amdgcn.break(i64 %phi.broken)
+; OPT: br i1 %cmp0, label %bb4, label %Flow
+
+; OPT: bb4:
+; OPT-NEXT: %load = load volatile i32, i32 addrspace(1)* undef, align 4
+; OPT-NEXT: %cmp1 = icmp sge i32 %tmp, %load
+; OPT-NEXT: %1 = call i64 @llvm.amdgcn.if.break(i1 %cmp1, i64 %phi.broken)
+; OPT-NEXT: br label %Flow
+
+; OPT: Flow:
+; OPT-NEXT: %loop.phi = phi i64 [ %1, %bb4 ], [ %0, %bb1 ]
+; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
+; OPT-NEXT: %2 = call i1 @llvm.amdgcn.loop(i64 %loop.phi)
+; OPT-NEXT: br i1 %2, label %bb9, label %bb1
+
+; OPT: bb9: ; preds = %Flow
+; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %loop.phi)
+; OPT-NEXT: store volatile i32 7
+; OPT-NEXT: ret void
+define amdgpu_kernel void @true_phi_cond_break_loop(i32 %arg) #0 {
+bb:
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %tmp = sub i32 %id, %arg
+ br label %bb1
+
+bb1: ; preds = %Flow, %bb
+ %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
+ %lsr.iv.next = add i32 %lsr.iv, 1
+ %cmp0 = icmp slt i32 %lsr.iv.next, 0
+ br i1 %cmp0, label %bb4, label %Flow
+
+bb4: ; preds = %bb1
+ %load = load volatile i32, i32 addrspace(1)* undef, align 4
+ %cmp1 = icmp sge i32 %tmp, %load
+ br label %Flow
+
+Flow: ; preds = %bb4, %bb1
+ %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
+ %tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ]
+ br i1 %tmp3, label %bb9, label %bb1
+
+bb9: ; preds = %Flow
+ store volatile i32 7, i32 addrspace(3)* undef
+ ret void
+}
+
+; OPT-LABEL: @false_phi_cond_break_loop(
+; OPT: bb1:
+; OPT-NEXT: %phi.broken = phi i64 [ %loop.phi, %Flow ], [ 0, %bb ]
+; OPT-NEXT: %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
+; OPT-NOT: call
+; OPT: br i1 %cmp0, label %bb4, label %Flow
+
+; OPT: bb4:
+; OPT-NEXT: %load = load volatile i32, i32 addrspace(1)* undef, align 4
+; OPT-NEXT: %cmp1 = icmp sge i32 %tmp, %load
+; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break(i1 %cmp1, i64 %phi.broken)
+; OPT-NEXT: br label %Flow
+
+; OPT: Flow:
+; OPT-NEXT: %loop.phi = phi i64 [ %0, %bb4 ], [ %phi.broken, %bb1 ]
+; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
+; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop(i64 %loop.phi)
+; OPT-NEXT: br i1 %1, label %bb9, label %bb1
+
+; OPT: bb9: ; preds = %Flow
+; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %loop.phi)
+; OPT-NEXT: store volatile i32 7
+; OPT-NEXT: ret void
+define amdgpu_kernel void @false_phi_cond_break_loop(i32 %arg) #0 {
+bb:
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %tmp = sub i32 %id, %arg
+ br label %bb1
+
+bb1: ; preds = %Flow, %bb
+ %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
+ %lsr.iv.next = add i32 %lsr.iv, 1
+ %cmp0 = icmp slt i32 %lsr.iv.next, 0
+ br i1 %cmp0, label %bb4, label %Flow
+
+bb4: ; preds = %bb1
+ %load = load volatile i32, i32 addrspace(1)* undef, align 4
+ %cmp1 = icmp sge i32 %tmp, %load
+ br label %Flow
+
+Flow: ; preds = %bb4, %bb1
+ %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
+ %tmp3 = phi i1 [ %cmp1, %bb4 ], [ false, %bb1 ]
+ br i1 %tmp3, label %bb9, label %bb1
+
+bb9: ; preds = %Flow
+ store volatile i32 7, i32 addrspace(3)* undef
+ ret void
+}
+
+; Swap the order of branches in the flow block so that the true phi
+; is the loop continue.
+
+; OPT-LABEL: @invert_true_phi_cond_break_loop(
+; OPT: bb1:
+; OPT-NEXT: %phi.broken = phi i64 [ %1, %Flow ], [ 0, %bb ]
+; OPT-NEXT: %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
+; OPT-NEXT: %lsr.iv.next = add i32 %lsr.iv, 1
+; OPT-NEXT: %cmp0 = icmp slt i32 %lsr.iv.next, 0
+; OPT-NEXT: br i1 %cmp0, label %bb4, label %Flow
+
+; OPT: bb4:
+; OPT-NEXT: %load = load volatile i32, i32 addrspace(1)* undef, align 4
+; OPT-NEXT: %cmp1 = icmp sge i32 %tmp, %load
+; OPT-NEXT: br label %Flow
+
+; OPT: Flow:
+; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
+; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ]
+; OPT-NEXT: %0 = xor i1 %tmp3, true
+; OPT-NEXT: %1 = call i64 @llvm.amdgcn.if.break(i1 %0, i64 %phi.broken)
+; OPT-NEXT: %2 = call i1 @llvm.amdgcn.loop(i64 %1)
+; OPT-NEXT: br i1 %2, label %bb9, label %bb1
+
+; OPT: bb9:
+; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %1)
+; OPT-NEXT: store volatile i32 7, i32 addrspace(3)* undef
+; OPT-NEXT: ret void
+define amdgpu_kernel void @invert_true_phi_cond_break_loop(i32 %arg) #0 {
+bb:
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %tmp = sub i32 %id, %arg
+ br label %bb1
+
+bb1: ; preds = %Flow, %bb
+ %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
+ %lsr.iv.next = add i32 %lsr.iv, 1
+ %cmp0 = icmp slt i32 %lsr.iv.next, 0
+ br i1 %cmp0, label %bb4, label %Flow
+
+bb4: ; preds = %bb1
+ %load = load volatile i32, i32 addrspace(1)* undef, align 4
+ %cmp1 = icmp sge i32 %tmp, %load
+ br label %Flow
+
+Flow: ; preds = %bb4, %bb1
+ %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
+ %tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ]
+ br i1 %tmp3, label %bb1, label %bb9
+
+bb9: ; preds = %Flow
+ store volatile i32 7, i32 addrspace(3)* undef
+ ret void
+}
+
declare i32 @llvm.amdgcn.workitem.id.x() #1
attributes #0 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/lower-mem-intrinsics.ll b/test/CodeGen/AMDGPU/lower-mem-intrinsics.ll
new file mode 100644
index 000000000000..74564f387ede
--- /dev/null
+++ b/test/CodeGen/AMDGPU/lower-mem-intrinsics.ll
@@ -0,0 +1,117 @@
+; RUN: opt -S -amdgpu-lower-intrinsics %s | FileCheck -check-prefix=OPT %s
+
+declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture readonly, i64, i32, i1) #1
+declare void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* nocapture, i8 addrspace(3)* nocapture readonly, i32, i32, i1) #1
+
+declare void @llvm.memmove.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture readonly, i64, i32, i1) #1
+declare void @llvm.memset.p1i8.i64(i8 addrspace(1)* nocapture, i8, i64, i32, i1) #1
+
+; Test the upper bound for sizes to leave
+; OPT-LABEL: @max_size_small_static_memcpy_caller0(
+; OPT: call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1024, i32 1, i1 false)
+define amdgpu_kernel void @max_size_small_static_memcpy_caller0(i8 addrspace(1)* %dst, i8 addrspace(1)* %src) #0 {
+ call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1024, i32 1, i1 false)
+ ret void
+}
+
+; Smallest static size which will be expanded
+; OPT-LABEL: @min_size_large_static_memcpy_caller0(
+; OPT-NOT: call
+; OPT: getelementptr
+; OPT-NEXT: load i8
+; OPT: getelementptr
+; OPT-NEXT: store i8
+define amdgpu_kernel void @min_size_large_static_memcpy_caller0(i8 addrspace(1)* %dst, i8 addrspace(1)* %src) #0 {
+ call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1025, i32 1, i1 false)
+ ret void
+}
+
+; OPT-LABEL: @max_size_small_static_memmove_caller0(
+; OPT: call void @llvm.memmove.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1024, i32 1, i1 false)
+define amdgpu_kernel void @max_size_small_static_memmove_caller0(i8 addrspace(1)* %dst, i8 addrspace(1)* %src) #0 {
+ call void @llvm.memmove.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1024, i32 1, i1 false)
+ ret void
+}
+
+; OPT-LABEL: @min_size_large_static_memmove_caller0(
+; OPT-NOT: call
+; OPT: getelementptr
+; OPT-NEXT: load i8
+; OPT: getelementptr
+; OPT-NEXT: store i8
+define amdgpu_kernel void @min_size_large_static_memmove_caller0(i8 addrspace(1)* %dst, i8 addrspace(1)* %src) #0 {
+ call void @llvm.memmove.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1025, i32 1, i1 false)
+ ret void
+}
+
+; OPT-LABEL: @max_size_small_static_memset_caller0(
+; OPT: call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %dst, i8 %val, i64 1024, i32 1, i1 false)
+define amdgpu_kernel void @max_size_small_static_memset_caller0(i8 addrspace(1)* %dst, i8 %val) #0 {
+ call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %dst, i8 %val, i64 1024, i32 1, i1 false)
+ ret void
+}
+
+; OPT-LABEL: @min_size_large_static_memset_caller0(
+; OPT-NOT: call
+; OPT: getelementptr
+; OPT: store i8
+define amdgpu_kernel void @min_size_large_static_memset_caller0(i8 addrspace(1)* %dst, i8 %val) #0 {
+ call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %dst, i8 %val, i64 1025, i32 1, i1 false)
+ ret void
+}
+
+; OPT-LABEL: @variable_memcpy_caller0(
+; OPT-NOT: call
+; OPT: phi
+define amdgpu_kernel void @variable_memcpy_caller0(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 %n) #0 {
+ call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 %n, i32 1, i1 false)
+ ret void
+}
+
+; OPT-LABEL: @variable_memcpy_caller1(
+; OPT-NOT: call
+; OPT: phi
+define amdgpu_kernel void @variable_memcpy_caller1(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 %n) #0 {
+ call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 %n, i32 1, i1 false)
+ ret void
+}
+
+; OPT-LABEL: @memcpy_multi_use_one_function(
+; OPT-NOT: call
+; OPT: phi
+; OPT-NOT: call
+; OPT: phi
+; OPT-NOT: call
+define amdgpu_kernel void @memcpy_multi_use_one_function(i8 addrspace(1)* %dst0, i8 addrspace(1)* %dst1, i8 addrspace(1)* %src, i64 %n, i64 %m) #0 {
+ call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst0, i8 addrspace(1)* %src, i64 %n, i32 1, i1 false)
+ call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst1, i8 addrspace(1)* %src, i64 %m, i32 1, i1 false)
+ ret void
+}
+
+; OPT-LABEL: @memcpy_alt_type(
+; OPT: phi
+; OPT: getelementptr inbounds i8, i8 addrspace(3)*
+; OPT: load i8, i8 addrspace(3)*
+; OPT: getelementptr inbounds i8, i8 addrspace(1)*
+; OPT: store i8
+define amdgpu_kernel void @memcpy_alt_type(i8 addrspace(1)* %dst, i8 addrspace(3)* %src, i32 %n) #0 {
+ call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %dst, i8 addrspace(3)* %src, i32 %n, i32 1, i1 false)
+ ret void
+}
+
+; One of the uses in the function should be expanded, the other left alone.
+; OPT-LABEL: @memcpy_multi_use_one_function_keep_small(
+; OPT: getelementptr inbounds i8, i8 addrspace(1)*
+; OPT: load i8, i8 addrspace(1)*
+; OPT: getelementptr inbounds i8, i8 addrspace(1)*
+; OPT: store i8
+
+; OPT: call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst1, i8 addrspace(1)* %src, i64 102, i32 1, i1 false)
+define amdgpu_kernel void @memcpy_multi_use_one_function_keep_small(i8 addrspace(1)* %dst0, i8 addrspace(1)* %dst1, i8 addrspace(1)* %src, i64 %n) #0 {
+ call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst0, i8 addrspace(1)* %src, i64 %n, i32 1, i1 false)
+ call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst1, i8 addrspace(1)* %src, i64 102, i32 1, i1 false)
+ ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { argmemonly nounwind }
diff --git a/test/CodeGen/AMDGPU/lower-range-metadata-intrinsic-call.ll b/test/CodeGen/AMDGPU/lower-range-metadata-intrinsic-call.ll
index e1fad13e0b51..6f5f4ca13b5e 100644
--- a/test/CodeGen/AMDGPU/lower-range-metadata-intrinsic-call.ll
+++ b/test/CodeGen/AMDGPU/lower-range-metadata-intrinsic-call.ll
@@ -5,7 +5,7 @@
; CHECK-LABEL: {{^}}test_workitem_id_x_known_max_range:
; CHECK-NOT: v0
; CHECK: {{flat|buffer}}_store_dword {{.*}}v0
-define void @test_workitem_id_x_known_max_range(i32 addrspace(1)* nocapture %out) #0 {
+define amdgpu_kernel void @test_workitem_id_x_known_max_range(i32 addrspace(1)* nocapture %out) #0 {
entry:
%id = tail call i32 @llvm.amdgcn.workitem.id.x(), !range !0
%and = and i32 %id, 1023
@@ -14,9 +14,9 @@ entry:
}
; CHECK-LABEL: {{^}}test_workitem_id_x_known_trunc_1_bit_range:
-; CHECK: v_and_b32_e32 [[MASKED:v[0-9]+]], 0x1ff, v0
-; CHECK: {{flat|buffer}}_store_dword {{.*}}[[MASKED]]
-define void @test_workitem_id_x_known_trunc_1_bit_range(i32 addrspace(1)* nocapture %out) #0 {
+; CHECK-NOT: v_and_b32
+; CHECK: {{flat|buffer}}_store_dword {{.*}}v0
+define amdgpu_kernel void @test_workitem_id_x_known_trunc_1_bit_range(i32 addrspace(1)* nocapture %out) #0 {
entry:
%id = tail call i32 @llvm.amdgcn.workitem.id.x(), !range !0
%and = and i32 %id, 511
@@ -26,9 +26,9 @@ entry:
; CHECK-LABEL: {{^}}test_workitem_id_x_known_max_range_m1:
; CHECK-NOT: v0
-; CHECK: v_and_b32_e32 [[MASKED:v[0-9]+]], 0xff, v0
-; CHECK: {{flat|buffer}}_store_dword {{.*}}[[MASKED]]
-define void @test_workitem_id_x_known_max_range_m1(i32 addrspace(1)* nocapture %out) #0 {
+; CHECK-NOT: v_and_b32
+; CHECK: {{flat|buffer}}_store_dword {{.*}}v0
+define amdgpu_kernel void @test_workitem_id_x_known_max_range_m1(i32 addrspace(1)* nocapture %out) #0 {
entry:
%id = tail call i32 @llvm.amdgcn.workitem.id.x(), !range !1
%and = and i32 %id, 255
diff --git a/test/CodeGen/AMDGPU/lshl.ll b/test/CodeGen/AMDGPU/lshl.ll
deleted file mode 100644
index 8468437c2c1f..000000000000
--- a/test/CodeGen/AMDGPU/lshl.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
-;RUN: llc < %s -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s
-
-;CHECK: s_lshl_b32 s{{[0-9]}}, s{{[0-9]}}, 1
-
-define void @test(i32 %p) {
- %i = mul i32 %p, 2
- %r = bitcast i32 %i to float
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %r, float %r, float %r, float %r)
- ret void
-}
-
-declare <4 x float> @llvm.SI.sample.(i32, <4 x i32>, <8 x i32>, <4 x i32>, i32) readnone
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
diff --git a/test/CodeGen/AMDGPU/lshr.ll b/test/CodeGen/AMDGPU/lshr.ll
deleted file mode 100644
index c8ab7871434e..000000000000
--- a/test/CodeGen/AMDGPU/lshr.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
-;RUN: llc < %s -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s
-
-;CHECK: s_lshr_b32 s{{[0-9]}}, s{{[0-9]}}, 1
-
-define void @test(i32 %p) {
- %i = udiv i32 %p, 2
- %r = bitcast i32 %i to float
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %r, float %r, float %r, float %r)
- ret void
-}
-
-declare <4 x float> @llvm.SI.sample.(i32, <4 x i32>, <8 x i32>, <4 x i32>, i32) readnone
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
diff --git a/test/CodeGen/AMDGPU/lshr.v2i16.ll b/test/CodeGen/AMDGPU/lshr.v2i16.ll
new file mode 100644
index 000000000000..e21d0d09bb41
--- /dev/null
+++ b/test/CodeGen/AMDGPU/lshr.v2i16.ll
@@ -0,0 +1,149 @@
+; RUN: llc -march=amdgcn -mcpu=gfx901 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=CIVI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=CIVI %s
+
+; GCN-LABEL: {{^}}s_lshr_v2i16:
+; GFX9: s_load_dword [[LHS:s[0-9]+]]
+; GFX9: s_load_dword [[RHS:s[0-9]+]]
+; GFX9: v_mov_b32_e32 [[VLHS:v[0-9]+]], [[LHS]]
+; GFX9: v_pk_lshrrev_b16 [[RESULT:v[0-9]+]], [[RHS]], [[VLHS]]
+
+; VI: v_lshrrev_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; CI: v_lshrrev_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; CI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; CIVI: v_bfe_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 16
+; CIVI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @s_lshr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %lhs, <2 x i16> %rhs) #0 {
+ %result = lshr <2 x i16> %lhs, %rhs
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_lshr_v2i16:
+; GCN: {{buffer|flat}}_load_dword [[LHS:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[RHS:v[0-9]+]]
+; GFX9: v_pk_lshrrev_b16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
+
+; VI: v_lshrrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_lshrrev_b16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+
+; CI: s_mov_b32 [[MASK:s[0-9]+]], 0xffff{{$}}
+; CI-DAG: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, [[LHS]]
+; CI-DAG: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, [[RHS]]
+; CI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], v{{[0-9]+}}
+; CI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], v{{[0-9]+}}
+; CI: v_bfe_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 16
+; CI: v_lshrrev_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; CI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; CI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @v_lshr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in.gep, i32 1
+ %a = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %b = load <2 x i16>, <2 x i16> addrspace(1)* %b_ptr
+ %result = lshr <2 x i16> %a, %b
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}lshr_v_s_v2i16:
+; GFX9: s_load_dword [[RHS:s[0-9]+]]
+; GFX9: {{buffer|flat}}_load_dword [[LHS:v[0-9]+]]
+; GFX9: v_pk_lshrrev_b16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
+define amdgpu_kernel void @lshr_v_s_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in, <2 x i16> %sgpr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %result = lshr <2 x i16> %vgpr, %sgpr
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}lshr_s_v_v2i16:
+; GFX9: s_load_dword [[LHS:s[0-9]+]]
+; GFX9: {{buffer|flat}}_load_dword [[RHS:v[0-9]+]]
+; GFX9: v_pk_lshrrev_b16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
+define amdgpu_kernel void @lshr_s_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in, <2 x i16> %sgpr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %result = lshr <2 x i16> %sgpr, %vgpr
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}lshr_imm_v_v2i16:
+; GCN: {{buffer|flat}}_load_dword [[RHS:v[0-9]+]]
+; GFX9: v_pk_lshrrev_b16 [[RESULT:v[0-9]+]], [[RHS]], 8
+define amdgpu_kernel void @lshr_imm_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %result = lshr <2 x i16> <i16 8, i16 8>, %vgpr
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}lshr_v_imm_v2i16:
+; GCN: {{buffer|flat}}_load_dword [[LHS:v[0-9]+]]
+; GFX9: v_pk_lshrrev_b16 [[RESULT:v[0-9]+]], 8, [[LHS]]
+define amdgpu_kernel void @lshr_v_imm_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %result = lshr <2 x i16> %vgpr, <i16 8, i16 8>
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_lshr_v4i16:
+; GCN: {{buffer|flat}}_load_dwordx2
+; GCN: {{buffer|flat}}_load_dwordx2
+; GFX9: v_pk_lshrrev_b16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GFX9: v_pk_lshrrev_b16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: {{buffer|flat}}_store_dwordx2
+define amdgpu_kernel void @v_lshr_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %out, i64 %tid.ext
+ %b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in.gep, i32 1
+ %a = load <4 x i16>, <4 x i16> addrspace(1)* %in.gep
+ %b = load <4 x i16>, <4 x i16> addrspace(1)* %b_ptr
+ %result = lshr <4 x i16> %a, %b
+ store <4 x i16> %result, <4 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}lshr_v_imm_v4i16:
+; GCN: {{buffer|flat}}_load_dwordx2
+; GFX9: v_pk_lshrrev_b16 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; GFX9: v_pk_lshrrev_b16 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; GCN: {{buffer|flat}}_store_dwordx2
+define amdgpu_kernel void @lshr_v_imm_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vgpr = load <4 x i16>, <4 x i16> addrspace(1)* %in.gep
+ %result = lshr <4 x i16> %vgpr, <i16 8, i16 8, i16 8, i16 8>
+ store <4 x i16> %result, <4 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/mad-combine.ll b/test/CodeGen/AMDGPU/mad-combine.ll
index 9caba32cbac0..b855fc500c6b 100644
--- a/test/CodeGen/AMDGPU/mad-combine.ll
+++ b/test/CodeGen/AMDGPU/mad-combine.ll
@@ -31,7 +31,7 @@ declare float @llvm.fmuladd.f32(float, float, float) #0

; SI-DENORM: buffer_store_dword [[RESULT]]
; SI-STD: buffer_store_dword [[C]]
-define void @combine_to_mad_f32_0(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_mad_f32_0(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -70,7 +70,7 @@ define void @combine_to_mad_f32_0(float addrspace(1)* noalias %out, float addrsp
; SI-STD-DAG: buffer_store_dword [[C]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-STD-DAG: buffer_store_dword [[D]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
; SI: s_endpgm
-define void @combine_to_mad_f32_0_2use(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_mad_f32_0_2use(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -107,7 +107,7 @@ define void @combine_to_mad_f32_0_2use(float addrspace(1)* noalias %out, float a

; SI-DENORM: buffer_store_dword [[RESULT]]
; SI-STD: buffer_store_dword [[C]]
-define void @combine_to_mad_f32_1(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_mad_f32_1(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -137,7 +137,7 @@ define void @combine_to_mad_f32_1(float addrspace(1)* noalias %out, float addrsp
; SI-DENORM-SLOWFMAF: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[C]], [[TMP]]

; SI: buffer_store_dword [[RESULT]]
-define void @combine_to_mad_fsub_0_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_mad_fsub_0_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -174,7 +174,7 @@ define void @combine_to_mad_fsub_0_f32(float addrspace(1)* noalias %out, float a
; SI-DAG: buffer_store_dword [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: buffer_store_dword [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
; SI: s_endpgm
-define void @combine_to_mad_fsub_0_f32_2use(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_mad_fsub_0_f32_2use(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -209,7 +209,7 @@ define void @combine_to_mad_fsub_0_f32_2use(float addrspace(1)* noalias %out, fl
; SI-DENORM-SLOWFMAF: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[C]]
; SI: buffer_store_dword [[RESULT]]
-define void @combine_to_mad_fsub_1_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_mad_fsub_1_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -245,7 +245,7 @@ define void @combine_to_mad_fsub_1_f32(float addrspace(1)* noalias %out, float a
; SI-DAG: buffer_store_dword [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: buffer_store_dword [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
; SI: s_endpgm
-define void @combine_to_mad_fsub_1_f32_2use(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_mad_fsub_1_f32_2use(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -281,7 +281,7 @@ define void @combine_to_mad_fsub_1_f32_2use(float addrspace(1)* noalias %out, fl
; SI-DENORM-SLOWFMAF: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[C]], [[TMP]]
; SI: buffer_store_dword [[RESULT]]
-define void @combine_to_mad_fsub_2_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_mad_fsub_2_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -319,7 +319,7 @@ define void @combine_to_mad_fsub_2_f32(float addrspace(1)* noalias %out, float a
; SI-DAG: buffer_store_dword [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: buffer_store_dword [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
; SI: s_endpgm
-define void @combine_to_mad_fsub_2_f32_2uses_neg(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_mad_fsub_2_f32_2uses_neg(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -362,7 +362,7 @@ define void @combine_to_mad_fsub_2_f32_2uses_neg(float addrspace(1)* noalias %ou
; SI-DAG: buffer_store_dword [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: buffer_store_dword [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
; SI: s_endpgm
-define void @combine_to_mad_fsub_2_f32_2uses_mul(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @combine_to_mad_fsub_2_f32_2uses_mul(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -404,7 +404,7 @@ define void @combine_to_mad_fsub_2_f32_2uses_mul(float addrspace(1)* noalias %ou
; SI-DENORM: v_subrev_f32_e32 [[RESULT1:v[0-9]+]], [[C]], [[TMP1]]
; SI: buffer_store_dword [[RESULT]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-define void @aggressive_combine_to_mad_fsub_0_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @aggressive_combine_to_mad_fsub_0_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -447,7 +447,7 @@ define void @aggressive_combine_to_mad_fsub_0_f32(float addrspace(1)* noalias %o
; SI: buffer_store_dword [[RESULT]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: s_endpgm
-define void @aggressive_combine_to_mad_fsub_1_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @aggressive_combine_to_mad_fsub_1_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -497,7 +497,7 @@ define void @aggressive_combine_to_mad_fsub_1_f32(float addrspace(1)* noalias %o
; SI: buffer_store_dword [[RESULT]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: s_endpgm
-define void @aggressive_combine_to_mad_fsub_2_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @aggressive_combine_to_mad_fsub_2_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -548,7 +548,7 @@ define void @aggressive_combine_to_mad_fsub_2_f32(float addrspace(1)* noalias %o
; SI: buffer_store_dword [[RESULT]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: s_endpgm
-define void @aggressive_combine_to_mad_fsub_3_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+define amdgpu_kernel void @aggressive_combine_to_mad_fsub_3_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
diff --git a/test/CodeGen/AMDGPU/mad24-get-global-id.ll b/test/CodeGen/AMDGPU/mad24-get-global-id.ll
index 9183ae0972dc..1e78c4ebcc9f 100644
--- a/test/CodeGen/AMDGPU/mad24-get-global-id.ll
+++ b/test/CodeGen/AMDGPU/mad24-get-global-id.ll
@@ -11,7 +11,7 @@ declare i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr() #0
; GCN: s_and_b32 [[WGSIZEX:s[0-9]+]], {{s[0-9]+}}, 0xffff
; GCN: v_mov_b32_e32 [[VWGSIZEX:v[0-9]+]], [[WGSIZEX]]
; GCN: v_mad_u32_u24 v{{[0-9]+}}, [[VWGSIZEX]], s8, v0
-define void @get_global_id_0(i32 addrspace(1)* %out) #1 {
+define amdgpu_kernel void @get_global_id_0(i32 addrspace(1)* %out) #1 {
%dispatch.ptr = call i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr()
%cast.dispatch.ptr = bitcast i8 addrspace(2)* %dispatch.ptr to i32 addrspace(2)*
%gep = getelementptr inbounds i32, i32 addrspace(2)* %cast.dispatch.ptr, i64 1
diff --git a/test/CodeGen/AMDGPU/mad_int24.ll b/test/CodeGen/AMDGPU/mad_int24.ll
index f149ea0a6a0e..af0159aa9b10 100644
--- a/test/CodeGen/AMDGPU/mad_int24.ll
+++ b/test/CodeGen/AMDGPU/mad_int24.ll
@@ -11,7 +11,7 @@
; CM: MULADD_INT24
; SI-NOT: and
; SI: v_mad_i32_i24
-define void @i32_mad24(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
+define amdgpu_kernel void @i32_mad24(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
entry:
%0 = shl i32 %a, 8
%a_24 = ashr i32 %0, 8
diff --git a/test/CodeGen/AMDGPU/mad_uint24.ll b/test/CodeGen/AMDGPU/mad_uint24.ll
index 9fde950f822c..2c4f7d324a96 100644
--- a/test/CodeGen/AMDGPU/mad_uint24.ll
+++ b/test/CodeGen/AMDGPU/mad_uint24.ll
@@ -11,7 +11,7 @@ declare i32 @llvm.r600.read.tidig.x() nounwind readnone
; SI: v_mad_u32_u24
; VI: v_mad_u32_u24
-define void @u32_mad24(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
+define amdgpu_kernel void @u32_mad24(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
entry:
%0 = shl i32 %a, 8
%a_24 = lshr i32 %0, 8
@@ -32,7 +32,7 @@ entry:
; FIXME: Should be using scalar instructions here.
; GCN: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 v{{[0-9]}}, [[MAD]], 0, 16
-define void @i16_mad24(i32 addrspace(1)* %out, i16 %a, i16 %b, i16 %c) {
+define amdgpu_kernel void @i16_mad24(i32 addrspace(1)* %out, i16 %a, i16 %b, i16 %c) {
entry:
%0 = mul i16 %a, %b
%1 = add i16 %0, %c
@@ -49,7 +49,7 @@ entry:
; EG: 8
; GCN: v_mad_u32_u24 [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 8
-define void @i8_mad24(i32 addrspace(1)* %out, i8 %a, i8 %b, i8 %c) {
+define amdgpu_kernel void @i8_mad24(i32 addrspace(1)* %out, i8 %a, i8 %b, i8 %c) {
entry:
%0 = mul i8 %a, %b
%1 = add i8 %0, %c
@@ -68,7 +68,7 @@ entry:
; FUNC-LABEL: {{^}}i24_i32_i32_mad:
; EG: CNDE_INT
; SI: v_cndmask
-define void @i24_i32_i32_mad(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
+define amdgpu_kernel void @i24_i32_i32_mad(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
entry:
%0 = ashr i32 %a, 8
%1 = icmp ne i32 %c, 0
diff --git a/test/CodeGen/AMDGPU/madak.ll b/test/CodeGen/AMDGPU/madak.ll
index 6722aa79dd5d..eb4066a2a0a8 100644
--- a/test/CodeGen/AMDGPU/madak.ll
+++ b/test/CodeGen/AMDGPU/madak.ll
@@ -10,7 +10,7 @@ declare float @llvm.fabs.f32(float) nounwind readnone
; GCN: buffer_load_dword [[VA:v[0-9]+]]
; GCN: buffer_load_dword [[VB:v[0-9]+]]
; GCN: v_madak_f32_e32 {{v[0-9]+}}, [[VA]], [[VB]], 0x41200000
-define void @madak_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
+define amdgpu_kernel void @madak_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
%in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
@@ -37,7 +37,7 @@ define void @madak_f32(float addrspace(1)* noalias %out, float addrspace(1)* noa
; GCN-DAG: v_mad_f32 {{v[0-9]+}}, [[VB]], [[VA]], [[VK]]
; GCN-DAG: v_mac_f32_e32 [[VK]], [[VC]], [[VA]]
; GCN: s_endpgm
-define void @madak_2_use_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @madak_2_use_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
@@ -64,7 +64,7 @@ define void @madak_2_use_f32(float addrspace(1)* noalias %out, float addrspace(1
; GCN-LABEL: {{^}}madak_m_inline_imm_f32:
; GCN: buffer_load_dword [[VA:v[0-9]+]]
; GCN: v_madak_f32_e32 {{v[0-9]+}}, 4.0, [[VA]], 0x41200000
-define void @madak_m_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a) nounwind {
+define amdgpu_kernel void @madak_m_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
%out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -84,7 +84,7 @@ define void @madak_m_inline_imm_f32(float addrspace(1)* noalias %out, float addr
; GCN: buffer_load_dword [[VA:v[0-9]+]]
; GCN: buffer_load_dword [[VB:v[0-9]+]]
; GCN: v_mad_f32 {{v[0-9]+}}, [[VA]], [[VB]], 4.0
-define void @madak_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
+define amdgpu_kernel void @madak_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
%in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
@@ -106,7 +106,7 @@ define void @madak_inline_imm_f32(float addrspace(1)* noalias %out, float addrsp
; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]]
; GCN-NOT: v_madak_f32
; GCN: v_mac_f32_e32 [[VK]], [[SB]], [[VA]]
-define void @s_v_madak_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float %b) nounwind {
+define amdgpu_kernel void @s_v_madak_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float %b) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
%out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -125,7 +125,7 @@ define void @s_v_madak_f32(float addrspace(1)* noalias %out, float addrspace(1)*
; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]]
; GCN-NOT: v_madak_f32
; GCN: v_mac_f32_e32 [[VK]], [[SB]], [[VA]]
-define void @v_s_madak_f32(float addrspace(1)* noalias %out, float %a, float addrspace(1)* noalias %in.b) nounwind {
+define amdgpu_kernel void @v_s_madak_f32(float addrspace(1)* noalias %out, float %a, float addrspace(1)* noalias %in.b) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
%out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -141,7 +141,7 @@ define void @v_s_madak_f32(float addrspace(1)* noalias %out, float %a, float add
; GCN-LABEL: {{^}}s_s_madak_f32:
; GCN-NOT: v_madak_f32
; GCN: v_mac_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
-define void @s_s_madak_f32(float addrspace(1)* %out, float %a, float %b) nounwind {
+define amdgpu_kernel void @s_s_madak_f32(float addrspace(1)* %out, float %a, float %b) nounwind {
%mul = fmul float %a, %b
%madak = fadd float %mul, 10.0
store float %madak, float addrspace(1)* %out, align 4
@@ -153,7 +153,7 @@ define void @s_s_madak_f32(float addrspace(1)* %out, float %a, float %b) nounwin
; GCN: buffer_load_dword [[VB:v[0-9]+]]
; GCN: v_mad_f32 {{v[0-9]+}}, {{v[0-9]+}}, |{{v[0-9]+}}|, {{[sv][0-9]+}}
; GCN: s_endpgm
-define void @no_madak_src0_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
+define amdgpu_kernel void @no_madak_src0_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
%in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
@@ -175,7 +175,7 @@ define void @no_madak_src0_modifier_f32(float addrspace(1)* noalias %out, float
; GCN: buffer_load_dword [[VB:v[0-9]+]]
; GCN: v_mad_f32 {{v[0-9]+}}, |{{v[0-9]+}}|, {{v[0-9]+}}, {{[sv][0-9]+}}
; GCN: s_endpgm
-define void @no_madak_src1_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
+define amdgpu_kernel void @no_madak_src1_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
%in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
@@ -201,7 +201,7 @@ define void @no_madak_src1_modifier_f32(float addrspace(1)* noalias %out, float
; GCN: v_madak_f32_e32 [[MADAK:v[0-9]+]], 0.5, [[SGPR0_VCOPY]], 0x42280000
; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], [[VGPR]], [[MADAK]]
; GCN: buffer_store_dword [[MUL]]
-define void @madak_constant_bus_violation(i32 %arg1, float %sgpr0, float %sgpr1) #0 {
+define amdgpu_kernel void @madak_constant_bus_violation(i32 %arg1, float %sgpr0, float %sgpr1) #0 {
bb:
%tmp = icmp eq i32 %arg1, 0
br i1 %tmp, label %bb3, label %bb4
diff --git a/test/CodeGen/AMDGPU/madmk.ll b/test/CodeGen/AMDGPU/madmk.ll
index 27fbf58d26c6..6e70e95383c9 100644
--- a/test/CodeGen/AMDGPU/madmk.ll
+++ b/test/CodeGen/AMDGPU/madmk.ll
@@ -12,7 +12,7 @@ declare float @llvm.fabs.f32(float) nounwind readnone
; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; GCN: v_mac_f32_e32 [[VB]], 0x41200000, [[VA]]
-define void @madmk_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @madmk_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -35,7 +35,7 @@ define void @madmk_f32(float addrspace(1)* noalias %out, float addrspace(1)* noa
; GCN-DAG: v_mac_f32_e32 [[VB]], [[VK]], [[VA]]
; GCN-DAG: v_mac_f32_e32 [[VC]], [[VK]], [[VA]]
; GCN: s_endpgm
-define void @madmk_2_use_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @madmk_2_use_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
@@ -64,7 +64,7 @@ define void @madmk_2_use_f32(float addrspace(1)* noalias %out, float addrspace(1
; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; GCN: v_mac_f32_e32 [[VB]], 4.0, [[VA]]
-define void @madmk_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @madmk_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -83,7 +83,7 @@ define void @madmk_inline_imm_f32(float addrspace(1)* noalias %out, float addrsp
; GCN-NOT: v_madmk_f32
; GCN: v_mac_f32_e32
; GCN: s_endpgm
-define void @s_s_madmk_f32(float addrspace(1)* noalias %out, float %a, float %b) nounwind {
+define amdgpu_kernel void @s_s_madmk_f32(float addrspace(1)* noalias %out, float %a, float %b) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -97,7 +97,7 @@ define void @s_s_madmk_f32(float addrspace(1)* noalias %out, float %a, float %b)
; GCN-NOT: v_madmk_f32
; GCN: v_mad_f32
; GCN: s_endpgm
-define void @v_s_madmk_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in, float %b) nounwind {
+define amdgpu_kernel void @v_s_madmk_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in, float %b) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -113,7 +113,7 @@ define void @v_s_madmk_f32(float addrspace(1)* noalias %out, float addrspace(1)*
; GCN-NOT: v_madmk_f32
; GCN: v_mac_f32_e32
; GCN: s_endpgm
-define void @scalar_vector_madmk_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in, float %a) nounwind {
+define amdgpu_kernel void @scalar_vector_madmk_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in, float %a) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -130,7 +130,7 @@ define void @scalar_vector_madmk_f32(float addrspace(1)* noalias %out, float add
; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000
; GCN: v_mad_f32 {{v[0-9]+}}, [[VK]], |[[VA]]|, [[VB]]
-define void @no_madmk_src0_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @no_madmk_src0_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -151,7 +151,7 @@ define void @no_madmk_src0_modifier_f32(float addrspace(1)* noalias %out, float
; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; GCN: v_mad_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, |{{[sv][0-9]+}}|
-define void @no_madmk_src2_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @no_madmk_src2_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
@@ -172,7 +172,7 @@ define void @no_madmk_src2_modifier_f32(float addrspace(1)* noalias %out, float
; GCN: buffer_load_dword [[A:v[0-9]+]]
; GCN: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000
; GCN: v_mad_f32 {{v[0-9]+}}, [[VK]], [[A]], 2.0
-define void @madmk_add_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @madmk_add_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -189,7 +189,7 @@ define void @madmk_add_inline_imm_f32(float addrspace(1)* noalias %out, float ad
; SI: s_xor_b64
; SI: v_mac_f32_e32 {{v[0-9]+}}, 0x472aee8c, {{v[0-9]+}}
; SI: s_or_b64
-define void @kill_madmk_verifier_error() nounwind {
+define amdgpu_kernel void @kill_madmk_verifier_error() nounwind {
bb:
br label %bb2
diff --git a/test/CodeGen/AMDGPU/max.i16.ll b/test/CodeGen/AMDGPU/max.i16.ll
index 3f2a87f20691..abd75258c4d4 100644
--- a/test/CodeGen/AMDGPU/max.i16.ll
+++ b/test/CodeGen/AMDGPU/max.i16.ll
@@ -1,12 +1,10 @@
-; RUN: llc < %s -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs | FileCheck -check-prefix=GCN -check-prefix=VI %s
-
-
-declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=VIPLUS %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -check-prefix=VIPLUS %s

; FIXME: Need to handle non-uniform case for function below (load without gep).
; GCN-LABEL: {{^}}v_test_imax_sge_i16:
-; VI: v_max_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_test_imax_sge_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr) nounwind {
+; VIPLUS: v_max_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @v_test_imax_sge_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep0 = getelementptr i16, i16 addrspace(1)* %aptr, i32 %tid
%gep1 = getelementptr i16, i16 addrspace(1)* %bptr, i32 %tid
@@ -20,12 +18,56 @@ define void @v_test_imax_sge_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr
}

; FIXME: Need to handle non-uniform case for function below (load without gep).
-; GCN-LABEL: {{^}}v_test_imax_sge_v4i16:
+; GCN-LABEL: {{^}}v_test_imax_sge_v2i16:
; VI: v_max_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_max_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+
+; GFX9: v_pk_max_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @v_test_imax_sge_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %aptr, <2 x i16> addrspace(1)* %bptr) nounwind {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+ %gep0 = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %bptr, i32 %tid
+ %outgep = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
+ %a = load <2 x i16>, <2 x i16> addrspace(1)* %gep0, align 4
+ %b = load <2 x i16>, <2 x i16> addrspace(1)* %gep1, align 4
+ %cmp = icmp sge <2 x i16> %a, %b
+ %val = select <2 x i1> %cmp, <2 x i16> %a, <2 x i16> %b
+ store <2 x i16> %val, <2 x i16> addrspace(1)* %outgep, align 4
+ ret void
+}
+
+; FIXME: Need to handle non-uniform case for function below (load without gep).
+; GCN-LABEL: {{^}}v_test_imax_sge_v3i16:
+; VI: v_max_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_max_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI: v_max_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI-NOT: v_max_i16
+
+; GFX9: v_pk_max_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GFX9: v_pk_max_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @v_test_imax_sge_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> addrspace(1)* %aptr, <3 x i16> addrspace(1)* %bptr) nounwind {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+ %gep0 = getelementptr <3 x i16>, <3 x i16> addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr <3 x i16>, <3 x i16> addrspace(1)* %bptr, i32 %tid
+ %outgep = getelementptr <3 x i16>, <3 x i16> addrspace(1)* %out, i32 %tid
+ %a = load <3 x i16>, <3 x i16> addrspace(1)* %gep0, align 4
+ %b = load <3 x i16>, <3 x i16> addrspace(1)* %gep1, align 4
+ %cmp = icmp sge <3 x i16> %a, %b
+ %val = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b
+ store <3 x i16> %val, <3 x i16> addrspace(1)* %outgep, align 4
+ ret void
+}
+
+; FIXME: Need to handle non-uniform case for function below (load without gep).
+; GCN-LABEL: {{^}}v_test_imax_sge_v4i16:
; VI: v_max_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_max_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI: v_max_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_test_imax_sge_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %aptr, <4 x i16> addrspace(1)* %bptr) nounwind {
+; VI: v_max_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+
+; GFX9: v_pk_max_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GFX9: v_pk_max_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @v_test_imax_sge_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %aptr, <4 x i16> addrspace(1)* %bptr) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep0 = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %aptr, i32 %tid
%gep1 = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %bptr, i32 %tid
@@ -40,8 +82,8 @@ define void @v_test_imax_sge_v4i16(<4 x i16> addrs

; FIXME: Need to handle non-uniform case for function below (load without gep).
; GCN-LABEL: {{^}}v_test_imax_sgt_i16:
-; VI: v_max_i16_e32
-define void @v_test_imax_sgt_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr) nounwind {
+; VIPLUS: v_max_i16_e32
+define amdgpu_kernel void @v_test_imax_sgt_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep0 = getelementptr i16, i16 addrspace(1)* %aptr, i32 %tid
%gep1 = getelementptr i16, i16 addrspace(1)* %bptr, i32 %tid
@@ -56,8 +98,8 @@ define void @v_test_imax_sgt_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr

; FIXME: Need to handle non-uniform case for function below (load without gep).
; GCN-LABEL: {{^}}v_test_umax_uge_i16:
-; VI: v_max_u16_e32
-define void @v_test_umax_uge_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr) nounwind {
+; VIPLUS: v_max_u16_e32
+define amdgpu_kernel void @v_test_umax_uge_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep0 = getelementptr i16, i16 addrspace(1)* %aptr, i32 %tid
%gep1 = getelementptr i16, i16 addrspace(1)* %bptr, i32 %tid
@@ -72,8 +114,8 @@ define void @v_test_umax_uge_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr

; FIXME: Need to handle non-uniform case for function below (load without gep).
; GCN-LABEL: {{^}}v_test_umax_ugt_i16:
-; VI: v_max_u16_e32
-define void @v_test_umax_ugt_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr) nounwind {
+; VIPLUS: v_max_u16_e32
+define amdgpu_kernel void @v_test_umax_ugt_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep0 = getelementptr i16, i16 addrspace(1)* %aptr, i32 %tid
%gep1 = getelementptr i16, i16 addrspace(1)* %bptr, i32 %tid
@@ -85,3 +127,23 @@ define void @v_test_umax_ugt_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr
store i16 %val, i16 addrspace(1)* %outgep, align 4
ret void
}
+
+; GCN-LABEL: {{^}}v_test_umax_ugt_v2i16:
+; VI: v_max_u16_e32
+; VI: v_max_u16_sdwa
+
+; GFX9: v_pk_max_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @v_test_umax_ugt_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %aptr, <2 x i16> addrspace(1)* %bptr) nounwind {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+ %gep0 = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %bptr, i32 %tid
+ %outgep = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
+ %a = load <2 x i16>, <2 x i16> addrspace(1)* %gep0, align 4
+ %b = load <2 x i16>, <2 x i16> addrspace(1)* %gep1, align 4
+ %cmp = icmp ugt <2 x i16> %a, %b
+ %val = select <2 x i1> %cmp, <2 x i16> %a, <2 x i16> %b
+ store <2 x i16> %val, <2 x i16> addrspace(1)* %outgep, align 4
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
diff --git a/test/CodeGen/AMDGPU/max.ll b/test/CodeGen/AMDGPU/max.ll
index 5fa307be0fd5..ffcdac03bc74 100644
--- a/test/CodeGen/AMDGPU/max.ll
+++ b/test/CodeGen/AMDGPU/max.ll
@@ -6,7 +6,7 @@
; SI: v_max_i32_e32
; EG: MAX_INT
-define void @v_test_imax_sge_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+define amdgpu_kernel void @v_test_imax_sge_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%a = load i32, i32 addrspace(1)* %aptr, align 4
%b = load i32, i32 addrspace(1)* %bptr, align 4
%cmp = icmp sge i32 %a, %b
@@ -26,7 +26,7 @@ define void @v_test_imax_sge_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr
; EG: MAX_INT
; EG: MAX_INT
; EG: MAX_INT
-define void @v_test_imax_sge_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %aptr, <4 x i32> addrspace(1)* %bptr) nounwind {
+define amdgpu_kernel void @v_test_imax_sge_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %aptr, <4 x i32> addrspace(1)* %bptr) nounwind {
%a = load <4 x i32>, <4 x i32> addrspace(1)* %aptr, align 4
%b = load <4 x i32>, <4 x i32> addrspace(1)* %bptr, align 4
%cmp = icmp sge <4 x i32> %a, %b
@@ -39,7 +39,7 @@ define void @v_test_imax_sge_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrs
; SI: s_max_i32
; EG: MAX_INT
-define void @s_test_imax_sge_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @s_test_imax_sge_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%cmp = icmp sge i32 %a, %b
%val = select i1 %cmp, i32 %a, i32 %b
store i32 %val, i32 addrspace(1)* %out, align 4
@@ -50,7 +50,7 @@ define void @s_test_imax_sge_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwin
; SI: s_max_i32 {{s[0-9]+}}, {{s[0-9]+}}, 9
; EG: MAX_INT {{.*}}literal.{{[xyzw]}}
-define void @s_test_imax_sge_imm_i32(i32 addrspace(1)* %out, i32 %a) nounwind {
+define amdgpu_kernel void @s_test_imax_sge_imm_i32(i32 addrspace(1)* %out, i32 %a) nounwind {
%cmp = icmp sge i32 %a, 9
%val = select i1 %cmp, i32 %a, i32 9
store i32 %val, i32 addrspace(1)* %out, align 4
@@ -63,7 +63,7 @@ define void @s_test_imax_sge_imm_i32(i32 addrspace(1)* %out, i32 %a) nounwind {
; SI: v_max_i32_e32
; EG: MAX_INT
-define void @v_test_imax_sge_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %aptr, i8 addrspace(1)* %bptr) nounwind {
+define amdgpu_kernel void @v_test_imax_sge_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %aptr, i8 addrspace(1)* %bptr) nounwind {
%a = load i8, i8 addrspace(1)* %aptr, align 1
%b = load i8, i8 addrspace(1)* %bptr, align 1
%cmp = icmp sge i8 %a, %b
@@ -76,7 +76,7 @@ define void @v_test_imax_sge_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %aptr, i
; SI: s_max_i32 {{s[0-9]+}}, {{s[0-9]+}}, 9
; EG: MAX_INT {{.*}}literal.{{[xyzw]}}
-define void @s_test_imax_sgt_imm_i32(i32 addrspace(1)* %out, i32 %a) nounwind {
+define amdgpu_kernel void @s_test_imax_sgt_imm_i32(i32 addrspace(1)* %out, i32 %a) nounwind {
%cmp = icmp sgt i32 %a, 9
%val = select i1 %cmp, i32 %a, i32 9
store i32 %val, i32 addrspace(1)* %out, align 4
@@ -89,7 +89,7 @@ define void @s_test_imax_sgt_imm_i32(i32 addrspace(1)* %out, i32 %a) nounwind {
; EG: MAX_INT {{.*}}literal.{{[xyzw]}}
; EG: MAX_INT {{.*}}literal.{{[xyzw]}}
-define void @s_test_imax_sgt_imm_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a) nounwind {
+define amdgpu_kernel void @s_test_imax_sgt_imm_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a) nounwind {
%cmp = icmp sgt <2 x i32> %a, <i32 9, i32 9>
%val = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> <i32 9, i32 9>
store <2 x i32> %val, <2 x i32> addrspace(1)* %out, align 4
@@ -100,7 +100,7 @@ define void @s_test_imax_sgt_imm_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %
; SI: v_max_i32_e32
; EG: MAX_INT
-define void @v_test_imax_sgt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+define amdgpu_kernel void @v_test_imax_sgt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%a = load i32, i32 addrspace(1)* %aptr, align 4
%b = load i32, i32 addrspace(1)* %bptr, align 4
%cmp = icmp sgt i32 %a, %b
@@ -113,7 +113,7 @@ define void @v_test_imax_sgt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr
; SI: s_max_i32
; EG: MAX_INT
-define void @s_test_imax_sgt_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @s_test_imax_sgt_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%cmp = icmp sgt i32 %a, %b
%val = select i1 %cmp, i32 %a, i32 %b
store i32 %val, i32 addrspace(1)* %out, align 4
@@ -124,7 +124,7 @@ define void @s_test_imax_sgt_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwin
; SI: v_max_u32_e32
; EG: MAX_UINT
-define void @v_test_umax_uge_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+define amdgpu_kernel void @v_test_umax_uge_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%a = load i32, i32 addrspace(1)* %aptr, align 4
%b = load i32, i32 addrspace(1)* %bptr, align 4
%cmp = icmp uge i32 %a, %b
@@ -137,7 +137,7 @@ define void @v_test_umax_uge_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr
; SI: s_max_u32
; EG: MAX_UINT
-define void @s_test_umax_uge_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @s_test_umax_uge_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%cmp = icmp uge i32 %a, %b
%val = select i1 %cmp, i32 %a, i32 %b
store i32 %val, i32 addrspace(1)* %out, align 4
@@ -155,7 +155,7 @@ define void @s_test_umax_uge_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwin
; EG: MAX_UINT
; EG: MAX_UINT
; EG-NOT: MAX_UINT
-define void @s_test_umax_uge_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> %a, <3 x i32> %b) nounwind {
+define amdgpu_kernel void @s_test_umax_uge_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> %a, <3 x i32> %b) nounwind {
%cmp = icmp uge <3 x i32> %a, %b
%val = select <3 x i1> %cmp, <3 x i32> %a, <3 x i32> %b
store <3 x i32> %val, <3 x i32> addrspace(1)* %out, align 4
@@ -168,7 +168,7 @@ define void @s_test_umax_uge_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> %a, <
; SI: v_max_u32_e32
; EG: MAX_UINT
-define void @v_test_umax_uge_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %aptr, i8 addrspace(1)* %bptr) nounwind {
+define amdgpu_kernel void @v_test_umax_uge_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %aptr, i8 addrspace(1)* %bptr) nounwind {
%a = load i8, i8 addrspace(1)* %aptr, align 1
%b = load i8, i8 addrspace(1)* %bptr, align 1
%cmp = icmp uge i8 %a, %b
@@ -181,7 +181,7 @@ define void @v_test_umax_uge_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %aptr, i
; SI: v_max_u32_e32
; EG: MAX_UINT
-define void @v_test_umax_ugt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+define amdgpu_kernel void @v_test_umax_ugt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%a = load i32, i32 addrspace(1)* %aptr, align 4
%b = load i32, i32 addrspace(1)* %bptr, align 4
%cmp = icmp ugt i32 %a, %b
@@ -194,7 +194,7 @@ define void @v_test_umax_ugt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr
; SI: s_max_u32
; EG: MAX_UINT
-define void @s_test_umax_ugt_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @s_test_umax_ugt_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%cmp = icmp ugt i32 %a, %b
%val = select i1 %cmp, i32 %a, i32 %b
store i32 %val, i32 addrspace(1)* %out, align 4
@@ -207,7 +207,7 @@ define void @s_test_umax_ugt_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwin
; EG: MAX_UINT {{.*}}literal.{{[xyzw]}}
; EG: MAX_UINT {{.*}}literal.{{[xyzw]}}
-define void @s_test_umax_ugt_imm_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a) nounwind {
+define amdgpu_kernel void @s_test_umax_ugt_imm_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a) nounwind {
%cmp = icmp ugt <2 x i32> %a, <i32 15, i32 23>
%val = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> <i32 15, i32 23>
store <2 x i32> %val, <2 x i32> addrspace(1)* %out, align 4
@@ -223,7 +223,7 @@ define void @s_test_umax_ugt_imm_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %
; SI: buffer_store_dword [[VMAX]]
; EG: MAX_UINT
-define void @simplify_demanded_bits_test_umax_ugt_i16(i32 addrspace(1)* %out, i16 zeroext %a, i16 zeroext %b) nounwind {
+define amdgpu_kernel void @simplify_demanded_bits_test_umax_ugt_i16(i32 addrspace(1)* %out, i16 zeroext %a, i16 zeroext %b) nounwind {
%a.ext = zext i16 %a to i32
%b.ext = zext i16 %b to i32
%cmp = icmp ugt i32 %a.ext, %b.ext
@@ -243,7 +243,7 @@ define void @simplify_demanded_bits_test_umax_ugt_i16(i32 addrspace(1)* %out, i1
; SI: buffer_store_dword [[VMAX]]
; EG: MAX_INT
-define void @simplify_demanded_bits_test_max_slt_i16(i32 addrspace(1)* %out, i16 signext %a, i16 signext %b) nounwind {
+define amdgpu_kernel void @simplify_demanded_bits_test_max_slt_i16(i32 addrspace(1)* %out, i16 signext %a, i16 signext %b) nounwind {
%a.ext = sext i16 %a to i32
%b.ext = sext i16 %b to i32
%cmp = icmp sgt i32 %a.ext, %b.ext
@@ -262,7 +262,7 @@ define void @simplify_demanded_bits_test_max_slt_i16(i32 addrspace(1)* %out, i16
; SI: s_max_i32
; EG: MAX_INT
-define void @s_test_imax_sge_i16(i16 addrspace(1)* %out, i16 %a, i16 %b) nounwind {
+define amdgpu_kernel void @s_test_imax_sge_i16(i16 addrspace(1)* %out, i16 %a, i16 %b) nounwind {
%cmp = icmp sge i16 %a, %b
%val = select i1 %cmp, i16 %a, i16 %b
store i16 %val, i16 addrspace(1)* %out
@@ -275,7 +275,7 @@ define void @s_test_imax_sge_i16(i16 addrspace(1)* %out, i16 %a, i16 %b) nounwin
; EG: MAX_UINT
; EG: MAX_UINT
-define void @test_umax_ugt_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @test_umax_ugt_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%tmp = icmp ugt i64 %a, %b
%val = select i1 %tmp, i64 %a, i64 %b
store i64 %val, i64 addrspace(1)* %out, align 8
@@ -287,7 +287,7 @@ define void @test_umax_ugt_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind
; EG: MAX_UINT
; EG: MAX_UINT
-define void @test_umax_uge_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @test_umax_uge_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%tmp = icmp uge i64 %a, %b
%val = select i1 %tmp, i64 %a, i64 %b
store i64 %val, i64 addrspace(1)* %out, align 8
@@ -299,7 +299,7 @@ define void @test_umax_uge_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind
; EG-DAG: MAX_UINT
; EG-DAG: MAX_INT
-define void @test_imax_sgt_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @test_imax_sgt_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%tmp = icmp sgt i64 %a, %b
%val = select i1 %tmp, i64 %a, i64 %b
store i64 %val, i64 addrspace(1)* %out, align 8
@@ -311,7 +311,7 @@ define void @test_imax_sgt_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind
; EG-DAG: MAX_UINT
; EG-DAG: MAX_INT
-define void @test_imax_sge_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @test_imax_sge_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%tmp = icmp sge i64 %a, %b
%val = select i1 %tmp, i64 %a, i64 %b
store i64 %val, i64 addrspace(1)* %out, align 8
diff --git a/test/CodeGen/AMDGPU/max3.ll b/test/CodeGen/AMDGPU/max3.ll
index a12dba2eb6e9..4bb4fd46becd 100644
--- a/test/CodeGen/AMDGPU/max3.ll
+++ b/test/CodeGen/AMDGPU/max3.ll
@@ -4,7 +4,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
; FUNC-LABEL: @v_test_imax3_sgt_i32
; SI: v_max3_i32
-define void @v_test_imax3_sgt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr, i32 addrspace(1)* %cptr) nounwind {
+define amdgpu_kernel void @v_test_imax3_sgt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr, i32 addrspace(1)* %cptr) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
%gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
@@ -23,7 +23,7 @@ define void @v_test_imax3_sgt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %apt
; FUNC-LABEL: @v_test_umax3_ugt_i32
; SI: v_max3_u32
-define void @v_test_umax3_ugt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr, i32 addrspace(1)* %cptr) nounwind {
+define amdgpu_kernel void @v_test_umax3_ugt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr, i32 addrspace(1)* %cptr) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
%gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
diff --git a/test/CodeGen/AMDGPU/mem-builtins.ll b/test/CodeGen/AMDGPU/mem-builtins.ll
index 97512670f59e..1cbd0c327510 100644
--- a/test/CodeGen/AMDGPU/mem-builtins.ll
+++ b/test/CodeGen/AMDGPU/mem-builtins.ll
@@ -9,7 +9,7 @@ declare i32 @strcmp(i8* nocapture, i8* nocapture) #1
; ERROR: error: <unknown>:0:0: in function test_memcmp void (i8 addrspace(1)*, i8 addrspace(1)*, i32*): unsupported call to function memcmp
-define void @test_memcmp(i8 addrspace(1)* %x, i8 addrspace(1)* %y, i32* nocapture %p) #0 {
+define amdgpu_kernel void @test_memcmp(i8 addrspace(1)* %x, i8 addrspace(1)* %y, i32* nocapture %p) #0 {
entry:
%cmp = tail call i32 @memcmp(i8 addrspace(1)* %x, i8 addrspace(1)* %y, i64 2)
store volatile i32 %cmp, i32 addrspace(1)* undef
@@ -17,35 +17,35 @@ entry:
}

; ERROR: error: <unknown>:0:0: in function test_memchr void (i8 addrspace(1)*, i32, i64): unsupported call to function memchr
-define void @test_memchr(i8 addrspace(1)* %src, i32 %char, i64 %len) #0 {
+define amdgpu_kernel void @test_memchr(i8 addrspace(1)* %src, i32 %char, i64 %len) #0 {
%res = call i8 addrspace(1)* @memchr(i8 addrspace(1)* %src, i32 %char, i64 %len)
store volatile i8 addrspace(1)* %res, i8 addrspace(1)* addrspace(1)* undef
ret void
}

; ERROR: error: <unknown>:0:0: in function test_strcpy void (i8*, i8*): unsupported call to function strcpy
-define void @test_strcpy(i8* %dst, i8* %src) #0 {
+define amdgpu_kernel void @test_strcpy(i8* %dst, i8* %src) #0 {
%res = call i8* @strcpy(i8* %dst, i8* %src)
store volatile i8* %res, i8* addrspace(1)* undef
ret void
}

; ERROR: error: <unknown>:0:0: in function test_strcmp void (i8*, i8*): unsupported call to function strcmp
-define void @test_strcmp(i8* %src0, i8* %src1) #0 {
+define amdgpu_kernel void @test_strcmp(i8* %src0, i8* %src1) #0 {
%res = call i32 @strcmp(i8* %src0, i8* %src1)
store volatile i32 %res, i32 addrspace(1)* undef
ret void
}

; ERROR: error: <unknown>:0:0: in function test_strlen void (i8*): unsupported call to function strlen
-define void @test_strlen(i8* %src) #0 {
+define amdgpu_kernel void @test_strlen(i8* %src) #0 {
%res = call i32 @strlen(i8* %src)
store volatile i32 %res, i32 addrspace(1)* undef
ret void
}

; ERROR: error: <unknown>:0:0: in function test_strnlen void (i8*, i32): unsupported call to function strnlen
-define void @test_strnlen(i8* %src, i32 %size) #0 {
+define amdgpu_kernel void @test_strnlen(i8* %src, i32 %size) #0 {
%res = call i32 @strnlen(i8* %src, i32 %size)
store volatile i32 %res, i32 addrspace(1)* undef
ret void
diff --git a/test/CodeGen/AMDGPU/merge-stores.ll b/test/CodeGen/AMDGPU/merge-stores.ll
index 07104ebc8c97..dfd5b97fcc86 100644
--- a/test/CodeGen/AMDGPU/merge-stores.ll
+++ b/test/CodeGen/AMDGPU/merge-stores.ll
@@ -1,8 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-NOAA %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-NOAA %s
-
-; RUN: llc -march=amdgcn -verify-machineinstrs -combiner-alias-analysis -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -combiner-alias-analysis -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s
+; RUN: llc -march=amdgcn -verify-machineinstrs -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -amdgpu-load-store-vectorizer=0 < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=GCN-AA %s
; This test is mostly to test DAG store merging, so disable the vectorizer.
; Run with devices with different unaligned load restrictions.
@@ -16,7 +13,7 @@
; GCN: buffer_store_byte
; GCN: buffer_store_byte
; GCN: s_endpgm
-define void @merge_global_store_2_constants_i8(i8 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_i8(i8 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
store i8 123, i8 addrspace(1)* %out.gep.1
@@ -28,7 +25,7 @@ define void @merge_global_store_2_constants_i8(i8 addrspace(1)* %out) #0 {
; GCN: buffer_store_byte
; GCN: buffer_store_byte
; GCN: s_endpgm
-define void @merge_global_store_2_constants_i8_natural_align(i8 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_i8_natural_align(i8 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
store i8 123, i8 addrspace(1)* %out.gep.1
@@ -38,7 +35,7 @@ define void @merge_global_store_2_constants_i8_natural_align(i8 addrspace(1)* %o
; GCN-LABEL: {{^}}merge_global_store_2_constants_i16:
; GCN: buffer_store_dword v
-define void @merge_global_store_2_constants_i16(i16 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_i16(i16 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
store i16 123, i16 addrspace(1)* %out.gep.1
@@ -48,7 +45,7 @@ define void @merge_global_store_2_constants_i16(i16 addrspace(1)* %out) #0 {
; GCN-LABEL: {{^}}merge_global_store_2_constants_0_i16:
; GCN: buffer_store_dword v
-define void @merge_global_store_2_constants_0_i16(i16 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_0_i16(i16 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
store i16 0, i16 addrspace(1)* %out.gep.1
@@ -60,7 +57,7 @@ define void @merge_global_store_2_constants_0_i16(i16 addrspace(1)* %out) #0 {
; GCN: buffer_store_short
; GCN: buffer_store_short
; GCN: s_endpgm
-define void @merge_global_store_2_constants_i16_natural_align(i16 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_i16_natural_align(i16 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
store i16 123, i16 addrspace(1)* %out.gep.1
@@ -72,7 +69,7 @@ define void @merge_global_store_2_constants_i16_natural_align(i16 addrspace(1)*
; SI-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0x1c8
; SI-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0x7b
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @merge_global_store_2_constants_i32(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_i32(i32 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
store i32 123, i32 addrspace(1)* %out.gep.1
@@ -82,7 +79,7 @@ define void @merge_global_store_2_constants_i32(i32 addrspace(1)* %out) #0 {
; GCN-LABEL: {{^}}merge_global_store_2_constants_i32_f32:
; GCN: buffer_store_dwordx2
-define void @merge_global_store_2_constants_i32_f32(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_i32_f32(i32 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%out.gep.1.bc = bitcast i32 addrspace(1)* %out.gep.1 to float addrspace(1)*
store float 1.0, float addrspace(1)* %out.gep.1.bc
@@ -94,7 +91,7 @@ define void @merge_global_store_2_constants_i32_f32(i32 addrspace(1)* %out) #0 {
; SI-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], 4.0
; SI-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], 0x7b
; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
-define void @merge_global_store_2_constants_f32_i32(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_f32_i32(float addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
%out.gep.1.bc = bitcast float addrspace(1)* %out.gep.1 to i32 addrspace(1)*
store i32 123, i32 addrspace(1)* %out.gep.1.bc
@@ -108,7 +105,7 @@ define void @merge_global_store_2_constants_f32_i32(float addrspace(1)* %out) #0
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x7b{{$}}
; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0x4d2{{$}}
; GCN: buffer_store_dwordx4 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @merge_global_store_4_constants_i32(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_4_constants_i32(i32 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
@@ -122,7 +119,7 @@ define void @merge_global_store_4_constants_i32(i32 addrspace(1)* %out) #0 {
; GCN-LABEL: {{^}}merge_global_store_4_constants_f32_order:
; GCN: buffer_store_dwordx4
-define void @merge_global_store_4_constants_f32_order(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_4_constants_f32_order(float addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3
@@ -137,7 +134,7 @@ define void @merge_global_store_4_constants_f32_order(float addrspace(1)* %out)
; First store is out of order.
; GCN-LABEL: {{^}}merge_global_store_4_constants_f32:
; GCN: buffer_store_dwordx4
-define void @merge_global_store_4_constants_f32(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_4_constants_f32(float addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3
@@ -150,14 +147,9 @@ define void @merge_global_store_4_constants_f32(float addrspace(1)* %out) #0 {
}
; GCN-LABEL: {{^}}merge_global_store_4_constants_mixed_i32_f32:
-; GCN-NOAA: buffer_store_dwordx4 v
-
-; GCN-AA: buffer_store_dwordx2
-; GCN-AA: buffer_store_dword v
-; GCN-AA: buffer_store_dword v
-
+; GCN-AA: buffer_store_dwordx4 v
; GCN: s_endpgm
-define void @merge_global_store_4_constants_mixed_i32_f32(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_4_constants_mixed_i32_f32(float addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3
@@ -177,7 +169,7 @@ define void @merge_global_store_4_constants_mixed_i32_f32(float addrspace(1)* %o
; SI-DAG: buffer_store_dword
; SI-NOT: buffer_store_dword
; GCN: s_endpgm
-define void @merge_global_store_3_constants_i32(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_3_constants_i32(i32 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
@@ -189,7 +181,7 @@ define void @merge_global_store_3_constants_i32(i32 addrspace(1)* %out) #0 {
; GCN-LABEL: {{^}}merge_global_store_2_constants_i64:
; GCN: buffer_store_dwordx4
-define void @merge_global_store_2_constants_i64(i64 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_i64(i64 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i64, i64 addrspace(1)* %out, i64 1
store i64 123, i64 addrspace(1)* %out.gep.1
@@ -200,7 +192,7 @@ define void @merge_global_store_2_constants_i64(i64 addrspace(1)* %out) #0 {
; GCN-LABEL: {{^}}merge_global_store_4_constants_i64:
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
-define void @merge_global_store_4_constants_i64(i64 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_4_constants_i64(i64 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i64, i64 addrspace(1)* %out, i64 1
%out.gep.2 = getelementptr i64, i64 addrspace(1)* %out, i64 2
%out.gep.3 = getelementptr i64, i64 addrspace(1)* %out, i64 3
@@ -215,7 +207,7 @@ define void @merge_global_store_4_constants_i64(i64 addrspace(1)* %out) #0 {
; GCN-LABEL: {{^}}merge_global_store_2_adjacent_loads_i32:
; GCN: buffer_load_dwordx2 [[LOAD:v\[[0-9]+:[0-9]+\]]]
; GCN: buffer_store_dwordx2 [[LOAD]]
-define void @merge_global_store_2_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_2_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
@@ -230,7 +222,7 @@ define void @merge_global_store_2_adjacent_loads_i32(i32 addrspace(1)* %out, i32
; GCN-LABEL: {{^}}merge_global_store_2_adjacent_loads_i32_nonzero_base:
; GCN: buffer_load_dwordx2 [[LOAD:v\[[0-9]+:[0-9]+\]]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:8
; GCN: buffer_store_dwordx2 [[LOAD]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:8
-define void @merge_global_store_2_adjacent_loads_i32_nonzero_base(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_2_adjacent_loads_i32_nonzero_base(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%in.gep.0 = getelementptr i32, i32 addrspace(1)* %in, i32 2
%in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 3
@@ -249,7 +241,7 @@ define void @merge_global_store_2_adjacent_loads_i32_nonzero_base(i32 addrspace(
; GCN: buffer_load_dword v
; GCN: buffer_store_dword v
; GCN: buffer_store_dword v
-define void @merge_global_store_2_adjacent_loads_shuffle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_2_adjacent_loads_shuffle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
@@ -264,7 +256,7 @@ define void @merge_global_store_2_adjacent_loads_shuffle_i32(i32 addrspace(1)* %
; GCN-LABEL: {{^}}merge_global_store_4_adjacent_loads_i32:
; GCN: buffer_load_dwordx4 [[LOAD:v\[[0-9]+:[0-9]+\]]]
; GCN: buffer_store_dwordx4 [[LOAD]]
-define void @merge_global_store_4_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_4_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
@@ -291,7 +283,7 @@ define void @merge_global_store_4_adjacent_loads_i32(i32 addrspace(1)* %out, i32
; SI-DAG: buffer_store_dword v
; SI-DAG: buffer_store_dwordx2 v
; GCN: s_endpgm
-define void @merge_global_store_3_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_3_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
%in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
@@ -310,7 +302,7 @@ define void @merge_global_store_3_adjacent_loads_i32(i32 addrspace(1)* %out, i32
; GCN-LABEL: {{^}}merge_global_store_4_adjacent_loads_f32:
; GCN: buffer_load_dwordx4 [[LOAD:v\[[0-9]+:[0-9]+\]]]
; GCN: buffer_store_dwordx4 [[LOAD]]
-define void @merge_global_store_4_adjacent_loads_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_4_adjacent_loads_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3
@@ -333,7 +325,7 @@ define void @merge_global_store_4_adjacent_loads_f32(float addrspace(1)* %out, f
; GCN-LABEL: {{^}}merge_global_store_4_adjacent_loads_i32_nonzero_base:
; GCN: buffer_load_dwordx4 [[LOAD:v\[[0-9]+:[0-9]+\]]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:44
; GCN: buffer_store_dwordx4 [[LOAD]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:28
-define void @merge_global_store_4_adjacent_loads_i32_nonzero_base(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_4_adjacent_loads_i32_nonzero_base(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%in.gep.0 = getelementptr i32, i32 addrspace(1)* %in, i32 11
%in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 12
%in.gep.2 = getelementptr i32, i32 addrspace(1)* %in, i32 13
@@ -359,7 +351,7 @@ define void @merge_global_store_4_adjacent_loads_i32_nonzero_base(i32 addrspace(
; GCN: buffer_load_dwordx4 [[LOAD:v\[[0-9]+:[0-9]+\]]]
; GCN: s_barrier
; GCN: buffer_store_dwordx4 [[LOAD]]
-define void @merge_global_store_4_adjacent_loads_inverse_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_4_adjacent_loads_inverse_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
@@ -396,7 +388,7 @@ define void @merge_global_store_4_adjacent_loads_inverse_i32(i32 addrspace(1)* %
; GCN: buffer_store_dword v
; GCN: buffer_store_dword v
; GCN: buffer_store_dword v
-define void @merge_global_store_4_adjacent_loads_shuffle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_4_adjacent_loads_shuffle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
@@ -424,7 +416,7 @@ define void @merge_global_store_4_adjacent_loads_shuffle_i32(i32 addrspace(1)* %
; GCN: buffer_load_dword [[LOAD:v[0-9]+]]
; GCN: buffer_store_dword [[LOAD]]
; GCN: s_endpgm
-define void @merge_global_store_4_adjacent_loads_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_4_adjacent_loads_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i8 1
%out.gep.2 = getelementptr i8, i8 addrspace(1)* %out, i8 2
%out.gep.3 = getelementptr i8, i8 addrspace(1)* %out, i8 3
@@ -454,7 +446,7 @@ define void @merge_global_store_4_adjacent_loads_i8(i8 addrspace(1)* %out, i8 ad
; GCN: buffer_store_byte
; GCN: buffer_store_byte
; GCN: s_endpgm
-define void @merge_global_store_4_adjacent_loads_i8_natural_align(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_4_adjacent_loads_i8_natural_align(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i8 1
%out.gep.2 = getelementptr i8, i8 addrspace(1)* %out, i8 2
%out.gep.3 = getelementptr i8, i8 addrspace(1)* %out, i8 3
@@ -474,19 +466,11 @@ define void @merge_global_store_4_adjacent_loads_i8_natural_align(i8 addrspace(1
ret void
}
-; This works once AA is enabled on the subtarget
; GCN-LABEL: {{^}}merge_global_store_4_vector_elts_loads_v4i32:
; GCN: buffer_load_dwordx4 [[LOAD:v\[[0-9]+:[0-9]+\]]]
-
-; GCN-NOAA: buffer_store_dword v
-; GCN-NOAA: buffer_store_dword v
-; GCN-NOAA: buffer_store_dword v
-; GCN-NOAA: buffer_store_dword v
-
-; GCN-AA: buffer_store_dwordx4 [[LOAD]]
-
+; GCN: buffer_store_dwordx4 [[LOAD]]
; GCN: s_endpgm
-define void @merge_global_store_4_vector_elts_loads_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_4_vector_elts_loads_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
@@ -508,7 +492,7 @@ define void @merge_global_store_4_vector_elts_loads_v4i32(i32 addrspace(1)* %out
; GCN: ds_write_b8
; GCN: ds_write_b8
; GCN: s_endpgm
-define void @merge_local_store_2_constants_i8(i8 addrspace(3)* %out) #0 {
+define amdgpu_kernel void @merge_local_store_2_constants_i8(i8 addrspace(3)* %out) #0 {
%out.gep.1 = getelementptr i8, i8 addrspace(3)* %out, i32 1
store i8 123, i8 addrspace(3)* %out.gep.1
@@ -520,7 +504,7 @@ define void @merge_local_store_2_constants_i8(i8 addrspace(3)* %out) #0 {
; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0x1c8
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0x7b
; GCN: ds_write2_b32 v{{[0-9]+}}, v[[LO]], v[[HI]] offset1:1{{$}}
-define void @merge_local_store_2_constants_i32(i32 addrspace(3)* %out) #0 {
+define amdgpu_kernel void @merge_local_store_2_constants_i32(i32 addrspace(3)* %out) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(3)* %out, i32 1
store i32 123, i32 addrspace(3)* %out.gep.1
@@ -538,7 +522,7 @@ define void @merge_local_store_2_constants_i32(i32 addrspace(3)* %out) #0 {
; GCN-DAG: ds_write2_b32 v{{[0-9]+}}, [[K0]], [[K1]] offset1:1
; GCN: s_endpgm
-define void @merge_local_store_4_constants_i32(i32 addrspace(3)* %out) #0 {
+define amdgpu_kernel void @merge_local_store_4_constants_i32(i32 addrspace(3)* %out) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(3)* %out, i32 1
%out.gep.2 = getelementptr i32, i32 addrspace(3)* %out, i32 2
%out.gep.3 = getelementptr i32, i32 addrspace(3)* %out, i32 3
@@ -556,7 +540,7 @@ define void @merge_local_store_4_constants_i32(i32 addrspace(3)* %out) #0 {
; GCN: buffer_store_dwordx4 v{{\[}}[[LO]]:[[HI4]]{{\]}}
; GCN: v_mov_b32_e32 v[[HI:[0-9]+]], 11{{$}}
; GCN: buffer_store_dword v[[HI]]
-define void @merge_global_store_5_constants_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @merge_global_store_5_constants_i32(i32 addrspace(1)* %out) {
store i32 9, i32 addrspace(1)* %out, align 4
%idx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
store i32 12, i32 addrspace(1)* %idx1, align 4
@@ -572,7 +556,7 @@ define void @merge_global_store_5_constants_i32(i32 addrspace(1)* %out) {
; GCN-LABEL: {{^}}merge_global_store_6_constants_i32:
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx2
-define void @merge_global_store_6_constants_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @merge_global_store_6_constants_i32(i32 addrspace(1)* %out) {
store i32 13, i32 addrspace(1)* %out, align 4
%idx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
store i32 15, i32 addrspace(1)* %idx1, align 4
@@ -591,7 +575,7 @@ define void @merge_global_store_6_constants_i32(i32 addrspace(1)* %out) {
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx2
; GCN: buffer_store_dword v
-define void @merge_global_store_7_constants_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @merge_global_store_7_constants_i32(i32 addrspace(1)* %out) {
store i32 34, i32 addrspace(1)* %out, align 4
%idx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
store i32 999, i32 addrspace(1)* %idx1, align 4
@@ -612,7 +596,7 @@ define void @merge_global_store_7_constants_i32(i32 addrspace(1)* %out) {
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
; GCN: s_endpgm
-define void @merge_global_store_8_constants_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @merge_global_store_8_constants_i32(i32 addrspace(1)* %out) {
store i32 34, i32 addrspace(1)* %out, align 4
%idx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
store i32 999, i32 addrspace(1)* %idx1, align 4
@@ -646,7 +630,7 @@ define void @merge_global_store_8_constants_i32(i32 addrspace(1)* %out) {
; GCN-DAG: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:8
; GCN: ScratchSize: 0{{$}}
-define void @copy_v3i32_align4(<3 x i32> addrspace(1)* noalias %out, <3 x i32> addrspace(1)* noalias %in) #0 {
+define amdgpu_kernel void @copy_v3i32_align4(<3 x i32> addrspace(1)* noalias %out, <3 x i32> addrspace(1)* noalias %in) #0 {
%vec = load <3 x i32>, <3 x i32> addrspace(1)* %in, align 4
store <3 x i32> %vec, <3 x i32> addrspace(1)* %out
ret void
@@ -662,7 +646,7 @@ define void @copy_v3i32_align4(<3 x i32> addrspace(1)* noalias %out, <3 x i32> a
; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
; GCN-DAG: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:16{{$}}
; GCN: ScratchSize: 0{{$}}
-define void @copy_v3i64_align4(<3 x i64> addrspace(1)* noalias %out, <3 x i64> addrspace(1)* noalias %in) #0 {
+define amdgpu_kernel void @copy_v3i64_align4(<3 x i64> addrspace(1)* noalias %out, <3 x i64> addrspace(1)* noalias %in) #0 {
%vec = load <3 x i64>, <3 x i64> addrspace(1)* %in, align 4
store <3 x i64> %vec, <3 x i64> addrspace(1)* %out
ret void
@@ -678,7 +662,7 @@ define void @copy_v3i64_align4(<3 x i64> addrspace(1)* noalias %out, <3 x i64> a
; GCN-DAG: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
; GCN-DAG: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:8
; GCN: ScratchSize: 0{{$}}
-define void @copy_v3f32_align4(<3 x float> addrspace(1)* noalias %out, <3 x float> addrspace(1)* noalias %in) #0 {
+define amdgpu_kernel void @copy_v3f32_align4(<3 x float> addrspace(1)* noalias %out, <3 x float> addrspace(1)* noalias %in) #0 {
%vec = load <3 x float>, <3 x float> addrspace(1)* %in, align 4
%fadd = fadd <3 x float> %vec, <float 1.0, float 2.0, float 4.0>
store <3 x float> %fadd, <3 x float> addrspace(1)* %out
@@ -695,7 +679,7 @@ define void @copy_v3f32_align4(<3 x float> addrspace(1)* noalias %out, <3 x floa
; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
; GCN-DAG: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:16{{$}}
; GCN: ScratchSize: 0{{$}}
-define void @copy_v3f64_align4(<3 x double> addrspace(1)* noalias %out, <3 x double> addrspace(1)* noalias %in) #0 {
+define amdgpu_kernel void @copy_v3f64_align4(<3 x double> addrspace(1)* noalias %out, <3 x double> addrspace(1)* noalias %in) #0 {
%vec = load <3 x double>, <3 x double> addrspace(1)* %in, align 4
%fadd = fadd <3 x double> %vec, <double 1.0, double 2.0, double 4.0>
store <3 x double> %fadd, <3 x double> addrspace(1)* %out
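; Aside on the ScratchSize checks above: the assembler comment reports
; per-workitem private (scratch) memory, so asserting
;   ; GCN: ScratchSize: 0{{$}}
; verifies that the align-4 <3 x N> copies are expanded into split loads and
; stores rather than being staged through a stack temporary.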
diff --git a/test/CodeGen/AMDGPU/min.ll b/test/CodeGen/AMDGPU/min.ll
index 19d0117d64a9..e85a724c1567 100644
--- a/test/CodeGen/AMDGPU/min.ll
+++ b/test/CodeGen/AMDGPU/min.ll
@@ -1,17 +1,22 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=GFX89 -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -check-prefix=GFX89 -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
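; Aside on the prefix layering added here: GFX89 is shared by the tonga (VI)
; and gfx901 (GFX9) runs, so a single check can cover both targets with a
; native 16-bit min while SI keeps its widened 32-bit form, e.g.:
;   SI:    v_min_i32_e32   (i16 is widened to 32 bits before VI)
;   GFX89: v_min_i16_e32   (VI and GFX9 both have a 16-bit min)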
; FUNC-LABEL: {{^}}v_test_imin_sle_i32:
; GCN: v_min_i32_e32
; EG: MIN_INT
-define void @v_test_imin_sle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
- %a = load i32, i32 addrspace(1)* %aptr, align 4
- %b = load i32, i32 addrspace(1)* %bptr, align 4
+define amdgpu_kernel void @v_test_imin_sle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr, i32 %tid
+ %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr, i32 %tid
+ %out.gep = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %tid
+ %a = load i32, i32 addrspace(1)* %a.gep, align 4
+ %b = load i32, i32 addrspace(1)* %b.gep, align 4
%cmp = icmp sle i32 %a, %b
%val = select i1 %cmp, i32 %a, i32 %b
- store i32 %val, i32 addrspace(1)* %out, align 4
+ store i32 %val, i32 addrspace(1)* %out.gep, align 4
ret void
}
@@ -19,7 +24,7 @@ define void @v_test_imin_sle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr
; GCN: s_min_i32
; EG: MIN_INT
-define void @s_test_imin_sle_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @s_test_imin_sle_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%cmp = icmp sle i32 %a, %b
%val = select i1 %cmp, i32 %a, i32 %b
store i32 %val, i32 addrspace(1)* %out, align 4
@@ -30,7 +35,7 @@ define void @s_test_imin_sle_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwin
; GCN: s_min_i32
; EG: MIN_INT
-define void @s_test_imin_sle_v1i32(<1 x i32> addrspace(1)* %out, <1 x i32> %a, <1 x i32> %b) nounwind {
+define amdgpu_kernel void @s_test_imin_sle_v1i32(<1 x i32> addrspace(1)* %out, <1 x i32> %a, <1 x i32> %b) #0 {
%cmp = icmp sle <1 x i32> %a, %b
%val = select <1 x i1> %cmp, <1 x i32> %a, <1 x i32> %b
store <1 x i32> %val, <1 x i32> addrspace(1)* %out
@@ -47,7 +52,7 @@ define void @s_test_imin_sle_v1i32(<1 x i32> addrspace(1)* %out, <1 x i32> %a, <
; EG: MIN_INT
; EG: MIN_INT
; EG: MIN_INT
-define void @s_test_imin_sle_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) nounwind {
+define amdgpu_kernel void @s_test_imin_sle_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) #0 {
%cmp = icmp sle <4 x i32> %a, %b
%val = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> %b
store <4 x i32> %val, <4 x i32> addrspace(1)* %out
@@ -60,7 +65,7 @@ define void @s_test_imin_sle_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <
; GCN: s_sext_i32_i8
; GCN: s_sext_i32_i8
; GCN: s_min_i32
-define void @s_test_imin_sle_i8(i8 addrspace(1)* %out, i8 %a, i8 %b) nounwind {
+define amdgpu_kernel void @s_test_imin_sle_i8(i8 addrspace(1)* %out, i8 %a, i8 %b) #0 {
%cmp = icmp sle i8 %a, %b
%val = select i1 %cmp, i8 %a, i8 %b
store i8 %val, i8 addrspace(1)* %out
@@ -90,30 +95,62 @@ define void @s_test_imin_sle_i8(i8 addrspace(1)* %out, i8 %a, i8 %b) nounwind {
; VI: v_min_i32
; VI: v_min_i32
+; GFX9: v_min_i16
+; GFX9: v_min_i16
+; GFX9: v_min_i16
+; GFX9: v_min_i16
+
; GCN: s_endpgm
; EG: MIN_INT
; EG: MIN_INT
; EG: MIN_INT
; EG: MIN_INT
-define void @s_test_imin_sle_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, <4 x i8> %b) nounwind {
+define amdgpu_kernel void @s_test_imin_sle_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, <4 x i8> %b) #0 {
%cmp = icmp sle <4 x i8> %a, %b
%val = select <4 x i1> %cmp, <4 x i8> %a, <4 x i8> %b
store <4 x i8> %val, <4 x i8> addrspace(1)* %out
ret void
}
+; FUNC-LABEL: {{^}}s_test_imin_sle_v2i16:
+; SI: v_min_i32
+; SI: v_min_i32
+
+; VI: v_min_i32
+; VI: v_min_i32
+
+; GFX9: v_pk_min_i16
+
+; EG: MIN_INT
+; EG: MIN_INT
+define amdgpu_kernel void @s_test_imin_sle_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %a, <2 x i16> %b) #0 {
+ %cmp = icmp sle <2 x i16> %a, %b
+ %val = select <2 x i1> %cmp, <2 x i16> %a, <2 x i16> %b
+ store <2 x i16> %val, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; FIXME: VI should use s_min_i32
; FUNC-LABEL: {{^}}s_test_imin_sle_v4i16:
; SI: v_min_i32
; SI: v_min_i32
; SI: v_min_i32
; SI: v_min_i32
+; VI: v_min_i32
+; VI: v_min_i32
+; VI: v_min_i32
+; VI: v_min_i32
+
+; GFX9: v_pk_min_i16
+; GFX9: v_pk_min_i16
+
; EG: MIN_INT
; EG: MIN_INT
; EG: MIN_INT
; EG: MIN_INT
-define void @s_test_imin_sle_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, <4 x i16> %b) nounwind {
+define amdgpu_kernel void @s_test_imin_sle_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, <4 x i16> %b) #0 {
%cmp = icmp sle <4 x i16> %a, %b
%val = select <4 x i1> %cmp, <4 x i16> %a, <4 x i16> %b
store <4 x i16> %val, <4 x i16> addrspace(1)* %out
@@ -124,12 +161,36 @@ define void @s_test_imin_sle_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, <
; GCN: v_min_i32_e32
; EG: MIN_INT
-define void @v_test_imin_slt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
- %a = load i32, i32 addrspace(1)* %aptr, align 4
- %b = load i32, i32 addrspace(1)* %bptr, align 4
+define amdgpu_kernel void @v_test_imin_slt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %aptr, i32 %tid
+ %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %bptr, i32 %tid
+ %out.gep = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %tid
+ %a = load i32, i32 addrspace(1)* %a.gep, align 4
+ %b = load i32, i32 addrspace(1)* %b.gep, align 4
%cmp = icmp slt i32 %a, %b
%val = select i1 %cmp, i32 %a, i32 %b
- store i32 %val, i32 addrspace(1)* %out, align 4
+ store i32 %val, i32 addrspace(1)* %out.gep, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_test_imin_slt_i16
+; SI: v_min_i32_e32
+
+; GFX89: v_min_i16_e32
+
+; EG: MIN_INT
+define amdgpu_kernel void @v_test_imin_slt_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %a.gep = getelementptr inbounds i16, i16 addrspace(1)* %aptr, i32 %tid
+ %b.gep = getelementptr inbounds i16, i16 addrspace(1)* %bptr, i32 %tid
+ %out.gep = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
+
+ %a = load i16, i16 addrspace(1)* %a.gep
+ %b = load i16, i16 addrspace(1)* %b.gep
+ %cmp = icmp slt i16 %a, %b
+ %val = select i1 %cmp, i16 %a, i16 %b
+ store i16 %val, i16 addrspace(1)* %out.gep
ret void
}
@@ -137,7 +198,7 @@ define void @v_test_imin_slt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr
; GCN: s_min_i32
; EG: MIN_INT
-define void @s_test_imin_slt_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @s_test_imin_slt_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%cmp = icmp slt i32 %a, %b
%val = select i1 %cmp, i32 %a, i32 %b
store i32 %val, i32 addrspace(1)* %out, align 4
@@ -150,7 +211,7 @@ define void @s_test_imin_slt_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwin
; EG: MIN_INT
; EG: MIN_INT
-define void @s_test_imin_slt_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) nounwind {
+define amdgpu_kernel void @s_test_imin_slt_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) #0 {
%cmp = icmp slt <2 x i32> %a, %b
%val = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %b
store <2 x i32> %val, <2 x i32> addrspace(1)* %out
@@ -161,7 +222,7 @@ define void @s_test_imin_slt_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <
; GCN: s_min_i32 {{s[0-9]+}}, {{s[0-9]+}}, 8
; EG: MIN_INT {{.*}}literal.{{[xyzw]}}
-define void @s_test_imin_slt_imm_i32(i32 addrspace(1)* %out, i32 %a) nounwind {
+define amdgpu_kernel void @s_test_imin_slt_imm_i32(i32 addrspace(1)* %out, i32 %a) #0 {
%cmp = icmp slt i32 %a, 8
%val = select i1 %cmp, i32 %a, i32 8
store i32 %val, i32 addrspace(1)* %out, align 4
@@ -172,7 +233,7 @@ define void @s_test_imin_slt_imm_i32(i32 addrspace(1)* %out, i32 %a) nounwind {
; GCN: s_min_i32 {{s[0-9]+}}, {{s[0-9]+}}, 8
; EG: MIN_INT {{.*}}literal.{{[xyzw]}}
-define void @s_test_imin_sle_imm_i32(i32 addrspace(1)* %out, i32 %a) nounwind {
+define amdgpu_kernel void @s_test_imin_sle_imm_i32(i32 addrspace(1)* %out, i32 %a) #0 {
%cmp = icmp sle i32 %a, 8
%val = select i1 %cmp, i32 %a, i32 8
store i32 %val, i32 addrspace(1)* %out, align 4
@@ -183,12 +244,16 @@ define void @s_test_imin_sle_imm_i32(i32 addrspace(1)* %out, i32 %a) nounwind {
; GCN: v_min_u32_e32
; EG: MIN_UINT
-define void @v_test_umin_ule_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
- %a = load i32, i32 addrspace(1)* %aptr, align 4
- %b = load i32, i32 addrspace(1)* %bptr, align 4
+define amdgpu_kernel void @v_test_umin_ule_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr, i32 %tid
+ %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr, i32 %tid
+ %out.gep = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %tid
+ %a = load i32, i32 addrspace(1)* %a.gep, align 4
+ %b = load i32, i32 addrspace(1)* %b.gep, align 4
%cmp = icmp ule i32 %a, %b
%val = select i1 %cmp, i32 %a, i32 %b
- store i32 %val, i32 addrspace(1)* %out, align 4
+ store i32 %val, i32 addrspace(1)* %out.gep, align 4
ret void
}
@@ -196,25 +261,65 @@ define void @v_test_umin_ule_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr
; GCN: v_min_u32_e32
; GCN: v_min_u32_e32
; GCN: v_min_u32_e32
-; SI-NOT: v_min_u32_e32
+; GCN-NOT: v_min_u32_e32
; GCN: s_endpgm
; EG: MIN_UINT
; EG: MIN_UINT
; EG: MIN_UINT
-define void @v_test_umin_ule_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %aptr, <3 x i32> addrspace(1)* %bptr) nounwind {
- %a = load <3 x i32>, <3 x i32> addrspace(1)* %aptr
- %b = load <3 x i32>, <3 x i32> addrspace(1)* %bptr
+define amdgpu_kernel void @v_test_umin_ule_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %a.ptr, <3 x i32> addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %a.gep = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %a.ptr, i32 %tid
+ %b.gep = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %b.ptr, i32 %tid
+ %out.gep = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %out, i32 %tid
+
+ %a = load <3 x i32>, <3 x i32> addrspace(1)* %a.gep
+ %b = load <3 x i32>, <3 x i32> addrspace(1)* %b.gep
%cmp = icmp ule <3 x i32> %a, %b
%val = select <3 x i1> %cmp, <3 x i32> %a, <3 x i32> %b
- store <3 x i32> %val, <3 x i32> addrspace(1)* %out
+ store <3 x i32> %val, <3 x i32> addrspace(1)* %out.gep
+ ret void
+}
+
+; FIXME: Reduce unused packed component to scalar
+; FUNC-LABEL: @v_test_umin_ule_v3i16{{$}}
+; SI: v_min_u32_e32
+; SI: v_min_u32_e32
+; SI: v_min_u32_e32
+; SI-NOT: v_min_u32_e32
+
+; VI: v_min_u16_e32
+; VI: v_min_u16_sdwa
+; VI: v_min_u16_e32
+; VI-NOT: v_min_u16_e32
+
+; GFX9: v_pk_min_u16
+; GFX9: v_pk_min_u16
+
+; GCN: s_endpgm
+
+; EG: MIN_UINT
+; EG: MIN_UINT
+; EG: MIN_UINT
+define amdgpu_kernel void @v_test_umin_ule_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> addrspace(1)* %a.ptr, <3 x i16> addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %a.gep = getelementptr inbounds <3 x i16>, <3 x i16> addrspace(1)* %a.ptr, i32 %tid
+ %b.gep = getelementptr inbounds <3 x i16>, <3 x i16> addrspace(1)* %b.ptr, i32 %tid
+ %out.gep = getelementptr inbounds <3 x i16>, <3 x i16> addrspace(1)* %out, i32 %tid
+
+ %a = load <3 x i16>, <3 x i16> addrspace(1)* %a.gep
+ %b = load <3 x i16>, <3 x i16> addrspace(1)* %b.gep
+ %cmp = icmp ule <3 x i16> %a, %b
+ %val = select <3 x i1> %cmp, <3 x i16> %a, <3 x i16> %b
+ store <3 x i16> %val, <3 x i16> addrspace(1)* %out.gep
ret void
}
+
; FUNC-LABEL: @s_test_umin_ule_i32
; GCN: s_min_u32
; EG: MIN_UINT
-define void @s_test_umin_ule_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @s_test_umin_ule_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%cmp = icmp ule i32 %a, %b
%val = select i1 %cmp, i32 %a, i32 %b
store i32 %val, i32 addrspace(1)* %out, align 4
@@ -225,27 +330,40 @@ define void @s_test_umin_ule_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwin
; GCN: v_min_u32_e32
; EG: MIN_UINT
-define void @v_test_umin_ult_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
- %a = load i32, i32 addrspace(1)* %aptr, align 4
- %b = load i32, i32 addrspace(1)* %bptr, align 4
+define amdgpu_kernel void @v_test_umin_ult_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr, i32 %tid
+ %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr, i32 %tid
+ %out.gep = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %tid
+ %a = load i32, i32 addrspace(1)* %a.gep, align 4
+ %b = load i32, i32 addrspace(1)* %b.gep, align 4
%cmp = icmp ult i32 %a, %b
%val = select i1 %cmp, i32 %a, i32 %b
- store i32 %val, i32 addrspace(1)* %out, align 4
+ store i32 %val, i32 addrspace(1)* %out.gep, align 4
ret void
}
; FUNC-LABEL: {{^}}v_test_umin_ult_i8:
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: v_min_u32_e32
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: v_min_u32_e32
+
+; GFX89: flat_load_ubyte
+; GFX89: flat_load_ubyte
+; GFX89: v_min_u16_e32
; EG: MIN_UINT
-define void @v_test_umin_ult_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %aptr, i8 addrspace(1)* %bptr) nounwind {
- %a = load i8, i8 addrspace(1)* %aptr, align 1
- %b = load i8, i8 addrspace(1)* %bptr, align 1
+define amdgpu_kernel void @v_test_umin_ult_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %a.ptr, i8 addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %a.gep = getelementptr inbounds i8, i8 addrspace(1)* %a.ptr, i32 %tid
+ %b.gep = getelementptr inbounds i8, i8 addrspace(1)* %b.ptr, i32 %tid
+ %out.gep = getelementptr inbounds i8, i8 addrspace(1)* %out, i32 %tid
+
+ %a = load i8, i8 addrspace(1)* %a.gep, align 1
+ %b = load i8, i8 addrspace(1)* %b.gep, align 1
%cmp = icmp ult i8 %a, %b
%val = select i1 %cmp, i8 %a, i8 %b
- store i8 %val, i8 addrspace(1)* %out, align 1
+ store i8 %val, i8 addrspace(1)* %out.gep, align 1
ret void
}
@@ -253,7 +371,7 @@ define void @v_test_umin_ult_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %aptr, i
; GCN: s_min_u32
; EG: MIN_UINT
-define void @s_test_umin_ult_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @s_test_umin_ult_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%cmp = icmp ult i32 %a, %b
%val = select i1 %cmp, i32 %a, i32 %b
store i32 %val, i32 addrspace(1)* %out, align 4
@@ -268,7 +386,7 @@ define void @s_test_umin_ult_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwin
; GCN: s_endpgm
; EG-NOT: MIN_UINT
-define void @v_test_umin_ult_i32_multi_use(i32 addrspace(1)* %out0, i1 addrspace(1)* %out1, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+define amdgpu_kernel void @v_test_umin_ult_i32_multi_use(i32 addrspace(1)* %out0, i1 addrspace(1)* %out1, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) #0 {
%a = load i32, i32 addrspace(1)* %aptr, align 4
%b = load i32, i32 addrspace(1)* %bptr, align 4
%cmp = icmp ult i32 %a, %b
@@ -286,7 +404,7 @@ define void @v_test_umin_ult_i32_multi_use(i32 addrspace(1)* %out0, i1 addrspace
; GCN: s_endpgm
; EG-NOT: MIN_UINT
-define void @v_test_umin_ult_i16_multi_use(i16 addrspace(1)* %out0, i1 addrspace(1)* %out1, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr) nounwind {
+define amdgpu_kernel void @v_test_umin_ult_i16_multi_use(i16 addrspace(1)* %out0, i1 addrspace(1)* %out1, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr) #0 {
%a = load i16, i16 addrspace(1)* %aptr, align 2
%b = load i16, i16 addrspace(1)* %bptr, align 2
%cmp = icmp ult i16 %a, %b
@@ -301,7 +419,7 @@ define void @v_test_umin_ult_i16_multi_use(i16 addrspace(1)* %out0, i1 addrspace
; GCN: s_min_u32
; EG: MIN_UINT
-define void @s_test_umin_ult_v1i32(<1 x i32> addrspace(1)* %out, <1 x i32> %a, <1 x i32> %b) nounwind {
+define amdgpu_kernel void @s_test_umin_ult_v1i32(<1 x i32> addrspace(1)* %out, <1 x i32> %a, <1 x i32> %b) #0 {
%cmp = icmp ult <1 x i32> %a, %b
%val = select <1 x i1> %cmp, <1 x i32> %a, <1 x i32> %b
store <1 x i32> %val, <1 x i32> addrspace(1)* %out
@@ -326,7 +444,7 @@ define void @s_test_umin_ult_v1i32(<1 x i32> addrspace(1)* %out, <1 x i32> %a, <
; EG: MIN_UINT
; EG: MIN_UINT
; EG: MIN_UINT
-define void @s_test_umin_ult_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b) nounwind {
+define amdgpu_kernel void @s_test_umin_ult_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b) #0 {
%cmp = icmp ult <8 x i32> %a, %b
%val = select <8 x i1> %cmp, <8 x i32> %a, <8 x i32> %b
store <8 x i32> %val, <8 x i32> addrspace(1)* %out
@@ -334,14 +452,23 @@ define void @s_test_umin_ult_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <
}
; FUNC-LABEL: {{^}}s_test_umin_ult_v8i16:
-; GCN: v_min_u32
-; GCN: v_min_u32
-; GCN: v_min_u32
-; GCN: v_min_u32
-; GCN: v_min_u32
-; GCN: v_min_u32
-; GCN: v_min_u32
-; GCN: v_min_u32
+; SI: v_min_u32
+; SI: v_min_u32
+; SI: v_min_u32
+; SI: v_min_u32
+; SI: v_min_u32
+; SI: v_min_u32
+; SI: v_min_u32
+; SI: v_min_u32
+
+; VI: v_min_u32
+; VI: v_min_u32
+; VI: v_min_u32
+; VI: v_min_u32
+; VI: v_min_u32
+; VI: v_min_u32
+; VI: v_min_u32
+; VI: v_min_u32
; EG: MIN_UINT
; EG: MIN_UINT
@@ -351,7 +478,7 @@ define void @s_test_umin_ult_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <
; EG: MIN_UINT
; EG: MIN_UINT
; EG: MIN_UINT
-define void @s_test_umin_ult_v8i16(<8 x i16> addrspace(1)* %out, <8 x i16> %a, <8 x i16> %b) nounwind {
+define amdgpu_kernel void @s_test_umin_ult_v8i16(<8 x i16> addrspace(1)* %out, <8 x i16> %a, <8 x i16> %b) #0 {
%cmp = icmp ult <8 x i16> %a, %b
%val = select <8 x i1> %cmp, <8 x i16> %a, <8 x i16> %b
store <8 x i16> %val, <8 x i16> addrspace(1)* %out
@@ -367,7 +494,7 @@ define void @s_test_umin_ult_v8i16(<8 x i16> addrspace(1)* %out, <8 x i16> %a, <
; GCN: buffer_store_dword [[VMIN]]
; EG: MIN_UINT
-define void @simplify_demanded_bits_test_umin_ult_i16(i32 addrspace(1)* %out, i16 zeroext %a, i16 zeroext %b) nounwind {
+define amdgpu_kernel void @simplify_demanded_bits_test_umin_ult_i16(i32 addrspace(1)* %out, i16 zeroext %a, i16 zeroext %b) #0 {
%a.ext = zext i16 %a to i32
%b.ext = zext i16 %b to i32
%cmp = icmp ult i32 %a.ext, %b.ext
@@ -387,7 +514,7 @@ define void @simplify_demanded_bits_test_umin_ult_i16(i32 addrspace(1)* %out, i1
; GCN: buffer_store_dword [[VMIN]]
; EG: MIN_INT
-define void @simplify_demanded_bits_test_min_slt_i16(i32 addrspace(1)* %out, i16 signext %a, i16 signext %b) nounwind {
+define amdgpu_kernel void @simplify_demanded_bits_test_min_slt_i16(i32 addrspace(1)* %out, i16 signext %a, i16 signext %b) #0 {
%a.ext = sext i16 %a to i32
%b.ext = sext i16 %b to i32
%cmp = icmp slt i32 %a.ext, %b.ext
@@ -402,7 +529,7 @@ define void @simplify_demanded_bits_test_min_slt_i16(i32 addrspace(1)* %out, i16
; GCN: s_min_i32
; EG: MIN_INT
-define void @s_test_imin_sle_i16(i16 addrspace(1)* %out, i16 %a, i16 %b) nounwind {
+define amdgpu_kernel void @s_test_imin_sle_i16(i16 addrspace(1)* %out, i16 %a, i16 %b) #0 {
%cmp = icmp sle i16 %a, %b
%val = select i1 %cmp, i16 %a, i16 %b
store i16 %val, i16 addrspace(1)* %out
@@ -415,7 +542,7 @@ define void @s_test_imin_sle_i16(i16 addrspace(1)* %out, i16 %a, i16 %b) nounwin
; EG: MIN_UINT
; EG: MIN_UINT
-define void @test_umin_ult_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @test_umin_ult_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
%tmp = icmp ult i64 %a, %b
%val = select i1 %tmp, i64 %a, i64 %b
store i64 %val, i64 addrspace(1)* %out, align 8
@@ -427,7 +554,7 @@ define void @test_umin_ult_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind
; EG: MIN_UINT
; EG: MIN_UINT
-define void @test_umin_ule_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @test_umin_ule_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
%tmp = icmp ule i64 %a, %b
%val = select i1 %tmp, i64 %a, i64 %b
store i64 %val, i64 addrspace(1)* %out, align 8
@@ -439,7 +566,7 @@ define void @test_umin_ule_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind
; EG-DAG: MIN_UINT
; EG-DAG: MIN_INT
-define void @test_imin_slt_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @test_imin_slt_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
%tmp = icmp slt i64 %a, %b
%val = select i1 %tmp, i64 %a, i64 %b
store i64 %val, i64 addrspace(1)* %out, align 8
@@ -451,9 +578,63 @@ define void @test_imin_slt_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind
; EG-DAG: MIN_UINT
; EG-DAG: MIN_INT
-define void @test_imin_sle_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @test_imin_sle_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
%tmp = icmp sle i64 %a, %b
%val = select i1 %tmp, i64 %a, i64 %b
store i64 %val, i64 addrspace(1)* %out, align 8
ret void
}
+
+; FUNC-LABEL: {{^}}v_test_imin_sle_v2i16:
+; SI: v_min_i32
+; SI: v_min_i32
+
+; VI: v_min_i16
+; VI: v_min_i16
+
+; GFX9: v_pk_min_i16
+
+; EG: MIN_INT
+; EG: MIN_INT
+define amdgpu_kernel void @v_test_imin_sle_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %a.ptr, <2 x i16> addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %a.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %a.ptr, i32 %tid
+ %b.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %b.ptr, i32 %tid
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
+ %a = load <2 x i16>, <2 x i16> addrspace(1)* %a.gep
+ %b = load <2 x i16>, <2 x i16> addrspace(1)* %b.gep
+ %cmp = icmp sle <2 x i16> %a, %b
+ %val = select <2 x i1> %cmp, <2 x i16> %a, <2 x i16> %b
+ store <2 x i16> %val, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; FIXME: i16 min
+; FUNC-LABEL: {{^}}v_test_imin_ule_v2i16:
+; SI: v_min_u32
+; SI: v_min_u32
+
+; VI: v_min_u16
+; VI: v_min_u16
+
+; GFX9: v_pk_min_u16
+
+; EG: MIN_UINT
+; EG: MIN_UINT
+define amdgpu_kernel void @v_test_imin_ule_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %a.ptr, <2 x i16> addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %a.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %a.ptr, i32 %tid
+ %b.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %b.ptr, i32 %tid
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
+ %a = load <2 x i16>, <2 x i16> addrspace(1)* %a.gep
+ %b = load <2 x i16>, <2 x i16> addrspace(1)* %b.gep
+ %cmp = icmp ule <2 x i16> %a, %b
+ %val = select <2 x i1> %cmp, <2 x i16> %a, <2 x i16> %b
+ store <2 x i16> %val, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+declare i32 @llvm.r600.read.tidig.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
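; Aside: replacing the literal nounwind on each define with #0 is purely a
; spelling change; both forms attach the same attribute, with the group
; defined once at the end of the file:
;   before: define amdgpu_kernel void @f(...) nounwind { ... }
;   after:  define amdgpu_kernel void @f(...) #0 { ... }
;           attributes #0 = { nounwind }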
diff --git a/test/CodeGen/AMDGPU/min3.ll b/test/CodeGen/AMDGPU/min3.ll
index 728479ad9f62..59d5d2cdb1aa 100644
--- a/test/CodeGen/AMDGPU/min3.ll
+++ b/test/CodeGen/AMDGPU/min3.ll
@@ -4,7 +4,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
; FUNC-LABEL: @v_test_imin3_slt_i32
; SI: v_min3_i32
-define void @v_test_imin3_slt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr, i32 addrspace(1)* %cptr) nounwind {
+define amdgpu_kernel void @v_test_imin3_slt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr, i32 addrspace(1)* %cptr) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
%gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
@@ -23,7 +23,7 @@ define void @v_test_imin3_slt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %apt
; FUNC-LABEL: @v_test_umin3_ult_i32
; SI: v_min3_u32
-define void @v_test_umin3_ult_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr, i32 addrspace(1)* %cptr) nounwind {
+define amdgpu_kernel void @v_test_umin3_ult_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr, i32 addrspace(1)* %cptr) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
%gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
@@ -43,7 +43,7 @@ define void @v_test_umin3_ult_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %apt
; FUNC-LABEL: @v_test_umin_umin_umin
; SI: v_min_i32
; SI: v_min3_i32
-define void @v_test_umin_umin_umin(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr, i32 addrspace(1)* %cptr) nounwind {
+define amdgpu_kernel void @v_test_umin_umin_umin(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr, i32 addrspace(1)* %cptr) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%tid2 = mul i32 %tid, 2
%gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
@@ -77,7 +77,7 @@ define void @v_test_umin_umin_umin(i32 addrspace(1)* %out, i32 addrspace(1)* %ap
; FUNC-LABEL: @v_test_umin3_2_uses
; SI-NOT: v_min3
-define void @v_test_umin3_2_uses(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr, i32 addrspace(1)* %cptr) nounwind {
+define amdgpu_kernel void @v_test_umin3_2_uses(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr, i32 addrspace(1)* %cptr) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%tid2 = mul i32 %tid, 2
%gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
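; Aside (sketch of the fold this file exercises, with scalar operands for
; brevity): when the operands live in VGPRs, as with the workitem-indexed
; loads above, SI folds the chained selects into one v_min3_i32; with SGPR
; inputs as written below, it would instead emit two s_min_i32.
define amdgpu_kernel void @sketch_min3(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
  %cmp0 = icmp slt i32 %a, %b
  %min.ab = select i1 %cmp0, i32 %a, i32 %b        ; first min
  %cmp1 = icmp slt i32 %min.ab, %c
  %min.abc = select i1 %cmp1, i32 %min.ab, i32 %c  ; chained min feeds min3 matching
  store i32 %min.abc, i32 addrspace(1)* %out
  ret void
}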
diff --git a/test/CodeGen/AMDGPU/missing-store.ll b/test/CodeGen/AMDGPU/missing-store.ll
index 8e1b0036a1af..83c2a911a5ce 100644
--- a/test/CodeGen/AMDGPU/missing-store.ll
+++ b/test/CodeGen/AMDGPU/missing-store.ll
@@ -15,7 +15,7 @@
; SI: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}
; SI: buffer_store_dword
; SI: s_endpgm
-define void @missing_store_reduced(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
+define amdgpu_kernel void @missing_store_reduced(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
%ptr0 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(3)* @ptr_load, align 8
%ptr2 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 2
diff --git a/test/CodeGen/AMDGPU/move-addr64-rsrc-dead-subreg-writes.ll b/test/CodeGen/AMDGPU/move-addr64-rsrc-dead-subreg-writes.ll
index 85dfbe6b8a33..e1fb00a1de30 100644
--- a/test/CodeGen/AMDGPU/move-addr64-rsrc-dead-subreg-writes.ll
+++ b/test/CodeGen/AMDGPU/move-addr64-rsrc-dead-subreg-writes.ll
@@ -19,7 +19,7 @@
; GCN: v_addc_u32_e32 v[[PTRHI:[0-9]+]], vcc, v[[LDPTRHI]], v[[VARG1HI]]
; GCN: buffer_load_ubyte v{{[0-9]+}}, v{{\[}}[[PTRLO]]:[[PTRHI]]{{\]}},
-define void @clobber_vgpr_pair_pointer_add(i64 %arg1, i8 addrspace(1)* addrspace(1)* %ptrarg, i32 %arg3) #0 {
+define amdgpu_kernel void @clobber_vgpr_pair_pointer_add(i64 %arg1, i8 addrspace(1)* addrspace(1)* %ptrarg, i32 %arg3) #0 {
bb:
%tmp = icmp sgt i32 %arg3, 0
br i1 %tmp, label %bb4, label %bb17
diff --git a/test/CodeGen/AMDGPU/move-to-valu-atomicrmw.ll b/test/CodeGen/AMDGPU/move-to-valu-atomicrmw.ll
index 1a0a39027853..417b4ba802e1 100644
--- a/test/CodeGen/AMDGPU/move-to-valu-atomicrmw.ll
+++ b/test/CodeGen/AMDGPU/move-to-valu-atomicrmw.ll
@@ -11,7 +11,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1
; GCN-LABEL: {{^}}atomic_max_i32:
; GCN: buffer_atomic_smax v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:400 glc{{$}}
-define void @atomic_max_i32(i32 addrspace(1)* %out, i32 addrspace(1)* addrspace(1)* %in, i32 addrspace(1)* %x, i32 %y) #0 {
+define amdgpu_kernel void @atomic_max_i32(i32 addrspace(1)* %out, i32 addrspace(1)* addrspace(1)* %in, i32 addrspace(1)* %x, i32 %y) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.gep = getelementptr i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* %in, i32 %tid
%ptr = load volatile i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* %tid.gep
@@ -31,7 +31,7 @@ exit:
; GCN-LABEL: {{^}}atomic_max_i32_noret:
; GCN: buffer_atomic_smax v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:400{{$}}
-define void @atomic_max_i32_noret(i32 addrspace(1)* %out, i32 addrspace(1)* addrspace(1)* %in, i32 addrspace(1)* %x, i32 %y) #0 {
+define amdgpu_kernel void @atomic_max_i32_noret(i32 addrspace(1)* %out, i32 addrspace(1)* addrspace(1)* %in, i32 addrspace(1)* %x, i32 %y) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.gep = getelementptr i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* %in, i32 %tid
%ptr = load volatile i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* %tid.gep
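; Aside: the only difference between the two checks in this file is the
; trailing glc bit; for MUBUF atomics, glc requests the pre-op memory value,
; so it appears exactly when the atomicrmw result is used:
;   buffer_atomic_smax ... glc   ; result stored -> fetch the old value
;   buffer_atomic_smax ...       ; result dropped -> no returned data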
diff --git a/test/CodeGen/AMDGPU/mubuf.ll b/test/CodeGen/AMDGPU/mubuf.ll
index a574365da986..9e1d2e0490c7 100644
--- a/test/CodeGen/AMDGPU/mubuf.ll
+++ b/test/CodeGen/AMDGPU/mubuf.ll
@@ -9,7 +9,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() readnone
; MUBUF load with an immediate byte offset that fits into 12 bits
; CHECK-LABEL: {{^}}mubuf_load0:
; CHECK: buffer_load_dword v{{[0-9]}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:4 ; encoding: [0x04,0x00,0x30,0xe0
-define void @mubuf_load0(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @mubuf_load0(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
%0 = getelementptr i32, i32 addrspace(1)* %in, i64 1
%1 = load i32, i32 addrspace(1)* %0
@@ -20,7 +20,7 @@ entry:
; MUBUF load with the largest possible immediate offset
; CHECK-LABEL: {{^}}mubuf_load1:
; CHECK: buffer_load_ubyte v{{[0-9]}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:4095 ; encoding: [0xff,0x0f,0x20,0xe0
-define void @mubuf_load1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
+define amdgpu_kernel void @mubuf_load1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
%0 = getelementptr i8, i8 addrspace(1)* %in, i64 4095
%1 = load i8, i8 addrspace(1)* %0
@@ -32,7 +32,7 @@ entry:
; CHECK-LABEL: {{^}}mubuf_load2:
; CHECK: s_movk_i32 [[SOFFSET:s[0-9]+]], 0x1000
; CHECK: buffer_load_dword v{{[0-9]}}, off, s[{{[0-9]+:[0-9]+}}], [[SOFFSET]] ; encoding: [0x00,0x00,0x30,0xe0
-define void @mubuf_load2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @mubuf_load2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
%0 = getelementptr i32, i32 addrspace(1)* %in, i64 1024
%1 = load i32, i32 addrspace(1)* %0
@@ -44,7 +44,7 @@ entry:
; CHECK-LABEL: {{^}}mubuf_load3:
; CHECK-NOT: ADD
; CHECK: buffer_load_dword v{{[0-9]}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:4 ; encoding: [0x04,0x80,0x30,0xe0
-define void @mubuf_load3(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i64 %offset) {
+define amdgpu_kernel void @mubuf_load3(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i64 %offset) {
entry:
%0 = getelementptr i32, i32 addrspace(1)* %in, i64 %offset
%1 = getelementptr i32, i32 addrspace(1)* %0, i64 1
@@ -91,7 +91,7 @@ main_body:
; MUBUF store with an immediate byte offset that fits into 12 bits
; CHECK-LABEL: {{^}}mubuf_store0:
; CHECK: buffer_store_dword v{{[0-9]}}, off, s[{{[0-9]:[0-9]}}], 0 offset:4 ; encoding: [0x04,0x00,0x70,0xe0
-define void @mubuf_store0(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @mubuf_store0(i32 addrspace(1)* %out) {
entry:
%0 = getelementptr i32, i32 addrspace(1)* %out, i64 1
store i32 0, i32 addrspace(1)* %0
@@ -102,7 +102,7 @@ entry:
; CHECK-LABEL: {{^}}mubuf_store1:
; CHECK: buffer_store_byte v{{[0-9]}}, off, s[{{[0-9]:[0-9]}}], 0 offset:4095 ; encoding: [0xff,0x0f,0x60,0xe0
-define void @mubuf_store1(i8 addrspace(1)* %out) {
+define amdgpu_kernel void @mubuf_store1(i8 addrspace(1)* %out) {
entry:
%0 = getelementptr i8, i8 addrspace(1)* %out, i64 4095
store i8 0, i8 addrspace(1)* %0
@@ -113,7 +113,7 @@ entry:
; CHECK-LABEL: {{^}}mubuf_store2:
; CHECK: s_movk_i32 [[SOFFSET:s[0-9]+]], 0x1000
; CHECK: buffer_store_dword v{{[0-9]}}, off, s[{{[0-9]:[0-9]}}], [[SOFFSET]] ; encoding: [0x00,0x00,0x70,0xe0
-define void @mubuf_store2(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @mubuf_store2(i32 addrspace(1)* %out) {
entry:
%0 = getelementptr i32, i32 addrspace(1)* %out, i64 1024
store i32 0, i32 addrspace(1)* %0
@@ -124,7 +124,7 @@ entry:
; CHECK-LABEL: {{^}}mubuf_store3:
; CHECK-NOT: ADD
; CHECK: buffer_store_dword v{{[0-9]}}, v[{{[0-9]:[0-9]}}], s[{{[0-9]:[0-9]}}], 0 addr64 offset:4 ; encoding: [0x04,0x80,0x70,0xe0
-define void @mubuf_store3(i32 addrspace(1)* %out, i64 %offset) {
+define amdgpu_kernel void @mubuf_store3(i32 addrspace(1)* %out, i64 %offset) {
entry:
%0 = getelementptr i32, i32 addrspace(1)* %out, i64 %offset
%1 = getelementptr i32, i32 addrspace(1)* %0, i64 1
@@ -134,14 +134,14 @@ entry:
; CHECK-LABEL: {{^}}store_sgpr_ptr:
; CHECK: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0
-define void @store_sgpr_ptr(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @store_sgpr_ptr(i32 addrspace(1)* %out) #0 {
store i32 99, i32 addrspace(1)* %out, align 4
ret void
}
; CHECK-LABEL: {{^}}store_sgpr_ptr_offset:
; CHECK: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:40
-define void @store_sgpr_ptr_offset(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @store_sgpr_ptr_offset(i32 addrspace(1)* %out) #0 {
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 10
store i32 99, i32 addrspace(1)* %out.gep, align 4
ret void
@@ -150,7 +150,7 @@ define void @store_sgpr_ptr_offset(i32 addrspace(1)* %out) #0 {
; CHECK-LABEL: {{^}}store_sgpr_ptr_large_offset:
; CHECK: s_mov_b32 [[SOFFSET:s[0-9]+]], 0x20000
; CHECK: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, [[SOFFSET]]
-define void @store_sgpr_ptr_large_offset(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @store_sgpr_ptr_large_offset(i32 addrspace(1)* %out) #0 {
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 32768
store i32 99, i32 addrspace(1)* %out.gep, align 4
ret void
@@ -159,7 +159,7 @@ define void @store_sgpr_ptr_large_offset(i32 addrspace(1)* %out) #0 {
; CHECK-LABEL: {{^}}store_sgpr_ptr_large_offset_atomic:
; CHECK: s_mov_b32 [[SOFFSET:s[0-9]+]], 0x20000
; CHECK: buffer_atomic_add v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, [[SOFFSET]]
-define void @store_sgpr_ptr_large_offset_atomic(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @store_sgpr_ptr_large_offset_atomic(i32 addrspace(1)* %out) #0 {
%gep = getelementptr i32, i32 addrspace(1)* %out, i32 32768
%val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 5 seq_cst
ret void
@@ -167,7 +167,7 @@ define void @store_sgpr_ptr_large_offset_atomic(i32 addrspace(1)* %out) #0 {
; CHECK-LABEL: {{^}}store_vgpr_ptr:
; CHECK: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64
-define void @store_vgpr_ptr(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @store_vgpr_ptr(i32 addrspace(1)* %out) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() readnone
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
store i32 99, i32 addrspace(1)* %out.gep, align 4
diff --git a/test/CodeGen/AMDGPU/mul.ll b/test/CodeGen/AMDGPU/mul.ll
index 7910b70d8cf2..a72a6efb0711 100644
--- a/test/CodeGen/AMDGPU/mul.ll
+++ b/test/CodeGen/AMDGPU/mul.ll
@@ -11,7 +11,7 @@
; SI: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @test_mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @test_mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32>, <2 x i32> addrspace(1) * %in
%b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
@@ -31,7 +31,7 @@ define void @test_mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)
; SI: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @v_mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @v_mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32>, <4 x i32> addrspace(1) * %in
%b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
@@ -45,7 +45,7 @@ define void @v_mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %
; SI: s_load_dword
; SI: s_mul_i32
; SI: buffer_store_dword
-define void @s_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @s_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
%mul = mul i64 %b, %a
%trunc = trunc i64 %mul to i32
store i32 %trunc, i32 addrspace(1)* %out, align 8
@@ -57,7 +57,7 @@ define void @s_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
; SI: s_load_dword
; SI: v_mul_lo_i32
; SI: buffer_store_dword
-define void @v_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
+define amdgpu_kernel void @v_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
%a = load i64, i64 addrspace(1)* %aptr, align 8
%b = load i64, i64 addrspace(1)* %bptr, align 8
%mul = mul i64 %b, %a
@@ -73,7 +73,7 @@ define void @v_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %a
; EG-DAG: MULHI_INT
; SI-DAG: s_mul_i32
; SI-DAG: v_mul_hi_i32
-define void @mul64_sext_c(i64 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @mul64_sext_c(i64 addrspace(1)* %out, i32 %in) {
entry:
%0 = sext i32 %in to i64
%1 = mul i64 %0, 80
@@ -87,7 +87,7 @@ entry:
; SI-DAG: v_mul_lo_i32
; SI-DAG: v_mul_hi_i32
; SI: s_endpgm
-define void @v_mul64_sext_c(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @v_mul64_sext_c(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
%val = load i32, i32 addrspace(1)* %in, align 4
%ext = sext i32 %val to i64
%mul = mul i64 %ext, 80
@@ -99,7 +99,7 @@ define void @v_mul64_sext_c(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
; SI-DAG: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, 9
; SI-DAG: v_mul_hi_i32 v{{[0-9]+}}, v{{[0-9]+}}, 9
; SI: s_endpgm
-define void @v_mul64_sext_inline_imm(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @v_mul64_sext_inline_imm(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
%val = load i32, i32 addrspace(1)* %in, align 4
%ext = sext i32 %val to i64
%mul = mul i64 %ext, 9
@@ -114,7 +114,7 @@ define void @v_mul64_sext_inline_imm(i64 addrspace(1)* %out, i32 addrspace(1)* %
; SI: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
; SI: buffer_store_dword [[VRESULT]],
; SI: s_endpgm
-define void @s_mul_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @s_mul_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%mul = mul i32 %a, %b
store i32 %mul, i32 addrspace(1)* %out, align 4
ret void
@@ -122,7 +122,7 @@ define void @s_mul_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
; FUNC-LABEL: {{^}}v_mul_i32:
; SI: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @v_mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
%b = load i32, i32 addrspace(1)* %b_ptr
@@ -139,7 +139,7 @@ define void @v_mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; crash with a 'failed to select' error.
; FUNC-LABEL: {{^}}s_mul_i64:
-define void @s_mul_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @s_mul_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%mul = mul i64 %a, %b
store i64 %mul, i64 addrspace(1)* %out, align 8
ret void
@@ -147,7 +147,7 @@ define void @s_mul_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
; FUNC-LABEL: {{^}}v_mul_i64:
; SI: v_mul_lo_i32
-define void @v_mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
+define amdgpu_kernel void @v_mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
%a = load i64, i64 addrspace(1)* %aptr, align 8
%b = load i64, i64 addrspace(1)* %bptr, align 8
%mul = mul i64 %a, %b
@@ -157,7 +157,7 @@ define void @v_mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addr
; FUNC-LABEL: {{^}}mul32_in_branch:
; SI: s_mul_i32
-define void @mul32_in_branch(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a, i32 %b, i32 %c) {
+define amdgpu_kernel void @mul32_in_branch(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a, i32 %b, i32 %c) {
entry:
%0 = icmp eq i32 %a, 0
br i1 %0, label %if, label %else
@@ -180,7 +180,7 @@ endif:
; SI-DAG: s_mul_i32
; SI-DAG: v_mul_hi_u32
; SI: s_endpgm
-define void @mul64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
+define amdgpu_kernel void @mul64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
entry:
%0 = icmp eq i64 %a, 0
br i1 %0, label %if, label %else
@@ -224,7 +224,7 @@ endif:
; SI: s_mul_i32
; SI: buffer_store_dwordx4
-define void @s_mul_i128(i128 addrspace(1)* %out, i128 %a, i128 %b) nounwind #0 {
+define amdgpu_kernel void @s_mul_i128(i128 addrspace(1)* %out, i128 %a, i128 %b) nounwind #0 {
%mul = mul i128 %a, %b
store i128 %mul, i128 addrspace(1)* %out
ret void
@@ -234,26 +234,26 @@ define void @s_mul_i128(i128 addrspace(1)* %out, i128 %a, i128 %b) nounwind #0 {
; SI: {{buffer|flat}}_load_dwordx4
; SI: {{buffer|flat}}_load_dwordx4
-; SI: v_mul_lo_i32
-; SI: v_mul_hi_u32
-; SI: v_mul_hi_u32
-; SI: v_mul_lo_i32
-; SI: v_mul_hi_u32
-; SI: v_mul_hi_u32
-; SI: v_mul_lo_i32
-; SI: v_mul_lo_i32
+; SI-DAG: v_mul_lo_i32
+; SI-DAG: v_mul_hi_u32
+; SI-DAG: v_mul_hi_u32
+; SI-DAG: v_mul_lo_i32
+; SI-DAG: v_mul_hi_u32
+; SI-DAG: v_mul_hi_u32
+; SI-DAG: v_mul_lo_i32
+; SI-DAG: v_mul_lo_i32
; SI: v_add_i32_e32
-; SI: v_mul_hi_u32
-; SI: v_mul_lo_i32
-; SI: v_mul_hi_u32
-; SI: v_mul_lo_i32
-; SI: v_mul_lo_i32
-; SI: v_mul_lo_i32
-; SI: v_mul_lo_i32
-; SI: v_mul_lo_i32
+; SI-DAG: v_mul_hi_u32
+; SI-DAG: v_mul_lo_i32
+; SI-DAG: v_mul_hi_u32
+; SI-DAG: v_mul_lo_i32
+; SI-DAG: v_mul_lo_i32
+; SI-DAG: v_mul_lo_i32
+; SI-DAG: v_mul_lo_i32
+; SI-DAG: v_mul_lo_i32
; SI: {{buffer|flat}}_store_dwordx4
-define void @v_mul_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %aptr, i128 addrspace(1)* %bptr) #0 {
+define amdgpu_kernel void @v_mul_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %aptr, i128 addrspace(1)* %bptr) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%gep.a = getelementptr inbounds i128, i128 addrspace(1)* %aptr, i32 %tid
%gep.b = getelementptr inbounds i128, i128 addrspace(1)* %bptr, i32 %tid
diff --git a/test/CodeGen/AMDGPU/mul_int24.ll b/test/CodeGen/AMDGPU/mul_int24.ll
index 6f7dfe2e13eb..3137569e9ca7 100644
--- a/test/CodeGen/AMDGPU/mul_int24.ll
+++ b/test/CodeGen/AMDGPU/mul_int24.ll
@@ -13,7 +13,7 @@
; Make sure we are not masking the inputs
; CM-NOT: AND
; CM: MUL_INT24
-define void @test_smul24_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @test_smul24_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
entry:
%a.shl = shl i32 %a, 8
%a.24 = ashr i32 %a.shl, 8
@@ -39,7 +39,7 @@ entry:
; CM: MULHI_INT24
; CM: MULHI_INT24
; CM: MULHI_INT24
-define void @test_smulhi24_i64(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @test_smulhi24_i64(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
entry:
%a.shl = shl i32 %a, 8
%a.24 = ashr i32 %a.shl, 8
@@ -70,7 +70,7 @@ entry:
; GCN-DAG: v_mul_i32_i24_e32
; GCN: buffer_store_dwordx2
-define void @test_smul24_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @test_smul24_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%shl.i = shl i32 %a, 8
%shr.i = ashr i32 %shl.i, 8
%conv.i = sext i32 %shr.i to i64
@@ -87,7 +87,7 @@ define void @test_smul24_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) #0 {
; GCN-DAG: v_mul_hi_i32_i24_e64 v{{[0-9]+}}, [[A]], [[A]]
; GCN-DAG: v_mul_i32_i24_e64 v{{[0-9]+}}, [[A]], [[A]]
; GCN: buffer_store_dwordx2
-define void @test_smul24_i64_square(i64 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @test_smul24_i64_square(i64 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%shl.i = shl i32 %a, 8
%shr.i = ashr i32 %shl.i, 8
%conv.i = sext i32 %shr.i to i64
@@ -112,7 +112,7 @@ define void @test_smul24_i64_square(i64 addrspace(1)* %out, i32 %a, i32 %b) #0 {
; VI: v_ashrrev_i64 v{{\[[0-9]+:[0-9]+\]}}, 31, v{{\[[0-9]+:[0-9]+\]}}
; GCN: buffer_store_dwordx2
-define void @test_smul24_i33(i64 addrspace(1)* %out, i33 %a, i33 %b) #0 {
+define amdgpu_kernel void @test_smul24_i33(i64 addrspace(1)* %out, i33 %a, i33 %b) #0 {
entry:
%a.shl = shl i33 %a, 9
%a.24 = ashr i33 %a.shl, 9
@@ -133,7 +133,7 @@ entry:
; SI: v_mul_hi_i32_i24_e32 v[[MUL_HI:[0-9]+]],
; SI-NEXT: v_and_b32_e32 v[[HI:[0-9]+]], 1, v[[MUL_HI]]
; SI-NEXT: buffer_store_dword v[[HI]]
-define void @test_smulhi24_i33(i32 addrspace(1)* %out, i33 %a, i33 %b) {
+define amdgpu_kernel void @test_smulhi24_i33(i32 addrspace(1)* %out, i33 %a, i33 %b) {
entry:
%tmp0 = shl i33 %a, 9
%a_24 = ashr i33 %tmp0, 9
@@ -151,7 +151,7 @@ entry:
; GCN: v_mul_i32_i24_e32 v[[VAL_LO:[0-9]+]]
; GCN: v_mov_b32_e32 v[[VAL_HI:[0-9]+]], v[[VAL_LO]]
; GCN: buffer_store_dwordx2 v{{\[}}[[VAL_LO]]:[[VAL_HI]]{{\]}}
-define void @simplify_i24_crash(<2 x i32> addrspace(1)* %out, i32 %arg0, <2 x i32> %arg1, <2 x i32> %arg2) {
+define amdgpu_kernel void @simplify_i24_crash(<2 x i32> addrspace(1)* %out, i32 %arg0, <2 x i32> %arg1, <2 x i32> %arg2) {
bb:
%cmp = icmp eq i32 %arg0, 0
br i1 %cmp, label %bb11, label %bb7
diff --git a/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll b/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
index 004d36f00e51..59fdc8be5cea 100644
--- a/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
+++ b/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
@@ -6,7 +6,7 @@ declare i32 @llvm.amdgcn.workitem.id.y() nounwind readnone
; FUNC-LABEL: {{^}}test_umul24_i32:
; GCN: v_mul_u32_u24
-define void @test_umul24_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define amdgpu_kernel void @test_umul24_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
%0 = shl i32 %a, 8
%a_24 = lshr i32 %0, 8
@@ -22,7 +22,7 @@ entry:
; SI: v_bfe_i32 v{{[0-9]}}, [[VI_MUL]], 0, 16
; VI: s_mul_i32 [[SI_MUL:s[0-9]]], s{{[0-9]}}, s{{[0-9]}}
; VI: s_sext_i32_i16 s{{[0-9]}}, [[SI_MUL]]
-define void @test_umul24_i16_sext(i32 addrspace(1)* %out, i16 %a, i16 %b) {
+define amdgpu_kernel void @test_umul24_i16_sext(i32 addrspace(1)* %out, i16 %a, i16 %b) {
entry:
%mul = mul i16 %a, %b
%ext = sext i16 %mul to i32
@@ -34,7 +34,7 @@ entry:
; SI: v_mul_u32_u24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; VI: v_mul_lo_u16_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 16
-define void @test_umul24_i16_vgpr_sext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @test_umul24_i16_vgpr_sext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
%tid.x = call i32 @llvm.amdgcn.workitem.id.x()
%tid.y = call i32 @llvm.amdgcn.workitem.id.y()
%ptr_a = getelementptr i16, i16 addrspace(1)* %in, i32 %tid.x
@@ -54,7 +54,7 @@ define void @test_umul24_i16_vgpr_sext(i32 addrspace(1)* %out, i16 addrspace(1)*
; VI: s_mul_i32
; VI: s_and_b32
; VI: v_mov_b32_e32
-define void @test_umul24_i16(i32 addrspace(1)* %out, i16 %a, i16 %b) {
+define amdgpu_kernel void @test_umul24_i16(i32 addrspace(1)* %out, i16 %a, i16 %b) {
entry:
%mul = mul i16 %a, %b
%ext = zext i16 %mul to i32
@@ -66,7 +66,7 @@ entry:
; SI: v_mul_u32_u24_e32
; SI: v_and_b32_e32
; VI: v_mul_lo_u16
-define void @test_umul24_i16_vgpr(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @test_umul24_i16_vgpr(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
%tid.x = call i32 @llvm.amdgcn.workitem.id.x()
%tid.y = call i32 @llvm.amdgcn.workitem.id.y()
%ptr_a = getelementptr i16, i16 addrspace(1)* %in, i32 %tid.x
@@ -83,7 +83,7 @@ define void @test_umul24_i16_vgpr(i32 addrspace(1)* %out, i16 addrspace(1)* %in)
; SI: v_mul_u32_u24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; VI: v_mul_lo_u16_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 8
-define void @test_umul24_i8_vgpr(i32 addrspace(1)* %out, i8 addrspace(1)* %a, i8 addrspace(1)* %b) {
+define amdgpu_kernel void @test_umul24_i8_vgpr(i32 addrspace(1)* %out, i8 addrspace(1)* %a, i8 addrspace(1)* %b) {
entry:
%tid.x = call i32 @llvm.amdgcn.workitem.id.x()
%tid.y = call i32 @llvm.amdgcn.workitem.id.y()
@@ -101,7 +101,7 @@ entry:
; GCN-NOT: and
; GCN: v_mul_hi_u32_u24_e32 [[RESULT:v[0-9]+]],
; GCN-NEXT: buffer_store_dword [[RESULT]]
-define void @test_umulhi24_i32_i64(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define amdgpu_kernel void @test_umulhi24_i32_i64(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
%a.24 = and i32 %a, 16777215
%b.24 = and i32 %b, 16777215
@@ -118,7 +118,7 @@ entry:
; GCN-NOT: and
; GCN: v_mul_hi_u32_u24_e32 [[RESULT:v[0-9]+]],
; GCN-NEXT: buffer_store_dword [[RESULT]]
-define void @test_umulhi24(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @test_umulhi24(i32 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
%a.24 = and i64 %a, 16777215
%b.24 = and i64 %b, 16777215
@@ -136,7 +136,7 @@ entry:
; GCN-DAG: v_mul_u32_u24_e32
; GCN-DAG: v_mul_hi_u32_u24_e32
; GCN: buffer_store_dwordx2
-define void @test_umul24_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @test_umul24_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
%tmp0 = shl i64 %a, 40
%a_24 = lshr i64 %tmp0, 40
@@ -152,7 +152,7 @@ entry:
; GCN-NOT: s_and_b32
; GCN-DAG: v_mul_hi_u32_u24_e64 v{{[0-9]+}}, [[A]], [[A]]
; GCN-DAG: v_mul_u32_u24_e64 v{{[0-9]+}}, [[A]], [[A]]
-define void @test_umul24_i64_square(i64 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @test_umul24_i64_square(i64 addrspace(1)* %out, i64 %a) {
entry:
%tmp0 = shl i64 %a, 40
%a.24 = lshr i64 %tmp0, 40
@@ -166,7 +166,7 @@ entry:
; GCN: s_and_b32
; GCN: v_mul_u32_u24_e32 [[MUL24:v[0-9]+]]
; GCN: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, [[MUL24]]
-define void @test_umulhi16_i32(i16 addrspace(1)* %out, i32 %a, i32 %b) {
+define amdgpu_kernel void @test_umulhi16_i32(i16 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
%a.16 = and i32 %a, 65535
%b.16 = and i32 %b, 65535
@@ -186,7 +186,7 @@ entry:
; GCN-DAG: v_mul_hi_u32_u24_e32 v[[MUL_HI:[0-9]+]],
; GCN-DAG: v_and_b32_e32 v[[HI:[0-9]+]], 1, v[[MUL_HI]]
; GCN: buffer_store_dwordx2 v{{\[}}[[MUL_LO]]:[[HI]]{{\]}}
-define void @test_umul24_i33(i64 addrspace(1)* %out, i33 %a, i33 %b) {
+define amdgpu_kernel void @test_umul24_i33(i64 addrspace(1)* %out, i33 %a, i33 %b) {
entry:
%tmp0 = shl i33 %a, 9
%a_24 = lshr i33 %tmp0, 9
@@ -206,7 +206,7 @@ entry:
; GCN: v_mul_hi_u32_u24_e32 v[[MUL_HI:[0-9]+]],
; GCN-NEXT: v_and_b32_e32 v[[HI:[0-9]+]], 1, v[[MUL_HI]]
; GCN-NEXT: buffer_store_dword v[[HI]]
-define void @test_umulhi24_i33(i32 addrspace(1)* %out, i33 %a, i33 %b) {
+define amdgpu_kernel void @test_umulhi24_i33(i32 addrspace(1)* %out, i33 %a, i33 %b) {
entry:
%tmp0 = shl i33 %a, 9
%a_24 = lshr i33 %tmp0, 9
diff --git a/test/CodeGen/AMDGPU/mul_uint24-r600.ll b/test/CodeGen/AMDGPU/mul_uint24-r600.ll
index da1c111fa5c0..0a646b7126d0 100644
--- a/test/CodeGen/AMDGPU/mul_uint24-r600.ll
+++ b/test/CodeGen/AMDGPU/mul_uint24-r600.ll
@@ -3,7 +3,7 @@
; FUNC-LABEL: {{^}}test_umul24_i32:
; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W
-define void @test_umul24_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define amdgpu_kernel void @test_umul24_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
%0 = shl i32 %a, 8
%a_24 = lshr i32 %0, 8
@@ -19,7 +19,7 @@ entry:
; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]]
; EG: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
; EG: 16
-define void @test_umul24_i16_sext(i32 addrspace(1)* %out, i16 %a, i16 %b) {
+define amdgpu_kernel void @test_umul24_i16_sext(i32 addrspace(1)* %out, i16 %a, i16 %b) {
entry:
%mul = mul i16 %a, %b
%ext = sext i16 %mul to i32
@@ -31,7 +31,7 @@ entry:
; FUNC-LABEL: {{^}}test_umul24_i8:
; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]]
; EG: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
-define void @test_umul24_i8(i32 addrspace(1)* %out, i8 %a, i8 %b) {
+define amdgpu_kernel void @test_umul24_i8(i32 addrspace(1)* %out, i8 %a, i8 %b) {
entry:
%mul = mul i8 %a, %b
%ext = sext i8 %mul to i32
@@ -41,7 +41,7 @@ entry:
; FUNC-LABEL: {{^}}test_umulhi24_i32_i64:
; EG: MULHI_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W
-define void @test_umulhi24_i32_i64(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define amdgpu_kernel void @test_umulhi24_i32_i64(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
%a.24 = and i32 %a, 16777215
%b.24 = and i32 %b, 16777215
@@ -56,7 +56,7 @@ entry:
; FUNC-LABEL: {{^}}test_umulhi24:
; EG: MULHI_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
-define void @test_umulhi24(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @test_umulhi24(i32 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
%a.24 = and i64 %a, 16777215
%b.24 = and i64 %b, 16777215
@@ -71,7 +71,7 @@ entry:
; FUNC-LABEL: {{^}}test_umul24_i64:
; EG: MUL_UINT24
; EG: MULHI
-define void @test_umul24_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @test_umul24_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
%tmp0 = shl i64 %a, 40
%a_24 = lshr i64 %tmp0, 40
diff --git a/test/CodeGen/AMDGPU/mulhu.ll b/test/CodeGen/AMDGPU/mulhu.ll
deleted file mode 100644
index 29b0944a5533..000000000000
--- a/test/CodeGen/AMDGPU/mulhu.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
-;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
-
-;CHECK: v_mov_b32_e32 v{{[0-9]+}}, 0xaaaaaaab
-;CHECK: v_mul_hi_u32 v0, {{v[0-9]+}}, {{s[0-9]+}}
-;CHECK-NEXT: v_lshrrev_b32_e32 v0, 1, v0
-
-define void @test(i32 %p) {
- %i = udiv i32 %p, 3
- %r = bitcast i32 %i to float
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %r, float %r, float %r, float %r)
- ret void
-}
-
-declare <4 x float> @llvm.SI.sample.(i32, <4 x i32>, <8 x i32>, <4 x i32>, i32) readnone
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
diff --git a/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll b/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll
new file mode 100644
index 000000000000..9d0b6b395996
--- /dev/null
+++ b/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll
@@ -0,0 +1,710 @@
+; RUN: opt -mtriple=amdgcn-- -S -amdgpu-unify-divergent-exit-nodes -verify -structurizecfg -verify -si-annotate-control-flow %s | FileCheck -check-prefix=IR %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; Add extra verifier runs. There were some cases where invalid IR
+; was produced but happened to be fixed by later passes.
+
+; Make sure divergent control flow with multiple exits from a region
+; is properly handled. UnifyFunctionExitNodes should be run before
+; StructurizeCFG.
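+;
+; A minimal sketch of the rewrite UnifyFunctionExitNodes performs on its
+; own, before StructurizeCFG reshapes the branches further (block names
+; match the tests below; this snippet is illustrative, not checked):
+;
+;   exit0:                        exit0:
+;     ret void         ==>          br label %UnifiedReturnBlock
+;   exit1:                        exit1:
+;     ret void                      br label %UnifiedReturnBlock
+;                                 UnifiedReturnBlock:
+;                                   ret void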
+
+; IR-LABEL: @multi_divergent_region_exit_ret_ret(
+; IR: %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %0)
+; IR: %2 = extractvalue { i1, i64 } %1, 0
+; IR: %3 = extractvalue { i1, i64 } %1, 1
+; IR: br i1 %2, label %LeafBlock1, label %Flow
+
+; IR: Flow:
+; IR: %4 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ]
+; IR: %5 = phi i1 [ %10, %LeafBlock1 ], [ false, %entry ]
+; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3)
+; IR: %7 = extractvalue { i1, i64 } %6, 0
+; IR: %8 = extractvalue { i1, i64 } %6, 1
+; IR: br i1 %7, label %LeafBlock, label %Flow1
+
+; IR: LeafBlock:
+; IR: br label %Flow1
+
+; IR: LeafBlock1:
+; IR: br label %Flow{{$}}
+
+; IR: Flow2:
+; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ]
+; IR: call void @llvm.amdgcn.end.cf(i64 %19)
+; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11)
+; IR: %13 = extractvalue { i1, i64 } %12, 0
+; IR: %14 = extractvalue { i1, i64 } %12, 1
+; IR: br i1 %13, label %exit0, label %UnifiedReturnBlock
+
+; IR: exit0:
+; IR: store volatile i32 9, i32 addrspace(1)* undef
+; IR: br label %UnifiedReturnBlock
+
+; IR: Flow1:
+; IR: %15 = phi i1 [ %SwitchLeaf, %LeafBlock ], [ %4, %Flow ]
+; IR: %16 = phi i1 [ %9, %LeafBlock ], [ %5, %Flow ]
+; IR: call void @llvm.amdgcn.end.cf(i64 %8)
+; IR: %17 = call { i1, i64 } @llvm.amdgcn.if(i1 %16)
+; IR: %18 = extractvalue { i1, i64 } %17, 0
+; IR: %19 = extractvalue { i1, i64 } %17, 1
+; IR: br i1 %18, label %exit1, label %Flow2
+
+; IR: exit1:
+; IR: store volatile i32 17, i32 addrspace(3)* undef
+; IR: br label %Flow2
+
+; IR: UnifiedReturnBlock:
+; IR: call void @llvm.amdgcn.end.cf(i64 %14)
+; IR: ret void
+
+
+; GCN-LABEL: {{^}}multi_divergent_region_exit_ret_ret:
+; GCN: v_cmp_lt_i32_e32 vcc, 1
+; GCN: s_and_saveexec_b64
+; GCN: s_xor_b64
+
+
+; FIXME: Why is this compare essentially repeated?
+; GCN: v_cmp_eq_u32_e32 vcc, 1, [[REG:v[0-9]+]]
+; GCN-NEXT: v_cmp_ne_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, 1, [[REG]]
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, -1, vcc
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, -1
+
+; GCN: ; %Flow1
+; GCN-NEXT: s_or_b64 exec, exec
+; GCN: v_cmp_ne_u32_e32 vcc, 0
+
+; GCN: ; %exit1
+; GCN: ds_write_b32
+
+; GCN: %Flow2
+; GCN-NEXT: s_or_b64 exec, exec
+; GCN: v_cmp_ne_u32_e32 vcc, 0
+; GCN-NEXT: s_and_saveexec_b64
+; GCN-NEXT: s_xor_b64
+
+; GCN: ; %exit0
+; GCN: buffer_store_dword
+
+; GCN: ; %UnifiedReturnBlock
+; GCN-NEXT: s_or_b64 exec, exec
+; GCN-NEXT: s_endpgm
+define amdgpu_kernel void @multi_divergent_region_exit_ret_ret(i32 addrspace(1)* nocapture %arg0, i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2) #0 {
+entry:
+ %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tmp1 = add i32 0, %tmp
+ %tmp2 = zext i32 %tmp1 to i64
+ %tmp3 = add i64 0, %tmp2
+ %tmp4 = shl i64 %tmp3, 32
+ %tmp5 = ashr exact i64 %tmp4, 32
+ %tmp6 = getelementptr inbounds i32, i32 addrspace(1)* %arg0, i64 %tmp5
+ %tmp7 = load i32, i32 addrspace(1)* %tmp6, align 4
+ %tmp8 = sext i32 %tmp7 to i64
+ %tmp9 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i64 %tmp8
+ %tmp10 = load i32, i32 addrspace(1)* %tmp9, align 4
+ %tmp13 = zext i32 %tmp10 to i64
+ %tmp14 = getelementptr inbounds i32, i32 addrspace(1)* %arg2, i64 %tmp13
+ %tmp16 = load i32, i32 addrspace(1)* %tmp14, align 16
+ %Pivot = icmp slt i32 %tmp16, 2
+ br i1 %Pivot, label %LeafBlock, label %LeafBlock1
+
+LeafBlock: ; preds = %entry
+ %SwitchLeaf = icmp eq i32 %tmp16, 1
+ br i1 %SwitchLeaf, label %exit0, label %exit1
+
+LeafBlock1: ; preds = %entry
+ %SwitchLeaf2 = icmp eq i32 %tmp16, 2
+ br i1 %SwitchLeaf2, label %exit0, label %exit1
+
+exit0: ; preds = %LeafBlock, %LeafBlock1
+ store volatile i32 9, i32 addrspace(1)* undef
+ ret void
+
+exit1: ; preds = %LeafBlock, %LeafBlock1
+ store volatile i32 17, i32 addrspace(3)* undef
+ ret void
+}
+
+; IR-LABEL: @multi_divergent_region_exit_unreachable_unreachable(
+; IR: %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %0)
+
+; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3)
+
+; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ]
+; IR: call void @llvm.amdgcn.end.cf(i64 %19)
+; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11)
+; IR: br i1 %13, label %exit0, label %UnifiedUnreachableBlock
+
+
+; IR: UnifiedUnreachableBlock:
+; IR-NEXT: unreachable
+
+
+; FIXME: Probably should insert an s_endpgm anyway.
+; GCN-LABEL: {{^}}multi_divergent_region_exit_unreachable_unreachable:
+; GCN: ; %UnifiedUnreachableBlock
+; GCN-NEXT: .Lfunc_end
+define amdgpu_kernel void @multi_divergent_region_exit_unreachable_unreachable(i32 addrspace(1)* nocapture %arg0, i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2) #0 {
+entry:
+ %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tmp1 = add i32 0, %tmp
+ %tmp2 = zext i32 %tmp1 to i64
+ %tmp3 = add i64 0, %tmp2
+ %tmp4 = shl i64 %tmp3, 32
+ %tmp5 = ashr exact i64 %tmp4, 32
+ %tmp6 = getelementptr inbounds i32, i32 addrspace(1)* %arg0, i64 %tmp5
+ %tmp7 = load i32, i32 addrspace(1)* %tmp6, align 4
+ %tmp8 = sext i32 %tmp7 to i64
+ %tmp9 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i64 %tmp8
+ %tmp10 = load i32, i32 addrspace(1)* %tmp9, align 4
+ %tmp13 = zext i32 %tmp10 to i64
+ %tmp14 = getelementptr inbounds i32, i32 addrspace(1)* %arg2, i64 %tmp13
+ %tmp16 = load i32, i32 addrspace(1)* %tmp14, align 16
+ %Pivot = icmp slt i32 %tmp16, 2
+ br i1 %Pivot, label %LeafBlock, label %LeafBlock1
+
+LeafBlock: ; preds = %entry
+ %SwitchLeaf = icmp eq i32 %tmp16, 1
+ br i1 %SwitchLeaf, label %exit0, label %exit1
+
+LeafBlock1: ; preds = %entry
+ %SwitchLeaf2 = icmp eq i32 %tmp16, 2
+ br i1 %SwitchLeaf2, label %exit0, label %exit1
+
+exit0: ; preds = %LeafBlock, %LeafBlock1
+ store volatile i32 9, i32 addrspace(1)* undef
+ unreachable
+
+exit1: ; preds = %LeafBlock, %LeafBlock1
+ store volatile i32 17, i32 addrspace(3)* undef
+ unreachable
+}
+
+; IR-LABEL: @multi_exit_region_divergent_ret_uniform_ret(
+; IR: %divergent.cond0 = icmp slt i32 %tmp16, 2
+; IR: llvm.amdgcn.if
+; IR: br i1
+
+; IR: {{^}}Flow:
+; IR: %4 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ]
+; IR: %5 = phi i1 [ %10, %LeafBlock1 ], [ false, %entry ]
+; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3)
+; IR: br i1 %7, label %LeafBlock, label %Flow1
+
+; IR: {{^}}LeafBlock:
+; IR: %divergent.cond1 = icmp eq i32 %tmp16, 1
+; IR: %9 = xor i1 %divergent.cond1, true
+; IR: br label %Flow1
+
+; IR: LeafBlock1:
+; IR: %uniform.cond0 = icmp eq i32 %arg3, 2
+; IR: %10 = xor i1 %uniform.cond0, true
+; IR: br label %Flow
+
+; IR: Flow2:
+; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ]
+; IR: call void @llvm.amdgcn.end.cf(i64 %19)
+; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11)
+; IR: br i1 %13, label %exit0, label %UnifiedReturnBlock
+
+; IR: exit0:
+; IR: store volatile i32 9, i32 addrspace(1)* undef
+; IR: br label %UnifiedReturnBlock
+
+; IR: {{^}}Flow1:
+; IR: %15 = phi i1 [ %divergent.cond1, %LeafBlock ], [ %4, %Flow ]
+; IR: %16 = phi i1 [ %9, %LeafBlock ], [ %5, %Flow ]
+; IR: call void @llvm.amdgcn.end.cf(i64 %8)
+; IR: %17 = call { i1, i64 } @llvm.amdgcn.if(i1 %16)
+; IR: %18 = extractvalue { i1, i64 } %17, 0
+; IR: %19 = extractvalue { i1, i64 } %17, 1
+; IR: br i1 %18, label %exit1, label %Flow2
+
+; IR: exit1:
+; IR: store volatile i32 17, i32 addrspace(3)* undef
+; IR: br label %Flow2
+
+; IR: UnifiedReturnBlock:
+; IR: call void @llvm.amdgcn.end.cf(i64 %14)
+; IR: ret void
+define amdgpu_kernel void @multi_exit_region_divergent_ret_uniform_ret(i32 addrspace(1)* nocapture %arg0, i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2, i32 %arg3) #0 {
+entry:
+ %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tmp1 = add i32 0, %tmp
+ %tmp2 = zext i32 %tmp1 to i64
+ %tmp3 = add i64 0, %tmp2
+ %tmp4 = shl i64 %tmp3, 32
+ %tmp5 = ashr exact i64 %tmp4, 32
+ %tmp6 = getelementptr inbounds i32, i32 addrspace(1)* %arg0, i64 %tmp5
+ %tmp7 = load i32, i32 addrspace(1)* %tmp6, align 4
+ %tmp8 = sext i32 %tmp7 to i64
+ %tmp9 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i64 %tmp8
+ %tmp10 = load i32, i32 addrspace(1)* %tmp9, align 4
+ %tmp13 = zext i32 %tmp10 to i64
+ %tmp14 = getelementptr inbounds i32, i32 addrspace(1)* %arg2, i64 %tmp13
+ %tmp16 = load i32, i32 addrspace(1)* %tmp14, align 16
+ %divergent.cond0 = icmp slt i32 %tmp16, 2
+ br i1 %divergent.cond0, label %LeafBlock, label %LeafBlock1
+
+LeafBlock: ; preds = %entry
+ %divergent.cond1 = icmp eq i32 %tmp16, 1
+ br i1 %divergent.cond1, label %exit0, label %exit1
+
+LeafBlock1: ; preds = %entry
+ %uniform.cond0 = icmp eq i32 %arg3, 2
+ br i1 %uniform.cond0, label %exit0, label %exit1
+
+exit0: ; preds = %LeafBlock, %LeafBlock1
+ store volatile i32 9, i32 addrspace(1)* undef
+ ret void
+
+exit1: ; preds = %LeafBlock, %LeafBlock1
+ store volatile i32 17, i32 addrspace(3)* undef
+ ret void
+}
+
+; IR-LABEL: @multi_exit_region_uniform_ret_divergent_ret(
+; IR: %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %0)
+; IR: br i1 %2, label %LeafBlock1, label %Flow
+
+; IR: Flow:
+; IR: %4 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ]
+; IR: %5 = phi i1 [ %10, %LeafBlock1 ], [ false, %entry ]
+; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3)
+
+; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ]
+; IR: call void @llvm.amdgcn.end.cf(i64 %19)
+; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11)
+
+define amdgpu_kernel void @multi_exit_region_uniform_ret_divergent_ret(i32 addrspace(1)* nocapture %arg0, i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2, i32 %arg3) #0 {
+entry:
+ %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tmp1 = add i32 0, %tmp
+ %tmp2 = zext i32 %tmp1 to i64
+ %tmp3 = add i64 0, %tmp2
+ %tmp4 = shl i64 %tmp3, 32
+ %tmp5 = ashr exact i64 %tmp4, 32
+ %tmp6 = getelementptr inbounds i32, i32 addrspace(1)* %arg0, i64 %tmp5
+ %tmp7 = load i32, i32 addrspace(1)* %tmp6, align 4
+ %tmp8 = sext i32 %tmp7 to i64
+ %tmp9 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i64 %tmp8
+ %tmp10 = load i32, i32 addrspace(1)* %tmp9, align 4
+ %tmp13 = zext i32 %tmp10 to i64
+ %tmp14 = getelementptr inbounds i32, i32 addrspace(1)* %arg2, i64 %tmp13
+ %tmp16 = load i32, i32 addrspace(1)* %tmp14, align 16
+ %Pivot = icmp slt i32 %tmp16, 2
+ br i1 %Pivot, label %LeafBlock, label %LeafBlock1
+
+LeafBlock: ; preds = %entry
+ %SwitchLeaf = icmp eq i32 %arg3, 1
+ br i1 %SwitchLeaf, label %exit0, label %exit1
+
+LeafBlock1: ; preds = %entry
+ %SwitchLeaf2 = icmp eq i32 %tmp16, 2
+ br i1 %SwitchLeaf2, label %exit0, label %exit1
+
+exit0: ; preds = %LeafBlock, %LeafBlock1
+ store volatile i32 9, i32 addrspace(1)* undef
+ ret void
+
+exit1: ; preds = %LeafBlock, %LeafBlock1
+ store volatile i32 17, i32 addrspace(3)* undef
+ ret void
+}
+
+; IR-LABEL: @multi_divergent_region_exit_ret_ret_return_value(
+; IR: Flow2:
+; IR: %11 = phi float [ 2.000000e+00, %exit1 ], [ undef, %Flow1 ]
+; IR: %12 = phi i1 [ false, %exit1 ], [ %16, %Flow1 ]
+; IR: call void @llvm.amdgcn.end.cf(i64 %20)
+
+; IR: UnifiedReturnBlock:
+; IR: %UnifiedRetVal = phi float [ %11, %Flow2 ], [ 1.000000e+00, %exit0 ]
+; IR: call void @llvm.amdgcn.end.cf(i64 %15)
+; IR: ret float %UnifiedRetVal
+define amdgpu_ps float @multi_divergent_region_exit_ret_ret_return_value(i32 %vgpr) #0 {
+entry:
+ %Pivot = icmp slt i32 %vgpr, 2
+ br i1 %Pivot, label %LeafBlock, label %LeafBlock1
+
+LeafBlock: ; preds = %entry
+ %SwitchLeaf = icmp eq i32 %vgpr, 1
+ br i1 %SwitchLeaf, label %exit0, label %exit1
+
+LeafBlock1: ; preds = %entry
+ %SwitchLeaf2 = icmp eq i32 %vgpr, 2
+ br i1 %SwitchLeaf2, label %exit0, label %exit1
+
+exit0: ; preds = %LeafBlock, %LeafBlock1
+ store i32 9, i32 addrspace(1)* undef
+ ret float 1.0
+
+exit1: ; preds = %LeafBlock, %LeafBlock1
+ store i32 17, i32 addrspace(3)* undef
+ ret float 2.0
+}
+
+; IR-LABEL: @uniform_branch_to_multi_divergent_region_exit_ret_ret_return_value(
+
+; GCN-LABEL: {{^}}uniform_branch_to_multi_divergent_region_exit_ret_ret_return_value:
+; GCN: s_cmp_gt_i32 s0, 1
+; GCN: s_cbranch_scc0 [[FLOW:BB[0-9]+_[0-9]+]]
+
+; GCN: v_cmp_ne_u32_e32 vcc, 7, v0
+
+; GCN: {{^}}[[FLOW]]:
+; GCN: s_cbranch_vccnz [[FLOW1:BB[0-9]+]]
+
+; GCN: v_mov_b32_e32 v0, 2.0
+; GCN: s_or_b64 exec, exec
+; GCN: s_and_b64 exec, exec
+; GCN: v_mov_b32_e32 v0, 1.0
+
+; GCN: {{^BB[0-9]+_[0-9]+}}: ; %UnifiedReturnBlock
+; GCN-NEXT: s_or_b64 exec, exec
+; GCN-NEXT: ; return
+
+define amdgpu_ps float @uniform_branch_to_multi_divergent_region_exit_ret_ret_return_value(i32 inreg %sgpr, i32 %vgpr) #0 {
+entry:
+ %uniform.cond = icmp slt i32 %sgpr, 2
+ br i1 %uniform.cond, label %LeafBlock, label %LeafBlock1
+
+LeafBlock: ; preds = %entry
+ %divergent.cond0 = icmp eq i32 %vgpr, 3
+ br i1 %divergent.cond0, label %exit0, label %exit1
+
+LeafBlock1: ; preds = %entry
+ %divergent.cond1 = icmp eq i32 %vgpr, 7
+ br i1 %divergent.cond1, label %exit0, label %exit1
+
+exit0: ; preds = %LeafBlock, %LeafBlock1
+ store i32 9, i32 addrspace(1)* undef
+ ret float 1.0
+
+exit1: ; preds = %LeafBlock, %LeafBlock1
+ store i32 17, i32 addrspace(3)* undef
+ ret float 2.0
+}
+
+; IR-LABEL: @multi_divergent_region_exit_ret_unreachable(
+; IR: %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %0)
+
+; IR: Flow:
+; IR: %4 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ]
+; IR: %5 = phi i1 [ %10, %LeafBlock1 ], [ false, %entry ]
+; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3)
+
+; IR: Flow2:
+; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ]
+; IR: call void @llvm.amdgcn.end.cf(i64 %19)
+; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11)
+; IR: br i1 %13, label %exit0, label %UnifiedReturnBlock
+
+; IR: exit0:
+; IR-NEXT: store volatile i32 17, i32 addrspace(3)* undef
+; IR-NEXT: br label %UnifiedReturnBlock
+
+; IR: Flow1:
+; IR: %15 = phi i1 [ %SwitchLeaf, %LeafBlock ], [ %4, %Flow ]
+; IR: %16 = phi i1 [ %9, %LeafBlock ], [ %5, %Flow ]
+; IR: call void @llvm.amdgcn.end.cf(i64 %8)
+; IR: %17 = call { i1, i64 } @llvm.amdgcn.if(i1 %16)
+; IR: %18 = extractvalue { i1, i64 } %17, 0
+; IR: %19 = extractvalue { i1, i64 } %17, 1
+; IR: br i1 %18, label %exit1, label %Flow2
+
+; IR: exit1:
+; IR-NEXT: store volatile i32 9, i32 addrspace(1)* undef
+; IR-NEXT: call void @llvm.amdgcn.unreachable()
+; IR-NEXT: br label %Flow2
+
+; IR: UnifiedReturnBlock:
+; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %14)
+; IR-NEXT: ret void
+define amdgpu_kernel void @multi_divergent_region_exit_ret_unreachable(i32 addrspace(1)* nocapture %arg0, i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2) #0 {
+entry:
+ %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tmp1 = add i32 0, %tmp
+ %tmp2 = zext i32 %tmp1 to i64
+ %tmp3 = add i64 0, %tmp2
+ %tmp4 = shl i64 %tmp3, 32
+ %tmp5 = ashr exact i64 %tmp4, 32
+ %tmp6 = getelementptr inbounds i32, i32 addrspace(1)* %arg0, i64 %tmp5
+ %tmp7 = load i32, i32 addrspace(1)* %tmp6, align 4
+ %tmp8 = sext i32 %tmp7 to i64
+ %tmp9 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i64 %tmp8
+ %tmp10 = load i32, i32 addrspace(1)* %tmp9, align 4
+ %tmp13 = zext i32 %tmp10 to i64
+ %tmp14 = getelementptr inbounds i32, i32 addrspace(1)* %arg2, i64 %tmp13
+ %tmp16 = load i32, i32 addrspace(1)* %tmp14, align 16
+ %Pivot = icmp slt i32 %tmp16, 2
+ br i1 %Pivot, label %LeafBlock, label %LeafBlock1
+
+LeafBlock: ; preds = %entry
+ %SwitchLeaf = icmp eq i32 %tmp16, 1
+ br i1 %SwitchLeaf, label %exit0, label %exit1
+
+LeafBlock1: ; preds = %entry
+ %SwitchLeaf2 = icmp eq i32 %tmp16, 2
+ br i1 %SwitchLeaf2, label %exit0, label %exit1
+
+exit0: ; preds = %LeafBlock, %LeafBlock1
+ store volatile i32 17, i32 addrspace(3)* undef
+ ret void
+
+exit1: ; preds = %LeafBlock, %LeafBlock1
+ store volatile i32 9, i32 addrspace(1)* undef
+ unreachable
+}
+
+; The non-uniformity of the branch to the exiting blocks requires
+; looking at transitive predecessors.
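+;
+; Sketch of the shape being exercised (block names from the test below):
+; %exit1 is only reached through %indirect.exit1, so the divergence of
+; that exit is only visible by walking back through the intermediate
+; block to the divergent branches in %LeafBlock and %LeafBlock1:
+;
+;   LeafBlock  --\
+;                 indirect.exit1 --> exit1 (unreachable)
+;   LeafBlock1 --/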
+
+; IR-LABEL: @indirect_multi_divergent_region_exit_ret_unreachable(
+
+; IR: exit0: ; preds = %Flow2
+; IR-NEXT: store volatile i32 17, i32 addrspace(3)* undef
+; IR-NEXT: br label %UnifiedReturnBlock
+
+
+; IR: indirect.exit1:
+; IR: %load = load volatile i32, i32 addrspace(1)* undef
+; IR: store volatile i32 %load, i32 addrspace(1)* undef
+; IR: store volatile i32 9, i32 addrspace(1)* undef
+; IR: call void @llvm.amdgcn.unreachable()
+; IR-NEXT: br label %Flow2
+
+; IR: UnifiedReturnBlock: ; preds = %exit0, %Flow2
+; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %14)
+; IR-NEXT: ret void
+define amdgpu_kernel void @indirect_multi_divergent_region_exit_ret_unreachable(i32 addrspace(1)* nocapture %arg0, i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2) #0 {
+entry:
+ %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tmp1 = add i32 0, %tmp
+ %tmp2 = zext i32 %tmp1 to i64
+ %tmp3 = add i64 0, %tmp2
+ %tmp4 = shl i64 %tmp3, 32
+ %tmp5 = ashr exact i64 %tmp4, 32
+ %tmp6 = getelementptr inbounds i32, i32 addrspace(1)* %arg0, i64 %tmp5
+ %tmp7 = load i32, i32 addrspace(1)* %tmp6, align 4
+ %tmp8 = sext i32 %tmp7 to i64
+ %tmp9 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i64 %tmp8
+ %tmp10 = load i32, i32 addrspace(1)* %tmp9, align 4
+ %tmp13 = zext i32 %tmp10 to i64
+ %tmp14 = getelementptr inbounds i32, i32 addrspace(1)* %arg2, i64 %tmp13
+ %tmp16 = load i32, i32 addrspace(1)* %tmp14, align 16
+ %Pivot = icmp slt i32 %tmp16, 2
+ br i1 %Pivot, label %LeafBlock, label %LeafBlock1
+
+LeafBlock: ; preds = %entry
+ %SwitchLeaf = icmp eq i32 %tmp16, 1
+ br i1 %SwitchLeaf, label %exit0, label %indirect.exit1
+
+LeafBlock1: ; preds = %entry
+ %SwitchLeaf2 = icmp eq i32 %tmp16, 2
+ br i1 %SwitchLeaf2, label %exit0, label %indirect.exit1
+
+exit0: ; preds = %LeafBlock, %LeafBlock1
+ store volatile i32 17, i32 addrspace(3)* undef
+ ret void
+
+indirect.exit1:
+ %load = load volatile i32, i32 addrspace(1)* undef
+ store volatile i32 %load, i32 addrspace(1)* undef
+ br label %exit1
+
+exit1: ; preds = %indirect.exit1
+ store volatile i32 9, i32 addrspace(1)* undef
+ unreachable
+}
+
+; IR-LABEL: @multi_divergent_region_exit_ret_switch(
+define amdgpu_kernel void @multi_divergent_region_exit_ret_switch(i32 addrspace(1)* nocapture %arg0, i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2) #0 {
+entry:
+ %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tmp1 = add i32 0, %tmp
+ %tmp2 = zext i32 %tmp1 to i64
+ %tmp3 = add i64 0, %tmp2
+ %tmp4 = shl i64 %tmp3, 32
+ %tmp5 = ashr exact i64 %tmp4, 32
+ %tmp6 = getelementptr inbounds i32, i32 addrspace(1)* %arg0, i64 %tmp5
+ %tmp7 = load i32, i32 addrspace(1)* %tmp6, align 4
+ %tmp8 = sext i32 %tmp7 to i64
+ %tmp9 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i64 %tmp8
+ %tmp10 = load i32, i32 addrspace(1)* %tmp9, align 4
+ %tmp13 = zext i32 %tmp10 to i64
+ %tmp14 = getelementptr inbounds i32, i32 addrspace(1)* %arg2, i64 %tmp13
+ %tmp16 = load i32, i32 addrspace(1)* %tmp14, align 16
+ switch i32 %tmp16, label %exit1
+ [ i32 1, label %LeafBlock
+ i32 2, label %LeafBlock1
+ i32 3, label %exit0 ]
+
+LeafBlock: ; preds = %entry
+ %SwitchLeaf = icmp eq i32 %tmp16, 1
+ br i1 %SwitchLeaf, label %exit0, label %exit1
+
+LeafBlock1: ; preds = %entry
+ %SwitchLeaf2 = icmp eq i32 %tmp16, 2
+ br i1 %SwitchLeaf2, label %exit0, label %exit1
+
+exit0: ; preds = %entry, %LeafBlock, %LeafBlock1
+ store volatile i32 17, i32 addrspace(3)* undef
+ ret void
+
+exit1: ; preds = %entry, %LeafBlock, %LeafBlock1
+ store volatile i32 9, i32 addrspace(1)* undef
+ unreachable
+}
+
+; IR-LABEL: @divergent_multi_ret_nest_in_uniform_triangle(
+define amdgpu_kernel void @divergent_multi_ret_nest_in_uniform_triangle(i32 %arg0) #0 {
+entry:
+ %uniform.cond0 = icmp eq i32 %arg0, 4
+ br i1 %uniform.cond0, label %divergent.multi.exit.region, label %uniform.ret
+
+divergent.multi.exit.region:
+ %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %divergent.cond0 = icmp eq i32 %id.x, 0
+ br i1 %divergent.cond0, label %divergent.ret0, label %divergent.ret1
+
+divergent.ret0:
+ store volatile i32 11, i32 addrspace(3)* undef
+ ret void
+
+divergent.ret1:
+ store volatile i32 42, i32 addrspace(3)* undef
+ ret void
+
+uniform.ret:
+ store volatile i32 9, i32 addrspace(1)* undef
+ ret void
+}
+
+; IR-LABEL: @divergent_complex_multi_ret_nest_in_uniform_triangle(
+define amdgpu_kernel void @divergent_complex_multi_ret_nest_in_uniform_triangle(i32 %arg0) #0 {
+entry:
+ %uniform.cond0 = icmp eq i32 %arg0, 4
+ br i1 %uniform.cond0, label %divergent.multi.exit.region, label %uniform.ret
+
+divergent.multi.exit.region:
+ %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %divergent.cond0 = icmp eq i32 %id.x, 0
+ br i1 %divergent.cond0, label %divergent.if, label %divergent.ret1
+
+divergent.if:
+ %vgpr0 = load volatile float, float addrspace(1)* undef
+ %divergent.cond1 = fcmp ogt float %vgpr0, 1.0
+ br i1 %divergent.cond1, label %divergent.then, label %divergent.endif
+
+divergent.then:
+ %vgpr1 = load volatile float, float addrspace(1)* undef
+ %divergent.cond2 = fcmp olt float %vgpr1, 4.0
+ store volatile i32 33, i32 addrspace(1)* undef
+ br i1 %divergent.cond2, label %divergent.ret0, label %divergent.endif
+
+divergent.endif:
+ store volatile i32 38, i32 addrspace(1)* undef
+ br label %divergent.ret0
+
+divergent.ret0:
+ store volatile i32 11, i32 addrspace(3)* undef
+ ret void
+
+divergent.ret1:
+ store volatile i32 42, i32 addrspace(3)* undef
+ ret void
+
+uniform.ret:
+ store volatile i32 9, i32 addrspace(1)* undef
+ ret void
+}
+
+; IR-LABEL: @uniform_complex_multi_ret_nest_in_divergent_triangle(
+; IR: Flow1: ; preds = %uniform.ret1, %uniform.multi.exit.region
+; IR: %8 = phi i1 [ false, %uniform.ret1 ], [ true, %uniform.multi.exit.region ]
+; IR: br i1 %8, label %uniform.if, label %Flow2
+
+; IR: Flow: ; preds = %uniform.then, %uniform.if
+; IR: %11 = phi i1 [ %10, %uniform.then ], [ %9, %uniform.if ]
+; IR: br i1 %11, label %uniform.endif, label %uniform.ret0
+
+; IR: UnifiedReturnBlock: ; preds = %Flow3, %Flow2
+; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %6)
+; IR-NEXT: ret void
+define amdgpu_kernel void @uniform_complex_multi_ret_nest_in_divergent_triangle(i32 %arg0) #0 {
+entry:
+ %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %divergent.cond0 = icmp eq i32 %id.x, 0
+ br i1 %divergent.cond0, label %uniform.multi.exit.region, label %divergent.ret
+
+uniform.multi.exit.region:
+ %uniform.cond0 = icmp eq i32 %arg0, 4
+ br i1 %uniform.cond0, label %uniform.if, label %uniform.ret1
+
+uniform.if:
+ %sgpr0 = load volatile i32, i32 addrspace(2)* undef
+ %uniform.cond1 = icmp slt i32 %sgpr0, 1
+ br i1 %uniform.cond1, label %uniform.then, label %uniform.endif
+
+uniform.then:
+ %sgpr1 = load volatile i32, i32 addrspace(2)* undef
+ %uniform.cond2 = icmp sge i32 %sgpr1, 4
+ store volatile i32 33, i32 addrspace(1)* undef
+ br i1 %uniform.cond2, label %uniform.ret0, label %uniform.endif
+
+uniform.endif:
+ store volatile i32 38, i32 addrspace(1)* undef
+ br label %uniform.ret0
+
+uniform.ret0:
+ store volatile i32 11, i32 addrspace(3)* undef
+ ret void
+
+uniform.ret1:
+ store volatile i32 42, i32 addrspace(3)* undef
+ ret void
+
+divergent.ret:
+ store volatile i32 9, i32 addrspace(1)* undef
+ ret void
+}
+
+; IR-LABEL: @multi_divergent_unreachable_exit(
+; IR: UnifiedUnreachableBlock:
+; IR-NEXT: call void @llvm.amdgcn.unreachable()
+; IR-NEXT: br label %UnifiedReturnBlock
+
+; IR: UnifiedReturnBlock:
+; IR-NEXT: call void @llvm.amdgcn.end.cf(i64
+; IR-NEXT: ret void
+define amdgpu_kernel void @multi_divergent_unreachable_exit() #0 {
+bb:
+ %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+ switch i32 %tmp, label %bb3 [
+ i32 2, label %bb1
+ i32 0, label %bb2
+ ]
+
+bb1: ; preds = %bb
+ unreachable
+
+bb2: ; preds = %bb
+ unreachable
+
+bb3: ; preds = %bb
+ switch i32 undef, label %bb5 [
+ i32 2, label %bb4
+ ]
+
+bb4: ; preds = %bb3
+ ret void
+
+bb5: ; preds = %bb3
+ unreachable
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/multilevel-break.ll b/test/CodeGen/AMDGPU/multilevel-break.ll
index 95c7ce862329..15de689b953e 100644
--- a/test/CodeGen/AMDGPU/multilevel-break.ll
+++ b/test/CodeGen/AMDGPU/multilevel-break.ll
@@ -64,7 +64,7 @@ ENDIF: ; preds = %LOOP
br i1 %tmp51, label %LOOP, label %LOOP.outer
}
-; OPT-LABEL: define void @multi_if_break_loop(
+; OPT-LABEL: define amdgpu_kernel void @multi_if_break_loop(
; OPT: llvm.amdgcn.break
; OPT: llvm.amdgcn.loop
; OPT: llvm.amdgcn.if.break
@@ -79,7 +79,7 @@ ENDIF: ; preds = %LOOP
; Uses a copy instead of an or
; GCN: s_mov_b64 [[COPY:s\[[0-9]+:[0-9]+\]]], [[BREAK_REG]]
; GCN: s_or_b64 [[BREAK_REG]], exec, [[COPY]]
-define void @multi_if_break_loop(i32 %arg) #0 {
+define amdgpu_kernel void @multi_if_break_loop(i32 %arg) #0 {
bb:
%id = call i32 @llvm.amdgcn.workitem.id.x()
%tmp = sub i32 %id, %arg
diff --git a/test/CodeGen/AMDGPU/nested-loop-conditions.ll b/test/CodeGen/AMDGPU/nested-loop-conditions.ll
new file mode 100644
index 000000000000..672549c8ea63
--- /dev/null
+++ b/test/CodeGen/AMDGPU/nested-loop-conditions.ll
@@ -0,0 +1,269 @@
+; RUN: opt -mtriple=amdgcn-- -S -structurizecfg -si-annotate-control-flow %s | FileCheck -check-prefix=IR %s
+; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; After structurizing, there are 3 levels of loops. The i1 phi
+; conditions mutually depend on each other, so it isn't safe to delete
+; the condition that appears to have no uses until the loop is
+; completely processed.
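+;
+; The mutual dependence, copied from the phis in the function below:
+; %tmp14 is fed by %tmp22 and %tmp22 is in turn fed by %tmp14, so
+; neither i1 phi can be discarded as dead until the whole nest has been
+; rewritten:
+;
+;   %tmp14 = phi i1 [ %tmp22, %bb3 ], [ true, %bb8 ]     ; in %bb13
+;   %tmp22 = phi i1 [ false, %bb16 ], [ %tmp14, %bb13 ]  ; in %bb20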
+
+
+; IR-LABEL: @reduced_nested_loop_conditions(
+
+; IR: bb5:
+; IR-NEXT: %phi.broken = phi i64 [ %loop.phi, %bb10 ], [ 0, %bb ]
+; IR-NEXT: %tmp6 = phi i32 [ 0, %bb ], [ %tmp11, %bb10 ]
+; IR-NEXT: %tmp7 = icmp eq i32 %tmp6, 1
+; IR-NEXT: %0 = call { i1, i64 } @llvm.amdgcn.if(i1 %tmp7)
+; IR-NEXT: %1 = extractvalue { i1, i64 } %0, 0
+; IR-NEXT: %2 = extractvalue { i1, i64 } %0, 1
+; IR-NEXT: br i1 %1, label %bb8, label %Flow
+
+; IR: bb8:
+; IR-NEXT: %3 = call i64 @llvm.amdgcn.break(i64 %phi.broken)
+; IR-NEXT: br label %bb13
+
+; IR: bb10:
+; IR-NEXT: %loop.phi = phi i64 [ %6, %Flow ]
+; IR-NEXT: %tmp11 = phi i32 [ %5, %Flow ]
+; IR-NEXT: %4 = call i1 @llvm.amdgcn.loop(i64 %loop.phi)
+; IR-NEXT: br i1 %4, label %bb23, label %bb5
+
+; IR: Flow:
+; IR-NEXT: %loop.phi1 = phi i64 [ %loop.phi2, %bb4 ], [ %phi.broken, %bb5 ]
+; IR-NEXT: %5 = phi i32 [ %tmp21, %bb4 ], [ undef, %bb5 ]
+; IR-NEXT: %6 = call i64 @llvm.amdgcn.else.break(i64 %2, i64 %loop.phi1)
+; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %2)
+; IR-NEXT: br label %bb10
+
+; IR: bb13:
+; IR-NEXT: %loop.phi3 = phi i64 [ %loop.phi4, %bb3 ], [ %3, %bb8 ]
+; IR-NEXT: %tmp14 = phi i1 [ false, %bb3 ], [ true, %bb8 ]
+; IR-NEXT: %tmp15 = bitcast i64 %tmp2 to <2 x i32>
+; IR-NEXT: br i1 %tmp14, label %bb16, label %bb20
+
+; IR: bb16:
+; IR-NEXT: %tmp17 = extractelement <2 x i32> %tmp15, i64 1
+; IR-NEXT: %tmp18 = getelementptr inbounds i32, i32 addrspace(3)* undef, i32 %tmp17
+; IR-NEXT: %tmp19 = load volatile i32, i32 addrspace(3)* %tmp18
+; IR-NEXT: br label %bb20
+
+; IR: bb20:
+; IR-NEXT: %loop.phi4 = phi i64 [ %phi.broken, %bb16 ], [ %phi.broken, %bb13 ]
+; IR-NEXT: %loop.phi2 = phi i64 [ %phi.broken, %bb16 ], [ %loop.phi3, %bb13 ]
+; IR-NEXT: %tmp21 = phi i32 [ %tmp19, %bb16 ], [ 0, %bb13 ]
+; IR-NEXT: br label %bb9
+
+; IR: bb23:
+; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %loop.phi)
+; IR-NEXT: ret void
+
+; GCN-LABEL: {{^}}reduced_nested_loop_conditions:
+
+; GCN: s_cmp_eq_u32 s{{[0-9]+}}, 1
+; GCN-NEXT: s_cbranch_scc1
+
+; FIXME: Should fold to unconditional branch?
+; GCN: s_mov_b64 vcc, -1
+; GCN-NEXT: ; implicit-def
+; GCN: s_cbranch_vccz
+
+; GCN: ds_read_b32
+
+; GCN: [[BB9:BB[0-9]+_[0-9]+]]: ; %bb9
+; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN-NEXT: s_branch [[BB9]]
+define amdgpu_kernel void @reduced_nested_loop_conditions(i64 addrspace(3)* nocapture %arg) #0 {
+bb:
+ %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tmp1 = getelementptr inbounds i64, i64 addrspace(3)* %arg, i32 %tmp
+ %tmp2 = load volatile i64, i64 addrspace(3)* %tmp1
+ br label %bb5
+
+bb3: ; preds = %bb9
+ br i1 true, label %bb4, label %bb13
+
+bb4: ; preds = %bb3
+ br label %bb10
+
+bb5: ; preds = %bb10, %bb
+ %tmp6 = phi i32 [ 0, %bb ], [ %tmp11, %bb10 ]
+ %tmp7 = icmp eq i32 %tmp6, 1
+ br i1 %tmp7, label %bb8, label %bb10
+
+bb8: ; preds = %bb5
+ br label %bb13
+
+bb9: ; preds = %bb20, %bb9
+ br i1 false, label %bb3, label %bb9
+
+bb10: ; preds = %bb5, %bb4
+ %tmp11 = phi i32 [ %tmp21, %bb4 ], [ undef, %bb5 ]
+ %tmp12 = phi i1 [ %tmp22, %bb4 ], [ true, %bb5 ]
+ br i1 %tmp12, label %bb23, label %bb5
+
+bb13: ; preds = %bb8, %bb3
+ %tmp14 = phi i1 [ %tmp22, %bb3 ], [ true, %bb8 ]
+ %tmp15 = bitcast i64 %tmp2 to <2 x i32>
+ br i1 %tmp14, label %bb16, label %bb20
+
+bb16: ; preds = %bb13
+ %tmp17 = extractelement <2 x i32> %tmp15, i64 1
+ %tmp18 = getelementptr inbounds i32, i32 addrspace(3)* undef, i32 %tmp17
+ %tmp19 = load volatile i32, i32 addrspace(3)* %tmp18
+ br label %bb20
+
+bb20: ; preds = %bb16, %bb13
+ %tmp21 = phi i32 [ %tmp19, %bb16 ], [ 0, %bb13 ]
+ %tmp22 = phi i1 [ false, %bb16 ], [ %tmp14, %bb13 ]
+ br label %bb9
+
+bb23: ; preds = %bb10
+ ret void
+}
+
+; Earlier version of above, before a run of the structurizer.
+; IR-LABEL: @nested_loop_conditions(
+
+; IR: Flow7:
+; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %17)
+; IR-NEXT: %0 = call { i1, i64 } @llvm.amdgcn.if(i1 %15)
+; IR-NEXT: %1 = extractvalue { i1, i64 } %0, 0
+; IR-NEXT: %2 = extractvalue { i1, i64 } %0, 1
+; IR-NEXT: br i1 %1, label %bb4.bb13_crit_edge, label %Flow8
+
+; IR: Flow1:
+; IR-NEXT: %loop.phi = phi i64 [ %loop.phi9, %Flow6 ], [ %phi.broken, %bb14 ]
+; IR-NEXT: %13 = phi <4 x i32> [ %29, %Flow6 ], [ undef, %bb14 ]
+; IR-NEXT: %14 = phi i32 [ %30, %Flow6 ], [ undef, %bb14 ]
+; IR-NEXT: %15 = phi i1 [ %31, %Flow6 ], [ false, %bb14 ]
+; IR-NEXT: %16 = phi i1 [ false, %Flow6 ], [ %8, %bb14 ]
+; IR-NEXT: %17 = call i64 @llvm.amdgcn.else.break(i64 %11, i64 %loop.phi)
+; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %11)
+; IR-NEXT: %18 = call i1 @llvm.amdgcn.loop(i64 %17)
+; IR-NEXT: br i1 %18, label %Flow7, label %bb14
+
+; IR: Flow2:
+; IR-NEXT: %loop.phi10 = phi i64 [ %loop.phi11, %Flow5 ], [ %12, %bb16 ]
+; IR-NEXT: %19 = phi <4 x i32> [ %29, %Flow5 ], [ undef, %bb16 ]
+; IR-NEXT: %20 = phi i32 [ %30, %Flow5 ], [ undef, %bb16 ]
+; IR-NEXT: %21 = phi i1 [ %31, %Flow5 ], [ false, %bb16 ]
+; IR-NEXT: %22 = phi i1 [ false, %Flow5 ], [ false, %bb16 ]
+; IR-NEXT: %23 = phi i1 [ false, %Flow5 ], [ %8, %bb16 ]
+; IR-NEXT: %24 = call { i1, i64 } @llvm.amdgcn.if(i1 %23)
+; IR-NEXT: %25 = extractvalue { i1, i64 } %24, 0
+; IR-NEXT: %26 = extractvalue { i1, i64 } %24, 1
+; IR-NEXT: br i1 %25, label %bb21, label %Flow3
+
+; IR: bb21:
+; IR: %tmp12 = icmp slt i32 %tmp11, 9
+; IR-NEXT: %27 = xor i1 %tmp12, true
+; IR-NEXT: %28 = call i64 @llvm.amdgcn.if.break(i1 %27, i64 %phi.broken)
+; IR-NEXT: br label %Flow3
+
+; IR: Flow3:
+; IR-NEXT: %loop.phi11 = phi i64 [ %phi.broken, %bb21 ], [ %phi.broken, %Flow2 ]
+; IR-NEXT: %loop.phi9 = phi i64 [ %28, %bb21 ], [ %loop.phi10, %Flow2 ]
+; IR-NEXT: %29 = phi <4 x i32> [ %tmp9, %bb21 ], [ %19, %Flow2 ]
+; IR-NEXT: %30 = phi i32 [ %tmp10, %bb21 ], [ %20, %Flow2 ]
+; IR-NEXT: %31 = phi i1 [ %27, %bb21 ], [ %21, %Flow2 ]
+; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %26)
+; IR-NEXT: br i1 %22, label %bb31.loopexit, label %Flow4
+
+; IR: bb31:
+; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %7)
+; IR-NEXT: store volatile i32 0, i32 addrspace(1)* undef
+; IR-NEXT: ret void
+
+
+; GCN-LABEL: {{^}}nested_loop_conditions:
+
+; GCN: v_cmp_lt_i32_e32 vcc, 8, v
+; GCN: s_and_b64 vcc, exec, vcc
+; GCN: s_cbranch_vccnz [[BB31:BB[0-9]+_[0-9]+]]
+
+; GCN: [[BB14:BB[0-9]+_[0-9]+]]: ; %bb14
+; GCN: v_cmp_ne_u32_e32 vcc, 1, v
+; GCN-NEXT: s_and_b64 vcc, exec, vcc
+; GCN-NEXT: s_cbranch_vccnz [[BB31]]
+
+; GCN: [[BB18:BB[0-9]+_[0-9]+]]: ; %bb18
+; GCN: buffer_load_dword
+; GCN: v_cmp_lt_i32_e32 vcc, 8, v
+; GCN-NEXT: s_and_b64 vcc, exec, vcc
+; GCN-NEXT: s_cbranch_vccnz [[BB18]]
+
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: v_cmp_gt_i32_e32 vcc, 9
+; GCN-NEXT: s_and_b64 vcc, exec, vcc
+; GCN-NEXT: s_cbranch_vccnz [[BB14]]
+
+; GCN: [[BB31]]:
+; GCN: buffer_store_dword
+; GCN: s_endpgm
+define amdgpu_kernel void @nested_loop_conditions(i64 addrspace(1)* nocapture %arg) #0 {
+bb:
+ %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tmp1 = zext i32 %tmp to i64
+ %tmp2 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 %tmp1
+ %tmp3 = load i64, i64 addrspace(1)* %tmp2, align 16
+ %tmp932 = load <4 x i32>, <4 x i32> addrspace(1)* undef, align 16
+ %tmp1033 = extractelement <4 x i32> %tmp932, i64 0
+ %tmp1134 = load volatile i32, i32 addrspace(1)* undef
+ %tmp1235 = icmp slt i32 %tmp1134, 9
+ br i1 %tmp1235, label %bb14.lr.ph, label %bb13
+
+bb14.lr.ph: ; preds = %bb
+ br label %bb14
+
+bb4.bb13_crit_edge: ; preds = %bb21
+ br label %bb13
+
+bb13: ; preds = %bb4.bb13_crit_edge, %bb
+ br label %bb31
+
+bb14: ; preds = %bb21, %bb14.lr.ph
+ %tmp1037 = phi i32 [ %tmp1033, %bb14.lr.ph ], [ %tmp10, %bb21 ]
+ %tmp936 = phi <4 x i32> [ %tmp932, %bb14.lr.ph ], [ %tmp9, %bb21 ]
+ %tmp15 = icmp eq i32 %tmp1037, 1
+ br i1 %tmp15, label %bb16, label %bb31.loopexit
+
+bb16: ; preds = %bb14
+ %tmp17 = bitcast i64 %tmp3 to <2 x i32>
+ br label %bb18
+
+bb18: ; preds = %bb18, %bb16
+ %tmp19 = load volatile i32, i32 addrspace(1)* undef
+ %tmp20 = icmp slt i32 %tmp19, 9
+ br i1 %tmp20, label %bb21, label %bb18
+
+bb21: ; preds = %bb18
+ %tmp22 = extractelement <2 x i32> %tmp17, i64 1
+ %tmp23 = lshr i32 %tmp22, 16
+ %tmp24 = select i1 undef, i32 undef, i32 %tmp23
+ %tmp25 = uitofp i32 %tmp24 to float
+ %tmp26 = fmul float %tmp25, 0x3EF0001000000000
+ %tmp27 = fsub float %tmp26, undef
+ %tmp28 = fcmp olt float %tmp27, 5.000000e-01
+ %tmp29 = select i1 %tmp28, i64 1, i64 2
+ %tmp30 = extractelement <4 x i32> %tmp936, i64 %tmp29
+ %tmp7 = zext i32 %tmp30 to i64
+ %tmp8 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* undef, i64 %tmp7
+ %tmp9 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp8, align 16
+ %tmp10 = extractelement <4 x i32> %tmp9, i64 0
+ %tmp11 = load volatile i32, i32 addrspace(1)* undef
+ %tmp12 = icmp slt i32 %tmp11, 9
+ br i1 %tmp12, label %bb14, label %bb4.bb13_crit_edge
+
+bb31.loopexit: ; preds = %bb14
+ br label %bb31
+
+bb31: ; preds = %bb31.loopexit, %bb13
+ store volatile i32 0, i32 addrspace(1)* undef
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/no-initializer-constant-addrspace.ll b/test/CodeGen/AMDGPU/no-initializer-constant-addrspace.ll
index 9dd99efd997c..97dc67f82607 100644
--- a/test/CodeGen/AMDGPU/no-initializer-constant-addrspace.ll
+++ b/test/CodeGen/AMDGPU/no-initializer-constant-addrspace.ll
@@ -9,7 +9,7 @@
@extern_const_addrspace = external unnamed_addr addrspace(2) constant [5 x i32], align 4
; CHECK-DAG: Name: load_extern_const_init
-define void @load_extern_const_init(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @load_extern_const_init(i32 addrspace(1)* %out) nounwind {
%val = load i32, i32 addrspace(2)* getelementptr ([5 x i32], [5 x i32] addrspace(2)* @extern_const_addrspace, i64 0, i64 3), align 4
store i32 %val, i32 addrspace(1)* %out, align 4
ret void
@@ -19,7 +19,7 @@ define void @load_extern_const_init(i32 addrspace(1)* %out) nounwind {
@undef_const_addrspace = unnamed_addr addrspace(2) constant [5 x i32] undef, align 4
; CHECK-DAG: Name: undef_const_addrspace
-define void @load_undef_const_init(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @load_undef_const_init(i32 addrspace(1)* %out) nounwind {
%val = load i32, i32 addrspace(2)* getelementptr ([5 x i32], [5 x i32] addrspace(2)* @undef_const_addrspace, i64 0, i64 3), align 4
store i32 %val, i32 addrspace(1)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/no-shrink-extloads.ll b/test/CodeGen/AMDGPU/no-shrink-extloads.ll
index fd66b0b5d1f6..8a7bf6db5b8d 100644
--- a/test/CodeGen/AMDGPU/no-shrink-extloads.ll
+++ b/test/CodeGen/AMDGPU/no-shrink-extloads.ll
@@ -9,7 +9,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
; FUNC-LABEL: {{^}}truncate_kernarg_i32_to_i16:
; SI: s_load_dword s
; SI: buffer_store_short v
-define void @truncate_kernarg_i32_to_i16(i16 addrspace(1)* %out, i32 %arg) nounwind {
+define amdgpu_kernel void @truncate_kernarg_i32_to_i16(i16 addrspace(1)* %out, i32 %arg) nounwind {
%trunc = trunc i32 %arg to i16
store i16 %trunc, i16 addrspace(1)* %out
ret void
@@ -21,7 +21,7 @@ define void @truncate_kernarg_i32_to_i16(i16 addrspace(1)* %out, i32 %arg) nounw
; FUNC-LABEL: {{^}}truncate_buffer_load_i32_to_i16:
; SI: buffer_load_dword v
; SI: buffer_store_short v
-define void @truncate_buffer_load_i32_to_i16(i16 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @truncate_buffer_load_i32_to_i16(i16 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i16, i16 addrspace(1)* %out, i32 %tid
@@ -34,7 +34,7 @@ define void @truncate_buffer_load_i32_to_i16(i16 addrspace(1)* %out, i32 addrspa
; FUNC-LABEL: {{^}}truncate_kernarg_i32_to_i8:
; SI: s_load_dword s
; SI: buffer_store_byte v
-define void @truncate_kernarg_i32_to_i8(i8 addrspace(1)* %out, i32 %arg) nounwind {
+define amdgpu_kernel void @truncate_kernarg_i32_to_i8(i8 addrspace(1)* %out, i32 %arg) nounwind {
%trunc = trunc i32 %arg to i8
store i8 %trunc, i8 addrspace(1)* %out
ret void
@@ -43,7 +43,7 @@ define void @truncate_kernarg_i32_to_i8(i8 addrspace(1)* %out, i32 %arg) nounwin
; FUNC-LABEL: {{^}}truncate_buffer_load_i32_to_i8:
; SI: buffer_load_dword v
; SI: buffer_store_byte v
-define void @truncate_buffer_load_i32_to_i8(i8 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @truncate_buffer_load_i32_to_i8(i8 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i8, i8 addrspace(1)* %out, i32 %tid
@@ -56,7 +56,7 @@ define void @truncate_buffer_load_i32_to_i8(i8 addrspace(1)* %out, i32 addrspace
; FUNC-LABEL: {{^}}truncate_kernarg_i32_to_i1:
; SI: s_load_dword s
; SI: buffer_store_byte v
-define void @truncate_kernarg_i32_to_i1(i1 addrspace(1)* %out, i32 %arg) nounwind {
+define amdgpu_kernel void @truncate_kernarg_i32_to_i1(i1 addrspace(1)* %out, i32 %arg) nounwind {
%trunc = trunc i32 %arg to i1
store i1 %trunc, i1 addrspace(1)* %out
ret void
@@ -65,7 +65,7 @@ define void @truncate_kernarg_i32_to_i1(i1 addrspace(1)* %out, i32 %arg) nounwin
; FUNC-LABEL: {{^}}truncate_buffer_load_i32_to_i1:
; SI: buffer_load_dword v
; SI: buffer_store_byte v
-define void @truncate_buffer_load_i32_to_i1(i1 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @truncate_buffer_load_i32_to_i1(i1 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i1, i1 addrspace(1)* %out, i32 %tid
@@ -78,7 +78,7 @@ define void @truncate_buffer_load_i32_to_i1(i1 addrspace(1)* %out, i32 addrspace
; FUNC-LABEL: {{^}}truncate_kernarg_i64_to_i32:
; SI: s_load_dword s
; SI: buffer_store_dword v
-define void @truncate_kernarg_i64_to_i32(i32 addrspace(1)* %out, i64 %arg) nounwind {
+define amdgpu_kernel void @truncate_kernarg_i64_to_i32(i32 addrspace(1)* %out, i64 %arg) nounwind {
%trunc = trunc i64 %arg to i32
store i32 %trunc, i32 addrspace(1)* %out
ret void
@@ -87,7 +87,7 @@ define void @truncate_kernarg_i64_to_i32(i32 addrspace(1)* %out, i64 %arg) nounw
; FUNC-LABEL: {{^}}truncate_buffer_load_i64_to_i32:
; SI: buffer_load_dword v
; SI: buffer_store_dword v
-define void @truncate_buffer_load_i64_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @truncate_buffer_load_i64_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -100,7 +100,7 @@ define void @truncate_buffer_load_i64_to_i32(i32 addrspace(1)* %out, i64 addrspa
; FUNC-LABEL: {{^}}srl_kernarg_i64_to_i32:
; SI: s_load_dword s
; SI: buffer_store_dword v
-define void @srl_kernarg_i64_to_i32(i32 addrspace(1)* %out, i64 %arg) nounwind {
+define amdgpu_kernel void @srl_kernarg_i64_to_i32(i32 addrspace(1)* %out, i64 %arg) nounwind {
%srl = lshr i64 %arg, 32
%trunc = trunc i64 %srl to i32
store i32 %trunc, i32 addrspace(1)* %out
@@ -110,7 +110,7 @@ define void @srl_kernarg_i64_to_i32(i32 addrspace(1)* %out, i64 %arg) nounwind {
; FUNC-LABEL: {{^}}srl_buffer_load_i64_to_i32:
; SI: buffer_load_dword v
; SI: buffer_store_dword v
-define void @srl_buffer_load_i64_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @srl_buffer_load_i64_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
@@ -125,7 +125,7 @@ define void @srl_buffer_load_i64_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)
; FUNC-LABEL: {{^}}truncate_kernarg_i16_to_i8:
; SI: s_load_dword s
; SI: buffer_store_byte v
-define void @truncate_kernarg_i16_to_i8(i8 addrspace(1)* %out, i16 %arg) nounwind {
+define amdgpu_kernel void @truncate_kernarg_i16_to_i8(i8 addrspace(1)* %out, i16 %arg) nounwind {
%trunc = trunc i16 %arg to i8
store i8 %trunc, i8 addrspace(1)* %out
ret void
@@ -134,7 +134,7 @@ define void @truncate_kernarg_i16_to_i8(i8 addrspace(1)* %out, i16 %arg) nounwin
; FUNC-LABEL: {{^}}truncate_buffer_load_i16_to_i8:
; SI: buffer_load_ubyte v
; SI: buffer_store_byte v
-define void @truncate_buffer_load_i16_to_i8(i8 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @truncate_buffer_load_i16_to_i8(i8 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.in = getelementptr i16, i16 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i8, i8 addrspace(1)* %out, i32 %tid
@@ -147,7 +147,7 @@ define void @truncate_buffer_load_i16_to_i8(i8 addrspace(1)* %out, i16 addrspace
; FUNC-LABEL: {{^}}srl_kernarg_i64_to_i8:
; SI: s_load_dword s
; SI: buffer_store_byte v
-define void @srl_kernarg_i64_to_i8(i8 addrspace(1)* %out, i64 %arg) nounwind {
+define amdgpu_kernel void @srl_kernarg_i64_to_i8(i8 addrspace(1)* %out, i64 %arg) nounwind {
%srl = lshr i64 %arg, 32
%trunc = trunc i64 %srl to i8
store i8 %trunc, i8 addrspace(1)* %out
@@ -157,7 +157,7 @@ define void @srl_kernarg_i64_to_i8(i8 addrspace(1)* %out, i64 %arg) nounwind {
; FUNC-LABEL: {{^}}srl_buffer_load_i64_to_i8:
; SI: buffer_load_dword v
; SI: buffer_store_byte v
-define void @srl_buffer_load_i64_to_i8(i8 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @srl_buffer_load_i64_to_i8(i8 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i8, i8 addrspace(1)* %out, i32 %tid
@@ -171,7 +171,7 @@ define void @srl_buffer_load_i64_to_i8(i8 addrspace(1)* %out, i64 addrspace(1)*
; FUNC-LABEL: {{^}}truncate_kernarg_i64_to_i8:
; SI: s_load_dword s
; SI: buffer_store_byte v
-define void @truncate_kernarg_i64_to_i8(i8 addrspace(1)* %out, i64 %arg) nounwind {
+define amdgpu_kernel void @truncate_kernarg_i64_to_i8(i8 addrspace(1)* %out, i64 %arg) nounwind {
%trunc = trunc i64 %arg to i8
store i8 %trunc, i8 addrspace(1)* %out
ret void
@@ -180,7 +180,7 @@ define void @truncate_kernarg_i64_to_i8(i8 addrspace(1)* %out, i64 %arg) nounwin
; FUNC-LABEL: {{^}}truncate_buffer_load_i64_to_i8:
; SI: buffer_load_dword v
; SI: buffer_store_byte v
-define void @truncate_buffer_load_i64_to_i8(i8 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @truncate_buffer_load_i64_to_i8(i8 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i8, i8 addrspace(1)* %out, i32 %tid
@@ -194,7 +194,7 @@ define void @truncate_buffer_load_i64_to_i8(i8 addrspace(1)* %out, i64 addrspace
; SI: s_load_dword [[LOAD:s[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0x0
; SI: s_waitcnt lgkmcnt(0)
; SI: s_and_b32 s{{[0-9]+}}, [[LOAD]], 0xffff
-define void @smrd_mask_i32_to_i16(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
+define amdgpu_kernel void @smrd_mask_i32_to_i16(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
entry:
%val = load i32, i32 addrspace(2)* %in
%mask = and i32 %val, 65535
@@ -205,7 +205,7 @@ entry:
; FUNC-LABEL: {{^}}extract_hi_i64_bitcast_v2i32:
; SI: buffer_load_dword v
; SI: buffer_store_dword v
-define void @extract_hi_i64_bitcast_v2i32(i32 addrspace(1)* %out, <2 x i32> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @extract_hi_i64_bitcast_v2i32(i32 addrspace(1)* %out, <2 x i32> addrspace(1)* %in) nounwind {
%ld = load <2 x i32>, <2 x i32> addrspace(1)* %in
%bc = bitcast <2 x i32> %ld to i64
%hi = lshr i64 %bc, 32
diff --git a/test/CodeGen/AMDGPU/nop-data.ll b/test/CodeGen/AMDGPU/nop-data.ll
new file mode 100644
index 000000000000..b68f343097e5
--- /dev/null
+++ b/test/CodeGen/AMDGPU/nop-data.ll
@@ -0,0 +1,87 @@
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -filetype=obj < %s | llvm-objdump -d - -mcpu=fiji | FileCheck %s
+
+; CHECK: kernel0:
+; CHECK-NEXT: s_endpgm
+define amdgpu_kernel void @kernel0() align 256 {
+entry:
+ ret void
+}
+
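+; kernel1 below is aligned to 256 bytes, so the padding after kernel0's lone
+; s_endpgm is expected to be filled with s_nop (encoding BF800000, as checked
+; at the end of this block) rather than zero bytes; presumably 63 nops of 4
+; bytes each cover the remaining 252 bytes up to the next 256-byte boundary.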
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_nop 0 // 0000000001FC: BF800000
+
+; CHECK-NEXT: {{^$}}
+; CHECK-NEXT: kernel1:
+; CHECK-NEXT: s_endpgm
+define amdgpu_kernel void @kernel1(i32 addrspace(1)* addrspace(2)* %ptr.out) align 256 {
+entry:
+ ret void
+}
diff --git a/test/CodeGen/AMDGPU/nullptr.ll b/test/CodeGen/AMDGPU/nullptr.ll
new file mode 100644
index 000000000000..0df16da13562
--- /dev/null
+++ b/test/CodeGen/AMDGPU/nullptr.ll
@@ -0,0 +1,113 @@
+; RUN: llc < %s -march=amdgcn -verify-machineinstrs | FileCheck %s
+
+%struct.S = type { i32*, i32 addrspace(1)*, i32 addrspace(2)*, i32 addrspace(3)*, i32 addrspace(4)*, i32 addrspace(5)*}
+
+; CHECK-LABEL: nullptr_priv:
+; CHECK-NEXT: .long 0
+@nullptr_priv = global i32* addrspacecast (i32 addrspace(4)* null to i32*)
+
+; CHECK-LABEL: nullptr_glob:
+; CHECK-NEXT: .quad 0
+@nullptr_glob = global i32 addrspace(1)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(1)*)
+
+; CHECK-LABEL: nullptr_const:
+; CHECK-NEXT: .quad 0
+@nullptr_const = global i32 addrspace(2)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(2)*)
+
+; CHECK-LABEL: nullptr_local:
+; CHECK-NEXT: .long -1
+@nullptr_local = global i32 addrspace(3)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*)
+
+; CHECK-LABEL: nullptr_region:
+; CHECK-NEXT: .long -1
+@nullptr_region = global i32 addrspace(5)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(5)*)
+
+; CHECK-LABEL: nullptr6:
+; CHECK-NEXT: .long 0
+@nullptr6 = global i32 addrspace(6)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(6)*)
+
+; CHECK-LABEL: nullptr7:
+; CHECK-NEXT: .long 0
+@nullptr7 = global i32 addrspace(7)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(7)*)
+
+; CHECK-LABEL: nullptr8:
+; CHECK-NEXT: .long 0
+@nullptr8 = global i32 addrspace(8)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(8)*)
+
+; CHECK-LABEL: nullptr9:
+; CHECK-NEXT: .long 0
+@nullptr9 = global i32 addrspace(9)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(9)*)
+
+; CHECK-LABEL: nullptr10:
+; CHECK-NEXT: .long 0
+@nullptr10 = global i32 addrspace(10)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(10)*)
+
+; CHECK-LABEL: nullptr11:
+; CHECK-NEXT: .long 0
+@nullptr11 = global i32 addrspace(11)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(11)*)
+
+; CHECK-LABEL: nullptr12:
+; CHECK-NEXT: .long 0
+@nullptr12 = global i32 addrspace(12)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(12)*)
+
+; CHECK-LABEL: nullptr13:
+; CHECK-NEXT: .long 0
+@nullptr13 = global i32 addrspace(13)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(13)*)
+
+; CHECK-LABEL: nullptr14:
+; CHECK-NEXT: .long 0
+@nullptr14 = global i32 addrspace(14)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(14)*)
+
+; CHECK-LABEL: nullptr15:
+; CHECK-NEXT: .long 0
+@nullptr15 = global i32 addrspace(15)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(15)*)
+
+; CHECK-LABEL: nullptr16:
+; CHECK-NEXT: .long 0
+@nullptr16 = global i32 addrspace(16)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(16)*)
+
+; CHECK-LABEL: nullptr17:
+; CHECK-NEXT: .long 0
+@nullptr17 = global i32 addrspace(17)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(17)*)
+
+; CHECK-LABEL: nullptr18:
+; CHECK-NEXT: .long 0
+@nullptr18 = global i32 addrspace(18)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(18)*)
+
+; CHECK-LABEL: nullptr19:
+; CHECK-NEXT: .long 0
+@nullptr19 = global i32 addrspace(19)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(19)*)
+
+; CHECK-LABEL: nullptr20:
+; CHECK-NEXT: .long 0
+@nullptr20 = global i32 addrspace(20)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(20)*)
+
+; CHECK-LABEL: nullptr21:
+; CHECK-NEXT: .long 0
+@nullptr21 = global i32 addrspace(21)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(21)*)
+
+; CHECK-LABEL: nullptr22:
+; CHECK-NEXT: .long 0
+@nullptr22 = global i32 addrspace(22)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(22)*)
+
+; CHECK-LABEL: nullptr23:
+; CHECK-NEXT: .long 0
+@nullptr23 = global i32 addrspace(23)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(23)*)
+
+; CHECK-LABEL: structWithPointers:
+; CHECK-NEXT: .long 0
+; CHECK-NEXT: .zero 4
+; CHECK-NEXT: .quad 0
+; CHECK-NEXT: .quad 0
+; CHECK-NEXT: .long -1
+; CHECK-NEXT: .zero 4
+; CHECK-NEXT: .quad 0
+; CHECK-NEXT: .long -1
+; CHECK-NEXT: .zero 4
+@structWithPointers = addrspace(1) global %struct.S {
+ i32* addrspacecast (i32 addrspace(4)* null to i32*),
+ i32 addrspace(1)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(1)*),
+ i32 addrspace(2)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(2)*),
+ i32 addrspace(3)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*),
+ i32 addrspace(4)* null,
+ i32 addrspace(5)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(5)*)}, align 4
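+
+; Note: the local (addrspace 3) and region (addrspace 5) null pointers above
+; are encoded as all-ones (.long -1), presumably because offset 0 is a valid
+; LDS/GDS location on this target; the other address spaces use a zero null.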
diff --git a/test/CodeGen/AMDGPU/omod.ll b/test/CodeGen/AMDGPU/omod.ll
new file mode 100644
index 000000000000..3fd7b13fcc58
--- /dev/null
+++ b/test/CodeGen/AMDGPU/omod.ll
@@ -0,0 +1,297 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; IEEE bit enabled for compute kernel, so shouldn't use.
+; GCN-LABEL: {{^}}v_omod_div2_f32_enable_ieee_signed_zeros:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_kernel void @v_omod_div2_f32_enable_ieee_signed_zeros(float addrspace(1)* %out, float addrspace(1)* %aptr) #4 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %add = fadd float %a, 1.0
+ %div2 = fmul float %add, 0.5
+ store float %div2, float addrspace(1)* %out.gep
+ ret void
+}
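+
+; Background (our reading of the checks in this file): the VOP3 output
+; modifier (omod) scales a result by 0.5 (div:2), 2.0 (mul:2) or 4.0 (mul:4),
+; but it is not IEEE-compliant, so the fold is avoided when the IEEE bit is
+; set, when signed zeros are significant, or when denormals are enabled.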
+
+; IEEE bit enabled for compute kernel, so shouldn't use even though nsz is allowed.
+; GCN-LABEL: {{^}}v_omod_div2_f32_enable_ieee_nsz:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_kernel void @v_omod_div2_f32_enable_ieee_nsz(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %a = load float, float addrspace(1)* %gep0
+ %add = fadd float %a, 1.0
+ %div2 = fmul float %add, 0.5
+ store float %div2, float addrspace(1)* %out.gep
+ ret void
+}
+
+; Without the IEEE bit, omod is only allowed when signed zeros are insignificant; here they are significant, so it is not used.
+; GCN-LABEL: {{^}}v_omod_div2_f32_signed_zeros:
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_div2_f32_signed_zeros(float %a) #4 {
+ %add = fadd float %a, 1.0
+ %div2 = fmul float %add, 0.5
+ store float %div2, float addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_div2_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 div:2{{$}}
+define amdgpu_ps void @v_omod_div2_f32(float %a) #0 {
+ %add = fadd float %a, 1.0
+ %div2 = fmul float %add, 0.5
+ store float %div2, float addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_mul2_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 mul:2{{$}}
+define amdgpu_ps void @v_omod_mul2_f32(float %a) #0 {
+ %add = fadd float %a, 1.0
+ %div2 = fmul float %add, 2.0
+ store float %div2, float addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_mul4_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 mul:4{{$}}
+define amdgpu_ps void @v_omod_mul4_f32(float %a) #0 {
+ %add = fadd float %a, 1.0
+ %div2 = fmul float %add, 4.0
+ store float %div2, float addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_mul4_multi_use_f32:
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 4.0, [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_mul4_multi_use_f32(float %a) #0 {
+ %add = fadd float %a, 1.0
+ %div2 = fmul float %add, 4.0
+ store float %div2, float addrspace(1)* undef
+ store volatile float %add, float addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_mul4_dbg_use_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 mul:4{{$}}
+define amdgpu_ps void @v_omod_mul4_dbg_use_f32(float %a) #0 {
+ %add = fadd float %a, 1.0
+ call void @llvm.dbg.value(metadata float %add, i64 0, metadata !4, metadata !9), !dbg !10
+ %div2 = fmul float %add, 4.0
+ store float %div2, float addrspace(1)* undef
+ ret void
+}
+
+; Clamp is applied after omod, so folding both into the instruction is OK.
+; GCN-LABEL: {{^}}v_clamp_omod_div2_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 clamp div:2{{$}}
+define amdgpu_ps void @v_clamp_omod_div2_f32(float %a) #0 {
+ %add = fadd float %a, 1.0
+ %div2 = fmul float %add, 0.5
+
+ %max = call float @llvm.maxnum.f32(float %div2, float 0.0)
+ %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+ store float %clamp, float addrspace(1)* undef
+ ret void
+}
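+
+; Since clamp is applied after omod in hardware, clamp(omod(x)) above folds
+; into one instruction, while the omod-after-clamp case below cannot.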
+
+; Cannot fold omod into clamp
+; GCN-LABEL: {{^}}v_omod_div2_clamp_f32:
+; GCN: v_add_f32_e64 [[ADD:v[0-9]+]], v0, 1.0 clamp{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_div2_clamp_f32(float %a) #0 {
+ %add = fadd float %a, 1.0
+ %max = call float @llvm.maxnum.f32(float %add, float 0.0)
+ %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+ %div2 = fmul float %clamp, 0.5
+ store float %div2, float addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_div2_abs_src_f32:
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; GCN: v_mul_f32_e64 v{{[0-9]+}}, |[[ADD]]|, 0.5{{$}}
+define amdgpu_ps void @v_omod_div2_abs_src_f32(float %a) #0 {
+ %add = fadd float %a, 1.0
+ %abs.add = call float @llvm.fabs.f32(float %add)
+ %div2 = fmul float %abs.add, 0.5
+ store float %div2, float addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_self_clamp_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, v0 clamp{{$}}
+define amdgpu_ps void @v_omod_add_self_clamp_f32(float %a) #0 {
+ %add = fadd float %a, %a
+ %max = call float @llvm.maxnum.f32(float %add, float 0.0)
+ %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+ store float %clamp, float addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_clamp_self_f32:
+; GCN: v_max_f32_e64 [[CLAMP:v[0-9]+]], v0, v0 clamp{{$}}
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[CLAMP]], [[CLAMP]]{{$}}
+define amdgpu_ps void @v_omod_add_clamp_self_f32(float %a) #0 {
+ %max = call float @llvm.maxnum.f32(float %a, float 0.0)
+ %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+ %add = fadd float %clamp, %clamp
+ store float %add, float addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_abs_self_f32:
+; GCN: v_add_f32_e32 [[X:v[0-9]+]], 1.0, v0
+; GCN: v_add_f32_e64 v{{[0-9]+}}, |[[X]]|, |[[X]]|{{$}}
+define amdgpu_ps void @v_omod_add_abs_self_f32(float %a) #0 {
+ %x = fadd float %a, 1.0
+ %abs.x = call float @llvm.fabs.f32(float %x)
+ %add = fadd float %abs.x, %abs.x
+ store float %add, float addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_abs_x_x_f32:
+
+; GCN: v_add_f32_e32 [[X:v[0-9]+]], 1.0, v0
+; GCN: v_add_f32_e64 v{{[0-9]+}}, |[[X]]|, [[X]]{{$}}
+define amdgpu_ps void @v_omod_add_abs_x_x_f32(float %a) #0 {
+ %x = fadd float %a, 1.0
+ %abs.x = call float @llvm.fabs.f32(float %x)
+ %add = fadd float %abs.x, %x
+ store float %add, float addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_x_abs_x_f32:
+; GCN: v_add_f32_e32 [[X:v[0-9]+]], 1.0, v0
+; GCN: v_add_f32_e64 v{{[0-9]+}}, [[X]], |[[X]]|{{$}}
+define amdgpu_ps void @v_omod_add_x_abs_x_f32(float %a) #0 {
+ %x = fadd float %a, 1.0
+ %abs.x = call float @llvm.fabs.f32(float %x)
+ %add = fadd float %x, %abs.x
+ store float %add, float addrspace(1)* undef
+ ret void
+}
+
+; Don't fold one omod into another omod.
+; GCN-LABEL: {{^}}v_omod_div2_omod_div2_f32:
+; GCN: v_add_f32_e64 [[ADD:v[0-9]+]], v0, 1.0 div:2{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_div2_omod_div2_f32(float %a) #0 {
+ %add = fadd float %a, 1.0
+ %div2.0 = fmul float %add, 0.5
+ %div2.1 = fmul float %div2.0, 0.5
+ store float %div2.1, float addrspace(1)* undef
+ ret void
+}
+
+; Don't fold omod if denorms enabled
+; GCN-LABEL: {{^}}v_omod_div2_f32_denormals:
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_div2_f32_denormals(float %a) #2 {
+ %add = fadd float %a, 1.0
+ %div2 = fmul float %add, 0.5
+ store float %div2, float addrspace(1)* undef
+ ret void
+}
+
+; Don't fold omod if denorms enabled for add form.
+; GCN-LABEL: {{^}}v_omod_mul2_f32_denormals:
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[ADD]], [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_mul2_f32_denormals(float %a) #2 {
+ %add = fadd float %a, 1.0
+ %mul2 = fadd float %add, %add
+ store float %mul2, float addrspace(1)* undef
+ ret void
+}
+
+; Don't fold omod if denorms enabled
+; GCN-LABEL: {{^}}v_omod_div2_f16_denormals:
+; VI: v_add_f16_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; VI: v_mul_f16_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_div2_f16_denormals(half %a) #0 {
+ %add = fadd half %a, 1.0
+ %div2 = fmul half %add, 0.5
+ store half %div2, half addrspace(1)* undef
+ ret void
+}
+
+; Don't fold omod if denorms enabled for add form.
+; GCN-LABEL: {{^}}v_omod_mul2_f16_denormals:
+; VI: v_add_f16_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; VI: v_add_f16_e32 v{{[0-9]+}}, [[ADD]], [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_mul2_f16_denormals(half %a) #0 {
+ %add = fadd half %a, 1.0
+ %mul2 = fadd half %add, %add
+ store half %mul2, half addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_div2_f16_no_denormals:
+; VI-NOT: v0
+; VI: v_add_f16_e64 [[ADD:v[0-9]+]], v0, 1.0 div:2{{$}}
+define amdgpu_ps void @v_omod_div2_f16_no_denormals(half %a) #3 {
+ %add = fadd half %a, 1.0
+ %div2 = fmul half %add, 0.5
+ store half %div2, half addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_mac_to_mad:
+; GCN: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]}} mul:2{{$}}
+define amdgpu_ps void @v_omod_mac_to_mad(float %b, float %a) #0 {
+ %mul = fmul float %a, %a
+ %add = fadd float %mul, %b
+ %mad = fmul float %add, 2.0
+ %res = fmul float %mad, %b
+ store float %res, float addrspace(1)* undef
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+declare float @llvm.fabs.f32(float) #1
+declare float @llvm.floor.f32(float) #1
+declare float @llvm.minnum.f32(float, float) #1
+declare float @llvm.maxnum.f32(float, float) #1
+declare float @llvm.amdgcn.fmed3.f32(float, float, float) #1
+declare double @llvm.fabs.f64(double) #1
+declare double @llvm.minnum.f64(double, double) #1
+declare double @llvm.maxnum.f64(double, double) #1
+declare half @llvm.fabs.f16(half) #1
+declare half @llvm.minnum.f16(half, half) #1
+declare half @llvm.maxnum.f16(half, half) #1
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
+
+attributes #0 = { nounwind "no-signed-zeros-fp-math"="true" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind "target-features"="+fp32-denormals" "no-signed-zeros-fp-math"="true" }
+attributes #3 = { nounwind "target-features"="-fp64-fp16-denormals" "no-signed-zeros-fp-math"="true" }
+attributes #4 = { nounwind "no-signed-zeros-fp-math"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, isOptimized: true, runtimeVersion: 0, emissionKind: NoDebug)
+!1 = !DIFile(filename: "/tmp/foo.cl", directory: "/dev/null")
+!2 = !{i32 2, !"Dwarf Version", i32 4}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !DILocalVariable(name: "add", arg: 1, scope: !5, file: !1, line: 1)
+!5 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: true, unit: !0)
+!6 = !DISubroutineType(types: !7)
+!7 = !{null, !8}
+!8 = !DIBasicType(name: "float", size: 32, align: 32)
+!9 = !DIExpression()
+!10 = !DILocation(line: 1, column: 42, scope: !5)
diff --git a/test/CodeGen/AMDGPU/opencl-image-metadata.ll b/test/CodeGen/AMDGPU/opencl-image-metadata.ll
index 0242f6d6145a..c974471c6573 100644
--- a/test/CodeGen/AMDGPU/opencl-image-metadata.ll
+++ b/test/CodeGen/AMDGPU/opencl-image-metadata.ll
@@ -6,7 +6,7 @@
; EG: CF_END
; SI: s_endpgm
-define void @kernel(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @kernel(i32 addrspace(1)* %out) {
entry:
store i32 0, i32 addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/operand-folding.ll b/test/CodeGen/AMDGPU/operand-folding.ll
index 4e5ea4b86b77..3836a2b7e599 100644
--- a/test/CodeGen/AMDGPU/operand-folding.ll
+++ b/test/CodeGen/AMDGPU/operand-folding.ll
@@ -2,7 +2,7 @@
; CHECK-LABEL: {{^}}fold_sgpr:
; CHECK: v_add_i32_e32 v{{[0-9]+}}, vcc, s
-define void @fold_sgpr(i32 addrspace(1)* %out, i32 %fold) {
+define amdgpu_kernel void @fold_sgpr(i32 addrspace(1)* %out, i32 %fold) {
entry:
%tmp0 = icmp ne i32 %fold, 0
br i1 %tmp0, label %if, label %endif
@@ -20,7 +20,7 @@ endif:
; CHECK-LABEL: {{^}}fold_imm:
; CHECK: v_or_b32_e32 v{{[0-9]+}}, 5
-define void @fold_imm(i32 addrspace(1)* %out, i32 %cmp) {
+define amdgpu_kernel void @fold_imm(i32 addrspace(1)* %out, i32 %cmp) {
entry:
%fold = add i32 3, 2
%tmp0 = icmp ne i32 %cmp, 0
@@ -46,7 +46,7 @@ endif:
; CHECK-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], [[HI]]
; CHECK: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}},
-define void @fold_64bit_constant_add(i64 addrspace(1)* %out, i32 %cmp, i64 %val) {
+define amdgpu_kernel void @fold_64bit_constant_add(i64 addrspace(1)* %out, i32 %cmp, i64 %val) {
entry:
%tmp0 = add i64 %val, 1
store i64 %tmp0, i64 addrspace(1)* %out
@@ -61,7 +61,7 @@ entry:
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
-define void @vector_inline(<4 x i32> addrspace(1)* %out) {
+define amdgpu_kernel void @vector_inline(<4 x i32> addrspace(1)* %out) {
entry:
%tmp0 = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = add i32 %tmp0, 1
@@ -80,7 +80,7 @@ entry:
; CHECK-LABEL: {{^}}imm_one_use:
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 0x64, v{{[0-9]+}}
-define void @imm_one_use(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @imm_one_use(i32 addrspace(1)* %out) {
entry:
%tmp0 = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = xor i32 %tmp0, 100
@@ -94,7 +94,7 @@ entry:
; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
-define void @vector_imm(<4 x i32> addrspace(1)* %out) {
+define amdgpu_kernel void @vector_imm(<4 x i32> addrspace(1)* %out) {
entry:
%tmp0 = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = add i32 %tmp0, 1
@@ -114,7 +114,7 @@ entry:
; CHECK: buffer_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
; CHECK: v_mac_f32_e32 v[[LO]], 0x41200000, v[[HI]]
; CHECK: buffer_store_dword v[[LO]]
-define void @no_fold_tied_subregister() {
+define amdgpu_kernel void @no_fold_tied_subregister() {
%tmp1 = load volatile <2 x float>, <2 x float> addrspace(1)* undef
%tmp2 = extractelement <2 x float> %tmp1, i32 0
%tmp3 = extractelement <2 x float> %tmp1, i32 1
diff --git a/test/CodeGen/AMDGPU/operand-spacing.ll b/test/CodeGen/AMDGPU/operand-spacing.ll
index 127f3da220e7..fc6f070b737a 100644
--- a/test/CodeGen/AMDGPU/operand-spacing.ll
+++ b/test/CodeGen/AMDGPU/operand-spacing.ll
@@ -11,7 +11,7 @@
; GCN: v_mov_b32_e32 [[VREGB:v[0-9]+]], [[SREGB]]
; GCN: v_add_f32_e32 [[RESULT:v[0-9]+]], [[SREGA]], [[VREGB]]
; GCN: buffer_store_dword [[RESULT]],
-define void @add_f32(float addrspace(1)* %out, float %a, float %b) {
+define amdgpu_kernel void @add_f32(float addrspace(1)* %out, float %a, float %b) {
%result = fadd float %a, %b
store float %result, float addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir b/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
index 4584802ad5a7..2de6b59e59e9 100644
--- a/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
+++ b/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
@@ -3,7 +3,7 @@
--- |
target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
- define void @optimize_if_and_saveexec_xor(i32 %z, i32 %v) #0 {
+ define amdgpu_kernel void @optimize_if_and_saveexec_xor(i32 %z, i32 %v) #0 {
main_body:
%id = call i32 @llvm.amdgcn.workitem.id.x()
%cc = icmp eq i32 %id, 0
@@ -23,7 +23,7 @@
ret void
}
- define void @optimize_if_and_saveexec(i32 %z, i32 %v) #0 {
+ define amdgpu_kernel void @optimize_if_and_saveexec(i32 %z, i32 %v) #0 {
main_body:
br i1 undef, label %if, label %end
@@ -34,7 +34,7 @@
ret void
}
- define void @optimize_if_or_saveexec(i32 %z, i32 %v) #0 {
+ define amdgpu_kernel void @optimize_if_or_saveexec(i32 %z, i32 %v) #0 {
main_body:
br i1 undef, label %if, label %end
@@ -46,7 +46,7 @@
}
- define void @optimize_if_and_saveexec_xor_valu_middle(i32 %z, i32 %v) #0 {
+ define amdgpu_kernel void @optimize_if_and_saveexec_xor_valu_middle(i32 %z, i32 %v) #0 {
main_body:
%id = call i32 @llvm.amdgcn.workitem.id.x()
%cc = icmp eq i32 %id, 0
@@ -67,7 +67,7 @@
ret void
}
- define void @optimize_if_and_saveexec_xor_wrong_reg(i32 %z, i32 %v) #0 {
+ define amdgpu_kernel void @optimize_if_and_saveexec_xor_wrong_reg(i32 %z, i32 %v) #0 {
main_body:
br i1 undef, label %if, label %end
@@ -78,7 +78,7 @@
ret void
}
- define void @optimize_if_and_saveexec_xor_modify_copy_to_exec(i32 %z, i32 %v) #0 {
+ define amdgpu_kernel void @optimize_if_and_saveexec_xor_modify_copy_to_exec(i32 %z, i32 %v) #0 {
main_body:
br i1 undef, label %if, label %end
@@ -89,7 +89,7 @@
ret void
}
- define void @optimize_if_and_saveexec_xor_live_out_setexec(i32 %z, i32 %v) #0 {
+ define amdgpu_kernel void @optimize_if_and_saveexec_xor_live_out_setexec(i32 %z, i32 %v) #0 {
main_body:
br i1 undef, label %if, label %end
@@ -100,7 +100,7 @@
ret void
}
- define void @optimize_if_unknown_saveexec(i32 %z, i32 %v) #0 {
+ define amdgpu_kernel void @optimize_if_unknown_saveexec(i32 %z, i32 %v) #0 {
main_body:
br i1 undef, label %if, label %end
@@ -111,7 +111,7 @@
ret void
}
- define void @optimize_if_andn2_saveexec(i32 %z, i32 %v) #0 {
+ define amdgpu_kernel void @optimize_if_andn2_saveexec(i32 %z, i32 %v) #0 {
main_body:
br i1 undef, label %if, label %end
@@ -122,7 +122,7 @@
ret void
}
- define void @optimize_if_andn2_saveexec_no_commute(i32 %z, i32 %v) #0 {
+ define amdgpu_kernel void @optimize_if_andn2_saveexec_no_commute(i32 %z, i32 %v) #0 {
main_body:
br i1 undef, label %if, label %end
diff --git a/test/CodeGen/AMDGPU/or.ll b/test/CodeGen/AMDGPU/or.ll
index eca6909d4eb9..eb082843fb82 100644
--- a/test/CodeGen/AMDGPU/or.ll
+++ b/test/CodeGen/AMDGPU/or.ll
@@ -9,7 +9,7 @@
; SI: v_or_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_or_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @or_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @or_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32>, <2 x i32> addrspace(1) * %in
%b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
@@ -28,7 +28,7 @@ define void @or_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in)
; SI: v_or_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_or_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_or_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @or_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @or_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32>, <4 x i32> addrspace(1) * %in
%b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
@@ -39,7 +39,7 @@ define void @or_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in)
; FUNC-LABEL: {{^}}scalar_or_i32:
; SI: s_or_b32
-define void @scalar_or_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define amdgpu_kernel void @scalar_or_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
%or = or i32 %a, %b
store i32 %or, i32 addrspace(1)* %out
ret void
@@ -47,7 +47,7 @@ define void @scalar_or_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
; FUNC-LABEL: {{^}}vector_or_i32:
; SI: v_or_b32_e32 v{{[0-9]}}
-define void @vector_or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 %b) {
+define amdgpu_kernel void @vector_or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 %b) {
%loada = load i32, i32 addrspace(1)* %a
%or = or i32 %loada, %b
store i32 %or, i32 addrspace(1)* %out
@@ -56,7 +56,7 @@ define void @vector_or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 %b)
; FUNC-LABEL: {{^}}scalar_or_literal_i32:
; SI: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x1869f
-define void @scalar_or_literal_i32(i32 addrspace(1)* %out, i32 %a) {
+define amdgpu_kernel void @scalar_or_literal_i32(i32 addrspace(1)* %out, i32 %a) {
%or = or i32 %a, 99999
store i32 %or, i32 addrspace(1)* %out, align 4
ret void
@@ -68,7 +68,7 @@ define void @scalar_or_literal_i32(i32 addrspace(1)* %out, i32 %a) {
; SI-DAG: s_or_b32 s[[RES_LO:[0-9]+]], s[[LO]], 0x3039
; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s[[RES_LO]]
; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s[[RES_HI]]
-define void @scalar_or_literal_i64(i64 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @scalar_or_literal_i64(i64 addrspace(1)* %out, i64 %a) {
%or = or i64 %a, 4261135838621753
store i64 %or, i64 addrspace(1)* %out
ret void
@@ -82,7 +82,7 @@ define void @scalar_or_literal_i64(i64 addrspace(1)* %out, i64 %a) {
; SI: s_add_u32 s{{[0-9]+}}, s{{[0-9]+}}, s[[K_LO]]
; SI: s_addc_u32 s{{[0-9]+}}, s{{[0-9]+}}, s[[K_HI]]
-define void @scalar_or_literal_multi_use_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @scalar_or_literal_multi_use_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
%or = or i64 %a, 4261135838621753
store i64 %or, i64 addrspace(1)* %out
@@ -101,7 +101,7 @@ define void @scalar_or_literal_multi_use_i64(i64 addrspace(1)* %out, i64 %a, i64
; SI: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[VAL_HI]]
; SI-NOT: or_b32
; SI: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
-define void @scalar_or_inline_imm_i64(i64 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @scalar_or_inline_imm_i64(i64 addrspace(1)* %out, i64 %a) {
%or = or i64 %a, 63
store i64 %or, i64 addrspace(1)* %out
ret void
@@ -111,7 +111,7 @@ define void @scalar_or_inline_imm_i64(i64 addrspace(1)* %out, i64 %a) {
; SI-NOT: or_b32
; SI: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, 63
; SI-NOT: or_b32
-define void @scalar_or_inline_imm_multi_use_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @scalar_or_inline_imm_multi_use_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
%or = or i64 %a, 63
store i64 %or, i64 addrspace(1)* %out
%foo = add i64 %b, 63
@@ -125,7 +125,7 @@ define void @scalar_or_inline_imm_multi_use_i64(i64 addrspace(1)* %out, i64 %a,
; SI-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], -1{{$}}
; SI-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], [[VAL]]
; SI: buffer_store_dwordx2 v{{\[}}[[V_LO]]:[[V_HI]]{{\]}}
-define void @scalar_or_neg_inline_imm_i64(i64 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @scalar_or_neg_inline_imm_i64(i64 addrspace(1)* %out, i64 %a) {
%or = or i64 %a, -8
store i64 %or, i64 addrspace(1)* %out
ret void
@@ -133,7 +133,7 @@ define void @scalar_or_neg_inline_imm_i64(i64 addrspace(1)* %out, i64 %a) {
; FUNC-LABEL: {{^}}vector_or_literal_i32:
; SI: v_or_b32_e32 v{{[0-9]+}}, 0xffff, v{{[0-9]+}}
-define void @vector_or_literal_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 addrspace(1)* %b) {
+define amdgpu_kernel void @vector_or_literal_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 addrspace(1)* %b) {
%loada = load i32, i32 addrspace(1)* %a, align 4
%or = or i32 %loada, 65535
store i32 %or, i32 addrspace(1)* %out, align 4
@@ -142,7 +142,7 @@ define void @vector_or_literal_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a,
; FUNC-LABEL: {{^}}vector_or_inline_immediate_i32:
; SI: v_or_b32_e32 v{{[0-9]+}}, 4, v{{[0-9]+}}
-define void @vector_or_inline_immediate_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 addrspace(1)* %b) {
+define amdgpu_kernel void @vector_or_inline_immediate_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 addrspace(1)* %b) {
%loada = load i32, i32 addrspace(1)* %a, align 4
%or = or i32 %loada, 4
store i32 %or, i32 addrspace(1)* %out, align 4
@@ -154,7 +154,7 @@ define void @vector_or_inline_immediate_i32(i32 addrspace(1)* %out, i32 addrspac
; EG-DAG: OR_INT * T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
; SI: s_or_b64
-define void @scalar_or_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @scalar_or_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
%or = or i64 %a, %b
store i64 %or, i64 addrspace(1)* %out
ret void
@@ -163,7 +163,7 @@ define void @scalar_or_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
; FUNC-LABEL: {{^}}vector_or_i64:
; SI: v_or_b32_e32 v{{[0-9]}}
; SI: v_or_b32_e32 v{{[0-9]}}
-define void @vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 8
%loadb = load i64, i64 addrspace(1)* %b, align 8
%or = or i64 %loada, %loadb
@@ -174,7 +174,7 @@ define void @vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 add
; FUNC-LABEL: {{^}}scalar_vector_or_i64:
; SI: v_or_b32_e32 v{{[0-9]}}
; SI: v_or_b32_e32 v{{[0-9]}}
-define void @scalar_vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 %b) {
+define amdgpu_kernel void @scalar_vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 %b) {
%loada = load i64, i64 addrspace(1)* %a
%or = or i64 %loada, %b
store i64 %or, i64 addrspace(1)* %out
@@ -186,7 +186,7 @@ define void @scalar_vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a,
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, 0xdf77987f, v[[LO_VREG]]
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, 0x146f, v[[HI_VREG]]
; SI: s_endpgm
-define void @vector_or_i64_loadimm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @vector_or_i64_loadimm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 8
%or = or i64 %loada, 22470723082367
store i64 %or, i64 addrspace(1)* %out
@@ -200,7 +200,7 @@ define void @vector_or_i64_loadimm(i64 addrspace(1)* %out, i64 addrspace(1)* %a,
; SI-NOT: v_or_b32_e32 {{v[0-9]+}}, 0
; SI: buffer_store_dwordx2 v{{\[}}[[LO_RESULT]]:[[HI_VREG]]{{\]}}
; SI: s_endpgm
-define void @vector_or_i64_imm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @vector_or_i64_imm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 8
%or = or i64 %loada, 8
store i64 %or, i64 addrspace(1)* %out
@@ -213,7 +213,7 @@ define void @vector_or_i64_imm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64
; SI-DAG: v_mov_b32_e32 v[[RES_HI:[0-9]+]], -1{{$}}
; SI: buffer_store_dwordx2 v{{\[}}[[RES_LO]]:[[RES_HI]]{{\]}}
; SI: s_endpgm
-define void @vector_or_i64_neg_inline_imm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @vector_or_i64_neg_inline_imm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 8
%or = or i64 %loada, -8
store i64 %or, i64 addrspace(1)* %out
@@ -226,7 +226,7 @@ define void @vector_or_i64_neg_inline_imm(i64 addrspace(1)* %out, i64 addrspace(
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, 0xffffff38, v[[LO_VREG]]
; SI: buffer_store_dwordx2
; SI: s_endpgm
-define void @vector_or_i64_neg_literal(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @vector_or_i64_neg_literal(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 8
%or = or i64 %loada, -200
store i64 %or, i64 addrspace(1)* %out
@@ -239,7 +239,7 @@ define void @vector_or_i64_neg_literal(i64 addrspace(1)* %out, i64 addrspace(1)*
; SI: s_or_b32 s[[SRESULT:[0-9]+]], s[[SREG1]], s[[SREG0]]
; SI: v_mov_b32_e32 [[VRESULT:v[0-9]+]], s[[SRESULT]]
; SI: buffer_store_dword [[VRESULT]],
-define void @trunc_i64_or_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @trunc_i64_or_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
%add = or i64 %b, %a
%trunc = trunc i64 %add to i32
store i32 %trunc, i32 addrspace(1)* %out, align 8
@@ -250,7 +250,7 @@ define void @trunc_i64_or_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
; EG: OR_INT * {{\** *}}T{{[0-9]+\.[XYZW], PS, PV\.[XYZW]}}
; SI: s_or_b64 s[{{[0-9]+:[0-9]+}}], vcc, s[{{[0-9]+:[0-9]+}}]
-define void @or_i1(i32 addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
+define amdgpu_kernel void @or_i1(i32 addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
%a = load float, float addrspace(1)* %in0
%b = load float, float addrspace(1)* %in1
%acmp = fcmp oge float %a, 0.000000e+00
@@ -263,7 +263,7 @@ define void @or_i1(i32 addrspace(1)* %out, float addrspace(1)* %in0, float addrs
; FUNC-LABEL: {{^}}s_or_i1:
; SI: s_or_b64 s[{{[0-9]+:[0-9]+}}], vcc, s[{{[0-9]+:[0-9]+}}]
-define void @s_or_i1(i1 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
+define amdgpu_kernel void @s_or_i1(i1 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
%cmp0 = icmp eq i32 %a, %b
%cmp1 = icmp eq i32 %c, %d
%or = or i1 %cmp0, %cmp1
diff --git a/test/CodeGen/AMDGPU/over-max-lds-size.ll b/test/CodeGen/AMDGPU/over-max-lds-size.ll
index 32ad9aba04ed..57777e783c56 100644
--- a/test/CodeGen/AMDGPU/over-max-lds-size.ll
+++ b/test/CodeGen/AMDGPU/over-max-lds-size.ll
@@ -6,7 +6,7 @@
@huge = internal unnamed_addr addrspace(3) global [100000 x i32] undef, align 4
-define void @use_huge_lds() {
+define amdgpu_kernel void @use_huge_lds() {
entry:
%v0 = getelementptr inbounds [100000 x i32], [100000 x i32] addrspace(3)* @huge, i32 0, i32 0
store i32 0, i32 addrspace(3)* %v0
diff --git a/test/CodeGen/AMDGPU/pack.v2f16.ll b/test/CodeGen/AMDGPU/pack.v2f16.ll
new file mode 100644
index 000000000000..5a07f7ca6ae8
--- /dev/null
+++ b/test/CodeGen/AMDGPU/pack.v2f16.ll
@@ -0,0 +1,219 @@
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx901 -mattr=-flat-for-global,-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
+
+
+; GCN-LABEL: {{^}}s_pack_v2f16:
+; GFX9: s_load_dword [[VAL0:s[0-9]+]]
+; GFX9: s_load_dword [[VAL1:s[0-9]+]]
+; GFX9: s_pack_ll_b32_b16 [[PACKED:s[0-9]+]], [[VAL0]], [[VAL1]]
+; GFX9: ; use [[PACKED]]
+define amdgpu_kernel void @s_pack_v2f16(i32 addrspace(2)* %in0, i32 addrspace(2)* %in1) #0 {
+ %val0 = load volatile i32, i32 addrspace(2)* %in0
+ %val1 = load volatile i32, i32 addrspace(2)* %in1
+ %lo.i = trunc i32 %val0 to i16
+ %hi.i = trunc i32 %val1 to i16
+ %lo = bitcast i16 %lo.i to half
+ %hi = bitcast i16 %hi.i to half
+ %vec.0 = insertelement <2 x half> undef, half %lo, i32 0
+ %vec.1 = insertelement <2 x half> %vec.0, half %hi, i32 1
+ %vec.i32 = bitcast <2 x half> %vec.1 to i32
+
+ call void asm sideeffect "; use $0", "s"(i32 %vec.i32) #0
+ ret void
+}
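+
+; s_pack_ll_b32_b16 presumably computes D = (S1[15:0] << 16) | S0[15:0],
+; which matches the <2 x half> build vector formed from two low halves above.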
+
+; GCN-LABEL: {{^}}s_pack_v2f16_imm_lo:
+; GFX9: s_load_dword [[VAL1:s[0-9]+]]
+; GFX9: s_pack_ll_b32_b16 [[PACKED:s[0-9]+]], 0x1234, [[VAL1]]
+; GFX9: ; use [[PACKED]]
+define amdgpu_kernel void @s_pack_v2f16_imm_lo(i32 addrspace(2)* %in1) #0 {
+ %val1 = load i32, i32 addrspace(2)* %in1
+ %hi.i = trunc i32 %val1 to i16
+ %hi = bitcast i16 %hi.i to half
+ %vec.0 = insertelement <2 x half> undef, half 0xH1234, i32 0
+ %vec.1 = insertelement <2 x half> %vec.0, half %hi, i32 1
+ %vec.i32 = bitcast <2 x half> %vec.1 to i32
+
+ call void asm sideeffect "; use $0", "s"(i32 %vec.i32) #0
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_pack_v2f16_imm_hi:
+; GFX9: s_load_dword [[VAL0:s[0-9]+]]
+; GFX9: s_pack_ll_b32_b16 [[PACKED:s[0-9]+]], [[VAL0]], 0x1234
+; GFX9: ; use [[PACKED]]
+define amdgpu_kernel void @s_pack_v2f16_imm_hi(i32 addrspace(2)* %in0) #0 {
+ %val0 = load i32, i32 addrspace(2)* %in0
+ %lo.i = trunc i32 %val0 to i16
+ %lo = bitcast i16 %lo.i to half
+ %vec.0 = insertelement <2 x half> undef, half %lo, i32 0
+ %vec.1 = insertelement <2 x half> %vec.0, half 0xH1234, i32 1
+ %vec.i32 = bitcast <2 x half> %vec.1 to i32
+
+ call void asm sideeffect "; use $0", "s"(i32 %vec.i32) #0
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_pack_v2f16:
+; GFX9: flat_load_dword [[VAL0:v[0-9]+]]
+; GFX9: flat_load_dword [[VAL1:v[0-9]+]]
+
+; GFX9: v_and_b32_e32 [[ELT0:v[0-9]+]], 0xffff, [[VAL0]]
+; GFX9: v_lshl_or_b32 [[PACKED:v[0-9]+]], [[VAL1]], 16, [[ELT0]]
+; GFX9: ; use [[PACKED]]
+define amdgpu_kernel void @v_pack_v2f16(i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in0.gep = getelementptr inbounds i32, i32 addrspace(1)* %in0, i64 %tid.ext
+ %in1.gep = getelementptr inbounds i32, i32 addrspace(1)* %in1, i64 %tid.ext
+ %val0 = load volatile i32, i32 addrspace(1)* %in0.gep
+ %val1 = load volatile i32, i32 addrspace(1)* %in1.gep
+ %lo.i = trunc i32 %val0 to i16
+ %hi.i = trunc i32 %val1 to i16
+ %lo = bitcast i16 %lo.i to half
+ %hi = bitcast i16 %hi.i to half
+ %vec.0 = insertelement <2 x half> undef, half %lo, i32 0
+ %vec.1 = insertelement <2 x half> %vec.0, half %hi, i32 1
+ %vec.i32 = bitcast <2 x half> %vec.1 to i32
+ call void asm sideeffect "; use $0", "v"(i32 %vec.i32) #0
+ ret void
+}
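+
+; For VGPR operands there is no scalar pack, so the expected lowering is a
+; mask of the low element plus v_lshl_or_b32 (presumably D = (S0 << S1) | S2).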
+
+; GCN-LABEL: {{^}}v_pack_v2f16_user:
+; GFX9: flat_load_dword [[VAL0:v[0-9]+]]
+; GFX9: flat_load_dword [[VAL1:v[0-9]+]]
+
+; GFX9: v_and_b32_e32 [[ELT0:v[0-9]+]], 0xffff, [[VAL0]]
+; GFX9: v_lshl_or_b32 [[PACKED:v[0-9]+]], [[VAL1]], 16, [[ELT0]]
+
+; GFX9: v_add_i32_e32 v{{[0-9]+}}, vcc, 9, [[PACKED]]
+define amdgpu_kernel void @v_pack_v2f16_user(i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in0.gep = getelementptr inbounds i32, i32 addrspace(1)* %in0, i64 %tid.ext
+ %in1.gep = getelementptr inbounds i32, i32 addrspace(1)* %in1, i64 %tid.ext
+ %val0 = load volatile i32, i32 addrspace(1)* %in0.gep
+ %val1 = load volatile i32, i32 addrspace(1)* %in1.gep
+ %lo.i = trunc i32 %val0 to i16
+ %hi.i = trunc i32 %val1 to i16
+ %lo = bitcast i16 %lo.i to half
+ %hi = bitcast i16 %hi.i to half
+ %vec.0 = insertelement <2 x half> undef, half %lo, i32 0
+ %vec.1 = insertelement <2 x half> %vec.0, half %hi, i32 1
+ %vec.i32 = bitcast <2 x half> %vec.1 to i32
+ %foo = add i32 %vec.i32, 9
+ store volatile i32 %foo, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_pack_v2f16_imm_lo:
+; GFX9-DAG: flat_load_dword [[VAL1:v[0-9]+]]
+
+; GFX9-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x1234{{$}}
+; GFX9: v_lshl_or_b32 [[PACKED:v[0-9]+]], [[VAL1]], 16, [[K]]
+; GFX9: ; use [[PACKED]]
+define amdgpu_kernel void @v_pack_v2f16_imm_lo(i32 addrspace(1)* %in1) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in1.gep = getelementptr inbounds i32, i32 addrspace(1)* %in1, i64 %tid.ext
+ %val1 = load volatile i32, i32 addrspace(1)* %in1.gep
+ %hi.i = trunc i32 %val1 to i16
+ %hi = bitcast i16 %hi.i to half
+ %vec.0 = insertelement <2 x half> undef, half 0xH1234, i32 0
+ %vec.1 = insertelement <2 x half> %vec.0, half %hi, i32 1
+ %vec.i32 = bitcast <2 x half> %vec.1 to i32
+ call void asm sideeffect "; use $0", "v"(i32 %vec.i32) #0
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_pack_v2f16_inline_imm_lo:
+; GFX9-DAG: flat_load_dword [[VAL1:v[0-9]+]]
+
+; GFX9-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x4400{{$}}
+; GFX9: v_lshl_or_b32 [[PACKED:v[0-9]+]], [[VAL1]], 16, [[K]]
+
+; GFX9: ; use [[PACKED]]
+define amdgpu_kernel void @v_pack_v2f16_inline_imm_lo(i32 addrspace(1)* %in1) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in1.gep = getelementptr inbounds i32, i32 addrspace(1)* %in1, i64 %tid.ext
+ %val1 = load volatile i32, i32 addrspace(1)* %in1.gep
+ %hi.i = trunc i32 %val1 to i16
+ %hi = bitcast i16 %hi.i to half
+ %vec.0 = insertelement <2 x half> undef, half 4.0, i32 0
+ %vec.1 = insertelement <2 x half> %vec.0, half %hi, i32 1
+ %vec.i32 = bitcast <2 x half> %vec.1 to i32
+ call void asm sideeffect "; use $0", "v"(i32 %vec.i32) #0
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_pack_v2f16_imm_hi:
+; GFX9-DAG: flat_load_dword [[VAL0:v[0-9]+]]
+
+; GFX9-DAG: s_movk_i32 [[K:s[0-9]+]], 0x1234
+; GFX9: v_and_b32_e32 [[MASKED:v[0-9]+]], 0xffff, [[VAL0]]
+; GFX9: v_lshl_or_b32 [[PACKED:v[0-9]+]], [[K]], 16, [[MASKED]]
+
+; GFX9: ; use [[PACKED]]
+define amdgpu_kernel void @v_pack_v2f16_imm_hi(i32 addrspace(1)* %in0) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in0.gep = getelementptr inbounds i32, i32 addrspace(1)* %in0, i64 %tid.ext
+ %val0 = load volatile i32, i32 addrspace(1)* %in0.gep
+ %lo.i = trunc i32 %val0 to i16
+ %lo = bitcast i16 %lo.i to half
+ %vec.0 = insertelement <2 x half> undef, half %lo, i32 0
+ %vec.1 = insertelement <2 x half> %vec.0, half 0xH1234, i32 1
+ %vec.i32 = bitcast <2 x half> %vec.1 to i32
+ call void asm sideeffect "; use $0", "v"(i32 %vec.i32) #0
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_pack_v2f16_inline_f16imm_hi:
+; GFX9-DAG: flat_load_dword [[VAL:v[0-9]+]]
+
+; GFX9-DAG: s_movk_i32 [[K:s[0-9]+]], 0x3c00
+; GFX9: v_and_b32_e32 [[MASKED:v[0-9]+]], 0xffff, [[VAL]]
+; GFX9: v_lshl_or_b32 [[PACKED:v[0-9]+]], [[K]], 16, [[MASKED]]
+
+; GFX9: ; use [[PACKED]]
+define amdgpu_kernel void @v_pack_v2f16_inline_f16imm_hi(i32 addrspace(1)* %in0) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in0.gep = getelementptr inbounds i32, i32 addrspace(1)* %in0, i64 %tid.ext
+ %val0 = load volatile i32, i32 addrspace(1)* %in0.gep
+ %lo.i = trunc i32 %val0 to i16
+ %lo = bitcast i16 %lo.i to half
+ %vec.0 = insertelement <2 x half> undef, half %lo, i32 0
+ %vec.1 = insertelement <2 x half> %vec.0, half 1.0, i32 1
+ %vec.i32 = bitcast <2 x half> %vec.1 to i32
+ call void asm sideeffect "; use $0", "v"(i32 %vec.i32) #0
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_pack_v2f16_inline_imm_hi:
+; GFX9: flat_load_dword [[VAL:v[0-9]+]]
+
+; GFX9: v_and_b32_e32 [[MASKED:v[0-9]+]], 0xffff, [[VAL]]
+; GFX9: v_lshl_or_b32 [[PACKED:v[0-9]+]], 64, 16, [[MASKED]]
+
+; GFX9: ; use [[PACKED]]
+define amdgpu_kernel void @v_pack_v2f16_inline_imm_hi(i32 addrspace(1)* %in0) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in0.gep = getelementptr inbounds i32, i32 addrspace(1)* %in0, i64 %tid.ext
+ %val0 = load volatile i32, i32 addrspace(1)* %in0.gep
+ %lo.i = trunc i32 %val0 to i16
+ %lo = bitcast i16 %lo.i to half
+ %vec.0 = insertelement <2 x half> undef, half %lo, i32 0
+ %vec.1 = insertelement <2 x half> %vec.0, half 0xH0040, i32 1
+ %vec.i32 = bitcast <2 x half> %vec.1 to i32
+ call void asm sideeffect "; use $0", "v"(i32 %vec.i32) #0
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/pack.v2i16.ll b/test/CodeGen/AMDGPU/pack.v2i16.ll
new file mode 100644
index 000000000000..8515fbc6dbae
--- /dev/null
+++ b/test/CodeGen/AMDGPU/pack.v2i16.ll
@@ -0,0 +1,181 @@
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx901 -mattr=-flat-for-global,-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
+
+
+; GCN-LABEL: {{^}}s_pack_v2i16:
+; GFX9: s_load_dword [[VAL0:s[0-9]+]]
+; GFX9: s_load_dword [[VAL1:s[0-9]+]]
+; GFX9: s_pack_ll_b32_b16 [[PACKED:s[0-9]+]], [[VAL0]], [[VAL1]]
+; GFX9: ; use [[PACKED]]
+define amdgpu_kernel void @s_pack_v2i16(i32 addrspace(2)* %in0, i32 addrspace(2)* %in1) #0 {
+ %val0 = load volatile i32, i32 addrspace(2)* %in0
+ %val1 = load volatile i32, i32 addrspace(2)* %in1
+ %lo = trunc i32 %val0 to i16
+ %hi = trunc i32 %val1 to i16
+ %vec.0 = insertelement <2 x i16> undef, i16 %lo, i32 0
+ %vec.1 = insertelement <2 x i16> %vec.0, i16 %hi, i32 1
+ %vec.i32 = bitcast <2 x i16> %vec.1 to i32
+
+ call void asm sideeffect "; use $0", "s"(i32 %vec.i32) #0
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_pack_v2i16_imm_lo:
+; GFX9: s_load_dword [[VAL1:s[0-9]+]]
+; GFX9: s_pack_ll_b32_b16 [[PACKED:s[0-9]+]], 0x1c8, [[VAL1]]
+; GFX9: ; use [[PACKED]]
+define amdgpu_kernel void @s_pack_v2i16_imm_lo(i32 addrspace(2)* %in1) #0 {
+ %val1 = load i32, i32 addrspace(2)* %in1
+ %hi = trunc i32 %val1 to i16
+ %vec.0 = insertelement <2 x i16> undef, i16 456, i32 0
+ %vec.1 = insertelement <2 x i16> %vec.0, i16 %hi, i32 1
+ %vec.i32 = bitcast <2 x i16> %vec.1 to i32
+
+ call void asm sideeffect "; use $0", "s"(i32 %vec.i32) #0
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_pack_v2i16_imm_hi:
+; GFX9: s_load_dword [[VAL0:s[0-9]+]]
+; GFX9: s_pack_ll_b32_b16 [[PACKED:s[0-9]+]], [[VAL0]], 0x1c8
+; GFX9: ; use [[PACKED]]
+define amdgpu_kernel void @s_pack_v2i16_imm_hi(i32 addrspace(2)* %in0) #0 {
+ %val0 = load i32, i32 addrspace(2)* %in0
+ %lo = trunc i32 %val0 to i16
+ %vec.0 = insertelement <2 x i16> undef, i16 %lo, i32 0
+ %vec.1 = insertelement <2 x i16> %vec.0, i16 456, i32 1
+ %vec.i32 = bitcast <2 x i16> %vec.1 to i32
+
+ call void asm sideeffect "; use $0", "s"(i32 %vec.i32) #0
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_pack_v2i16:
+; GFX9: flat_load_dword [[VAL0:v[0-9]+]]
+; GFX9: flat_load_dword [[VAL1:v[0-9]+]]
+
+; GFX9: v_and_b32_e32 [[MASKED:v[0-9]+]], 0xffff, [[VAL0]]
+; GFX9: v_lshl_or_b32 [[PACKED:v[0-9]+]], [[VAL1]], 16, [[MASKED]]
+; GFX9: ; use [[PACKED]]
+define amdgpu_kernel void @v_pack_v2i16(i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in0.gep = getelementptr inbounds i32, i32 addrspace(1)* %in0, i64 %tid.ext
+ %in1.gep = getelementptr inbounds i32, i32 addrspace(1)* %in1, i64 %tid.ext
+ %val0 = load volatile i32, i32 addrspace(1)* %in0.gep
+ %val1 = load volatile i32, i32 addrspace(1)* %in1.gep
+ %lo = trunc i32 %val0 to i16
+ %hi = trunc i32 %val1 to i16
+ %vec.0 = insertelement <2 x i16> undef, i16 %lo, i32 0
+ %vec.1 = insertelement <2 x i16> %vec.0, i16 %hi, i32 1
+ %vec.i32 = bitcast <2 x i16> %vec.1 to i32
+ call void asm sideeffect "; use $0", "v"(i32 %vec.i32) #0
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_pack_v2i16_user:
+; GFX9: flat_load_dword [[VAL0:v[0-9]+]]
+; GFX9: flat_load_dword [[VAL1:v[0-9]+]]
+
+; GFX9: v_and_b32_e32 [[MASKED:v[0-9]+]], 0xffff, [[VAL0]]
+; GFX9: v_lshl_or_b32 [[PACKED:v[0-9]+]], [[VAL1]], 16, [[MASKED]]
+
+; GFX9: v_add_i32_e32 v{{[0-9]+}}, vcc, 9, [[PACKED]]
+define amdgpu_kernel void @v_pack_v2i16_user(i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in0.gep = getelementptr inbounds i32, i32 addrspace(1)* %in0, i64 %tid.ext
+ %in1.gep = getelementptr inbounds i32, i32 addrspace(1)* %in1, i64 %tid.ext
+ %val0 = load volatile i32, i32 addrspace(1)* %in0.gep
+ %val1 = load volatile i32, i32 addrspace(1)* %in1.gep
+ %lo = trunc i32 %val0 to i16
+ %hi = trunc i32 %val1 to i16
+ %vec.0 = insertelement <2 x i16> undef, i16 %lo, i32 0
+ %vec.1 = insertelement <2 x i16> %vec.0, i16 %hi, i32 1
+ %vec.i32 = bitcast <2 x i16> %vec.1 to i32
+ %foo = add i32 %vec.i32, 9
+ store volatile i32 %foo, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_pack_v2i16_imm_lo:
+; GFX9-DAG: flat_load_dword [[VAL1:v[0-9]+]]
+; GFX9-DENORM-DAG: s_movk_i32 [[K:s[0-9]+]], 0x7b{{$}}
+
+; GFX9-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x7b{{$}}
+; GFX9: v_lshl_or_b32 [[PACKED:v[0-9]+]], [[VAL1]], 16, [[K]]
+
+; GFX9: ; use [[PACKED]]
+define amdgpu_kernel void @v_pack_v2i16_imm_lo(i32 addrspace(1)* %in1) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in1.gep = getelementptr inbounds i32, i32 addrspace(1)* %in1, i64 %tid.ext
+ %val1 = load volatile i32, i32 addrspace(1)* %in1.gep
+ %hi = trunc i32 %val1 to i16
+ %vec.0 = insertelement <2 x i16> undef, i16 123, i32 0
+ %vec.1 = insertelement <2 x i16> %vec.0, i16 %hi, i32 1
+ %vec.i32 = bitcast <2 x i16> %vec.1 to i32
+ call void asm sideeffect "; use $0", "v"(i32 %vec.i32) #0
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_pack_v2i16_inline_imm_lo:
+; GFX9: flat_load_dword [[VAL1:v[0-9]+]]
+
+; GFX9: v_lshl_or_b32 [[PACKED:v[0-9]+]], [[VAL1]], 16, 64
+; GFX9: ; use [[PACKED]]
+define amdgpu_kernel void @v_pack_v2i16_inline_imm_lo(i32 addrspace(1)* %in1) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in1.gep = getelementptr inbounds i32, i32 addrspace(1)* %in1, i64 %tid.ext
+ %val1 = load volatile i32, i32 addrspace(1)* %in1.gep
+ %hi = trunc i32 %val1 to i16
+ %vec.0 = insertelement <2 x i16> undef, i16 64, i32 0
+ %vec.1 = insertelement <2 x i16> %vec.0, i16 %hi, i32 1
+ %vec.i32 = bitcast <2 x i16> %vec.1 to i32
+ call void asm sideeffect "; use $0", "v"(i32 %vec.i32) #0
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_pack_v2i16_imm_hi:
+; GFX9-DAG: flat_load_dword [[VAL0:v[0-9]+]]
+
+; GFX9-DAG: s_movk_i32 [[K:s[0-9]+]], 0x7b{{$}}
+; GFX9: v_lshl_or_b32 [[PACKED:v[0-9]+]], [[K]], 16, [[VAL0]]
+
+; GFX9: ; use [[PACKED]]
+define amdgpu_kernel void @v_pack_v2i16_imm_hi(i32 addrspace(1)* %in0) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in0.gep = getelementptr inbounds i32, i32 addrspace(1)* %in0, i64 %tid.ext
+ %val0 = load volatile i32, i32 addrspace(1)* %in0.gep
+ %lo = trunc i32 %val0 to i16
+ %vec.0 = insertelement <2 x i16> undef, i16 %lo, i32 0
+ %vec.1 = insertelement <2 x i16> %vec.0, i16 123, i32 1
+ %vec.i32 = bitcast <2 x i16> %vec.1 to i32
+ call void asm sideeffect "; use $0", "v"(i32 %vec.i32) #0
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_pack_v2i16_inline_imm_hi:
+; GFX9: flat_load_dword [[VAL:v[0-9]+]]
+; GFX9: v_lshl_or_b32 [[PACKED:v[0-9]+]], 7, 16, [[VAL]]
+; GFX9: ; use [[PACKED]]
+define amdgpu_kernel void @v_pack_v2i16_inline_imm_hi(i32 addrspace(1)* %in0) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in0.gep = getelementptr inbounds i32, i32 addrspace(1)* %in0, i64 %tid.ext
+ %val0 = load volatile i32, i32 addrspace(1)* %in0.gep
+ %lo = trunc i32 %val0 to i16
+ %vec.0 = insertelement <2 x i16> undef, i16 %lo, i32 0
+ %vec.1 = insertelement <2 x i16> %vec.0, i16 7, i32 1
+ %vec.i32 = bitcast <2 x i16> %vec.1 to i32
+ call void asm sideeffect "; use $0", "v"(i32 %vec.i32) #0
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/packetizer.ll b/test/CodeGen/AMDGPU/packetizer.ll
index 49a7c0df748f..1764d64c367f 100644
--- a/test/CodeGen/AMDGPU/packetizer.ll
+++ b/test/CodeGen/AMDGPU/packetizer.ll
@@ -7,7 +7,7 @@
; CHECK: BIT_ALIGN_INT T{{[0-9]}}.Z
; CHECK: BIT_ALIGN_INT * T{{[0-9]}}.W
-define void @test(i32 addrspace(1)* %out, i32 %x_arg, i32 %y_arg, i32 %z_arg, i32 %w_arg, i32 %e) {
+define amdgpu_kernel void @test(i32 addrspace(1)* %out, i32 %x_arg, i32 %y_arg, i32 %z_arg, i32 %w_arg, i32 %e) {
entry:
%shl = sub i32 32, %e
%x = add i32 %x_arg, 1
diff --git a/test/CodeGen/AMDGPU/parallelandifcollapse.ll b/test/CodeGen/AMDGPU/parallelandifcollapse.ll
index ea943a533c81..a90f200f79e3 100644
--- a/test/CodeGen/AMDGPU/parallelandifcollapse.ll
+++ b/test/CodeGen/AMDGPU/parallelandifcollapse.ll
@@ -11,7 +11,7 @@
; to do its transformation; however, now that we are using local memory for
; allocas, the transformation isn't happening.
-define void @_Z9chk1D_512v() #0 {
+define amdgpu_kernel void @_Z9chk1D_512v() #0 {
entry:
%a0 = alloca i32, align 4
%b0 = alloca i32, align 4
diff --git a/test/CodeGen/AMDGPU/parallelorifcollapse.ll b/test/CodeGen/AMDGPU/parallelorifcollapse.ll
index 1da1e91b8ab8..91116b0f65ea 100644
--- a/test/CodeGen/AMDGPU/parallelorifcollapse.ll
+++ b/test/CodeGen/AMDGPU/parallelorifcollapse.ll
@@ -12,7 +12,7 @@
; CHECK: OR_INT
; CHECK-NEXT: OR_INT
; CHECK-NEXT: OR_INT
-define void @_Z9chk1D_512v() #0 {
+define amdgpu_kernel void @_Z9chk1D_512v() #0 {
entry:
%a0 = alloca i32, align 4
%b0 = alloca i32, align 4
diff --git a/test/CodeGen/AMDGPU/partial-sgpr-to-vgpr-spills.ll b/test/CodeGen/AMDGPU/partial-sgpr-to-vgpr-spills.ll
new file mode 100644
index 000000000000..77d793201adc
--- /dev/null
+++ b/test/CodeGen/AMDGPU/partial-sgpr-to-vgpr-spills.ll
@@ -0,0 +1,638 @@
+; RUN: llc -O0 -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefix=ALL -check-prefix=VGPR -check-prefix=GCN %s
+
+; FIXME: we should disable the SDWA peephole because dead-code elimination,
+; which runs after the peephole, ruins this test (different register numbers)
+
+; Spill all SGPRs so multiple VGPRs are required for spilling all of them.
+
+; Ideally we would only need 2 VGPRs for all of the spilling. The VGPRs are
+; allocated per-frame index, so it's possible to end up with more.
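+;
+; Illustrative sketch only (not checked by FileCheck; the lane indices and
+; register numbers below are assumptions for exposition): each SGPR is saved
+; into one lane of a spill VGPR and later restored from the same lane, e.g.
+;   v_writelane_b32 v0, s8, 0   ; spill s8 into lane 0 of v0
+;   v_writelane_b32 v0, s9, 1   ; spill s9 into lane 1 of v0
+;   v_readlane_b32  s8, v0, 0   ; later, restore s8 from lane 0 of v0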
+
+; GCN-LABEL: {{^}}spill_sgprs_to_multiple_vgprs:
+
+; GCN: def s[8:15]
+; GCN: def s[16:23]
+; GCN: def s[24:31]
+; GCN: def s[32:39]
+; GCN: def s[40:47]
+; GCN: def s[48:55]
+; GCN: def s[56:63]
+; GCN: def s[64:71]
+; GCN: def s[72:79]
+; GCN: def s[80:87]
+; GCN: def s[88:95]
+
+; GCN: v_writelane_b32 v0, s8, 0
+; GCN-NEXT: v_writelane_b32 v0, s9, 1
+; GCN-NEXT: v_writelane_b32 v0, s10, 2
+; GCN-NEXT: v_writelane_b32 v0, s11, 3
+; GCN-NEXT: v_writelane_b32 v0, s12, 4
+; GCN-NEXT: v_writelane_b32 v0, s13, 5
+; GCN-NEXT: v_writelane_b32 v0, s14, 6
+; GCN-NEXT: v_writelane_b32 v0, s15, 7
+
+; GCN: def s{{\[}}[[TMP_LO:[0-9]+]]:[[TMP_HI:[0-9]+]]{{\]}}
+; GCN: v_writelane_b32 v0, s[[TMP_LO]], 8
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 9
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 10
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 11
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 12
+; GCN-NEXT: v_writelane_b32 v0, s13, 13
+; GCN-NEXT: v_writelane_b32 v0, s14, 14
+; GCN-NEXT: v_writelane_b32 v0, s[[TMP_HI]], 15
+
+; GCN: def s{{\[}}[[TMP_LO]]:[[TMP_HI]]{{\]}}
+; GCN: v_writelane_b32 v0, s[[TMP_LO]], 16
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 17
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 18
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 19
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 20
+; GCN-NEXT: v_writelane_b32 v0, s13, 21
+; GCN-NEXT: v_writelane_b32 v0, s14, 22
+; GCN-NEXT: v_writelane_b32 v0, s[[TMP_HI]], 23
+
+; GCN: def s{{\[}}[[TMP_LO]]:[[TMP_HI]]{{\]}}
+; GCN: v_writelane_b32 v0, s[[TMP_LO]], 24
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 25
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 26
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 27
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 28
+; GCN-NEXT: v_writelane_b32 v0, s13, 29
+; GCN-NEXT: v_writelane_b32 v0, s14, 30
+; GCN-NEXT: v_writelane_b32 v0, s[[TMP_HI]], 31
+
+; GCN: def s{{\[}}[[TMP_LO]]:[[TMP_HI]]{{\]}}
+; GCN: v_writelane_b32 v0, s[[TMP_LO]], 32
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 33
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 34
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 35
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 36
+; GCN-NEXT: v_writelane_b32 v0, s13, 37
+; GCN-NEXT: v_writelane_b32 v0, s14, 38
+; GCN-NEXT: v_writelane_b32 v0, s[[TMP_HI]], 39
+
+; GCN: def s{{\[}}[[TMP_LO]]:[[TMP_HI]]{{\]}}
+; GCN: v_writelane_b32 v0, s[[TMP_LO]], 40
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 41
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 42
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 43
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 44
+; GCN-NEXT: v_writelane_b32 v0, s13, 45
+; GCN-NEXT: v_writelane_b32 v0, s14, 46
+; GCN-NEXT: v_writelane_b32 v0, s[[TMP_HI]], 47
+
+; GCN: def s{{\[}}[[TMP_LO]]:[[TMP_HI]]{{\]}}
+; GCN: v_writelane_b32 v0, s[[TMP_LO]], 48
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 49
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 50
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 51
+; GCN-NEXT: v_writelane_b32 v0, s{{[0-9]+}}, 52
+; GCN-NEXT: v_writelane_b32 v0, s13, 53
+; GCN-NEXT: v_writelane_b32 v0, s14, 54
+; GCN-NEXT: v_writelane_b32 v0, s[[TMP_HI]], 55
+
+; GCN-NEXT: v_writelane_b32 v0, s88, 56
+; GCN-NEXT: v_writelane_b32 v0, s89, 57
+; GCN-NEXT: v_writelane_b32 v0, s90, 58
+; GCN-NEXT: v_writelane_b32 v0, s91, 59
+; GCN-NEXT: v_writelane_b32 v0, s92, 60
+; GCN-NEXT: v_writelane_b32 v0, s93, 61
+; GCN-NEXT: v_writelane_b32 v0, s94, 62
+; GCN-NEXT: v_writelane_b32 v0, s95, 63
+; GCN-NEXT: v_writelane_b32 v1, s16, 0
+; GCN-NEXT: v_writelane_b32 v1, s17, 1
+; GCN-NEXT: v_writelane_b32 v1, s18, 2
+; GCN-NEXT: v_writelane_b32 v1, s19, 3
+; GCN-NEXT: v_writelane_b32 v1, s20, 4
+; GCN-NEXT: v_writelane_b32 v1, s21, 5
+; GCN-NEXT: v_writelane_b32 v1, s22, 6
+; GCN-NEXT: v_writelane_b32 v1, s23, 7
+; GCN-NEXT: v_writelane_b32 v1, s24, 8
+; GCN-NEXT: v_writelane_b32 v1, s25, 9
+; GCN-NEXT: v_writelane_b32 v1, s26, 10
+; GCN-NEXT: v_writelane_b32 v1, s27, 11
+; GCN-NEXT: v_writelane_b32 v1, s28, 12
+; GCN-NEXT: v_writelane_b32 v1, s29, 13
+; GCN-NEXT: v_writelane_b32 v1, s30, 14
+; GCN-NEXT: v_writelane_b32 v1, s31, 15
+; GCN-NEXT: v_writelane_b32 v1, s32, 16
+; GCN-NEXT: v_writelane_b32 v1, s33, 17
+; GCN-NEXT: v_writelane_b32 v1, s34, 18
+; GCN-NEXT: v_writelane_b32 v1, s35, 19
+; GCN-NEXT: v_writelane_b32 v1, s36, 20
+; GCN-NEXT: v_writelane_b32 v1, s37, 21
+; GCN-NEXT: v_writelane_b32 v1, s38, 22
+; GCN-NEXT: v_writelane_b32 v1, s39, 23
+; GCN-NEXT: v_writelane_b32 v1, s40, 24
+; GCN-NEXT: v_writelane_b32 v1, s41, 25
+; GCN-NEXT: v_writelane_b32 v1, s42, 26
+; GCN-NEXT: v_writelane_b32 v1, s43, 27
+; GCN-NEXT: v_writelane_b32 v1, s44, 28
+; GCN-NEXT: v_writelane_b32 v1, s45, 29
+; GCN-NEXT: v_writelane_b32 v1, s46, 30
+; GCN-NEXT: v_writelane_b32 v1, s47, 31
+; GCN-NEXT: v_writelane_b32 v1, s48, 32
+; GCN-NEXT: v_writelane_b32 v1, s49, 33
+; GCN-NEXT: v_writelane_b32 v1, s50, 34
+; GCN-NEXT: v_writelane_b32 v1, s51, 35
+; GCN-NEXT: v_writelane_b32 v1, s52, 36
+; GCN-NEXT: v_writelane_b32 v1, s53, 37
+; GCN-NEXT: v_writelane_b32 v1, s54, 38
+; GCN-NEXT: v_writelane_b32 v1, s55, 39
+; GCN-NEXT: v_writelane_b32 v1, s56, 40
+; GCN-NEXT: v_writelane_b32 v1, s57, 41
+; GCN-NEXT: v_writelane_b32 v1, s58, 42
+; GCN-NEXT: v_writelane_b32 v1, s59, 43
+; GCN-NEXT: v_writelane_b32 v1, s60, 44
+; GCN-NEXT: v_writelane_b32 v1, s61, 45
+; GCN-NEXT: v_writelane_b32 v1, s62, 46
+; GCN-NEXT: v_writelane_b32 v1, s63, 47
+; GCN-NEXT: v_writelane_b32 v1, s64, 48
+; GCN-NEXT: v_writelane_b32 v1, s65, 49
+; GCN-NEXT: v_writelane_b32 v1, s66, 50
+; GCN-NEXT: v_writelane_b32 v1, s67, 51
+; GCN-NEXT: v_writelane_b32 v1, s68, 52
+; GCN-NEXT: v_writelane_b32 v1, s69, 53
+; GCN-NEXT: v_writelane_b32 v1, s70, 54
+; GCN-NEXT: v_writelane_b32 v1, s71, 55
+; GCN-NEXT: v_writelane_b32 v1, s72, 56
+; GCN-NEXT: v_writelane_b32 v1, s73, 57
+; GCN-NEXT: v_writelane_b32 v1, s74, 58
+; GCN-NEXT: v_writelane_b32 v1, s75, 59
+; GCN-NEXT: v_writelane_b32 v1, s76, 60
+; GCN-NEXT: v_writelane_b32 v1, s77, 61
+; GCN-NEXT: v_writelane_b32 v1, s78, 62
+; GCN-NEXT: v_writelane_b32 v1, s79, 63
+; GCN-NEXT: v_writelane_b32 v2, s80, 0
+; GCN-NEXT: v_writelane_b32 v2, s81, 1
+; GCN-NEXT: v_writelane_b32 v2, s82, 2
+; GCN-NEXT: v_writelane_b32 v2, s83, 3
+; GCN-NEXT: v_writelane_b32 v2, s84, 4
+; GCN-NEXT: v_writelane_b32 v2, s85, 5
+; GCN-NEXT: v_writelane_b32 v2, s86, 6
+; GCN-NEXT: v_writelane_b32 v2, s87, 7
+; GCN: s_cbranch_scc1
+
+
+; GCN: v_readlane_b32 s[[USE_TMP_LO:[0-9]+]], v0, 0
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 1
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 2
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 3
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 4
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 5
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 6
+; GCN-NEXT: v_readlane_b32 s[[USE_TMP_HI:[0-9]+]], v0, 7
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+
+; GCN: v_readlane_b32 s[[USE_TMP_LO]], v1, 0
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 1
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 2
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 3
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 4
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 5
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 6
+; GCN-NEXT: v_readlane_b32 s[[USE_TMP_HI]], v1, 7
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+; GCN: v_readlane_b32 s[[USE_TMP_LO]], v1, 8
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 9
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 10
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 11
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 12
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 13
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 14
+; GCN-NEXT: v_readlane_b32 s[[USE_TMP_HI]], v1, 15
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+; GCN: v_readlane_b32 s[[USE_TMP_LO]], v1, 16
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 17
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 18
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 19
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 20
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 21
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 22
+; GCN-NEXT: v_readlane_b32 s[[USE_TMP_HI]], v1, 23
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+; GCN: v_readlane_b32 s[[USE_TMP_LO]], v1, 24
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 25
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 26
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 27
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 28
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 29
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 30
+; GCN-NEXT: v_readlane_b32 s[[USE_TMP_HI]], v1, 31
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+; GCN: v_readlane_b32 s[[USE_TMP_LO]], v1, 32
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 33
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 34
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 35
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 36
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 37
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 38
+; GCN-NEXT: v_readlane_b32 s[[USE_TMP_HI]], v1, 39
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+; GCN: v_readlane_b32 s[[USE_TMP_LO]], v1, 40
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 41
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 42
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 43
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 44
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 45
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 46
+; GCN-NEXT: v_readlane_b32 s[[USE_TMP_HI]], v1, 47
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+; GCN: v_readlane_b32 s[[USE_TMP_LO]], v1, 48
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 49
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 50
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 51
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 52
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 53
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 54
+; GCN-NEXT: v_readlane_b32 s[[USE_TMP_HI]], v1, 55
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+; GCN: v_readlane_b32 s[[USE_TMP_LO]], v1, 56
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 57
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 58
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 59
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 60
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 61
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v1, 62
+; GCN-NEXT: v_readlane_b32 s[[USE_TMP_HI]], v1, 63
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+; GCN: v_readlane_b32 s{{[0-9]+}}, v2, 0
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v2, 1
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v2, 2
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v2, 3
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v2, 4
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v2, 5
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v2, 6
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v2, 7
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+; GCN: v_readlane_b32 s{{[0-9]+}}, v0, 56
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 57
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 58
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 59
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 60
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 61
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 62
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 63
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+; GCN: v_readlane_b32 s{{[0-9]+}}, v0, 8
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 9
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 10
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 11
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 12
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 13
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 14
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 15
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+; GCN: v_readlane_b32 s{{[0-9]+}}, v0, 16
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 17
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 18
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 19
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 20
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 21
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 22
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 23
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+; GCN: v_readlane_b32 s{{[0-9]+}}, v0, 24
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 25
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 26
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 27
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 28
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 29
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 30
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 31
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+; GCN: v_readlane_b32 s{{[0-9]+}}, v0, 32
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 33
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 34
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 35
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 36
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 37
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 38
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 39
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+; GCN: v_readlane_b32 s{{[0-9]+}}, v0, 40
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 41
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 42
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 43
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 44
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 45
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 46
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 47
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+; GCN: v_readlane_b32 s{{[0-9]+}}, v0, 48
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 49
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 50
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 51
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 52
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 53
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 54
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v0, 55
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+define amdgpu_kernel void @spill_sgprs_to_multiple_vgprs(i32 addrspace(1)* %out, i32 %in) #0 {
+ %wide.sgpr0 = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr1 = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr2 = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr3 = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr4 = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr5 = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr6 = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr7 = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr8 = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr9 = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr10 = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr11 = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr12 = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr13 = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr14 = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr15 = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr16 = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
+ %cmp = icmp eq i32 %in, 0
+ br i1 %cmp, label %bb0, label %ret
+
+bb0:
+ call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr0) #0
+ call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr1) #0
+ call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr2) #0
+ call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr3) #0
+ call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr4) #0
+ call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr5) #0
+ call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr6) #0
+ call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr7) #0
+ call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr8) #0
+ call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr9) #0
+ call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr10) #0
+ call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr11) #0
+ call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr12) #0
+ call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr13) #0
+ call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr14) #0
+ call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr15) #0
+ call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr16) #0
+ br label %ret
+
+ret:
+ ret void
+}
+
+; Some of the lanes of an SGPR spill are in one VGPR and some are forced
+; into the next available VGPR.
+
+; GCN-LABEL: {{^}}split_sgpr_spill_2_vgprs:
+; GCN: def s[24:39]
+
+; GCN: v_writelane_b32 v0, s24, 50
+; GCN-NEXT: v_writelane_b32 v0, s25, 51
+; GCN-NEXT: v_writelane_b32 v0, s26, 52
+; GCN-NEXT: v_writelane_b32 v0, s27, 53
+; GCN-NEXT: v_writelane_b32 v0, s28, 54
+; GCN-NEXT: v_writelane_b32 v0, s29, 55
+; GCN-NEXT: v_writelane_b32 v0, s30, 56
+; GCN-NEXT: v_writelane_b32 v0, s31, 57
+; GCN-NEXT: v_writelane_b32 v0, s32, 58
+; GCN-NEXT: v_writelane_b32 v0, s33, 59
+; GCN-NEXT: v_writelane_b32 v0, s34, 60
+; GCN-NEXT: v_writelane_b32 v0, s35, 61
+; GCN-NEXT: v_writelane_b32 v0, s36, 62
+; GCN-NEXT: v_writelane_b32 v0, s37, 63
+; GCN-NEXT: v_writelane_b32 v1, s38, 0
+; GCN-NEXT: v_writelane_b32 v1, s39, 1
+
+; GCN: v_readlane_b32 s4, v0, 50
+; GCN-NEXT: v_readlane_b32 s5, v0, 51
+; GCN-NEXT: v_readlane_b32 s6, v0, 52
+; GCN-NEXT: v_readlane_b32 s7, v0, 53
+; GCN-NEXT: v_readlane_b32 s8, v0, 54
+; GCN-NEXT: v_readlane_b32 s9, v0, 55
+; GCN-NEXT: v_readlane_b32 s10, v0, 56
+; GCN-NEXT: v_readlane_b32 s11, v0, 57
+; GCN-NEXT: v_readlane_b32 s12, v0, 58
+; GCN-NEXT: v_readlane_b32 s13, v0, 59
+; GCN-NEXT: v_readlane_b32 s14, v0, 60
+; GCN-NEXT: v_readlane_b32 s15, v0, 61
+; GCN-NEXT: v_readlane_b32 s16, v0, 62
+; GCN-NEXT: v_readlane_b32 s17, v0, 63
+; GCN-NEXT: v_readlane_b32 s18, v1, 0
+; GCN-NEXT: v_readlane_b32 s19, v1, 1
+define amdgpu_kernel void @split_sgpr_spill_2_vgprs(i32 addrspace(1)* %out, i32 %in) #1 {
+ %wide.sgpr0 = call <16 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr1 = call <16 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr2 = call <16 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr5 = call <16 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr3 = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr4 = call <2 x i32> asm sideeffect "; def $0", "=s" () #0
+
+ %cmp = icmp eq i32 %in, 0
+ br i1 %cmp, label %bb0, label %ret
+
+bb0:
+ call void asm sideeffect "; use $0", "s"(<16 x i32> %wide.sgpr0) #0
+ call void asm sideeffect "; use $0", "s"(<16 x i32> %wide.sgpr1) #0
+ call void asm sideeffect "; use $0", "s"(<16 x i32> %wide.sgpr2) #0
+ call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr3) #0
+ call void asm sideeffect "; use $0", "s"(<2 x i32> %wide.sgpr4) #0
+ call void asm sideeffect "; use $0", "s"(<16 x i32> %wide.sgpr5) #0
+ br label %ret
+
+ret:
+ ret void
+}
+
+; The first 64 SGPR spills can go to a VGPR, but there isn't a second one, so
+; some spills must go to memory. The last 16-element spill runs out of lanes at the 15th element.
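+;
+; Illustrative sketch only (not checked; the registers below are assumptions):
+; when no VGPR lane is left for an SGPR value, it is copied through a VGPR and
+; spilled to scratch memory instead, e.g.
+;   v_mov_b32_e32 v0, s4                    ; copy the SGPR to a VGPR
+;   buffer_store_dword v0, off, s[0:3], s11 ; spill the VGPR to scratch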
+
+; GCN-LABEL: {{^}}no_vgprs_last_sgpr_spill:
+
+; GCN: v_writelane_b32 v23, s{{[0-9]+}}, 0
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 1
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 2
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 3
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 4
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 5
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 6
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 7
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 8
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 9
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 10
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 11
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 12
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 13
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 14
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 15
+
+; GCN: v_writelane_b32 v23, s{{[0-9]+}}, 16
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 17
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 18
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 19
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 20
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 21
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 22
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 23
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 24
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 25
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 26
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 27
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 28
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 29
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 30
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 31
+
+; GCN: def s[0:1]
+; GCN: v_writelane_b32 v23, s0, 32
+; GCN-NEXT: v_writelane_b32 v23, s1, 33
+
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 34
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 35
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 36
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 37
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 38
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 39
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 40
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 41
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 42
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 43
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 44
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 45
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 46
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 47
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 48
+; GCN-NEXT: v_writelane_b32 v23, s{{[0-9]+}}, 49
+
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: s_cbranch_scc1
+
+
+; GCN: v_readlane_b32 s[[USE_TMP_LO:[0-9]+]], v23, 0
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 1
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 2
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 3
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 4
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 5
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 6
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 7
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 8
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 9
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 10
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 11
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 12
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 13
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 14
+; GCN-NEXT: v_readlane_b32 s[[USE_TMP_HI:[0-9]+]], v23, 15
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+
+; GCN: v_readlane_b32 s[[USE_TMP_LO:[0-9]+]], v23, 34
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 35
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 36
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 37
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 38
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 39
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 40
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 41
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 42
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 43
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 44
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 45
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 46
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 47
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 48
+; GCN-NEXT: v_readlane_b32 s[[USE_TMP_HI:[0-9]+]], v23, 49
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+; GCN: v_readlane_b32 s[[USE_TMP_LO:[0-9]+]], v23, 16
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 17
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 18
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 19
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 20
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 21
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 22
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 23
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 24
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 25
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 26
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 27
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 28
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 29
+; GCN-NEXT: v_readlane_b32 s{{[0-9]+}}, v23, 30
+; GCN-NEXT: v_readlane_b32 s[[USE_TMP_HI:[0-9]+]], v23, 31
+; GCN: ; use s{{\[}}[[USE_TMP_LO]]:[[USE_TMP_HI]]{{\]}}
+
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+
+; GCN: v_readlane_b32 s0, v23, 32
+; GCN: v_readlane_b32 s1, v23, 33
+; GCN: ;;#ASMSTART
+; GCN: ; use s[0:1]
+define amdgpu_kernel void @no_vgprs_last_sgpr_spill(i32 addrspace(1)* %out, i32 %in) #1 {
+ call void asm sideeffect "", "~{VGPR0_VGPR1_VGPR2_VGPR3_VGPR4_VGPR5_VGPR6_VGPR7}" () #0
+ call void asm sideeffect "", "~{VGPR8_VGPR9_VGPR10_VGPR11_VGPR12_VGPR13_VGPR14_VGPR15}" () #0
+ call void asm sideeffect "", "~{VGPR16_VGPR17_VGPR18_VGPR19}"() #0
+ call void asm sideeffect "", "~{VGPR20_VGPR21}"() #0
+ call void asm sideeffect "", "~{VGPR22}"() #0
+
+ %wide.sgpr0 = call <16 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr1 = call <16 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr2 = call <16 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr3 = call <16 x i32> asm sideeffect "; def $0", "=s" () #0
+ %wide.sgpr4 = call <2 x i32> asm sideeffect "; def $0", "=s" () #0
+ %cmp = icmp eq i32 %in, 0
+ br i1 %cmp, label %bb0, label %ret
+
+bb0:
+ call void asm sideeffect "; use $0", "s"(<16 x i32> %wide.sgpr0) #0
+ call void asm sideeffect "; use $0", "s"(<16 x i32> %wide.sgpr1) #0
+ call void asm sideeffect "; use $0", "s"(<16 x i32> %wide.sgpr2) #0
+ call void asm sideeffect "; use $0", "s"(<16 x i32> %wide.sgpr3) #0
+ call void asm sideeffect "; use $0", "s"(<2 x i32> %wide.sgpr4) #0
+ br label %ret
+
+ret:
+ ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind "amdgpu-waves-per-eu"="10,10" }
diff --git a/test/CodeGen/AMDGPU/partially-dead-super-register-immediate.ll b/test/CodeGen/AMDGPU/partially-dead-super-register-immediate.ll
index 3e0d36978ad4..4bcfe5f3d28c 100644
--- a/test/CodeGen/AMDGPU/partially-dead-super-register-immediate.ll
+++ b/test/CodeGen/AMDGPU/partially-dead-super-register-immediate.ll
@@ -10,7 +10,7 @@
declare i32 @llvm.amdgcn.workitem.id.x() #1
-define void @dead_def_subregister(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) #0 {
+define amdgpu_kernel void @dead_def_subregister(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%val = load i64, i64 addrspace(1)* %in.gep
diff --git a/test/CodeGen/AMDGPU/predicates.ll b/test/CodeGen/AMDGPU/predicates.ll
index c1af815c7b1e..566b48eb8864 100644
--- a/test/CodeGen/AMDGPU/predicates.ll
+++ b/test/CodeGen/AMDGPU/predicates.ll
@@ -6,7 +6,7 @@
; CHECK-LABEL: {{^}}simple_if:
; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
; CHECK: LSHL * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
-define void @simple_if(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @simple_if(i32 addrspace(1)* %out, i32 %in) {
entry:
%cmp0 = icmp sgt i32 %in, 0
br i1 %cmp0, label %IF, label %ENDIF
@@ -25,7 +25,7 @@ ENDIF:
; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
-define void @simple_if_else(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @simple_if_else(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = icmp sgt i32 %in, 0
br i1 %0, label %IF, label %ELSE
@@ -51,7 +51,7 @@ ENDIF:
; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Exec
; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
; CHECK: LSHL * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
-define void @nested_if(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @nested_if(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = icmp sgt i32 %in, 0
br i1 %0, label %IF0, label %ENDIF
@@ -79,7 +79,7 @@ ENDIF:
; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
-define void @nested_if_else(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @nested_if_else(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = icmp sgt i32 %in, 0
br i1 %0, label %IF0, label %ENDIF
diff --git a/test/CodeGen/AMDGPU/private-access-no-objects.ll b/test/CodeGen/AMDGPU/private-access-no-objects.ll
index 2894730eccb1..af2683510293 100644
--- a/test/CodeGen/AMDGPU/private-access-no-objects.ll
+++ b/test/CodeGen/AMDGPU/private-access-no-objects.ll
@@ -18,7 +18,7 @@
; OPTNONE-NOT: s_mov_b32
; OPTNONE: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s7 offen{{$}}
-define void @store_to_undef() #0 {
+define amdgpu_kernel void @store_to_undef() #0 {
store volatile i32 0, i32* undef
ret void
}
@@ -28,7 +28,7 @@ define void @store_to_undef() #0 {
; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s7{{$}}
; OPT: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offen{{$}}
-define void @store_to_inttoptr() #0 {
+define amdgpu_kernel void @store_to_inttoptr() #0 {
store volatile i32 0, i32* inttoptr (i32 123 to i32*)
ret void
}
@@ -38,7 +38,7 @@ define void @store_to_inttoptr() #0 {
; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s7{{$}}
; OPT: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offen{{$}}
-define void @load_from_undef() #0 {
+define amdgpu_kernel void @load_from_undef() #0 {
%ld = load volatile i32, i32* undef
ret void
}
@@ -48,7 +48,7 @@ define void @load_from_undef() #0 {
; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s7{{$}}
; OPT: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offen{{$}}
-define void @load_from_inttoptr() #0 {
+define amdgpu_kernel void @load_from_inttoptr() #0 {
%ld = load volatile i32, i32* inttoptr (i32 123 to i32*)
ret void
}
diff --git a/test/CodeGen/AMDGPU/private-element-size.ll b/test/CodeGen/AMDGPU/private-element-size.ll
index de9a8f755122..f80543079701 100644
--- a/test/CodeGen/AMDGPU/private-element-size.ll
+++ b/test/CodeGen/AMDGPU/private-element-size.ll
@@ -10,33 +10,33 @@
; HSA-ELT4: private_element_size = 1
-; HSA-ELT16-DAG: buffer_store_dwordx4 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9{{$}}
; HSA-ELT16-DAG: buffer_store_dwordx4 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:16
+; HSA-ELT16-DAG: buffer_store_dwordx4 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:32
; HSA-ELT16-DAG: buffer_load_dwordx4 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s[0:3], s9 offen{{$}}
-; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9{{$}}
-; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:8
+; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:24{{$}}
; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:16
-; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:24
+; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:32
+; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:40
; HSA-ELT8: buffer_load_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s[0:3], s9 offen
; HSA-ELT8: buffer_load_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s[0:3], s9 offen
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:4{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:8{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:12{{$}}
; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:16{{$}}
; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:20{{$}}
; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:24{{$}}
; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:28{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:32{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:36{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:40{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:44{{$}}
-; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen{{$}}
-; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:4{{$}}
-; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:8{{$}}
-; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:12{{$}}
-define void @private_elt_size_v4i32(<4 x i32> addrspace(1)* %out, i32 addrspace(1)* %index.array) #0 {
+; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen{{$}}
+; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:4{{$}}
+; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:8{{$}}
+; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:12{{$}}
+define amdgpu_kernel void @private_elt_size_v4i32(<4 x i32> addrspace(1)* %out, i32 addrspace(1)* %index.array) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%idxprom = sext i32 %tid to i64
@@ -59,36 +59,28 @@ entry:
; HSA-ELT8: private_element_size = 2
; HSA-ELT4: private_element_size = 1
-; HSA-ELT16-DAG: buffer_store_dwordx4 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9{{$}}
-; HSA-ELT16-DAG: buffer_store_dwordx4 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:16
; HSA-ELT16-DAG: buffer_store_dwordx4 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:32
; HSA-ELT16-DAG: buffer_store_dwordx4 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:48
+; HSA-ELT16-DAG: buffer_store_dwordx4 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:64
+; HSA-ELT16-DAG: buffer_store_dwordx4 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:80
; HSA-ELT16-DAG: buffer_load_dwordx4 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s[0:3], s9 offen{{$}}
; HSA-ELT16-DAG: buffer_load_dwordx4 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s[0:3], s9 offen{{$}}
-; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9{{$}}
-; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:8
-; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:16
-; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:24
; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:32
; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:40
; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:48
; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:56
+; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:88
+; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:80
+; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:72
+; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:64
; HSA-ELT8: buffer_load_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s[0:3], s9 offen
; HSA-ELT8: buffer_load_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s[0:3], s9 offen
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:4{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:8{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:12{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:16{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:20{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:24{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:28{{$}}
; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:32{{$}}
; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:36{{$}}
; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:40{{$}}
@@ -97,6 +89,14 @@ entry:
; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:52{{$}}
; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:56{{$}}
; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:60{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:64{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:68{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:72{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:76{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:80{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:84{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:88{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:92{{$}}
; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen{{$}}
; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:4{{$}}
@@ -106,7 +106,7 @@ entry:
; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:20{{$}}
; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:24{{$}}
; HSA-ELT4-DAG: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:28{{$}}
-define void @private_elt_size_v8i32(<8 x i32> addrspace(1)* %out, i32 addrspace(1)* %index.array) #0 {
+define amdgpu_kernel void @private_elt_size_v8i32(<8 x i32> addrspace(1)* %out, i32 addrspace(1)* %index.array) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%idxprom = sext i32 %tid to i64
@@ -130,20 +130,20 @@ entry:
; HSA-ELT8: private_element_size = 2
; HSA-ELT4: private_element_size = 1
-; HSA-ELTGE8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9{{$}}
-; HSA-ELTGE8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:8
+; HSA-ELTGE8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, {{off|v[0-9]}}, s[0:3], s9 offset:1
+; HSA-ELTGE8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, {{off|v[0-9]}}, s[0:3], s9 offset:2
; HSA-ELTGE8: buffer_load_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s[0:3], s9 offen
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:4{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:8{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:12{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:16{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:20{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:24{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:28{{$}}
; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen{{$}}
; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:4{{$}}
-define void @private_elt_size_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %index.array) #0 {
+define amdgpu_kernel void @private_elt_size_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %index.array) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%idxprom = sext i32 %tid to i64
@@ -166,20 +166,20 @@ entry:
; HSA-ELT8: private_element_size = 2
; HSA-ELT4: private_element_size = 1
-; HSA-ELTGE8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9{{$}}
-; HSA-ELTGE8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:8
+; HSA-ELTGE8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:16
+; HSA-ELTGE8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:24
; HSA-ELTGE8: buffer_load_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s[0:3], s9 offen
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:4{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:8{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:12{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:16{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:20{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:24{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:28{{$}}
; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen{{$}}
; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:4{{$}}
-define void @private_elt_size_f64(double addrspace(1)* %out, i32 addrspace(1)* %index.array) #0 {
+define amdgpu_kernel void @private_elt_size_f64(double addrspace(1)* %out, i32 addrspace(1)* %index.array) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%idxprom = sext i32 %tid to i64
@@ -202,33 +202,33 @@ entry:
; HSA-ELT8: private_element_size = 2
; HSA-ELT4: private_element_size = 1
-; HSA-ELT16-DAG: buffer_store_dwordx4 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9{{$}}
; HSA-ELT16-DAG: buffer_store_dwordx4 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:16
+; HSA-ELT16-DAG: buffer_store_dwordx4 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:32
; HSA-ELT16-DAG: buffer_load_dwordx4 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s[0:3], s9 offen{{$}}
-; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9{{$}}
-; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:8
-; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:16
+; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:16{{$}}
; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:24
+; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:40
+; HSA-ELT8-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], s9 offset:32
; HSA-ELT8: buffer_load_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s[0:3], s9 offen
; HSA-ELT8: buffer_load_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s[0:3], s9 offen
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:4{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:8{{$}}
-; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:12{{$}}
; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:16{{$}}
; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:20{{$}}
; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:24{{$}}
; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:28{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:32{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:36{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:40{{$}}
+; HSA-ELT4-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s9 offset:44{{$}}
; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen{{$}}
; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:4{{$}}
; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:8{{$}}
; HSA-ELT4: buffer_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s[0:3], s9 offen offset:12{{$}}
-define void @private_elt_size_v2i64(<2 x i64> addrspace(1)* %out, i32 addrspace(1)* %index.array) #0 {
+define amdgpu_kernel void @private_elt_size_v2i64(<2 x i64> addrspace(1)* %out, i32 addrspace(1)* %index.array) #0 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%idxprom = sext i32 %tid to i64
diff --git a/test/CodeGen/AMDGPU/private-memory-atomics.ll b/test/CodeGen/AMDGPU/private-memory-atomics.ll
index eea10c862238..9fa3051928a0 100644
--- a/test/CodeGen/AMDGPU/private-memory-atomics.ll
+++ b/test/CodeGen/AMDGPU/private-memory-atomics.ll
@@ -4,7 +4,7 @@
; This works because the promote allocas pass replaces these with LDS atomics.
; Private atomics have no real use, but at least shouldn't crash on them.
-define void @atomicrmw_private(i32 addrspace(1)* %out, i32 %in) nounwind {
+define amdgpu_kernel void @atomicrmw_private(i32 addrspace(1)* %out, i32 %in) nounwind {
entry:
%tmp = alloca [2 x i32]
%tmp1 = getelementptr inbounds [2 x i32], [2 x i32]* %tmp, i32 0, i32 0
@@ -17,7 +17,7 @@ entry:
ret void
}
-define void @cmpxchg_private(i32 addrspace(1)* %out, i32 %in) nounwind {
+define amdgpu_kernel void @cmpxchg_private(i32 addrspace(1)* %out, i32 %in) nounwind {
entry:
%tmp = alloca [2 x i32]
%tmp1 = getelementptr inbounds [2 x i32], [2 x i32]* %tmp, i32 0, i32 0
diff --git a/test/CodeGen/AMDGPU/private-memory-broken.ll b/test/CodeGen/AMDGPU/private-memory-broken.ll
index 8ba0b70dbdbb..9b5f655f1b52 100644
--- a/test/CodeGen/AMDGPU/private-memory-broken.ll
+++ b/test/CodeGen/AMDGPU/private-memory-broken.ll
@@ -7,7 +7,7 @@
declare i32 @foo(i32*) nounwind
-define void @call_private(i32 addrspace(1)* %out, i32 %in) nounwind {
+define amdgpu_kernel void @call_private(i32 addrspace(1)* %out, i32 %in) nounwind {
entry:
%tmp = alloca [2 x i32]
%tmp1 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 0
diff --git a/test/CodeGen/AMDGPU/private-memory-r600.ll b/test/CodeGen/AMDGPU/private-memory-r600.ll
index 3e1796959aa6..866cd16ec3b5 100644
--- a/test/CodeGen/AMDGPU/private-memory-r600.ll
+++ b/test/CodeGen/AMDGPU/private-memory-r600.ll
@@ -12,11 +12,11 @@ declare i32 @llvm.r600.read.tidig.x() nounwind readnone
; OPT: call i32 @llvm.r600.read.local.size.y(), !range !0
; OPT: call i32 @llvm.r600.read.local.size.z(), !range !0
-; OPT: call i32 @llvm.r600.read.tidig.x(), !range !0
-; OPT: call i32 @llvm.r600.read.tidig.y(), !range !0
-; OPT: call i32 @llvm.r600.read.tidig.z(), !range !0
+; OPT: call i32 @llvm.r600.read.tidig.x(), !range !1
+; OPT: call i32 @llvm.r600.read.tidig.y(), !range !1
+; OPT: call i32 @llvm.r600.read.tidig.z(), !range !1
-define void @mova_same_clause(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
+define amdgpu_kernel void @mova_same_clause(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
entry:
%stack = alloca [5 x i32], align 4
%0 = load i32, i32 addrspace(1)* %in, align 4
@@ -47,7 +47,7 @@ entry:
; R600-NOT: MOVA_INT
%struct.point = type { i32, i32 }
-define void @multiple_structs(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @multiple_structs(i32 addrspace(1)* %out) #0 {
entry:
%a = alloca %struct.point
%b = alloca %struct.point
@@ -75,7 +75,7 @@ entry:
; FUNC-LABEL: {{^}}direct_loop:
; R600-NOT: MOVA_INT
-define void @direct_loop(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @direct_loop(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
entry:
%prv_array_const = alloca [2 x i32]
%prv_array = alloca [2 x i32]
@@ -110,7 +110,7 @@ for.end:
; FUNC-LABEL: {{^}}short_array:
; R600: MOVA_INT
-define void @short_array(i32 addrspace(1)* %out, i32 %index) #0 {
+define amdgpu_kernel void @short_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
%0 = alloca [2 x i16]
%1 = getelementptr inbounds [2 x i16], [2 x i16]* %0, i32 0, i32 0
@@ -127,7 +127,7 @@ entry:
; FUNC-LABEL: {{^}}char_array:
; R600: MOVA_INT
-define void @char_array(i32 addrspace(1)* %out, i32 %index) #0 {
+define amdgpu_kernel void @char_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
%0 = alloca [2 x i8]
%1 = getelementptr inbounds [2 x i8], [2 x i8]* %0, i32 0, i32 0
@@ -148,7 +148,7 @@ entry:
; R600-NOT: MOV T0.X
; Additional check in case the move ends up in the last slot
; R600-NOT: MOV * T0.X
-define void @work_item_info(i32 addrspace(1)* %out, i32 %in) #0 {
+define amdgpu_kernel void @work_item_info(i32 addrspace(1)* %out, i32 %in) #0 {
entry:
%0 = alloca [2 x i32]
%1 = getelementptr inbounds [2 x i32], [2 x i32]* %0, i32 0, i32 0
@@ -169,7 +169,7 @@ entry:
; R600_CHECK: MOV
; R600_CHECK: [[CHAN:[XYZW]]]+
; R600-NOT: [[CHAN]]+
-define void @no_overlap(i32 addrspace(1)* %out, i32 %in) #0 {
+define amdgpu_kernel void @no_overlap(i32 addrspace(1)* %out, i32 %in) #0 {
entry:
%0 = alloca [3 x i8], align 1
%1 = alloca [2 x i8], align 1
@@ -193,7 +193,7 @@ entry:
ret void
}
-define void @char_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
+define amdgpu_kernel void @char_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
%alloca = alloca [2 x [2 x i8]]
%gep0 = getelementptr inbounds [2 x [2 x i8]], [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 0
@@ -207,7 +207,7 @@ entry:
ret void
}
-define void @i32_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
+define amdgpu_kernel void @i32_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
%alloca = alloca [2 x [2 x i32]]
%gep0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 0
@@ -220,7 +220,7 @@ entry:
ret void
}
-define void @i64_array_array(i64 addrspace(1)* %out, i32 %index) #0 {
+define amdgpu_kernel void @i64_array_array(i64 addrspace(1)* %out, i32 %index) #0 {
entry:
%alloca = alloca [2 x [2 x i64]]
%gep0 = getelementptr inbounds [2 x [2 x i64]], [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 0
@@ -235,7 +235,7 @@ entry:
%struct.pair32 = type { i32, i32 }
-define void @struct_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
+define amdgpu_kernel void @struct_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
%alloca = alloca [2 x [2 x %struct.pair32]]
%gep0 = getelementptr inbounds [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 0, i32 1
@@ -248,7 +248,7 @@ entry:
ret void
}
-define void @struct_pair32_array(i32 addrspace(1)* %out, i32 %index) #0 {
+define amdgpu_kernel void @struct_pair32_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
%alloca = alloca [2 x %struct.pair32]
%gep0 = getelementptr inbounds [2 x %struct.pair32], [2 x %struct.pair32]* %alloca, i32 0, i32 0, i32 1
@@ -261,7 +261,7 @@ entry:
ret void
}
-define void @select_private(i32 addrspace(1)* %out, i32 %in) nounwind {
+define amdgpu_kernel void @select_private(i32 addrspace(1)* %out, i32 %in) nounwind {
entry:
%tmp = alloca [2 x i32]
%tmp1 = getelementptr inbounds [2 x i32], [2 x i32]* %tmp, i32 0, i32 0
@@ -282,7 +282,7 @@ entry:
; SI-NOT: ds_write
; SI: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen
; SI: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen ;
-define void @ptrtoint(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @ptrtoint(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%alloca = alloca [16 x i32]
%tmp0 = getelementptr inbounds [16 x i32], [16 x i32]* %alloca, i32 0, i32 %a
store i32 5, i32* %tmp0
@@ -295,6 +295,7 @@ define void @ptrtoint(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
ret void
}
-; OPT: !0 = !{i32 0, i32 2048}
+; OPT: !0 = !{i32 0, i32 257}
+; OPT: !1 = !{i32 0, i32 256}
attributes #0 = { nounwind "amdgpu-waves-per-eu"="1,2" }
diff --git a/test/CodeGen/AMDGPU/promote-alloca-array-allocation.ll b/test/CodeGen/AMDGPU/promote-alloca-array-allocation.ll
index 3bd0aecf7aa9..41a68b18b0a7 100644
--- a/test/CodeGen/AMDGPU/promote-alloca-array-allocation.ll
+++ b/test/CodeGen/AMDGPU/promote-alloca-array-allocation.ll
@@ -5,7 +5,7 @@
; CHECK-LABEL: @array_alloca(
; CHECK: %stack = alloca i32, i32 5, align 4
-define void @array_alloca(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
+define amdgpu_kernel void @array_alloca(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
entry:
%stack = alloca i32, i32 5, align 4
%ld0 = load i32, i32 addrspace(1)* %in, align 4
@@ -27,7 +27,7 @@ entry:
; CHECK-LABEL: @array_alloca_dynamic(
; CHECK: %stack = alloca i32, i32 %size, align 4
-define void @array_alloca_dynamic(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in, i32 %size) #0 {
+define amdgpu_kernel void @array_alloca_dynamic(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in, i32 %size) #0 {
entry:
%stack = alloca i32, i32 %size, align 4
%ld0 = load i32, i32 addrspace(1)* %in, align 4
diff --git a/test/CodeGen/AMDGPU/promote-alloca-bitcast-function.ll b/test/CodeGen/AMDGPU/promote-alloca-bitcast-function.ll
index 82030f377d9f..a5eb92de9e4b 100644
--- a/test/CodeGen/AMDGPU/promote-alloca-bitcast-function.ll
+++ b/test/CodeGen/AMDGPU/promote-alloca-bitcast-function.ll
@@ -7,14 +7,14 @@ declare void @foo(float*) #0
declare void @foo.varargs(...) #0
; CHECK: in function crash_call_constexpr_cast{{.*}}: unsupported call to function foo
-define void @crash_call_constexpr_cast() #0 {
+define amdgpu_kernel void @crash_call_constexpr_cast() #0 {
%alloca = alloca i32
call void bitcast (void (float*)* @foo to void (i32*)*)(i32* %alloca) #0
ret void
}
; CHECK: in function crash_call_constexpr_cast{{.*}}: unsupported call to function foo.varargs
-define void @crash_call_constexpr_cast_varargs() #0 {
+define amdgpu_kernel void @crash_call_constexpr_cast_varargs() #0 {
%alloca = alloca i32
call void bitcast (void (...)* @foo.varargs to void (i32*)*)(i32* %alloca) #0
ret void
diff --git a/test/CodeGen/AMDGPU/promote-alloca-globals.ll b/test/CodeGen/AMDGPU/promote-alloca-globals.ll
index eb0d0cc62697..38db51d4c8c6 100644
--- a/test/CodeGen/AMDGPU/promote-alloca-globals.ll
+++ b/test/CodeGen/AMDGPU/promote-alloca-globals.ll
@@ -5,12 +5,12 @@
@global_array0 = internal unnamed_addr addrspace(3) global [750 x [10 x i32]] undef, align 4
@global_array1 = internal unnamed_addr addrspace(3) global [750 x [10 x i32]] undef, align 4
-; IR-LABEL: define void @promote_alloca_size_256(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
+; IR-LABEL: define amdgpu_kernel void @promote_alloca_size_256(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
; IR: alloca [10 x i32]
; ASM-LABEL: {{^}}promote_alloca_size_256:
; ASM: ; LDSByteSize: 60000 bytes/workgroup (compile time only)
-define void @promote_alloca_size_256(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
+define amdgpu_kernel void @promote_alloca_size_256(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
entry:
%stack = alloca [10 x i32], align 4
%tmp = load i32, i32 addrspace(1)* %in, align 4
diff --git a/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll b/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll
index 4c3c15dac0d1..f83eb56dc6ed 100644
--- a/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll
+++ b/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll
@@ -7,7 +7,7 @@ declare i8* @llvm.invariant.group.barrier(i8*) #1
; GCN-LABEL: {{^}}use_invariant_promotable_lds:
; GCN: buffer_load_dword
; GCN: ds_write_b32
-define void @use_invariant_promotable_lds(i32 addrspace(1)* %arg) #2 {
+define amdgpu_kernel void @use_invariant_promotable_lds(i32 addrspace(1)* %arg) #2 {
bb:
%tmp = alloca i32, align 4
%tmp1 = bitcast i32* %tmp to i8*
diff --git a/test/CodeGen/AMDGPU/promote-alloca-lifetime.ll b/test/CodeGen/AMDGPU/promote-alloca-lifetime.ll
index eeda19fa27ac..bd4571a9616b 100644
--- a/test/CodeGen/AMDGPU/promote-alloca-lifetime.ll
+++ b/test/CodeGen/AMDGPU/promote-alloca-lifetime.ll
@@ -1,21 +1,21 @@
; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -amdgpu-promote-alloca %s | FileCheck -check-prefix=OPT %s
-declare void @llvm.lifetime.start(i64, i8* nocapture) #0
-declare void @llvm.lifetime.end(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
; OPT-LABEL: @use_lifetime_promotable_lds(
; OPT-NOT: alloca i32
; OPT-NOT: llvm.lifetime
; OPT: store i32 %tmp3, i32 addrspace(3)*
-define void @use_lifetime_promotable_lds(i32 addrspace(1)* %arg) #2 {
+define amdgpu_kernel void @use_lifetime_promotable_lds(i32 addrspace(1)* %arg) #2 {
bb:
%tmp = alloca i32, align 4
%tmp1 = bitcast i32* %tmp to i8*
- call void @llvm.lifetime.start(i64 4, i8* %tmp1)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %tmp1)
%tmp2 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 1
%tmp3 = load i32, i32 addrspace(1)* %tmp2
store i32 %tmp3, i32* %tmp
- call void @llvm.lifetime.end(i64 4, i8* %tmp1)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %tmp1)
ret void
}
diff --git a/test/CodeGen/AMDGPU/promote-alloca-mem-intrinsics.ll b/test/CodeGen/AMDGPU/promote-alloca-mem-intrinsics.ll
index 9cea1a23ea98..7a4a451ff360 100644
--- a/test/CodeGen/AMDGPU/promote-alloca-mem-intrinsics.ll
+++ b/test/CodeGen/AMDGPU/promote-alloca-mem-intrinsics.ll
@@ -8,13 +8,13 @@ declare void @llvm.memmove.p1i8.p0i8.i32(i8 addrspace(1)* nocapture, i8* nocaptu
declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #0
-declare i32 @llvm.objectsize.i32.p0i8(i8*, i1) #1
+declare i32 @llvm.objectsize.i32.p0i8(i8*, i1, i1) #1
; CHECK-LABEL: @promote_with_memcpy(
; CHECK: getelementptr inbounds [64 x [17 x i32]], [64 x [17 x i32]] addrspace(3)* @promote_with_memcpy.alloca, i32 0, i32 %{{[0-9]+}}
; CHECK: call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %alloca.bc, i8 addrspace(1)* %in.bc, i32 68, i32 4, i1 false)
; CHECK: call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out.bc, i8 addrspace(3)* %alloca.bc, i32 68, i32 4, i1 false)
-define void @promote_with_memcpy(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @promote_with_memcpy(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%alloca = alloca [17 x i32], align 4
%alloca.bc = bitcast [17 x i32]* %alloca to i8*
%in.bc = bitcast i32 addrspace(1)* %in to i8 addrspace(1)*
@@ -28,7 +28,7 @@ define void @promote_with_memcpy(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
; CHECK: getelementptr inbounds [64 x [17 x i32]], [64 x [17 x i32]] addrspace(3)* @promote_with_memmove.alloca, i32 0, i32 %{{[0-9]+}}
; CHECK: call void @llvm.memmove.p3i8.p1i8.i32(i8 addrspace(3)* %alloca.bc, i8 addrspace(1)* %in.bc, i32 68, i32 4, i1 false)
; CHECK: call void @llvm.memmove.p1i8.p3i8.i32(i8 addrspace(1)* %out.bc, i8 addrspace(3)* %alloca.bc, i32 68, i32 4, i1 false)
-define void @promote_with_memmove(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @promote_with_memmove(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%alloca = alloca [17 x i32], align 4
%alloca.bc = bitcast [17 x i32]* %alloca to i8*
%in.bc = bitcast i32 addrspace(1)* %in to i8 addrspace(1)*
@@ -41,7 +41,7 @@ define void @promote_with_memmove(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
; CHECK-LABEL: @promote_with_memset(
; CHECK: getelementptr inbounds [64 x [17 x i32]], [64 x [17 x i32]] addrspace(3)* @promote_with_memset.alloca, i32 0, i32 %{{[0-9]+}}
; CHECK: call void @llvm.memset.p3i8.i32(i8 addrspace(3)* %alloca.bc, i8 7, i32 68, i32 4, i1 false)
-define void @promote_with_memset(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @promote_with_memset(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%alloca = alloca [17 x i32], align 4
%alloca.bc = bitcast [17 x i32]* %alloca to i8*
%in.bc = bitcast i32 addrspace(1)* %in to i8 addrspace(1)*
@@ -52,11 +52,11 @@ define void @promote_with_memset(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
; CHECK-LABEL: @promote_with_objectsize(
; CHECK: [[PTR:%[0-9]+]] = getelementptr inbounds [64 x [17 x i32]], [64 x [17 x i32]] addrspace(3)* @promote_with_objectsize.alloca, i32 0, i32 %{{[0-9]+}}
-; CHECK: call i32 @llvm.objectsize.i32.p3i8(i8 addrspace(3)* %alloca.bc, i1 false)
-define void @promote_with_objectsize(i32 addrspace(1)* %out) #0 {
+; CHECK: call i32 @llvm.objectsize.i32.p3i8(i8 addrspace(3)* %alloca.bc, i1 false, i1 false)
+define amdgpu_kernel void @promote_with_objectsize(i32 addrspace(1)* %out) #0 {
%alloca = alloca [17 x i32], align 4
%alloca.bc = bitcast [17 x i32]* %alloca to i8*
- %size = call i32 @llvm.objectsize.i32.p0i8(i8* %alloca.bc, i1 false)
+ %size = call i32 @llvm.objectsize.i32.p0i8(i8* %alloca.bc, i1 false, i1 false)
store i32 %size, i32 addrspace(1)* %out
ret void
}
diff --git a/test/CodeGen/AMDGPU/promote-alloca-no-opts.ll b/test/CodeGen/AMDGPU/promote-alloca-no-opts.ll
index 8ba849e5f884..9f22f2071797 100644
--- a/test/CodeGen/AMDGPU/promote-alloca-no-opts.ll
+++ b/test/CodeGen/AMDGPU/promote-alloca-no-opts.ll
@@ -5,7 +5,7 @@
; NOOPTS: workgroup_group_segment_byte_size = 0{{$}}
; NOOPTS-NOT: ds_write
; OPTS: ds_write
-define void @promote_alloca_i32_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
+define amdgpu_kernel void @promote_alloca_i32_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
entry:
%alloca = alloca [2 x [2 x i32]]
%gep0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 0
@@ -21,7 +21,7 @@ entry:
; ALL-LABEL: {{^}}optnone_promote_alloca_i32_array_array:
; ALL: workgroup_group_segment_byte_size = 0{{$}}
; ALL-NOT: ds_write
-define void @optnone_promote_alloca_i32_array_array(i32 addrspace(1)* %out, i32 %index) #1 {
+define amdgpu_kernel void @optnone_promote_alloca_i32_array_array(i32 addrspace(1)* %out, i32 %index) #1 {
entry:
%alloca = alloca [2 x [2 x i32]]
%gep0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 0
diff --git a/test/CodeGen/AMDGPU/promote-alloca-padding-size-estimate.ll b/test/CodeGen/AMDGPU/promote-alloca-padding-size-estimate.ll
index 468a789e4a67..bf3bc493a4b8 100644
--- a/test/CodeGen/AMDGPU/promote-alloca-padding-size-estimate.ll
+++ b/test/CodeGen/AMDGPU/promote-alloca-padding-size-estimate.ll
@@ -30,7 +30,7 @@
; GCN-LABEL: {{^}}promote_alloca_size_order_0:
; GCN: workgroup_group_segment_byte_size = 2340
-define void @promote_alloca_size_order_0(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in, i32 %idx) #0 {
+define amdgpu_kernel void @promote_alloca_size_order_0(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in, i32 %idx) #0 {
entry:
%stack = alloca [5 x i32], align 4
%tmp0 = load i32, i32 addrspace(1)* %in, align 4
@@ -62,7 +62,7 @@ entry:
; GCN-LABEL: {{^}}promote_alloca_size_order_1:
; GCN: workgroup_group_segment_byte_size = 2352
-define void @promote_alloca_size_order_1(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in, i32 %idx) #0 {
+define amdgpu_kernel void @promote_alloca_size_order_1(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in, i32 %idx) #0 {
entry:
%stack = alloca [5 x i32], align 4
%tmp0 = load i32, i32 addrspace(1)* %in, align 4
@@ -100,7 +100,7 @@ entry:
; GCN-LABEL: {{^}}promote_alloca_align_pad_guess_over_limit:
; GCN: workgroup_group_segment_byte_size = 1060
-define void @promote_alloca_align_pad_guess_over_limit(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in, i32 %idx) #0 {
+define amdgpu_kernel void @promote_alloca_align_pad_guess_over_limit(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in, i32 %idx) #0 {
entry:
%stack = alloca [5 x i32], align 4
%tmp0 = load i32, i32 addrspace(1)* %in, align 4
diff --git a/test/CodeGen/AMDGPU/promote-alloca-stored-pointer-value.ll b/test/CodeGen/AMDGPU/promote-alloca-stored-pointer-value.ll
index 3bcbb4f986b7..03ce116cfcad 100644
--- a/test/CodeGen/AMDGPU/promote-alloca-stored-pointer-value.ll
+++ b/test/CodeGen/AMDGPU/promote-alloca-stored-pointer-value.ll
@@ -5,7 +5,7 @@
; GCN-LABEL: {{^}}stored_lds_pointer_value:
; GCN: buffer_store_dword v
-define void @stored_lds_pointer_value(float* addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @stored_lds_pointer_value(float* addrspace(1)* %ptr) #0 {
%tmp = alloca float
store float 0.0, float* %tmp
store float* %tmp, float* addrspace(1)* %ptr
@@ -14,7 +14,7 @@ define void @stored_lds_pointer_value(float* addrspace(1)* %ptr) #0 {
; GCN-LABEL: {{^}}stored_lds_pointer_value_offset:
; GCN: buffer_store_dword v
-define void @stored_lds_pointer_value_offset(float* addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @stored_lds_pointer_value_offset(float* addrspace(1)* %ptr) #0 {
%tmp0 = alloca float
%tmp1 = alloca float
store float 0.0, float* %tmp0
@@ -29,7 +29,7 @@ define void @stored_lds_pointer_value_offset(float* addrspace(1)* %ptr) #0 {
; GCN-DAG: s_mov_b32 s{{[0-9]+}}, SCRATCH_RSRC_DWORD1
; GCN: buffer_store_dword v
; GCN: buffer_store_dword v
-define void @stored_lds_pointer_value_gep(float* addrspace(1)* %ptr, i32 %idx) #0 {
+define amdgpu_kernel void @stored_lds_pointer_value_gep(float* addrspace(1)* %ptr, i32 %idx) #0 {
bb:
%tmp = alloca float, i32 16
store float 0.0, float* %tmp
@@ -46,7 +46,7 @@ bb:
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
-define void @stored_vector_pointer_value(i32* addrspace(1)* %out, i32 %index) {
+define amdgpu_kernel void @stored_vector_pointer_value(i32* addrspace(1)* %out, i32 %index) {
entry:
%tmp0 = alloca [4 x i32]
%x = getelementptr inbounds [4 x i32], [4 x i32]* %tmp0, i32 0, i32 0
@@ -64,7 +64,7 @@ entry:
; GCN-LABEL: {{^}}stored_fi_to_self:
; GCN-NOT: ds_
-define void @stored_fi_to_self() #0 {
+define amdgpu_kernel void @stored_fi_to_self() #0 {
%tmp = alloca i32*
store volatile i32* inttoptr (i32 1234 to i32*), i32** %tmp
%bitcast = bitcast i32** %tmp to i32*
diff --git a/test/CodeGen/AMDGPU/promote-alloca-to-lds-icmp.ll b/test/CodeGen/AMDGPU/promote-alloca-to-lds-icmp.ll
index 2e7527dbdbc4..ebef61229905 100644
--- a/test/CodeGen/AMDGPU/promote-alloca-to-lds-icmp.ll
+++ b/test/CodeGen/AMDGPU/promote-alloca-to-lds-icmp.ll
@@ -8,7 +8,7 @@
; CHECK: %ptr0 = getelementptr inbounds [16 x i32], [16 x i32] addrspace(3)* [[ARRAYGEP]], i32 0, i32 %a
; CHECK: %ptr1 = getelementptr inbounds [16 x i32], [16 x i32] addrspace(3)* [[ARRAYGEP]], i32 0, i32 %b
; CHECK: %cmp = icmp eq i32 addrspace(3)* %ptr0, %ptr1
-define void @lds_promoted_alloca_icmp_same_derived_pointer(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @lds_promoted_alloca_icmp_same_derived_pointer(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%alloca = alloca [16 x i32], align 4
%ptr0 = getelementptr inbounds [16 x i32], [16 x i32]* %alloca, i32 0, i32 %a
%ptr1 = getelementptr inbounds [16 x i32], [16 x i32]* %alloca, i32 0, i32 %b
@@ -22,7 +22,7 @@ define void @lds_promoted_alloca_icmp_same_derived_pointer(i32 addrspace(1)* %ou
; CHECK: [[ARRAYGEP:%[0-9]+]] = getelementptr inbounds [256 x [16 x i32]], [256 x [16 x i32]] addrspace(3)* @lds_promoted_alloca_icmp_null_rhs.alloca, i32 0, i32 %{{[0-9]+}}
; CHECK: %ptr0 = getelementptr inbounds [16 x i32], [16 x i32] addrspace(3)* [[ARRAYGEP]], i32 0, i32 %a
; CHECK: %cmp = icmp eq i32 addrspace(3)* %ptr0, null
-define void @lds_promoted_alloca_icmp_null_rhs(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @lds_promoted_alloca_icmp_null_rhs(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%alloca = alloca [16 x i32], align 4
%ptr0 = getelementptr inbounds [16 x i32], [16 x i32]* %alloca, i32 0, i32 %a
%cmp = icmp eq i32* %ptr0, null
@@ -35,7 +35,7 @@ define void @lds_promoted_alloca_icmp_null_rhs(i32 addrspace(1)* %out, i32 %a, i
; CHECK: [[ARRAYGEP:%[0-9]+]] = getelementptr inbounds [256 x [16 x i32]], [256 x [16 x i32]] addrspace(3)* @lds_promoted_alloca_icmp_null_lhs.alloca, i32 0, i32 %{{[0-9]+}}
; CHECK: %ptr0 = getelementptr inbounds [16 x i32], [16 x i32] addrspace(3)* [[ARRAYGEP]], i32 0, i32 %a
; CHECK: %cmp = icmp eq i32 addrspace(3)* null, %ptr0
-define void @lds_promoted_alloca_icmp_null_lhs(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @lds_promoted_alloca_icmp_null_lhs(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%alloca = alloca [16 x i32], align 4
%ptr0 = getelementptr inbounds [16 x i32], [16 x i32]* %alloca, i32 0, i32 %a
%cmp = icmp eq i32* null, %ptr0
@@ -49,7 +49,7 @@ define void @lds_promoted_alloca_icmp_null_lhs(i32 addrspace(1)* %out, i32 %a, i
; CHECK: %ptr0 = getelementptr inbounds [16 x i32], [16 x i32]* %alloca, i32 0, i32 %a
; CHECK: %ptr1 = call i32* @get_unknown_pointer()
; CHECK: %cmp = icmp eq i32* %ptr0, %ptr1
-define void @lds_promoted_alloca_icmp_unknown_ptr(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @lds_promoted_alloca_icmp_unknown_ptr(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%alloca = alloca [16 x i32], align 4
%ptr0 = getelementptr inbounds [16 x i32], [16 x i32]* %alloca, i32 0, i32 %a
%ptr1 = call i32* @get_unknown_pointer()
diff --git a/test/CodeGen/AMDGPU/promote-alloca-to-lds-phi.ll b/test/CodeGen/AMDGPU/promote-alloca-to-lds-phi.ll
index 0462a351c39b..d196897d67dc 100644
--- a/test/CodeGen/AMDGPU/promote-alloca-to-lds-phi.ll
+++ b/test/CodeGen/AMDGPU/promote-alloca-to-lds-phi.ll
@@ -13,7 +13,7 @@
; CHECK: endif:
; CHECK: %phi.ptr = phi i32 addrspace(3)* [ %arrayidx0, %if ], [ %arrayidx1, %else ]
; CHECK: store i32 0, i32 addrspace(3)* %phi.ptr, align 4
-define void @branch_ptr_var_same_alloca(i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @branch_ptr_var_same_alloca(i32 %a, i32 %b) #0 {
entry:
%alloca = alloca [64 x i32], align 4
br i1 undef, label %if, label %else
@@ -34,7 +34,7 @@ endif:
; CHECK-LABEL: @branch_ptr_phi_alloca_null_0(
; CHECK: %phi.ptr = phi i32 addrspace(3)* [ %arrayidx0, %if ], [ null, %entry ]
-define void @branch_ptr_phi_alloca_null_0(i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @branch_ptr_phi_alloca_null_0(i32 %a, i32 %b) #0 {
entry:
%alloca = alloca [64 x i32], align 4
br i1 undef, label %if, label %endif
@@ -51,7 +51,7 @@ endif:
; CHECK-LABEL: @branch_ptr_phi_alloca_null_1(
; CHECK: %phi.ptr = phi i32 addrspace(3)* [ null, %entry ], [ %arrayidx0, %if ]
-define void @branch_ptr_phi_alloca_null_1(i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @branch_ptr_phi_alloca_null_1(i32 %a, i32 %b) #0 {
entry:
%alloca = alloca [64 x i32], align 4
br i1 undef, label %if, label %endif
@@ -73,7 +73,7 @@ endif:
; CHECK: br label %exit
; CHECK: %phi.ptr = phi i32 addrspace(3)* [ %arrayidx0, %entry ]
; CHECK: store i32 0, i32 addrspace(3)* %phi.ptr, align 4
-define void @one_phi_value(i32 %a) #0 {
+define amdgpu_kernel void @one_phi_value(i32 %a) #0 {
entry:
%alloca = alloca [64 x i32], align 4
%arrayidx0 = getelementptr inbounds [64 x i32], [64 x i32]* %alloca, i32 0, i32 %a
@@ -97,7 +97,7 @@ exit:
; CHECK: endif:
; CHECK: %phi.ptr = phi i32* [ %arrayidx0, %if ], [ %arrayidx1, %else ]
; CHECK: store i32 0, i32* %phi.ptr, align 4
-define void @branch_ptr_alloca_unknown_obj(i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @branch_ptr_alloca_unknown_obj(i32 %a, i32 %b) #0 {
entry:
%alloca = alloca [64 x i32], align 4
br i1 undef, label %if, label %else
@@ -134,7 +134,7 @@ endif:
; CHECK-LABEL: @ptr_induction_var_same_alloca(
; CHECK: %alloca = alloca [64 x i32], align 4
; CHECK: phi i32* [ %arrayidx, %entry ], [ %incdec.ptr, %for.body ]
-define void @ptr_induction_var_same_alloca() #0 {
+define amdgpu_kernel void @ptr_induction_var_same_alloca() #0 {
entry:
%alloca = alloca [64 x i32], align 4
%arrayidx = getelementptr inbounds [64 x i32], [64 x i32]* %alloca, i32 0, i32 2
@@ -172,7 +172,7 @@ for.body: ; preds = %for.body, %entry
; CHECK: %alloca = alloca [64 x i32], align 4
; CHECK: %p.08 = phi i32* [ %incdec.ptr, %for.body ], [ %arrayidx, %for.body.preheader ]
; CHECK: %cmp = icmp eq i32* %incdec.ptr, %call
-define void @ptr_induction_var_alloca_unknown() #0 {
+define amdgpu_kernel void @ptr_induction_var_alloca_unknown() #0 {
entry:
%alloca = alloca [64 x i32], align 4
%arrayidx = getelementptr inbounds [64 x i32], [64 x i32]* %alloca, i32 0, i32 2
diff --git a/test/CodeGen/AMDGPU/promote-alloca-to-lds-select.ll b/test/CodeGen/AMDGPU/promote-alloca-to-lds-select.ll
index 34d274df7387..55c2229fb6bd 100644
--- a/test/CodeGen/AMDGPU/promote-alloca-to-lds-select.ll
+++ b/test/CodeGen/AMDGPU/promote-alloca-to-lds-select.ll
@@ -3,7 +3,7 @@
; CHECK-LABEL: @lds_promoted_alloca_select_invalid_pointer_operand(
; CHECK: %alloca = alloca i32
; CHECK: select i1 undef, i32* undef, i32* %alloca
-define void @lds_promoted_alloca_select_invalid_pointer_operand() #0 {
+define amdgpu_kernel void @lds_promoted_alloca_select_invalid_pointer_operand() #0 {
%alloca = alloca i32, align 4
%select = select i1 undef, i32* undef, i32* %alloca
store i32 0, i32* %select, align 4
@@ -16,7 +16,7 @@ define void @lds_promoted_alloca_select_invalid_pointer_operand() #0 {
; CHECK: %ptr1 = getelementptr inbounds [16 x i32], [16 x i32] addrspace(3)* [[ARRAYGEP]], i32 0, i32 %b
; CHECK: %select = select i1 undef, i32 addrspace(3)* %ptr0, i32 addrspace(3)* %ptr1
; CHECK: store i32 0, i32 addrspace(3)* %select, align 4
-define void @lds_promote_alloca_select_two_derived_pointers(i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @lds_promote_alloca_select_two_derived_pointers(i32 %a, i32 %b) #0 {
%alloca = alloca [16 x i32], align 4
%ptr0 = getelementptr inbounds [16 x i32], [16 x i32]* %alloca, i32 0, i32 %a
%ptr1 = getelementptr inbounds [16 x i32], [16 x i32]* %alloca, i32 0, i32 %b
@@ -33,7 +33,7 @@ define void @lds_promote_alloca_select_two_derived_pointers(i32 %a, i32 %b) #0 {
; CHECK: %ptr0 = getelementptr inbounds i32, i32* %alloca0, i32 %a
; CHECK: %ptr1 = getelementptr inbounds i32, i32* %alloca1, i32 %b
; CHECK: %select = select i1 undef, i32* %ptr0, i32* %ptr1
-define void @lds_promote_alloca_select_two_allocas(i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @lds_promote_alloca_select_two_allocas(i32 %a, i32 %b) #0 {
%alloca0 = alloca i32, i32 16, align 4
%alloca1 = alloca i32, i32 16, align 4
%ptr0 = getelementptr inbounds i32, i32* %alloca0, i32 %a
@@ -50,7 +50,7 @@ define void @lds_promote_alloca_select_two_allocas(i32 %a, i32 %b) #0 {
; CHECK: %ptr1 = getelementptr inbounds [16 x i32], [16 x i32] addrspace(3)* [[ARRAYGEP]], i32 0, i32 3
; CHECK: %select = select i1 undef, i32 addrspace(3)* %ptr0, i32 addrspace(3)* %ptr1
; CHECK: store i32 0, i32 addrspace(3)* %select, align 4
-define void @lds_promote_alloca_select_two_derived_constant_pointers() #0 {
+define amdgpu_kernel void @lds_promote_alloca_select_two_derived_constant_pointers() #0 {
%alloca = alloca [16 x i32], align 4
%ptr0 = getelementptr inbounds [16 x i32], [16 x i32]* %alloca, i32 0, i32 1
%ptr1 = getelementptr inbounds [16 x i32], [16 x i32]* %alloca, i32 0, i32 3
@@ -67,7 +67,7 @@ define void @lds_promote_alloca_select_two_derived_constant_pointers() #0 {
; CHECK: %select0 = select i1 undef, i32 addrspace(3)* %ptr0, i32 addrspace(3)* %ptr1
; CHECK: %select1 = select i1 undef, i32 addrspace(3)* %select0, i32 addrspace(3)* %ptr2
; CHECK: store i32 0, i32 addrspace(3)* %select1, align 4
-define void @lds_promoted_alloca_select_input_select(i32 %a, i32 %b, i32 %c) #0 {
+define amdgpu_kernel void @lds_promoted_alloca_select_input_select(i32 %a, i32 %b, i32 %c) #0 {
%alloca = alloca [16 x i32], align 4
%ptr0 = getelementptr inbounds [16 x i32], [16 x i32]* %alloca, i32 0, i32 %a
%ptr1 = getelementptr inbounds [16 x i32], [16 x i32]* %alloca, i32 0, i32 %b
@@ -78,7 +78,7 @@ define void @lds_promoted_alloca_select_input_select(i32 %a, i32 %b, i32 %c) #0
ret void
}
-define void @lds_promoted_alloca_select_input_phi(i32 %a, i32 %b, i32 %c) #0 {
+define amdgpu_kernel void @lds_promoted_alloca_select_input_phi(i32 %a, i32 %b, i32 %c) #0 {
entry:
%alloca = alloca [16 x i32], align 4
%ptr0 = getelementptr inbounds [16 x i32], [16 x i32]* %alloca, i32 0, i32 %a
@@ -102,7 +102,7 @@ bb2:
; CHECK-LABEL: @select_null_rhs(
; CHECK-NOT: alloca
; CHECK: select i1 %tmp2, double addrspace(3)* %{{[0-9]+}}, double addrspace(3)* null
-define void @select_null_rhs(double addrspace(1)* nocapture %arg, i32 %arg1) #1 {
+define amdgpu_kernel void @select_null_rhs(double addrspace(1)* nocapture %arg, i32 %arg1) #1 {
bb:
%tmp = alloca double, align 8
store double 0.000000e+00, double* %tmp, align 8
@@ -117,7 +117,7 @@ bb:
; CHECK-LABEL: @select_null_lhs(
; CHECK-NOT: alloca
; CHECK: select i1 %tmp2, double addrspace(3)* null, double addrspace(3)* %{{[0-9]+}}
-define void @select_null_lhs(double addrspace(1)* nocapture %arg, i32 %arg1) #1 {
+define amdgpu_kernel void @select_null_lhs(double addrspace(1)* nocapture %arg, i32 %arg1) #1 {
bb:
%tmp = alloca double, align 8
store double 0.000000e+00, double* %tmp, align 8
diff --git a/test/CodeGen/AMDGPU/promote-alloca-unhandled-intrinsic.ll b/test/CodeGen/AMDGPU/promote-alloca-unhandled-intrinsic.ll
index e331731f90f6..88c0e911662d 100644
--- a/test/CodeGen/AMDGPU/promote-alloca-unhandled-intrinsic.ll
+++ b/test/CodeGen/AMDGPU/promote-alloca-unhandled-intrinsic.ll
@@ -8,7 +8,7 @@ declare void @llvm.stackrestore(i8*) #2
; CHECK-LABEL: @try_promote_unhandled_intrinsic(
; CHECK: alloca
; CHECK: call void @llvm.stackrestore(i8* %tmp1)
-define void @try_promote_unhandled_intrinsic(i32 addrspace(1)* %arg) #2 {
+define amdgpu_kernel void @try_promote_unhandled_intrinsic(i32 addrspace(1)* %arg) #2 {
bb:
%tmp = alloca i32, align 4
%tmp1 = bitcast i32* %tmp to i8*
diff --git a/test/CodeGen/AMDGPU/promote-alloca-volatile.ll b/test/CodeGen/AMDGPU/promote-alloca-volatile.ll
index f9de38839bc5..9c43a6dc60f4 100644
--- a/test/CodeGen/AMDGPU/promote-alloca-volatile.ll
+++ b/test/CodeGen/AMDGPU/promote-alloca-volatile.ll
@@ -2,8 +2,8 @@
; CHECK-LABEL: @volatile_load(
; CHECK: alloca [5 x i32]
-; CHECK load volatile i32, i32*
-define void @volatile_load(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
+; CHECK: load volatile i32, i32*
+define amdgpu_kernel void @volatile_load(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
entry:
%stack = alloca [5 x i32], align 4
%tmp = load i32, i32 addrspace(1)* %in, align 4
@@ -15,8 +15,8 @@ entry:
; CHECK-LABEL: @volatile_store(
; CHECK: alloca [5 x i32]
-; CHECK store volatile i32 %tmp, i32*
-define void @volatile_store(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
+; CHECK: store volatile i32 %tmp, i32*
+define amdgpu_kernel void @volatile_store(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
entry:
%stack = alloca [5 x i32], align 4
%tmp = load i32, i32 addrspace(1)* %in, align 4
@@ -30,7 +30,7 @@ entry:
; CHECK: alloca double
; CHECK: load double
; CHECK: load volatile double
-define void @volatile_and_non_volatile_load(double addrspace(1)* nocapture %arg, i32 %arg1) #0 {
+define amdgpu_kernel void @volatile_and_non_volatile_load(double addrspace(1)* nocapture %arg, i32 %arg1) #0 {
bb:
%tmp = alloca double, align 8
store double 0.000000e+00, double* %tmp, align 8
diff --git a/test/CodeGen/AMDGPU/pv.ll b/test/CodeGen/AMDGPU/pv.ll
index d5f9833d6ad0..1474dbabba69 100644
--- a/test/CodeGen/AMDGPU/pv.ll
+++ b/test/CodeGen/AMDGPU/pv.ll
@@ -1,240 +1,236 @@
-; RUN: llc < %s -march=r600 | FileCheck %s
+; RUN: llc -march=r600 < %s | FileCheck %s
; CHECK: DOT4 * T{{[0-9]\.W}} (MASKED)
; CHECK: MAX T{{[0-9].[XYZW]}}, 0.0, PV.X
-
define amdgpu_vs void @main(<4 x float> inreg %reg0, <4 x float> inreg %reg1, <4 x float> inreg %reg2, <4 x float> inreg %reg3, <4 x float> inreg %reg4, <4 x float> inreg %reg5, <4 x float> inreg %reg6, <4 x float> inreg %reg7) {
main_body:
- %0 = extractelement <4 x float> %reg1, i32 0
- %1 = extractelement <4 x float> %reg1, i32 1
- %2 = extractelement <4 x float> %reg1, i32 2
- %3 = extractelement <4 x float> %reg1, i32 3
- %4 = extractelement <4 x float> %reg2, i32 0
- %5 = extractelement <4 x float> %reg2, i32 1
- %6 = extractelement <4 x float> %reg2, i32 2
- %7 = extractelement <4 x float> %reg2, i32 3
- %8 = extractelement <4 x float> %reg3, i32 0
- %9 = extractelement <4 x float> %reg3, i32 1
- %10 = extractelement <4 x float> %reg3, i32 2
- %11 = extractelement <4 x float> %reg3, i32 3
- %12 = extractelement <4 x float> %reg4, i32 0
- %13 = extractelement <4 x float> %reg4, i32 1
- %14 = extractelement <4 x float> %reg4, i32 2
- %15 = extractelement <4 x float> %reg4, i32 3
- %16 = extractelement <4 x float> %reg5, i32 0
- %17 = extractelement <4 x float> %reg5, i32 1
- %18 = extractelement <4 x float> %reg5, i32 2
- %19 = extractelement <4 x float> %reg5, i32 3
- %20 = extractelement <4 x float> %reg6, i32 0
- %21 = extractelement <4 x float> %reg6, i32 1
- %22 = extractelement <4 x float> %reg6, i32 2
- %23 = extractelement <4 x float> %reg6, i32 3
- %24 = extractelement <4 x float> %reg7, i32 0
- %25 = extractelement <4 x float> %reg7, i32 1
- %26 = extractelement <4 x float> %reg7, i32 2
- %27 = extractelement <4 x float> %reg7, i32 3
- %28 = load <4 x float>, <4 x float> addrspace(8)* null
- %29 = extractelement <4 x float> %28, i32 0
- %30 = fmul float %0, %29
- %31 = load <4 x float>, <4 x float> addrspace(8)* null
- %32 = extractelement <4 x float> %31, i32 1
- %33 = fmul float %0, %32
- %34 = load <4 x float>, <4 x float> addrspace(8)* null
- %35 = extractelement <4 x float> %34, i32 2
- %36 = fmul float %0, %35
- %37 = load <4 x float>, <4 x float> addrspace(8)* null
- %38 = extractelement <4 x float> %37, i32 3
- %39 = fmul float %0, %38
- %40 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
- %41 = extractelement <4 x float> %40, i32 0
- %42 = fmul float %1, %41
- %43 = fadd float %42, %30
- %44 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
- %45 = extractelement <4 x float> %44, i32 1
- %46 = fmul float %1, %45
- %47 = fadd float %46, %33
- %48 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
- %49 = extractelement <4 x float> %48, i32 2
- %50 = fmul float %1, %49
- %51 = fadd float %50, %36
- %52 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
- %53 = extractelement <4 x float> %52, i32 3
- %54 = fmul float %1, %53
- %55 = fadd float %54, %39
- %56 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
- %57 = extractelement <4 x float> %56, i32 0
- %58 = fmul float %2, %57
- %59 = fadd float %58, %43
- %60 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
- %61 = extractelement <4 x float> %60, i32 1
- %62 = fmul float %2, %61
- %63 = fadd float %62, %47
- %64 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
- %65 = extractelement <4 x float> %64, i32 2
- %66 = fmul float %2, %65
- %67 = fadd float %66, %51
- %68 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
- %69 = extractelement <4 x float> %68, i32 3
- %70 = fmul float %2, %69
- %71 = fadd float %70, %55
- %72 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
- %73 = extractelement <4 x float> %72, i32 0
- %74 = fmul float %3, %73
- %75 = fadd float %74, %59
- %76 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
- %77 = extractelement <4 x float> %76, i32 1
- %78 = fmul float %3, %77
- %79 = fadd float %78, %63
- %80 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
- %81 = extractelement <4 x float> %80, i32 2
- %82 = fmul float %3, %81
- %83 = fadd float %82, %67
- %84 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
- %85 = extractelement <4 x float> %84, i32 3
- %86 = fmul float %3, %85
- %87 = fadd float %86, %71
- %88 = insertelement <4 x float> undef, float %4, i32 0
- %89 = insertelement <4 x float> %88, float %5, i32 1
- %90 = insertelement <4 x float> %89, float %6, i32 2
- %91 = insertelement <4 x float> %90, float 0.000000e+00, i32 3
- %92 = insertelement <4 x float> undef, float %4, i32 0
- %93 = insertelement <4 x float> %92, float %5, i32 1
- %94 = insertelement <4 x float> %93, float %6, i32 2
- %95 = insertelement <4 x float> %94, float 0.000000e+00, i32 3
- %96 = call float @llvm.r600.dot4(<4 x float> %91, <4 x float> %95)
- %97 = call float @llvm.fabs.f32(float %96)
- %98 = call float @llvm.r600.recipsqrt.clamped.f32(float %97)
- %99 = fmul float %4, %98
- %100 = fmul float %5, %98
- %101 = fmul float %6, %98
- %102 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
- %103 = extractelement <4 x float> %102, i32 0
- %104 = fmul float %103, %8
- %105 = fadd float %104, %20
- %106 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
- %107 = extractelement <4 x float> %106, i32 1
- %108 = fmul float %107, %9
- %109 = fadd float %108, %21
- %110 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
- %111 = extractelement <4 x float> %110, i32 2
- %112 = fmul float %111, %10
- %113 = fadd float %112, %22
- %114 = call float @llvm.AMDGPU.clamp.f32(float %105, float 0.000000e+00, float 1.000000e+00)
- %115 = call float @llvm.AMDGPU.clamp.f32(float %109, float 0.000000e+00, float 1.000000e+00)
- %116 = call float @llvm.AMDGPU.clamp.f32(float %113, float 0.000000e+00, float 1.000000e+00)
- %117 = call float @llvm.AMDGPU.clamp.f32(float %15, float 0.000000e+00, float 1.000000e+00)
- %118 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
- %119 = extractelement <4 x float> %118, i32 0
- %120 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
- %121 = extractelement <4 x float> %120, i32 1
- %122 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
- %123 = extractelement <4 x float> %122, i32 2
- %124 = insertelement <4 x float> undef, float %99, i32 0
- %125 = insertelement <4 x float> %124, float %100, i32 1
- %126 = insertelement <4 x float> %125, float %101, i32 2
- %127 = insertelement <4 x float> %126, float 0.000000e+00, i32 3
- %128 = insertelement <4 x float> undef, float %119, i32 0
- %129 = insertelement <4 x float> %128, float %121, i32 1
- %130 = insertelement <4 x float> %129, float %123, i32 2
- %131 = insertelement <4 x float> %130, float 0.000000e+00, i32 3
- %132 = call float @llvm.r600.dot4(<4 x float> %127, <4 x float> %131)
- %133 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
- %134 = extractelement <4 x float> %133, i32 0
- %135 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
- %136 = extractelement <4 x float> %135, i32 1
- %137 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
- %138 = extractelement <4 x float> %137, i32 2
- %139 = insertelement <4 x float> undef, float %99, i32 0
- %140 = insertelement <4 x float> %139, float %100, i32 1
- %141 = insertelement <4 x float> %140, float %101, i32 2
- %142 = insertelement <4 x float> %141, float 0.000000e+00, i32 3
- %143 = insertelement <4 x float> undef, float %134, i32 0
- %144 = insertelement <4 x float> %143, float %136, i32 1
- %145 = insertelement <4 x float> %144, float %138, i32 2
- %146 = insertelement <4 x float> %145, float 0.000000e+00, i32 3
- %147 = call float @llvm.r600.dot4(<4 x float> %142, <4 x float> %146)
- %148 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
- %149 = extractelement <4 x float> %148, i32 0
- %150 = fmul float %149, %8
- %151 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
- %152 = extractelement <4 x float> %151, i32 1
- %153 = fmul float %152, %9
- %154 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
- %155 = extractelement <4 x float> %154, i32 2
- %156 = fmul float %155, %10
- %157 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
- %158 = extractelement <4 x float> %157, i32 0
- %159 = fmul float %158, %12
- %160 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
- %161 = extractelement <4 x float> %160, i32 1
- %162 = fmul float %161, %13
- %163 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
- %164 = extractelement <4 x float> %163, i32 2
- %165 = fmul float %164, %14
- %166 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
- %167 = extractelement <4 x float> %166, i32 0
- %168 = fmul float %167, %16
- %169 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
- %170 = extractelement <4 x float> %169, i32 1
- %171 = fmul float %170, %17
- %172 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
- %173 = extractelement <4 x float> %172, i32 2
- %174 = fmul float %173, %18
- %175 = fcmp uge float %132, 0.000000e+00
- %176 = select i1 %175, float %132, float 0.000000e+00
- %177 = fcmp uge float %147, 0.000000e+00
- %178 = select i1 %177, float %147, float 0.000000e+00
- %179 = call float @llvm.pow.f32(float %178, float %24)
- %180 = fcmp ult float %132, 0.000000e+00
- %181 = select i1 %180, float 0.000000e+00, float %179
- %182 = fadd float %150, %105
- %183 = fadd float %153, %109
- %184 = fadd float %156, %113
- %185 = fmul float %176, %159
- %186 = fadd float %185, %182
- %187 = fmul float %176, %162
- %188 = fadd float %187, %183
- %189 = fmul float %176, %165
- %190 = fadd float %189, %184
- %191 = fmul float %181, %168
- %192 = fadd float %191, %186
- %193 = fmul float %181, %171
- %194 = fadd float %193, %188
- %195 = fmul float %181, %174
- %196 = fadd float %195, %190
- %197 = call float @llvm.AMDGPU.clamp.f32(float %192, float 0.000000e+00, float 1.000000e+00)
- %198 = call float @llvm.AMDGPU.clamp.f32(float %194, float 0.000000e+00, float 1.000000e+00)
- %199 = call float @llvm.AMDGPU.clamp.f32(float %196, float 0.000000e+00, float 1.000000e+00)
- %200 = insertelement <4 x float> undef, float %75, i32 0
- %201 = insertelement <4 x float> %200, float %79, i32 1
- %202 = insertelement <4 x float> %201, float %83, i32 2
- %203 = insertelement <4 x float> %202, float %87, i32 3
- call void @llvm.r600.store.swizzle(<4 x float> %203, i32 60, i32 1)
- %204 = insertelement <4 x float> undef, float %197, i32 0
- %205 = insertelement <4 x float> %204, float %198, i32 1
- %206 = insertelement <4 x float> %205, float %199, i32 2
- %207 = insertelement <4 x float> %206, float %117, i32 3
- call void @llvm.r600.store.swizzle(<4 x float> %207, i32 0, i32 2)
+ %tmp = extractelement <4 x float> %reg1, i32 0
+ %tmp13 = extractelement <4 x float> %reg1, i32 1
+ %tmp14 = extractelement <4 x float> %reg1, i32 2
+ %tmp15 = extractelement <4 x float> %reg1, i32 3
+ %tmp16 = extractelement <4 x float> %reg2, i32 0
+ %tmp17 = extractelement <4 x float> %reg2, i32 1
+ %tmp18 = extractelement <4 x float> %reg2, i32 2
+ %tmp19 = extractelement <4 x float> %reg2, i32 3
+ %tmp20 = extractelement <4 x float> %reg3, i32 0
+ %tmp21 = extractelement <4 x float> %reg3, i32 1
+ %tmp22 = extractelement <4 x float> %reg3, i32 2
+ %tmp23 = extractelement <4 x float> %reg3, i32 3
+ %tmp24 = extractelement <4 x float> %reg4, i32 0
+ %tmp25 = extractelement <4 x float> %reg4, i32 1
+ %tmp26 = extractelement <4 x float> %reg4, i32 2
+ %tmp27 = extractelement <4 x float> %reg4, i32 3
+ %tmp28 = extractelement <4 x float> %reg5, i32 0
+ %tmp29 = extractelement <4 x float> %reg5, i32 1
+ %tmp30 = extractelement <4 x float> %reg5, i32 2
+ %tmp31 = extractelement <4 x float> %reg5, i32 3
+ %tmp32 = extractelement <4 x float> %reg6, i32 0
+ %tmp33 = extractelement <4 x float> %reg6, i32 1
+ %tmp34 = extractelement <4 x float> %reg6, i32 2
+ %tmp35 = extractelement <4 x float> %reg6, i32 3
+ %tmp36 = extractelement <4 x float> %reg7, i32 0
+ %tmp37 = extractelement <4 x float> %reg7, i32 1
+ %tmp38 = extractelement <4 x float> %reg7, i32 2
+ %tmp39 = extractelement <4 x float> %reg7, i32 3
+ %tmp40 = load <4 x float>, <4 x float> addrspace(8)* null
+ %tmp41 = extractelement <4 x float> %tmp40, i32 0
+ %tmp42 = fmul float %tmp, %tmp41
+ %tmp43 = load <4 x float>, <4 x float> addrspace(8)* null
+ %tmp44 = extractelement <4 x float> %tmp43, i32 1
+ %tmp45 = fmul float %tmp, %tmp44
+ %tmp46 = load <4 x float>, <4 x float> addrspace(8)* null
+ %tmp47 = extractelement <4 x float> %tmp46, i32 2
+ %tmp48 = fmul float %tmp, %tmp47
+ %tmp49 = load <4 x float>, <4 x float> addrspace(8)* null
+ %tmp50 = extractelement <4 x float> %tmp49, i32 3
+ %tmp51 = fmul float %tmp, %tmp50
+ %tmp52 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %tmp53 = extractelement <4 x float> %tmp52, i32 0
+ %tmp54 = fmul float %tmp13, %tmp53
+ %tmp55 = fadd float %tmp54, %tmp42
+ %tmp56 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %tmp57 = extractelement <4 x float> %tmp56, i32 1
+ %tmp58 = fmul float %tmp13, %tmp57
+ %tmp59 = fadd float %tmp58, %tmp45
+ %tmp60 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %tmp61 = extractelement <4 x float> %tmp60, i32 2
+ %tmp62 = fmul float %tmp13, %tmp61
+ %tmp63 = fadd float %tmp62, %tmp48
+ %tmp64 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %tmp65 = extractelement <4 x float> %tmp64, i32 3
+ %tmp66 = fmul float %tmp13, %tmp65
+ %tmp67 = fadd float %tmp66, %tmp51
+ %tmp68 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %tmp69 = extractelement <4 x float> %tmp68, i32 0
+ %tmp70 = fmul float %tmp14, %tmp69
+ %tmp71 = fadd float %tmp70, %tmp55
+ %tmp72 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %tmp73 = extractelement <4 x float> %tmp72, i32 1
+ %tmp74 = fmul float %tmp14, %tmp73
+ %tmp75 = fadd float %tmp74, %tmp59
+ %tmp76 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %tmp77 = extractelement <4 x float> %tmp76, i32 2
+ %tmp78 = fmul float %tmp14, %tmp77
+ %tmp79 = fadd float %tmp78, %tmp63
+ %tmp80 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %tmp81 = extractelement <4 x float> %tmp80, i32 3
+ %tmp82 = fmul float %tmp14, %tmp81
+ %tmp83 = fadd float %tmp82, %tmp67
+ %tmp84 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+ %tmp85 = extractelement <4 x float> %tmp84, i32 0
+ %tmp86 = fmul float %tmp15, %tmp85
+ %tmp87 = fadd float %tmp86, %tmp71
+ %tmp88 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+ %tmp89 = extractelement <4 x float> %tmp88, i32 1
+ %tmp90 = fmul float %tmp15, %tmp89
+ %tmp91 = fadd float %tmp90, %tmp75
+ %tmp92 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+ %tmp93 = extractelement <4 x float> %tmp92, i32 2
+ %tmp94 = fmul float %tmp15, %tmp93
+ %tmp95 = fadd float %tmp94, %tmp79
+ %tmp96 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+ %tmp97 = extractelement <4 x float> %tmp96, i32 3
+ %tmp98 = fmul float %tmp15, %tmp97
+ %tmp99 = fadd float %tmp98, %tmp83
+ %tmp100 = insertelement <4 x float> undef, float %tmp16, i32 0
+ %tmp101 = insertelement <4 x float> %tmp100, float %tmp17, i32 1
+ %tmp102 = insertelement <4 x float> %tmp101, float %tmp18, i32 2
+ %tmp103 = insertelement <4 x float> %tmp102, float 0.000000e+00, i32 3
+ %tmp104 = insertelement <4 x float> undef, float %tmp16, i32 0
+ %tmp105 = insertelement <4 x float> %tmp104, float %tmp17, i32 1
+ %tmp106 = insertelement <4 x float> %tmp105, float %tmp18, i32 2
+ %tmp107 = insertelement <4 x float> %tmp106, float 0.000000e+00, i32 3
+ %tmp108 = call float @llvm.r600.dot4(<4 x float> %tmp103, <4 x float> %tmp107)
+ %tmp109 = call float @llvm.fabs.f32(float %tmp108)
+ %tmp110 = call float @llvm.r600.recipsqrt.clamped.f32(float %tmp109)
+ %tmp111 = fmul float %tmp16, %tmp110
+ %tmp112 = fmul float %tmp17, %tmp110
+ %tmp113 = fmul float %tmp18, %tmp110
+ %tmp114 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+ %tmp115 = extractelement <4 x float> %tmp114, i32 0
+ %tmp116 = fmul float %tmp115, %tmp20
+ %tmp117 = fadd float %tmp116, %tmp32
+ %tmp118 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+ %tmp119 = extractelement <4 x float> %tmp118, i32 1
+ %tmp120 = fmul float %tmp119, %tmp21
+ %tmp121 = fadd float %tmp120, %tmp33
+ %tmp122 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+ %tmp123 = extractelement <4 x float> %tmp122, i32 2
+ %tmp124 = fmul float %tmp123, %tmp22
+ %tmp125 = fadd float %tmp124, %tmp34
+ %max.0.i = call float @llvm.maxnum.f32(float %tmp117, float 0.000000e+00)
+ %clamp.i = call float @llvm.minnum.f32(float %max.0.i, float 1.000000e+00)
+ %max.0.i11 = call float @llvm.maxnum.f32(float %tmp121, float 0.000000e+00)
+ %clamp.i12 = call float @llvm.minnum.f32(float %max.0.i11, float 1.000000e+00)
+ %max.0.i9 = call float @llvm.maxnum.f32(float %tmp125, float 0.000000e+00)
+ %clamp.i10 = call float @llvm.minnum.f32(float %max.0.i9, float 1.000000e+00)
+ %max.0.i7 = call float @llvm.maxnum.f32(float %tmp27, float 0.000000e+00)
+ %clamp.i8 = call float @llvm.minnum.f32(float %max.0.i7, float 1.000000e+00)
+ %tmp126 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
+ %tmp127 = extractelement <4 x float> %tmp126, i32 0
+ %tmp128 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
+ %tmp129 = extractelement <4 x float> %tmp128, i32 1
+ %tmp130 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
+ %tmp131 = extractelement <4 x float> %tmp130, i32 2
+ %tmp132 = insertelement <4 x float> undef, float %tmp111, i32 0
+ %tmp133 = insertelement <4 x float> %tmp132, float %tmp112, i32 1
+ %tmp134 = insertelement <4 x float> %tmp133, float %tmp113, i32 2
+ %tmp135 = insertelement <4 x float> %tmp134, float 0.000000e+00, i32 3
+ %tmp136 = insertelement <4 x float> undef, float %tmp127, i32 0
+ %tmp137 = insertelement <4 x float> %tmp136, float %tmp129, i32 1
+ %tmp138 = insertelement <4 x float> %tmp137, float %tmp131, i32 2
+ %tmp139 = insertelement <4 x float> %tmp138, float 0.000000e+00, i32 3
+ %tmp140 = call float @llvm.r600.dot4(<4 x float> %tmp135, <4 x float> %tmp139)
+ %tmp141 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
+ %tmp142 = extractelement <4 x float> %tmp141, i32 0
+ %tmp143 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
+ %tmp144 = extractelement <4 x float> %tmp143, i32 1
+ %tmp145 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
+ %tmp146 = extractelement <4 x float> %tmp145, i32 2
+ %tmp147 = insertelement <4 x float> undef, float %tmp111, i32 0
+ %tmp148 = insertelement <4 x float> %tmp147, float %tmp112, i32 1
+ %tmp149 = insertelement <4 x float> %tmp148, float %tmp113, i32 2
+ %tmp150 = insertelement <4 x float> %tmp149, float 0.000000e+00, i32 3
+ %tmp151 = insertelement <4 x float> undef, float %tmp142, i32 0
+ %tmp152 = insertelement <4 x float> %tmp151, float %tmp144, i32 1
+ %tmp153 = insertelement <4 x float> %tmp152, float %tmp146, i32 2
+ %tmp154 = insertelement <4 x float> %tmp153, float 0.000000e+00, i32 3
+ %tmp155 = call float @llvm.r600.dot4(<4 x float> %tmp150, <4 x float> %tmp154)
+ %tmp156 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
+ %tmp157 = extractelement <4 x float> %tmp156, i32 0
+ %tmp158 = fmul float %tmp157, %tmp20
+ %tmp159 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
+ %tmp160 = extractelement <4 x float> %tmp159, i32 1
+ %tmp161 = fmul float %tmp160, %tmp21
+ %tmp162 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
+ %tmp163 = extractelement <4 x float> %tmp162, i32 2
+ %tmp164 = fmul float %tmp163, %tmp22
+ %tmp165 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
+ %tmp166 = extractelement <4 x float> %tmp165, i32 0
+ %tmp167 = fmul float %tmp166, %tmp24
+ %tmp168 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
+ %tmp169 = extractelement <4 x float> %tmp168, i32 1
+ %tmp170 = fmul float %tmp169, %tmp25
+ %tmp171 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
+ %tmp172 = extractelement <4 x float> %tmp171, i32 2
+ %tmp173 = fmul float %tmp172, %tmp26
+ %tmp174 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
+ %tmp175 = extractelement <4 x float> %tmp174, i32 0
+ %tmp176 = fmul float %tmp175, %tmp28
+ %tmp177 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
+ %tmp178 = extractelement <4 x float> %tmp177, i32 1
+ %tmp179 = fmul float %tmp178, %tmp29
+ %tmp180 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
+ %tmp181 = extractelement <4 x float> %tmp180, i32 2
+ %tmp182 = fmul float %tmp181, %tmp30
+ %tmp183 = fcmp uge float %tmp140, 0.000000e+00
+ %tmp184 = select i1 %tmp183, float %tmp140, float 0.000000e+00
+ %tmp185 = fcmp uge float %tmp155, 0.000000e+00
+ %tmp186 = select i1 %tmp185, float %tmp155, float 0.000000e+00
+ %tmp187 = call float @llvm.pow.f32(float %tmp186, float %tmp36)
+ %tmp188 = fcmp ult float %tmp140, 0.000000e+00
+ %tmp189 = select i1 %tmp188, float 0.000000e+00, float %tmp187
+ %tmp190 = fadd float %tmp158, %tmp117
+ %tmp191 = fadd float %tmp161, %tmp121
+ %tmp192 = fadd float %tmp164, %tmp125
+ %tmp193 = fmul float %tmp184, %tmp167
+ %tmp194 = fadd float %tmp193, %tmp190
+ %tmp195 = fmul float %tmp184, %tmp170
+ %tmp196 = fadd float %tmp195, %tmp191
+ %tmp197 = fmul float %tmp184, %tmp173
+ %tmp198 = fadd float %tmp197, %tmp192
+ %tmp199 = fmul float %tmp189, %tmp176
+ %tmp200 = fadd float %tmp199, %tmp194
+ %tmp201 = fmul float %tmp189, %tmp179
+ %tmp202 = fadd float %tmp201, %tmp196
+ %tmp203 = fmul float %tmp189, %tmp182
+ %tmp204 = fadd float %tmp203, %tmp198
+ %max.0.i5 = call float @llvm.maxnum.f32(float %tmp200, float 0.000000e+00)
+ %clamp.i6 = call float @llvm.minnum.f32(float %max.0.i5, float 1.000000e+00)
+ %max.0.i3 = call float @llvm.maxnum.f32(float %tmp202, float 0.000000e+00)
+ %clamp.i4 = call float @llvm.minnum.f32(float %max.0.i3, float 1.000000e+00)
+ %max.0.i1 = call float @llvm.maxnum.f32(float %tmp204, float 0.000000e+00)
+ %clamp.i2 = call float @llvm.minnum.f32(float %max.0.i1, float 1.000000e+00)
+ %tmp205 = insertelement <4 x float> undef, float %tmp87, i32 0
+ %tmp206 = insertelement <4 x float> %tmp205, float %tmp91, i32 1
+ %tmp207 = insertelement <4 x float> %tmp206, float %tmp95, i32 2
+ %tmp208 = insertelement <4 x float> %tmp207, float %tmp99, i32 3
+ call void @llvm.r600.store.swizzle(<4 x float> %tmp208, i32 60, i32 1)
+ %tmp209 = insertelement <4 x float> undef, float %clamp.i6, i32 0
+ %tmp210 = insertelement <4 x float> %tmp209, float %clamp.i4, i32 1
+ %tmp211 = insertelement <4 x float> %tmp210, float %clamp.i2, i32 2
+ %tmp212 = insertelement <4 x float> %tmp211, float %clamp.i8, i32 3
+ call void @llvm.r600.store.swizzle(<4 x float> %tmp212, i32 0, i32 2)
ret void
}
-; Function Attrs: readnone
-declare float @llvm.r600.dot4(<4 x float>, <4 x float>) #1
-
-; Function Attrs: readonly
-declare float @llvm.fabs.f32(float) #1
-
-; Function Attrs: readnone
-declare float @llvm.r600.recipsqrt.clamped.f32(float) #1
-
-; Function Attrs: readnone
-declare float @llvm.AMDGPU.clamp.f32(float, float, float) #1
-
-; Function Attrs: nounwind readonly
-declare float @llvm.pow.f32(float, float) #2
-
-declare void @llvm.r600.store.swizzle(<4 x float>, i32, i32) #3
+declare float @llvm.minnum.f32(float, float) #0
+declare float @llvm.maxnum.f32(float, float) #0
+declare float @llvm.r600.dot4(<4 x float>, <4 x float>) #0
+declare float @llvm.fabs.f32(float) #0
+declare float @llvm.r600.recipsqrt.clamped.f32(float) #0
+declare float @llvm.pow.f32(float, float) #0
+declare void @llvm.r600.store.swizzle(<4 x float>, i32, i32) #1
-attributes #1 = { nounwind readnone }
-attributes #2 = { nounwind readonly }
-attributes #3 = { nounwind }
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
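+
+; The paired maxnum/minnum calls used throughout the kernel above are the
+; usual open-coded clamp to [0,1]; a minimal sketch of the idiom, for a
+; hypothetical value %x:
+;   %lo = call float @llvm.maxnum.f32(float %x, float 0.000000e+00)
+;   %clamped = call float @llvm.minnum.f32(float %lo, float 1.000000e+00)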
diff --git a/test/CodeGen/AMDGPU/r600-infinite-loop-bug-while-reorganizing-vector.ll b/test/CodeGen/AMDGPU/r600-infinite-loop-bug-while-reorganizing-vector.ll
index 461caf5b5d20..e2143ff85b72 100644
--- a/test/CodeGen/AMDGPU/r600-infinite-loop-bug-while-reorganizing-vector.ll
+++ b/test/CodeGen/AMDGPU/r600-infinite-loop-bug-while-reorganizing-vector.ll
@@ -10,7 +10,7 @@ main_body:
%tmp6 = insertelement <4 x float> %tmp5, float %tmp2, i32 1
%tmp7 = insertelement <4 x float> %tmp6, float %tmp3, i32 2
%tmp8 = insertelement <4 x float> %tmp7, float %tmp4, i32 3
- %tmp9 = call <4 x float> @llvm.AMDGPU.cube(<4 x float> %tmp8)
+ %tmp9 = call <4 x float> @llvm.r600.cube(<4 x float> %tmp8)
%tmp10 = extractelement <4 x float> %tmp9, i32 0
%tmp11 = extractelement <4 x float> %tmp9, i32 1
%tmp12 = extractelement <4 x float> %tmp9, i32 2
@@ -45,7 +45,7 @@ main_body:
}
; Function Attrs: readnone
-declare <4 x float> @llvm.AMDGPU.cube(<4 x float>) #0
+declare <4 x float> @llvm.r600.cube(<4 x float>) #0
; Function Attrs: readnone
declare float @fabs(float) #0
diff --git a/test/CodeGen/AMDGPU/r600-legalize-umax-bug.ll b/test/CodeGen/AMDGPU/r600-legalize-umax-bug.ll
index 866a4a9191e2..b7ed34bbf09b 100644
--- a/test/CodeGen/AMDGPU/r600-legalize-umax-bug.ll
+++ b/test/CodeGen/AMDGPU/r600-legalize-umax-bug.ll
@@ -2,7 +2,7 @@
; Don't crash
; CHECK: MAX_UINT
-define void @test(i64 addrspace(1)* %out) {
+define amdgpu_kernel void @test(i64 addrspace(1)* %out) {
bb:
store i64 2, i64 addrspace(1)* %out
%tmp = load i64, i64 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/r600.alu-limits.ll b/test/CodeGen/AMDGPU/r600.alu-limits.ll
new file mode 100644
index 000000000000..2604ed4e574c
--- /dev/null
+++ b/test/CodeGen/AMDGPU/r600.alu-limits.ll
@@ -0,0 +1,29 @@
+; RUN: opt -loop-unroll -unroll-threshold=2000 -S < %s | llc -march=r600 -mcpu=cypress | FileCheck %s
+; REQUIRES: asserts
+
+; CHECK: {{^}}alu_limits:
+; CHECK: CF_END
+
+%struct.foo = type {i32, i32, i32}
+
+define amdgpu_kernel void @alu_limits(i32 addrspace(1)* %out, %struct.foo* %in, i32 %offset) {
+entry:
+ %ptr = getelementptr inbounds %struct.foo, %struct.foo* %in, i32 1, i32 2
+ %x = load i32, i32* %ptr, align 4
+ br label %loop
+loop:
+ %i = phi i32 [ 100, %entry ], [ %nexti, %loop ]
+ %val = phi i32 [ 1, %entry ], [ %nextval, %loop ]
+
+ %nexti = sub i32 %i, 1
+
+ %y = xor i32 %x, %i
+ %nextval = mul i32 %val, %y
+
+ %cond = icmp ne i32 %nexti, 0
+ br i1 %cond, label %loop, label %end
+end:
+ %out_val = add i32 %nextval, 4
+ store i32 %out_val, i32 addrspace(1)* %out, align 4
+ ret void
+}
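+
+; With -unroll-threshold=2000 the 100-iteration loop above is fully
+; unrolled, leaving a single block with several hundred ALU instructions,
+; presumably more than fit in one ALU clause; the test only checks that
+; the emitted program still terminates with CF_END.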
diff --git a/test/CodeGen/AMDGPU/r600.amdgpu-alias-analysis.ll b/test/CodeGen/AMDGPU/r600.amdgpu-alias-analysis.ll
new file mode 100644
index 000000000000..8956d113e8b5
--- /dev/null
+++ b/test/CodeGen/AMDGPU/r600.amdgpu-alias-analysis.ll
@@ -0,0 +1,7 @@
+; RUN: opt -mtriple=r600-- -O3 -aa-eval -print-all-alias-modref-info -disable-output < %s 2>&1 | FileCheck %s
+
+; CHECK: NoAlias: i8 addrspace(7)* %p1, i8* %p
+
+define amdgpu_kernel void @test(i8* %p, i8 addrspace(7)* %p1) {
+ ret void
+}
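+
+; %p is a generic pointer while %p1 lives in the target-specific address
+; space 7, so the AMDGPU alias analysis can answer NoAlias from the
+; address spaces alone; the function body needs no memory operations.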
diff --git a/test/CodeGen/AMDGPU/r600.bitcast.ll b/test/CodeGen/AMDGPU/r600.bitcast.ll
index 49441ee8d186..acf7a66a357f 100644
--- a/test/CodeGen/AMDGPU/r600.bitcast.ll
+++ b/test/CodeGen/AMDGPU/r600.bitcast.ll
@@ -8,7 +8,7 @@
; EG: VTX_READ_128 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]]
; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z
; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal
-define void @i8ptr_v16i8ptr(<16 x i8> addrspace(1)* %out, i8 addrspace(1)* %in) {
+define amdgpu_kernel void @i8ptr_v16i8ptr(<16 x i8> addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
%0 = bitcast i8 addrspace(1)* %in to <16 x i8> addrspace(1)*
%1 = load <16 x i8>, <16 x i8> addrspace(1)* %0
@@ -21,7 +21,7 @@ entry:
; EG: VTX_READ_32 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]]
; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z
; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal
-define void @f32_to_v2i16(<2 x i16> addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @f32_to_v2i16(<2 x i16> addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%load = load float, float addrspace(1)* %in, align 4
%bc = bitcast float %load to <2 x i16>
store <2 x i16> %bc, <2 x i16> addrspace(1)* %out, align 4
@@ -33,7 +33,7 @@ define void @f32_to_v2i16(<2 x i16> addrspace(1)* %out, float addrspace(1)* %in)
; EG: VTX_READ_32 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]]
; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z
; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal
-define void @v2i16_to_f32(float addrspace(1)* %out, <2 x i16> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @v2i16_to_f32(float addrspace(1)* %out, <2 x i16> addrspace(1)* %in) nounwind {
%load = load <2 x i16>, <2 x i16> addrspace(1)* %in, align 4
%bc = bitcast <2 x i16> %load to float
store float %bc, float addrspace(1)* %out, align 4
@@ -45,7 +45,7 @@ define void @v2i16_to_f32(float addrspace(1)* %out, <2 x i16> addrspace(1)* %in)
; EG: VTX_READ_32 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]]
; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z
; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal
-define void @v4i8_to_i32(i32 addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @v4i8_to_i32(i32 addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
%load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
%bc = bitcast <4 x i8> %load to i32
store i32 %bc, i32 addrspace(1)* %out, align 4
@@ -57,7 +57,7 @@ define void @v4i8_to_i32(i32 addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nou
; EG: VTX_READ_32 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]]
; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z
; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal
-define void @i32_to_v4i8(<4 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @i32_to_v4i8(<4 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%load = load i32, i32 addrspace(1)* %in, align 4
%bc = bitcast i32 %load to <4 x i8>
store <4 x i8> %bc, <4 x i8> addrspace(1)* %out, align 4
@@ -69,7 +69,7 @@ define void @i32_to_v4i8(<4 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nou
; EG: VTX_READ_32 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]]
; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z
; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal
-define void @v2i16_to_v4i8(<4 x i8> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @v2i16_to_v4i8(<4 x i8> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) nounwind {
%load = load <2 x i16>, <2 x i16> addrspace(1)* %in, align 4
%bc = bitcast <2 x i16> %load to <4 x i8>
store <4 x i8> %bc, <4 x i8> addrspace(1)* %out, align 4
@@ -85,7 +85,7 @@ define void @v2i16_to_v4i8(<4 x i8> addrspace(1)* %out, <2 x i16> addrspace(1)*
; EG: VTX_READ_16
; EG-DAG: BFE_UINT
; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal
-define void @v4i16_extract_i8(i8 addrspace(1)* %out, <4 x i16> addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @v4i16_extract_i8(i8 addrspace(1)* %out, <4 x i16> addrspace(1)* %in) nounwind {
%load = load <4 x i16>, <4 x i16> addrspace(1)* %in, align 2
%bc = bitcast <4 x i16> %load to <8 x i8>
%element = extractelement <8 x i8> %bc, i32 5
@@ -98,7 +98,7 @@ define void @v4i16_extract_i8(i8 addrspace(1)* %out, <4 x i16> addrspace(1)* %in
; EG: VTX_READ_64 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]]
; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z
; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal
-define void @bitcast_v2i32_to_f64(double addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @bitcast_v2i32_to_f64(double addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%val = load <2 x i32>, <2 x i32> addrspace(1)* %in, align 8
%bc = bitcast <2 x i32> %val to double
store double %bc, double addrspace(1)* %out, align 8
diff --git a/test/CodeGen/AMDGPU/r600.global_atomics.ll b/test/CodeGen/AMDGPU/r600.global_atomics.ll
new file mode 100644
index 000000000000..1ddc41feb006
--- /dev/null
+++ b/test/CodeGen/AMDGPU/r600.global_atomics.ll
@@ -0,0 +1,542 @@
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cayman -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+; TODO: Add _RTN versions and merge with the GCN test
+
+; FUNC-LABEL: {{^}}atomic_add_i32_offset:
+; EG: MEM_RAT ATOMIC_ADD [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_add_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_add_i32_soffset:
+; EG: MEM_RAT ATOMIC_ADD [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_add_i32_soffset(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 9000
+ %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_add_i32_huge_offset:
+; FIXME: looks like the offset is wrong
+; EG: MEM_RAT ATOMIC_ADD [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_add_i32_huge_offset(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 47224239175595
+ %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_add_i32_addr64_offset:
+; EG: MEM_RAT ATOMIC_ADD [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_add_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_add_i32:
+; EG: MEM_RAT ATOMIC_ADD [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_add_i32(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %val = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_add_i32_addr64:
+; EG: MEM_RAT ATOMIC_ADD [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_add_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_and_i32_offset:
+; EG: MEM_RAT ATOMIC_AND [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_and_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_and_i32_addr64_offset:
+; EG: MEM_RAT ATOMIC_AND [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_and_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_and_i32:
+; EG: MEM_RAT ATOMIC_AND [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_and_i32(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %val = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_and_i32_addr64:
+; EG: MEM_RAT ATOMIC_AND [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_and_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_sub_i32_offset:
+; EG: MEM_RAT ATOMIC_SUB [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_sub_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_sub_i32_addr64_offset:
+; EG: MEM_RAT ATOMIC_SUB [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_sub_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_sub_i32:
+; EG: MEM_RAT ATOMIC_SUB [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_sub_i32(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %val = atomicrmw volatile sub i32 addrspace(1)* %out, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_sub_i32_addr64:
+; EG: MEM_RAT ATOMIC_SUB [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_sub_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_max_i32_offset:
+; EG: MEM_RAT ATOMIC_MAX_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_max_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_max_i32_addr64_offset:
+; EG: MEM_RAT ATOMIC_MAX_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_max_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_max_i32:
+; EG: MEM_RAT ATOMIC_MAX_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_max_i32(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_max_i32_addr64:
+; EG: MEM_RAT ATOMIC_MAX_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_max_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_umax_i32_offset:
+; EG: MEM_RAT ATOMIC_MAX_UINT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_umax_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_umax_i32_addr64_offset:
+; EG: MEM_RAT ATOMIC_MAX_UINT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_umax_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_umax_i32:
+; EG: MEM_RAT ATOMIC_MAX_UINT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_umax_i32(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_umax_i32_addr64:
+; EG: MEM_RAT ATOMIC_MAX_UINT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_umax_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_min_i32_offset:
+; EG: MEM_RAT ATOMIC_MIN_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_min_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_min_i32_addr64_offset:
+; EG: MEM_RAT ATOMIC_MIN_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_min_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_min_i32:
+; EG: MEM_RAT ATOMIC_MIN_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_min_i32(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_min_i32_addr64:
+; EG: MEM_RAT ATOMIC_MIN_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_min_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_umin_i32_offset:
+; EG: MEM_RAT ATOMIC_MIN_UINT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_umin_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_umin_i32_addr64_offset:
+; EG: MEM_RAT ATOMIC_MIN_UINT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_umin_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_umin_i32:
+; EG: MEM_RAT ATOMIC_MIN_UINT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_umin_i32(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_umin_i32_addr64:
+; EG: MEM_RAT ATOMIC_MIN_UINT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_umin_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_or_i32_offset:
+; EG: MEM_RAT ATOMIC_OR [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_or_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_or_i32_addr64_offset:
+; EG: MEM_RAT ATOMIC_OR [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_or_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_or_i32:
+; EG: MEM_RAT ATOMIC_OR [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_or_i32(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %val = atomicrmw volatile or i32 addrspace(1)* %out, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_or_i32_addr64:
+; EG: MEM_RAT ATOMIC_OR [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_or_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_xchg_i32_offset:
+; EG: MEM_RAT ATOMIC_XCHG_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_xchg_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_xchg_i32_addr64_offset:
+; EG: MEM_RAT ATOMIC_XCHG_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_xchg_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_xchg_i32:
+; EG: MEM_RAT ATOMIC_XCHG_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_xchg_i32(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %val = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_xchg_i32_addr64:
+; EG: MEM_RAT ATOMIC_XCHG_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_xchg_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i32_offset:
+; EG: MEM_RAT ATOMIC_CMPXCHG_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_cmpxchg_i32_offset(i32 addrspace(1)* %out, i32 %in, i32 %old) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i32_addr64_offset:
+; EG: MEM_RAT ATOMIC_CMPXCHG_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_cmpxchg_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index, i32 %old) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i32:
+; EG: MEM_RAT ATOMIC_CMPXCHG_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_cmpxchg_i32(i32 addrspace(1)* %out, i32 %in, i32 %old) {
+entry:
+ %val = cmpxchg volatile i32 addrspace(1)* %out, i32 %old, i32 %in seq_cst seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i32_addr64:
+; EG: MEM_RAT ATOMIC_CMPXCHG_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_cmpxchg_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index, i32 %old) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = cmpxchg volatile i32 addrspace(1)* %ptr, i32 %old, i32 %in seq_cst seq_cst
+ ret void
+}
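+
+; cmpxchg yields a { i32, i1 } pair even though the kernels above discard
+; it (only the non-returning RAT op is exercised; see the _RTN TODO). A
+; use of the result would look like this sketch:
+;   %pair = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
+;   %loaded = extractvalue { i32, i1 } %pair, 0
+;   %success = extractvalue { i32, i1 } %pair, 1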
+
+; FUNC-LABEL: {{^}}atomic_xor_i32_offset:
+; EG: MEM_RAT ATOMIC_XOR [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_xor_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_xor_i32_addr64_offset:
+; EG: MEM_RAT ATOMIC_XOR [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_xor_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_xor_i32:
+; EG: MEM_RAT ATOMIC_XOR [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_xor_i32(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %val = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_xor_i32_addr64:
+; EG: MEM_RAT ATOMIC_XOR [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Z
+define amdgpu_kernel void @atomic_xor_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_store_i32_offset:
+; EG: MEM_RAT ATOMIC_XCHG_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Y
+define amdgpu_kernel void @atomic_store_i32_offset(i32 %in, i32 addrspace(1)* %out) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ store atomic i32 %in, i32 addrspace(1)* %gep seq_cst, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_store_i32:
+; EG: MEM_RAT ATOMIC_XCHG_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Y
+define amdgpu_kernel void @atomic_store_i32(i32 %in, i32 addrspace(1)* %out) {
+entry:
+ store atomic i32 %in, i32 addrspace(1)* %out seq_cst, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_store_i32_addr64_offset:
+; EG: MEM_RAT ATOMIC_XCHG_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Y
+define amdgpu_kernel void @atomic_store_i32_addr64_offset(i32 %in, i32 addrspace(1)* %out, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ store atomic i32 %in, i32 addrspace(1)* %gep seq_cst, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_store_i32_addr64:
+; EG: MEM_RAT ATOMIC_XCHG_INT [[REG:T[0-9]+]]
+; EG: MOV{{[ *]*}}[[REG]].X, KC0[2].Y
+define amdgpu_kernel void @atomic_store_i32_addr64(i32 %in, i32 addrspace(1)* %out, i64 %index) {
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ store atomic i32 %in, i32 addrspace(1)* %ptr seq_cst, align 4
+ ret void
+}
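+
+; Note that the seq_cst atomic stores above are matched as
+; ATOMIC_XCHG_INT: the value is exchanged into memory and the previous
+; value is simply discarded, standing in for a dedicated atomic store.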
+
+; FUNC-LABEL: {{^}}atomic_inc_add:
+; EG: MEM_RAT ATOMIC_INC_UINT
+define amdgpu_kernel void @atomic_inc_add(i32 addrspace(1)* %out) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 1 seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_dec_add:
+; EG: MEM_RAT ATOMIC_DEC_UINT
+define amdgpu_kernel void @atomic_dec_add(i32 addrspace(1)* %out) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 -1 seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_inc_sub:
+; EG: MEM_RAT ATOMIC_INC_UINT
+define amdgpu_kernel void @atomic_inc_sub(i32 addrspace(1)* %out) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 -1 seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_dec_sub:
+; EG: MEM_RAT ATOMIC_DEC_UINT
+define amdgpu_kernel void @atomic_dec_sub(i32 addrspace(1)* %out) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 1 seq_cst
+ ret void
+}
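+
+; Adding 1 and subtracting -1 are the same i32 update, so both "inc"
+; kernels select ATOMIC_INC_UINT and both "dec" kernels select
+; ATOMIC_DEC_UINT; e.g. these two sketches are equivalent:
+;   %v0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 1 seq_cst
+;   %v1 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 -1 seq_cst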
diff --git a/test/CodeGen/AMDGPU/r600.private-memory.ll b/test/CodeGen/AMDGPU/r600.private-memory.ll
index f406c160cbbe..53ee214f07ec 100644
--- a/test/CodeGen/AMDGPU/r600.private-memory.ll
+++ b/test/CodeGen/AMDGPU/r600.private-memory.ll
@@ -10,7 +10,7 @@ declare i32 @llvm.r600.read.tidig.x() nounwind readnone
; Additional check in case the move ends up in the last slot
; R600-NOT: MOV * TO.X
-define void @work_item_info(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @work_item_info(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = alloca [2 x i32]
%1 = getelementptr [2 x i32], [2 x i32]* %0, i32 0, i32 0
diff --git a/test/CodeGen/AMDGPU/r600.work-item-intrinsics.ll b/test/CodeGen/AMDGPU/r600.work-item-intrinsics.ll
index a34a48e3b7ba..9eee9a6effc9 100644
--- a/test/CodeGen/AMDGPU/r600.work-item-intrinsics.ll
+++ b/test/CodeGen/AMDGPU/r600.work-item-intrinsics.ll
@@ -2,7 +2,7 @@
; FUNC-LABEL: {{^}}tgid_x:
; EG: MEM_RAT_CACHELESS STORE_RAW T1.X
-define void @tgid_x(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @tgid_x(i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tgid.x() #0
store i32 %0, i32 addrspace(1)* %out
@@ -11,7 +11,7 @@ entry:
; FUNC-LABEL: {{^}}tgid_y:
; EG: MEM_RAT_CACHELESS STORE_RAW T1.Y
-define void @tgid_y(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @tgid_y(i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tgid.y() #0
store i32 %0, i32 addrspace(1)* %out
@@ -20,7 +20,7 @@ entry:
; FUNC-LABEL: {{^}}tgid_z:
; EG: MEM_RAT_CACHELESS STORE_RAW T1.Z
-define void @tgid_z(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @tgid_z(i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tgid.z() #0
store i32 %0, i32 addrspace(1)* %out
@@ -29,7 +29,7 @@ entry:
; FUNC-LABEL: {{^}}tidig_x:
; EG: MEM_RAT_CACHELESS STORE_RAW T0.X
-define void @tidig_x(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @tidig_x(i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tidig.x() #0
store i32 %0, i32 addrspace(1)* %out
@@ -38,7 +38,7 @@ entry:
; FUNC-LABEL: {{^}}tidig_y:
; EG: MEM_RAT_CACHELESS STORE_RAW T0.Y
-define void @tidig_y(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @tidig_y(i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tidig.y() #0
store i32 %0, i32 addrspace(1)* %out
@@ -47,7 +47,7 @@ entry:
; FUNC-LABEL: {{^}}tidig_z:
; EG: MEM_RAT_CACHELESS STORE_RAW T0.Z
-define void @tidig_z(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @tidig_z(i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tidig.z() #0
store i32 %0, i32 addrspace(1)* %out
@@ -57,7 +57,7 @@ entry:
; FUNC-LABEL: {{^}}test_implicit:
; 36 prepended implicit bytes + 4(out pointer) + 4*4 = 56
; EG: VTX_READ_32 {{T[0-9]+\.[XYZW]}}, {{T[0-9]+\.[XYZW]}}, 56
-define void @test_implicit(i32 addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_implicit(i32 addrspace(1)* %out) #1 {
%implicitarg.ptr = call noalias i8 addrspace(7)* @llvm.r600.implicitarg.ptr()
%header.ptr = bitcast i8 addrspace(7)* %implicitarg.ptr to i32 addrspace(7)*
%gep = getelementptr i32, i32 addrspace(7)* %header.ptr, i32 4
@@ -69,7 +69,7 @@ define void @test_implicit(i32 addrspace(1)* %out) #1 {
; FUNC-LABEL: {{^}}test_implicit_dyn:
; 36 prepended implicit bytes + 8(out pointer + in) = 44
; EG: VTX_READ_32 {{T[0-9]+\.[XYZW]}}, {{T[0-9]+\.[XYZW]}}, 44
-define void @test_implicit_dyn(i32 addrspace(1)* %out, i32 %in) #1 {
+define amdgpu_kernel void @test_implicit_dyn(i32 addrspace(1)* %out, i32 %in) #1 {
%implicitarg.ptr = call noalias i8 addrspace(7)* @llvm.r600.implicitarg.ptr()
%header.ptr = bitcast i8 addrspace(7)* %implicitarg.ptr to i32 addrspace(7)*
%gep = getelementptr i32, i32 addrspace(7)* %header.ptr, i32 %in
diff --git a/test/CodeGen/AMDGPU/rcp-pattern.ll b/test/CodeGen/AMDGPU/rcp-pattern.ll
index b7cc6d47cd87..fbdaeb829297 100644
--- a/test/CodeGen/AMDGPU/rcp-pattern.ll
+++ b/test/CodeGen/AMDGPU/rcp-pattern.ll
@@ -9,7 +9,7 @@
; GCN: buffer_store_dword [[RCP]]
; EG: RECIP_IEEE
-define void @rcp_pat_f32(float addrspace(1)* %out, float %src) #0 {
+define amdgpu_kernel void @rcp_pat_f32(float addrspace(1)* %out, float %src) #0 {
%rcp = fdiv float 1.0, %src
store float %rcp, float addrspace(1)* %out, align 4
ret void
@@ -21,7 +21,7 @@ define void @rcp_pat_f32(float addrspace(1)* %out, float %src) #0 {
; GCN: buffer_store_dword [[RCP]]
; EG: RECIP_IEEE
-define void @rcp_ulp25_pat_f32(float addrspace(1)* %out, float %src) #0 {
+define amdgpu_kernel void @rcp_ulp25_pat_f32(float addrspace(1)* %out, float %src) #0 {
%rcp = fdiv float 1.0, %src, !fpmath !0
store float %rcp, float addrspace(1)* %out, align 4
ret void
@@ -33,7 +33,7 @@ define void @rcp_ulp25_pat_f32(float addrspace(1)* %out, float %src) #0 {
; GCN: buffer_store_dword [[RCP]]
; EG: RECIP_IEEE
-define void @rcp_fast_ulp25_pat_f32(float addrspace(1)* %out, float %src) #0 {
+define amdgpu_kernel void @rcp_fast_ulp25_pat_f32(float addrspace(1)* %out, float %src) #0 {
%rcp = fdiv fast float 1.0, %src, !fpmath !0
store float %rcp, float addrspace(1)* %out, align 4
ret void
@@ -45,7 +45,7 @@ define void @rcp_fast_ulp25_pat_f32(float addrspace(1)* %out, float %src) #0 {
; GCN: buffer_store_dword [[RCP]]
; EG: RECIP_IEEE
-define void @rcp_arcp_ulp25_pat_f32(float addrspace(1)* %out, float %src) #0 {
+define amdgpu_kernel void @rcp_arcp_ulp25_pat_f32(float addrspace(1)* %out, float %src) #0 {
%rcp = fdiv arcp float 1.0, %src, !fpmath !0
store float %rcp, float addrspace(1)* %out, align 4
ret void
@@ -57,7 +57,7 @@ define void @rcp_arcp_ulp25_pat_f32(float addrspace(1)* %out, float %src) #0 {
; GCN: buffer_store_dword [[RCP]]
; EG: RECIP_IEEE
-define void @rcp_global_fast_ulp25_pat_f32(float addrspace(1)* %out, float %src) #2 {
+define amdgpu_kernel void @rcp_global_fast_ulp25_pat_f32(float addrspace(1)* %out, float %src) #2 {
%rcp = fdiv float 1.0, %src, !fpmath !0
store float %rcp, float addrspace(1)* %out, align 4
ret void
@@ -69,7 +69,7 @@ define void @rcp_global_fast_ulp25_pat_f32(float addrspace(1)* %out, float %src)
; GCN: buffer_store_dword [[RCP]]
; EG: RECIP_IEEE
-define void @rcp_fabs_pat_f32(float addrspace(1)* %out, float %src) #0 {
+define amdgpu_kernel void @rcp_fabs_pat_f32(float addrspace(1)* %out, float %src) #0 {
%src.fabs = call float @llvm.fabs.f32(float %src)
%rcp = fdiv float 1.0, %src.fabs
store float %rcp, float addrspace(1)* %out, align 4
@@ -82,7 +82,7 @@ define void @rcp_fabs_pat_f32(float addrspace(1)* %out, float %src) #0 {
; GCN: buffer_store_dword [[RCP]]
; EG: RECIP_IEEE
-define void @neg_rcp_pat_f32(float addrspace(1)* %out, float %src) #0 {
+define amdgpu_kernel void @neg_rcp_pat_f32(float addrspace(1)* %out, float %src) #0 {
%rcp = fdiv float -1.0, %src
store float %rcp, float addrspace(1)* %out, align 4
ret void
@@ -92,7 +92,7 @@ define void @neg_rcp_pat_f32(float addrspace(1)* %out, float %src) #0 {
; GCN: s_load_dword [[SRC:s[0-9]+]]
; GCN: v_rcp_f32_e64 [[RCP:v[0-9]+]], -|[[SRC]]|
; GCN: buffer_store_dword [[RCP]]
-define void @rcp_fabs_fneg_pat_f32(float addrspace(1)* %out, float %src) #0 {
+define amdgpu_kernel void @rcp_fabs_fneg_pat_f32(float addrspace(1)* %out, float %src) #0 {
%src.fabs = call float @llvm.fabs.f32(float %src)
%src.fabs.fneg = fsub float -0.0, %src.fabs
%rcp = fdiv float 1.0, %src.fabs.fneg
@@ -106,7 +106,7 @@ define void @rcp_fabs_fneg_pat_f32(float addrspace(1)* %out, float %src) #0 {
; GCN: v_mul_f32_e64 [[MUL:v[0-9]+]], [[SRC]], -|[[SRC]]|
; GCN: buffer_store_dword [[RCP]]
; GCN: buffer_store_dword [[MUL]]
-define void @rcp_fabs_fneg_pat_multi_use_f32(float addrspace(1)* %out, float %src) #0 {
+define amdgpu_kernel void @rcp_fabs_fneg_pat_multi_use_f32(float addrspace(1)* %out, float %src) #0 {
%src.fabs = call float @llvm.fabs.f32(float %src)
%src.fabs.fneg = fsub float -0.0, %src.fabs
%rcp = fdiv float 1.0, %src.fabs.fneg
@@ -117,6 +117,35 @@ define void @rcp_fabs_fneg_pat_multi_use_f32(float addrspace(1)* %out, float %sr
ret void
}
+; FUNC-LABEL: {{^}}div_arcp_2_x_pat_f32:
+; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], 0.5, v{{[0-9]+}}
+; GCN: buffer_store_dword [[MUL]]
+define amdgpu_kernel void @div_arcp_2_x_pat_f32(float addrspace(1)* %out) #0 {
+ %x = load float, float addrspace(1)* undef
+ %rcp = fdiv arcp float %x, 2.0
+ store float %rcp, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}div_arcp_k_x_pat_f32:
+; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], 0x3dcccccd, v{{[0-9]+}}
+; GCN: buffer_store_dword [[MUL]]
+define amdgpu_kernel void @div_arcp_k_x_pat_f32(float addrspace(1)* %out) #0 {
+ %x = load float, float addrspace(1)* undef
+ %rcp = fdiv arcp float %x, 10.0
+ store float %rcp, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}div_arcp_neg_k_x_pat_f32:
+; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], 0xbdcccccd, v{{[0-9]+}}
+; GCN: buffer_store_dword [[MUL]]
+define amdgpu_kernel void @div_arcp_neg_k_x_pat_f32(float addrspace(1)* %out) #0 {
+ %x = load float, float addrspace(1)* undef
+ %rcp = fdiv arcp float %x, -10.0
+ store float %rcp, float addrspace(1)* %out, align 4
+ ret void
+}
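+
+; 0x3dcccccd is the IEEE-754 single-precision encoding of 0.1 (and
+; 0xbdcccccd of -0.1): with the arcp flag the divisions by 2.0, 10.0 and
+; -10.0 above are all folded into a multiply by the reciprocal constant.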
declare float @llvm.fabs.f32(float) #1
declare float @llvm.sqrt.f32(float) #1
diff --git a/test/CodeGen/AMDGPU/read-register-invalid-subtarget.ll b/test/CodeGen/AMDGPU/read-register-invalid-subtarget.ll
index a5581d73cb25..34cbe3963361 100644
--- a/test/CodeGen/AMDGPU/read-register-invalid-subtarget.ll
+++ b/test/CodeGen/AMDGPU/read-register-invalid-subtarget.ll
@@ -4,7 +4,7 @@
declare i32 @llvm.read_register.i32(metadata) #0
-define void @test_invalid_read_flat_scratch_lo(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @test_invalid_read_flat_scratch_lo(i32 addrspace(1)* %out) nounwind {
store volatile i32 0, i32 addrspace(3)* undef
%m0 = call i32 @llvm.read_register.i32(metadata !0)
store i32 %m0, i32 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/read-register-invalid-type-i32.ll b/test/CodeGen/AMDGPU/read-register-invalid-type-i32.ll
index 2617ad7402ff..6417d28e7aad 100644
--- a/test/CodeGen/AMDGPU/read-register-invalid-type-i32.ll
+++ b/test/CodeGen/AMDGPU/read-register-invalid-type-i32.ll
@@ -4,7 +4,7 @@
declare i32 @llvm.read_register.i32(metadata) #0
-define void @test_invalid_read_exec(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @test_invalid_read_exec(i32 addrspace(1)* %out) nounwind {
store volatile i32 0, i32 addrspace(3)* undef
%m0 = call i32 @llvm.read_register.i32(metadata !0)
store i32 %m0, i32 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/read-register-invalid-type-i64.ll b/test/CodeGen/AMDGPU/read-register-invalid-type-i64.ll
index dcde8a1894fc..8e248fdfea4c 100644
--- a/test/CodeGen/AMDGPU/read-register-invalid-type-i64.ll
+++ b/test/CodeGen/AMDGPU/read-register-invalid-type-i64.ll
@@ -4,7 +4,7 @@
declare i64 @llvm.read_register.i64(metadata) #0
-define void @test_invalid_read_m0(i64 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @test_invalid_read_m0(i64 addrspace(1)* %out) #0 {
%exec = call i64 @llvm.read_register.i64(metadata !0)
store i64 %exec, i64 addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/read_register.ll b/test/CodeGen/AMDGPU/read_register.ll
index 601a0adb8122..8fe9e7f3f111 100644
--- a/test/CodeGen/AMDGPU/read_register.ll
+++ b/test/CodeGen/AMDGPU/read_register.ll
@@ -9,7 +9,7 @@ declare i64 @llvm.read_register.i64(metadata) #0
; CHECK: s_mov_b32 [[COPY_M0:s[0-9]+]], m0
; CHECK: v_mov_b32_e32 [[COPY:v[0-9]+]], [[COPY_M0]]
; CHECK: buffer_store_dword [[COPY]]
-define void @test_read_m0(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @test_read_m0(i32 addrspace(1)* %out) #0 {
store volatile i32 0, i32 addrspace(3)* undef
%m0 = call i32 @llvm.read_register.i32(metadata !0)
store i32 %m0, i32 addrspace(1)* %out
@@ -20,7 +20,7 @@ define void @test_read_m0(i32 addrspace(1)* %out) #0 {
; CHECK: v_mov_b32_e32 v[[LO:[0-9]+]], exec_lo
; CHECK: v_mov_b32_e32 v[[HI:[0-9]+]], exec_hi
; CHECK: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @test_read_exec(i64 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @test_read_exec(i64 addrspace(1)* %out) #0 {
%exec = call i64 @llvm.read_register.i64(metadata !1)
store i64 %exec, i64 addrspace(1)* %out
ret void
@@ -30,7 +30,7 @@ define void @test_read_exec(i64 addrspace(1)* %out) #0 {
; CHECK: v_mov_b32_e32 v[[LO:[0-9]+]], flat_scratch_lo
; CHECK: v_mov_b32_e32 v[[HI:[0-9]+]], flat_scratch_hi
; CHECK: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @test_read_flat_scratch(i64 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @test_read_flat_scratch(i64 addrspace(1)* %out) #0 {
%flat_scratch = call i64 @llvm.read_register.i64(metadata !2)
store i64 %flat_scratch, i64 addrspace(1)* %out
ret void
@@ -39,7 +39,7 @@ define void @test_read_flat_scratch(i64 addrspace(1)* %out) #0 {
; CHECK-LABEL: {{^}}test_read_flat_scratch_lo:
; CHECK: v_mov_b32_e32 [[COPY:v[0-9]+]], flat_scratch_lo
; CHECK: buffer_store_dword [[COPY]]
-define void @test_read_flat_scratch_lo(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @test_read_flat_scratch_lo(i32 addrspace(1)* %out) #0 {
%flat_scratch_lo = call i32 @llvm.read_register.i32(metadata !3)
store i32 %flat_scratch_lo, i32 addrspace(1)* %out
ret void
@@ -48,7 +48,7 @@ define void @test_read_flat_scratch_lo(i32 addrspace(1)* %out) #0 {
; CHECK-LABEL: {{^}}test_read_flat_scratch_hi:
; CHECK: v_mov_b32_e32 [[COPY:v[0-9]+]], flat_scratch_hi
; CHECK: buffer_store_dword [[COPY]]
-define void @test_read_flat_scratch_hi(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @test_read_flat_scratch_hi(i32 addrspace(1)* %out) #0 {
%flat_scratch_hi = call i32 @llvm.read_register.i32(metadata !4)
store i32 %flat_scratch_hi, i32 addrspace(1)* %out
ret void
@@ -57,7 +57,7 @@ define void @test_read_flat_scratch_hi(i32 addrspace(1)* %out) #0 {
; CHECK-LABEL: {{^}}test_read_exec_lo:
; CHECK: v_mov_b32_e32 [[COPY:v[0-9]+]], exec_lo
; CHECK: buffer_store_dword [[COPY]]
-define void @test_read_exec_lo(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @test_read_exec_lo(i32 addrspace(1)* %out) #0 {
%exec_lo = call i32 @llvm.read_register.i32(metadata !5)
store i32 %exec_lo, i32 addrspace(1)* %out
ret void
@@ -66,7 +66,7 @@ define void @test_read_exec_lo(i32 addrspace(1)* %out) #0 {
; CHECK-LABEL: {{^}}test_read_exec_hi:
; CHECK: v_mov_b32_e32 [[COPY:v[0-9]+]], exec_hi
; CHECK: buffer_store_dword [[COPY]]
-define void @test_read_exec_hi(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @test_read_exec_hi(i32 addrspace(1)* %out) #0 {
%exec_hi = call i32 @llvm.read_register.i32(metadata !6)
store i32 %exec_hi, i32 addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/readcyclecounter.ll b/test/CodeGen/AMDGPU/readcyclecounter.ll
index 7965b061fe5b..5c698c839fa6 100644
--- a/test/CodeGen/AMDGPU/readcyclecounter.ll
+++ b/test/CodeGen/AMDGPU/readcyclecounter.ll
@@ -13,7 +13,7 @@ declare i64 @llvm.readcyclecounter() #0
; SI: s_memtime s{{\[[0-9]+:[0-9]+\]}}
; VI: s_memrealtime s{{\[[0-9]+:[0-9]+\]}}
; GCN: store_dwordx2
-define void @test_readcyclecounter(i64 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @test_readcyclecounter(i64 addrspace(1)* %out) #0 {
%cycle0 = call i64 @llvm.readcyclecounter()
store volatile i64 %cycle0, i64 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll b/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll
index dd67dc488dbf..ecb513cd80b6 100644
--- a/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll
+++ b/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll
@@ -6,7 +6,7 @@
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_and_b32_e32 v{{[0-9]+}}, 0x12d687, [[VAL]]
; GCN: buffer_store_dwordx2
-define void @reduce_i64_load_align_4_width_to_i32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @reduce_i64_load_align_4_width_to_i32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
%a = load i64, i64 addrspace(1)* %in, align 4
%and = and i64 %a, 1234567
store i64 %and, i64 addrspace(1)* %out, align 8
@@ -16,7 +16,7 @@ define void @reduce_i64_load_align_4_width_to_i32(i64 addrspace(1)* %out, i64 ad
; GCN-LABEL: {{^}}reduce_i64_align_4_bitcast_v2i32_elt0:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: buffer_store_dword [[VAL]]
-define void @reduce_i64_align_4_bitcast_v2i32_elt0(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @reduce_i64_align_4_bitcast_v2i32_elt0(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
%a = load i64, i64 addrspace(1)* %in, align 4
%vec = bitcast i64 %a to <2 x i32>
%elt0 = extractelement <2 x i32> %vec, i32 0
@@ -27,7 +27,7 @@ define void @reduce_i64_align_4_bitcast_v2i32_elt0(i32 addrspace(1)* %out, i64 a
; GCN-LABEL: {{^}}reduce_i64_align_4_bitcast_v2i32_elt1:
; GCN: buffer_load_dword [[VAL:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:4
; GCN: buffer_store_dword [[VAL]]
-define void @reduce_i64_align_4_bitcast_v2i32_elt1(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @reduce_i64_align_4_bitcast_v2i32_elt1(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
%a = load i64, i64 addrspace(1)* %in, align 4
%vec = bitcast i64 %a to <2 x i32>
%elt0 = extractelement <2 x i32> %vec, i32 1
diff --git a/test/CodeGen/AMDGPU/reduce-store-width-alignment.ll b/test/CodeGen/AMDGPU/reduce-store-width-alignment.ll
index 281e49f804c6..601aca48e1e2 100644
--- a/test/CodeGen/AMDGPU/reduce-store-width-alignment.ll
+++ b/test/CodeGen/AMDGPU/reduce-store-width-alignment.ll
@@ -3,7 +3,7 @@
; GCN-LABEL: {{^}}store_v2i32_as_v4i16_align_4:
; GCN: s_load_dwordx2
; GCN: ds_write2_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset1:1{{$}}
-define void @store_v2i32_as_v4i16_align_4(<4 x i16> addrspace(3)* align 4 %out, <2 x i32> %x) #0 {
+define amdgpu_kernel void @store_v2i32_as_v4i16_align_4(<4 x i16> addrspace(3)* align 4 %out, <2 x i32> %x) #0 {
%x.bc = bitcast <2 x i32> %x to <4 x i16>
store <4 x i16> %x.bc, <4 x i16> addrspace(3)* %out, align 4
ret void
@@ -13,7 +13,7 @@ define void @store_v2i32_as_v4i16_align_4(<4 x i16> addrspace(3)* align 4 %out,
; GCN: s_load_dwordx4
; GCN-DAG: ds_write2_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset0:2 offset1:3
; GCN-DAG: ds_write2_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset1:1{{$}}
-define void @store_v4i32_as_v8i16_align_4(<8 x i16> addrspace(3)* align 4 %out, <4 x i32> %x) #0 {
+define amdgpu_kernel void @store_v4i32_as_v8i16_align_4(<8 x i16> addrspace(3)* align 4 %out, <4 x i32> %x) #0 {
%x.bc = bitcast <4 x i32> %x to <8 x i16>
store <8 x i16> %x.bc, <8 x i16> addrspace(3)* %out, align 4
ret void
@@ -22,7 +22,7 @@ define void @store_v4i32_as_v8i16_align_4(<8 x i16> addrspace(3)* align 4 %out,
; GCN-LABEL: {{^}}store_v2i32_as_i64_align_4:
; GCN: s_load_dwordx2
; GCN: ds_write2_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset1:1{{$}}
-define void @store_v2i32_as_i64_align_4(<4 x i16> addrspace(3)* align 4 %out, <2 x i32> %x) #0 {
+define amdgpu_kernel void @store_v2i32_as_i64_align_4(<4 x i16> addrspace(3)* align 4 %out, <2 x i32> %x) #0 {
%x.bc = bitcast <2 x i32> %x to <4 x i16>
store <4 x i16> %x.bc, <4 x i16> addrspace(3)* %out, align 4
ret void
@@ -32,7 +32,7 @@ define void @store_v2i32_as_i64_align_4(<4 x i16> addrspace(3)* align 4 %out, <2
; GCN: s_load_dwordx4
; GCN-DAG: ds_write2_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset0:2 offset1:3
; GCN-DAG: ds_write2_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset1:1{{$}}
-define void @store_v4i32_as_v2i64_align_4(<2 x i64> addrspace(3)* align 4 %out, <4 x i32> %x) #0 {
+define amdgpu_kernel void @store_v4i32_as_v2i64_align_4(<2 x i64> addrspace(3)* align 4 %out, <4 x i32> %x) #0 {
%x.bc = bitcast <4 x i32> %x to <2 x i64>
store <2 x i64> %x.bc, <2 x i64> addrspace(3)* %out, align 4
ret void
@@ -44,7 +44,7 @@ define void @store_v4i32_as_v2i64_align_4(<2 x i64> addrspace(3)* align 4 %out,
; GCN: buffer_load_ushort
; GCN: buffer_load_ushort
; GCN: ds_write2_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset1:1{{$}}
-define void @store_v4i16_as_v2i32_align_4(<2 x i32> addrspace(3)* align 4 %out, <4 x i16> %x) #0 {
+define amdgpu_kernel void @store_v4i16_as_v2i32_align_4(<2 x i32> addrspace(3)* align 4 %out, <4 x i16> %x) #0 {
%x.bc = bitcast <4 x i16> %x to <2 x i32>
store <2 x i32> %x.bc, <2 x i32> addrspace(3)* %out, align 4
ret void
diff --git a/test/CodeGen/AMDGPU/reg-coalescer-sched-crash.ll b/test/CodeGen/AMDGPU/reg-coalescer-sched-crash.ll
index 909644850750..9f8667d35993 100644
--- a/test/CodeGen/AMDGPU/reg-coalescer-sched-crash.ll
+++ b/test/CodeGen/AMDGPU/reg-coalescer-sched-crash.ll
@@ -6,7 +6,7 @@
declare i32 @llvm.amdgcn.workitem.id.x() #0
-define void @reg_coalescer_breaks_dead(<2 x i32> addrspace(1)* nocapture readonly %arg, i32 %arg1, i32 %arg2, i32 %arg3) #1 {
+define amdgpu_kernel void @reg_coalescer_breaks_dead(<2 x i32> addrspace(1)* nocapture readonly %arg, i32 %arg1, i32 %arg2, i32 %arg3) #1 {
bb:
%id.x = call i32 @llvm.amdgcn.workitem.id.x()
%cmp0 = icmp eq i32 %id.x, 0
diff --git a/test/CodeGen/AMDGPU/regcoalesce-dbg.mir b/test/CodeGen/AMDGPU/regcoalesce-dbg.mir
new file mode 100644
index 000000000000..ecf94b5772ff
--- /dev/null
+++ b/test/CodeGen/AMDGPU/regcoalesce-dbg.mir
@@ -0,0 +1,76 @@
+# RUN: llc -march=amdgcn -run-pass simple-register-coalescing -o - %s | FileCheck %s
+
+# Test that register coalescing does not allow a call to
+# LIS->getInstructionIndex with a DBG_VALUE instruction, which does not have
+# a slot index.
+
+# CHECK: %13.sub2 = S_MOV_B32 0
+# CHECK: DBG_VALUE{{.*}}debug-use %13.sub2
+
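+# On the C++ side, the guard amounts to skipping debug instructions
+# before querying slot indexes; a rough sketch against the usual
+# LiveIntervals/MachineInstr API (not the actual patch):
+#
+#   for (MachineInstr &MI : MBB) {
+#     if (MI.isDebugValue())
+#       continue;                  // DBG_VALUE has no SlotIndex
+#     SlotIndex Idx = LIS->getInstructionIndex(MI);
+#     ...
+#   }
+#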
+--- |
+ define amdgpu_kernel void @test(i32 addrspace(1)* %out) { ret void }
+
+ !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !4, producer: "llvm", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, retainedTypes: !4)
+ !1 = !DILocalVariable(name: "a", scope: !2, file: !4, line: 126, type: !6)
+ !2 = distinct !DISubprogram(name: "test", scope: !4, file: !4, line: 1, type: !3, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !5)
+ !3 = !DISubroutineType(types: !4)
+ !4 = !{null}
+ !5 = !{!1}
+ !6 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 64, align: 32)
+ !7 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
+ !8 = !DIExpression()
+ !9 = !DILocation(line: 126, column: 9, scope: !2)
+
+...
+---
+name: test
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: sgpr_64 }
+ - { id: 1, class: sreg_32_xm0 }
+ - { id: 2, class: sgpr_32 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_64_xexec }
+ - { id: 5, class: sreg_32_xm0_xexec }
+ - { id: 6, class: sreg_32 }
+ - { id: 7, class: sreg_32 }
+ - { id: 8, class: sreg_32_xm0 }
+ - { id: 9, class: sreg_64 }
+ - { id: 10, class: sreg_32_xm0 }
+ - { id: 11, class: sreg_32_xm0 }
+ - { id: 12, class: sgpr_64 }
+ - { id: 13, class: sgpr_128 }
+ - { id: 14, class: sreg_32_xm0 }
+ - { id: 15, class: sreg_64 }
+ - { id: 16, class: vgpr_32 }
+ - { id: 17, class: vreg_64 }
+ - { id: 18, class: vgpr_32 }
+ - { id: 19, class: vreg_64 }
+ - { id: 20, class: vreg_64 }
+liveins:
+ - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+ - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+ bb.0:
+ liveins: %sgpr0_sgpr1, %vgpr0
+
+ %3 = COPY killed %vgpr0
+ %0 = COPY killed %sgpr0_sgpr1
+ %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+ %5 = S_LOAD_DWORD_IMM killed %0, 13, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+ %18 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+ undef %19.sub0 = COPY killed %3
+ %19.sub1 = COPY killed %18
+ %10 = S_MOV_B32 61440
+ %11 = S_MOV_B32 0
+ DBG_VALUE debug-use %11, debug-use _, !1, !8, debug-location !9
+ undef %12.sub0 = COPY killed %11
+ %12.sub1 = COPY killed %10
+ undef %13.sub0_sub1 = COPY killed %4
+ %13.sub2_sub3 = COPY killed %12
+ %20 = V_LSHL_B64 killed %19, 2, implicit %exec
+ %16 = COPY killed %5
+ BUFFER_STORE_DWORD_ADDR64 killed %16, killed %20, killed %13, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.out)
+ S_ENDPGM
+
+...
diff --git a/test/CodeGen/AMDGPU/register-count-comments.ll b/test/CodeGen/AMDGPU/register-count-comments.ll
index bff3a9f5d2b0..26a76cf2041e 100644
--- a/test/CodeGen/AMDGPU/register-count-comments.ll
+++ b/test/CodeGen/AMDGPU/register-count-comments.ll
@@ -9,7 +9,7 @@ declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) #0
; SI: ; Kernel info:
; SI: ; NumSgprs: {{[0-9]+}}
; SI: ; NumVgprs: {{[0-9]+}}
-define void @foo(i32 addrspace(1)* noalias %out, i32 addrspace(1)* %abase, i32 addrspace(1)* %bbase) nounwind {
+define amdgpu_kernel void @foo(i32 addrspace(1)* noalias %out, i32 addrspace(1)* %abase, i32 addrspace(1)* %bbase) nounwind {
%mbcnt.lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0);
%tid = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %mbcnt.lo)
%aptr = getelementptr i32, i32 addrspace(1)* %abase, i32 %tid
@@ -24,7 +24,7 @@ define void @foo(i32 addrspace(1)* noalias %out, i32 addrspace(1)* %abase, i32 a
; SI-LABEL: {{^}}one_vgpr_used:
; SI: NumVgprs: 1
-define void @one_vgpr_used(i32 addrspace(1)* %out, i32 %x) nounwind {
+define amdgpu_kernel void @one_vgpr_used(i32 addrspace(1)* %out, i32 %x) nounwind {
store i32 %x, i32 addrspace(1)* %out, align 4
ret void
}
diff --git a/test/CodeGen/AMDGPU/rename-disconnected-bug.ll b/test/CodeGen/AMDGPU/rename-disconnected-bug.ll
index 47bdfba96530..5d4955aa1ce2 100644
--- a/test/CodeGen/AMDGPU/rename-disconnected-bug.ll
+++ b/test/CodeGen/AMDGPU/rename-disconnected-bug.ll
@@ -3,7 +3,7 @@
; definition on every path (there should at least be IMPLICIT_DEF instructions).
target triple = "amdgcn--"
-define void @func() {
+define amdgpu_kernel void @func() {
B0:
br i1 undef, label %B1, label %B2
diff --git a/test/CodeGen/AMDGPU/rename-independent-subregs.mir b/test/CodeGen/AMDGPU/rename-independent-subregs.mir
index b928bc7086bb..fc2e4426ba48 100644
--- a/test/CodeGen/AMDGPU/rename-independent-subregs.mir
+++ b/test/CodeGen/AMDGPU/rename-independent-subregs.mir
@@ -1,7 +1,7 @@
# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass simple-register-coalescing,rename-independent-subregs -o - %s | FileCheck %s
--- |
- define void @test0() { ret void }
- define void @test1() { ret void }
+ define amdgpu_kernel void @test0() { ret void }
+ define amdgpu_kernel void @test1() { ret void }
...
---
# In the test below we have two independent def+use pairs of subregister1 which
diff --git a/test/CodeGen/AMDGPU/reorder-stores.ll b/test/CodeGen/AMDGPU/reorder-stores.ll
index 412202fa5d51..ff4069226a62 100644
--- a/test/CodeGen/AMDGPU/reorder-stores.ll
+++ b/test/CodeGen/AMDGPU/reorder-stores.ll
@@ -7,7 +7,7 @@
; SI: buffer_store_dwordx4
; SI: buffer_store_dwordx4
; SI: s_endpgm
-define void @no_reorder_v2f64_global_load_store(<2 x double> addrspace(1)* nocapture %x, <2 x double> addrspace(1)* nocapture %y) nounwind {
+define amdgpu_kernel void @no_reorder_v2f64_global_load_store(<2 x double> addrspace(1)* nocapture %x, <2 x double> addrspace(1)* nocapture %y) nounwind {
%tmp1 = load <2 x double>, <2 x double> addrspace(1)* %x, align 16
%tmp4 = load <2 x double>, <2 x double> addrspace(1)* %y, align 16
store <2 x double> %tmp4, <2 x double> addrspace(1)* %x, align 16
@@ -19,7 +19,7 @@ define void @no_reorder_v2f64_global_load_store(<2 x double> addrspace(1)* nocap
; SI: ds_read2_b64
; SI: ds_write2_b64
; SI: s_endpgm
-define void @no_reorder_scalarized_v2f64_local_load_store(<2 x double> addrspace(3)* nocapture %x, <2 x double> addrspace(3)* nocapture %y) nounwind {
+define amdgpu_kernel void @no_reorder_scalarized_v2f64_local_load_store(<2 x double> addrspace(3)* nocapture %x, <2 x double> addrspace(3)* nocapture %y) nounwind {
%tmp1 = load <2 x double>, <2 x double> addrspace(3)* %x, align 16
%tmp4 = load <2 x double>, <2 x double> addrspace(3)* %y, align 16
store <2 x double> %tmp4, <2 x double> addrspace(3)* %x, align 16
@@ -39,7 +39,7 @@ define void @no_reorder_scalarized_v2f64_local_load_store(<2 x double> addrspace
; SI: buffer_store_dwordx4
; SI: buffer_store_dwordx4
; SI: s_endpgm
-define void @no_reorder_split_v8i32_global_load_store(<8 x i32> addrspace(1)* nocapture %x, <8 x i32> addrspace(1)* nocapture %y) nounwind {
+define amdgpu_kernel void @no_reorder_split_v8i32_global_load_store(<8 x i32> addrspace(1)* nocapture %x, <8 x i32> addrspace(1)* nocapture %y) nounwind {
%tmp1 = load <8 x i32>, <8 x i32> addrspace(1)* %x, align 32
%tmp4 = load <8 x i32>, <8 x i32> addrspace(1)* %y, align 32
store <8 x i32> %tmp4, <8 x i32> addrspace(1)* %x, align 32
@@ -54,7 +54,7 @@ define void @no_reorder_split_v8i32_global_load_store(<8 x i32> addrspace(1)* no
; SI-NOT: ds_read
; SI: ds_write_b64
; SI: s_endpgm
-define void @no_reorder_extload_64(<2 x i32> addrspace(3)* nocapture %x, <2 x i32> addrspace(3)* nocapture %y) nounwind {
+define amdgpu_kernel void @no_reorder_extload_64(<2 x i32> addrspace(3)* nocapture %x, <2 x i32> addrspace(3)* nocapture %y) nounwind {
%tmp1 = load <2 x i32>, <2 x i32> addrspace(3)* %x, align 8
%tmp4 = load <2 x i32>, <2 x i32> addrspace(3)* %y, align 8
%tmp1ext = zext <2 x i32> %tmp1 to <2 x i64>
diff --git a/test/CodeGen/AMDGPU/ret.ll b/test/CodeGen/AMDGPU/ret.ll
index 515203fad4cb..831c71dff79d 100644
--- a/test/CodeGen/AMDGPU/ret.ll
+++ b/test/CodeGen/AMDGPU/ret.ll
@@ -1,25 +1,24 @@
; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-
; GCN-LABEL: {{^}}vgpr:
; GCN: v_mov_b32_e32 v1, v0
; GCN-DAG: v_add_f32_e32 v0, 1.0, v1
-; GCN-DAG: exp mrt0 v1, v1, v1, v1 done compr vm
+; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm
; GCN: s_waitcnt expcnt(0)
; GCN-NOT: s_endpgm
-define amdgpu_vs {float, float} @vgpr([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, float) {
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %3, float %3, float %3, float %3)
- %x = fadd float %3, 1.0
- %a = insertvalue {float, float} undef, float %x, 0
- %b = insertvalue {float, float} %a, float %3, 1
- ret {float, float} %b
+define amdgpu_vs { float, float } @vgpr([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
+bb:
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %arg3, float %arg3, float %arg3, float %arg3, i1 true, i1 true) #0
+ %x = fadd float %arg3, 1.000000e+00
+ %a = insertvalue { float, float } undef, float %x, 0
+ %b = insertvalue { float, float } %a, float %arg3, 1
+ ret { float, float } %b
}
; GCN-LABEL: {{^}}vgpr_literal:
; GCN: v_mov_b32_e32 v4, v0
-; GCN: exp mrt0 v4, v4, v4, v4 done compr vm
+; GCN: exp mrt0 v4, v4, v4, v4 done vm
; GCN-DAG: v_mov_b32_e32 v0, 1.0
; GCN-DAG: v_mov_b32_e32 v1, 2.0
@@ -27,12 +26,12 @@ define amdgpu_vs {float, float} @vgpr([9 x <16 x i8>] addrspace(2)* byval, i32 i
; GCN-DAG: v_mov_b32_e32 v3, -1.0
; GCN: s_waitcnt expcnt(0)
; GCN-NOT: s_endpgm
-define amdgpu_vs {float, float, float, float} @vgpr_literal([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, float) {
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %3, float %3, float %3, float %3)
- ret {float, float, float, float} {float 1.0, float 2.0, float 4.0, float -1.0}
+define amdgpu_vs { float, float, float, float } @vgpr_literal([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
+bb:
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %arg3, float %arg3, float %arg3, float %arg3, i1 true, i1 true) #0
+ ret { float, float, float, float } { float 1.000000e+00, float 2.000000e+00, float 4.000000e+00, float -1.000000e+00 }
}
-
; GCN: .long 165580
; GCN-NEXT: .long 562
; GCN-NEXT: .long 165584
@@ -44,24 +43,24 @@ define amdgpu_vs {float, float, float, float} @vgpr_literal([9 x <16 x i8>] addr
; GCN: v_mov_b32_e32 v3, v4
; GCN: v_mov_b32_e32 v4, v6
; GCN-NOT: s_endpgm
-define amdgpu_ps {float, float, float, float, float} @vgpr_ps_addr0([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
- %i0 = extractelement <2 x i32> %4, i32 0
- %i1 = extractelement <2 x i32> %4, i32 1
- %i2 = extractelement <2 x i32> %7, i32 0
- %i3 = extractelement <2 x i32> %8, i32 0
+define amdgpu_ps { float, float, float, float, float } @vgpr_ps_addr0([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <3 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, float %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18) #1 {
+bb:
+ %i0 = extractelement <2 x i32> %arg4, i32 0
+ %i1 = extractelement <2 x i32> %arg4, i32 1
+ %i2 = extractelement <2 x i32> %arg7, i32 0
+ %i3 = extractelement <2 x i32> %arg8, i32 0
%f0 = bitcast i32 %i0 to float
%f1 = bitcast i32 %i1 to float
%f2 = bitcast i32 %i2 to float
%f3 = bitcast i32 %i3 to float
- %r0 = insertvalue {float, float, float, float, float} undef, float %f0, 0
- %r1 = insertvalue {float, float, float, float, float} %r0, float %f1, 1
- %r2 = insertvalue {float, float, float, float, float} %r1, float %f2, 2
- %r3 = insertvalue {float, float, float, float, float} %r2, float %f3, 3
- %r4 = insertvalue {float, float, float, float, float} %r3, float %12, 4
- ret {float, float, float, float, float} %r4
+ %r0 = insertvalue { float, float, float, float, float } undef, float %f0, 0
+ %r1 = insertvalue { float, float, float, float, float } %r0, float %f1, 1
+ %r2 = insertvalue { float, float, float, float, float } %r1, float %f2, 2
+ %r3 = insertvalue { float, float, float, float, float } %r2, float %f3, 3
+ %r4 = insertvalue { float, float, float, float, float } %r3, float %arg12, 4
+ ret { float, float, float, float, float } %r4
}
-
; GCN: .long 165580
; GCN-NEXT: .long 1
; GCN-NEXT: .long 165584
@@ -69,11 +68,11 @@ define amdgpu_ps {float, float, float, float, float} @vgpr_ps_addr0([9 x <16 x i
; GCN-LABEL: {{^}}ps_input_ena_no_inputs:
; GCN: v_mov_b32_e32 v0, 1.0
; GCN-NOT: s_endpgm
-define amdgpu_ps float @ps_input_ena_no_inputs([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
- ret float 1.0
+define amdgpu_ps float @ps_input_ena_no_inputs([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <3 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, float %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18) #1 {
+bb:
+ ret float 1.000000e+00
}
-
; GCN: .long 165580
; GCN-NEXT: .long 2081
; GCN-NEXT: .long 165584
@@ -83,14 +82,14 @@ define amdgpu_ps float @ps_input_ena_no_inputs([9 x <16 x i8>] addrspace(2)* byv
; GCN-DAG: v_mov_b32_e32 v1, v2
; GCN: v_mov_b32_e32 v2, v3
; GCN-NOT: s_endpgm
-define amdgpu_ps {float, <2 x float>} @ps_input_ena_pos_w([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
- %f = bitcast <2 x i32> %8 to <2 x float>
- %s = insertvalue {float, <2 x float>} undef, float %14, 0
- %s1 = insertvalue {float, <2 x float>} %s, <2 x float> %f, 1
- ret {float, <2 x float>} %s1
+define amdgpu_ps { float, <2 x float> } @ps_input_ena_pos_w([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <3 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, float %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18) #1 {
+bb:
+ %f = bitcast <2 x i32> %arg8 to <2 x float>
+ %s = insertvalue { float, <2 x float> } undef, float %arg14, 0
+ %s1 = insertvalue { float, <2 x float> } %s, <2 x float> %f, 1
+ ret { float, <2 x float> } %s1
}
-
; GCN: .long 165580
; GCN-NEXT: .long 562
; GCN-NEXT: .long 165584
@@ -102,25 +101,24 @@ define amdgpu_ps {float, <2 x float>} @ps_input_ena_pos_w([9 x <16 x i8>] addrsp
; GCN-DAG: v_mov_b32_e32 v3, v6
; GCN-DAG: v_mov_b32_e32 v4, v8
; GCN-NOT: s_endpgm
-attributes #1 = { "InitialPSInputAddr"="1" }
-define amdgpu_ps {float, float, float, float, float} @vgpr_ps_addr1([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #1 {
- %i0 = extractelement <2 x i32> %4, i32 0
- %i1 = extractelement <2 x i32> %4, i32 1
- %i2 = extractelement <2 x i32> %7, i32 0
- %i3 = extractelement <2 x i32> %8, i32 0
+define amdgpu_ps { float, float, float, float, float } @vgpr_ps_addr1([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <3 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, float %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18) #2 {
+bb:
+ %i0 = extractelement <2 x i32> %arg4, i32 0
+ %i1 = extractelement <2 x i32> %arg4, i32 1
+ %i2 = extractelement <2 x i32> %arg7, i32 0
+ %i3 = extractelement <2 x i32> %arg8, i32 0
%f0 = bitcast i32 %i0 to float
%f1 = bitcast i32 %i1 to float
%f2 = bitcast i32 %i2 to float
%f3 = bitcast i32 %i3 to float
- %r0 = insertvalue {float, float, float, float, float} undef, float %f0, 0
- %r1 = insertvalue {float, float, float, float, float} %r0, float %f1, 1
- %r2 = insertvalue {float, float, float, float, float} %r1, float %f2, 2
- %r3 = insertvalue {float, float, float, float, float} %r2, float %f3, 3
- %r4 = insertvalue {float, float, float, float, float} %r3, float %12, 4
- ret {float, float, float, float, float} %r4
+ %r0 = insertvalue { float, float, float, float, float } undef, float %f0, 0
+ %r1 = insertvalue { float, float, float, float, float } %r0, float %f1, 1
+ %r2 = insertvalue { float, float, float, float, float } %r1, float %f2, 2
+ %r3 = insertvalue { float, float, float, float, float } %r2, float %f3, 3
+ %r4 = insertvalue { float, float, float, float, float } %r3, float %arg12, 4
+ ret { float, float, float, float, float } %r4
}
-
; GCN: .long 165580
; GCN-NEXT: .long 562
; GCN-NEXT: .long 165584
@@ -132,25 +130,24 @@ define amdgpu_ps {float, float, float, float, float} @vgpr_ps_addr1([9 x <16 x i
; GCN: v_mov_b32_e32 v3, v8
; GCN: v_mov_b32_e32 v4, v12
; GCN-NOT: s_endpgm
-attributes #2 = { "InitialPSInputAddr"="119" }
-define amdgpu_ps {float, float, float, float, float} @vgpr_ps_addr119([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #2 {
- %i0 = extractelement <2 x i32> %4, i32 0
- %i1 = extractelement <2 x i32> %4, i32 1
- %i2 = extractelement <2 x i32> %7, i32 0
- %i3 = extractelement <2 x i32> %8, i32 0
+define amdgpu_ps { float, float, float, float, float } @vgpr_ps_addr119([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <3 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, float %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18) #3 {
+bb:
+ %i0 = extractelement <2 x i32> %arg4, i32 0
+ %i1 = extractelement <2 x i32> %arg4, i32 1
+ %i2 = extractelement <2 x i32> %arg7, i32 0
+ %i3 = extractelement <2 x i32> %arg8, i32 0
%f0 = bitcast i32 %i0 to float
%f1 = bitcast i32 %i1 to float
%f2 = bitcast i32 %i2 to float
%f3 = bitcast i32 %i3 to float
- %r0 = insertvalue {float, float, float, float, float} undef, float %f0, 0
- %r1 = insertvalue {float, float, float, float, float} %r0, float %f1, 1
- %r2 = insertvalue {float, float, float, float, float} %r1, float %f2, 2
- %r3 = insertvalue {float, float, float, float, float} %r2, float %f3, 3
- %r4 = insertvalue {float, float, float, float, float} %r3, float %12, 4
- ret {float, float, float, float, float} %r4
+ %r0 = insertvalue { float, float, float, float, float } undef, float %f0, 0
+ %r1 = insertvalue { float, float, float, float, float } %r0, float %f1, 1
+ %r2 = insertvalue { float, float, float, float, float } %r1, float %f2, 2
+ %r3 = insertvalue { float, float, float, float, float } %r2, float %f3, 3
+ %r4 = insertvalue { float, float, float, float, float } %r3, float %arg12, 4
+ ret { float, float, float, float, float } %r4
}
-
; GCN: .long 165580
; GCN-NEXT: .long 562
; GCN-NEXT: .long 165584
@@ -162,38 +159,37 @@ define amdgpu_ps {float, float, float, float, float} @vgpr_ps_addr119([9 x <16 x
; GCN: v_mov_b32_e32 v3, v4
; GCN: v_mov_b32_e32 v4, v8
; GCN-NOT: s_endpgm
-attributes #3 = { "InitialPSInputAddr"="418" }
-define amdgpu_ps {float, float, float, float, float} @vgpr_ps_addr418([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #3 {
- %i0 = extractelement <2 x i32> %4, i32 0
- %i1 = extractelement <2 x i32> %4, i32 1
- %i2 = extractelement <2 x i32> %7, i32 0
- %i3 = extractelement <2 x i32> %8, i32 0
+define amdgpu_ps { float, float, float, float, float } @vgpr_ps_addr418([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <3 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, float %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18) #4 {
+bb:
+ %i0 = extractelement <2 x i32> %arg4, i32 0
+ %i1 = extractelement <2 x i32> %arg4, i32 1
+ %i2 = extractelement <2 x i32> %arg7, i32 0
+ %i3 = extractelement <2 x i32> %arg8, i32 0
%f0 = bitcast i32 %i0 to float
%f1 = bitcast i32 %i1 to float
%f2 = bitcast i32 %i2 to float
%f3 = bitcast i32 %i3 to float
- %r0 = insertvalue {float, float, float, float, float} undef, float %f0, 0
- %r1 = insertvalue {float, float, float, float, float} %r0, float %f1, 1
- %r2 = insertvalue {float, float, float, float, float} %r1, float %f2, 2
- %r3 = insertvalue {float, float, float, float, float} %r2, float %f3, 3
- %r4 = insertvalue {float, float, float, float, float} %r3, float %12, 4
- ret {float, float, float, float, float} %r4
+ %r0 = insertvalue { float, float, float, float, float } undef, float %f0, 0
+ %r1 = insertvalue { float, float, float, float, float } %r0, float %f1, 1
+ %r2 = insertvalue { float, float, float, float, float } %r1, float %f2, 2
+ %r3 = insertvalue { float, float, float, float, float } %r2, float %f3, 3
+ %r4 = insertvalue { float, float, float, float, float } %r3, float %arg12, 4
+ ret { float, float, float, float, float } %r4
}
-
; GCN-LABEL: {{^}}sgpr:
; GCN: s_add_i32 s0, s3, 2
; GCN: s_mov_b32 s2, s3
; GCN-NOT: s_endpgm
-define amdgpu_vs {i32, i32, i32} @sgpr([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, float) {
- %x = add i32 %2, 2
- %a = insertvalue {i32, i32, i32} undef, i32 %x, 0
- %b = insertvalue {i32, i32, i32} %a, i32 %1, 1
- %c = insertvalue {i32, i32, i32} %a, i32 %2, 2
- ret {i32, i32, i32} %c
+define amdgpu_vs { i32, i32, i32 } @sgpr([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
+bb:
+ %x = add i32 %arg2, 2
+ %a = insertvalue { i32, i32, i32 } undef, i32 %x, 0
+ %b = insertvalue { i32, i32, i32 } %a, i32 %arg1, 1
+ %c = insertvalue { i32, i32, i32 } %a, i32 %arg2, 2
+ ret { i32, i32, i32 } %c
}
-
; GCN-LABEL: {{^}}sgpr_literal:
; GCN: s_mov_b32 s0, 5
; GCN-NOT: s_mov_b32 s0, s0
@@ -201,37 +197,37 @@ define amdgpu_vs {i32, i32, i32} @sgpr([9 x <16 x i8>] addrspace(2)* byval, i32
; GCN-DAG: s_mov_b32 s2, 7
; GCN-DAG: s_mov_b32 s3, 8
; GCN-NOT: s_endpgm
-define amdgpu_vs {i32, i32, i32, i32} @sgpr_literal([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, float) {
- %x = add i32 %2, 2
- ret {i32, i32, i32, i32} {i32 5, i32 6, i32 7, i32 8}
+define amdgpu_vs { i32, i32, i32, i32 } @sgpr_literal([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
+bb:
+ %x = add i32 %arg2, 2
+ ret { i32, i32, i32, i32 } { i32 5, i32 6, i32 7, i32 8 }
}
-
; GCN-LABEL: {{^}}both:
; GCN: v_mov_b32_e32 v1, v0
-; GCN-DAG: exp mrt0 v1, v1, v1, v1 done compr vm
+; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm
; GCN-DAG: v_add_f32_e32 v0, 1.0, v1
; GCN-DAG: s_add_i32 s0, s3, 2
; GCN-DAG: s_mov_b32 s1, s2
; GCN: s_mov_b32 s2, s3
; GCN: s_waitcnt expcnt(0)
; GCN-NOT: s_endpgm
-define amdgpu_vs {float, i32, float, i32, i32} @both([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, float) {
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %3, float %3, float %3, float %3)
- %v = fadd float %3, 1.0
- %s = add i32 %2, 2
- %a0 = insertvalue {float, i32, float, i32, i32} undef, float %v, 0
- %a1 = insertvalue {float, i32, float, i32, i32} %a0, i32 %s, 1
- %a2 = insertvalue {float, i32, float, i32, i32} %a1, float %3, 2
- %a3 = insertvalue {float, i32, float, i32, i32} %a2, i32 %1, 3
- %a4 = insertvalue {float, i32, float, i32, i32} %a3, i32 %2, 4
- ret {float, i32, float, i32, i32} %a4
+define amdgpu_vs { float, i32, float, i32, i32 } @both([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
+bb:
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %arg3, float %arg3, float %arg3, float %arg3, i1 true, i1 true) #0
+ %v = fadd float %arg3, 1.000000e+00
+ %s = add i32 %arg2, 2
+ %a0 = insertvalue { float, i32, float, i32, i32 } undef, float %v, 0
+ %a1 = insertvalue { float, i32, float, i32, i32 } %a0, i32 %s, 1
+ %a2 = insertvalue { float, i32, float, i32, i32 } %a1, float %arg3, 2
+ %a3 = insertvalue { float, i32, float, i32, i32 } %a2, i32 %arg1, 3
+ %a4 = insertvalue { float, i32, float, i32, i32 } %a3, i32 %arg2, 4
+ ret { float, i32, float, i32, i32 } %a4
}
-
; GCN-LABEL: {{^}}structure_literal:
; GCN: v_mov_b32_e32 v3, v0
-; GCN: exp mrt0 v3, v3, v3, v3 done compr vm
+; GCN: exp mrt0 v3, v3, v3, v3 done vm
; GCN-DAG: v_mov_b32_e32 v0, 1.0
; GCN-DAG: s_mov_b32 s0, 2
@@ -239,9 +235,16 @@ define amdgpu_vs {float, i32, float, i32, i32} @both([9 x <16 x i8>] addrspace(2
; GCN-DAG: v_mov_b32_e32 v1, 2.0
; GCN-DAG: v_mov_b32_e32 v2, 4.0
; GCN: s_waitcnt expcnt(0)
-define amdgpu_vs {{float, i32}, {i32, <2 x float>}} @structure_literal([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, float) {
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %3, float %3, float %3, float %3)
- ret {{float, i32}, {i32, <2 x float>}} {{float, i32} {float 1.0, i32 2}, {i32, <2 x float>} {i32 3, <2 x float> <float 2.0, float 4.0>}}
+define amdgpu_vs { { float, i32 }, { i32, <2 x float> } } @structure_literal([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
+bb:
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %arg3, float %arg3, float %arg3, float %arg3, i1 true, i1 true) #0
+ ret { { float, i32 }, { i32, <2 x float> } } { { float, i32 } { float 1.000000e+00, i32 2 }, { i32, <2 x float> } { i32 3, <2 x float> <float 2.000000e+00, float 4.000000e+00> } }
}
-attributes #0 = { nounwind "InitialPSInputAddr"="0" }
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind "InitialPSInputAddr"="0" }
+attributes #2 = { nounwind "InitialPSInputAddr"="1" }
+attributes #3 = { nounwind "InitialPSInputAddr"="119" }
+attributes #4 = { nounwind "InitialPSInputAddr"="418" }
diff --git a/test/CodeGen/AMDGPU/ret_jump.ll b/test/CodeGen/AMDGPU/ret_jump.ll
index 51ca60492414..f2fbacbab82e 100644
--- a/test/CodeGen/AMDGPU/ret_jump.ll
+++ b/test/CodeGen/AMDGPU/ret_jump.ll
@@ -4,24 +4,86 @@
; This should end with a no-op sequence of exec mask manipulations
; Mask should be in its original state after executing the unreachable block
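; (As the checks below show, s_and_saveexec_b64/s_xor_b64 carve out the
; divergent path, and the closing s_or_b64 restores exec before the return.)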
-; GCN-LABEL: {{^}}main:
+
+; GCN-LABEL: {{^}}uniform_br_trivial_ret_divergent_br_trivial_unreachable:
; GCN: s_cbranch_scc1 [[RET_BB:BB[0-9]+_[0-9]+]]
+; GCN-NEXT: ; %else
+
; GCN: s_and_saveexec_b64 [[SAVE_EXEC:s\[[0-9]+:[0-9]+\]]], vcc
; GCN-NEXT: s_xor_b64 [[XOR_EXEC:s\[[0-9]+:[0-9]+\]]], exec, [[SAVE_EXEC]]
-; GCN-NEXT: ; mask branch [[UNREACHABLE_BB:BB[0-9]+_[0-9]+]]
+; GCN-NEXT: ; mask branch [[FLOW:BB[0-9]+_[0-9]+]]
-; GCN: [[RET_BB]]:
-; GCN-NEXT: s_branch [[FINAL_BB:BB[0-9]+_[0-9]+]]
+; GCN: BB{{[0-9]+_[0-9]+}}: ; %unreachable.bb
+; GCN-NEXT: ; divergent unreachable
-; GCN-NEXT: [[UNREACHABLE_BB]]:
-; GCN-NEXT: s_or_b64 exec, exec, [[XOR_EXEC]]
-; GCN-NEXT: [[FINAL_BB]]:
+; GCN-NEXT: {{^}}[[FLOW]]: ; %Flow
+; GCN-NEXT: s_or_b64 exec, exec
+
+; GCN-NEXT: [[RET_BB]]:
+; GCN-NEXT: ; return
; GCN-NEXT: .Lfunc_end0
-define amdgpu_ps <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @main([9 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [17 x <8 x i32>] addrspace(2)* byval, i32 addrspace(2)* byval, float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, i32, i32, float, i32) #0 {
+define amdgpu_ps <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @uniform_br_trivial_ret_divergent_br_trivial_unreachable([9 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [17 x <8 x i32>] addrspace(2)* byval %arg2, i32 addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, i32 inreg %arg17, i32 %arg18, i32 %arg19, float %arg20, i32 %arg21) #0 {
+entry:
+ %i.i = extractelement <2 x i32> %arg7, i32 0
+ %j.i = extractelement <2 x i32> %arg7, i32 1
+ %i.f.i = bitcast i32 %i.i to float
+ %j.f.i = bitcast i32 %j.i to float
+ %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 1, i32 0, i32 %arg5) #2
+ %p2.i = call float @llvm.amdgcn.interp.p2(float %p1.i, float %j.f.i, i32 1, i32 0, i32 %arg5) #2
+ %p87 = fmul float undef, %p2.i
+ %p88 = fadd float %p87, undef
+ %p93 = fadd float %p88, undef
+ %p97 = fmul float %p93, undef
+ %p102 = fsub float %p97, undef
+ %p104 = fmul float %p102, undef
+ %p106 = fadd float 0.000000e+00, %p104
+ %p108 = fadd float undef, %p106
+ %uniform.cond = icmp slt i32 %arg17, 0
+ br i1 %uniform.cond, label %ret.bb, label %else
+
+else: ; preds = %entry
+ %p124 = fmul float %p108, %p108
+ %p125 = fsub float %p124, undef
+ %divergent.cond = fcmp olt float %p125, 0.000000e+00
+ br i1 %divergent.cond, label %ret.bb, label %unreachable.bb
+
+unreachable.bb: ; preds = %else
+ unreachable
+
+ret.bb: ; preds = %else, %entry
+ ret <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> undef
+}
+
+; GCN-LABEL: {{^}}uniform_br_nontrivial_ret_divergent_br_nontrivial_unreachable:
+; GCN: s_cbranch_vccnz [[RET_BB:BB[0-9]+_[0-9]+]]
+
+; GCN: ; BB#{{[0-9]+}}: ; %else
+; GCN: s_and_saveexec_b64 [[SAVE_EXEC:s\[[0-9]+:[0-9]+\]]], vcc
+; GCN-NEXT: s_xor_b64 [[XOR_EXEC:s\[[0-9]+:[0-9]+\]]], exec, [[SAVE_EXEC]]
+; GCN-NEXT: ; mask branch [[FLOW1:BB[0-9]+_[0-9]+]]
+
+; GCN-NEXT: ; %unreachable.bb
+; GCN: ds_write_b32
+; GCN: s_waitcnt
+; GCN: ; divergent unreachable
+
+; GCN: ; %ret.bb
+; GCN: store_dword
+
+; GCN: ; %UnifiedReturnBlock
+; GCN-NEXT: s_or_b64 exec, exec
+; GCN-NEXT: ; return
+; GCN-NEXT: .Lfunc_end
+define amdgpu_ps <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @uniform_br_nontrivial_ret_divergent_br_nontrivial_unreachable([9 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [17 x <8 x i32>] addrspace(2)* byval %arg2, i32 addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, i32 inreg %arg18, i32 %arg19, float %arg20, i32 %arg21) #0 {
main_body:
- %p83 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %5, <2 x i32> %7)
- %p87 = fmul float undef, %p83
+ %i.i = extractelement <2 x i32> %arg7, i32 0
+ %j.i = extractelement <2 x i32> %arg7, i32 1
+ %i.f.i = bitcast i32 %i.i to float
+ %j.f.i = bitcast i32 %j.i to float
+ %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 1, i32 0, i32 %arg5) #2
+ %p2.i = call float @llvm.amdgcn.interp.p2(float %p1.i, float %j.f.i, i32 1, i32 0, i32 %arg5) #2
+ %p87 = fmul float undef, %p2.i
%p88 = fadd float %p87, undef
%p93 = fadd float %p88, undef
%p97 = fmul float %p93, undef
@@ -29,26 +91,35 @@ main_body:
%p104 = fmul float %p102, undef
%p106 = fadd float 0.000000e+00, %p104
%p108 = fadd float undef, %p106
- br i1 undef, label %ENDIF69, label %ELSE
+ %uniform.cond = icmp slt i32 %arg18, 0
+ br i1 %uniform.cond, label %ret.bb, label %else
-ELSE: ; preds = %main_body
+else: ; preds = %main_body
%p124 = fmul float %p108, %p108
%p125 = fsub float %p124, undef
- %p126 = fcmp olt float %p125, 0.000000e+00
- br i1 %p126, label %ENDIF69, label %ELSE41
+ %divergent.cond = fcmp olt float %p125, 0.000000e+00
+ br i1 %divergent.cond, label %ret.bb, label %unreachable.bb
-ELSE41: ; preds = %ELSE
+unreachable.bb: ; preds = %else
+ store volatile i32 8, i32 addrspace(3)* undef
unreachable
-ENDIF69: ; preds = %ELSE, %main_body
+ret.bb: ; preds = %else, %main_body
+ store volatile i32 11, i32 addrspace(1)* undef
ret <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> undef
}
; Function Attrs: nounwind readnone
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #1
; Function Attrs: nounwind readnone
-declare float @llvm.SI.fs.interp(i32, i32, i32, <2 x i32>) #1
+declare float @llvm.amdgcn.interp.mov(i32, i32, i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare float @llvm.SI.load.const(<16 x i8>, i32) #1
; Function Attrs: nounwind readnone
declare float @llvm.fabs.f32(float) #1
@@ -61,3 +132,4 @@ declare float @llvm.floor.f32(float) #1
attributes #0 = { "InitialPSInputAddr"="36983" }
attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/rotl.i64.ll b/test/CodeGen/AMDGPU/rotl.i64.ll
index b60c470de97c..266490718dd1 100644
--- a/test/CodeGen/AMDGPU/rotl.i64.ll
+++ b/test/CodeGen/AMDGPU/rotl.i64.ll
@@ -7,7 +7,7 @@
; BOTH-DAG: s_lshr_b64
; BOTH: s_or_b64
; BOTH: s_endpgm
-define void @s_rotl_i64(i64 addrspace(1)* %in, i64 %x, i64 %y) {
+define amdgpu_kernel void @s_rotl_i64(i64 addrspace(1)* %in, i64 %x, i64 %y) {
entry:
%0 = shl i64 %x, %y
%1 = sub i64 64, %y
@@ -26,7 +26,7 @@ entry:
; BOTH: v_or_b32
; BOTH: v_or_b32
; BOTH: s_endpgm
-define void @v_rotl_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
+define amdgpu_kernel void @v_rotl_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
entry:
%x = load i64, i64 addrspace(1)* %xptr, align 8
%y = load i64, i64 addrspace(1)* %yptr, align 8
diff --git a/test/CodeGen/AMDGPU/rotl.ll b/test/CodeGen/AMDGPU/rotl.ll
index 7d2b5538ca33..c4bc8cdaabf5 100644
--- a/test/CodeGen/AMDGPU/rotl.ll
+++ b/test/CodeGen/AMDGPU/rotl.ll
@@ -10,7 +10,7 @@
; SI: s_sub_i32 [[SDST:s[0-9]+]], 32, {{[s][0-9]+}}
; SI: v_mov_b32_e32 [[VDST:v[0-9]+]], [[SDST]]
; SI: v_alignbit_b32 {{v[0-9]+, [s][0-9]+, s[0-9]+}}, [[VDST]]
-define void @rotl_i32(i32 addrspace(1)* %in, i32 %x, i32 %y) {
+define amdgpu_kernel void @rotl_i32(i32 addrspace(1)* %in, i32 %x, i32 %y) {
entry:
%0 = shl i32 %x, %y
%1 = sub i32 32, %y
@@ -26,7 +26,7 @@ entry:
; SI-DAG: v_alignbit_b32
; SI-DAG: v_alignbit_b32
; SI: s_endpgm
-define void @rotl_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x, <2 x i32> %y) {
+define amdgpu_kernel void @rotl_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x, <2 x i32> %y) {
entry:
%0 = shl <2 x i32> %x, %y
%1 = sub <2 x i32> <i32 32, i32 32>, %y
@@ -46,7 +46,7 @@ entry:
; SI-DAG: s_sub_i32
; SI-DAG: v_alignbit_b32
; SI: s_endpgm
-define void @rotl_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x, <4 x i32> %y) {
+define amdgpu_kernel void @rotl_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x, <4 x i32> %y) {
entry:
%0 = shl <4 x i32> %x, %y
%1 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %y
diff --git a/test/CodeGen/AMDGPU/rotr.i64.ll b/test/CodeGen/AMDGPU/rotr.i64.ll
index 58a1efe08079..9eda479cd25c 100644
--- a/test/CodeGen/AMDGPU/rotr.i64.ll
+++ b/test/CodeGen/AMDGPU/rotr.i64.ll
@@ -6,7 +6,7 @@
; BOTH-DAG: s_lshr_b64
; BOTH-DAG: s_lshl_b64
; BOTH: s_or_b64
-define void @s_rotr_i64(i64 addrspace(1)* %in, i64 %x, i64 %y) {
+define amdgpu_kernel void @s_rotr_i64(i64 addrspace(1)* %in, i64 %x, i64 %y) {
entry:
%tmp0 = sub i64 64, %y
%tmp1 = shl i64 %x, %tmp0
@@ -24,7 +24,7 @@ entry:
; VI-DAG: v_lshlrev_b64
; BOTH: v_or_b32
; BOTH: v_or_b32
-define void @v_rotr_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
+define amdgpu_kernel void @v_rotr_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
entry:
%x = load i64, i64 addrspace(1)* %xptr, align 8
%y = load i64, i64 addrspace(1)* %yptr, align 8
@@ -37,7 +37,7 @@ entry:
}
; BOTH-LABEL: {{^}}s_rotr_v2i64:
-define void @s_rotr_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> %x, <2 x i64> %y) {
+define amdgpu_kernel void @s_rotr_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> %x, <2 x i64> %y) {
entry:
%tmp0 = sub <2 x i64> <i64 64, i64 64>, %y
%tmp1 = shl <2 x i64> %x, %tmp0
@@ -48,7 +48,7 @@ entry:
}
; BOTH-LABEL: {{^}}v_rotr_v2i64:
-define void @v_rotr_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> addrspace(1)* %xptr, <2 x i64> addrspace(1)* %yptr) {
+define amdgpu_kernel void @v_rotr_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> addrspace(1)* %xptr, <2 x i64> addrspace(1)* %yptr) {
entry:
%x = load <2 x i64>, <2 x i64> addrspace(1)* %xptr, align 8
%y = load <2 x i64>, <2 x i64> addrspace(1)* %yptr, align 8
diff --git a/test/CodeGen/AMDGPU/rotr.ll b/test/CodeGen/AMDGPU/rotr.ll
index 55d180077cc7..b4e2c2b67ce1 100644
--- a/test/CodeGen/AMDGPU/rotr.ll
+++ b/test/CodeGen/AMDGPU/rotr.ll
@@ -6,7 +6,7 @@
; R600: BIT_ALIGN_INT
; SI: v_alignbit_b32
-define void @rotr_i32(i32 addrspace(1)* %in, i32 %x, i32 %y) {
+define amdgpu_kernel void @rotr_i32(i32 addrspace(1)* %in, i32 %x, i32 %y) {
entry:
%tmp0 = sub i32 32, %y
%tmp1 = shl i32 %x, %tmp0
@@ -22,7 +22,7 @@ entry:
; SI: v_alignbit_b32
; SI: v_alignbit_b32
-define void @rotr_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x, <2 x i32> %y) {
+define amdgpu_kernel void @rotr_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x, <2 x i32> %y) {
entry:
%tmp0 = sub <2 x i32> <i32 32, i32 32>, %y
%tmp1 = shl <2 x i32> %x, %tmp0
@@ -42,7 +42,7 @@ entry:
; SI: v_alignbit_b32
; SI: v_alignbit_b32
; SI: v_alignbit_b32
-define void @rotr_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x, <4 x i32> %y) {
+define amdgpu_kernel void @rotr_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x, <4 x i32> %y) {
entry:
%tmp0 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %y
%tmp1 = shl <4 x i32> %x, %tmp0
diff --git a/test/CodeGen/AMDGPU/rsq.ll b/test/CodeGen/AMDGPU/rsq.ll
index 699440c3efbf..9462683efe0e 100644
--- a/test/CodeGen/AMDGPU/rsq.ll
+++ b/test/CodeGen/AMDGPU/rsq.ll
@@ -8,7 +8,7 @@ declare double @llvm.sqrt.f64(double) nounwind readnone
; SI-LABEL: {{^}}rsq_f32:
; SI: v_rsq_f32_e32
; SI: s_endpgm
-define void @rsq_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @rsq_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%val = load float, float addrspace(1)* %in, align 4
%sqrt = call float @llvm.sqrt.f32(float %val) nounwind readnone
%div = fdiv float 1.0, %sqrt
@@ -20,7 +20,7 @@ define void @rsq_f32(float addrspace(1)* noalias %out, float addrspace(1)* noali
; SI-UNSAFE: v_rsq_f64_e32
; SI-SAFE: v_sqrt_f64_e32
; SI: s_endpgm
-define void @rsq_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @rsq_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) nounwind {
%val = load double, double addrspace(1)* %in, align 4
%sqrt = call double @llvm.sqrt.f64(double %val) nounwind readnone
%div = fdiv double 1.0, %sqrt
@@ -31,7 +31,7 @@ define void @rsq_f64(double addrspace(1)* noalias %out, double addrspace(1)* noa
; SI-LABEL: {{^}}rsq_f32_sgpr:
; SI: v_rsq_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}
; SI: s_endpgm
-define void @rsq_f32_sgpr(float addrspace(1)* noalias %out, float %val) nounwind {
+define amdgpu_kernel void @rsq_f32_sgpr(float addrspace(1)* noalias %out, float %val) nounwind {
%sqrt = call float @llvm.sqrt.f32(float %val) nounwind readnone
%div = fdiv float 1.0, %sqrt
store float %div, float addrspace(1)* %out, align 4
@@ -55,7 +55,7 @@ define void @rsq_f32_sgpr(float addrspace(1)* noalias %out, float %val) nounwind
; SI-SAFE-NOT: v_rsq_f32
; SI: s_endpgm
-define void @rsqrt_fmul(float addrspace(1)* %out, float addrspace(1)* %in) {
+define amdgpu_kernel void @rsqrt_fmul(float addrspace(1)* %out, float addrspace(1)* %in) {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
%gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
@@ -81,7 +81,7 @@ define void @rsqrt_fmul(float addrspace(1)* %out, float addrspace(1)* %in) {
; SI-UNSAFE: v_rsq_f32_e32 [[RSQ:v[0-9]+]], v{{[0-9]+}}
; SI-UNSAFE: v_xor_b32_e32 [[NEG_RSQ:v[0-9]+]], 0x80000000, [[RSQ]]
; SI-UNSAFE: buffer_store_dword [[NEG_RSQ]]
-define void @neg_rsq_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @neg_rsq_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%val = load float, float addrspace(1)* %in, align 4
%sqrt = call float @llvm.sqrt.f32(float %val)
%div = fdiv float -1.0, %sqrt
@@ -96,7 +96,7 @@ define void @neg_rsq_f32(float addrspace(1)* noalias %out, float addrspace(1)* n
; SI-UNSAFE: v_sqrt_f64_e32 [[SQRT:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}
; SI-UNSAFE: v_rcp_f64_e64 [[RCP:v\[[0-9]+:[0-9]+\]]], -[[SQRT]]
; SI-UNSAFE: buffer_store_dwordx2 [[RCP]]
-define void @neg_rsq_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @neg_rsq_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) nounwind {
%val = load double, double addrspace(1)* %in, align 4
%sqrt = call double @llvm.sqrt.f64(double %val)
%div = fdiv double -1.0, %sqrt
@@ -112,7 +112,7 @@ define void @neg_rsq_f64(double addrspace(1)* noalias %out, double addrspace(1)*
; SI-UNSAFE: v_rsq_f32_e64 [[RSQ:v[0-9]+]], -v{{[0-9]+}}
; SI-UNSAFE: v_xor_b32_e32 [[NEG_RSQ:v[0-9]+]], 0x80000000, [[RSQ]]
; SI-UNSAFE: buffer_store_dword [[NEG_RSQ]]
-define void @neg_rsq_neg_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @neg_rsq_neg_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%val = load float, float addrspace(1)* %in, align 4
%val.fneg = fsub float -0.0, %val
%sqrt = call float @llvm.sqrt.f32(float %val.fneg)
@@ -128,7 +128,7 @@ define void @neg_rsq_neg_f32(float addrspace(1)* noalias %out, float addrspace(1
; SI-UNSAFE: v_sqrt_f64_e64 [[SQRT:v\[[0-9]+:[0-9]+\]]], -v{{\[[0-9]+:[0-9]+\]}}
; SI-UNSAFE: v_rcp_f64_e64 [[RCP:v\[[0-9]+:[0-9]+\]]], -[[SQRT]]
; SI-UNSAFE: buffer_store_dwordx2 [[RCP]]
-define void @neg_rsq_neg_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) nounwind {
+define amdgpu_kernel void @neg_rsq_neg_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) nounwind {
%val = load double, double addrspace(1)* %in, align 4
%val.fneg = fsub double -0.0, %val
%sqrt = call double @llvm.sqrt.f64(double %val.fneg)
diff --git a/test/CodeGen/AMDGPU/runtime-metadata.ll b/test/CodeGen/AMDGPU/runtime-metadata.ll
deleted file mode 100644
index abdbc325fd4d..000000000000
--- a/test/CodeGen/AMDGPU/runtime-metadata.ll
+++ /dev/null
@@ -1,396 +0,0 @@
-; RUN: llc -mtriple=amdgcn--amdhsa -filetype=obj -o - < %s | llvm-readobj -amdgpu-runtime-metadata | FileCheck %s
-; RUN: llc -mtriple=amdgcn--amdhsa -filetype=obj -amdgpu-dump-rtmd -amdgpu-check-rtmd-parser %s -o - 2>&1 | FileCheck --check-prefix=CHECK --check-prefix=PARSER %s
-
-%struct.A = type { i8, float }
-%opencl.image1d_t = type opaque
-%opencl.image2d_t = type opaque
-%opencl.image3d_t = type opaque
-%opencl.queue_t = type opaque
-%opencl.pipe_t = type opaque
-%struct.B = type { i32 addrspace(1)*}
-%opencl.clk_event_t = type opaque
-
-; CHECK: ---
-; CHECK-NEXT: { amd.MDVersion: [ 2, 0 ], amd.PrintfInfo: [ '1:1:4:%d\n', '2:1:8:%g\n' ], amd.Kernels:
-
-; CHECK-NEXT: - { amd.KernelName: test_char, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 1, amd.ArgAlign: 1, amd.ArgKind: 0, amd.ArgValueType: 1, amd.ArgTypeName: char, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_char(i8 %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !9 !kernel_arg_base_type !9 !kernel_arg_type_qual !4 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_ushort2, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgKind: 0, amd.ArgValueType: 4, amd.ArgTypeName: ushort2, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_ushort2(<2 x i16> %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !10 !kernel_arg_base_type !10 !kernel_arg_type_qual !4 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_int3, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 16, amd.ArgAlign: 16, amd.ArgKind: 0, amd.ArgValueType: 6, amd.ArgTypeName: int3, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_int3(<3 x i32> %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !11 !kernel_arg_base_type !11 !kernel_arg_type_qual !4 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_ulong4, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 32, amd.ArgAlign: 32, amd.ArgKind: 0, amd.ArgValueType: 10, amd.ArgTypeName: ulong4, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_ulong4(<4 x i64> %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !12 !kernel_arg_base_type !12 !kernel_arg_type_qual !4 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_half8, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 16, amd.ArgAlign: 16, amd.ArgKind: 0, amd.ArgValueType: 5, amd.ArgTypeName: half8, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_half8(<8 x half> %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !13 !kernel_arg_base_type !13 !kernel_arg_type_qual !4 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_float16, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 64, amd.ArgAlign: 64, amd.ArgKind: 0, amd.ArgValueType: 8, amd.ArgTypeName: float16, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_float16(<16 x float> %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !14 !kernel_arg_base_type !14 !kernel_arg_type_qual !4 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_double16, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 128, amd.ArgAlign: 128, amd.ArgKind: 0, amd.ArgValueType: 11, amd.ArgTypeName: double16, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_double16(<16 x double> %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !15 !kernel_arg_base_type !15 !kernel_arg_type_qual !4 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_pointer, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 1, amd.ArgValueType: 6, amd.ArgTypeName: 'int *', amd.ArgAddrQual: 1, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_pointer(i32 addrspace(1)* %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !16 !kernel_arg_base_type !16 !kernel_arg_type_qual !4 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_image, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 4, amd.ArgValueType: 0, amd.ArgTypeName: image2d_t, amd.ArgAddrQual: 1, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_image(%opencl.image2d_t addrspace(1)* %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !17 !kernel_arg_base_type !17 !kernel_arg_type_qual !4 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_sampler, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgKind: 3, amd.ArgValueType: 6, amd.ArgTypeName: sampler_t, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_sampler(i32 %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !18 !kernel_arg_base_type !18 !kernel_arg_type_qual !4 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_queue, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 6, amd.ArgValueType: 0, amd.ArgTypeName: queue_t, amd.ArgAddrQual: 1, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_queue(%opencl.queue_t addrspace(1)* %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !19 !kernel_arg_base_type !19 !kernel_arg_type_qual !4 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_struct, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgKind: 1, amd.ArgValueType: 0, amd.ArgTypeName: struct A, amd.ArgAddrQual: 0, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_struct(%struct.A* byval %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !20 !kernel_arg_base_type !20 !kernel_arg_type_qual !4 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_i128, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 16, amd.ArgAlign: 8, amd.ArgKind: 0, amd.ArgValueType: 0, amd.ArgTypeName: i128, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_i128(i128 %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !21 !kernel_arg_base_type !21 !kernel_arg_type_qual !4 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_multi_arg, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgKind: 0, amd.ArgValueType: 6, amd.ArgTypeName: int, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgKind: 0, amd.ArgValueType: 3, amd.ArgTypeName: short2, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgKind: 0, amd.ArgValueType: 1, amd.ArgTypeName: char3, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_multi_arg(i32 %a, <2 x i16> %b, <3 x i8> %c) !kernel_arg_addr_space !22 !kernel_arg_access_qual !23 !kernel_arg_type !24 !kernel_arg_base_type !24 !kernel_arg_type_qual !25 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_addr_space, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 1, amd.ArgValueType: 6, amd.ArgTypeName: 'int *', amd.ArgAddrQual: 1, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 1, amd.ArgValueType: 6, amd.ArgTypeName: 'int *', amd.ArgAddrQual: 2, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgPointeeAlign: 4, amd.ArgKind: 2, amd.ArgValueType: 6, amd.ArgTypeName: 'int *', amd.ArgAddrQual: 3, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_addr_space(i32 addrspace(1)* %g, i32 addrspace(2)* %c, i32 addrspace(3)* %l) !kernel_arg_addr_space !50 !kernel_arg_access_qual !23 !kernel_arg_type !51 !kernel_arg_base_type !51 !kernel_arg_type_qual !25 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_type_qual, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 1, amd.ArgValueType: 6, amd.ArgTypeName: 'int *', amd.ArgAddrQual: 1, amd.ArgAccQual: 0, amd.ArgIsVolatile: 1 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 1, amd.ArgValueType: 6, amd.ArgTypeName: 'int *', amd.ArgAddrQual: 1, amd.ArgAccQual: 0, amd.ArgIsConst: 1, amd.ArgIsRestrict: 1 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 5, amd.ArgValueType: 0, amd.ArgTypeName: 'int *', amd.ArgAddrQual: 1, amd.ArgAccQual: 0, amd.ArgIsPipe: 1 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_type_qual(i32 addrspace(1)* %a, i32 addrspace(1)* %b, %opencl.pipe_t addrspace(1)* %c) !kernel_arg_addr_space !22 !kernel_arg_access_qual !23 !kernel_arg_type !51 !kernel_arg_base_type !51 !kernel_arg_type_qual !70 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_access_qual, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 4, amd.ArgValueType: 0, amd.ArgTypeName: image1d_t, amd.ArgAddrQual: 1, amd.ArgAccQual: 1 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 4, amd.ArgValueType: 0, amd.ArgTypeName: image2d_t, amd.ArgAddrQual: 1, amd.ArgAccQual: 2 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 4, amd.ArgValueType: 0, amd.ArgTypeName: image3d_t, amd.ArgAddrQual: 1, amd.ArgAccQual: 3 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_access_qual(%opencl.image1d_t addrspace(1)* %ro, %opencl.image2d_t addrspace(1)* %wo, %opencl.image3d_t addrspace(1)* %rw) !kernel_arg_addr_space !60 !kernel_arg_access_qual !61 !kernel_arg_type !62 !kernel_arg_base_type !62 !kernel_arg_type_qual !25 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_vec_type_hint_half, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.VecTypeHint: half, amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgKind: 0, amd.ArgValueType: 6, amd.ArgTypeName: int, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_vec_type_hint_half(i32 %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3 !kernel_arg_base_type !3 !kernel_arg_type_qual !4 !vec_type_hint !26 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_vec_type_hint_float, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.VecTypeHint: float, amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgKind: 0, amd.ArgValueType: 6, amd.ArgTypeName: int, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_vec_type_hint_float(i32 %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3 !kernel_arg_base_type !3 !kernel_arg_type_qual !4 !vec_type_hint !27 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_vec_type_hint_double, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.VecTypeHint: double, amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgKind: 0, amd.ArgValueType: 6, amd.ArgTypeName: int, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_vec_type_hint_double(i32 %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3 !kernel_arg_base_type !3 !kernel_arg_type_qual !4 !vec_type_hint !28 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_vec_type_hint_char, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.VecTypeHint: char, amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgKind: 0, amd.ArgValueType: 6, amd.ArgTypeName: int, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_vec_type_hint_char(i32 %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3 !kernel_arg_base_type !3 !kernel_arg_type_qual !4 !vec_type_hint !29 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_vec_type_hint_short, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.VecTypeHint: short, amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgKind: 0, amd.ArgValueType: 6, amd.ArgTypeName: int, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_vec_type_hint_short(i32 %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3 !kernel_arg_base_type !3 !kernel_arg_type_qual !4 !vec_type_hint !30 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_vec_type_hint_long, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.VecTypeHint: long, amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgKind: 0, amd.ArgValueType: 6, amd.ArgTypeName: int, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_vec_type_hint_long(i32 %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3 !kernel_arg_base_type !3 !kernel_arg_type_qual !4 !vec_type_hint !31 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_vec_type_hint_unknown, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.VecTypeHint: unknown, amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgKind: 0, amd.ArgValueType: 6, amd.ArgTypeName: int, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_vec_type_hint_unknown(i32 %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3 !kernel_arg_base_type !3 !kernel_arg_type_qual !4 !vec_type_hint !32 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_reqd_wgs_vec_type_hint, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.ReqdWorkGroupSize: [ 1, 2, 4 ], amd.VecTypeHint: int, amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgKind: 0, amd.ArgValueType: 6, amd.ArgTypeName: int, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_reqd_wgs_vec_type_hint(i32 %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3 !kernel_arg_base_type !3 !kernel_arg_type_qual !4 !vec_type_hint !5 !reqd_work_group_size !6 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_wgs_hint_vec_type_hint, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.WorkGroupSizeHint: [ 8, 16, 32 ], amd.VecTypeHint: uint4, amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgKind: 0, amd.ArgValueType: 6, amd.ArgTypeName: int, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_wgs_hint_vec_type_hint(i32 %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3 !kernel_arg_base_type !3 !kernel_arg_type_qual !4 !vec_type_hint !7 !work_group_size_hint !8 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_arg_ptr_to_ptr, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 1, amd.ArgValueType: 6, amd.ArgTypeName: 'int **', amd.ArgAddrQual: 1, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_arg_ptr_to_ptr(i32 * addrspace(1)* %a) !kernel_arg_addr_space !81 !kernel_arg_access_qual !2 !kernel_arg_type !80 !kernel_arg_base_type !80 !kernel_arg_type_qual !4 {
- ret void
-}
-; CHECK-NEXT: - { amd.KernelName: test_arg_struct_contains_ptr, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgKind: 1, amd.ArgValueType: 0, amd.ArgTypeName: struct B, amd.ArgAddrQual: 0, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_arg_struct_contains_ptr(%struct.B * byval %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !82 !kernel_arg_base_type !82 !kernel_arg_type_qual !4 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_arg_vector_of_ptr, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 16, amd.ArgAlign: 16, amd.ArgKind: 0, amd.ArgValueType: 6, amd.ArgTypeName: 'global int* __attribute__((ext_vector_type(2)))', amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_arg_vector_of_ptr(<2 x i32 addrspace(1)*> %a) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !83 !kernel_arg_base_type !83 !kernel_arg_type_qual !4 {
- ret void
-}
-
-
-; CHECK-NEXT: - { amd.KernelName: test_arg_unknown_builtin_type, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 1, amd.ArgValueType: 0, amd.ArgTypeName: clk_event_t, amd.ArgAddrQual: 1, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-define amdgpu_kernel void @test_arg_unknown_builtin_type(%opencl.clk_event_t addrspace(1)* %a) !kernel_arg_addr_space !81 !kernel_arg_access_qual !2 !kernel_arg_type !84 !kernel_arg_base_type !84 !kernel_arg_type_qual !4 {
- ret void
-}
-
-; CHECK-NEXT: - { amd.KernelName: test_pointee_align, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 1, amd.ArgValueType: 9, amd.ArgTypeName: 'long *', amd.ArgAddrQual: 1, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgPointeeAlign: 1, amd.ArgKind: 2, amd.ArgValueType: 1, amd.ArgTypeName: 'char *', amd.ArgAddrQual: 3, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgPointeeAlign: 2, amd.ArgKind: 2, amd.ArgValueType: 1, amd.ArgTypeName: 'char2 *', amd.ArgAddrQual: 3, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgPointeeAlign: 4, amd.ArgKind: 2, amd.ArgValueType: 1, amd.ArgTypeName: 'char3 *', amd.ArgAddrQual: 3, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgPointeeAlign: 4, amd.ArgKind: 2, amd.ArgValueType: 1, amd.ArgTypeName: 'char4 *', amd.ArgAddrQual: 3, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgPointeeAlign: 8, amd.ArgKind: 2, amd.ArgValueType: 1, amd.ArgTypeName: 'char8 *', amd.ArgAddrQual: 3, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgPointeeAlign: 16, amd.ArgKind: 2, amd.ArgValueType: 1, amd.ArgTypeName: 'char16 *', amd.ArgAddrQual: 3, amd.ArgAccQual: 0 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-; CHECK-NEXT: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } } }
-define amdgpu_kernel void @test_pointee_align(i64 addrspace(1)* %a, i8 addrspace(3)* %b, <2 x i8> addrspace(3)* %c, <3 x i8> addrspace(3)* %d, <4 x i8> addrspace(3)* %e, <8 x i8> addrspace(3)* %f, <16 x i8> addrspace(3)* %g) !kernel_arg_addr_space !91 !kernel_arg_access_qual !92 !kernel_arg_type !93 !kernel_arg_base_type !93 !kernel_arg_type_qual !94 {
- ret void
-}
-
-; CHECK-NEXT:...
-
-; PARSER: AMDGPU runtime metadata parser test passes.
-
-!llvm.printf.fmts = !{!100, !101}
-
-!1 = !{i32 0}
-!2 = !{!"none"}
-!3 = !{!"int"}
-!4 = !{!""}
-!5 = !{i32 undef, i32 1}
-!6 = !{i32 1, i32 2, i32 4}
-!7 = !{<4 x i32> undef, i32 0}
-!8 = !{i32 8, i32 16, i32 32}
-!9 = !{!"char"}
-!10 = !{!"ushort2"}
-!11 = !{!"int3"}
-!12 = !{!"ulong4"}
-!13 = !{!"half8"}
-!14 = !{!"float16"}
-!15 = !{!"double16"}
-!16 = !{!"int *"}
-!17 = !{!"image2d_t"}
-!18 = !{!"sampler_t"}
-!19 = !{!"queue_t"}
-!20 = !{!"struct A"}
-!21 = !{!"i128"}
-!22 = !{i32 0, i32 0, i32 0}
-!23 = !{!"none", !"none", !"none"}
-!24 = !{!"int", !"short2", !"char3"}
-!25 = !{!"", !"", !""}
-!26 = !{half undef, i32 1}
-!27 = !{float undef, i32 1}
-!28 = !{double undef, i32 1}
-!29 = !{i8 undef, i32 1}
-!30 = !{i16 undef, i32 1}
-!31 = !{i64 undef, i32 1}
-!32 = !{i32 *undef, i32 1}
-!50 = !{i32 1, i32 2, i32 3}
-!51 = !{!"int *", !"int *", !"int *"}
-!60 = !{i32 1, i32 1, i32 1}
-!61 = !{!"read_only", !"write_only", !"read_write"}
-!62 = !{!"image1d_t", !"image2d_t", !"image3d_t"}
-!70 = !{!"volatile", !"const restrict", !"pipe"}
-!80 = !{!"int **"}
-!81 = !{i32 1}
-!82 = !{!"struct B"}
-!83 = !{!"global int* __attribute__((ext_vector_type(2)))"}
-!84 = !{!"clk_event_t"}
-!opencl.ocl.version = !{!90}
-!90 = !{i32 2, i32 0}
-!91 = !{i32 0, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3}
-!92 = !{!"none", !"none", !"none", !"none", !"none", !"none", !"none"}
-!93 = !{!"long *", !"char *", !"char2 *", !"char3 *", !"char4 *", !"char8 *", !"char16 *"}
-!94 = !{!"", !"", !"", !"", !"", !"", !""}
-!100 = !{!"1:1:4:%d\5Cn"}
-!101 = !{!"2:1:8:%g\5Cn"}
diff --git a/test/CodeGen/AMDGPU/s_addk_i32.ll b/test/CodeGen/AMDGPU/s_addk_i32.ll
index f776faca8397..deef24cea377 100644
--- a/test/CodeGen/AMDGPU/s_addk_i32.ll
+++ b/test/CodeGen/AMDGPU/s_addk_i32.ll
@@ -7,7 +7,7 @@
; SI: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[VAL]]
; SI: buffer_store_dword [[VRESULT]]
; SI: s_endpgm
-define void @s_addk_i32_k0(i32 addrspace(1)* %out, i32 %b) {
+define amdgpu_kernel void @s_addk_i32_k0(i32 addrspace(1)* %out, i32 %b) {
%add = add i32 %b, 65
store i32 %add, i32 addrspace(1)* %out
ret void
@@ -19,7 +19,7 @@ define void @s_addk_i32_k0(i32 addrspace(1)* %out, i32 %b) {
; SI-DAG: s_add_i32 {{s[0-9]+}}, {{s[0-9]+}}, [[K]]
; SI-DAG: s_add_i32 {{s[0-9]+}}, {{s[0-9]+}}, [[K]]
; SI: s_endpgm
-define void @s_addk_i32_k0_x2(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %a, i32 %b) {
+define amdgpu_kernel void @s_addk_i32_k0_x2(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %a, i32 %b) {
%add0 = add i32 %a, 65
%add1 = add i32 %b, 65
store i32 %add0, i32 addrspace(1)* %out0
@@ -30,26 +30,35 @@ define void @s_addk_i32_k0_x2(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1,
; SI-LABEL: {{^}}s_addk_i32_k1:
; SI: s_addk_i32 {{s[0-9]+}}, 0x7fff{{$}}
; SI: s_endpgm
-define void @s_addk_i32_k1(i32 addrspace(1)* %out, i32 %b) {
+define amdgpu_kernel void @s_addk_i32_k1(i32 addrspace(1)* %out, i32 %b) {
%add = add i32 %b, 32767 ; (1 << 15) - 1
store i32 %add, i32 addrspace(1)* %out
ret void
}
; SI-LABEL: {{^}}s_addk_i32_k2:
-; SI: s_addk_i32 {{s[0-9]+}}, 0xffef{{$}}
+; SI: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, 17
; SI: s_endpgm
-define void @s_addk_i32_k2(i32 addrspace(1)* %out, i32 %b) {
+define amdgpu_kernel void @s_addk_i32_k2(i32 addrspace(1)* %out, i32 %b) {
%add = add i32 %b, -17
store i32 %add, i32 addrspace(1)* %out
ret void
}
+; SI-LABEL: {{^}}s_addk_i32_k3:
+; SI: s_addk_i32 {{s[0-9]+}}, 0xffbf{{$}}
+; SI: s_endpgm
+define amdgpu_kernel void @s_addk_i32_k3(i32 addrspace(1)* %out, i32 %b) {
+ %add = add i32 %b, -65
+ store i32 %add, i32 addrspace(1)* %out
+ ret void
+}
+
; SI-LABEL: {{^}}s_addk_v2i32_k0:
; SI-DAG: s_addk_i32 {{s[0-9]+}}, 0x41
; SI-DAG: s_addk_i32 {{s[0-9]+}}, 0x42
; SI: s_endpgm
-define void @s_addk_v2i32_k0(<2 x i32> addrspace(1)* %out, <2 x i32> %b) {
+define amdgpu_kernel void @s_addk_v2i32_k0(<2 x i32> addrspace(1)* %out, <2 x i32> %b) {
%add = add <2 x i32> %b, <i32 65, i32 66>
store <2 x i32> %add, <2 x i32> addrspace(1)* %out
ret void
@@ -61,7 +70,7 @@ define void @s_addk_v2i32_k0(<2 x i32> addrspace(1)* %out, <2 x i32> %b) {
; SI-DAG: s_addk_i32 {{s[0-9]+}}, 0x43
; SI-DAG: s_addk_i32 {{s[0-9]+}}, 0x44
; SI: s_endpgm
-define void @s_addk_v4i32_k0(<4 x i32> addrspace(1)* %out, <4 x i32> %b) {
+define amdgpu_kernel void @s_addk_v4i32_k0(<4 x i32> addrspace(1)* %out, <4 x i32> %b) {
%add = add <4 x i32> %b, <i32 65, i32 66, i32 67, i32 68>
store <4 x i32> %add, <4 x i32> addrspace(1)* %out
ret void
@@ -77,7 +86,7 @@ define void @s_addk_v4i32_k0(<4 x i32> addrspace(1)* %out, <4 x i32> %b) {
; SI-DAG: s_addk_i32 {{s[0-9]+}}, 0x47
; SI-DAG: s_addk_i32 {{s[0-9]+}}, 0x48
; SI: s_endpgm
-define void @s_addk_v8i32_k0(<8 x i32> addrspace(1)* %out, <8 x i32> %b) {
+define amdgpu_kernel void @s_addk_v8i32_k0(<8 x i32> addrspace(1)* %out, <8 x i32> %b) {
%add = add <8 x i32> %b, <i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72>
store <8 x i32> %add, <8 x i32> addrspace(1)* %out
ret void
@@ -86,7 +95,7 @@ define void @s_addk_v8i32_k0(<8 x i32> addrspace(1)* %out, <8 x i32> %b) {
; SI-LABEL: {{^}}no_s_addk_i32_k0:
; SI: s_add_i32 {{s[0-9]+}}, {{s[0-9]+}}, 0x8000{{$}}
; SI: s_endpgm
-define void @no_s_addk_i32_k0(i32 addrspace(1)* %out, i32 %b) {
+define amdgpu_kernel void @no_s_addk_i32_k0(i32 addrspace(1)* %out, i32 %b) {
%add = add i32 %b, 32768 ; 1 << 15
store i32 %add, i32 addrspace(1)* %out
ret void
@@ -96,7 +105,7 @@ define void @no_s_addk_i32_k0(i32 addrspace(1)* %out, i32 %b) {
; SI-LABEL: {{^}}commute_s_addk_i32:
; SI: s_addk_i32 s{{[0-9]+}}, 0x800{{$}}
-define void @commute_s_addk_i32(i32 addrspace(1)* %out, i32 %b) #0 {
+define amdgpu_kernel void @commute_s_addk_i32(i32 addrspace(1)* %out, i32 %b) #0 {
%size = call i32 @llvm.amdgcn.groupstaticsize()
%add = add i32 %size, %b
call void asm sideeffect "; foo $0, $1", "v,s"([512 x i32] addrspace(3)* @lds, i32 %add)
diff --git a/test/CodeGen/AMDGPU/s_movk_i32.ll b/test/CodeGen/AMDGPU/s_movk_i32.ll
index 0164c45083a2..a131aaa3dfb4 100644
--- a/test/CodeGen/AMDGPU/s_movk_i32.ll
+++ b/test/CodeGen/AMDGPU/s_movk_i32.ll
@@ -7,7 +7,7 @@
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[LO_S_IMM]], v[[LO_VREG]]
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, 1, v[[HI_VREG]]
; SI: s_endpgm
-define void @s_movk_i32_k0(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @s_movk_i32_k0(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 4
%or = or i64 %loada, 4295032831 ; ((1 << 16) - 1) | (1 << 32)
store i64 %or, i64 addrspace(1)* %out
@@ -21,7 +21,7 @@ define void @s_movk_i32_k0(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 add
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[LO_S_IMM]], v[[LO_VREG]]
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, 1, v[[HI_VREG]]
; SI: s_endpgm
-define void @s_movk_i32_k1(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @s_movk_i32_k1(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 4
%or = or i64 %loada, 4295000063 ; ((1 << 15) - 1) | (1 << 32)
store i64 %or, i64 addrspace(1)* %out
@@ -35,7 +35,7 @@ define void @s_movk_i32_k1(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 add
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[LO_S_IMM]], v[[LO_VREG]]
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, 64, v[[HI_VREG]]
; SI: s_endpgm
-define void @s_movk_i32_k2(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @s_movk_i32_k2(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 4
%or = or i64 %loada, 274877939711 ; ((1 << 15) - 1) | (64 << 32)
store i64 %or, i64 addrspace(1)* %out
@@ -49,7 +49,7 @@ define void @s_movk_i32_k2(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 add
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[LO_S_IMM]], v[[LO_VREG]]
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, 1, v[[HI_VREG]]
; SI: s_endpgm
-define void @s_movk_i32_k3(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @s_movk_i32_k3(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 4
%or = or i64 %loada, 4295000064 ; (1 << 15) | (1 << 32)
store i64 %or, i64 addrspace(1)* %out
@@ -63,7 +63,7 @@ define void @s_movk_i32_k3(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 add
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[LO_S_IMM]], v[[LO_VREG]]
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, 1, v[[HI_VREG]]
; SI: s_endpgm
-define void @s_movk_i32_k4(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @s_movk_i32_k4(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 4
%or = or i64 %loada, 4295098368 ; (1 << 17) | (1 << 32)
store i64 %or, i64 addrspace(1)* %out
@@ -78,7 +78,7 @@ define void @s_movk_i32_k4(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 add
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[LO_S_IMM]], v[[LO_VREG]]
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
; SI: s_endpgm
-define void @s_movk_i32_k5(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @s_movk_i32_k5(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 4
%or = or i64 %loada, 18374967954648334319 ; -17 & 0xff00ffffffffffff
store i64 %or, i64 addrspace(1)* %out
@@ -92,7 +92,7 @@ define void @s_movk_i32_k5(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 add
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[LO_S_IMM]], v[[LO_VREG]]
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, 63, v[[HI_VREG]]
; SI: s_endpgm
-define void @s_movk_i32_k6(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @s_movk_i32_k6(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 4
%or = or i64 %loada, 270582939713 ; 65 | (63 << 32)
store i64 %or, i64 addrspace(1)* %out
@@ -107,7 +107,7 @@ define void @s_movk_i32_k6(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 add
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[LO_S_IMM]], v[[LO_VREG]]
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
; SI: s_endpgm
-define void @s_movk_i32_k7(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @s_movk_i32_k7(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 4
 %or = or i64 %loada, 70368744185856 ; (1 << 13) | ((1 << 14) << 32)
store i64 %or, i64 addrspace(1)* %out
@@ -122,7 +122,7 @@ define void @s_movk_i32_k7(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 add
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[LO_S_IMM]], v[[LO_VREG]]
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
; SI: s_endpgm
-define void @s_movk_i32_k8(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @s_movk_i32_k8(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 4
%or = or i64 %loada, 1229782942255906816 ; 0x11111111ffff8000
store i64 %or, i64 addrspace(1)* %out
@@ -137,7 +137,7 @@ define void @s_movk_i32_k8(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 add
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[LO_S_IMM]], v[[LO_VREG]]
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
; SI: s_endpgm
-define void @s_movk_i32_k9(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @s_movk_i32_k9(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 4
%or = or i64 %loada, 1229782942255906817 ; 0x11111111ffff8001
store i64 %or, i64 addrspace(1)* %out
@@ -152,7 +152,7 @@ define void @s_movk_i32_k9(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 add
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[LO_S_IMM]], v[[LO_VREG]]
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
; SI: s_endpgm
-define void @s_movk_i32_k10(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @s_movk_i32_k10(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 4
%or = or i64 %loada, 1229782942255909000 ; 0x11111111ffff8888
store i64 %or, i64 addrspace(1)* %out
@@ -167,7 +167,7 @@ define void @s_movk_i32_k10(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 ad
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[LO_S_IMM]], v[[LO_VREG]]
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
; SI: s_endpgm
-define void @s_movk_i32_k11(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @s_movk_i32_k11(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 4
%or = or i64 %loada, 1229782942255910911 ; 0x11111111ffff8fff
store i64 %or, i64 addrspace(1)* %out
@@ -182,7 +182,7 @@ define void @s_movk_i32_k11(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 ad
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[LO_S_IMM]], v[[LO_VREG]]
; SI-DAG: v_or_b32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
; SI: s_endpgm
-define void @s_movk_i32_k12(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @s_movk_i32_k12(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 4
%or = or i64 %loada, 1229782942255902721 ; 0x11111111ffff7001
store i64 %or, i64 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/s_mulk_i32.ll b/test/CodeGen/AMDGPU/s_mulk_i32.ll
index e83b368cc1cb..f6ed5408ba45 100644
--- a/test/CodeGen/AMDGPU/s_mulk_i32.ll
+++ b/test/CodeGen/AMDGPU/s_mulk_i32.ll
@@ -7,7 +7,7 @@
; SI: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[VAL]]
; SI: buffer_store_dword [[VRESULT]]
; SI: s_endpgm
-define void @s_mulk_i32_k0(i32 addrspace(1)* %out, i32 %b) {
+define amdgpu_kernel void @s_mulk_i32_k0(i32 addrspace(1)* %out, i32 %b) {
%mul = mul i32 %b, 65
store i32 %mul, i32 addrspace(1)* %out
ret void
@@ -16,7 +16,7 @@ define void @s_mulk_i32_k0(i32 addrspace(1)* %out, i32 %b) {
; SI-LABEL: {{^}}s_mulk_i32_k1:
; SI: s_mulk_i32 {{s[0-9]+}}, 0x7fff{{$}}
; SI: s_endpgm
-define void @s_mulk_i32_k1(i32 addrspace(1)* %out, i32 %b) {
+define amdgpu_kernel void @s_mulk_i32_k1(i32 addrspace(1)* %out, i32 %b) {
%mul = mul i32 %b, 32767 ; (1 << 15) - 1
store i32 %mul, i32 addrspace(1)* %out
ret void
@@ -25,7 +25,7 @@ define void @s_mulk_i32_k1(i32 addrspace(1)* %out, i32 %b) {
; SI-LABEL: {{^}}s_mulk_i32_k2:
; SI: s_mulk_i32 {{s[0-9]+}}, 0xffef{{$}}
; SI: s_endpgm
-define void @s_mulk_i32_k2(i32 addrspace(1)* %out, i32 %b) {
+define amdgpu_kernel void @s_mulk_i32_k2(i32 addrspace(1)* %out, i32 %b) {
%mul = mul i32 %b, -17
store i32 %mul, i32 addrspace(1)* %out
ret void
@@ -34,7 +34,7 @@ define void @s_mulk_i32_k2(i32 addrspace(1)* %out, i32 %b) {
; SI-LABEL: {{^}}no_s_mulk_i32_k0:
; SI: s_mul_i32 {{s[0-9]+}}, {{s[0-9]+}}, 0x8001{{$}}
; SI: s_endpgm
-define void @no_s_mulk_i32_k0(i32 addrspace(1)* %out, i32 %b) {
+define amdgpu_kernel void @no_s_mulk_i32_k0(i32 addrspace(1)* %out, i32 %b) {
 %mul = mul i32 %b, 32769 ; (1 << 15) + 1
store i32 %mul, i32 addrspace(1)* %out
ret void
@@ -44,7 +44,7 @@ define void @no_s_mulk_i32_k0(i32 addrspace(1)* %out, i32 %b) {
; SI-LABEL: {{^}}commute_s_mulk_i32:
; SI: s_mulk_i32 s{{[0-9]+}}, 0x800{{$}}
-define void @commute_s_mulk_i32(i32 addrspace(1)* %out, i32 %b) #0 {
+define amdgpu_kernel void @commute_s_mulk_i32(i32 addrspace(1)* %out, i32 %b) #0 {
%size = call i32 @llvm.amdgcn.groupstaticsize()
%add = mul i32 %size, %b
call void asm sideeffect "; foo $0, $1", "v,s"([512 x i32] addrspace(3)* @lds, i32 %add)
diff --git a/test/CodeGen/AMDGPU/sad.ll b/test/CodeGen/AMDGPU/sad.ll
index 534483401638..f7a1c65881d0 100644
--- a/test/CodeGen/AMDGPU/sad.ll
+++ b/test/CodeGen/AMDGPU/sad.ll
@@ -2,7 +2,7 @@
; GCN-LABEL: {{^}}v_sad_u32_pat1:
; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_u32_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
+define amdgpu_kernel void @v_sad_u32_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
%icmp0 = icmp ugt i32 %a, %b
%t0 = select i1 %icmp0, i32 %a, i32 %b
@@ -18,7 +18,7 @@ define void @v_sad_u32_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: {{^}}v_sad_u32_constant_pat1:
; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, 20
-define void @v_sad_u32_constant_pat1(i32 addrspace(1)* %out, i32 %a) {
+define amdgpu_kernel void @v_sad_u32_constant_pat1(i32 addrspace(1)* %out, i32 %a) {
%icmp0 = icmp ugt i32 %a, 90
%t0 = select i1 %icmp0, i32 %a, i32 90
@@ -34,7 +34,7 @@ define void @v_sad_u32_constant_pat1(i32 addrspace(1)* %out, i32 %a) {
; GCN-LABEL: {{^}}v_sad_u32_pat2:
; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_u32_pat2(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
+define amdgpu_kernel void @v_sad_u32_pat2(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
%icmp0 = icmp ugt i32 %a, %b
%sub0 = sub i32 %a, %b
%sub1 = sub i32 %b, %a
@@ -51,7 +51,7 @@ define void @v_sad_u32_pat2(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
; GCN: s_min_u32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-define void @v_sad_u32_multi_use_sub_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
+define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
%icmp0 = icmp ugt i32 %a, %b
%t0 = select i1 %icmp0, i32 %a, i32 %b
@@ -68,7 +68,7 @@ define void @v_sad_u32_multi_use_sub_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b
; GCN-LABEL: {{^}}v_sad_u32_multi_use_add_pat1:
; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_u32_multi_use_add_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
+define amdgpu_kernel void @v_sad_u32_multi_use_add_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
%icmp0 = icmp ugt i32 %a, %b
%t0 = select i1 %icmp0, i32 %a, i32 %b
@@ -84,7 +84,7 @@ define void @v_sad_u32_multi_use_add_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b
; GCN-LABEL: {{^}}v_sad_u32_multi_use_max_pat1:
; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_u32_multi_use_max_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
+define amdgpu_kernel void @v_sad_u32_multi_use_max_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
%icmp0 = icmp ugt i32 %a, %b
%t0 = select i1 %icmp0, i32 %a, i32 %b
store volatile i32 %t0, i32 *undef
@@ -101,7 +101,7 @@ define void @v_sad_u32_multi_use_max_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b
; GCN-LABEL: {{^}}v_sad_u32_multi_use_min_pat1:
; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_u32_multi_use_min_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
+define amdgpu_kernel void @v_sad_u32_multi_use_min_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
%icmp0 = icmp ugt i32 %a, %b
%t0 = select i1 %icmp0, i32 %a, i32 %b
@@ -119,7 +119,7 @@ define void @v_sad_u32_multi_use_min_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b
; GCN-LABEL: {{^}}v_sad_u32_multi_use_sub_pat2:
; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_u32_multi_use_sub_pat2(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
+define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat2(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
%icmp0 = icmp ugt i32 %a, %b
%sub0 = sub i32 %a, %b
store volatile i32 %sub0, i32 *undef
@@ -136,7 +136,7 @@ define void @v_sad_u32_multi_use_sub_pat2(i32 addrspace(1)* %out, i32 %a, i32 %b
; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GCN: v_cmp_gt_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-define void @v_sad_u32_multi_use_select_pat2(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
+define amdgpu_kernel void @v_sad_u32_multi_use_select_pat2(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
%icmp0 = icmp ugt i32 %a, %b
%sub0 = sub i32 %a, %b
%sub1 = sub i32 %b, %a
@@ -154,7 +154,7 @@ define void @v_sad_u32_multi_use_select_pat2(i32 addrspace(1)* %out, i32 %a, i32
; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_u32_vector_pat1(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+define amdgpu_kernel void @v_sad_u32_vector_pat1(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
%icmp0 = icmp ugt <4 x i32> %a, %b
%t0 = select <4 x i1> %icmp0, <4 x i32> %a, <4 x i32> %b
@@ -173,7 +173,7 @@ define void @v_sad_u32_vector_pat1(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <
; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_u32_vector_pat2(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+define amdgpu_kernel void @v_sad_u32_vector_pat2(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
%icmp0 = icmp ugt <4 x i32> %a, %b
%sub0 = sub <4 x i32> %a, %b
%sub1 = sub <4 x i32> %b, %a
@@ -187,7 +187,7 @@ define void @v_sad_u32_vector_pat2(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <
; GCN-LABEL: {{^}}v_sad_u32_i16_pat1:
; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_u32_i16_pat1(i16 addrspace(1)* %out, i16 %a, i16 %b, i16 %c) {
+define amdgpu_kernel void @v_sad_u32_i16_pat1(i16 addrspace(1)* %out, i16 %a, i16 %b, i16 %c) {
%icmp0 = icmp ugt i16 %a, %b
%t0 = select i1 %icmp0, i16 %a, i16 %b
@@ -204,7 +204,7 @@ define void @v_sad_u32_i16_pat1(i16 addrspace(1)* %out, i16 %a, i16 %b, i16 %c)
; GCN-LABEL: {{^}}v_sad_u32_i16_pat2:
; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_u32_i16_pat2(i16 addrspace(1)* %out, i16 zeroext %a, i16 zeroext %b, i16 zeroext %c) {
+define amdgpu_kernel void @v_sad_u32_i16_pat2(i16 addrspace(1)* %out, i16 zeroext %a, i16 zeroext %b, i16 zeroext %c) {
%icmp0 = icmp ugt i16 %a, %b
%sub0 = sub i16 %a, %b
%sub1 = sub i16 %b, %a
@@ -218,7 +218,7 @@ define void @v_sad_u32_i16_pat2(i16 addrspace(1)* %out, i16 zeroext %a, i16 zero
; GCN-LABEL: {{^}}v_sad_u32_i8_pat1:
; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_u32_i8_pat1(i8 addrspace(1)* %out, i8 %a, i8 %b, i8 %c) {
+define amdgpu_kernel void @v_sad_u32_i8_pat1(i8 addrspace(1)* %out, i8 %a, i8 %b, i8 %c) {
%icmp0 = icmp ugt i8 %a, %b
%t0 = select i1 %icmp0, i8 %a, i8 %b
@@ -234,7 +234,7 @@ define void @v_sad_u32_i8_pat1(i8 addrspace(1)* %out, i8 %a, i8 %b, i8 %c) {
; GCN-LABEL: {{^}}v_sad_u32_i8_pat2:
; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_u32_i8_pat2(i8 addrspace(1)* %out, i8 zeroext %a, i8 zeroext %b, i8 zeroext %c) {
+define amdgpu_kernel void @v_sad_u32_i8_pat2(i8 addrspace(1)* %out, i8 zeroext %a, i8 zeroext %b, i8 zeroext %c) {
%icmp0 = icmp ugt i8 %a, %b
%sub0 = sub i8 %a, %b
%sub1 = sub i8 %b, %a
@@ -251,7 +251,7 @@ define void @v_sad_u32_i8_pat2(i8 addrspace(1)* %out, i8 zeroext %a, i8 zeroext
; GCN: s_max_u32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GCN: v_sub_i32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_add_i32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_u32_mismatched_operands_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
+define amdgpu_kernel void @v_sad_u32_mismatched_operands_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
%icmp0 = icmp ugt i32 %a, %b
%t0 = select i1 %icmp0, i32 %a, i32 %b
@@ -269,7 +269,7 @@ define void @v_sad_u32_mismatched_operands_pat1(i32 addrspace(1)* %out, i32 %a,
; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GCN: v_add_i32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
-define void @v_sad_u32_mismatched_operands_pat2(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
+define amdgpu_kernel void @v_sad_u32_mismatched_operands_pat2(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
%icmp0 = icmp ugt i32 %a, %b
%sub0 = sub i32 %a, %d
%sub1 = sub i32 %b, %a
diff --git a/test/CodeGen/AMDGPU/saddo.ll b/test/CodeGen/AMDGPU/saddo.ll
index f8ced7942a60..586a455b2b91 100644
--- a/test/CodeGen/AMDGPU/saddo.ll
+++ b/test/CodeGen/AMDGPU/saddo.ll
@@ -6,7 +6,7 @@ declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
declare { i64, i1 } @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
; FUNC-LABEL: {{^}}saddo_i64_zext:
-define void @saddo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @saddo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%sadd = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %a, i64 %b) nounwind
%val = extractvalue { i64, i1 } %sadd, 0
%carry = extractvalue { i64, i1 } %sadd, 1
@@ -17,7 +17,7 @@ define void @saddo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
}
; FUNC-LABEL: {{^}}s_saddo_i32:
-define void @s_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @s_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
%sadd = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b) nounwind
%val = extractvalue { i32, i1 } %sadd, 0
%carry = extractvalue { i32, i1 } %sadd, 1
@@ -27,7 +27,7 @@ define void @s_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32
}
; FUNC-LABEL: {{^}}v_saddo_i32:
-define void @v_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+define amdgpu_kernel void @v_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%a = load i32, i32 addrspace(1)* %aptr, align 4
%b = load i32, i32 addrspace(1)* %bptr, align 4
%sadd = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b) nounwind
@@ -39,7 +39,7 @@ define void @v_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32
}
; FUNC-LABEL: {{^}}s_saddo_i64:
-define void @s_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @s_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
%sadd = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %a, i64 %b) nounwind
%val = extractvalue { i64, i1 } %sadd, 0
%carry = extractvalue { i64, i1 } %sadd, 1
@@ -51,7 +51,7 @@ define void @s_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64
; FUNC-LABEL: {{^}}v_saddo_i64:
; SI: v_add_i32
; SI: v_addc_u32
-define void @v_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
+define amdgpu_kernel void @v_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
%a = load i64, i64 addrspace(1)* %aptr, align 4
%b = load i64, i64 addrspace(1)* %bptr, align 4
%sadd = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %a, i64 %b) nounwind
diff --git a/test/CodeGen/AMDGPU/salu-to-valu.ll b/test/CodeGen/AMDGPU/salu-to-valu.ll
index 37083fbbd3c5..6e1dd1638333 100644
--- a/test/CodeGen/AMDGPU/salu-to-valu.ll
+++ b/test/CodeGen/AMDGPU/salu-to-valu.ll
@@ -24,7 +24,7 @@ declare i32 @llvm.amdgcn.workitem.id.y() #0
; GCN-HSA: flat_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}
; GCN-HSA: flat_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}
-define void @mubuf(i32 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @mubuf(i32 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = call i32 @llvm.amdgcn.workitem.id.y()
@@ -55,17 +55,17 @@ done: ; preds = %loop
; GCN-LABEL: {{^}}smrd_valu:
; SI: s_movk_i32 [[OFFSET:s[0-9]+]], 0x2ee0
+; SI: s_mov_b32
; GCN: v_readfirstlane_b32 s[[PTR_LO:[0-9]+]], v{{[0-9]+}}
; GCN: v_readfirstlane_b32 s[[PTR_HI:[0-9]+]], v{{[0-9]+}}
; SI: s_nop 3
; SI: s_load_dword [[OUT:s[0-9]+]], s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, [[OFFSET]]
-; SI: s_mov_b32
; CI: s_load_dword [[OUT:s[0-9]+]], s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0xbb8
; GCN: v_mov_b32_e32 [[V_OUT:v[0-9]+]], [[OUT]]
; GCN-NOHSA: buffer_store_dword [[V_OUT]]
; GCN-HSA: flat_store_dword {{.*}}, [[V_OUT]]
-define void @smrd_valu(i32 addrspace(2)* addrspace(1)* %in, i32 %a, i32 %b, i32 addrspace(1)* %out) #1 {
+define amdgpu_kernel void @smrd_valu(i32 addrspace(2)* addrspace(1)* %in, i32 %a, i32 %b, i32 addrspace(1)* %out) #1 {
entry:
%tmp = icmp ne i32 %a, 0
br i1 %tmp, label %if, label %else
@@ -93,7 +93,7 @@ endif: ; preds = %else, %if
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:16{{$}}
; GCN-HSA: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
-define void @smrd_valu2(i32 addrspace(1)* %out, [8 x i32] addrspace(2)* %in) #1 {
+define amdgpu_kernel void @smrd_valu2(i32 addrspace(1)* %out, [8 x i32] addrspace(2)* %in) #1 {
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = add i32 %tmp, 4
@@ -113,7 +113,7 @@ entry:
; GCN-NOHSA: buffer_store_dword
; GCN-HSA: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
; GCN-HSA: flat_store_dword v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}
-define void @smrd_valu_ci_offset(i32 addrspace(1)* %out, i32 addrspace(2)* %in, i32 %c) #1 {
+define amdgpu_kernel void @smrd_valu_ci_offset(i32 addrspace(1)* %out, i32 addrspace(2)* %in, i32 %c) #1 {
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp2 = getelementptr i32, i32 addrspace(2)* %in, i32 %tmp
@@ -133,7 +133,7 @@ entry:
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: buffer_store_dwordx2
; GCN-HSA: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
-define void @smrd_valu_ci_offset_x2(i64 addrspace(1)* %out, i64 addrspace(2)* %in, i64 %c) #1 {
+define amdgpu_kernel void @smrd_valu_ci_offset_x2(i64 addrspace(1)* %out, i64 addrspace(2)* %in, i64 %c) #1 {
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp2 = getelementptr i64, i64 addrspace(2)* %in, i32 %tmp
@@ -155,7 +155,7 @@ entry:
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: buffer_store_dwordx4
; GCN-HSA: flat_load_dwordx4 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
-define void @smrd_valu_ci_offset_x4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(2)* %in, <4 x i32> %c) #1 {
+define amdgpu_kernel void @smrd_valu_ci_offset_x4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(2)* %in, <4 x i32> %c) #1 {
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp2 = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %in, i32 %tmp
@@ -189,7 +189,7 @@ entry:
; GCN-NOHSA: buffer_store_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
-define void @smrd_valu_ci_offset_x8(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(2)* %in, <8 x i32> %c) #1 {
+define amdgpu_kernel void @smrd_valu_ci_offset_x8(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(2)* %in, <8 x i32> %c) #1 {
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp2 = getelementptr <8 x i32>, <8 x i32> addrspace(2)* %in, i32 %tmp
@@ -230,7 +230,7 @@ entry:
; GCN-HSA: flat_load_dwordx4
; GCN: s_endpgm
-define void @smrd_valu_ci_offset_x16(<16 x i32> addrspace(1)* %out, <16 x i32> addrspace(2)* %in, <16 x i32> %c) #1 {
+define amdgpu_kernel void @smrd_valu_ci_offset_x16(<16 x i32> addrspace(1)* %out, <16 x i32> addrspace(2)* %in, <16 x i32> %c) #1 {
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp2 = getelementptr <16 x i32>, <16 x i32> addrspace(2)* %in, i32 %tmp
@@ -247,7 +247,7 @@ entry:
; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, s{{[0-9]+}}, [[MOVED]]
; GCN-NOHSA: buffer_store_dword [[ADD]]
; GCN-HSA: flat_store_dword {{.*}}, [[ADD]]
-define void @smrd_valu2_salu_user(i32 addrspace(1)* %out, [8 x i32] addrspace(2)* %in, i32 %a) #1 {
+define amdgpu_kernel void @smrd_valu2_salu_user(i32 addrspace(1)* %out, [8 x i32] addrspace(2)* %in, i32 %a) #1 {
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = add i32 %tmp, 4
@@ -261,7 +261,7 @@ entry:
; GCN-LABEL: {{^}}smrd_valu2_max_smrd_offset:
; GCN-NOHSA: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:1020{{$}}
; GCN-HSA: flat_load_dword v{{[0-9]}}, v[{{[0-9]+:[0-9]+}}]
-define void @smrd_valu2_max_smrd_offset(i32 addrspace(1)* %out, [1024 x i32] addrspace(2)* %in) #1 {
+define amdgpu_kernel void @smrd_valu2_max_smrd_offset(i32 addrspace(1)* %out, [1024 x i32] addrspace(2)* %in) #1 {
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = add i32 %tmp, 4
@@ -275,7 +275,7 @@ entry:
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:1024{{$}}
; GCN-HSA: flat_load_dword v{{[0-9]}}, v[{{[0-9]+:[0-9]+}}]
-define void @smrd_valu2_mubuf_offset(i32 addrspace(1)* %out, [1024 x i32] addrspace(2)* %in) #1 {
+define amdgpu_kernel void @smrd_valu2_mubuf_offset(i32 addrspace(1)* %out, [1024 x i32] addrspace(2)* %in) #1 {
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = add i32 %tmp, 4
@@ -290,7 +290,7 @@ entry:
; GCN-NOHSA: buffer_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
-define void @s_load_imm_v8i32(<8 x i32> addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
+define amdgpu_kernel void @s_load_imm_v8i32(<8 x i32> addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
entry:
%tmp0 = tail call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
@@ -313,7 +313,7 @@ entry:
; GCN-NOHSA: buffer_store_dword
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
-define void @s_load_imm_v8i32_salu_user(i32 addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
+define amdgpu_kernel void @s_load_imm_v8i32_salu_user(i32 addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
entry:
%tmp0 = tail call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
@@ -350,7 +350,7 @@ entry:
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
-define void @s_load_imm_v16i32(<16 x i32> addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
+define amdgpu_kernel void @s_load_imm_v16i32(<16 x i32> addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
entry:
%tmp0 = tail call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
@@ -385,7 +385,7 @@ entry:
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
-define void @s_load_imm_v16i32_salu_user(i32 addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
+define amdgpu_kernel void @s_load_imm_v16i32_salu_user(i32 addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
entry:
%tmp0 = tail call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
@@ -439,9 +439,9 @@ entry:
; GCN: v_mov_b32_e32 [[ONE:v[0-9]+]], 1
; GCN-NOHSA: buffer_store_dword [[ONE]]
; GCN-HSA: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[ONE]]
-; GCN; {{^}}[[EXIT]]:
+; GCN: {{^}}[[EXIT]]:
; GCN: s_endpgm
-define void @sopc_vopc_legalize_bug(i32 %cond, i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @sopc_vopc_legalize_bug(i32 %cond, i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
bb3: ; preds = %bb2
%tmp0 = bitcast i32 %cond to float
%tmp1 = fadd float %tmp0, 2.500000e-01
@@ -459,7 +459,7 @@ bb7: ; preds = %bb3
; GCN-LABEL: {{^}}phi_visit_order:
; GCN: v_add_i32_e32 v{{[0-9]+}}, vcc, 1, v{{[0-9]+}}
-define void @phi_visit_order() {
+define amdgpu_kernel void @phi_visit_order() {
bb:
br label %bb1
@@ -484,7 +484,7 @@ bb4:
; GCN: [[LOOP_LABEL:[0-9a-zA-Z_]+]]:
; GCN: s_xor_b32 [[B]], [[B]], [[A]]
; GCN: s_cbranch_scc{{[01]}} [[LOOP_LABEL]]
-define void @phi_imm_in_sgprs(i32 addrspace(3)* %out, i32 %cond) {
+define amdgpu_kernel void @phi_imm_in_sgprs(i32 addrspace(3)* %out, i32 %cond) {
entry:
br label %loop
diff --git a/test/CodeGen/AMDGPU/sampler-resource-id.ll b/test/CodeGen/AMDGPU/sampler-resource-id.ll
index c41d345369bf..4ea503bf6098 100644
--- a/test/CodeGen/AMDGPU/sampler-resource-id.ll
+++ b/test/CodeGen/AMDGPU/sampler-resource-id.ll
@@ -5,7 +5,7 @@
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 0(
-define void @test_0(i32 %in0, i32 addrspace(1)* %out) {
+define amdgpu_kernel void @test_0(i32 %in0, i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.OpenCL.sampler.get.resource.id(i32 %in0) #0
store i32 %0, i32 addrspace(1)* %out
@@ -17,7 +17,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 1(
-define void @test_1(i32 %in0, i32 %in1, i32 addrspace(1)* %out) {
+define amdgpu_kernel void @test_1(i32 %in0, i32 %in1, i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.OpenCL.sampler.get.resource.id(i32 %in1) #0
store i32 %0, i32 addrspace(1)* %out
@@ -29,7 +29,7 @@ entry:
; EG: MOV [[VAL]], literal.x
; EG-NEXT: LSHR
; EG-NEXT: 2(
-define void @test_2(i32 %in0, i32 %in1, i32 %in2, i32 addrspace(1)* %out) {
+define amdgpu_kernel void @test_2(i32 %in0, i32 %in1, i32 %in2, i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.OpenCL.sampler.get.resource.id(i32 %in2) #0
store i32 %0, i32 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/scalar-store-cache-flush.mir b/test/CodeGen/AMDGPU/scalar-store-cache-flush.mir
index af71086e542f..5bee36d878eb 100644
--- a/test/CodeGen/AMDGPU/scalar-store-cache-flush.mir
+++ b/test/CodeGen/AMDGPU/scalar-store-cache-flush.mir
@@ -1,23 +1,23 @@
# RUN: llc -march=amdgcn -run-pass si-insert-waits %s -o - | FileCheck %s
--- |
- define void @basic_insert_dcache_wb() {
+ define amdgpu_kernel void @basic_insert_dcache_wb() {
ret void
}
- define void @explicit_flush_after() {
+ define amdgpu_kernel void @explicit_flush_after() {
ret void
}
- define void @explicit_flush_before() {
+ define amdgpu_kernel void @explicit_flush_before() {
ret void
}
- define void @no_scalar_store() {
+ define amdgpu_kernel void @no_scalar_store() {
ret void
}
- define void @multi_block_store() {
+ define amdgpu_kernel void @multi_block_store() {
bb0:
br i1 undef, label %bb1, label %bb2
@@ -28,7 +28,7 @@
ret void
}
- define void @one_block_store() {
+ define amdgpu_kernel void @one_block_store() {
bb0:
br i1 undef, label %bb1, label %bb2
@@ -169,5 +169,5 @@ tracksRegLiveness: false
body: |
bb.0:
S_STORE_DWORD_SGPR undef %sgpr2, undef %sgpr0_sgpr1, undef %m0, 0
- SI_RETURN undef %vgpr0
+ SI_RETURN_TO_EPILOG undef %vgpr0
...
diff --git a/test/CodeGen/AMDGPU/scalar_to_vector.ll b/test/CodeGen/AMDGPU/scalar_to_vector.ll
index 32df16778a91..62d0d9367885 100644
--- a/test/CodeGen/AMDGPU/scalar_to_vector.ll
+++ b/test/CodeGen/AMDGPU/scalar_to_vector.ll
@@ -1,15 +1,15 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; XXX - Why the packing?
-; FUNC-LABEL: {{^}}scalar_to_vector_v2i32:
-; SI: buffer_load_dword [[VAL:v[0-9]+]],
-; SI: v_lshrrev_b32_e32 [[SHR:v[0-9]+]], 16, [[VAL]]
-; SI: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 16, [[SHR]]
-; SI: v_or_b32_e32 v[[OR:[0-9]+]], [[SHL]], [[SHR]]
-; SI: v_mov_b32_e32 v[[COPY:[0-9]+]], v[[OR]]
-; SI: buffer_store_dwordx2 v{{\[}}[[OR]]:[[COPY]]{{\]}}
-define void @scalar_to_vector_v2i32(<4 x i16> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+; GCN-LABEL: {{^}}scalar_to_vector_v2i32:
+; GCN: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN: v_lshrrev_b32_e32 [[SHR:v[0-9]+]], 16, [[VAL]]
+; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 16, [[SHR]]
+; GCN: v_or_b32_e32 v[[OR:[0-9]+]], [[SHL]], [[SHR]]
+; GCN: v_mov_b32_e32 v[[COPY:[0-9]+]], v[[OR]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[OR]]:[[COPY]]{{\]}}
+define amdgpu_kernel void @scalar_to_vector_v2i32(<4 x i16> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%tmp1 = load i32, i32 addrspace(1)* %in, align 4
%bc = bitcast i32 %tmp1 to <2 x i16>
%tmp2 = shufflevector <2 x i16> %bc, <2 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -17,11 +17,11 @@ define void @scalar_to_vector_v2i32(<4 x i16> addrspace(1)* %out, i32 addrspace(
ret void
}
-; FUNC-LABEL: {{^}}scalar_to_vector_v2f32:
-; SI: buffer_load_dword [[VAL:v[0-9]+]],
-; SI: v_lshrrev_b32_e32 [[RESULT:v[0-9]+]], 16, [[VAL]]
-; SI: buffer_store_dwordx2
-define void @scalar_to_vector_v2f32(<4 x i16> addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+; GCN-LABEL: {{^}}scalar_to_vector_v2f32:
+; GCN: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN: v_lshrrev_b32_e32 [[RESULT:v[0-9]+]], 16, [[VAL]]
+; GCN: buffer_store_dwordx2
+define amdgpu_kernel void @scalar_to_vector_v2f32(<4 x i16> addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tmp1 = load float, float addrspace(1)* %in, align 4
%bc = bitcast float %tmp1 to <2 x i16>
%tmp2 = shufflevector <2 x i16> %bc, <2 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -33,7 +33,7 @@ define void @scalar_to_vector_v2f32(<4 x i16> addrspace(1)* %out, float addrspac
; to produce one, but for some reason never made it to selection.
-; define void @scalar_to_vector_test2(<8 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+; define amdgpu_kernel void @scalar_to_vector_test2(<8 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
; %tmp1 = load i32, i32 addrspace(1)* %in, align 4
; %bc = bitcast i32 %tmp1 to <4 x i8>
@@ -42,7 +42,7 @@ define void @scalar_to_vector_v2f32(<4 x i16> addrspace(1)* %out, float addrspac
; ret void
; }
-; define void @scalar_to_vector_test3(<4 x i32> addrspace(1)* %out) nounwind {
+; define amdgpu_kernel void @scalar_to_vector_test3(<4 x i32> addrspace(1)* %out) nounwind {
; %newvec0 = insertelement <2 x i64> undef, i64 12345, i32 0
; %newvec1 = insertelement <2 x i64> %newvec0, i64 undef, i32 1
; %bc = bitcast <2 x i64> %newvec1 to <4 x i32>
@@ -51,7 +51,7 @@ define void @scalar_to_vector_v2f32(<4 x i16> addrspace(1)* %out, float addrspac
; ret void
; }
-; define void @scalar_to_vector_test4(<8 x i16> addrspace(1)* %out) nounwind {
+; define amdgpu_kernel void @scalar_to_vector_test4(<8 x i16> addrspace(1)* %out) nounwind {
; %newvec0 = insertelement <4 x i32> undef, i32 12345, i32 0
; %bc = bitcast <4 x i32> %newvec0 to <8 x i16>
; %add = add <8 x i16> %bc, <i16 1, i16 2, i16 3, i16 4, i16 1, i16 2, i16 3, i16 4>
@@ -59,7 +59,7 @@ define void @scalar_to_vector_v2f32(<4 x i16> addrspace(1)* %out, float addrspac
; ret void
; }
-; define void @scalar_to_vector_test5(<4 x i16> addrspace(1)* %out) nounwind {
+; define amdgpu_kernel void @scalar_to_vector_test5(<4 x i16> addrspace(1)* %out) nounwind {
; %newvec0 = insertelement <2 x i32> undef, i32 12345, i32 0
; %bc = bitcast <2 x i32> %newvec0 to <4 x i16>
; %add = add <4 x i16> %bc, <i16 1, i16 2, i16 3, i16 4>
@@ -67,10 +67,9 @@ define void @scalar_to_vector_v2f32(<4 x i16> addrspace(1)* %out, float addrspac
; ret void
; }
-; define void @scalar_to_vector_test6(<4 x i16> addrspace(1)* %out) nounwind {
-; %newvec0 = insertelement <2 x i32> undef, i32 12345, i32 0
-; %bc = bitcast <2 x i32> %newvec0 to <4 x i16>
-; %add = add <4 x i16> %bc, <i16 1, i16 2, i16 3, i16 4>
-; store <4 x i16> %add, <4 x i16> addrspace(1)* %out, align 16
-; ret void
-; }
+define amdgpu_kernel void @scalar_to_vector_test6(<2 x half> addrspace(1)* %out, i8 zeroext %val) nounwind {
+ %newvec0 = insertelement <4 x i8> undef, i8 %val, i32 0
+ %bc = bitcast <4 x i8> %newvec0 to <2 x half>
+ store <2 x half> %bc, <2 x half> addrspace(1)* %out
+ ret void
+}
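
The recurring change across these tests is the switch from the default calling convention to amdgpu_kernel for kernel entry points. A minimal standalone sketch of the pattern (the function name @store_kernarg and its body are illustrative, not taken from any test above):

; Before: a plain function, lowered with the default ABI.
; define void @store_kernarg(i32 addrspace(1)* %out, i32 %val) { ... }

; After: marked as a kernel entry point, so %out and %val are lowered as
; kernel arguments (typically loaded with s_load_* from the kernarg segment).
define amdgpu_kernel void @store_kernarg(i32 addrspace(1)* %out, i32 %val) {
  store i32 %val, i32 addrspace(1)* %out
  ret void
}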
diff --git a/test/CodeGen/AMDGPU/schedule-fs-loop-nested-if.ll b/test/CodeGen/AMDGPU/schedule-fs-loop-nested-if.ll
index e040639a2d94..60abd83546d3 100644
--- a/test/CodeGen/AMDGPU/schedule-fs-loop-nested-if.ll
+++ b/test/CodeGen/AMDGPU/schedule-fs-loop-nested-if.ll
@@ -1,81 +1,85 @@
-;RUN: llc < %s -march=r600 -mcpu=cayman -stress-sched -verify-misched -verify-machineinstrs
-;REQUIRES: asserts
+; RUN: llc -march=r600 -mcpu=cayman -stress-sched -verify-misched -verify-machineinstrs < %s
+; REQUIRES: asserts
-define amdgpu_vs void @main(<4 x float> inreg %reg0, <4 x float> inreg %reg1) {
+define amdgpu_vs void @main(<4 x float> inreg %reg0, <4 x float> inreg %reg1) #0 {
main_body:
- %0 = extractelement <4 x float> %reg1, i32 0
- %1 = extractelement <4 x float> %reg1, i32 1
- %2 = extractelement <4 x float> %reg1, i32 2
- %3 = extractelement <4 x float> %reg1, i32 3
- %4 = fcmp ult float %1, 0.000000e+00
- %5 = select i1 %4, float 1.000000e+00, float 0.000000e+00
- %6 = fsub float -0.000000e+00, %5
- %7 = fptosi float %6 to i32
- %8 = bitcast i32 %7 to float
- %9 = fcmp ult float %0, 5.700000e+01
- %10 = select i1 %9, float 1.000000e+00, float 0.000000e+00
- %11 = fsub float -0.000000e+00, %10
- %12 = fptosi float %11 to i32
- %13 = bitcast i32 %12 to float
- %14 = bitcast float %8 to i32
- %15 = bitcast float %13 to i32
- %16 = and i32 %14, %15
- %17 = bitcast i32 %16 to float
- %18 = bitcast float %17 to i32
- %19 = icmp ne i32 %18, 0
- %20 = fcmp ult float %0, 0.000000e+00
- %21 = select i1 %20, float 1.000000e+00, float 0.000000e+00
- %22 = fsub float -0.000000e+00, %21
- %23 = fptosi float %22 to i32
- %24 = bitcast i32 %23 to float
- %25 = bitcast float %24 to i32
- %26 = icmp ne i32 %25, 0
- br i1 %19, label %IF, label %ELSE
+ %tmp = extractelement <4 x float> %reg1, i32 0
+ %tmp5 = extractelement <4 x float> %reg1, i32 1
+ %tmp6 = extractelement <4 x float> %reg1, i32 2
+ %tmp7 = extractelement <4 x float> %reg1, i32 3
+ %tmp8 = fcmp ult float %tmp5, 0.000000e+00
+ %tmp9 = select i1 %tmp8, float 1.000000e+00, float 0.000000e+00
+ %tmp10 = fsub float -0.000000e+00, %tmp9
+ %tmp11 = fptosi float %tmp10 to i32
+ %tmp12 = bitcast i32 %tmp11 to float
+ %tmp13 = fcmp ult float %tmp, 5.700000e+01
+ %tmp14 = select i1 %tmp13, float 1.000000e+00, float 0.000000e+00
+ %tmp15 = fsub float -0.000000e+00, %tmp14
+ %tmp16 = fptosi float %tmp15 to i32
+ %tmp17 = bitcast i32 %tmp16 to float
+ %tmp18 = bitcast float %tmp12 to i32
+ %tmp19 = bitcast float %tmp17 to i32
+ %tmp20 = and i32 %tmp18, %tmp19
+ %tmp21 = bitcast i32 %tmp20 to float
+ %tmp22 = bitcast float %tmp21 to i32
+ %tmp23 = icmp ne i32 %tmp22, 0
+ %tmp24 = fcmp ult float %tmp, 0.000000e+00
+ %tmp25 = select i1 %tmp24, float 1.000000e+00, float 0.000000e+00
+ %tmp26 = fsub float -0.000000e+00, %tmp25
+ %tmp27 = fptosi float %tmp26 to i32
+ %tmp28 = bitcast i32 %tmp27 to float
+ %tmp29 = bitcast float %tmp28 to i32
+ %tmp30 = icmp ne i32 %tmp29, 0
+ br i1 %tmp23, label %IF, label %ELSE
IF: ; preds = %main_body
- %. = select i1 %26, float 0.000000e+00, float 1.000000e+00
- %.18 = select i1 %26, float 1.000000e+00, float 0.000000e+00
+ %. = select i1 %tmp30, float 0.000000e+00, float 1.000000e+00
+ %.18 = select i1 %tmp30, float 1.000000e+00, float 0.000000e+00
br label %ENDIF
ELSE: ; preds = %main_body
- br i1 %26, label %ENDIF, label %ELSE17
+ br i1 %tmp30, label %ENDIF, label %ELSE17
ENDIF: ; preds = %ELSE17, %ELSE, %IF
- %temp1.0 = phi float [ %., %IF ], [ %48, %ELSE17 ], [ 0.000000e+00, %ELSE ]
- %temp2.0 = phi float [ 0.000000e+00, %IF ], [ %49, %ELSE17 ], [ 1.000000e+00, %ELSE ]
- %temp.0 = phi float [ %.18, %IF ], [ %47, %ELSE17 ], [ 0.000000e+00, %ELSE ]
- %27 = call float @llvm.AMDGPU.clamp.f32(float %temp.0, float 0.000000e+00, float 1.000000e+00)
- %28 = call float @llvm.AMDGPU.clamp.f32(float %temp1.0, float 0.000000e+00, float 1.000000e+00)
- %29 = call float @llvm.AMDGPU.clamp.f32(float %temp2.0, float 0.000000e+00, float 1.000000e+00)
- %30 = call float @llvm.AMDGPU.clamp.f32(float 1.000000e+00, float 0.000000e+00, float 1.000000e+00)
- %31 = insertelement <4 x float> undef, float %27, i32 0
- %32 = insertelement <4 x float> %31, float %28, i32 1
- %33 = insertelement <4 x float> %32, float %29, i32 2
- %34 = insertelement <4 x float> %33, float %30, i32 3
- call void @llvm.r600.store.swizzle(<4 x float> %34, i32 0, i32 0)
+ %temp1.0 = phi float [ %., %IF ], [ %tmp48, %ELSE17 ], [ 0.000000e+00, %ELSE ]
+ %temp2.0 = phi float [ 0.000000e+00, %IF ], [ %tmp49, %ELSE17 ], [ 1.000000e+00, %ELSE ]
+ %temp.0 = phi float [ %.18, %IF ], [ %tmp47, %ELSE17 ], [ 0.000000e+00, %ELSE ]
+ %max.0.i = call float @llvm.maxnum.f32(float %temp.0, float 0.000000e+00)
+ %clamp.i = call float @llvm.minnum.f32(float %max.0.i, float 1.000000e+00)
+ %max.0.i3 = call float @llvm.maxnum.f32(float %temp1.0, float 0.000000e+00)
+ %clamp.i4 = call float @llvm.minnum.f32(float %max.0.i3, float 1.000000e+00)
+ %max.0.i1 = call float @llvm.maxnum.f32(float %temp2.0, float 0.000000e+00)
+ %clamp.i2 = call float @llvm.minnum.f32(float %max.0.i1, float 1.000000e+00)
+ %tmp31 = insertelement <4 x float> undef, float %clamp.i, i32 0
+ %tmp32 = insertelement <4 x float> %tmp31, float %clamp.i4, i32 1
+ %tmp33 = insertelement <4 x float> %tmp32, float %clamp.i2, i32 2
+ %tmp34 = insertelement <4 x float> %tmp33, float 1.000000e+00, i32 3
+ call void @llvm.r600.store.swizzle(<4 x float> %tmp34, i32 0, i32 0)
ret void
ELSE17: ; preds = %ELSE
- %35 = fadd float 0.000000e+00, 0x3FC99999A0000000
- %36 = fadd float 0.000000e+00, 0x3FC99999A0000000
- %37 = fadd float 0.000000e+00, 0x3FC99999A0000000
- %38 = fadd float %35, 0x3FC99999A0000000
- %39 = fadd float %36, 0x3FC99999A0000000
- %40 = fadd float %37, 0x3FC99999A0000000
- %41 = fadd float %38, 0x3FC99999A0000000
- %42 = fadd float %39, 0x3FC99999A0000000
- %43 = fadd float %40, 0x3FC99999A0000000
- %44 = fadd float %41, 0x3FC99999A0000000
- %45 = fadd float %42, 0x3FC99999A0000000
- %46 = fadd float %43, 0x3FC99999A0000000
- %47 = fadd float %44, 0x3FC99999A0000000
- %48 = fadd float %45, 0x3FC99999A0000000
- %49 = fadd float %46, 0x3FC99999A0000000
+ %tmp35 = fadd float 0.000000e+00, 0x3FC99999A0000000
+ %tmp36 = fadd float 0.000000e+00, 0x3FC99999A0000000
+ %tmp37 = fadd float 0.000000e+00, 0x3FC99999A0000000
+ %tmp38 = fadd float %tmp35, 0x3FC99999A0000000
+ %tmp39 = fadd float %tmp36, 0x3FC99999A0000000
+ %tmp40 = fadd float %tmp37, 0x3FC99999A0000000
+ %tmp41 = fadd float %tmp38, 0x3FC99999A0000000
+ %tmp42 = fadd float %tmp39, 0x3FC99999A0000000
+ %tmp43 = fadd float %tmp40, 0x3FC99999A0000000
+ %tmp44 = fadd float %tmp41, 0x3FC99999A0000000
+ %tmp45 = fadd float %tmp42, 0x3FC99999A0000000
+ %tmp46 = fadd float %tmp43, 0x3FC99999A0000000
+ %tmp47 = fadd float %tmp44, 0x3FC99999A0000000
+ %tmp48 = fadd float %tmp45, 0x3FC99999A0000000
+ %tmp49 = fadd float %tmp46, 0x3FC99999A0000000
br label %ENDIF
}
-declare float @llvm.AMDGPU.clamp.f32(float, float, float) #0
+declare float @llvm.minnum.f32(float, float) #1
+declare float @llvm.maxnum.f32(float, float) #1
declare void @llvm.r600.store.swizzle(<4 x float>, i32, i32)
-attributes #0 = { readnone }
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
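
The clamp rewrites in these scheduler tests all follow one pattern: the removed target intrinsic @llvm.AMDGPU.clamp.f32(%x, 0.0, 1.0) is expressed with the generic minnum/maxnum intrinsics instead. A minimal sketch of the equivalence (the wrapper name @clamp01 is illustrative):

declare float @llvm.maxnum.f32(float, float) #0
declare float @llvm.minnum.f32(float, float) #0

; clamp(x, 0.0, 1.0) == minnum(maxnum(x, 0.0), 1.0)
define float @clamp01(float %x) {
  %lo = call float @llvm.maxnum.f32(float %x, float 0.000000e+00)
  %hi = call float @llvm.minnum.f32(float %lo, float 1.000000e+00)
  ret float %hi
}

attributes #0 = { nounwind readnone }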
diff --git a/test/CodeGen/AMDGPU/schedule-fs-loop-nested.ll b/test/CodeGen/AMDGPU/schedule-fs-loop-nested.ll
index f907e154f962..177957c0b35b 100644
--- a/test/CodeGen/AMDGPU/schedule-fs-loop-nested.ll
+++ b/test/CodeGen/AMDGPU/schedule-fs-loop-nested.ll
@@ -1,88 +1,91 @@
-;RUN: llc < %s -march=r600 -mcpu=cayman -stress-sched -verify-misched -verify-machineinstrs
-;REQUIRES: asserts
+; RUN: llc -march=r600 -mcpu=cayman -stress-sched -verify-misched -verify-machineinstrs < %s
+; REQUIRES: asserts
-define void @main() {
+define amdgpu_kernel void @main() #0 {
main_body:
- %0 = load <4 x float>, <4 x float> addrspace(9)* null
- %1 = extractelement <4 x float> %0, i32 3
- %2 = fptosi float %1 to i32
- %3 = bitcast i32 %2 to float
- %4 = bitcast float %3 to i32
- %5 = sdiv i32 %4, 4
- %6 = bitcast i32 %5 to float
- %7 = bitcast float %6 to i32
- %8 = mul i32 %7, 4
- %9 = bitcast i32 %8 to float
- %10 = bitcast float %9 to i32
- %11 = sub i32 0, %10
- %12 = bitcast i32 %11 to float
- %13 = bitcast float %3 to i32
- %14 = bitcast float %12 to i32
- %15 = add i32 %13, %14
- %16 = bitcast i32 %15 to float
- %17 = load <4 x float>, <4 x float> addrspace(9)* null
- %18 = extractelement <4 x float> %17, i32 0
- %19 = load <4 x float>, <4 x float> addrspace(9)* null
- %20 = extractelement <4 x float> %19, i32 1
- %21 = load <4 x float>, <4 x float> addrspace(9)* null
- %22 = extractelement <4 x float> %21, i32 2
+ %tmp = load <4 x float>, <4 x float> addrspace(9)* null
+ %tmp5 = extractelement <4 x float> %tmp, i32 3
+ %tmp6 = fptosi float %tmp5 to i32
+ %tmp7 = bitcast i32 %tmp6 to float
+ %tmp8 = bitcast float %tmp7 to i32
+ %tmp9 = sdiv i32 %tmp8, 4
+ %tmp10 = bitcast i32 %tmp9 to float
+ %tmp11 = bitcast float %tmp10 to i32
+ %tmp12 = mul i32 %tmp11, 4
+ %tmp13 = bitcast i32 %tmp12 to float
+ %tmp14 = bitcast float %tmp13 to i32
+ %tmp15 = sub i32 0, %tmp14
+ %tmp16 = bitcast i32 %tmp15 to float
+ %tmp17 = bitcast float %tmp7 to i32
+ %tmp18 = bitcast float %tmp16 to i32
+ %tmp19 = add i32 %tmp17, %tmp18
+ %tmp20 = bitcast i32 %tmp19 to float
+ %tmp21 = load <4 x float>, <4 x float> addrspace(9)* null
+ %tmp22 = extractelement <4 x float> %tmp21, i32 0
+ %tmp23 = load <4 x float>, <4 x float> addrspace(9)* null
+ %tmp24 = extractelement <4 x float> %tmp23, i32 1
+ %tmp25 = load <4 x float>, <4 x float> addrspace(9)* null
+ %tmp26 = extractelement <4 x float> %tmp25, i32 2
br label %LOOP
LOOP: ; preds = %IF31, %main_body
- %temp12.0 = phi float [ 0.000000e+00, %main_body ], [ %47, %IF31 ]
- %temp6.0 = phi float [ %22, %main_body ], [ %temp6.1, %IF31 ]
- %temp5.0 = phi float [ %20, %main_body ], [ %temp5.1, %IF31 ]
- %temp4.0 = phi float [ %18, %main_body ], [ %temp4.1, %IF31 ]
- %23 = bitcast float %temp12.0 to i32
- %24 = bitcast float %6 to i32
- %25 = icmp sge i32 %23, %24
- %26 = sext i1 %25 to i32
- %27 = bitcast i32 %26 to float
- %28 = bitcast float %27 to i32
- %29 = icmp ne i32 %28, 0
- br i1 %29, label %IF, label %LOOP29
+ %temp12.0 = phi float [ 0.000000e+00, %main_body ], [ %tmp47, %IF31 ]
+ %temp6.0 = phi float [ %tmp26, %main_body ], [ %temp6.1, %IF31 ]
+ %temp5.0 = phi float [ %tmp24, %main_body ], [ %temp5.1, %IF31 ]
+ %temp4.0 = phi float [ %tmp22, %main_body ], [ %temp4.1, %IF31 ]
+ %tmp27 = bitcast float %temp12.0 to i32
+ %tmp28 = bitcast float %tmp10 to i32
+ %tmp29 = icmp sge i32 %tmp27, %tmp28
+ %tmp30 = sext i1 %tmp29 to i32
+ %tmp31 = bitcast i32 %tmp30 to float
+ %tmp32 = bitcast float %tmp31 to i32
+ %tmp33 = icmp ne i32 %tmp32, 0
+ br i1 %tmp33, label %IF, label %LOOP29
IF: ; preds = %LOOP
- %30 = call float @llvm.AMDGPU.clamp.f32(float %temp4.0, float 0.000000e+00, float 1.000000e+00)
- %31 = call float @llvm.AMDGPU.clamp.f32(float %temp5.0, float 0.000000e+00, float 1.000000e+00)
- %32 = call float @llvm.AMDGPU.clamp.f32(float %temp6.0, float 0.000000e+00, float 1.000000e+00)
- %33 = call float @llvm.AMDGPU.clamp.f32(float 1.000000e+00, float 0.000000e+00, float 1.000000e+00)
- %34 = insertelement <4 x float> undef, float %30, i32 0
- %35 = insertelement <4 x float> %34, float %31, i32 1
- %36 = insertelement <4 x float> %35, float %32, i32 2
- %37 = insertelement <4 x float> %36, float %33, i32 3
- call void @llvm.r600.store.swizzle(<4 x float> %37, i32 0, i32 0)
+ %max.0.i = call float @llvm.maxnum.f32(float %temp4.0, float 0.000000e+00)
+ %clamp.i = call float @llvm.minnum.f32(float %max.0.i, float 1.000000e+00)
+ %max.0.i3 = call float @llvm.maxnum.f32(float %temp5.0, float 0.000000e+00)
+ %clamp.i4 = call float @llvm.minnum.f32(float %max.0.i3, float 1.000000e+00)
+ %max.0.i1 = call float @llvm.maxnum.f32(float %temp6.0, float 0.000000e+00)
+ %clamp.i2 = call float @llvm.minnum.f32(float %max.0.i1, float 1.000000e+00)
+ %tmp34 = insertelement <4 x float> undef, float %clamp.i, i32 0
+ %tmp35 = insertelement <4 x float> %tmp34, float %clamp.i4, i32 1
+ %tmp36 = insertelement <4 x float> %tmp35, float %clamp.i2, i32 2
+ %tmp37 = insertelement <4 x float> %tmp36, float 1.000000e+00, i32 3
+ call void @llvm.r600.store.swizzle(<4 x float> %tmp37, i32 0, i32 0)
ret void
-LOOP29: ; preds = %LOOP, %ENDIF30
+LOOP29: ; preds = %ENDIF30, %LOOP
%temp6.1 = phi float [ %temp4.1, %ENDIF30 ], [ %temp6.0, %LOOP ]
%temp5.1 = phi float [ %temp6.1, %ENDIF30 ], [ %temp5.0, %LOOP ]
%temp4.1 = phi float [ %temp5.1, %ENDIF30 ], [ %temp4.0, %LOOP ]
- %temp20.0 = phi float [ %50, %ENDIF30 ], [ 0.000000e+00, %LOOP ]
- %38 = bitcast float %temp20.0 to i32
- %39 = bitcast float %16 to i32
- %40 = icmp sge i32 %38, %39
- %41 = sext i1 %40 to i32
- %42 = bitcast i32 %41 to float
- %43 = bitcast float %42 to i32
- %44 = icmp ne i32 %43, 0
- br i1 %44, label %IF31, label %ENDIF30
+ %temp20.0 = phi float [ %tmp50, %ENDIF30 ], [ 0.000000e+00, %LOOP ]
+ %tmp38 = bitcast float %temp20.0 to i32
+ %tmp39 = bitcast float %tmp20 to i32
+ %tmp40 = icmp sge i32 %tmp38, %tmp39
+ %tmp41 = sext i1 %tmp40 to i32
+ %tmp42 = bitcast i32 %tmp41 to float
+ %tmp43 = bitcast float %tmp42 to i32
+ %tmp44 = icmp ne i32 %tmp43, 0
+ br i1 %tmp44, label %IF31, label %ENDIF30
IF31: ; preds = %LOOP29
- %45 = bitcast float %temp12.0 to i32
- %46 = add i32 %45, 1
- %47 = bitcast i32 %46 to float
+ %tmp45 = bitcast float %temp12.0 to i32
+ %tmp46 = add i32 %tmp45, 1
+ %tmp47 = bitcast i32 %tmp46 to float
br label %LOOP
ENDIF30: ; preds = %LOOP29
- %48 = bitcast float %temp20.0 to i32
- %49 = add i32 %48, 1
- %50 = bitcast i32 %49 to float
+ %tmp48 = bitcast float %temp20.0 to i32
+ %tmp49 = add i32 %tmp48, 1
+ %tmp50 = bitcast i32 %tmp49 to float
br label %LOOP29
}
-declare float @llvm.AMDGPU.clamp.f32(float, float, float) #0
+declare void @llvm.r600.store.swizzle(<4 x float>, i32, i32) #0
+declare float @llvm.minnum.f32(float, float) #1
+declare float @llvm.maxnum.f32(float, float) #1
-declare void @llvm.r600.store.swizzle(<4 x float>, i32, i32)
-
-attributes #0 = { readnone }
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/schedule-fs-loop.ll b/test/CodeGen/AMDGPU/schedule-fs-loop.ll
index 5839785f00d5..6cd419f6cfc4 100644
--- a/test/CodeGen/AMDGPU/schedule-fs-loop.ll
+++ b/test/CodeGen/AMDGPU/schedule-fs-loop.ll
@@ -1,55 +1,84 @@
-;RUN: llc < %s -march=r600 -mcpu=cayman -stress-sched -verify-misched -verify-machineinstrs
-;REQUIRES: asserts
+; RUN: llc -march=r600 -mcpu=cayman -stress-sched -verify-misched -verify-machineinstrs < %s
+; REQUIRES: asserts
-define void @main() {
+define amdgpu_vs void @main(<4 x float> inreg %reg0, <4 x float> inreg %reg1) #0 {
main_body:
- %0 = load <4 x float>, <4 x float> addrspace(9)* null
- %1 = extractelement <4 x float> %0, i32 3
- %2 = fptosi float %1 to i32
- %3 = bitcast i32 %2 to float
- %4 = load <4 x float>, <4 x float> addrspace(9)* null
- %5 = extractelement <4 x float> %4, i32 0
- %6 = load <4 x float>, <4 x float> addrspace(9)* null
- %7 = extractelement <4 x float> %6, i32 1
- %8 = load <4 x float>, <4 x float> addrspace(9)* null
- %9 = extractelement <4 x float> %8, i32 2
- br label %LOOP
+ %tmp = extractelement <4 x float> %reg1, i32 0
+ %tmp5 = extractelement <4 x float> %reg1, i32 1
+ %tmp6 = extractelement <4 x float> %reg1, i32 2
+ %tmp7 = extractelement <4 x float> %reg1, i32 3
+ %tmp8 = fcmp ult float %tmp5, 0.000000e+00
+ %tmp9 = select i1 %tmp8, float 1.000000e+00, float 0.000000e+00
+ %tmp10 = fsub float -0.000000e+00, %tmp9
+ %tmp11 = fptosi float %tmp10 to i32
+ %tmp12 = bitcast i32 %tmp11 to float
+ %tmp13 = fcmp ult float %tmp, 5.700000e+01
+ %tmp14 = select i1 %tmp13, float 1.000000e+00, float 0.000000e+00
+ %tmp15 = fsub float -0.000000e+00, %tmp14
+ %tmp16 = fptosi float %tmp15 to i32
+ %tmp17 = bitcast i32 %tmp16 to float
+ %tmp18 = bitcast float %tmp12 to i32
+ %tmp19 = bitcast float %tmp17 to i32
+ %tmp20 = and i32 %tmp18, %tmp19
+ %tmp21 = bitcast i32 %tmp20 to float
+ %tmp22 = bitcast float %tmp21 to i32
+ %tmp23 = icmp ne i32 %tmp22, 0
+ %tmp24 = fcmp ult float %tmp, 0.000000e+00
+ %tmp25 = select i1 %tmp24, float 1.000000e+00, float 0.000000e+00
+ %tmp26 = fsub float -0.000000e+00, %tmp25
+ %tmp27 = fptosi float %tmp26 to i32
+ %tmp28 = bitcast i32 %tmp27 to float
+ %tmp29 = bitcast float %tmp28 to i32
+ %tmp30 = icmp ne i32 %tmp29, 0
+ br i1 %tmp23, label %IF, label %ELSE
-LOOP: ; preds = %ENDIF, %main_body
- %temp4.0 = phi float [ %5, %main_body ], [ %temp5.0, %ENDIF ]
- %temp5.0 = phi float [ %7, %main_body ], [ %temp6.0, %ENDIF ]
- %temp6.0 = phi float [ %9, %main_body ], [ %temp4.0, %ENDIF ]
- %temp8.0 = phi float [ 0.000000e+00, %main_body ], [ %27, %ENDIF ]
- %10 = bitcast float %temp8.0 to i32
- %11 = bitcast float %3 to i32
- %12 = icmp sge i32 %10, %11
- %13 = sext i1 %12 to i32
- %14 = bitcast i32 %13 to float
- %15 = bitcast float %14 to i32
- %16 = icmp ne i32 %15, 0
- br i1 %16, label %IF, label %ENDIF
+IF: ; preds = %main_body
+ %. = select i1 %tmp30, float 0.000000e+00, float 1.000000e+00
+ %.18 = select i1 %tmp30, float 1.000000e+00, float 0.000000e+00
+ br label %ENDIF
-IF: ; preds = %LOOP
- %17 = call float @llvm.AMDGPU.clamp.f32(float %temp4.0, float 0.000000e+00, float 1.000000e+00)
- %18 = call float @llvm.AMDGPU.clamp.f32(float %temp5.0, float 0.000000e+00, float 1.000000e+00)
- %19 = call float @llvm.AMDGPU.clamp.f32(float %temp6.0, float 0.000000e+00, float 1.000000e+00)
- %20 = call float @llvm.AMDGPU.clamp.f32(float 1.000000e+00, float 0.000000e+00, float 1.000000e+00)
- %21 = insertelement <4 x float> undef, float %17, i32 0
- %22 = insertelement <4 x float> %21, float %18, i32 1
- %23 = insertelement <4 x float> %22, float %19, i32 2
- %24 = insertelement <4 x float> %23, float %20, i32 3
- call void @llvm.r600.store.swizzle(<4 x float> %24, i32 0, i32 0)
+ELSE: ; preds = %main_body
+ br i1 %tmp30, label %ENDIF, label %ELSE17
+
+ENDIF: ; preds = %ELSE17, %ELSE, %IF
+ %temp1.0 = phi float [ %., %IF ], [ %tmp48, %ELSE17 ], [ 0.000000e+00, %ELSE ]
+ %temp2.0 = phi float [ 0.000000e+00, %IF ], [ %tmp49, %ELSE17 ], [ 1.000000e+00, %ELSE ]
+ %temp.0 = phi float [ %.18, %IF ], [ %tmp47, %ELSE17 ], [ 0.000000e+00, %ELSE ]
+ %max.0.i = call float @llvm.maxnum.f32(float %temp.0, float 0.000000e+00)
+ %clamp.i = call float @llvm.minnum.f32(float %max.0.i, float 1.000000e+00)
+ %max.0.i3 = call float @llvm.maxnum.f32(float %temp1.0, float 0.000000e+00)
+ %clamp.i4 = call float @llvm.minnum.f32(float %max.0.i3, float 1.000000e+00)
+ %max.0.i1 = call float @llvm.maxnum.f32(float %temp2.0, float 0.000000e+00)
+ %clamp.i2 = call float @llvm.minnum.f32(float %max.0.i1, float 1.000000e+00)
+ %tmp31 = insertelement <4 x float> undef, float %clamp.i, i32 0
+ %tmp32 = insertelement <4 x float> %tmp31, float %clamp.i4, i32 1
+ %tmp33 = insertelement <4 x float> %tmp32, float %clamp.i2, i32 2
+ %tmp34 = insertelement <4 x float> %tmp33, float 1.000000e+00, i32 3
+ call void @llvm.r600.store.swizzle(<4 x float> %tmp34, i32 0, i32 0)
ret void
-ENDIF: ; preds = %LOOP
- %25 = bitcast float %temp8.0 to i32
- %26 = add i32 %25, 1
- %27 = bitcast i32 %26 to float
- br label %LOOP
+ELSE17: ; preds = %ELSE
+ %tmp35 = fadd float 0.000000e+00, 0x3FC99999A0000000
+ %tmp36 = fadd float 0.000000e+00, 0x3FC99999A0000000
+ %tmp37 = fadd float 0.000000e+00, 0x3FC99999A0000000
+ %tmp38 = fadd float %tmp35, 0x3FC99999A0000000
+ %tmp39 = fadd float %tmp36, 0x3FC99999A0000000
+ %tmp40 = fadd float %tmp37, 0x3FC99999A0000000
+ %tmp41 = fadd float %tmp38, 0x3FC99999A0000000
+ %tmp42 = fadd float %tmp39, 0x3FC99999A0000000
+ %tmp43 = fadd float %tmp40, 0x3FC99999A0000000
+ %tmp44 = fadd float %tmp41, 0x3FC99999A0000000
+ %tmp45 = fadd float %tmp42, 0x3FC99999A0000000
+ %tmp46 = fadd float %tmp43, 0x3FC99999A0000000
+ %tmp47 = fadd float %tmp44, 0x3FC99999A0000000
+ %tmp48 = fadd float %tmp45, 0x3FC99999A0000000
+ %tmp49 = fadd float %tmp46, 0x3FC99999A0000000
+ br label %ENDIF
}
-declare float @llvm.AMDGPU.clamp.f32(float, float, float) #0
-
-declare void @llvm.r600.store.swizzle(<4 x float>, i32, i32)
+declare float @llvm.minnum.f32(float, float) #1
+declare float @llvm.maxnum.f32(float, float) #1
+declare void @llvm.r600.store.swizzle(<4 x float>, i32, i32) #0
-attributes #0 = { readnone }
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/schedule-global-loads.ll b/test/CodeGen/AMDGPU/schedule-global-loads.ll
index 32c456bd2ceb..44d46086f02a 100644
--- a/test/CodeGen/AMDGPU/schedule-global-loads.ll
+++ b/test/CodeGen/AMDGPU/schedule-global-loads.ll
@@ -10,7 +10,7 @@
; SI-DAG: buffer_load_dword [[REG1:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:8
; SI: buffer_store_dword [[REG0]]
; SI: buffer_store_dword [[REG1]]
-define void @cluster_global_arg_loads(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @cluster_global_arg_loads(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %ptr) #0 {
%load0 = load i32, i32 addrspace(1)* %ptr, align 4
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 2
%load1 = load i32, i32 addrspace(1)* %gep, align 4
@@ -24,7 +24,7 @@ define void @cluster_global_arg_loads(i32 addrspace(1)* %out0, i32 addrspace(1)*
; FUNC-LABEL: {{^}}same_base_ptr_crash:
; SI: buffer_load_dword
; SI: buffer_load_dword
-define void @same_base_ptr_crash(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %offset) {
+define amdgpu_kernel void @same_base_ptr_crash(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %offset) {
entry:
%out1 = getelementptr i32, i32 addrspace(1)* %out, i32 %offset
%tmp0 = load i32, i32 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/schedule-if-2.ll b/test/CodeGen/AMDGPU/schedule-if-2.ll
index aa67b2e0f7db..964298a55318 100644
--- a/test/CodeGen/AMDGPU/schedule-if-2.ll
+++ b/test/CodeGen/AMDGPU/schedule-if-2.ll
@@ -1,7 +1,7 @@
;RUN: llc < %s -march=r600 -mcpu=cayman -stress-sched -verify-misched -verify-machineinstrs
;REQUIRES: asserts
-define void @main() {
+define amdgpu_kernel void @main() {
main_body:
%0 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
%1 = extractelement <4 x float> %0, i32 0
diff --git a/test/CodeGen/AMDGPU/schedule-if.ll b/test/CodeGen/AMDGPU/schedule-if.ll
index 6637b3897717..feac5d918f63 100644
--- a/test/CodeGen/AMDGPU/schedule-if.ll
+++ b/test/CodeGen/AMDGPU/schedule-if.ll
@@ -1,7 +1,7 @@
;RUN: llc < %s -march=r600 -mcpu=cayman -stress-sched -verify-misched -verify-machineinstrs
;REQUIRES: asserts
-define void @main() {
+define amdgpu_kernel void @main() {
main_body:
%0 = load <4 x float>, <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>], [1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
%1 = extractelement <4 x float> %0, i32 0
diff --git a/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll b/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll
index ccfde7b9adc5..5c47c163dcce 100644
--- a/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll
+++ b/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll
@@ -12,7 +12,7 @@
; VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x24
; VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
; VI: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x38
-define void @cluster_arg_loads(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %x, i32 %y) nounwind {
+define amdgpu_kernel void @cluster_arg_loads(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %x, i32 %y) nounwind {
store i32 %x, i32 addrspace(1)* %out0, align 4
store i32 %y, i32 addrspace(1)* %out1, align 4
ret void
@@ -26,7 +26,7 @@ define void @cluster_arg_loads(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1,
; GCN: s_load_dwordx2
; GCN: s_load_dwordx2
; GCN: s_endpgm
-define void @same_base_ptr_crash(i64 addrspace(1)* %out,
+define amdgpu_kernel void @same_base_ptr_crash(i64 addrspace(1)* %out,
i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %arg5, i64 %arg6, i64 %arg7,
i64 %arg8, i64 %arg9, i64 %arg10, i64 %arg11, i64 %arg12, i64 %arg13, i64 %arg14, i64 %arg15,
i64 %arg16, i64 %arg17, i64 %arg18, i64 %arg19, i64 %arg20, i64 %arg21, i64 %arg22, i64 %arg23,
diff --git a/test/CodeGen/AMDGPU/schedule-regpressure-limit.ll b/test/CodeGen/AMDGPU/schedule-regpressure-limit.ll
new file mode 100644
index 000000000000..4520fe86136f
--- /dev/null
+++ b/test/CodeGen/AMDGPU/schedule-regpressure-limit.ll
@@ -0,0 +1,591 @@
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tonga -misched=gcn-minreg -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tonga -misched=gcn-max-occupancy-experimental -verify-machineinstrs < %s | FileCheck %s
+
+; We expect two-digit VGPR usage here, not three-digit.
+; CHECK: NumVgprs: {{[0-9][0-9]$}}
+
+define amdgpu_kernel void @load_fma_store(float addrspace(3)* nocapture readonly %arg, float addrspace(1)* nocapture %arg1) {
+bb:
+ %tmp = getelementptr inbounds float, float addrspace(3)* %arg, i32 1
+ %tmp2 = load float, float addrspace(3)* %tmp, align 4
+ %tmp3 = getelementptr inbounds float, float addrspace(3)* %arg, i32 2
+ %tmp4 = load float, float addrspace(3)* %tmp3, align 4
+ %tmp5 = getelementptr inbounds float, float addrspace(3)* %arg, i32 3
+ %tmp6 = load float, float addrspace(3)* %tmp5, align 4
+ %tmp7 = tail call float @llvm.fmuladd.f32(float %tmp2, float %tmp4, float %tmp6)
+ %tmp8 = getelementptr inbounds float, float addrspace(3)* %arg, i32 5
+ %tmp9 = load float, float addrspace(3)* %tmp8, align 4
+ %tmp10 = getelementptr inbounds float, float addrspace(3)* %arg, i32 6
+ %tmp11 = load float, float addrspace(3)* %tmp10, align 4
+ %tmp12 = getelementptr inbounds float, float addrspace(3)* %arg, i32 7
+ %tmp13 = load float, float addrspace(3)* %tmp12, align 4
+ %tmp14 = tail call float @llvm.fmuladd.f32(float %tmp9, float %tmp11, float %tmp13)
+ %tmp15 = getelementptr inbounds float, float addrspace(3)* %arg, i32 9
+ %tmp16 = load float, float addrspace(3)* %tmp15, align 4
+ %tmp17 = getelementptr inbounds float, float addrspace(3)* %arg, i32 10
+ %tmp18 = load float, float addrspace(3)* %tmp17, align 4
+ %tmp19 = getelementptr inbounds float, float addrspace(3)* %arg, i32 11
+ %tmp20 = load float, float addrspace(3)* %tmp19, align 4
+ %tmp21 = tail call float @llvm.fmuladd.f32(float %tmp16, float %tmp18, float %tmp20)
+ %tmp22 = getelementptr inbounds float, float addrspace(3)* %arg, i32 13
+ %tmp23 = load float, float addrspace(3)* %tmp22, align 4
+ %tmp24 = getelementptr inbounds float, float addrspace(3)* %arg, i32 14
+ %tmp25 = load float, float addrspace(3)* %tmp24, align 4
+ %tmp26 = getelementptr inbounds float, float addrspace(3)* %arg, i32 15
+ %tmp27 = load float, float addrspace(3)* %tmp26, align 4
+ %tmp28 = tail call float @llvm.fmuladd.f32(float %tmp23, float %tmp25, float %tmp27)
+ %tmp29 = getelementptr inbounds float, float addrspace(3)* %arg, i32 17
+ %tmp30 = load float, float addrspace(3)* %tmp29, align 4
+ %tmp31 = getelementptr inbounds float, float addrspace(3)* %arg, i32 18
+ %tmp32 = load float, float addrspace(3)* %tmp31, align 4
+ %tmp33 = getelementptr inbounds float, float addrspace(3)* %arg, i32 19
+ %tmp34 = load float, float addrspace(3)* %tmp33, align 4
+ %tmp35 = tail call float @llvm.fmuladd.f32(float %tmp30, float %tmp32, float %tmp34)
+ %tmp36 = getelementptr inbounds float, float addrspace(3)* %arg, i32 21
+ %tmp37 = load float, float addrspace(3)* %tmp36, align 4
+ %tmp38 = getelementptr inbounds float, float addrspace(3)* %arg, i32 22
+ %tmp39 = load float, float addrspace(3)* %tmp38, align 4
+ %tmp40 = getelementptr inbounds float, float addrspace(3)* %arg, i32 23
+ %tmp41 = load float, float addrspace(3)* %tmp40, align 4
+ %tmp42 = tail call float @llvm.fmuladd.f32(float %tmp37, float %tmp39, float %tmp41)
+ %tmp43 = getelementptr inbounds float, float addrspace(3)* %arg, i32 25
+ %tmp44 = load float, float addrspace(3)* %tmp43, align 4
+ %tmp45 = getelementptr inbounds float, float addrspace(3)* %arg, i32 26
+ %tmp46 = load float, float addrspace(3)* %tmp45, align 4
+ %tmp47 = getelementptr inbounds float, float addrspace(3)* %arg, i32 27
+ %tmp48 = load float, float addrspace(3)* %tmp47, align 4
+ %tmp49 = tail call float @llvm.fmuladd.f32(float %tmp44, float %tmp46, float %tmp48)
+ %tmp50 = getelementptr inbounds float, float addrspace(3)* %arg, i32 29
+ %tmp51 = load float, float addrspace(3)* %tmp50, align 4
+ %tmp52 = getelementptr inbounds float, float addrspace(3)* %arg, i32 30
+ %tmp53 = load float, float addrspace(3)* %tmp52, align 4
+ %tmp54 = getelementptr inbounds float, float addrspace(3)* %arg, i32 31
+ %tmp55 = load float, float addrspace(3)* %tmp54, align 4
+ %tmp56 = tail call float @llvm.fmuladd.f32(float %tmp51, float %tmp53, float %tmp55)
+ %tmp57 = getelementptr inbounds float, float addrspace(3)* %arg, i32 33
+ %tmp58 = load float, float addrspace(3)* %tmp57, align 4
+ %tmp59 = getelementptr inbounds float, float addrspace(3)* %arg, i32 34
+ %tmp60 = load float, float addrspace(3)* %tmp59, align 4
+ %tmp61 = getelementptr inbounds float, float addrspace(3)* %arg, i32 35
+ %tmp62 = load float, float addrspace(3)* %tmp61, align 4
+ %tmp63 = tail call float @llvm.fmuladd.f32(float %tmp58, float %tmp60, float %tmp62)
+ %tmp64 = getelementptr inbounds float, float addrspace(3)* %arg, i32 37
+ %tmp65 = load float, float addrspace(3)* %tmp64, align 4
+ %tmp66 = getelementptr inbounds float, float addrspace(3)* %arg, i32 38
+ %tmp67 = load float, float addrspace(3)* %tmp66, align 4
+ %tmp68 = getelementptr inbounds float, float addrspace(3)* %arg, i32 39
+ %tmp69 = load float, float addrspace(3)* %tmp68, align 4
+ %tmp70 = tail call float @llvm.fmuladd.f32(float %tmp65, float %tmp67, float %tmp69)
+ %tmp71 = getelementptr inbounds float, float addrspace(3)* %arg, i32 41
+ %tmp72 = load float, float addrspace(3)* %tmp71, align 4
+ %tmp73 = getelementptr inbounds float, float addrspace(3)* %arg, i32 42
+ %tmp74 = load float, float addrspace(3)* %tmp73, align 4
+ %tmp75 = getelementptr inbounds float, float addrspace(3)* %arg, i32 43
+ %tmp76 = load float, float addrspace(3)* %tmp75, align 4
+ %tmp77 = tail call float @llvm.fmuladd.f32(float %tmp72, float %tmp74, float %tmp76)
+ %tmp78 = getelementptr inbounds float, float addrspace(3)* %arg, i32 45
+ %tmp79 = load float, float addrspace(3)* %tmp78, align 4
+ %tmp80 = getelementptr inbounds float, float addrspace(3)* %arg, i32 46
+ %tmp81 = load float, float addrspace(3)* %tmp80, align 4
+ %tmp82 = getelementptr inbounds float, float addrspace(3)* %arg, i32 47
+ %tmp83 = load float, float addrspace(3)* %tmp82, align 4
+ %tmp84 = tail call float @llvm.fmuladd.f32(float %tmp79, float %tmp81, float %tmp83)
+ %tmp85 = getelementptr inbounds float, float addrspace(3)* %arg, i32 49
+ %tmp86 = load float, float addrspace(3)* %tmp85, align 4
+ %tmp87 = getelementptr inbounds float, float addrspace(3)* %arg, i32 50
+ %tmp88 = load float, float addrspace(3)* %tmp87, align 4
+ %tmp89 = getelementptr inbounds float, float addrspace(3)* %arg, i32 51
+ %tmp90 = load float, float addrspace(3)* %tmp89, align 4
+ %tmp91 = tail call float @llvm.fmuladd.f32(float %tmp86, float %tmp88, float %tmp90)
+ %tmp92 = getelementptr inbounds float, float addrspace(3)* %arg, i32 53
+ %tmp93 = load float, float addrspace(3)* %tmp92, align 4
+ %tmp94 = getelementptr inbounds float, float addrspace(3)* %arg, i32 54
+ %tmp95 = load float, float addrspace(3)* %tmp94, align 4
+ %tmp96 = getelementptr inbounds float, float addrspace(3)* %arg, i32 55
+ %tmp97 = load float, float addrspace(3)* %tmp96, align 4
+ %tmp98 = tail call float @llvm.fmuladd.f32(float %tmp93, float %tmp95, float %tmp97)
+ %tmp99 = getelementptr inbounds float, float addrspace(3)* %arg, i32 57
+ %tmp100 = load float, float addrspace(3)* %tmp99, align 4
+ %tmp101 = getelementptr inbounds float, float addrspace(3)* %arg, i32 58
+ %tmp102 = load float, float addrspace(3)* %tmp101, align 4
+ %tmp103 = getelementptr inbounds float, float addrspace(3)* %arg, i32 59
+ %tmp104 = load float, float addrspace(3)* %tmp103, align 4
+ %tmp105 = tail call float @llvm.fmuladd.f32(float %tmp100, float %tmp102, float %tmp104)
+ %tmp106 = getelementptr inbounds float, float addrspace(3)* %arg, i32 61
+ %tmp107 = load float, float addrspace(3)* %tmp106, align 4
+ %tmp108 = getelementptr inbounds float, float addrspace(3)* %arg, i32 62
+ %tmp109 = load float, float addrspace(3)* %tmp108, align 4
+ %tmp110 = getelementptr inbounds float, float addrspace(3)* %arg, i32 63
+ %tmp111 = load float, float addrspace(3)* %tmp110, align 4
+ %tmp112 = tail call float @llvm.fmuladd.f32(float %tmp107, float %tmp109, float %tmp111)
+ %tmp113 = getelementptr inbounds float, float addrspace(3)* %arg, i32 65
+ %tmp114 = load float, float addrspace(3)* %tmp113, align 4
+ %tmp115 = getelementptr inbounds float, float addrspace(3)* %arg, i32 66
+ %tmp116 = load float, float addrspace(3)* %tmp115, align 4
+ %tmp117 = getelementptr inbounds float, float addrspace(3)* %arg, i32 67
+ %tmp118 = load float, float addrspace(3)* %tmp117, align 4
+ %tmp119 = tail call float @llvm.fmuladd.f32(float %tmp114, float %tmp116, float %tmp118)
+ %tmp120 = getelementptr inbounds float, float addrspace(3)* %arg, i32 69
+ %tmp121 = load float, float addrspace(3)* %tmp120, align 4
+ %tmp122 = getelementptr inbounds float, float addrspace(3)* %arg, i32 70
+ %tmp123 = load float, float addrspace(3)* %tmp122, align 4
+ %tmp124 = getelementptr inbounds float, float addrspace(3)* %arg, i32 71
+ %tmp125 = load float, float addrspace(3)* %tmp124, align 4
+ %tmp126 = tail call float @llvm.fmuladd.f32(float %tmp121, float %tmp123, float %tmp125)
+ %tmp127 = getelementptr inbounds float, float addrspace(3)* %arg, i32 73
+ %tmp128 = load float, float addrspace(3)* %tmp127, align 4
+ %tmp129 = getelementptr inbounds float, float addrspace(3)* %arg, i32 74
+ %tmp130 = load float, float addrspace(3)* %tmp129, align 4
+ %tmp131 = getelementptr inbounds float, float addrspace(3)* %arg, i32 75
+ %tmp132 = load float, float addrspace(3)* %tmp131, align 4
+ %tmp133 = tail call float @llvm.fmuladd.f32(float %tmp128, float %tmp130, float %tmp132)
+ %tmp134 = getelementptr inbounds float, float addrspace(3)* %arg, i32 77
+ %tmp135 = load float, float addrspace(3)* %tmp134, align 4
+ %tmp136 = getelementptr inbounds float, float addrspace(3)* %arg, i32 78
+ %tmp137 = load float, float addrspace(3)* %tmp136, align 4
+ %tmp138 = getelementptr inbounds float, float addrspace(3)* %arg, i32 79
+ %tmp139 = load float, float addrspace(3)* %tmp138, align 4
+ %tmp140 = tail call float @llvm.fmuladd.f32(float %tmp135, float %tmp137, float %tmp139)
+ %tmp141 = getelementptr inbounds float, float addrspace(3)* %arg, i32 81
+ %tmp142 = load float, float addrspace(3)* %tmp141, align 4
+ %tmp143 = getelementptr inbounds float, float addrspace(3)* %arg, i32 82
+ %tmp144 = load float, float addrspace(3)* %tmp143, align 4
+ %tmp145 = getelementptr inbounds float, float addrspace(3)* %arg, i32 83
+ %tmp146 = load float, float addrspace(3)* %tmp145, align 4
+ %tmp147 = tail call float @llvm.fmuladd.f32(float %tmp142, float %tmp144, float %tmp146)
+ %tmp148 = getelementptr inbounds float, float addrspace(3)* %arg, i32 85
+ %tmp149 = load float, float addrspace(3)* %tmp148, align 4
+ %tmp150 = getelementptr inbounds float, float addrspace(3)* %arg, i32 86
+ %tmp151 = load float, float addrspace(3)* %tmp150, align 4
+ %tmp152 = getelementptr inbounds float, float addrspace(3)* %arg, i32 87
+ %tmp153 = load float, float addrspace(3)* %tmp152, align 4
+ %tmp154 = tail call float @llvm.fmuladd.f32(float %tmp149, float %tmp151, float %tmp153)
+ %tmp155 = getelementptr inbounds float, float addrspace(3)* %arg, i32 89
+ %tmp156 = load float, float addrspace(3)* %tmp155, align 4
+ %tmp157 = getelementptr inbounds float, float addrspace(3)* %arg, i32 90
+ %tmp158 = load float, float addrspace(3)* %tmp157, align 4
+ %tmp159 = getelementptr inbounds float, float addrspace(3)* %arg, i32 91
+ %tmp160 = load float, float addrspace(3)* %tmp159, align 4
+ %tmp161 = tail call float @llvm.fmuladd.f32(float %tmp156, float %tmp158, float %tmp160)
+ %tmp162 = getelementptr inbounds float, float addrspace(3)* %arg, i32 93
+ %tmp163 = load float, float addrspace(3)* %tmp162, align 4
+ %tmp164 = getelementptr inbounds float, float addrspace(3)* %arg, i32 94
+ %tmp165 = load float, float addrspace(3)* %tmp164, align 4
+ %tmp166 = getelementptr inbounds float, float addrspace(3)* %arg, i32 95
+ %tmp167 = load float, float addrspace(3)* %tmp166, align 4
+ %tmp168 = tail call float @llvm.fmuladd.f32(float %tmp163, float %tmp165, float %tmp167)
+ %tmp169 = getelementptr inbounds float, float addrspace(3)* %arg, i32 97
+ %tmp170 = load float, float addrspace(3)* %tmp169, align 4
+ %tmp171 = getelementptr inbounds float, float addrspace(3)* %arg, i32 98
+ %tmp172 = load float, float addrspace(3)* %tmp171, align 4
+ %tmp173 = getelementptr inbounds float, float addrspace(3)* %arg, i32 99
+ %tmp174 = load float, float addrspace(3)* %tmp173, align 4
+ %tmp175 = tail call float @llvm.fmuladd.f32(float %tmp170, float %tmp172, float %tmp174)
+ %tmp176 = getelementptr inbounds float, float addrspace(3)* %arg, i32 101
+ %tmp177 = load float, float addrspace(3)* %tmp176, align 4
+ %tmp178 = getelementptr inbounds float, float addrspace(3)* %arg, i32 102
+ %tmp179 = load float, float addrspace(3)* %tmp178, align 4
+ %tmp180 = getelementptr inbounds float, float addrspace(3)* %arg, i32 103
+ %tmp181 = load float, float addrspace(3)* %tmp180, align 4
+ %tmp182 = tail call float @llvm.fmuladd.f32(float %tmp177, float %tmp179, float %tmp181)
+ %tmp183 = getelementptr inbounds float, float addrspace(3)* %arg, i32 105
+ %tmp184 = load float, float addrspace(3)* %tmp183, align 4
+ %tmp185 = getelementptr inbounds float, float addrspace(3)* %arg, i32 106
+ %tmp186 = load float, float addrspace(3)* %tmp185, align 4
+ %tmp187 = getelementptr inbounds float, float addrspace(3)* %arg, i32 107
+ %tmp188 = load float, float addrspace(3)* %tmp187, align 4
+ %tmp189 = tail call float @llvm.fmuladd.f32(float %tmp184, float %tmp186, float %tmp188)
+ %tmp190 = getelementptr inbounds float, float addrspace(3)* %arg, i32 109
+ %tmp191 = load float, float addrspace(3)* %tmp190, align 4
+ %tmp192 = getelementptr inbounds float, float addrspace(3)* %arg, i32 110
+ %tmp193 = load float, float addrspace(3)* %tmp192, align 4
+ %tmp194 = getelementptr inbounds float, float addrspace(3)* %arg, i32 111
+ %tmp195 = load float, float addrspace(3)* %tmp194, align 4
+ %tmp196 = tail call float @llvm.fmuladd.f32(float %tmp191, float %tmp193, float %tmp195)
+ %tmp197 = getelementptr inbounds float, float addrspace(3)* %arg, i32 113
+ %tmp198 = load float, float addrspace(3)* %tmp197, align 4
+ %tmp199 = getelementptr inbounds float, float addrspace(3)* %arg, i32 114
+ %tmp200 = load float, float addrspace(3)* %tmp199, align 4
+ %tmp201 = getelementptr inbounds float, float addrspace(3)* %arg, i32 115
+ %tmp202 = load float, float addrspace(3)* %tmp201, align 4
+ %tmp203 = tail call float @llvm.fmuladd.f32(float %tmp198, float %tmp200, float %tmp202)
+ %tmp204 = getelementptr inbounds float, float addrspace(3)* %arg, i32 117
+ %tmp205 = load float, float addrspace(3)* %tmp204, align 4
+ %tmp206 = getelementptr inbounds float, float addrspace(3)* %arg, i32 118
+ %tmp207 = load float, float addrspace(3)* %tmp206, align 4
+ %tmp208 = getelementptr inbounds float, float addrspace(3)* %arg, i32 119
+ %tmp209 = load float, float addrspace(3)* %tmp208, align 4
+ %tmp210 = tail call float @llvm.fmuladd.f32(float %tmp205, float %tmp207, float %tmp209)
+ %tmp211 = getelementptr inbounds float, float addrspace(3)* %arg, i32 121
+ %tmp212 = load float, float addrspace(3)* %tmp211, align 4
+ %tmp213 = getelementptr inbounds float, float addrspace(3)* %arg, i32 122
+ %tmp214 = load float, float addrspace(3)* %tmp213, align 4
+ %tmp215 = getelementptr inbounds float, float addrspace(3)* %arg, i32 123
+ %tmp216 = load float, float addrspace(3)* %tmp215, align 4
+ %tmp217 = tail call float @llvm.fmuladd.f32(float %tmp212, float %tmp214, float %tmp216)
+ %tmp218 = getelementptr inbounds float, float addrspace(3)* %arg, i32 125
+ %tmp219 = load float, float addrspace(3)* %tmp218, align 4
+ %tmp220 = getelementptr inbounds float, float addrspace(3)* %arg, i32 126
+ %tmp221 = load float, float addrspace(3)* %tmp220, align 4
+ %tmp222 = getelementptr inbounds float, float addrspace(3)* %arg, i32 127
+ %tmp223 = load float, float addrspace(3)* %tmp222, align 4
+ %tmp224 = tail call float @llvm.fmuladd.f32(float %tmp219, float %tmp221, float %tmp223)
+ %tmp225 = getelementptr inbounds float, float addrspace(3)* %arg, i32 129
+ %tmp226 = load float, float addrspace(3)* %tmp225, align 4
+ %tmp227 = getelementptr inbounds float, float addrspace(3)* %arg, i32 130
+ %tmp228 = load float, float addrspace(3)* %tmp227, align 4
+ %tmp229 = getelementptr inbounds float, float addrspace(3)* %arg, i32 131
+ %tmp230 = load float, float addrspace(3)* %tmp229, align 4
+ %tmp231 = tail call float @llvm.fmuladd.f32(float %tmp226, float %tmp228, float %tmp230)
+ %tmp232 = getelementptr inbounds float, float addrspace(3)* %arg, i32 133
+ %tmp233 = load float, float addrspace(3)* %tmp232, align 4
+ %tmp234 = getelementptr inbounds float, float addrspace(3)* %arg, i32 134
+ %tmp235 = load float, float addrspace(3)* %tmp234, align 4
+ %tmp236 = getelementptr inbounds float, float addrspace(3)* %arg, i32 135
+ %tmp237 = load float, float addrspace(3)* %tmp236, align 4
+ %tmp238 = tail call float @llvm.fmuladd.f32(float %tmp233, float %tmp235, float %tmp237)
+ %tmp239 = getelementptr inbounds float, float addrspace(3)* %arg, i32 137
+ %tmp240 = load float, float addrspace(3)* %tmp239, align 4
+ %tmp241 = getelementptr inbounds float, float addrspace(3)* %arg, i32 138
+ %tmp242 = load float, float addrspace(3)* %tmp241, align 4
+ %tmp243 = getelementptr inbounds float, float addrspace(3)* %arg, i32 139
+ %tmp244 = load float, float addrspace(3)* %tmp243, align 4
+ %tmp245 = tail call float @llvm.fmuladd.f32(float %tmp240, float %tmp242, float %tmp244)
+ %tmp246 = getelementptr inbounds float, float addrspace(3)* %arg, i32 141
+ %tmp247 = load float, float addrspace(3)* %tmp246, align 4
+ %tmp248 = getelementptr inbounds float, float addrspace(3)* %arg, i32 142
+ %tmp249 = load float, float addrspace(3)* %tmp248, align 4
+ %tmp250 = getelementptr inbounds float, float addrspace(3)* %arg, i32 143
+ %tmp251 = load float, float addrspace(3)* %tmp250, align 4
+ %tmp252 = tail call float @llvm.fmuladd.f32(float %tmp247, float %tmp249, float %tmp251)
+ %tmp253 = getelementptr inbounds float, float addrspace(3)* %arg, i32 145
+ %tmp254 = load float, float addrspace(3)* %tmp253, align 4
+ %tmp255 = getelementptr inbounds float, float addrspace(3)* %arg, i32 146
+ %tmp256 = load float, float addrspace(3)* %tmp255, align 4
+ %tmp257 = getelementptr inbounds float, float addrspace(3)* %arg, i32 147
+ %tmp258 = load float, float addrspace(3)* %tmp257, align 4
+ %tmp259 = tail call float @llvm.fmuladd.f32(float %tmp254, float %tmp256, float %tmp258)
+ %tmp260 = getelementptr inbounds float, float addrspace(3)* %arg, i32 149
+ %tmp261 = load float, float addrspace(3)* %tmp260, align 4
+ %tmp262 = getelementptr inbounds float, float addrspace(3)* %arg, i32 150
+ %tmp263 = load float, float addrspace(3)* %tmp262, align 4
+ %tmp264 = getelementptr inbounds float, float addrspace(3)* %arg, i32 151
+ %tmp265 = load float, float addrspace(3)* %tmp264, align 4
+ %tmp266 = tail call float @llvm.fmuladd.f32(float %tmp261, float %tmp263, float %tmp265)
+ %tmp267 = getelementptr inbounds float, float addrspace(3)* %arg, i32 153
+ %tmp268 = load float, float addrspace(3)* %tmp267, align 4
+ %tmp269 = getelementptr inbounds float, float addrspace(3)* %arg, i32 154
+ %tmp270 = load float, float addrspace(3)* %tmp269, align 4
+ %tmp271 = getelementptr inbounds float, float addrspace(3)* %arg, i32 155
+ %tmp272 = load float, float addrspace(3)* %tmp271, align 4
+ %tmp273 = tail call float @llvm.fmuladd.f32(float %tmp268, float %tmp270, float %tmp272)
+ %tmp274 = getelementptr inbounds float, float addrspace(3)* %arg, i32 157
+ %tmp275 = load float, float addrspace(3)* %tmp274, align 4
+ %tmp276 = getelementptr inbounds float, float addrspace(3)* %arg, i32 158
+ %tmp277 = load float, float addrspace(3)* %tmp276, align 4
+ %tmp278 = getelementptr inbounds float, float addrspace(3)* %arg, i32 159
+ %tmp279 = load float, float addrspace(3)* %tmp278, align 4
+ %tmp280 = tail call float @llvm.fmuladd.f32(float %tmp275, float %tmp277, float %tmp279)
+ %tmp281 = getelementptr inbounds float, float addrspace(3)* %arg, i32 161
+ %tmp282 = load float, float addrspace(3)* %tmp281, align 4
+ %tmp283 = getelementptr inbounds float, float addrspace(3)* %arg, i32 162
+ %tmp284 = load float, float addrspace(3)* %tmp283, align 4
+ %tmp285 = getelementptr inbounds float, float addrspace(3)* %arg, i32 163
+ %tmp286 = load float, float addrspace(3)* %tmp285, align 4
+ %tmp287 = tail call float @llvm.fmuladd.f32(float %tmp282, float %tmp284, float %tmp286)
+ %tmp288 = getelementptr inbounds float, float addrspace(3)* %arg, i32 165
+ %tmp289 = load float, float addrspace(3)* %tmp288, align 4
+ %tmp290 = getelementptr inbounds float, float addrspace(3)* %arg, i32 166
+ %tmp291 = load float, float addrspace(3)* %tmp290, align 4
+ %tmp292 = getelementptr inbounds float, float addrspace(3)* %arg, i32 167
+ %tmp293 = load float, float addrspace(3)* %tmp292, align 4
+ %tmp294 = tail call float @llvm.fmuladd.f32(float %tmp289, float %tmp291, float %tmp293)
+ %tmp295 = getelementptr inbounds float, float addrspace(3)* %arg, i32 169
+ %tmp296 = load float, float addrspace(3)* %tmp295, align 4
+ %tmp297 = getelementptr inbounds float, float addrspace(3)* %arg, i32 170
+ %tmp298 = load float, float addrspace(3)* %tmp297, align 4
+ %tmp299 = getelementptr inbounds float, float addrspace(3)* %arg, i32 171
+ %tmp300 = load float, float addrspace(3)* %tmp299, align 4
+ %tmp301 = tail call float @llvm.fmuladd.f32(float %tmp296, float %tmp298, float %tmp300)
+ %tmp302 = getelementptr inbounds float, float addrspace(3)* %arg, i32 173
+ %tmp303 = load float, float addrspace(3)* %tmp302, align 4
+ %tmp304 = getelementptr inbounds float, float addrspace(3)* %arg, i32 174
+ %tmp305 = load float, float addrspace(3)* %tmp304, align 4
+ %tmp306 = getelementptr inbounds float, float addrspace(3)* %arg, i32 175
+ %tmp307 = load float, float addrspace(3)* %tmp306, align 4
+ %tmp308 = tail call float @llvm.fmuladd.f32(float %tmp303, float %tmp305, float %tmp307)
+ %tmp309 = getelementptr inbounds float, float addrspace(3)* %arg, i32 177
+ %tmp310 = load float, float addrspace(3)* %tmp309, align 4
+ %tmp311 = getelementptr inbounds float, float addrspace(3)* %arg, i32 178
+ %tmp312 = load float, float addrspace(3)* %tmp311, align 4
+ %tmp313 = getelementptr inbounds float, float addrspace(3)* %arg, i32 179
+ %tmp314 = load float, float addrspace(3)* %tmp313, align 4
+ %tmp315 = tail call float @llvm.fmuladd.f32(float %tmp310, float %tmp312, float %tmp314)
+ %tmp316 = getelementptr inbounds float, float addrspace(3)* %arg, i32 181
+ %tmp317 = load float, float addrspace(3)* %tmp316, align 4
+ %tmp318 = getelementptr inbounds float, float addrspace(3)* %arg, i32 182
+ %tmp319 = load float, float addrspace(3)* %tmp318, align 4
+ %tmp320 = getelementptr inbounds float, float addrspace(3)* %arg, i32 183
+ %tmp321 = load float, float addrspace(3)* %tmp320, align 4
+ %tmp322 = tail call float @llvm.fmuladd.f32(float %tmp317, float %tmp319, float %tmp321)
+ %tmp323 = getelementptr inbounds float, float addrspace(3)* %arg, i32 185
+ %tmp324 = load float, float addrspace(3)* %tmp323, align 4
+ %tmp325 = getelementptr inbounds float, float addrspace(3)* %arg, i32 186
+ %tmp326 = load float, float addrspace(3)* %tmp325, align 4
+ %tmp327 = getelementptr inbounds float, float addrspace(3)* %arg, i32 187
+ %tmp328 = load float, float addrspace(3)* %tmp327, align 4
+ %tmp329 = tail call float @llvm.fmuladd.f32(float %tmp324, float %tmp326, float %tmp328)
+ %tmp330 = getelementptr inbounds float, float addrspace(3)* %arg, i32 189
+ %tmp331 = load float, float addrspace(3)* %tmp330, align 4
+ %tmp332 = getelementptr inbounds float, float addrspace(3)* %arg, i32 190
+ %tmp333 = load float, float addrspace(3)* %tmp332, align 4
+ %tmp334 = getelementptr inbounds float, float addrspace(3)* %arg, i32 191
+ %tmp335 = load float, float addrspace(3)* %tmp334, align 4
+ %tmp336 = tail call float @llvm.fmuladd.f32(float %tmp331, float %tmp333, float %tmp335)
+ %tmp337 = getelementptr inbounds float, float addrspace(3)* %arg, i32 193
+ %tmp338 = load float, float addrspace(3)* %tmp337, align 4
+ %tmp339 = getelementptr inbounds float, float addrspace(3)* %arg, i32 194
+ %tmp340 = load float, float addrspace(3)* %tmp339, align 4
+ %tmp341 = getelementptr inbounds float, float addrspace(3)* %arg, i32 195
+ %tmp342 = load float, float addrspace(3)* %tmp341, align 4
+ %tmp343 = tail call float @llvm.fmuladd.f32(float %tmp338, float %tmp340, float %tmp342)
+ %tmp344 = getelementptr inbounds float, float addrspace(3)* %arg, i32 197
+ %tmp345 = load float, float addrspace(3)* %tmp344, align 4
+ %tmp346 = getelementptr inbounds float, float addrspace(3)* %arg, i32 198
+ %tmp347 = load float, float addrspace(3)* %tmp346, align 4
+ %tmp348 = getelementptr inbounds float, float addrspace(3)* %arg, i32 199
+ %tmp349 = load float, float addrspace(3)* %tmp348, align 4
+ %tmp350 = tail call float @llvm.fmuladd.f32(float %tmp345, float %tmp347, float %tmp349)
+ %tmp351 = getelementptr inbounds float, float addrspace(3)* %arg, i32 201
+ %tmp352 = load float, float addrspace(3)* %tmp351, align 4
+ %tmp353 = getelementptr inbounds float, float addrspace(3)* %arg, i32 202
+ %tmp354 = load float, float addrspace(3)* %tmp353, align 4
+ %tmp355 = getelementptr inbounds float, float addrspace(3)* %arg, i32 203
+ %tmp356 = load float, float addrspace(3)* %tmp355, align 4
+ %tmp357 = tail call float @llvm.fmuladd.f32(float %tmp352, float %tmp354, float %tmp356)
+ %tmp358 = getelementptr inbounds float, float addrspace(3)* %arg, i32 205
+ %tmp359 = load float, float addrspace(3)* %tmp358, align 4
+ %tmp360 = getelementptr inbounds float, float addrspace(3)* %arg, i32 206
+ %tmp361 = load float, float addrspace(3)* %tmp360, align 4
+ %tmp362 = getelementptr inbounds float, float addrspace(3)* %arg, i32 207
+ %tmp363 = load float, float addrspace(3)* %tmp362, align 4
+ %tmp364 = tail call float @llvm.fmuladd.f32(float %tmp359, float %tmp361, float %tmp363)
+ %tmp365 = getelementptr inbounds float, float addrspace(3)* %arg, i32 209
+ %tmp366 = load float, float addrspace(3)* %tmp365, align 4
+ %tmp367 = getelementptr inbounds float, float addrspace(3)* %arg, i32 210
+ %tmp368 = load float, float addrspace(3)* %tmp367, align 4
+ %tmp369 = getelementptr inbounds float, float addrspace(3)* %arg, i32 211
+ %tmp370 = load float, float addrspace(3)* %tmp369, align 4
+ %tmp371 = tail call float @llvm.fmuladd.f32(float %tmp366, float %tmp368, float %tmp370)
+ %tmp372 = getelementptr inbounds float, float addrspace(3)* %arg, i32 213
+ %tmp373 = load float, float addrspace(3)* %tmp372, align 4
+ %tmp374 = getelementptr inbounds float, float addrspace(3)* %arg, i32 214
+ %tmp375 = load float, float addrspace(3)* %tmp374, align 4
+ %tmp376 = getelementptr inbounds float, float addrspace(3)* %arg, i32 215
+ %tmp377 = load float, float addrspace(3)* %tmp376, align 4
+ %tmp378 = tail call float @llvm.fmuladd.f32(float %tmp373, float %tmp375, float %tmp377)
+ %tmp379 = getelementptr inbounds float, float addrspace(3)* %arg, i32 217
+ %tmp380 = load float, float addrspace(3)* %tmp379, align 4
+ %tmp381 = getelementptr inbounds float, float addrspace(3)* %arg, i32 218
+ %tmp382 = load float, float addrspace(3)* %tmp381, align 4
+ %tmp383 = getelementptr inbounds float, float addrspace(3)* %arg, i32 219
+ %tmp384 = load float, float addrspace(3)* %tmp383, align 4
+ %tmp385 = tail call float @llvm.fmuladd.f32(float %tmp380, float %tmp382, float %tmp384)
+ %tmp386 = getelementptr inbounds float, float addrspace(3)* %arg, i32 221
+ %tmp387 = load float, float addrspace(3)* %tmp386, align 4
+ %tmp388 = getelementptr inbounds float, float addrspace(3)* %arg, i32 222
+ %tmp389 = load float, float addrspace(3)* %tmp388, align 4
+ %tmp390 = getelementptr inbounds float, float addrspace(3)* %arg, i32 223
+ %tmp391 = load float, float addrspace(3)* %tmp390, align 4
+ %tmp392 = tail call float @llvm.fmuladd.f32(float %tmp387, float %tmp389, float %tmp391)
+ %tmp393 = getelementptr inbounds float, float addrspace(3)* %arg, i32 225
+ %tmp394 = load float, float addrspace(3)* %tmp393, align 4
+ %tmp395 = getelementptr inbounds float, float addrspace(3)* %arg, i32 226
+ %tmp396 = load float, float addrspace(3)* %tmp395, align 4
+ %tmp397 = getelementptr inbounds float, float addrspace(3)* %arg, i32 227
+ %tmp398 = load float, float addrspace(3)* %tmp397, align 4
+ %tmp399 = tail call float @llvm.fmuladd.f32(float %tmp394, float %tmp396, float %tmp398)
+ %tmp400 = getelementptr inbounds float, float addrspace(3)* %arg, i32 229
+ %tmp401 = load float, float addrspace(3)* %tmp400, align 4
+ %tmp402 = getelementptr inbounds float, float addrspace(3)* %arg, i32 230
+ %tmp403 = load float, float addrspace(3)* %tmp402, align 4
+ %tmp404 = getelementptr inbounds float, float addrspace(3)* %arg, i32 231
+ %tmp405 = load float, float addrspace(3)* %tmp404, align 4
+ %tmp406 = tail call float @llvm.fmuladd.f32(float %tmp401, float %tmp403, float %tmp405)
+ %tmp407 = getelementptr inbounds float, float addrspace(3)* %arg, i32 233
+ %tmp408 = load float, float addrspace(3)* %tmp407, align 4
+ %tmp409 = getelementptr inbounds float, float addrspace(3)* %arg, i32 234
+ %tmp410 = load float, float addrspace(3)* %tmp409, align 4
+ %tmp411 = getelementptr inbounds float, float addrspace(3)* %arg, i32 235
+ %tmp412 = load float, float addrspace(3)* %tmp411, align 4
+ %tmp413 = tail call float @llvm.fmuladd.f32(float %tmp408, float %tmp410, float %tmp412)
+ %tmp414 = getelementptr inbounds float, float addrspace(3)* %arg, i32 237
+ %tmp415 = load float, float addrspace(3)* %tmp414, align 4
+ %tmp416 = getelementptr inbounds float, float addrspace(3)* %arg, i32 238
+ %tmp417 = load float, float addrspace(3)* %tmp416, align 4
+ %tmp418 = getelementptr inbounds float, float addrspace(3)* %arg, i32 239
+ %tmp419 = load float, float addrspace(3)* %tmp418, align 4
+ %tmp420 = tail call float @llvm.fmuladd.f32(float %tmp415, float %tmp417, float %tmp419)
+ %tmp421 = getelementptr inbounds float, float addrspace(3)* %arg, i32 241
+ %tmp422 = load float, float addrspace(3)* %tmp421, align 4
+ %tmp423 = getelementptr inbounds float, float addrspace(3)* %arg, i32 242
+ %tmp424 = load float, float addrspace(3)* %tmp423, align 4
+ %tmp425 = getelementptr inbounds float, float addrspace(3)* %arg, i32 243
+ %tmp426 = load float, float addrspace(3)* %tmp425, align 4
+ %tmp427 = tail call float @llvm.fmuladd.f32(float %tmp422, float %tmp424, float %tmp426)
+ %tmp428 = getelementptr inbounds float, float addrspace(3)* %arg, i32 245
+ %tmp429 = load float, float addrspace(3)* %tmp428, align 4
+ %tmp430 = getelementptr inbounds float, float addrspace(3)* %arg, i32 246
+ %tmp431 = load float, float addrspace(3)* %tmp430, align 4
+ %tmp432 = getelementptr inbounds float, float addrspace(3)* %arg, i32 247
+ %tmp433 = load float, float addrspace(3)* %tmp432, align 4
+ %tmp434 = tail call float @llvm.fmuladd.f32(float %tmp429, float %tmp431, float %tmp433)
+ %tmp435 = getelementptr inbounds float, float addrspace(3)* %arg, i32 249
+ %tmp436 = load float, float addrspace(3)* %tmp435, align 4
+ %tmp437 = getelementptr inbounds float, float addrspace(3)* %arg, i32 250
+ %tmp438 = load float, float addrspace(3)* %tmp437, align 4
+ %tmp439 = getelementptr inbounds float, float addrspace(3)* %arg, i32 251
+ %tmp440 = load float, float addrspace(3)* %tmp439, align 4
+ %tmp441 = tail call float @llvm.fmuladd.f32(float %tmp436, float %tmp438, float %tmp440)
+ %tmp442 = getelementptr inbounds float, float addrspace(3)* %arg, i32 253
+ %tmp443 = load float, float addrspace(3)* %tmp442, align 4
+ %tmp444 = getelementptr inbounds float, float addrspace(3)* %arg, i32 254
+ %tmp445 = load float, float addrspace(3)* %tmp444, align 4
+ %tmp446 = getelementptr inbounds float, float addrspace(3)* %arg, i32 255
+ %tmp447 = load float, float addrspace(3)* %tmp446, align 4
+ %tmp448 = tail call float @llvm.fmuladd.f32(float %tmp443, float %tmp445, float %tmp447)
+ store float %tmp7, float addrspace(1)* %arg1, align 4
+ %tmp449 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 1
+ store float %tmp14, float addrspace(1)* %tmp449, align 4
+ %tmp450 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 2
+ store float %tmp21, float addrspace(1)* %tmp450, align 4
+ %tmp451 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 3
+ store float %tmp28, float addrspace(1)* %tmp451, align 4
+ %tmp452 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 4
+ store float %tmp35, float addrspace(1)* %tmp452, align 4
+ %tmp453 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 5
+ store float %tmp42, float addrspace(1)* %tmp453, align 4
+ %tmp454 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 6
+ store float %tmp49, float addrspace(1)* %tmp454, align 4
+ %tmp455 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 7
+ store float %tmp56, float addrspace(1)* %tmp455, align 4
+ %tmp456 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 8
+ store float %tmp63, float addrspace(1)* %tmp456, align 4
+ %tmp457 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 9
+ store float %tmp70, float addrspace(1)* %tmp457, align 4
+ %tmp458 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 10
+ store float %tmp77, float addrspace(1)* %tmp458, align 4
+ %tmp459 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 11
+ store float %tmp84, float addrspace(1)* %tmp459, align 4
+ %tmp460 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 12
+ store float %tmp91, float addrspace(1)* %tmp460, align 4
+ %tmp461 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 13
+ store float %tmp98, float addrspace(1)* %tmp461, align 4
+ %tmp462 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 14
+ store float %tmp105, float addrspace(1)* %tmp462, align 4
+ %tmp463 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 15
+ store float %tmp112, float addrspace(1)* %tmp463, align 4
+ %tmp464 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 16
+ store float %tmp119, float addrspace(1)* %tmp464, align 4
+ %tmp465 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 17
+ store float %tmp126, float addrspace(1)* %tmp465, align 4
+ %tmp466 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 18
+ store float %tmp133, float addrspace(1)* %tmp466, align 4
+ %tmp467 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 19
+ store float %tmp140, float addrspace(1)* %tmp467, align 4
+ %tmp468 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 20
+ store float %tmp147, float addrspace(1)* %tmp468, align 4
+ %tmp469 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 21
+ store float %tmp154, float addrspace(1)* %tmp469, align 4
+ %tmp470 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 22
+ store float %tmp161, float addrspace(1)* %tmp470, align 4
+ %tmp471 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 23
+ store float %tmp168, float addrspace(1)* %tmp471, align 4
+ %tmp472 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 24
+ store float %tmp175, float addrspace(1)* %tmp472, align 4
+ %tmp473 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 25
+ store float %tmp182, float addrspace(1)* %tmp473, align 4
+ %tmp474 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 26
+ store float %tmp189, float addrspace(1)* %tmp474, align 4
+ %tmp475 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 27
+ store float %tmp196, float addrspace(1)* %tmp475, align 4
+ %tmp476 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 28
+ store float %tmp203, float addrspace(1)* %tmp476, align 4
+ %tmp477 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 29
+ store float %tmp210, float addrspace(1)* %tmp477, align 4
+ %tmp478 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 30
+ store float %tmp217, float addrspace(1)* %tmp478, align 4
+ %tmp479 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 31
+ store float %tmp224, float addrspace(1)* %tmp479, align 4
+ %tmp480 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 32
+ store float %tmp231, float addrspace(1)* %tmp480, align 4
+ %tmp481 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 33
+ store float %tmp238, float addrspace(1)* %tmp481, align 4
+ %tmp482 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 34
+ store float %tmp245, float addrspace(1)* %tmp482, align 4
+ %tmp483 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 35
+ store float %tmp252, float addrspace(1)* %tmp483, align 4
+ %tmp484 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 36
+ store float %tmp259, float addrspace(1)* %tmp484, align 4
+ %tmp485 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 37
+ store float %tmp266, float addrspace(1)* %tmp485, align 4
+ %tmp486 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 38
+ store float %tmp273, float addrspace(1)* %tmp486, align 4
+ %tmp487 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 39
+ store float %tmp280, float addrspace(1)* %tmp487, align 4
+ %tmp488 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 40
+ store float %tmp287, float addrspace(1)* %tmp488, align 4
+ %tmp489 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 41
+ store float %tmp294, float addrspace(1)* %tmp489, align 4
+ %tmp490 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 42
+ store float %tmp301, float addrspace(1)* %tmp490, align 4
+ %tmp491 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 43
+ store float %tmp308, float addrspace(1)* %tmp491, align 4
+ %tmp492 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 44
+ store float %tmp315, float addrspace(1)* %tmp492, align 4
+ %tmp493 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 45
+ store float %tmp322, float addrspace(1)* %tmp493, align 4
+ %tmp494 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 46
+ store float %tmp329, float addrspace(1)* %tmp494, align 4
+ %tmp495 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 47
+ store float %tmp336, float addrspace(1)* %tmp495, align 4
+ %tmp496 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 48
+ store float %tmp343, float addrspace(1)* %tmp496, align 4
+ %tmp497 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 49
+ store float %tmp350, float addrspace(1)* %tmp497, align 4
+ %tmp498 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 50
+ store float %tmp357, float addrspace(1)* %tmp498, align 4
+ %tmp499 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 51
+ store float %tmp364, float addrspace(1)* %tmp499, align 4
+ %tmp500 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 52
+ store float %tmp371, float addrspace(1)* %tmp500, align 4
+ %tmp501 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 53
+ store float %tmp378, float addrspace(1)* %tmp501, align 4
+ %tmp502 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 54
+ store float %tmp385, float addrspace(1)* %tmp502, align 4
+ %tmp503 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 55
+ store float %tmp392, float addrspace(1)* %tmp503, align 4
+ %tmp504 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 56
+ store float %tmp399, float addrspace(1)* %tmp504, align 4
+ %tmp505 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 57
+ store float %tmp406, float addrspace(1)* %tmp505, align 4
+ %tmp506 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 58
+ store float %tmp413, float addrspace(1)* %tmp506, align 4
+ %tmp507 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 59
+ store float %tmp420, float addrspace(1)* %tmp507, align 4
+ %tmp508 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 60
+ store float %tmp427, float addrspace(1)* %tmp508, align 4
+ %tmp509 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 61
+ store float %tmp434, float addrspace(1)* %tmp509, align 4
+ %tmp510 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 62
+ store float %tmp441, float addrspace(1)* %tmp510, align 4
+ %tmp511 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 63
+ store float %tmp448, float addrspace(1)* %tmp511, align 4
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare float @llvm.fmuladd.f32(float, float, float) #0
+
+attributes #0 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/schedule-regpressure-limit2.ll b/test/CodeGen/AMDGPU/schedule-regpressure-limit2.ll
new file mode 100644
index 000000000000..0d19c1e6a8f3
--- /dev/null
+++ b/test/CodeGen/AMDGPU/schedule-regpressure-limit2.ll
@@ -0,0 +1,288 @@
+; RUN: llc -march=amdgcn -misched=gcn-minreg -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -misched=gcn-max-occupancy-experimental -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -misched=gcn-minreg -verify-machineinstrs < %s | FileCheck -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -misched=gcn-max-occupancy-experimental -verify-machineinstrs < %s | FileCheck -check-prefix=VI %s
+
+; SI: NumSgprs: {{[1-9]$}}
+; SI: NumVgprs: {{[1-9]$}}
+
+; stores may alias loads
+; VI: NumSgprs: {{[1-5][0-9]$}}
+; VI: NumVgprs: {{[1-3][0-9]$}}
+
+define amdgpu_kernel void @load_fma_store(float addrspace(3)* nocapture readonly %in_arg, float addrspace(1)* nocapture %out_arg) {
+bb:
+ %adr.a.0 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 20004
+ %adr.b.0 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 20252
+ %adr.c.0 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 20508
+ %adr.a.1 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 20772
+ %adr.b.1 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 21020
+ %adr.c.1 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 21276
+ %adr.a.2 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 21540
+ %adr.b.2 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 21788
+ %adr.c.2 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 22044
+ %adr.a.3 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 22308
+ %adr.b.3 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 22556
+ %adr.c.3 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 22812
+ %adr.a.4 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 23076
+ %adr.b.4 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 23324
+ %adr.c.4 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 23580
+ %adr.a.5 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 23844
+ %adr.b.5 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 24092
+ %adr.c.5 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 24348
+ %adr.a.6 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 24612
+ %adr.b.6 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 24860
+ %adr.c.6 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 25116
+ %adr.a.7 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 25380
+ %adr.b.7 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 25628
+ %adr.c.7 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 25884
+ %adr.a.8 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 26148
+ %adr.b.8 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 26396
+ %adr.c.8 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 26652
+ %adr.a.9 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 26916
+ %adr.b.9 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 27164
+ %adr.c.9 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 27420
+ %adr.a.10 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 27684
+ %adr.b.10 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 27932
+ %adr.c.10 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 28188
+ %adr.a.11 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 28452
+ %adr.b.11 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 28700
+ %adr.c.11 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 28956
+ %adr.a.12 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 29220
+ %adr.b.12 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 29468
+ %adr.c.12 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 29724
+ %adr.a.13 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 29988
+ %adr.b.13 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 30236
+ %adr.c.13 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 30492
+ %adr.a.14 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 30756
+ %adr.b.14 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 31004
+ %adr.c.14 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 31260
+ %adr.a.15 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 31524
+ %adr.b.15 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 31772
+ %adr.c.15 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 32028
+ %adr.a.16 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 32292
+ %adr.b.16 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 32540
+ %adr.c.16 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 32796
+ %adr.a.17 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 33060
+ %adr.b.17 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 33308
+ %adr.c.17 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 33564
+ %adr.a.18 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 33828
+ %adr.b.18 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 34076
+ %adr.c.18 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 34332
+ %adr.a.19 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 34596
+ %adr.b.19 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 34844
+ %adr.c.19 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 35100
+ %adr.a.20 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 35364
+ %adr.b.20 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 35612
+ %adr.c.20 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 35868
+ %adr.a.21 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 36132
+ %adr.b.21 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 36380
+ %adr.c.21 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 36636
+ %adr.a.22 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 36900
+ %adr.b.22 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 37148
+ %adr.c.22 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 37404
+ %adr.a.23 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 37668
+ %adr.b.23 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 37916
+ %adr.c.23 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 38172
+ %adr.a.24 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 38436
+ %adr.b.24 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 38684
+ %adr.c.24 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 38940
+ %adr.a.25 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 39204
+ %adr.b.25 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 39452
+ %adr.c.25 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 39708
+ %adr.a.26 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 39972
+ %adr.b.26 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 40220
+ %adr.c.26 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 40476
+ %adr.a.27 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 40740
+ %adr.b.27 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 40988
+ %adr.c.27 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 41244
+ %adr.a.28 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 41508
+ %adr.b.28 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 41756
+ %adr.c.28 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 42012
+ %adr.a.29 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 42276
+ %adr.b.29 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 42524
+ %adr.c.29 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 42780
+ %a.0 = load float, float addrspace(3)* %adr.a.0, align 4
+ %b.0 = load float, float addrspace(3)* %adr.b.0, align 4
+ %c.0 = load float, float addrspace(3)* %adr.c.0, align 4
+ %a.1 = load float, float addrspace(3)* %adr.a.1, align 4
+ %b.1 = load float, float addrspace(3)* %adr.b.1, align 4
+ %c.1 = load float, float addrspace(3)* %adr.c.1, align 4
+ %a.2 = load float, float addrspace(3)* %adr.a.2, align 4
+ %b.2 = load float, float addrspace(3)* %adr.b.2, align 4
+ %c.2 = load float, float addrspace(3)* %adr.c.2, align 4
+ %a.3 = load float, float addrspace(3)* %adr.a.3, align 4
+ %b.3 = load float, float addrspace(3)* %adr.b.3, align 4
+ %c.3 = load float, float addrspace(3)* %adr.c.3, align 4
+ %a.4 = load float, float addrspace(3)* %adr.a.4, align 4
+ %b.4 = load float, float addrspace(3)* %adr.b.4, align 4
+ %c.4 = load float, float addrspace(3)* %adr.c.4, align 4
+ %a.5 = load float, float addrspace(3)* %adr.a.5, align 4
+ %b.5 = load float, float addrspace(3)* %adr.b.5, align 4
+ %c.5 = load float, float addrspace(3)* %adr.c.5, align 4
+ %a.6 = load float, float addrspace(3)* %adr.a.6, align 4
+ %b.6 = load float, float addrspace(3)* %adr.b.6, align 4
+ %c.6 = load float, float addrspace(3)* %adr.c.6, align 4
+ %a.7 = load float, float addrspace(3)* %adr.a.7, align 4
+ %b.7 = load float, float addrspace(3)* %adr.b.7, align 4
+ %c.7 = load float, float addrspace(3)* %adr.c.7, align 4
+ %a.8 = load float, float addrspace(3)* %adr.a.8, align 4
+ %b.8 = load float, float addrspace(3)* %adr.b.8, align 4
+ %c.8 = load float, float addrspace(3)* %adr.c.8, align 4
+ %a.9 = load float, float addrspace(3)* %adr.a.9, align 4
+ %b.9 = load float, float addrspace(3)* %adr.b.9, align 4
+ %c.9 = load float, float addrspace(3)* %adr.c.9, align 4
+ %a.10 = load float, float addrspace(3)* %adr.a.10, align 4
+ %b.10 = load float, float addrspace(3)* %adr.b.10, align 4
+ %c.10 = load float, float addrspace(3)* %adr.c.10, align 4
+ %a.11 = load float, float addrspace(3)* %adr.a.11, align 4
+ %b.11 = load float, float addrspace(3)* %adr.b.11, align 4
+ %c.11 = load float, float addrspace(3)* %adr.c.11, align 4
+ %a.12 = load float, float addrspace(3)* %adr.a.12, align 4
+ %b.12 = load float, float addrspace(3)* %adr.b.12, align 4
+ %c.12 = load float, float addrspace(3)* %adr.c.12, align 4
+ %a.13 = load float, float addrspace(3)* %adr.a.13, align 4
+ %b.13 = load float, float addrspace(3)* %adr.b.13, align 4
+ %c.13 = load float, float addrspace(3)* %adr.c.13, align 4
+ %a.14 = load float, float addrspace(3)* %adr.a.14, align 4
+ %b.14 = load float, float addrspace(3)* %adr.b.14, align 4
+ %c.14 = load float, float addrspace(3)* %adr.c.14, align 4
+ %a.15 = load float, float addrspace(3)* %adr.a.15, align 4
+ %b.15 = load float, float addrspace(3)* %adr.b.15, align 4
+ %c.15 = load float, float addrspace(3)* %adr.c.15, align 4
+ %a.16 = load float, float addrspace(3)* %adr.a.16, align 4
+ %b.16 = load float, float addrspace(3)* %adr.b.16, align 4
+ %c.16 = load float, float addrspace(3)* %adr.c.16, align 4
+ %a.17 = load float, float addrspace(3)* %adr.a.17, align 4
+ %b.17 = load float, float addrspace(3)* %adr.b.17, align 4
+ %c.17 = load float, float addrspace(3)* %adr.c.17, align 4
+ %a.18 = load float, float addrspace(3)* %adr.a.18, align 4
+ %b.18 = load float, float addrspace(3)* %adr.b.18, align 4
+ %c.18 = load float, float addrspace(3)* %adr.c.18, align 4
+ %a.19 = load float, float addrspace(3)* %adr.a.19, align 4
+ %b.19 = load float, float addrspace(3)* %adr.b.19, align 4
+ %c.19 = load float, float addrspace(3)* %adr.c.19, align 4
+ %a.20 = load float, float addrspace(3)* %adr.a.20, align 4
+ %b.20 = load float, float addrspace(3)* %adr.b.20, align 4
+ %c.20 = load float, float addrspace(3)* %adr.c.20, align 4
+ %a.21 = load float, float addrspace(3)* %adr.a.21, align 4
+ %b.21 = load float, float addrspace(3)* %adr.b.21, align 4
+ %c.21 = load float, float addrspace(3)* %adr.c.21, align 4
+ %a.22 = load float, float addrspace(3)* %adr.a.22, align 4
+ %b.22 = load float, float addrspace(3)* %adr.b.22, align 4
+ %c.22 = load float, float addrspace(3)* %adr.c.22, align 4
+ %a.23 = load float, float addrspace(3)* %adr.a.23, align 4
+ %b.23 = load float, float addrspace(3)* %adr.b.23, align 4
+ %c.23 = load float, float addrspace(3)* %adr.c.23, align 4
+ %a.24 = load float, float addrspace(3)* %adr.a.24, align 4
+ %b.24 = load float, float addrspace(3)* %adr.b.24, align 4
+ %c.24 = load float, float addrspace(3)* %adr.c.24, align 4
+ %a.25 = load float, float addrspace(3)* %adr.a.25, align 4
+ %b.25 = load float, float addrspace(3)* %adr.b.25, align 4
+ %c.25 = load float, float addrspace(3)* %adr.c.25, align 4
+ %a.26 = load float, float addrspace(3)* %adr.a.26, align 4
+ %b.26 = load float, float addrspace(3)* %adr.b.26, align 4
+ %c.26 = load float, float addrspace(3)* %adr.c.26, align 4
+ %a.27 = load float, float addrspace(3)* %adr.a.27, align 4
+ %b.27 = load float, float addrspace(3)* %adr.b.27, align 4
+ %c.27 = load float, float addrspace(3)* %adr.c.27, align 4
+ %a.28 = load float, float addrspace(3)* %adr.a.28, align 4
+ %b.28 = load float, float addrspace(3)* %adr.b.28, align 4
+ %c.28 = load float, float addrspace(3)* %adr.c.28, align 4
+ %a.29 = load float, float addrspace(3)* %adr.a.29, align 4
+ %b.29 = load float, float addrspace(3)* %adr.b.29, align 4
+ %c.29 = load float, float addrspace(3)* %adr.c.29, align 4
+ %res.0 = tail call float @llvm.fmuladd.f32(float %a.0, float %b.0, float %c.0)
+ %res.1 = tail call float @llvm.fmuladd.f32(float %a.1, float %b.1, float %c.1)
+ %res.2 = tail call float @llvm.fmuladd.f32(float %a.2, float %b.2, float %c.2)
+ %res.3 = tail call float @llvm.fmuladd.f32(float %a.3, float %b.3, float %c.3)
+ %res.4 = tail call float @llvm.fmuladd.f32(float %a.4, float %b.4, float %c.4)
+ %res.5 = tail call float @llvm.fmuladd.f32(float %a.5, float %b.5, float %c.5)
+ %res.6 = tail call float @llvm.fmuladd.f32(float %a.6, float %b.6, float %c.6)
+ %res.7 = tail call float @llvm.fmuladd.f32(float %a.7, float %b.7, float %c.7)
+ %res.8 = tail call float @llvm.fmuladd.f32(float %a.8, float %b.8, float %c.8)
+ %res.9 = tail call float @llvm.fmuladd.f32(float %a.9, float %b.9, float %c.9)
+ %res.10 = tail call float @llvm.fmuladd.f32(float %a.10, float %b.10, float %c.10)
+ %res.11 = tail call float @llvm.fmuladd.f32(float %a.11, float %b.11, float %c.11)
+ %res.12 = tail call float @llvm.fmuladd.f32(float %a.12, float %b.12, float %c.12)
+ %res.13 = tail call float @llvm.fmuladd.f32(float %a.13, float %b.13, float %c.13)
+ %res.14 = tail call float @llvm.fmuladd.f32(float %a.14, float %b.14, float %c.14)
+ %res.15 = tail call float @llvm.fmuladd.f32(float %a.15, float %b.15, float %c.15)
+ %res.16 = tail call float @llvm.fmuladd.f32(float %a.16, float %b.16, float %c.16)
+ %res.17 = tail call float @llvm.fmuladd.f32(float %a.17, float %b.17, float %c.17)
+ %res.18 = tail call float @llvm.fmuladd.f32(float %a.18, float %b.18, float %c.18)
+ %res.19 = tail call float @llvm.fmuladd.f32(float %a.19, float %b.19, float %c.19)
+ %res.20 = tail call float @llvm.fmuladd.f32(float %a.20, float %b.20, float %c.20)
+ %res.21 = tail call float @llvm.fmuladd.f32(float %a.21, float %b.21, float %c.21)
+ %res.22 = tail call float @llvm.fmuladd.f32(float %a.22, float %b.22, float %c.22)
+ %res.23 = tail call float @llvm.fmuladd.f32(float %a.23, float %b.23, float %c.23)
+ %res.24 = tail call float @llvm.fmuladd.f32(float %a.24, float %b.24, float %c.24)
+ %res.25 = tail call float @llvm.fmuladd.f32(float %a.25, float %b.25, float %c.25)
+ %res.26 = tail call float @llvm.fmuladd.f32(float %a.26, float %b.26, float %c.26)
+ %res.27 = tail call float @llvm.fmuladd.f32(float %a.27, float %b.27, float %c.27)
+ %res.28 = tail call float @llvm.fmuladd.f32(float %a.28, float %b.28, float %c.28)
+ %res.29 = tail call float @llvm.fmuladd.f32(float %a.29, float %b.29, float %c.29)
+ %adr.res.0 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 0
+ %adr.res.1 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 2
+ %adr.res.2 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 4
+ %adr.res.3 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 6
+ %adr.res.4 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 8
+ %adr.res.5 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 10
+ %adr.res.6 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 12
+ %adr.res.7 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 14
+ %adr.res.8 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 16
+ %adr.res.9 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 18
+ %adr.res.10 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 20
+ %adr.res.11 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 22
+ %adr.res.12 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 24
+ %adr.res.13 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 26
+ %adr.res.14 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 28
+ %adr.res.15 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 30
+ %adr.res.16 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 32
+ %adr.res.17 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 34
+ %adr.res.18 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 36
+ %adr.res.19 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 38
+ %adr.res.20 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 40
+ %adr.res.21 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 42
+ %adr.res.22 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 44
+ %adr.res.23 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 46
+ %adr.res.24 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 48
+ %adr.res.25 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 50
+ %adr.res.26 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 52
+ %adr.res.27 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 54
+ %adr.res.28 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 56
+ %adr.res.29 = getelementptr inbounds float, float addrspace(1)* %out_arg, i64 58
+ store float %res.0, float addrspace(1)* %adr.res.0, align 4
+ store float %res.1, float addrspace(1)* %adr.res.1, align 4
+ store float %res.2, float addrspace(1)* %adr.res.2, align 4
+ store float %res.3, float addrspace(1)* %adr.res.3, align 4
+ store float %res.4, float addrspace(1)* %adr.res.4, align 4
+ store float %res.5, float addrspace(1)* %adr.res.5, align 4
+ store float %res.6, float addrspace(1)* %adr.res.6, align 4
+ store float %res.7, float addrspace(1)* %adr.res.7, align 4
+ store float %res.8, float addrspace(1)* %adr.res.8, align 4
+ store float %res.9, float addrspace(1)* %adr.res.9, align 4
+ store float %res.10, float addrspace(1)* %adr.res.10, align 4
+ store float %res.11, float addrspace(1)* %adr.res.11, align 4
+ store float %res.12, float addrspace(1)* %adr.res.12, align 4
+ store float %res.13, float addrspace(1)* %adr.res.13, align 4
+ store float %res.14, float addrspace(1)* %adr.res.14, align 4
+ store float %res.15, float addrspace(1)* %adr.res.15, align 4
+ store float %res.16, float addrspace(1)* %adr.res.16, align 4
+ store float %res.17, float addrspace(1)* %adr.res.17, align 4
+ store float %res.18, float addrspace(1)* %adr.res.18, align 4
+ store float %res.19, float addrspace(1)* %adr.res.19, align 4
+ store float %res.20, float addrspace(1)* %adr.res.20, align 4
+ store float %res.21, float addrspace(1)* %adr.res.21, align 4
+ store float %res.22, float addrspace(1)* %adr.res.22, align 4
+ store float %res.23, float addrspace(1)* %adr.res.23, align 4
+ store float %res.24, float addrspace(1)* %adr.res.24, align 4
+ store float %res.25, float addrspace(1)* %adr.res.25, align 4
+ store float %res.26, float addrspace(1)* %adr.res.26, align 4
+ store float %res.27, float addrspace(1)* %adr.res.27, align 4
+ store float %res.28, float addrspace(1)* %adr.res.28, align 4
+ store float %res.29, float addrspace(1)* %adr.res.29, align 4
+ ret void
+}
+declare float @llvm.fmuladd.f32(float, float, float) #0
+attributes #0 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/schedule-regpressure.mir b/test/CodeGen/AMDGPU/schedule-regpressure.mir
new file mode 100644
index 000000000000..c71de87eeece
--- /dev/null
+++ b/test/CodeGen/AMDGPU/schedule-regpressure.mir
@@ -0,0 +1,57 @@
+# RUN: llc -march=amdgcn -misched=converge -run-pass machine-scheduler %s -o - -debug-only=misched 2>&1 | FileCheck %s
+# REQUIRES: asserts
+
+# Check that there is no SReg_32 pressure created by DS_* instructions because of the M0 use.
+
+# CHECK: ScheduleDAGMILive::schedule starting
+# CHECK: SU({{.*}} = DS_READ_B32 {{.*}} %M0<imp-use>, %EXEC<imp-use>
+# CHECK: Pressure Diff : {{$}}
+# CHECK: SU({{.*}} DS_WRITE_B32
+
+---
+name: mo_pset
+alignment: 0
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: sreg_128 }
+ - { id: 1, class: sgpr_64 }
+ - { id: 2, class: sreg_32_xm0 }
+ - { id: 3, class: sgpr_32 }
+ - { id: 4, class: vgpr_32 }
+ - { id: 5, class: sreg_32_xm0_xexec }
+ - { id: 6, class: vgpr_32 }
+ - { id: 7, class: vgpr_32 }
+ - { id: 8, class: vgpr_32 }
+liveins:
+ - { reg: '%sgpr4_sgpr5', virtual-reg: '%1' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0:
+ liveins: %sgpr4_sgpr5
+
+ %1 = COPY %sgpr4_sgpr5
+ %5 = S_LOAD_DWORD_IMM %1, 0, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+ %m0 = S_MOV_B32 -1
+ %7 = COPY %5
+ %6 = DS_READ_B32 %7, 0, 0, implicit %m0, implicit %exec
+ DS_WRITE_B32 %7, %6, 4, 0, implicit killed %m0, implicit %exec
+ S_ENDPGM
+
+...
diff --git a/test/CodeGen/AMDGPU/scratch-buffer.ll b/test/CodeGen/AMDGPU/scratch-buffer.ll
index 94101f0b92b6..6b1e85915a11 100644
--- a/test/CodeGen/AMDGPU/scratch-buffer.ll
+++ b/test/CodeGen/AMDGPU/scratch-buffer.ll
@@ -9,11 +9,11 @@
 ; should be able to reuse the same register for each scratch buffer access.
; GCN-LABEL: {{^}}legal_offset_fi:
-; GCN: buffer_store_dword v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], s{{[0-9]+$}}
-; GCN: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0x8000
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], s{{[0-9]+}} offset:4{{$}}
+; GCN: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0x8004
; GCN: buffer_store_dword v{{[0-9]+}}, [[OFFSET]], s[{{[0-9]+}}:{{[0-9]+}}], s{{[0-9]+}} offen{{$}}
-define void @legal_offset_fi(i32 addrspace(1)* %out, i32 %cond, i32 %if_offset, i32 %else_offset) {
+define amdgpu_kernel void @legal_offset_fi(i32 addrspace(1)* %out, i32 %cond, i32 %if_offset, i32 %else_offset) {
entry:
%scratch0 = alloca [8192 x i32]
%scratch1 = alloca [8192 x i32]
@@ -49,11 +49,11 @@ done:
; GCN-LABEL: {{^}}legal_offset_fi_offset:
; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], s{{[0-9]+}} offen{{$}}
 ; This constant isn't folded because it has multiple uses.
-; GCN-DAG: v_mov_b32_e32 [[K8000:v[0-9]+]], 0x8000
+; GCN-DAG: v_mov_b32_e32 [[K8000:v[0-9]+]], 0x8004
; GCN-DAG: v_add_i32_e32 [[OFFSET:v[0-9]+]], vcc, [[K8000]]
; GCN: buffer_store_dword v{{[0-9]+}}, [[OFFSET]], s[{{[0-9]+}}:{{[0-9]+}}], s{{[0-9]+}} offen{{$}}
-define void @legal_offset_fi_offset(i32 addrspace(1)* %out, i32 %cond, i32 addrspace(1)* %offsets, i32 %if_offset, i32 %else_offset) {
+define amdgpu_kernel void @legal_offset_fi_offset(i32 addrspace(1)* %out, i32 %cond, i32 addrspace(1)* %offsets, i32 %if_offset, i32 %else_offset) {
entry:
%scratch0 = alloca [8192 x i32]
%scratch1 = alloca [8192 x i32]
@@ -88,7 +88,7 @@ done:
; GCN-LABEL: {{^}}neg_vaddr_offset:
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen offset:16{{$}}
-define void @neg_vaddr_offset(i32 %offset) {
+define amdgpu_kernel void @neg_vaddr_offset(i32 %offset) {
entry:
%array = alloca [8192 x i32]
%ptr_offset = add i32 %offset, 4
@@ -98,8 +98,8 @@ entry:
}
; GCN-LABEL: {{^}}pos_vaddr_offset:
-; GCN: buffer_store_dword v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:16
-define void @pos_vaddr_offset(i32 addrspace(1)* %out, i32 %offset) {
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:20
+define amdgpu_kernel void @pos_vaddr_offset(i32 addrspace(1)* %out, i32 %offset) {
entry:
%array = alloca [8192 x i32]
%ptr = getelementptr [8192 x i32], [8192 x i32]* %array, i32 0, i32 4
diff --git a/test/CodeGen/AMDGPU/sdiv.ll b/test/CodeGen/AMDGPU/sdiv.ll
index bafd6a50ccfe..f9ac425be794 100644
--- a/test/CodeGen/AMDGPU/sdiv.ll
+++ b/test/CodeGen/AMDGPU/sdiv.ll
@@ -13,7 +13,7 @@
; FUNC-LABEL: {{^}}sdiv_i32:
; EG: CF_END
-define void @sdiv_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @sdiv_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in
%den = load i32, i32 addrspace(1) * %den_ptr
@@ -23,7 +23,7 @@ define void @sdiv_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
}
; FUNC-LABEL: {{^}}sdiv_i32_4:
-define void @sdiv_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @sdiv_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%num = load i32, i32 addrspace(1) * %in
%result = sdiv i32 %num, 4
store i32 %result, i32 addrspace(1)* %out
@@ -43,14 +43,14 @@ define void @sdiv_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; SI: v_add_i32
; SI: buffer_store_dword
; SI: s_endpgm
-define void @slow_sdiv_i32_3435(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @slow_sdiv_i32_3435(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%num = load i32, i32 addrspace(1) * %in
%result = sdiv i32 %num, 3435
store i32 %result, i32 addrspace(1)* %out
ret void
}
-define void @sdiv_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @sdiv_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%den_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%num = load <2 x i32>, <2 x i32> addrspace(1) * %in
%den = load <2 x i32>, <2 x i32> addrspace(1) * %den_ptr
@@ -59,14 +59,14 @@ define void @sdiv_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %i
ret void
}
-define void @sdiv_v2i32_4(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @sdiv_v2i32_4(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%num = load <2 x i32>, <2 x i32> addrspace(1) * %in
%result = sdiv <2 x i32> %num, <i32 4, i32 4>
store <2 x i32> %result, <2 x i32> addrspace(1)* %out
ret void
}
-define void @sdiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @sdiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%den_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%num = load <4 x i32>, <4 x i32> addrspace(1) * %in
%den = load <4 x i32>, <4 x i32> addrspace(1) * %den_ptr
@@ -75,7 +75,7 @@ define void @sdiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %i
ret void
}
-define void @sdiv_v4i32_4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @sdiv_v4i32_4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%num = load <4 x i32>, <4 x i32> addrspace(1) * %in
%result = sdiv <4 x i32> %num, <i32 4, i32 4, i32 4, i32 4>
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
@@ -86,7 +86,7 @@ define void @sdiv_v4i32_4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)*
; SI: v_rcp_f32
; SI: v_bfe_i32 [[BFE:v[0-9]+]], v{{[0-9]+}}, 0, 8
; SI: buffer_store_dword [[BFE]]
-define void @v_sdiv_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
+define amdgpu_kernel void @v_sdiv_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
%den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1
%num = load i8, i8 addrspace(1) * %in
%den = load i8, i8 addrspace(1) * %den_ptr
@@ -100,7 +100,7 @@ define void @v_sdiv_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
; SI: v_rcp_f32
; SI: v_bfe_i32 [[BFE:v[0-9]+]], v{{[0-9]+}}, 0, 23
; SI: buffer_store_dword [[BFE]]
-define void @v_sdiv_i23(i32 addrspace(1)* %out, i23 addrspace(1)* %in) {
+define amdgpu_kernel void @v_sdiv_i23(i32 addrspace(1)* %out, i23 addrspace(1)* %in) {
%den_ptr = getelementptr i23, i23 addrspace(1)* %in, i23 1
%num = load i23, i23 addrspace(1) * %in
%den = load i23, i23 addrspace(1) * %den_ptr
@@ -114,7 +114,7 @@ define void @v_sdiv_i23(i32 addrspace(1)* %out, i23 addrspace(1)* %in) {
; SI: v_rcp_f32
; SI: v_bfe_i32 [[BFE:v[0-9]+]], v{{[0-9]+}}, 0, 24
; SI: buffer_store_dword [[BFE]]
-define void @v_sdiv_i24(i32 addrspace(1)* %out, i24 addrspace(1)* %in) {
+define amdgpu_kernel void @v_sdiv_i24(i32 addrspace(1)* %out, i24 addrspace(1)* %in) {
%den_ptr = getelementptr i24, i24 addrspace(1)* %in, i24 1
%num = load i24, i24 addrspace(1) * %in
%den = load i24, i24 addrspace(1) * %den_ptr
@@ -126,7 +126,7 @@ define void @v_sdiv_i24(i32 addrspace(1)* %out, i24 addrspace(1)* %in) {
; FUNC-LABEL: {{^}}v_sdiv_i25:
; SI-NOT: v_rcp_f32
-define void @v_sdiv_i25(i32 addrspace(1)* %out, i25 addrspace(1)* %in) {
+define amdgpu_kernel void @v_sdiv_i25(i32 addrspace(1)* %out, i25 addrspace(1)* %in) {
%den_ptr = getelementptr i25, i25 addrspace(1)* %in, i25 1
%num = load i25, i25 addrspace(1) * %in
%den = load i25, i25 addrspace(1) * %den_ptr
@@ -137,19 +137,19 @@ define void @v_sdiv_i25(i32 addrspace(1)* %out, i25 addrspace(1)* %in) {
}
; Tests for 64-bit divide bypass.
-; define void @test_get_quotient(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+; define amdgpu_kernel void @test_get_quotient(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
; %result = sdiv i64 %a, %b
; store i64 %result, i64 addrspace(1)* %out, align 8
; ret void
; }
-; define void @test_get_remainder(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+; define amdgpu_kernel void @test_get_remainder(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
; %result = srem i64 %a, %b
; store i64 %result, i64 addrspace(1)* %out, align 8
; ret void
; }
-; define void @test_get_quotient_and_remainder(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+; define amdgpu_kernel void @test_get_quotient_and_remainder(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
; %resultdiv = sdiv i64 %a, %b
; %resultrem = srem i64 %a, %b
; %result = add i64 %resultdiv, %resultrem
@@ -163,7 +163,7 @@ define void @v_sdiv_i25(i32 addrspace(1)* %out, i25 addrspace(1)* %in) {
; SI: v_mul_hi_i32
; SI: v_mul_hi_i32
-define void @scalarize_mulhs_4xi32(<4 x i32> addrspace(1)* nocapture readonly %in, <4 x i32> addrspace(1)* nocapture %out) {
+define amdgpu_kernel void @scalarize_mulhs_4xi32(<4 x i32> addrspace(1)* nocapture readonly %in, <4 x i32> addrspace(1)* nocapture %out) {
%1 = load <4 x i32>, <4 x i32> addrspace(1)* %in, align 16
%2 = sdiv <4 x i32> %1, <i32 53668, i32 53668, i32 53668, i32 53668>
store <4 x i32> %2, <4 x i32> addrspace(1)* %out, align 16
diff --git a/test/CodeGen/AMDGPU/sdivrem24.ll b/test/CodeGen/AMDGPU/sdivrem24.ll
index 349a7821da17..257e6be96b65 100644
--- a/test/CodeGen/AMDGPU/sdivrem24.ll
+++ b/test/CodeGen/AMDGPU/sdivrem24.ll
@@ -12,7 +12,7 @@
; EG-DAG: INT_TO_FLT
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_INT
-define void @sdiv24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
+define amdgpu_kernel void @sdiv24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
%den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1
%num = load i8, i8 addrspace(1) * %in
%den = load i8, i8 addrspace(1) * %den_ptr
@@ -31,7 +31,7 @@ define void @sdiv24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
; EG-DAG: INT_TO_FLT
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_INT
-define void @sdiv24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @sdiv24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
%den_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
%num = load i16, i16 addrspace(1) * %in, align 2
%den = load i16, i16 addrspace(1) * %den_ptr, align 2
@@ -50,7 +50,7 @@ define void @sdiv24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
; EG-DAG: INT_TO_FLT
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_INT
-define void @sdiv24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @sdiv24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -69,7 +69,7 @@ define void @sdiv24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; EG-NOT: INT_TO_FLT
; EG-NOT: RECIP_IEEE
-define void @sdiv25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @sdiv25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -88,7 +88,7 @@ define void @sdiv25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; EG-NOT: INT_TO_FLT
; EG-NOT: RECIP_IEEE
-define void @test_no_sdiv24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test_no_sdiv24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -107,7 +107,7 @@ define void @test_no_sdiv24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
; EG-NOT: INT_TO_FLT
; EG-NOT: RECIP_IEEE
-define void @test_no_sdiv24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test_no_sdiv24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -130,7 +130,7 @@ define void @test_no_sdiv24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
; EG-DAG: INT_TO_FLT
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_INT
-define void @srem24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
+define amdgpu_kernel void @srem24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
%den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1
%num = load i8, i8 addrspace(1) * %in
%den = load i8, i8 addrspace(1) * %den_ptr
@@ -149,7 +149,7 @@ define void @srem24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
; EG-DAG: INT_TO_FLT
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_INT
-define void @srem24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @srem24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
%den_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
%num = load i16, i16 addrspace(1) * %in, align 2
%den = load i16, i16 addrspace(1) * %den_ptr, align 2
@@ -168,7 +168,7 @@ define void @srem24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
; EG-DAG: INT_TO_FLT
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_INT
-define void @srem24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @srem24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -187,7 +187,7 @@ define void @srem24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; EG-NOT: INT_TO_FLT
; EG-NOT: RECIP_IEEE
-define void @no_srem25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @no_srem25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -206,7 +206,7 @@ define void @no_srem25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; EG-NOT: INT_TO_FLT
; EG-NOT: RECIP_IEEE
-define void @no_sdiv25_i24_i25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @no_sdiv25_i24_i25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -225,7 +225,7 @@ define void @no_sdiv25_i24_i25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in
; EG-NOT: INT_TO_FLT
; EG-NOT: RECIP_IEEE
-define void @no_sdiv25_i25_i24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @no_sdiv25_i25_i24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -244,7 +244,7 @@ define void @no_sdiv25_i25_i24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in
; EG-NOT: INT_TO_FLT
; EG-NOT: RECIP_IEEE
-define void @no_srem25_i24_i25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @no_srem25_i24_i25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -263,7 +263,7 @@ define void @no_srem25_i24_i25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in
; EG-NOT: INT_TO_FLT
; EG-NOT: RECIP_IEEE
-define void @no_srem25_i25_i24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @no_srem25_i25_i24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -283,7 +283,7 @@ define void @no_srem25_i25_i24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in
; EG: INT_TO_FLT
; EG: RECIP_IEEE
-define void @srem25_i24_i11_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @srem25_i24_i11_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -303,7 +303,7 @@ define void @srem25_i24_i11_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; EG: INT_TO_FLT
; EG: RECIP_IEEE
-define void @srem25_i11_i24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @srem25_i11_i24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -323,7 +323,7 @@ define void @srem25_i11_i24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; EG: INT_TO_FLT
; EG: RECIP_IEEE
-define void @srem25_i17_i12_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @srem25_i17_i12_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
diff --git a/test/CodeGen/AMDGPU/sdivrem64.ll b/test/CodeGen/AMDGPU/sdivrem64.ll
index 28fdb69e1ada..5ad0d8efaed3 100644
--- a/test/CodeGen/AMDGPU/sdivrem64.ll
+++ b/test/CodeGen/AMDGPU/sdivrem64.ll
@@ -70,7 +70,7 @@
; SI-NOT: v_lshr_b64
; VI-NOT: v_lshrrev_b64
; GCN: s_endpgm
-define void @s_test_sdiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+define amdgpu_kernel void @s_test_sdiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
%result = sdiv i64 %x, %y
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -144,7 +144,7 @@ define void @s_test_sdiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
;SI-NOT: v_lshr_b64
;VI-NOT: v_lshrrev_b64
;GCN: s_endpgm
-define void @s_test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+define amdgpu_kernel void @s_test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
%result = urem i64 %x, %y
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -159,7 +159,7 @@ define void @s_test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
;SI-NOT: v_lshr_b64
;VI-NOT: v_lshrrev_b64
;GCN: s_endpgm
-define void @test_sdiv3264(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+define amdgpu_kernel void @test_sdiv3264(i64 addrspace(1)* %out, i64 %x, i64 %y) {
%1 = ashr i64 %x, 33
%2 = ashr i64 %y, 33
%result = sdiv i64 %1, %2
@@ -176,7 +176,7 @@ define void @test_sdiv3264(i64 addrspace(1)* %out, i64 %x, i64 %y) {
;SI-NOT: v_lshr_b64
;VI-NOT: v_lshrrev_b64
;GCN: s_endpgm
-define void @test_srem3264(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+define amdgpu_kernel void @test_srem3264(i64 addrspace(1)* %out, i64 %x, i64 %y) {
%1 = ashr i64 %x, 33
%2 = ashr i64 %y, 33
%result = srem i64 %1, %2
@@ -196,7 +196,7 @@ define void @test_srem3264(i64 addrspace(1)* %out, i64 %x, i64 %y) {
;SI-NOT: v_lshr_b64
;VI-NOT: v_lshrrev_b64
;GCN: s_endpgm
-define void @test_sdiv2464(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+define amdgpu_kernel void @test_sdiv2464(i64 addrspace(1)* %out, i64 %x, i64 %y) {
%1 = ashr i64 %x, 40
%2 = ashr i64 %y, 40
%result = sdiv i64 %1, %2
@@ -216,7 +216,7 @@ define void @test_sdiv2464(i64 addrspace(1)* %out, i64 %x, i64 %y) {
;SI-NOT: v_lshr_b64
;VI-NOT: v_lshrrev_b64
;GCN: s_endpgm
-define void @test_srem2464(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+define amdgpu_kernel void @test_srem2464(i64 addrspace(1)* %out, i64 %x, i64 %y) {
%1 = ashr i64 %x, 40
%2 = ashr i64 %y, 40
%result = srem i64 %1, %2
diff --git a/test/CodeGen/AMDGPU/sdwa-peephole.ll b/test/CodeGen/AMDGPU/sdwa-peephole.ll
new file mode 100644
index 000000000000..1e0ac3807528
--- /dev/null
+++ b/test/CodeGen/AMDGPU/sdwa-peephole.ll
@@ -0,0 +1,395 @@
+; RUN: llc -march=amdgcn -mcpu=fiji -amdgpu-sdwa-peephole=0 -mattr=-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=NOSDWA -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=fiji -amdgpu-sdwa-peephole -mattr=-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=SDWA -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}add_shr_i32:
+; NOSDWA: v_lshrrev_b32_e32 v[[DST:[0-9]+]], 16, v{{[0-9]+}}
+; NOSDWA: v_add_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v[[DST]]
+; NOSDWA-NOT: v_add_i32_sdwa
+
+; SDWA: v_add_i32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+
+define amdgpu_kernel void @add_shr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %a = load i32, i32 addrspace(1)* %in, align 4
+ %shr = lshr i32 %a, 16
+ %add = add i32 %a, %shr
+ store i32 %add, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}sub_shr_i32:
+; NOSDWA: v_lshrrev_b32_e32 v[[DST:[0-9]+]], 16, v{{[0-9]+}}
+; NOSDWA: v_subrev_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v[[DST]]
+; NOSDWA-NOT: v_subrev_i32_sdwa
+
+; SDWA: v_subrev_i32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+
+define amdgpu_kernel void @sub_shr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %a = load i32, i32 addrspace(1)* %in, align 4
+ %shr = lshr i32 %a, 16
+ %sub = sub i32 %shr, %a
+ store i32 %sub, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}mul_shr_i32:
+; NOSDWA: v_lshrrev_b32_e32 v[[DST0:[0-9]+]], 16, v{{[0-9]+}}
+; NOSDWA: v_lshrrev_b32_e32 v[[DST1:[0-9]+]], 16, v{{[0-9]+}}
+; NOSDWA: v_mul_u32_u24_e32 v{{[0-9]+}}, v[[DST1]], v[[DST0]]
+; NOSDWA-NOT: v_mul_u32_u24_sdwa
+
+; SDWA: v_mul_u32_u24_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+
+define amdgpu_kernel void @mul_shr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in1, i32 addrspace(1)* %in2) {
+ %a = load i32, i32 addrspace(1)* %in1, align 4
+ %b = load i32, i32 addrspace(1)* %in2, align 4
+ %shra = lshr i32 %a, 16
+ %shrb = lshr i32 %b, 16
+ %mul = mul i32 %shra, %shrb
+ store i32 %mul, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}mul_i16:
+; NOSDWA: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA-NOT: v_mul_u32_u24_sdwa
+; SDWA: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; SDWA-NOT: v_mul_u32_u24_sdwa
+
+define amdgpu_kernel void @mul_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %ina, i16 addrspace(1)* %inb) {
+entry:
+ %a = load i16, i16 addrspace(1)* %ina, align 4
+ %b = load i16, i16 addrspace(1)* %inb, align 4
+ %mul = mul i16 %a, %b
+ store i16 %mul, i16 addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}mul_v2i16:
+; NOSDWA: v_lshrrev_b32_e32 v[[DST0:[0-9]+]], 16, v{{[0-9]+}}
+; NOSDWA: v_lshrrev_b32_e32 v[[DST1:[0-9]+]], 16, v{{[0-9]+}}
+; NOSDWA: v_mul_u32_u24_e32 v[[DST_MUL:[0-9]+]], v[[DST1]], v[[DST0]]
+; NOSDWA: v_lshlrev_b32_e32 v[[DST_SHL:[0-9]+]], 16, v[[DST_MUL]]
+; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v[[DST_SHL]], v{{[0-9]+}}
+; NOSDWA-NOT: v_mul_u32_u24_sdwa
+
+; SDWA-DAG: v_mul_u32_u24_sdwa v[[DST_MUL_LO:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
+; SDWA-DAG: v_mul_u32_u24_sdwa v[[DST_MUL_HI:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; SDWA: v_or_b32_sdwa v{{[0-9]+}}, v[[DST_MUL_HI]], v[[DST_MUL_LO]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+
+define amdgpu_kernel void @mul_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %ina, <2 x i16> addrspace(1)* %inb) {
+entry:
+ %a = load <2 x i16>, <2 x i16> addrspace(1)* %ina, align 4
+ %b = load <2 x i16>, <2 x i16> addrspace(1)* %inb, align 4
+ %mul = mul <2 x i16> %a, %b
+ store <2 x i16> %mul, <2 x i16> addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}mul_v4i16:
+; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; NOSDWA: v_mul_u32_u24_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA-NOT: v_mul_u32_u24_sdwa
+
+; SDWA-DAG: v_mul_u32_u24_sdwa v[[DST_MUL0:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
+; SDWA-DAG: v_mul_u32_u24_sdwa v[[DST_MUL1:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; SDWA-DAG: v_mul_u32_u24_sdwa v[[DST_MUL2:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
+; SDWA-DAG: v_mul_u32_u24_sdwa v[[DST_MUL3:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; SDWA-DAG: v_or_b32_sdwa v{{[0-9]+}}, v[[DST_MUL3]], v[[DST_MUL2]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; SDWA-DAG: v_or_b32_sdwa v{{[0-9]+}}, v[[DST_MUL1]], v[[DST_MUL0]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+
+define amdgpu_kernel void @mul_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %ina, <4 x i16> addrspace(1)* %inb) {
+entry:
+ %a = load <4 x i16>, <4 x i16> addrspace(1)* %ina, align 4
+ %b = load <4 x i16>, <4 x i16> addrspace(1)* %inb, align 4
+ %mul = mul <4 x i16> %a, %b
+ store <4 x i16> %mul, <4 x i16> addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}mul_v8i16:
+; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; NOSDWA: v_mul_u32_u24_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA-NOT: v_mul_u32_u24_sdwa
+
+; SDWA-DAG: v_mul_u32_u24_sdwa v[[DST_MUL0:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
+; SDWA-DAG: v_mul_u32_u24_sdwa v[[DST_MUL1:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; SDWA-DAG: v_mul_u32_u24_sdwa v[[DST_MUL2:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
+; SDWA-DAG: v_mul_u32_u24_sdwa v[[DST_MUL3:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; SDWA-DAG: v_mul_u32_u24_sdwa v[[DST_MUL4:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
+; SDWA-DAG: v_mul_u32_u24_sdwa v[[DST_MUL5:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; SDWA-DAG: v_mul_u32_u24_sdwa v[[DST_MUL6:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
+; SDWA-DAG: v_mul_u32_u24_sdwa v[[DST_MUL7:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; SDWA-DAG: v_or_b32_sdwa v{{[0-9]+}}, v[[DST_MUL7]], v[[DST_MUL6]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; SDWA-DAG: v_or_b32_sdwa v{{[0-9]+}}, v[[DST_MUL5]], v[[DST_MUL4]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; SDWA-DAG: v_or_b32_sdwa v{{[0-9]+}}, v[[DST_MUL3]], v[[DST_MUL2]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; SDWA-DAG: v_or_b32_sdwa v{{[0-9]+}}, v[[DST_MUL1]], v[[DST_MUL0]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+
+define amdgpu_kernel void @mul_v8i16(<8 x i16> addrspace(1)* %out, <8 x i16> addrspace(1)* %ina, <8 x i16> addrspace(1)* %inb) {
+entry:
+ %a = load <8 x i16>, <8 x i16> addrspace(1)* %ina, align 4
+ %b = load <8 x i16>, <8 x i16> addrspace(1)* %inb, align 4
+ %mul = mul <8 x i16> %a, %b
+ store <8 x i16> %mul, <8 x i16> addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}mul_half:
+; NOSDWA: v_mul_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA-NOT: v_mul_f16_sdwa
+; SDWA: v_mul_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; SDWA-NOT: v_mul_f16_sdwa
+
+define amdgpu_kernel void @mul_half(half addrspace(1)* %out, half addrspace(1)* %ina, half addrspace(1)* %inb) {
+entry:
+ %a = load half, half addrspace(1)* %ina, align 4
+ %b = load half, half addrspace(1)* %inb, align 4
+ %mul = fmul half %a, %b
+ store half %mul, half addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}mul_v2half:
+; NOSDWA: v_lshrrev_b32_e32 v[[DST0:[0-9]+]], 16, v{{[0-9]+}}
+; NOSDWA: v_lshrrev_b32_e32 v[[DST1:[0-9]+]], 16, v{{[0-9]+}}
+; NOSDWA: v_mul_f16_e32 v[[DST_MUL:[0-9]+]], v[[DST1]], v[[DST0]]
+; NOSDWA: v_lshlrev_b32_e32 v[[DST_SHL:[0-9]+]], 16, v[[DST_MUL]]
+; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v[[DST_SHL]], v{{[0-9]+}}
+; NOSDWA-NOT: v_mul_f16_sdwa
+
+; SDWA-DAG: v_mul_f16_sdwa v[[DST_MUL_HI:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; SDWA-DAG: v_mul_f16_e32 v[[DST_MUL_LO:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; SDWA: v_or_b32_e32 v{{[0-9]+}}, v[[DST_MUL_HI]], v[[DST_MUL_LO]]
+define amdgpu_kernel void @mul_v2half(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %ina, <2 x half> addrspace(1)* %inb) {
+entry:
+ %a = load <2 x half>, <2 x half> addrspace(1)* %ina, align 4
+ %b = load <2 x half>, <2 x half> addrspace(1)* %inb, align 4
+ %mul = fmul <2 x half> %a, %b
+ store <2 x half> %mul, <2 x half> addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}mul_v4half:
+; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; NOSDWA: v_mul_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA-NOT: v_mul_f16_sdwa
+
+; SDWA-DAG: v_mul_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; SDWA-DAG: v_mul_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; SDWA-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; SDWA-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+
+define amdgpu_kernel void @mul_v4half(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %ina, <4 x half> addrspace(1)* %inb) {
+entry:
+ %a = load <4 x half>, <4 x half> addrspace(1)* %ina, align 4
+ %b = load <4 x half>, <4 x half> addrspace(1)* %inb, align 4
+ %mul = fmul <4 x half> %a, %b
+ store <4 x half> %mul, <4 x half> addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}mul_v8half:
+; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; NOSDWA: v_mul_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA-NOT: v_mul_f16_sdwa
+
+; SDWA-DAG: v_mul_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; SDWA-DAG: v_mul_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; SDWA-DAG: v_mul_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; SDWA-DAG: v_mul_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; SDWA-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; SDWA-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; SDWA-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; SDWA-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+
+define amdgpu_kernel void @mul_v8half(<8 x half> addrspace(1)* %out, <8 x half> addrspace(1)* %ina, <8 x half> addrspace(1)* %inb) {
+entry:
+ %a = load <8 x half>, <8 x half> addrspace(1)* %ina, align 4
+ %b = load <8 x half>, <8 x half> addrspace(1)* %inb, align 4
+ %mul = fmul <8 x half> %a, %b
+ store <8 x half> %mul, <8 x half> addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}mul_i8:
+; NOSDWA: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA-NOT: v_mul_u32_u24_sdwa
+; SDWA: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; SDWA-NOT: v_mul_u32_u24_sdwa
+
+define amdgpu_kernel void @mul_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %ina, i8 addrspace(1)* %inb) {
+entry:
+ %a = load i8, i8 addrspace(1)* %ina, align 4
+ %b = load i8, i8 addrspace(1)* %inb, align 4
+ %mul = mul i8 %a, %b
+ store i8 %mul, i8 addrspace(1)* %out, align 4
+ ret void
+}
+
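+; For i8 vectors the same folding applies at byte granularity, using
+; BYTE_[0-3] source/destination selects in place of WORD_[01].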
+; GCN-LABEL: {{^}}mul_v2i8:
+; NOSDWA: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; NOSDWA: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; NOSDWA: v_mul_u32_u24_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA-NOT: v_mul_u32_u24_sdwa
+
+; SDWA: v_mul_u32_u24_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+
+define amdgpu_kernel void @mul_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(1)* %ina, <2 x i8> addrspace(1)* %inb) {
+entry:
+ %a = load <2 x i8>, <2 x i8> addrspace(1)* %ina, align 4
+ %b = load <2 x i8>, <2 x i8> addrspace(1)* %inb, align 4
+ %mul = mul <2 x i8> %a, %b
+ store <2 x i8> %mul, <2 x i8> addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}mul_v4i8:
+; NOSDWA: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; NOSDWA: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; NOSDWA: v_mul_u32_u24_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA-NOT: v_mul_u32_u24_sdwa
+
+; SDWA-DAG: v_mul_u32_u24_sdwa
+; SDWA-DAG: v_mul_u32_u24_sdwa
+; SDWA-DAG: v_mul_u32_u24_sdwa
+
+define amdgpu_kernel void @mul_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %ina, <4 x i8> addrspace(1)* %inb) {
+entry:
+ %a = load <4 x i8>, <4 x i8> addrspace(1)* %ina, align 4
+ %b = load <4 x i8>, <4 x i8> addrspace(1)* %inb, align 4
+ %mul = mul <4 x i8> %a, %b
+ store <4 x i8> %mul, <4 x i8> addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}mul_v8i8:
+; NOSDWA: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; NOSDWA: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; NOSDWA: v_mul_u32_u24_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA-NOT: v_mul_u32_u24_sdwa
+
+; SDWA-DAG: v_mul_u32_u24_sdwa
+; SDWA-DAG: v_mul_u32_u24_sdwa
+; SDWA-DAG: v_mul_u32_u24_sdwa
+; SDWA-DAG: v_mul_u32_u24_sdwa
+; SDWA-DAG: v_mul_u32_u24_sdwa
+; SDWA-DAG: v_mul_u32_u24_sdwa
+
+define amdgpu_kernel void @mul_v8i8(<8 x i8> addrspace(1)* %out, <8 x i8> addrspace(1)* %ina, <8 x i8> addrspace(1)* %inb) {
+entry:
+ %a = load <8 x i8>, <8 x i8> addrspace(1)* %ina, align 4
+ %b = load <8 x i8>, <8 x i8> addrspace(1)* %inb, align 4
+ %mul = mul <8 x i8> %a, %b
+ store <8 x i8> %mul, <8 x i8> addrspace(1)* %out, align 4
+ ret void
+}
+
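+; The sign extension feeding the conversion can be folded as an SDWA sext()
+; source modifier, replacing the explicit v_bfe_i32/v_ashrrev_i32 sequence.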
+; GCN-LABEL: {{^}}sitofp_v2i16_to_v2f16:
+; NOSDWA-DAG: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 16
+; NOSDWA-DAG: v_ashrrev_i32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; NOSDWA-DAG: v_cvt_f32_i32_e32 v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA-DAG: v_cvt_f32_i32_e32 v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA-NOT: v_cvt_f32_i32_sdwa
+
+; SDWA-DAG: v_cvt_f32_i32_sdwa v{{[0-9]+}}, sext(v{{[0-9]+}}) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+; SDWA-DAG: v_cvt_f32_i32_sdwa v{{[0-9]+}}, sext(v{{[0-9]+}}) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+
+define amdgpu_kernel void @sitofp_v2i16_to_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x i16> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x i16>, <2 x i16> addrspace(1)* %a
+ %r.val = sitofp <2 x i16> %a.val to <2 x half>
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_v2half:
+; NOSDWA: v_lshrrev_b32_e32 v[[DST0:[0-9]+]], 16, v{{[0-9]+}}
+; NOSDWA: v_lshrrev_b32_e32 v[[DST1:[0-9]+]], 16, v{{[0-9]+}}
+; NOSDWA: v_mac_f16_e32 v[[DST_MAC:[0-9]+]], v[[DST1]], v[[DST0]]
+; NOSDWA: v_lshlrev_b32_e32 v[[DST_SHL:[0-9]+]], 16, v[[DST_MAC]]
+; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v[[DST_SHL]], v{{[0-9]+}}
+; NOSDWA-NOT: v_mac_f16_sdwa
+
+; SDWA: v_mac_f16_sdwa v[[DST_MAC:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; SDWA: v_lshlrev_b32_e32 v[[DST_SHL:[0-9]+]], 16, v[[DST_MAC]]
+
+define amdgpu_kernel void @mac_v2half(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %ina, <2 x half> addrspace(1)* %inb) {
+entry:
+ %a = load <2 x half>, <2 x half> addrspace(1)* %ina, align 4
+ %b = load <2 x half>, <2 x half> addrspace(1)* %inb, align 4
+ %mul = fmul <2 x half> %a, %b
+ %mac = fadd <2 x half> %mul, %b
+ store <2 x half> %mac, <2 x half> addrspace(1)* %out, align 4
+ ret void
+}
+
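+; With immediate multiplicands there is no packed register source to take
+; sub-word selects from, so no SDWA form is expected under either RUN line.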
+; GCN-LABEL: {{^}}immediate_mul_v2i16:
+; NOSDWA-NOT: v_mul_u32_u24_sdwa
+; SDWA-NOT: v_mul_u32_u24_sdwa
+
+define amdgpu_kernel void @immediate_mul_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
+entry:
+ %a = load <2 x i16>, <2 x i16> addrspace(1)* %in, align 4
+ %mul = mul <2 x i16> %a, <i16 123, i16 321>
+ store <2 x i16> %mul, <2 x i16> addrspace(1)* %out, align 4
+ ret void
+}
+
+; The same src is used twice - the peephole should not convert it.
+; GCN-LABEL: {{^}}mulmul_v2i16:
+; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; NOSDWA: v_mul_u32_u24_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; NOSDWA-NOT: v_mul_u32_u24_sdwa
+
+; SDWA: v_mul_u32_u24_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+
+define amdgpu_kernel void @mulmul_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %ina, <2 x i16> addrspace(1)* %inb) {
+entry:
+ %a = load <2 x i16>, <2 x i16> addrspace(1)* %ina, align 4
+ %b = load <2 x i16>, <2 x i16> addrspace(1)* %inb, align 4
+ %mul = mul <2 x i16> %a, %b
+ %mul2 = mul <2 x i16> %mul, %b
+ store <2 x i16> %mul2, <2 x i16> addrspace(1)* %out, align 4
+ ret void
+}
+
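+; The add sits in a different basic block than the loads; the peephole is
+; still expected to form the SDWA add for the high halves.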
+; GCN-LABEL: {{^}}add_bb_v2i16:
+; NOSDWA-NOT: v_add_i32_sdwa
+
+; SDWA: v_add_i32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+
+define amdgpu_kernel void @add_bb_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %ina, <2 x i16> addrspace(1)* %inb) {
+entry:
+ %a = load <2 x i16>, <2 x i16> addrspace(1)* %ina, align 4
+ %b = load <2 x i16>, <2 x i16> addrspace(1)* %inb, align 4
+ br label %add_label
+add_label:
+ %add = add <2 x i16> %a, %b
+ br label %store_label
+store_label:
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/AMDGPU/select-fabs-fneg-extract-legacy.ll b/test/CodeGen/AMDGPU/select-fabs-fneg-extract-legacy.ll
index 559d464f36a5..c8839c17365e 100644
--- a/test/CodeGen/AMDGPU/select-fabs-fneg-extract-legacy.ll
+++ b/test/CodeGen/AMDGPU/select-fabs-fneg-extract-legacy.ll
@@ -11,7 +11,7 @@
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], -2.0, [[RCP]], vcc
; GCN: v_xor_b32_e32 [[NEG_SELECT:v[0-9]+]], 0x80000000, [[SELECT]]
; GCN-NEXT: buffer_store_dword [[NEG_SELECT]]
-define void @select_fneg_posk_src_rcp_legacy_f32(i32 %c) #2 {
+define amdgpu_kernel void @select_fneg_posk_src_rcp_legacy_f32(i32 %c) #2 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -29,7 +29,7 @@ define void @select_fneg_posk_src_rcp_legacy_f32(i32 %c) #2 {
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], -2.0, [[MUL]], vcc
; GCN: v_xor_b32_e32 [[NEG_SELECT:v[0-9]+]], 0x80000000, [[SELECT]]
; GCN-NEXT: buffer_store_dword [[NEG_SELECT]]
-define void @select_fneg_posk_src_mul_legacy_f32(i32 %c) #2 {
+define amdgpu_kernel void @select_fneg_posk_src_mul_legacy_f32(i32 %c) #2 {
%x = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
%mul = call float @llvm.amdgcn.fmul.legacy(float %x, float 4.0)
diff --git a/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll b/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
index 73dadde884ae..3417eb02b361 100644
--- a/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
+++ b/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -enable-no-signed-zeros-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-no-signed-zeros-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; GCN-LABEL: {{^}}add_select_fabs_fabs_f32:
; GCN: buffer_load_dword [[X:v[0-9]+]]
@@ -8,7 +8,7 @@
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y]], [[X]], vcc
; GCN: v_add_f32_e64 v{{[0-9]+}}, |[[SELECT]]|, [[Z]]
-define void @add_select_fabs_fabs_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_fabs_fabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -30,7 +30,7 @@ define void @add_select_fabs_fabs_f32(i32 %c) #0 {
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y]], [[X]], vcc
; GCN-DAG: v_add_f32_e64 v{{[0-9]+}}, |[[SELECT]]|, [[Z]]
; GCN-DAG: v_add_f32_e64 v{{[0-9]+}}, |[[X]]|, [[W]]
-define void @add_select_multi_use_lhs_fabs_fabs_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_multi_use_lhs_fabs_fabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -57,7 +57,7 @@ define void @add_select_multi_use_lhs_fabs_fabs_f32(i32 %c) #0 {
; GCN: buffer_store_dword [[ADD]]
; GCN: buffer_store_dword [[X_ABS]]
-define void @add_select_multi_store_use_lhs_fabs_fabs_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_multi_store_use_lhs_fabs_fabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -80,7 +80,7 @@ define void @add_select_multi_store_use_lhs_fabs_fabs_f32(i32 %c) #0 {
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y]], [[X]], vcc
; GCN-DAG: v_add_f32_e64 v{{[0-9]+}}, |[[SELECT]]|, [[Z]]
; GCN-DAG: v_add_f32_e64 v{{[0-9]+}}, |[[Y]]|, [[W]]
-define void @add_select_multi_use_rhs_fabs_fabs_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_multi_use_rhs_fabs_fabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -104,7 +104,7 @@ define void @add_select_multi_use_rhs_fabs_fabs_f32(i32 %c) #0 {
; GCN: v_and_b32_e32 [[X_ABS:v[0-9]+]], 0x7fffffff, [[X]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y]], [[X_ABS]], vcc
; GCN: v_add_f32_e32 v{{[0-9]+}}, [[Z]], [[SELECT]]
-define void @add_select_fabs_var_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_fabs_var_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -123,7 +123,7 @@ define void @add_select_fabs_var_f32(i32 %c) #0 {
; GCN: v_and_b32_e32 [[FABS_X:v[0-9]+]], 0x7fffffff, [[X]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], -1.0, [[FABS_X]], vcc
; GCN: v_add_f32_e32 v{{[0-9]+}}, [[Y]], [[SELECT]]
-define void @add_select_fabs_negk_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_fabs_negk_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -140,7 +140,7 @@ define void @add_select_fabs_negk_f32(i32 %c) #0 {
; GCN: v_cndmask_b32_e64 [[SELECT:v[0-9]+]], -1.0, -2.0, s
; GCN: v_add_f32_e64 v{{[0-9]+}}, |[[SELECT]]|, [[X]]
-define void @add_select_fabs_negk_negk_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_fabs_negk_negk_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
%select = select i1 %cmp, float -2.0, float -1.0
@@ -155,7 +155,7 @@ define void @add_select_fabs_negk_negk_f32(i32 %c) #0 {
; GCN: v_cndmask_b32_e64 [[SELECT:v[0-9]+]], 1.0, 2.0, s
; GCN: v_add_f32_e32 v{{[0-9]+}}, [[X]], [[SELECT]]
-define void @add_select_posk_posk_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_posk_posk_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
%select = select i1 %cmp, float 2.0, float 1.0
@@ -172,7 +172,7 @@ define void @add_select_posk_posk_f32(i32 %c) #0 {
; GCN-DAG: v_cmp_ne_u32_e64 vcc, s{{[0-9]+}}, 0
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], -1.0, [[FABS_X]], vcc
; GCN: v_add_f32_e32 v{{[0-9]+}}, [[Y]], [[SELECT]]
-define void @add_select_negk_fabs_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_negk_fabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -192,7 +192,7 @@ define void @add_select_negk_fabs_f32(i32 %c) #0 {
; GCN-DAG: v_cmp_ne_u32_e64 vcc, s{{[0-9]+}}, 0
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[K]], [[FABS_X]], vcc
; GCN: v_add_f32_e32 v{{[0-9]+}}, [[Y]], [[SELECT]]
-define void @add_select_negliteralk_fabs_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_negliteralk_fabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -209,7 +209,7 @@ define void @add_select_negliteralk_fabs_f32(i32 %c) #0 {
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 1.0, [[X]], vcc
; GCN: v_add_f32_e64 v{{[0-9]+}}, |[[SELECT]]|, [[Y]]
-define void @add_select_fabs_posk_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_fabs_posk_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
@@ -228,7 +228,7 @@ define void @add_select_fabs_posk_f32(i32 %c) #0 {
; GCN: v_cmp_ne_u32_e64 vcc, s{{[0-9]+}}, 0
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 1.0, [[X]], vcc
; GCN: v_add_f32_e64 v{{[0-9]+}}, |[[SELECT]]|, [[Y]]
-define void @add_select_posk_fabs_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_posk_fabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -246,7 +246,7 @@ define void @add_select_posk_fabs_f32(i32 %c) #0 {
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y]], [[X]], vcc
; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Z]]
-define void @add_select_fneg_fneg_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_fneg_fneg_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -268,7 +268,7 @@ define void @add_select_fneg_fneg_f32(i32 %c) #0 {
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y]], [[X]], vcc
; GCN-DAG: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Z]]
; GCN-DAG: v_subrev_f32_e32 v{{[0-9]+}}, [[X]], [[W]]
-define void @add_select_multi_use_lhs_fneg_fneg_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_multi_use_lhs_fneg_fneg_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -295,7 +295,7 @@ define void @add_select_multi_use_lhs_fneg_fneg_f32(i32 %c) #0 {
; GCN: buffer_store_dword [[ADD]]
; GCN: buffer_store_dword [[NEG_X]]
-define void @add_select_multi_store_use_lhs_fneg_fneg_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_multi_store_use_lhs_fneg_fneg_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -318,7 +318,7 @@ define void @add_select_multi_store_use_lhs_fneg_fneg_f32(i32 %c) #0 {
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y]], [[X]], vcc
; GCN-DAG: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Z]]
; GCN-DAG: v_subrev_f32_e32 v{{[0-9]+}}, [[Y]], [[W]]
-define void @add_select_multi_use_rhs_fneg_fneg_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_multi_use_rhs_fneg_fneg_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -342,7 +342,7 @@ define void @add_select_multi_use_rhs_fneg_fneg_f32(i32 %c) #0 {
; GCN: v_xor_b32_e32 [[X_NEG:v[0-9]+]], 0x80000000, [[X]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y]], [[X_NEG]], vcc
; GCN: v_add_f32_e32 v{{[0-9]+}}, [[Z]], [[SELECT]]
-define void @add_select_fneg_var_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_fneg_var_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -360,7 +360,7 @@ define void @add_select_fneg_var_f32(i32 %c) #0 {
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 1.0, [[X]], vcc
; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Y]]
-define void @add_select_fneg_negk_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_fneg_negk_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -372,13 +372,13 @@ define void @add_select_fneg_negk_f32(i32 %c) #0 {
}
; GCN-LABEL: {{^}}add_select_fneg_inv2pi_f32:
-; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0xbe22f983
; GCN: buffer_load_dword [[X:v[0-9]+]]
; GCN: buffer_load_dword [[Y:v[0-9]+]]
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0xbe22f983
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[K]], [[X]], vcc
; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Y]]
-define void @add_select_fneg_inv2pi_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_fneg_inv2pi_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -390,15 +390,15 @@ define void @add_select_fneg_inv2pi_f32(i32 %c) #0 {
}
; GCN-LABEL: {{^}}add_select_fneg_neginv2pi_f32:
-; SI: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e22f983
; GCN: buffer_load_dword [[X:v[0-9]+]]
; GCN: buffer_load_dword [[Y:v[0-9]+]]
+; SI: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e22f983
; SI: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[K]], [[X]], vcc
; VI: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 0.15915494, [[X]], vcc
; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Y]]
-define void @add_select_fneg_neginv2pi_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_fneg_neginv2pi_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -415,7 +415,7 @@ define void @add_select_fneg_neginv2pi_f32(i32 %c) #0 {
; GCN: v_cmp_eq_u32_e64
; GCN: v_cndmask_b32_e64 [[SELECT:v[0-9]+]], -1.0, -2.0, s
; GCN: v_add_f32_e32 v{{[0-9]+}}, [[X]], [[SELECT]]
-define void @add_select_negk_negk_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_negk_negk_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
%select = select i1 %cmp, float -2.0, float -1.0
@@ -432,7 +432,7 @@ define void @add_select_negk_negk_f32(i32 %c) #0 {
; GCN: v_cmp_eq_u32_e64
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[K1]], [[K0]], vcc
; GCN: v_add_f32_e32 v{{[0-9]+}}, [[X]], [[SELECT]]
-define void @add_select_negliteralk_negliteralk_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_negliteralk_negliteralk_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
%select = select i1 %cmp, float -2048.0, float -4096.0
@@ -446,7 +446,7 @@ define void @add_select_negliteralk_negliteralk_f32(i32 %c) #0 {
; GCN: v_cndmask_b32_e64 [[SELECT:v[0-9]+]], -1.0, -2.0, s
; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[X]]
-define void @add_select_fneg_negk_negk_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_fneg_negk_negk_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
%select = select i1 %cmp, float -2.0, float -1.0
@@ -463,7 +463,7 @@ define void @add_select_fneg_negk_negk_f32(i32 %c) #0 {
; GCN: v_cmp_ne_u32_e64 vcc, s{{[0-9]+}}, 0
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 1.0, [[X]], vcc
; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Y]]
-define void @add_select_negk_fneg_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_negk_fneg_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -480,7 +480,7 @@ define void @add_select_negk_fneg_f32(i32 %c) #0 {
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], -1.0, [[X]], vcc
; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Y]]
-define void @add_select_fneg_posk_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_fneg_posk_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -498,7 +498,7 @@ define void @add_select_fneg_posk_f32(i32 %c) #0 {
; GCN: v_cmp_ne_u32_e64 vcc, s{{[0-9]+}}, 0
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], -1.0, [[X]], vcc
; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Y]]
-define void @add_select_posk_fneg_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_posk_fneg_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -518,7 +518,7 @@ define void @add_select_posk_fneg_f32(i32 %c) #0 {
; GCN-DAG: v_and_b32_e32 [[Y_ABS:v[0-9]+]], 0x7fffffff, [[Y]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y_ABS]], [[X_NEG_ABS]], vcc
; GCN: v_add_f32_e32 v{{[0-9]+}}, [[Z]], [[SELECT]]
-define void @add_select_negfabs_fabs_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_negfabs_fabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -541,7 +541,7 @@ define void @add_select_negfabs_fabs_f32(i32 %c) #0 {
; GCN-DAG: v_and_b32_e32 [[X_ABS:v[0-9]+]], 0x7fffffff, [[X]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y_NEG_ABS]], [[X_ABS]], vcc
; GCN: v_add_f32_e32 v{{[0-9]+}}, [[Z]], [[SELECT]]
-define void @add_select_fabs_negfabs_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_fabs_negfabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -564,7 +564,7 @@ define void @add_select_fabs_negfabs_f32(i32 %c) #0 {
; GCN-DAG: v_and_b32_e32 [[Y_ABS:v[0-9]+]], 0x7fffffff, [[Y]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y_ABS]], [[X_NEG]], vcc
; GCN: v_add_f32_e32 v{{[0-9]+}}, [[Z]], [[SELECT]]
-define void @add_select_neg_fabs_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_neg_fabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -586,7 +586,7 @@ define void @add_select_neg_fabs_f32(i32 %c) #0 {
; GCN-DAG: v_xor_b32_e32 [[Y_NEG:v[0-9]+]], 0x80000000, [[Y]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y_NEG]], [[X_ABS]], vcc
; GCN: v_add_f32_e32 v{{[0-9]+}}, [[Z]], [[SELECT]]
-define void @add_select_fabs_neg_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_fabs_neg_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -607,7 +607,7 @@ define void @add_select_fabs_neg_f32(i32 %c) #0 {
; GCN-DAG: v_and_b32_e32 [[Y_ABS:v[0-9]+]], 0x7fffffff, [[Y]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[Y_ABS]], [[X]], vcc
; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Z]]
-define void @add_select_neg_negfabs_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_neg_negfabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -629,7 +629,7 @@ define void @add_select_neg_negfabs_f32(i32 %c) #0 {
; GCN-DAG: v_and_b32_e32 [[X_ABS:v[0-9]+]], 0x7fffffff, [[X]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[X_ABS]], [[Y]], vcc
; GCN: v_subrev_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Z]]
-define void @add_select_negfabs_neg_f32(i32 %c) #0 {
+define amdgpu_kernel void @add_select_negfabs_neg_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
@@ -651,7 +651,7 @@ define void @add_select_negfabs_neg_f32(i32 %c) #0 {
; GCN-DAG: v_and_b32_e32 [[X_ABS:v[0-9]+]], 0x7fffffff, [[X]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], -4.0, [[X_ABS]], vcc
; GCN: v_mul_f32_e64 v{{[0-9]+}}, -[[SELECT]], [[Y]]
-define void @mul_select_negfabs_posk_f32(i32 %c) #0 {
+define amdgpu_kernel void @mul_select_negfabs_posk_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -672,7 +672,7 @@ define void @mul_select_negfabs_posk_f32(i32 %c) #0 {
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], -4.0, [[X_ABS]], vcc
; GCN: v_mul_f32_e64 v{{[0-9]+}}, -[[SELECT]], [[Y]]
-define void @mul_select_posk_negfabs_f32(i32 %c) #0 {
+define amdgpu_kernel void @mul_select_posk_negfabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -690,7 +690,7 @@ define void @mul_select_posk_negfabs_f32(i32 %c) #0 {
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 4.0, [[X]], vcc
; GCN: v_mul_f32_e64 v{{[0-9]+}}, -|[[SELECT]]|, [[Y]]
-define void @mul_select_negfabs_negk_f32(i32 %c) #0 {
+define amdgpu_kernel void @mul_select_negfabs_negk_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -709,7 +709,7 @@ define void @mul_select_negfabs_negk_f32(i32 %c) #0 {
; GCN: v_cmp_ne_u32_e64 vcc
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 4.0, [[X]], vcc
; GCN: v_mul_f32_e64 v{{[0-9]+}}, -|[[SELECT]]|, [[Y]]
-define void @mul_select_negk_negfabs_f32(i32 %c) #0 {
+define amdgpu_kernel void @mul_select_negk_negfabs_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -732,7 +732,7 @@ define void @mul_select_negk_negfabs_f32(i32 %c) #0 {
; GCN: v_sub_f32_e32 [[ADD:v[0-9]+]], -4.0, [[X]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 2.0, [[ADD]], vcc
; GCN-NEXT: buffer_store_dword [[SELECT]]
-define void @select_fneg_posk_src_add_f32(i32 %c) #0 {
+define amdgpu_kernel void @select_fneg_posk_src_add_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -749,7 +749,7 @@ define void @select_fneg_posk_src_add_f32(i32 %c) #0 {
; GCN: v_sub_f32_e32 [[ADD:v[0-9]+]], 4.0, [[X]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 2.0, [[ADD]], vcc
; GCN-NEXT: buffer_store_dword [[SELECT]]
-define void @select_fneg_posk_src_sub_f32(i32 %c) #0 {
+define amdgpu_kernel void @select_fneg_posk_src_sub_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
%add = fsub float %x, 4.0
@@ -765,7 +765,7 @@ define void @select_fneg_posk_src_sub_f32(i32 %c) #0 {
; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], -4.0, [[X]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 2.0, [[MUL]], vcc
; GCN-NEXT: buffer_store_dword [[SELECT]]
-define void @select_fneg_posk_src_mul_f32(i32 %c) #0 {
+define amdgpu_kernel void @select_fneg_posk_src_mul_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
%mul = fmul float %x, 4.0
@@ -782,7 +782,7 @@ define void @select_fneg_posk_src_mul_f32(i32 %c) #0 {
; GCN: v_fma_f32 [[FMA:v[0-9]+]], [[X]], -4.0, -[[Z]]
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 2.0, [[FMA]], vcc
; GCN-NEXT: buffer_store_dword [[SELECT]]
-define void @select_fneg_posk_src_fma_f32(i32 %c) #0 {
+define amdgpu_kernel void @select_fneg_posk_src_fma_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -799,7 +799,7 @@ define void @select_fneg_posk_src_fma_f32(i32 %c) #0 {
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 2.0, [[X]], vcc
; GCN-NEXT: buffer_store_dword [[SELECT]]
-define void @select_fneg_posk_src_fmad_f32(i32 %c) #0 {
+define amdgpu_kernel void @select_fneg_posk_src_fmad_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%z = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
@@ -818,7 +818,7 @@ define void @select_fneg_posk_src_fmad_f32(i32 %c) #0 {
; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], -2.0, [[RCP]], vcc
; GCN: v_xor_b32_e32 [[NEG_SELECT:v[0-9]+]], 0x80000000, [[SELECT]]
; GCN-NEXT: buffer_store_dword [[NEG_SELECT]]
-define void @select_fneg_posk_src_rcp_f32(i32 %c) #0 {
+define amdgpu_kernel void @select_fneg_posk_src_rcp_f32(i32 %c) #0 {
%x = load volatile float, float addrspace(1)* undef
%y = load volatile float, float addrspace(1)* undef
%cmp = icmp eq i32 %c, 0
diff --git a/test/CodeGen/AMDGPU/select-i1.ll b/test/CodeGen/AMDGPU/select-i1.ll
index 07dcb2153384..5eaad1f363f9 100644
--- a/test/CodeGen/AMDGPU/select-i1.ll
+++ b/test/CodeGen/AMDGPU/select-i1.ll
@@ -6,7 +6,7 @@
; FUNC-LABEL: {{^}}select_i1:
; SI: v_cndmask_b32
; SI-NOT: v_cndmask_b32
-define void @select_i1(i1 addrspace(1)* %out, i32 %cond, i1 %a, i1 %b) nounwind {
+define amdgpu_kernel void @select_i1(i1 addrspace(1)* %out, i32 %cond, i1 %a, i1 %b) nounwind {
%cmp = icmp ugt i32 %cond, 5
%sel = select i1 %cmp, i1 %a, i1 %b
store i1 %sel, i1 addrspace(1)* %out, align 4
@@ -19,7 +19,7 @@ define void @select_i1(i1 addrspace(1)* %out, i32 %cond, i1 %a, i1 %b) nounwind
; SI-DAG: buffer_load_ubyte [[B:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:46
; SI: v_cmp_eq_u32_e32 vcc, 1, [[COND]]
; SI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
-define void @s_minmax_i1(i1 addrspace(1)* %out, i1 zeroext %cond, i1 zeroext %a, i1 zeroext %b) nounwind {
+define amdgpu_kernel void @s_minmax_i1(i1 addrspace(1)* %out, i1 zeroext %cond, i1 zeroext %a, i1 zeroext %b) nounwind {
%cmp = icmp slt i1 %cond, false
%sel = select i1 %cmp, i1 %a, i1 %b
store i1 %sel, i1 addrspace(1)* %out, align 4
diff --git a/test/CodeGen/AMDGPU/select-opt.ll b/test/CodeGen/AMDGPU/select-opt.ll
index ad358d33c405..d56b952118b5 100644
--- a/test/CodeGen/AMDGPU/select-opt.ll
+++ b/test/CodeGen/AMDGPU/select-opt.ll
@@ -11,7 +11,7 @@
; GCN: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
; GCN-NOT: [[RESULT]]
; GCN: buffer_store_dword [[RESULT]]
-define void @opt_select_i32_and_cmp_i32(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %x, i32 %y) #0 {
+define amdgpu_kernel void @opt_select_i32_and_cmp_i32(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %x, i32 %y) #0 {
%icmp0 = icmp ne i32 %a, %b
%icmp1 = icmp ne i32 %a, %c
%and = and i1 %icmp0, %icmp1
@@ -27,7 +27,7 @@ define void @opt_select_i32_and_cmp_i32(i32 addrspace(1)* %out, i32 %a, i32 %b,
; GCN: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
; GCN-NOT: [[RESULT]]
; GCN: buffer_store_dword [[RESULT]]
-define void @opt_select_i32_and_cmp_f32(i32 addrspace(1)* %out, float %a, float %b, float %c, i32 %x, i32 %y) #0 {
+define amdgpu_kernel void @opt_select_i32_and_cmp_f32(i32 addrspace(1)* %out, float %a, float %b, float %c, i32 %x, i32 %y) #0 {
%fcmp0 = fcmp one float %a, %b
%fcmp1 = fcmp one float %a, %c
%and = and i1 %fcmp0, %fcmp1
@@ -43,7 +43,7 @@ define void @opt_select_i32_and_cmp_f32(i32 addrspace(1)* %out, float %a, float
; GCN: v_cndmask_b32_e32 v[[RESULT1:[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
; GCN: v_cndmask_b32_e32 v[[RESULT0:[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
; GCN: buffer_store_dwordx2 v{{\[}}[[RESULT0]]:[[RESULT1]]{{\]}}
-define void @opt_select_i64_and_cmp_i32(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i64 %x, i64 %y) #0 {
+define amdgpu_kernel void @opt_select_i64_and_cmp_i32(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i64 %x, i64 %y) #0 {
%icmp0 = icmp ne i32 %a, %b
%icmp1 = icmp ne i32 %a, %c
%and = and i1 %icmp0, %icmp1
@@ -59,7 +59,7 @@ define void @opt_select_i64_and_cmp_i32(i64 addrspace(1)* %out, i32 %a, i32 %b,
; GCN: v_cndmask_b32_e32 v[[RESULT1:[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
; GCN: v_cndmask_b32_e32 v[[RESULT0:[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
; GCN: buffer_store_dwordx2 v{{\[}}[[RESULT0]]:[[RESULT1]]{{\]}}
-define void @opt_select_i64_and_cmp_f32(i64 addrspace(1)* %out, float %a, float %b, float %c, i64 %x, i64 %y) #0 {
+define amdgpu_kernel void @opt_select_i64_and_cmp_f32(i64 addrspace(1)* %out, float %a, float %b, float %c, i64 %x, i64 %y) #0 {
%fcmp0 = fcmp one float %a, %b
%fcmp1 = fcmp one float %a, %c
%and = and i1 %fcmp0, %fcmp1
@@ -76,7 +76,7 @@ define void @opt_select_i64_and_cmp_f32(i64 addrspace(1)* %out, float %a, float
; GCN-NOT: [[RESULT]]
; GCN: buffer_store_dword [[RESULT]]
; GCN: s_endpgm
-define void @opt_select_i32_or_cmp_i32(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %x, i32 %y) #0 {
+define amdgpu_kernel void @opt_select_i32_or_cmp_i32(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %x, i32 %y) #0 {
%icmp0 = icmp ne i32 %a, %b
%icmp1 = icmp ne i32 %a, %c
%or = or i1 %icmp0, %icmp1
@@ -92,7 +92,7 @@ define void @opt_select_i32_or_cmp_i32(i32 addrspace(1)* %out, i32 %a, i32 %b, i
; GCN: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
; GCN-NOT: [[RESULT]]
; GCN: buffer_store_dword [[RESULT]]
-define void @opt_select_i32_or_cmp_f32(i32 addrspace(1)* %out, float %a, float %b, float %c, i32 %x, i32 %y) #0 {
+define amdgpu_kernel void @opt_select_i32_or_cmp_f32(i32 addrspace(1)* %out, float %a, float %b, float %c, i32 %x, i32 %y) #0 {
%fcmp0 = fcmp one float %a, %b
%fcmp1 = fcmp one float %a, %c
%or = or i1 %fcmp0, %fcmp1
@@ -108,7 +108,7 @@ define void @opt_select_i32_or_cmp_f32(i32 addrspace(1)* %out, float %a, float %
; GCN: v_cndmask_b32_e32 v[[RESULT1:[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
; GCN: v_cndmask_b32_e32 v[[RESULT0:[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
; GCN: buffer_store_dwordx2 v{{\[}}[[RESULT0]]:[[RESULT1]]{{\]}}
-define void @opt_select_i64_or_cmp_i32(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i64 %x, i64 %y) #0 {
+define amdgpu_kernel void @opt_select_i64_or_cmp_i32(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i64 %x, i64 %y) #0 {
%icmp0 = icmp ne i32 %a, %b
%icmp1 = icmp ne i32 %a, %c
%or = or i1 %icmp0, %icmp1
@@ -124,7 +124,7 @@ define void @opt_select_i64_or_cmp_i32(i64 addrspace(1)* %out, i32 %a, i32 %b, i
; GCN: v_cndmask_b32_e32 v[[RESULT1:[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
; GCN: v_cndmask_b32_e32 v[[RESULT0:[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
; GCN: buffer_store_dwordx2 v{{\[}}[[RESULT0]]:[[RESULT1]]{{\]}}
-define void @opt_select_i64_or_cmp_f32(i64 addrspace(1)* %out, float %a, float %b, float %c, i64 %x, i64 %y) #0 {
+define amdgpu_kernel void @opt_select_i64_or_cmp_f32(i64 addrspace(1)* %out, float %a, float %b, float %c, i64 %x, i64 %y) #0 {
%fcmp0 = fcmp one float %a, %b
%fcmp1 = fcmp one float %a, %c
%or = or i1 %fcmp0, %fcmp1
@@ -138,7 +138,7 @@ define void @opt_select_i64_or_cmp_f32(i64 addrspace(1)* %out, float %a, float %
; GCN: v_cmp_neq_f32_e64 vcc, s{{[0-9]+}}, 0
; GCN: v_cmp_ne_u32_e32 vcc, 0, v{{[0-9]+}}
-define void @regression(float addrspace(1)* %out, float %c0, float %c1) #0 {
+define amdgpu_kernel void @regression(float addrspace(1)* %out, float %c0, float %c1) #0 {
entry:
%cmp0 = fcmp oeq float %c0, 1.0
br i1 %cmp0, label %if0, label %endif
diff --git a/test/CodeGen/AMDGPU/select-vectors.ll b/test/CodeGen/AMDGPU/select-vectors.ll
index 759abe2f2e9a..8710fc8c7307 100644
--- a/test/CodeGen/AMDGPU/select-vectors.ll
+++ b/test/CodeGen/AMDGPU/select-vectors.ll
@@ -10,7 +10,7 @@
; SI: v_cndmask_b32_e32
; SI: v_cndmask_b32_e32
; SI: v_cndmask_b32_e32
-define void @select_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, <4 x i8> %b, i8 %c) nounwind {
+define amdgpu_kernel void @select_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, <4 x i8> %b, i8 %c) nounwind {
%cmp = icmp eq i8 %c, 0
%select = select i1 %cmp, <4 x i8> %a, <4 x i8> %b
store <4 x i8> %select, <4 x i8> addrspace(1)* %out, align 4
@@ -22,7 +22,7 @@ define void @select_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, <4 x i8> %b,
; SI: v_cndmask_b32_e32
; SI: v_cndmask_b32_e32
; SI: v_cndmask_b32_e32
-define void @select_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, <4 x i16> %b, i32 %c) nounwind {
+define amdgpu_kernel void @select_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, <4 x i16> %b, i32 %c) nounwind {
%cmp = icmp eq i32 %c, 0
%select = select i1 %cmp, <4 x i16> %a, <4 x i16> %b
store <4 x i16> %select, <4 x i16> addrspace(1)* %out, align 4
@@ -36,7 +36,7 @@ define void @select_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, <4 x i16>
; SI: v_cndmask_b32_e32
; SI: v_cndmask_b32_e32
; SI: buffer_store_dwordx2
-define void @s_select_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b, i32 %c) nounwind {
+define amdgpu_kernel void @s_select_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b, i32 %c) nounwind {
%cmp = icmp eq i32 %c, 0
%select = select i1 %cmp, <2 x i32> %a, <2 x i32> %b
store <2 x i32> %select, <2 x i32> addrspace(1)* %out, align 8
@@ -49,7 +49,7 @@ define void @s_select_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32
; SI: v_cndmask_b32_e32
; SI: v_cndmask_b32_e32
; SI: buffer_store_dwordx4
-define void @s_select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b, i32 %c) nounwind {
+define amdgpu_kernel void @s_select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b, i32 %c) nounwind {
%cmp = icmp eq i32 %c, 0
%select = select i1 %cmp, <4 x i32> %a, <4 x i32> %b
store <4 x i32> %select, <4 x i32> addrspace(1)* %out, align 16
@@ -64,7 +64,7 @@ define void @s_select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32
; SI: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
; SI: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
; SI: buffer_store_dwordx4
-define void @v_select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %cond) #0 {
+define amdgpu_kernel void @v_select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %cond) #0 {
bb:
%tmp2 = icmp ult i32 %cond, 32
%val = load <4 x i32>, <4 x i32> addrspace(1)* %in
@@ -82,7 +82,7 @@ bb:
; SI: v_cndmask_b32_e32
; SI: v_cndmask_b32_e32
; SI: v_cndmask_b32_e32
-define void @select_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b, i32 %c) nounwind {
+define amdgpu_kernel void @select_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b, i32 %c) nounwind {
%cmp = icmp eq i32 %c, 0
%select = select i1 %cmp, <8 x i32> %a, <8 x i32> %b
store <8 x i32> %select, <8 x i32> addrspace(1)* %out, align 16
@@ -102,7 +102,7 @@ define void @select_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32>
; SI: v_mov_b32_e32 v{{[0-9]+}}, s[[BLO]]
; SI: v_cndmask_b32_e32
; SI: buffer_store_dwordx2
-define void @s_select_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b, i32 %c) nounwind {
+define amdgpu_kernel void @s_select_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b, i32 %c) nounwind {
%cmp = icmp eq i32 %c, 0
%select = select i1 %cmp, <2 x float> %a, <2 x float> %b
store <2 x float> %select, <2 x float> addrspace(1)* %out, align 16
@@ -120,7 +120,7 @@ define void @s_select_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x
; SI: v_cndmask_b32_e32
; SI: buffer_store_dwordx4
-define void @s_select_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b, i32 %c) nounwind {
+define amdgpu_kernel void @s_select_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b, i32 %c) nounwind {
%cmp = icmp eq i32 %c, 0
%select = select i1 %cmp, <4 x float> %a, <4 x float> %b
store <4 x float> %select, <4 x float> addrspace(1)* %out, align 16
@@ -135,7 +135,7 @@ define void @s_select_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x
; SI: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
; SI: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
; SI: buffer_store_dwordx4
-define void @v_select_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in, i32 %cond) #0 {
+define amdgpu_kernel void @v_select_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in, i32 %cond) #0 {
bb:
%tmp2 = icmp ult i32 %cond, 32
%val = load <4 x float>, <4 x float> addrspace(1)* %in
@@ -153,7 +153,7 @@ bb:
; SI: v_cndmask_b32_e32
; SI: v_cndmask_b32_e32
; SI: v_cndmask_b32_e32
-define void @select_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b, i32 %c) nounwind {
+define amdgpu_kernel void @select_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b, i32 %c) nounwind {
%cmp = icmp eq i32 %c, 0
%select = select i1 %cmp, <8 x float> %a, <8 x float> %b
store <8 x float> %select, <8 x float> addrspace(1)* %out, align 16
@@ -165,7 +165,7 @@ define void @select_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8 x f
; SI: v_cndmask_b32_e32
; SI: v_cndmask_b32_e32
; SI: v_cndmask_b32_e32
-define void @select_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b, i32 %c) nounwind {
+define amdgpu_kernel void @select_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b, i32 %c) nounwind {
%cmp = icmp eq i32 %c, 0
%select = select i1 %cmp, <2 x double> %a, <2 x double> %b
store <2 x double> %select, <2 x double> addrspace(1)* %out, align 16
@@ -181,7 +181,7 @@ define void @select_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <2 x
; SI: v_cndmask_b32_e32
; SI: v_cndmask_b32_e32
; SI: v_cndmask_b32_e32
-define void @select_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b, i32 %c) nounwind {
+define amdgpu_kernel void @select_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b, i32 %c) nounwind {
%cmp = icmp eq i32 %c, 0
%select = select i1 %cmp, <4 x double> %a, <4 x double> %b
store <4 x double> %select, <4 x double> addrspace(1)* %out, align 16
@@ -205,7 +205,7 @@ define void @select_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, <4 x
; SI: v_cndmask_b32_e32
; SI: v_cndmask_b32_e32
; SI: v_cndmask_b32_e32
-define void @select_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, <8 x double> %b, i32 %c) nounwind {
+define amdgpu_kernel void @select_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, <8 x double> %b, i32 %c) nounwind {
%cmp = icmp eq i32 %c, 0
%select = select i1 %cmp, <8 x double> %a, <8 x double> %b
store <8 x double> %select, <8 x double> addrspace(1)* %out, align 16
diff --git a/test/CodeGen/AMDGPU/select.f16.ll b/test/CodeGen/AMDGPU/select.f16.ll
index 19fe8d9b2326..2a7a9c9e0638 100644
--- a/test/CodeGen/AMDGPU/select.f16.ll
+++ b/test/CodeGen/AMDGPU/select.f16.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
-; GCN-LABEL: {{^}}select_f16
+; GCN-LABEL: {{^}}select_f16:
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
@@ -17,7 +17,7 @@
; VI: v_cndmask_b32_e32 v[[R_F16:[0-9]+]], v[[D_F16]], v[[C_F16]], vcc
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @select_f16(
+define amdgpu_kernel void @select_f16(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b,
@@ -34,13 +34,12 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}select_f16_imm_a
+; GCN-LABEL: {{^}}select_f16_imm_a:
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[D_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], 0x3800{{$}}
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
-; SI: v_cmp_gt_f32_e32 vcc, v[[B_F32]], v[[A_F32]]
+; SI: v_cmp_lt_f32_e32 vcc, 0.5, v[[B_F32]]
; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
; SI: v_cvt_f32_f16_e32 v[[D_F32:[0-9]+]], v[[D_F16]]
; SI: v_cndmask_b32_e32 v[[R_F32:[0-9]+]], v[[D_F32]], v[[C_F32]]
@@ -49,7 +48,7 @@ entry:
; VI: v_cndmask_b32_e32 v[[R_F16:[0-9]+]], v[[D_F16]], v[[C_F16]], vcc
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @select_f16_imm_a(
+define amdgpu_kernel void @select_f16_imm_a(
half addrspace(1)* %r,
half addrspace(1)* %b,
half addrspace(1)* %c,
@@ -64,22 +63,22 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}select_f16_imm_b
+; GCN-LABEL: {{^}}select_f16_imm_b:
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[D_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], 0x3800{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
-; SI: v_cmp_lt_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
+; SI: v_cmp_gt_f32_e32 vcc, 0.5, v[[A_F32]]
; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
; SI: v_cvt_f32_f16_e32 v[[D_F32:[0-9]+]], v[[D_F16]]
; SI: v_cndmask_b32_e32 v[[R_F32:[0-9]+]], v[[D_F32]], v[[C_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+
; VI: v_cmp_gt_f16_e32 vcc, 0.5, v[[A_F16]]
; VI: v_cndmask_b32_e32 v[[R_F16:[0-9]+]], v[[D_F16]], v[[C_F16]], vcc
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @select_f16_imm_b(
+define amdgpu_kernel void @select_f16_imm_b(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %c,
@@ -94,23 +93,23 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}select_f16_imm_c
+; GCN-LABEL: {{^}}select_f16_imm_c:
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[D_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], 0x3800{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
; SI: v_cvt_f32_f16_e32 v[[D_F32:[0-9]+]], v[[D_F16]]
; SI: v_cmp_nlt_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
-; SI: v_cndmask_b32_e32 v[[R_F32:[0-9]+]], v[[C_F32]], v[[D_F32]], vcc
+; SI: v_cndmask_b32_e32 v[[R_F32:[0-9]+]], 0.5, v[[D_F32]], vcc
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+
; VI: v_cmp_nlt_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
; VI: v_mov_b32_e32 v[[C_F16:[0-9]+]], 0x3800{{$}}
; VI: v_cndmask_b32_e32 v[[R_F16:[0-9]+]], v[[C_F16]], v[[D_F16]], vcc
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @select_f16_imm_c(
+define amdgpu_kernel void @select_f16_imm_c(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b,
@@ -125,23 +124,22 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}select_f16_imm_d
+; GCN-LABEL: {{^}}select_f16_imm_d:
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
-; SI: v_cvt_f32_f16_e32 v[[D_F32:[0-9]+]], 0x3800{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
; SI: v_cmp_lt_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
-; SI: v_cndmask_b32_e32 v[[R_F32:[0-9]+]], v[[D_F32]], v[[C_F32]]
+; SI: v_cndmask_b32_e32 v[[R_F32:[0-9]+]], 0.5, v[[C_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
; VI: v_cmp_lt_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
; VI: v_mov_b32_e32 v[[D_F16:[0-9]+]], 0x3800{{$}}
; VI: v_cndmask_b32_e32 v[[R_F16:[0-9]+]], v[[D_F16]], v[[C_F16]], vcc
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @select_f16_imm_d(
+define amdgpu_kernel void @select_f16_imm_d(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b,
@@ -156,21 +154,25 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}select_v2f16
-; SI: v_cvt_f32_f16_e32
-; SI: v_cvt_f32_f16_e32
-; SI: v_cvt_f32_f16_e32
-; SI: v_cvt_f32_f16_e32
-; SI: v_cmp_lt_f32_e64
-; SI: v_cmp_lt_f32_e32
-; VI: v_cmp_lt_f16_e32
-; VI: v_cmp_lt_f16_e64
-; GCN: v_cndmask_b32_e32
-; GCN: v_cndmask_b32_e64
-; SI: v_cvt_f16_f32_e32
-; SI: v_cvt_f16_f32_e32
+; GCN-LABEL: {{^}}select_v2f16:
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cmp_lt_f32_e64
+; SI: v_cmp_lt_f32_e32
+; SI: v_cndmask_b32_e32
+; SI: v_cndmask_b32_e64
+; SI: v_cvt_f16_f32_e32
+; SI: v_cvt_f16_f32_e32
+
+; VI: v_cmp_lt_f16_e64
+; VI: v_cmp_lt_f16_e32
+; VI: v_cndmask_b32_e64
+; VI: v_cndmask_b32_e32
+
; GCN: s_endpgm
-define void @select_v2f16(
+define amdgpu_kernel void @select_v2f16(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b,
@@ -187,25 +189,24 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}select_v2f16_imm_a
+; GCN-LABEL: {{^}}select_v2f16_imm_a:
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
-; SI: v_cmp_gt_f32_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
-; SI: v_cvt_f32_f16_e32
-; SI: v_cvt_f32_f16_e32
-; SI: v_cmp_gt_f32_e64
+; SI: v_cmp_lt_f32_e64
+; SI: v_cmp_lt_f32_e32 vcc, 0.5
+
; VI: v_cmp_lt_f16_e32
; VI: v_cmp_lt_f16_e64
; GCN: v_cndmask_b32_e32
-; SI: v_cvt_f16_f32_e32
; GCN: v_cndmask_b32_e64
; SI: v_cvt_f16_f32_e32
+; SI: v_cvt_f16_f32_e32
; GCN: s_endpgm
-define void @select_v2f16_imm_a(
+define amdgpu_kernel void @select_v2f16_imm_a(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %b,
<2 x half> addrspace(1)* %c,
@@ -220,25 +221,25 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}select_v2f16_imm_b
+; GCN-LABEL: {{^}}select_v2f16_imm_b:
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
-; SI: v_cmp_lt_f32_e32
-; SI: v_cvt_f32_f16_e32
-; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
-; SI: v_cmp_lt_f32_e64
+; SI: v_cmp_gt_f32_e64
+; SI: v_cmp_gt_f32_e32 vcc, 0.5
+
; VI: v_cmp_gt_f16_e32
; VI: v_cmp_gt_f16_e64
; GCN: v_cndmask_b32_e32
-; SI: v_cvt_f16_f32_e32
; GCN: v_cndmask_b32_e64
+
+; SI: v_cvt_f16_f32_e32
; SI: v_cvt_f16_f32_e32
; GCN: s_endpgm
-define void @select_v2f16_imm_b(
+define amdgpu_kernel void @select_v2f16_imm_b(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %c,
@@ -253,9 +254,7 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}select_v2f16_imm_c
-; SI: v_cvt_f32_f16_e32
-; SI: v_cvt_f32_f16_e32
+; GCN-LABEL: {{^}}select_v2f16_imm_c:
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
@@ -263,10 +262,10 @@ entry:
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
-; SI: v_cmp_lt_f32_e32
-; SI: v_cmp_lt_f32_e64
-; SI: v_cndmask_b32_e32
+; SI: v_cmp_nlt_f32_e32
+; SI: v_cmp_nlt_f32_e64
; SI: v_cndmask_b32_e64
+; SI: v_cndmask_b32_e32
; VI: v_cmp_nlt_f16_e32
; VI: v_cndmask_b32_e32
@@ -277,7 +276,7 @@ entry:
; SI: v_cvt_f16_f32_e32
; SI: v_cvt_f16_f32_e32
; GCN: s_endpgm
-define void @select_v2f16_imm_c(
+define amdgpu_kernel void @select_v2f16_imm_c(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b,
@@ -292,25 +291,24 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}select_v2f16_imm_d
-; SI: v_cvt_f32_f16_e32
+; GCN-LABEL: {{^}}select_v2f16_imm_d:
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
; SI: v_cvt_f32_f16_e32
-; SI: v_cvt_f32_f16_e32
-; SI: v_cmp_lt_f32_e32
; SI: v_cmp_lt_f32_e64
+; SI: v_cmp_lt_f32_e32
+
; VI: v_cmp_lt_f16_e32
; VI: v_cmp_lt_f16_e64
-; GCN: v_cndmask_b32_e32
-; GCN: v_cndmask_b32_e64
+; GCN: v_cndmask_b32
+; GCN: v_cndmask_b32
; SI: v_cvt_f16_f32_e32
; SI: v_cvt_f16_f32_e32
; GCN: s_endpgm
-define void @select_v2f16_imm_d(
+define amdgpu_kernel void @select_v2f16_imm_d(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b,
diff --git a/test/CodeGen/AMDGPU/select.ll b/test/CodeGen/AMDGPU/select.ll
index 45f3cd5a7ac5..e53c159a2f71 100644
--- a/test/CodeGen/AMDGPU/select.ll
+++ b/test/CodeGen/AMDGPU/select.ll
@@ -14,7 +14,7 @@
; EG-DAG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
; EG-DAG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XYZW
; EG-DAG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XYZW
-define void @select (i32 addrspace(1)* %i32out, float addrspace(1)* %f32out,
+define amdgpu_kernel void @select (i32 addrspace(1)* %i32out, float addrspace(1)* %f32out,
<2 x i32> addrspace(1)* %v2i32out, <2 x float> addrspace(1)* %v2f32out,
<4 x i32> addrspace(1)* %v4i32out, <4 x float> addrspace(1)* %v4f32out,
i32 %cond) {
diff --git a/test/CodeGen/AMDGPU/select64.ll b/test/CodeGen/AMDGPU/select64.ll
index a68fdecb00af..3b4c925a87a0 100644
--- a/test/CodeGen/AMDGPU/select64.ll
+++ b/test/CodeGen/AMDGPU/select64.ll
@@ -7,7 +7,7 @@
; CHECK-NOT: s_lshr_b64
; CHECK: v_cndmask
; CHECK: v_cndmask
-define void @select0(i64 addrspace(1)* %out, i32 %cond, i64 %in) {
+define amdgpu_kernel void @select0(i64 addrspace(1)* %out, i32 %cond, i64 %in) {
entry:
%0 = icmp ugt i32 %cond, 5
%1 = select i1 %0, i64 0, i64 %in
@@ -18,7 +18,7 @@ entry:
; CHECK-LABEL: {{^}}select_trunc_i64:
; CHECK: v_cndmask_b32
; CHECK-NOT: v_cndmask_b32
-define void @select_trunc_i64(i32 addrspace(1)* %out, i32 %cond, i64 %in) nounwind {
+define amdgpu_kernel void @select_trunc_i64(i32 addrspace(1)* %out, i32 %cond, i64 %in) nounwind {
%cmp = icmp ugt i32 %cond, 5
%sel = select i1 %cmp, i64 0, i64 %in
%trunc = trunc i64 %sel to i32
@@ -29,7 +29,7 @@ define void @select_trunc_i64(i32 addrspace(1)* %out, i32 %cond, i64 %in) nounwi
; CHECK-LABEL: {{^}}select_trunc_i64_2:
; CHECK: v_cndmask_b32
; CHECK-NOT: v_cndmask_b32
-define void @select_trunc_i64_2(i32 addrspace(1)* %out, i32 %cond, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @select_trunc_i64_2(i32 addrspace(1)* %out, i32 %cond, i64 %a, i64 %b) nounwind {
%cmp = icmp ugt i32 %cond, 5
%sel = select i1 %cmp, i64 %a, i64 %b
%trunc = trunc i64 %sel to i32
@@ -40,7 +40,7 @@ define void @select_trunc_i64_2(i32 addrspace(1)* %out, i32 %cond, i64 %a, i64 %
; CHECK-LABEL: {{^}}v_select_trunc_i64_2:
; CHECK: v_cndmask_b32
; CHECK-NOT: v_cndmask_b32
-define void @v_select_trunc_i64_2(i32 addrspace(1)* %out, i32 %cond, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
+define amdgpu_kernel void @v_select_trunc_i64_2(i32 addrspace(1)* %out, i32 %cond, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
%cmp = icmp ugt i32 %cond, 5
%a = load i64, i64 addrspace(1)* %aptr, align 8
%b = load i64, i64 addrspace(1)* %bptr, align 8
@@ -54,7 +54,7 @@ define void @v_select_trunc_i64_2(i32 addrspace(1)* %out, i32 %cond, i64 addrspa
; CHECK-DAG: v_cndmask_b32_e32 {{v[0-9]+}}, 0, {{v[0-9]+}}
; CHECK-DAG: v_cndmask_b32_e32 {{v[0-9]+}}, 63, {{v[0-9]+}}
; CHECK: s_endpgm
-define void @v_select_i64_split_imm(i64 addrspace(1)* %out, i32 %cond, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
+define amdgpu_kernel void @v_select_i64_split_imm(i64 addrspace(1)* %out, i32 %cond, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
%cmp = icmp ugt i32 %cond, 5
%a = load i64, i64 addrspace(1)* %aptr, align 8
%b = load i64, i64 addrspace(1)* %bptr, align 8
diff --git a/test/CodeGen/AMDGPU/selectcc-cnd.ll b/test/CodeGen/AMDGPU/selectcc-cnd.ll
index 94d0ace75697..18616851c9c2 100644
--- a/test/CodeGen/AMDGPU/selectcc-cnd.ll
+++ b/test/CodeGen/AMDGPU/selectcc-cnd.ll
@@ -3,7 +3,7 @@
;CHECK-NOT: SETE
;CHECK: CNDE {{\*?}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1.0, literal.x,
;CHECK: 1073741824
-define void @test(float addrspace(1)* %out, float addrspace(1)* %in) {
+define amdgpu_kernel void @test(float addrspace(1)* %out, float addrspace(1)* %in) {
%1 = load float, float addrspace(1)* %in
%2 = fcmp oeq float %1, 0.0
%3 = select i1 %2, float 1.0, float 2.0
diff --git a/test/CodeGen/AMDGPU/selectcc-cnde-int.ll b/test/CodeGen/AMDGPU/selectcc-cnde-int.ll
index 58a4ee7d62b2..1504165d3d2b 100644
--- a/test/CodeGen/AMDGPU/selectcc-cnde-int.ll
+++ b/test/CodeGen/AMDGPU/selectcc-cnde-int.ll
@@ -3,7 +3,7 @@
;CHECK-NOT: SETE_INT
;CHECK: CNDE_INT {{\*?}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, literal.x,
;CHECK-NEXT: 2
-define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%1 = load i32, i32 addrspace(1)* %in
%2 = icmp eq i32 %1, 0
%3 = select i1 %2, i32 1, i32 2
diff --git a/test/CodeGen/AMDGPU/selectcc-icmp-select-float.ll b/test/CodeGen/AMDGPU/selectcc-icmp-select-float.ll
index e870ee891e66..7af5478600bb 100644
--- a/test/CodeGen/AMDGPU/selectcc-icmp-select-float.ll
+++ b/test/CodeGen/AMDGPU/selectcc-icmp-select-float.ll
@@ -6,7 +6,7 @@
; CHECK-NEXT: -1
; Test a selectcc with i32 LHS/RHS and float True/False
-define void @test(float addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test(float addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
%0 = load i32, i32 addrspace(1)* %in
%1 = icmp sge i32 %0, 0
diff --git a/test/CodeGen/AMDGPU/selectcc-opt.ll b/test/CodeGen/AMDGPU/selectcc-opt.ll
index 0f46d4c7ea06..8fef3f8b3808 100644
--- a/test/CodeGen/AMDGPU/selectcc-opt.ll
+++ b/test/CodeGen/AMDGPU/selectcc-opt.ll
@@ -7,7 +7,7 @@
; EG-NOT: CND
; EG: SET{{[NEQGTL]+}}_DX10
-define void @test_a(i32 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @test_a(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp olt float %in, 0.000000e+00
%1 = select i1 %0, float 1.000000e+00, float 0.000000e+00
@@ -35,7 +35,7 @@ ENDIF:
; EG: SET{{[GTEQN]+}}_DX10
; EG-NEXT: PRED_
; EG-NEXT: ALU clause starting
-define void @test_b(i32 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @test_b(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp olt float %in, 0.0
%1 = select i1 %0, float 1.000000e+00, float 0.000000e+00
@@ -59,7 +59,7 @@ ENDIF:
; Test a CND*_INT instruction with float true/false values
; EG-LABEL: {{^}}test_c:
; EG: CND{{[GTE]+}}_INT
-define void @test_c(float addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @test_c(float addrspace(1)* %out, i32 %in) {
entry:
%0 = icmp sgt i32 %in, 0
%1 = select i1 %0, float 2.0, float 3.0
@@ -72,7 +72,7 @@ entry:
; SI-NEXT: v_cndmask_b32_e64
; SI-NOT: cmp
; SI-NOT: cndmask
-define void @selectcc_bool(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @selectcc_bool(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%icmp0 = icmp ne i32 %a, %b
%ext = select i1 %icmp0, i32 -1, i32 0
store i32 %ext, i32 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/selectcc.ll b/test/CodeGen/AMDGPU/selectcc.ll
index 446d4ab344b2..7eca22913987 100644
--- a/test/CodeGen/AMDGPU/selectcc.ll
+++ b/test/CodeGen/AMDGPU/selectcc.ll
@@ -11,7 +11,7 @@
; SI: v_cmp_eq_u64
; SI: v_cndmask
; SI: v_cndmask
-define void @selectcc_i64(i64 addrspace(1) * %out, i64 %lhs, i64 %rhs, i64 %true, i64 %false) {
+define amdgpu_kernel void @selectcc_i64(i64 addrspace(1) * %out, i64 %lhs, i64 %rhs, i64 %true, i64 %false) {
entry:
%0 = icmp eq i64 %lhs, %rhs
%1 = select i1 %0, i64 %true, i64 %false
diff --git a/test/CodeGen/AMDGPU/selected-stack-object.ll b/test/CodeGen/AMDGPU/selected-stack-object.ll
index 37f2747d9815..50ca59ace94e 100644
--- a/test/CodeGen/AMDGPU/selected-stack-object.ll
+++ b/test/CodeGen/AMDGPU/selected-stack-object.ll
@@ -1,4 +1,4 @@
-; "Assertion failure" should be caught with both XFAIL:* and +Asserts.
+; "Assertion failure" should be caught with both XFAIL * and +Asserts.
; XFAIL: *
; REQUIRES: asserts
diff --git a/test/CodeGen/AMDGPU/set-dx10.ll b/test/CodeGen/AMDGPU/set-dx10.ll
index 57365a6e1fc3..6867c6394937 100644
--- a/test/CodeGen/AMDGPU/set-dx10.ll
+++ b/test/CodeGen/AMDGPU/set-dx10.ll
@@ -8,7 +8,7 @@
; CHECK: LSHR
; CHECK-NEXT: SETNE_DX10 * {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.y,
; CHECK-NEXT: 1084227584(5.000000e+00)
-define void @fcmp_une_select_fptosi(i32 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fcmp_une_select_fptosi(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp une float %in, 5.0
%1 = select i1 %0, float 1.000000e+00, float 0.000000e+00
@@ -22,7 +22,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: SETNE_DX10 * {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.y,
; CHECK-NEXT: 1084227584(5.000000e+00)
-define void @fcmp_une_select_i32(i32 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fcmp_une_select_i32(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp une float %in, 5.0
%1 = select i1 %0, i32 -1, i32 0
@@ -34,7 +34,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: SETE_DX10 * {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.y,
; CHECK-NEXT: 1084227584(5.000000e+00)
-define void @fcmp_oeq_select_fptosi(i32 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fcmp_oeq_select_fptosi(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp oeq float %in, 5.0
%1 = select i1 %0, float 1.000000e+00, float 0.000000e+00
@@ -48,7 +48,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: SETE_DX10 * {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.y,
; CHECK-NEXT: 1084227584(5.000000e+00)
-define void @fcmp_oeq_select_i32(i32 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fcmp_oeq_select_i32(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp oeq float %in, 5.0
%1 = select i1 %0, i32 -1, i32 0
@@ -60,7 +60,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: SETGT_DX10 * {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.y,
; CHECK-NEXT: 1084227584(5.000000e+00)
-define void @fcmp_ogt_select_fptosi(i32 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fcmp_ogt_select_fptosi(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ogt float %in, 5.0
%1 = select i1 %0, float 1.000000e+00, float 0.000000e+00
@@ -74,7 +74,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: SETGT_DX10 * {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.y,
; CHECK-NEXT: 1084227584(5.000000e+00)
-define void @fcmp_ogt_select_i32(i32 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fcmp_ogt_select_i32(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ogt float %in, 5.0
%1 = select i1 %0, i32 -1, i32 0
@@ -86,7 +86,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: SETGE_DX10 * {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.y,
; CHECK-NEXT: 1084227584(5.000000e+00)
-define void @fcmp_oge_select_fptosi(i32 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fcmp_oge_select_fptosi(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp oge float %in, 5.0
%1 = select i1 %0, float 1.000000e+00, float 0.000000e+00
@@ -100,7 +100,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: SETGE_DX10 * {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.y,
; CHECK-NEXT: 1084227584(5.000000e+00)
-define void @fcmp_oge_select_i32(i32 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fcmp_oge_select_i32(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp oge float %in, 5.0
%1 = select i1 %0, i32 -1, i32 0
@@ -112,7 +112,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: SETGE_DX10 * {{\** *}}T{{[0-9]+\.[XYZW]}}, literal.y, KC0[2].Z,
; CHECK-NEXT: 1084227584(5.000000e+00)
-define void @fcmp_ole_select_fptosi(i32 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fcmp_ole_select_fptosi(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ole float %in, 5.0
%1 = select i1 %0, float 1.000000e+00, float 0.000000e+00
@@ -126,7 +126,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: SETGE_DX10 * {{\** *}}T{{[0-9]+\.[XYZW]}}, literal.y, KC0[2].Z,
; CHECK-NEXT: 1084227584(5.000000e+00)
-define void @fcmp_ole_select_i32(i32 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fcmp_ole_select_i32(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ole float %in, 5.0
%1 = select i1 %0, i32 -1, i32 0
@@ -138,7 +138,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: SETGT_DX10 * {{\** *}}T{{[0-9]+\.[XYZW]}}, literal.y, KC0[2].Z,
; CHECK-NEXT: 1084227584(5.000000e+00)
-define void @fcmp_olt_select_fptosi(i32 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fcmp_olt_select_fptosi(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp olt float %in, 5.0
%1 = select i1 %0, float 1.000000e+00, float 0.000000e+00
@@ -152,7 +152,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: SETGT_DX10 * {{\** *}}T{{[0-9]+\.[XYZW]}}, literal.y, KC0[2].Z,
; CHECK-NEXT: 1084227584(5.000000e+00)
-define void @fcmp_olt_select_i32(i32 addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @fcmp_olt_select_i32(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp olt float %in, 5.0
%1 = select i1 %0, i32 -1, i32 0
diff --git a/test/CodeGen/AMDGPU/setcc-equivalent.ll b/test/CodeGen/AMDGPU/setcc-equivalent.ll
index 11ea793650c4..853afa8772ea 100644
--- a/test/CodeGen/AMDGPU/setcc-equivalent.ll
+++ b/test/CodeGen/AMDGPU/setcc-equivalent.ll
@@ -3,7 +3,7 @@
; EG-LABEL: {{^}}and_setcc_setcc_i32:
; EG: AND_INT
; EG-NEXT: SETE_INT
-define void @and_setcc_setcc_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define amdgpu_kernel void @and_setcc_setcc_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
%cmp1 = icmp eq i32 %a, -1
%cmp2 = icmp eq i32 %b, -1
%and = and i1 %cmp1, %cmp2
@@ -20,7 +20,7 @@ define void @and_setcc_setcc_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
; EG: SETE_INT
; EG: AND_INT
; EG: SETE_INT
-define void @and_setcc_setcc_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) {
+define amdgpu_kernel void @and_setcc_setcc_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) {
%cmp1 = icmp eq <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
%cmp2 = icmp eq <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
%and = and <4 x i1> %cmp1, %cmp2
diff --git a/test/CodeGen/AMDGPU/setcc-fneg-constant.ll b/test/CodeGen/AMDGPU/setcc-fneg-constant.ll
new file mode 100644
index 000000000000..8d455d84bf9e
--- /dev/null
+++ b/test/CodeGen/AMDGPU/setcc-fneg-constant.ll
@@ -0,0 +1,258 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
+
+; Test fcmp pred (fneg x), c -> fcmp (swapped pred) x, -c combine.
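+;
+; A sketch of the equivalence being exercised (hypothetical IR, not one of the
+; checked cases below): with pred = olt,
+;   %neg = fsub float -0.0, %x        ; fneg
+;   %cmp = fcmp olt float %neg, 4.0
+; can fold to
+;   %cmp = fcmp ogt float %x, -4.0
+; since -x < c holds exactly when x > -c (NaN behavior is unchanged: both
+; ordered compares are false when %x is NaN).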
+
+; GCN-LABEL: {{^}}multi_use_fneg_src:
+; GCN: buffer_load_dword [[A:v[0-9]+]]
+; GCN: buffer_load_dword [[B:v[0-9]+]]
+; GCN: buffer_load_dword [[C:v[0-9]+]]
+
+; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], [[B]], [[A]]
+; GCN: v_cmp_eq_f32_e32 vcc, -4.0, [[MUL]]
+; GCN: buffer_store_dword [[MUL]]
+define amdgpu_kernel void @multi_use_fneg_src() #0 {
+ %a = load volatile float, float addrspace(1)* undef
+ %b = load volatile float, float addrspace(1)* undef
+ %x = load volatile i32, i32 addrspace(1)* undef
+ %y = load volatile i32, i32 addrspace(1)* undef
+
+ %mul = fmul float %a, %b
+ %neg.mul = fsub float -0.0, %mul
+ %cmp = fcmp oeq float %neg.mul, 4.0
+ %select = select i1 %cmp, i32 %x, i32 %y
+ store volatile i32 %select, i32 addrspace(1)* undef
+ store volatile float %mul, float addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}multi_foldable_use_fneg_src:
+; GCN: buffer_load_dword [[A:v[0-9]+]]
+; GCN: buffer_load_dword [[B:v[0-9]+]]
+; GCN: buffer_load_dword [[C:v[0-9]+]]
+
+; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], [[B]], [[A]]
+; GCN: v_cmp_eq_f32_e32 vcc, -4.0, [[A]]
+; GCN: v_mul_f32_e64 [[USE1:v[0-9]+]], [[MUL]], -[[MUL]]
+define amdgpu_kernel void @multi_foldable_use_fneg_src() #0 {
+ %a = load volatile float, float addrspace(1)* undef
+ %b = load volatile float, float addrspace(1)* undef
+ %x = load volatile i32, i32 addrspace(1)* undef
+ %y = load volatile i32, i32 addrspace(1)* undef
+
+ %mul = fmul float %a, %b
+ %neg.mul = fsub float -0.0, %mul
+ %use1 = fmul float %mul, %neg.mul
+ %cmp = fcmp oeq float %neg.mul, 4.0
+ %select = select i1 %cmp, i32 %x, i32 %y
+
+ store volatile i32 %select, i32 addrspace(1)* undef
+ store volatile float %use1, float addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}multi_use_fneg:
+; GCN: buffer_load_dword [[A:v[0-9]+]]
+; GCN: buffer_load_dword [[B:v[0-9]+]]
+; GCN: buffer_load_dword [[C:v[0-9]+]]
+
+; GCN: v_mul_f32_e64 [[MUL:v[0-9]+]], [[A]], -[[B]]
+; GCN-NEXT: v_cmp_eq_f32_e32 vcc, 4.0, [[MUL]]
+; GCN-NOT: xor
+; GCN: buffer_store_dword [[MUL]]
+define amdgpu_kernel void @multi_use_fneg() #0 {
+ %a = load volatile float, float addrspace(1)* undef
+ %b = load volatile float, float addrspace(1)* undef
+ %x = load volatile i32, i32 addrspace(1)* undef
+ %y = load volatile i32, i32 addrspace(1)* undef
+
+ %mul = fmul float %a, %b
+ %neg.mul = fsub float -0.0, %mul
+ %cmp = fcmp oeq float %neg.mul, 4.0
+ %select = select i1 %cmp, i32 %x, i32 %y
+ store volatile i32 %select, i32 addrspace(1)* undef
+ store volatile float %neg.mul, float addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}multi_foldable_use_fneg:
+; GCN: buffer_load_dword [[A:v[0-9]+]]
+; GCN: buffer_load_dword [[B:v[0-9]+]]
+
+; GCN: v_mul_f32_e32 [[MUL0:v[0-9]+]], [[B]], [[A]]
+; GCN: v_cmp_eq_f32_e32 vcc, -4.0, [[MUL0]]
+; GCN: v_mul_f32_e64 [[MUL1:v[0-9]+]], -[[MUL0]], [[MUL0]]
+; GCN: buffer_store_dword [[MUL1]]
+define amdgpu_kernel void @multi_foldable_use_fneg() #0 {
+ %a = load volatile float, float addrspace(1)* undef
+ %b = load volatile float, float addrspace(1)* undef
+ %x = load volatile i32, i32 addrspace(1)* undef
+ %y = load volatile i32, i32 addrspace(1)* undef
+ %z = load volatile i32, i32 addrspace(1)* undef
+
+ %mul = fmul float %a, %b
+ %neg.mul = fsub float -0.0, %mul
+ %cmp = fcmp oeq float %neg.mul, 4.0
+ %select = select i1 %cmp, i32 %x, i32 %y
+ %use1 = fmul float %neg.mul, %mul
+ store volatile i32 %select, i32 addrspace(1)* undef
+ store volatile float %use1, float addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_setcc_fneg_oeq_posk_f32:
+; GCN: v_cmp_eq_f32_e32 vcc, -4.0, v{{[0-9]+}}
+define amdgpu_kernel void @test_setcc_fneg_oeq_posk_f32() #0 {
+ %a = load volatile float, float addrspace(1)* undef
+ %x = load volatile i32, i32 addrspace(1)* undef
+ %y = load volatile i32, i32 addrspace(1)* undef
+ %neg.a = fsub float -0.0, %a
+ %cmp = fcmp oeq float %neg.a, 4.0
+ %select = select i1 %cmp, i32 %x, i32 %y
+ store volatile i32 %select, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_setcc_fneg_ogt_posk_f32:
+; GCN: v_cmp_gt_f32_e32 vcc, -4.0, v{{[0-9]+}}
+define amdgpu_kernel void @test_setcc_fneg_ogt_posk_f32() #0 {
+ %a = load volatile float, float addrspace(1)* undef
+ %x = load volatile i32, i32 addrspace(1)* undef
+ %y = load volatile i32, i32 addrspace(1)* undef
+ %neg.a = fsub float -0.0, %a
+ %cmp = fcmp ogt float %neg.a, 4.0
+ %select = select i1 %cmp, i32 %x, i32 %y
+ store volatile i32 %select, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_setcc_fneg_oge_posk_f32:
+; GCN: v_cmp_ge_f32_e32 vcc, -4.0, v{{[0-9]+}}
+define amdgpu_kernel void @test_setcc_fneg_oge_posk_f32() #0 {
+ %a = load volatile float, float addrspace(1)* undef
+ %x = load volatile i32, i32 addrspace(1)* undef
+ %y = load volatile i32, i32 addrspace(1)* undef
+ %neg.a = fsub float -0.0, %a
+ %cmp = fcmp oge float %neg.a, 4.0
+ %select = select i1 %cmp, i32 %x, i32 %y
+ store volatile i32 %select, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_setcc_fneg_olt_posk_f32:
+; GCN: v_cmp_lt_f32_e32 vcc, -4.0, v{{[0-9]+}}
+define amdgpu_kernel void @test_setcc_fneg_olt_posk_f32() #0 {
+ %a = load volatile float, float addrspace(1)* undef
+ %x = load volatile i32, i32 addrspace(1)* undef
+ %y = load volatile i32, i32 addrspace(1)* undef
+ %neg.a = fsub float -0.0, %a
+ %cmp = fcmp olt float %neg.a, 4.0
+ %select = select i1 %cmp, i32 %x, i32 %y
+ store volatile i32 %select, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_setcc_fneg_ole_posk_f32:
+; GCN: v_cmp_le_f32_e32 vcc, -4.0, v{{[0-9]+}}
+define amdgpu_kernel void @test_setcc_fneg_ole_posk_f32() #0 {
+ %a = load volatile float, float addrspace(1)* undef
+ %x = load volatile i32, i32 addrspace(1)* undef
+ %y = load volatile i32, i32 addrspace(1)* undef
+ %neg.a = fsub float -0.0, %a
+ %cmp = fcmp ole float %neg.a, 4.0
+ %select = select i1 %cmp, i32 %x, i32 %y
+ store volatile i32 %select, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_setcc_fneg_one_posk_f32:
+; GCN: v_cmp_lg_f32_e32 vcc, -4.0, v{{[0-9]+}}
+define amdgpu_kernel void @test_setcc_fneg_one_posk_f32() #0 {
+ %a = load volatile float, float addrspace(1)* undef
+ %x = load volatile i32, i32 addrspace(1)* undef
+ %y = load volatile i32, i32 addrspace(1)* undef
+ %neg.a = fsub float -0.0, %a
+ %cmp = fcmp one float %neg.a, 4.0
+ %select = select i1 %cmp, i32 %x, i32 %y
+ store volatile i32 %select, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_setcc_fneg_ueq_posk_f32:
+; GCN: v_cmp_nlg_f32_e32 vcc, -4.0, v{{[0-9]+}}
+define amdgpu_kernel void @test_setcc_fneg_ueq_posk_f32() #0 {
+ %a = load volatile float, float addrspace(1)* undef
+ %x = load volatile i32, i32 addrspace(1)* undef
+ %y = load volatile i32, i32 addrspace(1)* undef
+ %neg.a = fsub float -0.0, %a
+ %cmp = fcmp ueq float %neg.a, 4.0
+ %select = select i1 %cmp, i32 %x, i32 %y
+ store volatile i32 %select, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_setcc_fneg_ugt_posk_f32:
+; GCN: v_cmp_nle_f32_e32 vcc, -4.0, v{{[0-9]+}}
+define amdgpu_kernel void @test_setcc_fneg_ugt_posk_f32() #0 {
+ %a = load volatile float, float addrspace(1)* undef
+ %x = load volatile i32, i32 addrspace(1)* undef
+ %y = load volatile i32, i32 addrspace(1)* undef
+ %neg.a = fsub float -0.0, %a
+ %cmp = fcmp ugt float %neg.a, 4.0
+ %select = select i1 %cmp, i32 %x, i32 %y
+ store volatile i32 %select, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_setcc_fneg_uge_posk_f32:
+; GCN: v_cmp_nlt_f32_e32 vcc, -4.0, v{{[0-9]+}}
+define amdgpu_kernel void @test_setcc_fneg_uge_posk_f32() #0 {
+ %a = load volatile float, float addrspace(1)* undef
+ %x = load volatile i32, i32 addrspace(1)* undef
+ %y = load volatile i32, i32 addrspace(1)* undef
+ %neg.a = fsub float -0.0, %a
+ %cmp = fcmp uge float %neg.a, 4.0
+ %select = select i1 %cmp, i32 %x, i32 %y
+ store volatile i32 %select, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_setcc_fneg_ult_posk_f32:
+; GCN: v_cmp_nge_f32_e32 vcc, -4.0, v{{[0-9]+}}
+define amdgpu_kernel void @test_setcc_fneg_ult_posk_f32() #0 {
+ %a = load volatile float, float addrspace(1)* undef
+ %x = load volatile i32, i32 addrspace(1)* undef
+ %y = load volatile i32, i32 addrspace(1)* undef
+ %neg.a = fsub float -0.0, %a
+ %cmp = fcmp ult float %neg.a, 4.0
+ %select = select i1 %cmp, i32 %x, i32 %y
+ store volatile i32 %select, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_setcc_fneg_ule_posk_f32:
+; GCN: v_cmp_ngt_f32_e32 vcc, -4.0, v{{[0-9]+}}
+define amdgpu_kernel void @test_setcc_fneg_ule_posk_f32() #0 {
+ %a = load volatile float, float addrspace(1)* undef
+ %x = load volatile i32, i32 addrspace(1)* undef
+ %y = load volatile i32, i32 addrspace(1)* undef
+ %neg.a = fsub float -0.0, %a
+ %cmp = fcmp ule float %neg.a, 4.0
+ %select = select i1 %cmp, i32 %x, i32 %y
+ store volatile i32 %select, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_setcc_fneg_une_posk_f32:
+; GCN: v_cmp_neq_f32_e32 vcc, -4.0, v{{[0-9]+}}
+define amdgpu_kernel void @test_setcc_fneg_une_posk_f32() #0 {
+ %a = load volatile float, float addrspace(1)* undef
+ %x = load volatile i32, i32 addrspace(1)* undef
+ %y = load volatile i32, i32 addrspace(1)* undef
+ %neg.a = fsub float -0.0, %a
+ %cmp = fcmp une float %neg.a, 4.0
+ %select = select i1 %cmp, i32 %x, i32 %y
+ store volatile i32 %select, i32 addrspace(1)* undef
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/setcc-opt.ll b/test/CodeGen/AMDGPU/setcc-opt.ll
index 4ab6da085634..caddb6f68218 100644
--- a/test/CodeGen/AMDGPU/setcc-opt.ll
+++ b/test/CodeGen/AMDGPU/setcc-opt.ll
@@ -11,7 +11,7 @@
; EG: SETNE_INT * [[CMP:T[0-9]+]].[[CMPCHAN:[XYZW]]], KC0[2].Z, KC0[2].W
; EG: AND_INT T{{[0-9]+.[XYZW]}}, PS, 1
-define void @sext_bool_icmp_eq_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @sext_bool_icmp_eq_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%icmp0 = icmp eq i32 %a, %b
%ext = sext i1 %icmp0 to i32
%icmp1 = icmp eq i32 %ext, 0
@@ -28,7 +28,7 @@ define void @sext_bool_icmp_eq_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind
; EG: SETNE_INT * [[CMP:T[0-9]+]].[[CMPCHAN:[XYZW]]], KC0[2].Z, KC0[2].W
; EG: AND_INT T{{[0-9]+.[XYZW]}}, PS, 1
-define void @sext_bool_icmp_ne_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @sext_bool_icmp_ne_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%icmp0 = icmp ne i32 %a, %b
%ext = sext i1 %icmp0 to i32
%icmp1 = icmp ne i32 %ext, 0
@@ -42,7 +42,7 @@ define void @sext_bool_icmp_ne_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind
; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
; GCN-NEXT: buffer_store_byte [[RESULT]]
; GCN-NEXT: s_endpgm
-define void @sext_bool_icmp_eq_neg1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @sext_bool_icmp_eq_neg1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%icmp0 = icmp eq i32 %a, %b
%ext = sext i1 %icmp0 to i32
%icmp1 = icmp eq i32 %ext, -1
@@ -56,7 +56,7 @@ define void @sext_bool_icmp_eq_neg1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounw
; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
; GCN-NEXT: buffer_store_byte [[RESULT]]
; GCN-NEXT: s_endpgm
-define void @sext_bool_icmp_ne_neg1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @sext_bool_icmp_ne_neg1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%icmp0 = icmp ne i32 %a, %b
%ext = sext i1 %icmp0 to i32
%icmp1 = icmp ne i32 %ext, -1
@@ -70,7 +70,7 @@ define void @sext_bool_icmp_ne_neg1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounw
; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
; GCN-NEXT: buffer_store_byte [[RESULT]]
; GCN-NEXT: s_endpgm
-define void @zext_bool_icmp_eq_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @zext_bool_icmp_eq_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%icmp0 = icmp eq i32 %a, %b
%ext = zext i1 %icmp0 to i32
%icmp1 = icmp eq i32 %ext, 0
@@ -84,7 +84,7 @@ define void @zext_bool_icmp_eq_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind
; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
; GCN-NEXT: buffer_store_byte [[RESULT]]
; GCN-NEXT: s_endpgm
-define void @zext_bool_icmp_ne_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @zext_bool_icmp_ne_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%icmp0 = icmp ne i32 %a, %b
%ext = zext i1 %icmp0 to i32
%icmp1 = icmp ne i32 %ext, 0
@@ -98,7 +98,7 @@ define void @zext_bool_icmp_ne_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind
; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
; GCN-NEXT: buffer_store_byte [[RESULT]]
; GCN-NEXT: s_endpgm
-define void @zext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @zext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%icmp0 = icmp eq i32 %a, %b
%ext = zext i1 %icmp0 to i32
%icmp1 = icmp eq i32 %ext, 1
@@ -111,7 +111,7 @@ define void @zext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind
; GCN: v_cmp_eq_u32_e32 vcc,
; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
; GCN-NEXT: buffer_store_byte [[RESULT]]
-define void @zext_bool_icmp_ne_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @zext_bool_icmp_ne_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%icmp0 = icmp ne i32 %a, %b
%ext = zext i1 %icmp0 to i32
%icmp1 = icmp ne i32 %ext, 1
@@ -124,7 +124,7 @@ define void @zext_bool_icmp_ne_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind
; GCN: v_mov_b32_e32 [[TMP:v[0-9]+]], 0{{$}}
; GCN: buffer_store_byte [[TMP]]
; GCN-NEXT: s_endpgm
-define void @zext_bool_icmp_eq_neg1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @zext_bool_icmp_eq_neg1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%icmp0 = icmp eq i32 %a, %b
%ext = zext i1 %icmp0 to i32
%icmp1 = icmp eq i32 %ext, -1
@@ -137,7 +137,7 @@ define void @zext_bool_icmp_eq_neg1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounw
; GCN: v_mov_b32_e32 [[TMP:v[0-9]+]], 1{{$}}
; GCN: buffer_store_byte [[TMP]]
; GCN-NEXT: s_endpgm
-define void @zext_bool_icmp_ne_neg1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @zext_bool_icmp_ne_neg1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%icmp0 = icmp ne i32 %a, %b
%ext = zext i1 %icmp0 to i32
%icmp1 = icmp ne i32 %ext, -1
@@ -159,7 +159,7 @@ define void @zext_bool_icmp_ne_neg1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounw
; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
; GCN: buffer_store_byte [[RESULT]]
; GCN: s_endpgm
-define void @cmp_zext_k_i8max(i1 addrspace(1)* %out, i8 %b) nounwind {
+define amdgpu_kernel void @cmp_zext_k_i8max(i1 addrspace(1)* %out, i8 %b) nounwind {
%b.ext = zext i8 %b to i32
%icmp0 = icmp ne i32 %b.ext, 255
store i1 %icmp0, i1 addrspace(1)* %out
@@ -172,7 +172,7 @@ define void @cmp_zext_k_i8max(i1 addrspace(1)* %out, i8 %b) nounwind {
; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
; GCN: buffer_store_byte [[RESULT]]
; GCN: s_endpgm
-define void @cmp_sext_k_neg1(i1 addrspace(1)* %out, i8 addrspace(1)* %b.ptr) nounwind {
+define amdgpu_kernel void @cmp_sext_k_neg1(i1 addrspace(1)* %out, i8 addrspace(1)* %b.ptr) nounwind {
%b = load i8, i8 addrspace(1)* %b.ptr
%b.ext = sext i8 %b to i32
%icmp0 = icmp ne i32 %b.ext, -1
@@ -186,7 +186,7 @@ define void @cmp_sext_k_neg1(i1 addrspace(1)* %out, i8 addrspace(1)* %b.ptr) nou
; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[CMP]]
; GCN-NEXT: buffer_store_byte [[RESULT]]
; GCN: s_endpgm
-define void @cmp_sext_k_neg1_i8_sext_arg(i1 addrspace(1)* %out, i8 signext %b) nounwind {
+define amdgpu_kernel void @cmp_sext_k_neg1_i8_sext_arg(i1 addrspace(1)* %out, i8 signext %b) nounwind {
%b.ext = sext i8 %b to i32
%icmp0 = icmp ne i32 %b.ext, -1
store i1 %icmp0, i1 addrspace(1)* %out
@@ -207,7 +207,7 @@ define void @cmp_sext_k_neg1_i8_sext_arg(i1 addrspace(1)* %out, i8 signext %b) n
; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
; GCN: buffer_store_byte [[RESULT]]
; GCN: s_endpgm
-define void @cmp_sext_k_neg1_i8_arg(i1 addrspace(1)* %out, i8 %b) nounwind {
+define amdgpu_kernel void @cmp_sext_k_neg1_i8_arg(i1 addrspace(1)* %out, i8 %b) nounwind {
%b.ext = sext i8 %b to i32
%icmp0 = icmp ne i32 %b.ext, -1
store i1 %icmp0, i1 addrspace(1)* %out
@@ -218,7 +218,7 @@ define void @cmp_sext_k_neg1_i8_arg(i1 addrspace(1)* %out, i8 %b) nounwind {
; GCN: v_mov_b32_e32 [[RESULT:v[0-9]+]], 1{{$}}
; GCN: buffer_store_byte [[RESULT]]
; GCN: s_endpgm
-define void @cmp_zext_k_neg1(i1 addrspace(1)* %out, i8 %b) nounwind {
+define amdgpu_kernel void @cmp_zext_k_neg1(i1 addrspace(1)* %out, i8 %b) nounwind {
%b.ext = zext i8 %b to i32
%icmp0 = icmp ne i32 %b.ext, -1
store i1 %icmp0, i1 addrspace(1)* %out
@@ -229,7 +229,7 @@ define void @cmp_zext_k_neg1(i1 addrspace(1)* %out, i8 %b) nounwind {
; GCN: v_mov_b32_e32 [[RESULT:v[0-9]+]], 1{{$}}
; GCN: buffer_store_byte [[RESULT]]
; GCN-NEXT: s_endpgm
-define void @zext_bool_icmp_ne_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @zext_bool_icmp_ne_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%icmp0 = icmp ne i32 %a, %b
%ext = zext i1 %icmp0 to i32
%icmp1 = icmp ne i32 %ext, 2
@@ -241,7 +241,7 @@ define void @zext_bool_icmp_ne_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind
; GCN: v_mov_b32_e32 [[RESULT:v[0-9]+]], 0{{$}}
; GCN: buffer_store_byte [[RESULT]]
; GCN-NEXT: s_endpgm
-define void @zext_bool_icmp_eq_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @zext_bool_icmp_eq_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%icmp0 = icmp ne i32 %a, %b
%ext = zext i1 %icmp0 to i32
%icmp1 = icmp eq i32 %ext, 2
@@ -256,7 +256,7 @@ define void @zext_bool_icmp_eq_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind
; FUNC-LABEL: {{^}}sext_bool_icmp_eq_1:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0{{$}}
; GCN: buffer_store_byte [[K]]
-define void @sext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @sext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%icmp0 = icmp eq i32 %a, %b
%ext = sext i1 %icmp0 to i32
%icmp1 = icmp eq i32 %ext, 1
@@ -267,7 +267,7 @@ define void @sext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind
; FUNC-LABEL: {{^}}sext_bool_icmp_ne_1:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 1{{$}}
; GCN: buffer_store_byte [[K]]
-define void @sext_bool_icmp_ne_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @sext_bool_icmp_ne_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%icmp0 = icmp ne i32 %a, %b
%ext = sext i1 %icmp0 to i32
%icmp1 = icmp ne i32 %ext, 1
@@ -278,7 +278,7 @@ define void @sext_bool_icmp_ne_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind
; FUNC-LABEL: {{^}}sext_bool_icmp_ne_k:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 1{{$}}
; GCN: buffer_store_byte [[K]]
-define void @sext_bool_icmp_ne_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @sext_bool_icmp_ne_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%icmp0 = icmp ne i32 %a, %b
%ext = sext i1 %icmp0 to i32
%icmp1 = icmp ne i32 %ext, 2
diff --git a/test/CodeGen/AMDGPU/setcc.ll b/test/CodeGen/AMDGPU/setcc.ll
index 10d04bab9f6b..add90e9c2f3a 100644
--- a/test/CodeGen/AMDGPU/setcc.ll
+++ b/test/CodeGen/AMDGPU/setcc.ll
@@ -9,7 +9,7 @@ declare i32 @llvm.r600.read.tidig.x() nounwind readnone
; GCN-DAG: v_cmp_eq_u32_e32
; GCN-DAG: v_cmp_eq_u32_e64
-define void @setcc_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) #0 {
+define amdgpu_kernel void @setcc_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) #0 {
%result = icmp eq <2 x i32> %a, %b
%sext = sext <2 x i1> %result to <2 x i32>
store <2 x i32> %sext, <2 x i32> addrspace(1)* %out
@@ -26,7 +26,7 @@ define void @setcc_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %
; GCN: v_cmp_eq_u32_e64
; GCN: v_cmp_eq_u32_e64
; GCN: v_cmp_eq_u32_e64
-define void @setcc_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @setcc_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32>, <4 x i32> addrspace(1)* %in
%b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
@@ -43,7 +43,7 @@ define void @setcc_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %
; FUNC-LABEL: {{^}}f32_oeq:
; R600: SETE_DX10
; GCN: v_cmp_eq_f32
-define void @f32_oeq(i32 addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @f32_oeq(i32 addrspace(1)* %out, float %a, float %b) #0 {
entry:
%0 = fcmp oeq float %a, %b
%1 = sext i1 %0 to i32
@@ -54,7 +54,7 @@ entry:
; FUNC-LABEL: {{^}}f32_ogt:
; R600: SETGT_DX10
; GCN: v_cmp_gt_f32
-define void @f32_ogt(i32 addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @f32_ogt(i32 addrspace(1)* %out, float %a, float %b) #0 {
entry:
%0 = fcmp ogt float %a, %b
%1 = sext i1 %0 to i32
@@ -65,7 +65,7 @@ entry:
; FUNC-LABEL: {{^}}f32_oge:
; R600: SETGE_DX10
; GCN: v_cmp_ge_f32
-define void @f32_oge(i32 addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @f32_oge(i32 addrspace(1)* %out, float %a, float %b) #0 {
entry:
%0 = fcmp oge float %a, %b
%1 = sext i1 %0 to i32
@@ -76,7 +76,7 @@ entry:
; FUNC-LABEL: {{^}}f32_olt:
; R600: SETGT_DX10
; GCN: v_cmp_lt_f32
-define void @f32_olt(i32 addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @f32_olt(i32 addrspace(1)* %out, float %a, float %b) #0 {
entry:
%0 = fcmp olt float %a, %b
%1 = sext i1 %0 to i32
@@ -87,7 +87,7 @@ entry:
; FUNC-LABEL: {{^}}f32_ole:
; R600: SETGE_DX10
; GCN: v_cmp_le_f32
-define void @f32_ole(i32 addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @f32_ole(i32 addrspace(1)* %out, float %a, float %b) #0 {
entry:
%0 = fcmp ole float %a, %b
%1 = sext i1 %0 to i32
@@ -105,7 +105,7 @@ entry:
; GCN: v_cmp_lg_f32_e32 vcc
; GCN-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f32_one(i32 addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @f32_one(i32 addrspace(1)* %out, float %a, float %b) #0 {
entry:
%0 = fcmp one float %a, %b
%1 = sext i1 %0 to i32
@@ -119,7 +119,7 @@ entry:
; R600-DAG: AND_INT
; R600-DAG: SETNE_INT
; GCN: v_cmp_o_f32
-define void @f32_ord(i32 addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @f32_ord(i32 addrspace(1)* %out, float %a, float %b) #0 {
entry:
%0 = fcmp ord float %a, %b
%1 = sext i1 %0 to i32
@@ -137,7 +137,7 @@ entry:
; GCN: v_cmp_nlg_f32_e32 vcc
; GCN-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f32_ueq(i32 addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @f32_ueq(i32 addrspace(1)* %out, float %a, float %b) #0 {
entry:
%0 = fcmp ueq float %a, %b
%1 = sext i1 %0 to i32
@@ -150,7 +150,7 @@ entry:
; R600: SETE_DX10
; GCN: v_cmp_nle_f32_e32 vcc
; GCN-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f32_ugt(i32 addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @f32_ugt(i32 addrspace(1)* %out, float %a, float %b) #0 {
entry:
%0 = fcmp ugt float %a, %b
%1 = sext i1 %0 to i32
@@ -164,7 +164,7 @@ entry:
; GCN: v_cmp_nlt_f32_e32 vcc
; GCN-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f32_uge(i32 addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @f32_uge(i32 addrspace(1)* %out, float %a, float %b) #0 {
entry:
%0 = fcmp uge float %a, %b
%1 = sext i1 %0 to i32
@@ -178,7 +178,7 @@ entry:
; GCN: v_cmp_nge_f32_e32 vcc
; GCN-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f32_ult(i32 addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @f32_ult(i32 addrspace(1)* %out, float %a, float %b) #0 {
entry:
%0 = fcmp ult float %a, %b
%1 = sext i1 %0 to i32
@@ -192,7 +192,7 @@ entry:
; GCN: v_cmp_ngt_f32_e32 vcc
; GCN-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f32_ule(i32 addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @f32_ule(i32 addrspace(1)* %out, float %a, float %b) #0 {
entry:
%0 = fcmp ule float %a, %b
%1 = sext i1 %0 to i32
@@ -203,7 +203,7 @@ entry:
; FUNC-LABEL: {{^}}f32_une:
; R600: SETNE_DX10
; GCN: v_cmp_neq_f32
-define void @f32_une(i32 addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @f32_une(i32 addrspace(1)* %out, float %a, float %b) #0 {
entry:
%0 = fcmp une float %a, %b
%1 = sext i1 %0 to i32
@@ -217,7 +217,7 @@ entry:
; R600: OR_INT
; R600: SETNE_INT
; GCN: v_cmp_u_f32
-define void @f32_uno(i32 addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @f32_uno(i32 addrspace(1)* %out, float %a, float %b) #0 {
entry:
%0 = fcmp uno float %a, %b
%1 = sext i1 %0 to i32
@@ -232,7 +232,7 @@ entry:
; FUNC-LABEL: {{^}}i32_eq:
; R600: SETE_INT
; GCN: v_cmp_eq_u32
-define void @i32_eq(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @i32_eq(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
entry:
%0 = icmp eq i32 %a, %b
%1 = sext i1 %0 to i32
@@ -243,7 +243,7 @@ entry:
; FUNC-LABEL: {{^}}i32_ne:
; R600: SETNE_INT
; GCN: v_cmp_ne_u32
-define void @i32_ne(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @i32_ne(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
entry:
%0 = icmp ne i32 %a, %b
%1 = sext i1 %0 to i32
@@ -254,7 +254,7 @@ entry:
; FUNC-LABEL: {{^}}i32_ugt:
; R600: SETGT_UINT
; GCN: v_cmp_gt_u32
-define void @i32_ugt(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @i32_ugt(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
entry:
%0 = icmp ugt i32 %a, %b
%1 = sext i1 %0 to i32
@@ -265,7 +265,7 @@ entry:
; FUNC-LABEL: {{^}}i32_uge:
; R600: SETGE_UINT
; GCN: v_cmp_ge_u32
-define void @i32_uge(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @i32_uge(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
entry:
%0 = icmp uge i32 %a, %b
%1 = sext i1 %0 to i32
@@ -276,7 +276,7 @@ entry:
; FUNC-LABEL: {{^}}i32_ult:
; R600: SETGT_UINT
; GCN: v_cmp_lt_u32
-define void @i32_ult(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @i32_ult(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
entry:
%0 = icmp ult i32 %a, %b
%1 = sext i1 %0 to i32
@@ -287,7 +287,7 @@ entry:
; FUNC-LABEL: {{^}}i32_ule:
; R600: SETGE_UINT
; GCN: v_cmp_le_u32
-define void @i32_ule(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @i32_ule(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
entry:
%0 = icmp ule i32 %a, %b
%1 = sext i1 %0 to i32
@@ -298,7 +298,7 @@ entry:
; FUNC-LABEL: {{^}}i32_sgt:
; R600: SETGT_INT
; GCN: v_cmp_gt_i32
-define void @i32_sgt(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @i32_sgt(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
entry:
%0 = icmp sgt i32 %a, %b
%1 = sext i1 %0 to i32
@@ -309,7 +309,7 @@ entry:
; FUNC-LABEL: {{^}}i32_sge:
; R600: SETGE_INT
; GCN: v_cmp_ge_i32
-define void @i32_sge(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @i32_sge(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
entry:
%0 = icmp sge i32 %a, %b
%1 = sext i1 %0 to i32
@@ -320,7 +320,7 @@ entry:
; FUNC-LABEL: {{^}}i32_slt:
; R600: SETGT_INT
; GCN: v_cmp_lt_i32
-define void @i32_slt(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @i32_slt(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
entry:
%0 = icmp slt i32 %a, %b
%1 = sext i1 %0 to i32
@@ -331,7 +331,7 @@ entry:
; FUNC-LABEL: {{^}}i32_sle:
; R600: SETGE_INT
; GCN: v_cmp_le_i32
-define void @i32_sle(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @i32_sle(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
entry:
%0 = icmp sle i32 %a, %b
%1 = sext i1 %0 to i32
@@ -348,7 +348,7 @@ entry:
; GCN-DAG: v_cmp_eq_u32
; GCN-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
; GCN: s_endpgm
-define void @v3i32_eq(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %ptra, <3 x i32> addrspace(1)* %ptrb) #0 {
+define amdgpu_kernel void @v3i32_eq(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %ptra, <3 x i32> addrspace(1)* %ptrb) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
%gep.a = getelementptr <3 x i32>, <3 x i32> addrspace(1)* %ptra, i32 %tid
%gep.b = getelementptr <3 x i32>, <3 x i32> addrspace(1)* %ptrb, i32 %tid
@@ -369,7 +369,7 @@ define void @v3i32_eq(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %ptr
; GCN-DAG: v_cmp_eq_u32
; GCN-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
; GCN: s_endpgm
-define void @v3i8_eq(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %ptra, <3 x i8> addrspace(1)* %ptrb) #0 {
+define amdgpu_kernel void @v3i8_eq(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %ptra, <3 x i8> addrspace(1)* %ptrb) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
%gep.a = getelementptr <3 x i8>, <3 x i8> addrspace(1)* %ptra, i32 %tid
%gep.b = getelementptr <3 x i8>, <3 x i8> addrspace(1)* %ptrb, i32 %tid
@@ -386,7 +386,7 @@ define void @v3i8_eq(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %ptra,
; FUNC-LABEL: setcc-i1
; GCN: s_and_b32 [[AND:s[0-9]+]], s{{[0-9]+}}, 1
; GCN: s_cmp_eq_u32 [[AND]], 0
-define void @setcc-i1(i32 %in) #0 {
+define amdgpu_kernel void @setcc-i1(i32 %in) #0 {
%and = and i32 %in, 1
%cmp = icmp eq i32 %and, 0
br i1 %cmp, label %endif, label %if
@@ -400,7 +400,7 @@ endif:
; GCN-DAG: v_cmp_ge_f32_e64 [[A:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 0{{$}}
; GCN-DAG: v_cmp_le_f32_e64 [[B:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 1.0
; GCN: s_and_b64 s[2:3], [[A]], [[B]]
-define void @setcc-i1-and-xor(i32 addrspace(1)* %out, float %cond) #0 {
+define amdgpu_kernel void @setcc-i1-and-xor(i32 addrspace(1)* %out, float %cond) #0 {
bb0:
%tmp5 = fcmp oge float %cond, 0.000000e+00
%tmp7 = fcmp ole float %cond, 1.000000e+00
diff --git a/test/CodeGen/AMDGPU/setcc64.ll b/test/CodeGen/AMDGPU/setcc64.ll
index 1f86277e0bc6..1f1bdb055302 100644
--- a/test/CodeGen/AMDGPU/setcc64.ll
+++ b/test/CodeGen/AMDGPU/setcc64.ll
@@ -9,7 +9,7 @@
; GCN-LABEL: {{^}}f64_oeq:
; GCN: v_cmp_eq_f64
-define void @f64_oeq(i32 addrspace(1)* %out, double %a, double %b) #0 {
+define amdgpu_kernel void @f64_oeq(i32 addrspace(1)* %out, double %a, double %b) #0 {
entry:
%tmp0 = fcmp oeq double %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -19,7 +19,7 @@ entry:
; GCN-LABEL: {{^}}f64_ogt:
; GCN: v_cmp_gt_f64
-define void @f64_ogt(i32 addrspace(1)* %out, double %a, double %b) #0 {
+define amdgpu_kernel void @f64_ogt(i32 addrspace(1)* %out, double %a, double %b) #0 {
entry:
%tmp0 = fcmp ogt double %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -29,7 +29,7 @@ entry:
; GCN-LABEL: {{^}}f64_oge:
; GCN: v_cmp_ge_f64
-define void @f64_oge(i32 addrspace(1)* %out, double %a, double %b) #0 {
+define amdgpu_kernel void @f64_oge(i32 addrspace(1)* %out, double %a, double %b) #0 {
entry:
%tmp0 = fcmp oge double %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -39,7 +39,7 @@ entry:
; GCN-LABEL: {{^}}f64_olt:
; GCN: v_cmp_lt_f64
-define void @f64_olt(i32 addrspace(1)* %out, double %a, double %b) #0 {
+define amdgpu_kernel void @f64_olt(i32 addrspace(1)* %out, double %a, double %b) #0 {
entry:
%tmp0 = fcmp olt double %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -49,7 +49,7 @@ entry:
; GCN-LABEL: {{^}}f64_ole:
; GCN: v_cmp_le_f64
-define void @f64_ole(i32 addrspace(1)* %out, double %a, double %b) #0 {
+define amdgpu_kernel void @f64_ole(i32 addrspace(1)* %out, double %a, double %b) #0 {
entry:
%tmp0 = fcmp ole double %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -60,7 +60,7 @@ entry:
; GCN-LABEL: {{^}}f64_one:
; GCN: v_cmp_lg_f64_e32 vcc
; GCN: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f64_one(i32 addrspace(1)* %out, double %a, double %b) #0 {
+define amdgpu_kernel void @f64_one(i32 addrspace(1)* %out, double %a, double %b) #0 {
entry:
%tmp0 = fcmp one double %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -70,7 +70,7 @@ entry:
; GCN-LABEL: {{^}}f64_ord:
; GCN: v_cmp_o_f64
-define void @f64_ord(i32 addrspace(1)* %out, double %a, double %b) #0 {
+define amdgpu_kernel void @f64_ord(i32 addrspace(1)* %out, double %a, double %b) #0 {
entry:
%tmp0 = fcmp ord double %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -81,7 +81,7 @@ entry:
; GCN-LABEL: {{^}}f64_ueq:
; GCN: v_cmp_nlg_f64_e32 vcc
; GCN: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f64_ueq(i32 addrspace(1)* %out, double %a, double %b) #0 {
+define amdgpu_kernel void @f64_ueq(i32 addrspace(1)* %out, double %a, double %b) #0 {
entry:
%tmp0 = fcmp ueq double %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -93,7 +93,7 @@ entry:
; GCN: v_cmp_nle_f64_e32 vcc
; GCN: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f64_ugt(i32 addrspace(1)* %out, double %a, double %b) #0 {
+define amdgpu_kernel void @f64_ugt(i32 addrspace(1)* %out, double %a, double %b) #0 {
entry:
%tmp0 = fcmp ugt double %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -104,7 +104,7 @@ entry:
; GCN-LABEL: {{^}}f64_uge:
; GCN: v_cmp_nlt_f64_e32 vcc
; GCN: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f64_uge(i32 addrspace(1)* %out, double %a, double %b) #0 {
+define amdgpu_kernel void @f64_uge(i32 addrspace(1)* %out, double %a, double %b) #0 {
entry:
%tmp0 = fcmp uge double %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -115,7 +115,7 @@ entry:
; GCN-LABEL: {{^}}f64_ult:
; GCN: v_cmp_nge_f64_e32 vcc
; GCN: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f64_ult(i32 addrspace(1)* %out, double %a, double %b) #0 {
+define amdgpu_kernel void @f64_ult(i32 addrspace(1)* %out, double %a, double %b) #0 {
entry:
%tmp0 = fcmp ult double %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -126,7 +126,7 @@ entry:
; GCN-LABEL: {{^}}f64_ule:
; GCN: v_cmp_ngt_f64_e32 vcc
; GCN: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f64_ule(i32 addrspace(1)* %out, double %a, double %b) #0 {
+define amdgpu_kernel void @f64_ule(i32 addrspace(1)* %out, double %a, double %b) #0 {
entry:
%tmp0 = fcmp ule double %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -136,7 +136,7 @@ entry:
; GCN-LABEL: {{^}}f64_une:
; GCN: v_cmp_neq_f64
-define void @f64_une(i32 addrspace(1)* %out, double %a, double %b) #0 {
+define amdgpu_kernel void @f64_une(i32 addrspace(1)* %out, double %a, double %b) #0 {
entry:
%tmp0 = fcmp une double %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -146,7 +146,7 @@ entry:
; GCN-LABEL: {{^}}f64_uno:
; GCN: v_cmp_u_f64
-define void @f64_uno(i32 addrspace(1)* %out, double %a, double %b) #0 {
+define amdgpu_kernel void @f64_uno(i32 addrspace(1)* %out, double %a, double %b) #0 {
entry:
%tmp0 = fcmp uno double %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -160,7 +160,7 @@ entry:
; GCN-LABEL: {{^}}i64_eq:
; GCN: v_cmp_eq_u64
-define void @i64_eq(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
+define amdgpu_kernel void @i64_eq(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
entry:
%tmp0 = icmp eq i64 %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -170,7 +170,7 @@ entry:
; GCN-LABEL: {{^}}i64_ne:
; GCN: v_cmp_ne_u64
-define void @i64_ne(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
+define amdgpu_kernel void @i64_ne(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
entry:
%tmp0 = icmp ne i64 %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -180,7 +180,7 @@ entry:
; GCN-LABEL: {{^}}i64_ugt:
; GCN: v_cmp_gt_u64
-define void @i64_ugt(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
+define amdgpu_kernel void @i64_ugt(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
entry:
%tmp0 = icmp ugt i64 %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -190,7 +190,7 @@ entry:
; GCN-LABEL: {{^}}i64_uge:
; GCN: v_cmp_ge_u64
-define void @i64_uge(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
+define amdgpu_kernel void @i64_uge(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
entry:
%tmp0 = icmp uge i64 %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -200,7 +200,7 @@ entry:
; GCN-LABEL: {{^}}i64_ult:
; GCN: v_cmp_lt_u64
-define void @i64_ult(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
+define amdgpu_kernel void @i64_ult(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
entry:
%tmp0 = icmp ult i64 %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -210,7 +210,7 @@ entry:
; GCN-LABEL: {{^}}i64_ule:
; GCN: v_cmp_le_u64
-define void @i64_ule(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
+define amdgpu_kernel void @i64_ule(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
entry:
%tmp0 = icmp ule i64 %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -220,7 +220,7 @@ entry:
; GCN-LABEL: {{^}}i64_sgt:
; GCN: v_cmp_gt_i64
-define void @i64_sgt(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
+define amdgpu_kernel void @i64_sgt(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
entry:
%tmp0 = icmp sgt i64 %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -230,7 +230,7 @@ entry:
; GCN-LABEL: {{^}}i64_sge:
; GCN: v_cmp_ge_i64
-define void @i64_sge(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
+define amdgpu_kernel void @i64_sge(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
entry:
%tmp0 = icmp sge i64 %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -240,7 +240,7 @@ entry:
; GCN-LABEL: {{^}}i64_slt:
; GCN: v_cmp_lt_i64
-define void @i64_slt(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
+define amdgpu_kernel void @i64_slt(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
entry:
%tmp0 = icmp slt i64 %a, %b
%tmp1 = sext i1 %tmp0 to i32
@@ -250,7 +250,7 @@ entry:
; GCN-LABEL: {{^}}i64_sle:
; GCN: v_cmp_le_i64
-define void @i64_sle(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
+define amdgpu_kernel void @i64_sle(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
entry:
%tmp0 = icmp sle i64 %a, %b
%tmp1 = sext i1 %tmp0 to i32
diff --git a/test/CodeGen/AMDGPU/seto.ll b/test/CodeGen/AMDGPU/seto.ll
index 01e4a7fda5d2..b4385aa0ccca 100644
--- a/test/CodeGen/AMDGPU/seto.ll
+++ b/test/CodeGen/AMDGPU/seto.ll
@@ -4,12 +4,9 @@
; CHECK-LABEL: {{^}}main:
; CHECK: v_cmp_o_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[SREG:s[0-9]+]], [[SREG]]
; CHECK-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, 1.0, [[CMP]]
-define void @main(float %p) {
+define amdgpu_ps float @main(float inreg %p) {
main_body:
%c = fcmp oeq float %p, %p
%r = select i1 %c, float 1.000000e+00, float 0.000000e+00
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %r, float %r, float %r, float %r)
- ret void
+ ret float %r
}
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
diff --git a/test/CodeGen/AMDGPU/setuo.ll b/test/CodeGen/AMDGPU/setuo.ll
index 76346c4f624a..f6821b675e22 100644
--- a/test/CodeGen/AMDGPU/setuo.ll
+++ b/test/CodeGen/AMDGPU/setuo.ll
@@ -4,12 +4,9 @@
; CHECK-LABEL: {{^}}main:
; CHECK: v_cmp_u_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[SREG:s[0-9]+]], [[SREG]]
; CHECK-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, 1.0, [[CMP]]
-define void @main(float %p) {
+define amdgpu_ps float @main(float inreg %p) {
main_body:
%c = fcmp une float %p, %p
%r = select i1 %c, float 1.000000e+00, float 0.000000e+00
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %r, float %r, float %r, float %r)
- ret void
+ ret float %r
}
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
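; seto.ll and setuo.ll above drop the removed llvm.SI.export intrinsic and
; instead return the shader result directly; the amdgpu_ps calling
; convention exports the returned value for the function. A minimal sketch
; of the idiom, with a hypothetical shader that is not part of this patch:

define amdgpu_ps float @example_ps(float inreg %p) {
  ; scalar (inreg) compare against itself, selected into the exported color
  %c = fcmp oeq float %p, %p
  %r = select i1 %c, float 1.000000e+00, float 0.000000e+00
  ret float %r
}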
diff --git a/test/CodeGen/AMDGPU/sext-eliminate.ll b/test/CodeGen/AMDGPU/sext-eliminate.ll
index 7dc6eb87f6b5..0b780af17bca 100644
--- a/test/CodeGen/AMDGPU/sext-eliminate.ll
+++ b/test/CodeGen/AMDGPU/sext-eliminate.ll
@@ -6,7 +6,7 @@
; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+\.[XYZW]]], [[ADDR:T[0-9]+.[XYZW]]]
; EG: SUB_INT {{[* ]*}}[[RES]]
; EG-NOT: BFE
-define void @sext_in_reg_i1_i32_add(i32 addrspace(1)* %out, i1 %a, i32 %b) {
+define amdgpu_kernel void @sext_in_reg_i1_i32_add(i32 addrspace(1)* %out, i1 %a, i32 %b) {
%sext = sext i1 %a to i32
%res = add i32 %b, %sext
store i32 %res, i32 addrspace(1)* %out
@@ -18,7 +18,7 @@ define void @sext_in_reg_i1_i32_add(i32 addrspace(1)* %out, i1 %a, i32 %b) {
; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+\.[XYZW]]], [[ADDR:T[0-9]+.[XYZW]]]
; EG: ADD_INT {{[* ]*}}[[RES]]
; EG-NOT: BFE
-define void @sext_in_reg_i1_i32_sub(i32 addrspace(1)* %out, i1 %a, i32 %b) {
+define amdgpu_kernel void @sext_in_reg_i1_i32_sub(i32 addrspace(1)* %out, i1 %a, i32 %b) {
%sext = sext i1 %a to i32
%res = sub i32 %b, %sext
store i32 %res, i32 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/sext-in-reg-failure-r600.ll b/test/CodeGen/AMDGPU/sext-in-reg-failure-r600.ll
index adba6bbb51d4..7ac4e1d9fe4b 100644
--- a/test/CodeGen/AMDGPU/sext-in-reg-failure-r600.ll
+++ b/test/CodeGen/AMDGPU/sext-in-reg-failure-r600.ll
@@ -11,7 +11,7 @@
; EG: LSHR {{\*?}} [[ADDR]]
; Works with the align 2 removed
-define void @sext_in_reg_v2i1_in_v2i32_other_amount(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) nounwind {
+define amdgpu_kernel void @sext_in_reg_v2i1_in_v2i32_other_amount(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) nounwind {
%c = add <2 x i32> %a, %b
%x = shl <2 x i32> %c, <i32 6, i32 6>
%y = ashr <2 x i32> %x, <i32 7, i32 7>
diff --git a/test/CodeGen/AMDGPU/sext-in-reg.ll b/test/CodeGen/AMDGPU/sext-in-reg.ll
index 4c58261709c4..b702e1c07200 100644
--- a/test/CodeGen/AMDGPU/sext-in-reg.ll
+++ b/test/CodeGen/AMDGPU/sext-in-reg.ll
@@ -1,8 +1,10 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=GFX89 -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -check-prefix=GFX89 -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FIXME: i16 promotion pass ruins the scalar cases when legal.
+; FIXME: r600 fails verifier
; FUNC-LABEL: {{^}}sext_in_reg_i1_i32:
; GCN: s_load_dword [[ARG:s[0-9]+]],
@@ -13,7 +15,7 @@
; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+\.[XYZW]]], [[ADDR:T[0-9]+.[XYZW]]]
; EG: LSHR * [[ADDR]]
; EG: BFE_INT * [[RES]], {{.*}}, 0.0, 1
-define void @sext_in_reg_i1_i32(i32 addrspace(1)* %out, i32 %in) #0 {
+define amdgpu_kernel void @sext_in_reg_i1_i32(i32 addrspace(1)* %out, i32 %in) #0 {
%shl = shl i32 %in, 31
%sext = ashr i32 %shl, 31
store i32 %sext, i32 addrspace(1)* %out
@@ -30,7 +32,7 @@ define void @sext_in_reg_i1_i32(i32 addrspace(1)* %out, i32 %in) #0 {
; EG: ADD_INT
; EG-NEXT: BFE_INT [[RES]], {{.*}}, 0.0, literal
; EG-NEXT: LSHR * [[ADDR]]
-define void @sext_in_reg_i8_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @sext_in_reg_i8_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%c = add i32 %a, %b ; add to prevent folding into extload
%shl = shl i32 %c, 24
%ashr = ashr i32 %shl, 24
@@ -48,7 +50,7 @@ define void @sext_in_reg_i8_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
; EG: ADD_INT
; EG-NEXT: BFE_INT [[RES]], {{.*}}, 0.0, literal
; EG-NEXT: LSHR * [[ADDR]]
-define void @sext_in_reg_i16_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @sext_in_reg_i16_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%c = add i32 %a, %b ; add to prevent folding into extload
%shl = shl i32 %c, 16
%ashr = ashr i32 %shl, 16
@@ -66,7 +68,7 @@ define void @sext_in_reg_i16_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
; EG: ADD_INT
; EG-NEXT: BFE_INT [[RES]], {{.*}}, 0.0, literal
; EG-NEXT: LSHR * [[ADDR]]
-define void @sext_in_reg_i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i32> %a, <1 x i32> %b) #0 {
+define amdgpu_kernel void @sext_in_reg_i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i32> %a, <1 x i32> %b) #0 {
%c = add <1 x i32> %a, %b ; add to prevent folding into extload
%shl = shl <1 x i32> %c, <i32 24>
%ashr = ashr <1 x i32> %shl, <i32 24>
@@ -80,7 +82,7 @@ define void @sext_in_reg_i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i32> %a,
; GCN-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], s[[SLO]]
; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[SHI]]
; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
-define void @sext_in_reg_i1_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
+define amdgpu_kernel void @sext_in_reg_i1_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
%c = shl i64 %a, %b
%shl = shl i64 %c, 63
%ashr = ashr i64 %shl, 63
@@ -94,7 +96,7 @@ define void @sext_in_reg_i1_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
; GCN-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], s[[SLO]]
; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[SHI]]
; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
-define void @sext_in_reg_i8_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
+define amdgpu_kernel void @sext_in_reg_i8_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
%c = shl i64 %a, %b
%shl = shl i64 %c, 56
%ashr = ashr i64 %shl, 56
@@ -109,7 +111,7 @@ define void @sext_in_reg_i8_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[SHI]]
; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
-define void @sext_in_reg_i16_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
+define amdgpu_kernel void @sext_in_reg_i16_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
%c = shl i64 %a, %b
%shl = shl i64 %c, 48
%ashr = ashr i64 %shl, 48
@@ -123,7 +125,7 @@ define void @sext_in_reg_i16_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
; GCN-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], s[[SLO]]
; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[SHI]]
; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
-define void @sext_in_reg_i32_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
+define amdgpu_kernel void @sext_in_reg_i32_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
%c = shl i64 %a, %b
%shl = shl i64 %c, 32
%ashr = ashr i64 %shl, 32
@@ -138,7 +140,7 @@ define void @sext_in_reg_i32_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
; XGCN: buffer_store_dword
; XEG: BFE_INT
; XEG: ASHR
-; define void @sext_in_reg_i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i64> %a, <1 x i64> %b) #0 {
+; define amdgpu_kernel void @sext_in_reg_i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i64> %a, <1 x i64> %b) #0 {
; %c = add <1 x i64> %a, %b
; %shl = shl <1 x i64> %c, <i64 56>
; %ashr = ashr <1 x i64> %shl, <i64 56>
@@ -150,15 +152,15 @@ define void @sext_in_reg_i32_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
; SI: buffer_load_dwordx2
; SI: v_lshl_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
-; VI: flat_load_dwordx2
-; VI: v_lshlrev_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
+; GFX89: flat_load_dwordx2
+; GFX89: v_lshlrev_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
; GCN: v_bfe_i32 v[[LO:[0-9]+]], v[[VAL_LO]], 0, 1
; GCN: v_ashrrev_i32_e32 v[[HI:[0-9]+]], 31, v[[LO]]
; SI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-; VI: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @v_sext_in_reg_i1_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) #0 {
+; GFX89: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[HI]]{{\]}}
+define amdgpu_kernel void @v_sext_in_reg_i1_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
%b.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
@@ -177,15 +179,15 @@ define void @v_sext_in_reg_i1_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %
; SI: buffer_load_dwordx2
; SI: v_lshl_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
-; VI: flat_load_dwordx2
-; VI: v_lshlrev_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
+; GFX89: flat_load_dwordx2
+; GFX89: v_lshlrev_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
; GCN: v_bfe_i32 v[[LO:[0-9]+]], v[[VAL_LO]], 0, 8
; GCN: v_ashrrev_i32_e32 v[[HI:[0-9]+]], 31, v[[LO]]
; SI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-; VI: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @v_sext_in_reg_i8_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) #0 {
+; GFX89: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[HI]]{{\]}}
+define amdgpu_kernel void @v_sext_in_reg_i8_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
%b.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
@@ -204,15 +206,15 @@ define void @v_sext_in_reg_i8_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %
; SI: buffer_load_dwordx2
; SI: v_lshl_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
-; VI: flat_load_dwordx2
-; VI: v_lshlrev_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
+; GFX89: flat_load_dwordx2
+; GFX89: v_lshlrev_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
; GCN: v_bfe_i32 v[[LO:[0-9]+]], v[[VAL_LO]], 0, 16
; GCN: v_ashrrev_i32_e32 v[[HI:[0-9]+]], 31, v[[LO]]
; SI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-; VI: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @v_sext_in_reg_i16_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) #0 {
+; GFX89: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[HI]]{{\]}}
+define amdgpu_kernel void @v_sext_in_reg_i16_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
%b.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
@@ -231,12 +233,12 @@ define void @v_sext_in_reg_i16_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; SI: buffer_load_dwordx2
; SI: v_lshl_b64 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}},
-; VI: flat_load_dwordx2
-; VI: v_lshlrev_b64 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}},
+; GFX89: flat_load_dwordx2
+; GFX89: v_lshlrev_b64 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}},
; GCN: v_ashrrev_i32_e32 v[[SHR:[0-9]+]], 31, v[[LO]]
-; VI: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[SHR]]{{\]}}
-define void @v_sext_in_reg_i32_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) #0 {
+; GFX89: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[SHR]]{{\]}}
+define amdgpu_kernel void @v_sext_in_reg_i32_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
%b.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
@@ -262,7 +264,7 @@ define void @v_sext_in_reg_i32_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; EG: LSHL
; EG: ASHR [[RES]]
; EG: LSHR {{\*?}} [[ADDR]]
-define void @sext_in_reg_i1_in_i32_other_amount(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @sext_in_reg_i1_in_i32_other_amount(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%c = add i32 %a, %b
%x = shl i32 %c, 6
%y = ashr i32 %x, 7
@@ -285,7 +287,7 @@ define void @sext_in_reg_i1_in_i32_other_amount(i32 addrspace(1)* %out, i32 %a,
; EG: LSHL
; EG: ASHR [[RES]]
; EG: LSHR {{\*?}} [[ADDR]]
-define void @sext_in_reg_v2i1_in_v2i32_other_amount(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) #0 {
+define amdgpu_kernel void @sext_in_reg_v2i1_in_v2i32_other_amount(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) #0 {
%c = add <2 x i32> %a, %b
%x = shl <2 x i32> %c, <i32 6, i32 6>
%y = ashr <2 x i32> %x, <i32 7, i32 7>
@@ -303,7 +305,7 @@ define void @sext_in_reg_v2i1_in_v2i32_other_amount(<2 x i32> addrspace(1)* %out
; EG: BFE_INT [[RES]]
; EG: BFE_INT [[RES]]
; EG: LSHR {{\*?}} [[ADDR]]
-define void @sext_in_reg_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) #0 {
+define amdgpu_kernel void @sext_in_reg_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) #0 {
%c = add <2 x i32> %a, %b ; add to prevent folding into extload
%shl = shl <2 x i32> %c, <i32 31, i32 31>
%ashr = ashr <2 x i32> %shl, <i32 31, i32 31>
@@ -324,7 +326,7 @@ define void @sext_in_reg_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %
; EG: BFE_INT [[RES]]
; EG: BFE_INT [[RES]]
; EG: LSHR {{\*?}} [[ADDR]]
-define void @sext_in_reg_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) #0 {
+define amdgpu_kernel void @sext_in_reg_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) #0 {
%c = add <4 x i32> %a, %b ; add to prevent folding into extload
%shl = shl <4 x i32> %c, <i32 31, i32 31, i32 31, i32 31>
%ashr = ashr <4 x i32> %shl, <i32 31, i32 31, i32 31, i32 31>
@@ -341,7 +343,7 @@ define void @sext_in_reg_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %
; EG: BFE_INT [[RES]]
; EG: BFE_INT [[RES]]
; EG: LSHR {{\*?}} [[ADDR]]
-define void @sext_in_reg_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) #0 {
+define amdgpu_kernel void @sext_in_reg_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) #0 {
%c = add <2 x i32> %a, %b ; add to prevent folding into extload
%shl = shl <2 x i32> %c, <i32 24, i32 24>
%ashr = ashr <2 x i32> %shl, <i32 24, i32 24>
@@ -362,7 +364,7 @@ define void @sext_in_reg_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %
; EG: BFE_INT [[RES]]
; EG: BFE_INT [[RES]]
; EG: LSHR {{\*?}} [[ADDR]]
-define void @sext_in_reg_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) #0 {
+define amdgpu_kernel void @sext_in_reg_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) #0 {
%c = add <4 x i32> %a, %b ; add to prevent folding into extload
%shl = shl <4 x i32> %c, <i32 24, i32 24, i32 24, i32 24>
%ashr = ashr <4 x i32> %shl, <i32 24, i32 24, i32 24, i32 24>
@@ -379,7 +381,7 @@ define void @sext_in_reg_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %
; EG: BFE_INT [[RES]]
; EG: BFE_INT [[RES]]
; EG: LSHR {{\*?}} [[ADDR]]
-define void @sext_in_reg_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) #0 {
+define amdgpu_kernel void @sext_in_reg_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) #0 {
%c = add <2 x i32> %a, %b ; add to prevent folding into extload
%shl = shl <2 x i32> %c, <i32 16, i32 16>
%ashr = ashr <2 x i32> %shl, <i32 16, i32 16>
@@ -388,7 +390,7 @@ define void @sext_in_reg_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32>
}
; FUNC-LABEL: {{^}}testcase:
-define void @testcase(i8 addrspace(1)* %out, i8 %a) #0 {
+define amdgpu_kernel void @testcase(i8 addrspace(1)* %out, i8 %a) #0 {
%and_a_1 = and i8 %a, 1
%cmp_eq = icmp eq i8 %and_a_1, 0
%cmp_slt = icmp slt i8 %a, 0
@@ -400,7 +402,7 @@ define void @testcase(i8 addrspace(1)* %out, i8 %a) #0 {
}
; FUNC-LABEL: {{^}}testcase_3:
-define void @testcase_3(i8 addrspace(1)* %out, i8 %a) #0 {
+define amdgpu_kernel void @testcase_3(i8 addrspace(1)* %out, i8 %a) #0 {
%and_a_1 = and i8 %a, 1
%cmp_eq = icmp eq i8 %and_a_1, 0
%cmp_slt = icmp slt i8 %a, 0
@@ -416,7 +418,7 @@ define void @testcase_3(i8 addrspace(1)* %out, i8 %a) #0 {
; GCN: v_bfe_i32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 8
; GCN: v_bfe_i32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 8
; GCN: v_bfe_i32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 8
-define void @vgpr_sext_in_reg_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %a, <4 x i32> addrspace(1)* %b) #0 {
+define amdgpu_kernel void @vgpr_sext_in_reg_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %a, <4 x i32> addrspace(1)* %b) #0 {
%loada = load <4 x i32>, <4 x i32> addrspace(1)* %a, align 16
%loadb = load <4 x i32>, <4 x i32> addrspace(1)* %b, align 16
%c = add <4 x i32> %loada, %loadb ; add to prevent folding into extload
@@ -429,7 +431,7 @@ define void @vgpr_sext_in_reg_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i
; FUNC-LABEL: {{^}}vgpr_sext_in_reg_v4i16_to_v4i32:
; GCN: v_bfe_i32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 16
; GCN: v_bfe_i32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 16
-define void @vgpr_sext_in_reg_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %a, <4 x i32> addrspace(1)* %b) #0 {
+define amdgpu_kernel void @vgpr_sext_in_reg_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %a, <4 x i32> addrspace(1)* %b) #0 {
%loada = load <4 x i32>, <4 x i32> addrspace(1)* %a, align 16
%loadb = load <4 x i32>, <4 x i32> addrspace(1)* %b, align 16
%c = add <4 x i32> %loada, %loadb ; add to prevent folding into extload
@@ -444,7 +446,7 @@ define void @vgpr_sext_in_reg_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x
; GCN: v_max_i32
; GCN-NOT: bfe
; GCN: buffer_store_short
-define void @sext_in_reg_to_illegal_type(i16 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %src) #0 {
+define amdgpu_kernel void @sext_in_reg_to_illegal_type(i16 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %src) #0 {
%tmp5 = load i8, i8 addrspace(1)* %src, align 1
%tmp2 = sext i8 %tmp5 to i32
%tmp2.5 = icmp sgt i32 %tmp2, 0
@@ -455,167 +457,22 @@ define void @sext_in_reg_to_illegal_type(i16 addrspace(1)* nocapture %out, i8 ad
ret void
}
-declare i32 @llvm.AMDGPU.bfe.i32(i32, i32, i32) nounwind readnone
-
-; FUNC-LABEL: {{^}}bfe_0_width:
-; GCN-NOT: {{[^@]}}bfe
-; GCN: s_endpgm
-define void @bfe_0_width(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
- %load = load i32, i32 addrspace(1)* %ptr, align 4
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %load, i32 8, i32 0) nounwind readnone
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_8_bfe_8:
-; GCN: v_bfe_i32
-; GCN-NOT: {{[^@]}}bfe
-; GCN: s_endpgm
-define void @bfe_8_bfe_8(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
- %load = load i32, i32 addrspace(1)* %ptr, align 4
- %bfe0 = call i32 @llvm.AMDGPU.bfe.i32(i32 %load, i32 0, i32 8) nounwind readnone
- %bfe1 = call i32 @llvm.AMDGPU.bfe.i32(i32 %bfe0, i32 0, i32 8) nounwind readnone
- store i32 %bfe1, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}bfe_8_bfe_16:
-; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 8
-; GCN: s_endpgm
-define void @bfe_8_bfe_16(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
- %load = load i32, i32 addrspace(1)* %ptr, align 4
- %bfe0 = call i32 @llvm.AMDGPU.bfe.i32(i32 %load, i32 0, i32 8) nounwind readnone
- %bfe1 = call i32 @llvm.AMDGPU.bfe.i32(i32 %bfe0, i32 0, i32 16) nounwind readnone
- store i32 %bfe1, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; This really should be folded into 1
-; FUNC-LABEL: {{^}}bfe_16_bfe_8:
-; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 8
-; GCN-NOT: {{[^@]}}bfe
-; GCN: s_endpgm
-define void @bfe_16_bfe_8(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
- %load = load i32, i32 addrspace(1)* %ptr, align 4
- %bfe0 = call i32 @llvm.AMDGPU.bfe.i32(i32 %load, i32 0, i32 16) nounwind readnone
- %bfe1 = call i32 @llvm.AMDGPU.bfe.i32(i32 %bfe0, i32 0, i32 8) nounwind readnone
- store i32 %bfe1, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; Make sure there isn't a redundant BFE
-; FUNC-LABEL: {{^}}sext_in_reg_i8_to_i32_bfe:
-; GCN: s_sext_i32_i8 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN-NOT: {{[^@]}}bfe
-; GCN: s_endpgm
-define void @sext_in_reg_i8_to_i32_bfe(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
- %c = add i32 %a, %b ; add to prevent folding into extload
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %c, i32 0, i32 8) nounwind readnone
- %shl = shl i32 %bfe, 24
- %ashr = ashr i32 %shl, 24
- store i32 %ashr, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}sext_in_reg_i8_to_i32_bfe_wrong:
-define void @sext_in_reg_i8_to_i32_bfe_wrong(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
- %c = add i32 %a, %b ; add to prevent folding into extload
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %c, i32 8, i32 0) nounwind readnone
- %shl = shl i32 %bfe, 24
- %ashr = ashr i32 %shl, 24
- store i32 %ashr, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_i8_to_i32_bfe:
-; GCN: buffer_load_sbyte
-; GCN-NOT: {{[^@]}}bfe
-; GCN: s_endpgm
-define void @sextload_i8_to_i32_bfe(i32 addrspace(1)* %out, i8 addrspace(1)* %ptr) #0 {
- %load = load i8, i8 addrspace(1)* %ptr, align 1
- %sext = sext i8 %load to i32
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %sext, i32 0, i32 8) nounwind readnone
- %shl = shl i32 %bfe, 24
- %ashr = ashr i32 %shl, 24
- store i32 %ashr, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; GCN: .text
-; FUNC-LABEL: {{^}}sextload_i8_to_i32_bfe_0:{{.*$}}
-; GCN-NOT: {{[^@]}}bfe
-; GCN: s_endpgm
-define void @sextload_i8_to_i32_bfe_0(i32 addrspace(1)* %out, i8 addrspace(1)* %ptr) #0 {
- %load = load i8, i8 addrspace(1)* %ptr, align 1
- %sext = sext i8 %load to i32
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %sext, i32 8, i32 0) nounwind readnone
- %shl = shl i32 %bfe, 24
- %ashr = ashr i32 %shl, 24
- store i32 %ashr, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}sext_in_reg_i1_bfe_offset_0:
-; GCN-NOT: shr
-; GCN-NOT: shl
-; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 1
-; GCN: s_endpgm
-define void @sext_in_reg_i1_bfe_offset_0(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %shl = shl i32 %x, 31
- %shr = ashr i32 %shl, 31
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shr, i32 0, i32 1)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}sext_in_reg_i1_bfe_offset_1:
-; GCN: buffer_load_dword
-; GCN-NOT: shl
-; GCN-NOT: shr
-; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 1, 1
-; GCN: s_endpgm
-define void @sext_in_reg_i1_bfe_offset_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %shl = shl i32 %x, 30
- %shr = ashr i32 %shl, 30
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shr, i32 1, i32 1)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
-; FUNC-LABEL: {{^}}sext_in_reg_i2_bfe_offset_1:
-; GCN: buffer_load_dword
-; GCN-NOT: v_lshl
-; GCN-NOT: v_ashr
-; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 2
-; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 1, 2
-; GCN: s_endpgm
-define void @sext_in_reg_i2_bfe_offset_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
- %x = load i32, i32 addrspace(1)* %in, align 4
- %shl = shl i32 %x, 30
- %shr = ashr i32 %shl, 30
- %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shr, i32 1, i32 2)
- store i32 %bfe, i32 addrspace(1)* %out, align 4
- ret void
-}
-
; Make sure we propagate the VALUness to users of a moved scalar BFE.
; FUNC-LABEL: {{^}}v_sext_in_reg_i1_to_i64_move_use:
; SI: buffer_load_dwordx2
; SI: v_lshl_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
-; VI: flat_load_dwordx2
-; VI: v_lshlrev_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
+; GFX89: flat_load_dwordx2
+; GFX89: v_lshlrev_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
; GCN-DAG: v_bfe_i32 v[[LO:[0-9]+]], v[[VAL_LO]], 0, 1
; GCN-DAG: v_ashrrev_i32_e32 v[[HI:[0-9]+]], 31, v[[LO]]
; GCN-DAG: v_and_b32_e32 v[[RESULT_LO:[0-9]+]], s{{[0-9]+}}, v[[LO]]
; GCN-DAG: v_and_b32_e32 v[[RESULT_HI:[0-9]+]], s{{[0-9]+}}, v[[HI]]
; SI: buffer_store_dwordx2 v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
-; VI: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
-define void @v_sext_in_reg_i1_to_i64_move_use(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr, i64 %s.val) #0 {
+; GFX89: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
+define amdgpu_kernel void @v_sext_in_reg_i1_to_i64_move_use(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr, i64 %s.val) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
%b.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
@@ -636,16 +493,16 @@ define void @v_sext_in_reg_i1_to_i64_move_use(i64 addrspace(1)* %out, i64 addrsp
; SI: buffer_load_dwordx2
; SI: v_lshl_b64 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}},
-; VI: flat_load_dwordx2
-; VI: v_lshlrev_b64 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}},
+; GFX89: flat_load_dwordx2
+; GFX89: v_lshlrev_b64 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}},
; GCN-DAG: v_ashrrev_i32_e32 v[[SHR:[0-9]+]], 31, v[[LO]]
; GCN-DAG: v_and_b32_e32 v[[RESULT_LO:[0-9]+]], s{{[0-9]+}}, v[[LO]]
; GCN-DAG: v_and_b32_e32 v[[RESULT_HI:[0-9]+]], s{{[0-9]+}}, v[[SHR]]
; SI: buffer_store_dwordx2 v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
-; VI: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
-define void @v_sext_in_reg_i32_to_i64_move_use(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr, i64 %s.val) #0 {
+; GFX89: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
+define amdgpu_kernel void @v_sext_in_reg_i32_to_i64_move_use(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr, i64 %s.val) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
%b.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
@@ -668,10 +525,10 @@ define void @v_sext_in_reg_i32_to_i64_move_use(i64 addrspace(1)* %out, i64 addrs
; SI: v_mov_b32_e32 [[VBFE:v[0-9]+]], [[BFE]]
; SI: buffer_store_short [[VBFE]]
-; VI: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 15
-; VI: s_sext_i32_i16 s{{[0-9]+}}, s{{[0-9]+}}
-; VI: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 15
-define void @s_sext_in_reg_i1_i16(i16 addrspace(1)* %out, i32 addrspace(2)* %ptr) #0 {
+; GFX89: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 15
+; GFX89: s_sext_i32_i16 s{{[0-9]+}}, s{{[0-9]+}}
+; GFX89: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 15
+define amdgpu_kernel void @s_sext_in_reg_i1_i16(i16 addrspace(1)* %out, i32 addrspace(2)* %ptr) #0 {
%ld = load i32, i32 addrspace(2)* %ptr
%in = trunc i32 %ld to i16
%shl = shl i16 %in, 15
@@ -687,10 +544,10 @@ define void @s_sext_in_reg_i1_i16(i16 addrspace(1)* %out, i32 addrspace(2)* %ptr
; SI: v_mov_b32_e32 [[VBFE:v[0-9]+]], [[BFE]]
; SI: buffer_store_short [[VBFE]]
-; VI: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 14
-; VI: s_sext_i32_i16 s{{[0-9]+}}, s{{[0-9]+}}
-; VI: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 14
-define void @s_sext_in_reg_i2_i16(i16 addrspace(1)* %out, i32 addrspace(2)* %ptr) #0 {
+; GFX89: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 14
+; GFX89: s_sext_i32_i16 s{{[0-9]+}}, s{{[0-9]+}}
+; GFX89: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 14
+define amdgpu_kernel void @s_sext_in_reg_i2_i16(i16 addrspace(1)* %out, i32 addrspace(2)* %ptr) #0 {
%ld = load i32, i32 addrspace(2)* %ptr
%in = trunc i32 %ld to i16
%shl = shl i16 %in, 14
@@ -704,7 +561,7 @@ define void @s_sext_in_reg_i2_i16(i16 addrspace(1)* %out, i32 addrspace(2)* %ptr
; GCN: v_bfe_i32 [[BFE:v[0-9]+]], [[VAL]], 0, 1{{$}}
; GCN: ds_write_b16 v{{[0-9]+}}, [[BFE]]
-define void @v_sext_in_reg_i1_i16(i16 addrspace(3)* %out, i16 addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @v_sext_in_reg_i1_i16(i16 addrspace(3)* %out, i16 addrspace(1)* %ptr) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%gep = getelementptr i16, i16 addrspace(1)* %ptr, i32 %tid
%out.gep = getelementptr i16, i16 addrspace(3)* %out, i32 %tid
@@ -721,11 +578,11 @@ define void @v_sext_in_reg_i1_i16(i16 addrspace(3)* %out, i16 addrspace(1)* %ptr
; GCN: {{buffer|flat}}_load_ushort [[VAL1:v[0-9]+]]
; SI: v_lshlrev_b32_e32 [[REG:v[0-9]+]], [[VAL1]], [[VAL0]]
-; VI: v_lshlrev_b16_e32 [[REG:v[0-9]+]], [[VAL1]], [[VAL0]]
+; GFX89: v_lshlrev_b16_e32 [[REG:v[0-9]+]], [[VAL1]], [[VAL0]]
; GCN: v_bfe_i32 [[BFE:v[0-9]+]], [[REG]], 0, 1{{$}}
; GCN: ds_write_b16 v{{[0-9]+}}, [[BFE]]
-define void @v_sext_in_reg_i1_i16_nonload(i16 addrspace(3)* %out, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr, i16 %s.val) nounwind {
+define amdgpu_kernel void @v_sext_in_reg_i1_i16_nonload(i16 addrspace(3)* %out, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr, i16 %s.val) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x()
%a.gep = getelementptr i16, i16 addrspace(1)* %aptr, i32 %tid
%b.gep = getelementptr i16, i16 addrspace(1)* %bptr, i32 %tid
@@ -748,10 +605,10 @@ define void @v_sext_in_reg_i1_i16_nonload(i16 addrspace(3)* %out, i16 addrspace(
; SI: v_mov_b32_e32 [[VBFE:v[0-9]+]], [[BFE]]
; SI: buffer_store_short [[VBFE]]
-; VI: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 14{{$}}
-; VI: s_sext_i32_i16 s{{[0-9]+}}, s{{[0-9]+}}
-; VI: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 14{{$}}
-define void @s_sext_in_reg_i2_i16_arg(i16 addrspace(1)* %out, i16 %in) #0 {
+; GFX89: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 14{{$}}
+; GFX89: s_sext_i32_i16 s{{[0-9]+}}, s{{[0-9]+}}
+; GFX89: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 14{{$}}
+define amdgpu_kernel void @s_sext_in_reg_i2_i16_arg(i16 addrspace(1)* %out, i16 %in) #0 {
%shl = shl i16 %in, 14
%sext = ashr i16 %shl, 14
store i16 %sext, i16 addrspace(1)* %out
@@ -765,10 +622,10 @@ define void @s_sext_in_reg_i2_i16_arg(i16 addrspace(1)* %out, i16 %in) #0 {
; SI: v_mov_b32_e32 [[VSEXT:v[0-9]+]], [[SSEXT]]
; SI: buffer_store_short [[VBFE]]
-; VI: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 8{{$}}
-; VI: s_sext_i32_i16 s{{[0-9]+}}, s{{[0-9]+}}
-; VI: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 8{{$}}
-define void @s_sext_in_reg_i8_i16_arg(i16 addrspace(1)* %out, i16 %in) #0 {
+; GFX89: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 8{{$}}
+; GFX89: s_sext_i32_i16 s{{[0-9]+}}, s{{[0-9]+}}
+; GFX89: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 8{{$}}
+define amdgpu_kernel void @s_sext_in_reg_i8_i16_arg(i16 addrspace(1)* %out, i16 %in) #0 {
%shl = shl i16 %in, 8
%sext = ashr i16 %shl, 8
store i16 %sext, i16 addrspace(1)* %out
@@ -782,16 +639,82 @@ define void @s_sext_in_reg_i8_i16_arg(i16 addrspace(1)* %out, i16 %in) #0 {
; SI: v_mov_b32_e32 [[VBFE:v[0-9]+]], [[BFE]]
; SI: buffer_store_short [[VBFE]]
-; VI: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 1{{$}}
-; VI: s_sext_i32_i16 s{{[0-9]+}}, s{{[0-9]+}}
-; VI: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 1{{$}}
-define void @s_sext_in_reg_i15_i16_arg(i16 addrspace(1)* %out, i16 %in) #0 {
+; GFX89: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 1{{$}}
+; GFX89: s_sext_i32_i16 s{{[0-9]+}}, s{{[0-9]+}}
+; GFX89: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 1{{$}}
+define amdgpu_kernel void @s_sext_in_reg_i15_i16_arg(i16 addrspace(1)* %out, i16 %in) #0 {
%shl = shl i16 %in, 1
%sext = ashr i16 %shl, 1
store i16 %sext, i16 addrspace(1)* %out
ret void
}
+; FUNC-LABEL: {{^}}sext_in_reg_v2i1_to_v2i16:
+; GFX9: v_pk_add_u16 [[ADD:v[0-9]+]]
+; GFX9: v_pk_lshlrev_b16 [[SHL:v[0-9]+]], 15, [[ADD]]
+; GFX9: v_pk_ashrrev_i16 [[SRA:v[0-9]+]], 15, [[SHL]]
+define amdgpu_kernel void @sext_in_reg_v2i1_to_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %a, <2 x i16> %b) #0 {
+ %c = add <2 x i16> %a, %b ; add to prevent folding into extload
+ %shl = shl <2 x i16> %c, <i16 15, i16 15>
+ %ashr = ashr <2 x i16> %shl, <i16 15, i16 15>
+ store <2 x i16> %ashr, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sext_in_reg_v3i1_to_v3i16:
+; GFX9: v_pk_add_u16
+; GFX9: v_pk_add_u16
+; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, 15, v{{[0-9]+}}
+; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, 15, v{{[0-9]+}}
+; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, 15, v{{[0-9]+}}
+; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, 15, v{{[0-9]+}}
+define amdgpu_kernel void @sext_in_reg_v3i1_to_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %a, <3 x i16> %b) #0 {
+ %c = add <3 x i16> %a, %b ; add to prevent folding into extload
+ %shl = shl <3 x i16> %c, <i16 15, i16 15, i16 15>
+ %ashr = ashr <3 x i16> %shl, <i16 15, i16 15, i16 15>
+ store <3 x i16> %ashr, <3 x i16> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sext_in_reg_v2i2_to_v2i16:
+; GFX9: v_pk_add_u16 [[ADD:v[0-9]+]]
+; GFX9: v_pk_lshlrev_b16 [[SHL:v[0-9]+]], 14, [[ADD]]
+; GFX9: v_pk_ashrrev_i16 [[SRA:v[0-9]+]], 14, [[SHL]]
+define amdgpu_kernel void @sext_in_reg_v2i2_to_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %a, <2 x i16> %b) #0 {
+ %c = add <2 x i16> %a, %b ; add to prevent folding into extload
+ %shl = shl <2 x i16> %c, <i16 14, i16 14>
+ %ashr = ashr <2 x i16> %shl, <i16 14, i16 14>
+ store <2 x i16> %ashr, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sext_in_reg_v2i8_to_v2i16:
+; GFX9: v_pk_add_u16 [[ADD:v[0-9]+]]
+; GFX9: v_pk_lshlrev_b16 [[SHL:v[0-9]+]], 8, [[ADD]]
+; GFX9: v_pk_ashrrev_i16 [[SRA:v[0-9]+]], 8, [[SHL]]
+define amdgpu_kernel void @sext_in_reg_v2i8_to_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %a, <2 x i16> %b) #0 {
+ %c = add <2 x i16> %a, %b ; add to prevent folding into extload
+ %shl = shl <2 x i16> %c, <i16 8, i16 8>
+ %ashr = ashr <2 x i16> %shl, <i16 8, i16 8>
+ store <2 x i16> %ashr, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sext_in_reg_v3i8_to_v3i16:
+; GFX9: v_pk_add_u16
+; GFX9: v_pk_add_u16
+; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, 8, v{{[0-9]+}}
+define amdgpu_kernel void @sext_in_reg_v3i8_to_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %a, <3 x i16> %b) #0 {
+ %c = add <3 x i16> %a, %b ; add to prevent folding into extload
+ %shl = shl <3 x i16> %c, <i16 8, i16 8, i16 8>
+ %ashr = ashr <3 x i16> %shl, <i16 8, i16 8, i16 8>
+ store <3 x i16> %ashr, <3 x i16> addrspace(1)* %out
+ ret void
+}
+
declare i32 @llvm.r600.read.tidig.x() #1
attributes #0 = { nounwind }
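; Each new GFX9 case above exercises the same sign_extend_inreg idiom: a
; shift left followed by an arithmetic shift right by the same amount,
; which the GFX89/GFX9 check prefixes expect to match into packed
; v_pk_lshlrev_b16 / v_pk_ashrrev_i16 pairs on 16-bit vectors. A minimal
; sketch of the input pattern, as a hypothetical function not taken from
; this patch:

define <2 x i16> @sext_in_reg_sketch(<2 x i16> %x) {
  ; keep only the low 8 bits of each lane, sign-extended back to i16
  %shl = shl <2 x i16> %x, <i16 8, i16 8>
  %sra = ashr <2 x i16> %shl, <i16 8, i16 8>
  ret <2 x i16> %sra
}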
diff --git a/test/CodeGen/AMDGPU/sgpr-control-flow.ll b/test/CodeGen/AMDGPU/sgpr-control-flow.ll
index d5d2f6b717f9..8e18ab5554e4 100644
--- a/test/CodeGen/AMDGPU/sgpr-control-flow.ll
+++ b/test/CodeGen/AMDGPU/sgpr-control-flow.ll
@@ -13,7 +13,7 @@
; SI: s_sub
-define void @sgpr_if_else_salu_br(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
+define amdgpu_kernel void @sgpr_if_else_salu_br(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
entry:
%0 = icmp eq i32 %a, 0
br i1 %0, label %if, label %else
@@ -52,7 +52,7 @@ endif:
; SI: s_add_i32 s{{[0-9]+}}, [[LOAD0]], [[LOAD1]]
; SI: buffer_store_dword
; SI-NEXT: s_endpgm
-define void @sgpr_if_else_salu_br_opt(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
+define amdgpu_kernel void @sgpr_if_else_salu_br_opt(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
entry:
%0 = icmp eq i32 %a, 0
br i1 %0, label %if, label %else
@@ -79,7 +79,7 @@ endif:
; SI: s_add_i32 [[SGPR:s[0-9]+]]
; SI-NOT: s_add_i32 [[SGPR]]
-define void @sgpr_if_else_valu_br(i32 addrspace(1)* %out, float %a, i32 %b, i32 %c, i32 %d, i32 %e) {
+define amdgpu_kernel void @sgpr_if_else_valu_br(i32 addrspace(1)* %out, float %a, i32 %b, i32 %c, i32 %d, i32 %e) {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%tid_f = uitofp i32 %tid to float
@@ -116,7 +116,7 @@ endif:
; SI: v_cmp_ne_u32_e32 [[CMP_CMP:vcc]], 0, [[V_CMP]]
; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP_CMP]]
; SI: buffer_store_dword [[RESULT]]
-define void @sgpr_if_else_valu_cmp_phi_br(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 addrspace(1)* %b) {
+define amdgpu_kernel void @sgpr_if_else_valu_cmp_phi_br(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 addrspace(1)* %b) {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%tmp1 = icmp eq i32 %tid, 0
diff --git a/test/CodeGen/AMDGPU/sgpr-copy-duplicate-operand.ll b/test/CodeGen/AMDGPU/sgpr-copy-duplicate-operand.ll
index f44ae6e09e9f..fb0bbaa9cbf2 100644
--- a/test/CodeGen/AMDGPU/sgpr-copy-duplicate-operand.ll
+++ b/test/CodeGen/AMDGPU/sgpr-copy-duplicate-operand.ll
@@ -6,7 +6,7 @@
; SI-LABEL: {{^}}test_dup_operands:
; SI: v_add_i32_e32
-define void @test_dup_operands(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %in) {
+define amdgpu_kernel void @test_dup_operands(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %in) {
%a = load <2 x i32>, <2 x i32> addrspace(1)* %in
%lo = extractelement <2 x i32> %a, i32 0
%hi = extractelement <2 x i32> %a, i32 1
diff --git a/test/CodeGen/AMDGPU/sgpr-copy.ll b/test/CodeGen/AMDGPU/sgpr-copy.ll
index 013f5253b369..5c20e9a8d585 100644
--- a/test/CodeGen/AMDGPU/sgpr-copy.ll
+++ b/test/CodeGen/AMDGPU/sgpr-copy.ll
@@ -1,13 +1,6 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck %s
-; This test checks that no VGPR to SGPR copies are created by the register
-; allocator.
-
-
-declare <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
-
-
; CHECK-LABEL: {{^}}phi1:
; CHECK: s_buffer_load_dword [[DST:s[0-9]]], {{s\[[0-9]+:[0-9]+\]}}, 0x0
; CHECK: v_mov_b32_e32 v{{[0-9]}}, [[DST]]
@@ -29,13 +22,13 @@ ELSE: ; preds = %main_body
ENDIF: ; preds = %ELSE, %main_body
%temp.0 = phi float [ %tmp26, %ELSE ], [ %tmp21, %main_body ]
%tmp27 = fadd float %temp.0, %tmp23
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %tmp27, float %tmp27, float 0.000000e+00, float 1.000000e+00)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp27, float %tmp27, float 0.000000e+00, float 1.000000e+00, i1 true, i1 true) #0
ret void
}
; Make sure this program doesn't crash
; CHECK-LABEL: {{^}}phi2:
-define amdgpu_ps void @phi2(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @phi2(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #1 {
main_body:
%tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
%tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
@@ -58,28 +51,54 @@ main_body:
%tmp37 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp36, !tbaa !0
%tmp38 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg1, i32 0
%tmp39 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp38, !tbaa !0
- %tmp40 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %arg3, <2 x i32> %arg5)
- %tmp41 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %arg3, <2 x i32> %arg5)
- %tmp42 = call float @llvm.SI.fs.interp(i32 0, i32 1, i32 %arg3, <2 x i32> %arg5)
- %tmp43 = call float @llvm.SI.fs.interp(i32 1, i32 1, i32 %arg3, <2 x i32> %arg5)
- %tmp44 = call float @llvm.SI.fs.interp(i32 2, i32 1, i32 %arg3, <2 x i32> %arg5)
- %tmp45 = bitcast float %tmp40 to i32
- %tmp46 = bitcast float %tmp41 to i32
+ %i.i = extractelement <2 x i32> %arg5, i32 0
+ %j.i = extractelement <2 x i32> %arg5, i32 1
+ %i.f.i = bitcast i32 %i.i to float
+ %j.f.i = bitcast i32 %j.i to float
+ %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 0, i32 0, i32 %arg3) #1
+ %p2.i = call float @llvm.amdgcn.interp.p2(float %p1.i, float %j.f.i, i32 0, i32 0, i32 %arg3) #1
+ %i.i19 = extractelement <2 x i32> %arg5, i32 0
+ %j.i20 = extractelement <2 x i32> %arg5, i32 1
+ %i.f.i21 = bitcast i32 %i.i19 to float
+ %j.f.i22 = bitcast i32 %j.i20 to float
+ %p1.i23 = call float @llvm.amdgcn.interp.p1(float %i.f.i21, i32 1, i32 0, i32 %arg3) #1
+ %p2.i24 = call float @llvm.amdgcn.interp.p2(float %p1.i23, float %j.f.i22, i32 1, i32 0, i32 %arg3) #1
+ %i.i13 = extractelement <2 x i32> %arg5, i32 0
+ %j.i14 = extractelement <2 x i32> %arg5, i32 1
+ %i.f.i15 = bitcast i32 %i.i13 to float
+ %j.f.i16 = bitcast i32 %j.i14 to float
+ %p1.i17 = call float @llvm.amdgcn.interp.p1(float %i.f.i15, i32 0, i32 1, i32 %arg3) #1
+ %p2.i18 = call float @llvm.amdgcn.interp.p2(float %p1.i17, float %j.f.i16, i32 0, i32 1, i32 %arg3) #1
+ %i.i7 = extractelement <2 x i32> %arg5, i32 0
+ %j.i8 = extractelement <2 x i32> %arg5, i32 1
+ %i.f.i9 = bitcast i32 %i.i7 to float
+ %j.f.i10 = bitcast i32 %j.i8 to float
+ %p1.i11 = call float @llvm.amdgcn.interp.p1(float %i.f.i9, i32 1, i32 1, i32 %arg3) #1
+ %p2.i12 = call float @llvm.amdgcn.interp.p2(float %p1.i11, float %j.f.i10, i32 1, i32 1, i32 %arg3) #1
+ %i.i1 = extractelement <2 x i32> %arg5, i32 0
+ %j.i2 = extractelement <2 x i32> %arg5, i32 1
+ %i.f.i3 = bitcast i32 %i.i1 to float
+ %j.f.i4 = bitcast i32 %j.i2 to float
+ %p1.i5 = call float @llvm.amdgcn.interp.p1(float %i.f.i3, i32 2, i32 1, i32 %arg3) #1
+ %p2.i6 = call float @llvm.amdgcn.interp.p2(float %p1.i5, float %j.f.i4, i32 2, i32 1, i32 %arg3) #1
+ %tmp45 = bitcast float %p2.i to i32
+ %tmp46 = bitcast float %p2.i24 to i32
%tmp47 = insertelement <2 x i32> undef, i32 %tmp45, i32 0
%tmp48 = insertelement <2 x i32> %tmp47, i32 %tmp46, i32 1
%tmp39.bc = bitcast <16 x i8> %tmp39 to <4 x i32>
- %tmp49 = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> %tmp48, <8 x i32> %tmp37, <4 x i32> %tmp39.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %tmp50 = extractelement <4 x float> %tmp49, i32 2
- %tmp51 = call float @fabs(float %tmp50)
- %tmp52 = fmul float %tmp42, %tmp42
- %tmp53 = fmul float %tmp43, %tmp43
+ %a.bc.i = bitcast <2 x i32> %tmp48 to <2 x float>
+ %tmp1 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i, <8 x i32> %tmp37, <4 x i32> %tmp39.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %tmp50 = extractelement <4 x float> %tmp1, i32 2
+ %tmp51 = call float @llvm.fabs.f32(float %tmp50)
+ %tmp52 = fmul float %p2.i18, %p2.i18
+ %tmp53 = fmul float %p2.i12, %p2.i12
%tmp54 = fadd float %tmp53, %tmp52
- %tmp55 = fmul float %tmp44, %tmp44
+ %tmp55 = fmul float %p2.i6, %p2.i6
%tmp56 = fadd float %tmp54, %tmp55
%tmp57 = call float @llvm.amdgcn.rsq.f32(float %tmp56)
- %tmp58 = fmul float %tmp42, %tmp57
- %tmp59 = fmul float %tmp43, %tmp57
- %tmp60 = fmul float %tmp44, %tmp57
+ %tmp58 = fmul float %p2.i18, %tmp57
+ %tmp59 = fmul float %p2.i12, %tmp57
+ %tmp60 = fmul float %p2.i6, %tmp57
%tmp61 = fmul float %tmp58, %tmp22
%tmp62 = fmul float %tmp59, %tmp23
%tmp63 = fadd float %tmp62, %tmp61
@@ -90,7 +109,7 @@ main_body:
%tmp68 = fadd float %tmp67, %tmp66
%tmp69 = fmul float %tmp26, %tmp68
%tmp70 = fmul float %tmp27, %tmp68
- %tmp71 = call float @fabs(float %tmp69)
+ %tmp71 = call float @llvm.fabs.f32(float %tmp69)
%tmp72 = fcmp olt float 0x3EE4F8B580000000, %tmp71
%tmp73 = sext i1 %tmp72 to i32
%tmp74 = bitcast i32 %tmp73 to float
@@ -110,7 +129,7 @@ IF: ; preds = %main_body
ENDIF: ; preds = %IF, %main_body
%temp4.0 = phi float [ %tmp83, %IF ], [ %tmp31, %main_body ]
- %tmp84 = call float @fabs(float %tmp70)
+ %tmp84 = call float @llvm.fabs.f32(float %tmp70)
%tmp85 = fcmp olt float 0x3EE4F8B580000000, %tmp84
%tmp86 = sext i1 %tmp85 to i32
%tmp87 = bitcast i32 %tmp86 to float
@@ -146,11 +165,9 @@ ENDIF24: ; preds = %IF25, %ENDIF
%tmp110 = fmul float %tmp109, %tmp106
%tmp111 = fsub float -0.000000e+00, %tmp105
%tmp112 = fmul float %tmp111, %tmp106
- %tmp113 = call i32 @llvm.SI.packf16(float %tmp108, float %tmp110)
- %tmp114 = bitcast i32 %tmp113 to float
- %tmp115 = call i32 @llvm.SI.packf16(float %tmp112, float 1.000000e+00)
- %tmp116 = bitcast i32 %tmp115 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %tmp114, float %tmp116, float %tmp114, float %tmp116)
+ %tmp113 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %tmp108, float %tmp110)
+ %tmp115 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %tmp112, float 1.000000e+00)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp113, <2 x half> %tmp115, i1 true, i1 true) #0
ret void
}
@@ -183,7 +200,7 @@ LOOP: ; preds = %ENDIF, %main_body
br i1 %tmp33, label %IF, label %ENDIF
IF: ; preds = %LOOP
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %temp4.0, float %temp5.0, float %temp6.0, float 1.000000e+00)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %temp4.0, float %temp5.0, float %temp6.0, float 1.000000e+00, i1 true, i1 true) #0
ret void
ENDIF: ; preds = %LOOP
@@ -193,31 +210,6 @@ ENDIF: ; preds = %LOOP
br label %LOOP
}
-; Function Attrs: nounwind readnone
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
-
-; Function Attrs: readonly
-declare float @fabs(float) #2
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-
-; Function Attrs: nounwind readnone
-declare float @llvm.SI.fs.interp(i32, i32, i32, <2 x i32>) #1
-
-; Function Attrs: nounwind readnone
-declare <4 x float> @llvm.SI.sample.v2i32(<2 x i32>, <8 x i32>, <16 x i8>, i32) #1
-
-; Function Attrs: readnone
-declare float @llvm.amdgcn.rsq.f32(float) #1
-
-declare float @llvm.exp2.f32(float) #1
-
-; Function Attrs: nounwind readnone
-declare float @llvm.pow.f32(float, float) #1
-
-; Function Attrs: nounwind readnone
-declare i32 @llvm.SI.packf16(float, float) #1
-
; This checks for a bug in the FixSGPRCopies pass where VReg96
; registers were being identified as an SGPR regclass which was causing
; an assertion failure.
@@ -248,24 +240,24 @@ entry:
br i1 %tmp27, label %if, label %else
if: ; preds = %entry
- %val.if = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> <i32 11, i32 13>, <8 x i32> %tmp24, <4 x i32> %tmp26.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %val.if.0 = extractelement <4 x float> %val.if, i32 0
- %val.if.1 = extractelement <4 x float> %val.if, i32 1
- %val.if.2 = extractelement <4 x float> %val.if, i32 2
+ %tmp1 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> <float 0x36D6000000000000, float 0x36DA000000000000>, <8 x i32> %tmp24, <4 x i32> %tmp26.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %val.if.0 = extractelement <4 x float> %tmp1, i32 0
+ %val.if.1 = extractelement <4 x float> %tmp1, i32 1
+ %val.if.2 = extractelement <4 x float> %tmp1, i32 2
br label %endif
else: ; preds = %entry
- %val.else = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> <i32 5, i32 7>, <8 x i32> %tmp24, <4 x i32> %tmp26.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %val.else.0 = extractelement <4 x float> %val.else, i32 0
- %val.else.1 = extractelement <4 x float> %val.else, i32 1
- %val.else.2 = extractelement <4 x float> %val.else, i32 2
+ %tmp2 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> <float 0x36C4000000000000, float 0x36CC000000000000>, <8 x i32> %tmp24, <4 x i32> %tmp26.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %val.else.0 = extractelement <4 x float> %tmp2, i32 0
+ %val.else.1 = extractelement <4 x float> %tmp2, i32 1
+ %val.else.2 = extractelement <4 x float> %tmp2, i32 2
br label %endif
endif: ; preds = %else, %if
%val.0 = phi float [ %val.if.0, %if ], [ %val.else.0, %else ]
%val.1 = phi float [ %val.if.1, %if ], [ %val.else.1, %else ]
%val.2 = phi float [ %val.if.2, %if ], [ %val.else.2, %else ]
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %val.0, float %val.1, float %val.2, float 0.000000e+00)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %val.0, float %val.1, float %val.2, float 0.000000e+00, i1 true, i1 true) #0
ret void
}
@@ -273,7 +265,7 @@ endif: ; preds = %else, %if
; CHECK: buffer_load_dword
; CHECK: v_add
; CHECK: s_endpgm
-define void @copy1(float addrspace(1)* %out, float addrspace(1)* %in0) {
+define amdgpu_kernel void @copy1(float addrspace(1)* %out, float addrspace(1)* %in0) {
entry:
%tmp = load float, float addrspace(1)* %in0
%tmp1 = fcmp oeq float %tmp, 0.000000e+00
@@ -312,7 +304,7 @@ LOOP68: ; preds = %ENDIF69, %entry
IF70: ; preds = %LOOP68
%q = icmp ne i32 %l, 13
%temp.8 = select i1 %q, float 1.000000e+00, float 0.000000e+00
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %temp.8, float 0.000000e+00, float 0.000000e+00, float 1.000000e+00)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %temp.8, float 0.000000e+00, float 0.000000e+00, float 1.000000e+00, i1 true, i1 true) #0
ret void
ENDIF69: ; preds = %LOOP68
@@ -337,41 +329,53 @@ ENDIF69: ; preds = %LOOP68
define amdgpu_ps void @sample_rsrc([6 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <4 x i32>] addrspace(2)* byval %arg2, [32 x <8 x i32>] addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, i32 %arg19, float %arg20, float %arg21) #0 {
bb:
%tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg1, i32 0, i32 0
- %tmp22 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !2
+ %tmp22 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !3
%tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp22, i32 16)
%tmp25 = getelementptr [32 x <8 x i32>], [32 x <8 x i32>] addrspace(2)* %arg3, i32 0, i32 0
- %tmp26 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp25, !tbaa !2
+ %tmp26 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp25, !tbaa !3
%tmp27 = getelementptr [16 x <4 x i32>], [16 x <4 x i32>] addrspace(2)* %arg2, i32 0, i32 0
- %tmp28 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp27, !tbaa !2
- %tmp29 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %arg5, <2 x i32> %arg7)
- %tmp30 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %arg5, <2 x i32> %arg7)
+ %tmp28 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp27, !tbaa !3
+ %i.i = extractelement <2 x i32> %arg7, i32 0
+ %j.i = extractelement <2 x i32> %arg7, i32 1
+ %i.f.i = bitcast i32 %i.i to float
+ %j.f.i = bitcast i32 %j.i to float
+ %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 0, i32 0, i32 %arg5) #0
+ %p2.i = call float @llvm.amdgcn.interp.p2(float %p1.i, float %j.f.i, i32 0, i32 0, i32 %arg5) #0
+ %i.i1 = extractelement <2 x i32> %arg7, i32 0
+ %j.i2 = extractelement <2 x i32> %arg7, i32 1
+ %i.f.i3 = bitcast i32 %i.i1 to float
+ %j.f.i4 = bitcast i32 %j.i2 to float
+ %p1.i5 = call float @llvm.amdgcn.interp.p1(float %i.f.i3, i32 1, i32 0, i32 %arg5) #0
+ %p2.i6 = call float @llvm.amdgcn.interp.p2(float %p1.i5, float %j.f.i4, i32 1, i32 0, i32 %arg5) #0
%tmp31 = bitcast float %tmp23 to i32
%tmp36 = icmp ne i32 %tmp31, 0
br i1 %tmp36, label %bb38, label %bb80
bb38: ; preds = %bb
- %tmp52 = bitcast float %tmp29 to i32
- %tmp53 = bitcast float %tmp30 to i32
+ %tmp52 = bitcast float %p2.i to i32
+ %tmp53 = bitcast float %p2.i6 to i32
%tmp54 = insertelement <2 x i32> undef, i32 %tmp52, i32 0
%tmp55 = insertelement <2 x i32> %tmp54, i32 %tmp53, i32 1
%tmp56 = bitcast <8 x i32> %tmp26 to <8 x i32>
- %tmp58 = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> %tmp55, <8 x i32> %tmp56, <4 x i32> %tmp28, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %a.bc.i = bitcast <2 x i32> %tmp55 to <2 x float>
+ %tmp2 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i, <8 x i32> %tmp56, <4 x i32> %tmp28, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
br label %bb71
bb80: ; preds = %bb
- %tmp81 = bitcast float %tmp29 to i32
- %tmp82 = bitcast float %tmp30 to i32
+ %tmp81 = bitcast float %p2.i to i32
+ %tmp82 = bitcast float %p2.i6 to i32
%tmp82.2 = add i32 %tmp82, 1
%tmp83 = insertelement <2 x i32> undef, i32 %tmp81, i32 0
%tmp84 = insertelement <2 x i32> %tmp83, i32 %tmp82.2, i32 1
%tmp85 = bitcast <8 x i32> %tmp26 to <8 x i32>
- %tmp87 = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> %tmp84, <8 x i32> %tmp85, <4 x i32> %tmp28, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %a.bc.i1 = bitcast <2 x i32> %tmp84 to <2 x float>
+ %tmp3 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i1, <8 x i32> %tmp85, <4 x i32> %tmp28, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
br label %bb71
bb71: ; preds = %bb80, %bb38
- %tmp72 = phi <4 x float> [ %tmp58, %bb38 ], [ %tmp87, %bb80 ]
+ %tmp72 = phi <4 x float> [ %tmp2, %bb38 ], [ %tmp3, %bb80 ]
%tmp88 = extractelement <4 x float> %tmp72, i32 0
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %tmp88, float %tmp88, float %tmp88, float %tmp88)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp88, float %tmp88, float %tmp88, float %tmp88, i1 true, i1 true) #0
ret void
}
@@ -379,14 +383,14 @@ bb71: ; preds = %bb80, %bb38
; CHECK-LABEL: {{^}}mimg_srsrc_sgpr:
; CHECK: image_sample v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] dmask:0x1
define amdgpu_ps void @mimg_srsrc_sgpr([34 x <8 x i32>] addrspace(2)* byval %arg) #0 {
+bb:
%tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
%tmp7 = getelementptr [34 x <8 x i32>], [34 x <8 x i32>] addrspace(2)* %arg, i32 0, i32 %tid
%tmp8 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp7, align 32, !tbaa !0
- %tmp9 = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> <i32 1061158912, i32 1048576000>, <8 x i32> %tmp8, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %tmp10 = extractelement <4 x float> %tmp9, i32 0
- %tmp12 = call i32 @llvm.SI.packf16(float undef, float %tmp10)
- %tmp13 = bitcast i32 %tmp12 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %tmp13, float undef, float undef, float undef)
+ %tmp = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> <float 7.500000e-01, float 2.500000e-01>, <8 x i32> %tmp8, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %tmp10 = extractelement <4 x float> %tmp, i32 0
+ %tmp12 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float undef, float %tmp10)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp12, <2 x half> undef, i1 true, i1 true) #0
ret void
}
@@ -394,24 +398,35 @@ define amdgpu_ps void @mimg_srsrc_sgpr([34 x <8 x i32>] addrspace(2)* byval %arg
; CHECK-LABEL: {{^}}mimg_ssamp_sgpr:
; CHECK: image_sample v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] dmask:0x1
define amdgpu_ps void @mimg_ssamp_sgpr([17 x <4 x i32>] addrspace(2)* byval %arg) #0 {
+bb:
%tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
%tmp7 = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(2)* %arg, i32 0, i32 %tid
%tmp8 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp7, align 16, !tbaa !0
- %tmp9 = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> <i32 1061158912, i32 1048576000>, <8 x i32> undef, <4 x i32> %tmp8, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %tmp10 = extractelement <4 x float> %tmp9, i32 0
- %tmp12 = call i32 @llvm.SI.packf16(float %tmp10, float undef)
- %tmp13 = bitcast i32 %tmp12 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %tmp13, float undef, float undef, float undef)
+ %tmp = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> <float 7.500000e-01, float 2.500000e-01>, <8 x i32> undef, <4 x i32> %tmp8, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %tmp10 = extractelement <4 x float> %tmp, i32 0
+ %tmp12 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %tmp10, float undef)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp12, <2 x half> undef, i1 true, i1 true) #0
ret void
}
+declare float @llvm.fabs.f32(float) #1
+declare float @llvm.amdgcn.rsq.f32(float) #1
+declare float @llvm.exp2.f32(float) #1
+declare float @llvm.pow.f32(float, float) #1
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
+declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #1
+declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #1
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
+declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
+declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
+declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
+declare float @llvm.SI.load.const(<16 x i8>, i32) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind readonly }
!0 = !{!1, !1, i64 0, i32 1}
-!1 = !{!"const", !3}
-!2 = !{!1, !1, i64 0}
-!3 = !{!"tbaa root"}
+!1 = !{!"const", !2}
+!2 = !{!"tbaa root"}
+!3 = !{!1, !1, i64 0}
diff --git a/test/CodeGen/AMDGPU/sgprcopies.ll b/test/CodeGen/AMDGPU/sgprcopies.ll
new file mode 100644
index 000000000000..68cd83bb6cf0
--- /dev/null
+++ b/test/CodeGen/AMDGPU/sgprcopies.ll
@@ -0,0 +1,58 @@
+; RUN: llc < %s -march=amdgcn -verify-machineinstrs | FileCheck -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}checkTwoBlocksWithUniformBranch
+; GCN: BB0_2
+; GCN: v_add
+define amdgpu_kernel void @checkTwoBlocksWithUniformBranch(i32 addrspace(1)* nocapture %out, i32 %width, float %xPos, float %yPos, float %xStep, float %yStep, i32 %maxIter) {
+entry:
+ %conv = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %rem = urem i32 %conv, %width
+ %div = udiv i32 %conv, %width
+ %conv1 = sitofp i32 %rem to float
+ %x = tail call float @llvm.fmuladd.f32(float %xStep, float %conv1, float %xPos)
+ %conv2 = sitofp i32 %div to float
+ %y = tail call float @llvm.fmuladd.f32(float %yStep, float %conv2, float %yPos)
+ %yy = fmul float %y, %y
+ %xy = tail call float @llvm.fmuladd.f32(float %x, float %x, float %yy)
+ %cmp01 = fcmp ole float %xy, 4.000000e+00
+ %cmp02 = icmp ne i32 %maxIter, 0
+ %cond01 = and i1 %cmp02, %cmp01
+ br i1 %cond01, label %for.body.preheader, label %for.end
+
+for.body.preheader: ; preds = %entry
+ br label %for.body
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %x_val = phi float [ %call8, %for.body ], [ %x, %for.body.preheader ]
+ %iter_val = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
+ %y_val = phi float [ %call9, %for.body ], [ %y, %for.body.preheader ]
+ %sub = fsub float -0.000000e+00, %y_val
+ %call7 = tail call float @llvm.fmuladd.f32(float %x_val, float %x_val, float %x) #1
+ %call8 = tail call float @llvm.fmuladd.f32(float %sub, float %y_val, float %call7) #1
+ %mul = fmul float %x_val, 2.000000e+00
+ %call9 = tail call float @llvm.fmuladd.f32(float %mul, float %y_val, float %y) #1
+ %inc = add nuw i32 %iter_val, 1
+ %mul3 = fmul float %call9, %call9
+ %0 = tail call float @llvm.fmuladd.f32(float %call8, float %call8, float %mul3)
+ %cmp = fcmp ole float %0, 4.000000e+00
+ %cmp5 = icmp ult i32 %inc, %maxIter
+ %or.cond = and i1 %cmp5, %cmp
+ br i1 %or.cond, label %for.body, label %for.end.loopexit
+
+for.end.loopexit: ; preds = %for.body
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %entry
+ %iter.0.lcssa = phi i32 [ 0, %entry ], [ %inc, %for.end.loopexit ]
+ %idxprom = ashr exact i32 %conv, 32
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %idxprom
+ store i32 %iter.0.lcssa, i32 addrspace(1)* %arrayidx, align 4
+ ret void
+}
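+; The checks above are intentionally loose: they only require that the loop
+; body lands in its own machine basic block (BB0_2) and that a VALU v_add is
+; emitted, presumably guarding against the SGPR-copy fixup rewriting the
+; divergent loop arithmetic into scalar instructions.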
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+declare float @llvm.fmuladd.f32(float, float, float) #1
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { readnone }
diff --git a/test/CodeGen/AMDGPU/shift-and-i128-ubfe.ll b/test/CodeGen/AMDGPU/shift-and-i128-ubfe.ll
index 48bbc32abcbb..0a29db4a0580 100644
--- a/test/CodeGen/AMDGPU/shift-and-i128-ubfe.ll
+++ b/test/CodeGen/AMDGPU/shift-and-i128-ubfe.ll
@@ -11,8 +11,8 @@
; GCN: buffer_store_dwordx4 v{{\[}}[[SHIFT]]:[[ZERO2]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; GCN: s_endpgm
-define void @v_uextract_bit_31_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) #1 {
- %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+define amdgpu_kernel void @v_uextract_bit_31_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) #1 {
+ %id.x = tail call i32 @llvm.amdgcn.workgroup.id.x()
%in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i128, i128 addrspace(1)* %out, i32 %id.x
%ld.64 = load i128, i128 addrspace(1)* %in.gep
@@ -33,7 +33,7 @@ define void @v_uextract_bit_31_i128(i128 addrspace(1)* %out, i128 addrspace(1)*
; GCN-DAG: buffer_store_dwordx4 v{{\[}}[[SHIFT]]:[[ZERO2]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; GCN: s_endpgm
-define void @v_uextract_bit_63_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_uextract_bit_63_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i128, i128 addrspace(1)* %out, i32 %id.x
@@ -55,8 +55,8 @@ define void @v_uextract_bit_63_i128(i128 addrspace(1)* %out, i128 addrspace(1)*
; GCN-DAG: buffer_store_dwordx4 v{{\[}}[[SHIFT]]:[[ZERO2]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; GCN: s_endpgm
-define void @v_uextract_bit_95_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) #1 {
- %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+define amdgpu_kernel void @v_uextract_bit_95_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) #1 {
+ %id.x = tail call i32 @llvm.amdgcn.workgroup.id.x()
%in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i128, i128 addrspace(1)* %out, i32 %id.x
%ld.64 = load i128, i128 addrspace(1)* %in.gep
@@ -77,7 +77,7 @@ define void @v_uextract_bit_95_i128(i128 addrspace(1)* %out, i128 addrspace(1)*
; GCN-DAG: buffer_store_dwordx4 v{{\[}}[[SHIFT]]:[[ZERO2]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; GCN: s_endpgm
-define void @v_uextract_bit_127_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_uextract_bit_127_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i128, i128 addrspace(1)* %out, i32 %id.x
@@ -100,7 +100,7 @@ define void @v_uextract_bit_127_i128(i128 addrspace(1)* %out, i128 addrspace(1)*
; GCN-DAG: buffer_store_dwordx4 v{{\[}}[[OR0]]:[[ZERO]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; GCN: s_endpgm
-define void @v_uextract_bit_34_100_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_uextract_bit_34_100_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i128, i128 addrspace(1)* %out, i32 %id.x
@@ -113,5 +113,7 @@ define void @v_uextract_bit_34_100_i128(i128 addrspace(1)* %out, i128 addrspace(
declare i32 @llvm.amdgcn.workitem.id.x() #0
+declare i32 @llvm.amdgcn.workgroup.id.x() #0
+
attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll b/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll
index b85714ea54c1..6f5fc6d0f38c 100644
--- a/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll
+++ b/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll
@@ -8,8 +8,8 @@
; GCN-DAG: v_lshrrev_b32_e32 v[[SHIFT:[0-9]+]], 31, [[VAL]]
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[SHIFT]]:[[ZERO]]{{\]}}
-define void @v_uextract_bit_31_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
- %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+define amdgpu_kernel void @v_uextract_bit_31_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+ %id.x = tail call i32 @llvm.amdgcn.workgroup.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
%ld.64 = load i64, i64 addrspace(1)* %in.gep
@@ -25,7 +25,7 @@ define void @v_uextract_bit_31_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in
; GCN-DAG: v_lshrrev_b32_e32 v[[SHIFT:[0-9]+]], 31, [[VAL]]
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[SHIFT]]:[[ZERO]]{{\]}}
-define void @v_uextract_bit_63_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_uextract_bit_63_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
@@ -41,8 +41,8 @@ define void @v_uextract_bit_63_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in
; GCN-DAG: v_bfe_u32 v[[BFE:[0-9]+]], [[VAL]], 1, 1
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[BFE]]:[[ZERO]]{{\]}}
-define void @v_uextract_bit_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
- %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+define amdgpu_kernel void @v_uextract_bit_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+ %id.x = tail call i32 @llvm.amdgcn.workgroup.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
%ld.64 = load i64, i64 addrspace(1)* %in.gep
@@ -57,8 +57,8 @@ define void @v_uextract_bit_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in)
; GCN-DAG: v_bfe_u32 v[[BFE:[0-9]+]], [[VAL]], 20, 1
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[BFE]]:[[ZERO]]{{\]}}
-define void @v_uextract_bit_20_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
- %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+define amdgpu_kernel void @v_uextract_bit_20_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+ %id.x = tail call i32 @llvm.amdgcn.workgroup.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
%ld.64 = load i64, i64 addrspace(1)* %in.gep
@@ -73,7 +73,7 @@ define void @v_uextract_bit_20_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in
; GCN-DAG: v_and_b32_e32 v[[AND:[0-9]+]], 1, [[VAL]]
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[AND]]:[[ZERO]]{{\]}}
-define void @v_uextract_bit_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_uextract_bit_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
@@ -89,7 +89,7 @@ define void @v_uextract_bit_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in
; GCN-DAG: v_bfe_u32 v[[BFE:[0-9]+]], [[VAL]], 1, 1{{$}}
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[SHIFT]]:[[ZERO]]{{\]}}
-define void @v_uextract_bit_33_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_uextract_bit_33_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
@@ -105,8 +105,8 @@ define void @v_uextract_bit_33_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in
; GCN-DAG: v_bfe_u32 v[[BFE:[0-9]+]], [[VAL]], 20, 2
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[BFE]]:[[ZERO]]{{\]}}
-define void @v_uextract_bit_20_21_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
- %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+define amdgpu_kernel void @v_uextract_bit_20_21_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+ %id.x = tail call i32 @llvm.amdgcn.workgroup.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
%ld.64 = load i64, i64 addrspace(1)* %in.gep
@@ -121,8 +121,8 @@ define void @v_uextract_bit_20_21_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; GCN-DAG: v_bfe_u32 v[[BFE:[0-9]+]], [[VAL]], 1, 30
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[BFE]]:[[ZERO]]{{\]}}
-define void @v_uextract_bit_1_30_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
- %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+define amdgpu_kernel void @v_uextract_bit_1_30_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+ %id.x = tail call i32 @llvm.amdgcn.workgroup.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
%ld.64 = load i64, i64 addrspace(1)* %in.gep
@@ -137,8 +137,8 @@ define void @v_uextract_bit_1_30_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %
; GCN-DAG: v_lshrrev_b32_e32 v[[SHIFT:[0-9]+]], 1, [[VAL]]
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[SHIFT]]:[[ZERO]]{{\]}}
-define void @v_uextract_bit_1_31_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
- %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+define amdgpu_kernel void @v_uextract_bit_1_31_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+ %id.x = tail call i32 @llvm.amdgcn.workgroup.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
%ld.64 = load i64, i64 addrspace(1)* %in.gep
@@ -155,8 +155,8 @@ define void @v_uextract_bit_1_31_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %
; GCN-DAG: v_and_b32_e32 v[[AND:[0-9]+]], 3, v[[SHRLO]]{{$}}
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[AND]]:[[ZERO]]{{\]}}
-define void @v_uextract_bit_31_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
- %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+define amdgpu_kernel void @v_uextract_bit_31_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+ %id.x = tail call i32 @llvm.amdgcn.workgroup.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
%ld.64 = load i64, i64 addrspace(1)* %in.gep
@@ -171,7 +171,7 @@ define void @v_uextract_bit_31_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; GCN-DAG: v_bfe_u32 v[[BFE:[0-9]+]], [[VAL]], 1, 2
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[BFE]]:[[ZERO]]{{\]}}
-define void @v_uextract_bit_32_33_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_uextract_bit_32_33_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
@@ -188,7 +188,7 @@ define void @v_uextract_bit_32_33_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; GCN-DAG: v_and_b32_e32 v[[AND:[0-9]+]], 0x3fffffff, v[[SHRLO]]{{$}}
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[AND]]:[[ZERO]]{{\]}}
-define void @v_uextract_bit_30_60_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_uextract_bit_30_60_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
@@ -204,7 +204,7 @@ define void @v_uextract_bit_30_60_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; GCN-DAG: v_bfe_u32 v[[BFE:[0-9]+]], [[VAL]], 1, 30
; GCN-DAG: v_mov_b32_e32 v[[BFE:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[SHIFT]]:[[ZERO]]{{\]}}
-define void @v_uextract_bit_33_63_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_uextract_bit_33_63_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
@@ -220,7 +220,7 @@ define void @v_uextract_bit_33_63_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; GCN: v_lshr_b64 v{{\[}}[[SHRLO:[0-9]+]]:[[SHRHI:[0-9]+]]{{\]}}, [[VAL]], 31
; GCN-NEXT: v_mov_b32_e32 v[[SHRHI]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[SHRLO]]:[[SHRHI]]{{\]}}
-define void @v_uextract_bit_31_63_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_uextract_bit_31_63_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
@@ -236,7 +236,7 @@ define void @v_uextract_bit_31_63_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; GCN: buffer_load_dword [[VAL:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; GCN-DAG: v_lshrrev_b32_e32 v[[SHIFT:[0-9]+]], 31, [[VAL]]
; GCN: buffer_store_dword v[[SHIFT]]
-define void @v_uextract_bit_31_i64_trunc_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_uextract_bit_31_i64_trunc_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id.x
@@ -252,7 +252,7 @@ define void @v_uextract_bit_31_i64_trunc_i32(i32 addrspace(1)* %out, i64 addrspa
; GCN: buffer_load_dword [[VAL:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; GCN: v_bfe_u32 [[BFE:v[0-9]+]], [[VAL]], 3, 1{{$}}
; GCN: buffer_store_dword [[BFE]]
-define void @v_uextract_bit_3_i64_trunc_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_uextract_bit_3_i64_trunc_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id.x
@@ -268,7 +268,7 @@ define void @v_uextract_bit_3_i64_trunc_i32(i32 addrspace(1)* %out, i64 addrspac
; GCN: buffer_load_dword [[VAL:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
; GCN: v_bfe_u32 [[BFE:v[0-9]+]], [[VAL]], 1, 1{{$}}
; GCN: buffer_store_dword [[BFE]]
-define void @v_uextract_bit_33_i64_trunc_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_uextract_bit_33_i64_trunc_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id.x
@@ -286,7 +286,7 @@ define void @v_uextract_bit_33_i64_trunc_i32(i32 addrspace(1)* %out, i64 addrspa
; GCN-NEXT: v_and_b32_e32 v[[SHRLO]], 3, v[[SHRLO]]
; GCN-NOT: v[[SHRLO]]
; GCN: buffer_store_dword v[[SHRLO]]
-define void @v_uextract_bit_31_32_i64_trunc_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_uextract_bit_31_32_i64_trunc_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id.x
@@ -306,7 +306,7 @@ define void @v_uextract_bit_31_32_i64_trunc_i32(i32 addrspace(1)* %out, i64 addr
; GCN-NOT: v[[SHRLO]]
; GCN-NOT: v[[SHRHI]]
; GCN: buffer_store_dwordx2 v{{\[}}[[SHRLO]]:[[SHRHI]]{{\]}}
-define void @and_not_mask_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @and_not_mask_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
@@ -327,7 +327,7 @@ define void @and_not_mask_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[SHRLO]]:[[SHRHI]]{{\]}}
; GCN: buffer_store_dwordx2 v{{\[}}[[AND]]:[[ZERO]]{{\]}}
-define void @v_uextract_bit_27_29_multi_use_shift_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_uextract_bit_27_29_multi_use_shift_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
@@ -347,7 +347,7 @@ define void @v_uextract_bit_27_29_multi_use_shift_i64(i64 addrspace(1)* %out, i6
; GCN-DAG: v_bfe_u32 v[[BFE:[0-9]+]], [[VAL]], 2, 3
; GCN-DAG: buffer_store_dwordx2 v{{\[}}[[SHR]]:[[ZERO_SHR]]{{\]}}
; GCN: buffer_store_dwordx2 v{{\[}}[[BFE]]:[[ZERO_BFE]]{{\]}}
-define void @v_uextract_bit_34_37_multi_use_shift_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_uextract_bit_34_37_multi_use_shift_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
@@ -365,7 +365,7 @@ define void @v_uextract_bit_34_37_multi_use_shift_i64(i64 addrspace(1)* %out, i6
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[BFE]]:[[ZERO]]{{\]}}
; GCN: buffer_store_dword v[[ZERO]]
-define void @v_uextract_bit_33_36_use_upper_half_shift_i64(i64 addrspace(1)* %out0, i32 addrspace(1)* %out1, i64 addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_uextract_bit_33_36_use_upper_half_shift_i64(i64 addrspace(1)* %out0, i32 addrspace(1)* %out1, i64 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
%out0.gep = getelementptr i64, i64 addrspace(1)* %out0, i32 %id.x
@@ -383,5 +383,7 @@ define void @v_uextract_bit_33_36_use_upper_half_shift_i64(i64 addrspace(1)* %ou
declare i32 @llvm.amdgcn.workitem.id.x() #0
+declare i32 @llvm.amdgcn.workgroup.id.x() #0
+
attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/shift-i64-opts.ll b/test/CodeGen/AMDGPU/shift-i64-opts.ll
index 28a7b924904d..a803849be02c 100644
--- a/test/CodeGen/AMDGPU/shift-i64-opts.ll
+++ b/test/CodeGen/AMDGPU/shift-i64-opts.ll
@@ -8,7 +8,7 @@
; GCN-DAG: v_lshrrev_b32_e32 v[[LO:[0-9]+]], 3, [[VAL]]
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @lshr_i64_35(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @lshr_i64_35(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
%shl = lshr i64 %val, 35
store i64 %shl, i64 addrspace(1)* %out
@@ -20,7 +20,7 @@ define void @lshr_i64_35(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
; GCN-DAG: v_lshrrev_b32_e32 v[[LO:[0-9]+]], 31, [[VAL]]
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @lshr_i64_63(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @lshr_i64_63(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
%shl = lshr i64 %val, 63
store i64 %shl, i64 addrspace(1)* %out
@@ -32,7 +32,7 @@ define void @lshr_i64_63(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
; GCN-DAG: v_lshrrev_b32_e32 v[[LO:[0-9]+]], 1, [[VAL]]
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @lshr_i64_33(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @lshr_i64_33(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
%shl = lshr i64 %val, 33
store i64 %shl, i64 addrspace(1)* %out
@@ -43,7 +43,7 @@ define void @lshr_i64_33(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
; GCN-DAG: buffer_load_dword v[[LO:[0-9]+]]
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @lshr_i64_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @lshr_i64_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
%shl = lshr i64 %val, 32
store i64 %shl, i64 addrspace(1)* %out
@@ -58,7 +58,7 @@ define void @lshr_i64_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
; GCN: v_bfe_u32 v[[BFE:[0-9]+]], v[[HI]], 8, 23
; GCN: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[BFE]]:[[ZERO]]{{\]}}
-define void @lshr_and_i64_35(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @lshr_and_i64_35(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
%and = and i64 %val, 9223372036854775807 ; 0x7fffffffffffffff
%shl = lshr i64 %and, 40
@@ -73,7 +73,7 @@ define void @lshr_and_i64_35(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
; GCN: v_lshlrev_b32_e32 v[[HI:[0-9]+]], 3, [[VAL]]
; GCN: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @shl_i64_const_35(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @shl_i64_const_35(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
%shl = shl i64 %val, 35
store i64 %shl, i64 addrspace(1)* %out
@@ -84,7 +84,7 @@ define void @shl_i64_const_35(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
; GCN-DAG: buffer_load_dword v[[HI:[0-9]+]]
; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @shl_i64_const_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @shl_i64_const_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
%shl = shl i64 %val, 32
store i64 %shl, i64 addrspace(1)* %out
@@ -96,7 +96,7 @@ define void @shl_i64_const_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
; GCN: v_lshlrev_b32_e32 v[[HI:[0-9]+]], 31, [[VAL]]
; GCN: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @shl_i64_const_63(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @shl_i64_const_63(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
%shl = shl i64 %val, 63
store i64 %shl, i64 addrspace(1)* %out
@@ -106,7 +106,7 @@ define void @shl_i64_const_63(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
; ashr (i64 x), 32 => (ashr hi(x), 31), hi(x)
; GCN-LABEL: {{^}}ashr_i64_const_32:
-define void @ashr_i64_const_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @ashr_i64_const_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
%shl = ashr i64 %val, 32
store i64 %shl, i64 addrspace(1)* %out
@@ -114,7 +114,7 @@ define void @ashr_i64_const_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
}
; GCN-LABEL: {{^}}ashr_i64_const_63:
-define void @ashr_i64_const_63(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @ashr_i64_const_63(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
%shl = ashr i64 %val, 63
store i64 %shl, i64 addrspace(1)* %out
@@ -125,7 +125,7 @@ define void @ashr_i64_const_63(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 31, [[VAL]]
; GCN: buffer_store_dword [[SHL]]
-define void @trunc_shl_31_i32_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @trunc_shl_31_i32_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
%shl = shl i64 %val, 31
%trunc = trunc i64 %shl to i32
@@ -137,7 +137,7 @@ define void @trunc_shl_31_i32_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in)
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 15, [[VAL]]
; GCN: buffer_store_short [[SHL]]
-define void @trunc_shl_15_i16_i64(i16 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @trunc_shl_15_i16_i64(i16 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
%shl = shl i64 %val, 15
%trunc = trunc i64 %shl to i16
@@ -149,7 +149,7 @@ define void @trunc_shl_15_i16_i64(i16 addrspace(1)* %out, i64 addrspace(1)* %in)
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 15, [[VAL]]
; GCN: buffer_store_short [[SHL]]
-define void @trunc_shl_15_i16_i32(i16 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @trunc_shl_15_i16_i32(i16 addrspace(1)* %out, i32 addrspace(1)* %in) {
%val = load i32, i32 addrspace(1)* %in
%shl = shl i32 %val, 15
%trunc = trunc i32 %shl to i16
@@ -161,7 +161,7 @@ define void @trunc_shl_15_i16_i32(i16 addrspace(1)* %out, i32 addrspace(1)* %in)
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 7, [[VAL]]
; GCN: buffer_store_byte [[SHL]]
-define void @trunc_shl_7_i8_i64(i8 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @trunc_shl_7_i8_i64(i8 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
%shl = shl i64 %val, 7
%trunc = trunc i64 %shl to i8
@@ -174,7 +174,7 @@ define void @trunc_shl_7_i8_i64(i8 addrspace(1)* %out, i64 addrspace(1)* %in) {
; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 1, [[VAL]]
; GCN: v_and_b32_e32 [[AND:v[0-9]+]], 2, [[SHL]]
; GCN: buffer_store_byte [[AND]]
-define void @trunc_shl_1_i2_i64(i2 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @trunc_shl_1_i2_i64(i2 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
%shl = shl i64 %val, 1
%trunc = trunc i64 %shl to i2
@@ -186,7 +186,7 @@ define void @trunc_shl_1_i2_i64(i2 addrspace(1)* %out, i64 addrspace(1)* %in) {
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 1, [[VAL]]
; GCN: buffer_store_dword [[SHL]]
-define void @trunc_shl_1_i32_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @trunc_shl_1_i32_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
%shl = shl i64 %val, 1
%trunc = trunc i64 %shl to i32
@@ -198,7 +198,7 @@ define void @trunc_shl_1_i32_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in)
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 16, [[VAL]]
; GCN: buffer_store_dword [[SHL]]
-define void @trunc_shl_16_i32_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @trunc_shl_16_i32_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
%shl = shl i64 %val, 16
%trunc = trunc i64 %shl to i32
@@ -209,7 +209,7 @@ define void @trunc_shl_16_i32_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in)
; GCN-LABEL: {{^}}trunc_shl_33_i32_i64:
; GCN: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
; GCN: buffer_store_dword [[ZERO]]
-define void @trunc_shl_33_i32_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @trunc_shl_33_i32_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
%shl = shl i64 %val, 33
%trunc = trunc i64 %shl to i32
@@ -222,7 +222,7 @@ define void @trunc_shl_33_i32_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in)
; GCN-DAG: v_lshlrev_b32_e32 v[[RESHI:[0-9]+]], 16, v{{[0-9]+}}
; GCN-DAG: v_lshlrev_b32_e32 v[[RESLO:[0-9]+]], 16, v[[LO]]
; GCN: buffer_store_dwordx2 v{{\[}}[[RESLO]]:[[RESHI]]{{\]}}
-define void @trunc_shl_16_v2i32_v2i64(<2 x i32> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
+define amdgpu_kernel void @trunc_shl_16_v2i32_v2i64(<2 x i32> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
%val = load <2 x i64>, <2 x i64> addrspace(1)* %in
%shl = shl <2 x i64> %val, <i64 16, i64 16>
%trunc = trunc <2 x i64> %shl to <2 x i32>
@@ -235,7 +235,7 @@ define void @trunc_shl_16_v2i32_v2i64(<2 x i32> addrspace(1)* %out, <2 x i64> ad
; GCN: v_lshl_b64 v{{\[}}[[RESLO:[0-9]+]]:[[RESHI:[0-9]+]]{{\]}}, [[VAL]], 31
; GCN: buffer_store_dword v[[RESLO]]
; GCN: buffer_store_dwordx2 v{{\[}}[[RESLO]]:[[RESHI]]{{\]}}
-define void @trunc_shl_31_i32_i64_multi_use(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @trunc_shl_31_i32_i64_multi_use(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
%shl = shl i64 %val, 31
%trunc = trunc i64 %shl to i32
diff --git a/test/CodeGen/AMDGPU/shl.ll b/test/CodeGen/AMDGPU/shl.ll
index 972349c24453..ff666cc3653b 100644
--- a/test/CodeGen/AMDGPU/shl.ll
+++ b/test/CodeGen/AMDGPU/shl.ll
@@ -1,9 +1,11 @@
-; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; XUN: llc < %s -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck -check-prefix=GCN -check-prefix=VI %s
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; XUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare i32 @llvm.r600.read.tidig.x() #0
+declare i32 @llvm.r600.read.tgid.x() #0
+
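; llvm.r600.read.tidig.x returns the workitem id within the group (divergent
; across a wave); llvm.r600.read.tgid.x returns the workgroup id, which is
; uniform. The tests below use one or the other depending on whether a
; divergent or a uniform index is needed.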
;EG: {{^}}shl_v2i32:
;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -17,7 +19,7 @@ declare i32 @llvm.r600.read.tidig.x() #0
;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @shl_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @shl_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32>, <2 x i32> addrspace(1)* %in
%b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
@@ -44,7 +46,7 @@ define void @shl_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in
;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32>, <4 x i32> addrspace(1)* %in
%b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
@@ -57,7 +59,7 @@ define void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in
; SI: v_lshlrev_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @shl_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @shl_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
%b_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
%a = load i16, i16 addrspace(1)* %in
%b = load i16, i16 addrspace(1)* %b_ptr
@@ -70,7 +72,7 @@ define void @shl_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
; VI: v_lshlrev_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
; VI: v_lshlrev_b16_e64 v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
-define void @shl_i16_v_s(i16 addrspace(1)* %out, i16 addrspace(1)* %in, i16 %b) {
+define amdgpu_kernel void @shl_i16_v_s(i16 addrspace(1)* %out, i16 addrspace(1)* %in, i16 %b) {
%a = load i16, i16 addrspace(1)* %in
%result = shl i16 %a, %b
store i16 %result, i16 addrspace(1)* %out
@@ -81,7 +83,7 @@ define void @shl_i16_v_s(i16 addrspace(1)* %out, i16 addrspace(1)* %in, i16 %b)
; SI: v_lshlrev_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
; VI: v_lshlrev_b16_e64 v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
-define void @shl_i16_v_compute_s(i16 addrspace(1)* %out, i16 addrspace(1)* %in, i16 %b) {
+define amdgpu_kernel void @shl_i16_v_compute_s(i16 addrspace(1)* %out, i16 addrspace(1)* %in, i16 %b) {
%a = load i16, i16 addrspace(1)* %in
%b.add = add i16 %b, 3
%result = shl i16 %a, %b.add
@@ -92,7 +94,7 @@ define void @shl_i16_v_compute_s(i16 addrspace(1)* %out, i16 addrspace(1)* %in,
; GCN-LABEL: {{^}}shl_i16_computed_amount:
; VI: v_add_u16_e32 [[ADD:v[0-9]+]], 3, v{{[0-9]+}}
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, [[ADD]], v{{[0-9]+}}
-define void @shl_i16_computed_amount(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @shl_i16_computed_amount(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() #0
%gep = getelementptr inbounds i16, i16 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
@@ -107,7 +109,7 @@ define void @shl_i16_computed_amount(i16 addrspace(1)* %out, i16 addrspace(1)* %
; GCN-LABEL: {{^}}shl_i16_i_s:
; GCN: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 12
-define void @shl_i16_i_s(i16 addrspace(1)* %out, i16 zeroext %a) {
+define amdgpu_kernel void @shl_i16_i_s(i16 addrspace(1)* %out, i16 zeroext %a) {
%result = shl i16 %a, 12
store i16 %result, i16 addrspace(1)* %out
ret void
@@ -116,7 +118,7 @@ define void @shl_i16_i_s(i16 addrspace(1)* %out, i16 zeroext %a) {
; GCN-LABEL: {{^}}shl_v2i16:
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
+define amdgpu_kernel void @shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() #0
%gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i32 %tid
%gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
@@ -133,7 +135,7 @@ define void @shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @shl_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
+define amdgpu_kernel void @shl_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() #0
%gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %in, i32 %tid
%gep.out = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %out, i32 %tid
@@ -160,7 +162,7 @@ define void @shl_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in
; GCN-LABEL: {{^}}shl_i64:
; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
; VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
-define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%b_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
%a = load i64, i64 addrspace(1)* %in
%b = load i64, i64 addrspace(1)* %b_ptr
@@ -199,7 +201,7 @@ define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
-define void @shl_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
+define amdgpu_kernel void @shl_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
%a = load <2 x i64>, <2 x i64> addrspace(1)* %in
%b = load <2 x i64>, <2 x i64> addrspace(1)* %b_ptr
@@ -262,7 +264,7 @@ define void @shl_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
-define void @shl_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
+define amdgpu_kernel void @shl_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
%a = load <4 x i64>, <4 x i64> addrspace(1)* %in
%b = load <4 x i64>, <4 x i64> addrspace(1)* %b_ptr
@@ -277,7 +279,7 @@ define void @shl_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in
; GCN-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], [[LO_A]]
; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
-define void @s_shl_32_i64(i64 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @s_shl_32_i64(i64 addrspace(1)* %out, i64 %a) {
%result = shl i64 %a, 32
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -287,8 +289,8 @@ define void @s_shl_32_i64(i64 addrspace(1)* %out, i64 %a) {
; GCN-DAG: buffer_load_dword v[[LO_A:[0-9]+]],
; GCN-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[LO_A]]{{\]}}
-define void @v_shl_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
- %tid = call i32 @llvm.r600.read.tidig.x() #0
+define amdgpu_kernel void @v_shl_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+ %tid = call i32 @llvm.r600.read.tgid.x() #0
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
%a = load i64, i64 addrspace(1)* %gep.in
@@ -299,7 +301,7 @@ define void @v_shl_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
; FUNC-LABEL: {{^}}s_shl_constant_i64
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
-define void @s_shl_constant_i64(i64 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @s_shl_constant_i64(i64 addrspace(1)* %out, i64 %a) {
%shl = shl i64 281474976710655, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
ret void
@@ -311,7 +313,7 @@ define void @s_shl_constant_i64(i64 addrspace(1)* %out, i64 %a) {
; SI-DAG: s_movk_i32 s[[KHI:[0-9]+]], 0x11e{{$}}
; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\]}}, s{{\[}}[[KLO]]:[[KHI]]{{\]}}, [[VAL]]
; SI: buffer_store_dwordx2
-define void @v_shl_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
+define amdgpu_kernel void @v_shl_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
%a = load i64, i64 addrspace(1)* %aptr, align 8
%shl = shl i64 1231231234567, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
@@ -323,7 +325,7 @@ define void @v_shl_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr)
; SI-DAG: s_mov_b32 s[[KLO:[0-9]+]], 0x12d687{{$}}
; SI-DAG: s_mov_b32 s[[KHI:[0-9]+]], 0{{$}}
; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\]}}, s{{\[}}[[KLO]]:[[KHI]]{{\]}}, [[VAL]]
-define void @v_shl_i64_32_bit_constant(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
+define amdgpu_kernel void @v_shl_i64_32_bit_constant(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
%a = load i64, i64 addrspace(1)* %aptr, align 8
%shl = shl i64 1234567, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
@@ -332,7 +334,7 @@ define void @v_shl_i64_32_bit_constant(i64 addrspace(1)* %out, i64 addrspace(1)*
; FUNC-LABEL: {{^}}v_shl_inline_imm_64_i64:
; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\]}}, 64, {{v[0-9]+}}
-define void @v_shl_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
+define amdgpu_kernel void @v_shl_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
%a = load i64, i64 addrspace(1)* %aptr, align 8
%shl = shl i64 64, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
@@ -341,7 +343,7 @@ define void @v_shl_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %
; FUNC-LABEL: {{^}}s_shl_inline_imm_64_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 64, s{{[0-9]+}}
-define void @s_shl_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%shl = shl i64 64, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
ret void
@@ -349,7 +351,7 @@ define void @s_shl_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %
; FUNC-LABEL: {{^}}s_shl_inline_imm_1_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 1, s{{[0-9]+}}
-define void @s_shl_inline_imm_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%shl = shl i64 1, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
ret void
@@ -357,7 +359,7 @@ define void @s_shl_inline_imm_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a
; FUNC-LABEL: {{^}}s_shl_inline_imm_1.0_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 1.0, s{{[0-9]+}}
-define void @s_shl_inline_imm_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%shl = shl i64 4607182418800017408, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
ret void
@@ -365,7 +367,7 @@ define void @s_shl_inline_imm_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_1.0_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -1.0, s{{[0-9]+}}
-define void @s_shl_inline_imm_neg_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_neg_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%shl = shl i64 13830554455654793216, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
ret void
@@ -373,7 +375,7 @@ define void @s_shl_inline_imm_neg_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(
; FUNC-LABEL: {{^}}s_shl_inline_imm_0.5_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 0.5, s{{[0-9]+}}
-define void @s_shl_inline_imm_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%shl = shl i64 4602678819172646912, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
ret void
@@ -381,7 +383,7 @@ define void @s_shl_inline_imm_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_0.5_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -0.5, s{{[0-9]+}}
-define void @s_shl_inline_imm_neg_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_neg_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%shl = shl i64 13826050856027422720, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
ret void
@@ -389,7 +391,7 @@ define void @s_shl_inline_imm_neg_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(
; FUNC-LABEL: {{^}}s_shl_inline_imm_2.0_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 2.0, s{{[0-9]+}}
-define void @s_shl_inline_imm_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%shl = shl i64 4611686018427387904, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
ret void
@@ -397,7 +399,7 @@ define void @s_shl_inline_imm_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_2.0_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -2.0, s{{[0-9]+}}
-define void @s_shl_inline_imm_neg_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_neg_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%shl = shl i64 13835058055282163712, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
ret void
@@ -405,7 +407,7 @@ define void @s_shl_inline_imm_neg_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(
; FUNC-LABEL: {{^}}s_shl_inline_imm_4.0_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 4.0, s{{[0-9]+}}
-define void @s_shl_inline_imm_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%shl = shl i64 4616189618054758400, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
ret void
@@ -413,7 +415,7 @@ define void @s_shl_inline_imm_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_4.0_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -4.0, s{{[0-9]+}}
-define void @s_shl_inline_imm_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%shl = shl i64 13839561654909534208, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
ret void
@@ -427,7 +429,7 @@ define void @s_shl_inline_imm_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 4.0
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 0{{$}}
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}, s{{[0-9]+}}
-define void @s_shl_inline_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%shl = shl i64 1082130432, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
ret void
@@ -439,7 +441,7 @@ define void @s_shl_inline_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], -1{{$}}
; SI-DAG: s_mov_b32 s[[K_HI_COPY:[0-9]+]], s[[K_HI]]
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI_COPY]]{{\]}}, s{{[0-9]+}}
-define void @s_shl_inline_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%shl = shl i64 -1065353216, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
ret void
@@ -450,7 +452,7 @@ define void @s_shl_inline_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrsp
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 4.0
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0{{$}}
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}, s{{[0-9]+}}
-define void @s_shl_inline_high_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_high_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%shl = shl i64 4647714815446351872, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
ret void
@@ -460,10 +462,18 @@ define void @s_shl_inline_high_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrs
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], -4.0
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0{{$}}
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}, s{{[0-9]+}}
-define void @s_shl_inline_high_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_high_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%shl = shl i64 13871086852301127680, %a
store i64 %shl, i64 addrspace(1)* %out, align 8
ret void
}
+; FUNC-LABEL: {{^}}test_mul2:
+; GCN: s_lshl_b32 s{{[0-9]}}, s{{[0-9]}}, 1
+define amdgpu_kernel void @test_mul2(i32 %p) {
+ %i = mul i32 %p, 2
+ store volatile i32 %i, i32 addrspace(1)* undef
+ ret void
+}
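+; A multiply by a power of two should be strength-reduced to a shift, so the
+; uniform multiply above is expected to select to s_lshl_b32 by 1 rather than
+; a scalar multiply.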
+
attributes #0 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/shl.v2i16.ll b/test/CodeGen/AMDGPU/shl.v2i16.ll
new file mode 100644
index 000000000000..eac29bad7cf2
--- /dev/null
+++ b/test/CodeGen/AMDGPU/shl.v2i16.ll
@@ -0,0 +1,152 @@
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=CIVI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=CIVI %s
+
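+; Shifts of packed 16-bit vectors: gfx9 has a native packed shift
+; (v_pk_lshlrev_b16), VI only has scalar 16-bit shifts (so the two halves are
+; handled separately, partly via SDWA), and CI has no 16-bit instructions at
+; all and must shift in 32 bits and re-pack the result with masks and ors.
+; The per-prefix checks below reflect those three strategies.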
+; GCN-LABEL: {{^}}s_shl_v2i16:
+; GFX9: s_load_dword [[LHS:s[0-9]+]]
+; GFX9: s_load_dword [[RHS:s[0-9]+]]
+; GFX9: v_mov_b32_e32 [[VLHS:v[0-9]+]], [[LHS]]
+; GFX9: v_pk_lshlrev_b16 [[RESULT:v[0-9]+]], [[RHS]], [[VLHS]]
+
+; VI: v_lshlrev_b32_e32
+; VI: v_lshlrev_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI: v_or_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+
+; CI: v_lshlrev_b32_e32
+; CI: v_and_b32_e32 v{{[0-9]+}}, 0xffff, v{{[0-9]+}}
+; CI: v_lshlrev_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; CI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; CI: v_or_b32_e32
+define amdgpu_kernel void @s_shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %lhs, <2 x i16> %rhs) #0 {
+ %result = shl <2 x i16> %lhs, %rhs
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out
+ ret void
+}
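+; Note the operand order: the "rev" in v_pk_lshlrev_b16 means the shift amount
+; comes first and the value being shifted second, which is why the checks
+; match [[RHS]], [[VLHS]] rather than the source lhs, rhs order.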
+
+; GCN-LABEL: {{^}}v_shl_v2i16:
+; GCN: {{buffer|flat}}_load_dword [[LHS:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[RHS:v[0-9]+]]
+; GFX9: v_pk_lshlrev_b16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
+
+; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_lshlrev_b16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+
+; CI: s_mov_b32 [[MASK:s[0-9]+]], 0xffff{{$}}
+; CI: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, [[LHS]]
+; CI: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; CI: v_lshlrev_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; CI: v_lshl_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; CI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+; CI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], v{{[0-9]+}}
+; CI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @v_shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in.gep, i32 1
+ %a = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %b = load <2 x i16>, <2 x i16> addrspace(1)* %b_ptr
+ %result = shl <2 x i16> %a, %b
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}shl_v_s_v2i16:
+; GFX9: s_load_dword [[RHS:s[0-9]+]]
+; GFX9: {{buffer|flat}}_load_dword [[LHS:v[0-9]+]]
+; GFX9: v_pk_lshlrev_b16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
+define amdgpu_kernel void @shl_v_s_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in, <2 x i16> %sgpr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %result = shl <2 x i16> %vgpr, %sgpr
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}shl_s_v_v2i16:
+; GFX9: s_load_dword [[LHS:s[0-9]+]]
+; GFX9: {{buffer|flat}}_load_dword [[RHS:v[0-9]+]]
+; GFX9: v_pk_lshlrev_b16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
+define amdgpu_kernel void @shl_s_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in, <2 x i16> %sgpr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %result = shl <2 x i16> %sgpr, %vgpr
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}shl_imm_v_v2i16:
+; GCN: {{buffer|flat}}_load_dword [[RHS:v[0-9]+]]
+; GFX9: v_pk_lshlrev_b16 [[RESULT:v[0-9]+]], [[RHS]], 8
+define amdgpu_kernel void @shl_imm_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %result = shl <2 x i16> <i16 8, i16 8>, %vgpr
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}shl_v_imm_v2i16:
+; GCN: {{buffer|flat}}_load_dword [[LHS:v[0-9]+]]
+; GFX9: v_pk_lshlrev_b16 [[RESULT:v[0-9]+]], 8, [[LHS]]
+define amdgpu_kernel void @shl_v_imm_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+ %result = shl <2 x i16> %vgpr, <i16 8, i16 8>
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_shl_v4i16:
+; GCN: {{buffer|flat}}_load_dwordx2
+; GCN: {{buffer|flat}}_load_dwordx2
+; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: {{buffer|flat}}_store_dwordx2
+define amdgpu_kernel void @v_shl_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %out, i64 %tid.ext
+ %b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in.gep, i32 1
+ %a = load <4 x i16>, <4 x i16> addrspace(1)* %in.gep
+ %b = load <4 x i16>, <4 x i16> addrspace(1)* %b_ptr
+ %result = shl <4 x i16> %a, %b
+ store <4 x i16> %result, <4 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+; GCN-LABEL: {{^}}shl_v_imm_v4i16:
+; GCN: {{buffer|flat}}_load_dwordx2
+; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; GCN: {{buffer|flat}}_store_dwordx2
+define amdgpu_kernel void @shl_v_imm_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %in.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %in, i64 %tid.ext
+ %out.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %out, i64 %tid.ext
+ %vgpr = load <4 x i16>, <4 x i16> addrspace(1)* %in.gep
+ %result = shl <4 x i16> %vgpr, <i16 8, i16 8, i16 8, i16 8>
+ store <4 x i16> %result, <4 x i16> addrspace(1)* %out.gep
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/shl_add_constant.ll b/test/CodeGen/AMDGPU/shl_add_constant.ll
index 9b5f9fed4d79..9da4bc028016 100644
--- a/test/CodeGen/AMDGPU/shl_add_constant.ll
+++ b/test/CodeGen/AMDGPU/shl_add_constant.ll
@@ -9,7 +9,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1
; SI: v_add_i32_e32 [[RESULT:v[0-9]+]], vcc, 36, [[REG]]
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @shl_2_add_9_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @shl_2_add_9_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%ptr = getelementptr i32, i32 addrspace(1)* %in, i32 %tid.x
%val = load i32, i32 addrspace(1)* %ptr, align 4
@@ -25,7 +25,7 @@ define void @shl_2_add_9_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
; SI-DAG: buffer_store_dword [[ADDREG]]
; SI-DAG: buffer_store_dword [[SHLREG]]
; SI: s_endpgm
-define void @shl_2_add_9_i32_2_add_uses(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @shl_2_add_9_i32_2_add_uses(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %in) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%ptr = getelementptr i32, i32 addrspace(1)* %in, i32 %tid.x
%val = load i32, i32 addrspace(1)* %ptr, align 4
@@ -43,7 +43,7 @@ define void @shl_2_add_9_i32_2_add_uses(i32 addrspace(1)* %out0, i32 addrspace(1
; SI: v_add_i32_e32 [[RESULT:v[0-9]+]], vcc, 0xf9c, [[REG]]
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @shl_2_add_999_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @shl_2_add_999_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%ptr = getelementptr i32, i32 addrspace(1)* %in, i32 %tid.x
%val = load i32, i32 addrspace(1)* %ptr, align 4
@@ -61,7 +61,7 @@ define void @shl_2_add_999_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0
; SI: s_addk_i32 [[RESULT]], 0x3d8
; SI: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[RESULT]]
; SI: buffer_store_dword [[VRESULT]]
-define void @test_add_shl_add_constant(i32 addrspace(1)* %out, i32 %x, i32 %y) #0 {
+define amdgpu_kernel void @test_add_shl_add_constant(i32 addrspace(1)* %out, i32 %x, i32 %y) #0 {
%add.0 = add i32 %x, 123
%shl = shl i32 %add.0, 3
%add.1 = add i32 %shl, %y
@@ -78,7 +78,7 @@ define void @test_add_shl_add_constant(i32 addrspace(1)* %out, i32 %x, i32 %y) #
; SI: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[TMP]]
; SI: buffer_store_dword [[VRESULT]]
-define void @test_add_shl_add_constant_inv(i32 addrspace(1)* %out, i32 %x, i32 %y) #0 {
+define amdgpu_kernel void @test_add_shl_add_constant_inv(i32 addrspace(1)* %out, i32 %x, i32 %y) #0 {
%add.0 = add i32 %x, 123
%shl = shl i32 %add.0, 3
%add.1 = add i32 %y, %shl
diff --git a/test/CodeGen/AMDGPU/shl_add_ptr.ll b/test/CodeGen/AMDGPU/shl_add_ptr.ll
index 6e45759fa058..9147eb58c6ad 100644
--- a/test/CodeGen/AMDGPU/shl_add_ptr.ll
+++ b/test/CodeGen/AMDGPU/shl_add_ptr.ll
@@ -19,7 +19,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_read_b32 {{v[0-9]+}}, [[PTR]] offset:8
; SI: s_endpgm
-define void @load_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+define amdgpu_kernel void @load_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
@@ -39,7 +39,7 @@ define void @load_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %ad
; SI-DAG: buffer_store_dword [[RESULT]]
; SI-DAG: buffer_store_dword [[ADDUSE]]
; SI: s_endpgm
-define void @load_shl_base_lds_1(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+define amdgpu_kernel void @load_shl_base_lds_1(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
@@ -55,7 +55,7 @@ define void @load_shl_base_lds_1(float addrspace(1)* %out, i32 addrspace(1)* %ad
; SI-LABEL: {{^}}load_shl_base_lds_max_offset
; SI: ds_read_u8 v{{[0-9]+}}, v{{[0-9]+}} offset:65535
; SI: s_endpgm
-define void @load_shl_base_lds_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %lds, i32 addrspace(1)* %add_use) #0 {
+define amdgpu_kernel void @load_shl_base_lds_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %lds, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 65535
%arrayidx0 = getelementptr inbounds [65536 x i8], [65536 x i8] addrspace(3)* @maxlds, i32 0, i32 %idx.0
@@ -73,7 +73,7 @@ define void @load_shl_base_lds_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)
; SI: s_mov_b32 m0, -1
; SI-NEXT: ds_read2st64_b32 {{v\[[0-9]+:[0-9]+\]}}, [[PTR]] offset0:1 offset1:9
; SI: s_endpgm
-define void @load_shl_base_lds_2(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @load_shl_base_lds_2(float addrspace(1)* %out) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 64
%arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
@@ -89,7 +89,7 @@ define void @load_shl_base_lds_2(float addrspace(1)* %out) #0 {
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_write_b32 [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
-define void @store_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+define amdgpu_kernel void @store_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
@@ -104,7 +104,7 @@ define void @store_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %a
@lds2 = addrspace(3) global [512 x i32] undef, align 4
-; define void @atomic_load_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+; define amdgpu_kernel void @atomic_load_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
; %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
; %idx.0 = add nsw i32 %tid.x, 2
; %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
@@ -119,7 +119,7 @@ define void @store_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %a
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_cmpst_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, {{v[0-9]+}} offset:8
; SI: s_endpgm
-define void @atomic_cmpxchg_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use, i32 %swap) #0 {
+define amdgpu_kernel void @atomic_cmpxchg_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use, i32 %swap) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
@@ -134,7 +134,7 @@ define void @atomic_cmpxchg_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_wrxchg_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
-define void @atomic_swap_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+define amdgpu_kernel void @atomic_swap_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
@@ -148,7 +148,7 @@ define void @atomic_swap_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_add_rtn_u32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
-define void @atomic_add_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+define amdgpu_kernel void @atomic_add_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
@@ -162,7 +162,7 @@ define void @atomic_add_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)*
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_sub_rtn_u32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
-define void @atomic_sub_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+define amdgpu_kernel void @atomic_sub_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
@@ -176,7 +176,7 @@ define void @atomic_sub_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)*
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_and_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
-define void @atomic_and_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+define amdgpu_kernel void @atomic_and_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
@@ -190,7 +190,7 @@ define void @atomic_and_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)*
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_or_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
-define void @atomic_or_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+define amdgpu_kernel void @atomic_or_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
@@ -204,7 +204,7 @@ define void @atomic_or_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)*
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_xor_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
-define void @atomic_xor_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+define amdgpu_kernel void @atomic_xor_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
@@ -214,7 +214,7 @@ define void @atomic_xor_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)*
ret void
}
-; define void @atomic_nand_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+; define amdgpu_kernel void @atomic_nand_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
; %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
; %idx.0 = add nsw i32 %tid.x, 2
; %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
@@ -228,7 +228,7 @@ define void @atomic_xor_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)*
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_min_rtn_i32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
-define void @atomic_min_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+define amdgpu_kernel void @atomic_min_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
@@ -242,7 +242,7 @@ define void @atomic_min_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)*
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_max_rtn_i32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
-define void @atomic_max_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+define amdgpu_kernel void @atomic_max_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
@@ -256,7 +256,7 @@ define void @atomic_max_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)*
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_min_rtn_u32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
-define void @atomic_umin_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+define amdgpu_kernel void @atomic_umin_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
@@ -270,7 +270,7 @@ define void @atomic_umin_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_max_rtn_u32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
-define void @atomic_umax_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+define amdgpu_kernel void @atomic_umax_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
diff --git a/test/CodeGen/AMDGPU/shrink-add-sub-constant.ll b/test/CodeGen/AMDGPU/shrink-add-sub-constant.ll
new file mode 100644
index 000000000000..14ca635c6dad
--- /dev/null
+++ b/test/CodeGen/AMDGPU/shrink-add-sub-constant.ll
@@ -0,0 +1,186 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; Test that add/sub with a constant is swapped to sub/add with negated
+; constant to minimize code size.
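+;
+; A minimal sketch of the intent, assuming the usual GCN integer
+; inline-constant range of -16..64: "sub i32 %x, 64" keeps 64 as a free
+; inline operand, while "sub i32 %x, -17" is rewritten to "add i32 %x, 17"
+; because 17 is inline but -17 would require a 32-bit literal.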
+
+; GCN-LABEL: {{^}}v_test_i32_x_sub_64:
+; GCN: {{buffer|flat}}_load_dword [[X:v[0-9]+]]
+; GCN: v_subrev_i32_e32 v{{[0-9]+}}, vcc, 64, [[X]]
+define amdgpu_kernel void @v_test_i32_x_sub_64(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
+ %gep.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %tid.ext
+ %x = load i32, i32 addrspace(1)* %gep
+ %result = sub i32 %x, 64
+ store i32 %result, i32 addrspace(1)* %gep.out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_i32_x_sub_64_multi_use:
+; GCN: {{buffer|flat}}_load_dword [[X:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[Y:v[0-9]+]]
+; GCN-DAG: v_subrev_i32_e32 v{{[0-9]+}}, vcc, 64, [[X]]
+; GCN-DAG: v_subrev_i32_e32 v{{[0-9]+}}, vcc, 64, [[Y]]
+define amdgpu_kernel void @v_test_i32_x_sub_64_multi_use(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
+ %gep.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %tid.ext
+ %x = load volatile i32, i32 addrspace(1)* %gep
+ %y = load volatile i32, i32 addrspace(1)* %gep
+ %result0 = sub i32 %x, 64
+ %result1 = sub i32 %y, 64
+ store volatile i32 %result0, i32 addrspace(1)* %gep.out
+ store volatile i32 %result1, i32 addrspace(1)* %gep.out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_i32_64_sub_x:
+; GCN: {{buffer|flat}}_load_dword [[X:v[0-9]+]]
+; GCN: v_sub_i32_e32 v{{[0-9]+}}, vcc, 64, [[X]]
+define amdgpu_kernel void @v_test_i32_64_sub_x(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
+ %gep.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %tid.ext
+ %x = load i32, i32 addrspace(1)* %gep
+ %result = sub i32 64, %x
+ store i32 %result, i32 addrspace(1)* %gep.out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_i32_x_sub_65:
+; GCN: {{buffer|flat}}_load_dword [[X:v[0-9]+]]
+; GCN: v_add_i32_e32 v{{[0-9]+}}, vcc, 0xffffffbf, [[X]]
+define amdgpu_kernel void @v_test_i32_x_sub_65(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
+ %gep.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %tid.ext
+ %x = load i32, i32 addrspace(1)* %gep
+ %result = sub i32 %x, 65
+ store i32 %result, i32 addrspace(1)* %gep.out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_i32_65_sub_x:
+; GCN: {{buffer|flat}}_load_dword [[X:v[0-9]+]]
+; GCN: v_sub_i32_e32 v{{[0-9]+}}, vcc, 0x41, [[X]]
+define amdgpu_kernel void @v_test_i32_65_sub_x(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
+ %gep.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %tid.ext
+ %x = load i32, i32 addrspace(1)* %gep
+ %result = sub i32 65, %x
+ store i32 %result, i32 addrspace(1)* %gep.out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_i32_x_sub_neg16:
+; GCN: {{buffer|flat}}_load_dword [[X:v[0-9]+]]
+; GCN: v_add_i32_e32 v{{[0-9]+}}, vcc, 16, [[X]]
+define amdgpu_kernel void @v_test_i32_x_sub_neg16(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
+ %gep.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %tid.ext
+ %x = load i32, i32 addrspace(1)* %gep
+ %result = sub i32 %x, -16
+ store i32 %result, i32 addrspace(1)* %gep.out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_i32_neg16_sub_x:
+; GCN: {{buffer|flat}}_load_dword [[X:v[0-9]+]]
+; GCN: v_sub_i32_e32 v{{[0-9]+}}, vcc, -16, [[X]]
+define amdgpu_kernel void @v_test_i32_neg16_sub_x(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
+ %gep.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %tid.ext
+ %x = load i32, i32 addrspace(1)* %gep
+ %result = sub i32 -16, %x
+ store i32 %result, i32 addrspace(1)* %gep.out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_i32_x_sub_neg17:
+; GCN: {{buffer|flat}}_load_dword [[X:v[0-9]+]]
+; GCN: v_add_i32_e32 v{{[0-9]+}}, vcc, 17, [[X]]
+define amdgpu_kernel void @v_test_i32_x_sub_neg17(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
+ %gep.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %tid.ext
+ %x = load i32, i32 addrspace(1)* %gep
+ %result = sub i32 %x, -17
+ store i32 %result, i32 addrspace(1)* %gep.out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_i32_neg17_sub_x:
+; GCN: {{buffer|flat}}_load_dword [[X:v[0-9]+]]
+; GCN: v_sub_i32_e32 v{{[0-9]+}}, vcc, 0xffffffef, [[X]]
+define amdgpu_kernel void @v_test_i32_neg17_sub_x(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
+ %gep.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %tid.ext
+ %x = load i32, i32 addrspace(1)* %gep
+ %result = sub i32 -17, %x
+ store i32 %result, i32 addrspace(1)* %gep.out
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_test_i32_x_sub_64:
+; GCN: s_load_dword [[X:s[0-9]+]]
+; GCN: s_sub_i32 s{{[0-9]+}}, [[X]], 64
+define amdgpu_kernel void @s_test_i32_x_sub_64(i32 %x) #0 {
+ %result = sub i32 %x, 64
+ call void asm sideeffect "; use $0", "s"(i32 %result)
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_i16_x_sub_64:
+; VI: {{buffer|flat}}_load_ushort [[X:v[0-9]+]]
+; VI: v_subrev_u16_e32 v{{[0-9]+}}, 64, [[X]]
+define amdgpu_kernel void @v_test_i16_x_sub_64(i16 addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %gep = getelementptr inbounds i16, i16 addrspace(1)* %in, i64 %tid.ext
+ %gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i64 %tid.ext
+ %x = load i16, i16 addrspace(1)* %gep
+ %result = sub i16 %x, 64
+ store i16 %result, i16 addrspace(1)* %gep.out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_i16_x_sub_64_multi_use:
+; GCN: {{buffer|flat}}_load_ushort [[X:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_ushort [[Y:v[0-9]+]]
+; VI-DAG: v_subrev_u16_e32 v{{[0-9]+}}, 64, [[X]]
+; VI-DAG: v_subrev_u16_e32 v{{[0-9]+}}, 64, [[Y]]
+
+; SI-DAG: v_subrev_i32_e32 v{{[0-9]+}}, vcc, 64, [[X]]
+; SI-DAG: v_subrev_i32_e32 v{{[0-9]+}}, vcc, 64, [[Y]]
+define amdgpu_kernel void @v_test_i16_x_sub_64_multi_use(i16 addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %gep = getelementptr inbounds i16, i16 addrspace(1)* %in, i64 %tid.ext
+ %gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i64 %tid.ext
+ %x = load volatile i16, i16 addrspace(1)* %gep
+ %y = load volatile i16, i16 addrspace(1)* %gep
+ %result0 = sub i16 %x, 64
+ %result1 = sub i16 %y, 64
+ store volatile i16 %result0, i16 addrspace(1)* %gep.out
+ store volatile i16 %result1, i16 addrspace(1)* %gep.out
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir b/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir
index 1988a14b5845..6248d8a46daf 100644
--- a/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir
+++ b/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir
@@ -7,7 +7,7 @@
# resume crashes
--- |
- define void @shrink_add_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ define amdgpu_kernel void @shrink_add_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
@@ -20,7 +20,7 @@
ret void
}
- define void @shrink_sub_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ define amdgpu_kernel void @shrink_sub_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
@@ -33,7 +33,7 @@
ret void
}
- define void @shrink_subrev_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ define amdgpu_kernel void @shrink_subrev_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
@@ -46,7 +46,7 @@
ret void
}
- define void @check_addc_src2_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ define amdgpu_kernel void @check_addc_src2_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
@@ -59,7 +59,7 @@
ret void
}
- define void @shrink_addc_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ define amdgpu_kernel void @shrink_addc_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
@@ -72,7 +72,7 @@
ret void
}
- define void @shrink_addc_undef_vcc(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ define amdgpu_kernel void @shrink_addc_undef_vcc(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
diff --git a/test/CodeGen/AMDGPU/si-annotate-cf-noloop.ll b/test/CodeGen/AMDGPU/si-annotate-cf-noloop.ll
index ef616eb63801..5c6663dbbdab 100644
--- a/test/CodeGen/AMDGPU/si-annotate-cf-noloop.ll
+++ b/test/CodeGen/AMDGPU/si-annotate-cf-noloop.ll
@@ -6,10 +6,10 @@
; OPT-NOT: call i1 @llvm.amdgcn.loop
; GCN-LABEL: {{^}}annotate_unreachable_noloop:
-; GCN: s_cbranch_vccnz
+; GCN: s_cbranch_scc1
; GCN-NOT: s_endpgm
; GCN: .Lfunc_end0
-define void @annotate_unreachable_noloop(<4 x float> addrspace(1)* noalias nocapture readonly %arg) #0 {
+define amdgpu_kernel void @annotate_unreachable_noloop(<4 x float> addrspace(1)* noalias nocapture readonly %arg) #0 {
bb:
%tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -37,12 +37,49 @@ bb5: ; preds = %bb3, %bb1
; OPT-NOT: call i1 @llvm.amdgcn.loop
; GCN-LABEL: {{^}}annotate_ret_noloop:
+; GCN: load_dwordx4
+; GCN: v_cmp_nlt_f32
+; GCN: s_and_saveexec_b64
+; GCN: ; mask branch [[UNIFIED_RET:BB[0-9]+_[0-9]+]]
+; GCN-NEXT: [[UNIFIED_RET]]:
+; GCN-NEXT: s_or_b64 exec, exec
+; GCN-NEXT: s_endpgm
+; GCN: .Lfunc_end
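+; (A "mask branch" here is the SI control-flow lowering's skip branch: it
+; jumps past the divergent region when the saved exec mask is zero, and the
+; unified return block restores exec before s_endpgm.)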
+define amdgpu_kernel void @annotate_ret_noloop(<4 x float> addrspace(1)* noalias nocapture readonly %arg) #0 {
+bb:
+ %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+
+bb1: ; preds = %bb
+ %tmp2 = sext i32 %tmp to i64
+ %tmp3 = getelementptr inbounds <4 x float>, <4 x float> addrspace(1)* %arg, i64 %tmp2
+ %tmp4 = load <4 x float>, <4 x float> addrspace(1)* %tmp3, align 16
+ %tmp5 = extractelement <4 x float> %tmp4, i32 1
+ store volatile <4 x float> %tmp4, <4 x float> addrspace(1)* undef
+ %cmp = fcmp ogt float %tmp5, 1.0
+ br i1 %cmp, label %bb5, label %bb3
+
+bb3: ; preds = %bb1
+ %tmp6 = extractelement <4 x float> %tmp4, i32 2
+ %tmp7 = fcmp olt float %tmp6, 0.000000e+00
+ br i1 %tmp7, label %bb4, label %bb5 ; crash goes away if these are swapped
+
+bb4: ; preds = %bb3
+ ret void
+
+bb5: ; preds = %bb3, %bb1
+ ret void
+}
+
+; OPT-LABEL: @uniform_annotate_ret_noloop(
+; OPT-NOT: call i1 @llvm.amdgcn.loop
+
+; GCN-LABEL: {{^}}uniform_annotate_ret_noloop:
; GCN: s_cbranch_scc1
; GCN: s_endpgm
-; GCN: .Lfunc_end1
-define void @annotate_ret_noloop(<4 x float> addrspace(1)* noalias nocapture readonly %arg) #0 {
+; GCN: .Lfunc_end
+define amdgpu_kernel void @uniform_annotate_ret_noloop(<4 x float> addrspace(1)* noalias nocapture readonly %arg, i32 %tmp) #0 {
bb:
- %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
bb1: ; preds = %bb
diff --git a/test/CodeGen/AMDGPU/si-annotate-cf-unreachable.ll b/test/CodeGen/AMDGPU/si-annotate-cf-unreachable.ll
new file mode 100644
index 000000000000..e50c595bc6c3
--- /dev/null
+++ b/test/CodeGen/AMDGPU/si-annotate-cf-unreachable.ll
@@ -0,0 +1,40 @@
+; RUN: opt -mtriple=amdgcn-- -S -structurizecfg -si-annotate-control-flow %s | FileCheck -check-prefix=OPT %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+
+; OPT-LABEL: @annotate_unreachable(
+; OPT: call { i1, i64 } @llvm.amdgcn.if(
+; OPT-NOT: call void @llvm.amdgcn.end.cf(
+
+
+; GCN-LABEL: {{^}}annotate_unreachable:
+; GCN: s_and_saveexec_b64
+; GCN-NOT: s_endpgm
+; GCN: .Lfunc_end0
+define amdgpu_kernel void @annotate_unreachable(<4 x float> addrspace(1)* noalias nocapture readonly %arg) #0 {
+bb:
+ %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+
+bb1: ; preds = %bb
+ %tmp2 = sext i32 %tmp to i64
+ %tmp3 = getelementptr inbounds <4 x float>, <4 x float> addrspace(1)* %arg, i64 %tmp2
+ %tmp4 = load <4 x float>, <4 x float> addrspace(1)* %tmp3, align 16
+ br i1 undef, label %bb3, label %bb5 ; label order reversed
+
+bb3: ; preds = %bb1
+ %tmp6 = extractelement <4 x float> %tmp4, i32 2
+ %tmp7 = fcmp olt float %tmp6, 0.000000e+00
+ br i1 %tmp7, label %bb4, label %bb5
+
+bb4: ; preds = %bb3
+ unreachable
+
+bb5: ; preds = %bb3, %bb1
+ unreachable
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/si-annotate-cf.ll b/test/CodeGen/AMDGPU/si-annotate-cf.ll
index d658b229fd37..a4b6d1fd069d 100644
--- a/test/CodeGen/AMDGPU/si-annotate-cf.ll
+++ b/test/CodeGen/AMDGPU/si-annotate-cf.ll
@@ -10,7 +10,7 @@
; SI: s_andn2_b64
; s_cbranch_execnz [[LOOP_LABEL]]
; SI: s_endpgm
-define void @break_inserted_outside_of_loop(i32 addrspace(1)* %out, i32 %a) {
+define amdgpu_kernel void @break_inserted_outside_of_loop(i32 addrspace(1)* %out, i32 %a) {
main_body:
%tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
%0 = and i32 %a, %tid
@@ -40,7 +40,7 @@ ENDIF:
; SI: s_cbranch_execnz [[LOOP_LABEL]]
; SI: s_endpgm
-define void @phi_cond_outside_loop(i32 %b) {
+define amdgpu_kernel void @phi_cond_outside_loop(i32 %b) {
entry:
%tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
%0 = icmp eq i32 %tid , 0
@@ -68,7 +68,7 @@ exit:
; CHECK-LABEL: {{^}}switch_unreachable:
; CHECK-NOT: s_endpgm
; CHECK: .Lfunc_end2
-define void @switch_unreachable(i32 addrspace(1)* %g, i8 addrspace(3)* %l, i32 %x) nounwind {
+define amdgpu_kernel void @switch_unreachable(i32 addrspace(1)* %g, i8 addrspace(3)* %l, i32 %x) nounwind {
centry:
switch i32 %x, label %sw.default [
i32 0, label %sw.bb
@@ -100,7 +100,7 @@ declare float @llvm.fabs.f32(float) nounwind readnone
; SI: [[ENDPGM]]:
; SI: s_endpgm
-define void @loop_land_info_assert(i32 %c0, i32 %c1, i32 %c2, i32 %c3, i32 %x, i32 %y, i1 %arg) nounwind {
+define amdgpu_kernel void @loop_land_info_assert(i32 %c0, i32 %c1, i32 %c2, i32 %c3, i32 %x, i32 %y, i1 %arg) nounwind {
entry:
%cmp = icmp sgt i32 %c0, 0
br label %while.cond.outer
diff --git a/test/CodeGen/AMDGPU/si-annotate-cfg-loop-assert.ll b/test/CodeGen/AMDGPU/si-annotate-cfg-loop-assert.ll
index 025a3d8fca2e..b0473f3b5bda 100644
--- a/test/CodeGen/AMDGPU/si-annotate-cfg-loop-assert.ll
+++ b/test/CodeGen/AMDGPU/si-annotate-cfg-loop-assert.ll
@@ -6,7 +6,7 @@
; CHECK: s_or_b64 exec, exec
; CHECK: s_andn2_b64 exec, exec
; CHECK: s_cbranch_execnz
-define void @test(i32 %arg, i32 %arg1) {
+define amdgpu_kernel void @test(i32 %arg, i32 %arg1) {
bb:
%tmp = icmp ne i32 %arg, 0
%tmp7 = icmp ne i32 %arg1, 0
diff --git a/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir b/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir
index 0c08deb13a8e..20052e865a54 100644
--- a/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir
+++ b/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir
@@ -1,7 +1,7 @@
# RUN: llc -march=amdgcn -run-pass si-fix-sgpr-copies %s -o - | FileCheck %s -check-prefixes=GCN
--- |
- define void @phi_visit_order() { ret void }
+ define amdgpu_kernel void @phi_visit_order() { ret void }
name: phi_visit_order
tracksRegLiveness: true
diff --git a/test/CodeGen/AMDGPU/si-instr-info-correct-implicit-operands.ll b/test/CodeGen/AMDGPU/si-instr-info-correct-implicit-operands.ll
index 0d1de6662f25..580268deb85d 100644
--- a/test/CodeGen/AMDGPU/si-instr-info-correct-implicit-operands.ll
+++ b/test/CodeGen/AMDGPU/si-instr-info-correct-implicit-operands.ll
@@ -5,7 +5,7 @@
; CHECK: %{{[0-9]+}} = V_ADD_I32_e32 %{{[0-9]+}}, %{{[0-9]+}}, implicit-def %vcc, implicit %exec
-define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load volatile i32, i32 addrspace(1)* %in
diff --git a/test/CodeGen/AMDGPU/si-literal-folding.ll b/test/CodeGen/AMDGPU/si-literal-folding.ll
deleted file mode 100644
index b3f000c8ccd2..000000000000
--- a/test/CodeGen/AMDGPU/si-literal-folding.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
-
-; GCN-LABEL: {{^}}main:
-; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0x3f4353f8, v{{[0-9]+}}
-; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0xbf4353f8, v{{[0-9]+}}
-define amdgpu_vs void @main(float) {
-main_body:
- %1 = fmul float %0, 0x3FE86A7F00000000
- %2 = fmul float %0, 0xBFE86A7F00000000
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %1, float %1, float %2, float %2)
- ret void
-}
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
diff --git a/test/CodeGen/AMDGPU/si-lod-bias.ll b/test/CodeGen/AMDGPU/si-lod-bias.ll
index 8e846d7a238e..3a7359ea4ffa 100644
--- a/test/CodeGen/AMDGPU/si-lod-bias.ll
+++ b/test/CodeGen/AMDGPU/si-lod-bias.ll
@@ -1,12 +1,12 @@
-;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
-;RUN: llc < %s -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; This shader has the potential to generate illegal VGPR to SGPR copies if
; the wrong register class is used for the REG_SEQUENCE instructions.
-; CHECK: {{^}}main:
-; CHECK: image_sample_b v{{\[[0-9]:[0-9]\]}}, v{{\[[0-9]:[0-9]\]}}, s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] dmask:0xf
-define amdgpu_ps void @main(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) {
+; GCN-LABEL: {{^}}main:
+; GCN: image_sample_b v{{\[[0-9]:[0-9]\]}}, v{{\[[0-9]:[0-9]\]}}, s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] dmask:0xf
+define amdgpu_ps void @main(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
%tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
%tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
@@ -15,38 +15,45 @@ main_body:
%tmp23 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp22, !tbaa !0
%tmp24 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg1, i32 0
%tmp25 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp24, !tbaa !0
- %tmp26 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %arg3, <2 x i32> %arg5)
- %tmp27 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %arg3, <2 x i32> %arg5)
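+ ; The legacy llvm.SI.fs.interp call is expanded into the two-stage
+ ; llvm.amdgcn.interp.p1/p2 pair: p1 consumes the i coordinate for the
+ ; given channel/attribute, and p2 combines its result with the j
+ ; coordinate to produce the interpolated value.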
+ %i.i = extractelement <2 x i32> %arg5, i32 0
+ %j.i = extractelement <2 x i32> %arg5, i32 1
+ %i.f.i = bitcast i32 %i.i to float
+ %j.f.i = bitcast i32 %j.i to float
+ %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 0, i32 0, i32 %arg3) #0
+ %p2.i = call float @llvm.amdgcn.interp.p2(float %p1.i, float %j.f.i, i32 0, i32 0, i32 %arg3) #0
+ %i.i1 = extractelement <2 x i32> %arg5, i32 0
+ %j.i2 = extractelement <2 x i32> %arg5, i32 1
+ %i.f.i3 = bitcast i32 %i.i1 to float
+ %j.f.i4 = bitcast i32 %j.i2 to float
+ %p1.i5 = call float @llvm.amdgcn.interp.p1(float %i.f.i3, i32 1, i32 0, i32 %arg3) #0
+ %p2.i6 = call float @llvm.amdgcn.interp.p2(float %p1.i5, float %j.f.i4, i32 1, i32 0, i32 %arg3) #0
%tmp28 = bitcast float %tmp21 to i32
- %tmp29 = bitcast float %tmp26 to i32
- %tmp30 = bitcast float %tmp27 to i32
+ %tmp29 = bitcast float %p2.i to i32
+ %tmp30 = bitcast float %p2.i6 to i32
%tmp31 = insertelement <4 x i32> undef, i32 %tmp28, i32 0
%tmp32 = insertelement <4 x i32> %tmp31, i32 %tmp29, i32 1
%tmp33 = insertelement <4 x i32> %tmp32, i32 %tmp30, i32 2
%tmp34 = insertelement <4 x i32> %tmp33, i32 undef, i32 3
%tmp25.bc = bitcast <16 x i8> %tmp25 to <4 x i32>
- %tmp35 = call <4 x float> @llvm.SI.image.sample.b.v4i32(<4 x i32> %tmp34, <8 x i32> %tmp23, <4 x i32> %tmp25.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp34.bc = bitcast <4 x i32> %tmp34 to <4 x float>
+ %tmp35 = call <4 x float> @llvm.amdgcn.image.sample.b.v4f32.v4f32.v8i32(<4 x float> %tmp34.bc, <8 x i32> %tmp23, <4 x i32> %tmp25.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
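+ ; Note: the amdgcn image.sample intrinsics take float-typed coordinates
+ ; and i1 modifier flags (all false here) instead of the packed i32
+ ; arguments of the old llvm.SI.image.sample form, hence the extra
+ ; bitcast of the coordinate vector above.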
%tmp36 = extractelement <4 x float> %tmp35, i32 0
%tmp37 = extractelement <4 x float> %tmp35, i32 1
%tmp38 = extractelement <4 x float> %tmp35, i32 2
%tmp39 = extractelement <4 x float> %tmp35, i32 3
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %tmp36, float %tmp37, float %tmp38, float %tmp39)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp36, float %tmp37, float %tmp38, float %tmp39, i1 true, i1 true) #0
ret void
}
-; Function Attrs: nounwind readnone
+declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #1
+declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #1
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
+declare <4 x float> @llvm.amdgcn.image.sample.b.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
declare float @llvm.SI.load.const(<16 x i8>, i32) #1
-; Function Attrs: nounwind readnone
-declare float @llvm.SI.fs.interp(i32, i32, i32, <2 x i32>) #1
-
-declare <4 x float> @llvm.SI.image.sample.b.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
-
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-
-
+attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind readonly }
!0 = !{!1, !1, i64 0, i32 1}
!1 = !{!"const", !2}
diff --git a/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll b/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll
index 8d66df258e43..cb010cf15300 100644
--- a/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll
+++ b/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll
@@ -4,17 +4,18 @@
; GCN: v_cmp_eq_u32
; GCN: s_and_saveexec_b64
; GCN: s_xor_b64
-; GCN: ; mask branch [[RET:BB[0-9]+]]
-; GCN: s_branch [[UNREACHABLE:BB[0-9]+_[0-9]+]]
+; GCN: ; mask branch [[RET:BB[0-9]+_[0-9]+]]
-; GCN: [[RET]]
-; GCN: s_or_b64 exec, exec
-; GCN: s_endpgm
-
-; GCN: [[UNREACHABLE]]:
+; GCN-NEXT: BB{{[0-9]+_[0-9]+}}: ; %unreachable
; GCN: ds_write_b32
+; GCN: ; divergent unreachable
; GCN: s_waitcnt
-define void @lower_control_flow_unreachable_terminator() #0 {
+
+; GCN-NEXT: [[RET]]: ; %UnifiedReturnBlock
+; GCN-NEXT: s_or_b64 exec, exec
+; GCN: s_endpgm
+
+define amdgpu_kernel void @lower_control_flow_unreachable_terminator() #0 {
bb:
%tmp15 = tail call i32 @llvm.amdgcn.workitem.id.y()
%tmp63 = icmp eq i32 %tmp15, 32
@@ -29,19 +30,20 @@ ret:
}
; GCN-LABEL: {{^}}lower_control_flow_unreachable_terminator_swap_block_order:
-; GCN: v_cmp_eq_u32
+; GCN: v_cmp_ne_u32
; GCN: s_and_saveexec_b64
; GCN: s_xor_b64
-; GCN: ; mask branch [[UNREACHABLE:BB[0-9]+_[0-9]+]]
+; GCN: ; mask branch [[RETURN:BB[0-9]+_[0-9]+]]
-; GCN-NEXT: ; %ret
-; GCN-NEXT: s_endpgm
-
-; GCN-NEXT: [[UNREACHABLE]]:
-; GCN-NEXT: s_or_b64 exec, exec
+; GCN-NEXT: {{^BB[0-9]+_[0-9]+}}: ; %unreachable
; GCN: ds_write_b32
+; GCN: ; divergent unreachable
; GCN: s_waitcnt
-define void @lower_control_flow_unreachable_terminator_swap_block_order() #0 {
+
+; GCN: [[RETURN]]:
+; GCN-NEXT: s_or_b64 exec, exec
+; GCN-NEXT: s_endpgm
+define amdgpu_kernel void @lower_control_flow_unreachable_terminator_swap_block_order() #0 {
bb:
%tmp15 = tail call i32 @llvm.amdgcn.workitem.id.y()
%tmp63 = icmp eq i32 %tmp15, 32
@@ -55,7 +57,29 @@ unreachable:
unreachable
}
-; Function Attrs: nounwind readnone
+; GCN-LABEL: {{^}}uniform_lower_control_flow_unreachable_terminator:
+; GCN: s_cmp_lg_u32
+; GCN: s_cbranch_scc0 [[UNREACHABLE:BB[0-9]+_[0-9]+]]
+
+; GCN-NEXT: BB#{{[0-9]+}}: ; %ret
+; GCN-NEXT: s_endpgm
+
+; GCN: [[UNREACHABLE]]:
+; GCN: ds_write_b32
+; GCN: s_waitcnt
+define amdgpu_kernel void @uniform_lower_control_flow_unreachable_terminator(i32 %arg0) #0 {
+bb:
+ %tmp63 = icmp eq i32 %arg0, 32
+ br i1 %tmp63, label %unreachable, label %ret
+
+unreachable:
+ store volatile i32 0, i32 addrspace(3)* undef, align 4
+ unreachable
+
+ret:
+ ret void
+}
+
declare i32 @llvm.amdgcn.workitem.id.y() #1
attributes #0 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/si-scheduler.ll b/test/CodeGen/AMDGPU/si-scheduler.ll
index 9374ef3cd907..462528c4ff1a 100644
--- a/test/CodeGen/AMDGPU/si-scheduler.ll
+++ b/test/CodeGen/AMDGPU/si-scheduler.ll
@@ -3,7 +3,7 @@
; The only way the subtarget knows that the si machine scheduler is being used
; is to specify -mattr=si-scheduler. If we just pass --misched=si, the backend
; won't know what scheduler we are using.
-; RUN: llc -march=amdgcn -mcpu=SI --misched=si -mattr=si-scheduler < %s | FileCheck %s
+; RUN: llc -march=amdgcn --misched=si -mattr=si-scheduler < %s | FileCheck %s
; The test checks the "si" machine scheduler pass works correctly.
@@ -22,39 +22,46 @@ main_body:
%tmp22 = load <32 x i8>, <32 x i8> addrspace(2)* %tmp, align 32, !tbaa !0
%tmp23 = bitcast [17 x <4 x i32>] addrspace(2)* %arg2 to <16 x i8> addrspace(2)*
%tmp24 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp23, align 16, !tbaa !0
- %tmp25 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %arg5, <2 x i32> %arg11)
- %tmp26 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %arg5, <2 x i32> %arg11)
- %tmp27 = bitcast float %tmp25 to i32
- %tmp28 = bitcast float %tmp26 to i32
+ %i.i = extractelement <2 x i32> %arg11, i32 0
+ %j.i = extractelement <2 x i32> %arg11, i32 1
+ %i.f.i = bitcast i32 %i.i to float
+ %j.f.i = bitcast i32 %j.i to float
+ %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 0, i32 0, i32 %arg5) #1
+ %p2.i = call float @llvm.amdgcn.interp.p2(float %p1.i, float %j.f.i, i32 0, i32 0, i32 %arg5) #1
+ %i.i1 = extractelement <2 x i32> %arg11, i32 0
+ %j.i2 = extractelement <2 x i32> %arg11, i32 1
+ %i.f.i3 = bitcast i32 %i.i1 to float
+ %j.f.i4 = bitcast i32 %j.i2 to float
+ %p1.i5 = call float @llvm.amdgcn.interp.p1(float %i.f.i3, i32 1, i32 0, i32 %arg5) #1
+ %p2.i6 = call float @llvm.amdgcn.interp.p2(float %p1.i5, float %j.f.i4, i32 1, i32 0, i32 %arg5) #1
+ %tmp27 = bitcast float %p2.i to i32
+ %tmp28 = bitcast float %p2.i6 to i32
%tmp29 = insertelement <2 x i32> undef, i32 %tmp27, i32 0
%tmp30 = insertelement <2 x i32> %tmp29, i32 %tmp28, i32 1
%tmp22.bc = bitcast <32 x i8> %tmp22 to <8 x i32>
%tmp24.bc = bitcast <16 x i8> %tmp24 to <4 x i32>
- %tmp31 = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> %tmp30, <8 x i32> %tmp22.bc, <4 x i32> %tmp24.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp30.bc = bitcast <2 x i32> %tmp30 to <2 x float>
+ %tmp31 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %tmp30.bc, <8 x i32> %tmp22.bc, <4 x i32> %tmp24.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
+
%tmp32 = extractelement <4 x float> %tmp31, i32 0
%tmp33 = extractelement <4 x float> %tmp31, i32 1
%tmp34 = extractelement <4 x float> %tmp31, i32 2
%tmp35 = extractelement <4 x float> %tmp31, i32 3
- %tmp36 = call i32 @llvm.SI.packf16(float %tmp32, float %tmp33)
- %tmp37 = bitcast i32 %tmp36 to float
- %tmp38 = call i32 @llvm.SI.packf16(float %tmp34, float %tmp35)
- %tmp39 = bitcast i32 %tmp38 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %tmp37, float %tmp39, float %tmp37, float %tmp39)
+ %tmp36 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %tmp32, float %tmp33)
+ %tmp38 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %tmp34, float %tmp35)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp36, <2 x half> %tmp38, i1 true, i1 false) #0
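+ ; Note: llvm.amdgcn.cvt.pkrtz packs two floats into <2 x half> with
+ ; round-toward-zero, and llvm.amdgcn.exp.compr.v2f16 exports the
+ ; compressed pairs directly, covering what packf16 + SI.export did.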
ret void
}
-; Function Attrs: nounwind readnone
-declare float @llvm.SI.fs.interp(i32, i32, i32, <2 x i32>) #1
-
-declare <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
-
-
-; Function Attrs: nounwind readnone
-declare i32 @llvm.SI.packf16(float, float) #1
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #1
+declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #1
+declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
+declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
+declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
+attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind readonly }
!0 = !{!1, !1, i64 0, i32 1}
!1 = !{!"const", !2}
diff --git a/test/CodeGen/AMDGPU/si-sgpr-spill.ll b/test/CodeGen/AMDGPU/si-sgpr-spill.ll
index e61b4051124a..8731e74d63a0 100644
--- a/test/CodeGen/AMDGPU/si-sgpr-spill.ll
+++ b/test/CodeGen/AMDGPU/si-sgpr-spill.ll
@@ -1,27 +1,29 @@
-; RUN: llc -march=amdgcn -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=TOVGPR %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=+vgpr-spilling,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; These tests check that the compiler won't crash when it needs to spill
; SGPRs.
-
@ddxy_lds = external addrspace(3) global [64 x i32]
-; CHECK-LABEL: {{^}}main:
-; CHECK: s_wqm
+; GCN-LABEL: {{^}}main:
+; GCN: s_wqm
; Make sure we don't emit the unused scratch resource descriptor setup
-; CHECK-NOT: s_mov_b32
-; CHECK-NOT: s_mov_b32
-; CHECK-NOT: s_mov_b32
-; CHECK-NOT: s_mov_b32
+; GCN-NOT: s_mov_b32
+; GCN-NOT: s_mov_b32
+; GCN-NOT: s_mov_b32
+; GCN-NOT: s_mov_b32
-; CHECK: s_mov_b32 m0
+; GCN: s_mov_b32 m0
+; Make sure scratch space isn't being used for SGPR->VGPR spills
; Writing to M0 from an SMRD instruction will hang the GPU.
-; CHECK-NOT: s_buffer_load_dword m0
-; CHECK: s_endpgm
+; GCN-NOT: s_buffer_load_dword m0
+; GCN: s_endpgm
+
+; TOVGPR: ScratchSize: 0{{$}}
define amdgpu_ps void @main([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) {
main_body:
%tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg, i64 0, i32 0
@@ -97,29 +99,114 @@ main_body:
%tmp89 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp88, !tbaa !0
%tmp90 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 7
%tmp91 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp90, !tbaa !0
- %tmp92 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %arg4, <2 x i32> %arg6)
- %tmp93 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %arg4, <2 x i32> %arg6)
- %tmp94 = call float @llvm.SI.fs.interp(i32 0, i32 1, i32 %arg4, <2 x i32> %arg6)
- %tmp95 = call float @llvm.SI.fs.interp(i32 1, i32 1, i32 %arg4, <2 x i32> %arg6)
- %tmp96 = call float @llvm.SI.fs.interp(i32 2, i32 1, i32 %arg4, <2 x i32> %arg6)
- %tmp97 = call float @llvm.SI.fs.interp(i32 0, i32 2, i32 %arg4, <2 x i32> %arg6)
- %tmp98 = call float @llvm.SI.fs.interp(i32 1, i32 2, i32 %arg4, <2 x i32> %arg6)
- %tmp99 = call float @llvm.SI.fs.interp(i32 2, i32 2, i32 %arg4, <2 x i32> %arg6)
- %tmp100 = call float @llvm.SI.fs.interp(i32 0, i32 3, i32 %arg4, <2 x i32> %arg6)
- %tmp101 = call float @llvm.SI.fs.interp(i32 1, i32 3, i32 %arg4, <2 x i32> %arg6)
- %tmp102 = call float @llvm.SI.fs.interp(i32 2, i32 3, i32 %arg4, <2 x i32> %arg6)
- %tmp103 = call float @llvm.SI.fs.interp(i32 0, i32 4, i32 %arg4, <2 x i32> %arg6)
- %tmp104 = call float @llvm.SI.fs.interp(i32 1, i32 4, i32 %arg4, <2 x i32> %arg6)
- %tmp105 = call float @llvm.SI.fs.interp(i32 2, i32 4, i32 %arg4, <2 x i32> %arg6)
- %tmp106 = call float @llvm.SI.fs.interp(i32 0, i32 5, i32 %arg4, <2 x i32> %arg6)
- %tmp107 = call float @llvm.SI.fs.interp(i32 1, i32 5, i32 %arg4, <2 x i32> %arg6)
- %tmp108 = call float @llvm.SI.fs.interp(i32 2, i32 5, i32 %arg4, <2 x i32> %arg6)
+ %i.i = extractelement <2 x i32> %arg6, i32 0
+ %j.i = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i = bitcast i32 %i.i to float
+ %j.f.i = bitcast i32 %j.i to float
+ %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 0, i32 0, i32 %arg4) #0
+ %p2.i = call float @llvm.amdgcn.interp.p2(float %p1.i, float %j.f.i, i32 0, i32 0, i32 %arg4) #0
+ %i.i91 = extractelement <2 x i32> %arg6, i32 0
+ %j.i92 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i93 = bitcast i32 %i.i91 to float
+ %j.f.i94 = bitcast i32 %j.i92 to float
+ %p1.i95 = call float @llvm.amdgcn.interp.p1(float %i.f.i93, i32 1, i32 0, i32 %arg4) #0
+ %p2.i96 = call float @llvm.amdgcn.interp.p2(float %p1.i95, float %j.f.i94, i32 1, i32 0, i32 %arg4) #0
+ %i.i85 = extractelement <2 x i32> %arg6, i32 0
+ %j.i86 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i87 = bitcast i32 %i.i85 to float
+ %j.f.i88 = bitcast i32 %j.i86 to float
+ %p1.i89 = call float @llvm.amdgcn.interp.p1(float %i.f.i87, i32 0, i32 1, i32 %arg4) #0
+ %p2.i90 = call float @llvm.amdgcn.interp.p2(float %p1.i89, float %j.f.i88, i32 0, i32 1, i32 %arg4) #0
+ %i.i79 = extractelement <2 x i32> %arg6, i32 0
+ %j.i80 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i81 = bitcast i32 %i.i79 to float
+ %j.f.i82 = bitcast i32 %j.i80 to float
+ %p1.i83 = call float @llvm.amdgcn.interp.p1(float %i.f.i81, i32 1, i32 1, i32 %arg4) #0
+ %p2.i84 = call float @llvm.amdgcn.interp.p2(float %p1.i83, float %j.f.i82, i32 1, i32 1, i32 %arg4) #0
+ %i.i73 = extractelement <2 x i32> %arg6, i32 0
+ %j.i74 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i75 = bitcast i32 %i.i73 to float
+ %j.f.i76 = bitcast i32 %j.i74 to float
+ %p1.i77 = call float @llvm.amdgcn.interp.p1(float %i.f.i75, i32 2, i32 1, i32 %arg4) #0
+ %p2.i78 = call float @llvm.amdgcn.interp.p2(float %p1.i77, float %j.f.i76, i32 2, i32 1, i32 %arg4) #0
+ %i.i67 = extractelement <2 x i32> %arg6, i32 0
+ %j.i68 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i69 = bitcast i32 %i.i67 to float
+ %j.f.i70 = bitcast i32 %j.i68 to float
+ %p1.i71 = call float @llvm.amdgcn.interp.p1(float %i.f.i69, i32 0, i32 2, i32 %arg4) #0
+ %p2.i72 = call float @llvm.amdgcn.interp.p2(float %p1.i71, float %j.f.i70, i32 0, i32 2, i32 %arg4) #0
+ %i.i61 = extractelement <2 x i32> %arg6, i32 0
+ %j.i62 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i63 = bitcast i32 %i.i61 to float
+ %j.f.i64 = bitcast i32 %j.i62 to float
+ %p1.i65 = call float @llvm.amdgcn.interp.p1(float %i.f.i63, i32 1, i32 2, i32 %arg4) #0
+ %p2.i66 = call float @llvm.amdgcn.interp.p2(float %p1.i65, float %j.f.i64, i32 1, i32 2, i32 %arg4) #0
+ %i.i55 = extractelement <2 x i32> %arg6, i32 0
+ %j.i56 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i57 = bitcast i32 %i.i55 to float
+ %j.f.i58 = bitcast i32 %j.i56 to float
+ %p1.i59 = call float @llvm.amdgcn.interp.p1(float %i.f.i57, i32 2, i32 2, i32 %arg4) #0
+ %p2.i60 = call float @llvm.amdgcn.interp.p2(float %p1.i59, float %j.f.i58, i32 2, i32 2, i32 %arg4) #0
+ %i.i49 = extractelement <2 x i32> %arg6, i32 0
+ %j.i50 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i51 = bitcast i32 %i.i49 to float
+ %j.f.i52 = bitcast i32 %j.i50 to float
+ %p1.i53 = call float @llvm.amdgcn.interp.p1(float %i.f.i51, i32 0, i32 3, i32 %arg4) #0
+ %p2.i54 = call float @llvm.amdgcn.interp.p2(float %p1.i53, float %j.f.i52, i32 0, i32 3, i32 %arg4) #0
+ %i.i43 = extractelement <2 x i32> %arg6, i32 0
+ %j.i44 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i45 = bitcast i32 %i.i43 to float
+ %j.f.i46 = bitcast i32 %j.i44 to float
+ %p1.i47 = call float @llvm.amdgcn.interp.p1(float %i.f.i45, i32 1, i32 3, i32 %arg4) #0
+ %p2.i48 = call float @llvm.amdgcn.interp.p2(float %p1.i47, float %j.f.i46, i32 1, i32 3, i32 %arg4) #0
+ %i.i37 = extractelement <2 x i32> %arg6, i32 0
+ %j.i38 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i39 = bitcast i32 %i.i37 to float
+ %j.f.i40 = bitcast i32 %j.i38 to float
+ %p1.i41 = call float @llvm.amdgcn.interp.p1(float %i.f.i39, i32 2, i32 3, i32 %arg4) #0
+ %p2.i42 = call float @llvm.amdgcn.interp.p2(float %p1.i41, float %j.f.i40, i32 2, i32 3, i32 %arg4) #0
+ %i.i31 = extractelement <2 x i32> %arg6, i32 0
+ %j.i32 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i33 = bitcast i32 %i.i31 to float
+ %j.f.i34 = bitcast i32 %j.i32 to float
+ %p1.i35 = call float @llvm.amdgcn.interp.p1(float %i.f.i33, i32 0, i32 4, i32 %arg4) #0
+ %p2.i36 = call float @llvm.amdgcn.interp.p2(float %p1.i35, float %j.f.i34, i32 0, i32 4, i32 %arg4) #0
+ %i.i25 = extractelement <2 x i32> %arg6, i32 0
+ %j.i26 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i27 = bitcast i32 %i.i25 to float
+ %j.f.i28 = bitcast i32 %j.i26 to float
+ %p1.i29 = call float @llvm.amdgcn.interp.p1(float %i.f.i27, i32 1, i32 4, i32 %arg4) #0
+ %p2.i30 = call float @llvm.amdgcn.interp.p2(float %p1.i29, float %j.f.i28, i32 1, i32 4, i32 %arg4) #0
+ %i.i19 = extractelement <2 x i32> %arg6, i32 0
+ %j.i20 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i21 = bitcast i32 %i.i19 to float
+ %j.f.i22 = bitcast i32 %j.i20 to float
+ %p1.i23 = call float @llvm.amdgcn.interp.p1(float %i.f.i21, i32 2, i32 4, i32 %arg4) #0
+ %p2.i24 = call float @llvm.amdgcn.interp.p2(float %p1.i23, float %j.f.i22, i32 2, i32 4, i32 %arg4) #0
+ %i.i13 = extractelement <2 x i32> %arg6, i32 0
+ %j.i14 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i15 = bitcast i32 %i.i13 to float
+ %j.f.i16 = bitcast i32 %j.i14 to float
+ %p1.i17 = call float @llvm.amdgcn.interp.p1(float %i.f.i15, i32 0, i32 5, i32 %arg4) #0
+ %p2.i18 = call float @llvm.amdgcn.interp.p2(float %p1.i17, float %j.f.i16, i32 0, i32 5, i32 %arg4) #0
+ %i.i7 = extractelement <2 x i32> %arg6, i32 0
+ %j.i8 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i9 = bitcast i32 %i.i7 to float
+ %j.f.i10 = bitcast i32 %j.i8 to float
+ %p1.i11 = call float @llvm.amdgcn.interp.p1(float %i.f.i9, i32 1, i32 5, i32 %arg4) #0
+ %p2.i12 = call float @llvm.amdgcn.interp.p2(float %p1.i11, float %j.f.i10, i32 1, i32 5, i32 %arg4) #0
+ %i.i1 = extractelement <2 x i32> %arg6, i32 0
+ %j.i2 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i3 = bitcast i32 %i.i1 to float
+ %j.f.i4 = bitcast i32 %j.i2 to float
+ %p1.i5 = call float @llvm.amdgcn.interp.p1(float %i.f.i3, i32 2, i32 5, i32 %arg4) #0
+ %p2.i6 = call float @llvm.amdgcn.interp.p2(float %p1.i5, float %j.f.i4, i32 2, i32 5, i32 %arg4) #0
%mbcnt.lo.0 = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
%tmp109 = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %mbcnt.lo.0)
%tmp110 = getelementptr [64 x i32], [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %tmp109
- %tmp111 = bitcast float %tmp92 to i32
+ %tmp111 = bitcast float %p2.i to i32
store i32 %tmp111, i32 addrspace(3)* %tmp110
- %tmp112 = bitcast float %tmp93 to i32
+ %tmp112 = bitcast float %p2.i96 to i32
store i32 %tmp112, i32 addrspace(3)* %tmp110
%mbcnt.lo.1 = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
%tmp113 = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %mbcnt.lo.1)
@@ -128,14 +215,14 @@ main_body:
%tmp116 = getelementptr [64 x i32], [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %tmp115
%tmp117 = add i32 %tmp115, 1
%tmp118 = getelementptr [64 x i32], [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %tmp117
- %tmp119 = bitcast float %tmp92 to i32
+ %tmp119 = bitcast float %p2.i to i32
store i32 %tmp119, i32 addrspace(3)* %tmp114
%tmp120 = load i32, i32 addrspace(3)* %tmp116
%tmp121 = bitcast i32 %tmp120 to float
%tmp122 = load i32, i32 addrspace(3)* %tmp118
%tmp123 = bitcast i32 %tmp122 to float
%tmp124 = fsub float %tmp123, %tmp121
- %tmp125 = bitcast float %tmp93 to i32
+ %tmp125 = bitcast float %p2.i96 to i32
store i32 %tmp125, i32 addrspace(3)* %tmp114
%tmp126 = load i32, i32 addrspace(3)* %tmp116
%tmp127 = bitcast i32 %tmp126 to float
@@ -148,10 +235,10 @@ main_body:
%tmp134 = insertelement <4 x float> %tmp133, float %tmp130, i32 3
%tmp135 = extractelement <4 x float> %tmp134, i32 0
%tmp136 = extractelement <4 x float> %tmp134, i32 1
- %tmp137 = fmul float %tmp59, %tmp92
- %tmp138 = fmul float %tmp59, %tmp93
- %tmp139 = fmul float %tmp59, %tmp93
- %tmp140 = fmul float %tmp59, %tmp93
+ %tmp137 = fmul float %tmp59, %p2.i
+ %tmp138 = fmul float %tmp59, %p2.i96
+ %tmp139 = fmul float %tmp59, %p2.i96
+ %tmp140 = fmul float %tmp59, %p2.i96
%mbcnt.lo.2 = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
%tmp141 = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %mbcnt.lo.2)
%tmp142 = getelementptr [64 x i32], [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %tmp141
@@ -204,26 +291,26 @@ main_body:
%tmp180 = insertelement <4 x float> %tmp179, float %tmp176, i32 3
%tmp181 = extractelement <4 x float> %tmp180, i32 0
%tmp182 = extractelement <4 x float> %tmp180, i32 1
- %tmp183 = fdiv float 1.000000e+00, %tmp96
+ %tmp183 = fdiv float 1.000000e+00, %p2.i78
%tmp184 = fmul float %tmp32, %tmp183
%tmp185 = fcmp uge float 1.000000e+00, %tmp184
%tmp186 = select i1 %tmp185, float %tmp184, float 1.000000e+00
%tmp187 = fmul float %tmp186, %tmp29
- %tmp188 = call float @ceil(float %tmp187)
+ %tmp188 = call float @llvm.ceil.f32(float %tmp187)
%tmp189 = fcmp uge float 3.000000e+00, %tmp188
%tmp190 = select i1 %tmp189, float 3.000000e+00, float %tmp188
%tmp191 = fdiv float 1.000000e+00, %tmp190
%tmp192 = fdiv float 1.000000e+00, %tmp29
%tmp193 = fmul float %tmp190, %tmp192
%tmp194 = fmul float %tmp30, %tmp193
- %tmp195 = fmul float %tmp94, %tmp94
- %tmp196 = fmul float %tmp95, %tmp95
+ %tmp195 = fmul float %p2.i90, %p2.i90
+ %tmp196 = fmul float %p2.i84, %p2.i84
%tmp197 = fadd float %tmp196, %tmp195
- %tmp198 = fmul float %tmp96, %tmp96
+ %tmp198 = fmul float %p2.i78, %p2.i78
%tmp199 = fadd float %tmp197, %tmp198
%tmp200 = call float @llvm.amdgcn.rsq.f32(float %tmp199)
- %tmp201 = fmul float %tmp94, %tmp200
- %tmp202 = fmul float %tmp95, %tmp200
+ %tmp201 = fmul float %p2.i90, %tmp200
+ %tmp202 = fmul float %p2.i84, %tmp200
%tmp203 = fmul float %tmp201, %tmp28
%tmp204 = fmul float %tmp202, %tmp28
%tmp205 = fmul float %tmp203, -1.000000e+00
@@ -231,9 +318,9 @@ main_body:
%tmp207 = fmul float %tmp205, %tmp31
%tmp208 = fmul float %tmp206, %tmp31
%tmp209 = fsub float -0.000000e+00, %tmp207
- %tmp210 = fadd float %tmp92, %tmp209
+ %tmp210 = fadd float %p2.i, %tmp209
%tmp211 = fsub float -0.000000e+00, %tmp208
- %tmp212 = fadd float %tmp93, %tmp211
+ %tmp212 = fadd float %p2.i96, %tmp211
%tmp213 = fmul float %tmp205, %tmp191
%tmp214 = fmul float %tmp206, %tmp191
%tmp215 = fmul float -1.000000e+00, %tmp191
@@ -277,7 +364,8 @@ ENDIF: ; preds = %LOOP
%tmp240 = insertelement <8 x i32> %tmp239, i32 %tmp238, i32 5
%tmp241 = insertelement <8 x i32> %tmp240, i32 undef, i32 6
%tmp242 = insertelement <8 x i32> %tmp241, i32 undef, i32 7
- %tmp243 = call <4 x float> @llvm.SI.image.sample.d.v8i32(<8 x i32> %tmp242, <8 x i32> %tmp61, <4 x i32> %tmp63.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp242.bc = bitcast <8 x i32> %tmp242 to <8 x float>
+ %tmp243 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp242.bc, <8 x i32> %tmp61, <4 x i32> %tmp63.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp244 = extractelement <4 x float> %tmp243, i32 3
%tmp245 = fcmp oge float %temp30.0, %tmp244
%tmp246 = sext i1 %tmp245 to i32
@@ -323,7 +411,8 @@ IF67: ; preds = %LOOP65
%tmp275 = insertelement <8 x i32> %tmp274, i32 undef, i32 6
%tmp276 = insertelement <8 x i32> %tmp275, i32 undef, i32 7
%tmp67.bc = bitcast <16 x i8> %tmp67 to <4 x i32>
- %tmp277 = call <4 x float> @llvm.SI.image.sample.d.v8i32(<8 x i32> %tmp276, <8 x i32> %tmp65, <4 x i32> %tmp67.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp276.bc = bitcast <8 x i32> %tmp276 to <8 x float>
+ %tmp277 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp276.bc, <8 x i32> %tmp65, <4 x i32> %tmp67.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp278 = extractelement <4 x float> %tmp277, i32 0
%tmp279 = extractelement <4 x float> %tmp277, i32 1
%tmp280 = extractelement <4 x float> %tmp277, i32 2
@@ -344,7 +433,8 @@ IF67: ; preds = %LOOP65
%tmp295 = insertelement <8 x i32> %tmp294, i32 undef, i32 6
%tmp296 = insertelement <8 x i32> %tmp295, i32 undef, i32 7
%tmp83.bc = bitcast <16 x i8> %tmp83 to <4 x i32>
- %tmp297 = call <4 x float> @llvm.SI.image.sample.d.v8i32(<8 x i32> %tmp296, <8 x i32> %tmp81, <4 x i32> %tmp83.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp296.bc = bitcast <8 x i32> %tmp296 to <8 x float>
+ %tmp297 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp296.bc, <8 x i32> %tmp81, <4 x i32> %tmp83.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp298 = extractelement <4 x float> %tmp297, i32 0
%tmp299 = extractelement <4 x float> %tmp297, i32 1
%tmp300 = extractelement <4 x float> %tmp297, i32 2
@@ -363,7 +453,8 @@ IF67: ; preds = %LOOP65
%tmp313 = insertelement <8 x i32> %tmp312, i32 undef, i32 6
%tmp314 = insertelement <8 x i32> %tmp313, i32 undef, i32 7
%tmp79.bc = bitcast <16 x i8> %tmp79 to <4 x i32>
- %tmp315 = call <4 x float> @llvm.SI.image.sample.d.v8i32(<8 x i32> %tmp314, <8 x i32> %tmp77, <4 x i32> %tmp79.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp314.bc = bitcast <8 x i32> %tmp314 to <8 x float>
+ %tmp315 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp314.bc, <8 x i32> %tmp77, <4 x i32> %tmp79.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp316 = extractelement <4 x float> %tmp315, i32 0
%tmp317 = extractelement <4 x float> %tmp315, i32 1
%tmp318 = extractelement <4 x float> %tmp315, i32 2
@@ -393,7 +484,8 @@ IF67: ; preds = %LOOP65
%tmp342 = insertelement <8 x i32> %tmp341, i32 %tmp336, i32 5
%tmp343 = insertelement <8 x i32> %tmp342, i32 undef, i32 6
%tmp344 = insertelement <8 x i32> %tmp343, i32 undef, i32 7
- %tmp345 = call <4 x float> @llvm.SI.image.sample.d.v8i32(<8 x i32> %tmp344, <8 x i32> %tmp61, <4 x i32> %tmp63.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp344.bc = bitcast <8 x i32> %tmp344 to <8 x float>
+ %tmp345 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp344.bc, <8 x i32> %tmp61, <4 x i32> %tmp63.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp346 = extractelement <4 x float> %tmp345, i32 0
%tmp347 = extractelement <4 x float> %tmp345, i32 1
%tmp348 = extractelement <4 x float> %tmp345, i32 2
@@ -424,14 +516,15 @@ IF67: ; preds = %LOOP65
%tmp373 = insertelement <8 x i32> %tmp372, i32 undef, i32 6
%tmp374 = insertelement <8 x i32> %tmp373, i32 undef, i32 7
%tmp71.bc = bitcast <16 x i8> %tmp71 to <4 x i32>
- %tmp375 = call <4 x float> @llvm.SI.image.sample.d.v8i32(<8 x i32> %tmp374, <8 x i32> %tmp69, <4 x i32> %tmp71.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp374.bc = bitcast <8 x i32> %tmp374 to <8 x float>
+ %tmp375 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp374.bc, <8 x i32> %tmp69, <4 x i32> %tmp71.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp376 = extractelement <4 x float> %tmp375, i32 0
%tmp377 = extractelement <4 x float> %tmp375, i32 1
%tmp378 = extractelement <4 x float> %tmp375, i32 2
%tmp379 = extractelement <4 x float> %tmp375, i32 3
- %tmp380 = fsub float -0.000000e+00, %tmp94
- %tmp381 = fsub float -0.000000e+00, %tmp95
- %tmp382 = fsub float -0.000000e+00, %tmp96
+ %tmp380 = fsub float -0.000000e+00, %p2.i90
+ %tmp381 = fsub float -0.000000e+00, %p2.i84
+ %tmp382 = fsub float -0.000000e+00, %p2.i78
%tmp383 = fmul float %tmp358, %tmp380
%tmp384 = fmul float %tmp359, %tmp381
%tmp385 = fadd float %tmp384, %tmp383
@@ -449,20 +542,20 @@ IF67: ; preds = %LOOP65
%tmp397 = fadd float %tmp381, %tmp396
%tmp398 = fsub float -0.000000e+00, %tmp393
%tmp399 = fadd float %tmp382, %tmp398
- %tmp400 = fmul float %tmp395, %tmp97
- %tmp401 = fmul float %tmp395, %tmp98
- %tmp402 = fmul float %tmp395, %tmp99
- %tmp403 = fmul float %tmp397, %tmp100
+ %tmp400 = fmul float %tmp395, %p2.i72
+ %tmp401 = fmul float %tmp395, %p2.i66
+ %tmp402 = fmul float %tmp395, %p2.i60
+ %tmp403 = fmul float %tmp397, %p2.i54
%tmp404 = fadd float %tmp403, %tmp400
- %tmp405 = fmul float %tmp397, %tmp101
+ %tmp405 = fmul float %tmp397, %p2.i48
%tmp406 = fadd float %tmp405, %tmp401
- %tmp407 = fmul float %tmp397, %tmp102
+ %tmp407 = fmul float %tmp397, %p2.i42
%tmp408 = fadd float %tmp407, %tmp402
- %tmp409 = fmul float %tmp399, %tmp103
+ %tmp409 = fmul float %tmp399, %p2.i36
%tmp410 = fadd float %tmp409, %tmp404
- %tmp411 = fmul float %tmp399, %tmp104
+ %tmp411 = fmul float %tmp399, %p2.i30
%tmp412 = fadd float %tmp411, %tmp406
- %tmp413 = fmul float %tmp399, %tmp105
+ %tmp413 = fmul float %tmp399, %p2.i24
%tmp414 = fadd float %tmp413, %tmp408
%tmp415 = bitcast float %tmp135 to i32
%tmp416 = bitcast float %tmp181 to i32
@@ -479,7 +572,8 @@ IF67: ; preds = %LOOP65
%tmp427 = insertelement <8 x i32> %tmp426, i32 undef, i32 6
%tmp428 = insertelement <8 x i32> %tmp427, i32 undef, i32 7
%tmp87.bc = bitcast <16 x i8> %tmp87 to <4 x i32>
- %tmp429 = call <4 x float> @llvm.SI.image.sample.d.v8i32(<8 x i32> %tmp428, <8 x i32> %tmp85, <4 x i32> %tmp87.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp428.bc = bitcast <8 x i32> %tmp428 to <8 x float>
+ %tmp429 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp428.bc, <8 x i32> %tmp85, <4 x i32> %tmp87.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp430 = extractelement <4 x float> %tmp429, i32 0
%tmp431 = extractelement <4 x float> %tmp429, i32 1
%tmp432 = extractelement <4 x float> %tmp429, i32 2
@@ -502,12 +596,22 @@ IF67: ; preds = %LOOP65
%tmp449 = insertelement <4 x float> %tmp448, float %tmp445, i32 1
%tmp450 = insertelement <4 x float> %tmp449, float %tmp447, i32 2
%tmp451 = insertelement <4 x float> %tmp450, float %tmp194, i32 3
- %tmp452 = call <4 x float> @llvm.AMDGPU.cube(<4 x float> %tmp451)
+ %tmp451.x = extractelement <4 x float> %tmp451, i32 0
+ %tmp451.y = extractelement <4 x float> %tmp451, i32 1
+ %tmp451.z = extractelement <4 x float> %tmp451, i32 2
+ %cubetc = call float @llvm.amdgcn.cubetc(float %tmp451.x, float %tmp451.y, float %tmp451.z)
+ %cubesc = call float @llvm.amdgcn.cubesc(float %tmp451.x, float %tmp451.y, float %tmp451.z)
+ %cubema = call float @llvm.amdgcn.cubema(float %tmp451.x, float %tmp451.y, float %tmp451.z)
+ %cubeid = call float @llvm.amdgcn.cubeid(float %tmp451.x, float %tmp451.y, float %tmp451.z)
+ %tmp452.0 = insertelement <4 x float> undef, float %cubetc, i32 0
+ %tmp452.1 = insertelement <4 x float> %tmp452.0, float %cubesc, i32 1
+ %tmp452.2 = insertelement <4 x float> %tmp452.1, float %cubema, i32 2
+ %tmp452 = insertelement <4 x float> %tmp452.2, float %cubeid, i32 3
%tmp453 = extractelement <4 x float> %tmp452, i32 0
%tmp454 = extractelement <4 x float> %tmp452, i32 1
%tmp455 = extractelement <4 x float> %tmp452, i32 2
%tmp456 = extractelement <4 x float> %tmp452, i32 3
- %tmp457 = call float @fabs(float %tmp455)
+ %tmp457 = call float @llvm.fabs.f32(float %tmp455)
%tmp458 = fdiv float 1.000000e+00, %tmp457
%tmp459 = fmul float %tmp453, %tmp458
%tmp460 = fadd float %tmp459, 1.500000e+00
@@ -521,7 +625,8 @@ IF67: ; preds = %LOOP65
%tmp468 = insertelement <4 x i32> %tmp467, i32 %tmp465, i32 2
%tmp469 = insertelement <4 x i32> %tmp468, i32 undef, i32 3
%tmp91.bc = bitcast <16 x i8> %tmp91 to <4 x i32>
- %tmp470 = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %tmp469, <8 x i32> %tmp89, <4 x i32> %tmp91.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp469.bc = bitcast <4 x i32> %tmp469 to <4 x float>
+ %tmp470 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %tmp469.bc, <8 x i32> %tmp89, <4 x i32> %tmp91.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
%tmp471 = extractelement <4 x float> %tmp470, i32 0
%tmp472 = extractelement <4 x float> %tmp470, i32 1
%tmp473 = extractelement <4 x float> %tmp470, i32 2
@@ -531,15 +636,15 @@ IF67: ; preds = %LOOP65
%tmp477 = fadd float %tmp476, %tmp329
%tmp478 = fmul float %tmp432, %tmp473
%tmp479 = fadd float %tmp478, %tmp330
- %tmp480 = fmul float %tmp106, %tmp106
- %tmp481 = fmul float %tmp107, %tmp107
+ %tmp480 = fmul float %p2.i18, %p2.i18
+ %tmp481 = fmul float %p2.i12, %p2.i12
%tmp482 = fadd float %tmp481, %tmp480
- %tmp483 = fmul float %tmp108, %tmp108
+ %tmp483 = fmul float %p2.i6, %p2.i6
%tmp484 = fadd float %tmp482, %tmp483
%tmp485 = call float @llvm.amdgcn.rsq.f32(float %tmp484)
- %tmp486 = fmul float %tmp106, %tmp485
- %tmp487 = fmul float %tmp107, %tmp485
- %tmp488 = fmul float %tmp108, %tmp485
+ %tmp486 = fmul float %p2.i18, %tmp485
+ %tmp487 = fmul float %p2.i12, %tmp485
+ %tmp488 = fmul float %p2.i6, %tmp485
%tmp489 = fmul float %tmp376, %tmp39
%tmp490 = fmul float %tmp377, %tmp40
%tmp491 = fmul float %tmp378, %tmp41
@@ -560,15 +665,15 @@ IF67: ; preds = %LOOP65
%tmp506 = fadd float %tmp487, %tmp505
%tmp507 = fsub float -0.000000e+00, %tmp502
%tmp508 = fadd float %tmp488, %tmp507
- %tmp509 = fmul float %tmp94, %tmp94
- %tmp510 = fmul float %tmp95, %tmp95
+ %tmp509 = fmul float %p2.i90, %p2.i90
+ %tmp510 = fmul float %p2.i84, %p2.i84
%tmp511 = fadd float %tmp510, %tmp509
- %tmp512 = fmul float %tmp96, %tmp96
+ %tmp512 = fmul float %p2.i78, %p2.i78
%tmp513 = fadd float %tmp511, %tmp512
%tmp514 = call float @llvm.amdgcn.rsq.f32(float %tmp513)
- %tmp515 = fmul float %tmp94, %tmp514
- %tmp516 = fmul float %tmp95, %tmp514
- %tmp517 = fmul float %tmp96, %tmp514
+ %tmp515 = fmul float %p2.i90, %tmp514
+ %tmp516 = fmul float %p2.i84, %tmp514
+ %tmp517 = fmul float %p2.i78, %tmp514
%tmp518 = fmul float %tmp504, %tmp515
%tmp519 = fmul float %tmp506, %tmp516
%tmp520 = fadd float %tmp519, %tmp518
@@ -623,7 +728,8 @@ IF67: ; preds = %LOOP65
%tmp569 = insertelement <8 x i32> %tmp568, i32 undef, i32 6
%tmp570 = insertelement <8 x i32> %tmp569, i32 undef, i32 7
%tmp75.bc = bitcast <16 x i8> %tmp75 to <4 x i32>
- %tmp571 = call <4 x float> @llvm.SI.image.sample.d.v8i32(<8 x i32> %tmp570, <8 x i32> %tmp73, <4 x i32> %tmp75.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp570.bc = bitcast <8 x i32> %tmp570 to <8 x float>
+ %tmp571 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp570.bc, <8 x i32> %tmp73, <4 x i32> %tmp75.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp572 = extractelement <4 x float> %tmp571, i32 0
%tmp573 = extractelement <4 x float> %tmp571, i32 1
%tmp574 = extractelement <4 x float> %tmp571, i32 2
@@ -633,11 +739,9 @@ IF67: ; preds = %LOOP65
%tmp578 = fadd float %tmp577, %tmp554
%tmp579 = fmul float %tmp574, %tmp45
%tmp580 = fadd float %tmp579, %tmp556
- %tmp581 = call i32 @llvm.SI.packf16(float %tmp576, float %tmp578)
- %tmp582 = bitcast i32 %tmp581 to float
- %tmp583 = call i32 @llvm.SI.packf16(float %tmp580, float %tmp282)
- %tmp584 = bitcast i32 %tmp583 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %tmp582, float %tmp584, float %tmp582, float %tmp584)
+ %tmp581 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %tmp576, float %tmp578)
+ %tmp583 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %tmp580, float %tmp282)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp581, <2 x half> %tmp583, i1 true, i1 true) #0
ret void
ENDIF66: ; preds = %LOOP65
@@ -647,7 +751,8 @@ ENDIF66: ; preds = %LOOP65
%tmp588 = insertelement <8 x i32> %tmp587, i32 %tmp586, i32 5
%tmp589 = insertelement <8 x i32> %tmp588, i32 undef, i32 6
%tmp590 = insertelement <8 x i32> %tmp589, i32 undef, i32 7
- %tmp591 = call <4 x float> @llvm.SI.image.sample.d.v8i32(<8 x i32> %tmp590, <8 x i32> %tmp61, <4 x i32> %tmp63.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp590.bc = bitcast <8 x i32> %tmp590 to <8 x float>
+ %tmp591 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp590.bc, <8 x i32> %tmp61, <4 x i32> %tmp63.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp592 = extractelement <4 x float> %tmp591, i32 3
%tmp593 = fcmp oge float %temp30.1, %tmp592
%tmp594 = sext i1 %tmp593 to i32
@@ -670,9 +775,10 @@ ENDIF66: ; preds = %LOOP65
br label %LOOP65
}
-; CHECK-LABEL: {{^}}main1:
-; CHECK: s_endpgm
-define amdgpu_ps void @main1([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) {
+; GCN-LABEL: {{^}}main1:
+; GCN: s_endpgm
+; TOVGPR: ScratchSize: 0{{$}}
+define amdgpu_ps void @main1([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
main_body:
%tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg, i64 0, i32 0
%tmp21 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
@@ -817,52 +923,210 @@ main_body:
%tmp160 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp159, !tbaa !0
%tmp161 = fcmp ugt float %arg17, 0.000000e+00
%tmp162 = select i1 %tmp161, float 1.000000e+00, float 0.000000e+00
- %tmp163 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %arg4, <2 x i32> %arg6)
- %tmp164 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %arg4, <2 x i32> %arg6)
- %tmp165 = call float @llvm.SI.fs.interp(i32 2, i32 0, i32 %arg4, <2 x i32> %arg6)
- %tmp166 = call float @llvm.SI.fs.interp(i32 3, i32 0, i32 %arg4, <2 x i32> %arg6)
- %tmp167 = call float @llvm.SI.fs.interp(i32 0, i32 1, i32 %arg4, <2 x i32> %arg6)
- %tmp168 = call float @llvm.SI.fs.interp(i32 1, i32 1, i32 %arg4, <2 x i32> %arg6)
- %tmp169 = call float @llvm.SI.fs.interp(i32 2, i32 1, i32 %arg4, <2 x i32> %arg6)
- %tmp170 = call float @llvm.SI.fs.interp(i32 3, i32 1, i32 %arg4, <2 x i32> %arg6)
- %tmp171 = call float @llvm.SI.fs.interp(i32 0, i32 2, i32 %arg4, <2 x i32> %arg6)
- %tmp172 = call float @llvm.SI.fs.interp(i32 1, i32 2, i32 %arg4, <2 x i32> %arg6)
- %tmp173 = call float @llvm.SI.fs.interp(i32 2, i32 2, i32 %arg4, <2 x i32> %arg6)
- %tmp174 = call float @llvm.SI.fs.interp(i32 3, i32 2, i32 %arg4, <2 x i32> %arg6)
- %tmp175 = call float @llvm.SI.fs.interp(i32 0, i32 3, i32 %arg4, <2 x i32> %arg6)
- %tmp176 = call float @llvm.SI.fs.interp(i32 1, i32 3, i32 %arg4, <2 x i32> %arg6)
- %tmp177 = call float @llvm.SI.fs.interp(i32 2, i32 3, i32 %arg4, <2 x i32> %arg6)
- %tmp178 = call float @llvm.SI.fs.interp(i32 3, i32 3, i32 %arg4, <2 x i32> %arg6)
- %tmp179 = call float @llvm.SI.fs.interp(i32 0, i32 4, i32 %arg4, <2 x i32> %arg6)
- %tmp180 = call float @llvm.SI.fs.interp(i32 1, i32 4, i32 %arg4, <2 x i32> %arg6)
- %tmp181 = call float @llvm.SI.fs.interp(i32 2, i32 4, i32 %arg4, <2 x i32> %arg6)
- %tmp182 = call float @llvm.SI.fs.interp(i32 3, i32 4, i32 %arg4, <2 x i32> %arg6)
- %tmp183 = call float @llvm.SI.fs.interp(i32 0, i32 5, i32 %arg4, <2 x i32> %arg6)
- %tmp184 = call float @llvm.SI.fs.interp(i32 1, i32 5, i32 %arg4, <2 x i32> %arg6)
- %tmp185 = call float @llvm.SI.fs.interp(i32 2, i32 5, i32 %arg4, <2 x i32> %arg6)
- %tmp186 = call float @llvm.SI.fs.interp(i32 3, i32 5, i32 %arg4, <2 x i32> %arg6)
- %tmp187 = call float @llvm.SI.fs.interp(i32 0, i32 6, i32 %arg4, <2 x i32> %arg6)
- %tmp188 = call float @llvm.SI.fs.interp(i32 1, i32 6, i32 %arg4, <2 x i32> %arg6)
- %tmp189 = call float @llvm.SI.fs.interp(i32 2, i32 6, i32 %arg4, <2 x i32> %arg6)
- %tmp190 = call float @llvm.SI.fs.interp(i32 3, i32 6, i32 %arg4, <2 x i32> %arg6)
- %tmp191 = call float @llvm.SI.fs.interp(i32 0, i32 7, i32 %arg4, <2 x i32> %arg6)
- %tmp192 = call float @llvm.SI.fs.interp(i32 1, i32 7, i32 %arg4, <2 x i32> %arg6)
- %tmp193 = call float @llvm.SI.fs.interp(i32 2, i32 7, i32 %arg4, <2 x i32> %arg6)
- %tmp194 = call float @llvm.SI.fs.interp(i32 3, i32 7, i32 %arg4, <2 x i32> %arg6)
+ %i.i = extractelement <2 x i32> %arg6, i32 0
+ %j.i = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i = bitcast i32 %i.i to float
+ %j.f.i = bitcast i32 %j.i to float
+ %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 0, i32 0, i32 %arg4) #0
+ %p2.i = call float @llvm.amdgcn.interp.p2(float %p1.i, float %j.f.i, i32 0, i32 0, i32 %arg4) #0
+ %i.i181 = extractelement <2 x i32> %arg6, i32 0
+ %j.i182 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i183 = bitcast i32 %i.i181 to float
+ %j.f.i184 = bitcast i32 %j.i182 to float
+ %p1.i185 = call float @llvm.amdgcn.interp.p1(float %i.f.i183, i32 1, i32 0, i32 %arg4) #0
+ %p2.i186 = call float @llvm.amdgcn.interp.p2(float %p1.i185, float %j.f.i184, i32 1, i32 0, i32 %arg4) #0
+ %i.i175 = extractelement <2 x i32> %arg6, i32 0
+ %j.i176 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i177 = bitcast i32 %i.i175 to float
+ %j.f.i178 = bitcast i32 %j.i176 to float
+ %p1.i179 = call float @llvm.amdgcn.interp.p1(float %i.f.i177, i32 2, i32 0, i32 %arg4) #0
+ %p2.i180 = call float @llvm.amdgcn.interp.p2(float %p1.i179, float %j.f.i178, i32 2, i32 0, i32 %arg4) #0
+ %i.i169 = extractelement <2 x i32> %arg6, i32 0
+ %j.i170 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i171 = bitcast i32 %i.i169 to float
+ %j.f.i172 = bitcast i32 %j.i170 to float
+ %p1.i173 = call float @llvm.amdgcn.interp.p1(float %i.f.i171, i32 3, i32 0, i32 %arg4) #0
+ %p2.i174 = call float @llvm.amdgcn.interp.p2(float %p1.i173, float %j.f.i172, i32 3, i32 0, i32 %arg4) #0
+ %i.i163 = extractelement <2 x i32> %arg6, i32 0
+ %j.i164 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i165 = bitcast i32 %i.i163 to float
+ %j.f.i166 = bitcast i32 %j.i164 to float
+ %p1.i167 = call float @llvm.amdgcn.interp.p1(float %i.f.i165, i32 0, i32 1, i32 %arg4) #0
+ %p2.i168 = call float @llvm.amdgcn.interp.p2(float %p1.i167, float %j.f.i166, i32 0, i32 1, i32 %arg4) #0
+ %i.i157 = extractelement <2 x i32> %arg6, i32 0
+ %j.i158 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i159 = bitcast i32 %i.i157 to float
+ %j.f.i160 = bitcast i32 %j.i158 to float
+ %p1.i161 = call float @llvm.amdgcn.interp.p1(float %i.f.i159, i32 1, i32 1, i32 %arg4) #0
+ %p2.i162 = call float @llvm.amdgcn.interp.p2(float %p1.i161, float %j.f.i160, i32 1, i32 1, i32 %arg4) #0
+ %i.i151 = extractelement <2 x i32> %arg6, i32 0
+ %j.i152 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i153 = bitcast i32 %i.i151 to float
+ %j.f.i154 = bitcast i32 %j.i152 to float
+ %p1.i155 = call float @llvm.amdgcn.interp.p1(float %i.f.i153, i32 2, i32 1, i32 %arg4) #0
+ %p2.i156 = call float @llvm.amdgcn.interp.p2(float %p1.i155, float %j.f.i154, i32 2, i32 1, i32 %arg4) #0
+ %i.i145 = extractelement <2 x i32> %arg6, i32 0
+ %j.i146 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i147 = bitcast i32 %i.i145 to float
+ %j.f.i148 = bitcast i32 %j.i146 to float
+ %p1.i149 = call float @llvm.amdgcn.interp.p1(float %i.f.i147, i32 3, i32 1, i32 %arg4) #0
+ %p2.i150 = call float @llvm.amdgcn.interp.p2(float %p1.i149, float %j.f.i148, i32 3, i32 1, i32 %arg4) #0
+ %i.i139 = extractelement <2 x i32> %arg6, i32 0
+ %j.i140 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i141 = bitcast i32 %i.i139 to float
+ %j.f.i142 = bitcast i32 %j.i140 to float
+ %p1.i143 = call float @llvm.amdgcn.interp.p1(float %i.f.i141, i32 0, i32 2, i32 %arg4) #0
+ %p2.i144 = call float @llvm.amdgcn.interp.p2(float %p1.i143, float %j.f.i142, i32 0, i32 2, i32 %arg4) #0
+ %i.i133 = extractelement <2 x i32> %arg6, i32 0
+ %j.i134 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i135 = bitcast i32 %i.i133 to float
+ %j.f.i136 = bitcast i32 %j.i134 to float
+ %p1.i137 = call float @llvm.amdgcn.interp.p1(float %i.f.i135, i32 1, i32 2, i32 %arg4) #0
+ %p2.i138 = call float @llvm.amdgcn.interp.p2(float %p1.i137, float %j.f.i136, i32 1, i32 2, i32 %arg4) #0
+ %i.i127 = extractelement <2 x i32> %arg6, i32 0
+ %j.i128 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i129 = bitcast i32 %i.i127 to float
+ %j.f.i130 = bitcast i32 %j.i128 to float
+ %p1.i131 = call float @llvm.amdgcn.interp.p1(float %i.f.i129, i32 2, i32 2, i32 %arg4) #0
+ %p2.i132 = call float @llvm.amdgcn.interp.p2(float %p1.i131, float %j.f.i130, i32 2, i32 2, i32 %arg4) #0
+ %i.i121 = extractelement <2 x i32> %arg6, i32 0
+ %j.i122 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i123 = bitcast i32 %i.i121 to float
+ %j.f.i124 = bitcast i32 %j.i122 to float
+ %p1.i125 = call float @llvm.amdgcn.interp.p1(float %i.f.i123, i32 3, i32 2, i32 %arg4) #0
+ %p2.i126 = call float @llvm.amdgcn.interp.p2(float %p1.i125, float %j.f.i124, i32 3, i32 2, i32 %arg4) #0
+ %i.i115 = extractelement <2 x i32> %arg6, i32 0
+ %j.i116 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i117 = bitcast i32 %i.i115 to float
+ %j.f.i118 = bitcast i32 %j.i116 to float
+ %p1.i119 = call float @llvm.amdgcn.interp.p1(float %i.f.i117, i32 0, i32 3, i32 %arg4) #0
+ %p2.i120 = call float @llvm.amdgcn.interp.p2(float %p1.i119, float %j.f.i118, i32 0, i32 3, i32 %arg4) #0
+ %i.i109 = extractelement <2 x i32> %arg6, i32 0
+ %j.i110 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i111 = bitcast i32 %i.i109 to float
+ %j.f.i112 = bitcast i32 %j.i110 to float
+ %p1.i113 = call float @llvm.amdgcn.interp.p1(float %i.f.i111, i32 1, i32 3, i32 %arg4) #0
+ %p2.i114 = call float @llvm.amdgcn.interp.p2(float %p1.i113, float %j.f.i112, i32 1, i32 3, i32 %arg4) #0
+ %i.i103 = extractelement <2 x i32> %arg6, i32 0
+ %j.i104 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i105 = bitcast i32 %i.i103 to float
+ %j.f.i106 = bitcast i32 %j.i104 to float
+ %p1.i107 = call float @llvm.amdgcn.interp.p1(float %i.f.i105, i32 2, i32 3, i32 %arg4) #0
+ %p2.i108 = call float @llvm.amdgcn.interp.p2(float %p1.i107, float %j.f.i106, i32 2, i32 3, i32 %arg4) #0
+ %i.i97 = extractelement <2 x i32> %arg6, i32 0
+ %j.i98 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i99 = bitcast i32 %i.i97 to float
+ %j.f.i100 = bitcast i32 %j.i98 to float
+ %p1.i101 = call float @llvm.amdgcn.interp.p1(float %i.f.i99, i32 3, i32 3, i32 %arg4) #0
+ %p2.i102 = call float @llvm.amdgcn.interp.p2(float %p1.i101, float %j.f.i100, i32 3, i32 3, i32 %arg4) #0
+ %i.i91 = extractelement <2 x i32> %arg6, i32 0
+ %j.i92 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i93 = bitcast i32 %i.i91 to float
+ %j.f.i94 = bitcast i32 %j.i92 to float
+ %p1.i95 = call float @llvm.amdgcn.interp.p1(float %i.f.i93, i32 0, i32 4, i32 %arg4) #0
+ %p2.i96 = call float @llvm.amdgcn.interp.p2(float %p1.i95, float %j.f.i94, i32 0, i32 4, i32 %arg4) #0
+ %i.i85 = extractelement <2 x i32> %arg6, i32 0
+ %j.i86 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i87 = bitcast i32 %i.i85 to float
+ %j.f.i88 = bitcast i32 %j.i86 to float
+ %p1.i89 = call float @llvm.amdgcn.interp.p1(float %i.f.i87, i32 1, i32 4, i32 %arg4) #0
+ %p2.i90 = call float @llvm.amdgcn.interp.p2(float %p1.i89, float %j.f.i88, i32 1, i32 4, i32 %arg4) #0
+ %i.i79 = extractelement <2 x i32> %arg6, i32 0
+ %j.i80 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i81 = bitcast i32 %i.i79 to float
+ %j.f.i82 = bitcast i32 %j.i80 to float
+ %p1.i83 = call float @llvm.amdgcn.interp.p1(float %i.f.i81, i32 2, i32 4, i32 %arg4) #0
+ %p2.i84 = call float @llvm.amdgcn.interp.p2(float %p1.i83, float %j.f.i82, i32 2, i32 4, i32 %arg4) #0
+ %i.i73 = extractelement <2 x i32> %arg6, i32 0
+ %j.i74 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i75 = bitcast i32 %i.i73 to float
+ %j.f.i76 = bitcast i32 %j.i74 to float
+ %p1.i77 = call float @llvm.amdgcn.interp.p1(float %i.f.i75, i32 3, i32 4, i32 %arg4) #0
+ %p2.i78 = call float @llvm.amdgcn.interp.p2(float %p1.i77, float %j.f.i76, i32 3, i32 4, i32 %arg4) #0
+ %i.i67 = extractelement <2 x i32> %arg6, i32 0
+ %j.i68 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i69 = bitcast i32 %i.i67 to float
+ %j.f.i70 = bitcast i32 %j.i68 to float
+ %p1.i71 = call float @llvm.amdgcn.interp.p1(float %i.f.i69, i32 0, i32 5, i32 %arg4) #0
+ %p2.i72 = call float @llvm.amdgcn.interp.p2(float %p1.i71, float %j.f.i70, i32 0, i32 5, i32 %arg4) #0
+ %i.i61 = extractelement <2 x i32> %arg6, i32 0
+ %j.i62 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i63 = bitcast i32 %i.i61 to float
+ %j.f.i64 = bitcast i32 %j.i62 to float
+ %p1.i65 = call float @llvm.amdgcn.interp.p1(float %i.f.i63, i32 1, i32 5, i32 %arg4) #0
+ %p2.i66 = call float @llvm.amdgcn.interp.p2(float %p1.i65, float %j.f.i64, i32 1, i32 5, i32 %arg4) #0
+ %i.i55 = extractelement <2 x i32> %arg6, i32 0
+ %j.i56 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i57 = bitcast i32 %i.i55 to float
+ %j.f.i58 = bitcast i32 %j.i56 to float
+ %p1.i59 = call float @llvm.amdgcn.interp.p1(float %i.f.i57, i32 2, i32 5, i32 %arg4) #0
+ %p2.i60 = call float @llvm.amdgcn.interp.p2(float %p1.i59, float %j.f.i58, i32 2, i32 5, i32 %arg4) #0
+ %i.i49 = extractelement <2 x i32> %arg6, i32 0
+ %j.i50 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i51 = bitcast i32 %i.i49 to float
+ %j.f.i52 = bitcast i32 %j.i50 to float
+ %p1.i53 = call float @llvm.amdgcn.interp.p1(float %i.f.i51, i32 3, i32 5, i32 %arg4) #0
+ %p2.i54 = call float @llvm.amdgcn.interp.p2(float %p1.i53, float %j.f.i52, i32 3, i32 5, i32 %arg4) #0
+ %i.i43 = extractelement <2 x i32> %arg6, i32 0
+ %j.i44 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i45 = bitcast i32 %i.i43 to float
+ %j.f.i46 = bitcast i32 %j.i44 to float
+ %p1.i47 = call float @llvm.amdgcn.interp.p1(float %i.f.i45, i32 0, i32 6, i32 %arg4) #0
+ %p2.i48 = call float @llvm.amdgcn.interp.p2(float %p1.i47, float %j.f.i46, i32 0, i32 6, i32 %arg4) #0
+ %i.i37 = extractelement <2 x i32> %arg6, i32 0
+ %j.i38 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i39 = bitcast i32 %i.i37 to float
+ %j.f.i40 = bitcast i32 %j.i38 to float
+ %p1.i41 = call float @llvm.amdgcn.interp.p1(float %i.f.i39, i32 1, i32 6, i32 %arg4) #0
+ %p2.i42 = call float @llvm.amdgcn.interp.p2(float %p1.i41, float %j.f.i40, i32 1, i32 6, i32 %arg4) #0
+ %i.i31 = extractelement <2 x i32> %arg6, i32 0
+ %j.i32 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i33 = bitcast i32 %i.i31 to float
+ %j.f.i34 = bitcast i32 %j.i32 to float
+ %p1.i35 = call float @llvm.amdgcn.interp.p1(float %i.f.i33, i32 2, i32 6, i32 %arg4) #0
+ %p2.i36 = call float @llvm.amdgcn.interp.p2(float %p1.i35, float %j.f.i34, i32 2, i32 6, i32 %arg4) #0
+ %i.i25 = extractelement <2 x i32> %arg6, i32 0
+ %j.i26 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i27 = bitcast i32 %i.i25 to float
+ %j.f.i28 = bitcast i32 %j.i26 to float
+ %p1.i29 = call float @llvm.amdgcn.interp.p1(float %i.f.i27, i32 3, i32 6, i32 %arg4) #0
+ %p2.i30 = call float @llvm.amdgcn.interp.p2(float %p1.i29, float %j.f.i28, i32 3, i32 6, i32 %arg4) #0
+ %i.i19 = extractelement <2 x i32> %arg6, i32 0
+ %j.i20 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i21 = bitcast i32 %i.i19 to float
+ %j.f.i22 = bitcast i32 %j.i20 to float
+ %p1.i23 = call float @llvm.amdgcn.interp.p1(float %i.f.i21, i32 0, i32 7, i32 %arg4) #0
+ %p2.i24 = call float @llvm.amdgcn.interp.p2(float %p1.i23, float %j.f.i22, i32 0, i32 7, i32 %arg4) #0
+ %i.i13 = extractelement <2 x i32> %arg6, i32 0
+ %j.i14 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i15 = bitcast i32 %i.i13 to float
+ %j.f.i16 = bitcast i32 %j.i14 to float
+ %p1.i17 = call float @llvm.amdgcn.interp.p1(float %i.f.i15, i32 1, i32 7, i32 %arg4) #0
+ %p2.i18 = call float @llvm.amdgcn.interp.p2(float %p1.i17, float %j.f.i16, i32 1, i32 7, i32 %arg4) #0
+ %i.i7 = extractelement <2 x i32> %arg6, i32 0
+ %j.i8 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i9 = bitcast i32 %i.i7 to float
+ %j.f.i10 = bitcast i32 %j.i8 to float
+ %p1.i11 = call float @llvm.amdgcn.interp.p1(float %i.f.i9, i32 2, i32 7, i32 %arg4) #0
+ %p2.i12 = call float @llvm.amdgcn.interp.p2(float %p1.i11, float %j.f.i10, i32 2, i32 7, i32 %arg4) #0
+ %i.i1 = extractelement <2 x i32> %arg6, i32 0
+ %j.i2 = extractelement <2 x i32> %arg6, i32 1
+ %i.f.i3 = bitcast i32 %i.i1 to float
+ %j.f.i4 = bitcast i32 %j.i2 to float
+ %p1.i5 = call float @llvm.amdgcn.interp.p1(float %i.f.i3, i32 3, i32 7, i32 %arg4) #0
+ %p2.i6 = call float @llvm.amdgcn.interp.p2(float %p1.i5, float %j.f.i4, i32 3, i32 7, i32 %arg4) #0
%tmp195 = fmul float %arg14, %tmp123
%tmp196 = fadd float %tmp195, %tmp124
- %tmp197 = call float @llvm.AMDGPU.clamp.f32(float %tmp162, float 0.000000e+00, float 1.000000e+00)
- %tmp198 = call float @llvm.AMDGPU.clamp.f32(float 0.000000e+00, float 0.000000e+00, float 1.000000e+00)
- %tmp199 = call float @llvm.AMDGPU.clamp.f32(float 0.000000e+00, float 0.000000e+00, float 1.000000e+00)
- %tmp200 = call float @llvm.AMDGPU.clamp.f32(float 1.000000e+00, float 0.000000e+00, float 1.000000e+00)
- %tmp201 = bitcast float %tmp197 to i32
+ %max.0.i = call float @llvm.maxnum.f32(float %tmp162, float 0.000000e+00)
+ %clamp.i = call float @llvm.minnum.f32(float %max.0.i, float 1.000000e+00)
+ %tmp201 = bitcast float %clamp.i to i32
%tmp202 = icmp ne i32 %tmp201, 0
%. = select i1 %tmp202, float -1.000000e+00, float 1.000000e+00
- %tmp203 = fsub float -0.000000e+00, %tmp163
+ %tmp203 = fsub float -0.000000e+00, %p2.i
%tmp204 = fadd float %tmp43, %tmp203
- %tmp205 = fsub float -0.000000e+00, %tmp164
+ %tmp205 = fsub float -0.000000e+00, %p2.i186
%tmp206 = fadd float %tmp44, %tmp205
- %tmp207 = fsub float -0.000000e+00, %tmp165
+ %tmp207 = fsub float -0.000000e+00, %p2.i180
%tmp208 = fadd float %tmp45, %tmp207
%tmp209 = fmul float %tmp204, %tmp204
%tmp210 = fmul float %tmp206, %tmp206
@@ -876,12 +1140,13 @@ main_body:
%tmp218 = fmul float %., %tmp53
%tmp219 = fmul float %arg13, %tmp46
%tmp220 = fmul float %tmp196, %tmp47
- %tmp221 = bitcast float %tmp173 to i32
- %tmp222 = bitcast float %tmp174 to i32
+ %tmp221 = bitcast float %p2.i132 to i32
+ %tmp222 = bitcast float %p2.i126 to i32
%tmp223 = insertelement <2 x i32> undef, i32 %tmp221, i32 0
%tmp224 = insertelement <2 x i32> %tmp223, i32 %tmp222, i32 1
%tmp132.bc = bitcast <16 x i8> %tmp132 to <4 x i32>
- %tmp225 = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> %tmp224, <8 x i32> %tmp130, <4 x i32> %tmp132.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp224.bc = bitcast <2 x i32> %tmp224 to <2 x float>
+ %tmp225 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %tmp224.bc, <8 x i32> %tmp130, <4 x i32> %tmp132.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp226 = extractelement <4 x float> %tmp225, i32 0
%tmp227 = extractelement <4 x float> %tmp225, i32 1
%tmp228 = extractelement <4 x float> %tmp225, i32 2
@@ -895,34 +1160,36 @@ main_body:
%result.i44 = fadd float %tmp231, %one.sub.a.i43
%one.sub.a.i41 = fsub float 1.000000e+00, %tmp26
%result.i42 = fadd float %tmp232, %one.sub.a.i41
- %tmp233 = fmul float %tmp215, %tmp183
- %tmp234 = fmul float %tmp216, %tmp184
+ %tmp233 = fmul float %tmp215, %p2.i72
+ %tmp234 = fmul float %tmp216, %p2.i66
%tmp235 = fadd float %tmp234, %tmp233
- %tmp236 = fmul float %tmp217, %tmp185
+ %tmp236 = fmul float %tmp217, %p2.i60
%tmp237 = fadd float %tmp235, %tmp236
- %tmp238 = fmul float %tmp215, %tmp186
- %tmp239 = fmul float %tmp216, %tmp187
+ %tmp238 = fmul float %tmp215, %p2.i54
+ %tmp239 = fmul float %tmp216, %p2.i48
%tmp240 = fadd float %tmp239, %tmp238
- %tmp241 = fmul float %tmp217, %tmp188
+ %tmp241 = fmul float %tmp217, %p2.i42
%tmp242 = fadd float %tmp240, %tmp241
- %tmp243 = fmul float %tmp215, %tmp189
- %tmp244 = fmul float %tmp216, %tmp190
+ %tmp243 = fmul float %tmp215, %p2.i36
+ %tmp244 = fmul float %tmp216, %p2.i30
%tmp245 = fadd float %tmp244, %tmp243
- %tmp246 = fmul float %tmp217, %tmp191
+ %tmp246 = fmul float %tmp217, %p2.i24
%tmp247 = fadd float %tmp245, %tmp246
- %tmp248 = call float @llvm.AMDGPU.clamp.f32(float %tmp247, float 0.000000e+00, float 1.000000e+00)
+ %max.0.i19 = call float @llvm.maxnum.f32(float %tmp247, float 0.000000e+00)
+ %clamp.i20 = call float @llvm.minnum.f32(float %max.0.i19, float 1.000000e+00)
%tmp249 = fmul float %tmp213, 0x3F5A36E2E0000000
- %tmp250 = call float @llvm.AMDGPU.clamp.f32(float %tmp249, float 0.000000e+00, float 1.000000e+00)
- %tmp251 = fsub float -0.000000e+00, %tmp250
+ %max.0.i17 = call float @llvm.maxnum.f32(float %tmp249, float 0.000000e+00)
+ %clamp.i18 = call float @llvm.minnum.f32(float %max.0.i17, float 1.000000e+00)
+ %tmp251 = fsub float -0.000000e+00, %clamp.i18
%tmp252 = fadd float 1.000000e+00, %tmp251
- %tmp253 = call float @llvm.pow.f32(float %tmp248, float 2.500000e-01)
+ %tmp253 = call float @llvm.pow.f32(float %clamp.i20, float 2.500000e-01)
%tmp254 = fmul float %tmp38, %tmp253
%tmp255 = fmul float %tmp237, %tmp254
%tmp256 = fmul float %tmp242, %tmp254
%tmp257 = fmul float %tmp255, %tmp229
%tmp258 = fmul float %tmp256, %tmp229
- %tmp259 = fadd float %tmp248, 0x3EE4F8B580000000
- %tmp260 = fsub float -0.000000e+00, %tmp248
+ %tmp259 = fadd float %clamp.i20, 0x3EE4F8B580000000
+ %tmp260 = fsub float -0.000000e+00, %clamp.i20
%tmp261 = fadd float 1.000000e+00, %tmp260
%tmp262 = fmul float 1.200000e+01, %tmp261
%tmp263 = fadd float %tmp262, 4.000000e+00
@@ -942,8 +1209,8 @@ main_body:
LOOP: ; preds = %LOOP, %main_body
%temp144.0 = phi float [ 1.000000e+00, %main_body ], [ %tmp288, %LOOP ]
- %temp168.0 = phi float [ %tmp175, %main_body ], [ %tmp284, %LOOP ]
- %temp169.0 = phi float [ %tmp176, %main_body ], [ %tmp285, %LOOP ]
+ %temp168.0 = phi float [ %p2.i120, %main_body ], [ %tmp284, %LOOP ]
+ %temp169.0 = phi float [ %p2.i114, %main_body ], [ %tmp285, %LOOP ]
%temp170.0 = phi float [ %tmp252, %main_body ], [ %tmp286, %LOOP ]
%tmp276 = bitcast float %temp168.0 to i32
%tmp277 = bitcast float %temp169.0 to i32
@@ -952,7 +1219,8 @@ LOOP: ; preds = %LOOP, %main_body
%tmp280 = insertelement <4 x i32> %tmp279, i32 0, i32 2
%tmp281 = insertelement <4 x i32> %tmp280, i32 undef, i32 3
%tmp148.bc = bitcast <16 x i8> %tmp148 to <4 x i32>
- %tmp282 = call <4 x float> @llvm.SI.image.sample.l.v4i32(<4 x i32> %tmp281, <8 x i32> %tmp146, <4 x i32> %tmp148.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp281.bc = bitcast <4 x i32> %tmp281 to <4 x float>
+ %tmp282 = call <4 x float> @llvm.amdgcn.image.sample.l.v4f32.v4f32.v8i32(<4 x float> %tmp281.bc, <8 x i32> %tmp146, <4 x i32> %tmp148.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp283 = extractelement <4 x float> %tmp282, i32 3
%tmp284 = fadd float %temp168.0, %tmp273
%tmp285 = fadd float %temp169.0, %tmp274
@@ -979,12 +1247,12 @@ IF189: ; preds = %LOOP
%tmp303 = fadd float %tmp302, %tmp284
%tmp304 = fmul float %tmp301, %tmp274
%tmp305 = fadd float %tmp304, %tmp285
- %tmp306 = fsub float -0.000000e+00, %tmp175
+ %tmp306 = fsub float -0.000000e+00, %p2.i120
%tmp307 = fadd float %tmp303, %tmp306
- %tmp308 = fsub float -0.000000e+00, %tmp176
+ %tmp308 = fsub float -0.000000e+00, %p2.i114
%tmp309 = fadd float %tmp305, %tmp308
- %tmp310 = fadd float %tmp175, %tmp307
- %tmp311 = fadd float %tmp176, %tmp309
+ %tmp310 = fadd float %p2.i120, %tmp307
+ %tmp311 = fadd float %p2.i114, %tmp309
%tmp312 = fmul float %tmp307, %tmp66
%tmp313 = fmul float %tmp309, %tmp67
%tmp314 = fmul float %tmp312, %tmp54
@@ -993,8 +1261,8 @@ IF189: ; preds = %LOOP
%tmp317 = fadd float %tmp316, %tmp314
%tmp318 = fmul float %tmp313, %tmp57
%tmp319 = fadd float %tmp318, %tmp315
- %tmp320 = fadd float %tmp177, %tmp317
- %tmp321 = fadd float %tmp178, %tmp319
+ %tmp320 = fadd float %p2.i108, %tmp317
+ %tmp321 = fadd float %p2.i102, %tmp319
%tmp322 = fmul float %tmp312, %tmp58
%tmp323 = fmul float %tmp312, %tmp59
%tmp324 = fmul float %tmp312, %tmp60
@@ -1007,28 +1275,29 @@ IF189: ; preds = %LOOP
%tmp331 = fadd float %tmp330, %tmp324
%tmp332 = fmul float %tmp313, %tmp65
%tmp333 = fadd float %tmp332, %tmp325
- %tmp334 = fadd float %tmp167, %tmp327
- %tmp335 = fadd float %tmp168, %tmp329
- %tmp336 = fadd float %tmp169, %tmp331
- %tmp337 = fadd float %tmp170, %tmp333
+ %tmp334 = fadd float %p2.i168, %tmp327
+ %tmp335 = fadd float %p2.i162, %tmp329
+ %tmp336 = fadd float %p2.i156, %tmp331
+ %tmp337 = fadd float %p2.i150, %tmp333
%tmp338 = bitcast float %tmp334 to i32
%tmp339 = bitcast float %tmp335 to i32
%tmp340 = insertelement <2 x i32> undef, i32 %tmp338, i32 0
%tmp341 = insertelement <2 x i32> %tmp340, i32 %tmp339, i32 1
%tmp136.bc = bitcast <16 x i8> %tmp136 to <4 x i32>
- %tmp342 = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> %tmp341, <8 x i32> %tmp134, <4 x i32> %tmp136.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %tmp343 = extractelement <4 x float> %tmp342, i32 0
- %tmp344 = extractelement <4 x float> %tmp342, i32 1
- %tmp345 = extractelement <4 x float> %tmp342, i32 2
- %tmp346 = extractelement <4 x float> %tmp342, i32 3
+ %a.bc.i = bitcast <2 x i32> %tmp341 to <2 x float>
+ %tmp0 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i, <8 x i32> %tmp134, <4 x i32> %tmp136.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %tmp343 = extractelement <4 x float> %tmp0, i32 0
+ %tmp344 = extractelement <4 x float> %tmp0, i32 1
+ %tmp345 = extractelement <4 x float> %tmp0, i32 2
+ %tmp346 = extractelement <4 x float> %tmp0, i32 3
%tmp347 = fmul float %tmp343, %tmp22
%tmp348 = fmul float %tmp344, %tmp23
%tmp349 = fmul float %tmp345, %tmp24
%tmp350 = fmul float %tmp346, %tmp25
- %tmp351 = fmul float %tmp347, %tmp179
- %tmp352 = fmul float %tmp348, %tmp180
- %tmp353 = fmul float %tmp349, %tmp181
- %tmp354 = fmul float %tmp350, %tmp182
+ %tmp351 = fmul float %tmp347, %p2.i96
+ %tmp352 = fmul float %tmp348, %p2.i90
+ %tmp353 = fmul float %tmp349, %p2.i84
+ %tmp354 = fmul float %tmp350, %p2.i78
%tmp355 = fsub float -0.000000e+00, %tmp346
%tmp356 = fadd float 1.000000e+00, %tmp355
%tmp357 = fmul float %tmp356, %tmp48
@@ -1049,8 +1318,9 @@ IF189: ; preds = %LOOP
%tmp360 = insertelement <2 x i32> undef, i32 %tmp358, i32 0
%tmp361 = insertelement <2 x i32> %tmp360, i32 %tmp359, i32 1
%tmp152.bc = bitcast <16 x i8> %tmp152 to <4 x i32>
- %tmp362 = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> %tmp361, <8 x i32> %tmp150, <4 x i32> %tmp152.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %tmp363 = extractelement <4 x float> %tmp362, i32 2
+ %a.bc.i3 = bitcast <2 x i32> %tmp361 to <2 x float>
+ %tmp1 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i3, <8 x i32> %tmp150, <4 x i32> %tmp152.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %tmp363 = extractelement <4 x float> %tmp1, i32 2
%tmp364 = fmul float %result.i40, %result.i
%tmp365 = fmul float %result.i36, %result.i44
%tmp366 = fmul float %result.i32, %result.i42
@@ -1060,11 +1330,12 @@ IF189: ; preds = %LOOP
%tmp370 = insertelement <2 x i32> undef, i32 %tmp368, i32 0
%tmp371 = insertelement <2 x i32> %tmp370, i32 %tmp369, i32 1
%tmp140.bc = bitcast <16 x i8> %tmp140 to <4 x i32>
- %tmp372 = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> %tmp371, <8 x i32> %tmp138, <4 x i32> %tmp140.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %tmp373 = extractelement <4 x float> %tmp372, i32 0
- %tmp374 = extractelement <4 x float> %tmp372, i32 1
- %tmp375 = extractelement <4 x float> %tmp372, i32 2
- %tmp376 = extractelement <4 x float> %tmp372, i32 3
+ %a.bc.i2 = bitcast <2 x i32> %tmp371 to <2 x float>
+ %tmp2 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i2, <8 x i32> %tmp138, <4 x i32> %tmp140.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %tmp373 = extractelement <4 x float> %tmp2, i32 0
+ %tmp374 = extractelement <4 x float> %tmp2, i32 1
+ %tmp375 = extractelement <4 x float> %tmp2, i32 2
+ %tmp376 = extractelement <4 x float> %tmp2, i32 3
%tmp377 = fcmp olt float 0.000000e+00, %tmp375
%tmp378 = sext i1 %tmp377 to i32
%tmp379 = bitcast i32 %tmp378 to float
@@ -1077,11 +1348,12 @@ IF189: ; preds = %LOOP
%tmp384 = insertelement <2 x i32> undef, i32 %tmp382, i32 0
%tmp385 = insertelement <2 x i32> %tmp384, i32 %tmp383, i32 1
%tmp144.bc = bitcast <16 x i8> %tmp144 to <4 x i32>
- %tmp386 = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> %tmp385, <8 x i32> %tmp142, <4 x i32> %tmp144.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %tmp387 = extractelement <4 x float> %tmp386, i32 0
- %tmp388 = extractelement <4 x float> %tmp386, i32 1
- %tmp389 = extractelement <4 x float> %tmp386, i32 2
- %tmp390 = extractelement <4 x float> %tmp386, i32 3
+ %a.bc.i1 = bitcast <2 x i32> %tmp385 to <2 x float>
+ %tmp3 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i1, <8 x i32> %tmp142, <4 x i32> %tmp144.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %tmp387 = extractelement <4 x float> %tmp3, i32 0
+ %tmp388 = extractelement <4 x float> %tmp3, i32 1
+ %tmp389 = extractelement <4 x float> %tmp3, i32 2
+ %tmp390 = extractelement <4 x float> %tmp3, i32 3
%tmp391 = fcmp olt float 0.000000e+00, %tmp389
%tmp392 = sext i1 %tmp391 to i32
%tmp393 = bitcast i32 %tmp392 to float
@@ -1107,8 +1379,8 @@ IF189: ; preds = %LOOP
%tmp411 = fmul float %tmp410, %tmp35
%tmp412 = fmul float %tmp409, %tmp363
%tmp413 = fmul float %tmp411, %tmp363
- %tmp414 = call float @fabs(float %tmp405)
- %tmp415 = call float @fabs(float %tmp407)
+ %tmp414 = call float @llvm.fabs.f32(float %tmp405)
+ %tmp415 = call float @llvm.fabs.f32(float %tmp407)
%tmp416 = fsub float -0.000000e+00, %tmp414
%tmp417 = fadd float 1.000000e+00, %tmp416
%tmp418 = fsub float -0.000000e+00, %tmp415
@@ -1122,26 +1394,27 @@ IF189: ; preds = %LOOP
%tmp426 = fadd float %tmp424, %tmp425
%tmp427 = fsub float -0.000000e+00, %tmp426
%tmp428 = fadd float 0x3FF00068E0000000, %tmp427
- %tmp429 = call float @llvm.AMDGPU.clamp.f32(float %tmp428, float 0.000000e+00, float 1.000000e+00)
- %tmp430 = call float @llvm.amdgcn.rsq.f32(float %tmp429)
- %tmp431 = fmul float %tmp430, %tmp429
- %tmp432 = fsub float -0.000000e+00, %tmp429
+ %max.0.i15 = call float @llvm.maxnum.f32(float %tmp428, float 0.000000e+00)
+ %clamp.i16 = call float @llvm.minnum.f32(float %max.0.i15, float 1.000000e+00)
+ %tmp430 = call float @llvm.amdgcn.rsq.f32(float %clamp.i16)
+ %tmp431 = fmul float %tmp430, %clamp.i16
+ %tmp432 = fsub float -0.000000e+00, %clamp.i16
%cmp = fcmp ogt float 0.000000e+00, %tmp432
%tmp433 = select i1 %cmp, float %tmp431, float 0.000000e+00
- %tmp434 = fmul float %tmp183, %tmp421
- %tmp435 = fmul float %tmp184, %tmp421
- %tmp436 = fmul float %tmp185, %tmp421
- %tmp437 = fmul float %tmp186, %tmp423
+ %tmp434 = fmul float %p2.i72, %tmp421
+ %tmp435 = fmul float %p2.i66, %tmp421
+ %tmp436 = fmul float %p2.i60, %tmp421
+ %tmp437 = fmul float %p2.i54, %tmp423
%tmp438 = fadd float %tmp437, %tmp434
- %tmp439 = fmul float %tmp187, %tmp423
+ %tmp439 = fmul float %p2.i48, %tmp423
%tmp440 = fadd float %tmp439, %tmp435
- %tmp441 = fmul float %tmp188, %tmp423
+ %tmp441 = fmul float %p2.i42, %tmp423
%tmp442 = fadd float %tmp441, %tmp436
- %tmp443 = fmul float %tmp189, %tmp433
+ %tmp443 = fmul float %p2.i36, %tmp433
%tmp444 = fadd float %tmp443, %tmp438
- %tmp445 = fmul float %tmp190, %tmp433
+ %tmp445 = fmul float %p2.i30, %tmp433
%tmp446 = fadd float %tmp445, %tmp440
- %tmp447 = fmul float %tmp191, %tmp433
+ %tmp447 = fmul float %p2.i24, %tmp433
%tmp448 = fadd float %tmp447, %tmp442
%tmp449 = fmul float %tmp444, %tmp444
%tmp450 = fmul float %tmp446, %tmp446
@@ -1174,7 +1447,8 @@ ENDIF197: ; preds = %IF198, %IF189
%tmp468 = insertelement <2 x i32> undef, i32 %tmp466, i32 0
%tmp469 = insertelement <2 x i32> %tmp468, i32 %tmp467, i32 1
%tmp160.bc = bitcast <16 x i8> %tmp160 to <4 x i32>
- %tmp470 = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> %tmp469, <8 x i32> %tmp158, <4 x i32> %tmp160.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp469.bc = bitcast <2 x i32> %tmp469 to <2 x float>
+ %tmp470 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %tmp469.bc, <8 x i32> %tmp158, <4 x i32> %tmp160.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp471 = extractelement <4 x float> %tmp470, i32 0
%tmp472 = extractelement <4 x float> %tmp470, i32 1
%tmp473 = extractelement <4 x float> %tmp470, i32 2
@@ -1187,12 +1461,13 @@ ENDIF197: ; preds = %IF198, %IF189
%tmp480 = fadd float %tmp479, %tmp40
%tmp481 = fmul float %tmp474, %tmp41
%tmp482 = fadd float %tmp481, %tmp42
- %tmp483 = bitcast float %tmp171 to i32
- %tmp484 = bitcast float %tmp172 to i32
+ %tmp483 = bitcast float %p2.i144 to i32
+ %tmp484 = bitcast float %p2.i138 to i32
%tmp485 = insertelement <2 x i32> undef, i32 %tmp483, i32 0
%tmp486 = insertelement <2 x i32> %tmp485, i32 %tmp484, i32 1
%tmp156.bc = bitcast <16 x i8> %tmp156 to <4 x i32>
- %tmp487 = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> %tmp486, <8 x i32> %tmp154, <4 x i32> %tmp156.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp486.bc = bitcast <2 x i32> %tmp486 to <2 x float>
+ %tmp487 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %tmp486.bc, <8 x i32> %tmp154, <4 x i32> %tmp156.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp488 = extractelement <4 x float> %tmp487, i32 0
%tmp489 = extractelement <4 x float> %tmp487, i32 1
%tmp490 = extractelement <4 x float> %tmp487, i32 2
@@ -1204,11 +1479,11 @@ ENDIF197: ; preds = %IF198, %IF189
%tmp496 = fmul float %tmp489, %tmp494
%tmp497 = fmul float %tmp490, %tmp494
%tmp498 = fmul float %tmp27, %tmp495
- %tmp499 = fadd float %tmp498, %tmp192
+ %tmp499 = fadd float %tmp498, %p2.i18
%tmp500 = fmul float %tmp28, %tmp496
- %tmp501 = fadd float %tmp500, %tmp193
+ %tmp501 = fadd float %tmp500, %p2.i12
%tmp502 = fmul float %tmp29, %tmp497
- %tmp503 = fadd float %tmp502, %tmp194
+ %tmp503 = fadd float %tmp502, %p2.i6
%tmp504 = fmul float %tmp499, %tmp482
%tmp505 = fmul float %tmp501, %tmp482
%tmp506 = fmul float %tmp503, %tmp482
@@ -1242,18 +1517,19 @@ ENDIF197: ; preds = %IF198, %IF189
%tmp534 = fadd float %tmp533, %tmp532
%tmp535 = fmul float %temp14.0, %tmp531
%tmp536 = fadd float %tmp534, %tmp535
- %tmp537 = call float @llvm.AMDGPU.clamp.f32(float %tmp536, float 0.000000e+00, float 1.000000e+00)
- %tmp538 = fmul float %tmp364, %tmp537
- %tmp539 = fmul float %tmp365, %tmp537
- %tmp540 = fmul float %tmp366, %tmp537
+ %max.0.i13 = call float @llvm.maxnum.f32(float %tmp536, float 0.000000e+00)
+ %clamp.i14 = call float @llvm.minnum.f32(float %max.0.i13, float 1.000000e+00)
+ %tmp538 = fmul float %tmp364, %clamp.i14
+ %tmp539 = fmul float %tmp365, %clamp.i14
+ %tmp540 = fmul float %tmp366, %clamp.i14
%tmp541 = fmul float %tmp538, %tmp68
%tmp542 = fmul float %tmp539, %tmp69
%tmp543 = fmul float %tmp540, %tmp70
- %tmp544 = fsub float -0.000000e+00, %tmp163
+ %tmp544 = fsub float -0.000000e+00, %p2.i
%tmp545 = fadd float %tmp96, %tmp544
- %tmp546 = fsub float -0.000000e+00, %tmp164
+ %tmp546 = fsub float -0.000000e+00, %p2.i186
%tmp547 = fadd float %tmp97, %tmp546
- %tmp548 = fsub float -0.000000e+00, %tmp165
+ %tmp548 = fsub float -0.000000e+00, %p2.i180
%tmp549 = fadd float %tmp98, %tmp548
%tmp550 = fmul float %tmp545, %tmp545
%tmp551 = fmul float %tmp547, %tmp547
@@ -1339,31 +1615,31 @@ ENDIF209: ; preds = %ELSE214, %ELSE211,
%temp69.0 = phi float [ %tmp112, %ENDIF200 ], [ %.231, %ELSE214 ], [ %tmp108, %ELSE211 ]
%temp70.0 = phi float [ %tmp113, %ENDIF200 ], [ %.232, %ELSE214 ], [ %tmp109, %ELSE211 ]
%temp71.0 = phi float [ %tmp114, %ENDIF200 ], [ %.233, %ELSE214 ], [ %tmp110, %ELSE211 ]
- %tmp602 = fmul float %tmp163, %tmp84
- %tmp603 = fmul float %tmp164, %tmp85
+ %tmp602 = fmul float %p2.i, %tmp84
+ %tmp603 = fmul float %p2.i186, %tmp85
%tmp604 = fadd float %tmp602, %tmp603
- %tmp605 = fmul float %tmp165, %tmp86
+ %tmp605 = fmul float %p2.i180, %tmp86
%tmp606 = fadd float %tmp604, %tmp605
- %tmp607 = fmul float %tmp166, %tmp87
+ %tmp607 = fmul float %p2.i174, %tmp87
%tmp608 = fadd float %tmp606, %tmp607
- %tmp609 = fmul float %tmp163, %tmp88
- %tmp610 = fmul float %tmp164, %tmp89
+ %tmp609 = fmul float %p2.i, %tmp88
+ %tmp610 = fmul float %p2.i186, %tmp89
%tmp611 = fadd float %tmp609, %tmp610
- %tmp612 = fmul float %tmp165, %tmp90
+ %tmp612 = fmul float %p2.i180, %tmp90
%tmp613 = fadd float %tmp611, %tmp612
- %tmp614 = fmul float %tmp166, %tmp91
+ %tmp614 = fmul float %p2.i174, %tmp91
%tmp615 = fadd float %tmp613, %tmp614
- %tmp616 = fmul float %tmp163, %tmp92
- %tmp617 = fmul float %tmp164, %tmp93
+ %tmp616 = fmul float %p2.i, %tmp92
+ %tmp617 = fmul float %p2.i186, %tmp93
%tmp618 = fadd float %tmp616, %tmp617
- %tmp619 = fmul float %tmp165, %tmp94
+ %tmp619 = fmul float %p2.i180, %tmp94
%tmp620 = fadd float %tmp618, %tmp619
- %tmp621 = fmul float %tmp166, %tmp95
+ %tmp621 = fmul float %p2.i174, %tmp95
%tmp622 = fadd float %tmp620, %tmp621
%tmp623 = fsub float -0.000000e+00, %tmp77
%tmp624 = fadd float 1.000000e+00, %tmp623
- %tmp625 = call float @fabs(float %tmp608)
- %tmp626 = call float @fabs(float %tmp615)
+ %tmp625 = call float @llvm.fabs.f32(float %tmp608)
+ %tmp626 = call float @llvm.fabs.f32(float %tmp615)
%tmp627 = fcmp oge float %tmp624, %tmp625
%tmp628 = sext i1 %tmp627 to i32
%tmp629 = bitcast i32 %tmp628 to float
@@ -1389,7 +1665,8 @@ ENDIF209: ; preds = %ELSE214, %ELSE211,
%tmp649 = fadd float %temp80.0, -1.000000e+00
%tmp650 = fmul float %tmp649, %tmp76
%tmp651 = fadd float %tmp650, 1.000000e+00
- %tmp652 = call float @llvm.AMDGPU.clamp.f32(float %tmp651, float 0.000000e+00, float 1.000000e+00)
+ %max.0.i11 = call float @llvm.maxnum.f32(float %tmp651, float 0.000000e+00)
+ %clamp.i12 = call float @llvm.minnum.f32(float %max.0.i11, float 1.000000e+00)
%tmp653 = bitcast float %tmp642 to i32
%tmp654 = bitcast float %tmp644 to i32
%tmp655 = bitcast float 0.000000e+00 to i32
@@ -1398,7 +1675,8 @@ ENDIF209: ; preds = %ELSE214, %ELSE211,
%tmp658 = insertelement <4 x i32> %tmp657, i32 %tmp655, i32 2
%tmp659 = insertelement <4 x i32> %tmp658, i32 undef, i32 3
%tmp128.bc = bitcast <16 x i8> %tmp128 to <4 x i32>
- %tmp660 = call <4 x float> @llvm.SI.image.sample.l.v4i32(<4 x i32> %tmp659, <8 x i32> %tmp126, <4 x i32> %tmp128.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp659.bc = bitcast <4 x i32> %tmp659 to <4 x float>
+ %tmp660 = call <4 x float> @llvm.amdgcn.image.sample.l.v4f32.v4f32.v8i32(<4 x float> %tmp659.bc, <8 x i32> %tmp126, <4 x i32> %tmp128.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp661 = extractelement <4 x float> %tmp660, i32 0
%tmp662 = extractelement <4 x float> %tmp660, i32 1
%tmp663 = bitcast float %tmp646 to i32
@@ -1408,7 +1686,8 @@ ENDIF209: ; preds = %ELSE214, %ELSE211,
%tmp667 = insertelement <4 x i32> %tmp666, i32 %tmp664, i32 1
%tmp668 = insertelement <4 x i32> %tmp667, i32 %tmp665, i32 2
%tmp669 = insertelement <4 x i32> %tmp668, i32 undef, i32 3
- %tmp670 = call <4 x float> @llvm.SI.image.sample.l.v4i32(<4 x i32> %tmp669, <8 x i32> %tmp126, <4 x i32> %tmp128.bc, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp669.bc = bitcast <4 x i32> %tmp669 to <4 x float>
+ %tmp670 = call <4 x float> @llvm.amdgcn.image.sample.l.v4f32.v4f32.v8i32(<4 x float> %tmp669.bc, <8 x i32> %tmp126, <4 x i32> %tmp128.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp671 = extractelement <4 x float> %tmp670, i32 0
%tmp672 = extractelement <4 x float> %tmp670, i32 1
%tmp673 = fsub float -0.000000e+00, %tmp662
@@ -1425,11 +1704,13 @@ ENDIF209: ; preds = %ELSE214, %ELSE211,
%tmp684 = fadd float %tmp683, %temp89.0
%tmp685 = fmul float %tmp640, %temp90.0
%tmp686 = fadd float %tmp685, %temp91.0
- %tmp687 = call float @llvm.AMDGPU.clamp.f32(float %tmp684, float 0.000000e+00, float 1.000000e+00)
- %tmp688 = call float @llvm.AMDGPU.clamp.f32(float %tmp686, float 0.000000e+00, float 1.000000e+00)
- %tmp689 = fsub float -0.000000e+00, %tmp687
+ %max.0.i9 = call float @llvm.maxnum.f32(float %tmp684, float 0.000000e+00)
+ %clamp.i10 = call float @llvm.minnum.f32(float %max.0.i9, float 1.000000e+00)
+ %max.0.i7 = call float @llvm.maxnum.f32(float %tmp686, float 0.000000e+00)
+ %clamp.i8 = call float @llvm.minnum.f32(float %max.0.i7, float 1.000000e+00)
+ %tmp689 = fsub float -0.000000e+00, %clamp.i10
%tmp690 = fadd float %tmp661, %tmp689
- %tmp691 = fsub float -0.000000e+00, %tmp688
+ %tmp691 = fsub float -0.000000e+00, %clamp.i8
%tmp692 = fadd float %tmp671, %tmp691
%tmp693 = fmul float %tmp661, %tmp661
%tmp694 = fmul float %tmp671, %tmp671
@@ -1461,16 +1742,17 @@ ENDIF209: ; preds = %ELSE214, %ELSE211,
%tmp719 = bitcast float %tmp718 to i32
%tmp720 = icmp ne i32 %tmp719, 0
%temp28.0 = select i1 %tmp720, float 1.000000e+00, float %tmp710
- %one.sub.a.i25 = fsub float 1.000000e+00, %tmp652
+ %one.sub.a.i25 = fsub float 1.000000e+00, %clamp.i12
%one.sub.ac.i26 = fmul float %one.sub.a.i25, %.229
%mul.i27 = fmul float %temp28.0, %.229
%result.i28 = fadd float %mul.i27, %one.sub.ac.i26
%tmp721 = call float @llvm.pow.f32(float %result.i28, float %tmp75)
%tmp722 = fmul float %tmp721, %tmp78
%tmp723 = fadd float %tmp722, %tmp79
- %tmp724 = call float @llvm.AMDGPU.clamp.f32(float %tmp723, float 0.000000e+00, float 1.000000e+00)
- %tmp725 = fmul float %tmp724, %tmp724
- %tmp726 = fmul float 2.000000e+00, %tmp724
+ %max.0.i5 = call float @llvm.maxnum.f32(float %tmp723, float 0.000000e+00)
+ %clamp.i6 = call float @llvm.minnum.f32(float %max.0.i5, float 1.000000e+00)
+ %tmp725 = fmul float %clamp.i6, %clamp.i6
+ %tmp726 = fmul float 2.000000e+00, %clamp.i6
%tmp727 = fsub float -0.000000e+00, %tmp726
%tmp728 = fadd float 3.000000e+00, %tmp727
%tmp729 = fmul float %tmp725, %tmp728
@@ -1504,12 +1786,13 @@ ENDIF209: ; preds = %ELSE214, %ELSE211,
%tmp747 = fadd float %tmp746, %tmp745
%tmp748 = fmul float %temp14.0, %tmp217
%tmp749 = fadd float %tmp747, %tmp748
- %tmp750 = call float @fabs(float %tmp749)
+ %tmp750 = call float @llvm.fabs.f32(float %tmp749)
%tmp751 = fmul float %tmp750, %tmp750
%tmp752 = fmul float %tmp751, %tmp49
%tmp753 = fadd float %tmp752, %tmp50
- %tmp754 = call float @llvm.AMDGPU.clamp.f32(float %tmp753, float 0.000000e+00, float 1.000000e+00)
- %tmp755 = fsub float -0.000000e+00, %tmp754
+ %max.0.i3 = call float @llvm.maxnum.f32(float %tmp753, float 0.000000e+00)
+ %clamp.i4 = call float @llvm.minnum.f32(float %max.0.i3, float 1.000000e+00)
+ %tmp755 = fsub float -0.000000e+00, %clamp.i4
%tmp756 = fadd float 1.000000e+00, %tmp755
%tmp757 = fmul float %tmp32, %tmp756
%tmp758 = fmul float %tmp32, %tmp756
@@ -1545,12 +1828,11 @@ ENDIF209: ; preds = %ELSE214, %ELSE211,
%tmp772 = select i1 %tmp771, float 6.550400e+04, float %tmp766
%tmp773 = fmul float %result.i2, %tmp51
%tmp774 = fadd float %tmp773, %tmp52
- %tmp775 = call float @llvm.AMDGPU.clamp.f32(float %tmp774, float 0.000000e+00, float 1.000000e+00)
- %tmp776 = call i32 @llvm.SI.packf16(float %tmp768, float %tmp770)
- %tmp777 = bitcast i32 %tmp776 to float
- %tmp778 = call i32 @llvm.SI.packf16(float %tmp772, float %tmp775)
- %tmp779 = bitcast i32 %tmp778 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %tmp777, float %tmp779, float %tmp777, float %tmp779)
+ %max.0.i1 = call float @llvm.maxnum.f32(float %tmp774, float 0.000000e+00)
+ %clamp.i2 = call float @llvm.minnum.f32(float %max.0.i1, float 1.000000e+00)
+ %tmp776 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %tmp768, float %tmp770)
+ %tmp778 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %tmp772, float %clamp.i2)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp776, <2 x half> %tmp778, i1 true, i1 true) #0
ret void
ELSE214: ; preds = %ELSE211
@@ -1566,57 +1848,32 @@ ELSE214: ; preds = %ELSE211
br label %ENDIF209
}
-; Function Attrs: readnone
-declare float @llvm.AMDGPU.clamp.f32(float, float, float) #1
-
-; Function Attrs: nounwind readnone
-declare <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #2
-
-; Function Attrs: nounwind readnone
-declare <4 x float> @llvm.SI.image.sample.l.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #2
-
-
-declare float @llvm.exp2.f32(float) #2
-
-; Function Attrs: nounwind readnone
-declare float @llvm.SI.load.const(<16 x i8>, i32) #2
-
-; Function Attrs: nounwind readnone
-declare float @llvm.SI.fs.interp(i32, i32, i32, <2 x i32>) #2
-
+declare float @llvm.exp2.f32(float) #1
+declare float @llvm.ceil.f32(float) #1
+declare float @llvm.fabs.f32(float) #1
+declare float @llvm.pow.f32(float, float) #1
+declare float @llvm.minnum.f32(float, float) #1
+declare float @llvm.maxnum.f32(float, float) #1
+declare float @llvm.amdgcn.rsq.f32(float) #1
+declare float @llvm.amdgcn.cubeid(float, float, float) #1
+declare float @llvm.amdgcn.cubesc(float, float, float) #1
+declare float @llvm.amdgcn.cubetc(float, float, float) #1
+declare float @llvm.amdgcn.cubema(float, float, float) #1
+declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #1
+declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #1
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) #1
+declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
+declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
+declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
+declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
+declare <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
+declare <4 x float> @llvm.amdgcn.image.sample.l.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
+declare float @llvm.SI.load.const(<16 x i8>, i32) #1
-; Function Attrs: nounwind readonly
-declare float @ceil(float) #3
-
-; Function Attrs: nounwind readnone
-declare float @llvm.amdgcn.rsq.f32(float) #2
-
-; Function Attrs: nounwind readnone
-declare <4 x float> @llvm.SI.image.sample.d.v8i32(<8 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #2
-
-; Function Attrs: readnone
-declare <4 x float> @llvm.AMDGPU.cube(<4 x float>) #1
-
-; Function Attrs: readnone
-declare float @fabs(float) #1
-
-; Function Attrs: nounwind readnone
-declare <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #2
-
-
-; Function Attrs: nounwind readnone
-declare float @llvm.pow.f32(float, float) #2
-
-; Function Attrs: nounwind readnone
-declare i32 @llvm.SI.packf16(float, float) #2
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-
-attributes #1 = { readnone }
-attributes #2 = { nounwind readnone }
-attributes #3 = { nounwind readonly }
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind readonly }
!0 = !{!1, !1, i64 0, i32 1}
!1 = !{!"const", !2}
diff --git a/test/CodeGen/AMDGPU/si-spill-cf.ll b/test/CodeGen/AMDGPU/si-spill-cf.ll
index 06f9277080a8..926702645d9e 100644
--- a/test/CodeGen/AMDGPU/si-spill-cf.ll
+++ b/test/CodeGen/AMDGPU/si-spill-cf.ll
@@ -6,270 +6,271 @@
; SI: s_or_b64 exec, exec, [[SAVED:s\[[0-9]+:[0-9]+\]|[a-z]+]]
; SI-NOT: v_readlane_b32 [[SAVED]]
+
define amdgpu_ps void @main() #0 {
main_body:
- %0 = call float @llvm.SI.load.const(<16 x i8> undef, i32 16)
- %1 = call float @llvm.SI.load.const(<16 x i8> undef, i32 32)
- %2 = call float @llvm.SI.load.const(<16 x i8> undef, i32 80)
- %3 = call float @llvm.SI.load.const(<16 x i8> undef, i32 84)
- %4 = call float @llvm.SI.load.const(<16 x i8> undef, i32 88)
- %5 = call float @llvm.SI.load.const(<16 x i8> undef, i32 96)
- %6 = call float @llvm.SI.load.const(<16 x i8> undef, i32 100)
- %7 = call float @llvm.SI.load.const(<16 x i8> undef, i32 104)
- %8 = call float @llvm.SI.load.const(<16 x i8> undef, i32 112)
- %9 = call float @llvm.SI.load.const(<16 x i8> undef, i32 116)
- %10 = call float @llvm.SI.load.const(<16 x i8> undef, i32 120)
- %11 = call float @llvm.SI.load.const(<16 x i8> undef, i32 128)
- %12 = call float @llvm.SI.load.const(<16 x i8> undef, i32 132)
- %13 = call float @llvm.SI.load.const(<16 x i8> undef, i32 136)
- %14 = call float @llvm.SI.load.const(<16 x i8> undef, i32 144)
- %15 = call float @llvm.SI.load.const(<16 x i8> undef, i32 148)
- %16 = call float @llvm.SI.load.const(<16 x i8> undef, i32 152)
- %17 = call float @llvm.SI.load.const(<16 x i8> undef, i32 160)
- %18 = call float @llvm.SI.load.const(<16 x i8> undef, i32 164)
- %19 = call float @llvm.SI.load.const(<16 x i8> undef, i32 168)
- %20 = call float @llvm.SI.load.const(<16 x i8> undef, i32 176)
- %21 = call float @llvm.SI.load.const(<16 x i8> undef, i32 180)
- %22 = call float @llvm.SI.load.const(<16 x i8> undef, i32 184)
- %23 = call float @llvm.SI.load.const(<16 x i8> undef, i32 192)
- %24 = call float @llvm.SI.load.const(<16 x i8> undef, i32 196)
- %25 = call float @llvm.SI.load.const(<16 x i8> undef, i32 200)
- %26 = call float @llvm.SI.load.const(<16 x i8> undef, i32 208)
- %27 = call float @llvm.SI.load.const(<16 x i8> undef, i32 212)
- %28 = call float @llvm.SI.load.const(<16 x i8> undef, i32 216)
- %29 = call float @llvm.SI.load.const(<16 x i8> undef, i32 224)
- %30 = call float @llvm.SI.load.const(<16 x i8> undef, i32 228)
- %31 = call float @llvm.SI.load.const(<16 x i8> undef, i32 232)
- %32 = call float @llvm.SI.load.const(<16 x i8> undef, i32 240)
- %33 = call float @llvm.SI.load.const(<16 x i8> undef, i32 244)
- %34 = call float @llvm.SI.load.const(<16 x i8> undef, i32 248)
- %35 = call float @llvm.SI.load.const(<16 x i8> undef, i32 256)
- %36 = call float @llvm.SI.load.const(<16 x i8> undef, i32 260)
- %37 = call float @llvm.SI.load.const(<16 x i8> undef, i32 264)
- %38 = call float @llvm.SI.load.const(<16 x i8> undef, i32 272)
- %39 = call float @llvm.SI.load.const(<16 x i8> undef, i32 276)
- %40 = call float @llvm.SI.load.const(<16 x i8> undef, i32 280)
- %41 = call float @llvm.SI.load.const(<16 x i8> undef, i32 288)
- %42 = call float @llvm.SI.load.const(<16 x i8> undef, i32 292)
- %43 = call float @llvm.SI.load.const(<16 x i8> undef, i32 296)
- %44 = call float @llvm.SI.load.const(<16 x i8> undef, i32 304)
- %45 = call float @llvm.SI.load.const(<16 x i8> undef, i32 308)
- %46 = call float @llvm.SI.load.const(<16 x i8> undef, i32 312)
- %47 = call float @llvm.SI.load.const(<16 x i8> undef, i32 320)
- %48 = call float @llvm.SI.load.const(<16 x i8> undef, i32 324)
- %49 = call float @llvm.SI.load.const(<16 x i8> undef, i32 328)
- %50 = call float @llvm.SI.load.const(<16 x i8> undef, i32 336)
- %51 = call float @llvm.SI.load.const(<16 x i8> undef, i32 340)
- %52 = call float @llvm.SI.load.const(<16 x i8> undef, i32 344)
- %53 = call float @llvm.SI.load.const(<16 x i8> undef, i32 352)
- %54 = call float @llvm.SI.load.const(<16 x i8> undef, i32 356)
- %55 = call float @llvm.SI.load.const(<16 x i8> undef, i32 360)
- %56 = call float @llvm.SI.load.const(<16 x i8> undef, i32 368)
- %57 = call float @llvm.SI.load.const(<16 x i8> undef, i32 372)
- %58 = call float @llvm.SI.load.const(<16 x i8> undef, i32 376)
- %59 = call float @llvm.SI.load.const(<16 x i8> undef, i32 384)
- %60 = call float @llvm.SI.load.const(<16 x i8> undef, i32 388)
- %61 = call float @llvm.SI.load.const(<16 x i8> undef, i32 392)
- %62 = call float @llvm.SI.load.const(<16 x i8> undef, i32 400)
- %63 = call float @llvm.SI.load.const(<16 x i8> undef, i32 404)
- %64 = call float @llvm.SI.load.const(<16 x i8> undef, i32 408)
- %65 = call float @llvm.SI.load.const(<16 x i8> undef, i32 416)
- %66 = call float @llvm.SI.load.const(<16 x i8> undef, i32 420)
+ %tmp = call float @llvm.SI.load.const(<16 x i8> undef, i32 16)
+ %tmp1 = call float @llvm.SI.load.const(<16 x i8> undef, i32 32)
+ %tmp2 = call float @llvm.SI.load.const(<16 x i8> undef, i32 80)
+ %tmp3 = call float @llvm.SI.load.const(<16 x i8> undef, i32 84)
+ %tmp4 = call float @llvm.SI.load.const(<16 x i8> undef, i32 88)
+ %tmp5 = call float @llvm.SI.load.const(<16 x i8> undef, i32 96)
+ %tmp6 = call float @llvm.SI.load.const(<16 x i8> undef, i32 100)
+ %tmp7 = call float @llvm.SI.load.const(<16 x i8> undef, i32 104)
+ %tmp8 = call float @llvm.SI.load.const(<16 x i8> undef, i32 112)
+ %tmp9 = call float @llvm.SI.load.const(<16 x i8> undef, i32 116)
+ %tmp10 = call float @llvm.SI.load.const(<16 x i8> undef, i32 120)
+ %tmp11 = call float @llvm.SI.load.const(<16 x i8> undef, i32 128)
+ %tmp12 = call float @llvm.SI.load.const(<16 x i8> undef, i32 132)
+ %tmp13 = call float @llvm.SI.load.const(<16 x i8> undef, i32 136)
+ %tmp14 = call float @llvm.SI.load.const(<16 x i8> undef, i32 144)
+ %tmp15 = call float @llvm.SI.load.const(<16 x i8> undef, i32 148)
+ %tmp16 = call float @llvm.SI.load.const(<16 x i8> undef, i32 152)
+ %tmp17 = call float @llvm.SI.load.const(<16 x i8> undef, i32 160)
+ %tmp18 = call float @llvm.SI.load.const(<16 x i8> undef, i32 164)
+ %tmp19 = call float @llvm.SI.load.const(<16 x i8> undef, i32 168)
+ %tmp20 = call float @llvm.SI.load.const(<16 x i8> undef, i32 176)
+ %tmp21 = call float @llvm.SI.load.const(<16 x i8> undef, i32 180)
+ %tmp22 = call float @llvm.SI.load.const(<16 x i8> undef, i32 184)
+ %tmp23 = call float @llvm.SI.load.const(<16 x i8> undef, i32 192)
+ %tmp24 = call float @llvm.SI.load.const(<16 x i8> undef, i32 196)
+ %tmp25 = call float @llvm.SI.load.const(<16 x i8> undef, i32 200)
+ %tmp26 = call float @llvm.SI.load.const(<16 x i8> undef, i32 208)
+ %tmp27 = call float @llvm.SI.load.const(<16 x i8> undef, i32 212)
+ %tmp28 = call float @llvm.SI.load.const(<16 x i8> undef, i32 216)
+ %tmp29 = call float @llvm.SI.load.const(<16 x i8> undef, i32 224)
+ %tmp30 = call float @llvm.SI.load.const(<16 x i8> undef, i32 228)
+ %tmp31 = call float @llvm.SI.load.const(<16 x i8> undef, i32 232)
+ %tmp32 = call float @llvm.SI.load.const(<16 x i8> undef, i32 240)
+ %tmp33 = call float @llvm.SI.load.const(<16 x i8> undef, i32 244)
+ %tmp34 = call float @llvm.SI.load.const(<16 x i8> undef, i32 248)
+ %tmp35 = call float @llvm.SI.load.const(<16 x i8> undef, i32 256)
+ %tmp36 = call float @llvm.SI.load.const(<16 x i8> undef, i32 260)
+ %tmp37 = call float @llvm.SI.load.const(<16 x i8> undef, i32 264)
+ %tmp38 = call float @llvm.SI.load.const(<16 x i8> undef, i32 272)
+ %tmp39 = call float @llvm.SI.load.const(<16 x i8> undef, i32 276)
+ %tmp40 = call float @llvm.SI.load.const(<16 x i8> undef, i32 280)
+ %tmp41 = call float @llvm.SI.load.const(<16 x i8> undef, i32 288)
+ %tmp42 = call float @llvm.SI.load.const(<16 x i8> undef, i32 292)
+ %tmp43 = call float @llvm.SI.load.const(<16 x i8> undef, i32 296)
+ %tmp44 = call float @llvm.SI.load.const(<16 x i8> undef, i32 304)
+ %tmp45 = call float @llvm.SI.load.const(<16 x i8> undef, i32 308)
+ %tmp46 = call float @llvm.SI.load.const(<16 x i8> undef, i32 312)
+ %tmp47 = call float @llvm.SI.load.const(<16 x i8> undef, i32 320)
+ %tmp48 = call float @llvm.SI.load.const(<16 x i8> undef, i32 324)
+ %tmp49 = call float @llvm.SI.load.const(<16 x i8> undef, i32 328)
+ %tmp50 = call float @llvm.SI.load.const(<16 x i8> undef, i32 336)
+ %tmp51 = call float @llvm.SI.load.const(<16 x i8> undef, i32 340)
+ %tmp52 = call float @llvm.SI.load.const(<16 x i8> undef, i32 344)
+ %tmp53 = call float @llvm.SI.load.const(<16 x i8> undef, i32 352)
+ %tmp54 = call float @llvm.SI.load.const(<16 x i8> undef, i32 356)
+ %tmp55 = call float @llvm.SI.load.const(<16 x i8> undef, i32 360)
+ %tmp56 = call float @llvm.SI.load.const(<16 x i8> undef, i32 368)
+ %tmp57 = call float @llvm.SI.load.const(<16 x i8> undef, i32 372)
+ %tmp58 = call float @llvm.SI.load.const(<16 x i8> undef, i32 376)
+ %tmp59 = call float @llvm.SI.load.const(<16 x i8> undef, i32 384)
+ %tmp60 = call float @llvm.SI.load.const(<16 x i8> undef, i32 388)
+ %tmp61 = call float @llvm.SI.load.const(<16 x i8> undef, i32 392)
+ %tmp62 = call float @llvm.SI.load.const(<16 x i8> undef, i32 400)
+ %tmp63 = call float @llvm.SI.load.const(<16 x i8> undef, i32 404)
+ %tmp64 = call float @llvm.SI.load.const(<16 x i8> undef, i32 408)
+ %tmp65 = call float @llvm.SI.load.const(<16 x i8> undef, i32 416)
+ %tmp66 = call float @llvm.SI.load.const(<16 x i8> undef, i32 420)
br label %LOOP
LOOP: ; preds = %ENDIF2795, %main_body
%temp894.0 = phi float [ 0.000000e+00, %main_body ], [ %temp894.1, %ENDIF2795 ]
%temp18.0 = phi float [ undef, %main_body ], [ %temp18.1, %ENDIF2795 ]
%tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
- %67 = icmp sgt i32 %tid, 4
- br i1 %67, label %ENDLOOP, label %ENDIF
+ %tmp67 = icmp sgt i32 %tid, 4
+ br i1 %tmp67, label %ENDLOOP, label %ENDIF
ENDLOOP: ; preds = %ELSE2566, %LOOP
- %one.sub.a.i = fsub float 1.000000e+00, %0
+ %one.sub.a.i = fsub float 1.000000e+00, %tmp
%one.sub.ac.i = fmul float %one.sub.a.i, undef
%result.i = fadd float fmul (float undef, float undef), %one.sub.ac.i
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float undef, float %result.i, float undef, float 1.000000e+00)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float undef, float %result.i, float undef, float 1.000000e+00, i1 true, i1 true) #0
ret void
ENDIF: ; preds = %LOOP
- %68 = fsub float %2, undef
- %69 = fsub float %3, undef
- %70 = fsub float %4, undef
- %71 = fmul float %68, 0.000000e+00
- %72 = fmul float %69, undef
- %73 = fmul float %70, undef
- %74 = fsub float %6, undef
- %75 = fsub float %7, undef
- %76 = fmul float %74, undef
- %77 = fmul float %75, 0.000000e+00
- %78 = call float @llvm.minnum.f32(float %73, float %77)
- %79 = call float @llvm.maxnum.f32(float %71, float 0.000000e+00)
- %80 = call float @llvm.maxnum.f32(float %72, float %76)
- %81 = call float @llvm.maxnum.f32(float undef, float %78)
- %82 = call float @llvm.minnum.f32(float %79, float %80)
- %83 = call float @llvm.minnum.f32(float %82, float undef)
- %84 = fsub float %14, undef
- %85 = fsub float %15, undef
- %86 = fsub float %16, undef
- %87 = fmul float %84, undef
- %88 = fmul float %85, undef
- %89 = fmul float %86, undef
- %90 = fsub float %17, undef
- %91 = fsub float %18, undef
- %92 = fsub float %19, undef
- %93 = fmul float %90, 0.000000e+00
- %94 = fmul float %91, undef
- %95 = fmul float %92, undef
- %96 = call float @llvm.minnum.f32(float %88, float %94)
- %97 = call float @llvm.maxnum.f32(float %87, float %93)
- %98 = call float @llvm.maxnum.f32(float %89, float %95)
- %99 = call float @llvm.maxnum.f32(float undef, float %96)
- %100 = call float @llvm.maxnum.f32(float %99, float undef)
- %101 = call float @llvm.minnum.f32(float %97, float undef)
- %102 = call float @llvm.minnum.f32(float %101, float %98)
- %103 = fsub float %30, undef
- %104 = fsub float %31, undef
- %105 = fmul float %103, 0.000000e+00
- %106 = fmul float %104, 0.000000e+00
- %107 = call float @llvm.minnum.f32(float undef, float %105)
- %108 = call float @llvm.maxnum.f32(float undef, float %106)
- %109 = call float @llvm.maxnum.f32(float undef, float %107)
- %110 = call float @llvm.maxnum.f32(float %109, float undef)
- %111 = call float @llvm.minnum.f32(float undef, float %108)
- %112 = fsub float %32, undef
- %113 = fsub float %33, undef
- %114 = fsub float %34, undef
- %115 = fmul float %112, 0.000000e+00
- %116 = fmul float %113, undef
- %117 = fmul float %114, undef
- %118 = fsub float %35, undef
- %119 = fsub float %36, undef
- %120 = fsub float %37, undef
- %121 = fmul float %118, undef
- %122 = fmul float %119, undef
- %123 = fmul float %120, undef
- %124 = call float @llvm.minnum.f32(float %115, float %121)
- %125 = call float @llvm.minnum.f32(float %116, float %122)
- %126 = call float @llvm.minnum.f32(float %117, float %123)
- %127 = call float @llvm.maxnum.f32(float %124, float %125)
- %128 = call float @llvm.maxnum.f32(float %127, float %126)
- %129 = fsub float %38, undef
- %130 = fsub float %39, undef
- %131 = fsub float %40, undef
- %132 = fmul float %129, 0.000000e+00
- %133 = fmul float %130, undef
- %134 = fmul float %131, undef
- %135 = fsub float %41, undef
- %136 = fsub float %42, undef
- %137 = fsub float %43, undef
- %138 = fmul float %135, undef
- %139 = fmul float %136, undef
- %140 = fmul float %137, undef
- %141 = call float @llvm.minnum.f32(float %132, float %138)
- %142 = call float @llvm.minnum.f32(float %133, float %139)
- %143 = call float @llvm.minnum.f32(float %134, float %140)
- %144 = call float @llvm.maxnum.f32(float %141, float %142)
- %145 = call float @llvm.maxnum.f32(float %144, float %143)
- %146 = fsub float %44, undef
- %147 = fsub float %45, undef
- %148 = fsub float %46, undef
- %149 = fmul float %146, 0.000000e+00
- %150 = fmul float %147, 0.000000e+00
- %151 = fmul float %148, undef
- %152 = fsub float %47, undef
- %153 = fsub float %48, undef
- %154 = fsub float %49, undef
- %155 = fmul float %152, undef
- %156 = fmul float %153, 0.000000e+00
- %157 = fmul float %154, undef
- %158 = call float @llvm.minnum.f32(float %149, float %155)
- %159 = call float @llvm.minnum.f32(float %150, float %156)
- %160 = call float @llvm.minnum.f32(float %151, float %157)
- %161 = call float @llvm.maxnum.f32(float %158, float %159)
- %162 = call float @llvm.maxnum.f32(float %161, float %160)
- %163 = fsub float %50, undef
- %164 = fsub float %51, undef
- %165 = fsub float %52, undef
- %166 = fmul float %163, undef
- %167 = fmul float %164, 0.000000e+00
- %168 = fmul float %165, 0.000000e+00
- %169 = fsub float %53, undef
- %170 = fsub float %54, undef
- %171 = fsub float %55, undef
- %172 = fdiv float 1.000000e+00, %temp18.0
- %173 = fmul float %169, undef
- %174 = fmul float %170, undef
- %175 = fmul float %171, %172
- %176 = call float @llvm.minnum.f32(float %166, float %173)
- %177 = call float @llvm.minnum.f32(float %167, float %174)
- %178 = call float @llvm.minnum.f32(float %168, float %175)
- %179 = call float @llvm.maxnum.f32(float %176, float %177)
- %180 = call float @llvm.maxnum.f32(float %179, float %178)
- %181 = fsub float %62, undef
- %182 = fsub float %63, undef
- %183 = fsub float %64, undef
- %184 = fmul float %181, 0.000000e+00
- %185 = fmul float %182, undef
- %186 = fmul float %183, undef
- %187 = fsub float %65, undef
- %188 = fsub float %66, undef
- %189 = fmul float %187, undef
- %190 = fmul float %188, undef
- %191 = call float @llvm.maxnum.f32(float %184, float %189)
- %192 = call float @llvm.maxnum.f32(float %185, float %190)
- %193 = call float @llvm.maxnum.f32(float %186, float undef)
- %194 = call float @llvm.minnum.f32(float %191, float %192)
- %195 = call float @llvm.minnum.f32(float %194, float %193)
- %.temp292.7 = select i1 undef, float %162, float undef
- %temp292.9 = select i1 false, float %180, float %.temp292.7
+ %tmp68 = fsub float %tmp2, undef
+ %tmp69 = fsub float %tmp3, undef
+ %tmp70 = fsub float %tmp4, undef
+ %tmp71 = fmul float %tmp68, 0.000000e+00
+ %tmp72 = fmul float %tmp69, undef
+ %tmp73 = fmul float %tmp70, undef
+ %tmp74 = fsub float %tmp6, undef
+ %tmp75 = fsub float %tmp7, undef
+ %tmp76 = fmul float %tmp74, undef
+ %tmp77 = fmul float %tmp75, 0.000000e+00
+ %tmp78 = call float @llvm.minnum.f32(float %tmp73, float %tmp77)
+ %tmp79 = call float @llvm.maxnum.f32(float %tmp71, float 0.000000e+00)
+ %tmp80 = call float @llvm.maxnum.f32(float %tmp72, float %tmp76)
+ %tmp81 = call float @llvm.maxnum.f32(float undef, float %tmp78)
+ %tmp82 = call float @llvm.minnum.f32(float %tmp79, float %tmp80)
+ %tmp83 = call float @llvm.minnum.f32(float %tmp82, float undef)
+ %tmp84 = fsub float %tmp14, undef
+ %tmp85 = fsub float %tmp15, undef
+ %tmp86 = fsub float %tmp16, undef
+ %tmp87 = fmul float %tmp84, undef
+ %tmp88 = fmul float %tmp85, undef
+ %tmp89 = fmul float %tmp86, undef
+ %tmp90 = fsub float %tmp17, undef
+ %tmp91 = fsub float %tmp18, undef
+ %tmp92 = fsub float %tmp19, undef
+ %tmp93 = fmul float %tmp90, 0.000000e+00
+ %tmp94 = fmul float %tmp91, undef
+ %tmp95 = fmul float %tmp92, undef
+ %tmp96 = call float @llvm.minnum.f32(float %tmp88, float %tmp94)
+ %tmp97 = call float @llvm.maxnum.f32(float %tmp87, float %tmp93)
+ %tmp98 = call float @llvm.maxnum.f32(float %tmp89, float %tmp95)
+ %tmp99 = call float @llvm.maxnum.f32(float undef, float %tmp96)
+ %tmp100 = call float @llvm.maxnum.f32(float %tmp99, float undef)
+ %tmp101 = call float @llvm.minnum.f32(float %tmp97, float undef)
+ %tmp102 = call float @llvm.minnum.f32(float %tmp101, float %tmp98)
+ %tmp103 = fsub float %tmp30, undef
+ %tmp104 = fsub float %tmp31, undef
+ %tmp105 = fmul float %tmp103, 0.000000e+00
+ %tmp106 = fmul float %tmp104, 0.000000e+00
+ %tmp107 = call float @llvm.minnum.f32(float undef, float %tmp105)
+ %tmp108 = call float @llvm.maxnum.f32(float undef, float %tmp106)
+ %tmp109 = call float @llvm.maxnum.f32(float undef, float %tmp107)
+ %tmp110 = call float @llvm.maxnum.f32(float %tmp109, float undef)
+ %tmp111 = call float @llvm.minnum.f32(float undef, float %tmp108)
+ %tmp112 = fsub float %tmp32, undef
+ %tmp113 = fsub float %tmp33, undef
+ %tmp114 = fsub float %tmp34, undef
+ %tmp115 = fmul float %tmp112, 0.000000e+00
+ %tmp116 = fmul float %tmp113, undef
+ %tmp117 = fmul float %tmp114, undef
+ %tmp118 = fsub float %tmp35, undef
+ %tmp119 = fsub float %tmp36, undef
+ %tmp120 = fsub float %tmp37, undef
+ %tmp121 = fmul float %tmp118, undef
+ %tmp122 = fmul float %tmp119, undef
+ %tmp123 = fmul float %tmp120, undef
+ %tmp124 = call float @llvm.minnum.f32(float %tmp115, float %tmp121)
+ %tmp125 = call float @llvm.minnum.f32(float %tmp116, float %tmp122)
+ %tmp126 = call float @llvm.minnum.f32(float %tmp117, float %tmp123)
+ %tmp127 = call float @llvm.maxnum.f32(float %tmp124, float %tmp125)
+ %tmp128 = call float @llvm.maxnum.f32(float %tmp127, float %tmp126)
+ %tmp129 = fsub float %tmp38, undef
+ %tmp130 = fsub float %tmp39, undef
+ %tmp131 = fsub float %tmp40, undef
+ %tmp132 = fmul float %tmp129, 0.000000e+00
+ %tmp133 = fmul float %tmp130, undef
+ %tmp134 = fmul float %tmp131, undef
+ %tmp135 = fsub float %tmp41, undef
+ %tmp136 = fsub float %tmp42, undef
+ %tmp137 = fsub float %tmp43, undef
+ %tmp138 = fmul float %tmp135, undef
+ %tmp139 = fmul float %tmp136, undef
+ %tmp140 = fmul float %tmp137, undef
+ %tmp141 = call float @llvm.minnum.f32(float %tmp132, float %tmp138)
+ %tmp142 = call float @llvm.minnum.f32(float %tmp133, float %tmp139)
+ %tmp143 = call float @llvm.minnum.f32(float %tmp134, float %tmp140)
+ %tmp144 = call float @llvm.maxnum.f32(float %tmp141, float %tmp142)
+ %tmp145 = call float @llvm.maxnum.f32(float %tmp144, float %tmp143)
+ %tmp146 = fsub float %tmp44, undef
+ %tmp147 = fsub float %tmp45, undef
+ %tmp148 = fsub float %tmp46, undef
+ %tmp149 = fmul float %tmp146, 0.000000e+00
+ %tmp150 = fmul float %tmp147, 0.000000e+00
+ %tmp151 = fmul float %tmp148, undef
+ %tmp152 = fsub float %tmp47, undef
+ %tmp153 = fsub float %tmp48, undef
+ %tmp154 = fsub float %tmp49, undef
+ %tmp155 = fmul float %tmp152, undef
+ %tmp156 = fmul float %tmp153, 0.000000e+00
+ %tmp157 = fmul float %tmp154, undef
+ %tmp158 = call float @llvm.minnum.f32(float %tmp149, float %tmp155)
+ %tmp159 = call float @llvm.minnum.f32(float %tmp150, float %tmp156)
+ %tmp160 = call float @llvm.minnum.f32(float %tmp151, float %tmp157)
+ %tmp161 = call float @llvm.maxnum.f32(float %tmp158, float %tmp159)
+ %tmp162 = call float @llvm.maxnum.f32(float %tmp161, float %tmp160)
+ %tmp163 = fsub float %tmp50, undef
+ %tmp164 = fsub float %tmp51, undef
+ %tmp165 = fsub float %tmp52, undef
+ %tmp166 = fmul float %tmp163, undef
+ %tmp167 = fmul float %tmp164, 0.000000e+00
+ %tmp168 = fmul float %tmp165, 0.000000e+00
+ %tmp169 = fsub float %tmp53, undef
+ %tmp170 = fsub float %tmp54, undef
+ %tmp171 = fsub float %tmp55, undef
+ %tmp172 = fdiv float 1.000000e+00, %temp18.0
+ %tmp173 = fmul float %tmp169, undef
+ %tmp174 = fmul float %tmp170, undef
+ %tmp175 = fmul float %tmp171, %tmp172
+ %tmp176 = call float @llvm.minnum.f32(float %tmp166, float %tmp173)
+ %tmp177 = call float @llvm.minnum.f32(float %tmp167, float %tmp174)
+ %tmp178 = call float @llvm.minnum.f32(float %tmp168, float %tmp175)
+ %tmp179 = call float @llvm.maxnum.f32(float %tmp176, float %tmp177)
+ %tmp180 = call float @llvm.maxnum.f32(float %tmp179, float %tmp178)
+ %tmp181 = fsub float %tmp62, undef
+ %tmp182 = fsub float %tmp63, undef
+ %tmp183 = fsub float %tmp64, undef
+ %tmp184 = fmul float %tmp181, 0.000000e+00
+ %tmp185 = fmul float %tmp182, undef
+ %tmp186 = fmul float %tmp183, undef
+ %tmp187 = fsub float %tmp65, undef
+ %tmp188 = fsub float %tmp66, undef
+ %tmp189 = fmul float %tmp187, undef
+ %tmp190 = fmul float %tmp188, undef
+ %tmp191 = call float @llvm.maxnum.f32(float %tmp184, float %tmp189)
+ %tmp192 = call float @llvm.maxnum.f32(float %tmp185, float %tmp190)
+ %tmp193 = call float @llvm.maxnum.f32(float %tmp186, float undef)
+ %tmp194 = call float @llvm.minnum.f32(float %tmp191, float %tmp192)
+ %tmp195 = call float @llvm.minnum.f32(float %tmp194, float %tmp193)
+ %.temp292.7 = select i1 undef, float %tmp162, float undef
+ %temp292.9 = select i1 false, float %tmp180, float %.temp292.7
%.temp292.9 = select i1 undef, float undef, float %temp292.9
- %196 = fcmp ogt float undef, 0.000000e+00
- %197 = fcmp olt float undef, %195
- %198 = and i1 %196, %197
- %199 = fcmp olt float undef, %.temp292.9
- %200 = and i1 %198, %199
- %temp292.11 = select i1 %200, float undef, float %.temp292.9
- %tid0 = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #2
+ %tmp196 = fcmp ogt float undef, 0.000000e+00
+ %tmp197 = fcmp olt float undef, %tmp195
+ %tmp198 = and i1 %tmp196, %tmp197
+ %tmp199 = fcmp olt float undef, %.temp292.9
+ %tmp200 = and i1 %tmp198, %tmp199
+ %temp292.11 = select i1 %tmp200, float undef, float %.temp292.9
+ %tid0 = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
%cmp0 = icmp eq i32 %tid0, 0
br i1 %cmp0, label %IF2565, label %ELSE2566
IF2565: ; preds = %ENDIF
- %tid1 = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #2
+ %tid1 = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
%cmp1 = icmp eq i32 %tid1, 0
br i1 %cmp1, label %ENDIF2582, label %ELSE2584
ELSE2566: ; preds = %ENDIF
- %tid2 = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #2
+ %tid2 = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
%tidf = bitcast i32 %tid2 to float
- %201 = fcmp oeq float %temp292.11, %tidf
- br i1 %201, label %ENDLOOP, label %ELSE2593
+ %tmp201 = fcmp oeq float %temp292.11, %tidf
+ br i1 %tmp201, label %ENDLOOP, label %ELSE2593
ENDIF2564: ; preds = %ENDIF2594, %ENDIF2588
%temp894.1 = phi float [ undef, %ENDIF2588 ], [ %temp894.2, %ENDIF2594 ]
- %temp18.1 = phi float [ %218, %ENDIF2588 ], [ undef, %ENDIF2594 ]
- %202 = fsub float %5, undef
- %203 = fmul float %202, undef
- %204 = call float @llvm.maxnum.f32(float undef, float %203)
- %205 = call float @llvm.minnum.f32(float %204, float undef)
- %206 = call float @llvm.minnum.f32(float %205, float undef)
- %207 = fcmp ogt float undef, 0.000000e+00
- %208 = fcmp olt float undef, 1.000000e+00
- %209 = and i1 %207, %208
- %tid3 = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #2
+ %temp18.1 = phi float [ %tmp218, %ENDIF2588 ], [ undef, %ENDIF2594 ]
+ %tmp202 = fsub float %tmp5, undef
+ %tmp203 = fmul float %tmp202, undef
+ %tmp204 = call float @llvm.maxnum.f32(float undef, float %tmp203)
+ %tmp205 = call float @llvm.minnum.f32(float %tmp204, float undef)
+ %tmp206 = call float @llvm.minnum.f32(float %tmp205, float undef)
+ %tmp207 = fcmp ogt float undef, 0.000000e+00
+ %tmp208 = fcmp olt float undef, 1.000000e+00
+ %tmp209 = and i1 %tmp207, %tmp208
+ %tid3 = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
%tidf3 = bitcast i32 %tid3 to float
- %210 = fcmp olt float %tidf3, %206
- %211 = and i1 %209, %210
- br i1 %211, label %ENDIF2795, label %ELSE2797
+ %tmp210 = fcmp olt float %tidf3, %tmp206
+ %tmp211 = and i1 %tmp209, %tmp210
+ br i1 %tmp211, label %ENDIF2795, label %ELSE2797
ELSE2584: ; preds = %IF2565
br label %ENDIF2582
ENDIF2582: ; preds = %ELSE2584, %IF2565
- %212 = fadd float %1, undef
- %213 = fadd float 0.000000e+00, %212
- %floor = call float @llvm.floor.f32(float %213)
- %214 = fsub float %213, %floor
- %tid4 = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #2
+ %tmp212 = fadd float %tmp1, undef
+ %tmp213 = fadd float 0.000000e+00, %tmp212
+ %floor = call float @llvm.floor.f32(float %tmp213)
+ %tmp214 = fsub float %tmp213, %floor
+ %tid4 = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
%cmp4 = icmp eq i32 %tid4, 0
br i1 %cmp4, label %IF2589, label %ELSE2590
@@ -280,61 +281,61 @@ ELSE2590: ; preds = %ENDIF2582
br label %ENDIF2588
ENDIF2588: ; preds = %ELSE2590, %IF2589
- %215 = fsub float 1.000000e+00, %214
- %216 = call float @llvm.sqrt.f32(float %215)
- %217 = fmul float %216, undef
- %218 = fadd float %217, undef
+ %tmp215 = fsub float 1.000000e+00, %tmp214
+ %tmp216 = call float @llvm.sqrt.f32(float %tmp215)
+ %tmp217 = fmul float %tmp216, undef
+ %tmp218 = fadd float %tmp217, undef
br label %ENDIF2564
ELSE2593: ; preds = %ELSE2566
- %219 = fcmp oeq float %temp292.11, %81
- %220 = fcmp olt float %81, %83
- %221 = and i1 %219, %220
- br i1 %221, label %ENDIF2594, label %ELSE2596
+ %tmp219 = fcmp oeq float %temp292.11, %tmp81
+ %tmp220 = fcmp olt float %tmp81, %tmp83
+ %tmp221 = and i1 %tmp219, %tmp220
+ br i1 %tmp221, label %ENDIF2594, label %ELSE2596
ELSE2596: ; preds = %ELSE2593
- %222 = fcmp oeq float %temp292.11, %100
- %223 = fcmp olt float %100, %102
- %224 = and i1 %222, %223
- br i1 %224, label %ENDIF2594, label %ELSE2632
+ %tmp222 = fcmp oeq float %temp292.11, %tmp100
+ %tmp223 = fcmp olt float %tmp100, %tmp102
+ %tmp224 = and i1 %tmp222, %tmp223
+ br i1 %tmp224, label %ENDIF2594, label %ELSE2632
ENDIF2594: ; preds = %ELSE2788, %ELSE2785, %ELSE2782, %ELSE2779, %IF2775, %ELSE2761, %ELSE2758, %IF2757, %ELSE2704, %ELSE2686, %ELSE2671, %ELSE2668, %IF2667, %ELSE2632, %ELSE2596, %ELSE2593
%temp894.2 = phi float [ 0.000000e+00, %IF2667 ], [ 0.000000e+00, %ELSE2671 ], [ 0.000000e+00, %IF2757 ], [ 0.000000e+00, %ELSE2761 ], [ %temp894.0, %ELSE2758 ], [ 0.000000e+00, %IF2775 ], [ 0.000000e+00, %ELSE2779 ], [ 0.000000e+00, %ELSE2782 ], [ %.2848, %ELSE2788 ], [ 0.000000e+00, %ELSE2785 ], [ 0.000000e+00, %ELSE2593 ], [ 0.000000e+00, %ELSE2632 ], [ 0.000000e+00, %ELSE2704 ], [ 0.000000e+00, %ELSE2686 ], [ 0.000000e+00, %ELSE2668 ], [ 0.000000e+00, %ELSE2596 ]
- %225 = fmul float %temp894.2, undef
+ %tmp225 = fmul float %temp894.2, undef
br label %ENDIF2564
ELSE2632: ; preds = %ELSE2596
br i1 undef, label %ENDIF2594, label %ELSE2650
ELSE2650: ; preds = %ELSE2632
- %226 = fcmp oeq float %temp292.11, %110
- %227 = fcmp olt float %110, %111
- %228 = and i1 %226, %227
- br i1 %228, label %IF2667, label %ELSE2668
+ %tmp226 = fcmp oeq float %temp292.11, %tmp110
+ %tmp227 = fcmp olt float %tmp110, %tmp111
+ %tmp228 = and i1 %tmp226, %tmp227
+ br i1 %tmp228, label %IF2667, label %ELSE2668
IF2667: ; preds = %ELSE2650
br i1 undef, label %ENDIF2594, label %ELSE2671
ELSE2668: ; preds = %ELSE2650
- %229 = fcmp oeq float %temp292.11, %128
- %230 = fcmp olt float %128, undef
- %231 = and i1 %229, %230
- br i1 %231, label %ENDIF2594, label %ELSE2686
+ %tmp229 = fcmp oeq float %temp292.11, %tmp128
+ %tmp230 = fcmp olt float %tmp128, undef
+ %tmp231 = and i1 %tmp229, %tmp230
+ br i1 %tmp231, label %ENDIF2594, label %ELSE2686
ELSE2671: ; preds = %IF2667
br label %ENDIF2594
ELSE2686: ; preds = %ELSE2668
- %232 = fcmp oeq float %temp292.11, %145
- %233 = fcmp olt float %145, undef
- %234 = and i1 %232, %233
- br i1 %234, label %ENDIF2594, label %ELSE2704
+ %tmp232 = fcmp oeq float %temp292.11, %tmp145
+ %tmp233 = fcmp olt float %tmp145, undef
+ %tmp234 = and i1 %tmp232, %tmp233
+ br i1 %tmp234, label %ENDIF2594, label %ELSE2704
ELSE2704: ; preds = %ELSE2686
- %235 = fcmp oeq float %temp292.11, %180
- %236 = fcmp olt float %180, undef
- %237 = and i1 %235, %236
- br i1 %237, label %ENDIF2594, label %ELSE2740
+ %tmp235 = fcmp oeq float %temp292.11, %tmp180
+ %tmp236 = fcmp olt float %tmp180, undef
+ %tmp237 = and i1 %tmp235, %tmp236
+ br i1 %tmp237, label %ENDIF2594, label %ELSE2740
ELSE2740: ; preds = %ELSE2704
br i1 undef, label %IF2757, label %ELSE2758
@@ -349,8 +350,8 @@ ELSE2761: ; preds = %IF2757
br label %ENDIF2594
IF2775: ; preds = %ELSE2758
- %238 = fcmp olt float undef, undef
- br i1 %238, label %ENDIF2594, label %ELSE2779
+ %tmp238 = fcmp olt float undef, undef
+ br i1 %tmp238, label %ENDIF2594, label %ELSE2779
ELSE2779: ; preds = %IF2775
br i1 undef, label %ENDIF2594, label %ELSE2782
@@ -359,39 +360,39 @@ ELSE2782: ; preds = %ELSE2779
br i1 undef, label %ENDIF2594, label %ELSE2785
ELSE2785: ; preds = %ELSE2782
- %239 = fcmp olt float undef, 0.000000e+00
- br i1 %239, label %ENDIF2594, label %ELSE2788
+ %tmp239 = fcmp olt float undef, 0.000000e+00
+ br i1 %tmp239, label %ENDIF2594, label %ELSE2788
ELSE2788: ; preds = %ELSE2785
- %240 = fcmp olt float 0.000000e+00, undef
- %.2848 = select i1 %240, float -1.000000e+00, float 1.000000e+00
+ %tmp240 = fcmp olt float 0.000000e+00, undef
+ %.2848 = select i1 %tmp240, float -1.000000e+00, float 1.000000e+00
br label %ENDIF2594
ELSE2797: ; preds = %ENDIF2564
- %241 = fsub float %8, undef
- %242 = fsub float %9, undef
- %243 = fsub float %10, undef
- %244 = fmul float %241, undef
- %245 = fmul float %242, undef
- %246 = fmul float %243, undef
- %247 = fsub float %11, undef
- %248 = fsub float %12, undef
- %249 = fsub float %13, undef
- %250 = fmul float %247, undef
- %251 = fmul float %248, undef
- %252 = fmul float %249, undef
- %253 = call float @llvm.minnum.f32(float %244, float %250)
- %254 = call float @llvm.minnum.f32(float %245, float %251)
- %255 = call float @llvm.maxnum.f32(float %246, float %252)
- %256 = call float @llvm.maxnum.f32(float %253, float %254)
- %257 = call float @llvm.maxnum.f32(float %256, float undef)
- %258 = call float @llvm.minnum.f32(float undef, float %255)
- %259 = fcmp ogt float %257, 0.000000e+00
- %260 = fcmp olt float %257, 1.000000e+00
- %261 = and i1 %259, %260
- %262 = fcmp olt float %257, %258
- %263 = and i1 %261, %262
- br i1 %263, label %ENDIF2795, label %ELSE2800
+ %tmp241 = fsub float %tmp8, undef
+ %tmp242 = fsub float %tmp9, undef
+ %tmp243 = fsub float %tmp10, undef
+ %tmp244 = fmul float %tmp241, undef
+ %tmp245 = fmul float %tmp242, undef
+ %tmp246 = fmul float %tmp243, undef
+ %tmp247 = fsub float %tmp11, undef
+ %tmp248 = fsub float %tmp12, undef
+ %tmp249 = fsub float %tmp13, undef
+ %tmp250 = fmul float %tmp247, undef
+ %tmp251 = fmul float %tmp248, undef
+ %tmp252 = fmul float %tmp249, undef
+ %tmp253 = call float @llvm.minnum.f32(float %tmp244, float %tmp250)
+ %tmp254 = call float @llvm.minnum.f32(float %tmp245, float %tmp251)
+ %tmp255 = call float @llvm.maxnum.f32(float %tmp246, float %tmp252)
+ %tmp256 = call float @llvm.maxnum.f32(float %tmp253, float %tmp254)
+ %tmp257 = call float @llvm.maxnum.f32(float %tmp256, float undef)
+ %tmp258 = call float @llvm.minnum.f32(float undef, float %tmp255)
+ %tmp259 = fcmp ogt float %tmp257, 0.000000e+00
+ %tmp260 = fcmp olt float %tmp257, 1.000000e+00
+ %tmp261 = and i1 %tmp259, %tmp260
+ %tmp262 = fcmp olt float %tmp257, %tmp258
+ %tmp263 = and i1 %tmp261, %tmp262
+ br i1 %tmp263, label %ENDIF2795, label %ELSE2800
ENDIF2795: ; preds = %ELSE2824, %ELSE2821, %ELSE2818, %ELSE2815, %ELSE2812, %ELSE2809, %ELSE2806, %ELSE2803, %ELSE2800, %ELSE2797, %ENDIF2564
br label %LOOP
@@ -400,53 +401,53 @@ ELSE2800: ; preds = %ELSE2797
br i1 undef, label %ENDIF2795, label %ELSE2803
ELSE2803: ; preds = %ELSE2800
- %264 = fsub float %20, undef
- %265 = fsub float %21, undef
- %266 = fsub float %22, undef
- %267 = fmul float %264, undef
- %268 = fmul float %265, undef
- %269 = fmul float %266, 0.000000e+00
- %270 = fsub float %23, undef
- %271 = fsub float %24, undef
- %272 = fsub float %25, undef
- %273 = fmul float %270, undef
- %274 = fmul float %271, undef
- %275 = fmul float %272, undef
- %276 = call float @llvm.minnum.f32(float %267, float %273)
- %277 = call float @llvm.maxnum.f32(float %268, float %274)
- %278 = call float @llvm.maxnum.f32(float %269, float %275)
- %279 = call float @llvm.maxnum.f32(float %276, float undef)
- %280 = call float @llvm.maxnum.f32(float %279, float undef)
- %281 = call float @llvm.minnum.f32(float undef, float %277)
- %282 = call float @llvm.minnum.f32(float %281, float %278)
- %283 = fcmp ogt float %280, 0.000000e+00
- %284 = fcmp olt float %280, 1.000000e+00
- %285 = and i1 %283, %284
- %286 = fcmp olt float %280, %282
- %287 = and i1 %285, %286
- br i1 %287, label %ENDIF2795, label %ELSE2806
+ %tmp264 = fsub float %tmp20, undef
+ %tmp265 = fsub float %tmp21, undef
+ %tmp266 = fsub float %tmp22, undef
+ %tmp267 = fmul float %tmp264, undef
+ %tmp268 = fmul float %tmp265, undef
+ %tmp269 = fmul float %tmp266, 0.000000e+00
+ %tmp270 = fsub float %tmp23, undef
+ %tmp271 = fsub float %tmp24, undef
+ %tmp272 = fsub float %tmp25, undef
+ %tmp273 = fmul float %tmp270, undef
+ %tmp274 = fmul float %tmp271, undef
+ %tmp275 = fmul float %tmp272, undef
+ %tmp276 = call float @llvm.minnum.f32(float %tmp267, float %tmp273)
+ %tmp277 = call float @llvm.maxnum.f32(float %tmp268, float %tmp274)
+ %tmp278 = call float @llvm.maxnum.f32(float %tmp269, float %tmp275)
+ %tmp279 = call float @llvm.maxnum.f32(float %tmp276, float undef)
+ %tmp280 = call float @llvm.maxnum.f32(float %tmp279, float undef)
+ %tmp281 = call float @llvm.minnum.f32(float undef, float %tmp277)
+ %tmp282 = call float @llvm.minnum.f32(float %tmp281, float %tmp278)
+ %tmp283 = fcmp ogt float %tmp280, 0.000000e+00
+ %tmp284 = fcmp olt float %tmp280, 1.000000e+00
+ %tmp285 = and i1 %tmp283, %tmp284
+ %tmp286 = fcmp olt float %tmp280, %tmp282
+ %tmp287 = and i1 %tmp285, %tmp286
+ br i1 %tmp287, label %ENDIF2795, label %ELSE2806
ELSE2806: ; preds = %ELSE2803
- %288 = fsub float %26, undef
- %289 = fsub float %27, undef
- %290 = fsub float %28, undef
- %291 = fmul float %288, undef
- %292 = fmul float %289, 0.000000e+00
- %293 = fmul float %290, undef
- %294 = fsub float %29, undef
- %295 = fmul float %294, undef
- %296 = call float @llvm.minnum.f32(float %291, float %295)
- %297 = call float @llvm.minnum.f32(float %292, float undef)
- %298 = call float @llvm.maxnum.f32(float %293, float undef)
- %299 = call float @llvm.maxnum.f32(float %296, float %297)
- %300 = call float @llvm.maxnum.f32(float %299, float undef)
- %301 = call float @llvm.minnum.f32(float undef, float %298)
- %302 = fcmp ogt float %300, 0.000000e+00
- %303 = fcmp olt float %300, 1.000000e+00
- %304 = and i1 %302, %303
- %305 = fcmp olt float %300, %301
- %306 = and i1 %304, %305
- br i1 %306, label %ENDIF2795, label %ELSE2809
+ %tmp288 = fsub float %tmp26, undef
+ %tmp289 = fsub float %tmp27, undef
+ %tmp290 = fsub float %tmp28, undef
+ %tmp291 = fmul float %tmp288, undef
+ %tmp292 = fmul float %tmp289, 0.000000e+00
+ %tmp293 = fmul float %tmp290, undef
+ %tmp294 = fsub float %tmp29, undef
+ %tmp295 = fmul float %tmp294, undef
+ %tmp296 = call float @llvm.minnum.f32(float %tmp291, float %tmp295)
+ %tmp297 = call float @llvm.minnum.f32(float %tmp292, float undef)
+ %tmp298 = call float @llvm.maxnum.f32(float %tmp293, float undef)
+ %tmp299 = call float @llvm.maxnum.f32(float %tmp296, float %tmp297)
+ %tmp300 = call float @llvm.maxnum.f32(float %tmp299, float undef)
+ %tmp301 = call float @llvm.minnum.f32(float undef, float %tmp298)
+ %tmp302 = fcmp ogt float %tmp300, 0.000000e+00
+ %tmp303 = fcmp olt float %tmp300, 1.000000e+00
+ %tmp304 = and i1 %tmp302, %tmp303
+ %tmp305 = fcmp olt float %tmp300, %tmp301
+ %tmp306 = and i1 %tmp304, %tmp305
+ br i1 %tmp306, label %ENDIF2795, label %ELSE2809
ELSE2809: ; preds = %ELSE2806
br i1 undef, label %ENDIF2795, label %ELSE2812
@@ -461,53 +462,42 @@ ELSE2818: ; preds = %ELSE2815
br i1 undef, label %ENDIF2795, label %ELSE2821
ELSE2821: ; preds = %ELSE2818
- %307 = fsub float %56, undef
- %308 = fsub float %57, undef
- %309 = fsub float %58, undef
- %310 = fmul float %307, undef
- %311 = fmul float %308, 0.000000e+00
- %312 = fmul float %309, undef
- %313 = fsub float %59, undef
- %314 = fsub float %60, undef
- %315 = fsub float %61, undef
- %316 = fmul float %313, undef
- %317 = fmul float %314, undef
- %318 = fmul float %315, undef
- %319 = call float @llvm.maxnum.f32(float %310, float %316)
- %320 = call float @llvm.maxnum.f32(float %311, float %317)
- %321 = call float @llvm.maxnum.f32(float %312, float %318)
- %322 = call float @llvm.minnum.f32(float %319, float %320)
- %323 = call float @llvm.minnum.f32(float %322, float %321)
- %324 = fcmp ogt float undef, 0.000000e+00
- %325 = fcmp olt float undef, 1.000000e+00
- %326 = and i1 %324, %325
- %327 = fcmp olt float undef, %323
- %328 = and i1 %326, %327
- br i1 %328, label %ENDIF2795, label %ELSE2824
+ %tmp307 = fsub float %tmp56, undef
+ %tmp308 = fsub float %tmp57, undef
+ %tmp309 = fsub float %tmp58, undef
+ %tmp310 = fmul float %tmp307, undef
+ %tmp311 = fmul float %tmp308, 0.000000e+00
+ %tmp312 = fmul float %tmp309, undef
+ %tmp313 = fsub float %tmp59, undef
+ %tmp314 = fsub float %tmp60, undef
+ %tmp315 = fsub float %tmp61, undef
+ %tmp316 = fmul float %tmp313, undef
+ %tmp317 = fmul float %tmp314, undef
+ %tmp318 = fmul float %tmp315, undef
+ %tmp319 = call float @llvm.maxnum.f32(float %tmp310, float %tmp316)
+ %tmp320 = call float @llvm.maxnum.f32(float %tmp311, float %tmp317)
+ %tmp321 = call float @llvm.maxnum.f32(float %tmp312, float %tmp318)
+ %tmp322 = call float @llvm.minnum.f32(float %tmp319, float %tmp320)
+ %tmp323 = call float @llvm.minnum.f32(float %tmp322, float %tmp321)
+ %tmp324 = fcmp ogt float undef, 0.000000e+00
+ %tmp325 = fcmp olt float undef, 1.000000e+00
+ %tmp326 = and i1 %tmp324, %tmp325
+ %tmp327 = fcmp olt float undef, %tmp323
+ %tmp328 = and i1 %tmp326, %tmp327
+ br i1 %tmp328, label %ENDIF2795, label %ELSE2824
ELSE2824: ; preds = %ELSE2821
%.2849 = select i1 undef, float 0.000000e+00, float 1.000000e+00
br label %ENDIF2795
}
-declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
-
-; Function Attrs: nounwind readnone
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
-
-; Function Attrs: nounwind readnone
declare float @llvm.floor.f32(float) #1
-
-; Function Attrs: nounwind readnone
declare float @llvm.sqrt.f32(float) #1
-
-; Function Attrs: nounwind readnone
declare float @llvm.minnum.f32(float, float) #1
-
-; Function Attrs: nounwind readnone
declare float @llvm.maxnum.f32(float, float) #1
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
+declare float @llvm.SI.load.const(<16 x i8>, i32) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/si-spill-sgpr-stack.ll b/test/CodeGen/AMDGPU/si-spill-sgpr-stack.ll
index 062f5245af10..114c97b61bd4 100644
--- a/test/CodeGen/AMDGPU/si-spill-sgpr-stack.ll
+++ b/test/CodeGen/AMDGPU/si-spill-sgpr-stack.ll
@@ -8,7 +8,7 @@
; ALL: s_mov_b32 s[[HI:[0-9]+]], 0xe80000
; Make sure we are handling hazards correctly.
-; SGPR: buffer_load_dword [[VHI:v[0-9]+]], off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:12
+; SGPR: buffer_load_dword [[VHI:v[0-9]+]], off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:16
; SGPR-NEXT: s_waitcnt vmcnt(0)
; SGPR-NEXT: v_readfirstlane_b32 s[[HI:[0-9]+]], [[VHI]]
; SGPR-NEXT: s_nop 4
@@ -16,15 +16,15 @@
; Make sure the scratch wave offset register is correctly incremented and
; then restored.
-; SMEM: s_mov_b32 m0, s[[OFF]]{{$}}
+; SMEM: s_add_u32 m0, s[[OFF]], 0x100{{$}}
; SMEM: s_buffer_store_dwordx4 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[LO]]:[[HI]]], m0 ; 16-byte Folded Spill
-; SMEM: s_mov_b32 m0, s[[OFF]]{{$}}
+; SMEM: s_add_u32 m0, s[[OFF]], 0x100{{$}}
; SMEM: s_buffer_load_dwordx4 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[LO]]:[[HI]]], m0 ; 16-byte Folded Reload
; SMEM: s_dcache_wb
; ALL: s_endpgm
-define void @test(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @test(i32 addrspace(1)* %out, i32 %in) {
call void asm sideeffect "", "~{SGPR0_SGPR1_SGPR2_SGPR3_SGPR4_SGPR5_SGPR6_SGPR7}" ()
call void asm sideeffect "", "~{SGPR8_SGPR9_SGPR10_SGPR11_SGPR12_SGPR13_SGPR14_SGPR15}" ()
call void asm sideeffect "", "~{SGPR16_SGPR17_SGPR18_SGPR19_SGPR20_SGPR21_SGPR22_SGPR23}" ()
diff --git a/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll b/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll
index 4beefb047f22..8a4cee264fd8 100644
--- a/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll
+++ b/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -enable-misched -enable-aa-sched-mi < %s | FileCheck -check-prefix=FUNC -check-prefix=CI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -enable-amdgpu-aa=0 -verify-machineinstrs -enable-misched -enable-aa-sched-mi < %s | FileCheck -check-prefix=FUNC -check-prefix=CI %s
declare void @llvm.SI.tbuffer.store.i32(<16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
declare void @llvm.SI.tbuffer.store.v4i32(<16 x i8>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
@@ -13,7 +13,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() #2
; FUNC-LABEL: @reorder_local_load_global_store_local_load
; CI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset0:1 offset1:3
; CI: buffer_store_dword
-define void @reorder_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
+define amdgpu_kernel void @reorder_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
%ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
%ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
@@ -33,7 +33,7 @@ define void @reorder_local_load_global_store_local_load(i32 addrspace(1)* %out,
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:4
; CI: buffer_store_dword
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:12
-define void @no_reorder_local_load_volatile_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
+define amdgpu_kernel void @no_reorder_local_load_volatile_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
%ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
%ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
@@ -53,7 +53,7 @@ define void @no_reorder_local_load_volatile_global_store_local_load(i32 addrspac
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:4
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:12
; CI: buffer_store_dword
-define void @no_reorder_barrier_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
+define amdgpu_kernel void @no_reorder_barrier_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
%ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
%ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
@@ -77,7 +77,7 @@ define void @no_reorder_barrier_local_load_global_store_local_load(i32 addrspace
; CI-DAG: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x1
; CI-DAG: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x3
; CI: buffer_store_dword
-define void @reorder_constant_load_global_store_constant_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
+define amdgpu_kernel void @reorder_constant_load_global_store_constant_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
%ptr0 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(3)* @stored_constant_ptr, align 8
%ptr1 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 1
@@ -100,7 +100,7 @@ define void @reorder_constant_load_global_store_constant_load(i32 addrspace(1)*
; CI-DAG: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x3
; CI: ds_write_b32
; CI: buffer_store_dword
-define void @reorder_constant_load_local_store_constant_load(i32 addrspace(1)* %out, i32 addrspace(3)* %lptr) #0 {
+define amdgpu_kernel void @reorder_constant_load_local_store_constant_load(i32 addrspace(1)* %out, i32 addrspace(3)* %lptr) #0 {
%ptr0 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(3)* @stored_constant_ptr, align 8
%ptr1 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 1
@@ -122,7 +122,7 @@ define void @reorder_constant_load_local_store_constant_load(i32 addrspace(1)* %
; CI: s_load_dword
; CI: ds_write_b32
; CI: buffer_store_dword
-define void @reorder_smrd_load_local_store_smrd_load(i32 addrspace(1)* %out, i32 addrspace(3)* noalias %lptr, i32 addrspace(2)* %ptr0) #0 {
+define amdgpu_kernel void @reorder_smrd_load_local_store_smrd_load(i32 addrspace(1)* %out, i32 addrspace(3)* noalias %lptr, i32 addrspace(2)* %ptr0) #0 {
%ptr1 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 1
%ptr2 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 2
@@ -141,7 +141,7 @@ define void @reorder_smrd_load_local_store_smrd_load(i32 addrspace(1)* %out, i32
; CI: buffer_load_dword
; CI: buffer_load_dword
; CI: buffer_store_dword
-define void @reorder_global_load_local_store_global_load(i32 addrspace(1)* %out, i32 addrspace(3)* %lptr, i32 addrspace(1)* %ptr0) #0 {
+define amdgpu_kernel void @reorder_global_load_local_store_global_load(i32 addrspace(1)* %out, i32 addrspace(3)* %lptr, i32 addrspace(1)* %ptr0) #0 {
%ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i64 1
%ptr2 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i64 3
@@ -157,12 +157,11 @@ define void @reorder_global_load_local_store_global_load(i32 addrspace(1)* %out,
; FUNC-LABEL: @reorder_local_offsets
; CI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset0:100 offset1:102
-; CI: ds_write2_b32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} offset0:3 offset1:100
-; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:12
-; CI: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:408
+; CI-DAG: ds_write2_b32 {{v[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset0:3 offset1:100
+; CI-DAG: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:408
; CI: buffer_store_dword
; CI: s_endpgm
-define void @reorder_local_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(3)* noalias nocapture %ptr0) #0 {
+define amdgpu_kernel void @reorder_local_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(3)* noalias nocapture %ptr0) #0 {
%ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 3
%ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 100
%ptr3 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 102
@@ -181,14 +180,14 @@ define void @reorder_local_offsets(i32 addrspace(1)* nocapture %out, i32 addrspa
}
; FUNC-LABEL: @reorder_global_offsets
-; CI: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400
-; CI: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:408
-; CI: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:12
-; CI: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400
-; CI: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:408
-; CI: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:12
+; CI-DAG: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400
+; CI-DAG: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:408
+; CI-DAG: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:12
+; CI-DAG: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400
+; CI-DAG: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:408
+; CI: buffer_store_dword
; CI: s_endpgm
-define void @reorder_global_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(1)* noalias nocapture %ptr0) #0 {
+define amdgpu_kernel void @reorder_global_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(1)* noalias nocapture %ptr0) #0 {
%ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 3
%ptr2 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 100
%ptr3 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 102
@@ -222,7 +221,7 @@ define void @reorder_global_offsets(i32 addrspace(1)* nocapture %out, i32 addrsp
; GCN: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} 0 addr64 offset:36{{$}}
; GCN-NEXT: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} 0 addr64 offset:52{{$}}
-define void @reorder_global_offsets_addr64_soffset0(i32 addrspace(1)* noalias nocapture %ptr.base) #0 {
+define amdgpu_kernel void @reorder_global_offsets_addr64_soffset0(i32 addrspace(1)* noalias nocapture %ptr.base) #0 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%id.ext = sext i32 %id to i64
diff --git a/test/CodeGen/AMDGPU/si-vector-hang.ll b/test/CodeGen/AMDGPU/si-vector-hang.ll
index dd8783df5c3c..7990990478af 100644
--- a/test/CodeGen/AMDGPU/si-vector-hang.ll
+++ b/test/CodeGen/AMDGPU/si-vector-hang.ll
@@ -12,7 +12,7 @@
; CHECK: buffer_store_byte
; ModuleID = 'radeon'
-define void @test_8_min_char(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture readonly %in0, i8 addrspace(1)* nocapture readonly %in1) #0 {
+define amdgpu_kernel void @test_8_min_char(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture readonly %in0, i8 addrspace(1)* nocapture readonly %in1) #0 {
entry:
%0 = load i8, i8 addrspace(1)* %in0, align 1
%1 = insertelement <8 x i8> undef, i8 %0, i32 0
diff --git a/test/CodeGen/AMDGPU/sign_extend.ll b/test/CodeGen/AMDGPU/sign_extend.ll
index 875351c59961..3e452c214e98 100644
--- a/test/CodeGen/AMDGPU/sign_extend.ll
+++ b/test/CodeGen/AMDGPU/sign_extend.ll
@@ -4,7 +4,7 @@
; GCN-LABEL: {{^}}s_sext_i1_to_i32:
; GCN: v_cndmask_b32_e64
; GCN: s_endpgm
-define void @s_sext_i1_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @s_sext_i1_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%cmp = icmp eq i32 %a, %b
%sext = sext i1 %cmp to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
@@ -14,7 +14,7 @@ define void @s_sext_i1_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
; GCN-LABEL: {{^}}test_s_sext_i32_to_i64:
; GCN: s_ashr_i32
; GCN: s_endpgm
-define void @test_s_sext_i32_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) nounwind {
+define amdgpu_kernel void @test_s_sext_i32_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) nounwind {
entry:
%mul = mul i32 %a, %b
%add = add i32 %mul, %c
@@ -28,7 +28,7 @@ entry:
; GCN: v_mov_b32_e32 v[[HIREG:[0-9]+]], v[[LOREG]]
; GCN: buffer_store_dwordx2 v{{\[}}[[LOREG]]:[[HIREG]]{{\]}}
; GCN: s_endpgm
-define void @s_sext_i1_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @s_sext_i1_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%cmp = icmp eq i32 %a, %b
%sext = sext i1 %cmp to i64
store i64 %sext, i64 addrspace(1)* %out, align 8
@@ -38,7 +38,7 @@ define void @s_sext_i1_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
; GCN-LABEL: {{^}}s_sext_i32_to_i64:
; GCN: s_ashr_i32
; GCN: s_endpgm
-define void @s_sext_i32_to_i64(i64 addrspace(1)* %out, i32 %a) nounwind {
+define amdgpu_kernel void @s_sext_i32_to_i64(i64 addrspace(1)* %out, i32 %a) nounwind {
%sext = sext i32 %a to i64
store i64 %sext, i64 addrspace(1)* %out, align 8
ret void
@@ -47,7 +47,7 @@ define void @s_sext_i32_to_i64(i64 addrspace(1)* %out, i32 %a) nounwind {
; GCN-LABEL: {{^}}v_sext_i32_to_i64:
; GCN: v_ashr
; GCN: s_endpgm
-define void @v_sext_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @v_sext_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%val = load i32, i32 addrspace(1)* %in, align 4
%sext = sext i32 %val to i64
store i64 %sext, i64 addrspace(1)* %out, align 8
@@ -56,7 +56,7 @@ define void @v_sext_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) no
; GCN-LABEL: {{^}}s_sext_i16_to_i64:
; GCN: s_bfe_i64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x100000
-define void @s_sext_i16_to_i64(i64 addrspace(1)* %out, i16 %a) nounwind {
+define amdgpu_kernel void @s_sext_i16_to_i64(i64 addrspace(1)* %out, i16 %a) nounwind {
%sext = sext i16 %a to i64
store i64 %sext, i64 addrspace(1)* %out, align 8
ret void
@@ -65,7 +65,7 @@ define void @s_sext_i16_to_i64(i64 addrspace(1)* %out, i16 %a) nounwind {
; GCN-LABEL: {{^}}s_sext_i1_to_i16:
; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1
; GCN-NEXT: buffer_store_short [[RESULT]]
-define void @s_sext_i1_to_i16(i16 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @s_sext_i1_to_i16(i16 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%cmp = icmp eq i32 %a, %b
%sext = sext i1 %cmp to i16
store i16 %sext, i16 addrspace(1)* %out
@@ -79,7 +79,7 @@ define void @s_sext_i1_to_i16(i16 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
; GCN-LABEL: {{^}}s_sext_i1_to_i16_with_and:
; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1
; GCN-NEXT: buffer_store_short [[RESULT]]
-define void @s_sext_i1_to_i16_with_and(i16 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
+define amdgpu_kernel void @s_sext_i1_to_i16_with_and(i16 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
%cmp0 = icmp eq i32 %a, %b
%cmp1 = icmp eq i32 %c, %d
%cmp = and i1 %cmp0, %cmp1
@@ -91,7 +91,7 @@ define void @s_sext_i1_to_i16_with_and(i16 addrspace(1)* %out, i32 %a, i32 %b, i
; GCN-LABEL: {{^}}v_sext_i1_to_i16_with_and:
; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1
; GCN-NEXT: buffer_store_short [[RESULT]]
-define void @v_sext_i1_to_i16_with_and(i16 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) nounwind {
+define amdgpu_kernel void @v_sext_i1_to_i16_with_and(i16 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) nounwind {
%tid = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%cmp0 = icmp eq i32 %a, %tid
%cmp1 = icmp eq i32 %b, %c
@@ -130,7 +130,7 @@ define void @v_sext_i1_to_i16_with_and(i16 addrspace(1)* %out, i32 %a, i32 %b, i
; GCN-DAG: buffer_store_dword [[VEXT3]]
; GCN: s_endpgm
-define void @s_sext_v4i8_to_v4i32(i32 addrspace(1)* %out, i32 %a) nounwind {
+define amdgpu_kernel void @s_sext_v4i8_to_v4i32(i32 addrspace(1)* %out, i32 %a) nounwind {
%cast = bitcast i32 %a to <4 x i8>
%ext = sext <4 x i8> %cast to <4 x i32>
%elt0 = extractelement <4 x i32> %ext, i32 0
@@ -162,7 +162,7 @@ define void @s_sext_v4i8_to_v4i32(i32 addrspace(1)* %out, i32 %a) nounwind {
; GCN: buffer_store_dword [[EXT1]]
; GCN: buffer_store_dword [[EXT2]]
; GCN: buffer_store_dword [[EXT3]]
-define void @v_sext_v4i8_to_v4i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @v_sext_v4i8_to_v4i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%a = load i32, i32 addrspace(1)* %in
%cast = bitcast i32 %a to <4 x i8>
%ext = sext <4 x i8> %cast to <4 x i32>
@@ -184,7 +184,7 @@ define void @v_sext_v4i8_to_v4i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
; GCN-DAG: s_sext_i32_i16
; GCN-DAG: s_sext_i32_i16
; GCN: s_endpgm
-define void @s_sext_v4i16_to_v4i32(i32 addrspace(1)* %out, i64 %a) nounwind {
+define amdgpu_kernel void @s_sext_v4i16_to_v4i32(i32 addrspace(1)* %out, i64 %a) nounwind {
%cast = bitcast i64 %a to <4 x i16>
%ext = sext <4 x i16> %cast to <4 x i32>
%elt0 = extractelement <4 x i32> %ext, i32 0
@@ -206,7 +206,7 @@ define void @s_sext_v4i16_to_v4i32(i32 addrspace(1)* %out, i64 %a) nounwind {
; GCN-DAG: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 16
; GCN-DAG: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 16
; GCN: s_endpgm
-define void @v_sext_v4i16_to_v4i32(i32 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @v_sext_v4i16_to_v4i32(i32 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
%a = load i64, i64 addrspace(1)* %in
%cast = bitcast i64 %a to <4 x i16>
%ext = sext <4 x i16> %cast to <4 x i32>
diff --git a/test/CodeGen/AMDGPU/sint_to_fp.f64.ll b/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
index 68dc3c6ccd24..f98a716b4fd1 100644
--- a/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
+++ b/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
@@ -4,7 +4,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
; SI-LABEL: {{^}}sint_to_fp_i32_to_f64
; SI: v_cvt_f64_i32_e32
-define void @sint_to_fp_i32_to_f64(double addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @sint_to_fp_i32_to_f64(double addrspace(1)* %out, i32 %in) {
%result = sitofp i32 %in to double
store double %result, double addrspace(1)* %out
ret void
@@ -19,7 +19,7 @@ define void @sint_to_fp_i32_to_f64(double addrspace(1)* %out, i32 %in) {
; SI-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; SI: buffer_store_dwordx2 v{{\[}}[[ZERO]]:[[SEL]]{{\]}}
; SI: s_endpgm
-define void @sint_to_fp_i1_f64(double addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @sint_to_fp_i1_f64(double addrspace(1)* %out, i32 %in) {
%cmp = icmp eq i32 %in, 0
%fp = sitofp i1 %cmp to double
store double %fp, double addrspace(1)* %out, align 4
@@ -31,14 +31,14 @@ define void @sint_to_fp_i1_f64(double addrspace(1)* %out, i32 %in) {
; SI-NEXT: v_cvt_f64_i32_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[IRESULT]]
; SI: buffer_store_dwordx2 [[RESULT]]
; SI: s_endpgm
-define void @sint_to_fp_i1_f64_load(double addrspace(1)* %out, i1 %in) {
+define amdgpu_kernel void @sint_to_fp_i1_f64_load(double addrspace(1)* %out, i1 %in) {
%fp = sitofp i1 %in to double
store double %fp, double addrspace(1)* %out, align 8
ret void
}
; SI-LABEL: @s_sint_to_fp_i64_to_f64
-define void @s_sint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @s_sint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 %in) {
%result = sitofp i64 %in to double
store double %result, double addrspace(1)* %out
ret void
@@ -51,7 +51,7 @@ define void @s_sint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 %in) {
; SI-DAG: v_ldexp_f64 [[LDEXP:v\[[0-9]+:[0-9]+\]]], [[HI_CONV]], 32
; SI: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[LDEXP]], [[LO_CONV]]
; SI: buffer_store_dwordx2 [[RESULT]]
-define void @v_sint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @v_sint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%val = load i64, i64 addrspace(1)* %gep, align 8
diff --git a/test/CodeGen/AMDGPU/sint_to_fp.i64.ll b/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
index 5df8105116cc..04cd199b81ae 100644
--- a/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
+++ b/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
@@ -4,7 +4,7 @@
; FIXME: This should be merged with sint_to_fp.ll, but s_sint_to_fp_v2i64 crashes on r600
; FUNC-LABEL: {{^}}s_sint_to_fp_i64_to_f16:
-define void @s_sint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 %in) #0 {
+define amdgpu_kernel void @s_sint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 %in) #0 {
%result = sitofp i64 %in to half
store half %result, half addrspace(1)* %out
ret void
@@ -28,7 +28,7 @@ define void @s_sint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 %in) #0 {
; GCN: v_cndmask_b32_e{{32|64}} [[SIGN_SEL:v[0-9]+]],
; GCN: v_cvt_f16_f32_e32 [[SIGN_SEL_F16:v[0-9]+]], [[SIGN_SEL]]
; GCN: {{buffer|flat}}_store_short {{.*}}[[SIGN_SEL_F16]]
-define void @v_sint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_sint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%out.gep = getelementptr half, half addrspace(1)* %out, i32 %tid
@@ -39,7 +39,7 @@ define void @v_sint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 addrspace(1)*
}
; FUNC-LABEL: {{^}}s_sint_to_fp_i64_to_f32:
-define void @s_sint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 %in) #0 {
+define amdgpu_kernel void @s_sint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 %in) #0 {
%result = sitofp i64 %in to float
store float %result, float addrspace(1)* %out
ret void
@@ -62,7 +62,7 @@ define void @s_sint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 %in) #0 {
; GCN: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
; GCN: v_cndmask_b32_e{{32|64}} [[SIGN_SEL:v[0-9]+]],
; GCN: {{buffer|flat}}_store_dword {{.*}}[[SIGN_SEL]]
-define void @v_sint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_sint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -74,14 +74,14 @@ define void @v_sint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 addrspace(1)*
; FUNC-LABEL: {{^}}s_sint_to_fp_v2i64_to_v2f32:
; GCN-NOT: v_and_b32_e32 v{{[0-9]+}}, -1,
-define void @s_sint_to_fp_v2i64_to_v2f32(<2 x float> addrspace(1)* %out, <2 x i64> %in) #0{
+define amdgpu_kernel void @s_sint_to_fp_v2i64_to_v2f32(<2 x float> addrspace(1)* %out, <2 x i64> %in) #0 {
%result = sitofp <2 x i64> %in to <2 x float>
store <2 x float> %result, <2 x float> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}v_sint_to_fp_v4i64_to_v4f32:
-define void @v_sint_to_fp_v4i64_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_sint_to_fp_v4i64_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i32 %tid
%out.gep = getelementptr <4 x float>, <4 x float> addrspace(1)* %out, i32 %tid
@@ -93,14 +93,14 @@ define void @v_sint_to_fp_v4i64_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i6
; FUNC-LABEL: {{^}}s_sint_to_fp_v2i64_to_v2f16:
; GCN-NOT: v_and_b32_e32 v{{[0-9]+}}, -1,
-define void @s_sint_to_fp_v2i64_to_v2f16(<2 x half> addrspace(1)* %out, <2 x i64> %in) #0{
+define amdgpu_kernel void @s_sint_to_fp_v2i64_to_v2f16(<2 x half> addrspace(1)* %out, <2 x i64> %in) #0 {
%result = sitofp <2 x i64> %in to <2 x half>
store <2 x half> %result, <2 x half> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}v_sint_to_fp_v4i64_to_v4f16:
-define void @v_sint_to_fp_v4i64_to_v4f16(<4 x half> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_sint_to_fp_v4i64_to_v4f16(<4 x half> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i32 %tid
%out.gep = getelementptr <4 x half>, <4 x half> addrspace(1)* %out, i32 %tid
diff --git a/test/CodeGen/AMDGPU/sint_to_fp.ll b/test/CodeGen/AMDGPU/sint_to_fp.ll
index 4c8fea12bada..8e85d9998597 100644
--- a/test/CodeGen/AMDGPU/sint_to_fp.ll
+++ b/test/CodeGen/AMDGPU/sint_to_fp.ll
@@ -6,7 +6,7 @@
; SI: v_cvt_f32_i32_e32 {{v[0-9]+}}, {{s[0-9]+$}}
; R600: INT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-define void @s_sint_to_fp_i32_to_f32(float addrspace(1)* %out, i32 %in) #0 {
+define amdgpu_kernel void @s_sint_to_fp_i32_to_f32(float addrspace(1)* %out, i32 %in) #0 {
%result = sitofp i32 %in to float
store float %result, float addrspace(1)* %out
ret void
@@ -16,7 +16,7 @@ define void @s_sint_to_fp_i32_to_f32(float addrspace(1)* %out, i32 %in) #0 {
; SI: v_cvt_f32_i32_e32 {{v[0-9]+}}, {{v[0-9]+$}}
; R600: INT_TO_FLT
-define void @v_sint_to_fp_i32_to_f32(float addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_sint_to_fp_i32_to_f32(float addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%in.gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -32,7 +32,7 @@ define void @v_sint_to_fp_i32_to_f32(float addrspace(1)* %out, i32 addrspace(1)*
; R600-DAG: INT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[2].W
; R600-DAG: INT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[3].X
-define void @s_sint_to_fp_v2i32(<2 x float> addrspace(1)* %out, <2 x i32> %in) #0{
+define amdgpu_kernel void @s_sint_to_fp_v2i32(<2 x float> addrspace(1)* %out, <2 x i32> %in) #0 {
%result = sitofp <2 x i32> %in to <2 x float>
store <2 x float> %result, <2 x float> addrspace(1)* %out
ret void
@@ -49,7 +49,7 @@ define void @s_sint_to_fp_v2i32(<2 x float> addrspace(1)* %out, <2 x i32> %in) #
; R600: INT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: INT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: INT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @s_sint_to_fp_v4i32_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @s_sint_to_fp_v4i32_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
%value = load <4 x i32>, <4 x i32> addrspace(1)* %in
%result = sitofp <4 x i32> %value to <4 x float>
store <4 x float> %result, <4 x float> addrspace(1)* %out
@@ -66,7 +66,7 @@ define void @s_sint_to_fp_v4i32_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i3
; R600: INT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: INT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: INT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @v_sint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_sint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%in.gep = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 %tid
%out.gep = getelementptr <4 x float>, <4 x float> addrspace(1)* %out, i32 %tid
@@ -81,7 +81,7 @@ define void @v_sint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4 x i32> addrsp
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1.0, [[CMP]]
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
-define void @s_sint_to_fp_i1_f32(float addrspace(1)* %out, i32 %in) #0 {
+define amdgpu_kernel void @s_sint_to_fp_i1_f32(float addrspace(1)* %out, i32 %in) #0 {
%cmp = icmp eq i32 %in, 0
%fp = uitofp i1 %cmp to float
store float %fp, float addrspace(1)* %out
@@ -92,7 +92,7 @@ define void @s_sint_to_fp_i1_f32(float addrspace(1)* %out, i32 %in) #0 {
; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1.0
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
-define void @s_sint_to_fp_i1_f32_load(float addrspace(1)* %out, i1 %in) #0 {
+define amdgpu_kernel void @s_sint_to_fp_i1_f32_load(float addrspace(1)* %out, i1 %in) #0 {
%fp = sitofp i1 %in to float
store float %fp, float addrspace(1)* %out
ret void
@@ -105,7 +105,7 @@ define void @s_sint_to_fp_i1_f32_load(float addrspace(1)* %out, i1 %in) #0 {
; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1.0
; SI: {{buffer|flat}}_store_dword {{.*}}[[RESULT]]
; SI: s_endpgm
-define void @v_sint_to_fp_i1_f32_load(float addrspace(1)* %out, i1 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_sint_to_fp_i1_f32_load(float addrspace(1)* %out, i1 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%in.gep = getelementptr i1, i1 addrspace(1)* %in, i32 %tid
%out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
diff --git a/test/CodeGen/AMDGPU/sitofp.f16.ll b/test/CodeGen/AMDGPU/sitofp.f16.ll
index 1395fa2bfea0..574d1c0b2c78 100644
--- a/test/CodeGen/AMDGPU/sitofp.f16.ll
+++ b/test/CodeGen/AMDGPU/sitofp.f16.ll
@@ -7,7 +7,7 @@
; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[A_F32]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @sitofp_i16_to_f16(
+define amdgpu_kernel void @sitofp_i16_to_f16(
half addrspace(1)* %r,
i16 addrspace(1)* %a) {
entry:
@@ -23,7 +23,7 @@ entry:
; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[A_I16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @sitofp_i32_to_f16(
+define amdgpu_kernel void @sitofp_i32_to_f16(
half addrspace(1)* %r,
i32 addrspace(1)* %a) {
entry:
@@ -37,15 +37,24 @@ entry:
; GCN-LABEL: {{^}}sitofp_v2i16_to_v2f16
; GCN: buffer_load_dword
-; GCN: v_cvt_f32_i32_e32
-; GCN: v_cvt_f32_i32_e32
-; GCN: v_cvt_f16_f32_e32
-; GCN: v_cvt_f16_f32_e32
-; GCN-DAG: v_lshlrev_b32_e32
-; GCN-DAG: v_or_b32_e32
-; GCN: buffer_store_dword
-; GCN: s_endpgm
-define void @sitofp_v2i16_to_v2f16(
+
+; SI: v_cvt_f32_i32_e32
+; SI: v_cvt_f32_i32_e32
+; SI: v_cvt_f16_f32_e32
+; SI: v_cvt_f16_f32_e32
+; SI-DAG: v_lshlrev_b32_e32
+; SI: v_or_b32_e32
+
+; VI-DAG: v_cvt_f32_i32_sdwa
+; VI-DAG: v_cvt_f32_i32_sdwa
+; VI-DAG: v_cvt_f16_f32_e32
+; VI-DAG: v_cvt_f16_f32_sdwa
+; VI: v_or_b32_e32
+
+; GCN: buffer_store_dword
+; GCN: s_endpgm
+
+define amdgpu_kernel void @sitofp_v2i16_to_v2f16(
<2 x half> addrspace(1)* %r,
<2 x i16> addrspace(1)* %a) {
entry:
@@ -56,17 +65,24 @@ entry:
}
; GCN-LABEL: {{^}}sitofp_v2i32_to_v2f16
-; GCN: buffer_load_dwordx2
-; GCN: v_cvt_f32_i32_e32
-; GCN: v_cvt_f32_i32_e32
-; GCN: v_cvt_f16_f32_e32
-; GCN: v_cvt_f16_f32_e32
-; GCN-DAG: v_and_b32_e32
-; GCN-DAG: v_lshlrev_b32_e32
-; GCN-DAG: v_or_b32_e32
-; GCN: buffer_store_dword
-; GCN: s_endpgm
-define void @sitofp_v2i32_to_v2f16(
+; GCN: buffer_load_dwordx2
+
+; SI: v_cvt_f32_i32_e32
+; SI: v_cvt_f32_i32_e32
+; SI: v_cvt_f16_f32_e32
+; SI: v_cvt_f16_f32_e32
+; SI-DAG: v_lshlrev_b32_e32
+; SI: v_or_b32_e32
+
+; VI-DAG: v_cvt_f32_i32_e32
+; VI-DAG: v_cvt_f32_i32_e32
+; VI-DAG: v_cvt_f16_f32_e32
+; VI-DAG: v_cvt_f16_f32_sdwa
+; VI: v_or_b32_e32
+
+; GCN: buffer_store_dword
+; GCN: s_endpgm
+define amdgpu_kernel void @sitofp_v2i32_to_v2f16(
<2 x half> addrspace(1)* %r,
<2 x i32> addrspace(1)* %a) {
entry:
diff --git a/test/CodeGen/AMDGPU/skip-if-dead.ll b/test/CodeGen/AMDGPU/skip-if-dead.ll
index 60cee7a3499e..3f53572ab440 100644
--- a/test/CodeGen/AMDGPU/skip-if-dead.ll
+++ b/test/CodeGen/AMDGPU/skip-if-dead.ll
@@ -357,7 +357,7 @@ bb7: ; preds = %bb4
; CHECK: [[END]]:
; CHECK: s_or_b64 exec, exec
; CHECK: s_endpgm
-define amdgpu_ps void @if_after_kill_block(float %arg, float %arg1, <4 x i32> %arg2) #0 {
+define amdgpu_ps void @if_after_kill_block(float %arg, float %arg1, <4 x float> %arg2) #0 {
bb:
%tmp = fcmp ult float %arg1, 0.000000e+00
br i1 %tmp, label %bb3, label %bb4
@@ -367,7 +367,7 @@ bb3: ; preds = %bb
br label %bb4
bb4: ; preds = %bb3, %bb
- %tmp5 = call <4 x float> @llvm.SI.image.sample.c.v4i32(<4 x i32> %arg2, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp5 = call <4 x float> @llvm.amdgcn.image.sample.c.v4f32.v4f32.v8i32(<4 x float> %arg2, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp6 = extractelement <4 x float> %tmp5, i32 0
%tmp7 = fcmp une float %tmp6, 0.000000e+00
br i1 %tmp7, label %bb8, label %bb9
@@ -380,9 +380,8 @@ bb9: ; preds = %bb4
ret void
}
+declare <4 x float> @llvm.amdgcn.image.sample.c.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1
declare void @llvm.AMDGPU.kill(float) #0
-declare <4 x float> @llvm.SI.image.sample.c.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
-declare void @llvm.amdgcn.buffer.store.f32(float, <4 x i32>, i32, i32, i1, i1) nounwind
attributes #0 = { nounwind }
-attributes #1 = { nounwind readnone }
+attributes #1 = { nounwind readonly }
diff --git a/test/CodeGen/AMDGPU/smed3.ll b/test/CodeGen/AMDGPU/smed3.ll
index 985c73904f43..8665ab697265 100644
--- a/test/CodeGen/AMDGPU/smed3.ll
+++ b/test/CodeGen/AMDGPU/smed3.ll
@@ -1,12 +1,13 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SICIVI -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SICIVI -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
-declare i32 @llvm.r600.read.tidig.x() #0
+declare i32 @llvm.amdgcn.workitem.id.x() #0
; GCN-LABEL: {{^}}v_test_smed3_r_i_i_i32:
; GCN: v_med3_i32 v{{[0-9]+}}, v{{[0-9]+}}, 12, 17
-define void @v_test_smed3_r_i_i_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #1 {
- %tid = call i32 @llvm.r600.read.tidig.x()
+define amdgpu_kernel void @v_test_smed3_r_i_i_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32, i32 addrspace(1)* %gep0
@@ -24,8 +25,8 @@ define void @v_test_smed3_r_i_i_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a
; GCN-LABEL: {{^}}v_test_smed3_multi_use_r_i_i_i32:
; GCN: v_max_i32
; GCN: v_min_i32
-define void @v_test_smed3_multi_use_r_i_i_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #1 {
- %tid = call i32 @llvm.r600.read.tidig.x()
+define amdgpu_kernel void @v_test_smed3_multi_use_r_i_i_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32, i32 addrspace(1)* %gep0
@@ -44,8 +45,8 @@ define void @v_test_smed3_multi_use_r_i_i_i32(i32 addrspace(1)* %out, i32 addrsp
; GCN-LABEL: {{^}}v_test_smed3_r_i_i_constant_order_i32:
; GCN: v_max_i32_e32 v{{[0-9]+}}, 17, v{{[0-9]+}}
; GCN: v_min_i32_e32 v{{[0-9]+}}, 12, v{{[0-9]+}}
-define void @v_test_smed3_r_i_i_constant_order_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #1 {
- %tid = call i32 @llvm.r600.read.tidig.x()
+define amdgpu_kernel void @v_test_smed3_r_i_i_constant_order_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32, i32 addrspace(1)* %gep0
@@ -63,8 +64,8 @@ define void @v_test_smed3_r_i_i_constant_order_i32(i32 addrspace(1)* %out, i32 a
; GCN-LABEL: {{^}}v_test_smed3_r_i_i_sign_mismatch_i32:
; GCN: v_max_u32_e32 v{{[0-9]+}}, 12, v{{[0-9]+}}
; GCN: v_min_i32_e32 v{{[0-9]+}}, 17, v{{[0-9]+}}
-define void @v_test_smed3_r_i_i_sign_mismatch_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #1 {
- %tid = call i32 @llvm.r600.read.tidig.x()
+define amdgpu_kernel void @v_test_smed3_r_i_i_sign_mismatch_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32, i32 addrspace(1)* %gep0
@@ -82,8 +83,8 @@ define void @v_test_smed3_r_i_i_sign_mismatch_i32(i32 addrspace(1)* %out, i32 ad
; GCN-LABEL: {{^}}v_test_smed3_r_i_i_i64:
; GCN: v_cmp_lt_i64
; GCN: v_cmp_gt_i64
-define void @v_test_smed3_r_i_i_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) #1 {
- %tid = call i32 @llvm.r600.read.tidig.x()
+define amdgpu_kernel void @v_test_smed3_r_i_i_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
%a = load i64, i64 addrspace(1)* %gep0
@@ -99,9 +100,10 @@ define void @v_test_smed3_r_i_i_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a
}
; GCN-LABEL: {{^}}v_test_smed3_r_i_i_i16:
-; GCN: v_med3_i32 v{{[0-9]+}}, v{{[0-9]+}}, 12, 17
-define void @v_test_smed3_r_i_i_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr) #1 {
- %tid = call i32 @llvm.r600.read.tidig.x()
+; SICIVI: v_med3_i32 v{{[0-9]+}}, v{{[0-9]+}}, 12, 17
+; GFX9: v_med3_i16 v{{[0-9]+}}, v{{[0-9]+}}, 12, 17
+define amdgpu_kernel void @v_test_smed3_r_i_i_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i16, i16 addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr i16, i16 addrspace(1)* %out, i32 %tid
%a = load i16, i16 addrspace(1)* %gep0
@@ -172,7 +174,7 @@ define internal i8 @smax8(i8 %x, i8 %y) #2 {
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_0:
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i32_pat_0(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_0(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %x, i32 %y)
%tmp1 = call i32 @smax(i32 %x, i32 %y)
@@ -184,7 +186,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_1:
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i32_pat_1(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_1(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %x, i32 %y)
%tmp1 = call i32 @smax(i32 %y, i32 %x)
@@ -196,7 +198,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_2:
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i32_pat_2(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_2(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %x, i32 %y)
%tmp1 = call i32 @smax(i32 %x, i32 %y)
@@ -208,7 +210,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_3:
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i32_pat_3(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_3(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %x, i32 %y)
%tmp1 = call i32 @smax(i32 %y, i32 %x)
@@ -220,7 +222,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_4:
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i32_pat_4(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_4(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %y, i32 %x)
%tmp1 = call i32 @smax(i32 %x, i32 %y)
@@ -232,7 +234,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_5:
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i32_pat_5(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_5(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %y, i32 %x)
%tmp1 = call i32 @smax(i32 %y, i32 %x)
@@ -244,7 +246,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_6:
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i32_pat_6(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_6(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %y, i32 %x)
%tmp1 = call i32 @smax(i32 %x, i32 %y)
@@ -256,7 +258,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_7:
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i32_pat_7(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_7(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %y, i32 %x)
%tmp1 = call i32 @smax(i32 %y, i32 %x)
@@ -268,7 +270,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_8:
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i32_pat_8(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_8(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %x, i32 %y)
%tmp1 = call i32 @smax(i32 %x, i32 %y)
@@ -280,7 +282,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_9:
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i32_pat_9(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_9(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %x, i32 %y)
%tmp1 = call i32 @smax(i32 %y, i32 %x)
@@ -292,7 +294,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_10:
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i32_pat_10(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_10(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %x, i32 %y)
%tmp1 = call i32 @smax(i32 %x, i32 %y)
@@ -304,7 +306,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_11:
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i32_pat_11(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_11(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %x, i32 %y)
%tmp1 = call i32 @smax(i32 %y, i32 %x)
@@ -316,7 +318,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_12:
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i32_pat_12(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_12(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %y, i32 %x)
%tmp1 = call i32 @smax(i32 %x, i32 %y)
@@ -328,7 +330,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_13:
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i32_pat_13(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_13(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %y, i32 %x)
%tmp1 = call i32 @smax(i32 %y, i32 %x)
@@ -340,7 +342,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_14:
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i32_pat_14(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_14(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %y, i32 %x)
%tmp1 = call i32 @smax(i32 %x, i32 %y)
@@ -352,7 +354,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_15:
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i32_pat_15(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_15(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %y, i32 %x)
%tmp1 = call i32 @smax(i32 %y, i32 %x)
@@ -362,12 +364,13 @@ bb:
ret void
}
+; FIXME: Should keep scalar or not promote
; GCN-LABEL: {{^}}s_test_smed3_i16_pat_0:
; GCN: s_sext_i32_i16
; GCN: s_sext_i32_i16
; GCN: s_sext_i32_i16
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i16_pat_0(i16 addrspace(1)* %arg, i16 %x, i16 %y, i16 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i16_pat_0(i16 addrspace(1)* %arg, i16 %x, i16 %y, i16 %z) #1 {
bb:
%tmp0 = call i16 @smin16(i16 %x, i16 %y)
%tmp1 = call i16 @smax16(i16 %x, i16 %y)
@@ -382,7 +385,7 @@ bb:
; GCN: s_sext_i32_i8
; GCN: s_sext_i32_i8
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i8_pat_0(i8 addrspace(1)* %arg, i8 %x, i8 %y, i8 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i8_pat_0(i8 addrspace(1)* %arg, i8 %x, i8 %y, i8 %z) #1 {
bb:
%tmp0 = call i8 @smin8(i8 %x, i8 %y)
%tmp1 = call i8 @smax8(i8 %x, i8 %y)
@@ -394,7 +397,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_0_multi_use_0:
; GCN-NOT: v_med3_i32
-define void @s_test_smed3_i32_pat_0_multi_use_0(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_0_multi_use_0(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %x, i32 %y)
%tmp1 = call i32 @smax(i32 %x, i32 %y)
@@ -407,7 +410,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_0_multi_use_1:
; GCN-NOT: v_med3_i32
-define void @s_test_smed3_i32_pat_0_multi_use_1(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_0_multi_use_1(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %x, i32 %y)
%tmp1 = call i32 @smax(i32 %x, i32 %y)
@@ -420,7 +423,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_0_multi_use_2:
; GCN-NOT: v_med3_i32
-define void @s_test_smed3_i32_pat_0_multi_use_2(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_0_multi_use_2(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %x, i32 %y)
%tmp1 = call i32 @smax(i32 %x, i32 %y)
@@ -433,7 +436,7 @@ bb:
; GCN-LABEL: {{^}}s_test_smed3_i32_pat_0_multi_use_result:
; GCN: v_med3_i32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_smed3_i32_pat_0_multi_use_result(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_smed3_i32_pat_0_multi_use_result(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @smin(i32 %x, i32 %y)
%tmp1 = call i32 @smax(i32 %x, i32 %y)
@@ -444,6 +447,35 @@ bb:
ret void
}
+; GCN-LABEL: {{^}}v_test_smed3_i16_pat_0:
+; SI: v_med3_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+
+; FIXME: VI not matching med3
+; VI: v_min_i16
+; VI: v_max_i16
+; VI: v_min_i16
+; VI: v_max_i16
+
+; GFX9: v_med3_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @v_test_smed3_i16_pat_0(i16 addrspace(1)* %arg, i16 addrspace(1)* %out, i16 addrspace(1)* %a.ptr) #1 {
+bb:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr inbounds i16, i16 addrspace(1)* %a.ptr, i32 %tid
+ %gep1 = getelementptr inbounds i16, i16 addrspace(1)* %gep0, i32 3
+ %gep2 = getelementptr inbounds i16, i16 addrspace(1)* %gep0, i32 8
+ %out.gep = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
+ %x = load i16, i16 addrspace(1)* %gep0
+ %y = load i16, i16 addrspace(1)* %gep1
+ %z = load i16, i16 addrspace(1)* %gep2
+
+ %tmp0 = call i16 @smin16(i16 %x, i16 %y)
+ %tmp1 = call i16 @smax16(i16 %x, i16 %y)
+ %tmp2 = call i16 @smin16(i16 %tmp1, i16 %z)
+ %tmp3 = call i16 @smax16(i16 %tmp0, i16 %tmp2)
+ store i16 %tmp3, i16 addrspace(1)* %out.gep
+ ret void
+}
+
attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }
attributes #2 = { nounwind readnone alwaysinline }
diff --git a/test/CodeGen/AMDGPU/sminmax.ll b/test/CodeGen/AMDGPU/sminmax.ll
index ce5d92451647..827d672022eb 100644
--- a/test/CodeGen/AMDGPU/sminmax.ll
+++ b/test/CodeGen/AMDGPU/sminmax.ll
@@ -7,7 +7,7 @@
; GCN: s_add_i32
; EG: MAX_INT
-define void @s_abs_i32(i32 addrspace(1)* %out, i32 %val) nounwind {
+define amdgpu_kernel void @s_abs_i32(i32 addrspace(1)* %out, i32 %val) nounwind {
%neg = sub i32 0, %val
%cond = icmp sgt i32 %val, %neg
%res = select i1 %cond, i32 %val, i32 %neg
@@ -22,7 +22,7 @@ define void @s_abs_i32(i32 addrspace(1)* %out, i32 %val) nounwind {
; GCN: v_add_i32
; EG: MAX_INT
-define void @v_abs_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %src) nounwind {
+define amdgpu_kernel void @v_abs_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %src) nounwind {
%val = load i32, i32 addrspace(1)* %src, align 4
%neg = sub i32 0, %val
%cond = icmp sgt i32 %val, %neg
@@ -36,7 +36,7 @@ define void @v_abs_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %src) nounwind
; GCN: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SRC:v[0-9]+]]
; GCN: v_max_i32_e32 [[MAX:v[0-9]+]], [[NEG]], [[SRC]]
; GCN: v_mul_lo_i32 v{{[0-9]+}}, [[MAX]], [[MAX]]
-define void @v_abs_i32_repeat_user(i32 addrspace(1)* %out, i32 addrspace(1)* %src) nounwind {
+define amdgpu_kernel void @v_abs_i32_repeat_user(i32 addrspace(1)* %out, i32 addrspace(1)* %src) nounwind {
%val = load i32, i32 addrspace(1)* %src, align 4
%neg = sub i32 0, %val
%cond = icmp sgt i32 %val, %neg
@@ -54,7 +54,7 @@ define void @v_abs_i32_repeat_user(i32 addrspace(1)* %out, i32 addrspace(1)* %sr
; EG: MAX_INT
; EG: MAX_INT
-define void @s_abs_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %val) nounwind {
+define amdgpu_kernel void @s_abs_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %val) nounwind {
%z0 = insertelement <2 x i32> undef, i32 0, i32 0
%z1 = insertelement <2 x i32> %z0, i32 0, i32 1
%t0 = insertelement <2 x i32> undef, i32 2, i32 0
@@ -79,7 +79,7 @@ define void @s_abs_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %val) nounwind
; EG: MAX_INT
; EG: MAX_INT
-define void @v_abs_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %src) nounwind {
+define amdgpu_kernel void @v_abs_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %src) nounwind {
%z0 = insertelement <2 x i32> undef, i32 0, i32 0
%z1 = insertelement <2 x i32> %z0, i32 0, i32 1
%t0 = insertelement <2 x i32> undef, i32 2, i32 0
@@ -109,7 +109,7 @@ define void @v_abs_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %
; EG: MAX_INT
; EG: MAX_INT
; EG: MAX_INT
-define void @s_abs_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %val) nounwind {
+define amdgpu_kernel void @s_abs_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %val) nounwind {
%z0 = insertelement <4 x i32> undef, i32 0, i32 0
%z1 = insertelement <4 x i32> %z0, i32 0, i32 1
%z2 = insertelement <4 x i32> %z1, i32 0, i32 2
@@ -146,7 +146,7 @@ define void @s_abs_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %val) nounwind
; EG: MAX_INT
; EG: MAX_INT
; EG: MAX_INT
-define void @v_abs_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %src) nounwind {
+define amdgpu_kernel void @v_abs_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %src) nounwind {
%z0 = insertelement <4 x i32> undef, i32 0, i32 0
%z1 = insertelement <4 x i32> %z0, i32 0, i32 1
%z2 = insertelement <4 x i32> %z1, i32 0, i32 2
@@ -170,7 +170,7 @@ define void @v_abs_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %
; GCN-DAG: s_min_i32 s{{[0-9]+}}, [[VAL0]], [[VAL1]]
; GCN-DAG: s_max_i32 s{{[0-9]+}}, [[VAL0]], [[VAL1]]
-define void @s_min_max_i32(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %val0, i32 %val1) nounwind {
+define amdgpu_kernel void @s_min_max_i32(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %val0, i32 %val1) nounwind {
%cond0 = icmp sgt i32 %val0, %val1
%sel0 = select i1 %cond0, i32 %val0, i32 %val1
%sel1 = select i1 %cond0, i32 %val1, i32 %val0
@@ -186,7 +186,7 @@ define void @s_min_max_i32(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32
; GCN-DAG: v_min_i32_e32 v{{[0-9]+}}, [[VAL1]], [[VAL0]]
; GCN-DAG: v_max_i32_e32 v{{[0-9]+}}, [[VAL1]], [[VAL0]]
-define void @v_min_max_i32(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %ptr0, i32 addrspace(1)* %ptr1) nounwind {
+define amdgpu_kernel void @v_min_max_i32(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %ptr0, i32 addrspace(1)* %ptr1) nounwind {
%val0 = load volatile i32, i32 addrspace(1)* %ptr0
%val1 = load volatile i32, i32 addrspace(1)* %ptr1
@@ -208,7 +208,7 @@ define void @v_min_max_i32(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32
; GCN-DAG: s_max_i32
; GCN-DAG: s_max_i32
; GCN-DAG: s_max_i32
-define void @s_min_max_v4i32(<4 x i32> addrspace(1)* %out0, <4 x i32> addrspace(1)* %out1, <4 x i32> %val0, <4 x i32> %val1) nounwind {
+define amdgpu_kernel void @s_min_max_v4i32(<4 x i32> addrspace(1)* %out0, <4 x i32> addrspace(1)* %out1, <4 x i32> %val0, <4 x i32> %val1) nounwind {
%cond0 = icmp sgt <4 x i32> %val0, %val1
%sel0 = select <4 x i1> %cond0, <4 x i32> %val0, <4 x i32> %val1
%sel1 = select <4 x i1> %cond0, <4 x i32> %val1, <4 x i32> %val0
@@ -223,7 +223,7 @@ define void @s_min_max_v4i32(<4 x i32> addrspace(1)* %out0, <4 x i32> addrspace(
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e32
; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
-define void @v_min_max_i32_user(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %ptr0, i32 addrspace(1)* %ptr1) nounwind {
+define amdgpu_kernel void @v_min_max_i32_user(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %ptr0, i32 addrspace(1)* %ptr1) nounwind {
%val0 = load volatile i32, i32 addrspace(1)* %ptr0
%val1 = load volatile i32, i32 addrspace(1)* %ptr1
diff --git a/test/CodeGen/AMDGPU/sminmax.v2i16.ll b/test/CodeGen/AMDGPU/sminmax.v2i16.ll
new file mode 100644
index 000000000000..4e093cdece21
--- /dev/null
+++ b/test/CodeGen/AMDGPU/sminmax.v2i16.ll
@@ -0,0 +1,224 @@
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=CIVI -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=CIVI -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}s_abs_v2i16:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_sub_i16 [[SUB:v[0-9]+]], 0, [[VAL]]
+; GFX9: v_pk_max_i16 [[MAX:v[0-9]+]], [[VAL]], [[SUB]]
+; GFX9: v_pk_add_u16 [[ADD:v[0-9]+]], [[MAX]], 2
+
+; VI: v_sub_i32_e32
+; VI-DAG: v_sub_i32_e32
+; VI: v_max_i32_sdwa v{{[0-9]+}}, sext(v{{[0-9]+}}), v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI: v_max_i32_sdwa v{{[0-9]+}}, sext(v{{[0-9]+}}), v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI: v_add_i32_e32
+; VI: v_add_i32_e32
+; VI: v_or_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+
+; CI: v_sub_i32_e32
+; CI-DAG: v_sub_i32_e32
+; CI: v_bfe_i32
+; CI-DAG: v_bfe_i32
+; CI-DAG: v_add_i32_e32
+; CI-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 16
+; CI: v_add_i32_e32
+; CI: v_and_b32_e32 v{{[0-9]+}}, 0xffff,
+; CI: v_or_b32_e32
+define amdgpu_kernel void @s_abs_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %val) #0 {
+ %neg = sub <2 x i16> zeroinitializer, %val
+ %cond = icmp sgt <2 x i16> %val, %neg
+ %res = select <2 x i1> %cond, <2 x i16> %val, <2 x i16> %neg
+ %res2 = add <2 x i16> %res, <i16 2, i16 2>
+ store <2 x i16> %res2, <2 x i16> addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_abs_v2i16:
+; GFX9: flat_load_dword [[VAL:v[0-9]+]]
+; GFX9: v_pk_sub_i16 [[SUB:v[0-9]+]], 0, [[VAL]]
+; GFX9: v_pk_max_i16 [[MAX:v[0-9]+]], [[VAL]], [[SUB]]
+; GFX9: v_pk_add_u16 [[ADD:v[0-9]+]], [[MAX]], 2
+
+; VI: v_lshrrev_b32_e32 v{{[0-9]+}}, 16,
+; VI: v_sub_u16_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
+; VI: v_sub_u16_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
+; VI: v_max_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_max_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_add_u16_e32 v{{[0-9]+}}, 2, v{{[0-9]+}}
+; VI: v_add_u16_e32 v{{[0-9]+}}, 2, v{{[0-9]+}}
+; VI-NOT: v_and_b32
+; VI: v_or_b32_e32
+define amdgpu_kernel void @v_abs_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %src) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.in = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %src, i32 %tid
+ %gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
+ %val = load <2 x i16>, <2 x i16> addrspace(1)* %gep.in, align 4
+ %neg = sub <2 x i16> zeroinitializer, %val
+ %cond = icmp sgt <2 x i16> %val, %neg
+ %res = select <2 x i1> %cond, <2 x i16> %val, <2 x i16> %neg
+ %res2 = add <2 x i16> %res, <i16 2, i16 2>
+ store <2 x i16> %res2, <2 x i16> addrspace(1)* %gep.out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_abs_v2i16_2:
+; GFX9: s_load_dword [[VAL:s[0-9]+]]
+; GFX9: v_pk_sub_i16 [[SUB:v[0-9]+]], 0, [[VAL]]
+; GFX9: v_pk_max_i16 [[MAX:v[0-9]+]], [[VAL]], [[SUB]]
+; GFX9: v_pk_add_u16 [[ADD:v[0-9]+]], [[MAX]], 2
+define amdgpu_kernel void @s_abs_v2i16_2(<2 x i16> addrspace(1)* %out, <2 x i16> %val) #0 {
+ %z0 = insertelement <2 x i16> undef, i16 0, i16 0
+ %z1 = insertelement <2 x i16> %z0, i16 0, i16 1
+ %t0 = insertelement <2 x i16> undef, i16 2, i16 0
+ %t1 = insertelement <2 x i16> %t0, i16 2, i16 1
+ %neg = sub <2 x i16> %z1, %val
+ %cond = icmp sgt <2 x i16> %val, %neg
+ %res = select <2 x i1> %cond, <2 x i16> %val, <2 x i16> %neg
+ %res2 = add <2 x i16> %res, %t1
+ store <2 x i16> %res2, <2 x i16> addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_abs_v2i16_2:
+; GFX9: buffer_load_dword [[VAL:v[0-9]+]]
+; GFX9: v_pk_sub_i16 [[SUB:v[0-9]+]], 0, [[VAL]]
+; GFX9: v_pk_max_i16 [[MAX:v[0-9]+]], [[VAL]], [[SUB]]
+; GFX9: v_pk_add_u16 [[ADD:v[0-9]+]], [[MAX]], 2
+define amdgpu_kernel void @v_abs_v2i16_2(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %src) #0 {
+ %z0 = insertelement <2 x i16> undef, i16 0, i16 0
+ %z1 = insertelement <2 x i16> %z0, i16 0, i16 1
+ %t0 = insertelement <2 x i16> undef, i16 2, i16 0
+ %t1 = insertelement <2 x i16> %t0, i16 2, i16 1
+ %val = load <2 x i16>, <2 x i16> addrspace(1)* %src, align 4
+ %neg = sub <2 x i16> %z1, %val
+ %cond = icmp sgt <2 x i16> %val, %neg
+ %res = select <2 x i1> %cond, <2 x i16> %val, <2 x i16> %neg
+ %res2 = add <2 x i16> %res, %t1
+ store <2 x i16> %res2, <2 x i16> addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_abs_v4i16:
+; GFX9: s_load_dword [[VAL0:s[0-9]+]]
+; GFX9: s_load_dword [[VAL1:s[0-9]+]]
+; GFX9-DAG: v_pk_sub_i16 [[SUB0:v[0-9]+]], 0, [[VAL0]]
+; GFX9-DAG: v_pk_max_i16 [[MAX0:v[0-9]+]], [[VAL0]], [[SUB0]]
+; GFX9-DAG: v_pk_add_u16 [[ADD0:v[0-9]+]], [[MAX0]], 2
+
+; GFX9-DAG: v_pk_sub_i16 [[SUB1:v[0-9]+]], 0, [[VAL1]]
+; GFX9-DAG: v_pk_max_i16 [[MAX1:v[0-9]+]], [[VAL1]], [[SUB1]]
+; GFX9-DAG: v_pk_add_u16 [[ADD1:v[0-9]+]], [[MAX1]], 2
+define amdgpu_kernel void @s_abs_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %val) #0 {
+ %z0 = insertelement <4 x i16> undef, i16 0, i16 0
+ %z1 = insertelement <4 x i16> %z0, i16 0, i16 1
+ %z2 = insertelement <4 x i16> %z1, i16 0, i16 2
+ %z3 = insertelement <4 x i16> %z2, i16 0, i16 3
+ %t0 = insertelement <4 x i16> undef, i16 2, i16 0
+ %t1 = insertelement <4 x i16> %t0, i16 2, i16 1
+ %t2 = insertelement <4 x i16> %t1, i16 2, i16 2
+ %t3 = insertelement <4 x i16> %t2, i16 2, i16 3
+ %neg = sub <4 x i16> %z3, %val
+ %cond = icmp sgt <4 x i16> %val, %neg
+ %res = select <4 x i1> %cond, <4 x i16> %val, <4 x i16> %neg
+ %res2 = add <4 x i16> %res, %t3
+ store <4 x i16> %res2, <4 x i16> addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_abs_v4i16:
+; GFX9: buffer_load_dwordx2 v{{\[}}[[VAL0:[0-9]+]]:[[VAL1:[0-9]+]]{{\]}}
+
+; GFX9-DAG: v_pk_sub_i16 [[SUB0:v[0-9]+]], 0, v[[VAL0]]
+; GFX9-DAG: v_pk_max_i16 [[MAX0:v[0-9]+]], v[[VAL0]], [[SUB0]]
+; GFX9-DAG: v_pk_add_u16 [[ADD0:v[0-9]+]], [[MAX0]], 2
+
+; GFX9-DAG: v_pk_sub_i16 [[SUB1:v[0-9]+]], 0, v[[VAL1]]
+; GFX9-DAG: v_pk_max_i16 [[MAX1:v[0-9]+]], v[[VAL1]], [[SUB1]]
+; GFX9-DAG: v_pk_add_u16 [[ADD1:v[0-9]+]], [[MAX1]], 2
+define amdgpu_kernel void @v_abs_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %src) #0 {
+ %z0 = insertelement <4 x i16> undef, i16 0, i16 0
+ %z1 = insertelement <4 x i16> %z0, i16 0, i16 1
+ %z2 = insertelement <4 x i16> %z1, i16 0, i16 2
+ %z3 = insertelement <4 x i16> %z2, i16 0, i16 3
+ %t0 = insertelement <4 x i16> undef, i16 2, i16 0
+ %t1 = insertelement <4 x i16> %t0, i16 2, i16 1
+ %t2 = insertelement <4 x i16> %t1, i16 2, i16 2
+ %t3 = insertelement <4 x i16> %t2, i16 2, i16 3
+ %val = load <4 x i16>, <4 x i16> addrspace(1)* %src, align 4
+ %neg = sub <4 x i16> %z3, %val
+ %cond = icmp sgt <4 x i16> %val, %neg
+ %res = select <4 x i1> %cond, <4 x i16> %val, <4 x i16> %neg
+ %res2 = add <4 x i16> %res, %t3
+ store <4 x i16> %res2, <4 x i16> addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_min_max_v2i16:
+define amdgpu_kernel void @s_min_max_v2i16(<2 x i16> addrspace(1)* %out0, <2 x i16> addrspace(1)* %out1, <2 x i16> %val0, <2 x i16> %val1) #0 {
+ %cond0 = icmp sgt <2 x i16> %val0, %val1
+ %sel0 = select <2 x i1> %cond0, <2 x i16> %val0, <2 x i16> %val1
+ %sel1 = select <2 x i1> %cond0, <2 x i16> %val1, <2 x i16> %val0
+
+ store volatile <2 x i16> %sel0, <2 x i16> addrspace(1)* %out0, align 4
+ store volatile <2 x i16> %sel1, <2 x i16> addrspace(1)* %out1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_min_max_v2i16:
+define amdgpu_kernel void @v_min_max_v2i16(<2 x i16> addrspace(1)* %out0, <2 x i16> addrspace(1)* %out1, <2 x i16> addrspace(1)* %ptr0, <2 x i16> addrspace(1)* %ptr1) #0 {
+ %val0 = load volatile <2 x i16>, <2 x i16> addrspace(1)* %ptr0
+ %val1 = load volatile <2 x i16>, <2 x i16> addrspace(1)* %ptr1
+
+ %cond0 = icmp sgt <2 x i16> %val0, %val1
+ %sel0 = select <2 x i1> %cond0, <2 x i16> %val0, <2 x i16> %val1
+ %sel1 = select <2 x i1> %cond0, <2 x i16> %val1, <2 x i16> %val0
+
+ store volatile <2 x i16> %sel0, <2 x i16> addrspace(1)* %out0, align 4
+ store volatile <2 x i16> %sel1, <2 x i16> addrspace(1)* %out1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_min_max_v4i16:
+define amdgpu_kernel void @s_min_max_v4i16(<4 x i16> addrspace(1)* %out0, <4 x i16> addrspace(1)* %out1, <4 x i16> %val0, <4 x i16> %val1) #0 {
+ %cond0 = icmp sgt <4 x i16> %val0, %val1
+ %sel0 = select <4 x i1> %cond0, <4 x i16> %val0, <4 x i16> %val1
+ %sel1 = select <4 x i1> %cond0, <4 x i16> %val1, <4 x i16> %val0
+
+ store volatile <4 x i16> %sel0, <4 x i16> addrspace(1)* %out0, align 4
+ store volatile <4 x i16> %sel1, <4 x i16> addrspace(1)* %out1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_min_max_v2i16_user:
+define amdgpu_kernel void @v_min_max_v2i16_user(<2 x i16> addrspace(1)* %out0, <2 x i16> addrspace(1)* %out1, <2 x i16> addrspace(1)* %ptr0, <2 x i16> addrspace(1)* %ptr1) #0 {
+ %val0 = load volatile <2 x i16>, <2 x i16> addrspace(1)* %ptr0
+ %val1 = load volatile <2 x i16>, <2 x i16> addrspace(1)* %ptr1
+
+ %cond0 = icmp sgt <2 x i16> %val0, %val1
+ %sel0 = select <2 x i1> %cond0, <2 x i16> %val0, <2 x i16> %val1
+ %sel1 = select <2 x i1> %cond0, <2 x i16> %val1, <2 x i16> %val0
+
+ store volatile <2 x i16> %sel0, <2 x i16> addrspace(1)* %out0, align 4
+ store volatile <2 x i16> %sel1, <2 x i16> addrspace(1)* %out1, align 4
+ store volatile <2 x i1> %cond0, <2 x i1> addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}u_min_max_v2i16:
+; GFX9: v_pk_max_u16 v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
+; GFX9: v_pk_min_u16 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @u_min_max_v2i16(<2 x i16> addrspace(1)* %out0, <2 x i16> addrspace(1)* %out1, <2 x i16> %val0, <2 x i16> %val1) #0 {
+ %cond0 = icmp ugt <2 x i16> %val0, %val1
+ %sel0 = select <2 x i1> %cond0, <2 x i16> %val0, <2 x i16> %val1
+ %sel1 = select <2 x i1> %cond0, <2 x i16> %val1, <2 x i16> %val0
+
+ store volatile <2 x i16> %sel0, <2 x i16> addrspace(1)* %out0, align 4
+ store volatile <2 x i16> %sel1, <2 x i16> addrspace(1)* %out1, align 4
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/smrd-vccz-bug.ll b/test/CodeGen/AMDGPU/smrd-vccz-bug.ll
index daac5b92b1ef..343211b0219c 100644
--- a/test/CodeGen/AMDGPU/smrd-vccz-bug.ll
+++ b/test/CodeGen/AMDGPU/smrd-vccz-bug.ll
@@ -12,7 +12,7 @@
; GCN: buffer_store_dword
; GCN: [[EXIT]]:
; GCN: s_endpgm
-define void @vccz_workaround(i32 addrspace(2)* %in, i32 addrspace(1)* %out, float %cond) {
+define amdgpu_kernel void @vccz_workaround(i32 addrspace(2)* %in, i32 addrspace(1)* %out, float %cond) {
entry:
%cnd = fcmp oeq float 0.0, %cond
%sgpr = load volatile i32, i32 addrspace(2)* %in
@@ -32,7 +32,7 @@ endif:
; GCN: buffer_store_dword
; GCN: [[EXIT]]:
; GCN: s_endpgm
-define void @vccz_noworkaround(float addrspace(1)* %in, float addrspace(1)* %out) {
+define amdgpu_kernel void @vccz_noworkaround(float addrspace(1)* %in, float addrspace(1)* %out) {
entry:
%vgpr = load volatile float, float addrspace(1)* %in
%cnd = fcmp oeq float 0.0, %vgpr
diff --git a/test/CodeGen/AMDGPU/smrd.ll b/test/CodeGen/AMDGPU/smrd.ll
index 9b118425f9cb..50f72c670598 100644
--- a/test/CodeGen/AMDGPU/smrd.ll
+++ b/test/CodeGen/AMDGPU/smrd.ll
@@ -1,16 +1,16 @@
-; RUN: llc < %s -march=amdgcn -show-mc-encoding -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=GCN --check-prefix=SIVI %s
-; RUN: llc < %s -march=amdgcn -mcpu=bonaire -show-mc-encoding -verify-machineinstrs | FileCheck --check-prefix=CI --check-prefix=GCN %s
-; RUN: llc < %s -march=amdgcn -mcpu=tonga -show-mc-encoding -verify-machineinstrs | FileCheck --check-prefix=VI --check-prefix=GCN --check-prefix=SIVI %s
+; RUN: llc -march=amdgcn -show-mc-encoding -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=SIVI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -show-mc-encoding -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -show-mc-encoding -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=SIVI %s
; SMRD load with an immediate offset.
; GCN-LABEL: {{^}}smrd0:
; SICI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x1 ; encoding: [0x01
; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4
-define void @smrd0(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+define amdgpu_kernel void @smrd0(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) #0 {
entry:
- %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 1
- %1 = load i32, i32 addrspace(2)* %0
- store i32 %1, i32 addrspace(1)* %out
+ %tmp = getelementptr i32, i32 addrspace(2)* %ptr, i64 1
+ %tmp1 = load i32, i32 addrspace(2)* %tmp
+ store i32 %tmp1, i32 addrspace(1)* %out
ret void
}
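
; Background note (a reading of the checks, not from the original comments):
; SI/CI encode the SMRD immediate offset in dwords while VI encodes it in
; bytes, so the same GEP index shows up as 0x1 vs 0x4 here and as 0xff vs
; 0x3fc (255 * 4) in smrd1 below. The following tests probe the limits of
; that field on each target.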
@@ -18,11 +18,11 @@ entry:
; GCN-LABEL: {{^}}smrd1:
; SICI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff,0x{{[0-9]+[137]}}
; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3fc
-define void @smrd1(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+define amdgpu_kernel void @smrd1(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) #0 {
entry:
- %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 255
- %1 = load i32, i32 addrspace(2)* %0
- store i32 %1, i32 addrspace(1)* %out
+ %tmp = getelementptr i32, i32 addrspace(2)* %ptr, i64 255
+ %tmp1 = load i32, i32 addrspace(2)* %tmp
+ store i32 %tmp1, i32 addrspace(1)* %out
ret void
}
@@ -33,11 +33,11 @@ entry:
; CI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x100
; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x400
; GCN: s_endpgm
-define void @smrd2(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+define amdgpu_kernel void @smrd2(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) #0 {
entry:
- %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 256
- %1 = load i32, i32 addrspace(2)* %0
- store i32 %1, i32 addrspace(1)* %out
+ %tmp = getelementptr i32, i32 addrspace(2)* %ptr, i64 256
+ %tmp1 = load i32, i32 addrspace(2)* %tmp
+ store i32 %tmp1, i32 addrspace(1)* %out
ret void
}
@@ -48,11 +48,11 @@ entry:
; SI: s_load_dwordx2 s[{{[0-9]:[0-9]}}], s[{{[0-9]:[0-9]}}], 0xb ; encoding: [0x0b
; TODO: Add VI checks
; GCN: s_endpgm
-define void @smrd3(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+define amdgpu_kernel void @smrd3(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) #0 {
entry:
- %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 4294967296 ; 2 ^ 32
- %1 = load i32, i32 addrspace(2)* %0
- store i32 %1, i32 addrspace(1)* %out
+ %tmp = getelementptr i32, i32 addrspace(2)* %ptr, i64 4294967296
+ %tmp1 = load i32, i32 addrspace(2)* %tmp
+ store i32 %tmp1, i32 addrspace(1)* %out
ret void
}
@@ -62,11 +62,11 @@ entry:
; SI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3ffff
; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xffffc
-define void @smrd4(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+define amdgpu_kernel void @smrd4(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) #0 {
entry:
- %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 262143
- %1 = load i32, i32 addrspace(2)* %0
- store i32 %1, i32 addrspace(1)* %out
+ %tmp = getelementptr i32, i32 addrspace(2)* %ptr, i64 262143
+ %tmp1 = load i32, i32 addrspace(2)* %tmp
+ store i32 %tmp1, i32 addrspace(1)* %out
ret void
}
@@ -76,11 +76,11 @@ entry:
; SIVI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x40000
; GCN: s_endpgm
-define void @smrd5(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+define amdgpu_kernel void @smrd5(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) #0 {
entry:
- %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 262144
- %1 = load i32, i32 addrspace(2)* %0
- store i32 %1, i32 addrspace(1)* %out
+ %tmp = getelementptr i32, i32 addrspace(2)* %ptr, i64 262144
+ %tmp1 = load i32, i32 addrspace(2)* %tmp
+ store i32 %tmp1, i32 addrspace(1)* %out
ret void
}
@@ -88,12 +88,12 @@ entry:
; GCN-LABEL: {{^}}smrd_load_const0:
; SICI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4 ; encoding: [0x04
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x10
-define amdgpu_ps void @smrd_load_const0(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) {
+define amdgpu_ps void @smrd_load_const0(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
- %21 = load <16 x i8>, <16 x i8> addrspace(2)* %20
- %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 16)
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
+ %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
+ %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp
+ %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 16)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
ret void
}
@@ -102,14 +102,15 @@ main_body:
; GCN-LABEL: {{^}}smrd_load_const1:
; SICI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3fc
-define amdgpu_ps void @smrd_load_const1(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) {
+define amdgpu_ps void @smrd_load_const1(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
- %21 = load <16 x i8>, <16 x i8> addrspace(2)* %20
- %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 1020)
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
+ %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
+ %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp
+ %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 1020)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
ret void
}
+
; SMRD load using the load.const intrinsic with an immediate offset greater
; than the largest possible immediate.
@@ -118,12 +119,12 @@ main_body:
; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x100
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x400
-define amdgpu_ps void @smrd_load_const2(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) {
+define amdgpu_ps void @smrd_load_const2(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
- %21 = load <16 x i8>, <16 x i8> addrspace(2)* %20
- %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 1024)
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
+ %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
+ %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp
+ %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 1024)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
ret void
}
@@ -133,12 +134,12 @@ main_body:
; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3ffff
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xffffc
-define amdgpu_ps void @smrd_load_const3(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) {
+define amdgpu_ps void @smrd_load_const3(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
- %21 = load <16 x i8>, <16 x i8> addrspace(2)* %20
- %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 1048572)
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
+ %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
+ %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp
+ %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 1048572)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
ret void
}
@@ -148,18 +149,17 @@ main_body:
; SIVI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x40000
; GCN: s_endpgm
-define amdgpu_ps void @smrd_load_const4(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) {
+define amdgpu_ps void @smrd_load_const4(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
- %21 = load <16 x i8>, <16 x i8> addrspace(2)* %20
- %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 1048576)
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
+ %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
+ %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp
+ %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 1048576)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
ret void
}
-; Function Attrs: nounwind readnone
-declare float @llvm.SI.load.const(<16 x i8>, i32) #0
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
+declare float @llvm.SI.load.const(<16 x i8>, i32) #1
-attributes #0 = { nounwind readnone }
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/sopk-compares.ll b/test/CodeGen/AMDGPU/sopk-compares.ll
index 74acc5bc961c..c0f773ca70c2 100644
--- a/test/CodeGen/AMDGPU/sopk-compares.ll
+++ b/test/CodeGen/AMDGPU/sopk-compares.ll
@@ -9,7 +9,7 @@ declare i32 @llvm.amdgcn.groupstaticsize() #1
; GCN-LABEL: {{^}}br_scc_eq_i32_inline_imm:
; GCN: s_cmp_eq_u32 s{{[0-9]+}}, 4{{$}}
-define void @br_scc_eq_i32_inline_imm(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_eq_i32_inline_imm(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp eq i32 %cond, 4
br i1 %cmp0, label %endif, label %if
@@ -25,7 +25,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_eq_i32_simm16_max:
; GCN: s_cmpk_eq_i32 s{{[0-9]+}}, 0x7fff{{$}}
-define void @br_scc_eq_i32_simm16_max(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_eq_i32_simm16_max(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp eq i32 %cond, 32767
br i1 %cmp0, label %endif, label %if
@@ -41,7 +41,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_eq_i32_simm16_max_p1:
; GCN: s_cmpk_eq_u32 s{{[0-9]+}}, 0x8000{{$}}
-define void @br_scc_eq_i32_simm16_max_p1(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_eq_i32_simm16_max_p1(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp eq i32 %cond, 32768
br i1 %cmp0, label %endif, label %if
@@ -57,7 +57,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_ne_i32_simm16_max_p1:
; GCN: s_cmpk_lg_u32 s{{[0-9]+}}, 0x8000{{$}}
-define void @br_scc_ne_i32_simm16_max_p1(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_ne_i32_simm16_max_p1(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp ne i32 %cond, 32768
br i1 %cmp0, label %endif, label %if
@@ -73,7 +73,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_eq_i32_simm16_min:
; GCN: s_cmpk_eq_i32 s{{[0-9]+}}, 0x8000{{$}}
-define void @br_scc_eq_i32_simm16_min(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_eq_i32_simm16_min(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp eq i32 %cond, -32768
br i1 %cmp0, label %endif, label %if
@@ -89,7 +89,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_eq_i32_simm16_min_m1:
; GCN: s_cmp_eq_u32 s{{[0-9]+}}, 0xffff7fff{{$}}
-define void @br_scc_eq_i32_simm16_min_m1(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_eq_i32_simm16_min_m1(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp eq i32 %cond, -32769
br i1 %cmp0, label %endif, label %if
@@ -105,7 +105,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_eq_i32_uimm15_max:
; GCN: s_cmpk_eq_u32 s{{[0-9]+}}, 0xffff{{$}}
-define void @br_scc_eq_i32_uimm15_max(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_eq_i32_uimm15_max(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp eq i32 %cond, 65535
br i1 %cmp0, label %endif, label %if
@@ -121,7 +121,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_eq_i32_uimm16_max:
; GCN: s_cmpk_eq_u32 s{{[0-9]+}}, 0xffff{{$}}
-define void @br_scc_eq_i32_uimm16_max(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_eq_i32_uimm16_max(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp eq i32 %cond, 65535
br i1 %cmp0, label %endif, label %if
@@ -137,7 +137,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_eq_i32_uimm16_max_p1:
; GCN: s_cmp_eq_u32 s{{[0-9]+}}, 0x10000{{$}}
-define void @br_scc_eq_i32_uimm16_max_p1(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_eq_i32_uimm16_max_p1(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp eq i32 %cond, 65536
br i1 %cmp0, label %endif, label %if
@@ -154,7 +154,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_eq_i32:
; GCN: s_cmpk_eq_i32 s{{[0-9]+}}, 0x41{{$}}
-define void @br_scc_eq_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_eq_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp eq i32 %cond, 65
br i1 %cmp0, label %endif, label %if
@@ -170,7 +170,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_ne_i32:
; GCN: s_cmpk_lg_i32 s{{[0-9]+}}, 0x41{{$}}
-define void @br_scc_ne_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_ne_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp ne i32 %cond, 65
br i1 %cmp0, label %endif, label %if
@@ -186,7 +186,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_sgt_i32:
; GCN: s_cmpk_gt_i32 s{{[0-9]+}}, 0x41{{$}}
-define void @br_scc_sgt_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_sgt_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp sgt i32 %cond, 65
br i1 %cmp0, label %endif, label %if
@@ -202,7 +202,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_sgt_i32_simm16_max:
; GCN: s_cmpk_gt_i32 s{{[0-9]+}}, 0x7fff{{$}}
-define void @br_scc_sgt_i32_simm16_max(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_sgt_i32_simm16_max(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp sgt i32 %cond, 32767
br i1 %cmp0, label %endif, label %if
@@ -218,7 +218,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_sgt_i32_simm16_max_p1:
; GCN: s_cmp_gt_i32 s{{[0-9]+}}, 0x8000{{$}}
-define void @br_scc_sgt_i32_simm16_max_p1(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_sgt_i32_simm16_max_p1(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp sgt i32 %cond, 32768
br i1 %cmp0, label %endif, label %if
@@ -234,7 +234,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_sge_i32:
; GCN: s_cmpk_ge_i32 s{{[0-9]+}}, 0x800{{$}}
-define void @br_scc_sge_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_sge_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%size = call i32 @llvm.amdgcn.groupstaticsize()
%cmp0 = icmp sge i32 %cond, %size
@@ -251,7 +251,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_slt_i32:
; GCN: s_cmpk_lt_i32 s{{[0-9]+}}, 0x41{{$}}
-define void @br_scc_slt_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_slt_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp slt i32 %cond, 65
br i1 %cmp0, label %endif, label %if
@@ -267,7 +267,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_sle_i32:
; GCN: s_cmpk_le_i32 s{{[0-9]+}}, 0x800{{$}}
-define void @br_scc_sle_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_sle_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%size = call i32 @llvm.amdgcn.groupstaticsize()
%cmp0 = icmp sle i32 %cond, %size
@@ -284,7 +284,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_ugt_i32:
; GCN: s_cmpk_gt_u32 s{{[0-9]+}}, 0x800{{$}}
-define void @br_scc_ugt_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_ugt_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%size = call i32 @llvm.amdgcn.groupstaticsize()
%cmp0 = icmp ugt i32 %cond, %size
@@ -301,7 +301,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_uge_i32:
; GCN: s_cmpk_ge_u32 s{{[0-9]+}}, 0x800{{$}}
-define void @br_scc_uge_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_uge_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%size = call i32 @llvm.amdgcn.groupstaticsize()
%cmp0 = icmp uge i32 %cond, %size
@@ -318,7 +318,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_ult_i32:
; GCN: s_cmpk_lt_u32 s{{[0-9]+}}, 0x41{{$}}
-define void @br_scc_ult_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_ult_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp ult i32 %cond, 65
br i1 %cmp0, label %endif, label %if
@@ -334,7 +334,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_ult_i32_min_simm16:
; GCN: s_cmp_lt_u32 s2, 0xffff8000
-define void @br_scc_ult_i32_min_simm16(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_ult_i32_min_simm16(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp ult i32 %cond, -32768
br i1 %cmp0, label %endif, label %if
@@ -350,7 +350,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_ult_i32_min_simm16_m1:
; GCN: s_cmp_lt_u32 s{{[0-9]+}}, 0xffff7fff{{$}}
-define void @br_scc_ult_i32_min_simm16_m1(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_ult_i32_min_simm16_m1(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp ult i32 %cond, -32769
br i1 %cmp0, label %endif, label %if
@@ -366,7 +366,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_ule_i32:
; GCN: s_cmpk_le_u32 s{{[0-9]+}}, 0x800{{$}}
-define void @br_scc_ule_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_ule_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%size = call i32 @llvm.amdgcn.groupstaticsize()
%cmp0 = icmp ule i32 %cond, %size
@@ -383,7 +383,7 @@ endif:
; GCN-LABEL: {{^}}commute_br_scc_eq_i32:
; GCN: s_cmpk_eq_i32 s{{[0-9]+}}, 0x800{{$}}
-define void @commute_br_scc_eq_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @commute_br_scc_eq_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%size = call i32 @llvm.amdgcn.groupstaticsize()
%cmp0 = icmp eq i32 %size, %cond
@@ -400,7 +400,7 @@ endif:
; GCN-LABEL: {{^}}commute_br_scc_ne_i32:
; GCN: s_cmpk_lg_i32 s{{[0-9]+}}, 0x800{{$}}
-define void @commute_br_scc_ne_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @commute_br_scc_ne_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%size = call i32 @llvm.amdgcn.groupstaticsize()
%cmp0 = icmp ne i32 %size, %cond
@@ -417,7 +417,7 @@ endif:
; GCN-LABEL: {{^}}commute_br_scc_sgt_i32:
; GCN: s_cmpk_lt_i32 s{{[0-9]+}}, 0x800{{$}}
-define void @commute_br_scc_sgt_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @commute_br_scc_sgt_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%size = call i32 @llvm.amdgcn.groupstaticsize()
%cmp0 = icmp sgt i32 %size, %cond
@@ -434,7 +434,7 @@ endif:
; GCN-LABEL: {{^}}commute_br_scc_sge_i32:
; GCN: s_cmpk_le_i32 s{{[0-9]+}}, 0x800{{$}}
-define void @commute_br_scc_sge_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @commute_br_scc_sge_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%size = call i32 @llvm.amdgcn.groupstaticsize()
%cmp0 = icmp sge i32 %size, %cond
@@ -451,7 +451,7 @@ endif:
; GCN-LABEL: {{^}}commute_br_scc_slt_i32:
; GCN: s_cmpk_gt_i32 s{{[0-9]+}}, 0x800{{$}}
-define void @commute_br_scc_slt_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @commute_br_scc_slt_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%size = call i32 @llvm.amdgcn.groupstaticsize()
%cmp0 = icmp slt i32 %size, %cond
@@ -468,7 +468,7 @@ endif:
; GCN-LABEL: {{^}}commute_br_scc_sle_i32:
; GCN: s_cmpk_ge_i32 s{{[0-9]+}}, 0x800{{$}}
-define void @commute_br_scc_sle_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @commute_br_scc_sle_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%size = call i32 @llvm.amdgcn.groupstaticsize()
%cmp0 = icmp sle i32 %size, %cond
@@ -485,7 +485,7 @@ endif:
; GCN-LABEL: {{^}}commute_br_scc_ugt_i32:
; GCN: s_cmpk_lt_u32 s{{[0-9]+}}, 0x800{{$}}
-define void @commute_br_scc_ugt_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @commute_br_scc_ugt_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%size = call i32 @llvm.amdgcn.groupstaticsize()
%cmp0 = icmp ugt i32 %size, %cond
@@ -502,7 +502,7 @@ endif:
; GCN-LABEL: {{^}}commute_br_scc_uge_i32:
; GCN: s_cmpk_le_u32 s{{[0-9]+}}, 0x800{{$}}
-define void @commute_br_scc_uge_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @commute_br_scc_uge_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%size = call i32 @llvm.amdgcn.groupstaticsize()
%cmp0 = icmp uge i32 %size, %cond
@@ -519,7 +519,7 @@ endif:
; GCN-LABEL: {{^}}commute_br_scc_ult_i32:
; GCN: s_cmpk_gt_u32 s{{[0-9]+}}, 0x800{{$}}
-define void @commute_br_scc_ult_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @commute_br_scc_ult_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%size = call i32 @llvm.amdgcn.groupstaticsize()
%cmp0 = icmp ult i32 %size, %cond
@@ -536,7 +536,7 @@ endif:
; GCN-LABEL: {{^}}commute_br_scc_ule_i32:
; GCN: s_cmpk_ge_u32 s{{[0-9]+}}, 0x800{{$}}
-define void @commute_br_scc_ule_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @commute_br_scc_ule_i32(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%size = call i32 @llvm.amdgcn.groupstaticsize()
%cmp0 = icmp ule i32 %size, %cond
@@ -553,7 +553,7 @@ endif:
; GCN-LABEL: {{^}}br_scc_ult_i32_non_u16:
; GCN: s_cmp_lt_u32 s2, 0xfffff7ff
-define void @br_scc_ult_i32_non_u16(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_ult_i32_non_u16(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%size = call i32 @llvm.amdgcn.groupstaticsize()
%not.size = xor i32 %size, -1
@@ -573,7 +573,7 @@ endif:
; VI: s_cmp_eq_u64 s{{\[[0-9]+:[0-9]+\]}}, 4
; SI: v_cmp_eq_u64_e64
-define void @br_scc_eq_i64_inline_imm(i64 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_eq_i64_inline_imm(i64 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp eq i64 %cond, 4
br i1 %cmp0, label %endif, label %if
@@ -593,7 +593,7 @@ endif:
; VI: s_cmp_eq_u64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}
; SI: v_cmp_eq_u64_e32
-define void @br_scc_eq_i64_simm16(i64 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_eq_i64_simm16(i64 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp eq i64 %cond, 1234
br i1 %cmp0, label %endif, label %if
@@ -611,7 +611,7 @@ endif:
; VI: s_cmp_lg_u64 s{{\[[0-9]+:[0-9]+\]}}, 4
; SI: v_cmp_ne_u64_e64
-define void @br_scc_ne_i64_inline_imm(i64 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_ne_i64_inline_imm(i64 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp ne i64 %cond, 4
br i1 %cmp0, label %endif, label %if
@@ -631,7 +631,7 @@ endif:
; VI: s_cmp_lg_u64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}
; SI: v_cmp_ne_u64_e32
-define void @br_scc_ne_i64_simm16(i64 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @br_scc_ne_i64_simm16(i64 %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = icmp ne i64 %cond, 1234
br i1 %cmp0, label %endif, label %if
diff --git a/test/CodeGen/AMDGPU/spill-alloc-sgpr-init-bug.ll b/test/CodeGen/AMDGPU/spill-alloc-sgpr-init-bug.ll
index ff9429843b22..63ea21b05339 100644
--- a/test/CodeGen/AMDGPU/spill-alloc-sgpr-init-bug.ll
+++ b/test/CodeGen/AMDGPU/spill-alloc-sgpr-init-bug.ll
@@ -4,7 +4,7 @@
; allocate scratch registers correctly. Check that this test compiles without
; error.
; TONGA-LABEL: test
-define void @test(<256 x i32> addrspace(1)* %out, <256 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @test(<256 x i32> addrspace(1)* %out, <256 x i32> addrspace(1)* %in) {
entry:
%mbcnt.lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
%tid = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %mbcnt.lo)
diff --git a/test/CodeGen/AMDGPU/spill-cfg-position.ll b/test/CodeGen/AMDGPU/spill-cfg-position.ll
new file mode 100644
index 000000000000..1ca0919258a8
--- /dev/null
+++ b/test/CodeGen/AMDGPU/spill-cfg-position.ll
@@ -0,0 +1,78 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs -stress-regalloc=6 < %s | FileCheck %s
+
+; The inline spiller can decide to move a spill as early as possible in the
+; basic block. It will skip phis and labels, but we also need to make sure it
+; skips instructions in the basic block prologue which restore the exec mask.
+; Make sure the instruction that restores the exec mask immediately follows
+; the label.
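+;
+; As a sketch of the hazard (assumed instruction shapes, not checked output),
+; a join block's prologue looks like:
+;   BB0_2:
+;     s_or_b64 exec, exec, s[0:1]   ; prologue: restore the exec mask
+;     buffer_load_dword v0, ...     ; a hoisted reload must stay below this
+; so a spill or reload moved to the top of the block still has to be placed
+; after the exec-mask restore, which the CHECK lines below enforce.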
+
+; CHECK-LABEL: {{^}}spill_cfg_position:
+; CHECK: s_cbranch_execz [[LABEL1:BB[0-9_]+]]
+; CHECK: {{^}}[[LABEL1]]:
+; CHECK: s_cbranch_execz [[LABEL2:BB[0-9_]+]]
+; CHECK: {{^}}[[LABEL2]]:
+; CHECK-NEXT: s_or_b64 exec
+; CHECK: buffer_
+
+define amdgpu_kernel void @spill_cfg_position(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %tmp1 = tail call i32 @llvm.amdgcn.workitem.id.x() #0
+ %tmp14 = load i32, i32 addrspace(1)* %arg, align 4
+ %tmp15 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 1
+ %tmp16 = load i32, i32 addrspace(1)* %tmp15, align 4
+ %tmp17 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 2
+ %tmp18 = load i32, i32 addrspace(1)* %tmp17, align 4
+ %tmp19 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 3
+ %tmp20 = load i32, i32 addrspace(1)* %tmp19, align 4
+ %tmp21 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 4
+ %tmp22 = load i32, i32 addrspace(1)* %tmp21, align 4
+ %tmp23 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 5
+ %tmp24 = load i32, i32 addrspace(1)* %tmp23, align 4
+ %tmp25 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 6
+ %tmp26 = load i32, i32 addrspace(1)* %tmp25, align 4
+ %tmp27 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 7
+ %tmp28 = load i32, i32 addrspace(1)* %tmp27, align 4
+ %tmp29 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 8
+ %tmp30 = load i32, i32 addrspace(1)* %tmp29, align 4
+ %tmp33 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %tmp1
+ %tmp34 = load i32, i32 addrspace(1)* %tmp33, align 4
+ %tmp35 = icmp eq i32 %tmp34, 0
+ br i1 %tmp35, label %bb44, label %bb36
+
+bb36: ; preds = %bb
+ %tmp37 = mul nsw i32 %tmp20, %tmp18
+ %tmp38 = add nsw i32 %tmp37, %tmp16
+ %tmp39 = mul nsw i32 %tmp24, %tmp22
+ %tmp40 = add nsw i32 %tmp38, %tmp39
+ %tmp41 = mul nsw i32 %tmp28, %tmp26
+ %tmp42 = add nsw i32 %tmp40, %tmp41
+ %tmp43 = add nsw i32 %tmp42, %tmp30
+ br label %bb52
+
+bb44: ; preds = %bb
+ %tmp45 = mul nsw i32 %tmp18, %tmp16
+ %tmp46 = mul nsw i32 %tmp22, %tmp20
+ %tmp47 = add nsw i32 %tmp46, %tmp45
+ %tmp48 = mul nsw i32 %tmp26, %tmp24
+ %tmp49 = add nsw i32 %tmp47, %tmp48
+ %tmp50 = mul nsw i32 %tmp30, %tmp28
+ %tmp51 = add nsw i32 %tmp49, %tmp50
+ br label %bb52
+
+bb52: ; preds = %bb44, %bb36
+ %tmp53 = phi i32 [ %tmp43, %bb36 ], [ %tmp51, %bb44 ]
+ %tmp54 = mul nsw i32 %tmp16, %tmp14
+ %tmp55 = mul nsw i32 %tmp22, %tmp18
+ %tmp56 = mul nsw i32 %tmp24, %tmp20
+ %tmp57 = mul nsw i32 %tmp30, %tmp26
+ %tmp58 = add i32 %tmp55, %tmp54
+ %tmp59 = add i32 %tmp58, %tmp56
+ %tmp60 = add i32 %tmp59, %tmp28
+ %tmp61 = add i32 %tmp60, %tmp57
+ %tmp62 = add i32 %tmp61, %tmp53
+ store i32 %tmp62, i32 addrspace(1)* %tmp33, align 4
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+attributes #0 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/spill-m0.ll b/test/CodeGen/AMDGPU/spill-m0.ll
index 8c16b9d1649c..0e715c453209 100644
--- a/test/CodeGen/AMDGPU/spill-m0.ll
+++ b/test/CodeGen/AMDGPU/spill-m0.ll
@@ -17,11 +17,11 @@
; TOVMEM-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
; TOVMEM-DAG: v_mov_b32_e32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]]
-; TOVMEM: buffer_store_dword [[SPILL_VREG]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} ; 4-byte Folded Spill
+; TOVMEM: buffer_store_dword [[SPILL_VREG]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4 ; 4-byte Folded Spill
; TOVMEM: s_waitcnt vmcnt(0)
; TOSMEM-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
-; TOSMEM: s_mov_b32 m0, s3{{$}}
+; TOSMEM: s_add_u32 m0, s3, 0x100{{$}}
; TOSMEM-NOT: [[M0_COPY]]
; TOSMEM: s_buffer_store_dword [[M0_COPY]], s{{\[}}[[LO]]:[[HI]]], m0 ; 4-byte Folded Spill
; TOSMEM: s_waitcnt lgkmcnt(0)
@@ -32,18 +32,18 @@
; TOVGPR: v_readlane_b32 [[M0_RESTORE:s[0-9]+]], [[SPILL_VREG]], 0
; TOVGPR: s_mov_b32 m0, [[M0_RESTORE]]
-; TOVMEM: buffer_load_dword [[RELOAD_VREG:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} ; 4-byte Folded Reload
+; TOVMEM: buffer_load_dword [[RELOAD_VREG:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4 ; 4-byte Folded Reload
; TOVMEM: s_waitcnt vmcnt(0)
; TOVMEM: v_readfirstlane_b32 [[M0_RESTORE:s[0-9]+]], [[RELOAD_VREG]]
; TOVMEM: s_mov_b32 m0, [[M0_RESTORE]]
-; TOSMEM: s_mov_b32 m0, s3{{$}}
+; TOSMEM: s_add_u32 m0, s3, 0x100{{$}}
; TOSMEM: s_buffer_load_dword [[M0_RESTORE:s[0-9]+]], s{{\[}}[[LO]]:[[HI]]], m0 ; 4-byte Folded Reload
; TOSMEM-NOT: [[M0_RESTORE]]
; TOSMEM: s_mov_b32 m0, [[M0_RESTORE]]
; GCN: s_add_i32 s{{[0-9]+}}, m0, 1
-define void @spill_m0(i32 %cond, i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @spill_m0(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
%m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
%cmp0 = icmp eq i32 %cond, 0
@@ -67,12 +67,12 @@ endif:
; GCN: v_interp_mov_f32
; TOSMEM-NOT: s_m0
-; TOSMEM: s_mov_b32 m0, s7
+; TOSMEM: s_add_u32 m0, s7, 0x100
; TOSMEM-NEXT: s_buffer_store_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 4-byte Folded Spill
; TOSMEM-NOT: m0
; TOSMEM-NOT: m0
-; TOSMEM: s_add_u32 m0, s7, 0x100
+; TOSMEM: s_add_u32 m0, s7, 0x200
; TOSMEM: s_buffer_store_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Spill
; TOSMEM-NOT: m0
@@ -81,16 +81,16 @@ endif:
; TOSMEM: s_branch
; TOSMEM: BB{{[0-9]+_[0-9]+}}:
-; TOSMEM-NEXT: s_add_u32 m0, s7, 0x100
+; TOSMEM-NEXT: s_add_u32 m0, s7, 0x200
; TOSMEM-NEXT: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Reload
; GCN-NOT: v_readlane_b32 m0
; GCN-NOT: s_buffer_store_dword m0
; GCN-NOT: s_buffer_load_dword m0
-define amdgpu_ps void @spill_kill_m0_lds(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3) #0 {
+define amdgpu_ps void @spill_kill_m0_lds(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %m0) #0 {
main_body:
- %tmp = call float @llvm.SI.fs.constant(i32 0, i32 0, i32 %arg3)
+ %tmp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0)
%cmp = fcmp ueq float 0.000000e+00, %tmp
br i1 %cmp, label %if, label %else
@@ -100,14 +100,13 @@ if: ; preds = %main_body
br label %endif
else: ; preds = %main_body
- %interp = call float @llvm.SI.fs.constant(i32 0, i32 0, i32 %arg3)
+ %interp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0)
br label %endif
endif: ; preds = %else, %if
%export = phi float [ %lds_data, %if ], [ %interp, %else ]
- %tmp4 = call i32 @llvm.SI.packf16(float %export, float %export)
- %tmp5 = bitcast i32 %tmp4 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %tmp5, float %tmp5, float %tmp5, float %tmp5)
+ %tmp4 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %export, float %export)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp4, <2 x half> %tmp4, i1 true, i1 true) #0
ret void
}
@@ -122,7 +121,7 @@ endif: ; preds = %else, %if
; GCN: ; clobber m0
; TOSMEM: s_mov_b32 vcc_hi, m0
-; TOSMEM: s_mov_b32 m0, s3
+; TOSMEM: s_add_u32 m0, s3, 0x100
; TOSMEM-NEXT: s_buffer_store_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Spill
; TOSMEM: s_mov_b32 m0, vcc_hi
@@ -131,16 +130,16 @@ endif: ; preds = %else, %if
; TOSMEM: s_branch
; TOSMEM: BB{{[0-9]+_[0-9]+}}:
-; TOSMEM-NEXT: s_mov_b32 m0, s3
+; TOSMEM-NEXT: s_add_u32 m0, s3, 0x100
; TOSMEM-NEXT: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Reload
; GCN-NOT: v_readlane_b32 m0
; GCN-NOT: s_buffer_store_dword m0
; GCN-NOT: s_buffer_load_dword m0
-define void @m0_unavailable_spill(i32 %arg3) #0 {
+define amdgpu_kernel void @m0_unavailable_spill(i32 %m0.arg) #0 {
main_body:
%m0 = call i32 asm sideeffect "; def $0, 1", "={M0}"() #0
- %tmp = call float @llvm.SI.fs.constant(i32 0, i32 0, i32 %arg3)
+ %tmp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0.arg)
call void asm sideeffect "; clobber $0", "~{M0}"() #0
%cmp = fcmp ueq float 0.000000e+00, %tmp
br i1 %cmp, label %if, label %else
@@ -161,10 +160,10 @@ endif:
; TOSMEM: s_load_dwordx2 [[REG:s\[[0-9]+:[0-9]+\]]]
; TOSMEM: s_cmp_eq_u32
; TOSMEM-NOT: m0
-; TOSMEM: s_mov_b32 m0, s3
+; TOSMEM: s_add_u32 m0, s3, 0x100
; TOSMEM: s_buffer_store_dwordx2 [[REG]], s[88:91], m0 ; 8-byte Folded Spill
; TOSMEM-NOT: m0
-; TOSMEM: s_add_u32 m0, s3, 0x200
+; TOSMEM: s_add_u32 m0, s3, 0x300
; TOSMEM: s_buffer_store_dword s{{[0-9]+}}, s[88:91], m0 ; 4-byte Folded Spill
; TOSMEM-NOT: m0
; TOSMEM: s_cbranch_scc1
@@ -172,7 +171,7 @@ endif:
; TOSMEM: s_mov_b32 m0, -1
; TOSMEM: s_mov_b32 vcc_hi, m0
-; TOSMEM: s_mov_b32 m0, s3
+; TOSMEM: s_add_u32 m0, s3, 0x100
; TOSMEM: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[88:91], m0 ; 8-byte Folded Reload
; TOSMEM: s_mov_b32 m0, vcc_hi
; TOSMEM: s_waitcnt lgkmcnt(0)
@@ -180,7 +179,7 @@ endif:
; TOSMEM: ds_write_b64
; TOSMEM-NOT: m0
-; TOSMEM: s_add_u32 m0, s3, 0x200
+; TOSMEM: s_add_u32 m0, s3, 0x300
; TOSMEM: s_buffer_load_dword s0, s[88:91], m0 ; 4-byte Folded Reload
; TOSMEM-NOT: m0
; TOSMEM: s_waitcnt lgkmcnt(0)
@@ -190,7 +189,7 @@ endif:
; TOSMEM: s_dcache_wb
; TOSMEM: s_endpgm
-define void @restore_m0_lds(i32 %arg) {
+define amdgpu_kernel void @restore_m0_lds(i32 %arg) {
%m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
%sval = load volatile i64, i64 addrspace(2)* undef
%cmp = icmp eq i32 %arg, 0
@@ -205,10 +204,10 @@ ret:
ret void
}
-declare float @llvm.SI.fs.constant(i32, i32, i32) readnone
-
-declare i32 @llvm.SI.packf16(float, float) readnone
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+declare float @llvm.amdgcn.interp.mov(i32, i32, i32, i32) #1
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
+declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
+declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/spill-scavenge-offset.ll b/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
index 9b3dfab2be6a..c05021a91ff0 100644
--- a/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
+++ b/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
@@ -11,7 +11,7 @@
; Just test that it compiles successfully.
; CHECK-LABEL: test
-define void @test(<1280 x i32> addrspace(1)* %out, <1280 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @test(<1280 x i32> addrspace(1)* %out, <1280 x i32> addrspace(1)* %in) {
entry:
%lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
%tid = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
diff --git a/test/CodeGen/AMDGPU/spill-wide-sgpr.ll b/test/CodeGen/AMDGPU/spill-wide-sgpr.ll
index cab45be8da50..ebba35a6689a 100644
--- a/test/CodeGen/AMDGPU/spill-wide-sgpr.ll
+++ b/test/CodeGen/AMDGPU/spill-wide-sgpr.ll
@@ -3,11 +3,11 @@
; RUN: llc -O0 -march=amdgcn -mcpu=fiji -amdgpu-spill-sgpr-to-smem=0 -amdgpu-spill-sgpr-to-vgpr=0 -verify-machineinstrs < %s | FileCheck -check-prefix=ALL -check-prefix=VMEM %s
; ALL-LABEL: {{^}}spill_sgpr_x2:
-; SMEM: s_mov_b32 m0, s3{{$}}
+; SMEM: s_add_u32 m0, s3, 0x100{{$}}
; SMEM: s_buffer_store_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[8:11], m0 ; 8-byte Folded Spill
; SMEM: s_cbranch_scc1
-; SMEM: s_mov_b32 m0, s3{{$}}
+; SMEM: s_add_u32 m0, s3, 0x100{{$}}
; SMEM: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[8:11], m0 ; 8-byte Folded Reload
; SMEM: s_dcache_wb
@@ -44,11 +44,11 @@ ret:
}
; ALL-LABEL: {{^}}spill_sgpr_x4:
-; SMEM: s_mov_b32 m0, s3{{$}}
+; SMEM: s_add_u32 m0, s3, 0x100{{$}}
; SMEM: s_buffer_store_dwordx4 s{{\[[0-9]+:[0-9]+\]}}, s[12:15], m0 ; 16-byte Folded Spill
; SMEM: s_cbranch_scc1
-; SMEM: s_mov_b32 m0, s3{{$}}
+; SMEM: s_add_u32 m0, s3, 0x100{{$}}
; SMEM: s_buffer_load_dwordx4 s{{\[[0-9]+:[0-9]+\]}}, s[12:15], m0 ; 16-byte Folded Reload
; SMEM: s_dcache_wb
; SMEM: s_endpgm
@@ -93,15 +93,15 @@ ret:
; ALL-LABEL: {{^}}spill_sgpr_x8:
-; SMEM: s_mov_b32 m0, s3{{$}}
+; SMEM: s_add_u32 m0, s3, 0x100{{$}}
; SMEM: s_buffer_store_dwordx4 s{{\[[0-9]+:[0-9]+\]}}, s[16:19], m0 ; 16-byte Folded Spill
-; SMEM: s_add_u32 m0, s3, 16
+; SMEM: s_add_u32 m0, s3, 0x110{{$}}
; SMEM: s_buffer_store_dwordx4 s{{\[[0-9]+:[0-9]+\]}}, s[16:19], m0 ; 16-byte Folded Spill
; SMEM: s_cbranch_scc1
-; SMEM: s_mov_b32 m0, s3{{$}}
+; SMEM: s_add_u32 m0, s3, 0x100{{$}}
; SMEM: s_buffer_load_dwordx4 s{{\[[0-9]+:[0-9]+\]}}, s[16:19], m0 ; 16-byte Folded Reload
-; SMEM: s_add_u32 m0, s3, 16
+; SMEM: s_add_u32 m0, s3, 0x110{{$}}
; SMEM: s_buffer_load_dwordx4 s{{\[[0-9]+:[0-9]+\]}}, s[16:19], m0 ; 16-byte Folded Reload
; SMEM: s_dcache_wb
diff --git a/test/CodeGen/AMDGPU/split-scalar-i64-add.ll b/test/CodeGen/AMDGPU/split-scalar-i64-add.ll
index d4e2dc814050..5d7d29db3a2f 100644
--- a/test/CodeGen/AMDGPU/split-scalar-i64-add.ll
+++ b/test/CodeGen/AMDGPU/split-scalar-i64-add.ll
@@ -10,7 +10,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() readnone
; FUNC-LABEL: {{^}}imp_def_vcc_split_i64_add_0:
; SI: v_add_i32_e32 v{{[0-9]+}}, vcc, 0x18f, v{{[0-9]+}}
; SI: v_addc_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc
-define void @imp_def_vcc_split_i64_add_0(i64 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %s.val) {
+define amdgpu_kernel void @imp_def_vcc_split_i64_add_0(i64 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %s.val) {
%v.val = load volatile i32, i32 addrspace(1)* %in
%vec.0 = insertelement <2 x i32> undef, i32 %s.val, i32 0
%vec.1 = insertelement <2 x i32> %vec.0, i32 %v.val, i32 1
@@ -23,7 +23,7 @@ define void @imp_def_vcc_split_i64_add_0(i64 addrspace(1)* %out, i32 addrspace(1
; FUNC-LABEL: {{^}}s_imp_def_vcc_split_i64_add_0:
; SI: s_add_u32 {{s[0-9]+}}, {{s[0-9]+}}, 0x18f
; SI: s_addc_u32 {{s[0-9]+}}, 0xf423f, 0
-define void @s_imp_def_vcc_split_i64_add_0(i64 addrspace(1)* %out, i32 %val) {
+define amdgpu_kernel void @s_imp_def_vcc_split_i64_add_0(i64 addrspace(1)* %out, i32 %val) {
%vec.0 = insertelement <2 x i32> undef, i32 %val, i32 0
%vec.1 = insertelement <2 x i32> %vec.0, i32 999999, i32 1
%bc = bitcast <2 x i32> %vec.1 to i64
@@ -35,7 +35,7 @@ define void @s_imp_def_vcc_split_i64_add_0(i64 addrspace(1)* %out, i32 %val) {
; FUNC-LABEL: {{^}}imp_def_vcc_split_i64_add_1:
; SI: v_add_i32
; SI: v_addc_u32
-define void @imp_def_vcc_split_i64_add_1(i64 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %val0, i64 %val1) {
+define amdgpu_kernel void @imp_def_vcc_split_i64_add_1(i64 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %val0, i64 %val1) {
%v.val = load volatile i32, i32 addrspace(1)* %in
%vec.0 = insertelement <2 x i32> undef, i32 %val0, i32 0
%vec.1 = insertelement <2 x i32> %vec.0, i32 %v.val, i32 1
@@ -48,7 +48,7 @@ define void @imp_def_vcc_split_i64_add_1(i64 addrspace(1)* %out, i32 addrspace(1
; FUNC-LABEL: {{^}}s_imp_def_vcc_split_i64_add_1:
; SI: s_add_u32 {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
; SI: s_addc_u32 {{s[0-9]+}}, 0x1869f, {{s[0-9]+}}
-define void @s_imp_def_vcc_split_i64_add_1(i64 addrspace(1)* %out, i32 %val0, i64 %val1) {
+define amdgpu_kernel void @s_imp_def_vcc_split_i64_add_1(i64 addrspace(1)* %out, i32 %val0, i64 %val1) {
%vec.0 = insertelement <2 x i32> undef, i32 %val0, i32 0
%vec.1 = insertelement <2 x i32> %vec.0, i32 99999, i32 1
%bc = bitcast <2 x i32> %vec.1 to i64
@@ -61,7 +61,7 @@ define void @s_imp_def_vcc_split_i64_add_1(i64 addrspace(1)* %out, i32 %val0, i6
; FUNC-LABEL: {{^}}imp_def_vcc_split_i64_add_2:
; SI: v_add_i32_e32 {{v[0-9]+}}, vcc, {{s[0-9]+}}, {{v[0-9]+}}
; SI: v_addc_u32_e32 {{v[0-9]+}}, vcc, {{v[0-9]+}}, {{v[0-9]+}}, vcc
-define void @imp_def_vcc_split_i64_add_2(i64 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %val0, i64 %val1) {
+define amdgpu_kernel void @imp_def_vcc_split_i64_add_2(i64 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %val0, i64 %val1) {
%tid = call i32 @llvm.amdgcn.workitem.id.x() readnone
%gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%load = load i32, i32 addrspace(1)* %gep
diff --git a/test/CodeGen/AMDGPU/split-smrd.ll b/test/CodeGen/AMDGPU/split-smrd.ll
index d07da1030936..cdb1b1e3b503 100644
--- a/test/CodeGen/AMDGPU/split-smrd.ll
+++ b/test/CodeGen/AMDGPU/split-smrd.ll
@@ -1,11 +1,11 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; FIXME: Move this to sgpr-copy.ll when this is fixed on VI.
; Make sure that when we split an smrd instruction in order to move it to
; the VALU, we also move its users to the VALU.
-; CHECK-LABEL: {{^}}split_smrd_add_worklist:
-; CHECK: image_sample v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] dmask:0x1
+; GCN-LABEL: {{^}}split_smrd_add_worklist:
+; GCN: image_sample v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] dmask:0x1
define amdgpu_ps void @split_smrd_add_worklist([34 x <8 x i32>] addrspace(2)* byval %arg) #0 {
bb:
%tmp = call float @llvm.SI.load.const(<16 x i8> undef, i32 96)
@@ -21,27 +21,22 @@ bb3: ; preds = %bb
%tmp6 = sext i32 %tmp5 to i64
%tmp7 = getelementptr [34 x <8 x i32>], [34 x <8 x i32>] addrspace(2)* %arg, i64 0, i64 %tmp6
%tmp8 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp7, align 32, !tbaa !0
- %tmp9 = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> <i32 1061158912, i32 1048576000>, <8 x i32> %tmp8, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp9 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> <float bitcast (i32 1061158912 to float), float bitcast (i32 1048576000 to float)>, <8 x i32> %tmp8, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp10 = extractelement <4 x float> %tmp9, i32 0
- %tmp12 = call i32 @llvm.SI.packf16(float %tmp10, float undef)
- %tmp13 = bitcast i32 %tmp12 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float undef, float %tmp13, float undef, float undef)
+ %tmp12 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %tmp10, float undef)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp12, <2 x half> undef, i1 true, i1 true) #0
ret void
}
-; Function Attrs: nounwind readnone
+declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
+declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
+declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
declare float @llvm.SI.load.const(<16 x i8>, i32) #1
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-
-declare <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
-
-declare i32 @llvm.SI.packf16(float, float) #1
-
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind readonly }
!0 = !{!1, !1, i64 0, i32 1}
-!1 = !{!"const", !3}
-!2 = !{!1, !1, i64 0}
-!3 = !{!"tbaa root"}
+!1 = !{!"const", !2}
+!2 = !{!"tbaa root"}
diff --git a/test/CodeGen/AMDGPU/split-vector-memoperand-offsets.ll b/test/CodeGen/AMDGPU/split-vector-memoperand-offsets.ll
index 37ec2b012896..c2426993bb3a 100644
--- a/test/CodeGen/AMDGPU/split-vector-memoperand-offsets.ll
+++ b/test/CodeGen/AMDGPU/split-vector-memoperand-offsets.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs -mattr=-promote-alloca,-load-store-opt < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=hawaii -enable-amdgpu-aa=0 -verify-machineinstrs -mattr=-promote-alloca,-load-store-opt < %s | FileCheck -check-prefix=GCN %s
@sPrivateStorage = internal addrspace(3) global [256 x [8 x <4 x i64>]] undef
@@ -29,7 +29,7 @@
; GCN-DAG: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:24
; GCN: s_endpgm
-define void @ds_reorder_vector_split(<4 x i64> addrspace(1)* nocapture readonly %srcValues, i32 addrspace(1)* nocapture readonly %offsets, <4 x i64> addrspace(1)* nocapture %destBuffer, i32 %alignmentOffset) #0 {
+define amdgpu_kernel void @ds_reorder_vector_split(<4 x i64> addrspace(1)* nocapture readonly %srcValues, i32 addrspace(1)* nocapture readonly %offsets, <4 x i64> addrspace(1)* nocapture %destBuffer, i32 %alignmentOffset) #0 {
entry:
%tmp = tail call i32 @llvm.r600.read.local.size.y()
%tmp1 = tail call i32 @llvm.r600.read.local.size.z()
diff --git a/test/CodeGen/AMDGPU/splitkit.mir b/test/CodeGen/AMDGPU/splitkit.mir
new file mode 100644
index 000000000000..41782af40e3c
--- /dev/null
+++ b/test/CodeGen/AMDGPU/splitkit.mir
@@ -0,0 +1,105 @@
+# RUN: llc -o - %s -mtriple=amdgcn-- -mcpu=fiji -verify-machineinstrs -run-pass=greedy,virtregrewriter | FileCheck %s
+--- |
+ define amdgpu_kernel void @func0() #0 { ret void }
+ define amdgpu_kernel void @func1() #0 { ret void }
+ define amdgpu_kernel void @splitHoist() #0 { ret void }
+
+ attributes #0 = { "amdgpu-num-sgpr"="12" }
+...
+---
+# Make sure we only get a single spill+reload even if live range splitting
+# created a sequence of multiple copy instructions.
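+# As an illustration (assumed copy-chain shape, not taken from the test
+# output), splitting can leave a chain such as:
+#   %1 = COPY %0
+#   %2 = COPY %1
+#   SI_SPILL_S128_SAVE %2, ...
+# and the spiller should look through the chain so only one
+# SI_SPILL_S128_SAVE / SI_SPILL_S128_RESTORE pair survives, as checked below.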
+# CHECK-LABEL: name: func0
+# CHECK: SI_SPILL_S128_SAVE
+# CHECK-NOT: SI_SPILL_S128_SAVE
+# CHECK: S_NOP 0
+# CHECK: SI_SPILL_S128_RESTORE
+# CHECK-NOT: SI_SPILL_S128_RESTORE
+name: func0
+body: |
+ bb.0:
+ S_NOP 0, implicit-def undef %0.sub0 : sreg_128
+ S_NOP 0, implicit-def %0.sub3 : sreg_128
+
+ ; Clobber registers
+ S_NOP 0, implicit-def dead %sgpr0, implicit-def dead %sgpr1, implicit-def dead %sgpr2, implicit-def dead %sgpr3, implicit-def dead %sgpr4, implicit-def dead %sgpr5, implicit-def dead %sgpr6, implicit-def dead %sgpr7, implicit-def dead %sgpr8, implicit-def dead %sgpr9, implicit-def dead %sgpr10, implicit-def dead %sgpr11
+
+ S_NOP 0, implicit %0.sub0
+ S_NOP 0, implicit %0.sub3
+ S_NOP 0, implicit %0.sub0
+ S_NOP 0, implicit %0.sub3
+...
+---
+# Live range splitting should split this into two intervals, with the second
+# allocated to sgpr0_sgpr1 and the first to something else, so we see two
+# copies in between for the two subregisters that are alive.
+# CHECK-LABEL: name: func1
+# CHECK: [[REG0:%sgpr[0-9]+]] = COPY %sgpr0
+# CHECK: [[REG1:%sgpr[0-9]+]] = COPY %sgpr2
+# CHECK: S_NOP 0
+# CHECK: S_NOP 0, implicit [[REG0]]
+# CHECK: S_NOP 0, implicit [[REG1]]
+# CHECK: %sgpr0 = COPY [[REG0]]
+# CHECK: %sgpr2 = COPY [[REG1]]
+# CHECK: S_NOP
+# CHECK: S_NOP 0, implicit %sgpr0
+# CHECK: S_NOP 0, implicit %sgpr2
+name: func1
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: %sgpr0, %sgpr1, %sgpr2
+ undef %0.sub0 : sreg_128 = COPY %sgpr0
+ %0.sub2 = COPY %sgpr2
+
+ S_NOP 0, implicit-def dead %sgpr0, implicit-def dead %sgpr1
+
+ S_NOP 0, implicit %0.sub0
+ S_NOP 0, implicit %0.sub2
+
+ ; Clobber everything but sgpr0-sgpr3
+ S_NOP 0, implicit-def dead %sgpr4, implicit-def dead %sgpr5, implicit-def dead %sgpr6, implicit-def dead %sgpr7, implicit-def dead %sgpr8, implicit-def dead %sgpr9, implicit-def dead %sgpr10, implicit-def dead %sgpr11, implicit-def dead %sgpr12, implicit-def dead %sgpr13, implicit-def dead %sgpr14, implicit-def dead %sgpr15, implicit-def dead %vcc_lo, implicit-def dead %vcc_hi
+
+ S_NOP 0, implicit %0.sub0
+ S_NOP 0, implicit %0.sub2
+...
+---
+# Check that copy hoisting out of loops works. Mainly, the compiler should not
+# crash when it hoists a subreg copy sequence.
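+# As an illustration (assumed shapes): for the sreg_128 value defined via
+# %0.sub0 and %0.sub3 below, a split point inside the bb.1 loop inserts a
+# subreg copy sequence along the lines of:
+#   undef %1.sub0 = COPY %0.sub0
+#   %1.sub3 = COPY %0.sub3
+# and hoisting has to move the whole sequence out of the loop as one unit.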
+# CHECK-LABEL: name: splitHoist
+# CHECK: S_NOP 0, implicit-def %sgpr0
+# CHECK: S_NOP 0, implicit-def %sgpr3
+# CHECK-NEXT: SI_SPILL_S128_SAVE
+name: splitHoist
+tracksRegLiveness: true
+body: |
+ bb.0:
+ successors: %bb.1, %bb.2
+ S_NOP 0, implicit-def undef %0.sub0 : sreg_128
+ S_NOP 0, implicit-def %0.sub3 : sreg_128
+
+ S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc
+ S_BRANCH %bb.2
+
+ bb.1:
+ successors: %bb.1, %bb.3
+ S_NOP 0, implicit %0.sub0
+
+ ; Clobber registers
+ S_NOP 0, implicit-def dead %sgpr0, implicit-def dead %sgpr1, implicit-def dead %sgpr2, implicit-def dead %sgpr3, implicit-def dead %sgpr4, implicit-def dead %sgpr5, implicit-def dead %sgpr6, implicit-def dead %sgpr7, implicit-def dead %sgpr8, implicit-def dead %sgpr9, implicit-def dead %sgpr10, implicit-def dead %sgpr11
+
+ S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc
+ S_BRANCH %bb.3
+
+ bb.2:
+ successors: %bb.3
+ ; Clobber registers
+ S_NOP 0, implicit-def dead %sgpr0, implicit-def dead %sgpr1, implicit-def dead %sgpr2, implicit-def dead %sgpr3, implicit-def dead %sgpr4, implicit-def dead %sgpr5, implicit-def dead %sgpr6, implicit-def dead %sgpr7, implicit-def dead %sgpr8, implicit-def dead %sgpr9, implicit-def dead %sgpr10, implicit-def dead %sgpr11
+ S_BRANCH %bb.3
+
+ bb.3:
+ S_NOP 0, implicit %0.sub0
+ S_NOP 0, implicit %0.sub3
+ S_NOP 0, implicit %0.sub0
+ S_NOP 0, implicit %0.sub3
+...
diff --git a/test/CodeGen/AMDGPU/sra.ll b/test/CodeGen/AMDGPU/sra.ll
index ad7c86fe7919..b4355b76016a 100644
--- a/test/CodeGen/AMDGPU/sra.ll
+++ b/test/CodeGen/AMDGPU/sra.ll
@@ -13,7 +13,7 @@ declare i32 @llvm.r600.read.tidig.x() #0
; EG: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @ashr_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @ashr_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32>, <2 x i32> addrspace(1)* %in
%b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
@@ -37,7 +37,7 @@ define void @ashr_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %i
; EG: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @ashr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @ashr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32>, <4 x i32> addrspace(1)* %in
%b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
@@ -49,9 +49,9 @@ define void @ashr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %i
; FUNC-LABEL: {{^}}ashr_v2i16:
; FIXME: The ashr operation is uniform, but because its operands come from a
; global load we end up with vector instructions rather than scalar ones.
-; VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @ashr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
+; VI: v_ashrrev_i32_sdwa v{{[0-9]+}}, sext(v{{[0-9]+}}), sext(v{{[0-9]+}}) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
+; VI: v_ashrrev_i32_sdwa v{{[0-9]+}}, sext(v{{[0-9]+}}), sext(v{{[0-9]+}}) dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+define amdgpu_kernel void @ashr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in, i16 1
%a = load <2 x i16>, <2 x i16> addrspace(1)* %in
%b = load <2 x i16>, <2 x i16> addrspace(1)* %b_ptr
@@ -63,11 +63,11 @@ define void @ashr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %i
; FUNC-LABEL: {{^}}ashr_v4i16:
; FIXME: The ashr operation is uniform, but because its operands come from a
; global load we end up with vector instructions rather than scalar ones.
-; VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @ashr_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
+; VI: v_ashrrev_i32_sdwa v{{[0-9]+}}, sext(v{{[0-9]+}}), sext(v{{[0-9]+}}) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
+; VI: v_ashrrev_i32_sdwa v{{[0-9]+}}, sext(v{{[0-9]+}}), sext(v{{[0-9]+}}) dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI: v_ashrrev_i32_sdwa v{{[0-9]+}}, sext(v{{[0-9]+}}), sext(v{{[0-9]+}}) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
+; VI: v_ashrrev_i32_sdwa v{{[0-9]+}}, sext(v{{[0-9]+}}), sext(v{{[0-9]+}}) dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+define amdgpu_kernel void @ashr_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in, i16 1
%a = load <4 x i16>, <4 x i16> addrspace(1)* %in
%b = load <4 x i16>, <4 x i16> addrspace(1)* %b_ptr
@@ -80,7 +80,7 @@ define void @ashr_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %i
; GCN: s_ashr_i64 s[{{[0-9]}}:{{[0-9]}}], s[{{[0-9]}}:{{[0-9]}}], 8
; EG: ASHR
-define void @s_ashr_i64(i64 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @s_ashr_i64(i64 addrspace(1)* %out, i32 %in) {
entry:
%in.ext = sext i32 %in to i64
%ashr = ashr i64 %in.ext, 8
@@ -105,7 +105,7 @@ entry:
; EG-DAG: SETGT_UINT {{\*? *}}[[RESC:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
; EG-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
; EG-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
-define void @ashr_i64_2(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @ashr_i64_2(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
entry:
%b_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
%a = load i64, i64 addrspace(1)* %in
@@ -143,7 +143,7 @@ entry:
; EG-DAG: CNDE_INT
; EG-DAG: CNDE_INT
; EG-DAG: CNDE_INT
-define void @ashr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
+define amdgpu_kernel void @ashr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
%a = load <2 x i64>, <2 x i64> addrspace(1)* %in
%b = load <2 x i64>, <2 x i64> addrspace(1)* %b_ptr
@@ -156,7 +156,7 @@ define void @ashr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %i
; XFUNC-LABEL: {{^}}s_ashr_v2i64:
; XGCN: s_ashr_i64 {{s\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], s[0-9]+}}
; XGCN: s_ashr_i64 {{s\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], s[0-9]+}}
-; define void @s_ashr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in, <2 x i64> %a, <2 x i64> %b) {
+; define amdgpu_kernel void @s_ashr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in, <2 x i64> %a, <2 x i64> %b) {
; %result = ashr <2 x i64> %a, %b
; store <2 x i64> %result, <2 x i64> addrspace(1)* %out
; ret void
@@ -221,7 +221,7 @@ define void @ashr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %i
; EG-DAG: CNDE_INT
; EG-DAG: CNDE_INT
; EG-DAG: CNDE_INT
-define void @ashr_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
+define amdgpu_kernel void @ashr_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
%a = load <4 x i64>, <4 x i64> addrspace(1)* %in
%b = load <4 x i64>, <4 x i64> addrspace(1)* %b_ptr
@@ -235,7 +235,7 @@ define void @ashr_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %i
; GCN: s_ashr_i32 s[[SHIFT:[0-9]+]], s[[HI]], 31
; GCN: s_add_u32 s{{[0-9]+}}, s[[HI]], s{{[0-9]+}}
; GCN: s_addc_u32 s{{[0-9]+}}, s[[SHIFT]], s{{[0-9]+}}
-define void @s_ashr_32_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @s_ashr_32_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
%result = ashr i64 %a, 32
%add = add i64 %result, %b
store i64 %add, i64 addrspace(1)* %out
@@ -247,7 +247,7 @@ define void @s_ashr_32_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
; VI: flat_load_dword v[[HI:[0-9]+]]
; GCN: v_ashrrev_i32_e32 v[[SHIFT:[0-9]+]], 31, v[[HI]]
; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v{{\[}}[[HI]]:[[SHIFT]]{{\]}}
-define void @v_ashr_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @v_ashr_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() #0
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
@@ -262,7 +262,7 @@ define void @v_ashr_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
; GCN: s_ashr_i32 s[[SHIFT:[0-9]+]], s[[HI]], 31
; GCN: s_add_u32 {{s[0-9]+}}, s[[SHIFT]], {{s[0-9]+}}
; GCN: s_addc_u32 {{s[0-9]+}}, s[[SHIFT]], {{s[0-9]+}}
-define void @s_ashr_63_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @s_ashr_63_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
%result = ashr i64 %a, 63
%add = add i64 %result, %b
store i64 %add, i64 addrspace(1)* %out
@@ -275,7 +275,7 @@ define void @s_ashr_63_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
; GCN: v_ashrrev_i32_e32 v[[SHIFT:[0-9]+]], 31, v[[HI]]
; GCN: v_mov_b32_e32 v[[COPY:[0-9]+]], v[[SHIFT]]
; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v{{\[}}[[SHIFT]]:[[COPY]]{{\]}}
-define void @v_ashr_63_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @v_ashr_63_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() #0
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
diff --git a/test/CodeGen/AMDGPU/srem.ll b/test/CodeGen/AMDGPU/srem.ll
index c78fd549b316..c89f798397ae 100644
--- a/test/CodeGen/AMDGPU/srem.ll
+++ b/test/CodeGen/AMDGPU/srem.ll
@@ -2,7 +2,7 @@
; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s
-define void @srem_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @srem_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in
%den = load i32, i32 addrspace(1) * %den_ptr
@@ -11,7 +11,7 @@ define void @srem_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
ret void
}
-define void @srem_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @srem_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%num = load i32, i32 addrspace(1) * %in
%result = srem i32 %num, 4
store i32 %result, i32 addrspace(1)* %out
@@ -24,14 +24,14 @@ define void @srem_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; SI: v_mul_lo_i32
; SI: v_sub_i32
; SI: s_endpgm
-define void @srem_i32_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @srem_i32_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%num = load i32, i32 addrspace(1) * %in
%result = srem i32 %num, 7
store i32 %result, i32 addrspace(1)* %out
ret void
}
-define void @srem_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @srem_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%den_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%num = load <2 x i32>, <2 x i32> addrspace(1) * %in
%den = load <2 x i32>, <2 x i32> addrspace(1) * %den_ptr
@@ -40,14 +40,14 @@ define void @srem_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %i
ret void
}
-define void @srem_v2i32_4(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @srem_v2i32_4(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%num = load <2 x i32>, <2 x i32> addrspace(1) * %in
%result = srem <2 x i32> %num, <i32 4, i32 4>
store <2 x i32> %result, <2 x i32> addrspace(1)* %out
ret void
}
-define void @srem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @srem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%den_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%num = load <4 x i32>, <4 x i32> addrspace(1) * %in
%den = load <4 x i32>, <4 x i32> addrspace(1) * %den_ptr
@@ -56,14 +56,14 @@ define void @srem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %i
ret void
}
-define void @srem_v4i32_4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @srem_v4i32_4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%num = load <4 x i32>, <4 x i32> addrspace(1) * %in
%result = srem <4 x i32> %num, <i32 4, i32 4, i32 4, i32 4>
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
ret void
}
-define void @srem_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @srem_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%den_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
%num = load i64, i64 addrspace(1) * %in
%den = load i64, i64 addrspace(1) * %den_ptr
@@ -72,14 +72,14 @@ define void @srem_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
ret void
}
-define void @srem_i64_4(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @srem_i64_4(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%num = load i64, i64 addrspace(1) * %in
%result = srem i64 %num, 4
store i64 %result, i64 addrspace(1)* %out
ret void
}
-define void @srem_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
+define amdgpu_kernel void @srem_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
%den_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
%num = load <2 x i64>, <2 x i64> addrspace(1) * %in
%den = load <2 x i64>, <2 x i64> addrspace(1) * %den_ptr
@@ -88,14 +88,14 @@ define void @srem_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %i
ret void
}
-define void @srem_v2i64_4(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
+define amdgpu_kernel void @srem_v2i64_4(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
%num = load <2 x i64>, <2 x i64> addrspace(1) * %in
%result = srem <2 x i64> %num, <i64 4, i64 4>
store <2 x i64> %result, <2 x i64> addrspace(1)* %out
ret void
}
-define void @srem_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
+define amdgpu_kernel void @srem_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
%den_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
%num = load <4 x i64>, <4 x i64> addrspace(1) * %in
%den = load <4 x i64>, <4 x i64> addrspace(1) * %den_ptr
@@ -104,7 +104,7 @@ define void @srem_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %i
ret void
}
-define void @srem_v4i64_4(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
+define amdgpu_kernel void @srem_v4i64_4(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
%num = load <4 x i64>, <4 x i64> addrspace(1) * %in
%result = srem <4 x i64> %num, <i64 4, i64 4, i64 4, i64 4>
store <4 x i64> %result, <4 x i64> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/srl.ll b/test/CodeGen/AMDGPU/srl.ll
index 6b006fd936d7..1daf4bb33e81 100644
--- a/test/CodeGen/AMDGPU/srl.ll
+++ b/test/CodeGen/AMDGPU/srl.ll
@@ -8,7 +8,7 @@ declare i32 @llvm.r600.read.tidig.x() #0
; SI: v_lshrrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_lshrrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @lshr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @lshr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
%b = load i32, i32 addrspace(1)* %b_ptr
@@ -26,7 +26,7 @@ define void @lshr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @lshr_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @lshr_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32>, <2 x i32> addrspace(1)* %in
%b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
@@ -50,7 +50,7 @@ define void @lshr_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %i
; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @lshr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @lshr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32>, <4 x i32> addrspace(1)* %in
%b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
@@ -74,7 +74,7 @@ define void @lshr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %i
; EG-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]|PS}}
; EG-DAG: LSHR {{\*? *}}[[LOBIG:T[0-9]+\.[XYZW]]], [[OPHI]], [[SHIFT]]
; EG-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW], .*}}, 0.0
-define void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%b_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
%a = load i64, i64 addrspace(1)* %in
%b = load i64, i64 addrspace(1)* %b_ptr
@@ -112,7 +112,7 @@ define void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
; EG-DAG: CNDE_INT {{.*}}, 0.0
; EG-DAG: CNDE_INT
; EG-DAG: CNDE_INT
-define void @lshr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
+define amdgpu_kernel void @lshr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
%a = load <2 x i64>, <2 x i64> addrspace(1)* %in
%b = load <2 x i64>, <2 x i64> addrspace(1)* %b_ptr
@@ -178,7 +178,7 @@ define void @lshr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %i
; EG-DAG: CNDE_INT
; EG-DAG: CNDE_INT
; EG-DAG: CNDE_INT
-define void @lshr_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
+define amdgpu_kernel void @lshr_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
%a = load <4 x i64>, <4 x i64> addrspace(1)* %in
%b = load <4 x i64>, <4 x i64> addrspace(1)* %b_ptr
@@ -193,7 +193,7 @@ define void @lshr_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %i
; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], [[HI_A]]
; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
-define void @s_lshr_32_i64(i64 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @s_lshr_32_i64(i64 addrspace(1)* %out, i64 %a) {
%result = lshr i64 %a, 32
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -203,7 +203,7 @@ define void @s_lshr_32_i64(i64 addrspace(1)* %out, i64 %a) {
; GCN-DAG: buffer_load_dword v[[HI_A:[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[HI_A]]:[[VHI]]{{\]}}
-define void @v_lshr_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @v_lshr_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() #0
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
diff --git a/test/CodeGen/AMDGPU/ssubo.ll b/test/CodeGen/AMDGPU/ssubo.ll
index 26884a1b7761..135632343f90 100644
--- a/test/CodeGen/AMDGPU/ssubo.ll
+++ b/test/CodeGen/AMDGPU/ssubo.ll
@@ -6,7 +6,7 @@ declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
declare { i64, i1 } @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
; FUNC-LABEL: {{^}}ssubo_i64_zext:
-define void @ssubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @ssubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
%ssub = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %a, i64 %b) nounwind
%val = extractvalue { i64, i1 } %ssub, 0
%carry = extractvalue { i64, i1 } %ssub, 1
@@ -17,7 +17,7 @@ define void @ssubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
}
; FUNC-LABEL: {{^}}s_ssubo_i32:
-define void @s_ssubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
+define amdgpu_kernel void @s_ssubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
%ssub = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 %b) nounwind
%val = extractvalue { i32, i1 } %ssub, 0
%carry = extractvalue { i32, i1 } %ssub, 1
@@ -27,7 +27,7 @@ define void @s_ssubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32
}
; FUNC-LABEL: {{^}}v_ssubo_i32:
-define void @v_ssubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+define amdgpu_kernel void @v_ssubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%a = load i32, i32 addrspace(1)* %aptr, align 4
%b = load i32, i32 addrspace(1)* %bptr, align 4
%ssub = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 %b) nounwind
@@ -41,7 +41,7 @@ define void @v_ssubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32
; FUNC-LABEL: {{^}}s_ssubo_i64:
; SI: s_sub_u32
; SI: s_subb_u32
-define void @s_ssubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @s_ssubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
%ssub = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %a, i64 %b) nounwind
%val = extractvalue { i64, i1 } %ssub, 0
%carry = extractvalue { i64, i1 } %ssub, 1
@@ -53,7 +53,7 @@ define void @s_ssubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64
; FUNC-LABEL: {{^}}v_ssubo_i64:
; SI: v_sub_i32_e32
; SI: v_subb_u32_e32
-define void @v_ssubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
+define amdgpu_kernel void @v_ssubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
%a = load i64, i64 addrspace(1)* %aptr, align 4
%b = load i64, i64 addrspace(1)* %bptr, align 4
%ssub = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %a, i64 %b) nounwind
diff --git a/test/CodeGen/AMDGPU/store-barrier.ll b/test/CodeGen/AMDGPU/store-barrier.ll
index 57a93ccd2505..afa4e94222cd 100644
--- a/test/CodeGen/AMDGPU/store-barrier.ll
+++ b/test/CodeGen/AMDGPU/store-barrier.ll
@@ -12,7 +12,7 @@
; CHECK: s_barrier
; CHECK: s_endpgm
; Function Attrs: nounwind
-define void @test(<2 x i8> addrspace(3)* nocapture %arg, <2 x i8> addrspace(1)* nocapture readonly %arg1, i32 addrspace(1)* nocapture readonly %arg2, <2 x i8> addrspace(1)* nocapture %arg3, i32 %arg4, i64 %tmp9) #0 {
+define amdgpu_kernel void @test(<2 x i8> addrspace(3)* nocapture %arg, <2 x i8> addrspace(1)* nocapture readonly %arg1, i32 addrspace(1)* nocapture readonly %arg2, <2 x i8> addrspace(1)* nocapture %arg3, i32 %arg4, i64 %tmp9) #0 {
bb:
%tmp10 = getelementptr inbounds i32, i32 addrspace(1)* %arg2, i64 %tmp9
%tmp13 = load i32, i32 addrspace(1)* %tmp10, align 2
diff --git a/test/CodeGen/AMDGPU/store-global.ll b/test/CodeGen/AMDGPU/store-global.ll
index 5d49795a68ec..160e921fc075 100644
--- a/test/CodeGen/AMDGPU/store-global.ll
+++ b/test/CodeGen/AMDGPU/store-global.ll
@@ -11,7 +11,7 @@
; CM-NOT: MEM_RAT MSKOR
; GCN: buffer_store_byte
-define void @store_i1(i1 addrspace(1)* %out) {
+define amdgpu_kernel void @store_i1(i1 addrspace(1)* %out) {
entry:
store i1 true, i1 addrspace(1)* %out
ret void
@@ -42,7 +42,7 @@ entry:
; GCN: buffer_store_byte
-define void @store_i8(i8 addrspace(1)* %out, i8 %in) {
+define amdgpu_kernel void @store_i8(i8 addrspace(1)* %out, i8 %in) {
entry:
store i8 %in, i8 addrspace(1)* %out
ret void
@@ -75,7 +75,7 @@ entry:
; EG: MOV * T[[RW_GPR]].Z, 0.0
; GCN: buffer_store_short
-define void @store_i16(i16 addrspace(1)* %out, i16 %in) {
+define amdgpu_kernel void @store_i16(i16 addrspace(1)* %out, i16 %in) {
entry:
store i16 %in, i16 addrspace(1)* %out
ret void
@@ -88,7 +88,7 @@ entry:
; EG: MEM_RAT MSKOR
; EG: MEM_RAT MSKOR
-define void @store_i24(i24 addrspace(1)* %out, i24 %in) {
+define amdgpu_kernel void @store_i24(i24 addrspace(1)* %out, i24 %in) {
entry:
store i24 %in, i24 addrspace(1)* %out
ret void
@@ -104,7 +104,7 @@ entry:
; CM: MEM_RAT_CACHELESS STORE_DWORD
; CM-NOT: MEM_RAT
-define void @store_i25(i25 addrspace(1)* %out, i25 %in) {
+define amdgpu_kernel void @store_i25(i25 addrspace(1)* %out, i25 %in) {
entry:
store i25 %in, i25 addrspace(1)* %out
ret void
@@ -119,7 +119,7 @@ entry:
; CM-NOT: MEM_RAT MSKOR
; GCN: buffer_store_short
-define void @store_v2i8(<2 x i8> addrspace(1)* %out, <2 x i32> %in) {
+define amdgpu_kernel void @store_v2i8(<2 x i8> addrspace(1)* %out, <2 x i32> %in) {
entry:
%0 = trunc <2 x i32> %in to <2 x i8>
store <2 x i8> %0, <2 x i8> addrspace(1)* %out
@@ -136,7 +136,7 @@ entry:
; CM-NOT: MEM_RAT MSKOR
; SI: buffer_store_byte
-define void @store_v2i8_unaligned(<2 x i8> addrspace(1)* %out, <2 x i32> %in) {
+define amdgpu_kernel void @store_v2i8_unaligned(<2 x i8> addrspace(1)* %out, <2 x i32> %in) {
entry:
%0 = trunc <2 x i32> %in to <2 x i8>
store <2 x i8> %0, <2 x i8> addrspace(1)* %out, align 1
@@ -150,7 +150,7 @@ entry:
; CM: MEM_RAT_CACHELESS STORE_DWORD
; GCN: buffer_store_dword
-define void @store_v2i16(<2 x i16> addrspace(1)* %out, <2 x i32> %in) {
+define amdgpu_kernel void @store_v2i16(<2 x i16> addrspace(1)* %out, <2 x i32> %in) {
entry:
%0 = trunc <2 x i32> %in to <2 x i16>
store <2 x i16> %0, <2 x i16> addrspace(1)* %out
@@ -170,7 +170,7 @@ entry:
; SI: buffer_store_short
; SI: buffer_store_short
-define void @store_v2i16_unaligned(<2 x i16> addrspace(1)* %out, <2 x i32> %in) {
+define amdgpu_kernel void @store_v2i16_unaligned(<2 x i16> addrspace(1)* %out, <2 x i32> %in) {
entry:
%0 = trunc <2 x i32> %in to <2 x i16>
store <2 x i16> %0, <2 x i16> addrspace(1)* %out, align 2
@@ -183,7 +183,7 @@ entry:
; CM: MEM_RAT_CACHELESS STORE_DWORD
; GCN: buffer_store_dword
-define void @store_v4i8(<4 x i8> addrspace(1)* %out, <4 x i32> %in) {
+define amdgpu_kernel void @store_v4i8(<4 x i8> addrspace(1)* %out, <4 x i32> %in) {
entry:
%0 = trunc <4 x i32> %in to <4 x i8>
store <4 x i8> %0, <4 x i8> addrspace(1)* %out
@@ -210,7 +210,7 @@ entry:
; SI: buffer_store_byte
; SI: buffer_store_byte
; SI-NOT: buffer_store_dword
-define void @store_v4i8_unaligned(<4 x i8> addrspace(1)* %out, <4 x i32> %in) {
+define amdgpu_kernel void @store_v4i8_unaligned(<4 x i8> addrspace(1)* %out, <4 x i32> %in) {
entry:
%0 = trunc <4 x i32> %in to <4 x i8>
store <4 x i8> %0, <4 x i8> addrspace(1)* %out, align 1
@@ -231,7 +231,7 @@ entry:
; SI: buffer_store_short
; SI: buffer_store_short
; SI-NOT: buffer_store_dword
-define void @store_v4i8_halfaligned(<4 x i8> addrspace(1)* %out, <4 x i32> %in) {
+define amdgpu_kernel void @store_v4i8_halfaligned(<4 x i8> addrspace(1)* %out, <4 x i32> %in) {
entry:
%0 = trunc <4 x i32> %in to <4 x i8>
store <4 x i8> %0, <4 x i8> addrspace(1)* %out, align 2
@@ -246,7 +246,7 @@ entry:
; GCN: buffer_store_dword
-define void @store_f32(float addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @store_f32(float addrspace(1)* %out, float %in) {
store float %in, float addrspace(1)* %out
ret void
}
@@ -257,7 +257,7 @@ define void @store_f32(float addrspace(1)* %out, float %in) {
; CM: MEM_RAT_CACHELESS STORE_DWORD T{{[0-9]+}}
; GCN: buffer_store_dwordx2
-define void @store_v4i16(<4 x i16> addrspace(1)* %out, <4 x i32> %in) {
+define amdgpu_kernel void @store_v4i16(<4 x i16> addrspace(1)* %out, <4 x i32> %in) {
entry:
%0 = trunc <4 x i32> %in to <4 x i16>
store <4 x i16> %0, <4 x i16> addrspace(1)* %out
@@ -272,7 +272,7 @@ entry:
; GCN: buffer_store_dwordx2
-define void @store_v2f32(<2 x float> addrspace(1)* %out, float %a, float %b) {
+define amdgpu_kernel void @store_v2f32(<2 x float> addrspace(1)* %out, float %a, float %b) {
entry:
%0 = insertelement <2 x float> <float 0.0, float 0.0>, float %a, i32 0
%1 = insertelement <2 x float> %0, float %b, i32 1
@@ -286,7 +286,7 @@ entry:
; EG-DAG: MEM_RAT_CACHELESS STORE_RAW {{T[0-9]+\.[XYZW]}}, {{T[0-9]+\.[XYZW]}},
; EG-DAG: MEM_RAT_CACHELESS STORE_RAW {{T[0-9]+\.XY}}, {{T[0-9]+\.[XYZW]}},
-define void @store_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> %a) nounwind {
+define amdgpu_kernel void @store_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> %a) nounwind {
store <3 x i32> %a, <3 x i32> addrspace(1)* %out, align 16
ret void
}
@@ -299,7 +299,7 @@ define void @store_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> %a) nounwind {
; CM-NOT: MEM_RAT_CACHELESS STORE_DWORD
; GCN: buffer_store_dwordx4
-define void @store_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %in) {
+define amdgpu_kernel void @store_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %in) {
entry:
store <4 x i32> %in, <4 x i32> addrspace(1)* %out
ret void
@@ -313,7 +313,7 @@ entry:
; CM-NOT: MEM_RAT_CACHELESS STORE_DWORD
; SI: buffer_store_dwordx4
-define void @store_v4i32_unaligned(<4 x i32> addrspace(1)* %out, <4 x i32> %in) {
+define amdgpu_kernel void @store_v4i32_unaligned(<4 x i32> addrspace(1)* %out, <4 x i32> %in) {
entry:
store <4 x i32> %in, <4 x i32> addrspace(1)* %out, align 4
ret void
@@ -328,7 +328,7 @@ entry:
; CM-NOT: MEM_RAT_CACHELESS STORE_DWORD
; GCN: buffer_store_dwordx4
-define void @store_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+define amdgpu_kernel void @store_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
%1 = load <4 x float>, <4 x float> addrspace(1) * %in
store <4 x float> %1, <4 x float> addrspace(1)* %out
ret void
@@ -340,7 +340,7 @@ define void @store_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1
; CM: MEM_RAT MSKOR
; GCN: buffer_store_byte
-define void @store_i64_i8(i8 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @store_i64_i8(i8 addrspace(1)* %out, i64 %in) {
entry:
%0 = trunc i64 %in to i8
store i8 %0, i8 addrspace(1)* %out
@@ -350,7 +350,7 @@ entry:
; FUNC-LABEL: {{^}}store_i64_i16:
; EG: MEM_RAT MSKOR
; GCN: buffer_store_short
-define void @store_i64_i16(i16 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @store_i64_i16(i16 addrspace(1)* %out, i64 %in) {
entry:
%0 = trunc i64 %in to i16
store i16 %0, i16 addrspace(1)* %out
@@ -369,7 +369,7 @@ entry:
; CM-NOT: MEM_RAT_CACHELESS STORE_DWORD
; GCN: buffer_store_dwordx2
-define void @vecload2(i32 addrspace(1)* nocapture %out, i32 addrspace(2)* nocapture %mem) #0 {
+define amdgpu_kernel void @vecload2(i32 addrspace(1)* nocapture %out, i32 addrspace(2)* nocapture %mem) #0 {
entry:
%0 = load i32, i32 addrspace(2)* %mem, align 4
%arrayidx1.i = getelementptr inbounds i32, i32 addrspace(2)* %mem, i64 1
@@ -388,7 +388,7 @@ entry:
; CM: MEM_RAT_CACHELESS STORE_DWORD T{{[0-9]+}}, T{{[0-9]+}}.X
; GCN: buffer_store_dwordx4
-define void @i128-const-store(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @i128-const-store(i32 addrspace(1)* %out) {
entry:
store i32 1, i32 addrspace(1)* %out, align 4
%arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
diff --git a/test/CodeGen/AMDGPU/store-local.ll b/test/CodeGen/AMDGPU/store-local.ll
index 03fd30ca9a25..c144bf2aa878 100644
--- a/test/CodeGen/AMDGPU/store-local.ll
+++ b/test/CodeGen/AMDGPU/store-local.ll
@@ -9,7 +9,7 @@
; CM: LDS_BYTE_WRITE
; GCN: ds_write_b8
-define void @store_local_i1(i1 addrspace(3)* %out) {
+define amdgpu_kernel void @store_local_i1(i1 addrspace(3)* %out) {
entry:
store i1 true, i1 addrspace(3)* %out
ret void
@@ -21,7 +21,7 @@ entry:
; CM: LDS_BYTE_WRITE
; GCN: ds_write_b8
-define void @store_local_i8(i8 addrspace(3)* %out, i8 %in) {
+define amdgpu_kernel void @store_local_i8(i8 addrspace(3)* %out, i8 %in) {
store i8 %in, i8 addrspace(3)* %out
ret void
}
@@ -32,7 +32,7 @@ define void @store_local_i8(i8 addrspace(3)* %out, i8 %in) {
; CM: LDS_SHORT_WRITE
; GCN: ds_write_b16
-define void @store_local_i16(i16 addrspace(3)* %out, i16 %in) {
+define amdgpu_kernel void @store_local_i16(i16 addrspace(3)* %out, i16 %in) {
store i16 %in, i16 addrspace(3)* %out
ret void
}
@@ -43,7 +43,7 @@ define void @store_local_i16(i16 addrspace(3)* %out, i16 %in) {
; CM: LDS_WRITE
; GCN: ds_write_b32
-define void @store_local_v2i16(<2 x i16> addrspace(3)* %out, <2 x i16> %in) {
+define amdgpu_kernel void @store_local_v2i16(<2 x i16> addrspace(3)* %out, <2 x i16> %in) {
entry:
store <2 x i16> %in, <2 x i16> addrspace(3)* %out
ret void
@@ -55,7 +55,7 @@ entry:
; CM: LDS_WRITE
; GCN: ds_write_b32
-define void @store_local_v4i8(<4 x i8> addrspace(3)* %out, <4 x i8> %in) {
+define amdgpu_kernel void @store_local_v4i8(<4 x i8> addrspace(3)* %out, <4 x i8> %in) {
entry:
store <4 x i8> %in, <4 x i8> addrspace(3)* %out
ret void
@@ -78,7 +78,7 @@ entry:
; GCN: ds_write_b8
; GCN: ds_write_b8
; GCN: ds_write_b8
-define void @store_local_v4i8_unaligned(<4 x i8> addrspace(3)* %out, <4 x i8> %in) {
+define amdgpu_kernel void @store_local_v4i8_unaligned(<4 x i8> addrspace(3)* %out, <4 x i8> %in) {
entry:
store <4 x i8> %in, <4 x i8> addrspace(3)* %out, align 1
ret void
@@ -95,7 +95,7 @@ entry:
; GCN: ds_write_b16
; GCN: ds_write_b16
-define void @store_local_v4i8_halfaligned(<4 x i8> addrspace(3)* %out, <4 x i8> %in) {
+define amdgpu_kernel void @store_local_v4i8_halfaligned(<4 x i8> addrspace(3)* %out, <4 x i8> %in) {
entry:
store <4 x i8> %in, <4 x i8> addrspace(3)* %out, align 2
ret void
@@ -111,7 +111,7 @@ entry:
; CM-NOT: LDS_WRITE
; GCN: ds_write_b64
-define void @store_local_v2i32(<2 x i32> addrspace(3)* %out, <2 x i32> %in) {
+define amdgpu_kernel void @store_local_v2i32(<2 x i32> addrspace(3)* %out, <2 x i32> %in) {
entry:
store <2 x i32> %in, <2 x i32> addrspace(3)* %out
ret void
@@ -129,7 +129,7 @@ entry:
; CM: LDS_WRITE
; GCN: ds_write2_b64
-define void @store_local_v4i32(<4 x i32> addrspace(3)* %out, <4 x i32> %in) {
+define amdgpu_kernel void @store_local_v4i32(<4 x i32> addrspace(3)* %out, <4 x i32> %in) {
entry:
store <4 x i32> %in, <4 x i32> addrspace(3)* %out
ret void
@@ -148,7 +148,7 @@ entry:
; GCN: ds_write2_b32
; GCN: ds_write2_b32
-define void @store_local_v4i32_align4(<4 x i32> addrspace(3)* %out, <4 x i32> %in) {
+define amdgpu_kernel void @store_local_v4i32_align4(<4 x i32> addrspace(3)* %out, <4 x i32> %in) {
entry:
store <4 x i32> %in, <4 x i32> addrspace(3)* %out, align 4
ret void
@@ -157,7 +157,7 @@ entry:
; FUNC-LABEL: {{^}}store_local_i64_i8:
; EG: LDS_BYTE_WRITE
; GCN: ds_write_b8
-define void @store_local_i64_i8(i8 addrspace(3)* %out, i64 %in) {
+define amdgpu_kernel void @store_local_i64_i8(i8 addrspace(3)* %out, i64 %in) {
entry:
%0 = trunc i64 %in to i8
store i8 %0, i8 addrspace(3)* %out
@@ -167,7 +167,7 @@ entry:
; FUNC-LABEL: {{^}}store_local_i64_i16:
; EG: LDS_SHORT_WRITE
; GCN: ds_write_b16
-define void @store_local_i64_i16(i16 addrspace(3)* %out, i64 %in) {
+define amdgpu_kernel void @store_local_i64_i16(i16 addrspace(3)* %out, i64 %in) {
entry:
%0 = trunc i64 %in to i16
store i16 %0, i16 addrspace(3)* %out
diff --git a/test/CodeGen/AMDGPU/store-private.ll b/test/CodeGen/AMDGPU/store-private.ll
index 33d27f24e9cf..ab73ada370ea 100644
--- a/test/CodeGen/AMDGPU/store-private.ll
+++ b/test/CodeGen/AMDGPU/store-private.ll
@@ -15,7 +15,7 @@
; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
; SI: buffer_store_byte
-define void @store_i1(i1 addrspace(0)* %out) {
+define amdgpu_kernel void @store_i1(i1 addrspace(0)* %out) {
entry:
store i1 true, i1 addrspace(0)* %out
ret void
@@ -44,7 +44,7 @@ entry:
; SI: buffer_store_byte
-define void @store_i8(i8 addrspace(0)* %out, i8 %in) {
+define amdgpu_kernel void @store_i8(i8 addrspace(0)* %out, i8 %in) {
entry:
store i8 %in, i8 addrspace(0)* %out
ret void
@@ -72,7 +72,7 @@ entry:
; EG: MOV * T(0 + AR.x).X+, [[RES]]
; SI: buffer_store_short
-define void @store_i16(i16 addrspace(0)* %out, i16 %in) {
+define amdgpu_kernel void @store_i16(i16 addrspace(0)* %out, i16 %in) {
entry:
store i16 %in, i16 addrspace(0)* %out
ret void
@@ -102,7 +102,7 @@ entry:
; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
; CM: MOVA_INT
; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
-define void @store_i24(i24 addrspace(0)* %out, i24 %in) {
+define amdgpu_kernel void @store_i24(i24 addrspace(0)* %out, i24 %in) {
entry:
store i24 %in, i24 addrspace(0)* %out
ret void
@@ -120,7 +120,7 @@ entry:
; CM: MOVA_INT
; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
; CM-NOT: MOVA_INT
-define void @store_i25(i25 addrspace(0)* %out, i25 %in) {
+define amdgpu_kernel void @store_i25(i25 addrspace(0)* %out, i25 %in) {
entry:
store i25 %in, i25 addrspace(0)* %out
ret void
@@ -141,7 +141,7 @@ entry:
; CM-NOT: MOVA_INT
; SI: buffer_store_short
-define void @store_v2i8(<2 x i8> addrspace(0)* %out, <2 x i32> %in) {
+define amdgpu_kernel void @store_v2i8(<2 x i8> addrspace(0)* %out, <2 x i32> %in) {
entry:
%0 = trunc <2 x i32> %in to <2 x i8>
store <2 x i8> %0, <2 x i8> addrspace(0)* %out
@@ -172,7 +172,7 @@ entry:
; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
; SI: buffer_store_byte
-define void @store_v2i8_unaligned(<2 x i8> addrspace(0)* %out, <2 x i32> %in) {
+define amdgpu_kernel void @store_v2i8_unaligned(<2 x i8> addrspace(0)* %out, <2 x i32> %in) {
entry:
%0 = trunc <2 x i32> %in to <2 x i8>
store <2 x i8> %0, <2 x i8> addrspace(0)* %out, align 1
@@ -191,7 +191,7 @@ entry:
; CM-NOT: MOVA_INT
; SI: buffer_store_dword
-define void @store_v2i16(<2 x i16> addrspace(0)* %out, <2 x i32> %in) {
+define amdgpu_kernel void @store_v2i16(<2 x i16> addrspace(0)* %out, <2 x i32> %in) {
entry:
%0 = trunc <2 x i32> %in to <2 x i16>
store <2 x i16> %0, <2 x i16> addrspace(0)* %out
@@ -223,7 +223,7 @@ entry:
; SI: buffer_store_short
; SI: buffer_store_short
-define void @store_v2i16_unaligned(<2 x i16> addrspace(0)* %out, <2 x i32> %in) {
+define amdgpu_kernel void @store_v2i16_unaligned(<2 x i16> addrspace(0)* %out, <2 x i32> %in) {
entry:
%0 = trunc <2 x i32> %in to <2 x i16>
store <2 x i16> %0, <2 x i16> addrspace(0)* %out, align 2
@@ -240,7 +240,7 @@ entry:
; CM-NOT: MOVA_INT
; SI: buffer_store_dword
-define void @store_v4i8(<4 x i8> addrspace(0)* %out, <4 x i32> %in) {
+define amdgpu_kernel void @store_v4i8(<4 x i8> addrspace(0)* %out, <4 x i32> %in) {
entry:
%0 = trunc <4 x i32> %in to <4 x i8>
store <4 x i8> %0, <4 x i8> addrspace(0)* %out
@@ -299,7 +299,7 @@ entry:
; SI: buffer_store_byte
; SI: buffer_store_byte
; SI-NOT: buffer_store_dword
-define void @store_v4i8_unaligned(<4 x i8> addrspace(0)* %out, <4 x i32> %in) {
+define amdgpu_kernel void @store_v4i8_unaligned(<4 x i8> addrspace(0)* %out, <4 x i32> %in) {
entry:
%0 = trunc <4 x i32> %in to <4 x i8>
store <4 x i8> %0, <4 x i8> addrspace(0)* %out, align 1
@@ -410,7 +410,7 @@ entry:
; SI: buffer_store_byte
; SI: buffer_store_byte
; SI-NOT: buffer_store_dword
-define void @store_v8i8_unaligned(<8 x i8> addrspace(0)* %out, <8 x i32> %in) {
+define amdgpu_kernel void @store_v8i8_unaligned(<8 x i8> addrspace(0)* %out, <8 x i32> %in) {
entry:
%0 = trunc <8 x i32> %in to <8 x i8>
store <8 x i8> %0, <8 x i8> addrspace(0)* %out, align 1
@@ -443,7 +443,7 @@ entry:
; SI: buffer_store_short
; SI: buffer_store_short
; SI-NOT: buffer_store_dword
-define void @store_v4i8_halfaligned(<4 x i8> addrspace(0)* %out, <4 x i32> %in) {
+define amdgpu_kernel void @store_v4i8_halfaligned(<4 x i8> addrspace(0)* %out, <4 x i32> %in) {
entry:
%0 = trunc <4 x i32> %in to <4 x i8>
store <4 x i8> %0, <4 x i8> addrspace(0)* %out, align 2
@@ -460,7 +460,7 @@ entry:
; SI: buffer_store_dword
-define void @store_f32(float addrspace(0)* %out, float %in) {
+define amdgpu_kernel void @store_f32(float addrspace(0)* %out, float %in) {
store float %in, float addrspace(0)* %out
ret void
}
@@ -480,7 +480,7 @@ define void @store_f32(float addrspace(0)* %out, float %in) {
; XSI: buffer_store_dwordx2
; SI: buffer_store_dword
; SI: buffer_store_dword
-define void @store_v4i16(<4 x i16> addrspace(0)* %out, <4 x i32> %in) {
+define amdgpu_kernel void @store_v4i16(<4 x i16> addrspace(0)* %out, <4 x i32> %in) {
entry:
%0 = trunc <4 x i32> %in to <4 x i16>
store <4 x i16> %0, <4 x i16> addrspace(0)* %out
@@ -504,7 +504,7 @@ entry:
; SI: buffer_store_dword
; SI: buffer_store_dword
-define void @store_v2f32(<2 x float> addrspace(0)* %out, float %a, float %b) {
+define amdgpu_kernel void @store_v2f32(<2 x float> addrspace(0)* %out, float %a, float %b) {
entry:
%0 = insertelement <2 x float> <float 0.0, float 0.0>, float %a, i32 0
%1 = insertelement <2 x float> %0, float %b, i32 1
@@ -533,7 +533,7 @@ entry:
; SI: buffer_store_dword
; SI: buffer_store_dword
-define void @store_v3i32(<3 x i32> addrspace(0)* %out, <3 x i32> %a) nounwind {
+define amdgpu_kernel void @store_v3i32(<3 x i32> addrspace(0)* %out, <3 x i32> %a) nounwind {
store <3 x i32> %a, <3 x i32> addrspace(0)* %out, align 16
ret void
}
@@ -563,7 +563,7 @@ define void @store_v3i32(<3 x i32> addrspace(0)* %out, <3 x i32> %a) nounwind {
; SI: buffer_store_dword
; SI: buffer_store_dword
; SI: buffer_store_dword
-define void @store_v4i32(<4 x i32> addrspace(0)* %out, <4 x i32> %in) {
+define amdgpu_kernel void @store_v4i32(<4 x i32> addrspace(0)* %out, <4 x i32> %in) {
entry:
store <4 x i32> %in, <4 x i32> addrspace(0)* %out
ret void
@@ -594,7 +594,7 @@ entry:
; SI: buffer_store_dword
; SI: buffer_store_dword
; SI: buffer_store_dword
-define void @store_v4i32_unaligned(<4 x i32> addrspace(0)* %out, <4 x i32> %in) {
+define amdgpu_kernel void @store_v4i32_unaligned(<4 x i32> addrspace(0)* %out, <4 x i32> %in) {
entry:
store <4 x i32> %in, <4 x i32> addrspace(0)* %out, align 4
ret void
@@ -626,7 +626,7 @@ entry:
; SI: buffer_store_dword
; SI: buffer_store_dword
; SI: buffer_store_dword
-define void @store_v4f32(<4 x float> addrspace(0)* %out, <4 x float> addrspace(0)* %in) {
+define amdgpu_kernel void @store_v4f32(<4 x float> addrspace(0)* %out, <4 x float> addrspace(0)* %in) {
%1 = load <4 x float>, <4 x float> addrspace(0) * %in
store <4 x float> %1, <4 x float> addrspace(0)* %out
ret void
@@ -644,7 +644,7 @@ define void @store_v4f32(<4 x float> addrspace(0)* %out, <4 x float> addrspace(0
; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
; SI: buffer_store_byte
-define void @store_i64_i8(i8 addrspace(0)* %out, i64 %in) {
+define amdgpu_kernel void @store_i64_i8(i8 addrspace(0)* %out, i64 %in) {
entry:
%0 = trunc i64 %in to i8
store i8 %0, i8 addrspace(0)* %out
@@ -663,7 +663,7 @@ entry:
; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
; SI: buffer_store_short
-define void @store_i64_i16(i16 addrspace(0)* %out, i64 %in) {
+define amdgpu_kernel void @store_i64_i16(i16 addrspace(0)* %out, i64 %in) {
entry:
%0 = trunc i64 %in to i16
store i16 %0, i16 addrspace(0)* %out
@@ -689,7 +689,7 @@ entry:
; XSI: buffer_store_dwordx2
; SI: buffer_store_dword
; SI: buffer_store_dword
-define void @vecload2(i32 addrspace(0)* nocapture %out, i32 addrspace(2)* nocapture %mem) #0 {
+define amdgpu_kernel void @vecload2(i32 addrspace(0)* nocapture %out, i32 addrspace(2)* nocapture %mem) #0 {
entry:
%0 = load i32, i32 addrspace(2)* %mem, align 4
%arrayidx1.i = getelementptr inbounds i32, i32 addrspace(2)* %mem, i64 1
@@ -727,7 +727,7 @@ entry:
; SI: buffer_store_dword
; SI: buffer_store_dword
; SI: buffer_store_dword
-define void @i128-const-store(i32 addrspace(0)* %out) {
+define amdgpu_kernel void @i128-const-store(i32 addrspace(0)* %out) {
entry:
store i32 1, i32 addrspace(0)* %out, align 4
%arrayidx2 = getelementptr inbounds i32, i32 addrspace(0)* %out, i64 1
diff --git a/test/CodeGen/AMDGPU/store-v3i64.ll b/test/CodeGen/AMDGPU/store-v3i64.ll
index 78db2d37724b..7518e887135c 100644
--- a/test/CodeGen/AMDGPU/store-v3i64.ll
+++ b/test/CodeGen/AMDGPU/store-v3i64.ll
@@ -5,7 +5,7 @@
; GCN-LABEL: {{^}}global_store_v3i64:
; GCN-DAG: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:16
; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
-define void @global_store_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> %x) {
+define amdgpu_kernel void @global_store_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> %x) {
store <3 x i64> %x, <3 x i64> addrspace(1)* %out, align 32
ret void
}
@@ -40,7 +40,7 @@ define void @global_store_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> %x) {
; GCN: buffer_store_byte
; GCN: buffer_store_byte
; GCN: buffer_store_byte
-define void @global_store_v3i64_unaligned(<3 x i64> addrspace(1)* %out, <3 x i64> %x) {
+define amdgpu_kernel void @global_store_v3i64_unaligned(<3 x i64> addrspace(1)* %out, <3 x i64> %x) {
store <3 x i64> %x, <3 x i64> addrspace(1)* %out, align 1
ret void
}
@@ -48,7 +48,7 @@ define void @global_store_v3i64_unaligned(<3 x i64> addrspace(1)* %out, <3 x i64
; GCN-LABEL: {{^}}local_store_v3i64:
; GCN: ds_write2_b64
; GCN: ds_write_b64
-define void @local_store_v3i64(<3 x i64> addrspace(3)* %out, <3 x i64> %x) {
+define amdgpu_kernel void @local_store_v3i64(<3 x i64> addrspace(3)* %out, <3 x i64> %x) {
store <3 x i64> %x, <3 x i64> addrspace(3)* %out, align 32
ret void
}
@@ -83,7 +83,7 @@ define void @local_store_v3i64(<3 x i64> addrspace(3)* %out, <3 x i64> %x) {
; GCN: ds_write_b8
; GCN: ds_write_b8
; GCN: ds_write_b8
-define void @local_store_v3i64_unaligned(<3 x i64> addrspace(3)* %out, <3 x i64> %x) {
+define amdgpu_kernel void @local_store_v3i64_unaligned(<3 x i64> addrspace(3)* %out, <3 x i64> %x) {
store <3 x i64> %x, <3 x i64> addrspace(3)* %out, align 1
ret void
}
@@ -91,7 +91,7 @@ define void @local_store_v3i64_unaligned(<3 x i64> addrspace(3)* %out, <3 x i64>
; GCN-LABEL: {{^}}global_truncstore_v3i64_to_v3i32:
; GCN-DAG: buffer_store_dwordx2
; GCN-DAG: buffer_store_dword v
-define void @global_truncstore_v3i64_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i64> %x) {
+define amdgpu_kernel void @global_truncstore_v3i64_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i64> %x) {
%trunc = trunc <3 x i64> %x to <3 x i32>
store <3 x i32> %trunc, <3 x i32> addrspace(1)* %out
ret void
@@ -100,7 +100,7 @@ define void @global_truncstore_v3i64_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x
; GCN-LABEL: {{^}}global_truncstore_v3i64_to_v3i16:
; GCN-DAG: buffer_store_short
; GCN-DAG: buffer_store_dword v
-define void @global_truncstore_v3i64_to_v3i16(<3 x i16> addrspace(1)* %out, <3 x i64> %x) {
+define amdgpu_kernel void @global_truncstore_v3i64_to_v3i16(<3 x i16> addrspace(1)* %out, <3 x i64> %x) {
%trunc = trunc <3 x i64> %x to <3 x i16>
store <3 x i16> %trunc, <3 x i16> addrspace(1)* %out
ret void
@@ -110,7 +110,7 @@ define void @global_truncstore_v3i64_to_v3i16(<3 x i16> addrspace(1)* %out, <3 x
; GCN-LABEL: {{^}}global_truncstore_v3i64_to_v3i8:
; GCN-DAG: buffer_store_short
; GCN-DAG: buffer_store_byte v
-define void @global_truncstore_v3i64_to_v3i8(<3 x i8> addrspace(1)* %out, <3 x i64> %x) {
+define amdgpu_kernel void @global_truncstore_v3i64_to_v3i8(<3 x i8> addrspace(1)* %out, <3 x i64> %x) {
%trunc = trunc <3 x i64> %x to <3 x i8>
store <3 x i8> %trunc, <3 x i8> addrspace(1)* %out
ret void
@@ -120,7 +120,7 @@ define void @global_truncstore_v3i64_to_v3i8(<3 x i8> addrspace(1)* %out, <3 x i
; GCN-DAG: buffer_store_byte v
; GCN-DAG: buffer_store_byte v
; GCN-DAG: buffer_store_byte v
-define void @global_truncstore_v3i64_to_v3i1(<3 x i1> addrspace(1)* %out, <3 x i64> %x) {
+define amdgpu_kernel void @global_truncstore_v3i64_to_v3i1(<3 x i1> addrspace(1)* %out, <3 x i64> %x) {
%trunc = trunc <3 x i64> %x to <3 x i1>
store <3 x i1> %trunc, <3 x i1> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/store-vector-ptrs.ll b/test/CodeGen/AMDGPU/store-vector-ptrs.ll
index d5af3b29118a..507f07dee052 100644
--- a/test/CodeGen/AMDGPU/store-vector-ptrs.ll
+++ b/test/CodeGen/AMDGPU/store-vector-ptrs.ll
@@ -5,7 +5,7 @@
; AMDGPUDAGToDAGISel::SelectMUBUFScratch() which is used for selecting
; scratch loads and stores.
; CHECK-LABEL: {{^}}store_vector_ptrs:
-define void @store_vector_ptrs(<4 x i32*>* %out, <4 x [1024 x i32]*> %array) nounwind {
+define amdgpu_kernel void @store_vector_ptrs(<4 x i32*>* %out, <4 x [1024 x i32]*> %array) nounwind {
%p = getelementptr [1024 x i32], <4 x [1024 x i32]*> %array, <4 x i16> zeroinitializer, <4 x i16> <i16 16, i16 16, i16 16, i16 16>
store <4 x i32*> %p, <4 x i32*>* %out
ret void
diff --git a/test/CodeGen/AMDGPU/store_typed.ll b/test/CodeGen/AMDGPU/store_typed.ll
index 515fcf04f406..eaa21617f937 100644
--- a/test/CodeGen/AMDGPU/store_typed.ll
+++ b/test/CodeGen/AMDGPU/store_typed.ll
@@ -6,7 +6,7 @@
; EG: MEM_RAT STORE_TYPED RAT(0) {{T[0-9]+, T[0-9]+}}, 1
; CM: MEM_RAT STORE_TYPED RAT(0) {{T[0-9]+, T[0-9]+}}
-define void @store_typed_rat0(<4 x i32> %data, <4 x i32> %index) {
+define amdgpu_kernel void @store_typed_rat0(<4 x i32> %data, <4 x i32> %index) {
call void @llvm.r600.rat.store.typed(<4 x i32> %data, <4 x i32> %index, i32 0)
ret void
}
@@ -16,7 +16,7 @@ define void @store_typed_rat0(<4 x i32> %data, <4 x i32> %index) {
; EG: MEM_RAT STORE_TYPED RAT(11) {{T[0-9]+, T[0-9]+}}, 1
; CM: MEM_RAT STORE_TYPED RAT(11) {{T[0-9]+, T[0-9]+}}
-define void @store_typed_rat11(<4 x i32> %data, <4 x i32> %index) {
+define amdgpu_kernel void @store_typed_rat11(<4 x i32> %data, <4 x i32> %index) {
call void @llvm.r600.rat.store.typed(<4 x i32> %data, <4 x i32> %index, i32 11)
ret void
}
diff --git a/test/CodeGen/AMDGPU/structurize.ll b/test/CodeGen/AMDGPU/structurize.ll
index 174e64e2cf8b..3cceb2d45c93 100644
--- a/test/CodeGen/AMDGPU/structurize.ll
+++ b/test/CodeGen/AMDGPU/structurize.ll
@@ -45,7 +45,7 @@
; CHECK: CF_END
-define void @branch_into_diamond(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
+define amdgpu_kernel void @branch_into_diamond(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
entry:
%0 = icmp ne i32 %a, 0
br i1 %0, label %diamond_head, label %branch_from
diff --git a/test/CodeGen/AMDGPU/structurize1.ll b/test/CodeGen/AMDGPU/structurize1.ll
index db0f50247e38..2e7d0e615e07 100644
--- a/test/CodeGen/AMDGPU/structurize1.ll
+++ b/test/CodeGen/AMDGPU/structurize1.ll
@@ -19,7 +19,7 @@
; CHECK-LABEL: {{^}}if_inside_loop:
; CHECK: LOOP_START_DX10
; CHECK: END_LOOP
-define void @if_inside_loop(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
+define amdgpu_kernel void @if_inside_loop(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
entry:
br label %for.body
diff --git a/test/CodeGen/AMDGPU/sub.i16.ll b/test/CodeGen/AMDGPU/sub.i16.ll
index b5d5f56b2796..6642411f7a63 100644
--- a/test/CodeGen/AMDGPU/sub.i16.ll
+++ b/test/CodeGen/AMDGPU/sub.i16.ll
@@ -7,7 +7,7 @@
; VI: flat_load_ushort [[B:v[0-9]+]]
; VI: v_subrev_u16_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
; VI-NEXT: buffer_store_short [[ADD]]
-define void @v_test_sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
+define amdgpu_kernel void @v_test_sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
%gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
@@ -24,7 +24,7 @@ define void @v_test_sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in0, i16
; VI: flat_load_ushort [[A:v[0-9]+]]
; VI: v_add_u16_e32 [[ADD:v[0-9]+]], 0xffffff85, [[A]]
; VI-NEXT: buffer_store_short [[ADD]]
-define void @v_test_sub_i16_constant(i16 addrspace(1)* %out, i16 addrspace(1)* %in0) #1 {
+define amdgpu_kernel void @v_test_sub_i16_constant(i16 addrspace(1)* %out, i16 addrspace(1)* %in0) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
%gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
@@ -39,7 +39,7 @@ define void @v_test_sub_i16_constant(i16 addrspace(1)* %out, i16 addrspace(1)* %
; VI: flat_load_ushort [[A:v[0-9]+]]
; VI: v_add_u16_e32 [[ADD:v[0-9]+]], 0x34d, [[A]]
; VI-NEXT: buffer_store_short [[ADD]]
-define void @v_test_sub_i16_neg_constant(i16 addrspace(1)* %out, i16 addrspace(1)* %in0) #1 {
+define amdgpu_kernel void @v_test_sub_i16_neg_constant(i16 addrspace(1)* %out, i16 addrspace(1)* %in0) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
%gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
@@ -52,9 +52,9 @@ define void @v_test_sub_i16_neg_constant(i16 addrspace(1)* %out, i16 addrspace(1
; FIXME: Need to handle non-uniform case for function below (load without gep).
; GCN-LABEL: {{^}}v_test_sub_i16_inline_63:
; VI: flat_load_ushort [[A:v[0-9]+]]
-; VI: v_add_u16_e32 [[ADD:v[0-9]+]], 0xffffffc1, [[A]]
+; VI: v_subrev_u16_e32 [[ADD:v[0-9]+]], 63, [[A]]
; VI-NEXT: buffer_store_short [[ADD]]
-define void @v_test_sub_i16_inline_63(i16 addrspace(1)* %out, i16 addrspace(1)* %in0) #1 {
+define amdgpu_kernel void @v_test_sub_i16_inline_63(i16 addrspace(1)* %out, i16 addrspace(1)* %in0) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
%gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
@@ -70,7 +70,7 @@ define void @v_test_sub_i16_inline_63(i16 addrspace(1)* %out, i16 addrspace(1)*
; VI: flat_load_ushort [[B:v[0-9]+]]
; VI: v_subrev_u16_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
; VI-NEXT: buffer_store_dword [[ADD]]
-define void @v_test_sub_i16_zext_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
+define amdgpu_kernel void @v_test_sub_i16_zext_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %tid
%gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
@@ -85,12 +85,12 @@ define void @v_test_sub_i16_zext_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)
; FIXME: Need to handle non-uniform case for function below (load without gep).
; GCN-LABEL: {{^}}v_test_sub_i16_zext_to_i64:
+; VI-DAG: v_mov_b32_e32 v[[VZERO:[0-9]+]], 0
; VI: flat_load_ushort [[A:v[0-9]+]]
; VI: flat_load_ushort [[B:v[0-9]+]]
; VI-DAG: v_subrev_u16_e32 v[[ADD:[0-9]+]], [[B]], [[A]]
-; VI-DAG: v_mov_b32_e32 v[[VZERO:[0-9]+]], 0
; VI: buffer_store_dwordx2 v{{\[}}[[ADD]]:[[VZERO]]{{\]}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0{{$}}
-define void @v_test_sub_i16_zext_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
+define amdgpu_kernel void @v_test_sub_i16_zext_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.out = getelementptr inbounds i64, i64 addrspace(1)* %out, i32 %tid
%gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
@@ -110,7 +110,7 @@ define void @v_test_sub_i16_zext_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)
; VI: v_subrev_u16_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
; VI-NEXT: v_bfe_i32 [[SEXT:v[0-9]+]], [[ADD]], 0, 16
; VI-NEXT: buffer_store_dword [[SEXT]]
-define void @v_test_sub_i16_sext_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
+define amdgpu_kernel void @v_test_sub_i16_sext_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %tid
%gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
@@ -131,7 +131,7 @@ define void @v_test_sub_i16_sext_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)
; VI-NEXT: v_bfe_i32 v[[LO:[0-9]+]], [[ADD]], 0, 16
; VI-NEXT: v_ashrrev_i32_e32 v[[HI:[0-9]+]], 31, v[[LO]]
; VI-NEXT: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @v_test_sub_i16_sext_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
+define amdgpu_kernel void @v_test_sub_i16_sext_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep.out = getelementptr inbounds i64, i64 addrspace(1)* %out, i32 %tid
%gep.in0 = getelementptr inbounds i16, i16 addrspace(1)* %in0, i32 %tid
@@ -149,7 +149,7 @@ define void @v_test_sub_i16_sext_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)
; GCN-LABEL: {{^}}v_test_sub_i16_constant_commute:
; VI: v_subrev_u16_e32 v{{[0-9]+}}, 0x800, v{{[0-9]+}}
; CI: v_subrev_i32_e32 v{{[0-9]+}}, vcc, 0x800, v{{[0-9]+}}
-define void @v_test_sub_i16_constant_commute(i16 addrspace(1)* %out, i16 addrspace(1)* %in0) #1 {
+define amdgpu_kernel void @v_test_sub_i16_constant_commute(i16 addrspace(1)* %out, i16 addrspace(1)* %in0) #1 {
%size = call i32 @llvm.amdgcn.groupstaticsize()
%size.trunc = trunc i32 %size to i16
call void asm sideeffect "; $0", "v"([512 x i32] addrspace(3)* @lds)
diff --git a/test/CodeGen/AMDGPU/sub.ll b/test/CodeGen/AMDGPU/sub.ll
index 5816345098af..f366029fdea2 100644
--- a/test/CodeGen/AMDGPU/sub.ll
+++ b/test/CodeGen/AMDGPU/sub.ll
@@ -8,7 +8,7 @@ declare i32 @llvm.r600.read.tidig.x() readnone
; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; SI: v_subrev_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
-define void @test_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
%b = load i32, i32 addrspace(1)* %b_ptr
@@ -25,7 +25,7 @@ define void @test_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; SI: v_sub_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
; SI: v_sub_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
-define void @test_sub_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @test_sub_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32>, <2 x i32> addrspace(1) * %in
%b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
@@ -45,7 +45,7 @@ define void @test_sub_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)
; SI: v_sub_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
; SI: v_sub_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
-define void @test_sub_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @test_sub_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32>, <4 x i32> addrspace(1) * %in
%b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
@@ -55,7 +55,7 @@ define void @test_sub_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)
}
; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @test_sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @test_sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
%b_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
%a = load i16, i16 addrspace(1)* %in
%b = load i16, i16 addrspace(1)* %b_ptr
@@ -69,7 +69,7 @@ define void @test_sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @test_sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
+define amdgpu_kernel void @test_sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in, i16 1
%a = load <2 x i16>, <2 x i16> addrspace(1) * %in
%b = load <2 x i16>, <2 x i16> addrspace(1) * %b_ptr
@@ -85,7 +85,7 @@ define void @test_sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)
; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @test_sub_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
+define amdgpu_kernel void @test_sub_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in, i16 1
%a = load <4 x i16>, <4 x i16> addrspace(1) * %in
%b = load <4 x i16>, <4 x i16> addrspace(1) * %b_ptr
@@ -103,7 +103,7 @@ define void @test_sub_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)
; EG-DAG: SUBB_UINT
; EG-DAG: SUB_INT
; EG-DAG: SUB_INT {{[* ]*}}
-define void @s_sub_i64(i64 addrspace(1)* noalias %out, i64 %a, i64 %b) nounwind {
+define amdgpu_kernel void @s_sub_i64(i64 addrspace(1)* noalias %out, i64 %a, i64 %b) nounwind {
%result = sub i64 %a, %b
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
@@ -118,7 +118,7 @@ define void @s_sub_i64(i64 addrspace(1)* noalias %out, i64 %a, i64 %b) nounwind
; EG-DAG: SUBB_UINT
; EG-DAG: SUB_INT
; EG-DAG: SUB_INT {{[* ]*}}
-define void @v_sub_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %inA, i64 addrspace(1)* noalias %inB) nounwind {
+define amdgpu_kernel void @v_sub_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %inA, i64 addrspace(1)* noalias %inB) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() readnone
%a_ptr = getelementptr i64, i64 addrspace(1)* %inA, i32 %tid
%b_ptr = getelementptr i64, i64 addrspace(1)* %inB, i32 %tid
@@ -134,7 +134,7 @@ define void @v_sub_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias
; SI: v_subb_u32_e32
; SI: v_sub_i32_e32
; SI: v_subb_u32_e32
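; Each 64-bit lane is expanded into a low-half subtract that produces a
; borrow plus a high-half subtract-with-borrow (v_sub_i32 / v_subb_u32).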
-define void @v_test_sub_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* noalias %inA, <2 x i64> addrspace(1)* noalias %inB) {
+define amdgpu_kernel void @v_test_sub_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* noalias %inA, <2 x i64> addrspace(1)* noalias %inB) {
%tid = call i32 @llvm.r600.read.tidig.x() readnone
%a_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inA, i32 %tid
%b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inB, i32 %tid
@@ -154,7 +154,7 @@ define void @v_test_sub_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(
; SI: v_subb_u32_e32
; SI: v_subrev_i32_e32
; SI: v_subb_u32_e32
-define void @v_test_sub_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* noalias %inA, <4 x i64> addrspace(1)* noalias %inB) {
+define amdgpu_kernel void @v_test_sub_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* noalias %inA, <4 x i64> addrspace(1)* noalias %inB) {
%tid = call i32 @llvm.r600.read.tidig.x() readnone
%a_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %inA, i32 %tid
%b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %inB, i32 %tid
diff --git a/test/CodeGen/AMDGPU/sub.v2i16.ll b/test/CodeGen/AMDGPU/sub.v2i16.ll
new file mode 100644
index 000000000000..69f0accef628
--- /dev/null
+++ b/test/CodeGen/AMDGPU/sub.v2i16.ll
@@ -0,0 +1,278 @@
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN %s
+
+; FIXME: Need to handle non-uniform case for function below (load without gep).
+; GCN-LABEL: {{^}}v_test_sub_v2i16:
+; GFX9: v_pk_sub_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+
+; VI: v_subrev_u16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI: v_subrev_u16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
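+; VI has no packed 16-bit subtract, so the <2 x i16> value is split into two
+; scalar u16 subtracts; the SDWA form with dst_sel:WORD_1 writes its result
+; directly into the high 16 bits of the destination dword.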
+define amdgpu_kernel void @v_test_sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0, <2 x i16> addrspace(1)* %in1) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %gep.in1 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in1, i32 %tid
+ %a = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %b = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in1
+ %add = sub <2 x i16> %a, %b
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_test_sub_v2i16:
+; GFX9: s_load_dword [[VAL0:s[0-9]+]]
+; GFX9: s_load_dword [[VAL1:s[0-9]+]]
+; GFX9: v_mov_b32_e32 [[VVAL1:v[0-9]+]]
+; GFX9: v_pk_sub_i16 v{{[0-9]+}}, [[VVAL1]], [[VAL0]]
+
+; VI: s_sub_i32
+; VI: s_sub_i32
+define amdgpu_kernel void @s_test_sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %in0, <2 x i16> addrspace(2)* %in1) #1 {
+ %a = load <2 x i16>, <2 x i16> addrspace(2)* %in0
+ %b = load <2 x i16>, <2 x i16> addrspace(2)* %in1
+ %add = sub <2 x i16> %a, %b
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_test_sub_self_v2i16:
+; GCN: v_mov_b32_e32 [[ZERO:v[0-9]+]]
+; GCN: buffer_store_dword [[ZERO]]
+define amdgpu_kernel void @s_test_sub_self_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %in0) #1 {
+ %a = load <2 x i16>, <2 x i16> addrspace(2)* %in0
+ %add = sub <2 x i16> %a, %a
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; FIXME: VI should not scalarize arg access.
+; GCN-LABEL: {{^}}s_test_sub_v2i16_kernarg:
+; GFX9: v_pk_sub_i16 v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
+
+; VI: v_subrev_i32_e32
+; VI: v_subrev_i32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+define amdgpu_kernel void @s_test_sub_v2i16_kernarg(<2 x i16> addrspace(1)* %out, <2 x i16> %a, <2 x i16> %b) #1 {
+ %add = sub <2 x i16> %a, %b
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_sub_v2i16_constant:
+; GFX9: s_mov_b32 [[CONST:s[0-9]+]], 0x1c8007b{{$}}
+; GFX9: v_pk_sub_i16 v{{[0-9]+}}, [[CONST]], v{{[0-9]+}}
+
+; VI-DAG: v_add_u16_e32 v{{[0-9]+}}, 0xfffffe38, v{{[0-9]+}}
+; VI-DAG: v_add_u16_e32 v{{[0-9]+}}, 0xffffff85, v{{[0-9]+}}
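+; 0x1c8007b packs the two halves: (456 << 16) | 123. VI folds the subtract
+; into adds of the negated halves: 0xfe38 is -456 and 0xff85 is -123 as i16
+; (shown sign-extended in the checks above).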
+define amdgpu_kernel void @v_test_sub_v2i16_constant(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %a = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %add = sub <2 x i16> %a, <i16 123, i16 456>
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; FIXME: Need to handle non-uniform case for function below (load without gep).
+; GCN-LABEL: {{^}}v_test_sub_v2i16_neg_constant:
+; GFX9: s_mov_b32 [[CONST:s[0-9]+]], 0xfc21fcb3{{$}}
+; GFX9: v_pk_sub_i16 v{{[0-9]+}}, [[CONST]], v{{[0-9]+}}
+
+; VI-DAG: v_add_u16_e32 v{{[0-9]+}}, 0x3df, v{{[0-9]+}}
+; VI-DAG: v_add_u16_e32 v{{[0-9]+}}, 0x34d, v{{[0-9]+}}
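+; 0xfc21fcb3 packs ((-991 & 0xffff) << 16) | (-845 & 0xffff); subtracting
+; these negative constants becomes adds of +991 (0x3df) and +845 (0x34d).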
+define amdgpu_kernel void @v_test_sub_v2i16_neg_constant(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %a = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %add = sub <2 x i16> %a, <i16 -845, i16 -991>
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_sub_v2i16_inline_neg1:
+; GFX9: v_pk_sub_i16 v{{[0-9]+}}, v{{[0-9]+}}, -1{{$}}
+
+; VI: flat_load_ushort [[LOAD0:v[0-9]+]]
+; VI: flat_load_ushort [[LOAD1:v[0-9]+]]
+; VI-DAG: v_add_u16_e32 v{{[0-9]+}}, 1, [[LOAD0]]
+; VI-DAG: v_add_u16_e32 v{{[0-9]+}}, 1, [[LOAD1]]
+; VI-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 16,
+; VI: v_or_b32_e32
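+; Subtracting <-1, -1> maps to the inline immediate -1, which is broadcast
+; to both 16-bit lanes, so GFX9 needs no constant register; VI rewrites each
+; half as an add of 1.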
+define amdgpu_kernel void @v_test_sub_v2i16_inline_neg1(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %a = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %add = sub <2 x i16> %a, <i16 -1, i16 -1>
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_sub_v2i16_inline_lo_zero_hi:
+; GFX9: s_mov_b32 [[K:s[0-9]+]], 32{{$}}
+; GFX9: v_pk_sub_i16 v{{[0-9]+}}, [[K]], v{{[0-9]+}}{{$}}
+
+; VI-NOT: v_subrev_i16
+; VI: v_add_u16_e32 v{{[0-9]+}}, 0xffffffe0, v{{[0-9]+}}
+; VI-NOT: v_subrev_i16
+; VI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16,
+; VI: v_or_b32_e32
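+; <i16 32, i16 0> is the dword 0x00000020; a VOP3P inline immediate would be
+; broadcast to both lanes, so GFX9 materializes the constant in an SGPR. On
+; VI only the low half changes: sub 32 becomes add -32 (0xffffffe0).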
+define amdgpu_kernel void @v_test_sub_v2i16_inline_lo_zero_hi(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %a = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %add = sub <2 x i16> %a, <i16 32, i16 0>
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; The high element gives the packed constant an fp interpretation:
+; <i16 0, i16 16256> is the dword 0x3f800000, i.e. 1.0.
+; GCN-LABEL: {{^}}v_test_sub_v2i16_inline_fp_split:
+; GFX9: s_mov_b32 [[K:s[0-9]+]], 1.0
+; GFX9: v_pk_sub_i16 v{{[0-9]+}}, [[K]], v{{[0-9]+}}{{$}}
+
+; VI-NOT: v_subrev_i16
+; VI: v_add_u16_e32 v{{[0-9]+}}, 0xffffc080, v{{[0-9]+}}
+; VI-NOT: v_subrev_i16
+; VI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16,
+; VI: v_or_b32_e32
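+; VI still treats the halves as integers: only the high half changes, and
+; sub 16256 becomes add 0xc080 (-16256 as i16, shown sign-extended).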
+define amdgpu_kernel void @v_test_sub_v2i16_inline_fp_split(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %a = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %add = sub <2 x i16> %a, <i16 0, i16 16256>
+ store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; FIXME: Need to handle non-uniform case for function below (load without gep).
+; GCN-LABEL: {{^}}v_test_sub_v2i16_zext_to_v2i32:
+; GFX9: flat_load_dword [[A:v[0-9]+]]
+; GFX9: flat_load_dword [[B:v[0-9]+]]
+
+; GFX9: v_pk_sub_i16 [[ADD:v[0-9]+]], [[A]], [[B]]
+; GFX9-DAG: v_and_b32_e32 v[[ELT0:[0-9]+]], 0xffff, [[ADD]]
+; GFX9-DAG: v_lshrrev_b32_e32 v[[ELT1:[0-9]+]], 16, [[ADD]]
+; GFX9: buffer_store_dwordx2 v{{\[}}[[ELT0]]:[[ELT1]]{{\]}}
+
+; VI: flat_load_ushort v[[A_HI:[0-9]+]]
+; VI: flat_load_ushort v[[A_LO:[0-9]+]]
+; VI: flat_load_ushort v[[B_HI:[0-9]+]]
+; VI: flat_load_ushort v[[B_LO:[0-9]+]]
+
+; VI: v_subrev_u16_e32 v[[ADD_HI:[0-9]+]], v[[B_HI]], v[[A_HI]]
+; VI-NOT: and
+; VI-NOT: shl
+; VI: v_subrev_u16_e32 v[[ADD_LO:[0-9]+]], v[[B_LO]], v[[A_LO]]
+; VI-NOT: and
+; VI-NOT: shl
+; VI: buffer_store_dwordx2 v{{\[}}[[ADD_LO]]:[[ADD_HI]]{{\]}}
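+; zext of the packed result is plain unpacking: mask the low 16 bits with
+; 0xffff and shift the high 16 bits down to form the two i32 elements.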
+define amdgpu_kernel void @v_test_sub_v2i16_zext_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0, <2 x i16> addrspace(1)* %in1) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i32>, <2 x i32> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %gep.in1 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in1, i32 %tid
+ %a = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %b = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in1
+ %add = sub <2 x i16> %a, %b
+ %ext = zext <2 x i16> %add to <2 x i32>
+ store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FIXME: Need to handle non-uniform case for function below (load without gep).
+; GCN-LABEL: {{^}}v_test_sub_v2i16_zext_to_v2i64:
+; GFX9: flat_load_dword [[A:v[0-9]+]]
+; GFX9: flat_load_dword [[B:v[0-9]+]]
+
+; GFX9: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}}
+; GFX9: v_pk_sub_i16 [[ADD:v[0-9]+]], [[A]], [[B]]
+; GFX9-DAG: v_and_b32_e32 v[[ELT0:[0-9]+]], 0xffff, [[ADD]]
+; GFX9-DAG: v_lshrrev_b32_e32 v[[ELT1:[0-9]+]], 16, [[ADD]]
+; GFX9: buffer_store_dwordx4
+
+; VI: flat_load_ushort v[[A_LO:[0-9]+]]
+; VI: flat_load_ushort v[[A_HI:[0-9]+]]
+; VI: flat_load_ushort v[[B_LO:[0-9]+]]
+; VI: flat_load_ushort v[[B_HI:[0-9]+]]
+
+; VI-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}}
+; VI-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}}
+; VI-DAG: v_subrev_u16_e32
+; VI-DAG: v_subrev_u16_e32
+
+; VI: buffer_store_dwordx4
+define amdgpu_kernel void @v_test_sub_v2i16_zext_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0, <2 x i16> addrspace(1)* %in1) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i64>, <2 x i64> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %gep.in1 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in1, i32 %tid
+ %a = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %b = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in1
+ %add = sub <2 x i16> %a, %b
+ %ext = zext <2 x i16> %add to <2 x i64>
+ store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FIXME: Need to handle non-uniform case for function below (load without gep).
+; GCN-LABEL: {{^}}v_test_sub_v2i16_sext_to_v2i32:
+; GFX9: flat_load_dword [[A:v[0-9]+]]
+; GFX9: flat_load_dword [[B:v[0-9]+]]
+
+; GFX9: v_pk_sub_i16 [[ADD:v[0-9]+]], [[A]], [[B]]
+; GFX9-DAG: v_bfe_i32 v[[ELT0:[0-9]+]], [[ADD]], 0, 16
+; GFX9-DAG: v_ashrrev_i32_e32 v[[ELT1:[0-9]+]], 16, [[ADD]]
+; GFX9: buffer_store_dwordx2 v{{\[}}[[ELT0]]:[[ELT1]]{{\]}}
+
+; VI: v_subrev_u16_e32
+; VI: v_subrev_u16_e32
+; VI: buffer_store_dwordx2
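+; For sext the unpacking is signed: v_bfe_i32 with offset 0 and width 16
+; sign-extends the low half, and an arithmetic shift right by 16 extracts
+; the sign-extended high half.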
+define amdgpu_kernel void @v_test_sub_v2i16_sext_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0, <2 x i16> addrspace(1)* %in1) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i32>, <2 x i32> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %gep.in1 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in1, i32 %tid
+ %a = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %b = load volatile <2 x i16>, <2 x i16> addrspace(1)* %gep.in1
+ %add = sub <2 x i16> %a, %b
+ %ext = sext <2 x i16> %add to <2 x i32>
+ store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FIXME: Need to handle non-uniform case for function below (load without gep).
+; GCN-LABEL: {{^}}v_test_sub_v2i16_sext_to_v2i64:
+; GCN: flat_load_dword
+; GCN: flat_load_dword
+
+; GFX9: v_pk_sub_i16
+; GFX9: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
+
+; VI: v_subrev_u16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI: v_subrev_u16_e32
+
+; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 16
+; GCN: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 16
+; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
+; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
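+; Extending to i64 needs one more step: after the 16->32 sign extensions,
+; v_ashrrev_i32 by 31 produces the high dword of each 64-bit result.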
+define amdgpu_kernel void @v_test_sub_v2i16_sext_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0, <2 x i16> addrspace(1)* %in1) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep.out = getelementptr inbounds <2 x i64>, <2 x i64> addrspace(1)* %out, i32 %tid
+ %gep.in0 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in0, i32 %tid
+ %gep.in1 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in1, i32 %tid
+ %a = load <2 x i16>, <2 x i16> addrspace(1)* %gep.in0
+ %b = load <2 x i16>, <2 x i16> addrspace(1)* %gep.in1
+ %add = sub <2 x i16> %a, %b
+ %ext = sext <2 x i16> %add to <2 x i64>
+ store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/subreg-coalescer-crash.ll b/test/CodeGen/AMDGPU/subreg-coalescer-crash.ll
index ec2ed78b4954..c2d04abf829f 100644
--- a/test/CodeGen/AMDGPU/subreg-coalescer-crash.ll
+++ b/test/CodeGen/AMDGPU/subreg-coalescer-crash.ll
@@ -1,39 +1,37 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs -o - %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs -o - %s
+; RUN: llc -march=amdgcn -verify-machineinstrs -o - %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs -o - %s | FileCheck -check-prefix=GCN %s
-; SI-LABEL:{{^}}row_filter_C1_D0:
-; SI: s_endpgm
-; Function Attrs: nounwind
-define void @row_filter_C1_D0() {
+; GCN-LABEL:{{^}}row_filter_C1_D0:
+define amdgpu_kernel void @row_filter_C1_D0() #0 {
entry:
br i1 undef, label %for.inc.1, label %do.body.preheader
do.body.preheader: ; preds = %entry
- %0 = insertelement <4 x i32> zeroinitializer, i32 undef, i32 1
+ %tmp = insertelement <4 x i32> zeroinitializer, i32 undef, i32 1
br i1 undef, label %do.body56.1, label %do.body90
do.body90: ; preds = %do.body56.2, %do.body56.1, %do.body.preheader
- %1 = phi <4 x i32> [ %6, %do.body56.2 ], [ %5, %do.body56.1 ], [ %0, %do.body.preheader ]
- %2 = insertelement <4 x i32> %1, i32 undef, i32 2
- %3 = insertelement <4 x i32> %2, i32 undef, i32 3
+ %tmp1 = phi <4 x i32> [ %tmp6, %do.body56.2 ], [ %tmp5, %do.body56.1 ], [ %tmp, %do.body.preheader ]
+ %tmp2 = insertelement <4 x i32> %tmp1, i32 undef, i32 2
+ %tmp3 = insertelement <4 x i32> %tmp2, i32 undef, i32 3
br i1 undef, label %do.body124.1, label %do.body.1562.preheader
do.body.1562.preheader: ; preds = %do.body124.1, %do.body90
- %storemerge = phi <4 x i32> [ %3, %do.body90 ], [ %7, %do.body124.1 ]
- %4 = insertelement <4 x i32> undef, i32 undef, i32 1
+ %storemerge = phi <4 x i32> [ %tmp3, %do.body90 ], [ %tmp7, %do.body124.1 ]
+ %tmp4 = insertelement <4 x i32> undef, i32 undef, i32 1
br label %for.inc.1
do.body56.1: ; preds = %do.body.preheader
- %5 = insertelement <4 x i32> %0, i32 undef, i32 1
+ %tmp5 = insertelement <4 x i32> %tmp, i32 undef, i32 1
%or.cond472.1 = or i1 undef, undef
br i1 %or.cond472.1, label %do.body56.2, label %do.body90
do.body56.2: ; preds = %do.body56.1
- %6 = insertelement <4 x i32> %5, i32 undef, i32 1
+ %tmp6 = insertelement <4 x i32> %tmp5, i32 undef, i32 1
br label %do.body90
do.body124.1: ; preds = %do.body90
- %7 = insertelement <4 x i32> %3, i32 undef, i32 3
+ %tmp7 = insertelement <4 x i32> %tmp3, i32 undef, i32 3
br label %do.body.1562.preheader
for.inc.1: ; preds = %do.body.1562.preheader, %entry
@@ -42,8 +40,8 @@ for.inc.1: ; preds = %do.body.1562.prehea
unreachable
}
-; SI-LABEL: {{^}}foo:
-; SI: s_endpgm
+; GCN-LABEL: {{^}}foo:
+; GCN: s_endpgm
define amdgpu_ps void @foo() #0 {
bb:
br i1 undef, label %bb2, label %bb1
@@ -67,7 +65,7 @@ bb7: ; preds = %bb6
br label %bb4
bb9: ; preds = %bb2
- %tmp10 = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp10 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp11 = extractelement <4 x float> %tmp10, i32 1
%tmp12 = extractelement <4 x float> %tmp10, i32 3
br label %bb14
@@ -78,9 +76,9 @@ bb13: ; preds = %bb2
bb14: ; preds = %bb27, %bb24, %bb9
%tmp15 = phi float [ %tmp12, %bb9 ], [ undef, %bb27 ], [ 0.000000e+00, %bb24 ]
%tmp16 = phi float [ %tmp11, %bb9 ], [ undef, %bb27 ], [ %tmp25, %bb24 ]
- %tmp17 = fmul float 10.5, %tmp16
- %tmp18 = fmul float 11.5, %tmp15
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %tmp18, float %tmp17, float %tmp17, float %tmp17)
+ %tmp17 = fmul float 1.050000e+01, %tmp16
+ %tmp18 = fmul float 1.150000e+01, %tmp15
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp18, float %tmp17, float %tmp17, float %tmp17, i1 true, i1 true) #0
ret void
bb23: ; preds = %bb13
@@ -97,13 +95,9 @@ bb27: ; preds = %bb24
br label %bb14
}
-; Function Attrs: nounwind readnone
-declare <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
-; Function Attrs: nounwind readnone
-declare i32 @llvm.SI.packf16(float, float) #1
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
+declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1
attributes #0 = { nounwind }
-attributes #1 = { nounwind readnone }
+attributes #1 = { nounwind readonly }
diff --git a/test/CodeGen/AMDGPU/subreg-coalescer-undef-use.ll b/test/CodeGen/AMDGPU/subreg-coalescer-undef-use.ll
index 72a1f1e25b30..35615c40d498 100644
--- a/test/CodeGen/AMDGPU/subreg-coalescer-undef-use.ll
+++ b/test/CodeGen/AMDGPU/subreg-coalescer-undef-use.ll
@@ -20,7 +20,7 @@ target triple="amdgcn--"
; CHECK-NEXT: s_mov_b32 s6, -1
; CHECK-NEXT: buffer_store_dword v1, off, s[4:7], 0
; CHECK-NEXT: s_endpgm
-define void @foobar(float %a0, float %a1, float addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @foobar(float %a0, float %a1, float addrspace(1)* %out) nounwind {
entry:
%v0 = insertelement <4 x float> undef, float %a0, i32 0
%tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
diff --git a/test/CodeGen/AMDGPU/subreg-eliminate-dead.ll b/test/CodeGen/AMDGPU/subreg-eliminate-dead.ll
index 8bd995a8ecbb..57c267e54a14 100644
--- a/test/CodeGen/AMDGPU/subreg-eliminate-dead.ll
+++ b/test/CodeGen/AMDGPU/subreg-eliminate-dead.ll
@@ -5,7 +5,7 @@
; Just make sure this test doesn't crash.
; CHECK-LABEL: foobar:
; CHECK: s_endpgm
-define void @foobar() {
+define amdgpu_kernel void @foobar() {
%v0 = icmp eq <4 x i32> undef, <i32 0, i32 1, i32 2, i32 3>
%v3 = sext <4 x i1> %v0 to <4 x i32>
%v4 = extractelement <4 x i32> %v3, i32 1
diff --git a/test/CodeGen/AMDGPU/subreg-intervals.mir b/test/CodeGen/AMDGPU/subreg-intervals.mir
index c4e00215159b..c477fe9bc6d3 100644
--- a/test/CodeGen/AMDGPU/subreg-intervals.mir
+++ b/test/CodeGen/AMDGPU/subreg-intervals.mir
@@ -10,8 +10,8 @@
# CHECK-LABEL: Machine code for function test1:
--- |
- define void @test0() { ret void }
- define void @test1() { ret void }
+ define amdgpu_kernel void @test0() { ret void }
+ define amdgpu_kernel void @test1() { ret void }
...
---
name: test0
diff --git a/test/CodeGen/AMDGPU/subreg_interference.mir b/test/CodeGen/AMDGPU/subreg_interference.mir
new file mode 100644
index 000000000000..24d06a576c2a
--- /dev/null
+++ b/test/CodeGen/AMDGPU/subreg_interference.mir
@@ -0,0 +1,24 @@
+# RUN: llc -o - %s -mtriple=amdgcn--amdhsa -verify-machineinstrs -run-pass=greedy,virtregrewriter | FileCheck %s
+---
+# We should not detect any interference between %0 and %1 here, and should
+# only allocate sgpr0-sgpr3.
+#
+# CHECK-LABEL: func0
+# CHECK: S_NOP 0, implicit-def %sgpr0
+# CHECK: S_NOP 0, implicit-def %sgpr3
+# CHECK: S_NOP 0, implicit-def %sgpr1
+# CHECK: S_NOP 0, implicit-def %sgpr2
+# CHECK: S_NOP 0, implicit %sgpr0, implicit %sgpr3
+# CHECK: S_NOP 0, implicit %sgpr1, implicit %sgpr2
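+#
+# %0 touches only sub0 and sub3 while %1 touches only sub1 and sub2, so the
+# two sreg_128 values occupy disjoint lanes and can share the same physical
+# quad despite their overlapping live ranges.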
+name: func0
+body: |
+ bb.0:
+ S_NOP 0, implicit-def undef %0.sub0 : sreg_128
+ S_NOP 0, implicit-def %0.sub3
+ S_NOP 0, implicit-def undef %1.sub1 : sreg_128
+ S_NOP 0, implicit-def %1.sub2
+
+
+ S_NOP 0, implicit %0.sub0, implicit %0.sub3
+ S_NOP 0, implicit %1.sub1, implicit %1.sub2
+...
diff --git a/test/CodeGen/AMDGPU/target-cpu.ll b/test/CodeGen/AMDGPU/target-cpu.ll
index cf80ff3f4c83..466e89ebee80 100644
--- a/test/CodeGen/AMDGPU/target-cpu.ll
+++ b/test/CodeGen/AMDGPU/target-cpu.ll
@@ -14,7 +14,7 @@ declare void @llvm.amdgcn.s.dcache.wb() #0
; CHECK: s_movk_i32 [[OFFSETREG:s[0-9]+]], 0x400
; CHECK: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, [[OFFSETREG]]
; CHECK: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64
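; The 1024-byte kernarg offset is encoded differently per target: SI's 8-bit
; dword immediate cannot reach it, so a byte-offset register is used (s_movk
; 0x400); CI takes a dword immediate (0x100 on bonaire); VI takes a byte
; immediate (0x400 on fiji).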
-define void @target_none() #0 {
+define amdgpu_kernel void @target_none() #0 {
%kernargs = call i8 addrspace(2)* @llvm.amdgcn.kernarg.segment.ptr()
%kernargs.gep = getelementptr inbounds i8, i8 addrspace(2)* %kernargs, i64 1024
%kernargs.gep.cast = bitcast i8 addrspace(2)* %kernargs.gep to i32 addrspace(1)* addrspace(2)*
@@ -30,7 +30,7 @@ define void @target_none() #0 {
; CHECK: s_movk_i32 [[OFFSETREG:s[0-9]+]], 0x400
; CHECK: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, [[OFFSETREG]]
; CHECK: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64
-define void @target_tahiti() #1 {
+define amdgpu_kernel void @target_tahiti() #1 {
%kernargs = call i8 addrspace(2)* @llvm.amdgcn.kernarg.segment.ptr()
%kernargs.gep = getelementptr inbounds i8, i8 addrspace(2)* %kernargs, i64 1024
%kernargs.gep.cast = bitcast i8 addrspace(2)* %kernargs.gep to i32 addrspace(1)* addrspace(2)*
@@ -46,7 +46,7 @@ define void @target_tahiti() #1 {
; CHECK: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x100
; CHECK: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64
; CHECK: s_dcache_inv_vol
-define void @target_bonaire() #3 {
+define amdgpu_kernel void @target_bonaire() #3 {
%kernargs = call i8 addrspace(2)* @llvm.amdgcn.kernarg.segment.ptr()
%kernargs.gep = getelementptr inbounds i8, i8 addrspace(2)* %kernargs, i64 1024
%kernargs.gep.cast = bitcast i8 addrspace(2)* %kernargs.gep to i32 addrspace(1)* addrspace(2)*
@@ -63,7 +63,7 @@ define void @target_bonaire() #3 {
; CHECK: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x400
; CHECK: flat_store_dword
; CHECK: s_dcache_wb{{$}}
-define void @target_fiji() #4 {
+define amdgpu_kernel void @target_fiji() #4 {
%kernargs = call i8 addrspace(2)* @llvm.amdgcn.kernarg.segment.ptr()
%kernargs.gep = getelementptr inbounds i8, i8 addrspace(2)* %kernargs, i64 1024
%kernargs.gep.cast = bitcast i8 addrspace(2)* %kernargs.gep to i32 addrspace(1)* addrspace(2)*
@@ -79,7 +79,7 @@ define void @target_fiji() #4 {
; CHECK-LABEL: {{^}}promote_alloca_enabled:
; CHECK: ds_read_b32
; CHECK: ; LDSByteSize: 5120
-define void @promote_alloca_enabled(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #5 {
+define amdgpu_kernel void @promote_alloca_enabled(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #5 {
entry:
%stack = alloca [5 x i32], align 4
%tmp = load i32, i32 addrspace(1)* %in, align 4
@@ -93,7 +93,7 @@ entry:
; CHECK: SCRATCH_RSRC_DWORD0
; CHECK: SCRATCH_RSRC_DWORD1
; CHECK: ScratchSize: 24
-define void @promote_alloca_disabled(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #6 {
+define amdgpu_kernel void @promote_alloca_disabled(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #6 {
entry:
%stack = alloca [5 x i32], align 4
%tmp = load i32, i32 addrspace(1)* %in, align 4
diff --git a/test/CodeGen/AMDGPU/trap.ll b/test/CodeGen/AMDGPU/trap.ll
index 1555cfe39b1e..77ad895d0e86 100644
--- a/test/CodeGen/AMDGPU/trap.ll
+++ b/test/CodeGen/AMDGPU/trap.ll
@@ -1,13 +1,81 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s 2>&1 | FileCheck -check-prefix=GCN %s
+; RUN: llc -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=HSA-TRAP %s
-; GCN: warning: <unknown>:0:0: in function trap void (): trap handler not supported
+; RUN: llc -mtriple=amdgcn--amdhsa -mattr=+trap-handler -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=HSA-TRAP %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mattr=-trap-handler -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=NO-HSA-TRAP %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mattr=-trap-handler -verify-machineinstrs < %s 2>&1 | FileCheck -check-prefix=GCN -check-prefix=GCN-WARNING %s
+
+; Enable the trap handler feature.
+; RUN: llc -mtriple=amdgcn-unknown-mesa3d -mattr=+trap-handler -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=NO-MESA-TRAP -check-prefix=TRAP-BIT -check-prefix=MESA-TRAP %s
+; RUN: llc -mtriple=amdgcn-unknown-mesa3d -mattr=+trap-handler -verify-machineinstrs < %s 2>&1 | FileCheck -check-prefix=GCN -check-prefix=GCN-WARNING -check-prefix=TRAP-BIT %s
+
+; Disable the trap handler feature.
+; RUN: llc -mtriple=amdgcn-unknown-mesa3d -mattr=-trap-handler -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=NO-MESA-TRAP -check-prefix=NO-TRAP-BIT -check-prefix=NOMESA-TRAP %s
+; RUN: llc -mtriple=amdgcn-unknown-mesa3d -mattr=-trap-handler -verify-machineinstrs < %s 2>&1 | FileCheck -check-prefix=GCN -check-prefix=GCN-WARNING -check-prefix=NO-TRAP-BIT %s
+
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s 2>&1 | FileCheck -check-prefix=GCN -check-prefix=GCN-WARNING %s
declare void @llvm.trap() #0
+declare void @llvm.debugtrap() #0
+
+; MESA-TRAP: .section .AMDGPU.config
+; MESA-TRAP: .long 47180
+; MESA-TRAP-NEXT: .long 208
+
+; NOMESA-TRAP: .section .AMDGPU.config
+; NOMESA-TRAP: .long 47180
+; NOMESA-TRAP-NEXT: .long 144
+
+; GCN-LABEL: {{^}}hsa_trap:
+; HSA-TRAP: enable_trap_handler = 1
+; HSA-TRAP: s_mov_b64 s[0:1], s[4:5]
+; HSA-TRAP: s_trap 2
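+; (s_trap 2 is the trap code used for llvm.trap; the preceding s_mov_b64
+; copies the queue pointer into s[0:1], where the HSA trap handler expects
+; it.)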
+
+; For llvm.trap on the HSA path without the trap-handler ABI, generate an
+; s_endpgm instruction directly, without emitting any warning.
+; NO-HSA-TRAP: enable_trap_handler = 0
+; NO-HSA-TRAP: s_endpgm
+; NO-HSA-TRAP: COMPUTE_PGM_RSRC2:TRAP_HANDLER: 0
+
+; TRAP-BIT: enable_trap_handler = 1
+; NO-TRAP-BIT: enable_trap_handler = 0
+; NO-MESA-TRAP: s_endpgm
+define amdgpu_kernel void @hsa_trap() {
+ call void @llvm.trap()
+ ret void
+}
+
+; MESA-TRAP: .section .AMDGPU.config
+; MESA-TRAP: .long 47180
+; MESA-TRAP-NEXT: .long 208
+
+; NOMESA-TRAP: .section .AMDGPU.config
+; NOMESA-TRAP: .long 47180
+; NOMESA-TRAP-NEXT: .long 144
+
+; GCN-WARNING: warning: <unknown>:0:0: in function hsa_debugtrap void (): debugtrap handler not supported
+; GCN-LABEL: {{^}}hsa_debugtrap:
+; HSA-TRAP: enable_trap_handler = 1
+; HSA-TRAP: s_mov_b64 s[0:1], s[4:5]
+; HSA-TRAP: s_trap 3
+
+; For llvm.debugtrap on the non-HSA path without the trap-handler ABI,
+; generate a warning and an s_endpgm instruction.
+; NO-HSA-TRAP: enable_trap_handler = 0
+; NO-HSA-TRAP: s_endpgm
+
+; TRAP-BIT: enable_trap_handler = 1
+; NO-TRAP-BIT: enable_trap_handler = 0
+; NO-MESA-TRAP: s_endpgm
+define amdgpu_kernel void @hsa_debugtrap() {
+ call void @llvm.debugtrap()
+ ret void
+}
+; For the non-HSA path.
; GCN-LABEL: {{^}}trap:
-; GCN: s_endpgm
-; GCN-NEXT: s_endpgm
-define void @trap() {
+; TRAP-BIT: enable_trap_handler = 1
+; NO-TRAP-BIT: enable_trap_handler = 0
+; NO-HSA-TRAP: s_endpgm
+; NO-MESA-TRAP: s_endpgm
+define amdgpu_kernel void @trap() {
call void @llvm.trap()
ret void
}
diff --git a/test/CodeGen/AMDGPU/trunc-bitcast-vector.ll b/test/CodeGen/AMDGPU/trunc-bitcast-vector.ll
index a331475820a0..f90040385f75 100644
--- a/test/CodeGen/AMDGPU/trunc-bitcast-vector.ll
+++ b/test/CodeGen/AMDGPU/trunc-bitcast-vector.ll
@@ -4,7 +4,7 @@
; CHECK-LABEL: {{^}}trunc_i64_bitcast_v2i32:
; CHECK: buffer_load_dword v
; CHECK: buffer_store_dword v
-define void @trunc_i64_bitcast_v2i32(i32 addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @trunc_i64_bitcast_v2i32(i32 addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%ld = load <2 x i32>, <2 x i32> addrspace(1)* %in
%bc = bitcast <2 x i32> %ld to i64
%trunc = trunc i64 %bc to i32
@@ -15,7 +15,7 @@ define void @trunc_i64_bitcast_v2i32(i32 addrspace(1)* %out, <2 x i32> addrspace
; CHECK-LABEL: {{^}}trunc_i96_bitcast_v3i32:
; CHECK: buffer_load_dword v
; CHECK: buffer_store_dword v
-define void @trunc_i96_bitcast_v3i32(i32 addrspace(1)* %out, <3 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @trunc_i96_bitcast_v3i32(i32 addrspace(1)* %out, <3 x i32> addrspace(1)* %in) {
%ld = load <3 x i32>, <3 x i32> addrspace(1)* %in
%bc = bitcast <3 x i32> %ld to i96
%trunc = trunc i96 %bc to i32
@@ -26,7 +26,7 @@ define void @trunc_i96_bitcast_v3i32(i32 addrspace(1)* %out, <3 x i32> addrspace
; CHECK-LABEL: {{^}}trunc_i128_bitcast_v4i32:
; CHECK: buffer_load_dword v
; CHECK: buffer_store_dword v
-define void @trunc_i128_bitcast_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @trunc_i128_bitcast_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%ld = load <4 x i32>, <4 x i32> addrspace(1)* %in
%bc = bitcast <4 x i32> %ld to i128
%trunc = trunc i128 %bc to i32
@@ -38,7 +38,7 @@ define void @trunc_i128_bitcast_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspac
; CHECK-LABEL: {{^}}trunc_i16_bitcast_v2i16:
; CHECK: buffer_load_dword [[VAL:v[0-9]+]]
; CHECK: buffer_store_short [[VAL]]
-define void @trunc_i16_bitcast_v2i16(i16 addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
+define amdgpu_kernel void @trunc_i16_bitcast_v2i16(i16 addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
%ld = load <2 x i16>, <2 x i16> addrspace(1)* %in
%bc = bitcast <2 x i16> %ld to i32
%trunc = trunc i32 %bc to i16
@@ -54,7 +54,7 @@ define void @trunc_i16_bitcast_v2i16(i16 addrspace(1)* %out, <2 x i16> addrspace
; SI: buffer_load_dword v[[VAL:[0-9]+]]
; VI: buffer_load_dwordx2 v{{\[}}[[VAL:[0-9]+]]
; CHECK: buffer_store_short [[VAL]]
-define void @trunc_i16_bitcast_v4i16(i16 addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
+define amdgpu_kernel void @trunc_i16_bitcast_v4i16(i16 addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
%ld = load <4 x i16>, <4 x i16> addrspace(1)* %in
%bc = bitcast <4 x i16> %ld to i64
%trunc = trunc i64 %bc to i16
@@ -66,7 +66,7 @@ define void @trunc_i16_bitcast_v4i16(i16 addrspace(1)* %out, <4 x i16> addrspace
; CHECK-LABEL: {{^}}trunc_i8_bitcast_v2i8:
; CHECK: buffer_load_ubyte [[VAL:v[0-9]+]]
; CHECK: buffer_store_byte [[VAL]]
-define void @trunc_i8_bitcast_v2i8(i8 addrspace(1)* %out, <2 x i8> addrspace(1)* %in) {
+define amdgpu_kernel void @trunc_i8_bitcast_v2i8(i8 addrspace(1)* %out, <2 x i8> addrspace(1)* %in) {
%ld = load <2 x i8>, <2 x i8> addrspace(1)* %in
%bc = bitcast <2 x i8> %ld to i16
%trunc = trunc i16 %bc to i8
@@ -77,7 +77,7 @@ define void @trunc_i8_bitcast_v2i8(i8 addrspace(1)* %out, <2 x i8> addrspace(1)*
; CHECK-LABEL: {{^}}trunc_i32_bitcast_v4i8:
; CHECK: buffer_load_dword [[VAL:v[0-9]+]]
; CHECK: buffer_store_byte [[VAL]]
-define void @trunc_i32_bitcast_v4i8(i8 addrspace(1)* %out, <4 x i8> addrspace(1)* %in) {
+define amdgpu_kernel void @trunc_i32_bitcast_v4i8(i8 addrspace(1)* %out, <4 x i8> addrspace(1)* %in) {
%ld = load <4 x i8>, <4 x i8> addrspace(1)* %in
%bc = bitcast <4 x i8> %ld to i32
%trunc = trunc i32 %bc to i8
@@ -88,7 +88,7 @@ define void @trunc_i32_bitcast_v4i8(i8 addrspace(1)* %out, <4 x i8> addrspace(1)
; CHECK-LABEL: {{^}}trunc_i24_bitcast_v3i8:
; CHECK: buffer_load_dword [[VAL:v[0-9]+]]
; CHECK: buffer_store_byte [[VAL]]
-define void @trunc_i24_bitcast_v3i8(i8 addrspace(1)* %out, <3 x i8> addrspace(1)* %in) {
+define amdgpu_kernel void @trunc_i24_bitcast_v3i8(i8 addrspace(1)* %out, <3 x i8> addrspace(1)* %in) {
%ld = load <3 x i8>, <3 x i8> addrspace(1)* %in
%bc = bitcast <3 x i8> %ld to i24
%trunc = trunc i24 %bc to i8
diff --git a/test/CodeGen/AMDGPU/trunc-cmp-constant.ll b/test/CodeGen/AMDGPU/trunc-cmp-constant.ll
index 7a4bced9d436..cb8d36550331 100644
--- a/test/CodeGen/AMDGPU/trunc-cmp-constant.ll
+++ b/test/CodeGen/AMDGPU/trunc-cmp-constant.ll
@@ -9,7 +9,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
; SI: v_cmp_eq_u32_e32 vcc, 0, [[TMP]]{{$}}
; SI: v_cndmask_b32_e64
; SI: buffer_store_byte
-define void @sextload_i1_to_i32_trunc_cmp_eq_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @sextload_i1_to_i32_trunc_cmp_eq_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
%load = load i1, i1 addrspace(1)* %in
%ext = sext i1 %load to i32
%cmp = icmp eq i32 %ext, 0
@@ -25,7 +25,7 @@ define void @sextload_i1_to_i32_trunc_cmp_eq_0(i1 addrspace(1)* %out, i1 addrspa
; SI-NEXT: s_xor_b64 [[NEG:s\[[0-9]+:[0-9]+\]]], vcc, -1
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[NEG]]
; SI: buffer_store_byte [[RESULT]]
-define void @zextload_i1_to_i32_trunc_cmp_eq_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @zextload_i1_to_i32_trunc_cmp_eq_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
%load = load i1, i1 addrspace(1)* %in
%ext = zext i1 %load to i32
%cmp = icmp eq i32 %ext, 0
@@ -36,7 +36,7 @@ define void @zextload_i1_to_i32_trunc_cmp_eq_0(i1 addrspace(1)* %out, i1 addrspa
; FUNC-LABEL: {{^}}sextload_i1_to_i32_trunc_cmp_eq_1:
; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 0{{$}}
; SI: buffer_store_byte [[RESULT]]
-define void @sextload_i1_to_i32_trunc_cmp_eq_1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @sextload_i1_to_i32_trunc_cmp_eq_1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
%load = load i1, i1 addrspace(1)* %in
%ext = sext i1 %load to i32
%cmp = icmp eq i32 %ext, 1
@@ -48,7 +48,7 @@ define void @sextload_i1_to_i32_trunc_cmp_eq_1(i1 addrspace(1)* %out, i1 addrspa
; SI: buffer_load_ubyte [[LOAD:v[0-9]+]]
; SI: v_and_b32_e32 [[RESULT:v[0-9]+]], 1, [[LOAD]]
; SI: buffer_store_byte [[RESULT]]
-define void @zextload_i1_to_i32_trunc_cmp_eq_1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @zextload_i1_to_i32_trunc_cmp_eq_1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
%load = load i1, i1 addrspace(1)* %in
%ext = zext i1 %load to i32
%cmp = icmp eq i32 %ext, 1
@@ -60,7 +60,7 @@ define void @zextload_i1_to_i32_trunc_cmp_eq_1(i1 addrspace(1)* %out, i1 addrspa
; SI: buffer_load_ubyte [[LOAD:v[0-9]+]]
; SI: v_and_b32_e32 [[RESULT:v[0-9]+]], 1, [[LOAD]]
; SI: buffer_store_byte [[RESULT]]
-define void @sextload_i1_to_i32_trunc_cmp_eq_neg1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @sextload_i1_to_i32_trunc_cmp_eq_neg1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
%load = load i1, i1 addrspace(1)* %in
%ext = sext i1 %load to i32
%cmp = icmp eq i32 %ext, -1
@@ -71,7 +71,7 @@ define void @sextload_i1_to_i32_trunc_cmp_eq_neg1(i1 addrspace(1)* %out, i1 addr
; FUNC-LABEL: {{^}}zextload_i1_to_i32_trunc_cmp_eq_neg1:
; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 0{{$}}
; SI: buffer_store_byte [[RESULT]]
-define void @zextload_i1_to_i32_trunc_cmp_eq_neg1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @zextload_i1_to_i32_trunc_cmp_eq_neg1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
%load = load i1, i1 addrspace(1)* %in
%ext = zext i1 %load to i32
%cmp = icmp eq i32 %ext, -1
@@ -84,7 +84,7 @@ define void @zextload_i1_to_i32_trunc_cmp_eq_neg1(i1 addrspace(1)* %out, i1 addr
; SI: buffer_load_ubyte [[LOAD:v[0-9]+]]
; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]]
; SI: buffer_store_byte [[RESULT]]
-define void @sextload_i1_to_i32_trunc_cmp_ne_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @sextload_i1_to_i32_trunc_cmp_ne_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
%load = load i1, i1 addrspace(1)* %in
%ext = sext i1 %load to i32
%cmp = icmp ne i32 %ext, 0
@@ -96,7 +96,7 @@ define void @sextload_i1_to_i32_trunc_cmp_ne_0(i1 addrspace(1)* %out, i1 addrspa
; SI: buffer_load_ubyte [[LOAD:v[0-9]+]]
; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]]
; SI: buffer_store_byte [[RESULT]]
-define void @zextload_i1_to_i32_trunc_cmp_ne_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @zextload_i1_to_i32_trunc_cmp_ne_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
%load = load i1, i1 addrspace(1)* %in
%ext = zext i1 %load to i32
%cmp = icmp ne i32 %ext, 0
@@ -107,7 +107,7 @@ define void @zextload_i1_to_i32_trunc_cmp_ne_0(i1 addrspace(1)* %out, i1 addrspa
; FUNC-LABEL: {{^}}sextload_i1_to_i32_trunc_cmp_ne_1:
; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 1{{$}}
; SI: buffer_store_byte [[RESULT]]
-define void @sextload_i1_to_i32_trunc_cmp_ne_1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @sextload_i1_to_i32_trunc_cmp_ne_1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
%load = load i1, i1 addrspace(1)* %in
%ext = sext i1 %load to i32
%cmp = icmp ne i32 %ext, 1
@@ -122,7 +122,7 @@ define void @sextload_i1_to_i32_trunc_cmp_ne_1(i1 addrspace(1)* %out, i1 addrspa
; SI-NEXT: s_xor_b64 [[NEG:s\[[0-9]+:[0-9]+\]]], vcc, -1
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[NEG]]
; SI: buffer_store_byte [[RESULT]]
-define void @zextload_i1_to_i32_trunc_cmp_ne_1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @zextload_i1_to_i32_trunc_cmp_ne_1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
%load = load i1, i1 addrspace(1)* %in
%ext = zext i1 %load to i32
%cmp = icmp ne i32 %ext, 1
@@ -137,7 +137,7 @@ define void @zextload_i1_to_i32_trunc_cmp_ne_1(i1 addrspace(1)* %out, i1 addrspa
; XSI: v_cmp_eq_u32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], [[TMP]], 0{{$}}
; XSI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[CMP0]]
; XSI-NEXT: buffer_store_byte [[RESULT]]
-define void @sextload_i1_to_i32_trunc_cmp_ne_neg1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @sextload_i1_to_i32_trunc_cmp_ne_neg1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
%load = load i1, i1 addrspace(1)* %in
%ext = sext i1 %load to i32
%cmp = icmp ne i32 %ext, -1
@@ -148,7 +148,7 @@ define void @sextload_i1_to_i32_trunc_cmp_ne_neg1(i1 addrspace(1)* %out, i1 addr
; FUNC-LABEL: {{^}}zextload_i1_to_i32_trunc_cmp_ne_neg1:
; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 1{{$}}
; SI: buffer_store_byte [[RESULT]]
-define void @zextload_i1_to_i32_trunc_cmp_ne_neg1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @zextload_i1_to_i32_trunc_cmp_ne_neg1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
%load = load i1, i1 addrspace(1)* %in
%ext = zext i1 %load to i32
%cmp = icmp ne i32 %ext, -1
@@ -162,7 +162,7 @@ define void @zextload_i1_to_i32_trunc_cmp_ne_neg1(i1 addrspace(1)* %out, i1 addr
; SI: v_cmp_ne_u32_e32 vcc, -1, [[LOAD]]{{$}}
; SI-NEXT: v_cndmask_b32_e64
; SI: {{buffer|flat}}_store_byte
-define void @masked_load_i1_to_i32_trunc_cmp_ne_neg1(i1 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @masked_load_i1_to_i32_trunc_cmp_ne_neg1(i1 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
%tid.x = call i32 @llvm.amdgcn.workitem.id.x()
%in.ptr = getelementptr i8, i8 addrspace(1)* %in, i32 %tid.x
%load = load i8, i8 addrspace(1)* %in.ptr
diff --git a/test/CodeGen/AMDGPU/trunc-store-f64-to-f16.ll b/test/CodeGen/AMDGPU/trunc-store-f64-to-f16.ll
index 03b8af0610d7..d67b8f981b28 100644
--- a/test/CodeGen/AMDGPU/trunc-store-f64-to-f16.ll
+++ b/test/CodeGen/AMDGPU/trunc-store-f64-to-f16.ll
@@ -2,7 +2,7 @@
; GCN-LABEL: {{^}}global_truncstore_f64_to_f16:
; GCN: s_endpgm
-define void @global_truncstore_f64_to_f16(half addrspace(1)* %out, double addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_truncstore_f64_to_f16(half addrspace(1)* %out, double addrspace(1)* %in) #0 {
%val = load double, double addrspace(1)* %in
%cvt = fptrunc double %val to half
store half %cvt, half addrspace(1)* %out
@@ -11,7 +11,7 @@ define void @global_truncstore_f64_to_f16(half addrspace(1)* %out, double addrsp
; GCN-LABEL: {{^}}global_truncstore_v2f64_to_v2f16:
; GCN: s_endpgm
-define void @global_truncstore_v2f64_to_v2f16(<2 x half> addrspace(1)* %out, <2 x double> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_truncstore_v2f64_to_v2f16(<2 x half> addrspace(1)* %out, <2 x double> addrspace(1)* %in) #0 {
%val = load <2 x double>, <2 x double> addrspace(1)* %in
%cvt = fptrunc <2 x double> %val to <2 x half>
store <2 x half> %cvt, <2 x half> addrspace(1)* %out
@@ -20,7 +20,7 @@ define void @global_truncstore_v2f64_to_v2f16(<2 x half> addrspace(1)* %out, <2
; GCN-LABEL: {{^}}global_truncstore_v3f64_to_v3f16:
; GCN: s_endpgm
-define void @global_truncstore_v3f64_to_v3f16(<3 x half> addrspace(1)* %out, <3 x double> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_truncstore_v3f64_to_v3f16(<3 x half> addrspace(1)* %out, <3 x double> addrspace(1)* %in) #0 {
%val = load <3 x double>, <3 x double> addrspace(1)* %in
%cvt = fptrunc <3 x double> %val to <3 x half>
store <3 x half> %cvt, <3 x half> addrspace(1)* %out
@@ -29,7 +29,7 @@ define void @global_truncstore_v3f64_to_v3f16(<3 x half> addrspace(1)* %out, <3
; GCN-LABEL: {{^}}global_truncstore_v4f64_to_v4f16:
; GCN: s_endpgm
-define void @global_truncstore_v4f64_to_v4f16(<4 x half> addrspace(1)* %out, <4 x double> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_truncstore_v4f64_to_v4f16(<4 x half> addrspace(1)* %out, <4 x double> addrspace(1)* %in) #0 {
%val = load <4 x double>, <4 x double> addrspace(1)* %in
%cvt = fptrunc <4 x double> %val to <4 x half>
store <4 x half> %cvt, <4 x half> addrspace(1)* %out
@@ -38,7 +38,7 @@ define void @global_truncstore_v4f64_to_v4f16(<4 x half> addrspace(1)* %out, <4
; GCN-LABEL: {{^}}global_truncstore_v8f64_to_v8f16:
; GCN: s_endpgm
-define void @global_truncstore_v8f64_to_v8f16(<8 x half> addrspace(1)* %out, <8 x double> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_truncstore_v8f64_to_v8f16(<8 x half> addrspace(1)* %out, <8 x double> addrspace(1)* %in) #0 {
%val = load <8 x double>, <8 x double> addrspace(1)* %in
%cvt = fptrunc <8 x double> %val to <8 x half>
store <8 x half> %cvt, <8 x half> addrspace(1)* %out
@@ -47,7 +47,7 @@ define void @global_truncstore_v8f64_to_v8f16(<8 x half> addrspace(1)* %out, <8
; GCN-LABEL: {{^}}global_truncstore_v16f64_to_v16f16:
; GCN: s_endpgm
-define void @global_truncstore_v16f64_to_v16f16(<16 x half> addrspace(1)* %out, <16 x double> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @global_truncstore_v16f64_to_v16f16(<16 x half> addrspace(1)* %out, <16 x double> addrspace(1)* %in) #0 {
%val = load <16 x double>, <16 x double> addrspace(1)* %in
%cvt = fptrunc <16 x double> %val to <16 x half>
store <16 x half> %cvt, <16 x half> addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/trunc-store-i1.ll b/test/CodeGen/AMDGPU/trunc-store-i1.ll
index da2a5b43dad5..4ea2352f57f3 100644
--- a/test/CodeGen/AMDGPU/trunc-store-i1.ll
+++ b/test/CodeGen/AMDGPU/trunc-store-i1.ll
@@ -7,7 +7,7 @@
; SI: s_and_b32 [[SREG:s[0-9]+]], [[LOAD]], 1
; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], [[SREG]]
; SI: buffer_store_byte [[VREG]],
-define void @global_truncstore_i32_to_i1(i1 addrspace(1)* %out, i32 %val) nounwind {
+define amdgpu_kernel void @global_truncstore_i32_to_i1(i1 addrspace(1)* %out, i32 %val) nounwind {
%trunc = trunc i32 %val to i1
store i1 %trunc, i1 addrspace(1)* %out, align 1
ret void
@@ -15,7 +15,7 @@ define void @global_truncstore_i32_to_i1(i1 addrspace(1)* %out, i32 %val) nounwi
; SI-LABEL: {{^}}global_truncstore_i64_to_i1:
; SI: buffer_store_byte
-define void @global_truncstore_i64_to_i1(i1 addrspace(1)* %out, i64 %val) nounwind {
+define amdgpu_kernel void @global_truncstore_i64_to_i1(i1 addrspace(1)* %out, i64 %val) nounwind {
%trunc = trunc i64 %val to i1
store i1 %trunc, i1 addrspace(1)* %out, align 1
ret void
@@ -26,13 +26,13 @@ define void @global_truncstore_i64_to_i1(i1 addrspace(1)* %out, i64 %val) nounwi
; SI: s_and_b32 [[SREG:s[0-9]+]], [[LOAD]], 1
; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], [[SREG]]
; SI: buffer_store_byte [[VREG]],
-define void @s_arg_global_truncstore_i16_to_i1(i1 addrspace(1)* %out, i16 %val) nounwind {
+define amdgpu_kernel void @s_arg_global_truncstore_i16_to_i1(i1 addrspace(1)* %out, i16 %val) nounwind {
%trunc = trunc i16 %val to i1
store i1 %trunc, i1 addrspace(1)* %out, align 1
ret void
}
; SI-LABEL: {{^}}global_truncstore_i16_to_i1:
-define void @global_truncstore_i16_to_i1(i1 addrspace(1)* %out, i16 %val0, i16 %val1) nounwind {
+define amdgpu_kernel void @global_truncstore_i16_to_i1(i1 addrspace(1)* %out, i16 %val0, i16 %val1) nounwind {
%add = add i16 %val0, %val1
%trunc = trunc i16 %add to i1
store i1 %trunc, i1 addrspace(1)* %out, align 1
diff --git a/test/CodeGen/AMDGPU/trunc-store.ll b/test/CodeGen/AMDGPU/trunc-store.ll
index c6727e1e1273..f45de679588f 100644
--- a/test/CodeGen/AMDGPU/trunc-store.ll
+++ b/test/CodeGen/AMDGPU/trunc-store.ll
@@ -3,7 +3,7 @@
; FUNC-LABEL: {{^}}truncstore_arg_v16i32_to_v16i8:
; SI: buffer_store_dwordx4
-define void @truncstore_arg_v16i32_to_v16i8(<16 x i8> addrspace(1)* %out, <16 x i32> %in) {
+define amdgpu_kernel void @truncstore_arg_v16i32_to_v16i8(<16 x i8> addrspace(1)* %out, <16 x i32> %in) {
%trunc = trunc <16 x i32> %in to <16 x i8>
store <16 x i8> %trunc, <16 x i8> addrspace(1)* %out
ret void
@@ -11,7 +11,7 @@ define void @truncstore_arg_v16i32_to_v16i8(<16 x i8> addrspace(1)* %out, <16 x
; FUNC-LABEL: {{^}}truncstore_arg_v16i64_to_v16i8:
; SI: buffer_store_dwordx4
-define void @truncstore_arg_v16i64_to_v16i8(<16 x i8> addrspace(1)* %out, <16 x i64> %in) {
+define amdgpu_kernel void @truncstore_arg_v16i64_to_v16i8(<16 x i8> addrspace(1)* %out, <16 x i64> %in) {
%trunc = trunc <16 x i64> %in to <16 x i8>
store <16 x i8> %trunc, <16 x i8> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/trunc-vector-store-assertion-failure.ll b/test/CodeGen/AMDGPU/trunc-vector-store-assertion-failure.ll
index 878ea3f48995..3dbc10d2e9b5 100644
--- a/test/CodeGen/AMDGPU/trunc-vector-store-assertion-failure.ll
+++ b/test/CodeGen/AMDGPU/trunc-vector-store-assertion-failure.ll
@@ -6,7 +6,7 @@
; CHECK-LABEL: {{^}}test:
; CHECK: MEM_RAT_CACHELESS STORE_RAW
-define void @test(<4 x i8> addrspace(1)* %out, i32 %cond, <4 x i8> %in) {
+define amdgpu_kernel void @test(<4 x i8> addrspace(1)* %out, i32 %cond, <4 x i8> %in) {
entry:
%0 = icmp eq i32 %cond, 0
br i1 %0, label %if, label %done
diff --git a/test/CodeGen/AMDGPU/trunc.ll b/test/CodeGen/AMDGPU/trunc.ll
index 2c2ce4c5d351..0c91d52df0c0 100644
--- a/test/CodeGen/AMDGPU/trunc.ll
+++ b/test/CodeGen/AMDGPU/trunc.ll
@@ -4,7 +4,7 @@
declare i32 @llvm.r600.read.tidig.x() nounwind readnone
-define void @trunc_i64_to_i32_store(i32 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @trunc_i64_to_i32_store(i32 addrspace(1)* %out, i64 %in) {
; GCN-LABEL: {{^}}trunc_i64_to_i32_store:
; GCN: s_load_dword [[SLOAD:s[0-9]+]], s[0:1],
; GCN: v_mov_b32_e32 [[VLOAD:v[0-9]+]], [[SLOAD]]
@@ -28,7 +28,7 @@ define void @trunc_i64_to_i32_store(i32 addrspace(1)* %out, i64 %in) {
; SI: buffer_store_dword [[VSHL]]
; VI: flat_store_dword v[{{[0-9:]+}}], [[VSHL]]
-define void @trunc_load_shl_i64(i32 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @trunc_load_shl_i64(i32 addrspace(1)* %out, i64 %a) {
%b = shl i64 %a, 2
%result = trunc i64 %b to i32
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -46,7 +46,7 @@ define void @trunc_load_shl_i64(i32 addrspace(1)* %out, i64 %a) {
; VI: flat_store_dword v[{{[0-9:]+}}], v[[LO_VREG]]
; GCN: v_mov_b32_e32
; GCN: v_mov_b32_e32
-define void @trunc_shl_i64(i64 addrspace(1)* %out2, i32 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @trunc_shl_i64(i64 addrspace(1)* %out2, i32 addrspace(1)* %out, i64 %a) {
%aa = add i64 %a, 234 ; Prevent shrinking store.
%b = shl i64 %aa, 2
%result = trunc i64 %b to i32
@@ -56,9 +56,8 @@ define void @trunc_shl_i64(i64 addrspace(1)* %out2, i32 addrspace(1)* %out, i64
}
; GCN-LABEL: {{^}}trunc_i32_to_i1:
-; GCN: v_and_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
-; GCN: v_cmp_eq_u32
-define void @trunc_i32_to_i1(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) {
+; GCN: v_and_b32_e32 [[VREG:v[0-9]+]], 1, v{{[0-9]+}}
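+; A select of 1/0 on (trunc x to i1) reduces to (and x, 1), so the
+; v_cmp + v_cndmask sequence is no longer emitted.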
+define amdgpu_kernel void @trunc_i32_to_i1(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) {
%a = load i32, i32 addrspace(1)* %ptr, align 4
%trunc = trunc i32 %a to i1
%result = select i1 %trunc, i32 1, i32 0
@@ -67,9 +66,8 @@ define void @trunc_i32_to_i1(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) {
}
; GCN-LABEL: {{^}}trunc_i8_to_i1:
-; GCN: v_and_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
-; GCN: v_cmp_eq_u32
-define void @trunc_i8_to_i1(i8 addrspace(1)* %out, i8 addrspace(1)* %ptr) {
+; GCN: v_and_b32_e32 [[VREG:v[0-9]+]], 1, v{{[0-9]+}}
+define amdgpu_kernel void @trunc_i8_to_i1(i8 addrspace(1)* %out, i8 addrspace(1)* %ptr) {
%a = load i8, i8 addrspace(1)* %ptr, align 4
%trunc = trunc i8 %a to i1
%result = select i1 %trunc, i8 1, i8 0
@@ -78,9 +76,8 @@ define void @trunc_i8_to_i1(i8 addrspace(1)* %out, i8 addrspace(1)* %ptr) {
}
; GCN-LABEL: {{^}}sgpr_trunc_i16_to_i1:
-; GCN: s_and_b32 s{{[0-9]+}}, 1, s{{[0-9]+}}
-; GCN: v_cmp_eq_u32
-define void @sgpr_trunc_i16_to_i1(i16 addrspace(1)* %out, i16 %a) {
+; GCN: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 1
+define amdgpu_kernel void @sgpr_trunc_i16_to_i1(i16 addrspace(1)* %out, i16 %a) {
%trunc = trunc i16 %a to i1
%result = select i1 %trunc, i16 1, i16 0
store i16 %result, i16 addrspace(1)* %out, align 4
@@ -88,9 +85,8 @@ define void @sgpr_trunc_i16_to_i1(i16 addrspace(1)* %out, i16 %a) {
}
; GCN-LABEL: {{^}}sgpr_trunc_i32_to_i1:
-; GCN: s_and_b32 s{{[0-9]+}}, 1, s{{[0-9]+}}
-; GCN: v_cmp_eq_u32
-define void @sgpr_trunc_i32_to_i1(i32 addrspace(1)* %out, i32 %a) {
+; GCN: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 1
+define amdgpu_kernel void @sgpr_trunc_i32_to_i1(i32 addrspace(1)* %out, i32 %a) {
%trunc = trunc i32 %a to i1
%result = select i1 %trunc, i32 1, i32 0
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -103,7 +99,7 @@ define void @sgpr_trunc_i32_to_i1(i32 addrspace(1)* %out, i32 %a) {
; GCN: s_and_b32 [[MASKED:s[0-9]+]], 1, s[[SLO]]
; GCN: v_cmp_eq_u32_e64 s{{\[}}[[VLO:[0-9]+]]:[[VHI:[0-9]+]]], [[MASKED]], 1{{$}}
; GCN: v_cndmask_b32_e64 {{v[0-9]+}}, -12, 63, s{{\[}}[[VLO]]:[[VHI]]]
-define void @s_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 %x) {
+define amdgpu_kernel void @s_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 %x) {
%trunc = trunc i64 %x to i1
%sel = select i1 %trunc, i32 63, i32 -12
store i32 %sel, i32 addrspace(1)* %out
@@ -116,7 +112,7 @@ define void @s_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 %x) {
; GCN: v_and_b32_e32 [[MASKED:v[0-9]+]], 1, v[[VLO]]
; GCN: v_cmp_eq_u32_e32 vcc, 1, [[MASKED]]
; GCN: v_cndmask_b32_e64 {{v[0-9]+}}, -12, 63, vcc
-define void @v_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @v_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
%gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
diff --git a/test/CodeGen/AMDGPU/tti-unroll-prefs.ll b/test/CodeGen/AMDGPU/tti-unroll-prefs.ll
index 76c32afc1f21..7c369a312761 100644
--- a/test/CodeGen/AMDGPU/tti-unroll-prefs.ll
+++ b/test/CodeGen/AMDGPU/tti-unroll-prefs.ll
@@ -19,7 +19,7 @@
; CHECK: store i8 0, i8 addrspace(1)*
; CHECK-NOT: store i8 0, i8 addrspace(1)*
; CHECK: ret void
-define void @test(i8 addrspace(1)* nocapture %dst, i32 %a, i32 %b, i32 %c) {
+define amdgpu_kernel void @test(i8 addrspace(1)* nocapture %dst, i32 %a, i32 %b, i32 %c) {
entry:
%add = add nsw i32 %b, 4
%cmp = icmp sgt i32 %add, %a
diff --git a/test/CodeGen/AMDGPU/uaddo.ll b/test/CodeGen/AMDGPU/uaddo.ll
index 35af7119a300..632ccaa7e612 100644
--- a/test/CodeGen/AMDGPU/uaddo.ll
+++ b/test/CodeGen/AMDGPU/uaddo.ll
@@ -1,19 +1,16 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs< %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefixes=EG,FUNC %s
-declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
-declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
-
-; FUNC-LABEL: {{^}}uaddo_i64_zext:
-; SI: add
-; SI: addc
-; SI: addc
+; FUNC-LABEL: {{^}}s_uaddo_i64_zext:
+; GCN: s_add_u32
+; GCN: s_addc_u32
+; GCN: v_cmp_lt_u64_e32 vcc
; EG: ADDC_UINT
; EG: ADDC_UINT
-define void @uaddo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
- %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) nounwind
+define amdgpu_kernel void @s_uaddo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
+ %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue { i64, i1 } %uadd, 0
%carry = extractvalue { i64, i1 } %uadd, 1
%ext = zext i1 %carry to i64
@@ -22,13 +19,16 @@ define void @uaddo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
ret void
}
+; FIXME: Could do scalar
+
; FUNC-LABEL: {{^}}s_uaddo_i32:
-; SI: s_add_i32
+; GCN: v_add_i32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
; EG: ADDC_UINT
; EG: ADD_INT
-define void @s_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
- %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b) nounwind
+define amdgpu_kernel void @s_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) #0 {
+ %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
%val = extractvalue { i32, i1 } %uadd, 0
%carry = extractvalue { i32, i1 } %uadd, 1
store i32 %val, i32 addrspace(1)* %out, align 4
@@ -37,14 +37,19 @@ define void @s_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32
}
; FUNC-LABEL: {{^}}v_uaddo_i32:
-; SI: v_add_i32
+; GCN: v_add_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
; EG: ADDC_UINT
; EG: ADD_INT
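; The VALU add writes its carry bit to vcc; v_cndmask_b32 then materializes
; that bit as the 0/1 value stored to %carryout.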
-define void @v_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
- %a = load i32, i32 addrspace(1)* %aptr, align 4
- %b = load i32, i32 addrspace(1)* %bptr, align 4
- %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b) nounwind
+define amdgpu_kernel void @v_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr
+ %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr
+ %a = load i32, i32 addrspace(1)* %a.gep, align 4
+ %b = load i32, i32 addrspace(1)* %b.gep, align 4
+ %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
%val = extractvalue { i32, i1 } %uadd, 0
%carry = extractvalue { i32, i1 } %uadd, 1
store i32 %val, i32 addrspace(1)* %out, align 4
@@ -52,14 +57,36 @@ define void @v_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32
ret void
}
+; FUNC-LABEL: {{^}}v_uaddo_i32_novcc:
+; GCN: v_add_i32_e64 v{{[0-9]+}}, [[COND:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[COND]]
+
+; EG: ADDC_UINT
+; EG: ADD_INT
+define amdgpu_kernel void @v_uaddo_i32_novcc(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr
+ %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr
+ %a = load i32, i32 addrspace(1)* %a.gep, align 4
+ %b = load i32, i32 addrspace(1)* %b.gep, align 4
+ %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+ %val = extractvalue { i32, i1 } %uadd, 0
+ %carry = extractvalue { i32, i1 } %uadd, 1
+ store volatile i32 %val, i32 addrspace(1)* %out, align 4
+ call void asm sideeffect "", "~{VCC}"() #0
+ store volatile i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
; FUNC-LABEL: {{^}}s_uaddo_i64:
-; SI: s_add_u32
-; SI: s_addc_u32
+; GCN: s_add_u32
+; GCN: s_addc_u32
; EG: ADDC_UINT
; EG: ADD_INT
-define void @s_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
- %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) nounwind
+define amdgpu_kernel void @s_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) #0 {
+ %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue { i64, i1 } %uadd, 0
%carry = extractvalue { i64, i1 } %uadd, 1
store i64 %val, i64 addrspace(1)* %out, align 8
@@ -68,18 +95,48 @@ define void @s_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64
}
; FUNC-LABEL: {{^}}v_uaddo_i64:
-; SI: v_add_i32
-; SI: v_addc_u32
+; GCN: v_add_i32
+; GCN: v_addc_u32
; EG: ADDC_UINT
; EG: ADD_INT
-define void @v_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
- %a = load i64, i64 addrspace(1)* %aptr, align 4
- %b = load i64, i64 addrspace(1)* %bptr, align 4
- %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) nounwind
+define amdgpu_kernel void @v_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %a.ptr, i64 addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds i64, i64 addrspace(1)* %a.ptr
+ %b.gep = getelementptr inbounds i64, i64 addrspace(1)* %b.ptr
+ %a = load i64, i64 addrspace(1)* %a.gep
+ %b = load i64, i64 addrspace(1)* %b.gep
+ %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue { i64, i1 } %uadd, 0
%carry = extractvalue { i64, i1 } %uadd, 1
- store i64 %val, i64 addrspace(1)* %out, align 8
+ store i64 %val, i64 addrspace(1)* %out
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_uaddo_i16:
+; VI: v_add_u16_e32
+; VI: v_cmp_lt_u16_e32
+define amdgpu_kernel void @v_uaddo_i16(i16 addrspace(1)* %out, i1 addrspace(1)* %carryout, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds i16, i16 addrspace(1)* %a.ptr
+ %b.gep = getelementptr inbounds i16, i16 addrspace(1)* %b.ptr
+ %a = load i16, i16 addrspace(1)* %a.gep
+ %b = load i16, i16 addrspace(1)* %b.gep
+ %uadd = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 %a, i16 %b)
+ %val = extractvalue { i16, i1 } %uadd, 0
+ %carry = extractvalue { i16, i1 } %uadd, 1
+ store i16 %val, i16 addrspace(1)* %out
store i1 %carry, i1 addrspace(1)* %carryout
ret void
}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+declare { i16, i1 } @llvm.uadd.with.overflow.i16(i16, i16) #1
+declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) #1
+declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
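
For reference, every function touched in this file follows the same overflow-intrinsic shape: call llvm.uadd.with.overflow, split the { value, carry } result pair, and store both halves. A minimal standalone sketch of that shape, in the same typed-pointer IR dialect the tests use (the name @uaddo_sketch is illustrative, not taken from the test file):

declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)

define amdgpu_kernel void @uaddo_sketch(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) {
  ; the intrinsic returns the wrapped sum plus an i1 carry-out bit
  %pair = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %sum = extractvalue { i32, i1 } %pair, 0
  %carry = extractvalue { i32, i1 } %pair, 1
  store i32 %sum, i32 addrspace(1)* %out
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}

With amdgpu_kernel, %a and %b arrive as scalar kernel arguments, which is why the s_ variants can expect scalar s_add/s_addc code while the v_ variants first load their operands per work item.
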
diff --git a/test/CodeGen/AMDGPU/udiv.ll b/test/CodeGen/AMDGPU/udiv.ll
index da88d2a8e8cb..2874a0cdbc05 100644
--- a/test/CodeGen/AMDGPU/udiv.ll
+++ b/test/CodeGen/AMDGPU/udiv.ll
@@ -1,22 +1,27 @@
; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs -mattr=-fp32-denormals < %s | FileCheck -check-prefix=SI -check-prefix=FUNC -check-prefix=VI %s
+
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=+fp32-denormals < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}udiv_i32:
; EG-NOT: SETGE_INT
; EG: CF_END
-define void @udiv_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+
+; SI: v_rcp_iflag_f32_e32
+define amdgpu_kernel void @udiv_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
- %a = load i32, i32 addrspace(1) * %in
- %b = load i32, i32 addrspace(1) * %b_ptr
+ %a = load i32, i32 addrspace(1)* %in
+ %b = load i32, i32 addrspace(1)* %b_ptr
%result = udiv i32 %a, %b
store i32 %result, i32 addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}s_udiv_i32:
-
-define void @s_udiv_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+; SI: v_rcp_iflag_f32_e32
+define amdgpu_kernel void @s_udiv_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
%result = udiv i32 %a, %b
store i32 %result, i32 addrspace(1)* %out
ret void
@@ -30,8 +35,10 @@ define void @s_udiv_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
; FUNC-LABEL: {{^}}udiv_v2i32:
; EG: CF_END
+; SI: v_rcp_iflag_f32_e32
+; SI: v_rcp_iflag_f32_e32
; SI: s_endpgm
-define void @udiv_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @udiv_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32>, <2 x i32> addrspace(1) * %in
%b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
@@ -43,7 +50,7 @@ define void @udiv_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %i
; FUNC-LABEL: {{^}}udiv_v4i32:
; EG: CF_END
; SI: s_endpgm
-define void @udiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @udiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32>, <4 x i32> addrspace(1) * %in
%b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
@@ -56,7 +63,7 @@ define void @udiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %i
; SI: buffer_load_dword [[VAL:v[0-9]+]]
; SI: v_lshrrev_b32_e32 [[RESULT:v[0-9]+]], 4, [[VAL]]
; SI: buffer_store_dword [[RESULT]]
-define void @udiv_i32_div_pow2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @udiv_i32_div_pow2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
%result = udiv i32 %a, 16
@@ -70,7 +77,7 @@ define void @udiv_i32_div_pow2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; SI: v_mul_hi_u32 [[MULHI:v[0-9]+]], [[K]], [[VAL]]
; SI: v_lshrrev_b32_e32 [[RESULT:v[0-9]+]], 25, [[MULHI]]
; SI: buffer_store_dword [[RESULT]]
-define void @udiv_i32_div_k_even(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @udiv_i32_div_k_even(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
%result = udiv i32 %a, 34259182
@@ -84,7 +91,7 @@ define void @udiv_i32_div_k_even(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
; SI: v_mul_hi_u32 [[MULHI:v[0-9]+]], [[K]], [[VAL]]
; SI: v_lshrrev_b32_e32 [[RESULT:v[0-9]+]], 24, [[MULHI]]
; SI: buffer_store_dword [[RESULT]]
-define void @udiv_i32_div_k_odd(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @udiv_i32_div_k_odd(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
%result = udiv i32 %a, 34259183
@@ -96,7 +103,7 @@ define void @udiv_i32_div_k_odd(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; SI: v_rcp_f32
; SI: v_and_b32_e32 [[TRUNC:v[0-9]+]], 0xff, v{{[0-9]+}}
; SI: buffer_store_dword [[TRUNC]]
-define void @v_udiv_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
+define amdgpu_kernel void @v_udiv_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
%den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1
%num = load i8, i8 addrspace(1) * %in
%den = load i8, i8 addrspace(1) * %den_ptr
@@ -110,7 +117,7 @@ define void @v_udiv_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
; SI: v_rcp_f32
; SI: v_and_b32_e32 [[TRUNC:v[0-9]+]], 0xffff, v{{[0-9]+}}
; SI: buffer_store_dword [[TRUNC]]
-define void @v_udiv_i16(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @v_udiv_i16(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
%den_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
%num = load i16, i16 addrspace(1) * %in
%den = load i16, i16 addrspace(1) * %den_ptr
@@ -124,7 +131,7 @@ define void @v_udiv_i16(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
; SI: v_rcp_f32
; SI: v_and_b32_e32 [[TRUNC:v[0-9]+]], 0x7fffff, v{{[0-9]+}}
; SI: buffer_store_dword [[TRUNC]]
-define void @v_udiv_i23(i32 addrspace(1)* %out, i23 addrspace(1)* %in) {
+define amdgpu_kernel void @v_udiv_i23(i32 addrspace(1)* %out, i23 addrspace(1)* %in) {
%den_ptr = getelementptr i23, i23 addrspace(1)* %in, i23 1
%num = load i23, i23 addrspace(1) * %in
%den = load i23, i23 addrspace(1) * %den_ptr
@@ -136,7 +143,7 @@ define void @v_udiv_i23(i32 addrspace(1)* %out, i23 addrspace(1)* %in) {
; FUNC-LABEL: {{^}}v_udiv_i24:
; SI-NOT: v_rcp_f32
-define void @v_udiv_i24(i32 addrspace(1)* %out, i24 addrspace(1)* %in) {
+define amdgpu_kernel void @v_udiv_i24(i32 addrspace(1)* %out, i24 addrspace(1)* %in) {
%den_ptr = getelementptr i24, i24 addrspace(1)* %in, i24 1
%num = load i24, i24 addrspace(1) * %in
%den = load i24, i24 addrspace(1) * %den_ptr
@@ -152,9 +159,42 @@ define void @v_udiv_i24(i32 addrspace(1)* %out, i24 addrspace(1)* %in) {
; SI: v_mul_hi_u32
; SI: v_mul_hi_u32
-define void @scalarize_mulhu_4xi32(<4 x i32> addrspace(1)* nocapture readonly %in, <4 x i32> addrspace(1)* nocapture %out) {
+define amdgpu_kernel void @scalarize_mulhu_4xi32(<4 x i32> addrspace(1)* nocapture readonly %in, <4 x i32> addrspace(1)* nocapture %out) {
%1 = load <4 x i32>, <4 x i32> addrspace(1)* %in, align 16
%2 = udiv <4 x i32> %1, <i32 53668, i32 53668, i32 53668, i32 53668>
store <4 x i32> %2, <4 x i32> addrspace(1)* %out, align 16
ret void
}
+
+; FUNC-LABEL: {{^}}test_udiv2:
+; SI: s_lshr_b32 s{{[0-9]}}, s{{[0-9]}}, 1
+define amdgpu_kernel void @test_udiv2(i32 %p) {
+ %i = udiv i32 %p, 2
+ store volatile i32 %i, i32 addrspace(1)* undef
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_udiv_3_mulhu:
+; SI: v_mov_b32_e32 v{{[0-9]+}}, 0xaaaaaaab
+; SI: v_mul_hi_u32 v0, {{v[0-9]+}}, {{s[0-9]+}}
+; SI-NEXT: v_lshrrev_b32_e32 v0, 1, v0
+define amdgpu_kernel void @test_udiv_3_mulhu(i32 %p) {
+ %i = udiv i32 %p, 3
+ store volatile i32 %i, i32 addrspace(1)* undef
+ ret void
+}
+
+; GCN-LABEL: {{^}}fdiv_test_denormals
+; VI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @fdiv_test_denormals(i8 addrspace(1)* nocapture readonly %arg) {
+bb:
+ %tmp = load i8, i8 addrspace(1)* null, align 1
+ %tmp1 = sext i8 %tmp to i32
+ %tmp2 = getelementptr inbounds i8, i8 addrspace(1)* %arg, i64 undef
+ %tmp3 = load i8, i8 addrspace(1)* %tmp2, align 1
+ %tmp4 = sext i8 %tmp3 to i32
+ %tmp5 = sdiv i32 %tmp1, %tmp4
+ %tmp6 = trunc i32 %tmp5 to i8
+ store i8 %tmp6, i8 addrspace(1)* null, align 1
+ ret void
+}
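
A note on the magic constant pinned down by test_udiv_3_mulhu above: 0xaaaaaaab equals (2^33 + 1) / 3, so for every unsigned 32-bit x the product x * 0xaaaaaaab shifted right by 33 is exactly x / 3. v_mul_hi_u32 supplies the shift by 32 and the following v_lshrrev_b32_e32 by 1 supplies the rest. A sketch of the expanded arithmetic (the name @udiv3_sketch is illustrative):

define i32 @udiv3_sketch(i32 %x) {
  %wide = zext i32 %x to i64
  ; 2863311531 = 0xaaaaaaab = (2^33 + 1) / 3
  %mul = mul i64 %wide, 2863311531
  ; lshr by 32 is the mul_hi part; the extra lshr by 1 makes 33 in total
  %shr = lshr i64 %mul, 33
  %res = trunc i64 %shr to i32
  ret i32 %res
}

For example, x = 100 gives 100 * 0xaaaaaaab = 286331153100, and that value shifted right by 33 is 33, which is 100 / 3.
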
diff --git a/test/CodeGen/AMDGPU/udivrem.ll b/test/CodeGen/AMDGPU/udivrem.ll
index 17f4ebf175d9..9507a49cfc8b 100644
--- a/test/CodeGen/AMDGPU/udivrem.ll
+++ b/test/CodeGen/AMDGPU/udivrem.ll
@@ -51,7 +51,7 @@
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_cndmask_b32_e64
; SI: s_endpgm
-define void @test_udivrem(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %x, i32 %y) {
+define amdgpu_kernel void @test_udivrem(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %x, i32 %y) {
%result0 = udiv i32 %x, %y
store i32 %result0, i32 addrspace(1)* %out0
%result1 = urem i32 %x, %y
@@ -158,7 +158,7 @@ define void @test_udivrem(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_cndmask_b32_e64
; SI: s_endpgm
-define void @test_udivrem_v2(<2 x i32> addrspace(1)* %out, <2 x i32> %x, <2 x i32> %y) {
+define amdgpu_kernel void @test_udivrem_v2(<2 x i32> addrspace(1)* %out, <2 x i32> %x, <2 x i32> %y) {
%result0 = udiv <2 x i32> %x, %y
store <2 x i32> %result0, <2 x i32> addrspace(1)* %out
%result1 = urem <2 x i32> %x, %y
@@ -340,7 +340,7 @@ define void @test_udivrem_v2(<2 x i32> addrspace(1)* %out, <2 x i32> %x, <2 x i3
; SI-DAG: v_subrev_i32_e32
; SI-DAG: v_cndmask_b32_e64
; SI: s_endpgm
-define void @test_udivrem_v4(<4 x i32> addrspace(1)* %out, <4 x i32> %x, <4 x i32> %y) {
+define amdgpu_kernel void @test_udivrem_v4(<4 x i32> addrspace(1)* %out, <4 x i32> %x, <4 x i32> %y) {
%result0 = udiv <4 x i32> %x, %y
store <4 x i32> %result0, <4 x i32> addrspace(1)* %out
%result1 = urem <4 x i32> %x, %y
diff --git a/test/CodeGen/AMDGPU/udivrem24.ll b/test/CodeGen/AMDGPU/udivrem24.ll
index 6d145f1dbf09..6f144dcc6fd2 100644
--- a/test/CodeGen/AMDGPU/udivrem24.ll
+++ b/test/CodeGen/AMDGPU/udivrem24.ll
@@ -12,7 +12,7 @@
; EG-DAG: UINT_TO_FLT
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_UINT
-define void @udiv24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
+define amdgpu_kernel void @udiv24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
%den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1
%num = load i8, i8 addrspace(1) * %in
%den = load i8, i8 addrspace(1) * %den_ptr
@@ -31,7 +31,7 @@ define void @udiv24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
; EG-DAG: UINT_TO_FLT
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_UINT
-define void @udiv24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @udiv24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
%den_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
%num = load i16, i16 addrspace(1) * %in, align 2
%den = load i16, i16 addrspace(1) * %den_ptr, align 2
@@ -50,7 +50,7 @@ define void @udiv24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
; EG-DAG: UINT_TO_FLT
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_UINT
-define void @udiv23_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @udiv23_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -67,7 +67,7 @@ define void @udiv23_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; SI: v_rcp_iflag
; SI-NOT: v_rcp_f32
; EG-NOT: RECIP_IEEE
-define void @udiv24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @udiv24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -84,7 +84,7 @@ define void @udiv24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; SI: v_rcp_iflag
; SI-NOT: v_rcp_f32
; EG-NOT: RECIP_IEEE
-define void @no_udiv24_u23_u24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @no_udiv24_u23_u24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -101,7 +101,7 @@ define void @no_udiv24_u23_u24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in
; SI: v_rcp_iflag
; SI-NOT: v_rcp_f32
; EG-NOT: RECIP_IEEE
-define void @no_udiv24_u24_u23_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @no_udiv24_u24_u23_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -121,7 +121,7 @@ define void @no_udiv24_u24_u23_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in
; EG-NOT: UINT_TO_FLT
; EG-NOT: RECIP_IEEE
-define void @udiv25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @udiv25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -141,7 +141,7 @@ define void @udiv25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; EG-NOT: UINT_TO_FLT
; EG-NOT: RECIP_IEEE
-define void @test_no_udiv24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test_no_udiv24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -161,7 +161,7 @@ define void @test_no_udiv24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
; EG-NOT: UINT_TO_FLT
; EG-NOT: RECIP_IEEE
-define void @test_no_udiv24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test_no_udiv24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -184,7 +184,7 @@ define void @test_no_udiv24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
; EG-DAG: UINT_TO_FLT
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_UINT
-define void @urem24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
+define amdgpu_kernel void @urem24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
%den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1
%num = load i8, i8 addrspace(1) * %in
%den = load i8, i8 addrspace(1) * %den_ptr
@@ -203,7 +203,7 @@ define void @urem24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
; EG-DAG: UINT_TO_FLT
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_UINT
-define void @urem24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @urem24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
%den_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
%num = load i16, i16 addrspace(1) * %in, align 2
%den = load i16, i16 addrspace(1) * %den_ptr, align 2
@@ -215,7 +215,7 @@ define void @urem24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
; FUNC-LABEL: {{^}}urem24_i32:
; SI-NOT: v_rcp_f32
; EG-NOT: RECIP_IEEE
-define void @urem24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @urem24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -235,7 +235,7 @@ define void @urem24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; EG-NOT: UINT_TO_FLT
; EG-NOT: RECIP_IEEE
-define void @urem25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @urem25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -255,7 +255,7 @@ define void @urem25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; EG-NOT: UINT_TO_FLT
; EG-NOT: RECIP_IEEE
-define void @test_no_urem24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test_no_urem24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -275,7 +275,7 @@ define void @test_no_urem24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
; EG-NOT: UINT_TO_FLT
; EG-NOT: RECIP_IEEE
-define void @test_no_urem24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test_no_urem24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -294,7 +294,7 @@ define void @test_no_urem24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
; SI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]],
; EG: RECIP_IEEE
-define void @test_udiv24_u16_u23_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test_udiv24_u16_u23_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
@@ -313,7 +313,7 @@ define void @test_udiv24_u16_u23_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %
; SI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]],
; EG: RECIP_IEEE
-define void @test_udiv24_u23_u16_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test_udiv24_u23_u16_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32, i32 addrspace(1) * %in, align 4
%den = load i32, i32 addrspace(1) * %den_ptr, align 4
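
The bit-width bookkeeping in this file traces back to f32 precision. A single-precision significand holds 24 bits, and as these tests pin down, the float-reciprocal path (RECIP_IEEE on EG, plain v_rcp_f32 on SI) is taken only when both operands are known to fit in fewer than 24 bits: the i8, i16, and 23-bit cases qualify, while the full 24-bit and mixed 24/23-bit cases fall back to the generic expansion (which itself starts with v_rcp_iflag, hence the paired SI/EG negative checks). A sketch of an input shape that qualifies, mirroring the 23-bit cases (the name @udiv23_sketch is illustrative):

define i32 @udiv23_sketch(i32 %num, i32 %den) {
  ; masking with 0x7fffff leaves at most 23 significant bits,
  ; few enough for the f32-reciprocal division sequence
  %num.23 = and i32 %num, 8388607
  %den.23 = and i32 %den, 8388607
  %res = udiv i32 %num.23, %den.23
  ret i32 %res
}
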
diff --git a/test/CodeGen/AMDGPU/udivrem64.ll b/test/CodeGen/AMDGPU/udivrem64.ll
index da61a841ff35..bd297920d563 100644
--- a/test/CodeGen/AMDGPU/udivrem64.ll
+++ b/test/CodeGen/AMDGPU/udivrem64.ll
@@ -70,7 +70,7 @@
;SI-NOT: v_lshr_b64
;VI-NOT: v_lshrrev_b64
;GCN: s_endpgm
-define void @test_udiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+define amdgpu_kernel void @test_udiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
%result = udiv i64 %x, %y
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -144,7 +144,7 @@ define void @test_udiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
;SI-NOT: v_lshr_b64
;VI-NOT: v_lshrrev_b64
;GCN: s_endpgm
-define void @test_urem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+define amdgpu_kernel void @test_urem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
%result = urem i64 %x, %y
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -159,7 +159,7 @@ define void @test_urem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
;SI-NOT: v_lshr_b64
;VI-NOT: v_lshrrev_b64
;GCN: s_endpgm
-define void @test_udiv3264(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+define amdgpu_kernel void @test_udiv3264(i64 addrspace(1)* %out, i64 %x, i64 %y) {
%1 = lshr i64 %x, 33
%2 = lshr i64 %y, 33
%result = udiv i64 %1, %2
@@ -176,7 +176,7 @@ define void @test_udiv3264(i64 addrspace(1)* %out, i64 %x, i64 %y) {
;SI-NOT: v_lshr_b64
;VI-NOT: v_lshrrev_b64
;GCN: s_endpgm
-define void @test_urem3264(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+define amdgpu_kernel void @test_urem3264(i64 addrspace(1)* %out, i64 %x, i64 %y) {
%1 = lshr i64 %x, 33
%2 = lshr i64 %y, 33
%result = urem i64 %1, %2
@@ -195,7 +195,7 @@ define void @test_urem3264(i64 addrspace(1)* %out, i64 %x, i64 %y) {
;VI-NOT: v_lshrrev_b64
;GCN: v_mad_f32
;GCN: s_endpgm
-define void @test_udiv2364(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+define amdgpu_kernel void @test_udiv2364(i64 addrspace(1)* %out, i64 %x, i64 %y) {
%1 = lshr i64 %x, 41
%2 = lshr i64 %y, 41
%result = udiv i64 %1, %2
@@ -214,7 +214,7 @@ define void @test_udiv2364(i64 addrspace(1)* %out, i64 %x, i64 %y) {
;VI-NOT: v_lshrrev_b64
;GCN: v_mad_f32
;GCN: s_endpgm
-define void @test_urem2364(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+define amdgpu_kernel void @test_urem2364(i64 addrspace(1)* %out, i64 %x, i64 %y) {
%1 = lshr i64 %x, 41
%2 = lshr i64 %y, 41
%result = urem i64 %1, %2
diff --git a/test/CodeGen/AMDGPU/uint_to_fp.f64.ll b/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
index a4e18ebc9120..62943aeefbd8 100644
--- a/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
+++ b/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
@@ -9,7 +9,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
; SI-DAG: v_ldexp_f64 [[LDEXP:v\[[0-9]+:[0-9]+\]]], [[HI_CONV]], 32
; SI: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[LDEXP]], [[LO_CONV]]
; SI: buffer_store_dwordx2 [[RESULT]]
-define void @v_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @v_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%val = load i64, i64 addrspace(1)* %gep, align 8
@@ -19,21 +19,21 @@ define void @v_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)
}
; SI-LABEL: {{^}}s_uint_to_fp_i64_to_f64
-define void @s_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @s_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 %in) {
%cast = uitofp i64 %in to double
store double %cast, double addrspace(1)* %out, align 8
ret void
}
; SI-LABEL: {{^}}s_uint_to_fp_v2i64_to_v2f64
-define void @s_uint_to_fp_v2i64_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i64> %in) {
+define amdgpu_kernel void @s_uint_to_fp_v2i64_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i64> %in) {
%cast = uitofp <2 x i64> %in to <2 x double>
store <2 x double> %cast, <2 x double> addrspace(1)* %out, align 16
ret void
}
; SI-LABEL: {{^}}s_uint_to_fp_v4i64_to_v4f64
-define void @s_uint_to_fp_v4i64_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i64> %in) {
+define amdgpu_kernel void @s_uint_to_fp_v4i64_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i64> %in) {
%cast = uitofp <4 x i64> %in to <4 x double>
store <4 x double> %cast, <4 x double> addrspace(1)* %out, align 16
ret void
@@ -42,7 +42,7 @@ define void @s_uint_to_fp_v4i64_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i
; SI-LABEL: {{^}}s_uint_to_fp_i32_to_f64
; SI: v_cvt_f64_u32_e32
; SI: s_endpgm
-define void @s_uint_to_fp_i32_to_f64(double addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @s_uint_to_fp_i32_to_f64(double addrspace(1)* %out, i32 %in) {
%cast = uitofp i32 %in to double
store double %cast, double addrspace(1)* %out, align 8
ret void
@@ -52,7 +52,7 @@ define void @s_uint_to_fp_i32_to_f64(double addrspace(1)* %out, i32 %in) {
; SI: v_cvt_f64_u32_e32
; SI: v_cvt_f64_u32_e32
; SI: s_endpgm
-define void @s_uint_to_fp_v2i32_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i32> %in) {
+define amdgpu_kernel void @s_uint_to_fp_v2i32_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i32> %in) {
%cast = uitofp <2 x i32> %in to <2 x double>
store <2 x double> %cast, <2 x double> addrspace(1)* %out, align 16
ret void
@@ -64,7 +64,7 @@ define void @s_uint_to_fp_v2i32_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i
; SI: v_cvt_f64_u32_e32
; SI: v_cvt_f64_u32_e32
; SI: s_endpgm
-define void @s_uint_to_fp_v4i32_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i32> %in) {
+define amdgpu_kernel void @s_uint_to_fp_v4i32_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i32> %in) {
%cast = uitofp <4 x i32> %in to <4 x double>
store <4 x double> %cast, <4 x double> addrspace(1)* %out, align 16
ret void
@@ -79,7 +79,7 @@ define void @s_uint_to_fp_v4i32_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i
; SI-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; SI: buffer_store_dwordx2 v{{\[}}[[ZERO]]:[[SEL]]{{\]}}
; SI: s_endpgm
-define void @uint_to_fp_i1_to_f64(double addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @uint_to_fp_i1_to_f64(double addrspace(1)* %out, i32 %in) {
%cmp = icmp eq i32 %in, 0
%fp = uitofp i1 %cmp to double
store double %fp, double addrspace(1)* %out, align 4
@@ -91,7 +91,7 @@ define void @uint_to_fp_i1_to_f64(double addrspace(1)* %out, i32 %in) {
; SI-NEXT: v_cvt_f64_u32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]]
; SI: buffer_store_dwordx2 [[RESULT]]
; SI: s_endpgm
-define void @uint_to_fp_i1_to_f64_load(double addrspace(1)* %out, i1 %in) {
+define amdgpu_kernel void @uint_to_fp_i1_to_f64_load(double addrspace(1)* %out, i1 %in) {
%fp = uitofp i1 %in to double
store double %fp, double addrspace(1)* %out, align 8
ret void
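
The v_uint_to_fp_i64_to_f64 checks at the top of this file (two v_cvt_f64_u32, a v_ldexp_f64 by 32, then v_add_f64) follow from a simple decomposition: each 32-bit half of the i64 converts to double exactly, the high half is scaled by 2^32, and the only rounding happens in the final add, so the sum is the correctly rounded result. The same decomposition as a plain-IR sketch (the name @u64_to_f64_sketch is illustrative):

define double @u64_to_f64_sketch(i64 %x) {
  %hi64 = lshr i64 %x, 32
  %hi = trunc i64 %hi64 to i32
  %lo = trunc i64 %x to i32
  %hi.fp = uitofp i32 %hi to double   ; exact: 32 bits fit in a 53-bit significand
  %lo.fp = uitofp i32 %lo to double   ; exact
  ; 0x41F0000000000000 is the double 2^32; scaling by a power of two is exact,
  ; which is what the checked v_ldexp_f64 ..., 32 does
  %hi.scaled = fmul double %hi.fp, 0x41F0000000000000
  %res = fadd double %hi.scaled, %lo.fp
  ret double %res
}
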
diff --git a/test/CodeGen/AMDGPU/uint_to_fp.i64.ll b/test/CodeGen/AMDGPU/uint_to_fp.i64.ll
index cd816b27fce6..4168326e14c6 100644
--- a/test/CodeGen/AMDGPU/uint_to_fp.i64.ll
+++ b/test/CodeGen/AMDGPU/uint_to_fp.i64.ll
@@ -4,7 +4,7 @@
; FIXME: This should be merged with uint_to_fp.ll, but s_uint_to_fp_v2i64 crashes on r600
; FUNC-LABEL: {{^}}s_uint_to_fp_i64_to_f16:
-define void @s_uint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 %in) #0 {
+define amdgpu_kernel void @s_uint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 %in) #0 {
%result = uitofp i64 %in to half
store half %result, half addrspace(1)* %out
ret void
@@ -24,7 +24,7 @@ define void @s_uint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 %in) #0 {
; GCN: v_add_i32_e32 [[VR:v[0-9]+]]
; GCN: v_cvt_f16_f32_e32 [[VR_F16:v[0-9]+]], [[VR]]
; GCN: {{buffer|flat}}_store_short {{.*}}[[VR_F16]]
-define void @v_uint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_uint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%out.gep = getelementptr half, half addrspace(1)* %out, i32 %tid
@@ -35,7 +35,7 @@ define void @v_uint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 addrspace(1)*
}
; FUNC-LABEL: {{^}}s_uint_to_fp_i64_to_f32:
-define void @s_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 %in) #0 {
+define amdgpu_kernel void @s_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 %in) #0 {
%result = uitofp i64 %in to float
store float %result, float addrspace(1)* %out
ret void
@@ -54,7 +54,7 @@ define void @s_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 %in) #0 {
; GCN: v_add_i32_e32 [[VR:v[0-9]+]]
; GCN: {{buffer|flat}}_store_dword {{.*}}[[VR]]
-define void @v_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -65,14 +65,14 @@ define void @v_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 addrspace(1)*
}
; FUNC-LABEL: {{^}}s_uint_to_fp_v2i64_to_v2f32:
-define void @s_uint_to_fp_v2i64_to_v2f32(<2 x float> addrspace(1)* %out, <2 x i64> %in) #0{
+define amdgpu_kernel void @s_uint_to_fp_v2i64_to_v2f32(<2 x float> addrspace(1)* %out, <2 x i64> %in) #0{
%result = uitofp <2 x i64> %in to <2 x float>
store <2 x float> %result, <2 x float> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}v_uint_to_fp_v4i64_to_v4f32:
-define void @v_uint_to_fp_v4i64_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_uint_to_fp_v4i64_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%in.gep = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i32 %tid
%out.gep = getelementptr <4 x float>, <4 x float> addrspace(1)* %out, i32 %tid
@@ -83,14 +83,14 @@ define void @v_uint_to_fp_v4i64_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i6
}
; FUNC-LABEL: {{^}}s_uint_to_fp_v2i64_to_v2f16:
-define void @s_uint_to_fp_v2i64_to_v2f16(<2 x half> addrspace(1)* %out, <2 x i64> %in) #0{
+define amdgpu_kernel void @s_uint_to_fp_v2i64_to_v2f16(<2 x half> addrspace(1)* %out, <2 x i64> %in) #0{
%result = uitofp <2 x i64> %in to <2 x half>
store <2 x half> %result, <2 x half> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}v_uint_to_fp_v4i64_to_v4f16:
-define void @v_uint_to_fp_v4i64_to_v4f16(<4 x half> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_uint_to_fp_v4i64_to_v4f16(<4 x half> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%in.gep = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i32 %tid
%out.gep = getelementptr <4 x half>, <4 x half> addrspace(1)* %out, i32 %tid
diff --git a/test/CodeGen/AMDGPU/uint_to_fp.ll b/test/CodeGen/AMDGPU/uint_to_fp.ll
index 3003226ca1a4..2e9918717c3a 100644
--- a/test/CodeGen/AMDGPU/uint_to_fp.ll
+++ b/test/CodeGen/AMDGPU/uint_to_fp.ll
@@ -6,7 +6,7 @@
; SI: v_cvt_f32_u32_e32
; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-define void @s_uint_to_fp_i32_to_f32(float addrspace(1)* %out, i32 %in) #0 {
+define amdgpu_kernel void @s_uint_to_fp_i32_to_f32(float addrspace(1)* %out, i32 %in) #0 {
%result = uitofp i32 %in to float
store float %result, float addrspace(1)* %out
ret void
@@ -16,7 +16,7 @@ define void @s_uint_to_fp_i32_to_f32(float addrspace(1)* %out, i32 %in) #0 {
; SI: v_cvt_f32_u32_e32 {{v[0-9]+}}, {{v[0-9]+$}}
; R600: INT_TO_FLT
-define void @v_uint_to_fp_i32_to_f32(float addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_uint_to_fp_i32_to_f32(float addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%in.gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -32,7 +32,7 @@ define void @v_uint_to_fp_i32_to_f32(float addrspace(1)* %out, i32 addrspace(1)*
; R600-DAG: UINT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[2].W
; R600-DAG: UINT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[3].X
-define void @s_uint_to_fp_v2i32_to_v2f32(<2 x float> addrspace(1)* %out, <2 x i32> %in) #0 {
+define amdgpu_kernel void @s_uint_to_fp_v2i32_to_v2f32(<2 x float> addrspace(1)* %out, <2 x i32> %in) #0 {
%result = uitofp <2 x i32> %in to <2 x float>
store <2 x float> %result, <2 x float> addrspace(1)* %out
ret void
@@ -49,7 +49,7 @@ define void @s_uint_to_fp_v2i32_to_v2f32(<2 x float> addrspace(1)* %out, <2 x i3
; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @s_uint_to_fp_v4i32_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @s_uint_to_fp_v4i32_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
%value = load <4 x i32>, <4 x i32> addrspace(1) * %in
%result = uitofp <4 x i32> %value to <4 x float>
store <4 x float> %result, <4 x float> addrspace(1)* %out
@@ -66,7 +66,7 @@ define void @s_uint_to_fp_v4i32_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i3
; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @v_uint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_uint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%in.gep = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 %tid
%out.gep = getelementptr <4 x float>, <4 x float> addrspace(1)* %out, i32 %tid
@@ -81,7 +81,7 @@ define void @v_uint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4 x i32> addrsp
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1.0, [[CMP]]
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
-define void @s_uint_to_fp_i1_to_f32(float addrspace(1)* %out, i32 %in) #0 {
+define amdgpu_kernel void @s_uint_to_fp_i1_to_f32(float addrspace(1)* %out, i32 %in) #0 {
%cmp = icmp eq i32 %in, 0
%fp = uitofp i1 %cmp to float
store float %fp, float addrspace(1)* %out
@@ -92,7 +92,7 @@ define void @s_uint_to_fp_i1_to_f32(float addrspace(1)* %out, i32 %in) #0 {
; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1.0
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
-define void @s_uint_to_fp_i1_to_f32_load(float addrspace(1)* %out, i1 %in) #0 {
+define amdgpu_kernel void @s_uint_to_fp_i1_to_f32_load(float addrspace(1)* %out, i1 %in) #0 {
%fp = uitofp i1 %in to float
store float %fp, float addrspace(1)* %out
ret void
@@ -105,7 +105,7 @@ define void @s_uint_to_fp_i1_to_f32_load(float addrspace(1)* %out, i1 %in) #0 {
; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1.0
; SI: {{buffer|flat}}_store_dword {{.*}}[[RESULT]]
; SI: s_endpgm
-define void @v_uint_to_fp_i1_f32_load(float addrspace(1)* %out, i1 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_uint_to_fp_i1_f32_load(float addrspace(1)* %out, i1 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%in.gep = getelementptr i1, i1 addrspace(1)* %in, i32 %tid
%out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -126,7 +126,7 @@ define void @v_uint_to_fp_i1_f32_load(float addrspace(1)* %out, i1 addrspace(1)*
; R600-DAG: SETGT_UINT
; R600-DAG: SETE_INT
-define void @s_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 %in) #0 {
+define amdgpu_kernel void @s_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 %in) #0 {
entry:
%cvt = uitofp i64 %in to float
store float %cvt, float addrspace(1)* %out
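
The i1 cases in this file all reduce to a select that v_cndmask_b32 implements directly: uitofp of an i1 is just 1.0 or 0.0. A tiny sketch (the name @i1_to_f32_sketch is illustrative):

define float @i1_to_f32_sketch(i1 %b) {
  ; same value as uitofp i1 %b to float; lowers to v_cndmask_b32 between 0 and 1.0
  %f = select i1 %b, float 1.0, float 0.0
  ret float %f
}
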
diff --git a/test/CodeGen/AMDGPU/uitofp.f16.ll b/test/CodeGen/AMDGPU/uitofp.f16.ll
index faab5ca5db73..0c3b0fcaf854 100644
--- a/test/CodeGen/AMDGPU/uitofp.f16.ll
+++ b/test/CodeGen/AMDGPU/uitofp.f16.ll
@@ -8,7 +8,7 @@
; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[A_F32]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @uitofp_i16_to_f16(
+define amdgpu_kernel void @uitofp_i16_to_f16(
half addrspace(1)* %r,
i16 addrspace(1)* %a) {
entry:
@@ -24,7 +24,7 @@ entry:
; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[A_I16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @uitofp_i32_to_f16(
+define amdgpu_kernel void @uitofp_i32_to_f16(
half addrspace(1)* %r,
i32 addrspace(1)* %a) {
entry:
@@ -38,18 +38,23 @@ entry:
; GCN-LABEL: {{^}}uitofp_v2i16_to_v2f16
; GCN: buffer_load_dword
-; SI: v_cvt_f32_u32_e32
-; SI: v_cvt_f32_u32_e32
-; VI: v_cvt_f32_i32_e32
-; VI: v_cvt_f32_i32_e32
-; GCN: v_cvt_f16_f32_e32
-; GCN: v_cvt_f16_f32_e32
-; GCN-DAG: v_and_b32_e32
-; GCN-DAG: v_lshlrev_b32_e32
-; GCN-DAG: v_or_b32_e32
-; GCN: buffer_store_dword
-; GCN: s_endpgm
-define void @uitofp_v2i16_to_v2f16(
+
+; SI: v_cvt_f32_u32_e32
+; SI: v_cvt_f32_u32_e32
+; SI: v_cvt_f16_f32_e32
+; SI: v_cvt_f16_f32_e32
+; SI-DAG: v_lshlrev_b32_e32
+; SI: v_or_b32_e32
+
+; VI-DAG: v_cvt_f16_f32_e32
+; VI-DAG: v_cvt_f32_i32_sdwa
+; VI-DAG: v_cvt_f32_i32_sdwa
+; VI-DAG: v_cvt_f16_f32_sdwa
+; VI: v_or_b32_e32
+
+; GCN: buffer_store_dword
+; GCN: s_endpgm
+define amdgpu_kernel void @uitofp_v2i16_to_v2f16(
<2 x half> addrspace(1)* %r,
<2 x i16> addrspace(1)* %a) {
entry:
@@ -61,16 +66,23 @@ entry:
; GCN-LABEL: {{^}}uitofp_v2i32_to_v2f16
; GCN: buffer_load_dwordx2
-; GCN: v_cvt_f32_u32_e32
-; GCN: v_cvt_f32_u32_e32
-; GCN: v_cvt_f16_f32_e32
-; GCN: v_cvt_f16_f32_e32
-; GCN-DAG: v_and_b32_e32
-; GCN-DAG: v_lshlrev_b32_e32
-; GCN-DAG: v_or_b32_e32
+
+; SI: v_cvt_f32_u32_e32
+; SI: v_cvt_f32_u32_e32
+; SI: v_cvt_f16_f32_e32
+; SI: v_cvt_f16_f32_e32
+; SI-DAG: v_lshlrev_b32_e32
+; SI: v_or_b32_e32
+
+; VI-DAG: v_cvt_f32_u32_e32
+; VI-DAG: v_cvt_f32_u32_e32
+; VI-DAG: v_cvt_f16_f32_e32
+; VI-DAG: v_cvt_f16_f32_sdwa
+; VI: v_or_b32_e32
+
; GCN: buffer_store_dword
; GCN: s_endpgm
-define void @uitofp_v2i32_to_v2f16(
+define amdgpu_kernel void @uitofp_v2i32_to_v2f16(
<2 x half> addrspace(1)* %r,
<2 x i32> addrspace(1)* %a) {
entry:
diff --git a/test/CodeGen/AMDGPU/umed3.ll b/test/CodeGen/AMDGPU/umed3.ll
index a2e485d36225..5a579f3575fd 100644
--- a/test/CodeGen/AMDGPU/umed3.ll
+++ b/test/CodeGen/AMDGPU/umed3.ll
@@ -1,12 +1,13 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SICIVI -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SICIVI -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
-declare i32 @llvm.r600.read.tidig.x() #0
+declare i32 @llvm.amdgcn.workitem.id.x() #0
; GCN-LABEL: {{^}}v_test_umed3_r_i_i_i32:
; GCN: v_med3_u32 v{{[0-9]+}}, v{{[0-9]+}}, 12, 17
-define void @v_test_umed3_r_i_i_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #1 {
- %tid = call i32 @llvm.r600.read.tidig.x()
+define amdgpu_kernel void @v_test_umed3_r_i_i_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32, i32 addrspace(1)* %gep0
@@ -24,8 +25,8 @@ define void @v_test_umed3_r_i_i_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a
; GCN-LABEL: {{^}}v_test_umed3_multi_use_r_i_i_i32:
; GCN: v_max_u32
; GCN: v_min_u32
-define void @v_test_umed3_multi_use_r_i_i_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #1 {
- %tid = call i32 @llvm.r600.read.tidig.x()
+define amdgpu_kernel void @v_test_umed3_multi_use_r_i_i_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32, i32 addrspace(1)* %gep0
@@ -44,8 +45,8 @@ define void @v_test_umed3_multi_use_r_i_i_i32(i32 addrspace(1)* %out, i32 addrsp
; GCN-LABEL: {{^}}v_test_umed3_r_i_i_constant_order_i32:
; GCN: v_max_u32_e32 v{{[0-9]+}}, 17, v{{[0-9]+}}
; GCN: v_min_u32_e32 v{{[0-9]+}}, 12, v{{[0-9]+}}
-define void @v_test_umed3_r_i_i_constant_order_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #1 {
- %tid = call i32 @llvm.r600.read.tidig.x()
+define amdgpu_kernel void @v_test_umed3_r_i_i_constant_order_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32, i32 addrspace(1)* %gep0
@@ -63,8 +64,8 @@ define void @v_test_umed3_r_i_i_constant_order_i32(i32 addrspace(1)* %out, i32 a
; GCN-LABEL: {{^}}v_test_umed3_r_i_i_sign_mismatch_i32:
; GCN: v_max_i32_e32 v{{[0-9]+}}, 12, v{{[0-9]+}}
; GCN: v_min_u32_e32 v{{[0-9]+}}, 17, v{{[0-9]+}}
-define void @v_test_umed3_r_i_i_sign_mismatch_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #1 {
- %tid = call i32 @llvm.r600.read.tidig.x()
+define amdgpu_kernel void @v_test_umed3_r_i_i_sign_mismatch_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32, i32 addrspace(1)* %gep0
@@ -82,8 +83,8 @@ define void @v_test_umed3_r_i_i_sign_mismatch_i32(i32 addrspace(1)* %out, i32 ad
; GCN-LABEL: {{^}}v_test_umed3_r_i_i_i64:
; GCN: v_cmp_lt_u64
; GCN: v_cmp_gt_u64
-define void @v_test_umed3_r_i_i_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) #1 {
- %tid = call i32 @llvm.r600.read.tidig.x()
+define amdgpu_kernel void @v_test_umed3_r_i_i_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
%a = load i64, i64 addrspace(1)* %gep0
@@ -99,9 +100,10 @@ define void @v_test_umed3_r_i_i_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a
}
; GCN-LABEL: {{^}}v_test_umed3_r_i_i_i16:
-; GCN: v_med3_u32 v{{[0-9]+}}, v{{[0-9]+}}, 12, 17
-define void @v_test_umed3_r_i_i_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr) #1 {
- %tid = call i32 @llvm.r600.read.tidig.x()
+; SICIVI: v_med3_u32 v{{[0-9]+}}, v{{[0-9]+}}, 12, 17
+; GFX9: v_med3_u16 v{{[0-9]+}}, v{{[0-9]+}}, 12, 17
+define amdgpu_kernel void @v_test_umed3_r_i_i_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %aptr) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i16, i16 addrspace(1)* %aptr, i32 %tid
%outgep = getelementptr i16, i16 addrspace(1)* %out, i32 %tid
%a = load i16, i16 addrspace(1)* %gep0
@@ -171,7 +173,7 @@ define internal i8 @umax8(i8 %x, i8 %y) #2 {
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_0:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_0(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_0(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %x, i32 %y)
%tmp1 = call i32 @umax(i32 %x, i32 %y)
@@ -183,7 +185,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_1:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_1(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_1(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %x, i32 %y)
%tmp1 = call i32 @umax(i32 %y, i32 %x)
@@ -195,7 +197,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_2:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_2(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_2(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %x, i32 %y)
%tmp1 = call i32 @umax(i32 %x, i32 %y)
@@ -207,7 +209,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_3:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_3(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_3(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %x, i32 %y)
%tmp1 = call i32 @umax(i32 %y, i32 %x)
@@ -219,7 +221,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_4:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_4(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_4(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %y, i32 %x)
%tmp1 = call i32 @umax(i32 %x, i32 %y)
@@ -231,7 +233,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_5:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_5(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_5(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %y, i32 %x)
%tmp1 = call i32 @umax(i32 %y, i32 %x)
@@ -243,7 +245,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_6:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_6(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_6(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %y, i32 %x)
%tmp1 = call i32 @umax(i32 %x, i32 %y)
@@ -255,7 +257,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_7:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_7(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_7(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %y, i32 %x)
%tmp1 = call i32 @umax(i32 %y, i32 %x)
@@ -267,7 +269,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_8:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_8(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_8(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %x, i32 %y)
%tmp1 = call i32 @umax(i32 %x, i32 %y)
@@ -279,7 +281,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_9:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_9(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_9(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %x, i32 %y)
%tmp1 = call i32 @umax(i32 %y, i32 %x)
@@ -291,7 +293,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_10:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_10(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_10(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %x, i32 %y)
%tmp1 = call i32 @umax(i32 %x, i32 %y)
@@ -303,7 +305,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_11:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_11(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_11(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %x, i32 %y)
%tmp1 = call i32 @umax(i32 %y, i32 %x)
@@ -315,7 +317,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_12:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_12(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_12(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %y, i32 %x)
%tmp1 = call i32 @umax(i32 %x, i32 %y)
@@ -327,7 +329,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_13:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_13(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_13(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %y, i32 %x)
%tmp1 = call i32 @umax(i32 %y, i32 %x)
@@ -339,7 +341,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_14:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_14(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_14(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %y, i32 %x)
%tmp1 = call i32 @umax(i32 %x, i32 %y)
@@ -351,7 +353,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_15:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_15(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_15(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %y, i32 %x)
%tmp1 = call i32 @umax(i32 %y, i32 %x)
@@ -366,7 +368,7 @@ bb:
; GCN: s_and_b32
; GCN: s_and_b32
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i16_pat_0(i16 addrspace(1)* %arg, i16 %x, i16 %y, i16 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i16_pat_0(i16 addrspace(1)* %arg, i16 %x, i16 %y, i16 %z) #1 {
bb:
%tmp0 = call i16 @umin16(i16 %x, i16 %y)
%tmp1 = call i16 @umax16(i16 %x, i16 %y)
@@ -381,7 +383,7 @@ bb:
; GCN: s_and_b32
; GCN: s_and_b32
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i8_pat_0(i8 addrspace(1)* %arg, i8 %x, i8 %y, i8 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i8_pat_0(i8 addrspace(1)* %arg, i8 %x, i8 %y, i8 %z) #1 {
bb:
%tmp0 = call i8 @umin8(i8 %x, i8 %y)
%tmp1 = call i8 @umax8(i8 %x, i8 %y)
@@ -393,7 +395,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_0_multi_use_0:
; GCN-NOT: v_med3_u32
-define void @s_test_umed3_i32_pat_0_multi_use_0(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_0_multi_use_0(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %x, i32 %y)
%tmp1 = call i32 @umax(i32 %x, i32 %y)
@@ -406,7 +408,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_0_multi_use_1:
; GCN-NOT: v_med3_u32
-define void @s_test_umed3_i32_pat_0_multi_use_1(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_0_multi_use_1(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %x, i32 %y)
%tmp1 = call i32 @umax(i32 %x, i32 %y)
@@ -419,7 +421,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_0_multi_use_2:
; GCN-NOT: v_med3_u32
-define void @s_test_umed3_i32_pat_0_multi_use_2(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_0_multi_use_2(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %x, i32 %y)
%tmp1 = call i32 @umax(i32 %x, i32 %y)
@@ -432,7 +434,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_0_multi_use_result:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_0_multi_use_result(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_0_multi_use_result(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %x, i32 %y)
%tmp1 = call i32 @umax(i32 %x, i32 %y)
@@ -445,7 +447,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_0_imm_src0:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, 1, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_0_imm_src0(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_0_imm_src0(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 1, i32 %y)
%tmp1 = call i32 @umax(i32 1, i32 %y)
@@ -457,7 +459,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_0_imm_src1:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, 2, v{{[0-9]+}}
-define void @s_test_umed3_i32_pat_0_imm_src1(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_0_imm_src1(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %x, i32 2)
%tmp1 = call i32 @umax(i32 %x, i32 2)
@@ -469,7 +471,7 @@ bb:
; GCN-LABEL: {{^}}s_test_umed3_i32_pat_0_imm_src2:
; GCN: v_med3_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, 9
-define void @s_test_umed3_i32_pat_0_imm_src2(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
+define amdgpu_kernel void @s_test_umed3_i32_pat_0_imm_src2(i32 addrspace(1)* %arg, i32 %x, i32 %y, i32 %z) #1 {
bb:
%tmp0 = call i32 @umin(i32 %x, i32 %y)
%tmp1 = call i32 @umax(i32 %x, i32 %y)
@@ -479,6 +481,35 @@ bb:
ret void
}
+; GCN-LABEL: {{^}}v_test_umed3_i16_pat_0:
+; SI: v_med3_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+
+; FIXME: VI not matching med3
+; VI: v_min_u16
+; VI: v_max_u16
+; VI: v_min_u16
+; VI: v_max_u16
+
+; GFX9: v_med3_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @v_test_umed3_i16_pat_0(i16 addrspace(1)* %arg, i16 addrspace(1)* %out, i16 addrspace(1)* %a.ptr) #1 {
+bb:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr inbounds i16, i16 addrspace(1)* %a.ptr, i32 %tid
+ %gep1 = getelementptr inbounds i16, i16 addrspace(1)* %gep0, i32 3
+ %gep2 = getelementptr inbounds i16, i16 addrspace(1)* %gep0, i32 8
+ %out.gep = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
+ %x = load i16, i16 addrspace(1)* %gep0
+ %y = load i16, i16 addrspace(1)* %gep1
+ %z = load i16, i16 addrspace(1)* %gep2
+
+ %tmp0 = call i16 @umin16(i16 %x, i16 %y)
+ %tmp1 = call i16 @umax16(i16 %x, i16 %y)
+ %tmp2 = call i16 @umin16(i16 %tmp1, i16 %z)
+ %tmp3 = call i16 @umax16(i16 %tmp0, i16 %tmp2)
+ store i16 %tmp3, i16 addrspace(1)* %out.gep
+ ret void
+}
+
attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }
attributes #2 = { nounwind readnone alwaysinline }
diff --git a/test/CodeGen/AMDGPU/unaligned-load-store.ll b/test/CodeGen/AMDGPU/unaligned-load-store.ll
index 0f76a54975e6..68aacd084bf9 100644
--- a/test/CodeGen/AMDGPU/unaligned-load-store.ll
+++ b/test/CodeGen/AMDGPU/unaligned-load-store.ll
@@ -8,7 +8,7 @@
; SI: ds_write_b8
; SI: ds_write_b8
; SI: s_endpgm
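;
; Conceptually the align-1 i16 access is legalized into byte operations,
; roughly (pseudo-IR sketch, not the exact lowering):
;   %lo = load i8, i8 addrspace(3)* %p, align 1        ; ds_read_u8
;   %hi = load i8, i8 addrspace(3)* (%p + 1), align 1  ; ds_read_u8
;   %v  = or i16 (shl (zext %hi), 8), (zext %lo)
; and the store splits the value the same way into the two ds_write_b8 above.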
-define void @local_unaligned_load_store_i16(i16 addrspace(3)* %p, i16 addrspace(3)* %r) #0 {
+define amdgpu_kernel void @local_unaligned_load_store_i16(i16 addrspace(3)* %p, i16 addrspace(3)* %r) #0 {
%v = load i16, i16 addrspace(3)* %p, align 1
store i16 %v, i16 addrspace(3)* %r, align 1
ret void
@@ -23,7 +23,7 @@ define void @local_unaligned_load_store_i16(i16 addrspace(3)* %p, i16 addrspace(
; UNALIGNED: buffer_load_ushort
; UNALIGNED: buffer_store_short
; SI: s_endpgm
-define void @global_unaligned_load_store_i16(i16 addrspace(1)* %p, i16 addrspace(1)* %r) #0 {
+define amdgpu_kernel void @global_unaligned_load_store_i16(i16 addrspace(1)* %p, i16 addrspace(1)* %r) #0 {
%v = load i16, i16 addrspace(1)* %p, align 1
store i16 %v, i16 addrspace(1)* %r, align 1
ret void
@@ -42,7 +42,7 @@ define void @global_unaligned_load_store_i16(i16 addrspace(1)* %p, i16 addrspace
; SI: ds_write_b8
; SI: ds_write_b8
; SI: s_endpgm
-define void @local_unaligned_load_store_i32(i32 addrspace(3)* %p, i32 addrspace(3)* %r) #0 {
+define amdgpu_kernel void @local_unaligned_load_store_i32(i32 addrspace(3)* %p, i32 addrspace(3)* %r) #0 {
%v = load i32, i32 addrspace(3)* %p, align 1
store i32 %v, i32 addrspace(3)* %r, align 1
ret void
@@ -60,7 +60,7 @@ define void @local_unaligned_load_store_i32(i32 addrspace(3)* %p, i32 addrspace(
; UNALIGNED: buffer_load_dword
; UNALIGNED: buffer_store_dword
-define void @global_unaligned_load_store_i32(i32 addrspace(1)* %p, i32 addrspace(1)* %r) #0 {
+define amdgpu_kernel void @global_unaligned_load_store_i32(i32 addrspace(1)* %p, i32 addrspace(1)* %r) #0 {
%v = load i32, i32 addrspace(1)* %p, align 1
store i32 %v, i32 addrspace(1)* %r, align 1
ret void
@@ -74,7 +74,7 @@ define void @global_unaligned_load_store_i32(i32 addrspace(1)* %p, i32 addrspace
; UNALIGNED: buffer_load_dword
; UNALIGNED: buffer_store_dword
-define void @global_align2_load_store_i32(i32 addrspace(1)* %p, i32 addrspace(1)* %r) #0 {
+define amdgpu_kernel void @global_align2_load_store_i32(i32 addrspace(1)* %p, i32 addrspace(1)* %r) #0 {
%v = load i32, i32 addrspace(1)* %p, align 2
store i32 %v, i32 addrspace(1)* %r, align 2
ret void
@@ -85,7 +85,7 @@ define void @global_align2_load_store_i32(i32 addrspace(1)* %p, i32 addrspace(1)
; GCN: ds_read_u16
; GCN: ds_write_b16
; GCN: ds_write_b16
-define void @local_align2_load_store_i32(i32 addrspace(3)* %p, i32 addrspace(3)* %r) #0 {
+define amdgpu_kernel void @local_align2_load_store_i32(i32 addrspace(3)* %p, i32 addrspace(3)* %r) #0 {
%v = load i32, i32 addrspace(3)* %p, align 2
store i32 %v, i32 addrspace(3)* %r, align 2
ret void
@@ -132,7 +132,7 @@ define void @local_align2_load_store_i32(i32 addrspace(3)* %p, i32 addrspace(3)*
; SI-NOT: v_lshl
; SI: ds_write_b8
; SI: s_endpgm
-define void @local_unaligned_load_store_i64(i64 addrspace(3)* %p, i64 addrspace(3)* %r) #0 {
+define amdgpu_kernel void @local_unaligned_load_store_i64(i64 addrspace(3)* %p, i64 addrspace(3)* %r) #0 {
%v = load i64, i64 addrspace(3)* %p, align 1
store i64 %v, i64 addrspace(3)* %r, align 1
ret void
@@ -179,7 +179,7 @@ define void @local_unaligned_load_store_i64(i64 addrspace(3)* %p, i64 addrspace(
; SI-NOT: v_lshl
; SI: ds_write_b8
; SI: s_endpgm
-define void @local_unaligned_load_store_v2i32(<2 x i32> addrspace(3)* %p, <2 x i32> addrspace(3)* %r) #0 {
+define amdgpu_kernel void @local_unaligned_load_store_v2i32(<2 x i32> addrspace(3)* %p, <2 x i32> addrspace(3)* %r) #0 {
%v = load <2 x i32>, <2 x i32> addrspace(3)* %p, align 1
store <2 x i32> %v, <2 x i32> addrspace(3)* %r, align 1
ret void
@@ -209,7 +209,7 @@ define void @local_unaligned_load_store_v2i32(<2 x i32> addrspace(3)* %p, <2 x i
; UNALIGNED: buffer_load_dwordx2
; UNALIGNED: buffer_store_dwordx2
-define void @global_align2_load_store_i64(i64 addrspace(1)* %p, i64 addrspace(1)* %r) #0 {
+define amdgpu_kernel void @global_align2_load_store_i64(i64 addrspace(1)* %p, i64 addrspace(1)* %r) #0 {
%v = load i64, i64 addrspace(1)* %p, align 2
store i64 %v, i64 addrspace(1)* %r, align 2
ret void
@@ -239,7 +239,7 @@ define void @global_align2_load_store_i64(i64 addrspace(1)* %p, i64 addrspace(1)
; UNALIGNED: buffer_load_dwordx2
; UNALIGNED: buffer_store_dwordx2
-define void @unaligned_load_store_i64_global(i64 addrspace(1)* %p, i64 addrspace(1)* %r) #0 {
+define amdgpu_kernel void @unaligned_load_store_i64_global(i64 addrspace(1)* %p, i64 addrspace(1)* %r) #0 {
%v = load i64, i64 addrspace(1)* %p, align 1
store i64 %v, i64 addrspace(1)* %r, align 1
ret void
@@ -286,7 +286,7 @@ define void @unaligned_load_store_i64_global(i64 addrspace(1)* %p, i64 addrspace
; GCN: ds_write_b8
; GCN: ds_write_b8
; GCN: s_endpgm
-define void @local_unaligned_load_store_v4i32(<4 x i32> addrspace(3)* %p, <4 x i32> addrspace(3)* %r) #0 {
+define amdgpu_kernel void @local_unaligned_load_store_v4i32(<4 x i32> addrspace(3)* %p, <4 x i32> addrspace(3)* %r) #0 {
%v = load <4 x i32>, <4 x i32> addrspace(3)* %p, align 1
store <4 x i32> %v, <4 x i32> addrspace(3)* %r, align 1
ret void
@@ -329,7 +329,7 @@ define void @local_unaligned_load_store_v4i32(<4 x i32> addrspace(3)* %p, <4 x i
; UNALIGNED: buffer_load_dwordx4
; UNALIGNED: buffer_store_dwordx4
-define void @global_unaligned_load_store_v4i32(<4 x i32> addrspace(1)* %p, <4 x i32> addrspace(1)* %r) #0 {
+define amdgpu_kernel void @global_unaligned_load_store_v4i32(<4 x i32> addrspace(1)* %p, <4 x i32> addrspace(1)* %r) #0 {
%v = load <4 x i32>, <4 x i32> addrspace(1)* %p, align 1
store <4 x i32> %v, <4 x i32> addrspace(1)* %r, align 1
ret void
@@ -337,7 +337,7 @@ define void @global_unaligned_load_store_v4i32(<4 x i32> addrspace(1)* %p, <4 x
; FUNC-LABEL: {{^}}local_load_i64_align_4:
; GCN: ds_read2_b32
-define void @local_load_i64_align_4(i64 addrspace(1)* nocapture %out, i64 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_i64_align_4(i64 addrspace(1)* nocapture %out, i64 addrspace(3)* %in) #0 {
%val = load i64, i64 addrspace(3)* %in, align 4
store i64 %val, i64 addrspace(1)* %out, align 8
ret void
@@ -345,7 +345,7 @@ define void @local_load_i64_align_4(i64 addrspace(1)* nocapture %out, i64 addrsp
; FUNC-LABEL: {{^}}local_load_i64_align_4_with_offset
; GCN: ds_read2_b32 v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]}} offset0:8 offset1:9
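;
; ds_read2 offsets are in dword (b32) units: getelementptr i64 4 is byte
; offset 32, i.e. dwords 8 and 9, hence offset0:8 offset1:9.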
-define void @local_load_i64_align_4_with_offset(i64 addrspace(1)* nocapture %out, i64 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_i64_align_4_with_offset(i64 addrspace(1)* nocapture %out, i64 addrspace(3)* %in) #0 {
%ptr = getelementptr i64, i64 addrspace(3)* %in, i32 4
%val = load i64, i64 addrspace(3)* %ptr, align 4
store i64 %val, i64 addrspace(1)* %out, align 8
@@ -356,7 +356,7 @@ define void @local_load_i64_align_4_with_offset(i64 addrspace(1)* nocapture %out
; Tests the case where the lo offset fits in 8 bits but the hi offset needs
; 9 bits, so the base address is adjusted and a small offset1 is used instead.
; GCN: ds_read2_b32 v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]}} offset1:1
; GCN: s_endpgm
-define void @local_load_i64_align_4_with_split_offset(i64 addrspace(1)* nocapture %out, i64 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_i64_align_4_with_split_offset(i64 addrspace(1)* nocapture %out, i64 addrspace(3)* %in) #0 {
%ptr = bitcast i64 addrspace(3)* %in to i32 addrspace(3)*
%ptr255 = getelementptr i32, i32 addrspace(3)* %ptr, i32 255
%ptri64 = bitcast i32 addrspace(3)* %ptr255 to i64 addrspace(3)*
@@ -375,7 +375,7 @@ define void @local_load_i64_align_4_with_split_offset(i64 addrspace(1)* nocaptur
; GCN: ds_read_u8
; GCN: ds_read_u8
; GCN: store_dwordx2
-define void @local_load_i64_align_1(i64 addrspace(1)* nocapture %out, i64 addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_i64_align_1(i64 addrspace(1)* nocapture %out, i64 addrspace(3)* %in) #0 {
%val = load i64, i64 addrspace(3)* %in, align 1
store i64 %val, i64 addrspace(1)* %out, align 8
ret void
@@ -383,7 +383,7 @@ define void @local_load_i64_align_1(i64 addrspace(1)* nocapture %out, i64 addrsp
; FUNC-LABEL: {{^}}local_store_i64_align_4:
; GCN: ds_write2_b32
-define void @local_store_i64_align_4(i64 addrspace(3)* %out, i64 %val) #0 {
+define amdgpu_kernel void @local_store_i64_align_4(i64 addrspace(3)* %out, i64 %val) #0 {
store i64 %val, i64 addrspace(3)* %out, align 4
ret void
}
@@ -391,7 +391,7 @@ define void @local_store_i64_align_4(i64 addrspace(3)* %out, i64 %val) #0 {
; FUNC-LABEL: {{^}}local_store_i64_align_4_with_offset
; GCN: ds_write2_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset0:8 offset1:9
; GCN: s_endpgm
-define void @local_store_i64_align_4_with_offset(i64 addrspace(3)* %out) #0 {
+define amdgpu_kernel void @local_store_i64_align_4_with_offset(i64 addrspace(3)* %out) #0 {
%ptr = getelementptr i64, i64 addrspace(3)* %out, i32 4
store i64 0, i64 addrspace(3)* %ptr, align 4
ret void
@@ -401,7 +401,7 @@ define void @local_store_i64_align_4_with_offset(i64 addrspace(3)* %out) #0 {
; Tests the case where the lo offset fits in 8 bits but the hi offset needs
; 9 bits, so the base address is adjusted and a small offset1 is used instead.
; GCN: ds_write2_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset1:1
; GCN: s_endpgm
-define void @local_store_i64_align_4_with_split_offset(i64 addrspace(3)* %out) #0 {
+define amdgpu_kernel void @local_store_i64_align_4_with_split_offset(i64 addrspace(3)* %out) #0 {
%ptr = bitcast i64 addrspace(3)* %out to i32 addrspace(3)*
%ptr255 = getelementptr i32, i32 addrspace(3)* %ptr, i32 255
%ptri64 = bitcast i32 addrspace(3)* %ptr255 to i64 addrspace(3)*
@@ -418,7 +418,7 @@ define void @local_store_i64_align_4_with_split_offset(i64 addrspace(3)* %out) #
; UNALIGNED: s_load_dword
; SI: buffer_store_dword
-define void @constant_unaligned_load_i32(i32 addrspace(2)* %p, i32 addrspace(1)* %r) #0 {
+define amdgpu_kernel void @constant_unaligned_load_i32(i32 addrspace(2)* %p, i32 addrspace(1)* %r) #0 {
%v = load i32, i32 addrspace(2)* %p, align 1
store i32 %v, i32 addrspace(1)* %r, align 4
ret void
@@ -430,7 +430,7 @@ define void @constant_unaligned_load_i32(i32 addrspace(2)* %p, i32 addrspace(1)*
; UNALIGNED: s_load_dword
; UNALIGNED: buffer_store_dword
-define void @constant_align2_load_i32(i32 addrspace(2)* %p, i32 addrspace(1)* %r) #0 {
+define amdgpu_kernel void @constant_align2_load_i32(i32 addrspace(2)* %p, i32 addrspace(1)* %r) #0 {
%v = load i32, i32 addrspace(2)* %p, align 2
store i32 %v, i32 addrspace(1)* %r, align 4
ret void
@@ -444,7 +444,7 @@ define void @constant_align2_load_i32(i32 addrspace(2)* %p, i32 addrspace(1)* %r
; UNALIGNED: s_load_dwordx2
; UNALIGNED: buffer_store_dwordx2
-define void @constant_align2_load_i64(i64 addrspace(2)* %p, i64 addrspace(1)* %r) #0 {
+define amdgpu_kernel void @constant_align2_load_i64(i64 addrspace(2)* %p, i64 addrspace(1)* %r) #0 {
%v = load i64, i64 addrspace(2)* %p, align 2
store i64 %v, i64 addrspace(1)* %r, align 4
ret void
@@ -453,7 +453,7 @@ define void @constant_align2_load_i64(i64 addrspace(2)* %p, i64 addrspace(1)* %r
; SI-LABEL: {{^}}constant_align4_load_i64:
; SI: s_load_dwordx2
; SI: buffer_store_dwordx2
-define void @constant_align4_load_i64(i64 addrspace(2)* %p, i64 addrspace(1)* %r) #0 {
+define amdgpu_kernel void @constant_align4_load_i64(i64 addrspace(2)* %p, i64 addrspace(1)* %r) #0 {
%v = load i64, i64 addrspace(2)* %p, align 4
store i64 %v, i64 addrspace(1)* %r, align 4
ret void
@@ -462,7 +462,7 @@ define void @constant_align4_load_i64(i64 addrspace(2)* %p, i64 addrspace(1)* %r
; SI-LABEL: {{^}}constant_align4_load_v4i32:
; SI: s_load_dwordx4
; SI: buffer_store_dwordx4
-define void @constant_align4_load_v4i32(<4 x i32> addrspace(2)* %p, <4 x i32> addrspace(1)* %r) #0 {
+define amdgpu_kernel void @constant_align4_load_v4i32(<4 x i32> addrspace(2)* %p, <4 x i32> addrspace(1)* %r) #0 {
%v = load <4 x i32>, <4 x i32> addrspace(2)* %p, align 4
store <4 x i32> %v, <4 x i32> addrspace(1)* %r, align 4
ret void
@@ -482,7 +482,7 @@ define void @constant_align4_load_v4i32(<4 x i32> addrspace(2)* %p, <4 x i32> ad
; UNALIGNED: buffer_load_dwordx2
; SI: buffer_store_dwordx2
-define void @constant_unaligned_load_v2i32(<2 x i32> addrspace(2)* %p, <2 x i32> addrspace(1)* %r) #0 {
+define amdgpu_kernel void @constant_unaligned_load_v2i32(<2 x i32> addrspace(2)* %p, <2 x i32> addrspace(1)* %r) #0 {
%v = load <2 x i32>, <2 x i32> addrspace(2)* %p, align 1
store <2 x i32> %v, <2 x i32> addrspace(1)* %r, align 4
ret void
@@ -512,7 +512,7 @@ define void @constant_unaligned_load_v2i32(<2 x i32> addrspace(2)* %p, <2 x i32>
; UNALIGNED: buffer_load_dwordx4
; SI: buffer_store_dwordx4
-define void @constant_unaligned_load_v4i32(<4 x i32> addrspace(2)* %p, <4 x i32> addrspace(1)* %r) #0 {
+define amdgpu_kernel void @constant_unaligned_load_v4i32(<4 x i32> addrspace(2)* %p, <4 x i32> addrspace(1)* %r) #0 {
%v = load <4 x i32>, <4 x i32> addrspace(2)* %p, align 1
store <4 x i32> %v, <4 x i32> addrspace(1)* %r, align 4
ret void
@@ -521,7 +521,7 @@ define void @constant_unaligned_load_v4i32(<4 x i32> addrspace(2)* %p, <4 x i32>
; SI-LABEL: {{^}}constant_align4_load_i8:
; SI: buffer_load_ubyte
; SI: buffer_store_byte
-define void @constant_align4_load_i8(i8 addrspace(2)* %p, i8 addrspace(1)* %r) #0 {
+define amdgpu_kernel void @constant_align4_load_i8(i8 addrspace(2)* %p, i8 addrspace(1)* %r) #0 {
%v = load i8, i8 addrspace(2)* %p, align 4
store i8 %v, i8 addrspace(1)* %r, align 4
ret void
@@ -530,7 +530,7 @@ define void @constant_align4_load_i8(i8 addrspace(2)* %p, i8 addrspace(1)* %r) #
; SI-LABEL: {{^}}constant_align2_load_i8:
; SI: buffer_load_ubyte
; SI: buffer_store_byte
-define void @constant_align2_load_i8(i8 addrspace(2)* %p, i8 addrspace(1)* %r) #0 {
+define amdgpu_kernel void @constant_align2_load_i8(i8 addrspace(2)* %p, i8 addrspace(1)* %r) #0 {
%v = load i8, i8 addrspace(2)* %p, align 2
store i8 %v, i8 addrspace(1)* %r, align 2
ret void
@@ -541,7 +541,7 @@ define void @constant_align2_load_i8(i8 addrspace(2)* %p, i8 addrspace(1)* %r) #
; SI-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], s[[LO]]
; SI-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[HI]]
; SI: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
-define void @constant_align4_merge_load_2_i32(i32 addrspace(2)* %p, i32 addrspace(1)* %r) #0 {
+define amdgpu_kernel void @constant_align4_merge_load_2_i32(i32 addrspace(2)* %p, i32 addrspace(1)* %r) #0 {
%gep0 = getelementptr i32, i32 addrspace(2)* %p, i64 1
%v0 = load i32, i32 addrspace(2)* %p, align 4
%v1 = load i32, i32 addrspace(2)* %gep0, align 4
@@ -571,7 +571,7 @@ define void @constant_align4_merge_load_2_i32(i32 addrspace(2)* %p, i32 addrspac
; SI: ds_read_u8
; SI: ScratchSize: 0{{$}}
-define void @local_load_align1_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8> addrspace(3)* %in) #0 {
+define amdgpu_kernel void @local_load_align1_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8> addrspace(3)* %in) #0 {
%ld = load <16 x i8>, <16 x i8> addrspace(3)* %in, align 1
store <16 x i8> %ld, <16 x i8> addrspace(1)* %out
ret void
@@ -596,7 +596,7 @@ define void @local_load_align1_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8> add
; SI: ds_write_b8
; SI: ScratchSize: 0{{$}}
-define void @local_store_align1_v16i8(<16 x i8> addrspace(3)* %out) #0 {
+define amdgpu_kernel void @local_store_align1_v16i8(<16 x i8> addrspace(3)* %out) #0 {
store <16 x i8> zeroinitializer, <16 x i8> addrspace(3)* %out, align 1
ret void
}
diff --git a/test/CodeGen/AMDGPU/undefined-subreg-liverange.ll b/test/CodeGen/AMDGPU/undefined-subreg-liverange.ll
index 4902e9a3cafb..3e80fcf85b52 100644
--- a/test/CodeGen/AMDGPU/undefined-subreg-liverange.ll
+++ b/test/CodeGen/AMDGPU/undefined-subreg-liverange.ll
@@ -4,7 +4,7 @@
; CHECK-LABEL: {{^}}func:
-define void @func() #0 {
+define amdgpu_kernel void @func() #0 {
B0:
br i1 undef, label %B1, label %B2
@@ -35,7 +35,8 @@ bb:
%tmp1 = load volatile i32, i32 addrspace(1)* undef, align 4
%tmp2 = insertelement <4 x i32> undef, i32 %tmp1, i32 0
%tmp3 = insertelement <4 x i32> %tmp2, i32 %tmp1, i32 1
- %tmp4 = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %tmp3, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp3.cast = bitcast <4 x i32> %tmp3 to <4 x float>
+ %tmp4 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %tmp3.cast, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp5 = extractelement <4 x float> %tmp4, i32 0
%tmp6 = fmul float %tmp5, undef
%tmp7 = fadd float %tmp6, %tmp6
@@ -71,7 +72,7 @@ bb11: ; preds = %bb9
; CHECK: v_mov_b32_e32 v[[OUTPUT_LO]], v6
; CHECK: buffer_store_dwordx4 v{{\[}}[[OUTPUT_LO]]:[[OUTPUT_HI]]{{\]}}
-define void @partially_undef_copy() #0 {
+define amdgpu_kernel void @partially_undef_copy() #0 {
%tmp0 = call i32 asm sideeffect "v_mov_b32_e32 v5, 5", "={VGPR5}"()
%tmp1 = call i32 asm sideeffect "v_mov_b32_e32 v6, 6", "={VGPR6}"()
@@ -83,8 +84,7 @@ define void @partially_undef_copy() #0 {
ret void
}
-declare <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
-declare float @llvm.SI.image.sample.i32(i32, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #1
attributes #0 = { nounwind }
-attributes #1 = { nounwind readnone }
+attributes #1 = { nounwind readonly }
diff --git a/test/CodeGen/AMDGPU/unhandled-loop-condition-assertion.ll b/test/CodeGen/AMDGPU/unhandled-loop-condition-assertion.ll
index d96ee6d21ce8..60ab7631a101 100644
--- a/test/CodeGen/AMDGPU/unhandled-loop-condition-assertion.ll
+++ b/test/CodeGen/AMDGPU/unhandled-loop-condition-assertion.ll
@@ -5,7 +5,7 @@
; SI hits an assertion at -O0; evergreen hits a "not implemented" unreachable.
; COMMON-LABEL: {{^}}branch_true:
-define void @branch_true(i8 addrspace(1)* nocapture %main, i32 %main_stride) #0 {
+define amdgpu_kernel void @branch_true(i8 addrspace(1)* nocapture %main, i32 %main_stride) #0 {
entry:
br i1 true, label %for.end, label %for.body.lr.ph
@@ -42,7 +42,7 @@ for.end: ; preds = %for.body, %entry
; SI: s_cbranch_vccnz
; SI: s_cbranch_scc1
; SI: s_endpgm
-define void @branch_false(i8 addrspace(1)* nocapture %main, i32 %main_stride) #0 {
+define amdgpu_kernel void @branch_false(i8 addrspace(1)* nocapture %main, i32 %main_stride) #0 {
entry:
br i1 false, label %for.end, label %for.body.lr.ph
@@ -79,7 +79,7 @@ for.end: ; preds = %for.body, %entry
; SI: s_cbranch_scc1
; SI: s_cbranch_scc1
; SI: s_endpgm
-define void @branch_undef(i8 addrspace(1)* nocapture %main, i32 %main_stride) #0 {
+define amdgpu_kernel void @branch_undef(i8 addrspace(1)* nocapture %main, i32 %main_stride) #0 {
entry:
br i1 undef, label %for.end, label %for.body.lr.ph
diff --git a/test/CodeGen/AMDGPU/uniform-branch-intrinsic-cond.ll b/test/CodeGen/AMDGPU/uniform-branch-intrinsic-cond.ll
index 93a2c6998be4..eb6007f21c10 100644
--- a/test/CodeGen/AMDGPU/uniform-branch-intrinsic-cond.ll
+++ b/test/CodeGen/AMDGPU/uniform-branch-intrinsic-cond.ll
@@ -14,6 +14,7 @@ main_body:
if:
%u = fadd float %v, %v
+ call void asm sideeffect "", ""() #0 ; Prevent ifconversion
br label %else
else:
diff --git a/test/CodeGen/AMDGPU/uniform-cfg.ll b/test/CodeGen/AMDGPU/uniform-cfg.ll
index 154ac361e797..a9d45d71fa2e 100644
--- a/test/CodeGen/AMDGPU/uniform-cfg.ll
+++ b/test/CodeGen/AMDGPU/uniform-cfg.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -mcpu=verde -machine-sink-split-probability-threshold=0 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -machine-sink-split-probability-threshold=0 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mcpu=verde -amdgpu-early-ifcvt=0 -machine-sink-split-probability-threshold=0 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -amdgpu-early-ifcvt=0 -machine-sink-split-probability-threshold=0 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; GCN-LABEL: {{^}}uniform_if_scc:
; GCN-DAG: s_cmp_eq_u32 s{{[0-9]+}}, 0
@@ -12,7 +12,7 @@
; GCN: [[IF_LABEL]]:
; GCN: v_mov_b32_e32 [[V_VAL:v[0-9]+]], [[S_VAL]]
; GCN: buffer_store_dword [[V_VAL]]
-define void @uniform_if_scc(i32 %cond, i32 addrspace(1)* %out) {
+define amdgpu_kernel void @uniform_if_scc(i32 %cond, i32 addrspace(1)* %out) {
entry:
%cmp0 = icmp eq i32 %cond, 0
br i1 %cmp0, label %if, label %else
@@ -40,7 +40,7 @@ done:
; GCN: [[IF_LABEL]]:
; GCN: v_mov_b32_e32 [[V_VAL:v[0-9]+]], [[S_VAL]]
; GCN: buffer_store_dword [[V_VAL]]
-define void @uniform_if_vcc(float %cond, i32 addrspace(1)* %out) {
+define amdgpu_kernel void @uniform_if_vcc(float %cond, i32 addrspace(1)* %out) {
entry:
%cmp0 = fcmp oeq float %cond, 0.0
br i1 %cmp0, label %if, label %else
@@ -68,7 +68,7 @@ done:
; GCN: [[IF_LABEL]]:
; GCN: v_mov_b32_e32 [[V_VAL:v[0-9]+]], [[S_VAL]]
; GCN: buffer_store_dword [[V_VAL]]
-define void @uniform_if_swap_br_targets_scc(i32 %cond, i32 addrspace(1)* %out) {
+define amdgpu_kernel void @uniform_if_swap_br_targets_scc(i32 %cond, i32 addrspace(1)* %out) {
entry:
%cmp0 = icmp eq i32 %cond, 0
br i1 %cmp0, label %else, label %if
@@ -96,7 +96,7 @@ done:
; GCN: [[IF_LABEL]]:
; GCN: v_mov_b32_e32 [[V_VAL:v[0-9]+]], [[S_VAL]]
; GCN: buffer_store_dword [[V_VAL]]
-define void @uniform_if_swap_br_targets_vcc(float %cond, i32 addrspace(1)* %out) {
+define amdgpu_kernel void @uniform_if_swap_br_targets_vcc(float %cond, i32 addrspace(1)* %out) {
entry:
%cmp0 = fcmp oeq float %cond, 0.0
br i1 %cmp0, label %else, label %if
@@ -123,7 +123,7 @@ done:
; GCN: buffer_store_dword
; GCN: [[ENDIF_LABEL]]:
; GCN: s_endpgm
-define void @uniform_if_move_valu(i32 addrspace(1)* %out, float %a) {
+define amdgpu_kernel void @uniform_if_move_valu(i32 addrspace(1)* %out, float %a) {
entry:
%a.0 = fadd float %a, 10.0
%cond = bitcast float %a.0 to i32
@@ -148,7 +148,7 @@ endif:
; GCN: buffer_store_dword
; GCN: [[ENDIF_LABEL]]:
; GCN: s_endpgm
-define void @uniform_if_move_valu_commute(i32 addrspace(1)* %out, float %a) {
+define amdgpu_kernel void @uniform_if_move_valu_commute(i32 addrspace(1)* %out, float %a) {
entry:
%a.0 = fadd float %a, 10.0
%cond = bitcast float %a.0 to i32
@@ -166,7 +166,7 @@ endif:
; GCN-LABEL: {{^}}uniform_if_else_ret:
; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; GCN-NEXT: s_cbranch_scc0 [[IF_LABEL:[0-9_A-Za-z]+]]
+; GCN: s_cbranch_scc0 [[IF_LABEL:[0-9_A-Za-z]+]]
; GCN: v_mov_b32_e32 [[TWO:v[0-9]+]], 2
; GCN: buffer_store_dword [[TWO]]
@@ -176,7 +176,7 @@ endif:
; GCN: v_mov_b32_e32 [[ONE:v[0-9]+]], 1
; GCN: buffer_store_dword [[ONE]]
; GCN: s_endpgm
-define void @uniform_if_else_ret(i32 addrspace(1)* nocapture %out, i32 %a) {
+define amdgpu_kernel void @uniform_if_else_ret(i32 addrspace(1)* nocapture %out, i32 %a) {
entry:
%cmp = icmp eq i32 %a, 0
br i1 %cmp, label %if.then, label %if.else
@@ -209,7 +209,7 @@ if.end: ; preds = %if.else, %if.then
; GCN: v_mov_b32_e32 [[THREE:v[0-9]+]], 3
; GCN: buffer_store_dword [[THREE]]
; GCN: s_endpgm
-define void @uniform_if_else(i32 addrspace(1)* nocapture %out0, i32 addrspace(1)* nocapture %out1, i32 %a) {
+define amdgpu_kernel void @uniform_if_else(i32 addrspace(1)* nocapture %out0, i32 addrspace(1)* nocapture %out1, i32 %a) {
entry:
%cmp = icmp eq i32 %a, 0
br i1 %cmp, label %if.then, label %if.else
@@ -233,7 +233,7 @@ if.end: ; preds = %if.else, %if.then
; GCN: buffer_store_dword
; GCN: [[LABEL]]:
; GCN: s_endpgm
-define void @icmp_2_users(i32 addrspace(1)* %out, i32 %cond) {
+define amdgpu_kernel void @icmp_2_users(i32 addrspace(1)* %out, i32 %cond) {
main_body:
%0 = icmp sgt i32 %cond, 0
%1 = sext i1 %0 to i32
@@ -252,11 +252,13 @@ ENDIF: ; preds = %IF, %main_body
; GCN: s_cmp_lt_i32 [[COND]], 1
; GCN: s_cbranch_scc1 [[EXIT:[A-Za-z0-9_]+]]
; GCN: v_cmp_gt_i32_e64 vcc, [[COND]], 0{{$}}
-; GCN: s_cbranch_vccnz [[EXIT]]
-; GCN: buffer_store
+; GCN: s_cbranch_vccz [[BODY:[A-Za-z0-9_]+]]
; GCN: {{^}}[[EXIT]]:
; GCN: s_endpgm
-define void @icmp_users_different_blocks(i32 %cond0, i32 %cond1, i32 addrspace(1)* %out) {
+; GCN: {{^}}[[BODY]]:
+; GCN: buffer_store
+; GCN: s_endpgm
+define amdgpu_kernel void @icmp_users_different_blocks(i32 %cond0, i32 %cond1, i32 addrspace(1)* %out) {
bb:
%tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%cmp0 = icmp sgt i32 %cond0, 0
@@ -282,7 +284,7 @@ bb9: ; preds = %bb8, %bb4
; SI: s_cmp_lg_u32 [[I]], 0
; SI: s_cbranch_scc1 [[LOOP_LABEL]]
; SI: s_endpgm
-define void @uniform_loop(i32 addrspace(1)* %out, i32 %a) {
+define amdgpu_kernel void @uniform_loop(i32 addrspace(1)* %out, i32 %a) {
entry:
br label %loop
@@ -302,12 +304,13 @@ done:
; GCN: v_cmp_gt_u32_e32 vcc, 16, v{{[0-9]+}}
; GCN: s_and_saveexec_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], vcc
; GCN: s_xor_b64 [[MASK1:s\[[0-9]+:[0-9]+\]]], exec, [[MASK]]
-; GCN: s_cbranch_execz [[ENDIF_LABEL:[0-9_A-Za-z]+]]
; GCN: s_cmp_lg_u32 {{s[0-9]+}}, 0
-; GCN: s_cbranch_scc1 [[ENDIF_LABEL]]
+; GCN: s_cbranch_scc0 [[IF_UNIFORM_LABEL:[A-Z0-9_a-z]+]]
+; GCN: s_endpgm
+; GCN: {{^}}[[IF_UNIFORM_LABEL]]:
; GCN: v_mov_b32_e32 [[ONE:v[0-9]+]], 1
; GCN: buffer_store_dword [[ONE]]
-define void @uniform_inside_divergent(i32 addrspace(1)* %out, i32 %cond) {
+define amdgpu_kernel void @uniform_inside_divergent(i32 addrspace(1)* %out, i32 %cond) {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%d_cmp = icmp ult i32 %tid, 16
@@ -328,15 +331,14 @@ endif:
; GCN-LABEL: {{^}}divergent_inside_uniform:
; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; GCN: s_cbranch_scc1 [[ENDIF_LABEL:[0-9_A-Za-z]+]]
+; GCN: s_cbranch_scc0 [[IF_LABEL:[0-9_A-Za-z]+]]
+; GCN: [[IF_LABEL]]:
; GCN: v_cmp_gt_u32_e32 vcc, 16, v{{[0-9]+}}
; GCN: s_and_saveexec_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], vcc
; GCN: s_xor_b64 [[MASK1:s\[[0-9]+:[0-9]+\]]], exec, [[MASK]]
; GCN: v_mov_b32_e32 [[ONE:v[0-9]+]], 1
; GCN: buffer_store_dword [[ONE]]
-; GCN: [[ENDIF_LABEL]]:
-; GCN: s_endpgm
-define void @divergent_inside_uniform(i32 addrspace(1)* %out, i32 %cond) {
+define amdgpu_kernel void @divergent_inside_uniform(i32 addrspace(1)* %out, i32 %cond) {
entry:
%u_cmp = icmp eq i32 %cond, 0
br i1 %u_cmp, label %if, label %endif
@@ -363,12 +365,12 @@ endif:
; GCN: buffer_store_dword [[ONE]]
; GCN: s_or_b64 exec, exec, [[MASK]]
; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; GCN: s_cbranch_scc1 [[EXIT:[A-Z0-9_]+]]
+; GCN: s_cbranch_scc0 [[IF_UNIFORM:[A-Z0-9_]+]]
+; GCN: s_endpgm
+; GCN: [[IF_UNIFORM]]:
; GCN: v_mov_b32_e32 [[TWO:v[0-9]+]], 2
; GCN: buffer_store_dword [[TWO]]
-; GCN: [[EXIT]]:
-; GCN: s_endpgm
-define void @divergent_if_uniform_if(i32 addrspace(1)* %out, i32 %cond) {
+define amdgpu_kernel void @divergent_if_uniform_if(i32 addrspace(1)* %out, i32 %cond) {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x() #0
%d_cmp = icmp eq i32 %tid, 0
@@ -408,7 +410,7 @@ exit:
; GCN: BB[[FNNUM]]_3:
; GCN: s_endpgm
-define void @cse_uniform_condition_different_blocks(i32 %cond, i32 addrspace(1)* %out) {
+define amdgpu_kernel void @cse_uniform_condition_different_blocks(i32 %cond, i32 addrspace(1)* %out) {
bb:
%tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%tmp1 = icmp sgt i32 %cond, 0
@@ -443,7 +445,7 @@ bb9: ; preds = %bb8, %bb4
; GCN: [[IF_LABEL]]:
; GCN: v_mov_b32_e32 [[V_VAL:v[0-9]+]], [[S_VAL]]
; GCN: buffer_store_dword [[V_VAL]]
-define void @uniform_if_scc_i64_eq(i64 %cond, i32 addrspace(1)* %out) {
+define amdgpu_kernel void @uniform_if_scc_i64_eq(i64 %cond, i32 addrspace(1)* %out) {
entry:
%cmp0 = icmp eq i64 %cond, 0
br i1 %cmp0, label %if, label %else
@@ -475,7 +477,7 @@ done:
; GCN: [[IF_LABEL]]:
; GCN: v_mov_b32_e32 [[V_VAL:v[0-9]+]], [[S_VAL]]
; GCN: buffer_store_dword [[V_VAL]]
-define void @uniform_if_scc_i64_ne(i64 %cond, i32 addrspace(1)* %out) {
+define amdgpu_kernel void @uniform_if_scc_i64_ne(i64 %cond, i32 addrspace(1)* %out) {
entry:
%cmp0 = icmp ne i64 %cond, 0
br i1 %cmp0, label %if, label %else
@@ -503,7 +505,7 @@ done:
; GCN: [[IF_LABEL]]:
; GCN: v_mov_b32_e32 [[V_VAL]], [[S_VAL]]
; GCN: buffer_store_dword [[V_VAL]]
-define void @uniform_if_scc_i64_sgt(i64 %cond, i32 addrspace(1)* %out) {
+define amdgpu_kernel void @uniform_if_scc_i64_sgt(i64 %cond, i32 addrspace(1)* %out) {
entry:
%cmp0 = icmp sgt i64 %cond, 0
br i1 %cmp0, label %if, label %else
@@ -522,7 +524,7 @@ done:
; GCN-LABEL: {{^}}move_to_valu_i64_eq:
; GCN: v_cmp_eq_u64_e32
-define void @move_to_valu_i64_eq(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @move_to_valu_i64_eq(i32 addrspace(1)* %out) {
%cond = load volatile i64, i64 addrspace(3)* undef
%cmp0 = icmp eq i64 %cond, 0
br i1 %cmp0, label %if, label %else
@@ -541,7 +543,7 @@ done:
; GCN-LABEL: {{^}}move_to_valu_i64_ne:
; GCN: v_cmp_ne_u64_e32
-define void @move_to_valu_i64_ne(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @move_to_valu_i64_ne(i32 addrspace(1)* %out) {
%cond = load volatile i64, i64 addrspace(3)* undef
%cmp0 = icmp ne i64 %cond, 0
br i1 %cmp0, label %if, label %else
diff --git a/test/CodeGen/AMDGPU/uniform-crash.ll b/test/CodeGen/AMDGPU/uniform-crash.ll
index cfbb2af58677..028199ef9de7 100644
--- a/test/CodeGen/AMDGPU/uniform-crash.ll
+++ b/test/CodeGen/AMDGPU/uniform-crash.ll
@@ -6,7 +6,7 @@
; GCN: s_cbranch_scc1 [[LABEL:BB[0-9_A-Z]+]]
; GCN: [[LABEL]]:
; GCN-NEXT: s_endpgm
-define void @icmp_2_users(i32 addrspace(1)* %out, i32 %cond) {
+define amdgpu_kernel void @icmp_2_users(i32 addrspace(1)* %out, i32 %cond) {
main_body:
%0 = icmp sgt i32 %cond, 0
%1 = sext i1 %0 to i32
@@ -25,7 +25,7 @@ ENDIF: ; preds = %IF, %main_body
; GCN: {{^}}[[LOOP:[A-Z0-9_]+]]:
; GCN: s_cbranch_scc1 [[LOOP]]
; GCN: {{^}}[[BB0]]:
-define void @fix_sgpr_live_ranges_crash(i32 %arg, i32 %arg1) {
+define amdgpu_kernel void @fix_sgpr_live_ranges_crash(i32 %arg, i32 %arg1) {
bb:
%cnd = trunc i32 %arg to i1
br i1 %cnd, label %bb2, label %bb5
diff --git a/test/CodeGen/AMDGPU/uniform-loop-inside-nonuniform.ll b/test/CodeGen/AMDGPU/uniform-loop-inside-nonuniform.ll
index 2c3a09818860..8a08f9d8bb0d 100644
--- a/test/CodeGen/AMDGPU/uniform-loop-inside-nonuniform.ll
+++ b/test/CodeGen/AMDGPU/uniform-loop-inside-nonuniform.ll
@@ -7,11 +7,11 @@
; CHECK: s_and_saveexec_b64
; CHECK-NEXT: s_xor_b64
; CHECK-NEXT: ; mask branch
-
+; CHECK-NEXT: s_cbranch_execz BB{{[0-9]+_[0-9]+}}
; CHECK-NEXT: BB{{[0-9]+_[0-9]+}}: ; %loop_body.preheader
; CHECK: [[LOOP_BODY_LABEL:BB[0-9]+_[0-9]+]]:
-; CHECK: s_cbranch_scc0 [[LOOP_BODY_LABEL]]
+; CHECK: s_cbranch_vccz [[LOOP_BODY_LABEL]]
; CHECK: s_endpgm
define amdgpu_ps void @test1(<8 x i32> inreg %rsrc, <2 x i32> %addr.base, i32 %y, i32 %p) {
@@ -38,7 +38,7 @@ out:
; CHECK-NEXT: s_xor_b64
; CHECK-NEXT: ; mask branch
; CHECK-NEXT: s_cbranch_execz
-define void @test2(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define amdgpu_kernel void @test2(i32 addrspace(1)* %out, i32 %a, i32 %b) {
main_body:
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%cc = icmp eq i32 %tid, 0
diff --git a/test/CodeGen/AMDGPU/unify-metadata.ll b/test/CodeGen/AMDGPU/unify-metadata.ll
index 9549b08ffee1..d96583e71f13 100644
--- a/test/CodeGen/AMDGPU/unify-metadata.ll
+++ b/test/CodeGen/AMDGPU/unify-metadata.ll
@@ -14,10 +14,6 @@
; ALL-DAG: ![[USED_EXT_1]] = !{!"cl_khr_fp16"}
; ALL-DAG: ![[USED_EXT_2]] = !{!"cl_doubles"}
-define void @test() {
- ret void
-}
-
!opencl.ocl.version = !{!1, !0, !0, !0}
!llvm.ident = !{!2, !2, !2, !2, !6}
!opencl.used.extensions = !{!3, !3, !4, !5}
diff --git a/test/CodeGen/AMDGPU/unigine-liveness-crash.ll b/test/CodeGen/AMDGPU/unigine-liveness-crash.ll
index 732790ceb335..853131baed5e 100644
--- a/test/CodeGen/AMDGPU/unigine-liveness-crash.ll
+++ b/test/CodeGen/AMDGPU/unigine-liveness-crash.ll
@@ -1,5 +1,4 @@
-; RUN: llc -march=amdgcn < %s | FileCheck %s
-; REQUIRES: asserts
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
;
; This test used to crash with the following assertion:
; llc: include/llvm/ADT/IntervalMap.h:632: unsigned int llvm::IntervalMapImpl::LeafNode<llvm::SlotIndex, llvm::LiveInterval *, 8, llvm::IntervalMapInfo<llvm::SlotIndex> >::insertFrom(unsigned int &, unsigned int, KeyT, KeyT, ValT) [KeyT = llvm::SlotIndex, ValT = llvm::LiveInterval *, N = 8, Traits = llvm::IntervalMapInfo<llvm::SlotIndex>]: Assertion `(i == Size || Traits::stopLess(b, start(i))) && "Overlapping insert"' failed.
@@ -10,31 +9,33 @@
;
; Check for a valid output.
; CHECK: image_sample_c
-
-target triple = "amdgcn--"
-
-@ddxy_lds = external addrspace(3) global [64 x i32]
-
define amdgpu_ps <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @main([17 x <16 x i8>] addrspace(2)* byval dereferenceable(18446744073709551615) %arg, [16 x <16 x i8>] addrspace(2)* byval dereferenceable(18446744073709551615) %arg1, [32 x <8 x i32>] addrspace(2)* byval dereferenceable(18446744073709551615) %arg2, [16 x <8 x i32>] addrspace(2)* byval dereferenceable(18446744073709551615) %arg3, [16 x <4 x i32>] addrspace(2)* byval dereferenceable(18446744073709551615) %arg4, float inreg %arg5, i32 inreg %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <3 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, <2 x i32> %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, i32 %arg19, i32 %arg20, float %arg21, i32 %arg22) #0 {
main_body:
- %tmp = call float @llvm.SI.fs.interp(i32 3, i32 4, i32 %arg6, <2 x i32> %arg8)
- %tmp23 = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %i.i = extractelement <2 x i32> %arg8, i32 0
+ %j.i = extractelement <2 x i32> %arg8, i32 1
+ %i.f.i = bitcast i32 %i.i to float
+ %j.f.i = bitcast i32 %j.i to float
+ %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 3, i32 4, i32 %arg6) #2
+ %p2.i = call float @llvm.amdgcn.interp.p2(float %p1.i, float %j.f.i, i32 3, i32 4, i32 %arg6) #2
+ %tmp23 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
+
%tmp24 = extractelement <4 x float> %tmp23, i32 3
%tmp25 = fmul float %tmp24, undef
- %tmp26 = fmul float undef, %tmp
+ %tmp26 = fmul float undef, %p2.i
%tmp27 = fadd float %tmp26, undef
%tmp28 = bitcast float %tmp27 to i32
%tmp29 = insertelement <4 x i32> undef, i32 %tmp28, i32 0
%tmp30 = insertelement <4 x i32> %tmp29, i32 0, i32 1
%tmp31 = insertelement <4 x i32> %tmp30, i32 undef, i32 2
- %tmp32 = call <4 x float> @llvm.SI.image.sample.c.v4i32(<4 x i32> %tmp31, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp31.cast = bitcast <4 x i32> %tmp31 to <4 x float>
+ %tmp32 = call <4 x float> @llvm.amdgcn.image.sample.c.v4f32.v4f32.v8i32(<4 x float> %tmp31.cast, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp33 = extractelement <4 x float> %tmp32, i32 0
%tmp34 = fadd float undef, %tmp33
%tmp35 = fadd float %tmp34, undef
%tmp36 = fadd float %tmp35, undef
%tmp37 = fadd float %tmp36, undef
%tmp38 = fadd float %tmp37, undef
- %tmp39 = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp39 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp40 = extractelement <4 x float> %tmp39, i32 0
%tmp41 = extractelement <4 x float> %tmp39, i32 1
%tmp42 = extractelement <4 x float> %tmp39, i32 2
@@ -51,7 +52,8 @@ main_body:
%tmp53 = insertelement <4 x i32> undef, i32 %tmp50, i32 0
%tmp54 = insertelement <4 x i32> %tmp53, i32 %tmp51, i32 1
%tmp55 = insertelement <4 x i32> %tmp54, i32 %tmp52, i32 2
- %tmp56 = call <4 x float> @llvm.SI.image.sample.c.v4i32(<4 x i32> %tmp55, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp55.cast = bitcast <4 x i32> %tmp55 to <4 x float>
+ %tmp56 = call <4 x float> @llvm.amdgcn.image.sample.c.v4f32.v4f32.v8i32(<4 x float> %tmp55.cast, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp57 = extractelement <4 x float> %tmp56, i32 0
%tmp58 = fadd float %tmp38, %tmp57
%tmp59 = fadd float undef, %tmp46
@@ -60,7 +62,8 @@ main_body:
%tmp62 = bitcast float %tmp60 to i32
%tmp63 = insertelement <4 x i32> undef, i32 %tmp61, i32 1
%tmp64 = insertelement <4 x i32> %tmp63, i32 %tmp62, i32 2
- %tmp65 = call <4 x float> @llvm.SI.image.sample.c.v4i32(<4 x i32> %tmp64, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp64.cast = bitcast <4 x i32> %tmp64 to <4 x float>
+ %tmp65 = call <4 x float> @llvm.amdgcn.image.sample.c.v4f32.v4f32.v8i32(<4 x float> %tmp64.cast, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp66 = extractelement <4 x float> %tmp65, i32 0
%tmp67 = fadd float %tmp58, %tmp66
%tmp68 = fmul float %tmp67, 1.250000e-01
@@ -76,8 +79,9 @@ IF26: ; preds = %main_body
ENDIF25: ; preds = %IF29, %main_body
%.4 = phi float [ %tmp84, %IF29 ], [ %tmp68, %main_body ]
%tmp73 = fadd float %.4, undef
- %tmp74 = call float @llvm.AMDGPU.clamp.(float %tmp73, float 0.000000e+00, float 1.000000e+00)
- %tmp75 = fmul float undef, %tmp74
+ %max.0.i = call float @llvm.maxnum.f32(float %tmp73, float 0.000000e+00)
+ %clamp.i = call float @llvm.minnum.f32(float %max.0.i, float 1.000000e+00)
+ %tmp75 = fmul float undef, %clamp.i
%tmp76 = fmul float %tmp75, undef
%tmp77 = fadd float %tmp76, undef
%tmp78 = insertvalue <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> undef, float %tmp77, 11
@@ -99,17 +103,22 @@ IF29: ; preds = %LOOP
ENDIF28: ; preds = %LOOP
%tmp85 = insertelement <4 x i32> %tmp72, i32 undef, i32 1
%tmp86 = insertelement <4 x i32> %tmp85, i32 undef, i32 2
- %tmp87 = call <4 x float> @llvm.SI.image.sample.c.v4i32(<4 x i32> %tmp86, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tmp86.cast = bitcast <4 x i32> %tmp86 to <4 x float>
+ %tmp87 = call <4 x float> @llvm.amdgcn.image.sample.c.v4f32.v4f32.v8i32(<4 x float> %tmp86.cast, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp88 = extractelement <4 x float> %tmp87, i32 0
%tmp89 = fadd float undef, %tmp88
br label %LOOP
}
-declare float @llvm.AMDGPU.clamp.(float, float, float) #1
-declare float @llvm.SI.fs.interp(i32, i32, i32, <2 x i32>) #1
-declare <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
-declare <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
-declare <4 x float> @llvm.SI.image.sample.c.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare float @llvm.minnum.f32(float, float) #1
+declare float @llvm.maxnum.f32(float, float) #1
+declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #1
+declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #1
+declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
+declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
+declare <4 x float> @llvm.amdgcn.image.sample.c.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
-attributes #0 = { "InitialPSInputAddr"="36983" "target-cpu"="tonga" }
+attributes #0 = { nounwind "InitialPSInputAddr"="36983" "target-cpu"="tonga" }
attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind readonly }
+attributes #3 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/unknown-processor.ll b/test/CodeGen/AMDGPU/unknown-processor.ll
index 941f4c601e34..25a700a943d2 100644
--- a/test/CodeGen/AMDGPU/unknown-processor.ll
+++ b/test/CodeGen/AMDGPU/unknown-processor.ll
@@ -13,7 +13,7 @@
; GCN: ScratchSize: 8{{$}}
; R600: MOV
-define void @foo() {
+define amdgpu_kernel void @foo() {
%alloca = alloca i32, align 4
store volatile i32 0, i32* %alloca
ret void
diff --git a/test/CodeGen/AMDGPU/unroll.ll b/test/CodeGen/AMDGPU/unroll.ll
index 411a15a4b839..2ce4de90a02d 100644
--- a/test/CodeGen/AMDGPU/unroll.ll
+++ b/test/CodeGen/AMDGPU/unroll.ll
@@ -6,10 +6,10 @@
; private memory. We want to make sure these kinds of loops are always
; unrolled, because private memory is slow.
-; CHECK-LABEL: @test
+; CHECK-LABEL: @private_memory
; CHECK-NOT: alloca
; CHECK: store i32 5, i32 addrspace(1)* %out
-define void @test(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @private_memory(i32 addrspace(1)* %out) {
entry:
%0 = alloca [32 x i32]
br label %loop.header
@@ -34,3 +34,67 @@ exit:
store i32 %3, i32 addrspace(1)* %out
ret void
}
+
+; Check that the loop is fully unrolled for local memory references
+
+; CHECK-LABEL: @local_memory
+; CHECK: getelementptr i32, i32 addrspace(1)* %out, i32 128
+; CHECK-NEXT: store
+; CHECK-NEXT: ret
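+;
+; The exit test uses the pre-increment counter (icmp sge i32 %counter, 128),
+; so the body runs for counters 0..128 and the final store is to index 128.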
+define amdgpu_kernel void @local_memory(i32 addrspace(1)* %out, i32 addrspace(3)* %lds) {
+entry:
+ br label %loop.header
+
+loop.header:
+ %counter = phi i32 [0, %entry], [%inc, %loop.inc]
+ br label %loop.body
+
+loop.body:
+ %ptr_lds = getelementptr i32, i32 addrspace(3)* %lds, i32 %counter
+ %val = load i32, i32 addrspace(3)* %ptr_lds
+ %ptr_out = getelementptr i32, i32 addrspace(1)* %out, i32 %counter
+ store i32 %val, i32 addrspace(1)* %ptr_out
+ br label %loop.inc
+
+loop.inc:
+ %inc = add i32 %counter, 1
+ %cond = icmp sge i32 %counter, 128
+ br i1 %cond, label %exit, label %loop.header
+
+exit:
+ ret void
+}
+
+; Check that a loop with an if inside is completely unrolled, eliminating the phi and the branch
+
+; CHECK-LABEL: @unroll_for_if
+; CHECK: entry:
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: store
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: store
+; CHECK-NOT: br
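+;
+; After full unrolling only the odd iterations (i1 & 1 != 0) reach the store,
+; leaving 24 straight-line getelementptr/store pairs.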
+define amdgpu_kernel void @unroll_for_if(i32* %a) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.inc
+ %i1 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
+ %and = and i32 %i1, 1
+ %tobool = icmp eq i32 %and, 0
+ br i1 %tobool, label %for.inc, label %if.then
+
+if.then: ; preds = %for.body
+ %0 = sext i32 %i1 to i64
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %0
+ store i32 0, i32* %arrayidx, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %if.then
+ %inc = add nuw nsw i32 %i1, 1
+ %cmp = icmp ult i32 %inc, 48
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end: ; preds = %for.cond
+ ret void
+}
diff --git a/test/CodeGen/AMDGPU/unsupported-cc.ll b/test/CodeGen/AMDGPU/unsupported-cc.ll
index d120111a71fb..68e91e8c9c6b 100644
--- a/test/CodeGen/AMDGPU/unsupported-cc.ll
+++ b/test/CodeGen/AMDGPU/unsupported-cc.ll
@@ -6,7 +6,7 @@
; CHECK: LSHR
; CHECK-NEXT: SETGT_INT {{\** *}}T{{[0-9]+\.[XYZW]}}, {{literal\.[xy]}}, KC0[2].Z
; CHECK-NEXT: 5(7.006492e-45)
-define void @slt(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @slt(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = icmp slt i32 %in, 5
%1 = select i1 %0, i32 -1, i32 0
@@ -18,7 +18,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: SETGT_UINT {{\** *}}T{{[0-9]+\.[XYZW]}}, {{literal\.[xy]}}, KC0[2].Z
; CHECK-NEXT: 5(7.006492e-45)
-define void @ult_i32(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @ult_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = icmp ult i32 %in, 5
%1 = select i1 %0, i32 -1, i32 0
@@ -31,7 +31,7 @@ entry:
; CHECK-NEXT: 1084227584(5.000000e+00)
; CHECK-NEXT: SETE T{{[0-9]\.[XYZW]}}, PV.[[CHAN]], 0.0
; CHECK-NEXT: LSHR *
-define void @ult_float(float addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @ult_float(float addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ult float %in, 5.0
%1 = select i1 %0, float 1.0, float 0.0
@@ -43,7 +43,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: SETGE {{\*? *}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, {{literal\.[xy]}}
; CHECK-NEXT: 1084227584(5.000000e+00)
-define void @ult_float_native(float addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @ult_float_native(float addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ult float %in, 5.0
%1 = select i1 %0, float 0.0, float 1.0
@@ -55,7 +55,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: SETGT {{\*? *}}T{{[0-9]+\.[XYZW]}}, {{literal\.[xy]}}, KC0[2].Z
; CHECK-NEXT: 1084227584(5.000000e+00)
-define void @olt(float addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @olt(float addrspace(1)* %out, float %in) {
entry:
%0 = fcmp olt float %in, 5.0
%1 = select i1 %0, float 1.0, float 0.0
@@ -67,7 +67,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: SETGT_INT {{\** *}}T{{[0-9]+\.[XYZW]}}, {{literal\.[xy]}}, KC0[2].Z
; CHECK-NEXT: 6(8.407791e-45)
-define void @sle(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @sle(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = icmp sle i32 %in, 5
%1 = select i1 %0, i32 -1, i32 0
@@ -79,7 +79,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: SETGT_UINT {{\** *}}T{{[0-9]+\.[XYZW]}}, {{literal\.[xy]}}, KC0[2].Z
; CHECK-NEXT: 6(8.407791e-45)
-define void @ule_i32(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @ule_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = icmp ule i32 %in, 5
%1 = select i1 %0, i32 -1, i32 0
@@ -92,7 +92,7 @@ entry:
; CHECK-NEXT: 1084227584(5.000000e+00)
; CHECK-NEXT: SETE T{{[0-9]\.[XYZW]}}, PV.[[CHAN]], 0.0
; CHECK-NEXT: LSHR *
-define void @ule_float(float addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @ule_float(float addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ule float %in, 5.0
%1 = select i1 %0, float 1.0, float 0.0
@@ -104,7 +104,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: SETGT {{\*? *}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, {{literal\.[xy]}}
; CHECK-NEXT: 1084227584(5.000000e+00)
-define void @ule_float_native(float addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @ule_float_native(float addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ule float %in, 5.0
%1 = select i1 %0, float 0.0, float 1.0
@@ -116,7 +116,7 @@ entry:
; CHECK: LSHR
; CHECK-NEXT: SETGE {{\*? *}}T{{[0-9]\.[XYZW]}}, {{literal\.[xy]}}, KC0[2].Z
; CHECK-NEXT:1084227584(5.000000e+00)
-define void @ole(float addrspace(1)* %out, float %in) {
+define amdgpu_kernel void @ole(float addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ole float %in, 5.0
%1 = select i1 %0, float 1.0, float 0.0
diff --git a/test/CodeGen/AMDGPU/urecip.ll b/test/CodeGen/AMDGPU/urecip.ll
deleted file mode 100644
index d58d2dc2d963..000000000000
--- a/test/CodeGen/AMDGPU/urecip.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s
-
-; CHECK: v_rcp_iflag_f32_e32
-
-define void @test(i32 %p, i32 %q) {
- %i = udiv i32 %p, %q
- %r = bitcast i32 %i to float
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %r, float %r, float %r, float %r)
- ret void
-}
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
diff --git a/test/CodeGen/AMDGPU/urem.ll b/test/CodeGen/AMDGPU/urem.ll
index 9e2cfa34e0b9..fd7f8fa2efab 100644
--- a/test/CodeGen/AMDGPU/urem.ll
+++ b/test/CodeGen/AMDGPU/urem.ll
@@ -9,7 +9,7 @@
; FUNC-LABEL: {{^}}test_urem_i32:
; SI: s_endpgm
; EG: CF_END
-define void @test_urem_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test_urem_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
%b = load i32, i32 addrspace(1)* %b_ptr
@@ -26,7 +26,7 @@ define void @test_urem_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; SI: v_sub_i32
; SI: buffer_store_dword
; SI: s_endpgm
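;
; Conceptually, urem by the constant 7 needs no divide instruction; one
; standard magic-number sequence (a sketch, not the exact ISA output) is:
;   t = mulhi_u32(n, 0x24924925)
;   q = (((n - t) >> 1) + t) >> 2    ; n / 7
;   r = n - q * 7                    ; the final v_sub_i32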
-define void @test_urem_i32_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @test_urem_i32_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%num = load i32, i32 addrspace(1)* %in
%result = urem i32 %num, 7
store i32 %result, i32 addrspace(1)* %out
@@ -36,7 +36,7 @@ define void @test_urem_i32_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; FUNC-LABEL: {{^}}test_urem_v2i32:
; SI: s_endpgm
; EG: CF_END
-define void @test_urem_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @test_urem_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32>, <2 x i32> addrspace(1)* %in
%b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
@@ -48,7 +48,7 @@ define void @test_urem_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1
; FUNC-LABEL: {{^}}test_urem_v4i32:
; SI: s_endpgm
; EG: CF_END
-define void @test_urem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @test_urem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32>, <4 x i32> addrspace(1)* %in
%b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
@@ -60,7 +60,7 @@ define void @test_urem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1
; FUNC-LABEL: {{^}}test_urem_i64:
; SI: s_endpgm
; EG: CF_END
-define void @test_urem_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @test_urem_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%b_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
%a = load i64, i64 addrspace(1)* %in
%b = load i64, i64 addrspace(1)* %b_ptr
@@ -72,7 +72,7 @@ define void @test_urem_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
; FUNC-LABEL: {{^}}test_urem_v2i64:
; SI: s_endpgm
; EG: CF_END
-define void @test_urem_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
+define amdgpu_kernel void @test_urem_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
%a = load <2 x i64>, <2 x i64> addrspace(1)* %in
%b = load <2 x i64>, <2 x i64> addrspace(1)* %b_ptr
@@ -84,7 +84,7 @@ define void @test_urem_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1
; FUNC-LABEL: {{^}}test_urem_v4i64:
; SI: s_endpgm
; EG: CF_END
-define void @test_urem_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
+define amdgpu_kernel void @test_urem_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
%a = load <4 x i64>, <4 x i64> addrspace(1)* %in
%b = load <4 x i64>, <4 x i64> addrspace(1)* %b_ptr
diff --git a/test/CodeGen/AMDGPU/use-sgpr-multiple-times.ll b/test/CodeGen/AMDGPU/use-sgpr-multiple-times.ll
index 82bdc261b112..f8e6b7edfe35 100644
--- a/test/CodeGen/AMDGPU/use-sgpr-multiple-times.ll
+++ b/test/CodeGen/AMDGPU/use-sgpr-multiple-times.ll
@@ -11,7 +11,7 @@ declare float @llvm.amdgcn.div.fixup.f32(float, float, float) #1
; GCN: s_load_dword [[SGPR:s[0-9]+]],
; GCN: v_add_f32_e64 [[RESULT:v[0-9]+]], [[SGPR]], [[SGPR]]
; GCN: buffer_store_dword [[RESULT]]
-define void @test_sgpr_use_twice_binop(float addrspace(1)* %out, float %a) #0 {
+define amdgpu_kernel void @test_sgpr_use_twice_binop(float addrspace(1)* %out, float %a) #0 {
%dbl = fadd float %a, %a
store float %dbl, float addrspace(1)* %out, align 4
ret void
@@ -21,7 +21,7 @@ define void @test_sgpr_use_twice_binop(float addrspace(1)* %out, float %a) #0 {
; GCN: s_load_dword [[SGPR:s[0-9]+]],
; GCN: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR]], [[SGPR]], [[SGPR]]
; GCN: buffer_store_dword [[RESULT]]
-define void @test_sgpr_use_three_ternary_op(float addrspace(1)* %out, float %a) #0 {
+define amdgpu_kernel void @test_sgpr_use_three_ternary_op(float addrspace(1)* %out, float %a) #0 {
%fma = call float @llvm.fma.f32(float %a, float %a, float %a) #1
store float %fma, float addrspace(1)* %out, align 4
ret void
@@ -35,7 +35,7 @@ define void @test_sgpr_use_three_ternary_op(float addrspace(1)* %out, float %a)
; GCN: v_mov_b32_e32 [[VGPR1:v[0-9]+]], [[SGPR1]]
; GCN: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR0]], [[SGPR0]], [[VGPR1]]
; GCN: buffer_store_dword [[RESULT]]
-define void @test_sgpr_use_twice_ternary_op_a_a_b(float addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @test_sgpr_use_twice_ternary_op_a_a_b(float addrspace(1)* %out, float %a, float %b) #0 {
%fma = call float @llvm.fma.f32(float %a, float %a, float %b) #1
store float %fma, float addrspace(1)* %out, align 4
ret void
@@ -58,7 +58,7 @@ define void @test_sgpr_use_twice_ternary_op_a_a_b(float addrspace(1)* %out, floa
; GCN-DAG: v_fma_f32 [[RESULT1:v[0-9]+]], [[VA1]], [[SA]], [[VB]]
; GCN: buffer_store_dword [[RESULT0]]
; GCN: buffer_store_dword [[RESULT1]]
-define void @test_use_s_v_s(float addrspace(1)* %out, float %a, float %b, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @test_use_s_v_s(float addrspace(1)* %out, float %a, float %b, float addrspace(1)* %in) #0 {
%va0 = load volatile float, float addrspace(1)* %in
%va1 = load volatile float, float addrspace(1)* %in
%fma0 = call float @llvm.fma.f32(float %a, float %va0, float %b) #1
@@ -76,7 +76,7 @@ define void @test_use_s_v_s(float addrspace(1)* %out, float %a, float %b, float
; GCN: v_mov_b32_e32 [[VGPR1:v[0-9]+]], [[SGPR1]]
; GCN: v_fma_f32 [[RESULT:v[0-9]+]], [[VGPR1]], [[SGPR0]], [[SGPR0]]
; GCN: buffer_store_dword [[RESULT]]
-define void @test_sgpr_use_twice_ternary_op_a_b_a(float addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @test_sgpr_use_twice_ternary_op_a_b_a(float addrspace(1)* %out, float %a, float %b) #0 {
%fma = call float @llvm.fma.f32(float %a, float %b, float %a) #1
store float %fma, float addrspace(1)* %out, align 4
ret void
@@ -90,7 +90,7 @@ define void @test_sgpr_use_twice_ternary_op_a_b_a(float addrspace(1)* %out, floa
; GCN: v_mov_b32_e32 [[VGPR1:v[0-9]+]], [[SGPR1]]
; GCN: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR0]], [[VGPR1]], [[SGPR0]]
; GCN: buffer_store_dword [[RESULT]]
-define void @test_sgpr_use_twice_ternary_op_b_a_a(float addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @test_sgpr_use_twice_ternary_op_b_a_a(float addrspace(1)* %out, float %a, float %b) #0 {
%fma = call float @llvm.fma.f32(float %b, float %a, float %a) #1
store float %fma, float addrspace(1)* %out, align 4
ret void
@@ -100,7 +100,7 @@ define void @test_sgpr_use_twice_ternary_op_b_a_a(float addrspace(1)* %out, floa
; GCN: s_load_dword [[SGPR:s[0-9]+]]
; GCN: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR]], [[SGPR]], 2.0
; GCN: buffer_store_dword [[RESULT]]
-define void @test_sgpr_use_twice_ternary_op_a_a_imm(float addrspace(1)* %out, float %a) #0 {
+define amdgpu_kernel void @test_sgpr_use_twice_ternary_op_a_a_imm(float addrspace(1)* %out, float %a) #0 {
%fma = call float @llvm.fma.f32(float %a, float %a, float 2.0) #1
store float %fma, float addrspace(1)* %out, align 4
ret void
@@ -110,7 +110,7 @@ define void @test_sgpr_use_twice_ternary_op_a_a_imm(float addrspace(1)* %out, fl
; GCN: s_load_dword [[SGPR:s[0-9]+]]
; GCN: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR]], 2.0, [[SGPR]]
; GCN: buffer_store_dword [[RESULT]]
-define void @test_sgpr_use_twice_ternary_op_a_imm_a(float addrspace(1)* %out, float %a) #0 {
+define amdgpu_kernel void @test_sgpr_use_twice_ternary_op_a_imm_a(float addrspace(1)* %out, float %a) #0 {
%fma = call float @llvm.fma.f32(float %a, float 2.0, float %a) #1
store float %fma, float addrspace(1)* %out, align 4
ret void
@@ -121,7 +121,7 @@ define void @test_sgpr_use_twice_ternary_op_a_imm_a(float addrspace(1)* %out, fl
; GCN: s_load_dword [[SGPR:s[0-9]+]]
; GCN: v_div_fixup_f32 [[RESULT:v[0-9]+]], 2.0, [[SGPR]], [[SGPR]]
; GCN: buffer_store_dword [[RESULT]]
-define void @test_sgpr_use_twice_ternary_op_imm_a_a(float addrspace(1)* %out, float %a) #0 {
+define amdgpu_kernel void @test_sgpr_use_twice_ternary_op_imm_a_a(float addrspace(1)* %out, float %a) #0 {
%val = call float @llvm.amdgcn.div.fixup.f32(float 2.0, float %a, float %a) #1
store float %val, float addrspace(1)* %out, align 4
ret void
@@ -132,7 +132,7 @@ define void @test_sgpr_use_twice_ternary_op_imm_a_a(float addrspace(1)* %out, fl
; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x44800000
; GCN: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR]], [[SGPR]], [[VK]]
; GCN: buffer_store_dword [[RESULT]]
-define void @test_sgpr_use_twice_ternary_op_a_a_kimm(float addrspace(1)* %out, float %a) #0 {
+define amdgpu_kernel void @test_sgpr_use_twice_ternary_op_a_a_kimm(float addrspace(1)* %out, float %a) #0 {
%fma = call float @llvm.fma.f32(float %a, float %a, float 1024.0) #1
store float %fma, float addrspace(1)* %out, align 4
ret void
@@ -143,7 +143,7 @@ define void @test_sgpr_use_twice_ternary_op_a_a_kimm(float addrspace(1)* %out, f
; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x44800000
; GCN: v_fma_f32 [[RESULT0:v[0-9]+]], [[VK]], [[VK]], [[SGPR]]
; GCN: buffer_store_dword [[RESULT0]]
-define void @test_literal_use_twice_ternary_op_k_k_s(float addrspace(1)* %out, float %a) #0 {
+define amdgpu_kernel void @test_literal_use_twice_ternary_op_k_k_s(float addrspace(1)* %out, float %a) #0 {
%fma = call float @llvm.fma.f32(float 1024.0, float 1024.0, float %a) #1
store float %fma, float addrspace(1)* %out
ret void
@@ -158,7 +158,7 @@ define void @test_literal_use_twice_ternary_op_k_k_s(float addrspace(1)* %out, f
; GCN: buffer_store_dword [[RESULT0]]
; GCN: buffer_store_dword [[RESULT1]]
; GCN: s_endpgm
-define void @test_literal_use_twice_ternary_op_k_k_s_x2(float addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @test_literal_use_twice_ternary_op_k_k_s_x2(float addrspace(1)* %out, float %a, float %b) #0 {
%fma0 = call float @llvm.fma.f32(float 1024.0, float 1024.0, float %a) #1
%fma1 = call float @llvm.fma.f32(float 1024.0, float 1024.0, float %b) #1
store volatile float %fma0, float addrspace(1)* %out
@@ -171,7 +171,7 @@ define void @test_literal_use_twice_ternary_op_k_k_s_x2(float addrspace(1)* %out
; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x44800000
; GCN: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR]], [[VK]], [[VK]]
; GCN: buffer_store_dword [[RESULT]]
-define void @test_literal_use_twice_ternary_op_k_s_k(float addrspace(1)* %out, float %a) #0 {
+define amdgpu_kernel void @test_literal_use_twice_ternary_op_k_s_k(float addrspace(1)* %out, float %a) #0 {
%fma = call float @llvm.fma.f32(float 1024.0, float %a, float 1024.0) #1
store float %fma, float addrspace(1)* %out
ret void
@@ -186,7 +186,7 @@ define void @test_literal_use_twice_ternary_op_k_s_k(float addrspace(1)* %out, f
; GCN: buffer_store_dword [[RESULT0]]
; GCN: buffer_store_dword [[RESULT1]]
; GCN: s_endpgm
-define void @test_literal_use_twice_ternary_op_k_s_k_x2(float addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @test_literal_use_twice_ternary_op_k_s_k_x2(float addrspace(1)* %out, float %a, float %b) #0 {
%fma0 = call float @llvm.fma.f32(float 1024.0, float %a, float 1024.0) #1
%fma1 = call float @llvm.fma.f32(float 1024.0, float %b, float 1024.0) #1
store volatile float %fma0, float addrspace(1)* %out
@@ -199,7 +199,7 @@ define void @test_literal_use_twice_ternary_op_k_s_k_x2(float addrspace(1)* %out
; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x44800000
; GCN: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR]], [[VK]], [[VK]]
; GCN: buffer_store_dword [[RESULT]]
-define void @test_literal_use_twice_ternary_op_s_k_k(float addrspace(1)* %out, float %a) #0 {
+define amdgpu_kernel void @test_literal_use_twice_ternary_op_s_k_k(float addrspace(1)* %out, float %a) #0 {
%fma = call float @llvm.fma.f32(float %a, float 1024.0, float 1024.0) #1
store float %fma, float addrspace(1)* %out
ret void
@@ -214,7 +214,7 @@ define void @test_literal_use_twice_ternary_op_s_k_k(float addrspace(1)* %out, f
; GCN: buffer_store_dword [[RESULT0]]
; GCN: buffer_store_dword [[RESULT1]]
; GCN: s_endpgm
-define void @test_literal_use_twice_ternary_op_s_k_k_x2(float addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @test_literal_use_twice_ternary_op_s_k_k_x2(float addrspace(1)* %out, float %a, float %b) #0 {
%fma0 = call float @llvm.fma.f32(float %a, float 1024.0, float 1024.0) #1
%fma1 = call float @llvm.fma.f32(float %b, float 1024.0, float 1024.0) #1
store volatile float %fma0, float addrspace(1)* %out
@@ -234,7 +234,7 @@ define void @test_literal_use_twice_ternary_op_s_k_k_x2(float addrspace(1)* %out
; GCN: buffer_store_dword [[RESULT0]]
; GCN: buffer_store_dword [[RESULT1]]
-define void @test_s0_s1_k_f32(float addrspace(1)* %out, float %a, float %b) #0 {
+define amdgpu_kernel void @test_s0_s1_k_f32(float addrspace(1)* %out, float %a, float %b) #0 {
%fma0 = call float @llvm.fma.f32(float %a, float %b, float 1024.0) #1
%fma1 = call float @llvm.fma.f32(float %a, float %b, float 4096.0) #1
store volatile float %fma0, float addrspace(1)* %out
@@ -259,7 +259,7 @@ define void @test_s0_s1_k_f32(float addrspace(1)* %out, float %a, float %b) #0 {
; GCN: buffer_store_dwordx2 [[RESULT0]]
; GCN: buffer_store_dwordx2 [[RESULT1]]
-define void @test_s0_s1_k_f64(double addrspace(1)* %out, double %a, double %b) #0 {
+define amdgpu_kernel void @test_s0_s1_k_f64(double addrspace(1)* %out, double %a, double %b) #0 {
%fma0 = call double @llvm.fma.f64(double %a, double %b, double 1024.0) #1
%fma1 = call double @llvm.fma.f64(double %a, double %b, double 4096.0) #1
store volatile double %fma0, double addrspace(1)* %out
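
For context: the checks in this file encode the VALU constant-bus rule. On these targets a VALU instruction may read at most one unique SGPR operand per instruction (repeated uses of the same SGPR count once, and a literal constant also occupies the slot), so any second distinct scalar value must first be copied into a VGPR with v_mov_b32. A minimal sketch of an input that exercises the rule, with illustrative names:

  declare float @llvm.fma.f32(float, float, float) #1

  ; %a, %a, %a fits in one constant-bus slot; %a together with %b does not,
  ; so one of them is expected to reach the fma through a v_mov_b32 copy.
  define amdgpu_kernel void @example_constant_bus(float addrspace(1)* %out, float %a, float %b) #0 {
    %fma = call float @llvm.fma.f32(float %a, float %b, float %a)
    store float %fma, float addrspace(1)* %out, align 4
    ret void
  }

  attributes #0 = { nounwind }
  attributes #1 = { nounwind readnone }
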
diff --git a/test/CodeGen/AMDGPU/usubo.ll b/test/CodeGen/AMDGPU/usubo.ll
index 3c9b1622a076..d1f454f0bc65 100644
--- a/test/CodeGen/AMDGPU/usubo.ll
+++ b/test/CodeGen/AMDGPU/usubo.ll
@@ -1,16 +1,16 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs< %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefixes=EG,FUNC %s
-declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
-declare { i64, i1 } @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
-
-; FUNC-LABEL: {{^}}usubo_i64_zext:
+; FUNC-LABEL: {{^}}s_usubo_i64_zext:
+; GCN: s_sub_u32
+; GCN: s_subb_u32
+; GCN: v_cmp_gt_u64_e32 vcc
; EG: SUBB_UINT
; EG: ADDC_UINT
-define void @usubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
- %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) nounwind
+define amdgpu_kernel void @s_usubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
+ %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) #0
%val = extractvalue { i64, i1 } %usub, 0
%carry = extractvalue { i64, i1 } %usub, 1
%ext = zext i1 %carry to i64
@@ -19,13 +19,16 @@ define void @usubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
ret void
}
+; FIXME: Could do scalar
+
; FUNC-LABEL: {{^}}s_usubo_i32:
-; SI: s_sub_i32
+; GCN: v_sub_i32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
; EG-DAG: SUBB_UINT
; EG-DAG: SUB_INT
-define void @s_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
- %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b) nounwind
+define amdgpu_kernel void @s_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) #0 {
+ %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
%val = extractvalue { i32, i1 } %usub, 0
%carry = extractvalue { i32, i1 } %usub, 1
store i32 %val, i32 addrspace(1)* %out, align 4
@@ -34,14 +37,19 @@ define void @s_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32
}
; FUNC-LABEL: {{^}}v_usubo_i32:
-; SI: v_subrev_i32_e32
+; GCN: v_sub_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
; EG-DAG: SUBB_UINT
; EG-DAG: SUB_INT
-define void @v_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
- %a = load i32, i32 addrspace(1)* %aptr, align 4
- %b = load i32, i32 addrspace(1)* %bptr, align 4
- %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b) nounwind
+define amdgpu_kernel void @v_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr, i64 %tid.ext
+ %a = load i32, i32 addrspace(1)* %a.gep, align 4
+ %b = load i32, i32 addrspace(1)* %b.gep, align 4
+ %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
%val = extractvalue { i32, i1 } %usub, 0
%carry = extractvalue { i32, i1 } %usub, 1
store i32 %val, i32 addrspace(1)* %out, align 4
@@ -49,16 +57,38 @@ define void @v_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32
ret void
}
+; FUNC-LABEL: {{^}}v_usubo_i32_novcc:
+; GCN: v_sub_i32_e64 v{{[0-9]+}}, [[COND:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[COND]]
+
+; EG-DAG: SUBB_UINT
+; EG-DAG: SUB_INT
+define amdgpu_kernel void @v_usubo_i32_novcc(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr, i64 %tid.ext
+ %a = load i32, i32 addrspace(1)* %a.gep, align 4
+ %b = load i32, i32 addrspace(1)* %b.gep, align 4
+ %uadd = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+ %val = extractvalue { i32, i1 } %uadd, 0
+ %carry = extractvalue { i32, i1 } %uadd, 1
+ store volatile i32 %val, i32 addrspace(1)* %out, align 4
+ call void asm sideeffect "", "~{VCC}"() #0
+ store volatile i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
; FUNC-LABEL: {{^}}s_usubo_i64:
-; SI: s_sub_u32
-; SI: s_subb_u32
+; GCN: s_sub_u32
+; GCN: s_subb_u32
; EG-DAG: SUBB_UINT
; EG-DAG: SUB_INT
; EG-DAG: SUB_INT
; EG: SUB_INT
-define void @s_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
- %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) nounwind
+define amdgpu_kernel void @s_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) #0 {
+ %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue { i64, i1 } %usub, 0
%carry = extractvalue { i64, i1 } %usub, 1
store i64 %val, i64 addrspace(1)* %out, align 8
@@ -67,20 +97,50 @@ define void @s_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64
}
; FUNC-LABEL: {{^}}v_usubo_i64:
-; SI: v_sub_i32
-; SI: v_subb_u32
+; GCN: v_sub_i32
+; GCN: v_subb_u32
; EG-DAG: SUBB_UINT
; EG-DAG: SUB_INT
; EG-DAG: SUB_INT
; EG: SUB_INT
-define void @v_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
- %a = load i64, i64 addrspace(1)* %aptr, align 4
- %b = load i64, i64 addrspace(1)* %bptr, align 4
- %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) nounwind
+define amdgpu_kernel void @v_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %a.ptr, i64 addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds i64, i64 addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds i64, i64 addrspace(1)* %b.ptr, i64 %tid.ext
+ %a = load i64, i64 addrspace(1)* %a.gep
+ %b = load i64, i64 addrspace(1)* %b.gep
+ %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue { i64, i1 } %usub, 0
%carry = extractvalue { i64, i1 } %usub, 1
store i64 %val, i64 addrspace(1)* %out, align 8
store i1 %carry, i1 addrspace(1)* %carryout
ret void
}
+
+; FUNC-LABEL: {{^}}v_usubo_i16:
+; VI: v_subrev_u16_e32
+; VI: v_cmp_gt_u16_e32
+define amdgpu_kernel void @v_usubo_i16(i16 addrspace(1)* %out, i1 addrspace(1)* %carryout, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %a.gep = getelementptr inbounds i16, i16 addrspace(1)* %a.ptr, i64 %tid.ext
+ %b.gep = getelementptr inbounds i16, i16 addrspace(1)* %b.ptr, i64 %tid.ext
+ %a = load i16, i16 addrspace(1)* %a.gep
+ %b = load i16, i16 addrspace(1)* %b.gep
+ %usub = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 %a, i16 %b)
+ %val = extractvalue { i16, i1 } %usub, 0
+ %carry = extractvalue { i16, i1 } %usub, 1
+ store i16 %val, i16 addrspace(1)* %out
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+declare { i16, i1 } @llvm.usub.with.overflow.i16(i16, i16) #1
+declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) #1
+declare { i64, i1 } @llvm.usub.with.overflow.i64(i64, i64) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
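
For context: the distinction between the two define forms above is the entry-point calling convention. A minimal sketch, assuming an amdgcn target and illustrative names:

  target triple = "amdgcn--"

  ; Kernel entry point: arguments arrive via the kernarg segment and are
  ; loaded with scalar loads (s_load_dword*).
  define amdgpu_kernel void @example_kernel(i32 addrspace(1)* %out, i32 %x) {
    store i32 %x, i32 addrspace(1)* %out, align 4
    ret void
  }

  ; Default calling convention: an ordinary callable function, no longer
  ; treated as a kernel entry by the backend.
  define void @example_function(i32 addrspace(1)* %out, i32 %x) {
    store i32 %x, i32 addrspace(1)* %out, align 4
    ret void
  }
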
diff --git a/test/CodeGen/AMDGPU/v1i64-kernel-arg.ll b/test/CodeGen/AMDGPU/v1i64-kernel-arg.ll
index a48e7acd4cf3..b7d766aa395e 100644
--- a/test/CodeGen/AMDGPU/v1i64-kernel-arg.ll
+++ b/test/CodeGen/AMDGPU/v1i64-kernel-arg.ll
@@ -1,14 +1,14 @@
; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck %s
; CHECK-LABEL: {{^}}kernel_arg_i64:
-define void @kernel_arg_i64(i64 addrspace(1)* %out, i64 %a) nounwind {
+define amdgpu_kernel void @kernel_arg_i64(i64 addrspace(1)* %out, i64 %a) nounwind {
store i64 %a, i64 addrspace(1)* %out, align 8
ret void
}
; i64 arg works, v1i64 arg does not.
; CHECK-LABEL: {{^}}kernel_arg_v1i64:
-define void @kernel_arg_v1i64(<1 x i64> addrspace(1)* %out, <1 x i64> %a) nounwind {
+define amdgpu_kernel void @kernel_arg_v1i64(<1 x i64> addrspace(1)* %out, <1 x i64> %a) nounwind {
store <1 x i64> %a, <1 x i64> addrspace(1)* %out, align 8
ret void
}
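
For context: <1 x i64> is a genuine vector type even though it carries a single element, so it takes the vector argument-lowering path that the comment above flags as broken. A sketch of the scalar equivalence, with illustrative names:

  ; The payload of a <1 x i64> argument can be recovered as a plain i64.
  define amdgpu_kernel void @example_v1i64_payload(i64 addrspace(1)* %out, <1 x i64> %a) {
    %s = extractelement <1 x i64> %a, i32 0 ; single element at index 0
    store i64 %s, i64 addrspace(1)* %out, align 8
    ret void
  }
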
diff --git a/test/CodeGen/AMDGPU/v_cndmask.ll b/test/CodeGen/AMDGPU/v_cndmask.ll
index 1cd49feb0d88..d4a68a418ee4 100644
--- a/test/CodeGen/AMDGPU/v_cndmask.ll
+++ b/test/CodeGen/AMDGPU/v_cndmask.ll
@@ -4,12 +4,12 @@
declare i32 @llvm.amdgcn.workitem.id.x() #1
; GCN-LABEL: {{^}}v_cnd_nan_nosgpr:
-; GCN: v_cmp_eq_u32_e64 vcc, s{{[0-9]+}}, 0
-; GCN: v_cndmask_b32_e32 v{{[0-9]}}, -1, v{{[0-9]+}}, vcc
+; GCN: v_cmp_eq_u32_e64 [[COND:vcc|s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 0
+; GCN: v_cndmask_b32_e{{32|64}} v{{[0-9]}}, -1, v{{[0-9]+}}, [[COND]]
; GCN-DAG: v{{[0-9]}}
; All nan values are converted to 0xffffffff
; GCN: s_endpgm
-define void @v_cnd_nan_nosgpr(float addrspace(1)* %out, i32 %c, float addrspace(1)* %fptr) #0 {
+define amdgpu_kernel void @v_cnd_nan_nosgpr(float addrspace(1)* %out, i32 %c, float addrspace(1)* %fptr) #0 {
%idx = call i32 @llvm.amdgcn.workitem.id.x() #1
%f.gep = getelementptr float, float addrspace(1)* %fptr, i32 %idx
%f = load float, float addrspace(1)* %f.gep
@@ -30,7 +30,7 @@ define void @v_cnd_nan_nosgpr(float addrspace(1)* %out, i32 %c, float addrspace(
; GCN-DAG: v{{[0-9]}}
; All nan values are converted to 0xffffffff
; GCN: s_endpgm
-define void @v_cnd_nan(float addrspace(1)* %out, i32 %c, float %f) #0 {
+define amdgpu_kernel void @v_cnd_nan(float addrspace(1)* %out, i32 %c, float %f) #0 {
%setcc = icmp ne i32 %c, 0
%select = select i1 %setcc, float 0xFFFFFFFFE0000000, float %f
store float %select, float addrspace(1)* %out
@@ -47,7 +47,7 @@ define void @v_cnd_nan(float addrspace(1)* %out, i32 %c, float %f) #0 {
; GCN-DAG: v_cmp_nlg_f32_e64 vcc, [[X]], 0
; GCN-DAG: v_mov_b32_e32 [[VZ:v[0-9]+]], [[Z]]
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[VZ]], vcc
-define void @fcmp_sgprX_k0_select_k1_sgprZ_f32(float addrspace(1)* %out, float %x, float %z) #0 {
+define amdgpu_kernel void @fcmp_sgprX_k0_select_k1_sgprZ_f32(float addrspace(1)* %out, float %x, float %z) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
@@ -62,7 +62,7 @@ define void @fcmp_sgprX_k0_select_k1_sgprZ_f32(float addrspace(1)* %out, float %
; GCN-DAG: v_cmp_nlg_f32_e64 vcc, [[X]], 0
; GCN-DAG: v_mov_b32_e32 [[VZ:v[0-9]+]], [[Z]]
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[VZ]], vcc
-define void @fcmp_sgprX_k0_select_k1_sgprX_f32(float addrspace(1)* %out, float %x) #0 {
+define amdgpu_kernel void @fcmp_sgprX_k0_select_k1_sgprX_f32(float addrspace(1)* %out, float %x) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
@@ -77,7 +77,7 @@ define void @fcmp_sgprX_k0_select_k1_sgprX_f32(float addrspace(1)* %out, float %
; GCN-DAG: v_cmp_nlg_f32_e64 vcc, [[X]], 0
; GCN-DAG: v_mov_b32_e32 [[VZ:v[0-9]+]], [[Z]]
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 0, [[VZ]], vcc
-define void @fcmp_sgprX_k0_select_k0_sgprZ_f32(float addrspace(1)* %out, float %x, float %z) #0 {
+define amdgpu_kernel void @fcmp_sgprX_k0_select_k0_sgprZ_f32(float addrspace(1)* %out, float %x, float %z) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
@@ -92,7 +92,7 @@ define void @fcmp_sgprX_k0_select_k0_sgprZ_f32(float addrspace(1)* %out, float %
; GCN-DAG: v_cmp_nlg_f32_e64 vcc, [[X]], 0
; GCN-DAG: v_mov_b32_e32 [[VZ:v[0-9]+]], [[Z]]
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 0, [[VZ]], vcc
-define void @fcmp_sgprX_k0_select_k0_sgprX_f32(float addrspace(1)* %out, float %x) #0 {
+define amdgpu_kernel void @fcmp_sgprX_k0_select_k0_sgprX_f32(float addrspace(1)* %out, float %x) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
@@ -105,9 +105,9 @@ define void @fcmp_sgprX_k0_select_k0_sgprX_f32(float addrspace(1)* %out, float %
; GCN-LABEL: {{^}}fcmp_sgprX_k0_select_k0_vgprZ_f32:
; GCN-DAG: s_load_dword [[X:s[0-9]+]]
; GCN-DAG: {{buffer|flat}}_load_dword [[Z:v[0-9]+]]
-; GCN-DAG: v_cmp_nlg_f32_e64 vcc, [[X]], 0
-; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 0, [[Z]], vcc
-define void @fcmp_sgprX_k0_select_k0_vgprZ_f32(float addrspace(1)* %out, float %x, float addrspace(1)* %z.ptr) #0 {
+; GCN-DAG: v_cmp_nlg_f32_e64 [[COND:vcc|s\[[0-9]+:[0-9]+\]]], [[X]], 0
+; GCN: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, 0, [[Z]], [[COND]]
+define amdgpu_kernel void @fcmp_sgprX_k0_select_k0_vgprZ_f32(float addrspace(1)* %out, float %x, float addrspace(1)* %z.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%z.gep = getelementptr inbounds float, float addrspace(1)* %z.ptr, i64 %tid.ext
@@ -122,9 +122,9 @@ define void @fcmp_sgprX_k0_select_k0_vgprZ_f32(float addrspace(1)* %out, float %
; GCN-LABEL: {{^}}fcmp_sgprX_k0_select_k1_vgprZ_f32:
; GCN-DAG: {{buffer|flat}}_load_dword [[Z:v[0-9]+]]
; GCN-DAG: s_load_dword [[X:s[0-9]+]]
-; GCN: v_cmp_nlg_f32_e64 vcc, [[X]], 0
-; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[Z]], vcc
-define void @fcmp_sgprX_k0_select_k1_vgprZ_f32(float addrspace(1)* %out, float %x, float addrspace(1)* %z.ptr) #0 {
+; GCN: v_cmp_nlg_f32_e64 [[COND:vcc|s\[[0-9]+:[0-9]+\]]], [[X]], 0
+; GCN: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, 1.0, [[Z]], [[COND]]
+define amdgpu_kernel void @fcmp_sgprX_k0_select_k1_vgprZ_f32(float addrspace(1)* %out, float %x, float addrspace(1)* %z.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%z.gep = getelementptr inbounds float, float addrspace(1)* %z.ptr, i64 %tid.ext
@@ -142,7 +142,7 @@ define void @fcmp_sgprX_k0_select_k1_vgprZ_f32(float addrspace(1)* %out, float %
; GCN-DAG: v_cmp_ngt_f32_e32 vcc, 0, [[X]]
; GCN-DAG: v_mov_b32_e32 [[VZ:v[0-9]+]], [[Z]]
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[VZ]], vcc
-define void @fcmp_vgprX_k0_select_k1_sgprZ_f32(float addrspace(1)* %out, float addrspace(1)* %x.ptr, float %z) #0 {
+define amdgpu_kernel void @fcmp_vgprX_k0_select_k1_sgprZ_f32(float addrspace(1)* %out, float addrspace(1)* %x.ptr, float %z) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%x.gep = getelementptr inbounds float, float addrspace(1)* %x.ptr, i64 %tid.ext
@@ -159,7 +159,7 @@ define void @fcmp_vgprX_k0_select_k1_sgprZ_f32(float addrspace(1)* %out, float a
; GCN: {{buffer|flat}}_load_dword [[Z:v[0-9]+]]
; GCN: v_cmp_le_f32_e32 vcc, 0, [[X]]
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[Z]], vcc
-define void @fcmp_vgprX_k0_select_k1_vgprZ_f32(float addrspace(1)* %out, float addrspace(1)* %x.ptr, float addrspace(1)* %z.ptr) #0 {
+define amdgpu_kernel void @fcmp_vgprX_k0_select_k1_vgprZ_f32(float addrspace(1)* %out, float addrspace(1)* %x.ptr, float addrspace(1)* %z.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%x.gep = getelementptr inbounds float, float addrspace(1)* %x.ptr, i64 %tid.ext
@@ -178,7 +178,7 @@ define void @fcmp_vgprX_k0_select_k1_vgprZ_f32(float addrspace(1)* %out, float a
; GCN: {{buffer|flat}}_load_dword [[Z:v[0-9]+]]
; GCN: v_cmp_lt_i32_e32 vcc, -1, [[X]]
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 2, [[Z]], vcc
-define void @icmp_vgprX_k0_select_k1_vgprZ_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %x.ptr, i32 addrspace(1)* %z.ptr) #0 {
+define amdgpu_kernel void @icmp_vgprX_k0_select_k1_vgprZ_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %x.ptr, i32 addrspace(1)* %z.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%x.gep = getelementptr inbounds i32, i32 addrspace(1)* %x.ptr, i64 %tid.ext
@@ -203,7 +203,7 @@ define void @icmp_vgprX_k0_select_k1_vgprZ_i32(i32 addrspace(1)* %out, i32 addrs
; VI-DAG: v_cmp_lt_i64_e64 s{{\[[0-9]+:[0-9]+\]}}, -1, v{{\[}}[[X_LO]]:[[X_HI]]{{\]}}
; VI-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, 0, v[[Z_HI]], s
; VI-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, 2, v[[Z_LO]], s
-define void @icmp_vgprX_k0_select_k1_vgprZ_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %x.ptr, i64 addrspace(1)* %z.ptr) #0 {
+define amdgpu_kernel void @icmp_vgprX_k0_select_k1_vgprZ_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %x.ptr, i64 addrspace(1)* %z.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%x.gep = getelementptr inbounds i64, i64 addrspace(1)* %x.ptr, i64 %tid.ext
@@ -226,7 +226,7 @@ define void @icmp_vgprX_k0_select_k1_vgprZ_i64(i64 addrspace(1)* %out, i64 addrs
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 2.0, v{{[0-9]+}}, vcc
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, -0.5, v{{[0-9]+}}, vcc
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 4.0, v{{[0-9]+}}, vcc
-define void @fcmp_vgprX_k0_select_vgprZ_k1_v4f32(<4 x float> addrspace(1)* %out, float addrspace(1)* %x.ptr, <4 x float> addrspace(1)* %z.ptr) #0 {
+define amdgpu_kernel void @fcmp_vgprX_k0_select_vgprZ_k1_v4f32(<4 x float> addrspace(1)* %out, float addrspace(1)* %x.ptr, <4 x float> addrspace(1)* %z.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%x.gep = getelementptr inbounds float, float addrspace(1)* %x.ptr, i64 %tid.ext
@@ -249,7 +249,7 @@ define void @fcmp_vgprX_k0_select_vgprZ_k1_v4f32(<4 x float> addrspace(1)* %out,
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 2.0, v{{[0-9]+}}, vcc
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, -0.5, v{{[0-9]+}}, vcc
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 4.0, v{{[0-9]+}}, vcc
-define void @fcmp_vgprX_k0_select_k1_vgprZ_v4f32(<4 x float> addrspace(1)* %out, float addrspace(1)* %x.ptr, <4 x float> addrspace(1)* %z.ptr) #0 {
+define amdgpu_kernel void @fcmp_vgprX_k0_select_k1_vgprZ_v4f32(<4 x float> addrspace(1)* %out, float addrspace(1)* %x.ptr, <4 x float> addrspace(1)* %z.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%x.gep = getelementptr inbounds float, float addrspace(1)* %x.ptr, i64 %tid.ext
@@ -275,7 +275,7 @@ define void @fcmp_vgprX_k0_select_k1_vgprZ_v4f32(<4 x float> addrspace(1)* %out,
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 2.0, v{{[0-9]+}}, vcc
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, -0.5, v{{[0-9]+}}, vcc
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 4.0, v{{[0-9]+}}, vcc
-define void @fcmp_k0_vgprX_select_k1_vgprZ_v4f32(<4 x float> addrspace(1)* %out, float addrspace(1)* %x.ptr, <4 x float> addrspace(1)* %z.ptr) #0 {
+define amdgpu_kernel void @fcmp_k0_vgprX_select_k1_vgprZ_v4f32(<4 x float> addrspace(1)* %out, float addrspace(1)* %x.ptr, <4 x float> addrspace(1)* %z.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%x.gep = getelementptr inbounds float, float addrspace(1)* %x.ptr, i64 %tid.ext
@@ -298,7 +298,7 @@ define void @fcmp_k0_vgprX_select_k1_vgprZ_v4f32(<4 x float> addrspace(1)* %out,
; GCN-DAG: s_or_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, vcc
; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, s
; GCN: store_byte
-define void @icmp_vgprX_k0_select_k1_vgprZ_i1(i1 addrspace(1)* %out, i32 addrspace(1)* %x.ptr, i1 addrspace(1)* %z.ptr) #0 {
+define amdgpu_kernel void @icmp_vgprX_k0_select_k1_vgprZ_i1(i1 addrspace(1)* %out, i32 addrspace(1)* %x.ptr, i1 addrspace(1)* %z.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%x.gep = getelementptr inbounds i32, i32 addrspace(1)* %x.ptr, i64 %tid.ext
@@ -321,7 +321,7 @@ define void @icmp_vgprX_k0_select_k1_vgprZ_i1(i1 addrspace(1)* %out, i32 addrspa
; GCN: v_cmp_le_f32_e32 vcc, 0, [[X]]
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, [[K]], v{{[0-9]+}}, vcc
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}, vcc
-define void @fcmp_vgprX_k0_selectf64_k1_vgprZ_f32(double addrspace(1)* %out, float addrspace(1)* %x.ptr, double addrspace(1)* %z.ptr) #0 {
+define amdgpu_kernel void @fcmp_vgprX_k0_selectf64_k1_vgprZ_f32(double addrspace(1)* %out, float addrspace(1)* %x.ptr, double addrspace(1)* %z.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%x.gep = getelementptr inbounds float, float addrspace(1)* %x.ptr, i64 %tid.ext
@@ -343,7 +343,7 @@ define void @fcmp_vgprX_k0_selectf64_k1_vgprZ_f32(double addrspace(1)* %out, flo
; GCN: v_cmp_nlg_f32_e32 vcc, 0, [[X]]
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}, vcc
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}, vcc
-define void @fcmp_vgprX_k0_selecti64_k1_vgprZ_f32(i64 addrspace(1)* %out, float addrspace(1)* %x.ptr, i64 addrspace(1)* %z.ptr) #0 {
+define amdgpu_kernel void @fcmp_vgprX_k0_selecti64_k1_vgprZ_f32(i64 addrspace(1)* %out, float addrspace(1)* %x.ptr, i64 addrspace(1)* %z.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%x.gep = getelementptr inbounds float, float addrspace(1)* %x.ptr, i64 %tid.ext
@@ -364,7 +364,7 @@ define void @fcmp_vgprX_k0_selecti64_k1_vgprZ_f32(i64 addrspace(1)* %out, float
; GCN: v_cmp_gt_u32_e32 vcc, 2, [[X]]
; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 4.0, [[Z]], vcc
-define void @icmp_vgprX_k0_selectf32_k1_vgprZ_i32(float addrspace(1)* %out, i32 addrspace(1)* %x.ptr, float addrspace(1)* %z.ptr) #0 {
+define amdgpu_kernel void @icmp_vgprX_k0_selectf32_k1_vgprZ_i32(float addrspace(1)* %out, i32 addrspace(1)* %x.ptr, float addrspace(1)* %z.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%x.gep = getelementptr inbounds i32, i32 addrspace(1)* %x.ptr, i64 %tid.ext
@@ -386,7 +386,7 @@ define void @icmp_vgprX_k0_selectf32_k1_vgprZ_i32(float addrspace(1)* %out, i32
; GCN: v_cmp_nle_f32_e32 vcc, 4.0, [[X]]
; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, -1.0, vcc
; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, -2.0, vcc
-define void @fcmp_k0_vgprX_select_k1_vgprZ_f32_cond_use_x2(float addrspace(1)* %out, float addrspace(1)* %x.ptr, float addrspace(1)* %z.ptr) #0 {
+define amdgpu_kernel void @fcmp_k0_vgprX_select_k1_vgprZ_f32_cond_use_x2(float addrspace(1)* %out, float addrspace(1)* %x.ptr, float addrspace(1)* %z.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%x.gep = getelementptr inbounds float, float addrspace(1)* %x.ptr, i64 %tid.ext
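
For context: the [[COND:vcc|s\[[0-9]+:[0-9]+\]]] pattern used in this file accepts the compare result either in VCC or in an arbitrary SGPR pair, since the allocator may place the condition in either. The IR shape producing these v_cmp/v_cndmask pairs is a compare feeding a select; a minimal sketch with illustrative names:

  ; fcmp + select lowers to v_cmp_* writing a condition register followed by
  ; v_cndmask_b32 choosing between the two select operands.
  define amdgpu_kernel void @example_cndmask(float addrspace(1)* %out, float %x, float %z) {
    %cmp = fcmp one float %x, 0.0
    %sel = select i1 %cmp, float %z, float 1.0
    store float %sel, float addrspace(1)* %out, align 4
    ret void
  }
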
diff --git a/test/CodeGen/AMDGPU/v_cvt_pk_u8_f32.ll b/test/CodeGen/AMDGPU/v_cvt_pk_u8_f32.ll
index 9246ce38dbed..2cda52a8438a 100644
--- a/test/CodeGen/AMDGPU/v_cvt_pk_u8_f32.ll
+++ b/test/CodeGen/AMDGPU/v_cvt_pk_u8_f32.ll
@@ -5,7 +5,7 @@ declare i32 @llvm.amdgcn.cvt.pk.u8.f32(float, i32, i32) #0
; GCN-LABEL: {{^}}v_cvt_pk_u8_f32_idx_0:
; GCN: v_cvt_pk_u8_f32 v{{[0-9]+}}, s{{[0-9]+}}, 0, v{{[0-9]+}}
-define void @v_cvt_pk_u8_f32_idx_0(i32 addrspace(1)* %out, float %src, i32 %reg) {
+define amdgpu_kernel void @v_cvt_pk_u8_f32_idx_0(i32 addrspace(1)* %out, float %src, i32 %reg) {
%result = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 0, i32 %reg) #0
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -13,7 +13,7 @@ define void @v_cvt_pk_u8_f32_idx_0(i32 addrspace(1)* %out, float %src, i32 %reg)
; GCN-LABEL: {{^}}v_cvt_pk_u8_f32_idx_1:
; GCN: v_cvt_pk_u8_f32 v{{[0-9]+}}, s{{[0-9]+}}, 1, v{{[0-9]+}}
-define void @v_cvt_pk_u8_f32_idx_1(i32 addrspace(1)* %out, float %src, i32 %reg) {
+define amdgpu_kernel void @v_cvt_pk_u8_f32_idx_1(i32 addrspace(1)* %out, float %src, i32 %reg) {
%result = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 1, i32 %reg) #0
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -21,7 +21,7 @@ define void @v_cvt_pk_u8_f32_idx_1(i32 addrspace(1)* %out, float %src, i32 %reg)
; GCN-LABEL: {{^}}v_cvt_pk_u8_f32_idx_2:
; GCN: v_cvt_pk_u8_f32 v{{[0-9]+}}, s{{[0-9]+}}, 2, v{{[0-9]+}}
-define void @v_cvt_pk_u8_f32_idx_2(i32 addrspace(1)* %out, float %src, i32 %reg) {
+define amdgpu_kernel void @v_cvt_pk_u8_f32_idx_2(i32 addrspace(1)* %out, float %src, i32 %reg) {
%result = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 2, i32 %reg) #0
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -29,7 +29,7 @@ define void @v_cvt_pk_u8_f32_idx_2(i32 addrspace(1)* %out, float %src, i32 %reg)
; GCN-LABEL: {{^}}v_cvt_pk_u8_f32_idx_3:
; GCN: v_cvt_pk_u8_f32 v{{[0-9]+}}, s{{[0-9]+}}, 3, v{{[0-9]+}}
-define void @v_cvt_pk_u8_f32_idx_3(i32 addrspace(1)* %out, float %src, i32 %reg) {
+define amdgpu_kernel void @v_cvt_pk_u8_f32_idx_3(i32 addrspace(1)* %out, float %src, i32 %reg) {
%result = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 3, i32 %reg) #0
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
@@ -40,7 +40,7 @@ define void @v_cvt_pk_u8_f32_idx_3(i32 addrspace(1)* %out, float %src, i32 %reg)
; GCN: v_cvt_pk_u8_f32 v{{[0-9]+}}, s{{[0-9]+}}, 1, v{{[0-9]+}}
; GCN: v_cvt_pk_u8_f32 v{{[0-9]+}}, s{{[0-9]+}}, 2, v{{[0-9]+}}
; GCN: v_cvt_pk_u8_f32 v{{[0-9]+}}, s{{[0-9]+}}, 3, v{{[0-9]+}}
-define void @v_cvt_pk_u8_f32_combine(i32 addrspace(1)* %out, float %src, i32 %reg) {
+define amdgpu_kernel void @v_cvt_pk_u8_f32_combine(i32 addrspace(1)* %out, float %src, i32 %reg) {
%result0 = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 0, i32 %reg) #0
%result1 = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 1, i32 %result0) #0
%result2 = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 2, i32 %result1) #0
@@ -51,7 +51,7 @@ define void @v_cvt_pk_u8_f32_combine(i32 addrspace(1)* %out, float %src, i32 %re
; GCN-LABEL: {{^}}v_cvt_pk_u8_f32_idx:
; GCN: v_cvt_pk_u8_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @v_cvt_pk_u8_f32_idx(i32 addrspace(1)* %out, float %src, i32 %idx, i32 %reg) {
+define amdgpu_kernel void @v_cvt_pk_u8_f32_idx(i32 addrspace(1)* %out, float %src, i32 %idx, i32 %reg) {
%result = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %src, i32 %idx, i32 %reg) #0
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
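
For context: v_cvt_pk_u8_f32 converts a float to an unsigned 8-bit value and packs it into the byte of the third operand selected by the second operand, leaving the other bytes intact, which is why the combine test above chains each result into the next call. A minimal sketch with illustrative names:

  declare i32 @llvm.amdgcn.cvt.pk.u8.f32(float, i32, i32) #0

  ; Pack the converted value into bytes 0 and 1 of an i32, one call per byte.
  define amdgpu_kernel void @example_pack_two_bytes(i32 addrspace(1)* %out, float %f) {
    %b0 = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %f, i32 0, i32 0)
    %b1 = call i32 @llvm.amdgcn.cvt.pk.u8.f32(float %f, i32 1, i32 %b0)
    store i32 %b1, i32 addrspace(1)* %out, align 4
    ret void
  }

  attributes #0 = { nounwind readnone }
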
diff --git a/test/CodeGen/AMDGPU/v_mac.ll b/test/CodeGen/AMDGPU/v_mac.ll
index 9a2dc743d6c9..2b96f7d50076 100644
--- a/test/CodeGen/AMDGPU/v_mac.ll
+++ b/test/CodeGen/AMDGPU/v_mac.ll
@@ -1,5 +1,6 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-fp64-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=VI-FLUSH -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=+fp64-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=VI-DENORM -check-prefix=GCN %s
; GCN-LABEL: {{^}}mac_vvv:
; GCN: buffer_load_dword [[A:v[0-9]+]], off, s[{{[0-9]+:[0-9]+}}], 0{{$}}
@@ -7,7 +8,7 @@
; GCN: buffer_load_dword [[C:v[0-9]+]], off, s[{{[0-9]+:[0-9]+}}], 0 offset:8
; GCN: v_mac_f32_e32 [[C]], [[B]], [[A]]
; GCN: buffer_store_dword [[C]]
-define void @mac_vvv(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @mac_vvv(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
entry:
%b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
%c_ptr = getelementptr float, float addrspace(1)* %in, i32 2
@@ -25,7 +26,7 @@ entry:
; GCN-LABEL: {{^}}mad_inline_sgpr_inline:
; GCN-NOT: v_mac_f32
; GCN: v_mad_f32 v{{[0-9]}}, s{{[0-9]+}}, 0.5, 0.5
-define void @mad_inline_sgpr_inline(float addrspace(1)* %out, float %in) #0 {
+define amdgpu_kernel void @mad_inline_sgpr_inline(float addrspace(1)* %out, float %in) #0 {
entry:
%tmp0 = fmul float 0.5, %in
%tmp1 = fadd float %tmp0, 0.5
@@ -36,7 +37,7 @@ entry:
; GCN-LABEL: {{^}}mad_vvs:
; GCN-NOT: v_mac_f32
; GCN: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
-define void @mad_vvs(float addrspace(1)* %out, float addrspace(1)* %in, float %c) #0 {
+define amdgpu_kernel void @mad_vvs(float addrspace(1)* %out, float addrspace(1)* %in, float %c) #0 {
entry:
%b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
@@ -51,7 +52,7 @@ entry:
; GCN-LABEL: {{^}}mac_ssv:
; GCN: v_mac_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-define void @mac_ssv(float addrspace(1)* %out, float addrspace(1)* %in, float %a) #0 {
+define amdgpu_kernel void @mac_ssv(float addrspace(1)* %out, float addrspace(1)* %in, float %a) #0 {
entry:
%c = load float, float addrspace(1)* %in
@@ -64,7 +65,7 @@ entry:
; GCN-LABEL: {{^}}mac_mad_same_add:
; GCN: v_mad_f32 v{{[0-9]}}, v{{[0-9]+}}, v{{[0-9]+}}, [[ADD:v[0-9]+]]
; GCN: v_mac_f32_e32 [[ADD]], v{{[0-9]+}}, v{{[0-9]+}}
-define void @mac_mad_same_add(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @mac_mad_same_add(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
entry:
%b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
%c_ptr = getelementptr float, float addrspace(1)* %in, i32 2
@@ -95,7 +96,7 @@ entry:
; GCN-LABEL: {{^}}mad_neg_src0:
; GCN-NOT: v_mac_f32
; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
-define void @mad_neg_src0(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @mad_neg_src0(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
entry:
%b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
%c_ptr = getelementptr float, float addrspace(1)* %in, i32 2
@@ -112,10 +113,10 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}unsafe_mad_sub0_src0:
+; GCN-LABEL: {{^}}nsz_mad_sub0_src0:
; GCN-NOT: v_mac_f32
; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
-define void @unsafe_mad_sub0_src0(float addrspace(1)* %out, float addrspace(1)* %in) #1 {
+define amdgpu_kernel void @nsz_mad_sub0_src0(float addrspace(1)* %out, float addrspace(1)* %in) #1 {
entry:
%b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
%c_ptr = getelementptr float, float addrspace(1)* %in, i32 2
@@ -135,7 +136,7 @@ entry:
; GCN-LABEL: {{^}}safe_mad_sub0_src0:
; GCN: v_sub_f32_e32 [[SUB0:v[0-9]+]], 0,
; GCN: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[SUB0]]
-define void @safe_mad_sub0_src0(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @safe_mad_sub0_src0(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
entry:
%b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
%c_ptr = getelementptr float, float addrspace(1)* %in, i32 2
@@ -155,7 +156,7 @@ entry:
; GCN-LABEL: {{^}}mad_neg_src1:
; GCN-NOT: v_mac_f32
; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
-define void @mad_neg_src1(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @mad_neg_src1(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
entry:
%b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
%c_ptr = getelementptr float, float addrspace(1)* %in, i32 2
@@ -172,10 +173,10 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}unsafe_mad_sub0_src1:
+; GCN-LABEL: {{^}}nsz_mad_sub0_src1:
; GCN-NOT: v_mac_f32
; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
-define void @unsafe_mad_sub0_src1(float addrspace(1)* %out, float addrspace(1)* %in) #1 {
+define amdgpu_kernel void @nsz_mad_sub0_src1(float addrspace(1)* %out, float addrspace(1)* %in) #1 {
entry:
%b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
%c_ptr = getelementptr float, float addrspace(1)* %in, i32 2
@@ -195,7 +196,7 @@ entry:
; GCN-LABEL: {{^}}mad_neg_src2:
; GCN-NOT: v_mac
; GCN: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[-0-9]}}
-define void @mad_neg_src2(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @mad_neg_src2(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
entry:
%b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
%c_ptr = getelementptr float, float addrspace(1)* %in, i32 2
@@ -221,7 +222,7 @@ entry:
; GCN: v_add_f32_e32 [[TMP2:v[0-9]+]], [[A]], [[A]]
; GCN: v_mad_f32 v{{[0-9]+}}, [[TMP2]], -4.0, 1.0
-define void @fold_inline_imm_into_mac_src2_f32(float addrspace(1)* %out, float addrspace(1)* %a, float addrspace(1)* %b) #3 {
+define amdgpu_kernel void @fold_inline_imm_into_mac_src2_f32(float addrspace(1)* %out, float addrspace(1)* %a, float addrspace(1)* %b) #3 {
bb:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -247,12 +248,16 @@ bb:
; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_ushort [[B:v[0-9]+]]
-; FIXME: How is this not folded?
-; SI: v_cvt_f32_f16_e32 v{{[0-9]+}}, 0x3c00
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT_A:v[0-9]+]], [[A]]
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT_B:v[0-9]+]], [[B]]
-; VI: v_add_f16_e32 [[TMP2:v[0-9]+]], [[A]], [[A]]
-; VI: v_mad_f16 v{{[0-9]+}}, [[TMP2]], -4.0, 1.0
-define void @fold_inline_imm_into_mac_src2_f16(half addrspace(1)* %out, half addrspace(1)* %a, half addrspace(1)* %b) #3 {
+; SI: v_add_f32_e32 [[TMP2:v[0-9]+]], [[CVT_A]], [[CVT_A]]
+; SI: v_mad_f32 v{{[0-9]+}}, [[TMP2]], -4.0, 1.0
+; SI: v_mac_f32_e32 v{{[0-9]+}}, 0x41000000, v{{[0-9]+}}
+
+; VI-FLUSH: v_add_f16_e32 [[TMP2:v[0-9]+]], [[A]], [[A]]
+; VI-FLUSH: v_mad_f16 v{{[0-9]+}}, [[TMP2]], -4.0, 1.0
+define amdgpu_kernel void @fold_inline_imm_into_mac_src2_f16(half addrspace(1)* %out, half addrspace(1)* %a, half addrspace(1)* %b) #3 {
bb:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -276,7 +281,7 @@ bb:
declare i32 @llvm.amdgcn.workitem.id.x() #2
-attributes #0 = { nounwind "unsafe-fp-math"="false" }
-attributes #1 = { nounwind "unsafe-fp-math"="true" }
+attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" }
+attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" }
attributes #2 = { nounwind readnone }
attributes #3 = { nounwind }
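
For context: the attribute swap at the end of this file pins the tests to the one property they actually depend on. Folding fsub 0.0, %x into a negated mad operand is unsound in general because 0.0 - 0.0 yields +0.0 while a bare negation yields -0.0; "no-signed-zeros-fp-math"="true" licenses exactly that fold without the rest of unsafe-fp-math. A minimal sketch with illustrative names:

  ; With #1 the subtraction may fold into v_mad_f32 ..., -v, v, v; with #0 an
  ; explicit v_sub_f32 must survive to preserve the sign of zero.
  define amdgpu_kernel void @example_nsz_fold(float addrspace(1)* %out, float %b, float %c, float %d) #1 {
    %neg.b = fsub float 0.0, %b
    %mul = fmul float %neg.b, %c
    %mad = fadd float %mul, %d
    store float %mad, float addrspace(1)* %out, align 4
    ret void
  }

  attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" }
  attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" }
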
diff --git a/test/CodeGen/AMDGPU/v_mac_f16.ll b/test/CodeGen/AMDGPU/v_mac_f16.ll
index 151f2cc9fc73..c45af522ec49 100644
--- a/test/CodeGen/AMDGPU/v_mac_f16.ll
+++ b/test/CodeGen/AMDGPU/v_mac_f16.ll
@@ -1,7 +1,7 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mattr=-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-fp64-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
-; GCN-LABEL: {{^}}mac_f16
+; GCN-LABEL: {{^}}mac_f16:
; GCN: {{buffer|flat}}_load_ushort v[[A_F16:[0-9]+]]
; GCN: {{buffer|flat}}_load_ushort v[[B_F16:[0-9]+]]
; GCN: {{buffer|flat}}_load_ushort v[[C_F16:[0-9]+]]
@@ -14,7 +14,7 @@
; VI: v_mac_f16_e32 v[[C_F16]], v[[B_F16]], v[[A_F16]]
; VI: buffer_store_short v[[C_F16]]
; GCN: s_endpgm
-define void @mac_f16(
+define amdgpu_kernel void @mac_f16(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b,
@@ -31,13 +31,14 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_f16_same_add
+; GCN-LABEL: {{^}}mac_f16_same_add:
; SI: v_mad_f32 v{{[0-9]}}, v{{[0-9]+}}, v{{[0-9]+}}, [[ADD:v[0-9]+]]
; SI: v_mac_f32_e32 [[ADD]], v{{[0-9]+}}, v{{[0-9]+}}
+
; VI: v_mad_f16 v{{[0-9]}}, v{{[0-9]+}}, v{{[0-9]+}}, [[ADD:v[0-9]+]]
; VI: v_mac_f16_e32 [[ADD]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: s_endpgm
-define void @mac_f16_same_add(
+define amdgpu_kernel void @mac_f16_same_add(
half addrspace(1)* %r0,
half addrspace(1)* %r1,
half addrspace(1)* %a,
@@ -63,13 +64,16 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_f16_neg_a
-; SI-NOT: v_mac_f32
-; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-LABEL: {{^}}mac_f16_neg_a:
+; SI: v_cvt_f32_f16_e32 [[CVT_A:v[0-9]+]], v{{[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT_B:v[0-9]+]], v{{[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT_C:v[0-9]+]], v{{[0-9]+}}
+; SI: v_mad_f32 v{{[0-9]+}}, -[[CVT_A]], [[CVT_B]], [[CVT_C]]
+
; VI-NOT: v_mac_f16
; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: s_endpgm
-define void @mac_f16_neg_a(
+define amdgpu_kernel void @mac_f16_neg_a(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b,
@@ -87,13 +91,16 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_f16_neg_b
-; SI-NOT: v_mac_f32
-; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-LABEL: {{^}}mac_f16_neg_b:
+; SI: v_cvt_f32_f16_e32 [[CVT_A:v[0-9]+]], v{{[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT_B:v[0-9]+]], v{{[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT_C:v[0-9]+]], v{{[0-9]+}}
+; SI: v_mad_f32 v{{[0-9]+}}, -[[CVT_A]], [[CVT_B]], [[CVT_C]]
+
; VI-NOT: v_mac_f16
; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: s_endpgm
-define void @mac_f16_neg_b(
+define amdgpu_kernel void @mac_f16_neg_b(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b,
@@ -111,13 +118,16 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_f16_neg_c
-; SI-NOT: v_mac_f32
-; SI: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
+; GCN-LABEL: {{^}}mac_f16_neg_c:
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_cvt_f32_f16_e32
+; SI: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
+
; VI-NOT: v_mac_f16
; VI: v_mad_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
; GCN: s_endpgm
-define void @mac_f16_neg_c(
+define amdgpu_kernel void @mac_f16_neg_c(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b,
@@ -135,14 +145,13 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_f16_neg_a_safe_fp_math
-; SI: v_cvt_f32_f16_e32 v[[ZERO:[0-9]+]], 0{{$}}
-; SI: v_subrev_f32_e32 v[[NEG_A:[0-9]+]], v{{[0-9]+}}, v[[ZERO]]
+; GCN-LABEL: {{^}}mac_f16_neg_a_safe_fp_math:
+; SI: v_sub_f32_e32 v[[NEG_A:[0-9]+]], 0, v{{[0-9]+}}
; SI: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A]]
; VI: v_sub_f16_e32 v[[NEG_A:[0-9]+]], 0, v{{[0-9]+}}
; VI: v_mac_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A]]
; GCN: s_endpgm
-define void @mac_f16_neg_a_safe_fp_math(
+define amdgpu_kernel void @mac_f16_neg_a_safe_fp_math(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b,
@@ -160,14 +169,13 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_f16_neg_b_safe_fp_math
-; SI: v_cvt_f32_f16_e32 v[[ZERO:[0-9]+]], 0{{$}}
-; SI: v_subrev_f32_e32 v[[NEG_A:[0-9]+]], v{{[0-9]+}}, v[[ZERO]]
+; GCN-LABEL: {{^}}mac_f16_neg_b_safe_fp_math:
+; SI: v_sub_f32_e32 v[[NEG_A:[0-9]+]], 0, v{{[0-9]+}}
; SI: v_mac_f32_e32 v{{[0-9]+}}, v[[NEG_A]], v{{[0-9]+}}
; VI: v_sub_f16_e32 v[[NEG_A:[0-9]+]], 0, v{{[0-9]+}}
; VI: v_mac_f16_e32 v{{[0-9]+}}, v[[NEG_A]], v{{[0-9]+}}
; GCN: s_endpgm
-define void @mac_f16_neg_b_safe_fp_math(
+define amdgpu_kernel void @mac_f16_neg_b_safe_fp_math(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b,
@@ -185,14 +193,13 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_f16_neg_c_safe_fp_math
-; SI: v_cvt_f32_f16_e32 v[[ZERO:[0-9]+]], 0{{$}}
-; SI: v_subrev_f32_e32 v[[NEG_A:[0-9]+]], v{{[0-9]+}}, v[[ZERO]]
+; GCN-LABEL: {{^}}mac_f16_neg_c_safe_fp_math:
+; SI: v_sub_f32_e32 v[[NEG_A:[0-9]+]], 0, v{{[0-9]+}}
; SI: v_mac_f32_e32 v[[NEG_A]], v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_sub_f16_e32 v[[NEG_A:[0-9]+]], 0, v{{[0-9]+}}
; VI: v_mac_f16_e32 v[[NEG_A]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: s_endpgm
-define void @mac_f16_neg_c_safe_fp_math(
+define amdgpu_kernel void @mac_f16_neg_c_safe_fp_math(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b,
@@ -210,13 +217,16 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_f16_neg_a_unsafe_fp_math
-; SI-NOT: v_mac_f32
-; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]}}
+; GCN-LABEL: {{^}}mac_f16_neg_a_nsz_fp_math:
+; SI: v_cvt_f32_f16_e32 [[CVT_A:v[0-9]+]], v{{[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT_B:v[0-9]+]], v{{[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT_C:v[0-9]+]], v{{[0-9]+}}
+; SI: v_mad_f32 v{{[0-9]+}}, -[[CVT_A]], [[CVT_B]], [[CVT_C]]
+
; VI-NOT: v_mac_f16
; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]}}
; GCN: s_endpgm
-define void @mac_f16_neg_a_unsafe_fp_math(
+define amdgpu_kernel void @mac_f16_neg_a_nsz_fp_math(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b,
@@ -234,13 +244,16 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_f16_neg_b_unsafe_fp_math
-; SI-NOT: v_mac_f32
-; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]}}
+; GCN-LABEL: {{^}}mac_f16_neg_b_nsz_fp_math:
+; SI: v_cvt_f32_f16_e32 [[CVT_A:v[0-9]+]], v{{[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT_B:v[0-9]+]], v{{[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT_C:v[0-9]+]], v{{[0-9]+}}
+; SI: v_mad_f32 v{{[0-9]+}}, -[[CVT_A]], [[CVT_B]], [[CVT_C]]
+
; VI-NOT: v_mac_f16
; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]}}
; GCN: s_endpgm
-define void @mac_f16_neg_b_unsafe_fp_math(
+define amdgpu_kernel void @mac_f16_neg_b_nsz_fp_math(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b,
@@ -258,13 +271,16 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_f16_neg_c_unsafe_fp_math
-; SI-NOT: v_mac_f32
-; SI: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]}}
+; GCN-LABEL: {{^}}mac_f16_neg_c_nsz_fp_math:
+; SI: v_cvt_f32_f16_e32 [[CVT_A:v[0-9]+]], v{{[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT_B:v[0-9]+]], v{{[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT_C:v[0-9]+]], v{{[0-9]+}}
+; SI: v_mad_f32 v{{[0-9]+}}, [[CVT_A]], [[CVT_B]], -[[CVT_C]]
+
; VI-NOT: v_mac_f16
; VI: v_mad_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]}}
; GCN: s_endpgm
-define void @mac_f16_neg_c_unsafe_fp_math(
+define amdgpu_kernel void @mac_f16_neg_c_nsz_fp_math(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b,
@@ -282,33 +298,38 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_v2f16
+; GCN-LABEL: {{^}}mac_v2f16:
; GCN: {{buffer|flat}}_load_dword v[[A_V2_F16:[0-9]+]]
; GCN: {{buffer|flat}}_load_dword v[[B_V2_F16:[0-9]+]]
; GCN: {{buffer|flat}}_load_dword v[[C_V2_F16:[0-9]+]]
-; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
-; GCN: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
+
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
-; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI-DAG: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI-DAG: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
+; SI-DAG: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[C_F32_1:[0-9]+]], v[[C_F16_1]]
-; SI: v_mac_f32_e32 v[[C_F32_0]], v[[B_F32_0]], v[[A_F32_0]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[C_F32_0]]
-; SI: v_mac_f32_e32 v[[C_F32_1]], v[[B_F32_1]], v[[A_F32_1]]
-; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[C_F32_1]]
-; SI: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; SI-DAG: v_mac_f32_e32 v[[C_F32_0]], v[[B_F32_0]], v[[A_F32_0]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_LO:[0-9]+]], v[[C_F32_0]]
+; SI-DAG: v_mac_f32_e32 v[[C_F32_1]], v[[B_F32_1]], v[[A_F32_1]]
+; SI-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[C_F32_1]]
; SI: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
-; VI: v_mac_f16_e32 v[[C_V2_F16]], v[[B_V2_F16]], v[[A_V2_F16]]
-; VI: v_mac_f16_e32 v[[C_F16_1]], v[[B_F16_1]], v[[A_F16_1]]
-; VI: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[C_V2_F16]]
-; VI: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[C_F16_1]]
-; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; VI-NOT: and
+; SI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+
+; VI-DAG: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; VI-DAG: v_mac_f16_sdwa v[[A_F16_1]], v[[C_V2_F16]], v[[B_V2_F16]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-DAG: v_mac_f16_e32 v[[A_V2_F16]], v[[C_V2_F16]], v[[B_V2_F16]]
+; VI-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[A_F16_1]]
+; VI-NOT: and
+; VI: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[A_V2_F16]]
+
; GCN: {{buffer|flat}}_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
-define void @mac_v2f16(
+define amdgpu_kernel void @mac_v2f16(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b,
@@ -325,17 +346,19 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_v2f16_same_add
-; SI: v_mad_f32 v{{[0-9]}}, v{{[0-9]+}}, v{{[0-9]+}}, [[ADD0:v[0-9]+]]
-; SI: v_mad_f32 v{{[0-9]}}, v{{[0-9]+}}, v{{[0-9]+}}, [[ADD1:v[0-9]+]]
-; SI: v_mac_f32_e32 [[ADD0]], v{{[0-9]+}}, v{{[0-9]+}}
-; SI: v_mac_f32_e32 [[ADD1]], v{{[0-9]+}}, v{{[0-9]+}}
-; VI: v_mad_f16 v{{[0-9]}}, v{{[0-9]+}}, v{{[0-9]+}}, [[ADD0:v[0-9]+]]
-; VI: v_mad_f16 v{{[0-9]}}, v{{[0-9]+}}, v{{[0-9]+}}, [[ADD1:v[0-9]+]]
-; VI: v_mac_f16_e32 [[ADD0]], v{{[0-9]+}}, v{{[0-9]+}}
-; VI: v_mac_f16_e32 [[ADD1]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-LABEL: {{^}}mac_v2f16_same_add:
+; SI: v_mad_f32 v{{[0-9]}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; SI: v_mad_f32 v{{[0-9]}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; SI: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; SI: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+
+; VI-DAG: v_mac_f16_sdwa v{{[0-9]}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-DAG: v_mad_f16 v{{[0-9]}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI-DAG: v_mac_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-DAG: v_mac_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+
; GCN: s_endpgm
-define void @mac_v2f16_same_add(
+define amdgpu_kernel void @mac_v2f16_same_add(
<2 x half> addrspace(1)* %r0,
<2 x half> addrspace(1)* %r1,
<2 x half> addrspace(1)* %a,
@@ -361,15 +384,18 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_v2f16_neg_a
-; SI-NOT: v_mac_f32
-; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-LABEL: {{^}}mac_v2f16_neg_a:
+; SI: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT1:v[0-9]+]], {{v[0-9]+}}
+
+; SI-DAG: v_mad_f32 v{{[0-9]+}}, -[[CVT0]], v{{[0-9]+}}, v{{[0-9]+}}
+; SI-DAG: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, [[CVT1]], v{{[0-9]+}}
+
; VI-NOT: v_mac_f16
; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: s_endpgm
-define void @mac_v2f16_neg_a(
+define amdgpu_kernel void @mac_v2f16_neg_a(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b,
@@ -388,14 +414,17 @@ entry:
}
; GCN-LABEL: {{^}}mac_v2f16_neg_b
-; SI-NOT: v_mac_f32
-; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT1:v[0-9]+]], {{v[0-9]+}}
+; SI-DAG: v_mad_f32 v{{[0-9]+}}, -[[CVT0]], v{{[0-9]+}}, v{{[0-9]+}}
+; SI-DAG: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, [[CVT1]], v{{[0-9]+}}
+
+
; VI-NOT: v_mac_f16
; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: s_endpgm
-define void @mac_v2f16_neg_b(
+define amdgpu_kernel void @mac_v2f16_neg_b(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b,
@@ -413,15 +442,22 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_v2f16_neg_c
-; SI-NOT: v_mac_f32
-; SI: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
-; SI: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
+; GCN-LABEL: {{^}}mac_v2f16_neg_c:
+; SI: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT1:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT2:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT3:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT4:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT5:v[0-9]+]], {{v[0-9]+}}
+
+; SI-DAG: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -[[CVT2]]
+; SI-DAG: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -[[CVT5]]
+
; VI-NOT: v_mac_f16
; VI: v_mad_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
; VI: v_mad_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
; GCN: s_endpgm
-define void @mac_v2f16_neg_c(
+define amdgpu_kernel void @mac_v2f16_neg_c(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b,
@@ -439,18 +475,20 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_v2f16_neg_a_safe_fp_math
-; SI: v_cvt_f32_f16_e32 v[[ZERO:[0-9]+]], 0{{$}}
-; SI: v_subrev_f32_e32 v[[NEG_A0:[0-9]+]], v{{[0-9]+}}, v[[ZERO]]
-; SI: v_subrev_f32_e32 v[[NEG_A1:[0-9]+]], v{{[0-9]+}}, v[[ZERO]]
-; SI: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A0]]
-; SI: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A1]]
+; GCN-LABEL: {{^}}mac_v2f16_neg_a_safe_fp_math:
+
+; SI: v_sub_f32_e32 v[[NEG_A0:[0-9]+]], 0, v{{[0-9]+}}
+; SI: v_sub_f32_e32 v[[NEG_A1:[0-9]+]], 0, v{{[0-9]+}}
+; SI-DAG: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A0]]
+; SI-DAG: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A1]]
+
; VI: v_sub_f16_e32 v[[NEG_A0:[0-9]+]], 0, v{{[0-9]+}}
; VI: v_sub_f16_e32 v[[NEG_A1:[0-9]+]], 0, v{{[0-9]+}}
-; VI: v_mac_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A0]]
-; VI: v_mac_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A1]]
+; VI-DAG: v_mac_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A0]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-DAG: v_mac_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A1]]
+
; GCN: s_endpgm
-define void @mac_v2f16_neg_a_safe_fp_math(
+define amdgpu_kernel void @mac_v2f16_neg_a_safe_fp_math(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b,
@@ -468,18 +506,20 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_v2f16_neg_b_safe_fp_math
-; SI: v_cvt_f32_f16_e32 v[[ZERO:[0-9]+]], 0{{$}}
-; SI: v_subrev_f32_e32 v[[NEG_A0:[0-9]+]], v{{[0-9]+}}, v[[ZERO]]
-; SI: v_subrev_f32_e32 v[[NEG_A1:[0-9]+]], v{{[0-9]+}}, v[[ZERO]]
-; SI: v_mac_f32_e32 v{{[0-9]+}}, v[[NEG_A0]], v{{[0-9]+}}
-; SI: v_mac_f32_e32 v{{[0-9]+}}, v[[NEG_A1]], v{{[0-9]+}}
+; GCN-LABEL: {{^}}mac_v2f16_neg_b_safe_fp_math:
+
+; SI: v_sub_f32_e32 v[[NEG_A0:[0-9]+]], 0, v{{[0-9]+}}
+; SI: v_sub_f32_e32 v[[NEG_A1:[0-9]+]], 0, v{{[0-9]+}}
+; SI-DAG: v_mac_f32_e32 v{{[0-9]+}}, v[[NEG_A0]], v{{[0-9]+}}
+; SI-DAG: v_mac_f32_e32 v{{[0-9]+}}, v[[NEG_A1]], v{{[0-9]+}}
+
; VI: v_sub_f16_e32 v[[NEG_A0:[0-9]+]], 0, v{{[0-9]+}}
; VI: v_sub_f16_e32 v[[NEG_A1:[0-9]+]], 0, v{{[0-9]+}}
-; VI: v_mac_f16_e32 v{{[0-9]+}}, v[[NEG_A0]], v{{[0-9]+}}
-; VI: v_mac_f16_e32 v{{[0-9]+}}, v[[NEG_A1]], v{{[0-9]+}}
+; VI-DAG: v_mac_f16_sdwa v{{[0-9]+}}, v[[NEG_A0]], v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-DAG: v_mac_f16_e32 v{{[0-9]+}}, v[[NEG_A1]], v{{[0-9]+}}
+
; GCN: s_endpgm
-define void @mac_v2f16_neg_b_safe_fp_math(
+define amdgpu_kernel void @mac_v2f16_neg_b_safe_fp_math(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b,
@@ -497,18 +537,20 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_v2f16_neg_c_safe_fp_math
-; SI: v_cvt_f32_f16_e32 v[[ZERO:[0-9]+]], 0{{$}}
-; SI: v_subrev_f32_e32 v[[NEG_A0:[0-9]+]], v{{[0-9]+}}, v[[ZERO]]
-; SI: v_subrev_f32_e32 v[[NEG_A1:[0-9]+]], v{{[0-9]+}}, v[[ZERO]]
-; SI: v_mac_f32_e32 v[[NEG_A0]], v{{[0-9]+}}, v{{[0-9]+}}
-; SI: v_mac_f32_e32 v[[NEG_A1]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-LABEL: {{^}}mac_v2f16_neg_c_safe_fp_math:
+
+; SI: v_sub_f32_e32 v[[NEG_A0:[0-9]+]], 0, v{{[0-9]+}}
+; SI: v_sub_f32_e32 v[[NEG_A1:[0-9]+]], 0, v{{[0-9]+}}
+; SI-DAG: v_mac_f32_e32 v[[NEG_A0]], v{{[0-9]+}}, v{{[0-9]+}}
+; SI-DAG: v_mac_f32_e32 v[[NEG_A1]], v{{[0-9]+}}, v{{[0-9]+}}
+
; VI: v_sub_f16_e32 v[[NEG_A0:[0-9]+]], 0, v{{[0-9]+}}
; VI: v_sub_f16_e32 v[[NEG_A1:[0-9]+]], 0, v{{[0-9]+}}
-; VI: v_mac_f16_e32 v[[NEG_A0]], v{{[0-9]+}}, v{{[0-9]+}}
-; VI: v_mac_f16_e32 v[[NEG_A1]], v{{[0-9]+}}, v{{[0-9]+}}
+; VI-DAG: v_mac_f16_sdwa v[[NEG_A0]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-DAG: v_mac_f16_e32 v[[NEG_A1]], v{{[0-9]+}}, v{{[0-9]+}}
+
; GCN: s_endpgm
-define void @mac_v2f16_neg_c_safe_fp_math(
+define amdgpu_kernel void @mac_v2f16_neg_c_safe_fp_math(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b,
@@ -526,15 +568,22 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_v2f16_neg_a_unsafe_fp_math
-; SI-NOT: v_mac_f32
-; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
-; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
+; GCN-LABEL: {{^}}mac_v2f16_neg_a_nsz_fp_math:
+; SI: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT1:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT2:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT3:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT4:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT5:v[0-9]+]], {{v[0-9]+}}
+
+; SI-DAG: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; SI-DAG: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+
; VI-NOT: v_mac_f16
; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
; GCN: s_endpgm
-define void @mac_v2f16_neg_a_unsafe_fp_math(
+define amdgpu_kernel void @mac_v2f16_neg_a_nsz_fp_math(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b,
@@ -552,15 +601,22 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_v2f16_neg_b_unsafe_fp_math
-; SI-NOT: v_mac_f32
-; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
-; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
+; GCN-LABEL: {{^}}mac_v2f16_neg_b_nsz_fp_math:
+; SI: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT1:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT2:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT3:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT4:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT5:v[0-9]+]], {{v[0-9]+}}
+
+; SI-DAG: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; SI-DAG: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+
; VI-NOT: v_mac_f16
; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
; GCN: s_endpgm
-define void @mac_v2f16_neg_b_unsafe_fp_math(
+define amdgpu_kernel void @mac_v2f16_neg_b_nsz_fp_math(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b,
@@ -578,15 +634,22 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mac_v2f16_neg_c_unsafe_fp_math
-; SI-NOT: v_mac_f32
-; SI: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[-0-9]}}
-; SI: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[-0-9]}}
+; GCN-LABEL: {{^}}mac_v2f16_neg_c_nsz_fp_math:
+; SI: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT1:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT2:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT3:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT4:v[0-9]+]], {{v[0-9]+}}
+; SI: v_cvt_f32_f16_e32 [[CVT5:v[0-9]+]], {{v[0-9]+}}
+
+; SI-DAG: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
+; SI-DAG: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
+
; VI-NOT: v_mac_f16
; VI: v_mad_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[-0-9]}}
; VI: v_mad_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[-0-9]}}
; GCN: s_endpgm
-define void @mac_v2f16_neg_c_unsafe_fp_math(
+define amdgpu_kernel void @mac_v2f16_neg_c_nsz_fp_math(
<2 x half> addrspace(1)* %r,
<2 x half> addrspace(1)* %a,
<2 x half> addrspace(1)* %b,
@@ -604,5 +667,5 @@ entry:
ret void
}
-attributes #0 = {"unsafe-fp-math"="false"}
-attributes #1 = {"unsafe-fp-math"="true"}
+attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" }
+attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" }
diff --git a/test/CodeGen/AMDGPU/v_madak_f16.ll b/test/CodeGen/AMDGPU/v_madak_f16.ll
index df220d7a977b..bfb10503aaea 100644
--- a/test/CodeGen/AMDGPU/v_madak_f16.ll
+++ b/test/CodeGen/AMDGPU/v_madak_f16.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mattr=-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-fp64-fp16-denormals,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; GCN-LABEL: {{^}}madak_f16
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
@@ -7,7 +7,7 @@
; VI: v_madak_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_F16]], 0x4900{{$}}
; VI: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
-define void @madak_f16(
+define amdgpu_kernel void @madak_f16(
half addrspace(1)* %r,
half addrspace(1)* %a,
half addrspace(1)* %b) {
@@ -28,7 +28,7 @@ entry:
; VI: v_mad_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_mac_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: s_endpgm
-define void @madak_f16_use_2(
+define amdgpu_kernel void @madak_f16_use_2(
half addrspace(1)* %r0,
half addrspace(1)* %r1,
half addrspace(1)* %a,
diff --git a/test/CodeGen/AMDGPU/valu-i1.ll b/test/CodeGen/AMDGPU/valu-i1.ll
index e64f8467240a..85a8929ebe58 100644
--- a/test/CodeGen/AMDGPU/valu-i1.ll
+++ b/test/CodeGen/AMDGPU/valu-i1.ll
@@ -29,7 +29,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
; SI-NEXT: s_xor_b64 exec, exec, [[SAVE3]]
; SI-NEXT: ; mask branch
;
-define void @test_if(i32 %b, i32 addrspace(1)* %src, i32 addrspace(1)* %dst) #1 {
+define amdgpu_kernel void @test_if(i32 %b, i32 addrspace(1)* %src, i32 addrspace(1)* %dst) #1 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
switch i32 %tid, label %default [
@@ -64,29 +64,100 @@ end:
ret void
}
-; SI-LABEL: @simple_test_v_if
+; SI-LABEL: {{^}}simple_test_v_if:
; SI: v_cmp_ne_u32_e32 vcc, 0, v{{[0-9]+}}
; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc
; SI: s_xor_b64 [[BR_SREG]], exec, [[BR_SREG]]
+; SI: ; mask branch [[EXIT:BB[0-9]+_[0-9]+]]
-; SI: BB{{[0-9]+_[0-9]+}}:
+; SI-NEXT: BB{{[0-9]+_[0-9]+}}:
; SI: buffer_store_dword
-; SI: s_endpgm
+; SI-NEXT: s_waitcnt
-; SI: BB1_2:
+; SI-NEXT: {{^}}[[EXIT]]:
; SI: s_or_b64 exec, exec, [[BR_SREG]]
; SI: s_endpgm
-define void @simple_test_v_if(i32 addrspace(1)* %dst, i32 addrspace(1)* %src) #1 {
+define amdgpu_kernel void @simple_test_v_if(i32 addrspace(1)* %dst, i32 addrspace(1)* %src) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%is.0 = icmp ne i32 %tid, 0
- br i1 %is.0, label %store, label %exit
+ br i1 %is.0, label %then, label %exit
+
+then:
+ %gep = getelementptr i32, i32 addrspace(1)* %dst, i32 %tid
+ store i32 999, i32 addrspace(1)* %gep
+ br label %exit
+
+exit:
+ ret void
+}
+
+; FIXME: It would be better to endpgm in the then block.
+
+; SI-LABEL: {{^}}simple_test_v_if_ret_else_ret:
+; SI: v_cmp_ne_u32_e32 vcc, 0, v{{[0-9]+}}
+; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc
+; SI: s_xor_b64 [[BR_SREG]], exec, [[BR_SREG]]
+; SI: ; mask branch [[EXIT:BB[0-9]+_[0-9]+]]
+
+; SI-NEXT: BB{{[0-9]+_[0-9]+}}:
+; SI: buffer_store_dword
+; SI-NEXT: s_waitcnt
+
+; SI-NEXT: {{^}}[[EXIT]]:
+; SI: s_or_b64 exec, exec, [[BR_SREG]]
+; SI: s_endpgm
+define amdgpu_kernel void @simple_test_v_if_ret_else_ret(i32 addrspace(1)* %dst, i32 addrspace(1)* %src) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %is.0 = icmp ne i32 %tid, 0
+ br i1 %is.0, label %then, label %exit
+
+then:
+ %gep = getelementptr i32, i32 addrspace(1)* %dst, i32 %tid
+ store i32 999, i32 addrspace(1)* %gep
+ ret void
+
+exit:
+ ret void
+}
+
+; Final block has more than a ret to execute. This was miscompiled
+; before function exit blocks were unified since the endpgm would
+; terminate the then wavefront before reaching the store.
+
+; SI-LABEL: {{^}}simple_test_v_if_ret_else_code_ret:
+; SI: v_cmp_eq_u32_e32 vcc, 0, v{{[0-9]+}}
+; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc
+; SI: s_xor_b64 [[BR_SREG]], exec, [[BR_SREG]]
+; SI: ; mask branch [[FLOW:BB[0-9]+_[0-9]+]]
+
+; SI-NEXT: {{^BB[0-9]+_[0-9]+}}: ; %exit
+; SI: ds_write_b32
+; SI: s_waitcnt
+
+; SI-NEXT: {{^}}[[FLOW]]:
+; SI-NEXT: s_or_saveexec_b64
+; SI-NEXT: s_xor_b64 exec, exec
+; SI-NEXT: ; mask branch [[UNIFIED_RETURN:BB[0-9]+_[0-9]+]]
+
+; SI-NEXT: {{^BB[0-9]+_[0-9]+}}: ; %then
+; SI: buffer_store_dword
+; SI-NEXT: s_waitcnt
+
+; SI-NEXT: {{^}}[[UNIFIED_RETURN]]: ; %UnifiedReturnBlock
+; SI: s_or_b64 exec, exec
+; SI: s_endpgm
+define amdgpu_kernel void @simple_test_v_if_ret_else_code_ret(i32 addrspace(1)* %dst, i32 addrspace(1)* %src) #1 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %is.0 = icmp ne i32 %tid, 0
+ br i1 %is.0, label %then, label %exit
-store:
+then:
%gep = getelementptr i32, i32 addrspace(1)* %dst, i32 %tid
store i32 999, i32 addrspace(1)* %gep
ret void
exit:
+ store volatile i32 7, i32 addrspace(3)* undef
ret void
}
@@ -101,12 +172,12 @@ exit:
; SI: [[LABEL_LOOP:BB[0-9]+_[0-9]+]]:
; SI: buffer_load_dword
; SI-DAG: buffer_store_dword
-; SI-DAG: s_cmpk_eq_i32 s{{[0-9]+}}, 0x100
-; SI: s_cbranch_scc0 [[LABEL_LOOP]]
+; SI-DAG: v_cmp_eq_u32_e32 vcc, 0x100
+; SI: s_cbranch_vccz [[LABEL_LOOP]]
; SI: [[LABEL_EXIT]]:
; SI: s_endpgm
-define void @simple_test_v_loop(i32 addrspace(1)* %dst, i32 addrspace(1)* %src) #1 {
+define amdgpu_kernel void @simple_test_v_loop(i32 addrspace(1)* %dst, i32 addrspace(1)* %src) #1 {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%is.0 = icmp ne i32 %tid, 0
@@ -156,7 +227,7 @@ exit:
; SI: BB{{[0-9]+_[0-9]+}}: ; %bb20
; SI: buffer_store_dword
-; SI: v_cmp_ge_i64_e32 [[CMP:s\[[0-9]+:[0-9]+\]|vcc]]
+; SI: v_cmp_ge_i64_e{{32|64}} [[CMP:s\[[0-9]+:[0-9]+\]|vcc]]
; SI: s_or_b64 [[TMP:s\[[0-9]+:[0-9]+\]]], [[CMP]], [[COND_STATE]]
; SI: [[LABEL_FLOW]]:
@@ -173,7 +244,7 @@ exit:
; SI-NOT: [[COND_STATE]]
; SI: s_endpgm
-define void @multi_vcond_loop(i32 addrspace(1)* noalias nocapture %arg, i32 addrspace(1)* noalias nocapture readonly %arg1, i32 addrspace(1)* noalias nocapture readonly %arg2, i32 addrspace(1)* noalias nocapture readonly %arg3) #1 {
+define amdgpu_kernel void @multi_vcond_loop(i32 addrspace(1)* noalias nocapture %arg, i32 addrspace(1)* noalias nocapture readonly %arg1, i32 addrspace(1)* noalias nocapture readonly %arg2, i32 addrspace(1)* noalias nocapture readonly %arg3) #1 {
bb:
%tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%tmp4 = sext i32 %tmp to i64
diff --git a/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir b/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
index 03e473e3a0c0..5e5465800c3a 100644
--- a/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
+++ b/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
@@ -1,7 +1,7 @@
# RUN: llc -run-pass si-insert-waits -march=amdgcn -mcpu=tahiti -o - %s | FileCheck %s
--- |
- define void @vccz_corrupt_workaround(float %cond, i32 addrspace(1)* %out) #0 {
+ define amdgpu_kernel void @vccz_corrupt_workaround(float %cond, i32 addrspace(1)* %out) #0 {
entry:
%cmp0 = fcmp oeq float %cond, 0.000000e+00
br i1 %cmp0, label %if, label %else, !structurizecfg.uniform !0, !amdgpu.uniform !0
@@ -20,7 +20,7 @@
ret void
}
- define void @vccz_corrupt_undef_vcc(float %cond, i32 addrspace(1)* %out) #0 {
+ define amdgpu_kernel void @vccz_corrupt_undef_vcc(float %cond, i32 addrspace(1)* %out) #0 {
entry:
br i1 undef, label %if, label %else, !structurizecfg.uniform !0, !amdgpu.uniform !0
diff --git a/test/CodeGen/AMDGPU/vector-alloca.ll b/test/CodeGen/AMDGPU/vector-alloca.ll
index 7dcf36f144ac..03cf725601b7 100644
--- a/test/CodeGen/AMDGPU/vector-alloca.ll
+++ b/test/CodeGen/AMDGPU/vector-alloca.ll
@@ -15,7 +15,7 @@
; EG: MOV
; EG: MOV
; EG: MOVA_INT
-define void @vector_read(i32 addrspace(1)* %out, i32 %index) {
+define amdgpu_kernel void @vector_read(i32 addrspace(1)* %out, i32 %index) {
entry:
%tmp = alloca [4 x i32]
%x = getelementptr [4 x i32], [4 x i32]* %tmp, i32 0, i32 0
@@ -44,7 +44,7 @@ entry:
; EG: MOV
; EG: MOVA_INT
; EG: MOVA_INT
-define void @vector_write(i32 addrspace(1)* %out, i32 %w_index, i32 %r_index) {
+define amdgpu_kernel void @vector_write(i32 addrspace(1)* %out, i32 %w_index, i32 %r_index) {
entry:
%tmp = alloca [4 x i32]
%x = getelementptr [4 x i32], [4 x i32]* %tmp, i32 0, i32 0
@@ -71,7 +71,7 @@ entry:
; FUNC-LABEL: {{^}}bitcast_gep:
; EG: STORE_RAW
-define void @bitcast_gep(i32 addrspace(1)* %out, i32 %w_index, i32 %r_index) {
+define amdgpu_kernel void @bitcast_gep(i32 addrspace(1)* %out, i32 %w_index, i32 %r_index) {
entry:
%tmp = alloca [4 x i32]
%x = getelementptr [4 x i32], [4 x i32]* %tmp, i32 0, i32 0
@@ -93,7 +93,7 @@ entry:
; OPT-LABEL: @vector_read_bitcast_gep(
; OPT: %0 = extractelement <4 x i32> <i32 1065353216, i32 1, i32 2, i32 3>, i32 %index
; OPT: store i32 %0, i32 addrspace(1)* %out, align 4
-define void @vector_read_bitcast_gep(i32 addrspace(1)* %out, i32 %index) {
+define amdgpu_kernel void @vector_read_bitcast_gep(i32 addrspace(1)* %out, i32 %index) {
entry:
%tmp = alloca [4 x i32]
%x = getelementptr inbounds [4 x i32], [4 x i32]* %tmp, i32 0, i32 0
@@ -121,7 +121,7 @@ entry:
; OPT: store float
; OPT: store float
; OPT: load float
-define void @vector_read_bitcast_alloca(float addrspace(1)* %out, i32 %index) {
+define amdgpu_kernel void @vector_read_bitcast_alloca(float addrspace(1)* %out, i32 %index) {
entry:
%tmp = alloca [4 x i32]
%tmp.bc = bitcast [4 x i32]* %tmp to [4 x float]*
diff --git a/test/CodeGen/AMDGPU/vector-extract-insert.ll b/test/CodeGen/AMDGPU/vector-extract-insert.ll
index 2d39f82e2499..ab2bfcfd1fb7 100644
--- a/test/CodeGen/AMDGPU/vector-extract-insert.ll
+++ b/test/CodeGen/AMDGPU/vector-extract-insert.ll
@@ -13,7 +13,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() #0
; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
; GCN-NOT: [[VVAL]]
; GCN: buffer_store_dword [[VVAL]]
-define void @extract_insert_same_dynelt_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %val, i32 %idx) #1 {
+define amdgpu_kernel void @extract_insert_same_dynelt_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %val, i32 %idx) #1 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%id.ext = sext i32 %id to i64
%gep.in = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %in, i64 %id.ext
@@ -30,7 +30,7 @@ define void @extract_insert_same_dynelt_v4i32(i32 addrspace(1)* %out, <4 x i32>
; GCN: v_movreld_b32
; GCN: v_movrels_b32
; GCN: buffer_store_dword v
-define void @extract_insert_different_dynelt_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %val, i32 %idx0, i32 %idx1) #1 {
+define amdgpu_kernel void @extract_insert_different_dynelt_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %val, i32 %idx0, i32 %idx1) #1 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%id.ext = sext i32 %id to i64
%gep.in = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %in, i64 %id.ext
@@ -49,7 +49,7 @@ define void @extract_insert_different_dynelt_v4i32(i32 addrspace(1)* %out, <4 x
; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
; GCN-NOT: [[VVAL]]
; GCN: buffer_store_dword [[VVAL]]
-define void @extract_insert_same_elt2_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %val, i32 %idx) #1 {
+define amdgpu_kernel void @extract_insert_same_elt2_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %val, i32 %idx) #1 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%id.ext = sext i32 %id to i64
%gep.in = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %in, i64 %id.ext
@@ -68,7 +68,7 @@ define void @extract_insert_same_elt2_v4i32(i32 addrspace(1)* %out, <4 x i32> ad
; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
; GCN-NOT: [[VVAL]]
; GCN: buffer_store_dword [[VVAL]]
-define void @extract_insert_same_dynelt_v4f32(float addrspace(1)* %out, <4 x float> addrspace(1)* %in, float %val, i32 %idx) #1 {
+define amdgpu_kernel void @extract_insert_same_dynelt_v4f32(float addrspace(1)* %out, <4 x float> addrspace(1)* %in, float %val, i32 %idx) #1 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%id.ext = sext i32 %id to i64
%gep.in = getelementptr inbounds <4 x float>, <4 x float> addrspace(1)* %in, i64 %id.ext
diff --git a/test/CodeGen/AMDGPU/vectorize-global-local.ll b/test/CodeGen/AMDGPU/vectorize-global-local.ll
new file mode 100644
index 000000000000..90cf34e609f6
--- /dev/null
+++ b/test/CodeGen/AMDGPU/vectorize-global-local.ll
@@ -0,0 +1,80 @@
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s
+; CHECK-DAG: flat_load_dwordx4
+; CHECK-DAG: flat_load_dwordx4
+; CHECK-DAG: flat_load_dwordx4
+; CHECK-DAG: flat_load_dwordx4
+; CHECK-DAG: ds_write2_b32
+; CHECK-DAG: ds_write2_b32
+; CHECK-DAG: ds_write2_b32
+; CHECK-DAG: ds_write2_b32
+; CHECK-DAG: ds_write2_b32
+; CHECK-DAG: ds_write2_b32
+; CHECK-DAG: ds_write2_b32
+; CHECK-DAG: ds_write2_b32
+
+define amdgpu_kernel void @vectorize_global_local(i32 addrspace(1)* nocapture readonly %arg, i32 addrspace(3)* nocapture %arg1) {
+bb:
+ %tmp = load i32, i32 addrspace(1)* %arg, align 4
+ store i32 %tmp, i32 addrspace(3)* %arg1, align 4
+ %tmp2 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 1
+ %tmp3 = load i32, i32 addrspace(1)* %tmp2, align 4
+ %tmp4 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 1
+ store i32 %tmp3, i32 addrspace(3)* %tmp4, align 4
+ %tmp5 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 2
+ %tmp6 = load i32, i32 addrspace(1)* %tmp5, align 4
+ %tmp7 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 2
+ store i32 %tmp6, i32 addrspace(3)* %tmp7, align 4
+ %tmp8 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 3
+ %tmp9 = load i32, i32 addrspace(1)* %tmp8, align 4
+ %tmp10 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 3
+ store i32 %tmp9, i32 addrspace(3)* %tmp10, align 4
+ %tmp11 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 4
+ %tmp12 = load i32, i32 addrspace(1)* %tmp11, align 4
+ %tmp13 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 4
+ store i32 %tmp12, i32 addrspace(3)* %tmp13, align 4
+ %tmp14 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 5
+ %tmp15 = load i32, i32 addrspace(1)* %tmp14, align 4
+ %tmp16 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 5
+ store i32 %tmp15, i32 addrspace(3)* %tmp16, align 4
+ %tmp17 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 6
+ %tmp18 = load i32, i32 addrspace(1)* %tmp17, align 4
+ %tmp19 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 6
+ store i32 %tmp18, i32 addrspace(3)* %tmp19, align 4
+ %tmp20 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 7
+ %tmp21 = load i32, i32 addrspace(1)* %tmp20, align 4
+ %tmp22 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 7
+ store i32 %tmp21, i32 addrspace(3)* %tmp22, align 4
+ %tmp23 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 8
+ %tmp24 = load i32, i32 addrspace(1)* %tmp23, align 4
+ %tmp25 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 8
+ store i32 %tmp24, i32 addrspace(3)* %tmp25, align 4
+ %tmp26 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 9
+ %tmp27 = load i32, i32 addrspace(1)* %tmp26, align 4
+ %tmp28 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 9
+ store i32 %tmp27, i32 addrspace(3)* %tmp28, align 4
+ %tmp29 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 10
+ %tmp30 = load i32, i32 addrspace(1)* %tmp29, align 4
+ %tmp31 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 10
+ store i32 %tmp30, i32 addrspace(3)* %tmp31, align 4
+ %tmp32 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 11
+ %tmp33 = load i32, i32 addrspace(1)* %tmp32, align 4
+ %tmp34 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 11
+ store i32 %tmp33, i32 addrspace(3)* %tmp34, align 4
+ %tmp35 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 12
+ %tmp36 = load i32, i32 addrspace(1)* %tmp35, align 4
+ %tmp37 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 12
+ store i32 %tmp36, i32 addrspace(3)* %tmp37, align 4
+ %tmp38 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 13
+ %tmp39 = load i32, i32 addrspace(1)* %tmp38, align 4
+ %tmp40 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 13
+ store i32 %tmp39, i32 addrspace(3)* %tmp40, align 4
+ %tmp41 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 14
+ %tmp42 = load i32, i32 addrspace(1)* %tmp41, align 4
+ %tmp43 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 14
+ store i32 %tmp42, i32 addrspace(3)* %tmp43, align 4
+ %tmp44 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 15
+ %tmp45 = load i32, i32 addrspace(1)* %tmp44, align 4
+ %tmp46 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 15
+ store i32 %tmp45, i32 addrspace(3)* %tmp46, align 4
+ ret void
+}
diff --git a/test/CodeGen/AMDGPU/vertex-fetch-encoding.ll b/test/CodeGen/AMDGPU/vertex-fetch-encoding.ll
index 3d71062f1fba..46a1c87184d1 100644
--- a/test/CodeGen/AMDGPU/vertex-fetch-encoding.ll
+++ b/test/CodeGen/AMDGPU/vertex-fetch-encoding.ll
@@ -6,7 +6,7 @@
; EG: VTX_READ_32 T[[GPR:[0-9]]].X, T[[GPR]].X, 0, #1 ; encoding: [0x40,0x01,0x0[[GPR]],0x10,0x0[[GPR]],0xf0,0x5f,0x13,0x00,0x00,0x08,0x00
; CM: VTX_READ_32 T[[GPR:[0-9]]].X, T[[GPR]].X, 0, #1 ; encoding: [0x40,0x01,0x0[[GPR]],0x00,0x0[[GPR]],0xf0,0x5f,0x13,0x00,0x00,0x00,0x00
-define void @vtx_fetch32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+define amdgpu_kernel void @vtx_fetch32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%v = load i32, i32 addrspace(1)* %in
store i32 %v, i32 addrspace(1)* %out
ret void
@@ -16,7 +16,7 @@ define void @vtx_fetch32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; EG: VTX_READ_128 T[[DST:[0-9]]].XYZW, T[[SRC:[0-9]]].X, 0, #1 ; encoding: [0x40,0x01,0x0[[SRC]],0x40,0x0[[DST]],0x10,0x8d,0x18,0x00,0x00,0x08,0x00
; CM: VTX_READ_128 T[[DST:[0-9]]].XYZW, T[[SRC:[0-9]]].X, 0, #1 ; encoding: [0x40,0x01,0x0[[SRC]],0x00,0x0[[DST]],0x10,0x8d,0x18,0x00,0x00,0x00,0x00
-define void @vtx_fetch128(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @vtx_fetch128(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%v = load <4 x i32>, <4 x i32> addrspace(1)* %in
store <4 x i32> %v, <4 x i32> addrspace(1)* %out
ret void
@@ -26,7 +26,7 @@ define void @vtx_fetch128(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)*
; EG: VTX_READ_32 T[[GPR:[0-9]]].X, T[[GPR]].X, 0, #3 ; encoding: [0x40,0x03,0x0[[GPR]],0x10,0x0[[GPR]],0xf0,0x5f,0x13,0x00,0x00,0x08,0x00
; CM: VTX_READ_32 T[[GPR:[0-9]]].X, T[[GPR]].X, 0, #3 ; encoding: [0x40,0x03,0x0[[GPR]],0x00,0x0[[GPR]],0xf0,0x5f,0x13,0x00,0x00,0x00,0x00
-define void @vtx_fetch32_id3(i32 addrspace(1)* %out, i32 addrspace(7)* %in) {
+define amdgpu_kernel void @vtx_fetch32_id3(i32 addrspace(1)* %out, i32 addrspace(7)* %in) {
%v = load i32, i32 addrspace(7)* %in
store i32 %v, i32 addrspace(1)* %out
ret void
@@ -38,7 +38,7 @@ define void @vtx_fetch32_id3(i32 addrspace(1)* %out, i32 addrspace(7)* %in) {
@t = internal addrspace(2) constant [4 x i32] [i32 0, i32 1, i32 2, i32 3]
-define void @vtx_fetch32_id2(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @vtx_fetch32_id2(i32 addrspace(1)* %out, i32 %in) {
%a = getelementptr inbounds [4 x i32], [4 x i32] addrspace(2)* @t, i32 0, i32 %in
%v = load i32, i32 addrspace(2)* %a
store i32 %v, i32 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll b/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll
index a8908f87fbf6..e82e548f23cd 100644
--- a/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll
+++ b/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll
@@ -1,5 +1,6 @@
; RUN: llc -march=amdgcn -mcpu=tahiti -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCNMESA -check-prefix=SIMESA %s
; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+vgpr-spilling,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCNMESA -check-prefix=VIMESA %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=+vgpr-spilling,-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCNMESA -check-prefix=GFX9MESA %s
; RUN: llc -march=amdgcn -mcpu=hawaii -mtriple=amdgcn-unknown-amdhsa -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CIHSA -check-prefix=HSA %s
; RUN: llc -march=amdgcn -mcpu=fiji -mtriple=amdgcn-unknown-amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VIHSA -check-prefix=HSA %s
@@ -15,16 +16,17 @@
; HSA: enable_sgpr_private_segment_buffer = 1
; HSA: enable_sgpr_flat_scratch_init = 0
-; HSA: workitem_private_segment_byte_size = 1024
+; HSA: workitem_private_segment_byte_size = 1536
; GCN-NOT: flat_scr
; GCNMESA-DAG: s_mov_b32 s16, s3
; GCNMESA-DAG: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GCNMESA--DAG: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; GCNMESA-DAG: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GCNMESA-DAG: s_mov_b32 s14, -1
; SIMESA-DAG: s_mov_b32 s15, 0xe8f000
; VIMESA-DAG: s_mov_b32 s15, 0xe80000
+; GFX9MESA-DAG: s_mov_b32 s15, 0xe00000
; GCN: buffer_store_dword {{v[0-9]+}}, off, s[12:15], s16 offset:{{[0-9]+}} ; 4-byte Folded Spill
@@ -40,10 +42,10 @@
; GCN: buffer_load_dword {{v[0-9]+}}, off, s[12:15], s16 offset:{{[0-9]+}}
; GCN: NumVgprs: 256
-; GCN: ScratchSize: 1024
+; GCN: ScratchSize: 1536
; s[0:3] input user SGPRs. s4,s5,s6 = workgroup IDs. s8 scratch offset.
-define void @spill_vgpr_compute(<4 x float> %arg6, float addrspace(1)* %arg, i32 %arg1, i32 %arg2, float %arg3, float %arg4, float %arg5) #0 {
+define amdgpu_kernel void @spill_vgpr_compute(<4 x float> %arg6, float addrspace(1)* %arg, i32 %arg1, i32 %arg2, float %arg3, float %arg4, float %arg5) #0 {
bb:
%tmp = add i32 %arg1, %arg2
%tmp7 = extractelement <4 x float> %arg6, i32 0
diff --git a/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll b/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
index 4de35b97aeab..c9c8583d5e87 100644
--- a/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
+++ b/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
@@ -1,5 +1,6 @@
; RUN: llc -march=amdgcn -mcpu=tahiti -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
; This ends up using all 255 registers and requires register
; scavenging which will fail to find an unused register.
@@ -12,19 +13,19 @@
; GCN-LABEL: {{^}}main:
-; GCN-DAG: s_mov_b32 s11, s12
-; GCN-DAG: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GCN-DAG: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
-; GCN-DAG: s_mov_b32 s14, -1
-; SI-DAG: s_mov_b32 s15, 0xe8f000
-; VI-DAG: s_mov_b32 s15, 0xe80000
-
-; s11 is offset system SGPR
-; GCN: buffer_store_dword {{v[0-9]+}}, off, s[12:15], s11 offset:{{[0-9]+}} ; 4-byte Folded Spill
-; GCN: buffer_load_dword v{{[0-9]+}}, off, s[12:15], s11 offset:{{[0-9]+}} ; 4-byte Folded Reload
+; GCN-DAG: s_mov_b32 s[[OFFREG:[0-9]+]], s12
+; GCN-DAG: s_mov_b32 s[[DESC0:[0-9]+]], SCRATCH_RSRC_DWORD0
+; GCN-DAG: s_mov_b32 s{{[0-9]+}}, SCRATCH_RSRC_DWORD1
+; GCN-DAG: s_mov_b32 s{{[0-9]+}}, -1
+; SI-DAG: s_mov_b32 s[[DESC3:[0-9]+]], 0xe8f000
+; VI-DAG: s_mov_b32 s[[DESC3:[0-9]+]], 0xe80000
+; GFX9-DAG: s_mov_b32 s[[DESC3:[0-9]+]], 0xe00000
+; OFFREG is offset system SGPR
+; GCN: buffer_store_dword {{v[0-9]+}}, off, s{{\[}}[[DESC0]]:[[DESC3]]], s[[OFFREG]] offset:{{[0-9]+}} ; 4-byte Folded Spill
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s{{\[}}[[DESC0]]:[[DESC3]]], s[[OFFREG]] offset:{{[0-9]+}} ; 4-byte Folded Reload
; GCN: NumVgprs: 256
-; GCN: ScratchSize: 1024
+; GCN: ScratchSize: 1536
define amdgpu_vs void @main([9 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [17 x <4 x i32>] addrspace(2)* byval %arg2, [34 x <8 x i32>] addrspace(2)* byval %arg3, [16 x <16 x i8>] addrspace(2)* byval %arg4, i32 inreg %arg5, i32 inreg %arg6, i32 %arg7, i32 %arg8, i32 %arg9, i32 %arg10) #0 {
bb:
@@ -36,7 +37,8 @@ bb:
%tmp15 = getelementptr [16 x <16 x i8>], [16 x <16 x i8>] addrspace(2)* %arg4, i64 0, i64 0
%tmp16 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp15, align 16, !tbaa !0
%tmp17 = add i32 %arg5, %arg7
- %tmp18 = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> %tmp16, i32 0, i32 %tmp17)
+ %tmp16.cast = bitcast <16 x i8> %tmp16 to <4 x i32>
+ %tmp18 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %tmp16.cast, i32 %tmp17, i32 0, i1 false, i1 false)
%tmp19 = extractelement <4 x float> %tmp18, i32 0
%tmp20 = extractelement <4 x float> %tmp18, i32 1
%tmp21 = extractelement <4 x float> %tmp18, i32 2
@@ -180,39 +182,39 @@ bb24: ; preds = %bb157, %bb
br i1 %tmp155, label %bb156, label %bb157
bb156: ; preds = %bb24
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 32, i32 0, float %tmp12, float %tmp103, float %tmp102, float %tmp101)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 33, i32 0, float %tmp99, float %tmp98, float %tmp97, float %tmp95)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 34, i32 0, float %tmp94, float %tmp93, float %tmp91, float %tmp90)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 35, i32 0, float %tmp89, float %tmp87, float %tmp86, float %tmp85)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 36, i32 0, float %tmp83, float %tmp82, float %tmp81, float %tmp79)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 37, i32 0, float %tmp78, float %tmp77, float %tmp75, float %tmp74)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 38, i32 0, float %tmp73, float %tmp71, float %tmp70, float %tmp69)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 39, i32 0, float %tmp67, float %tmp66, float %tmp65, float %tmp63)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 40, i32 0, float %tmp62, float %tmp61, float %tmp59, float %tmp58)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 41, i32 0, float %tmp57, float %tmp55, float %tmp54, float %tmp53)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 42, i32 0, float %tmp51, float %tmp50, float %tmp49, float %tmp47)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 43, i32 0, float %tmp46, float %tmp45, float %tmp43, float %tmp42)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 44, i32 0, float %tmp41, float %tmp39, float %tmp38, float %tmp37)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 45, i32 0, float %tmp35, float %tmp34, float %tmp33, float %tmp31)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 46, i32 0, float %tmp30, float %tmp29, float %tmp27, float %tmp26)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 47, i32 0, float %tmp25, float %tmp28, float %tmp32, float %tmp36)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 48, i32 0, float %tmp40, float %tmp44, float %tmp48, float %tmp52)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 49, i32 0, float %tmp56, float %tmp60, float %tmp64, float %tmp68)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 50, i32 0, float %tmp72, float %tmp76, float %tmp80, float %tmp84)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 51, i32 0, float %tmp88, float %tmp92, float %tmp96, float %tmp100)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 52, i32 0, float %tmp104, float %tmp105, float %tmp106, float %tmp108)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 53, i32 0, float %tmp109, float %tmp110, float %tmp111, float %tmp112)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 54, i32 0, float %tmp113, float %tmp114, float %tmp115, float %tmp116)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 55, i32 0, float %tmp117, float %tmp118, float %tmp119, float %tmp120)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 56, i32 0, float %tmp121, float %tmp122, float %tmp123, float %tmp124)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 57, i32 0, float %tmp125, float %tmp126, float %tmp127, float %tmp128)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 58, i32 0, float %tmp129, float %tmp130, float %tmp131, float %tmp132)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 59, i32 0, float %tmp133, float %tmp134, float %tmp135, float %tmp136)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 60, i32 0, float %tmp137, float %tmp138, float %tmp139, float %tmp140)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 61, i32 0, float %tmp141, float %tmp142, float %tmp143, float %tmp144)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 62, i32 0, float %tmp145, float %tmp146, float %tmp147, float %tmp148)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 63, i32 0, float %tmp149, float %tmp150, float %tmp151, float %tmp13)
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %tmp19, float %tmp20, float %tmp21, float %tmp22)
+ call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %tmp12, float %tmp103, float %tmp102, float %tmp101, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 33, i32 15, float %tmp99, float %tmp98, float %tmp97, float %tmp95, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 34, i32 15, float %tmp94, float %tmp93, float %tmp91, float %tmp90, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 35, i32 15, float %tmp89, float %tmp87, float %tmp86, float %tmp85, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 36, i32 15, float %tmp83, float %tmp82, float %tmp81, float %tmp79, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 37, i32 15, float %tmp78, float %tmp77, float %tmp75, float %tmp74, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 38, i32 15, float %tmp73, float %tmp71, float %tmp70, float %tmp69, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 39, i32 15, float %tmp67, float %tmp66, float %tmp65, float %tmp63, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 40, i32 15, float %tmp62, float %tmp61, float %tmp59, float %tmp58, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 41, i32 15, float %tmp57, float %tmp55, float %tmp54, float %tmp53, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 42, i32 15, float %tmp51, float %tmp50, float %tmp49, float %tmp47, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 43, i32 15, float %tmp46, float %tmp45, float %tmp43, float %tmp42, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 44, i32 15, float %tmp41, float %tmp39, float %tmp38, float %tmp37, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 45, i32 15, float %tmp35, float %tmp34, float %tmp33, float %tmp31, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 46, i32 15, float %tmp30, float %tmp29, float %tmp27, float %tmp26, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 47, i32 15, float %tmp25, float %tmp28, float %tmp32, float %tmp36, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 48, i32 15, float %tmp40, float %tmp44, float %tmp48, float %tmp52, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 49, i32 15, float %tmp56, float %tmp60, float %tmp64, float %tmp68, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 50, i32 15, float %tmp72, float %tmp76, float %tmp80, float %tmp84, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 51, i32 15, float %tmp88, float %tmp92, float %tmp96, float %tmp100, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 52, i32 15, float %tmp104, float %tmp105, float %tmp106, float %tmp108, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 53, i32 15, float %tmp109, float %tmp110, float %tmp111, float %tmp112, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 54, i32 15, float %tmp113, float %tmp114, float %tmp115, float %tmp116, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 55, i32 15, float %tmp117, float %tmp118, float %tmp119, float %tmp120, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 56, i32 15, float %tmp121, float %tmp122, float %tmp123, float %tmp124, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 57, i32 15, float %tmp125, float %tmp126, float %tmp127, float %tmp128, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 58, i32 15, float %tmp129, float %tmp130, float %tmp131, float %tmp132, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 59, i32 15, float %tmp133, float %tmp134, float %tmp135, float %tmp136, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 60, i32 15, float %tmp137, float %tmp138, float %tmp139, float %tmp140, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 61, i32 15, float %tmp141, float %tmp142, float %tmp143, float %tmp144, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 62, i32 15, float %tmp145, float %tmp146, float %tmp147, float %tmp148, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 63, i32 15, float %tmp149, float %tmp150, float %tmp151, float %tmp13, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float %tmp19, float %tmp20, float %tmp21, float %tmp22, i1 true, i1 false) #0
ret void
bb157: ; preds = %bb24
@@ -483,18 +485,15 @@ bb157: ; preds = %bb24
br label %bb24
}
-; Function Attrs: nounwind readnone
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
-
-; Function Attrs: nounwind readnone
-declare <4 x float> @llvm.SI.vs.load.input(<16 x i8>, i32, i32) #1
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
+
+declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+declare <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32>, i32, i32, i1, i1) #2
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind readonly }
!0 = !{!1, !1, i64 0, i32 1}
!1 = !{!"const", !2}
diff --git a/test/CodeGen/AMDGPU/vi-removed-intrinsics.ll b/test/CodeGen/AMDGPU/vi-removed-intrinsics.ll
index ad7521a3da9b..8d66c346ed5b 100644
--- a/test/CodeGen/AMDGPU/vi-removed-intrinsics.ll
+++ b/test/CodeGen/AMDGPU/vi-removed-intrinsics.ll
@@ -1,10 +1,10 @@
; RUN: not llc -march=amdgcn -mcpu=tonga < %s 2>&1 | FileCheck -check-prefix=ERROR %s
-; ERROR: error: :1:42: in function rsq_legacy_f32 void (float addrspace(1)*, float): intrinsic not supported on subtarget
+; ERROR: error: foo.cl:1:42: in function rsq_legacy_f32 void (float addrspace(1)*, float): intrinsic not supported on subtarget
declare float @llvm.amdgcn.rsq.legacy(float) #0
-define void @rsq_legacy_f32(float addrspace(1)* %out, float %src) #1 {
+define amdgpu_kernel void @rsq_legacy_f32(float addrspace(1)* %out, float %src) #1 {
%rsq = call float @llvm.amdgcn.rsq.legacy(float %src), !dbg !4
store float %rsq, float addrspace(1)* %out, align 4
ret void
@@ -21,4 +21,4 @@ attributes #1 = { nounwind }
!2 = !{i32 2, !"Dwarf Version", i32 4}
!3 = !{i32 2, !"Debug Info Version", i32 3}
!4 = !DILocation(line: 1, column: 42, scope: !5)
-!5 = distinct !DISubprogram(name: "rsq_legacy_f32", scope: null, line: 1, isLocal: false, isDefinition: true, scopeLine: 2, isOptimized: false, unit: !0)
+!5 = distinct !DISubprogram(name: "rsq_legacy_f32", scope: null, file: !1, line: 1, isLocal: false, isDefinition: true, scopeLine: 2, isOptimized: false, unit: !0)
diff --git a/test/CodeGen/AMDGPU/vop-shrink.ll b/test/CodeGen/AMDGPU/vop-shrink.ll
index ae8ec58270c1..d2708b068eb4 100644
--- a/test/CodeGen/AMDGPU/vop-shrink.ll
+++ b/test/CodeGen/AMDGPU/vop-shrink.ll
@@ -8,7 +8,7 @@
; ModuleID = 'vop-shrink.ll'
-define void @sub_rev(i32 addrspace(1)* %out, <4 x i32> %sgpr, i32 %cond) {
+define amdgpu_kernel void @sub_rev(i32 addrspace(1)* %out, <4 x i32> %sgpr, i32 %cond) {
entry:
%vgpr = call i32 @llvm.amdgcn.workitem.id.x() #1
%tmp = icmp eq i32 %cond, 0
@@ -35,7 +35,7 @@ endif: ; preds = %else, %if
; FUNC-LABEL: {{^}}add_fold:
; SI: v_add_f32_e32 v{{[0-9]+}}, 0x44800000
-define void @add_fold(float addrspace(1)* %out) {
+define amdgpu_kernel void @add_fold(float addrspace(1)* %out) {
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = uitofp i32 %tmp to float
diff --git a/test/CodeGen/AMDGPU/vselect.ll b/test/CodeGen/AMDGPU/vselect.ll
index fe5be7526b19..bb6234729f90 100644
--- a/test/CodeGen/AMDGPU/vselect.ll
+++ b/test/CodeGen/AMDGPU/vselect.ll
@@ -10,7 +10,7 @@
; SI: v_cndmask_b32_e64
; SI: v_cndmask_b32_e32
-define void @test_select_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in0, <2 x i32> addrspace(1)* %in1, <2 x i32> %val) {
+define amdgpu_kernel void @test_select_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in0, <2 x i32> addrspace(1)* %in1, <2 x i32> %val) {
entry:
%load0 = load <2 x i32>, <2 x i32> addrspace(1)* %in0
%load1 = load <2 x i32>, <2 x i32> addrspace(1)* %in1
@@ -28,7 +28,7 @@ entry:
;SI: v_cndmask_b32_e64
;SI: v_cndmask_b32_e32
-define void @test_select_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in0, <2 x float> addrspace(1)* %in1) {
+define amdgpu_kernel void @test_select_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in0, <2 x float> addrspace(1)* %in1) {
entry:
%0 = load <2 x float>, <2 x float> addrspace(1)* %in0
%1 = load <2 x float>, <2 x float> addrspace(1)* %in1
@@ -52,7 +52,7 @@ entry:
; SI: v_cndmask_b32
; SI: v_cndmask_b32
-define void @test_select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in0, <4 x i32> addrspace(1)* %in1, <4 x i32> %val) {
+define amdgpu_kernel void @test_select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in0, <4 x i32> addrspace(1)* %in1, <4 x i32> %val) {
entry:
%load0 = load <4 x i32>, <4 x i32> addrspace(1)* %in0
%load1 = load <4 x i32>, <4 x i32> addrspace(1)* %in1
@@ -68,7 +68,7 @@ entry:
;EG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @test_select_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in0, <4 x float> addrspace(1)* %in1) {
+define amdgpu_kernel void @test_select_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in0, <4 x float> addrspace(1)* %in1) {
entry:
%0 = load <4 x float>, <4 x float> addrspace(1)* %in0
%1 = load <4 x float>, <4 x float> addrspace(1)* %in1
diff --git a/test/CodeGen/AMDGPU/vselect64.ll b/test/CodeGen/AMDGPU/vselect64.ll
index ef85ebe7899f..4a0435565161 100644
--- a/test/CodeGen/AMDGPU/vselect64.ll
+++ b/test/CodeGen/AMDGPU/vselect64.ll
@@ -5,7 +5,7 @@
; Make sure the vectors aren't being stored on the stack. We know they are
; being stored on the stack if the shader uses at least 10 registers.
; CHECK-NOT: {{\**}} MOV T{{[0-9][0-9]}}.X
-define void @test_select_v4i64(<4 x i64> addrspace(1)* %out, <4 x i32> %c) {
+define amdgpu_kernel void @test_select_v4i64(<4 x i64> addrspace(1)* %out, <4 x i32> %c) {
entry:
%cmp = icmp ne <4 x i32> %c, <i32 0, i32 0, i32 0, i32 0>
%result = select <4 x i1> %cmp, <4 x i64> <i64 0, i64 1, i64 2, i64 3>, <4 x i64> <i64 4, i64 5, i64 6, i64 7>
diff --git a/test/CodeGen/AMDGPU/vtx-fetch-branch.ll b/test/CodeGen/AMDGPU/vtx-fetch-branch.ll
index 4584d6e25254..4c5eb3d3aa5d 100644
--- a/test/CodeGen/AMDGPU/vtx-fetch-branch.ll
+++ b/test/CodeGen/AMDGPU/vtx-fetch-branch.ll
@@ -10,7 +10,7 @@
; CHECK-NOT: ALU_POP_AFTER
; CHECK: TEX
; CHECK-NEXT: POP
-define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %cond) {
+define amdgpu_kernel void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %cond) {
entry:
%0 = icmp eq i32 %cond, 0
br i1 %0, label %endif, label %if
diff --git a/test/CodeGen/AMDGPU/vtx-schedule.ll b/test/CodeGen/AMDGPU/vtx-schedule.ll
index 912e258ebb83..c4b619bf168f 100644
--- a/test/CodeGen/AMDGPU/vtx-schedule.ll
+++ b/test/CodeGen/AMDGPU/vtx-schedule.ll
@@ -9,7 +9,7 @@
; CHECK: VTX_READ_32 [[IN0:T[0-9]+\.X]], [[IN0]], 0
; CHECK: Fetch clause
; CHECK: VTX_READ_32 [[IN1:T[0-9]+\.X]], [[IN1]], 0
-define void @test(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* addrspace(1)* nocapture %in0) {
+define amdgpu_kernel void @test(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* addrspace(1)* nocapture %in0) {
entry:
%0 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* %in0
%1 = load i32, i32 addrspace(1)* %0
diff --git a/test/CodeGen/AMDGPU/wait.ll b/test/CodeGen/AMDGPU/wait.ll
index 621c582fcefd..623cbeae8da9 100644
--- a/test/CodeGen/AMDGPU/wait.ll
+++ b/test/CodeGen/AMDGPU/wait.ll
@@ -11,26 +11,27 @@
; DEFAULT: exp
; DEFAULT: s_waitcnt lgkmcnt(0)
; DEFAULT: s_endpgm
-define amdgpu_vs void @main(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, <16 x i8> addrspace(2)* inreg %arg3, <16 x i8> addrspace(2)* inreg %arg4, i32 inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9, float addrspace(2)* inreg %constptr) {
+define amdgpu_vs void @main(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, <16 x i8> addrspace(2)* inreg %arg3, <16 x i8> addrspace(2)* inreg %arg4, i32 inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9, float addrspace(2)* inreg %constptr) #0 {
main_body:
%tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg3, i32 0
%tmp10 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
- %tmp11 = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> %tmp10, i32 0, i32 %arg6)
+ %tmp10.cast = bitcast <16 x i8> %tmp10 to <4 x i32>
+ %tmp11 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %tmp10.cast, i32 %arg6, i32 0, i1 false, i1 false)
%tmp12 = extractelement <4 x float> %tmp11, i32 0
%tmp13 = extractelement <4 x float> %tmp11, i32 1
call void @llvm.amdgcn.s.barrier() #1
%tmp14 = extractelement <4 x float> %tmp11, i32 2
-; %tmp15 = extractelement <4 x float> %tmp11, i32 3
- %tmp15 = load float, float addrspace(2)* %constptr, align 4 ; Force waiting for expcnt and lgkmcnt
+ %tmp15 = load float, float addrspace(2)* %constptr, align 4
%tmp16 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg3, i32 1
%tmp17 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp16, !tbaa !0
- %tmp18 = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> %tmp17, i32 0, i32 %arg6)
+ %tmp17.cast = bitcast <16 x i8> %tmp17 to <4 x i32>
+ %tmp18 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %tmp17.cast, i32 %arg6, i32 0, i1 false, i1 false)
%tmp19 = extractelement <4 x float> %tmp18, i32 0
%tmp20 = extractelement <4 x float> %tmp18, i32 1
%tmp21 = extractelement <4 x float> %tmp18, i32 2
%tmp22 = extractelement <4 x float> %tmp18, i32 3
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 32, i32 0, float %tmp19, float %tmp20, float %tmp21, float %tmp22)
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %tmp12, float %tmp13, float %tmp14, float %tmp15)
+ call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %tmp19, float %tmp20, float %tmp21, float %tmp22, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float %tmp12, float %tmp13, float %tmp14, float %tmp15, i1 true, i1 false) #0
ret void
}
@@ -41,45 +42,42 @@ main_body:
; ILPMAX: s_load_dwordx4
; ILPMAX: s_waitcnt lgkmcnt(0)
; ILPMAX: buffer_load
-; ILPMAX: s_waitcnt vmcnt(1)
; ILPMAX: s_waitcnt vmcnt(0)
+; ILPMAX: exp pos0
+; ILPMAX-NEXT: exp param0
; ILPMAX: s_endpgm
-
-define amdgpu_vs void @main2([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [17 x <4 x i32>] addrspace(2)* byval, [34 x <8 x i32>] addrspace(2)* byval, [16 x <16 x i8>] addrspace(2)*
-byval, i32 inreg, i32 inreg, i32, i32, i32, i32) {
+define amdgpu_vs void @main2([6 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [17 x <4 x i32>] addrspace(2)* byval %arg2, [34 x <8 x i32>] addrspace(2)* byval %arg3, [16 x <16 x i8>] addrspace(2)* byval %arg4, i32 inreg %arg5, i32 inreg %arg6, i32 %arg7, i32 %arg8, i32 %arg9, i32 %arg10) #0 {
main_body:
- %11 = getelementptr [16 x <16 x i8>], [16 x <16 x i8>] addrspace(2)* %4, i64 0, i64 0
- %12 = load <16 x i8>, <16 x i8> addrspace(2)* %11, align 16, !tbaa !0
- %13 = add i32 %5, %7
- %14 = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> %12, i32 0, i32 %13)
- %15 = extractelement <4 x float> %14, i32 0
- %16 = extractelement <4 x float> %14, i32 1
- %17 = extractelement <4 x float> %14, i32 2
- %18 = extractelement <4 x float> %14, i32 3
- %19 = getelementptr [16 x <16 x i8>], [16 x <16 x i8>] addrspace(2)* %4, i64 0, i64 1
- %20 = load <16 x i8>, <16 x i8> addrspace(2)* %19, align 16, !tbaa !0
- %21 = add i32 %5, %7
- %22 = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> %20, i32 0, i32 %21)
- %23 = extractelement <4 x float> %22, i32 0
- %24 = extractelement <4 x float> %22, i32 1
- %25 = extractelement <4 x float> %22, i32 2
- %26 = extractelement <4 x float> %22, i32 3
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %15, float %16, float %17, float %18)
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 32, i32 0, float %23, float %24, float %25, float %26)
+ %tmp = getelementptr [16 x <16 x i8>], [16 x <16 x i8>] addrspace(2)* %arg4, i64 0, i64 0
+ %tmp11 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, align 16, !tbaa !0
+ %tmp12 = add i32 %arg5, %arg7
+ %tmp11.cast = bitcast <16 x i8> %tmp11 to <4 x i32>
+ %tmp13 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %tmp11.cast, i32 %tmp12, i32 0, i1 false, i1 false)
+ %tmp14 = extractelement <4 x float> %tmp13, i32 0
+ %tmp15 = extractelement <4 x float> %tmp13, i32 1
+ %tmp16 = extractelement <4 x float> %tmp13, i32 2
+ %tmp17 = extractelement <4 x float> %tmp13, i32 3
+ %tmp18 = getelementptr [16 x <16 x i8>], [16 x <16 x i8>] addrspace(2)* %arg4, i64 0, i64 1
+ %tmp19 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp18, align 16, !tbaa !0
+ %tmp20 = add i32 %arg5, %arg7
+ %tmp19.cast = bitcast <16 x i8> %tmp19 to <4 x i32>
+ %tmp21 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %tmp19.cast, i32 %tmp20, i32 0, i1 false, i1 false)
+ %tmp22 = extractelement <4 x float> %tmp21, i32 0
+ %tmp23 = extractelement <4 x float> %tmp21, i32 1
+ %tmp24 = extractelement <4 x float> %tmp21, i32 2
+ %tmp25 = extractelement <4 x float> %tmp21, i32 3
+ call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float %tmp14, float %tmp15, float %tmp16, float %tmp17, i1 false, i1 false) #0
+ call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %tmp22, float %tmp23, float %tmp24, float %tmp25, i1 true, i1 false) #0
ret void
}
-
-; Function Attrs: convergent nounwind
declare void @llvm.amdgcn.s.barrier() #1
+declare <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32>, i32, i32, i1, i1) #2
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
-; Function Attrs: nounwind readnone
-declare <4 x float> @llvm.SI.vs.load.input(<16 x i8>, i32, i32) #2
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-
+attributes #0 = { nounwind }
attributes #1 = { convergent nounwind }
-attributes #2 = { nounwind readnone }
+attributes #2 = { nounwind readonly }
!0 = !{!1, !1, i64 0, i32 1}
!1 = !{!"const", !2}
diff --git a/test/CodeGen/AMDGPU/waitcnt-flat.ll b/test/CodeGen/AMDGPU/waitcnt-flat.ll
index d29bae45d8c2..5d86b12da95f 100644
--- a/test/CodeGen/AMDGPU/waitcnt-flat.ll
+++ b/test/CodeGen/AMDGPU/waitcnt-flat.ll
@@ -9,7 +9,7 @@
; XGCN: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[DATA:v[0-9]+]]
; XGCN: s_waitcnt vmcnt(0) lgkmcnt(0)
; XGCN: flat_load_dword [[DATA]], v[{{[0-9]+:[0-9]+}}]
-define void @test(i32 addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @test(i32 addrspace(1)* %out, i32 %in) {
store volatile i32 0, i32 addrspace(1)* %out
%val = load volatile i32, i32 addrspace(1)* %out
ret void
diff --git a/test/CodeGen/AMDGPU/waitcnt.mir b/test/CodeGen/AMDGPU/waitcnt.mir
index cb5de6a2419d..38662e83b359 100644
--- a/test/CodeGen/AMDGPU/waitcnt.mir
+++ b/test/CodeGen/AMDGPU/waitcnt.mir
@@ -1,12 +1,21 @@
# RUN: llc -march=amdgcn -mcpu=fiji -run-pass si-insert-waits %s -o - | FileCheck %s
--- |
- define void @flat_zero_waitcnt(i32 addrspace(1)* %global4,
+ define amdgpu_kernel void @flat_zero_waitcnt(i32 addrspace(1)* %global4,
<4 x i32> addrspace(1)* %global16,
i32 addrspace(4)* %flat4,
<4 x i32> addrspace(4)* %flat16) {
ret void
}
+
+ define amdgpu_kernel void @single_fallthrough_successor_no_end_block_wait() {
+ ret void
+ }
+
+ define amdgpu_kernel void @single_branch_successor_not_next_block() {
+ ret void
+ }
+
...
---
@@ -21,18 +30,21 @@
# CHECK-LABEL: bb.1:
# CHECK: FLAT_LOAD_DWORD
+# CHECK: S_WAITCNT 3952
# CHECK: FLAT_LOAD_DWORDX4
# The first load has no mem operand, so we should assume it accesses the flat
# address space.
# s_waitcnt vmcnt(0) lgkmcnt(0)
-# CHECK-NEXT: S_WAITCNT 112
+# CHECK-NEXT: S_WAITCNT 127
# CHECK-LABEL: bb.2:
# CHECK: FLAT_LOAD_DWORD
+# CHECK: S_WAITCNT 3952
# CHECK: FLAT_LOAD_DWORDX4
+
# One outstanding load accesses the flat address space.
# s_waitcnt vmcnt(0) lgkmcnt(0)
-# CHECK-NEXT: S_WAITCNT 112
+# CHECK-NEXT: S_WAITCNT 127
name: flat_zero_waitcnt
@@ -57,3 +69,60 @@ body: |
%vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec
S_ENDPGM
...
+---
+# There is only a single fallthrough successor block, so there's no
+# need to wait immediately.
+
+# CHECK-LABEL: name: single_fallthrough_successor_no_end_block_wait
+# CHECK: %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2
+# CHECK-NOT: S_WAITCNT
+
+# CHECK: bb.1:
+# CHECK-NEXT: V_LSHLREV_B64
+# CHECK-NEXT: S_WAITCNT 112
+# CHECK-NEXT: FLAT_STORE_DWORD
+name: single_fallthrough_successor_no_end_block_wait
+
+body: |
+ bb.0:
+ successors: %bb.1
+ %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr
+
+ bb.1:
+ %vgpr3_vgpr4 = V_LSHLREV_B64 4, %vgpr7_vgpr8, implicit %exec
+ FLAT_STORE_DWORD %vgpr3_vgpr4, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr
+ S_ENDPGM
+...
+---
+# The block has a single predecessor with a single successor, but it
+# is not the next block, so it's non-obvious that the wait is not needed.
+
+
+# CHECK-LABEL: name: single_branch_successor_not_next_block
+# CHECK: %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2
+# CHECK-NEXT: S_WAITCNT 112
+
+# CHECK: bb.1
+# CHECK-NEXT: FLAT_STORE_DWORD
+# CHECK-NEXT: S_ENDPGM
+
+# CHECK: bb.2:
+# CHECK-NEXT: V_LSHLREV_B64
+# CHECK-NEXT: FLAT_STORE_DWORD
+name: single_branch_successor_not_next_block
+
+body: |
+ bb.0:
+ successors: %bb.2
+ %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr
+ S_BRANCH %bb.2
+
+ bb.1:
+ FLAT_STORE_DWORD %vgpr8_vgpr9, %vgpr10, 0, 0, 0, implicit %exec, implicit %flat_scr
+ S_ENDPGM
+
+ bb.2:
+ %vgpr3_vgpr4 = V_LSHLREV_B64 4, %vgpr7_vgpr8, implicit %exec
+ FLAT_STORE_DWORD %vgpr3_vgpr4, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr
+ S_ENDPGM
+...
diff --git a/test/CodeGen/AMDGPU/wqm.ll b/test/CodeGen/AMDGPU/wqm.ll
index 3f7b2b284c53..9f277b2c9a59 100644
--- a/test/CodeGen/AMDGPU/wqm.ll
+++ b/test/CodeGen/AMDGPU/wqm.ll
@@ -1,5 +1,5 @@
-;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=SI
-;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=VI
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=CHECK -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=CHECK -check-prefix=VI %s
; Check that WQM isn't triggered by image load/store intrinsics.
;
@@ -18,16 +18,14 @@ main_body:
;CHECK-NEXT: ; %main_body
;CHECK-NEXT: s_wqm_b64 exec, exec
;CHECK-NOT: exec
-define amdgpu_ps void @test2(<8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, float addrspace(1)* inreg %ptr, <4 x i32> %c) {
+define amdgpu_ps void @test2(<8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, float addrspace(1)* inreg %ptr, <4 x float> %c) {
main_body:
- %c.1 = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %c, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %c.1 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %c, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
%c.2 = bitcast <4 x float> %c.1 to <4 x i32>
%c.3 = extractelement <4 x i32> %c.2, i32 0
%gep = getelementptr float, float addrspace(1)* %ptr, i32 %c.3
%data = load float, float addrspace(1)* %gep
-
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %data, float undef, float undef, float undef)
-
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %data, float undef, float undef, float undef, i1 true, i1 true) #1
ret void
}
@@ -42,9 +40,9 @@ main_body:
;CHECK: store
;CHECK-NOT: exec
;CHECK: .size test3
-define amdgpu_ps <4 x float> @test3(<8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, <4 x i32> %c) {
+define amdgpu_ps <4 x float> @test3(<8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, <4 x float> %c) {
main_body:
- %tex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %c, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %c, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
%tex.1 = bitcast <4 x float> %tex to <4 x i32>
%tex.2 = extractelement <4 x i32> %tex.1, i32 0
@@ -70,10 +68,9 @@ main_body:
%c.1 = mul i32 %c, %d
call void @llvm.amdgcn.buffer.store.v4f32(<4 x float> undef, <4 x i32> undef, i32 %c.1, i32 0, i1 0, i1 0)
-
- %tex = call <4 x float> @llvm.SI.image.sample.i32(i32 %c.1, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %tex.1 = bitcast <4 x float> %tex to <4 x i32>
- %dtex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %tex.1, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %c.1.bc = bitcast i32 %c.1 to float
+ %tex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.f32.v8i32(float %c.1.bc, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
+ %dtex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %tex, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
ret <4 x float> %dtex
}
@@ -101,9 +98,9 @@ main_body:
br i1 %cmp, label %IF, label %ELSE
IF:
- %tex = call <4 x float> @llvm.SI.image.sample.i32(i32 %c, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %tex.1 = bitcast <4 x float> %tex to <4 x i32>
- %dtex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %tex.1, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %c.bc = bitcast i32 %c to float
+ %tex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.f32.v8i32(float %c.bc, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
+ %dtex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %tex, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
%data.if = extractelement <4 x float> %dtex, i32 0
br label %END
@@ -143,9 +140,9 @@ main_body:
br i1 %cmp, label %ELSE, label %IF
IF:
- %tex = call <4 x float> @llvm.SI.image.sample.i32(i32 %c, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %tex.1 = bitcast <4 x float> %tex to <4 x i32>
- %dtex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %tex.1, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %c.bc = bitcast i32 %c to float
+ %tex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.f32.v8i32(float %c.bc, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
+ %dtex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %tex, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
%data.if = extractelement <4 x float> %dtex, i32 0
br label %END
@@ -200,7 +197,8 @@ ELSE:
END:
%coord.END = phi i32 [ %coord.IF, %IF ], [ %coord.ELSE, %ELSE ]
- %tex = call <4 x float> @llvm.SI.image.sample.i32(i32 %coord.END, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %coord.END.bc = bitcast i32 %coord.END to float
+ %tex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.f32.v8i32(float %coord.END.bc, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
ret <4 x float> %tex
}
@@ -215,13 +213,11 @@ END:
;CHECK: image_sample
;CHECK: v_cmp
;CHECK: store
-define amdgpu_ps float @test_control_flow_3(<8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, i32 %idx, i32 %coord) {
+define amdgpu_ps float @test_control_flow_3(<8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, i32 %idx, float %coord) {
main_body:
- %tex = call <4 x float> @llvm.SI.image.sample.i32(i32 %coord, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %tex.1 = bitcast <4 x float> %tex to <4 x i32>
- %dtex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %tex.1, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.f32.v8i32(float %coord, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
+ %dtex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %tex, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
%dtex.1 = extractelement <4 x float> %dtex, i32 0
-
call void @llvm.amdgcn.buffer.store.f32(float %dtex.1, <4 x i32> undef, i32 %idx, i32 0, i1 0, i1 0)
%cc = fcmp ogt float %dtex.1, 0.0
@@ -254,7 +250,7 @@ END:
;CHECK: %END
;CHECK: image_sample
;CHECK: image_sample
-define amdgpu_ps <4 x float> @test_control_flow_4(<8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, i32 %coord, i32 %y, float %z) {
+define amdgpu_ps <4 x float> @test_control_flow_4(<8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, float %coord, i32 %y, float %z) {
main_body:
%cond = icmp eq i32 %y, 0
br i1 %cond, label %IF, label %END
@@ -265,9 +261,8 @@ IF:
br label %END
END:
- %tex = call <4 x float> @llvm.SI.image.sample.i32(i32 %coord, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %tex.1 = bitcast <4 x float> %tex to <4 x i32>
- %dtex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %tex.1, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.f32.v8i32(float %coord, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
+ %dtex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %tex, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
ret <4 x float> %dtex
}
@@ -286,10 +281,9 @@ END:
;CHECK: buffer_store_dword
;CHECK: s_mov_b64 exec, [[SAVE]]
;CHECK: image_sample
-define amdgpu_ps <4 x float> @test_kill_0(<8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, float addrspace(1)* inreg %ptr, <2 x i32> %idx, <2 x float> %data, i32 %coord, i32 %coord2, float %z) {
+define amdgpu_ps <4 x float> @test_kill_0(<8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, float addrspace(1)* inreg %ptr, <2 x i32> %idx, <2 x float> %data, float %coord, float %coord2, float %z) {
main_body:
- %tex = call <4 x float> @llvm.SI.image.sample.i32(i32 %coord, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
-
+ %tex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.f32.v8i32(float %coord, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
%idx.0 = extractelement <2 x i32> %idx, i32 0
%data.0 = extractelement <2 x float> %data, i32 0
call void @llvm.amdgcn.buffer.store.f32(float %data.0, <4 x i32> undef, i32 %idx.0, i32 0, i1 0, i1 0)
@@ -299,10 +293,8 @@ main_body:
%idx.1 = extractelement <2 x i32> %idx, i32 1
%data.1 = extractelement <2 x float> %data, i32 1
call void @llvm.amdgcn.buffer.store.f32(float %data.1, <4 x i32> undef, i32 %idx.1, i32 0, i1 0, i1 0)
-
- %tex2 = call <4 x float> @llvm.SI.image.sample.i32(i32 %coord2, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %tex2.1 = bitcast <4 x float> %tex2 to <4 x i32>
- %dtex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %tex2.1, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tex2 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.f32.v8i32(float %coord2, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
+ %dtex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %tex2, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
%out = fadd <4 x float> %tex, %dtex
ret <4 x float> %out
@@ -320,11 +312,10 @@ main_body:
; CHECK: buffer_store_dword
; CHECK-NOT: wqm
; CHECK: v_cmpx_
-define amdgpu_ps <4 x float> @test_kill_1(<8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, i32 %idx, float %data, i32 %coord, i32 %coord2, float %z) {
+define amdgpu_ps <4 x float> @test_kill_1(<8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, i32 %idx, float %data, float %coord, float %coord2, float %z) {
main_body:
- %tex = call <4 x float> @llvm.SI.image.sample.i32(i32 %coord, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %tex.1 = bitcast <4 x float> %tex to <4 x i32>
- %dtex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %tex.1, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.f32.v8i32(float %coord, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
+ %dtex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %tex, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
call void @llvm.amdgcn.buffer.store.f32(float %data, <4 x i32> undef, i32 0, i32 0, i1 0, i1 0)
@@ -375,8 +366,7 @@ loop:
br i1 %cc, label %break, label %body
body:
- %c.i = bitcast <4 x float> %c.iv to <4 x i32>
- %c.next = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %c.i, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %c.next = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %c.iv, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
%ctr.next = fadd float %ctr.iv, 2.0
br label %loop
@@ -394,7 +384,7 @@ break:
; CHECK: s_and_b64 exec, exec, [[LIVE]]
; CHECK: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0
; CHECK: s_wqm_b64 exec, exec
-; CHECK: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+$}}
+; CHECK: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offset:4{{$}}
; CHECK: s_and_b64 exec, exec, [[LIVE]]
; CHECK: buffer_store_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 idxen
; CHECK: s_wqm_b64 exec, exec
@@ -416,9 +406,8 @@ entry:
%c.gep = getelementptr [32 x i32], [32 x i32]* %array, i32 0, i32 %idx
%c = load i32, i32* %c.gep, align 4
-
- %t = call <4 x float> @llvm.SI.image.sample.i32(i32 %c, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
-
+ %c.bc = bitcast i32 %c to float
+ %t = call <4 x float> @llvm.amdgcn.image.sample.v4f32.f32.v8i32(float %c.bc, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
call void @llvm.amdgcn.buffer.store.v4f32(<4 x float> %t, <4 x i32> undef, i32 0, i32 0, i1 0, i1 0)
ret void
@@ -436,9 +425,8 @@ entry:
; CHECK: s_and_b64 exec, exec, [[LIVE]]
; CHECK-NOT: exec
define amdgpu_ps <4 x float> @test_nonvoid_return() nounwind {
- %tex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %tex.i = bitcast <4 x float> %tex to <4 x i32>
- %dtex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %tex.i, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %tex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
+ %dtex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %tex, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
ret <4 x float> %dtex
}
@@ -450,10 +438,8 @@ define amdgpu_ps <4 x float> @test_nonvoid_return() nounwind {
; CHECK-NOT: exec
define amdgpu_ps <4 x float> @test_nonvoid_return_unreachable(i32 inreg %c) nounwind {
entry:
- %tex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
- %tex.i = bitcast <4 x float> %tex to <4 x i32>
- %dtex = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> %tex.i, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
-
+ %tex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
+ %dtex = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %tex, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
%cc = icmp sgt i32 %c, 0
br i1 %cc, label %if, label %else
@@ -485,35 +471,29 @@ main_body:
br i1 %cc, label %if, label %else
if:
- %r.if = call <4 x float> @llvm.SI.image.sample.i32(i32 0, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r.if = call <4 x float> @llvm.amdgcn.image.sample.v4f32.f32.v8i32(float 0.0, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
br label %end
else:
- %r.else = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> <i32 0, i32 1>, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r.else = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> <float 0.0, float bitcast (i32 1 to float)>, <8 x i32> undef, <4 x i32> undef, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
br label %end
end:
%r = phi <4 x float> [ %r.if, %if ], [ %r.else, %else ]
-
call void @llvm.amdgcn.buffer.store.f32(float 1.0, <4 x i32> undef, i32 %idx, i32 0, i1 0, i1 0)
-
ret <4 x float> %r
}
-
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #1
declare void @llvm.amdgcn.image.store.v4f32.v4i32.v8i32(<4 x float>, <4 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #1
-declare void @llvm.amdgcn.buffer.store.f32(float, <4 x i32>, i32, i32, i1, i1) #1
-declare void @llvm.amdgcn.buffer.store.v4f32(<4 x float>, <4 x i32>, i32, i32, i1, i1) #1
-
-declare <4 x float> @llvm.amdgcn.image.load.v4f32.v4i32.v8i32(<4 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #2
-declare float @llvm.amdgcn.buffer.load.f32(<4 x i32>, i32, i32, i1, i1) #2
-
-declare <4 x float> @llvm.SI.image.sample.i32(i32, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #3
-declare <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #3
-declare <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #3
-
-declare void @llvm.AMDGPU.kill(float)
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+declare void @llvm.amdgcn.buffer.store.f32(float, <4 x i32>, i32, i32, i1, i1) #2
+declare void @llvm.amdgcn.buffer.store.v4f32(<4 x float>, <4 x i32>, i32, i32, i1, i1) #2
+declare <4 x float> @llvm.amdgcn.image.load.v4f32.v4i32.v8i32(<4 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #3
+declare float @llvm.amdgcn.buffer.load.f32(<4 x i32>, i32, i32, i1, i1) #3
+declare <4 x float> @llvm.amdgcn.image.sample.v4f32.f32.v8i32(float, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #3
+declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #3
+declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #3
+declare void @llvm.AMDGPU.kill(float) #1
attributes #1 = { nounwind }
attributes #2 = { nounwind readonly }
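
The wqm.ll migration above replaces the i32-typed llvm.SI.image.sample coordinates with the float-typed llvm.amdgcn.image.sample intrinsics, inserting `bitcast i32 %c to float` where an integer coordinate flows in. The bitcast only reinterprets the bits, so the value reaching the sampler is unchanged; a quick Python illustration of that reinterpretation (using struct, purely for exposition):

    import struct

    def bitcast_i32_to_float(n):
        # Reinterpret the 32-bit pattern of n as an IEEE-754 float --
        # the same bits-preserving conversion the inserted `bitcast` performs.
        return struct.unpack("<f", struct.pack("<I", n & 0xFFFFFFFF))[0]

    print(bitcast_i32_to_float(1))           # 1.401298464324817e-45 (denormal)
    print(bitcast_i32_to_float(0x3F800000))  # 1.0
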
diff --git a/test/CodeGen/AMDGPU/write-register-vgpr-into-sgpr.ll b/test/CodeGen/AMDGPU/write-register-vgpr-into-sgpr.ll
index deac809f9b05..b1ee016e99c9 100644
--- a/test/CodeGen/AMDGPU/write-register-vgpr-into-sgpr.ll
+++ b/test/CodeGen/AMDGPU/write-register-vgpr-into-sgpr.ll
@@ -10,7 +10,7 @@ declare void @llvm.write_register.i32(metadata, i32) #0
declare i32 @llvm.amdgcn.workitem.id.x() #0
-define void @write_vgpr_into_sgpr() {
+define amdgpu_kernel void @write_vgpr_into_sgpr() {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
call void @llvm.write_register.i32(metadata !0, i32 %tid)
ret void
diff --git a/test/CodeGen/AMDGPU/write_register.ll b/test/CodeGen/AMDGPU/write_register.ll
index 88660ba6ec6a..9c62e003dde0 100644
--- a/test/CodeGen/AMDGPU/write_register.ll
+++ b/test/CodeGen/AMDGPU/write_register.ll
@@ -4,7 +4,7 @@ declare void @llvm.write_register.i32(metadata, i32) #0
declare void @llvm.write_register.i64(metadata, i64) #0
; CHECK-LABEL: {{^}}test_write_m0:
-define void @test_write_m0(i32 %val) #0 {
+define amdgpu_kernel void @test_write_m0(i32 %val) #0 {
call void @llvm.write_register.i32(metadata !0, i32 0)
call void @llvm.write_register.i32(metadata !0, i32 -1)
call void @llvm.write_register.i32(metadata !0, i32 %val)
@@ -15,7 +15,7 @@ define void @test_write_m0(i32 %val) #0 {
; CHECK: s_mov_b64 exec, 0
; CHECK: s_mov_b64 exec, -1
; CHECK: s_mov_b64 exec, s{{\[[0-9]+:[0-9]+\]}}
-define void @test_write_exec(i64 %val) #0 {
+define amdgpu_kernel void @test_write_exec(i64 %val) #0 {
call void @llvm.write_register.i64(metadata !1, i64 0)
call void @llvm.write_register.i64(metadata !1, i64 -1)
call void @llvm.write_register.i64(metadata !1, i64 %val)
@@ -26,7 +26,7 @@ define void @test_write_exec(i64 %val) #0 {
; CHECK: s_mov_b64 flat_scratch, 0
; CHECK: s_mov_b64 flat_scratch, -1
; CHECK: s_mov_b64 flat_scratch, s{{\[[0-9]+:[0-9]+\]}}
-define void @test_write_flat_scratch(i64 %val) #0 {
+define amdgpu_kernel void @test_write_flat_scratch(i64 %val) #0 {
call void @llvm.write_register.i64(metadata !2, i64 0)
call void @llvm.write_register.i64(metadata !2, i64 -1)
call void @llvm.write_register.i64(metadata !2, i64 %val)
@@ -36,7 +36,7 @@ define void @test_write_flat_scratch(i64 %val) #0 {
; CHECK-LABEL: {{^}}test_write_flat_scratch_lo:
; CHECK: s_mov_b32 flat_scratch_lo, 0
; CHECK: s_mov_b32 flat_scratch_lo, s{{[0-9]+}}
-define void @test_write_flat_scratch_lo(i32 %val) #0 {
+define amdgpu_kernel void @test_write_flat_scratch_lo(i32 %val) #0 {
call void @llvm.write_register.i32(metadata !3, i32 0)
call void @llvm.write_register.i32(metadata !3, i32 %val)
ret void
@@ -45,7 +45,7 @@ define void @test_write_flat_scratch_lo(i32 %val) #0 {
; CHECK-LABEL: {{^}}test_write_flat_scratch_hi:
; CHECK: s_mov_b32 flat_scratch_hi, 0
; CHECK: s_mov_b32 flat_scratch_hi, s{{[0-9]+}}
-define void @test_write_flat_scratch_hi(i32 %val) #0 {
+define amdgpu_kernel void @test_write_flat_scratch_hi(i32 %val) #0 {
call void @llvm.write_register.i32(metadata !4, i32 0)
call void @llvm.write_register.i32(metadata !4, i32 %val)
ret void
@@ -54,7 +54,7 @@ define void @test_write_flat_scratch_hi(i32 %val) #0 {
; CHECK-LABEL: {{^}}test_write_exec_lo:
; CHECK: s_mov_b32 exec_lo, 0
; CHECK: s_mov_b32 exec_lo, s{{[0-9]+}}
-define void @test_write_exec_lo(i32 %val) #0 {
+define amdgpu_kernel void @test_write_exec_lo(i32 %val) #0 {
call void @llvm.write_register.i32(metadata !5, i32 0)
call void @llvm.write_register.i32(metadata !5, i32 %val)
ret void
@@ -63,7 +63,7 @@ define void @test_write_exec_lo(i32 %val) #0 {
; CHECK-LABEL: {{^}}test_write_exec_hi:
; CHECK: s_mov_b32 exec_hi, 0
; CHECK: s_mov_b32 exec_hi, s{{[0-9]+}}
-define void @test_write_exec_hi(i32 %val) #0 {
+define amdgpu_kernel void @test_write_exec_hi(i32 %val) #0 {
call void @llvm.write_register.i32(metadata !6, i32 0)
call void @llvm.write_register.i32(metadata !6, i32 %val)
ret void
diff --git a/test/CodeGen/AMDGPU/wrong-transalu-pos-fix.ll b/test/CodeGen/AMDGPU/wrong-transalu-pos-fix.ll
index 7f6b80459047..36532365d871 100644
--- a/test/CodeGen/AMDGPU/wrong-transalu-pos-fix.ll
+++ b/test/CodeGen/AMDGPU/wrong-transalu-pos-fix.ll
@@ -4,7 +4,7 @@
;CHECK: {{^}}fill3d:
;CHECK-NOT: MULLO_INT T[0-9]+
-define void @fill3d(i32 addrspace(1)* nocapture %out) #0 {
+define amdgpu_kernel void @fill3d(i32 addrspace(1)* nocapture %out) #0 {
entry:
%x.i = tail call i32 @llvm.r600.read.global.size.x() #1
%y.i18 = tail call i32 @llvm.r600.read.global.size.y() #1
diff --git a/test/CodeGen/AMDGPU/xfail.r600.bitcast.ll b/test/CodeGen/AMDGPU/xfail.r600.bitcast.ll
index babae9ead27c..88ef9fd93c8f 100644
--- a/test/CodeGen/AMDGPU/xfail.r600.bitcast.ll
+++ b/test/CodeGen/AMDGPU/xfail.r600.bitcast.ll
@@ -5,7 +5,7 @@
; TODO: enable doubles
; FUNC-LABEL: {{^}}bitcast_f64_to_v2i32:
-define void @bitcast_f64_to_v2i32(<2 x i32> addrspace(1)* %out, double addrspace(1)* %in) {
+define amdgpu_kernel void @bitcast_f64_to_v2i32(<2 x i32> addrspace(1)* %out, double addrspace(1)* %in) {
%val = load double, double addrspace(1)* %in, align 8
%add = fadd double %val, 4.0
%bc = bitcast double %add to <2 x i32>
@@ -14,7 +14,7 @@ define void @bitcast_f64_to_v2i32(<2 x i32> addrspace(1)* %out, double addrspace
}
; FUNC-LABEL: {{^}}bitcast_v2i64_to_v2f64:
-define void @bitcast_v2i64_to_v2f64(i32 %cond, <2 x double> addrspace(1)* %out, <2 x i64> %value) {
+define amdgpu_kernel void @bitcast_v2i64_to_v2f64(i32 %cond, <2 x double> addrspace(1)* %out, <2 x i64> %value) {
entry:
%cmp0 = icmp eq i32 %cond, 0
br i1 %cmp0, label %if, label %end
@@ -30,7 +30,7 @@ end:
}
; FUNC-LABEL: {{^}}bitcast_v2f64_to_v2i64:
-define void @bitcast_v2f64_to_v2i64(i32 %cond, <2 x i64> addrspace(1)* %out, <2 x double> %value) {
+define amdgpu_kernel void @bitcast_v2f64_to_v2i64(i32 %cond, <2 x i64> addrspace(1)* %out, <2 x double> %value) {
entry:
%cmp0 = icmp eq i32 %cond, 0
br i1 %cmp0, label %if, label %end
diff --git a/test/CodeGen/AMDGPU/xor.ll b/test/CodeGen/AMDGPU/xor.ll
index bf02d4c3b311..57a082a0170c 100644
--- a/test/CodeGen/AMDGPU/xor.ll
+++ b/test/CodeGen/AMDGPU/xor.ll
@@ -10,7 +10,7 @@
; SI: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @xor_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in0, <2 x i32> addrspace(1)* %in1) {
+define amdgpu_kernel void @xor_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in0, <2 x i32> addrspace(1)* %in1) {
%a = load <2 x i32>, <2 x i32> addrspace(1) * %in0
%b = load <2 x i32>, <2 x i32> addrspace(1) * %in1
%result = xor <2 x i32> %a, %b
@@ -29,7 +29,7 @@ define void @xor_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in
; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
-define void @xor_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in0, <4 x i32> addrspace(1)* %in1) {
+define amdgpu_kernel void @xor_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in0, <4 x i32> addrspace(1)* %in1) {
%a = load <4 x i32>, <4 x i32> addrspace(1) * %in0
%b = load <4 x i32>, <4 x i32> addrspace(1) * %in1
%result = xor <4 x i32> %a, %b
@@ -46,7 +46,7 @@ define void @xor_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in
; SI: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
-define void @xor_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
+define amdgpu_kernel void @xor_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
%a = load float, float addrspace(1) * %in0
%b = load float, float addrspace(1) * %in1
%acmp = fcmp oge float %a, 0.000000e+00
@@ -63,7 +63,7 @@ define void @xor_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float ad
; SI: v_xor_b32_e32 [[XOR:v[0-9]+]], [[A]], [[B]]
; SI: v_and_b32_e32 [[RESULT:v[0-9]+]], 1, [[XOR]]
; SI: buffer_store_byte [[RESULT]]
-define void @v_xor_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace(1)* %in1) {
+define amdgpu_kernel void @v_xor_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace(1)* %in1) {
%a = load volatile i1, i1 addrspace(1)* %in0
%b = load volatile i1, i1 addrspace(1)* %in1
%xor = xor i1 %a, %b
@@ -73,7 +73,7 @@ define void @v_xor_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace
; FUNC-LABEL: {{^}}vector_xor_i32:
; SI: v_xor_b32_e32
-define void @vector_xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
+define amdgpu_kernel void @vector_xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
%a = load i32, i32 addrspace(1)* %in0
%b = load i32, i32 addrspace(1)* %in1
%result = xor i32 %a, %b
@@ -83,7 +83,7 @@ define void @vector_xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32
; FUNC-LABEL: {{^}}scalar_xor_i32:
; SI: s_xor_b32
-define void @scalar_xor_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define amdgpu_kernel void @scalar_xor_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
%result = xor i32 %a, %b
store i32 %result, i32 addrspace(1)* %out
ret void
@@ -91,7 +91,7 @@ define void @scalar_xor_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
; FUNC-LABEL: {{^}}scalar_not_i32:
; SI: s_not_b32
-define void @scalar_not_i32(i32 addrspace(1)* %out, i32 %a) {
+define amdgpu_kernel void @scalar_not_i32(i32 addrspace(1)* %out, i32 %a) {
%result = xor i32 %a, -1
store i32 %result, i32 addrspace(1)* %out
ret void
@@ -99,7 +99,7 @@ define void @scalar_not_i32(i32 addrspace(1)* %out, i32 %a) {
; FUNC-LABEL: {{^}}vector_not_i32:
; SI: v_not_b32
-define void @vector_not_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
+define amdgpu_kernel void @vector_not_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
%a = load i32, i32 addrspace(1)* %in0
%b = load i32, i32 addrspace(1)* %in1
%result = xor i32 %a, -1
@@ -111,7 +111,7 @@ define void @vector_not_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32
; SI: v_xor_b32_e32
; SI: v_xor_b32_e32
; SI: s_endpgm
-define void @vector_xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
+define amdgpu_kernel void @vector_xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
%a = load i64, i64 addrspace(1)* %in0
%b = load i64, i64 addrspace(1)* %in1
%result = xor i64 %a, %b
@@ -122,7 +122,7 @@ define void @vector_xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64
; FUNC-LABEL: {{^}}scalar_xor_i64:
; SI: s_xor_b64
; SI: s_endpgm
-define void @scalar_xor_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @scalar_xor_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
%result = xor i64 %a, %b
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -130,7 +130,7 @@ define void @scalar_xor_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
; FUNC-LABEL: {{^}}scalar_not_i64:
; SI: s_not_b64
-define void @scalar_not_i64(i64 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @scalar_not_i64(i64 addrspace(1)* %out, i64 %a) {
%result = xor i64 %a, -1
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -139,7 +139,7 @@ define void @scalar_not_i64(i64 addrspace(1)* %out, i64 %a) {
; FUNC-LABEL: {{^}}vector_not_i64:
; SI: v_not_b32
; SI: v_not_b32
-define void @vector_not_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
+define amdgpu_kernel void @vector_not_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
%a = load i64, i64 addrspace(1)* %in0
%b = load i64, i64 addrspace(1)* %in1
%result = xor i64 %a, -1
@@ -153,7 +153,7 @@ define void @vector_not_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64
; FUNC-LABEL: {{^}}xor_cf:
; SI: s_xor_b64
-define void @xor_cf(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b) {
+define amdgpu_kernel void @xor_cf(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b) {
entry:
%0 = icmp eq i64 %a, 0
br i1 %0, label %if, label %else
@@ -178,7 +178,7 @@ endif:
; SI-DAG: s_xor_b32 s[[RES_LO:[0-9]+]], s[[LO]], 0x3039
; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s[[RES_LO]]
; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s[[RES_HI]]
-define void @scalar_xor_literal_i64(i64 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @scalar_xor_literal_i64(i64 addrspace(1)* %out, i64 %a) {
%or = xor i64 %a, 4261135838621753
store i64 %or, i64 addrspace(1)* %out
ret void
@@ -192,7 +192,7 @@ define void @scalar_xor_literal_i64(i64 addrspace(1)* %out, i64 %a) {
; SI: s_add_u32 s{{[0-9]+}}, s{{[0-9]+}}, s[[K_LO]]
; SI: s_addc_u32 s{{[0-9]+}}, s{{[0-9]+}}, s[[K_HI]]
-define void @scalar_xor_literal_multi_use_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @scalar_xor_literal_multi_use_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
%or = xor i64 %a, 4261135838621753
store i64 %or, i64 addrspace(1)* %out
@@ -211,7 +211,7 @@ define void @scalar_xor_literal_multi_use_i64(i64 addrspace(1)* %out, i64 %a, i6
; SI: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[VAL_HI]]
; SI-NOT: xor_b32
; SI: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
-define void @scalar_xor_inline_imm_i64(i64 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @scalar_xor_inline_imm_i64(i64 addrspace(1)* %out, i64 %a) {
%or = xor i64 %a, 63
store i64 %or, i64 addrspace(1)* %out
ret void
@@ -220,7 +220,7 @@ define void @scalar_xor_inline_imm_i64(i64 addrspace(1)* %out, i64 %a) {
; FUNC-LABEL: {{^}}scalar_xor_neg_inline_imm_i64:
; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, {{0xb|0x2c}}
; SI: s_xor_b64 [[VAL]], [[VAL]], -8
-define void @scalar_xor_neg_inline_imm_i64(i64 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @scalar_xor_neg_inline_imm_i64(i64 addrspace(1)* %out, i64 %a) {
%or = xor i64 %a, -8
store i64 %or, i64 addrspace(1)* %out
ret void
@@ -231,7 +231,7 @@ define void @scalar_xor_neg_inline_imm_i64(i64 addrspace(1)* %out, i64 %a) {
; SI: v_xor_b32_e32 {{v[0-9]+}}, -8, v[[LO_VREG]]
; SI: v_xor_b32_e32 {{v[0-9]+}}, -1, {{.*}}
; SI: s_endpgm
-define void @vector_xor_i64_neg_inline_imm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @vector_xor_i64_neg_inline_imm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 8
%or = xor i64 %loada, -8
store i64 %or, i64 addrspace(1)* %out
@@ -243,7 +243,7 @@ define void @vector_xor_i64_neg_inline_imm(i64 addrspace(1)* %out, i64 addrspace
; SI-DAG: v_xor_b32_e32 {{v[0-9]+}}, 0xdf77987f, v[[LO_VREG]]
; SI-DAG: v_xor_b32_e32 {{v[0-9]+}}, 0x146f, v[[HI_VREG]]
; SI: s_endpgm
-define void @vector_xor_literal_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+define amdgpu_kernel void @vector_xor_literal_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
%loada = load i64, i64 addrspace(1)* %a, align 8
%or = xor i64 %loada, 22470723082367
store i64 %or, i64 addrspace(1)* %out
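
scalar_xor_literal_i64 above expects the 64-bit literal to be handled as two independent 32-bit halves, since a non-inline 64-bit constant xor lowers to a pair of s_xor_b32 ops. Splitting 4261135838621753 reproduces the 0x3039 low half the checks match against (the 0xf237b high half is derived from the same arithmetic, not quoted from the test):

    # Split a 64-bit literal into the two 32-bit halves that the pair of
    # s_xor_b32 instructions consumes.
    lit = 4261135838621753
    lo = lit & 0xFFFFFFFF
    hi = lit >> 32
    print(hex(lo), hex(hi))  # 0x3039 0xf237b
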
diff --git a/test/CodeGen/AMDGPU/zero_extend.ll b/test/CodeGen/AMDGPU/zero_extend.ll
index 572099617605..f256d89f0cb7 100644
--- a/test/CodeGen/AMDGPU/zero_extend.ll
+++ b/test/CodeGen/AMDGPU/zero_extend.ll
@@ -9,7 +9,7 @@
; SI: {{^}}s_mad_zext_i32_to_i64:
; SI: v_mov_b32_e32 v[[V_ZERO:[0-9]]], 0{{$}}
; SI: buffer_store_dwordx2 v[0:[[V_ZERO]]{{\]}}
-define void @s_mad_zext_i32_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) #0 {
+define amdgpu_kernel void @s_mad_zext_i32_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) #0 {
entry:
%tmp0 = mul i32 %a, %b
%tmp1 = add i32 %tmp0, %c
@@ -20,7 +20,7 @@ entry:
; SI-LABEL: {{^}}s_cmp_zext_i1_to_i32
; SI: v_cndmask_b32
-define void @s_cmp_zext_i1_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @s_cmp_zext_i1_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
entry:
%tmp0 = icmp eq i32 %a, %b
%tmp1 = zext i1 %tmp0 to i32
@@ -29,7 +29,7 @@ entry:
}
; SI-LABEL: {{^}}s_arg_zext_i1_to_i64:
-define void @s_arg_zext_i1_to_i64(i64 addrspace(1)* %out, i1 zeroext %arg) #0 {
+define amdgpu_kernel void @s_arg_zext_i1_to_i64(i64 addrspace(1)* %out, i1 zeroext %arg) #0 {
%ext = zext i1 %arg to i64
store i64 %ext, i64 addrspace(1)* %out, align 8
ret void
@@ -39,7 +39,7 @@ define void @s_arg_zext_i1_to_i64(i64 addrspace(1)* %out, i1 zeroext %arg) #0 {
; SI: s_mov_b32 s{{[0-9]+}}, 0
; SI: v_cmp_eq_u32
; SI: v_cndmask_b32
-define void @s_cmp_zext_i1_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+define amdgpu_kernel void @s_cmp_zext_i1_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%cmp = icmp eq i32 %a, %b
%ext = zext i1 %cmp to i64
store i64 %ext, i64 addrspace(1)* %out, align 8
@@ -49,7 +49,7 @@ define void @s_cmp_zext_i1_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) #0 {
; SI-LABEL: {{^}}s_cmp_zext_i1_to_i16
; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
; SI: buffer_store_short [[RESULT]]
-define void @s_cmp_zext_i1_to_i16(i16 addrspace(1)* %out, i16 zeroext %a, i16 zeroext %b) #0 {
+define amdgpu_kernel void @s_cmp_zext_i1_to_i16(i16 addrspace(1)* %out, i16 zeroext %a, i16 zeroext %b) #0 {
%tmp0 = icmp eq i16 %a, %b
%tmp1 = zext i1 %tmp0 to i16
store i16 %tmp1, i16 addrspace(1)* %out
diff --git a/test/CodeGen/AMDGPU/zext-i64-bit-operand.ll b/test/CodeGen/AMDGPU/zext-i64-bit-operand.ll
index 842c30b40df2..a902234898cd 100644
--- a/test/CodeGen/AMDGPU/zext-i64-bit-operand.ll
+++ b/test/CodeGen/AMDGPU/zext-i64-bit-operand.ll
@@ -11,7 +11,7 @@
; GCN-NOT: v[[HI]]
; GCN-NOT: v_mov_b32_e32 v{{[0-9]+}}, 0
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @zext_or_operand_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
+define amdgpu_kernel void @zext_or_operand_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
%ld.64 = load volatile i64, i64 addrspace(1)* %in0
%ld.32 = load volatile i32, i32 addrspace(1)* %in1
%ext = zext i32 %ld.32 to i64
@@ -31,7 +31,7 @@ define void @zext_or_operand_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0,
; GCN-NOT: _or_
; GCN-NOT: v_mov_b32_e32 v{{[0-9]+}}, 0
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
-define void @zext_or_operand_commute_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
+define amdgpu_kernel void @zext_or_operand_commute_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
%ld.64 = load volatile i64, i64 addrspace(1)* %in0
%ld.32 = load volatile i32, i32 addrspace(1)* %in1
%ext = zext i32 %ld.32 to i64
diff --git a/test/CodeGen/AMDGPU/zext-lid.ll b/test/CodeGen/AMDGPU/zext-lid.ll
new file mode 100644
index 000000000000..8eeff53ff99f
--- /dev/null
+++ b/test/CodeGen/AMDGPU/zext-lid.ll
@@ -0,0 +1,83 @@
+; RUN: llc -march=amdgcn < %s | FileCheck %s
+; RUN: opt -S -mtriple=amdgcn-- -amdgpu-lower-intrinsics < %s | FileCheck -check-prefix=OPT %s
+
+; CHECK-NOT: and_b32
+
+; OPT-LABEL: @zext_grp_size_128
+; OPT: tail call i32 @llvm.amdgcn.workitem.id.x() #2, !range !0
+; OPT: tail call i32 @llvm.amdgcn.workitem.id.y() #2, !range !0
+; OPT: tail call i32 @llvm.amdgcn.workitem.id.z() #2, !range !0
+define amdgpu_kernel void @zext_grp_size_128(i32 addrspace(1)* nocapture %arg) #0 {
+bb:
+ %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #2
+ %tmp1 = and i32 %tmp, 127
+ store i32 %tmp1, i32 addrspace(1)* %arg, align 4
+ %tmp2 = tail call i32 @llvm.amdgcn.workitem.id.y() #2
+ %tmp3 = and i32 %tmp2, 127
+ %tmp4 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 1
+ store i32 %tmp3, i32 addrspace(1)* %tmp4, align 4
+ %tmp5 = tail call i32 @llvm.amdgcn.workitem.id.z() #2
+ %tmp6 = and i32 %tmp5, 127
+ %tmp7 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 2
+ store i32 %tmp6, i32 addrspace(1)* %tmp7, align 4
+ ret void
+}
+
+; OPT-LABEL: @zext_grp_size_32x4x1
+; OPT: tail call i32 @llvm.amdgcn.workitem.id.x() #2, !range !2
+; OPT: tail call i32 @llvm.amdgcn.workitem.id.y() #2, !range !3
+; OPT: tail call i32 @llvm.amdgcn.workitem.id.z() #2, !range !4
+define amdgpu_kernel void @zext_grp_size_32x4x1(i32 addrspace(1)* nocapture %arg) #0 !reqd_work_group_size !0 {
+bb:
+ %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #2
+ %tmp1 = and i32 %tmp, 31
+ store i32 %tmp1, i32 addrspace(1)* %arg, align 4
+ %tmp2 = tail call i32 @llvm.amdgcn.workitem.id.y() #2
+ %tmp3 = and i32 %tmp2, 3
+ %tmp4 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 1
+ store i32 %tmp3, i32 addrspace(1)* %tmp4, align 4
+ %tmp5 = tail call i32 @llvm.amdgcn.workitem.id.z() #2
+ %tmp6 = and i32 %tmp5, 1
+ %tmp7 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 2
+ store i32 %tmp6, i32 addrspace(1)* %tmp7, align 4
+ ret void
+}
+
+; OPT-LABEL: @zext_grp_size_512
+; OPT: tail call i32 @llvm.amdgcn.workitem.id.x() #2, !range !5
+; OPT: tail call i32 @llvm.amdgcn.workitem.id.y() #2, !range !5
+; OPT: tail call i32 @llvm.amdgcn.workitem.id.z() #2, !range !5
+define amdgpu_kernel void @zext_grp_size_512(i32 addrspace(1)* nocapture %arg) #1 {
+bb:
+ %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #2
+ %tmp1 = and i32 %tmp, 65535
+ store i32 %tmp1, i32 addrspace(1)* %arg, align 4
+ %tmp2 = tail call i32 @llvm.amdgcn.workitem.id.y() #2
+ %tmp3 = and i32 %tmp2, 65535
+ %tmp4 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 1
+ store i32 %tmp3, i32 addrspace(1)* %tmp4, align 4
+ %tmp5 = tail call i32 @llvm.amdgcn.workitem.id.z() #2
+ %tmp6 = and i32 %tmp5, 65535
+ %tmp7 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 2
+ store i32 %tmp6, i32 addrspace(1)* %tmp7, align 4
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #2
+
+declare i32 @llvm.amdgcn.workitem.id.y() #2
+
+declare i32 @llvm.amdgcn.workitem.id.z() #2
+
+attributes #0 = { nounwind "amdgpu-flat-work-group-size"="64,128" }
+attributes #1 = { nounwind "amdgpu-flat-work-group-size"="512,512" }
+attributes #2 = { nounwind readnone }
+
+!0 = !{i32 32, i32 4, i32 1}
+
+; OPT: !0 = !{i32 0, i32 128}
+; OPT: !1 = !{i32 32, i32 4, i32 1}
+; OPT: !2 = !{i32 0, i32 32}
+; OPT: !3 = !{i32 0, i32 4}
+; OPT: !4 = !{i32 0, i32 1}
+; OPT: !5 = !{i32 0, i32 512}
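
zext-lid.ll rests on a simple fact: once the workitem IDs carry !range metadata bounding them to [0, N), any `and` with a mask of the form 2**k - 1 where N <= 2**k is an identity and folds away (hence CHECK-NOT: and_b32). A sketch of that reasoning, with the bounds taken from the work-group-size attributes above:

    def and_is_identity(range_hi, mask):
        # x & mask == x for every x in [0, range_hi) iff the mask is a
        # contiguous low-bit mask (2**k - 1) covering range_hi - 1.
        return (mask & (mask + 1)) == 0 and mask >= range_hi - 1

    # amdgpu-flat-work-group-size="64,128": ids bounded by [0, 128)
    print(and_is_identity(128, 127))    # True -> the and is removable
    # reqd_work_group_size {32, 4, 1}: per-dimension bounds
    print(and_is_identity(32, 31), and_is_identity(4, 3), and_is_identity(1, 1))
    # "512,512": ids in [0, 512), masked with 65535
    print(and_is_identity(512, 65535))  # True
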
diff --git a/test/CodeGen/ARM/2007-05-22-tailmerge-3.ll b/test/CodeGen/ARM/2007-05-22-tailmerge-3.ll
index 52cc37e24084..b8f2980be750 100644
--- a/test/CodeGen/ARM/2007-05-22-tailmerge-3.ll
+++ b/test/CodeGen/ARM/2007-05-22-tailmerge-3.ll
@@ -12,11 +12,11 @@
; CHECK: bl _quux
; CHECK-NOT: bl _quux
-; NOMERGE: bl _baz
-; NOMERGE: bl _baz
+; NOMERGE-DAG: bl _baz
+; NOMERGE-DAG: bl _baz
-; NOMERGE: bl _quux
-; NOMERGE: bl _quux
+; NOMERGE-DAG: bl _quux
+; NOMERGE-DAG: bl _quux
; ModuleID = 'tail.c'
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
diff --git a/test/CodeGen/ARM/2009-05-18-InlineAsmMem.ll b/test/CodeGen/ARM/2009-05-18-InlineAsmMem.ll
index 5d59fc64d922..e5c2fb4d67a1 100644
--- a/test/CodeGen/ARM/2009-05-18-InlineAsmMem.ll
+++ b/test/CodeGen/ARM/2009-05-18-InlineAsmMem.ll
@@ -1,5 +1,4 @@
; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
-; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s
; PR4091
define void @foo(i32 %i, i32* %p) nounwind {
diff --git a/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll b/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll
index 4a1341c4d6e7..2a5af6199a34 100644
--- a/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll
+++ b/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll
@@ -12,13 +12,14 @@ define void @test_byval_8_bytes_alignment(i32 %i, ...) {
entry:
; CHECK: sub sp, sp, #12
; CHECK: sub sp, sp, #4
-; CHECK: stmib sp, {r1, r2, r3}
+; CHECK: add r0, sp, #4
+; CHECK: stm sp, {r0, r1, r2, r3}
%g = alloca i8*
%g1 = bitcast i8** %g to i8*
call void @llvm.va_start(i8* %g1)
; CHECK: add [[REG:(r[0-9]+)|(lr)]], {{(r[0-9]+)|(lr)}}, #7
-; CHECK: bfc [[REG]], #0, #3
+; CHECK: bic [[REG]], [[REG]], #7
%0 = va_arg i8** %g, double
call void @llvm.va_end(i8* %g1)
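
The byval test now expects `add ..., #7` followed by `bic ..., #7` instead of `bfc`: both sequences round the va_arg pointer up to the next 8-byte boundary, bic simply clearing the low three bits with an immediate AND-NOT rather than a bitfield clear. The classic align-up idiom, sketched in Python:

    def align_up_8(addr):
        # add #7 then clear the low three bits: rounds addr up to a
        # multiple of 8 (what both the bfc and bic sequences compute).
        return (addr + 7) & ~7

    print([align_up_8(a) for a in (0, 1, 7, 8, 9)])  # [0, 8, 8, 8, 16]
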
diff --git a/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir b/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
index 5c0853cfaab4..66d9033a6d7c 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
@@ -1,10 +1,135 @@
# RUN: llc -O0 -mtriple arm-- -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
--- |
+ define void @test_zext_s1() { ret void }
+ define void @test_sext_s1() { ret void }
+ define void @test_sext_s8() { ret void }
+ define void @test_zext_s16() { ret void }
+
define void @test_add_s8() { ret void }
define void @test_add_s16() { ret void }
define void @test_add_s32() { ret void }
+ define void @test_fadd_s32() #0 { ret void }
+ define void @test_fadd_s64() #0 { ret void }
+
define void @test_load_from_stack() { ret void }
+ define void @test_load_f32() #0 { ret void }
+ define void @test_load_f64() #0 { ret void }
+
+ define void @test_stores() #0 { ret void }
+
+ define void @test_gep() { ret void }
+ define void @test_constants() { ret void }
+
+ define void @test_soft_fp_double() #0 { ret void }
+
+ attributes #0 = { "target-features"="+vfp2,-neonfp" }
+...
+---
+name: test_zext_s1
+# CHECK-LABEL: name: test_zext_s1
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+body: |
+ bb.0:
+ liveins: %r0
+
+ %0(s1) = COPY %r0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %r0
+
+ %1(s32) = G_ZEXT %0(s1)
+ ; CHECK: [[VREGEXT:%[0-9]+]] = ANDri [[VREGX]], 1, 14, _, _
+
+ %r0 = COPY %1(s32)
+ ; CHECK: %r0 = COPY [[VREGEXT]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_sext_s1
+# CHECK-LABEL: name: test_sext_s1
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: %r0
+
+ %0(s1) = COPY %r0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %r0
+
+ %1(s32) = G_SEXT %0(s1)
+ ; CHECK: [[VREGAND:%[0-9]+]] = ANDri [[VREGX]], 1, 14, _, _
+ ; CHECK: [[VREGEXT:%[0-9]+]] = RSBri [[VREGAND]], 0, 14, _, _
+
+ %r0 = COPY %1(s32)
+ ; CHECK: %r0 = COPY [[VREGEXT]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_sext_s8
+# CHECK-LABEL: name: test_sext_s8
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+body: |
+ bb.0:
+ liveins: %r0
+
+ %0(s8) = COPY %r0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %r0
+
+ %1(s32) = G_SEXT %0(s8)
+ ; CHECK: [[VREGEXT:%[0-9]+]] = SXTB [[VREGX]], 0, 14, _
+
+ %r0 = COPY %1(s32)
+ ; CHECK: %r0 = COPY [[VREGEXT]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_zext_s16
+# CHECK-LABEL: name: test_zext_s16
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+body: |
+ bb.0:
+ liveins: %r0
+
+ %0(s16) = COPY %r0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %r0
+
+ %1(s32) = G_ZEXT %0(s16)
+ ; CHECK: [[VREGEXT:%[0-9]+]] = UXTH [[VREGX]], 0, 14, _
+
+ %r0 = COPY %1(s32)
+ ; CHECK: %r0 = COPY [[VREGEXT]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
...
---
name: test_add_s8
@@ -106,6 +231,72 @@ body: |
; CHECK: BX_RET 14, _, implicit %r0
...
---
+name: test_fadd_s32
+# CHECK-LABEL: name: test_fadd_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: fprb }
+# CHECK: id: 0, class: spr
+# CHECK: id: 1, class: spr
+# CHECK: id: 2, class: spr
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %s0
+
+ %1(s32) = COPY %s1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %s1
+
+ %2(s32) = G_FADD %0, %1
+ ; CHECK: [[VREGSUM:%[0-9]+]] = VADDS [[VREGX]], [[VREGY]], 14, _
+
+ %s0 = COPY %2(s32)
+ ; CHECK: %s0 = COPY [[VREGSUM]]
+
+ BX_RET 14, _, implicit %s0
+ ; CHECK: BX_RET 14, _, implicit %s0
+...
+---
+name: test_fadd_s64
+# CHECK-LABEL: name: test_fadd_s64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: fprb }
+ - { id: 1, class: fprb }
+ - { id: 2, class: fprb }
+# CHECK: id: 0, class: dpr
+# CHECK: id: 1, class: dpr
+# CHECK: id: 2, class: dpr
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %d0
+
+ %1(s64) = COPY %d1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %d1
+
+ %2(s64) = G_FADD %0, %1
+ ; CHECK: [[VREGSUM:%[0-9]+]] = VADDD [[VREGX]], [[VREGY]], 14, _
+
+ %d0 = COPY %2(s64)
+ ; CHECK: %d0 = COPY [[VREGSUM]]
+
+ BX_RET 14, _, implicit %d0
+ ; CHECK: BX_RET 14, _, implicit %d0
+...
+---
name: test_load_from_stack
# CHECK-LABEL: name: test_load_from_stack
legalized: true
@@ -122,20 +313,225 @@ registers:
# CHECK-DAG: id: 2, class: gpr
# CHECK-DAG: id: 3, class: gpr
fixedStack:
- - { id: 0, offset: 0, size: 4, alignment: 4, isImmutable: true, isAliased: false }
+ - { id: 0, offset: 0, size: 1, alignment: 4, isImmutable: true, isAliased: false }
- { id: 1, offset: 4, size: 4, alignment: 4, isImmutable: true, isAliased: false }
- { id: 2, offset: 8, size: 4, alignment: 4, isImmutable: true, isAliased: false }
-# CHECK: id: [[FRAME_INDEX:[0-9]+]], offset: 8
+# CHECK-DAG: id: [[FI1:[0-9]+]], offset: 0
+# CHECK-DAG: id: [[FI32:[0-9]+]], offset: 8
body: |
bb.0:
liveins: %r0, %r1, %r2, %r3
%0(p0) = G_FRAME_INDEX %fixed-stack.2
- ; CHECK: [[FIVREG:%[0-9]+]] = ADDri %fixed-stack.[[FRAME_INDEX]], 0, 14, _, _
+ ; CHECK: [[FI32VREG:%[0-9]+]] = ADDri %fixed-stack.[[FI32]], 0, 14, _, _
+
+ %1(s32) = G_LOAD %0(p0) :: (load 4)
+ ; CHECK: [[LD32VREG:%[0-9]+]] = LDRi12 [[FI32VREG]], 0, 14, _
+
+ %r0 = COPY %1
+ ; CHECK: %r0 = COPY [[LD32VREG]]
+
+ %2(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; CHECK: [[FI1VREG:%[0-9]+]] = ADDri %fixed-stack.[[FI1]], 0, 14, _, _
- %1(s32) = G_LOAD %0(p0)
- ; CHECK: {{%[0-9]+}} = LDRi12 [[FIVREG]], 0, 14, _
+ %3(s1) = G_LOAD %2(p0) :: (load 1)
+ ; CHECK: [[LD1VREG:%[0-9]+]] = LDRBi12 [[FI1VREG]], 0, 14, _
+
+ %r0 = COPY %3
+ ; CHECK: %r0 = COPY [[LD1VREG]]
BX_RET 14, _
; CHECK: BX_RET 14, _
...
+---
+name: test_load_f32
+# CHECK-LABEL: name: test_load_f32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: fprb }
+# CHECK-DAG: id: [[P:[0-9]+]], class: gpr
+# CHECK-DAG: id: [[V:[0-9]+]], class: spr
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(p0) = COPY %r0
+
+ %1(s32) = G_LOAD %0(p0) :: (load 4)
+ ; CHECK: %[[V]] = VLDRS %[[P]], 0, 14, _
+
+ %s0 = COPY %1
+ ; CHECK: %s0 = COPY %[[V]]
+
+ BX_RET 14, _, implicit %s0
+ ; CHECK: BX_RET 14, _, implicit %s0
+...
+---
+name: test_load_f64
+# CHECK-LABEL: name: test_load_f64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: fprb }
+# CHECK-DAG: id: [[P:[0-9]+]], class: gpr
+# CHECK-DAG: id: [[V:[0-9]+]], class: dpr
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(p0) = COPY %r0
+
+ %1(s64) = G_LOAD %0(p0) :: (load 8)
+ ; CHECK: %[[V]] = VLDRD %[[P]], 0, 14, _
+
+ %d0 = COPY %1
+ ; CHECK: %d0 = COPY %[[V]]
+
+ BX_RET 14, _, implicit %d0
+ ; CHECK: BX_RET 14, _, implicit %d0
+...
+---
+name: test_stores
+# CHECK-LABEL: name: test_stores
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+ - { id: 4, class: fprb }
+ - { id: 5, class: fprb }
+# CHECK: id: [[P:[0-9]+]], class: gpr
+# CHECK: id: [[I8:[0-9]+]], class: gpr
+# CHECK: id: [[I16:[0-9]+]], class: gpr
+# CHECK: id: [[I32:[0-9]+]], class: gpr
+# CHECK: id: [[F32:[0-9]+]], class: spr
+# CHECK: id: [[F64:[0-9]+]], class: dpr
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(p0) = COPY %r0
+ %1(s8) = COPY %r3
+ %2(s16) = COPY %r2
+ %3(s32) = COPY %r1
+ %4(s32) = COPY %s0
+ %5(s64) = COPY %d2
+
+ G_STORE %1(s8), %0(p0) :: (store 1)
+ ; CHECK: STRBi12 %[[I8]], %[[P]], 0, 14, _
+
+ G_STORE %2(s16), %0(p0) :: (store 2)
+ ; CHECK: STRH %[[I16]], %[[P]], _, 0, 14, _
+
+ G_STORE %3(s32), %0(p0) :: (store 4)
+ ; CHECK: STRi12 %[[I32]], %[[P]], 0, 14, _
+
+ G_STORE %4(s32), %0(p0) :: (store 4)
+ ; CHECK: VSTRS %[[F32]], %[[P]], 0, 14, _
+
+ G_STORE %5(s64), %0(p0) :: (store 8)
+ ; CHECK: VSTRD %[[F64]], %[[P]], 0, 14, _
+
+ BX_RET 14, _
+...
+---
+name: test_gep
+# CHECK-LABEL: name: test_gep
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+# CHECK: id: [[PTR:[0-9]+]], class: gpr
+# CHECK: id: [[OFF:[0-9]+]], class: gpr
+# CHECK: id: [[GEP:[0-9]+]], class: gpr
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(p0) = COPY %r0
+ %1(s32) = COPY %r1
+
+ %2(p0) = G_GEP %0, %1(s32)
+ ; CHECK: %[[GEP]] = ADDrr %[[PTR]], %[[OFF]], 14, _, _
+
+ %r0 = COPY %2(p0)
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_constants
+# CHECK-LABEL: name: test_constants
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+# CHECK: id: [[C:[0-9]+]], class: gpr
+body: |
+ bb.0:
+ %0(s32) = G_CONSTANT 42
+ ; CHECK: %[[C]] = MOVi 42, 14, _, _
+
+ %r0 = COPY %0(s32)
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_soft_fp_double
+# CHECK-LABEL: name: test_soft_fp_double
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: fprb }
+ - { id: 3, class: gprb }
+ - { id: 4, class: gprb }
+# CHECK-DAG: id: {{[0-9]+}}, class: gpr
+# CHECK-DAG: id: {{[0-9]+}}, class: gpr
+# CHECK-DAG: id: {{[0-9]+}}, class: gpr
+# CHECK-DAG: id: {{[0-9]+}}, class: gpr
+# CHECK-DAG: id: [[DREG:[0-9]+]], class: dpr
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ %0(s32) = COPY %r2
+ ; CHECK: [[IN1:%[0-9]+]] = COPY %r2
+
+ %1(s32) = COPY %r3
+ ; CHECK: [[IN2:%[0-9]+]] = COPY %r3
+
+ %2(s64) = G_SEQUENCE %0(s32), 0, %1(s32), 1
+ ; CHECK: %[[DREG]] = VMOVDRR [[IN1]], [[IN2]]
+
+ %3(s32) = G_EXTRACT %2(s64), 0
+ %4(s32) = G_EXTRACT %2(s64), 32
+ ; CHECK: [[OUT1:%[0-9]+]] = VGETLNi32 %[[DREG]], 0
+ ; CHECK: [[OUT2:%[0-9]+]] = VGETLNi32 %[[DREG]], 1
+
+ %r0 = COPY %3
+ ; CHECK: %r0 = COPY [[OUT1]]
+
+ %r1 = COPY %4
+ ; CHECK: %r1 = COPY [[OUT2]]
+
+ BX_RET 14, _, implicit %r0, implicit %r1
+ ; CHECK: BX_RET 14, _, implicit %r0, implicit %r1
+...
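
The new test_sext_s1 selection above shows the standard trick for sign-extending a 1-bit value: mask to the low bit (ANDri ..., 1, ...) and negate via reverse-subtract from zero (RSBri ..., 0, ...), so 1 becomes all-ones and 0 stays 0. In Python, with an explicit 32-bit wrap since Python integers are unbounded:

    def sext_s1(x):
        # and x, 1 ; rsb x, 0  ==  -(x & 1), wrapped to 32 bits
        return (-(x & 1)) & 0xFFFFFFFF

    print(hex(sext_s1(0)), hex(sext_s1(1)))  # 0x0 0xffffffff
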
diff --git a/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll b/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
index f863ed5a6849..a7f5ec33bee3 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
+++ b/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
@@ -1,4 +1,5 @@
-; RUN: llc -mtriple arm-unknown -global-isel -stop-after=irtranslator %s -o - | FileCheck %s
+; RUN: llc -mtriple arm-unknown -mattr=+vfp2 -global-isel -stop-after=irtranslator %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=LITTLE
+; RUN: llc -mtriple armeb-unknown -mattr=+vfp2 -global-isel -stop-after=irtranslator %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=BIG
define void @test_void_return() {
; CHECK-LABEL: name: test_void_return
@@ -7,6 +8,20 @@ entry:
ret void
}
+define signext i1 @test_add_i1(i1 %x, i1 %y) {
+; CHECK-LABEL: name: test_add_i1
+; CHECK: liveins: %r0, %r1
+; CHECK-DAG: [[VREGX:%[0-9]+]](s1) = COPY %r0
+; CHECK-DAG: [[VREGY:%[0-9]+]](s1) = COPY %r1
+; CHECK: [[SUM:%[0-9]+]](s1) = G_ADD [[VREGX]], [[VREGY]]
+; CHECK: [[EXT:%[0-9]+]](s32) = G_SEXT [[SUM]]
+; CHECK: %r0 = COPY [[EXT]](s32)
+; CHECK: BX_RET 14, _, implicit %r0
+entry:
+ %sum = add i1 %x, %y
+ ret i1 %sum
+}
+
define i8 @test_add_i8(i8 %x, i8 %y) {
; CHECK-LABEL: name: test_add_i8
; CHECK: liveins: %r0, %r1
@@ -20,6 +35,17 @@ entry:
ret i8 %sum
}
+define signext i8 @test_return_sext_i8(i8 %x) {
+; CHECK-LABEL: name: test_return_sext_i8
+; CHECK: liveins: %r0
+; CHECK: [[VREG:%[0-9]+]](s8) = COPY %r0
+; CHECK: [[VREGEXT:%[0-9]+]](s32) = G_SEXT [[VREG]]
+; CHECK: %r0 = COPY [[VREGEXT]](s32)
+; CHECK: BX_RET 14, _, implicit %r0
+entry:
+ ret i8 %x
+}
+
define i16 @test_add_i16(i16 %x, i16 %y) {
; CHECK-LABEL: name: test_add_i16
; CHECK: liveins: %r0, %r1
@@ -33,6 +59,17 @@ entry:
ret i16 %sum
}
+define zeroext i16 @test_return_zext_i16(i16 %x) {
+; CHECK-LABEL: name: test_return_zext_i16
+; CHECK: liveins: %r0
+; CHECK: [[VREG:%[0-9]+]](s16) = COPY %r0
+; CHECK: [[VREGEXT:%[0-9]+]](s32) = G_ZEXT [[VREG]]
+; CHECK: %r0 = COPY [[VREGEXT]](s32)
+; CHECK: BX_RET 14, _, implicit %r0
+entry:
+ ret i16 %x
+}
+
define i32 @test_add_i32(i32 %x, i32 %y) {
; CHECK-LABEL: name: test_add_i32
; CHECK: liveins: %r0, %r1
@@ -46,8 +83,8 @@ entry:
ret i32 %sum
}
-define i32 @test_many_args(i32 %p0, i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5) {
-; CHECK-LABEL: name: test_many_args
+define i32 @test_stack_args(i32 %p0, i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5) {
+; CHECK-LABEL: name: test_stack_args
; CHECK: fixedStack:
; CHECK-DAG: id: [[P4:[0-9]]]{{.*}}offset: 0{{.*}}size: 4
; CHECK-DAG: id: [[P5:[0-9]]]{{.*}}offset: 4{{.*}}size: 4
@@ -62,3 +99,527 @@ entry:
%sum = add i32 %p2, %p5
ret i32 %sum
}
+
+define i16 @test_stack_args_signext(i32 %p0, i16 %p1, i8 %p2, i1 %p3,
+ i8 signext %p4, i16 signext %p5) {
+; CHECK-LABEL: name: test_stack_args_signext
+; CHECK: fixedStack:
+; CHECK-DAG: id: [[P4:[0-9]]]{{.*}}offset: 0{{.*}}size: 1
+; CHECK-DAG: id: [[P5:[0-9]]]{{.*}}offset: 4{{.*}}size: 2
+; CHECK: liveins: %r0, %r1, %r2, %r3
+; CHECK: [[VREGP1:%[0-9]+]]{{.*}} = COPY %r1
+; CHECK: [[FIP5:%[0-9]+]]{{.*}} = G_FRAME_INDEX %fixed-stack.[[P5]]
+; CHECK: [[VREGP5:%[0-9]+]]{{.*}} = G_LOAD [[FIP5]](p0)
+; CHECK: [[SUM:%[0-9]+]]{{.*}} = G_ADD [[VREGP1]], [[VREGP5]]
+; CHECK: %r0 = COPY [[SUM]]
+; CHECK: BX_RET 14, _, implicit %r0
+entry:
+ %sum = add i16 %p1, %p5
+ ret i16 %sum
+}
+
+define i8 @test_stack_args_zeroext(i32 %p0, i16 %p1, i8 %p2, i1 %p3,
+ i8 zeroext %p4, i16 zeroext %p5) {
+; CHECK-LABEL: name: test_stack_args_zeroext
+; CHECK: fixedStack:
+; CHECK-DAG: id: [[P4:[0-9]]]{{.*}}offset: 0{{.*}}size: 1
+; CHECK-DAG: id: [[P5:[0-9]]]{{.*}}offset: 4{{.*}}size: 2
+; CHECK: liveins: %r0, %r1, %r2, %r3
+; CHECK: [[VREGP2:%[0-9]+]]{{.*}} = COPY %r2
+; CHECK: [[FIP4:%[0-9]+]]{{.*}} = G_FRAME_INDEX %fixed-stack.[[P4]]
+; CHECK: [[VREGP4:%[0-9]+]]{{.*}} = G_LOAD [[FIP4]](p0)
+; CHECK: [[SUM:%[0-9]+]]{{.*}} = G_ADD [[VREGP2]], [[VREGP4]]
+; CHECK: %r0 = COPY [[SUM]]
+; CHECK: BX_RET 14, _, implicit %r0
+entry:
+ %sum = add i8 %p2, %p4
+ ret i8 %sum
+}
+
+define i16 @test_ptr_arg(i16* %p) {
+; CHECK-LABEL: name: test_ptr_arg
+; CHECK: liveins: %r0
+; CHECK: [[VREGP:%[0-9]+]](p0) = COPY %r0
+; CHECK: [[VREGV:%[0-9]+]](s16) = G_LOAD [[VREGP]](p0)
+entry:
+ %v = load i16, i16* %p
+ ret i16 %v
+}
+
+define i32* @test_ptr_ret(i32** %p) {
+; Test pointer returns and pointer-to-pointer arguments
+; CHECK-LABEL: name: test_ptr_ret
+; CHECK: liveins: %r0
+; CHECK: [[VREGP:%[0-9]+]](p0) = COPY %r0
+; CHECK: [[VREGV:%[0-9]+]](p0) = G_LOAD [[VREGP]](p0)
+; CHECK: %r0 = COPY [[VREGV]]
+; CHECK: BX_RET 14, _, implicit %r0
+entry:
+ %v = load i32*, i32** %p
+ ret i32* %v
+}
+
+define i32 @test_ptr_arg_on_stack(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32* %p) {
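+; A pointer passed on the stack is loaded as a p0 from its fixed stack slot
+; before being dereferenced.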
+; CHECK-LABEL: name: test_ptr_arg_on_stack
+; CHECK: fixedStack:
+; CHECK: id: [[P:[0-9]+]]{{.*}}offset: 0{{.*}}size: 4
+; CHECK: liveins: %r0, %r1, %r2, %r3
+; CHECK: [[FIP:%[0-9]+]]{{.*}} = G_FRAME_INDEX %fixed-stack.[[P]]
+; CHECK: [[VREGP:%[0-9]+]](p0) = G_LOAD [[FIP]](p0)
+; CHECK: [[VREGV:%[0-9]+]](s32) = G_LOAD [[VREGP]](p0)
+; CHECK: %r0 = COPY [[VREGV]]
+; CHECK: BX_RET 14, _, implicit %r0
+entry:
+ %v = load i32, i32* %p
+ ret i32 %v
+}
+
+define arm_aapcscc float @test_float_aapcscc(float %p0, float %p1, float %p2,
+ float %p3, float %p4, float %p5) {
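+; With the soft-float AAPCS, floats travel in r0-r3 and then on the stack, so
+; %p4 and %p5 end up in 4-byte slots at offsets 0 and 4.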
+; CHECK-LABEL: name: test_float_aapcscc
+; CHECK: fixedStack:
+; CHECK-DAG: id: [[P4:[0-9]+]]{{.*}}offset: 0{{.*}}size: 4
+; CHECK-DAG: id: [[P5:[0-9]+]]{{.*}}offset: 4{{.*}}size: 4
+; CHECK: liveins: %r0, %r1, %r2, %r3
+; CHECK: [[VREGP1:%[0-9]+]](s32) = COPY %r1
+; CHECK: [[FIP5:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[P5]]
+; CHECK: [[VREGP5:%[0-9]+]](s32) = G_LOAD [[FIP5]](p0)
+; CHECK: [[VREGV:%[0-9]+]](s32) = G_FADD [[VREGP1]], [[VREGP5]]
+; CHECK: %r0 = COPY [[VREGV]]
+; CHECK: BX_RET 14, _, implicit %r0
+entry:
+ %v = fadd float %p1, %p5
+ ret float %v
+}
+
+define arm_aapcs_vfpcc float @test_float_vfpcc(float %p0, float %p1, float %p2,
+ float %p3, float %p4, float %p5,
+ float %ridiculous,
+ float %number,
+ float %of,
+ float %parameters,
+ float %that,
+ float %should,
+ float %never,
+ float %exist,
+ float %in,
+ float %practice,
+ float %q0, float %q1) {
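+; The VFP calling convention passes the first 16 single-precision arguments in
+; s0-s15, so %q0 and %q1 overflow onto the stack.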
+; CHECK-LABEL: name: test_float_vfpcc
+; CHECK: fixedStack:
+; CHECK-DAG: id: [[Q0:[0-9]+]]{{.*}}offset: 0{{.*}}size: 4
+; CHECK-DAG: id: [[Q1:[0-9]+]]{{.*}}offset: 4{{.*}}size: 4
+; CHECK: liveins: %s0, %s1, %s2, %s3, %s4, %s5, %s6, %s7, %s8, %s9, %s10, %s11, %s12, %s13, %s14, %s15
+; CHECK: [[VREGP1:%[0-9]+]](s32) = COPY %s1
+; CHECK: [[FIQ1:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[Q1]]
+; CHECK: [[VREGQ1:%[0-9]+]](s32) = G_LOAD [[FIQ1]](p0)
+; CHECK: [[VREGV:%[0-9]+]](s32) = G_FADD [[VREGP1]], [[VREGQ1]]
+; CHECK: %s0 = COPY [[VREGV]]
+; CHECK: BX_RET 14, _, implicit %s0
+entry:
+ %v = fadd float %p1, %q1
+ ret float %v
+}
+
+define arm_aapcs_vfpcc double @test_double_vfpcc(double %p0, double %p1, double %p2,
+ double %p3, double %p4, double %p5,
+ double %reasonable,
+ double %parameters,
+ double %q0, double %q1) {
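+; Doubles occupy d0-d7 under the VFP calling convention, so the last two
+; arguments spill into 8-byte stack slots.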
+; CHECK-LABEL: name: test_double_vfpcc
+; CHECK: fixedStack:
+; CHECK-DAG: id: [[Q0:[0-9]+]]{{.*}}offset: 0{{.*}}size: 8
+; CHECK-DAG: id: [[Q1:[0-9]+]]{{.*}}offset: 8{{.*}}size: 8
+; CHECK: liveins: %d0, %d1, %d2, %d3, %d4, %d5, %d6, %d7
+; CHECK: [[VREGP1:%[0-9]+]](s64) = COPY %d1
+; CHECK: [[FIQ1:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[Q1]]
+; CHECK: [[VREGQ1:%[0-9]+]](s64) = G_LOAD [[FIQ1]](p0)
+; CHECK: [[VREGV:%[0-9]+]](s64) = G_FADD [[VREGP1]], [[VREGQ1]]
+; CHECK: %d0 = COPY [[VREGV]]
+; CHECK: BX_RET 14, _, implicit %d0
+entry:
+ %v = fadd double %p1, %q1
+ ret double %v
+}
+
+define arm_aapcscc double @test_double_aapcscc(double %p0, double %p1, double %p2,
+ double %p3, double %p4, double %p5) {
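+; Under the soft-float AAPCS a double is split across a pair of core registers
+; (%p1 arrives in r2-r3) and reassembled with G_SEQUENCE; the order of the two
+; halves depends on the target's endianness, hence the LITTLE/BIG prefixes.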
+; CHECK-LABEL: name: test_double_aapcscc
+; CHECK: fixedStack:
+; CHECK-DAG: id: [[P2:[0-9]+]]{{.*}}offset: 0{{.*}}size: 8
+; CHECK-DAG: id: [[P3:[0-9]+]]{{.*}}offset: 8{{.*}}size: 8
+; CHECK-DAG: id: [[P4:[0-9]+]]{{.*}}offset: 16{{.*}}size: 8
+; CHECK-DAG: id: [[P5:[0-9]+]]{{.*}}offset: 24{{.*}}size: 8
+; CHECK: liveins: %r0, %r1, %r2, %r3
+; CHECK-DAG: [[VREGP1LO:%[0-9]+]](s32) = COPY %r2
+; CHECK-DAG: [[VREGP1HI:%[0-9]+]](s32) = COPY %r3
+; LITTLE: [[VREGP1:%[0-9]+]](s64) = G_SEQUENCE [[VREGP1LO]](s32), 0, [[VREGP1HI]](s32), 32
+; BIG: [[VREGP1:%[0-9]+]](s64) = G_SEQUENCE [[VREGP1HI]](s32), 0, [[VREGP1LO]](s32), 32
+; CHECK: [[FIP5:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[P5]]
+; CHECK: [[VREGP5:%[0-9]+]](s64) = G_LOAD [[FIP5]](p0)
+; CHECK: [[VREGV:%[0-9]+]](s64) = G_FADD [[VREGP1]], [[VREGP5]]
+; LITTLE: [[VREGVLO:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 0
+; LITTLE: [[VREGVHI:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 32
+; BIG: [[VREGVHI:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 0
+; BIG: [[VREGVLO:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 32
+; CHECK-DAG: %r0 = COPY [[VREGVLO]]
+; CHECK-DAG: %r1 = COPY [[VREGVHI]]
+; CHECK: BX_RET 14, _, implicit %r0, implicit %r1
+entry:
+ %v = fadd double %p1, %p5
+ ret double %v
+}
+
+define arm_aapcs_vfpcc double @test_double_gap_vfpcc(double %p0, float %filler,
+ double %p1, double %p2,
+ double %p3, double %p4,
+ double %reasonable,
+ double %parameters,
+ double %q0, double %q1) {
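+; The single-precision %filler lands in s2, making d1 unavailable, so the
+; doubles that follow start at d2.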
+; CHECK-LABEL: name: test_double_gap_vfpcc
+; CHECK: fixedStack:
+; CHECK-DAG: id: [[Q0:[0-9]+]]{{.*}}offset: 0{{.*}}size: 8
+; CHECK-DAG: id: [[Q1:[0-9]+]]{{.*}}offset: 8{{.*}}size: 8
+; CHECK: liveins: %d0, %d2, %d3, %d4, %d5, %d6, %d7, %s2
+; CHECK: [[VREGP1:%[0-9]+]](s64) = COPY %d2
+; CHECK: [[FIQ1:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[Q1]]
+; CHECK: [[VREGQ1:%[0-9]+]](s64) = G_LOAD [[FIQ1]](p0)
+; CHECK: [[VREGV:%[0-9]+]](s64) = G_FADD [[VREGP1]], [[VREGQ1]]
+; CHECK: %d0 = COPY [[VREGV]]
+; CHECK: BX_RET 14, _, implicit %d0
+entry:
+ %v = fadd double %p1, %q1
+ ret double %v
+}
+
+define arm_aapcscc double @test_double_gap_aapcscc(float %filler, double %p0,
+ double %p1) {
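+; The float %filler takes r0; %p0 must start at an even register, so it skips
+; r1 and lands in r2-r3, which pushes %p1 onto the stack.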
+; CHECK-LABEL: name: test_double_gap_aapcscc
+; CHECK: fixedStack:
+; CHECK-DAG: id: [[P1:[0-9]+]]{{.*}}offset: 0{{.*}}size: 8
+; CHECK: liveins: %r0, %r2, %r3
+; CHECK-DAG: [[VREGP0LO:%[0-9]+]](s32) = COPY %r2
+; CHECK-DAG: [[VREGP0HI:%[0-9]+]](s32) = COPY %r3
+; LITTLE: [[VREGP0:%[0-9]+]](s64) = G_SEQUENCE [[VREGP0LO]](s32), 0, [[VREGP0HI]](s32), 32
+; BIG: [[VREGP0:%[0-9]+]](s64) = G_SEQUENCE [[VREGP0HI]](s32), 0, [[VREGP0LO]](s32), 32
+; CHECK: [[FIP1:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[P1]]
+; CHECK: [[VREGP1:%[0-9]+]](s64) = G_LOAD [[FIP1]](p0)
+; CHECK: [[VREGV:%[0-9]+]](s64) = G_FADD [[VREGP0]], [[VREGP1]]
+; LITTLE: [[VREGVLO:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 0
+; LITTLE: [[VREGVHI:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 32
+; BIG: [[VREGVHI:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 0
+; BIG: [[VREGVLO:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 32
+; CHECK-DAG: %r0 = COPY [[VREGVLO]]
+; CHECK-DAG: %r1 = COPY [[VREGVHI]]
+; CHECK: BX_RET 14, _, implicit %r0, implicit %r1
+entry:
+ %v = fadd double %p0, %p1
+ ret double %v
+}
+
+define arm_aapcscc double @test_double_gap2_aapcscc(double %p0, float %filler,
+ double %p1) {
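+; Here the double comes first and takes r0-r1; the filler gets r2, and since a
+; double doesn't fit in r3 alone, %p1 still goes on the stack.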
+; CHECK-LABEL: name: test_double_gap2_aapcscc
+; CHECK: fixedStack:
+; CHECK-DAG: id: [[P1:[0-9]+]]{{.*}}offset: 0{{.*}}size: 8
+; CHECK: liveins: %r0, %r1, %r2
+; CHECK-DAG: [[VREGP0LO:%[0-9]+]](s32) = COPY %r0
+; CHECK-DAG: [[VREGP0HI:%[0-9]+]](s32) = COPY %r1
+; LITTLE: [[VREGP0:%[0-9]+]](s64) = G_SEQUENCE [[VREGP0LO]](s32), 0, [[VREGP0HI]](s32), 32
+; BIG: [[VREGP0:%[0-9]+]](s64) = G_SEQUENCE [[VREGP0HI]](s32), 0, [[VREGP0LO]](s32), 32
+; CHECK: [[FIP1:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[P1]]
+; CHECK: [[VREGP1:%[0-9]+]](s64) = G_LOAD [[FIP1]](p0)
+; CHECK: [[VREGV:%[0-9]+]](s64) = G_FADD [[VREGP0]], [[VREGP1]]
+; LITTLE: [[VREGVLO:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 0
+; LITTLE: [[VREGVHI:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 32
+; BIG: [[VREGVHI:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 0
+; BIG: [[VREGVLO:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 32
+; CHECK-DAG: %r0 = COPY [[VREGVLO]]
+; CHECK-DAG: %r1 = COPY [[VREGVHI]]
+; CHECK: BX_RET 14, _, implicit %r0, implicit %r1
+entry:
+ %v = fadd double %p0, %p1
+ ret double %v
+}
+
+define arm_aapcscc void @test_indirect_call(void() *%fptr) {
+; CHECK-LABEL: name: test_indirect_call
+; CHECK: [[FPTR:%[0-9]+]](p0) = COPY %r0
+; CHECK: ADJCALLSTACKDOWN 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: BLX [[FPTR]](p0), csr_aapcs, implicit-def %lr, implicit %sp
+; CHECK: ADJCALLSTACKUP 0, 0, 14, _, implicit-def %sp, implicit %sp
+entry:
+ notail call arm_aapcscc void %fptr()
+ ret void
+}
+
+declare arm_aapcscc void @call_target()
+
+define arm_aapcscc void @test_direct_call() {
+; CHECK-LABEL: name: test_direct_call
+; CHECK: ADJCALLSTACKDOWN 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: BLX @call_target, csr_aapcs, implicit-def %lr, implicit %sp
+; CHECK: ADJCALLSTACKUP 0, 0, 14, _, implicit-def %sp, implicit %sp
+entry:
+ notail call arm_aapcscc void @call_target()
+ ret void
+}
+
+declare arm_aapcscc i32* @simple_reg_params_target(i32, i32*)
+
+define arm_aapcscc i32* @test_call_simple_reg_params(i32 *%a, i32 %b) {
+; CHECK-LABEL: name: test_call_simple_reg_params
+; CHECK-DAG: [[AVREG:%[0-9]+]](p0) = COPY %r0
+; CHECK-DAG: [[BVREG:%[0-9]+]](s32) = COPY %r1
+; CHECK: ADJCALLSTACKDOWN 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK-DAG: %r0 = COPY [[BVREG]]
+; CHECK-DAG: %r1 = COPY [[AVREG]]
+; CHECK: BLX @simple_reg_params_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r1, implicit-def %r0
+; CHECK: [[RVREG:%[0-9]+]](p0) = COPY %r0
+; CHECK: ADJCALLSTACKUP 0, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: %r0 = COPY [[RVREG]]
+; CHECK: BX_RET 14, _, implicit %r0
+entry:
+ %r = notail call arm_aapcscc i32 *@simple_reg_params_target(i32 %b, i32 *%a)
+ ret i32 *%r
+}
+
+declare arm_aapcscc i32* @simple_stack_params_target(i32, i32*, i32, i32*, i32, i32*)
+
+define arm_aapcscc i32* @test_call_simple_stack_params(i32 *%a, i32 %b) {
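+; The first four arguments go in r0-r3; the remaining two are stored relative
+; to sp into the 8 bytes reserved by ADJCALLSTACKDOWN.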
+; CHECK-LABEL: name: test_call_simple_stack_params
+; CHECK-DAG: [[AVREG:%[0-9]+]](p0) = COPY %r0
+; CHECK-DAG: [[BVREG:%[0-9]+]](s32) = COPY %r1
+; CHECK: ADJCALLSTACKDOWN 8, 14, _, implicit-def %sp, implicit %sp
+; CHECK-DAG: %r0 = COPY [[BVREG]]
+; CHECK-DAG: %r1 = COPY [[AVREG]]
+; CHECK-DAG: %r2 = COPY [[BVREG]]
+; CHECK-DAG: %r3 = COPY [[AVREG]]
+; CHECK: [[SP1:%[0-9]+]](p0) = COPY %sp
+; CHECK: [[OFF1:%[0-9]+]](s32) = G_CONSTANT i32 0
+; CHECK: [[FI1:%[0-9]+]](p0) = G_GEP [[SP1]], [[OFF1]](s32)
+; CHECK: G_STORE [[BVREG]](s32), [[FI1]](p0){{.*}}store 4
+; CHECK: [[SP2:%[0-9]+]](p0) = COPY %sp
+; CHECK: [[OFF2:%[0-9]+]](s32) = G_CONSTANT i32 4
+; CHECK: [[FI2:%[0-9]+]](p0) = G_GEP [[SP2]], [[OFF2]](s32)
+; CHECK: G_STORE [[AVREG]](p0), [[FI2]](p0){{.*}}store 4
+; CHECK: BLX @simple_stack_params_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+; CHECK: [[RVREG:%[0-9]+]](p0) = COPY %r0
+; CHECK: ADJCALLSTACKUP 8, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: %r0 = COPY [[RVREG]]
+; CHECK: BX_RET 14, _, implicit %r0
+entry:
+ %r = notail call arm_aapcscc i32 *@simple_stack_params_target(i32 %b, i32 *%a, i32 %b, i32 *%a, i32 %b, i32 *%a)
+ ret i32 *%r
+}
+
+declare arm_aapcscc signext i16 @ext_target(i8 signext, i8 zeroext, i16 signext, i16 zeroext, i8 signext, i8 zeroext, i16 signext, i16 zeroext, i1 zeroext)
+
+define arm_aapcscc signext i16 @test_call_ext_params(i8 %a, i16 %b, i1 %c) {
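+; Each signext/zeroext argument is explicitly extended to i32 before the call,
+; and the five arguments that overflow onto the stack each take a widened
+; 4-byte slot, hence ADJCALLSTACKDOWN 20.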
+; CHECK-LABEL: name: test_call_ext_params
+; CHECK-DAG: [[AVREG:%[0-9]+]](s8) = COPY %r0
+; CHECK-DAG: [[BVREG:%[0-9]+]](s16) = COPY %r1
+; CHECK-DAG: [[CVREG:%[0-9]+]](s1) = COPY %r2
+; CHECK: ADJCALLSTACKDOWN 20, 14, _, implicit-def %sp, implicit %sp
+; CHECK: [[SEXTA:%[0-9]+]](s32) = G_SEXT [[AVREG]](s8)
+; CHECK: %r0 = COPY [[SEXTA]]
+; CHECK: [[ZEXTA:%[0-9]+]](s32) = G_ZEXT [[AVREG]](s8)
+; CHECK: %r1 = COPY [[ZEXTA]]
+; CHECK: [[SEXTB:%[0-9]+]](s32) = G_SEXT [[BVREG]](s16)
+; CHECK: %r2 = COPY [[SEXTB]]
+; CHECK: [[ZEXTB:%[0-9]+]](s32) = G_ZEXT [[BVREG]](s16)
+; CHECK: %r3 = COPY [[ZEXTB]]
+; CHECK: [[SP1:%[0-9]+]](p0) = COPY %sp
+; CHECK: [[OFF1:%[0-9]+]](s32) = G_CONSTANT i32 0
+; CHECK: [[FI1:%[0-9]+]](p0) = G_GEP [[SP1]], [[OFF1]](s32)
+; CHECK: [[SEXTA2:%[0-9]+]](s32) = G_SEXT [[AVREG]]
+; CHECK: G_STORE [[SEXTA2]](s32), [[FI1]](p0){{.*}}store 4
+; CHECK: [[SP2:%[0-9]+]](p0) = COPY %sp
+; CHECK: [[OFF2:%[0-9]+]](s32) = G_CONSTANT i32 4
+; CHECK: [[FI2:%[0-9]+]](p0) = G_GEP [[SP2]], [[OFF2]](s32)
+; CHECK: [[ZEXTA2:%[0-9]+]](s32) = G_ZEXT [[AVREG]]
+; CHECK: G_STORE [[ZEXTA2]](s32), [[FI2]](p0){{.*}}store 4
+; CHECK: [[SP3:%[0-9]+]](p0) = COPY %sp
+; CHECK: [[OFF3:%[0-9]+]](s32) = G_CONSTANT i32 8
+; CHECK: [[FI3:%[0-9]+]](p0) = G_GEP [[SP3]], [[OFF3]](s32)
+; CHECK: [[SEXTB2:%[0-9]+]](s32) = G_SEXT [[BVREG]]
+; CHECK: G_STORE [[SEXTB2]](s32), [[FI3]](p0){{.*}}store 4
+; CHECK: [[SP4:%[0-9]+]](p0) = COPY %sp
+; CHECK: [[OFF4:%[0-9]+]](s32) = G_CONSTANT i32 12
+; CHECK: [[FI4:%[0-9]+]](p0) = G_GEP [[SP4]], [[OFF4]](s32)
+; CHECK: [[ZEXTB2:%[0-9]+]](s32) = G_ZEXT [[BVREG]]
+; CHECK: G_STORE [[ZEXTB2]](s32), [[FI4]](p0){{.*}}store 4
+; CHECK: [[SP5:%[0-9]+]](p0) = COPY %sp
+; CHECK: [[OFF5:%[0-9]+]](s32) = G_CONSTANT i32 16
+; CHECK: [[FI5:%[0-9]+]](p0) = G_GEP [[SP5]], [[OFF5]](s32)
+; CHECK: [[ZEXTC:%[0-9]+]](s32) = G_ZEXT [[CVREG]]
+; CHECK: G_STORE [[ZEXTC]](s32), [[FI5]](p0){{.*}}store 4
+; CHECK: BLX @ext_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0
+; CHECK: [[RVREG:%[0-9]+]](s16) = COPY %r0
+; CHECK: ADJCALLSTACKUP 20, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: [[RExtVREG:%[0-9]+]](s32) = G_SEXT [[RVREG]]
+; CHECK: %r0 = COPY [[RExtVREG]]
+; CHECK: BX_RET 14, _, implicit %r0
+entry:
+ %r = notail call arm_aapcscc signext i16 @ext_target(i8 signext %a, i8 zeroext %a, i16 signext %b, i16 zeroext %b, i8 signext %a, i8 zeroext %a, i16 signext %b, i16 zeroext %b, i1 zeroext %c)
+ ret i16 %r
+}
+
+declare arm_aapcs_vfpcc double @vfpcc_fp_target(float, double)
+
+define arm_aapcs_vfpcc double @test_call_vfpcc_fp_params(double %a, float %b) {
+; CHECK-LABEL: name: test_call_vfpcc_fp_params
+; CHECK-DAG: [[AVREG:%[0-9]+]](s64) = COPY %d0
+; CHECK-DAG: [[BVREG:%[0-9]+]](s32) = COPY %s2
+; CHECK: ADJCALLSTACKDOWN 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK-DAG: %s0 = COPY [[BVREG]]
+; CHECK-DAG: %d1 = COPY [[AVREG]]
+; CHECK: BLX @vfpcc_fp_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %s0, implicit %d1, implicit-def %d0
+; CHECK: [[RVREG:%[0-9]+]](s64) = COPY %d0
+; CHECK: ADJCALLSTACKUP 0, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: %d0 = COPY [[RVREG]]
+; CHECK: BX_RET 14, _, implicit %d0
+entry:
+ %r = notail call arm_aapcs_vfpcc double @vfpcc_fp_target(float %b, double %a)
+ ret double %r
+}
+
+declare arm_aapcscc double @aapcscc_fp_target(float, double, float, double)
+
+define arm_aapcscc double @test_call_aapcs_fp_params(double %a, float %b) {
+; CHECK-LABEL: name: test_call_aapcs_fp_params
+; CHECK-DAG: [[A1:%[0-9]+]](s32) = COPY %r0
+; CHECK-DAG: [[A2:%[0-9]+]](s32) = COPY %r1
+; LITTLE-DAG: [[AVREG:%[0-9]+]](s64) = G_SEQUENCE [[A1]](s32), 0, [[A2]](s32), 32
+; BIG-DAG: [[AVREG:%[0-9]+]](s64) = G_SEQUENCE [[A2]](s32), 0, [[A1]](s32), 32
+; CHECK-DAG: [[BVREG:%[0-9]+]](s32) = COPY %r2
+; CHECK: ADJCALLSTACKDOWN 16, 14, _, implicit-def %sp, implicit %sp
+; CHECK-DAG: %r0 = COPY [[BVREG]]
+; CHECK-DAG: [[A1:%[0-9]+]](s32) = G_EXTRACT [[AVREG]](s64), 0
+; CHECK-DAG: [[A2:%[0-9]+]](s32) = G_EXTRACT [[AVREG]](s64), 32
+; LITTLE-DAG: %r2 = COPY [[A1]]
+; LITTLE-DAG: %r3 = COPY [[A2]]
+; BIG-DAG: %r2 = COPY [[A2]]
+; BIG-DAG: %r3 = COPY [[A1]]
+; CHECK: [[SP1:%[0-9]+]](p0) = COPY %sp
+; CHECK: [[OFF1:%[0-9]+]](s32) = G_CONSTANT i32 0
+; CHECK: [[FI1:%[0-9]+]](p0) = G_GEP [[SP1]], [[OFF1]](s32)
+; CHECK: G_STORE [[BVREG]](s32), [[FI1]](p0){{.*}}store 4
+; CHECK: [[SP2:%[0-9]+]](p0) = COPY %sp
+; CHECK: [[OFF2:%[0-9]+]](s32) = G_CONSTANT i32 8
+; CHECK: [[FI2:%[0-9]+]](p0) = G_GEP [[SP2]], [[OFF2]](s32)
+; CHECK: G_STORE [[AVREG]](s64), [[FI2]](p0){{.*}}store 8
+; CHECK: BLX @aapcscc_fp_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1
+; CHECK-DAG: [[R1:%[0-9]+]](s32) = COPY %r0
+; CHECK-DAG: [[R2:%[0-9]+]](s32) = COPY %r1
+; LITTLE: [[RVREG:%[0-9]+]](s64) = G_SEQUENCE [[R1]](s32), 0, [[R2]](s32), 32
+; BIG: [[RVREG:%[0-9]+]](s64) = G_SEQUENCE [[R2]](s32), 0, [[R1]](s32), 32
+; CHECK: ADJCALLSTACKUP 16, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: [[R1:%[0-9]+]](s32) = G_EXTRACT [[RVREG]](s64), 0
+; CHECK: [[R2:%[0-9]+]](s32) = G_EXTRACT [[RVREG]](s64), 32
+; LITTLE-DAG: %r0 = COPY [[R1]]
+; LITTLE-DAG: %r1 = COPY [[R2]]
+; BIG-DAG: %r0 = COPY [[R2]]
+; BIG-DAG: %r1 = COPY [[R1]]
+; CHECK: BX_RET 14, _, implicit %r0, implicit %r1
+entry:
+ %r = notail call arm_aapcscc double @aapcscc_fp_target(float %b, double %a, float %b, double %a)
+ ret double %r
+}
+
+declare arm_aapcscc float @different_call_conv_target(float)
+
+define arm_aapcs_vfpcc float @test_call_different_call_conv(float %x) {
+; CHECK-LABEL: name: test_call_different_call_conv
+; CHECK: [[X:%[0-9]+]](s32) = COPY %s0
+; CHECK: ADJCALLSTACKDOWN 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: %r0 = COPY [[X]]
+; CHECK: BLX @different_call_conv_target, csr_aapcs, implicit-def %lr, implicit %sp, implicit %r0, implicit-def %r0
+; CHECK: [[R:%[0-9]+]](s32) = COPY %r0
+; CHECK: ADJCALLSTACKUP 0, 0, 14, _, implicit-def %sp, implicit %sp
+; CHECK: %s0 = COPY [[R]]
+; CHECK: BX_RET 14, _, implicit %s0
+entry:
+ %r = notail call arm_aapcscc float @different_call_conv_target(float %x)
+ ret float %r
+}
+
+define i32 @test_shufflevector_s32_v2s32(i32 %arg) {
+; CHECK-LABEL: name: test_shufflevector_s32_v2s32
+; CHECK: [[ARG:%[0-9]+]](s32) = COPY %r0
+; CHECK-DAG: [[UNDEF:%[0-9]+]](s32) = IMPLICIT_DEF
+; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
+; CHECK-DAG: [[MASK:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C0]](s32)
+; CHECK: [[VEC:%[0-9]+]](<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](s32), [[UNDEF]], [[MASK]](<2 x s32>)
+; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<2 x s32>)
+ %vec = insertelement <1 x i32> undef, i32 %arg, i32 0
+ %shuffle = shufflevector <1 x i32> %vec, <1 x i32> undef, <2 x i32> zeroinitializer
+ %res = extractelement <2 x i32> %shuffle, i32 0
+ ret i32 %res
+}
+
+define i32 @test_shufflevector_v2s32_v3s32(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: name: test_shufflevector_v2s32_v3s32
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %r0
+; CHECK: [[ARG2:%[0-9]+]](s32) = COPY %r1
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
+; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
+; CHECK-DAG: [[MASK:%[0-9]+]](<3 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C0]](s32), [[C1]](s32)
+; CHECK-DAG: [[V1:%[0-9]+]](<2 x s32>) = G_INSERT_VECTOR_ELT [[UNDEF]], [[ARG1]](s32), [[C0]](s32)
+; CHECK-DAG: [[V2:%[0-9]+]](<2 x s32>) = G_INSERT_VECTOR_ELT [[V1]], [[ARG2]](s32), [[C1]](s32)
+; CHECK: [[VEC:%[0-9]+]](<3 x s32>) = G_SHUFFLE_VECTOR [[V2]](<2 x s32>), [[UNDEF]], [[MASK]](<3 x s32>)
+; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<3 x s32>)
+ %v1 = insertelement <2 x i32> undef, i32 %arg1, i32 0
+ %v2 = insertelement <2 x i32> %v1, i32 %arg2, i32 1
+ %shuffle = shufflevector <2 x i32> %v2, <2 x i32> undef, <3 x i32> <i32 1, i32 0, i32 1>
+ %res = extractelement <3 x i32> %shuffle, i32 0
+ ret i32 %res
+}
+
+define i32 @test_shufflevector_v2s32_v4s32(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: name: test_shufflevector_v2s32_v4s32
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %r0
+; CHECK: [[ARG2:%[0-9]+]](s32) = COPY %r1
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
+; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
+; CHECK-DAG: [[MASK:%[0-9]+]](<4 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C0]](s32), [[C0]](s32), [[C0]](s32)
+; CHECK-DAG: [[V1:%[0-9]+]](<2 x s32>) = G_INSERT_VECTOR_ELT [[UNDEF]], [[ARG1]](s32), [[C0]](s32)
+; CHECK-DAG: [[V2:%[0-9]+]](<2 x s32>) = G_INSERT_VECTOR_ELT [[V1]], [[ARG2]](s32), [[C1]](s32)
+; CHECK: [[VEC:%[0-9]+]](<4 x s32>) = G_SHUFFLE_VECTOR [[V2]](<2 x s32>), [[UNDEF]], [[MASK]](<4 x s32>)
+; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<4 x s32>)
+ %v1 = insertelement <2 x i32> undef, i32 %arg1, i32 0
+ %v2 = insertelement <2 x i32> %v1, i32 %arg2, i32 1
+ %shuffle = shufflevector <2 x i32> %v2, <2 x i32> undef, <4 x i32> zeroinitializer
+ %res = extractelement <4 x i32> %shuffle, i32 0
+ ret i32 %res
+}
+
+define i32 @test_shufflevector_v4s32_v2s32(i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4) {
+; CHECK-LABEL: name: test_shufflevector_v4s32_v2s32
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %r0
+; CHECK: [[ARG2:%[0-9]+]](s32) = COPY %r1
+; CHECK: [[ARG3:%[0-9]+]](s32) = COPY %r2
+; CHECK: [[ARG4:%[0-9]+]](s32) = COPY %r3
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<4 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
+; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
+; CHECK-DAG: [[C2:%[0-9]+]](s32) = G_CONSTANT i32 2
+; CHECK-DAG: [[C3:%[0-9]+]](s32) = G_CONSTANT i32 3
+; CHECK-DAG: [[MASK:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C3]](s32)
+; CHECK-DAG: [[V1:%[0-9]+]](<4 x s32>) = G_INSERT_VECTOR_ELT [[UNDEF]], [[ARG1]](s32), [[C0]](s32)
+; CHECK-DAG: [[V2:%[0-9]+]](<4 x s32>) = G_INSERT_VECTOR_ELT [[V1]], [[ARG2]](s32), [[C1]](s32)
+; CHECK-DAG: [[V3:%[0-9]+]](<4 x s32>) = G_INSERT_VECTOR_ELT [[V2]], [[ARG3]](s32), [[C2]](s32)
+; CHECK-DAG: [[V4:%[0-9]+]](<4 x s32>) = G_INSERT_VECTOR_ELT [[V3]], [[ARG4]](s32), [[C3]](s32)
+; CHECK: [[VEC:%[0-9]+]](<2 x s32>) = G_SHUFFLE_VECTOR [[V4]](<4 x s32>), [[UNDEF]], [[MASK]](<2 x s32>)
+; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<2 x s32>)
+ %v1 = insertelement <4 x i32> undef, i32 %arg1, i32 0
+ %v2 = insertelement <4 x i32> %v1, i32 %arg2, i32 1
+ %v3 = insertelement <4 x i32> %v2, i32 %arg3, i32 2
+ %v4 = insertelement <4 x i32> %v3, i32 %arg4, i32 3
+ %shuffle = shufflevector <4 x i32> %v4, <4 x i32> undef, <2 x i32> <i32 1, i32 3>
+ %res = extractelement <2 x i32> %shuffle, i32 0
+ ret i32 %res
+}
diff --git a/test/CodeGen/ARM/GlobalISel/arm-isel-fp.ll b/test/CodeGen/ARM/GlobalISel/arm-isel-fp.ll
new file mode 100644
index 000000000000..7d021fdb43dd
--- /dev/null
+++ b/test/CodeGen/ARM/GlobalISel/arm-isel-fp.ll
@@ -0,0 +1,51 @@
+; RUN: llc -mtriple arm-linux-gnueabihf -mattr=+vfp2 -float-abi=hard -global-isel %s -o - | FileCheck %s -check-prefix CHECK -check-prefix HARD
+; RUN: llc -mtriple arm-linux-gnueabi -mattr=+vfp2,+soft-float -float-abi=soft -global-isel %s -o - | FileCheck %s -check-prefix CHECK -check-prefix SOFT-AEABI
+; RUN: llc -mtriple arm-linux-gnu -mattr=+vfp2,+soft-float -float-abi=soft -global-isel %s -o - | FileCheck %s -check-prefix CHECK -check-prefix SOFT-DEFAULT
+
+define arm_aapcscc float @test_frem_float(float %x, float %y) {
+; CHECK-LABEL: test_frem_float:
+; CHECK: blx fmodf
+ %r = frem float %x, %y
+ ret float %r
+}
+
+define arm_aapcscc double @test_frem_double(double %x, double %y) {
+; CHECK-LABEL: test_frem_double:
+; CHECK: blx fmod
+ %r = frem double %x, %y
+ ret double %r
+}
+
+declare float @llvm.pow.f32(float %x, float %y)
+define arm_aapcscc float @test_fpow_float(float %x, float %y) {
+; CHECK-LABEL: test_fpow_float:
+; CHECK: blx powf
+ %r = call float @llvm.pow.f32(float %x, float %y)
+ ret float %r
+}
+
+declare double @llvm.pow.f64(double %x, double %y)
+define arm_aapcscc double @test_fpow_double(double %x, double %y) {
+; CHECK-LABEL: test_fpow_double:
+; CHECK: blx pow
+ %r = call double @llvm.pow.f64(double %x, double %y)
+ ret double %r
+}
+
+define arm_aapcscc float @test_add_float(float %x, float %y) {
+; CHECK-LABEL: test_add_float:
+; HARD: vadd.f32
+; SOFT-AEABI: blx __aeabi_fadd
+; SOFT-DEFAULT: blx __addsf3
+ %r = fadd float %x, %y
+ ret float %r
+}
+
+define arm_aapcscc double @test_add_double(double %x, double %y) {
+; CHECK-LABEL: test_add_double:
+; HARD: vadd.f64
+; SOFT-AEABI: blx __aeabi_dadd
+; SOFT-DEFAULT: blx __adddf3
+ %r = fadd double %x, %y
+ ret double %r
+}
diff --git a/test/CodeGen/ARM/GlobalISel/arm-isel.ll b/test/CodeGen/ARM/GlobalISel/arm-isel.ll
index 3f01b6dd3a83..236dcbeb84c5 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-isel.ll
+++ b/test/CodeGen/ARM/GlobalISel/arm-isel.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple arm-unknown -global-isel %s -o - | FileCheck %s
+; RUN: llc -mtriple arm-unknown -mattr=+vfp2 -global-isel %s -o - | FileCheck %s
define void @test_void_return() {
; CHECK-LABEL: test_void_return:
@@ -7,6 +7,39 @@ entry:
ret void
}
+define zeroext i1 @test_zext_i1(i1 %x) {
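+; A zeroext i1 return only needs its low bit masked off.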
+; CHECK-LABEL: test_zext_i1:
+; CHECK: and r0, r0, #1
+; CHECK: bx lr
+entry:
+ ret i1 %x
+}
+
+define signext i1 @test_sext_i1(i1 %x) {
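+; A signext i1 is materialized as an AND with 1 followed by RSB, yielding 0 or
+; -1 in r0.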
+; CHECK-LABEL: test_sext_i1:
+; CHECK: and r0, r0, #1
+; CHECK: rsb r0, r0, #0
+; CHECK: bx lr
+entry:
+ ret i1 %x
+}
+
+define zeroext i8 @test_ext_i8(i8 %x) {
+; CHECK-LABEL: test_ext_i8:
+; CHECK: uxtb r0, r0
+; CHECK: bx lr
+entry:
+ ret i8 %x
+}
+
+define signext i16 @test_ext_i16(i16 %x) {
+; CHECK-LABEL: test_ext_i16:
+; CHECK: sxth r0, r0
+; CHECK: bx lr
+entry:
+ ret i16 %x
+}
+
define i8 @test_add_i8(i8 %x, i8 %y) {
; CHECK-LABEL: test_add_i8:
; CHECK: add r0, r0, r1
@@ -34,8 +67,8 @@ entry:
ret i32 %sum
}
-define i32 @test_many_args(i32 %p0, i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5) {
-; CHECK-LABEL: test_many_args:
+define i32 @test_stack_args_i32(i32 %p0, i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5) {
+; CHECK-LABEL: test_stack_args_i32:
; CHECK: add [[P5ADDR:r[0-9]+]], sp, #4
; CHECK: ldr [[P5:r[0-9]+]], {{.*}}[[P5ADDR]]
; CHECK: add r0, r2, [[P5]]
@@ -44,3 +77,108 @@ entry:
%sum = add i32 %p2, %p5
ret i32 %sum
}
+
+define i16 @test_stack_args_mixed(i32 %p0, i16 %p1, i8 %p2, i1 %p3, i8 %p4, i16 %p5) {
+; CHECK-LABEL: test_stack_args_mixed:
+; CHECK: add [[P5ADDR:r[0-9]+]], sp, #4
+; CHECK: ldrh [[P5:r[0-9]+]], {{.*}}[[P5ADDR]]
+; CHECK: add r0, r1, [[P5]]
+; CHECK: bx lr
+entry:
+ %sum = add i16 %p1, %p5
+ ret i16 %sum
+}
+
+define i16 @test_stack_args_zeroext(i32 %p0, i16 %p1, i8 %p2, i1 %p3, i16 zeroext %p4) {
+; CHECK-LABEL: test_stack_args_zeroext:
+; CHECK: mov [[P4ADDR:r[0-9]+]], sp
+; CHECK: ldr [[P4:r[0-9]+]], {{.*}}[[P4ADDR]]
+; CHECK: add r0, r1, [[P4]]
+; CHECK: bx lr
+entry:
+ %sum = add i16 %p1, %p4
+ ret i16 %sum
+}
+
+define i8 @test_stack_args_signext(i32 %p0, i16 %p1, i8 %p2, i1 %p3, i8 signext %p4) {
+; CHECK-LABEL: test_stack_args_signext:
+; CHECK: mov [[P4ADDR:r[0-9]+]], sp
+; CHECK: ldr [[P4:r[0-9]+]], {{.*}}[[P4ADDR]]
+; CHECK: add r0, r2, [[P4]]
+; CHECK: bx lr
+entry:
+ %sum = add i8 %p2, %p4
+ ret i8 %sum
+}
+
+define i32 @test_ptr_arg_in_reg(i32* %p) {
+; CHECK-LABEL: test_ptr_arg_in_reg:
+; CHECK: ldr r0, [r0]
+; CHECK: bx lr
+entry:
+ %v = load i32, i32* %p
+ ret i32 %v
+}
+
+define i32 @test_ptr_arg_on_stack(i32 %f0, i32 %f1, i32 %f2, i32 %f3, i32* %p) {
+; CHECK-LABEL: test_ptr_arg_on_stack:
+; CHECK: mov r0, sp
+; CHECK: ldr r0, [r0]
+; CHECK: ldr r0, [r0]
+; CHECK: bx lr
+entry:
+ %v = load i32, i32* %p
+ ret i32 %v
+}
+
+define i8* @test_ptr_ret(i8** %p) {
+; CHECK-LABEL: test_ptr_ret:
+; CHECK: ldr r0, [r0]
+; CHECK: bx lr
+entry:
+ %v = load i8*, i8** %p
+ ret i8* %v
+}
+
+define arm_aapcs_vfpcc float @test_float_hard(float %f0, float %f1) {
+; CHECK-LABEL: test_float_hard:
+; CHECK: vadd.f32 s0, s0, s1
+; CHECK: bx lr
+entry:
+ %v = fadd float %f0, %f1
+ ret float %v
+}
+
+define arm_aapcscc float @test_float_softfp(float %f0, float %f1) {
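+; Soft-float ABI with VFP available: the inputs are moved from core registers
+; into s registers, added there, and the result moved back into r0.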
+; CHECK-LABEL: test_float_softfp:
+; CHECK-DAG: vmov [[F0:s[0-9]+]], r0
+; CHECK-DAG: vmov [[F1:s[0-9]+]], r1
+; CHECK: vadd.f32 [[FV:s[0-9]+]], [[F0]], [[F1]]
+; CHECK: vmov r0, [[FV]]
+; CHECK: bx lr
+entry:
+ %v = fadd float %f0, %f1
+ ret float %v
+}
+
+define arm_aapcs_vfpcc double @test_double_hard(double %f0, double %f1) {
+; CHECK-LABEL: test_double_hard:
+; CHECK: vadd.f64 d0, d0, d1
+; CHECK: bx lr
+entry:
+ %v = fadd double %f0, %f1
+ ret double %v
+}
+
+define arm_aapcscc double @test_double_softfp(double %f0, double %f1) {
+; CHECK-LABEL: test_double_softfp:
+; CHECK-DAG: vmov [[F0:d[0-9]+]], r0, r1
+; CHECK-DAG: vmov [[F1:d[0-9]+]], r2, r3
+; CHECK: vadd.f64 [[FV:d[0-9]+]], [[F0]], [[F1]]
+; CHECK: vmov.32 r0, [[FV]][0]
+; CHECK: vmov.32 r1, [[FV]][1]
+; CHECK: bx lr
+entry:
+ %v = fadd double %f0, %f1
+ ret double %v
+}
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir b/test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir
new file mode 100644
index 000000000000..d154b4887c19
--- /dev/null
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir
@@ -0,0 +1,282 @@
+# RUN: llc -mtriple arm-linux-gnueabihf -mattr=+vfp2 -float-abi=hard -global-isel -run-pass=legalizer %s -o - | FileCheck %s -check-prefix CHECK -check-prefix HARD
+# RUN: llc -mtriple arm-linux-gnueabi -mattr=+vfp2,+soft-float -float-abi=soft -global-isel -run-pass=legalizer %s -o - | FileCheck %s -check-prefix CHECK -check-prefix SOFT -check-prefix SOFT-AEABI
+# RUN: llc -mtriple arm-linux-gnu -mattr=+soft-float -float-abi=soft -global-isel -run-pass=legalizer %s -o - | FileCheck %s -check-prefix CHECK -check-prefix SOFT -check-prefix SOFT-DEFAULT
+--- |
+ define void @test_frem_float() { ret void }
+ define void @test_frem_double() { ret void }
+
+ define void @test_fpow_float() { ret void }
+ define void @test_fpow_double() { ret void }
+
+ define void @test_fadd_float() { ret void }
+ define void @test_fadd_double() { ret void }
+...
+---
+name: test_frem_float
+# CHECK-LABEL: name: test_frem_float
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; CHECK: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; HARD-DAG: %s0 = COPY [[X]]
+ ; HARD-DAG: %s1 = COPY [[Y]]
+ ; SOFT: BLX $fmodf, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; HARD: BLX $fmodf, {{.*}}, implicit %s0, implicit %s1, implicit-def %s0
+ ; SOFT: [[R:%[0-9]+]](s32) = COPY %r0
+ ; HARD: [[R:%[0-9]+]](s32) = COPY %s0
+ ; CHECK: ADJCALLSTACKUP
+ %2(s32) = G_FREM %0, %1
+ ; CHECK: %r0 = COPY [[R]]
+ %r0 = COPY %2(s32)
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_frem_double
+# CHECK-LABEL: name: test_frem_double
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+ - { id: 8, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ ; The inputs may be in the wrong order (depending on the target's
+ ; endianness), but that's orthogonal to what we're trying to test here.
+ ; For soft float, we only need to check that the first value, received
+ ; through R0-R1, ends up in R0-R1 or R1-R0, and the second value, received
+ ; through R2-R3, ends up in R2-R3 or R3-R2, when passed to fmod.
+ ; For hard float, the values need to end up in D0 and D1.
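+ ; As a concrete illustration, for fmod(x, y) on a little-endian target the
+ ; soft-float ABI passes x in r0 (low half) and r1 (high half) and y in
+ ; r2-r3, while the hard-float ABI passes x in d0 and y in d1.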
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_SEQUENCE [[X0]]
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_SEQUENCE [[Y0]]
+ %4(s64) = G_SEQUENCE %0(s32), 0, %1(s32), 32
+ %5(s64) = G_SEQUENCE %2(s32), 0, %3(s32), 32
+ ; CHECK: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r{{[0-1]}} = COPY [[X0]]
+ ; SOFT-DAG: %r{{[0-1]}} = COPY [[X1]]
+ ; SOFT-DAG: %r{{[2-3]}} = COPY [[Y0]]
+ ; SOFT-DAG: %r{{[2-3]}} = COPY [[Y1]]
+ ; HARD-DAG: %d0 = COPY [[X]]
+ ; HARD-DAG: %d1 = COPY [[Y]]
+ ; SOFT: BLX $fmod, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1
+ ; HARD: BLX $fmod, {{.*}}, implicit %d0, implicit %d1, implicit-def %d0
+ ; CHECK: ADJCALLSTACKUP
+ %6(s64) = G_FREM %4, %5
+ %7(s32) = G_EXTRACT %6(s64), 0
+ %8(s32) = G_EXTRACT %6(s64), 32
+ %r0 = COPY %7(s32)
+ %r1 = COPY %8(s32)
+ BX_RET 14, _, implicit %r0, implicit %r1
+...
+---
+name: test_fpow_float
+# CHECK-LABEL: name: test_fpow_float
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; CHECK: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; HARD-DAG: %s0 = COPY [[X]]
+ ; HARD-DAG: %s1 = COPY [[Y]]
+ ; SOFT: BLX $powf, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; HARD: BLX $powf, {{.*}}, implicit %s0, implicit %s1, implicit-def %s0
+ ; SOFT: [[R:%[0-9]+]](s32) = COPY %r0
+ ; HARD: [[R:%[0-9]+]](s32) = COPY %s0
+ ; CHECK: ADJCALLSTACKUP
+ %2(s32) = G_FPOW %0, %1
+ ; CHECK: %r0 = COPY [[R]]
+ %r0 = COPY %2(s32)
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fpow_double
+# CHECK-LABEL: name: test_fpow_double
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+ - { id: 8, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ ; The inputs may be in the wrong order (depending on the target's
+ ; endianness), but that's orthogonal to what we're trying to test here.
+ ; For soft float, we only need to check that the first value, received
+ ; through R0-R1, ends up in R0-R1 or R1-R0, and the second value, received
+ ; through R2-R3, ends up in R2-R3 or R3-R2, when passed to pow.
+ ; For hard float, the values need to end up in D0 and D1.
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_SEQUENCE [[X0]]
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_SEQUENCE [[Y0]]
+ %4(s64) = G_SEQUENCE %0(s32), 0, %1(s32), 32
+ %5(s64) = G_SEQUENCE %2(s32), 0, %3(s32), 32
+ ; CHECK: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r{{[0-1]}} = COPY [[X0]]
+ ; SOFT-DAG: %r{{[0-1]}} = COPY [[X1]]
+ ; SOFT-DAG: %r{{[2-3]}} = COPY [[Y0]]
+ ; SOFT-DAG: %r{{[2-3]}} = COPY [[Y1]]
+ ; HARD-DAG: %d0 = COPY [[X]]
+ ; HARD-DAG: %d1 = COPY [[Y]]
+ ; SOFT: BLX $pow, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1
+ ; HARD: BLX $pow, {{.*}}, implicit %d0, implicit %d1, implicit-def %d0
+ ; CHECK: ADJCALLSTACKUP
+ %6(s64) = G_FPOW %4, %5
+ %7(s32) = G_EXTRACT %6(s64), 0
+ %8(s32) = G_EXTRACT %6(s64), 32
+ %r0 = COPY %7(s32)
+ %r1 = COPY %8(s32)
+ BX_RET 14, _, implicit %r0, implicit %r1
+...
+---
+name: test_fadd_float
+# CHECK-LABEL: name: test_fadd_float
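+# Unlike FREM and FPOW, FADD only needs a libcall when there is no FPU: the
+# HARD configuration keeps the G_FADD, while the SOFT ones lower it to a call.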
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ ; CHECK-DAG: [[X:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[Y:%[0-9]+]](s32) = COPY %r1
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ ; HARD: [[R:%[0-9]+]](s32) = G_FADD [[X]], [[Y]]
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r0 = COPY [[X]]
+ ; SOFT-DAG: %r1 = COPY [[Y]]
+ ; SOFT-AEABI: BLX $__aeabi_fadd, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT-DEFAULT: BLX $__addsf3, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0
+ ; SOFT: [[R:%[0-9]+]](s32) = COPY %r0
+ ; SOFT: ADJCALLSTACKUP
+ %2(s32) = G_FADD %0, %1
+ ; CHECK: %r0 = COPY [[R]]
+ %r0 = COPY %2(s32)
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fadd_double
+# CHECK-LABEL: name: test_fadd_double
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+ - { id: 8, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ ; CHECK-DAG: [[X0:%[0-9]+]](s32) = COPY %r0
+ ; CHECK-DAG: [[X1:%[0-9]+]](s32) = COPY %r1
+ ; CHECK-DAG: [[Y0:%[0-9]+]](s32) = COPY %r2
+ ; CHECK-DAG: [[Y1:%[0-9]+]](s32) = COPY %r3
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = COPY %r2
+ %3(s32) = COPY %r3
+ ; HARD-DAG: [[X:%[0-9]+]](s64) = G_SEQUENCE [[X0]]
+ ; HARD-DAG: [[Y:%[0-9]+]](s64) = G_SEQUENCE [[Y0]]
+ %4(s64) = G_SEQUENCE %0(s32), 0, %1(s32), 32
+ %5(s64) = G_SEQUENCE %2(s32), 0, %3(s32), 32
+ ; HARD: [[R:%[0-9]+]](s64) = G_FADD [[X]], [[Y]]
+ ; SOFT: ADJCALLSTACKDOWN
+ ; SOFT-DAG: %r{{[0-1]}} = COPY [[X0]]
+ ; SOFT-DAG: %r{{[0-1]}} = COPY [[X1]]
+ ; SOFT-DAG: %r{{[2-3]}} = COPY [[Y0]]
+ ; SOFT-DAG: %r{{[2-3]}} = COPY [[Y1]]
+ ; SOFT-AEABI: BLX $__aeabi_dadd, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1
+ ; SOFT-DEFAULT: BLX $__adddf3, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0, implicit-def %r1
+ ; SOFT: ADJCALLSTACKUP
+ %6(s64) = G_FADD %4, %5
+ ; HARD-DAG: G_EXTRACT [[R]](s64), 0
+ ; HARD-DAG: G_EXTRACT [[R]](s64), 32
+ %7(s32) = G_EXTRACT %6(s64), 0
+ %8(s32) = G_EXTRACT %6(s64), 32
+ %r0 = COPY %7(s32)
+ %r1 = COPY %8(s32)
+ BX_RET 14, _, implicit %r0, implicit %r1
+...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
index 98d71c09e63b..cbff7e12fb77 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
@@ -1,10 +1,68 @@
# RUN: llc -mtriple arm-- -global-isel -run-pass=legalizer %s -o - | FileCheck %s
--- |
+ define void @test_sext_s8() { ret void }
+ define void @test_zext_s16() { ret void }
+
define void @test_add_s8() { ret void }
define void @test_add_s16() { ret void }
define void @test_add_s32() { ret void }
define void @test_load_from_stack() { ret void }
+ define void @test_legal_loads() #0 { ret void }
+ define void @test_legal_stores() #0 { ret void }
+
+ define void @test_gep() { ret void }
+
+ define void @test_constants() { ret void }
+
+ define void @test_fadd_s32() #0 { ret void }
+ define void @test_fadd_s64() #0 { ret void }
+
+ attributes #0 = { "target-features"="+vfp2" }
+...
+---
+name: test_sext_s8
+# CHECK-LABEL: name: test_sext_s8
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.0:
+ liveins: %r0
+
+ %0(s8) = COPY %r0
+ %1(s32) = G_SEXT %0
+ ; G_SEXT with s8 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}(s32) = G_SEXT {{%[0-9]+}}
+ %r0 = COPY %1(s32)
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_zext_s16
+# CHECK-LABEL: name: test_zext_s16
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.0:
+ liveins: %r0
+
+ %0(s16) = COPY %r0
+ %1(s32) = G_ZEXT %0
+ ; G_ZEXT with s16 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}(s32) = G_ZEXT {{%[0-9]+}}
+ %r0 = COPY %1(s32)
+ BX_RET 14, _, implicit %r0
...
---
name: test_add_s8
@@ -104,8 +162,179 @@ body: |
; This is legal, so we should find it unchanged in the output
; CHECK: [[FIVREG:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[FRAME_INDEX]]
- ; CHECK: {{%[0-9]+}}(s32) = G_LOAD [[FIVREG]](p0)
+ ; CHECK: {{%[0-9]+}}(s32) = G_LOAD [[FIVREG]](p0) :: (load 4)
%0(p0) = G_FRAME_INDEX %fixed-stack.2
- %1(s32) = G_LOAD %0(p0)
+ %1(s32) = G_LOAD %0(p0) :: (load 4)
+ BX_RET 14, _
+...
+---
+name: test_legal_loads
+# CHECK-LABEL: name: test_legal_loads
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3
+
+ ; These are all legal, so we should find them unchanged in the output
+ ; CHECK-DAG: {{%[0-9]+}}(s64) = G_LOAD %0
+ ; CHECK-DAG: {{%[0-9]+}}(s32) = G_LOAD %0
+ ; CHECK-DAG: {{%[0-9]+}}(s16) = G_LOAD %0
+ ; CHECK-DAG: {{%[0-9]+}}(s8) = G_LOAD %0
+ ; CHECK-DAG: {{%[0-9]+}}(s1) = G_LOAD %0
+ ; CHECK-DAG: {{%[0-9]+}}(p0) = G_LOAD %0
+ %0(p0) = COPY %r0
+ %1(s32) = G_LOAD %0(p0) :: (load 4)
+ %2(s16) = G_LOAD %0(p0) :: (load 2)
+ %3(s8) = G_LOAD %0(p0) :: (load 1)
+ %4(s1) = G_LOAD %0(p0) :: (load 1)
+ %5(p0) = G_LOAD %0(p0) :: (load 4)
+ %6(s64) = G_LOAD %0(p0) :: (load 8)
+ BX_RET 14, _
+...
+---
+name: test_legal_stores
+# CHECK-LABEL: name: test_legal_stores
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3, %r4, %r5, %r6, %d1
+
+ ; These are all legal, so we should find them unchanged in the output
+ ; CHECK-DAG: G_STORE {{%[0-9]+}}(s64), %0(p0)
+ ; CHECK-DAG: G_STORE {{%[0-9]+}}(s32), %0(p0)
+ ; CHECK-DAG: G_STORE {{%[0-9]+}}(s16), %0(p0)
+ ; CHECK-DAG: G_STORE {{%[0-9]+}}(s8), %0(p0)
+ ; CHECK-DAG: G_STORE {{%[0-9]+}}(s1), %0(p0)
+ ; CHECK-DAG: G_STORE {{%[0-9]+}}(p0), %0(p0)
+ %0(p0) = COPY %r0
+ %1(s64) = COPY %d1
+ G_STORE %1(s64), %0(p0) :: (store 8)
+ %2(s32) = COPY %r2
+ G_STORE %2(s32), %0(p0) :: (store 4)
+ %3(s16) = COPY %r3
+ G_STORE %3(s16), %0(p0) :: (store 2)
+ %4(s8) = COPY %r4
+ G_STORE %4(s8), %0(p0) :: (store 1)
+ %5(s1) = COPY %r5
+ G_STORE %5(s1), %0(p0) :: (store 1)
+ %6(p0) = COPY %r6
+ G_STORE %6(p0), %0(p0) :: (store 4)
BX_RET 14, _
...
+---
+name: test_gep
+# CHECK-LABEL: name: test_gep
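+# G_GEP with a 32-bit offset is legal, so it should survive unchanged.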
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(p0) = COPY %r0
+ %1(s32) = COPY %r1
+
+ ; CHECK: {{%[0-9]+}}(p0) = G_GEP {{%[0-9]+}}, {{%[0-9]+}}(s32)
+ %2(p0) = G_GEP %0, %1(s32)
+
+ %r0 = COPY %2(p0)
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_constants
+# CHECK-LABEL: name: test_constants
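+# A 32-bit G_CONSTANT is legal and should be left alone.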
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+body: |
+ bb.0:
+ %0(s32) = G_CONSTANT 42
+ ; CHECK: {{%[0-9]+}}(s32) = G_CONSTANT 42
+
+ %r0 = COPY %0(s32)
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fadd_s32
+# CHECK-LABEL: name: test_fadd_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s32) = G_FADD %0, %1
+ ; G_FADD with s32 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}(s32) = G_FADD {{%[0-9]+, %[0-9]+}}
+ %r0 = COPY %2(s32)
+ BX_RET 14, _, implicit %r0
+
+...
+---
+name: test_fadd_s64
+# CHECK-LABEL: name: test_fadd_s64
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ %1(s64) = COPY %d1
+ %2(s64) = G_FADD %0, %1
+ ; G_FADD with s64 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}(s64) = G_FADD {{%[0-9]+, %[0-9]+}}
+ %d0 = COPY %2(s64)
+ BX_RET 14, _, implicit %d0
+
+...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir b/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
index ce0601021e62..fbf8d81322f8 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
@@ -3,6 +3,23 @@
define void @test_add_s32() { ret void }
define void @test_add_s16() { ret void }
define void @test_add_s8() { ret void }
+ define void @test_add_s1() { ret void }
+
+ define void @test_loads() #0 { ret void }
+ define void @test_stores() #0 { ret void }
+
+ define void @test_stack() { ret void }
+
+ define void @test_gep() { ret void }
+
+ define void @test_constants() { ret void }
+
+ define void @test_fadd_s32() #0 { ret void }
+ define void @test_fadd_s64() #0 { ret void }
+
+ define void @test_soft_fp_s64() #0 { ret void }
+
+ attributes #0 = { "target-features"="+vfp2"}
...
---
name: test_add_s32
@@ -82,3 +99,266 @@ body: |
BX_RET 14, _, implicit %r0
...
+---
+name: test_add_s1
+# CHECK-LABEL: name: test_add_s1
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK: registers:
+# CHECK: - { id: 0, class: gprb }
+# CHECK: - { id: 1, class: gprb }
+# CHECK: - { id: 2, class: gprb }
+
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s1) = COPY %r0
+ %1(s1) = COPY %r1
+ %2(s1) = G_ADD %0, %1
+ %r0 = COPY %2(s1)
+ BX_RET 14, _, implicit %r0
+
+...
+---
+name: test_loads
+# CHECK-LABEL: name: test_loads
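+# The s64 load (id 6) is mapped to the FPR bank; all the other loads stay on
+# the GPR bank.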
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK: registers:
+# CHECK: - { id: 0, class: gprb }
+# CHECK: - { id: 1, class: gprb }
+# CHECK: - { id: 2, class: gprb }
+# CHECK: - { id: 3, class: gprb }
+# CHECK: - { id: 4, class: gprb }
+# CHECK: - { id: 5, class: gprb }
+# CHECK: - { id: 6, class: fprb }
+
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+body: |
+ bb.0:
+ liveins: %r0
+ %0(p0) = COPY %r0
+ %6(s64) = G_LOAD %0 :: (load 8)
+ %1(s32) = G_LOAD %0 :: (load 4)
+ %2(s16) = G_LOAD %0 :: (load 2)
+ %3(s8) = G_LOAD %0 :: (load 1)
+ %4(s1) = G_LOAD %0 :: (load 1)
+ %5(p0) = G_LOAD %0 :: (load 4)
+ BX_RET 14, _, implicit %r0
+
+...
+---
+name: test_stores
+# CHECK-LABEL: name: test_stores
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK: registers:
+# CHECK: - { id: 0, class: gprb }
+# CHECK: - { id: 1, class: gprb }
+# CHECK: - { id: 2, class: gprb }
+# CHECK: - { id: 3, class: gprb }
+# CHECK: - { id: 4, class: gprb }
+# CHECK: - { id: 5, class: gprb }
+# CHECK: - { id: 6, class: fprb }
+
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2, %r3, %r4, %r5, %d6
+ %0(p0) = COPY %r0
+ %1(s32) = COPY %r1
+ G_STORE %1(s32), %0 :: (store 4)
+ %2(s16) = COPY %r2
+ G_STORE %2(s16), %0 :: (store 2)
+ %3(s8) = COPY %r3
+ G_STORE %3(s8), %0 :: (store 1)
+ %4(s1) = COPY %r4
+ G_STORE %4(s1), %0 :: (store 1)
+ %5(p0) = COPY %r5
+ G_STORE %5(p0), %0 :: (store 4)
+ %6(s64) = COPY %d6
+ G_STORE %6(s64), %0 :: (store 8)
+ BX_RET 14, _, implicit %r0
+
+...
+---
+name: test_stack
+# CHECK-LABEL: name: test_stack
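+# Frame indices, loads from fixed stack slots and sp-relative stores all stay
+# on the GPR bank.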
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK: registers:
+# CHECK: - { id: 0, class: gprb }
+# CHECK: - { id: 1, class: gprb }
+# CHECK: - { id: 2, class: gprb }
+# CHECK: - { id: 3, class: gprb }
+# CHECK: - { id: 4, class: gprb }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+fixedStack:
+ - { id: 0, offset: 0, size: 4, alignment: 4, isImmutable: true, isAliased: false }
+body: |
+ bb.0:
+ %0(p0) = G_FRAME_INDEX %fixed-stack.0
+ %1(s32) = G_LOAD %0(p0) :: (load 4 from %fixed-stack.0, align 0)
+
+ %2(p0) = COPY %sp
+ %3(s32) = G_CONSTANT i32 8
+ %4(p0) = G_GEP %2, %3(s32)
+ G_STORE %1(s32), %4(p0) :: (store 4)
+
+ BX_RET 14, _
+
+...
+---
+name: test_gep
+# CHECK-LABEL: name: test_gep
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK: registers:
+# CHECK: - { id: 0, class: gprb }
+# CHECK: - { id: 1, class: gprb }
+# CHECK: - { id: 2, class: gprb }
+
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(p0) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(p0) = G_GEP %0, %1(s32)
+ %r0 = COPY %2(p0)
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_constants
+# CHECK-LABEL: name: test_constants
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK: registers:
+# CHECK: - { id: 0, class: gprb }
+registers:
+ - { id: 0, class: _ }
+body: |
+ bb.0:
+ %0(s32) = G_CONSTANT 42
+ %r0 = COPY %0(s32)
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_fadd_s32
+# CHECK-LABEL: name: test_fadd_s32
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK: registers:
+# CHECK: - { id: 0, class: fprb }
+# CHECK: - { id: 1, class: fprb }
+# CHECK: - { id: 2, class: fprb }
+
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: %s0, %s1
+
+ %0(s32) = COPY %s0
+ %1(s32) = COPY %s1
+ %2(s32) = G_FADD %0, %1
+ %s0 = COPY %2(s32)
+ BX_RET 14, _, implicit %s0
+
+...
+---
+name: test_fadd_s64
+# CHECK-LABEL: name: test_fadd_s64
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK: registers:
+# CHECK: - { id: 0, class: fprb }
+# CHECK: - { id: 1, class: fprb }
+# CHECK: - { id: 2, class: fprb }
+
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: %d0, %d1
+
+ %0(s64) = COPY %d0
+ %1(s64) = COPY %d1
+ %2(s64) = G_FADD %0, %1
+ %d0 = COPY %2(s64)
+ BX_RET 14, _, implicit %d0
+
+...
+---
+name: test_soft_fp_s64
+# CHECK-LABEL: name: test_soft_fp_s64
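+# The s64 built by G_SEQUENCE is assigned to the FPR bank even though its
+# inputs and the extracted halves live on the GPR bank.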
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK: registers:
+# CHECK: - { id: 0, class: gprb }
+# CHECK: - { id: 1, class: gprb }
+# CHECK: - { id: 2, class: fprb }
+# CHECK: - { id: 3, class: gprb }
+# CHECK: - { id: 4, class: gprb }
+
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s64) = G_SEQUENCE %0(s32), 0, %1(s32), 32
+ %3(s32) = G_EXTRACT %2(s64), 0
+ %4(s32) = G_EXTRACT %2(s64), 32
+ %r0 = COPY %3(s32)
+ %r1 = COPY %4(s32)
+ BX_RET 14, _, implicit %r0, implicit %r1
+
+...
diff --git a/test/CodeGen/ARM/alloc-no-stack-realign.ll b/test/CodeGen/ARM/alloc-no-stack-realign.ll
index 7d37c83d7483..0e077b3aee5a 100644
--- a/test/CodeGen/ARM/alloc-no-stack-realign.ll
+++ b/test/CodeGen/ARM/alloc-no-stack-realign.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -mtriple=armv7-apple-ios -O0 | FileCheck %s -check-prefix=NO-REALIGN
-; RUN: llc < %s -mtriple=armv7-apple-ios -O0 | FileCheck %s -check-prefix=REALIGN
+; RUN: llc < %s -mtriple=armv7-apple-ios -O0 | FileCheck %s
; rdar://12713765
; When realign-stack is set to false, make sure we are not creating stack
@@ -8,29 +7,31 @@
define void @test1(<16 x float>* noalias sret %agg.result) nounwind ssp "no-realign-stack" {
entry:
-; NO-REALIGN-LABEL: test1
-; NO-REALIGN: mov r[[R2:[0-9]+]], r[[R1:[0-9]+]]
-; NO-REALIGN: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]!
-; NO-REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
-; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1]], #32
-; NO-REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
-; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1]], #48
-; NO-REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
-
-; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1:[0-9]+]], #48
-; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
-; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1]], #32
-; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
-; NO-REALIGN: mov r[[R3:[0-9]+]], r[[R1]]
-; NO-REALIGN: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R3]]:128]!
-; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R3]]:128]
-
-; NO-REALIGN: add r[[R2:[0-9]+]], r[[R0:0]], #48
-; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
-; NO-REALIGN: add r[[R2:[0-9]+]], r[[R0]], #32
-; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
-; NO-REALIGN: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]!
-; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]
+; CHECK-LABEL: test1
+; CHECK: ldr r[[R1:[0-9]+]], [pc, r1]
+; CHECK: add r[[R2:[0-9]+]], r1, #48
+; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; CHECK: mov r[[R2:[0-9]+]], r[[R1]]
+; CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]!
+; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; CHECK: add r[[R1:[0-9]+]], r[[R1]], #32
+; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
+; CHECK: mov r[[R1:[0-9]+]], sp
+; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
+; CHECK: add r[[R2:[0-9]+]], r[[R1]], #32
+; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]!
+; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
+; CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]!
+; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
+; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; CHECK: add r[[R1:[0-9]+]], r0, #48
+; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
+; CHECK: add r[[R1:[0-9]+]], r0, #32
+; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
+; CHECK: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r0:128]!
+; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r0:128]
%retval = alloca <16 x float>, align 16
%0 = load <16 x float>, <16 x float>* @T3_retval, align 16
store <16 x float> %0, <16 x float>* %retval
@@ -41,32 +42,33 @@ entry:
define void @test2(<16 x float>* noalias sret %agg.result) nounwind ssp {
entry:
-; REALIGN-LABEL: test2
-; REALIGN: bfc sp, #0, #6
-; REALIGN: mov r[[R2:[0-9]+]], r[[R1:[0-9]+]]
-; REALIGN: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]!
-; REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
-; REALIGN: add r[[R2:[0-9]+]], r[[R1]], #32
-; REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
-; REALIGN: add r[[R2:[0-9]+]], r[[R1]], #48
-; REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; CHECK: ldr r[[R1:[0-9]+]], [pc, r1]
+; CHECK: add r[[R2:[0-9]+]], r[[R1]], #48
+; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; CHECK: mov r[[R2:[0-9]+]], r[[R1]]
+; CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]!
+; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; CHECK: add r[[R1:[0-9]+]], r[[R1]], #32
+; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
+; CHECK: mov r[[R1:[0-9]+]], sp
+; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
+; CHECK: orr r[[R2:[0-9]+]], r[[R1]], #32
+; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]!
+; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
+; CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]!
+; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
+; CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; CHECK: add r[[R1:[0-9]+]], r0, #48
+; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
+; CHECK: add r[[R1:[0-9]+]], r0, #32
+; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
+; CHECK: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r0:128]!
+; CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r0:128]
-; REALIGN: orr r[[R2:[0-9]+]], r[[R1:[0-9]+]], #48
-; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
-; REALIGN: orr r[[R2:[0-9]+]], r[[R1]], #32
-; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
-; REALIGN: orr r[[R2:[0-9]+]], r[[R1]], #16
-; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
-; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
-
-; REALIGN: add r[[R1:[0-9]+]], r[[R0:0]], #48
-; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
-; REALIGN: add r[[R1:[0-9]+]], r[[R0]], #32
-; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
-; REALIGN: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]!
-; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]
 %retval = alloca <16 x float>, align 16
%0 = load <16 x float>, <16 x float>* @T3_retval, align 16
store <16 x float> %0, <16 x float>* %retval
%1 = load <16 x float>, <16 x float>* %retval
diff --git a/test/CodeGen/ARM/arg-copy-elide.ll b/test/CodeGen/ARM/arg-copy-elide.ll
new file mode 100644
index 000000000000..739b560b0833
--- /dev/null
+++ b/test/CodeGen/ARM/arg-copy-elide.ll
@@ -0,0 +1,61 @@
+; RUN: llc -mtriple=armv7-linux < %s | FileCheck %s
+
+declare arm_aapcscc void @addrof_i32(i32*)
+declare arm_aapcscc void @addrof_i64(i64*)
+
+define arm_aapcscc void @simple(i32, i32, i32, i32, i32 %x) {
+entry:
+ %x.addr = alloca i32
+ store i32 %x, i32* %x.addr
+ call void @addrof_i32(i32* %x.addr)
+ ret void
+}
+
+; CHECK-LABEL: simple:
+; CHECK: push {r11, lr}
+; CHECK: add r0, sp, #8
+; CHECK: bl addrof_i32
+; CHECK: pop {r11, pc}
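+; %x already lives in its incoming argument stack slot, so its address
+; (add r0, sp, #8) is passed straight to the callee and no copy is emitted.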
+
+
+; We now need to load %x before calling addrof_i32, because the callee could
+; mutate %x in place through the pointer it receives.
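+; A minimal sketch of the hazard (hypothetical callee body, not part of this
+; test):
+;   define arm_aapcscc void @addrof_i32(i32* %p) {
+;     store i32 0, i32* %p   ; clobbers the caller's %x slot
+;     ret void
+;   }
+; Returning %x therefore requires reading it into a callee-saved register
+; before the call, as the checks below verify.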
+
+define arm_aapcscc i32 @use_arg(i32, i32, i32, i32, i32 %x) {
+entry:
+ %x.addr = alloca i32
+ store i32 %x, i32* %x.addr
+ call void @addrof_i32(i32* %x.addr)
+ ret i32 %x
+}
+
+; CHECK-LABEL: use_arg:
+; CHECK: push {[[csr:[^ ]*]], lr}
+; CHECK: ldr [[csr]], [sp, #8]
+; CHECK: add r0, sp, #8
+; CHECK: bl addrof_i32
+; CHECK: mov r0, [[csr]]
+; CHECK: pop {[[csr]], pc}
+
+
+define arm_aapcscc i64 @split_i64(i32, i32, i32, i32, i64 %x) {
+entry:
+ %x.addr = alloca i64, align 4
+ store i64 %x, i64* %x.addr, align 4
+ call void @addrof_i64(i64* %x.addr)
+ ret i64 %x
+}
+
+; CHECK-LABEL: split_i64:
+; CHECK: push {r4, r5, r11, lr}
+; CHECK: sub sp, sp, #8
+; CHECK: ldr r4, [sp, #28]
+; CHECK: ldr r5, [sp, #24]
+; CHECK: mov r0, sp
+; CHECK: str r4, [sp, #4]
+; CHECK: str r5, [sp]
+; CHECK: bl addrof_i64
+; CHECK: mov r0, r5
+; CHECK: mov r1, r4
+; CHECK: add sp, sp, #8
+; CHECK: pop {r4, r5, r11, pc}
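+;
+; Unlike @simple, the copy is not elided here: a fresh 8-byte temporary is
+; built at sp for the call, while r4/r5 keep %x live for the return value.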
diff --git a/test/CodeGen/ARM/arm-and-tst-peephole.ll b/test/CodeGen/ARM/arm-and-tst-peephole.ll
index 9bd2077e4d03..31691e9468c9 100644
--- a/test/CodeGen/ARM/arm-and-tst-peephole.ll
+++ b/test/CodeGen/ARM/arm-and-tst-peephole.ll
@@ -1,7 +1,6 @@
; RUN: llc -mtriple=arm-eabi -arm-atomic-cfg-tidy=0 %s -o - | FileCheck -check-prefix=ARM %s
; RUN: llc -mtriple=thumb-eabi -arm-atomic-cfg-tidy=0 %s -o - | FileCheck -check-prefix=THUMB %s
-; RUN: llc -mtriple=thumb-eabi -arm-atomic-cfg-tidy=0 -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - \
-; RUN: | FileCheck -check-prefix=T2 %s
+; RUN: llc -mtriple=thumb-eabi -arm-atomic-cfg-tidy=0 -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck -check-prefix=T2 %s
; RUN: llc -mtriple=thumbv8-eabi -arm-atomic-cfg-tidy=0 %s -o - | FileCheck -check-prefix=V8 %s
; FIXME: The -march=thumb test doesn't change if -disable-peephole is specified.
@@ -49,9 +48,9 @@ tailrecurse.switch: ; preds = %tailrecurse
; V8-NEXT: beq
; V8-NEXT: %tailrecurse.switch
; V8: cmp
-; V8-NEXT: bne
-; V8-NEXT: b
-; The trailing space in the last line checks that the branch is unconditional
+; V8-NEXT: beq
+; V8-NEXT: %sw.epilog
+; V8-NEXT: bx lr
switch i32 %and, label %sw.epilog [
i32 1, label %sw.bb
i32 3, label %sw.bb6
@@ -93,7 +92,7 @@ entry:
%1 = load i8, i8* %0, align 1
%2 = zext i8 %1 to i32
; ARM: ands
-; THUMB: ands
+; THUMB: ands
; T2: ands
; V8: ands
; V8-NEXT: beq
@@ -141,19 +140,48 @@ return: ; preds = %bb2, %bb, %entry
; folding of unrelated tests (in this case, a TST against r1 was eliminated in
; favour of an AND of r0).
+define i32 @test_tst_assessment(i32 %a, i32 %b) {
; ARM-LABEL: test_tst_assessment:
+; ARM: @ BB#0:
+; ARM-NEXT: and r0, r0, #1
+; ARM-NEXT: tst r1, #1
+; ARM-NEXT: subne r0, r0, #1
+; ARM-NEXT: mov pc, lr
+;
; THUMB-LABEL: test_tst_assessment:
+; THUMB: @ BB#0:
+; THUMB-NEXT: movs r2, r0
+; THUMB-NEXT: movs r0, #1
+; THUMB-NEXT: ands r0, r2
+; THUMB-NEXT: subs r2, r0, #1
+; THUMB-NEXT: lsls r1, r1, #31
+; THUMB-NEXT: beq .LBB2_2
+; THUMB-NEXT: @ BB#1:
+; THUMB-NEXT: movs r0, r2
+; THUMB-NEXT: .LBB2_2:
+; THUMB-NEXT: bx lr
+;
; T2-LABEL: test_tst_assessment:
+; T2: @ BB#0:
+; T2-NEXT: lsls r1, r1, #31
+; T2-NEXT: and r0, r0, #1
+; T2-NEXT: it ne
+; T2-NEXT: subne r0, #1
+; T2-NEXT: bx lr
+;
; V8-LABEL: test_tst_assessment:
-define i32 @test_tst_assessment(i1 %lhs, i1 %rhs) {
- %lhs32 = zext i1 %lhs to i32
- %rhs32 = zext i1 %rhs to i32
- %diff = sub nsw i32 %lhs32, %rhs32
-; ARM: tst r1, #1
-; THUMB: lsls r1, r1, #31
-; T2: lsls r1, r1, #31
-; V8: lsls r1, r1, #31
- ret i32 %diff
+; V8: @ BB#0:
+; V8-NEXT: lsls r1, r1, #31
+; V8-NEXT: and r0, r0, #1
+; V8-NEXT: it ne
+; V8-NEXT: subne r0, #1
+; V8-NEXT: bx lr
+ %and1 = and i32 %a, 1
+ %sub = sub i32 %and1, 1
+ %and2 = and i32 %b, 1
+ %cmp = icmp eq i32 %and2, 0
+ %sel = select i1 %cmp, i32 %and1, i32 %sub
+ ret i32 %sel
}
!1 = !{!"branch_weights", i32 1, i32 1, i32 3, i32 2 }
diff --git a/test/CodeGen/ARM/arm-position-independence.ll b/test/CodeGen/ARM/arm-position-independence.ll
index 02a63984ad6f..4aa817f7a481 100644
--- a/test/CodeGen/ARM/arm-position-independence.ll
+++ b/test/CodeGen/ARM/arm-position-independence.ll
@@ -13,6 +13,12 @@
; RUN: llc -relocation-model=rwpi -mtriple=thumbv6m--none-eabi < %s | FileCheck %s --check-prefix=CHECK --check-prefix=THUMB1_RO_ABS --check-prefix=THUMB1_RW_SB
; RUN: llc -relocation-model=ropi-rwpi -mtriple=thumbv6m--none-eabi < %s | FileCheck %s --check-prefix=CHECK --check-prefix=THUMB1_RO_PC --check-prefix=THUMB1_RW_SB
+; RUN: llc -relocation-model=rwpi -mtriple=armv7a--none-eabi -mattr=no-movt < %s | FileCheck %s --check-prefix=CHECK --check-prefix=NO_MOVT_ARM_RO_ABS --check-prefix=NO_MOVT_ARM_RW_SB
+; RUN: llc -relocation-model=ropi-rwpi -mtriple=armv7a--none-eabi -mattr=no-movt < %s | FileCheck %s --check-prefix=CHECK --check-prefix=NO_MOVT_ARM_RO_PC --check-prefix=NO_MOVT_ARM_RW_SB
+
+; RUN: llc -relocation-model=rwpi -mtriple=thumbv7m--none-eabi -mattr=no-movt < %s | FileCheck %s --check-prefix=CHECK --check-prefix=NO_MOVT_THUMB2_RO_ABS --check-prefix=NO_MOVT_THUMB2_RW_SB
+; RUN: llc -relocation-model=ropi-rwpi -mtriple=thumbv7m--none-eabi -mattr=no-movt < %s | FileCheck %s --check-prefix=CHECK --check-prefix=NO_MOVT_THUMB2_RO_PC --check-prefix=NO_MOVT_THUMB2_RW_SB
+
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
@a = external global i32, align 4
@@ -28,16 +34,24 @@ entry:
; ARM_RW_ABS: movt r[[REG]], :upper16:a
; ARM_RW_ABS: ldr r0, [r[[REG]]]
-; ARM_RW_SB: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; ARM_RW_SB: movw r[[REG:[0-9]]], :lower16:a(sbrel)
+; ARM_RW_SB: movt r[[REG]], :upper16:a(sbrel)
; ARM_RW_SB: ldr r0, [r9, r[[REG]]]
+; NO_MOVT_ARM_RW_SB: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; NO_MOVT_ARM_RW_SB: ldr r0, [r9, r[[REG]]]
+
; THUMB2_RW_ABS: movw r[[REG:[0-9]]], :lower16:a
; THUMB2_RW_ABS: movt r[[REG]], :upper16:a
; THUMB2_RW_ABS: ldr r0, [r[[REG]]]
-; THUMB2_RW_SB: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; THUMB2_RW_SB: movw r[[REG:[0-9]]], :lower16:a(sbrel)
+; THUMB2_RW_SB: movt r[[REG]], :upper16:a(sbrel)
; THUMB2_RW_SB: ldr.w r0, [r9, r[[REG]]]
+; NO_MOVT_THUMB2_RW_SB: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; NO_MOVT_THUMB2_RW_SB: ldr.w r0, [r9, r[[REG]]]
+
; THUMB1_RW_ABS: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
; THUMB1_RW_ABS: ldr r0, [r[[REG]]]
@@ -47,11 +61,11 @@ entry:
; CHECK: {{(bx lr|pop)}}
-; ARM_RW_SB: [[LCPI]]
-; ARM_RW_SB: .long a(sbrel)
+; NO_MOVT_ARM_RW_SB: [[LCPI]]
+; NO_MOVT_ARM_RW_SB: .long a(sbrel)
-; THUMB2_RW_SB: [[LCPI]]
-; THUMB2_RW_SB: .long a(sbrel)
+; NO_MOVT_THUMB2_RW_SB: [[LCPI]]
+; NO_MOVT_THUMB2_RW_SB: .long a(sbrel)
; THUMB1_RW_ABS: [[LCPI]]
; THUMB1_RW_ABS-NEXT: .long a
@@ -70,16 +84,24 @@ entry:
; ARM_RW_ABS: movt r[[REG]], :upper16:a
; ARM_RW_ABS: str r0, [r[[REG:[0-9]]]]
-; ARM_RW_SB: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
-; ARM_RW_SB: str r0, [r9, r[[REG]]]
+; ARM_RW_SB: movw r[[REG:[0-9]]], :lower16:a
+; ARM_RW_SB: movt r[[REG]], :upper16:a
+; ARM_RW_SB: str r0, [r9, r[[REG:[0-9]]]]
+
+; NO_MOVT_ARM_RW_SB: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; NO_MOVT_ARM_RW_SB: str r0, [r9, r[[REG]]]
; THUMB2_RW_ABS: movw r[[REG:[0-9]]], :lower16:a
; THUMB2_RW_ABS: movt r[[REG]], :upper16:a
; THUMB2_RW_ABS: str r0, [r[[REG]]]
-; THUMB2_RW_SB: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; THUMB2_RW_SB: movw r[[REG:[0-9]]], :lower16:a(sbrel)
+; THUMB2_RW_SB: movt r[[REG]], :upper16:a(sbrel)
; THUMB2_RW_SB: str.w r0, [r9, r[[REG]]]
+; NO_MOVT_THUMB2_RW_SB: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; NO_MOVT_THUMB2_RW_SB: str.w r0, [r9, r[[REG]]]
+
; THUMB1_RW_ABS: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
; THUMB1_RW_ABS: str r0, [r[[REG]]]
@@ -89,11 +111,11 @@ entry:
; CHECK: {{(bx lr|pop)}}
-; ARM_RW_SB: [[LCPI]]
-; ARM_RW_SB: .long a(sbrel)
+; NO_MOVT_ARM_RW_SB: [[LCPI]]
+; NO_MOVT_ARM_RW_SB: .long a(sbrel)
-; THUMB2_RW_SB: [[LCPI]]
-; THUMB2_RW_SB: .long a(sbrel)
+; NO_MOVT_THUMB2_RW_SB: [[LCPI]]
+; NO_MOVT_THUMB2_RW_SB: .long a(sbrel)
; THUMB1_RW_ABS: [[LCPI]]
; THUMB1_RW_ABS-NEXT: .long a
@@ -112,21 +134,37 @@ entry:
; ARM_RO_ABS: movt r[[reg]], :upper16:b
; ARM_RO_ABS: ldr r0, [r[[reg]]]
+; NO_MOVT_ARM_RO_ABS: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; NO_MOVT_ARM_RO_ABS: ldr r0, [r[[REG]]]
+
; ARM_RO_PC: movw r[[REG:[0-9]]], :lower16:(b-([[LPC:.LPC[0-9]+_[0-9]+]]+8))
; ARM_RO_PC: movt r[[REG]], :upper16:(b-([[LPC]]+8))
; ARM_RO_PC: [[LPC]]:
; ARM_RO_PC-NEXT: ldr r0, [pc, r[[REG]]]
+; NO_MOVT_ARM_RO_PC: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; NO_MOVT_ARM_RO_PC: [[LPC:.LPC[0-9]+_[0-9]+]]:
+; NO_MOVT_ARM_RO_PC: ldr r0, [pc, r[[REG]]]
+
; THUMB2_RO_ABS: movw r[[REG:[0-9]]], :lower16:b
; THUMB2_RO_ABS: movt r[[REG]], :upper16:b
; THUMB2_RO_ABS: ldr r0, [r[[REG]]]
+; NO_MOVT_THUMB2_RO_ABS: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; NO_MOVT_THUMB2_RO_ABS: ldr r0, [r[[REG]]]
+
; THUMB2_RO_PC: movw r[[REG:[0-9]]], :lower16:(b-([[LPC:.LPC[0-9]+_[0-9]+]]+4))
; THUMB2_RO_PC: movt r[[REG]], :upper16:(b-([[LPC]]+4))
; THUMB2_RO_PC: [[LPC]]:
; THUMB2_RO_PC-NEXT: add r[[REG]], pc
; THUMB2_RO_PC: ldr r0, [r[[REG]]]
+; NO_MOVT_THUMB2_RO_PC: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; NO_MOVT_THUMB2_RO_PC: [[LPC:.LPC[0-9]+_[0-9]+]]:
+; NO_MOVT_THUMB2_RO_PC-NEXT: add r[[REG]], pc
+; NO_MOVT_THUMB2_RO_PC: ldr r0, [r[[REG]]]
+
+
; THUMB1_RO_ABS: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
; THUMB1_RO_ABS: ldr r0, [r[[REG]]]
@@ -137,9 +175,21 @@ entry:
; CHECK: {{(bx lr|pop)}}
+; NO_MOVT_ARM_RO_ABS: [[LCPI]]
+; NO_MOVT_ARM_RO_ABS-NEXT: .long b
+
+; NO_MOVT_THUMB2_RO_ABS: [[LCPI]]
+; NO_MOVT_THUMB2_RO_ABS-NEXT: .long b
+
; THUMB1_RO_ABS: [[LCPI]]
; THUMB1_RO_ABS-NEXT: .long b
+; NO_MOVT_ARM_RO_PC: [[LCPI]]
+; NO_MOVT_ARM_RO_PC-NEXT: .long b-([[LPC]]+8)
+
+; NO_MOVT_THUMB2_RO_PC: [[LCPI]]
+; NO_MOVT_THUMB2_RO_PC-NEXT: .long b-([[LPC]]+4)
+
; THUMB1_RO_PC: [[LCPI]]
; THUMB1_RO_PC-NEXT: .long b-([[LPC]]+4)
}
@@ -152,15 +202,23 @@ entry:
; ARM_RW_ABS: movw r[[REG:[0-9]]], :lower16:a
; ARM_RW_ABS: movt r[[REG]], :upper16:a
-; ARM_RW_SB: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; ARM_RW_SB: movw r[[REG:[0-9]]], :lower16:a(sbrel)
+; ARM_RW_SB: movt r[[REG]], :upper16:a(sbrel)
; ARM_RW_SB: add r0, r9, r[[REG]]
+; NO_MOVT_ARM_RW_SB: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; NO_MOVT_ARM_RW_SB: add r0, r9, r[[REG]]
+
; THUMB2_RW_ABS: movw r[[REG:[0-9]]], :lower16:a
; THUMB2_RW_ABS: movt r[[REG]], :upper16:a
-; THUMB2_RW_SB: ldr r0, [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; THUMB2_RW_SB: movw r[[REG:[0-9]]], :lower16:a(sbrel)
+; THUMB2_RW_SB: movt r[[REG]], :upper16:a(sbrel)
; THUMB2_RW_SB: add r0, r9
+; NO_MOVT_THUMB2_RW_SB: ldr r0, [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; NO_MOVT_THUMB2_RW_SB: add r0, r9
+
; THUMB1_RW_ABS: ldr r0, [[LCPI:.LCPI[0-9]+_[0-9]+]]
; THUMB1_RW_SB: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
@@ -169,11 +227,11 @@ entry:
; CHECK: {{(bx lr|pop)}}
-; ARM_RW_SB: [[LCPI]]
-; ARM_RW_SB: .long a(sbrel)
+; NO_MOVT_ARM_RW_SB: [[LCPI]]
+; NO_MOVT_ARM_RW_SB: .long a(sbrel)
-; THUMB2_RW_SB: [[LCPI]]
-; THUMB2_RW_SB: .long a(sbrel)
+; NO_MOVT_THUMB2_RW_SB: [[LCPI]]
+; NO_MOVT_THUMB2_RW_SB: .long a(sbrel)
; THUMB1_RW_ABS: [[LCPI]]
; THUMB1_RW_ABS-NEXT: .long a
@@ -190,19 +248,31 @@ entry:
; ARM_RO_ABS: movw r[[REG:[0-9]]], :lower16:b
; ARM_RO_ABS: movt r[[REG]], :upper16:b
+; NO_MOVT_ARM_RO_ABS: ldr r0, [[LCPI:.LCPI[0-9]+_[0-9]+]]
+
; ARM_RO_PC: movw r[[REG:[0-9]]], :lower16:(b-([[LPC:.LPC[0-9]+_[0-9]+]]+8))
; ARM_RO_PC: movt r[[REG]], :upper16:(b-([[LPC]]+8))
; ARM_RO_PC: [[LPC]]:
; ARM_RO_PC-NEXT: add r0, pc, r[[REG:[0-9]]]
+; NO_MOVT_ARM_RO_PC: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; NO_MOVT_ARM_RO_PC: [[LPC:.LPC[0-9]+_[0-9]+]]:
+; NO_MOVT_ARM_RO_PC-NEXT: add r0, pc, r[[REG]]
+
; THUMB2_RO_ABS: movw r[[REG:[0-9]]], :lower16:b
; THUMB2_RO_ABS: movt r[[REG]], :upper16:b
+; NO_MOVT_THUMB2_RO_ABS: ldr r0, [[LCPI:.LCPI[0-9]+_[0-9]+]]
+
; THUMB2_RO_PC: movw r0, :lower16:(b-([[LPC:.LPC[0-9]+_[0-9]+]]+4))
; THUMB2_RO_PC: movt r0, :upper16:(b-([[LPC]]+4))
; THUMB2_RO_PC: [[LPC]]:
; THUMB2_RO_PC-NEXT: add r0, pc
+; NO_MOVT_THUMB2_RO_PC: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; NO_MOVT_THUMB2_RO_PC: [[LPC:.LPC[0-9]+_[0-9]+]]:
+; NO_MOVT_THUMB2_RO_PC-NEXT: add r[[REG]], pc
+
; THUMB1_RO_ABS: ldr r0, [[LCPI:.LCPI[0-9]+_[0-9]+]]
; THUMB1_RO_PC: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
@@ -211,9 +281,21 @@ entry:
; CHECK: {{(bx lr|pop)}}
+; NO_MOVT_ARM_RO_ABS: [[LCPI]]
+; NO_MOVT_ARM_RO_ABS-NEXT: .long b
+
+; NO_MOVT_THUMB2_RO_ABS: [[LCPI]]
+; NO_MOVT_THUMB2_RO_ABS-NEXT: .long b
+
; THUMB1_RO_ABS: [[LCPI]]
; THUMB1_RO_ABS-NEXT: .long b
+; NO_MOVT_ARM_RO_PC: [[LCPI]]
+; NO_MOVT_ARM_RO_PC-NEXT: .long b-([[LPC]]+8)
+
+; NO_MOVT_THUMB2_RO_PC: [[LCPI]]
+; NO_MOVT_THUMB2_RO_PC-NEXT: .long b-([[LPC]]+4)
+
; THUMB1_RO_PC: [[LCPI]]
; THUMB1_RO_PC-NEXT: .long b-([[LPC]]+4)
}
@@ -226,19 +308,31 @@ entry:
; ARM_RO_ABS: movw r[[REG:[0-9]]], :lower16:take_addr_func
; ARM_RO_ABS: movt r[[REG]], :upper16:take_addr_func
+; NO_MOVT_ARM_RO_ABS: ldr r0, [[LCPI:.LCPI[0-9]+_[0-9]+]]
+
; ARM_RO_PC: movw r[[REG:[0-9]]], :lower16:(take_addr_func-([[LPC:.LPC[0-9]+_[0-9]+]]+8))
; ARM_RO_PC: movt r[[REG]], :upper16:(take_addr_func-([[LPC]]+8))
; ARM_RO_PC: [[LPC]]:
; ARM_RO_PC-NEXT: add r0, pc, r[[REG:[0-9]]]
+; NO_MOVT_ARM_RO_PC: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; NO_MOVT_ARM_RO_PC: [[LPC:.LPC[0-9]+_[0-9]+]]:
+; NO_MOVT_ARM_RO_PC-NEXT: add r0, pc, r[[REG]]
+
; THUMB2_RO_ABS: movw r[[REG:[0-9]]], :lower16:take_addr_func
; THUMB2_RO_ABS: movt r[[REG]], :upper16:take_addr_func
+; NO_MOVT_THUMB2_RO_ABS: ldr r0, [[LCPI:.LCPI[0-9]+_[0-9]+]]
+
; THUMB2_RO_PC: movw r0, :lower16:(take_addr_func-([[LPC:.LPC[0-9]+_[0-9]+]]+4))
; THUMB2_RO_PC: movt r0, :upper16:(take_addr_func-([[LPC]]+4))
; THUMB2_RO_PC: [[LPC]]:
; THUMB2_RO_PC-NEXT: add r0, pc
+; NO_MOVT_THUMB2_RO_PC: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
+; NO_MOVT_THUMB2_RO_PC: [[LPC:.LPC[0-9]+_[0-9]+]]:
+; NO_MOVT_THUMB2_RO_PC-NEXT: add r[[REG]], pc
+
; THUMB1_RO_ABS: ldr r0, [[LCPI:.LCPI[0-9]+_[0-9]+]]
; THUMB1_RO_PC: ldr r[[REG:[0-9]]], [[LCPI:.LCPI[0-9]+_[0-9]+]]
@@ -247,9 +341,21 @@ entry:
; CHECK: {{(bx lr|pop)}}
+; NO_MOVT_ARM_RO_ABS: [[LCPI]]
+; NO_MOVT_ARM_RO_ABS-NEXT: .long take_addr_func
+
+; NO_MOVT_THUMB2_RO_ABS: [[LCPI]]
+; NO_MOVT_THUMB2_RO_ABS-NEXT: .long take_addr_func
+
; THUMB1_RO_ABS: [[LCPI]]
; THUMB1_RO_ABS-NEXT: .long take_addr_func
+; NO_MOVT_ARM_RO_PC: [[LCPI]]
+; NO_MOVT_ARM_RO_PC-NEXT: .long take_addr_func-([[LPC]]+8)
+
+; NO_MOVT_THUMB2_RO_PC: [[LCPI]]
+; NO_MOVT_THUMB2_RO_PC-NEXT: .long take_addr_func-([[LPC]]+4)
+
; THUMB1_RO_PC: [[LCPI]]
; THUMB1_RO_PC-NEXT: .long take_addr_func-([[LPC]]+4)
}
diff --git a/test/CodeGen/ARM/atomic-cmpxchg.ll b/test/CodeGen/ARM/atomic-cmpxchg.ll
index 364bd5d13691..e026bae361e1 100644
--- a/test/CodeGen/ARM/atomic-cmpxchg.ll
+++ b/test/CodeGen/ARM/atomic-cmpxchg.ll
@@ -24,14 +24,12 @@ entry:
; CHECK-THUMB-LABEL: test_cmpxchg_res_i8
; CHECK-THUMB: bl __sync_val_compare_and_swap_1
; CHECK-THUMB-NOT: mov [[R1:r[0-7]]], r0
-; CHECK-THUMB: push {r0}
-; CHECK-THUMB: pop {[[R1:r[0-7]]]}
+; CHECK-THUMB: movs [[R1:r[0-7]]], r0
; CHECK-THUMB: movs r0, #1
; CHECK-THUMB: movs [[R2:r[0-9]+]], #0
; CHECK-THUMB: cmp [[R1]], {{r[0-9]+}}
; CHECK-THUMB: beq
-; CHECK-THUMB: push {[[R2]]}
-; CHECK-THUMB: pop {r0}
+; CHECK-THUMB: movs r0, [[R2]]
; CHECK-ARMV6-LABEL: test_cmpxchg_res_i8:
; CHECK-ARMV6-NEXT: .fnstart
@@ -66,14 +64,14 @@ entry:
; CHECK-ARMV7-NEXT: [[HEAD:.LBB[0-9_]+]]:
; CHECK-ARMV7-NEXT: strexb [[SUCCESS:r[0-9]+]], r2, [r0]
; CHECK-ARMV7-NEXT: cmp [[SUCCESS]], #0
-; CHECK-ARMV7-NEXT: moveq [[RES:r[0-9]+]], #1
+; CHECK-ARMV7-NEXT: moveq r0, #1
; CHECK-ARMV7-NEXT: bxeq lr
; CHECK-ARMV7-NEXT: [[TRY]]:
-; CHECK-ARMV7-NEXT: ldrexb [[LD:r[0-9]+]], [r0]
-; CHECK-ARMV7-NEXT: cmp [[LD]], [[DESIRED]]
+; CHECK-ARMV7-NEXT: ldrexb [[SUCCESS]], [r0]
+; CHECK-ARMV7-NEXT: cmp [[SUCCESS]], r1
; CHECK-ARMV7-NEXT: beq [[HEAD]]
; CHECK-ARMV7-NEXT: clrex
-; CHECK-ARMV7-NEXT: mov [[RES]], #0
+; CHECK-ARMV7-NEXT: mov r0, #0
; CHECK-ARMV7-NEXT: bx lr
; CHECK-THUMBV7-LABEL: test_cmpxchg_res_i8:
diff --git a/test/CodeGen/ARM/atomic-op.ll b/test/CodeGen/ARM/atomic-op.ll
index e6a4949d53ce..23c4ccea4604 100644
--- a/test/CodeGen/ARM/atomic-op.ll
+++ b/test/CodeGen/ARM/atomic-op.ll
@@ -320,10 +320,10 @@ define i32 @test_cmpxchg_fail_order1(i32 *%addr, i32 %desired, i32 %new) {
; CHECK: strex [[SUCCESS:r[0-9]+]], r2, [r[[ADDR]]]
; CHECK: cmp [[SUCCESS]], #0
; CHECK: bne [[LOOP_BB]]
-; CHECK: b [[END_BB:\.?LBB[0-9]+_[0-9]+]]
+; CHECK: dmb ish
+; CHECK: bx lr
; CHECK: [[FAIL_BB]]:
; CHECK-NEXT: clrex
-; CHECK-NEXT: [[END_BB]]:
; CHECK: dmb ish
; CHECK: bx lr
diff --git a/test/CodeGen/ARM/atomic-ops-v8.ll b/test/CodeGen/ARM/atomic-ops-v8.ll
index 77b850bd617b..d1575ed12e4e 100644
--- a/test/CodeGen/ARM/atomic-ops-v8.ll
+++ b/test/CodeGen/ARM/atomic-ops-v8.ll
@@ -1045,20 +1045,21 @@ define i8 @test_atomic_cmpxchg_i8(i8 zeroext %wanted, i8 zeroext %new) nounwind
; function there.
; CHECK-ARM-NEXT: cmp r[[OLD]], r0
; CHECK-THUMB-NEXT: cmp r[[OLD]], r[[WANTED]]
-; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_4
; CHECK-NEXT: BB#2:
; As above, r1 is a reasonable guess.
; CHECK: strexb [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
-; CHECK-NEXT: b .LBB{{[0-9]+}}_4
-; CHECK-NEXT: .LBB{{[0-9]+}}_3:
-; CHECK-NEXT: clrex
+; CHECK-ARM: mov r0, r[[OLD]]
+; CHECK: bx lr
; CHECK-NEXT: .LBB{{[0-9]+}}_4:
+; CHECK-NEXT: clrex
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK-ARM: mov r0, r[[OLD]]
+; CHECK-ARM-NEXT: bx lr
ret i8 %old
}
@@ -1078,20 +1079,21 @@ define i16 @test_atomic_cmpxchg_i16(i16 zeroext %wanted, i16 zeroext %new) nounw
; function there.
; CHECK-ARM-NEXT: cmp r[[OLD]], r0
; CHECK-THUMB-NEXT: cmp r[[OLD]], r[[WANTED]]
-; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_4
; CHECK-NEXT: BB#2:
; As above, r1 is a reasonable guess.
; CHECK: stlexh [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
-; CHECK-NEXT: b .LBB{{[0-9]+}}_4
-; CHECK-NEXT: .LBB{{[0-9]+}}_3:
-; CHECK-NEXT: clrex
+; CHECK-ARM: mov r0, r[[OLD]]
+; CHECK: bx lr
; CHECK-NEXT: .LBB{{[0-9]+}}_4:
+; CHECK-NEXT: clrex
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK-ARM: mov r0, r[[OLD]]
+; CHECK-ARM-NEXT: bx lr
ret i16 %old
}
@@ -1110,20 +1112,21 @@ define void @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp r[[OLD]], r0
-; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_4
; CHECK-NEXT: BB#2:
; As above, r1 is a reasonable guess.
; CHECK: stlex [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
-; CHECK-NEXT: b .LBB{{[0-9]+}}_4
-; CHECK-NEXT: .LBB{{[0-9]+}}_3:
-; CHECK-NEXT: clrex
+; CHECK: str{{(.w)?}} r[[OLD]],
+; CHECK-NEXT: bx lr
; CHECK-NEXT: .LBB{{[0-9]+}}_4:
+; CHECK-NEXT: clrex
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK: str{{(.w)?}} r[[OLD]],
+; CHECK-ARM-NEXT: bx lr
ret void
}
@@ -1148,16 +1151,16 @@ define void @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
; CHECK-BE-DAG: eor{{(\.w)?}} [[MISMATCH_LO:r[0-9]+|lr]], [[OLD1]], r0
; CHECK-ARM-BE: orrs{{(\.w)?}} {{r[0-9]+}}, [[MISMATCH_HI]], [[MISMATCH_LO]]
; CHECK-THUMB-BE: orrs{{(\.w)?}} {{(r[0-9]+, )?}}[[MISMATCH_LO]], [[MISMATCH_HI]]
-; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_4
; CHECK-NEXT: BB#2:
; As above, r2, r3 is a reasonable guess.
; CHECK: strexd [[STATUS:r[0-9]+]], r2, r3, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
-; CHECK-NEXT: b .LBB{{[0-9]+}}_4
-; CHECK-NEXT: .LBB{{[0-9]+}}_3:
-; CHECK-NEXT: clrex
+; CHECK: strd [[OLD1]], [[OLD2]], [r[[ADDR]]]
+; CHECK-NEXT: pop
; CHECK-NEXT: .LBB{{[0-9]+}}_4:
+; CHECK-NEXT: clrex
; CHECK-NOT: dmb
; CHECK-NOT: mcr
diff --git a/test/CodeGen/ARM/bfi.ll b/test/CodeGen/ARM/bfi.ll
index 893fef3add7e..31eff16fcc3c 100644
--- a/test/CodeGen/ARM/bfi.ll
+++ b/test/CodeGen/ARM/bfi.ll
@@ -77,7 +77,7 @@ entry:
define i32 @f7(i32 %x, i32 %y) {
; CHECK-LABEL: f7:
-; CHECK: bfi r1, r0, #4, #1
+; CHECK: bfi r0, r2, #4, #1
%y2 = and i32 %y, 4294967040 ; 0xFFFFFF00
%and = and i32 %x, 4
%or = or i32 %y2, 16
@@ -88,8 +88,8 @@ define i32 @f7(i32 %x, i32 %y) {
define i32 @f8(i32 %x, i32 %y) {
; CHECK-LABEL: f8:
-; CHECK: bfi r1, r0, #4, #1
-; CHECK: bfi r1, r0, #5, #1
+; CHECK: bfi r0, r2, #4, #1
+; CHECK: bfi r0, r2, #5, #1
%y2 = and i32 %y, 4294967040 ; 0xFFFFFF00
%and = and i32 %x, 4
%or = or i32 %y2, 48
@@ -111,7 +111,7 @@ define i32 @f9(i32 %x, i32 %y) {
define i32 @f10(i32 %x, i32 %y) {
; CHECK-LABEL: f10:
-; CHECK: bfi r1, r0, #4, #2
+; CHECK: bfi r0, r2, #4, #2
%y2 = and i32 %y, 4294967040 ; 0xFFFFFF00
%and = and i32 %x, 4
%or = or i32 %y2, 32
@@ -128,7 +128,7 @@ define i32 @f10(i32 %x, i32 %y) {
define i32 @f11(i32 %x, i32 %y) {
; CHECK-LABEL: f11:
-; CHECK: bfi r1, r0, #4, #3
+; CHECK: bfi r0, r2, #4, #3
%y2 = and i32 %y, 4294967040 ; 0xFFFFFF00
%and = and i32 %x, 4
%or = or i32 %y2, 32
@@ -150,7 +150,7 @@ define i32 @f11(i32 %x, i32 %y) {
define i32 @f12(i32 %x, i32 %y) {
; CHECK-LABEL: f12:
-; CHECK: bfi r1, r0, #4, #1
+; CHECK: bfi r0, r2, #4, #1
%y2 = and i32 %y, 4294967040 ; 0xFFFFFF00
%and = and i32 %x, 4
%or = or i32 %y2, 16
diff --git a/test/CodeGen/ARM/bic.ll b/test/CodeGen/ARM/bic.ll
index 691f8be4ab66..8be59898bd0f 100644
--- a/test/CodeGen/ARM/bic.ll
+++ b/test/CodeGen/ARM/bic.ll
@@ -1,17 +1,24 @@
; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i32 @f1(i32 %a, i32 %b) {
+; CHECK-LABEL: f1:
+; CHECK: bic r0, r0, r1
%tmp = xor i32 %b, 4294967295
%tmp1 = and i32 %a, %tmp
ret i32 %tmp1
}
-; CHECK: bic r0, r0, r1
-
define i32 @f2(i32 %a, i32 %b) {
+; CHECK-LABEL: f2:
+; CHECK: bic r0, r0, r1
%tmp = xor i32 %b, 4294967295
%tmp1 = and i32 %tmp, %a
ret i32 %tmp1
}
-; CHECK: bic r0, r0, r1
+define i32 @f3(i32 %a) {
+; CHECK-LABEL: f3:
+; CHECK: bic r0, r0, #255
+ %tmp = and i32 %a, -256
+ ret i32 %tmp
+}
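+
+; In @f3, -256 is the bitwise complement of 255, so the mask lowers to bic
+; with the encodable #255 immediate rather than materializing -256.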
diff --git a/test/CodeGen/ARM/bool-ext-inc.ll b/test/CodeGen/ARM/bool-ext-inc.ll
new file mode 100644
index 000000000000..fe43f1b2ef93
--- /dev/null
+++ b/test/CodeGen/ARM/bool-ext-inc.ll
@@ -0,0 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=arm-eabi -mattr=neon | FileCheck %s
+
+define i32 @sext_inc(i1 zeroext %x) {
+; CHECK-LABEL: sext_inc:
+; CHECK: @ BB#0:
+; CHECK-NEXT: rsb r0, r0, #1
+; CHECK-NEXT: mov pc, lr
+ %ext = sext i1 %x to i32
+ %add = add i32 %ext, 1
+ ret i32 %add
+}
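+; For a boolean x, sext(x) + 1 == 1 - zext(x), which is why the sequence
+; above collapses to a single reverse-subtract of the zero-extended input.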
+
+define <4 x i32> @sext_inc_vec(<4 x i1> %x) {
+; CHECK-LABEL: sext_inc_vec:
+; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov.i32 q9, #0x1f
+; CHECK-NEXT: vmov.i32 q10, #0x1
+; CHECK-NEXT: vmovl.u16 q8, d16
+; CHECK-NEXT: vneg.s32 q9, q9
+; CHECK-NEXT: vshl.i32 q8, q8, #31
+; CHECK-NEXT: vshl.s32 q8, q8, q9
+; CHECK-NEXT: vadd.i32 q8, q8, q10
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
+ %ext = sext <4 x i1> %x to <4 x i32>
+ %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %add
+}
+
diff --git a/test/CodeGen/ARM/build-attributes.ll b/test/CodeGen/ARM/build-attributes.ll
index b1b3b46dce24..fc85a3a2e683 100644
--- a/test/CodeGen/ARM/build-attributes.ll
+++ b/test/CodeGen/ARM/build-attributes.ll
@@ -102,6 +102,10 @@
; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=+fp-only-sp -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-FAST
; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 | FileCheck %s --check-prefix=CORTEX-M7-DOUBLE
; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
+; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m23 | FileCheck %s --check-prefix=CORTEX-M23
+; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 | FileCheck %s --check-prefix=CORTEX-M33
+; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M33-FAST
+; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r4 | FileCheck %s --check-prefix=CORTEX-R4
; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r4f | FileCheck %s --check-prefix=CORTEX-R4F
; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 | FileCheck %s --check-prefix=CORTEX-R5
@@ -182,6 +186,8 @@
; ARMv7a
; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 | FileCheck %s --check-prefix=NO-STRICT-ALIGN
; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+strict-align | FileCheck %s --check-prefix=STRICT-ALIGN
+; ARMv7ve
+; RUN: llc < %s -mtriple=armv7ve-none-linux-gnueabi | FileCheck %s --check-prefix=V7VE
; ARMv7r
; RUN: llc < %s -mtriple=armv7r-none-linux-gnueabi -mcpu=cortex-r5 | FileCheck %s --check-prefix=NO-STRICT-ALIGN
; RUN: llc < %s -mtriple=armv7r-none-linux-gnueabi -mcpu=cortex-r5 -mattr=+strict-align | FileCheck %s --check-prefix=STRICT-ALIGN
@@ -210,6 +216,12 @@
; RUN: llc < %s -mtriple=arm-none-none-eabi -mcpu=cortex-r52 -mattr=-neon,+fp-only-sp,+d16 | FileCheck %s --check-prefix=ARMv8R --check-prefix=ARMv8R-SP
; RUN: llc < %s -mtriple=arm-none-none-eabi -mcpu=cortex-r52 | FileCheck %s --check-prefix=ARMv8R --check-prefix=ARMv8R-NEON
+; ARMv8-M
+; RUN: llc < %s -mtriple=thumbv8-none-none-eabi -mcpu=cortex-m23 | FileCheck %s --check-prefix=NO-STRICT-ALIGN
+; RUN: llc < %s -mtriple=thumbv8-none-none-eabi -mcpu=cortex-m23 -mattr=+strict-align | FileCheck %s --check-prefix=STRICT-ALIGN
+; RUN: llc < %s -mtriple=thumbv8-none-none-eabi -mcpu=cortex-m33 | FileCheck %s --check-prefix=NO-STRICT-ALIGN
+; RUN: llc < %s -mtriple=thumbv8-none-none-eabi -mcpu=cortex-m33 -mattr=+strict-align | FileCheck %s --check-prefix=STRICT-ALIGN
+
; XSCALE: .eabi_attribute 6, 5
; XSCALE: .eabi_attribute 8, 1
; XSCALE: .eabi_attribute 9, 1
@@ -369,6 +381,22 @@
; V7-FAST-NOT: .eabi_attribute 22
; V7-FAST: .eabi_attribute 23, 1
+; V7VE: .syntax unified
+; V7VE: .eabi_attribute 6, 10 @ Tag_CPU_arch
+; V7VE: .eabi_attribute 7, 65 @ Tag_CPU_arch_profile
+; V7VE: .eabi_attribute 8, 1 @ Tag_ARM_ISA_use
+; V7VE: .eabi_attribute 9, 2 @ Tag_THUMB_ISA_use
+; V7VE: .eabi_attribute 17, 1 @ Tag_ABI_PCS_GOT_use
+; V7VE: .eabi_attribute 20, 1 @ Tag_ABI_FP_denormal
+; V7VE: .eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions
+; V7VE: .eabi_attribute 23, 3 @ Tag_ABI_FP_number_model
+; V7VE: .eabi_attribute 24, 1 @ Tag_ABI_align_needed
+; V7VE: .eabi_attribute 25, 1 @ Tag_ABI_align_preserved
+; V7VE: .eabi_attribute 38, 1 @ Tag_ABI_FP_16bit_format
+; V7VE: .eabi_attribute 42, 1 @ Tag_MPextension_use
+; V7VE: .eabi_attribute 44, 2 @ Tag_DIV_use
+; V7VE: .eabi_attribute 68, 3 @ Tag_Virtualization_use
+
; V8: .syntax unified
; V8: .eabi_attribute 67, "2.09"
; V8: .eabi_attribute 6, 14
@@ -1310,6 +1338,55 @@
; CORTEX-A32-FAST-NOT: .eabi_attribute 22
; CORTEX-A32-FAST: .eabi_attribute 23, 1
+; CORTEX-M23: .cpu cortex-m23
+; CORTEX-M23: .eabi_attribute 6, 16
+; CORTEX-M23: .eabi_attribute 7, 77
+; CORTEX-M23: .eabi_attribute 8, 0
+; CORTEX-M23: .eabi_attribute 9, 3
+; CORTEX-M23: .eabi_attribute 17, 1
+;; We default to IEEE 754 compliance
+; CORTEX-M23-NOT: .eabi_attribute 19
+; CORTEX-M23: .eabi_attribute 20, 1
+; CORTEX-M23: .eabi_attribute 21, 1
+; CORTEX-M23: .eabi_attribute 23, 3
+; CORTEX-M23: .eabi_attribute 34, 1
+; CORTEX-M23: .eabi_attribute 24, 1
+; CORTEX-M23-NOT: .eabi_attribute 27
+; CORTEX-M23-NOT: .eabi_attribute 28
+; CORTEX-M23: .eabi_attribute 25, 1
+; CORTEX-M23: .eabi_attribute 38, 1
+; CORTEX-M23: .eabi_attribute 14, 0
+; CORTEX-M23-NOT: .eabi_attribute 44
+
+; CORTEX-M33: .cpu cortex-m33
+; CORTEX-M33: .eabi_attribute 6, 17
+; CORTEX-M33: .eabi_attribute 7, 77
+; CORTEX-M33: .eabi_attribute 8, 0
+; CORTEX-M33: .eabi_attribute 9, 3
+; CORTEX-M33: .fpu fpv5-sp-d16
+; CORTEX-M33: .eabi_attribute 17, 1
+;; We default to IEEE 754 compliance
+; CORTEX-M33-NOT: .eabi_attribute 19
+; CORTEX-M33: .eabi_attribute 20, 1
+; CORTEX-M33: .eabi_attribute 21, 1
+; CORTEX-M33: .eabi_attribute 23, 3
+; CORTEX-M33: .eabi_attribute 34, 1
+; CORTEX-M33: .eabi_attribute 24, 1
+; CORTEX-M33: .eabi_attribute 25, 1
+; CORTEX-M33: .eabi_attribute 27, 1
+; CORTEX-M33-NOT: .eabi_attribute 28
+; CORTEX-M33: .eabi_attribute 36, 1
+; CORTEX-M33: .eabi_attribute 38, 1
+; CORTEX-M33: .eabi_attribute 46, 1
+; CORTEX-M33-NOT: .eabi_attribute 44
+; CORTEX-M33: .eabi_attribute 14, 0
+
+; CORTEX-M33-FAST-NOT: .eabi_attribute 19
+; CORTEX-M33-FAST: .eabi_attribute 20, 2
+; CORTEX-M33-FAST-NOT: .eabi_attribute 21
+; CORTEX-M33-FAST-NOT: .eabi_attribute 22
+; CORTEX-M33-FAST: .eabi_attribute 23, 1
+
; CORTEX-A35: .cpu cortex-a35
; CORTEX-A35: .eabi_attribute 6, 14
; CORTEX-A35: .eabi_attribute 7, 65
diff --git a/test/CodeGen/ARM/cmp1-peephole-thumb.mir b/test/CodeGen/ARM/cmp1-peephole-thumb.mir
new file mode 100644
index 000000000000..5ace58fd0658
--- /dev/null
+++ b/test/CodeGen/ARM/cmp1-peephole-thumb.mir
@@ -0,0 +1,78 @@
+# RUN: llc -run-pass=peephole-opt %s -o - | FileCheck %s
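+#
+# Here the peephole cmp rewrite does fire: tMUL already defines %cpsr, so the
+# tCMPi8 of its result against 0 is redundant; the CHECK lines below verify
+# that the compare is removed.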
+
+--- |
+ ; ModuleID = '<stdin>'
+ source_filename = "<stdin>"
+ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+ target triple = "thumb-none--eabi"
+
+ define i32 @f(i32 %a, i32 %b) {
+ entry:
+ %mul = mul nsw i32 %b, %a
+ %cmp = icmp eq i32 %mul, 0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+ }
+
+...
+---
+name: f
+# CHECK-LABEL: name: f
+alignment: 1
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: tgpr }
+ - { id: 1, class: tgpr }
+ - { id: 2, class: tgpr }
+ - { id: 3, class: tgpr }
+ - { id: 4, class: tgpr }
+ - { id: 5, class: tgpr }
+liveins:
+ - { reg: '%r0', virtual-reg: '%0' }
+ - { reg: '%r1', virtual-reg: '%1' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+
+# CHECK: tMOVi8 1, 14, _
+# CHECK: tMOVi8 0, 14, _
+# CHECK: tMUL %1, %0, 14, _
+# CHECK-NOT: tCMPi8
+body: |
+ bb.0.entry:
+ successors: %bb.1.entry(0x40000000), %bb.2.entry(0x40000000)
+ liveins: %r0, %r1
+
+ %1 = COPY %r1
+ %0 = COPY %r0
+ %2, %cpsr = tMUL %1, %0, 14, _
+ %3, %cpsr = tMOVi8 1, 14, _
+ %4, %cpsr = tMOVi8 0, 14, _
+ tCMPi8 killed %2, 0, 14, _, implicit-def %cpsr
+ tBcc %bb.2.entry, 0, %cpsr
+
+ bb.1.entry:
+ successors: %bb.2.entry(0x80000000)
+
+
+ bb.2.entry:
+ %5 = PHI %4, %bb.1.entry, %3, %bb.0.entry
+ %r0 = COPY %5
+ tBX_RET 14, _, implicit %r0
+
+...
diff --git a/test/CodeGen/ARM/cmp2-peephole-thumb.mir b/test/CodeGen/ARM/cmp2-peephole-thumb.mir
new file mode 100644
index 000000000000..6e9ca70f1741
--- /dev/null
+++ b/test/CodeGen/ARM/cmp2-peephole-thumb.mir
@@ -0,0 +1,108 @@
+# RUN: llc -run-pass=peephole-opt %s -o - | FileCheck %s
+
+# Here we check that the peephole cmp rewrite is not triggered: a store
+# instruction sits between the tMUL and the tCMP, so the intervening code is
+# not just constant materializations that could be reordered out of the way.
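+#
+# A rough sketch of the shape being tested (cf. cmp1-peephole-thumb.mir,
+# where nothing but constants intervenes and the compare is deleted):
+#   %2, %cpsr = tMUL %0, %1, 14, _
+#   tSTRspi %2, ...             ; store sits between mul and compare
+#   tCMPi8 %2, 0, ...           ; must therefore be kept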
+
+--- |
+ ; ModuleID = 'cmp2-peephole-thumb.ll'
+ source_filename = "<stdin>"
+ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+ target triple = "thumb-none--eabi"
+
+ define i32 @g(i32 %a, i32 %b) {
+ entry:
+ %retval = alloca i32, align 4
+ %mul = alloca i32, align 4
+ %mul1 = mul nsw i32 %a, %b
+ store i32 %mul1, i32* %mul, align 4
+ %0 = load i32, i32* %mul, align 4
+ %cmp = icmp sle i32 %0, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+ if.then: ; preds = %entry
+ store i32 42, i32* %retval, align 4
+ br label %return
+
+ if.end: ; preds = %entry
+ store i32 1, i32* %retval, align 4
+ br label %return
+
+ return: ; preds = %if.end, %if.then
+ %1 = load i32, i32* %retval, align 4
+ ret i32 %1
+ }
+
+...
+---
+name: g
+# CHECK-LABEL: name: g
+alignment: 1
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: tgpr }
+ - { id: 1, class: tgpr }
+ - { id: 2, class: tgpr }
+ - { id: 3, class: tgpr }
+ - { id: 4, class: tgpr }
+ - { id: 5, class: tgpr }
+liveins:
+ - { reg: '%r0', virtual-reg: '%0' }
+ - { reg: '%r1', virtual-reg: '%1' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 4
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+stack:
+ - { id: 0, name: retval, offset: 0, size: 4, alignment: 4, local-offset: -4 }
+ - { id: 1, name: mul, offset: 0, size: 4, alignment: 4, local-offset: -8 }
+
+# CHECK: tMUL
+# CHECK-NEXT: tSTRspi
+# CHECK-NEXT: tCMPi8
+body: |
+ bb.0.entry:
+ successors: %bb.1.if.then(0x40000000), %bb.2.if.end(0x40000000)
+ liveins: %r0, %r1
+
+ %1 = COPY %r1
+ %0 = COPY %r0
+ %2, %cpsr = tMUL %0, %1, 14, _
+ tSTRspi %2, %stack.1.mul, 0, 14, _ :: (store 4 into %ir.mul)
+ tCMPi8 %2, 0, 14, _, implicit-def %cpsr
+ tBcc %bb.2.if.end, 12, %cpsr
+ tB %bb.1.if.then, 14, _
+
+ bb.1.if.then:
+ successors: %bb.3.return(0x80000000)
+
+ %4, %cpsr = tMOVi8 42, 14, _
+ tSTRspi killed %4, %stack.0.retval, 0, 14, _ :: (store 4 into %ir.retval)
+ tB %bb.3.return, 14, _
+
+ bb.2.if.end:
+ successors: %bb.3.return(0x80000000)
+
+ %3, %cpsr = tMOVi8 1, 14, _
+ tSTRspi killed %3, %stack.0.retval, 0, 14, _ :: (store 4 into %ir.retval)
+
+ bb.3.return:
+ %5 = tLDRspi %stack.0.retval, 0, 14, _ :: (dereferenceable load 4 from %ir.retval)
+ %r0 = COPY %5
+ tBX_RET 14, _, implicit %r0
+
+...
diff --git a/test/CodeGen/ARM/cmpxchg-weak.ll b/test/CodeGen/ARM/cmpxchg-weak.ll
index 4038528c91bc..0d5681aafbcb 100644
--- a/test/CodeGen/ARM/cmpxchg-weak.ll
+++ b/test/CodeGen/ARM/cmpxchg-weak.ll
@@ -13,14 +13,16 @@ define void @test_cmpxchg_weak(i32 *%addr, i32 %desired, i32 %new) {
; CHECK-NEXT: dmb ish
; CHECK-NEXT: strex [[SUCCESS:r[0-9]+]], r2, [r0]
; CHECK-NEXT: cmp [[SUCCESS]], #0
-; CHECK-NEXT: bne [[FAILBB:LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: beq [[SUCCESSBB:LBB[0-9]+_[0-9]+]]
; CHECK-NEXT: BB#2:
-; CHECK-NEXT: dmb ish
; CHECK-NEXT: str r3, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: [[LDFAILBB]]:
; CHECK-NEXT: clrex
-; CHECK-NEXT: [[FAILBB]]:
+; CHECK-NEXT: str r3, [r0]
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: [[SUCCESSBB]]:
+; CHECK-NEXT: dmb ish
; CHECK-NEXT: str r3, [r0]
; CHECK-NEXT: bx lr
diff --git a/test/CodeGen/ARM/constantpool-promote.ll b/test/CodeGen/ARM/constantpool-promote.ll
index fb1bdfd62fb7..8df7e100c051 100644
--- a/test/CodeGen/ARM/constantpool-promote.ll
+++ b/test/CodeGen/ARM/constantpool-promote.ll
@@ -1,10 +1,15 @@
-; RUN: llc -relocation-model=static < %s | FileCheck %s
-; RUN: llc -relocation-model=pic < %s | FileCheck %s
-; RUN: llc -relocation-model=ropi < %s | FileCheck %s
-; RUN: llc -relocation-model=rwpi < %s | FileCheck %s
-
-target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
-target triple = "armv7--linux-gnueabihf"
+; RUN: llc -mtriple armv7--linux-gnueabihf -relocation-model=static < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V7,CHECK-V7ARM
+; RUN: llc -mtriple armv7--linux-gnueabihf -relocation-model=pic < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V7,CHECK-V7ARM
+; RUN: llc -mtriple armv7--linux-gnueabihf -relocation-model=ropi < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V7,CHECK-V7ARM
+; RUN: llc -mtriple armv7--linux-gnueabihf -relocation-model=rwpi < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V7,CHECK-V7ARM
+; RUN: llc -mtriple thumbv7--linux-gnueabihf -relocation-model=static < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V7,CHECK-V7THUMB
+; RUN: llc -mtriple thumbv7--linux-gnueabihf -relocation-model=pic < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V7,CHECK-V7THUMB
+; RUN: llc -mtriple thumbv7--linux-gnueabihf -relocation-model=ropi < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V7,CHECK-V7THUMB
+; RUN: llc -mtriple thumbv7--linux-gnueabihf -relocation-model=rwpi < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V7,CHECK-V7THUMB
+; RUN: llc -mtriple thumbv6m--linux-gnueabihf -relocation-model=static < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V6M
+; RUN: llc -mtriple thumbv6m--linux-gnueabihf -relocation-model=pic < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V6M
+; RUN: llc -mtriple thumbv6m--linux-gnueabihf -relocation-model=ropi < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V6M
+; RUN: llc -mtriple thumbv6m--linux-gnueabihf -relocation-model=rwpi < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V6M
@.str = private unnamed_addr constant [2 x i8] c"s\00", align 1
@.str1 = private unnamed_addr constant [69 x i8] c"this string is far too long to fit in a literal pool by far and away\00", align 1
@@ -16,6 +21,7 @@ target triple = "armv7--linux-gnueabihf"
@.arr3 = private unnamed_addr constant [2 x i16*] [i16* null, i16* null], align 4
@.ptr = private unnamed_addr constant [2 x i16*] [i16* getelementptr inbounds ([2 x i16], [2 x i16]* @.arr2, i32 0, i32 0), i16* null], align 2
@.arr4 = private unnamed_addr constant [2 x i16] [i16 3, i16 4], align 16
+@.zerosize = private unnamed_addr constant [0 x i16] zeroinitializer, align 4
; CHECK-LABEL: @test1
; CHECK: adr r0, [[x:.*]]
@@ -134,18 +140,56 @@ define void @test9() #0 {
ret void
}
+; Ensure that zero-sized values are supported and not promoted.
+; CHECK-LABEL: @pr32130
+; CHECK-NOT: adr
+define void @pr32130() #0 {
+ tail call void @c(i16* getelementptr inbounds ([0 x i16], [0 x i16]* @.zerosize, i32 0, i32 0)) #2
+ ret void
+}
+
+; CHECK-LABEL: @test10
+; CHECK-V6M: adr r{{[0-9]*}}, [[x:.*]]
+; CHECK-V6M: [[x]]:
+; CHECK-V6M: .asciz "s\000\000"
+; CHECK-V7: ldrb{{(.w)?}} r{{[0-9]*}}, [[x:.*]]
+; CHECK-V7: [[x]]:
+; CHECK-V7: .asciz "s\000\000"
+define void @test10(i8* %a) local_unnamed_addr #0 {
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* %a, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str, i32 0, i32 0), i32 1, i32 1, i1 false)
+ ret void
+}
+
+; CHECK-LABEL: @test11
+; CHECK-V6M: adr r{{[0-9]*}}, [[x:.*]]
+; CHECK-V6M: [[x]]:
+; CHECK-V6M: .short 3
+; CHECK-V6M: .short 4
+; CHECK-V7THUMB: ldrh{{(.w)?}} r{{[0-9]*}}, [[x:.*]]
+; CHECK-V7THUMB: [[x]]:
+; CHECK-V7THUMB: .short 3
+; CHECK-V7THUMB: .short 4
+; CHECK-V7ARM: adr r{{[0-9]*}}, [[x:.*]]
+; CHECK-V7ARM: [[x]]:
+; CHECK-V7ARM: .short 3
+; CHECK-V7ARM: .short 4
+define void @test11(i16* %a) local_unnamed_addr #0 {
+ call void @llvm.memmove.p0i16.p0i16.i32(i16* %a, i16* getelementptr inbounds ([2 x i16], [2 x i16]* @.arr1, i32 0, i32 0), i32 2, i32 2, i1 false)
+ ret void
+}
+
declare void @b(i8*) #1
declare void @c(i16*) #1
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i32, i1)
+declare void @llvm.memmove.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1) local_unnamed_addr
+declare void @llvm.memmove.p0i16.p0i16.i32(i16*, i16*, i32, i32, i1) local_unnamed_addr
attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #2 = { nounwind }
!llvm.module.flags = !{!0, !1}
-!llvm.ident = !{!2}
!0 = !{i32 1, !"wchar_size", i32 4}
!1 = !{i32 1, !"min_enum_size", i32 4}
-!2 = !{!"Apple LLVM version 6.1.0 (clang-602.0.53) (based on LLVM 3.6.0svn)"}
diff --git a/test/CodeGen/ARM/debug-info-s16-reg.ll b/test/CodeGen/ARM/debug-info-s16-reg.ll
index 2987b9a2105a..197746c5f122 100644
--- a/test/CodeGen/ARM/debug-info-s16-reg.ll
+++ b/test/CodeGen/ARM/debug-info-s16-reg.ll
@@ -3,8 +3,6 @@
; Test dwarf reg no for s16
;CHECK: super-register DW_OP_regx
;CHECK-NEXT: 264
-;CHECK-NEXT: DW_OP_piece
-;CHECK-NEXT: 4
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
target triple = "thumbv7-apple-macosx10.6.7"
diff --git a/test/CodeGen/ARM/debug-info-sreg2.ll b/test/CodeGen/ARM/debug-info-sreg2.ll
index b31d1b7bed4f..094b10499788 100644
--- a/test/CodeGen/ARM/debug-info-sreg2.ll
+++ b/test/CodeGen/ARM/debug-info-sreg2.ll
@@ -10,7 +10,7 @@ target triple = "thumbv7-apple-macosx10.6.7"
; CHECK: 0x00000000: Beginning address offset:
; CHECK-NEXT: Ending address offset:
-; CHECK-NEXT: Location description: 90 {{.. .. .. .. $}}
+; CHECK-NEXT: Location description: 90 {{.. .. $}}
define void @_Z3foov() optsize ssp !dbg !1 {
entry:
diff --git a/test/CodeGen/ARM/div.ll b/test/CodeGen/ARM/div.ll
index 997f50760f3a..883731519755 100644
--- a/test/CodeGen/ARM/div.ll
+++ b/test/CodeGen/ARM/div.ll
@@ -10,12 +10,18 @@
; RUN: FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-HWDIV
; RUN: llc < %s -mtriple=arm-none-eabi -mcpu=cortex-a8 | \
; RUN: FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-EABI
+; RUN: llc < %s -mtriple=armv7ve-none-linux-gnu | \
+; RUN: FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-HWDIV
+; RUN: llc < %s -mtriple=thumbv7ve-none-linux-gnu | \
+; RUN: FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-HWDIV \
+; RUN: -check-prefix=CHECK-THUMB
define i32 @f1(i32 %a, i32 %b) {
entry:
; CHECK-LABEL: f1
; CHECK-SWDIV: __divsi3
+; CHECK-THUMB: .thumb_func
; CHECK-HWDIV: sdiv
; CHECK-EABI: __aeabi_idiv
@@ -28,6 +34,7 @@ entry:
; CHECK-LABEL: f2
; CHECK-SWDIV: __udivsi3
+; CHECK-THUMB: .thumb_func
; CHECK-HWDIV: udiv
; CHECK-EABI: __aeabi_uidiv
@@ -40,6 +47,7 @@ entry:
; CHECK-LABEL: f3
; CHECK-SWDIV: __modsi3
+; CHECK-THUMB: .thumb_func
; CHECK-HWDIV: sdiv
; CHECK-HWDIV: mls
@@ -55,6 +63,7 @@ entry:
; CHECK-LABEL: f4
; CHECK-SWDIV: __umodsi3
+; CHECK-THUMB: .thumb_func
; CHECK-HWDIV: udiv
; CHECK-HWDIV: mls
diff --git a/test/CodeGen/ARM/fast-isel-align.ll b/test/CodeGen/ARM/fast-isel-align.ll
index 701884e926a8..71cd73a4a25d 100644
--- a/test/CodeGen/ARM/fast-isel-align.ll
+++ b/test/CodeGen/ARM/fast-isel-align.ll
@@ -72,10 +72,10 @@ entry:
%4 = fcmp une float %3, 0.000000e+00
; ARM: ldr r[[R:[0-9]+]], [r0, #2]
; ARM: vmov s0, r[[R]]
-; ARM: vcmpe.f32 s0, #0
+; ARM: vcmp.f32 s0, #0
; THUMB: ldr.w r[[R:[0-9]+]], [r0, #2]
; THUMB: vmov s0, r[[R]]
-; THUMB: vcmpe.f32 s0, #0
+; THUMB: vcmp.f32 s0, #0
ret i1 %4
}
diff --git a/test/CodeGen/ARM/fast-isel-cmp-imm.ll b/test/CodeGen/ARM/fast-isel-cmp-imm.ll
index a9d7e4580638..543b6c285f3f 100644
--- a/test/CodeGen/ARM/fast-isel-cmp-imm.ll
+++ b/test/CodeGen/ARM/fast-isel-cmp-imm.ll
@@ -7,8 +7,8 @@ entry:
; ARM: t1a
; THUMB: t1a
%cmp = fcmp oeq float %a, 0.000000e+00
-; ARM: vcmpe.f32 s{{[0-9]+}}, #0
-; THUMB: vcmpe.f32 s{{[0-9]+}}, #0
+; ARM: vcmp.f32 s{{[0-9]+}}, #0
+; THUMB: vcmp.f32 s{{[0-9]+}}, #0
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
@@ -28,9 +28,9 @@ entry:
; THUMB: t1b
%cmp = fcmp oeq float %a, -0.000000e+00
; ARM: vldr
-; ARM: vcmpe.f32 s{{[0-9]+}}, s{{[0-9]+}}
+; ARM: vcmp.f32 s{{[0-9]+}}, s{{[0-9]+}}
; THUMB: vldr
-; THUMB: vcmpe.f32 s{{[0-9]+}}, s{{[0-9]+}}
+; THUMB: vcmp.f32 s{{[0-9]+}}, s{{[0-9]+}}
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
@@ -46,8 +46,8 @@ entry:
; ARM: t2a
; THUMB: t2a
%cmp = fcmp oeq double %a, 0.000000e+00
-; ARM: vcmpe.f64 d{{[0-9]+}}, #0
-; THUMB: vcmpe.f64 d{{[0-9]+}}, #0
+; ARM: vcmp.f64 d{{[0-9]+}}, #0
+; THUMB: vcmp.f64 d{{[0-9]+}}, #0
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
@@ -65,9 +65,9 @@ entry:
; THUMB: t2b
%cmp = fcmp oeq double %a, -0.000000e+00
; ARM: vldr
-; ARM: vcmpe.f64 d{{[0-9]+}}, d{{[0-9]+}}
+; ARM: vcmp.f64 d{{[0-9]+}}, d{{[0-9]+}}
; THUMB: vldr
-; THUMB: vcmpe.f64 d{{[0-9]+}}, d{{[0-9]+}}
+; THUMB: vcmp.f64 d{{[0-9]+}}, d{{[0-9]+}}
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
diff --git a/test/CodeGen/ARM/fold-stack-adjust.ll b/test/CodeGen/ARM/fold-stack-adjust.ll
index 442459bc0582..eb32ee54c095 100644
--- a/test/CodeGen/ARM/fold-stack-adjust.ll
+++ b/test/CodeGen/ARM/fold-stack-adjust.ll
@@ -135,7 +135,7 @@ define void @test_fold_point(i1 %tst) minsize {
; Important to check for beginning of basic block, because if it gets
; if-converted the test is probably no longer checking what it should.
-; CHECK: {{LBB[0-9]+_2}}:
+; CHECK: %end
; CHECK-NEXT: vpop {d7, d8}
; CHECK-NEXT: pop {r4, pc}
diff --git a/test/CodeGen/ARM/fp-only-sp.ll b/test/CodeGen/ARM/fp-only-sp.ll
new file mode 100644
index 000000000000..2c7b2acbde9c
--- /dev/null
+++ b/test/CodeGen/ARM/fp-only-sp.ll
@@ -0,0 +1,62 @@
+; RUN: llc -mtriple=thumbv7em-apple-macho -mcpu=cortex-m4 %s -o - -O0 | FileCheck %s
+; RUN: llc -mtriple=thumbv7em-apple-macho -mcpu=cortex-m4 %s -o - | FileCheck %s
+
+; Note: vldr and vstr really do have 64-bit variants even with fp-only-sp
+define void @test_load_store(double* %addr) {
+; CHECK-LABEL: test_load_store:
+; CHECK: vldr [[TMP:d[0-9]+]], [r0]
+; CHECK: vstr [[TMP]], [r0]
+ %val = load volatile double, double* %addr
+ store volatile double %val, double* %addr
+ ret void
+}
+
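+; Each test below operates on double, which fp-only-sp cannot hold in
+; hardware registers, so every operation is expected to lower to the
+; compiler-rt libcall named in its bl check line.
+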
+define void @test_cmp(double %l, double %r, i1* %addr.dst) {
+; CHECK-LABEL: test_cmp:
+; CHECK: bl ___eqdf2
+ %res = fcmp oeq double %l, %r
+ store i1 %res, i1* %addr.dst
+ ret void
+}
+
+define void @test_ext(float %in, double* %addr) {
+; CHECK-LABEL: test_ext:
+; CHECK: bl ___extendsfdf2
+ %res = fpext float %in to double
+ store double %res, double* %addr
+ ret void
+}
+
+define void @test_trunc(double %in, float* %addr) {
+; CHECK-LABEL: test_trunc:
+; CHECK: bl ___truncdfsf2
+ %res = fptrunc double %in to float
+ store float %res, float* %addr
+ ret void
+}
+
+define void @test_itofp(i32 %in, double* %addr) {
+; CHECK-LABEL: test_itofp:
+; CHECK: bl ___floatsidf
+ %res = sitofp i32 %in to double
+ store double %res, double* %addr
+ ret void
+}
+
+define i32 @test_fptoi(double* %addr) {
+; CHECK-LABEL: test_fptoi:
+; CHECK: bl ___fixunsdfsi
+ %val = load double, double* %addr
+ %res = fptoui double %val to i32
+ ret i32 %res
+}
+
+define void @test_binop(double* %addr) {
+; CHECK-LABEL: test_binop:
+; CHECK: bl ___adddf3
+ %in = load double, double* %addr
+ %res = fadd double %in, %in
+ store double %res, double* %addr
+ ret void
+}
diff --git a/test/CodeGen/ARM/fp16-promote.ll b/test/CodeGen/ARM/fp16-promote.ll
index 824123687287..9148ac109ae3 100644
--- a/test/CodeGen/ARM/fp16-promote.ll
+++ b/test/CodeGen/ARM/fp16-promote.ll
@@ -161,14 +161,14 @@ define void @test_select(half* %p, half* %q, i1 zeroext %c) #0 {
ret void
}
-; Test only two variants of fcmp. These get translated to f32 vcmpe
+; Test only two variants of fcmp. These get translated to f32 vcmp
; instructions anyway.
; CHECK-ALL-LABEL: test_fcmp_une:
; CHECK-FP16: vcvtb.f32.f16
; CHECK-FP16: vcvtb.f32.f16
; CHECK-LIBCALL: bl __aeabi_h2f
; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-VFP: vcmpe.f32
+; CHECK-VFP: vcmp.f32
; CHECK-NOVFP: bl __aeabi_fcmpeq
; CHECK-FP16: vmrs APSR_nzcv, fpscr
; CHECK-ALL: movw{{ne|eq}}
@@ -184,7 +184,7 @@ define i1 @test_fcmp_une(half* %p, half* %q) #0 {
; CHECK-FP16: vcvtb.f32.f16
; CHECK-LIBCALL: bl __aeabi_h2f
; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-VFP: vcmpe.f32
+; CHECK-VFP: vcmp.f32
; CHECK-NOVFP: bl __aeabi_fcmpeq
; CHECK-FP16: vmrs APSR_nzcv, fpscr
; CHECK-LIBCALL: movw{{ne|eq}}
@@ -597,7 +597,7 @@ define void @test_fma(half* %p, half* %q, half* %r) #0 {
; CHECK-FP16: vcvtb.f16.f32
; CHECK-LIBCALL-LABEL: test_fabs:
; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bfc
+; CHECK-LIBCALL: bic
; CHECK-LIBCALL: bl __aeabi_f2h
define void @test_fabs(half* %p) {
%a = load half, half* %p, align 2
@@ -643,10 +643,11 @@ define void @test_maxnum(half* %p, half* %q) #0 {
}
; CHECK-ALL-LABEL: test_minnan:
-; CHECK-FP16: vcvtb.f32.f16
+; CHECK-FP16: vmov.f32 s0, #1.000000e+00
; CHECK-FP16: vcvtb.f32.f16
; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP: vmov.f32 s{{[0-9]+}}, #1.000000e+00
+; CHECK-NOVFP: mov r{{[0-9]+}}, #1065353216
; CHECK-VFP: vmin.f32
; CHECK-NOVFP: bl __aeabi_fcmpge
; CHECK-FP16: vcvtb.f16.f32
@@ -660,10 +661,11 @@ define void @test_minnan(half* %p) #0 {
}
; CHECK-ALL-LABEL: test_maxnan:
+; CHECK-FP16: vmov.f32 s0, #1.000000e+00
; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-LIBCALL: bl __aeabi_h2f
; CHECK-LIBCALL: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP: vmov.f32 s0, #1.000000e+00
+; CHECK-NOVFP: mov r{{[0-9]+}}, #1065353216
; CHECK-VFP: vmax.f32
; CHECK-NOVFP: bl __aeabi_fcmple
; CHECK-FP16: vcvtb.f16.f32
@@ -685,7 +687,7 @@ define void @test_maxnan(half* %p) #0 {
; CHECK-LIBCALL: bl __aeabi_h2f
; CHECK-LIBCALL: bl __aeabi_h2f
; CHECK-VFP-LIBCALL: vbsl
-; CHECK-NOVFP: bfc
+; CHECK-NOVFP: bic
; CHECK-NOVFP: and
; CHECK-NOVFP: orr
; CHECK-LIBCALL: bl __aeabi_f2h
@@ -845,21 +847,15 @@ define void @test_insertelement(half* %p, <4 x half>* %q, i32 %i) #0 {
}
; CHECK-ALL-LABEL: test_extractelement:
+; CHECK-VFP: push {{{.*}}, lr}
; CHECK-VFP: sub sp, sp, #8
-; CHECK-VFP: ldrh
-; CHECK-VFP: ldrh
-; CHECK-VFP: orr
-; CHECK-VFP: str
-; CHECK-VFP: ldrh
-; CHECK-VFP: ldrh
-; CHECK-VFP: orr
-; CHECK-VFP: str
+; CHECK-VFP: ldrd
; CHECK-VFP: mov
; CHECK-VFP: orr
; CHECK-VFP: ldrh
; CHECK-VFP: strh
; CHECK-VFP: add sp, sp, #8
-; CHECK-VFP: bx lr
+; CHECK-VFP: pop {{{.*}}, pc}
; CHECK-NOVFP: ldrh
; CHECK-NOVFP: strh
; CHECK-NOVFP: ldrh
diff --git a/test/CodeGen/ARM/fp16-v3.ll b/test/CodeGen/ARM/fp16-v3.ll
index e26455e61e7f..a37f71d9ba88 100644
--- a/test/CodeGen/ARM/fp16-v3.ll
+++ b/test/CodeGen/ARM/fp16-v3.ll
@@ -4,7 +4,7 @@ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "armv7a--none-eabi"
; CHECK-LABEL: test_vec3:
-; CHECK-DAG: vcvtb.f32.f16 [[SREG1:s[0-9]+]],
+; CHECK-DAG: vmov.f32 [[SREG1:s[0-9]+]], #1.200000e+01
; CHECK-DAG: vcvt.f32.s32 [[SREG2:s[0-9]+]],
; CHECK-DAG: vcvtb.f16.f32 [[SREG3:s[0-9]+]], [[SREG2]]
; CHECK-DAG: vcvtb.f32.f16 [[SREG4:s[0-9]+]], [[SREG3]]
diff --git a/test/CodeGen/ARM/fpcmp-opt.ll b/test/CodeGen/ARM/fpcmp-opt.ll
index 45bb6d2f702d..a82854109450 100644
--- a/test/CodeGen/ARM/fpcmp-opt.ll
+++ b/test/CodeGen/ARM/fpcmp-opt.ll
@@ -10,7 +10,7 @@ entry:
; CHECK-LABEL: t1:
; CHECK: vldr [[S0:s[0-9]+]],
; CHECK: vldr [[S1:s[0-9]+]],
-; CHECK: vcmpe.f32 [[S1]], [[S0]]
+; CHECK: vcmp.f32 [[S1]], [[S0]]
; CHECK: vmrs APSR_nzcv, fpscr
; CHECK: beq
%0 = load float, float* %a
@@ -35,10 +35,10 @@ entry:
; CHECK-NOT: vldr
; CHECK: ldrd [[REG1:(r[0-9]+)]], [[REG2:(r[0-9]+)]], [r0]
; CHECK-NOT: b LBB
-; CHECK: bfc [[REG2]], #31, #1
+; CHECK: bic [[REG2]], [[REG2]], #-2147483648
; CHECK: cmp [[REG1]], #0
; CHECK: cmpeq [[REG2]], #0
-; CHECK-NOT: vcmpe.f32
+; CHECK-NOT: vcmp.f32
; CHECK-NOT: vmrs
; CHECK: bne
%0 = load double, double* %a
@@ -61,7 +61,7 @@ entry:
; CHECK: ldr [[REG3:(r[0-9]+)]], [r0]
; CHECK: mvn [[REG4:(r[0-9]+)]], #-2147483648
; CHECK: tst [[REG3]], [[REG4]]
-; CHECK-NOT: vcmpe.f32
+; CHECK-NOT: vcmp.f32
; CHECK-NOT: vmrs
; CHECK: bne
%0 = load float, float* %a
diff --git a/test/CodeGen/ARM/fpcmp.ll b/test/CodeGen/ARM/fpcmp.ll
index e3ffd45a396d..67326e000169 100644
--- a/test/CodeGen/ARM/fpcmp.ll
+++ b/test/CodeGen/ARM/fpcmp.ll
@@ -12,7 +12,7 @@ entry:
define i32 @f2(float %a) {
;CHECK-LABEL: f2:
-;CHECK: vcmpe.f32
+;CHECK: vcmp.f32
;CHECK: moveq
entry:
%tmp = fcmp oeq float %a, 1.000000e+00 ; <i1> [#uses=1]
@@ -52,7 +52,7 @@ entry:
define i32 @f6(float %a) {
;CHECK-LABEL: f6:
-;CHECK: vcmpe.f32
+;CHECK: vcmp.f32
;CHECK: movne
entry:
%tmp = fcmp une float %a, 1.000000e+00 ; <i1> [#uses=1]
diff --git a/test/CodeGen/ARM/fpcmp_ueq.ll b/test/CodeGen/ARM/fpcmp_ueq.ll
index c1696c9be1b7..698c7506cc59 100644
--- a/test/CodeGen/ARM/fpcmp_ueq.ll
+++ b/test/CodeGen/ARM/fpcmp_ueq.ll
@@ -17,7 +17,7 @@ entry:
; CHECK-ARMv4: moveq r0, #42
; CHECK-ARMv7-LABEL: f7:
-; CHECK-ARMv7: vcmpe.f32
+; CHECK-ARMv7: vcmp.f32
; CHECK-ARMv7: vmrs APSR_nzcv, fpscr
; CHECK-ARMv7: movweq
; CHECK-ARMv7-NOT: vmrs
diff --git a/test/CodeGen/ARM/fpscr-intrinsics.ll b/test/CodeGen/ARM/fpscr-intrinsics.ll
new file mode 100644
index 000000000000..64b97525febf
--- /dev/null
+++ b/test/CodeGen/ARM/fpscr-intrinsics.ll
@@ -0,0 +1,44 @@
+; RUN: llc < %s -O0 -mtriple=armv7-eabi -mcpu=cortex-a8 -mattr=+neon,+fp-armv8 | FileCheck %s
+; RUN: llc < %s -O3 -mtriple=armv7-eabi -mcpu=cortex-a8 -mattr=+neon,+fp-armv8 | FileCheck %s
+
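+; llvm.flt.rounds and llvm.arm.get.fpscr/llvm.arm.set.fpscr all access the
+; FPSCR status register, so each call should select to a single vmrs (read)
+; or vmsr (write), as the inline checks verify.
+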
+@a = common global double 0.000000e+00, align 8
+
+; Function Attrs: noinline nounwind uwtable
+define void @strtod() {
+entry:
+ ; CHECK: vmrs r{{[0-9]+}}, fpscr
+ %0 = call i32 @llvm.flt.rounds()
+ %tobool = icmp ne i32 %0, 0
+ br i1 %tobool, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ store double 5.000000e-01, double* @a, align 8
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @fn1(i32* nocapture %p) local_unnamed_addr {
+entry:
+ ; CHECK: vmrs r{{[0-9]+}}, fpscr
+ %0 = tail call i32 @llvm.arm.get.fpscr()
+ store i32 %0, i32* %p, align 4
+ ; CHECK: vmsr fpscr, r{{[0-9]+}}
+ tail call void @llvm.arm.set.fpscr(i32 1)
+ ; CHECK: vmrs r{{[0-9]+}}, fpscr
+ %1 = tail call i32 @llvm.arm.get.fpscr()
+ %arrayidx1 = getelementptr inbounds i32, i32* %p, i32 1
+ store i32 %1, i32* %arrayidx1, align 4
+ ret void
+}
+
+; Function Attrs: nounwind readonly
+declare i32 @llvm.arm.get.fpscr()
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.arm.set.fpscr(i32)
+
+; Function Attrs: nounwind
+declare i32 @llvm.flt.rounds()
diff --git a/test/CodeGen/ARM/gpr-paired-spill.ll b/test/CodeGen/ARM/gpr-paired-spill.ll
index ef3e5a54a2db..797b147d5d01 100644
--- a/test/CodeGen/ARM/gpr-paired-spill.ll
+++ b/test/CodeGen/ARM/gpr-paired-spill.ll
@@ -16,22 +16,22 @@ define void @foo(i64* %addr) {
; an LDMIA was created with both a FrameIndex and an offset, which
; is not allowed.
-; CHECK-WITH-LDRD: strd {{r[0-9]+}}, {{r[0-9]+}}, [sp, #8]
-; CHECK-WITH-LDRD: strd {{r[0-9]+}}, {{r[0-9]+}}, [sp]
+; CHECK-WITH-LDRD-DAG: strd {{r[0-9]+}}, {{r[0-9]+}}, [sp, #8]
+; CHECK-WITH-LDRD-DAG: strd {{r[0-9]+}}, {{r[0-9]+}}, [sp]
-; CHECK-WITH-LDRD: ldrd {{r[0-9]+}}, {{r[0-9]+}}, [sp, #8]
-; CHECK-WITH-LDRD: ldrd {{r[0-9]+}}, {{r[0-9]+}}, [sp]
+; CHECK-WITH-LDRD-DAG: ldrd {{r[0-9]+}}, {{r[0-9]+}}, [sp, #8]
+; CHECK-WITH-LDRD-DAG: ldrd {{r[0-9]+}}, {{r[0-9]+}}, [sp]
; We also want to ensure the register scavenger is working (i.e. an
; offset from sp can be generated), so we need two spills.
-; CHECK-WITHOUT-LDRD: add [[ADDRREG:[a-z0-9]+]], sp, #{{[0-9]+}}
-; CHECK-WITHOUT-LDRD: stm [[ADDRREG]], {r{{[0-9]+}}, r{{[0-9]+}}}
-; CHECK-WITHOUT-LDRD: stm sp, {r{{[0-9]+}}, r{{[0-9]+}}}
+; CHECK-WITHOUT-LDRD-DAG: add [[ADDRREG:[a-z0-9]+]], sp, #{{[0-9]+}}
+; CHECK-WITHOUT-LDRD-DAG: stm [[ADDRREG]], {r{{[0-9]+}}, r{{[0-9]+}}}
+; CHECK-WITHOUT-LDRD-DAG: stm sp, {r{{[0-9]+}}, r{{[0-9]+}}}
; In principle LLVM may have to recalculate the offset. At the moment
; it reuses the original though.
-; CHECK-WITHOUT-LDRD: ldm [[ADDRREG]], {r{{[0-9]+}}, r{{[0-9]+}}}
-; CHECK-WITHOUT-LDRD: ldm sp, {r{{[0-9]+}}, r{{[0-9]+}}}
+; CHECK-WITHOUT-LDRD-DAG: ldm [[ADDRREG]], {r{{[0-9]+}}, r{{[0-9]+}}}
+; CHECK-WITHOUT-LDRD-DAG: ldm sp, {r{{[0-9]+}}, r{{[0-9]+}}}
store volatile i64 %val1, i64* %addr
store volatile i64 %val2, i64* %addr
diff --git a/test/CodeGen/ARM/ifcvt10.ll b/test/CodeGen/ARM/ifcvt10.ll
index 5725a404c320..c7e18d35dbee 100644
--- a/test/CodeGen/ARM/ifcvt10.ll
+++ b/test/CodeGen/ARM/ifcvt10.ll
@@ -10,8 +10,6 @@ entry:
; CHECK: vpop {d8}
; CHECK-NOT: vpopne
; CHECK: pop {r7, pc}
-; CHECK: vpop {d8}
-; CHECK: pop {r7, pc}
br i1 undef, label %if.else, label %if.then
if.then: ; preds = %entry
diff --git a/test/CodeGen/ARM/illegal-bitfield-loadstore.ll b/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
new file mode 100644
index 000000000000..74117d3896bd
--- /dev/null
+++ b/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
@@ -0,0 +1,184 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=arm-eabi | FileCheck %s -check-prefix=LE
+; RUN: llc < %s -mtriple=armeb-eabi | FileCheck %s -check-prefix=BE
+
+define void @i24_or(i24* %a) {
+; LE-LABEL: i24_or:
+; LE: @ BB#0:
+; LE-NEXT: ldrh r1, [r0]
+; LE-NEXT: orr r1, r1, #384
+; LE-NEXT: strh r1, [r0]
+; LE-NEXT: mov pc, lr
+;
+; BE-LABEL: i24_or:
+; BE: @ BB#0:
+; BE-NEXT: ldrh r1, [r0]
+; BE-NEXT: ldrb r2, [r0, #2]
+; BE-NEXT: orr r1, r2, r1, lsl #8
+; BE-NEXT: orr r1, r1, #384
+; BE-NEXT: strb r1, [r0, #2]
+; BE-NEXT: lsr r1, r1, #8
+; BE-NEXT: strh r1, [r0]
+; BE-NEXT: mov pc, lr
+ %aa = load i24, i24* %a, align 1
+ %b = or i24 %aa, 384
+ store i24 %b, i24* %a, align 1
+ ret void
+}
+
+define void @i24_and_or(i24* %a) {
+; LE-LABEL: i24_and_or:
+; LE: @ BB#0:
+; LE-NEXT: ldrh r1, [r0]
+; LE-NEXT: mov r2, #16256
+; LE-NEXT: orr r2, r2, #49152
+; LE-NEXT: orr r1, r1, #384
+; LE-NEXT: and r1, r1, r2
+; LE-NEXT: strh r1, [r0]
+; LE-NEXT: mov pc, lr
+;
+; BE-LABEL: i24_and_or:
+; BE: @ BB#0:
+; BE-NEXT: mov r1, #128
+; BE-NEXT: strb r1, [r0, #2]
+; BE-NEXT: ldrh r1, [r0]
+; BE-NEXT: orr r1, r1, #1
+; BE-NEXT: strh r1, [r0]
+; BE-NEXT: mov pc, lr
+ %b = load i24, i24* %a, align 1
+ %c = and i24 %b, -128
+ %d = or i24 %c, 384
+ store i24 %d, i24* %a, align 1
+ ret void
+}
+
+define void @i24_insert_bit(i24* %a, i1 zeroext %bit) {
+; LE-LABEL: i24_insert_bit:
+; LE: @ BB#0:
+; LE-NEXT: ldrh r2, [r0]
+; LE-NEXT: mov r3, #255
+; LE-NEXT: orr r3, r3, #57088
+; LE-NEXT: and r2, r2, r3
+; LE-NEXT: orr r1, r2, r1, lsl #13
+; LE-NEXT: strh r1, [r0]
+; LE-NEXT: mov pc, lr
+;
+; BE-LABEL: i24_insert_bit:
+; BE: @ BB#0:
+; BE-NEXT: ldrh r2, [r0]
+; BE-NEXT: mov r3, #57088
+; BE-NEXT: orr r3, r3, #16711680
+; BE-NEXT: and r2, r3, r2, lsl #8
+; BE-NEXT: orr r1, r2, r1, lsl #13
+; BE-NEXT: lsr r1, r1, #8
+; BE-NEXT: strh r1, [r0]
+; BE-NEXT: mov pc, lr
+ %extbit = zext i1 %bit to i24
+ %b = load i24, i24* %a, align 1
+ %extbit.shl = shl nuw nsw i24 %extbit, 13
+ %c = and i24 %b, -8193
+ %d = or i24 %c, %extbit.shl
+ store i24 %d, i24* %a, align 1
+ ret void
+}
+
+define void @i56_or(i56* %a) {
+; LE-LABEL: i56_or:
+; LE: @ BB#0:
+; LE-NEXT: ldr r1, [r0]
+; LE-NEXT: orr r1, r1, #384
+; LE-NEXT: str r1, [r0]
+; LE-NEXT: mov pc, lr
+;
+; BE-LABEL: i56_or:
+; BE: @ BB#0:
+; BE-NEXT: mov r1, r0
+; BE-NEXT: ldr r12, [r0]
+; BE-NEXT: ldrh r2, [r1, #4]!
+; BE-NEXT: ldrb r3, [r1, #2]
+; BE-NEXT: orr r2, r3, r2, lsl #8
+; BE-NEXT: orr r2, r2, r12, lsl #24
+; BE-NEXT: orr r2, r2, #384
+; BE-NEXT: lsr r3, r2, #8
+; BE-NEXT: strb r2, [r1, #2]
+; BE-NEXT: strh r3, [r1]
+; BE-NEXT: bic r1, r12, #255
+; BE-NEXT: orr r1, r1, r2, lsr #24
+; BE-NEXT: str r1, [r0]
+; BE-NEXT: mov pc, lr
+ %aa = load i56, i56* %a
+ %b = or i56 %aa, 384
+ store i56 %b, i56* %a
+ ret void
+}
+
+define void @i56_and_or(i56* %a) {
+; LE-LABEL: i56_and_or:
+; LE: @ BB#0:
+; LE-NEXT: ldr r1, [r0]
+; LE-NEXT: orr r1, r1, #384
+; LE-NEXT: bic r1, r1, #127
+; LE-NEXT: str r1, [r0]
+; LE-NEXT: mov pc, lr
+;
+; BE-LABEL: i56_and_or:
+; BE: @ BB#0:
+; BE-NEXT: mov r1, r0
+; BE-NEXT: mov r3, #128
+; BE-NEXT: ldrh r2, [r1, #4]!
+; BE-NEXT: strb r3, [r1, #2]
+; BE-NEXT: lsl r2, r2, #8
+; BE-NEXT: ldr r12, [r0]
+; BE-NEXT: orr r2, r2, r12, lsl #24
+; BE-NEXT: orr r2, r2, #384
+; BE-NEXT: lsr r3, r2, #8
+; BE-NEXT: strh r3, [r1]
+; BE-NEXT: bic r1, r12, #255
+; BE-NEXT: orr r1, r1, r2, lsr #24
+; BE-NEXT: str r1, [r0]
+; BE-NEXT: mov pc, lr
+
+ %b = load i56, i56* %a, align 1
+ %c = and i56 %b, -128
+ %d = or i56 %c, 384
+ store i56 %d, i56* %a, align 1
+ ret void
+}
+
+define void @i56_insert_bit(i56* %a, i1 zeroext %bit) {
+; LE-LABEL: i56_insert_bit:
+; LE: @ BB#0:
+; LE-NEXT: ldr r2, [r0]
+; LE-NEXT: bic r2, r2, #8192
+; LE-NEXT: orr r1, r2, r1, lsl #13
+; LE-NEXT: str r1, [r0]
+; LE-NEXT: mov pc, lr
+;
+; BE-LABEL: i56_insert_bit:
+; BE: @ BB#0:
+; BE-NEXT: .save {r11, lr}
+; BE-NEXT: push {r11, lr}
+; BE-NEXT: mov r2, r0
+; BE-NEXT: ldr lr, [r0]
+; BE-NEXT: ldrh r12, [r2, #4]!
+; BE-NEXT: ldrb r3, [r2, #2]
+; BE-NEXT: orr r12, r3, r12, lsl #8
+; BE-NEXT: orr r3, r12, lr, lsl #24
+; BE-NEXT: bic r3, r3, #8192
+; BE-NEXT: orr r1, r3, r1, lsl #13
+; BE-NEXT: lsr r3, r1, #8
+; BE-NEXT: strh r3, [r2]
+; BE-NEXT: bic r2, lr, #255
+; BE-NEXT: orr r1, r2, r1, lsr #24
+; BE-NEXT: str r1, [r0]
+; BE-NEXT: pop {r11, lr}
+; BE-NEXT: mov pc, lr
+ %extbit = zext i1 %bit to i56
+ %b = load i56, i56* %a, align 1
+ %extbit.shl = shl nuw nsw i56 %extbit, 13
+ %c = and i56 %b, -8193
+ %d = or i56 %c, %extbit.shl
+ store i56 %d, i56* %a, align 1
+ ret void
+}
+
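Loads and stores of illegal types such as i24 and i56 rarely appear in handwritten IR; a minimal C sketch of one plausible origin (the struct layout is an assumption):

#include <stdint.h>

/* Hypothetical source for the i24 tests above: a packed 3-byte field.
 * Whether the frontend emits a true i24 access is ABI-dependent, but
 * packed sub-word bitfields are the usual way such types arise. */
struct S {
  uint32_t f : 24;
} __attribute__((packed));

void i24_or_like(struct S *s) {
  s->f |= 384; /* legalized into the ldrh/ldrb + strh/strb sequences above */
}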
diff --git a/test/CodeGen/ARM/indirectbr.ll b/test/CodeGen/ARM/indirectbr.ll
index d15ef14b4493..90defad43a7d 100644
--- a/test/CodeGen/ARM/indirectbr.ll
+++ b/test/CodeGen/ARM/indirectbr.ll
@@ -47,6 +47,7 @@ L3: ; preds = %L4, %bb2
br label %L2
L2: ; preds = %L3, %bb2
+; THUMB-LABEL: %L1.clone
; THUMB: muls
%res.2 = phi i32 [ %res.1, %L3 ], [ 1, %bb2 ] ; <i32> [#uses=1]
%phitmp = mul i32 %res.2, 6 ; <i32> [#uses=1]
diff --git a/test/CodeGen/ARM/interval-update-remat.ll b/test/CodeGen/ARM/interval-update-remat.ll
index 6391d4c29604..524e8a0aa491 100644
--- a/test/CodeGen/ARM/interval-update-remat.ll
+++ b/test/CodeGen/ARM/interval-update-remat.ll
@@ -109,7 +109,7 @@ _ZN7MessageD1Ev.exit: ; preds = %if.then.i.i.i.i, %i
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
declare %class.StaticSocketDataProvider.6.231.281.1306.2331* @_ZN24StaticSocketDataProviderC1EP13MockReadWritejS1_j(%class.StaticSocketDataProvider.6.231.281.1306.2331* returned, %struct.MockReadWrite.7.232.282.1307.2332*, i32, %struct.MockReadWrite.7.232.282.1307.2332*, i32) unnamed_addr
@@ -130,7 +130,7 @@ declare %class.Message.13.238.288.1313.2338* @_ZN7MessageC1Ev(%class.Message.13.
declare %class.AssertHelper.10.235.285.1310.2335* @_ZN12AssertHelperD1Ev(%class.AssertHelper.10.235.285.1310.2335* returned) unnamed_addr
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
declare void @_ZN18ClientSocketHandle5m_fn3IPiEEvRK25Trans_NS___1_basic_stringIciiE13scoped_refptr15RequestPriorityN16ClientSocketPool13RespectLimitsERiT_11BoundNetLog(%class.ClientSocketHandle.14.239.289.1314.2339*, %class.Trans_NS___1_basic_string.18.243.293.1318.2343* dereferenceable(12), %class.scoped_refptr.19.244.294.1319.2344*, i32, i32, i32* dereferenceable(4), i32*, %class.BoundNetLog.20.245.295.1320.2345*)
diff --git a/test/CodeGen/ARM/intrinsics-coprocessor.ll b/test/CodeGen/ARM/intrinsics-coprocessor.ll
index 8fea49b39fb6..5352471238f9 100644
--- a/test/CodeGen/ARM/intrinsics-coprocessor.ll
+++ b/test/CodeGen/ARM/intrinsics-coprocessor.ll
@@ -1,5 +1,4 @@
; RUN: llc < %s -mtriple=armv7-eabi -mcpu=cortex-a8 | FileCheck %s
-; RUN: llc < %s -march=thumb -mtriple=thumbv7-eabi -mcpu=cortex-a8 | FileCheck %s
define void @coproc(i8* %i) nounwind {
entry:
diff --git a/test/CodeGen/ARM/ldm-stm-i256.ll b/test/CodeGen/ARM/ldm-stm-i256.ll
new file mode 100644
index 000000000000..7b4151dabf6d
--- /dev/null
+++ b/test/CodeGen/ARM/ldm-stm-i256.ll
@@ -0,0 +1,38 @@
+; RUN: llc -mtriple=armv7--eabi -verify-machineinstrs < %s | FileCheck %s
+
+; Check the way we schedule/merge a bunch of loads and stores.
+; Originally test/CodeGen/ARM/2011-07-07-ScheduleDAGCrash.ll ; now
+; being used as a test of optimizations related to ldm/stm.
+
+; FIXME: We could merge more loads/stores with regalloc hints.
+; FIXME: Fix scheduling so we don't have 16 live registers.
+
+define void @f(i256* nocapture %a, i256* nocapture %b, i256* nocapture %cc, i256* nocapture %dd) nounwind uwtable noinline ssp {
+entry:
+ %c = load i256, i256* %cc
+ %d = load i256, i256* %dd
+ %add = add nsw i256 %c, %d
+ store i256 %add, i256* %a, align 8
+ %or = or i256 %c, 1606938044258990275541962092341162602522202993782792835301376
+ %add6 = add nsw i256 %or, %d
+ store i256 %add6, i256* %b, align 8
+ ret void
+ ; CHECK-DAG: ldm r3
+ ; CHECK-DAG: ldm r2
+ ; CHECK-DAG: ldr {{.*}}, [r3, #20]
+ ; CHECK-DAG: ldr {{.*}}, [r3, #16]
+ ; CHECK-DAG: ldr {{.*}}, [r3, #28]
+ ; CHECK-DAG: ldr {{.*}}, [r3, #24]
+ ; CHECK-DAG: ldr {{.*}}, [r2, #20]
+ ; CHECK-DAG: ldr {{.*}}, [r2, #16]
+ ; CHECK-DAG: ldr {{.*}}, [r2, #28]
+ ; CHECK-DAG: ldr {{.*}}, [r2, #24]
+ ; CHECK-DAG: stmib r0
+ ; CHECK-DAG: str {{.*}}, [r0]
+ ; CHECK-DAG: str {{.*}}, [r0, #24]
+ ; CHECK-DAG: str {{.*}}, [r0, #28]
+ ; CHECK-DAG: str {{.*}}, [r1]
+ ; CHECK-DAG: stmib r1
+ ; CHECK-DAG: str {{.*}}, [r1, #24]
+ ; CHECK-DAG: str {{.*}}, [r1, #28]
+}
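A minimal C sketch of the multi-word arithmetic the i256 add models, assuming a four-limb little-endian representation (all names hypothetical):

#include <stdint.h>

/* An i256 add as four 64-bit limbs: each limb pair is loaded, added
 * with carry, and stored, which is exactly the adjacent-word traffic
 * that ldm/stm merging targets. */
typedef struct { uint64_t w[4]; } u256;

void add_u256(u256 *dst, const u256 *c, const u256 *d) {
  unsigned carry = 0;
  for (int i = 0; i < 4; i++) {
    uint64_t s = c->w[i] + d->w[i];   /* limb sum */
    unsigned c1 = s < c->w[i];        /* carry out of the limb sum */
    dst->w[i] = s + carry;            /* add carry in */
    carry = c1 | (dst->w[i] < s);     /* carry out for the next limb */
  }
}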
diff --git a/test/CodeGen/ARM/ldrd.ll b/test/CodeGen/ARM/ldrd.ll
index 6a9e63f649c9..6981cfcb0855 100644
--- a/test/CodeGen/ARM/ldrd.ll
+++ b/test/CodeGen/ARM/ldrd.ll
@@ -80,7 +80,7 @@ return: ; preds = %bb, %entry
; CHECK-LABEL: Func1:
define void @Func1() nounwind ssp "no-frame-pointer-elim"="true" {
-entry:
+entry:
; A8: movw [[BASE:r[0-9]+]], :lower16:{{.*}}TestVar{{.*}}
; A8: movt [[BASE]], :upper16:{{.*}}TestVar{{.*}}
; A8: ldrd [[FIELD1:r[0-9]+]], [[FIELD2:r[0-9]+]], {{\[}}[[BASE]], #4]
@@ -88,12 +88,12 @@ entry:
; A8-NEXT: str [[FIELD1]], {{\[}}[[BASE]]{{\]}}
; CONSERVATIVE-NOT: ldrd
%orig_blocks = alloca [256 x i16], align 2
 %0 = bitcast [256 x i16]* %orig_blocks to i8*
- call void @llvm.lifetime.start(i64 512, i8* %0) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 512, i8* %0) nounwind
%tmp1 = load i32, i32* getelementptr inbounds (%struct.Test, %struct.Test* @TestVar, i32 0, i32 1), align 4
%tmp2 = load i32, i32* getelementptr inbounds (%struct.Test, %struct.Test* @TestVar, i32 0, i32 2), align 4
%add = add nsw i32 %tmp2, %tmp1
store i32 %add, i32* getelementptr inbounds (%struct.Test, %struct.Test* @TestVar, i32 0, i32 0), align 4
- call void @llvm.lifetime.end(i64 512, i8* %0) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 512, i8* %0) nounwind
ret void
}
@@ -189,5 +189,23 @@ define i32* @strd_postupdate_inc(i32* %p0, i32 %v0, i32 %v1) "no-frame-pointer-e
ret i32* %p1
}
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
+; CHECK-LABEL: ldrd_strd_aa:
+; NORMAL: ldrd [[TMP1:r[0-9]]], [[TMP2:r[0-9]]],
+; NORMAL: strd [[TMP1]], [[TMP2]],
+; CONSERVATIVE-NOT: ldrd
+; CONSERVATIVE-NOT: strd
+; CHECK: bx lr
+
+define void @ldrd_strd_aa(i32* noalias nocapture %x, i32* noalias nocapture readonly %y) {
+entry:
+ %0 = load i32, i32* %y, align 4
+ store i32 %0, i32* %x, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %y, i32 1
+ %1 = load i32, i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32, i32* %x, i32 1
+ store i32 %1, i32* %arrayidx3, align 4
+ ret void
+}
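A hedged C counterpart for @ldrd_strd_aa (the C shape is an assumption):

/* Copy two adjacent words; with restrict-qualified (noalias) pointers
 * the backend may pair the accesses into ldrd/strd, as the NORMAL
 * checks above expect. */
void ldrd_strd_aa_like(int *restrict x, const int *restrict y) {
  x[0] = y[0];
  x[1] = y[1];
}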
+
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
diff --git a/test/CodeGen/ARM/load-combine-big-endian.ll b/test/CodeGen/ARM/load-combine-big-endian.ll
new file mode 100644
index 000000000000..8d8a0136cf96
--- /dev/null
+++ b/test/CodeGen/ARM/load-combine-big-endian.ll
@@ -0,0 +1,779 @@
+; RUN: llc < %s -mtriple=armeb-unknown | FileCheck %s
+; RUN: llc < %s -mtriple=armv6eb-unknown | FileCheck %s --check-prefix=CHECK-ARMv6
+
+; i8* p; // p is 4 byte aligned
+; ((i32) p[0] << 24) | ((i32) p[1] << 16) | ((i32) p[2] << 8) | (i32) p[3]
+define i32 @load_i32_by_i8_big_endian(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_big_endian:
+; CHECK: ldr r0, [r0]
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_i8_big_endian:
+; CHECK-ARMv6: ldr r0, [r0]
+; CHECK-ARMv6-NEXT: bx lr
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 4
+ %tmp2 = zext i8 %tmp1 to i32
+ %tmp3 = shl nuw nsw i32 %tmp2, 24
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 16
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 8
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = or i32 %tmp13, %tmp16
+ ret i32 %tmp17
+}
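The idiom being folded here is the portable byte-assembly pattern; a minimal C equivalent (function name hypothetical):

#include <stdint.h>

/* C equivalent of load_i32_by_i8_big_endian: assemble a big-endian
 * 32-bit value byte by byte. On a big-endian target the DAG combiner
 * folds the four byte loads into one aligned 32-bit load (ldr). */
uint32_t load32_be(const uint8_t *p) {
  return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
         ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}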
+
+; i8* p; // p is 4 byte aligned
+; (i32) p[0] | ((i32) p[1] << 8) | ((i32) p[2] << 16) | ((i32) p[3] << 24)
+define i32 @load_i32_by_i8_bswap(i32* %arg) {
+; BSWAP is not supported by the 32-bit target
+; CHECK-LABEL: load_i32_by_i8_bswap:
+; CHECK: ldr r0, [r0]
+; CHECK: and
+; CHECK-NEXT: and
+; CHECK-NEXT: orr
+; CHECK-NEXT: orr
+; CHECK-NEXT: orr
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_i8_bswap:
+; CHECK-ARMv6: ldr r0, [r0]
+; CHECK-ARMv6-NEXT: rev r0, r0
+; CHECK-ARMv6-NEXT: bx lr
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp, align 4
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+; i8* p; // p is 4 byte aligned
+; ((i32) (((i16) p[0] << 8) | (i16) p[1]) << 16) | (i32) (((i16) p[2] << 8) | (i16) p[3])
+define i32 @load_i32_by_i16_by_i8_big_endian(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i16_by_i8_big_endian:
+; CHECK: ldr r0, [r0]
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_i16_by_i8_big_endian:
+; CHECK-ARMv6: ldr r0, [r0]
+; CHECK-ARMv6-NEXT: bx lr
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 4
+ %tmp2 = zext i8 %tmp1 to i16
+ %tmp3 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp4 = load i8, i8* %tmp3, align 1
+ %tmp5 = zext i8 %tmp4 to i16
+ %tmp6 = shl nuw nsw i16 %tmp2, 8
+ %tmp7 = or i16 %tmp6, %tmp5
+ %tmp8 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp9 = load i8, i8* %tmp8, align 1
+ %tmp10 = zext i8 %tmp9 to i16
+ %tmp11 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp12 = load i8, i8* %tmp11, align 1
+ %tmp13 = zext i8 %tmp12 to i16
+ %tmp14 = shl nuw nsw i16 %tmp10, 8
+ %tmp15 = or i16 %tmp14, %tmp13
+ %tmp16 = zext i16 %tmp7 to i32
+ %tmp17 = zext i16 %tmp15 to i32
+ %tmp18 = shl nuw nsw i32 %tmp16, 16
+ %tmp19 = or i32 %tmp18, %tmp17
+ ret i32 %tmp19
+}
+
+; i16* p; // p is 4 byte aligned
+; ((i32) p[0] << 16) | (i32) p[1]
+define i32 @load_i32_by_i16(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i16:
+; CHECK: ldr r0, [r0]
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_i16:
+; CHECK-ARMv6: ldr r0, [r0]
+; CHECK-ARMv6-NEXT: bx lr
+ %tmp = bitcast i32* %arg to i16*
+ %tmp1 = load i16, i16* %tmp, align 4
+ %tmp2 = zext i16 %tmp1 to i32
+ %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
+ %tmp4 = load i16, i16* %tmp3, align 1
+ %tmp5 = zext i16 %tmp4 to i32
+ %tmp6 = shl nuw nsw i32 %tmp2, 16
+ %tmp7 = or i32 %tmp6, %tmp5
+ ret i32 %tmp7
+}
+
+; i16* p_16; // p_16 is 4 byte aligned
+; i8* p_8 = (i8*) p_16;
+; ((i32) p_16[0] << 16) | ((i32) p_8[2] << 8) | (i32) p_8[3]
+define i32 @load_i32_by_i16_i8(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i16_i8:
+; CHECK: ldr r0, [r0]
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_i16_i8:
+; CHECK-ARMv6: ldr r0, [r0]
+; CHECK-ARMv6-NEXT: bx lr
+ %tmp = bitcast i32* %arg to i16*
+ %tmp1 = bitcast i32* %arg to i8*
+ %tmp2 = load i16, i16* %tmp, align 4
+ %tmp3 = zext i16 %tmp2 to i32
+ %tmp4 = shl nuw nsw i32 %tmp3, 16
+ %tmp5 = getelementptr inbounds i8, i8* %tmp1, i32 2
+ %tmp6 = load i8, i8* %tmp5, align 1
+ %tmp7 = zext i8 %tmp6 to i32
+ %tmp8 = shl nuw nsw i32 %tmp7, 8
+ %tmp9 = getelementptr inbounds i8, i8* %tmp1, i32 3
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = or i32 %tmp8, %tmp11
+ %tmp13 = or i32 %tmp12, %tmp4
+ ret i32 %tmp13
+}
+
+; i8* p; // p is 8 byte aligned
+; (i64) p[0] | ((i64) p[1] << 8) | ((i64) p[2] << 16) | ((i64) p[3] << 24) | ((i64) p[4] << 32) | ((i64) p[5] << 40) | ((i64) p[6] << 48) | ((i64) p[7] << 56)
+define i64 @load_i64_by_i8_bswap(i64* %arg) {
+; CHECK-LABEL: load_i64_by_i8_bswap:
+; CHECK: ldr{{.*}}r0
+; CHECK: ldr{{.*}}r0
+; CHECK: and
+; CHECK-NEXT: and
+; CHECK-NEXT: orr
+; CHECK-NEXT: orr
+; CHECK-NEXT: and
+; CHECK-NEXT: orr
+; CHECK-NEXT: and
+; CHECK-NEXT: orr
+; CHECK-NEXT: orr
+; CHECK-NEXT: orr
+; CHECK: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i64_by_i8_bswap:
+; CHECK-ARMv6: ldrd r2, r3, [r0]
+; CHECK-ARMv6: rev r0, r3
+; CHECK-ARMv6: rev r1, r2
+; CHECK-ARMv6: bx lr
+ %tmp = bitcast i64* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 8
+ %tmp2 = zext i8 %tmp1 to i64
+ %tmp3 = getelementptr inbounds i8, i8* %tmp, i64 1
+ %tmp4 = load i8, i8* %tmp3, align 1
+ %tmp5 = zext i8 %tmp4 to i64
+ %tmp6 = shl nuw nsw i64 %tmp5, 8
+ %tmp7 = or i64 %tmp6, %tmp2
+ %tmp8 = getelementptr inbounds i8, i8* %tmp, i64 2
+ %tmp9 = load i8, i8* %tmp8, align 1
+ %tmp10 = zext i8 %tmp9 to i64
+ %tmp11 = shl nuw nsw i64 %tmp10, 16
+ %tmp12 = or i64 %tmp7, %tmp11
+ %tmp13 = getelementptr inbounds i8, i8* %tmp, i64 3
+ %tmp14 = load i8, i8* %tmp13, align 1
+ %tmp15 = zext i8 %tmp14 to i64
+ %tmp16 = shl nuw nsw i64 %tmp15, 24
+ %tmp17 = or i64 %tmp12, %tmp16
+ %tmp18 = getelementptr inbounds i8, i8* %tmp, i64 4
+ %tmp19 = load i8, i8* %tmp18, align 1
+ %tmp20 = zext i8 %tmp19 to i64
+ %tmp21 = shl nuw nsw i64 %tmp20, 32
+ %tmp22 = or i64 %tmp17, %tmp21
+ %tmp23 = getelementptr inbounds i8, i8* %tmp, i64 5
+ %tmp24 = load i8, i8* %tmp23, align 1
+ %tmp25 = zext i8 %tmp24 to i64
+ %tmp26 = shl nuw nsw i64 %tmp25, 40
+ %tmp27 = or i64 %tmp22, %tmp26
+ %tmp28 = getelementptr inbounds i8, i8* %tmp, i64 6
+ %tmp29 = load i8, i8* %tmp28, align 1
+ %tmp30 = zext i8 %tmp29 to i64
+ %tmp31 = shl nuw nsw i64 %tmp30, 48
+ %tmp32 = or i64 %tmp27, %tmp31
+ %tmp33 = getelementptr inbounds i8, i8* %tmp, i64 7
+ %tmp34 = load i8, i8* %tmp33, align 1
+ %tmp35 = zext i8 %tmp34 to i64
+ %tmp36 = shl nuw i64 %tmp35, 56
+ %tmp37 = or i64 %tmp32, %tmp36
+ ret i64 %tmp37
+}
+
+; i8* p; // p is 8 byte aligned
+; ((i64) p[0] << 56) | ((i64) p[1] << 48) | ((i64) p[2] << 40) | ((i64) p[3] << 32) | ((i64) p[4] << 24) | ((i64) p[5] << 16) | ((i64) p[6] << 8) | (i64) p[7]
+define i64 @load_i64_by_i8(i64* %arg) {
+; CHECK-LABEL: load_i64_by_i8:
+; CHECK: ldr r2, [r0]
+; CHECK: ldr r1, [r0, #4]
+; CHECK: mov r0, r2
+; CHECK: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i64_by_i8:
+; CHECK-ARMv6: ldrd r0, r1, [r0]
+; CHECK-ARMv6: bx lr
+ %tmp = bitcast i64* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 8
+ %tmp2 = zext i8 %tmp1 to i64
+ %tmp3 = shl nuw i64 %tmp2, 56
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i64 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i64
+ %tmp7 = shl nuw nsw i64 %tmp6, 48
+ %tmp8 = or i64 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i64 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i64
+ %tmp12 = shl nuw nsw i64 %tmp11, 40
+ %tmp13 = or i64 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i64 3
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i64
+ %tmp17 = shl nuw nsw i64 %tmp16, 32
+ %tmp18 = or i64 %tmp13, %tmp17
+ %tmp19 = getelementptr inbounds i8, i8* %tmp, i64 4
+ %tmp20 = load i8, i8* %tmp19, align 1
+ %tmp21 = zext i8 %tmp20 to i64
+ %tmp22 = shl nuw nsw i64 %tmp21, 24
+ %tmp23 = or i64 %tmp18, %tmp22
+ %tmp24 = getelementptr inbounds i8, i8* %tmp, i64 5
+ %tmp25 = load i8, i8* %tmp24, align 1
+ %tmp26 = zext i8 %tmp25 to i64
+ %tmp27 = shl nuw nsw i64 %tmp26, 16
+ %tmp28 = or i64 %tmp23, %tmp27
+ %tmp29 = getelementptr inbounds i8, i8* %tmp, i64 6
+ %tmp30 = load i8, i8* %tmp29, align 1
+ %tmp31 = zext i8 %tmp30 to i64
+ %tmp32 = shl nuw nsw i64 %tmp31, 8
+ %tmp33 = or i64 %tmp28, %tmp32
+ %tmp34 = getelementptr inbounds i8, i8* %tmp, i64 7
+ %tmp35 = load i8, i8* %tmp34, align 1
+ %tmp36 = zext i8 %tmp35 to i64
+ %tmp37 = or i64 %tmp33, %tmp36
+ ret i64 %tmp37
+}
+
+; i8* p; // p[1] is 4 byte aligned
+; (i32) p[1] | ((i32) p[2] << 8) | ((i32) p[3] << 16) | ((i32) p[4] << 24)
+define i32 @load_i32_by_i8_nonzero_offset(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_nonzero_offset:
+; CHECK: ldr r0, [r0, #1]
+; CHECK-NEXT: mov r1, #65280
+; CHECK-NEXT: mov r2, #16711680
+; CHECK-NEXT: and r1, r1, r0, lsr #8
+; CHECK-NEXT: and r2, r2, r0, lsl #8
+; CHECK-NEXT: orr r1, r1, r0, lsr #24
+; CHECK-NEXT: orr r0, r2, r0, lsl #24
+; CHECK-NEXT: orr r0, r0, r1
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_i8_nonzero_offset:
+; CHECK-ARMv6: ldr r0, [r0, #1]
+; CHECK-ARMv6-NEXT: rev r0, r0
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 4
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 4
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+; i8* p; // p[-4] is 4 byte aligned
+; (i32) p[-4] | ((i32) p[-3] << 8) | ((i32) p[-2] << 16) | ((i32) p[-1] << 24)
+define i32 @load_i32_by_i8_neg_offset(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_neg_offset:
+; CHECK: ldr r0, [r0, #-4]
+; CHECK-NEXT: mov r1, #65280
+; CHECK-NEXT: mov r2, #16711680
+; CHECK-NEXT: and r1, r1, r0, lsr #8
+; CHECK-NEXT: and r2, r2, r0, lsl #8
+; CHECK-NEXT: orr r1, r1, r0, lsr #24
+; CHECK-NEXT: orr r0, r2, r0, lsl #24
+; CHECK-NEXT: orr r0, r0, r1
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_i8_neg_offset:
+; CHECK-ARMv6: ldr r0, [r0, #-4]
+; CHECK-ARMv6-NEXT: rev r0, r0
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 -4
+ %tmp2 = load i8, i8* %tmp1, align 4
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 -3
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 -2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 -1
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+; i8* p; // p[1] is 4 byte aligned
+; (i32) p[4] | ((i32) p[3] << 8) | ((i32) p[2] << 16) | ((i32) p[1] << 24)
+define i32 @load_i32_by_i8_nonzero_offset_bswap(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_nonzero_offset_bswap:
+; CHECK: ldr r0, [r0, #1]
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_i8_nonzero_offset_bswap:
+; CHECK-ARMv6: ldr r0, [r0, #1]
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 4
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp15 = load i8, i8* %tmp14, align 4
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+; i8* p; // p[-4] is 4 byte aligned
+; (i32) p[-1] | ((i32) p[-2] << 8) | ((i32) p[-3] << 16) | ((i32) p[-4] << 24)
+define i32 @load_i32_by_i8_neg_offset_bswap(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_neg_offset_bswap:
+; CHECK: ldr r0, [r0, #-4]
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_i8_neg_offset_bswap:
+; CHECK-ARMv6: ldr r0, [r0, #-4]
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 -1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 -2
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 -3
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 -4
+ %tmp15 = load i8, i8* %tmp14, align 4
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+declare i16 @llvm.bswap.i16(i16)
+
+; i16* p; // p is 4 byte aligned
+; (i32) bswap(p[0]) | ((i32) bswap(p[1]) << 16)
+define i32 @load_i32_by_bswap_i16(i32* %arg) {
+; CHECK-LABEL: load_i32_by_bswap_i16:
+; CHECK: ldr r0, [r0]
+; CHECK-NEXT: mov r1, #65280
+; CHECK-NEXT: mov r2, #16711680
+; CHECK-NEXT: and r1, r1, r0, lsr #8
+; CHECK-NEXT: and r2, r2, r0, lsl #8
+; CHECK-NEXT: orr r1, r1, r0, lsr #24
+; CHECK-NEXT: orr r0, r2, r0, lsl #24
+; CHECK-NEXT: orr r0, r0, r1
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_bswap_i16:
+; CHECK-ARMv6: ldr r0, [r0]
+; CHECK-ARMv6-NEXT: rev r0, r0
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i16*
+ %tmp1 = load i16, i16* %tmp, align 4
+ %tmp11 = call i16 @llvm.bswap.i16(i16 %tmp1)
+ %tmp2 = zext i16 %tmp11 to i32
+ %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
+ %tmp4 = load i16, i16* %tmp3, align 1
+ %tmp41 = call i16 @llvm.bswap.i16(i16 %tmp4)
+ %tmp5 = zext i16 %tmp41 to i32
+ %tmp6 = shl nuw nsw i32 %tmp5, 16
+ %tmp7 = or i32 %tmp6, %tmp2
+ ret i32 %tmp7
+}
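A C sketch of the bswapped-halfword pattern (function name hypothetical; __builtin_bswap16 is the clang/GCC builtin behind llvm.bswap.i16):

#include <stdint.h>

/* C shape of load_i32_by_bswap_i16: two byte-swapped i16 loads combine
 * into one 32-bit load plus a full byte swap, i.e. ldr + rev on ARMv6,
 * or ldr plus the shift/mask sequence checked above on older cores. */
uint32_t load_two_bswapped_u16(const uint16_t *p) {
  return (uint32_t)__builtin_bswap16(p[0]) |
         ((uint32_t)__builtin_bswap16(p[1]) << 16);
}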
+
+; i16* p; // p is 4 byte aligned
+; (i32) p[1] | ((sext(p[0]) to i32) << 16)
+define i32 @load_i32_by_sext_i16(i32* %arg) {
+; CHECK-LABEL: load_i32_by_sext_i16:
+; CHECK: ldr r0, [r0]
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: load_i32_by_sext_i16:
+; CHECK-ARMv6: ldr r0, [r0]
+; CHECK-ARMv6-NEXT: bx lr
+ %tmp = bitcast i32* %arg to i16*
+ %tmp1 = load i16, i16* %tmp, align 4
+ %tmp2 = sext i16 %tmp1 to i32
+ %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
+ %tmp4 = load i16, i16* %tmp3, align 1
+ %tmp5 = zext i16 %tmp4 to i32
+ %tmp6 = shl nuw nsw i32 %tmp2, 16
+ %tmp7 = or i32 %tmp6, %tmp5
+ ret i32 %tmp7
+}
+
+; i8* arg; i32 i;
+; p = arg + 12;
+; (i32) p[i] | ((i32) p[i + 1] << 8) | ((i32) p[i + 2] << 16) | ((i32) p[i + 3] << 24)
+define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) {
+; CHECK-LABEL: load_i32_by_i8_base_offset_index:
+; CHECK: add r0, r0, r1
+; CHECK-NEXT: mov r1, #65280
+; CHECK-NEXT: mov r2, #16711680
+; CHECK-NEXT: ldr r0, [r0, #12]
+; CHECK-NEXT: and r1, r1, r0, lsr #8
+; CHECK-NEXT: and r2, r2, r0, lsl #8
+; CHECK-NEXT: orr r1, r1, r0, lsr #24
+; CHECK-NEXT: orr r0, r2, r0, lsl #24
+; CHECK-NEXT: orr r0, r0, r1
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: load_i32_by_i8_base_offset_index:
+; CHECK-ARMv6: add r0, r0, r1
+; CHECK-ARMv6-NEXT: ldr r0, [r0, #12]
+; CHECK-ARMv6-NEXT: rev r0, r0
+; CHECK-ARMv6-NEXT: bx lr
+ %tmp = add nuw nsw i32 %i, 3
+ %tmp2 = add nuw nsw i32 %i, 2
+ %tmp3 = add nuw nsw i32 %i, 1
+ %tmp4 = getelementptr inbounds i8, i8* %arg, i64 12
+ %tmp5 = zext i32 %i to i64
+ %tmp6 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp5
+ %tmp7 = load i8, i8* %tmp6, align 4
+ %tmp8 = zext i8 %tmp7 to i32
+ %tmp9 = zext i32 %tmp3 to i64
+ %tmp10 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp9
+ %tmp11 = load i8, i8* %tmp10, align 1
+ %tmp12 = zext i8 %tmp11 to i32
+ %tmp13 = shl nuw nsw i32 %tmp12, 8
+ %tmp14 = or i32 %tmp13, %tmp8
+ %tmp15 = zext i32 %tmp2 to i64
+ %tmp16 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp15
+ %tmp17 = load i8, i8* %tmp16, align 1
+ %tmp18 = zext i8 %tmp17 to i32
+ %tmp19 = shl nuw nsw i32 %tmp18, 16
+ %tmp20 = or i32 %tmp14, %tmp19
+ %tmp21 = zext i32 %tmp to i64
+ %tmp22 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp21
+ %tmp23 = load i8, i8* %tmp22, align 1
+ %tmp24 = zext i8 %tmp23 to i32
+ %tmp25 = shl nuw i32 %tmp24, 24
+ %tmp26 = or i32 %tmp20, %tmp25
+ ret i32 %tmp26
+}
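A C sketch of the base-plus-offset-plus-index shape (function name hypothetical):

#include <stdint.h>

/* C shape of load_i32_by_i8_base_offset_index: four adjacent bytes at
 * arg + 12 + i. The combiner emits a single ldr from the adjusted
 * address, followed by rev (ARMv6) or the shift/mask sequence checked
 * above (pre-v6) on this big-endian target. */
uint32_t load32_at(const uint8_t *arg, uint32_t i) {
  const uint8_t *p = arg + 12;
  return (uint32_t)p[i] | ((uint32_t)p[i + 1] << 8) |
         ((uint32_t)p[i + 2] << 16) | ((uint32_t)p[i + 3] << 24);
}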
+
+; i8* arg; i32 i;
+; p = arg + 12;
+; (i32) p[i + 1] | ((i32) p[i + 2] << 8) | ((i32) p[i + 3] << 16) | ((i32) p[i + 4] << 24)
+define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
+; CHECK-LABEL: load_i32_by_i8_base_offset_index_2:
+; CHECK: add r0, r0, r1
+; CHECK-NEXT: mov r1, #65280
+; CHECK-NEXT: mov r2, #16711680
+; CHECK-NEXT: ldr r0, [r0, #13]
+; CHECK-NEXT: and r1, r1, r0, lsr #8
+; CHECK-NEXT: and r2, r2, r0, lsl #8
+; CHECK-NEXT: orr r1, r1, r0, lsr #24
+; CHECK-NEXT: orr r0, r2, r0, lsl #24
+; CHECK-NEXT: orr r0, r0, r1
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: load_i32_by_i8_base_offset_index_2:
+; CHECK-ARMv6: add r0, r0, r1
+; CHECK-ARMv6-NEXT: ldr r0, [r0, #13]
+; CHECK-ARMv6-NEXT: rev r0, r0
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = add nuw nsw i32 %i, 4
+ %tmp2 = add nuw nsw i32 %i, 3
+ %tmp3 = add nuw nsw i32 %i, 2
+ %tmp4 = getelementptr inbounds i8, i8* %arg, i64 12
+ %tmp5 = add nuw nsw i32 %i, 1
+ %tmp27 = zext i32 %tmp5 to i64
+ %tmp28 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp27
+ %tmp29 = load i8, i8* %tmp28, align 4
+ %tmp30 = zext i8 %tmp29 to i32
+ %tmp31 = zext i32 %tmp3 to i64
+ %tmp32 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp31
+ %tmp33 = load i8, i8* %tmp32, align 1
+ %tmp34 = zext i8 %tmp33 to i32
+ %tmp35 = shl nuw nsw i32 %tmp34, 8
+ %tmp36 = or i32 %tmp35, %tmp30
+ %tmp37 = zext i32 %tmp2 to i64
+ %tmp38 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp37
+ %tmp39 = load i8, i8* %tmp38, align 1
+ %tmp40 = zext i8 %tmp39 to i32
+ %tmp41 = shl nuw nsw i32 %tmp40, 16
+ %tmp42 = or i32 %tmp36, %tmp41
+ %tmp43 = zext i32 %tmp to i64
+ %tmp44 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp43
+ %tmp45 = load i8, i8* %tmp44, align 1
+ %tmp46 = zext i8 %tmp45 to i32
+ %tmp47 = shl nuw i32 %tmp46, 24
+ %tmp48 = or i32 %tmp42, %tmp47
+ ret i32 %tmp48
+}
+
+; i8* p; // p is 2 byte aligned
+; (i32) p[0] | ((i32) p[1] << 8)
+define i32 @zext_load_i32_by_i8(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8:
+; CHECK: ldrb r1, [r0]
+; CHECK-NEXT: ldrb r0, [r0, #1]
+; CHECK-NEXT: orr r0, r1, r0, lsl #8
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: zext_load_i32_by_i8:
+; CHECK-ARMv6: ldrb r1, [r0]
+; CHECK-ARMv6-NEXT: ldrb r0, [r0, #1]
+; CHECK-ARMv6-NEXT: orr r0, r1, r0, lsl #8
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp1, align 2
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ ret i32 %tmp8
+}
+
+; i8* p; // p is 2 byte aligned
+; ((i32) p[0] << 8) | ((i32) p[1] << 16)
+define i32 @zext_load_i32_by_i8_shl_8(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_shl_8:
+; CHECK: ldrb r1, [r0]
+; CHECK-NEXT: ldrb r0, [r0, #1]
+; CHECK-NEXT: lsl r0, r0, #16
+; CHECK-NEXT: orr r0, r0, r1, lsl #8
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: zext_load_i32_by_i8_shl_8:
+; CHECK-ARMv6: ldrb r1, [r0]
+; CHECK-ARMv6-NEXT: ldrb r0, [r0, #1]
+; CHECK-ARMv6-NEXT: lsl r0, r0, #16
+; CHECK-ARMv6-NEXT: orr r0, r0, r1, lsl #8
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp1, align 2
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 8
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 16
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
+
+; i8* p; // p is 2 byte aligned
+; ((i32) p[0] << 16) | ((i32) p[1] << 24)
+define i32 @zext_load_i32_by_i8_shl_16(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_shl_16:
+; CHECK: ldrb r1, [r0]
+; CHECK-NEXT: ldrb r0, [r0, #1]
+; CHECK-NEXT: lsl r0, r0, #24
+; CHECK-NEXT: orr r0, r0, r1, lsl #16
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: zext_load_i32_by_i8_shl_16:
+; CHECK-ARMv6: ldrb r1, [r0]
+; CHECK-ARMv6-NEXT: ldrb r0, [r0, #1]
+; CHECK-ARMv6-NEXT: lsl r0, r0, #24
+; CHECK-ARMv6-NEXT: orr r0, r0, r1, lsl #16
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp1, align 2
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 16
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 24
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
+
+; i8* p; // p is 2 byte aligned
+; (i32) p[1] | ((i32) p[0] << 8)
+define i32 @zext_load_i32_by_i8_bswap(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_bswap:
+; CHECK: ldrb r1, [r0]
+; CHECK-NEXT: ldrb r0, [r0, #1]
+; CHECK-NEXT: orr r0, r0, r1, lsl #8
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: zext_load_i32_by_i8_bswap:
+; CHECK-ARMv6: ldrb r1, [r0]
+; CHECK-ARMv6-NEXT: ldrb r0, [r0, #1]
+; CHECK-ARMv6-NEXT: orr r0, r0, r1, lsl #8
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp5 = load i8, i8* %tmp4, align 2
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ ret i32 %tmp8
+}
+
+; i8* p; // p is 2 byte aligned
+; ((i32) p[1] << 8) | ((i32) p[0] << 16)
+define i32 @zext_load_i32_by_i8_bswap_shl_8(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_bswap_shl_8:
+; CHECK: ldrb r1, [r0]
+; CHECK-NEXT: ldrb r0, [r0, #1]
+; CHECK-NEXT: lsl r1, r1, #16
+; CHECK-NEXT: orr r0, r1, r0, lsl #8
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: zext_load_i32_by_i8_bswap_shl_8:
+; CHECK-ARMv6: ldrb r1, [r0]
+; CHECK-ARMv6-NEXT: ldrb r0, [r0, #1]
+; CHECK-ARMv6-NEXT: lsl r1, r1, #16
+; CHECK-ARMv6-NEXT: orr r0, r1, r0, lsl #8
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 8
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp5 = load i8, i8* %tmp4, align 2
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 16
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
+
+; i8* p; // p is 2 byte aligned
+; ((i32) p[1] << 16) | ((i32) p[0] << 24)
+define i32 @zext_load_i32_by_i8_bswap_shl_16(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_bswap_shl_16:
+; CHECK: ldrb r1, [r0]
+; CHECK-NEXT: ldrb r0, [r0, #1]
+; CHECK-NEXT: lsl r1, r1, #24
+; CHECK-NEXT: orr r0, r1, r0, lsl #16
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: zext_load_i32_by_i8_bswap_shl_16:
+; CHECK-ARMv6: ldrb r1, [r0]
+; CHECK-ARMv6-NEXT: ldrb r0, [r0, #1]
+; CHECK-ARMv6-NEXT: lsl r1, r1, #24
+; CHECK-ARMv6-NEXT: orr r0, r1, r0, lsl #16
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 16
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp5 = load i8, i8* %tmp4, align 2
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 24
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
+
+; i8* p;
+; i16* p1.i16 = (i16*) p;
+; (p1.i16[0] << 8) | ((i16) p[2])
+;
+; This is essentially an i16 load from p[1], but we don't fold the pattern now
+; because in the original DAG we don't have the p[1] address available
+define i16 @load_i16_from_nonzero_offset(i8* %p) {
+; CHECK-LABEL: load_i16_from_nonzero_offset:
+; CHECK: ldrh r1, [r0]
+; CHECK-NEXT: ldrb r0, [r0, #2]
+; CHECK-NEXT: orr r0, r0, r1, lsl #8
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: load_i16_from_nonzero_offset:
+; CHECK-ARMv6: ldrh r1, [r0]
+; CHECK-ARMv6-NEXT: ldrb r0, [r0, #2]
+; CHECK-ARMv6-NEXT: orr r0, r0, r1, lsl #8
+; CHECK-ARMv6-NEXT: bx lr
+
+ %p1.i16 = bitcast i8* %p to i16*
+ %p2.i8 = getelementptr i8, i8* %p, i64 2
+ %v1 = load i16, i16* %p1.i16
+ %v2.i8 = load i8, i8* %p2.i8
+ %v2 = zext i8 %v2.i8 to i16
+ %v1.shl = shl i16 %v1, 8
+ %res = or i16 %v1.shl, %v2
+ ret i16 %res
+}
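A C sketch of this last pattern (function name hypothetical):

#include <stdint.h>
#include <string.h>

/* C shape of load_i16_from_nonzero_offset: conceptually a 16-bit load
 * from p + 1, but the DAG never materializes the p + 1 address, so the
 * fold does not fire and the ldrh + ldrb + orr sequence remains. */
uint16_t load_i16_from_offset_like(const uint8_t *p) {
  uint16_t v1;
  memcpy(&v1, p, 2);               /* p1.i16[0] */
  return (uint16_t)(v1 << 8) | p[2];
}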
diff --git a/test/CodeGen/ARM/load-combine.ll b/test/CodeGen/ARM/load-combine.ll
new file mode 100644
index 000000000000..720bc7b88b32
--- /dev/null
+++ b/test/CodeGen/ARM/load-combine.ll
@@ -0,0 +1,692 @@
+; RUN: llc < %s -mtriple=arm-unknown | FileCheck %s
+; RUN: llc < %s -mtriple=armv6-unknown | FileCheck %s --check-prefix=CHECK-ARMv6
+
+; i8* p; // p is 1 byte aligned
+; (i32) p[0] | ((i32) p[1] << 8) | ((i32) p[2] << 16) | ((i32) p[3] << 24)
+define i32 @load_i32_by_i8_unaligned(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_unaligned:
+; CHECK: ldrb{{.*}}r0
+; CHECK: ldrb{{.*}}r0
+; CHECK: ldrb{{.*}}r0
+; CHECK: ldrb{{.*}}r0
+; CHECK: orr
+; CHECK: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_i8_unaligned:
+; CHECK-ARMv6: ldrb{{.*}}r0
+; CHECK-ARMv6: ldrb{{.*}}r0
+; CHECK-ARMv6: ldrb{{.*}}r0
+; CHECK-ARMv6: ldrb{{.*}}r0
+; CHECK-ARMv6: orr
+; CHECK-ARMv6: bx lr
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
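A C sketch of the unaligned variant (function name hypothetical):

#include <stdint.h>

/* C equivalent of load_i32_by_i8_unaligned: little-endian byte assembly
 * from a pointer with no alignment guarantee. Without unaligned-access
 * support the fold is blocked, so four ldrb instructions remain. */
uint32_t load32_le_unaligned(const uint8_t *p) {
  return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
         ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}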
+
+; i8* p; // p is 4 byte aligned
+; (i32) p[0] | ((i32) p[1] << 8) | ((i32) p[2] << 16) | ((i32) p[3] << 24)
+define i32 @load_i32_by_i8_aligned(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_aligned:
+; CHECK: ldr r0, [r0]
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_i8_aligned:
+; CHECK-ARMv6: ldr r0, [r0]
+; CHECK-ARMv6-NEXT: bx lr
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp, align 4
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+; i8* p; // p is 4 byte aligned
+; ((i32) p[0] << 24) | ((i32) p[1] << 16) | ((i32) p[2] << 8) | (i32) p[3]
+define i32 @load_i32_by_i8_bswap(i32* %arg) {
+; BSWAP is not supported by the 32-bit target
+; CHECK-LABEL: load_i32_by_i8_bswap:
+; CHECK: ldr r0, [r0]
+; CHECK: and
+; CHECK-NEXT: and
+; CHECK-NEXT: orr
+; CHECK-NEXT: orr
+; CHECK-NEXT: orr
+; CHECK: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_i8_bswap:
+; CHECK-ARMv6: ldr r0, [r0]
+; CHECK-ARMv6-NEXT: rev r0, r0
+; CHECK-ARMv6-NEXT: bx lr
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 4
+ %tmp2 = zext i8 %tmp1 to i32
+ %tmp3 = shl nuw nsw i32 %tmp2, 24
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 16
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 8
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = or i32 %tmp13, %tmp16
+ ret i32 %tmp17
+}
+
+; i8* p; // p is 8 byte aligned
+; (i64) p[0] | ((i64) p[1] << 8) | ((i64) p[2] << 16) | ((i64) p[3] << 24) | ((i64) p[4] << 32) | ((i64) p[5] << 40) | ((i64) p[6] << 48) | ((i64) p[7] << 56)
+define i64 @load_i64_by_i8(i64* %arg) {
+; CHECK-LABEL: load_i64_by_i8:
+; CHECK: ldr r2, [r0]
+; CHECK-NEXT: ldr r1, [r0, #4]
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i64_by_i8:
+; CHECK-ARMv6: ldrd r0, r1, [r0]
+; CHECK-ARMv6: bx lr
+ %tmp = bitcast i64* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 8
+ %tmp2 = zext i8 %tmp1 to i64
+ %tmp3 = getelementptr inbounds i8, i8* %tmp, i64 1
+ %tmp4 = load i8, i8* %tmp3, align 1
+ %tmp5 = zext i8 %tmp4 to i64
+ %tmp6 = shl nuw nsw i64 %tmp5, 8
+ %tmp7 = or i64 %tmp6, %tmp2
+ %tmp8 = getelementptr inbounds i8, i8* %tmp, i64 2
+ %tmp9 = load i8, i8* %tmp8, align 1
+ %tmp10 = zext i8 %tmp9 to i64
+ %tmp11 = shl nuw nsw i64 %tmp10, 16
+ %tmp12 = or i64 %tmp7, %tmp11
+ %tmp13 = getelementptr inbounds i8, i8* %tmp, i64 3
+ %tmp14 = load i8, i8* %tmp13, align 1
+ %tmp15 = zext i8 %tmp14 to i64
+ %tmp16 = shl nuw nsw i64 %tmp15, 24
+ %tmp17 = or i64 %tmp12, %tmp16
+ %tmp18 = getelementptr inbounds i8, i8* %tmp, i64 4
+ %tmp19 = load i8, i8* %tmp18, align 1
+ %tmp20 = zext i8 %tmp19 to i64
+ %tmp21 = shl nuw nsw i64 %tmp20, 32
+ %tmp22 = or i64 %tmp17, %tmp21
+ %tmp23 = getelementptr inbounds i8, i8* %tmp, i64 5
+ %tmp24 = load i8, i8* %tmp23, align 1
+ %tmp25 = zext i8 %tmp24 to i64
+ %tmp26 = shl nuw nsw i64 %tmp25, 40
+ %tmp27 = or i64 %tmp22, %tmp26
+ %tmp28 = getelementptr inbounds i8, i8* %tmp, i64 6
+ %tmp29 = load i8, i8* %tmp28, align 1
+ %tmp30 = zext i8 %tmp29 to i64
+ %tmp31 = shl nuw nsw i64 %tmp30, 48
+ %tmp32 = or i64 %tmp27, %tmp31
+ %tmp33 = getelementptr inbounds i8, i8* %tmp, i64 7
+ %tmp34 = load i8, i8* %tmp33, align 1
+ %tmp35 = zext i8 %tmp34 to i64
+ %tmp36 = shl nuw i64 %tmp35, 56
+ %tmp37 = or i64 %tmp32, %tmp36
+ ret i64 %tmp37
+}
+
+; i8* p; // p is 8 byte aligned
+; ((i64) p[0] << 56) | ((i64) p[1] << 48) | ((i64) p[2] << 40) | ((i64) p[3] << 32) | ((i64) p[4] << 24) | ((i64) p[5] << 16) | ((i64) p[6] << 8) | (i64) p[7]
+define i64 @load_i64_by_i8_bswap(i64* %arg) {
+; CHECK-LABEL: load_i64_by_i8_bswap:
+; CHECK: ldr{{.*}}r0
+; CHECK: ldr{{.*}}r0
+; CHECK: and
+; CHECK-NEXT: and
+; CHECK-NEXT: orr
+; CHECK-NEXT: orr
+; CHECK-NEXT: and
+; CHECK-NEXT: orr
+; CHECK-NEXT: and
+; CHECK-NEXT: orr
+; CHECK-NEXT: orr
+; CHECK-NEXT: orr
+; CHECK: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i64_by_i8_bswap:
+; CHECK-ARMv6: ldrd r2, r3, [r0]
+; CHECK-ARMv6: rev r0, r3
+; CHECK-ARMv6: rev r1, r2
+; CHECK-ARMv6: bx lr
+ %tmp = bitcast i64* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 8
+ %tmp2 = zext i8 %tmp1 to i64
+ %tmp3 = shl nuw i64 %tmp2, 56
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i64 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i64
+ %tmp7 = shl nuw nsw i64 %tmp6, 48
+ %tmp8 = or i64 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i64 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i64
+ %tmp12 = shl nuw nsw i64 %tmp11, 40
+ %tmp13 = or i64 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i64 3
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i64
+ %tmp17 = shl nuw nsw i64 %tmp16, 32
+ %tmp18 = or i64 %tmp13, %tmp17
+ %tmp19 = getelementptr inbounds i8, i8* %tmp, i64 4
+ %tmp20 = load i8, i8* %tmp19, align 1
+ %tmp21 = zext i8 %tmp20 to i64
+ %tmp22 = shl nuw nsw i64 %tmp21, 24
+ %tmp23 = or i64 %tmp18, %tmp22
+ %tmp24 = getelementptr inbounds i8, i8* %tmp, i64 5
+ %tmp25 = load i8, i8* %tmp24, align 1
+ %tmp26 = zext i8 %tmp25 to i64
+ %tmp27 = shl nuw nsw i64 %tmp26, 16
+ %tmp28 = or i64 %tmp23, %tmp27
+ %tmp29 = getelementptr inbounds i8, i8* %tmp, i64 6
+ %tmp30 = load i8, i8* %tmp29, align 1
+ %tmp31 = zext i8 %tmp30 to i64
+ %tmp32 = shl nuw nsw i64 %tmp31, 8
+ %tmp33 = or i64 %tmp28, %tmp32
+ %tmp34 = getelementptr inbounds i8, i8* %tmp, i64 7
+ %tmp35 = load i8, i8* %tmp34, align 1
+ %tmp36 = zext i8 %tmp35 to i64
+ %tmp37 = or i64 %tmp33, %tmp36
+ ret i64 %tmp37
+}
+
+; i8* p; // p[1] is 4 byte aligned
+; (i32) p[1] | ((i32) p[2] << 8) | ((i32) p[3] << 16) | ((i32) p[4] << 24)
+define i32 @load_i32_by_i8_nonzero_offset(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_nonzero_offset:
+; CHECK: ldr r0, [r0, #1]
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_i8_nonzero_offset:
+; CHECK-ARMv6: ldr r0, [r0, #1]
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 4
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 4
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+; i8* p; // p[-4] is 4 byte aligned
+; (i32) p[-4] | ((i32) p[-3] << 8) | ((i32) p[-2] << 16) | ((i32) p[-1] << 24)
+define i32 @load_i32_by_i8_neg_offset(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_neg_offset:
+; CHECK: ldr r0, [r0, #-4]
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_i8_neg_offset:
+; CHECK-ARMv6: ldr r0, [r0, #-4]
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 -4
+ %tmp2 = load i8, i8* %tmp1, align 4
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 -3
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 -2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 -1
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+; i8* p; // p[1] is 4 byte aligned
+; (i32) p[4] | ((i32) p[3] << 8) | ((i32) p[2] << 16) | ((i32) p[1] << 24)
+define i32 @load_i32_by_i8_nonzero_offset_bswap(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_nonzero_offset_bswap:
+; CHECK: ldr r0, [r0, #1]
+; CHECK-NEXT: mov r1, #65280
+; CHECK-NEXT: mov r2, #16711680
+; CHECK-NEXT: and r1, r1, r0, lsr #8
+; CHECK-NEXT: and r2, r2, r0, lsl #8
+; CHECK-NEXT: orr r1, r1, r0, lsr #24
+; CHECK-NEXT: orr r0, r2, r0, lsl #24
+; CHECK-NEXT: orr r0, r0, r1
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_i8_nonzero_offset_bswap:
+; CHECK-ARMv6: ldr r0, [r0, #1]
+; CHECK-ARMv6-NEXT: rev r0, r0
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 4
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp15 = load i8, i8* %tmp14, align 4
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+; i8* p; // p[-4] is 4 byte aligned
+; (i32) p[-1] | ((i32) p[-2] << 8) | ((i32) p[-3] << 16) | ((i32) p[-4] << 24)
+define i32 @load_i32_by_i8_neg_offset_bswap(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_neg_offset_bswap:
+; CHECK: ldr r0, [r0, #-4]
+; CHECK-NEXT: mov r1, #65280
+; CHECK-NEXT: mov r2, #16711680
+; CHECK-NEXT: and r1, r1, r0, lsr #8
+; CHECK-NEXT: and r2, r2, r0, lsl #8
+; CHECK-NEXT: orr r1, r1, r0, lsr #24
+; CHECK-NEXT: orr r0, r2, r0, lsl #24
+; CHECK-NEXT: orr r0, r0, r1
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_i8_neg_offset_bswap:
+; CHECK-ARMv6: ldr r0, [r0, #-4]
+; CHECK-ARMv6-NEXT: rev r0, r0
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 -1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 -2
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 -3
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 -4
+ %tmp15 = load i8, i8* %tmp14, align 4
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+declare i16 @llvm.bswap.i16(i16)
+
+; i16* p; // p is 4 byte aligned
+; (i32) bswap(p[1]) | ((i32) bswap(p[0]) << 16)
+define i32 @load_i32_by_bswap_i16(i32* %arg) {
+; CHECK-LABEL: load_i32_by_bswap_i16:
+; CHECK: ldr r0, [r0]
+; CHECK-NEXT: mov r1, #65280
+; CHECK-NEXT: mov r2, #16711680
+; CHECK-NEXT: and r1, r1, r0, lsr #8
+; CHECK-NEXT: and r2, r2, r0, lsl #8
+; CHECK-NEXT: orr r1, r1, r0, lsr #24
+; CHECK-NEXT: orr r0, r2, r0, lsl #24
+; CHECK-NEXT: orr r0, r0, r1
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_bswap_i16:
+; CHECK-ARMv6: ldr r0, [r0]
+; CHECK-ARMv6-NEXT: rev r0, r0
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i16*
+ %tmp1 = load i16, i16* %tmp, align 4
+ %tmp11 = call i16 @llvm.bswap.i16(i16 %tmp1)
+ %tmp2 = zext i16 %tmp11 to i32
+ %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
+ %tmp4 = load i16, i16* %tmp3, align 1
+ %tmp41 = call i16 @llvm.bswap.i16(i16 %tmp4)
+ %tmp5 = zext i16 %tmp41 to i32
+ %tmp6 = shl nuw nsw i32 %tmp2, 16
+ %tmp7 = or i32 %tmp6, %tmp5
+ ret i32 %tmp7
+}
+
+; i16* p;
+; (i32) p[0] | ((sext(p[1]) to i32) << 16)
+define i32 @load_i32_by_sext_i16(i32* %arg) {
+; CHECK-LABEL: load_i32_by_sext_i16:
+; CHECK: ldr r0, [r0]
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: load_i32_by_sext_i16:
+; CHECK-ARMv6: ldr r0, [r0]
+; CHECK-ARMv6-NEXT: bx lr
+ %tmp = bitcast i32* %arg to i16*
+ %tmp1 = load i16, i16* %tmp, align 4
+ %tmp2 = zext i16 %tmp1 to i32
+ %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
+ %tmp4 = load i16, i16* %tmp3, align 1
+ %tmp5 = sext i16 %tmp4 to i32
+ %tmp6 = shl nuw nsw i32 %tmp5, 16
+ %tmp7 = or i32 %tmp6, %tmp2
+ ret i32 %tmp7
+}
+
+; i8* arg; i32 i;
+; p = arg + 12;
+; (i32) p[i] | ((i32) p[i + 1] << 8) | ((i32) p[i + 2] << 16) | ((i32) p[i + 3] << 24)
+define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) {
+; CHECK-LABEL: load_i32_by_i8_base_offset_index:
+; CHECK: add r0, r0, r1
+; CHECK-NEXT: ldr r0, [r0, #12]
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: load_i32_by_i8_base_offset_index:
+; CHECK-ARMv6: add r0, r0, r1
+; CHECK-ARMv6-NEXT: ldr r0, [r0, #12]
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = add nuw nsw i32 %i, 3
+ %tmp2 = add nuw nsw i32 %i, 2
+ %tmp3 = add nuw nsw i32 %i, 1
+ %tmp4 = getelementptr inbounds i8, i8* %arg, i64 12
+ %tmp5 = zext i32 %i to i64
+ %tmp6 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp5
+ %tmp7 = load i8, i8* %tmp6, align 4
+ %tmp8 = zext i8 %tmp7 to i32
+ %tmp9 = zext i32 %tmp3 to i64
+ %tmp10 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp9
+ %tmp11 = load i8, i8* %tmp10, align 1
+ %tmp12 = zext i8 %tmp11 to i32
+ %tmp13 = shl nuw nsw i32 %tmp12, 8
+ %tmp14 = or i32 %tmp13, %tmp8
+ %tmp15 = zext i32 %tmp2 to i64
+ %tmp16 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp15
+ %tmp17 = load i8, i8* %tmp16, align 1
+ %tmp18 = zext i8 %tmp17 to i32
+ %tmp19 = shl nuw nsw i32 %tmp18, 16
+ %tmp20 = or i32 %tmp14, %tmp19
+ %tmp21 = zext i32 %tmp to i64
+ %tmp22 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp21
+ %tmp23 = load i8, i8* %tmp22, align 1
+ %tmp24 = zext i8 %tmp23 to i32
+ %tmp25 = shl nuw i32 %tmp24, 24
+ %tmp26 = or i32 %tmp20, %tmp25
+ ret i32 %tmp26
+}
+
+; i8* arg; i32 i;
+; p = arg + 12;
+; (i32) p[i + 1] | ((i32) p[i + 2] << 8) | ((i32) p[i + 3] << 16) | ((i32) p[i + 4] << 24)
+define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
+; CHECK-LABEL: load_i32_by_i8_base_offset_index_2:
+; CHECK: add r0, r0, r1
+; CHECK-NEXT: ldr r0, [r0, #13]
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: load_i32_by_i8_base_offset_index_2:
+; CHECK-ARMv6: add r0, r0, r1
+; CHECK-ARMv6-NEXT: ldr r0, [r0, #13]
+; CHECK-ARMv6-NEXT: bx lr
+ %tmp = add nuw nsw i32 %i, 4
+ %tmp2 = add nuw nsw i32 %i, 3
+ %tmp3 = add nuw nsw i32 %i, 2
+ %tmp4 = getelementptr inbounds i8, i8* %arg, i64 12
+ %tmp5 = add nuw nsw i32 %i, 1
+ %tmp27 = zext i32 %tmp5 to i64
+ %tmp28 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp27
+ %tmp29 = load i8, i8* %tmp28, align 4
+ %tmp30 = zext i8 %tmp29 to i32
+ %tmp31 = zext i32 %tmp3 to i64
+ %tmp32 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp31
+ %tmp33 = load i8, i8* %tmp32, align 1
+ %tmp34 = zext i8 %tmp33 to i32
+ %tmp35 = shl nuw nsw i32 %tmp34, 8
+ %tmp36 = or i32 %tmp35, %tmp30
+ %tmp37 = zext i32 %tmp2 to i64
+ %tmp38 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp37
+ %tmp39 = load i8, i8* %tmp38, align 1
+ %tmp40 = zext i8 %tmp39 to i32
+ %tmp41 = shl nuw nsw i32 %tmp40, 16
+ %tmp42 = or i32 %tmp36, %tmp41
+ %tmp43 = zext i32 %tmp to i64
+ %tmp44 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp43
+ %tmp45 = load i8, i8* %tmp44, align 1
+ %tmp46 = zext i8 %tmp45 to i32
+ %tmp47 = shl nuw i32 %tmp46, 24
+ %tmp48 = or i32 %tmp42, %tmp47
+ ret i32 %tmp48
+}
+
+; i8* p; // p is 2 byte aligned
+; (i32) p[0] | ((i32) p[1] << 8)
+define i32 @zext_load_i32_by_i8(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8:
+; CHECK: ldrb r1, [r0]
+; CHECK-NEXT: ldrb r0, [r0, #1]
+; CHECK-NEXT: orr r0, r1, r0, lsl #8
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: zext_load_i32_by_i8:
+; CHECK-ARMv6: ldrb r1, [r0]
+; CHECK-ARMv6-NEXT: ldrb r0, [r0, #1]
+; CHECK-ARMv6-NEXT: orr r0, r1, r0, lsl #8
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp1, align 2
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ ret i32 %tmp8
+}
+
+; i8* p; // p is 2 byte aligned
+; ((i32) p[0] << 8) | ((i32) p[1] << 16)
+define i32 @zext_load_i32_by_i8_shl_8(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_shl_8:
+; CHECK: ldrb r1, [r0]
+; CHECK-NEXT: ldrb r0, [r0, #1]
+; CHECK-NEXT: lsl r0, r0, #16
+; CHECK-NEXT: orr r0, r0, r1, lsl #8
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: zext_load_i32_by_i8_shl_8:
+; CHECK-ARMv6: ldrb r1, [r0]
+; CHECK-ARMv6-NEXT: ldrb r0, [r0, #1]
+; CHECK-ARMv6-NEXT: lsl r0, r0, #16
+; CHECK-ARMv6-NEXT: orr r0, r0, r1, lsl #8
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp1, align 2
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 8
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 16
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
+
+; i8* p; // p is 2-byte aligned
+; ((i32) p[0] << 16) | ((i32) p[1] << 24)
+define i32 @zext_load_i32_by_i8_shl_16(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_shl_16:
+; CHECK: ldrb r1, [r0]
+; CHECK-NEXT: ldrb r0, [r0, #1]
+; CHECK-NEXT: lsl r0, r0, #24
+; CHECK-NEXT: orr r0, r0, r1, lsl #16
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: zext_load_i32_by_i8_shl_16:
+; CHECK-ARMv6: ldrb r1, [r0]
+; CHECK-ARMv6-NEXT: ldrb r0, [r0, #1]
+; CHECK-ARMv6-NEXT: lsl r0, r0, #24
+; CHECK-ARMv6-NEXT: orr r0, r0, r1, lsl #16
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp1, align 2
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 16
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 24
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
+
+; i8* p; // p is 2-byte aligned
+; (i32) p[1] | ((i32) p[0] << 8)
+define i32 @zext_load_i32_by_i8_bswap(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_bswap:
+; CHECK: ldrb r1, [r0]
+; CHECK-NEXT: ldrb r0, [r0, #1]
+; CHECK-NEXT: orr r0, r0, r1, lsl #8
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: zext_load_i32_by_i8_bswap:
+; CHECK-ARMv6: ldrb r1, [r0]
+; CHECK-ARMv6-NEXT: ldrb r0, [r0, #1]
+; CHECK-ARMv6-NEXT: orr r0, r0, r1, lsl #8
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp5 = load i8, i8* %tmp4, align 2
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ ret i32 %tmp8
+}
+
+; i8* p; // p is 2-byte aligned
+; ((i32) p[1] << 8) | ((i32) p[0] << 16)
+define i32 @zext_load_i32_by_i8_bswap_shl_8(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_bswap_shl_8:
+; CHECK: ldrb r1, [r0]
+; CHECK-NEXT: ldrb r0, [r0, #1]
+; CHECK-NEXT: lsl r1, r1, #16
+; CHECK-NEXT: orr r0, r1, r0, lsl #8
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: zext_load_i32_by_i8_bswap_shl_8:
+; CHECK-ARMv6: ldrb r1, [r0]
+; CHECK-ARMv6-NEXT: ldrb r0, [r0, #1]
+; CHECK-ARMv6-NEXT: lsl r1, r1, #16
+; CHECK-ARMv6-NEXT: orr r0, r1, r0, lsl #8
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 8
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp5 = load i8, i8* %tmp4, align 2
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 16
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
+
+; i8* p; // p is 2-byte aligned
+; ((i32) p[1] << 16) | ((i32) p[0] << 24)
+define i32 @zext_load_i32_by_i8_bswap_shl_16(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_bswap_shl_16:
+; CHECK: ldrb r1, [r0]
+; CHECK-NEXT: ldrb r0, [r0, #1]
+; CHECK-NEXT: lsl r1, r1, #24
+; CHECK-NEXT: orr r0, r1, r0, lsl #16
+; CHECK-NEXT: mov pc, lr
+;
+; CHECK-ARMv6-LABEL: zext_load_i32_by_i8_bswap_shl_16:
+; CHECK-ARMv6: ldrb r1, [r0]
+; CHECK-ARMv6-NEXT: ldrb r0, [r0, #1]
+; CHECK-ARMv6-NEXT: lsl r1, r1, #24
+; CHECK-ARMv6-NEXT: orr r0, r1, r0, lsl #16
+; CHECK-ARMv6-NEXT: bx lr
+
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 16
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp5 = load i8, i8* %tmp4, align 2
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 24
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
diff --git a/test/CodeGen/ARM/longMAC.ll b/test/CodeGen/ARM/longMAC.ll
index 80cb5096c03c..9ecda8b06cbf 100644
--- a/test/CodeGen/ARM/longMAC.ll
+++ b/test/CodeGen/ARM/longMAC.ll
@@ -1,14 +1,15 @@
; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s -check-prefix=CHECK --check-prefix=CHECK-LE
-; RUN: llc -mtriple=armv7-eabi %s -o - | FileCheck %s --check-prefix=CHECK-V7-LE
+; RUN: llc -mtriple=armv7-eabi %s -o - | FileCheck %s -check-prefix=CHECK --check-prefix=CHECK-V7-LE
; RUN: llc -mtriple=armeb-eabi %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE
-; RUN: llc -mtriple=armebv7-eabi %s -o - | FileCheck %s -check-prefix=CHECK-V7-BE
-; RUN: llc -mtriple=thumbv6-eabi %s -o - | FileCheck %s -check-prefix=CHECK-V6-THUMB
-; RUN: llc -mtriple=thumbv6t2-eabi %s -o - | FileCheck %s -check-prefix=CHECK-V6-THUMB2
-; RUN: llc -mtriple=thumbv7-eabi %s -o - | FileCheck %s -check-prefix=CHECK-V7-THUMB
-; RUN: llc -mtriple=thumbebv7-eabi %s -o - | FileCheck %s -check-prefix=CHECK-V7-THUMB-BE
-; RUN: llc -mtriple=thumbv6m-eabi %s -o - | FileCheck %s -check-prefix=CHECK-V6M-THUMB
-; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s -check-prefix=CHECK-V7M-THUMB
-; RUN: llc -mtriple=thumbv7em-eabi %s -o - | FileCheck %s -check-prefix=CHECK-V7EM-THUMB
+; RUN: llc -mtriple=armebv7-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V7-BE
+; RUN: llc -mtriple=thumbv6-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V6-THUMB
+; RUN: llc -mtriple=thumbv6t2-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-T2-DSP
+; RUN: llc -mtriple=thumbv7-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-T2-DSP
+; RUN: llc -mtriple=thumbebv7-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V7-THUMB-BE
+; RUN: llc -mtriple=thumbv6m-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V6M-THUMB
+; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V7M-THUMB
+; RUN: llc -mtriple=thumbv7em-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-T2-DSP
+; RUN: llc -mtriple=armv5te-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V5TE
; Check the generated signed and unsigned multiply-accumulate-long instructions.
define i64 @MACLongTest1(i32 %a, i32 %b, i64 %c) {
@@ -20,12 +21,9 @@ define i64 @MACLongTest1(i32 %a, i32 %b, i64 %c) {
;CHECK-BE: umlal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
;CHECK-BE: mov r0, [[RDHI]]
;CHECK-BE: mov r1, [[RDLO]]
-;CHECK-V6-THUMB2: umlal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
-;CHECK-V6-THUMB2: mov r0, [[RDLO]]
-;CHECK-V6-THUMB2: mov r1, [[RDHI]]
-;CHECK-V7-THUMB: umlal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
-;CHECK-V7-THUMB: mov r0, [[RDLO]]
-;CHECK-V7-THUMB: mov r1, [[RDHI]]
+;CHECK-T2-DSP: umlal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
+;CHECK-T2-DSP-NEXT: mov r0, [[RDLO]]
+;CHECK-T2-DSP-NEXT: mov r1, [[RDHI]]
;CHECK-V7-THUMB-BE: umlal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
;CHECK-V7-THUMB-BE: mov r0, [[RDHI]]
;CHECK-V7-THUMB-BE: mov r1, [[RDLO]]
@@ -44,12 +42,9 @@ define i64 @MACLongTest2(i32 %a, i32 %b, i64 %c) {
;CHECK-BE: smlal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
;CHECK-BE: mov r0, [[RDHI]]
;CHECK-BE: mov r1, [[RDLO]]
-;CHECK-V6-THUMB2: smlal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
-;CHECK-V6-THUMB2: mov r0, [[RDLO]]
-;CHECK-V6-THUMB2: mov r1, [[RDHI]]
-;CHECK-V7-THUMB: smlal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
-;CHECK-V7-THUMB: mov r0, [[RDLO]]
-;CHECK-V7-THUMB: mov r1, [[RDHI]]
+;CHECK-T2-DSP: smlal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
+;CHECK-T2-DSP-NEXT: mov r0, [[RDLO]]
+;CHECK-T2-DSP-NEXT: mov r1, [[RDHI]]
;CHECK-V7-THUMB-BE: smlal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
;CHECK-V7-THUMB-BE: mov r0, [[RDHI]]
;CHECK-V7-THUMB-BE: mov r1, [[RDLO]]
@@ -78,8 +73,7 @@ define i64 @MACLongTest3(i32 %a, i32 %b, i32 %c) {
;CHECK-BE: umlal [[RDLO:r[0-9]+]], [[RDHI]], r1, r0
;CHECK-BE: mov r0, [[RDHI]]
;CHECK-BE: mov r1, [[RDLO]]
-;CHECK-V6-THUMB2: umlal
-;CHECK-V7-THUMB: umlal
+;CHECK-T2-DSP: umlal
;CHECK-V6-THUMB-NOT: umlal
%conv = zext i32 %b to i64
%conv1 = zext i32 %a to i64
@@ -92,8 +86,7 @@ define i64 @MACLongTest3(i32 %a, i32 %b, i32 %c) {
define i64 @MACLongTest4(i32 %a, i32 %b, i32 %c) {
;CHECK-LABEL: MACLongTest4:
;CHECK-V6-THUMB-NOT: smlal
-;CHECK-V6-THUMB2: smlal
-;CHECK-V7-THUMB: smlal
+;CHECK-T2-DSP: smlal
;CHECK-LE: asr [[RDHI:r[0-9]+]], [[RDLO:r[0-9]+]], #31
;CHECK-LE: smlal [[RDLO]], [[RDHI]], r1, r0
;CHECK-LE: mov r0, [[RDLO]]
@@ -114,14 +107,12 @@ define i64 @MACLongTest6(i32 %a, i32 %b, i32 %c, i32 %d) {
;CHECK-LABEL: MACLongTest6:
;CHECK-V6-THUMB-NOT: smull
;CHECK-V6-THUMB-NOT: smlal
-;CHECK: smull r12, lr, r1, r0
-;CHECK: smlal r12, lr, r3, r2
+;CHECK-LE: smull r12, lr, r1, r0
+;CHECK-LE: smlal r12, lr, r3, r2
;CHECK-V7: smull [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], r1, r0
;CHECK-V7: smlal [[RDLO]], [[RDHI]], [[Rn:r[0-9]+]], [[Rm:r[0-9]+]]
-;CHECK-V7-THUMB: smull [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], r1, r0
-;CHECK-V7-THUMB: smlal [[RDLO]], [[RDHI]], [[Rn:r[0-9]+]], [[Rm:r[0-9]+]]
-;CHECK-V6-THUMB2: smull [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], r1, r0
-;CHECK-V6-THUMB2: smlal [[RDLO]], [[RDHI]], [[Rn:r[0-9]+]], [[Rm:r[0-9]+]]
+;CHECK-T2-DSP: smull [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], r1, r0
+;CHECK-T2-DSP: smlal [[RDLO]], [[RDHI]], [[Rn:r[0-9]+]], [[Rm:r[0-9]+]]
%conv = sext i32 %a to i64
%conv1 = sext i32 %b to i64
%mul = mul nsw i64 %conv1, %conv
@@ -172,18 +163,12 @@ define i64 @MACLongTest9(i32 %lhs, i32 %rhs, i32 %lo, i32 %hi) {
;CHECK-V7-BE: umaal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
;CHECK-V7-BE: mov r0, [[RDHI]]
;CHECK-V7-BE: mov r1, [[RDLO]]
-;CHECK-V6-THUMB2: umaal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
-;CHECK-V6-THUMB2: mov r0, [[RDLO]]
-;CHECK-V6-THUMB2: mov r1, [[RDHI]]
-;CHECK-V7-THUMB: umaal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
-;CHECK-V7-THUMB: mov r0, [[RDLO]]
-;CHECK-V7-THUMB: mov r1, [[RDHI]]
+;CHECK-T2-DSP: umaal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
+;CHECK-T2-DSP-NEXT: mov r0, [[RDLO]]
+;CHECK-T2-DSP-NEXT: mov r1, [[RDHI]]
;CHECK-V7-THUMB-BE: umaal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
;CHECK-V7-THUMB-BE: mov r0, [[RDHI]]
;CHECK-V7-THUMB-BE: mov r1, [[RDLO]]
-;CHECK-V7EM-THUMB: umaal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
-;CHECK-V7EM-THUMB: mov r0, [[RDLO]]
-;CHECK-V7EM-THUMB: mov r1, [[RDHI]]
;CHECK-NOT: umaal
;CHECK-V6-THUMB-NOT: umaal
;CHECK-V6M-THUMB-NOT: umaal
@@ -206,18 +191,12 @@ define i64 @MACLongTest10(i32 %lhs, i32 %rhs, i32 %lo, i32 %hi) {
;CHECK-V7-BE: umaal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
;CHECK-V7-BE: mov r0, [[RDHI]]
;CHECK-V7-BE: mov r1, [[RDLO]]
-;CHECK-V6-THUMB2: umaal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
-;CHECK-V6-THUMB2: mov r0, [[RDLO]]
-;CHECK-V6-THUMB2: mov r1, [[RDHI]]
-;CHECK-V7-THUMB: umaal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
-;CHECK-V7-THUMB: mov r0, [[RDLO]]
-;CHECK-V7-THUMB: mov r1, [[RDHI]]
+;CHECK-T2-DSP: umaal r2, r3, r1, r0
+;CHECK-T2-DSP-NEXT: mov r0, r2
+;CHECK-T2-DSP-NEXT: mov r1, r3
;CHECK-V7-THUMB-BE: umaal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
;CHECK-V7-THUMB-BE: mov r0, [[RDHI]]
;CHECK-V7-THUMB-BE: mov r1, [[RDLO]]
-;CHECK-V7EM-THUMB: umaal [[RDLO:r[0-9]+]], [[RDHI:r[0-9]+]], [[LHS:r[0-9]+]], [[RHS:r[0-9]+]]
-;CHECK-V7EM-THUMB: mov r0, [[RDLO]]
-;CHECK-V7EM-THUMB: mov r1, [[RDHI]]
;CHECK-NOT: umaal
;CHECK-V6-THUMB-NOT: umaal
;CHECK-V6M-THUMB-NOT: umaal
@@ -231,3 +210,188 @@ define i64 @MACLongTest10(i32 %lhs, i32 %rhs, i32 %lo, i32 %hi) {
%add2 = add i64 %add, %mul
ret i64 %add2
}
+
+define i64 @MACLongTest11(i16 %a, i16 %b, i64 %c) {
+;CHECK-LABEL: MACLongTest11:
+;CHECK-T2-DSP-NOT: sxth
+;CHECK-T2-DSP: smlalbb r2, r3
+;CHECK-T2-DSP-NEXT: mov r0, r2
+;CHECK-T2-DSP-NEXT: mov r1, r3
+;CHECK-V5TE-NOT: sxth
+;CHECK-V5TE: smlalbb r2, r3
+;CHECK-V5TE-NEXT: mov r0, r2
+;CHECK-V5TE-NEXT: mov r1, r3
+;CHECK-V7-LE-NOT: sxth
+;CHECK-V7-LE: smlalbb r2, r3
+;CHECK-V7-LE-NEXT: mov r0, r2
+;CHECK-V7-LE-NEXT: mov r1, r3
+;CHECK-V7-THUMB-BE: smlalbb r3, r2
+;CHECK-V7-THUMB-BE-NEXT: mov r0, r2
+;CHECK-V7-THUMB-BE-NEXT: mov r1, r3
+;CHECK-LE-NOT: smlalbb
+;CHECK-BE-NOT: smlalbb
+;CHECK-V6M-THUMB-NOT: smlalbb
+;CHECK-V7M-THUMB-NOT: smlalbb
+ %conv = sext i16 %a to i32
+ %conv1 = sext i16 %b to i32
+ %mul = mul nsw i32 %conv1, %conv
+ %conv2 = sext i32 %mul to i64
+ %add = add nsw i64 %conv2, %c
+ ret i64 %add
+}
+
+define i64 @MACLongTest12(i16 %b, i32 %t, i64 %c) {
+;CHECK-LABEL: MACLongTest12:
+;CHECK-T2-DSP-NOT: sxth
+;CHECK-T2-DSP-NOT: {{asr|lsr}}
+;CHECK-T2-DSP: smlalbt r2, r3, r0, r1
+;CHECK-T2-DSP-NEXT: mov r0, r2
+;CHECK-T2-DSP-NEXT: mov r1, r3
+;CHECK-T2-DSP-NOT: sxth
+;CHECK-V5TE-NOT: sxth
+;CHECK-V5TE-NOT: {{asr|lsr}}
+;CHECK-V5TE: smlalbt r2, r3, r0, r1
+;CHECK-V5TE-NEXT: mov r0, r2
+;CHECK-V5TE-NEXT: mov r1, r3
+;CHECK-V7-LE-NOT: sxth
+;CHECK-V7-LE-NOT: {{asr|lsr}}
+;CHECK-V7-LE: smlalbt r2, r3, r0, r1
+;CHECK-V7-LE-NEXT: mov r0, r2
+;CHECK-V7-LE-NEXT: mov r1, r3
+;CHECK-V7-THUMB-BE: smlalbt r3, r2,
+;CHECK-V7-THUMB-BE-NEXT: mov r0, r2
+;CHECK-V7-THUMB-BE-NEXT: mov r1, r3
+;CHECK-LE-NOT: smlalbt
+;CHECK-BE-NOT: smlalbt
+;CHECK-V6M-THUMB-NOT: smlalbt
+;CHECK-V7M-THUMB-NOT: smlalbt
+ %conv0 = sext i16 %b to i32
+ %conv1 = ashr i32 %t, 16
+ %mul = mul nsw i32 %conv0, %conv1
+ %conv2 = sext i32 %mul to i64
+ %add = add nsw i64 %conv2, %c
+ ret i64 %add
+}
+
+define i64 @MACLongTest13(i32 %t, i16 %b, i64 %c) {
+;CHECK-LABEL: MACLongTest13:
+;CHECK-T2-DSP-NOT: sxth
+;CHECK-T2-DSP-NOT: {{asr|lsr}}
+;CHECK-T2-DSP: smlaltb r2, r3, r0, r1
+;CHECK-T2-DSP-NEXT: mov r0, r2
+;CHECK-T2-DSP-NEXT: mov r1, r3
+;CHECK-V5TE-NOT: sxth
+;CHECK-V5TE-NOT: {{asr|lsr}}
+;CHECK-V5TE: smlaltb r2, r3, r0, r1
+;CHECK-V5TE-NEXT: mov r0, r2
+;CHECK-V5TE-NEXT: mov r1, r3
+;CHECK-V7-LE-NOT: sxth
+;CHECK-V7-LE-NOT: {{asr|lsr}}
+;CHECK-V7-LE: smlaltb r2, r3, r0, r1
+;CHECK-V7-LE-NEXT: mov r0, r2
+;CHECK-V7-LE-NEXT: mov r1, r3
+;CHECK-V7-THUMB-BE: smlaltb r3, r2, r0, r1
+;CHECK-V7-THUMB-BE-NEXT: mov r0, r2
+;CHECK-V7-THUMB-BE-NEXT: mov r1, r3
+;CHECK-LE-NOT: smlaltb
+;CHECK-BE-NOT: smlaltb
+;CHECK-V6M-THUMB-NOT: smlaltb
+;CHECK-V7M-THUMB-NOT: smlaltb
+ %conv0 = ashr i32 %t, 16
+ %conv1 = sext i16 %b to i32
+ %mul = mul nsw i32 %conv0, %conv1
+ %conv2 = sext i32 %mul to i64
+ %add = add nsw i64 %conv2, %c
+ ret i64 %add
+}
+
+define i64 @MACLongTest14(i32 %a, i32 %b, i64 %c) {
+;CHECK-LABEL: MACLongTest14:
+;CHECK-T2-DSP-NOT: {{asr|lsr}}
+;CHECK-T2-DSP: smlaltt r2, r3,
+;CHECK-T2-DSP-NEXT: mov r0, r2
+;CHECK-T2-DSP-NEXT: mov r1, r3
+;CHECK-V5TE-NOT: {{asr|lsr}}
+;CHECK-V5TE: smlaltt r2, r3,
+;CHECK-V5TE-NEXT: mov r0, r2
+;CHECK-V5TE-NEXT: mov r1, r3
+;CHECK-V7-LE-NOT: {{asr|lsr}}
+;CHECK-V7-LE: smlaltt r2, r3,
+;CHECK-V7-LE-NEXT: mov r0, r2
+;CHECK-V7-LE-NEXT: mov r1, r3
+;CHECK-V7-THUMB-BE: smlaltt r3, r2,
+;CHECK-V7-THUMB-BE-NEXT: mov r0, r2
+;CHECK-V7-THUMB-BE-NEXT: mov r1, r3
+;CHECK-LE-NOT: smlaltt
+;CHECK-BE-NOT: smlaltt
+;CHECK-V6M-THUMB-NOT: smlaltt
+;CHECK-V7M-THUMB-NOT: smlaltt
+ %conv0 = ashr i32 %a, 16
+ %conv1 = ashr i32 %b, 16
+ %mul = mul nsw i32 %conv1, %conv0
+ %conv2 = sext i32 %mul to i64
+ %add = add nsw i64 %conv2, %c
+ ret i64 %add
+}
+
+@global_b = external global i16, align 2
+;CHECK-LABEL: MACLongTest15:
+;CHECK-T2-DSP-NOT: {{asr|lsr}}
+;CHECK-T2-DSP: smlaltb r2, r3, r0, r1
+;CHECK-T2-DSP-NEXT: mov r0, r2
+;CHECK-T2-DSP-NEXT: mov r1, r3
+;CHECK-V5TE-NOT: {{asr|lsr}}
+;CHECK-V5TE: smlaltb r2, r3, r0, r1
+;CHECK-V5TE-NEXT: mov r0, r2
+;CHECK-V5TE-NEXT: mov r1, r3
+;CHECK-V7-LE-NOT: {{asr|lsr}}
+;CHECK-V7-LE: smlaltb r2, r3, r0, r1
+;CHECK-V7-LE-NEXT: mov r0, r2
+;CHECK-V7-LE-NEXT: mov r1, r3
+;CHECK-V7-THUMB-BE: smlaltb r3, r2, r0, r1
+;CHECK-V7-THUMB-BE-NEXT: mov r0, r2
+;CHECK-V7-THUMB-BE-NEXT: mov r1, r3
+;CHECK-LE-NOT: smlaltb
+;CHECK-BE-NOT: smlaltb
+;CHECK-V6M-THUMB-NOT: smlaltb
+;CHECK-V7M-THUMB-NOT: smlaltb
+define i64 @MACLongTest15(i32 %t, i64 %acc) {
+entry:
+ %0 = load i16, i16* @global_b, align 2
+ %conv = sext i16 %0 to i32
+ %shr = ashr i32 %t, 16
+ %mul = mul nsw i32 %shr, %conv
+ %conv1 = sext i32 %mul to i64
+ %add = add nsw i64 %conv1, %acc
+ ret i64 %add
+}
+
+;CHECK-LABEL: MACLongTest16:
+;CHECK-T2-DSP-NOT: {{asr|lsr}}
+;CHECK-T2-DSP: smlalbt r2, r3, r1, r0
+;CHECK-T2-DSP-NEXT: mov r0, r2
+;CHECK-T2-DSP-NEXT: mov r1, r3
+;CHECK-V5TE-NOT: {{asr|lsr}}
+;CHECK-V5TE: smlalbt r2, r3, r1, r0
+;CHECK-V5TE-NEXT: mov r0, r2
+;CHECK-V5TE-NEXT: mov r1, r3
+;CHECK-V7-LE: smlalbt r2, r3, r1, r0
+;CHECK-V7-LE-NEXT: mov r0, r2
+;CHECK-V7-LE-NEXT: mov r1, r3
+;CHECK-V7-THUMB-BE: smlalbt r3, r2, r1, r0
+;CHECK-V7-THUMB-BE-NEXT: mov r0, r2
+;CHECK-V7-THUMB-BE-NEXT: mov r1, r3
+;CHECK-LE-NOT: smlalbt
+;CHECK-BE-NOT: smlalbt
+;CHECK-V6M-THUMB-NOT: smlalbt
+;CHECK-V7M-THUMB-NOT: smlalbt
+define i64 @MACLongTest16(i32 %t, i64 %acc) {
+entry:
+ %0 = load i16, i16* @global_b, align 2
+ %conv = sext i16 %0 to i32
+ %shr = ashr i32 %t, 16
+ %mul = mul nsw i32 %conv, %shr
+ %conv1 = sext i32 %mul to i64
+ %add = add nsw i64 %conv1, %acc
+ ret i64 %add
+}
diff --git a/test/CodeGen/ARM/lowerMUL-newload.ll b/test/CodeGen/ARM/lowerMUL-newload.ll
new file mode 100644
index 000000000000..93d765cba116
--- /dev/null
+++ b/test/CodeGen/ARM/lowerMUL-newload.ll
@@ -0,0 +1,115 @@
+; RUN: llc < %s -mtriple=arm-eabi -mcpu=krait | FileCheck %s
+
+define void @func1(i16* %a, i16* %b, i16* %c) {
+entry:
+; This test case tries to vectorize the pseudocode below.
+; a[i] = b[i] + c[i];
+; b[i] = a[i] * c[i];
+; a[i] = b[i] + a[i] * c[i];
+;
+; Checking that the vector load of a[i] for "a[i] = b[i] + a[i] * c[i]" is
+; scheduled before the first vector store to "a[i] = b[i] + c[i]".
+; Checking that no vector load of a[i] is scheduled between the vector
+; stores to a[i]; otherwise the load of a[i] would be polluted by the
+; first vector store to a[i].
+;
+; This test case checks that the chain information is updated during
+; lowerMUL for the newly created Load SDNode.
+
+; CHECK: vldr {{.*}} [r0, #16]
+; CHECK: vstr {{.*}} [r0, #16]
+; CHECK-NOT: vldr {{.*}} [r0, #16]
+; CHECK: vstr {{.*}} [r0, #16]
+
+ %scevgep0 = getelementptr i16, i16* %a, i32 8
+ %vector_ptr0 = bitcast i16* %scevgep0 to <4 x i16>*
+ %vec0 = load <4 x i16>, <4 x i16>* %vector_ptr0, align 8
+ %scevgep1 = getelementptr i16, i16* %b, i32 8
+ %vector_ptr1 = bitcast i16* %scevgep1 to <4 x i16>*
+ %vec1 = load <4 x i16>, <4 x i16>* %vector_ptr1, align 8
+ %0 = zext <4 x i16> %vec1 to <4 x i32>
+ %scevgep2 = getelementptr i16, i16* %c, i32 8
+ %vector_ptr2 = bitcast i16* %scevgep2 to <4 x i16>*
+ %vec2 = load <4 x i16>, <4 x i16>* %vector_ptr2, align 8
+ %1 = sext <4 x i16> %vec2 to <4 x i32>
+ %vec3 = add <4 x i32> %1, %0
+ %2 = trunc <4 x i32> %vec3 to <4 x i16>
+ %scevgep3 = getelementptr i16, i16* %a, i32 8
+ %vector_ptr3 = bitcast i16* %scevgep3 to <4 x i16>*
+ store <4 x i16> %2, <4 x i16>* %vector_ptr3, align 8
+ %vector_ptr4 = bitcast i16* %scevgep2 to <4 x i16>*
+ %vec4 = load <4 x i16>, <4 x i16>* %vector_ptr4, align 8
+ %3 = sext <4 x i16> %vec4 to <4 x i32>
+ %vec5 = mul <4 x i32> %3, %vec3
+ %4 = trunc <4 x i32> %vec5 to <4 x i16>
+ %vector_ptr5 = bitcast i16* %scevgep1 to <4 x i16>*
+ store <4 x i16> %4, <4 x i16>* %vector_ptr5, align 8
+ %5 = sext <4 x i16> %vec0 to <4 x i32>
+ %vector_ptr6 = bitcast i16* %scevgep2 to <4 x i16>*
+ %vec6 = load <4 x i16>, <4 x i16>* %vector_ptr6, align 8
+ %6 = sext <4 x i16> %vec6 to <4 x i32>
+ %vec7 = mul <4 x i32> %6, %5
+ %vec8 = add <4 x i32> %vec7, %vec5
+ %7 = trunc <4 x i32> %vec8 to <4 x i16>
+ %vector_ptr7 = bitcast i16* %scevgep3 to <4 x i16>*
+ store <4 x i16> %7, <4 x i16>* %vector_ptr7, align 8
+ ret void
+}
+
+define void @func2(i16* %a, i16* %b, i16* %c) {
+entry:
+; This test case tries to vectorize the pseudocode below.
+; a[i] = b[i] + c[i];
+; b[i] = a[i] * c[i];
+; a[i] = b[i] + a[i] * c[i] + a[i];
+;
+; Checking that the vector load of a[i] for "a[i] = b[i] + a[i] * c[i] + a[i]"
+; is scheduled before the first vector store to "a[i] = b[i] + c[i]".
+; Checking that no vector load of a[i] is scheduled between the first
+; vector store to a[i] and the vector add of a[i]; otherwise the load of
+; a[i] would be polluted by the first vector store to a[i].
+;
+; This test case checks that both the chain and the value of the newly
+; created Load SDNode are updated during lowerMUL.
+
+; CHECK: vldr {{.*}} [r0, #16]
+; CHECK: vstr {{.*}} [r0, #16]
+; CHECK-NOT: vldr {{.*}} [r0, #16]
+; CHECK: vaddw.s16
+; CHECK: vstr {{.*}} [r0, #16]
+
+ %scevgep0 = getelementptr i16, i16* %a, i32 8
+ %vector_ptr0 = bitcast i16* %scevgep0 to <4 x i16>*
+ %vec0 = load <4 x i16>, <4 x i16>* %vector_ptr0, align 8
+ %scevgep1 = getelementptr i16, i16* %b, i32 8
+ %vector_ptr1 = bitcast i16* %scevgep1 to <4 x i16>*
+ %vec1 = load <4 x i16>, <4 x i16>* %vector_ptr1, align 8
+ %0 = zext <4 x i16> %vec1 to <4 x i32>
+ %scevgep2 = getelementptr i16, i16* %c, i32 8
+ %vector_ptr2 = bitcast i16* %scevgep2 to <4 x i16>*
+ %vec2 = load <4 x i16>, <4 x i16>* %vector_ptr2, align 8
+ %1 = sext <4 x i16> %vec2 to <4 x i32>
+ %vec3 = add <4 x i32> %1, %0
+ %2 = trunc <4 x i32> %vec3 to <4 x i16>
+ %scevgep3 = getelementptr i16, i16* %a, i32 8
+ %vector_ptr3 = bitcast i16* %scevgep3 to <4 x i16>*
+ store <4 x i16> %2, <4 x i16>* %vector_ptr3, align 8
+ %vector_ptr4 = bitcast i16* %scevgep2 to <4 x i16>*
+ %vec4 = load <4 x i16>, <4 x i16>* %vector_ptr4, align 8
+ %3 = sext <4 x i16> %vec4 to <4 x i32>
+ %vec5 = mul <4 x i32> %3, %vec3
+ %4 = trunc <4 x i32> %vec5 to <4 x i16>
+ %vector_ptr5 = bitcast i16* %scevgep1 to <4 x i16>*
+ store <4 x i16> %4, <4 x i16>* %vector_ptr5, align 8
+ %5 = sext <4 x i16> %vec0 to <4 x i32>
+ %vector_ptr6 = bitcast i16* %scevgep2 to <4 x i16>*
+ %vec6 = load <4 x i16>, <4 x i16>* %vector_ptr6, align 8
+ %6 = sext <4 x i16> %vec6 to <4 x i32>
+ %vec7 = mul <4 x i32> %6, %5
+ %vec8 = add <4 x i32> %vec7, %vec5
+ %vec9 = add <4 x i32> %vec8, %5
+ %7 = trunc <4 x i32> %vec9 to <4 x i16>
+ %vector_ptr7 = bitcast i16* %scevgep3 to <4 x i16>*
+ store <4 x i16> %7, <4 x i16>* %vector_ptr7, align 8
+ ret void
+}
diff --git a/test/CodeGen/ARM/mature-mc-support.ll b/test/CodeGen/ARM/mature-mc-support.ll
index 0a7e5b91adc5..f89657dd81ac 100644
--- a/test/CodeGen/ARM/mature-mc-support.ll
+++ b/test/CodeGen/ARM/mature-mc-support.ll
@@ -9,4 +9,4 @@
module asm " .this_directive_is_very_unlikely_to_exist"
-; CHECK: LLVM ERROR: Error parsing inline asm
+; CHECK: error: unknown directive
diff --git a/test/CodeGen/ARM/misched-fp-basic.ll b/test/CodeGen/ARM/misched-fp-basic.ll
new file mode 100644
index 000000000000..27ad2cec34fd
--- /dev/null
+++ b/test/CodeGen/ARM/misched-fp-basic.ll
@@ -0,0 +1,69 @@
+; REQUIRES: asserts
+; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-a9 -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > \
+; RUN: /dev/null | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK_A9
+; RUN: llc < %s -mtriple=arm-eabi -mcpu=swift -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > \
+; RUN: /dev/null | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK_SWIFT
+; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-r52 -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > \
+; RUN: /dev/null | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK_R52
+;
+; Check the latency of instructions for processors with scheduling models.
+;
+; Function Attrs: norecurse nounwind readnone
+define i32 @foo(float %a, float %b, float %c, i32 %d) local_unnamed_addr #0 {
+entry:
+;
+; CHECK: ********** MI Scheduling **********
+; CHECK_A9: VADDS
+; CHECK_SWIFT: VADDfd
+; CHECK_R52: VADDS
+; CHECK_A9: Latency : 5
+; CHECK_SWIFT: Latency : 4
+; CHECK_R52: Latency : 6
+;
+; CHECK_A9: VMULS
+; CHECK_SWIFT: VMULfd
+; CHECK_R52: VMULS
+; CHECK_SWIFT: Latency : 4
+; CHECK_A9: Latency : 6
+; CHECK_R52: Latency : 6
+;
+; CHECK: VDIVS
+; CHECK_SWIFT: Latency : 17
+; CHECK_A9: Latency : 16
+; CHECK_R52: Latency : 7
+;
+; CHECK: VCVTDS
+; CHECK_SWIFT: Latency : 4
+; CHECK_A9: Latency : 5
+; CHECK_R52: Latency : 6
+;
+; CHECK: VADDD
+; CHECK_SWIFT: Latency : 6
+; CHECK_A9: Latency : 5
+; CHECK_R52: Latency : 6
+;
+; CHECK: VMULD
+; CHECK_SWIFT: Latency : 6
+; CHECK_A9: Latency : 7
+; CHECK_R52: Latency : 6
+;
+; CHECK: VDIVD
+; CHECK_SWIFT: Latency : 32
+; CHECK_A9: Latency : 26
+; CHECK_R52: Latency : 17
+;
+; CHECK: VTOSIZD
+; CHECK_SWIFT: Latency : 4
+; CHECK_A9: Latency : 5
+; CHECK_R52: Latency : 6
+;
+ %add = fadd float %a, %b
+ %mul = fmul float %add, %add
+ %div = fdiv float %mul, %b
+ %conv1 = fpext float %div to double
+ %add3 = fadd double %conv1, %conv1
+ %mul4 = fmul double %add3, %add3
+ %div5 = fdiv double %mul4, %conv1
+ %conv6 = fptosi double %div5 to i32
+ ret i32 %conv6
+}
diff --git a/test/CodeGen/ARM/misched-int-basic-thumb2.mir b/test/CodeGen/ARM/misched-int-basic-thumb2.mir
new file mode 100644
index 000000000000..86ef1e26f636
--- /dev/null
+++ b/test/CodeGen/ARM/misched-int-basic-thumb2.mir
@@ -0,0 +1,175 @@
+# Basic machine sched model test for Thumb2 int instructions
+# RUN: llc -o /dev/null %s -mtriple=thumbv7-eabi -mcpu=swift -run-pass machine-scheduler -enable-misched -verify-misched \
+# RUN: -debug-only=misched 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK_SWIFT
+# RUN: llc -o /dev/null %s -mtriple=thumbv7--eabi -mcpu=cortex-a9 -run-pass machine-scheduler -enable-misched -verify-misched \
+# RUN: -debug-only=misched 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK_A9
+# RUN: llc -o /dev/null %s -mtriple=thumbv8r-eabi -mcpu=cortex-r52 -run-pass machine-scheduler -enable-misched -verify-misched \
+# RUN: -debug-only=misched 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK_R52
+# REQUIRES: asserts
+--- |
+ ; ModuleID = 'foo.ll'
+ source_filename = "foo.ll"
+ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+ target triple = "thumbv7---eabi"
+
+ @g1 = common global i32 0, align 4
+ @g2 = common global i32 0, align 4
+
+ define i64 @foo(i16 signext %a, i16 signext %b) {
+ entry:
+ %0 = load i32, i32* @g1, align 4
+ %1 = load i32, i32* @g2, align 4
+ %2 = add nuw nsw i32 %0, %0
+ %3 = sdiv i32 %2, %1
+ store i32 %3, i32* @g1, align 4
+ %d = mul nsw i16 %a, %a
+ %e = mul nsw i16 %b, %b
+ %f = add nuw nsw i16 %e, %d
+ %c = zext i16 %f to i32
+ %mul8 = mul nsw i32 %c, %3
+ %mul9 = mul nsw i32 %mul8, %mul8
+ %add10 = add nuw nsw i32 %mul9, %mul8
+ %conv1130 = zext i32 %add10 to i64
+ %mul12 = mul nuw nsw i64 %conv1130, %conv1130
+ %mul13 = mul nsw i64 %mul12, %mul12
+ %add14 = add nuw nsw i64 %mul13, %mul12
+ ret i64 %add14
+ }
+#
+# CHECK: ********** MI Scheduling **********
+# CHECK: SU(2): %vreg2<def> = t2MOVi32imm <ga:@g1>; rGPR:%vreg2
+# CHECK_A9: Latency : 2
+# CHECK_SWIFT: Latency : 2
+# CHECK_R52: Latency : 2
+#
+# CHECK: SU(3): %vreg3<def> = t2LDRi12 %vreg2, 0, pred:14, pred:%noreg; mem:LD4[@g1](dereferenceable) rGPR:%vreg3,%vreg2
+# CHECK_A9: Latency : 1
+# CHECK_SWIFT: Latency : 3
+# CHECK_R52: Latency : 4
+#
+# CHECK: SU(6): %vreg6<def> = t2ADDrr %vreg3, %vreg3, pred:14, pred:%noreg, opt:%noreg; rGPR:%vreg6,%vreg3,%vreg3
+# CHECK_A9: Latency : 1
+# CHECK_SWIFT: Latency : 1
+# CHECK_R52: Latency : 3
+
+# CHECK: SU(7): %vreg7<def> = t2SDIV %vreg6, %vreg5, pred:14, pred:%noreg; rGPR:%vreg7,%vreg6,%vreg5
+# CHECK_A9: Latency : 0
+# CHECK_SWIFT: Latency : 14
+# CHECK_R52: Latency : 8
+
+# CHECK: SU(8): t2STRi12 %vreg7, %vreg2, 0, pred:14, pred:%noreg; mem:ST4[@g1] rGPR:%vreg7,%vreg2
+# CHECK_A9: Latency : 1
+# CHECK_SWIFT: Latency : 0
+# CHECK_R52: Latency : 4
+#
+# CHECK: SU(9): %vreg8<def> = t2SMULBB %vreg1, %vreg1, pred:14, pred:%noreg; rGPR:%vreg8,%vreg1,%vreg1
+# CHECK_A9: Latency : 2
+# CHECK_SWIFT: Latency : 4
+# CHECK_R52: Latency : 4
+#
+# CHECK: SU(10): %vreg9<def> = t2SMLABB %vreg0, %vreg0, %vreg8, pred:14, pred:%noreg; rGPR:%vreg9,%vreg0,%vreg0,%vreg8
+# CHECK_A9: Latency : 2
+# CHECK_SWIFT: Latency : 4
+# CHECK_R52: Latency : 4
+#
+# CHECK: SU(11): %vreg10<def> = t2UXTH %vreg9, 0, pred:14, pred:%noreg; rGPR:%vreg10,%vreg9
+# CHECK_A9: Latency : 1
+# CHECK_SWIFT: Latency : 1
+# CHECK_R52: Latency : 3
+#
+# CHECK: SU(12): %vreg11<def> = t2MUL %vreg10, %vreg7, pred:14, pred:%noreg; rGPR:%vreg11,%vreg10,%vreg7
+# CHECK_A9: Latency : 2
+# CHECK_SWIFT: Latency : 4
+# CHECK_R52: Latency : 4
+#
+# CHECK: SU(13): %vreg12<def> = t2MLA %vreg11, %vreg11, %vreg11, pred:14, pred:%noreg; rGPR:%vreg12,%vreg11,%vreg11,%vreg11
+# CHECK_A9: Latency : 2
+# CHECK_SWIFT: Latency : 4
+# CHECK_R52: Latency : 4
+#
+# CHECK: SU(14): %vreg13<def>, %vreg14<def> = t2UMULL %vreg12, %vreg12, pred:14, pred:%noreg; rGPR:%vreg13,%vreg14,%vreg12,%vreg12
+# CHECK_A9: Latency : 3
+# CHECK_SWIFT: Latency : 5
+# CHECK_R52: Latency : 4
+#
+# CHECK: SU(18): %vreg19<def,tied4>, %vreg20<def,tied5> = t2UMLAL %vreg12, %vreg12, %vreg19<tied0>, %vreg20<tied1>, pred:14, pred:%noreg; rGPR:%vreg19,%vreg20,%vreg12,%vreg12,%vreg20
+# CHECK_A9: Latency : 3
+# CHECK_SWIFT: Latency : 7
+# CHECK_R52: Latency : 4
+# CHECK: ** ScheduleDAGMILive::schedule picking next node
+...
+---
+name: foo
+alignment: 1
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: rgpr }
+ - { id: 1, class: rgpr }
+ - { id: 2, class: rgpr }
+ - { id: 3, class: rgpr }
+ - { id: 4, class: rgpr }
+ - { id: 5, class: rgpr }
+ - { id: 6, class: rgpr }
+ - { id: 7, class: rgpr }
+ - { id: 8, class: rgpr }
+ - { id: 9, class: rgpr }
+ - { id: 10, class: rgpr }
+ - { id: 11, class: rgpr }
+ - { id: 12, class: rgpr }
+ - { id: 13, class: rgpr }
+ - { id: 14, class: rgpr }
+ - { id: 15, class: rgpr }
+ - { id: 16, class: rgpr }
+ - { id: 17, class: rgpr }
+ - { id: 18, class: rgpr }
+ - { id: 19, class: rgpr }
+ - { id: 20, class: rgpr }
+liveins:
+ - { reg: '%r0', virtual-reg: '%0' }
+ - { reg: '%r1', virtual-reg: '%1' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %r0, %r1
+
+ %1 = COPY %r1
+ %0 = COPY %r0
+ %2 = t2MOVi32imm @g1
+ %3 = t2LDRi12 %2, 0, 14, _ :: (dereferenceable load 4 from @g1)
+ %4 = t2MOVi32imm @g2
+ %5 = t2LDRi12 %4, 0, 14, _ :: (dereferenceable load 4 from @g2)
+ %6 = t2ADDrr %3, %3, 14, _, _
+ %7 = t2SDIV %6, %5, 14, _
+ t2STRi12 %7, %2, 0, 14, _ :: (store 4 into @g1)
+ %8 = t2SMULBB %1, %1, 14, _
+ %9 = t2SMLABB %0, %0, %8, 14, _
+ %10 = t2UXTH %9, 0, 14, _
+ %11 = t2MUL %10, %7, 14, _
+ %12 = t2MLA %11, %11, %11, 14, _
+ %13, %14 = t2UMULL %12, %12, 14, _
+ %19, %16 = t2UMULL %13, %13, 14, _
+ %17 = t2MLA %13, %14, %16, 14, _
+ %20 = t2MLA %13, %14, %17, 14, _
+ %19, %20 = t2UMLAL %12, %12, %19, %20, 14, _
+ %r0 = COPY %19
+ %r1 = COPY %20
+ tBX_RET 14, _, implicit %r0, implicit %r1
+
+...
diff --git a/test/CodeGen/ARM/misched-int-basic.mir b/test/CodeGen/ARM/misched-int-basic.mir
new file mode 100644
index 000000000000..f237c0a07b2e
--- /dev/null
+++ b/test/CodeGen/ARM/misched-int-basic.mir
@@ -0,0 +1,128 @@
+# RUN: llc -o /dev/null %s -mtriple=arm-eabi -mcpu=swift -run-pass machine-scheduler -enable-misched -verify-misched \
+# RUN: -debug-only=misched 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK_SWIFT
+# RUN: llc -o /dev/null %s -mtriple=arm-eabi -mcpu=cortex-a9 -run-pass machine-scheduler -enable-misched -verify-misched \
+# RUN: -debug-only=misched 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK_A9
+# RUN: llc -o /dev/null %s -mtriple=arm-eabi -mcpu=cortex-r52 -run-pass machine-scheduler -enable-misched -verify-misched \
+# RUN: -debug-only=misched 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK_R52
+# REQUIRES: asserts
+--- |
+ ; ModuleID = 'foo.ll'
+ source_filename = "foo.ll"
+ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+ target triple = "arm---eabi"
+
+ define i64 @foo(i16 signext %a, i16 signext %b) {
+ entry:
+ %d = mul nsw i16 %a, %a
+ %e = mul nsw i16 %b, %b
+ %f = add nuw nsw i16 %e, %d
+ %c = zext i16 %f to i32
+ %mul8 = mul nsw i32 %c, %c
+ %mul9 = mul nsw i32 %mul8, %mul8
+ %add10 = add nuw nsw i32 %mul9, %mul8
+ %conv1130 = zext i32 %add10 to i64
+ %mul12 = mul nuw nsw i64 %conv1130, %conv1130
+ %mul13 = mul nsw i64 %mul12, %mul12
+ %add14 = add nuw nsw i64 %mul13, %mul12
+ ret i64 %add14
+ }
+
+# CHECK: ********** MI Scheduling **********
+# CHECK: SU(2): %vreg2<def> = SMULBB %vreg1, %vreg1, pred:14, pred:%noreg; GPR:%vreg2,%vreg1,%vreg1
+# CHECK_A9: Latency : 2
+# CHECK_SWIFT: Latency : 4
+# CHECK_R52: Latency : 4
+#
+# CHECK: SU(3): %vreg3<def> = SMLABB %vreg0, %vreg0, %vreg2, pred:14, pred:%noreg; GPRnopc:%vreg3,%vreg0,%vreg0 GPR:%vreg2
+# CHECK_A9: Latency : 2
+# CHECK_SWIFT: Latency : 4
+# CHECK_R52: Latency : 4
+#
+# CHECK: SU(4): %vreg4<def> = UXTH %vreg3, 0, pred:14, pred:%noreg; GPRnopc:%vreg4,%vreg3
+# CHECK_A9: Latency : 1
+# CHECK_SWIFT: Latency : 1
+# CHECK_R52: Latency : 3
+#
+# CHECK: SU(5): %vreg5<def> = MUL %vreg4, %vreg4, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%vreg5,%vreg4,%vreg4
+# CHECK_A9: Latency : 2
+# CHECK_SWIFT: Latency : 4
+# CHECK_R52: Latency : 4
+#
+# CHECK: SU(6): %vreg6<def> = MLA %vreg5, %vreg5, %vreg5, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%vreg6,%vreg5,%vreg5,%vreg5
+# CHECK_A9: Latency : 2
+# CHECK_SWIFT: Latency : 4
+# CHECK_R52: Latency : 4
+#
+# CHECK: SU(7): %vreg7<def>, %vreg8<def> = UMULL %vreg6, %vreg6, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%vreg7,%vreg8,%vreg6,%vreg6
+# CHECK_A9: Latency : 3
+# CHECK_SWIFT: Latency : 5
+# CHECK_R52: Latency : 4
+#
+# CHECK: SU(11): %vreg13<def,tied4>, %vreg14<def,tied5> = UMLAL %vreg6, %vreg6, %vreg13<tied0>, %vreg14<tied1>, pred:14, pred:%noreg, opt:%noreg; GPR:%vreg13 GPRnopc:%vreg14,%vreg6,%vreg6
+# CHECK_SWIFT: Latency : 7
+# CHECK_A9: Latency : 3
+# CHECK_R52: Latency : 4
+# CHECK: ** ScheduleDAGMILive::schedule picking next node
+...
+---
+name: foo
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gprnopc }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gprnopc }
+ - { id: 4, class: gprnopc }
+ - { id: 5, class: gprnopc }
+ - { id: 6, class: gprnopc }
+ - { id: 7, class: gprnopc }
+ - { id: 8, class: gprnopc }
+ - { id: 9, class: gpr }
+ - { id: 10, class: gprnopc }
+ - { id: 11, class: gprnopc }
+ - { id: 12, class: gprnopc }
+ - { id: 13, class: gpr }
+ - { id: 14, class: gprnopc }
+liveins:
+ - { reg: '%r0', virtual-reg: '%0' }
+ - { reg: '%r1', virtual-reg: '%1' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %r0, %r1
+
+ %1 = COPY %r1
+ %0 = COPY %r0
+ %2 = SMULBB %1, %1, 14, _
+ %3 = SMLABB %0, %0, %2, 14, _
+ %4 = UXTH %3, 0, 14, _
+ %5 = MUL %4, %4, 14, _, _
+ %6 = MLA %5, %5, %5, 14, _, _
+ %7, %8 = UMULL %6, %6, 14, _, _
+ %13, %10 = UMULL %7, %7, 14, _, _
+ %11 = MLA %7, %8, %10, 14, _, _
+ %14 = MLA %7, %8, %11, 14, _, _
+ %13, %14 = UMLAL %6, %6, %13, %14, 14, _, _
+ %r0 = COPY %13
+ %r1 = COPY %14
+ BX_RET 14, _, implicit %r0, implicit %r1
+
+...
diff --git a/test/CodeGen/ARM/movt.ll b/test/CodeGen/ARM/movt.ll
index da9b698f2099..f51582031bd5 100644
--- a/test/CodeGen/ARM/movt.ll
+++ b/test/CodeGen/ARM/movt.ll
@@ -2,10 +2,15 @@
; rdar://7317664
; RUN: llc -mtriple=thumbv8m.base %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv8m.base -mcpu=cortex-m23 %s -o - | FileCheck %s --check-prefix=NOMOVT
+; RUN: llc -mtriple=thumbv8m.base -mcpu=cortex-m33 %s -o - | FileCheck %s
define i32 @t(i32 %X) nounwind {
; CHECK-LABEL: t:
; CHECK: movt r{{[0-9]}}, #65535
+; NOMOVT-LABEL: t:
+; NOMOVT-NOT: movt r{{[0-9]}}, #65535
+; NOMOVT: ldr r{{[0-9]}}, .LCP
entry:
%0 = or i32 %X, -65536
ret i32 %0
@@ -14,6 +19,9 @@ entry:
define i32 @t2(i32 %X) nounwind {
; CHECK-LABEL: t2:
; CHECK: movt r{{[0-9]}}, #65534
+; NOMOVT-LABEL: t2:
+; NOMOVT-NOT: movt r{{[0-9]}}, #65534
+; NOMOVT: ldr r{{[0-9]}}, .LCP
entry:
%0 = or i32 %X, -131072
%1 = and i32 %0, -65537
diff --git a/test/CodeGen/ARM/msr-it-block.ll b/test/CodeGen/ARM/msr-it-block.ll
index 0f9ff6b29d79..8d4ddc3a4985 100644
--- a/test/CodeGen/ARM/msr-it-block.ll
+++ b/test/CodeGen/ARM/msr-it-block.ll
@@ -20,8 +20,8 @@ write_reg:
; V6M: msr apsr, {{r[0-9]+}}
; V7M: msr apsr_nzcvq, {{r[0-9]+}}
; V7M: msr apsr_nzcvq, {{r[0-9]+}}
-; V7A: msr APSR_nzcvqg, {{r[0-9]+}}
-; V7A: msr APSR_nzcvqg, {{r[0-9]+}}
+; V7A: msr APSR_nzcvq, {{r[0-9]+}}
+; V7A: msr APSR_nzcvq, {{r[0-9]+}}
br label %exit
exit:
@@ -41,8 +41,8 @@ write_reg:
; V6M: msr apsr, {{r[0-9]+}}
; V7M: msr apsr_nzcvq, {{r[0-9]+}}
; V7M: msr apsr_nzcvq, {{r[0-9]+}}
-; V7A: msr APSR_nzcvqg, {{r[0-9]+}}
-; V7A: msr APSR_nzcvqg, {{r[0-9]+}}
+; V7A: msr APSR_nzcvq, {{r[0-9]+}}
+; V7A: msr APSR_nzcvq, {{r[0-9]+}}
br label %exit
exit:
diff --git a/test/CodeGen/ARM/neon_vabs.ll b/test/CodeGen/ARM/neon_vabs.ll
index d32e7b78879b..109d09582afd 100644
--- a/test/CodeGen/ARM/neon_vabs.ll
+++ b/test/CodeGen/ARM/neon_vabs.ll
@@ -1,8 +1,15 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <4 x i32> @test1(<4 x i32> %a) nounwind {
; CHECK-LABEL: test1:
-; CHECK: vabs.s32 q
+; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vabs.s32 q8, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1neg = sub <4 x i32> zeroinitializer, %a
%b = icmp sgt <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
%abs = select <4 x i1> %b, <4 x i32> %a, <4 x i32> %tmp1neg
@@ -11,7 +18,13 @@ define <4 x i32> @test1(<4 x i32> %a) nounwind {
define <4 x i32> @test2(<4 x i32> %a) nounwind {
; CHECK-LABEL: test2:
-; CHECK: vabs.s32 q
+; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vabs.s32 q8, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1neg = sub <4 x i32> zeroinitializer, %a
%b = icmp sge <4 x i32> %a, zeroinitializer
%abs = select <4 x i1> %b, <4 x i32> %a, <4 x i32> %tmp1neg
@@ -20,7 +33,13 @@ define <4 x i32> @test2(<4 x i32> %a) nounwind {
define <8 x i16> @test3(<8 x i16> %a) nounwind {
; CHECK-LABEL: test3:
-; CHECK: vabs.s16 q
+; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vabs.s16 q8, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1neg = sub <8 x i16> zeroinitializer, %a
%b = icmp sgt <8 x i16> %a, zeroinitializer
%abs = select <8 x i1> %b, <8 x i16> %a, <8 x i16> %tmp1neg
@@ -29,7 +48,13 @@ define <8 x i16> @test3(<8 x i16> %a) nounwind {
define <16 x i8> @test4(<16 x i8> %a) nounwind {
; CHECK-LABEL: test4:
-; CHECK: vabs.s8 q
+; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vabs.s8 q8, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1neg = sub <16 x i8> zeroinitializer, %a
%b = icmp slt <16 x i8> %a, zeroinitializer
%abs = select <16 x i1> %b, <16 x i8> %tmp1neg, <16 x i8> %a
@@ -38,7 +63,13 @@ define <16 x i8> @test4(<16 x i8> %a) nounwind {
define <4 x i32> @test5(<4 x i32> %a) nounwind {
; CHECK-LABEL: test5:
-; CHECK: vabs.s32 q
+; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vabs.s32 q8, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1neg = sub <4 x i32> zeroinitializer, %a
%b = icmp sle <4 x i32> %a, zeroinitializer
%abs = select <4 x i1> %b, <4 x i32> %tmp1neg, <4 x i32> %a
@@ -47,7 +78,11 @@ define <4 x i32> @test5(<4 x i32> %a) nounwind {
define <2 x i32> @test6(<2 x i32> %a) nounwind {
; CHECK-LABEL: test6:
-; CHECK: vabs.s32 d
+; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vabs.s32 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1neg = sub <2 x i32> zeroinitializer, %a
%b = icmp sgt <2 x i32> %a, <i32 -1, i32 -1>
%abs = select <2 x i1> %b, <2 x i32> %a, <2 x i32> %tmp1neg
@@ -56,7 +91,11 @@ define <2 x i32> @test6(<2 x i32> %a) nounwind {
define <2 x i32> @test7(<2 x i32> %a) nounwind {
; CHECK-LABEL: test7:
-; CHECK: vabs.s32 d
+; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vabs.s32 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1neg = sub <2 x i32> zeroinitializer, %a
%b = icmp sge <2 x i32> %a, zeroinitializer
%abs = select <2 x i1> %b, <2 x i32> %a, <2 x i32> %tmp1neg
@@ -65,7 +104,11 @@ define <2 x i32> @test7(<2 x i32> %a) nounwind {
define <4 x i16> @test8(<4 x i16> %a) nounwind {
; CHECK-LABEL: test8:
-; CHECK: vabs.s16 d
+; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vabs.s16 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1neg = sub <4 x i16> zeroinitializer, %a
%b = icmp sgt <4 x i16> %a, zeroinitializer
%abs = select <4 x i1> %b, <4 x i16> %a, <4 x i16> %tmp1neg
@@ -74,7 +117,11 @@ define <4 x i16> @test8(<4 x i16> %a) nounwind {
define <8 x i8> @test9(<8 x i8> %a) nounwind {
; CHECK-LABEL: test9:
-; CHECK: vabs.s8 d
+; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vabs.s8 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1neg = sub <8 x i8> zeroinitializer, %a
%b = icmp slt <8 x i8> %a, zeroinitializer
%abs = select <8 x i1> %b, <8 x i8> %tmp1neg, <8 x i8> %a
@@ -83,7 +130,11 @@ define <8 x i8> @test9(<8 x i8> %a) nounwind {
define <2 x i32> @test10(<2 x i32> %a) nounwind {
; CHECK-LABEL: test10:
-; CHECK: vabs.s32 d
+; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vabs.s32 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1neg = sub <2 x i32> zeroinitializer, %a
%b = icmp sle <2 x i32> %a, zeroinitializer
%abs = select <2 x i1> %b, <2 x i32> %tmp1neg, <2 x i32> %a
@@ -95,7 +146,13 @@ define <2 x i32> @test10(<2 x i32> %a) nounwind {
define <4 x i32> @test11(<4 x i16> %a, <4 x i16> %b) nounwind {
; CHECK-LABEL: test11:
-; CHECK: vabdl.u16 q
+; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d16, r2, r3
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vabdl.u16 q8, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%zext1 = zext <4 x i16> %a to <4 x i32>
%zext2 = zext <4 x i16> %b to <4 x i32>
%diff = sub <4 x i32> %zext1, %zext2
@@ -106,7 +163,13 @@ define <4 x i32> @test11(<4 x i16> %a, <4 x i16> %b) nounwind {
}
define <8 x i16> @test12(<8 x i8> %a, <8 x i8> %b) nounwind {
; CHECK-LABEL: test12:
-; CHECK: vabdl.u8 q
+; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d16, r2, r3
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vabdl.u8 q8, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%zext1 = zext <8 x i8> %a to <8 x i16>
%zext2 = zext <8 x i8> %b to <8 x i16>
%diff = sub <8 x i16> %zext1, %zext2
@@ -118,7 +181,13 @@ define <8 x i16> @test12(<8 x i8> %a, <8 x i8> %b) nounwind {
define <2 x i64> @test13(<2 x i32> %a, <2 x i32> %b) nounwind {
; CHECK-LABEL: test13:
-; CHECK: vabdl.u32 q
+; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d16, r2, r3
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vabdl.u32 q8, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%zext1 = zext <2 x i32> %a to <2 x i64>
%zext2 = zext <2 x i32> %b to <2 x i64>
%diff = sub <2 x i64> %zext1, %zext2
diff --git a/test/CodeGen/ARM/no-cmov2bfi.ll b/test/CodeGen/ARM/no-cmov2bfi.ll
new file mode 100644
index 000000000000..c8b512048905
--- /dev/null
+++ b/test/CodeGen/ARM/no-cmov2bfi.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -mtriple=thumbv7 | FileCheck --check-prefix=CHECK-NOBFI %s
+
+declare zeroext i1 @dummy()
+
+define i8 @test(i8 %a1, i1 %c) {
+; CHECK-NOBFI-NOT: bfi
+; CHECK-NOBFI: bl dummy
+; CHECK-NOBFI: cmp r0, #0
+; CHECK-NOBFI: it ne
+; CHECK-NOBFI: orrne [[REG:r[0-9]+]], [[REG]], #8
+; CHECK-NOBFI: mov r0, [[REG]]
+
+ %1 = and i8 %a1, -9
+ %2 = select i1 %c, i8 %1, i8 %a1
+ %3 = tail call zeroext i1 @dummy()
+ %4 = or i8 %2, 8
+ %ret = select i1 %3, i8 %4, i8 %2
+ ret i8 %ret
+}
diff --git a/test/CodeGen/ARM/phi.ll b/test/CodeGen/ARM/phi.ll
index ff85052175c8..568f7572b32e 100644
--- a/test/CodeGen/ARM/phi.ll
+++ b/test/CodeGen/ARM/phi.ll
@@ -1,5 +1,4 @@
; RUN: llc -mtriple=arm-eabi -mattr=+v4t %s -o - | FileCheck %s
-; RUN: llc -mtriple=arm-eabi -mattr=+v4t -addr-sink-using-gep=1 %s -o - | FileCheck %s
; <rdar://problem/8686347>
diff --git a/test/CodeGen/ARM/pr32545.ll b/test/CodeGen/ARM/pr32545.ll
new file mode 100644
index 000000000000..5bfb01b45983
--- /dev/null
+++ b/test/CodeGen/ARM/pr32545.ll
@@ -0,0 +1,22 @@
+; RUN: llc %s -o - | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "armv7--linux-gnueabi"
+
+; CHECK: vld1.16 {[[DREG:d[0-9]+]][0]}, {{.*}}
+; CHECK: vmovl.u8 [[QREG:q[0-9]+]], [[DREG]]
+; CHECK: vmovl.u16 [[QREG]], [[DREG]]
+
+define void @f(i32 %dstStride, i8* %indvars.iv, <2 x i8>* %zz) {
+entry:
+ br label %for.body
+
+for.body:
+ %tmp = load <2 x i8>, <2 x i8>* %zz, align 1
+ %tmp1 = extractelement <2 x i8> %tmp, i32 0
+ %.lhs.rhs = zext i8 %tmp1 to i32
+ call void @g(i32 %.lhs.rhs)
+ br label %for.body
+}
+
+declare void @g(i32)
diff --git a/test/CodeGen/ARM/prera-ldst-aliasing.mir b/test/CodeGen/ARM/prera-ldst-aliasing.mir
new file mode 100644
index 000000000000..ce37106ed8d2
--- /dev/null
+++ b/test/CodeGen/ARM/prera-ldst-aliasing.mir
@@ -0,0 +1,40 @@
+# RUN: llc -run-pass arm-prera-ldst-opt %s -o - | FileCheck %s
+--- |
+ target triple = "thumbv7---eabi"
+
+ define void @ldrd_strd_aa(i32* noalias nocapture %x, i32* noalias nocapture readonly %y) {
+ entry:
+ %0 = load i32, i32* %y, align 4
+ store i32 %0, i32* %x, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %y, i32 1
+ %1 = load i32, i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32, i32* %x, i32 1
+ store i32 %1, i32* %arrayidx3, align 4
+ ret void
+ }
+...
+---
+name: ldrd_strd_aa
+alignment: 1
+tracksRegLiveness: true
+liveins:
+ - { reg: '%r0', virtual-reg: '%0' }
+ - { reg: '%r1', virtual-reg: '%1' }
+body: |
+ bb.0.entry:
+ liveins: %r0, %r1
+
+ %1 : gpr = COPY %r1
+ %0 : gpr = COPY %r0
+ %2 : gpr = t2LDRi12 %1, 0, 14, _ :: (load 4 from %ir.y)
+ t2STRi12 killed %2, %0, 0, 14, _ :: (store 4 into %ir.x)
+ %3 : gpr = t2LDRi12 %1, 4, 14, _ :: (load 4 from %ir.arrayidx2)
+ t2STRi12 killed %3, %0, 4, 14, _ :: (store 4 into %ir.arrayidx3)
+ ; CHECK: t2LDRi12
+ ; CHECK-NEXT: t2LDRi12
+ ; CHECK-NEXT: t2STRi12
+ ; CHECK-NEXT: t2STRi12
+ tBX_RET 14, _
+
+...
+
diff --git a/test/CodeGen/ARM/prera-ldst-insertpt.mir b/test/CodeGen/ARM/prera-ldst-insertpt.mir
new file mode 100644
index 000000000000..eafcc7c36d33
--- /dev/null
+++ b/test/CodeGen/ARM/prera-ldst-insertpt.mir
@@ -0,0 +1,105 @@
+# RUN: llc -run-pass arm-prera-ldst-opt %s -o - | FileCheck %s
+--- |
+ target triple = "thumbv7---eabi"
+
+ define void @a(i32* nocapture %x, i32 %y, i32 %z) {
+ entry:
+ ret void
+ }
+
+ define void @b(i32* nocapture %x, i32 %y, i32 %z) {
+ entry:
+ ret void
+ }
+...
+---
+# CHECK-LABEL: name: a
+name: a
+alignment: 1
+tracksRegLiveness: true
+liveins:
+ - { reg: '%r0', virtual-reg: '%0' }
+ - { reg: '%r1', virtual-reg: '%1' }
+ - { reg: '%r2', virtual-reg: '%2' }
+body: |
+ bb.0.entry:
+ liveins: %r0, %r1, %r2
+
+ %2 : rgpr = COPY %r2
+ %1 : rgpr = COPY %r1
+ %0 : gpr = COPY %r0
+ %3 : rgpr = t2MUL %2, %2, 14, _
+ %4 : rgpr = t2MUL %1, %1, 14, _
+ %5 : rgpr = t2MOVi32imm -858993459
+ %6 : rgpr, %7 : rgpr = t2UMULL killed %3, %5, 14, _
+ %8 : rgpr, %9 : rgpr = t2UMULL killed %4, %5, 14, _
+ t2STRi12 %1, %0, 0, 14, _ :: (store 4)
+ %10 : rgpr = t2LSLri %2, 1, 14, _, _
+ t2STRi12 killed %10, %0, 4, 14, _ :: (store 4)
+
+ ; Make sure we move the paired stores next to each other, and
+ ; insert them in an appropriate location.
+ ; CHECK: t2STRi12 %1,
+ ; CHECK-NEXT: t2STRi12 killed %10,
+ ; CHECK-NEXT: t2MOVi
+ ; CHECK-NEXT: t2ADDrs
+
+ %11 : rgpr = t2MOVi 55, 14, _, _
+ %12 : gprnopc = t2ADDrs %11, killed %7, 19, 14, _, _
+ t2STRi12 killed %12, %0, 16, 14, _ :: (store 4)
+ %13 : gprnopc = t2ADDrs %11, killed %9, 19, 14, _, _
+ t2STRi12 killed %13, %0, 20, 14, _ :: (store 4)
+
+ ; Make sure we move the paired stores next to each other.
+ ; CHECK: t2STRi12 killed %12,
+ ; CHECK-NEXT: t2STRi12 killed %13,
+
+ tBX_RET 14, _
+---
+# CHECK-LABEL: name: b
+name: b
+alignment: 1
+tracksRegLiveness: true
+liveins:
+ - { reg: '%r0', virtual-reg: '%0' }
+ - { reg: '%r1', virtual-reg: '%1' }
+ - { reg: '%r2', virtual-reg: '%2' }
+body: |
+ bb.0.entry:
+ liveins: %r0, %r1, %r2
+
+ %2 : rgpr = COPY %r2
+ %1 : rgpr = COPY %r1
+ %0 : gpr = COPY %r0
+ t2STRi12 %1, %0, 0, 14, _ :: (store 4)
+ %10 : rgpr = t2LSLri %2, 1, 14, _, _
+ t2STRi12 killed %10, %0, 4, 14, _ :: (store 4)
+ %3 : rgpr = t2MUL %2, %2, 14, _
+ t2STRi12 %3, %0, 8, 14, _ :: (store 4)
+
+ ; Make sure we move the paired stores next to each other, and
+ ; insert them in an appropriate location.
+ ; CHECK: t2STRi12 {{.*}}, 0
+ ; CHECK-NEXT: t2STRi12 {{.*}}, 4
+ ; CHECK-NEXT: t2STRi12 {{.*}}, 8
+ ; CHECK-NEXT: t2MUL
+ ; CHECK-NEXT: t2MOVi32imm
+
+ %4 : rgpr = t2MUL %1, %1, 14, _
+ %5 : rgpr = t2MOVi32imm -858993459
+ %6 : rgpr, %7 : rgpr = t2UMULL killed %3, %5, 14, _
+ %8 : rgpr, %9 : rgpr = t2UMULL killed %4, %5, 14, _
+ %10 : rgpr = t2LSLri %2, 1, 14, _, _
+ %11 : rgpr = t2MOVi 55, 14, _, _
+ %12 : gprnopc = t2ADDrs %11, killed %7, 19, 14, _, _
+ t2STRi12 killed %12, %0, 16, 14, _ :: (store 4)
+ %13 : gprnopc = t2ADDrs %11, killed %9, 19, 14, _, _
+ t2STRi12 killed %13, %0, 20, 14, _ :: (store 4)
+
+ ; Make sure we move the paired stores next to each other.
+ ; CHECK: t2STRi12 {{.*}}, 16
+ ; CHECK-NEXT: t2STRi12 {{.*}}, 20
+
+ tBX_RET 14, _
+
+...
diff --git a/test/CodeGen/ARM/rbit.ll b/test/CodeGen/ARM/rbit.ll
index a2bfeca75526..c8badfb32370 100644
--- a/test/CodeGen/ARM/rbit.ll
+++ b/test/CodeGen/ARM/rbit.ll
@@ -10,7 +10,8 @@ entry:
; CHECK-LABEL: rbit_constant
; CHECK: mov r0, #0
-; CHECK: rbit r0, r0
+; CHECK-NOT: rbit
+; CHECK: bx lr
define i32 @rbit_constant() {
entry:
%rbit.i = call i32 @llvm.arm.rbit(i32 0)
diff --git a/test/CodeGen/ARM/rev.ll b/test/CodeGen/ARM/rev.ll
index f95f97105b9f..a36526ff1fb0 100644
--- a/test/CodeGen/ARM/rev.ll
+++ b/test/CodeGen/ARM/rev.ll
@@ -1,7 +1,7 @@
; RUN: llc -mtriple=arm-eabi -mattr=+v6 %s -o - | FileCheck %s
define i32 @test1(i32 %X) nounwind {
-; CHECK: test1
+; CHECK-LABEL: test1
; CHECK: rev16 r0, r0
%tmp1 = lshr i32 %X, 8
%X15 = bitcast i32 %X to i32
@@ -17,7 +17,7 @@ define i32 @test1(i32 %X) nounwind {
}
define i32 @test2(i32 %X) nounwind {
-; CHECK: test2
+; CHECK-LABEL: test2
; CHECK: revsh r0, r0
%tmp1 = lshr i32 %X, 8
%tmp1.upgrd.1 = trunc i32 %tmp1 to i16
@@ -58,7 +58,7 @@ entry:
; rdar://9609059
define i32 @test5(i32 %i) nounwind readnone {
entry:
-; CHECK: test5
+; CHECK-LABEL: test5
; CHECK: revsh r0, r0
%shl = shl i32 %i, 24
%shr = ashr exact i32 %shl, 16
@@ -71,7 +71,7 @@ entry:
; rdar://9609108
define i32 @test6(i32 %x) nounwind readnone {
entry:
-; CHECK: test6
+; CHECK-LABEL: test6
; CHECK: rev16 r0, r0
%and = shl i32 %x, 8
%shl = and i32 %and, 65280
@@ -88,7 +88,7 @@ entry:
; rdar://9164521
define i32 @test7(i32 %a) nounwind readnone {
entry:
-; CHECK: test7
+; CHECK-LABEL: test7
; CHECK: rev r0, r0
; CHECK: lsr r0, r0, #16
%and = lshr i32 %a, 8
@@ -101,7 +101,7 @@ entry:
define i32 @test8(i32 %a) nounwind readnone {
entry:
-; CHECK: test8
+; CHECK-LABEL: test8
; CHECK: revsh r0, r0
%and = lshr i32 %a, 8
%shr4 = and i32 %and, 255
@@ -115,7 +115,7 @@ entry:
; rdar://10750814
define zeroext i16 @test9(i16 zeroext %v) nounwind readnone {
entry:
-; CHECK: test9
+; CHECK-LABEL: test9
; CHECK: rev16 r0, r0
%conv = zext i16 %v to i32
%shr4 = lshr i32 %conv, 8
diff --git a/test/CodeGen/ARM/select_const.ll b/test/CodeGen/ARM/select_const.ll
new file mode 100644
index 000000000000..48fe572bf8a7
--- /dev/null
+++ b/test/CodeGen/ARM/select_const.ll
@@ -0,0 +1,326 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=arm-eabi-unknown-unknown | FileCheck %s
+
+; Select of constants: control flow / conditional moves can always be replaced by logic+math (but may not be worth it?).
+; Test the zeroext/signext variants of each pattern to see if that makes a difference.
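+;
+; As a sanity check of the underlying identity (hand arithmetic, not
+; compiler output):
+;   select Cond, C1, C2 == C2 + (C1 - C2) * (zext Cond)
+; For Cond = 1 this yields C2 + (C1 - C2) = C1; for Cond = 0 it yields C2.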
+
+; select Cond, 0, 1 --> zext (!Cond)
+
+define i32 @select_0_or_1(i1 %cond) {
+; CHECK-LABEL: select_0_or_1:
+; CHECK: @ BB#0:
+; CHECK-NEXT: mov r1, #1
+; CHECK-NEXT: bic r0, r1, r0
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 0, i32 1
+ ret i32 %sel
+}
+
+define i32 @select_0_or_1_zeroext(i1 zeroext %cond) {
+; CHECK-LABEL: select_0_or_1_zeroext:
+; CHECK: @ BB#0:
+; CHECK-NEXT: eor r0, r0, #1
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 0, i32 1
+ ret i32 %sel
+}
+
+define i32 @select_0_or_1_signext(i1 signext %cond) {
+; CHECK-LABEL: select_0_or_1_signext:
+; CHECK: @ BB#0:
+; CHECK-NEXT: mov r1, #1
+; CHECK-NEXT: bic r0, r1, r0
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 0, i32 1
+ ret i32 %sel
+}
+
+; select Cond, 1, 0 --> zext (Cond)
+
+define i32 @select_1_or_0(i1 %cond) {
+; CHECK-LABEL: select_1_or_0:
+; CHECK: @ BB#0:
+; CHECK-NEXT: and r0, r0, #1
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 1, i32 0
+ ret i32 %sel
+}
+
+define i32 @select_1_or_0_zeroext(i1 zeroext %cond) {
+; CHECK-LABEL: select_1_or_0_zeroext:
+; CHECK: @ BB#0:
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 1, i32 0
+ ret i32 %sel
+}
+
+define i32 @select_1_or_0_signext(i1 signext %cond) {
+; CHECK-LABEL: select_1_or_0_signext:
+; CHECK: @ BB#0:
+; CHECK-NEXT: and r0, r0, #1
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 1, i32 0
+ ret i32 %sel
+}
+
+; select Cond, 0, -1 --> sext (!Cond)
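+;
+; Quick check (hand arithmetic): for Cond = 1, sext(!1) = sext(0) = 0, and
+; for Cond = 0, sext(!0) = sext(1) = -1 (all ones), matching the select.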
+
+define i32 @select_0_or_neg1(i1 %cond) {
+; CHECK-LABEL: select_0_or_neg1:
+; CHECK: @ BB#0:
+; CHECK-NEXT: mov r1, #1
+; CHECK-NEXT: bic r0, r1, r0
+; CHECK-NEXT: rsb r0, r0, #0
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 0, i32 -1
+ ret i32 %sel
+}
+
+define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) {
+; CHECK-LABEL: select_0_or_neg1_zeroext:
+; CHECK: @ BB#0:
+; CHECK-NEXT: eor r0, r0, #1
+; CHECK-NEXT: rsb r0, r0, #0
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 0, i32 -1
+ ret i32 %sel
+}
+
+define i32 @select_0_or_neg1_signext(i1 signext %cond) {
+; CHECK-LABEL: select_0_or_neg1_signext:
+; CHECK: @ BB#0:
+; CHECK-NEXT: mvn r0, r0
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 0, i32 -1
+ ret i32 %sel
+}
+
+define i32 @select_0_or_neg1_alt(i1 %cond) {
+; CHECK-LABEL: select_0_or_neg1_alt:
+; CHECK: @ BB#0:
+; CHECK-NEXT: and r0, r0, #1
+; CHECK-NEXT: sub r0, r0, #1
+; CHECK-NEXT: mov pc, lr
+ %z = zext i1 %cond to i32
+ %add = add i32 %z, -1
+ ret i32 %add
+}
+
+define i32 @select_0_or_neg1_alt_zeroext(i1 zeroext %cond) {
+; CHECK-LABEL: select_0_or_neg1_alt_zeroext:
+; CHECK: @ BB#0:
+; CHECK-NEXT: sub r0, r0, #1
+; CHECK-NEXT: mov pc, lr
+ %z = zext i1 %cond to i32
+ %add = add i32 %z, -1
+ ret i32 %add
+}
+
+define i32 @select_0_or_neg1_alt_signext(i1 signext %cond) {
+; CHECK-LABEL: select_0_or_neg1_alt_signext:
+; CHECK: @ BB#0:
+; CHECK-NEXT: mvn r0, r0
+; CHECK-NEXT: mov pc, lr
+ %z = zext i1 %cond to i32
+ %add = add i32 %z, -1
+ ret i32 %add
+}
+
+; select Cond, -1, 0 --> sext (Cond)
+
+define i32 @select_neg1_or_0(i1 %cond) {
+; CHECK-LABEL: select_neg1_or_0:
+; CHECK: @ BB#0:
+; CHECK-NEXT: and r0, r0, #1
+; CHECK-NEXT: rsb r0, r0, #0
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 -1, i32 0
+ ret i32 %sel
+}
+
+define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) {
+; CHECK-LABEL: select_neg1_or_0_zeroext:
+; CHECK: @ BB#0:
+; CHECK-NEXT: rsb r0, r0, #0
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 -1, i32 0
+ ret i32 %sel
+}
+
+define i32 @select_neg1_or_0_signext(i1 signext %cond) {
+; CHECK-LABEL: select_neg1_or_0_signext:
+; CHECK: @ BB#0:
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 -1, i32 0
+ ret i32 %sel
+}
+
+; select Cond, C+1, C --> add (zext Cond), C
+
+define i32 @select_Cplus1_C(i1 %cond) {
+; CHECK-LABEL: select_Cplus1_C:
+; CHECK: @ BB#0:
+; CHECK-NEXT: mov r1, #41
+; CHECK-NEXT: tst r0, #1
+; CHECK-NEXT: movne r1, #42
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 42, i32 41
+ ret i32 %sel
+}
+
+define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) {
+; CHECK-LABEL: select_Cplus1_C_zeroext:
+; CHECK: @ BB#0:
+; CHECK-NEXT: mov r1, #41
+; CHECK-NEXT: cmp r0, #0
+; CHECK-NEXT: movne r1, #42
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 42, i32 41
+ ret i32 %sel
+}
+
+define i32 @select_Cplus1_C_signext(i1 signext %cond) {
+; CHECK-LABEL: select_Cplus1_C_signext:
+; CHECK: @ BB#0:
+; CHECK-NEXT: mov r1, #41
+; CHECK-NEXT: tst r0, #1
+; CHECK-NEXT: movne r1, #42
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 42, i32 41
+ ret i32 %sel
+}
+
+; select Cond, C, C+1 --> add (sext Cond), C
+
+define i32 @select_C_Cplus1(i1 %cond) {
+; CHECK-LABEL: select_C_Cplus1:
+; CHECK: @ BB#0:
+; CHECK-NEXT: mov r1, #42
+; CHECK-NEXT: tst r0, #1
+; CHECK-NEXT: movne r1, #41
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 41, i32 42
+ ret i32 %sel
+}
+
+define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) {
+; CHECK-LABEL: select_C_Cplus1_zeroext:
+; CHECK: @ BB#0:
+; CHECK-NEXT: mov r1, #42
+; CHECK-NEXT: cmp r0, #0
+; CHECK-NEXT: movne r1, #41
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 41, i32 42
+ ret i32 %sel
+}
+
+define i32 @select_C_Cplus1_signext(i1 signext %cond) {
+; CHECK-LABEL: select_C_Cplus1_signext:
+; CHECK: @ BB#0:
+; CHECK-NEXT: mov r1, #42
+; CHECK-NEXT: tst r0, #1
+; CHECK-NEXT: movne r1, #41
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 41, i32 42
+ ret i32 %sel
+}
+
+; In general, select of 2 constants could be:
+; select Cond, C1, C2 --> add (mul (zext Cond), C1-C2), C2 --> add (and (sext Cond), C1-C2), C2
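+;
+; A sketch of the zext form with the constants used below (C1 = 421, C2 = 42):
+;   %z   = zext i1 %cond to i32
+;   %mul = mul i32 %z, 379       ; C1 - C2
+;   %sel = add i32 %mul, 42      ; C2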
+
+define i32 @select_C1_C2(i1 %cond) {
+; CHECK-LABEL: select_C1_C2:
+; CHECK: @ BB#0:
+; CHECK-NEXT: mov r1, #165
+; CHECK-NEXT: tst r0, #1
+; CHECK-NEXT: orr r1, r1, #256
+; CHECK-NEXT: moveq r1, #42
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 421, i32 42
+ ret i32 %sel
+}
+
+define i32 @select_C1_C2_zeroext(i1 zeroext %cond) {
+; CHECK-LABEL: select_C1_C2_zeroext:
+; CHECK: @ BB#0:
+; CHECK-NEXT: mov r1, #165
+; CHECK-NEXT: cmp r0, #0
+; CHECK-NEXT: orr r1, r1, #256
+; CHECK-NEXT: moveq r1, #42
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 421, i32 42
+ ret i32 %sel
+}
+
+define i32 @select_C1_C2_signext(i1 signext %cond) {
+; CHECK-LABEL: select_C1_C2_signext:
+; CHECK: @ BB#0:
+; CHECK-NEXT: mov r1, #165
+; CHECK-NEXT: tst r0, #1
+; CHECK-NEXT: orr r1, r1, #256
+; CHECK-NEXT: moveq r1, #42
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i32 421, i32 42
+ ret i32 %sel
+}
+
+; 4295032833 = 0x100010001.
+; This becomes an opaque constant via ConstantHoisting, so we don't fold it into the select.
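+; (ConstantHoisting hides expensive, multiply-used constants behind a bitcast
+; so they are materialized once; the select fold therefore never sees the raw
+; mask value.)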
+
+define i64 @opaque_constant1(i1 %cond, i64 %x) {
+; CHECK-LABEL: opaque_constant1:
+; CHECK: @ BB#0:
+; CHECK-NEXT: .save {r4, lr}
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: ands r12, r0, #1
+; CHECK-NEXT: mov lr, #1
+; CHECK-NEXT: mov r0, #23
+; CHECK-NEXT: eor r3, r3, #1
+; CHECK-NEXT: orr lr, lr, #65536
+; CHECK-NEXT: mvnne r0, #3
+; CHECK-NEXT: movne r12, #1
+; CHECK-NEXT: and r4, r0, lr
+; CHECK-NEXT: eor r2, r2, lr
+; CHECK-NEXT: subs r0, r4, #1
+; CHECK-NEXT: sbc r1, r12, #0
+; CHECK-NEXT: orrs r2, r2, r3
+; CHECK-NEXT: movne r0, r4
+; CHECK-NEXT: movne r1, r12
+; CHECK-NEXT: pop {r4, lr}
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i64 -4, i64 23
+ %bo = and i64 %sel, 4295032833 ; 0x100010001
+ %cmp = icmp eq i64 %x, 4295032833
+ %sext = sext i1 %cmp to i64
+ %add = add i64 %bo, %sext
+ ret i64 %add
+}
+
+; 65537 = 0x10001.
+; This becomes an opaque constant via ConstantHoisting, so we don't fold it into the select.
+
+define i64 @opaque_constant2(i1 %cond, i64 %x) {
+; CHECK-LABEL: opaque_constant2:
+; CHECK: @ BB#0:
+; CHECK-NEXT: mov r1, #1
+; CHECK-NEXT: tst r0, #1
+; CHECK-NEXT: orr r1, r1, #65536
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: moveq r0, #23
+; CHECK-NEXT: and r0, r0, r1
+; CHECK-NEXT: mov r1, #0
+; CHECK-NEXT: mov pc, lr
+ %sel = select i1 %cond, i64 65537, i64 23
+ %bo = and i64 %sel, 65537
+ ret i64 %bo
+}
+
diff --git a/test/CodeGen/ARM/select_xform.ll b/test/CodeGen/ARM/select_xform.ll
index 8c1502e14655..09e8ed4bc096 100644
--- a/test/CodeGen/ARM/select_xform.ll
+++ b/test/CodeGen/ARM/select_xform.ll
@@ -223,21 +223,19 @@ entry:
ret i32 %add
}
-; Do not fold the xor into the select
+; Fold the xor into the select.
define i32 @t15(i32 %p) {
entry:
; ARM-LABEL: t15:
-; ARM: mov [[REG:r[0-9]+]], #2
+; ARM: mov [[REG:r[0-9]+]], #3
; ARM: cmp r0, #8
-; ARM: movwgt [[REG:r[0-9]+]], #1
-; ARM: eor r0, [[REG:r[0-9]+]], #1
+; ARM: movwgt [[REG:r[0-9]+]], #0
; T2-LABEL: t15:
-; T2: movs [[REG:r[0-9]+]], #2
+; T2: movs [[REG:r[0-9]+]], #3
; T2: cmp [[REG:r[0-9]+]], #8
; T2: it gt
-; T2: movgt [[REG:r[0-9]+]], #1
-; T2: eor r0, [[REG:r[0-9]+]], #1
+; T2: movgt [[REG:r[0-9]+]], #0
%cmp = icmp sgt i32 %p, 8
%a = select i1 %cmp, i32 1, i32 2
%xor = xor i32 %a, 1
diff --git a/test/CodeGen/ARM/setcc-logic.ll b/test/CodeGen/ARM/setcc-logic.ll
new file mode 100644
index 000000000000..79bae1facb3e
--- /dev/null
+++ b/test/CodeGen/ARM/setcc-logic.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-a8 | FileCheck %s
+
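+; x != -1 && x != 0 folds into one unsigned comparison, (x + 1) >u 1, which is
+; the add/cmp/movwhi sequence checked in the first test.
+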
+define zeroext i1 @ne_neg1_and_ne_zero(i32 %x) nounwind {
+; CHECK-LABEL: ne_neg1_and_ne_zero:
+; CHECK: @ BB#0:
+; CHECK-NEXT: add r1, r0, #1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: cmp r1, #1
+; CHECK-NEXT: movwhi r0, #1
+; CHECK-NEXT: bx lr
+ %cmp1 = icmp ne i32 %x, -1
+ %cmp2 = icmp ne i32 %x, 0
+ %and = and i1 %cmp1, %cmp2
+ ret i1 %and
+}
+
+; PR32401 - https://bugs.llvm.org/show_bug.cgi?id=32401
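+;
+; and_eq/or_ne below exercise the combines
+;   (a == b) & (c == d)  -->  ((a ^ b) | (c ^ d)) == 0
+;   (a != b) | (c != d)  -->  ((a ^ b) | (c ^ d)) != 0
+; which need only a single flag-setting instruction.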
+
+define zeroext i1 @and_eq(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
+; CHECK-LABEL: and_eq:
+; CHECK: @ BB#0:
+; CHECK-NEXT: eor r2, r2, r3
+; CHECK-NEXT: eor r0, r0, r1
+; CHECK-NEXT: orrs r0, r0, r2
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: movweq r0, #1
+; CHECK-NEXT: bx lr
+ %cmp1 = icmp eq i32 %a, %b
+ %cmp2 = icmp eq i32 %c, %d
+ %and = and i1 %cmp1, %cmp2
+ ret i1 %and
+}
+
+define zeroext i1 @or_ne(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
+; CHECK-LABEL: or_ne:
+; CHECK: @ BB#0:
+; CHECK-NEXT: eor r2, r2, r3
+; CHECK-NEXT: eor r0, r0, r1
+; CHECK-NEXT: orrs r0, r0, r2
+; CHECK-NEXT: movwne r0, #1
+; CHECK-NEXT: bx lr
+ %cmp1 = icmp ne i32 %a, %b
+ %cmp2 = icmp ne i32 %c, %d
+ %or = or i1 %cmp1, %cmp2
+ ret i1 %or
+}
+
+define <4 x i1> @and_eq_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) nounwind {
+; CHECK-LABEL: and_eq_vec:
+; CHECK: @ BB#0:
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vmov d19, r2, r3
+; CHECK-NEXT: add r12, sp, #40
+; CHECK-NEXT: add lr, sp, #8
+; CHECK-NEXT: vmov d18, r0, r1
+; CHECK-NEXT: vld1.64 {d16, d17}, [lr]
+; CHECK-NEXT: add r0, sp, #24
+; CHECK-NEXT: vld1.64 {d20, d21}, [r12]
+; CHECK-NEXT: vceq.i32 q8, q9, q8
+; CHECK-NEXT: vld1.64 {d22, d23}, [r0]
+; CHECK-NEXT: vceq.i32 q9, q11, q10
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vmovn.i32 d17, q9
+; CHECK-NEXT: vand d16, d16, d17
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: pop {r11, pc}
+ %cmp1 = icmp eq <4 x i32> %a, %b
+ %cmp2 = icmp eq <4 x i32> %c, %d
+ %and = and <4 x i1> %cmp1, %cmp2
+ ret <4 x i1> %and
+}
+
diff --git a/test/CodeGen/ARM/setcc-sentinals.ll b/test/CodeGen/ARM/setcc-sentinals.ll
deleted file mode 100644
index dc45e0e13881..000000000000
--- a/test/CodeGen/ARM/setcc-sentinals.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 -asm-verbose=false %s -o - | FileCheck %s
-
-define zeroext i1 @test0(i32 %x) nounwind {
-; CHECK-LABEL: test0:
-; CHECK: add [[REG:(r[0-9]+)|(lr)]], r0, #1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: cmp [[REG]], #1
-; CHECK-NEXT: movwhi r0, #1
-; CHECK-NEXT: bx lr
- %cmp1 = icmp ne i32 %x, -1
- %not.cmp = icmp ne i32 %x, 0
- %.cmp1 = and i1 %cmp1, %not.cmp
- ret i1 %.cmp1
-}
diff --git a/test/CodeGen/ARM/single-issue-r52.mir b/test/CodeGen/ARM/single-issue-r52.mir
new file mode 100644
index 000000000000..6c95f7603e6e
--- /dev/null
+++ b/test/CodeGen/ARM/single-issue-r52.mir
@@ -0,0 +1,86 @@
+# RUN: llc -o /dev/null %s -mtriple=arm-eabi -mcpu=cortex-r52 -run-pass machine-scheduler -enable-misched -debug-only=misched -misched-topdown 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=TOPDOWN
+# RUN: llc -o /dev/null %s -mtriple=arm-eabi -mcpu=cortex-r52 -run-pass machine-scheduler -enable-misched -debug-only=misched -misched-bottomup 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=BOTTOMUP
+# REQUIRES: asserts
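+#
+# On Cortex-R52 the VLD4 pseudo is modelled as single-issue, so the scheduler
+# must bump the cycle to end (top-down) or begin (bottom-up) the issue group
+# around SU(1); the TOPDOWN/BOTTOMUP lines check exactly that.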
+--- |
+ ; ModuleID = 'foo.ll'
+ source_filename = "foo.ll"
+ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+ target triple = "arm---eabi"
+
+ %struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
+ ; Function Attrs: nounwind
+ define <8 x i8> @foo(i8* %A) {
+ %tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8.p0i8(i8* %A, i32 8)
+ %tmp2 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 1
+ %tmp4 = add <8 x i8> %tmp2, %tmp3
+ ret <8 x i8> %tmp4
+ }
+ declare %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8.p0i8(i8*, i32)
+
+# CHECK: ********** MI Scheduling **********
+# CHECK: ScheduleDAGMILive::schedule starting
+# CHECK: SU(1): %vreg1<def> = VLD4d8Pseudo %vreg0, 8, pred:14, pred:%noreg; mem:LD32[%A](align=8) QQPR:%vreg1 GPR:%vreg0
+# CHECK: Latency : 8
+# CHECK: Single Issue : true;
+# CHECK: SU(2): %vreg4<def> = VADDv8i8 %vreg1:dsub_0, %vreg1:dsub_1, pred:14, pred:%noreg; DPR:%vreg4 QQPR:%vreg1
+# CHECK: Latency : 5
+# CHECK: Single Issue : false;
+# CHECK: SU(3): %vreg5<def>, %vreg6<def> = VMOVRRD %vreg4, pred:14, pred:%noreg; GPR:%vreg5,%vreg6 DPR:%vreg4
+# CHECK: Latency : 4
+# CHECK: Single Issue : false;
+
+# TOPDOWN: Scheduling SU(1) %vreg1<def> = VLD4d8Pseudo
+# TOPDOWN: Bump cycle to end group
+# TOPDOWN: Scheduling SU(2) %vreg4<def> = VADDv8i8
+
+# BOTTOMUP: Scheduling SU(2) %vreg4<def> = VADDv8i8
+# BOTTOMUP: Scheduling SU(1) %vreg1<def> = VLD4d8Pseudo
+# BOTTOMUP: Bump cycle to begin group
+
+...
+---
+name: foo
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: qqpr }
+ - { id: 2, class: dpr }
+ - { id: 3, class: dpr }
+ - { id: 4, class: dpr }
+ - { id: 5, class: gpr }
+ - { id: 6, class: gpr }
+liveins:
+ - { reg: '%r0', virtual-reg: '%0' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %r0
+
+ %0 = COPY %r0
+ %1 = VLD4d8Pseudo %0, 8, 14, _ :: (load 32 from %ir.A, align 8)
+ %4 = VADDv8i8 %1.dsub_0, %1.dsub_1, 14, _
+ %5, %6 = VMOVRRD %4, 14, _
+ %r0 = COPY %5
+ %r1 = COPY %6
+ BX_RET 14, _, implicit %r0, implicit killed %r1
+
+...
diff --git a/test/CodeGen/ARM/sjljeh-swifterror.ll b/test/CodeGen/ARM/sjljeh-swifterror.ll
new file mode 100644
index 000000000000..aae0e75c98af
--- /dev/null
+++ b/test/CodeGen/ARM/sjljeh-swifterror.ll
@@ -0,0 +1,27 @@
+; RUN: opt -sjljehprepare -verify -S < %s | FileCheck %s
+target datalayout = "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
+target triple = "armv7s-apple-ios7.0"
+
+%swift.error = type opaque
+
+declare void @objc_msgSend() local_unnamed_addr
+
+declare i32 @__objc_personality_v0(...)
+
+; Make sure we don't leave a select on a swifterror argument.
+; CHECK-LABEL: @test
+; CHECK-NOT: select true, %0
+define swiftcc void @test(%swift.error** swifterror) local_unnamed_addr personality i32 (...)* @__objc_personality_v0 {
+entry:
+ %call28.i = invoke i32 bitcast (void ()* @objc_msgSend to i32 (i8*, i8*)*)(i8* undef, i8* undef)
+ to label %invoke.cont.i unwind label %lpad.i
+
+invoke.cont.i:
+ unreachable
+
+lpad.i:
+ %1 = landingpad { i8*, i32 }
+ cleanup
+ resume { i8*, i32 } undef
+}
+
diff --git a/test/CodeGen/ARM/smml.ll b/test/CodeGen/ARM/smml.ll
index aa093192f2b2..4788644cf195 100644
--- a/test/CodeGen/ARM/smml.ll
+++ b/test/CodeGen/ARM/smml.ll
@@ -1,20 +1,15 @@
-; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
-; RUN: llc -mtriple=armv6-eabi %s -o - | FileCheck %s -check-prefix=CHECK-V6
-; RUN: llc -mtriple=armv7-eabi %s -o - | FileCheck %s -check-prefix=CHECK-V7
-; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s -check-prefix=CHECK-THUMB
-; RUN: llc -mtriple=thumbv6-eabi %s -o - | FileCheck %s -check-prefix=CHECK-THUMB
-; RUN: llc -mtriple=thumbv6t2-eabi %s -o - | FileCheck %s -check-prefix=CHECK-THUMBV6T2
-; RUN: llc -mtriple=thumbv7-eabi %s -o - | FileCheck %s -check-prefix=CHECK-THUMBV7
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V4
+; RUN: llc -mtriple=armv6-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V6
+; RUN: llc -mtriple=armv7-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V6
+; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-THUMB
+; RUN: llc -mtriple=thumbv6-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-THUMBV6
+; RUN: llc -mtriple=thumbv6t2-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-THUMBV6T2
+; RUN: llc -mtriple=thumbv7-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-THUMBV6T2
define i32 @Test0(i32 %a, i32 %b, i32 %c) nounwind readnone ssp {
entry:
; CHECK-LABEL: Test0
; CHECK-NOT: smmls
-; CHECK-V6-NOT: smmls
-; CHECK-V7-NOT: smmls
-; CHECK_THUMB-NOT: smmls
-; CHECK-THUMBV6T2-NOT: smmls
-; CHECK-THUMBV7-NOT: smmls
%conv4 = zext i32 %a to i64
%conv1 = sext i32 %b to i64
%conv2 = sext i32 %c to i64
@@ -27,12 +22,11 @@ entry:
define i32 @Test1(i32 %a, i32 %b, i32 %c) {
;CHECK-LABEL: Test1
-;CHECK-NOT: smmls
+;CHECK-V4-NOT: smmls
;CHECK-THUMB-NOT: smmls
+;CHECK-THUMBV6-NOT: smmls
;CHECK-V6: smmls r0, [[Rn:r[1-2]]], [[Rm:r[1-2]]], r0
-;CHECK-V7: smmls r0, [[Rn:r[1-2]]], [[Rm:r[1-2]]], r0
;CHECK-THUMBV6T2: smmls r0, [[Rn:r[1-2]]], [[Rm:r[1-2]]], r0
-;CHECK-THUMBV7: smmls r0, [[Rn:r[1-2]]], [[Rm:r[1-2]]], r0
entry:
%conv = sext i32 %b to i64
%conv1 = sext i32 %c to i64
@@ -47,10 +41,21 @@ entry:
declare void @opaque(i32)
define void @test_used_flags(i32 %in1, i32 %in2) {
-; CHECK-V7-LABEL: test_used_flags:
-; CHECK-V7: smull [[PROD_LO:r[0-9]+]], [[PROD_HI:r[0-9]+]], r0, r1
-; CHECK-V7: rsbs {{.*}}, [[PROD_LO]], #0
-; CHECK-V7: rscs {{.*}}, [[PROD_HI]], #0
+; CHECK-LABEL: test_used_flags:
+; CHECK-THUMB: cmp r1, #0
+; CHECK-THUMB: push {r2}
+; CHECK-THUMB: pop {r3}
+; CHECK-THUMB: ble
+; CHECK-THUMBV6: cmp r1, #0
+; CHECK-THUMBV6: mov r3, r2
+; CHECK-THUMBV6: ble
+; CHECK-V6: smull [[PROD_LO:r[0-9]+]], [[PROD_HI:r[0-9]+]], r0, r1
+; CHECK-V6: rsbs {{.*}}, [[PROD_LO]], #0
+; CHECK-V6: rscs {{.*}}, [[PROD_HI]], #0
+; CHECK-THUMBV6T2: smull [[PROD_LO:r[0-9]+]], [[PROD_HI:r[0-9]+]], r0, r1
+; CHECK-THUMBV6T2: movs [[ZERO:r[0-9]+]], #0
+; CHECK-THUMBV6T2: rsbs {{.*}}, [[PROD_LO]], #0
+; CHECK-THUMBV6T2: sbcs.w {{.*}}, [[ZERO]], [[PROD_HI]]
%in1.64 = sext i32 %in1 to i64
%in2.64 = sext i32 %in2 to i64
%mul = mul nsw i64 %in1.64, %in2.64
diff --git a/test/CodeGen/ARM/smul.ll b/test/CodeGen/ARM/smul.ll
index 3c187aa846d5..2b7be41ddb24 100644
--- a/test/CodeGen/ARM/smul.ll
+++ b/test/CodeGen/ARM/smul.ll
@@ -262,3 +262,32 @@ define i32 @f21(i32 %a, i32 %x, i16 %y) {
%tmp5 = add i32 %a, %tmp4
ret i32 %tmp5
}
+
+@global_b = external global i16, align 2
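+
+; smulwb multiplies a 32-bit register by the bottom halfword of another and
+; keeps the top 32 bits of the 48-bit product, i.e. (sext(a) * sext(b)) >> 16;
+; smlawb additionally accumulates a third operand.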
+
+define i32 @f22(i32 %a) {
+; CHECK-LABEL: f22:
+; CHECK: smulwb r0, r0, r1
+; CHECK-THUMBV6-NOT: smulwb
+ %b = load i16, i16* @global_b, align 2
+ %sext = sext i16 %b to i64
+ %conv = sext i32 %a to i64
+ %mul = mul nsw i64 %sext, %conv
+ %shr37 = lshr i64 %mul, 16
+ %conv4 = trunc i64 %shr37 to i32
+ ret i32 %conv4
+}
+
+define i32 @f23(i32 %a, i32 %c) {
+; CHECK-LABEL: f23:
+; CHECK: smlawb r0, r0, r2, r1
+; CHECK-THUMBV6-NOT: smlawb
+ %b = load i16, i16* @global_b, align 2
+ %sext = sext i16 %b to i64
+ %conv = sext i32 %a to i64
+ %mul = mul nsw i64 %sext, %conv
+ %shr49 = lshr i64 %mul, 16
+ %conv5 = trunc i64 %shr49 to i32
+ %add = add nsw i32 %conv5, %c
+ ret i32 %add
+}
diff --git a/test/CodeGen/ARM/softfp-fabs-fneg.ll b/test/CodeGen/ARM/softfp-fabs-fneg.ll
index b608fb840218..b7c684d35b57 100644
--- a/test/CodeGen/ARM/softfp-fabs-fneg.ll
+++ b/test/CodeGen/ARM/softfp-fabs-fneg.ll
@@ -14,8 +14,7 @@ define double @f(double %a) {
define float @g(float %a) {
; CHECK-LABEL: g:
- ; CHECK-THUMB: bic r0, r0, #-2147483648
- ; CHECK-ARM: bfc r0, #31, #1
+ ; CHECK: bic r0, r0, #-2147483648
; CHECK-NEXT: bx lr
%x = call float @llvm.fabs.f32(float %a) readnone
ret float %x
diff --git a/test/CodeGen/ARM/special-reg-mcore.ll b/test/CodeGen/ARM/special-reg-mcore.ll
index 45e6db9e78fe..1ecf8dc77a70 100644
--- a/test/CodeGen/ARM/special-reg-mcore.ll
+++ b/test/CodeGen/ARM/special-reg-mcore.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumb-none-eabi -mcpu=cortex-m4 2>&1 | FileCheck %s --check-prefix=MCORE
+; RUN: llc < %s -mtriple=thumb-none-eabi -mcpu=cortex-m4 --show-mc-encoding 2>&1 | FileCheck %s --check-prefix=MCORE
; RUN: not llc < %s -mtriple=thumb-none-eabi -mcpu=cortex-m3 2>&1 | FileCheck %s --check-prefix=M3CORE
; RUN: not llc < %s -mtriple=arm-none-eabi -mcpu=cortex-a8 2>&1 | FileCheck %s --check-prefix=ACORE
@@ -8,20 +8,20 @@
define i32 @read_mclass_registers() nounwind {
entry:
; MCORE-LABEL: read_mclass_registers:
- ; MCORE: mrs r0, apsr
- ; MCORE: mrs r1, iapsr
- ; MCORE: mrs r1, eapsr
- ; MCORE: mrs r1, xpsr
- ; MCORE: mrs r1, ipsr
- ; MCORE: mrs r1, epsr
- ; MCORE: mrs r1, iepsr
- ; MCORE: mrs r1, msp
- ; MCORE: mrs r1, psp
- ; MCORE: mrs r1, primask
- ; MCORE: mrs r1, basepri
- ; MCORE: mrs r1, basepri_max
- ; MCORE: mrs r1, faultmask
- ; MCORE: mrs r1, control
+ ; MCORE: mrs r0, apsr @ encoding: [0xef,0xf3,0x00,0x80]
+ ; MCORE: mrs r1, iapsr @ encoding: [0xef,0xf3,0x01,0x81]
+ ; MCORE: mrs r1, eapsr @ encoding: [0xef,0xf3,0x02,0x81]
+ ; MCORE: mrs r1, xpsr @ encoding: [0xef,0xf3,0x03,0x81]
+ ; MCORE: mrs r1, ipsr @ encoding: [0xef,0xf3,0x05,0x81]
+ ; MCORE: mrs r1, epsr @ encoding: [0xef,0xf3,0x06,0x81]
+ ; MCORE: mrs r1, iepsr @ encoding: [0xef,0xf3,0x07,0x81]
+ ; MCORE: mrs r1, msp @ encoding: [0xef,0xf3,0x08,0x81]
+ ; MCORE: mrs r1, psp @ encoding: [0xef,0xf3,0x09,0x81]
+ ; MCORE: mrs r1, primask @ encoding: [0xef,0xf3,0x10,0x81]
+ ; MCORE: mrs r1, basepri @ encoding: [0xef,0xf3,0x11,0x81]
+ ; MCORE: mrs r1, basepri_max @ encoding: [0xef,0xf3,0x12,0x81]
+ ; MCORE: mrs r1, faultmask @ encoding: [0xef,0xf3,0x13,0x81]
+ ; MCORE: mrs r1, control @ encoding: [0xef,0xf3,0x14,0x81]
%0 = call i32 @llvm.read_register.i32(metadata !0)
%1 = call i32 @llvm.read_register.i32(metadata !4)
@@ -56,32 +56,32 @@ entry:
define void @write_mclass_registers(i32 %x) nounwind {
entry:
; MCORE-LABEL: write_mclass_registers:
- ; MCORE: msr apsr_nzcvqg, r0
- ; MCORE: msr apsr_nzcvq, r0
- ; MCORE: msr apsr_g, r0
- ; MCORE: msr apsr_nzcvqg, r0
- ; MCORE: msr iapsr_nzcvqg, r0
- ; MCORE: msr iapsr_nzcvq, r0
- ; MCORE: msr iapsr_g, r0
- ; MCORE: msr iapsr_nzcvqg, r0
- ; MCORE: msr eapsr_nzcvqg, r0
- ; MCORE: msr eapsr_nzcvq, r0
- ; MCORE: msr eapsr_g, r0
- ; MCORE: msr eapsr_nzcvqg, r0
- ; MCORE: msr xpsr_nzcvqg, r0
- ; MCORE: msr xpsr_nzcvq, r0
- ; MCORE: msr xpsr_g, r0
- ; MCORE: msr xpsr_nzcvqg, r0
- ; MCORE: msr ipsr, r0
- ; MCORE: msr epsr, r0
- ; MCORE: msr iepsr, r0
- ; MCORE: msr msp, r0
- ; MCORE: msr psp, r0
- ; MCORE: msr primask, r0
- ; MCORE: msr basepri, r0
- ; MCORE: msr basepri_max, r0
- ; MCORE: msr faultmask, r0
- ; MCORE: msr control, r0
+ ; MCORE: msr apsr_nzcvq, r0 @ encoding: [0x80,0xf3,0x00,0x88]
+ ; MCORE: msr apsr_nzcvq, r0 @ encoding: [0x80,0xf3,0x00,0x88]
+ ; MCORE: msr apsr_g, r0 @ encoding: [0x80,0xf3,0x00,0x84]
+ ; MCORE: msr apsr_nzcvqg, r0 @ encoding: [0x80,0xf3,0x00,0x8c]
+ ; MCORE: msr iapsr_nzcvq, r0 @ encoding: [0x80,0xf3,0x01,0x88]
+ ; MCORE: msr iapsr_nzcvq, r0 @ encoding: [0x80,0xf3,0x01,0x88]
+ ; MCORE: msr iapsr_g, r0 @ encoding: [0x80,0xf3,0x01,0x84]
+ ; MCORE: msr iapsr_nzcvqg, r0 @ encoding: [0x80,0xf3,0x01,0x8c]
+ ; MCORE: msr eapsr_nzcvq, r0 @ encoding: [0x80,0xf3,0x02,0x88]
+ ; MCORE: msr eapsr_nzcvq, r0 @ encoding: [0x80,0xf3,0x02,0x88]
+ ; MCORE: msr eapsr_g, r0 @ encoding: [0x80,0xf3,0x02,0x84]
+ ; MCORE: msr eapsr_nzcvqg, r0 @ encoding: [0x80,0xf3,0x02,0x8c]
+ ; MCORE: msr xpsr_nzcvq, r0 @ encoding: [0x80,0xf3,0x03,0x88]
+ ; MCORE: msr xpsr_nzcvq, r0 @ encoding: [0x80,0xf3,0x03,0x88]
+ ; MCORE: msr xpsr_g, r0 @ encoding: [0x80,0xf3,0x03,0x84]
+ ; MCORE: msr xpsr_nzcvqg, r0 @ encoding: [0x80,0xf3,0x03,0x8c]
+ ; MCORE: msr ipsr, r0 @ encoding: [0x80,0xf3,0x05,0x88]
+ ; MCORE: msr epsr, r0 @ encoding: [0x80,0xf3,0x06,0x88]
+ ; MCORE: msr iepsr, r0 @ encoding: [0x80,0xf3,0x07,0x88]
+ ; MCORE: msr msp, r0 @ encoding: [0x80,0xf3,0x08,0x88]
+ ; MCORE: msr psp, r0 @ encoding: [0x80,0xf3,0x09,0x88]
+ ; MCORE: msr primask, r0 @ encoding: [0x80,0xf3,0x10,0x88]
+ ; MCORE: msr basepri, r0 @ encoding: [0x80,0xf3,0x11,0x88]
+ ; MCORE: msr basepri_max, r0 @ encoding: [0x80,0xf3,0x12,0x88]
+ ; MCORE: msr faultmask, r0 @ encoding: [0x80,0xf3,0x13,0x88]
+ ; MCORE: msr control, r0 @ encoding: [0x80,0xf3,0x14,0x88]
call void @llvm.write_register.i32(metadata !0, i32 %x)
call void @llvm.write_register.i32(metadata !1, i32 %x)
diff --git a/test/CodeGen/ARM/special-reg-v8m-main.ll b/test/CodeGen/ARM/special-reg-v8m-main.ll
index cde296c6b218..ea9c01487d85 100644
--- a/test/CodeGen/ARM/special-reg-v8m-main.ll
+++ b/test/CodeGen/ARM/special-reg-v8m-main.ll
@@ -90,19 +90,19 @@ entry:
define void @write_mclass_registers(i32 %x) nounwind {
entry:
; MAINLINE-LABEL: write_mclass_registers:
- ; MAINLINE: msr apsr_nzcvqg, r0
+ ; MAINLINE: msr apsr_nzcvq, r0
; MAINLINE: msr apsr_nzcvq, r0
; MAINLINE: msr apsr_g, r0
; MAINLINE: msr apsr_nzcvqg, r0
- ; MAINLINE: msr iapsr_nzcvqg, r0
+ ; MAINLINE: msr iapsr_nzcvq, r0
; MAINLINE: msr iapsr_nzcvq, r0
; MAINLINE: msr iapsr_g, r0
; MAINLINE: msr iapsr_nzcvqg, r0
- ; MAINLINE: msr eapsr_nzcvqg, r0
+ ; MAINLINE: msr eapsr_nzcvq, r0
; MAINLINE: msr eapsr_nzcvq, r0
; MAINLINE: msr eapsr_g, r0
; MAINLINE: msr eapsr_nzcvqg, r0
- ; MAINLINE: msr xpsr_nzcvqg, r0
+ ; MAINLINE: msr xpsr_nzcvq, r0
; MAINLINE: msr xpsr_nzcvq, r0
; MAINLINE: msr xpsr_g, r0
; MAINLINE: msr xpsr_nzcvqg, r0
diff --git a/test/CodeGen/ARM/stack_guard_remat.ll b/test/CodeGen/ARM/stack_guard_remat.ll
index 99d499498450..9b5677608d26 100644
--- a/test/CodeGen/ARM/stack_guard_remat.ll
+++ b/test/CodeGen/ARM/stack_guard_remat.ll
@@ -51,20 +51,20 @@
define i32 @test_stack_guard_remat() #0 {
%a1 = alloca [256 x i32], align 4
%1 = bitcast [256 x i32]* %a1 to i8*
- call void @llvm.lifetime.start(i64 1024, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 1024, i8* %1)
%2 = getelementptr inbounds [256 x i32], [256 x i32]* %a1, i32 0, i32 0
call void @foo3(i32* %2) #3
call void asm sideeffect "foo2", "~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{sp},~{lr}"()
- call void @llvm.lifetime.end(i64 1024, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 1024, i8* %1)
ret i32 0
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare void @foo3(i32*)
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
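+
+; (The lifetime intrinsics are now overloaded on the pointer operand's type,
+; hence the .p0i8 suffix in the mangled declarations above.)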
attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/ARM/static-addr-hoisting.ll b/test/CodeGen/ARM/static-addr-hoisting.ll
index 3d47e02f965e..683d607936b8 100644
--- a/test/CodeGen/ARM/static-addr-hoisting.ll
+++ b/test/CodeGen/ARM/static-addr-hoisting.ll
@@ -6,9 +6,9 @@ define void @multiple_store() {
; CHECK: movs [[VAL:r[0-9]+]], #42
; CHECK: movt r[[BASE1]], #15
-; CHECK: str [[VAL]], [r[[BASE1]]]
-; CHECK: str [[VAL]], [r[[BASE1]], #24]
-; CHECK: str.w [[VAL]], [r[[BASE1]], #42]
+; CHECK-DAG: str [[VAL]], [r[[BASE1]]]
+; CHECK-DAG: str [[VAL]], [r[[BASE1]], #24]
+; CHECK-DAG: str.w [[VAL]], [r[[BASE1]], #42]
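+; The three stores share the hoisted base but may be scheduled in any order,
+; hence CHECK-DAG.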
; CHECK: movw r[[BASE2:[0-9]+]], #20394
; CHECK: movt r[[BASE2]], #18
diff --git a/test/CodeGen/ARM/tail-opts.ll b/test/CodeGen/ARM/tail-opts.ll
index 37e9a4af3be5..475b80b3bb07 100644
--- a/test/CodeGen/ARM/tail-opts.ll
+++ b/test/CodeGen/ARM/tail-opts.ll
@@ -65,3 +65,55 @@ altret:
call void @far(i32 1001)
ret void
}
+
+; Use alternating abort functions so that the blocks we wish to merge are not
+; layout successors during branch folding.
+
+; CHECK-LABEL: merge_alternating_aborts:
+; CHECK-NOT: _abort
+; CHECK-NOT: _alt_abort
+; CHECK: bxne lr
+; CHECK-NOT: _abort
+; CHECK-NOT: _alt_abort
+; CHECK: LBB{{.*}}:
+; CHECK: mov lr, pc
+; CHECK: b _alt_abort
+; CHECK-NOT: _abort
+; CHECK-NOT: _alt_abort
+; CHECK: LBB{{.*}}:
+; CHECK: mov lr, pc
+; CHECK: b _abort
+; CHECK-NOT: _abort
+; CHECK-NOT: _alt_abort
+
+declare void @abort()
+declare void @alt_abort()
+
+define void @merge_alternating_aborts() {
+entry:
+ %c1 = call i1 @qux()
+ br i1 %c1, label %cont1, label %abort1
+abort1:
+ call void @abort()
+ unreachable
+cont1:
+ %c2 = call i1 @qux()
+ br i1 %c2, label %cont2, label %abort2
+abort2:
+ call void @alt_abort()
+ unreachable
+cont2:
+ %c3 = call i1 @qux()
+ br i1 %c3, label %cont3, label %abort3
+abort3:
+ call void @abort()
+ unreachable
+cont3:
+ %c4 = call i1 @qux()
+ br i1 %c4, label %cont4, label %abort4
+abort4:
+ call void @alt_abort()
+ unreachable
+cont4:
+ ret void
+}
diff --git a/test/CodeGen/ARM/thumb1-div.ll b/test/CodeGen/ARM/thumb1-div.ll
new file mode 100644
index 000000000000..844dfe6f963c
--- /dev/null
+++ b/test/CodeGen/ARM/thumb1-div.ll
@@ -0,0 +1,67 @@
+; RUN: llc < %s -mtriple=arm-none-eabi -mcpu=cortex-m23 -march=thumb | \
+; RUN: FileCheck %s -check-prefix=CHECK
+
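+; Cortex-M23 implements Armv8-M Baseline, which has hardware sdiv/udiv but no
+; remainder instruction; srem/urem are therefore lowered to a - (a / b) * b.
+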
+define i32 @f1(i32 %a, i32 %b) {
+entry:
+; CHECK-LABEL: f1
+
+; CHECK: sdiv
+ %tmp1 = sdiv i32 %a, %b ; <i32> [#uses=1]
+ ret i32 %tmp1
+}
+
+define i32 @f2(i32 %a, i32 %b) {
+entry:
+; CHECK-LABEL: f2
+; CHECK: udiv
+ %tmp1 = udiv i32 %a, %b ; <i32> [#uses=1]
+ ret i32 %tmp1
+}
+
+define i32 @f3(i32 %a, i32 %b) {
+entry:
+; CHECK-LABEL: f3
+
+
+ %tmp1 = srem i32 %a, %b ; <i32> [#uses=1]
+ ret i32 %tmp1
+; CHECK: sdiv
+; CHECK-NEXT: muls
+; CHECK-NEXT: subs
+}
+
+define i32 @f4(i32 %a, i32 %b) {
+entry:
+; CHECK-LABEL: f4
+
+; CHECK: udiv
+; CHECK-NEXT: muls
+; CHECK-NEXT: subs
+ %tmp1 = urem i32 %a, %b ; <i32> [#uses=1]
+ ret i32 %tmp1
+}
+
+
+define i64 @f5(i64 %a, i64 %b) {
+entry:
+; CHECK-LABEL: f5
+
+; EABI MODE = Remainder in R2-R3, quotient in R0-R1
+; CHECK: __aeabi_ldivmod
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: mov r1, r3
+ %tmp1 = srem i64 %a, %b ; <i64> [#uses=1]
+ ret i64 %tmp1
+}
+
+define i64 @f6(i64 %a, i64 %b) {
+entry:
+; CHECK-LABEL: f6
+
+; EABI MODE = Remainder in R2-R3, quotient in R0-R1
+; CHECK: __aeabi_uldivmod
+; CHECK: mov r0, r2
+; CHECK: mov r1, r3
+ %tmp1 = urem i64 %a, %b ; <i64> [#uses=1]
+ ret i64 %tmp1
+}
diff --git a/test/CodeGen/ARM/unschedule-first-call.ll b/test/CodeGen/ARM/unschedule-first-call.ll
new file mode 100644
index 000000000000..4a218afcc5e1
--- /dev/null
+++ b/test/CodeGen/ARM/unschedule-first-call.ll
@@ -0,0 +1,136 @@
+; RUN: llc < %s
+; PR30911
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "armv6kz--linux-gnueabihf"
+
+; Function Attrs: nounwind
+define void @dradbg(i32, i32, float*, float*, float*, float*, float*) #0 {
+ br i1 undef, label %.critedge, label %8
+
+.critedge: ; preds = %7
+ %.mux2 = select i1 undef, i1 undef, i1 true
+ br label %8
+
+; <label>:8: ; preds = %.critedge, %7
+ %9 = getelementptr float, float* %3, i64 undef
+ %10 = ptrtoint float* %9 to i32
+ %11 = icmp ule i32 %10, undef
+ %12 = getelementptr float, float* %5, i64 undef
+ %13 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 undef, i64 undef)
+ %14 = extractvalue { i64, i1 } %13, 0
+ %15 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %14, i64 1)
+ %16 = extractvalue { i64, i1 } %15, 0
+ %17 = icmp slt i64 1, %16
+ %18 = select i1 %17, i64 1, i64 %16
+ %19 = sext i32 %1 to i64
+ %20 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %18, i64 %19)
+ %21 = extractvalue { i64, i1 } %20, 0
+ %22 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %21, i64 0)
+ %23 = extractvalue { i64, i1 } %22, 0
+ %24 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %23, i64 undef)
+ %25 = extractvalue { i64, i1 } %24, 0
+ %26 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %25, i64 0)
+ %27 = extractvalue { i64, i1 } %26, 0
+ %28 = getelementptr float, float* %3, i64 %27
+ %29 = ptrtoint float* %12 to i32
+ %30 = ptrtoint float* %28 to i32
+ %31 = icmp ule i32 %29, %30
+ %32 = or i1 %11, %31
+ %33 = and i1 false, %32
+ %34 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 0, i64 undef)
+ %35 = extractvalue { i64, i1 } %34, 0
+ %36 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %35, i64 1)
+ %37 = extractvalue { i64, i1 } %36, 0
+ %38 = icmp slt i64 1, %37
+ %39 = select i1 %38, i64 1, i64 %37
+ %40 = sext i32 %1 to i64
+ %41 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %39, i64 %40)
+ %42 = extractvalue { i64, i1 } %41, 0
+ %43 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %42, i64 0)
+ %44 = extractvalue { i64, i1 } %43, 0
+ %45 = sext i32 %0 to i64
+ %46 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %44, i64 %45)
+ %47 = extractvalue { i64, i1 } %46, 0
+ %48 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %47, i64 0)
+ %49 = extractvalue { i64, i1 } %48, 0
+ %50 = getelementptr float, float* %5, i64 %49
+ %51 = ptrtoint float* %50 to i32
+ %52 = icmp ule i32 undef, %51
+ %53 = getelementptr float, float* %4, i64 undef
+ %54 = ptrtoint float* %53 to i32
+ %55 = icmp ule i32 undef, %54
+ %56 = or i1 %52, %55
+ %57 = and i1 %33, %56
+ %58 = getelementptr float, float* %2, i64 undef
+ %59 = ptrtoint float* %58 to i32
+ %60 = icmp ule i32 %59, undef
+ %61 = select i1 undef, i64 undef, i64 0
+ %62 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %61, i64 undef)
+ %63 = extractvalue { i64, i1 } %62, 0
+ %64 = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 undef, i64 1)
+ %65 = extractvalue { i64, i1 } %64, 0
+ %66 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %63, i64 %65)
+ %67 = extractvalue { i64, i1 } %66, 0
+ %68 = sext i32 %0 to i64
+ %69 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %67, i64 %68)
+ %70 = extractvalue { i64, i1 } %69, 0
+ %71 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %70, i64 0)
+ %72 = extractvalue { i64, i1 } %71, 0
+ %73 = getelementptr float, float* %5, i64 %72
+ %74 = ptrtoint float* %73 to i32
+ %75 = icmp ule i32 %74, undef
+ %76 = or i1 %60, %75
+ %77 = and i1 %57, %76
+ %78 = getelementptr float, float* %6, i64 undef
+ %79 = ptrtoint float* %78 to i32
+ %80 = icmp ule i32 %79, undef
+ %81 = getelementptr float, float* %5, i64 undef
+ %82 = ptrtoint float* %81 to i32
+ %83 = icmp ule i32 %82, undef
+ %84 = or i1 %80, %83
+ %85 = and i1 %77, %84
+ %86 = and i1 %85, undef
+ %87 = and i1 %86, undef
+ %88 = and i1 %87, undef
+ %89 = and i1 %88, undef
+ %90 = and i1 %89, undef
+ %91 = and i1 %90, undef
+ %92 = and i1 %91, undef
+ %93 = and i1 %92, undef
+ %94 = and i1 %93, undef
+ %95 = and i1 %94, undef
+ br i1 %95, label %97, label %96
+
+; <label>:96: ; preds = %8
+ br i1 undef, label %.critedge122, label %.critedge110
+
+.critedge122: ; preds = %.critedge122, %96
+ br i1 false, label %.critedge122, label %.critedge110
+
+.critedge110: ; preds = %.critedge219, %97, %.critedge122, %96
+ ret void
+
+; <label>:97: ; preds = %8
+ br i1 undef, label %.critedge219, label %.critedge110
+
+.critedge219: ; preds = %.critedge219, %97
+ %.pr287 = phi i1 [ undef, %.critedge219 ], [ true, %97 ]
+ br i1 %.pr287, label %.critedge219, label %.critedge110
+}
+
+; Function Attrs: nounwind readnone
+declare { i64, i1 } @llvm.smul.with.overflow.i64(i64, i64) #1
+
+; Function Attrs: nounwind readnone
+declare { i64, i1 } @llvm.sadd.with.overflow.i64(i64, i64) #1
+
+; Function Attrs: nounwind readnone
+declare { i64, i1 } @llvm.ssub.with.overflow.i64(i64, i64) #1
+
+attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "polly-optimized" "stack-protector-buffer-size"="8" "target-cpu"="arm1176jzf-s" "target-features"="+dsp,+strict-align,+vfp2" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"clang version 4.0.0 (trunk 285923) (llvm/trunk 285921)"}
diff --git a/test/CodeGen/ARM/v6-jumptable-clobber.mir b/test/CodeGen/ARM/v6-jumptable-clobber.mir
new file mode 100644
index 000000000000..0e9bc42565f3
--- /dev/null
+++ b/test/CodeGen/ARM/v6-jumptable-clobber.mir
@@ -0,0 +1,384 @@
+# RUN: llc -run-pass=arm-cp-islands -o - %s | FileCheck %s
+
+# Test created by tweaking the register allocation after stopping the IR below
+# just before constant islands. We were forwarding the table index to the end of
+# the block, even though the LEA clobbered it.
+
+# CHECK-LABEL: name: foo
+# CHECK: tBR_JT
+  # This order is important. If the jump-table comes first, then the
+  # transformation is valid because the LEA can be removed; see the second test.
+# CHECK: CONSTPOOL_ENTRY
+# CHECK: JUMPTABLE_ADDRS
+
+# CHECK-LABEL: name: bar
+# CHECK: tTBB_JT %pc, killed %r1
+
+--- |
+ ; ModuleID = 'simple.ll'
+ source_filename = "simple.ll"
+ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+ target triple = "thumbv6m-none--eabi"
+
+ define void @foo(i8 %in, i32* %addr) {
+ store i32 12345678, i32* %addr
+ %1 = call i32 @llvm.arm.space(i32 980, i32 undef)
+ %2 = zext i8 %in to i32
+ switch i32 %2, label %default [
+ i32 0, label %d1
+ i32 1, label %d2
+ i32 3, label %d3
+ i32 4, label %d4
+ i32 5, label %d5
+ i32 6, label %d6
+ i32 7, label %d7
+ i32 2, label %d8
+ i32 8, label %d9
+ i32 9, label %d10
+ i32 19, label %d11
+ i32 20, label %d12
+ i32 21, label %d13
+ i32 22, label %d14
+ i32 24, label %d15
+ i32 25, label %d16
+ i32 26, label %d17
+ ]
+
+ default: ; preds = %0
+ unreachable
+
+ d1: ; preds = %0
+ unreachable
+
+ d2: ; preds = %0
+ unreachable
+
+ d3: ; preds = %0
+ unreachable
+
+ d4: ; preds = %0
+ unreachable
+
+ d5: ; preds = %0
+ unreachable
+
+ d6: ; preds = %0
+ unreachable
+
+ d7: ; preds = %0
+ unreachable
+
+ d8: ; preds = %0
+ unreachable
+
+ d9: ; preds = %0
+ unreachable
+
+ d10: ; preds = %0
+ unreachable
+
+ d11: ; preds = %0
+ unreachable
+
+ d12: ; preds = %0
+ unreachable
+
+ d13: ; preds = %0
+ unreachable
+
+ d14: ; preds = %0
+ unreachable
+
+ d15: ; preds = %0
+ unreachable
+
+ d16: ; preds = %0
+ unreachable
+
+ d17: ; preds = %0
+ unreachable
+ }
+
+ define void @bar(i8 %in, i32* %addr) {
+ store i32 12345678, i32* %addr
+ %1 = zext i8 %in to i32
+ switch i32 %1, label %default [
+ i32 0, label %d1
+ i32 1, label %d2
+ i32 3, label %d3
+ i32 4, label %d4
+ i32 5, label %d5
+ i32 6, label %d6
+ i32 7, label %d7
+ i32 2, label %d8
+ i32 8, label %d9
+ i32 9, label %d10
+ i32 19, label %d11
+ i32 20, label %d12
+ i32 21, label %d13
+ i32 22, label %d14
+ i32 24, label %d15
+ i32 25, label %d16
+ i32 26, label %d17
+ ]
+
+ default: ; preds = %0
+ unreachable
+
+ d1: ; preds = %0
+ unreachable
+
+ d2: ; preds = %0
+ unreachable
+
+ d3: ; preds = %0
+ unreachable
+
+ d4: ; preds = %0
+ unreachable
+
+ d5: ; preds = %0
+ unreachable
+
+ d6: ; preds = %0
+ unreachable
+
+ d7: ; preds = %0
+ unreachable
+
+ d8: ; preds = %0
+ unreachable
+
+ d9: ; preds = %0
+ unreachable
+
+ d10: ; preds = %0
+ unreachable
+
+ d11: ; preds = %0
+ unreachable
+
+ d12: ; preds = %0
+ unreachable
+
+ d13: ; preds = %0
+ unreachable
+
+ d14: ; preds = %0
+ unreachable
+
+ d15: ; preds = %0
+ unreachable
+
+ d16: ; preds = %0
+ unreachable
+
+ d17: ; preds = %0
+ unreachable
+ }
+
+ ; Function Attrs: nounwind
+ declare i32 @llvm.arm.space(i32, i32) #0
+
+ ; Function Attrs: nounwind
+ declare void @llvm.stackprotector(i8*, i8**) #0
+
+ attributes #0 = { nounwind }
+
+...
+---
+name: foo
+alignment: 1
+exposesReturnsTwice: false
+noVRegs: true
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%r0' }
+ - { reg: '%r1' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+constants:
+ - id: 0
+ value: i32 12345678
+ alignment: 4
+jumpTable:
+ kind: inline
+ entries:
+ - id: 0
+ blocks: [ '%bb.3.d2', '%bb.9.d8', '%bb.4.d3', '%bb.5.d4',
+ '%bb.6.d5', '%bb.7.d6', '%bb.8.d7', '%bb.10.d9',
+ '%bb.11.d10', '%bb.2.d1', '%bb.2.d1', '%bb.2.d1',
+ '%bb.2.d1', '%bb.2.d1', '%bb.2.d1', '%bb.2.d1',
+ '%bb.2.d1', '%bb.2.d1', '%bb.12.d11', '%bb.13.d12',
+ '%bb.14.d13', '%bb.15.d14', '%bb.2.d1', '%bb.16.d15',
+ '%bb.17.d16', '%bb.18.d17' ]
+body: |
+ bb.0 (%ir-block.0):
+ successors: %bb.2.d1(0x03c3c3c4), %bb.1(0x7c3c3c3c)
+ liveins: %r0, %r1
+
+ %r2 = tLDRpci %const.0, 14, _
+ tSTRi killed %r2, killed %r1, 0, 14, _ :: (store 4 into %ir.addr)
+ dead %r1 = SPACE 980, undef %r0
+ %r0 = tUXTB killed %r0, 14, _
+ %r1, dead %cpsr = tSUBi3 killed %r0, 1, 14, _
+ tCMPi8 %r1, 25, 14, _, implicit-def %cpsr
+ tBcc %bb.2.d1, 8, killed %cpsr
+
+ bb.1 (%ir-block.0):
+ successors: %bb.3.d2(0x07c549d2), %bb.9.d8(0x07c549d2), %bb.4.d3(0x07c549d2), %bb.5.d4(0x07c549d2), %bb.6.d5(0x07c549d2), %bb.7.d6(0x07c549d2), %bb.8.d7(0x07c549d2), %bb.10.d9(0x07c549d2), %bb.11.d10(0x07c549d2), %bb.2.d1(0x03ab62db), %bb.12.d11(0x07c549d2), %bb.13.d12(0x07c549d2), %bb.14.d13(0x07c549d2), %bb.15.d14(0x07c549d2), %bb.16.d15(0x07c549d2), %bb.17.d16(0x07c549d2), %bb.18.d17(0x07c549d2)
+ liveins: %r1
+
+ %r0, dead %cpsr = tLSLri killed %r1, 2, 14, _
+ %r1 = tLEApcrelJT %jump-table.0, 14, _
+ %r0 = tLDRr killed %r0, killed %r1, 14, _ :: (load 4 from jump-table)
+ tBR_JTr killed %r0, %jump-table.0
+
+ bb.3.d2:
+
+ bb.9.d8:
+
+ bb.4.d3:
+
+ bb.5.d4:
+
+ bb.6.d5:
+
+ bb.7.d6:
+
+ bb.8.d7:
+
+ bb.10.d9:
+
+ bb.11.d10:
+
+ bb.2.d1:
+
+ bb.12.d11:
+
+ bb.13.d12:
+
+ bb.14.d13:
+
+ bb.15.d14:
+
+ bb.16.d15:
+
+ bb.17.d16:
+
+ bb.18.d17:
+
+...
+
+---
+name: bar
+alignment: 1
+exposesReturnsTwice: false
+noVRegs: true
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%r0' }
+ - { reg: '%r1' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+constants:
+ - id: 0
+ value: i32 12345678
+ alignment: 4
+jumpTable:
+ kind: inline
+ entries:
+ - id: 0
+ blocks: [ '%bb.3.d2', '%bb.9.d8', '%bb.4.d3', '%bb.5.d4',
+ '%bb.6.d5', '%bb.7.d6', '%bb.8.d7', '%bb.10.d9',
+ '%bb.11.d10', '%bb.2.d1', '%bb.2.d1', '%bb.2.d1',
+ '%bb.2.d1', '%bb.2.d1', '%bb.2.d1', '%bb.2.d1',
+ '%bb.2.d1', '%bb.2.d1', '%bb.12.d11', '%bb.13.d12',
+ '%bb.14.d13', '%bb.15.d14', '%bb.2.d1', '%bb.16.d15',
+ '%bb.17.d16', '%bb.18.d17' ]
+body: |
+ bb.0 (%ir-block.0):
+ successors: %bb.2.d1(0x03c3c3c4), %bb.1(0x7c3c3c3c)
+ liveins: %r0, %r1
+
+ %r2 = tLDRpci %const.0, 14, _
+ tSTRi killed %r2, killed %r1, 0, 14, _ :: (store 4 into %ir.addr)
+ %r0 = tUXTB killed %r0, 14, _
+ %r1, dead %cpsr = tSUBi3 killed %r0, 1, 14, _
+ tCMPi8 %r1, 25, 14, _, implicit-def %cpsr
+ tBcc %bb.2.d1, 8, killed %cpsr
+
+ bb.1 (%ir-block.0):
+ successors: %bb.3.d2(0x07c549d2), %bb.9.d8(0x07c549d2), %bb.4.d3(0x07c549d2), %bb.5.d4(0x07c549d2), %bb.6.d5(0x07c549d2), %bb.7.d6(0x07c549d2), %bb.8.d7(0x07c549d2), %bb.10.d9(0x07c549d2), %bb.11.d10(0x07c549d2), %bb.2.d1(0x03ab62db), %bb.12.d11(0x07c549d2), %bb.13.d12(0x07c549d2), %bb.14.d13(0x07c549d2), %bb.15.d14(0x07c549d2), %bb.16.d15(0x07c549d2), %bb.17.d16(0x07c549d2), %bb.18.d17(0x07c549d2)
+ liveins: %r1
+
+ %r0, dead %cpsr = tLSLri killed %r1, 2, 14, _
+ %r1 = tLEApcrelJT %jump-table.0, 14, _
+ %r0 = tLDRr killed %r0, killed %r1, 14, _ :: (load 4 from jump-table)
+ tBR_JTr killed %r0, %jump-table.0
+
+ bb.3.d2:
+
+ bb.9.d8:
+
+ bb.4.d3:
+
+ bb.5.d4:
+
+ bb.6.d5:
+
+ bb.7.d6:
+
+ bb.8.d7:
+
+ bb.10.d9:
+
+ bb.11.d10:
+
+ bb.2.d1:
+
+ bb.12.d11:
+
+ bb.13.d12:
+
+ bb.14.d13:
+
+ bb.15.d14:
+
+ bb.16.d15:
+
+ bb.17.d16:
+
+ bb.18.d17:
+
+...
diff --git a/test/CodeGen/ARM/v8m-tail-call.ll b/test/CodeGen/ARM/v8m-tail-call.ll
new file mode 100644
index 000000000000..2c2c795838ff
--- /dev/null
+++ b/test/CodeGen/ARM/v8m-tail-call.ll
@@ -0,0 +1,23 @@
+; RUN: llc %s -o - -mtriple=thumbv8m.base | FileCheck %s
+
+define void @test() {
+; CHECK-LABEL: test:
+entry:
+ %call = tail call i32 @foo()
+ %tail = tail call i32 @foo()
+ ret void
+; CHECK: bl foo
+; CHECK: bl foo
+; CHECK-NOT: b foo
+}
+
+define void @test2() {
+; CHECK-LABEL: test2:
+entry:
+ %tail = tail call i32 @foo()
+ ret void
+; CHECK: b foo
+; CHECK-NOT: bl foo
+}
+
+declare i32 @foo()
diff --git a/test/CodeGen/ARM/v8m.base-jumptable_alignment.ll b/test/CodeGen/ARM/v8m.base-jumptable_alignment.ll
new file mode 100644
index 000000000000..673e04687a10
--- /dev/null
+++ b/test/CodeGen/ARM/v8m.base-jumptable_alignment.ll
@@ -0,0 +1,51 @@
+; RUN: llc -filetype=obj -o /dev/null < %s
+; RUN: llc -filetype=asm < %s | FileCheck %s
+
+; ModuleID = 'bugpoint-reduced-simplified.bc'
+source_filename = "bugpoint-output-39ed676.bc"
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "thumbv8m.base-arm-none-eabi"
+
+@crc32_tab = external unnamed_addr global [256 x i32], align 4
+@g_566 = external global i32**, align 4
+
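+; The switch below is lowered via an inline jump table; the CHECK lines verify
+; that a .p2align 2 directive keeps it 4-byte aligned.
+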
+define void @main() {
+entry:
+ %0 = load volatile i32**, i32*** @g_566, align 4
+ br label %func_16.exit.i.i.i
+
+lbl_1394.i.i.i.loopexit: ; preds = %for.cond14.preheader.us.i.i.i
+ unreachable
+
+func_16.exit.i.i.i: ; preds = %entry
+ br i1 undef, label %for.cond7.preheader.i.lr.ph.i.i, label %for.end476.i.i.i.loopexit
+
+for.cond7.preheader.i.lr.ph.i.i: ; preds = %func_16.exit.i.i.i
+ br i1 undef, label %for.end476.i.i.i.loopexit, label %for.cond7.preheader.i.i.preheader.i
+
+for.cond7.preheader.i.i.preheader.i: ; preds = %for.cond7.preheader.i.lr.ph.i.i
+ br label %for.cond14.preheader.us.i.i.i
+
+for.cond7.preheader.i.us.i.i: ; preds = %for.cond7.preheader.i.lr.ph.i.i
+ unreachable
+
+for.cond14.preheader.us.i.i.i: ; preds = %for.inc459.us.i.i.i, %for.cond7.preheader.i.i.preheader.i
+; CHECK: @ BB#4
+; CHECK-NEXT: .p2align 2
+ switch i4 undef, label %func_1.exit.loopexit [
+ i4 0, label %for.inc459.us.i.i.i
+ i4 -5, label %for.inc459.us.i.i.i
+ i4 2, label %lbl_1394.i.i.i.loopexit
+ i4 3, label %for.end476.i.i.i.loopexit
+ ]
+
+for.inc459.us.i.i.i: ; preds = %for.cond14.preheader.us.i.i.i, %for.cond14.preheader.us.i.i.i
+ br label %for.cond14.preheader.us.i.i.i
+
+for.end476.i.i.i.loopexit: ; preds = %for.cond14.preheader.us.i.i.i
+ unreachable
+
+func_1.exit.loopexit: ; preds = %for.cond14.preheader.us.i.i.i
+ %arrayidx.i63.i.i5252 = getelementptr inbounds [256 x i32], [256 x i32]* @crc32_tab, i32 0, i32 undef
+ unreachable
+}
diff --git a/test/CodeGen/ARM/va_arg.ll b/test/CodeGen/ARM/va_arg.ll
index d901a7461fc8..57470694b124 100644
--- a/test/CodeGen/ARM/va_arg.ll
+++ b/test/CodeGen/ARM/va_arg.ll
@@ -4,8 +4,8 @@
; CHECK-LABEL: test1:
; CHECK-NOT: bfc
; CHECK: add [[REG:(r[0-9]+)|(lr)]], {{(r[0-9]+)|(lr)}}, #7
-; CHECK: bfc [[REG]], #0, #3
-; CHECK-NOT: bfc
+; CHECK: bic {{(r[0-9]+)|(lr)}}, [[REG]], #7
+; CHECK-NOT: bic
define i64 @test1(i32 %i, ...) nounwind optsize {
entry:
@@ -20,8 +20,8 @@ entry:
; CHECK-LABEL: test2:
; CHECK-NOT: bfc
; CHECK: add [[REG:(r[0-9]+)|(lr)]], {{(r[0-9]+)|(lr)}}, #7
-; CHECK: bfc [[REG]], #0, #3
-; CHECK-NOT: bfc
+; CHECK: bic {{(r[0-9]+)|(lr)}}, [[REG]], #7
+; CHECK-NOT: bic
; CHECK: bx lr
define double @test2(i32 %a, i32* %b, ...) nounwind optsize {
diff --git a/test/CodeGen/ARM/vcmp-crash.ll b/test/CodeGen/ARM/vcmp-crash.ll
new file mode 100644
index 000000000000..2d3262be5849
--- /dev/null
+++ b/test/CodeGen/ARM/vcmp-crash.ll
@@ -0,0 +1,11 @@
+; RUN: llc -mcpu=cortex-m4 < %s | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "thumbv7em-none--eabi"
+
+; CHECK: vcmp.f32
+define double @f(double %a, double %b, double %c, float %d) {
+ %1 = fcmp oeq float %d, 0.0
+ %2 = select i1 %1, double %a, double %c
+ ret double %2
+}
diff --git a/test/CodeGen/ARM/vldm-liveness.ll b/test/CodeGen/ARM/vldm-liveness.ll
index e114e6970a32..63dc9d61ebcc 100644
--- a/test/CodeGen/ARM/vldm-liveness.ll
+++ b/test/CodeGen/ARM/vldm-liveness.ll
@@ -1,26 +1,13 @@
; RUN: llc -mtriple thumbv7-apple-ios -verify-machineinstrs -o - %s | FileCheck %s
-; ARM load store optimizer was dealing with a sequence like:
-; s1 = VLDRS [r0, 1], Q0<imp-def>
-; s3 = VLDRS [r0, 2], Q0<imp-use,kill>, Q0<imp-def>
-; s0 = VLDRS [r0, 0], Q0<imp-use,kill>, Q0<imp-def>
-; s2 = VLDRS [r0, 4], Q0<imp-use,kill>, Q0<imp-def>
+; Make sure we emit the loads in ascending order, and form a vldmia.
;
-; It decided to combine the {s0, s1} loads into a single instruction in the
-; third position. However, this leaves the instruction defining s3 with a stray
-; imp-use of Q0, which is undefined.
-;
-; The verifier catches this, so this test just makes sure that appropriate
-; liveness flags are added.
-;
-; I believe the change will be tested as long as the vldmia is not the first of
-; the loads. Earlier optimisations may perturb the output over time, but
-; fiddling the indices should be sufficient to restore the test.
+; See vldm-liveness.mir for the bug this file was originally testing.
define arm_aapcs_vfpcc <4 x float> @foo(float* %ptr) {
; CHECK-LABEL: foo:
-; CHECK: vldr s3, [r0, #8]
; CHECK: vldmia r0, {s0, s1}
+; CHECK: vldr s3, [r0, #8]
; CHECK: vldr s2, [r0, #16]
%off0 = getelementptr float, float* %ptr, i32 0
%val0 = load float, float* %off0
diff --git a/test/CodeGen/ARM/vldm-liveness.mir b/test/CodeGen/ARM/vldm-liveness.mir
new file mode 100644
index 000000000000..a85a018a8b1a
--- /dev/null
+++ b/test/CodeGen/ARM/vldm-liveness.mir
@@ -0,0 +1,40 @@
+# RUN: llc -run-pass arm-ldst-opt -verify-machineinstrs %s -o - | FileCheck %s
+# ARM load store optimizer was dealing with a sequence like:
+# s1 = VLDRS [r0, 1], Q0<imp-def>
+# s3 = VLDRS [r0, 2], Q0<imp-use,kill>, Q0<imp-def>
+# s0 = VLDRS [r0, 0], Q0<imp-use,kill>, Q0<imp-def>
+# s2 = VLDRS [r0, 4], Q0<imp-use,kill>, Q0<imp-def>
+#
+# It decided to combine the {s0, s1} loads into a single instruction in the
+# third position. However, this leaves the instruction defining s3 with a stray
+# imp-use of Q0, which is undefined.
+#
+# The verifier catches this, so this test just makes sure that appropriate
+# liveness flags are added.
+--- |
+ target triple = "thumbv7-apple-ios"
+ define arm_aapcs_vfpcc <4 x float> @foo(float* %ptr) {
+ ret <4 x float> undef
+ }
+...
+---
+name: foo
+alignment: 1
+liveins:
+ - { reg: '%r0' }
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %r0
+
+ %s1 = VLDRS %r0, 1, 14, _, implicit-def %q0 :: (load 4)
+ %s3 = VLDRS %r0, 2, 14, _, implicit killed %q0, implicit-def %q0 :: (load 4)
+ ; CHECK: %s3 = VLDRS %r0, 2, 14, _, implicit killed undef %q0, implicit-def %q0 :: (load 4)
+
+ %s0 = VLDRS %r0, 0, 14, _, implicit killed %q0, implicit-def %q0 :: (load 4)
+ ; CHECK: VLDMSIA %r0, 14, _, def %s0, def %s1, implicit-def _
+
+ %s2 = VLDRS killed %r0, 4, 14, _, implicit killed %q0, implicit-def %q0 :: (load 4)
+ ; CHECK: %s2 = VLDRS killed %r0, 4, 14, _, implicit killed %q0, implicit-def %q0 :: (load 4)
+
+ tBX_RET 14, _, implicit %q0
+...
diff --git a/test/CodeGen/ARM/vsel.ll b/test/CodeGen/ARM/vsel.ll
index 746b1b000ef1..daea41399b47 100644
--- a/test/CodeGen/ARM/vsel.ll
+++ b/test/CodeGen/ARM/vsel.ll
@@ -132,7 +132,7 @@ define void @test_vsel32oeq(float %lhs32, float %rhs32, float %a, float %b) {
%tst1 = fcmp oeq float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
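+; (oeq is a quiet predicate, so a plain vcmp suffices; vcmpe would raise an
+; Invalid Operation exception on quiet NaNs.)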
; CHECK: vseleq.f32 s0, s2, s3
ret void
}
@@ -141,7 +141,7 @@ define void @test_vsel64oeq(float %lhs32, float %rhs32, double %a, double %b) {
%tst1 = fcmp oeq float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vseleq.f64 d16, d1, d2
ret void
}
@@ -276,7 +276,7 @@ define void @test_vsel32une(float %lhs32, float %rhs32, float %a, float %b) {
%tst1 = fcmp une float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vseleq.f32 s0, s3, s2
ret void
}
@@ -285,7 +285,7 @@ define void @test_vsel64une(float %lhs32, float %rhs32, double %a, double %b) {
%tst1 = fcmp une float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vseleq.f64 d16, d2, d1
ret void
}
diff --git a/test/CodeGen/ARM/vuzp.ll b/test/CodeGen/ARM/vuzp.ll
index a83a4df5490c..0a5235df319f 100644
--- a/test/CodeGen/ARM/vuzp.ll
+++ b/test/CodeGen/ARM/vuzp.ll
@@ -318,33 +318,29 @@ entry:
ret void
}
-define <8 x i8> @vuzp_trunc(<8 x i8> %in0, <8 x i8> %in1, <8 x i32> %cmp0, <8 x i32> %cmp1) {
+define <8 x i8> @cmpsel_trunc(<8 x i8> %in0, <8 x i8> %in1, <8 x i32> %cmp0, <8 x i32> %cmp1) {
; In order to create the select we need to truncate the vcgt result from a vector of i32 to a vector of i8.
; This results in a build_vector with mismatched types. We will generate two vmovn.i32 instructions to
-; truncate from i32 to i16 and one vuzp to perform the final truncation for i8.
-; CHECK-LABEL: vuzp_trunc:
+; truncate from i32 to i16 and one vmovn.i16 to perform the final truncation for i8.
+; CHECK-LABEL: cmpsel_trunc:
; CHECK: @ BB#0:
; CHECK-NEXT: .save {r4, r5, r11, lr}
; CHECK-NEXT: push {r4, r5, r11, lr}
-; CHECK-NEXT: add r12, sp, #48
-; CHECK-NEXT: add lr, sp, #16
; CHECK-NEXT: add r4, sp, #64
; CHECK-NEXT: add r5, sp, #32
+; CHECK-NEXT: add r12, sp, #48
+; CHECK-NEXT: add lr, sp, #16
; CHECK-NEXT: vld1.64 {d16, d17}, [r5]
; CHECK-NEXT: vld1.64 {d18, d19}, [r4]
; CHECK-NEXT: vld1.64 {d20, d21}, [lr]
; CHECK-NEXT: vld1.64 {d22, d23}, [r12]
; CHECK-NEXT: vcgt.u32 q8, q9, q8
; CHECK-NEXT: vcgt.u32 q9, q11, q10
-; CHECK-NEXT: vmovn.i32 d16, q8
-; CHECK-NEXT: vmovn.i32 d17, q9
-; CHECK-NEXT: vmov.i8 d18, #0x7
-; CHECK-NEXT: vmov d19, r0, r1
-; CHECK-NEXT: vuzp.8 d17, d16
-; CHECK-NEXT: vneg.s8 d16, d18
-; CHECK-NEXT: vshl.i8 d17, d17, #7
+; CHECK-NEXT: vmovn.i32 d17, q8
+; CHECK-NEXT: vmovn.i32 d16, q9
; CHECK-NEXT: vmov d18, r2, r3
-; CHECK-NEXT: vshl.s8 d16, d17, d16
+; CHECK-NEXT: vmov d19, r0, r1
+; CHECK-NEXT: vmovn.i16 d16, q8
; CHECK-NEXT: vbsl d16, d19, d18
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: pop {r4, r5, r11, lr}
diff --git a/test/CodeGen/AVR/inline-asm/inline-asm.ll b/test/CodeGen/AVR/inline-asm/inline-asm.ll
index 678395a3e5c4..88d0c3af2e88 100644
--- a/test/CodeGen/AVR/inline-asm/inline-asm.ll
+++ b/test/CodeGen/AVR/inline-asm/inline-asm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=avr -mattr=movw | FileCheck %s
+; RUN: llc < %s -march=avr -mattr=movw -no-integrated-as | FileCheck %s
; CHECK-LABEL: no_operands:
define void @no_operands() {
diff --git a/test/CodeGen/AVR/inline-asm/inline-asm2.ll b/test/CodeGen/AVR/inline-asm/inline-asm2.ll
index 083390999b8a..74365b42c60e 100644
--- a/test/CodeGen/AVR/inline-asm/inline-asm2.ll
+++ b/test/CodeGen/AVR/inline-asm/inline-asm2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=avr | FileCheck %s
+; RUN: llc < %s -march=avr -no-integrated-as | FileCheck %s
; CHECK-LABEL: foo
define void @foo(i16 %a) {
diff --git a/test/CodeGen/AVR/inline-asm/multibyte.ll b/test/CodeGen/AVR/inline-asm/multibyte.ll
index 34cdf5d006e9..a7c8f6e75f0f 100644
--- a/test/CodeGen/AVR/inline-asm/multibyte.ll
+++ b/test/CodeGen/AVR/inline-asm/multibyte.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=avr | FileCheck %s
+; RUN: llc < %s -march=avr -no-integrated-as | FileCheck %s
; XFAIL: *
; Multibyte references
diff --git a/test/CodeGen/AVR/intrinsics/stacksave-restore.ll b/test/CodeGen/AVR/intrinsics/stacksave-restore.ll
new file mode 100644
index 000000000000..3985f49b92f5
--- /dev/null
+++ b/test/CodeGen/AVR/intrinsics/stacksave-restore.ll
@@ -0,0 +1,27 @@
+; RUN: llc -O0 < %s -march=avr | FileCheck %s
+
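+; On AVR the stack pointer is the I/O register pair SPL (61) / SPH (62), and
+; SREG is I/O register 63; stackrestore must rewrite SPL/SPH with interrupts
+; disabled, which is the in/cli/out sequence checked below.
+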
+; CHECK-LABEL: foo
+define void @foo() {
+entry:
+ br label %save
+
+; CHECK-LABEL: save
+; CHECK: in [[SREG1:r[0-9]+]], 61
+; CHECK-NEXT: in [[SREG2:r[0-9]+]], 62
+save:
+ %saved = call i8* @llvm.stacksave()
+ br label %restore
+
+; CHECK-LABEL: restore
+; CHECK: in r0, 63
+; CHECK-NEXT: cli
+; CHECK-NEXT: out 62, [[SREG2]]
+; CHECK-NEXT: out 63, r0
+; CHECK-NEXT: out 61, [[SREG1]]
+restore:
+ call void @llvm.stackrestore(i8* %saved)
+ ret void
+}
+
+declare i8* @llvm.stacksave()
+declare void @llvm.stackrestore(i8* %ptr)
diff --git a/test/CodeGen/AVR/no-print-operand-twice.ll b/test/CodeGen/AVR/no-print-operand-twice.ll
new file mode 100644
index 000000000000..8326507768ba
--- /dev/null
+++ b/test/CodeGen/AVR/no-print-operand-twice.ll
@@ -0,0 +1,8 @@
+; RUN: llc -no-integrated-as -march=avr < %s | FileCheck %s
+
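+; The ${0:c} operand modifier prints the immediate as a bare constant; the
+; CHECK line would fail if the operand text were emitted twice.
+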
+define void @test() {
+entry:
+; CHECK: /* result: 68719476738 */
+ tail call void asm sideeffect "/* result: ${0:c} */", "i,~{dirflag},~{fpsr},~{flags}"( i64 68719476738 )
+ ret void
+}
diff --git a/test/CodeGen/AVR/pseudo/ADCWRdRr.mir b/test/CodeGen/AVR/pseudo/ADCWRdRr.mir
index 475d5b39299c..b1fc792d6594 100644
--- a/test/CodeGen/AVR/pseudo/ADCWRdRr.mir
+++ b/test/CodeGen/AVR/pseudo/ADCWRdRr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit add with carry pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/ADDWRdRr.mir b/test/CodeGen/AVR/pseudo/ADDWRdRr.mir
index 2205febcc933..5743b1536330 100644
--- a/test/CodeGen/AVR/pseudo/ADDWRdRr.mir
+++ b/test/CodeGen/AVR/pseudo/ADDWRdRr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit add pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/ANDIWRdK.mir b/test/CodeGen/AVR/pseudo/ANDIWRdK.mir
index 5af8db159519..bcea4e6dfe27 100644
--- a/test/CodeGen/AVR/pseudo/ANDIWRdK.mir
+++ b/test/CodeGen/AVR/pseudo/ANDIWRdK.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit ANDI pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/ANDWRdRr.mir b/test/CodeGen/AVR/pseudo/ANDWRdRr.mir
index c9458e9ba5d6..f6b060a5d734 100644
--- a/test/CodeGen/AVR/pseudo/ANDWRdRr.mir
+++ b/test/CodeGen/AVR/pseudo/ANDWRdRr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit AND pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/ASRWRd.mir b/test/CodeGen/AVR/pseudo/ASRWRd.mir
index 3e809564ca1c..5253dcd87f13 100644
--- a/test/CodeGen/AVR/pseudo/ASRWRd.mir
+++ b/test/CodeGen/AVR/pseudo/ASRWRd.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
--- |
target triple = "avr--"
diff --git a/test/CodeGen/AVR/pseudo/COMWRd.mir b/test/CodeGen/AVR/pseudo/COMWRd.mir
index 282d601686ad..58ff7af7cb3c 100644
--- a/test/CodeGen/AVR/pseudo/COMWRd.mir
+++ b/test/CodeGen/AVR/pseudo/COMWRd.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit COM pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/CPCWRdRr.mir b/test/CodeGen/AVR/pseudo/CPCWRdRr.mir
index 2081aa0b5ee4..c0ab60e89291 100644
--- a/test/CodeGen/AVR/pseudo/CPCWRdRr.mir
+++ b/test/CodeGen/AVR/pseudo/CPCWRdRr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit CPCW pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/CPWRdRr.mir b/test/CodeGen/AVR/pseudo/CPWRdRr.mir
index 7e25e7fe2272..c93c99151a49 100644
--- a/test/CodeGen/AVR/pseudo/CPWRdRr.mir
+++ b/test/CodeGen/AVR/pseudo/CPWRdRr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit CPW pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/EORWRdRr.mir b/test/CodeGen/AVR/pseudo/EORWRdRr.mir
index 8769c12cbb11..de53c2d077ed 100644
--- a/test/CodeGen/AVR/pseudo/EORWRdRr.mir
+++ b/test/CodeGen/AVR/pseudo/EORWRdRr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit EOR pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/FRMIDX.mir b/test/CodeGen/AVR/pseudo/FRMIDX.mir
index 47a9397fa6b0..b56122a43ada 100644
--- a/test/CodeGen/AVR/pseudo/FRMIDX.mir
+++ b/test/CodeGen/AVR/pseudo/FRMIDX.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# TODO: Write this test.
# This instruction isn't expanded by the pseudo expansion pass, but
diff --git a/test/CodeGen/AVR/pseudo/INWRdA.mir b/test/CodeGen/AVR/pseudo/INWRdA.mir
index a801598faddd..1b2d7fa0f539 100644
--- a/test/CodeGen/AVR/pseudo/INWRdA.mir
+++ b/test/CodeGen/AVR/pseudo/INWRdA.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
--- |
target triple = "avr--"
diff --git a/test/CodeGen/AVR/pseudo/LDDWRdPtrQ.mir b/test/CodeGen/AVR/pseudo/LDDWRdPtrQ.mir
index 781cb5d82433..5ff2ef1742e0 100644
--- a/test/CodeGen/AVR/pseudo/LDDWRdPtrQ.mir
+++ b/test/CodeGen/AVR/pseudo/LDDWRdPtrQ.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 %s -o - 2>&1 -march=avr | FileCheck %s
+# RUN: llc -O0 %s -o - -march=avr | FileCheck %s
# This test checks the expansion of the 16-bit 'LDDWRdPtrQ' pseudo instruction.
@@ -12,6 +12,7 @@
---
name: test_lddwrdptrq
+tracksRegLiveness: true
body: |
bb.0.entry:
@@ -20,5 +21,5 @@ body: |
; CHECK: ldd r30, Y+10
; CHECK-NEXT: ldd r31, Y+11
- early-clobber %r31r30 = LDDWRdPtrQ %r29r28, 10
+ early-clobber %r31r30 = LDDWRdPtrQ undef %r29r28, 10
...
diff --git a/test/CodeGen/AVR/pseudo/LDDWRdYQ.mir b/test/CodeGen/AVR/pseudo/LDDWRdYQ.mir
index 472f498b912c..831c75b38b17 100644
--- a/test/CodeGen/AVR/pseudo/LDDWRdYQ.mir
+++ b/test/CodeGen/AVR/pseudo/LDDWRdYQ.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 %s -o - 2>&1 -march=avr | FileCheck %s
+# RUN: llc -O0 %s -o - -march=avr | FileCheck %s
# This test checks the expansion of the 16-bit 'LDDWRdYQ' pseudo instruction.
@@ -12,6 +12,7 @@
---
name: test_lddwrdyq
+tracksRegLiveness: true
body: |
bb.0.entry:
@@ -20,5 +21,5 @@ body: |
; CHECK: ldd r30, Y+1
; CHECK-NEXT: ldd r31, Y+2
- early-clobber %r31r30 = LDDWRdYQ %r29r28, 1
+ early-clobber %r31r30 = LDDWRdYQ undef %r29r28, 1
...
diff --git a/test/CodeGen/AVR/pseudo/LDIWRdK.mir b/test/CodeGen/AVR/pseudo/LDIWRdK.mir
index 23d16d9c5692..f4788adf20b4 100644
--- a/test/CodeGen/AVR/pseudo/LDIWRdK.mir
+++ b/test/CodeGen/AVR/pseudo/LDIWRdK.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit LDIWRdK pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/LDSWRdK.mir b/test/CodeGen/AVR/pseudo/LDSWRdK.mir
index aa4883634d74..b813923abcb2 100644
--- a/test/CodeGen/AVR/pseudo/LDSWRdK.mir
+++ b/test/CodeGen/AVR/pseudo/LDSWRdK.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit LDSWRdK pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/LDWRdPtr.mir b/test/CodeGen/AVR/pseudo/LDWRdPtr.mir
index aaf9f182f2be..6db615878b95 100644
--- a/test/CodeGen/AVR/pseudo/LDWRdPtr.mir
+++ b/test/CodeGen/AVR/pseudo/LDWRdPtr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit LDWRdPtr pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/LDWRdPtrPd.mir b/test/CodeGen/AVR/pseudo/LDWRdPtrPd.mir
index f304cc220cbc..eb65c6538d11 100644
--- a/test/CodeGen/AVR/pseudo/LDWRdPtrPd.mir
+++ b/test/CodeGen/AVR/pseudo/LDWRdPtrPd.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit LDWRdPtrPd pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/LDWRdPtrPi.mir b/test/CodeGen/AVR/pseudo/LDWRdPtrPi.mir
index 9153be0bf1c9..50bad2a4c765 100644
--- a/test/CodeGen/AVR/pseudo/LDWRdPtrPi.mir
+++ b/test/CodeGen/AVR/pseudo/LDWRdPtrPi.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit LDWRdPtrPi pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/LSLWRd.mir b/test/CodeGen/AVR/pseudo/LSLWRd.mir
index 441939856aef..537944866e53 100644
--- a/test/CodeGen/AVR/pseudo/LSLWRd.mir
+++ b/test/CodeGen/AVR/pseudo/LSLWRd.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
--- |
target triple = "avr--"
diff --git a/test/CodeGen/AVR/pseudo/LSRWRd.mir b/test/CodeGen/AVR/pseudo/LSRWRd.mir
index f5ffb93f4035..a1a513f4e364 100644
--- a/test/CodeGen/AVR/pseudo/LSRWRd.mir
+++ b/test/CodeGen/AVR/pseudo/LSRWRd.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
--- |
target triple = "avr--"
diff --git a/test/CodeGen/AVR/pseudo/ORIWRdK.mir b/test/CodeGen/AVR/pseudo/ORIWRdK.mir
index 92bc36769eb8..d77a6ba88488 100644
--- a/test/CodeGen/AVR/pseudo/ORIWRdK.mir
+++ b/test/CodeGen/AVR/pseudo/ORIWRdK.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit OR pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/ORWRdRr.mir b/test/CodeGen/AVR/pseudo/ORWRdRr.mir
index f7a377ec860b..834c21cba8f9 100644
--- a/test/CodeGen/AVR/pseudo/ORWRdRr.mir
+++ b/test/CodeGen/AVR/pseudo/ORWRdRr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit OR pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/OUTWARr.mir b/test/CodeGen/AVR/pseudo/OUTWARr.mir
index 85e9f5259a87..99abad1c31b8 100644
--- a/test/CodeGen/AVR/pseudo/OUTWARr.mir
+++ b/test/CodeGen/AVR/pseudo/OUTWARr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
--- |
target triple = "avr--"
diff --git a/test/CodeGen/AVR/pseudo/POPWRd.mir b/test/CodeGen/AVR/pseudo/POPWRd.mir
index 6794742bf54a..8bd7fe68727c 100644
--- a/test/CodeGen/AVR/pseudo/POPWRd.mir
+++ b/test/CodeGen/AVR/pseudo/POPWRd.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
--- |
target triple = "avr--"
diff --git a/test/CodeGen/AVR/pseudo/PUSHWRr.mir b/test/CodeGen/AVR/pseudo/PUSHWRr.mir
index 93920867030f..ec94ecbf5bb6 100644
--- a/test/CodeGen/AVR/pseudo/PUSHWRr.mir
+++ b/test/CodeGen/AVR/pseudo/PUSHWRr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
--- |
target triple = "avr--"
diff --git a/test/CodeGen/AVR/pseudo/SBCIWRdK.mir b/test/CodeGen/AVR/pseudo/SBCIWRdK.mir
index 9152c6d91266..644e6106ee79 100644
--- a/test/CodeGen/AVR/pseudo/SBCIWRdK.mir
+++ b/test/CodeGen/AVR/pseudo/SBCIWRdK.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit subtraction with carry pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/SBCWRdRr.mir b/test/CodeGen/AVR/pseudo/SBCWRdRr.mir
index 9159906b76a0..5cf5d33252c7 100644
--- a/test/CodeGen/AVR/pseudo/SBCWRdRr.mir
+++ b/test/CodeGen/AVR/pseudo/SBCWRdRr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit subtraction with carry pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/SEXT.mir b/test/CodeGen/AVR/pseudo/SEXT.mir
index 069eb883dcc1..0d10358c10e1 100644
--- a/test/CodeGen/AVR/pseudo/SEXT.mir
+++ b/test/CodeGen/AVR/pseudo/SEXT.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
--- |
target triple = "avr--"
diff --git a/test/CodeGen/AVR/pseudo/STDWPtrQRr.mir b/test/CodeGen/AVR/pseudo/STDWPtrQRr.mir
index ff2fdb9155e1..9252997d489e 100644
--- a/test/CodeGen/AVR/pseudo/STDWPtrQRr.mir
+++ b/test/CodeGen/AVR/pseudo/STDWPtrQRr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
--- |
target triple = "avr--"
diff --git a/test/CodeGen/AVR/pseudo/STSWKRr.mir b/test/CodeGen/AVR/pseudo/STSWKRr.mir
index ccf852271ae9..18f101808094 100644
--- a/test/CodeGen/AVR/pseudo/STSWKRr.mir
+++ b/test/CodeGen/AVR/pseudo/STSWKRr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit STSWKRr pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/STWPtrPdRr.mir b/test/CodeGen/AVR/pseudo/STWPtrPdRr.mir
index 0d0d9e909e4a..d884d2121c2c 100644
--- a/test/CodeGen/AVR/pseudo/STWPtrPdRr.mir
+++ b/test/CodeGen/AVR/pseudo/STWPtrPdRr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
--- |
target triple = "avr--"
diff --git a/test/CodeGen/AVR/pseudo/STWPtrPiRr.mir b/test/CodeGen/AVR/pseudo/STWPtrPiRr.mir
index a436d9b109bb..962776aa6330 100644
--- a/test/CodeGen/AVR/pseudo/STWPtrPiRr.mir
+++ b/test/CodeGen/AVR/pseudo/STWPtrPiRr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
--- |
target triple = "avr--"
diff --git a/test/CodeGen/AVR/pseudo/STWPtrRr.mir b/test/CodeGen/AVR/pseudo/STWPtrRr.mir
index f85f4f8a0452..efed707bfe8a 100644
--- a/test/CodeGen/AVR/pseudo/STWPtrRr.mir
+++ b/test/CodeGen/AVR/pseudo/STWPtrRr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit STWPtrRr pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/SUBIWRdK.mir b/test/CodeGen/AVR/pseudo/SUBIWRdK.mir
index 95c68c0a122a..c7d88d7ab3f6 100644
--- a/test/CodeGen/AVR/pseudo/SUBIWRdK.mir
+++ b/test/CodeGen/AVR/pseudo/SUBIWRdK.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit subtraction pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/SUBWRdRr.mir b/test/CodeGen/AVR/pseudo/SUBWRdRr.mir
index 9892cf5b7f33..b12b0e5349e2 100644
--- a/test/CodeGen/AVR/pseudo/SUBWRdRr.mir
+++ b/test/CodeGen/AVR/pseudo/SUBWRdRr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
# This test checks the expansion of the 16-bit subtraction pseudo instruction.
diff --git a/test/CodeGen/AVR/pseudo/ZEXT.mir b/test/CodeGen/AVR/pseudo/ZEXT.mir
index 069eb883dcc1..0d10358c10e1 100644
--- a/test/CodeGen/AVR/pseudo/ZEXT.mir
+++ b/test/CodeGen/AVR/pseudo/ZEXT.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
--- |
target triple = "avr--"
diff --git a/test/CodeGen/AVR/pseudo/expand-lddw-dst-src-same.mir b/test/CodeGen/AVR/pseudo/expand-lddw-dst-src-same.mir
index 5ed95ad76a7f..8427a2bfb4ed 100644
--- a/test/CodeGen/AVR/pseudo/expand-lddw-dst-src-same.mir
+++ b/test/CodeGen/AVR/pseudo/expand-lddw-dst-src-same.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 %s -o - 2>&1 -march=avr | FileCheck %s
+# RUN: llc -O0 %s -o - -march=avr | FileCheck %s
# This test ensures that the pseudo expander can correctly handle the case
# where we are expanding a 16-bit LDD instruction where the source and
@@ -18,6 +18,7 @@
...
---
name: test_lddw
+tracksRegLiveness: true
stack:
- { id: 0, type: spill-slot, offset: -4, size: 1, alignment: 1, callee-saved-register: '%r28' }
body: |
diff --git a/test/CodeGen/AVR/relax-mem/STDWPtrQRr.mir b/test/CodeGen/AVR/relax-mem/STDWPtrQRr.mir
index b43c77508328..7421bd4c4e81 100644
--- a/test/CodeGen/AVR/relax-mem/STDWPtrQRr.mir
+++ b/test/CodeGen/AVR/relax-mem/STDWPtrQRr.mir
@@ -1,4 +1,4 @@
-# RUN: llc -O0 -run-pass=avr-relax-mem %s -o - 2>&1 | FileCheck %s
+# RUN: llc -O0 -run-pass=avr-relax-mem %s -o - | FileCheck %s
--- |
target triple = "avr--"
diff --git a/test/CodeGen/BPF/cc_args.ll b/test/CodeGen/BPF/cc_args.ll
index 4ad7a22c7e2e..a2ac03f0da12 100644
--- a/test/CodeGen/BPF/cc_args.ll
+++ b/test/CodeGen/BPF/cc_args.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=bpfel -show-mc-encoding | FileCheck %s
+; RUN: not llc < %s -march=bpfel -show-mc-encoding | FileCheck %s
define void @test() #0 {
entry:
diff --git a/test/CodeGen/BPF/cc_args_be.ll b/test/CodeGen/BPF/cc_args_be.ll
index 4d1efccf5160..dc41ee0d8a7d 100644
--- a/test/CodeGen/BPF/cc_args_be.ll
+++ b/test/CodeGen/BPF/cc_args_be.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=bpfeb -show-mc-encoding | FileCheck %s
+; RUN: not llc < %s -march=bpfeb -show-mc-encoding | FileCheck %s
; test big endian
define void @test() #0 {
diff --git a/test/CodeGen/BPF/cc_ret.ll b/test/CodeGen/BPF/cc_ret.ll
index 7bd01adc6ded..eab2a359b8f2 100644
--- a/test/CodeGen/BPF/cc_ret.ll
+++ b/test/CodeGen/BPF/cc_ret.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=bpfel | FileCheck %s
+; RUN: not llc < %s -march=bpfel | FileCheck %s
define void @test() #0 {
entry:
diff --git a/test/CodeGen/BPF/fi_ri.ll b/test/CodeGen/BPF/fi_ri.ll
index 12452988e8a9..6ecc82679691 100644
--- a/test/CodeGen/BPF/fi_ri.ll
+++ b/test/CodeGen/BPF/fi_ri.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=bpf | FileCheck %s
+; RUN: not llc < %s -march=bpf | FileCheck %s
%struct.key_t = type { i32, [16 x i8] }
diff --git a/test/CodeGen/BPF/intrinsics.ll b/test/CodeGen/BPF/intrinsics.ll
index e7a9de88cbb2..88aba805adad 100644
--- a/test/CodeGen/BPF/intrinsics.ll
+++ b/test/CodeGen/BPF/intrinsics.ll
@@ -52,14 +52,12 @@ declare i64 @llvm.bpf.load.word(i8*, i64) #1
define i32 @ld_pseudo() #0 {
entry:
%call = tail call i64 @llvm.bpf.pseudo(i64 2, i64 3)
- tail call void @bar(i64 %call, i32 4) #2
+ tail call void inttoptr (i64 4 to void (i64, i32)*)(i64 %call, i32 4) #2
ret i32 0
; CHECK-LABEL: ld_pseudo:
; CHECK: ld_pseudo r1, 2, 3 # encoding: [0x18,0x21,0x00,0x00,0x03,0x00
}
-declare void @bar(i64, i32) #1
-
declare i64 @llvm.bpf.pseudo(i64, i64) #2
define i32 @bswap(i64 %a, i64 %b, i64 %c) #0 {
diff --git a/test/CodeGen/BPF/mem_offset.ll b/test/CodeGen/BPF/mem_offset.ll
new file mode 100644
index 000000000000..2b86e44ae592
--- /dev/null
+++ b/test/CodeGen/BPF/mem_offset.ll
@@ -0,0 +1,17 @@
+; RUN: llc -march=bpfel -show-mc-encoding < %s | FileCheck %s
+
+; Function Attrs: nounwind
+define i32 @bpf_prog1(i8* nocapture readnone) local_unnamed_addr #0 {
+; CHECK: r1 += -1879113726 # encoding: [0x07,0x01,0x00,0x00,0x02,0x00,0xff,0x8f]
+; CHECK: r0 = *(u64 *)(r1 + 0) # encoding: [0x79,0x10,0x00,0x00,0x00,0x00,0x00,0x00]
+ %2 = alloca i64, align 8
+ %3 = bitcast i64* %2 to i8*
+ store volatile i64 590618314553, i64* %2, align 8
+ %4 = load volatile i64, i64* %2, align 8
+ %5 = add i64 %4, -1879113726
+ %6 = inttoptr i64 %5 to i64*
+ %7 = load i64, i64* %6, align 8
+ %8 = trunc i64 %7 to i32
+ ret i32 %8
+}
+
diff --git a/test/CodeGen/BPF/objdump_intrinsics.ll b/test/CodeGen/BPF/objdump_intrinsics.ll
index ddb92b9d8fba..1d33e57de789 100644
--- a/test/CodeGen/BPF/objdump_intrinsics.ll
+++ b/test/CodeGen/BPF/objdump_intrinsics.ll
@@ -52,14 +52,12 @@ declare i64 @llvm.bpf.load.word(i8*, i64) #1
define i32 @ld_pseudo() #0 {
entry:
%call = tail call i64 @llvm.bpf.pseudo(i64 2, i64 3)
- tail call void @bar(i64 %call, i32 4) #2
+ tail call void inttoptr (i64 4 to void (i64, i32)*)(i64 %call, i32 4) #2
ret i32 0
; CHECK-LABEL: ld_pseudo:
; CHECK: ld_pseudo r1, 2, 3
}
-declare void @bar(i64, i32) #1
-
declare i64 @llvm.bpf.pseudo(i64, i64) #2
define i32 @bswap(i64 %a, i64 %b, i64 %c) #0 {
diff --git a/test/CodeGen/BPF/objdump_trivial.ll b/test/CodeGen/BPF/objdump_trivial.ll
index 48fee21a2e4c..6b5423854ee7 100644
--- a/test/CodeGen/BPF/objdump_trivial.ll
+++ b/test/CodeGen/BPF/objdump_trivial.ll
@@ -1,19 +1,18 @@
; RUN: llc -march=bpfel -filetype=obj -o - %s | llvm-objdump -d - | FileCheck %s
; CHECK: if r2 s> r1 goto
-; CHECK: call
+; CHECK: call 1
+; CHECK: exit
+; CHECK: call 2
; CHECK: exit
-
-declare void @a()
-declare void @b()
define void @foo(i32 %a) {
%b = icmp sgt i32 %a, -1
br i1 %b, label %x, label %y
x:
-call void @a()
+call void inttoptr (i64 1 to void ()*)()
ret void
y:
-call void @b()
+call void inttoptr (i64 2 to void ()*)()
ret void
}
diff --git a/test/CodeGen/BPF/sanity.ll b/test/CodeGen/BPF/sanity.ll
index f318c3a95e48..a7aed65b821e 100644
--- a/test/CodeGen/BPF/sanity.ll
+++ b/test/CodeGen/BPF/sanity.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=bpfel | FileCheck %s
+; RUN: not llc < %s -march=bpfel | FileCheck %s
@foo_printf.fmt = private unnamed_addr constant [9 x i8] c"hello \0A\00", align 1
diff --git a/test/CodeGen/BPF/undef.ll b/test/CodeGen/BPF/undef.ll
index 1a925ccae803..de14bfde1ab9 100644
--- a/test/CodeGen/BPF/undef.ll
+++ b/test/CodeGen/BPF/undef.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=bpf | FileCheck %s
+; RUN: not llc < %s -march=bpf | FileCheck %s
%struct.bpf_map_def = type { i32, i32, i32, i32 }
%struct.__sk_buff = type opaque
@@ -13,50 +13,55 @@
; Function Attrs: nounwind uwtable
define i32 @ebpf_filter(%struct.__sk_buff* nocapture readnone %ebpf_packet) #0 section "socket1" {
+; CHECK: r2 = r10
+; CHECK: r2 += -2
+; CHECK: r1 = 0
+; CHECK: *(u16 *)(r2 + 6) = r1
+; CHECK: *(u16 *)(r2 + 4) = r1
+; CHECK: *(u16 *)(r2 + 2) = r1
+; CHECK: r2 = 6
+; CHECK: *(u8 *)(r10 - 7) = r2
+; CHECK: r2 = 5
+; CHECK: *(u8 *)(r10 - 8) = r2
+; CHECK: r2 = 7
+; CHECK: *(u8 *)(r10 - 6) = r2
+; CHECK: r2 = 8
+; CHECK: *(u8 *)(r10 - 5) = r2
+; CHECK: r2 = 9
+; CHECK: *(u8 *)(r10 - 4) = r2
+; CHECK: r2 = 10
+; CHECK: *(u8 *)(r10 - 3) = r2
+; CHECK: *(u16 *)(r10 + 24) = r1
+; CHECK: *(u16 *)(r10 + 22) = r1
+; CHECK: *(u16 *)(r10 + 20) = r1
+; CHECK: *(u16 *)(r10 + 18) = r1
+; CHECK: *(u16 *)(r10 + 16) = r1
+; CHECK: *(u16 *)(r10 + 14) = r1
+; CHECK: *(u16 *)(r10 + 12) = r1
+; CHECK: *(u16 *)(r10 + 10) = r1
+; CHECK: *(u16 *)(r10 + 8) = r1
+; CHECK: *(u16 *)(r10 + 6) = r1
+; CHECK: *(u16 *)(r10 - 2) = r1
+; CHECK: *(u16 *)(r10 + 26) = r1
+; CHECK: r2 = r10
+; CHECK: r2 += -8
+; CHECK: r1 = <MCOperand Expr:(routing)>ll
+; CHECK: call bpf_map_lookup_elem
+; CHECK: exit
%key = alloca %struct.routing_key_2, align 1
%1 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 0
-; CHECK: r1 = 5
-; CHECK: *(u8 *)(r10 - 8) = r1
store i8 5, i8* %1, align 1
%2 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 1
-; CHECK: r1 = 6
-; CHECK: *(u8 *)(r10 - 7) = r1
store i8 6, i8* %2, align 1
%3 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 2
-; CHECK: r1 = 7
-; CHECK: *(u8 *)(r10 - 6) = r1
store i8 7, i8* %3, align 1
%4 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 3
-; CHECK: r1 = 8
-; CHECK: *(u8 *)(r10 - 5) = r1
store i8 8, i8* %4, align 1
%5 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 4
-; CHECK: r1 = 9
-; CHECK: *(u8 *)(r10 - 4) = r1
store i8 9, i8* %5, align 1
%6 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 5
-; CHECK: r1 = 10
-; CHECK: *(u8 *)(r10 - 3) = r1
store i8 10, i8* %6, align 1
%7 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 1, i32 0, i64 0
-; CHECK: r1 = r10
-; CHECK: r1 += -2
-; CHECK: r2 = 0
-; CHECK: *(u16 *)(r1 + 6) = r2
-; CHECK: *(u16 *)(r1 + 4) = r2
-; CHECK: *(u16 *)(r1 + 2) = r2
-; CHECK: *(u16 *)(r10 + 24) = r2
-; CHECK: *(u16 *)(r10 + 22) = r2
-; CHECK: *(u16 *)(r10 + 20) = r2
-; CHECK: *(u16 *)(r10 + 18) = r2
-; CHECK: *(u16 *)(r10 + 16) = r2
-; CHECK: *(u16 *)(r10 + 14) = r2
-; CHECK: *(u16 *)(r10 + 12) = r2
-; CHECK: *(u16 *)(r10 + 10) = r2
-; CHECK: *(u16 *)(r10 + 8) = r2
-; CHECK: *(u16 *)(r10 + 6) = r2
-; CHECK: *(u16 *)(r10 - 2) = r2
-; CHECK: *(u16 *)(r10 + 26) = r2
call void @llvm.memset.p0i8.i64(i8* %7, i8 0, i64 30, i32 1, i1 false)
%8 = call i32 (%struct.bpf_map_def*, %struct.routing_key_2*, ...) bitcast (i32 (...)* @bpf_map_lookup_elem to i32 (%struct.bpf_map_def*, %struct.routing_key_2*, ...)*)(%struct.bpf_map_def* nonnull @routing, %struct.routing_key_2* nonnull %key) #3
ret i32 undef
diff --git a/test/CodeGen/BPF/warn-call.ll b/test/CodeGen/BPF/warn-call.ll
new file mode 100644
index 000000000000..ae7f78ac1aa8
--- /dev/null
+++ b/test/CodeGen/BPF/warn-call.ll
@@ -0,0 +1,69 @@
+; RUN: not llc -march=bpfel < %s 2>&1 >/dev/null | FileCheck %s
+
+; CHECK: error: warn_call.c
+; CHECK: built-in function 'memcpy'
+; CHECK: error: warn_call.c
+; CHECK: global function 'foo'
+; CHECK: global function 'bar'
+define i8* @warn(i8* returned, i8*, i64) local_unnamed_addr #0 !dbg !6 {
+ tail call void @llvm.dbg.value(metadata i8* %0, i64 0, metadata !14, metadata !17), !dbg !18
+ tail call void @llvm.dbg.value(metadata i8* %1, i64 0, metadata !15, metadata !17), !dbg !19
+ tail call void @llvm.dbg.value(metadata i64 %2, i64 0, metadata !16, metadata !17), !dbg !20
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 %2, i32 1, i1 false), !dbg !21
+ %4 = tail call i8* @foo(i8* %0, i8* %1, i64 %2) #5, !dbg !22
+ %5 = tail call fastcc i8* @bar(i8* %0), !dbg !23
+ ret i8* %5, !dbg !24
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) #1
+
+declare i8* @foo(i8*, i8*, i64) local_unnamed_addr #2
+
+; Function Attrs: noinline nounwind readnone
+define internal fastcc i8* @bar(i8* readnone returned) unnamed_addr #3 !dbg !25 {
+ tail call void @llvm.dbg.value(metadata i8* null, i64 0, metadata !28, metadata !17), !dbg !30
+ tail call void @llvm.dbg.value(metadata i64 0, i64 0, metadata !29, metadata !17), !dbg !31
+ ret i8* %0, !dbg !32
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #4
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+!llvm.ident = !{!5}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 (trunk 292174) (llvm/trunk 292179)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "warn_call.c", directory: "/w/llvm/bld")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{!"clang version 5.0.0 (trunk 292174) (llvm/trunk 292179)"}
+!6 = distinct !DISubprogram(name: "warn", scope: !1, file: !1, line: 4, type: !7, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !13)
+!7 = !DISubroutineType(types: !8)
+!8 = !{!9, !9, !10, !12}
+!9 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: null, size: 64)
+!10 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !11, size: 64)
+!11 = !DIDerivedType(tag: DW_TAG_const_type, baseType: null)
+!12 = !DIBasicType(name: "long unsigned int", size: 64, encoding: DW_ATE_unsigned)
+!13 = !{!14, !15, !16}
+!14 = !DILocalVariable(name: "dst", arg: 1, scope: !6, file: !1, line: 4, type: !9)
+!15 = !DILocalVariable(name: "src", arg: 2, scope: !6, file: !1, line: 4, type: !10)
+!16 = !DILocalVariable(name: "len", arg: 3, scope: !6, file: !1, line: 4, type: !12)
+!17 = !DIExpression()
+!18 = !DILocation(line: 4, column: 18, scope: !6)
+!19 = !DILocation(line: 4, column: 35, scope: !6)
+!20 = !DILocation(line: 4, column: 54, scope: !6)
+!21 = !DILocation(line: 6, column: 2, scope: !6)
+!22 = !DILocation(line: 7, column: 2, scope: !6)
+!23 = !DILocation(line: 8, column: 9, scope: !6)
+!24 = !DILocation(line: 8, column: 2, scope: !6)
+!25 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 2, type: !7, isLocal: true, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !26)
+!26 = !{!27, !28, !29}
+!27 = !DILocalVariable(name: "dst", arg: 1, scope: !25, file: !1, line: 2, type: !9)
+!28 = !DILocalVariable(name: "src", arg: 2, scope: !25, file: !1, line: 2, type: !10)
+!29 = !DILocalVariable(name: "len", arg: 3, scope: !25, file: !1, line: 2, type: !12)
+!30 = !DILocation(line: 2, column: 67, scope: !25)
+!31 = !DILocation(line: 2, column: 86, scope: !25)
+!32 = !DILocation(line: 2, column: 93, scope: !25)
diff --git a/test/CodeGen/BPF/warn-stack.ll b/test/CodeGen/BPF/warn-stack.ll
new file mode 100644
index 000000000000..5a579d28554a
--- /dev/null
+++ b/test/CodeGen/BPF/warn-stack.ll
@@ -0,0 +1,76 @@
+; RUN: not llc -march=bpfel < %s 2>&1 >/dev/null | FileCheck %s
+
+;; CHECK-NOT: nowarn
+define void @nowarn() local_unnamed_addr #0 !dbg !6 {
+ %1 = alloca [504 x i8], align 1
+ %2 = getelementptr inbounds [504 x i8], [504 x i8]* %1, i64 0, i64 0, !dbg !15
+ call void @llvm.lifetime.start.p0i8(i64 504, i8* nonnull %2) #4, !dbg !15
+ tail call void @llvm.dbg.declare(metadata [504 x i8]* %1, metadata !10, metadata !16), !dbg !17
+ call void @doit(i8* nonnull %2) #4, !dbg !18
+ call void @llvm.lifetime.end.p0i8(i64 504, i8* nonnull %2) #4, !dbg !19
+ ret void, !dbg !19
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
+
+declare void @doit(i8*) local_unnamed_addr #3
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+
+; CHECK: error: warn_stack.c
+; CHECK: BPF stack limit
+define void @warn() local_unnamed_addr #0 !dbg !20 {
+ %1 = alloca [512 x i8], align 1
+ %2 = getelementptr inbounds [512 x i8], [512 x i8]* %1, i64 0, i64 0, !dbg !26
+ call void @llvm.lifetime.start.p0i8(i64 512, i8* nonnull %2) #4, !dbg !26
+ tail call void @llvm.dbg.declare(metadata [512 x i8]* %1, metadata !22, metadata !16), !dbg !27
+ call void @doit(i8* nonnull %2) #4, !dbg !28
+ call void @llvm.lifetime.end.p0i8(i64 512, i8* nonnull %2) #4, !dbg !29
+ ret void, !dbg !29
+}
+
+attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { argmemonly nounwind }
+attributes #2 = { nounwind readnone }
+attributes #3 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #4 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+!llvm.ident = !{!5}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 (trunk 292141) (llvm/trunk 292156)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "warn_stack.c", directory: "/w/llvm/bld")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{!"clang version 5.0.0 (trunk 292141) (llvm/trunk 292156)"}
+!6 = distinct !DISubprogram(name: "nowarn", scope: !1, file: !1, line: 2, type: !7, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !9)
+!7 = !DISubroutineType(types: !8)
+!8 = !{null}
+!9 = !{!10}
+!10 = !DILocalVariable(name: "buf", scope: !6, file: !1, line: 4, type: !11)
+!11 = !DICompositeType(tag: DW_TAG_array_type, baseType: !12, size: 4088, elements: !13)
+!12 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char)
+!13 = !{!14}
+!14 = !DISubrange(count: 504)
+!15 = !DILocation(line: 4, column: 2, scope: !6)
+!16 = !DIExpression()
+!17 = !DILocation(line: 4, column: 7, scope: !6)
+!18 = !DILocation(line: 5, column: 2, scope: !6)
+!19 = !DILocation(line: 6, column: 1, scope: !6)
+!20 = distinct !DISubprogram(name: "warn", scope: !1, file: !1, line: 7, type: !7, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !21)
+!21 = !{!22}
+!22 = !DILocalVariable(name: "buf", scope: !20, file: !1, line: 9, type: !23)
+!23 = !DICompositeType(tag: DW_TAG_array_type, baseType: !12, size: 4096, elements: !24)
+!24 = !{!25}
+!25 = !DISubrange(count: 512)
+!26 = !DILocation(line: 9, column: 2, scope: !20)
+!27 = !DILocation(line: 9, column: 7, scope: !20)
+!28 = !DILocation(line: 10, column: 2, scope: !20)
+!29 = !DILocation(line: 11, column: 1, scope: !20)
diff --git a/test/CodeGen/Generic/2003-07-29-BadConstSbyte.ll b/test/CodeGen/Generic/2003-07-29-BadConstSbyte.ll
index 010c0c553638..9e4664ad69c9 100644
--- a/test/CodeGen/Generic/2003-07-29-BadConstSbyte.ll
+++ b/test/CodeGen/Generic/2003-07-29-BadConstSbyte.ll
@@ -1,5 +1,8 @@
; RUN: llc < %s
+; Bug: PR31341
+; XFAIL: avr
+
;; Date: Jul 29, 2003.
;; From: test/Programs/MultiSource/Ptrdist-bc
;; Function: ---
diff --git a/test/CodeGen/Generic/2007-04-08-MultipleFrameIndices.ll b/test/CodeGen/Generic/2007-04-08-MultipleFrameIndices.ll
index 21c05f17a7c5..e961ea764ec2 100644
--- a/test/CodeGen/Generic/2007-04-08-MultipleFrameIndices.ll
+++ b/test/CodeGen/Generic/2007-04-08-MultipleFrameIndices.ll
@@ -3,6 +3,9 @@
; PR1308
; PR1557
+; Bug: PR31336
+; XFAIL: avr
+
define i32 @stuff(i32, ...) {
%foo = alloca i8*
%bar = alloca i32*
diff --git a/test/CodeGen/Generic/2007-12-17-InvokeAsm.ll b/test/CodeGen/Generic/2007-12-17-InvokeAsm.ll
index fe7f463159a5..bb8058575c82 100644
--- a/test/CodeGen/Generic/2007-12-17-InvokeAsm.ll
+++ b/test/CodeGen/Generic/2007-12-17-InvokeAsm.ll
@@ -1,5 +1,7 @@
; RUN: llc -no-integrated-as < %s
+; XFAIL: avr
+
define fastcc void @bc__support__high_resolution_time__initialize_clock_rate() personality i32 (...)* @__gxx_personality_v0 {
entry:
invoke void asm "rdtsc\0A\09movl %eax, $0\0A\09movl %edx, $1", "=*imr,=*imr,~{dirflag},~{fpsr},~{flags},~{dx},~{ax}"( i32* null, i32* null )
diff --git a/test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll b/test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll
index 5cc48c212c40..a9a33d72bca2 100644
--- a/test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll
+++ b/test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll
@@ -1,4 +1,8 @@
; RUN: llc < %s
+
+; Bug: PR31898
+; XFAIL: avr
+
; This caused ScheduleDAG to crash in EmitPhysRegCopy when searching
; the uses of a copy to a physical register without ignoring non-data
; dependence, PR10220.
diff --git a/test/CodeGen/Generic/MachineBranchProb.ll b/test/CodeGen/Generic/MachineBranchProb.ll
index 921fa62c1c43..804e5b0ce9fc 100644
--- a/test/CodeGen/Generic/MachineBranchProb.ll
+++ b/test/CodeGen/Generic/MachineBranchProb.ll
@@ -1,12 +1,12 @@
; RUN: llc < %s -print-machineinstrs=expand-isel-pseudos -o /dev/null 2>&1 | FileCheck %s
-; ARM & AArch64 run an extra SimplifyCFG which disrupts this test.
-; XFAIL: arm,aarch64
-
; Hexagon runs passes that renumber the basic blocks, causing this test
; to fail.
; XFAIL: hexagon
+; Bug: PR31899
+; XFAIL: avr
+
; Make sure we have the correct weight attached to each successor.
define i32 @test2(i32 %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: Machine code for function test2:
diff --git a/test/CodeGen/Generic/externally_available.ll b/test/CodeGen/Generic/externally_available.ll
index 7976cc971880..2376bc739927 100644
--- a/test/CodeGen/Generic/externally_available.ll
+++ b/test/CodeGen/Generic/externally_available.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s | not grep test_
+; RUN: llc -verify-machine-dom-info < %s | not grep test_
; test_function should not be emitted to the .s file.
define available_externally i32 @test_function() {
diff --git a/test/CodeGen/Generic/icmp-illegal.ll b/test/CodeGen/Generic/icmp-illegal.ll
index 23d20c04652f..77dd5a59dfd1 100644
--- a/test/CodeGen/Generic/icmp-illegal.ll
+++ b/test/CodeGen/Generic/icmp-illegal.ll
@@ -1,4 +1,3 @@
-
; RUN: llc < %s | FileCheck %s
; CHECK-LABEL: test_ult
diff --git a/test/CodeGen/Generic/inline-asm-mem-clobber.ll b/test/CodeGen/Generic/inline-asm-mem-clobber.ll
index be1e0a39b3b0..6184f803b71f 100644
--- a/test/CodeGen/Generic/inline-asm-mem-clobber.ll
+++ b/test/CodeGen/Generic/inline-asm-mem-clobber.ll
@@ -1,5 +1,8 @@
; RUN: llc -O2 -no-integrated-as < %s | FileCheck %s
+; Test uses 32-bit registers which aren't supported on AVR.
+; XFAIL: avr
+
@G = common global i32 0, align 4
define i32 @foo(i8* %p) nounwind uwtable {
diff --git a/test/CodeGen/Generic/multiple-return-values-cross-block-with-invoke.ll b/test/CodeGen/Generic/multiple-return-values-cross-block-with-invoke.ll
index a21906cf6dc5..8c11cb2f0217 100644
--- a/test/CodeGen/Generic/multiple-return-values-cross-block-with-invoke.ll
+++ b/test/CodeGen/Generic/multiple-return-values-cross-block-with-invoke.ll
@@ -1,5 +1,4 @@
; RUN: llc < %s
-; XFAIL: hexagon
declare { i64, double } @wild()
define void @foo(i64* %p, double* %q) nounwind personality i32 (...)* @__gxx_personality_v0 {
diff --git a/test/CodeGen/Generic/overloaded-intrinsic-name.ll b/test/CodeGen/Generic/overloaded-intrinsic-name.ll
index 65fc9c1184cf..89a5f8077991 100644
--- a/test/CodeGen/Generic/overloaded-intrinsic-name.ll
+++ b/test/CodeGen/Generic/overloaded-intrinsic-name.ll
@@ -1,4 +1,4 @@
-; RUN: opt -verify -S < %s
+; RUN: opt -verify -S < %s | FileCheck %s
; Tests the name mangling performed by the codepath following
; getMangledTypeStr(). Only tests that code with the various manglings
@@ -44,14 +44,43 @@ define <3 x i32>* @test_vAny(<3 x i32>* %v) gc "statepoint-example" {
; struct
define %struct.test* @test_struct(%struct.test* %v) gc "statepoint-example" {
%tok = call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 0, %struct.test* %v)
- %v-new = call %struct.test* @llvm.experimental.gc.relocate.p0struct.test(token %tok, i32 7, i32 7)
+ %v-new = call %struct.test* @llvm.experimental.gc.relocate.p0s_struct.tests(token %tok, i32 7, i32 7)
ret %struct.test* %v-new
}
+; literal struct with nested literal struct
+define {i64, i64, {i64} }* @test_literal_struct({i64, i64, {i64}}* %v) gc "statepoint-example" {
+ %tok = call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 0, {i64, i64, {i64}} *%v)
+ %v-new = call {i64, i64, {i64}}* @llvm.experimental.gc.relocate.p0sl_i64i64sl_i64ss.test(token %tok, i32 7, i32 7)
+ ret {i64, i64, {i64}}* %v-new
+}
+; struct with a horrible name, broken when structs were unprefixed
+%i32 = type { i32 }
+
+define %i32* @test_i32_struct(%i32* %v) gc "statepoint-example" {
+entry:
+ %tok = call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 0, %i32* %v)
+ %v-new = call %i32* @llvm.experimental.gc.relocate.p0s_i32s(token %tok, i32 7, i32 7)
+ ret %i32* %v-new
+}
+; completely broken intrinsic naming due to needing remangling. Just use random naming to test
+
+define %i32* @test_broken_names(%i32* %v) gc "statepoint-example" {
+entry:
+ %tok = call fastcc token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.deadbeef(i64 0, i32 0, i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 0, %i32* %v)
+; Make sure we do not destroy the calling convention when remangling
+; CHECK: fastcc
+ %v-new = call %i32* @llvm.experimental.gc.relocate.beefdead(token %tok, i32 7, i32 7)
+ ret %i32* %v-new
+}
declare zeroext i1 @return_i1()
declare token @llvm.experimental.gc.statepoint.p0f_i1f(i64, i32, i1 ()*, i32, i32, ...)
declare i32* @llvm.experimental.gc.relocate.p0i32(token, i32, i32)
declare float* @llvm.experimental.gc.relocate.p0f32(token, i32, i32)
declare [3 x i32]* @llvm.experimental.gc.relocate.p0a3i32(token, i32, i32)
declare <3 x i32>* @llvm.experimental.gc.relocate.p0v3i32(token, i32, i32)
-declare %struct.test* @llvm.experimental.gc.relocate.p0struct.test(token, i32, i32)
+declare %struct.test* @llvm.experimental.gc.relocate.p0s_struct.tests(token, i32, i32)
+declare {i64, i64, {i64}}* @llvm.experimental.gc.relocate.p0sl_i64i64sl_i64ss.test(token, i32, i32)
+declare %i32* @llvm.experimental.gc.relocate.p0s_i32s(token, i32, i32)
+declare %i32* @llvm.experimental.gc.relocate.beefdead(token, i32, i32)
+declare token @llvm.experimental.gc.statepoint.deadbeef(i64, i32, i1 ()*, i32, i32, ...)
diff --git a/test/CodeGen/Generic/pr24662.ll b/test/CodeGen/Generic/pr24662.ll
new file mode 100644
index 000000000000..5a10b9cb0acb
--- /dev/null
+++ b/test/CodeGen/Generic/pr24662.ll
@@ -0,0 +1,12 @@
+; RUN: llc < %s -fast-isel
+; RUN: llc < %s
+
+define i60 @PR24662a() {
+ ret i60 trunc (i670010 fptoui(float 0x400D9999A0000000 to i670010) to i60)
+}
+
+define i60 @PR24662b() {
+ %1 = fptoui float 0x400D9999A0000000 to i670010
+ %2 = trunc i670010 %1 to i60
+ ret i60 %2
+}
diff --git a/test/CodeGen/Generic/select-cc.ll b/test/CodeGen/Generic/select-cc.ll
index 7510f701b147..c67f57fde18b 100644
--- a/test/CodeGen/Generic/select-cc.ll
+++ b/test/CodeGen/Generic/select-cc.ll
@@ -1,6 +1,8 @@
; RUN: llc < %s
-; PR2504
-; XFAIL: hexagon
+
+; PR31338
+; XFAIL: avr
+
define <2 x double> @vector_select(<2 x double> %x, <2 x double> %y) nounwind {
%x.lo = extractelement <2 x double> %x, i32 0 ; <double> [#uses=1]
%x.lo.ge = fcmp oge double %x.lo, 0.000000e+00 ; <i1> [#uses=1]
diff --git a/test/CodeGen/Generic/v-split.ll b/test/CodeGen/Generic/v-split.ll
index 00c62f389520..91aece94fecd 100644
--- a/test/CodeGen/Generic/v-split.ll
+++ b/test/CodeGen/Generic/v-split.ll
@@ -1,4 +1,8 @@
; RUN: llc < %s
+
+; Bug: PR31898
+; XFAIL: avr
+
%f8 = type <8 x float>
define void @test_f8(%f8 *%P, %f8* %Q, %f8 *%S) {
diff --git a/test/CodeGen/Generic/vector-redux.ll b/test/CodeGen/Generic/vector-redux.ll
index 8efdbf85b8c0..64562d6d9490 100644
--- a/test/CodeGen/Generic/vector-redux.ll
+++ b/test/CodeGen/Generic/vector-redux.ll
@@ -1,6 +1,9 @@
; RUN: llc < %s -debug-only=isel -o /dev/null 2>&1 | FileCheck %s
; REQUIRES: asserts
+; Bug: PR31898
+; XFAIL: avr
+
@a = global [1024 x i32] zeroinitializer, align 16
define i32 @reduce_add() {
diff --git a/test/CodeGen/Generic/vector.ll b/test/CodeGen/Generic/vector.ll
index 2d4dc501a53a..9c0cacdcd878 100644
--- a/test/CodeGen/Generic/vector.ll
+++ b/test/CodeGen/Generic/vector.ll
@@ -1,6 +1,9 @@
; Test that vectors are scalarized/lowered correctly.
; RUN: llc < %s
+; Bug: PR31898
+; XFAIL: avr
+
%d8 = type <8 x double>
%f1 = type <1 x float>
%f2 = type <2 x float>
diff --git a/test/CodeGen/Hexagon/BranchPredict.ll b/test/CodeGen/Hexagon/BranchPredict.ll
index 17d169974e5a..40791c981483 100644
--- a/test/CodeGen/Hexagon/BranchPredict.ll
+++ b/test/CodeGen/Hexagon/BranchPredict.ll
@@ -9,7 +9,7 @@
@j = external global i32
define i32 @foo(i32 %a) nounwind {
-; CHECK: if{{ *}}(!p{{[0-3]}}.new) jump:nt
+; CHECK: if (!p{{[0-3]}}.new) jump:nt
entry:
%tobool = icmp eq i32 %a, 0
br i1 %tobool, label %if.else, label %if.then, !prof !0
@@ -31,7 +31,7 @@ return: ; preds = %if.else, %if.then
declare i32 @foobar(...)
define i32 @bar(i32 %a) nounwind {
-; CHECK: if{{ *}}(p{{[0-3]}}.new) jump:nt
+; CHECK: if (p{{[0-3]}}.new) jump:nt
entry:
%tobool = icmp eq i32 %a, 0
br i1 %tobool, label %if.else, label %if.then, !prof !1
@@ -51,7 +51,7 @@ return: ; preds = %if.else, %if.then
}
define i32 @foo_bar(i32 %a, i16 signext %b) nounwind {
-; CHECK: if{{ *}}(!cmp.eq(r{{[0-9]*}}.new, #0)) jump:nt
+; CHECK: if (!cmp.eq(r{{[0-9]*}}.new,#0)) jump:nt
entry:
%0 = load i32, i32* @j, align 4
%tobool = icmp eq i32 %0, 0
diff --git a/test/CodeGen/Hexagon/adde.ll b/test/CodeGen/Hexagon/adde.ll
index 43ddb4307ef2..12913eea7e81 100644
--- a/test/CodeGen/Hexagon/adde.ll
+++ b/test/CodeGen/Hexagon/adde.ll
@@ -1,34 +1,27 @@
-; RUN: llc -march=hexagon -disable-hsdr -hexagon-expand-condsets=0 -hexagon-bit=0 -disable-post-ra < %s | FileCheck %s
+; RUN: llc -march=hexagon -hexagon-expand-condsets=0 < %s | FileCheck %s
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0, #1)
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0, #0)
-; CHECK: r{{[0-9]+:[0-9]+}} = add(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = add(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
+; CHECK-DAG: r{{[0-9]+:[0-9]+}} = add(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK-DAG: r{{[0-9]+:[0-9]+}} = add(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK-DAG: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK-DAG: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK-DAG: r{{[0-9]+}} = mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}})
+; CHECK-DAG: r{{[0-9]+}} = mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}})
-
-define void @check_adde_addc (i64 %AL, i64 %AH, i64 %BL, i64 %BH, i64* %RL, i64* %RH) {
-entry:
- %tmp1 = zext i64 %AL to i128
- %tmp23 = zext i64 %AH to i128
- %tmp4 = shl i128 %tmp23, 64
- %tmp5 = or i128 %tmp4, %tmp1
- %tmp67 = zext i64 %BL to i128
- %tmp89 = zext i64 %BH to i128
- %tmp11 = shl i128 %tmp89, 64
- %tmp12 = or i128 %tmp11, %tmp67
- %tmp15 = add i128 %tmp12, %tmp5
- %tmp1617 = trunc i128 %tmp15 to i64
- store i64 %tmp1617, i64* %RL
- %tmp21 = lshr i128 %tmp15, 64
- %tmp2122 = trunc i128 %tmp21 to i64
- store i64 %tmp2122, i64* %RH
- ret void
+define void @check_adde_addc(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64* %a4, i64* %a5) {
+b6:
+ %v7 = zext i64 %a0 to i128
+ %v8 = zext i64 %a1 to i128
+ %v9 = shl i128 %v8, 64
+ %v10 = or i128 %v7, %v9
+ %v11 = zext i64 %a2 to i128
+ %v12 = zext i64 %a3 to i128
+ %v13 = shl i128 %v12, 64
+ %v14 = or i128 %v11, %v13
+ %v15 = add i128 %v10, %v14
+ %v16 = lshr i128 %v15, 64
+ %v17 = trunc i128 %v15 to i64
+ %v18 = trunc i128 %v16 to i64
+ store i64 %v17, i64* %a4
+ store i64 %v18, i64* %a5
+ ret void
}
diff --git a/test/CodeGen/Hexagon/addh-sext-trunc.ll b/test/CodeGen/Hexagon/addh-sext-trunc.ll
index 7f219944436b..ec5dc611105d 100644
--- a/test/CodeGen/Hexagon/addh-sext-trunc.ll
+++ b/test/CodeGen/Hexagon/addh-sext-trunc.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}}, r{{[0-9]+}}.{{H|h}})
+; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}},r{{[0-9]+}}.{{H|h}})
target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
target triple = "hexagon-unknown-none"
diff --git a/test/CodeGen/Hexagon/addh-shifted.ll b/test/CodeGen/Hexagon/addh-shifted.ll
index eb263521b42f..697a5c5c69bf 100644
--- a/test/CodeGen/Hexagon/addh-shifted.ll
+++ b/test/CodeGen/Hexagon/addh-shifted.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}}, r{{[0-9]+}}.{{L|l}}):<<16
+; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}},r{{[0-9]+}}.{{L|l}}):<<16
define i64 @test_cast(i64 %arg0, i16 zeroext %arg1, i16 zeroext %arg2) nounwind readnone {
entry:
diff --git a/test/CodeGen/Hexagon/addh.ll b/test/CodeGen/Hexagon/addh.ll
index c2b536c4669a..8217d6753cb3 100644
--- a/test/CodeGen/Hexagon/addh.ll
+++ b/test/CodeGen/Hexagon/addh.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}}, r{{[0-9]+}}.{{L|l}})
+; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}},r{{[0-9]+}}.{{L|l}})
define i64 @test_cast(i64 %arg0, i16 zeroext %arg1, i16 zeroext %arg2) nounwind readnone {
entry:
diff --git a/test/CodeGen/Hexagon/alu64.ll b/test/CodeGen/Hexagon/alu64.ll
index f986f1359374..453b40a6ee83 100644
--- a/test/CodeGen/Hexagon/alu64.ll
+++ b/test/CodeGen/Hexagon/alu64.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
; CHECK-LABEL: @test00
-; CHECK: = cmp.eq(r1:0, r3:2)
+; CHECK: = cmp.eq(r1:0,r3:2)
define i32 @test00(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.C2.cmpeqp(i64 %Rs, i64 %Rt)
@@ -9,7 +9,7 @@ entry:
}
; CHECK-LABEL: @test01
-; CHECK: = cmp.gt(r1:0, r3:2)
+; CHECK: = cmp.gt(r1:0,r3:2)
define i32 @test01(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.C2.cmpgtp(i64 %Rs, i64 %Rt)
@@ -17,7 +17,7 @@ entry:
}
; CHECK-LABEL: @test02
-; CHECK: = cmp.gtu(r1:0, r3:2)
+; CHECK: = cmp.gtu(r1:0,r3:2)
define i32 @test02(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.C2.cmpgtup(i64 %Rs, i64 %Rt)
@@ -25,7 +25,7 @@ entry:
}
; CHECK-LABEL: @test10
-; CHECK: = cmp.eq(r0, r1)
+; CHECK: = cmp.eq(r0,r1)
define i32 @test10(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.rcmpeq(i32 %Rs, i32 %Rt)
@@ -33,7 +33,7 @@ entry:
}
; CHECK-LABEL: @test11
-; CHECK: = !cmp.eq(r0, r1)
+; CHECK: = !cmp.eq(r0,r1)
define i32 @test11(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.rcmpneq(i32 %Rs, i32 %Rt)
@@ -41,7 +41,7 @@ entry:
}
; CHECK-LABEL: @test12
-; CHECK: = cmp.eq(r0, #23)
+; CHECK: = cmp.eq(r0,#23)
define i32 @test12(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.rcmpeqi(i32 %Rs, i32 23)
@@ -49,7 +49,7 @@ entry:
}
; CHECK-LABEL: @test13
-; CHECK: = !cmp.eq(r0, #47)
+; CHECK: = !cmp.eq(r0,#47)
define i32 @test13(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.rcmpneqi(i32 %Rs, i32 47)
@@ -57,7 +57,7 @@ entry:
}
; CHECK-LABEL: @test20
-; CHECK: = cmpb.eq(r0, r1)
+; CHECK: = cmpb.eq(r0,r1)
define i32 @test20(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmpbeq(i32 %Rs, i32 %Rt)
@@ -65,7 +65,7 @@ entry:
}
; CHECK-LABEL: @test21
-; CHECK: = cmpb.gt(r0, r1)
+; CHECK: = cmpb.gt(r0,r1)
define i32 @test21(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmpbgt(i32 %Rs, i32 %Rt)
@@ -73,7 +73,7 @@ entry:
}
; CHECK-LABEL: @test22
-; CHECK: = cmpb.gtu(r0, r1)
+; CHECK: = cmpb.gtu(r0,r1)
define i32 @test22(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmpbgtu(i32 %Rs, i32 %Rt)
@@ -81,7 +81,7 @@ entry:
}
; CHECK-LABEL: @test23
-; CHECK: = cmpb.eq(r0, #56)
+; CHECK: = cmpb.eq(r0,#56)
define i32 @test23(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmpbeqi(i32 %Rs, i32 56)
@@ -89,7 +89,7 @@ entry:
}
; CHECK-LABEL: @test24
-; CHECK: = cmpb.gt(r0, #29)
+; CHECK: = cmpb.gt(r0,#29)
define i32 @test24(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmpbgti(i32 %Rs, i32 29)
@@ -97,7 +97,7 @@ entry:
}
; CHECK-LABEL: @test25
-; CHECK: = cmpb.gtu(r0, #111)
+; CHECK: = cmpb.gtu(r0,#111)
define i32 @test25(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmpbgtui(i32 %Rs, i32 111)
@@ -105,7 +105,7 @@ entry:
}
; CHECK-LABEL: @test30
-; CHECK: = cmph.eq(r0, r1)
+; CHECK: = cmph.eq(r0,r1)
define i32 @test30(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmpheq(i32 %Rs, i32 %Rt)
@@ -113,7 +113,7 @@ entry:
}
; CHECK-LABEL: @test31
-; CHECK: = cmph.gt(r0, r1)
+; CHECK: = cmph.gt(r0,r1)
define i32 @test31(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmphgt(i32 %Rs, i32 %Rt)
@@ -121,7 +121,7 @@ entry:
}
; CHECK-LABEL: @test32
-; CHECK: = cmph.gtu(r0, r1)
+; CHECK: = cmph.gtu(r0,r1)
define i32 @test32(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmphgtu(i32 %Rs, i32 %Rt)
@@ -129,7 +129,7 @@ entry:
}
; CHECK-LABEL: @test33
-; CHECK: = cmph.eq(r0, #-123)
+; CHECK: = cmph.eq(r0,#-123)
define i32 @test33(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmpheqi(i32 %Rs, i32 -123)
@@ -137,7 +137,7 @@ entry:
}
; CHECK-LABEL: @test34
-; CHECK: = cmph.gt(r0, #-3)
+; CHECK: = cmph.gt(r0,#-3)
define i32 @test34(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmphgti(i32 %Rs, i32 -3)
@@ -145,7 +145,7 @@ entry:
}
; CHECK-LABEL: @test35
-; CHECK: = cmph.gtu(r0, #13)
+; CHECK: = cmph.gtu(r0,#13)
define i32 @test35(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.cmphgtui(i32 %Rs, i32 13)
@@ -153,7 +153,7 @@ entry:
}
; CHECK-LABEL: @test40
-; CHECK: = vmux(p0, r3:2, r5:4)
+; CHECK: = vmux(p0,r3:2,r5:4)
define i64 @test40(i32 %Pu, i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.C2.vmux(i32 %Pu, i64 %Rs, i64 %Rt)
@@ -161,7 +161,7 @@ entry:
}
; CHECK-LABEL: @test41
-; CHECK: = any8(vcmpb.eq(r1:0, r3:2))
+; CHECK: = any8(vcmpb.eq(r1:0,r3:2))
define i32 @test41(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.vcmpbeq.any(i64 %Rs, i64 %Rt)
@@ -169,7 +169,7 @@ entry:
}
; CHECK-LABEL: @test50
-; CHECK: = add(r1:0, r3:2)
+; CHECK: = add(r1:0,r3:2)
define i64 @test50(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A2.addp(i64 %Rs, i64 %Rt)
@@ -177,7 +177,7 @@ entry:
}
; CHECK-LABEL: @test51
-; CHECK: = add(r1:0, r3:2):sat
+; CHECK: = add(r1:0,r3:2):sat
define i64 @test51(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A2.addpsat(i64 %Rs, i64 %Rt)
@@ -185,7 +185,7 @@ entry:
}
; CHECK-LABEL: @test52
-; CHECK: = sub(r1:0, r3:2)
+; CHECK: = sub(r1:0,r3:2)
define i64 @test52(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A2.subp(i64 %Rs, i64 %Rt)
@@ -193,7 +193,7 @@ entry:
}
; CHECK-LABEL: @test53
-; CHECK: = add(r1:0, r3:2):raw:
+; CHECK: = add(r1:0,r3:2):raw:
define i64 @test53(i32 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A2.addsp(i32 %Rs, i64 %Rt)
@@ -201,7 +201,7 @@ entry:
}
; CHECK-LABEL: @test54
-; CHECK: = and(r1:0, r3:2)
+; CHECK: = and(r1:0,r3:2)
define i64 @test54(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A2.andp(i64 %Rs, i64 %Rt)
@@ -209,7 +209,7 @@ entry:
}
; CHECK-LABEL: @test55
-; CHECK: = or(r1:0, r3:2)
+; CHECK: = or(r1:0,r3:2)
define i64 @test55(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A2.orp(i64 %Rs, i64 %Rt)
@@ -217,7 +217,7 @@ entry:
}
; CHECK-LABEL: @test56
-; CHECK: = xor(r1:0, r3:2)
+; CHECK: = xor(r1:0,r3:2)
define i64 @test56(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A2.xorp(i64 %Rs, i64 %Rt)
@@ -225,7 +225,7 @@ entry:
}
; CHECK-LABEL: @test57
-; CHECK: = and(r1:0, ~r3:2)
+; CHECK: = and(r1:0,~r3:2)
define i64 @test57(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A4.andnp(i64 %Rs, i64 %Rt)
@@ -233,7 +233,7 @@ entry:
}
; CHECK-LABEL: @test58
-; CHECK: = or(r1:0, ~r3:2)
+; CHECK: = or(r1:0,~r3:2)
define i64 @test58(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A4.ornp(i64 %Rs, i64 %Rt)
@@ -241,7 +241,7 @@ entry:
}
; CHECK-LABEL: @test60
-; CHECK: = add(r0.l, r1.l)
+; CHECK: = add(r0.l,r1.l)
define i32 @test60(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.l16.ll(i32 %Rs, i32 %Rt)
@@ -249,7 +249,7 @@ entry:
}
; CHECK-LABEL: @test61
-; CHECK: = add(r0.l, r1.h)
+; CHECK: = add(r0.l,r1.h)
define i32 @test61(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.l16.hl(i32 %Rs, i32 %Rt)
@@ -257,7 +257,7 @@ entry:
}
; CHECK-LABEL: @test62
-; CHECK: = add(r0.l, r1.l):sat
+; CHECK: = add(r0.l,r1.l):sat
define i32 @test62(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %Rs, i32 %Rt)
@@ -265,7 +265,7 @@ entry:
}
; CHECK-LABEL: @test63
-; CHECK: = add(r0.l, r1.h):sat
+; CHECK: = add(r0.l,r1.h):sat
define i32 @test63(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32 %Rs, i32 %Rt)
@@ -273,7 +273,7 @@ entry:
}
; CHECK-LABEL: @test64
-; CHECK: = add(r0.l, r1.l):<<16
+; CHECK: = add(r0.l,r1.l):<<16
define i32 @test64(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.h16.ll(i32 %Rs, i32 %Rt)
@@ -281,7 +281,7 @@ entry:
}
; CHECK-LABEL: @test65
-; CHECK: = add(r0.l, r1.h):<<16
+; CHECK: = add(r0.l,r1.h):<<16
define i32 @test65(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.h16.lh(i32 %Rs, i32 %Rt)
@@ -289,7 +289,7 @@ entry:
}
; CHECK-LABEL: @test66
-; CHECK: = add(r0.h, r1.l):<<16
+; CHECK: = add(r0.h,r1.l):<<16
define i32 @test66(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.h16.hl(i32 %Rs, i32 %Rt)
@@ -297,7 +297,7 @@ entry:
}
; CHECK-LABEL: @test67
-; CHECK: = add(r0.h, r1.h):<<16
+; CHECK: = add(r0.h,r1.h):<<16
define i32 @test67(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.h16.hh(i32 %Rs, i32 %Rt)
@@ -305,7 +305,7 @@ entry:
}
; CHECK-LABEL: @test68
-; CHECK: = add(r0.l, r1.l):sat:<<16
+; CHECK: = add(r0.l,r1.l):sat:<<16
define i32 @test68(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32 %Rs, i32 %Rt)
@@ -313,7 +313,7 @@ entry:
}
; CHECK-LABEL: @test69
-; CHECK: = add(r0.l, r1.h):sat:<<16
+; CHECK: = add(r0.l,r1.h):sat:<<16
define i32 @test69(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32 %Rs, i32 %Rt)
@@ -321,7 +321,7 @@ entry:
}
; CHECK-LABEL: @test6A
-; CHECK: = add(r0.h, r1.l):sat:<<16
+; CHECK: = add(r0.h,r1.l):sat:<<16
define i32 @test6A(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32 %Rs, i32 %Rt)
@@ -329,7 +329,7 @@ entry:
}
; CHECK-LABEL: @test6B
-; CHECK: = add(r0.h, r1.h):sat:<<16
+; CHECK: = add(r0.h,r1.h):sat:<<16
define i32 @test6B(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32 %Rs, i32 %Rt)
@@ -337,7 +337,7 @@ entry:
}
; CHECK-LABEL: @test70
-; CHECK: = sub(r0.l, r1.l)
+; CHECK: = sub(r0.l,r1.l)
define i32 @test70(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.l16.ll(i32 %Rs, i32 %Rt)
@@ -345,7 +345,7 @@ entry:
}
; CHECK-LABEL: @test71
-; CHECK: = sub(r0.l, r1.h)
+; CHECK: = sub(r0.l,r1.h)
define i32 @test71(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.l16.hl(i32 %Rs, i32 %Rt)
@@ -353,7 +353,7 @@ entry:
}
; CHECK-LABEL: @test72
-; CHECK: = sub(r0.l, r1.l):sat
+; CHECK: = sub(r0.l,r1.l):sat
define i32 @test72(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %Rs, i32 %Rt)
@@ -361,7 +361,7 @@ entry:
}
; CHECK-LABEL: @test73
-; CHECK: = sub(r0.l, r1.h):sat
+; CHECK: = sub(r0.l,r1.h):sat
define i32 @test73(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32 %Rs, i32 %Rt)
@@ -369,7 +369,7 @@ entry:
}
; CHECK-LABEL: @test74
-; CHECK: = sub(r0.l, r1.l):<<16
+; CHECK: = sub(r0.l,r1.l):<<16
define i32 @test74(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.h16.ll(i32 %Rs, i32 %Rt)
@@ -377,7 +377,7 @@ entry:
}
; CHECK-LABEL: @test75
-; CHECK: = sub(r0.l, r1.h):<<16
+; CHECK: = sub(r0.l,r1.h):<<16
define i32 @test75(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.h16.lh(i32 %Rs, i32 %Rt)
@@ -385,7 +385,7 @@ entry:
}
; CHECK-LABEL: @test76
-; CHECK: = sub(r0.h, r1.l):<<16
+; CHECK: = sub(r0.h,r1.l):<<16
define i32 @test76(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.h16.hl(i32 %Rs, i32 %Rt)
@@ -393,7 +393,7 @@ entry:
}
; CHECK-LABEL: @test77
-; CHECK: = sub(r0.h, r1.h):<<16
+; CHECK: = sub(r0.h,r1.h):<<16
define i32 @test77(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.h16.hh(i32 %Rs, i32 %Rt)
@@ -401,7 +401,7 @@ entry:
}
; CHECK-LABEL: @test78
-; CHECK: = sub(r0.l, r1.l):sat:<<16
+; CHECK: = sub(r0.l,r1.l):sat:<<16
define i32 @test78(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32 %Rs, i32 %Rt)
@@ -409,7 +409,7 @@ entry:
}
; CHECK-LABEL: @test79
-; CHECK: = sub(r0.l, r1.h):sat:<<16
+; CHECK: = sub(r0.l,r1.h):sat:<<16
define i32 @test79(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32 %Rs, i32 %Rt)
@@ -417,7 +417,7 @@ entry:
}
; CHECK-LABEL: @test7A
-; CHECK: = sub(r0.h, r1.l):sat:<<16
+; CHECK: = sub(r0.h,r1.l):sat:<<16
define i32 @test7A(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32 %Rs, i32 %Rt)
@@ -425,7 +425,7 @@ entry:
}
; CHECK-LABEL: @test7B
-; CHECK: = sub(r0.h, r1.h):sat:<<16
+; CHECK: = sub(r0.h,r1.h):sat:<<16
define i32 @test7B(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32 %Rs, i32 %Rt)
@@ -433,7 +433,7 @@ entry:
}
; CHECK-LABEL: @test90
-; CHECK: = and(#1, asl(r0, #2))
+; CHECK: = and(#1,asl(r0,#2))
define i32 @test90(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S4.andi.asl.ri(i32 1, i32 %Rs, i32 2)
@@ -441,7 +441,7 @@ entry:
}
; CHECK-LABEL: @test91
-; CHECK: = or(#1, asl(r0, #2))
+; CHECK: = or(#1,asl(r0,#2))
define i32 @test91(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S4.ori.asl.ri(i32 1, i32 %Rs, i32 2)
@@ -449,7 +449,7 @@ entry:
}
; CHECK-LABEL: @test92
-; CHECK: = add(#1, asl(r0, #2))
+; CHECK: = add(#1,asl(r0,#2))
define i32 @test92(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S4.addi.asl.ri(i32 1, i32 %Rs, i32 2)
@@ -457,7 +457,7 @@ entry:
}
; CHECK-LABEL: @test93
-; CHECK: = sub(#1, asl(r0, #2))
+; CHECK: = sub(#1,asl(r0,#2))
define i32 @test93(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S4.subi.asl.ri(i32 1, i32 %Rs, i32 2)
@@ -465,7 +465,7 @@ entry:
}
; CHECK-LABEL: @test94
-; CHECK: = and(#1, lsr(r0, #2))
+; CHECK: = and(#1,lsr(r0,#2))
define i32 @test94(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S4.andi.lsr.ri(i32 1, i32 %Rs, i32 2)
@@ -473,7 +473,7 @@ entry:
}
; CHECK-LABEL: @test95
-; CHECK: = or(#1, lsr(r0, #2))
+; CHECK: = or(#1,lsr(r0,#2))
define i32 @test95(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S4.ori.lsr.ri(i32 1, i32 %Rs, i32 2)
@@ -481,7 +481,7 @@ entry:
}
; CHECK-LABEL: @test96
-; CHECK: = add(#1, lsr(r0, #2))
+; CHECK: = add(#1,lsr(r0,#2))
define i32 @test96(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S4.addi.lsr.ri(i32 1, i32 %Rs, i32 2)
@@ -489,7 +489,7 @@ entry:
}
; CHECK-LABEL: @test97
-; CHECK: = sub(#1, lsr(r0, #2))
+; CHECK: = sub(#1,lsr(r0,#2))
define i32 @test97(i32 %Rs) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 1, i32 %Rs, i32 2)
@@ -497,7 +497,7 @@ entry:
}
; CHECK-LABEL: @test100
-; CHECK: = bitsplit(r0, r1)
+; CHECK: = bitsplit(r0,r1)
define i64 @test100(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A4.bitsplit(i32 %Rs, i32 %Rt)
@@ -505,7 +505,7 @@ entry:
}
; CHECK-LABEL: @test101
-; CHECK: = modwrap(r0, r1)
+; CHECK: = modwrap(r0,r1)
define i32 @test101(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A4.modwrapu(i32 %Rs, i32 %Rt)
@@ -513,7 +513,7 @@ entry:
}
; CHECK-LABEL: @test102
-; CHECK: = parity(r1:0, r3:2)
+; CHECK: = parity(r1:0,r3:2)
define i32 @test102(i64 %Rs, i64 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S2.parityp(i64 %Rs, i64 %Rt)
@@ -521,7 +521,7 @@ entry:
}
; CHECK-LABEL: @test103
-; CHECK: = parity(r0, r1)
+; CHECK: = parity(r0,r1)
define i32 @test103(i32 %Rs, i32 %Rt) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.S4.parity(i32 %Rs, i32 %Rt)
diff --git a/test/CodeGen/Hexagon/args.ll b/test/CodeGen/Hexagon/args.ll
index 3bfb8b159556..a1c7bc3230dd 100644
--- a/test/CodeGen/Hexagon/args.ll
+++ b/test/CodeGen/Hexagon/args.ll
@@ -1,8 +1,8 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: r5:4 = combine(#6, #5)
-; CHECK: r3:2 = combine(#4, #3)
-; CHECK: r1:0 = combine(#2, #1)
-; CHECK: memw(r29+#0)=#7
+; CHECK: r5:4 = combine(#6,#5)
+; CHECK: r3:2 = combine(#4,#3)
+; CHECK: r1:0 = combine(#2,#1)
+; CHECK: memw(r29+#0) = #7
define void @foo() nounwind {
diff --git a/test/CodeGen/Hexagon/avoid-predspill-calleesaved.ll b/test/CodeGen/Hexagon/avoid-predspill-calleesaved.ll
index 561013b174dd..906a877b91e5 100644
--- a/test/CodeGen/Hexagon/avoid-predspill-calleesaved.ll
+++ b/test/CodeGen/Hexagon/avoid-predspill-calleesaved.ll
@@ -7,7 +7,6 @@
; without adding an extra spill of that register.
;
; CHECK: PredSpill:
-; CHECK: memd(r29{{.*}}) = r17:16
; CHECK-DAG: r{{[0-9]+}} = p0
; CHECK-DAG: p0 = r{{[0-9]+}}
; CHECK-NOT: = memw(r29
diff --git a/test/CodeGen/Hexagon/bit-bitsplit-at.ll b/test/CodeGen/Hexagon/bit-bitsplit-at.ll
new file mode 100644
index 000000000000..87d535fd0f22
--- /dev/null
+++ b/test/CodeGen/Hexagon/bit-bitsplit-at.ll
@@ -0,0 +1,33 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; REQUIRES: asserts
+
+; This testcase used to crash due to putting the bitsplit instruction in
+; the wrong place.
+; CHECK: bitsplit
+
+target triple = "hexagon"
+
+define hidden fastcc i32 @fred(i32 %a0) unnamed_addr #0 {
+b1:
+ %v2 = lshr i32 %a0, 16
+ %v3 = trunc i32 %v2 to i8
+ br i1 undef, label %b6, label %b4
+
+b4: ; preds = %b1
+ %v5 = and i32 %a0, 65535
+ br i1 undef, label %b8, label %b9
+
+b6: ; preds = %b1
+ %v7 = and i32 %a0, 65535
+ br label %b9
+
+b8: ; preds = %b4
+ store i8 %v3, i8* undef, align 2
+ unreachable
+
+b9: ; preds = %b6, %b4
+ %v10 = phi i32 [ %v7, %b6 ], [ %v5, %b4 ]
+ ret i32 %v10
+}
+
+attributes #0 = { nounwind optsize "target-cpu"="hexagonv60" "target-features"="-hvx-double,-long-calls" }
diff --git a/test/CodeGen/Hexagon/bit-bitsplit-src.ll b/test/CodeGen/Hexagon/bit-bitsplit-src.ll
new file mode 100644
index 000000000000..2d1c71c709f4
--- /dev/null
+++ b/test/CodeGen/Hexagon/bit-bitsplit-src.ll
@@ -0,0 +1,35 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; REQUIRES: asserts
+
+; This used to crash. Check for some sane output.
+; CHECK: call printf
+
+target triple = "hexagon"
+
+@g0 = external local_unnamed_addr global [4 x i64], align 8
+@g1 = external hidden unnamed_addr constant [29 x i8], align 1
+@g2 = external hidden unnamed_addr constant [29 x i8], align 1
+
+define void @fred() local_unnamed_addr #0 {
+b0:
+ %v1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g0, i32 0, i32 0), align 8
+ %v2 = trunc i64 %v1 to i32
+ %v3 = lshr i64 %v1, 16
+ %v4 = trunc i64 %v3 to i32
+ %v5 = and i32 %v4, 255
+ %v6 = add nuw nsw i32 0, %v5
+ %v7 = add nuw nsw i32 %v6, 0
+ %v8 = zext i32 %v7 to i64
+ %v9 = and i32 %v2, 65535
+ %v10 = and i32 %v4, 65535
+ %v11 = add nuw nsw i32 %v10, %v9
+ %v12 = zext i32 %v11 to i64
+ tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @g1, i32 0, i32 0), i64 %v8) #0
+ tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @g2, i32 0, i32 0), i64 %v12) #0
+ ret void
+}
+
+; Function Attrs: nounwind
+declare void @printf(i8* nocapture readonly, ...) local_unnamed_addr #0
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" }
diff --git a/test/CodeGen/Hexagon/bit-bitsplit.ll b/test/CodeGen/Hexagon/bit-bitsplit.ll
new file mode 100644
index 000000000000..4ae2e4e66508
--- /dev/null
+++ b/test/CodeGen/Hexagon/bit-bitsplit.ll
@@ -0,0 +1,17 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: bitsplit(r{{[0-9]+}},#5)
+
+target triple = "hexagon"
+
+define i32 @fred(i32 %a, i32* nocapture readonly %b) local_unnamed_addr #0 {
+entry:
+ %and = and i32 %a, 31
+ %shr = lshr i32 %a, 5
+ %arrayidx = getelementptr inbounds i32, i32* %b, i32 %shr
+ %0 = load i32, i32* %arrayidx, align 4
+ %shr1 = lshr i32 %0, %and
+ %and2 = and i32 %shr1, 1
+ ret i32 %and2
+}
+
+attributes #0 = { norecurse nounwind readonly "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double" }
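
The check above expects the and/lshr pair in the IR to be combined into a single bitsplit(r,#5). A minimal C sketch of what that instruction computes, assuming a split position n in 1..31 (the struct and function names are hypothetical, not part of the patch):

#include <stdint.h>

typedef struct { uint32_t lo, hi; } BitSplit;

static BitSplit bitsplit(uint32_t x, unsigned n) {
  BitSplit r;
  r.lo = x & ((1u << n) - 1);   /* low n bits: the "and %a, 31" for n = 5 */
  r.hi = x >> n;                /* remaining bits: the "lshr %a, 5" */
  return r;
}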
diff --git a/test/CodeGen/Hexagon/bit-eval.ll b/test/CodeGen/Hexagon/bit-eval.ll
index 1d2be5bfc19d..5b0111dfcd10 100644
--- a/test/CodeGen/Hexagon/bit-eval.ll
+++ b/test/CodeGen/Hexagon/bit-eval.ll
@@ -20,7 +20,7 @@ entry:
}
; CHECK-LABEL: test3:
-; CHECK: r1:0 = combine(#0, #1)
+; CHECK: r1:0 = combine(#0,#1)
define i64 @test3() #0 {
entry:
%0 = tail call i64 @llvm.hexagon.S4.extractp(i64 -1, i32 63, i32 63)
diff --git a/test/CodeGen/Hexagon/bit-ext-sat.ll b/test/CodeGen/Hexagon/bit-ext-sat.ll
new file mode 100644
index 000000000000..47c49c2364b7
--- /dev/null
+++ b/test/CodeGen/Hexagon/bit-ext-sat.ll
@@ -0,0 +1,57 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+target triple = "hexagon"
+
+; CHECK-LABEL: xh_sh
+; CHECK: sath
+; CHECK-NOT: sxth
+define i32 @xh_sh(i32 %x) local_unnamed_addr #0 {
+entry:
+ %0 = tail call i32 @llvm.hexagon.A2.sath(i32 %x)
+ %1 = tail call i32 @llvm.hexagon.A2.sxth(i32 %0)
+ ret i32 %1
+}
+
+; CHECK-LABEL: xb_sb
+; CHECK: satb
+; CHECK-NOT: sxtb
+define i32 @xb_sb(i32 %x) local_unnamed_addr #0 {
+entry:
+ %0 = tail call i32 @llvm.hexagon.A2.satb(i32 %x)
+ %1 = tail call i32 @llvm.hexagon.A2.sxtb(i32 %0)
+ ret i32 %1
+}
+
+; CHECK-LABEL: xuh_suh
+; CHECK: satuh
+; CHECK-NOT: zxth
+define i32 @xuh_suh(i32 %x) local_unnamed_addr #0 {
+entry:
+ %0 = tail call i32 @llvm.hexagon.A2.satuh(i32 %x)
+ %1 = tail call i32 @llvm.hexagon.A2.zxth(i32 %0)
+ ret i32 %1
+}
+
+; CHECK-LABEL: xub_sub
+; CHECK: satub
+; CHECK-NOT: zxtb
+define i32 @xub_sub(i32 %x) local_unnamed_addr #0 {
+entry:
+ %0 = tail call i32 @llvm.hexagon.A2.satub(i32 %x)
+ %1 = tail call i32 @llvm.hexagon.A2.zxtb(i32 %0)
+ ret i32 %1
+}
+
+
+declare i32 @llvm.hexagon.A2.sxtb(i32) #1
+declare i32 @llvm.hexagon.A2.sxth(i32) #1
+declare i32 @llvm.hexagon.A2.zxtb(i32) #1
+declare i32 @llvm.hexagon.A2.zxth(i32) #1
+
+declare i32 @llvm.hexagon.A2.satb(i32) #1
+declare i32 @llvm.hexagon.A2.sath(i32) #1
+declare i32 @llvm.hexagon.A2.satub(i32) #1
+declare i32 @llvm.hexagon.A2.satuh(i32) #1
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" }
+attributes #1 = { nounwind readnone }
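
Why the extends above are dead, as a C sketch (assuming these functions model A2.sath/A2.sxth; the names are hypothetical): saturating to the signed 16-bit range already yields a value that is its own sign-extension, so a following sxth changes nothing.

#include <stdint.h>

static int32_t sath(int32_t x) {
  return x > 32767 ? 32767 : (x < -32768 ? -32768 : x);
}
static int32_t sxth(int32_t x) { return (int16_t)x; }
/* sxth(sath(x)) == sath(x) for every x, which is what the CHECK-NOT lines
   verify; the satb/satub/satuh cases follow the same reasoning. */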
diff --git a/test/CodeGen/Hexagon/bit-extract-off.ll b/test/CodeGen/Hexagon/bit-extract-off.ll
new file mode 100644
index 000000000000..183435ab7b23
--- /dev/null
+++ b/test/CodeGen/Hexagon/bit-extract-off.ll
@@ -0,0 +1,23 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: extractu(r1,#31,#0)
+
+; In the IR this was an extract of 31 bits starting at position 32 in r1:0.
+; When mapping it to an extract from r1, the offset was not reset to 0, and
+; we had "extractu(r1,#31,#32)".
+
+target triple = "hexagon"
+
+define hidden i32 @fred([101 x double]* %a0, i32 %a1, i32* %a2, i32* %a3) #0 {
+b4:
+ br label %b5
+
+b5: ; preds = %b5, %b4
+ %v6 = call double @fabs(double undef) #1
+ store double %v6, double* undef, align 8
+ br label %b5
+}
+
+declare double @fabs(double) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" }
+attributes #1 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" }
diff --git a/test/CodeGen/Hexagon/bit-extract.ll b/test/CodeGen/Hexagon/bit-extract.ll
new file mode 100644
index 000000000000..ad7d05d2c235
--- /dev/null
+++ b/test/CodeGen/Hexagon/bit-extract.ll
@@ -0,0 +1,75 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+target triple = "hexagon"
+
+; CHECK-LABEL: ua
+; CHECK: extractu(r0,#26,#0)
+define i32 @ua(i32 %x) local_unnamed_addr #0 {
+entry:
+ %shl = and i32 %x, 67108863
+ ret i32 %shl
+}
+
+; CHECK-LABEL: ub
+; CHECK: extractu(r0,#16,#4)
+define i32 @ub(i32 %x) local_unnamed_addr #0 {
+entry:
+ %0 = lshr i32 %x, 4
+ %shr = and i32 %0, 65535
+ ret i32 %shr
+}
+
+; CHECK-LABEL: uc
+; CHECK: extractu(r0,#24,#0)
+define i32 @uc(i32 %x) local_unnamed_addr #0 {
+entry:
+ %shl = and i32 %x, 16777215
+ ret i32 %shl
+}
+
+; CHECK-LABEL: ud
+; CHECK: extractu(r0,#16,#8)
+define i32 @ud(i32 %x) local_unnamed_addr #0 {
+entry:
+ %bf.lshr = lshr i32 %x, 8
+ %bf.clear = and i32 %bf.lshr, 65535
+ ret i32 %bf.clear
+}
+
+; CHECK-LABEL: sa
+; CHECK: extract(r0,#26,#0)
+define i32 @sa(i32 %x) local_unnamed_addr #0 {
+entry:
+ %shl = shl i32 %x, 6
+ %shr = ashr exact i32 %shl, 6
+ ret i32 %shr
+}
+
+; CHECK-LABEL: sb
+; CHECK: extract(r0,#16,#4)
+define i32 @sb(i32 %x) local_unnamed_addr #0 {
+entry:
+ %shl = shl i32 %x, 12
+ %shr = ashr i32 %shl, 16
+ ret i32 %shr
+}
+
+; CHECK-LABEL: sc
+; CHECK: extract(r0,#24,#0)
+define i32 @sc(i32 %x) local_unnamed_addr #0 {
+entry:
+ %shl = shl i32 %x, 8
+ %shr = ashr exact i32 %shl, 8
+ ret i32 %shr
+}
+
+; CHECK-LABEL: sd
+; CHECK: extract(r0,#16,#8)
+define i32 @sd(i32 %x) local_unnamed_addr #0 {
+entry:
+ %bf.shl = shl i32 %x, 8
+ %bf.ashr = ashr i32 %bf.shl, 16
+ ret i32 %bf.ashr
+}
+
+attributes #0 = { noinline norecurse nounwind readnone "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" }
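
The and/shift patterns above all denote bitfield extraction; a C sketch of the two generic forms, assuming a width w and offset o with 1 <= w and w + o <= 32 (names hypothetical):

#include <stdint.h>

static uint32_t extract_u(uint32_t x, unsigned w, unsigned o) {
  return (x >> o) & ((1u << w) - 1);        /* the lshr+and in ub/ud */
}
static int32_t extract_s(int32_t x, unsigned w, unsigned o) {
  /* the shl+ashr in sa..sd; assumes arithmetic right shift of signed ints,
     matching the ashr in the IR */
  return (int32_t)((uint32_t)x << (32 - w - o)) >> (32 - w);
}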
diff --git a/test/CodeGen/Hexagon/bit-has.ll b/test/CodeGen/Hexagon/bit-has.ll
new file mode 100644
index 000000000000..9022de391868
--- /dev/null
+++ b/test/CodeGen/Hexagon/bit-has.ll
@@ -0,0 +1,64 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; REQUIRES: asserts
+
+; This used to crash. Check for some sane output.
+; CHECK: sath
+
+target triple = "hexagon"
+
+define void @fred() local_unnamed_addr #0 {
+b0:
+ %v1 = load i32, i32* undef, align 4
+ %v2 = tail call i32 @llvm.hexagon.A2.sath(i32 undef)
+ %v3 = and i32 %v1, 603979776
+ %v4 = trunc i32 %v3 to i30
+ switch i30 %v4, label %b22 [
+ i30 -536870912, label %b5
+ i30 -469762048, label %b6
+ ]
+
+b5: ; preds = %b0
+ unreachable
+
+b6: ; preds = %b0
+ %v7 = load i32, i32* undef, align 4
+ %v8 = sub nsw i32 65536, %v7
+ %v9 = load i32, i32* undef, align 4
+ %v10 = mul nsw i32 %v9, %v9
+ %v11 = zext i32 %v10 to i64
+ %v12 = mul nsw i32 %v2, %v8
+ %v13 = sext i32 %v12 to i64
+ %v14 = mul nsw i64 %v13, %v11
+ %v15 = trunc i64 %v14 to i32
+ %v16 = and i32 %v15, 2147483647
+ store i32 %v16, i32* undef, align 4
+ %v17 = lshr i64 %v14, 31
+ %v18 = trunc i64 %v17 to i32
+ store i32 %v18, i32* undef, align 4
+ br label %b19
+
+b19: ; preds = %b6
+ br i1 undef, label %b20, label %b21
+
+b20: ; preds = %b19
+ unreachable
+
+b21: ; preds = %b19
+ br label %b23
+
+b22: ; preds = %b0
+ unreachable
+
+b23: ; preds = %b21
+ %v24 = load i32, i32* undef, align 4
+ %v25 = shl i32 %v24, 1
+ %v26 = and i32 %v25, 65534
+ %v27 = or i32 %v26, 0
+ store i32 %v27, i32* undef, align 4
+ ret void
+}
+
+declare i32 @llvm.hexagon.A2.sath(i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv5" "target-features"="-hvx,-hvx-double,-long-calls" }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/Hexagon/bit-phi.ll b/test/CodeGen/Hexagon/bit-phi.ll
index 86b18d8bf256..7abfba079bb0 100644
--- a/test/CodeGen/Hexagon/bit-phi.ll
+++ b/test/CodeGen/Hexagon/bit-phi.ll
@@ -1,4 +1,5 @@
; RUN: llc -march=hexagon < %s
+; RUN: llc -march=hexagon -disable-hcp < %s
; REQUIRES: asserts
target datalayout = "e-m:e-p:32:32-i1:32-i64:64-a:0-v32:32-n16:32"
diff --git a/test/CodeGen/Hexagon/bit-rie.ll b/test/CodeGen/Hexagon/bit-rie.ll
index 6bd0558f580c..302382a1ade4 100644
--- a/test/CodeGen/Hexagon/bit-rie.ll
+++ b/test/CodeGen/Hexagon/bit-rie.ll
@@ -187,8 +187,8 @@ declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) #2
declare i32 @llvm.hexagon.S2.clbnorm(i32) #2
declare i32 @llvm.hexagon.S2.lsr.r.r(i32, i32) #2
declare i64 @llvm.hexagon.M2.mpyd.ll.s1(i32, i32) #2
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
attributes #0 = { norecurse nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,-hvx-double" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind }
diff --git a/test/CodeGen/Hexagon/bit-skip-byval.ll b/test/CodeGen/Hexagon/bit-skip-byval.ll
index d6c1aad94007..9ee4014ae346 100644
--- a/test/CodeGen/Hexagon/bit-skip-byval.ll
+++ b/test/CodeGen/Hexagon/bit-skip-byval.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
;
; Either and or zxtb.
-; CHECK: r0 = and(r1, #255)
+; CHECK: r0 = and(r1,#255)
%struct.t0 = type { i32 }
diff --git a/test/CodeGen/Hexagon/bit-validate-reg.ll b/test/CodeGen/Hexagon/bit-validate-reg.ll
index 16d4a5e4484d..42eed97786cd 100644
--- a/test/CodeGen/Hexagon/bit-validate-reg.ll
+++ b/test/CodeGen/Hexagon/bit-validate-reg.ll
@@ -1,10 +1,13 @@
-; RUN: llc -march=hexagon < %s | FileCheck %s
+; RUN: llc -march=hexagon -hexbit-extract=0 < %s | FileCheck %s
; Make sure we don't generate zxtb to transfer a predicate register into
; a general purpose register.
; CHECK: r0 = p0
; CHECK-NOT: zxtb(p
+; CHECK-NOT: and(p
+; CHECK-NOT: extract(p
+; CHECK-NOT: extractu(p
target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/bitmanip.ll b/test/CodeGen/Hexagon/bitmanip.ll
new file mode 100644
index 000000000000..2044a2fdd083
--- /dev/null
+++ b/test/CodeGen/Hexagon/bitmanip.ll
@@ -0,0 +1,135 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; CHECK-LABEL: popcount_16
+; CHECK: zxth
+; CHECK: popcount
+define i16 @popcount_16(i16 %p) #0 {
+ %t = call i16 @llvm.ctpop.i16(i16 %p) #0
+ ret i16 %t
+}
+
+; CHECK-LABEL: popcount_32
+; CHECK: popcount
+define i32 @popcount_32(i32 %p) #0 {
+ %t = call i32 @llvm.ctpop.i32(i32 %p) #0
+ ret i32 %t
+}
+
+; CHECK-LABEL: popcount_64
+; CHECK: popcount
+define i64 @popcount_64(i64 %p) #0 {
+ %t = call i64 @llvm.ctpop.i64(i64 %p) #0
+ ret i64 %t
+}
+
+; CHECK-LABEL: ctlz_16
+; CHECK: [[REG0:r[0-9]+]] = zxth
+; CHECK: [[REG1:r[0-9]+]] = cl0([[REG0]])
+; CHECK: add([[REG1]],#-16)
+define i16 @ctlz_16(i16 %p) #0 {
+ %t = call i16 @llvm.ctlz.i16(i16 %p, i1 true) #0
+ ret i16 %t
+}
+
+; CHECK-LABEL: ctlz_32
+; CHECK: cl0
+define i32 @ctlz_32(i32 %p) #0 {
+ %t = call i32 @llvm.ctlz.i32(i32 %p, i1 true) #0
+ ret i32 %t
+}
+
+; CHECK-LABEL: ctlz_64
+; CHECK: cl0
+define i64 @ctlz_64(i64 %p) #0 {
+ %t = call i64 @llvm.ctlz.i64(i64 %p, i1 true) #0
+ ret i64 %t
+}
+
+; CHECK-LABEL: cttz_16
+; CHECK: ct0
+define i16 @cttz_16(i16 %p) #0 {
+ %t = call i16 @llvm.cttz.i16(i16 %p, i1 true) #0
+ ret i16 %t
+}
+
+; CHECK-LABEL: cttz_32
+; CHECK: ct0
+define i32 @cttz_32(i32 %p) #0 {
+ %t = call i32 @llvm.cttz.i32(i32 %p, i1 true) #0
+ ret i32 %t
+}
+
+; CHECK-LABEL: cttz_64
+; CHECK: ct0
+define i64 @cttz_64(i64 %p) #0 {
+ %t = call i64 @llvm.cttz.i64(i64 %p, i1 true) #0
+ ret i64 %t
+}
+
+; CHECK-LABEL: brev_16
+; CHECK: [[REG:r[0-9]+]] = brev
+; CHECK: lsr([[REG]],#16)
+define i16 @brev_16(i16 %p) #0 {
+ %t = call i16 @llvm.bitreverse.i16(i16 %p) #0
+ ret i16 %t
+}
+
+; CHECK-LABEL: brev_32
+; CHECK: brev
+define i32 @brev_32(i32 %p) #0 {
+ %t = call i32 @llvm.bitreverse.i32(i32 %p) #0
+ ret i32 %t
+}
+
+; CHECK-LABEL: brev_64
+; CHECK: brev
+define i64 @brev_64(i64 %p) #0 {
+ %t = call i64 @llvm.bitreverse.i64(i64 %p) #0
+ ret i64 %t
+}
+
+; CHECK-LABEL: bswap_16
+; CHECK: [[REG:r[0-9]+]] = swiz
+; CHECK: lsr([[REG]],#16)
+define i16 @bswap_16(i16 %p) #0 {
+ %t = call i16 @llvm.bswap.i16(i16 %p) #0
+ ret i16 %t
+}
+
+; CHECK-LABEL: bswap_32
+; CHECK: swiz
+define i32 @bswap_32(i32 %p) #0 {
+ %t = call i32 @llvm.bswap.i32(i32 %p) #0
+ ret i32 %t
+}
+
+; CHECK-LABEL: bswap_64
+; CHECK: swiz
+; CHECK: swiz
+; CHECK: combine
+define i64 @bswap_64(i64 %p) #0 {
+ %t = call i64 @llvm.bswap.i64(i64 %p) #0
+ ret i64 %t
+}
+
+declare i16 @llvm.ctpop.i16(i16) #0
+declare i32 @llvm.ctpop.i32(i32) #0
+declare i64 @llvm.ctpop.i64(i64) #0
+
+declare i16 @llvm.ctlz.i16(i16, i1) #0
+declare i32 @llvm.ctlz.i32(i32, i1) #0
+declare i64 @llvm.ctlz.i64(i64, i1) #0
+
+declare i16 @llvm.cttz.i16(i16, i1) #0
+declare i32 @llvm.cttz.i32(i32, i1) #0
+declare i64 @llvm.cttz.i64(i64, i1) #0
+
+declare i16 @llvm.bitreverse.i16(i16) #0
+declare i32 @llvm.bitreverse.i32(i32) #0
+declare i64 @llvm.bitreverse.i64(i64) #0
+
+declare i16 @llvm.bswap.i16(i16) #0
+declare i32 @llvm.bswap.i32(i32) #0
+declare i64 @llvm.bswap.i64(i64) #0
+
+attributes #0 = { nounwind readnone }
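
The 16-bit cases above are legalized through the 32-bit operations; a C sketch of the two shapes the CHECK lines look for (function names hypothetical, builtins as in GCC/Clang):

#include <stdint.h>

static uint16_t ctlz16(uint16_t x) {          /* zxth; cl0; add(#-16) */
  /* count in 32 bits, then discard the 16 guaranteed leading zeros;
     x != 0 is assumed, as the "i1 true" argument in the IR says */
  return (uint16_t)(__builtin_clz((uint32_t)x) - 16);
}
static uint16_t bswap16(uint16_t x) {         /* swiz; lsr(#16) */
  return (uint16_t)(__builtin_bswap32((uint32_t)x) >> 16);
}
/* brev_16 has the same shape: a 32-bit bitreverse followed by lsr #16. */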
diff --git a/test/CodeGen/Hexagon/block-addr.ll b/test/CodeGen/Hexagon/block-addr.ll
index c0db2cef545e..5af3a69f8aab 100644
--- a/test/CodeGen/Hexagon/block-addr.ll
+++ b/test/CodeGen/Hexagon/block-addr.ll
@@ -2,7 +2,7 @@
; CHECK: .LJTI
; CHECK-DAG: r[[REG:[0-9]+]] = memw(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+<<#[0-9]+}})
-; CHECK-DAG: jumpr:nt r[[REG]]
+; CHECK-DAG: jumpr r[[REG]]
define void @main() #0 {
entry:
diff --git a/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll b/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
index a56680bd4399..e09f79866215 100644
--- a/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
+++ b/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
@@ -3,7 +3,7 @@
; Check that the testcase compiles successfully. Expect that if-conversion
; took place.
; CHECK-LABEL: fred:
-; CHECK: if (!p0) r1 = memw(r0 + #0)
+; CHECK: if (!p0) r1 = memw(r0+#0)
target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/brev_ld.ll b/test/CodeGen/Hexagon/brev_ld.ll
index a2914296ec41..861da32b981b 100644
--- a/test/CodeGen/Hexagon/brev_ld.ll
+++ b/test/CodeGen/Hexagon/brev_ld.ll
@@ -29,7 +29,7 @@ entry:
%1 = bitcast i64* %inputLR to i8*
%sub = sub i32 13, %shr1
%shl = shl i32 1, %sub
-; CHECK: = memd(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: = memd(r{{[0-9]*}}++m{{[0-1]}}:brev)
%2 = call i8* @llvm.hexagon.brev.ldd(i8* %0, i8* %1, i32 %shl)
%3 = bitcast i8* %1 to i64*
%4 = load i64, i64* %3, align 8, !tbaa !0
@@ -49,7 +49,7 @@ entry:
%1 = bitcast i32* %inputLR to i8*
%sub = sub i32 14, %shr1
%shl = shl i32 1, %sub
-; CHECK: = memw(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: = memw(r{{[0-9]*}}++m{{[0-1]}}:brev)
%2 = call i8* @llvm.hexagon.brev.ldw(i8* %0, i8* %1, i32 %shl)
%3 = bitcast i8* %1 to i32*
%4 = load i32, i32* %3, align 4, !tbaa !2
@@ -69,7 +69,7 @@ entry:
%1 = bitcast i16* %inputLR to i8*
%sub = sub i32 15, %shr1
%shl = shl i32 1, %sub
-; CHECK: = memh(r{{[0-9]*}} ++ m0:brev)
+; CHECK: = memh(r{{[0-9]*}}++m0:brev)
%2 = call i8* @llvm.hexagon.brev.ldh(i8* %0, i8* %1, i32 %shl)
%3 = bitcast i8* %1 to i16*
%4 = load i16, i16* %3, align 2, !tbaa !3
@@ -89,7 +89,7 @@ entry:
%1 = bitcast i16* %inputLR to i8*
%sub = sub i32 15, %shr1
%shl = shl i32 1, %sub
-; CHECK: = memuh(r{{[0-9]*}} ++ m0:brev)
+; CHECK: = memuh(r{{[0-9]*}}++m0:brev)
%2 = call i8* @llvm.hexagon.brev.lduh(i8* %0, i8* %1, i32 %shl)
%3 = bitcast i8* %1 to i16*
%4 = load i16, i16* %3, align 2, !tbaa !3
@@ -108,7 +108,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%sub = sub nsw i32 16, %shr1
%shl = shl i32 1, %sub
-; CHECK: = memub(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: = memub(r{{[0-9]*}}++m{{[0-1]}}:brev)
%1 = call i8* @llvm.hexagon.brev.ldub(i8* %0, i8* %inputLR, i32 %shl)
%2 = load i8, i8* %inputLR, align 1, !tbaa !0
ret i8 %2
@@ -126,7 +126,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%sub = sub nsw i32 16, %shr1
%shl = shl i32 1, %sub
-; CHECK: = memb(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: = memb(r{{[0-9]*}}++m{{[0-1]}}:brev)
%1 = call i8* @llvm.hexagon.brev.ldb(i8* %0, i8* %inputLR, i32 %shl)
%2 = load i8, i8* %inputLR, align 1, !tbaa !0
ret i8 %2
diff --git a/test/CodeGen/Hexagon/brev_st.ll b/test/CodeGen/Hexagon/brev_st.ll
index 6c55681a683b..cee5f52e3e40 100644
--- a/test/CodeGen/Hexagon/brev_st.ll
+++ b/test/CodeGen/Hexagon/brev_st.ll
@@ -26,7 +26,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%sub = sub i32 13, %shr2
%shl = shl i32 1, %sub
-; CHECK: memd(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: memd(r{{[0-9]*}}++m{{[0-1]}}:brev)
%1 = tail call i8* @llvm.hexagon.brev.std(i8* %0, i64 undef, i32 %shl)
ret i64 0
}
@@ -42,7 +42,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%sub = sub i32 14, %shr1
%shl = shl i32 1, %sub
-; CHECK: memw(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: memw(r{{[0-9]*}}++m{{[0-1]}}:brev)
%1 = tail call i8* @llvm.hexagon.brev.stw(i8* %0, i32 undef, i32 %shl)
ret i32 0
}
@@ -58,7 +58,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%sub = sub i32 15, %shr2
%shl = shl i32 1, %sub
-; CHECK: memh(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: memh(r{{[0-9]*}}++m{{[0-1]}}:brev)
%1 = tail call i8* @llvm.hexagon.brev.sth(i8* %0, i32 0, i32 %shl)
ret i16 0
}
@@ -74,7 +74,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%sub = sub i32 15, %shr2
%shl = shl i32 1, %sub
-; CHECK: memh(r{{[0-9]*}} ++ m{{[0-1]}}:brev){{ *}}={{ *}}r{{[0-9]*}}.h
+; CHECK: memh(r{{[0-9]*}}++m{{[0-1]}}:brev) = r{{[0-9]*}}.h
%1 = tail call i8* @llvm.hexagon.brev.sthhi(i8* %0, i32 0, i32 %shl)
ret i16 0
}
@@ -89,7 +89,7 @@ entry:
%arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
%0 = bitcast i16* %arrayidx to i8*
%sub = sub nsw i32 16, %shr2
- ; CHECK: memb(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+ ; CHECK: memb(r{{[0-9]*}}++m{{[0-1]}}:brev)
%shl = shl i32 1, %sub
%1 = tail call i8* @llvm.hexagon.brev.stb(i8* %0, i32 0, i32 %shl)
ret i8 0
diff --git a/test/CodeGen/Hexagon/builtin-expect.ll b/test/CodeGen/Hexagon/builtin-expect.ll
new file mode 100644
index 000000000000..9945da1782b2
--- /dev/null
+++ b/test/CodeGen/Hexagon/builtin-expect.ll
@@ -0,0 +1,44 @@
+; RUN: llc -march=hexagon -disable-block-placement < %s | FileCheck %s
+
+; Check that the branch to the block b10 is marked as taken (i.e. ":t").
+; CHECK-LABEL: foo
+; CHECK: if ({{.*}}) jump:t .LBB0_[[LAB:[0-9]+]]
+; CHECK: [[LAB]]:
+; CHECK: add({{.*}},#65)
+
+target triple = "hexagon"
+
+define i32 @foo(i32 %a0) local_unnamed_addr #0 {
+b1:
+ %v2 = icmp eq i32 %a0, 0
+ br i1 %v2, label %b3, label %b10, !prof !0
+
+b3: ; preds = %b1
+ br label %b4
+
+b4: ; preds = %b4, %b3
+ %v5 = phi i32 [ %v6, %b4 ], [ 0, %b3 ]
+ %v6 = add nuw nsw i32 %v5, 1
+ %v7 = mul nuw nsw i32 %v5, 67
+ %v8 = tail call i32 @bar(i32 %v7) #0
+ %v9 = icmp eq i32 %v6, 10
+ br i1 %v9, label %b13, label %b4
+
+b10: ; preds = %b1
+ %v11 = add nsw i32 %a0, 65
+ %v12 = tail call i32 @bar(i32 %v11) #0
+ br label %b14
+
+b13: ; preds = %b4
+ br label %b14
+
+b14: ; preds = %b13, %b10
+ %v15 = phi i32 [ %v12, %b10 ], [ 0, %b13 ]
+ ret i32 %v15
+}
+
+declare i32 @bar(i32) local_unnamed_addr #0
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,-hvx-double,-long-calls" }
+
+!0 = !{!"branch_weights", i32 1, i32 2000}
diff --git a/test/CodeGen/Hexagon/cext-valid-packet1.ll b/test/CodeGen/Hexagon/cext-valid-packet1.ll
index 36abc59f5e3e..b0aa3c16f862 100644
--- a/test/CodeGen/Hexagon/cext-valid-packet1.ll
+++ b/test/CodeGen/Hexagon/cext-valid-packet1.ll
@@ -3,8 +3,8 @@
; Check that the packetizer generates valid packets with constant
; extended instructions.
; CHECK: {
-; CHECK-NEXT: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}, ##{{[0-9]+}})
-; CHECK-NEXT: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}, ##{{[0-9]+}})
+; CHECK-NEXT: r{{[0-9]+}} = add(r{{[0-9]+}},##{{[0-9]+}})
+; CHECK-NEXT: r{{[0-9]+}} = add(r{{[0-9]+}},##{{[0-9]+}})
; CHECK-NEXT: }
define i32 @check-packet1(i32 %a, i32 %b, i32 %c) nounwind readnone {
diff --git a/test/CodeGen/Hexagon/circ_ld.ll b/test/CodeGen/Hexagon/circ_ld.ll
index ffa5f2cd2220..a9b367e9c4ee 100644
--- a/test/CodeGen/Hexagon/circ_ld.ll
+++ b/test/CodeGen/Hexagon/circ_ld.ll
@@ -26,7 +26,7 @@ entry:
%arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
%0 = bitcast i16* %arrayidx to i8*
%or = or i32 %shr1, 33554432
-; CHECK: = memb(r{{[0-9]*.}}++{{.}}#-1:circ(m{{[0-1]}}))
+; CHECK: = memb(r{{[0-9]*}}++#-1:circ(m{{[0-1]}}))
%1 = call i8* @llvm.hexagon.circ.ldb(i8* %0, i8* %inputLR, i32 %or, i32 -1)
%2 = load i8, i8* %inputLR, align 1, !tbaa !0
ret i8 %2
@@ -45,7 +45,7 @@ entry:
%1 = bitcast i64* %inputLR to i8*
%shl = shl nuw nsw i32 %shr1, 3
%or = or i32 %shl, 83886080
-; CHECK: = memd(r{{[0-9]*.}}++{{.}}#-8:circ(m{{[0-1]}}))
+; CHECK: = memd(r{{[0-9]*}}++#-8:circ(m{{[0-1]}}))
%2 = call i8* @llvm.hexagon.circ.ldd(i8* %0, i8* %1, i32 %or, i32 -8)
%3 = bitcast i8* %1 to i64*
%4 = load i64, i64* %3, align 8, !tbaa !0
@@ -64,7 +64,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%1 = bitcast i16* %inputLR to i8*
%or = or i32 %shr1, 50331648
-; CHECK: = memh(r{{[0-9]*.}}++{{.}}#-2:circ(m{{[0-1]}}))
+; CHECK: = memh(r{{[0-9]*}}++#-2:circ(m{{[0-1]}}))
%2 = call i8* @llvm.hexagon.circ.ldh(i8* %0, i8* %1, i32 %or, i32 -2)
%3 = bitcast i8* %1 to i16*
%4 = load i16, i16* %3, align 2, !tbaa !2
@@ -82,7 +82,7 @@ entry:
%arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
%0 = bitcast i16* %arrayidx to i8*
%or = or i32 %shr1, 33554432
-; CHECK: = memub(r{{[0-9]*.}}++{{.}}#-1:circ(m{{[0-1]}}))
+; CHECK: = memub(r{{[0-9]*}}++#-1:circ(m{{[0-1]}}))
%1 = call i8* @llvm.hexagon.circ.ldub(i8* %0, i8* %inputLR, i32 %or, i32 -1)
%2 = load i8, i8* %inputLR, align 1, !tbaa !0
ret i8 %2
@@ -100,7 +100,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%1 = bitcast i16* %inputLR to i8*
%or = or i32 %shr1, 50331648
-; CHECK: = memuh(r{{[0-9]*.}}++{{.}}#-2:circ(m{{[0-1]}}))
+; CHECK: = memuh(r{{[0-9]*}}++#-2:circ(m{{[0-1]}}))
%2 = call i8* @llvm.hexagon.circ.lduh(i8* %0, i8* %1, i32 %or, i32 -2)
%3 = bitcast i8* %1 to i16*
%4 = load i16, i16* %3, align 2, !tbaa !2
@@ -120,7 +120,7 @@ entry:
%1 = bitcast i32* %inputLR to i8*
%shl = shl nuw nsw i32 %shr1, 2
%or = or i32 %shl, 67108864
-; CHECK: = memw(r{{[0-9]*.}}++{{.}}#-4:circ(m{{[0-1]}}))
+; CHECK: = memw(r{{[0-9]*}}++#-4:circ(m{{[0-1]}}))
%2 = call i8* @llvm.hexagon.circ.ldw(i8* %0, i8* %1, i32 %or, i32 -4)
%3 = bitcast i8* %1 to i32*
%4 = load i32, i32* %3, align 4, !tbaa !3
diff --git a/test/CodeGen/Hexagon/circ_ldw.ll b/test/CodeGen/Hexagon/circ_ldw.ll
index 4511a9cf69da..abfb0886c686 100644
--- a/test/CodeGen/Hexagon/circ_ldw.ll
+++ b/test/CodeGen/Hexagon/circ_ldw.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
-; CHECK: r{{[0-9]*}} = memw(r{{[0-9]*.}}++{{.}}#-4:circ(m0))
+; CHECK: r{{[0-9]*}} = memw(r{{[0-9]*}}++#-4:circ(m0))
%union.vect64 = type { i64 }
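
A simplified C sketch of the post-increment circular addressing these checks match (memw(r++#-4:circ(m0))): the pointer advances by the immediate and wraps within a buffer whose length is encoded in the modifier register. All names here are hypothetical, and the wrap is simplified to |step| <= len:

#include <stdint.h>
#include <string.h>

static int32_t load_word_circ(const uint8_t *base, unsigned len,
                              unsigned *off, int step) {
  int32_t v;
  memcpy(&v, base + *off, sizeof v);                     /* load at offset */
  *off = (unsigned)((int)*off + step + (int)len) % len;  /* wrap in buffer */
  return v;
}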
diff --git a/test/CodeGen/Hexagon/circ_st.ll b/test/CodeGen/Hexagon/circ_st.ll
index 4b54afbc611d..c8fa256ad48a 100644
--- a/test/CodeGen/Hexagon/circ_st.ll
+++ b/test/CodeGen/Hexagon/circ_st.ll
@@ -23,7 +23,7 @@ entry:
%arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
%0 = bitcast i16* %arrayidx to i8*
%or = or i32 %shr2, 33554432
-; CHECK: memb(r{{[0-9]*}}{{.}}++{{.}}#-1:circ(m{{[0-1]}}))
+; CHECK: memb(r{{[0-9]*}}++#-1:circ(m{{[0-1]}}))
%1 = tail call i8* @llvm.hexagon.circ.stb(i8* %0, i32 0, i32 %or, i32 -1)
ret i8 0
}
@@ -39,7 +39,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%shl = shl nuw nsw i32 %shr1, 3
%or = or i32 %shl, 83886080
-; CHECK: memd(r{{[0-9]*}}{{.}}++{{.}}#-8:circ(m{{[0-1]}}))
+; CHECK: memd(r{{[0-9]*}}++#-8:circ(m{{[0-1]}}))
%1 = tail call i8* @llvm.hexagon.circ.std(i8* %0, i64 undef, i32 %or, i32 -8)
ret i64 0
}
@@ -54,7 +54,7 @@ entry:
%arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
%0 = bitcast i16* %arrayidx to i8*
%or = or i32 %shr2, 50331648
-; CHECK: memh(r{{[0-9]*}}{{.}}++{{.}}#-2:circ(m{{[0-1]}}))
+; CHECK: memh(r{{[0-9]*}}++#-2:circ(m{{[0-1]}}))
%1 = tail call i8* @llvm.hexagon.circ.sth(i8* %0, i32 0, i32 %or, i32 -2)
ret i16 0
}
@@ -69,7 +69,7 @@ entry:
%arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
%0 = bitcast i16* %arrayidx to i8*
%or = or i32 %shr2, 50331648
-; CHECK: memh(r{{[0-9]*}}{{.}}++{{.}}#-2:circ(m{{[0-1]}})){{ *}}={{ *}}r{{[0-9]*}}.h
+; CHECK: memh(r{{[0-9]*}}++#-2:circ(m{{[0-1]}})) = r{{[0-9]*}}.h
%1 = tail call i8* @llvm.hexagon.circ.sthhi(i8* %0, i32 0, i32 %or, i32 -2)
ret i16 0
}
@@ -85,7 +85,7 @@ entry:
%0 = bitcast i16* %arrayidx to i8*
%shl = shl nuw nsw i32 %shr1, 2
%or = or i32 %shl, 67108864
-; CHECK: memw(r{{[0-9]*}}{{.}}++{{.}}#-4:circ(m{{[0-1]}}))
+; CHECK: memw(r{{[0-9]*}}++#-4:circ(m{{[0-1]}}))
%1 = tail call i8* @llvm.hexagon.circ.stw(i8* %0, i32 undef, i32 %or, i32 -4)
ret i32 0
}
diff --git a/test/CodeGen/Hexagon/clr_set_toggle.ll b/test/CodeGen/Hexagon/clr_set_toggle.ll
index 19e3ed0cf897..4e9838316522 100644
--- a/test/CodeGen/Hexagon/clr_set_toggle.ll
+++ b/test/CodeGen/Hexagon/clr_set_toggle.ll
@@ -4,7 +4,7 @@
define i32 @my_clrbit(i32 %x) nounwind {
entry:
; CHECK-LABEL: my_clrbit
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#31)
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
%0 = load i32, i32* %x.addr, align 4
@@ -15,7 +15,7 @@ entry:
define i64 @my_clrbit2(i64 %x) nounwind {
entry:
; CHECK-LABEL: my_clrbit2
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#31)
%x.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
%0 = load i64, i64* %x.addr, align 8
@@ -26,7 +26,7 @@ entry:
define i64 @my_clrbit3(i64 %x) nounwind {
entry:
; CHECK-LABEL: my_clrbit3
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#31)
%x.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
%0 = load i64, i64* %x.addr, align 8
@@ -37,7 +37,7 @@ entry:
define i32 @my_clrbit4(i32 %x) nounwind {
entry:
; CHECK-LABEL: my_clrbit4
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #13)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#13)
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
%0 = load i32, i32* %x.addr, align 4
@@ -48,7 +48,7 @@ entry:
define i64 @my_clrbit5(i64 %x) nounwind {
entry:
; CHECK-LABEL: my_clrbit5
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #13)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#13)
%x.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
%0 = load i64, i64* %x.addr, align 8
@@ -59,7 +59,7 @@ entry:
define i64 @my_clrbit6(i64 %x) nounwind {
entry:
; CHECK-LABEL: my_clrbit6
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #27)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#27)
%x.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
%0 = load i64, i64* %x.addr, align 8
@@ -70,7 +70,7 @@ entry:
define zeroext i16 @my_setbit(i16 zeroext %crc) nounwind {
entry:
; CHECK-LABEL: my_setbit
-; CHECK: memh(r{{[0-9]+}}+#{{[0-9]+}}){{ *}}={{ *}}setbit(#15)
+; CHECK: memh(r{{[0-9]+}}+#{{[0-9]+}}) = setbit(#15)
%crc.addr = alloca i16, align 2
store i16 %crc, i16* %crc.addr, align 2
%0 = load i16, i16* %crc.addr, align 2
@@ -85,7 +85,7 @@ entry:
define i32 @my_setbit2(i32 %x) nounwind {
entry:
; CHECK-LABEL: my_setbit2
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #15)
+; CHECK: r{{[0-9]+}} = setbit(r{{[0-9]+}},#15)
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
%0 = load i32, i32* %x.addr, align 4
@@ -96,7 +96,7 @@ entry:
define i64 @my_setbit3(i64 %x) nounwind {
entry:
; CHECK-LABEL: my_setbit3
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #15)
+; CHECK: r{{[0-9]+}} = setbit(r{{[0-9]+}},#15)
%x.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
%0 = load i64, i64* %x.addr, align 8
@@ -107,7 +107,7 @@ entry:
define i32 @my_setbit4(i32 %x) nounwind {
entry:
; CHECK-LABEL: my_setbit4
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = setbit(r{{[0-9]+}},#31)
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
%0 = load i32, i32* %x.addr, align 4
@@ -118,7 +118,7 @@ entry:
define i64 @my_setbit5(i64 %x) nounwind {
entry:
; CHECK-LABEL: my_setbit5
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #13)
+; CHECK: r{{[0-9]+}} = setbit(r{{[0-9]+}},#13)
%x.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
%0 = load i64, i64* %x.addr, align 8
@@ -129,7 +129,7 @@ entry:
define zeroext i16 @my_togglebit(i16 zeroext %crc) nounwind {
entry:
; CHECK-LABEL: my_togglebit
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #15)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#15)
%crc.addr = alloca i16, align 2
store i16 %crc, i16* %crc.addr, align 2
%0 = load i16, i16* %crc.addr, align 2
@@ -144,7 +144,7 @@ entry:
define i32 @my_togglebit2(i32 %x) nounwind {
entry:
; CHECK-LABEL: my_togglebit2
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #15)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#15)
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
%0 = load i32, i32* %x.addr, align 4
@@ -155,7 +155,7 @@ entry:
define i64 @my_togglebit3(i64 %x) nounwind {
entry:
; CHECK-LABEL: my_togglebit3
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #15)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#15)
%x.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
%0 = load i64, i64* %x.addr, align 8
@@ -166,7 +166,7 @@ entry:
define i64 @my_togglebit4(i64 %x) nounwind {
entry:
; CHECK-LABEL: my_togglebit4
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #20)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#20)
%x.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
%0 = load i64, i64* %x.addr, align 8
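
The clrbit/setbit/togglebit forms being checked correspond to the usual single-bit idioms; a minimal C sketch (names hypothetical, n in 0..31):

#include <stdint.h>

static uint32_t clrbit(uint32_t x, unsigned n)    { return x & ~(1u << n); }
static uint32_t setbit(uint32_t x, unsigned n)    { return x | (1u << n);  }
static uint32_t togglebit(uint32_t x, unsigned n) { return x ^ (1u << n);  }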
diff --git a/test/CodeGen/Hexagon/cmp.ll b/test/CodeGen/Hexagon/cmp.ll
index c274a787249a..a0bb90de1c27 100644
--- a/test/CodeGen/Hexagon/cmp.ll
+++ b/test/CodeGen/Hexagon/cmp.ll
@@ -9,7 +9,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpeq(i32 %0, i32 1)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}}, r{{[0-9]}})
+; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}},r{{[0-9]}})
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmpeq(i32, i32) #1
@@ -23,7 +23,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpgt(i32 %0, i32 2)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, r{{[0-9]}})
+; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}},r{{[0-9]}})
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmpgt(i32, i32) #1
@@ -37,7 +37,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpgtu(i32 %0, i32 3)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, r{{[0-9]}})
+; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}},r{{[0-9]}})
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmpgtu(i32, i32) #1
@@ -51,7 +51,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmplt(i32 %0, i32 4)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, r{{[0-9]}})
+; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}},r{{[0-9]}})
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmplt(i32, i32) #1
@@ -65,7 +65,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpltu(i32 %0, i32 5)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, r{{[0-9]}})
+; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}},r{{[0-9]}})
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmpltu(i32, i32) #1
@@ -79,7 +79,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpeqi(i32 %0, i32 10)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}}, {{.*}}#10)
+; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}},#10)
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmpeqi(i32, i32) #1
@@ -93,7 +93,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpgti(i32 %0, i32 20)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, {{.*}}#20)
+; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}},#20)
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmpgti(i32, i32) #1
@@ -107,7 +107,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpgtui(i32 %0, i32 40)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, {{.*}}#40)
+; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}},#40)
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmpgtui(i32, i32) #1
@@ -121,7 +121,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpgei(i32 %0, i32 3)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, {{.*}}#2)
+; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}},#2)
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmpgei(i32, i32) #1
@@ -135,7 +135,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpgeui(i32 %0, i32 3)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, {{.*}}#2)
+; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}},#2)
; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.C2.cmpgeui(i32, i32) #1
@@ -149,7 +149,7 @@ entry:
%1 = call i32 @llvm.hexagon.C2.cmpgeui(i32 %0, i32 0)
ret i32 %1
}
-; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}}, r{{[0-9]}})
+; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}},r{{[0-9]}})
attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Hexagon/combine.ll b/test/CodeGen/Hexagon/combine.ll
index 04a080fdf425..5b71b3665667 100644
--- a/test/CodeGen/Hexagon/combine.ll
+++ b/test/CodeGen/Hexagon/combine.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr -hexagon-bit=0 < %s | FileCheck %s
-; CHECK: combine(r{{[0-9]+}}, r{{[0-9]+}})
+; CHECK: combine(r{{[0-9]+}},r{{[0-9]+}})
@j = external global i32
@k = external global i64
diff --git a/test/CodeGen/Hexagon/compound.ll b/test/CodeGen/Hexagon/compound.ll
index f8d36b8b77d9..a3bd52f97194 100644
--- a/test/CodeGen/Hexagon/compound.ll
+++ b/test/CodeGen/Hexagon/compound.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=hexagon -filetype=obj -o - %s | llvm-objdump -d - | FileCheck %s
+; RUN: llc -march=hexagon -filetype=obj -ifcvt-limit=0 -o - %s | llvm-objdump -d - | FileCheck %s
; CHECK: p0 = cmp.gt(r0,#-1); if (!p0.new) jump:nt
@@ -14,4 +14,4 @@ ret void
y:
call void @b()
ret void
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/Hexagon/constp-combine-neg.ll b/test/CodeGen/Hexagon/constp-combine-neg.ll
index 18f0e81076af..089d9f6a9984 100644
--- a/test/CodeGen/Hexagon/constp-combine-neg.ll
+++ b/test/CodeGen/Hexagon/constp-combine-neg.ll
@@ -19,9 +19,9 @@ entry:
; The instructions seem to be in a different order in the .s file than
; the corresponding values in the .ll file, so just run the test three
; times and each time test for a different instruction.
-; CHECK-TEST1: combine(#-2, #3)
-; CHECK-TEST2: combine(#6, #-4)
-; CHECK-TEST3: combine(#-10, #-8)
+; CHECK-TEST1: combine(#-2,#3)
+; CHECK-TEST2: combine(#6,#-4)
+; CHECK-TEST3: combine(#-10,#-8)
attributes #0 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Hexagon/convert-to-dot-old.ll b/test/CodeGen/Hexagon/convert-to-dot-old.ll
new file mode 100644
index 000000000000..b793fa0c22cd
--- /dev/null
+++ b/test/CodeGen/Hexagon/convert-to-dot-old.ll
@@ -0,0 +1,110 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv55 -filetype=obj -o /dev/null < %s
+; REQUIRES: asserts
+; There should be no output (nothing on stderr).
+
+; Due to a bug in converting a dot-new branch into a dot-old one, opcodes
+; with branch prediction bits were selected even if the architecture did
+; not support them. On V55-, the dot-old branch opcodes are J2_jumpt and
+; J2_jumpf (and a pair of J2_jumpr*), whereas J2_jumptpt could have been
+; a result of the conversion to dot-old. This would fail a verification
+; check in the MC code emitter, so make sure it does not happen.
+
+target triple = "hexagon"
+
+define void @fred(i16* nocapture %a0, i16* nocapture %a1, i16* nocapture %a2, i16 signext %a3, i16* %a4, i16 signext %a5, i16 signext %a6, i16 signext %a7, i32 %a8, i16 signext %a9, i16 signext %a10) local_unnamed_addr #0 {
+b11:
+ %v12 = sext i16 %a5 to i32
+ %v13 = tail call i32 @llvm.hexagon.A2.sxth(i32 %v12)
+ %v14 = tail call i32 @llvm.hexagon.A2.sxth(i32 2)
+ %v15 = tail call i32 @llvm.hexagon.A2.sxth(i32 undef)
+ %v16 = tail call i32 @llvm.hexagon.A2.sath(i32 undef)
+ %v17 = tail call i32 @llvm.hexagon.A2.sxth(i32 %v16)
+ %v18 = tail call i32 @llvm.hexagon.A2.aslh(i32 undef)
+ %v19 = tail call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v18, i32 %v14)
+ %v20 = tail call i32 @llvm.hexagon.A2.asrh(i32 %v19)
+ %v21 = tail call i32 @llvm.hexagon.A2.sxth(i32 %v20)
+ %v22 = tail call i32 @llvm.hexagon.A2.sub(i32 %v17, i32 %v21)
+ %v23 = tail call i32 @llvm.hexagon.A2.sath(i32 %v22)
+ %v24 = select i1 undef, i32 undef, i32 %v23
+ %v25 = tail call i32 @llvm.hexagon.A2.sxth(i32 %v24)
+ %v26 = tail call i32 @llvm.hexagon.A2.sub(i32 %v13, i32 %v25)
+ %v27 = tail call i32 @llvm.hexagon.A2.sath(i32 %v26)
+ %v28 = tail call i32 @llvm.hexagon.A2.sxth(i32 %v27)
+ %v29 = tail call i32 @llvm.hexagon.A2.sub(i32 %v28, i32 %v14)
+ %v30 = tail call i32 @llvm.hexagon.A2.sath(i32 %v29)
+ %v31 = shl i32 %v30, 16
+ %v32 = icmp sgt i32 undef, %v31
+ %v33 = select i1 %v32, i32 %v30, i32 undef
+ %v34 = trunc i32 %v33 to i16
+ %v35 = trunc i32 %v24 to i16
+ call void @foo(i16* nonnull undef, i32* nonnull undef, i16* %a4, i16 signext %v35, i16 signext %v34, i16 signext 2) #4
+ %v36 = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v18, i32 undef)
+ %v37 = call i32 @llvm.hexagon.A2.asrh(i32 %v36)
+ %v38 = call i32 @llvm.hexagon.A2.sub(i32 %v13, i32 undef)
+ %v39 = call i32 @llvm.hexagon.A2.sath(i32 %v38)
+ %v40 = call i32 @llvm.hexagon.A2.sxth(i32 %v39)
+ %v41 = call i32 @llvm.hexagon.A2.sub(i32 %v40, i32 %v14)
+ %v42 = call i32 @llvm.hexagon.A2.sath(i32 %v41)
+ %v43 = select i1 undef, i32 %v42, i32 %v37
+ %v44 = trunc i32 %v43 to i16
+ call void @foo(i16* nonnull undef, i32* nonnull undef, i16* %a4, i16 signext undef, i16 signext %v44, i16 signext 2) #4
+ %v45 = call i32 @llvm.hexagon.A2.sath(i32 undef)
+ %v46 = select i1 undef, i32 undef, i32 %v45
+ %v47 = trunc i32 %v46 to i16
+ call void @foo(i16* nonnull undef, i32* nonnull undef, i16* %a4, i16 signext %v47, i16 signext undef, i16 signext 2) #4
+ %v48 = call i32 @llvm.hexagon.A2.sub(i32 undef, i32 %v15)
+ %v49 = call i32 @llvm.hexagon.A2.sath(i32 %v48)
+ %v50 = trunc i32 %v49 to i16
+ store i16 %v50, i16* undef, align 2
+ store i16 %a3, i16* %a0, align 2
+ %v51 = sext i16 %a10 to i32
+ %v52 = call i32 @llvm.hexagon.A2.sxth(i32 %v51)
+ %v53 = call i32 @llvm.hexagon.A2.add(i32 undef, i32 %v52)
+ %v54 = call i32 @llvm.hexagon.A2.sath(i32 %v53)
+ %v55 = trunc i32 %v54 to i16
+ store i16 %v55, i16* %a1, align 2
+ store i16 %a7, i16* %a2, align 2
+ %v56 = sext i16 %a9 to i32
+ %v57 = call i32 @llvm.hexagon.A2.sxth(i32 %v56)
+ br i1 undef, label %b58, label %b62
+
+b58: ; preds = %b11
+ %v59 = call i32 @llvm.hexagon.A2.add(i32 %v57, i32 %v52)
+ %v60 = call i32 @llvm.hexagon.A2.sath(i32 %v59)
+ %v61 = trunc i32 %v60 to i16
+ store i16 %v61, i16* %a1, align 2
+ br label %b63
+
+b62: ; preds = %b11
+ br label %b63
+
+b63: ; preds = %b62, %b58
+ %v64 = phi i16 [ undef, %b58 ], [ %a9, %b62 ]
+ %v65 = icmp slt i16 undef, %v64
+ br i1 %v65, label %b66, label %b67
+
+b66: ; preds = %b63
+ br i1 undef, label %b67, label %b68
+
+b67: ; preds = %b66, %b63
+ store i16 0, i16* %a2, align 2
+ br label %b68
+
+b68: ; preds = %b67, %b66
+ ret void
+}
+
+declare i32 @llvm.hexagon.A2.sath(i32) #2
+declare i32 @llvm.hexagon.A2.add(i32, i32) #2
+declare i32 @llvm.hexagon.A2.sxth(i32) #2
+declare i32 @llvm.hexagon.A2.sub(i32, i32) #2
+declare i32 @llvm.hexagon.A2.asrh(i32) #2
+declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) #2
+declare i32 @llvm.hexagon.A2.aslh(i32) #2
+declare void @foo(i16*, i32*, i16*, i16 signext, i16 signext, i16 signext) local_unnamed_addr #3
+
+attributes #0 = { nounwind optsize "target-cpu"="hexagonv55" "target-features"="-hvx,-hvx-double,-long-calls" }
+attributes #1 = { argmemonly nounwind }
+attributes #2 = { nounwind readnone }
+attributes #3 = { optsize "target-cpu"="hexagonv55" "target-features"="-hvx,-hvx-double,-long-calls" }
+attributes #4 = { nounwind optsize }
diff --git a/test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll b/test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll
deleted file mode 100644
index b8f483298f8c..000000000000
--- a/test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
-
-; CHECK-DAG: ct0({{r[0-9]*:[0-9]*}})
-; CHECK-DAG: cl0({{r[0-9]*:[0-9]*}})
-; CHECK-DAG: ct0({{r[0-9]*}})
-; CHECK-DAG: cl0({{r[0-9]*}})
-; CHECK-DAG: r{{[0-9]+}} += lsr(r{{[0-9]+}}, #4)
-
-define i32 @foo(i64 %a, i32 %b) nounwind {
-entry:
- %tmp0 = tail call i64 @llvm.ctlz.i64( i64 %a, i1 true )
- %tmp1 = tail call i64 @llvm.cttz.i64( i64 %a, i1 true )
- %tmp2 = tail call i32 @llvm.ctlz.i32( i32 %b, i1 true )
- %tmp3 = tail call i32 @llvm.cttz.i32( i32 %b, i1 true )
- %tmp4 = tail call i64 @llvm.ctpop.i64( i64 %a )
- %tmp5 = tail call i32 @llvm.ctpop.i32( i32 %b )
-
-
- %tmp6 = trunc i64 %tmp0 to i32
- %tmp7 = trunc i64 %tmp1 to i32
- %tmp8 = trunc i64 %tmp4 to i32
- %tmp9 = add i32 %tmp6, %tmp7
- %tmp10 = add i32 %tmp9, %tmp8
- %tmp11 = add i32 %tmp10, %tmp2
- %tmp12 = add i32 %tmp11, %tmp3
- %tmp13 = add i32 %tmp12, %tmp5
-
- ret i32 %tmp13
-}
-
-declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
-declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
-declare i64 @llvm.cttz.i64(i64, i1) nounwind readnone
-declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone
-declare i64 @llvm.ctpop.i64(i64) nounwind readnone
-declare i32 @llvm.ctpop.i32(i32) nounwind readnone
diff --git a/test/CodeGen/Hexagon/dead-store-stack.ll b/test/CodeGen/Hexagon/dead-store-stack.ll
index 93d324baad9e..0d8124e76b90 100644
--- a/test/CodeGen/Hexagon/dead-store-stack.ll
+++ b/test/CodeGen/Hexagon/dead-store-stack.ll
@@ -1,6 +1,6 @@
; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
; CHECK: ParseFunc:
-; CHECK: r[[ARG0:[0-9]+]] = memuh(r[[ARG1:[0-9]+]] + #[[OFFSET:[0-9]+]])
+; CHECK: r[[ARG0:[0-9]+]] = memuh(r[[ARG1:[0-9]+]]+#[[OFFSET:[0-9]+]])
; CHECK: memw(r[[ARG1]]+#[[OFFSET]]) = r[[ARG0]]
@.str.3 = external unnamed_addr constant [8 x i8], align 1
diff --git a/test/CodeGen/Hexagon/early-if-merge-loop.ll b/test/CodeGen/Hexagon/early-if-merge-loop.ll
new file mode 100644
index 000000000000..f45058f029dd
--- /dev/null
+++ b/test/CodeGen/Hexagon/early-if-merge-loop.ll
@@ -0,0 +1,91 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; Make sure that the resulting loop has only one basic block.
+
+; CHECK-LABEL: fred
+; Rely on the comments; make sure the one for the loop header is present.
+; CHECK: %loop
+; CHECK-NOT: %should_merge
+
+target triple = "hexagon"
+
+define i32 @fred(i32 %a0, i64* nocapture readonly %a1) #0 {
+b2:
+ %v3 = bitcast i64* %a1 to i32*
+ %v4 = getelementptr inbounds i32, i32* %v3, i32 1
+ %v5 = zext i32 %a0 to i64
+ br label %loop
+
+loop: ; preds = %should_merge, %b2
+ %v7 = phi i32 [ 0, %b2 ], [ %v49, %should_merge ]
+ %v8 = phi i32 [ 0, %b2 ], [ %v42, %should_merge ]
+ %v9 = phi i32* [ %v4, %b2 ], [ %v53, %should_merge ]
+ %v10 = phi i32 [ 0, %b2 ], [ %v30, %should_merge ]
+ %v11 = phi i32* [ %v3, %b2 ], [ %v51, %should_merge ]
+ %v12 = phi i32 [ 0, %b2 ], [ %v23, %should_merge ]
+ %v13 = phi i32 [ 2, %b2 ], [ %v54, %should_merge ]
+ %v14 = load i32, i32* %v11, align 4, !tbaa !0
+ %v15 = load i32, i32* %v9, align 4, !tbaa !0
+ %v16 = icmp ult i32 %v13, 30
+ %v17 = zext i32 %v12 to i64
+ %v18 = shl nuw i64 %v17, 32
+ %v19 = zext i32 %v14 to i64
+ %v20 = or i64 %v18, %v19
+ %v21 = tail call i64 @llvm.hexagon.A2.addp(i64 %v20, i64 %v5)
+ %v22 = lshr i64 %v21, 32
+ %v23 = trunc i64 %v22 to i32
+ %v24 = zext i32 %v10 to i64
+ %v25 = shl nuw i64 %v24, 32
+ %v26 = zext i32 %v15 to i64
+ %v27 = or i64 %v25, %v26
+ %v28 = tail call i64 @llvm.hexagon.A2.addp(i64 %v27, i64 %v5)
+ %v29 = lshr i64 %v28, 32
+ %v30 = trunc i64 %v29 to i32
+ %v31 = getelementptr inbounds i32, i32* %v3, i32 %v13
+ %v32 = load i32, i32* %v31, align 4, !tbaa !0
+ %v33 = or i32 %v13, 1
+ %v34 = getelementptr inbounds i32, i32* %v3, i32 %v33
+ %v35 = load i32, i32* %v34, align 4, !tbaa !0
+ %v36 = zext i32 %v8 to i64
+ %v37 = shl nuw i64 %v36, 32
+ %v38 = zext i32 %v32 to i64
+ %v39 = or i64 %v37, %v38
+ %v40 = tail call i64 @llvm.hexagon.A2.subp(i64 %v39, i64 %v5)
+ %v41 = lshr i64 %v40, 32
+ %v42 = trunc i64 %v41 to i32
+ %v43 = zext i32 %v7 to i64
+ %v44 = shl nuw i64 %v43, 32
+ %v45 = zext i32 %v35 to i64
+ %v46 = or i64 %v44, %v45
+ %v47 = tail call i64 @llvm.hexagon.A2.subp(i64 %v46, i64 %v5)
+ %v48 = lshr i64 %v47, 32
+ %v49 = trunc i64 %v48 to i32
+ br i1 %v16, label %should_merge, label %exit
+
+should_merge: ; preds = %loop
+ %v50 = add nuw nsw i32 %v13, 2
+ %v51 = getelementptr inbounds i32, i32* %v3, i32 %v50
+ %v52 = add nuw nsw i32 %v13, 3
+ %v53 = getelementptr inbounds i32, i32* %v3, i32 %v52
+ %v54 = add nuw nsw i32 %v13, 4
+ br label %loop
+
+exit: ; preds = %loop
+ %v57 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v42, i32 %v23)
+ %v58 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v49, i32 %v30)
+ %v59 = tail call i64 @llvm.hexagon.A2.addp(i64 %v57, i64 %v58)
+ %v60 = lshr i64 %v59, 32
+ %v61 = trunc i64 %v60 to i32
+ ret i32 %v61
+}
+
+declare i64 @llvm.hexagon.A2.addp(i64, i64) #1
+declare i64 @llvm.hexagon.A2.subp(i64, i64) #1
+declare i64 @llvm.hexagon.A2.combinew(i32, i32) #1
+
+attributes #0 = { nounwind readonly "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"long", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/Hexagon/early-if-phi-i1.ll b/test/CodeGen/Hexagon/early-if-phi-i1.ll
index 1649d51269ee..f4af62d6b10e 100644
--- a/test/CodeGen/Hexagon/early-if-phi-i1.ll
+++ b/test/CodeGen/Hexagon/early-if-phi-i1.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s
+; RUN: llc -march=hexagon < %s
; REQUIRES: asserts
; Check that the early if-conversion does not predicate block1 (where the
; join block has a phi node of type i1).
diff --git a/test/CodeGen/Hexagon/early-if-vecpred.ll b/test/CodeGen/Hexagon/early-if-vecpred.ll
new file mode 100644
index 000000000000..ca119e1d1dec
--- /dev/null
+++ b/test/CodeGen/Hexagon/early-if-vecpred.ll
@@ -0,0 +1,37 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; REQUIRES: asserts
+
+; Hexagon early if-conversion used to crash on this testcase due to not
+; recognizing vector predicate registers.
+
+target triple = "hexagon"
+
+; Check that the early if-conversion has not happened.
+
+; CHECK-LABEL: fred
+; CHECK: q{{[0-3]}} = not
+; CHECK: LBB
+; CHECK: if (q{{[0-3]}}) vmem
+define void @fred(i32 %a0) #0 {
+b1:
+ %v2 = tail call <1024 x i1> @llvm.hexagon.V6.pred.scalar2.128B(i32 %a0) #2
+ br i1 undef, label %b3, label %b5
+
+b3: ; preds = %b1
+ %v4 = tail call <1024 x i1> @llvm.hexagon.V6.pred.not.128B(<1024 x i1> %v2) #2
+ br label %b5
+
+b5: ; preds = %b3, %b1
+ %v6 = phi <1024 x i1> [ %v4, %b3 ], [ %v2, %b1 ]
+ %v7 = bitcast <1024 x i1> %v6 to <32 x i32>
+ tail call void asm sideeffect "if ($0) vmem($1) = $2;", "q,r,v,~{memory}"(<32 x i32> %v7, <32 x i32>* undef, <32 x i32> undef) #2
+ ret void
+}
+
+declare <1024 x i1> @llvm.hexagon.V6.pred.scalar2.128B(i32) #1
+declare <1024 x i1> @llvm.hexagon.V6.pred.not.128B(<1024 x i1>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-double" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
+
diff --git a/test/CodeGen/Hexagon/eh_return.ll b/test/CodeGen/Hexagon/eh_return.ll
index 67649a07afc7..1596ade24c82 100644
--- a/test/CodeGen/Hexagon/eh_return.ll
+++ b/test/CodeGen/Hexagon/eh_return.ll
@@ -4,7 +4,7 @@
; CHECK: deallocframe
; CHECK-NEXT: }
; CHECK-NEXT: {
-; CHECK-NEXT: r29 = add(r29, r28)
+; CHECK-NEXT: r29 = add(r29,r28)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: jumpr r31
diff --git a/test/CodeGen/Hexagon/eliminate-pred-spill.ll b/test/CodeGen/Hexagon/eliminate-pred-spill.ll
index 6fb0a3e2658d..b3a4a2f42524 100644
--- a/test/CodeGen/Hexagon/eliminate-pred-spill.ll
+++ b/test/CodeGen/Hexagon/eliminate-pred-spill.ll
@@ -1,5 +1,4 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv60 -enable-hexagon-hvx-double \
-; RUN: -hexagon-bit=0 < %s | FileCheck %s
+; RUN: llc -march=hexagon -hexagon-bit=0 < %s | FileCheck %s
; This spill should be eliminated.
; CHECK-NOT: vmem(r29+#6)
@@ -140,5 +139,5 @@ declare <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32>, <32 x i32>, i32)
declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #1
-attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-double" }
attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/Hexagon/expand-condsets-dead-bad.ll b/test/CodeGen/Hexagon/expand-condsets-dead-bad.ll
new file mode 100644
index 000000000000..ce7f5e0ce12f
--- /dev/null
+++ b/test/CodeGen/Hexagon/expand-condsets-dead-bad.ll
@@ -0,0 +1,54 @@
+; RUN: llc -march=hexagon -verify-machineinstrs < %s | FileCheck %s
+; REQUIRES: asserts
+
+; Check for some output other than crashing.
+; CHECK: bitsset
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @fred() local_unnamed_addr #0 {
+b0:
+ %v1 = load i32, i32* undef, align 4
+ %v2 = and i32 %v1, 603979776
+ %v3 = trunc i32 %v2 to i30
+ switch i30 %v3, label %b23 [
+ i30 -536870912, label %b4
+ i30 -469762048, label %b5
+ ]
+
+b4: ; preds = %b0
+ unreachable
+
+b5: ; preds = %b0
+ %v6 = load i32, i32* undef, align 4
+ br i1 undef, label %b7, label %b8
+
+b7: ; preds = %b5
+ br label %b9
+
+b8: ; preds = %b5
+ br label %b9
+
+b9: ; preds = %b8, %b7
+ %v10 = load i32, i32* undef, align 4
+ %v11 = load i32, i32* undef, align 4
+ %v12 = mul nsw i32 %v11, %v10
+ %v13 = ashr i32 %v12, 13
+ %v14 = mul nsw i32 %v13, %v13
+ %v15 = zext i32 %v14 to i64
+ %v16 = mul nsw i32 %v6, %v6
+ %v17 = zext i32 %v16 to i64
+ %v18 = lshr i64 %v17, 5
+ %v19 = select i1 undef, i64 %v18, i64 %v17
+ %v20 = mul nuw nsw i64 %v19, %v15
+ %v21 = trunc i64 %v20 to i32
+ %v22 = and i32 %v21, 2147483647
+ store i32 %v22, i32* undef, align 4
+ unreachable
+
+b23: ; preds = %b0
+ ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv5" "target-features"="-hvx,-hvx-double,-long-calls" }
diff --git a/test/CodeGen/Hexagon/expand-condsets-dead-pred.ll b/test/CodeGen/Hexagon/expand-condsets-dead-pred.ll
new file mode 100644
index 000000000000..ecec83625e1c
--- /dev/null
+++ b/test/CodeGen/Hexagon/expand-condsets-dead-pred.ll
@@ -0,0 +1,45 @@
+; RUN: llc -march=hexagon -verify-machineinstrs < %s | FileCheck %s
+; REQUIRES: asserts
+
+; Check for some output (as opposed to a crash).
+; CHECK: loop0
+
+target triple = "hexagon"
+
+@x = external local_unnamed_addr global [80 x i32], align 8
+
+; Function Attrs: nounwind
+define void @fred() local_unnamed_addr #0 {
+b0:
+ br label %b1
+
+b1: ; preds = %b20, %b0
+ br label %b2
+
+b2: ; preds = %b2, %b1
+ %v3 = phi i32 [ 0, %b1 ], [ %v17, %b2 ]
+ %v4 = phi i32 [ 0, %b1 ], [ %v16, %b2 ]
+ %v5 = phi i32 [ undef, %b1 ], [ %v18, %b2 ]
+ %v6 = load i32, i32* undef, align 8
+ %v7 = icmp sgt i32 %v6, undef
+ %v8 = select i1 %v7, i32 %v3, i32 %v4
+ %v9 = select i1 undef, i32 0, i32 %v8
+ %v10 = select i1 undef, i32 undef, i32 %v9
+ %v11 = select i1 undef, i32 0, i32 %v10
+ %v12 = icmp sgt i32 undef, 0
+ %v13 = select i1 %v12, i32 undef, i32 %v11
+ %v14 = select i1 false, i32 undef, i32 %v13
+ %v15 = select i1 false, i32 undef, i32 %v14
+ %v16 = select i1 false, i32 undef, i32 %v15
+ %v17 = add nsw i32 %v3, 8
+ %v18 = add i32 %v5, -8
+ %v19 = icmp eq i32 %v18, 0
+ br i1 %v19, label %b20, label %b2
+
+b20: ; preds = %b2
+ %v21 = getelementptr inbounds [80 x i32], [80 x i32]* @x, i32 0, i32 %v16
+ store i32 -2000, i32* %v21, align 4
+ br label %b1
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" "target-features"="-hvx,-hvx-double,-long-calls" }
diff --git a/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir b/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir
index 983035e228cc..f3d105f75da2 100644
--- a/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir
+++ b/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir
@@ -1,4 +1,4 @@
-# RUN: llc -march=hexagon -run-pass expand-condsets -o - 2>&1 %s -verify-machineinstrs -debug-only=expand-condsets | FileCheck %s
+# RUN: llc -march=hexagon -run-pass expand-condsets -o - %s -verify-machineinstrs -debug-only=expand-condsets 2>&1 | FileCheck %s
# REQUIRES: asserts
# Check that coalesced registers are removed from live intervals.
diff --git a/test/CodeGen/Hexagon/expand-vstorerw-undef2.ll b/test/CodeGen/Hexagon/expand-vstorerw-undef2.ll
new file mode 100644
index 000000000000..4f2bb86f0842
--- /dev/null
+++ b/test/CodeGen/Hexagon/expand-vstorerw-undef2.ll
@@ -0,0 +1,216 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; Dead defs may still appear live in LivePhysRegs, leading to an expansion
+; of a double-vector store that uses an undefined source register.
+
+target triple = "hexagon-unknown--elf"
+
+declare noalias i8* @halide_malloc() local_unnamed_addr #0
+declare void @halide_free() local_unnamed_addr #0
+
+declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #1
+declare <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32>) #1
+declare <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32) #1
+declare <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32>, <32 x i32>) #1
+declare <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32>, <32 x i32>) #1
+declare <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32>, <32 x i32>, i32) #1
+declare <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32>, <32 x i32>, i32) #1
+declare <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32>, <32 x i32>, i32) #1
+declare <32 x i32> @llvm.hexagon.V6.vavghrnd.128B(<32 x i32>, <32 x i32>) #1
+declare <32 x i32> @llvm.hexagon.V6.vlsrw.128B(<32 x i32>, i32) #1
+declare <32 x i32> @llvm.hexagon.V6.vpackeh.128B(<32 x i32>, <32 x i32>) #1
+declare <32 x i32> @llvm.hexagon.V6.vshufoh.128B(<32 x i32>, <32 x i32>) #1
+declare <32 x i32> @llvm.hexagon.V6.vsubhsat.128B(<32 x i32>, <32 x i32>) #1
+declare <64 x i32> @llvm.hexagon.V6.vaddhw.128B(<32 x i32>, <32 x i32>) #1
+declare <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32>, <64 x i32>) #1
+declare <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32>, <32 x i32>) #1
+declare <64 x i32> @llvm.hexagon.V6.vmpyuh.128B(<32 x i32>, i32) #1
+declare <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32>, <32 x i32>, i32) #1
+declare <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32>, <32 x i32>, i32) #1
+
+define hidden void @fred() #0 {
+b0:
+ br i1 undef, label %b1, label %b2
+
+b1: ; preds = %b0
+ ret void
+
+b2: ; preds = %b0
+ %v3 = tail call i8* @halide_malloc()
+ %v4 = bitcast i8* %v3 to i16*
+ %v5 = tail call i8* @halide_malloc()
+ %v6 = bitcast i8* %v5 to i16*
+ %v7 = tail call i8* @halide_malloc()
+ %v8 = bitcast i8* %v7 to i16*
+ %v9 = tail call i8* @halide_malloc()
+ %v10 = bitcast i8* %v9 to i16*
+ br label %b11
+
+b11: ; preds = %b11, %b2
+ br i1 undef, label %b12, label %b11
+
+b12: ; preds = %b11
+ br i1 undef, label %b16, label %b13
+
+b13: ; preds = %b12
+ %v14 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> zeroinitializer) #2
+ %v15 = tail call <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32> undef, <32 x i32> %v14, i32 1) #2
+ br i1 undef, label %b19, label %b17
+
+b16: ; preds = %b12
+ unreachable
+
+b17: ; preds = %b13
+ %v18 = tail call <32 x i32> @llvm.hexagon.V6.vavghrnd.128B(<32 x i32> %v15, <32 x i32> undef) #2
+ br label %b19
+
+b19: ; preds = %b17, %b13
+ %v20 = phi <32 x i32> [ %v18, %b17 ], [ %v15, %b13 ]
+ %v21 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> zeroinitializer, <32 x i32> %v20) #2
+ %v22 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> %v21, <32 x i32> undef, i32 -2)
+ %v23 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v22)
+ store <32 x i32> %v23, <32 x i32>* undef, align 128
+ tail call void @halide_free() #3
+ br label %b24
+
+b24: ; preds = %b33, %b19
+ %v25 = load <32 x i32>, <32 x i32>* undef, align 128
+ %v26 = fptoui float undef to i16
+ %v27 = tail call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 -2147450880) #2
+ %v28 = xor i16 %v26, -1
+ %v29 = zext i16 %v28 to i32
+ %v30 = or i32 0, %v29
+ %v31 = tail call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 1) #2
+ %v32 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %v31, <32 x i32> %v31)
+ br label %b34
+
+b33: ; preds = %b34
+ br label %b24
+
+b34: ; preds = %b34, %b24
+ %v35 = phi <32 x i32> [ %v45, %b34 ], [ undef, %b24 ]
+ %v36 = phi <32 x i32> [ undef, %b34 ], [ %v25, %b24 ]
+ %v37 = phi <32 x i32> [ %v46, %b34 ], [ undef, %b24 ]
+ %v38 = phi i32 [ %v145, %b34 ], [ 0, %b24 ]
+ %v39 = load <32 x i32>, <32 x i32>* undef, align 128
+ %v40 = add nsw i32 %v38, undef
+ %v41 = shl nsw i32 %v40, 6
+ %v42 = add nsw i32 %v41, 64
+ %v43 = getelementptr inbounds i16, i16* %v6, i32 %v42
+ %v44 = bitcast i16* %v43 to <32 x i32>*
+ %v45 = load <32 x i32>, <32 x i32>* %v44, align 128
+ %v46 = load <32 x i32>, <32 x i32>* undef, align 128
+ %v47 = load <32 x i32>, <32 x i32>* null, align 128
+ %v48 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> undef, <32 x i32> undef, i32 2)
+ %v49 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v45, <32 x i32> %v35, i32 24)
+ %v50 = tail call <32 x i32> @llvm.hexagon.V6.vsubhsat.128B(<32 x i32> %v48, <32 x i32> %v49) #2
+ %v51 = tail call <64 x i32> @llvm.hexagon.V6.vaddhw.128B(<32 x i32> undef, <32 x i32> %v50) #2
+ %v52 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v39, <32 x i32> %v47, i32 50)
+ %v53 = tail call <32 x i32> @llvm.hexagon.V6.vpackeh.128B(<32 x i32> %v52, <32 x i32> undef)
+ %v54 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v53, <32 x i32> %v27) #2
+ %v55 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> undef, <32 x i32> %v54, i32 undef) #2
+ %v56 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v55, <64 x i32> zeroinitializer) #2
+ %v57 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v56)
+ %v58 = tail call <32 x i32> @llvm.hexagon.V6.vlsrw.128B(<32 x i32> %v57, i32 16) #2
+ %v59 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v56)
+ %v60 = tail call <32 x i32> @llvm.hexagon.V6.vlsrw.128B(<32 x i32> %v59, i32 16) #2
+ %v61 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %v60, <32 x i32> %v58)
+ %v62 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v61, <64 x i32> %v55) #2
+ %v63 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v62, <64 x i32> zeroinitializer) #2
+ %v64 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v63) #2
+ %v65 = tail call <32 x i32> @llvm.hexagon.V6.vshufoh.128B(<32 x i32> %v64, <32 x i32> undef) #2
+ %v66 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v65, <32 x i32> %v27) #2
+ %v67 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %v66, <32 x i32> undef) #2
+ %v68 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> zeroinitializer, <32 x i32> %v27) #2
+ %v69 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.128B(<32 x i32> %v68, i32 %v30) #2
+ %v70 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v47, <32 x i32> undef, i32 52)
+ %v71 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v39, <32 x i32> %v47, i32 52)
+ %v72 = tail call <32 x i32> @llvm.hexagon.V6.vpackeh.128B(<32 x i32> %v71, <32 x i32> %v70)
+ %v73 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v72, <32 x i32> %v27) #2
+ %v74 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %v69, <32 x i32> %v73, i32 undef) #2
+ %v75 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v74, <64 x i32> zeroinitializer) #2
+ %v76 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v75)
+ %v77 = tail call <32 x i32> @llvm.hexagon.V6.vlsrw.128B(<32 x i32> %v76, i32 16) #2
+ %v78 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> undef, <32 x i32> %v77)
+ %v79 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v78, <64 x i32> %v74) #2
+ %v80 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v79, <64 x i32> zeroinitializer) #2
+ %v81 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v80) #2
+ %v82 = tail call <32 x i32> @llvm.hexagon.V6.vshufoh.128B(<32 x i32> %v81, <32 x i32> undef) #2
+ %v83 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v82, <32 x i32> %v27) #2
+ %v84 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v51, <64 x i32> %v32) #2
+ %v85 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v84) #2
+ %v86 = tail call <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32> undef, <32 x i32> %v85, i32 1) #2
+ %v87 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %v83, <32 x i32> %v86) #2
+ %v88 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> %v87, <32 x i32> %v67, i32 -2)
+ %v89 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v88)
+ %v90 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v88)
+ %v91 = getelementptr inbounds i16, i16* %v10, i32 undef
+ %v92 = bitcast i16* %v91 to <32 x i32>*
+ store <32 x i32> %v90, <32 x i32>* %v92, align 128
+ %v93 = getelementptr inbounds i16, i16* %v10, i32 undef
+ %v94 = bitcast i16* %v93 to <32 x i32>*
+ store <32 x i32> %v89, <32 x i32>* %v94, align 128
+ %v95 = getelementptr inbounds i16, i16* %v4, i32 undef
+ %v96 = bitcast i16* %v95 to <32 x i32>*
+ %v97 = load <32 x i32>, <32 x i32>* %v96, align 128
+ %v98 = getelementptr inbounds i16, i16* %v8, i32 undef
+ %v99 = bitcast i16* %v98 to <32 x i32>*
+ %v100 = load <32 x i32>, <32 x i32>* %v99, align 128
+ %v101 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> undef, <32 x i32> %v36, i32 22)
+ %v102 = tail call <32 x i32> @llvm.hexagon.V6.vsubhsat.128B(<32 x i32> %v100, <32 x i32> %v101) #2
+ %v103 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> undef, <32 x i32> %v102) #2
+ %v104 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v97, <32 x i32> %v37, i32 48)
+ %v105 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v46, <32 x i32> %v97, i32 48)
+ %v106 = tail call <32 x i32> @llvm.hexagon.V6.vpackeh.128B(<32 x i32> %v105, <32 x i32> %v104)
+ %v107 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> undef, <64 x i32> %v32) #2
+ %v108 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v107) #2
+ %v109 = tail call <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32> undef, <32 x i32> %v108, i32 1) #2
+ %v110 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %v106, <32 x i32> %v109) #2
+ %v111 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> %v110, <32 x i32> %v103, i32 -2)
+ %v112 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v111)
+ %v113 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v111)
+ %v114 = getelementptr inbounds i16, i16* %v10, i32 undef
+ %v115 = bitcast i16* %v114 to <32 x i32>*
+ store <32 x i32> %v113, <32 x i32>* %v115, align 128
+ %v116 = getelementptr inbounds i16, i16* %v10, i32 undef
+ %v117 = bitcast i16* %v116 to <32 x i32>*
+ store <32 x i32> %v112, <32 x i32>* %v117, align 128
+ %v118 = getelementptr inbounds i16, i16* %v4, i32 undef
+ %v119 = bitcast i16* %v118 to <32 x i32>*
+ %v120 = load <32 x i32>, <32 x i32>* %v119, align 128
+ %v121 = getelementptr inbounds i16, i16* %v6, i32 undef
+ %v122 = bitcast i16* %v121 to <32 x i32>*
+ %v123 = load <32 x i32>, <32 x i32>* %v122, align 128
+ %v124 = getelementptr inbounds i16, i16* %v6, i32 0
+ %v125 = bitcast i16* %v124 to <32 x i32>*
+ %v126 = load <32 x i32>, <32 x i32>* %v125, align 128
+ %v127 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v126, <32 x i32> %v123, i32 22)
+ %v128 = tail call <32 x i32> @llvm.hexagon.V6.vsubhsat.128B(<32 x i32> undef, <32 x i32> %v127) #2
+ %v129 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v126, <32 x i32> %v123, i32 24)
+ %v130 = tail call <32 x i32> @llvm.hexagon.V6.vsubhsat.128B(<32 x i32> undef, <32 x i32> %v129) #2
+ %v131 = tail call <64 x i32> @llvm.hexagon.V6.vaddhw.128B(<32 x i32> %v128, <32 x i32> %v130) #2
+ %v132 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v120, <32 x i32> undef, i32 46)
+ %v133 = tail call <32 x i32> @llvm.hexagon.V6.vpackeh.128B(<32 x i32> undef, <32 x i32> %v132)
+ %v134 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %v133, <32 x i32> %v128) #2
+ %v135 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v120, <32 x i32> undef, i32 48)
+ %v136 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> undef, <32 x i32> %v120, i32 48)
+ %v137 = tail call <32 x i32> @llvm.hexagon.V6.vpackeh.128B(<32 x i32> %v136, <32 x i32> %v135)
+ %v138 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v131, <64 x i32> %v32) #2
+ %v139 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v138) #2
+ %v140 = tail call <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32> %v139, <32 x i32> undef, i32 1) #2
+ %v141 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %v137, <32 x i32> %v140) #2
+ %v142 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> %v141, <32 x i32> %v134, i32 -2)
+ %v143 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v142)
+ %v144 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v142)
+ store <32 x i32> %v144, <32 x i32>* undef, align 128
+ store <32 x i32> %v143, <32 x i32>* undef, align 128
+ %v145 = add nuw nsw i32 %v38, 1
+ %v146 = icmp eq i32 %v38, undef
+ br i1 %v146, label %b33, label %b34
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx-double" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
+attributes #3 = { nobuiltin nounwind }
diff --git a/test/CodeGen/Hexagon/extload-combine.ll b/test/CodeGen/Hexagon/extload-combine.ll
index c492343d7915..c7a386a664ba 100644
--- a/test/CodeGen/Hexagon/extload-combine.ll
+++ b/test/CodeGen/Hexagon/extload-combine.ll
@@ -15,8 +15,8 @@
; Function Attrs: nounwind
define i64 @short_test1() #0 {
-; CHECK: [[VAR:r[0-9]+]]{{ *}}={{ *}}memuh(##
-; CHECK: combine(#0, [[VAR]])
+; CHECK: [[VAR:r[0-9]+]] = memuh(##
+; CHECK: combine(#0,[[VAR]])
entry:
store i16 0, i16* @a, align 2
%0 = load i16, i16* @b, align 2
@@ -26,7 +26,7 @@ entry:
; Function Attrs: nounwind
define i64 @short_test2() #0 {
-; CHECK: [[VAR1:r[0-9]+]]{{ *}}={{ *}}memh(##
+; CHECK: [[VAR1:r[0-9]+]] = memh(##
; CHECK: sxtw([[VAR1]])
entry:
store i16 0, i16* @a, align 2
@@ -37,8 +37,8 @@ entry:
; Function Attrs: nounwind
define i64 @char_test1() #0 {
-; CHECK: [[VAR2:r[0-9]+]]{{ *}}={{ *}}memub(##
-; CHECK: combine(#0, [[VAR2]])
+; CHECK: [[VAR2:r[0-9]+]] = memub(##
+; CHECK: combine(#0,[[VAR2]])
entry:
store i8 0, i8* @char_a, align 1
%0 = load i8, i8* @char_b, align 1
@@ -48,7 +48,7 @@ entry:
; Function Attrs: nounwind
define i64 @char_test2() #0 {
-; CHECK: [[VAR3:r[0-9]+]]{{ *}}={{ *}}memb(##
+; CHECK: [[VAR3:r[0-9]+]] = memb(##
; CHECK: sxtw([[VAR3]])
entry:
store i8 0, i8* @char_a, align 1
@@ -59,8 +59,8 @@ entry:
; Function Attrs: nounwind
define i64 @int_test1() #0 {
-; CHECK: [[VAR4:r[0-9]+]]{{ *}}={{ *}}memw(##
-; CHECK: combine(#0, [[VAR4]])
+; CHECK: [[VAR4:r[0-9]+]] = memw(##
+; CHECK: combine(#0,[[VAR4]])
entry:
store i32 0, i32* @int_a, align 4
%0 = load i32, i32* @int_b, align 4
@@ -70,7 +70,7 @@ entry:
; Function Attrs: nounwind
define i64 @int_test2() #0 {
-; CHECK: [[VAR5:r[0-9]+]]{{ *}}={{ *}}memw(##
+; CHECK: [[VAR5:r[0-9]+]] = memw(##
; CHECK: sxtw([[VAR5]])
entry:
store i32 0, i32* @int_a, align 4
diff --git a/test/CodeGen/Hexagon/extract-basic.ll b/test/CodeGen/Hexagon/extract-basic.ll
index c75125cedd35..ad118dea0ab6 100644
--- a/test/CodeGen/Hexagon/extract-basic.ll
+++ b/test/CodeGen/Hexagon/extract-basic.ll
@@ -1,8 +1,8 @@
; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
-; CHECK-DAG: extractu(r{{[0-9]*}}, #3, #4)
-; CHECK-DAG: extractu(r{{[0-9]*}}, #8, #7)
-; CHECK-DAG: extractu(r{{[0-9]*}}, #8, #16)
+; CHECK-DAG: extractu(r{{[0-9]*}},#3,#4)
+; CHECK-DAG: extractu(r{{[0-9]*}},#8,#7)
+; CHECK-DAG: extractu(r{{[0-9]*}},#8,#16)
; C source:
; typedef struct {
diff --git a/test/CodeGen/Hexagon/fadd.ll b/test/CodeGen/Hexagon/fadd.ll
index 6cf0fbbccf73..0418c1724f5b 100644
--- a/test/CodeGen/Hexagon/fadd.ll
+++ b/test/CodeGen/Hexagon/fadd.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate sp floating point add in V5.
-; CHECK: r{{[0-9]+}} = sfadd(r{{[0-9]+}}, r{{[0-9]+}})
+; CHECK: r{{[0-9]+}} = sfadd(r{{[0-9]+}},r{{[0-9]+}})
define i32 @main() nounwind {
entry:
diff --git a/test/CodeGen/Hexagon/find-loop-instr.ll b/test/CodeGen/Hexagon/find-loop-instr.ll
new file mode 100644
index 000000000000..1234baf17f52
--- /dev/null
+++ b/test/CodeGen/Hexagon/find-loop-instr.ll
@@ -0,0 +1,79 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; This code causes multiple endloop instructions to be generated for the
+; same loop. The findLoopInstr call for one endloop would encounter the
+; other endloop and return null in response. This resulted in a crash.
+;
+; Check that with the fix we are able to compile this code successfully.
+
+target triple = "hexagon"
+
+; Function Attrs: norecurse
+define void @fred() local_unnamed_addr #0 align 2 {
+b0:
+ br label %b7
+
+b1: ; preds = %b9
+ br i1 undef, label %b4, label %b2
+
+b2: ; preds = %b1
+ %v3 = sub i32 undef, undef
+ br label %b4
+
+b4: ; preds = %b2, %b1
+ %v5 = phi i32 [ undef, %b1 ], [ %v3, %b2 ]
+ br i1 undef, label %b14, label %b6
+
+b6: ; preds = %b4
+ br label %b10
+
+b7: ; preds = %b0
+ br i1 undef, label %b9, label %b8
+
+b8: ; preds = %b7
+ unreachable
+
+b9: ; preds = %b7
+ br label %b1
+
+b10: ; preds = %b21, %b6
+ %v11 = phi i32 [ %v22, %b21 ], [ %v5, %b6 ]
+ br i1 undef, label %b21, label %b12
+
+b12: ; preds = %b10
+ br label %b15
+
+b13: ; preds = %b21
+ br label %b14
+
+b14: ; preds = %b13, %b4
+ ret void
+
+b15: ; preds = %b12
+ br i1 undef, label %b16, label %b17
+
+b16: ; preds = %b15
+ store i32 0, i32* undef, align 4
+ br label %b21
+
+b17: ; preds = %b15
+ br label %b18
+
+b18: ; preds = %b17
+ br i1 undef, label %b19, label %b20
+
+b19: ; preds = %b18
+ br label %b21
+
+b20: ; preds = %b18
+ store i32 0, i32* undef, align 4
+ br label %b21
+
+b21: ; preds = %b20, %b19, %b16, %b10
+ %v22 = add i32 %v11, -8
+ %v23 = icmp eq i32 %v22, 0
+ br i1 %v23, label %b13, label %b10
+}
+
+attributes #0 = { norecurse "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" }
diff --git a/test/CodeGen/Hexagon/float-amode.ll b/test/CodeGen/Hexagon/float-amode.ll
index 9804f48349f8..d770582ecab9 100644
--- a/test/CodeGen/Hexagon/float-amode.ll
+++ b/test/CodeGen/Hexagon/float-amode.ll
@@ -12,9 +12,9 @@
@a = common global float 0.000000e+00, align 4
; CHECK-LABEL: test1
-; CHECK: [[REG11:(r[0-9]+)]]{{ *}}={{ *}}memw(r{{[0-9]+}} + r{{[0-9]+}}<<#2)
+; CHECK: [[REG11:(r[0-9]+)]] = memw(r{{[0-9]+}}+r{{[0-9]+}}<<#2)
; CHECK: [[REG12:(r[0-9]+)]] += sfmpy({{.*}}[[REG11]]
-; CHECK: memw(r{{[0-9]+}} + r{{[0-9]+}}<<#2) = [[REG12]].new
+; CHECK: memw(r{{[0-9]+}}+r{{[0-9]+}}<<#2) = [[REG12]].new
; Function Attrs: norecurse nounwind
define void @test1(%struct.matrix_params* nocapture readonly %params, i32 %col1) {
@@ -35,7 +35,7 @@ entry:
}
; CHECK-LABEL: test2
-; CHECK: [[REG21:(r[0-9]+)]]{{ *}}={{ *}}memw(##globB+92)
+; CHECK: [[REG21:(r[0-9]+)]] = memw(##globB+92)
; CHECK: [[REG22:(r[0-9]+)]] = sfadd({{.*}}[[REG21]]
; CHECK: memw(##globA+84) = [[REG22]]
@@ -54,9 +54,9 @@ entry:
}
; CHECK-LABEL: test3
-; CHECK: [[REG31:(r[0-9]+)]]{{ *}}={{ *}}memw(#b)
+; CHECK: [[REG31:(r[0-9]+)]] = memw(gp+#b)
; CHECK: [[REG32:(r[0-9]+)]] = sfadd({{.*}}[[REG31]]
-; CHECK: memw(#a) = [[REG32]]
+; CHECK: memw(gp+#a) = [[REG32]]
; Function Attrs: norecurse nounwind
define void @test3(%struct.matrix_params* nocapture readonly %params, i32 %col1) {
@@ -73,9 +73,9 @@ entry:
}
; CHECK-LABEL: test4
-; CHECK: [[REG41:(r[0-9]+)]]{{ *}}={{ *}}memw(r0<<#2 + ##globB+52)
+; CHECK: [[REG41:(r[0-9]+)]] = memw(r0<<#2+##globB+52)
; CHECK: [[REG42:(r[0-9]+)]] = sfadd({{.*}}[[REG41]]
-; CHECK: memw(r0<<#2 + ##globA+60) = [[REG42]]
+; CHECK: memw(r0<<#2+##globA+60) = [[REG42]]
; Function Attrs: noinline norecurse nounwind
define void @test4(i32 %col1) {
entry:
diff --git a/test/CodeGen/Hexagon/fmul.ll b/test/CodeGen/Hexagon/fmul.ll
index 4f55d0bec471..552f98ec7a53 100644
--- a/test/CodeGen/Hexagon/fmul.ll
+++ b/test/CodeGen/Hexagon/fmul.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate single precision floating point multiply in V5.
-; CHECK: r{{[0-9]+}} = sfmpy(r{{[0-9]+}}, r{{[0-9]+}})
+; CHECK: r{{[0-9]+}} = sfmpy(r{{[0-9]+}},r{{[0-9]+}})
define i32 @main() nounwind {
diff --git a/test/CodeGen/Hexagon/fsel.ll b/test/CodeGen/Hexagon/fsel.ll
index 247249da50b1..a2f0b4a47f10 100644
--- a/test/CodeGen/Hexagon/fsel.ll
+++ b/test/CodeGen/Hexagon/fsel.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
; CHECK-LABEL: danny:
-; CHECK: mux(p0, r1, ##1065353216)
+; CHECK: mux(p0,r1,##1065353216)
define float @danny(i32 %x, float %f) #0 {
%t = icmp sgt i32 %x, 0
@@ -10,7 +10,7 @@ define float @danny(i32 %x, float %f) #0 {
}
; CHECK-LABEL: sammy:
-; CHECK: mux(p0, ##1069547520, r1)
+; CHECK: mux(p0,##1069547520,r1)
define float @sammy(i32 %x, float %f) #0 {
%t = icmp sgt i32 %x, 0
diff --git a/test/CodeGen/Hexagon/fsub.ll b/test/CodeGen/Hexagon/fsub.ll
index ca7bdc4d0b38..d7b0e2f65b33 100644
--- a/test/CodeGen/Hexagon/fsub.ll
+++ b/test/CodeGen/Hexagon/fsub.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate sp floating point subtract in V5.
-; CHECK: r{{[0-9]+}} = sfsub(r{{[0-9]+}}, r{{[0-9]+}})
+; CHECK: r{{[0-9]+}} = sfsub(r{{[0-9]+}},r{{[0-9]+}})
define i32 @main() nounwind {
entry:
diff --git a/test/CodeGen/Hexagon/fusedandshift.ll b/test/CodeGen/Hexagon/fusedandshift.ll
index 414574aec401..9abd366e6916 100644
--- a/test/CodeGen/Hexagon/fusedandshift.ll
+++ b/test/CodeGen/Hexagon/fusedandshift.ll
@@ -1,8 +1,8 @@
-; RUN: llc -march=hexagon -hexagon-extract=0 < %s | FileCheck %s
+; RUN: llc -march=hexagon -hexagon-extract=0 -hexbit-extract=0 < %s | FileCheck %s
; Check that we generate fused logical and with shift instruction.
; Disable "extract" generation, since it may eliminate the and/lsr.
-; CHECK: r{{[0-9]+}} = and(#15, lsr(r{{[0-9]+}}, #{{[0-9]+}})
+; CHECK: r{{[0-9]+}} = and(#15,lsr(r{{[0-9]+}},#{{[0-9]+}})
define i32 @main(i16* %a, i16* %b) nounwind {
entry:
diff --git a/test/CodeGen/Hexagon/gp-rel.ll b/test/CodeGen/Hexagon/gp-rel.ll
index bb7cb182bf1b..00f57797b6f1 100644
--- a/test/CodeGen/Hexagon/gp-rel.ll
+++ b/test/CodeGen/Hexagon/gp-rel.ll
@@ -7,8 +7,8 @@
define i32 @foo(i32 %p) #0 {
entry:
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(#a)
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(#b)
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(gp+#a)
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(gp+#b)
; CHECK: if{{ *}}(p{{[0-3]}}) memw(##c){{ *}}={{ *}}r{{[0-9]+}}
%0 = load i32, i32* @a, align 4
%1 = load i32, i32* @b, align 4
diff --git a/test/CodeGen/Hexagon/hwloop-cleanup.ll b/test/CodeGen/Hexagon/hwloop-cleanup.ll
index c04966a5a4b2..56a6fedf81ef 100644
--- a/test/CodeGen/Hexagon/hwloop-cleanup.ll
+++ b/test/CodeGen/Hexagon/hwloop-cleanup.ll
@@ -5,7 +5,7 @@
; Bug 6685.
; CHECK: loop0
-; CHECK-NOT: r{{[0-9]+}}{{.}}={{.}}add(r{{[0-9]+}},{{.}}#-1)
+; CHECK-NOT: r{{[0-9]+}} = add(r{{[0-9]+}},#-1)
; CHECK-NOT: cmp.eq
; CHECK: endloop0
@@ -39,7 +39,7 @@ for.end:
; This test checks that the initial loop count value is removed.
; CHECK-NOT: ={{.}}#40
; CHECK: loop0
-; CHECK-NOT: r{{[0-9]+}}{{.}}={{.}}add(r{{[0-9]+}},{{.}}#-1)
+; CHECK-NOT: r{{[0-9]+}} = add(r{{[0-9]+}},#-1)
; CHECK-NOT: cmp.eq
; CHECK: endloop0
@@ -64,7 +64,7 @@ for.end:
; This test checks that we don't remove the induction variable since it's used.
; CHECK: loop0
-; CHECK: r{{[0-9]+}}{{.}}={{.}}add(r{{[0-9]+}},{{.}}#1)
+; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}},#1)
; CHECK-NOT: cmp.eq
; CHECK: endloop0
define i32 @test3(i32* nocapture %b) nounwind {
diff --git a/test/CodeGen/Hexagon/hwloop-loop1.ll b/test/CodeGen/Hexagon/hwloop-loop1.ll
index 238d34e7ea15..427efdc2c111 100644
--- a/test/CodeGen/Hexagon/hwloop-loop1.ll
+++ b/test/CodeGen/Hexagon/hwloop-loop1.ll
@@ -2,8 +2,8 @@
;
; Generate loop1 instruction for double loop sequence.
-; CHECK: loop1(.LBB{{.}}_{{.}}, #100)
-; CHECK: loop0(.LBB{{.}}_{{.}}, #100)
+; CHECK: loop1(.LBB{{.}}_{{.}},#100)
+; CHECK: loop0(.LBB{{.}}_{{.}},#100)
; CHECK: endloop0
; CHECK: endloop1
@@ -12,9 +12,9 @@ entry:
%array = alloca [100 x i32], align 8
%doublearray = alloca [100 x [100 x i32]], align 8
%0 = bitcast [100 x i32]* %array to i8*
- call void @llvm.lifetime.start(i64 400, i8* %0) #1
+ call void @llvm.lifetime.start.p0i8(i64 400, i8* %0) #1
%1 = bitcast [100 x [100 x i32]]* %doublearray to i8*
- call void @llvm.lifetime.start(i64 40000, i8* %1) #1
+ call void @llvm.lifetime.start.p0i8(i64 40000, i8* %1) #1
%arrayidx1 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %doublearray, i32 0, i32 10, i32 10
%arrayidx2.gep = getelementptr [100 x i32], [100 x i32]* %array, i32 0, i32 0
br label %for.body
@@ -56,11 +56,11 @@ for.inc15:
for.end17:
%3 = load i32, i32* %arrayidx1, align 8
- call void @llvm.lifetime.end(i64 40000, i8* %1) #1
- call void @llvm.lifetime.end(i64 400, i8* %0) #1
+ call void @llvm.lifetime.end.p0i8(i64 40000, i8* %1) #1
+ call void @llvm.lifetime.end.p0i8(i64 400, i8* %0) #1
ret i32 %3
}
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
diff --git a/test/CodeGen/Hexagon/hwloop1.ll b/test/CodeGen/Hexagon/hwloop1.ll
index 68af3b34eeeb..7a805d951b95 100644
--- a/test/CodeGen/Hexagon/hwloop1.ll
+++ b/test/CodeGen/Hexagon/hwloop1.ll
@@ -3,7 +3,7 @@
; Case 1 : Loop with a constant number of iterations.
; CHECK-LABEL: @hwloop1
-; CHECK: loop0(.LBB{{.}}_{{.}}, #10)
+; CHECK: loop0(.LBB{{.}}_{{.}},#10)
; CHECK: endloop0
@a = common global [10 x i32] zeroinitializer, align 4
@@ -23,7 +23,7 @@ for.end:
; Case 2 : Loop with a run-time number of iterations.
; CHECK-LABEL: @hwloop2
-; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}})
+; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}})
; CHECK: endloop0
define i32 @hwloop2(i32 %n, i32* nocapture %b) nounwind {
@@ -54,8 +54,8 @@ for.end:
; Case 3 : Induction variable increment more than 1.
; CHECK-LABEL: @hwloop3
-; CHECK: lsr(r{{[0-9]+}}, #2)
-; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}})
+; CHECK: lsr(r{{[0-9]+}},#2)
+; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}})
; CHECK: endloop0
define i32 @hwloop3(i32 %n, i32* nocapture %b) nounwind {
@@ -86,7 +86,7 @@ for.end:
; Case 4 : Loop exit compare uses register instead of immediate value.
; CHECK-LABEL: @hwloop4
-; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}})
+; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}})
; CHECK: endloop0
define i32 @hwloop4(i32 %n, i32* nocapture %b) nounwind {
@@ -114,7 +114,7 @@ for.end:
; Case 5: After LSR, the initial value is 100 and the iv decrements to 0.
; CHECK-LABEL: @hwloop5
-; CHECK: loop0(.LBB{{.}}_{{.}}, #100)
+; CHECK: loop0(.LBB{{.}}_{{.}},#100)
; CHECK: endloop0
define void @hwloop5(i32* nocapture %a, i32* nocapture %res) nounwind {
@@ -138,8 +138,8 @@ for.end:
; Case 6: Large immediate offset
; CHECK-LABEL: @hwloop6
-; CHECK-NOT: loop0(.LBB{{.}}_{{.}}, #1024)
-; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}})
+; CHECK-NOT: loop0(.LBB{{.}}_{{.}},#1024)
+; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}})
; CHECK: endloop0
define void @hwloop6(i32* nocapture %a, i32* nocapture %res) nounwind {
diff --git a/test/CodeGen/Hexagon/hwloop2.ll b/test/CodeGen/Hexagon/hwloop2.ll
index d411d979904e..ba3de1f1a2af 100644
--- a/test/CodeGen/Hexagon/hwloop2.ll
+++ b/test/CodeGen/Hexagon/hwloop2.ll
@@ -2,7 +2,7 @@
; Test for multiple phis with induction variables.
-; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}})
+; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}})
; CHECK: endloop0
define i32 @hwloop4(i32* nocapture %s, i32* nocapture %a, i32 %n) {
diff --git a/test/CodeGen/Hexagon/hwloop4.ll b/test/CodeGen/Hexagon/hwloop4.ll
index d159c45e3fb8..b8cea4c77720 100644
--- a/test/CodeGen/Hexagon/hwloop4.ll
+++ b/test/CodeGen/Hexagon/hwloop4.ll
@@ -2,9 +2,9 @@
;
; Remove the unnecessary 'add' instruction used for the hardware loop setup.
-; CHECK: [[OP0:r[0-9]+]] = add([[OP1:r[0-9]+]], #-[[OP2:[0-9]+]]
-; CHECK-NOT: add([[OP0]], #[[OP2]])
-; CHECK: lsr([[OP1]], #{{[0-9]+}})
+; CHECK: [[OP0:r[0-9]+]] = add([[OP1:r[0-9]+]],#-[[OP2:[0-9]+]]
+; CHECK-NOT: add([[OP0]],#[[OP2]])
+; CHECK: lsr([[OP1]],#{{[0-9]+}})
; CHECK: loop0
define void @matrix_mul_matrix(i32 %N, i32* nocapture %C, i16* nocapture readnone %A, i16* nocapture readnone %B) #0 {
diff --git a/test/CodeGen/Hexagon/hwloop5.ll b/test/CodeGen/Hexagon/hwloop5.ll
index 0886b03cc754..f4990dabebb9 100644
--- a/test/CodeGen/Hexagon/hwloop5.ll
+++ b/test/CodeGen/Hexagon/hwloop5.ll
@@ -2,9 +2,9 @@
;
; Generate hardware loop when unknown trip count loop is vectorized.
-; CHECK: loop0(.LBB{{[0-9]*}}_{{[0-9]*}}, r{{[0-9]+}})
+; CHECK: loop0(.LBB{{[0-9]*}}_{{[0-9]*}},r{{[0-9]+}})
; CHECK: endloop0
-; CHECK: loop0(.LBB{{[0-9]*}}_{{[0-9]*}}, r{{[0-9]+}})
+; CHECK: loop0(.LBB{{[0-9]*}}_{{[0-9]*}},r{{[0-9]+}})
; CHECK: endloop0
@A = common global [1000 x i32] zeroinitializer, align 8
diff --git a/test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll b/test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll
index 68a5dc16ecff..91b9aaa9cb4e 100644
--- a/test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll
+++ b/test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=hexagon -o - %s | FileCheck %s
+; RUN: llc -march=hexagon -hexagon-eif=0 < %s | FileCheck %s
target triple = "hexagon"
%struct.0 = type { i16, i16 }
@@ -15,7 +15,7 @@ entry:
br i1 %cmp199, label %if.then200, label %if.else201
; CHECK-DAG: [[R4:r[0-9]+]] = #4
-; CHECK: p0 = cmp.eq(r0, #0)
+; CHECK: p0 = cmp.eq(r0,#0)
; CHECK: if (!p0.new) [[R3:r[0-9]+]] = #3
; CHECK-DAG: if (!p0) memh(##t) = [[R3]]
; CHECK-DAG: if (p0) memh(##t) = [[R4]]
diff --git a/test/CodeGen/Hexagon/ifcvt-simple-bprob.ll b/test/CodeGen/Hexagon/ifcvt-simple-bprob.ll
new file mode 100644
index 000000000000..2d48d30dd7d8
--- /dev/null
+++ b/test/CodeGen/Hexagon/ifcvt-simple-bprob.ll
@@ -0,0 +1,36 @@
+; RUN: llc -march=hexagon < %s
+
+; Check that branch probabilities are set correctly after performing the
+; simple variant of if-conversion. The converted block has a branch that
+; is not analyzable.
+
+target triple = "hexagon"
+
+declare void @foo()
+
+; CHECK-LABEL: danny
+; CHECK: if (p0.new) jump:nt foo
+define void @danny(i32 %x) {
+ %t0 = icmp sgt i32 %x, 0
+ br i1 %t0, label %tail, label %exit, !prof !0
+tail:
+  tail call void @foo()
+ ret void
+exit:
+ ret void
+}
+
+; CHECK-LABEL: sammy
+; CHECK: if (!p0.new) jump:t foo
+define void @sammy(i32 %x) {
+ %t0 = icmp sgt i32 %x, 0
+ br i1 %t0, label %exit, label %tail, !prof !0
+tail:
+  tail call void @foo()
+ ret void
+exit:
+ ret void
+}
+
+!0 = !{!"branch_weights", i32 1, i32 2000}
+
diff --git a/test/CodeGen/Hexagon/inline-asm-vecpred128.ll b/test/CodeGen/Hexagon/inline-asm-vecpred128.ll
new file mode 100644
index 000000000000..234f5a0b7926
--- /dev/null
+++ b/test/CodeGen/Hexagon/inline-asm-vecpred128.ll
@@ -0,0 +1,15 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; REQUIRES: asserts
+
+; Make sure we can handle the 'q' constraint in the 128-byte mode.
+
+target triple = "hexagon"
+
+; CHECK-LABEL: fred
+; CHECK: if (q{{[0-3]}}) vmem
+define void @fred() #0 {
+ tail call void asm sideeffect "if ($0) vmem($1) = $2;", "q,r,v,~{memory}"(<32 x i32> undef, <32 x i32>* undef, <32 x i32> undef) #0
+ ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-double" }
diff --git a/test/CodeGen/Hexagon/insert-basic.ll b/test/CodeGen/Hexagon/insert-basic.ll
index e941c063d9ed..14ee735abd79 100644
--- a/test/CodeGen/Hexagon/insert-basic.ll
+++ b/test/CodeGen/Hexagon/insert-basic.ll
@@ -1,8 +1,8 @@
; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
-; CHECK-DAG: insert(r{{[0-9]*}}, #17, #0)
-; CHECK-DAG: insert(r{{[0-9]*}}, #18, #0)
-; CHECK-DAG: insert(r{{[0-9]*}}, #22, #0)
-; CHECK-DAG: insert(r{{[0-9]*}}, #12, #0)
+; CHECK-DAG: insert(r{{[0-9]*}},#17,#0)
+; CHECK-DAG: insert(r{{[0-9]*}},#18,#0)
+; CHECK-DAG: insert(r{{[0-9]*}},#22,#0)
+; CHECK-DAG: insert(r{{[0-9]*}},#12,#0)
; C source:
; typedef struct {
diff --git a/test/CodeGen/Hexagon/insert4.ll b/test/CodeGen/Hexagon/insert4.ll
index c4d575dd4060..3bc8e9e57982 100644
--- a/test/CodeGen/Hexagon/insert4.ll
+++ b/test/CodeGen/Hexagon/insert4.ll
@@ -1,8 +1,8 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
;
; Check that we no longer generate 4 inserts.
-; CHECK: combine(r{{[0-9]+}}.l, r{{[0-9]+}}.l)
-; CHECK: combine(r{{[0-9]+}}.l, r{{[0-9]+}}.l)
+; CHECK: combine(r{{[0-9]+}}.l,r{{[0-9]+}}.l)
+; CHECK: combine(r{{[0-9]+}}.l,r{{[0-9]+}}.l)
; CHECK-NOT: insert
target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
diff --git a/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll b/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
index fcf80b08181e..abdd4cba7c5c 100644
--- a/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
+++ b/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
@@ -10,21 +10,21 @@ define i32 @A2_addi(i32 %a) {
%z = call i32 @llvm.hexagon.A2.addi(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = add({{.*}}, #0)
+; CHECK: = add({{.*}},#0)
declare i32 @llvm.hexagon.A2.add(i32, i32)
define i32 @A2_add(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.add(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}, {{.*}})
+; CHECK: = add({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.addsat(i32, i32)
define i32 @A2_addsat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addsat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}, {{.*}}):sat
+; CHECK: = add({{.*}},{{.*}}):sat
; Logical operations
declare i32 @llvm.hexagon.A2.and(i32, i32)
@@ -32,35 +32,35 @@ define i32 @A2_and(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.and(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = and({{.*}}, {{.*}})
+; CHECK: = and({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.or(i32, i32)
define i32 @A2_or(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.or(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = or({{.*}}, {{.*}})
+; CHECK: = or({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.xor(i32, i32)
define i32 @A2_xor(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.xor(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = xor({{.*}}, {{.*}})
+; CHECK: = xor({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.andn(i32, i32)
define i32 @A4_andn(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.andn(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = and({{.*}}, ~{{.*}})
+; CHECK: = and({{.*}},~{{.*}})
declare i32 @llvm.hexagon.A4.orn(i32, i32)
define i32 @A4_orn(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.orn(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = or({{.*}}, ~{{.*}})
+; CHECK: = or({{.*}},~{{.*}})
; Subtract
declare i32 @llvm.hexagon.A2.sub(i32, i32)
@@ -68,14 +68,14 @@ define i32 @A2_sub(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.sub(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}, {{.*}})
+; CHECK: = sub({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.subsat(i32, i32)
define i32 @A2_subsat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subsat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}, {{.*}}):sat
+; CHECK: = sub({{.*}},{{.*}}):sat
; Sign extend
declare i32 @llvm.hexagon.A2.sxtb(i32)
@@ -128,21 +128,21 @@ define i32 @A2_svaddh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svaddh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vaddh({{.*}}, {{.*}})
+; CHECK: = vaddh({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.svaddhs(i32, i32)
define i32 @A2_svaddhs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svaddhs(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vaddh({{.*}}, {{.*}}):sat
+; CHECK: = vaddh({{.*}},{{.*}}):sat
declare i32 @llvm.hexagon.A2.svadduhs(i32, i32)
define i32 @A2_svadduhs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svadduhs(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vadduh({{.*}}, {{.*}}):sat
+; CHECK: = vadduh({{.*}},{{.*}}):sat
; Vector average halfwords
declare i32 @llvm.hexagon.A2.svavgh(i32, i32)
@@ -150,21 +150,21 @@ define i32 @A2_svavgh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svavgh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vavgh({{.*}}, {{.*}})
+; CHECK: = vavgh({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.svavghs(i32, i32)
define i32 @A2_svavghs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svavghs(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vavgh({{.*}}, {{.*}}):rnd
+; CHECK: = vavgh({{.*}},{{.*}}):rnd
declare i32 @llvm.hexagon.A2.svnavgh(i32, i32)
define i32 @A2_svnavgh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svnavgh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vnavgh({{.*}}, {{.*}})
+; CHECK: = vnavgh({{.*}},{{.*}})
; Vector subtract halfwords
declare i32 @llvm.hexagon.A2.svsubh(i32, i32)
@@ -172,21 +172,21 @@ define i32 @A2_svsubh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svsubh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vsubh({{.*}}, {{.*}})
+; CHECK: = vsubh({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.svsubhs(i32, i32)
define i32 @A2_svsubhs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svsubhs(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vsubh({{.*}}, {{.*}}):sat
+; CHECK: = vsubh({{.*}},{{.*}}):sat
declare i32 @llvm.hexagon.A2.svsubuhs(i32, i32)
define i32 @A2_svsubuhs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svsubuhs(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vsubuh({{.*}}, {{.*}}):sat
+; CHECK: = vsubuh({{.*}},{{.*}}):sat
; Zero extend
declare i32 @llvm.hexagon.A2.zxth(i32)
diff --git a/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll b/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll
index c9fb0afe0781..554dac4563d1 100644
--- a/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll
+++ b/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll
@@ -10,56 +10,56 @@ define i64 @A4_combineri(i32 %a) {
%z = call i64 @llvm.hexagon.A4.combineri(i32 %a, i32 0)
ret i64 %z
}
-; CHECK: = combine({{.*}}, #0)
+; CHECK: = combine({{.*}},#0)
declare i64 @llvm.hexagon.A4.combineir(i32, i32)
define i64 @A4_combineir(i32 %a) {
%z = call i64 @llvm.hexagon.A4.combineir(i32 0, i32 %a)
ret i64 %z
}
-; CHECK: = combine(#0, {{.*}})
+; CHECK: = combine(#0,{{.*}})
declare i64 @llvm.hexagon.A2.combineii(i32, i32)
define i64 @A2_combineii() {
%z = call i64 @llvm.hexagon.A2.combineii(i32 0, i32 0)
ret i64 %z
}
-; CHECK: = combine(#0, #0)
+; CHECK: = combine(#0,#0)
declare i32 @llvm.hexagon.A2.combine.hh(i32, i32)
define i32 @A2_combine_hh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.combine.hh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = combine({{.*}}, {{.*}})
+; CHECK: = combine({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.combine.hl(i32, i32)
define i32 @A2_combine_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.combine.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = combine({{.*}}, {{.*}})
+; CHECK: = combine({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.combine.lh(i32, i32)
define i32 @A2_combine_lh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.combine.lh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = combine({{.*}}, {{.*}})
+; CHECK: = combine({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.combine.ll(i32, i32)
define i32 @A2_combine_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.combine.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = combine({{.*}}, {{.*}})
+; CHECK: = combine({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.combinew(i32, i32)
define i64 @A2_combinew(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.A2.combinew(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = combine({{.*}}, {{.*}})
+; CHECK: = combine({{.*}},{{.*}})
; Mux
declare i32 @llvm.hexagon.C2.muxri(i32, i32, i32)
@@ -67,21 +67,21 @@ define i32 @C2_muxri(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.muxri(i32 %a, i32 0, i32 %b)
ret i32 %z
}
-; CHECK: = mux({{.*}}, #0, {{.*}})
+; CHECK: = mux({{.*}},#0,{{.*}})
declare i32 @llvm.hexagon.C2.muxir(i32, i32, i32)
define i32 @C2_muxir(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.muxir(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: = mux({{.*}}, {{.*}}, #0)
+; CHECK: = mux({{.*}},{{.*}},#0)
declare i32 @llvm.hexagon.C2.mux(i32, i32, i32)
define i32 @C2_mux(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C2.mux(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = mux({{.*}}, {{.*}}, {{.*}})
+; CHECK: = mux({{.*}},{{.*}},{{.*}})
; Shift word by 16
declare i32 @llvm.hexagon.A2.aslh(i32)
@@ -104,4 +104,4 @@ define i64 @S2_packhl(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.packhl(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = packhl({{.*}}, {{.*}})
+; CHECK: = packhl({{.*}},{{.*}})
diff --git a/test/CodeGen/Hexagon/intrinsics/byte-store-double.ll b/test/CodeGen/Hexagon/intrinsics/byte-store-double.ll
new file mode 100644
index 000000000000..2a54bfef0ad7
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/byte-store-double.ll
@@ -0,0 +1,41 @@
+; RUN: llc -mattr=+hvx-double -march=hexagon -O2 < %s | FileCheck %s
+
+; CHECK-LABEL: V6_vmaskedstoreq_128B
+; CHECK: if (q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
+
+; CHECK-LABEL: V6_vmaskedstorenq_128B
+; CHECK: if (!q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
+
+; CHECK-LABEL: V6_vmaskedstorentq_128B
+; CHECK: if (q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
+
+; CHECK-LABEL: V6_vmaskedstorentnq_128B
+; CHECK: if (!q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
+
+declare void @llvm.hexagon.V6.vmaskedstoreq.128B(<1024 x i1>, i8*, <32 x i32>)
+define void @V6_vmaskedstoreq_128B( <32 x i32> %a, i8* %b, <32 x i32> %c) {
+ %1 = bitcast <32 x i32> %a to <1024 x i1>
+ call void @llvm.hexagon.V6.vmaskedstoreq.128B(<1024 x i1> %1, i8* %b, <32 x i32> %c)
+ ret void
+}
+
+declare void @llvm.hexagon.V6.vmaskedstorenq.128B(<1024 x i1>, i8*, <32 x i32>)
+define void @V6_vmaskedstorenq_128B( <32 x i32> %a, i8* %b, <32 x i32> %c) {
+ %1 = bitcast <32 x i32> %a to <1024 x i1>
+ call void @llvm.hexagon.V6.vmaskedstorenq.128B(<1024 x i1> %1, i8* %b, <32 x i32> %c)
+ ret void
+}
+
+declare void @llvm.hexagon.V6.vmaskedstorentq.128B(<1024 x i1>, i8*, <32 x i32>)
+define void @V6_vmaskedstorentq_128B( <32 x i32> %a, i8* %b, <32 x i32> %c) {
+ %1 = bitcast <32 x i32> %a to <1024 x i1>
+ call void @llvm.hexagon.V6.vmaskedstorentq.128B(<1024 x i1> %1, i8* %b, <32 x i32> %c)
+ ret void
+}
+
+declare void @llvm.hexagon.V6.vmaskedstorentnq.128B(<1024 x i1>, i8*, <32 x i32>)
+define void @V6_vmaskedstorentnq_128B( <32 x i32> %a, i8* %b, <32 x i32> %c) {
+ %1 = bitcast <32 x i32> %a to <1024 x i1>
+ call void @llvm.hexagon.V6.vmaskedstorentnq.128B(<1024 x i1> %1, i8* %b, <32 x i32> %c)
+ ret void
+}
diff --git a/test/CodeGen/Hexagon/intrinsics/byte-store.ll b/test/CodeGen/Hexagon/intrinsics/byte-store.ll
new file mode 100644
index 000000000000..208c15fec980
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/byte-store.ll
@@ -0,0 +1,41 @@
+; RUN: llc -mattr=+hvx -march=hexagon -O2 < %s | FileCheck %s
+
+; CHECK-LABEL: V6_vmaskedstoreq
+; CHECK: if (q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
+
+; CHECK-LABEL: V6_vmaskedstorenq
+; CHECK: if (!q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
+
+; CHECK-LABEL: V6_vmaskedstorentq
+; CHECK: if (q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
+
+; CHECK-LABEL: V6_vmaskedstorentnq
+; CHECK: if (!q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
+
+declare void @llvm.hexagon.V6.vmaskedstoreq(<512 x i1>, i8*, <16 x i32>)
+define void @V6_vmaskedstoreq( <16 x i32> %a, i8* %b, <16 x i32> %c) {
+ %1 = bitcast <16 x i32> %a to <512 x i1>
+ call void @llvm.hexagon.V6.vmaskedstoreq(<512 x i1> %1, i8* %b, <16 x i32> %c)
+ ret void
+}
+
+declare void @llvm.hexagon.V6.vmaskedstorenq(<512 x i1>, i8*, <16 x i32>)
+define void @V6_vmaskedstorenq( <16 x i32> %a, i8* %b, <16 x i32> %c) {
+ %1 = bitcast <16 x i32> %a to <512 x i1>
+ call void @llvm.hexagon.V6.vmaskedstorenq(<512 x i1> %1, i8* %b, <16 x i32> %c)
+ ret void
+}
+
+declare void @llvm.hexagon.V6.vmaskedstorentq(<512 x i1>, i8*, <16 x i32>)
+define void @V6_vmaskedstorentq( <16 x i32> %a, i8* %b, <16 x i32> %c) {
+ %1 = bitcast <16 x i32> %a to <512 x i1>
+ call void @llvm.hexagon.V6.vmaskedstorentq(<512 x i1> %1, i8* %b, <16 x i32> %c)
+ ret void
+}
+
+declare void @llvm.hexagon.V6.vmaskedstorentnq(<512 x i1>, i8*, <16 x i32>)
+define void @V6_vmaskedstorentnq( <16 x i32> %a, i8* %b, <16 x i32> %c) {
+ %1 = bitcast <16 x i32> %a to <512 x i1>
+ call void @llvm.hexagon.V6.vmaskedstorentnq(<512 x i1> %1, i8* %b, <16 x i32> %c)
+ ret void
+}
diff --git a/test/CodeGen/Hexagon/intrinsics/cr.ll b/test/CodeGen/Hexagon/intrinsics/cr.ll
index f308ef8e5664..4c0fcb3707c1 100644
--- a/test/CodeGen/Hexagon/intrinsics/cr.ll
+++ b/test/CodeGen/Hexagon/intrinsics/cr.ll
@@ -10,14 +10,14 @@ define i32 @C4_fastcorner9(i32 %a, i32 %b) {
%z = call i32@llvm.hexagon.C4.fastcorner9(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = fastcorner9({{.*}}, {{.*}})
+; CHECK: = fastcorner9({{.*}},{{.*}})
declare i32 @llvm.hexagon.C4.fastcorner9.not(i32, i32)
define i32 @C4_fastcorner9_not(i32 %a, i32 %b) {
%z = call i32@llvm.hexagon.C4.fastcorner9.not(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = !fastcorner9({{.*}}, {{.*}})
+; CHECK: = !fastcorner9({{.*}},{{.*}})
; Logical reductions on predicates
declare i32 @llvm.hexagon.C2.any8(i32)
@@ -41,70 +41,70 @@ define i32 @C2_and(i32 %a, i32 %b) {
%z = call i32@llvm.hexagon.C2.and(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = and({{.*}}, {{.*}})
+; CHECK: = and({{.*}},{{.*}})
declare i32 @llvm.hexagon.C4.and.and(i32, i32, i32)
define i32 @C4_and_and(i32 %a, i32 %b, i32 %c) {
%z = call i32@llvm.hexagon.C4.and.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = and({{.*}}, and({{.*}}, {{.*}}))
+; CHECK: = and({{.*}},and({{.*}},{{.*}}))
declare i32 @llvm.hexagon.C2.or(i32, i32)
define i32 @C2_or(i32 %a, i32 %b) {
%z = call i32@llvm.hexagon.C2.or(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = or({{.*}}, {{.*}})
+; CHECK: = or({{.*}},{{.*}})
declare i32 @llvm.hexagon.C4.and.or(i32, i32, i32)
define i32 @C4_and_or(i32 %a, i32 %b, i32 %c) {
%z = call i32@llvm.hexagon.C4.and.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = and({{.*}}, or({{.*}}, {{.*}}))
+; CHECK: = and({{.*}},or({{.*}},{{.*}}))
declare i32 @llvm.hexagon.C2.xor(i32, i32)
define i32 @C2_xor(i32 %a, i32 %b) {
%z = call i32@llvm.hexagon.C2.xor(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = xor({{.*}}, {{.*}})
+; CHECK: = xor({{.*}},{{.*}})
declare i32 @llvm.hexagon.C4.or.and(i32, i32, i32)
define i32 @C4_or_and(i32 %a, i32 %b, i32 %c) {
%z = call i32@llvm.hexagon.C4.or.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = or({{.*}}, and({{.*}}, {{.*}}))
+; CHECK: = or({{.*}},and({{.*}},{{.*}}))
declare i32 @llvm.hexagon.C2.andn(i32, i32)
define i32 @C2_andn(i32 %a, i32 %b) {
%z = call i32@llvm.hexagon.C2.andn(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = and({{.*}}, !{{.*}})
+; CHECK: = and({{.*}},!{{.*}})
declare i32 @llvm.hexagon.C4.or.or(i32, i32, i32)
define i32 @C4_or_or(i32 %a, i32 %b, i32 %c) {
%z = call i32@llvm.hexagon.C4.or.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = or({{.*}}, or({{.*}}, {{.*}}))
+; CHECK: = or({{.*}},or({{.*}},{{.*}}))
declare i32 @llvm.hexagon.C4.and.andn(i32, i32, i32)
define i32 @C4_and_andn(i32 %a, i32 %b, i32 %c) {
%z = call i32@llvm.hexagon.C4.and.andn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = and({{.*}}, and({{.*}}, !{{.*}}))
+; CHECK: = and({{.*}},and({{.*}},!{{.*}}))
declare i32 @llvm.hexagon.C4.and.orn(i32, i32, i32)
define i32 @C4_and_orn(i32 %a, i32 %b, i32 %c) {
%z = call i32@llvm.hexagon.C4.and.orn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = and({{.*}}, or({{.*}}, !{{.*}}))
+; CHECK: = and({{.*}},or({{.*}},!{{.*}}))
declare i32 @llvm.hexagon.C2.not(i32)
define i32 @C2_not(i32 %a) {
@@ -118,18 +118,18 @@ define i32 @C4_or_andn(i32 %a, i32 %b, i32 %c) {
%z = call i32@llvm.hexagon.C4.or.andn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = or({{.*}}, and({{.*}}, !{{.*}}))
+; CHECK: = or({{.*}},and({{.*}},!{{.*}}))
declare i32 @llvm.hexagon.C2.orn(i32, i32)
define i32 @C2_orn(i32 %a, i32 %b) {
%z = call i32@llvm.hexagon.C2.orn(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = or({{.*}}, !{{.*}})
+; CHECK: = or({{.*}},!{{.*}})
declare i32 @llvm.hexagon.C4.or.orn(i32, i32, i32)
define i32 @C4_or_orn(i32 %a, i32 %b, i32 %c) {
%z = call i32@llvm.hexagon.C4.or.orn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = or({{.*}}, or({{.*}}, !{{.*}}))
+; CHECK: = or({{.*}},or({{.*}},!{{.*}}))
diff --git a/test/CodeGen/Hexagon/intrinsics/system_user.ll b/test/CodeGen/Hexagon/intrinsics/system_user.ll
index dad4effb0a14..ac4c53e221d0 100644
--- a/test/CodeGen/Hexagon/intrinsics/system_user.ll
+++ b/test/CodeGen/Hexagon/intrinsics/system_user.ll
@@ -10,4 +10,4 @@ define void @prefetch(i8* %a) {
call void @llvm.hexagon.prefetch(i8* %a)
ret void
}
-; CHECK: dcfetch({{.*}} + #0)
+; CHECK: dcfetch({{.*}}+#0)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll b/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll
index c5c23c22bde9..4d630c62005b 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll
@@ -34,42 +34,42 @@ define i32 @S4_addaddi(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.addaddi(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: = add({{.*}}, add({{.*}}, #0))
+; CHECK: = add({{.*}},add({{.*}},#0))
declare i32 @llvm.hexagon.S4.subaddi(i32, i32, i32)
define i32 @S4_subaddi(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.subaddi(i32 %a, i32 0, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}, sub(#0, {{.*}}))
+; CHECK: = add({{.*}},sub(#0,{{.*}}))
declare i32 @llvm.hexagon.M2.accii(i32, i32, i32)
define i32 @M2_accii(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.accii(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: += add({{.*}}, #0)
+; CHECK: += add({{.*}},#0)
declare i32 @llvm.hexagon.M2.naccii(i32, i32, i32)
define i32 @M2_naccii(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.naccii(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: -= add({{.*}}, #0)
+; CHECK: -= add({{.*}},#0)
declare i32 @llvm.hexagon.M2.acci(i32, i32, i32)
define i32 @M2_acci(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.acci(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += add({{.*}}, {{.*}})
+; CHECK: += add({{.*}},{{.*}})
declare i32 @llvm.hexagon.M2.nacci(i32, i32, i32)
define i32 @M2_nacci(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.nacci(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= add({{.*}}, {{.*}})
+; CHECK: -= add({{.*}},{{.*}})
; Add doublewords
declare i64 @llvm.hexagon.A2.addp(i64, i64)
@@ -77,14 +77,14 @@ define i64 @A2_addp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.addp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = add({{.*}}, {{.*}})
+; CHECK: = add({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.addpsat(i64, i64)
define i64 @A2_addpsat(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.addpsat(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = add({{.*}}, {{.*}}):sat
+; CHECK: = add({{.*}},{{.*}}):sat
; Add halfword
declare i32 @llvm.hexagon.A2.addh.l16.ll(i32, i32)
@@ -92,84 +92,84 @@ define i32 @A2_addh_l16_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.l16.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.l, {{.*}}.l)
+; CHECK: = add({{.*}}.l,{{.*}}.l)
declare i32 @llvm.hexagon.A2.addh.l16.hl(i32, i32)
define i32 @A2_addh_l16_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.l16.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.l, {{.*}}.h)
+; CHECK: = add({{.*}}.l,{{.*}}.h)
declare i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32, i32)
define i32 @A2_addh_l16_sat.ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.l, {{.*}}.l):sat
+; CHECK: = add({{.*}}.l,{{.*}}.l):sat
declare i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32, i32)
define i32 @A2_addh_l16_sat.hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.l, {{.*}}.h):sat
+; CHECK: = add({{.*}}.l,{{.*}}.h):sat
declare i32 @llvm.hexagon.A2.addh.h16.ll(i32, i32)
define i32 @A2_addh_h16_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.l, {{.*}}.l):<<16
+; CHECK: = add({{.*}}.l,{{.*}}.l):<<16
declare i32 @llvm.hexagon.A2.addh.h16.lh(i32, i32)
define i32 @A2_addh_h16_lh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.lh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.l, {{.*}}.h):<<16
+; CHECK: = add({{.*}}.l,{{.*}}.h):<<16
declare i32 @llvm.hexagon.A2.addh.h16.hl(i32, i32)
define i32 @A2_addh_h16_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.h, {{.*}}.l):<<16
+; CHECK: = add({{.*}}.h,{{.*}}.l):<<16
declare i32 @llvm.hexagon.A2.addh.h16.hh(i32, i32)
define i32 @A2_addh_h16_hh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.hh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.h, {{.*}}.h):<<16
+; CHECK: = add({{.*}}.h,{{.*}}.h):<<16
declare i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32, i32)
define i32 @A2_addh_h16_sat_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.l, {{.*}}.l):sat:<<16
+; CHECK: = add({{.*}}.l,{{.*}}.l):sat:<<16
declare i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32, i32)
define i32 @A2_addh_h16_sat_lh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.l, {{.*}}.h):sat:<<16
+; CHECK: = add({{.*}}.l,{{.*}}.h):sat:<<16
declare i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32, i32)
define i32 @A2_addh_h16_sat_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.h, {{.*}}.l):sat:<<16
+; CHECK: = add({{.*}}.h,{{.*}}.l):sat:<<16
declare i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32, i32)
define i32 @A2_addh_h16_sat_hh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}.h, {{.*}}.h):sat:<<16
+; CHECK: = add({{.*}}.h,{{.*}}.h):sat:<<16
; Logical doublewords
declare i64 @llvm.hexagon.A2.notp(i64)
@@ -184,35 +184,35 @@ define i64 @A2_andp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.andp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = and({{.*}}, {{.*}})
+; CHECK: = and({{.*}},{{.*}})
declare i64 @llvm.hexagon.A4.andnp(i64, i64)
define i64 @A2_andnp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A4.andnp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = and({{.*}}, ~{{.*}})
+; CHECK: = and({{.*}},~{{.*}})
declare i64 @llvm.hexagon.A2.orp(i64, i64)
define i64 @A2_orp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.orp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = or({{.*}}, {{.*}})
+; CHECK: = or({{.*}},{{.*}})
declare i64 @llvm.hexagon.A4.ornp(i64, i64)
define i64 @A2_ornp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A4.ornp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = or({{.*}}, ~{{.*}})
+; CHECK: = or({{.*}},~{{.*}})
declare i64 @llvm.hexagon.A2.xorp(i64, i64)
define i64 @A2_xorp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.xorp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = xor({{.*}}, {{.*}})
+; CHECK: = xor({{.*}},{{.*}})
; Logical-logical doublewords
declare i64 @llvm.hexagon.M4.xor.xacc(i64, i64, i64)
@@ -220,7 +220,7 @@ define i64 @M4_xor_xacc(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.xor.xacc(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: ^= xor({{.*}}, {{.*}})
+; CHECK: ^= xor({{.*}},{{.*}})
; Logical-logical words
declare i32 @llvm.hexagon.S4.or.andi(i32, i32, i32)
@@ -228,91 +228,91 @@ define i32 @S4_or_andi(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.or.andi(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: |= and({{.*}}, #0)
+; CHECK: |= and({{.*}},#0)
declare i32 @llvm.hexagon.S4.or.andix(i32, i32, i32)
define i32 @S4_or_andix(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.or.andix(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: = or({{.*}}, and({{.*}}, #0))
+; CHECK: = or({{.*}},and({{.*}},#0))
declare i32 @llvm.hexagon.M4.or.andn(i32, i32, i32)
define i32 @M4_or_andn(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.or.andn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: |= and({{.*}}, ~{{.*}})
+; CHECK: |= and({{.*}},~{{.*}})
declare i32 @llvm.hexagon.M4.and.andn(i32, i32, i32)
define i32 @M4_and_andn(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.and.andn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: &= and({{.*}}, ~{{.*}})
+; CHECK: &= and({{.*}},~{{.*}})
declare i32 @llvm.hexagon.M4.xor.andn(i32, i32, i32)
define i32 @M4_xor_andn(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.xor.andn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: ^= and({{.*}}, ~{{.*}})
+; CHECK: ^= and({{.*}},~{{.*}})
declare i32 @llvm.hexagon.M4.and.and(i32, i32, i32)
define i32 @M4_and_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.and.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: &= and({{.*}}, {{.*}})
+; CHECK: &= and({{.*}},{{.*}})
declare i32 @llvm.hexagon.M4.and.or(i32, i32, i32)
define i32 @M4_and_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.and.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: &= or({{.*}}, {{.*}})
+; CHECK: &= or({{.*}},{{.*}})
declare i32 @llvm.hexagon.M4.and.xor(i32, i32, i32)
define i32 @M4_and_xor(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.and.xor(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: &= xor({{.*}}, {{.*}})
+; CHECK: &= xor({{.*}},{{.*}})
declare i32 @llvm.hexagon.M4.or.and(i32, i32, i32)
define i32 @M4_or_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.or.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: |= and({{.*}}, {{.*}})
+; CHECK: |= and({{.*}},{{.*}})
declare i32 @llvm.hexagon.M4.or.or(i32, i32, i32)
define i32 @M4_or_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.or.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: |= or({{.*}}, {{.*}})
+; CHECK: |= or({{.*}},{{.*}})
declare i32 @llvm.hexagon.M4.or.xor(i32, i32, i32)
define i32 @M4_or_xor(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.or.xor(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: |= xor({{.*}}, {{.*}})
+; CHECK: |= xor({{.*}},{{.*}})
declare i32 @llvm.hexagon.M4.xor.and(i32, i32, i32)
define i32 @M4_xor_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.xor.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: ^= and({{.*}}, {{.*}})
+; CHECK: ^= and({{.*}},{{.*}})
declare i32 @llvm.hexagon.M4.xor.or(i32, i32, i32)
define i32 @M4_xor_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.xor.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: ^= or({{.*}}, {{.*}})
+; CHECK: ^= or({{.*}},{{.*}})
; Maximum words
declare i32 @llvm.hexagon.A2.max(i32, i32)
@@ -320,14 +320,14 @@ define i32 @A2_max(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.max(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = max({{.*}}, {{.*}})
+; CHECK: = max({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.maxu(i32, i32)
define i32 @A2_maxu(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.maxu(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = maxu({{.*}}, {{.*}})
+; CHECK: = maxu({{.*}},{{.*}})
; Maximum doublewords
declare i64 @llvm.hexagon.A2.maxp(i64, i64)
@@ -335,14 +335,14 @@ define i64 @A2_maxp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.maxp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = max({{.*}}, {{.*}})
+; CHECK: = max({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.maxup(i64, i64)
define i64 @A2_maxup(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.maxup(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = maxu({{.*}}, {{.*}})
+; CHECK: = maxu({{.*}},{{.*}})
; Minimum words
declare i32 @llvm.hexagon.A2.min(i32, i32)
@@ -350,14 +350,14 @@ define i32 @A2_min(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.min(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = min({{.*}}, {{.*}})
+; CHECK: = min({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.minu(i32, i32)
define i32 @A2_minu(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.minu(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = minu({{.*}}, {{.*}})
+; CHECK: = minu({{.*}},{{.*}})
; Minimum doublewords
declare i64 @llvm.hexagon.A2.minp(i64, i64)
@@ -365,14 +365,14 @@ define i64 @A2_minp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.minp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = min({{.*}}, {{.*}})
+; CHECK: = min({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.minup(i64, i64)
define i64 @A2_minup(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.minup(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = minu({{.*}}, {{.*}})
+; CHECK: = minu({{.*}},{{.*}})
; Module wrap
declare i32 @llvm.hexagon.A4.modwrapu(i32, i32)
@@ -380,7 +380,7 @@ define i32 @A4_modwrapu(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.modwrapu(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = modwrap({{.*}}, {{.*}})
+; CHECK: = modwrap({{.*}},{{.*}})
; Negate
declare i64 @llvm.hexagon.A2.negp(i64)
@@ -410,42 +410,42 @@ define i32 @A4_cround_ri(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cround.ri(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = cround({{.*}}, #0)
+; CHECK: = cround({{.*}},#0)
declare i32 @llvm.hexagon.A4.round.ri(i32, i32)
define i32 @A4_round_ri(i32 %a) {
%z = call i32 @llvm.hexagon.A4.round.ri(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = round({{.*}}, #0)
+; CHECK: = round({{.*}},#0)
declare i32 @llvm.hexagon.A4.round.ri.sat(i32, i32)
define i32 @A4_round_ri_sat(i32 %a) {
%z = call i32 @llvm.hexagon.A4.round.ri.sat(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = round({{.*}}, #0):sat
+; CHECK: = round({{.*}},#0):sat
declare i32 @llvm.hexagon.A4.cround.rr(i32, i32)
define i32 @A4_cround_rr(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cround.rr(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cround({{.*}}, {{.*}})
+; CHECK: = cround({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.round.rr(i32, i32)
define i32 @A4_round_rr(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.round.rr(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = round({{.*}}, {{.*}})
+; CHECK: = round({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.round.rr.sat(i32, i32)
define i32 @A4_round_rr_sat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.round.rr.sat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = round({{.*}}, {{.*}}):sat
+; CHECK: = round({{.*}},{{.*}}):sat
; Subtract doublewords
declare i64 @llvm.hexagon.A2.subp(i64, i64)
@@ -453,7 +453,7 @@ define i64 @A2_subp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.subp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = sub({{.*}}, {{.*}})
+; CHECK: = sub({{.*}},{{.*}})
; Subtract and accumulate
declare i32 @llvm.hexagon.M2.subacc(i32, i32, i32)
@@ -461,7 +461,7 @@ define i32 @M2_subacc(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.subacc(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += sub({{.*}}, {{.*}})
+; CHECK: += sub({{.*}},{{.*}})
; Subtract halfwords
declare i32 @llvm.hexagon.A2.subh.l16.ll(i32, i32)
@@ -469,84 +469,84 @@ define i32 @A2_subh_l16_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.l16.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.l, {{.*}}.l)
+; CHECK: = sub({{.*}}.l,{{.*}}.l)
declare i32 @llvm.hexagon.A2.subh.l16.hl(i32, i32)
define i32 @A2_subh_l16_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.l16.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.l, {{.*}}.h)
+; CHECK: = sub({{.*}}.l,{{.*}}.h)
declare i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32, i32)
define i32 @A2_subh_l16_sat.ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.l, {{.*}}.l):sat
+; CHECK: = sub({{.*}}.l,{{.*}}.l):sat
declare i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32, i32)
define i32 @A2_subh_l16_sat.hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.l, {{.*}}.h):sat
+; CHECK: = sub({{.*}}.l,{{.*}}.h):sat
declare i32 @llvm.hexagon.A2.subh.h16.ll(i32, i32)
define i32 @A2_subh_h16_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.l, {{.*}}.l):<<16
+; CHECK: = sub({{.*}}.l,{{.*}}.l):<<16
declare i32 @llvm.hexagon.A2.subh.h16.lh(i32, i32)
define i32 @A2_subh_h16_lh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.lh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.l, {{.*}}.h):<<16
+; CHECK: = sub({{.*}}.l,{{.*}}.h):<<16
declare i32 @llvm.hexagon.A2.subh.h16.hl(i32, i32)
define i32 @A2_subh_h16_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.h, {{.*}}.l):<<16
+; CHECK: = sub({{.*}}.h,{{.*}}.l):<<16
declare i32 @llvm.hexagon.A2.subh.h16.hh(i32, i32)
define i32 @A2_subh_h16_hh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.hh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.h, {{.*}}.h):<<16
+; CHECK: = sub({{.*}}.h,{{.*}}.h):<<16
declare i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32, i32)
define i32 @A2_subh_h16_sat_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.l, {{.*}}.l):sat:<<16
+; CHECK: = sub({{.*}}.l,{{.*}}.l):sat:<<16
declare i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32, i32)
define i32 @A2_subh_h16_sat_lh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.l, {{.*}}.h):sat:<<16
+; CHECK: = sub({{.*}}.l,{{.*}}.h):sat:<<16
declare i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32, i32)
define i32 @A2_subh_h16_sat_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.h, {{.*}}.l):sat:<<16
+; CHECK: = sub({{.*}}.h,{{.*}}.l):sat:<<16
declare i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32, i32)
define i32 @A2_subh_h16_sat_hh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = sub({{.*}}.h, {{.*}}.h):sat:<<16
+; CHECK: = sub({{.*}}.h,{{.*}}.h):sat:<<16
; Sign extend word to doubleword
declare i64 @llvm.hexagon.A2.sxtw(i32)
@@ -592,7 +592,7 @@ define i64 @M2_vabsdiffh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vabsdiffh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vabsdiffh({{.*}}, {{.*}})
+; CHECK: = vabsdiffh({{.*}},{{.*}})
; Vector absolute difference words
declare i64 @llvm.hexagon.M2.vabsdiffw(i64, i64)
@@ -600,7 +600,7 @@ define i64 @M2_vabsdiffw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vabsdiffw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vabsdiffw({{.*}}, {{.*}})
+; CHECK: = vabsdiffw({{.*}},{{.*}})
; Vector add halfwords
declare i64 @llvm.hexagon.A2.vaddh(i64, i64)
@@ -608,21 +608,21 @@ define i64 @A2_vaddh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vaddh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vaddh({{.*}}, {{.*}})
+; CHECK: = vaddh({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vaddhs(i64, i64)
define i64 @A2_vaddhs(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vaddhs(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vaddh({{.*}}, {{.*}}):sat
+; CHECK: = vaddh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.A2.vadduhs(i64, i64)
define i64 @A2_vadduhs(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vadduhs(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vadduh({{.*}}, {{.*}}):sat
+; CHECK: = vadduh({{.*}},{{.*}}):sat
; Vector add halfwords with saturate and pack to unsigned bytes
declare i32 @llvm.hexagon.A5.vaddhubs(i64, i64)
@@ -630,7 +630,7 @@ define i32 @A5_vaddhubs(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A5.vaddhubs(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vaddhub({{.*}}, {{.*}}):sat
+; CHECK: = vaddhub({{.*}},{{.*}}):sat
; Vector reduce add unsigned bytes
declare i64 @llvm.hexagon.A2.vraddub(i64, i64)
@@ -638,14 +638,14 @@ define i64 @A2_vraddub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vraddub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vraddub({{.*}}, {{.*}})
+; CHECK: = vraddub({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vraddub.acc(i64, i64, i64)
define i64 @A2_vraddub_acc(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.A2.vraddub.acc(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vraddub({{.*}}, {{.*}})
+; CHECK: += vraddub({{.*}},{{.*}})
; Vector reduce add halfwords
declare i32 @llvm.hexagon.M2.vradduh(i64, i64)
@@ -653,14 +653,14 @@ define i32 @M2_vradduh(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.M2.vradduh(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vradduh({{.*}}, {{.*}})
+; CHECK: = vradduh({{.*}},{{.*}})
declare i32 @llvm.hexagon.M2.vraddh(i64, i64)
define i32 @M2_vraddh(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.M2.vraddh(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vraddh({{.*}}, {{.*}})
+; CHECK: = vraddh({{.*}},{{.*}})
; Vector add bytes
declare i64 @llvm.hexagon.A2.vaddub(i64, i64)
@@ -668,14 +668,14 @@ define i64 @A2_vaddub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vaddub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vaddub({{.*}}, {{.*}})
+; CHECK: = vaddub({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vaddubs(i64, i64)
define i64 @A2_vaddubs(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vaddubs(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vaddub({{.*}}, {{.*}}):sat
+; CHECK: = vaddub({{.*}},{{.*}}):sat
; Vector add words
declare i64 @llvm.hexagon.A2.vaddw(i64, i64)
@@ -683,14 +683,14 @@ define i64 @A2_vaddw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vaddw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vaddw({{.*}}, {{.*}})
+; CHECK: = vaddw({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vaddws(i64, i64)
define i64 @A2_vaddws(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vaddws(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vaddw({{.*}}, {{.*}}):sat
+; CHECK: = vaddw({{.*}},{{.*}}):sat
; Vector average halfwords
declare i64 @llvm.hexagon.A2.vavgh(i64, i64)
@@ -698,56 +698,56 @@ define i64 @A2_vavgh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavgh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavgh({{.*}}, {{.*}})
+; CHECK: = vavgh({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vavghr(i64, i64)
define i64 @A2_vavghr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavghr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavgh({{.*}}, {{.*}}):rnd
+; CHECK: = vavgh({{.*}},{{.*}}):rnd
declare i64 @llvm.hexagon.A2.vavghcr(i64, i64)
define i64 @A2_vavghcr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavghcr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavgh({{.*}}, {{.*}}):crnd
+; CHECK: = vavgh({{.*}},{{.*}}):crnd
declare i64 @llvm.hexagon.A2.vavguh(i64, i64)
define i64 @A2_vavguh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavguh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavguh({{.*}}, {{.*}})
+; CHECK: = vavguh({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vavguhr(i64, i64)
define i64 @A2_vavguhr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavguhr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavguh({{.*}}, {{.*}}):rnd
+; CHECK: = vavguh({{.*}},{{.*}}):rnd
declare i64 @llvm.hexagon.A2.vnavgh(i64, i64)
define i64 @A2_vnavgh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vnavgh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vnavgh({{.*}}, {{.*}})
+; CHECK: = vnavgh({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vnavghr(i64, i64)
define i64 @A2_vnavghr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vnavghr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vnavgh({{.*}}, {{.*}}):rnd
+; CHECK: = vnavgh({{.*}},{{.*}}):rnd
declare i64 @llvm.hexagon.A2.vnavghcr(i64, i64)
define i64 @A2_vnavghcr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vnavghcr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vnavgh({{.*}}, {{.*}}):crnd
+; CHECK: = vnavgh({{.*}},{{.*}}):crnd
; Vector average unsigned bytes
declare i64 @llvm.hexagon.A2.vavgub(i64, i64)
@@ -755,14 +755,14 @@ define i64 @A2_vavgub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavgub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: vavgub({{.*}}, {{.*}})
+; CHECK: vavgub({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vavgubr(i64, i64)
define i64 @A2_vavgubr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavgubr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavgub({{.*}}, {{.*}}):rnd
+; CHECK: = vavgub({{.*}},{{.*}}):rnd
; Vector average words
declare i64 @llvm.hexagon.A2.vavgw(i64, i64)
@@ -770,56 +770,56 @@ define i64 @A2_vavgw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavgw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavgw({{.*}}, {{.*}})
+; CHECK: = vavgw({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vavgwr(i64, i64)
define i64 @A2_vavgwr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavgwr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavgw({{.*}}, {{.*}}):rnd
+; CHECK: = vavgw({{.*}},{{.*}}):rnd
declare i64 @llvm.hexagon.A2.vavgwcr(i64, i64)
define i64 @A2_vavgwcr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavgwcr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavgw({{.*}}, {{.*}}):crnd
+; CHECK: = vavgw({{.*}},{{.*}}):crnd
declare i64 @llvm.hexagon.A2.vavguw(i64, i64)
define i64 @A2_vavguw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavguw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavguw({{.*}}, {{.*}})
+; CHECK: = vavguw({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vavguwr(i64, i64)
define i64 @A2_vavguwr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavguwr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vavguw({{.*}}, {{.*}}):rnd
+; CHECK: = vavguw({{.*}},{{.*}}):rnd
declare i64 @llvm.hexagon.A2.vnavgw(i64, i64)
define i64 @A2_vnavgw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vnavgw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vnavgw({{.*}}, {{.*}})
+; CHECK: = vnavgw({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vnavgwr(i64, i64)
define i64 @A2_vnavgwr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vnavgwr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vnavgw({{.*}}, {{.*}}):rnd
+; CHECK: = vnavgw({{.*}},{{.*}}):rnd
declare i64 @llvm.hexagon.A2.vnavgwcr(i64, i64)
define i64 @A2_vnavgwcr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vnavgwcr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vnavgw({{.*}}, {{.*}}):crnd
+; CHECK: = vnavgw({{.*}},{{.*}}):crnd
; Vector conditional negate
declare i64 @llvm.hexagon.S2.vcnegh(i64, i32)
@@ -827,14 +827,14 @@ define i64 @S2_vcnegh(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.vcnegh(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vcnegh({{.*}}, {{.*}})
+; CHECK: = vcnegh({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.vrcnegh(i64, i64, i32)
define i64 @S2_vrcnegh(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.vrcnegh(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: += vrcnegh({{.*}}, {{.*}})
+; CHECK: += vrcnegh({{.*}},{{.*}})
; Vector maximum bytes
declare i64 @llvm.hexagon.A2.vmaxub(i64, i64)
@@ -842,14 +842,14 @@ define i64 @A2_vmaxub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vmaxub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmaxub({{.*}}, {{.*}})
+; CHECK: = vmaxub({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vmaxb(i64, i64)
define i64 @A2_vmaxb(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vmaxb(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmaxb({{.*}}, {{.*}})
+; CHECK: = vmaxb({{.*}},{{.*}})
; Vector maximum halfwords
declare i64 @llvm.hexagon.A2.vmaxh(i64, i64)
@@ -857,14 +857,14 @@ define i64 @A2_vmaxh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vmaxh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmaxh({{.*}}, {{.*}})
+; CHECK: = vmaxh({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vmaxuh(i64, i64)
define i64 @A2_vmaxuh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vmaxuh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmaxuh({{.*}}, {{.*}})
+; CHECK: = vmaxuh({{.*}},{{.*}})
; Vector reduce maximum halfwords
declare i64 @llvm.hexagon.A4.vrmaxh(i64, i64, i32)
@@ -872,14 +872,14 @@ define i64 @A4_vrmaxh(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrmaxh(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: = vrmaxh({{.*}}, {{.*}})
+; CHECK: = vrmaxh({{.*}},{{.*}})
declare i64 @llvm.hexagon.A4.vrmaxuh(i64, i64, i32)
define i64 @A4_vrmaxuh(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrmaxuh(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: = vrmaxuh({{.*}}, {{.*}})
+; CHECK: = vrmaxuh({{.*}},{{.*}})
; Vector reduce maximum words
declare i64 @llvm.hexagon.A4.vrmaxw(i64, i64, i32)
@@ -887,14 +887,14 @@ define i64 @A4_vrmaxw(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrmaxw(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: = vrmaxw({{.*}}, {{.*}})
+; CHECK: = vrmaxw({{.*}},{{.*}})
declare i64 @llvm.hexagon.A4.vrmaxuw(i64, i64, i32)
define i64 @A4_vrmaxuw(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrmaxuw(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: vrmaxuw({{.*}}, {{.*}})
+; CHECK: vrmaxuw({{.*}},{{.*}})
; Vector minimum bytes
declare i64 @llvm.hexagon.A2.vminub(i64, i64)
@@ -902,14 +902,14 @@ define i64 @A2_vminub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vminub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vminub({{.*}}, {{.*}})
+; CHECK: = vminub({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vminb(i64, i64)
define i64 @A2_vminb(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vminb(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vminb({{.*}}, {{.*}})
+; CHECK: = vminb({{.*}},{{.*}})
; Vector minimum halfwords
declare i64 @llvm.hexagon.A2.vminh(i64, i64)
@@ -917,14 +917,14 @@ define i64 @A2_vminh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vminh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vminh({{.*}}, {{.*}})
+; CHECK: = vminh({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vminuh(i64, i64)
define i64 @A2_vminuh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vminuh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vminuh({{.*}}, {{.*}})
+; CHECK: = vminuh({{.*}},{{.*}})
; Vector reduce minimum halfwords
declare i64 @llvm.hexagon.A4.vrminh(i64, i64, i32)
@@ -932,14 +932,14 @@ define i64 @A4_vrminh(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrminh(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: = vrminh({{.*}}, {{.*}})
+; CHECK: = vrminh({{.*}},{{.*}})
declare i64 @llvm.hexagon.A4.vrminuh(i64, i64, i32)
define i64 @A4_vrminuh(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrminuh(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: = vrminuh({{.*}}, {{.*}})
+; CHECK: = vrminuh({{.*}},{{.*}})
; Vector reduce minimum words
declare i64 @llvm.hexagon.A4.vrminw(i64, i64, i32)
@@ -947,14 +947,14 @@ define i64 @A4_vrminw(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrminw(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: = vrminw({{.*}}, {{.*}})
+; CHECK: = vrminw({{.*}},{{.*}})
declare i64 @llvm.hexagon.A4.vrminuw(i64, i64, i32)
define i64 @A4_vrminuw(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrminuw(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: = vrminuw({{.*}}, {{.*}})
+; CHECK: = vrminuw({{.*}},{{.*}})
; Vector sum of absolute differences unsigned bytes
declare i64 @llvm.hexagon.A2.vrsadub(i64, i64)
@@ -962,14 +962,14 @@ define i64 @A2_vrsadub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vrsadub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrsadub({{.*}}, {{.*}})
+; CHECK: = vrsadub({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vrsadub.acc(i64, i64, i64)
define i64 @A2_vrsadub_acc(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.A2.vrsadub.acc(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrsadub({{.*}}, {{.*}})
+; CHECK: += vrsadub({{.*}},{{.*}})
; Vector subtract halfwords
declare i64 @llvm.hexagon.A2.vsubh(i64, i64)
@@ -977,21 +977,21 @@ define i64 @A2_vsubh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsubh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vsubh({{.*}}, {{.*}})
+; CHECK: = vsubh({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vsubhs(i64, i64)
define i64 @A2_vsubhs(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsubhs(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vsubh({{.*}}, {{.*}}):sat
+; CHECK: = vsubh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.A2.vsubuhs(i64, i64)
define i64 @A2_vsubuhs(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsubuhs(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vsubuh({{.*}}, {{.*}}):sat
+; CHECK: = vsubuh({{.*}},{{.*}}):sat
; Vector subtract bytes
declare i64 @llvm.hexagon.A2.vsubub(i64, i64)
@@ -999,14 +999,14 @@ define i64 @A2_vsubub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsubub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vsubub({{.*}}, {{.*}})
+; CHECK: = vsubub({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vsububs(i64, i64)
define i64 @A2_vsububs(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsububs(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vsubub({{.*}}, {{.*}}):sat
+; CHECK: = vsubub({{.*}},{{.*}}):sat
; Vector subtract words
declare i64 @llvm.hexagon.A2.vsubw(i64, i64)
@@ -1014,11 +1014,11 @@ define i64 @A2_vsubw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsubw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vsubw({{.*}}, {{.*}})
+; CHECK: = vsubw({{.*}},{{.*}})
declare i64 @llvm.hexagon.A2.vsubws(i64, i64)
define i64 @A2_vsubws(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsubws(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vsubw({{.*}}, {{.*}}):sat
+; CHECK: = vsubw({{.*}},{{.*}}):sat
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll b/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll
index e8f83d01820a..ec7613e3ef2a 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll
@@ -38,14 +38,14 @@ define i32 @S4_clbpaddi(i64 %a) {
%z = call i32 @llvm.hexagon.S4.clbpaddi(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = add(clb({{.*}}), #0)
+; CHECK: = add(clb({{.*}}),#0)
declare i32 @llvm.hexagon.S4.clbaddi(i32, i32)
define i32 @S4_clbaddi(i32 %a) {
%z = call i32 @llvm.hexagon.S4.clbaddi(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = add(clb({{.*}}), #0)
+; CHECK: = add(clb({{.*}}),#0)
declare i32 @llvm.hexagon.S2.cl0(i32)
define i32 @S2_cl0(i32 %a) {
@@ -111,56 +111,56 @@ define i64 @S2_extractup(i64 %a) {
%z = call i64 @llvm.hexagon.S2.extractup(i64 %a, i32 0, i32 0)
ret i64 %z
}
-; CHECK: = extractu({{.*}}, #0, #0)
+; CHECK: = extractu({{.*}},#0,#0)
declare i64 @llvm.hexagon.S4.extractp(i64, i32, i32)
define i64 @S2_extractp(i64 %a) {
%z = call i64 @llvm.hexagon.S4.extractp(i64 %a, i32 0, i32 0)
ret i64 %z
}
-; CHECK: = extract({{.*}}, #0, #0)
+; CHECK: = extract({{.*}},#0,#0)
declare i32 @llvm.hexagon.S2.extractu(i32, i32, i32)
define i32 @S2_extractu(i32 %a) {
%z = call i32 @llvm.hexagon.S2.extractu(i32 %a, i32 0, i32 0)
ret i32 %z
}
-; CHECK: = extractu({{.*}}, #0, #0)
+; CHECK: = extractu({{.*}},#0,#0)
declare i32 @llvm.hexagon.S4.extract(i32, i32, i32)
define i32 @S2_extract(i32 %a) {
%z = call i32 @llvm.hexagon.S4.extract(i32 %a, i32 0, i32 0)
ret i32 %z
}
-; CHECK: = extract({{.*}}, #0, #0)
+; CHECK: = extract({{.*}},#0,#0)
declare i64 @llvm.hexagon.S2.extractup.rp(i64, i64)
define i64 @S2_extractup_rp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.extractup.rp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = extractu({{.*}}, {{.*}})
+; CHECK: = extractu({{.*}},{{.*}})
declare i64 @llvm.hexagon.S4.extractp.rp(i64, i64)
define i64 @S4_extractp_rp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.extractp.rp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = extract({{.*}}, {{.*}})
+; CHECK: = extract({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.extractu.rp(i32, i64)
define i32 @S2_extractu_rp(i32 %a, i64 %b) {
%z = call i32 @llvm.hexagon.S2.extractu.rp(i32 %a, i64 %b)
ret i32 %z
}
-; CHECK: = extractu({{.*}}, {{.*}})
+; CHECK: = extractu({{.*}},{{.*}})
declare i32 @llvm.hexagon.S4.extract.rp(i32, i64)
define i32 @S4_extract_rp(i32 %a, i64 %b) {
%z = call i32 @llvm.hexagon.S4.extract.rp(i32 %a, i64 %b)
ret i32 %z
}
-; CHECK: = extract({{.*}}, {{.*}})
+; CHECK: = extract({{.*}},{{.*}})
; Insert bitfield
declare i64 @llvm.hexagon.S2.insertp(i64, i64, i32, i32)
@@ -168,28 +168,28 @@ define i64 @S2_insertp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.insertp(i64 %a, i64 %b, i32 0, i32 0)
ret i64 %z
}
-; CHECK: = insert({{.*}}, #0, #0)
+; CHECK: = insert({{.*}},#0,#0)
declare i32 @llvm.hexagon.S2.insert(i32, i32, i32, i32)
define i32 @S2_insert(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.insert(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
-; CHECK: = insert({{.*}}, #0, #0)
+; CHECK: = insert({{.*}},#0,#0)
declare i32 @llvm.hexagon.S2.insert.rp(i32, i32, i64)
define i32 @S2_insert_rp(i32 %a, i32 %b, i64 %c) {
%z = call i32 @llvm.hexagon.S2.insert.rp(i32 %a, i32 %b, i64 %c)
ret i32 %z
}
-; CHECK: = insert({{.*}}, {{.*}})
+; CHECK: = insert({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.insertp.rp(i64, i64, i64)
define i64 @S2_insertp_rp(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.S2.insertp.rp(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: = insert({{.*}}, r5:4)
+; CHECK: = insert({{.*}},r5:4)
; Interleave/deinterleave
declare i64 @llvm.hexagon.S2.deinterleave(i64)
@@ -212,7 +212,7 @@ define i64 @S2_lfsp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lfsp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = lfs({{.*}}, {{.*}})
+; CHECK: = lfs({{.*}},{{.*}})
; Masked parity
declare i32 @llvm.hexagon.S2.parityp(i64, i64)
@@ -220,14 +220,14 @@ define i32 @S2_parityp(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.S2.parityp(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = parity({{.*}}, {{.*}})
+; CHECK: = parity({{.*}},{{.*}})
declare i32 @llvm.hexagon.S4.parity(i32, i32)
define i32 @S4_parity(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.parity(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = parity({{.*}}, {{.*}})
+; CHECK: = parity({{.*}},{{.*}})
; Bit reverse
declare i64 @llvm.hexagon.S2.brevp(i64)
@@ -250,42 +250,42 @@ define i32 @S2_setbit_i(i32 %a) {
%z = call i32 @llvm.hexagon.S2.setbit.i(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = setbit({{.*}}, #0)
+; CHECK: = setbit({{.*}},#0)
declare i32 @llvm.hexagon.S2.clrbit.i(i32, i32)
define i32 @S2_clrbit_i(i32 %a) {
%z = call i32 @llvm.hexagon.S2.clrbit.i(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = clrbit({{.*}}, #0)
+; CHECK: = clrbit({{.*}},#0)
declare i32 @llvm.hexagon.S2.togglebit.i(i32, i32)
define i32 @S2_togglebit_i(i32 %a) {
%z = call i32 @llvm.hexagon.S2.togglebit.i(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = togglebit({{.*}}, #0)
+; CHECK: = togglebit({{.*}},#0)
declare i32 @llvm.hexagon.S2.setbit.r(i32, i32)
define i32 @S2_setbit_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.setbit.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = setbit({{.*}}, {{.*}})
+; CHECK: = setbit({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.clrbit.r(i32, i32)
define i32 @S2_clrbit_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.clrbit.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = clrbit({{.*}}, {{.*}})
+; CHECK: = clrbit({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.togglebit.r(i32, i32)
define i32 @S2_togglebit_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.togglebit.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = togglebit({{.*}}, {{.*}})
+; CHECK: = togglebit({{.*}},{{.*}})
; Split bitfield
declare i64 @llvm.hexagon.A4.bitspliti(i32, i32)
@@ -293,14 +293,14 @@ define i64 @A4_bitspliti(i32 %a) {
%z = call i64 @llvm.hexagon.A4.bitspliti(i32 %a, i32 0)
ret i64 %z
}
-; CHECK: = bitsplit({{.*}}, #0)
+; CHECK: = bitsplit({{.*}},#0)
declare i64 @llvm.hexagon.A4.bitsplit(i32, i32)
define i64 @A4_bitsplit(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.A4.bitsplit(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = bitsplit({{.*}}, {{.*}})
+; CHECK: = bitsplit({{.*}},{{.*}})
; Table index
declare i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32, i32, i32, i32)
@@ -308,25 +308,25 @@ define i32 @S2_tableidxb_goodsyntax(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
-; CHECK: = tableidxb({{.*}}, #0, #0)
+; CHECK: = tableidxb({{.*}},#0,#0)
declare i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32, i32, i32, i32)
define i32 @S2_tableidxh_goodsyntax(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
-; CHECK: = tableidxh({{.*}}, #0, #-1)
+; CHECK: = tableidxh({{.*}},#0,#-1)
declare i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32, i32, i32, i32)
define i32 @S2_tableidxw_goodsyntax(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
-; CHECK: = tableidxw({{.*}}, #0, #-2)
+; CHECK: = tableidxw({{.*}},#0,#-2)
declare i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32, i32, i32, i32)
define i32 @S2_tableidxd_goodsyntax(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
-; CHECK: = tableidxd({{.*}}, #0, #-3)
+; CHECK: = tableidxd({{.*}},#0,#-3)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll b/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll
index 0087883573ec..254b928aa982 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll
@@ -10,28 +10,28 @@ define i64 @S4_vxaddsubh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxaddsubh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vxaddsubh({{.*}}, {{.*}}):sat
+; CHECK: = vxaddsubh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.S4.vxsubaddh(i64, i64)
define i64 @S4_vxsubaddh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxsubaddh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vxsubaddh({{.*}}, {{.*}}):sat
+; CHECK: = vxsubaddh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.S4.vxaddsubhr(i64, i64)
define i64 @S4_vxaddsubhr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxaddsubhr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vxaddsubh({{.*}}, {{.*}}):rnd:>>1:sat
+; CHECK: = vxaddsubh({{.*}},{{.*}}):rnd:>>1:sat
declare i64 @llvm.hexagon.S4.vxsubaddhr(i64, i64)
define i64 @S4_vxsubaddhr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxsubaddhr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vxsubaddh({{.*}}, {{.*}}):rnd:>>1:sat
+; CHECK: = vxsubaddh({{.*}},{{.*}}):rnd:>>1:sat
; Complex add/sub words
declare i64 @llvm.hexagon.S4.vxaddsubw(i64, i64)
@@ -39,14 +39,14 @@ define i64 @S4_vxaddsubw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxaddsubw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vxaddsubw({{.*}}, {{.*}}):sat
+; CHECK: = vxaddsubw({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.S4.vxsubaddw(i64, i64)
define i64 @S4_vxsubaddw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxsubaddw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vxsubaddw({{.*}}, {{.*}}):sat
+; CHECK: = vxsubaddw({{.*}},{{.*}}):sat
; Complex multiply
declare i64 @llvm.hexagon.M2.cmpys.s0(i32, i32)
@@ -54,84 +54,84 @@ define i64 @M2_cmpys_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpys.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = cmpy({{.*}}, {{.*}}):sat
+; CHECK: = cmpy({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.cmpys.s1(i32, i32)
define i64 @M2_cmpys_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpys.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = cmpy({{.*}}, {{.*}}):<<1:sat
+; CHECK: = cmpy({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.cmpysc.s0(i32, i32)
define i64 @M2_cmpysc_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpysc.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = cmpy({{.*}}, {{.*}}*):sat
+; CHECK: = cmpy({{.*}},{{.*}}*):sat
declare i64 @llvm.hexagon.M2.cmpysc.s1(i32, i32)
define i64 @M2_cmpysc_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpysc.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = cmpy({{.*}}, {{.*}}*):<<1:sat
+; CHECK: = cmpy({{.*}},{{.*}}*):<<1:sat
declare i64 @llvm.hexagon.M2.cmacs.s0(i64, i32, i32)
define i64 @M2_cmacs_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacs.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += cmpy({{.*}}, {{.*}}):sat
+; CHECK: += cmpy({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.cmacs.s1(i64, i32, i32)
define i64 @M2_cmacs_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacs.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += cmpy({{.*}}, {{.*}}):<<1:sat
+; CHECK: += cmpy({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.cnacs.s0(i64, i32, i32)
define i64 @M2_cnacs_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cnacs.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= cmpy({{.*}}, {{.*}}):sat
+; CHECK: -= cmpy({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.cnacs.s1(i64, i32, i32)
define i64 @M2_cnacs_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cnacs.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= cmpy({{.*}}, {{.*}}):<<1:sat
+; CHECK: -= cmpy({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.cmacsc.s0(i64, i32, i32)
define i64 @M2_cmacsc_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacsc.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += cmpy({{.*}}, {{.*}}*):sat
+; CHECK: += cmpy({{.*}},{{.*}}*):sat
declare i64 @llvm.hexagon.M2.cmacsc.s1(i64, i32, i32)
define i64 @M2_cmacsc_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacsc.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += cmpy({{.*}}, {{.*}}*):<<1:sat
+; CHECK: += cmpy({{.*}},{{.*}}*):<<1:sat
declare i64 @llvm.hexagon.M2.cnacsc.s0(i64, i32, i32)
define i64 @M2_cnacsc_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cnacsc.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= cmpy({{.*}}, {{.*}}*):sat
+; CHECK: -= cmpy({{.*}},{{.*}}*):sat
declare i64 @llvm.hexagon.M2.cnacsc.s1(i64, i32, i32)
define i64 @M2_cnacsc_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cnacsc.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= cmpy({{.*}}, {{.*}}*):<<1:sat
+; CHECK: -= cmpy({{.*}},{{.*}}*):<<1:sat
; Complex multiply real or imaginary
declare i64 @llvm.hexagon.M2.cmpyi.s0(i32, i32)
@@ -139,28 +139,28 @@ define i64 @M2_cmpyi_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpyi.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = cmpyi({{.*}}, {{.*}})
+; CHECK: = cmpyi({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.cmpyr.s0(i32, i32)
define i64 @M2_cmpyr_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpyr.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = cmpyr({{.*}}, {{.*}})
+; CHECK: = cmpyr({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.cmaci.s0(i64, i32, i32)
define i64 @M2_cmaci_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmaci.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += cmpyi({{.*}}, {{.*}})
+; CHECK: += cmpyi({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.cmacr.s0(i64, i32, i32)
define i64 @M2_cmacr_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacr.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += cmpyr({{.*}}, {{.*}})
+; CHECK: += cmpyr({{.*}},{{.*}})
; Complex multiply with round and pack
declare i32 @llvm.hexagon.M2.cmpyrs.s0(i32, i32)
@@ -168,28 +168,28 @@ define i32 @M2_cmpyrs_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.cmpyrs.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpy({{.*}}, {{.*}}):rnd:sat
+; CHECK: = cmpy({{.*}},{{.*}}):rnd:sat
declare i32 @llvm.hexagon.M2.cmpyrs.s1(i32, i32)
define i32 @M2_cmpyrs_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.cmpyrs.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpy({{.*}}, {{.*}}):<<1:rnd:sat
+; CHECK: = cmpy({{.*}},{{.*}}):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.cmpyrsc.s0(i32, i32)
define i32 @M2_cmpyrsc_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.cmpyrsc.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpy({{.*}}, {{.*}}*):rnd:sat
+; CHECK: = cmpy({{.*}},{{.*}}*):rnd:sat
declare i32 @llvm.hexagon.M2.cmpyrsc.s1(i32, i32)
define i32 @M2_cmpyrsc_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.cmpyrsc.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpy({{.*}}, {{.*}}*):<<1:rnd:sat
+; CHECK: = cmpy({{.*}},{{.*}}*):<<1:rnd:sat
; Complex multiply 32x16
declare i32 @llvm.hexagon.M4.cmpyi.wh(i64, i32)
@@ -197,28 +197,28 @@ define i32 @M4_cmpyi_wh(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.cmpyi.wh(i64 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpyiwh({{.*}}, {{.*}}):<<1:rnd:sat
+; CHECK: = cmpyiwh({{.*}},{{.*}}):<<1:rnd:sat
declare i32 @llvm.hexagon.M4.cmpyi.whc(i64, i32)
define i32 @M4_cmpyi_whc(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.cmpyi.whc(i64 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpyiwh({{.*}}, {{.*}}*):<<1:rnd:sat
+; CHECK: = cmpyiwh({{.*}},{{.*}}*):<<1:rnd:sat
declare i32 @llvm.hexagon.M4.cmpyr.wh(i64, i32)
define i32 @M4_cmpyr_wh(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.cmpyr.wh(i64 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpyrwh({{.*}}, {{.*}}):<<1:rnd:sat
+; CHECK: = cmpyrwh({{.*}},{{.*}}):<<1:rnd:sat
declare i32 @llvm.hexagon.M4.cmpyr.whc(i64, i32)
define i32 @M4_cmpyr_whc(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.cmpyr.whc(i64 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpyrwh({{.*}}, {{.*}}*):<<1:rnd:sat
+; CHECK: = cmpyrwh({{.*}},{{.*}}*):<<1:rnd:sat
; Vector complex multiply real or imaginary
declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64, i64)
@@ -226,42 +226,42 @@ define i64 @M2_vcmpy_s0_sat_r(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vcmpyr({{.*}}, {{.*}}):sat
+; CHECK: = vcmpyr({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64, i64)
define i64 @M2_vcmpy_s1_sat_r(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vcmpyr({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vcmpyr({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64, i64)
define i64 @M2_vcmpy_s0_sat_i(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vcmpyi({{.*}}, {{.*}}):sat
+; CHECK: = vcmpyi({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64, i64)
define i64 @M2_vcmpy_s1_sat_i(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vcmpyi({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vcmpyi({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64, i64, i64)
define i64 @M2_vcmac_s0_sat_r(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vcmpyr({{.*}}, r5:4):sat
+; CHECK: += vcmpyr({{.*}},r5:4):sat
declare i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64, i64, i64)
define i64 @M2_vcmac_s0_sat_i(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vcmpyi({{.*}}, r5:4):sat
+; CHECK: += vcmpyi({{.*}},r5:4):sat
; Vector complex conjugate
declare i64 @llvm.hexagon.A2.vconj(i64)
@@ -277,7 +277,7 @@ define i64 @S2_vcrotate(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.vcrotate(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vcrotate({{.*}}, {{.*}})
+; CHECK: = vcrotate({{.*}},{{.*}})
; Vector reduce complex multiply real or imaginary
declare i64 @llvm.hexagon.M2.vrcmpyi.s0(i64, i64)
@@ -285,56 +285,56 @@ define i64 @M2_vrcmpyi_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrcmpyi.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrcmpyi({{.*}}, {{.*}})
+; CHECK: = vrcmpyi({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.vrcmpyr.s0(i64, i64)
define i64 @M2_vrcmpyr_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrcmpyr.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrcmpyr({{.*}}, {{.*}})
+; CHECK: = vrcmpyr({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64, i64)
define i64 @M2_vrcmpyi_s0c(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrcmpyi({{.*}}, {{.*}}*)
+; CHECK: = vrcmpyi({{.*}},{{.*}}*)
declare i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64, i64)
define i64 @M2_vrcmpyr_s0c(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrcmpyr({{.*}}, {{.*}}*)
+; CHECK: = vrcmpyr({{.*}},{{.*}}*)
declare i64 @llvm.hexagon.M2.vrcmaci.s0(i64, i64, i64)
define i64 @M2_vrcmaci_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrcmaci.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrcmpyi({{.*}}, r5:4)
+; CHECK: += vrcmpyi({{.*}},r5:4)
declare i64 @llvm.hexagon.M2.vrcmacr.s0(i64, i64, i64)
define i64 @M2_vrcmacr_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrcmacr.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrcmpyr({{.*}}, r5:4)
+; CHECK: += vrcmpyr({{.*}},r5:4)
declare i64 @llvm.hexagon.M2.vrcmaci.s0c(i64, i64, i64)
define i64 @M2_vrcmaci_s0c(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrcmaci.s0c(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrcmpyi({{.*}}, r5:4*)
+; CHECK: += vrcmpyi({{.*}},r5:4*)
declare i64 @llvm.hexagon.M2.vrcmacr.s0c(i64, i64, i64)
define i64 @M2_vrcmacr_s0c(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrcmacr.s0c(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrcmpyr({{.*}}, r5:4*)
+; CHECK: += vrcmpyr({{.*}},r5:4*)
; Vector reduce complex rotate
declare i64 @llvm.hexagon.S4.vrcrotate(i64, i32, i32)
@@ -342,11 +342,11 @@ define i64 @S4_vrcrotate(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S4.vrcrotate(i64 %a, i32 %b, i32 0)
ret i64 %z
}
-; CHECK: = vrcrotate({{.*}}, {{.*}}, #0)
+; CHECK: = vrcrotate({{.*}},{{.*}},#0)
declare i64 @llvm.hexagon.S4.vrcrotate.acc(i64, i64, i32, i32)
define i64 @S4_vrcrotate_acc(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S4.vrcrotate.acc(i64 %a, i64 %b, i32 %c, i32 0)
ret i64 %z
}
-; CHECK: += vrcrotate({{.*}}, {{.*}}, #0)
+; CHECK: += vrcrotate({{.*}},{{.*}},#0)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll b/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll
index 598d0a83206d..ee56e9051621 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll
@@ -11,7 +11,7 @@ define float @F2_sfadd(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfadd(float %a, float %b)
ret float %z
}
-; CHECK: = sfadd({{.*}}, {{.*}})
+; CHECK: = sfadd({{.*}},{{.*}})
; Classify floating-point value
declare i32 @llvm.hexagon.F2.sfclass(float, i32)
@@ -19,14 +19,14 @@ define i32 @F2_sfclass(float %a) {
%z = call i32 @llvm.hexagon.F2.sfclass(float %a, i32 0)
ret i32 %z
}
-; CHECK: = sfclass({{.*}}, #0)
+; CHECK: = sfclass({{.*}},#0)
declare i32 @llvm.hexagon.F2.dfclass(double, i32)
define i32 @F2_dfclass(double %a) {
%z = call i32 @llvm.hexagon.F2.dfclass(double %a, i32 0)
ret i32 %z
}
-; CHECK: = dfclass({{.*}}, #0)
+; CHECK: = dfclass({{.*}},#0)
; Compare floating-point value
declare i32 @llvm.hexagon.F2.sfcmpge(float, float)
@@ -34,56 +34,56 @@ define i32 @F2_sfcmpge(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpge(float %a, float %b)
ret i32 %z
}
-; CHECK: = sfcmp.ge({{.*}}, {{.*}})
+; CHECK: = sfcmp.ge({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.sfcmpuo(float, float)
define i32 @F2_sfcmpuo(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpuo(float %a, float %b)
ret i32 %z
}
-; CHECK: = sfcmp.uo({{.*}}, {{.*}})
+; CHECK: = sfcmp.uo({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.sfcmpeq(float, float)
define i32 @F2_sfcmpeq(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpeq(float %a, float %b)
ret i32 %z
}
-; CHECK: = sfcmp.eq({{.*}}, {{.*}})
+; CHECK: = sfcmp.eq({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.sfcmpgt(float, float)
define i32 @F2_sfcmpgt(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpgt(float %a, float %b)
ret i32 %z
}
-; CHECK: = sfcmp.gt({{.*}}, {{.*}})
+; CHECK: = sfcmp.gt({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.dfcmpge(double, double)
define i32 @F2_dfcmpge(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpge(double %a, double %b)
ret i32 %z
}
-; CHECK: = dfcmp.ge({{.*}}, {{.*}})
+; CHECK: = dfcmp.ge({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.dfcmpuo(double, double)
define i32 @F2_dfcmpuo(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpuo(double %a, double %b)
ret i32 %z
}
-; CHECK: = dfcmp.uo({{.*}}, {{.*}})
+; CHECK: = dfcmp.uo({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.dfcmpeq(double, double)
define i32 @F2_dfcmpeq(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpeq(double %a, double %b)
ret i32 %z
}
-; CHECK: = dfcmp.eq({{.*}}, {{.*}})
+; CHECK: = dfcmp.eq({{.*}},{{.*}})
declare i32 @llvm.hexagon.F2.dfcmpgt(double, double)
define i32 @F2_dfcmpgt(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpgt(double %a, double %b)
ret i32 %z
}
-; CHECK: = dfcmp.gt({{.*}}, {{.*}})
+; CHECK: = dfcmp.gt({{.*}},{{.*}})
; Convert floating-point value to other format
declare double @llvm.hexagon.F2.conv.sf2df(float)
@@ -283,14 +283,14 @@ define float @F2_sffixupn(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sffixupn(float %a, float %b)
ret float %z
}
-; CHECK: = sffixupn({{.*}}, {{.*}})
+; CHECK: = sffixupn({{.*}},{{.*}})
declare float @llvm.hexagon.F2.sffixupd(float, float)
define float @F2_sffixupd(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sffixupd(float %a, float %b)
ret float %z
}
-; CHECK: = sffixupd({{.*}}, {{.*}})
+; CHECK: = sffixupd({{.*}},{{.*}})
; Floating point fused multiply-add
declare float @llvm.hexagon.F2.sffma(float, float, float)
@@ -298,14 +298,14 @@ define float @F2_sffma(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffma(float %a, float %b, float %c)
ret float %z
}
-; CHECK: += sfmpy({{.*}}, {{.*}})
+; CHECK: += sfmpy({{.*}},{{.*}})
declare float @llvm.hexagon.F2.sffms(float, float, float)
define float @F2_sffms(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffms(float %a, float %b, float %c)
ret float %z
}
-; CHECK: -= sfmpy({{.*}}, {{.*}})
+; CHECK: -= sfmpy({{.*}},{{.*}})
; Floating point fused multiply-add with scaling
declare float @llvm.hexagon.F2.sffma.sc(float, float, float, i32)
@@ -313,7 +313,7 @@ define float @F2_sffma_sc(float %a, float %b, float %c, i32 %d) {
%z = call float @llvm.hexagon.F2.sffma.sc(float %a, float %b, float %c, i32 %d)
ret float %z
}
-; CHECK: += sfmpy({{.*}}, {{.*}}, {{.*}}):scale
+; CHECK: += sfmpy({{.*}},{{.*}},{{.*}}):scale
; Floating point fused multiply-add for library routines
declare float @llvm.hexagon.F2.sffma.lib(float, float, float)
@@ -321,14 +321,14 @@ define float @F2_sffma_lib(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffma.lib(float %a, float %b, float %c)
ret float %z
}
-; CHECK: += sfmpy({{.*}}, {{.*}}):lib
+; CHECK: += sfmpy({{.*}},{{.*}}):lib
declare float @llvm.hexagon.F2.sffms.lib(float, float, float)
define float @F2_sffms_lib(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffms.lib(float %a, float %b, float %c)
ret float %z
}
-; CHECK: -= sfmpy({{.*}}, {{.*}}):lib
+; CHECK: -= sfmpy({{.*}},{{.*}}):lib
; Create floating-point constant
declare float @llvm.hexagon.F2.sfimm.p(i32)
@@ -365,7 +365,7 @@ define float @F2_sfmax(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfmax(float %a, float %b)
ret float %z
}
-; CHECK: = sfmax({{.*}}, {{.*}})
+; CHECK: = sfmax({{.*}},{{.*}})
; Floating point minimum
declare float @llvm.hexagon.F2.sfmin(float, float)
@@ -373,7 +373,7 @@ define float @F2_sfmin(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfmin(float %a, float %b)
ret float %z
}
-; CHECK: = sfmin({{.*}}, {{.*}})
+; CHECK: = sfmin({{.*}},{{.*}})
; Floating point multiply
declare float @llvm.hexagon.F2.sfmpy(float, float)
@@ -381,7 +381,7 @@ define float @F2_sfmpy(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfmpy(float %a, float %b)
ret float %z
}
-; CHECK: = sfmpy({{.*}}, {{.*}})
+; CHECK: = sfmpy({{.*}},{{.*}})
; Floating point subtraction
declare float @llvm.hexagon.F2.sfsub(float, float)
@@ -389,4 +389,4 @@ define float @F2_sfsub(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfsub(float %a, float %b)
ret float %z
}
-; CHECK: = sfsub({{.*}}, {{.*}})
+; CHECK: = sfsub({{.*}},{{.*}})
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll b/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll
index a1490499fbf6..4da4a8a6393f 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll
@@ -11,35 +11,35 @@ define i32 @M4_mpyrr_addi(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.mpyrr.addi(i32 0, i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add(#0, mpyi({{.*}}, {{.*}}))
+; CHECK: = add(#0,mpyi({{.*}},{{.*}}))
declare i32 @llvm.hexagon.M4.mpyri.addi(i32, i32, i32)
define i32 @M4_mpyri_addi(i32 %a) {
%z = call i32 @llvm.hexagon.M4.mpyri.addi(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = add(#0, mpyi({{.*}}, #0))
+; CHECK: = add(#0,mpyi({{.*}},#0))
declare i32 @llvm.hexagon.M4.mpyri.addr.u2(i32, i32, i32)
define i32 @M4_mpyri_addr_u2(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.mpyri.addr.u2(i32 %a, i32 0, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}, mpyi(#0, {{.*}}))
+; CHECK: = add({{.*}},mpyi(#0,{{.*}}))
declare i32 @llvm.hexagon.M4.mpyri.addr(i32, i32, i32)
define i32 @M4_mpyri_addr(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.mpyri.addr(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: = add({{.*}}, mpyi({{.*}}, #0))
+; CHECK: = add({{.*}},mpyi({{.*}},#0))
declare i32 @llvm.hexagon.M4.mpyrr.addr(i32, i32, i32)
define i32 @M4_mpyrr_addr(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.mpyrr.addr(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = add({{.*}}, mpyi({{.*}}, {{.*}}))
+; CHECK: = add({{.*}},mpyi({{.*}},{{.*}}))
; Vector multiply word by signed half (32x16)
declare i64 @llvm.hexagon.M2.mmpyl.s0(i64, i64)
@@ -47,56 +47,56 @@ define i64 @M2_mmpyl_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyl.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweh({{.*}}, {{.*}}):sat
+; CHECK: = vmpyweh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.mmpyl.s1(i64, i64)
define i64 @M2_mmpyl_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyl.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweh({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpyweh({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.mmpyh.s0(i64, i64)
define i64 @M2_mmpyh_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyh.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywoh({{.*}}, {{.*}}):sat
+; CHECK: = vmpywoh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.mmpyh.s1(i64, i64)
define i64 @M2_mmpyh_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyh.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywoh({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpywoh({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.mmpyl.rs0(i64, i64)
define i64 @M2_mmpyl_rs0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyl.rs0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweh({{.*}}, {{.*}}):rnd:sat
+; CHECK: = vmpyweh({{.*}},{{.*}}):rnd:sat
declare i64 @llvm.hexagon.M2.mmpyl.rs1(i64, i64)
define i64 @M2_mmpyl_rs1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyl.rs1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweh({{.*}}, {{.*}}):<<1:rnd:sat
+; CHECK: = vmpyweh({{.*}},{{.*}}):<<1:rnd:sat
declare i64 @llvm.hexagon.M2.mmpyh.rs0(i64, i64)
define i64 @M2_mmpyh_rs0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyh.rs0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywoh({{.*}}, {{.*}}):rnd:sat
+; CHECK: = vmpywoh({{.*}},{{.*}}):rnd:sat
declare i64 @llvm.hexagon.M2.mmpyh.rs1(i64, i64)
define i64 @M2_mmpyh_rs1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyh.rs1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywoh({{.*}}, {{.*}}):<<1:rnd:sat
+; CHECK: = vmpywoh({{.*}},{{.*}}):<<1:rnd:sat
; Vector multiply word by unsigned half (32x16)
declare i64 @llvm.hexagon.M2.mmpyul.s0(i64, i64)
@@ -104,56 +104,56 @@ define i64 @M2_mmpyul_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyul.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweuh({{.*}}, {{.*}}):sat
+; CHECK: = vmpyweuh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.mmpyul.s1(i64, i64)
define i64 @M2_mmpyul_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyul.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweuh({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpyweuh({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.mmpyuh.s0(i64, i64)
define i64 @M2_mmpyuh_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyuh.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywouh({{.*}}, {{.*}}):sat
+; CHECK: = vmpywouh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.mmpyuh.s1(i64, i64)
define i64 @M2_mmpyuh_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyuh.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywouh({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpywouh({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.mmpyul.rs0(i64, i64)
define i64 @M2_mmpyul_rs0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyul.rs0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweuh({{.*}}, {{.*}}):rnd:sat
+; CHECK: = vmpyweuh({{.*}},{{.*}}):rnd:sat
declare i64 @llvm.hexagon.M2.mmpyul.rs1(i64, i64)
define i64 @M2_mmpyul_rs1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyul.rs1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweuh({{.*}}, {{.*}}):<<1:rnd:sat
+; CHECK: = vmpyweuh({{.*}},{{.*}}):<<1:rnd:sat
declare i64 @llvm.hexagon.M2.mmpyuh.rs0(i64, i64)
define i64 @M2_mmpyuh_rs0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyuh.rs0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywouh({{.*}}, {{.*}}):rnd:sat
+; CHECK: = vmpywouh({{.*}},{{.*}}):rnd:sat
declare i64 @llvm.hexagon.M2.mmpyuh.rs1(i64, i64)
define i64 @M2_mmpyuh_rs1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyuh.rs1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywouh({{.*}}, {{.*}}):<<1:rnd:sat
+; CHECK: = vmpywouh({{.*}},{{.*}}):<<1:rnd:sat
; Multiply signed halfwords
declare i64 @llvm.hexagon.M2.mpyd.ll.s0(i32, i32)
@@ -161,616 +161,616 @@ define i64 @M2_mpyd_ll_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.ll.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l)
+; CHECK: = mpy({{.*}}.l,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyd.ll.s1(i32, i32)
define i64 @M2_mpyd_ll_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.ll.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyd.lh.s0(i32, i32)
define i64 @M2_mpyd_lh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.lh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h)
+; CHECK: = mpy({{.*}}.l,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyd.lh.s1(i32, i32)
define i64 @M2_mpyd_lh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.lh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyd.hl.s0(i32, i32)
define i64 @M2_mpyd_hl_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.hl.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l)
+; CHECK: = mpy({{.*}}.h,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyd.hl.s1(i32, i32)
define i64 @M2_mpyd_hl_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.hl.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyd.hh.s0(i32, i32)
define i64 @M2_mpyd_hh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.hh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h)
+; CHECK: = mpy({{.*}}.h,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyd.hh.s1(i32, i32)
define i64 @M2_mpyd_hh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.hh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32, i32)
define i64 @M2_mpyd_rnd_ll_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):rnd
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32, i32)
define i64 @M2_mpyd_rnd_ll_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1:rnd
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):<<1:rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32, i32)
define i64 @M2_mpyd_rnd_lh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):rnd
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32, i32)
define i64 @M2_mpyd_rnd_lh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1:rnd
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):<<1:rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32, i32)
define i64 @M2_mpyd_rnd_hl_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):rnd
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32, i32)
define i64 @M2_mpyd_rnd_hl_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1:rnd
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):<<1:rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32, i32)
define i64 @M2_mpyd_rnd_hh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):rnd
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32, i32)
define i64 @M2_mpyd_rnd_hh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1:rnd
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):<<1:rnd
declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64, i32, i32)
define i64 @M2_mpyd_acc_ll_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.l)
+; CHECK: += mpy({{.*}}.l,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64, i32, i32)
define i64 @M2_mpyd_acc_ll_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.l):<<1
+; CHECK: += mpy({{.*}}.l,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64, i32, i32)
define i64 @M2_mpyd_acc_lh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.h)
+; CHECK: += mpy({{.*}}.l,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64, i32, i32)
define i64 @M2_mpyd_acc_lh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.h):<<1
+; CHECK: += mpy({{.*}}.l,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64, i32, i32)
define i64 @M2_mpyd_acc_hl_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.l)
+; CHECK: += mpy({{.*}}.h,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64, i32, i32)
define i64 @M2_mpyd_acc_hl_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1
+; CHECK: += mpy({{.*}}.h,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64, i32, i32)
define i64 @M2_mpyd_acc_hh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.h)
+; CHECK: += mpy({{.*}}.h,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64, i32, i32)
define i64 @M2_mpyd_acc_hh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.h):<<1
+; CHECK: += mpy({{.*}}.h,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64, i32, i32)
define i64 @M2_mpyd_nac_ll_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.l)
+; CHECK: -= mpy({{.*}}.l,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64, i32, i32)
define i64 @M2_mpyd_nac_ll_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.l):<<1
+; CHECK: -= mpy({{.*}}.l,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64, i32, i32)
define i64 @M2_mpyd_nac_lh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.h)
+; CHECK: -= mpy({{.*}}.l,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64, i32, i32)
define i64 @M2_mpyd_nac_lh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.h):<<1
+; CHECK: -= mpy({{.*}}.l,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64, i32, i32)
define i64 @M2_mpyd_nac_hl_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.l)
+; CHECK: -= mpy({{.*}}.h,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64, i32, i32)
define i64 @M2_mpyd_nac_hl_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.l):<<1
+; CHECK: -= mpy({{.*}}.h,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64, i32, i32)
define i64 @M2_mpyd_nac_hh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.h)
+; CHECK: -= mpy({{.*}}.h,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64, i32, i32)
define i64 @M2_mpyd_nac_hh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.h):<<1
+; CHECK: -= mpy({{.*}}.h,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.ll.s0(i32, i32)
define i32 @M2_mpy_ll_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.ll.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l)
+; CHECK: = mpy({{.*}}.l,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.ll.s1(i32, i32)
define i32 @M2_mpy_ll_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.ll.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpy.lh.s0(i32, i32)
define i32 @M2_mpy_lh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.lh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h)
+; CHECK: = mpy({{.*}}.l,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpy.lh.s1(i32, i32)
define i32 @M2_mpy_lh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.lh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.hl.s0(i32, i32)
define i32 @M2_mpy_hl_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.hl.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l)
+; CHECK: = mpy({{.*}}.h,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.hl.s1(i32, i32)
define i32 @M2_mpy_hl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.hl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpy.hh.s0(i32, i32)
define i32 @M2_mpy_hh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.hh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h)
+; CHECK: = mpy({{.*}}.h,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpy.hh.s1(i32, i32)
define i32 @M2_mpy_hh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.hh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32, i32)
define i32 @M2_mpy_sat_ll_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):sat
declare i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32, i32)
define i32 @M2_mpy_sat_ll_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1:sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32, i32)
define i32 @M2_mpy_sat_lh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):sat
declare i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32, i32)
define i32 @M2_mpy_sat_lh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1:sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32, i32)
define i32 @M2_mpy_sat_hl_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):sat
declare i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32, i32)
define i32 @M2_mpy_sat_hl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1:sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32, i32)
define i32 @M2_mpy_sat_hh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):sat
declare i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32, i32)
define i32 @M2_mpy_sat_hh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1:sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32, i32)
define i32 @M2_mpy_sat_rnd_ll_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):rnd:sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32, i32)
define i32 @M2_mpy_sat_rnd_ll_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1:rnd:sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32, i32)
define i32 @M2_mpy_sat_rnd_lh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):rnd:sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32, i32)
define i32 @M2_mpy_sat_rnd_lh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1:rnd:sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32, i32)
define i32 @M2_mpy_sat_rnd_hl_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):rnd:sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32, i32)
define i32 @M2_mpy_sat_rnd_hl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1:rnd:sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32, i32)
define i32 @M2_mpy_sat_rnd_hh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):rnd:sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32, i32)
define i32 @M2_mpy_sat_rnd_hh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1:rnd:sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32, i32, i32)
define i32 @M2_mpy_acc_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.l)
+; CHECK: += mpy({{.*}}.l,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32, i32, i32)
define i32 @M2_mpy_acc_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.l):<<1
+; CHECK: += mpy({{.*}}.l,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32, i32, i32)
define i32 @M2_mpy_acc_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.h)
+; CHECK: += mpy({{.*}}.l,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32, i32, i32)
define i32 @M2_mpy_acc_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.h):<<1
+; CHECK: += mpy({{.*}}.l,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32, i32, i32)
define i32 @M2_mpy_acc_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.l)
+; CHECK: += mpy({{.*}}.h,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32, i32, i32)
define i32 @M2_mpy_acc_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1
+; CHECK: += mpy({{.*}}.h,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32, i32, i32)
define i32 @M2_mpy_acc_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.h)
+; CHECK: += mpy({{.*}}.h,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32, i32, i32)
define i32 @M2_mpy_acc_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.h):<<1
+; CHECK: += mpy({{.*}}.h,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.l):sat
+; CHECK: += mpy({{.*}}.l,{{.*}}.l):sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.l):<<1:sat
+; CHECK: += mpy({{.*}}.l,{{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.h):sat
+; CHECK: += mpy({{.*}}.l,{{.*}}.h):sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.h):<<1:sat
+; CHECK: += mpy({{.*}}.l,{{.*}}.h):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.l):sat
+; CHECK: += mpy({{.*}}.h,{{.*}}.l):sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1:sat
+; CHECK: += mpy({{.*}}.h,{{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.h):sat
+; CHECK: += mpy({{.*}}.h,{{.*}}.h):sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.h):<<1:sat
+; CHECK: += mpy({{.*}}.h,{{.*}}.h):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32, i32, i32)
define i32 @M2_mpy_nac_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.l)
+; CHECK: -= mpy({{.*}}.l,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32, i32, i32)
define i32 @M2_mpy_nac_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.l):<<1
+; CHECK: -= mpy({{.*}}.l,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.h)
+; CHECK: -= mpy({{.*}}.l,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.h):<<1
+; CHECK: -= mpy({{.*}}.l,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32, i32, i32)
define i32 @M2_mpy_nac_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.l)
+; CHECK: -= mpy({{.*}}.h,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32, i32, i32)
define i32 @M2_mpy_nac_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.l):<<1
+; CHECK: -= mpy({{.*}}.h,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.h)
+; CHECK: -= mpy({{.*}}.h,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.h):<<1
+; CHECK: -= mpy({{.*}}.h,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.l):sat
+; CHECK: -= mpy({{.*}}.l,{{.*}}.l):sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.l):<<1:sat
+; CHECK: -= mpy({{.*}}.l,{{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.h):sat
+; CHECK: -= mpy({{.*}}.l,{{.*}}.h):sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.h):<<1:sat
+; CHECK: -= mpy({{.*}}.l,{{.*}}.h):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.l):sat
+; CHECK: -= mpy({{.*}}.h,{{.*}}.l):sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.l):<<1:sat
+; CHECK: -= mpy({{.*}}.h,{{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.h):sat
+; CHECK: -= mpy({{.*}}.h,{{.*}}.h):sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.h):<<1:sat
+; CHECK: -= mpy({{.*}}.h,{{.*}}.h):<<1:sat
; Multiply unsigned halfwords
declare i64 @llvm.hexagon.M2.mpyud.ll.s0(i32, i32)
@@ -778,336 +778,336 @@ define i64 @M2_mpyud_ll_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.ll.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.l)
+; CHECK: = mpyu({{.*}}.l,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyud.ll.s1(i32, i32)
define i64 @M2_mpyud_ll_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.ll.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.l):<<1
+; CHECK: = mpyu({{.*}}.l,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyud.lh.s0(i32, i32)
define i64 @M2_mpyud_lh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.lh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.h)
+; CHECK: = mpyu({{.*}}.l,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyud.lh.s1(i32, i32)
define i64 @M2_mpyud_lh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.lh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.h):<<1
+; CHECK: = mpyu({{.*}}.l,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyud.hl.s0(i32, i32)
define i64 @M2_mpyud_hl_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.hl.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.l)
+; CHECK: = mpyu({{.*}}.h,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyud.hl.s1(i32, i32)
define i64 @M2_mpyud_hl_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.hl.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.l):<<1
+; CHECK: = mpyu({{.*}}.h,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyud.hh.s0(i32, i32)
define i64 @M2_mpyud_hh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.hh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.h)
+; CHECK: = mpyu({{.*}}.h,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyud.hh.s1(i32, i32)
define i64 @M2_mpyud_hh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.hh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.h):<<1
+; CHECK: = mpyu({{.*}}.h,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_ll_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.l)
+; CHECK: += mpyu({{.*}}.l,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_ll_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.l):<<1
+; CHECK: += mpyu({{.*}}.l,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_lh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.h)
+; CHECK: += mpyu({{.*}}.l,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_lh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.h):<<1
+; CHECK: += mpyu({{.*}}.l,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_hl_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.l)
+; CHECK: += mpyu({{.*}}.h,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_hl_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.l):<<1
+; CHECK: += mpyu({{.*}}.h,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_hh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.h)
+; CHECK: += mpyu({{.*}}.h,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_hh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.h):<<1
+; CHECK: += mpyu({{.*}}.h,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_ll_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.l)
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_ll_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.l):<<1
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_lh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.h)
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_lh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.h):<<1
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_hl_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.l)
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_hl_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.l):<<1
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_hh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.h)
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_hh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.h):<<1
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpyu.ll.s0(i32, i32)
define i32 @M2_mpyu_ll_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.ll.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.l)
+; CHECK: = mpyu({{.*}}.l,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpyu.ll.s1(i32, i32)
define i32 @M2_mpyu_ll_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.ll.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.l):<<1
+; CHECK: = mpyu({{.*}}.l,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.lh.s0(i32, i32)
define i32 @M2_mpyu_lh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.lh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.h)
+; CHECK: = mpyu({{.*}}.l,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpyu.lh.s1(i32, i32)
define i32 @M2_mpyu_lh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.lh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.h):<<1
+; CHECK: = mpyu({{.*}}.l,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpyu.hl.s0(i32, i32)
define i32 @M2_mpyu_hl_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.hl.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.l)
+; CHECK: = mpyu({{.*}}.h,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpyu.hl.s1(i32, i32)
define i32 @M2_mpyu_hl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.hl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.l):<<1
+; CHECK: = mpyu({{.*}}.h,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.hh.s0(i32, i32)
define i32 @M2_mpyu_hh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.hh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.h)
+; CHECK: = mpyu({{.*}}.h,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpyu.hh.s1(i32, i32)
define i32 @M2_mpyu_hh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.hh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.h):<<1
+; CHECK: = mpyu({{.*}}.h,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.l)
+; CHECK: += mpyu({{.*}}.l,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.l):<<1
+; CHECK: += mpyu({{.*}}.l,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.h)
+; CHECK: += mpyu({{.*}}.l,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.h):<<1
+; CHECK: += mpyu({{.*}}.l,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.l)
+; CHECK: += mpyu({{.*}}.h,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.l):<<1
+; CHECK: += mpyu({{.*}}.h,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.h)
+; CHECK: += mpyu({{.*}}.h,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.h):<<1
+; CHECK: += mpyu({{.*}}.h,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.l)
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.l):<<1
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.h)
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.h):<<1
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.l)
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.l):<<1
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.h)
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.h)
declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.h):<<1
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.h):<<1
; Polynomial multiply words
declare i64 @llvm.hexagon.M4.pmpyw(i32, i32)
@@ -1115,14 +1115,14 @@ define i64 @M4_pmpyw(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M4.pmpyw(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = pmpyw({{.*}}, {{.*}})
+; CHECK: = pmpyw({{.*}},{{.*}})
declare i64 @llvm.hexagon.M4.pmpyw.acc(i64, i32, i32)
define i64 @M4_pmpyw_acc(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M4.pmpyw.acc(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: ^= pmpyw({{.*}}, {{.*}})
+; CHECK: ^= pmpyw({{.*}},{{.*}})
; Vector reduce multiply word by signed half
declare i64 @llvm.hexagon.M4.vrmpyoh.s0(i64, i64)
@@ -1130,56 +1130,56 @@ define i64 @M4_vrmpyoh_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M4.vrmpyoh.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpywoh({{.*}}, {{.*}})
+; CHECK: = vrmpywoh({{.*}},{{.*}})
declare i64 @llvm.hexagon.M4.vrmpyoh.s1(i64, i64)
define i64 @M4_vrmpyoh_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M4.vrmpyoh.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpywoh({{.*}}, {{.*}}):<<1
+; CHECK: = vrmpywoh({{.*}},{{.*}}):<<1
declare i64 @llvm.hexagon.M4.vrmpyeh.s0(i64, i64)
define i64 @M4_vrmpyeh_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M4.vrmpyeh.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpyweh({{.*}}, {{.*}})
+; CHECK: = vrmpyweh({{.*}},{{.*}})
declare i64 @llvm.hexagon.M4.vrmpyeh.s1(i64, i64)
define i64 @M4_vrmpyeh_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M4.vrmpyeh.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpyweh({{.*}}, {{.*}}):<<1
+; CHECK: = vrmpyweh({{.*}},{{.*}}):<<1
declare i64 @llvm.hexagon.M4.vrmpyoh.acc.s0(i64, i64, i64)
define i64 @M4_vrmpyoh_acc_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.vrmpyoh.acc.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpywoh({{.*}}, r5:4)
+; CHECK: += vrmpywoh({{.*}},r5:4)
declare i64 @llvm.hexagon.M4.vrmpyoh.acc.s1(i64, i64, i64)
define i64 @M4_vrmpyoh_acc_s1(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.vrmpyoh.acc.s1(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpywoh({{.*}}, r5:4):<<1
+; CHECK: += vrmpywoh({{.*}},r5:4):<<1
declare i64 @llvm.hexagon.M4.vrmpyeh.acc.s0(i64, i64, i64)
define i64 @M4_vrmpyeh_acc_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.vrmpyeh.acc.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpyweh({{.*}}, r5:4)
+; CHECK: += vrmpyweh({{.*}},r5:4)
declare i64 @llvm.hexagon.M4.vrmpyeh.acc.s1(i64, i64, i64)
define i64 @M4_vrmpyeh_acc_s1(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.vrmpyeh.acc.s1(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpyweh({{.*}}, r5:4):<<1
+; CHECK: += vrmpyweh({{.*}},r5:4):<<1
; Multiply and use upper result
declare i32 @llvm.hexagon.M2.dpmpyss.rnd.s0(i32, i32)
@@ -1187,84 +1187,84 @@ define i32 @M2_dpmpyss_rnd_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.dpmpyss.rnd.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}):rnd
+; CHECK: = mpy({{.*}},{{.*}}):rnd
declare i32 @llvm.hexagon.M2.mpyu.up(i32, i32)
define i32 @M2_mpyu_up(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.up(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}, {{.*}})
+; CHECK: = mpyu({{.*}},{{.*}})
declare i32 @llvm.hexagon.M2.mpysu.up(i32, i32)
define i32 @M2_mpysu_up(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpysu.up(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpysu({{.*}}, {{.*}})
+; CHECK: = mpysu({{.*}},{{.*}})
declare i32 @llvm.hexagon.M2.hmmpyh.s1(i32, i32)
define i32 @M2_hmmpyh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.hmmpyh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}.h):<<1:sat
+; CHECK: = mpy({{.*}},{{.*}}.h):<<1:sat
declare i32 @llvm.hexagon.M2.hmmpyl.s1(i32, i32)
define i32 @M2_hmmpyl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.hmmpyl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}.l):<<1:sat
+; CHECK: = mpy({{.*}},{{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.hmmpyh.rs1(i32, i32)
define i32 @M2_hmmpyh_rs1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.hmmpyh.rs1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}.h):<<1:rnd:sat
+; CHECK: = mpy({{.*}},{{.*}}.h):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.mpy.up.s1.sat(i32, i32)
define i32 @M2_mpy_up_s1_sat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.up.s1.sat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}):<<1:sat
+; CHECK: = mpy({{.*}},{{.*}}):<<1:sat
declare i32 @llvm.hexagon.M2.hmmpyl.rs1(i32, i32)
define i32 @M2_hmmpyl_rs1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.hmmpyl.rs1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}.l):<<1:rnd:sat
+; CHECK: = mpy({{.*}},{{.*}}.l):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.mpy.up(i32, i32)
define i32 @M2_mpy_up(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.up(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}})
+; CHECK: = mpy({{.*}},{{.*}})
declare i32 @llvm.hexagon.M2.mpy.up.s1(i32, i32)
define i32 @M2_mpy_up_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.up.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}):<<1
+; CHECK: = mpy({{.*}},{{.*}}):<<1
declare i32 @llvm.hexagon.M4.mac.up.s1.sat(i32, i32, i32)
define i32 @M4_mac_up_s1_sat(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.mac.up.s1.sat(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}, {{.*}}):<<1:sat
+; CHECK: += mpy({{.*}},{{.*}}):<<1:sat
declare i32 @llvm.hexagon.M4.nac.up.s1.sat(i32, i32, i32)
define i32 @M4_nac_up_s1_sat(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.nac.up.s1.sat(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}, {{.*}}):<<1:sat
+; CHECK: -= mpy({{.*}},{{.*}}):<<1:sat
; Multiply and use full result
declare i64 @llvm.hexagon.M2.dpmpyss.s0(i32, i32)
@@ -1272,42 +1272,42 @@ define i64 @M2_dpmpyss_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}, {{.*}})
+; CHECK: = mpy({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.dpmpyuu.s0(i32, i32)
define i64 @M2_dpmpyuu_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.dpmpyuu.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}, {{.*}})
+; CHECK: = mpyu({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64, i32, i32)
define i64 @M2_dpmpyss_acc_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}, {{.*}})
+; CHECK: += mpy({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64, i32, i32)
define i64 @M2_dpmpyss_nac_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}, {{.*}})
+; CHECK: -= mpy({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.dpmpyuu.acc.s0(i64, i32, i32)
define i64 @M2_dpmpyuu_acc_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.dpmpyuu.acc.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}, {{.*}})
+; CHECK: += mpyu({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.dpmpyuu.nac.s0(i64, i32, i32)
define i64 @M2_dpmpyuu_nac_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.dpmpyuu.nac.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}, {{.*}})
+; CHECK: -= mpyu({{.*}},{{.*}})
; Vector dual multiply
declare i64 @llvm.hexagon.M2.vdmpys.s0(i64, i64)
@@ -1315,14 +1315,14 @@ define i64 @M2_vdmpys_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vdmpys.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vdmpy({{.*}}, {{.*}}):sat
+; CHECK: = vdmpy({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.vdmpys.s1(i64, i64)
define i64 @M2_vdmpys_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vdmpys.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vdmpy({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vdmpy({{.*}},{{.*}}):<<1:sat
; Vector reduce multiply bytes
declare i64 @llvm.hexagon.M5.vrmpybuu(i64, i64)
@@ -1330,28 +1330,28 @@ define i64 @M5_vrmpybuu(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M5.vrmpybuu(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpybu({{.*}}, {{.*}})
+; CHECK: = vrmpybu({{.*}},{{.*}})
declare i64 @llvm.hexagon.M5.vrmpybsu(i64, i64)
define i64 @M5_vrmpybsu(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M5.vrmpybsu(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpybsu({{.*}}, {{.*}})
+; CHECK: = vrmpybsu({{.*}},{{.*}})
declare i64 @llvm.hexagon.M5.vrmacbuu(i64, i64, i64)
define i64 @M5_vrmacbuu(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M5.vrmacbuu(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpybu({{.*}}, r5:4)
+; CHECK: += vrmpybu({{.*}},r5:4)
declare i64 @llvm.hexagon.M5.vrmacbsu(i64, i64, i64)
define i64 @M5_vrmacbsu(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M5.vrmacbsu(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpybsu({{.*}}, r5:4)
+; CHECK: += vrmpybsu({{.*}},r5:4)
; Vector dual multiply signed by unsigned bytes
declare i64 @llvm.hexagon.M5.vdmpybsu(i64, i64)
@@ -1359,14 +1359,14 @@ define i64 @M5_vdmpybsu(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M5.vdmpybsu(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vdmpybsu({{.*}}, {{.*}}):sat
+; CHECK: = vdmpybsu({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M5.vdmacbsu(i64, i64, i64)
define i64 @M5_vdmacbsu(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M5.vdmacbsu(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vdmpybsu({{.*}}, r5:4):sat
+; CHECK: += vdmpybsu({{.*}},r5:4):sat
; Vector multiply even halfwords
declare i64 @llvm.hexagon.M2.vmpy2es.s0(i64, i64)
@@ -1374,35 +1374,35 @@ define i64 @M2_vmpy2es_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2es.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyeh({{.*}}, {{.*}}):sat
+; CHECK: = vmpyeh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.vmpy2es.s1(i64, i64)
define i64 @M2_vmpy2es_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2es.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyeh({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpyeh({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.vmac2es(i64, i64, i64)
define i64 @M2_vmac2es(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2es(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vmpyeh({{.*}}, r5:4)
+; CHECK: += vmpyeh({{.*}},r5:4)
declare i64 @llvm.hexagon.M2.vmac2es.s0(i64, i64, i64)
define i64 @M2_vmac2es_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2es.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vmpyeh({{.*}}, r5:4):sat
+; CHECK: += vmpyeh({{.*}},r5:4):sat
declare i64 @llvm.hexagon.M2.vmac2es.s1(i64, i64, i64)
define i64 @M2_vmac2es_s1(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2es.s1(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vmpyeh({{.*}}, r5:4):<<1:sat
+; CHECK: += vmpyeh({{.*}},r5:4):<<1:sat
; Vector multiply halfwords
declare i64 @llvm.hexagon.M2.vmpy2s.s0(i32, i32)
@@ -1410,35 +1410,35 @@ define i64 @M2_vmpy2s_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2s.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vmpyh({{.*}}, {{.*}}):sat
+; CHECK: = vmpyh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.vmpy2s.s1(i32, i32)
define i64 @M2_vmpy2s_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2s.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vmpyh({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpyh({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.vmac2(i64, i32, i32)
define i64 @M2_vmac2(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += vmpyh({{.*}}, {{.*}})
+; CHECK: += vmpyh({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.vmac2s.s0(i64, i32, i32)
define i64 @M2_vmac2s_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2s.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += vmpyh({{.*}}, {{.*}}):sat
+; CHECK: += vmpyh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.vmac2s.s1(i64, i32, i32)
define i64 @M2_vmac2s_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2s.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += vmpyh({{.*}}, {{.*}}):<<1:sat
+; CHECK: += vmpyh({{.*}},{{.*}}):<<1:sat
; Vector multiply halfwords signed by unsigned
declare i64 @llvm.hexagon.M2.vmpy2su.s0(i32, i32)
@@ -1446,28 +1446,28 @@ define i64 @M2_vmpy2su_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2su.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vmpyhsu({{.*}}, {{.*}}):sat
+; CHECK: = vmpyhsu({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.vmpy2su.s1(i32, i32)
define i64 @M2_vmpy2su_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2su.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vmpyhsu({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpyhsu({{.*}},{{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.vmac2su.s0(i64, i32, i32)
define i64 @M2_vmac2su_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2su.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += vmpyhsu({{.*}}, {{.*}}):sat
+; CHECK: += vmpyhsu({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.vmac2su.s1(i64, i32, i32)
define i64 @M2_vmac2su_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2su.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += vmpyhsu({{.*}}, {{.*}}):<<1:sat
+; CHECK: += vmpyhsu({{.*}},{{.*}}):<<1:sat
; Vector reduce multiply halfwords
declare i64 @llvm.hexagon.M2.vrmpy.s0(i64, i64)
@@ -1475,14 +1475,14 @@ define i64 @M2_vrmpy_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrmpy.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpyh({{.*}}, {{.*}})
+; CHECK: = vrmpyh({{.*}},{{.*}})
declare i64 @llvm.hexagon.M2.vrmac.s0(i64, i64, i64)
define i64 @M2_vrmac_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrmac.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpyh({{.*}}, r5:4)
+; CHECK: += vrmpyh({{.*}},r5:4)
; Vector multiply bytes
declare i64 @llvm.hexagon.M5.vmpybsu(i32, i32)
@@ -1490,28 +1490,28 @@ define i64 @M2_vmpybsu(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M5.vmpybsu(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vmpybsu({{.*}}, {{.*}})
+; CHECK: = vmpybsu({{.*}},{{.*}})
declare i64 @llvm.hexagon.M5.vmpybuu(i32, i32)
define i64 @M2_vmpybuu(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M5.vmpybuu(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vmpybu({{.*}}, {{.*}})
+; CHECK: = vmpybu({{.*}},{{.*}})
declare i64 @llvm.hexagon.M5.vmacbuu(i64, i32, i32)
define i64 @M2_vmacbuu(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M5.vmacbuu(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += vmpybu({{.*}}, {{.*}})
+; CHECK: += vmpybu({{.*}},{{.*}})
declare i64 @llvm.hexagon.M5.vmacbsu(i64, i32, i32)
define i64 @M2_vmacbsu(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M5.vmacbsu(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += vmpybsu({{.*}}, {{.*}})
+; CHECK: += vmpybsu({{.*}},{{.*}})
; Vector polynomial multiply halfwords
declare i64 @llvm.hexagon.M4.vpmpyh(i32, i32)
@@ -1519,11 +1519,11 @@ define i64 @M4_vpmpyh(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M4.vpmpyh(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vpmpyh({{.*}}, {{.*}})
+; CHECK: = vpmpyh({{.*}},{{.*}})
declare i64 @llvm.hexagon.M4.vpmpyh.acc(i64, i32, i32)
define i64 @M4_vpmpyh_acc(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M4.vpmpyh.acc(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: ^= vpmpyh({{.*}}, {{.*}})
+; CHECK: ^= vpmpyh({{.*}},{{.*}})
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll b/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll
index 3e044e3838de..9260790e33a6 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll
@@ -141,28 +141,28 @@ define i64 @S2_shuffeb(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.shuffeb(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = shuffeb({{.*}}, {{.*}})
+; CHECK: = shuffeb({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.shuffob(i64, i64)
define i64 @S2_shuffob(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.shuffob(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = shuffob({{.*}}, {{.*}})
+; CHECK: = shuffob({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.shuffeh(i64, i64)
define i64 @S2_shuffeh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.shuffeh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = shuffeh({{.*}}, {{.*}})
+; CHECK: = shuffeh({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.shuffoh(i64, i64)
define i64 @S2_shuffoh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.shuffoh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = shuffoh({{.*}}, {{.*}})
+; CHECK: = shuffoh({{.*}},{{.*}})
; Vector splat bytes
declare i32 @llvm.hexagon.S2.vsplatrb(i32)
@@ -186,14 +186,14 @@ define i64 @S2_vspliceib(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.vspliceib(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: = vspliceb({{.*}}, {{.*}}, #0)
+; CHECK: = vspliceb({{.*}},{{.*}},#0)
declare i64 @llvm.hexagon.S2.vsplicerb(i64, i64, i32)
define i64 @S2_vsplicerb(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.vsplicerb(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: = vspliceb({{.*}}, {{.*}}, {{.*}})
+; CHECK: = vspliceb({{.*}},{{.*}},{{.*}})
; Vector sign extend
declare i64 @llvm.hexagon.S2.vsxtbh(i32)
@@ -230,14 +230,14 @@ define i64 @S2_vtrunowh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.vtrunowh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vtrunowh({{.*}}, {{.*}})
+; CHECK: = vtrunowh({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.vtrunewh(i64, i64)
define i64 @S2_vtrunewh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.vtrunewh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vtrunewh({{.*}}, {{.*}})
+; CHECK: = vtrunewh({{.*}},{{.*}})
; Vector zero extend
declare i64 @llvm.hexagon.S2.vzxtbh(i32)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll b/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll
index f06339b9a85a..506dc88d3c1a 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll
@@ -10,42 +10,42 @@ define i32 @A4_cmpbgt(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cmpbgt(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpb.gt({{.*}}, {{.*}})
+; CHECK: = cmpb.gt({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.cmpbeq(i32, i32)
define i32 @A4_cmpbeq(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cmpbeq(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpb.eq({{.*}}, {{.*}})
+; CHECK: = cmpb.eq({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.cmpbgtu(i32, i32)
define i32 @A4_cmpbgtu(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cmpbgtu(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmpb.gtu({{.*}}, {{.*}})
+; CHECK: = cmpb.gtu({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.cmpbgti(i32, i32)
define i32 @A4_cmpbgti(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cmpbgti(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = cmpb.gt({{.*}}, #0)
+; CHECK: = cmpb.gt({{.*}},#0)
declare i32 @llvm.hexagon.A4.cmpbeqi(i32, i32)
define i32 @A4_cmpbeqi(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cmpbeqi(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = cmpb.eq({{.*}}, #0)
+; CHECK: = cmpb.eq({{.*}},#0)
declare i32 @llvm.hexagon.A4.cmpbgtui(i32, i32)
define i32 @A4_cmpbgtui(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cmpbgtui(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = cmpb.gtu({{.*}}, #0)
+; CHECK: = cmpb.gtu({{.*}},#0)
; Compare half
declare i32 @llvm.hexagon.A4.cmphgt(i32, i32)
@@ -53,42 +53,42 @@ define i32 @A4_cmphgt(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cmphgt(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmph.gt({{.*}}, {{.*}})
+; CHECK: = cmph.gt({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.cmpheq(i32, i32)
define i32 @A4_cmpheq(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cmpheq(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmph.eq({{.*}}, {{.*}})
+; CHECK: = cmph.eq({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.cmphgtu(i32, i32)
define i32 @A4_cmphgtu(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cmphgtu(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = cmph.gtu({{.*}}, {{.*}})
+; CHECK: = cmph.gtu({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.cmphgti(i32, i32)
define i32 @A4_cmphgti(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cmphgti(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = cmph.gt({{.*}}, #0)
+; CHECK: = cmph.gt({{.*}},#0)
declare i32 @llvm.hexagon.A4.cmpheqi(i32, i32)
define i32 @A4_cmpheqi(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cmpheqi(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = cmph.eq({{.*}}, #0)
+; CHECK: = cmph.eq({{.*}},#0)
declare i32 @llvm.hexagon.A4.cmphgtui(i32, i32)
define i32 @A4_cmphgtui(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cmphgtui(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = cmph.gtu({{.*}}, #0)
+; CHECK: = cmph.gtu({{.*}},#0)
; Compare doublewords
declare i32 @llvm.hexagon.C2.cmpgtp(i64, i64)
@@ -96,21 +96,21 @@ define i32 @C2_cmpgtp(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.C2.cmpgtp(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = cmp.gt({{.*}}, {{.*}})
+; CHECK: = cmp.gt({{.*}},{{.*}})
declare i32 @llvm.hexagon.C2.cmpeqp(i64, i64)
define i32 @C2_cmpeqp(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.C2.cmpeqp(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = cmp.eq({{.*}}, {{.*}})
+; CHECK: = cmp.eq({{.*}},{{.*}})
declare i32 @llvm.hexagon.C2.cmpgtup(i64, i64)
define i32 @C2_cmpgtup(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.C2.cmpgtup(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = cmp.gtu({{.*}}, {{.*}})
+; CHECK: = cmp.gtu({{.*}},{{.*}})
; Compare bitmask
declare i32 @llvm.hexagon.C2.bitsclri(i32, i32)
@@ -118,42 +118,42 @@ define i32 @C2_bitsclri(i32 %a) {
%z = call i32 @llvm.hexagon.C2.bitsclri(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = bitsclr({{.*}}, #0)
+; CHECK: = bitsclr({{.*}},#0)
declare i32 @llvm.hexagon.C4.nbitsclri(i32, i32)
define i32 @C4_nbitsclri(i32 %a) {
%z = call i32 @llvm.hexagon.C4.nbitsclri(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = !bitsclr({{.*}}, #0)
+; CHECK: = !bitsclr({{.*}},#0)
declare i32 @llvm.hexagon.C2.bitsset(i32, i32)
define i32 @C2_bitsset(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.bitsset(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = bitsset({{.*}}, {{.*}})
+; CHECK: = bitsset({{.*}},{{.*}})
declare i32 @llvm.hexagon.C4.nbitsset(i32, i32)
define i32 @C4_nbitsset(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C4.nbitsset(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = !bitsset({{.*}}, {{.*}})
+; CHECK: = !bitsset({{.*}},{{.*}})
declare i32 @llvm.hexagon.C2.bitsclr(i32, i32)
define i32 @C2_bitsclr(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.bitsclr(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = bitsclr({{.*}}, {{.*}})
+; CHECK: = bitsclr({{.*}},{{.*}})
declare i32 @llvm.hexagon.C4.nbitsclr(i32, i32)
define i32 @C4_nbitsclr(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C4.nbitsclr(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = !bitsclr({{.*}}, {{.*}})
+; CHECK: = !bitsclr({{.*}},{{.*}})
; Mask generate from predicate
declare i64 @llvm.hexagon.C2.mask(i32)
@@ -169,7 +169,7 @@ define i32 @A4_tlbmatch(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.tlbmatch(i64 %a, i32 %b)
ret i32 %z
}
-; CHECK: = tlbmatch({{.*}}, {{.*}})
+; CHECK: = tlbmatch({{.*}},{{.*}})
; Test bit
declare i32 @llvm.hexagon.S2.tstbit.i(i32, i32)
@@ -177,28 +177,28 @@ define i32 @S2_tstbit_i(i32 %a) {
%z = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = tstbit({{.*}}, #0)
+; CHECK: = tstbit({{.*}},#0)
declare i32 @llvm.hexagon.S4.ntstbit.i(i32, i32)
define i32 @S4_ntstbit_i(i32 %a) {
%z = call i32 @llvm.hexagon.S4.ntstbit.i(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = !tstbit({{.*}}, #0)
+; CHECK: = !tstbit({{.*}},#0)
declare i32 @llvm.hexagon.S2.tstbit.r(i32, i32)
define i32 @S2_tstbit_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tstbit.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = tstbit({{.*}}, {{.*}})
+; CHECK: = tstbit({{.*}},{{.*}})
declare i32 @llvm.hexagon.S4.ntstbit.r(i32, i32)
define i32 @S4_ntstbit_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.ntstbit.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = !tstbit({{.*}}, {{.*}})
+; CHECK: = !tstbit({{.*}},{{.*}})
; Vector compare halfwords
declare i32 @llvm.hexagon.A2.vcmpheq(i64, i64)
@@ -206,42 +206,42 @@ define i32 @A2_vcmpheq(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmpheq(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vcmph.eq({{.*}}, {{.*}})
+; CHECK: = vcmph.eq({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.vcmphgt(i64, i64)
define i32 @A2_vcmphgt(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmphgt(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vcmph.gt({{.*}}, {{.*}})
+; CHECK: = vcmph.gt({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.vcmphgtu(i64, i64)
define i32 @A2_vcmphgtu(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmphgtu(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vcmph.gtu({{.*}}, {{.*}})
+; CHECK: = vcmph.gtu({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.vcmpheqi(i64, i32)
define i32 @A4_vcmpheqi(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpheqi(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vcmph.eq({{.*}}, #0)
+; CHECK: = vcmph.eq({{.*}},#0)
declare i32 @llvm.hexagon.A4.vcmphgti(i64, i32)
define i32 @A4_vcmphgti(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmphgti(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vcmph.gt({{.*}}, #0)
+; CHECK: = vcmph.gt({{.*}},#0)
declare i32 @llvm.hexagon.A4.vcmphgtui(i64, i32)
define i32 @A4_vcmphgtui(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmphgtui(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vcmph.gtu({{.*}}, #0)
+; CHECK: = vcmph.gtu({{.*}},#0)
; Vector compare bytes for any match
declare i32 @llvm.hexagon.A4.vcmpbeq.any(i64, i64)
@@ -249,7 +249,7 @@ define i32 @A4_vcmpbeq_any(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A4.vcmpbeq.any(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = any8(vcmpb.eq({{.*}}, {{.*}}))
+; CHECK: = any8(vcmpb.eq({{.*}},{{.*}}))
; Vector compare bytes
declare i32 @llvm.hexagon.A2.vcmpbeq(i64, i64)
@@ -257,42 +257,42 @@ define i32 @A2_vcmpbeq(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmpbeq(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vcmpb.eq({{.*}}, {{.*}})
+; CHECK: = vcmpb.eq({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.vcmpbgtu(i64, i64)
define i32 @A2_vcmpbgtu(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmpbgtu(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vcmpb.gtu({{.*}}, {{.*}})
+; CHECK: = vcmpb.gtu({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.vcmpbgt(i64, i64)
define i32 @A4_vcmpbgt(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A4.vcmpbgt(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vcmpb.gt({{.*}}, {{.*}})
+; CHECK: = vcmpb.gt({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.vcmpbeqi(i64, i32)
define i32 @A4_vcmpbeqi(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpbeqi(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vcmpb.eq({{.*}}, #0)
+; CHECK: = vcmpb.eq({{.*}},#0)
declare i32 @llvm.hexagon.A4.vcmpbgti(i64, i32)
define i32 @A4_vcmpbgti(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpbgti(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vcmpb.gt({{.*}}, #0)
+; CHECK: = vcmpb.gt({{.*}},#0)
declare i32 @llvm.hexagon.A4.vcmpbgtui(i64, i32)
define i32 @A4_vcmpbgtui(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpbgtui(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vcmpb.gtu({{.*}}, #0)
+; CHECK: = vcmpb.gtu({{.*}},#0)
; Vector compare words
declare i32 @llvm.hexagon.A2.vcmpweq(i64, i64)
@@ -300,42 +300,42 @@ define i32 @A2_vcmpweq(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmpweq(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vcmpw.eq({{.*}}, {{.*}})
+; CHECK: = vcmpw.eq({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.vcmpwgt(i64, i64)
define i32 @A2_vcmpwgt(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmpwgt(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vcmpw.gt({{.*}}, {{.*}})
+; CHECK: = vcmpw.gt({{.*}},{{.*}})
declare i32 @llvm.hexagon.A2.vcmpwgtu(i64, i64)
define i32 @A2_vcmpwgtu(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmpwgtu(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: = vcmpw.gtu({{.*}}, {{.*}})
+; CHECK: = vcmpw.gtu({{.*}},{{.*}})
declare i32 @llvm.hexagon.A4.vcmpweqi(i64, i32)
define i32 @A4_vcmpweqi(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpweqi(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vcmpw.eq({{.*}}, #0)
+; CHECK: = vcmpw.eq({{.*}},#0)
declare i32 @llvm.hexagon.A4.vcmpwgti(i64, i32)
define i32 @A4_vcmpwgti(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpwgti(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vcmpw.gt({{.*}}, #0)
+; CHECK: = vcmpw.gt({{.*}},#0)
declare i32 @llvm.hexagon.A4.vcmpwgtui(i64, i32)
define i32 @A4_vcmpwgtui(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpwgtui(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vcmpw.gtu({{.*}}, #0)
+; CHECK: = vcmpw.gtu({{.*}},#0)
; Viterbi pack even and odd predicate bits
declare i32 @llvm.hexagon.C2.vitpack(i32, i32)
@@ -343,7 +343,7 @@ define i32 @C2_vitpack(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.vitpack(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vitpack({{.*}}, {{.*}})
+; CHECK: = vitpack({{.*}},{{.*}})
; Vector mux
declare i64 @llvm.hexagon.C2.vmux(i32, i64, i64)
@@ -351,4 +351,4 @@ define i64 @C2_vmux(i32 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.C2.vmux(i32 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: = vmux({{.*}}, {{.*}}, {{.*}})
+; CHECK: = vmux({{.*}},{{.*}},{{.*}})
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll b/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
index 1a65f44c1954..8809baf3551b 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
@@ -10,42 +10,42 @@ define i64 @S2_asr_i_p(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asr.i.p(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = asr({{.*}}, #0)
+; CHECK: = asr({{.*}},#0)
declare i64 @llvm.hexagon.S2.lsr.i.p(i64, i32)
define i64 @S2_lsr_i_p(i64 %a) {
%z = call i64 @llvm.hexagon.S2.lsr.i.p(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = lsr({{.*}}, #0)
+; CHECK: = lsr({{.*}},#0)
declare i64 @llvm.hexagon.S2.asl.i.p(i64, i32)
define i64 @S2_asl_i_p(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asl.i.p(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = asl({{.*}}, #0)
+; CHECK: = asl({{.*}},#0)
declare i32 @llvm.hexagon.S2.asr.i.r(i32, i32)
define i32 @S2_asr_i_r(i32 %a) {
%z = call i32 @llvm.hexagon.S2.asr.i.r(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = asr({{.*}}, #0)
+; CHECK: = asr({{.*}},#0)
declare i32 @llvm.hexagon.S2.lsr.i.r(i32, i32)
define i32 @S2_lsr_i_r(i32 %a) {
%z = call i32 @llvm.hexagon.S2.lsr.i.r(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = lsr({{.*}}, #0)
+; CHECK: = lsr({{.*}},#0)
declare i32 @llvm.hexagon.S2.asl.i.r(i32, i32)
define i32 @S2_asl_i_r(i32 %a) {
%z = call i32 @llvm.hexagon.S2.asl.i.r(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = asl({{.*}}, #0)
+; CHECK: = asl({{.*}},#0)
; Shift by immediate and accumulate
declare i64 @llvm.hexagon.S2.asr.i.p.nac(i64, i64, i32)
@@ -53,84 +53,84 @@ define i64 @S2_asr_i_p_nac(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asr.i.p.nac(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: -= asr({{.*}}, #0)
+; CHECK: -= asr({{.*}},#0)
declare i64 @llvm.hexagon.S2.lsr.i.p.nac(i64, i64, i32)
define i64 @S2_lsr_i_p_nac(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.i.p.nac(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: -= lsr({{.*}}, #0)
+; CHECK: -= lsr({{.*}},#0)
declare i64 @llvm.hexagon.S2.asl.i.p.nac(i64, i64, i32)
define i64 @S2_asl_i_p_nac(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asl.i.p.nac(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: -= asl({{.*}}, #0)
+; CHECK: -= asl({{.*}},#0)
declare i64 @llvm.hexagon.S2.asr.i.p.acc(i64, i64, i32)
define i64 @S2_asr_i_p_acc(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asr.i.p.acc(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: += asr({{.*}}, #0)
+; CHECK: += asr({{.*}},#0)
declare i64 @llvm.hexagon.S2.lsr.i.p.acc(i64, i64, i32)
define i64 @S2_lsr_i_p_acc(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.i.p.acc(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: += lsr({{.*}}, #0)
+; CHECK: += lsr({{.*}},#0)
declare i64 @llvm.hexagon.S2.asl.i.p.acc(i64, i64, i32)
define i64 @S2_asl_i_p_acc(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asl.i.p.acc(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: += asl({{.*}}, #0)
+; CHECK: += asl({{.*}},#0)
declare i32 @llvm.hexagon.S2.asr.i.r.nac(i32, i32, i32)
define i32 @S2_asr_i_r_nac(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.i.r.nac(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: -= asr({{.*}}, #0)
+; CHECK: -= asr({{.*}},#0)
declare i32 @llvm.hexagon.S2.lsr.i.r.nac(i32, i32, i32)
define i32 @S2_lsr_i_r_nac(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsr.i.r.nac(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: -= lsr({{.*}}, #0)
+; CHECK: -= lsr({{.*}},#0)
declare i32 @llvm.hexagon.S2.asl.i.r.nac(i32, i32, i32)
define i32 @S2_asl_i_r_nac(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.i.r.nac(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: -= asl({{.*}}, #0)
+; CHECK: -= asl({{.*}},#0)
declare i32 @llvm.hexagon.S2.asr.i.r.acc(i32, i32, i32)
define i32 @S2_asr_i_r_acc(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.i.r.acc(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: += asr({{.*}}, #0)
+; CHECK: += asr({{.*}},#0)
declare i32 @llvm.hexagon.S2.lsr.i.r.acc(i32, i32, i32)
define i32 @S2_lsr_i_r_acc(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsr.i.r.acc(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: += lsr({{.*}}, #0)
+; CHECK: += lsr({{.*}},#0)
declare i32 @llvm.hexagon.S2.asl.i.r.acc(i32, i32, i32)
define i32 @S2_asl_i_r_acc(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.i.r.acc(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: += asl({{.*}}, #0)
+; CHECK: += asl({{.*}},#0)
; Shift by immediate and add
declare i32 @llvm.hexagon.S4.addi.asl.ri(i32, i32, i32)
@@ -138,35 +138,35 @@ define i32 @S4_addi_asl_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.addi.asl.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = add(#0, asl({{.*}}, #0))
+; CHECK: = add(#0,asl({{.*}},#0))
declare i32 @llvm.hexagon.S4.subi.asl.ri(i32, i32, i32)
define i32 @S4_subi_asl_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.subi.asl.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = sub(#0, asl({{.*}}, #0))
+; CHECK: = sub(#0,asl({{.*}},#0))
declare i32 @llvm.hexagon.S4.addi.lsr.ri(i32, i32, i32)
define i32 @S4_addi_lsr_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.addi.lsr.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = add(#0, lsr({{.*}}, #0))
+; CHECK: = add(#0,lsr({{.*}},#0))
declare i32 @llvm.hexagon.S4.subi.lsr.ri(i32, i32, i32)
define i32 @S4_subi_lsr_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = sub(#0, lsr({{.*}}, #0))
+; CHECK: = sub(#0,lsr({{.*}},#0))
declare i32 @llvm.hexagon.S2.addasl.rrri(i32, i32, i32)
define i32 @S2_addasl_rrri(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.addasl.rrri(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: = addasl({{.*}}, {{.*}}, #0)
+; CHECK: = addasl({{.*}},{{.*}},#0)
; Shift by immediate and logical
declare i64 @llvm.hexagon.S2.asr.i.p.and(i64, i64, i32)
@@ -174,140 +174,140 @@ define i64 @S2_asr_i_p_and(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asr.i.p.and(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: &= asr({{.*}}, #0)
+; CHECK: &= asr({{.*}},#0)
declare i64 @llvm.hexagon.S2.lsr.i.p.and(i64, i64, i32)
define i64 @S2_lsr_i_p_and(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.i.p.and(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: {{.*}} &= lsr({{.*}}, #0)
+; CHECK: {{.*}} &= lsr({{.*}},#0)
declare i64 @llvm.hexagon.S2.asl.i.p.and(i64, i64, i32)
define i64 @S2_asl_i_p_and(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asl.i.p.and(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: &= asl({{.*}}, #0)
+; CHECK: &= asl({{.*}},#0)
declare i64 @llvm.hexagon.S2.asr.i.p.or(i64, i64, i32)
define i64 @S2_asr_i_p_or(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asr.i.p.or(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: |= asr({{.*}}, #0)
+; CHECK: |= asr({{.*}},#0)
declare i64 @llvm.hexagon.S2.lsr.i.p.or(i64, i64, i32)
define i64 @S2_lsr_i_p_or(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.i.p.or(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: |= lsr({{.*}}, #0)
+; CHECK: |= lsr({{.*}},#0)
declare i64 @llvm.hexagon.S2.asl.i.p.or(i64, i64, i32)
define i64 @S2_asl_i_p_or(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asl.i.p.or(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: |= asl({{.*}}, #0)
+; CHECK: |= asl({{.*}},#0)
declare i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64, i64, i32)
define i64 @S2_lsr_i_p_xacc(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: ^= lsr({{.*}}, #0)
+; CHECK: ^= lsr({{.*}},#0)
declare i64 @llvm.hexagon.S2.asl.i.p.xacc(i64, i64, i32)
define i64 @S2_asl_i_p_xacc(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asl.i.p.xacc(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: ^= asl({{.*}}, #0)
+; CHECK: ^= asl({{.*}},#0)
declare i32 @llvm.hexagon.S2.asr.i.r.and(i32, i32, i32)
define i32 @S2_asr_i_r_and(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.i.r.and(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: &= asr({{.*}}, #0)
+; CHECK: &= asr({{.*}},#0)
declare i32 @llvm.hexagon.S2.lsr.i.r.and(i32, i32, i32)
define i32 @S2_lsr_i_r_and(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsr.i.r.and(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: &= lsr({{.*}}, #0)
+; CHECK: &= lsr({{.*}},#0)
declare i32 @llvm.hexagon.S2.asl.i.r.and(i32, i32, i32)
define i32 @S2_asl_i_r_and(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.i.r.and(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: &= asl({{.*}}, #0)
+; CHECK: &= asl({{.*}},#0)
declare i32 @llvm.hexagon.S2.asr.i.r.or(i32, i32, i32)
define i32 @S2_asr_i_r_or(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.i.r.or(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: |= asr({{.*}}, #0)
+; CHECK: |= asr({{.*}},#0)
declare i32 @llvm.hexagon.S2.lsr.i.r.or(i32, i32, i32)
define i32 @S2_lsr_i_r_or(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsr.i.r.or(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: |= lsr({{.*}}, #0)
+; CHECK: |= lsr({{.*}},#0)
declare i32 @llvm.hexagon.S2.asl.i.r.or(i32, i32, i32)
define i32 @S2_asl_i_r_or(i32%a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.i.r.or(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: |= asl({{.*}}, #0)
+; CHECK: |= asl({{.*}},#0)
declare i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32, i32, i32)
define i32 @S2_lsr_i_r_xacc(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32%a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: ^= lsr({{.*}}, #0)
+; CHECK: ^= lsr({{.*}},#0)
declare i32 @llvm.hexagon.S2.asl.i.r.xacc(i32, i32, i32)
define i32 @S2_asl_i_r_xacc(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.i.r.xacc(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: ^= asl({{.*}}, #0)
+; CHECK: ^= asl({{.*}},#0)
declare i32 @llvm.hexagon.S4.andi.asl.ri(i32, i32, i32)
define i32 @S4_andi_asl_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.andi.asl.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = and(#0, asl({{.*}}, #0))
+; CHECK: = and(#0,asl({{.*}},#0))
declare i32 @llvm.hexagon.S4.ori.asl.ri(i32, i32, i32)
define i32 @S4_ori_asl_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.ori.asl.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = or(#0, asl({{.*}}, #0))
+; CHECK: = or(#0,asl({{.*}},#0))
declare i32 @llvm.hexagon.S4.andi.lsr.ri(i32, i32, i32)
define i32 @S4_andi_lsr_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.andi.lsr.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = and(#0, lsr({{.*}}, #0))
+; CHECK: = and(#0,lsr({{.*}},#0))
declare i32 @llvm.hexagon.S4.ori.lsr.ri(i32, i32, i32)
define i32 @S4_ori_lsr_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.ori.lsr.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = or(#0, lsr({{.*}}, #0))
+; CHECK: = or(#0,lsr({{.*}},#0))
; Shift right by immediate with rounding
declare i64 @llvm.hexagon.S2.asr.i.p.rnd(i64, i32)
@@ -315,14 +315,14 @@ define i64 @S2_asr_i_p_rnd(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asr.i.p.rnd(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = asr({{.*}}, #0):rnd
+; CHECK: = asr({{.*}},#0):rnd
declare i32 @llvm.hexagon.S2.asr.i.r.rnd(i32, i32)
define i32 @S2_asr_i_r_rnd(i32 %a) {
%z = call i32 @llvm.hexagon.S2.asr.i.r.rnd(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = asr({{.*}}, #0):rnd
+; CHECK: = asr({{.*}},#0):rnd
; Shift left by immediate with saturation
declare i32 @llvm.hexagon.S2.asl.i.r.sat(i32, i32)
@@ -330,7 +330,7 @@ define i32 @S2_asl_i_r_sat(i32 %a) {
%z = call i32 @llvm.hexagon.S2.asl.i.r.sat(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = asl({{.*}}, #0):sat
+; CHECK: = asl({{.*}},#0):sat
; Shift by register
declare i64 @llvm.hexagon.S2.asr.r.p(i64, i32)
@@ -338,63 +338,63 @@ define i64 @S2_asr_r_p(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.asr.r.p(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = asr({{.*}}, {{.*}})
+; CHECK: = asr({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.lsr.r.p(i64, i32)
define i64 @S2_lsr_r_p(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.r.p(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = lsr({{.*}}, {{.*}})
+; CHECK: = lsr({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.asl.r.p(i64, i32)
define i64 @S2_asl_r_p(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.asl.r.p(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = asl({{.*}}, {{.*}})
+; CHECK: = asl({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.lsl.r.p(i64, i32)
define i64 @S2_lsl_r_p(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.lsl.r.p(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = lsl({{.*}}, {{.*}})
+; CHECK: = lsl({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.asr.r.r(i32, i32)
define i32 @S2_asr_r_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.r.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = asr({{.*}}, {{.*}})
+; CHECK: = asr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsr.r.r(i32, i32)
define i32 @S2_lsr_r_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsr.r.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = lsr({{.*}}, {{.*}})
+; CHECK: = lsr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.asl.r.r(i32, i32)
define i32 @S2_asl_r_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.r.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = asl({{.*}}, {{.*}})
+; CHECK: = asl({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsl.r.r(i32, i32)
define i32 @S2_lsl_r_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsl.r.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = lsl({{.*}}, {{.*}})
+; CHECK: = lsl({{.*}},{{.*}})
declare i32 @llvm.hexagon.S4.lsli(i32, i32)
define i32 @S4_lsli(i32 %a) {
%z = call i32 @llvm.hexagon.S4.lsli(i32 0, i32 %a)
ret i32 %z
}
-; CHECK: = lsl(#0, {{.*}})
+; CHECK: = lsl(#0,{{.*}})
; Shift by register and accumulate
declare i64 @llvm.hexagon.S2.asr.r.p.nac(i64, i64, i32)
@@ -402,112 +402,112 @@ define i64 @S2_asr_r_p_nac(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asr.r.p.nac(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= asr({{.*}}, r4)
+; CHECK: -= asr({{.*}},r4)
declare i64 @llvm.hexagon.S2.lsr.r.p.nac(i64, i64, i32)
define i64 @S2_lsr_r_p_nac(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsr.r.p.nac(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= lsr({{.*}}, r4)
+; CHECK: -= lsr({{.*}},r4)
declare i64 @llvm.hexagon.S2.asl.r.p.nac(i64, i64, i32)
define i64 @S2_asl_r_p_nac(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asl.r.p.nac(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= asl({{.*}}, r4)
+; CHECK: -= asl({{.*}},r4)
declare i64 @llvm.hexagon.S2.lsl.r.p.nac(i64, i64, i32)
define i64 @S2_lsl_r_p_nac(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsl.r.p.nac(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= lsl({{.*}}, r4)
+; CHECK: -= lsl({{.*}},r4)
declare i64 @llvm.hexagon.S2.asr.r.p.acc(i64, i64, i32)
define i64 @S2_asr_r_p_acc(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asr.r.p.acc(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: += asr({{.*}}, r4)
+; CHECK: += asr({{.*}},r4)
declare i64 @llvm.hexagon.S2.lsr.r.p.acc(i64, i64, i32)
define i64 @S2_lsr_r_p_acc(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsr.r.p.acc(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: += lsr({{.*}}, r4)
+; CHECK: += lsr({{.*}},r4)
declare i64 @llvm.hexagon.S2.asl.r.p.acc(i64, i64, i32)
define i64 @S2_asl_r_p_acc(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asl.r.p.acc(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: += asl({{.*}}, r4)
+; CHECK: += asl({{.*}},r4)
declare i64 @llvm.hexagon.S2.lsl.r.p.acc(i64, i64, i32)
define i64 @S2_lsl_r_p_acc(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsl.r.p.acc(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: += lsl({{.*}}, r4)
+; CHECK: += lsl({{.*}},r4)
declare i32 @llvm.hexagon.S2.asr.r.r.nac(i32, i32, i32)
define i32 @S2_asr_r_r_nac(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asr.r.r.nac(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= asr({{.*}}, {{.*}})
+; CHECK: -= asr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsr.r.r.nac(i32, i32, i32)
define i32 @S2_lsr_r_r_nac(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsr.r.r.nac(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= lsr({{.*}}, {{.*}})
+; CHECK: -= lsr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.asl.r.r.nac(i32, i32, i32)
define i32 @S2_asl_r_r_nac(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asl.r.r.nac(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= asl({{.*}}, {{.*}})
+; CHECK: -= asl({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsl.r.r.nac(i32, i32, i32)
define i32 @S2_lsl_r_r_nac(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsl.r.r.nac(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= lsl({{.*}}, {{.*}})
+; CHECK: -= lsl({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.asr.r.r.acc(i32, i32, i32)
define i32 @S2_asr_r_r_acc(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asr.r.r.acc(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += asr({{.*}}, {{.*}})
+; CHECK: += asr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsr.r.r.acc(i32, i32, i32)
define i32 @S2_lsr_r_r_acc(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsr.r.r.acc(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += lsr({{.*}}, {{.*}})
+; CHECK: += lsr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.asl.r.r.acc(i32, i32, i32)
define i32 @S2_asl_r_r_acc(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asl.r.r.acc(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += asl({{.*}}, {{.*}})
+; CHECK: += asl({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsl.r.r.acc(i32, i32, i32)
define i32 @S2_lsl_r_r_acc(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsl.r.r.acc(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += lsl({{.*}}, {{.*}})
+; CHECK: += lsl({{.*}},{{.*}})
; Shift by register and logical
declare i64 @llvm.hexagon.S2.asr.r.p.or(i64, i64, i32)
@@ -515,112 +515,112 @@ define i64 @S2_asr_r_p_or(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asr.r.p.or(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: |= asr({{.*}}, r4)
+; CHECK: |= asr({{.*}},r4)
declare i64 @llvm.hexagon.S2.lsr.r.p.or(i64, i64, i32)
define i64 @S2_lsr_r_p_or(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsr.r.p.or(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: |= lsr({{.*}}, r4)
+; CHECK: |= lsr({{.*}},r4)
declare i64 @llvm.hexagon.S2.asl.r.p.or(i64, i64, i32)
define i64 @S2_asl_r_p_or(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asl.r.p.or(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: |= asl({{.*}}, r4)
+; CHECK: |= asl({{.*}},r4)
declare i64 @llvm.hexagon.S2.lsl.r.p.or(i64, i64, i32)
define i64 @S2_lsl_r_p_or(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsl.r.p.or(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: |= lsl({{.*}}, r4)
+; CHECK: |= lsl({{.*}},r4)
declare i64 @llvm.hexagon.S2.asr.r.p.and(i64, i64, i32)
define i64 @S2_asr_r_p_and(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asr.r.p.and(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: &= asr({{.*}}, r4)
+; CHECK: &= asr({{.*}},r4)
declare i64 @llvm.hexagon.S2.lsr.r.p.and(i64, i64, i32)
define i64 @S2_lsr_r_p_and(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsr.r.p.and(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: &= lsr({{.*}}, r4)
+; CHECK: &= lsr({{.*}},r4)
declare i64 @llvm.hexagon.S2.asl.r.p.and(i64, i64, i32)
define i64 @S2_asl_r_p_and(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asl.r.p.and(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: &= asl({{.*}}, r4)
+; CHECK: &= asl({{.*}},r4)
declare i64 @llvm.hexagon.S2.lsl.r.p.and(i64, i64, i32)
define i64 @S2_lsl_r_p_and(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsl.r.p.and(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: &= lsl({{.*}}, r4)
+; CHECK: &= lsl({{.*}},r4)
declare i32 @llvm.hexagon.S2.asr.r.r.or(i32, i32, i32)
define i32 @S2_asr_r_r_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asr.r.r.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: |= asr({{.*}}, {{.*}})
+; CHECK: |= asr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsr.r.r.or(i32, i32, i32)
define i32 @S2_lsr_r_r_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsr.r.r.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: |= lsr({{.*}}, {{.*}})
+; CHECK: |= lsr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.asl.r.r.or(i32, i32, i32)
define i32 @S2_asl_r_r_or(i32%a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asl.r.r.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: |= asl({{.*}}, {{.*}})
+; CHECK: |= asl({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsl.r.r.or(i32, i32, i32)
define i32 @S2_lsl_r_r_or(i32%a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsl.r.r.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: |= lsl({{.*}}, {{.*}})
+; CHECK: |= lsl({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.asr.r.r.and(i32, i32, i32)
define i32 @S2_asr_r_r_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asr.r.r.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: &= asr({{.*}}, {{.*}})
+; CHECK: &= asr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsr.r.r.and(i32, i32, i32)
define i32 @S2_lsr_r_r_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsr.r.r.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: &= lsr({{.*}}, {{.*}})
+; CHECK: &= lsr({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.asl.r.r.and(i32, i32, i32)
define i32 @S2_asl_r_r_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asl.r.r.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: &= asl({{.*}}, {{.*}})
+; CHECK: &= asl({{.*}},{{.*}})
declare i32 @llvm.hexagon.S2.lsl.r.r.and(i32, i32, i32)
define i32 @S2_lsl_r_r_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsl.r.r.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: &= lsl({{.*}}, {{.*}})
+; CHECK: &= lsl({{.*}},{{.*}})
; Shift by register with saturation
declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32)
@@ -628,14 +628,14 @@ define i32 @S2_asr_r_r_sat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = asr({{.*}}, {{.*}}):sat
+; CHECK: = asr({{.*}},{{.*}}):sat
declare i32 @llvm.hexagon.S2.asl.r.r.sat(i32, i32)
define i32 @S2_asl_r_r_sat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = asl({{.*}}, {{.*}}):sat
+; CHECK: = asl({{.*}},{{.*}}):sat
; Vector shift halfwords by immediate
declare i64 @llvm.hexagon.S2.asr.i.vh(i64, i32)
@@ -643,21 +643,21 @@ define i64 @S2_asr_i_vh(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asr.i.vh(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = vasrh({{.*}}, #0)
+; CHECK: = vasrh({{.*}},#0)
declare i64 @llvm.hexagon.S2.lsr.i.vh(i64, i32)
define i64 @S2_lsr_i_vh(i64 %a) {
%z = call i64 @llvm.hexagon.S2.lsr.i.vh(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = vlsrh({{.*}}, #0)
+; CHECK: = vlsrh({{.*}},#0)
declare i64 @llvm.hexagon.S2.asl.i.vh(i64, i32)
define i64 @S2_asl_i_vh(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asl.i.vh(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = vaslh({{.*}}, #0)
+; CHECK: = vaslh({{.*}},#0)
; Vector shift halfwords by register
declare i64 @llvm.hexagon.S2.asr.r.vh(i64, i32)
@@ -665,28 +665,28 @@ define i64 @S2_asr_r_vh(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.asr.r.vh(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vasrh({{.*}}, {{.*}})
+; CHECK: = vasrh({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.lsr.r.vh(i64, i32)
define i64 @S2_lsr_r_vh(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.r.vh(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vlsrh({{.*}}, {{.*}})
+; CHECK: = vlsrh({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.asl.r.vh(i64, i32)
define i64 @S2_asl_r_vh(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.asl.r.vh(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vaslh({{.*}}, {{.*}})
+; CHECK: = vaslh({{.*}},{{.*}})
declare i64 @llvm.hexagon.S2.lsl.r.vh(i64, i32)
define i64 @S2_lsl_r_vh(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.lsl.r.vh(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vlslh({{.*}}, {{.*}})
+; CHECK: = vlslh({{.*}},{{.*}})
; Vector shift words by immediate
declare i64 @llvm.hexagon.S2.asr.i.vw(i64, i32)
@@ -694,21 +694,21 @@ define i64 @S2_asr_i_vw(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asr.i.vw(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = vasrw({{.*}}, #0)
+; CHECK: = vasrw({{.*}},#0)
declare i64 @llvm.hexagon.S2.lsr.i.vw(i64, i32)
define i64 @S2_lsr_i_vw(i64 %a) {
%z = call i64 @llvm.hexagon.S2.lsr.i.vw(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = vlsrw({{.*}}, #0)
+; CHECK: = vlsrw({{.*}},#0)
declare i64 @llvm.hexagon.S2.asl.i.vw(i64, i32)
define i64 @S2_asl_i_vw(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asl.i.vw(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: = vaslw({{.*}}, #0)
+; CHECK: = vaslw({{.*}},#0)
; Vector shift words with truncate and pack
declare i32 @llvm.hexagon.S2.asr.i.svw.trun(i64, i32)
@@ -716,11 +716,11 @@ define i32 @S2_asr_i_svw_trun(i64 %a) {
%z = call i32 @llvm.hexagon.S2.asr.i.svw.trun(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: = vasrw({{.*}}, #0)
+; CHECK: = vasrw({{.*}},#0)
declare i32 @llvm.hexagon.S2.asr.r.svw.trun(i64, i32)
define i32 @S2_asr_r_svw_trun(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.r.svw.trun(i64 %a, i32 %b)
ret i32 %z
}
-; CHECK: = vasrw({{.*}}, {{.*}})
+; CHECK: = vasrw({{.*}},{{.*}})
diff --git a/test/CodeGen/Hexagon/isel-exti1.ll b/test/CodeGen/Hexagon/isel-exti1.ll
new file mode 100644
index 000000000000..b49986628e4e
--- /dev/null
+++ b/test/CodeGen/Hexagon/isel-exti1.ll
@@ -0,0 +1,22 @@
+; RUN: llc -O0 -march=hexagon < %s | FileCheck %s
+
+; CHECK-LABEL: sexti1
+; CHECK: r[[REG:[0-9]+]] = mux(p{{[0-3]}},#-1,#0)
+; CHECK: combine(r[[REG]],r[[REG]])
+define i64 @sexti1(i64 %a0, i64 %a1) {
+entry:
+ %t0 = icmp ult i64 %a0, %a1
+ %t1 = sext i1 %t0 to i64
+ ret i64 %t1
+}
+
+; CHECK-LABEL: zexti1
+; CHECK: r[[REG:[0-9]+]] = mux(p{{[0-3]}},#1,#0)
+; CHECK: combine(#0,r[[REG]])
+define i64 @zexti1(i64 %a0, i64 %a1) {
+entry:
+ %t0 = icmp ult i64 %a0, %a1
+ %t1 = zext i1 %t0 to i64
+ ret i64 %t1
+}
+
diff --git a/test/CodeGen/Hexagon/isel-i1arg-crash.ll b/test/CodeGen/Hexagon/isel-i1arg-crash.ll
new file mode 100644
index 000000000000..7e8bd9e93b27
--- /dev/null
+++ b/test/CodeGen/Hexagon/isel-i1arg-crash.ll
@@ -0,0 +1,6 @@
+; RUN: llc -march=hexagon -debug-only=isel < %s
+; REQUIRES: asserts
+
+define void @g(i1 %cond) {
+ ret void
+}
diff --git a/test/CodeGen/Hexagon/isel-op-zext-i1.ll b/test/CodeGen/Hexagon/isel-op-zext-i1.ll
new file mode 100644
index 000000000000..d77d0929e21f
--- /dev/null
+++ b/test/CodeGen/Hexagon/isel-op-zext-i1.ll
@@ -0,0 +1,13 @@
+; RUN: llc -march=hexagon -hexagon-expand-condsets=0 < %s | FileCheck %s
+
+; In the IR, the i1 value is zero-extended first, then passed to add.
+; Check that in the final code, the mux happens after the add.
+; CHECK: [[REG1:r[0-9]+]] = add([[REG0:r[0-9]+]],#1)
+; CHECK: r{{[0-9]+}} = mux(p{{[0-3]}},[[REG1]],[[REG0]])
+
+define i32 @foo(i32 %a, i32 %b) {
+ %v0 = icmp eq i32 %a, %b
+ %v1 = zext i1 %v0 to i32
+ %v2 = add i32 %v1, %a
+ ret i32 %v2
+}
diff --git a/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll b/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll
new file mode 100644
index 000000000000..db850950fd53
--- /dev/null
+++ b/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll
@@ -0,0 +1,36 @@
+; Check for recognizing the "memmove" idiom.
+; RUN: opt -basicaa -hexagon-loop-idiom -S -mtriple hexagon-unknown-elf < %s \
+; RUN: | FileCheck %s
+; CHECK: call void @llvm.memmove
+
+; Function Attrs: norecurse nounwind
+define void @foo(i32* nocapture %A, i32* nocapture readonly %B, i32 %n) #0 {
+entry:
+ %cmp1 = icmp sgt i32 %n, 0
+ br i1 %cmp1, label %for.body.preheader, label %for.end
+
+for.body.preheader: ; preds = %entry
+ %arrayidx.gep = getelementptr i32, i32* %B, i32 0
+ %arrayidx1.gep = getelementptr i32, i32* %A, i32 0
+ br label %for.body
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %arrayidx.phi = phi i32* [ %arrayidx.gep, %for.body.preheader ], [ %arrayidx.inc, %for.body ]
+ %arrayidx1.phi = phi i32* [ %arrayidx1.gep, %for.body.preheader ], [ %arrayidx1.inc, %for.body ]
+ %i.02 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
+ %0 = load i32, i32* %arrayidx.phi, align 4
+ store i32 %0, i32* %arrayidx1.phi, align 4
+ %inc = add nuw nsw i32 %i.02, 1
+ %exitcond = icmp ne i32 %inc, %n
+ %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
+ %arrayidx1.inc = getelementptr i32, i32* %arrayidx1.phi, i32 1
+ br i1 %exitcond, label %for.body, label %for.end.loopexit
+
+for.end.loopexit: ; preds = %for.body
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %entry
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll b/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll
new file mode 100644
index 000000000000..b9747a887a59
--- /dev/null
+++ b/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll
@@ -0,0 +1,36 @@
+; RUN: opt -basicaa -hexagon-loop-idiom -S -mtriple hexagon-unknown-elf < %s \
+; RUN: | FileCheck %s
+
+define void @PR14241(i32* %s, i64 %size) #0 {
+; Ensure that we don't form a memcpy for strided loops. Briefly, when we taught
+; LoopIdiom about memmove and strided loops, this got miscompiled into a memcpy
+; instead of a memmove. If we get the memmove transform back, this will catch
+; regressions.
+;
+; CHECK-LABEL: @PR14241(
+
+entry:
+ %end.idx = add i64 %size, -1
+ %end.ptr = getelementptr inbounds i32, i32* %s, i64 %end.idx
+ br label %while.body
+; CHECK-NOT: memcpy
+; CHECK: memmove
+
+while.body:
+ %phi.ptr = phi i32* [ %s, %entry ], [ %next.ptr, %while.body ]
+ %src.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 1
+ %val = load i32, i32* %src.ptr, align 4
+; CHECK: load
+ %dst.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 0
+ store i32 %val, i32* %dst.ptr, align 4
+; CHECK: store
+ %next.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 1
+ %cmp = icmp eq i32* %next.ptr, %end.ptr
+ br i1 %cmp, label %exit, label %while.body
+
+exit:
+ ret void
+; CHECK: ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/Hexagon/loop-idiom/lcssa.ll b/test/CodeGen/Hexagon/loop-idiom/lcssa.ll
new file mode 100644
index 000000000000..48632fde1368
--- /dev/null
+++ b/test/CodeGen/Hexagon/loop-idiom/lcssa.ll
@@ -0,0 +1,46 @@
+; RUN: opt -hexagon-loop-idiom -loop-deletion -gvn -S < %s
+; REQUIRES: asserts
+
+; This tests that the HexagonLoopIdiom pass does not mark LCSSA information
+; as preserved. The pass calls SimplifyInstruction in a couple of places,
+; which can invalidate LCSSA. Specifically, the uses of an LCSSA phi variable
+; are replaced by the incoming value.
+
+define hidden void @test() local_unnamed_addr #0 {
+entry:
+ br label %if.then63
+
+if.then63:
+ br i1 undef, label %do.body311, label %if.end375
+
+do.body311:
+ br i1 undef, label %do.end318, label %do.body311
+
+do.end318:
+ br i1 undef, label %if.end322, label %if.end375
+
+if.end322:
+ %sub325 = sub i32 undef, undef
+ br i1 undef, label %do.end329, label %do.body311
+
+do.end329:
+ %sub325.lcssa = phi i32 [ %sub325, %if.end322 ]
+ br label %do.body330
+
+do.body330:
+ %row_width.7 = phi i32 [ %sub325.lcssa, %do.end329 ], [ %dec334, %do.body330 ]
+ %sp.5 = phi i8* [ undef, %do.end329 ], [ %incdec.ptr331, %do.body330 ]
+ %dp.addr.5 = phi i8* [ undef, %do.end329 ], [ %incdec.ptr332, %do.body330 ]
+ %0 = load i8, i8* %sp.5, align 1
+ store i8 %0, i8* %dp.addr.5, align 1
+ %incdec.ptr332 = getelementptr inbounds i8, i8* %dp.addr.5, i32 1
+ %incdec.ptr331 = getelementptr inbounds i8, i8* %sp.5, i32 1
+ %dec334 = add i32 %row_width.7, -1
+ %cmp335 = icmp eq i32 %dec334, 0
+ br i1 %cmp335, label %if.end375, label %do.body330
+
+if.end375:
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll b/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll
new file mode 100644
index 000000000000..591683291982
--- /dev/null
+++ b/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll
@@ -0,0 +1,24 @@
+; RUN: opt -basicaa -hexagon-loop-idiom -mtriple hexagon-unknown-elf < %s
+; REQUIRES: asserts
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @fred(i8 zeroext %L) #0 {
+entry:
+ br i1 undef, label %if.end53, label %while.body37
+
+while.body37: ; preds = %while.body37, %entry
+ %i.121 = phi i32 [ %inc46, %while.body37 ], [ 0, %entry ]
+ %shl = shl i32 1, %i.121
+ %and39 = and i32 %shl, undef
+ %tobool40 = icmp eq i32 %and39, 0
+ %inc46 = add nuw nsw i32 %i.121, 1
+ %storemerge = select i1 %tobool40, i8 %L, i8 0
+ br i1 undef, label %while.body37, label %if.end53
+
+if.end53: ; preds = %while.body37, %entry
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll b/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll
new file mode 100644
index 000000000000..f738282c0f1b
--- /dev/null
+++ b/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll
@@ -0,0 +1,83 @@
+; RUN: opt -march=hexagon -hexagon-loop-idiom -S < %s | FileCheck %s
+; CHECK-LABEL: define void @fred
+
+; Check that this test does not crash.
+
+target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
+target triple = "hexagon"
+
+%struct.0 = type { [120 x i16], [80 x i16], [80 x i16], [80 x i16], [80 x i16], [80 x i16], [40 x i16], [40 x i16], [40 x i16], [40 x i16], [40 x i16], [40 x i16] }
+
+define void @fred(%struct.0* %demod_state) local_unnamed_addr #0 {
+entry:
+ br label %for.body309
+
+for.body309: ; preds = %for.body309, %entry
+ %max_diff.0300 = phi i16 [ %max_diff.1, %for.body309 ], [ 0, %entry ]
+ %arrayidx322.phi = phi i16* [ undef, %entry ], [ %arrayidx322.inc, %for.body309 ]
+ %arrayidx331.phi = phi i16* [ undef, %entry ], [ %arrayidx331.inc, %for.body309 ]
+ %lag.4299.apmt = phi i32 [ %inc376.apmt, %for.body309 ], [ 0, %entry ]
+ %0 = load i16, i16* %arrayidx322.phi, align 2
+ %conv323 = sext i16 %0 to i32
+ %sub324 = sub nsw i32 0, %conv323
+ %ispos258 = icmp sgt i32 %sub324, -1
+ %1 = select i1 %ispos258, i32 %sub324, i32 0
+ %add326 = add nsw i32 %1, 0
+ %2 = load i16, i16* %arrayidx331.phi, align 2
+ %conv332 = sext i16 %2 to i32
+ %sub333 = sub nsw i32 0, %conv332
+ %ispos260 = icmp sgt i32 %sub333, -1
+ %3 = select i1 %ispos260, i32 %sub333, i32 undef
+ %sub342 = sub nsw i32 0, %conv323
+ %ispos262 = icmp sgt i32 %sub342, -1
+ %4 = select i1 %ispos262, i32 %sub342, i32 undef
+ %sub351 = sub nsw i32 0, %conv332
+ %ispos264 = icmp sgt i32 %sub351, -1
+ %5 = select i1 %ispos264, i32 %sub351, i32 0
+ %sub360 = sub nsw i32 %conv323, %conv332
+ %ispos266 = icmp sgt i32 %sub360, -1
+ %6 = select i1 %ispos266, i32 %sub360, i32 0
+ %add335 = add nsw i32 %add326, %4
+ %add344 = add nsw i32 %add335, %3
+ %add353 = add i32 %add344, %5
+ %add362 = add i32 %add353, %6
+ %div363 = sdiv i32 %add362, 6
+ %conv364 = trunc i32 %div363 to i16
+ %sext268 = shl i32 %div363, 16
+ %conv369 = ashr exact i32 %sext268, 16
+ %conv370 = sext i16 %max_diff.0300 to i32
+ %cmp371 = icmp sgt i32 %conv369, %conv370
+ %max_diff.1 = select i1 %cmp371, i16 %conv364, i16 %max_diff.0300
+ %inc376.apmt = add nuw nsw i32 %lag.4299.apmt, 1
+ %exitcond331 = icmp ne i32 %inc376.apmt, 40
+ %arrayidx322.inc = getelementptr i16, i16* %arrayidx322.phi, i32 1
+ %arrayidx331.inc = getelementptr i16, i16* %arrayidx331.phi, i32 1
+ br i1 %exitcond331, label %for.body309, label %for.end377
+
+for.end377: ; preds = %for.body309
+ %max_diff.1.lcssa = phi i16 [ %max_diff.1, %for.body309 ]
+ %cmp407 = icmp sgt i16 %max_diff.1.lcssa, 4
+ br label %for.body405
+
+for.body405: ; preds = %if.end437, %for.end377
+ %arrayidx412 = getelementptr inbounds %struct.0, %struct.0* %demod_state, i32 0, i32 11, i32 undef
+ br i1 %cmp407, label %if.then409, label %if.end437
+
+if.then409: ; preds = %for.body405
+ %arrayidx416 = getelementptr inbounds [40 x i16], [40 x i16]* null, i32 0, i32 undef
+ %7 = load i16, i16* %arrayidx416, align 2
+ %conv417 = sext i16 %7 to i32
+ %shl = shl i32 %conv417, 4
+ %mul419 = mul nsw i32 %shl, 655
+ %add420 = add nsw i32 %mul419, 0
+ br label %if.end437
+
+if.end437: ; preds = %if.then409, %for.body405
+ %mul431.sink = phi i32 [ %add420, %if.then409 ], [ undef, %for.body405 ]
+ %shr432257 = lshr i32 %mul431.sink, 15
+ %conv433 = trunc i32 %shr432257 to i16
+ store i16 %conv433, i16* %arrayidx412, align 2
+ br label %for.body405
+}
+
+attributes #0 = { noinline nounwind "target-cpu"="hexagonv60" "target-features"="-hvx-double,-long-calls" }
diff --git a/test/CodeGen/Hexagon/loop-idiom/pmpy-mod.ll b/test/CodeGen/Hexagon/loop-idiom/pmpy-mod.ll
new file mode 100644
index 000000000000..9907ae71c992
--- /dev/null
+++ b/test/CodeGen/Hexagon/loop-idiom/pmpy-mod.ll
@@ -0,0 +1,84 @@
+; Run -O2 to make sure that all the usual optimizations do happen before
+; the Hexagon loop idiom recognition runs. This is to check that we still
+; get this opportunity regardless of what happens before.
+
+; RUN: opt -O2 -march=hexagon -S < %s | FileCheck %s
+
+target triple = "hexagon"
+target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
+
+; CHECK-LABEL: define zeroext i16 @pmpy_mod_lsr
+; There need to be two pmpy instructions.
+; CHECK: call i64 @llvm.hexagon.M4.pmpyw
+; CHECK: call i64 @llvm.hexagon.M4.pmpyw
+
+define zeroext i16 @pmpy_mod_lsr(i8 zeroext %a0, i16 zeroext %a1) #0 {
+b2:
+ br label %b3
+
+b3: ; preds = %b44, %b2
+ %v4 = phi i8 [ %a0, %b2 ], [ %v19, %b44 ]
+ %v5 = phi i16 [ %a1, %b2 ], [ %v43, %b44 ]
+ %v6 = phi i8 [ 0, %b2 ], [ %v45, %b44 ]
+ %v7 = zext i8 %v6 to i32
+ %v8 = icmp slt i32 %v7, 8
+ br i1 %v8, label %b9, label %b46
+
+b9: ; preds = %b3
+ %v10 = zext i8 %v4 to i32
+ %v11 = and i32 %v10, 1
+ %v12 = trunc i16 %v5 to i8
+ %v13 = zext i8 %v12 to i32
+ %v14 = and i32 %v13, 1
+ %v15 = xor i32 %v11, %v14
+ %v16 = trunc i32 %v15 to i8
+ %v17 = zext i8 %v4 to i32
+ %v18 = ashr i32 %v17, 1
+ %v19 = trunc i32 %v18 to i8
+ %v20 = zext i8 %v16 to i32
+ %v21 = icmp eq i32 %v20, 1
+ br i1 %v21, label %b22, label %b26
+
+b22: ; preds = %b9
+ %v23 = zext i16 %v5 to i32
+ %v24 = xor i32 %v23, 16386
+ %v25 = trunc i32 %v24 to i16
+ br label %b27
+
+b26: ; preds = %b9
+ br label %b27
+
+b27: ; preds = %b26, %b22
+ %v28 = phi i16 [ %v25, %b22 ], [ %v5, %b26 ]
+ %v29 = phi i8 [ 1, %b22 ], [ 0, %b26 ]
+ %v30 = zext i16 %v28 to i32
+ %v31 = ashr i32 %v30, 1
+ %v32 = trunc i32 %v31 to i16
+ %v33 = icmp ne i8 %v29, 0
+ br i1 %v33, label %b34, label %b38
+
+b34: ; preds = %b27
+ %v35 = zext i16 %v32 to i32
+ %v36 = or i32 %v35, 32768
+ %v37 = trunc i32 %v36 to i16
+ br label %b42
+
+b38: ; preds = %b27
+ %v39 = zext i16 %v32 to i32
+ %v40 = and i32 %v39, 32767
+ %v41 = trunc i32 %v40 to i16
+ br label %b42
+
+b42: ; preds = %b38, %b34
+ %v43 = phi i16 [ %v37, %b34 ], [ %v41, %b38 ]
+ br label %b44
+
+b44: ; preds = %b42
+ %v45 = add i8 %v6, 1
+ br label %b3
+
+b46: ; preds = %b3
+ ret i16 %v5
+}
+
+attributes #0 = { noinline nounwind "target-cpu"="hexagonv5" "target-features"="-hvx,-hvx-double,-long-calls" }
diff --git a/test/CodeGen/Hexagon/loop-idiom/pmpy.ll b/test/CodeGen/Hexagon/loop-idiom/pmpy.ll
new file mode 100644
index 000000000000..781618e58901
--- /dev/null
+++ b/test/CodeGen/Hexagon/loop-idiom/pmpy.ll
@@ -0,0 +1,33 @@
+; RUN: opt -hexagon-loop-idiom < %s -mtriple=hexagon-unknown-unknown -S \
+; RUN: | FileCheck %s
+
+target triple = "hexagon"
+
+; CHECK: define i64 @basic_pmpy
+; CHECK: llvm.hexagon.M4.pmpyw
+define i64 @basic_pmpy(i32 %P, i32 %Q) #0 {
+entry:
+ %conv = zext i32 %Q to i64
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i.07 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %R.06 = phi i64 [ 0, %entry ], [ %xor.R.06, %for.body ]
+ %shl = shl i32 1, %i.07
+ %and = and i32 %shl, %P
+ %tobool = icmp eq i32 %and, 0
+ %sh_prom = zext i32 %i.07 to i64
+ %shl1 = shl i64 %conv, %sh_prom
+ %xor = xor i64 %shl1, %R.06
+ %xor.R.06 = select i1 %tobool, i64 %R.06, i64 %xor
+ %inc = add nuw nsw i32 %i.07, 1
+ %exitcond = icmp ne i32 %inc, 32
+ br i1 %exitcond, label %for.body, label %for.end
+
+for.end: ; preds = %for.body
+ %R.1.lcssa = phi i64 [ %xor.R.06, %for.body ]
+ ret i64 %R.1.lcssa
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/Hexagon/memops-stack.ll b/test/CodeGen/Hexagon/memops-stack.ll
index a8dc664591e9..1aa2e30ea25b 100644
--- a/test/CodeGen/Hexagon/memops-stack.ll
+++ b/test/CodeGen/Hexagon/memops-stack.ll
@@ -9,13 +9,13 @@ define void @test0() #0 {
entry:
%x = alloca i32, align 4
%0 = bitcast i32* %x to i8*
- call void @llvm.lifetime.start(i64 4, i8* %0) #3
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
call void @foo(i32* nonnull %x) #3
%1 = load i32, i32* %x, align 4, !tbaa !1
%inc = add nsw i32 %1, 1
store i32 %inc, i32* %x, align 4, !tbaa !1
call void @foo(i32* nonnull %x) #3
- call void @llvm.lifetime.end(i64 4, i8* %0) #3
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
ret void
}
@@ -25,13 +25,13 @@ define void @test1() #0 {
entry:
%x = alloca i32, align 4
%0 = bitcast i32* %x to i8*
- call void @llvm.lifetime.start(i64 4, i8* %0) #3
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
call void @foo(i32* nonnull %x) #3
%1 = load i32, i32* %x, align 4, !tbaa !1
%inc = sub nsw i32 %1, 1
store i32 %inc, i32* %x, align 4, !tbaa !1
call void @foo(i32* nonnull %x) #3
- call void @llvm.lifetime.end(i64 4, i8* %0) #3
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
ret void
}
@@ -41,13 +41,13 @@ define void @test2() #0 {
entry:
%x = alloca i32, align 4
%0 = bitcast i32* %x to i8*
- call void @llvm.lifetime.start(i64 4, i8* %0) #3
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
call void @foo(i32* nonnull %x) #3
%1 = load i32, i32* %x, align 4, !tbaa !1
%inc = or i32 %1, 1
store i32 %inc, i32* %x, align 4, !tbaa !1
call void @foo(i32* nonnull %x) #3
- call void @llvm.lifetime.end(i64 4, i8* %0) #3
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
ret void
}
@@ -57,13 +57,13 @@ define void @test3() #0 {
entry:
%x = alloca i32, align 4
%0 = bitcast i32* %x to i8*
- call void @llvm.lifetime.start(i64 4, i8* %0) #3
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
call void @foo(i32* nonnull %x) #3
%1 = load i32, i32* %x, align 4, !tbaa !1
%inc = and i32 %1, -2
store i32 %inc, i32* %x, align 4, !tbaa !1
call void @foo(i32* nonnull %x) #3
- call void @llvm.lifetime.end(i64 4, i8* %0) #3
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
ret void
}
@@ -73,13 +73,13 @@ define void @test4(i32 %a) #0 {
entry:
%x = alloca i32, align 4
%0 = bitcast i32* %x to i8*
- call void @llvm.lifetime.start(i64 4, i8* %0) #3
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
call void @foo(i32* nonnull %x) #3
%1 = load i32, i32* %x, align 4, !tbaa !1
%inc = add nsw i32 %1, %a
store i32 %inc, i32* %x, align 4, !tbaa !1
call void @foo(i32* nonnull %x) #3
- call void @llvm.lifetime.end(i64 4, i8* %0) #3
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
ret void
}
@@ -89,13 +89,13 @@ define void @test5(i32 %a) #0 {
entry:
%x = alloca i32, align 4
%0 = bitcast i32* %x to i8*
- call void @llvm.lifetime.start(i64 4, i8* %0) #3
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
call void @foo(i32* nonnull %x) #3
%1 = load i32, i32* %x, align 4, !tbaa !1
%inc = sub nsw i32 %1, %a
store i32 %inc, i32* %x, align 4, !tbaa !1
call void @foo(i32* nonnull %x) #3
- call void @llvm.lifetime.end(i64 4, i8* %0) #3
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
ret void
}
@@ -105,13 +105,13 @@ define void @test6(i32 %a) #0 {
entry:
%x = alloca i32, align 4
%0 = bitcast i32* %x to i8*
- call void @llvm.lifetime.start(i64 4, i8* %0) #3
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
call void @foo(i32* nonnull %x) #3
%1 = load i32, i32* %x, align 4, !tbaa !1
%inc = or i32 %1, %a
store i32 %inc, i32* %x, align 4, !tbaa !1
call void @foo(i32* nonnull %x) #3
- call void @llvm.lifetime.end(i64 4, i8* %0) #3
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
ret void
}
@@ -121,20 +121,20 @@ define void @test7(i32 %a) #0 {
entry:
%x = alloca i32, align 4
%0 = bitcast i32* %x to i8*
- call void @llvm.lifetime.start(i64 4, i8* %0) #3
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
call void @foo(i32* nonnull %x) #3
%1 = load i32, i32* %x, align 4, !tbaa !1
%inc = and i32 %1, %a
store i32 %inc, i32* %x, align 4, !tbaa !1
call void @foo(i32* nonnull %x) #3
- call void @llvm.lifetime.end(i64 4, i8* %0) #3
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
ret void
}
declare void @foo(i32*) #2
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
attributes #0 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv60" "target-features"="+hvx,-hvx-double" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind }
diff --git a/test/CodeGen/Hexagon/newvalueSameReg.ll b/test/CodeGen/Hexagon/newvalueSameReg.ll
index 0fc4df22eb32..39f32fb2f9d5 100644
--- a/test/CodeGen/Hexagon/newvalueSameReg.ll
+++ b/test/CodeGen/Hexagon/newvalueSameReg.ll
@@ -12,8 +12,8 @@
; Test that we don't generate a new value compare if the operands are
; the same register.
-; CHECK-NOT: cmp.eq([[REG0:(r[0-9]+)]].new, [[REG0]])
-; CHECK: cmp.eq([[REG1:(r[0-9]+)]], [[REG1]])
+; CHECK-NOT: cmp.eq([[REG0:(r[0-9]+)]].new,[[REG0]])
+; CHECK: cmp.eq([[REG1:(r[0-9]+)]],[[REG1]])
; Function Attrs: nounwind
declare void @fprintf(%struct._Dnk_filet.1* nocapture, i8* nocapture readonly, ...) #1
diff --git a/test/CodeGen/Hexagon/newvaluejump.ll b/test/CodeGen/Hexagon/newvaluejump.ll
index 3e1ee179573a..e1437f369c88 100644
--- a/test/CodeGen/Hexagon/newvaluejump.ll
+++ b/test/CodeGen/Hexagon/newvaluejump.ll
@@ -6,7 +6,7 @@
define i32 @foo(i32 %a) nounwind {
entry:
-; CHECK: if (cmp.eq(r{{[0-9]+}}.new, #0)) jump{{.}}
+; CHECK: if (cmp.eq(r{{[0-9]+}}.new,#0)) jump{{.}}
%addr1 = alloca i32, align 4
%addr2 = alloca i32, align 4
%0 = load i32, i32* @i, align 4
diff --git a/test/CodeGen/Hexagon/newvaluejump2.ll b/test/CodeGen/Hexagon/newvaluejump2.ll
index a812a7d96659..4c897f0830f3 100644
--- a/test/CodeGen/Hexagon/newvaluejump2.ll
+++ b/test/CodeGen/Hexagon/newvaluejump2.ll
@@ -6,7 +6,7 @@
@Reg = common global i32 0, align 4
define i32 @main() nounwind {
entry:
-; CHECK: if (cmp.gt(r{{[0-9]+}}, r{{[0-9]+}}.new)) jump:{{[t|nt]}} .LBB{{[0-9]+}}_{{[0-9]+}}
+; CHECK: if (cmp.gt(r{{[0-9]+}},r{{[0-9]+}}.new)) jump:{{[t|nt]}} .LBB{{[0-9]+}}_{{[0-9]+}}
%Reg2 = alloca i32, align 4
%0 = load i32, i32* %Reg2, align 4
%1 = load i32, i32* @Reg, align 4
diff --git a/test/CodeGen/Hexagon/newvaluejump3.ll b/test/CodeGen/Hexagon/newvaluejump3.ll
new file mode 100644
index 000000000000..1e2e6c28c849
--- /dev/null
+++ b/test/CodeGen/Hexagon/newvaluejump3.ll
@@ -0,0 +1,79 @@
+; RUN: llc -march=hexagon -filetype=obj -o /dev/null < %s
+; REQUIRES: asserts
+
+; This crashed in the MC code emitter, because a new-value branch was created
+; with IMPLICIT_DEF as the producer.
+
+target triple = "hexagon"
+
+%type.0 = type { %type.1, [64 x i8] }
+%type.1 = type { [12 x i8], %type.2*, double }
+%type.2 = type { i16, i16, [1 x %type.3] }
+%type.3 = type { i32 }
+%type.4 = type { %type.2*, i32 }
+
+define hidden fastcc i8* @fred(%type.0* nocapture readonly %a0, i8* readonly %a1) unnamed_addr #2 {
+b2:
+ %v3 = load i8, i8* %a1, align 1
+ br i1 undef, label %b4, label %b24
+
+b4: ; preds = %b2
+ switch i8 %v3, label %b13 [
+ i8 25, label %b5
+ i8 26, label %b6
+ i8 28, label %b8
+ i8 27, label %b9
+ i8 43, label %b11
+ i8 110, label %b12
+ ]
+
+b5: ; preds = %b4
+ unreachable
+
+b6: ; preds = %b4
+ %v7 = getelementptr inbounds i8, i8* %a1, i32 2
+ br label %b16
+
+b8: ; preds = %b4
+ br label %b16
+
+b9: ; preds = %b4
+ %v10 = tail call fastcc i8* @fred(%type.0* undef, i8* undef)
+ br label %b24
+
+b11: ; preds = %b4
+ unreachable
+
+b12: ; preds = %b4
+ unreachable
+
+b13: ; preds = %b4
+ br label %b14
+
+b14: ; preds = %b13
+ br i1 undef, label %b15, label %b16
+
+b15: ; preds = %b14
+ unreachable
+
+b16: ; preds = %b20, %b14, %b8, %b6
+ %v17 = phi i8* [ %v21, %b20 ], [ undef, %b14 ], [ undef, %b8 ], [ %v7, %b6 ]
+ %v18 = phi i32 [ 0, %b20 ], [ undef, %b14 ], [ 0, %b8 ], [ 8, %b6 ]
+ %v19 = icmp sgt i32 %v18, 0
+ br i1 %v19, label %b20, label %b24
+
+b20: ; preds = %b16
+ %v21 = getelementptr inbounds i8, i8* %v17, i32 1
+ %v22 = load i8, i8* %v17, align 1
+ %v23 = icmp eq i8 %v22, undef
+ br i1 %v23, label %b16, label %b24
+
+b24: ; preds = %b20, %b16, %b9, %b2
+ %v25 = phi i8* [ null, %b2 ], [ null, %b9 ], [ %v17, %b16 ], [ null, %b20 ]
+ ret i8* %v25
+}
+
+attributes #0 = { argmemonly nounwind }
+attributes #1 = { nounwind readonly "target-cpu"="hexagonv60" "target-features"="+hvx,-hvx-double,-long-calls" }
+attributes #2 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,-hvx-double,-long-calls" }
+
diff --git a/test/CodeGen/Hexagon/opt-addr-mode.ll b/test/CodeGen/Hexagon/opt-addr-mode.ll
index 7cb437c327cf..705cd045ea30 100644
--- a/test/CodeGen/Hexagon/opt-addr-mode.ll
+++ b/test/CodeGen/Hexagon/opt-addr-mode.ll
@@ -2,10 +2,10 @@
; RUN: llc -march=hexagon -hexagon-small-data-threshold=0 -disable-hexagon-amodeopt=0 -hexagon-amode-growth-limit=4 < %s | FileCheck %s --check-prefix=CHECK-AMODE
; CHECK-NO-AMODE: [[REG0:(r[0-9]+)]] = ##global_2
-; CHECK-NO-AMODE: memw([[REG0]] + {{.*}}<<#2) =
+; CHECK-NO-AMODE: memw([[REG0]]+{{.*}}<<#2) =
; CHECK-AMODE: [[REG1:(r[0-9]+)]] = memw(##global_1)
-; CHECK-AMODE: memw([[REG1]]<<#2 + ##global_2) =
+; CHECK-AMODE: memw([[REG1]]<<#2+##global_2) =
@global_1 = external global i32, align 4
@global_2 = external global [128 x i32], align 8
diff --git a/test/CodeGen/Hexagon/opt-fabs.ll b/test/CodeGen/Hexagon/opt-fabs.ll
index 2ecbce310ade..9c94f853ba50 100644
--- a/test/CodeGen/Hexagon/opt-fabs.ll
+++ b/test/CodeGen/Hexagon/opt-fabs.ll
@@ -1,7 +1,7 @@
; RUN: llc -mtriple=hexagon-unknown-elf -mcpu=hexagonv5 -hexagon-bit=0 < %s | FileCheck %s
; Optimize fabsf to clrbit in V5.
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#31)
define float @my_fabsf(float %x) nounwind {
entry:
diff --git a/test/CodeGen/Hexagon/opt-fneg.ll b/test/CodeGen/Hexagon/opt-fneg.ll
index 978957865863..da496c588019 100644
--- a/test/CodeGen/Hexagon/opt-fneg.ll
+++ b/test/CodeGen/Hexagon/opt-fneg.ll
@@ -3,7 +3,7 @@
define float @foo(float %x) nounwind {
entry:
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#31)
%x.addr = alloca float, align 4
store float %x, float* %x.addr, align 4
%0 = load float, float* %x.addr, align 4
@@ -13,14 +13,14 @@ entry:
define float @bar(float %x) nounwind {
entry:
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#31)
%sub = fsub float -0.000000e+00, %x
ret float %sub
}
define float @baz(float %x) nounwind {
entry:
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#31)
%conv1 = fmul float %x, -1.000000e+00
ret float %conv1
}
diff --git a/test/CodeGen/Hexagon/opt-spill-volatile.ll b/test/CodeGen/Hexagon/opt-spill-volatile.ll
index 99dd4646d743..1c86716132fd 100644
--- a/test/CodeGen/Hexagon/opt-spill-volatile.ll
+++ b/test/CodeGen/Hexagon/opt-spill-volatile.ll
@@ -6,22 +6,22 @@ target triple = "hexagon"
; CHECK-LABEL: foo
; CHECK: memw(r29+#4) =
-; CHECK: = memw(r29 + #4)
+; CHECK: = memw(r29+#4)
define i32 @foo(i32 %a) #0 {
entry:
%x = alloca i32, align 4
%x.0.x.0..sroa_cast = bitcast i32* %x to i8*
- call void @llvm.lifetime.start(i64 4, i8* %x.0.x.0..sroa_cast)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %x.0.x.0..sroa_cast)
store volatile i32 0, i32* %x, align 4
%call = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #0
%x.0.x.0. = load volatile i32, i32* %x, align 4
%add = add nsw i32 %x.0.x.0., %a
- call void @llvm.lifetime.end(i64 4, i8* %x.0.x.0..sroa_cast)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %x.0.x.0..sroa_cast)
ret i32 %add
}
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
declare i32 @bar(...) #0
diff --git a/test/CodeGen/Hexagon/pic-local.ll b/test/CodeGen/Hexagon/pic-local.ll
index 48b0096aa652..6544b3d32165 100644
--- a/test/CodeGen/Hexagon/pic-local.ll
+++ b/test/CodeGen/Hexagon/pic-local.ll
@@ -9,11 +9,11 @@ define internal void @f2() {
}
define void()* @get_f1() {
- ; CHECK: r0 = add(pc, ##.Lf1@PCREL)
+ ; CHECK: r0 = add(pc,##.Lf1@PCREL)
ret void()* @f1
}
define void()* @get_f2() {
- ; CHECK: r0 = add(pc, ##f2@PCREL)
+ ; CHECK: r0 = add(pc,##f2@PCREL)
ret void()* @f2
}
diff --git a/test/CodeGen/Hexagon/pic-simple.ll b/test/CodeGen/Hexagon/pic-simple.ll
index 46d95204f2e7..aeb21ef7de1c 100644
--- a/test/CodeGen/Hexagon/pic-simple.ll
+++ b/test/CodeGen/Hexagon/pic-simple.ll
@@ -1,8 +1,8 @@
; RUN: llc -mtriple=hexagon-- -mcpu=hexagonv5 -relocation-model=pic < %s | FileCheck %s
-; CHECK: r{{[0-9]+}} = add({{pc|PC}}, ##_GLOBAL_OFFSET_TABLE_@PCREL)
-; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}{{.*}}+{{.*}}##src@GOT)
-; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}{{.*}}+{{.*}}##dst@GOT)
+; CHECK: r{{[0-9]+}} = add({{pc|PC}},##_GLOBAL_OFFSET_TABLE_@PCREL)
+; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}+##src@GOT)
+; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}+##dst@GOT)
@dst = external global i32
@src = external global i32
diff --git a/test/CodeGen/Hexagon/pic-static.ll b/test/CodeGen/Hexagon/pic-static.ll
index 66d7734f2cf2..95da5f060d72 100644
--- a/test/CodeGen/Hexagon/pic-static.ll
+++ b/test/CodeGen/Hexagon/pic-static.ll
@@ -1,8 +1,8 @@
; RUN: llc -mtriple=hexagon-- -mcpu=hexagonv5 -relocation-model=pic < %s | FileCheck %s
-; CHECK-DAG: r{{[0-9]+}} = add({{pc|PC}}, ##_GLOBAL_OFFSET_TABLE_@PCREL)
-; CHECK-DAG: r{{[0-9]+}} = add({{pc|PC}}, ##x@PCREL)
-; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}{{.*}}+{{.*}}##bar@GOT)
+; CHECK-DAG: r{{[0-9]+}} = add({{pc|PC}},##_GLOBAL_OFFSET_TABLE_@PCREL)
+; CHECK-DAG: r{{[0-9]+}} = add({{pc|PC}},##x@PCREL)
+; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}+##bar@GOT)
@x = internal global i32 9, align 4
@bar = external global i32*
diff --git a/test/CodeGen/Hexagon/pred-absolute-store.ll b/test/CodeGen/Hexagon/pred-absolute-store.ll
index 3e5e98270d53..2f19e9aeb7bb 100644
--- a/test/CodeGen/Hexagon/pred-absolute-store.ll
+++ b/test/CodeGen/Hexagon/pred-absolute-store.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
-; Check that we are able to predicate instructions with abosolute
+; Check that we are able to predicate instructions with absolute
; addressing mode.
-; CHECK: if ({{!*}}p{{[0-2]}}.new) memw(##gvar) = r{{[0-9]+}}
+; CHECK: if ({{!?}}p{{[0-3]}}) memw(##gvar) = r{{[0-9]+}}
@gvar = external global i32
define i32 @test2(i32 %a, i32 %b) nounwind {
diff --git a/test/CodeGen/Hexagon/predicate-logical.ll b/test/CodeGen/Hexagon/predicate-logical.ll
index be2bcb03d6a1..e3ba4d8643db 100644
--- a/test/CodeGen/Hexagon/predicate-logical.ll
+++ b/test/CodeGen/Hexagon/predicate-logical.ll
@@ -1,5 +1,5 @@
; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
-; CHECK: p{{[0-9]}} = or(p{{[0-9]}}, and(p{{[0-9]}}, p{{[0-9]}}))
+; CHECK: p{{[0-9]}} = or(p{{[0-9]}},and(p{{[0-9]}},p{{[0-9]}}))
target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/predicate-rcmp.ll b/test/CodeGen/Hexagon/predicate-rcmp.ll
index 45daa88d7161..78991e0dbe70 100644
--- a/test/CodeGen/Hexagon/predicate-rcmp.ll
+++ b/test/CodeGen/Hexagon/predicate-rcmp.ll
@@ -1,5 +1,5 @@
; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
-; CHECK: cmp.eq(r{{[0-9]+}}, #0)
+; CHECK: cmp.eq(r{{[0-9]+}},#0)
; Check that the result of the builtin is not stored directly, i.e. that
; there is an instruction that converts it to {0,1} from {0,-1}. Right now
; the instruction is "r4 = !cmp.eq(r0, #0)".
diff --git a/test/CodeGen/Hexagon/rdf-copy-undef2.ll b/test/CodeGen/Hexagon/rdf-copy-undef2.ll
index 5f29d414ffc1..28bf4c67cd75 100644
--- a/test/CodeGen/Hexagon/rdf-copy-undef2.ll
+++ b/test/CodeGen/Hexagon/rdf-copy-undef2.ll
@@ -3,8 +3,8 @@
target triple = "hexagon"
-declare void @llvm.lifetime.start(i64, i8* nocapture) #0
-declare void @llvm.lifetime.end(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
declare signext i16 @cat(i16 signext) #1
declare void @danny(i16 signext, i16 signext, i16 signext, i16* nocapture readonly, i16 signext, i16* nocapture) #1
declare void @sammy(i16* nocapture readonly, i16* nocapture readonly, i16* nocapture readonly, i32* nocapture, i16* nocapture, i16 signext, i16 signext, i16 signext) #1
diff --git a/test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll b/test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll
index 7adf7e8a5355..222d8a2b2e14 100644
--- a/test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll
+++ b/test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll
@@ -13,18 +13,18 @@ define i32 @foo(i32 %status) #0 {
entry:
%arg1 = alloca i32, align 4
%0 = bitcast i32* %arg1 to i8*
- call void @llvm.lifetime.start(i64 4, i8* %0) #2
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #2
store i32 %status, i32* %arg1, align 4, !tbaa !1
%1 = call i32 asm sideeffect "r0 = #$1\0Ar1 = $2\0Ar2 = $4\0Atrap0 (#0)\0A$0 = r0", "=r,i,r,*m,r,~{r0},~{r1},~{r2}"(i32 24, i32* nonnull %arg1, i32* nonnull %arg1, i32 %status) #2, !srcloc !5
- call void @llvm.lifetime.end(i64 4, i8* %0) #2
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #2
ret i32 %1
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
attributes #0 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv5" "target-features"="-hvx,-hvx-double" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind }
diff --git a/test/CodeGen/Hexagon/rdf-phi-up.ll b/test/CodeGen/Hexagon/rdf-phi-up.ll
index 28f4c90c174d..d4e726471238 100644
--- a/test/CodeGen/Hexagon/rdf-phi-up.ll
+++ b/test/CodeGen/Hexagon/rdf-phi-up.ll
@@ -7,8 +7,8 @@ target triple = "hexagon"
%struct.0 = type { i32, i16, i8* }
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
define i32 @fred(i8* readonly %p0, i32* %p1) local_unnamed_addr #0 {
entry:
@@ -32,7 +32,7 @@ if.then3: ; preds = %if.then
if.else: ; preds = %lor.lhs.false
%v6 = bitcast i16* %v0 to i8*
- call void @llvm.lifetime.start(i64 2, i8* nonnull %v6) #0
+ call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %v6) #0
store i16 0, i16* %v0, align 2
%v7 = call i32 @foo(%struct.0* nonnull %v3, i16* nonnull %v0) #0
%v8 = icmp eq i32* %p1, null
@@ -45,7 +45,7 @@ if.then6: ; preds = %if.else
br label %if.end7
if.end7: ; preds = %if.else, %if.then6
- call void @llvm.lifetime.end(i64 2, i8* nonnull %v6) #0
+ call void @llvm.lifetime.end.p0i8(i64 2, i8* nonnull %v6) #0
br label %cleanup
cleanup: ; preds = %if.then3, %if.then,
diff --git a/test/CodeGen/Hexagon/readcyclecounter.ll b/test/CodeGen/Hexagon/readcyclecounter.ll
new file mode 100644
index 000000000000..0a60c94b019c
--- /dev/null
+++ b/test/CodeGen/Hexagon/readcyclecounter.ll
@@ -0,0 +1,10 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; CHECK-LABEL: test_readcyclecounter
+; CHECK: r1:0 = c15:14
+define i64 @test_readcyclecounter() nounwind {
+ %t0 = call i64 @llvm.readcyclecounter()
+ ret i64 %t0
+}
+
+declare i64 @llvm.readcyclecounter()
diff --git a/test/CodeGen/Hexagon/regalloc-block-overlap.ll b/test/CodeGen/Hexagon/regalloc-block-overlap.ll
new file mode 100644
index 000000000000..c98fcb6a9f04
--- /dev/null
+++ b/test/CodeGen/Hexagon/regalloc-block-overlap.ll
@@ -0,0 +1,143 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Check for a sane output. This testcase used to cause a crash.
+; CHECK: vlut16
+
+target triple = "hexagon-unknown--elf"
+
+declare void @halide_malloc() local_unnamed_addr #0
+
+declare <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32>) #1
+declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #1
+declare <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32>, <32 x i32>) #1
+declare <32 x i32> @llvm.hexagon.V6.vmpyiewuh.128B(<32 x i32>, <32 x i32>) #1
+declare <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32>, <64 x i32>) #1
+declare <32 x i32> @llvm.hexagon.V6.vasrwhsat.128B(<32 x i32>, <32 x i32>, i32) #1
+declare <64 x i32> @llvm.hexagon.V6.vlutvwh.128B(<32 x i32>, <32 x i32>, i32) #1
+declare <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32>, <32 x i32>, <32 x i32>, i32) #1
+
+define hidden void @fred() #0 {
+b0:
+ %v1 = ashr i32 undef, 7
+ %v2 = shl nsw i32 %v1, 7
+ switch i32 undef, label %b7 [
+ i32 1, label %b3
+ i32 2, label %b5
+ i32 3, label %b6
+ ]
+
+b3: ; preds = %b0
+ unreachable
+
+b4: ; preds = %b7
+ switch i32 undef, label %b9 [
+ i32 1, label %b8
+ i32 2, label %b10
+ i32 3, label %b11
+ ]
+
+b5: ; preds = %b0
+ unreachable
+
+b6: ; preds = %b0
+ unreachable
+
+b7: ; preds = %b0
+ br label %b4
+
+b8: ; preds = %b4
+ br label %b12
+
+b9: ; preds = %b4
+ br label %b12
+
+b10: ; preds = %b4
+ br label %b12
+
+b11: ; preds = %b4
+ br label %b12
+
+b12: ; preds = %b11, %b10, %b9, %b8
+ br label %b13
+
+b13: ; preds = %b14, %b12
+ br label %b14
+
+b14: ; preds = %b13
+ br i1 undef, label %b15, label %b13
+
+b15: ; preds = %b14
+ br label %b16
+
+b16: ; preds = %b15
+ br i1 undef, label %b17, label %b18
+
+b17: ; preds = %b16
+ unreachable
+
+b18: ; preds = %b16
+ tail call void @halide_malloc()
+ br label %b19
+
+b19: ; preds = %b18
+ br i1 undef, label %b20, label %b21
+
+b20: ; preds = %b19
+ br label %b32
+
+b21: ; preds = %b38, %b19
+ %v22 = zext i32 %v2 to i64
+ %v23 = lshr i64 %v22, 31
+ %v24 = shl nuw nsw i64 %v23, 1
+ %v25 = or i64 %v24, 0
+ %v26 = icmp ult i64 undef, 2147483648
+ %v27 = mul nuw nsw i64 %v25, 3
+ %v28 = add nuw nsw i64 %v27, 0
+ %v29 = and i64 %v28, 133143986176
+ %v30 = icmp eq i64 %v29, 0
+ %v31 = and i1 %v26, %v30
+ br label %b39
+
+b32: ; preds = %b20
+ %v33 = zext i32 %v2 to i64
+ %v34 = mul nuw nsw i64 %v33, 12
+ %v35 = icmp ult i64 %v34, 2147483648
+ %v36 = and i1 %v35, undef
+ br i1 %v36, label %b38, label %b37
+
+b37: ; preds = %b32
+ ret void
+
+b38: ; preds = %b32
+ tail call void @halide_malloc()
+ br label %b21
+
+b39: ; preds = %b42, %b21
+ br label %b40
+
+b40: ; preds = %b39
+ br i1 %v31, label %b42, label %b41
+
+b41: ; preds = %b40
+ unreachable
+
+b42: ; preds = %b40
+ %v43 = tail call <64 x i32> @llvm.hexagon.V6.vlutvwh.128B(<32 x i32> undef, <32 x i32> undef, i32 0)
+ %v44 = tail call <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32> %v43, <32 x i32> undef, <32 x i32> undef, i32 1)
+ %v45 = tail call <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32> %v44, <32 x i32> undef, <32 x i32> undef, i32 2)
+ %v46 = tail call <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32> %v45, <32 x i32> undef, <32 x i32> undef, i32 3)
+ %v47 = tail call <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32> %v46, <32 x i32> undef, <32 x i32> undef, i32 4)
+ %v48 = tail call <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32> %v47, <32 x i32> undef, <32 x i32> undef, i32 5)
+ %v49 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v48)
+ %v50 = tail call <32 x i32> @llvm.hexagon.V6.vmpyiewuh.128B(<32 x i32> undef, <32 x i32> %v49) #2
+ %v51 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> undef, <32 x i32> %v50) #2
+ %v52 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v51, <64 x i32> undef) #2
+ %v53 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v52) #2
+ %v54 = tail call <32 x i32> @llvm.hexagon.V6.vasrwhsat.128B(<32 x i32> %v53, <32 x i32> undef, i32 15) #2
+ store <32 x i32> %v54, <32 x i32>* undef, align 128
+ br label %b39
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx-double" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
diff --git a/test/CodeGen/Hexagon/ret-struct-by-val.ll b/test/CodeGen/Hexagon/ret-struct-by-val.ll
index 26ed2ff36f77..60a97bcccfc5 100644
--- a/test/CodeGen/Hexagon/ret-struct-by-val.ll
+++ b/test/CodeGen/Hexagon/ret-struct-by-val.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: r0 = add(r0, r1)
+; CHECK: r0 = add(r0,r1)
; Allow simple structures to be returned by value.
diff --git a/test/CodeGen/Hexagon/runtime-stkchk.ll b/test/CodeGen/Hexagon/runtime-stkchk.ll
index a4e8f117679e..38aa8726d19c 100644
--- a/test/CodeGen/Hexagon/runtime-stkchk.ll
+++ b/test/CodeGen/Hexagon/runtime-stkchk.ll
@@ -6,12 +6,12 @@ define i32 @foo_1(i32 %n) #0 {
entry:
%local = alloca [1024 x i32], align 8
%0 = bitcast [1024 x i32]* %local to i8*
- call void @llvm.lifetime.start(i64 4096, i8* %0) #1
+ call void @llvm.lifetime.start.p0i8(i64 4096, i8* %0) #1
%arraydecay = getelementptr inbounds [1024 x i32], [1024 x i32]* %local, i32 0, i32 0
call void @baz_1(i32* %arraydecay) #3
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* %local, i32 0, i32 %n
%1 = load i32, i32* %arrayidx, align 4
- call void @llvm.lifetime.end(i64 4096, i8* %0) #1
+ call void @llvm.lifetime.end.p0i8(i64 4096, i8* %0) #1
ret i32 %1
}
@@ -21,21 +21,21 @@ define i32 @foo_2(i32 %n, i32* %y) #0 {
entry:
%local = alloca [2048 x i32], align 8
%0 = bitcast [2048 x i32]* %local to i8*
- call void @llvm.lifetime.start(i64 8192, i8* %0) #1
+ call void @llvm.lifetime.start.p0i8(i64 8192, i8* %0) #1
%arraydecay = getelementptr inbounds [2048 x i32], [2048 x i32]* %local, i32 0, i32 0
call void @baz_2(i32* %y, i32* %arraydecay) #3
%1 = load i32, i32* %y, align 4
%add = add nsw i32 %n, %1
%arrayidx = getelementptr inbounds [2048 x i32], [2048 x i32]* %local, i32 0, i32 %add
%2 = load i32, i32* %arrayidx, align 4
- call void @llvm.lifetime.end(i64 8192, i8* %0) #1
+ call void @llvm.lifetime.end.p0i8(i64 8192, i8* %0) #1
ret i32 %2
}
declare void @baz_1(i32*) #2
declare void @baz_2(i32*, i32*) #2
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }
diff --git a/test/CodeGen/Hexagon/section_7275.ll b/test/CodeGen/Hexagon/section_7275.ll
index c2b80ae3f69d..1806f1e9c844 100644
--- a/test/CodeGen/Hexagon/section_7275.ll
+++ b/test/CodeGen/Hexagon/section_7275.ll
@@ -8,13 +8,13 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
; CHECK-LABEL: foo
; CHECK-DAG: memw(##b)
-; CHECK-DAG: memw(#d)
+; CHECK-DAG: memw(gp+#d)
; CHECK-DAG: memw(##g)
-; CHECK-DAG: memw(#h)
-; CHECK-DAG: memw(#f)
+; CHECK-DAG: memw(gp+#h)
+; CHECK-DAG: memw(gp+#f)
; CHECK-DAG: memw(##e)
-; CHECK-DAG: memw(#a)
-; CHECK-DAG: memw(#c)
+; CHECK-DAG: memw(gp+#a)
+; CHECK-DAG: memw(gp+#c)
; CHECK-LABEL: bar
; CHECK: memw(##b)
diff --git a/test/CodeGen/Hexagon/signed_immediates.ll b/test/CodeGen/Hexagon/signed_immediates.ll
index a4766313cc68..ad4aa2596607 100644
--- a/test/CodeGen/Hexagon/signed_immediates.ll
+++ b/test/CodeGen/Hexagon/signed_immediates.ll
@@ -33,7 +33,7 @@ define i64* @foo4(i64* %a, i64 %b) {
}
; s6Ext
-; CHECK: if (p0.new) memw(r0+#0)=#-1
+; CHECK: if (p0.new) memw(r0+#0) = #-1
define void @foo5(i32* %a, i1 %b) {
br i1 %b, label %x, label %y
x:
@@ -44,7 +44,7 @@ y:
}
; s10Ext
-; CHECK: p0 = cmp.eq(r0, #-1)
+; CHECK: p0 = cmp.eq(r0,#-1)
define i1 @foo7(i32 %a) {
%b = icmp eq i32 %a, -1
ret i1 %b
@@ -96,4 +96,4 @@ y:
; CHECK: r0 = #-2
define i32 @foo13() {
ret i32 -2
-} \ No newline at end of file
+}
diff --git a/test/CodeGen/Hexagon/stack-align1.ll b/test/CodeGen/Hexagon/stack-align1.ll
index 4efa70f59854..aefd16594f06 100644
--- a/test/CodeGen/Hexagon/stack-align1.ll
+++ b/test/CodeGen/Hexagon/stack-align1.ll
@@ -1,7 +1,7 @@
; RUN: llc -O0 -march=hexagon < %s | FileCheck %s
-; CHECK: and(r29, #-32)
-; CHECK-DAG: add(r29, #0)
-; CHECK-DAG: add(r29, #28)
+; CHECK: and(r29,#-32)
+; CHECK-DAG: add(r29,#0)
+; CHECK-DAG: add(r29,#28)
target triple = "hexagon-unknown-unknown"
diff --git a/test/CodeGen/Hexagon/stack-align2.ll b/test/CodeGen/Hexagon/stack-align2.ll
index 1bbd57820325..042e4097c56a 100644
--- a/test/CodeGen/Hexagon/stack-align2.ll
+++ b/test/CodeGen/Hexagon/stack-align2.ll
@@ -1,9 +1,9 @@
; RUN: llc -O0 -march=hexagon < %s | FileCheck %s
-; CHECK: and(r29, #-128)
-; CHECK-DAG: add(r29, #0)
-; CHECK-DAG: add(r29, #64)
-; CHECK-DAG: add(r29, #96)
-; CHECK-DAG: add(r29, #124)
+; CHECK: and(r29,#-128)
+; CHECK-DAG: add(r29,#0)
+; CHECK-DAG: add(r29,#64)
+; CHECK-DAG: add(r29,#96)
+; CHECK-DAG: add(r29,#124)
target triple = "hexagon-unknown-unknown"
diff --git a/test/CodeGen/Hexagon/stack-alloca1.ll b/test/CodeGen/Hexagon/stack-alloca1.ll
index 00e9e051aebb..b38b8846d26f 100644
--- a/test/CodeGen/Hexagon/stack-alloca1.ll
+++ b/test/CodeGen/Hexagon/stack-alloca1.ll
@@ -1,5 +1,5 @@
; RUN: llc -O0 -march=hexagon < %s | FileCheck %s
-; CHECK: sub(r29, r[[REG:[0-9]+]])
+; CHECK: sub(r29,r[[REG:[0-9]+]])
; CHECK: r29 = r[[REG]]
target triple = "hexagon-unknown-unknown"
diff --git a/test/CodeGen/Hexagon/stack-alloca2.ll b/test/CodeGen/Hexagon/stack-alloca2.ll
index ad5e13166aa2..b211be0c0fff 100644
--- a/test/CodeGen/Hexagon/stack-alloca2.ll
+++ b/test/CodeGen/Hexagon/stack-alloca2.ll
@@ -1,8 +1,8 @@
; RUN: llc -O0 -march=hexagon < %s | FileCheck %s
-; CHECK-DAG: r[[AP:[0-9]+]] = and(r30, #-32)
-; CHECK-DAG: r1 = add(r[[AP]], #-32)
+; CHECK-DAG: r[[AP:[0-9]+]] = and(r30,#-32)
+; CHECK-DAG: r1 = add(r[[AP]],#-32)
-; CHECK-DAG: sub(r29, r[[SP:[0-9]+]])
+; CHECK-DAG: sub(r29,r[[SP:[0-9]+]])
; CHECK-DAG: r29 = r[[SP]]
target triple = "hexagon-unknown-unknown"
diff --git a/test/CodeGen/Hexagon/static.ll b/test/CodeGen/Hexagon/static.ll
index c3237b748881..15aab434158c 100644
--- a/test/CodeGen/Hexagon/static.ll
+++ b/test/CodeGen/Hexagon/static.ll
@@ -4,9 +4,9 @@
@acc = external global i32
@val = external global i32
-; CHECK-DAG: memw(#num)
-; CHECK-DAG: memw(#acc)
-; CHECK-DAG: memw(#val)
+; CHECK-DAG: memw(gp+#num)
+; CHECK-DAG: memw(gp+#acc)
+; CHECK-DAG: memw(gp+#val)
define void @foo() nounwind {
entry:
diff --git a/test/CodeGen/Hexagon/store-shift.ll b/test/CodeGen/Hexagon/store-shift.ll
index 866930990baa..981071a0181e 100644
--- a/test/CodeGen/Hexagon/store-shift.ll
+++ b/test/CodeGen/Hexagon/store-shift.ll
@@ -1,12 +1,12 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
; CHECK-DAG: r[[BASE:[0-9]+]] += add
-; CHECK-DAG: r[[IDX0:[0-9]+]] = add(r2, #5)
-; CHECK-DAG: r[[IDX1:[0-9]+]] = add(r2, #6)
-; CHECK-DAG: memw(r0 + r[[IDX0]]<<#2) = r3
-; CHECK-DAG: memw(r0 + r[[IDX1]]<<#2) = r3
-; CHECK-DAG: memw(r[[BASE]] + r[[IDX0]]<<#2) = r[[IDX0]]
-; CHECK-DAG: memw(r[[BASE]] + r[[IDX1]]<<#2) = r[[IDX0]]
+; CHECK-DAG: r[[IDX0:[0-9]+]] = add(r2,#5)
+; CHECK-DAG: r[[IDX1:[0-9]+]] = add(r2,#6)
+; CHECK-DAG: memw(r0+r[[IDX0]]<<#2) = r3
+; CHECK-DAG: memw(r0+r[[IDX1]]<<#2) = r3
+; CHECK-DAG: memw(r[[BASE]]+r[[IDX0]]<<#2) = r[[IDX0]]
+; CHECK-DAG: memw(r[[BASE]]+r[[IDX1]]<<#2) = r[[IDX0]]
target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/sube.ll b/test/CodeGen/Hexagon/sube.ll
index 7bc00759303f..2b09a998eff0 100644
--- a/test/CodeGen/Hexagon/sube.ll
+++ b/test/CodeGen/Hexagon/sube.ll
@@ -1,29 +1,26 @@
-; RUN: llc -march=hexagon -disable-hsdr -hexagon-expand-condsets=0 -hexagon-bit=0 -disable-post-ra < %s | FileCheck %s
+; RUN: llc -march=hexagon -hexagon-expand-condsets=0 < %s | FileCheck %s
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0, #0)
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0, #1)
-; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = sub(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = sub(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(r{{[0-9]+}}, r{{[0-9]+}})
+; CHECK-DAG: r{{[0-9]+:[0-9]+}} = sub(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK-DAG: r{{[0-9]+:[0-9]+}} = sub(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK-DAG: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK-DAG: r{{[0-9]+}} = mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}})
+; CHECK-DAG: r{{[0-9]+}} = mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}})
-define void @check_sube_subc(i64 %AL, i64 %AH, i64 %BL, i64 %BH, i64* %RL, i64* %RH) {
-entry:
- %tmp1 = zext i64 %AL to i128
- %tmp23 = zext i64 %AH to i128
- %tmp4 = shl i128 %tmp23, 64
- %tmp5 = or i128 %tmp4, %tmp1
- %tmp67 = zext i64 %BL to i128
- %tmp89 = zext i64 %BH to i128
- %tmp11 = shl i128 %tmp89, 64
- %tmp12 = or i128 %tmp11, %tmp67
- %tmp15 = sub i128 %tmp5, %tmp12
- %tmp1617 = trunc i128 %tmp15 to i64
- store i64 %tmp1617, i64* %RL
- %tmp21 = lshr i128 %tmp15, 64
- %tmp2122 = trunc i128 %tmp21 to i64
- store i64 %tmp2122, i64* %RH
- ret void
+define void @check_sube_subc(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64* %a4, i64* %a5) {
+b6:
+ %v7 = zext i64 %a0 to i128
+ %v8 = zext i64 %a1 to i128
+ %v9 = shl i128 %v8, 64
+ %v10 = or i128 %v7, %v9
+ %v11 = zext i64 %a2 to i128
+ %v12 = zext i64 %a3 to i128
+ %v13 = shl i128 %v12, 64
+ %v14 = or i128 %v11, %v13
+ %v15 = sub i128 %v10, %v14
+ %v16 = lshr i128 %v15, 64
+ %v17 = trunc i128 %v15 to i64
+ %v18 = trunc i128 %v16 to i64
+ store i64 %v17, i64* %a4
+ store i64 %v18, i64* %a5
+ ret void
}
diff --git a/test/CodeGen/Hexagon/subi-asl.ll b/test/CodeGen/Hexagon/subi-asl.ll
index f0b27e828f50..d7610ceb62ac 100644
--- a/test/CodeGen/Hexagon/subi-asl.ll
+++ b/test/CodeGen/Hexagon/subi-asl.ll
@@ -3,11 +3,11 @@
; Check if S4_subi_asl_ri is being generated correctly.
; CHECK-LABEL: yes_sub_asl
-; CHECK: [[REG1:(r[0-9]+)]] = sub(#0, asl([[REG1]], #1))
+; CHECK: [[REG1:(r[0-9]+)]] = sub(#0,asl([[REG1]],#1))
; CHECK-LABEL: no_sub_asl
-; CHECK: [[REG2:(r[0-9]+)]] = asl(r{{[0-9]+}}, #1)
-; CHECK: r{{[0-9]+}} = sub([[REG2]], r{{[0-9]+}})
+; CHECK: [[REG2:(r[0-9]+)]] = asl(r{{[0-9]+}},#1)
+; CHECK: r{{[0-9]+}} = sub([[REG2]],r{{[0-9]+}})
%struct.rtx_def = type { i16, i8 }
diff --git a/test/CodeGen/Hexagon/swp-const-tc.ll b/test/CodeGen/Hexagon/swp-const-tc.ll
index 3113094d2ba3..c07d23623eba 100644
--- a/test/CodeGen/Hexagon/swp-const-tc.ll
+++ b/test/CodeGen/Hexagon/swp-const-tc.ll
@@ -4,7 +4,7 @@
; of computing a new LC0 value.
; CHECK-LABEL: @test
-; CHECK: loop0(.LBB0_1, #998)
+; CHECK: loop0(.LBB0_1,#998)
define i32 @test(i32* %A, i32* %B, i32 %count) {
entry:
diff --git a/test/CodeGen/Hexagon/swp-matmul-bitext.ll b/test/CodeGen/Hexagon/swp-matmul-bitext.ll
index db5bb96d0bc9..9c425ae6a098 100644
--- a/test/CodeGen/Hexagon/swp-matmul-bitext.ll
+++ b/test/CodeGen/Hexagon/swp-matmul-bitext.ll
@@ -11,7 +11,7 @@
; CHECK: [[REG0:(r[0-9]+)]] = memh
; CHECK: [[REG1:(r[0-9]+)]] = memh
; CHECK: += mpyi
-; CHECK: [[REG2]] = mpyi([[REG0]], [[REG1]])
+; CHECK: [[REG2]] = mpyi([[REG0]],[[REG1]])
; CHECK: endloop0
%union_h2_sem_t = type { i32 }
diff --git a/test/CodeGen/Hexagon/swp-max.ll b/test/CodeGen/Hexagon/swp-max.ll
index 038138ff2561..26238ea6fb37 100644
--- a/test/CodeGen/Hexagon/swp-max.ll
+++ b/test/CodeGen/Hexagon/swp-max.ll
@@ -15,8 +15,8 @@ for.body.preheader:
; CHECK: loop0(.LBB0_[[LOOP:.]],
; CHECK: .LBB0_[[LOOP]]:
-; CHECK: [[REG1:(r[0-9]+)]] = max(r{{[0-9]+}}, [[REG1]])
-; CHECK: [[REG0:(r[0-9]+)]] = add([[REG2:(r[0-9]+)]], [[REG0]])
+; CHECK: [[REG1:(r[0-9]+)]] = max(r{{[0-9]+}},[[REG1]])
+; CHECK: [[REG0:(r[0-9]+)]] = add([[REG2:(r[0-9]+)]],[[REG0]])
; CHECK: [[REG2]] = memw
; CHECK: endloop0
diff --git a/test/CodeGen/Hexagon/swp-multi-loops.ll b/test/CodeGen/Hexagon/swp-multi-loops.ll
index 56e8c6511000..fc2576af8ac2 100644
--- a/test/CodeGen/Hexagon/swp-multi-loops.ll
+++ b/test/CodeGen/Hexagon/swp-multi-loops.ll
@@ -5,15 +5,15 @@
; Check if the first loop is pipelined.
; CHECK: loop0(.LBB0_[[LOOP:.]],
; CHECK: .LBB0_[[LOOP]]:
-; CHECK: add(r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK-NEXT: memw(r{{[0-9]+}}{{.*}}++{{.*}}#4)
+; CHECK: add(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK-NEXT: memw(r{{[0-9]+}}++#4)
; CHECK-NEXT: endloop0
; Check if the second loop is pipelined.
; CHECK: loop0(.LBB0_[[LOOP:.]],
; CHECK: .LBB0_[[LOOP]]:
-; CHECK: add(r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK-NEXT: memw(r{{[0-9]+}}{{.*}}++{{.*}}#4)
+; CHECK: add(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK-NEXT: memw(r{{[0-9]+}}++#4)
; CHECK-NEXT: endloop0
define i32 @test(i32* %a, i32 %n, i32 %l) {
diff --git a/test/CodeGen/Hexagon/swp-stages4.ll b/test/CodeGen/Hexagon/swp-stages4.ll
new file mode 100644
index 000000000000..f58e83203154
--- /dev/null
+++ b/test/CodeGen/Hexagon/swp-stages4.ll
@@ -0,0 +1,94 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 -enable-pipeliner -pipeliner-max-stages=2 -disable-block-placement=0 -hexagon-bit=0 < %s | FileCheck %s
+
+; Test that we rename registers correctly for multiple stages when there is a
+; Phi that depends upon another Phi.
+
+; CHECK: = and
+; CHECK: = and
+; CHECK: = and
+; CHECK: [[REG0:(r[0-9]+)]] = and([[REG1:(r[0-9]+)]],#255)
+; CHECK-NOT: [[REG0]] = and([[REG1]],#255)
+; CHECK: loop0(.LBB0_[[LOOP:.]],
+; CHECK: .LBB0_[[LOOP]]:
+; CHECK: [[REG0]] += add
+; CHECK: = and
+; CHECK: = and
+; CHECK: [[REG0]] = and
+; CHECK: endloop
+
+; Function Attrs: nounwind
+define void @test(i8* noalias nocapture %src, i32 %srcWidth, i32 %srcHeight, i32 %srcStride, i8* noalias nocapture %dst, i32 %dstStride) #0 {
+entry:
+ %sub = add i32 %srcWidth, -1
+ %sub1 = add i32 %srcHeight, -1
+ %add.ptr = getelementptr inbounds i8, i8* %src, i32 %srcStride
+ %add.ptr.sum = mul i32 %srcStride, 2
+ %add.ptr2 = getelementptr inbounds i8, i8* %src, i32 %add.ptr.sum
+ br label %for.body.lr.ph
+
+for.body.lr.ph:
+ %0 = add i32 %srcHeight, -2
+ %1 = mul i32 %0, %dstStride
+ %2 = mul i32 %0, %srcStride
+ %3 = mul i32 %sub1, %srcStride
+ br label %for.cond
+
+for.cond:
+ %scevgep = getelementptr i8, i8* %dst, i32 %1
+ %scevgep220 = getelementptr i8, i8* %src, i32 %2
+ %scevgep221 = getelementptr i8, i8* %src, i32 %3
+ %arrayidx6 = getelementptr inbounds i8, i8* %src, i32 1
+ %add11 = add i32 %srcStride, 1
+ %arrayidx12 = getelementptr inbounds i8, i8* %src, i32 %add11
+ br label %for.body75.preheader
+
+for.body75.preheader:
+ %sri = load i8, i8* %arrayidx6, align 1
+ %sri224 = load i8, i8* %src, align 1
+ %sri227 = load i8, i8* %arrayidx12, align 1
+ %sri229 = load i8, i8* %add.ptr, align 1
+ br label %for.body75
+
+for.body75:
+ %j.0211 = phi i32 [ %add82, %for.body75 ], [ 1, %for.body75.preheader ]
+ %sr = phi i8 [ %4, %for.body75 ], [ %sri, %for.body75.preheader ]
+ %sr225 = phi i8 [ %sr, %for.body75 ], [ %sri224, %for.body75.preheader ]
+ %sr230 = phi i8 [ %5, %for.body75 ], [ %sri227, %for.body75.preheader ]
+ %sr231 = phi i8 [ %sr230, %for.body75 ], [ %sri229, %for.body75.preheader ]
+ %conv78 = zext i8 %sr225 to i32
+ %conv80 = zext i8 %sr to i32
+ %add81 = add nsw i32 %conv80, %conv78
+ %add82 = add i32 %j.0211, 1
+ %arrayidx83 = getelementptr inbounds i8, i8* %src, i32 %add82
+ %4 = load i8, i8* %arrayidx83, align 1, !tbaa !0
+ %conv84 = zext i8 %4 to i32
+ %add85 = add nsw i32 %add81, %conv84
+ %conv88 = zext i8 %sr231 to i32
+ %add89 = add nsw i32 %add85, %conv88
+ %conv91 = zext i8 %sr230 to i32
+ %add92 = add nsw i32 %add89, %conv91
+ %add.ptr.sum208 = add i32 %add82, %srcStride
+ %arrayidx94 = getelementptr inbounds i8, i8* %src, i32 %add.ptr.sum208
+ %5 = load i8, i8* %arrayidx94, align 1, !tbaa !0
+ %conv95 = zext i8 %5 to i32
+ %add96 = add nsw i32 %add92, %conv95
+ %mul97 = mul nsw i32 %add96, 7282
+ %add98 = add nsw i32 %mul97, 32768
+ %shr99209 = lshr i32 %add98, 16
+ %conv100 = trunc i32 %shr99209 to i8
+ %arrayidx101 = getelementptr inbounds i8, i8* %dst, i32 %j.0211
+ store i8 %conv100, i8* %arrayidx101, align 1, !tbaa !0
+ %exitcond = icmp eq i32 %add82, %sub
+ br i1 %exitcond, label %for.end104.loopexit, label %for.body75
+
+for.end104.loopexit:
+ br label %for.end104
+
+for.end104:
+ ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!0 = !{!"omnipotent char", !1}
+!1 = !{!"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/Hexagon/swp-stages5.ll b/test/CodeGen/Hexagon/swp-stages5.ll
new file mode 100644
index 000000000000..fdfb2101cd36
--- /dev/null
+++ b/test/CodeGen/Hexagon/swp-stages5.ll
@@ -0,0 +1,78 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 -enable-pipeliner -pipeliner-max-stages=2 -hexagon-bit=0 < %s | FileCheck %s
+
+; Very similar to swp-stages4.ll, but the pipelined schedule is a little
+; different.
+
+; CHECK: = memub(r{{[0-9]+}}++#1)
+; CHECK-DAG: [[REG0:(r[0-9]+)]] = memub(r{{[0-9]+}}++#1)
+; CHECK-DAG: loop0(.LBB0_[[LOOP:.]],
+; CHECK: .LBB0_[[LOOP]]:
+; CHECK: = and([[REG0]],#255)
+; CHECK: [[REG0]]{{[:0-9]*}} =
+; CHECK: endloop
+
+define void @fred(i8* noalias nocapture %src, i32 %srcWidth, i32 %srcHeight, i32 %srcStride, i8* noalias nocapture %dst, i32 %dstStride) #0 {
+entry:
+ %sub = add i32 %srcWidth, -1
+ %sub1 = add i32 %srcHeight, -1
+ %add.ptr = getelementptr inbounds i8, i8* %src, i32 %srcStride
+ %add.ptr.sum = mul i32 %srcStride, 2
+ %add.ptr2 = getelementptr inbounds i8, i8* %src, i32 %add.ptr.sum
+ %cmp212 = icmp ugt i32 %sub1, 1
+ br i1 %cmp212, label %for.body.lr.ph, label %for.end
+
+for.body.lr.ph:
+ br label %for.body74.preheader
+
+for.body74.preheader:
+ %0 = load i8, i8* %add.ptr, align 1, !tbaa !0
+ %arrayidx40 = getelementptr inbounds i8, i8* %add.ptr, i32 1
+ %1 = load i8, i8* %arrayidx40, align 1, !tbaa !0
+ %2 = load i8, i8* %add.ptr, align 1, !tbaa !0
+ %arrayidx46 = getelementptr inbounds i8, i8* %add.ptr, i32 1
+ %3 = load i8, i8* %arrayidx46, align 1, !tbaa !0
+ br label %for.body74
+
+for.body74:
+ %4 = phi i8 [ %9, %for.body74 ], [ %3, %for.body74.preheader ]
+ %5 = phi i8 [ %4, %for.body74 ], [ %2, %for.body74.preheader ]
+ %6 = phi i8 [ %8, %for.body74 ], [ %1, %for.body74.preheader ]
+ %7 = phi i8 [ %6, %for.body74 ], [ %0, %for.body74.preheader ]
+ %j.0211 = phi i32 [ %add81, %for.body74 ], [ 1, %for.body74.preheader ]
+ %conv77 = zext i8 %7 to i32
+ %conv79 = zext i8 %6 to i32
+ %add80 = add nsw i32 %conv79, %conv77
+ %add81 = add i32 %j.0211, 1
+ %arrayidx82 = getelementptr inbounds i8, i8* %src, i32 %add81
+ %8 = load i8, i8* %arrayidx82, align 1, !tbaa !0
+ %conv83 = zext i8 %8 to i32
+ %add84 = add nsw i32 %add80, %conv83
+ %conv87 = zext i8 %5 to i32
+ %add88 = add nsw i32 %add84, %conv87
+ %conv90 = zext i8 %4 to i32
+ %add91 = add nsw i32 %add88, %conv90
+ %arrayidx93 = getelementptr inbounds i8, i8* %add.ptr, i32 %add81
+ %9 = load i8, i8* %arrayidx93, align 1, !tbaa !0
+ %conv94 = zext i8 %9 to i32
+ %add95 = add nsw i32 %add91, %conv94
+ %mul96 = mul nsw i32 %add95, 7282
+ %add97 = add nsw i32 %mul96, 32768
+ %shr98208 = lshr i32 %add97, 16
+ %conv99 = trunc i32 %shr98208 to i8
+ %add.ptr5.sum209 = add i32 %j.0211, %dstStride
+ %arrayidx100 = getelementptr inbounds i8, i8* %dst, i32 %add.ptr5.sum209
+ store i8 %conv99, i8* %arrayidx100, align 1, !tbaa !0
+ %exitcond = icmp eq i32 %add81, %sub
+ br i1 %exitcond, label %for.end103.loopexit, label %for.body74
+
+for.end103.loopexit:
+ br label %for.end
+
+for.end:
+ ret void
+}
+
+attributes #0 = { nounwind }
+
+!0 = !{!"omnipotent char", !1}
+!1 = !{!"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/Hexagon/swp-vmult.ll b/test/CodeGen/Hexagon/swp-vmult.ll
index 9018405274cd..7c53248f47fc 100644
--- a/test/CodeGen/Hexagon/swp-vmult.ll
+++ b/test/CodeGen/Hexagon/swp-vmult.ll
@@ -2,10 +2,10 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 -O3 < %s | FileCheck %s
; Multiply and accumulate
-; CHECK: mpyi([[REG0:r([0-9]+)]], [[REG1:r([0-9]+)]])
-; CHECK-NEXT: add(r{{[0-9]+}}, #4)
-; CHECK-NEXT: [[REG0]] = memw(r{{[0-9]+}} + r{{[0-9]+}}<<#0)
-; CHECK-NEXT: [[REG1]] = memw(r{{[0-9]+}} + r{{[0-9]+}}<<#0)
+; CHECK: mpyi([[REG0:r([0-9]+)]],[[REG1:r([0-9]+)]])
+; CHECK-NEXT: add(r{{[0-9]+}},#4)
+; CHECK-NEXT: [[REG0]] = memw(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
+; CHECK-NEXT: [[REG1]] = memw(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
; CHECK-NEXT: endloop0
define i32 @foo(i32* %a, i32* %b, i32 %n) {
diff --git a/test/CodeGen/Hexagon/swp-vsum.ll b/test/CodeGen/Hexagon/swp-vsum.ll
index 4756c644709f..3561997450de 100644
--- a/test/CodeGen/Hexagon/swp-vsum.ll
+++ b/test/CodeGen/Hexagon/swp-vsum.ll
@@ -4,9 +4,9 @@
; Simple vector total.
; CHECK: loop0(.LBB0_[[LOOP:.]],
; CHECK: .LBB0_[[LOOP]]:
-; CHECK: add([[REG:r([0-9]+)]], r{{[0-9]+}})
-; CHECK-NEXT: add(r{{[0-9]+}}, #4)
-; CHECK-NEXT: [[REG]] = memw(r{{[0-9]+}} + r{{[0-9]+}}<<#0)
+; CHECK: add([[REG:r([0-9]+)]],r{{[0-9]+}})
+; CHECK-NEXT: add(r{{[0-9]+}},#4)
+; CHECK-NEXT: [[REG]] = memw(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
; CHECK-NEXT: endloop0
define i32 @foo(i32* %a, i32 %n) {
diff --git a/test/CodeGen/Hexagon/tail-dup-subreg-map.ll b/test/CodeGen/Hexagon/tail-dup-subreg-map.ll
index 08dadeb9aaa4..1b11d087832a 100644
--- a/test/CodeGen/Hexagon/tail-dup-subreg-map.ll
+++ b/test/CodeGen/Hexagon/tail-dup-subreg-map.ll
@@ -5,7 +5,7 @@
; subregisters were dropped by the tail duplicator, resulting in invalid
; COPY instructions being generated.
-; CHECK: = extractu(r{{[0-9]+}}, #15, #17)
+; CHECK: = extractu(r{{[0-9]+}},#15,#17)
target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/tfr-to-combine.ll b/test/CodeGen/Hexagon/tfr-to-combine.ll
index 1b82f3e4562e..50879ffe582d 100644
--- a/test/CodeGen/Hexagon/tfr-to-combine.ll
+++ b/test/CodeGen/Hexagon/tfr-to-combine.ll
@@ -8,7 +8,7 @@
; Function Attrs: nounwind
define i64 @test1() #0 {
-; CHECK: combine(#10, #0)
+; CHECK: combine(#10,#0)
entry:
store i16 0, i16* @a, align 2
store i16 10, i16* @b, align 2
@@ -17,7 +17,7 @@ entry:
; Function Attrs: nounwind
define i64 @test2() #0 {
-; CHECK: combine(#0, r{{[0-9]+}})
+; CHECK: combine(#0,r{{[0-9]+}})
entry:
store i16 0, i16* @a, align 2
%0 = load i16, i16* @c, align 2
@@ -27,7 +27,7 @@ entry:
; Function Attrs: nounwind
define i64 @test4() #0 {
-; CHECK: combine(#0, #100)
+; CHECK: combine(#0,#100)
entry:
store i16 100, i16* @b, align 2
store i16 0, i16* @a, align 2
diff --git a/test/CodeGen/Hexagon/tls_pic.ll b/test/CodeGen/Hexagon/tls_pic.ll
index 190e1d71d39b..2c2be0dc384a 100644
--- a/test/CodeGen/Hexagon/tls_pic.ll
+++ b/test/CodeGen/Hexagon/tls_pic.ll
@@ -4,7 +4,7 @@
@src_ie = thread_local(initialexec) global i32 0, align 4
; CHECK-LABEL: test_initial_exec
-; CHECK-DAG: = add(pc, ##_GLOBAL_OFFSET_TABLE_@PCREL)
+; CHECK-DAG: = add(pc,##_GLOBAL_OFFSET_TABLE_@PCREL)
; CHECK-DAG: = ##src_ie@IEGOT
; CHECK-DAG: = ##dst_ie@IEGOT
; CHECK-NOT: call
@@ -22,7 +22,7 @@ entry:
; general-dynamic model.
; CHECK-LABEL: test_dynamic
-; CHECK-DAG: = add(pc, ##_GLOBAL_OFFSET_TABLE_@PCREL)
+; CHECK-DAG: = add(pc,##_GLOBAL_OFFSET_TABLE_@PCREL)
; CHECK-DAG: = ##src_gd@GDGOT
; CHECK-DAG: = ##dst_gd@GDGOT
; CHECK-DAG: call src_gd@GDPLT
diff --git a/test/CodeGen/Hexagon/two-crash.ll b/test/CodeGen/Hexagon/two-crash.ll
index 0ab02cda8a07..7e79cb3be912 100644
--- a/test/CodeGen/Hexagon/two-crash.ll
+++ b/test/CodeGen/Hexagon/two-crash.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
; This testcase crashed, because we propagated a reg:sub into a tied use.
; The two-address pass rewrote it in a way that generated incorrect code.
-; CHECK: r{{[0-9]+}} += lsr(r{{[0-9]+}}, #16)
+; CHECK: r{{[0-9]+}} += lsr(r{{[0-9]+}},#16)
target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/undo-dag-shift.ll b/test/CodeGen/Hexagon/undo-dag-shift.ll
new file mode 100644
index 000000000000..c1ab5d73f5c3
--- /dev/null
+++ b/test/CodeGen/Hexagon/undo-dag-shift.ll
@@ -0,0 +1,59 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; DAG combiner folds sequences of shifts, which can sometimes obscure
+; optimization opportunities. For example
+;
+; unsigned int c(unsigned int b, unsigned int *a) {
+; unsigned int bitidx = b >> 5;
+; return a[bitidx];
+; }
+;
+; produces
+; (add x (shl (srl y 5) 2))
+; which is then folded into
+; (add x (and (srl y 3) 1FFFFFFC))
+;
+; That results in a constant-extended and:
+; r0 = and(##536870908,lsr(r0,#3))
+; r0 = memw(r1+r0<<#0)
+; whereas
+; r0 = lsr(r0,#5)
+; r0 = memw(r1+r0<<#2)
+; is more desirable.
+
+target triple = "hexagon"
+
+; CHECK-LABEL: load_0
+; CHECK: memw(r{{[0-9]+}}+r{{[0-9]}}<<#2)
+define i32 @load_0(i32 %b, i32* nocapture readonly %a) #0 {
+entry:
+ %shr = lshr i32 %b, 5
+ %arrayidx = getelementptr inbounds i32, i32* %a, i32 %shr
+ %0 = load i32, i32* %arrayidx, align 4
+ ret i32 %0
+}
+
+; This would require r0<<#3, which is not legal.
+; CHECK-LABEL: load_1
+; CHECK: memw(r{{[0-9]+}}+r{{[0-9]}}<<#0)
+define i32 @load_1(i32 %b, [3 x i32]* nocapture readonly %a) #0 {
+entry:
+ %shr = lshr i32 %b, 5
+ %arrayidx = getelementptr inbounds [3 x i32], [3 x i32]* %a, i32 %shr, i32 0
+ %0 = load i32, i32* %arrayidx, align 4
+ ret i32 %0
+}
+
+; CHECK-LABEL: store_0
+; CHECK: memw(r{{[0-9]+}}+r{{[0-9]}}<<#2)
+define void @store_0(i32 %b, i32* nocapture %a, i32 %v) #1 {
+entry:
+ %shr = lshr i32 %b, 5
+ %arrayidx = getelementptr inbounds i32, i32* %a, i32 %shr
+ store i32 %v, i32* %arrayidx, align 4
+ ret void
+}
+
+attributes #0 = { norecurse nounwind readonly "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" }
+attributes #1 = { norecurse nounwind "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" }
+
diff --git a/test/CodeGen/Hexagon/vaddh.ll b/test/CodeGen/Hexagon/vaddh.ll
index 88194b750ad5..a4fb33de4ac5 100644
--- a/test/CodeGen/Hexagon/vaddh.ll
+++ b/test/CodeGen/Hexagon/vaddh.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
-; CHECK: vaddh(r{{[0-9]+}}, r{{[0-9]+}})
+; CHECK: vaddh(r{{[0-9]+}},r{{[0-9]+}})
@j = external global i32
@k = external global i32
diff --git a/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll b/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll
index 70c4aeb4bac0..4bba134a40cb 100644
--- a/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll
+++ b/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr < %s | FileCheck %s
; This one should generate a combine with two immediates.
-; CHECK: combine(#7, #7)
+; CHECK: combine(#7,#7)
@B = common global [400 x i32] zeroinitializer, align 8
@A = common global [400 x i32] zeroinitializer, align 8
@C = common global [400 x i32] zeroinitializer, align 8
diff --git a/test/CodeGen/Hexagon/vect/vect-loadv4i16.ll b/test/CodeGen/Hexagon/vect/vect-loadv4i16.ll
index 91b32652400f..f49a1e24a1bb 100644
--- a/test/CodeGen/Hexagon/vect/vect-loadv4i16.ll
+++ b/test/CodeGen/Hexagon/vect/vect-loadv4i16.ll
@@ -1,8 +1,8 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr < %s | FileCheck %s
; Check that store is post-incremented.
-; CHECK: memuh(r{{[0-9]+}} + {{ *}}#6{{ *}})
-; CHECK: combine(r{{[0-9]+}}{{ *}},{{ *}}r{{[0-9]+}}{{ *}})
+; CHECK: memuh(r{{[0-9]+}}+#6)
+; CHECK: combine(r{{[0-9]+}},r{{[0-9]+}})
; CHECK: vaddh
target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
diff --git a/test/CodeGen/Hexagon/vect/vect-shift-imm.ll b/test/CodeGen/Hexagon/vect/vect-shift-imm.ll
index 4861181d4125..a4d6afa40bce 100644
--- a/test/CodeGen/Hexagon/vect/vect-shift-imm.ll
+++ b/test/CodeGen/Hexagon/vect/vect-shift-imm.ll
@@ -6,12 +6,12 @@
; RUN: llc -march=hexagon < %s | FileCheck %s --check-prefix=CHECK-LSRH
;
; Make sure that the instructions with immediate operands are generated.
-; CHECK-ASLW: vaslw({{.*}}, #9)
-; CHECK-ASRW: vasrw({{.*}}, #8)
-; CHECK-LSRW: vlsrw({{.*}}, #7)
-; CHECK-ASLH: vaslh({{.*}}, #6)
-; CHECK-ASRH: vasrh({{.*}}, #5)
-; CHECK-LSRH: vlsrh({{.*}}, #4)
+; CHECK-ASLW: vaslw({{.*}},#9)
+; CHECK-ASRW: vasrw({{.*}},#8)
+; CHECK-LSRW: vlsrw({{.*}},#7)
+; CHECK-ASLH: vaslh({{.*}},#6)
+; CHECK-ASRH: vasrh({{.*}},#5)
+; CHECK-LSRH: vlsrh({{.*}},#4)
target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/vect/vect-shuffle.ll b/test/CodeGen/Hexagon/vect/vect-shuffle.ll
index bd5b2b981695..27840bbd28d9 100644
--- a/test/CodeGen/Hexagon/vect/vect-shuffle.ll
+++ b/test/CodeGen/Hexagon/vect/vect-shuffle.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr < %s | FileCheck %s
; Check that no extractu or insert instructions are generated.
-; CHECK-NOT: extractu
+; CHECK-NOT: extractu(r{{[0-9]+}},#32,
; CHECK-NOT: insert
target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/vect/vect-vshifts.ll b/test/CodeGen/Hexagon/vect/vect-vshifts.ll
index 49ff812601ae..9d3cbe6e113f 100644
--- a/test/CodeGen/Hexagon/vect/vect-vshifts.ll
+++ b/test/CodeGen/Hexagon/vect/vect-vshifts.ll
@@ -1,8 +1,8 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that vector shifts with register shift amounts are generated.
-; CHECK: r{{[0-9]+:[0-9]+}} = vasrw(r{{[0-9]+:[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = vaslw(r{{[0-9]+:[0-9]+}}, r{{[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}} = vasrw(r{{[0-9]+:[0-9]+}},r{{[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}} = vaslw(r{{[0-9]+:[0-9]+}},r{{[0-9]+}})
target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/vect/vect-xor.ll b/test/CodeGen/Hexagon/vect/vect-xor.ll
index 96719e683413..8864ab5c5cb7 100644
--- a/test/CodeGen/Hexagon/vect/vect-xor.ll
+++ b/test/CodeGen/Hexagon/vect/vect-xor.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr < %s | FileCheck %s
; Check that the parsing succeeded.
-; CHECK: r{{[0-9]+:[0-9]+}} = xor(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}} = xor(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
target triple = "hexagon"
diff --git a/test/CodeGen/MIR/AArch64/atomic-memoperands.mir b/test/CodeGen/MIR/AArch64/atomic-memoperands.mir
new file mode 100644
index 000000000000..1fe42a731488
--- /dev/null
+++ b/test/CodeGen/MIR/AArch64/atomic-memoperands.mir
@@ -0,0 +1,30 @@
+# RUN: llc -mtriple=aarch64-none-linux-gnu -run-pass none -o - %s | FileCheck %s
+
+--- |
+
+ define void @atomic_memoperands() {
+ ret void
+ }
+
+...
+---
+# CHECK-LABEL: name: atomic_memoperands
+# CHECK: %1(s64) = G_LOAD %0(p0) :: (load unordered 8)
+# CHECK: %2(s32) = G_LOAD %0(p0) :: (load monotonic 4)
+# CHECK: %3(s16) = G_LOAD %0(p0) :: (load acquire 2)
+# CHECK: G_STORE %3(s16), %0(p0) :: (store release 2)
+# CHECK: G_STORE %2(s32), %0(p0) :: (store acq_rel 4)
+# CHECK: G_STORE %1(s64), %0(p0) :: (store singlethread seq_cst 8)
+name: atomic_memoperands
+body: |
+ bb.0:
+
+ %0:_(p0) = COPY %x0
+ %1:_(s64) = G_LOAD %0(p0) :: (load unordered 8)
+ %2:_(s32) = G_LOAD %0(p0) :: (load monotonic 4)
+ %3:_(s16) = G_LOAD %0(p0) :: (load acquire 2)
+ G_STORE %3(s16), %0(p0) :: (store release 2)
+ G_STORE %2(s32), %0(p0) :: (store acq_rel 4)
+ G_STORE %1(s64), %0(p0) :: (store singlethread seq_cst 8)
+ RET_ReallyLR
+...
diff --git a/test/CodeGen/MIR/AArch64/register-operand-bank.mir b/test/CodeGen/MIR/AArch64/register-operand-bank.mir
new file mode 100644
index 000000000000..d48495167f15
--- /dev/null
+++ b/test/CodeGen/MIR/AArch64/register-operand-bank.mir
@@ -0,0 +1,20 @@
+# RUN: llc -o - %s -mtriple=aarch64-- -run-pass=none | FileCheck %s
+# REQUIRES: global-isel
+# Test various aspects of register bank specification on machine operands.
+--- |
+ define void @func() { ret void }
+...
+---
+# CHECK-LABEL: name: func
+# CHECK: registers:
+# CHECK: - { id: 0, class: gpr }
+# CHECK: - { id: 1, class: fpr }
+name: func
+body: |
+ bb.0:
+ %0 : gpr(s64) = COPY %x9
+ %x9 = COPY %0
+
+ %3 : fpr(s64) = COPY %d0
+ %d1 = COPY %3 : fpr
+...
diff --git a/test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir b/test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir
index 47f0e168a722..5da98fb9c2d1 100644
--- a/test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir
+++ b/test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir
@@ -1,4 +1,4 @@
-# RUN: not llc -march=amdgcn -mcpu=SI -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
+# RUN: not llc -march=amdgcn -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
--- |
@@ -6,7 +6,7 @@
@float_gv = internal unnamed_addr addrspace(2) constant [5 x float] [float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00], align 4
- define void @float(float addrspace(1)* %out, i32 %index) #0 {
+ define amdgpu_kernel void @float(float addrspace(1)* %out, i32 %index) #0 {
entry:
%0 = getelementptr inbounds [5 x float], [5 x float] addrspace(2)* @float_gv, i32 0, i32 %index
%1 = load float, float addrspace(2)* %0
@@ -14,21 +14,7 @@
ret void
}
- declare { i1, i64 } @llvm.SI.if(i1)
-
- declare { i1, i64 } @llvm.SI.else(i64)
-
- declare i64 @llvm.SI.break(i64)
-
- declare i64 @llvm.SI.if.break(i1, i64)
-
- declare i64 @llvm.SI.else.break(i64, i64)
-
- declare i1 @llvm.SI.loop(i64)
-
- declare void @llvm.SI.end.cf(i64)
-
- attributes #0 = { "target-cpu"="SI" }
+ attributes #0 = { nounwind }
...
---
diff --git a/test/CodeGen/MIR/AMDGPU/fold-imm-f16-f32.mir b/test/CodeGen/MIR/AMDGPU/fold-imm-f16-f32.mir
index 3277d37d7e4d..7cef01c9d12d 100644
--- a/test/CodeGen/MIR/AMDGPU/fold-imm-f16-f32.mir
+++ b/test/CodeGen/MIR/AMDGPU/fold-imm-f16-f32.mir
@@ -1,6 +1,6 @@
# RUN: llc --mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs -run-pass si-fold-operands,si-shrink-instructions %s -o - | FileCheck %s
--- |
- define void @add_f32_1.0_one_f16_use() #0 {
+ define amdgpu_kernel void @add_f32_1.0_one_f16_use() #0 {
%f16.val0 = load volatile half, half addrspace(1)* undef
%f16.val1 = load volatile half, half addrspace(1)* undef
%f32.val = load volatile float, float addrspace(1)* undef
@@ -11,7 +11,7 @@
ret void
}
- define void @add_f32_1.0_multi_f16_use() #0 {
+ define amdgpu_kernel void @add_f32_1.0_multi_f16_use() #0 {
%f16.val0 = load volatile half, half addrspace(1)* undef
%f16.val1 = load volatile half, half addrspace(1)* undef
%f32.val = load volatile float, float addrspace(1)* undef
@@ -22,7 +22,7 @@
ret void
}
- define void @add_f32_1.0_one_f32_use_one_f16_use () #0 {
+ define amdgpu_kernel void @add_f32_1.0_one_f32_use_one_f16_use () #0 {
%f16.val0 = load volatile half, half addrspace(1)* undef
%f16.val1 = load volatile half, half addrspace(1)* undef
%f32.val = load volatile float, float addrspace(1)* undef
@@ -33,7 +33,7 @@
ret void
}
- define void @add_f32_1.0_one_f32_use_multi_f16_use () #0 {
+ define amdgpu_kernel void @add_f32_1.0_one_f32_use_multi_f16_use () #0 {
%f16.val0 = load volatile half, half addrspace(1)* undef
%f16.val1 = load volatile half, half addrspace(1)* undef
%f32.val = load volatile float, float addrspace(1)* undef
@@ -46,7 +46,7 @@
ret void
}
- define void @add_i32_1_multi_f16_use() #0 {
+ define amdgpu_kernel void @add_i32_1_multi_f16_use() #0 {
%f16.val0 = load volatile half, half addrspace(1)* undef
%f16.val1 = load volatile half, half addrspace(1)* undef
%f16.add0 = fadd half %f16.val0, 0xH0001
@@ -56,7 +56,7 @@
ret void
}
- define void @add_i32_m2_one_f32_use_multi_f16_use () #0 {
+ define amdgpu_kernel void @add_i32_m2_one_f32_use_multi_f16_use () #0 {
%f16.val0 = load volatile half, half addrspace(1)* undef
%f16.val1 = load volatile half, half addrspace(1)* undef
%f32.val = load volatile float, float addrspace(1)* undef
@@ -69,7 +69,7 @@
ret void
}
- define void @add_f16_1.0_multi_f32_use() #0 {
+ define amdgpu_kernel void @add_f16_1.0_multi_f32_use() #0 {
%f32.val0 = load volatile float, float addrspace(1)* undef
%f32.val1 = load volatile float, float addrspace(1)* undef
%f32.val = load volatile float, float addrspace(1)* undef
@@ -80,7 +80,7 @@
ret void
}
- define void @add_f16_1.0_other_high_bits_multi_f16_use() #0 {
+ define amdgpu_kernel void @add_f16_1.0_other_high_bits_multi_f16_use() #0 {
%f16.val0 = load volatile half, half addrspace(1)* undef
%f16.val1 = load volatile half, half addrspace(1)* undef
%f32.val = load volatile half, half addrspace(1)* undef
@@ -91,7 +91,7 @@
ret void
}
- define void @add_f16_1.0_other_high_bits_use_f16_f32() #0 {
+ define amdgpu_kernel void @add_f16_1.0_other_high_bits_use_f16_f32() #0 {
%f16.val0 = load volatile half, half addrspace(1)* undef
%f16.val1 = load volatile half, half addrspace(1)* undef
%f32.val = load volatile half, half addrspace(1)* undef
diff --git a/test/CodeGen/MIR/AMDGPU/intrinsics.mir b/test/CodeGen/MIR/AMDGPU/intrinsics.mir
index f43266eacbf0..cb6e6190990b 100644
--- a/test/CodeGen/MIR/AMDGPU/intrinsics.mir
+++ b/test/CodeGen/MIR/AMDGPU/intrinsics.mir
@@ -2,18 +2,18 @@
--- |
- define void @use_intrin() {
+ define amdgpu_kernel void @use_intrin() {
ret void
}
...
---
# Completely invalid code, but it checks that intrinsics round-trip properly.
-# CHECK: %0(s64) = COPY intrinsic(@llvm.AMDGPU.bfe.i32)
+# CHECK: %0(s64) = COPY intrinsic(@llvm.amdgcn.sbfe)
name: use_intrin
registers:
- { id: 0, class: _ }
body: |
bb.0:
- %0(s64) = COPY intrinsic(@llvm.AMDGPU.bfe.i32)
+ %0(s64) = COPY intrinsic(@llvm.amdgcn.sbfe.i32)
...
diff --git a/test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir b/test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir
index d73503223aa8..8cffc86373a3 100644
--- a/test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir
+++ b/test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir
@@ -1,4 +1,4 @@
-# RUN: not llc -march=amdgcn -mcpu=SI -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
+# RUN: not llc -march=amdgcn -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
--- |
@@ -6,7 +6,7 @@
@float_gv = internal unnamed_addr addrspace(2) constant [5 x float] [float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00], align 4
- define void @float(float addrspace(1)* %out, i32 %index) #0 {
+ define amdgpu_kernel void @float(float addrspace(1)* %out, i32 %index) #0 {
entry:
%0 = getelementptr inbounds [5 x float], [5 x float] addrspace(2)* @float_gv, i32 0, i32 %index
%1 = load float, float addrspace(2)* %0
@@ -14,21 +14,7 @@
ret void
}
- declare { i1, i64 } @llvm.SI.if(i1)
-
- declare { i1, i64 } @llvm.SI.else(i64)
-
- declare i64 @llvm.SI.break(i64)
-
- declare i64 @llvm.SI.if.break(i1, i64)
-
- declare i64 @llvm.SI.else.break(i64, i64)
-
- declare i1 @llvm.SI.loop(i64)
-
- declare void @llvm.SI.end.cf(i64)
-
- attributes #0 = { "target-cpu"="SI" }
+ attributes #0 = { nounwind }
...
---
diff --git a/test/CodeGen/MIR/AMDGPU/target-index-operands.mir b/test/CodeGen/MIR/AMDGPU/target-index-operands.mir
index a4e77f281ea6..32669de15ea3 100644
--- a/test/CodeGen/MIR/AMDGPU/target-index-operands.mir
+++ b/test/CodeGen/MIR/AMDGPU/target-index-operands.mir
@@ -7,7 +7,7 @@
@float_gv = internal unnamed_addr addrspace(2) constant [5 x float] [float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00], align 4
- define void @float(float addrspace(1)* %out, i32 %index) #0 {
+ define amdgpu_kernel void @float(float addrspace(1)* %out, i32 %index) #0 {
entry:
%0 = getelementptr inbounds [5 x float], [5 x float] addrspace(2)* @float_gv, i32 0, i32 %index
%1 = load float, float addrspace(2)* %0
@@ -15,29 +15,14 @@
ret void
}
- define void @float2(float addrspace(1)* %out, i32 %index) #0 {
+ define amdgpu_kernel void @float2(float addrspace(1)* %out, i32 %index) #0 {
entry:
%0 = getelementptr inbounds [5 x float], [5 x float] addrspace(2)* @float_gv, i32 0, i32 %index
%1 = load float, float addrspace(2)* %0
store float %1, float addrspace(1)* %out
ret void
}
-
- declare { i1, i64 } @llvm.SI.if(i1)
-
- declare { i1, i64 } @llvm.SI.else(i64)
-
- declare i64 @llvm.SI.break(i64)
-
- declare i64 @llvm.SI.if.break(i1, i64)
-
- declare i64 @llvm.SI.else.break(i64, i64)
-
- declare i1 @llvm.SI.loop(i64)
-
- declare void @llvm.SI.end.cf(i64)
-
- attributes #0 = { "target-cpu"="SI" }
+ attributes #0 = { nounwind }
...
---
diff --git a/test/CodeGen/MIR/Generic/llvmIR.mir b/test/CodeGen/MIR/Generic/llvmIR.mir
index 432b18ff939d..5c0e60e916f0 100644
--- a/test/CodeGen/MIR/Generic/llvmIR.mir
+++ b/test/CodeGen/MIR/Generic/llvmIR.mir
@@ -28,10 +28,8 @@
IfUnequal:
ret i32 0
}
-
+
...
---
name: foo
-body: |
- bb.0:
...
diff --git a/test/CodeGen/MIR/Generic/llvmIRMissing.mir b/test/CodeGen/MIR/Generic/llvmIRMissing.mir
index 9f361e8d3fe4..419f60be8061 100644
--- a/test/CodeGen/MIR/Generic/llvmIRMissing.mir
+++ b/test/CodeGen/MIR/Generic/llvmIRMissing.mir
@@ -1,9 +1,7 @@
-# RUN: llc -run-pass none -o - %s 2>&1 | FileCheck %s
+# RUN: llc -run-pass none -o - %s | FileCheck %s
# This test ensures that the MIR parser accepts files without the LLVM IR.
---
# CHECK: name: foo
name: foo
-body: |
- bb.0:
...
diff --git a/test/CodeGen/MIR/Generic/machine-basic-block-ir-block-reference.mir b/test/CodeGen/MIR/Generic/machine-basic-block-ir-block-reference.mir
index a5737c2c1526..cf095537bebd 100644
--- a/test/CodeGen/MIR/Generic/machine-basic-block-ir-block-reference.mir
+++ b/test/CodeGen/MIR/Generic/machine-basic-block-ir-block-reference.mir
@@ -1,4 +1,4 @@
-# RUN: llc -run-pass none -o - %s 2>&1 | FileCheck %s
+# RUN: llc -run-pass none -o - %s | FileCheck %s
# This test ensures that the MIR parser preserves unnamed LLVM IR block
# references.
diff --git a/test/CodeGen/MIR/Generic/machine-function-missing-body-error.mir b/test/CodeGen/MIR/Generic/machine-function-missing-body-error.mir
deleted file mode 100644
index 1896371db36a..000000000000
--- a/test/CodeGen/MIR/Generic/machine-function-missing-body-error.mir
+++ /dev/null
@@ -1,15 +0,0 @@
-# RUN: not llc -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
-# This test ensures that the MIR parser reports an error when it encounters a
-# machine function with an empty body.
-
---- |
-
- define i32 @foo() {
- ret i32 0
- }
-
-...
----
-# CHECK: machine function 'foo' requires at least one machine basic block in its body
-name: foo
-...
diff --git a/test/CodeGen/MIR/Generic/machine-function-missing-body.mir b/test/CodeGen/MIR/Generic/machine-function-missing-body.mir
new file mode 100644
index 000000000000..0fd970c3af7c
--- /dev/null
+++ b/test/CodeGen/MIR/Generic/machine-function-missing-body.mir
@@ -0,0 +1,15 @@
+# RUN: llc -run-pass none -o - %s | FileCheck %s
+# This test ensures that the MIR parser accepts files with LLVM IR but
+# no machine function body.
+
+--- |
+ ; CHECK: define i32 @foo()
+ define i32 @foo() {
+ ret i32 0
+ }
+
+...
+---
+# CHECK: name: foo
+name: foo
+...
diff --git a/test/CodeGen/MIR/Generic/machine-function-missing-function.mir b/test/CodeGen/MIR/Generic/machine-function-missing-function.mir
index c547bb25d753..b218afd72ea3 100644
--- a/test/CodeGen/MIR/Generic/machine-function-missing-function.mir
+++ b/test/CodeGen/MIR/Generic/machine-function-missing-function.mir
@@ -12,12 +12,8 @@
...
---
name: foo
-body: |
- bb.0:
...
---
# CHECK: function 'faa' isn't defined in the provided LLVM IR
name: faa
-body: |
- bb.0:
...
diff --git a/test/CodeGen/MIR/Generic/machine-function-missing-name.mir b/test/CodeGen/MIR/Generic/machine-function-missing-name.mir
index 30f0e51b3b66..bc279a6ecfdc 100644
--- a/test/CodeGen/MIR/Generic/machine-function-missing-name.mir
+++ b/test/CodeGen/MIR/Generic/machine-function-missing-name.mir
@@ -16,11 +16,7 @@
---
# CHECK: [[@LINE+1]]:1: missing required key 'name'
nme: foo
-body: |
- bb.0:
...
---
name: bar
-body: |
- bb.0:
...
diff --git a/test/CodeGen/MIR/Generic/machine-function.mir b/test/CodeGen/MIR/Generic/machine-function.mir
index f9001cca4c26..9c19b980e675 100644
--- a/test/CodeGen/MIR/Generic/machine-function.mir
+++ b/test/CodeGen/MIR/Generic/machine-function.mir
@@ -18,7 +18,7 @@
define i32 @func2() {
ret i32 0
}
-
+
...
---
# CHECK: name: foo
@@ -26,8 +26,6 @@
# CHECK-NEXT: exposesReturnsTwice: false
# CHECK: ...
name: foo
-body: |
- bb.0:
...
---
# CHECK: name: bar
@@ -35,8 +33,6 @@ body: |
# CHECK-NEXT: exposesReturnsTwice: false
# CHECK: ...
name: bar
-body: |
- bb.0:
...
---
# CHECK: name: func
@@ -45,8 +41,6 @@ body: |
# CHECK: ...
name: func
alignment: 8
-body: |
- bb.0:
...
---
# CHECK: name: func2
@@ -56,6 +50,4 @@ body: |
name: func2
alignment: 16
exposesReturnsTwice: true
-body: |
- bb.0:
...
diff --git a/test/CodeGen/MIR/Generic/register-info.mir b/test/CodeGen/MIR/Generic/register-info.mir
index af3f44f9abcc..84a6125abe88 100644
--- a/test/CodeGen/MIR/Generic/register-info.mir
+++ b/test/CodeGen/MIR/Generic/register-info.mir
@@ -20,8 +20,6 @@
# CHECK: tracksRegLiveness: false
# CHECK: ...
name: foo
-body: |
- bb.0:
...
---
# CHECK: name: bar
@@ -29,6 +27,4 @@ body: |
# CHECK: ...
name: bar
tracksRegLiveness: true
-body: |
- bb.0:
...
diff --git a/test/CodeGen/MIR/Generic/runPass.mir b/test/CodeGen/MIR/Generic/runPass.mir
index bf37bdd1836b..eeef9d526510 100644
--- a/test/CodeGen/MIR/Generic/runPass.mir
+++ b/test/CodeGen/MIR/Generic/runPass.mir
@@ -1,4 +1,4 @@
-# RUN: llc -run-pass=greedy -debug-pass=Arguments -o - %s 2>&1 | FileCheck %s
+# RUN: llc -run-pass=greedy -debug-pass=Arguments -o - %s | FileCheck %s
# Check that passes are initialized correctly, so that it's possible to
# use -run-pass.
diff --git a/test/CodeGen/MIR/X86/dynamic-regmask.ll b/test/CodeGen/MIR/X86/dynamic-regmask.ll
new file mode 100644
index 000000000000..df58f4be79d7
--- /dev/null
+++ b/test/CodeGen/MIR/X86/dynamic-regmask.ll
@@ -0,0 +1,30 @@
+; RUN: llc -mtriple=x86_64-pc-win32 -stop-after machine-sink %s -o %t.mir
+; RUN: FileCheck %s < %t.mir
+; RUN: llc %t.mir -mtriple=x86_64-pc-win32 -run-pass machine-sink
+; Check that callee saved registers are printed in a format that can then be parsed.
+
+declare x86_regcallcc i32 @callee(i32 %a0, i32 %b0, i32 %c0, i32 %d0, i32 %e0)
+
+define i32 @caller(i32 %a0) nounwind {
+ %b1 = call x86_regcallcc i32 @callee(i32 %a0, i32 %a0, i32 %a0, i32 %a0, i32 %a0)
+ %b2 = add i32 %b1, %a0
+ ret i32 %b2
+}
+; CHECK: name: caller
+; CHECK: CALL64pcrel32 @callee, CustomRegMask(%bh,%bl,%bp,%bpl,%bx,%ebp,%ebx,%esp,%rbp,%rbx,%rsp,%sp,%spl,%r10,%r11,%r12,%r13,%r14,%r15,%xmm8,%xmm9,%xmm10,%xmm11,%xmm12,%xmm13,%xmm14,%xmm15,%r10b,%r11b,%r12b,%r13b,%r14b,%r15b,%r10d,%r11d,%r12d,%r13d,%r14d,%r15d,%r10w,%r11w,%r12w,%r13w,%r14w,%r15w)
+; CHECK: RET 0, %eax
+
+define x86_regcallcc {i32, i32, i32} @test_callee(i32 %a0, i32 %b0, i32 %c0, i32 %d0, i32 %e0) nounwind {
+ %b1 = mul i32 7, %e0
+ %b2 = udiv i32 5, %e0
+ %b3 = mul i32 7, %d0
+ %b4 = insertvalue {i32, i32, i32} undef, i32 %b1, 0
+ %b5 = insertvalue {i32, i32, i32} %b4, i32 %b2, 1
+ %b6 = insertvalue {i32, i32, i32} %b5, i32 %b3, 2
+ ret {i32, i32, i32} %b6
+}
+; CHECK: name: test_callee
+; CHECK: calleeSavedRegisters: [ '%rbx', '%rbp', '%rsp', '%r10', '%r11', '%r12',
+; CHECK: '%r13', '%r14', '%r15', '%xmm8', '%xmm9', '%xmm10',
+; CHECK: '%xmm11', '%xmm12', '%xmm13', '%xmm14', '%xmm15' ]
+; CHECK: RET 0, %eax, %ecx, %edx
diff --git a/test/CodeGen/MIR/X86/expected-named-register-in-allocation-hint.mir b/test/CodeGen/MIR/X86/expected-named-register-in-allocation-hint.mir
index 5e7dde26769b..9847d027ee02 100644
--- a/test/CodeGen/MIR/X86/expected-named-register-in-allocation-hint.mir
+++ b/test/CodeGen/MIR/X86/expected-named-register-in-allocation-hint.mir
@@ -1,4 +1,4 @@
-# RUN: llc -march=x86-64 -run-pass none -o - %s 2>&1 | FileCheck %s
+# RUN: llc -march=x86-64 -run-pass none -o - %s | FileCheck %s
--- |
diff --git a/test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation.mir b/test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation.mir
index cfa03247e31f..57e11d39723a 100644
--- a/test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation.mir
+++ b/test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation.mir
@@ -17,7 +17,7 @@ liveins:
body: |
bb.0.entry:
liveins: %rdi
- ; CHECK: [[@LINE+1]]:53: expected the size integer literal after memory operation
+ ; CHECK: [[@LINE+1]]:53: expected an atomic scope, ordering or a size integer literal
%eax = MOV32rm killed %rdi, 1, _, 0, _ :: (load from %ir.a)
RETQ %eax
...
diff --git a/test/CodeGen/MIR/X86/register-operand-class-invalid0.mir b/test/CodeGen/MIR/X86/register-operand-class-invalid0.mir
new file mode 100644
index 000000000000..10a9f2d7ceb1
--- /dev/null
+++ b/test/CodeGen/MIR/X86/register-operand-class-invalid0.mir
@@ -0,0 +1,13 @@
+# RUN: not llc -o /dev/null %s -march=x86-64 -run-pass none 2>&1 | FileCheck %s
+# This test ensures that an error is reported for specifying the register class
+# of a physical register.
+--- |
+ define void @t() { ret void }
+...
+---
+name: t
+body: |
+ bb.0:
+ ; CHECK: [[@LINE+1]]:10: register class specification expects a virtual register
+ %eax : gr32 = COPY %rdx
+...
diff --git a/test/CodeGen/MIR/X86/register-operand-class-invalid1.mir b/test/CodeGen/MIR/X86/register-operand-class-invalid1.mir
new file mode 100644
index 000000000000..4be7fb38335e
--- /dev/null
+++ b/test/CodeGen/MIR/X86/register-operand-class-invalid1.mir
@@ -0,0 +1,14 @@
+# RUN: not llc -o /dev/null %s -march=x86-64 -run-pass none 2>&1 | FileCheck %s
+# This test ensures that an error is reported for conflicting register class
+# specifications on a virtual register.
+--- |
+ define void @t() { ret void }
+...
+---
+name: t
+body: |
+ bb.0:
+ %0 : gr32 = COPY %rdx
+ ; CHECK: [[@LINE+1]]:24: conflicting register classes, previously: GR32
+ NOOP implicit %0 : gr32_abcd
+...
diff --git a/test/CodeGen/MIR/X86/register-operand-class.mir b/test/CodeGen/MIR/X86/register-operand-class.mir
new file mode 100644
index 000000000000..63019daad7a1
--- /dev/null
+++ b/test/CodeGen/MIR/X86/register-operand-class.mir
@@ -0,0 +1,27 @@
+# RUN: llc -o - %s -march=x86-64 -run-pass none | FileCheck %s
+# Test various aspects of register class specification on machine operands.
+--- |
+ define void @func() { ret void }
+...
+---
+# CHECK-LABEL: name: func
+# CHECK: registers:
+# CHECK: - { id: 0, class: gr32 }
+# CHECK: - { id: 1, class: gr64 }
+# CHECK: - { id: 2, class: gr32 }
+# CHECK: - { id: 3, class: gr16 }
+# CHECK: - { id: 4, class: _ }
+name: func
+body: |
+ bb.0:
+ %0 : gr32 = COPY %rax
+ %1.sub_32bit : gr64 = COPY %eax
+ %rdx = COPY %1
+ %2 = COPY %ecx
+ %ecx = COPY %2 : gr32
+
+ %3 : gr16 = COPY %bx
+ %bx = COPY %3 : gr16
+
+ %4 : _(s32) = COPY %edx
+...
diff --git a/test/CodeGen/MIR/X86/used-physical-register-info.mir b/test/CodeGen/MIR/X86/used-physical-register-info.mir
deleted file mode 100644
index 9edc4113b279..000000000000
--- a/test/CodeGen/MIR/X86/used-physical-register-info.mir
+++ /dev/null
@@ -1,109 +0,0 @@
-# RUN: llc -march=x86-64 -run-pass none -o - %s | FileCheck %s
-# This test ensures that the MIR parser parses the callee saved register mask
-# correctly and that the MIR parser can infer it as well.
-
---- |
-
- define i32 @compute(i32 %a) #0 {
- body:
- %c = mul i32 %a, 11
- ret i32 %c
- }
-
- define i32 @foo(i32 %a) #0 {
- entry:
- %b = call i32 @compute(i32 %a)
- ret i32 %b
- }
-
- define i32 @bar(i32 %a) #0 {
- entry:
- %b = call i32 @compute(i32 %a)
- ret i32 %b
- }
-
- define i32 @empty(i32 %a) #0 {
- entry:
- %b = call i32 @compute(i32 %a)
- ret i32 %b
- }
-
- attributes #0 = { "no-frame-pointer-elim"="false" }
-
-...
----
-# CHECK: name: compute
-# CHECK: liveins:
-# CHECK-NEXT: - { reg: '%edi' }
-# CHECK-NEXT: frameInfo:
-name: compute
-liveins:
- - { reg: '%edi' }
-frameInfo:
- stackSize: 8
-body: |
- bb.0.body:
- liveins: %edi
-
- %eax = IMUL32rri8 %edi, 11, implicit-def %eflags
- RETQ %eax
-...
----
-name: foo
-liveins:
- - { reg: '%edi' }
-# CHECK: name: foo
-# CHECK: calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx',
-# CHECK-NEXT: '%rbp', '%rbx', '%r12', '%r13', '%r14', '%r15',
-# CHECK-NEXT: '%r12b', '%r13b', '%r14b', '%r15b', '%r12d', '%r13d',
-# CHECK-NEXT: '%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ]
-calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx',
- '%rbp', '%rbx', '%r12', '%r13', '%r14', '%r15',
- '%r12b', '%r13b', '%r14b', '%r15b', '%r12d', '%r13d',
- '%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ]
-body: |
- bb.0.entry:
- liveins: %edi
-
- PUSH64r %rax, implicit-def %rsp, implicit %rsp
- CALL64pcrel32 @compute, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, implicit-def %eax
- %rdx = POP64r implicit-def %rsp, implicit %rsp
- RETQ %eax
-...
----
-name: bar
-liveins:
- - { reg: '%edi' }
-# Verify that the callee saved register can be inferred from register mask
-# machine operands:
-# CHECK: name: bar
-# CHECK: calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx',
-# CHECK-NEXT: '%rbp', '%rbx', '%r12', '%r13', '%r14', '%r15',
-# CHECK-NEXT: '%r12b', '%r13b', '%r14b', '%r15b', '%r12d', '%r13d',
-# CHECK-NEXT: '%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ]
-body: |
- bb.0.entry:
- liveins: %edi
-
- PUSH64r %rax, implicit-def %rsp, implicit %rsp
- CALL64pcrel32 @compute, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, implicit-def %eax
- %rdx = POP64r implicit-def %rsp, implicit %rsp
- RETQ %eax
-...
----
-name: empty
-liveins:
- - { reg: '%edi' }
-# Verify that the callee saved register can be empty.
-# CHECK: name: empty
-# CHECK: calleeSavedRegisters: [ ]
-calleeSavedRegisters: [ ]
-body: |
- bb.0.entry:
- liveins: %edi
-
- PUSH64r %rax, implicit-def %rsp, implicit %rsp
- CALL64pcrel32 @compute, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, implicit-def %eax
- %rdx = POP64r implicit-def %rsp, implicit %rsp
- RETQ %eax
-...
diff --git a/test/CodeGen/MSP430/AddrMode-bis-rx.ll b/test/CodeGen/MSP430/AddrMode-bis-rx.ll
index 941ee2dc2ce9..f4cb30f2d014 100644
--- a/test/CodeGen/MSP430/AddrMode-bis-rx.ll
+++ b/test/CodeGen/MSP430/AddrMode-bis-rx.ll
@@ -8,7 +8,7 @@ define i16 @am1(i16 %x, i16* %a) nounwind {
ret i16 %2
}
; CHECK-LABEL: am1:
-; CHECK: bis.w 0(r14), r15
+; CHECK: bis.w 0(r13), r12
@foo = external global i16
@@ -18,7 +18,7 @@ define i16 @am2(i16 %x) nounwind {
ret i16 %2
}
; CHECK-LABEL: am2:
-; CHECK: bis.w &foo, r15
+; CHECK: bis.w &foo, r12
@bar = internal constant [2 x i8] [ i8 32, i8 64 ]
@@ -29,7 +29,7 @@ define i8 @am3(i8 %x, i16 %n) nounwind {
ret i8 %3
}
; CHECK-LABEL: am3:
-; CHECK: bis.b bar(r14), r15
+; CHECK: bis.b bar(r13), r12
define i16 @am4(i16 %x) nounwind {
%1 = load volatile i16, i16* inttoptr(i16 32 to i16*)
@@ -37,7 +37,7 @@ define i16 @am4(i16 %x) nounwind {
ret i16 %2
}
; CHECK-LABEL: am4:
-; CHECK: bis.w &32, r15
+; CHECK: bis.w &32, r12
define i16 @am5(i16 %x, i16* %a) nounwind {
%1 = getelementptr i16, i16* %a, i16 2
@@ -46,7 +46,7 @@ define i16 @am5(i16 %x, i16* %a) nounwind {
ret i16 %3
}
; CHECK-LABEL: am5:
-; CHECK: bis.w 4(r14), r15
+; CHECK: bis.w 4(r13), r12
%S = type { i16, i16 }
@baz = common global %S zeroinitializer, align 1
@@ -57,7 +57,7 @@ define i16 @am6(i16 %x) nounwind {
ret i16 %2
}
; CHECK-LABEL: am6:
-; CHECK: bis.w &baz+2, r15
+; CHECK: bis.w &baz+2, r12
%T = type { i16, [2 x i8] }
@duh = internal constant %T { i16 16, [2 x i8][i8 32, i8 64 ] }
@@ -70,5 +70,5 @@ define i8 @am7(i8 %x, i16 %n) nounwind {
ret i8 %4
}
; CHECK-LABEL: am7:
-; CHECK: bis.b duh+2(r14), r15
+; CHECK: bis.b duh+2(r13), r12
diff --git a/test/CodeGen/MSP430/AddrMode-bis-xr.ll b/test/CodeGen/MSP430/AddrMode-bis-xr.ll
index 4b8f367a8880..1e150f382062 100644
--- a/test/CodeGen/MSP430/AddrMode-bis-xr.ll
+++ b/test/CodeGen/MSP430/AddrMode-bis-xr.ll
@@ -9,7 +9,7 @@ define void @am1(i16* %a, i16 %x) nounwind {
ret void
}
; CHECK-LABEL: am1:
-; CHECK: bis.w r14, 0(r15)
+; CHECK: bis.w r13, 0(r12)
@foo = external global i16
@@ -20,7 +20,7 @@ define void @am2(i16 %x) nounwind {
ret void
}
; CHECK-LABEL: am2:
-; CHECK: bis.w r15, &foo
+; CHECK: bis.w r12, &foo
@bar = external global [2 x i8]
@@ -32,7 +32,7 @@ define void @am3(i16 %i, i8 %x) nounwind {
ret void
}
; CHECK-LABEL: am3:
-; CHECK: bis.b r14, bar(r15)
+; CHECK: bis.b r13, bar(r12)
define void @am4(i16 %x) nounwind {
%1 = load volatile i16, i16* inttoptr(i16 32 to i16*)
@@ -41,7 +41,7 @@ define void @am4(i16 %x) nounwind {
ret void
}
; CHECK-LABEL: am4:
-; CHECK: bis.w r15, &32
+; CHECK: bis.w r12, &32
define void @am5(i16* %a, i16 %x) readonly {
%1 = getelementptr inbounds i16, i16* %a, i16 2
@@ -51,7 +51,7 @@ define void @am5(i16* %a, i16 %x) readonly {
ret void
}
; CHECK-LABEL: am5:
-; CHECK: bis.w r14, 4(r15)
+; CHECK: bis.w r13, 4(r12)
%S = type { i16, i16 }
@baz = common global %S zeroinitializer
@@ -63,7 +63,7 @@ define void @am6(i16 %x) nounwind {
ret void
}
; CHECK-LABEL: am6:
-; CHECK: bis.w r15, &baz+2
+; CHECK: bis.w r12, &baz+2
%T = type { i16, [2 x i8] }
@duh = external global %T
@@ -77,5 +77,5 @@ define void @am7(i16 %n, i8 %x) nounwind {
ret void
}
; CHECK-LABEL: am7:
-; CHECK: bis.b r14, duh+2(r15)
+; CHECK: bis.b r13, duh+2(r12)
diff --git a/test/CodeGen/MSP430/AddrMode-mov-rx.ll b/test/CodeGen/MSP430/AddrMode-mov-rx.ll
index cdee931bf96d..808aca0ea10b 100644
--- a/test/CodeGen/MSP430/AddrMode-mov-rx.ll
+++ b/test/CodeGen/MSP430/AddrMode-mov-rx.ll
@@ -7,7 +7,7 @@ define i16 @am1(i16* %a) nounwind {
ret i16 %1
}
; CHECK-LABEL: am1:
-; CHECK: mov.w 0(r15), r15
+; CHECK: mov.w 0(r12), r12
@foo = external global i16
@@ -16,7 +16,7 @@ define i16 @am2() nounwind {
ret i16 %1
}
; CHECK-LABEL: am2:
-; CHECK: mov.w &foo, r15
+; CHECK: mov.w &foo, r12
@bar = internal constant [2 x i8] [ i8 32, i8 64 ]
@@ -26,14 +26,14 @@ define i8 @am3(i16 %n) nounwind {
ret i8 %2
}
; CHECK-LABEL: am3:
-; CHECK: mov.b bar(r15), r15
+; CHECK: mov.b bar(r12), r12
define i16 @am4() nounwind {
%1 = load volatile i16, i16* inttoptr(i16 32 to i16*)
ret i16 %1
}
; CHECK-LABEL: am4:
-; CHECK: mov.w &32, r15
+; CHECK: mov.w &32, r12
define i16 @am5(i16* %a) nounwind {
%1 = getelementptr i16, i16* %a, i16 2
@@ -41,7 +41,7 @@ define i16 @am5(i16* %a) nounwind {
ret i16 %2
}
; CHECK-LABEL: am5:
-; CHECK: mov.w 4(r15), r15
+; CHECK: mov.w 4(r12), r12
%S = type { i16, i16 }
@baz = common global %S zeroinitializer, align 1
@@ -51,7 +51,7 @@ define i16 @am6() nounwind {
ret i16 %1
}
; CHECK-LABEL: am6:
-; CHECK: mov.w &baz+2, r15
+; CHECK: mov.w &baz+2, r12
%T = type { i16, [2 x i8] }
@duh = internal constant %T { i16 16, [2 x i8][i8 32, i8 64 ] }
@@ -63,5 +63,5 @@ define i8 @am7(i16 %n) nounwind {
ret i8 %3
}
; CHECK-LABEL: am7:
-; CHECK: mov.b duh+2(r15), r15
+; CHECK: mov.b duh+2(r12), r12
diff --git a/test/CodeGen/MSP430/AddrMode-mov-xr.ll b/test/CodeGen/MSP430/AddrMode-mov-xr.ll
index ccb42886e9b4..c336289a60d7 100644
--- a/test/CodeGen/MSP430/AddrMode-mov-xr.ll
+++ b/test/CodeGen/MSP430/AddrMode-mov-xr.ll
@@ -7,7 +7,7 @@ define void @am1(i16* %a, i16 %b) nounwind {
ret void
}
; CHECK-LABEL: am1:
-; CHECK: mov.w r14, 0(r15)
+; CHECK: mov.w r13, 0(r12)
@foo = external global i16
@@ -16,7 +16,7 @@ define void @am2(i16 %a) nounwind {
ret void
}
; CHECK-LABEL: am2:
-; CHECK: mov.w r15, &foo
+; CHECK: mov.w r12, &foo
@bar = external global [2 x i8]
@@ -26,14 +26,14 @@ define void @am3(i16 %i, i8 %a) nounwind {
ret void
}
; CHECK-LABEL: am3:
-; CHECK: mov.b r14, bar(r15)
+; CHECK: mov.b r13, bar(r12)
define void @am4(i16 %a) nounwind {
store volatile i16 %a, i16* inttoptr(i16 32 to i16*)
ret void
}
; CHECK-LABEL: am4:
-; CHECK: mov.w r15, &32
+; CHECK: mov.w r12, &32
define void @am5(i16* nocapture %p, i16 %a) nounwind readonly {
%1 = getelementptr inbounds i16, i16* %p, i16 2
@@ -41,7 +41,7 @@ define void @am5(i16* nocapture %p, i16 %a) nounwind readonly {
ret void
}
; CHECK-LABEL: am5:
-; CHECK: mov.w r14, 4(r15)
+; CHECK: mov.w r13, 4(r12)
%S = type { i16, i16 }
@baz = common global %S zeroinitializer, align 1
@@ -51,7 +51,7 @@ define void @am6(i16 %a) nounwind {
ret void
}
; CHECK-LABEL: am6:
-; CHECK: mov.w r15, &baz+2
+; CHECK: mov.w r12, &baz+2
%T = type { i16, [2 x i8] }
@duh = external global %T
@@ -63,5 +63,5 @@ define void @am7(i16 %n, i8 %a) nounwind {
ret void
}
; CHECK-LABEL: am7:
-; CHECK: mov.b r14, duh+2(r15)
+; CHECK: mov.b r13, duh+2(r12)
diff --git a/test/CodeGen/MSP430/Inst16mm.ll b/test/CodeGen/MSP430/Inst16mm.ll
index c75e1beb2356..a48d8592c1a6 100644
--- a/test/CodeGen/MSP430/Inst16mm.ll
+++ b/test/CodeGen/MSP430/Inst16mm.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=msp430 -combiner-alias-analysis < %s | FileCheck %s
+; RUN: llc -march=msp430 < %s | FileCheck %s
target datalayout = "e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8"
target triple = "msp430-generic-generic"
@foo = common global i16 0, align 2
diff --git a/test/CodeGen/MSP430/Inst16mr.ll b/test/CodeGen/MSP430/Inst16mr.ll
index 50dc4c0b6731..847c093f4088 100644
--- a/test/CodeGen/MSP430/Inst16mr.ll
+++ b/test/CodeGen/MSP430/Inst16mr.ll
@@ -5,14 +5,14 @@ target triple = "msp430-generic-generic"
define void @mov(i16 %a) nounwind {
; CHECK-LABEL: mov:
-; CHECK: mov.w r15, &foo
+; CHECK: mov.w r12, &foo
store i16 %a, i16* @foo
ret void
}
define void @add(i16 %a) nounwind {
; CHECK-LABEL: add:
-; CHECK: add.w r15, &foo
+; CHECK: add.w r12, &foo
%1 = load i16, i16* @foo
%2 = add i16 %a, %1
store i16 %2, i16* @foo
@@ -21,7 +21,7 @@ define void @add(i16 %a) nounwind {
define void @and(i16 %a) nounwind {
; CHECK-LABEL: and:
-; CHECK: and.w r15, &foo
+; CHECK: and.w r12, &foo
%1 = load i16, i16* @foo
%2 = and i16 %a, %1
store i16 %2, i16* @foo
@@ -30,7 +30,7 @@ define void @and(i16 %a) nounwind {
define void @bis(i16 %a) nounwind {
; CHECK-LABEL: bis:
-; CHECK: bis.w r15, &foo
+; CHECK: bis.w r12, &foo
%1 = load i16, i16* @foo
%2 = or i16 %a, %1
store i16 %2, i16* @foo
@@ -39,7 +39,7 @@ define void @bis(i16 %a) nounwind {
define void @bic(i16 zeroext %m) nounwind {
; CHECK-LABEL: bic:
-; CHECK: bic.w r15, &foo
+; CHECK: bic.w r12, &foo
%1 = xor i16 %m, -1
%2 = load i16, i16* @foo
%3 = and i16 %2, %1
@@ -49,7 +49,7 @@ define void @bic(i16 zeroext %m) nounwind {
define void @xor(i16 %a) nounwind {
; CHECK-LABEL: xor:
-; CHECK: xor.w r15, &foo
+; CHECK: xor.w r12, &foo
%1 = load i16, i16* @foo
%2 = xor i16 %a, %1
store i16 %2, i16* @foo
diff --git a/test/CodeGen/MSP430/Inst16ri.ll b/test/CodeGen/MSP430/Inst16ri.ll
index f89f686ab567..3a4bb6a93d99 100644
--- a/test/CodeGen/MSP430/Inst16ri.ll
+++ b/test/CodeGen/MSP430/Inst16ri.ll
@@ -4,34 +4,34 @@ target triple = "msp430-generic-generic"
define i16 @mov() nounwind {
; CHECK-LABEL: mov:
-; CHECK: mov.w #1, r15
+; CHECK: mov.w #1, r12
ret i16 1
}
define i16 @add(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: add:
-; CHECK: add.w #1, r15
+; CHECK: add.w #1, r12
%1 = add i16 %a, 1
ret i16 %1
}
define i16 @and(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: and:
-; CHECK: and.w #1, r15
+; CHECK: and.w #1, r12
%1 = and i16 %a, 1
ret i16 %1
}
define i16 @bis(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: bis:
-; CHECK: bis.w #1, r15
+; CHECK: bis.w #1, r12
%1 = or i16 %a, 1
ret i16 %1
}
define i16 @xor(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: xor:
-; CHECK: xor.w #1, r15
+; CHECK: xor.w #1, r12
%1 = xor i16 %a, 1
ret i16 %1
}
diff --git a/test/CodeGen/MSP430/Inst16rm.ll b/test/CodeGen/MSP430/Inst16rm.ll
index 4f6998ee68df..44b8f39d8fa6 100644
--- a/test/CodeGen/MSP430/Inst16rm.ll
+++ b/test/CodeGen/MSP430/Inst16rm.ll
@@ -5,7 +5,7 @@ target triple = "msp430-generic-generic"
define i16 @add(i16 %a) nounwind {
; CHECK-LABEL: add:
-; CHECK: add.w &foo, r15
+; CHECK: add.w &foo, r12
%1 = load i16, i16* @foo
%2 = add i16 %a, %1
ret i16 %2
@@ -13,7 +13,7 @@ define i16 @add(i16 %a) nounwind {
define i16 @and(i16 %a) nounwind {
; CHECK-LABEL: and:
-; CHECK: and.w &foo, r15
+; CHECK: and.w &foo, r12
%1 = load i16, i16* @foo
%2 = and i16 %a, %1
ret i16 %2
@@ -21,7 +21,7 @@ define i16 @and(i16 %a) nounwind {
define i16 @bis(i16 %a) nounwind {
; CHECK-LABEL: bis:
-; CHECK: bis.w &foo, r15
+; CHECK: bis.w &foo, r12
%1 = load i16, i16* @foo
%2 = or i16 %a, %1
ret i16 %2
@@ -29,7 +29,7 @@ define i16 @bis(i16 %a) nounwind {
define i16 @bic(i16 %a) nounwind {
; CHECK-LABEL: bic:
-; CHECK: bic.w &foo, r15
+; CHECK: bic.w &foo, r12
%1 = load i16, i16* @foo
%2 = xor i16 %1, -1
%3 = and i16 %a, %2
@@ -38,7 +38,7 @@ define i16 @bic(i16 %a) nounwind {
define i16 @xor(i16 %a) nounwind {
; CHECK-LABEL: xor:
-; CHECK: xor.w &foo, r15
+; CHECK: xor.w &foo, r12
%1 = load i16, i16* @foo
%2 = xor i16 %a, %1
ret i16 %2
diff --git a/test/CodeGen/MSP430/Inst16rr.ll b/test/CodeGen/MSP430/Inst16rr.ll
index d74bfae9b938..75440ca2b403 100644
--- a/test/CodeGen/MSP430/Inst16rr.ll
+++ b/test/CodeGen/MSP430/Inst16rr.ll
@@ -4,34 +4,34 @@ target triple = "msp430-generic-generic"
define i16 @mov(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: mov:
-; CHECK: mov.w r14, r15
+; CHECK: mov.w r13, r12
ret i16 %b
}
define i16 @add(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: add:
-; CHECK: add.w r14, r15
+; CHECK: add.w r13, r12
%1 = add i16 %a, %b
ret i16 %1
}
define i16 @and(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: and:
-; CHECK: and.w r14, r15
+; CHECK: and.w r13, r12
%1 = and i16 %a, %b
ret i16 %1
}
define i16 @bis(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: bis:
-; CHECK: bis.w r14, r15
+; CHECK: bis.w r13, r12
%1 = or i16 %a, %b
ret i16 %1
}
define i16 @bic(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: bic:
-; CHECK: bic.w r14, r15
+; CHECK: bic.w r13, r12
%1 = xor i16 %b, -1
%2 = and i16 %a, %1
ret i16 %2
@@ -39,7 +39,7 @@ define i16 @bic(i16 %a, i16 %b) nounwind {
define i16 @xor(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: xor:
-; CHECK: xor.w r14, r15
+; CHECK: xor.w r13, r12
%1 = xor i16 %a, %b
ret i16 %1
}
diff --git a/test/CodeGen/MSP430/Inst8mr.ll b/test/CodeGen/MSP430/Inst8mr.ll
index f03c7e1a659b..7fbdff257fe7 100644
--- a/test/CodeGen/MSP430/Inst8mr.ll
+++ b/test/CodeGen/MSP430/Inst8mr.ll
@@ -5,14 +5,14 @@ target triple = "msp430-generic-generic"
define void @mov(i8 %a) nounwind {
; CHECK-LABEL: mov:
-; CHECK: mov.b r15, &foo
+; CHECK: mov.b r12, &foo
store i8 %a, i8* @foo
ret void
}
define void @and(i8 %a) nounwind {
; CHECK-LABEL: and:
-; CHECK: and.b r15, &foo
+; CHECK: and.b r12, &foo
%1 = load i8, i8* @foo
%2 = and i8 %a, %1
store i8 %2, i8* @foo
@@ -21,7 +21,7 @@ define void @and(i8 %a) nounwind {
define void @add(i8 %a) nounwind {
; CHECK-LABEL: add:
-; CHECK: add.b r15, &foo
+; CHECK: add.b r12, &foo
%1 = load i8, i8* @foo
%2 = add i8 %a, %1
store i8 %2, i8* @foo
@@ -30,7 +30,7 @@ define void @add(i8 %a) nounwind {
define void @bis(i8 %a) nounwind {
; CHECK-LABEL: bis:
-; CHECK: bis.b r15, &foo
+; CHECK: bis.b r12, &foo
%1 = load i8, i8* @foo
%2 = or i8 %a, %1
store i8 %2, i8* @foo
@@ -39,7 +39,7 @@ define void @bis(i8 %a) nounwind {
define void @bic(i8 zeroext %m) nounwind {
; CHECK-LABEL: bic:
-; CHECK: bic.b r15, &foo
+; CHECK: bic.b r12, &foo
%1 = xor i8 %m, -1
%2 = load i8, i8* @foo
%3 = and i8 %2, %1
@@ -49,7 +49,7 @@ define void @bic(i8 zeroext %m) nounwind {
define void @xor(i8 %a) nounwind {
; CHECK-LABEL: xor:
-; CHECK: xor.b r15, &foo
+; CHECK: xor.b r12, &foo
%1 = load i8, i8* @foo
%2 = xor i8 %a, %1
store i8 %2, i8* @foo
diff --git a/test/CodeGen/MSP430/Inst8ri.ll b/test/CodeGen/MSP430/Inst8ri.ll
index ec0dff9c563e..0e50f17f2a55 100644
--- a/test/CodeGen/MSP430/Inst8ri.ll
+++ b/test/CodeGen/MSP430/Inst8ri.ll
@@ -4,34 +4,34 @@ target triple = "msp430-generic-generic"
define i8 @mov() nounwind {
; CHECK-LABEL: mov:
-; CHECK: mov.b #1, r15
+; CHECK: mov.b #1, r12
ret i8 1
}
define i8 @add(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: add:
-; CHECK: add.b #1, r15
+; CHECK: add.b #1, r12
%1 = add i8 %a, 1
ret i8 %1
}
define i8 @and(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: and:
-; CHECK: and.b #1, r15
+; CHECK: and.b #1, r12
%1 = and i8 %a, 1
ret i8 %1
}
define i8 @bis(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: bis:
-; CHECK: bis.b #1, r15
+; CHECK: bis.b #1, r12
%1 = or i8 %a, 1
ret i8 %1
}
define i8 @xor(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: xor:
-; CHECK: xor.b #1, r15
+; CHECK: xor.b #1, r12
%1 = xor i8 %a, 1
ret i8 %1
}
diff --git a/test/CodeGen/MSP430/Inst8rm.ll b/test/CodeGen/MSP430/Inst8rm.ll
index e1a970395578..826a3c65ec94 100644
--- a/test/CodeGen/MSP430/Inst8rm.ll
+++ b/test/CodeGen/MSP430/Inst8rm.ll
@@ -5,7 +5,7 @@ target triple = "msp430-generic-generic"
define i8 @add(i8 %a) nounwind {
; CHECK-LABEL: add:
-; CHECK: add.b &foo, r15
+; CHECK: add.b &foo, r12
%1 = load i8, i8* @foo
%2 = add i8 %a, %1
ret i8 %2
@@ -13,7 +13,7 @@ define i8 @add(i8 %a) nounwind {
define i8 @and(i8 %a) nounwind {
; CHECK-LABEL: and:
-; CHECK: and.b &foo, r15
+; CHECK: and.b &foo, r12
%1 = load i8, i8* @foo
%2 = and i8 %a, %1
ret i8 %2
@@ -21,7 +21,7 @@ define i8 @and(i8 %a) nounwind {
define i8 @bis(i8 %a) nounwind {
; CHECK-LABEL: bis:
-; CHECK: bis.b &foo, r15
+; CHECK: bis.b &foo, r12
%1 = load i8, i8* @foo
%2 = or i8 %a, %1
ret i8 %2
@@ -29,7 +29,7 @@ define i8 @bis(i8 %a) nounwind {
define i8 @bic(i8 %a) nounwind {
; CHECK-LABEL: bic:
-; CHECK: bic.b &foo, r15
+; CHECK: bic.b &foo, r12
%1 = load i8, i8* @foo
%2 = xor i8 %1, -1
%3 = and i8 %a, %2
@@ -38,7 +38,7 @@ define i8 @bic(i8 %a) nounwind {
define i8 @xor(i8 %a) nounwind {
; CHECK-LABEL: xor:
-; CHECK: xor.b &foo, r15
+; CHECK: xor.b &foo, r12
%1 = load i8, i8* @foo
%2 = xor i8 %a, %1
ret i8 %2
diff --git a/test/CodeGen/MSP430/Inst8rr.ll b/test/CodeGen/MSP430/Inst8rr.ll
index 76e8d1911282..f37bc32a28fe 100644
--- a/test/CodeGen/MSP430/Inst8rr.ll
+++ b/test/CodeGen/MSP430/Inst8rr.ll
@@ -4,7 +4,7 @@ target triple = "msp430-generic-generic"
define i8 @mov(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: mov:
-; CHECK: mov.{{[bw]}} r14, r15
+; CHECK: mov.{{[bw]}} r13, r12
ret i8 %b
}
@@ -17,21 +17,21 @@ define i8 @add(i8 %a, i8 %b) nounwind {
define i8 @and(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: and:
-; CHECK: and.w r14, r15
+; CHECK: and.w r13, r12
%1 = and i8 %a, %b
ret i8 %1
}
define i8 @bis(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: bis:
-; CHECK: bis.w r14, r15
+; CHECK: bis.w r13, r12
%1 = or i8 %a, %b
ret i8 %1
}
define i8 @bic(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: bic:
-; CHECK: bic.b r14, r15
+; CHECK: bic.b r13, r12
%1 = xor i8 %b, -1
%2 = and i8 %a, %1
ret i8 %2
@@ -39,7 +39,7 @@ define i8 @bic(i8 %a, i8 %b) nounwind {
define i8 @xor(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: xor:
-; CHECK: xor.w r14, r15
+; CHECK: xor.w r13, r12
%1 = xor i8 %a, %b
ret i8 %1
}
diff --git a/test/CodeGen/MSP430/bit.ll b/test/CodeGen/MSP430/bit.ll
index 45964f97f1bf..172822fbb5fe 100644
--- a/test/CodeGen/MSP430/bit.ll
+++ b/test/CodeGen/MSP430/bit.ll
@@ -12,7 +12,7 @@ define i8 @bitbrr(i8 %a, i8 %b) nounwind {
ret i8 %t3
}
; CHECK-LABEL: bitbrr:
-; CHECK: bit.b r14, r15
+; CHECK: bit.b r13, r12
define i8 @bitbri(i8 %a) nounwind {
%t1 = and i8 %a, 15
@@ -21,7 +21,7 @@ define i8 @bitbri(i8 %a) nounwind {
ret i8 %t3
}
; CHECK-LABEL: bitbri:
-; CHECK: bit.b #15, r15
+; CHECK: bit.b #15, r12
define i8 @bitbir(i8 %a) nounwind {
%t1 = and i8 15, %a
@@ -30,7 +30,7 @@ define i8 @bitbir(i8 %a) nounwind {
ret i8 %t3
}
; CHECK-LABEL: bitbir:
-; CHECK: bit.b #15, r15
+; CHECK: bit.b #15, r12
define i8 @bitbmi() nounwind {
%t1 = load i8, i8* @foo8
@@ -60,7 +60,7 @@ define i8 @bitbrm(i8 %a) nounwind {
ret i8 %t4
}
; CHECK-LABEL: bitbrm:
-; CHECK: bit.b &foo8, r15
+; CHECK: bit.b &foo8, r12
define i8 @bitbmr(i8 %a) nounwind {
%t1 = load i8, i8* @foo8
@@ -70,7 +70,7 @@ define i8 @bitbmr(i8 %a) nounwind {
ret i8 %t4
}
; CHECK-LABEL: bitbmr:
-; CHECK: bit.b r15, &foo8
+; CHECK: bit.b r12, &foo8
define i8 @bitbmm() nounwind {
%t1 = load i8, i8* @foo8
@@ -93,7 +93,7 @@ define i16 @bitwrr(i16 %a, i16 %b) nounwind {
ret i16 %t3
}
; CHECK-LABEL: bitwrr:
-; CHECK: bit.w r14, r15
+; CHECK: bit.w r13, r12
define i16 @bitwri(i16 %a) nounwind {
%t1 = and i16 %a, 4080
@@ -102,7 +102,7 @@ define i16 @bitwri(i16 %a) nounwind {
ret i16 %t3
}
; CHECK-LABEL: bitwri:
-; CHECK: bit.w #4080, r15
+; CHECK: bit.w #4080, r12
define i16 @bitwir(i16 %a) nounwind {
%t1 = and i16 4080, %a
@@ -111,7 +111,7 @@ define i16 @bitwir(i16 %a) nounwind {
ret i16 %t3
}
; CHECK-LABEL: bitwir:
-; CHECK: bit.w #4080, r15
+; CHECK: bit.w #4080, r12
define i16 @bitwmi() nounwind {
%t1 = load i16, i16* @foo16
@@ -141,7 +141,7 @@ define i16 @bitwrm(i16 %a) nounwind {
ret i16 %t4
}
; CHECK-LABEL: bitwrm:
-; CHECK: bit.w &foo16, r15
+; CHECK: bit.w &foo16, r12
define i16 @bitwmr(i16 %a) nounwind {
%t1 = load i16, i16* @foo16
@@ -151,7 +151,7 @@ define i16 @bitwmr(i16 %a) nounwind {
ret i16 %t4
}
; CHECK-LABEL: bitwmr:
-; CHECK: bit.w r15, &foo16
+; CHECK: bit.w r12, &foo16
define i16 @bitwmm() nounwind {
%t1 = load i16, i16* @foo16
diff --git a/test/CodeGen/MSP430/byval.ll b/test/CodeGen/MSP430/byval.ll
index 410a6b047b6e..401896b43c20 100644
--- a/test/CodeGen/MSP430/byval.ll
+++ b/test/CodeGen/MSP430/byval.ll
@@ -9,7 +9,7 @@ target triple = "msp430---elf"
define i16 @callee(%struct.Foo* byval %f) nounwind {
entry:
; CHECK-LABEL: callee:
-; CHECK: mov.w 2(r1), r15
+; CHECK: mov.w 2(r1), r12
%0 = getelementptr inbounds %struct.Foo, %struct.Foo* %f, i32 0, i32 0
%1 = load i16, i16* %0, align 2
ret i16 %1
diff --git a/test/CodeGen/MSP430/cc_args.ll b/test/CodeGen/MSP430/cc_args.ll
index 39e99e263744..70ac901f7e4e 100644
--- a/test/CodeGen/MSP430/cc_args.ll
+++ b/test/CodeGen/MSP430/cc_args.ll
@@ -7,12 +7,12 @@ define void @test() #0 {
entry:
; CHECK: test:
-; CHECK: mov.w #1, r15
+; CHECK: mov.w #1, r12
; CHECK: call #f_i16
call void @f_i16(i16 1)
-; CHECK: mov.w #772, r14
-; CHECK: mov.w #258, r15
+; CHECK: mov.w #772, r12
+; CHECK: mov.w #258, r13
; CHECK: call #f_i32
call void @f_i32(i32 16909060)
@@ -23,26 +23,34 @@ entry:
; CHECK: call #f_i64
call void @f_i64(i64 72623859790382856)
-; CHECK: mov.w #772, r14
-; CHECK: mov.w #258, r15
-; CHECK: mov.w #1800, r12
-; CHECK: mov.w #1286, r13
+; CHECK: mov.w #772, r12
+; CHECK: mov.w #258, r13
+; CHECK: mov.w #1800, r14
+; CHECK: mov.w #1286, r15
; CHECK: call #f_i32_i32
call void @f_i32_i32(i32 16909060, i32 84281096)
-; CHECK: mov.w #1, r15
+; CHECK: mov.w #1, r12
; CHECK: mov.w #772, r13
; CHECK: mov.w #258, r14
-; CHECK: mov.w #2, r12
+; CHECK: mov.w #2, r15
; CHECK: call #f_i16_i32_i16
call void @f_i16_i32_i16(i16 1, i32 16909060, i16 2)
-; CHECK: mov.w #2, 8(r1)
+; CHECK: mov.w #1286, 0(r1)
+; CHECK: mov.w #1, r12
+; CHECK: mov.w #772, r13
+; CHECK: mov.w #258, r14
+; CHECK: mov.w #1800, r15
+; CHECK: call #f_i16_i32_i32
+ call void @f_i16_i32_i32(i16 1, i32 16909060, i32 84281096)
+
; CHECK: mov.w #258, 6(r1)
; CHECK: mov.w #772, 4(r1)
; CHECK: mov.w #1286, 2(r1)
; CHECK: mov.w #1800, 0(r1)
-; CHECK: mov.w #1, r15
+; CHECK: mov.w #1, r12
+; CHECK: mov.w #2, r13
; CHECK: call #f_i16_i64_i16
call void @f_i16_i64_i16(i16 1, i64 72623859790382856, i16 2)
@@ -55,15 +63,15 @@ entry:
define void @f_i16(i16 %a) #0 {
; CHECK: f_i16:
-; CHECK: mov.w r15, &g_i16
+; CHECK: mov.w r12, &g_i16
store volatile i16 %a, i16* @g_i16, align 2
ret void
}
define void @f_i32(i32 %a) #0 {
; CHECK: f_i32:
-; CHECK: mov.w r15, &g_i32+2
-; CHECK: mov.w r14, &g_i32
+; CHECK: mov.w r13, &g_i32+2
+; CHECK: mov.w r12, &g_i32
store volatile i32 %a, i32* @g_i32, align 2
ret void
}
@@ -80,37 +88,50 @@ define void @f_i64(i64 %a) #0 {
define void @f_i32_i32(i32 %a, i32 %b) #0 {
; CHECK: f_i32_i32:
-; CHECK: mov.w r15, &g_i32+2
-; CHECK: mov.w r14, &g_i32
- store volatile i32 %a, i32* @g_i32, align 2
; CHECK: mov.w r13, &g_i32+2
; CHECK: mov.w r12, &g_i32
+ store volatile i32 %a, i32* @g_i32, align 2
+; CHECK: mov.w r15, &g_i32+2
+; CHECK: mov.w r14, &g_i32
store volatile i32 %b, i32* @g_i32, align 2
ret void
}
+define void @f_i16_i32_i32(i16 %a, i32 %b, i32 %c) #0 {
+; CHECK: f_i16_i32_i32:
+; CHECK: mov.w r12, &g_i16
+ store volatile i16 %a, i16* @g_i16, align 2
+; CHECK: mov.w r14, &g_i32+2
+; CHECK: mov.w r13, &g_i32
+ store volatile i32 %b, i32* @g_i32, align 2
+; CHECK: mov.w r15, &g_i32
+; CHECK: mov.w 4(r4), &g_i32+2
+ store volatile i32 %c, i32* @g_i32, align 2
+ ret void
+}
+
define void @f_i16_i32_i16(i16 %a, i32 %b, i16 %c) #0 {
; CHECK: f_i16_i32_i16:
-; CHECK: mov.w r15, &g_i16
+; CHECK: mov.w r12, &g_i16
store volatile i16 %a, i16* @g_i16, align 2
; CHECK: mov.w r14, &g_i32+2
; CHECK: mov.w r13, &g_i32
store volatile i32 %b, i32* @g_i32, align 2
-; CHECK: mov.w r12, &g_i16
+; CHECK: mov.w r15, &g_i16
store volatile i16 %c, i16* @g_i16, align 2
ret void
}
define void @f_i16_i64_i16(i16 %a, i64 %b, i16 %c) #0 {
; CHECK: f_i16_i64_i16:
-; CHECK: mov.w r15, &g_i16
+; CHECK: mov.w r12, &g_i16
store volatile i16 %a, i16* @g_i16, align 2
;CHECK: mov.w 10(r4), &g_i64+6
;CHECK: mov.w 8(r4), &g_i64+4
;CHECK: mov.w 6(r4), &g_i64+2
;CHECK: mov.w 4(r4), &g_i64
store volatile i64 %b, i64* @g_i64, align 2
-;CHECK: mov.w 12(r4), &g_i16
+;CHECK: mov.w r13, &g_i16
store volatile i16 %c, i16* @g_i16, align 2
ret void
}
diff --git a/test/CodeGen/MSP430/cc_ret.ll b/test/CodeGen/MSP430/cc_ret.ll
index c2a9ae664509..937db6dbf3bf 100644
--- a/test/CodeGen/MSP430/cc_ret.ll
+++ b/test/CodeGen/MSP430/cc_ret.ll
@@ -8,13 +8,13 @@ entry:
; CHECK: test:
; CHECK: call #f_i16
-; CHECK: mov.w r15, &g_i16
+; CHECK: mov.w r12, &g_i16
%0 = call i16 @f_i16()
store volatile i16 %0, i16* @g_i16
; CHECK: call #f_i32
-; CHECK: mov.w r15, &g_i32+2
-; CHECK: mov.w r14, &g_i32
+; CHECK: mov.w r13, &g_i32+2
+; CHECK: mov.w r12, &g_i32
%1 = call i32 @f_i32()
store volatile i32 %1, i32* @g_i32
@@ -35,15 +35,15 @@ entry:
define i16 @f_i16() #0 {
; CHECK: f_i16:
-; CHECK: mov.w #1, r15
+; CHECK: mov.w #1, r12
; CHECK: ret
ret i16 1
}
define i32 @f_i32() #0 {
; CHECK: f_i32:
-; CHECK: mov.w #772, r14
-; CHECK: mov.w #258, r15
+; CHECK: mov.w #772, r12
+; CHECK: mov.w #258, r13
; CHECK: ret
ret i32 16909060
}
diff --git a/test/CodeGen/MSP430/jumptable.ll b/test/CodeGen/MSP430/jumptable.ll
index 4ba930b04e39..5ccdbb701db1 100644
--- a/test/CodeGen/MSP430/jumptable.ll
+++ b/test/CodeGen/MSP430/jumptable.ll
@@ -11,9 +11,9 @@ entry:
%i.addr = alloca i16, align 2
store i16 %i, i16* %i.addr, align 2
%0 = load i16, i16* %i.addr, align 2
-; CHECK: mov.w #2, r14
+; CHECK: mov.w #2, r13
; CHECK: call #__mulhi3hw_noint
-; CHECK: br .LJTI0_0(r15)
+; CHECK: br .LJTI0_0(r12)
switch i16 %0, label %sw.default [
i16 0, label %sw.bb
i16 1, label %sw.bb1
diff --git a/test/CodeGen/MSP430/memset.ll b/test/CodeGen/MSP430/memset.ll
index 76cfb29586d7..a24bfafc2005 100644
--- a/test/CodeGen/MSP430/memset.ll
+++ b/test/CodeGen/MSP430/memset.ll
@@ -9,9 +9,9 @@ define void @test() nounwind {
entry:
; CHECK-LABEL: test:
%0 = load i8*, i8** @buf, align 2
-; CHECK: mov.w &buf, r15
-; CHECK-NEXT: mov.w #5, r14
-; CHECK-NEXT: mov.w #128, r13
+; CHECK: mov.w &buf, r12
+; CHECK-NEXT: mov.w #5, r13
+; CHECK-NEXT: mov.w #128, r14
; CHECK-NEXT: call #memset
call void @llvm.memset.p0i8.i16(i8* %0, i8 5, i16 128, i32 1, i1 false)
ret void
diff --git a/test/CodeGen/MSP430/setcc.ll b/test/CodeGen/MSP430/setcc.ll
index d5a8057ddd6c..6e2ec8ea3ea1 100644
--- a/test/CodeGen/MSP430/setcc.ll
+++ b/test/CodeGen/MSP430/setcc.ll
@@ -9,10 +9,10 @@ define i16 @sccweqand(i16 %a, i16 %b) nounwind {
ret i16 %t3
}
; CHECK-LABEL: sccweqand:
-; CHECK: bit.w r14, r15
-; CHECK: mov.w r2, r15
-; CHECK: rra.w r15
-; CHECK: and.w #1, r15
+; CHECK: bit.w r13, r12
+; CHECK: mov.w r2, r12
+; CHECK: rra.w r12
+; CHECK: and.w #1, r12
define i16 @sccwneand(i16 %a, i16 %b) nounwind {
%t1 = and i16 %a, %b
@@ -21,9 +21,9 @@ define i16 @sccwneand(i16 %a, i16 %b) nounwind {
ret i16 %t3
}
; CHECK-LABEL: sccwneand:
-; CHECK: bit.w r14, r15
-; CHECK: mov.w r2, r15
-; CHECK: and.w #1, r15
+; CHECK: bit.w r13, r12
+; CHECK: mov.w r2, r12
+; CHECK: and.w #1, r12
define i16 @sccwne(i16 %a, i16 %b) nounwind {
%t1 = icmp ne i16 %a, %b
@@ -31,11 +31,11 @@ define i16 @sccwne(i16 %a, i16 %b) nounwind {
ret i16 %t2
}
; CHECK-LABEL: sccwne:
-; CHECK: cmp.w r14, r15
-; CHECK: mov.w r2, r12
-; CHECK: rra.w r12
-; CHECK: mov.w #1, r15
-; CHECK: bic.w r12, r15
+; CHECK: cmp.w r13, r12
+; CHECK: mov.w r2, r13
+; CHECK: rra.w r13
+; CHECK: mov.w #1, r12
+; CHECK: bic.w r13, r12
define i16 @sccweq(i16 %a, i16 %b) nounwind {
%t1 = icmp eq i16 %a, %b
@@ -43,10 +43,10 @@ define i16 @sccweq(i16 %a, i16 %b) nounwind {
ret i16 %t2
}
; CHECK-LABEL: sccweq:
-; CHECK: cmp.w r14, r15
-; CHECK: mov.w r2, r15
-; CHECK: rra.w r15
-; CHECK: and.w #1, r15
+; CHECK: cmp.w r13, r12
+; CHECK: mov.w r2, r12
+; CHECK: rra.w r12
+; CHECK: and.w #1, r12
define i16 @sccwugt(i16 %a, i16 %b) nounwind {
%t1 = icmp ugt i16 %a, %b
@@ -54,9 +54,9 @@ define i16 @sccwugt(i16 %a, i16 %b) nounwind {
ret i16 %t2
}
; CHECK-LABEL: sccwugt:
-; CHECK: cmp.w r15, r14
-; CHECK: mov.w #1, r15
-; CHECK: bic.w r2, r15
+; CHECK: cmp.w r12, r13
+; CHECK: mov.w #1, r12
+; CHECK: bic.w r2, r12
define i16 @sccwuge(i16 %a, i16 %b) nounwind {
%t1 = icmp uge i16 %a, %b
@@ -64,9 +64,9 @@ define i16 @sccwuge(i16 %a, i16 %b) nounwind {
ret i16 %t2
}
; CHECK-LABEL: sccwuge:
-; CHECK: cmp.w r14, r15
-; CHECK: mov.w r2, r15
-; CHECK: and.w #1, r15
+; CHECK: cmp.w r13, r12
+; CHECK: mov.w r2, r12
+; CHECK: and.w #1, r12
define i16 @sccwult(i16 %a, i16 %b) nounwind {
%t1 = icmp ult i16 %a, %b
@@ -74,9 +74,9 @@ define i16 @sccwult(i16 %a, i16 %b) nounwind {
ret i16 %t2
}
; CHECK-LABEL: sccwult:
-; CHECK: cmp.w r14, r15
-; CHECK: mov.w #1, r15
-; CHECK: bic.w r2, r15
+; CHECK: cmp.w r13, r12
+; CHECK: mov.w #1, r12
+; CHECK: bic.w r2, r12
define i16 @sccwule(i16 %a, i16 %b) nounwind {
%t1 = icmp ule i16 %a, %b
@@ -84,9 +84,9 @@ define i16 @sccwule(i16 %a, i16 %b) nounwind {
ret i16 %t2
}
; CHECK-LABEL: sccwule:
-; CHECK: cmp.w r15, r14
-; CHECK: mov.w r2, r15
-; CHECK: and.w #1, r15
+; CHECK: cmp.w r12, r13
+; CHECK: mov.w r2, r12
+; CHECK: and.w #1, r12
define i16 @sccwsgt(i16 %a, i16 %b) nounwind {
%t1 = icmp sgt i16 %a, %b
diff --git a/test/CodeGen/MSP430/struct-return.ll b/test/CodeGen/MSP430/struct-return.ll
new file mode 100644
index 000000000000..c28bf06af439
--- /dev/null
+++ b/test/CodeGen/MSP430/struct-return.ll
@@ -0,0 +1,23 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"
+target triple = "msp430---elf"
+
+; Allow simple structures to be returned by value.
+
+%s = type { i64, i64 }
+
+define %s @fred() #0 {
+; CHECK-LABEL: fred:
+; CHECK: mov.w #2314, 14(r12)
+; CHECK: mov.w #2828, 12(r12)
+; CHECK: mov.w #3342, 10(r12)
+; CHECK: mov.w #3840, 8(r12)
+; CHECK: mov.w #258, 6(r12)
+; CHECK: mov.w #772, 4(r12)
+; CHECK: mov.w #1286, 2(r12)
+; CHECK: mov.w #1800, 0(r12)
+ ret %s {i64 72623859790382856, i64 651345242494996224}
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/MSP430/vararg.ll b/test/CodeGen/MSP430/vararg.ll
index 9e511fce956f..6c8bceff5de9 100644
--- a/test/CodeGen/MSP430/vararg.ll
+++ b/test/CodeGen/MSP430/vararg.ll
@@ -25,13 +25,13 @@ define i16 @va_arg(i8* %vl) nounwind {
entry:
; CHECK-LABEL: va_arg:
%vl.addr = alloca i8*, align 2
-; CHECK: mov.w r15, 0(r1)
+; CHECK: mov.w r12, 0(r1)
store i8* %vl, i8** %vl.addr, align 2
-; CHECK: mov.w r15, [[REG:r[0-9]+]]
+; CHECK: mov.w r12, [[REG:r[0-9]+]]
; CHECK-NEXT: add.w #2, [[REG]]
; CHECK-NEXT: mov.w [[REG]], 0(r1)
%0 = va_arg i8** %vl.addr, i16
-; CHECK-NEXT: mov.w 0(r15), r15
+; CHECK-NEXT: mov.w 0(r12), r12
ret i16 %0
}
@@ -40,11 +40,11 @@ entry:
; CHECK-LABEL: va_copy:
%vl.addr = alloca i8*, align 2
%vl2 = alloca i8*, align 2
-; CHECK: mov.w r15, 2(r1)
+; CHECK: mov.w r12, 2(r1)
store i8* %vl, i8** %vl.addr, align 2
%0 = bitcast i8** %vl2 to i8*
%1 = bitcast i8** %vl.addr to i8*
-; CHECK-NEXT: mov.w r15, 0(r1)
+; CHECK-NEXT: mov.w r12, 0(r1)
call void @llvm.va_copy(i8* %0, i8* %1)
ret void
}
diff --git a/test/CodeGen/Mips/2009-11-16-CstPoolLoad.ll b/test/CodeGen/Mips/2009-11-16-CstPoolLoad.ll
index c0229c626a0e..d3cc03ffc8e3 100644
--- a/test/CodeGen/Mips/2009-11-16-CstPoolLoad.ll
+++ b/test/CodeGen/Mips/2009-11-16-CstPoolLoad.ll
@@ -17,7 +17,7 @@ entry:
; STATIC-N32: lwc1 $f0, %lo(.LCPI0_0)($[[R0]])
; PIC-N64: ld $[[R0:[0-9]+]], %got_page(.LCPI0_0)
; PIC-N64: lwc1 $f0, %got_ofst(.LCPI0_0)($[[R0]])
-; STATIC-N64: ld $[[R0:[0-9]+]], %got_page(.LCPI0_0)
-; STATIC-N64: lwc1 $f0, %got_ofst(.LCPI0_0)($[[R0]])
+; STATIC-N64: lui $[[R0:[0-9]+]], %highest(.LCPI0_0)
+; STATIC-N64: lwc1 $f0, %lo(.LCPI0_0)($[[R0]])
ret float 0x400B333340000000
}
diff --git a/test/CodeGen/Mips/2010-07-20-Switch.ll b/test/CodeGen/Mips/2010-07-20-Switch.ll
index 5f0a0a5a4929..087a34f3c1bc 100644
--- a/test/CodeGen/Mips/2010-07-20-Switch.ll
+++ b/test/CodeGen/Mips/2010-07-20-Switch.ll
@@ -3,13 +3,13 @@
; RUN: llc < %s -march=mips -relocation-model=pic | \
; RUN: FileCheck %s -check-prefix=PIC-O32
; RUN: llc < %s -march=mips64 -relocation-model=pic -mcpu=mips4 | \
-; RUN: FileCheck %s -check-prefix=N64
+; RUN: FileCheck %s -check-prefix=PIC-N64
; RUN: llc < %s -march=mips64 -relocation-model=static -mcpu=mips4 | \
-; RUN: FileCheck %s -check-prefix=N64
+; RUN: FileCheck %s -check-prefix=STATIC-N64
; RUN: llc < %s -march=mips64 -relocation-model=pic -mcpu=mips64 | \
-; RUN: FileCheck %s -check-prefix=N64
+; RUN: FileCheck %s -check-prefix=PIC-N64
; RUN: llc < %s -march=mips64 -relocation-model=static -mcpu=mips64 | \
-; RUN: FileCheck %s -check-prefix=N64
+; RUN: FileCheck %s -check-prefix=STATIC-N64
define i32 @main() nounwind readnone {
entry:
@@ -20,18 +20,29 @@ entry:
; STATIC-O32: lui $[[R1:[0-9]+]], %hi($JTI0_0)
; STATIC-O32: addu $[[R2:[0-9]+]], $[[R0]], $[[R1]]
; STATIC-O32: lw $[[R3:[0-9]+]], %lo($JTI0_0)($[[R2]])
+
; PIC-O32: sll $[[R0:[0-9]+]], ${{[0-9]+}}, 2
; PIC-O32: lw $[[R1:[0-9]+]], %got($JTI0_0)
; PIC-O32: addu $[[R2:[0-9]+]], $[[R0]], $[[R1]]
; PIC-O32: lw $[[R4:[0-9]+]], %lo($JTI0_0)($[[R2]])
; PIC-O32: addu $[[R5:[0-9]+]], $[[R4:[0-9]+]]
; PIC-O32: jr $[[R5]]
-; N64: dsll $[[R0:[0-9]+]], ${{[0-9]+}}, 3
-; N64: ld $[[R1:[0-9]+]], %got_page(.LJTI0_0)
-; N64: daddu $[[R2:[0-9]+]], $[[R0:[0-9]+]], $[[R1]]
-; N64: ld $[[R4:[0-9]+]], %got_ofst(.LJTI0_0)($[[R2]])
-; N64: daddu $[[R5:[0-9]+]], $[[R4:[0-9]+]]
-; N64: jr $[[R5]]
+
+; STATIC-N64: mflo $[[R0:[0-9]]]
+; STATIC-N64: lui $[[R1:[0-9]]], %highest(.LJTI0_0)
+; STATIC-N64: daddiu $[[R2:[0-9]]], $[[R1]], %higher(.LJTI0_0)
+; STATIC-N64: dsll $[[R3:[0-9]]], $[[R2]], 16
+; STATIC-N64: daddiu $[[R4:[0-9]]], $[[R3]], %hi(.LJTI0_0)
+; STATIC-N64: dsll $[[R5:[0-9]]], $[[R4]], 16
+; STATIC-N64: daddu $[[R6:[0-9]]], $[[R0]], $[[R4]]
+; STATIC-N64: ld ${{[0-9]+}}, %lo(.LJTI0_0)($[[R6]])
+
+; PIC-N64: dsll $[[R0:[0-9]+]], ${{[0-9]+}}, 32
+; PIC-N64: ld $[[R1:[0-9]+]], %got_page(.LJTI0_0)
+; PIC-N64: daddu $[[R2:[0-9]+]], $[[R0:[0-9]+]], $[[R1]]
+; PIC-N64: ld $[[R4:[0-9]+]], %got_ofst(.LJTI0_0)($[[R2]])
+; PIC-N64: daddu $[[R5:[0-9]+]], $[[R4:[0-9]+]]
+; PIC-N64: jr $[[R5]]
switch i32 %0, label %bb4 [
i32 0, label %bb5
i32 1, label %bb1
@@ -65,12 +76,18 @@ bb5: ; preds = %entry
; PIC-O32: $JTI0_0:
; PIC-O32: .gpword
; PIC-O32: .gpword
-; PIC-O32: .gpword
-; PIC-O32: .gpword
-; N64: .p2align 3
-; N64: .LJTI0_0:
-; N64: .gpdword
-; N64: .gpdword
-; N64: .gpdword
-; N64: .gpdword
+; PIC-O32: .gpword
+; PIC-O32: .gpword
+; STATIC-N64: .p2align 3
+; STATIC-N64: .LJTI0_0:
+; STATIC-N64: .8byte
+; STATIC-N64: .8byte
+; STATIC-N64: .8byte
+; STATIC-N64: .8byte
+; PIC-N64: .p2align 3
+; PIC-N64: .LJTI0_0:
+; PIC-N64: .gpdword
+; PIC-N64: .gpdword
+; PIC-N64: .gpdword
+; PIC-N64: .gpdword
diff --git a/test/CodeGen/Mips/Fast-ISel/check-disabled-mcpus.ll b/test/CodeGen/Mips/Fast-ISel/check-disabled-mcpus.ll
index 5594de8177d4..b8973efda179 100644
--- a/test/CodeGen/Mips/Fast-ISel/check-disabled-mcpus.ll
+++ b/test/CodeGen/Mips/Fast-ISel/check-disabled-mcpus.ll
@@ -1,40 +1,40 @@
; Targets where we should not enable FastISel.
; RUN: llc -march=mips -mcpu=mips2 -O0 -relocation-model=pic \
-; RUN: -fast-isel-verbose <%s 2>&1 | FileCheck %s
+; RUN: -pass-remarks-missed=isel <%s 2>&1 | FileCheck %s
; RUN: llc -march=mips -mcpu=mips3 -O0 -relocation-model=pic -target-abi n64 \
-; RUN: -fast-isel-verbose <%s 2>&1 | FileCheck %s
+; RUN: -pass-remarks-missed=isel <%s 2>&1 | FileCheck %s
; RUN: llc -march=mips -mcpu=mips4 -O0 -relocation-model=pic -target-abi n64 \
-; RUN: -fast-isel-verbose <%s 2>&1 | FileCheck %s
+; RUN: -pass-remarks-missed=isel <%s 2>&1 | FileCheck %s
; RUN: llc -march=mips -mcpu=mips32r6 -O0 -relocation-model=pic \
-; RUN: -fast-isel-verbose <%s 2>&1 | FileCheck %s
+; RUN: -pass-remarks-missed=isel <%s 2>&1 | FileCheck %s
; RUN: llc -march=mips -mattr=mips16 -O0 -relocation-model=pic \
-; RUN: -fast-isel-verbose <%s 2>&1 | FileCheck %s
+; RUN: -pass-remarks-missed=isel <%s 2>&1 | FileCheck %s
; RUN: llc -march=mips -mcpu=mips32r2 -mattr=+micromips -O0 -relocation-model=pic \
-; RUN: -fast-isel-verbose <%s 2>&1 | FileCheck %s
+; RUN: -pass-remarks-missed=isel <%s 2>&1 | FileCheck %s
; RUN: llc -march=mips -mcpu=mips32r3 -mattr=+micromips -O0 -relocation-model=pic \
-; RUN: -fast-isel-verbose <%s 2>&1 | FileCheck %s
+; RUN: -pass-remarks-missed=isel <%s 2>&1 | FileCheck %s
; RUN: llc -march=mips -mcpu=mips32r5 -mattr=+micromips -O0 -relocation-model=pic \
-; RUN: -fast-isel-verbose <%s 2>&1 | FileCheck %s
+; RUN: -pass-remarks-missed=isel <%s 2>&1 | FileCheck %s
; RUN: llc -march=mips -mcpu=mips64 -O0 -relocation-model=pic -target-abi n64 \
-; RUN: -fast-isel-verbose <%s 2>&1 | FileCheck %s
+; RUN: -pass-remarks-missed=isel <%s 2>&1 | FileCheck %s
; RUN: llc -march=mips -mcpu=mips64r2 -O0 -relocation-model=pic -target-abi n64 \
-; RUN: -fast-isel-verbose <%s 2>&1 | FileCheck %s
+; RUN: -pass-remarks-missed=isel <%s 2>&1 | FileCheck %s
; RUN: llc -march=mips -mcpu=mips64r3 -O0 -relocation-model=pic -target-abi n64 \
-; RUN: -fast-isel-verbose <%s 2>&1 | FileCheck %s
+; RUN: -pass-remarks-missed=isel <%s 2>&1 | FileCheck %s
; RUN: llc -march=mips -mcpu=mips64r5 -O0 -relocation-model=pic -target-abi n64 \
-; RUN: -fast-isel-verbose <%s 2>&1 | FileCheck %s
+; RUN: -pass-remarks-missed=isel <%s 2>&1 | FileCheck %s
; RUN: llc -march=mips -mcpu=mips32r6 -O0 -relocation-model=pic \
-; RUN: -fast-isel-verbose <%s 2>&1 | FileCheck %s
+; RUN: -pass-remarks-missed=isel <%s 2>&1 | FileCheck %s
; Valid targets for FastISel.
; RUN: llc -march=mips -mcpu=mips32r0 -O0 -relocation-model=pic \
-; RUN: -fast-isel-verbose <%s 2>&1 | FileCheck %s -check-prefix=FISEL
+; RUN: -pass-remarks-missed=isel <%s 2>&1 | FileCheck %s -check-prefix=FISEL
; RUN: llc -march=mips -mcpu=mips32r2 -O0 -relocation-model=pic \
-; RUN: -fast-isel-verbose <%s 2>&1 | FileCheck %s -check-prefix=FISEL
+; RUN: -pass-remarks-missed=isel <%s 2>&1 | FileCheck %s -check-prefix=FISEL
; The CHECK prefix is being used by those targets that do not support FastISel.
; By checking that we don't emit the "FastISel missed terminator..." message,
diff --git a/test/CodeGen/Mips/Fast-ISel/fastcc-miss.ll b/test/CodeGen/Mips/Fast-ISel/fastcc-miss.ll
index d9ce8b3964a4..0aec8d506f77 100644
--- a/test/CodeGen/Mips/Fast-ISel/fastcc-miss.ll
+++ b/test/CodeGen/Mips/Fast-ISel/fastcc-miss.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -march=mipsel -mcpu=mips32r2 -O0 -relocation-model=pic \
-; RUN: -fast-isel-verbose 2>&1 | FileCheck %s
+; RUN: -pass-remarks-missed=isel 2>&1 | FileCheck %s
; CHECK: FastISel missed call:
; CHECK-SAME: %call = call fastcc i32 @foo(i32 signext %a, i32 signext %b)
diff --git a/test/CodeGen/Mips/abicalls.ll b/test/CodeGen/Mips/abicalls.ll
index 26bbab40b3b3..2de539776879 100644
--- a/test/CodeGen/Mips/abicalls.ll
+++ b/test/CodeGen/Mips/abicalls.ll
@@ -1,7 +1,12 @@
; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips32 -relocation-model=static %s -o - | FileCheck -check-prefixes=ABICALLS,STATIC %s
; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips32 -relocation-model=pic %s -o - | FileCheck -check-prefixes=ABICALLS,PIC %s
-; RUN: llc -filetype=asm -mtriple mips64el-unknown-linux -mcpu=mips4 -relocation-model=static %s -o - | FileCheck -check-prefixes=ABICALLS,PIC %s
-; RUN: llc -filetype=asm -mtriple mips64el-unknown-linux -mcpu=mips64 -relocation-model=static %s -o - | FileCheck -check-prefixes=ABICALLS,PIC %s
+; RUN: llc -filetype=asm -mtriple mips64el-unknown-linux -mcpu=mips4 -relocation-model=static %s -o - | FileCheck -check-prefixes=N64-STATIC %s
+; RUN: llc -filetype=asm -mtriple mips64el-unknown-linux -mcpu=mips64 -relocation-model=static %s -o - | FileCheck -check-prefixes=N64-STATIC %s
+; RUN: llc -filetype=asm -mtriple mips64el-unknown-linux -mcpu=mips4 -relocation-model=static -mattr=+sym32 %s -o - | FileCheck -check-prefixes=ABICALLS,STATIC %s
+; RUN: llc -filetype=asm -mtriple mips64el-unknown-linux -mcpu=mips64 -relocation-model=static -mattr=+sym32 %s -o - | FileCheck -check-prefixes=ABICALLS,STATIC %s
+; RUN: llc -filetype=asm -mtriple mips64el-unknown-linux -mcpu=mips4 -relocation-model=pic %s -o - | FileCheck -check-prefixes=ABICALLS %s
+; RUN: llc -filetype=asm -mtriple mips64el-unknown-linux -mcpu=mips64 -relocation-model=pic %s -o - | FileCheck -check-prefixes=ABICALLS %s
+
; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips32 -mattr noabicalls -relocation-model=static %s -o - | FileCheck -implicit-check-not='.abicalls' -implicit-check-not='pic0' %s
@@ -9,3 +14,6 @@
; STATIC: pic0
; PIC-NOT: pic0
+
+; N64-STATIC-NOT: .abicalls
+; N64-STATIC-NOT: .pic0
diff --git a/test/CodeGen/Mips/blockaddr.ll b/test/CodeGen/Mips/blockaddr.ll
index 9bc9a305a204..80b24bcdeb16 100644
--- a/test/CodeGen/Mips/blockaddr.ll
+++ b/test/CodeGen/Mips/blockaddr.ll
@@ -34,10 +34,14 @@ entry:
; PIC-N64: daddiu ${{[0-9]+}}, $[[R0]], %got_ofst(.Ltmp[[T0]])
; PIC-N64: ld $[[R1:[0-9]+]], %got_page(.Ltmp[[T1:[0-9]+]])
; PIC-N64: daddiu ${{[0-9]+}}, $[[R1]], %got_ofst(.Ltmp[[T1]])
-; STATIC-N64: ld $[[R2:[0-9]+]], %got_page(.Ltmp[[T2:[0-9]+]])
-; STATIC-N64: daddiu ${{[0-9]+}}, $[[R2]], %got_ofst(.Ltmp[[T2]])
-; STATIC-N64: ld $[[R3:[0-9]+]], %got_page(.Ltmp[[T3:[0-9]+]])
-; STATIC-N64: daddiu ${{[0-9]+}}, $[[R3]], %got_ofst(.Ltmp[[T3]])
+
+; STATIC-N64: lui $[[R0:[0-9]]], %highest(.Ltmp[[L0:[0-9]]])
+; STATIC-N64: daddiu $[[R1:[0-9]]], $[[R0]], %higher(.Ltmp[[L0]])
+; STATIC-N64: dsll $[[R2:[0-9]]], $[[R1]], 16
+; STATIC-N64: daddiu $[[R3:[0-9]]], $[[R2]], %hi(.Ltmp[[L0]])
+; STATIC-N64: dsll $[[R4:[0-9]]], $[[R3]], 16
+; STATIC-N64: daddiu $[[R5:[0-9]]], $[[R4]], %lo(.Ltmp[[L0]])
+
; STATIC-MIPS16-1: .ent f
; STATIC-MIPS16-2: .ent f
; STATIC-MIPS16-1: li $[[R1_16:[0-9]+]], %hi($tmp[[TI_16:[0-9]+]])
diff --git a/test/CodeGen/Mips/brconnez.ll b/test/CodeGen/Mips/brconnez.ll
index 27cf9e8cacb8..eafddccdd4c7 100644
--- a/test/CodeGen/Mips/brconnez.ll
+++ b/test/CodeGen/Mips/brconnez.ll
@@ -7,7 +7,7 @@ define void @test() nounwind {
entry:
%0 = load i32, i32* @j, align 4
%cmp = icmp eq i32 %0, 0
- br i1 %cmp, label %if.then, label %if.end
+ br i1 %cmp, label %if.then, label %if.end, !prof !1
; 16: bnez ${{[0-9]+}}, $[[LABEL:[0-9A-Ba-b_]+]]
; 16: lw ${{[0-9]+}}, %got(result)(${{[0-9]+}})
@@ -21,4 +21,4 @@ if.end: ; preds = %if.then, %entry
ret void
}
-
+!1 = !{!"branch_weights", i32 2, i32 1}
diff --git a/test/CodeGen/Mips/cconv/arguments-float.ll b/test/CodeGen/Mips/cconv/arguments-float.ll
index a76cf6226dc0..004f6d94749d 100644
--- a/test/CodeGen/Mips/cconv/arguments-float.ll
+++ b/test/CodeGen/Mips/cconv/arguments-float.ll
@@ -49,7 +49,7 @@ entry:
; We won't test the way the global address is calculated in this test. This is
; just to get the register number for the other checks.
; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles)
-; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(doubles)(
+; SYM64-DAG: daddiu [[R2:\$[0-9]]], ${{[0-9]+}}, %lo(doubles)
; The first four arguments are the same in O32/N32/N64.
; The first argument is floating point but soft-float is enabled so floating
@@ -63,39 +63,39 @@ entry:
; NEW-DAG: sd $5, 16([[R2]])
; O32 has run out of argument registers and starts using the stack
-; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 24($sp)
-; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 28($sp)
+; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 16($sp)
+; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 20($sp)
; O32-DAG: sw [[R3]], 24([[R2]])
; O32-DAG: sw [[R4]], 28([[R2]])
; NEW-DAG: sd $6, 24([[R2]])
-; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 32($sp)
-; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 36($sp)
+; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 24($sp)
+; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 28($sp)
; O32-DAG: sw [[R3]], 32([[R2]])
; O32-DAG: sw [[R4]], 36([[R2]])
; NEW-DAG: sd $7, 32([[R2]])
-; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 40($sp)
-; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 44($sp)
+; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 32($sp)
+; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 36($sp)
; O32-DAG: sw [[R3]], 40([[R2]])
; O32-DAG: sw [[R4]], 44([[R2]])
; NEW-DAG: sd $8, 40([[R2]])
-; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 48($sp)
-; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 52($sp)
+; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 40($sp)
+; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 44($sp)
; O32-DAG: sw [[R3]], 48([[R2]])
; O32-DAG: sw [[R4]], 52([[R2]])
; NEW-DAG: sd $9, 48([[R2]])
-; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 56($sp)
-; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 60($sp)
+; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 48($sp)
+; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 52($sp)
; O32-DAG: sw [[R3]], 56([[R2]])
; O32-DAG: sw [[R4]], 60([[R2]])
; NEW-DAG: sd $10, 56([[R2]])
; N32/N64 have run out of registers and start using the stack too
-; O32-DAG: lw [[R3:\$[0-9]+]], 64($sp)
-; O32-DAG: lw [[R4:\$[0-9]+]], 68($sp)
+; O32-DAG: lw [[R3:\$[0-9]+]], 56($sp)
+; O32-DAG: lw [[R4:\$[0-9]+]], 60($sp)
; O32-DAG: sw [[R3]], 64([[R2]])
; O32-DAG: sw [[R4]], 68([[R2]])
; NEW-DAG: ld [[R3:\$[0-9]+]], 0($sp)
@@ -132,7 +132,7 @@ entry:
; We won't test the way the global address is calculated in this test. This is
; just to get the register number for the other checks.
; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(floats)
-; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(floats)(
+; SYM64-DAG: daddiu [[R2:\$[0-9]]], ${{[0-9]+}}, %lo(floats)
; The first four arguments are the same in O32/N32/N64.
; The first argument is floating point but soft-float is enabled so floating
@@ -180,10 +180,10 @@ entry:
; ALL-LABEL: double_arg2:
; We won't test the way the global address is calculated in this test. This is
; just to get the register number for the other checks.
-; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes)
-; SYM64-DAG: ld [[R1:\$[0-9]]], %got_disp(bytes)(
-; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles)
-; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(doubles)(
+; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes)
+; SYM64-DAG: daddiu [[R1:\$[0-9]]], ${{[0-9]+}}, %lo(bytes)
+; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles)
+; SYM64-DAG: daddiu [[R2:\$[0-9]]], ${{[0-9]+}}, %lo(doubles)
; The first four arguments are the same in O32/N32/N64.
; The first argument isn't floating point so floating point registers are not
@@ -207,10 +207,11 @@ entry:
; ALL-LABEL: float_arg2:
; We won't test the way the global address is calculated in this test. This is
; just to get the register number for the other checks.
-; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes)
-; SYM64-DAG: ld [[R1:\$[0-9]]], %got_disp(bytes)(
-; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(floats)
-; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(floats)(
+; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes)
+; SYM64-DAG: daddiu [[R1:\$[0-9]]], ${{[0-9]+}}, %lo(bytes)
+; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(floats)
+; SYM64-DAG: daddiu [[R2:\$[0-9]]], ${{[0-9]+}}, %lo(floats)
+
; The first four arguments are the same in O32/N32/N64.
; The first argument isn't floating point so floating point registers are not
diff --git a/test/CodeGen/Mips/cconv/arguments-fp128.ll b/test/CodeGen/Mips/cconv/arguments-fp128.ll
index 70df97608aa9..086ba9bce27a 100644
--- a/test/CodeGen/Mips/cconv/arguments-fp128.ll
+++ b/test/CodeGen/Mips/cconv/arguments-fp128.ll
@@ -30,7 +30,7 @@ entry:
; We won't test the way the global address is calculated in this test. This is
; just to get the register number for the other checks.
; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(ldoubles)
-; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(ldoubles)(
+; SYM64-DAG: daddiu [[R2:\$[0-9]]], ${{[0-9]+}}, %lo(ldoubles)
; The first four arguments are the same in N32/N64.
; The first argument is floating point but soft-float is enabled so floating
diff --git a/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll b/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll
index 5f7a86534bdf..c59ec02fa2d4 100644
--- a/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll
+++ b/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll
@@ -42,7 +42,7 @@ entry:
; We won't test the way the global address is calculated in this test. This is
; just to get the register number for the other checks.
; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles)
-; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(doubles)(
+; SYM64-DAG: daddiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles)
; O32 forbids using floating point registers for the non-variable portion.
; N32/N64 allow it.
@@ -107,7 +107,7 @@ entry:
; We won't test the way the global address is calculated in this test. This is
; just to get the register number for the other checks.
; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(floats)
-; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(floats)(
+; SYM64-DAG: daddiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(floats)
; The first four arguments are the same in O32/N32/N64.
; The non-variable portion should be unaffected.
diff --git a/test/CodeGen/Mips/cconv/arguments-hard-float.ll b/test/CodeGen/Mips/cconv/arguments-hard-float.ll
index 2e753d0f07cb..24bb95c7c68f 100644
--- a/test/CodeGen/Mips/cconv/arguments-hard-float.ll
+++ b/test/CodeGen/Mips/cconv/arguments-hard-float.ll
@@ -49,7 +49,7 @@ entry:
; We won't test the way the global address is calculated in this test. This is
; just to get the register number for the other checks.
; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles)
-; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(doubles)(
+; SYM64-DAG: daddiu [[R2:\$[0-9]]], ${{[0-9]+}}, %lo(doubles)
; The first argument is floating point so floating point registers are used.
; The first argument is the same for O32/N32/N64 but the second argument differs
@@ -111,8 +111,8 @@ entry:
; ALL-LABEL: float_args:
; We won't test the way the global address is calculated in this test. This is
; just to get the register number for the other checks.
-; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(floats)
-; SYM64-DAG: ld [[R1:\$[0-9]]], %got_disp(floats)(
+; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(floats)
+; SYM64-DAG: daddiu [[R1:\$[0-9]]], ${{[0-9]+}}, %lo(floats)
; The first argument is floating point so floating point registers are used.
; The first argument is the same for O32/N32/N64 but the second argument differs
@@ -164,9 +164,9 @@ entry:
; We won't test the way the global address is calculated in this test. This is
; just to get the register number for the other checks.
; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes)
-; SYM64-DAG: ld [[R1:\$[0-9]]], %got_disp(bytes)(
+; SYM64-DAG: daddiu [[R1:\$[0-9]]], ${{[0-9]+}}, %lo(bytes)
; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles)
-; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(doubles)(
+; SYM64-DAG: daddiu [[R2:\$[0-9]]], ${{[0-9]+}}, %lo(doubles)
; The first argument is the same in O32/N32/N64.
; ALL-DAG: sb $4, 1([[R1]])
@@ -195,9 +195,9 @@ entry:
; We won't test the way the global address is calculated in this test. This is
; just to get the register number for the other checks.
; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes)
-; SYM64-DAG: ld [[R1:\$[0-9]]], %got_disp(bytes)(
+; SYM64-DAG: daddiu [[R1:\$[0-9]]], ${{[0-9]+}}, %lo(bytes)
; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(floats)
-; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(floats)(
+; SYM64-DAG: daddiu [[R2:\$[0-9]]], ${{[0-9]+}}, %lo(floats)
; The first argument is the same in O32/N32/N64.
; ALL-DAG: sb $4, 1([[R1]])
diff --git a/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll b/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll
index 1a3b664d9159..6c601e96ed8d 100644
--- a/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll
+++ b/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll
@@ -30,7 +30,7 @@ entry:
; We won't test the way the global address is calculated in this test. This is
; just to get the register number for the other checks.
; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(ldoubles)
-; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(ldoubles)(
+; SYM64-DAG: daddiu [[R2:\$[0-9]]], ${{[0-9]+}}, %lo(ldoubles)
; The first four arguments are the same in N32/N64.
; ALL-DAG: sdc1 $f12, 16([[R2]])
diff --git a/test/CodeGen/Mips/cconv/arguments-struct.ll b/test/CodeGen/Mips/cconv/arguments-struct.ll
index 44ea7c0f8337..6288b5d52fd9 100644
--- a/test/CodeGen/Mips/cconv/arguments-struct.ll
+++ b/test/CodeGen/Mips/cconv/arguments-struct.ll
@@ -28,7 +28,7 @@ entry:
; SYM32-DAG: lui [[PTR_HI:\$[0-9]+]], %hi(bytes)
; SYM32-DAG: addiu [[PTR:\$[0-9]+]], [[PTR_HI]], %lo(bytes)
-; SYM64-DAG: ld [[PTR:\$[0-9]+]], %got_disp(bytes)(
+; SYM64-DAG: addiu [[PTR:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes)
; O32-BE-DAG: srl [[ARG:\$[0-9]+]], $4, 24
; O32-BE-DAG: sb [[ARG]], 1([[PTR]])
diff --git a/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll b/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll
index ba3aeb598f50..b41b5b7597cb 100644
--- a/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll
+++ b/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll
@@ -259,7 +259,9 @@ entry:
call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
ret void
; CHECK-LABEL: smallStruct_8b:
+ ; Check that the structure is not shifted before the pointer to str is loaded.
; CHECK-NOT: dsll
+ ; CHECK: lui
}
define void @smallStruct_9b(%struct.SmallStruct_9b* %ss) #0 {
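; The "CHECK: lui" lines added above exist to bound the preceding CHECK-NOT:
; FileCheck only forbids a CHECK-NOT pattern between the previous match and
; the next positive directive, so an unanchored CHECK-NOT would scan to the
; end of the input and could trip over an unrelated dsll from a later
; function. Minimal shape of the idiom (hypothetical label):
;
;   ; CHECK-LABEL: f:
;   ; CHECK-NOT: dsll
;   ; CHECK: lui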
diff --git a/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll b/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll
index 74d3d859ed75..8a20f5e43f1c 100644
--- a/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll
+++ b/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll
@@ -96,6 +96,7 @@ entry:
ret void
; CHECK-LABEL: smallStruct_1b1i:
; CHECK-NOT: dsll
+ ; CHECK: lui
}
define void @smallStruct_1b1s1b(%struct.SmallStruct_1b1s1b* %ss) #0 {
@@ -129,6 +130,7 @@ entry:
ret void
; CHECK-LABEL: smallStruct_1s1i:
; CHECK-NOT: dsll
+ ; CHECK: lui
}
define void @smallStruct_3b1s(%struct.SmallStruct_3b1s* %ss) #0 {
diff --git a/test/CodeGen/Mips/cconv/arguments-varargs.ll b/test/CodeGen/Mips/cconv/arguments-varargs.ll
index 9c20b882dcb6..d662128945f8 100644
--- a/test/CodeGen/Mips/cconv/arguments-varargs.ll
+++ b/test/CodeGen/Mips/cconv/arguments-varargs.ll
@@ -85,7 +85,7 @@ entry:
; N32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(hwords)
-; N64-DAG: ld [[GV:\$[0-9]+]], %got_disp(hwords)(
+; N64-DAG: daddiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(hwords)
; ALL-DAG: sh [[ARG1]], 2([[GV]])
@@ -203,7 +203,7 @@ entry:
; N32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(words)
-; N64-DAG: ld [[GV:\$[0-9]+]], %got_disp(words)(
+; N64-DAG: daddiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(words)
; ALL-DAG: sw [[ARG1]], 4([[GV]])
@@ -315,16 +315,15 @@ entry:
; Big-endian mode for N32/N64 must add an additional 4 to the offset due to byte
; order.
; O32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords)
-; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]])
+; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA_TMP2]])
; O32-DAG: sw [[ARG1]], 8([[GV]])
-; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
-; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4
-; O32-DAG: sw [[VA2]], 0([[SP]])
-; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]])
+; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4
+; O32-DAG: sw [[VA3]], 0([[SP]])
+; O32-DAG: lw [[ARG1:\$[0-9]+]], 4([[VA_TMP2]])
; O32-DAG: sw [[ARG1]], 12([[GV]])
; N32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords)
-; N64-DAG: ld [[GV:\$[0-9]+]], %got_disp(dwords)(
+; N64-DAG: daddiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords)
; NEW-DAG: ld [[ARG1:\$[0-9]+]], 0([[VA]])
; NEW-DAG: sd [[ARG1]], 8([[GV]])
@@ -349,10 +348,9 @@ entry:
; Load the second argument from the variable portion and copy it to the global.
; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]])
; O32-DAG: sw [[ARG2]], 16([[GV]])
-; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
-; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4
-; O32-DAG: sw [[VA2]], 0([[SP]])
-; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]])
+; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4
+; O32-DAG: sw [[VA3]], 0([[SP]])
+; O32-DAG: lw [[ARG2:\$[0-9]+]], 4([[VA_TMP2]])
; O32-DAG: sw [[ARG2]], 20([[GV]])
; NEW-DAG: ld [[ARG2:\$[0-9]+]], 0([[VA2]])
@@ -448,7 +446,7 @@ entry:
; N32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(hwords)
-; N64-DAG: ld [[GV:\$[0-9]+]], %got_disp(hwords)(
+; N64-DAG: daddiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(hwords)
; ALL-DAG: sh [[ARG1]], 2([[GV]])
@@ -566,7 +564,7 @@ entry:
; N32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(words)
-; N64-DAG: ld [[GV:\$[0-9]+]], %got_disp(words)(
+; N64-DAG: daddiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(words)
; ALL-DAG: sw [[ARG1]], 4([[GV]])
@@ -678,16 +676,15 @@ entry:
; Big-endian mode for N32/N64 must add an additional 4 to the offset due to byte
; order.
; O32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords)
-; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]])
+; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA_TMP2]])
; O32-DAG: sw [[ARG1]], 8([[GV]])
-; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
-; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4
-; O32-DAG: sw [[VA2]], 0([[SP]])
-; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]])
+; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4
+; O32-DAG: sw [[VA3]], 0([[SP]])
+; O32-DAG: lw [[ARG1:\$[0-9]+]], 4([[VA_TMP2]])
; O32-DAG: sw [[ARG1]], 12([[GV]])
; N32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords)
-; N64-DAG: ld [[GV:\$[0-9]+]], %got_disp(dwords)(
+; N64-DAG: daddiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords)
; NEW-DAG: ld [[ARG1:\$[0-9]+]], 0([[VA]])
; NEW-DAG: sd [[ARG1]], 8([[GV]])
@@ -712,10 +709,9 @@ entry:
; Load the second argument from the variable portion and copy it to the global.
; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]])
; O32-DAG: sw [[ARG2]], 16([[GV]])
-; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
-; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4
+; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4
-; O32-DAG: sw [[VA2]], 0([[SP]])
+; O32-DAG: sw [[VA3]], 0([[SP]])
-; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]])
+; O32-DAG: lw [[ARG2:\$[0-9]+]], 4([[VA_TMP2]])
; O32-DAG: sw [[ARG2]], 20([[GV]])
; NEW-DAG: ld [[ARG2:\$[0-9]+]], 0([[VA2]])
@@ -810,7 +806,7 @@ entry:
; N32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(hwords)
-; N64-DAG: ld [[GV:\$[0-9]+]], %got_disp(hwords)(
+; N64-DAG: daddiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(hwords)
; ALL-DAG: sh [[ARG1]], 2([[GV]])
@@ -927,7 +923,7 @@ entry:
; N32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(words)
-; N64-DAG: ld [[GV:\$[0-9]+]], %got_disp(words)(
+; N64-DAG: daddiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(words)
; ALL-DAG: sw [[ARG1]], 4([[GV]])
@@ -1040,14 +1036,13 @@ entry:
; O32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords)
; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]])
; O32-DAG: sw [[ARG1]], 8([[GV]])
-; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
-; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4
-; O32-DAG: sw [[VA2]], 0([[SP]])
-; O32-DAG: lw [[ARG1:\$[0-9]+]], 0([[VA]])
+; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4
+; O32-DAG: sw [[VA3]], 0([[SP]])
+; O32-DAG: lw [[ARG1:\$[0-9]+]], 4([[VA_TMP2]])
; O32-DAG: sw [[ARG1]], 12([[GV]])
; N32-DAG: addiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords)
-; N64-DAG: ld [[GV:\$[0-9]+]], %got_disp(dwords)(
+; N64-DAG: daddiu [[GV:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords)
; NEW-DAG: ld [[ARG1:\$[0-9]+]], 0([[VA]])
; NEW-DAG: sd [[ARG1]], 8([[GV]])
@@ -1072,10 +1067,9 @@ entry:
; Load the second argument from the variable portion and copy it to the global.
; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]])
; O32-DAG: sw [[ARG2]], 16([[GV]])
-; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
-; O32-DAG: addiu [[VA2:\$[0-9]+]], [[VA]], 4
-; O32-DAG: sw [[VA2]], 0([[SP]])
-; O32-DAG: lw [[ARG2:\$[0-9]+]], 0([[VA]])
+; O32-DAG: addiu [[VA3:\$[0-9]+]], [[VA2]], 4
+; O32-DAG: sw [[VA3]], 0([[SP]])
+; O32-DAG: lw [[ARG2:\$[0-9]+]], 4([[VA_TMP2]])
; O32-DAG: sw [[ARG2]], 20([[GV]])
; NEW-DAG: ld [[ARG2:\$[0-9]+]], 0([[VA2]])
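; For reference, the O32 va_arg pattern these checks walk through (a sketch;
; register numbers are illustrative):
;
;   lw    $2, 0($sp)       # load the current va pointer
;   addiu $3, $2, 4        # advance past the consumed 4-byte slot
;   sw    $3, 0($sp)       # store the updated pointer back
;   lw    $4, 0($2)        # read the argument through the old pointer
;
; The updated checks reuse one materialized pointer ([[VA_TMP2]]) and read
; the second word at offset 4 instead of reloading the pointer from the
; stack, matching the tighter code now generated.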
diff --git a/test/CodeGen/Mips/cconv/arguments.ll b/test/CodeGen/Mips/cconv/arguments.ll
index 7af4e5517d51..2466d59045bb 100644
--- a/test/CodeGen/Mips/cconv/arguments.ll
+++ b/test/CodeGen/Mips/cconv/arguments.ll
@@ -55,7 +55,7 @@ entry:
; We won't test the way the global address is calculated in this test. This is
; just to get the register number for the other checks.
; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes)
-; SYM64-DAG: ld [[R1:\$[0-9]+]], %got_disp(bytes)(
+; SYM64-DAG: daddiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes)
; The first four arguments are the same in O32/N32/N64
; ALL-DAG: sb $4, 1([[R1]])
@@ -120,9 +120,9 @@ entry:
; We won't test the way the global address is calculated in this test. This is
; just to get the register number for the other checks.
; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes)
-; SYM64-DAG: ld [[R1:\$[0-9]+]], %got_disp(bytes)(
+; SYM64-DAG: daddiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes)
; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords)
-; SYM64-DAG: ld [[R2:\$[0-9]+]], %got_disp(dwords)(
+; SYM64-DAG: daddiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords)
; The first argument is the same in O32/N32/N64.
; ALL-DAG: sb $4, 1([[R1]])
diff --git a/test/CodeGen/Mips/cconv/return-float.ll b/test/CodeGen/Mips/cconv/return-float.ll
index b9a6d6c5bc0d..dd457fc18cd8 100644
--- a/test/CodeGen/Mips/cconv/return-float.ll
+++ b/test/CodeGen/Mips/cconv/return-float.ll
@@ -30,8 +30,7 @@ entry:
; O32-DAG: lw $2, %lo(float)([[R1]])
; N32-DAG: lui [[R1:\$[0-9]+]], %hi(float)
; N32-DAG: lw $2, %lo(float)([[R1]])
-; N64-DAG: ld [[R1:\$[0-9]+]], %got_disp(float)(
-; N64-DAG: lw $2, 0([[R1]])
+; N64-DAG: lw $2, %lo(float)([[R1:\$[0-9]+]])
define double @retdouble() nounwind {
entry:
@@ -44,5 +43,4 @@ entry:
; O32-DAG: addiu [[R2:\$[0-9]+]], [[R1]], %lo(double)
; O32-DAG: lw $3, 4([[R2]])
; N32-DAG: ld $2, %lo(double)([[R1:\$[0-9]+]])
-; N64-DAG: ld [[R1:\$[0-9]+]], %got_disp(double)(
-; N64-DAG: ld $2, 0([[R1]])
+; N64-DAG: ld $2, %lo(double)([[R1:\$[0-9]+]])
diff --git a/test/CodeGen/Mips/cconv/return-hard-float.ll b/test/CodeGen/Mips/cconv/return-hard-float.ll
index 768cb6a9f2c6..44ef65ee2581 100644
--- a/test/CodeGen/Mips/cconv/return-hard-float.ll
+++ b/test/CodeGen/Mips/cconv/return-hard-float.ll
@@ -33,8 +33,7 @@ entry:
; O32-DAG: lwc1 $f0, %lo(float)([[R1]])
; N32-DAG: lui [[R1:\$[0-9]+]], %hi(float)
; N32-DAG: lwc1 $f0, %lo(float)([[R1]])
-; N64-DAG: ld [[R1:\$[0-9]+]], %got_disp(float)(
-; N64-DAG: lwc1 $f0, 0([[R1]])
+; N64-DAG: lwc1 $f0, %lo(float)([[R1:\$[0-9]+]])
define double @retdouble() nounwind {
entry:
@@ -45,8 +44,7 @@ entry:
; ALL-LABEL: retdouble:
; O32-DAG: ldc1 $f0, %lo(double)([[R1:\$[0-9]+]])
; N32-DAG: ldc1 $f0, %lo(double)([[R1:\$[0-9]+]])
-; N64-DAG: ld [[R1:\$[0-9]+]], %got_disp(double)(
-; N64-DAG: ldc1 $f0, 0([[R1]])
+; N64-DAG: ldc1 $f0, %lo(double)([[R1:\$[0-9]+]])
define { double, double } @retComplexDouble() #0 {
%retval = alloca { double, double }, align 8
diff --git a/test/CodeGen/Mips/cconv/return-hard-fp128.ll b/test/CodeGen/Mips/cconv/return-hard-fp128.ll
index bdbfb80bd4aa..e527866eb97b 100644
--- a/test/CodeGen/Mips/cconv/return-hard-fp128.ll
+++ b/test/CodeGen/Mips/cconv/return-hard-fp128.ll
@@ -24,8 +24,8 @@ entry:
; N32-DAG: dmtc1 [[R2]], $f0
; N32-DAG: dmtc1 [[R4]], $f2
-; N64-DAG: ld [[R2:\$[0-9]+]], %got_disp(fp128)([[R1:\$[0-9]+]])
-; N64-DAG: ld [[R3:\$[0-9]+]], 0([[R2]])
+; N64-DAG: lui [[R2:\$[0-9]+]], %highest(fp128)
+; N64-DAG: ld [[R3:\$[0-9]+]], %lo(fp128)([[R2]])
; N64-DAG: ld [[R4:\$[0-9]+]], 8([[R2]])
; N64-DAG: dmtc1 [[R3]], $f0
; N64-DAG: dmtc1 [[R4]], $f2
diff --git a/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll b/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll
index 9b178e4380d1..492db7689500 100644
--- a/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll
+++ b/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll
@@ -29,8 +29,8 @@ entry:
; N32-DAG: ld [[R4:\$[0-9]+]], 8([[R3]])
; N32-DAG: dmtc1 [[R4]], $f1
-; N64-DAG: ld [[R1:\$[0-9]+]], %got_disp(struct_fp128)($1)
-; N64-DAG: ld [[R2:\$[0-9]+]], 0([[R1]])
+; N64-DAG: lui [[R1:\$[0-9]+]], %highest(struct_fp128)
+; N64-DAG: ld [[R2:\$[0-9]+]], %lo(struct_fp128)([[R1]])
; N64-DAG: dmtc1 [[R2]], $f0
; N64-DAG: ld [[R4:\$[0-9]+]], 8([[R1]])
; N64-DAG: dmtc1 [[R4]], $f1
diff --git a/test/CodeGen/Mips/cconv/return-struct.ll b/test/CodeGen/Mips/cconv/return-struct.ll
index da20919ffd42..0997cfbd98a1 100644
--- a/test/CodeGen/Mips/cconv/return-struct.ll
+++ b/test/CodeGen/Mips/cconv/return-struct.ll
@@ -37,12 +37,10 @@ entry:
; N32-BE-DAG: lb [[R2:\$[0-9]+]], %lo(struct_byte)([[R1]])
; N32-BE-DAG: dsll $2, [[R2]], 56
-; N64-LE-DAG: ld [[R1:\$[0-9]+]], %got_disp(struct_byte)($1)
-; N64-LE-DAG: lb $2, 0([[R1]])
+; N64-LE-DAG: lb $2, %lo(struct_byte)(${{[0-9]+}})
-; N64-BE-DAG: ld [[R1:\$[0-9]+]], %got_disp(struct_byte)($1)
-; N64-BE-DAG: lb [[R2:\$[0-9]+]], 0([[R1]])
-; N64-BE-DAG: dsll $2, [[R2]], 56
+; N64-BE-DAG: lb [[R1:\$[0-9]+]], %lo(struct_byte)(${{[0-9]+}})
+; N64-BE-DAG: dsll $2, [[R1]], 56
; This test is based on the way clang currently lowers {i8,i8} to {i16}.
; FIXME: It should probably work without any lowering too but this doesn't
@@ -75,13 +73,15 @@ entry:
; N32-BE-DAG: lh [[R3:\$[0-9]+]], 8([[SP:\$sp]])
; N32-BE-DAG: dsll $2, [[R3]], 48
-; N64-LE-DAG: ld [[R1:\$[0-9]+]], %got_disp(struct_2byte)($1)
-; N64-LE-DAG: lhu [[R2:\$[0-9]+]], 0([[R1]])
+; N64-LE-DAG: daddiu $[[R0:[0-9]+]], ${{[0-9]+}}, %hi(struct_2byte)
+; N64-LE-DAG: dsll [[R1:\$[0-9]]], $[[R0]], 16
+; N64-LE-DAG: lhu [[R2:\$[0-9]+]], %lo(struct_2byte)([[R1]])
; N64-LE-DAG: sh [[R2]], 8([[SP:\$sp]])
; N64-LE-DAG: lh $2, 8([[SP:\$sp]])
-; N64-BE-DAG: ld [[R1:\$[0-9]+]], %got_disp(struct_2byte)($1)
-; N64-BE-DAG: lhu [[R2:\$[0-9]+]], 0([[R1]])
+; N64-BE-DAG: daddiu $[[R0:[0-9]+]], ${{[0-9]+}}, %hi(struct_2byte)
+; N64-BE-DAG: dsll $[[R1:[0-9]]], $[[R0]], 16
+; N64-BE-DAG: lhu [[R2:\$[0-9]+]], %lo(struct_2byte)($[[R1]])
; N64-BE-DAG: sh [[R2]], 8([[SP:\$sp]])
; N64-BE-DAG: lh [[R3:\$[0-9]+]], 8([[SP:\$sp]])
; N64-BE-DAG: dsll $2, [[R3]], 48
@@ -126,14 +126,14 @@ entry:
; N32-BE-DAG: or [[R4:\$[0-9]+]], [[R3]], [[R2]]
; N32-BE-DAG: dsll $2, [[R4]], 16
-; N64-LE-DAG: ld [[PTR:\$[0-9]+]], %got_disp(struct_3xi16)($1)
+; N64-LE-DAG: daddiu [[PTR:\$[0-9]+]], [[R0:\$[0-9]+]], %lo(struct_3xi16)
; N64-LE-DAG: lh [[R1:\$[0-9]+]], 4([[PTR]])
-; N64-LE-DAG: lwu [[R2:\$[0-9]+]], 0([[PTR]])
+; N64-LE-DAG: lwu [[R2:\$[0-9]+]], %lo(struct_3xi16)([[R0]])
; N64-LE-DAG: dsll [[R3:\$[0-9]+]], [[R1]], 32
; N64-LE-DAG: or $2, [[R2]], [[R3]]
-; N64-BE-DAG: ld [[PTR:\$[0-9]+]], %got_disp(struct_3xi16)($1)
-; N64-BE-DAG: lw [[R1:\$[0-9]+]], 0([[PTR]])
+; N64-BE-DAG: daddiu [[PTR:\$[0-9]+]], [[R0:\$[0-9]+]], %lo(struct_3xi16)
+; N64-BE-DAG: lw [[R1:\$[0-9]+]], %lo(struct_3xi16)([[R0]])
; N64-BE-DAG: dsll [[R2:\$[0-9]+]], [[R1]], 16
; N64-BE-DAG: lhu [[R3:\$[0-9]+]], 4([[PTR]])
; N64-BE-DAG: or [[R4:\$[0-9]+]], [[R3]], [[R2]]
@@ -161,9 +161,8 @@ entry:
; N32: jal memcpy
; sret pointer is already in $4
-; N64-DAG: ld $5, %got_disp(struct_128xi16)(
-; N64-DAG: ld $25, %call16(memcpy)(
-; N64: jalr $25
+; N64-DAG: lui ${{[0-9]}}, %highest(struct_128xi16)
+; N64: jal memcpy
; Ensure that large structures (>128-bit) are returned indirectly.
; This will generate inlined memcpy's anyway so pick the smallest large
@@ -214,13 +213,14 @@ entry:
; N32-DAG: sw [[T5]], 20([[RET_PTR]])
; sret pointer is already in $4
-; N64-DAG: ld [[PTR:\$[0-9]+]], %got_disp(struct_6xi32)(
-; N64-DAG: lw [[T0:\$[0-9]+]], 0([[PTR]])
+; N64-DAG: lui [[PTR_HI:\$[0-9]+]], %highest(struct_6xi32)
+; N64-DAG: daddiu [[PTR:\$[0-9]+]], [[PTR_HI]], %lo(struct_6xi32)
; N64-DAG: lw [[T1:\$[0-9]+]], 4([[PTR]])
; N64-DAG: lw [[T2:\$[0-9]+]], 8([[PTR]])
; N64-DAG: lw [[T3:\$[0-9]+]], 12([[PTR]])
; N64-DAG: lw [[T4:\$[0-9]+]], 16([[PTR]])
; N64-DAG: lw [[T5:\$[0-9]+]], 20([[PTR]])
+; N64-DAG: lw [[T0:\$[0-9]+]], %lo(struct_6xi32)([[PTR_HI]])
; N64-DAG: sw [[T0]], 0($4)
; N64-DAG: sw [[T1]], 4($4)
; N64-DAG: sw [[T2]], 8($4)
diff --git a/test/CodeGen/Mips/cconv/return.ll b/test/CodeGen/Mips/cconv/return.ll
index 561c94cb5783..c2bbe77e54bb 100644
--- a/test/CodeGen/Mips/cconv/return.ll
+++ b/test/CodeGen/Mips/cconv/return.ll
@@ -33,8 +33,8 @@ entry:
; O32-DAG: lbu $2, %lo(byte)([[R1]])
; N32-DAG: lui [[R1:\$[0-9]+]], %hi(byte)
; N32-DAG: lbu $2, %lo(byte)([[R1]])
-; N64-DAG: ld [[R1:\$[0-9]+]], %got_disp(byte)(
-; N64-DAG: lbu $2, 0([[R1]])
+; N64-DAG: lui [[R1:\$[0-9]+]], %highest(byte)
+; N64-DAG: lbu $2, %lo(byte)([[R1]])
define i32 @reti32() nounwind {
entry:
@@ -47,8 +47,8 @@ entry:
; O32-DAG: lw $2, %lo(word)([[R1]])
; N32-DAG: lui [[R1:\$[0-9]+]], %hi(word)
; N32-DAG: lw $2, %lo(word)([[R1]])
-; N64-DAG: ld [[R1:\$[0-9]+]], %got_disp(word)(
-; N64-DAG: lw $2, 0([[R1]])
+; N64-DAG: lui [[R1:\$[0-9]+]], %highest(word)
+; N64-DAG: lw $2, %lo(word)([[R1]])
define i64 @reti64() nounwind {
entry:
@@ -62,5 +62,5 @@ entry:
; O32-DAG: addiu [[R2:\$[0-9]+]], [[R1]], %lo(dword)
; O32-DAG: lw $3, 4([[R2]])
; N32-DAG: ld $2, %lo(dword)([[R1:\$[0-9]+]])
-; N64-DAG: ld [[R1:\$[0-9]+]], %got_disp(dword)([[R1:\$[0-9]+]])
-; N64-DAG: ld $2, 0([[R1]])
+; N64-DAG: lui [[R1:\$[0-9]+]], %highest(dword)
+; N64-DAG: ld $2, %lo(dword)([[R1]])
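; For reference, the relocation operators matched above each select a 16-bit
; slice of the 64-bit address, with carry adjustments for the sign-extending
; adds (definitions as in the N64 ABI; a sketch, not something this test
; verifies):
;
;   %lo(x)      = x & 0xffff                       (sign-extended by daddiu)
;   %hi(x)      = ((x + 0x8000) >> 16) & 0xffff
;   %higher(x)  = ((x + 0x8000_8000) >> 32) & 0xffff
;   %highest(x) = ((x + 0x8000_8000_8000) >> 48) & 0xffff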
diff --git a/test/CodeGen/Mips/cconv/roundl-call.ll b/test/CodeGen/Mips/cconv/roundl-call.ll
index 8e4d6597784c..0861197290ae 100644
--- a/test/CodeGen/Mips/cconv/roundl-call.ll
+++ b/test/CodeGen/Mips/cconv/roundl-call.ll
@@ -5,8 +5,8 @@
; RUN: llc -march=mips64 -mcpu=mips64 -target-abi=n64 -relocation-model=pic < \
; RUN: %s | FileCheck %s -check-prefixes=ALL,N64,HARD-FLOAT
-; RUN: llc -march=mips64el -mcpu=mips64 -target-abi=n64 -relocation-model=pic < \
-; RUN: %s | FileCheck %s -check-prefixes=ALL,N64,HARD-FLOAT
+; RUN: llc -march=mips64el -mcpu=mips64 -target-abi=n64 -relocation-model=pic \
+; RUN: < %s | FileCheck %s -check-prefixes=ALL,N64,HARD-FLOAT
; RUN: llc -march=mips64 -mcpu=mips64 -mattr=+soft-float -target-abi=n32 \
; RUN: -relocation-model=pic < %s | FileCheck %s -check-prefixes=ALL,N32,SOFT-FLOAT
@@ -14,9 +14,11 @@
; RUN: -relocation-model=pic < %s | FileCheck %s -check-prefixes=ALL,N32,SOFT-FLOAT
; RUN: llc -march=mips64 -mcpu=mips64 -mattr=+soft-float -target-abi=n64 < %s \
-; RUN: | FileCheck %s -check-prefixes=ALL,N64,SOFT-FLOAT
+; RUN: -relocation-model=pic | FileCheck %s \
+; RUN: -check-prefixes=ALL,N64,SOFT-FLOAT
; RUN: llc -march=mips64el -mcpu=mips64 -mattr=+soft-float -target-abi=n64 < \
-; RUN: %s | FileCheck %s -check-prefixes=ALL,N64,SOFT-FLOAT
+; RUN: %s -relocation-model=pic | FileCheck %s \
+; RUN: -check-prefixes=ALL,N64,SOFT-FLOAT
@fp128 = global fp128 zeroinitializer
diff --git a/test/CodeGen/Mips/cins.ll b/test/CodeGen/Mips/cins.ll
new file mode 100644
index 000000000000..4fe25564d1c1
--- /dev/null
+++ b/test/CodeGen/Mips/cins.ll
@@ -0,0 +1,92 @@
+; RUN: llc -march=mips64 -mcpu=octeon -target-abi=n64 < %s -o - | FileCheck %s
+
+define i64 @cins_zext(i32 signext %n) {
+entry:
+ %shl = shl i32 %n, 5
+ %conv = zext i32 %shl to i64
+ ret i64 %conv
+
+; CHECK-LABEL: cins_zext:
+; CHECK: cins $[[R0:[0-9]+]], $[[R1:[0-9]+]], 5, 26
+
+}
+
+define i64 @cins_and_shl(i64 zeroext %n) {
+entry:
+ %and = shl i64 %n, 8
+ %shl = and i64 %and, 16776960
+ ret i64 %shl
+
+; CHECK-LABEL: cins_and_shl:
+; CHECK: cins $[[R0:[0-9]+]], $[[R1:[0-9]+]], 8, 15
+
+}
+
+define i64 @cins_and_shl32(i64 zeroext %n) {
+entry:
+ %and = shl i64 %n, 38
+ %shl = and i64 %and, 18014123631575040
+ ret i64 %shl
+
+; CHECK-LABEL: cins_and_shl32:
+; CHECK: cins32 $[[R0:[0-9]+]], $[[R1:[0-9]+]], 6, 15
+
+}
+
+define zeroext i16 @cins_and_shl_16(i16 zeroext %n) {
+entry:
+ %0 = shl i16 %n, 2
+ %1 = and i16 %0, 60
+ ret i16 %1
+
+; CHECK-LABEL: cins_and_shl_16:
+; CHECK: cins $[[R0:[0-9]+]], $[[R1:[0-9]+]], 2, 3
+
+}
+
+define zeroext i8 @cins_and_shl_8(i8 zeroext %n) {
+entry:
+ %0 = shl i8 %n, 2
+ %1 = and i8 %0, 12
+ ret i8 %1
+
+; CHECK-LABEL: cins_and_shl_8:
+; CHECK: cins $[[R0:[0-9]+]], $[[R1:[0-9]+]], 2, 1
+
+}
+
+define i32 @cins_i32(i32 signext %a) {
+entry:
+ %and = shl i32 %a, 17
+ %shl = and i32 %and, 536739840
+ ret i32 %shl
+
+; CHECK-LABEL: cins_i32:
+; CHECK: cins $[[R0:[0-9]+]], $[[R1:[0-9]+]], 17, 11
+
+}
+
+define i64 @cins_shl_and(i32 signext %n) {
+entry:
+ %and = and i32 %n, 65535
+ %conv = zext i32 %and to i64
+ %shl = shl nuw nsw i64 %conv, 31
+ ret i64 %shl
+
+; CHECK-LABEL: cins_shl_and:
+; CHECK: cins $[[R0:[0-9]+]], $[[R1:[0-9]+]], 31, 15
+
+}
+
+
+define i64 @cins_shl_and32(i32 signext %n) {
+entry:
+ %and = and i32 %n, 65535
+ %conv = zext i32 %and to i64
+ %shl = shl nuw nsw i64 %conv, 47
+ ret i64 %shl
+
+; CHECK-LABEL: cins_shl_and32:
+; CHECK: cins32 $[[R0:[0-9]+]], $[[R1:[0-9]+]], 15, 15
+
+}
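; Octeon's cins ("clear and insert") fuses the shift and the mask into one
; instruction: cins $rt, $rs, pos, lenm1 takes the low lenm1+1 bits of $rs,
; shifts them left by pos, and clears every other bit; cins32 handles
; positions of 32 and up (its printed pos is relative to bit 32). A worked
; example matching cins_and_shl above:
;
;   %and = shl i64 %n, 8
;   %shl = and i64 %and, 16776960    ; 0x00FFFF00: a 16-bit field at bit 8
;   ==>  cins $rt, $rs, 8, 15        ; (n & 0xffff) << 8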
diff --git a/test/CodeGen/Mips/compactbranches/compact-branches-64.ll b/test/CodeGen/Mips/compactbranches/compact-branches-64.ll
index cda42a33ccf6..1290acd29d96 100644
--- a/test/CodeGen/Mips/compactbranches/compact-branches-64.ll
+++ b/test/CodeGen/Mips/compactbranches/compact-branches-64.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=mipsel -mcpu=mips64r6 -disable-mips-delay-filler -target-abi=n64 < %s | FileCheck %s
+; RUN: llc -relocation-model=pic -march=mipsel -mcpu=mips64r6 \
+; RUN: -disable-mips-delay-filler -target-abi=n64 < %s | FileCheck %s
; Function Attrs: nounwind
define void @l() {
diff --git a/test/CodeGen/Mips/compactbranches/compact-branches.ll b/test/CodeGen/Mips/compactbranches/compact-branches.ll
index 75ff8a0bbcbb..ebbeea592354 100644
--- a/test/CodeGen/Mips/compactbranches/compact-branches.ll
+++ b/test/CodeGen/Mips/compactbranches/compact-branches.ll
@@ -1,5 +1,7 @@
-; RUN: llc -march=mipsel -mcpu=mips32r6 -relocation-model=static -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=STATIC32
-; RUN: llc -march=mipsel -mcpu=mips64r6 -target-abi n64 -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=PIC
+; RUN: llc -march=mipsel -mcpu=mips32r6 -relocation-model=static \
+; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=STATIC32
+; RUN: llc -march=mipsel -mcpu=mips64r6 -relocation-model=pic -target-abi n64 \
+; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=PIC
; Function Attrs: nounwind
define void @l() {
@@ -38,7 +40,7 @@ entry:
; PIC: jalrc $25
%call1 = tail call i32 @i()
%cmp = icmp eq i32 %call, %call1
-; CHECK beqc
+; CHECK: beqc
br i1 %cmp, label %if.end, label %if.then
if.then: ; preds = %entry:
@@ -61,7 +63,7 @@ entry:
; PIC: jalrc $25
%call = tail call i32 @k()
%cmp = icmp slt i32 %call, 0
-; CHECK : bgez
+; CHECK: bgez
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry:
diff --git a/test/CodeGen/Mips/compactbranches/empty-block.mir b/test/CodeGen/Mips/compactbranches/empty-block.mir
new file mode 100644
index 000000000000..7831e51e3157
--- /dev/null
+++ b/test/CodeGen/Mips/compactbranches/empty-block.mir
@@ -0,0 +1,92 @@
+# RUN: llc -march=mipsel -mcpu=mips32r6 -start-after=block-placement %s -o - | FileCheck %s
+
+# Check that empty blocks in the cfg don't cause the mips hazard scheduler to
+# crash and that the nop is inserted correctly.
+
+# CHECK: blezc
+# CHECK: nop
+# CHECK: # BB#1:
+# CHECK: .insn
+# CHECK: # BB#2:
+# CHECK: .insn
+# CHECK: # BB#3:
+# CHECK: jal
+
+--- |
+ ; ModuleID = '<stdin>'
+ source_filename = "<stdin>"
+ target datalayout = "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64"
+
+ declare i32 @k()
+
+ declare void @f(i32)
+
+ define void @l5() {
+ entry:
+ %call = tail call i32 @k()
+ %cmp = icmp sgt i32 %call, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+ if.then: ; preds = %entry
+ tail call void @f(i32 signext 2)
+ br label %if.end
+
+ if.end: ; preds = %if.then, %entry
+ ret void
+ }
+
+---
+name: l5
+alignment: 2
+exposesReturnsTwice: false
+noVRegs: true
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 24
+ offsetAdjustment: 0
+ maxAlignment: 4
+ adjustsStack: true
+ hasCalls: true
+ maxCallFrameSize: 16
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+stack:
+ - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '%ra' }
+body: |
+ bb.0.entry:
+ successors: %bb.1.if.then(0x50000000), %bb.4.if.end(0x30000000)
+ liveins: %ra
+
+ %sp = ADDiu %sp, -24
+ CFI_INSTRUCTION def_cfa_offset 24
+ SW killed %ra, %sp, 20 :: (store 4 into %stack.0)
+ CFI_INSTRUCTION offset %ra_64, -4
+ JAL @k, csr_o32_fp64, implicit-def dead %ra, implicit-def %sp, implicit-def %v0
+ BLEZ %v0, %bb.4.if.end, implicit-def %at
+
+ bb.1.if.then:
+ successors: %bb.2.if.then(0x80000000)
+
+ bb.2.if.then:
+ successors: %bb.3.if.then(0x80000000)
+
+ bb.3.if.then:
+ successors: %bb.4.if.end(0x80000000)
+
+ %a0 = ADDiu %zero, 2
+ JAL @f, csr_o32_fp64, implicit-def dead %ra, implicit killed %a0, implicit-def %sp
+
+ bb.4.if.end:
+ %ra = LW %sp, 20 :: (load 4 from %stack.0)
+ %sp = ADDiu %sp, 24
+ PseudoReturn undef %ra
+
+...
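# Background for the checks above: MIPSR6 compact branches such as blezc have
# a "forbidden slot", meaning the instruction that follows must not itself be
# a control-transfer instruction, so the hazard scheduler inserts a nop when
# the next real instruction (here the jal, since the intervening blocks are
# empty) would violate that. The .insn directives keep the labels of the
# empty blocks classified as code. Expected shape (a sketch):
#
#   blezc $2, $BB0_4    # compact branch: no delay slot, but a forbidden slot
#   nop                 # filler so the slot holds a safe instruction
#   # BB#1:
#   .insn               # empty block: mark its location as an instruction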
diff --git a/test/CodeGen/Mips/cstmaterialization/stack.ll b/test/CodeGen/Mips/cstmaterialization/stack.ll
index 7266d00069cc..41b5bf638107 100644
--- a/test/CodeGen/Mips/cstmaterialization/stack.ll
+++ b/test/CodeGen/Mips/cstmaterialization/stack.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=mipsel -mcpu=mips32 < %s | FileCheck %s -check-prefix=CHECK-MIPS32
-; RUN: llc -march=mips64el -mcpu=mips64 < %s | \
+; RUN: llc -march=mips64el -mcpu=mips64 -relocation-model=pic < %s | \
; RUN: FileCheck %s -check-prefix=CHECK-MIPS64
; RUN: llc -march=mipsel -mcpu=mips64 -target-abi n32 < %s | \
; RUN: FileCheck %s -check-prefix=CHECK-MIPSN32
diff --git a/test/CodeGen/Mips/dext.ll b/test/CodeGen/Mips/dext.ll
new file mode 100644
index 000000000000..1794f16b2cd7
--- /dev/null
+++ b/test/CodeGen/Mips/dext.ll
@@ -0,0 +1,105 @@
+; RUN: llc -march=mips64 -mcpu=mips64r2 -target-abi=n64 < %s -o - | FileCheck %s
+
+define i64 @dext_add_zext(i32 signext %n) {
+entry:
+ %add = add i32 %n, 1
+ %res = zext i32 %add to i64
+ ret i64 %res
+
+; CHECK-LABEL: dext_add_zext:
+; CHECK: dext $[[R0:[0-9]+]], $[[R0:[0-9]+]], 0, 32
+
+}
+
+define i32 @ext_and24(i32 signext %a) {
+entry:
+ %and = and i32 %a, 16777215
+ ret i32 %and
+
+; CHECK-LABEL: ext_and24:
+; CHECK: ext $[[R0:[0-9]+]], $[[R1:[0-9]+]], 0, 24
+
+}
+
+define i64 @dext_and32(i64 zeroext %a) {
+entry:
+ %and = and i64 %a, 4294967295
+ ret i64 %and
+
+; CHECK-LABEL: dext_and32:
+; CHECK: dext $[[R0:[0-9]+]], $[[R1:[0-9]+]], 0, 32
+
+}
+
+define i64 @dext_and35(i64 zeroext %a) {
+entry:
+ %and = and i64 %a, 34359738367
+ ret i64 %and
+
+; CHECK-LABEL: dext_and35:
+; CHECK: dextm $[[R0:[0-9]+]], $[[R1:[0-9]+]], 0, 35
+
+}
+
+define i64 @dext_and20(i64 zeroext %a) {
+entry:
+ %and = and i64 %a, 1048575
+ ret i64 %and
+
+; CHECK-LABEL: dext_and20:
+; CHECK: dext $[[R0:[0-9]+]], $[[R1:[0-9]+]], 0, 20
+
+}
+
+define i64 @dext_and16(i64 zeroext %a) {
+entry:
+ %and = and i64 %a, 65535
+ ret i64 %and
+
+; CHECK-LABEL: dext_and16:
+; CHECK: andi $[[R0:[0-9]+]], $[[R1:[0-9]+]], 65535
+
+}
+
+define i64 @dext_lsr_and20(i64 zeroext %a) {
+entry:
+ %shr = lshr i64 %a, 5
+ %and = and i64 %shr, 1048575
+ ret i64 %and
+
+; CHECK-LABEL: dext_lsr_and20:
+; CHECK: dext $[[R0:[0-9]+]], $[[R1:[0-9]+]], 5, 20
+
+}
+
+define i64 @dext_lsr_and8(i64 zeroext %a) {
+entry:
+ %shr = lshr i64 %a, 40
+ %and = and i64 %shr, 255
+ ret i64 %and
+
+; CHECK-LABEL: dext_lsr_and8:
+; CHECK: dextu $[[R0:[0-9]+]], $[[R1:[0-9]+]], 40, 8
+
+}
+
+define i64 @dext_zext(i32 signext %a) {
+entry:
+ %conv = zext i32 %a to i64
+ ret i64 %conv
+
+; CHECK-LABEL: dext_zext:
+; CHECK: dext $[[R0:[0-9]+]], $[[R1:[0-9]+]], 0, 32
+
+}
+
+define i64 @dext_and_lsr(i64 zeroext %n) {
+entry:
+ %and = lshr i64 %n, 8
+ %shr = and i64 %and, 4095
+ ret i64 %shr
+
+; CHECK-LABEL: dext_and_lsr:
+; CHECK: dext $[[R0:[0-9]+]], $[[R1:[0-9]+]], 8, 12
+
+}
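; The dext family extracts a zero-extended bit field: dext $rt, $rs, pos,
; size computes (rs >> pos) & (2^size - 1). dextm covers field sizes above
; 32 and dextu covers positions above 31; the printed operands stay in
; unadjusted form and the assembler handles the encoding, which is why the
; checks name all three variants. A worked example matching dext_lsr_and20:
;
;   %shr = lshr i64 %a, 5
;   %and = and i64 %shr, 1048575    ; 0xFFFFF: a 20-bit field
;   ==>  dext $rt, $rs, 5, 20       ; (a >> 5) & 0xfffff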
diff --git a/test/CodeGen/Mips/elf_eflags.ll b/test/CodeGen/Mips/elf_eflags.ll
index 40910d8987d2..80b9c48f5bee 100644
--- a/test/CodeGen/Mips/elf_eflags.ll
+++ b/test/CodeGen/Mips/elf_eflags.ll
@@ -14,7 +14,6 @@
; EF_MIPS_ARCH_64R2 (0x80000000)
; Note that EF_MIPS_CPIC is set by -mabicalls which is the default on Linux
-; TODO need to support -mno-abicalls
; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32 -relocation-model=static %s -o - | FileCheck -check-prefix=CHECK-LE32 %s
; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32 %s -o - | FileCheck -check-prefix=CHECK-LE32_PIC %s
@@ -24,12 +23,12 @@
; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32r2 -mattr=+micromips %s -o - | FileCheck -check-prefix=CHECK-LE32R2-MICROMIPS_PIC %s
; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips4 -target-abi n64 -relocation-model=static %s -o - | FileCheck -check-prefix=CHECK-LE64 %s
-; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips4 -target-abi n64 %s -o - | FileCheck -check-prefix=CHECK-LE64_PIC %s
+; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips4 -target-abi n64 -relocation-model=pic %s -o - | FileCheck -check-prefix=CHECK-LE64_PIC %s
; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips64 -target-abi n64 -relocation-model=static %s -o - | FileCheck -check-prefix=CHECK-LE64 %s
-; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips64 -target-abi n64 %s -o - | FileCheck -check-prefix=CHECK-LE64_PIC %s
+; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips64 -target-abi n64 -relocation-model=pic %s -o - | FileCheck -check-prefix=CHECK-LE64_PIC %s
; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips64r2 -target-abi n64 -relocation-model=static %s -o - | FileCheck -check-prefix=CHECK-LE64R2 %s
-; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips64r2 -target-abi n64 %s -o - | FileCheck -check-prefix=CHECK-LE64R2_PIC %s
+; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips64r2 -target-abi n64 -relocation-model=pic %s -o - | FileCheck -check-prefix=CHECK-LE64R2_PIC %s
; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32r2 -mattr=+mips16 -relocation-model=pic %s -o - | FileCheck -check-prefix=CHECK-LE32R2-MIPS16 %s
@@ -61,7 +60,6 @@
; CHECK-LE32R2-MICROMIPS_PIC: .set micromips
;
; 64(R1) bit with NO_REORDER and static
-; CHECK-LE64: .abicalls
; CHECK-LE64: .set noreorder
;
; 64(R1) bit with NO_REORDER and PIC
@@ -69,7 +67,6 @@
; CHECK-LE64_PIC: .set noreorder
;
; 64R2 bit with NO_REORDER and static
-; CHECK-LE64R2: .abicalls
; CHECK-LE64R2: .set noreorder
;
; 64R2 bit with NO_REORDER and PIC
diff --git a/test/CodeGen/Mips/fastcc.ll b/test/CodeGen/Mips/fastcc.ll
index 13abc20eb3e8..fb1bc4d9a8ab 100644
--- a/test/CodeGen/Mips/fastcc.ll
+++ b/test/CodeGen/Mips/fastcc.ll
@@ -132,20 +132,19 @@ entry:
define internal fastcc void @callee0(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, i32 %a10, i32 %a11, i32 %a12, i32 %a13, i32 %a14, i32 %a15, i32 %a16) nounwind noinline {
entry:
; CHECK: callee0
-; CHECK: sw $4
-; CHECK: sw $5
-; CHECK: sw $6
-; CHECK: sw $7
-; CHECK: sw $8
-; CHECK: sw $9
-; CHECK: sw $10
-; CHECK: sw $11
-; CHECK: sw $12
-; CHECK: sw $13
-; CHECK: sw $14
-; CHECK: sw $15
-; CHECK: sw $24
-; CHECK: sw $3
+; CHECK-DAG: sw $4
+; CHECK-DAG: sw $5
+; CHECK-DAG: sw $7
+; CHECK-DAG: sw $8
+; CHECK-DAG: sw $9
+; CHECK-DAG: sw $10
+; CHECK-DAG: sw $11
+; CHECK-DAG: sw $12
+; CHECK-DAG: sw $13
+; CHECK-DAG: sw $14
+; CHECK-DAG: sw $15
+; CHECK-DAG: sw $24
+; CHECK-DAG: sw $3
; t6, t7 and t8 are reserved in NaCl and cannot be used for fastcc.
; CHECK-NACL-NOT: sw $14
@@ -223,27 +222,27 @@ entry:
define internal fastcc void @callee1(float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7, float %a8, float %a9, float %a10, float %a11, float %a12, float %a13, float %a14, float %a15, float %a16, float %a17, float %a18, float %a19, float %a20) nounwind noinline {
entry:
-; CHECK: callee1
-; CHECK: swc1 $f0
-; CHECK: swc1 $f1
-; CHECK: swc1 $f2
-; CHECK: swc1 $f3
-; CHECK: swc1 $f4
-; CHECK: swc1 $f5
-; CHECK: swc1 $f6
-; CHECK: swc1 $f7
-; CHECK: swc1 $f8
-; CHECK: swc1 $f9
-; CHECK: swc1 $f10
-; CHECK: swc1 $f11
-; CHECK: swc1 $f12
-; CHECK: swc1 $f13
-; CHECK: swc1 $f14
-; CHECK: swc1 $f15
-; CHECK: swc1 $f16
-; CHECK: swc1 $f17
-; CHECK: swc1 $f18
-; CHECK: swc1 $f19
+; CHECK-LABEL: callee1:
+; CHECK-DAG: swc1 $f0
+; CHECK-DAG: swc1 $f1
+; CHECK-DAG: swc1 $f2
+; CHECK-DAG: swc1 $f3
+; CHECK-DAG: swc1 $f4
+; CHECK-DAG: swc1 $f5
+; CHECK-DAG: swc1 $f6
+; CHECK-DAG: swc1 $f7
+; CHECK-DAG: swc1 $f8
+; CHECK-DAG: swc1 $f9
+; CHECK-DAG: swc1 $f10
+; CHECK-DAG: swc1 $f11
+; CHECK-DAG: swc1 $f12
+; CHECK-DAG: swc1 $f13
+; CHECK-DAG: swc1 $f14
+; CHECK-DAG: swc1 $f15
+; CHECK-DAG: swc1 $f16
+; CHECK-DAG: swc1 $f17
+; CHECK-DAG: swc1 $f18
+; CHECK-DAG: swc1 $f19
store float %a0, float* @gf0, align 4
store float %a1, float* @gf1, align 4
@@ -316,8 +315,6 @@ entry:
; NOODDSPREG-LABEL: callee2:
-; NOODDSPREG: addiu $sp, $sp, -[[OFFSET:[0-9]+]]
-
; Check that first 10 arguments are received in even float registers
; f0, f2, ... , f18. Check that 11th argument is received on stack.
@@ -333,7 +330,7 @@ entry:
; NOODDSPREG-DAG: swc1 $f16, 32($[[R0]])
; NOODDSPREG-DAG: swc1 $f18, 36($[[R0]])
-; NOODDSPREG-DAG: lwc1 $[[F0:f[0-9]*[02468]]], [[OFFSET]]($sp)
+; NOODDSPREG-DAG: lwc1 $[[F0:f[0-9]*[02468]]], 0($sp)
; NOODDSPREG-DAG: swc1 $[[F0]], 40($[[R0]])
store float %a0, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 0), align 4
@@ -397,7 +394,6 @@ entry:
; FP64-NOODDSPREG-LABEL: callee3:
-; FP64-NOODDSPREG: addiu $sp, $sp, -[[OFFSET:[0-9]+]]
; Check that first 10 arguments are received in even float registers
; f0, f2, ... , f18. Check that 11th argument is received on stack.
@@ -414,7 +410,7 @@ entry:
; FP64-NOODDSPREG-DAG: sdc1 $f16, 64($[[R0]])
; FP64-NOODDSPREG-DAG: sdc1 $f18, 72($[[R0]])
-; FP64-NOODDSPREG-DAG: ldc1 $[[F0:f[0-9]*[02468]]], [[OFFSET]]($sp)
+; FP64-NOODDSPREG-DAG: ldc1 $[[F0:f[0-9]*[02468]]], 0($sp)
; FP64-NOODDSPREG-DAG: sdc1 $[[F0]], 80($[[R0]])
store double %a0, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 0), align 8
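; Moving these stores to CHECK-DAG decouples the test from the scheduler:
; plain CHECK directives must match in order, while a consecutive run of
; CHECK-DAG directives may match in any order between the surrounding
; non-DAG checks. A minimal illustration (hypothetical):
;
;   ; CHECK-DAG: sw $4
;   ; CHECK-DAG: sw $5
;
; accepts both "sw $4 ... sw $5" and "sw $5 ... sw $4", so instruction
; scheduling changes no longer break the test.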
diff --git a/test/CodeGen/Mips/fcmp.ll b/test/CodeGen/Mips/fcmp.ll
index e22b12a5d534..eb6c06db4eff 100644
--- a/test/CodeGen/Mips/fcmp.ll
+++ b/test/CodeGen/Mips/fcmp.ll
@@ -1076,12 +1076,12 @@ entry:
; 32-CMP-DAG: bnezc $[[T4]],
; 64-C-DAG: add.s $[[T0:f[0-9]+]], $f13, $f12
-; 64-C-DAG: lwc1 $[[T1:f[0-9]+]], %got_ofst(.LCPI32_0)(
+; 64-C-DAG: lwc1 $[[T1:f[0-9]+]], %lo(.LCPI32_0)(
; 64-C-DAG: c.ole.s $[[T0]], $[[T1]]
; 64-C-DAG: bc1t
; 64-CMP-DAG: add.s $[[T0:f[0-9]+]], $f13, $f12
-; 64-CMP-DAG: lwc1 $[[T1:f[0-9]+]], %got_ofst(.LCPI32_0)(
+; 64-CMP-DAG: lwc1 $[[T1:f[0-9]+]], %lo(.LCPI32_0)(
; 64-CMP-DAG: cmp.le.s $[[T2:f[0-9]+]], $[[T0]], $[[T1]]
; 64-CMP-DAG: mfc1 $[[T3:[0-9]+]], $[[T2]]
; FIXME: This instruction is redundant.
@@ -1102,16 +1102,17 @@ entry:
; MM32R6-DAG: andi16 $[[T5:[0-9]+]], $[[T4]], 1
; MM32R6-DAG: bnez $[[T5]],
-; MM64R6-DAG: lui $[[T0:[0-9]+]], %hi(%neg(%gp_rel(bug1_f32)))
-; MM64R6-DAG: daddu $[[T1:[0-9]+]], $[[T0]], $25
-; MM64R6-DAG: daddiu $[[T2:[0-9]+]], $[[T1]], %lo(%neg(%gp_rel(bug1_f32)))
-; MM64R6-DAG: add.s $[[T3:f[0-9]+]], $f13, $f12
-; MM64R6-DAG: ld $[[T4:[0-9]+]], %got_page(.LCPI32_0)($[[T2]])
-; MM64R6-DAG: lwc1 $[[T5:f[0-9]+]], %got_ofst(.LCPI32_0)($[[T4]])
-; MM64R6-DAG: cmp.le.s $[[T6:f[0-9]+]], $[[T3]], $[[T5]]
-; MM64R6-DAG: mfc1 $[[T7:[0-9]+]], $[[T6]]
-; MM64R6-DAG: andi16 $[[T8:[0-9]+]], $[[T7]], 1
-; MM64R6-DAG: bnez $[[T8]],
+; MM64R6-DAG: add.s $[[T0:f[0-9]+]], $f13, $f12
+; MM64R6-DAG: lui $[[T1:[0-9]+]], %highest(.LCPI32_0)
+; MM64R6-DAG: daddiu $[[T2:[0-9]+]], $[[T1]], %higher(.LCPI32_0)
+; MM64R6-DAG: dsll $[[T3:[0-9]+]], $[[T2]], 16
+; MM64R6-DAG: daddiu $[[T4:[0-9]+]], $[[T3]], %hi(.LCPI32_0)
+; MM64R6-DAG: dsll $[[T5:[0-9]+]], $[[T4]], 16
+; MM64R6-DAG: lwc1 $[[T6:f[0-9]+]], %lo(.LCPI32_0)($[[T5]])
+; MM64R6-DAG: cmp.le.s $[[T7:f[0-9]+]], $[[T0]], $[[T6]]
+; MM64R6-DAG: mfc1 $[[T8:[0-9]+]], $[[T7]]
+; MM64R6-DAG: andi16 $[[T9:[0-9]+]], $[[T8]], 1
+; MM64R6-DAG: bnez $[[T9]],
%add = fadd fast float %at, %angle
%cmp = fcmp ogt float %add, 1.000000e+00
@@ -1145,12 +1146,12 @@ entry:
; 32-CMP-DAG: bnezc $[[T4]],
; 64-C-DAG: add.d $[[T0:f[0-9]+]], $f13, $f12
-; 64-C-DAG: ldc1 $[[T1:f[0-9]+]], %got_ofst(.LCPI33_0)(
+; 64-C-DAG: ldc1 $[[T1:f[0-9]+]], %lo(.LCPI33_0)(
; 64-C-DAG: c.ole.d $[[T0]], $[[T1]]
; 64-C-DAG: bc1t
; 64-CMP-DAG: add.d $[[T0:f[0-9]+]], $f13, $f12
-; 64-CMP-DAG: ldc1 $[[T1:f[0-9]+]], %got_ofst(.LCPI33_0)(
+; 64-CMP-DAG: ldc1 $[[T1:f[0-9]+]], %lo(.LCPI33_0)(
; 64-CMP-DAG: cmp.le.d $[[T2:f[0-9]+]], $[[T0]], $[[T1]]
; 64-CMP-DAG: mfc1 $[[T3:[0-9]+]], $[[T2]]
; FIXME: This instruction is redundant.
@@ -1171,16 +1172,17 @@ entry:
; MM32R6-DAG: andi16 $[[T5:[0-9]+]], $[[T4]], 1
; MM32R6-DAG: bnez $[[T5]],
-; MM64R6-DAG: lui $[[T0:[0-9]+]], %hi(%neg(%gp_rel(bug1_f64)))
-; MM64R6-DAG: daddu $[[T1:[0-9]+]], $[[T0]], $25
-; MM64R6-DAG: daddiu $[[T2:[0-9]+]], $[[T1]], %lo(%neg(%gp_rel(bug1_f64)))
-; MM64R6-DAG: add.d $[[T3:f[0-9]+]], $f13, $f12
-; MM64R6-DAG: ld $[[T4:[0-9]+]], %got_page(.LCPI33_0)($[[T2]])
-; MM64R6-DAG: ldc1 $[[T5:f[0-9]+]], %got_ofst(.LCPI33_0)($[[T4]])
-; MM64R6-DAG: cmp.le.d $[[T6:f[0-9]+]], $[[T3]], $[[T5]]
-; MM64R6-DAG: mfc1 $[[T7:[0-9]+]], $[[T6]]
-; MM64R6-DAG: andi16 $[[T8:[0-9]+]], $[[T7]], 1
-; MM64R6-DAG: bnez $[[T8]],
+; MM64R6-DAG: add.d $[[T0:f[0-9]+]], $f13, $f12
+; MM64R6-DAG: lui $[[T1:[0-9]+]], %highest(.LCPI33_0)
+; MM64R6-DAG: daddiu $[[T2:[0-9]+]], $[[T1]], %higher(.LCPI33_0)
+; MM64R6-DAG: dsll $[[T3:[0-9]+]], $[[T2]], 16
+; MM64R6-DAG: daddiu $[[T4:[0-9]+]], $[[T3]], %hi(.LCPI33_0)
+; MM64R6-DAG: dsll $[[T5:[0-9]+]], $[[T4]], 16
+; MM64R6-DAG: ldc1 $[[T6:f[0-9]+]], %lo(.LCPI33_0)($[[T5]])
+; MM64R6-DAG: cmp.le.d $[[T7:f[0-9]+]], $[[T0]], $[[T6]]
+; MM64R6-DAG: mfc1 $[[T8:[0-9]+]], $[[T7]]
+; MM64R6-DAG: andi16 $[[T9:[0-9]+]], $[[T8]], 1
+; MM64R6-DAG: bnez $[[T9]],
%add = fadd fast double %at, %angle
%cmp = fcmp ogt double %add, 1.000000e+00
diff --git a/test/CodeGen/Mips/fcopysign-f32-f64.ll b/test/CodeGen/Mips/fcopysign-f32-f64.ll
index a3ea22feca25..e0229d14c526 100644
--- a/test/CodeGen/Mips/fcopysign-f32-f64.ll
+++ b/test/CodeGen/Mips/fcopysign-f32-f64.ll
@@ -36,12 +36,12 @@ define double @func3(double %d, float %f) nounwind readnone {
entry:
; ALL-LABEL: func3:
-; 64: dmfc1 $[[R0:[0-9]+]], ${{.*}}
+; 64: mfc1 $[[MFC:[0-9]+]], $f13
; 64: daddiu $[[R1:[0-9]+]], $zero, 1
+; 64: dmfc1 $[[R0:[0-9]+]], ${{.*}}
; 64: dsll $[[R2:[0-9]+]], $[[R1]], 63
; 64: daddiu $[[R3:[0-9]+]], $[[R2]], -1
; 64: and $[[AND0:[0-9]+]], $[[R0]], $[[R3]]
-; 64: mfc1 $[[MFC:[0-9]+]], $f13
; 64: srl $[[SRL:[0-9]+]], $[[MFC:[0-9]+]], 31
; 64: dsll $[[DSLL:[0-9]+]], $[[SRL]], 63
; 64: or $[[OR:[0-9]+]], $[[AND0]], $[[DSLL]]
diff --git a/test/CodeGen/Mips/global-address.ll b/test/CodeGen/Mips/global-address.ll
index ecf5e563a577..ed79de920e85 100644
--- a/test/CodeGen/Mips/global-address.ll
+++ b/test/CodeGen/Mips/global-address.ll
@@ -29,9 +29,15 @@ entry:
; PIC-N64: ld $[[R0:[0-9]+]], %got_page(s1)
; PIC-N64: lw ${{[0-9]+}}, %got_ofst(s1)($[[R0]])
; PIC-N64: ld ${{[0-9]+}}, %got_disp(g1)
-; STATIC-N64: ld $[[R1:[0-9]+]], %got_page(s1)
-; STATIC-N64: lw ${{[0-9]+}}, %got_ofst(s1)($[[R1]])
-; STATIC-N64: ld ${{[0-9]+}}, %got_disp(g1)
+; STATIC-N64: lui $[[R1:[0-9]+]], %highest(s1)
+; STATIC-N64: daddiu ${{[0-9]+}}, ${{[0-9]+}}, %higher(s1)
+; STATIC-N64: daddiu ${{[0-9]+}}, ${{[0-9]+}}, %hi(s1)
+; STATIC-N64: dsll $[[R2:[0-9]+]], $[[R1]], 16
+; STATIC-N64: lw ${{[0-9]+}}, %lo(s1)($[[R2]])
+; STATIC-N64: lui $[[R3:[0-9]+]], %highest(g1)
+; STATIC-N64: daddiu $[[R3]], $[[R3]], %higher(g1)
+; STATIC-N64: daddiu $[[R3]], $[[R3]], %hi(g1)
+; STATIC-N64: lw ${{[0-9]+}}, %lo(g1)($[[R3]])
%0 = load i32, i32* @s1, align 4
tail call void @foo1(i32 %0) nounwind
diff --git a/test/CodeGen/Mips/inlineasm-constraint_ZC_2.ll b/test/CodeGen/Mips/inlineasm-constraint_ZC_2.ll
index a99cb976eaa9..2a0904c54c9a 100644
--- a/test/CodeGen/Mips/inlineasm-constraint_ZC_2.ll
+++ b/test/CodeGen/Mips/inlineasm-constraint_ZC_2.ll
@@ -1,7 +1,9 @@
; RUN: llc -march=mips -mcpu=mips32r6 < %s | FileCheck %s --check-prefixes=ALL,R6
-; RUN: llc -march=mips -mcpu=mips64r6 -target-abi=n64 < %s | FileCheck %s --check-prefixes=ALL,R6
+; RUN: llc -march=mips -mcpu=mips64r6 -target-abi=n64 -relocation-model=pic \
+; RUN: < %s | FileCheck %s --check-prefixes=ALL,R6
; RUN: llc -march=mips -mcpu=mips32 < %s | FileCheck %s --check-prefixes=ALL,PRER6
-; RUN: llc -march=mips -mcpu=mips64 -target-abi=n64 < %s | FileCheck %s --check-prefixes=ALL,PRER6
+; RUN: llc -march=mips -mcpu=mips64 -target-abi=n64 -relocation-model=pic \
+; RUN: < %s | FileCheck %s --check-prefixes=ALL,PRER6
%struct.anon = type { [63 x i32], i32, i32 }
diff --git a/test/CodeGen/Mips/llvm-ir/ashr.ll b/test/CodeGen/Mips/llvm-ir/ashr.ll
index c8d0e76f94e2..f9fb91be0906 100644
--- a/test/CodeGen/Mips/llvm-ir/ashr.ll
+++ b/test/CodeGen/Mips/llvm-ir/ashr.ll
@@ -83,20 +83,23 @@ entry:
; M2: srav $[[T0:[0-9]+]], $4, $7
; M2: andi $[[T1:[0-9]+]], $7, 32
- ; M2: bnez $[[T1]], $[[BB0:BB[0-9_]+]]
+ ; M2: beqz $[[T1]], $[[BB0:BB[0-9_]+]]
; M2: move $3, $[[T0]]
+ ; M2: bnez $[[T1]], $[[BB1:BB[0-9_]+]]
+ ; M2: nop
+ ; M2: $[[EXIT:BB[0-9_]+]]:
+ ; M2: jr $ra
+ ; M2: nop
+ ; M2: $[[BB0]]:
; M2: srlv $[[T2:[0-9]+]], $5, $7
; M2: not $[[T3:[0-9]+]], $7
; M2: sll $[[T4:[0-9]+]], $4, 1
; M2: sllv $[[T5:[0-9]+]], $[[T4]], $[[T3]]
+ ; M2: beqz $[[T1]], $[[EXIT]]
; M2: or $3, $[[T3]], $[[T2]]
- ; M2: $[[BB0]]:
- ; M2: beqz $[[T1]], $[[BB1:BB[0-9_]+]]
- ; M2: nop
- ; M2: sra $2, $4, 31
; M2: $[[BB1]]:
; M2: jr $ra
- ; M2: nop
+ ; M2: sra $2, $4, 31
; 32R1-R5: srlv $[[T0:[0-9]+]], $5, $7
; 32R1-R5: not $[[T1:[0-9]+]], $7
@@ -169,20 +172,23 @@ entry:
; M3: sll $[[T0:[0-9]+]], $7, 0
; M3: dsrav $[[T1:[0-9]+]], $4, $7
; M3: andi $[[T2:[0-9]+]], $[[T0]], 64
- ; M3: bnez $[[T3:[0-9]+]], [[BB0:.LBB[0-9_]+]]
+ ; M3: beqz $[[T3:[0-9]+]], [[BB0:.LBB[0-9_]+]]
; M3: move $3, $[[T1]]
+ ; M3: bnez $[[T3]], [[BB1:.LBB[0-9_]+]]
+ ; M3: nop
+ ; M3: [[EXIT:.LBB[0-9_]+]]:
+ ; M3: jr $ra
+ ; M3: nop
+ ; M3: [[BB0]]:
; M3: dsrlv $[[T4:[0-9]+]], $5, $7
; M3: dsll $[[T5:[0-9]+]], $4, 1
; M3: not $[[T6:[0-9]+]], $[[T0]]
; M3: dsllv $[[T7:[0-9]+]], $[[T5]], $[[T6]]
+ ; M3: beqz $[[T3]], [[EXIT]]
; M3: or $3, $[[T7]], $[[T4]]
- ; M3: [[BB0]]:
- ; M3: beqz $[[T3]], [[BB1:.LBB[0-9_]+]]
- ; M3: nop
- ; M3: dsra $2, $4, 63
; M3: [[BB1]]:
; M3: jr $ra
- ; M3: nop
+ ; M3: dsra $2, $4, 63
; GP64-NOT-R6: dsrlv $[[T0:[0-9]+]], $5, $7
; GP64-NOT-R6: dsll $[[T1:[0-9]+]], $4, 1
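; The reordered M2/M3 checks above trace the new block layout for wide
; arithmetic shifts split across two registers (i64 on 32-bit M2, i128 on
; 64-bit M3): the in-range path falls through, and the out-of-range path
; ends with the sign-fill placed in the return's delay slot (a sketch):
;
;   jr  $ra
;   sra $2, $4, 31     # runs in the jr delay slot: high word = sign bits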
diff --git a/test/CodeGen/Mips/llvm-ir/call.ll b/test/CodeGen/Mips/llvm-ir/call.ll
index 9af3e8d8cd23..a036fafbe969 100644
--- a/test/CodeGen/Mips/llvm-ir/call.ll
+++ b/test/CodeGen/Mips/llvm-ir/call.ll
@@ -6,24 +6,24 @@
; RUN: llc -march=mips -mcpu=mips32r5 -relocation-model=pic -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,O32,NOT-R6C
; RUN: llc -march=mips -mcpu=mips32r6 -relocation-model=pic -disable-mips-delay-filler -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,O32,R6C
; RUN: llc -march=mips -mcpu=mips32r6 -relocation-model=pic -mattr=+fp64,+nooddspreg -disable-mips-delay-filler -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,O32,R6C
-; RUN: llc -march=mips64 -mcpu=mips4 -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,N64,NOT-R6C
-; RUN: llc -march=mips64 -mcpu=mips64 -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,N64,NOT-R6C
-; RUN: llc -march=mips64 -mcpu=mips64r2 -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,N64,NOT-R6C
-; RUN: llc -march=mips64 -mcpu=mips64r3 -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,N64,NOT-R6C
-; RUN: llc -march=mips64 -mcpu=mips64r5 -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,N64,NOT-R6C
-; RUN: llc -march=mips64 -mcpu=mips64r6 -disable-mips-delay-filler -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,N64,R6C
+; RUN: llc -march=mips64 -mcpu=mips4 -relocation-model=pic -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,N64,NOT-R6C
+; RUN: llc -march=mips64 -mcpu=mips64 -relocation-model=pic -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,N64,NOT-R6C
+; RUN: llc -march=mips64 -mcpu=mips64r2 -relocation-model=pic -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,N64,NOT-R6C
+; RUN: llc -march=mips64 -mcpu=mips64r3 -relocation-model=pic -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,N64,NOT-R6C
+; RUN: llc -march=mips64 -mcpu=mips64r5 -relocation-model=pic -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,N64,NOT-R6C
+; RUN: llc -march=mips64 -mcpu=mips64r6 -relocation-model=pic -disable-mips-delay-filler -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,N64,R6C
; RUN: llc -march=mips -mcpu=mips32 -relocation-model=pic -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=ALL -check-prefix=O32 -check-prefix=NOT-R6C
; RUN: llc -march=mips -mcpu=mips32r2 -relocation-model=pic -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=ALL -check-prefix=O32 -check-prefix=NOT-R6C
; RUN: llc -march=mips -mcpu=mips32r3 -relocation-model=pic -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=ALL -check-prefix=O32 -check-prefix=NOT-R6C
; RUN: llc -march=mips -mcpu=mips32r5 -relocation-model=pic -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=ALL -check-prefix=O32 -check-prefix=NOT-R6C
; RUN: llc -march=mips -mcpu=mips32r6 -relocation-model=pic -disable-mips-delay-filler -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=ALL -check-prefix=O32 -check-prefix=R6C
; RUN: llc -march=mips -mcpu=mips32r6 -relocation-model=pic -mattr=+fp64,+nooddspreg -disable-mips-delay-filler -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=ALL -check-prefix=O32 -check-prefix=R6C
-; RUN: llc -march=mips64 -mcpu=mips4 -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NOT-R6C
-; RUN: llc -march=mips64 -mcpu=mips64 -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NOT-R6C
-; RUN: llc -march=mips64 -mcpu=mips64r2 -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NOT-R6C
-; RUN: llc -march=mips64 -mcpu=mips64r3 -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NOT-R6C
-; RUN: llc -march=mips64 -mcpu=mips64r5 -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NOT-R6C
-; RUN: llc -march=mips64 -mcpu=mips64r6 -disable-mips-delay-filler -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=R6C
+; RUN: llc -march=mips64 -mcpu=mips4 -relocation-model=pic -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NOT-R6C
+; RUN: llc -march=mips64 -mcpu=mips64 -relocation-model=pic -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NOT-R6C
+; RUN: llc -march=mips64 -mcpu=mips64r2 -relocation-model=pic -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NOT-R6C
+; RUN: llc -march=mips64 -mcpu=mips64r3 -relocation-model=pic -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NOT-R6C
+; RUN: llc -march=mips64 -mcpu=mips64r5 -relocation-model=pic -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NOT-R6C
+; RUN: llc -march=mips64 -mcpu=mips64r6 -relocation-model=pic -disable-mips-delay-filler -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=R6C
declare void @extern_void_void()
declare i32 @extern_i32_void()
diff --git a/test/CodeGen/Mips/llvm-ir/lshr.ll b/test/CodeGen/Mips/llvm-ir/lshr.ll
index 09617edc9406..926f3e4c8d79 100644
--- a/test/CodeGen/Mips/llvm-ir/lshr.ll
+++ b/test/CodeGen/Mips/llvm-ir/lshr.ll
@@ -81,20 +81,24 @@ entry:
; M2: srlv $[[T0:[0-9]+]], $4, $7
; M2: andi $[[T1:[0-9]+]], $7, 32
- ; M2: bnez $[[T1]], $[[BB0:BB[0-9_]+]]
+ ; M2: beqz $[[T1]], $[[BB0:BB[0-9_]+]]
; M2: move $3, $[[T0]]
+ ; M2: beqz $[[T1]], $[[BB1:BB[0-9_]+]]
+ ; M2: addiu $2, $zero, 0
+ ; M2: $[[EXIT:BB[0-9_]+]]:
+ ; M2: jr $ra
+ ; M2: nop
+ ; M2: $[[BB0]]:
; M2: srlv $[[T2:[0-9]+]], $5, $7
; M2: not $[[T3:[0-9]+]], $7
; M2: sll $[[T4:[0-9]+]], $4, 1
; M2: sllv $[[T5:[0-9]+]], $[[T4]], $[[T3]]
; M2: or $3, $[[T3]], $[[T2]]
- ; M2: $[[BB0]]:
- ; M2: bnez $[[T1]], $[[BB1:BB[0-9_]+]]
+ ; M2: bnez $[[T1]], $[[EXIT]]
; M2: addiu $2, $zero, 0
- ; M2: move $2, $[[T0]]
; M2: $[[BB1]]:
; M2: jr $ra
- ; M2: nop
+ ; M2: move $2, $[[T0]]
; 32R1-R5: srlv $[[T0:[0-9]+]], $5, $7
; 32R1-R5: not $[[T1:[0-9]+]], $7
@@ -160,20 +164,24 @@ entry:
; M3: sll $[[T0:[0-9]+]], $7, 0
; M3: dsrlv $[[T1:[0-9]+]], $4, $7
; M3: andi $[[T2:[0-9]+]], $[[T0]], 64
- ; M3: bnez $[[T3:[0-9]+]], [[BB0:\.LBB[0-9_]+]]
+ ; M3: beqz $[[T3:[0-9]+]], [[BB0:\.LBB[0-9_]+]]
; M3: move $3, $[[T1]]
+ ; M3: beqz $[[T3]], [[BB1:\.LBB[0-9_]+]]
+ ; M3: daddiu $2, $zero, 0
+ ; M3: [[EXIT:\.LBB[0-9_]+]]:
+ ; M3: jr $ra
+ ; M3: nop
+ ; M3: [[BB0]]:
; M3: dsrlv $[[T4:[0-9]+]], $5, $7
; M3: dsll $[[T5:[0-9]+]], $4, 1
; M3: not $[[T6:[0-9]+]], $[[T0]]
; M3: dsllv $[[T7:[0-9]+]], $[[T5]], $[[T6]]
; M3: or $3, $[[T7]], $[[T4]]
- ; M3: [[BB0]]:
- ; M3: bnez $[[T3]], [[BB1:\.LBB[0-9_]+]]
+ ; M3: bnez $[[T3]], [[EXIT]]
; M3: daddiu $2, $zero, 0
- ; M3: move $2, $[[T1]]
; M3: [[BB1]]:
; M3: jr $ra
- ; M3: nop
+ ; M3: move $2, $[[T1]]
; GP64-NOT-R6: dsrlv $[[T0:[0-9]+]], $5, $7
; GP64-NOT-R6: dsll $[[T1:[0-9]+]], $4, 1
diff --git a/test/CodeGen/Mips/llvm-ir/shl.ll b/test/CodeGen/Mips/llvm-ir/shl.ll
index ce3b91373f7f..13545907e21e 100644
--- a/test/CodeGen/Mips/llvm-ir/shl.ll
+++ b/test/CodeGen/Mips/llvm-ir/shl.ll
@@ -97,20 +97,24 @@ entry:
; M2: sllv $[[T0:[0-9]+]], $5, $7
; M2: andi $[[T1:[0-9]+]], $7, 32
- ; M2: bnez $[[T1]], $[[BB0:BB[0-9_]+]]
+ ; M2: beqz $[[T1]], $[[BB0:BB[0-9_]+]]
; M2: move $2, $[[T0]]
+ ; M2: beqz $[[T1]], $[[BB1:BB[0-9_]+]]
+ ; M2: addiu $3, $zero, 0
+ ; M2: $[[EXIT:BB[0-9_]+]]:
+ ; M2: jr $ra
+ ; M2: nop
+ ; M2: $[[BB0]]:
; M2: sllv $[[T2:[0-9]+]], $4, $7
; M2: not $[[T3:[0-9]+]], $7
; M2: srl $[[T4:[0-9]+]], $5, 1
; M2: srlv $[[T5:[0-9]+]], $[[T4]], $[[T3]]
; M2: or $2, $[[T2]], $[[T3]]
- ; M2: $[[BB0]]:
- ; M2: bnez $[[T1]], $[[BB1:BB[0-9_]+]]
+ ; M2: bnez $[[T1]], $[[EXIT]]
; M2: addiu $3, $zero, 0
- ; M2: move $3, $[[T0]]
; M2: $[[BB1]]:
; M2: jr $ra
- ; M2: nop
+ ; M2: move $3, $[[T0]]
; 32R1-R5: sllv $[[T0:[0-9]+]], $4, $7
; 32R1-R5: not $[[T1:[0-9]+]], $7
@@ -176,20 +180,24 @@ entry:
; M3: sll $[[T0:[0-9]+]], $7, 0
; M3: dsllv $[[T1:[0-9]+]], $5, $7
; M3: andi $[[T2:[0-9]+]], $[[T0]], 64
- ; M3: bnez $[[T3:[0-9]+]], [[BB0:\.LBB[0-9_]+]]
+ ; M3: beqz $[[T3:[0-9]+]], [[BB0:\.LBB[0-9_]+]]
; M3: move $2, $[[T1]]
+ ; M3: beqz $[[T3]], [[BB1:\.LBB[0-9_]+]]
+ ; M3: daddiu $3, $zero, 0
+ ; M3: [[EXIT:\.LBB[0-9_]+]]:
+ ; M3: jr $ra
+ ; M3: nop
+ ; M3: [[BB0]]:
; M3: dsllv $[[T4:[0-9]+]], $4, $7
; M3: dsrl $[[T5:[0-9]+]], $5, 1
; M3: not $[[T6:[0-9]+]], $[[T0]]
; M3: dsrlv $[[T7:[0-9]+]], $[[T5]], $[[T6]]
; M3: or $2, $[[T4]], $[[T7]]
- ; M3: [[BB0]]:
- ; M3: bnez $[[T3]], [[BB1:\.LBB[0-9_]+]]
+ ; M3: bnez $[[T3]], [[EXIT]]
; M3: daddiu $3, $zero, 0
- ; M3: move $3, $[[T1]]
; M3: [[BB1]]:
; M3: jr $ra
- ; M3: nop
+ ; M3: move $3, $[[T1]]
; GP64-NOT-R6: dsllv $[[T0:[0-9]+]], $4, $7
; GP64-NOT-R6: dsrl $[[T1:[0-9]+]], $5, 1
diff --git a/test/CodeGen/Mips/load-store-left-right.ll b/test/CodeGen/Mips/load-store-left-right.ll
index 3bd924a81200..b998772d367c 100644
--- a/test/CodeGen/Mips/load-store-left-right.ll
+++ b/test/CodeGen/Mips/load-store-left-right.ll
@@ -8,8 +8,8 @@
; RUN: llc -march=mips64 -mcpu=mips4 -target-abi=n64 -relocation-model=pic < %s | FileCheck -check-prefixes=ALL,MIPS64,MIPS64-EB %s
; RUN: llc -march=mips64el -mcpu=mips64 -target-abi=n64 -relocation-model=pic < %s | FileCheck -check-prefixes=ALL,MIPS64,MIPS64-EL %s
; RUN: llc -march=mips64 -mcpu=mips64 -target-abi=n64 -relocation-model=pic < %s | FileCheck -check-prefixes=ALL,MIPS64,MIPS64-EB %s
-; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi=n64 -relocation-model=pic < %s | FileCheck -check-prefixes=ALL,MIPS64,MIPS64-EL %s
-; RUN: llc -march=mips64 -mcpu=mips64r2 -target-abi=n64 -relocation-model=pic < %s | FileCheck -check-prefixes=ALL,MIPS64,MIPS64-EB %s
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi=n64 -relocation-model=pic < %s | FileCheck -check-prefixes=ALL,MIPS64,MIPS64R2-EL %s
+; RUN: llc -march=mips64 -mcpu=mips64r2 -target-abi=n64 -relocation-model=pic < %s | FileCheck -check-prefixes=ALL,MIPS64,MIPS64R2-EB %s
; RUN: llc -march=mips64el -mcpu=mips64r6 -target-abi=n64 -relocation-model=pic < %s | FileCheck -check-prefixes=ALL,MIPS64R6,MIPS64R6-EL %s
; RUN: llc -march=mips64 -mcpu=mips64r6 -target-abi=n64 -relocation-model=pic < %s | FileCheck -check-prefixes=ALL,MIPS64R6,MIPS64R6-EB %s
@@ -37,9 +37,15 @@ entry:
; MIPS64-EL: lwl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
; MIPS64-EL: lwr $[[R0]], 0($[[R1]])
+; MIPS64R2-EL: lwl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
+; MIPS64R2-EL: lwr $[[R0]], 0($[[R1]])
+
; MIPS64-EB: lwl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
; MIPS64-EB: lwr $[[R0]], 3($[[R1]])
+; MIPS64R2-EB: lwl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
+; MIPS64R2-EB: lwr $[[R0]], 3($[[R1]])
+
; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(si)(
; MIPS64R6: lw $2, 0($[[PTR]])
@@ -63,9 +69,15 @@ entry:
; MIPS64-EL: swl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
; MIPS64-EL: swr $[[R0]], 0($[[R1]])
+; MIPS64R2-EL: swl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
+; MIPS64R2-EL: swr $[[R0]], 0($[[R1]])
+
; MIPS64-EB: swl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
; MIPS64-EB: swr $[[R0]], 3($[[R1]])
+; MIPS64R2-EB: swl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
+; MIPS64R2-EB: swr $[[R0]], 3($[[R1]])
+
; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(si)(
; MIPS64R6: sw $4, 0($[[PTR]])
@@ -94,9 +106,15 @@ entry:
; MIPS64-EL: ldl $[[R0:[0-9]+]], 7($[[R1:[0-9]+]])
; MIPS64-EL: ldr $[[R0]], 0($[[R1]])
+; MIPS64R2-EL: ldl $[[R0:[0-9]+]], 7($[[R1:[0-9]+]])
+; MIPS64R2-EL: ldr $[[R0]], 0($[[R1]])
+
; MIPS64-EB: ldl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
; MIPS64-EB: ldr $[[R0]], 7($[[R1]])
+; MIPS64R2-EB: ldl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
+; MIPS64R2-EB: ldr $[[R0]], 7($[[R1]])
+
; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(sll)(
; MIPS64R6: ld $2, 0($[[PTR]])
@@ -123,9 +141,15 @@ entry:
; MIPS64-EL: lwl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
; MIPS64-EL: lwr $[[R0]], 0($[[R1]])
+; MIPS64R2-EL: lwl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
+; MIPS64R2-EL: lwr $[[R0]], 0($[[R1]])
+
; MIPS64-EB: lwl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
; MIPS64-EB: lwr $[[R0]], 3($[[R1]])
+; MIPS64R2-EB: lwl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
+; MIPS64R2-EB: lwr $[[R0]], 3($[[R1]])
+
; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(si)(
; MIPS64R6: lw $2, 0($[[PTR]])
@@ -159,9 +183,17 @@ entry:
; MIPS64-EL-DAG: daddiu $[[R4:[0-9]+]], $[[R3]], -1
; MIPS64-EL-DAG: and ${{[0-9]+}}, $[[R0]], $[[R4]]
+; MIPS64R2-EL-DAG: lwl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
+; MIPS64R2-EL-DAG: lwr $[[R0]], 0($[[R1]])
+; MIPS64R2-EL-DAG: dext $[[R0]], $[[R0]], 0, 32
+
; MIPS64-EB: lwl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
; MIPS64-EB: lwr $[[R0]], 3($[[R1]])
+; MIPS64R2-EB: lwl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
+; MIPS64R2-EB: lwr $[[R0]], 3($[[R1]])
+; MIPS64R2-EB: dext $[[R0]], $[[R0]], 0, 32
+
; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(sui)(
; MIPS64R6: lwu $2, 0($[[PTR]])
@@ -191,9 +223,15 @@ entry:
; MIPS64-EL: sdl $[[R0:[0-9]+]], 7($[[R1:[0-9]+]])
; MIPS64-EL: sdr $[[R0]], 0($[[R1]])
+; MIPS64R2-EL: sdl $[[R0:[0-9]+]], 7($[[R1:[0-9]+]])
+; MIPS64R2-EL: sdr $[[R0]], 0($[[R1]])
+
; MIPS64-EB: sdl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
; MIPS64-EB: sdr $[[R0]], 7($[[R1]])
+; MIPS64R2-EB: sdl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
+; MIPS64R2-EB: sdr $[[R0]], 7($[[R1]])
+
; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(sll)(
; MIPS64R6: sd $4, 0($[[PTR]])
@@ -217,9 +255,15 @@ entry:
; MIPS64-EL: swl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
; MIPS64-EL: swr $[[R0]], 0($[[R1]])
+; MIPS64R2-EL: swl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
+; MIPS64R2-EL: swr $[[R0]], 0($[[R1]])
+
; MIPS64-EB: swl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
; MIPS64-EB: swr $[[R0]], 3($[[R1]])
+; MIPS64R2-EB: swl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
+; MIPS64R2-EB: swr $[[R0]], 3($[[R1]])
+
; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(si)(
; MIPS64R6: sw $4, 0($[[PTR]])
@@ -247,15 +291,23 @@ entry:
; MIPS32-EB: lw $[[PTR:[0-9]+]], %got(struct_s0)(
; MIPS32R6: lw $[[PTR:[0-9]+]], %got(struct_s0)(
; MIPS64-EL: ld $[[PTR:[0-9]+]], %got_disp(struct_s0)(
+; MIPS64R2-EL: ld $[[PTR:[0-9]+]], %got_disp(struct_s0)(
; MIPS64-EB: ld $[[PTR:[0-9]+]], %got_disp(struct_s0)(
+; MIPS64R2-EB: ld $[[PTR:[0-9]+]], %got_disp(struct_s0)(
; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(struct_s0)(
-; FIXME: We should be able to do better than this on MIPS32r6/MIPS64r6 since
-; we have unaligned halfword load/store available
-; ALL-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]])
-; ALL-DAG: sb $[[R1]], 2($[[PTR]])
-; ALL-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]])
-; ALL-DAG: sb $[[R1]], 3($[[PTR]])
+; MIPS32-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS32-DAG: sb $[[R1]], 2($[[PTR]])
+; MIPS32-DAG: lbu $[[R2:[0-9]+]], 1($[[PTR]])
+; MIPS32-DAG: sb $[[R2]], 3($[[PTR]])
+
+; MIPS32R6: lhu $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS32R6: sh $[[R1]], 2($[[PTR]])
+
+; MIPS64-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS64-DAG: sb $[[R1]], 2($[[PTR]])
+; MIPS64-DAG: lbu $[[R2:[0-9]+]], 1($[[PTR]])
+; MIPS64-DAG: sb $[[R2]], 3($[[PTR]])
%0 = load %struct.S0, %struct.S0* getelementptr inbounds (%struct.S0, %struct.S0* @struct_s0, i32 0), align 1
store %struct.S0 %0, %struct.S0* getelementptr inbounds (%struct.S0, %struct.S0* @struct_s0, i32 1), align 1
@@ -268,37 +320,65 @@ entry:
; MIPS32-EL: lw $[[PTR:[0-9]+]], %got(struct_s1)(
; MIPS32-EB: lw $[[PTR:[0-9]+]], %got(struct_s1)(
-; MIPS32-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]])
-; MIPS32-DAG: sb $[[R1]], 4($[[PTR]])
-; MIPS32-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]])
-; MIPS32-DAG: sb $[[R1]], 5($[[PTR]])
-; MIPS32-DAG: lbu $[[R1:[0-9]+]], 2($[[PTR]])
-; MIPS32-DAG: sb $[[R1]], 6($[[PTR]])
-; MIPS32-DAG: lbu $[[R1:[0-9]+]], 3($[[PTR]])
-; MIPS32-DAG: sb $[[R1]], 7($[[PTR]])
+; MIPS32-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]])
+; MIPS32-EL-DAG: lwr $[[R1]], 0($[[PTR]])
+; MIPS32-EL-DAG: swl $[[R1]], 7($[[PTR]])
+; MIPS32-EL-DAG: swr $[[R1]], 4($[[PTR]])
+; MIPS32-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS32-EB-DAG: lwr $[[R1]], 3($[[PTR]])
+; MIPS32-EB-DAG: swl $[[R1]], 4($[[PTR]])
+; MIPS32-EB-DAG: swr $[[R1]], 7($[[PTR]])
+
+; MIPS32-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS32-NOLEFTRIGHT-DAG: sb $[[R1]], 4($[[PTR]])
+; MIPS32-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]])
+; MIPS32-NOLEFTRIGHT-DAG: sb $[[R1]], 5($[[PTR]])
+; MIPS32-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 2($[[PTR]])
+; MIPS32-NOLEFTRIGHT-DAG: sb $[[R1]], 6($[[PTR]])
+; MIPS32-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 3($[[PTR]])
+; MIPS32-NOLEFTRIGHT-DAG: sb $[[R1]], 7($[[PTR]])
; MIPS32R6: lw $[[PTR:[0-9]+]], %got(struct_s1)(
-; MIPS32R6-DAG: lhu $[[R1:[0-9]+]], 0($[[PTR]])
-; MIPS32R6-DAG: sh $[[R1]], 4($[[PTR]])
-; MIPS32R6-DAG: lhu $[[R1:[0-9]+]], 2($[[PTR]])
-; MIPS32R6-DAG: sh $[[R1]], 6($[[PTR]])
+; MIPS32R6-DAG: lw $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS32R6-DAG: sw $[[R1]], 4($[[PTR]])
; MIPS64-EL: ld $[[PTR:[0-9]+]], %got_disp(struct_s1)(
+; MIPS64R2-EL: ld $[[PTR:[0-9]+]], %got_disp(struct_s1)(
; MIPS64-EB: ld $[[PTR:[0-9]+]], %got_disp(struct_s1)(
-; MIPS64-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]])
-; MIPS64-DAG: sb $[[R1]], 4($[[PTR]])
-; MIPS64-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]])
-; MIPS64-DAG: sb $[[R1]], 5($[[PTR]])
-; MIPS64-DAG: lbu $[[R1:[0-9]+]], 2($[[PTR]])
-; MIPS64-DAG: sb $[[R1]], 6($[[PTR]])
-; MIPS64-DAG: lbu $[[R1:[0-9]+]], 3($[[PTR]])
-; MIPS64-DAG: sb $[[R1]], 7($[[PTR]])
+; MIPS64R2-EB: ld $[[PTR:[0-9]+]], %got_disp(struct_s1)(
+
+; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]])
+; MIPS64-EL-DAG: lwr $[[R1]], 0($[[PTR]])
+; MIPS64-EL-DAG: swl $[[R1]], 7($[[PTR]])
+; MIPS64-EL-DAG: swr $[[R1]], 4($[[PTR]])
+
+; MIPS64R2-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]])
+; MIPS64R2-EL-DAG: lwr $[[R1]], 0($[[PTR]])
+; MIPS64R2-EL-DAG: swl $[[R1]], 7($[[PTR]])
+; MIPS64R2-EL-DAG: swr $[[R1]], 4($[[PTR]])
+
+; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS64-EB-DAG: lwr $[[R1]], 3($[[PTR]])
+; MIPS64-EB-DAG: swl $[[R1]], 4($[[PTR]])
+; MIPS64-EB-DAG: swr $[[R1]], 7($[[PTR]])
+
+; MIPS64R2-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS64R2-EB-DAG: lwr $[[R1]], 3($[[PTR]])
+; MIPS64R2-EB-DAG: swl $[[R1]], 4($[[PTR]])
+; MIPS64R2-EB-DAG: swr $[[R1]], 7($[[PTR]])
+
+; MIPS64-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS64-NOLEFTRIGHT-DAG: sb $[[R1]], 4($[[PTR]])
+; MIPS64-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]])
+; MIPS64-NOLEFTRIGHT-DAG: sb $[[R1]], 5($[[PTR]])
+; MIPS64-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 2($[[PTR]])
+; MIPS64-NOLEFTRIGHT-DAG: sb $[[R1]], 6($[[PTR]])
+; MIPS64-NOLEFTRIGHT-DAG: lbu $[[R1:[0-9]+]], 3($[[PTR]])
+; MIPS64-NOLEFTRIGHT-DAG: sb $[[R1]], 7($[[PTR]])
; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(struct_s1)(
-; MIPS64R6-DAG: lhu $[[R1:[0-9]+]], 0($[[PTR]])
-; MIPS64R6-DAG: sh $[[R1]], 4($[[PTR]])
-; MIPS64R6-DAG: lhu $[[R1:[0-9]+]], 2($[[PTR]])
-; MIPS64R6-DAG: sh $[[R1]], 6($[[PTR]])
+; MIPS64R6-DAG: lw $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS64R6-DAG: sw $[[R1]], 4($[[PTR]])
%0 = load %struct.S1, %struct.S1* getelementptr inbounds (%struct.S1, %struct.S1* @struct_s1, i32 0), align 1
store %struct.S1 %0, %struct.S1* getelementptr inbounds (%struct.S1, %struct.S1* @struct_s1, i32 1), align 1
@@ -336,30 +416,34 @@ entry:
; MIPS32R6-DAG: sw $[[R1]], 12($[[PTR]])
; MIPS64-EL: ld $[[PTR:[0-9]+]], %got_disp(struct_s2)(
-; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]])
-; MIPS64-EL-DAG: lwr $[[R1]], 0($[[PTR]])
-; MIPS64-EL-DAG: swl $[[R1]], 11($[[PTR]])
-; MIPS64-EL-DAG: swr $[[R1]], 8($[[PTR]])
-; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 7($[[PTR]])
-; MIPS64-EL-DAG: lwr $[[R1]], 4($[[PTR]])
-; MIPS64-EL-DAG: swl $[[R1]], 15($[[PTR]])
-; MIPS64-EL-DAG: swr $[[R1]], 12($[[PTR]])
+
+; MIPS64-EL-DAG: ldl $[[R1:[0-9]+]], 7($[[PTR]])
+; MIPS64-EL-DAG: ldr $[[R1]], 0($[[PTR]])
+; MIPS64-EL-DAG: sdl $[[R1]], 15($[[PTR]])
+; MIPS64-EL-DAG: sdr $[[R1]], 8($[[PTR]])
+
+; MIPS64R2-EL: ld $[[PTR:[0-9]+]], %got_disp(struct_s2)(
+
+; MIPS64R2-EL-DAG: ldl $[[R1:[0-9]+]], 7($[[PTR]])
+; MIPS64R2-EL-DAG: ldr $[[R1]], 0($[[PTR]])
+; MIPS64R2-EL-DAG: sdl $[[R1]], 15($[[PTR]])
+; MIPS64R2-EL-DAG: sdr $[[R1]], 8($[[PTR]])
; MIPS64-EB: ld $[[PTR:[0-9]+]], %got_disp(struct_s2)(
-; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]])
-; MIPS64-EB-DAG: lwr $[[R1]], 3($[[PTR]])
-; MIPS64-EB-DAG: swl $[[R1]], 8($[[PTR]])
-; MIPS64-EB-DAG: swr $[[R1]], 11($[[PTR]])
-; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 4($[[PTR]])
-; MIPS64-EB-DAG: lwr $[[R1]], 7($[[PTR]])
-; MIPS64-EB-DAG: swl $[[R1]], 12($[[PTR]])
-; MIPS64-EB-DAG: swr $[[R1]], 15($[[PTR]])
+; MIPS64-EB-DAG: ldl $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS64-EB-DAG: ldr $[[R1]], 7($[[PTR]])
+; MIPS64-EB-DAG: sdl $[[R1]], 8($[[PTR]])
+; MIPS64-EB-DAG: sdr $[[R1]], 15($[[PTR]])
+
+; MIPS64R2-EB: ld $[[PTR:[0-9]+]], %got_disp(struct_s2)(
+; MIPS64R2-EB-DAG: ldl $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS64R2-EB-DAG: ldr $[[R1]], 7($[[PTR]])
+; MIPS64R2-EB-DAG: sdl $[[R1]], 8($[[PTR]])
+; MIPS64R2-EB-DAG: sdr $[[R1]], 15($[[PTR]])
; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(struct_s2)(
-; MIPS64R6-DAG: lw $[[R1:[0-9]+]], 0($[[PTR]])
-; MIPS64R6-DAG: sw $[[R1]], 8($[[PTR]])
-; MIPS64R6-DAG: lw $[[R1:[0-9]+]], 4($[[PTR]])
-; MIPS64R6-DAG: sw $[[R1]], 12($[[PTR]])
+; MIPS64R6-DAG: ld $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS64R6-DAG: sd $[[R1]], 8($[[PTR]])
%0 = load %struct.S2, %struct.S2* getelementptr inbounds (%struct.S2, %struct.S2* @struct_s2, i32 0), align 1
store %struct.S2 %0, %struct.S2* getelementptr inbounds (%struct.S2, %struct.S2* @struct_s2, i32 1), align 1
@@ -416,20 +500,39 @@ entry:
; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]])
; MIPS64-EL-DAG: lwr $[[R1]], 0($[[PTR]])
-; MIPS64-EB: ld $[[SPTR:[0-9]+]], %got_disp(arr)(
-; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]])
-; MIPS64-EB-DAG: lwr $[[R1]], 3($[[PTR]])
-; MIPS64-EB-DAG: dsll $[[R1]], $[[R1]], 32
+; MIPS64R2-EL: ld $[[SPTR:[0-9]+]], %got_disp(arr)(
+; MIPS64R2-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]])
+; MIPS64R2-EL-DAG: lwr $[[R1]], 0($[[PTR]])
+
+; MIPS64-EB: ld $[[SPTR:[0-9]+]], %got_disp(arr)(
; MIPS64-EB-DAG: lbu $[[R2:[0-9]+]], 5($[[PTR]])
; MIPS64-EB-DAG: lbu $[[R3:[0-9]+]], 4($[[PTR]])
; MIPS64-EB-DAG: dsll $[[T0:[0-9]+]], $[[R3]], 8
; MIPS64-EB-DAG: or $[[T1:[0-9]+]], $[[T0]], $[[R2]]
-; MIPS64-EB-DAG: dsll $[[T1]], $[[T1]], 16
-; MIPS64-EB-DAG: or $[[T3:[0-9]+]], $[[R1]], $[[T1]]
; MIPS64-EB-DAG: lbu $[[R4:[0-9]+]], 6($[[PTR]])
+; MIPS64-EB-DAG: dsll $[[T1]], $[[T1]], 16
+; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS64-EB-DAG: lwr $[[R1]], 3($[[PTR]])
+; MIPS64-EB-DAG: dsll $[[R5:[0-9]+]], $[[R1]], 32
+; MIPS64-EB-DAG: or $[[T3:[0-9]+]], $[[R5]], $[[T1]]
; MIPS64-EB-DAG: dsll $[[T4:[0-9]+]], $[[R4]], 8
; MIPS64-EB-DAG: or $4, $[[T3]], $[[T4]]
+; MIPS64R2-EB: ld $[[SPTR:[0-9]+]], %got_disp(arr)(
+; MIPS64R2-EB-DAG: lbu $[[R1:[0-9]+]], 5($[[PTR]])
+; MIPS64R2-EB-DAG: lbu $[[R2:[0-9]+]], 4($[[PTR]])
+; MIPS64R2-EB-DAG: dsll $[[T0:[0-9]+]], $[[R2]], 8
+; MIPS64R2-EB-DAG: or $[[T1:[0-9]+]], $[[T0]], $[[R1]]
+; MIPS64R2-EB-DAG: dsll $[[T1]], $[[T1]], 16
+; MIPS64R2-EB-DAG: lwl $[[R3:[0-9]+]], 0($[[PTR]])
+; MIPS64R2-EB-DAG: lwr $[[R3]], 3($[[PTR]])
+; MIPS64R2-EB-DAG: dext $[[R3]], $[[R3]], 0, 32
+; MIPS64R2-EB-DAG: dsll $[[R3]], $[[R3]], 32
+; MIPS64R2-EB-DAG: or $[[T2:[0-9]+]], $[[R3]], $[[T1]]
+; MIPS64R2-EB-DAG: lbu $[[R4:[0-9]+]], 6($[[PTR]])
+; MIPS64R2-EB-DAG: dsll $[[T3:[0-9]+]], $[[R4]], 8
+; MIPS64R2-EB-DAG: or $4, $[[T2]], $[[T3]]
+
; MIPS64R6: ld $[[SPTR:[0-9]+]], %got_disp(arr)(
tail call void @extern_func([7 x i8]* byval @arr) nounwind
diff --git a/test/CodeGen/Mips/mature-mc-support.ll b/test/CodeGen/Mips/mature-mc-support.ll
index 6e5998d8a7cb..9c93e96a376b 100644
--- a/test/CodeGen/Mips/mature-mc-support.ll
+++ b/test/CodeGen/Mips/mature-mc-support.ll
@@ -29,4 +29,4 @@
module asm " .this_directive_is_very_unlikely_to_exist"
-; CHECK: LLVM ERROR: Error parsing inline asm
+; CHECK: error: unknown directive
diff --git a/test/CodeGen/Mips/micromips-compact-branches.ll b/test/CodeGen/Mips/micromips-compact-branches.ll
index c689944d386b..332cd8cd105c 100644
--- a/test/CodeGen/Mips/micromips-compact-branches.ll
+++ b/test/CodeGen/Mips/micromips-compact-branches.ll
@@ -6,7 +6,7 @@ entry:
%x = alloca i32, align 4
%0 = load i32, i32* %x, align 4
%cmp = icmp eq i32 %0, 0
- br i1 %cmp, label %if.then, label %if.end
+ br i1 %cmp, label %if.then, label %if.end, !prof !1
if.then:
store i32 10, i32* %x, align 4
@@ -17,3 +17,4 @@ if.end:
}
; CHECK: bnezc
+!1 = !{!"branch_weights", i32 2, i32 1}
diff --git a/test/CodeGen/Mips/micromips-li.ll b/test/CodeGen/Mips/micromips-li.ll
index ac315f938251..997f4e9196af 100644
--- a/test/CodeGen/Mips/micromips-li.ll
+++ b/test/CodeGen/Mips/micromips-li.ll
@@ -13,6 +13,6 @@ entry:
ret i32 0
}
-; CHECK: li16 ${{[2-7]|16|17}}, 1
; CHECK: addiu ${{[0-9]+}}, $zero, 2148
+; CHECK: li16 ${{[2-7]|16|17}}, 1
; CHECK: ori ${{[0-9]+}}, $zero, 33332
diff --git a/test/CodeGen/Mips/mips64-f128-call.ll b/test/CodeGen/Mips/mips64-f128-call.ll
index 9a093e6f9825..19fa8fc75245 100644
--- a/test/CodeGen/Mips/mips64-f128-call.ll
+++ b/test/CodeGen/Mips/mips64-f128-call.ll
@@ -4,8 +4,8 @@
@gld1 = external global fp128
; CHECK: foo0
-; CHECK: sdc1 $f13, 8(${{[0-9]+}})
-; CHECK: sdc1 $f12, 0(${{[0-9]+}})
+; CHECK-DAG: sdc1 $f12, %lo(gld0)(${{[0-9]+}})
+; CHECK-DAG: sdc1 $f13, 8(${{[0-9]+}})
define void @foo0(fp128 %a0) {
entry:
@@ -14,8 +14,8 @@ entry:
}
; CHECK: foo1
-; CHECK: ldc1 $f13, 8(${{[0-9]+}})
-; CHECK: ldc1 $f12, 0(${{[0-9]+}})
+; CHECK-DAG: ldc1 $f12, %lo(gld0)(${{[0-9]+}})
+; CHECK-DAG: ldc1 $f13, 8(${{[0-9]+}})
define void @foo1() {
entry:
@@ -26,13 +26,18 @@ entry:
declare void @foo2(fp128)
-; CHECK: foo3
-; CHECK: ld $[[R0:[0-9]+]], %got_disp(gld0)
-; CHECK: sdc1 $f2, 8($[[R0]])
-; CHECK: sdc1 $f0, 0($[[R0]])
-; CHECK: ld $[[R1:[0-9]+]], %got_disp(gld1)
-; CHECK: ldc1 $f0, 0($[[R1]])
-; CHECK: ldc1 $f2, 8($[[R1]])
+
+; CHECK: foo3:
+
+; CHECK: daddiu $[[R2:[0-9]+]], $[[R1:[0-9]+]], %lo(gld0)
+; CHECK: sdc1 $f0, %lo(gld0)($[[R1]])
+; CHECK: sdc1 $f2, 8($[[R2]])
+; CHECK: daddiu $[[R3:[0-9]+]], ${{[0-9]+}}, %hi(gld1)
+; CHECK: dsll $[[R4:[0-9]+]], $[[R3]], 16
+; CHECK: ldc1 $f0, %lo(gld1)($[[R4]])
+; CHECK: daddiu $[[R5:[0-9]+]], $[[R4]], %lo(gld1)
+; CHECK: ldc1 $f2, 8($[[R5]])
+
define fp128 @foo3() {
entry:
diff --git a/test/CodeGen/Mips/mips64-f128.ll b/test/CodeGen/Mips/mips64-f128.ll
index 2b1c154f095b..a6dafb1abfd6 100644
--- a/test/CodeGen/Mips/mips64-f128.ll
+++ b/test/CodeGen/Mips/mips64-f128.ll
@@ -1,11 +1,15 @@
; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips4 -mattr=+soft-float -O1 \
-; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefixes=ALL,C_CC_FMT,PRER6
+; RUN: -disable-mips-delay-filler -relocation-model=pic < %s | FileCheck \
+; RUN: %s -check-prefixes=ALL,C_CC_FMT,PRER6,NOT-R2R6
; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64 -mattr=+soft-float -O1 \
-; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefixes=ALL,C_CC_FMT,PRER6
-; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64r2 -mattr=+soft-float -O1 \
-; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefixes=ALL,C_CC_FMT,PRER6
-; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64r6 -mattr=+soft-float -O1 \
-; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefixes=ALL,CMP_CC_FMT,R6
+; RUN: -disable-mips-delay-filler -relocation-model=pic < %s | FileCheck \
+; RUN: %s -check-prefixes=ALL,C_CC_FMT,PRER6,NOT-R2R6
+; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64r2 -mattr=+soft-float \
+; RUN: -O1 -disable-mips-delay-filler -relocation-model=pic < %s | FileCheck \
+; RUN: %s -check-prefixes=ALL,C_CC_FMT,PRER6,R2R6
+; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64r6 -mattr=+soft-float \
+; RUN: -O1 -disable-mips-delay-filler -relocation-model=pic < %s | FileCheck \
+; RUN: %s -check-prefixes=ALL,CMP_CC_FMT,R6,R2R6
@gld0 = external global fp128
@gld1 = external global fp128
@@ -238,12 +242,16 @@ entry:
}
; ALL-LABEL: libcall1_fabsl:
-; ALL-DAG: ld $[[R0:[0-9]+]], 8($[[R4:[0-9]+]])
-; ALL-DAG: daddiu $[[R1:[0-9]+]], $zero, 1
-; ALL-DAG: dsll $[[R2:[0-9]+]], $[[R1]], 63
-; ALL-DAG: daddiu $[[R3:[0-9]+]], $[[R2]], -1
-; ALL-DAG: and $4, $[[R0]], $[[R3]]
-; ALL-DAG: ld $2, 0($[[R4]])
+; NOT-R2R6-DAG: ld $[[R0:[0-9]+]], 8($[[R4:[0-9]+]])
+; NOT-R2R6-DAG: daddiu $[[R1:[0-9]+]], $zero, 1
+; NOT-R2R6-DAG: dsll $[[R2:[0-9]+]], $[[R1]], 63
+; NOT-R2R6-DAG: daddiu $[[R3:[0-9]+]], $[[R2]], -1
+; NOT-R2R6-DAG: and $4, $[[R0]], $[[R3]]
+; NOT-R2R6-DAG: ld $2, 0($[[R4]])
+
+; R2R6-DAG: ld $[[R0:[0-9]+]], 0($[[R3:[0-9]+]])
+; R2R6-DAG: ld $[[R1:[0-9]+]], 8($[[R3]])
+; R2R6-DAG: dextm $[[R2:[0-9]+]], $[[R1]], 0, 63
define fp128 @libcall1_fabsl() {
entry:
@@ -410,17 +418,19 @@ entry:
declare fp128 @llvm.powi.f128(fp128, i32) #3
; ALL-LABEL: libcall2_copysignl:
-; ALL-DAG: daddiu $[[R2:[0-9]+]], $zero, 1
-; ALL-DAG: dsll $[[R3:[0-9]+]], $[[R2]], 63
-; ALL-DAG: ld $[[R0:[0-9]+]], %got_disp(gld1)
-; ALL-DAG: ld $[[R1:[0-9]+]], 8($[[R0]])
-; ALL-DAG: and $[[R4:[0-9]+]], $[[R1]], $[[R3]]
-; ALL-DAG: ld $[[R5:[0-9]+]], %got_disp(gld0)
-; ALL-DAG: ld $[[R6:[0-9]+]], 8($[[R5]])
-; ALL-DAG: daddiu $[[R7:[0-9]+]], $[[R3]], -1
-; ALL-DAG: and $[[R8:[0-9]+]], $[[R6]], $[[R7]]
-; ALL-DAG: or $4, $[[R8]], $[[R4]]
-; ALL-DAG: ld $2, 0($[[R5]])
+; ALL-DAG: daddiu $[[R2:[0-9]+]], $zero, 1
+; ALL-DAG: dsll $[[R3:[0-9]+]], $[[R2]], 63
+; ALL-DAG: ld $[[R0:[0-9]+]], %got_disp(gld1)
+; ALL-DAG: ld $[[R1:[0-9]+]], 8($[[R0]])
+; ALL-DAG: and $[[R4:[0-9]+]], $[[R1]], $[[R3]]
+; ALL-DAG: ld $[[R5:[0-9]+]], %got_disp(gld0)
+; ALL-DAG: ld $[[R6:[0-9]+]], 8($[[R5]])
+; NOT-R2R6-DAG: daddiu $[[R7:[0-9]+]], $[[R3]], -1
+; NOT-R2R6-DAG: and $[[R8:[0-9]+]], $[[R6]], $[[R7]]
+; NOT-R2R6-DAG: or $4, $[[R8]], $[[R4]]
+; R2R6-DAG: dextm $[[R7:[0-9]+]], $[[R6]], 0, 63
+; R2R6-DAG: or $4, $[[R7]], $[[R4]]
+; ALL-DAG: ld $2, 0($[[R5]])
define fp128 @libcall2_copysignl() {
entry:
@@ -573,10 +583,10 @@ entry:
; ALL-LABEL: store_LD_LD:
; ALL: ld $[[R0:[0-9]+]], %got_disp(gld1)
-; ALL: ld $[[R1:[0-9]+]], 0($[[R0]])
; ALL: ld $[[R2:[0-9]+]], 8($[[R0]])
; ALL: ld $[[R3:[0-9]+]], %got_disp(gld0)
; ALL: sd $[[R2]], 8($[[R3]])
+; ALL: ld $[[R1:[0-9]+]], 0($[[R0]])
; ALL: sd $[[R1]], 0($[[R3]])
define void @store_LD_LD() {
diff --git a/test/CodeGen/Mips/mips64-libcall.ll b/test/CodeGen/Mips/mips64-libcall.ll
index 8512e9fcb72e..7c0e5b2bbfdc 100644
--- a/test/CodeGen/Mips/mips64-libcall.ll
+++ b/test/CodeGen/Mips/mips64-libcall.ll
@@ -20,7 +20,7 @@ declare double @floor(double) nounwind readnone
; Check call16.
;
; SOFT-LABEL: f64add:
-; SOFT: ld $25, %call16(__adddf3)
+; SOFT: jal __adddf3
define double @f64add(double %a, double %b) {
entry:
diff --git a/test/CodeGen/Mips/mips64instrs.ll b/test/CodeGen/Mips/mips64instrs.ll
index 8f124c89db4b..c08c1b73d740 100644
--- a/test/CodeGen/Mips/mips64instrs.ll
+++ b/test/CodeGen/Mips/mips64instrs.ll
@@ -111,10 +111,8 @@ entry:
define i64 @f14(i64 %a, i64 %b) nounwind readnone {
entry:
; ALL-LABEL: f14:
-; ALL-DAG: ld $[[P0:[0-9]+]], %got_disp(gll0)(
-; ALL-DAG: ld $[[P1:[0-9]+]], %got_disp(gll1)(
-; ALL-DAG: ld $[[T0:[0-9]+]], 0($[[P0]])
-; ALL-DAG: ld $[[T1:[0-9]+]], 0($[[P1]])
+; ALL-DAG: ld $[[T0:[0-9]+]], %lo(gll0)(${{[0-9]+}})
+; ALL-DAG: ld $[[T1:[0-9]+]], %lo(gll1)(${{[0-9]+}})
; ACCMULDIV: ddiv $zero, $[[T0]], $[[T1]]
; ACCMULDIV: teq $[[T1]], $zero, 7
@@ -132,10 +130,8 @@ entry:
define i64 @f15() nounwind readnone {
entry:
; ALL-LABEL: f15:
-; ALL-DAG: ld $[[P0:[0-9]+]], %got_disp(gll0)(
-; ALL-DAG: ld $[[P1:[0-9]+]], %got_disp(gll1)(
-; ALL-DAG: ld $[[T0:[0-9]+]], 0($[[P0]])
-; ALL-DAG: ld $[[T1:[0-9]+]], 0($[[P1]])
+; ALL-DAG: ld $[[T0:[0-9]+]], %lo(gll0)(${{[0-9]+}})
+; ALL-DAG: ld $[[T1:[0-9]+]], %lo(gll1)(${{[0-9]+}})
; ACCMULDIV: ddivu $zero, $[[T0]], $[[T1]]
; ACCMULDIV: teq $[[T1]], $zero, 7
diff --git a/test/CodeGen/Mips/mno-ldc1-sdc1.ll b/test/CodeGen/Mips/mno-ldc1-sdc1.ll
index 9663138d4c81..0260afaa1860 100644
--- a/test/CodeGen/Mips/mno-ldc1-sdc1.ll
+++ b/test/CodeGen/Mips/mno-ldc1-sdc1.ll
@@ -130,12 +130,12 @@
; MM-MNO-PIC: addiu $[[R1:[0-9]+]], $[[R0]], %lo(_gp_disp)
; MM-MNO-PIC: addu $[[R2:[0-9]+]], $[[R1]], $25
; MM-MNO-PIC: lw $[[R3:[0-9]+]], %got(g0)($[[R2]])
-; MM-MNO-PIC: lw16 $[[R4:[0-9]+]], 0($[[R3]])
-; MM-MNO-PIC: lw16 $[[R5:[0-9]+]], 4($[[R3]])
-; MM-MNO-LE-PIC: mtc1 $[[R4]], $f0
-; MM-MNO-LE-PIC: mthc1 $[[R5]], $f0
-; MM-MNO-BE-PIC: mtc1 $[[R5]], $f0
-; MM-MNO-BE-PIC: mthc1 $[[R4]], $f0
+; MM-MNO-PIC-DAG: lw16 $[[R4:[0-9]+]], 0($[[R3]])
+; MM-MNO-PIC-DAG: lw16 $[[R5:[0-9]+]], 4($[[R3]])
+; MM-MNO-LE-PIC-DAG: mtc1 $[[R4]], $f0
+; MM-MNO-LE-PIC-DAG: mthc1 $[[R5]], $f0
+; MM-MNO-BE-PIC-DAG: mtc1 $[[R5]], $f0
+; MM-MNO-BE-PIC-DAG: mthc1 $[[R4]], $f0
; MM-STATIC-PIC: lui $[[R0:[0-9]+]], %hi(g0)
; MM-STATIC-PIC: ldc1 $f0, %lo(g0)($[[R0]])
@@ -214,13 +214,13 @@ entry:
; MM-MNO-PIC: lui $[[R0:[0-9]+]], %hi(_gp_disp)
; MM-MNO-PIC: addiu $[[R1:[0-9]+]], $[[R0]], %lo(_gp_disp)
; MM-MNO-PIC: addu $[[R2:[0-9]+]], $[[R1]], $25
-; MM-MNO-LE-PIC: mfc1 $[[R3:[0-9]+]], $f12
-; MM-MNO-BE-PIC: mfhc1 $[[R3:[0-9]+]], $f12
-; MM-MNO-PIC: lw $[[R4:[0-9]+]], %got(g0)($[[R2]])
-; MM-MNO-PIC: sw16 $[[R3]], 0($[[R4]])
-; MM-MNO-LE-PIC: mfhc1 $[[R5:[0-9]+]], $f12
-; MM-MNO-BE-PIC: mfc1 $[[R5:[0-9]+]], $f12
-; MM-MNO-PIC: sw16 $[[R5]], 4($[[R4]])
+; MM-MNO-LE-PIC-DAG: mfc1 $[[R3:[0-9]+]], $f12
+; MM-MNO-BE-PIC-DAG: mfhc1 $[[R3:[0-9]+]], $f12
+; MM-MNO-PIC-DAG: lw $[[R4:[0-9]+]], %got(g0)($[[R2]])
+; MM-MNO-PIC-DAG: sw16 $[[R3]], 0($[[R4]])
+; MM-MNO-LE-PIC-DAG: mfhc1 $[[R5:[0-9]+]], $f12
+; MM-MNO-BE-PIC-DAG: mfc1 $[[R5:[0-9]+]], $f12
+; MM-MNO-PIC-DAG: sw16 $[[R5]], 4($[[R4]])
; MM-STATIC-PIC: lui $[[R0:[0-9]+]], %hi(g0)
; MM-STATIC-PIC: sdc1 $f12, %lo(g0)($[[R0]])
@@ -267,8 +267,8 @@ entry:
; MM-MNO-PIC: sll16 $[[R0:[0-9]+]], $5, 3
; MM-MNO-PIC: addu16 $[[R1:[0-9]+]], $4, $[[R0]]
-; MM-MNO-PIC: lw16 $[[R2:[0-9]+]], 0($[[R1]])
-; MM-MNO-PIC: lw16 $[[R3:[0-9]+]], 4($[[R1]])
+; MM-MNO-PIC-DAG: lw16 $[[R2:[0-9]+]], 0($[[R1]])
+; MM-MNO-PIC-DAG: lw16 $[[R3:[0-9]+]], 4($[[R1]])
; MM-MNO-LE-PIC: mtc1 $[[R2]], $f0
; MM-MNO-LE-PIC: mthc1 $[[R3]], $f0
; MM-MNO-BE-PIC: mtc1 $[[R3]], $f0
@@ -313,14 +313,14 @@ entry:
; MM: addu16 $[[R1:[0-9]+]], $6, $[[R0]]
; MM: sdc1 $f12, 0($[[R1]])
-; MM-MNO-PIC: sll16 $[[R0:[0-9]+]], $7, 3
-; MM-MNO-PIC: addu16 $[[R1:[0-9]+]], $6, $[[R0]]
-; MM-MNO-LE-PIC: mfc1 $[[R2:[0-9]+]], $f12
-; MM-MNO-BE-PIC: mfhc1 $[[R2:[0-9]+]], $f12
-; MM-MNO-PIC: sw16 $[[R2]], 0($[[R1]])
-; MM-MNO-LE-PIC: mfhc1 $[[R3:[0-9]+]], $f12
-; MM-MNO-BE-PIC: mfc1 $[[R3:[0-9]+]], $f12
-; MM-MNO-PIC: sw16 $[[R3]], 4($[[R1]])
+; MM-MNO-PIC: sll16 $[[R0:[0-9]+]], $7, 3
+; MM-MNO-PIC: addu16 $[[R1:[0-9]+]], $6, $[[R0]]
+; MM-MNO-LE-PIC-DAG: mfc1 $[[R2:[0-9]+]], $f12
+; MM-MNO-BE-PIC-DAG: mfhc1 $[[R2:[0-9]+]], $f12
+; MM-MNO-PIC-DAG: sw16 $[[R2]], 0($[[R1]])
+; MM-MNO-LE-PIC-DAG: mfhc1 $[[R3:[0-9]+]], $f12
+; MM-MNO-BE-PIC-DAG: mfc1 $[[R3:[0-9]+]], $f12
+; MM-MNO-PIC-DAG: sw16 $[[R3]], 4($[[R1]])
; MM-STATIC-PIC: sll16 $[[R0:[0-9]+]], $7, 3
; MM-STATIC-PIC: addu16 $[[R1:[0-9]+]], $6, $[[R0]]
diff --git a/test/CodeGen/Mips/msa/3r_4r_widen.ll b/test/CodeGen/Mips/msa/3r_4r_widen.ll
index fe248eeb566b..467cff5a1a3c 100644
--- a/test/CodeGen/Mips/msa/3r_4r_widen.ll
+++ b/test/CodeGen/Mips/msa/3r_4r_widen.ll
@@ -5,18 +5,16 @@
; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
-@llvm_mips_dpadd_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_dpadd_s_h_ARG2 = global <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23>, align 16
@llvm_mips_dpadd_s_h_ARG3 = global <16 x i8> <i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39>, align 16
@llvm_mips_dpadd_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
define void @llvm_mips_dpadd_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_h_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_s_h_ARG2
- %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_s_h_ARG3
- %3 = tail call <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
- store <8 x i16> %3, <8 x i16>* @llvm_mips_dpadd_s_h_RES
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_s_h_ARG2
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_s_h_ARG3
+ %2 = tail call <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>, <16 x i8> %0, <16 x i8> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_dpadd_s_h_RES
ret void
}
@@ -25,23 +23,21 @@ declare <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
; CHECK: llvm_mips_dpadd_s_h_test:
; CHECK: ld.b
; CHECK: ld.b
-; CHECK: ld.h
-; CHECK: dpadd_s.h
+; CHECK: ldi.h [[R1:\$w[0-9]+]],
+; CHECK: dpadd_s.h [[R1]],
; CHECK: st.h
; CHECK: .size llvm_mips_dpadd_s_h_test
;
-@llvm_mips_dpadd_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_dpadd_s_w_ARG2 = global <8 x i16> <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>, align 16
@llvm_mips_dpadd_s_w_ARG3 = global <8 x i16> <i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>, align 16
@llvm_mips_dpadd_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
define void @llvm_mips_dpadd_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_w_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_w_ARG2
- %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_w_ARG3
- %3 = tail call <4 x i32> @llvm.mips.dpadd.s.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
- store <4 x i32> %3, <4 x i32>* @llvm_mips_dpadd_s_w_RES
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_w_ARG2
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_w_ARG3
+ %2 = tail call <4 x i32> @llvm.mips.dpadd.s.w(<4 x i32> <i32 4, i32 4, i32 4, i32 4>, <8 x i16> %0, <8 x i16> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_dpadd_s_w_RES
ret void
}
@@ -50,48 +46,44 @@ declare <4 x i32> @llvm.mips.dpadd.s.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
; CHECK: llvm_mips_dpadd_s_w_test:
; CHECK: ld.h
; CHECK: ld.h
-; CHECK: ld.w
-; CHECK: dpadd_s.w
+; CHECK: ldi.w [[R1:\$w[0-9]+]],
+; CHECK: dpadd_s.w [[R1]],
; CHECK: st.w
; CHECK: .size llvm_mips_dpadd_s_w_test
;
-@llvm_mips_dpadd_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_dpadd_s_d_ARG2 = global <4 x i32> <i32 2, i32 3, i32 4, i32 5>, align 16
@llvm_mips_dpadd_s_d_ARG3 = global <4 x i32> <i32 6, i32 7, i32 8, i32 9>, align 16
@llvm_mips_dpadd_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
define void @llvm_mips_dpadd_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpadd_s_d_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_d_ARG2
- %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_d_ARG3
- %3 = tail call <2 x i64> @llvm.mips.dpadd.s.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
- store <2 x i64> %3, <2 x i64>* @llvm_mips_dpadd_s_d_RES
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_d_ARG2
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_d_ARG3
+ %2 = tail call <2 x i64> @llvm.mips.dpadd.s.d(<2 x i64> <i64 4, i64 4>, <4 x i32> %0, <4 x i32> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_dpadd_s_d_RES
ret void
}
declare <2 x i64> @llvm.mips.dpadd.s.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
; CHECK: llvm_mips_dpadd_s_d_test:
+; CHECK: ldi.d [[R1:\$w[0-9]+]],
; CHECK: ld.w
; CHECK: ld.w
-; CHECK: ld.d
-; CHECK: dpadd_s.d
+; CHECK: dpadd_s.d [[R1]],
; CHECK: st.d
; CHECK: .size llvm_mips_dpadd_s_d_test
;
-@llvm_mips_dpadd_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_dpadd_u_h_ARG2 = global <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23>, align 16
@llvm_mips_dpadd_u_h_ARG3 = global <16 x i8> <i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39>, align 16
@llvm_mips_dpadd_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
define void @llvm_mips_dpadd_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_h_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_u_h_ARG2
- %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_u_h_ARG3
- %3 = tail call <8 x i16> @llvm.mips.dpadd.u.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
- store <8 x i16> %3, <8 x i16>* @llvm_mips_dpadd_u_h_RES
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_u_h_ARG2
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_u_h_ARG3
+ %2 = tail call <8 x i16> @llvm.mips.dpadd.u.h(<8 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>, <16 x i8> %0, <16 x i8> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_dpadd_u_h_RES
ret void
}
@@ -100,23 +92,21 @@ declare <8 x i16> @llvm.mips.dpadd.u.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
; CHECK: llvm_mips_dpadd_u_h_test:
; CHECK: ld.b
; CHECK: ld.b
-; CHECK: ld.h
-; CHECK: dpadd_u.h
+; CHECK: ldi.h [[R1:\$w[0-9]+]],
+; CHECK: dpadd_u.h [[R1]],
; CHECK: st.h
; CHECK: .size llvm_mips_dpadd_u_h_test
;
-@llvm_mips_dpadd_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_dpadd_u_w_ARG2 = global <8 x i16> <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>, align 16
@llvm_mips_dpadd_u_w_ARG3 = global <8 x i16> <i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>, align 16
@llvm_mips_dpadd_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
define void @llvm_mips_dpadd_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_w_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_w_ARG2
- %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_w_ARG3
- %3 = tail call <4 x i32> @llvm.mips.dpadd.u.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
- store <4 x i32> %3, <4 x i32>* @llvm_mips_dpadd_u_w_RES
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_w_ARG2
+ %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_w_ARG3
+ %2 = tail call <4 x i32> @llvm.mips.dpadd.u.w(<4 x i32> <i32 4, i32 4, i32 4, i32 4>, <8 x i16> %0, <8 x i16> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_dpadd_u_w_RES
ret void
}
@@ -125,33 +115,31 @@ declare <4 x i32> @llvm.mips.dpadd.u.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
; CHECK: llvm_mips_dpadd_u_w_test:
; CHECK: ld.h
; CHECK: ld.h
-; CHECK: ld.w
-; CHECK: dpadd_u.w
+; CHECK: ldi.w [[R1:\$w[0-9]+]],
+; CHECK: dpadd_u.w [[R1]],
; CHECK: st.w
; CHECK: .size llvm_mips_dpadd_u_w_test
;
-@llvm_mips_dpadd_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_dpadd_u_d_ARG2 = global <4 x i32> <i32 2, i32 3, i32 4, i32 5>, align 16
@llvm_mips_dpadd_u_d_ARG3 = global <4 x i32> <i32 6, i32 7, i32 8, i32 9>, align 16
@llvm_mips_dpadd_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
define void @llvm_mips_dpadd_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpadd_u_d_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_d_ARG2
- %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_d_ARG3
- %3 = tail call <2 x i64> @llvm.mips.dpadd.u.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
- store <2 x i64> %3, <2 x i64>* @llvm_mips_dpadd_u_d_RES
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_d_ARG2
+ %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_d_ARG3
+ %2 = tail call <2 x i64> @llvm.mips.dpadd.u.d(<2 x i64> <i64 4, i64 4>, <4 x i32> %0, <4 x i32> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_dpadd_u_d_RES
ret void
}
declare <2 x i64> @llvm.mips.dpadd.u.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
; CHECK: llvm_mips_dpadd_u_d_test:
+; CHECK: ldi.d [[R1:\$w[0-9]+]],
; CHECK: ld.w
; CHECK: ld.w
-; CHECK: ld.d
-; CHECK: dpadd_u.d
+; CHECK: dpadd_u.d [[R1]],
; CHECK: st.d
; CHECK: .size llvm_mips_dpadd_u_d_test
;
diff --git a/test/CodeGen/Mips/msa/basic_operations.ll b/test/CodeGen/Mips/msa/basic_operations.ll
index d7a05800a273..c14221937f4d 100644
--- a/test/CodeGen/Mips/msa/basic_operations.ll
+++ b/test/CodeGen/Mips/msa/basic_operations.ll
@@ -1,9 +1,9 @@
; RUN: llc -march=mips -mattr=+msa,+fp64 -relocation-model=pic \
; RUN: -verify-machineinstrs < %s | \
-; RUN: FileCheck -check-prefixes=ALL,O32,MIPS32,ALL-BE %s
+; RUN: FileCheck -check-prefixes=ALL,O32,MIPS32,ALL-BE,O32-BE %s
; RUN: llc -march=mipsel -mattr=+msa,+fp64 -relocation-model=pic \
; RUN: -verify-machineinstrs < %s | \
-; RUN: FileCheck -check-prefixes=ALL,O32,MIPS32,ALL-LE %s
+; RUN: FileCheck -check-prefixes=ALL,O32,MIPS32,ALL-LE,O32-LE %s
; RUN: llc -march=mips64 -target-abi n32 -mattr=+msa,+fp64 \
; RUN: -relocation-model=pic -verify-machineinstrs < %s | \
; RUN: FileCheck -check-prefixes=ALL,N32,MIPS64,ALL-BE %s
@@ -58,10 +58,19 @@ define void @const_v16i8() nounwind {
; ALL-DAG: fill.w [[R1:\$w[0-9]+]], [[R2]]
store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, <16 x i8>*@v16i8
- ; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
- ; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
- ; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
- ; ALL: ld.b [[R1:\$w[0-9]+]], 0([[G_PTR]])
+ ; ALL-BE-DAG: lui [[R3:\$[0-9]+]], 1286
+ ; ALL-LE-DAG: lui [[R3:\$[0-9]+]], 2055
+ ; ALL-BE-DAG: ori [[R4:\$[0-9]+]], [[R3]], 1800
+ ; ALL-LE-DAG: ori [[R4:\$[0-9]+]], [[R3]], 1541
+ ; O32-BE: fill.w [[R1:\$w[0-9]+]], [[R4]]
+
+ ; O32: insert.w [[R1]][1], [[R2]]
+ ; O32: splati.d $w{{.*}}, [[R1]][0]
+
+ ; MIPS64-BE: dinsu [[R4]], [[R2]], 32, 32
+ ; MIPS64-LE: dinsu [[R2]], [[R4]], 32, 32
+ ; MIPS64-BE: fill.d $w{{.*}}, [[R4]]
+ ; MIPS64-LE: fill.d $w{{.*}}, [[R2]]
ret void
}
@@ -92,10 +101,19 @@ define void @const_v8i16() nounwind {
; ALL-DAG: fill.w [[R1:\$w[0-9]+]], [[R2]]
store volatile <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 1, i16 2, i16 3, i16 4>, <8 x i16>*@v8i16
- ; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
- ; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
- ; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
- ; ALL: ld.h [[R1:\$w[0-9]+]], 0([[G_PTR]])
+ ; ALL-BE-DAG: lui [[R3:\$[0-9]+]], 3
+ ; ALL-LE-DAG: lui [[R3:\$[0-9]+]], 4
+ ; ALL-BE-DAG: ori [[R4:\$[0-9]+]], [[R3]], 4
+ ; ALL-LE-DAG: ori [[R4:\$[0-9]+]], [[R3]], 3
+
+ ; O32-BE: fill.w [[R1:\$w[0-9]+]], [[R4]]
+ ; O32: insert.w [[R1]][1], [[R2]]
+ ; O32: splati.d $w{{.*}}, [[R1]][0]
+
+ ; MIPS64-BE: dinsu [[R4]], [[R2]], 32, 32
+ ; MIPS64-LE: dinsu [[R2]], [[R4]], 32, 32
+ ; MIPS64-BE: fill.d $w{{.*}}, [[R4]]
+ ; MIPS64-LE: fill.d $w{{.*}}, [[R2]]
ret void
}
@@ -122,10 +140,23 @@ define void @const_v4i32() nounwind {
; ALL: ldi.h [[R1:\$w[0-9]+]], 1
store volatile <4 x i32> <i32 1, i32 2, i32 1, i32 2>, <4 x i32>*@v4i32
- ; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
- ; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
- ; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
- ; ALL: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]])
+ ; -BE-DAG: ori [[R2:\$[0-9]+]], $zero, 1
+ ; O32-BE-DAG: ori [[R3:\$[0-9]+]], $zero, 1
+ ; O32-BE-DAG: ori [[R4:\$[0-9]+]], $zero, 2
+ ; O32-LE-DAG: ori [[R3:\$[0-9]+]], $zero, 2
+ ; O32-LE-DAG: ori [[R4:\$[0-9]+]], $zero, 1
+ ; O32: fill.w [[W0:\$w[0-9]+]], [[R4]]
+ ; O32: insert.w [[W0]][1], [[R3]]
+ ; O32: splati.d [[W1:\$w[0-9]+]], [[W0]][0]
+
+ ; MIPS64-DAG: ori [[R5:\$[0-9]+]], $zero, 2
+ ; MIPS64-DAG: ori [[R6:\$[0-9]+]], $zero, 1
+
+ ; MIPS64-BE: dinsu [[R5]], [[R6]], 32, 32
+ ; MIPS64-LE: dinsu [[R6]], [[R5]], 32, 32
+ ; MIPS64-BE: fill.d $w{{.*}}, [[R5]]
+ ; MIPS64-LE: fill.d $w{{.*}}, [[R6]]
+
store volatile <4 x i32> <i32 3, i32 4, i32 5, i32 6>, <4 x i32>*@v4i32
; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
diff --git a/test/CodeGen/Mips/msa/bitwise.ll b/test/CodeGen/Mips/msa/bitwise.ll
index 2a260b2c5733..63fce5283ba0 100644
--- a/test/CodeGen/Mips/msa/bitwise.ll
+++ b/test/CodeGen/Mips/msa/bitwise.ll
@@ -1099,7 +1099,7 @@ define void @binsl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
i8 63, i8 63, i8 63, i8 63,
i8 63, i8 63, i8 63, i8 63>
%5 = or <16 x i8> %3, %4
- ; CHECK-DAG: binsli.b [[R2]], [[R1]], 2
+ ; CHECK-DAG: binsli.b [[R2]], [[R1]], 1
store <16 x i8> %5, <16 x i8>* %c
; CHECK-DAG: st.b [[R2]], 0($4)
@@ -1119,7 +1119,7 @@ define void @binsl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
%4 = and <8 x i16> %2, <i16 16383, i16 16383, i16 16383, i16 16383,
i16 16383, i16 16383, i16 16383, i16 16383>
%5 = or <8 x i16> %3, %4
- ; CHECK-DAG: binsli.h [[R2]], [[R1]], 2
+ ; CHECK-DAG: binsli.h [[R2]], [[R1]], 1
store <8 x i16> %5, <8 x i16>* %c
; CHECK-DAG: st.h [[R2]], 0($4)
@@ -1137,7 +1137,7 @@ define void @binsl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
%3 = and <4 x i32> %1, <i32 3221225472, i32 3221225472, i32 3221225472, i32 3221225472>
%4 = and <4 x i32> %2, <i32 1073741823, i32 1073741823, i32 1073741823, i32 1073741823>
%5 = or <4 x i32> %3, %4
- ; CHECK-DAG: binsli.w [[R2]], [[R1]], 2
+ ; CHECK-DAG: binsli.w [[R2]], [[R1]], 1
store <4 x i32> %5, <4 x i32>* %c
; CHECK-DAG: st.w [[R2]], 0($4)
@@ -1159,7 +1159,7 @@ define void @binsl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
; issue. If the mask doesn't fit within a 10-bit immediate, it gets
; legalized into a constant pool. We should add a test to cover the
; other cases once they correctly select binsli.d.
- ; CHECK-DAG: binsli.d [[R2]], [[R1]], 61
+ ; CHECK-DAG: binsli.d [[R2]], [[R1]], 60
store <2 x i64> %5, <2 x i64>* %c
; CHECK-DAG: st.d [[R2]], 0($4)
@@ -1181,7 +1181,7 @@ define void @binsr_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
i8 252, i8 252, i8 252, i8 252,
i8 252, i8 252, i8 252, i8 252>
%5 = or <16 x i8> %3, %4
- ; CHECK-DAG: binsri.b [[R2]], [[R1]], 2
+ ; CHECK-DAG: binsri.b [[R2]], [[R1]], 1
store <16 x i8> %5, <16 x i8>* %c
; CHECK-DAG: st.b [[R2]], 0($4)
@@ -1201,7 +1201,7 @@ define void @binsr_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
%4 = and <8 x i16> %2, <i16 65532, i16 65532, i16 65532, i16 65532,
i16 65532, i16 65532, i16 65532, i16 65532>
%5 = or <8 x i16> %3, %4
- ; CHECK-DAG: binsri.h [[R2]], [[R1]], 2
+ ; CHECK-DAG: binsri.h [[R2]], [[R1]], 1
store <8 x i16> %5, <8 x i16>* %c
; CHECK-DAG: st.h [[R2]], 0($4)
@@ -1219,7 +1219,7 @@ define void @binsr_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
%3 = and <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
%4 = and <4 x i32> %2, <i32 4294967292, i32 4294967292, i32 4294967292, i32 4294967292>
%5 = or <4 x i32> %3, %4
- ; CHECK-DAG: binsri.w [[R2]], [[R1]], 2
+ ; CHECK-DAG: binsri.w [[R2]], [[R1]], 1
store <4 x i32> %5, <4 x i32>* %c
; CHECK-DAG: st.w [[R2]], 0($4)
@@ -1237,7 +1237,7 @@ define void @binsr_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
%3 = and <2 x i64> %1, <i64 3, i64 3>
%4 = and <2 x i64> %2, <i64 18446744073709551612, i64 18446744073709551612>
%5 = or <2 x i64> %3, %4
- ; CHECK-DAG: binsri.d [[R2]], [[R1]], 2
+ ; CHECK-DAG: binsri.d [[R2]], [[R1]], 1
store <2 x i64> %5, <2 x i64>* %c
; CHECK-DAG: st.d [[R2]], 0($4)
diff --git a/test/CodeGen/Mips/msa/bmzi_bmnzi.ll b/test/CodeGen/Mips/msa/bmzi_bmnzi.ll
new file mode 100644
index 000000000000..d1cb3c348c73
--- /dev/null
+++ b/test/CodeGen/Mips/msa/bmzi_bmnzi.ll
@@ -0,0 +1,55 @@
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 -relocation-model=pic < %s | FileCheck %s
+
+@llvm_mips_bmnzi_b_ARG1 = global <16 x i8> <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>, align 16
+@llvm_mips_bmnzi_b_ARG2 = global <16 x i8> zeroinitializer, align 16
+@llvm_mips_bmnzi_b_RES = global <16 x i8> zeroinitializer, align 16
+
+define void @llvm_mips_bmnzi_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 240)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_bmnzi_b_RES
+ %3 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 15)
+ store <16 x i8> %3, <16 x i8>* @llvm_mips_bmnzi_b_RES
+ %4 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 170)
+ store <16 x i8> %4, <16 x i8>* @llvm_mips_bmnzi_b_RES
+ ret void
+}
+; CHECK-LABEL: llvm_mips_bmnzi_b_test:
+; CHECK: lw [[R0:\$[0-9]+]], %got(llvm_mips_bmnzi_b_RES)(
+; CHECK: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnzi_b_ARG1)(
+; CHECK: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnzi_b_ARG2)(
+; CHECK: ld.b [[R3:\$w[0-9]+]], 0([[R2]])
+; CHECK: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK: move.v [[R5:\$w[0-9]+]], [[R4]]
+; CHECK: binsli.b [[R5]], [[R3]], 3
+; CHECK: binsri.b [[R5]], [[R3]], 3
+; CHECK: bmnzi.b [[R4]], [[R3]], 170
+
+define void @llvm_mips_bmzi_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG1
+ %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 240)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_bmnzi_b_RES
+ %3 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 15)
+ store <16 x i8> %3, <16 x i8>* @llvm_mips_bmnzi_b_RES
+ %4 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 170)
+ store <16 x i8> %4, <16 x i8>* @llvm_mips_bmnzi_b_RES
+ ret void
+}
+; CHECK-LABEL: llvm_mips_bmzi_b_test:
+; CHECK: lw [[R0:\$[0-9]+]], %got(llvm_mips_bmnzi_b_RES)(
+; CHECK: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnzi_b_ARG2)(
+; CHECK: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnzi_b_ARG1)(
+; CHECK: ld.b [[R3:\$w[0-9]+]], 0([[R2]])
+; CHECK: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK: move.v [[R5:\$w[0-9]+]], [[R4]]
+; CHECK: binsli.b [[R5]], [[R3]], 3
+; CHECK: binsri.b [[R5]], [[R3]], 3
+; bmnzi.b is the same as bmzi.b with ws and wd_in swapped
+; CHECK: bmnzi.b [[R4]], [[R3]], 170
+
+declare <16 x i8> @llvm.mips.bmnzi.b(<16 x i8>, <16 x i8>, i32) nounwind
+declare <16 x i8> @llvm.mips.bmzi.b(<16 x i8>, <16 x i8>, i32) nounwind
diff --git a/test/CodeGen/Mips/msa/f16-llvm-ir.ll b/test/CodeGen/Mips/msa/f16-llvm-ir.ll
index 9957d5be26ed..ac69dc913c18 100644
--- a/test/CodeGen/Mips/msa/f16-llvm-ir.ll
+++ b/test/CodeGen/Mips/msa/f16-llvm-ir.ll
@@ -234,15 +234,15 @@ entry:
; MIPS32: insert.w $w[[W0]][1], $[[R1]]
; MIPS32: insert.w $w[[W0]][3], $[[R1]]
-; MIPS64-N64: ld $[[R3:[0-9]+]], %got_disp(h)
-; MIPS64-N32: lw $[[R3:[0-9]+]], %got_disp(h)
-; MIPS64: dmfc1 $[[R1:[0-9]+]], $f[[F2]]
-; MIPS64: fill.d $w[[W0:[0-9]+]], $[[R1]]
+; MIPS64-N64-DAG: ld $[[R3:[0-9]+]], %got_disp(h)
+; MIPS64-N32-DAG: lw $[[R3:[0-9]+]], %got_disp(h)
+; MIPS64-DAG: dmfc1 $[[R1:[0-9]+]], $f[[F2]]
+; MIPS64-DAG: fill.d $w[[W0:[0-9]+]], $[[R1]]
-; ALL: fexdo.w $w[[W1:[0-9]+]], $w[[W0]], $w[[W0]]
-; ALL: fexdo.h $w[[W2:[0-9]+]], $w[[W1]], $w[[W1]]
+; ALL-DAG: fexdo.w $w[[W1:[0-9]+]], $w[[W0]], $w[[W0]]
+; ALL-DAG: fexdo.h $w[[W2:[0-9]+]], $w[[W1]], $w[[W1]]
-; MIPS32: lw $[[R3:[0-9]+]], %got(h)
+; MIPS32-DAG: lw $[[R3:[0-9]+]], %got(h)
; ALL: copy_u.h $[[R2:[0-9]+]], $w[[W2]]
; ALL: sh $[[R2]], 0($[[R3]])
diff --git a/test/CodeGen/Mips/msa/i5-b.ll b/test/CodeGen/Mips/msa/i5-b.ll
index c588c8b2407e..5afd3cd48dd5 100644
--- a/test/CodeGen/Mips/msa/i5-b.ll
+++ b/test/CodeGen/Mips/msa/i5-b.ll
@@ -89,7 +89,7 @@ define void @llvm_mips_binsli_b_test() nounwind {
entry:
%0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsli_b_ARG1
%1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsli_b_ARG2
- %2 = tail call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %0, <16 x i8> %1, i32 7)
+ %2 = tail call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %0, <16 x i8> %1, i32 6)
store <16 x i8> %2, <16 x i8>* @llvm_mips_binsli_b_RES
ret void
}
@@ -101,7 +101,7 @@ declare <16 x i8> @llvm.mips.binsli.b(<16 x i8>, <16 x i8>, i32) nounwind
; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsli_b_ARG2)(
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]])
-; CHECK-DAG: binsli.b [[R3]], [[R4]], 7
+; CHECK-DAG: binsli.b [[R3]], [[R4]], 6
; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsli_b_RES)(
; CHECK-DAG: st.b [[R3]], 0([[R5]])
; CHECK: .size llvm_mips_binsli_b_test
@@ -193,7 +193,7 @@ define void @llvm_mips_binsri_b_test() nounwind {
entry:
%0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsri_b_ARG1
%1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsri_b_ARG2
- %2 = tail call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %0, <16 x i8> %1, i32 7)
+ %2 = tail call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %0, <16 x i8> %1, i32 6)
store <16 x i8> %2, <16 x i8>* @llvm_mips_binsri_b_RES
ret void
}
@@ -205,7 +205,7 @@ declare <16 x i8> @llvm.mips.binsri.b(<16 x i8>, <16 x i8>, i32) nounwind
; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsri_b_ARG2)(
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]])
-; CHECK-DAG: binsri.b [[R3]], [[R4]], 7
+; CHECK-DAG: binsri.b [[R3]], [[R4]], 6
; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsri_b_RES)(
; CHECK-DAG: st.b [[R3]], 0([[R5]])
; CHECK: .size llvm_mips_binsri_b_test
diff --git a/test/CodeGen/Mips/msa/i5_ld_st.ll b/test/CodeGen/Mips/msa/i5_ld_st.ll
index c644d242a003..812c400d46e4 100644
--- a/test/CodeGen/Mips/msa/i5_ld_st.ll
+++ b/test/CodeGen/Mips/msa/i5_ld_st.ll
@@ -336,8 +336,8 @@ entry:
; CHECK: llvm_mips_st_b_valid_range_tests:
; CHECK: ld.b
-; CHECK: st.b [[R1:\$w[0-9]+]], -512(
-; CHECK: st.b [[R1:\$w[0-9]+]], 511(
+; CHECK-DAG: st.b [[R1:\$w[0-9]+]], -512(
+; CHECK-DAG: st.b [[R1:\$w[0-9]+]], 511(
; CHECK: .size llvm_mips_st_b_valid_range_tests
;
@@ -351,10 +351,10 @@ entry:
}
; CHECK: llvm_mips_st_b_invalid_range_tests:
-; CHECK: addiu $2, $1, -513
+; CHECK: addiu $2, $1, 512
; CHECK: ld.b
; CHECK: st.b [[R1:\$w[0-9]+]], 0(
-; CHECK: addiu $1, $1, 512
+; CHECK: addiu $1, $1, -513
; CHECK: st.b [[R1:\$w[0-9]+]], 0(
; CHECK: .size llvm_mips_st_b_invalid_range_tests
;
@@ -404,8 +404,8 @@ entry:
; CHECK: llvm_mips_st_h_valid_range_tests:
; CHECK: ld.h
-; CHECK: st.h [[R1:\$w[0-9]+]], -1024(
-; CHECK: st.h [[R1:\$w[0-9]+]], 1022(
+; CHECK-DAG: st.h [[R1:\$w[0-9]+]], -1024(
+; CHECK-DAG: st.h [[R1:\$w[0-9]+]], 1022(
; CHECK: .size llvm_mips_st_h_valid_range_tests
;
@@ -419,10 +419,10 @@ entry:
}
; CHECK: llvm_mips_st_h_invalid_range_tests:
-; CHECK: addiu $2, $1, -1026
+; CHECK: addiu $2, $1, 1024
; CHECK: ld.h
; CHECK: st.h [[R1:\$w[0-9]+]], 0(
-; CHECK: addiu $1, $1, 1024
+; CHECK: addiu $1, $1, -1026
; CHECK: st.h [[R1:\$w[0-9]+]], 0(
; CHECK: .size llvm_mips_st_h_invalid_range_tests
;
@@ -472,8 +472,8 @@ entry:
; CHECK: llvm_mips_st_w_valid_range_tests:
; CHECK: ld.w
-; CHECK: st.w [[R1:\$w[0-9]+]], -2048(
-; CHECK: st.w [[R1:\$w[0-9]+]], 2044(
+; CHECK-DAG: st.w [[R1:\$w[0-9]+]], -2048(
+; CHECK-DAG: st.w [[R1:\$w[0-9]+]], 2044(
; CHECK: .size llvm_mips_st_w_valid_range_tests
;
@@ -487,10 +487,10 @@ entry:
}
; CHECK: llvm_mips_st_w_invalid_range_tests:
-; CHECK: addiu $2, $1, -2052
+; CHECK: addiu $2, $1, 2048
; CHECK: ld.w
; CHECK: st.w [[R1:\$w[0-9]+]], 0(
-; CHECK: addiu $1, $1, 2048
+; CHECK: addiu $1, $1, -2052
; CHECK: st.w [[R1:\$w[0-9]+]], 0(
; CHECK: .size llvm_mips_st_w_invalid_range_tests
;
@@ -540,8 +540,8 @@ entry:
; CHECK: llvm_mips_st_d_valid_range_tests:
; CHECK: ld.d
-; CHECK: st.d [[R1:\$w[0-9]+]], -4096(
-; CHECK: st.d [[R1:\$w[0-9]+]], 4088(
+; CHECK-DAG: st.d [[R1:\$w[0-9]+]], -4096(
+; CHECK-DAG: st.d [[R1:\$w[0-9]+]], 4088(
; CHECK: .size llvm_mips_st_d_valid_range_tests
;
@@ -555,10 +555,10 @@ entry:
}
; CHECK: llvm_mips_st_d_invalid_range_tests:
-; CHECK: addiu $2, $1, -4104
+; CHECK: addiu $2, $1, 4096
; CHECK: ld.d
; CHECK: st.d [[R1:\$w[0-9]+]], 0(
-; CHECK: addiu $1, $1, 4096
+; CHECK: addiu $1, $1, -4104
; CHECK: st.d [[R1:\$w[0-9]+]], 0(
; CHECK: .size llvm_mips_st_d_invalid_range_tests
;
diff --git a/test/CodeGen/Mips/msa/immediates.ll b/test/CodeGen/Mips/msa/immediates.ll
index b561ace30a8a..0e9fb4c7adfc 100644
--- a/test/CodeGen/Mips/msa/immediates.ll
+++ b/test/CodeGen/Mips/msa/immediates.ll
@@ -616,7 +616,7 @@ entry:
; CHECK: binsri.h
%a = load <8 x i16>, <8 x i16> * %ptr, align 16
%b = load <8 x i16>, <8 x i16> * %ptr2, align 16
- %r = call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %a, <8 x i16> %b, i32 15)
+ %r = call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %a, <8 x i16> %b, i32 14)
store <8 x i16> %r, <8 x i16> * %ptr, align 16
ret void
}
@@ -920,7 +920,7 @@ entry:
define void @bclri_d(<2 x i64> * %ptr) {
entry:
; CHECK-LABEL: bclri_d:
-; CHECK: and.v
+; CHECK: bclri.d
%a = load <2 x i64>, <2 x i64> * %ptr, align 16
%r = call <2 x i64> @llvm.mips.bclri.d(<2 x i64> %a, i32 16)
store <2 x i64> %r, <2 x i64> * %ptr, align 16
@@ -930,7 +930,7 @@ entry:
define void @binsli_d(<2 x i64> * %ptr, <2 x i64> * %ptr2) {
entry:
; CHECK-LABEL: binsli_d:
-; CHECK: bsel.v
+; CHECK: binsli.d
%a = load <2 x i64>, <2 x i64> * %ptr, align 16
%b = load <2 x i64>, <2 x i64> * %ptr2, align 16
%r = call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %a, <2 x i64> %b, i32 4)
@@ -952,7 +952,7 @@ entry:
define void @bnegi_d(<2 x i64> * %ptr) {
entry:
; CHECK-LABEL: bnegi_d:
-; CHECK: xor.v
+; CHECK: bnegi.d
%a = load <2 x i64>, <2 x i64> * %ptr, align 16
%r = call <2 x i64> @llvm.mips.bnegi.d(<2 x i64> %a, i32 9)
store <2 x i64> %r, <2 x i64> * %ptr, align 16
@@ -962,7 +962,7 @@ entry:
define void @bseti_d(<2 x i64> * %ptr) {
entry:
; CHECK-LABEL: bseti_d:
-; CHECK: or.v
+; CHECK: bseti.d
%a = load <2 x i64>, <2 x i64> * %ptr, align 16
%r = call <2 x i64> @llvm.mips.bseti.d(<2 x i64> %a, i32 25)
store <2 x i64> %r, <2 x i64> * %ptr, align 16
diff --git a/test/CodeGen/Mips/o32_cc_byval.ll b/test/CodeGen/Mips/o32_cc_byval.ll
index 33431dba43c4..eadf4abfc759 100644
--- a/test/CodeGen/Mips/o32_cc_byval.ll
+++ b/test/CodeGen/Mips/o32_cc_byval.ll
@@ -45,20 +45,18 @@ declare void @callee3(float, %struct.S3* byval, %struct.S1* byval)
define void @f2(float %f, %struct.S1* nocapture byval %s1) nounwind {
entry:
; CHECK: addiu $sp, $sp, -48
-; CHECK: sw $7, 60($sp)
-; CHECK: sw $6, 56($sp)
-; CHECK: lw $4, 80($sp)
-; CHECK: ldc1 $f[[F0:[0-9]+]], 72($sp)
-; CHECK: lw $[[R3:[0-9]+]], 64($sp)
-; CHECK: lw $[[R4:[0-9]+]], 68($sp)
-; CHECK: lw $[[R2:[0-9]+]], 60($sp)
-; CHECK: lh $[[R1:[0-9]+]], 58($sp)
-; CHECK: lb $[[R0:[0-9]+]], 56($sp)
-; CHECK: sw $[[R0]], 32($sp)
-; CHECK: sw $[[R1]], 28($sp)
-; CHECK: sw $[[R2]], 24($sp)
-; CHECK: sw $[[R4]], 20($sp)
-; CHECK: sw $[[R3]], 16($sp)
+; CHECK-DAG: sw $7, 60($sp)
+; CHECK-DAG: sw $6, 56($sp)
+; CHECK-DAG: ldc1 $f[[F0:[0-9]+]], 72($sp)
+; CHECK-DAG: lw $[[R3:[0-9]+]], 64($sp)
+; CHECK-DAG: lw $[[R4:[0-9]+]], 68($sp)
+; CHECK-DAG: lh $[[R1:[0-9]+]], 58($sp)
+; CHECK-DAG: lb $[[R0:[0-9]+]], 56($sp)
+; CHECK-DAG: sw $[[R0]], 32($sp)
+; CHECK-DAG: sw $[[R1]], 28($sp)
+; CHECK-DAG: sw $[[R4]], 20($sp)
+; CHECK-DAG: sw $[[R3]], 16($sp)
+; CHECK-DAG: sw $7, 24($sp)
; CHECK: mfc1 $6, $f[[F0]]
%i2 = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 5
@@ -82,13 +80,11 @@ declare void @callee4(i32, double, i64, i32, i16 signext, i8 signext, float)
define void @f3(%struct.S2* nocapture byval %s2) nounwind {
entry:
; CHECK: addiu $sp, $sp, -48
-; CHECK: sw $7, 60($sp)
-; CHECK: sw $6, 56($sp)
-; CHECK: sw $5, 52($sp)
-; CHECK: sw $4, 48($sp)
-; CHECK: lw $4, 48($sp)
-; CHECK: lw $[[R0:[0-9]+]], 60($sp)
-; CHECK: sw $[[R0]], 24($sp)
+; CHECK-DAG: sw $7, 60($sp)
+; CHECK-DAG: sw $6, 56($sp)
+; CHECK-DAG: sw $5, 52($sp)
+; CHECK-DAG: sw $4, 48($sp)
+; CHECK-DAG: sw $7, 24($sp)
%arrayidx = getelementptr inbounds %struct.S2, %struct.S2* %s2, i32 0, i32 0, i32 0
%tmp = load i32, i32* %arrayidx, align 4
@@ -101,14 +97,14 @@ entry:
define void @f4(float %f, %struct.S3* nocapture byval %s3, %struct.S1* nocapture byval %s1) nounwind {
entry:
; CHECK: addiu $sp, $sp, -48
-; CHECK: sw $7, 60($sp)
-; CHECK: sw $6, 56($sp)
-; CHECK: sw $5, 52($sp)
-; CHECK: lw $4, 60($sp)
-; CHECK: lw $[[R1:[0-9]+]], 80($sp)
-; CHECK: lb $[[R0:[0-9]+]], 52($sp)
-; CHECK: sw $[[R0]], 32($sp)
-; CHECK: sw $[[R1]], 24($sp)
+; CHECK-DAG: sw $7, 60($sp)
+; CHECK-DAG: sw $6, 56($sp)
+; CHECK-DAG: sw $5, 52($sp)
+; CHECK-DAG: lw $[[R1:[0-9]+]], 80($sp)
+; CHECK-DAG: lb $[[R0:[0-9]+]], 52($sp)
+; CHECK-DAG: sw $[[R0]], 32($sp)
+; CHECK-DAG: sw $[[R1]], 24($sp)
+; CHECK: move $4, $7
%i = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 2
%tmp = load i32, i32* %i, align 4
diff --git a/test/CodeGen/Mips/o32_cc_vararg.ll b/test/CodeGen/Mips/o32_cc_vararg.ll
index b4597a3214e2..73aad48b73e6 100644
--- a/test/CodeGen/Mips/o32_cc_vararg.ll
+++ b/test/CodeGen/Mips/o32_cc_vararg.ll
@@ -29,9 +29,9 @@ entry:
; CHECK-LABEL: va1:
; CHECK: addiu $sp, $sp, -16
+; CHECK: sw $5, 20($sp)
; CHECK: sw $7, 28($sp)
; CHECK: sw $6, 24($sp)
-; CHECK: sw $5, 20($sp)
; CHECK: lw $2, 20($sp)
}
@@ -83,8 +83,8 @@ entry:
; CHECK-LABEL: va3:
; CHECK: addiu $sp, $sp, -16
-; CHECK: sw $7, 28($sp)
; CHECK: sw $6, 24($sp)
+; CHECK: sw $7, 28($sp)
; CHECK: lw $2, 24($sp)
}
@@ -236,8 +236,8 @@ entry:
ret i32 %tmp
; CHECK-LABEL: va9:
-; CHECK: addiu $sp, $sp, -32
-; CHECK: lw $2, 52($sp)
+; CHECK: addiu $sp, $sp, -24
+; CHECK: lw $2, 44($sp)
}
; double
diff --git a/test/CodeGen/Mips/return_address.ll b/test/CodeGen/Mips/return_address.ll
index 34b72baa6d25..54a106f4b349 100644
--- a/test/CodeGen/Mips/return_address.ll
+++ b/test/CodeGen/Mips/return_address.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mipsel < %s | FileCheck %s
+; RUN: llc -march=mipsel -verify-machineinstrs < %s | FileCheck %s
define i8* @f1() nounwind {
entry:
diff --git a/test/CodeGen/Mips/stackcoloring.ll b/test/CodeGen/Mips/stackcoloring.ll
index 817caee2f275..680b3128cc1b 100644
--- a/test/CodeGen/Mips/stackcoloring.ll
+++ b/test/CodeGen/Mips/stackcoloring.ll
@@ -11,7 +11,7 @@ define i32 @foo1() {
entry:
%b = alloca [16 x i32], align 4
%0 = bitcast [16 x i32]* %b to i8*
- call void @llvm.lifetime.start(i64 64, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 64, i8* %0)
%arraydecay = getelementptr inbounds [16 x i32], [16 x i32]* %b, i32 0, i32 0
br label %for.body
@@ -28,12 +28,12 @@ for.body: ; preds = %for.body, %entry
br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body
- call void @llvm.lifetime.end(i64 64, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 64, i8* %0)
ret i32 %add
}
-declare void @llvm.lifetime.start(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare i32 @foo2(i32, i32*)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
diff --git a/test/CodeGen/Mips/start-asm-file.ll b/test/CodeGen/Mips/start-asm-file.ll
index 6d0a5425230a..bcea5da59e75 100644
--- a/test/CodeGen/Mips/start-asm-file.ll
+++ b/test/CodeGen/Mips/start-asm-file.ll
@@ -75,7 +75,7 @@
; CHECK-PIC-N32-NLEGACY: .nan legacy
; CHECK-PIC-N32-N2008: .nan 2008
-; CHECK-STATIC-N64: .abicalls
+; CHECK-STATIC-N64-NOT: .abicalls
; CHECK-STATIC-N64-NOT: .option pic0
; CHECK-STATIC-N64: .section .mdebug.abi64
; CHECK-STATIC-N64-NLEGACY: .nan legacy
diff --git a/test/CodeGen/Mips/stchar.ll b/test/CodeGen/Mips/stchar.ll
index 34493e9ae338..a6021be8e808 100644
--- a/test/CodeGen/Mips/stchar.ll
+++ b/test/CodeGen/Mips/stchar.ll
@@ -34,7 +34,7 @@ entry:
; 16_h: lh ${{[0-9]+}}, [[offset2]](${{[0-9]+}})
}
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
diff --git a/test/CodeGen/Mips/tailcall/tailcall-wrong-isa.ll b/test/CodeGen/Mips/tailcall/tailcall-wrong-isa.ll
index 6b09b693a8e9..25a5e5b8fff5 100644
--- a/test/CodeGen/Mips/tailcall/tailcall-wrong-isa.ll
+++ b/test/CodeGen/Mips/tailcall/tailcall-wrong-isa.ll
@@ -4,8 +4,11 @@
; RUN: llc -filetype=obj -march=mipsel -relocation-model=static -verify-machineinstrs -mips-tail-calls=1 < %s -o - \
; RUN: | llvm-objdump -d - | FileCheck %s -check-prefix=STATIC32
-; RUN: llc -filetype=obj -march=mips64el -mcpu=mips64 -verify-machineinstrs -mips-tail-calls=1 < %s -o - \
-; RUN: | llvm-objdump -d - | FileCheck %s -check-prefix=N64
+; RUN: llc -filetype=obj -march=mips64el -relocation-model=pic -mcpu=mips64 -verify-machineinstrs -mips-tail-calls=1 < %s -o - \
+; RUN: | llvm-objdump -d - | FileCheck %s -check-prefix=PIC64
+
+; RUN: llc -filetype=obj -march=mips64el -relocation-model=static -mcpu=mips64 -verify-machineinstrs -mips-tail-calls=1 < %s -o - \
+; RUN: | llvm-objdump -d - | FileCheck %s -check-prefix=STATIC64
; RUN: llc -filetype=obj -march=mipsel -relocation-model=pic -mattr=+micromips -mips-tail-calls=1 < %s -o - \
; RUN: | llvm-objdump -d - | FileCheck %s -check-prefix=PIC32MM
@@ -18,8 +21,11 @@
; RUN: llc -filetype=obj -march=mipsel -relocation-model=static -mcpu=mips32r6 -mips-tail-calls=1 < %s -o - \
; RUN: | llvm-objdump -d - | FileCheck %s -check-prefix=STATIC32R6
-; RUN: llc -filetype=obj -march=mips64el -mcpu=mips64r6 -mips-tail-calls=1 < %s -o - \
-; RUN: | llvm-objdump -d - | FileCheck %s -check-prefix=N64R6
+; RUN: llc -filetype=obj -march=mips64el -relocation-model=pic -mcpu=mips64r6 -mips-tail-calls=1 < %s -o - \
+; RUN: | llvm-objdump -d - | FileCheck %s -check-prefix=PIC64R6
+; RUN: llc -filetype=obj -march=mips64el -relocation-model=static -mcpu=mips64r6 -mips-tail-calls=1 < %s -o - \
+; RUN: | llvm-objdump -d - | FileCheck %s -check-prefix=STATIC64R6
+
define internal i8 @f2(i8) {
ret i8 4
@@ -34,7 +40,8 @@ define i8 @f1(i8 signext %i) nounwind {
; PIC32: {{[0-9a-z]}}: 08 00 20 03 jr $25
; STATIC32: {{[0-9a-z]}}: 00 00 00 08 j 0
-; N64: {{[0-9a-z]+}}: 08 00 20 03 jr $25
+; PIC64: {{[0-9a-z]+}}: 08 00 20 03 jr $25
+; STATIC64: {{[0-9]}}: 00 00 00 08 j 0
; PIC32MM: {{[0-9a-z]+}}: b9 45 jrc $25
; STATIC32MM: {{[0-9a-z]}}: 00 d4 00 00 j 0
@@ -42,5 +49,5 @@ define i8 @f1(i8 signext %i) nounwind {
; PIC32R6: {{[0-9a-z]}}: 00 00 19 d8 jrc $25
; STATIC32R6: {{[0-9a-z]}}: 00 00 00 08 j 0
-; N64R6: {{[0-9a-z]+}}: 00 00 19 d8 jrc $25
-
+; PIC64R6: {{[0-9a-z]+}}: 00 00 19 d8 jrc $25
+; STATIC64R6: {{[0-9]}}: 00 00 00 08 j 0
diff --git a/test/CodeGen/Mips/tailcall/tailcall.ll b/test/CodeGen/Mips/tailcall/tailcall.ll
index b0ac28d819cf..3f04e1cf3053 100644
--- a/test/CodeGen/Mips/tailcall/tailcall.ll
+++ b/test/CodeGen/Mips/tailcall/tailcall.ll
@@ -2,8 +2,10 @@
; RUN: -verify-machineinstrs -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,PIC32
; RUN: llc -march=mipsel -relocation-model=static \
; RUN: -verify-machineinstrs -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,STATIC32
-; RUN: llc -march=mips64el -mcpu=mips64r2 \
-; RUN: -verify-machineinstrs -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,N64
+; RUN: llc -march=mips64el -mcpu=mips64r2 -relocation-model=pic \
+; RUN: -verify-machineinstrs -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,PIC64
+; RUN: llc -march=mips64el -mcpu=mips64r2 -relocation-model=static \
+; RUN: -verify-machineinstrs -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,STATIC64
; RUN: llc -march=mipsel -mattr=mips16 -relocation-model=pic \
; RUN: -verify-machineinstrs -mips-tail-calls=1 < %s | \
; RUN: FileCheck %s -check-prefixes=ALL,PIC16
@@ -15,17 +17,21 @@
; RUN: llc -march=mipsel -relocation-model=pic -mcpu=mips32r6 -mips-tail-calls=1 < %s | \
; RUN: FileCheck %s -check-prefixes=ALL,PIC32R6
-; RUN: llc -march=mipsel -relocation-model=static -mcpu=mips32r6 \
+; RUN: llc -march=mipsel -relocation-model=static -mcpu=mips32r2 \
; RUN: -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,STATIC32
-; RUN: llc -march=mips64el -mcpu=mips64r6 \
-; RUN: -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,N64R6
+; RUN: llc -march=mips64el -relocation-model=pic -mcpu=mips64r2 \
+; RUN: -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=PIC64
+; RUN: llc -march=mips64el -relocation-model=pic -mcpu=mips64r6 \
+; RUN:   -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=PIC64R6
; RUN: llc -march=mipsel -relocation-model=pic -mcpu=mips32r6 -mattr=+micromips \
; RUN: -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,PIC32MM
; RUN: llc -march=mipsel -relocation-model=static -mcpu=mips32r6 \
; RUN: -mattr=+micromips -mips-tail-calls=1 < %s | FileCheck %s -check-prefixes=ALL,STATIC32
-; RUN: llc -march=mips64el -mcpu=mips64r6 -mattr=+micromips -mips-tail-calls=1 < %s \
-; RUN: | FileCheck %s -check-prefixes=ALL,N64
+; RUN: llc -march=mips64el -relocation-model=pic -mcpu=mips64r6 \
+; RUN: -mattr=+micromips -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=PIC64R6MM
+; RUN: llc -march=mips64el -relocation-model=static -mcpu=mips64r6 \
+; RUN: -mattr=+micromips -mips-tail-calls=1 < %s | FileCheck %s -check-prefix=STATIC64
@g0 = common global i32 0, align 4
@g1 = common global i32 0, align 4
@@ -96,7 +102,8 @@ entry:
; PIC32R6: jalr $25
; PIC32MM: jalr $25
; STATIC32: jal
-; N64: jalr $25
+; PIC64: jalr $25
+; STATIC64: jal
; N64R6: jalr $25
; PIC16: jalrc
@@ -113,8 +120,8 @@ entry:
; PIC32R6: jr $25
; PIC32MM: jr
; STATIC32: j
-; N64: jr $25
-; N64R6: jr $25
+; PIC64: jr $25
+; STATIC64: j
; PIC16: jalrc
%0 = load i32, i32* @g0, align 4
@@ -154,8 +161,10 @@ entry:
; PIC32R6: jrc $25
; PIC32MM: jrc
; STATIC32: j
-; N64: jr $25
-; N64R6: jrc $25
+; PIC64: jr $25
+; PIC64R6: jrc $25
+; PIC64R6MM: jr $25
+; STATIC64: j
; PIC16: jalrc
%call = tail call fastcc i32 @caller8_1()
@@ -169,8 +178,8 @@ entry:
; PIC32R6: jalr $25
; PIC32MM: jalr $25
; STATIC32: jal
-; N64: jalr $25
-; N64R6: jalr $25
+; PIC64: jalr $25
+; STATIC64: jal
; PIC16: jalrc
%call = tail call i32 (i32, ...) @callee8(i32 2, i32 1) nounwind
@@ -190,8 +199,9 @@ entry:
; PIC32R6: jrc $25
; PIC32MM: jrc
; STATIC32: j
-; N64: jr $25
-; N64R6: jrc $25
+; PIC64: jr $25
+; STATIC64: j
+; PIC64R6: jrc $25
; PIC16: jalrc
%call = tail call fastcc i32 @caller9_1()
ret i32 %call
@@ -204,8 +214,9 @@ entry:
; PIC32R6: jalrc $25
; PIC32MM: jalr $25
; STATIC32: jal
-; N64: jalr $25
-; N64R6: jalrc $25
+; STATIC64: jal
+; PIC64: jalr $25
+; PIC64R6: jalrc $25
; PIC16: jalrc
%call = tail call i32 @callee9(%struct.S* byval @gs1) nounwind
@@ -221,8 +232,9 @@ entry:
; PIC32R6: jalr $25
; PIC32MM: jalr $25
; STATIC32: jal
-; N64: jalr $25
-; N64R6: jalr $25
+; STATIC64: jal
+; PIC64: jalr $25
+; PIC64R6: jalr $25
; PIC16: jalrc
%call = tail call i32 @callee10(i32 %a8, i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7) nounwind
@@ -238,8 +250,9 @@ entry:
; PIC32R6: jalrc $25
; PIC32MM: jalr $25
; STATIC32: jal
-; N64: jalr $25
-; N64R6: jalrc $25
+; STATIC64: jal
+; PIC64: jalr $25
+; PIC64R6: jalrc $25
; PIC16: jalrc
%call = tail call i32 @callee11(%struct.S* byval @gs1) nounwind
@@ -257,8 +270,9 @@ entry:
; PIC32R6: jalrc $25
; PIC32MM: jalr $25
; STATIC32: jal
-; N64: jalr $25
-; N64R6: jalrc $25
+; STATIC64: jal
+; PIC64: jalr $25
+; PIC64R6: jalrc $25
; PIC16: jalrc
%0 = bitcast %struct.S* %a0 to i8*
@@ -271,13 +285,14 @@ declare i32 @callee13(i32, ...)
define i32 @caller13() nounwind {
entry:
-; ALL-LABEL: caller13
+; ALL-LABEL: caller13:
; PIC32: jalr $25
; PIC32R6: jalr $25
; PIC32MM: jalr $25
; STATIC32: jal
-; N64: jalr $25
-; N64R6: jalr $25
+; STATIC64: jal
+; PIC64R6: jalr $25
+; PIC64: jalr $25
; PIC16: jalrc
%call = tail call i32 (i32, ...) @callee13(i32 1, i32 2) nounwind
diff --git a/test/CodeGen/Mips/tnaked.ll b/test/CodeGen/Mips/tnaked.ll
index 08f1ab5be86e..7dff19c5d000 100644
--- a/test/CodeGen/Mips/tnaked.ll
+++ b/test/CodeGen/Mips/tnaked.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mipsel < %s | FileCheck %s
+; RUN: llc -march=mipsel < %s -verify-machineinstrs | FileCheck %s
define void @tnaked() #0 {
diff --git a/test/CodeGen/Mips/xray-mips-attribute-instrumentation.ll b/test/CodeGen/Mips/xray-mips-attribute-instrumentation.ll
new file mode 100644
index 000000000000..a7c859a1815f
--- /dev/null
+++ b/test/CodeGen/Mips/xray-mips-attribute-instrumentation.ll
@@ -0,0 +1,147 @@
+; RUN: llc -filetype=asm -o - -mtriple=mips-unknown-linux-gnu < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-MIPS32 %s
+; RUN: llc -filetype=asm -o - -mtriple=mipsel-unknown-linux-gnu < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-MIPS32 %s
+; RUN: llc -filetype=asm -o - -mtriple=mips64-unknown-linux-gnu < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-MIPS64 %s
+; RUN: llc -filetype=asm -o - -mtriple=mips64el-unknown-linux-gnu < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-MIPS64 %s
+
+define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always" {
+; CHECK: .p2align 2
+; CHECK-MIPS64-LABEL: .Lxray_sled_0:
+; CHECK-MIPS32-LABEL: $xray_sled_0:
+; CHECK-MIPS64: b .Ltmp0
+; CHECK-MIPS32: b $tmp0
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64-LABEL: .Ltmp0:
+; CHECK-MIPS32-LABEL: $tmp0:
+; CHECK-MIPS32: addiu $25, $25, 52
+ ret i32 0
+; CHECK: .p2align 2
+; CHECK-MIPS64-LABEL: .Lxray_sled_1:
+; CHECK-MIPS32-LABEL: $xray_sled_1:
+; CHECK-MIPS64: b .Ltmp1
+; CHECK-MIPS32: b $tmp1
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64-LABEL: .Ltmp1:
+; CHECK-MIPS32-LABEL: $tmp1:
+; CHECK-MIPS32: addiu $25, $25, 52
+}
+; CHECK: .section xray_instr_map,{{.*}}
+; CHECK-MIPS64: .8byte .Lxray_sled_0
+; CHECK-MIPS64: .8byte .Lxray_sled_1
+; CHECK-MIPS32: .4byte ($xray_sled_0)
+; CHECK-MIPS32: .4byte ($xray_sled_1)
+
+; We test multiple returns in a single function to make sure we're getting all
+; of them with XRay instrumentation.
+define i32 @bar(i32 %i) nounwind noinline uwtable "function-instrument"="xray-always" {
+; CHECK: .p2align 2
+; CHECK-MIPS64-LABEL: .Lxray_sled_2:
+; CHECK-MIPS32-LABEL: $xray_sled_2:
+; CHECK-MIPS64: b .Ltmp2
+; CHECK-MIPS32: b $tmp2
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64-LABEL: .Ltmp2:
+; CHECK-MIPS32-LABEL: $tmp2:
+; CHECK-MIPS32: addiu $25, $25, 52
+Test:
+ %cond = icmp eq i32 %i, 0
+ br i1 %cond, label %IsEqual, label %NotEqual
+IsEqual:
+ ret i32 0
+; CHECK: .p2align 2
+; CHECK-MIPS64-LABEL: .Lxray_sled_3:
+; CHECK-MIPS32-LABEL: $xray_sled_3:
+; CHECK-MIPS64: b .Ltmp3
+; CHECK-MIPS32: b $tmp3
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64-LABEL: .Ltmp3:
+; CHECK-MIPS32-LABEL: $tmp3:
+; CHECK-MIPS32: addiu $25, $25, 52
+NotEqual:
+ ret i32 1
+; CHECK: .p2align 2
+; CHECK-MIPS64-LABEL: .Lxray_sled_4:
+; CHECK-MIPS32-LABEL: $xray_sled_4:
+; CHECK-MIPS64: b .Ltmp4
+; CHECK-MIPS32: b $tmp4
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64: nop
+; CHECK-MIPS64-LABEL: .Ltmp4:
+; CHECK-MIPS32-LABEL: $tmp4:
+; CHECK-MIPS32: addiu $25, $25, 52
+}
+; CHECK: .section xray_instr_map,{{.*}}
+; CHECK-MIPS64: .8byte .Lxray_sled_2
+; CHECK-MIPS64: .8byte .Lxray_sled_3
+; CHECK-MIPS64: .8byte .Lxray_sled_4
+; CHECK-MIPS32: .4byte ($xray_sled_2)
+; CHECK-MIPS32: .4byte ($xray_sled_3)
+; CHECK-MIPS32: .4byte ($xray_sled_4)
diff --git a/test/CodeGen/Mips/xray-section-group.ll b/test/CodeGen/Mips/xray-section-group.ll
new file mode 100644
index 000000000000..d87f178ec4be
--- /dev/null
+++ b/test/CodeGen/Mips/xray-section-group.ll
@@ -0,0 +1,31 @@
+; RUN: llc -filetype=asm -o - -mtriple=mips-unknown-linux-gnu -function-sections < %s | FileCheck %s
+; RUN: llc -filetype=asm -o - -mtriple=mipsel-unknown-linux-gnu -function-sections < %s | FileCheck %s
+; RUN: llc -filetype=obj -o %t -mtriple=mips-unknown-linux-gnu -function-sections < %s
+; RUN: llvm-readobj -sections %t | FileCheck %s --check-prefix=CHECK-OBJ
+; RUN: llc -filetype=obj -o %t -mtriple=mipsel-unknown-linux-gnu -function-sections < %s
+; RUN: llvm-readobj -sections %t | FileCheck %s --check-prefix=CHECK-OBJ
+; RUN: llc -filetype=asm -o - -mtriple=mips64-unknown-linux-gnu -function-sections < %s | FileCheck %s
+; RUN: llc -filetype=asm -o - -mtriple=mips64el-unknown-linux-gnu -function-sections < %s | FileCheck %s
+; RUN: llc -filetype=obj -o %t -mtriple=mips64-unknown-linux-gnu -function-sections < %s
+; RUN: llvm-readobj -sections %t | FileCheck %s --check-prefix=CHECK-OBJ
+; RUN: llc -filetype=obj -o %t -mtriple=mips64el-unknown-linux-gnu -function-sections < %s
+; RUN: llvm-readobj -sections %t | FileCheck %s --check-prefix=CHECK-OBJ
+
+define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always" {
+; CHECK: .section .text.foo,"ax",@progbits
+ ret i32 0
+; CHECK: .section xray_instr_map,"a",@progbits
+}
+
+; CHECK-OBJ: Section {
+; CHECK-OBJ: Name: xray_instr_map
+
+$bar = comdat any
+define i32 @bar() nounwind noinline uwtable "function-instrument"="xray-always" comdat($bar) {
+; CHECK: .section .text.bar,"axG",@progbits,bar,comdat
+ ret i32 1
+; CHECK: .section xray_instr_map,"aG",@progbits,bar,comdat
+}
+
+; CHECK-OBJ: Section {
+; CHECK-OBJ: Name: xray_instr_map
diff --git a/test/CodeGen/NVPTX/LoadStoreVectorizer.ll b/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
index 1a4b0bad36e1..e84030f385c4 100644
--- a/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
+++ b/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
@@ -15,3 +15,37 @@ define i32 @f(i32* %p) {
%sum = add i32 %v0, %v1
ret i32 %sum
}
+
+define half @fh(half* %p) {
+ %p.1 = getelementptr half, half* %p, i32 1
+ %p.2 = getelementptr half, half* %p, i32 2
+ %p.3 = getelementptr half, half* %p, i32 3
+ %p.4 = getelementptr half, half* %p, i32 4
+ %v0 = load half, half* %p, align 64
+ %v1 = load half, half* %p.1, align 4
+ %v2 = load half, half* %p.2, align 4
+ %v3 = load half, half* %p.3, align 4
+ %v4 = load half, half* %p.4, align 4
+ %sum1 = fadd half %v0, %v1
+ %sum2 = fadd half %v2, %v3
+ %sum3 = fadd half %sum1, %sum2
+ %sum = fadd half %sum3, %v4
+ ret half %sum
+}
+
+define float @ff(float* %p) {
+ %p.1 = getelementptr float, float* %p, i32 1
+ %p.2 = getelementptr float, float* %p, i32 2
+ %p.3 = getelementptr float, float* %p, i32 3
+ %p.4 = getelementptr float, float* %p, i32 4
+ %v0 = load float, float* %p, align 64
+ %v1 = load float, float* %p.1, align 4
+ %v2 = load float, float* %p.2, align 4
+ %v3 = load float, float* %p.3, align 4
+ %v4 = load float, float* %p.4, align 4
+ %sum1 = fadd float %v0, %v1
+ %sum2 = fadd float %v2, %v3
+ %sum3 = fadd float %sum1, %sum2
+ %sum = fadd float %sum3, %v4
+ ret float %sum
+}
diff --git a/test/CodeGen/NVPTX/access-non-generic.ll b/test/CodeGen/NVPTX/access-non-generic.ll
index c4cbeca4e409..d5776d77b10d 100644
--- a/test/CodeGen/NVPTX/access-non-generic.ll
+++ b/test/CodeGen/NVPTX/access-non-generic.ll
@@ -1,6 +1,7 @@
; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix PTX
; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix PTX
-; RUN: opt < %s -S -nvptx-infer-addrspace | FileCheck %s --check-prefix IR
+; RUN: opt -mtriple=nvptx-- < %s -S -infer-address-spaces | FileCheck %s --check-prefix IR
+; RUN: opt -mtriple=nvptx64-- < %s -S -infer-address-spaces | FileCheck %s --check-prefix IR
@array = internal addrspace(3) global [10 x float] zeroinitializer, align 4
@scalar = internal addrspace(3) global float 0.000000e+00, align 4
diff --git a/test/CodeGen/NVPTX/add-128bit.ll b/test/CodeGen/NVPTX/add-128bit.ll
index 29e3cdffae7b..a077c3fcf891 100644
--- a/test/CodeGen/NVPTX/add-128bit.ll
+++ b/test/CodeGen/NVPTX/add-128bit.ll
@@ -8,7 +8,7 @@ define void @foo(i64 %a, i64 %add, i128* %retptr) {
; CHECK: add.s64
; CHECK: setp.lt.u64
; CHECK: setp.lt.u64
-; CHECK: selp.b64
+; CHECK: selp.u64
; CHECK: selp.b64
; CHECK: add.s64
%t1 = sext i64 %a to i128
diff --git a/test/CodeGen/NVPTX/aggregate-return.ll b/test/CodeGen/NVPTX/aggregate-return.ll
index 527c5c9aa85d..785b4d6d90dc 100644
--- a/test/CodeGen/NVPTX/aggregate-return.ll
+++ b/test/CodeGen/NVPTX/aggregate-return.ll
@@ -1,21 +1,40 @@
; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 | FileCheck %s
declare <2 x float> @barv(<2 x float> %input)
+declare <3 x float> @barv3(<3 x float> %input)
declare [2 x float] @bara([2 x float] %input)
declare {float, float} @bars({float, float} %input)
-define void @foov(<2 x float> %input, <2 x float>* %output) {
-; CHECK-LABEL: @foov
+define void @test_v2f32(<2 x float> %input, <2 x float>* %output) {
+; CHECK-LABEL: @test_v2f32
%call = tail call <2 x float> @barv(<2 x float> %input)
; CHECK: .param .align 8 .b8 retval0[8];
-; CHECK: ld.param.v2.f32 {[[ELEMV1:%f[0-9]+]], [[ELEMV2:%f[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.v2.f32 {[[E0:%f[0-9]+]], [[E1:%f[0-9]+]]}, [retval0+0];
store <2 x float> %call, <2 x float>* %output, align 8
-; CHECK: st.v2.f32 [{{%rd[0-9]+}}], {[[ELEMV1]], [[ELEMV2]]}
+; CHECK: st.v2.f32 [{{%rd[0-9]+}}], {[[E0]], [[E1]]}
ret void
}
-define void @fooa([2 x float] %input, [2 x float]* %output) {
-; CHECK-LABEL: @fooa
+define void @test_v3f32(<3 x float> %input, <3 x float>* %output) {
+; CHECK-LABEL: @test_v3f32
+;
+ %call = tail call <3 x float> @barv3(<3 x float> %input)
+; CHECK: .param .align 16 .b8 retval0[16];
+; CHECK-DAG: ld.param.v2.f32 {[[E0:%f[0-9]+]], [[E1:%f[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.f32 [[E2:%f[0-9]+]], [retval0+8];
+; Make sure we don't load more values than we need to.
+; CHECK-NOT: ld.param.f32 [[E3:%f[0-9]+]], [retval0+12];
+ store <3 x float> %call, <3 x float>* %output, align 8
+; CHECK-DAG: st.f32 [{{%rd[0-9]}}+8],
+; -- This is suboptimal. We should do st.v2.f32 instead
+; of combining 2xf32 into i64.
+; CHECK-DAG: st.u64 [{{%rd[0-9]}}],
+; CHECK: ret;
+ ret void
+}
+
+define void @test_a2f32([2 x float] %input, [2 x float]* %output) {
+; CHECK-LABEL: @test_a2f32
%call = tail call [2 x float] @bara([2 x float] %input)
; CHECK: .param .align 4 .b8 retval0[8];
; CHECK-DAG: ld.param.f32 [[ELEMA1:%f[0-9]+]], [retval0+0];
@@ -28,8 +47,8 @@ define void @fooa([2 x float] %input, [2 x float]* %output) {
; CHECK: ret
}
-define void @foos({float, float} %input, {float, float}* %output) {
-; CHECK-LABEL: @foos
+define void @test_s2f32({float, float} %input, {float, float}* %output) {
+; CHECK-LABEL: @test_s2f32
%call = tail call {float, float} @bars({float, float} %input)
; CHECK: .param .align 4 .b8 retval0[8];
; CHECK-DAG: ld.param.f32 [[ELEMS1:%f[0-9]+]], [retval0+0];
diff --git a/test/CodeGen/NVPTX/bug22322.ll b/test/CodeGen/NVPTX/bug22322.ll
index 0c4c30cf37ed..74133d3dcabd 100644
--- a/test/CodeGen/NVPTX/bug22322.ll
+++ b/test/CodeGen/NVPTX/bug22322.ll
@@ -17,7 +17,7 @@ _ZL11compute_vecRK6float3jb.exit:
%4 = add nsw i32 %2, %3
%5 = zext i32 %4 to i64
%6 = bitcast float* %ret_vec.sroa.8.i to i8*
- call void @llvm.lifetime.start(i64 4, i8* %6)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %6)
%7 = and i32 %4, 15
%8 = icmp eq i32 %7, 0
%9 = select i1 %8, float 0.000000e+00, float -1.000000e+00
@@ -26,7 +26,7 @@ _ZL11compute_vecRK6float3jb.exit:
%10 = fcmp olt float %9, 0.000000e+00
%ret_vec.sroa.8.i.val = load float, float* %ret_vec.sroa.8.i, align 4
%11 = select i1 %10, float 0.000000e+00, float %ret_vec.sroa.8.i.val
- call void @llvm.lifetime.end(i64 4, i8* %6)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %6)
%12 = getelementptr inbounds %class.float3, %class.float3* %dst, i64 %5, i32 0
store float 0.000000e+00, float* %12, align 4
%13 = getelementptr inbounds %class.float3, %class.float3* %dst, i64 %5, i32 1
@@ -46,10 +46,10 @@ declare i32 @llvm.nvvm.read.ptx.sreg.ntid.x() #1
declare i32 @llvm.nvvm.read.ptx.sreg.tid.x() #1
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #2
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #2
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #2
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #2
attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "unsafe-fp-math"="true" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/NVPTX/combine-min-max.ll b/test/CodeGen/NVPTX/combine-min-max.ll
index 64bb7a37ffd2..3de86be10a5c 100644
--- a/test/CodeGen/NVPTX/combine-min-max.ll
+++ b/test/CodeGen/NVPTX/combine-min-max.ll
@@ -21,20 +21,140 @@ define i64 @ba_ne_i64(i64 %a, i64 %b) {
ret i64 %sel
}
-; PTX does have e.g. max.s16, but at least as of Kepler (sm_3x) that
-; gets compiled to SASS that converts the 16 bit parameters to 32 bit
-; before using a 32 bit instruction. That is probably not a win and
-; NVCC 7.5 does not emit 16 bit min/max either, presumably for that
-; reason.
+; *************************************
+; * All variations with i16
+
+; *** ab, unsigned, i16
define i16 @ab_ugt_i16(i16 %a, i16 %b) {
; LABEL: @ab_ugt_i16
-; CHECK-NOT: min
-; CHECK-NOT: max
+; CHECK: max.u16
%cmp = icmp ugt i16 %a, %b
%sel = select i1 %cmp, i16 %a, i16 %b
ret i16 %sel
}
+define i16 @ab_uge_i16(i16 %a, i16 %b) {
+; LABEL: @ab_uge_i16
+; CHECK: max.u16
+ %cmp = icmp uge i16 %a, %b
+ %sel = select i1 %cmp, i16 %a, i16 %b
+ ret i16 %sel
+}
+
+define i16 @ab_ult_i16(i16 %a, i16 %b) {
+; LABEL: @ab_ult_i16
+; CHECK: min.u16
+ %cmp = icmp ult i16 %a, %b
+ %sel = select i1 %cmp, i16 %a, i16 %b
+ ret i16 %sel
+}
+
+define i16 @ab_ule_i16(i16 %a, i16 %b) {
+; LABEL: @ab_ule_i16
+; CHECK: min.u16
+ %cmp = icmp ule i16 %a, %b
+ %sel = select i1 %cmp, i16 %a, i16 %b
+ ret i16 %sel
+}
+
+; *** ab, signed, i16
+define i16 @ab_sgt_i16(i16 %a, i16 %b) {
+; LABEL: @ab_sgt_i16
+; CHECK: max.s16
+ %cmp = icmp sgt i16 %a, %b
+ %sel = select i1 %cmp, i16 %a, i16 %b
+ ret i16 %sel
+}
+
+define i16 @ab_sge_i16(i16 %a, i16 %b) {
+; LABEL: @ab_sge_i16
+; CHECK: max.s16
+ %cmp = icmp sge i16 %a, %b
+ %sel = select i1 %cmp, i16 %a, i16 %b
+ ret i16 %sel
+}
+
+define i16 @ab_slt_i16(i16 %a, i16 %b) {
+; LABEL: @ab_slt_i16
+; CHECK: min.s16
+ %cmp = icmp slt i16 %a, %b
+ %sel = select i1 %cmp, i16 %a, i16 %b
+ ret i16 %sel
+}
+
+define i16 @ab_sle_i16(i16 %a, i16 %b) {
+; LABEL: @ab_sle_i16
+; CHECK: min.s16
+ %cmp = icmp sle i16 %a, %b
+ %sel = select i1 %cmp, i16 %a, i16 %b
+ ret i16 %sel
+}
+
+; *** ba, unsigned, i16
+define i16 @ba_ugt_i16(i16 %a, i16 %b) {
+; LABEL: @ba_ugt_i16
+; CHECK: min.u16
+ %cmp = icmp ugt i16 %a, %b
+ %sel = select i1 %cmp, i16 %b, i16 %a
+ ret i16 %sel
+}
+
+define i16 @ba_uge_i16(i16 %a, i16 %b) {
+; LABEL: @ba_uge_i16
+; CHECK: min.u16
+ %cmp = icmp uge i16 %a, %b
+ %sel = select i1 %cmp, i16 %b, i16 %a
+ ret i16 %sel
+}
+
+define i16 @ba_ult_i16(i16 %a, i16 %b) {
+; LABEL: @ba_ult_i16
+; CHECK: max.u16
+ %cmp = icmp ult i16 %a, %b
+ %sel = select i1 %cmp, i16 %b, i16 %a
+ ret i16 %sel
+}
+
+define i16 @ba_ule_i16(i16 %a, i16 %b) {
+; LABEL: @ba_ule_i16
+; CHECK: max.u16
+ %cmp = icmp ule i16 %a, %b
+ %sel = select i1 %cmp, i16 %b, i16 %a
+ ret i16 %sel
+}
+
+; *** ba, signed, i16
+define i16 @ba_sgt_i16(i16 %a, i16 %b) {
+; LABEL: @ba_sgt_i16
+; CHECK: min.s16
+ %cmp = icmp sgt i16 %a, %b
+ %sel = select i1 %cmp, i16 %b, i16 %a
+ ret i16 %sel
+}
+
+define i16 @ba_sge_i16(i16 %a, i16 %b) {
+; LABEL: @ba_sge_i16
+; CHECK: min.s16
+ %cmp = icmp sge i16 %a, %b
+ %sel = select i1 %cmp, i16 %b, i16 %a
+ ret i16 %sel
+}
+
+define i16 @ba_slt_i16(i16 %a, i16 %b) {
+; LABEL: @ba_slt_i16
+; CHECK: max.s16
+ %cmp = icmp slt i16 %a, %b
+ %sel = select i1 %cmp, i16 %b, i16 %a
+ ret i16 %sel
+}
+
+define i16 @ba_sle_i16(i16 %a, i16 %b) {
+; LABEL: @ba_sle_i16
+; CHECK: max.s16
+ %cmp = icmp sle i16 %a, %b
+ %sel = select i1 %cmp, i16 %b, i16 %a
+ ret i16 %sel
+}
; *************************************
; * All variations with i32
diff --git a/test/CodeGen/NVPTX/convert-fp.ll b/test/CodeGen/NVPTX/convert-fp.ll
index 4b5446e317f4..fd28a4f7cc67 100644
--- a/test/CodeGen/NVPTX/convert-fp.ll
+++ b/test/CodeGen/NVPTX/convert-fp.ll
@@ -1,44 +1,37 @@
; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
-
-define i16 @cvt_i16_f32(float %x) {
+define i16 @cvt_u16_f32(float %x) {
; CHECK: cvt.rzi.u16.f32 %rs{{[0-9]+}}, %f{{[0-9]+}};
; CHECK: ret;
%a = fptoui float %x to i16
ret i16 %a
}
-
-define i16 @cvt_i16_f64(double %x) {
+define i16 @cvt_u16_f64(double %x) {
; CHECK: cvt.rzi.u16.f64 %rs{{[0-9]+}}, %fd{{[0-9]+}};
; CHECK: ret;
%a = fptoui double %x to i16
ret i16 %a
}
-
-define i32 @cvt_i32_f32(float %x) {
+define i32 @cvt_u32_f32(float %x) {
; CHECK: cvt.rzi.u32.f32 %r{{[0-9]+}}, %f{{[0-9]+}};
; CHECK: ret;
%a = fptoui float %x to i32
ret i32 %a
}
-
-define i32 @cvt_i32_f64(double %x) {
+define i32 @cvt_u32_f64(double %x) {
; CHECK: cvt.rzi.u32.f64 %r{{[0-9]+}}, %fd{{[0-9]+}};
; CHECK: ret;
%a = fptoui double %x to i32
ret i32 %a
}
-
-
-define i64 @cvt_i64_f32(float %x) {
+define i64 @cvt_u64_f32(float %x) {
; CHECK: cvt.rzi.u64.f32 %rd{{[0-9]+}}, %f{{[0-9]+}};
; CHECK: ret;
%a = fptoui float %x to i64
ret i64 %a
}
-
-define i64 @cvt_i64_f64(double %x) {
+define i64 @cvt_u64_f64(double %x) {
; CHECK: cvt.rzi.u64.f64 %rd{{[0-9]+}}, %fd{{[0-9]+}};
; CHECK: ret;
%a = fptoui double %x to i64
@@ -51,63 +44,30 @@ define float @cvt_f32_i16(i16 %x) {
%a = uitofp i16 %x to float
ret float %a
}
-
define float @cvt_f32_i32(i32 %x) {
; CHECK: cvt.rn.f32.u32 %f{{[0-9]+}}, %r{{[0-9]+}};
; CHECK: ret;
%a = uitofp i32 %x to float
ret float %a
}
-
define float @cvt_f32_i64(i64 %x) {
; CHECK: cvt.rn.f32.u64 %f{{[0-9]+}}, %rd{{[0-9]+}};
; CHECK: ret;
%a = uitofp i64 %x to float
ret float %a
}
-
-define float @cvt_f32_f64(double %x) {
-; CHECK: cvt.rn.f32.f64 %f{{[0-9]+}}, %fd{{[0-9]+}};
-; CHECK: ret;
- %a = fptrunc double %x to float
- ret float %a
-}
-
-define float @cvt_f32_s16(i16 %x) {
-; CHECK: cvt.rn.f32.s16 %f{{[0-9]+}}, %rs{{[0-9]+}}
-; CHECK: ret
- %a = sitofp i16 %x to float
- ret float %a
-}
-
-define float @cvt_f32_s32(i32 %x) {
-; CHECK: cvt.rn.f32.s32 %f{{[0-9]+}}, %r{{[0-9]+}}
-; CHECK: ret
- %a = sitofp i32 %x to float
- ret float %a
-}
-
-define float @cvt_f32_s64(i64 %x) {
-; CHECK: cvt.rn.f32.s64 %f{{[0-9]+}}, %rd{{[0-9]+}}
-; CHECK: ret
- %a = sitofp i64 %x to float
- ret float %a
-}
-
define double @cvt_f64_i16(i16 %x) {
; CHECK: cvt.rn.f64.u16 %fd{{[0-9]+}}, %rs{{[0-9]+}};
; CHECK: ret;
%a = uitofp i16 %x to double
ret double %a
}
-
define double @cvt_f64_i32(i32 %x) {
; CHECK: cvt.rn.f64.u32 %fd{{[0-9]+}}, %r{{[0-9]+}};
; CHECK: ret;
%a = uitofp i32 %x to double
ret double %a
}
-
define double @cvt_f64_i64(i64 %x) {
; CHECK: cvt.rn.f64.u64 %fd{{[0-9]+}}, %rd{{[0-9]+}};
; CHECK: ret;
@@ -115,6 +75,12 @@ define double @cvt_f64_i64(i64 %x) {
ret double %a
}
+define float @cvt_f32_f64(double %x) {
+; CHECK: cvt.rn.f32.f64 %f{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: ret;
+ %a = fptrunc double %x to float
+ ret float %a
+}
define double @cvt_f64_f32(float %x) {
; CHECK: cvt.f64.f32 %fd{{[0-9]+}}, %f{{[0-9]+}};
; CHECK: ret;
@@ -122,23 +88,76 @@ define double @cvt_f64_f32(float %x) {
ret double %a
}
+define float @cvt_f32_s16(i16 %x) {
+; CHECK: cvt.rn.f32.s16 %f{{[0-9]+}}, %rs{{[0-9]+}}
+; CHECK: ret
+ %a = sitofp i16 %x to float
+ ret float %a
+}
+define float @cvt_f32_s32(i32 %x) {
+; CHECK: cvt.rn.f32.s32 %f{{[0-9]+}}, %r{{[0-9]+}}
+; CHECK: ret
+ %a = sitofp i32 %x to float
+ ret float %a
+}
+define float @cvt_f32_s64(i64 %x) {
+; CHECK: cvt.rn.f32.s64 %f{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: ret
+ %a = sitofp i64 %x to float
+ ret float %a
+}
define double @cvt_f64_s16(i16 %x) {
; CHECK: cvt.rn.f64.s16 %fd{{[0-9]+}}, %rs{{[0-9]+}}
; CHECK: ret
%a = sitofp i16 %x to double
ret double %a
}
-
define double @cvt_f64_s32(i32 %x) {
; CHECK: cvt.rn.f64.s32 %fd{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%a = sitofp i32 %x to double
ret double %a
}
-
define double @cvt_f64_s64(i64 %x) {
; CHECK: cvt.rn.f64.s64 %fd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%a = sitofp i64 %x to double
ret double %a
}
+
+define i16 @cvt_s16_f32(float %x) {
+; CHECK: cvt.rzi.s16.f32 %rs{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: ret;
+ %a = fptosi float %x to i16
+ ret i16 %a
+}
+define i16 @cvt_s16_f64(double %x) {
+; CHECK: cvt.rzi.s16.f64 %rs{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: ret;
+ %a = fptosi double %x to i16
+ ret i16 %a
+}
+define i32 @cvt_s32_f32(float %x) {
+; CHECK: cvt.rzi.s32.f32 %r{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: ret;
+ %a = fptosi float %x to i32
+ ret i32 %a
+}
+define i32 @cvt_s32_f64(double %x) {
+; CHECK: cvt.rzi.s32.f64 %r{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: ret;
+ %a = fptosi double %x to i32
+ ret i32 %a
+}
+define i64 @cvt_s64_f32(float %x) {
+; CHECK: cvt.rzi.s64.f32 %rd{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: ret;
+ %a = fptosi float %x to i64
+ ret i64 %a
+}
+define i64 @cvt_s64_f64(double %x) {
+; CHECK: cvt.rzi.s64.f64 %rd{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: ret;
+ %a = fptosi double %x to i64
+ ret i64 %a
+}
diff --git a/test/CodeGen/NVPTX/ctlz.ll b/test/CodeGen/NVPTX/ctlz.ll
index bed15a9f6a54..005958bd938a 100644
--- a/test/CodeGen/NVPTX/ctlz.ll
+++ b/test/CodeGen/NVPTX/ctlz.ll
@@ -6,39 +6,127 @@ declare i16 @llvm.ctlz.i16(i16, i1) readnone
declare i32 @llvm.ctlz.i32(i32, i1) readnone
declare i64 @llvm.ctlz.i64(i64, i1) readnone
-define i32 @myctpop(i32 %a) {
-; CHECK: clz.b32
+; There should be no difference between llvm.ctlz.i32(%a, true) and
+; llvm.ctlz.i32(%a, false), as ptx's clz(0) is well-defined: it returns the
+; bit width of the operand, matching LLVM's semantics for the 'false' variant.
+
+; CHECK-LABEL: myctlz(
+define i32 @myctlz(i32 %a) {
+; CHECK: ld.param.
+; CHECK-NEXT: clz.b32
+; CHECK-NEXT: st.param.
+; CHECK-NEXT: ret;
%val = call i32 @llvm.ctlz.i32(i32 %a, i1 false) readnone
ret i32 %val
}
-
-define i16 @myctpop16(i16 %a) {
-; CHECK: clz.b32
- %val = call i16 @llvm.ctlz.i16(i16 %a, i1 false) readnone
- ret i16 %val
+; CHECK-LABEL: myctlz_2(
+define i32 @myctlz_2(i32 %a) {
+; CHECK: ld.param.
+; CHECK-NEXT: clz.b32
+; CHECK-NEXT: st.param.
+; CHECK-NEXT: ret;
+ %val = call i32 @llvm.ctlz.i32(i32 %a, i1 true) readnone
+ ret i32 %val
}
-define i64 @myctpop64(i64 %a) {
-; CHECK: clz.b64
+; PTX's clz.b64 returns a 32-bit value, but LLVM's intrinsic returns a 64-bit
+; value, so here we have to zero-extend it.
+; CHECK-LABEL: myctlz64(
+define i64 @myctlz64(i64 %a) {
+; CHECK: ld.param.
+; CHECK-NEXT: clz.b64
+; CHECK-NEXT: cvt.u64.u32
+; CHECK-NEXT: st.param.
+; CHECK-NEXT: ret;
%val = call i64 @llvm.ctlz.i64(i64 %a, i1 false) readnone
ret i64 %val
}
+; CHECK-LABEL: myctlz64_2(
+define i64 @myctlz64_2(i64 %a) {
+; CHECK: ld.param.
+; CHECK-NEXT: clz.b64
+; CHECK-NEXT: cvt.u64.u32
+; CHECK-NEXT: st.param.
+; CHECK-NEXT: ret;
+ %val = call i64 @llvm.ctlz.i64(i64 %a, i1 true) readnone
+ ret i64 %val
+}
-
-define i32 @myctpop_2(i32 %a) {
-; CHECK: clz.b32
- %val = call i32 @llvm.ctlz.i32(i32 %a, i1 true) readnone
- ret i32 %val
+; Here we truncate the 64-bit value of LLVM's ctlz intrinsic to 32 bits, the
+; natural return width of ptx's clz.b64 instruction. No conversions should be
+; necessary in the PTX.
+; CHECK-LABEL: myctlz64_as_32(
+define i32 @myctlz64_as_32(i64 %a) {
+; CHECK: ld.param.
+; CHECK-NEXT: clz.b64
+; CHECK-NEXT: st.param.
+; CHECK-NEXT: ret;
+ %val = call i64 @llvm.ctlz.i64(i64 %a, i1 false) readnone
+ %trunc = trunc i64 %val to i32
+ ret i32 %trunc
+}
+; CHECK-LABEL: myctlz64_as_32_2(
+define i32 @myctlz64_as_32_2(i64 %a) {
+; CHECK: ld.param.
+; CHECK-NEXT: clz.b64
+; CHECK-NEXT: st.param.
+; CHECK-NEXT: ret;
+  %val = call i64 @llvm.ctlz.i64(i64 %a, i1 true) readnone
+ %trunc = trunc i64 %val to i32
+ ret i32 %trunc
}
-define i16 @myctpop16_2(i16 %a) {
-; CHECK: clz.b32
+; ctlz.i16 is implemented by extending the input to i32, computing the result,
+; and then truncating the result back down to i16. But the NVPTX ABI
+; zero-extends i16 return values to i32, so the final truncation doesn't appear
+; in this function.
+; CHECK-LABEL: myctlz_ret16(
+define i16 @myctlz_ret16(i16 %a) {
+; CHECK: ld.param.
+; CHECK-NEXT: cvt.u32.u16
+; CHECK-NEXT: clz.b32
+; CHECK-NEXT: sub.
+; CHECK-NEXT: st.param.
+; CHECK-NEXT: ret;
+ %val = call i16 @llvm.ctlz.i16(i16 %a, i1 false) readnone
+ ret i16 %val
+}
+; CHECK-LABEL: myctlz_ret16_2(
+define i16 @myctlz_ret16_2(i16 %a) {
+; CHECK: ld.param.
+; CHECK-NEXT: cvt.u32.u16
+; CHECK-NEXT: clz.b32
+; CHECK-NEXT: sub.
+; CHECK-NEXT: st.param.
+; CHECK-NEXT: ret;
%val = call i16 @llvm.ctlz.i16(i16 %a, i1 true) readnone
ret i16 %val
}
-define i64 @myctpop64_2(i64 %a) {
-; CHECK: clz.b64
- %val = call i64 @llvm.ctlz.i64(i64 %a, i1 true) readnone
- ret i64 %val
+; Here we store the result of ctlz.i16 into an i16 pointer, so the trunc should
+; remain.
+; CHECK-LABEL: myctlz_store16(
+define void @myctlz_store16(i16 %a, i16* %b) {
+; CHECK: ld.param.
+; CHECK-NEXT: cvt.u32.u16
+; CHECK-NEXT: clz.b32
+; CHECK-DAG: cvt.u16.u32
+; CHECK-DAG: sub.
+; CHECK: st.{{[a-z]}}16
+; CHECK: ret;
+ %val = call i16 @llvm.ctlz.i16(i16 %a, i1 false) readnone
+ store i16 %val, i16* %b
+ ret void
+}
+; CHECK-LABEL: myctlz_store16_2(
+define void @myctlz_store16_2(i16 %a, i16* %b) {
+; CHECK: ld.param.
+; CHECK-NEXT: cvt.u32.u16
+; CHECK-NEXT: clz.b32
+; CHECK-DAG: cvt.u16.u32
+; CHECK-DAG: sub.
+; CHECK: st.{{[a-z]}}16
+; CHECK: ret;
+  %val = call i16 @llvm.ctlz.i16(i16 %a, i1 true) readnone
+ store i16 %val, i16* %b
+ ret void
}
diff --git a/test/CodeGen/NVPTX/f16-instructions.ll b/test/CodeGen/NVPTX/f16-instructions.ll
new file mode 100644
index 000000000000..403a67f02f80
--- /dev/null
+++ b/test/CodeGen/NVPTX/f16-instructions.ll
@@ -0,0 +1,1063 @@
+; ## Full FP16 support enabled by default.
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
+; RUN: -O0 -disable-post-ra -disable-fp-elim \
+; RUN: | FileCheck -check-prefixes CHECK,CHECK-F16 %s
+; ## FP16 support explicitly disabled.
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
+; RUN: -O0 -disable-post-ra -disable-fp-elim --nvptx-no-f16-math \
+; RUN: | FileCheck -check-prefixes CHECK,CHECK-NOF16 %s
+; ## FP16 is not supported by hardware.
+; RUN: llc < %s -O0 -mtriple=nvptx64-nvidia-cuda -mcpu=sm_52 -asm-verbose=false \
+; RUN: -disable-post-ra -disable-fp-elim \
+; RUN: | FileCheck -check-prefixes CHECK,CHECK-NOF16 %s
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+; CHECK-LABEL: test_ret_const(
+; CHECK: mov.b16 [[R:%h[0-9]+]], 0x3C00;
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_ret_const() #0 {
+ ret half 1.0
+}
+
+; CHECK-LABEL: test_fadd(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fadd_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fadd_param_1];
+; CHECK-F16-NEXT: add.rn.f16 [[R:%h[0-9]+]], [[A]], [[B]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]];
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_fadd(half %a, half %b) #0 {
+ %r = fadd half %a, %b
+ ret half %r
+}
+
+; Check that we can lower fadd with immediate arguments.
+; CHECK-LABEL: test_fadd_imm_0(
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fadd_imm_0_param_0];
+; CHECK-F16-DAG: mov.b16 [[A:%h[0-9]+]], 0x3C00;
+; CHECK-F16-NEXT: add.rn.f16 [[R:%h[0-9]+]], [[B]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[B32]], 0f3F800000;
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_fadd_imm_0(half %b) #0 {
+ %r = fadd half 1.0, %b
+ ret half %r
+}
+
+; CHECK-LABEL: test_fadd_imm_1(
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fadd_imm_1_param_0];
+; CHECK-F16-DAG: mov.b16 [[A:%h[0-9]+]], 0x3C00;
+; CHECK-F16-NEXT: add.rn.f16 [[R:%h[0-9]+]], [[B]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[B32]], 0f3F800000;
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_fadd_imm_1(half %a) #0 {
+ %r = fadd half %a, 1.0
+ ret half %r
+}
+
+; CHECK-LABEL: test_fsub(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fsub_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fsub_param_1];
+; CHECK-F16-NEXT: sub.rn.f16 [[R:%h[0-9]+]], [[A]], [[B]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; CHECK-NOF16-NEXT: sub.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]];
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_fsub(half %a, half %b) #0 {
+ %r = fsub half %a, %b
+ ret half %r
+}
+
+; CHECK-LABEL: test_fneg(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fneg_param_0];
+; CHECK-F16-NEXT: mov.b16 [[Z:%h[0-9]+]], 0x0000
+; CHECK-F16-NEXT: sub.rn.f16 [[R:%h[0-9]+]], [[Z]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
+; CHECK-NOF16-DAG: mov.f32 [[Z:%f[0-9]+]], 0f00000000;
+; CHECK-NOF16-NEXT: sub.rn.f32 [[R32:%f[0-9]+]], [[Z]], [[A32]];
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_fneg(half %a) #0 {
+ %r = fsub half 0.0, %a
+ ret half %r
+}
+
+; CHECK-LABEL: test_fmul(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fmul_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fmul_param_1];
+; CHECK-F16-NEXT: mul.rn.f16 [[R:%h[0-9]+]], [[A]], [[B]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; CHECK-NOF16-NEXT: mul.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]];
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_fmul(half %a, half %b) #0 {
+ %r = fmul half %a, %b
+ ret half %r
+}
+
+; CHECK-LABEL: test_fdiv(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fdiv_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fdiv_param_1];
+; CHECK-DAG: cvt.f32.f16 [[F0:%f[0-9]+]], [[A]];
+; CHECK-DAG: cvt.f32.f16 [[F1:%f[0-9]+]], [[B]];
+; CHECK-NEXT: div.rn.f32 [[FR:%f[0-9]+]], [[F0]], [[F1]];
+; CHECK-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[FR]];
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_fdiv(half %a, half %b) #0 {
+ %r = fdiv half %a, %b
+ ret half %r
+}
+
+; CHECK-LABEL: test_frem(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_frem_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_frem_param_1];
+; CHECK-DAG: cvt.f32.f16 [[FA:%f[0-9]+]], [[A]];
+; CHECK-DAG: cvt.f32.f16 [[FB:%f[0-9]+]], [[B]];
+; CHECK-NEXT: div.rn.f32 [[D:%f[0-9]+]], [[FA]], [[FB]];
+; CHECK-NEXT: cvt.rmi.f32.f32 [[DI:%f[0-9]+]], [[D]];
+; CHECK-NEXT: mul.f32 [[RI:%f[0-9]+]], [[DI]], [[FB]];
+; CHECK-NEXT: sub.f32 [[RF:%f[0-9]+]], [[FA]], [[RI]];
+; CHECK-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_frem(half %a, half %b) #0 {
+ %r = frem half %a, %b
+ ret half %r
+}
+
+; CHECK-LABEL: test_store(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_store_param_0];
+; CHECK-DAG: ld.param.u64 %[[PTR:rd[0-9]+]], [test_store_param_1];
+; CHECK-NEXT: st.b16 [%[[PTR]]], [[A]];
+; CHECK-NEXT: ret;
+define void @test_store(half %a, half* %b) #0 {
+ store half %a, half* %b
+ ret void
+}
+
+; CHECK-LABEL: test_load(
+; CHECK: ld.param.u64 %[[PTR:rd[0-9]+]], [test_load_param_0];
+; CHECK-NEXT: ld.b16 [[R:%h[0-9]+]], [%[[PTR]]];
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_load(half* %a) #0 {
+ %r = load half, half* %a
+ ret half %r
+}
+
+; CHECK-LABEL: .visible .func test_halfp0a1(
+; CHECK-DAG: ld.param.u64 %[[FROM:rd?[0-9]+]], [test_halfp0a1_param_0];
+; CHECK-DAG: ld.param.u64 %[[TO:rd?[0-9]+]], [test_halfp0a1_param_1];
+; CHECK-DAG: ld.u8 [[B0:%r[sd]?[0-9]+]], [%[[FROM]]]
+; CHECK-DAG: st.u8 [%[[TO]]], [[B0]]
+; CHECK-DAG: ld.u8 [[B1:%r[sd]?[0-9]+]], [%[[FROM]]+1]
+; CHECK-DAG: st.u8 [%[[TO]]+1], [[B1]]
+; CHECK: ret
+define void @test_halfp0a1(half * noalias readonly %from, half * %to) {
+ %1 = load half, half * %from , align 1
+ store half %1, half * %to , align 1
+ ret void
+}
+
+declare half @test_callee(half %a, half %b) #0
+
+; CHECK-LABEL: test_call(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_call_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_call_param_1];
+; CHECK: {
+; CHECK-DAG: .param .b32 param0;
+; CHECK-DAG: .param .b32 param1;
+; CHECK-DAG: st.param.b16 [param0+0], [[A]];
+; CHECK-DAG: st.param.b16 [param1+0], [[B]];
+; CHECK-DAG: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_callee,
+; CHECK: );
+; CHECK-NEXT: ld.param.b16 [[R:%h[0-9]+]], [retval0+0];
+; CHECK-NEXT: }
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_call(half %a, half %b) #0 {
+ %r = call half @test_callee(half %a, half %b)
+ ret half %r
+}
+
+; CHECK-LABEL: test_call_flipped(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_call_flipped_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_call_flipped_param_1];
+; CHECK: {
+; CHECK-DAG: .param .b32 param0;
+; CHECK-DAG: .param .b32 param1;
+; CHECK-DAG: st.param.b16 [param0+0], [[B]];
+; CHECK-DAG: st.param.b16 [param1+0], [[A]];
+; CHECK-DAG: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_callee,
+; CHECK: );
+; CHECK-NEXT: ld.param.b16 [[R:%h[0-9]+]], [retval0+0];
+; CHECK-NEXT: }
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_call_flipped(half %a, half %b) #0 {
+ %r = call half @test_callee(half %b, half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_tailcall_flipped(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_tailcall_flipped_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_tailcall_flipped_param_1];
+; CHECK: {
+; CHECK-DAG: .param .b32 param0;
+; CHECK-DAG: .param .b32 param1;
+; CHECK-DAG: st.param.b16 [param0+0], [[B]];
+; CHECK-DAG: st.param.b16 [param1+0], [[A]];
+; CHECK-DAG: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_callee,
+; CHECK: );
+; CHECK-NEXT: ld.param.b16 [[R:%h[0-9]+]], [retval0+0];
+; CHECK-NEXT: }
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_tailcall_flipped(half %a, half %b) #0 {
+ %r = tail call half @test_callee(half %b, half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_select(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_select_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_select_param_1];
+; CHECK-DAG: setp.eq.b16 [[PRED:%p[0-9]+]], %rs{{.*}}, 1;
+; CHECK-NEXT: selp.b16 [[R:%h[0-9]+]], [[A]], [[B]], [[PRED]];
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_select(half %a, half %b, i1 zeroext %c) #0 {
+ %r = select i1 %c, half %a, half %b
+ ret half %r
+}
+
+; CHECK-LABEL: test_select_cc(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_select_cc_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_select_cc_param_1];
+; CHECK-DAG: ld.param.b16 [[C:%h[0-9]+]], [test_select_cc_param_2];
+; CHECK-DAG: ld.param.b16 [[D:%h[0-9]+]], [test_select_cc_param_3];
+; CHECK-F16: setp.neu.f16 [[PRED:%p[0-9]+]], [[C]], [[D]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[DF:%f[0-9]+]], [[D]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[CF:%f[0-9]+]], [[C]];
+; CHECK-NOF16: setp.neu.f32 [[PRED:%p[0-9]+]], [[CF]], [[DF]]
+; CHECK: selp.b16 [[R:%h[0-9]+]], [[A]], [[B]], [[PRED]];
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_select_cc(half %a, half %b, half %c, half %d) #0 {
+ %cc = fcmp une half %c, %d
+ %r = select i1 %cc, half %a, half %b
+ ret half %r
+}
+
+; CHECK-LABEL: test_select_cc_f32_f16(
+; CHECK-DAG: ld.param.f32 [[A:%f[0-9]+]], [test_select_cc_f32_f16_param_0];
+; CHECK-DAG: ld.param.f32 [[B:%f[0-9]+]], [test_select_cc_f32_f16_param_1];
+; CHECK-DAG: ld.param.b16 [[C:%h[0-9]+]], [test_select_cc_f32_f16_param_2];
+; CHECK-DAG: ld.param.b16 [[D:%h[0-9]+]], [test_select_cc_f32_f16_param_3];
+; CHECK-F16: setp.neu.f16 [[PRED:%p[0-9]+]], [[C]], [[D]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[DF:%f[0-9]+]], [[D]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[CF:%f[0-9]+]], [[C]];
+; CHECK-NOF16: setp.neu.f32 [[PRED:%p[0-9]+]], [[CF]], [[DF]]
+; CHECK-NEXT: selp.f32 [[R:%f[0-9]+]], [[A]], [[B]], [[PRED]];
+; CHECK-NEXT: st.param.f32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define float @test_select_cc_f32_f16(float %a, float %b, half %c, half %d) #0 {
+ %cc = fcmp une half %c, %d
+ %r = select i1 %cc, float %a, float %b
+ ret float %r
+}
+
+; CHECK-LABEL: test_select_cc_f16_f32(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_select_cc_f16_f32_param_0];
+; CHECK-DAG: ld.param.f32 [[C:%f[0-9]+]], [test_select_cc_f16_f32_param_2];
+; CHECK-DAG: ld.param.f32 [[D:%f[0-9]+]], [test_select_cc_f16_f32_param_3];
+; CHECK-DAG: setp.neu.f32 [[PRED:%p[0-9]+]], [[C]], [[D]]
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_select_cc_f16_f32_param_1];
+; CHECK-NEXT: selp.b16 [[R:%h[0-9]+]], [[A]], [[B]], [[PRED]];
+; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define half @test_select_cc_f16_f32(half %a, half %b, float %c, float %d) #0 {
+ %cc = fcmp une float %c, %d
+ %r = select i1 %cc, half %a, half %b
+ ret half %r
+}
+
+; CHECK-LABEL: test_fcmp_une(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_une_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_une_param_1];
+; CHECK-F16: setp.neu.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.neu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_une(half %a, half %b) #0 {
+ %r = fcmp une half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_ueq(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ueq_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ueq_param_1];
+; CHECK-F16: setp.equ.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.equ.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_ueq(half %a, half %b) #0 {
+ %r = fcmp ueq half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_ugt(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ugt_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ugt_param_1];
+; CHECK-F16: setp.gtu.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.gtu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_ugt(half %a, half %b) #0 {
+ %r = fcmp ugt half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_uge(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_uge_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_uge_param_1];
+; CHECK-F16: setp.geu.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.geu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_uge(half %a, half %b) #0 {
+ %r = fcmp uge half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_ult(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ult_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ult_param_1];
+; CHECK-F16: setp.ltu.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.ltu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_ult(half %a, half %b) #0 {
+ %r = fcmp ult half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_ule(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ule_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ule_param_1];
+; CHECK-F16: setp.leu.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.leu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_ule(half %a, half %b) #0 {
+ %r = fcmp ule half %a, %b
+ ret i1 %r
+}
+
+
+; CHECK-LABEL: test_fcmp_uno(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_uno_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_uno_param_1];
+; CHECK-F16: setp.nan.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.nan.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_uno(half %a, half %b) #0 {
+ %r = fcmp uno half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_one(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_one_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_one_param_1];
+; CHECK-F16: setp.ne.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.ne.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_one(half %a, half %b) #0 {
+ %r = fcmp one half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_oeq(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_oeq_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_oeq_param_1];
+; CHECK-F16: setp.eq.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.eq.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_oeq(half %a, half %b) #0 {
+ %r = fcmp oeq half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_ogt(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ogt_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ogt_param_1];
+; CHECK-F16: setp.gt.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.gt.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_ogt(half %a, half %b) #0 {
+ %r = fcmp ogt half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_oge(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_oge_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_oge_param_1];
+; CHECK-F16: setp.ge.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.ge.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_oge(half %a, half %b) #0 {
+ %r = fcmp oge half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_olt(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_olt_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_olt_param_1];
+; CHECK-F16: setp.lt.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.lt.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_olt(half %a, half %b) #0 {
+ %r = fcmp olt half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_ole(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ole_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ole_param_1];
+; CHECK-F16: setp.le.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.le.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_ole(half %a, half %b) #0 {
+ %r = fcmp ole half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_fcmp_ord(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fcmp_ord_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fcmp_ord_param_1];
+; CHECK-F16: setp.num.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.num.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i1 @test_fcmp_ord(half %a, half %b) #0 {
+ %r = fcmp ord half %a, %b
+ ret i1 %r
+}
+
+; CHECK-LABEL: test_br_cc(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_br_cc_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_br_cc_param_1];
+; CHECK-DAG: ld.param.u64 %[[C:rd[0-9]+]], [test_br_cc_param_2];
+; CHECK-DAG: ld.param.u64 %[[D:rd[0-9]+]], [test_br_cc_param_3];
+; CHECK-F16: setp.lt.f16 [[PRED:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK-NOF16: setp.lt.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]]
+; CHECK-NEXT: @[[PRED]] bra [[LABEL:LBB.*]];
+; CHECK: st.u32 [%[[C]]],
+; CHECK: [[LABEL]]:
+; CHECK: st.u32 [%[[D]]],
+; CHECK: ret;
+define void @test_br_cc(half %a, half %b, i32* %p1, i32* %p2) #0 {
+ %c = fcmp uge half %a, %b
+ br i1 %c, label %then, label %else
+then:
+ store i32 0, i32* %p1
+ ret void
+else:
+ store i32 0, i32* %p2
+ ret void
+}
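+; (Editorial note on the checks above: the IR branches on fcmp uge, but the
+; matched PTX tests the negation setp.lt, i.e. olt, and branches to the
+; else-block when it holds, leaving the then-block as the fall-through.)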
+
+; CHECK-LABEL: test_phi(
+; CHECK: ld.param.u64 %[[P1:rd[0-9]+]], [test_phi_param_0];
+; CHECK: ld.b16 {{%h[0-9]+}}, [%[[P1]]];
+; CHECK: [[LOOP:LBB[0-9_]+]]:
+; CHECK: mov.b16 [[R:%h[0-9]+]], [[AB:%h[0-9]+]];
+; CHECK: ld.b16 [[AB:%h[0-9]+]], [%[[P1]]];
+; CHECK: {
+; CHECK: st.param.b64 [param0+0], %[[P1]];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_dummy
+; CHECK: }
+; CHECK: setp.eq.b32 [[PRED:%p[0-9]+]], %r{{[0-9]+}}, 1;
+; CHECK: @[[PRED]] bra [[LOOP]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_phi(half* %p1) #0 {
+entry:
+ %a = load half, half* %p1
+ br label %loop
+loop:
+ %r = phi half [%a, %entry], [%b, %loop]
+ %b = load half, half* %p1
+ %c = call i1 @test_dummy(half* %p1)
+ br i1 %c, label %loop, label %return
+return:
+ ret half %r
+}
+declare i1 @test_dummy(half* %p1) #0
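+; (A reading of the test_phi checks above, not an extra check: mov.b16
+; [[R]], [[AB]] carries the previous iteration's loaded value into the phi
+; result before ld.b16 refills [[AB]], rotating the phi across iterations.)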
+
+; CHECK-LABEL: test_fptosi_i32(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fptosi_i32_param_0];
+; CHECK: cvt.rzi.s32.f16 [[R:%r[0-9]+]], [[A]];
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define i32 @test_fptosi_i32(half %a) #0 {
+ %r = fptosi half %a to i32
+ ret i32 %r
+}
+
+; CHECK-LABEL: test_fptosi_i64(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fptosi_i64_param_0];
+; CHECK: cvt.rzi.s64.f16 [[R:%rd[0-9]+]], [[A]];
+; CHECK: st.param.b64 [func_retval0+0], [[R]];
+; CHECK: ret;
+define i64 @test_fptosi_i64(half %a) #0 {
+ %r = fptosi half %a to i64
+ ret i64 %r
+}
+
+; CHECK-LABEL: test_fptoui_i32(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fptoui_i32_param_0];
+; CHECK: cvt.rzi.u32.f16 [[R:%r[0-9]+]], [[A]];
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define i32 @test_fptoui_i32(half %a) #0 {
+ %r = fptoui half %a to i32
+ ret i32 %r
+}
+
+; CHECK-LABEL: test_fptoui_i64(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fptoui_i64_param_0];
+; CHECK: cvt.rzi.u64.f16 [[R:%rd[0-9]+]], [[A]];
+; CHECK: st.param.b64 [func_retval0+0], [[R]];
+; CHECK: ret;
+define i64 @test_fptoui_i64(half %a) #0 {
+ %r = fptoui half %a to i64
+ ret i64 %r
+}
+
+; CHECK-LABEL: test_uitofp_i32(
+; CHECK: ld.param.u32 [[A:%r[0-9]+]], [test_uitofp_i32_param_0];
+; CHECK: cvt.rn.f16.u32 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_uitofp_i32(i32 %a) #0 {
+ %r = uitofp i32 %a to half
+ ret half %r
+}
+
+; CHECK-LABEL: test_uitofp_i64(
+; CHECK: ld.param.u64 [[A:%rd[0-9]+]], [test_uitofp_i64_param_0];
+; CHECK: cvt.rn.f16.u64 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_uitofp_i64(i64 %a) #0 {
+ %r = uitofp i64 %a to half
+ ret half %r
+}
+
+; CHECK-LABEL: test_sitofp_i32(
+; CHECK: ld.param.u32 [[A:%r[0-9]+]], [test_sitofp_i32_param_0];
+; CHECK: cvt.rn.f16.s32 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_sitofp_i32(i32 %a) #0 {
+ %r = sitofp i32 %a to half
+ ret half %r
+}
+
+; CHECK-LABEL: test_sitofp_i64(
+; CHECK: ld.param.u64 [[A:%rd[0-9]+]], [test_sitofp_i64_param_0];
+; CHECK: cvt.rn.f16.s64 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_sitofp_i64(i64 %a) #0 {
+ %r = sitofp i64 %a to half
+ ret half %r
+}
+
+; CHECK-LABEL: test_uitofp_i32_fadd(
+; CHECK-DAG: ld.param.u32 [[A:%r[0-9]+]], [test_uitofp_i32_fadd_param_0];
+; CHECK-DAG: cvt.rn.f16.u32 [[C:%h[0-9]+]], [[A]];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_uitofp_i32_fadd_param_1];
+; CHECK-F16: add.rn.f16 [[R:%h[0-9]+]], [[B]], [[C]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[C32:%f[0-9]+]], [[C]]
+; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[B32]], [[C32]];
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_uitofp_i32_fadd(i32 %a, half %b) #0 {
+ %c = uitofp i32 %a to half
+ %r = fadd half %b, %c
+ ret half %r
+}
+
+; CHECK-LABEL: test_sitofp_i32_fadd(
+; CHECK-DAG: ld.param.u32 [[A:%r[0-9]+]], [test_sitofp_i32_fadd_param_0];
+; CHECK-DAG: cvt.rn.f16.s32 [[C:%h[0-9]+]], [[A]];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_sitofp_i32_fadd_param_1];
+; CHECK-F16: add.rn.f16 [[R:%h[0-9]+]], [[B]], [[C]];
+; XCHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; XCHECK-NOF16-DAG: cvt.f32.f16 [[C32:%f[0-9]+]], [[C]]
+; XCHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[B32]], [[C32]];
+; XCHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_sitofp_i32_fadd(i32 %a, half %b) #0 {
+ %c = sitofp i32 %a to half
+ %r = fadd half %b, %c
+ ret half %r
+}
+
+; CHECK-LABEL: test_fptrunc_float(
+; CHECK: ld.param.f32 [[A:%f[0-9]+]], [test_fptrunc_float_param_0];
+; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_fptrunc_float(float %a) #0 {
+ %r = fptrunc float %a to half
+ ret half %r
+}
+
+; CHECK-LABEL: test_fptrunc_double(
+; CHECK: ld.param.f64 [[A:%fd[0-9]+]], [test_fptrunc_double_param_0];
+; CHECK: cvt.rn.f16.f64 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_fptrunc_double(double %a) #0 {
+ %r = fptrunc double %a to half
+ ret half %r
+}
+
+; CHECK-LABEL: test_fpext_float(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fpext_float_param_0];
+; CHECK: cvt.f32.f16 [[R:%f[0-9]+]], [[A]];
+; CHECK: st.param.f32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define float @test_fpext_float(half %a) #0 {
+ %r = fpext half %a to float
+ ret float %r
+}
+
+; CHECK-LABEL: test_fpext_double(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fpext_double_param_0];
+; CHECK: cvt.f64.f16 [[R:%fd[0-9]+]], [[A]];
+; CHECK: st.param.f64 [func_retval0+0], [[R]];
+; CHECK: ret;
+define double @test_fpext_double(half %a) #0 {
+ %r = fpext half %a to double
+ ret double %r
+}
+
+
+; CHECK-LABEL: test_bitcast_halftoi16(
+; CHECK: ld.param.b16 [[AH:%h[0-9]+]], [test_bitcast_halftoi16_param_0];
+; CHECK: mov.b16 [[AS:%rs[0-9]+]], [[AH]]
+; CHECK: cvt.u32.u16 [[R:%r[0-9]+]], [[AS]]
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define i16 @test_bitcast_halftoi16(half %a) #0 {
+ %r = bitcast half %a to i16
+ ret i16 %r
+}
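+; (Note: NVPTX returns i16 in a .b32 return slot, which is why the half bits
+; are moved to a 16-bit integer register and then widened with cvt.u32.u16
+; before the st.param.b32 above.)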
+
+; CHECK-LABEL: test_bitcast_i16tohalf(
+; CHECK: ld.param.u16 [[AS:%rs[0-9]+]], [test_bitcast_i16tohalf_param_0];
+; CHECK: mov.b16 [[AH:%h[0-9]+]], [[AS]]
+; CHECK: st.param.b16 [func_retval0+0], [[AH]];
+; CHECK: ret;
+define half @test_bitcast_i16tohalf(i16 %a) #0 {
+ %r = bitcast i16 %a to half
+ ret half %r
+}
+
+
+declare half @llvm.sqrt.f16(half %a) #0
+declare half @llvm.powi.f16(half %a, i32 %b) #0
+declare half @llvm.sin.f16(half %a) #0
+declare half @llvm.cos.f16(half %a) #0
+declare half @llvm.pow.f16(half %a, half %b) #0
+declare half @llvm.exp.f16(half %a) #0
+declare half @llvm.exp2.f16(half %a) #0
+declare half @llvm.log.f16(half %a) #0
+declare half @llvm.log10.f16(half %a) #0
+declare half @llvm.log2.f16(half %a) #0
+declare half @llvm.fma.f16(half %a, half %b, half %c) #0
+declare half @llvm.fabs.f16(half %a) #0
+declare half @llvm.minnum.f16(half %a, half %b) #0
+declare half @llvm.maxnum.f16(half %a, half %b) #0
+declare half @llvm.copysign.f16(half %a, half %b) #0
+declare half @llvm.floor.f16(half %a) #0
+declare half @llvm.ceil.f16(half %a) #0
+declare half @llvm.trunc.f16(half %a) #0
+declare half @llvm.rint.f16(half %a) #0
+declare half @llvm.nearbyint.f16(half %a) #0
+declare half @llvm.round.f16(half %a) #0
+declare half @llvm.fmuladd.f16(half %a, half %b, half %c) #0
+
+; CHECK-LABEL: test_sqrt(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_sqrt_param_0];
+; CHECK: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK: sqrt.rn.f32 [[RF:%f[0-9]+]], [[AF]];
+; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_sqrt(half %a) #0 {
+ %r = call half @llvm.sqrt.f16(half %a)
+ ret half %r
+}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_powi(
+;define half @test_powi(half %a, i32 %b) #0 {
+; %r = call half @llvm.powi.f16(half %a, i32 %b)
+; ret half %r
+;}
+
+; CHECK-LABEL: test_sin(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_sin_param_0];
+; CHECK: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK: sin.approx.f32 [[RF:%f[0-9]+]], [[AF]];
+; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_sin(half %a) #0 #1 {
+ %r = call half @llvm.sin.f16(half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_cos(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_cos_param_0];
+; CHECK: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK: cos.approx.f32 [[RF:%f[0-9]+]], [[AF]];
+; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_cos(half %a) #0 #1 {
+ %r = call half @llvm.cos.f16(half %a)
+ ret half %r
+}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_pow(
+;define half @test_pow(half %a, half %b) #0 {
+; %r = call half @llvm.pow.f16(half %a, half %b)
+; ret half %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_exp(
+;define half @test_exp(half %a) #0 {
+; %r = call half @llvm.exp.f16(half %a)
+; ret half %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_exp2(
+;define half @test_exp2(half %a) #0 {
+; %r = call half @llvm.exp2.f16(half %a)
+; ret half %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_log(
+;define half @test_log(half %a) #0 {
+; %r = call half @llvm.log.f16(half %a)
+; ret half %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_log10(
+;define half @test_log10(half %a) #0 {
+; %r = call half @llvm.log10.f16(half %a)
+; ret half %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_log2(
+;define half @test_log2(half %a) #0 {
+; %r = call half @llvm.log2.f16(half %a)
+; ret half %r
+;}
+
+; CHECK-LABEL: test_fma(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fma_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fma_param_1];
+; CHECK-DAG: ld.param.b16 [[C:%h[0-9]+]], [test_fma_param_2];
+; CHECK-F16: fma.rn.f16 [[R:%h[0-9]+]], [[A]], [[B]], [[C]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[C32:%f[0-9]+]], [[C]]
+; CHECK-NOF16-NEXT: fma.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]], [[C32]];
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret
+define half @test_fma(half %a, half %b, half %c) #0 {
+ %r = call half @llvm.fma.f16(half %a, half %b, half %c)
+ ret half %r
+}
+
+; CHECK-LABEL: test_fabs(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_fabs_param_0];
+; CHECK: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK: abs.f32 [[RF:%f[0-9]+]], [[AF]];
+; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_fabs(half %a) #0 {
+ %r = call half @llvm.fabs.f16(half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_minnum(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_minnum_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_minnum_param_1];
+; CHECK-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK: min.f32 [[RF:%f[0-9]+]], [[AF]], [[BF]];
+; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_minnum(half %a, half %b) #0 {
+ %r = call half @llvm.minnum.f16(half %a, half %b)
+ ret half %r
+}
+
+; CHECK-LABEL: test_maxnum(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_maxnum_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_maxnum_param_1];
+; CHECK-DAG: cvt.f32.f16 [[AF:%f[0-9]+]], [[A]];
+; CHECK-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]];
+; CHECK: max.f32 [[RF:%f[0-9]+]], [[AF]], [[BF]];
+; CHECK: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[RF]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_maxnum(half %a, half %b) #0 {
+ %r = call half @llvm.maxnum.f16(half %a, half %b)
+ ret half %r
+}
+
+; CHECK-LABEL: test_copysign(
+; CHECK-DAG: ld.param.b16 [[AH:%h[0-9]+]], [test_copysign_param_0];
+; CHECK-DAG: ld.param.b16 [[BH:%h[0-9]+]], [test_copysign_param_1];
+; CHECK-DAG: mov.b16 [[AS:%rs[0-9]+]], [[AH]];
+; CHECK-DAG: mov.b16 [[BS:%rs[0-9]+]], [[BH]];
+; CHECK-DAG: and.b16 [[AX:%rs[0-9]+]], [[AS]], 32767;
+; CHECK-DAG: and.b16 [[BX:%rs[0-9]+]], [[BS]], -32768;
+; CHECK: or.b16 [[RX:%rs[0-9]+]], [[AX]], [[BX]];
+; CHECK: mov.b16 [[R:%h[0-9]+]], [[RX]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_copysign(half %a, half %b) #0 {
+ %r = call half @llvm.copysign.f16(half %a, half %b)
+ ret half %r
+}
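+; For reference, a scalar IR sketch of the bit-level expansion matched above
+; (illustrative only, not part of the test; the value names are hypothetical):
+;   %ai = bitcast half %a to i16
+;   %bi = bitcast half %b to i16
+;   %am = and i16 %ai, 32767          ; 0x7FFF: clear a's sign bit (bit 15)
+;   %bs = and i16 %bi, -32768         ; 0x8000: keep only b's sign bit
+;   %ri = or i16 %am, %bs
+;   %r  = bitcast i16 %ri to half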
+
+; CHECK-LABEL: test_copysign_f32(
+; CHECK-DAG: ld.param.b16 [[AH:%h[0-9]+]], [test_copysign_f32_param_0];
+; CHECK-DAG: ld.param.f32 [[BF:%f[0-9]+]], [test_copysign_f32_param_1];
+; CHECK-DAG: mov.b16 [[A:%rs[0-9]+]], [[AH]];
+; CHECK-DAG: mov.b32 [[B:%r[0-9]+]], [[BF]];
+; CHECK-DAG: and.b16 [[AX:%rs[0-9]+]], [[A]], 32767;
+; CHECK-DAG: and.b32 [[BX0:%r[0-9]+]], [[B]], -2147483648;
+; CHECK-DAG: shr.u32 [[BX1:%r[0-9]+]], [[BX0]], 16;
+; CHECK-DAG: cvt.u16.u32 [[BX2:%rs[0-9]+]], [[BX1]];
+; CHECK: or.b16 [[RX:%rs[0-9]+]], [[AX]], [[BX2]];
+; CHECK: mov.b16 [[R:%h[0-9]+]], [[RX]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_copysign_f32(half %a, float %b) #0 {
+ %tb = fptrunc float %b to half
+ %r = call half @llvm.copysign.f16(half %a, half %tb)
+ ret half %r
+}
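+; (Note: -2147483648 == 0x80000000 masks the f32 sign bit, bit 31; the
+; shr.u32 by 16 moves it down to bit 15, where the f16 sign bit lives.
+; test_copysign_f64 below does the same with bit 63 and a shift of 48.)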
+
+; CHECK-LABEL: test_copysign_f64(
+; CHECK-DAG: ld.param.b16 [[AH:%h[0-9]+]], [test_copysign_f64_param_0];
+; CHECK-DAG: ld.param.f64 [[BD:%fd[0-9]+]], [test_copysign_f64_param_1];
+; CHECK-DAG: mov.b16 [[A:%rs[0-9]+]], [[AH]];
+; CHECK-DAG: mov.b64 [[B:%rd[0-9]+]], [[BD]];
+; CHECK-DAG: and.b16 [[AX:%rs[0-9]+]], [[A]], 32767;
+; CHECK-DAG: and.b64 [[BX0:%rd[0-9]+]], [[B]], -9223372036854775808;
+; CHECK-DAG: shr.u64 [[BX1:%rd[0-9]+]], [[BX0]], 48;
+; CHECK-DAG: cvt.u16.u64 [[BX2:%rs[0-9]+]], [[BX1]];
+; CHECK: or.b16 [[RX:%rs[0-9]+]], [[AX]], [[BX2]];
+; CHECK: mov.b16 [[R:%h[0-9]+]], [[RX]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_copysign_f64(half %a, double %b) #0 {
+ %tb = fptrunc double %b to half
+ %r = call half @llvm.copysign.f16(half %a, half %tb)
+ ret half %r
+}
+
+; CHECK-LABEL: test_copysign_extended(
+; CHECK-DAG: ld.param.b16 [[AH:%h[0-9]+]], [test_copysign_extended_param_0];
+; CHECK-DAG: ld.param.b16 [[BH:%h[0-9]+]], [test_copysign_extended_param_1];
+; CHECK-DAG: mov.b16 [[AS:%rs[0-9]+]], [[AH]];
+; CHECK-DAG: mov.b16 [[BS:%rs[0-9]+]], [[BH]];
+; CHECK-DAG: and.b16 [[AX:%rs[0-9]+]], [[AS]], 32767;
+; CHECK-DAG: and.b16 [[BX:%rs[0-9]+]], [[BS]], -32768;
+; CHECK: or.b16 [[RX:%rs[0-9]+]], [[AX]], [[BX]];
+; CHECK: mov.b16 [[R:%h[0-9]+]], [[RX]];
+; CHECK: cvt.f32.f16 [[XR:%f[0-9]+]], [[R]];
+; CHECK: st.param.f32 [func_retval0+0], [[XR]];
+; CHECK: ret;
+define float @test_copysign_extended(half %a, half %b) #0 {
+ %r = call half @llvm.copysign.f16(half %a, half %b)
+ %xr = fpext half %r to float
+ ret float %xr
+}
+
+; CHECK-LABEL: test_floor(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_floor_param_0];
+; CHECK: cvt.rmi.f16.f16 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_floor(half %a) #0 {
+ %r = call half @llvm.floor.f16(half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_ceil(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_ceil_param_0];
+; CHECK: cvt.rpi.f16.f16 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_ceil(half %a) #0 {
+ %r = call half @llvm.ceil.f16(half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_trunc(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_trunc_param_0];
+; CHECK: cvt.rzi.f16.f16 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_trunc(half %a) #0 {
+ %r = call half @llvm.trunc.f16(half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_rint(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_rint_param_0];
+; CHECK: cvt.rni.f16.f16 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_rint(half %a) #0 {
+ %r = call half @llvm.rint.f16(half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_nearbyint(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_nearbyint_param_0];
+; CHECK: cvt.rni.f16.f16 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_nearbyint(half %a) #0 {
+ %r = call half @llvm.nearbyint.f16(half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_round(
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_round_param_0];
+; CHECK: cvt.rni.f16.f16 [[R:%h[0-9]+]], [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_round(half %a) #0 {
+ %r = call half @llvm.round.f16(half %a)
+ ret half %r
+}
+
+; CHECK-LABEL: test_fmuladd(
+; CHECK-DAG: ld.param.b16 [[A:%h[0-9]+]], [test_fmuladd_param_0];
+; CHECK-DAG: ld.param.b16 [[B:%h[0-9]+]], [test_fmuladd_param_1];
+; CHECK-DAG: ld.param.b16 [[C:%h[0-9]+]], [test_fmuladd_param_2];
+; CHECK-F16: fma.rn.f16 [[R:%h[0-9]+]], [[A]], [[B]], [[C]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[A32:%f[0-9]+]], [[A]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[C32:%f[0-9]+]], [[C]]
+; CHECK-NOF16-NEXT: fma.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]], [[C32]];
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%h[0-9]+]], [[R32]]
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_fmuladd(half %a, half %b, half %c) #0 {
+ %r = call half @llvm.fmuladd.f16(half %a, half %b, half %c)
+ ret half %r
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { "unsafe-fp-math" = "true" }
diff --git a/test/CodeGen/NVPTX/f16x2-instructions.ll b/test/CodeGen/NVPTX/f16x2-instructions.ll
new file mode 100644
index 000000000000..33bb616d895c
--- /dev/null
+++ b/test/CodeGen/NVPTX/f16x2-instructions.ll
@@ -0,0 +1,1426 @@
+; ## Full FP16 support enabled by default.
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
+; RUN: -O0 -disable-post-ra -disable-fp-elim \
+; RUN: | FileCheck -check-prefixes CHECK,CHECK-F16 %s
+; ## FP16 support explicitly disabled.
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
+; RUN: -O0 -disable-post-ra -disable-fp-elim --nvptx-no-f16-math \
+; RUN: | FileCheck -check-prefixes CHECK,CHECK-NOF16 %s
+; ## FP16 is not supported by hardware.
+; RUN: llc < %s -O0 -mtriple=nvptx64-nvidia-cuda -mcpu=sm_52 -asm-verbose=false \
+; RUN: -disable-post-ra -disable-fp-elim \
+; RUN: | FileCheck -check-prefixes CHECK,CHECK-NOF16 %s
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+; CHECK-LABEL: test_ret_const(
+; CHECK: mov.u32 [[T:%r[0-9]+]], 1073757184;
+; CHECK: mov.b32 [[R:%hh[0-9]+]], [[T]];
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_ret_const() #0 {
+ ret <2 x half> <half 1.0, half 2.0>
+}
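+; (Note: 1073757184 == 0x40003C00, i.e. <half 1.0 (0x3C00), half 2.0 (0x4000)>
+; packed into a single 32-bit immediate with element 0 in the low half-word.)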
+
+; CHECK-LABEL: test_extract_0(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_extract_0_param_0];
+; CHECK: mov.b32 {[[R:%h[0-9]+]], %tmp_hi}, [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_extract_0(<2 x half> %a) #0 {
+ %e = extractelement <2 x half> %a, i32 0
+ ret half %e
+}
+
+; CHECK-LABEL: test_extract_1(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_extract_1_param_0];
+; CHECK: mov.b32 {%tmp_lo, [[R:%h[0-9]+]]}, [[A]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_extract_1(<2 x half> %a) #0 {
+ %e = extractelement <2 x half> %a, i32 1
+ ret half %e
+}
+
+; CHECK-LABEL: test_extract_i(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_extract_i_param_0];
+; CHECK-DAG: ld.param.u64 [[IDX:%rd[0-9]+]], [test_extract_i_param_1];
+; CHECK-DAG: setp.eq.s64 [[PRED:%p[0-9]+]], [[IDX]], 0;
+; CHECK-DAG: mov.b32 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]]}, [[A]];
+; CHECK: selp.b16 [[R:%h[0-9]+]], [[E0]], [[E1]], [[PRED]];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ret;
+define half @test_extract_i(<2 x half> %a, i64 %idx) #0 {
+ %e = extractelement <2 x half> %a, i64 %idx
+ ret half %e
+}
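+; (As matched above, a dynamic extract index needs no memory round-trip: both
+; lanes are unpacked and selp.b16 picks lane 0 exactly when idx == 0.)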
+
+; CHECK-LABEL: test_fadd(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fadd_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fadd_param_1];
+;
+; CHECK-F16-NEXT: add.rn.f16x2 [[R:%hh[0-9]+]], [[A]], [[B]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: add.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], [[FB0]];
+; CHECK-NOF16-DAG: add.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], [[FB1]];
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+;
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_fadd(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fadd <2 x half> %a, %b
+ ret <2 x half> %r
+}
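+; (The CHECK-NOF16 pattern above is the generic fallback matched throughout
+; this file: unpack the f16x2 pair with mov.b32, widen each lane to f32 with
+; cvt.f32.f16, do the operation in f32, narrow with cvt.rn.f16.f32, repack.)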
+
+; Check that we can lower fadd with immediate arguments.
+; CHECK-LABEL: test_fadd_imm_0(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fadd_imm_0_param_0];
+;
+; CHECK-F16: mov.u32 [[I:%r[0-9]+]], 1073757184;
+; CHECK-F16: mov.b32 [[IHH:%hh[0-9]+]], [[I]];
+; CHECK-F16: add.rn.f16x2 [[R:%hh[0-9]+]], [[A]], [[IHH]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: add.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], 0f3F800000;
+; CHECK-NOF16-DAG: add.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], 0f40000000;
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+;
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_fadd_imm_0(<2 x half> %a) #0 {
+ %r = fadd <2 x half> <half 1.0, half 2.0>, %a
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fadd_imm_1(
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fadd_imm_1_param_0];
+;
+; CHECK-F16: mov.u32 [[I:%r[0-9]+]], 1073757184;
+; CHECK-F16: mov.b32 [[IHH:%hh[0-9]+]], [[I]];
+; CHECK-F16: add.rn.f16x2 [[R:%hh[0-9]+]], [[B]], [[IHH]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: add.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], 0f3F800000;
+; CHECK-NOF16-DAG: add.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], 0f40000000;
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+;
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_fadd_imm_1(<2 x half> %a) #0 {
+ %r = fadd <2 x half> %a, <half 1.0, half 2.0>
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fsub(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fsub_param_0];
+;
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fsub_param_1];
+; CHECK-F16-NEXT: sub.rn.f16x2 [[R:%hh[0-9]+]], [[A]], [[B]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: sub.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], [[FB0]];
+; CHECK-NOF16-DAG: sub.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], [[FB1]];
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+;
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_fsub(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fsub <2 x half> %a, %b
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fneg(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fneg_param_0];
+;
+; CHECK-F16: mov.u32 [[I0:%r[0-9]+]], 0;
+; CHECK-F16: mov.b32 [[IHH0:%hh[0-9]+]], [[I0]];
+; CHECK-F16-NEXT: sub.rn.f16x2 [[R:%hh[0-9]+]], [[IHH0]], [[A]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: mov.f32 [[Z:%f[0-9]+]], 0f00000000;
+; CHECK-NOF16-DAG: sub.rn.f32 [[FR0:%f[0-9]+]], [[Z]], [[FA0]];
+; CHECK-NOF16-DAG: sub.rn.f32 [[FR1:%f[0-9]+]], [[Z]], [[FA1]];
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+;
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_fneg(<2 x half> %a) #0 {
+ %r = fsub <2 x half> <half 0.0, half 0.0>, %a
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fmul(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fmul_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fmul_param_1];
+; CHECK-F16-NEXT: mul.rn.f16x2 [[R:%hh[0-9]+]], [[A]], [[B]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: mul.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], [[FB0]];
+; CHECK-NOF16-DAG: mul.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], [[FB1]];
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+;
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_fmul(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fmul <2 x half> %a, %b
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fdiv(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fdiv_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fdiv_param_1];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]];
+; CHECK-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]];
+; CHECK-DAG: div.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], [[FB0]];
+; CHECK-DAG: div.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], [[FB1]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]];
+; CHECK-NEXT: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_fdiv(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fdiv <2 x half> %a, %b
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_frem(
+; -- Load two f16x2 inputs.
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_frem_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_frem_param_1];
+; -- Split into elements
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; -- promote to f32.
+; CHECK-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]];
+; CHECK-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]];
+; -- frem(a[0],b[0]).
+; CHECK-DAG: div.rn.f32 [[FD0:%f[0-9]+]], [[FA0]], [[FB0]];
+; CHECK-DAG: cvt.rmi.f32.f32 [[DI0:%f[0-9]+]], [[FD0]];
+; CHECK-DAG: mul.f32 [[RI0:%f[0-9]+]], [[DI0]], [[FB0]];
+; CHECK-DAG: sub.f32 [[RF0:%f[0-9]+]], [[FA0]], [[RI0]];
+; -- frem(a[1],b[1]).
+; CHECK-DAG: div.rn.f32 [[FD1:%f[0-9]+]], [[FA1]], [[FB1]];
+; CHECK-DAG: cvt.rmi.f32.f32 [[DI1:%f[0-9]+]], [[FD1]];
+; CHECK-DAG: mul.f32 [[RI1:%f[0-9]+]], [[DI1]], [[FB1]];
+; CHECK-DAG: sub.f32 [[RF1:%f[0-9]+]], [[FA1]], [[RI1]];
+; -- convert back to f16.
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
+; -- merge into f16x2 and return it.
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_frem(<2 x half> %a, <2 x half> %b) #0 {
+ %r = frem <2 x half> %a, %b
+ ret <2 x half> %r
+}
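+; A scalar IR sketch of the per-lane expansion matched above (illustrative
+; only; cvt.rmi rounds toward -inf, i.e. floor), computing a - floor(a/b)*b:
+;   %d = fdiv float %fa, %fb
+;   %i = call float @llvm.floor.f32(float %d)
+;   %m = fmul float %i, %fb
+;   %r = fsub float %fa, %m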
+
+; CHECK-LABEL: .func test_ldst_v2f16(
+; CHECK-DAG: ld.param.u64 %[[A:rd[0-9]+]], [test_ldst_v2f16_param_0];
+; CHECK-DAG: ld.param.u64 %[[B:rd[0-9]+]], [test_ldst_v2f16_param_1];
+; CHECK-DAG: ld.b32 [[E:%hh[0-9]+]], [%[[A]]]
+; CHECK: mov.b32 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]]}, [[E]];
+; CHECK-DAG: st.v2.b16 [%[[B]]], {[[E0]], [[E1]]};
+; CHECK: ret;
+define void @test_ldst_v2f16(<2 x half>* %a, <2 x half>* %b) {
+ %t1 = load <2 x half>, <2 x half>* %a
+ store <2 x half> %t1, <2 x half>* %b, align 16
+ ret void
+}
+
+; CHECK-LABEL: .func test_ldst_v3f16(
+; CHECK-DAG: ld.param.u64 %[[A:rd[0-9]+]], [test_ldst_v3f16_param_0];
+; CHECK-DAG: ld.param.u64 %[[B:rd[0-9]+]], [test_ldst_v3f16_param_1];
+; -- v3 is inconvenient to capture as it's lowered to ld.b64 plus a fair
+; number of bit-shifting instructions that may change at LLVM's whim.
+; So we only verify that we issue the correct number of stores at the
+; correct offsets, not the values we store.
+; CHECK-DAG: ld.u64
+; CHECK-DAG: st.u32 [%[[B]]],
+; CHECK-DAG: st.b16 [%[[B]]+4],
+; CHECK: ret;
+define void @test_ldst_v3f16(<3 x half>* %a, <3 x half>* %b) {
+ %t1 = load <3 x half>, <3 x half>* %a
+ store <3 x half> %t1, <3 x half>* %b, align 16
+ ret void
+}
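+; (Byte accounting for the checks above: the 6 bytes of <3 x half> are stored
+; as one 4-byte st.u32 at offset 0 plus one 2-byte st.b16 at offset 4.)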
+
+; CHECK-LABEL: .func test_ldst_v4f16(
+; CHECK-DAG: ld.param.u64 %[[A:rd[0-9]+]], [test_ldst_v4f16_param_0];
+; CHECK-DAG: ld.param.u64 %[[B:rd[0-9]+]], [test_ldst_v4f16_param_1];
+; CHECK-DAG: ld.v4.b16 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]], [[E2:%h[0-9]+]], [[E3:%h[0-9]+]]}, [%[[A]]];
+; CHECK-DAG: st.v4.b16 [%[[B]]], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK: ret;
+define void @test_ldst_v4f16(<4 x half>* %a, <4 x half>* %b) {
+ %t1 = load <4 x half>, <4 x half>* %a
+ store <4 x half> %t1, <4 x half>* %b, align 16
+ ret void
+}
+
+; CHECK-LABEL: .func test_ldst_v8f16(
+; CHECK-DAG: ld.param.u64 %[[A:rd[0-9]+]], [test_ldst_v8f16_param_0];
+; CHECK-DAG: ld.param.u64 %[[B:rd[0-9]+]], [test_ldst_v8f16_param_1];
+; CHECK-DAG: ld.v4.b32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]], [[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [%[[A]]];
+; CHECK-DAG: st.v4.b32 [%[[B]]], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK: ret;
+define void @test_ldst_v8f16(<8 x half>* %a, <8 x half>* %b) {
+ %t1 = load <8 x half>, <8 x half>* %a
+ store <8 x half> %t1, <8 x half>* %b, align 16
+ ret void
+}
+
+declare <2 x half> @test_callee(<2 x half> %a, <2 x half> %b) #0
+
+; CHECK-LABEL: test_call(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_call_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_call_param_1];
+; CHECK: {
+; CHECK-DAG: .param .align 4 .b8 param0[4];
+; CHECK-DAG: .param .align 4 .b8 param1[4];
+; CHECK-DAG: st.param.b32 [param0+0], [[A]];
+; CHECK-DAG: st.param.b32 [param1+0], [[B]];
+; CHECK-DAG: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_callee,
+; CHECK: );
+; CHECK-NEXT: ld.param.b32 [[R:%hh[0-9]+]], [retval0+0];
+; CHECK-NEXT: }
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_call(<2 x half> %a, <2 x half> %b) #0 {
+ %r = call <2 x half> @test_callee(<2 x half> %a, <2 x half> %b)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_call_flipped(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_call_flipped_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_call_flipped_param_1];
+; CHECK: {
+; CHECK-DAG: .param .align 4 .b8 param0[4];
+; CHECK-DAG: .param .align 4 .b8 param1[4];
+; CHECK-DAG: st.param.b32 [param0+0], [[B]];
+; CHECK-DAG: st.param.b32 [param1+0], [[A]];
+; CHECK-DAG: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_callee,
+; CHECK: );
+; CHECK-NEXT: ld.param.b32 [[R:%hh[0-9]+]], [retval0+0];
+; CHECK-NEXT: }
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_call_flipped(<2 x half> %a, <2 x half> %b) #0 {
+ %r = call <2 x half> @test_callee(<2 x half> %b, <2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_tailcall_flipped(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_tailcall_flipped_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_tailcall_flipped_param_1];
+; CHECK: {
+; CHECK-DAG: .param .align 4 .b8 param0[4];
+; CHECK-DAG: .param .align 4 .b8 param1[4];
+; CHECK-DAG: st.param.b32 [param0+0], [[B]];
+; CHECK-DAG: st.param.b32 [param1+0], [[A]];
+; CHECK-DAG: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_callee,
+; CHECK: );
+; CHECK-NEXT: ld.param.b32 [[R:%hh[0-9]+]], [retval0+0];
+; CHECK-NEXT: }
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_tailcall_flipped(<2 x half> %a, <2 x half> %b) #0 {
+ %r = tail call <2 x half> @test_callee(<2 x half> %b, <2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_select(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_select_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_select_param_1];
+; CHECK-DAG: ld.param.u8 [[C:%rs[0-9]+]], [test_select_param_2]
+; CHECK-DAG: setp.eq.b16 [[PRED:%p[0-9]+]], %rs{{.*}}, 1;
+; CHECK-NEXT: selp.b32 [[R:%hh[0-9]+]], [[A]], [[B]], [[PRED]];
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_select(<2 x half> %a, <2 x half> %b, i1 zeroext %c) #0 {
+ %r = select i1 %c, <2 x half> %a, <2 x half> %b
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_select_cc(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_select_cc_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_select_cc_param_1];
+; CHECK-DAG: ld.param.b32 [[C:%hh[0-9]+]], [test_select_cc_param_2];
+; CHECK-DAG: ld.param.b32 [[D:%hh[0-9]+]], [test_select_cc_param_3];
+;
+; CHECK-F16: setp.neu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[C]], [[D]]
+;
+; CHECK-NOF16-DAG: mov.b32 {[[C0:%h[0-9]+]], [[C1:%h[0-9]+]]}, [[C]]
+; CHECK-NOF16-DAG: mov.b32 {[[D0:%h[0-9]+]], [[D1:%h[0-9]+]]}, [[D]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[DF0:%f[0-9]+]], [[D0]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[CF0:%f[0-9]+]], [[C0]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[DF1:%f[0-9]+]], [[D1]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[CF1:%f[0-9]+]], [[C1]];
+; CHECK-NOF16-DAG: setp.neu.f32 [[P0:%p[0-9]+]], [[CF0]], [[DF0]]
+; CHECK-NOF16-DAG: setp.neu.f32 [[P1:%p[0-9]+]], [[CF1]], [[DF1]]
+;
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-DAG: selp.b16 [[R0:%h[0-9]+]], [[A0]], [[B0]], [[P0]];
+; CHECK-DAG: selp.b16 [[R1:%h[0-9]+]], [[A1]], [[B1]], [[P1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_select_cc(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x half> %d) #0 {
+ %cc = fcmp une <2 x half> %c, %d
+ %r = select <2 x i1> %cc, <2 x half> %a, <2 x half> %b
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_select_cc_f32_f16(
+; CHECK-DAG: ld.param.v2.f32 {[[A0:%f[0-9]+]], [[A1:%f[0-9]+]]}, [test_select_cc_f32_f16_param_0];
+; CHECK-DAG: ld.param.v2.f32 {[[B0:%f[0-9]+]], [[B1:%f[0-9]+]]}, [test_select_cc_f32_f16_param_1];
+; CHECK-DAG: ld.param.b32 [[C:%hh[0-9]+]], [test_select_cc_f32_f16_param_2];
+; CHECK-DAG: ld.param.b32 [[D:%hh[0-9]+]], [test_select_cc_f32_f16_param_3];
+;
+; CHECK-F16: setp.neu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[C]], [[D]]
+; CHECK-NOF16-DAG: mov.b32 {[[C0:%h[0-9]+]], [[C1:%h[0-9]+]]}, [[C]]
+; CHECK-NOF16-DAG: mov.b32 {[[D0:%h[0-9]+]], [[D1:%h[0-9]+]]}, [[D]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[DF0:%f[0-9]+]], [[D0]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[CF0:%f[0-9]+]], [[C0]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[DF1:%f[0-9]+]], [[D1]];
+; CHECK-NOF16-DAG: cvt.f32.f16 [[CF1:%f[0-9]+]], [[C1]];
+; CHECK-NOF16-DAG: setp.neu.f32 [[P0:%p[0-9]+]], [[CF0]], [[DF0]]
+; CHECK-NOF16-DAG: setp.neu.f32 [[P1:%p[0-9]+]], [[CF1]], [[DF1]]
+;
+; CHECK-DAG: selp.f32 [[R0:%f[0-9]+]], [[A0]], [[B0]], [[P0]];
+; CHECK-DAG: selp.f32 [[R1:%f[0-9]+]], [[A1]], [[B1]], [[P1]];
+; CHECK-NEXT: st.param.v2.f32 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x float> @test_select_cc_f32_f16(<2 x float> %a, <2 x float> %b,
+ <2 x half> %c, <2 x half> %d) #0 {
+ %cc = fcmp une <2 x half> %c, %d
+ %r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b
+ ret <2 x float> %r
+}
+
+; CHECK-LABEL: test_select_cc_f16_f32(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_select_cc_f16_f32_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_select_cc_f16_f32_param_1];
+; CHECK-DAG: ld.param.v2.f32 {[[C0:%f[0-9]+]], [[C1:%f[0-9]+]]}, [test_select_cc_f16_f32_param_2];
+; CHECK-DAG: ld.param.v2.f32 {[[D0:%f[0-9]+]], [[D1:%f[0-9]+]]}, [test_select_cc_f16_f32_param_3];
+; CHECK-DAG: setp.neu.f32 [[P0:%p[0-9]+]], [[C0]], [[D0]]
+; CHECK-DAG: setp.neu.f32 [[P1:%p[0-9]+]], [[C1]], [[D1]]
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-DAG: selp.b16 [[R0:%h[0-9]+]], [[A0]], [[B0]], [[P0]];
+; CHECK-DAG: selp.b16 [[R1:%h[0-9]+]], [[A1]], [[B1]], [[P1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x half> @test_select_cc_f16_f32(<2 x half> %a, <2 x half> %b,
+ <2 x float> %c, <2 x float> %d) #0 {
+ %cc = fcmp une <2 x float> %c, %d
+ %r = select <2 x i1> %cc, <2 x half> %a, <2 x half> %b
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fcmp_une(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_une_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_une_param_1];
+; CHECK-F16: setp.neu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.neu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.neu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_une(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp une <2 x half> %a, %b
+ ret <2 x i1> %r
+}
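+; (Note the two-predicate form above: setp.<cmp>.f16x2 writes a pair of
+; predicates, [[P0]]|[[P1]], one per lane, while the NOF16 path compares each
+; widened lane with a scalar setp.<cmp>.f32. The same shape repeats in the
+; vector fcmp tests below.)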
+
+; CHECK-LABEL: test_fcmp_ueq(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ueq_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ueq_param_1];
+; CHECK-F16: setp.equ.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.equ.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.equ.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_ueq(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp ueq <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_ugt(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ugt_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ugt_param_1];
+; CHECK-F16: setp.gtu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.gtu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.gtu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_ugt(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp ugt <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_uge(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_uge_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_uge_param_1];
+; CHECK-F16: setp.geu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.geu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.geu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_uge(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp uge <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_ult(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ult_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ult_param_1];
+; CHECK-F16: setp.ltu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.ltu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.ltu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_ult(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp ult <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_ule(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ule_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ule_param_1];
+; CHECK-F16: setp.leu.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.leu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.leu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_ule(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp ule <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+
+; CHECK-LABEL: test_fcmp_uno(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_uno_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_uno_param_1];
+; CHECK-F16: setp.nan.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.nan.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.nan.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_uno(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp uno <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_one(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_one_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_one_param_1];
+; CHECK-F16: setp.ne.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.ne.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.ne.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_one(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp one <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_oeq(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_oeq_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_oeq_param_1];
+; CHECK-F16: setp.eq.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.eq.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.eq.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_oeq(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp oeq <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_ogt(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ogt_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ogt_param_1];
+; CHECK-F16: setp.gt.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.gt.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.gt.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_ogt(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp ogt <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_oge(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_oge_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_oge_param_1];
+; CHECK-F16: setp.ge.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.ge.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.ge.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_oge(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp oge <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_olt(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_olt_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_olt_param_1];
+; CHECK-F16: setp.lt.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.lt.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.lt.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_olt(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp olt <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_ole(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ole_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ole_param_1];
+; CHECK-F16: setp.le.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.le.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.le.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_ole(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp ole <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fcmp_ord(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fcmp_ord_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fcmp_ord_param_1];
+; CHECK-F16: setp.num.f16x2 [[P0:%p[0-9]+]]|[[P1:%p[0-9]+]], [[A]], [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: setp.num.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
+; CHECK-NOF16-DAG: setp.num.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
+; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
+; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i1> @test_fcmp_ord(<2 x half> %a, <2 x half> %b) #0 {
+ %r = fcmp ord <2 x half> %a, %b
+ ret <2 x i1> %r
+}
+
+; CHECK-LABEL: test_fptosi_i32(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fptosi_i32_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.rzi.s32.f16 [[R0:%r[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rzi.s32.f16 [[R1:%r[0-9]+]], [[A1]];
+; CHECK: st.param.v2.b32 [func_retval0+0], {[[R0]], [[R1]]}
+; CHECK: ret;
+define <2 x i32> @test_fptosi_i32(<2 x half> %a) #0 {
+ %r = fptosi <2 x half> %a to <2 x i32>
+ ret <2 x i32> %r
+}
+
+; CHECK-LABEL: test_fptosi_i64(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fptosi_i64_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.rzi.s64.f16 [[R0:%rd[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rzi.s64.f16 [[R1:%rd[0-9]+]], [[A1]];
+; CHECK: st.param.v2.b64 [func_retval0+0], {[[R0]], [[R1]]}
+; CHECK: ret;
+define <2 x i64> @test_fptosi_i64(<2 x half> %a) #0 {
+ %r = fptosi <2 x half> %a to <2 x i64>
+ ret <2 x i64> %r
+}
+
+; CHECK-LABEL: test_fptoui_2xi32(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fptoui_2xi32_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.rzi.u32.f16 [[R0:%r[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rzi.u32.f16 [[R1:%r[0-9]+]], [[A1]];
+; CHECK: st.param.v2.b32 [func_retval0+0], {[[R0]], [[R1]]}
+; CHECK: ret;
+define <2 x i32> @test_fptoui_2xi32(<2 x half> %a) #0 {
+ %r = fptoui <2 x half> %a to <2 x i32>
+ ret <2 x i32> %r
+}
+
+; CHECK-LABEL: test_fptoui_2xi64(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fptoui_2xi64_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.rzi.u64.f16 [[R0:%rd[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rzi.u64.f16 [[R1:%rd[0-9]+]], [[A1]];
+; CHECK: st.param.v2.b64 [func_retval0+0], {[[R0]], [[R1]]}
+; CHECK: ret;
+define <2 x i64> @test_fptoui_2xi64(<2 x half> %a) #0 {
+ %r = fptoui <2 x half> %a to <2 x i64>
+ ret <2 x i64> %r
+}
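+
+; Note on the four conversion tests above: fptosi and fptoui both truncate
+; toward zero, which is why every per-element conversion uses the .rzi
+; (round-to-zero integer) modifier regardless of signedness or result width.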
+
+; CHECK-LABEL: test_uitofp_2xi32(
+; CHECK: ld.param.v2.u32 {[[A0:%r[0-9]+]], [[A1:%r[0-9]+]]}, [test_uitofp_2xi32_param_0];
+; CHECK-DAG: cvt.rn.f16.u32 [[R0:%h[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rn.f16.u32 [[R1:%h[0-9]+]], [[A1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_uitofp_2xi32(<2 x i32> %a) #0 {
+ %r = uitofp <2 x i32> %a to <2 x half>
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_uitofp_2xi64(
+; CHECK: ld.param.v2.u64 {[[A0:%rd[0-9]+]], [[A1:%rd[0-9]+]]}, [test_uitofp_2xi64_param_0];
+; CHECK-DAG: cvt.rn.f32.u64 [[F0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rn.f32.u64 [[F1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[F0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[F1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_uitofp_2xi64(<2 x i64> %a) #0 {
+ %r = uitofp <2 x i64> %a to <2 x half>
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_sitofp_2xi32(
+; CHECK: ld.param.v2.u32 {[[A0:%r[0-9]+]], [[A1:%r[0-9]+]]}, [test_sitofp_2xi32_param_0];
+; CHECK-DAG: cvt.rn.f16.s32 [[R0:%h[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rn.f16.s32 [[R1:%h[0-9]+]], [[A1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_sitofp_2xi32(<2 x i32> %a) #0 {
+ %r = sitofp <2 x i32> %a to <2 x half>
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_sitofp_2xi64(
+; CHECK: ld.param.v2.u64 {[[A0:%rd[0-9]+]], [[A1:%rd[0-9]+]]}, [test_sitofp_2xi64_param_0];
+; CHECK-DAG: cvt.rn.f32.s64 [[F0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rn.f32.s64 [[F1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[F0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[F1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_sitofp_2xi64(<2 x i64> %a) #0 {
+ %r = sitofp <2 x i64> %a to <2 x half>
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_uitofp_2xi32_fadd(
+; CHECK-DAG: ld.param.v2.u32 {[[A0:%r[0-9]+]], [[A1:%r[0-9]+]]}, [test_uitofp_2xi32_fadd_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_uitofp_2xi32_fadd_param_1];
+; CHECK-DAG: cvt.rn.f16.u32 [[C0:%h[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rn.f16.u32 [[C1:%h[0-9]+]], [[A1]];
+
+; CHECK-F16-DAG: mov.b32 [[C:%hh[0-9]+]], {[[C0]], [[C1]]}
+; CHECK-F16-DAG: add.rn.f16x2 [[R:%hh[0-9]+]], [[B]], [[C]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FC0:%f[0-9]+]], [[C0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FC1:%f[0-9]+]], [[C1]]
+; CHECK-NOF16-DAG: add.rn.f32 [[FR0:%f[0-9]+]], [[FB0]], [[FC0]];
+; CHECK-NOF16-DAG: add.rn.f32 [[FR1:%f[0-9]+]], [[FB1]], [[FC1]];
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+;
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_uitofp_2xi32_fadd(<2 x i32> %a, <2 x half> %b) #0 {
+ %c = uitofp <2 x i32> %a to <2 x half>
+ %r = fadd <2 x half> %b, %c
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_sitofp_2xi32_fadd(
+; CHECK-DAG: ld.param.v2.u32 {[[A0:%r[0-9]+]], [[A1:%r[0-9]+]]}, [test_sitofp_2xi32_fadd_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_sitofp_2xi32_fadd_param_1];
+; CHECK-DAG: cvt.rn.f16.s32 [[C0:%h[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rn.f16.s32 [[C1:%h[0-9]+]], [[A1]];
+;
+; CHECK-F16-DAG: mov.b32 [[C:%hh[0-9]+]], {[[C0]], [[C1]]}
+; CHECK-F16-DAG: add.rn.f16x2 [[R:%hh[0-9]+]], [[B]], [[C]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FC0:%f[0-9]+]], [[C0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FC1:%f[0-9]+]], [[C1]]
+; CHECK-NOF16-DAG: add.rn.f32 [[FR0:%f[0-9]+]], [[FB0]], [[FC0]];
+; CHECK-NOF16-DAG: add.rn.f32 [[FR1:%f[0-9]+]], [[FB1]], [[FC1]];
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+;
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_sitofp_2xi32_fadd(<2 x i32> %a, <2 x half> %b) #0 {
+ %c = sitofp <2 x i32> %a to <2 x half>
+ %r = fadd <2 x half> %b, %c
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fptrunc_2xfloat(
+; CHECK: ld.param.v2.f32 {[[A0:%f[0-9]+]], [[A1:%f[0-9]+]]}, [test_fptrunc_2xfloat_param_0];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[A1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_fptrunc_2xfloat(<2 x float> %a) #0 {
+ %r = fptrunc <2 x float> %a to <2 x half>
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fptrunc_2xdouble(
+; CHECK: ld.param.v2.f64 {[[A0:%fd[0-9]+]], [[A1:%fd[0-9]+]]}, [test_fptrunc_2xdouble_param_0];
+; CHECK-DAG: cvt.rn.f16.f64 [[R0:%h[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.rn.f16.f64 [[R1:%h[0-9]+]], [[A1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_fptrunc_2xdouble(<2 x double> %a) #0 {
+ %r = fptrunc <2 x double> %a to <2 x half>
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fpext_2xfloat(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fpext_2xfloat_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.f32.f16 [[R0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f32.f16 [[R1:%f[0-9]+]], [[A1]];
+; CHECK-NEXT: st.param.v2.f32 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK: ret;
+define <2 x float> @test_fpext_2xfloat(<2 x half> %a) #0 {
+ %r = fpext <2 x half> %a to <2 x float>
+ ret <2 x float> %r
+}
+
+; CHECK-LABEL: test_fpext_2xdouble(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fpext_2xdouble_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.f64.f16 [[R0:%fd[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f64.f16 [[R1:%fd[0-9]+]], [[A1]];
+; CHECK-NEXT: st.param.v2.f64 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK: ret;
+define <2 x double> @test_fpext_2xdouble(<2 x half> %a) #0 {
+ %r = fpext <2 x half> %a to <2 x double>
+ ret <2 x double> %r
+}
+
+
+; CHECK-LABEL: test_bitcast_2xhalf_to_2xi16(
+; CHECK: ld.param.u32 [[A:%r[0-9]+]], [test_bitcast_2xhalf_to_2xi16_param_0];
+; CHECK-DAG: cvt.u16.u32 [[R0:%rs[0-9]+]], [[A]]
+; CHECK-DAG: shr.u32 [[AH:%r[0-9]+]], [[A]], 16
+; CHECK-DAG: cvt.u16.u32 [[R1:%rs[0-9]+]], [[AH]]
+; CHECK: st.param.v2.b16 [func_retval0+0], {[[R0]], [[R1]]}
+; CHECK: ret;
+define <2 x i16> @test_bitcast_2xhalf_to_2xi16(<2 x half> %a) #0 {
+ %r = bitcast <2 x half> %a to <2 x i16>
+ ret <2 x i16> %r
+}
+
+; CHECK-LABEL: test_bitcast_2xi16_to_2xhalf(
+; CHECK: ld.param.v2.u16 {[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [test_bitcast_2xi16_to_2xhalf_param_0];
+; CHECK-DAG: cvt.u32.u16 [[R0:%r[0-9]+]], [[RS0]];
+; CHECK-DAG: cvt.u32.u16 [[R1:%r[0-9]+]], [[RS1]];
+; CHECK-DAG: shl.b32 [[R1H:%r[0-9]+]], [[R1]], 16;
+; CHECK-DAG: or.b32 [[R1H0L:%r[0-9]+]], [[R0]], [[R1H]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], [[R1H0L]];
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_bitcast_2xi16_to_2xhalf(<2 x i16> %a) #0 {
+ %r = bitcast <2 x i16> %a to <2 x half>
+ ret <2 x half> %r
+}
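+
+; The two bitcast tests above also document the in-register layout assumed
+; throughout this file: a <2 x half> occupies one 32-bit register, with
+; element 0 in the low 16 bits and element 1 in the high 16 bits, hence the
+; shr/shl by 16 and the or.b32 that reassembles the pair.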
+
+
+declare <2 x half> @llvm.sqrt.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.powi.f16(<2 x half> %a, <2 x i32> %b) #0
+declare <2 x half> @llvm.sin.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.cos.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.pow.f16(<2 x half> %a, <2 x half> %b) #0
+declare <2 x half> @llvm.exp.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.exp2.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.log.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.log10.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.log2.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.fma.f16(<2 x half> %a, <2 x half> %b, <2 x half> %c) #0
+declare <2 x half> @llvm.fabs.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.minnum.f16(<2 x half> %a, <2 x half> %b) #0
+declare <2 x half> @llvm.maxnum.f16(<2 x half> %a, <2 x half> %b) #0
+declare <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %b) #0
+declare <2 x half> @llvm.floor.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.ceil.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.trunc.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.rint.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.nearbyint.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.round.f16(<2 x half> %a) #0
+declare <2 x half> @llvm.fmuladd.f16(<2 x half> %a, <2 x half> %b, <2 x half> %c) #0
+
+; CHECK-LABEL: test_sqrt(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_sqrt_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.f32.f16 [[AF0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f32.f16 [[AF1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: sqrt.rn.f32 [[RF0:%f[0-9]+]], [[AF0]];
+; CHECK-DAG: sqrt.rn.f32 [[RF1:%f[0-9]+]], [[AF1]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_sqrt(<2 x half> %a) #0 {
+ %r = call <2 x half> @llvm.sqrt.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_powi(
+;define <2 x half> @test_powi(<2 x half> %a, <2 x i32> %b) #0 {
+; %r = call <2 x half> @llvm.powi.f16(<2 x half> %a, <2 x i32> %b)
+; ret <2 x half> %r
+;}
+
+; CHECK-LABEL: test_sin(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_sin_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.f32.f16 [[AF0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f32.f16 [[AF1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: sin.approx.f32 [[RF0:%f[0-9]+]], [[AF0]];
+; CHECK-DAG: sin.approx.f32 [[RF1:%f[0-9]+]], [[AF1]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_sin(<2 x half> %a) #0 #1 {
+ %r = call <2 x half> @llvm.sin.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_cos(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_cos_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.f32.f16 [[AF0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f32.f16 [[AF1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: cos.approx.f32 [[RF0:%f[0-9]+]], [[AF0]];
+; CHECK-DAG: cos.approx.f32 [[RF1:%f[0-9]+]], [[AF1]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_cos(<2 x half> %a) #0 #1 {
+ %r = call <2 x half> @llvm.cos.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_pow(
+;define <2 x half> @test_pow(<2 x half> %a, <2 x half> %b) #0 {
+; %r = call <2 x half> @llvm.pow.f16(<2 x half> %a, <2 x half> %b)
+; ret <2 x half> %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_exp(
+;define <2 x half> @test_exp(<2 x half> %a) #0 {
+; %r = call <2 x half> @llvm.exp.f16(<2 x half> %a)
+; ret <2 x half> %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_exp2(
+;define <2 x half> @test_exp2(<2 x half> %a) #0 {
+; %r = call <2 x half> @llvm.exp2.f16(<2 x half> %a)
+; ret <2 x half> %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_log(
+;define <2 x half> @test_log(<2 x half> %a) #0 {
+; %r = call <2 x half> @llvm.log.f16(<2 x half> %a)
+; ret <2 x half> %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_log10(
+;define <2 x half> @test_log10(<2 x half> %a) #0 {
+; %r = call <2 x half> @llvm.log10.f16(<2 x half> %a)
+; ret <2 x half> %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_log2(
+;define <2 x half> @test_log2(<2 x half> %a) #0 {
+; %r = call <2 x half> @llvm.log2.f16(<2 x half> %a)
+; ret <2 x half> %r
+;}
+
+; CHECK-LABEL: test_fma(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fma_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fma_param_1];
+; CHECK-DAG: ld.param.b32 [[C:%hh[0-9]+]], [test_fma_param_2];
+;
+; CHECK-F16: fma.rn.f16x2 [[R:%hh[0-9]+]], [[A]], [[B]], [[C]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[C0:%h[0-9]+]], [[C1:%h[0-9]+]]}, [[C]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FC0:%f[0-9]+]], [[C0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FC1:%f[0-9]+]], [[C1]]
+; CHECK-NOF16-DAG: fma.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], [[FB0]], [[FC0]];
+; CHECK-NOF16-DAG: fma.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], [[FB1]], [[FC1]];
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret
+define <2 x half> @test_fma(<2 x half> %a, <2 x half> %b, <2 x half> %c) #0 {
+ %r = call <2 x half> @llvm.fma.f16(<2 x half> %a, <2 x half> %b, <2 x half> %c)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fabs(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_fabs_param_0];
+; CHECK: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: cvt.f32.f16 [[AF0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f32.f16 [[AF1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: abs.f32 [[RF0:%f[0-9]+]], [[AF0]];
+; CHECK-DAG: abs.f32 [[RF1:%f[0-9]+]], [[AF1]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_fabs(<2 x half> %a) #0 {
+ %r = call <2 x half> @llvm.fabs.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_minnum(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_minnum_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_minnum_param_1];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-DAG: cvt.f32.f16 [[AF0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f32.f16 [[AF1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.f32.f16 [[BF0:%f[0-9]+]], [[B0]];
+; CHECK-DAG: cvt.f32.f16 [[BF1:%f[0-9]+]], [[B1]];
+; CHECK-DAG: min.f32 [[RF0:%f[0-9]+]], [[AF0]], [[BF0]];
+; CHECK-DAG: min.f32 [[RF1:%f[0-9]+]], [[AF1]], [[BF1]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_minnum(<2 x half> %a, <2 x half> %b) #0 {
+ %r = call <2 x half> @llvm.minnum.f16(<2 x half> %a, <2 x half> %b)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_maxnum(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_maxnum_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_maxnum_param_1];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-DAG: cvt.f32.f16 [[AF0:%f[0-9]+]], [[A0]];
+; CHECK-DAG: cvt.f32.f16 [[AF1:%f[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.f32.f16 [[BF0:%f[0-9]+]], [[B0]];
+; CHECK-DAG: cvt.f32.f16 [[BF1:%f[0-9]+]], [[B1]];
+; CHECK-DAG: max.f32 [[RF0:%f[0-9]+]], [[AF0]], [[BF0]];
+; CHECK-DAG: max.f32 [[RF1:%f[0-9]+]], [[AF1]], [[BF1]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[RF0]];
+; CHECK-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[RF1]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_maxnum(<2 x half> %a, <2 x half> %b) #0 {
+ %r = call <2 x half> @llvm.maxnum.f16(<2 x half> %a, <2 x half> %b)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_copysign(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_copysign_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_copysign_param_1];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-DAG: mov.b16 [[AS0:%rs[0-9]+]], [[A0]];
+; CHECK-DAG: mov.b16 [[AS1:%rs[0-9]+]], [[A1]];
+; CHECK-DAG: mov.b16 [[BS0:%rs[0-9]+]], [[B0]];
+; CHECK-DAG: mov.b16 [[BS1:%rs[0-9]+]], [[B1]];
+; CHECK-DAG: and.b16 [[AX0:%rs[0-9]+]], [[AS0]], 32767;
+; CHECK-DAG: and.b16 [[AX1:%rs[0-9]+]], [[AS1]], 32767;
+; CHECK-DAG: and.b16 [[BX0:%rs[0-9]+]], [[BS0]], -32768;
+; CHECK-DAG: and.b16 [[BX1:%rs[0-9]+]], [[BS1]], -32768;
+; CHECK-DAG: or.b16 [[RS0:%rs[0-9]+]], [[AX0]], [[BX0]];
+; CHECK-DAG: or.b16 [[RS1:%rs[0-9]+]], [[AX1]], [[BX1]];
+; CHECK-DAG: mov.b16 [[R0:%h[0-9]+]], [[RS0]];
+; CHECK-DAG: mov.b16 [[R1:%h[0-9]+]], [[RS1]];
+; CHECK-DAG: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_copysign(<2 x half> %a, <2 x half> %b) #0 {
+ %r = call <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %b)
+ ret <2 x half> %r
+}
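+
+; Roughly, copysign lowers to integer bit manipulation on the f16 bit
+; patterns: (a & 0x7fff) | (b & 0x8000). Those masks appear in the checks
+; above in decimal, as 32767 and -32768.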
+
+; CHECK-LABEL: test_copysign_f32(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_copysign_f32_param_0];
+; CHECK-DAG: ld.param.v2.f32 {[[B0:%f[0-9]+]], [[B1:%f[0-9]+]]}, [test_copysign_f32_param_1];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b16 [[AS0:%rs[0-9]+]], [[A0]];
+; CHECK-DAG: mov.b16 [[AS1:%rs[0-9]+]], [[A1]];
+; CHECK-DAG: mov.b32 [[BI0:%r[0-9]+]], [[B0]];
+; CHECK-DAG: mov.b32 [[BI1:%r[0-9]+]], [[B1]];
+; CHECK-DAG: and.b16 [[AI0:%rs[0-9]+]], [[AS0]], 32767;
+; CHECK-DAG: and.b16 [[AI1:%rs[0-9]+]], [[AS1]], 32767;
+; CHECK-DAG: and.b32 [[BX0:%r[0-9]+]], [[BI0]], -2147483648;
+; CHECK-DAG: and.b32 [[BX1:%r[0-9]+]], [[BI1]], -2147483648;
+; CHECK-DAG: shr.u32 [[BY0:%r[0-9]+]], [[BX0]], 16;
+; CHECK-DAG: shr.u32 [[BY1:%r[0-9]+]], [[BX1]], 16;
+; CHECK-DAG: cvt.u16.u32 [[BZ0:%rs[0-9]+]], [[BY0]];
+; CHECK-DAG: cvt.u16.u32 [[BZ1:%rs[0-9]+]], [[BY1]];
+; CHECK-DAG: or.b16 [[RS0:%rs[0-9]+]], [[AI0]], [[BZ0]];
+; CHECK-DAG: or.b16 [[RS1:%rs[0-9]+]], [[AI1]], [[BZ1]];
+; CHECK-DAG: mov.b16 [[R0:%h[0-9]+]], [[RS0]];
+; CHECK-DAG: mov.b16 [[R1:%h[0-9]+]], [[RS1]];
+; CHECK-DAG: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_copysign_f32(<2 x half> %a, <2 x float> %b) #0 {
+ %tb = fptrunc <2 x float> %b to <2 x half>
+ %r = call <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %tb)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_copysign_f64(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_copysign_f64_param_0];
+; CHECK-DAG: ld.param.v2.f64 {[[B0:%fd[0-9]+]], [[B1:%fd[0-9]+]]}, [test_copysign_f64_param_1];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b16 [[AS0:%rs[0-9]+]], [[A0]];
+; CHECK-DAG: mov.b16 [[AS1:%rs[0-9]+]], [[A1]];
+; CHECK-DAG: mov.b64 [[BI0:%rd[0-9]+]], [[B0]];
+; CHECK-DAG: mov.b64 [[BI1:%rd[0-9]+]], [[B1]];
+; CHECK-DAG: and.b16 [[AI0:%rs[0-9]+]], [[AS0]], 32767;
+; CHECK-DAG: and.b16 [[AI1:%rs[0-9]+]], [[AS1]], 32767;
+; CHECK-DAG: and.b64 [[BX0:%rd[0-9]+]], [[BI0]], -9223372036854775808;
+; CHECK-DAG: and.b64 [[BX1:%rd[0-9]+]], [[BI1]], -9223372036854775808;
+; CHECK-DAG: shr.u64 [[BY0:%rd[0-9]+]], [[BX0]], 48;
+; CHECK-DAG: shr.u64 [[BY1:%rd[0-9]+]], [[BX1]], 48;
+; CHECK-DAG: cvt.u16.u64 [[BZ0:%rs[0-9]+]], [[BY0]];
+; CHECK-DAG: cvt.u16.u64 [[BZ1:%rs[0-9]+]], [[BY1]];
+; CHECK-DAG: or.b16 [[RS0:%rs[0-9]+]], [[AI0]], [[BZ0]];
+; CHECK-DAG: or.b16 [[RS1:%rs[0-9]+]], [[AI1]], [[BZ1]];
+; CHECK-DAG: mov.b16 [[R0:%h[0-9]+]], [[RS0]];
+; CHECK-DAG: mov.b16 [[R1:%h[0-9]+]], [[RS1]];
+; CHECK-DAG: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_copysign_f64(<2 x half> %a, <2 x double> %b) #0 {
+ %tb = fptrunc <2 x double> %b to <2 x half>
+ %r = call <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %tb)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_copysign_extended(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_copysign_extended_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_copysign_extended_param_1];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-DAG: mov.b16 [[AS0:%rs[0-9]+]], [[A0]];
+; CHECK-DAG: mov.b16 [[AS1:%rs[0-9]+]], [[A1]];
+; CHECK-DAG: mov.b16 [[BS0:%rs[0-9]+]], [[B0]];
+; CHECK-DAG: mov.b16 [[BS1:%rs[0-9]+]], [[B1]];
+; CHECK-DAG: and.b16 [[AX0:%rs[0-9]+]], [[AS0]], 32767;
+; CHECK-DAG: and.b16 [[AX1:%rs[0-9]+]], [[AS1]], 32767;
+; CHECK-DAG: and.b16 [[BX0:%rs[0-9]+]], [[BS0]], -32768;
+; CHECK-DAG: and.b16 [[BX1:%rs[0-9]+]], [[BS1]], -32768;
+; CHECK-DAG: or.b16 [[RS0:%rs[0-9]+]], [[AX0]], [[BX0]];
+; CHECK-DAG: or.b16 [[RS1:%rs[0-9]+]], [[AX1]], [[BX1]];
+; CHECK-DAG: mov.b16 [[R0:%h[0-9]+]], [[RS0]];
+; CHECK-DAG: mov.b16 [[R1:%h[0-9]+]], [[RS1]];
+; CHECK-DAG: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: mov.b32 {[[RX0:%h[0-9]+]], [[RX1:%h[0-9]+]]}, [[R]]
+; CHECK-DAG: cvt.f32.f16 [[XR0:%f[0-9]+]], [[RX0]];
+; CHECK-DAG: cvt.f32.f16 [[XR1:%f[0-9]+]], [[RX1]];
+; CHECK: st.param.v2.f32 [func_retval0+0], {[[XR0]], [[XR1]]};
+; CHECK: ret;
+define <2 x float> @test_copysign_extended(<2 x half> %a, <2 x half> %b) #0 {
+ %r = call <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %b)
+ %xr = fpext <2 x half> %r to <2 x float>
+ ret <2 x float> %xr
+}
+
+; CHECK-LABEL: test_floor(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_floor_param_0];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]];
+; CHECK-DAG: cvt.rmi.f16.f16 [[R1:%h[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.rmi.f16.f16 [[R0:%h[0-9]+]], [[A0]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_floor(<2 x half> %a) #0 {
+ %r = call <2 x half> @llvm.floor.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_ceil(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_ceil_param_0];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]];
+; CHECK-DAG: cvt.rpi.f16.f16 [[R1:%h[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.rpi.f16.f16 [[R0:%h[0-9]+]], [[A0]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_ceil(<2 x half> %a) #0 {
+ %r = call <2 x half> @llvm.ceil.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_trunc(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_trunc_param_0];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]];
+; CHECK-DAG: cvt.rzi.f16.f16 [[R1:%h[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.rzi.f16.f16 [[R0:%h[0-9]+]], [[A0]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_trunc(<2 x half> %a) #0 {
+ %r = call <2 x half> @llvm.trunc.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_rint(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_rint_param_0];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]];
+; CHECK-DAG: cvt.rni.f16.f16 [[R1:%h[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.rni.f16.f16 [[R0:%h[0-9]+]], [[A0]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_rint(<2 x half> %a) #0 {
+ %r = call <2 x half> @llvm.rint.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_nearbyint(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_nearbyint_param_0];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]];
+; CHECK-DAG: cvt.rni.f16.f16 [[R1:%h[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.rni.f16.f16 [[R0:%h[0-9]+]], [[A0]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_nearbyint(<2 x half> %a) #0 {
+ %r = call <2 x half> @llvm.nearbyint.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_round(
+; CHECK: ld.param.b32 [[A:%hh[0-9]+]], [test_round_param_0];
+; CHECK-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]];
+; CHECK-DAG: cvt.rni.f16.f16 [[R1:%h[0-9]+]], [[A1]];
+; CHECK-DAG: cvt.rni.f16.f16 [[R0:%h[0-9]+]], [[A0]];
+; CHECK: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_round(<2 x half> %a) #0 {
+ %r = call <2 x half> @llvm.round.f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
+; CHECK-LABEL: test_fmuladd(
+; CHECK-DAG: ld.param.b32 [[A:%hh[0-9]+]], [test_fmuladd_param_0];
+; CHECK-DAG: ld.param.b32 [[B:%hh[0-9]+]], [test_fmuladd_param_1];
+; CHECK-DAG: ld.param.b32 [[C:%hh[0-9]+]], [test_fmuladd_param_2];
+;
+; CHECK-F16: fma.rn.f16x2 [[R:%hh[0-9]+]], [[A]], [[B]], [[C]];
+;
+; CHECK-NOF16-DAG: mov.b32 {[[A0:%h[0-9]+]], [[A1:%h[0-9]+]]}, [[A]]
+; CHECK-NOF16-DAG: mov.b32 {[[B0:%h[0-9]+]], [[B1:%h[0-9]+]]}, [[B]]
+; CHECK-NOF16-DAG: mov.b32 {[[C0:%h[0-9]+]], [[C1:%h[0-9]+]]}, [[C]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA0:%f[0-9]+]], [[A0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB0:%f[0-9]+]], [[B0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FC0:%f[0-9]+]], [[C0]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FA1:%f[0-9]+]], [[A1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FB1:%f[0-9]+]], [[B1]]
+; CHECK-NOF16-DAG: cvt.f32.f16 [[FC1:%f[0-9]+]], [[C1]]
+; CHECK-NOF16-DAG: fma.rn.f32 [[FR0:%f[0-9]+]], [[FA0]], [[FB0]], [[FC0]];
+; CHECK-NOF16-DAG: fma.rn.f32 [[FR1:%f[0-9]+]], [[FA1]], [[FB1]], [[FC1]];
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R0:%h[0-9]+]], [[FR0]]
+; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%h[0-9]+]], [[FR1]]
+; CHECK-NOF16: mov.b32 [[R:%hh[0-9]+]], {[[R0]], [[R1]]}
+;
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x half> @test_fmuladd(<2 x half> %a, <2 x half> %b, <2 x half> %c) #0 {
+ %r = call <2 x half> @llvm.fmuladd.f16(<2 x half> %a, <2 x half> %b, <2 x half> %c)
+ ret <2 x half> %r
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { "unsafe-fp-math" = "true" }
diff --git a/test/CodeGen/NVPTX/fast-math.ll b/test/CodeGen/NVPTX/fast-math.ll
index d0a333d369ca..56b1f88f3b2e 100644
--- a/test/CodeGen/NVPTX/fast-math.ll
+++ b/test/CodeGen/NVPTX/fast-math.ll
@@ -1,25 +1,91 @@
; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
-declare float @llvm.nvvm.sqrt.f(float)
+declare float @llvm.sqrt.f32(float)
+declare double @llvm.sqrt.f64(double)
-; CHECK-LABEL: sqrt_div
+; CHECK-LABEL: sqrt_div(
; CHECK: sqrt.rn.f32
; CHECK: div.rn.f32
define float @sqrt_div(float %a, float %b) {
- %t1 = tail call float @llvm.nvvm.sqrt.f(float %a)
+ %t1 = tail call float @llvm.sqrt.f32(float %a)
%t2 = fdiv float %t1, %b
ret float %t2
}
-; CHECK-LABEL: sqrt_div_fast
+; CHECK-LABEL: sqrt_div_fast(
; CHECK: sqrt.approx.f32
; CHECK: div.approx.f32
define float @sqrt_div_fast(float %a, float %b) #0 {
- %t1 = tail call float @llvm.nvvm.sqrt.f(float %a)
+ %t1 = tail call float @llvm.sqrt.f32(float %a)
%t2 = fdiv float %t1, %b
ret float %t2
}
+; CHECK-LABEL: sqrt_div_ftz(
+; CHECK: sqrt.rn.ftz.f32
+; CHECK: div.rn.ftz.f32
+define float @sqrt_div_ftz(float %a, float %b) #1 {
+ %t1 = tail call float @llvm.sqrt.f32(float %a)
+ %t2 = fdiv float %t1, %b
+ ret float %t2
+}
+
+; CHECK-LABEL: sqrt_div_fast_ftz(
+; CHECK: sqrt.approx.ftz.f32
+; CHECK: div.approx.ftz.f32
+define float @sqrt_div_fast_ftz(float %a, float %b) #0 #1 {
+ %t1 = tail call float @llvm.sqrt.f32(float %a)
+ %t2 = fdiv float %t1, %b
+ ret float %t2
+}
+
+; There are no fast-math or ftz versions of sqrt and div for f64. We use
+; reciprocal(rsqrt(x)) for sqrt(x), and emit a vanilla divide.
+
+; CHECK-LABEL: sqrt_div_fast_ftz_f64(
+; CHECK: rsqrt.approx.f64
+; CHECK: rcp.approx.ftz.f64
+; CHECK: div.rn.f64
+define double @sqrt_div_fast_ftz_f64(double %a, double %b) #0 #1 {
+ %t1 = tail call double @llvm.sqrt.f64(double %a)
+ %t2 = fdiv double %t1, %b
+ ret double %t2
+}
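+
+; A sketch of the identity relied on above (valid for x > 0):
+;   sqrt(x) = 1 / (1 / sqrt(x)) = rcp(rsqrt(x))
+; which is why the fast f64 path emits rsqrt.approx.f64 followed by
+; rcp.approx.ftz.f64 instead of a single sqrt instruction.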
+
+; CHECK-LABEL: rsqrt(
+; CHECK-NOT: rsqrt.approx
+; CHECK: sqrt.rn.f32
+; CHECK-NOT: rsqrt.approx
+define float @rsqrt(float %a) {
+ %b = tail call float @llvm.sqrt.f32(float %a)
+ %ret = fdiv float 1.0, %b
+ ret float %ret
+}
+
+; CHECK-LABEL: rsqrt_fast(
+; CHECK-NOT: div.
+; CHECK-NOT: sqrt.
+; CHECK: rsqrt.approx.f32
+; CHECK-NOT: div.
+; CHECK-NOT: sqrt.
+define float @rsqrt_fast(float %a) #0 {
+ %b = tail call float @llvm.sqrt.f32(float %a)
+ %ret = fdiv float 1.0, %b
+ ret float %ret
+}
+
+; CHECK-LABEL: rsqrt_fast_ftz(
+; CHECK-NOT: div.
+; CHECK-NOT: sqrt.
+; CHECK: rsqrt.approx.ftz.f32
+; CHECK-NOT: div.
+; CHECK-NOT: sqrt.
+define float @rsqrt_fast_ftz(float %a) #0 #1 {
+ %b = tail call float @llvm.sqrt.f32(float %a)
+ %ret = fdiv float 1.0, %b
+ ret float %ret
+}
+
; CHECK-LABEL: fadd
; CHECK: add.rn.f32
define float @fadd(float %a, float %b) {
@@ -34,5 +100,66 @@ define float @fadd_ftz(float %a, float %b) #1 {
ret float %t1
}
+declare float @llvm.sin.f32(float)
+declare float @llvm.cos.f32(float)
+
+; CHECK-LABEL: fsin_approx
+; CHECK: sin.approx.f32
+define float @fsin_approx(float %a) #0 {
+ %r = tail call float @llvm.sin.f32(float %a)
+ ret float %r
+}
+
+; CHECK-LABEL: fcos_approx
+; CHECK: cos.approx.f32
+define float @fcos_approx(float %a) #0 {
+ %r = tail call float @llvm.cos.f32(float %a)
+ ret float %r
+}
+
+; CHECK-LABEL: repeated_div_recip_allowed
+define float @repeated_div_recip_allowed(i1 %pred, float %a, float %b, float %divisor) {
+; CHECK: rcp.rn.f32
+; CHECK: mul.rn.f32
+; CHECK: mul.rn.f32
+ %x = fdiv arcp float %a, %divisor
+ %y = fdiv arcp float %b, %divisor
+ %z = select i1 %pred, float %x, float %y
+ ret float %z
+}
+
+; CHECK-LABEL: repeated_div_recip_allowed_ftz
+define float @repeated_div_recip_allowed_ftz(i1 %pred, float %a, float %b, float %divisor) #1 {
+; CHECK: rcp.rn.ftz.f32
+; CHECK: mul.rn.ftz.f32
+; CHECK: mul.rn.ftz.f32
+ %x = fdiv arcp float %a, %divisor
+ %y = fdiv arcp float %b, %divisor
+ %z = select i1 %pred, float %x, float %y
+ ret float %z
+}
+
+; CHECK-LABEL: repeated_div_fast
+define float @repeated_div_fast(i1 %pred, float %a, float %b, float %divisor) #0 {
+; CHECK: rcp.approx.f32
+; CHECK: mul.f32
+; CHECK: mul.f32
+ %x = fdiv float %a, %divisor
+ %y = fdiv float %b, %divisor
+ %z = select i1 %pred, float %x, float %y
+ ret float %z
+}
+
+; CHECK-LABEL: repeated_div_fast_ftz
+define float @repeated_div_fast_ftz(i1 %pred, float %a, float %b, float %divisor) #0 #1 {
+; CHECK: rcp.approx.ftz.f32
+; CHECK: mul.ftz.f32
+; CHECK: mul.ftz.f32
+ %x = fdiv float %a, %divisor
+ %y = fdiv float %b, %divisor
+ %z = select i1 %pred, float %x, float %y
+ ret float %z
+}
+
attributes #0 = { "unsafe-fp-math" = "true" }
attributes #1 = { "nvptx-f32ftz" = "true" }
diff --git a/test/CodeGen/NVPTX/fcos-no-fast-math.ll b/test/CodeGen/NVPTX/fcos-no-fast-math.ll
new file mode 100644
index 000000000000..d435c1d14fee
--- /dev/null
+++ b/test/CodeGen/NVPTX/fcos-no-fast-math.ll
@@ -0,0 +1,14 @@
+; RUN: not llc < %s -march=nvptx -mcpu=sm_20 2>&1 | FileCheck %s
+
+; Check that we fail to select fcos without fast-math enabled
+
+declare float @llvm.cos.f32(float)
+
+; CHECK: LLVM ERROR: Cannot select: {{.*}}: f32 = fcos
+; CHECK: In function: test_fcos_safe
+define float @test_fcos_safe(float %a) #0 {
+ %r = tail call float @llvm.cos.f32(float %a)
+ ret float %r
+}
+
+attributes #0 = { "unsafe-fp-math" = "false" }
diff --git a/test/CodeGen/NVPTX/fsin-no-fast-math.ll b/test/CodeGen/NVPTX/fsin-no-fast-math.ll
new file mode 100644
index 000000000000..56396b849250
--- /dev/null
+++ b/test/CodeGen/NVPTX/fsin-no-fast-math.ll
@@ -0,0 +1,14 @@
+; RUN: not llc < %s -march=nvptx -mcpu=sm_20 2>&1 | FileCheck %s
+
+; Check that we fail to select fsin without fast-math enabled
+
+declare float @llvm.sin.f32(float)
+
+; CHECK: LLVM ERROR: Cannot select: {{.*}}: f32 = fsin
+; CHECK: In function: test_fsin_safe
+define float @test_fsin_safe(float %a) #0 {
+ %r = tail call float @llvm.sin.f32(float %a)
+ ret float %r
+}
+
+attributes #0 = { "unsafe-fp-math" = "false" }
diff --git a/test/CodeGen/NVPTX/global-variable-big.ll b/test/CodeGen/NVPTX/global-variable-big.ll
new file mode 100644
index 000000000000..0c769a856080
--- /dev/null
+++ b/test/CodeGen/NVPTX/global-variable-big.ll
@@ -0,0 +1,9 @@
+; RUN: llc < %s | FileCheck %s
+target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
+target triple = "nvptx64-nvidia-cuda"
+
+; Check that we can handle global variables of large integer type.
+
+; (lsb) 0x0102'0304'0506...0F10 (msb)
+@gv = addrspace(1) externally_initialized global i128 21345817372864405881847059188222722561, align 16
+; CHECK: .visible .global .align 16 .b8 gv[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
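+
+; For reference, the decimal constant above is
+; 0x100f0e0d0c0b0a090807060504030201; stored little-endian, its bytes are
+; 0x01 through 0x10 in ascending address order, matching the .b8 initializer
+; {1, 2, ..., 16} in the CHECK line.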
diff --git a/test/CodeGen/NVPTX/half.ll b/test/CodeGen/NVPTX/half.ll
index b99524162e65..6b8d01e0ed1b 100644
--- a/test/CodeGen/NVPTX/half.ll
+++ b/test/CodeGen/NVPTX/half.ll
@@ -2,8 +2,8 @@
define void @test_load_store(half addrspace(1)* %in, half addrspace(1)* %out) {
; CHECK-LABEL: @test_load_store
-; CHECK: ld.global.u16 [[TMP:%rs[0-9]+]], [{{%r[0-9]+}}]
-; CHECK: st.global.u16 [{{%r[0-9]+}}], [[TMP]]
+; CHECK: ld.global.b16 [[TMP:%h[0-9]+]], [{{%r[0-9]+}}]
+; CHECK: st.global.b16 [{{%r[0-9]+}}], [[TMP]]
%val = load half, half addrspace(1)* %in
store half %val, half addrspace(1) * %out
ret void
@@ -11,8 +11,8 @@ define void @test_load_store(half addrspace(1)* %in, half addrspace(1)* %out) {
define void @test_bitcast_from_half(half addrspace(1)* %in, i16 addrspace(1)* %out) {
; CHECK-LABEL: @test_bitcast_from_half
-; CHECK: ld.global.u16 [[TMP:%rs[0-9]+]], [{{%r[0-9]+}}]
-; CHECK: st.global.u16 [{{%r[0-9]+}}], [[TMP]]
+; CHECK: ld.global.b16 [[TMP:%h[0-9]+]], [{{%r[0-9]+}}]
+; CHECK: st.global.b16 [{{%r[0-9]+}}], [[TMP]]
%val = load half, half addrspace(1) * %in
%val_int = bitcast half %val to i16
store i16 %val_int, i16 addrspace(1)* %out
diff --git a/test/CodeGen/NVPTX/idioms.ll b/test/CodeGen/NVPTX/idioms.ll
new file mode 100644
index 000000000000..047325c85165
--- /dev/null
+++ b/test/CodeGen/NVPTX/idioms.ll
@@ -0,0 +1,31 @@
+; Check that various LLVM idioms get lowered to NVPTX as expected.
+
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
+
+; CHECK-LABEL: abs_i16(
+define i16 @abs_i16(i16 %a) {
+; CHECK: abs.s16
+ %neg = sub i16 0, %a
+ %abs.cond = icmp sge i16 %a, 0
+ %abs = select i1 %abs.cond, i16 %a, i16 %neg
+ ret i16 %abs
+}
+
+; CHECK-LABEL: abs_i32(
+define i32 @abs_i32(i32 %a) {
+; CHECK: abs.s32
+ %neg = sub i32 0, %a
+ %abs.cond = icmp sge i32 %a, 0
+ %abs = select i1 %abs.cond, i32 %a, i32 %neg
+ ret i32 %abs
+}
+
+; CHECK-LABEL: abs_i64(
+define i64 @abs_i64(i64 %a) {
+; CHECK: abs.s64
+ %neg = sub i64 0, %a
+ %abs.cond = icmp sge i64 %a, 0
+ %abs = select i1 %abs.cond, i64 %a, i64 %neg
+ ret i64 %abs
+}
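+
+; In each case above, ISel recognizes the compare-and-select-of-negation
+; idiom, select (icmp sge x, 0), x, (sub 0, x), and emits a single abs.sNN
+; instruction rather than separate negate/compare/select instructions.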
diff --git a/test/CodeGen/NVPTX/intrinsics.ll b/test/CodeGen/NVPTX/intrinsics.ll
index 06a8712c2102..668de8a994bc 100644
--- a/test/CodeGen/NVPTX/intrinsics.ll
+++ b/test/CodeGen/NVPTX/intrinsics.ll
@@ -1,28 +1,105 @@
; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
-define ptx_device float @test_fabsf(float %f) {
-; CHECK: abs.f32 %f{{[0-9]+}}, %f{{[0-9]+}};
-; CHECK: ret;
- %x = call float @llvm.fabs.f32(float %f)
- ret float %x
+; CHECK-LABEL: test_fabsf(
+define float @test_fabsf(float %f) {
+; CHECK: abs.f32
+ %x = call float @llvm.fabs.f32(float %f)
+ ret float %x
}
-define ptx_device double @test_fabs(double %d) {
-; CHECK: abs.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}};
-; CHECK: ret;
- %x = call double @llvm.fabs.f64(double %d)
- ret double %x
+; CHECK-LABEL: test_fabs(
+define double @test_fabs(double %d) {
+; CHECK: abs.f64
+ %x = call double @llvm.fabs.f64(double %d)
+ ret double %x
}
+; CHECK-LABEL: test_nvvm_sqrt(
define float @test_nvvm_sqrt(float %a) {
-; CHECK: sqrt.rn.f32 %f{{[0-9]+}}, %f{{[0-9]+}};
-; CHECK: ret;
+; CHECK: sqrt.rn.f32
%val = call float @llvm.nvvm.sqrt.f(float %a)
ret float %val
}
+; CHECK-LABEL: test_llvm_sqrt(
+define float @test_llvm_sqrt(float %a) {
+; CHECK: sqrt.rn.f32
+ %val = call float @llvm.sqrt.f32(float %a)
+ ret float %val
+}
+
+; CHECK-LABEL: test_bitreverse32(
+define i32 @test_bitreverse32(i32 %a) {
+; CHECK: brev.b32
+ %val = call i32 @llvm.bitreverse.i32(i32 %a)
+ ret i32 %val
+}
+
+; CHECK-LABEL: test_bitreverse64(
+define i64 @test_bitreverse64(i64 %a) {
+; CHECK: brev.b64
+ %val = call i64 @llvm.bitreverse.i64(i64 %a)
+ ret i64 %val
+}
+
+; CHECK-LABEL: test_popc32(
+define i32 @test_popc32(i32 %a) {
+; CHECK: popc.b32
+ %val = call i32 @llvm.ctpop.i32(i32 %a)
+ ret i32 %val
+}
+
+; CHECK-LABEL: test_popc64
+define i64 @test_popc64(i64 %a) {
+; CHECK: popc.b64
+; CHECK: cvt.u64.u32
+ %val = call i64 @llvm.ctpop.i64(i64 %a)
+ ret i64 %val
+}
+
+; NVPTX popc.b64 returns an i32 even though @llvm.ctpop.i64 returns an i64, so
+; if this function returns an i32, there's no need to do any type conversions
+; in the ptx.
+; CHECK-LABEL: test_popc64_trunc
+define i32 @test_popc64_trunc(i64 %a) {
+; CHECK: popc.b64
+; CHECK-NOT: cvt.
+ %val = call i64 @llvm.ctpop.i64(i64 %a)
+ %trunc = trunc i64 %val to i32
+ ret i32 %trunc
+}
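+
+; As a sketch (register names illustrative), test_popc64 above must widen
+; the 32-bit popc result back to the i64 return type:
+;   popc.b64    %r1, %rd1;   // counts bits of a 64-bit value, 32-bit result
+;   cvt.u64.u32 %rd2, %r1;   // widen for the i64 return
+; while test_popc64_trunc returns the 32-bit result directly with no cvt.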
+
+; llvm.ctpop.i16 is implemented by converting to i32, running popc.b32, and
+; then converting back to i16.
+; CHECK-LABEL: test_popc16
+define void @test_popc16(i16 %a, i16* %b) {
+; CHECK: cvt.u32.u16
+; CHECK: popc.b32
+; CHECK: cvt.u16.u32
+ %val = call i16 @llvm.ctpop.i16(i16 %a)
+ store i16 %val, i16* %b
+ ret void
+}
+
+; If we call llvm.ctpop.i16 and then zext the result to i32, we shouldn't need
+; to do any conversions after calling popc.b32, because that returns an i32.
+; CHECK-LABEL: test_popc16_to_32
+define i32 @test_popc16_to_32(i16 %a) {
+; CHECK: cvt.u32.u16
+; CHECK: popc.b32
+; CHECK-NOT: cvt.
+ %val = call i16 @llvm.ctpop.i16(i16 %a)
+ %zext = zext i16 %val to i32
+ ret i32 %zext
+}
declare float @llvm.fabs.f32(float)
declare double @llvm.fabs.f64(double)
declare float @llvm.nvvm.sqrt.f(float)
+declare float @llvm.sqrt.f32(float)
+declare i32 @llvm.bitreverse.i32(i32)
+declare i64 @llvm.bitreverse.i64(i64)
+declare i16 @llvm.ctpop.i16(i16)
+declare i32 @llvm.ctpop.i32(i32)
+declare i64 @llvm.ctpop.i64(i64)
diff --git a/test/CodeGen/NVPTX/ldg-invariant.ll b/test/CodeGen/NVPTX/ldg-invariant.ll
index 40dad1f1769b..311bea6f4164 100644
--- a/test/CodeGen/NVPTX/ldg-invariant.ll
+++ b/test/CodeGen/NVPTX/ldg-invariant.ll
@@ -10,6 +10,30 @@ define i32 @ld_global(i32 addrspace(1)* %ptr) {
ret i32 %a
}
+; CHECK-LABEL: @ld_global_v2i32
+define i32 @ld_global_v2i32(<2 x i32> addrspace(1)* %ptr) {
+; CHECK: ld.global.nc.v2.{{[a-z]}}32
+ %a = load <2 x i32>, <2 x i32> addrspace(1)* %ptr, !invariant.load !0
+ %v1 = extractelement <2 x i32> %a, i32 0
+ %v2 = extractelement <2 x i32> %a, i32 1
+ %sum = add i32 %v1, %v2
+ ret i32 %sum
+}
+
+; CHECK-LABEL: @ld_global_v4i32
+define i32 @ld_global_v4i32(<4 x i32> addrspace(1)* %ptr) {
+; CHECK: ld.global.nc.v4.{{[a-z]}}32
+ %a = load <4 x i32>, <4 x i32> addrspace(1)* %ptr, !invariant.load !0
+ %v1 = extractelement <4 x i32> %a, i32 0
+ %v2 = extractelement <4 x i32> %a, i32 1
+ %v3 = extractelement <4 x i32> %a, i32 2
+ %v4 = extractelement <4 x i32> %a, i32 3
+ %sum1 = add i32 %v1, %v2
+ %sum2 = add i32 %v3, %v4
+ %sum3 = add i32 %sum1, %sum2
+ ret i32 %sum3
+}
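+
+; The !invariant.load metadata on the loads above is what permits the
+; non-coherent (ldg) path, hence the ld.global.nc forms; ld_not_invariant
+; below must keep a plain ld.global.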
+
; CHECK-LABEL: @ld_not_invariant
define i32 @ld_not_invariant(i32 addrspace(1)* %ptr) {
; CHECK: ld.global.{{[a-z]}}32
diff --git a/test/CodeGen/NVPTX/ldparam-v4.ll b/test/CodeGen/NVPTX/ldparam-v4.ll
index ec306aafe854..4d082f6e9a58 100644
--- a/test/CodeGen/NVPTX/ldparam-v4.ll
+++ b/test/CodeGen/NVPTX/ldparam-v4.ll
@@ -2,8 +2,11 @@
declare <4 x float> @bar()
+; CHECK-LABEL: .func foo(
define void @foo(<4 x float>* %ptr) {
-; CHECK: ld.param.v4.f32
+; CHECK: ld.param.u32 %[[PTR:r[0-9]+]], [foo_param_0];
+; CHECK: ld.param.v4.f32 {[[E0:%f[0-9]+]], [[E1:%f[0-9]+]], [[E2:%f[0-9]+]], [[E3:%f[0-9]+]]}, [retval0+0];
+; CHECK: st.v4.f32 [%[[PTR]]], {[[E0]], [[E1]], [[E2]], [[E3]]}
%val = tail call <4 x float> @bar()
store <4 x float> %val, <4 x float>* %ptr
ret void
diff --git a/test/CodeGen/NVPTX/lower-aggr-copies.ll b/test/CodeGen/NVPTX/lower-aggr-copies.ll
index ef570982b808..192d4becb059 100644
--- a/test/CodeGen/NVPTX/lower-aggr-copies.ll
+++ b/test/CodeGen/NVPTX/lower-aggr-copies.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 | FileCheck %s --check-prefix PTX
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 -O0 | FileCheck %s --check-prefix PTX
; RUN: opt < %s -S -nvptx-lower-aggr-copies | FileCheck %s --check-prefix IR
; Verify that the NVPTXLowerAggrCopies pass works as expected - calls to
@@ -27,9 +27,9 @@ entry:
; PTX: LBB[[LABEL:[_0-9]+]]:
; PTX: ld.u8 %rs[[REG:[0-9]+]]
; PTX: st.u8 [%rd{{[0-9]+}}], %rs[[REG]]
-; PTX: add.s64 %rd[[COUNTER:[0-9]+]], %rd[[COUNTER]], 1
-; PTX-NEXT: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
-; PTX-NEXT: @%p[[PRED]] bra LBB[[LABEL]]
+; PTX: add.s64 %rd[[COUNTER:[0-9]+]], %rd{{[0-9]+}}, 1
+; PTX: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
+; PTX: @%p[[PRED]] bra LBB[[LABEL]]
}
define i8* @memcpy_volatile_caller(i8* %dst, i8* %src, i64 %n) #0 {
@@ -45,9 +45,9 @@ entry:
; PTX: LBB[[LABEL:[_0-9]+]]:
; PTX: ld.volatile.u8 %rs[[REG:[0-9]+]]
; PTX: st.volatile.u8 [%rd{{[0-9]+}}], %rs[[REG]]
-; PTX: add.s64 %rd[[COUNTER:[0-9]+]], %rd[[COUNTER]], 1
-; PTX-NEXT: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
-; PTX-NEXT: @%p[[PRED]] bra LBB[[LABEL]]
+; PTX: add.s64 %rd[[COUNTER:[0-9]+]], %rd{{[0-9]+}}, 1
+; PTX: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
+; PTX: @%p[[PRED]] bra LBB[[LABEL]]
}
define i8* @memcpy_casting_caller(i32* %dst, i32* %src, i64 %n) #0 {
@@ -78,12 +78,26 @@ entry:
; IR-NEXT: store i8 [[VAL]], i8* [[STOREPTR]]
; PTX-LABEL: .visible .func (.param .b64 func_retval0) memset_caller(
-; PTX: ld.param.u8 %rs[[REG:[0-9]+]]
+; PTX: ld.param.u32 %r[[C:[0-9]+]]
+; PTX: cvt.u16.u32 %rs[[REG:[0-9]+]], %r[[C]];
; PTX: LBB[[LABEL:[_0-9]+]]:
; PTX: st.u8 [%rd{{[0-9]+}}], %rs[[REG]]
-; PTX: add.s64 %rd[[COUNTER:[0-9]+]], %rd[[COUNTER]], 1
-; PTX-NEXT: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
-; PTX-NEXT: @%p[[PRED]] bra LBB[[LABEL]]
+; PTX: add.s64 %rd[[COUNTER:[0-9]+]], %rd{{[0-9]+}}, 1
+; PTX: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
+; PTX: @%p[[PRED]] bra LBB[[LABEL]]
+}
+
+define i8* @volatile_memset_caller(i8* %dst, i32 %c, i64 %n) #0 {
+entry:
+ %0 = trunc i32 %c to i8
+ tail call void @llvm.memset.p0i8.i64(i8* %dst, i8 %0, i64 %n, i32 1, i1 true)
+ ret i8* %dst
+
+; IR-LABEL: @volatile_memset_caller
+; IR: [[VAL:%[0-9]+]] = trunc i32 %c to i8
+; IR: loadstoreloop:
+; IR: [[STOREPTR:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64
+; IR-NEXT: store volatile i8 [[VAL]], i8* [[STOREPTR]]
}
define i8* @memmove_caller(i8* %dst, i8* %src, i64 %n) #0 {
@@ -100,12 +114,12 @@ entry:
; PTX-LABEL: .visible .func (.param .b64 func_retval0) memmove_caller(
; PTX: ld.param.u64 %rd[[N:[0-9]+]]
-; PTX: setp.eq.s64 %p[[NEQ0:[0-9]+]], %rd[[N]], 0
-; PTX: setp.ge.u64 %p[[SRC_GT_THAN_DST:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; PTX-DAG: setp.eq.s64 %p[[NEQ0:[0-9]+]], %rd[[N]], 0
+; PTX-DAG: setp.ge.u64 %p[[SRC_GT_THAN_DST:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
; PTX-NEXT: @%p[[SRC_GT_THAN_DST]] bra LBB[[FORWARD_BB:[0-9_]+]]
; -- this is the backwards copying BB
; PTX: @%p[[NEQ0]] bra LBB[[EXIT:[0-9_]+]]
-; PTX: add.s64 %rd[[N]], %rd[[N]], -1
+; PTX: add.s64 %rd{{[0-9]}}, %rd{{[0-9]}}, -1
; PTX: ld.u8 %rs[[ELEMENT:[0-9]+]]
; PTX: st.u8 [%rd{{[0-9]+}}], %rs[[ELEMENT]]
; -- this is the forwards copying BB
@@ -113,7 +127,7 @@ entry:
; PTX: @%p[[NEQ0]] bra LBB[[EXIT]]
; PTX: ld.u8 %rs[[ELEMENT2:[0-9]+]]
; PTX: st.u8 [%rd{{[0-9]+}}], %rs[[ELEMENT2]]
-; PTX: add.s64 %rd[[INDEX:[0-9]+]], %rd[[INDEX]], 1
+; PTX: add.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, 1
; -- exit block
; PTX: LBB[[EXIT]]:
; PTX-NEXT: st.param.b64 [func_retval0
diff --git a/test/CodeGen/NVPTX/lower-alloca.ll b/test/CodeGen/NVPTX/lower-alloca.ll
index 4177cd1fe977..3db225ef0e75 100644
--- a/test/CodeGen/NVPTX/lower-alloca.ll
+++ b/test/CodeGen/NVPTX/lower-alloca.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -S -nvptx-lower-alloca -nvptx-infer-addrspace | FileCheck %s
+; RUN: opt < %s -S -nvptx-lower-alloca -infer-address-spaces | FileCheck %s
; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 | FileCheck %s --check-prefix PTX
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
diff --git a/test/CodeGen/NVPTX/math-intrins.ll b/test/CodeGen/NVPTX/math-intrins.ll
index de911d050755..828a8807dcfa 100644
--- a/test/CodeGen/NVPTX/math-intrins.ll
+++ b/test/CodeGen/NVPTX/math-intrins.ll
@@ -21,6 +21,8 @@ declare float @llvm.minnum.f32(float, float) #0
declare double @llvm.minnum.f64(double, double) #0
declare float @llvm.maxnum.f32(float, float) #0
declare double @llvm.maxnum.f64(double, double) #0
+declare float @llvm.fma.f32(float, float, float) #0
+declare double @llvm.fma.f64(double, double, double) #0
; ---- ceil ----
@@ -257,5 +259,28 @@ define double @max_double(double %a, double %b) {
ret double %x
}
+; ---- fma ----
+
+; CHECK-LABEL: @fma_float
+define float @fma_float(float %a, float %b, float %c) {
+ ; CHECK: fma.rn.f32
+ %x = call float @llvm.fma.f32(float %a, float %b, float %c)
+ ret float %x
+}
+
+; CHECK-LABEL: @fma_float_ftz
+define float @fma_float_ftz(float %a, float %b, float %c) #1 {
+ ; CHECK: fma.rn.ftz.f32
+ %x = call float @llvm.fma.f32(float %a, float %b, float %c)
+ ret float %x
+}
+
+; CHECK-LABEL: @fma_double
+define double @fma_double(double %a, double %b, double %c) {
+ ; CHECK: fma.rn.f64
+ %x = call double @llvm.fma.f64(double %a, double %b, double %c)
+ ret double %x
+}
+
attributes #0 = { nounwind readnone }
attributes #1 = { "nvptx-f32ftz" = "true" }
diff --git a/test/CodeGen/NVPTX/misaligned-vector-ldst.ll b/test/CodeGen/NVPTX/misaligned-vector-ldst.ll
index 2ad72b018851..036d9638ceac 100644
--- a/test/CodeGen/NVPTX/misaligned-vector-ldst.ll
+++ b/test/CodeGen/NVPTX/misaligned-vector-ldst.ll
@@ -41,6 +41,64 @@ define <4 x float> @t4(i8* %p1) {
ret <4 x float> %r
}
+; CHECK-LABEL: .visible .func test_v1halfp0a1(
+; CHECK-DAG: ld.param.u64 %[[FROM:rd?[0-9]+]], [test_v1halfp0a1_param_0];
+; CHECK-DAG: ld.param.u64 %[[TO:rd?[0-9]+]], [test_v1halfp0a1_param_1];
+; CHECK-DAG: ld.u8 [[B0:%r[sd]?[0-9]+]], [%[[FROM]]]
+; CHECK-DAG: st.u8 [%[[TO]]], [[B0]]
+; CHECK-DAG: ld.u8 [[B1:%r[sd]?[0-9]+]], [%[[FROM]]+1]
+; CHECK-DAG: st.u8 [%[[TO]]+1], [[B1]]
+; CHECK: ret
+define void @test_v1halfp0a1(<1 x half> * noalias readonly %from, <1 x half> * %to) {
+ %1 = load <1 x half>, <1 x half> * %from , align 1
+ store <1 x half> %1, <1 x half> * %to , align 1
+ ret void
+}
+
+; CHECK-LABEL: .visible .func test_v2halfp0a1(
+; CHECK-DAG: ld.param.u64 %[[FROM:rd?[0-9]+]], [test_v2halfp0a1_param_0];
+; CHECK-DAG: ld.param.u64 %[[TO:rd?[0-9]+]], [test_v2halfp0a1_param_1];
+; CHECK-DAG: ld.u8 [[B0:%r[sd]?[0-9]+]], [%[[FROM]]]
+; CHECK-DAG: st.u8 [%[[TO]]],
+; CHECK-DAG: ld.u8 [[B1:%r[sd]?[0-9]+]], [%[[FROM]]+1]
+; CHECK-DAG: st.u8 [%[[TO]]+1],
+; CHECK-DAG: ld.u8 [[B2:%r[sd]?[0-9]+]], [%[[FROM]]+2]
+; CHECK-DAG: st.u8 [%[[TO]]+2],
+; CHECK-DAG: ld.u8 [[B3:%r[sd]?[0-9]+]], [%[[FROM]]+3]
+; CHECK-DAG: st.u8 [%[[TO]]+3],
+; CHECK: ret
+define void @test_v2halfp0a1(<2 x half> * noalias readonly %from, <2 x half> * %to) {
+ %1 = load <2 x half>, <2 x half> * %from , align 1
+ store <2 x half> %1, <2 x half> * %to , align 1
+ ret void
+}
+
+; CHECK-LABEL: .visible .func test_v4halfp0a1(
+; CHECK-DAG: ld.param.u64 %[[FROM:rd?[0-9]+]], [test_v4halfp0a1_param_0];
+; CHECK-DAG: ld.param.u64 %[[TO:rd?[0-9]+]], [test_v4halfp0a1_param_1];
+; CHECK-DAG: ld.u8 [[B0:%r[sd]?[0-9]+]], [%[[FROM]]]
+; CHECK-DAG: st.u8 [%[[TO]]], [[B0]]
+; CHECK-DAG: ld.u8 [[B1:%r[sd]?[0-9]+]], [%[[FROM]]+1]
+; CHECK-DAG: st.u8 [%[[TO]]+1], [[B1]]
+; CHECK-DAG: ld.u8 [[B2:%r[sd]?[0-9]+]], [%[[FROM]]+2]
+; CHECK-DAG: st.u8 [%[[TO]]+2], [[B2]]
+; CHECK-DAG: ld.u8 [[B3:%r[sd]?[0-9]+]], [%[[FROM]]+3]
+; CHECK-DAG: st.u8 [%[[TO]]+3], [[B3]]
+; CHECK-DAG: ld.u8 [[B4:%r[sd]?[0-9]+]], [%[[FROM]]+4]
+; CHECK-DAG: st.u8 [%[[TO]]+4], [[B4]]
+; CHECK-DAG: ld.u8 [[B5:%r[sd]?[0-9]+]], [%[[FROM]]+5]
+; CHECK-DAG: st.u8 [%[[TO]]+5], [[B5]]
+; CHECK-DAG: ld.u8 [[B6:%r[sd]?[0-9]+]], [%[[FROM]]+6]
+; CHECK-DAG: st.u8 [%[[TO]]+6], [[B6]]
+; CHECK-DAG: ld.u8 [[B7:%r[sd]?[0-9]+]], [%[[FROM]]+7]
+; CHECK-DAG: st.u8 [%[[TO]]+7], [[B7]]
+; CHECK: ret
+define void @test_v4halfp0a1(<4 x half> * noalias readonly %from, <4 x half> * %to) {
+ %1 = load <4 x half>, <4 x half> * %from , align 1
+ store <4 x half> %1, <4 x half> * %to , align 1
+ ret void
+}
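+
+; With align 1, the half-vector accesses above cannot use 16-bit or wider
+; memory operations, so an <N x half> load/store pair is lowered to 2*N
+; single-byte ld.u8/st.u8 pairs, as the per-byte CHECK-DAG lines verify.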
+
; CHECK-LABEL: s1
define void @s1(<4 x float>* %p1, <4 x float> %v) {
diff --git a/test/CodeGen/NVPTX/named-barriers.ll b/test/CodeGen/NVPTX/named-barriers.ll
new file mode 100644
index 000000000000..accc0fd6fef7
--- /dev/null
+++ b/test/CodeGen/NVPTX/named-barriers.ll
@@ -0,0 +1,40 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
+
+; Use bar.sync to arrive at a pre-computed barrier number and
+; wait for all threads in the CTA to also arrive:
+define ptx_device void @test_barrier_named_cta() {
+; CHECK: mov.u32 %r[[REG0:[0-9]+]], 0;
+; CHECK: bar.sync %r[[REG0]];
+; CHECK: mov.u32 %r[[REG1:[0-9]+]], 10;
+; CHECK: bar.sync %r[[REG1]];
+; CHECK: mov.u32 %r[[REG2:[0-9]+]], 15;
+; CHECK: bar.sync %r[[REG2]];
+; CHECK: ret;
+ call void @llvm.nvvm.barrier.n(i32 0)
+ call void @llvm.nvvm.barrier.n(i32 10)
+ call void @llvm.nvvm.barrier.n(i32 15)
+ ret void
+}
+
+; Use bar.sync to arrive at a pre-computed barrier number and
+; wait for fixed number of cooperating threads to arrive:
+define ptx_device void @test_barrier_named() {
+; CHECK: mov.u32 %r[[REG0A:[0-9]+]], 32;
+; CHECK: mov.u32 %r[[REG0B:[0-9]+]], 0;
+; CHECK: bar.sync %r[[REG0B]], %r[[REG0A]];
+; CHECK: mov.u32 %r[[REG1A:[0-9]+]], 352;
+; CHECK: mov.u32 %r[[REG1B:[0-9]+]], 10;
+; CHECK: bar.sync %r[[REG1B]], %r[[REG1A]];
+; CHECK: mov.u32 %r[[REG2A:[0-9]+]], 992;
+; CHECK: mov.u32 %r[[REG2B:[0-9]+]], 15;
+; CHECK: bar.sync %r[[REG2B]], %r[[REG2A]];
+; CHECK: ret;
+ call void @llvm.nvvm.barrier(i32 0, i32 32)
+ call void @llvm.nvvm.barrier(i32 10, i32 352)
+ call void @llvm.nvvm.barrier(i32 15, i32 992)
+ ret void
+}
+
+declare void @llvm.nvvm.barrier(i32, i32)
+declare void @llvm.nvvm.barrier.n(i32)
diff --git a/test/CodeGen/NVPTX/nvvm-reflect.ll b/test/CodeGen/NVPTX/nvvm-reflect.ll
index 8c75dfc30a56..165597d6baff 100644
--- a/test/CodeGen/NVPTX/nvvm-reflect.ll
+++ b/test/CodeGen/NVPTX/nvvm-reflect.ll
@@ -1,30 +1,38 @@
-; RUN: opt < %s -S -nvvm-reflect -nvvm-reflect-list USE_MUL=0 -O2 | FileCheck %s --check-prefix=USE_MUL_0
-; RUN: opt < %s -S -nvvm-reflect -nvvm-reflect-list USE_MUL=1 -O2 | FileCheck %s --check-prefix=USE_MUL_1
+; We run nvvm-reflect (and then optimize) this module twice, once with metadata
+; that enables FTZ, and again with metadata that disables it.
-@str = private unnamed_addr addrspace(4) constant [8 x i8] c"USE_MUL\00"
+; RUN: cat %s > %t.noftz
+; RUN: echo '!0 = !{i32 4, !"nvvm-reflect-ftz", i32 0}' >> %t.noftz
+; RUN: opt %t.noftz -S -nvvm-reflect -O2 \
+; RUN: | FileCheck %s --check-prefix=USE_FTZ_0 --check-prefix=CHECK
+
+; RUN: cat %s > %t.ftz
+; RUN: echo '!0 = !{i32 4, !"nvvm-reflect-ftz", i32 1}' >> %t.ftz
+; RUN: opt %t.ftz -S -nvvm-reflect -O2 \
+; RUN: | FileCheck %s --check-prefix=USE_FTZ_1 --check-prefix=CHECK
+
+@str = private unnamed_addr addrspace(4) constant [11 x i8] c"__CUDA_FTZ\00"
declare i32 @__nvvm_reflect(i8*)
declare i8* @llvm.nvvm.ptr.constant.to.gen.p0i8.p4i8(i8 addrspace(4)*)
+; CHECK-LABEL: @foo
define float @foo(float %a, float %b) {
-; USE_MUL_0: define float @foo
-; USE_MUL_0-NOT: call i32 @__nvvm_reflect
-; USE_MUL_1: define float @foo
-; USE_MUL_1-NOT: call i32 @__nvvm_reflect
- %ptr = tail call i8* @llvm.nvvm.ptr.constant.to.gen.p0i8.p4i8(i8 addrspace(4)* getelementptr inbounds ([8 x i8], [8 x i8] addrspace(4)* @str, i32 0, i32 0))
+; CHECK-NOT: call i32 @__nvvm_reflect
+ %ptr = tail call i8* @llvm.nvvm.ptr.constant.to.gen.p0i8.p4i8(i8 addrspace(4)* getelementptr inbounds ([11 x i8], [11 x i8] addrspace(4)* @str, i32 0, i32 0))
%reflect = tail call i32 @__nvvm_reflect(i8* %ptr)
%cmp = icmp ugt i32 %reflect, 0
br i1 %cmp, label %use_mul, label %use_add
use_mul:
-; USE_MUL_1: fmul float %a, %b
-; USE_MUL_0-NOT: fadd float %a, %b
+; USE_FTZ_1: fmul float %a, %b
+; USE_FTZ_0-NOT: fadd float %a, %b
%ret1 = fmul float %a, %b
br label %exit
use_add:
-; USE_MUL_0: fadd float %a, %b
-; USE_MUL_1-NOT: fmul float %a, %b
+; USE_FTZ_0: fadd float %a, %b
+; USE_FTZ_1-NOT: fmul float %a, %b
%ret2 = fadd float %a, %b
br label %exit
@@ -35,14 +43,12 @@ exit:
declare i32 @llvm.nvvm.reflect.p0i8(i8*)
-; USE_MUL_0: define i32 @intrinsic
-; USE_MUL_1: define i32 @intrinsic
+; CHECK-LABEL: define i32 @intrinsic
define i32 @intrinsic() {
-; USE_MUL_0-NOT: call i32 @llvm.nvvm.reflect
-; USE_MUL_0: ret i32 0
-; USE_MUL_1-NOT: call i32 @llvm.nvvm.reflect
-; USE_MUL_1: ret i32 1
- %ptr = tail call i8* @llvm.nvvm.ptr.constant.to.gen.p0i8.p4i8(i8 addrspace(4)* getelementptr inbounds ([8 x i8], [8 x i8] addrspace(4)* @str, i32 0, i32 0))
+; CHECK-NOT: call i32 @llvm.nvvm.reflect
+; USE_FTZ_0: ret i32 0
+; USE_FTZ_1: ret i32 1
+ %ptr = tail call i8* @llvm.nvvm.ptr.constant.to.gen.p0i8.p4i8(i8 addrspace(4)* getelementptr inbounds ([11 x i8], [11 x i8] addrspace(4)* @str, i32 0, i32 0))
%reflect = tail call i32 @llvm.nvvm.reflect.p0i8(i8* %ptr)
ret i32 %reflect
}
@@ -50,26 +56,24 @@ define i32 @intrinsic() {
; CUDA-7.0 passes the __nvvm_reflect argument slightly differently.
; Verify that it works, too.
-@"$str" = private addrspace(1) constant [8 x i8] c"USE_MUL\00"
+@"$str" = private addrspace(1) constant [11 x i8] c"__CUDA_FTZ\00"
+; CHECK-LABEL: @bar
define float @bar(float %a, float %b) {
-; USE_MUL_0: define float @bar
-; USE_MUL_0-NOT: call i32 @__nvvm_reflect
-; USE_MUL_1: define float @bar
-; USE_MUL_1-NOT: call i32 @__nvvm_reflect
- %reflect = call i32 @__nvvm_reflect(i8* addrspacecast (i8 addrspace(1)* getelementptr inbounds ([8 x i8], [8 x i8] addrspace(1)* @"$str", i32 0, i32 0) to i8*))
+; CHECK-NOT: call i32 @__nvvm_reflect
+ %reflect = call i32 @__nvvm_reflect(i8* addrspacecast (i8 addrspace(1)* getelementptr inbounds ([11 x i8], [11 x i8] addrspace(1)* @"$str", i32 0, i32 0) to i8*))
%cmp = icmp ne i32 %reflect, 0
br i1 %cmp, label %use_mul, label %use_add
use_mul:
-; USE_MUL_1: fmul float %a, %b
-; USE_MUL_0-NOT: fadd float %a, %b
+; USE_FTZ_1: fmul float %a, %b
+; USE_FTZ_0-NOT: fadd float %a, %b
%ret1 = fmul float %a, %b
br label %exit
use_add:
-; USE_MUL_0: fadd float %a, %b
-; USE_MUL_1-NOT: fmul float %a, %b
+; USE_FTZ_0: fadd float %a, %b
+; USE_FTZ_1-NOT: fmul float %a, %b
%ret2 = fadd float %a, %b
br label %exit
@@ -77,3 +81,6 @@ exit:
%ret = phi float [%ret1, %use_mul], [%ret2, %use_add]
ret float %ret
}
+
+!llvm.module.flags = !{!0}
+; A module flag is added to the end of this file by the RUN lines at the top.
diff --git a/test/CodeGen/NVPTX/param-load-store.ll b/test/CodeGen/NVPTX/param-load-store.ll
new file mode 100644
index 000000000000..8a67567acc96
--- /dev/null
+++ b/test/CodeGen/NVPTX/param-load-store.ll
@@ -0,0 +1,939 @@
+; Verifies correctness of load/store of parameters and return values.
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 -O0 | FileCheck %s
+
+%s_i1 = type { i1 }
+%s_i8 = type { i8 }
+%s_i16 = type { i16 }
+%s_f16 = type { half }
+%s_i32 = type { i32 }
+%s_f32 = type { float }
+%s_i64 = type { i64 }
+%s_f64 = type { double }
+
+; More complicated types. i64 is used to increase the natural alignment
+; requirement for the type.
+%s_i32x4 = type { i32, i32, i32, i32, i64}
+%s_i32f32 = type { i32, float, i32, float, i64}
+%s_i8i32x4 = type { i32, i32, i8, i32, i32, i64}
+%s_i8i32x4p = type <{ i32, i32, i8, i32, i32, i64}>
+%s_crossfield = type { i32, [2 x i32], <4 x i32>, [3 x {i32, i32, i32}]}
+; All scalar parameters must be at least 32 bits in size.
+; i1 is loaded/stored as i8.
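+; For illustration (a hypothetical signature, not part of this test): an
+; "i8 @f(i8)" function is lowered to a .b32 parameter and a .b32 return
+; value, as the i1/i8/i16 tests below verify.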
+
+; CHECK: .func (.param .b32 func_retval0)
+; CHECK-LABEL: test_i1(
+; CHECK-NEXT: .param .b32 test_i1_param_0
+; CHECK: ld.param.u8 [[A8:%r[0-9]+]], [test_i1_param_0];
+; CHECK: and.b32 [[A:%r[0-9]+]], [[A8]], 1;
+; CHECK: .param .b32 param0;
+; CHECK: st.param.b32 [param0+0], [[A]]
+; CHECK: .param .b32 retval0;
+; CHECK: call.uni
+; CHECK-NEXT: test_i1,
+; CHECK: ld.param.b32 [[R8:%r[0-9]+]], [retval0+0];
+; CHECK: and.b32 [[R:%r[0-9]+]], [[R8]], 1;
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ret;
+define i1 @test_i1(i1 %a) {
+ %r = tail call i1 @test_i1(i1 %a);
+ ret i1 %r;
+}
+
+; Signed i1 is a somewhat special case. We only care about one bit and
+; then use neg.s32 to convert it to 32-bit -1 if it's set.
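+; For example (an assumed illustration, not checked output): if the low bit
+; of the input is 1, "and.b32 %r, %a, 1" yields 1, and "neg.s32 %r, %r"
+; turns that into 0xffffffff, the 32-bit representation of -1.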
+; CHECK: .func (.param .b32 func_retval0)
+; CHECK-LABEL: test_i1s(
+; CHECK-NEXT: .param .b32 test_i1s_param_0
+; CHECK: ld.param.u8 [[A8:%rs[0-9]+]], [test_i1s_param_0];
+; CHECK: cvt.u32.u16 [[A32:%r[0-9]+]], [[A8]];
+; CHECK: and.b32 [[A1:%r[0-9]+]], [[A32]], 1;
+; CHECK: neg.s32 [[A:%r[0-9]+]], [[A1]];
+; CHECK: .param .b32 param0;
+; CHECK: st.param.b32 [param0+0], [[A]];
+; CHECK: .param .b32 retval0;
+; CHECK: call.uni
+; CHECK: ld.param.b32 [[R8:%r[0-9]+]], [retval0+0];
+; CHECK: and.b32 [[R1:%r[0-9]+]], [[R8]], 1;
+; CHECK: neg.s32 [[R:%r[0-9]+]], [[R1]];
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define signext i1 @test_i1s(i1 signext %a) {
+ %r = tail call signext i1 @test_i1s(i1 signext %a);
+ ret i1 %r;
+}
+
+; Make sure that i1 loads are vectorized as i8 loads, respecting each element alignment.
+; CHECK: .func (.param .align 4 .b8 func_retval0[4])
+; CHECK-LABEL: test_v3i1(
+; CHECK-NEXT: .param .align 4 .b8 test_v3i1_param_0[4]
+; CHECK-DAG: ld.param.u8 [[E2:%rs[0-9]+]], [test_v3i1_param_0+2];
+; CHECK-DAG: ld.param.v2.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [test_v3i1_param_0]
+; CHECK: .param .align 4 .b8 param0[4];
+; CHECK-DAG: st.param.v2.b8 [param0+0], {[[E0]], [[E1]]};
+; CHECK-DAG: st.param.b8 [param0+2], [[E2]];
+; CHECK: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v3i1,
+; CHECK-DAG: ld.param.v2.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.b8 [[RE2:%rs[0-9]+]], [retval0+2];
+; CHECK-DAG: st.param.v2.b8 [func_retval0+0], {[[RE0]], [[RE1]]}
+; CHECK-DAG: st.param.b8 [func_retval0+2], [[RE2]];
+; CHECK-NEXT: ret;
+define <3 x i1> @test_v3i1(<3 x i1> %a) {
+ %r = tail call <3 x i1> @test_v3i1(<3 x i1> %a);
+ ret <3 x i1> %r;
+}
+
+; CHECK: .func (.param .align 4 .b8 func_retval0[4])
+; CHECK-LABEL: test_v4i1(
+; CHECK-NEXT: .param .align 4 .b8 test_v4i1_param_0[4]
+; CHECK: ld.param.v4.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v4i1_param_0]
+; CHECK: .param .align 4 .b8 param0[4];
+; CHECK: st.param.v4.b8 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK: test_v4i1,
+; CHECK: ld.param.v4.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
+; CHECK: st.param.v4.b8 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]};
+; CHECK-NEXT: ret;
+define <4 x i1> @test_v4i1(<4 x i1> %a) {
+ %r = tail call <4 x i1> @test_v4i1(<4 x i1> %a);
+ ret <4 x i1> %r;
+}
+
+; CHECK: .func (.param .align 8 .b8 func_retval0[8])
+; CHECK-LABEL: test_v5i1(
+; CHECK-NEXT: .param .align 8 .b8 test_v5i1_param_0[8]
+; CHECK-DAG: ld.param.u8 [[E4:%rs[0-9]+]], [test_v5i1_param_0+4];
+; CHECK-DAG: ld.param.v4.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v5i1_param_0]
+; CHECK: .param .align 8 .b8 param0[8];
+; CHECK-DAG: st.param.v4.b8 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK-DAG: st.param.b8 [param0+4], [[E4]];
+; CHECK: .param .align 8 .b8 retval0[8];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v5i1,
+; CHECK-DAG: ld.param.v4.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.b8 [[RE4:%rs[0-9]+]], [retval0+4];
+; CHECK-DAG: st.param.v4.b8 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-DAG: st.param.b8 [func_retval0+4], [[RE4]];
+; CHECK-NEXT: ret;
+define <5 x i1> @test_v5i1(<5 x i1> %a) {
+ %r = tail call <5 x i1> @test_v5i1(<5 x i1> %a);
+ ret <5 x i1> %r;
+}
+
+; Unsigned i8 is loaded directly into a 32-bit register.
+; CHECK: .func (.param .b32 func_retval0)
+; CHECK-LABEL: test_i8(
+; CHECK-NEXT: .param .b32 test_i8_param_0
+; CHECK: ld.param.u8 [[A8:%rs[0-9]+]], [test_i8_param_0];
+; CHECK: cvt.u32.u16 [[A32:%r[0-9]+]], [[A8]];
+; CHECK: and.b32 [[A:%r[0-9]+]], [[A32]], 255;
+; CHECK: .param .b32 param0;
+; CHECK: st.param.b32 [param0+0], [[A]];
+; CHECK: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK: test_i8,
+; CHECK: ld.param.b32 [[R32:%r[0-9]+]], [retval0+0];
+; CHECK: and.b32 [[R:%r[0-9]+]], [[R32]], 255;
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i8 @test_i8(i8 %a) {
+ %r = tail call i8 @test_i8(i8 %a);
+ ret i8 %r;
+}
+
+; Signed i8 is loaded into a 16-bit register, which is then sign-extended to i32.
+; CHECK: .func (.param .b32 func_retval0)
+; CHECK-LABEL: test_i8s(
+; CHECK-NEXT: .param .b32 test_i8s_param_0
+; CHECK: ld.param.s8 [[A8:%rs[0-9]+]], [test_i8s_param_0];
+; CHECK: cvt.s32.s16 [[A:%r[0-9]+]], [[A8]];
+; CHECK: .param .b32 param0;
+; CHECK: st.param.b32 [param0+0], [[A]];
+; CHECK: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK: test_i8s,
+; CHECK: ld.param.b32 [[R32:%r[0-9]+]], [retval0+0];
+; -- This is suspicious (though correct) -- why not cvt.u8.u32, cvt.s8.s32?
+; CHECK: cvt.u16.u32 [[R16:%rs[0-9]+]], [[R32]];
+; CHECK: cvt.s32.s16 [[R:%r[0-9]+]], [[R16]];
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define signext i8 @test_i8s(i8 signext %a) {
+ %r = tail call signext i8 @test_i8s(i8 signext %a);
+ ret i8 %r;
+}
+
+; CHECK: .func (.param .align 4 .b8 func_retval0[4])
+; CHECK-LABEL: test_v3i8(
+; CHECK-NEXT: .param .align 4 .b8 test_v3i8_param_0[4]
+; CHECK-DAG: ld.param.u8 [[E2:%rs[0-9]+]], [test_v3i8_param_0+2];
+; CHECK-DAG: ld.param.v2.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [test_v3i8_param_0];
+; CHECK: .param .align 4 .b8 param0[4];
+; CHECK: st.param.v2.b8 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.b8 [param0+2], [[E2]];
+; CHECK: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v3i8,
+; CHECK-DAG: ld.param.v2.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.b8 [[RE2:%rs[0-9]+]], [retval0+2];
+; CHECK-DAG: st.param.v2.b8 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK-DAG: st.param.b8 [func_retval0+2], [[RE2]];
+; CHECK-NEXT: ret;
+define <3 x i8> @test_v3i8(<3 x i8> %a) {
+ %r = tail call <3 x i8> @test_v3i8(<3 x i8> %a);
+ ret <3 x i8> %r;
+}
+
+; CHECK: .func (.param .align 4 .b8 func_retval0[4])
+; CHECK-LABEL: test_v4i8(
+; CHECK-NEXT: .param .align 4 .b8 test_v4i8_param_0[4]
+; CHECK: ld.param.v4.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v4i8_param_0]
+; CHECK: .param .align 4 .b8 param0[4];
+; CHECK: st.param.v4.b8 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v4i8,
+; CHECK: ld.param.v4.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
+; CHECK: st.param.v4.b8 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-NEXT: ret;
+define <4 x i8> @test_v4i8(<4 x i8> %a) {
+ %r = tail call <4 x i8> @test_v4i8(<4 x i8> %a);
+ ret <4 x i8> %r;
+}
+
+; CHECK: .func (.param .align 8 .b8 func_retval0[8])
+; CHECK-LABEL: test_v5i8(
+; CHECK-NEXT: .param .align 8 .b8 test_v5i8_param_0[8]
+; CHECK-DAG: ld.param.u8 [[E4:%rs[0-9]+]], [test_v5i8_param_0+4];
+; CHECK-DAG: ld.param.v4.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v5i8_param_0]
+; CHECK: .param .align 8 .b8 param0[8];
+; CHECK-DAG: st.param.v4.b8 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK-DAG: st.param.b8 [param0+4], [[E4]];
+; CHECK: .param .align 8 .b8 retval0[8];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v5i8,
+; CHECK-DAG: ld.param.v4.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.b8 [[RE4:%rs[0-9]+]], [retval0+4];
+; CHECK-DAG: st.param.v4.b8 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-DAG: st.param.b8 [func_retval0+4], [[RE4]];
+; CHECK-NEXT: ret;
+define <5 x i8> @test_v5i8(<5 x i8> %a) {
+ %r = tail call <5 x i8> @test_v5i8(<5 x i8> %a);
+ ret <5 x i8> %r;
+}
+
+; CHECK: .func (.param .b32 func_retval0)
+; CHECK-LABEL: test_i16(
+; CHECK-NEXT: .param .b32 test_i16_param_0
+; CHECK: ld.param.u16 [[E16:%rs[0-9]+]], [test_i16_param_0];
+; CHECK: cvt.u32.u16 [[E32:%r[0-9]+]], [[E16]];
+; CHECK: .param .b32 param0;
+; CHECK: st.param.b32 [param0+0], [[E32]];
+; CHECK: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_i16,
+; CHECK: ld.param.b32 [[RE32:%r[0-9]+]], [retval0+0];
+; CHECK: and.b32 [[R:%r[0-9]+]], [[RE32]], 65535;
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i16 @test_i16(i16 %a) {
+ %r = tail call i16 @test_i16(i16 %a);
+ ret i16 %r;
+}
+
+; CHECK: .func (.param .b32 func_retval0)
+; CHECK-LABEL: test_i16s(
+; CHECK-NEXT: .param .b32 test_i16s_param_0
+; CHECK: ld.param.u16 [[E16:%rs[0-9]+]], [test_i16s_param_0];
+; CHECK: cvt.s32.s16 [[E32:%r[0-9]+]], [[E16]];
+; CHECK: .param .b32 param0;
+; CHECK: st.param.b32 [param0+0], [[E32]];
+; CHECK: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_i16s,
+; CHECK: ld.param.b32 [[RE32:%r[0-9]+]], [retval0+0];
+; CHECK: cvt.s32.s16 [[R:%r[0-9]+]], [[RE32]];
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define signext i16 @test_i16s(i16 signext %a) {
+ %r = tail call signext i16 @test_i16s(i16 signext %a);
+ ret i16 %r;
+}
+
+; CHECK: .func (.param .align 8 .b8 func_retval0[8])
+; CHECK-LABEL: test_v3i16(
+; CHECK-NEXT: .param .align 8 .b8 test_v3i16_param_0[8]
+; CHECK-DAG: ld.param.u16 [[E2:%rs[0-9]+]], [test_v3i16_param_0+4];
+; CHECK-DAG: ld.param.v2.u16 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [test_v3i16_param_0];
+; CHECK: .param .align 8 .b8 param0[8];
+; CHECK: st.param.v2.b16 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.b16 [param0+4], [[E2]];
+; CHECK: .param .align 8 .b8 retval0[8];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v3i16,
+; CHECK: ld.param.v2.b16 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.b16 [[RE2:%rs[0-9]+]], [retval0+4];
+; CHECK-DAG: st.param.v2.b16 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK-DAG: st.param.b16 [func_retval0+4], [[RE2]];
+; CHECK-NEXT: ret;
+define <3 x i16> @test_v3i16(<3 x i16> %a) {
+ %r = tail call <3 x i16> @test_v3i16(<3 x i16> %a);
+ ret <3 x i16> %r;
+}
+
+; CHECK: .func (.param .align 8 .b8 func_retval0[8])
+; CHECK-LABEL: test_v4i16(
+; CHECK-NEXT: .param .align 8 .b8 test_v4i16_param_0[8]
+; CHECK: ld.param.v4.u16 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v4i16_param_0]
+; CHECK: .param .align 8 .b8 param0[8];
+; CHECK: st.param.v4.b16 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK: .param .align 8 .b8 retval0[8];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v4i16,
+; CHECK: ld.param.v4.b16 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
+; CHECK: st.param.v4.b16 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-NEXT: ret;
+define <4 x i16> @test_v4i16(<4 x i16> %a) {
+ %r = tail call <4 x i16> @test_v4i16(<4 x i16> %a);
+ ret <4 x i16> %r;
+}
+
+; CHECK: .func (.param .align 16 .b8 func_retval0[16])
+; CHECK-LABEL: test_v5i16(
+; CHECK-NEXT: .param .align 16 .b8 test_v5i16_param_0[16]
+; CHECK-DAG: ld.param.u16 [[E4:%rs[0-9]+]], [test_v5i16_param_0+8];
+; CHECK-DAG: ld.param.v4.u16 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v5i16_param_0]
+; CHECK: .param .align 16 .b8 param0[16];
+; CHECK-DAG: st.param.v4.b16 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK-DAG: st.param.b16 [param0+8], [[E4]];
+; CHECK: .param .align 16 .b8 retval0[16];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v5i16,
+; CHECK-DAG: ld.param.v4.b16 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.b16 [[RE4:%rs[0-9]+]], [retval0+8];
+; CHECK-DAG: st.param.v4.b16 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-DAG: st.param.b16 [func_retval0+8], [[RE4]];
+; CHECK-NEXT: ret;
+define <5 x i16> @test_v5i16(<5 x i16> %a) {
+ %r = tail call <5 x i16> @test_v5i16(<5 x i16> %a);
+ ret <5 x i16> %r;
+}
+
+; CHECK: .func (.param .b32 func_retval0)
+; CHECK-LABEL: test_f16(
+; CHECK-NEXT: .param .b32 test_f16_param_0
+; CHECK: ld.param.b16 [[E:%h[0-9]+]], [test_f16_param_0];
+; CHECK: .param .b32 param0;
+; CHECK: st.param.b16 [param0+0], [[E]];
+; CHECK: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_f16,
+; CHECK: ld.param.b16 [[R:%h[0-9]+]], [retval0+0];
+; CHECK: st.param.b16 [func_retval0+0], [[R]]
+; CHECK-NEXT: ret;
+define half @test_f16(half %a) {
+ %r = tail call half @test_f16(half %a);
+ ret half %r;
+}
+
+; CHECK: .func (.param .align 4 .b8 func_retval0[4])
+; CHECK-LABEL: test_v2f16(
+; CHECK-NEXT: .param .align 4 .b8 test_v2f16_param_0[4]
+; CHECK: ld.param.b32 [[E:%hh[0-9]+]], [test_v2f16_param_0];
+; CHECK: .param .align 4 .b8 param0[4];
+; CHECK: st.param.b32 [param0+0], [[E]];
+; CHECK: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v2f16,
+; CHECK: ld.param.b32 [[R:%hh[0-9]+]], [retval0+0];
+; CHECK: st.param.b32 [func_retval0+0], [[R]]
+; CHECK-NEXT: ret;
+define <2 x half> @test_v2f16(<2 x half> %a) {
+ %r = tail call <2 x half> @test_v2f16(<2 x half> %a);
+ ret <2 x half> %r;
+}
+
+; CHECK:.func (.param .align 8 .b8 func_retval0[8])
+; CHECK-LABEL: test_v3f16(
+; CHECK: .param .align 8 .b8 test_v3f16_param_0[8]
+; CHECK-DAG: ld.param.b32 [[HH01:%hh[0-9]+]], [test_v3f16_param_0];
+; CHECK-DAG: mov.b32 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]]}, [[HH01]];
+; CHECK-DAG: ld.param.b16 [[E2:%h[0-9]+]], [test_v3f16_param_0+4];
+; CHECK: .param .align 8 .b8 param0[8];
+; CHECK-DAG: st.param.v2.b16 [param0+0], {[[E0]], [[E1]]};
+; CHECK-DAG: st.param.b16 [param0+4], [[E2]];
+; CHECK: .param .align 8 .b8 retval0[8];
+; CHECK: call.uni (retval0),
+; CHECK: test_v3f16,
+; CHECK-DAG: ld.param.v2.b16 {[[R0:%h[0-9]+]], [[R1:%h[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.b16 [[R2:%h[0-9]+]], [retval0+4];
+; CHECK-DAG: st.param.v2.b16 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-DAG: st.param.b16 [func_retval0+4], [[R2]];
+; CHECK: ret;
+define <3 x half> @test_v3f16(<3 x half> %a) {
+ %r = tail call <3 x half> @test_v3f16(<3 x half> %a);
+ ret <3 x half> %r;
+}
+
+; CHECK:.func (.param .align 8 .b8 func_retval0[8])
+; CHECK-LABEL: test_v4f16(
+; CHECK: .param .align 8 .b8 test_v4f16_param_0[8]
+; CHECK: ld.param.v2.u32 {[[R01:%r[0-9]+]], [[R23:%r[0-9]+]]}, [test_v4f16_param_0];
+; CHECK-DAG: mov.b32 [[HH01:%hh[0-9]+]], [[R01]];
+; CHECK-DAG: mov.b32 [[HH23:%hh[0-9]+]], [[R23]];
+; CHECK: .param .align 8 .b8 param0[8];
+; CHECK: st.param.v2.b32 [param0+0], {[[HH01]], [[HH23]]};
+; CHECK: .param .align 8 .b8 retval0[8];
+; CHECK: call.uni (retval0),
+; CHECK: test_v4f16,
+; CHECK: ld.param.v2.b32 {[[RH01:%hh[0-9]+]], [[RH23:%hh[0-9]+]]}, [retval0+0];
+; CHECK: st.param.v2.b32 [func_retval0+0], {[[RH01]], [[RH23]]};
+; CHECK: ret;
+define <4 x half> @test_v4f16(<4 x half> %a) {
+ %r = tail call <4 x half> @test_v4f16(<4 x half> %a);
+ ret <4 x half> %r;
+}
+
+; CHECK:.func (.param .align 16 .b8 func_retval0[16])
+; CHECK-LABEL: test_v5f16(
+; CHECK: .param .align 16 .b8 test_v5f16_param_0[16]
+; CHECK-DAG: ld.param.v4.b16 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]], [[E2:%h[0-9]+]], [[E3:%h[0-9]+]]}, [test_v5f16_param_0];
+; CHECK-DAG: ld.param.b16 [[E4:%h[0-9]+]], [test_v5f16_param_0+8];
+; CHECK: .param .align 16 .b8 param0[16];
+; CHECK-DAG: st.param.v4.b16 [param0+0],
+; CHECK-DAG: st.param.b16 [param0+8], [[E4]];
+; CHECK: .param .align 16 .b8 retval0[16];
+; CHECK: call.uni (retval0),
+; CHECK: test_v5f16,
+; CHECK-DAG: ld.param.v4.b16 {[[R0:%h[0-9]+]], [[R1:%h[0-9]+]], [[R2:%h[0-9]+]], [[R3:%h[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.b16 [[R4:%h[0-9]+]], [retval0+8];
+; CHECK-DAG: st.param.v4.b16 [func_retval0+0], {[[R0]], [[R1]], [[R2]], [[R3]]};
+; CHECK-DAG: st.param.b16 [func_retval0+8], [[R4]];
+; CHECK: ret;
+define <5 x half> @test_v5f16(<5 x half> %a) {
+ %r = tail call <5 x half> @test_v5f16(<5 x half> %a);
+ ret <5 x half> %r;
+}
+
+; CHECK:.func (.param .align 16 .b8 func_retval0[16])
+; CHECK-LABEL: test_v8f16(
+; CHECK: .param .align 16 .b8 test_v8f16_param_0[16]
+; CHECK: ld.param.v4.u32 {[[R01:%r[0-9]+]], [[R23:%r[0-9]+]], [[R45:%r[0-9]+]], [[R67:%r[0-9]+]]}, [test_v8f16_param_0];
+; CHECK-DAG: mov.b32 [[HH01:%hh[0-9]+]], [[R01]];
+; CHECK-DAG: mov.b32 [[HH23:%hh[0-9]+]], [[R23]];
+; CHECK-DAG: mov.b32 [[HH45:%hh[0-9]+]], [[R45]];
+; CHECK-DAG: mov.b32 [[HH67:%hh[0-9]+]], [[R67]];
+; CHECK: .param .align 16 .b8 param0[16];
+; CHECK: st.param.v4.b32 [param0+0], {[[HH01]], [[HH23]], [[HH45]], [[HH67]]};
+; CHECK: .param .align 16 .b8 retval0[16];
+; CHECK: call.uni (retval0),
+; CHECK: test_v8f16,
+; CHECK: ld.param.v4.b32 {[[RH01:%hh[0-9]+]], [[RH23:%hh[0-9]+]], [[RH45:%hh[0-9]+]], [[RH67:%hh[0-9]+]]}, [retval0+0];
+; CHECK: st.param.v4.b32 [func_retval0+0], {[[RH01]], [[RH23]], [[RH45]], [[RH67]]};
+; CHECK: ret;
+define <8 x half> @test_v8f16(<8 x half> %a) {
+ %r = tail call <8 x half> @test_v8f16(<8 x half> %a);
+ ret <8 x half> %r;
+}
+
+; CHECK:.func (.param .align 32 .b8 func_retval0[32])
+; CHECK-LABEL: test_v9f16(
+; CHECK: .param .align 32 .b8 test_v9f16_param_0[32]
+; CHECK-DAG: ld.param.v4.b16 {[[E0:%h[0-9]+]], [[E1:%h[0-9]+]], [[E2:%h[0-9]+]], [[E3:%h[0-9]+]]}, [test_v9f16_param_0];
+; CHECK-DAG: ld.param.v4.b16 {[[E4:%h[0-9]+]], [[E5:%h[0-9]+]], [[E6:%h[0-9]+]], [[E7:%h[0-9]+]]}, [test_v9f16_param_0+8];
+; CHECK-DAG: ld.param.b16 [[E8:%h[0-9]+]], [test_v9f16_param_0+16];
+; CHECK: .param .align 32 .b8 param0[32];
+; CHECK-DAG: st.param.v4.b16 [param0+0],
+; CHECK-DAG: st.param.v4.b16 [param0+8],
+; CHECK-DAG: st.param.b16 [param0+16], [[E8]];
+; CHECK: .param .align 32 .b8 retval0[32];
+; CHECK: call.uni (retval0),
+; CHECK: test_v9f16,
+; CHECK-DAG: ld.param.v4.b16 {[[R0:%h[0-9]+]], [[R1:%h[0-9]+]], [[R2:%h[0-9]+]], [[R3:%h[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.v4.b16 {[[R4:%h[0-9]+]], [[R5:%h[0-9]+]], [[R6:%h[0-9]+]], [[R7:%h[0-9]+]]}, [retval0+8];
+; CHECK-DAG: ld.param.b16 [[R8:%h[0-9]+]], [retval0+16];
+; CHECK-DAG: st.param.v4.b16 [func_retval0+0], {[[R0]], [[R1]], [[R2]], [[R3]]};
+; CHECK-DAG: st.param.v4.b16 [func_retval0+8], {[[R4]], [[R5]], [[R6]], [[R7]]};
+; CHECK-DAG: st.param.b16 [func_retval0+16], [[R8]];
+; CHECK: ret;
+define <9 x half> @test_v9f16(<9 x half> %a) {
+ %r = tail call <9 x half> @test_v9f16(<9 x half> %a);
+ ret <9 x half> %r;
+}
+
+; CHECK: .func (.param .b32 func_retval0)
+; CHECK-LABEL: test_i32(
+; CHECK-NEXT: .param .b32 test_i32_param_0
+; CHECK: ld.param.u32 [[E:%r[0-9]+]], [test_i32_param_0];
+; CHECK: .param .b32 param0;
+; CHECK: st.param.b32 [param0+0], [[E]];
+; CHECK: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_i32,
+; CHECK: ld.param.b32 [[R:%r[0-9]+]], [retval0+0];
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i32 @test_i32(i32 %a) {
+ %r = tail call i32 @test_i32(i32 %a);
+ ret i32 %r;
+}
+
+; CHECK: .func (.param .align 16 .b8 func_retval0[16])
+; CHECK-LABEL: test_v3i32(
+; CHECK-NEXT: .param .align 16 .b8 test_v3i32_param_0[16]
+; CHECK-DAG: ld.param.u32 [[E2:%r[0-9]+]], [test_v3i32_param_0+8];
+; CHECK-DAG: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_v3i32_param_0];
+; CHECK: .param .align 16 .b8 param0[16];
+; CHECK: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.b32 [param0+8], [[E2]];
+; CHECK: .param .align 16 .b8 retval0[16];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v3i32,
+; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.b32 [[RE2:%r[0-9]+]], [retval0+8];
+; CHECK-DAG: st.param.v2.b32 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK-DAG: st.param.b32 [func_retval0+8], [[RE2]];
+; CHECK-NEXT: ret;
+define <3 x i32> @test_v3i32(<3 x i32> %a) {
+ %r = tail call <3 x i32> @test_v3i32(<3 x i32> %a);
+ ret <3 x i32> %r;
+}
+
+; CHECK: .func (.param .align 16 .b8 func_retval0[16])
+; CHECK-LABEL: test_v4i32(
+; CHECK-NEXT: .param .align 16 .b8 test_v4i32_param_0[16]
+; CHECK: ld.param.v4.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]], [[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [test_v4i32_param_0]
+; CHECK: .param .align 16 .b8 param0[16];
+; CHECK: st.param.v4.b32 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK: .param .align 16 .b8 retval0[16];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v4i32,
+; CHECK: ld.param.v4.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]], [[RE2:%r[0-9]+]], [[RE3:%r[0-9]+]]}, [retval0+0];
+; CHECK: st.param.v4.b32 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-NEXT: ret;
+define <4 x i32> @test_v4i32(<4 x i32> %a) {
+ %r = tail call <4 x i32> @test_v4i32(<4 x i32> %a);
+ ret <4 x i32> %r;
+}
+
+; CHECK: .func (.param .align 32 .b8 func_retval0[32])
+; CHECK-LABEL: test_v5i32(
+; CHECK-NEXT: .param .align 32 .b8 test_v5i32_param_0[32]
+; CHECK-DAG: ld.param.u32 [[E4:%r[0-9]+]], [test_v5i32_param_0+16];
+; CHECK-DAG: ld.param.v4.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]], [[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [test_v5i32_param_0]
+; CHECK: .param .align 32 .b8 param0[32];
+; CHECK-DAG: st.param.v4.b32 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK-DAG: st.param.b32 [param0+16], [[E4]];
+; CHECK: .param .align 32 .b8 retval0[32];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v5i32,
+; CHECK-DAG: ld.param.v4.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]], [[RE2:%r[0-9]+]], [[RE3:%r[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.b32 [[RE4:%r[0-9]+]], [retval0+16];
+; CHECK-DAG: st.param.v4.b32 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-DAG: st.param.b32 [func_retval0+16], [[RE4]];
+; CHECK-NEXT: ret;
+define <5 x i32> @test_v5i32(<5 x i32> %a) {
+ %r = tail call <5 x i32> @test_v5i32(<5 x i32> %a);
+ ret <5 x i32> %r;
+}
+
+; CHECK: .func (.param .b32 func_retval0)
+; CHECK-LABEL: test_f32(
+; CHECK-NEXT: .param .b32 test_f32_param_0
+; CHECK: ld.param.f32 [[E:%f[0-9]+]], [test_f32_param_0];
+; CHECK: .param .b32 param0;
+; CHECK: st.param.f32 [param0+0], [[E]];
+; CHECK: .param .b32 retval0;
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_f32,
+; CHECK: ld.param.f32 [[R:%f[0-9]+]], [retval0+0];
+; CHECK: st.param.f32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define float @test_f32(float %a) {
+ %r = tail call float @test_f32(float %a);
+ ret float %r;
+}
+
+; CHECK: .func (.param .b64 func_retval0)
+; CHECK-LABEL: test_i64(
+; CHECK-NEXT: .param .b64 test_i64_param_0
+; CHECK: ld.param.u64 [[E:%rd[0-9]+]], [test_i64_param_0];
+; CHECK: .param .b64 param0;
+; CHECK: st.param.b64 [param0+0], [[E]];
+; CHECK: .param .b64 retval0;
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_i64,
+; CHECK: ld.param.b64 [[R:%rd[0-9]+]], [retval0+0];
+; CHECK: st.param.b64 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define i64 @test_i64(i64 %a) {
+ %r = tail call i64 @test_i64(i64 %a);
+ ret i64 %r;
+}
+
+; CHECK: .func (.param .align 32 .b8 func_retval0[32])
+; CHECK-LABEL: test_v3i64(
+; CHECK-NEXT: .param .align 32 .b8 test_v3i64_param_0[32]
+; CHECK-DAG: ld.param.u64 [[E2:%rd[0-9]+]], [test_v3i64_param_0+16];
+; CHECK-DAG: ld.param.v2.u64 {[[E0:%rd[0-9]+]], [[E1:%rd[0-9]+]]}, [test_v3i64_param_0];
+; CHECK: .param .align 32 .b8 param0[32];
+; CHECK: st.param.v2.b64 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.b64 [param0+16], [[E2]];
+; CHECK: .param .align 32 .b8 retval0[32];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v3i64,
+; CHECK: ld.param.v2.b64 {[[RE0:%rd[0-9]+]], [[RE1:%rd[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.b64 [[RE2:%rd[0-9]+]], [retval0+16];
+; CHECK-DAG: st.param.v2.b64 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK-DAG: st.param.b64 [func_retval0+16], [[RE2]];
+; CHECK-NEXT: ret;
+define <3 x i64> @test_v3i64(<3 x i64> %a) {
+ %r = tail call <3 x i64> @test_v3i64(<3 x i64> %a);
+ ret <3 x i64> %r;
+}
+
+; For i64, vector loads are limited by PTX to 2 elements.
+; CHECK: .func (.param .align 32 .b8 func_retval0[32])
+; CHECK-LABEL: test_v4i64(
+; CHECK-NEXT: .param .align 32 .b8 test_v4i64_param_0[32]
+; CHECK-DAG: ld.param.v2.u64 {[[E2:%rd[0-9]+]], [[E3:%rd[0-9]+]]}, [test_v4i64_param_0+16];
+; CHECK-DAG: ld.param.v2.u64 {[[E0:%rd[0-9]+]], [[E1:%rd[0-9]+]]}, [test_v4i64_param_0];
+; CHECK: .param .align 32 .b8 param0[32];
+; CHECK: st.param.v2.b64 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.v2.b64 [param0+16], {[[E2]], [[E3]]};
+; CHECK: .param .align 32 .b8 retval0[32];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_v4i64,
+; CHECK: ld.param.v2.b64 {[[RE0:%rd[0-9]+]], [[RE1:%rd[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.v2.b64 {[[RE2:%rd[0-9]+]], [[RE3:%rd[0-9]+]]}, [retval0+16];
+; CHECK-DAG: st.param.v2.b64 [func_retval0+16], {[[RE2]], [[RE3]]};
+; CHECK-DAG: st.param.v2.b64 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK-NEXT: ret;
+define <4 x i64> @test_v4i64(<4 x i64> %a) {
+ %r = tail call <4 x i64> @test_v4i64(<4 x i64> %a);
+ ret <4 x i64> %r;
+}
+
+; Aggregates, on the other hand, do not get extended.
+
+; CHECK: .func (.param .align 1 .b8 func_retval0[1])
+; CHECK-LABEL: test_s_i1(
+; CHECK-NEXT: .param .align 1 .b8 test_s_i1_param_0[1]
+; CHECK: ld.param.u8 [[A:%rs[0-9]+]], [test_s_i1_param_0];
+; CHECK: .param .align 1 .b8 param0[1];
+; CHECK: st.param.b8 [param0+0], [[A]]
+; CHECK: .param .align 1 .b8 retval0[1];
+; CHECK: call.uni
+; CHECK-NEXT: test_s_i1,
+; CHECK: ld.param.b8 [[R:%rs[0-9]+]], [retval0+0];
+; CHECK: st.param.b8 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define %s_i1 @test_s_i1(%s_i1 %a) {
+ %r = tail call %s_i1 @test_s_i1(%s_i1 %a);
+ ret %s_i1 %r;
+}
+
+; CHECK: .func (.param .align 1 .b8 func_retval0[1])
+; CHECK-LABEL: test_s_i8(
+; CHECK-NEXT: .param .align 1 .b8 test_s_i8_param_0[1]
+; CHECK: ld.param.u8 [[A:%rs[0-9]+]], [test_s_i8_param_0];
+; CHECK: .param .align 1 .b8 param0[1];
+; CHECK: st.param.b8 [param0+0], [[A]]
+; CHECK: .param .align 1 .b8 retval0[1];
+; CHECK: call.uni
+; CHECK-NEXT: test_s_i8,
+; CHECK: ld.param.b8 [[R:%rs[0-9]+]], [retval0+0];
+; CHECK: st.param.b8 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define %s_i8 @test_s_i8(%s_i8 %a) {
+ %r = tail call %s_i8 @test_s_i8(%s_i8 %a);
+ ret %s_i8 %r;
+}
+
+; CHECK: .func (.param .align 2 .b8 func_retval0[2])
+; CHECK-LABEL: test_s_i16(
+; CHECK-NEXT: .param .align 2 .b8 test_s_i16_param_0[2]
+; CHECK: ld.param.u16 [[A:%rs[0-9]+]], [test_s_i16_param_0];
+; CHECK: .param .align 2 .b8 param0[2];
+; CHECK: st.param.b16 [param0+0], [[A]]
+; CHECK: .param .align 2 .b8 retval0[2];
+; CHECK: call.uni
+; CHECK-NEXT: test_s_i16,
+; CHECK: ld.param.b16 [[R:%rs[0-9]+]], [retval0+0];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define %s_i16 @test_s_i16(%s_i16 %a) {
+ %r = tail call %s_i16 @test_s_i16(%s_i16 %a);
+ ret %s_i16 %r;
+}
+
+; CHECK: .func (.param .align 2 .b8 func_retval0[2])
+; CHECK-LABEL: test_s_f16(
+; CHECK-NEXT: .param .align 2 .b8 test_s_f16_param_0[2]
+; CHECK: ld.param.b16 [[A:%h[0-9]+]], [test_s_f16_param_0];
+; CHECK: .param .align 2 .b8 param0[2];
+; CHECK: st.param.b16 [param0+0], [[A]]
+; CHECK: .param .align 2 .b8 retval0[2];
+; CHECK: call.uni
+; CHECK-NEXT: test_s_f16,
+; CHECK: ld.param.b16 [[R:%h[0-9]+]], [retval0+0];
+; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define %s_f16 @test_s_f16(%s_f16 %a) {
+ %r = tail call %s_f16 @test_s_f16(%s_f16 %a);
+ ret %s_f16 %r;
+}
+
+; CHECK: .func (.param .align 4 .b8 func_retval0[4])
+; CHECK-LABEL: test_s_i32(
+; CHECK-NEXT: .param .align 4 .b8 test_s_i32_param_0[4]
+; CHECK: ld.param.u32 [[E:%r[0-9]+]], [test_s_i32_param_0];
+; CHECK: .param .align 4 .b8 param0[4]
+; CHECK: st.param.b32 [param0+0], [[E]];
+; CHECK: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_s_i32,
+; CHECK: ld.param.b32 [[R:%r[0-9]+]], [retval0+0];
+; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define %s_i32 @test_s_i32(%s_i32 %a) {
+ %r = tail call %s_i32 @test_s_i32(%s_i32 %a);
+ ret %s_i32 %r;
+}
+
+; CHECK: .func (.param .align 4 .b8 func_retval0[4])
+; CHECK-LABEL: test_s_f32(
+; CHECK-NEXT: .param .align 4 .b8 test_s_f32_param_0[4]
+; CHECK: ld.param.f32 [[E:%f[0-9]+]], [test_s_f32_param_0];
+; CHECK: .param .align 4 .b8 param0[4]
+; CHECK: st.param.f32 [param0+0], [[E]];
+; CHECK: .param .align 4 .b8 retval0[4];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_s_f32,
+; CHECK: ld.param.f32 [[R:%f[0-9]+]], [retval0+0];
+; CHECK: st.param.f32 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define %s_f32 @test_s_f32(%s_f32 %a) {
+ %r = tail call %s_f32 @test_s_f32(%s_f32 %a);
+ ret %s_f32 %r;
+}
+
+; CHECK: .func (.param .align 8 .b8 func_retval0[8])
+; CHECK-LABEL: test_s_i64(
+; CHECK-NEXT: .param .align 8 .b8 test_s_i64_param_0[8]
+; CHECK: ld.param.u64 [[E:%rd[0-9]+]], [test_s_i64_param_0];
+; CHECK: .param .align 8 .b8 param0[8];
+; CHECK: st.param.b64 [param0+0], [[E]];
+; CHECK: .param .align 8 .b8 retval0[8];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_s_i64,
+; CHECK: ld.param.b64 [[R:%rd[0-9]+]], [retval0+0];
+; CHECK: st.param.b64 [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define %s_i64 @test_s_i64(%s_i64 %a) {
+ %r = tail call %s_i64 @test_s_i64(%s_i64 %a);
+ ret %s_i64 %r;
+}
+
+; Fields that have different types but identical sizes are not vectorized.
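+; (Vectorized ld.param/st.param need a single element type, so the
+; interleaved i32/f32 fields below fall back to scalar accesses.)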
+; CHECK: .func (.param .align 8 .b8 func_retval0[24])
+; CHECK-LABEL: test_s_i32f32(
+; CHECK: .param .align 8 .b8 test_s_i32f32_param_0[24]
+; CHECK-DAG: ld.param.u64 [[E4:%rd[0-9]+]], [test_s_i32f32_param_0+16];
+; CHECK-DAG: ld.param.f32 [[E3:%f[0-9]+]], [test_s_i32f32_param_0+12];
+; CHECK-DAG: ld.param.u32 [[E2:%r[0-9]+]], [test_s_i32f32_param_0+8];
+; CHECK-DAG: ld.param.f32 [[E1:%f[0-9]+]], [test_s_i32f32_param_0+4];
+; CHECK-DAG: ld.param.u32 [[E0:%r[0-9]+]], [test_s_i32f32_param_0];
+; CHECK: .param .align 8 .b8 param0[24];
+; CHECK-DAG: st.param.b32 [param0+0], [[E0]];
+; CHECK-DAG: st.param.f32 [param0+4], [[E1]];
+; CHECK-DAG: st.param.b32 [param0+8], [[E2]];
+; CHECK-DAG: st.param.f32 [param0+12], [[E3]];
+; CHECK-DAG: st.param.b64 [param0+16], [[E4]];
+; CHECK: .param .align 8 .b8 retval0[24];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_s_i32f32,
+; CHECK-DAG: ld.param.b32 [[RE0:%r[0-9]+]], [retval0+0];
+; CHECK-DAG: ld.param.f32 [[RE1:%f[0-9]+]], [retval0+4];
+; CHECK-DAG: ld.param.b32 [[RE2:%r[0-9]+]], [retval0+8];
+; CHECK-DAG: ld.param.f32 [[RE3:%f[0-9]+]], [retval0+12];
+; CHECK-DAG: ld.param.b64 [[RE4:%rd[0-9]+]], [retval0+16];
+; CHECK-DAG: st.param.b32 [func_retval0+0], [[RE0]];
+; CHECK-DAG: st.param.f32 [func_retval0+4], [[RE1]];
+; CHECK-DAG: st.param.b32 [func_retval0+8], [[RE2]];
+; CHECK-DAG: st.param.f32 [func_retval0+12], [[RE3]];
+; CHECK-DAG: st.param.b64 [func_retval0+16], [[RE4]];
+; CHECK: ret;
+define %s_i32f32 @test_s_i32f32(%s_i32f32 %a) {
+ %r = tail call %s_i32f32 @test_s_i32f32(%s_i32f32 %a);
+ ret %s_i32f32 %r;
+}
+
+; We do vectorize consecutive fields with matching types.
+; CHECK:.visible .func (.param .align 8 .b8 func_retval0[24])
+; CHECK-LABEL: test_s_i32x4(
+; CHECK: .param .align 8 .b8 test_s_i32x4_param_0[24]
+; CHECK-DAG: ld.param.u64 [[E4:%rd[0-9]+]], [test_s_i32x4_param_0+16];
+; CHECK-DAG: ld.param.v2.u32 {[[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [test_s_i32x4_param_0+8];
+; CHECK-DAG: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_s_i32x4_param_0];
+; CHECK: .param .align 8 .b8 param0[24];
+; CHECK: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.v2.b32 [param0+8], {[[E2]], [[E3]]};
+; CHECK: st.param.b64 [param0+16], [[E4]];
+; CHECK: .param .align 8 .b8 retval0[24];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_s_i32x4,
+; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.v2.b32 {[[RE2:%r[0-9]+]], [[RE3:%r[0-9]+]]}, [retval0+8];
+; CHECK: ld.param.b64 [[RE4:%rd[0-9]+]], [retval0+16];
+; CHECK-DAG: st.param.v2.b32 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK-DAG: st.param.v2.b32 [func_retval0+8], {[[RE2]], [[RE3]]};
+; CHECK-DAG: st.param.b64 [func_retval0+16], [[RE4]];
+; CHECK: ret;
+
+define %s_i32x4 @test_s_i32x4(%s_i32x4 %a) {
+ %r = tail call %s_i32x4 @test_s_i32x4(%s_i32x4 %a);
+ ret %s_i32x4 %r;
+}
+
+; CHECK:.visible .func (.param .align 8 .b8 func_retval0[32])
+; CHECK-LABEL: test_s_i1i32x4(
+; CHECK: .param .align 8 .b8 test_s_i1i32x4_param_0[32]
+; CHECK: ld.param.u64 [[E5:%rd[0-9]+]], [test_s_i1i32x4_param_0+24];
+; CHECK: ld.param.u32 [[E4:%r[0-9]+]], [test_s_i1i32x4_param_0+16];
+; CHECK: ld.param.u32 [[E3:%r[0-9]+]], [test_s_i1i32x4_param_0+12];
+; CHECK: ld.param.u8 [[E2:%rs[0-9]+]], [test_s_i1i32x4_param_0+8];
+; CHECK: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_s_i1i32x4_param_0];
+; CHECK: .param .align 8 .b8 param0[32];
+; CHECK: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.b8 [param0+8], [[E2]];
+; CHECK: st.param.b32 [param0+12], [[E3]];
+; CHECK: st.param.b32 [param0+16], [[E4]];
+; CHECK: st.param.b64 [param0+24], [[E5]];
+; CHECK: .param .align 8 .b8 retval0[32];
+; CHECK: call.uni (retval0),
+; CHECK: test_s_i1i32x4,
+; CHECK: (
+; CHECK: param0
+; CHECK: );
+; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.b8 [[RE2:%rs[0-9]+]], [retval0+8];
+; CHECK: ld.param.b32 [[RE3:%r[0-9]+]], [retval0+12];
+; CHECK: ld.param.b32 [[RE4:%r[0-9]+]], [retval0+16];
+; CHECK: ld.param.b64 [[RE5:%rd[0-9]+]], [retval0+24];
+; CHECK: st.param.v2.b32 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK: st.param.b8 [func_retval0+8], [[RE2]];
+; CHECK: st.param.b32 [func_retval0+12], [[RE3]];
+; CHECK: st.param.b32 [func_retval0+16], [[RE4]];
+; CHECK: st.param.b64 [func_retval0+24], [[RE5]];
+; CHECK: ret;
+
+define %s_i8i32x4 @test_s_i1i32x4(%s_i8i32x4 %a) {
+ %r = tail call %s_i8i32x4 @test_s_i1i32x4(%s_i8i32x4 %a);
+ ret %s_i8i32x4 %r;
+}
+
+; -- All loads/stores from parameters aligned to one byte must be done one
+; -- byte at a time.
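+; -- (%s_i8i32x4p is a packed struct, so the parameter is only byte-aligned
+; -- and no wider access would be guaranteed legal.)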
+; CHECK:.visible .func (.param .align 1 .b8 func_retval0[25])
+; CHECK-LABEL: test_s_i1i32x4p(
+; CHECK-DAG: .param .align 1 .b8 test_s_i1i32x4p_param_0[25]
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+24];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+23];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+22];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+21];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+20];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+19];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+18];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+17];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+16];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+15];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+14];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+13];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+12];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+11];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+10];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+9];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+8];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+7];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+6];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+5];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+4];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+3];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+2];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+1];
+; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0];
+; --- TODO
+; --- Unaligned parameter store / return value load is broken in both nvcc
+; --- and llvm and needs to be fixed.
+; CHECK: .param .align 1 .b8 param0[25];
+; CHECK-DAG: st.param.b32 [param0+0],
+; CHECK-DAG: st.param.b32 [param0+4],
+; CHECK-DAG: st.param.b8 [param0+8],
+; CHECK-DAG: st.param.b32 [param0+9],
+; CHECK-DAG: st.param.b32 [param0+13],
+; CHECK-DAG: st.param.b64 [param0+17],
+; CHECK: .param .align 1 .b8 retval0[25];
+; CHECK: call.uni (retval0),
+; CHECK-NEXT: test_s_i1i32x4p,
+; CHECK-DAG: ld.param.b32 %r41, [retval0+0];
+; CHECK-DAG: ld.param.b32 %r42, [retval0+4];
+; CHECK-DAG: ld.param.b8 %rs2, [retval0+8];
+; CHECK-DAG: ld.param.b32 %r43, [retval0+9];
+; CHECK-DAG: ld.param.b32 %r44, [retval0+13];
+; CHECK-DAG: ld.param.b64 %rd23, [retval0+17];
+; CHECK-DAG: st.param.b32 [func_retval0+0],
+; CHECK-DAG: st.param.b32 [func_retval0+4],
+; CHECK-DAG: st.param.b8 [func_retval0+8],
+; CHECK-DAG: st.param.b32 [func_retval0+9],
+; CHECK-DAG: st.param.b32 [func_retval0+13],
+; CHECK-DAG: st.param.b64 [func_retval0+17],
+
+define %s_i8i32x4p @test_s_i1i32x4p(%s_i8i32x4p %a) {
+ %r = tail call %s_i8i32x4p @test_s_i1i32x4p(%s_i8i32x4p %a);
+ ret %s_i8i32x4p %r;
+}
+
+; Check that we can vectorize loads that span multiple aggregate fields.
+; CHECK:.visible .func (.param .align 16 .b8 func_retval0[80])
+; CHECK-LABEL: test_s_crossfield(
+; CHECK: .param .align 16 .b8 test_s_crossfield_param_0[80]
+; CHECK: ld.param.u32 [[E15:%r[0-9]+]], [test_s_crossfield_param_0+64];
+; CHECK: ld.param.v4.u32 {[[E11:%r[0-9]+]], [[E12:%r[0-9]+]], [[E13:%r[0-9]+]], [[E14:%r[0-9]+]]}, [test_s_crossfield_param_0+48];
+; CHECK: ld.param.v4.u32 {[[E7:%r[0-9]+]], [[E8:%r[0-9]+]], [[E9:%r[0-9]+]], [[E10:%r[0-9]+]]}, [test_s_crossfield_param_0+32];
+; CHECK: ld.param.v4.u32 {[[E3:%r[0-9]+]], [[E4:%r[0-9]+]], [[E5:%r[0-9]+]], [[E6:%r[0-9]+]]}, [test_s_crossfield_param_0+16];
+; CHECK: ld.param.u32 [[E2:%r[0-9]+]], [test_s_crossfield_param_0+8];
+; CHECK: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_s_crossfield_param_0];
+; CHECK: .param .align 16 .b8 param0[80];
+; CHECK: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.b32 [param0+8], [[E2]];
+; CHECK: st.param.v4.b32 [param0+16], {[[E3]], [[E4]], [[E5]], [[E6]]};
+; CHECK: st.param.v4.b32 [param0+32], {[[E7]], [[E8]], [[E9]], [[E10]]};
+; CHECK: st.param.v4.b32 [param0+48], {[[E11]], [[E12]], [[E13]], [[E14]]};
+; CHECK: st.param.b32 [param0+64], [[E15]];
+; CHECK: .param .align 16 .b8 retval0[80];
+; CHECK: call.uni (retval0),
+; CHECK: test_s_crossfield,
+; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.b32 [[RE2:%r[0-9]+]], [retval0+8];
+; CHECK: ld.param.v4.b32 {[[RE3:%r[0-9]+]], [[RE4:%r[0-9]+]], [[RE5:%r[0-9]+]], [[RE6:%r[0-9]+]]}, [retval0+16];
+; CHECK: ld.param.v4.b32 {[[RE7:%r[0-9]+]], [[RE8:%r[0-9]+]], [[RE9:%r[0-9]+]], [[RE10:%r[0-9]+]]}, [retval0+32];
+; CHECK: ld.param.v4.b32 {[[RE11:%r[0-9]+]], [[RE12:%r[0-9]+]], [[RE13:%r[0-9]+]], [[RE14:%r[0-9]+]]}, [retval0+48];
+; CHECK: ld.param.b32 [[RE15:%r[0-9]+]], [retval0+64];
+; CHECK: st.param.v2.b32 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK: st.param.b32 [func_retval0+8], [[RE2]];
+; CHECK: st.param.v4.b32 [func_retval0+16], {[[RE3]], [[RE4]], [[RE5]], [[RE6]]};
+; CHECK: st.param.v4.b32 [func_retval0+32], {[[RE7]], [[RE8]], [[RE9]], [[RE10]]};
+; CHECK: st.param.v4.b32 [func_retval0+48], {[[RE11]], [[RE12]], [[RE13]], [[RE14]]};
+; CHECK: st.param.b32 [func_retval0+64], [[RE15]];
+; CHECK: ret;
+
+define %s_crossfield @test_s_crossfield(%s_crossfield %a) {
+ %r = tail call %s_crossfield @test_s_crossfield(%s_crossfield %a);
+ ret %s_crossfield %r;
+}
diff --git a/test/CodeGen/NVPTX/rsqrt.ll b/test/CodeGen/NVPTX/rsqrt.ll
deleted file mode 100644
index 3a52a493abdd..000000000000
--- a/test/CodeGen/NVPTX/rsqrt.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=nvptx -mcpu=sm_20 -nvptx-prec-divf32=1 -nvptx-prec-sqrtf32=0 | FileCheck %s
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
-
-declare float @llvm.nvvm.sqrt.f(float)
-
-define float @foo(float %a) {
-; CHECK: rsqrt.approx.f32
- %val = tail call float @llvm.nvvm.sqrt.f(float %a)
- %ret = fdiv float 1.0, %val
- ret float %ret
-}
-
diff --git a/test/CodeGen/NVPTX/sqrt-approx.ll b/test/CodeGen/NVPTX/sqrt-approx.ll
new file mode 100644
index 000000000000..1e28db44b804
--- /dev/null
+++ b/test/CodeGen/NVPTX/sqrt-approx.ll
@@ -0,0 +1,150 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -nvptx-prec-divf32=0 -nvptx-prec-sqrtf32=0 \
+; RUN: | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
+
+declare float @llvm.sqrt.f32(float)
+declare double @llvm.sqrt.f64(double)
+
+; -- reciprocal sqrt --
+
+; CHECK-LABEL: test_rsqrt32
+define float @test_rsqrt32(float %a) #0 {
+; CHECK: rsqrt.approx.f32
+ %val = tail call float @llvm.sqrt.f32(float %a)
+ %ret = fdiv float 1.0, %val
+ ret float %ret
+}
+
+; CHECK-LABEL: test_rsqrt_ftz
+define float @test_rsqrt_ftz(float %a) #0 #1 {
+; CHECK: rsqrt.approx.ftz.f32
+ %val = tail call float @llvm.sqrt.f32(float %a)
+ %ret = fdiv float 1.0, %val
+ ret float %ret
+}
+
+; CHECK-LABEL: test_rsqrt64
+define double @test_rsqrt64(double %a) #0 {
+; CHECK: rsqrt.approx.f64
+ %val = tail call double @llvm.sqrt.f64(double %a)
+ %ret = fdiv double 1.0, %val
+ ret double %ret
+}
+
+; CHECK-LABEL: test_rsqrt64_ftz
+define double @test_rsqrt64_ftz(double %a) #0 #1 {
+; There's no rsqrt.approx.ftz.f64 instruction; we just use the non-ftz version.
+; CHECK: rsqrt.approx.f64
+ %val = tail call double @llvm.sqrt.f64(double %a)
+ %ret = fdiv double 1.0, %val
+ ret double %ret
+}
+
+; -- sqrt --
+
+; CHECK-LABEL: test_sqrt32
+define float @test_sqrt32(float %a) #0 {
+; CHECK: sqrt.approx.f32
+ %ret = tail call float @llvm.sqrt.f32(float %a)
+ ret float %ret
+}
+
+; CHECK-LABEL: test_sqrt_ftz
+define float @test_sqrt_ftz(float %a) #0 #1 {
+; CHECK: sqrt.approx.ftz.f32
+ %ret = tail call float @llvm.sqrt.f32(float %a)
+ ret float %ret
+}
+
+; CHECK-LABEL: test_sqrt64
+define double @test_sqrt64(double %a) #0 {
+; There's no sqrt.approx.f64 instruction; we emit
+; reciprocal(rsqrt.approx.f64(x)). There's no non-ftz approximate reciprocal,
+; so we just use the ftz version.
+; CHECK: rsqrt.approx.f64
+; CHECK: rcp.approx.ftz.f64
+ %ret = tail call double @llvm.sqrt.f64(double %a)
+ ret double %ret
+}
+
+; CHECK-LABEL: test_sqrt64_ftz
+define double @test_sqrt64_ftz(double %a) #0 #1 {
+; There's no sqrt.approx.ftz.f64 instruction; we just use the non-ftz version.
+; CHECK: rsqrt.approx.f64
+; CHECK: rcp.approx.ftz.f64
+ %ret = tail call double @llvm.sqrt.f64(double %a)
+ ret double %ret
+}
+
+; -- refined sqrt and rsqrt --
+;
+; The sqrt and rsqrt refinement algorithms both emit an rsqrt.approx, followed
+; by some math.
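+; As a sketch (an assumed form; the exact emitted sequence is not spelled
+; out here), one Newton-Raphson step refines y0 = rsqrt.approx(x) as
+;   y1 = y0 * (1.5 - 0.5 * x * y0 * y0),
+; and sqrt(x) can then be recovered as x * y1.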
+
+; CHECK-LABEL: test_rsqrt32_refined
+define float @test_rsqrt32_refined(float %a) #0 #2 {
+; CHECK: rsqrt.approx.f32
+ %val = tail call float @llvm.sqrt.f32(float %a)
+ %ret = fdiv float 1.0, %val
+ ret float %ret
+}
+
+; CHECK-LABEL: test_sqrt32_refined
+define float @test_sqrt32_refined(float %a) #0 #2 {
+; CHECK: rsqrt.approx.f32
+ %ret = tail call float @llvm.sqrt.f32(float %a)
+ ret float %ret
+}
+
+; CHECK-LABEL: test_rsqrt64_refined
+define double @test_rsqrt64_refined(double %a) #0 #2 {
+; CHECK: rsqrt.approx.f64
+ %val = tail call double @llvm.sqrt.f64(double %a)
+ %ret = fdiv double 1.0, %val
+ ret double %ret
+}
+
+; CHECK-LABEL: test_sqrt64_refined
+define double @test_sqrt64_refined(double %a) #0 #2 {
+; CHECK: rsqrt.approx.f64
+ %ret = tail call double @llvm.sqrt.f64(double %a)
+ ret double %ret
+}
+
+; -- refined sqrt and rsqrt with ftz enabled --
+
+; CHECK-LABEL: test_rsqrt32_refined_ftz
+define float @test_rsqrt32_refined_ftz(float %a) #0 #1 #2 {
+; CHECK: rsqrt.approx.ftz.f32
+ %val = tail call float @llvm.sqrt.f32(float %a)
+ %ret = fdiv float 1.0, %val
+ ret float %ret
+}
+
+; CHECK-LABEL: test_sqrt32_refined_ftz
+define float @test_sqrt32_refined_ftz(float %a) #0 #1 #2 {
+; CHECK: rsqrt.approx.ftz.f32
+ %ret = tail call float @llvm.sqrt.f32(float %a)
+ ret float %ret
+}
+
+; CHECK-LABEL: test_rsqrt64_refined_ftz
+define double @test_rsqrt64_refined_ftz(double %a) #0 #1 #2 {
+; There's no rsqrt.approx.ftz.f64, so we just use the non-ftz version.
+; CHECK: rsqrt.approx.f64
+ %val = tail call double @llvm.sqrt.f64(double %a)
+ %ret = fdiv double 1.0, %val
+ ret double %ret
+}
+
+; CHECK-LABEL: test_sqrt64_refined_ftz
+define double @test_sqrt64_refined_ftz(double %a) #0 #1 #2 {
+; CHECK: rsqrt.approx.f64
+ %ret = tail call double @llvm.sqrt.f64(double %a)
+ ret double %ret
+}
+
+attributes #0 = { "unsafe-fp-math" = "true" }
+attributes #1 = { "nvptx-f32ftz" = "true" }
+attributes #2 = { "reciprocal-estimates" = "rsqrtf:1,rsqrtd:1,sqrtf:1,sqrtd:1" }
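+
+; Note: #0 permits the approximate lowerings, #1 selects the .ftz variants
+; where available, and #2 requests the refined expansions checked above.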
diff --git a/test/CodeGen/NVPTX/vec-param-load.ll b/test/CodeGen/NVPTX/vec-param-load.ll
index 4193ac4085cc..bf26e5ff1bdb 100644
--- a/test/CodeGen/NVPTX/vec-param-load.ll
+++ b/test/CodeGen/NVPTX/vec-param-load.ll
@@ -2,12 +2,81 @@
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
-
-define <16 x float> @foo(<16 x float> %a) {
-; Make sure we index into vectors properly
-; CHECK: ld.param.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [foo_param_0+48];
-; CHECK: ld.param.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [foo_param_0+32];
-; CHECK: ld.param.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [foo_param_0+16];
-; CHECK: ld.param.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [foo_param_0];
+define <16 x float> @test_v16f32(<16 x float> %a) {
+; CHECK-LABEL: test_v16f32(
+; CHECK-DAG: ld.param.v4.f32 {[[V_12_15:(%f[0-9]+[, ]*){4}]]}, [test_v16f32_param_0+48];
+; CHECK-DAG: ld.param.v4.f32 {[[V_8_11:(%f[0-9]+[, ]*){4}]]}, [test_v16f32_param_0+32];
+; CHECK-DAG: ld.param.v4.f32 {[[V_4_7:(%f[0-9]+[, ]*){4}]]}, [test_v16f32_param_0+16];
+; CHECK-DAG: ld.param.v4.f32 {[[V_0_3:(%f[0-9]+[, ]*){4}]]}, [test_v16f32_param_0];
+; CHECK-DAG: st.param.v4.f32 [func_retval0+0], {[[V_0_3]]}
+; CHECK-DAG: st.param.v4.f32 [func_retval0+16], {[[V_4_7]]}
+; CHECK-DAG: st.param.v4.f32 [func_retval0+32], {[[V_8_11]]}
+; CHECK-DAG: st.param.v4.f32 [func_retval0+48], {[[V_12_15]]}
+; CHECK: ret;
ret <16 x float> %a
}
+
+define <8 x float> @test_v8f32(<8 x float> %a) {
+; CHECK-LABEL: test_v8f32(
+; CHECK-DAG: ld.param.v4.f32 {[[V_4_7:(%f[0-9]+[, ]*){4}]]}, [test_v8f32_param_0+16];
+; CHECK-DAG: ld.param.v4.f32 {[[V_0_3:(%f[0-9]+[, ]*){4}]]}, [test_v8f32_param_0];
+; CHECK-DAG: st.param.v4.f32 [func_retval0+0], {[[V_0_3]]}
+; CHECK-DAG: st.param.v4.f32 [func_retval0+16], {[[V_4_7]]}
+; CHECK: ret;
+ ret <8 x float> %a
+}
+
+define <4 x float> @test_v4f32(<4 x float> %a) {
+; CHECK-LABEL: test_v4f32(
+; CHECK-DAG: ld.param.v4.f32 {[[V_0_3:(%f[0-9]+[, ]*){4}]]}, [test_v4f32_param_0];
+; CHECK-DAG: st.param.v4.f32 [func_retval0+0], {[[V_0_3]]}
+; CHECK: ret;
+ ret <4 x float> %a
+}
+
+define <2 x float> @test_v2f32(<2 x float> %a) {
+; CHECK-LABEL: test_v2f32(
+; CHECK-DAG: ld.param.v2.f32 {[[V_0_3:(%f[0-9]+[, ]*){2}]]}, [test_v2f32_param_0];
+; CHECK-DAG: st.param.v2.f32 [func_retval0+0], {[[V_0_3]]}
+; CHECK: ret;
+ ret <2 x float> %a
+}
+
+; Oddly shaped vectors should not load any extra elements.
+define <3 x float> @test_v3f32(<3 x float> %a) {
+; CHECK-LABEL: test_v3f32(
+; CHECK-DAG: ld.param.f32 [[V_2:%f[0-9]+]], [test_v3f32_param_0+8];
+; CHECK-DAG: ld.param.v2.f32 {[[V_0_1:(%f[0-9]+[, ]*){2}]]}, [test_v3f32_param_0];
+; CHECK-DAG: st.param.v2.f32 [func_retval0+0], {[[V_0_1]]}
+; CHECK-DAG: st.param.f32 [func_retval0+8], [[V_2]]
+; CHECK: ret;
+ ret <3 x float> %a
+}
+
+define <8 x i64> @test_v8i64(<8 x i64> %a) {
+; CHECK-LABEL: test_v8i64(
+; CHECK-DAG: ld.param.v2.u64 {[[V_6_7:(%rd[0-9]+[, ]*){2}]]}, [test_v8i64_param_0+48];
+; CHECK-DAG: ld.param.v2.u64 {[[V_4_5:(%rd[0-9]+[, ]*){2}]]}, [test_v8i64_param_0+32];
+; CHECK-DAG: ld.param.v2.u64 {[[V_2_3:(%rd[0-9]+[, ]*){2}]]}, [test_v8i64_param_0+16];
+; CHECK-DAG: ld.param.v2.u64 {[[V_0_1:(%rd[0-9]+[, ]*){2}]]}, [test_v8i64_param_0];
+; CHECK-DAG: st.param.v2.b64 [func_retval0+0], {[[V_0_1]]}
+; CHECK-DAG: st.param.v2.b64 [func_retval0+16], {[[V_2_3]]}
+; CHECK-DAG: st.param.v2.b64 [func_retval0+32], {[[V_4_5]]}
+; CHECK-DAG: st.param.v2.b64 [func_retval0+48], {[[V_6_7]]}
+; CHECK: ret;
+ ret <8 x i64> %a
+}
+
+define <16 x i16> @test_v16i16(<16 x i16> %a) {
+; CHECK-LABEL: test_v16i16(
+; CHECK-DAG: ld.param.v4.u16 {[[V_12_15:(%rs[0-9]+[, ]*){4}]]}, [test_v16i16_param_0+24];
+; CHECK-DAG: ld.param.v4.u16 {[[V_8_11:(%rs[0-9]+[, ]*){4}]]}, [test_v16i16_param_0+16];
+; CHECK-DAG: ld.param.v4.u16 {[[V_4_7:(%rs[0-9]+[, ]*){4}]]}, [test_v16i16_param_0+8];
+; CHECK-DAG: ld.param.v4.u16 {[[V_0_3:(%rs[0-9]+[, ]*){4}]]}, [test_v16i16_param_0];
+; CHECK-DAG: st.param.v4.b16 [func_retval0+0], {[[V_0_3]]}
+; CHECK-DAG: st.param.v4.b16 [func_retval0+8], {[[V_4_7]]}
+; CHECK-DAG: st.param.v4.b16 [func_retval0+16], {[[V_8_11]]}
+; CHECK-DAG: st.param.v4.b16 [func_retval0+24], {[[V_12_15]]}
+; CHECK: ret;
+ ret <16 x i16> %a
+}
diff --git a/test/CodeGen/NVPTX/vec8.ll b/test/CodeGen/NVPTX/vec8.ll
index 03f5cfc6cb01..a86ba1e29d5c 100644
--- a/test/CodeGen/NVPTX/vec8.ll
+++ b/test/CodeGen/NVPTX/vec8.ll
@@ -4,10 +4,15 @@ target triple = "nvptx-unknown-cuda"
; CHECK: .visible .func foo
define void @foo(<8 x i8> %a, i8* %b) {
- %t0 = extractelement <8 x i8> %a, i32 0
-; CHECK-DAG: ld.param.v4.u8
-; CHECK-DAG: ld.param.u32
- store i8 %t0, i8* %b
+; CHECK-DAG: ld.param.v4.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [foo_param_0]
+; CHECK-DAG: ld.param.v4.u8 {[[E4:%rs[0-9]+]], [[E5:%rs[0-9]+]], [[E6:%rs[0-9]+]], [[E7:%rs[0-9]+]]}, [foo_param_0+4]
+; CHECK-DAG: ld.param.u32 %[[B:r[0-9]+]], [foo_param_1]
+; CHECK: add.s16 [[T:%rs[0-9]+]], [[E1]], [[E6]];
+; CHECK: st.u8 [%[[B]]], [[T]];
+ %t0 = extractelement <8 x i8> %a, i32 1
+ %t1 = extractelement <8 x i8> %a, i32 6
+ %t = add i8 %t0, %t1
+ store i8 %t, i8* %b
ret void
}
diff --git a/test/CodeGen/NVPTX/vector-call.ll b/test/CodeGen/NVPTX/vector-call.ll
index 968d1d4a5f51..bf7b931a5758 100644
--- a/test/CodeGen/NVPTX/vector-call.ll
+++ b/test/CodeGen/NVPTX/vector-call.ll
@@ -4,9 +4,27 @@ target triple = "nvptx-unknown-cuda"
declare void @bar(<4 x i32>)
-; CHECK-LABEL: @foo
+; CHECK-LABEL: .func foo(
+; CHECK-DAG: ld.param.v4.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]], [[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [foo_param_0];
+; CHECK: .param .align 16 .b8 param0[16];
+; CHECK-DAG: st.param.v4.b32 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK: call.uni
+; CHECK: ret;
define void @foo(<4 x i32> %a) {
-; CHECK: st.param.v4.b32
tail call void @bar(<4 x i32> %a)
ret void
}
+
+; CHECK-LABEL: .func foo3(
+; CHECK-DAG: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [foo3_param_0];
+; CHECK-DAG: ld.param.u32 [[E2:%r[0-9]+]], [foo3_param_0+8];
+; CHECK: .param .align 16 .b8 param0[16];
+; CHECK-DAG: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
+; CHECK-DAG: st.param.b32 [param0+8], [[E2]];
+; CHECK: call.uni
+; CHECK: ret;
+declare void @bar3(<3 x i32>)
+define void @foo3(<3 x i32> %a) {
+ tail call void @bar3(<3 x i32> %a)
+ ret void
+}
diff --git a/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll b/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll
index 264967157d7a..56f4a4173ef5 100644
--- a/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll
+++ b/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll
@@ -1,17 +1,33 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-apple-darwin | grep extsw | count 2
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s
@lens = external global i8* ; <i8**> [#uses=1]
@vals = external global i32* ; <i32**> [#uses=1]
define i32 @test(i32 %i) {
- %tmp = load i8*, i8** @lens ; <i8*> [#uses=1]
- %tmp1 = getelementptr i8, i8* %tmp, i32 %i ; <i8*> [#uses=1]
- %tmp.upgrd.1 = load i8, i8* %tmp1 ; <i8> [#uses=1]
- %tmp2 = zext i8 %tmp.upgrd.1 to i32 ; <i32> [#uses=1]
- %tmp3 = load i32*, i32** @vals ; <i32*> [#uses=1]
- %tmp5 = sub i32 1, %tmp2 ; <i32> [#uses=1]
- %tmp6 = getelementptr i32, i32* %tmp3, i32 %tmp5 ; <i32*> [#uses=1]
- %tmp7 = load i32, i32* %tmp6 ; <i32> [#uses=1]
- ret i32 %tmp7
+; CHECK-LABEL: test:
+; CHECK: # BB#0:
+; CHECK-NEXT: addis 4, 2, .LC0@toc@ha
+; CHECK-NEXT: extsw 3, 3
+; CHECK-NEXT: addis 5, 2, .LC1@toc@ha
+; CHECK-NEXT: ld 4, .LC0@toc@l(4)
+; CHECK-NEXT: ld 4, 0(4)
+; CHECK-NEXT: lbzx 3, 4, 3
+; CHECK-NEXT: ld 4, .LC1@toc@l(5)
+; CHECK-NEXT: subfic 3, 3, 1
+; CHECK-NEXT: extsw 3, 3
+; CHECK-NEXT: ld 4, 0(4)
+; CHECK-NEXT: sldi 3, 3, 2
+; CHECK-NEXT: lwzx 3, 4, 3
+; CHECK-NEXT: blr
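+;
+; Note: the sequence above still contains exactly the two extsw instructions
+; that the old grep-based RUN line counted; the autogenerated CHECK-NEXT
+; lines now also pin where they appear.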
+ %tmp = load i8*, i8** @lens ; <i8*> [#uses=1]
+ %tmp1 = getelementptr i8, i8* %tmp, i32 %i ; <i8*> [#uses=1]
+ %tmp.upgrd.1 = load i8, i8* %tmp1 ; <i8> [#uses=1]
+ %tmp2 = zext i8 %tmp.upgrd.1 to i32 ; <i32> [#uses=1]
+ %tmp3 = load i32*, i32** @vals ; <i32*> [#uses=1]
+ %tmp5 = sub i32 1, %tmp2 ; <i32> [#uses=1]
+ %tmp6 = getelementptr i32, i32* %tmp3, i32 %tmp5 ; <i32*> [#uses=1]
+ %tmp7 = load i32, i32* %tmp6 ; <i32> [#uses=1]
+ ret i32 %tmp7
}
diff --git a/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll b/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll
index bd496704890f..53bad4fe06ee 100644
--- a/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll
+++ b/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll
@@ -1,5 +1,4 @@
; RUN: llc -mcpu=g5 < %s | FileCheck %s
-; RUN: llc -mcpu=g5 -addr-sink-using-gep=1 < %s | FileCheck %s
;; Formerly crashed, see PR 1508
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
target triple = "powerpc64-apple-darwin8"
diff --git a/test/CodeGen/PowerPC/BreakableToken-reduced.ll b/test/CodeGen/PowerPC/BreakableToken-reduced.ll
index 39516537da42..dcc093041682 100644
--- a/test/CodeGen/PowerPC/BreakableToken-reduced.ll
+++ b/test/CodeGen/PowerPC/BreakableToken-reduced.ll
@@ -265,12 +265,12 @@ _ZNK4llvm9StringRef10startswithES0_.exit: ; preds = %entry._ZNK4llvm9Str
}
; Function Attrs: nounwind argmemonly
-declare void @llvm.lifetime.start(i64, i8* nocapture) #2
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #2
declare void @_ZN5clang6format17WhitespaceManager24replaceWhitespaceInTokenERKNS0_11FormatTokenEjjN4llvm9StringRefES6_bjji(%"class.clang::format::WhitespaceManager"*, %"struct.clang::format::FormatToken"* dereferenceable(272), i32 zeroext, i32 zeroext, [2 x i64], [2 x i64], i1 zeroext, i32 zeroext, i32 zeroext, i32 signext) #3
; Function Attrs: nounwind argmemonly
-declare void @llvm.lifetime.end(i64, i8* nocapture) #2
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #2
attributes #9 = { nounwind }
diff --git a/test/CodeGen/PowerPC/aantidep-def-ec.mir b/test/CodeGen/PowerPC/aantidep-def-ec.mir
index cf6ab35d8db7..09aac7b0240a 100644
--- a/test/CodeGen/PowerPC/aantidep-def-ec.mir
+++ b/test/CodeGen/PowerPC/aantidep-def-ec.mir
@@ -48,22 +48,6 @@ tracksRegLiveness: true
liveins:
- { reg: '%x3' }
- { reg: '%x4' }
-calleeSavedRegisters: [ '%cr2', '%cr3', '%cr4', '%f14', '%f15', '%f16',
- '%f17', '%f18', '%f19', '%f20', '%f21', '%f22',
- '%f23', '%f24', '%f25', '%f26', '%f27', '%f28',
- '%f29', '%f30', '%f31', '%r14', '%r15', '%r16',
- '%r17', '%r18', '%r19', '%r20', '%r21', '%r22',
- '%r23', '%r24', '%r25', '%r26', '%r27', '%r28',
- '%r29', '%r30', '%r31', '%v20', '%v21', '%v22',
- '%v23', '%v24', '%v25', '%v26', '%v27', '%v28',
- '%v29', '%v30', '%v31', '%vf20', '%vf21', '%vf22',
- '%vf23', '%vf24', '%vf25', '%vf26', '%vf27', '%vf28',
- '%vf29', '%vf30', '%vf31', '%x14', '%x15', '%x16',
- '%x17', '%x18', '%x19', '%x20', '%x21', '%x22',
- '%x23', '%x24', '%x25', '%x26', '%x27', '%x28',
- '%x29', '%x30', '%x31', '%cr2eq', '%cr3eq', '%cr4eq',
- '%cr2gt', '%cr3gt', '%cr4gt', '%cr2lt', '%cr3lt',
- '%cr4lt', '%cr2un', '%cr3un', '%cr4un' ]
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
diff --git a/test/CodeGen/PowerPC/addegluecrash.ll b/test/CodeGen/PowerPC/addegluecrash.ll
new file mode 100644
index 000000000000..7605340d305f
--- /dev/null
+++ b/test/CodeGen/PowerPC/addegluecrash.ll
@@ -0,0 +1,58 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 < %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
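+; The i128 multiply/add chain below exercises long carry sequences (mulhdu,
+; addc, addze); going by the file name, this is presumably a regression test
+; for a crash in ADDE glue handling, pinned here against the full -O0
+; instruction sequence.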
+define void @bn_mul_comba8(i64* nocapture %r, i64* nocapture readonly %a, i64* nocapture readonly %b) {
+; CHECK-LABEL: bn_mul_comba8:
+; CHECK: # BB#0:
+; CHECK-NEXT: ld 6, 0(4)
+; CHECK-NEXT: ld 7, 0(5)
+; CHECK-NEXT: mulhdu 8, 7, 6
+; CHECK-NEXT: ld 4, 8(4)
+; CHECK-NEXT: mulld 9, 4, 6
+; CHECK-NEXT: mulhdu 4, 4, 6
+; CHECK-NEXT: addc 6, 9, 8
+; CHECK-NEXT: addze 4, 4
+; CHECK-NEXT: ld 5, 8(5)
+; CHECK-NEXT: mulld 8, 5, 7
+; CHECK-NEXT: mulhdu 5, 5, 7
+; CHECK-NEXT: addc 6, 6, 8
+; CHECK-NEXT: addze 5, 5
+; CHECK-NEXT: add 4, 5, 4
+; CHECK-NEXT: cmpld 7, 4, 5
+; CHECK-NEXT: mfocrf 10, 1
+; CHECK-NEXT: rlwinm 10, 10, 29, 31, 31
+; CHECK-NEXT: # implicit-def: %X4
+; CHECK-NEXT: mr 4, 10
+; CHECK-NEXT: clrldi 4, 4, 32
+; CHECK-NEXT: std 4, 0(3)
+; CHECK-NEXT: blr
+ %1 = load i64, i64* %a, align 8
+ %conv = zext i64 %1 to i128
+ %2 = load i64, i64* %b, align 8
+ %conv2 = zext i64 %2 to i128
+ %mul = mul nuw i128 %conv2, %conv
+ %shr = lshr i128 %mul, 64
+ %agep = getelementptr inbounds i64, i64* %a, i64 1
+ %3 = load i64, i64* %agep, align 8
+ %conv14 = zext i64 %3 to i128
+ %mul15 = mul nuw i128 %conv14, %conv
+ %add17 = add i128 %mul15, %shr
+ %shr19 = lshr i128 %add17, 64
+ %conv20 = trunc i128 %shr19 to i64
+ %bgep = getelementptr inbounds i64, i64* %b, i64 1
+ %4 = load i64, i64* %bgep, align 8
+ %conv28 = zext i64 %4 to i128
+ %mul31 = mul nuw i128 %conv28, %conv2
+ %conv32 = and i128 %add17, 18446744073709551615
+ %add33 = add i128 %conv32, %mul31
+ %shr35 = lshr i128 %add33, 64
+ %conv36 = trunc i128 %shr35 to i64
+ %add37 = add i64 %conv36, %conv20
+ %cmp38 = icmp ult i64 %add37, %conv36
+ %conv148 = zext i1 %cmp38 to i64
+ store i64 %conv148, i64* %r, align 8
+ ret void
+}
+
diff --git a/test/CodeGen/PowerPC/addi-licm.ll b/test/CodeGen/PowerPC/addi-licm.ll
index 37a14899debc..d0178a8aec0e 100644
--- a/test/CodeGen/PowerPC/addi-licm.ll
+++ b/test/CodeGen/PowerPC/addi-licm.ll
@@ -9,9 +9,9 @@ entry:
%x = alloca [2048 x float], align 4
%y = alloca [2048 x float], align 4
%0 = bitcast [2048 x float]* %x to i8*
- call void @llvm.lifetime.start(i64 8192, i8* %0) #2
+ call void @llvm.lifetime.start.p0i8(i64 8192, i8* %0) #2
%1 = bitcast [2048 x float]* %y to i8*
- call void @llvm.lifetime.start(i64 8192, i8* %1) #2
+ call void @llvm.lifetime.start.p0i8(i64 8192, i8* %1) #2
br label %for.body.i
; CHECK-LABEL: @foo
@@ -50,12 +50,12 @@ loop.exit: ; preds = %for.body.i
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #2
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #2
declare void @bar(float*, float*)
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #2
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #2
attributes #0 = { nounwind readonly }
attributes #1 = { nounwind }
diff --git a/test/CodeGen/PowerPC/anon_aggr.ll b/test/CodeGen/PowerPC/anon_aggr.ll
index f4e788849ec8..9b32a8f55f34 100644
--- a/test/CodeGen/PowerPC/anon_aggr.ll
+++ b/test/CodeGen/PowerPC/anon_aggr.ll
@@ -60,33 +60,34 @@ equal:
unequal:
ret i8* %array2_ptr
}
-
; CHECK-LABEL: func2:
-; CHECK: ld [[REG2:[0-9]+]], 72(1)
-; CHECK: cmpld {{([0-9]+,)?}}4, [[REG2]]
-; CHECK-DAG: std [[REG2]], -[[OFFSET1:[0-9]+]]
+; CHECK: cmpld {{([0-9]+,)?}}4, 6
+; CHECK-DAG: std 6, 72(1)
+; CHECK-DAG: std 5, 64(1)
+; CHECK-DAG: std 6, -[[OFFSET1:[0-9]+]]
; CHECK-DAG: std 4, -[[OFFSET2:[0-9]+]]
; CHECK: ld 3, -[[OFFSET2]](1)
; CHECK: ld 3, -[[OFFSET1]](1)
-; DARWIN32: _func2:
-; DARWIN32: addi r[[REG1:[0-9]+]], r[[REGSP:[0-9]+]], 36
-; DARWIN32: lwz r[[REG2:[0-9]+]], 44(r[[REGSP]])
+; DARWIN32-LABEL: _func2:
+; DARWIN32-DAG: addi r[[REG8:[0-9]+]], r[[REGSP:[0-9]+]], 36
+; DARWIN32-DAG: lwz r[[REG2:[0-9]+]], 44(r[[REGSP]])
; DARWIN32: mr
-; DARWIN32: mr r[[REG3:[0-9]+]], r[[REGA:[0-9]+]]
-; DARWIN32: cmplw {{(cr[0-9]+,)?}}r[[REGA]], r[[REG2]]
-; DARWIN32: stw r[[REG3]], -[[OFFSET1:[0-9]+]]
-; DARWIN32: stw r[[REG2]], -[[OFFSET2:[0-9]+]]
-; DARWIN32: lwz r3, -[[OFFSET1]]
-; DARWIN32: lwz r3, -[[OFFSET2]]
+; DARWIN32: mr r[[REG7:[0-9]+]], r5
+; DARWIN32-DAG: cmplw {{(cr[0-9]+,)?}}r5, r[[REG2]]
+; DARWIN32-DAG: stw r[[REG7]], -[[OFFSET1:[0-9]+]]
+; DARWIN32-DAG: stw r[[REG2]], -[[OFFSET2:[0-9]+]]
+; DARWIN32-DAG: lwz r3, -[[OFFSET1]]
+; DARWIN32-DAG: lwz r3, -[[OFFSET2]]
+
; DARWIN64: _func2:
; DARWIN64: ld r[[REG2:[0-9]+]], 72(r1)
; DARWIN64: mr
; DARWIN64: mr r[[REG3:[0-9]+]], r[[REGA:[0-9]+]]
; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REGA]], r[[REG2]]
-; DARWIN64: std r[[REG3]], -[[OFFSET1:[0-9]+]]
; DARWIN64: std r[[REG2]], -[[OFFSET2:[0-9]+]]
+; DARWIN64: std r[[REG3]], -[[OFFSET1:[0-9]+]]
; DARWIN64: ld r3, -[[OFFSET1]]
; DARWIN64: ld r3, -[[OFFSET2]]
@@ -106,24 +107,24 @@ unequal:
}
; CHECK-LABEL: func3:
-; CHECK: ld [[REG3:[0-9]+]], 72(1)
-; CHECK: ld [[REG4:[0-9]+]], 56(1)
-; CHECK: cmpld {{([0-9]+,)?}}[[REG4]], [[REG3]]
-; CHECK: std [[REG3]], -[[OFFSET1:[0-9]+]](1)
-; CHECK: std [[REG4]], -[[OFFSET2:[0-9]+]](1)
+; CHECK: cmpld {{([0-9]+,)?}}4, 6
+; CHECK-DAG: std 4, -[[OFFSET2:[0-9]+]](1)
+; CHECK-DAG: std 6, -[[OFFSET1:[0-9]+]](1)
; CHECK: ld 3, -[[OFFSET2]](1)
; CHECK: ld 3, -[[OFFSET1]](1)
-; DARWIN32: _func3:
-; DARWIN32: addi r[[REG1:[0-9]+]], r[[REGSP:[0-9]+]], 36
-; DARWIN32: addi r[[REG2:[0-9]+]], r[[REGSP]], 24
-; DARWIN32: lwz r[[REG3:[0-9]+]], 44(r[[REGSP]])
-; DARWIN32: lwz r[[REG4:[0-9]+]], 32(r[[REGSP]])
-; DARWIN32: cmplw {{(cr[0-9]+,)?}}r[[REG4]], r[[REG3]]
-; DARWIN32: stw r[[REG3]], -[[OFFSET1:[0-9]+]]
-; DARWIN32: stw r[[REG4]], -[[OFFSET2:[0-9]+]]
-; DARWIN32: lwz r3, -[[OFFSET2]]
-; DARWIN32: lwz r3, -[[OFFSET1]]
+; DARWIN32-LABEL: _func3:
+; DARWIN32-DAG: stw r[[REG8:[0-9]+]], 44(r[[REGSP:[0-9]+]])
+; DARWIN32-DAG: stw r[[REG5:[0-9]+]], 32(r[[REGSP]])
+; DARWIN32-DAG: addi r[[REG5a:[0-9]+]], r[[REGSP:[0-9]+]], 36
+; DARWIN32-DAG: addi r[[REG8a:[0-9]+]], r[[REGSP]], 24
+; DARWIN32-DAG: lwz r[[REG5a:[0-9]+]], 44(r[[REGSP]])
+; DARWIN32-DAG: lwz r[[REG8a:[0-9]+]], 32(r[[REGSP]])
+; DARWIN32-DAG: cmplw {{(cr[0-9]+,)?}}r[[REG8a]], r[[REG5a]]
+; DARWIN32-DAG: stw r[[REG5a]], -[[OFFSET1:[0-9]+]]
+; DARWIN32-DAG: stw r[[REG8a]], -[[OFFSET2:[0-9]+]]
+; DARWIN32-DAG: lwz r3, -[[OFFSET1]]
+; DARWIN32-DAG: lwz r3, -[[OFFSET2]]
; DARWIN64: _func3:
; DARWIN64: ld r[[REG3:[0-9]+]], 72(r1)
diff --git a/test/CodeGen/PowerPC/atomics-regression.ll b/test/CodeGen/PowerPC/atomics-regression.ll
new file mode 100644
index 000000000000..9af82b625532
--- /dev/null
+++ b/test/CodeGen/PowerPC/atomics-regression.ll
@@ -0,0 +1,9546 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=powerpc64le-linux-gnu < %s | FileCheck %s -check-prefix=PPC64LE
+
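+; Taken together, the checks below encode the PPC64LE fence placement for
+; each ordering: unordered and monotonic accesses compile to bare loads and
+; stores, acquire loads are followed by lwsync, release stores are preceded
+; by lwsync, and seq_cst accesses are preceded by a full sync (with a
+; trailing lwsync on loads). Standalone fences map to lwsync for acquire,
+; release and acq_rel, and to sync for seq_cst.
+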
+define i8 @test0(i8* %ptr) {
+; PPC64LE-LABEL: test0:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lbz 3, 0(3)
+; PPC64LE-NEXT: blr
+ %val = load atomic i8, i8* %ptr unordered, align 1
+ ret i8 %val
+}
+
+define i8 @test1(i8* %ptr) {
+; PPC64LE-LABEL: test1:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lbz 3, 0(3)
+; PPC64LE-NEXT: blr
+ %val = load atomic i8, i8* %ptr monotonic, align 1
+ ret i8 %val
+}
+
+define i8 @test2(i8* %ptr) {
+; PPC64LE-LABEL: test2:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lbz 3, 0(3)
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %val = load atomic i8, i8* %ptr acquire, align 1
+ ret i8 %val
+}
+
+define i8 @test3(i8* %ptr) {
+; PPC64LE-LABEL: test3:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: ori 2, 2, 0
+; PPC64LE-NEXT: lbz 3, 0(3)
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %val = load atomic i8, i8* %ptr seq_cst, align 1
+ ret i8 %val
+}
+
+define i16 @test4(i16* %ptr) {
+; PPC64LE-LABEL: test4:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lhz 3, 0(3)
+; PPC64LE-NEXT: blr
+ %val = load atomic i16, i16* %ptr unordered, align 2
+ ret i16 %val
+}
+
+define i16 @test5(i16* %ptr) {
+; PPC64LE-LABEL: test5:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lhz 3, 0(3)
+; PPC64LE-NEXT: blr
+ %val = load atomic i16, i16* %ptr monotonic, align 2
+ ret i16 %val
+}
+
+define i16 @test6(i16* %ptr) {
+; PPC64LE-LABEL: test6:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lhz 3, 0(3)
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %val = load atomic i16, i16* %ptr acquire, align 2
+ ret i16 %val
+}
+
+define i16 @test7(i16* %ptr) {
+; PPC64LE-LABEL: test7:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: ori 2, 2, 0
+; PPC64LE-NEXT: lhz 3, 0(3)
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %val = load atomic i16, i16* %ptr seq_cst, align 2
+ ret i16 %val
+}
+
+define i32 @test8(i32* %ptr) {
+; PPC64LE-LABEL: test8:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwz 3, 0(3)
+; PPC64LE-NEXT: blr
+ %val = load atomic i32, i32* %ptr unordered, align 4
+ ret i32 %val
+}
+
+define i32 @test9(i32* %ptr) {
+; PPC64LE-LABEL: test9:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwz 3, 0(3)
+; PPC64LE-NEXT: blr
+ %val = load atomic i32, i32* %ptr monotonic, align 4
+ ret i32 %val
+}
+
+define i32 @test10(i32* %ptr) {
+; PPC64LE-LABEL: test10:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwz 3, 0(3)
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %val = load atomic i32, i32* %ptr acquire, align 4
+ ret i32 %val
+}
+
+define i32 @test11(i32* %ptr) {
+; PPC64LE-LABEL: test11:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: ori 2, 2, 0
+; PPC64LE-NEXT: lwz 3, 0(3)
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %val = load atomic i32, i32* %ptr seq_cst, align 4
+ ret i32 %val
+}
+
+define i64 @test12(i64* %ptr) {
+; PPC64LE-LABEL: test12:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: ld 3, 0(3)
+; PPC64LE-NEXT: blr
+ %val = load atomic i64, i64* %ptr unordered, align 8
+ ret i64 %val
+}
+
+define i64 @test13(i64* %ptr) {
+; PPC64LE-LABEL: test13:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: ld 3, 0(3)
+; PPC64LE-NEXT: blr
+ %val = load atomic i64, i64* %ptr monotonic, align 8
+ ret i64 %val
+}
+
+define i64 @test14(i64* %ptr) {
+; PPC64LE-LABEL: test14:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: ld 3, 0(3)
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %val = load atomic i64, i64* %ptr acquire, align 8
+ ret i64 %val
+}
+
+define i64 @test15(i64* %ptr) {
+; PPC64LE-LABEL: test15:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: ori 2, 2, 0
+; PPC64LE-NEXT: ld 3, 0(3)
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %val = load atomic i64, i64* %ptr seq_cst, align 8
+ ret i64 %val
+}
+
+define void @test16(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test16:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: stb 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i8 %val, i8* %ptr unordered, align 1
+ ret void
+}
+
+define void @test17(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test17:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: stb 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i8 %val, i8* %ptr monotonic, align 1
+ ret void
+}
+
+define void @test18(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test18:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: stb 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i8 %val, i8* %ptr release, align 1
+ ret void
+}
+
+define void @test19(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test19:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: stb 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i8 %val, i8* %ptr seq_cst, align 1
+ ret void
+}
+
+define void @test20(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test20:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sth 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i16 %val, i16* %ptr unordered, align 2
+ ret void
+}
+
+define void @test21(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test21:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sth 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i16 %val, i16* %ptr monotonic, align 2
+ ret void
+}
+
+define void @test22(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test22:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: sth 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i16 %val, i16* %ptr release, align 2
+ ret void
+}
+
+define void @test23(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test23:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: sth 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i16 %val, i16* %ptr seq_cst, align 2
+ ret void
+}
+
+define void @test24(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test24:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: stw 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i32 %val, i32* %ptr unordered, align 4
+ ret void
+}
+
+define void @test25(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test25:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: stw 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i32 %val, i32* %ptr monotonic, align 4
+ ret void
+}
+
+define void @test26(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test26:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: stw 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i32 %val, i32* %ptr release, align 4
+ ret void
+}
+
+define void @test27(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test27:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: stw 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i32 %val, i32* %ptr seq_cst, align 4
+ ret void
+}
+
+define void @test28(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test28:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: std 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i64 %val, i64* %ptr unordered, align 8
+ ret void
+}
+
+define void @test29(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test29:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: std 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i64 %val, i64* %ptr monotonic, align 8
+ ret void
+}
+
+define void @test30(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test30:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: std 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i64 %val, i64* %ptr release, align 8
+ ret void
+}
+
+define void @test31(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test31:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: std 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i64 %val, i64* %ptr seq_cst, align 8
+ ret void
+}
+
+define void @test32() {
+; PPC64LE-LABEL: test32:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ fence acquire
+ ret void
+}
+
+define void @test33() {
+; PPC64LE-LABEL: test33:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ fence release
+ ret void
+}
+
+define void @test34() {
+; PPC64LE-LABEL: test34:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ fence acq_rel
+ ret void
+}
+
+define void @test35() {
+; PPC64LE-LABEL: test35:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: blr
+ fence seq_cst
+ ret void
+}
+
+define void @test36() {
+; PPC64LE-LABEL: test36:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ fence singlethread acquire
+ ret void
+}
+
+define void @test37() {
+; PPC64LE-LABEL: test37:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ fence singlethread release
+ ret void
+}
+
+define void @test38() {
+; PPC64LE-LABEL: test38:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ fence singlethread acq_rel
+ ret void
+}
+
+define void @test39() {
+; PPC64LE-LABEL: test39:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: blr
+ fence singlethread seq_cst
+ ret void
+}
+
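+; From here on, the cmpxchg tests all share one shape: a load-reserve /
+; store-conditional loop (lbarx/stbcx., lharx/sthcx., lwarx/stwcx. or
+; ldarx/stdcx., by width) whose fences follow the success and failure
+; orderings in the same way as the plain loads and stores above; the
+; singlethread variants (test80 onward) currently get the same barriers as
+; the cross-thread ones.
+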
+define void @test40(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test40:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: b .LBB40_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB40_1:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB40_2
+; PPC64LE-NEXT: .LBB40_2:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB40_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic
+ ret void
+}
+
+define void @test41(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test41:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB41_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB41_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB41_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB41_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire monotonic
+ ret void
+}
+
+define void @test42(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test42:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB42_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB42_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB42_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB42_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire acquire
+ ret void
+}
+
+define void @test43(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test43:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB43_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB43_1:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB43_2
+; PPC64LE-NEXT: .LBB43_2:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB43_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release monotonic
+ ret void
+}
+
+define void @test44(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test44:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB44_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB44_1:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB44_2
+; PPC64LE-NEXT: .LBB44_2:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB44_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release acquire
+ ret void
+}
+
+define void @test45(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test45:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB45_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB45_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB45_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB45_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel monotonic
+ ret void
+}
+
+define void @test46(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test46:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB46_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB46_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB46_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB46_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel acquire
+ ret void
+}
+
+define void @test47(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test47:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB47_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB47_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB47_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB47_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst monotonic
+ ret void
+}
+
+define void @test48(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test48:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB48_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB48_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB48_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB48_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst acquire
+ ret void
+}
+
+define void @test49(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test49:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB49_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB49_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB49_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB49_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst seq_cst
+ ret void
+}
+
+define void @test50(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test50:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: b .LBB50_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB50_1:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB50_2
+; PPC64LE-NEXT: .LBB50_2:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB50_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val monotonic monotonic
+ ret void
+}
+
+define void @test51(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test51:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB51_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB51_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB51_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB51_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire monotonic
+ ret void
+}
+
+define void @test52(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test52:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB52_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB52_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB52_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB52_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire acquire
+ ret void
+}
+
+define void @test53(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test53:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB53_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB53_1:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB53_2
+; PPC64LE-NEXT: .LBB53_2:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB53_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release monotonic
+ ret void
+}
+
+define void @test54(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test54:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB54_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB54_1:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB54_2
+; PPC64LE-NEXT: .LBB54_2:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB54_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release acquire
+ ret void
+}
+
+define void @test55(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test55:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB55_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB55_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB55_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB55_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel monotonic
+ ret void
+}
+
+define void @test56(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test56:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB56_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB56_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB56_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB56_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel acquire
+ ret void
+}
+
+define void @test57(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test57:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB57_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB57_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB57_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB57_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst monotonic
+ ret void
+}
+
+define void @test58(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test58:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB58_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB58_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB58_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB58_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst acquire
+ ret void
+}
+
+define void @test59(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test59:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB59_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB59_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB59_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB59_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst seq_cst
+ ret void
+}
+
+define void @test60(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test60:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: b .LBB60_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB60_1:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB60_2
+; PPC64LE-NEXT: .LBB60_2:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB60_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val monotonic monotonic
+ ret void
+}
+
+define void @test61(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test61:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB61_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB61_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB61_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB61_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire monotonic
+ ret void
+}
+
+define void @test62(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test62:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB62_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB62_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB62_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB62_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire acquire
+ ret void
+}
+
+define void @test63(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test63:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB63_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB63_1:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB63_2
+; PPC64LE-NEXT: .LBB63_2:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB63_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release monotonic
+ ret void
+}
+
+define void @test64(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test64:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB64_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB64_1:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB64_2
+; PPC64LE-NEXT: .LBB64_2:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB64_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release acquire
+ ret void
+}
+
+define void @test65(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test65:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB65_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB65_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB65_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB65_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel monotonic
+ ret void
+}
+
+define void @test66(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test66:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB66_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB66_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB66_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB66_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel acquire
+ ret void
+}
+
+define void @test67(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test67:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB67_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB67_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB67_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB67_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst monotonic
+ ret void
+}
+
+define void @test68(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test68:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB68_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB68_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB68_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB68_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst acquire
+ ret void
+}
+
+define void @test69(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test69:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB69_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB69_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB69_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB69_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst seq_cst
+ ret void
+}
+
+define void @test70(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test70:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: b .LBB70_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB70_1:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB70_2
+; PPC64LE-NEXT: .LBB70_2:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: beq 0, .LBB70_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val monotonic monotonic
+ ret void
+}
+
+define void @test71(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test71:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB71_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB71_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB71_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB71_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire monotonic
+ ret void
+}
+
+define void @test72(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test72:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB72_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB72_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB72_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB72_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire acquire
+ ret void
+}
+
+define void @test73(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test73:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB73_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB73_1:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB73_2
+; PPC64LE-NEXT: .LBB73_2:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: beq 0, .LBB73_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release monotonic
+ ret void
+}
+
+define void @test74(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test74:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB74_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB74_1:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB74_2
+; PPC64LE-NEXT: .LBB74_2:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: beq 0, .LBB74_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release acquire
+ ret void
+}
+
+define void @test75(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test75:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB75_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB75_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB75_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB75_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel monotonic
+ ret void
+}
+
+define void @test76(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test76:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB76_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB76_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB76_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB76_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel acquire
+ ret void
+}
+
+define void @test77(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test77:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB77_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB77_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB77_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB77_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst monotonic
+ ret void
+}
+
+define void @test78(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test78:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB78_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB78_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB78_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB78_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst acquire
+ ret void
+}
+
+define void @test79(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test79:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB79_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB79_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB79_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB79_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst seq_cst
+ ret void
+}
+
+define void @test80(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test80:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: b .LBB80_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB80_1:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB80_2
+; PPC64LE-NEXT: .LBB80_2:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB80_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread monotonic monotonic
+ ret void
+}
+
+define void @test81(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test81:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB81_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB81_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB81_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB81_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acquire monotonic
+ ret void
+}
+
+define void @test82(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test82:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB82_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB82_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB82_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB82_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acquire acquire
+ ret void
+}
+
+define void @test83(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test83:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB83_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB83_1:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB83_2
+; PPC64LE-NEXT: .LBB83_2:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB83_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread release monotonic
+ ret void
+}
+
+define void @test84(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test84:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB84_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB84_1:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB84_2
+; PPC64LE-NEXT: .LBB84_2:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB84_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread release acquire
+ ret void
+}
+
+define void @test85(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test85:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB85_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB85_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB85_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB85_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acq_rel monotonic
+ ret void
+}
+
+define void @test86(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test86:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB86_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB86_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB86_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB86_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acq_rel acquire
+ ret void
+}
+
+define void @test87(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test87:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB87_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB87_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB87_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB87_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread seq_cst monotonic
+ ret void
+}
+
+define void @test88(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test88:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB88_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB88_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB88_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB88_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread seq_cst acquire
+ ret void
+}
+
+define void @test89(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test89:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB89_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB89_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB89_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB89_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread seq_cst seq_cst
+ ret void
+}
+
+define void @test90(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test90:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: b .LBB90_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB90_1:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB90_2
+; PPC64LE-NEXT: .LBB90_2:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB90_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread monotonic monotonic
+ ret void
+}
+
+define void @test91(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test91:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB91_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB91_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB91_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB91_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acquire monotonic
+ ret void
+}
+
+define void @test92(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test92:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB92_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB92_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB92_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB92_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acquire acquire
+ ret void
+}
+
+define void @test93(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test93:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB93_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB93_1:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB93_2
+; PPC64LE-NEXT: .LBB93_2:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB93_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread release monotonic
+ ret void
+}
+
+define void @test94(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test94:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB94_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB94_1:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB94_2
+; PPC64LE-NEXT: .LBB94_2:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB94_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread release acquire
+ ret void
+}
+
+define void @test95(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test95:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB95_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB95_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB95_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB95_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acq_rel monotonic
+ ret void
+}
+
+define void @test96(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test96:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB96_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB96_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB96_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB96_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acq_rel acquire
+ ret void
+}
+
+define void @test97(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test97:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB97_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB97_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB97_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB97_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread seq_cst monotonic
+ ret void
+}
+
+define void @test98(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test98:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB98_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB98_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB98_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB98_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread seq_cst acquire
+ ret void
+}
+
+define void @test99(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test99:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB99_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB99_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB99_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB99_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread seq_cst seq_cst
+ ret void
+}
+
+define void @test100(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test100:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: b .LBB100_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB100_1:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB100_2
+; PPC64LE-NEXT: .LBB100_2:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB100_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread monotonic monotonic
+ ret void
+}
+
+define void @test101(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test101:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB101_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB101_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB101_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB101_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acquire monotonic
+ ret void
+}
+
+define void @test102(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test102:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB102_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB102_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB102_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB102_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acquire acquire
+ ret void
+}
+
+define void @test103(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test103:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB103_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB103_1:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB103_2
+; PPC64LE-NEXT: .LBB103_2:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB103_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread release monotonic
+ ret void
+}
+
+define void @test104(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test104:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB104_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB104_1:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB104_2
+; PPC64LE-NEXT: .LBB104_2:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB104_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread release acquire
+ ret void
+}
+
+define void @test105(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test105:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB105_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB105_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB105_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB105_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acq_rel monotonic
+ ret void
+}
+
+define void @test106(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test106:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB106_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB106_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB106_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB106_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acq_rel acquire
+ ret void
+}
+
+define void @test107(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test107:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB107_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB107_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB107_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB107_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread seq_cst monotonic
+ ret void
+}
+
+define void @test108(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test108:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB108_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB108_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB108_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB108_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread seq_cst acquire
+ ret void
+}
+
+define void @test109(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test109:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB109_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB109_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB109_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB109_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread seq_cst seq_cst
+ ret void
+}
+
+define void @test110(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test110:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: b .LBB110_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB110_1:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB110_2
+; PPC64LE-NEXT: .LBB110_2:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: beq 0, .LBB110_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread monotonic monotonic
+ ret void
+}
+
+define void @test111(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test111:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB111_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB111_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB111_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB111_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acquire monotonic
+ ret void
+}
+
+define void @test112(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test112:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB112_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB112_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB112_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB112_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acquire acquire
+ ret void
+}
+
+define void @test113(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test113:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB113_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB113_1:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB113_2
+; PPC64LE-NEXT: .LBB113_2:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: beq 0, .LBB113_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread release monotonic
+ ret void
+}
+
+define void @test114(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test114:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB114_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB114_1:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB114_2
+; PPC64LE-NEXT: .LBB114_2:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: beq 0, .LBB114_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread release acquire
+ ret void
+}
+
+define void @test115(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test115:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB115_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB115_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB115_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB115_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acq_rel monotonic
+ ret void
+}
+
+define void @test116(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test116:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB116_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB116_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB116_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB116_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acq_rel acquire
+ ret void
+}
+
+define void @test117(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test117:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB117_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB117_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB117_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB117_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread seq_cst monotonic
+ ret void
+}
+
+define void @test118(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test118:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB118_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB118_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB118_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB118_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread seq_cst acquire
+ ret void
+}
+
+define void @test119(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test119:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB119_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB119_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB119_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB119_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread seq_cst seq_cst
+ ret void
+}
+
+define i8 @test120(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test120:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB120_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB120_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test121(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test121:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB121_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB121_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test122(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test122:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB122_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB122_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test123(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test123:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB123_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB123_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test124(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test124:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB124_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB124_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test125(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test125:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB125_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB125_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test126(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test126:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB126_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB126_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test127(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test127:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB127_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB127_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test128(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test128:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB128_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB128_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test129(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test129:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB129_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB129_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test130(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test130:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB130_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB130_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test131(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test131:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB131_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB131_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test132(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test132:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB132_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB132_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test133(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test133:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB133_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB133_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test134(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test134:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB134_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB134_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test135(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test135:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB135_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB135_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test136(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test136:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB136_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB136_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test137(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test137:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB137_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB137_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test138(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test138:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB138_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB138_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test139(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test139:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB139_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB139_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test140(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test140:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB140_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB140_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test141(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test141:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB141_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB141_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test142(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test142:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB142_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB142_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test143(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test143:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB143_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB143_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test144(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test144:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB144_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB144_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test145(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test145:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB145_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB145_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test146(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test146:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB146_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB146_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test147(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test147:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB147_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB147_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test148(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test148:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB148_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB148_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test149(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test149:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB149_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB149_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test150(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test150:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB150_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB150_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test151(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test151:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB151_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB151_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test152(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test152:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB152_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB152_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test153(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test153:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB153_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB153_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test154(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test154:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB154_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB154_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test155(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test155:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB155_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB155_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test156(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test156:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB156_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB156_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test157(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test157:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB157_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB157_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test158(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test158:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB158_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB158_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test159(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test159:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB159_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB159_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test160(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test160:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB160_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB160_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test161(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test161:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB161_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: subf 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB161_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test162(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test162:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB162_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB162_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test163(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test163:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB163_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB163_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test164(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test164:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB164_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB164_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test165(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test165:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB165_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB165_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test166(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test166:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB166_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: subf 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB166_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test167(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test167:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB167_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB167_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test168(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test168:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB168_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB168_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test169(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test169:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB169_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB169_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test170(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test170:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB170_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB170_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test171(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test171:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB171_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: subf 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB171_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test172(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test172:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB172_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB172_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test173(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test173:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB173_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB173_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test174(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test174:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB174_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB174_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test175(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test175:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB175_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB175_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test176(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test176:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB176_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: sub 6, 3, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB176_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test177(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test177:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB177_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB177_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test178(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test178:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB178_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB178_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test179(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test179:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB179_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB179_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test180(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test180:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB180_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB180_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test181(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test181:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB181_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB181_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test182(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test182:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB182_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB182_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test183(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test183:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB183_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB183_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test184(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test184:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB184_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB184_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test185(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test185:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB185_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB185_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test186(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test186:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB186_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB186_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test187(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test187:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB187_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB187_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test188(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test188:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB188_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB188_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test189(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test189:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB189_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB189_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test190(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test190:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB190_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB190_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test191(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test191:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB191_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB191_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test192(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test192:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB192_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB192_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test193(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test193:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB193_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB193_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test194(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test194:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB194_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB194_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test195(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test195:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB195_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB195_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test196(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test196:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB196_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB196_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test197(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test197:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB197_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB197_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test198(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test198:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB198_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB198_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test199(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test199:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB199_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB199_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test200(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test200:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB200_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB200_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test201(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test201:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB201_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB201_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test202(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test202:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB202_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB202_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test203(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test203:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB203_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB203_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test204(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test204:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB204_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB204_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test205(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test205:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB205_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB205_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test206(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test206:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB206_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB206_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test207(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test207:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB207_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB207_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test208(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test208:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB208_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB208_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test209(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test209:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB209_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB209_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test210(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test210:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB210_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB210_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test211(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test211:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB211_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB211_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test212(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test212:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB212_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB212_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test213(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test213:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB213_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB213_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test214(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test214:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB214_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB214_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test215(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test215:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB215_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB215_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test216(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test216:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB216_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB216_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test217(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test217:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB217_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB217_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test218(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test218:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB218_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB218_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test219(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test219:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB219_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB219_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test220(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test220:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB220_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB220_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test221(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test221:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB221_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB221_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test222(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test222:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB222_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB222_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test223(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test223:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB223_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB223_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test224(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test224:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB224_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB224_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test225(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test225:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB225_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB225_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test226(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test226:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB226_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB226_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test227(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test227:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB227_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB227_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test228(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test228:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB228_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB228_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test229(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test229:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB229_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB229_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test230(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test230:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB230_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB230_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test231(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test231:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB231_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB231_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test232(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test232:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB232_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB232_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test233(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test233:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB233_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB233_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test234(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test234:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB234_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB234_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test235(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test235:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB235_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB235_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test236(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test236:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB236_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB236_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test237(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test237:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB237_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB237_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test238(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test238:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB238_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB238_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test239(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test239:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB239_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB239_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test240(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test240:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB240_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB240_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test241(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test241:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB241_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB241_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test242(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test242:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB242_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB242_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test243(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test243:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB243_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB243_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test244(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test244:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB244_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB244_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test245(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test245:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB245_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB245_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test246(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test246:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB246_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB246_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test247(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test247:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB247_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB247_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test248(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test248:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB248_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB248_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test249(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test249:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB249_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB249_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test250(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test250:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB250_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB250_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test251(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test251:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB251_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB251_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test252(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test252:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB252_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB252_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test253(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test253:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB253_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB253_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test254(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test254:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB254_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB254_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test255(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test255:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB255_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB255_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test256(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test256:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB256_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB256_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test257(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test257:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB257_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB257_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test258(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test258:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB258_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB258_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test259(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test259:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB259_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB259_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test260(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test260:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB260_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB260_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB260_1
+; PPC64LE-NEXT: .LBB260_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test261(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test261:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB261_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: extsb 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB261_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB261_1
+; PPC64LE-NEXT: .LBB261_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test262(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test262:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB262_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB262_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB262_1
+; PPC64LE-NEXT: .LBB262_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test263(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test263:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB263_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB263_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB263_1
+; PPC64LE-NEXT: .LBB263_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test264(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test264:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB264_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB264_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB264_1
+; PPC64LE-NEXT: .LBB264_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test265(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test265:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB265_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB265_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB265_1
+; PPC64LE-NEXT: .LBB265_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test266(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test266:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB266_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: extsh 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB266_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB266_1
+; PPC64LE-NEXT: .LBB266_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test267(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test267:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB267_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB267_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB267_1
+; PPC64LE-NEXT: .LBB267_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test268(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test268:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB268_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB268_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB268_1
+; PPC64LE-NEXT: .LBB268_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test269(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test269:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB269_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB269_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB269_1
+; PPC64LE-NEXT: .LBB269_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test270(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test270:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB270_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB270_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB270_1
+; PPC64LE-NEXT: .LBB270_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test271(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test271:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB271_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmpw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB271_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB271_1
+; PPC64LE-NEXT: .LBB271_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test272(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test272:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB272_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB272_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB272_1
+; PPC64LE-NEXT: .LBB272_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test273(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test273:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB273_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB273_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB273_1
+; PPC64LE-NEXT: .LBB273_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test274(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test274:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB274_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB274_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB274_1
+; PPC64LE-NEXT: .LBB274_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test275(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test275:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB275_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB275_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB275_1
+; PPC64LE-NEXT: .LBB275_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test276(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test276:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB276_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpd 4, 3
+; PPC64LE-NEXT: ble 0, .LBB276_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB276_1
+; PPC64LE-NEXT: .LBB276_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test277(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test277:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB277_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB277_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB277_1
+; PPC64LE-NEXT: .LBB277_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test278(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test278:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB278_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB278_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB278_1
+; PPC64LE-NEXT: .LBB278_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test279(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test279:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB279_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB279_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB279_1
+; PPC64LE-NEXT: .LBB279_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test280(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test280:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB280_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB280_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB280_1
+; PPC64LE-NEXT: .LBB280_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test281(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test281:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB281_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: extsb 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB281_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB281_1
+; PPC64LE-NEXT: .LBB281_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test282(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test282:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB282_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB282_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB282_1
+; PPC64LE-NEXT: .LBB282_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test283(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test283:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB283_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB283_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB283_1
+; PPC64LE-NEXT: .LBB283_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test284(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test284:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB284_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB284_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB284_1
+; PPC64LE-NEXT: .LBB284_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test285(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test285:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB285_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB285_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB285_1
+; PPC64LE-NEXT: .LBB285_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test286(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test286:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB286_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: extsh 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB286_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB286_1
+; PPC64LE-NEXT: .LBB286_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test287(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test287:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB287_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB287_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB287_1
+; PPC64LE-NEXT: .LBB287_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test288(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test288:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB288_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB288_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB288_1
+; PPC64LE-NEXT: .LBB288_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test289(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test289:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB289_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB289_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB289_1
+; PPC64LE-NEXT: .LBB289_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test290(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test290:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB290_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB290_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB290_1
+; PPC64LE-NEXT: .LBB290_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test291(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test291:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB291_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmpw 4, 3
+; PPC64LE-NEXT: bge 0, .LBB291_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB291_1
+; PPC64LE-NEXT: .LBB291_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test292(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test292:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB292_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB292_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB292_1
+; PPC64LE-NEXT: .LBB292_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test293(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test293:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB293_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB293_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB293_1
+; PPC64LE-NEXT: .LBB293_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test294(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test294:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB294_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB294_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB294_1
+; PPC64LE-NEXT: .LBB294_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test295(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test295:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB295_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB295_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB295_1
+; PPC64LE-NEXT: .LBB295_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test296(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test296:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB296_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpd 4, 3
+; PPC64LE-NEXT: bge 0, .LBB296_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB296_1
+; PPC64LE-NEXT: .LBB296_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test297(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test297:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB297_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB297_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB297_1
+; PPC64LE-NEXT: .LBB297_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test298(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test298:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB298_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB298_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB298_1
+; PPC64LE-NEXT: .LBB298_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test299(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test299:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB299_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB299_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB299_1
+; PPC64LE-NEXT: .LBB299_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test300(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test300:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB300_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB300_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB300_1
+; PPC64LE-NEXT: .LBB300_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test301(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test301:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB301_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB301_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB301_1
+; PPC64LE-NEXT: .LBB301_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test302(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test302:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB302_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB302_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB302_1
+; PPC64LE-NEXT: .LBB302_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test303(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test303:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB303_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB303_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB303_1
+; PPC64LE-NEXT: .LBB303_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test304(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test304:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB304_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB304_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB304_1
+; PPC64LE-NEXT: .LBB304_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test305(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test305:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB305_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB305_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB305_1
+; PPC64LE-NEXT: .LBB305_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test306(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test306:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB306_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB306_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB306_1
+; PPC64LE-NEXT: .LBB306_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test307(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test307:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB307_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB307_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB307_1
+; PPC64LE-NEXT: .LBB307_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test308(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test308:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB308_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB308_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB308_1
+; PPC64LE-NEXT: .LBB308_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test309(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test309:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB309_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB309_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB309_1
+; PPC64LE-NEXT: .LBB309_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test310(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test310:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB310_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB310_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB310_1
+; PPC64LE-NEXT: .LBB310_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test311(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test311:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB311_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB311_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB311_1
+; PPC64LE-NEXT: .LBB311_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test312(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test312:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB312_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB312_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB312_1
+; PPC64LE-NEXT: .LBB312_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test313(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test313:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB313_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB313_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB313_1
+; PPC64LE-NEXT: .LBB313_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test314(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test314:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB314_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB314_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB314_1
+; PPC64LE-NEXT: .LBB314_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test315(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test315:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB315_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: ble 0, .LBB315_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB315_1
+; PPC64LE-NEXT: .LBB315_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test316(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test316:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB316_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpld 4, 3
+; PPC64LE-NEXT: ble 0, .LBB316_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB316_1
+; PPC64LE-NEXT: .LBB316_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test317(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test317:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB317_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: ble 0, .LBB317_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB317_1
+; PPC64LE-NEXT: .LBB317_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test318(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test318:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB318_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: ble 0, .LBB318_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB318_1
+; PPC64LE-NEXT: .LBB318_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test319(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test319:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB319_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: ble 0, .LBB319_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB319_1
+; PPC64LE-NEXT: .LBB319_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test320(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test320:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB320_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB320_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB320_1
+; PPC64LE-NEXT: .LBB320_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test321(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test321:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB321_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: bge 0, .LBB321_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB321_1
+; PPC64LE-NEXT: .LBB321_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test322(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test322:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB322_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB322_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB322_1
+; PPC64LE-NEXT: .LBB322_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test323(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test323:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB323_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB323_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB323_1
+; PPC64LE-NEXT: .LBB323_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test324(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test324:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB324_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB324_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB324_1
+; PPC64LE-NEXT: .LBB324_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test325(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test325:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB325_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB325_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB325_1
+; PPC64LE-NEXT: .LBB325_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test326(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test326:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB326_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: bge 0, .LBB326_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB326_1
+; PPC64LE-NEXT: .LBB326_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test327(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test327:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB327_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB327_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB327_1
+; PPC64LE-NEXT: .LBB327_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test328(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test328:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB328_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB328_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB328_1
+; PPC64LE-NEXT: .LBB328_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test329(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test329:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB329_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB329_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB329_1
+; PPC64LE-NEXT: .LBB329_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test330(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test330:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB330_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB330_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB330_1
+; PPC64LE-NEXT: .LBB330_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test331(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test331:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB331_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: bge 0, .LBB331_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB331_1
+; PPC64LE-NEXT: .LBB331_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test332(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test332:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB332_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB332_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB332_1
+; PPC64LE-NEXT: .LBB332_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test333(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test333:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB333_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB333_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB333_1
+; PPC64LE-NEXT: .LBB333_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test334(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test334:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB334_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB334_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB334_1
+; PPC64LE-NEXT: .LBB334_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test335(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test335:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB335_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB335_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB335_1
+; PPC64LE-NEXT: .LBB335_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test336(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test336:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB336_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpld 4, 3
+; PPC64LE-NEXT: bge 0, .LBB336_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB336_1
+; PPC64LE-NEXT: .LBB336_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test337(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test337:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB337_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB337_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB337_1
+; PPC64LE-NEXT: .LBB337_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test338(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test338:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB338_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB338_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB338_1
+; PPC64LE-NEXT: .LBB338_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test339(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test339:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB339_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB339_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB339_1
+; PPC64LE-NEXT: .LBB339_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test340(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test340:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB340_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB340_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test341(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test341:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB341_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB341_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test342(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test342:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB342_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB342_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test343(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test343:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB343_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB343_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test344(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test344:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB344_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB344_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test345(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test345:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB345_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB345_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test346(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test346:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB346_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB346_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test347(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test347:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB347_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB347_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test348(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test348:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB348_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB348_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test349(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test349:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB349_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB349_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test350(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test350:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB350_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB350_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test351(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test351:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB351_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB351_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test352(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test352:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB352_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB352_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test353(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test353:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB353_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB353_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test354(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test354:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB354_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB354_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test355(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test355:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB355_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB355_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test356(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test356:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB356_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB356_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test357(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test357:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB357_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB357_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test358(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test358:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB358_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB358_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test359(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test359:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB359_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB359_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
+define i8 @test360(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test360:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB360_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB360_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test361(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test361:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB361_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB361_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test362(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test362:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB362_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB362_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test363(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test363:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB363_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB363_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test364(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test364:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB364_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB364_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test365(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test365:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB365_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB365_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test366(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test366:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB366_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB366_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test367(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test367:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB367_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB367_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test368(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test368:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB368_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB368_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test369(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test369:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB369_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB369_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test370(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test370:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB370_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB370_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test371(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test371:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB371_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB371_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test372(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test372:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB372_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB372_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test373(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test373:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB373_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB373_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test374(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test374:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB374_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB374_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test375(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test375:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB375_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB375_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test376(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test376:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB376_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB376_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test377(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test377:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB377_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB377_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test378(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test378:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB378_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB378_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test379(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test379:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB379_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB379_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
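+; atomicrmw sub (test380-test399): same loop and fence pattern as add above,
+; with the new value computed by 'subf' (printed as the 'sub' mnemonic in the
+; i64 cases).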
+define i8 @test380(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test380:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB380_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB380_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test381(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test381:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB381_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: subf 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB381_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test382(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test382:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB382_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB382_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test383(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test383:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB383_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB383_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test384(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test384:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB384_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB384_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test385(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test385:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB385_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB385_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test386(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test386:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB386_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: subf 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB386_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test387(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test387:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB387_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB387_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test388(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test388:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB388_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB388_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test389(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test389:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB389_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB389_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test390(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test390:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB390_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB390_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test391(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test391:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB391_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: subf 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB391_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test392(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test392:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB392_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB392_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test393(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test393:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB393_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB393_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test394(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test394:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB394_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB394_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test395(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test395:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB395_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB395_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test396(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test396:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB396_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: sub 6, 3, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB396_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test397(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test397:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB397_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB397_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test398(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test398:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB398_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB398_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test399(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test399:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB399_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB399_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
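+; atomicrmw and (test400-test419): same pattern, with 'and' in the loop body.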
+define i8 @test400(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test400:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB400_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB400_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test401(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test401:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB401_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB401_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test402(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test402:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB402_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB402_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test403(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test403:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB403_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB403_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test404(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test404:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB404_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB404_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test405(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test405:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB405_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB405_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test406(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test406:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB406_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB406_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test407(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test407:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB407_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB407_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test408(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test408:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB408_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB408_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test409(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test409:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB409_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB409_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test410(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test410:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB410_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB410_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test411(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test411:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB411_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB411_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test412(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test412:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB412_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB412_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test413(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test413:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB413_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB413_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test414(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test414:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB414_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB414_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test415(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test415:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB415_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB415_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test416(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test416:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB416_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB416_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test417(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test417:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB417_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB417_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test418(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test418:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB418_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB418_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test419(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test419:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB419_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB419_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
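+; atomicrmw nand (test420-test439): same pattern, with 'nand' in the loop body.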
+define i8 @test420(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test420:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB420_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB420_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test421(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test421:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB421_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB421_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test422(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test422:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB422_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB422_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test423(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test423:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB423_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB423_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test424(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test424:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB424_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB424_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test425(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test425:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB425_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB425_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test426(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test426:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB426_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB426_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test427(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test427:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB427_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB427_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test428(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test428:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB428_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB428_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test429(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test429:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB429_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB429_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test430(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test430:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB430_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB430_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test431(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test431:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB431_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB431_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test432(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test432:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB432_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB432_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test433(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test433:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB433_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB433_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test434(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test434:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB434_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB434_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test435(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test435:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB435_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB435_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test436(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test436:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB436_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB436_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test437(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test437:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB437_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB437_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test438(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test438:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB438_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB438_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test439(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test439:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB439_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB439_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
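+; atomicrmw or (test440-test459): same pattern, with 'or' in the loop body.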
+define i8 @test440(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test440:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB440_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB440_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test441(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test441:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB441_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB441_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test442(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test442:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB442_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB442_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test443(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test443:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB443_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB443_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test444(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test444:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB444_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB444_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test445(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test445:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB445_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB445_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test446(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test446:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB446_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB446_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test447(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test447:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB447_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB447_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test448(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test448:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB448_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB448_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test449(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test449:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB449_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB449_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test450(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test450:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB450_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB450_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test451(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test451:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB451_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB451_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test452(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test452:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB452_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB452_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test453(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test453:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB453_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB453_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test454(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test454:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB454_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB454_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test455(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test455:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB455_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB455_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test456(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test456:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB456_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB456_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test457(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test457:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB457_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB457_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test458(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test458:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB458_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB458_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test459(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test459:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB459_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB459_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
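+; atomicrmw xor (test460-test479): same pattern, with 'xor' in the loop body.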
+define i8 @test460(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test460:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB460_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB460_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test461(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test461:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB461_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB461_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test462(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test462:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB462_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB462_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test463(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test463:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB463_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB463_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test464(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test464:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB464_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB464_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test465(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test465:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB465_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB465_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test466(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test466:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB466_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB466_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test467(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test467:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB467_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB467_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test468(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test468:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB468_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB468_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test469(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test469:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB469_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB469_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test470(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test470:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB470_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB470_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test471(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test471:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB471_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB471_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test472(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test472:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB472_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB472_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test473(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test473:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB473_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB473_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test474(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test474:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB474_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB474_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test475(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test475:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB475_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB475_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test476(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test476:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB476_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB476_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test477(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test477:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB477_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB477_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test478(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test478:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB478_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB478_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test479(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test479:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB479_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB479_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
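+; atomicrmw max (test480 onward): unlike the plain ALU operations above, the
+; loop compares the loaded value against the operand (extsb/extsh sign-extends
+; sub-word loads before the signed cmpw/cmpd) and branches past the
+; st{b,h,w,d}cx. when the stored value would be unchanged; fence placement per
+; ordering is the same as in the preceding groups.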
+define i8 @test480(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test480:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB480_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB480_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB480_1
+; PPC64LE-NEXT: .LBB480_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test481(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test481:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB481_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: extsb 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB481_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB481_1
+; PPC64LE-NEXT: .LBB481_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test482(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test482:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB482_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB482_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB482_1
+; PPC64LE-NEXT: .LBB482_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test483(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test483:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB483_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB483_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB483_1
+; PPC64LE-NEXT: .LBB483_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test484(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test484:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB484_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB484_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB484_1
+; PPC64LE-NEXT: .LBB484_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test485(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test485:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB485_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB485_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB485_1
+; PPC64LE-NEXT: .LBB485_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test486(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test486:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB486_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: extsh 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB486_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB486_1
+; PPC64LE-NEXT: .LBB486_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test487(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test487:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB487_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB487_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB487_1
+; PPC64LE-NEXT: .LBB487_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test488(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test488:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB488_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB488_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB488_1
+; PPC64LE-NEXT: .LBB488_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test489(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test489:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB489_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB489_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB489_1
+; PPC64LE-NEXT: .LBB489_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test490(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test490:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB490_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB490_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB490_1
+; PPC64LE-NEXT: .LBB490_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test491(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test491:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB491_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmpw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB491_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB491_1
+; PPC64LE-NEXT: .LBB491_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test492(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test492:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB492_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB492_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB492_1
+; PPC64LE-NEXT: .LBB492_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test493(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test493:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB493_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB493_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB493_1
+; PPC64LE-NEXT: .LBB493_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test494(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test494:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB494_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB494_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB494_1
+; PPC64LE-NEXT: .LBB494_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test495(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test495:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB495_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB495_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB495_1
+; PPC64LE-NEXT: .LBB495_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test496(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test496:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB496_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpd 4, 3
+; PPC64LE-NEXT: ble 0, .LBB496_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB496_1
+; PPC64LE-NEXT: .LBB496_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test497(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test497:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB497_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB497_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB497_1
+; PPC64LE-NEXT: .LBB497_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test498(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test498:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB498_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB498_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB498_1
+; PPC64LE-NEXT: .LBB498_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test499(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test499:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB499_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB499_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB499_1
+; PPC64LE-NEXT: .LBB499_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
+define i8 @test500(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test500:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB500_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB500_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB500_1
+; PPC64LE-NEXT: .LBB500_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test501(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test501:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB501_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: extsb 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB501_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB501_1
+; PPC64LE-NEXT: .LBB501_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test502(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test502:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB502_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB502_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB502_1
+; PPC64LE-NEXT: .LBB502_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test503(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test503:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB503_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB503_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB503_1
+; PPC64LE-NEXT: .LBB503_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test504(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test504:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB504_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB504_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB504_1
+; PPC64LE-NEXT: .LBB504_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test505(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test505:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB505_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB505_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB505_1
+; PPC64LE-NEXT: .LBB505_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test506(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test506:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB506_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: extsh 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB506_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB506_1
+; PPC64LE-NEXT: .LBB506_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test507(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test507:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB507_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB507_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB507_1
+; PPC64LE-NEXT: .LBB507_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test508(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test508:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB508_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB508_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB508_1
+; PPC64LE-NEXT: .LBB508_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test509(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test509:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB509_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB509_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB509_1
+; PPC64LE-NEXT: .LBB509_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test510(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test510:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB510_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB510_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB510_1
+; PPC64LE-NEXT: .LBB510_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test511(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test511:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB511_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmpw 4, 3
+; PPC64LE-NEXT: bge 0, .LBB511_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB511_1
+; PPC64LE-NEXT: .LBB511_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test512(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test512:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB512_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB512_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB512_1
+; PPC64LE-NEXT: .LBB512_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test513(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test513:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB513_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB513_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB513_1
+; PPC64LE-NEXT: .LBB513_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test514(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test514:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB514_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB514_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB514_1
+; PPC64LE-NEXT: .LBB514_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test515(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test515:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB515_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB515_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB515_1
+; PPC64LE-NEXT: .LBB515_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test516(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test516:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB516_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpd 4, 3
+; PPC64LE-NEXT: bge 0, .LBB516_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB516_1
+; PPC64LE-NEXT: .LBB516_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test517(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test517:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB517_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB517_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB517_1
+; PPC64LE-NEXT: .LBB517_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test518(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test518:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB518_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB518_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB518_1
+; PPC64LE-NEXT: .LBB518_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test519(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test519:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB519_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB519_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB519_1
+; PPC64LE-NEXT: .LBB519_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
+define i8 @test520(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test520:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB520_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB520_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB520_1
+; PPC64LE-NEXT: .LBB520_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test521(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test521:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB521_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB521_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB521_1
+; PPC64LE-NEXT: .LBB521_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test522(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test522:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB522_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB522_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB522_1
+; PPC64LE-NEXT: .LBB522_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test523(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test523:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB523_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB523_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB523_1
+; PPC64LE-NEXT: .LBB523_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test524(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test524:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB524_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB524_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB524_1
+; PPC64LE-NEXT: .LBB524_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test525(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test525:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB525_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB525_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB525_1
+; PPC64LE-NEXT: .LBB525_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test526(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test526:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB526_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB526_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB526_1
+; PPC64LE-NEXT: .LBB526_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test527(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test527:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB527_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB527_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB527_1
+; PPC64LE-NEXT: .LBB527_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test528(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test528:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB528_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB528_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB528_1
+; PPC64LE-NEXT: .LBB528_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test529(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test529:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB529_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB529_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB529_1
+; PPC64LE-NEXT: .LBB529_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test530(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test530:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB530_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB530_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB530_1
+; PPC64LE-NEXT: .LBB530_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test531(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test531:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB531_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB531_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB531_1
+; PPC64LE-NEXT: .LBB531_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test532(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test532:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB532_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB532_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB532_1
+; PPC64LE-NEXT: .LBB532_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test533(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test533:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB533_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB533_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB533_1
+; PPC64LE-NEXT: .LBB533_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test534(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test534:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB534_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB534_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB534_1
+; PPC64LE-NEXT: .LBB534_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test535(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test535:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB535_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: ble 0, .LBB535_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB535_1
+; PPC64LE-NEXT: .LBB535_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test536(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test536:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB536_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpld 4, 3
+; PPC64LE-NEXT: ble 0, .LBB536_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB536_1
+; PPC64LE-NEXT: .LBB536_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test537(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test537:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB537_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: ble 0, .LBB537_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB537_1
+; PPC64LE-NEXT: .LBB537_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test538(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test538:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB538_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: ble 0, .LBB538_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB538_1
+; PPC64LE-NEXT: .LBB538_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test539(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test539:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB539_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: ble 0, .LBB539_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB539_1
+; PPC64LE-NEXT: .LBB539_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
+define i8 @test540(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test540:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB540_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB540_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB540_1
+; PPC64LE-NEXT: .LBB540_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test541(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test541:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB541_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: bge 0, .LBB541_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB541_1
+; PPC64LE-NEXT: .LBB541_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test542(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test542:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB542_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB542_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB542_1
+; PPC64LE-NEXT: .LBB542_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test543(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test543:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB543_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB543_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB543_1
+; PPC64LE-NEXT: .LBB543_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test544(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test544:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB544_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB544_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB544_1
+; PPC64LE-NEXT: .LBB544_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test545(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test545:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB545_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB545_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB545_1
+; PPC64LE-NEXT: .LBB545_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test546(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test546:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB546_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: bge 0, .LBB546_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB546_1
+; PPC64LE-NEXT: .LBB546_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test547(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test547:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB547_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB547_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB547_1
+; PPC64LE-NEXT: .LBB547_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test548(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test548:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB548_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB548_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB548_1
+; PPC64LE-NEXT: .LBB548_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test549(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test549:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB549_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB549_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB549_1
+; PPC64LE-NEXT: .LBB549_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test550(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test550:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB550_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB550_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB550_1
+; PPC64LE-NEXT: .LBB550_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test551(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test551:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB551_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: bge 0, .LBB551_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB551_1
+; PPC64LE-NEXT: .LBB551_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test552(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test552:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB552_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB552_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB552_1
+; PPC64LE-NEXT: .LBB552_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test553(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test553:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB553_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB553_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB553_1
+; PPC64LE-NEXT: .LBB553_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test554(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test554:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB554_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB554_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB554_1
+; PPC64LE-NEXT: .LBB554_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test555(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test555:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB555_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB555_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB555_1
+; PPC64LE-NEXT: .LBB555_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test556(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test556:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB556_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpld 4, 3
+; PPC64LE-NEXT: bge 0, .LBB556_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB556_1
+; PPC64LE-NEXT: .LBB556_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test557(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test557:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB557_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB557_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB557_1
+; PPC64LE-NEXT: .LBB557_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test558(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test558:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB558_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB558_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB558_1
+; PPC64LE-NEXT: .LBB558_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test559(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test559:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB559_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB559_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB559_1
+; PPC64LE-NEXT: .LBB559_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
diff --git a/test/CodeGen/PowerPC/bitcasts-direct-move.ll b/test/CodeGen/PowerPC/bitcasts-direct-move.ll
index 79da5cb68740..d6c7dd3804ff 100644
--- a/test/CodeGen/PowerPC/bitcasts-direct-move.ll
+++ b/test/CodeGen/PowerPC/bitcasts-direct-move.ll
@@ -20,7 +20,7 @@ entry:
ret i64 %0
; CHECK-P7: stxsdx 1,
; CHECK-P7: ld 3,
-; CHECK: mfvsrd 3, 1
+; CHECK: mffprd 3, 1
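+; (mffprd appears to be the extended mnemonic for a direct move from an FPR,
+; i.e. an alias of mfvsrd with an FPR source; the test now expects the alias.)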
}
define float @i32tof32(i32 signext %a) {
@@ -60,7 +60,7 @@ entry:
ret i64 %0
; CHECK-P7: stxsdx 1,
; CHECK-P7: ld 3,
-; CHECK: mfvsrd 3, 1
+; CHECK: mffprd 3, 1
}
define float @i32utof32(i32 zeroext %a) {
diff --git a/test/CodeGen/PowerPC/branch_coalesce.ll b/test/CodeGen/PowerPC/branch_coalesce.ll
new file mode 100644
index 000000000000..deb6d898c2e0
--- /dev/null
+++ b/test/CodeGen/PowerPC/branch_coalesce.ll
@@ -0,0 +1,31 @@
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu -verify-machineinstrs -enable-branch-coalesce=true < %s | FileCheck %s
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64-unknown-linux-gnu -verify-machineinstrs -enable-branch-coalesce=true < %s | FileCheck %s
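+; A sketch of the intent, as read from the CHECK lines below: all three
+; selects share the condition %test, so the compare should be emitted once
+; and the guarded regions merged under a single beq (note the CHECK-NOT: beq).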
+
+; Function Attrs: nounwind
+define double @testBranchCoal(double %a, double %b, double %c, i32 %x) {
+entry:
+ %test = icmp eq i32 %x, 0
+ %tmp1 = select i1 %test, double %a, double 2.000000e-03
+ %tmp2 = select i1 %test, double %b, double 0.000000e+00
+ %tmp3 = select i1 %test, double %c, double 5.000000e-03
+
+ %res1 = fadd double %tmp1, %tmp2
+ %result = fadd double %res1, %tmp3
+ ret double %result
+
+; CHECK-LABEL: @testBranchCoal
+; CHECK: cmplwi [[CMPR:[0-7]+]], 6, 0
+; CHECK: beq [[CMPR]], .LBB[[LAB1:[0-9_]+]]
+; CHECK-DAG: addis [[LD1REG:[0-9]+]], 2, .LCPI0_0@toc@ha
+; CHECK-DAG: addis [[LD2REG:[0-9]+]], 2, .LCPI0_1@toc@ha
+; CHECK-DAG: xxlxor 2, 2, 2
+; CHECK-NOT: beq
+; CHECK-DAG: addi [[LD1BASE:[0-9]+]], [[LD1REG]]
+; CHECK-DAG: addi [[LD2BASE:[0-9]+]], [[LD2REG]]
+; CHECK-DAG: lxsdx 1, 0, [[LD1BASE]]
+; CHECK-DAG: lxsdx 3, 0, [[LD2BASE]]
+; CHECK: .LBB[[LAB1]]
+; CHECK: xsadddp 0, 1, 2
+; CHECK: xsadddp 1, 0, 3
+; CHECK: blr
+}
diff --git a/test/CodeGen/PowerPC/complex-return.ll b/test/CodeGen/PowerPC/complex-return.ll
index f6097e655128..ec87a89b1108 100644
--- a/test/CodeGen/PowerPC/complex-return.ll
+++ b/test/CodeGen/PowerPC/complex-return.ll
@@ -24,10 +24,10 @@ entry:
}
; CHECK-LABEL: foo:
-; CHECK: lfd 1
-; CHECK: lfd 2
-; CHECK: lfd 3
-; CHECK: lfd 4
+; CHECK-DAG: lfd 1
+; CHECK-DAG: lfd 2
+; CHECK-DAG: lfd 3
+; CHECK-DAG: lfd 4
define { float, float } @oof() nounwind {
entry:
@@ -50,6 +50,6 @@ entry:
}
; CHECK-LABEL: oof:
-; CHECK: lfs 2
-; CHECK: lfs 1
+; CHECK-DAG: lfs 2
+; CHECK-DAG: lfs 1
diff --git a/test/CodeGen/PowerPC/crbit-asm.ll b/test/CodeGen/PowerPC/crbit-asm.ll
index 11999670bd6a..c156d3bcc087 100644
--- a/test/CodeGen/PowerPC/crbit-asm.ll
+++ b/test/CodeGen/PowerPC/crbit-asm.ll
@@ -1,5 +1,8 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -O1 -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
+; RUN: llc -verify-machineinstrs -O1 -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
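+; With -ppc-gen-isel=false the backend expands ISEL pseudo-instructions into
+; branch sequences, which the CHECK-NO-ISEL prefix verifies below.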
+
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -18,6 +21,10 @@ entry:
; CHECK-DAG: crand [[REG3:[0-9]+]], [[REG2]], 1
; CHECK-DAG: li [[REG4:[0-9]+]], 1
; CHECK: isel 3, [[REG4]], [[REG1]], [[REG3]]
+; CHECK-NO-ISEL-LABEL: @testi1
+; CHECK-NO-ISEL: bclr 12, 20, 0
+; CHECK-NO-ISEL: ori 3, 5, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
diff --git a/test/CodeGen/PowerPC/crbits.ll b/test/CodeGen/PowerPC/crbits.ll
index 97f02ef31b3e..a85237195c5e 100644
--- a/test/CodeGen/PowerPC/crbits.ll
+++ b/test/CodeGen/PowerPC/crbits.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -19,6 +20,12 @@ entry:
; CHECK: crnor
; CHECK: crnand [[REG4:[0-9]+]],
; CHECK: isel 3, 0, [[REG1]], [[REG4]]
+; CHECK-NO-ISEL-LABEL: @test1
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL-NEXT: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 0, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -135,7 +142,7 @@ entry:
ret i32 %cond
; CHECK-LABEL: @exttest7
-; CHECK-DAG: cmplwi {{[0-9]+}}, 3, 5
+; CHECK-DAG: cmpwi {{[0-9]+}}, 3, 5
; CHECK-DAG: li [[REG1:[0-9]+]], 8
; CHECK-DAG: li [[REG2:[0-9]+]], 7
; CHECK: isel 3, [[REG2]], [[REG1]],
diff --git a/test/CodeGen/PowerPC/ctrloop-i128.ll b/test/CodeGen/PowerPC/ctrloop-i128.ll
new file mode 100644
index 000000000000..8c1e0c160d30
--- /dev/null
+++ b/test/CodeGen/PowerPC/ctrloop-i128.ll
@@ -0,0 +1,34 @@
+; RUN: llc -O1 -verify-machineinstrs < %s
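+; No FileCheck here: this is a compile-only test that passes as long as llc
+; does not crash while forming CTR loops around the i128 overflow intrinsics.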
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: uwtable
+define fastcc void @_Crash_Fn() unnamed_addr #0 {
+entry-block:
+ br label %_Label_0
+
+_Label_0: ; preds = %_Label_0, %entry-block
+ %result.0138 = phi i128 [ %5, %_Label_0 ], [ 0, %entry-block ]
+ %iter.sroa.0.0137 = phi i8* [ %0, %_Label_0 ], [ undef, %entry-block ]
+ %0 = getelementptr inbounds i8, i8* %iter.sroa.0.0137, i64 1
+ %1 = tail call { i128, i1 } @llvm.smul.with.overflow.i128(i128 %result.0138, i128 undef) #2
+ %2 = extractvalue { i128, i1 } %1, 0
+ %3 = tail call { i128, i1 } @llvm.sadd.with.overflow.i128(i128 %2, i128 0) #2
+ %4 = extractvalue { i128, i1 } %3, 1
+ %5 = extractvalue { i128, i1 } %3, 0
+ %6 = icmp eq i8* %0, null
+ br i1 %6, label %bb66.loopexit, label %_Label_0
+
+bb66.loopexit: ; preds = %_Label_0
+ unreachable
+}
+
+; Function Attrs: nounwind readnone
+declare { i128, i1 } @llvm.sadd.with.overflow.i128(i128, i128) #1
+
+; Function Attrs: nounwind readnone
+declare { i128, i1 } @llvm.smul.with.overflow.i128(i128, i128) #1
+
+attributes #0 = { uwtable }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
diff --git a/test/CodeGen/PowerPC/ctrloop-intrin.ll b/test/CodeGen/PowerPC/ctrloop-intrin.ll
index 3a6e8855971b..6ae5d3368c1a 100644
--- a/test/CodeGen/PowerPC/ctrloop-intrin.ll
+++ b/test/CodeGen/PowerPC/ctrloop-intrin.ll
@@ -17,10 +17,10 @@ target triple = "powerpc64le--linux-gnu"
@.str.11.98 = external hidden unnamed_addr constant [3 x i8], align 1
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
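+; The lifetime intrinsics are now overloaded on the pointer operand's type,
+; hence the .p0i8 suffix in their mangled names for i8* arguments.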
; Function Attrs: nounwind
declare i8* @halide_string_to_string(i8*, i8*, i8*) #1
@@ -36,7 +36,7 @@ entry:
%buf = alloca [512 x i8], align 1
store double %arg, double* %arg.addr, align 8, !tbaa !4
%0 = bitcast i64* %bits to i8*
- call void @llvm.lifetime.start(i64 8, i8* %0) #0
+ call void @llvm.lifetime.start.p0i8(i64 8, i8* %0) #0
store i64 0, i64* %bits, align 8, !tbaa !8
%1 = bitcast double* %arg.addr to i8*
%call = call i8* @memcpy(i8* %0, i8* %1, i64 8) #2
@@ -245,7 +245,7 @@ if.end.105: ; preds = %if.end.84, %if.end.
%integer_exponent.0 = phi i32 [ 0, %if.end.84 ], [ %sub70, %if.end.66 ]
%fractional_part.2 = phi i64 [ %.fractional_part.0, %if.end.84 ], [ 0, %if.end.66 ]
%7 = bitcast [512 x i8]* %buf to i8*
- call void @llvm.lifetime.start(i64 512, i8* %7) #0
+ call void @llvm.lifetime.start.p0i8(i64 512, i8* %7) #0
%add.ptr = getelementptr inbounds [512 x i8], [512 x i8]* %buf, i64 0, i64 512
%add.ptr106 = getelementptr inbounds [512 x i8], [512 x i8]* %buf, i64 0, i64 480
%call109 = call i8* @halide_int64_to_string(i8* %add.ptr106, i8* %add.ptr, i64 %integer_part.2, i32 1) #3
@@ -272,7 +272,7 @@ for.cond.cleanup: ; preds = %if.end.138, %if.end
%call142 = call i8* @halide_string_to_string(i8* %dst.addr.0, i8* %end, i8* %int_part_ptr.0.lcssa) #3
%call143 = call i8* @halide_string_to_string(i8* %call142, i8* %end, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.9.96, i64 0, i64 0)) #3
%call144 = call i8* @halide_int64_to_string(i8* %call143, i8* %end, i64 %fractional_part.2, i32 6) #3
- call void @llvm.lifetime.end(i64 512, i8* %9) #0
+ call void @llvm.lifetime.end.p0i8(i64 512, i8* %9) #0
br label %cleanup.148
for.cond.cleanup.115: ; preds = %for.body.116
@@ -315,7 +315,7 @@ if.end.138: ; preds = %if.then.136, %for.c
cleanup.148: ; preds = %for.cond.cleanup, %if.then.64, %if.end.59, %if.else.30, %if.then.28, %if.else.24, %if.then.22, %if.else.13, %if.then.11, %if.else, %if.then.6
%retval.1 = phi i8* [ %call7, %if.then.6 ], [ %call8, %if.else ], [ %call12, %if.then.11 ], [ %call14, %if.else.13 ], [ %call23, %if.then.22 ], [ %call25, %if.else.24 ], [ %call29, %if.then.28 ], [ %call31, %if.else.30 ], [ %call65, %if.then.64 ], [ %call61, %if.end.59 ], [ %call144, %for.cond.cleanup ]
%13 = bitcast i64* %bits to i8*
- call void @llvm.lifetime.end(i64 8, i8* %13) #0
+ call void @llvm.lifetime.end.p0i8(i64 8, i8* %13) #0
ret i8* %retval.1
}
diff --git a/test/CodeGen/PowerPC/expand-contiguous-isel.ll b/test/CodeGen/PowerPC/expand-contiguous-isel.ll
new file mode 100644
index 000000000000..5fe69ebcf58e
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-contiguous-isel.ll
@@ -0,0 +1,151 @@
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+; This file mainly tests that one of the ISEL instructions in the group uses the same register for operands RT, RA, and RB.
+; RUN: llc -verify-machineinstrs -O2 -ppc-asm-full-reg-names -mcpu=pwr7 -ppc-gen-isel=true < %s | FileCheck %s --check-prefix=CHECK-GEN-ISEL-TRUE
+; RUN: llc -verify-machineinstrs -O2 -ppc-asm-full-reg-names -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck %s --implicit-check-not isel
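+; The second RUN line relies on --implicit-check-not isel to prove that no
+; isel instruction survives to the final assembly when -ppc-gen-isel=false.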
+; Function Attrs: norecurse nounwind readnone
+@.str = private unnamed_addr constant [3 x i8] c"]]\00", align 1
+@.str.1 = private unnamed_addr constant [35 x i8] c"Index < Length && \22Invalid index!\22\00", align 1
+@.str.2 = private unnamed_addr constant [50 x i8] c"/home/jtony/src/llvm/include/llvm/ADT/StringRef.h\00", align 1
+@__PRETTY_FUNCTION__._ZNK4llvm9StringRefixEm = private unnamed_addr constant [47 x i8] c"char llvm::StringRef::operator[](size_t) const\00", align 1
+@.str.3 = private unnamed_addr constant [95 x i8] c"(data || length == 0) && \22StringRef cannot be built from a NULL argument with non-null length\22\00", align 1
+@__PRETTY_FUNCTION__._ZN4llvm9StringRefC2EPKcm = private unnamed_addr constant [49 x i8] c"llvm::StringRef::StringRef(const char *, size_t)\00", align 1
+; Function Attrs: nounwind
+define i64 @_Z3fn1N4llvm9StringRefE([2 x i64] %Str.coerce) local_unnamed_addr #0 {
+entry:
+ %Str.coerce.fca.0.extract = extractvalue [2 x i64] %Str.coerce, 0
+ %Str.coerce.fca.1.extract = extractvalue [2 x i64] %Str.coerce, 1
+ br label %while.cond.outer
+while.cond.outer: ; preds = %_ZNK4llvm9StringRef6substrEmm.exit, %entry
+ %Str.sroa.0.0.ph = phi i64 [ %8, %_ZNK4llvm9StringRef6substrEmm.exit ], [ %Str.coerce.fca.0.extract, %entry ]
+ %.sink.ph = phi i64 [ %sub.i, %_ZNK4llvm9StringRef6substrEmm.exit ], [ %Str.coerce.fca.1.extract, %entry ]
+ %BracketDepth.0.ph = phi i64 [ %BracketDepth.1, %_ZNK4llvm9StringRef6substrEmm.exit ], [ undef, %entry ]
+ %cmp65 = icmp eq i64 %BracketDepth.0.ph, 0
+ br i1 %cmp65, label %while.cond.us.preheader, label %while.cond.preheader
+while.cond.us.preheader: ; preds = %while.cond.outer
+ br label %while.cond.us
+while.cond.preheader: ; preds = %while.cond.outer
+ %cmp.i34129 = icmp eq i64 %.sink.ph, 0
+ br i1 %cmp.i34129, label %cond.false.i.loopexit135, label %_ZNK4llvm9StringRefixEm.exit.preheader
+_ZNK4llvm9StringRefixEm.exit.preheader: ; preds = %while.cond.preheader
+ br label %_ZNK4llvm9StringRefixEm.exit
+while.cond.us: ; preds = %while.cond.us.preheader, %_ZNK4llvm9StringRef6substrEmm.exit50.us
+ %Str.sroa.0.0.us = phi i64 [ %3, %_ZNK4llvm9StringRef6substrEmm.exit50.us ], [ %Str.sroa.0.0.ph, %while.cond.us.preheader ]
+ %.sink.us = phi i64 [ %sub.i41.us, %_ZNK4llvm9StringRef6substrEmm.exit50.us ], [ %.sink.ph, %while.cond.us.preheader ]
+ %cmp.i30.us = icmp ult i64 %.sink.us, 2
+ br i1 %cmp.i30.us, label %if.end.us, label %if.end.i.i.us
+if.end.i.i.us: ; preds = %while.cond.us
+ %0 = inttoptr i64 %Str.sroa.0.0.us to i8*
+ %call.i.i.us = tail call signext i32 @memcmp(i8* %0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i64 0, i64 0), i64 2) #3
+ %phitmp.i.us = icmp eq i32 %call.i.i.us, 0
+ br i1 %phitmp.i.us, label %if.then, label %_ZNK4llvm9StringRefixEm.exit.us
+if.end.us: ; preds = %while.cond.us
+ %cmp.i34.us = icmp eq i64 %.sink.us, 0
+ br i1 %cmp.i34.us, label %cond.false.i.loopexit, label %_ZNK4llvm9StringRefixEm.exit.us
+_ZNK4llvm9StringRefixEm.exit.us: ; preds = %if.end.i.i.us, %if.end.us
+ %1 = inttoptr i64 %Str.sroa.0.0.us to i8*
+ %2 = load i8, i8* %1, align 1, !tbaa !2
+ switch i8 %2, label %_ZNK4llvm9StringRef6substrEmm.exit.loopexit [
+ i8 92, label %if.then4.us
+ i8 93, label %if.then9
+ ]
+if.then4.us: ; preds = %_ZNK4llvm9StringRefixEm.exit.us
+ %.sroa.speculated12.i38.us = select i1 %cmp.i30.us, i64 %.sink.us, i64 2
+ %add.ptr.i40.us = getelementptr inbounds i8, i8* %1, i64 %.sroa.speculated12.i38.us
+ %sub.i41.us = sub i64 %.sink.us, %.sroa.speculated12.i38.us
+ %tobool.i.i44.us = icmp ne i8* %add.ptr.i40.us, null
+ %cmp.i4.i45.us = icmp eq i64 %sub.i41.us, 0
+ %or.cond.i.i46.us = or i1 %tobool.i.i44.us, %cmp.i4.i45.us
+ br i1 %or.cond.i.i46.us, label %_ZNK4llvm9StringRef6substrEmm.exit50.us, label %cond.false.i.i47.loopexit
+_ZNK4llvm9StringRef6substrEmm.exit50.us: ; preds = %if.then4.us
+ %3 = ptrtoint i8* %add.ptr.i40.us to i64
+ br label %while.cond.us
+if.then: ; preds = %if.end.i.i.us
+ ret i64 undef
+cond.false.i.loopexit: ; preds = %if.end.us
+ br label %cond.false.i
+cond.false.i.loopexit134: ; preds = %_ZNK4llvm9StringRef6substrEmm.exit50
+ br label %cond.false.i
+cond.false.i.loopexit135: ; preds = %while.cond.preheader
+ br label %cond.false.i
+cond.false.i: ; preds = %cond.false.i.loopexit135, %cond.false.i.loopexit134, %cond.false.i.loopexit
+ tail call void @__assert_fail(i8* getelementptr inbounds ([35 x i8], [35 x i8]* @.str.1, i64 0, i64 0), i8* getelementptr inbounds ([50 x i8], [50 x i8]* @.str.2, i64 0, i64 0), i32 zeroext 225, i8* getelementptr inbounds ([47 x i8], [47 x i8]* @__PRETTY_FUNCTION__._ZNK4llvm9StringRefixEm, i64 0, i64 0)) #4
+ unreachable
+_ZNK4llvm9StringRefixEm.exit: ; preds = %_ZNK4llvm9StringRefixEm.exit.preheader, %_ZNK4llvm9StringRef6substrEmm.exit50
+ %.sink131 = phi i64 [ %sub.i41, %_ZNK4llvm9StringRef6substrEmm.exit50 ], [ %.sink.ph, %_ZNK4llvm9StringRefixEm.exit.preheader ]
+ %Str.sroa.0.0130 = phi i64 [ %6, %_ZNK4llvm9StringRef6substrEmm.exit50 ], [ %Str.sroa.0.0.ph, %_ZNK4llvm9StringRefixEm.exit.preheader ]
+ %4 = inttoptr i64 %Str.sroa.0.0130 to i8*
+ %5 = load i8, i8* %4, align 1, !tbaa !2
+ switch i8 %5, label %_ZNK4llvm9StringRef6substrEmm.exit.loopexit132 [
+ i8 92, label %if.then4
+ i8 93, label %if.end10
+ ]
+if.then4: ; preds = %_ZNK4llvm9StringRefixEm.exit
+ %cmp.i.i37 = icmp ult i64 %.sink131, 2
+ %.sroa.speculated12.i38 = select i1 %cmp.i.i37, i64 %.sink131, i64 2
+ %add.ptr.i40 = getelementptr inbounds i8, i8* %4, i64 %.sroa.speculated12.i38
+ %sub.i41 = sub i64 %.sink131, %.sroa.speculated12.i38
+ %tobool.i.i44 = icmp ne i8* %add.ptr.i40, null
+ %cmp.i4.i45 = icmp eq i64 %sub.i41, 0
+ %or.cond.i.i46 = or i1 %tobool.i.i44, %cmp.i4.i45
+ br i1 %or.cond.i.i46, label %_ZNK4llvm9StringRef6substrEmm.exit50, label %cond.false.i.i47.loopexit133
+cond.false.i.i47.loopexit: ; preds = %if.then4.us
+ br label %cond.false.i.i47
+cond.false.i.i47.loopexit133: ; preds = %if.then4
+ br label %cond.false.i.i47
+cond.false.i.i47: ; preds = %cond.false.i.i47.loopexit133, %cond.false.i.i47.loopexit
+ tail call void @__assert_fail(i8* getelementptr inbounds ([95 x i8], [95 x i8]* @.str.3, i64 0, i64 0), i8* getelementptr inbounds ([50 x i8], [50 x i8]* @.str.2, i64 0, i64 0), i32 zeroext 90, i8* getelementptr inbounds ([49 x i8], [49 x i8]* @__PRETTY_FUNCTION__._ZN4llvm9StringRefC2EPKcm, i64 0, i64 0)) #4
+ unreachable
+_ZNK4llvm9StringRef6substrEmm.exit50: ; preds = %if.then4
+ %6 = ptrtoint i8* %add.ptr.i40 to i64
+ %cmp.i34 = icmp eq i64 %sub.i41, 0
+ br i1 %cmp.i34, label %cond.false.i.loopexit134, label %_ZNK4llvm9StringRefixEm.exit
+if.then9: ; preds = %_ZNK4llvm9StringRefixEm.exit.us
+ tail call void @exit(i32 signext 1) #4
+ unreachable
+if.end10: ; preds = %_ZNK4llvm9StringRefixEm.exit
+ %dec = add i64 %BracketDepth.0.ph, -1
+ br label %_ZNK4llvm9StringRef6substrEmm.exit
+_ZNK4llvm9StringRef6substrEmm.exit.loopexit: ; preds = %_ZNK4llvm9StringRefixEm.exit.us
+ br label %_ZNK4llvm9StringRef6substrEmm.exit
+_ZNK4llvm9StringRef6substrEmm.exit.loopexit132: ; preds = %_ZNK4llvm9StringRefixEm.exit
+ br label %_ZNK4llvm9StringRef6substrEmm.exit
+_ZNK4llvm9StringRef6substrEmm.exit: ; preds = %_ZNK4llvm9StringRef6substrEmm.exit.loopexit132, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit, %if.end10
+ %.sink76 = phi i64 [ %.sink131, %if.end10 ], [ %.sink.us, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit ], [ %.sink131, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit132 ]
+ %7 = phi i8* [ %4, %if.end10 ], [ %1, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit ], [ %4, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit132 ]
+ %BracketDepth.1 = phi i64 [ %dec, %if.end10 ], [ 0, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit ], [ %BracketDepth.0.ph, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit132 ]
+ %sub.i = add i64 %.sink76, -1
+ %add.ptr.i = getelementptr inbounds i8, i8* %7, i64 1
+ %8 = ptrtoint i8* %add.ptr.i to i64
+ br label %while.cond.outer
+
+; CHECK-LABEL: @_Z3fn1N4llvm9StringRefE
+; CHECK-GEN-ISEL-TRUE: isel [[SAME:r[0-9]+]], [[SAME]], [[SAME]]
+; CHECK-GEN-ISEL-TRUE: isel [[SAME:r[0-9]+]], {{r[0-9]+}}, [[SAME]]
+; CHECK: bc 12, 2, [[TRUE:.LBB[0-9]+]]
+; CHECK-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: addi {{r[0-9]+}}, {{r[0-9]+}}, 0
+; CHECK-NEXT: [[SUCCESSOR]]
+}
+
+; Function Attrs: noreturn nounwind
+declare void @exit(i32 signext) local_unnamed_addr #1
+; Function Attrs: nounwind readonly
+declare signext i32 @memcmp(i8* nocapture, i8* nocapture, i64) local_unnamed_addr #2
+; Function Attrs: noreturn nounwind
+declare void @__assert_fail(i8*, i8*, i32 zeroext, i8*) local_unnamed_addr #1
+attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+power8-vector,+vsx,-power9-vector,-qpx" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { noreturn nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+power8-vector,+vsx,-power9-vector,-qpx" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind readonly "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+power8-vector,+vsx,-power9-vector,-qpx" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { nounwind readonly }
+attributes #4 = { noreturn nounwind }
+!llvm.module.flags = !{!0}
+!llvm.ident = !{!1}
+!0 = !{i32 1, !"PIC Level", i32 2}
+!1 = !{!"clang version 4.0.0 (trunk 286863) (llvm/trunk 286967)"}
+!2 = !{!3, !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C++ TBAA"}
diff --git a/test/CodeGen/PowerPC/expand-isel-1.mir b/test/CodeGen/PowerPC/expand-isel-1.mir
new file mode 100644
index 000000000000..e666ad47fca0
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-isel-1.mir
@@ -0,0 +1,57 @@
+# This file tests the scenario: ISEL R0, ZERO, R0, CR
+# RUN: llc -ppc-gen-isel=false -run-pass ppc-expand-isel -o - %s | FileCheck %s
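+# A sketch of the expected expansion, as reflected in the CHECK lines below:
+# ISEL RT, RA, RB, BI copies RA into RT when condition bit BI is set and RB
+# otherwise, so the pass rewrites it as a conditional branch (BC) to a true
+# block holding "RT = ADDI RA, 0", with "RT = ORI RB, 0" on the false path.
+# In this scenario RT and RB are both R0, so the false-side copy would be a
+# no-op and only the true block needs code.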
+
+--- |
+ target datalayout = "E-m:e-i64:64-n32:64"
+ target triple = "powerpc64-unknown-linux-gnu"
+ define signext i32 @testExpandISEL(i32 signext %i, i32 signext %j) {
+ entry:
+ %cmp = icmp sgt i32 %i, 0
+ %add = add nsw i32 %i, 1
+ %cond = select i1 %cmp, i32 %add, i32 %j
+ ret i32 %cond
+ }
+
+...
+---
+name: testExpandISEL
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%x0' }
+ - { reg: '%x3' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %x0, %x3
+
+ %r5 = ADDI %r3, 1
+ %cr0 = CMPWI %r3, 0
+ %r0 = ISEL %zero, %r0, %cr0gt
+ ; CHECK-LABEL: testExpandISEL
+ ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ ; CHECK-NEXT: B %[[SUCCESSOR:bb.[0-9]+]]
+ ; CHECK: [[TRUE]]
+ ; CHECK: %r0 = ADDI %zero, 0
+
+ %x3 = EXTSW_32_64 %r0
+
+...
+
diff --git a/test/CodeGen/PowerPC/expand-isel-2.mir b/test/CodeGen/PowerPC/expand-isel-2.mir
new file mode 100644
index 000000000000..8e9c3a25e60c
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-isel-2.mir
@@ -0,0 +1,57 @@
+# This file tests the scenario: ISEL RX, ZERO, RY, CR (X != 0 && Y != 0)
+# RUN: llc -ppc-gen-isel=false -run-pass ppc-expand-isel -o - %s | FileCheck %s
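+# Unlike the R0-destination scenario, RT (R3) here differs from both source
+# registers, so both copies should be emitted: "ORI R4, 0" on the false path
+# and "ADDI ZERO, 0" on the true path, as the CHECK lines require.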
+
+--- |
+ target datalayout = "E-m:e-i64:64-n32:64"
+ target triple = "powerpc64-unknown-linux-gnu"
+ define signext i32 @testExpandISEL(i32 signext %i, i32 signext %j) {
+ entry:
+ %cmp = icmp sgt i32 %i, 0
+ %add = add nsw i32 %i, 1
+ %cond = select i1 %cmp, i32 %add, i32 %j
+ ret i32 %cond
+ }
+
+...
+---
+name: testExpandISEL
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%x0' }
+ - { reg: '%x3' }
+ - { reg: '%x4' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %x0, %x3, %x4
+
+ %r5 = ADDI %r3, 1
+ %cr0 = CMPWI %r3, 0
+ %r3 = ISEL %zero, %r4, %cr0gt
+ ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ ; CHECK: %[[FALSE:bb.[0-9]+]]
+ ; CHECK: %r3 = ORI %r4, 0
+ ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
+ ; CHECK: [[TRUE]]
+ ; CHECK: %r3 = ADDI %zero, 0
+
+ %x3 = EXTSW_32_64 %r3
+...
diff --git a/test/CodeGen/PowerPC/expand-isel-3.mir b/test/CodeGen/PowerPC/expand-isel-3.mir
new file mode 100644
index 000000000000..c8987266f476
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-isel-3.mir
@@ -0,0 +1,58 @@
+# This file tests the scenario: ISEL RX, RY, R0, CR (X != 0 && Y != 0)
+# RUN: llc -ppc-gen-isel=false -run-pass ppc-expand-isel -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "E-m:e-i64:64-n32:64"
+ target triple = "powerpc64-unknown-linux-gnu"
+ define signext i32 @testExpandISEL(i32 signext %i, i32 signext %j) {
+ entry:
+ %cmp = icmp sgt i32 %i, 0
+ %add = add nsw i32 %i, 1
+ %cond = select i1 %cmp, i32 %add, i32 %j
+ ret i32 %cond
+ }
+
+...
+---
+name: testExpandISEL
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%x0' }
+ - { reg: '%x3' }
+ - { reg: '%x4' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %x0, %x3, %x4
+
+ %r5 = ADDI %r3, 1
+ %cr0 = CMPWI %r3, 0
+ %r3 = ISEL %r4, %r0, %cr0gt
+ ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ ; CHECK: %[[FALSE:bb.[0-9]+]]
+ ; CHECK: %r3 = ORI %r0, 0
+ ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
+ ; CHECK: [[TRUE]]
+ ; CHECK: %r3 = ADDI %r4, 0
+
+ %x3 = EXTSW_32_64 %r3
+
+...
diff --git a/test/CodeGen/PowerPC/expand-isel-4.mir b/test/CodeGen/PowerPC/expand-isel-4.mir
new file mode 100644
index 000000000000..83624f7c1e34
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-isel-4.mir
@@ -0,0 +1,59 @@
+# This file tests the scenario: ISEL R0, ZERO, RX, CR (X != 0)
+# It also tests redundant liveins (%x7) and killed registers.
+# RUN: llc -ppc-gen-isel=false -run-pass ppc-expand-isel -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "E-m:e-i64:64-n32:64"
+ target triple = "powerpc64-unknown-linux-gnu"
+ define signext i32 @testExpandISEL(i32 signext %i, i32 signext %j) {
+ entry:
+ %cmp = icmp sgt i32 %i, 0
+ %add = add nsw i32 %i, 1
+ %cond = select i1 %cmp, i32 %add, i32 %j
+ ret i32 %cond
+ }
+
+...
+---
+name: testExpandISEL
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%x0' }
+ - { reg: '%x3' }
+ - { reg: '%x7' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %x0, %x3, %x7
+
+ %r5 = ADDI %r3, 1
+ %cr0 = CMPWI %r3, 0
+ %r0 = ISEL killed %zero, killed %r5, killed %cr0gt, implicit killed %cr0
+ ; CHECK: BC killed %cr0gt, %[[TRUE:bb.[0-9]+]]
+ ; CHECK: %[[FALSE:bb.[0-9]+]]
+ ; CHECK: %r0 = ORI killed %r5, 0
+ ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
+ ; CHECK: [[TRUE]]
+ ; CHECK: %r0 = ADDI killed %zero, 0
+
+ %x0 = EXTSW_32_64 killed %r0
+
+...
diff --git a/test/CodeGen/PowerPC/expand-isel-5.mir b/test/CodeGen/PowerPC/expand-isel-5.mir
new file mode 100644
index 000000000000..7a7130f80cf8
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-isel-5.mir
@@ -0,0 +1,54 @@
+# This file tests the scenario: ISEL R0, RX, R0, CR (X != 0)
+# RUN: llc -ppc-gen-isel=false -run-pass ppc-expand-isel -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "E-m:e-i64:64-n32:64"
+ target triple = "powerpc64-unknown-linux-gnu"
+ define signext i32 @testExpandISEL(i32 signext %i, i32 signext %j) {
+ entry:
+ %cmp = icmp sgt i32 %i, 0
+ %add = add nsw i32 %i, 1
+ %cond = select i1 %cmp, i32 %add, i32 %j
+ ret i32 %cond
+ }
+
+...
+---
+name: testExpandISEL
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%x0' }
+ - { reg: '%x3' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %x0, %x3
+
+ %r5 = ADDI %r3, 1
+ %cr0 = CMPWI %r3, 0
+ %r0 = ISEL %r5, %r0, %cr0gt
+ ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
+ ; CHECK: [[TRUE]]
+ ; CHECK: %r0 = ADDI %r5, 0
+ %x3 = EXTSW_32_64 %r0
+
+...
diff --git a/test/CodeGen/PowerPC/expand-isel-6.mir b/test/CodeGen/PowerPC/expand-isel-6.mir
new file mode 100644
index 000000000000..5aed399e677a
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-isel-6.mir
@@ -0,0 +1,57 @@
+# This file tests the scenario where ISEL is the last instruction of the last
+# basic block, i.e., the case where the BB cannot fall through to its successor.
+# RUN: llc -ppc-gen-isel=false -run-pass ppc-expand-isel -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "E-m:e-i64:64-n32:64"
+ target triple = "powerpc64-unknown-linux-gnu"
+ define signext i32 @testExpandISEL(i32 signext %i, i32 signext %j) {
+ entry:
+ %cmp = icmp sgt i32 %i, 0
+ %add = add nsw i32 %i, 1
+ %cond = select i1 %cmp, i32 %add, i32 %j
+ ret i32 %cond
+ }
+
+...
+---
+name: testExpandISEL
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%x0' }
+ - { reg: '%x3' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %x0, %x3
+
+ %r5 = ADDI %r3, 1
+ %cr0 = CMPWI %r3, 0
+ %r3 = ISEL %zero, %r0, %cr0gt
+ ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ ; CHECK: %[[FALSE:bb.[0-9]+]]
+ ; CHECK: %r3 = ORI %r0, 0
+ ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
+ ; CHECK: [[TRUE]]
+ ; CHECK: %r3 = ADDI %zero, 0
+
+
+...
diff --git a/test/CodeGen/PowerPC/expand-isel-7.mir b/test/CodeGen/PowerPC/expand-isel-7.mir
new file mode 100644
index 000000000000..4043a45a2e70
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-isel-7.mir
@@ -0,0 +1,58 @@
+# This file tests the scenario: ISEL RX, RY, RZ, CR (X != 0 && Y != 0 && Z != 0)
+# RUN: llc -ppc-gen-isel=false -run-pass ppc-expand-isel -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "E-m:e-i64:64-n32:64"
+ target triple = "powerpc64-unknown-linux-gnu"
+ define signext i32 @testExpandISEL(i32 signext %i, i32 signext %j) {
+ entry:
+ %cmp = icmp sgt i32 %i, 0
+ %add = add nsw i32 %i, 1
+ %cond = select i1 %cmp, i32 %add, i32 %j
+ ret i32 %cond
+ }
+
+...
+---
+name: testExpandISEL
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%x3' }
+ - { reg: '%x4' }
+ - { reg: '%x5' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %x3, %x4, %x5
+
+ %r4 = ADDI %r3, 1
+ %cr0 = CMPWI %r3, 0
+ %r5 = ISEL %r3, %r4, %cr0gt
+ ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ ; CHECK: %[[FALSE:bb.[0-9]+]]
+ ; CHECK: %r5 = ORI %r4, 0
+ ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
+ ; CHECK: [[TRUE]]
+ ; CHECK: %r5 = ADDI %r3, 0
+
+ %x5 = EXTSW_32_64 %r5
+
+...
diff --git a/test/CodeGen/PowerPC/expand-isel-8.mir b/test/CodeGen/PowerPC/expand-isel-8.mir
new file mode 100644
index 000000000000..c8b857e69791
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-isel-8.mir
@@ -0,0 +1,65 @@
+# This file tests the scenario of combining three consecutive ISELs.
+# RUN: llc -ppc-gen-isel=false -run-pass ppc-expand-isel -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "E-m:e-i64:64-n32:64"
+ target triple = "powerpc64-unknown-linux-gnu"
+ define signext i32 @testExpandISEL(i32 signext %i, i32 signext %j) {
+ entry:
+ %cmp = icmp sgt i32 %i, 0
+ %add = add nsw i32 %i, 1
+ %cond = select i1 %cmp, i32 %add, i32 %j
+ ret i32 %cond
+ }
+
+...
+---
+name: testExpandISEL
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%x3' }
+ - { reg: '%x4' }
+ - { reg: '%x5' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %x3, %x4, %x5
+
+ %r4 = ADDI %r3, 1
+ %cr0 = CMPWI %r3, 0
+ %r5 = ISEL %r3, %r4, %cr0gt
+ %r3 = ISEL %r4, %r5, %cr0gt
+ %r4 = ISEL %r3, %r5, %cr0gt
+ ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ ; CHECK: %[[FALSE:bb.[0-9]+]]
+ ; CHECK: %r5 = ORI %r4, 0
+ ; CHECK: %r3 = ORI %r5, 0
+ ; CHECK: %r4 = ORI %r5, 0
+ ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
+ ; CHECK: [[TRUE]]
+ ; CHECK: %r5 = ADDI %r3, 0
+ ; CHECK: %r3 = ADDI %r4, 0
+ ; CHECK: %r4 = ADDI %r3, 0
+
+ %x5 = EXTSW_32_64 %r5
+ %x3 = EXTSW_32_64 %r3
+
+...
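Because all three consecutive ISELs above use the same condition bit (%cr0gt), a single diamond covers them: roughly one ORI per ISEL in the false half and one ADDI per ISEL in the true half, exactly as the CHECK lines spell out:

    BC %cr0gt, <true>
    %r5 = ORI %r4, 0
    %r3 = ORI %r5, 0
    %r4 = ORI %r5, 0
    B <succ>
  <true>:
    %r5 = ADDI %r3, 0
    %r3 = ADDI %r4, 0
    %r4 = ADDI %r3, 0
  <succ>: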
diff --git a/test/CodeGen/PowerPC/expand-isel.ll b/test/CodeGen/PowerPC/expand-isel.ll
new file mode 100644
index 000000000000..553cc3c372e5
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-isel.ll
@@ -0,0 +1,227 @@
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+; RUN: llc -verify-machineinstrs -O2 -ppc-asm-full-reg-names -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck %s --implicit-check-not isel
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @testExpandISELToIfElse(i32 signext %i, i32 signext %j) {
+entry:
+ %cmp = icmp sgt i32 %i, 0
+ %add = add nsw i32 %i, 1
+ %cond = select i1 %cmp, i32 %add, i32 %j
+ ret i32 %cond
+
+; CHECK-LABEL: @testExpandISELToIfElse
+; CHECK: addi r5, r3, 1
+; CHECK-NEXT: cmpwi cr0, r3, 0
+; CHECK-NEXT: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK: ori r3, r4, 0
+; CHECK-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: addi r3, r5, 0
+; CHECK-NEXT: [[SUCCESSOR]]
+; CHECK-NEXT: extsw r3, r3
+; CHECK-NEXT: blr
+}
+
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @testExpandISELToIf(i32 signext %i, i32 signext %j) {
+entry:
+ %cmp = icmp sgt i32 %i, 0
+ %cond = select i1 %cmp, i32 %j, i32 %i
+ ret i32 %cond
+
+; CHECK-LABEL: @testExpandISELToIf
+; CHECK: cmpwi r3, 0
+; CHECK-NEXT: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NEXT: blr
+; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: addi r3, r4, 0
+; CHECK-NEXT: blr
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @testExpandISELToElse(i32 signext %i, i32 signext %j) {
+entry:
+ %cmp = icmp sgt i32 %i, 0
+ %cond = select i1 %cmp, i32 %i, i32 %j
+ ret i32 %cond
+
+; CHECK-LABEL: @testExpandISELToElse
+; CHECK: cmpwi r3, 0
+; CHECK-NEXT: bclr 12, 1, 0
+; CHECK: ori r3, r4, 0
+; CHECK-NEXT: blr
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @testReplaceISELWithCopy(i32 signext %i, i32 signext %j) {
+entry:
+ %cmp = icmp sgt i32 %i, 0
+ %cond = select i1 %cmp, i32 %j, i32 %j
+ ret i32 %cond
+
+; CHECK-LABEL: @testReplaceISELWithCopy
+
+; FIXME: this should really check: addi r3, r4, 0
+; but for some reason it's optimized to mr r3, r4
+; CHECK: mr r3, r4
+; CHECK-NEXT: blr
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @testExpandISELToNull(i32 signext %i, i32 signext %j) {
+entry:
+ %cmp = icmp sgt i32 %i, 0
+ %cond = select i1 %cmp, i32 %i, i32 %i
+ ret i32 %cond
+
+; CHECK-LABEL: @testExpandISELToNull
+; CHECK-NOT: b {{.LBB[0-9]+}}
+; CHECK-NOT: bc
+; CHECK: blr
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @testExpandISELsTo2ORIs2ADDIs
+ (i32 signext %a, i32 signext %b, i32 signext %d,
+ i32 signext %f, i32 signext %g) {
+entry:
+
+ %cmp = icmp sgt i32 %g, 0
+ %a.b = select i1 %cmp, i32 %g, i32 %b
+ %d.f = select i1 %cmp, i32 %d, i32 %f
+ %add = add nsw i32 %a.b, %d.f
+ ret i32 %add
+
+; CHECK-LABEL: @testExpandISELsTo2ORIs2ADDIs
+; CHECK: cmpwi r7, 0
+; CHECK-NEXT: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK: ori r3, r4, 0
+; CHECK-NEXT: ori r12, r6, 0
+; CHECK-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: addi r3, r7, 0
+; CHECK-NEXT: addi r12, r5, 0
+; CHECK-NEXT: [[SUCCESSOR]]
+; CHECK-NEXT: add r3, r3, r12
+; CHECK-NEXT: extsw r3, r3
+; CHECK-NEXT: blr
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @testExpandISELsTo2ORIs1ADDI
+ (i32 signext %a, i32 signext %b, i32 signext %d,
+ i32 signext %f, i32 signext %g) {
+entry:
+ %cmp = icmp sgt i32 %g, 0
+ %a.b = select i1 %cmp, i32 %a, i32 %b
+ %d.f = select i1 %cmp, i32 %d, i32 %f
+ %add = add nsw i32 %a.b, %d.f
+ ret i32 %add
+
+; CHECK-LABEL: @testExpandISELsTo2ORIs1ADDI
+; CHECK: cmpwi cr0, r7, 0
+; CHECK-NEXT: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK: ori r3, r4, 0
+; CHECK-NEXT: ori r12, r6, 0
+; CHECK-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: addi r12, r5, 0
+; CHECK-NEXT: [[SUCCESSOR]]
+; CHECK-NEXT: add r3, r3, r12
+; CHECK-NEXT: extsw r3, r3
+; CHECK-NEXT: blr
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @testExpandISELsTo1ORI1ADDI
+ (i32 signext %a, i32 signext %b, i32 signext %d,
+ i32 signext %f, i32 signext %g) {
+entry:
+
+ %cmp = icmp sgt i32 %g, 0
+ %a.b = select i1 %cmp, i32 %a, i32 %b
+ %d.f = select i1 %cmp, i32 %d, i32 %f
+ %add1 = add nsw i32 %a.b, %d.f
+ %add2 = add nsw i32 %a, %add1
+ ret i32 %add2
+
+; CHECK-LABEL: @testExpandISELsTo1ORI1ADDI
+; CHECK: cmpwi cr0, r7, 0
+; CHECK-NEXT: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK: ori r5, r6, 0
+; CHECK-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: addi r4, r3, 0
+; CHECK-NEXT: [[SUCCESSOR]]
+; CHECK-NEXT: add r4, r4, r5
+; CHECK-NEXT: add r3, r3, r4
+; CHECK-NEXT: extsw r3, r3
+; CHECK-NEXT: blr
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @testExpandISELsTo0ORI2ADDIs
+ (i32 signext %a, i32 signext %b, i32 signext %d,
+ i32 signext %f, i32 signext %g) {
+entry:
+
+ %cmp = icmp sgt i32 %g, 0
+ %a.b = select i1 %cmp, i32 %a, i32 %b
+ %d.f = select i1 %cmp, i32 %d, i32 %f
+ %add1 = add nsw i32 %a.b, %d.f
+ %add2 = add nsw i32 %a, %add1
+ %sub1 = sub nsw i32 %add2, %d
+ ret i32 %sub1
+
+; CHECK-LABEL: @testExpandISELsTo0ORI2ADDIs
+; CHECK: cmpwi cr0, r7, 0
+; CHECK-NEXT: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: addi r4, r3, 0
+; CHECK-NEXT: addi r6, r5, 0
+; CHECK-NEXT: [[SUCCESSOR]]
+; CHECK-NEXT: add r4, r4, r6
+; CHECK-NEXT: add r3, r3, r4
+; CHECK-NEXT: subf r3, r5, r3
+; CHECK-NEXT: extsw r3, r3
+; CHECK-NEXT: blr
+}
+
+
+@b = common local_unnamed_addr global i32 0, align 4
+@a = common local_unnamed_addr global i32 0, align 4
+; Function Attrs: norecurse nounwind readonly
+define signext i32 @testComplexISEL() #0 {
+entry:
+ %0 = load i32, i32* @b, align 4, !tbaa !1
+ %tobool = icmp eq i32 %0, 0
+ br i1 %tobool, label %if.end, label %cleanup
+
+if.end:
+ %1 = load i32, i32* @a, align 4, !tbaa !1
+ %conv = sext i32 %1 to i64
+ %2 = inttoptr i64 %conv to i32 (...)*
+ %cmp = icmp eq i32 (...)* %2, bitcast (i32 ()* @testComplexISEL to i32 (...)*)
+ %conv3 = zext i1 %cmp to i32
+ br label %cleanup
+
+cleanup:
+ %retval.0 = phi i32 [ %conv3, %if.end ], [ 1, %entry ]
+ ret i32 %retval.0
+
+; CHECK-LABEL: @testComplexISEL
+; CHECK: bc 12, 2, [[TRUE:.LBB[0-9]+]]
+; CHECK-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: addi r3, r12, 0
+; CHECK-NEXT: [[SUCCESSOR]]
+; CHECK-NEXT: clrldi r3, r3, 32
+; CHECK-NEXT: blr
+}
+
+!1 = !{!2, !2, i64 0}
+!2 = !{!"int", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/PowerPC/fast-isel-load-store.ll b/test/CodeGen/PowerPC/fast-isel-load-store.ll
index 1990f6b51d55..5317829c6ce9 100644
--- a/test/CodeGen/PowerPC/fast-isel-load-store.ll
+++ b/test/CodeGen/PowerPC/fast-isel-load-store.ll
@@ -196,7 +196,7 @@ define void @t17(i64 %v) nounwind {
%1 = add nsw i64 %v, 1
store i64 %1, i64* getelementptr inbounds ([8192 x i64], [8192 x i64]* @i, i32 0, i64 5000), align 8
; ELF64: addis
-; ELF64: ld
+; ELF64: addi
; ELF64: addi
; ELF64: lis
; ELF64: ori
diff --git a/test/CodeGen/PowerPC/fma-aggr-FMF.ll b/test/CodeGen/PowerPC/fma-aggr-FMF.ll
new file mode 100644
index 000000000000..8e97115bd1f2
--- /dev/null
+++ b/test/CodeGen/PowerPC/fma-aggr-FMF.ll
@@ -0,0 +1,35 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -verify-machineinstrs -mtriple=powerpc64le-linux-gnu | FileCheck %s
+
+define float @can_fma_with_fewer_uses(float %f1, float %f2, float %f3, float %f4) {
+; CHECK-LABEL: can_fma_with_fewer_uses:
+; CHECK: # BB#0:
+; CHECK-NEXT: xsmulsp 0, 1, 2
+; CHECK-NEXT: fmr 1, 0
+; CHECK-NEXT: xsmaddasp 1, 3, 4
+; CHECK-NEXT: xsdivsp 1, 0, 1
+; CHECK-NEXT: blr
+ %mul1 = fmul contract float %f1, %f2
+ %mul2 = fmul contract float %f3, %f4
+ %add = fadd contract float %mul1, %mul2
+ %second_use_of_mul1 = fdiv float %mul1, %add
+ ret float %second_use_of_mul1
+}
+
+; There is no contract on the mul with no extra use so we can't fuse that.
+; Since we are fusing with the mul with an extra use, the fmul needs to stick
+; around beside the fma.
+define float @no_fma_with_fewer_uses(float %f1, float %f2, float %f3, float %f4) {
+; CHECK-LABEL: no_fma_with_fewer_uses:
+; CHECK: # BB#0:
+; CHECK-NEXT: xsmulsp 0, 3, 4
+; CHECK-NEXT: xsmulsp 13, 1, 2
+; CHECK-NEXT: xsmaddasp 0, 1, 2
+; CHECK-NEXT: xsdivsp 1, 13, 0
+; CHECK-NEXT: blr
+ %mul1 = fmul contract float %f1, %f2
+ %mul2 = fmul float %f3, %f4
+ %add = fadd contract float %mul1, %mul2
+ %second_use_of_mul1 = fdiv float %mul1, %add
+ ret float %second_use_of_mul1
+}
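For context: the contract fast-math flag is what licenses folding an fmul/fadd pair into a fused multiply-add, and both instructions of the pair must carry it. A minimal hypothetical pair showing the distinction the two tests draw:

    %m1 = fmul contract float %a, %b
    %s1 = fadd contract float %m1, %c   ; both contract: eligible for fusion (xsmaddasp)

    %m2 = fmul float %a, %b             ; no contract on the mul: stays a separate xsmulsp
    %s2 = fadd contract float %m2, %c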
diff --git a/test/CodeGen/PowerPC/fold-zero.ll b/test/CodeGen/PowerPC/fold-zero.ll
index 5e620ece0a99..180d8e1b9f55 100644
--- a/test/CodeGen/PowerPC/fold-zero.ll
+++ b/test/CodeGen/PowerPC/fold-zero.ll
@@ -1,5 +1,6 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-crbits | FileCheck %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck -check-prefix=CHECK-CRB %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck --check-prefix=CHECK-CRB %s
+; RUN: llc -verify-machineinstrs -ppc-gen-isel=false < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck --check-prefix=CHECK-NO-ISEL %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -7,19 +8,33 @@ define i32 @test1(i1 %a, i32 %c) nounwind {
%x = select i1 %a, i32 %c, i32 0
ret i32 %x
-; CHECK: @test1
+; CHECK-LABEL: @test1
; CHECK-NOT: li {{[0-9]+}}, 0
; CHECK: isel 3, 0,
; CHECK: blr
+; CHECK-NO-ISEL-LABEL: @test1
+; CHECK-NO-ISEL: li 3, 0
+; CHECK-NO-ISEL-NEXT: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL-NEXT: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 4, 0
+; CHECK-NO-ISEL-NEXT: blr
}
define i32 @test2(i1 %a, i32 %c) nounwind {
%x = select i1 %a, i32 0, i32 %c
ret i32 %x
-; CHECK-CRB: @test2
+; CHECK-CRB-LABEL: @test2
; CHECK-CRB-NOT: li {{[0-9]+}}, 0
; CHECK-CRB: isel 3, 0,
; CHECK-CRB: blr
+; CHECK-NO-ISEL-LABEL: @test2
+; CHECK-NO-ISEL: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 4, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL-NEXT: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 0, 0
+; CHECK-NO-ISEL-NEXT: blr
}
diff --git a/test/CodeGen/PowerPC/fp-int-conversions-direct-moves.ll b/test/CodeGen/PowerPC/fp-int-conversions-direct-moves.ll
index 9b8fd4095793..955b1f27ca26 100644
--- a/test/CodeGen/PowerPC/fp-int-conversions-direct-moves.ll
+++ b/test/CodeGen/PowerPC/fp-int-conversions-direct-moves.ll
@@ -323,7 +323,7 @@ entry:
ret i64 %conv
; CHECK-LABEL: @_Z7testllff
; CHECK: xscvdpsxds [[CONVREG13:[0-9]+]], 1
-; CHECK: mfvsrd 3, [[CONVREG13]]
+; CHECK: mffprd 3, [[CONVREG13]]
}
; Function Attrs: nounwind
@@ -349,7 +349,7 @@ entry:
ret i64 %conv
; CHECK-LABEL: @_Z7testlldd
; CHECK: xscvdpsxds [[CONVREG14:[0-9]+]], 1
-; CHECK: mfvsrd 3, [[CONVREG14]]
+; CHECK: mffprd 3, [[CONVREG14]]
}
; Function Attrs: nounwind
@@ -375,7 +375,7 @@ entry:
ret i64 %conv
; CHECK-LABEL: @_Z8testullff
; CHECK: xscvdpuxds [[CONVREG15:[0-9]+]], 1
-; CHECK: mfvsrd 3, [[CONVREG15]]
+; CHECK: mffprd 3, [[CONVREG15]]
}
; Function Attrs: nounwind
@@ -401,7 +401,7 @@ entry:
ret i64 %conv
; CHECK-LABEL: @_Z8testulldd
; CHECK: xscvdpuxds [[CONVREG16:[0-9]+]], 1
-; CHECK: mfvsrd 3, [[CONVREG16]]
+; CHECK: mffprd 3, [[CONVREG16]]
}
; Function Attrs: nounwind
diff --git a/test/CodeGen/PowerPC/fp128-bitcast-after-operation.ll b/test/CodeGen/PowerPC/fp128-bitcast-after-operation.ll
index 2e537cd8a560..cd4eac42f26c 100644
--- a/test/CodeGen/PowerPC/fp128-bitcast-after-operation.ll
+++ b/test/CodeGen/PowerPC/fp128-bitcast-after-operation.ll
@@ -11,21 +11,17 @@ entry:
; PPC64-DAG: stxsdx 1, 0, [[ADDR_LO:[0-9]+]]
; PPC64-DAG: addi [[ADDR_HI]], [[SP:[0-9]+]], [[OFFSET_HI:-?[0-9]+]]
; PPC64-DAG: addi [[ADDR_LO]], [[SP]], [[OFFSET_LO:-?[0-9]+]]
-; PPC64-DAG: li [[MASK_REG:[0-9]+]], 1
-; PPC64: sldi [[MASK_REG]], [[MASK_REG]], 63
; PPC64-DAG: ld [[HI:[0-9]+]], [[OFFSET_LO]]([[SP]])
; PPC64-DAG: ld [[LO:[0-9]+]], [[OFFSET_HI]]([[SP]])
-; PPC64: and [[FLIP_BIT:[0-9]+]], [[HI]], [[MASK_REG]]
+; PPC64-DAG: rldicr [[FLIP_BIT:[0-9]+]], [[HI]], 0, 0
; PPC64-DAG: xor 3, [[HI]], [[FLIP_BIT]]
; PPC64-DAG: xor 4, [[LO]], [[FLIP_BIT]]
; PPC64: blr
; PPC64-P8-LABEL: test_abs:
-; PPC64-P8-DAG: mfvsrd [[LO:[0-9]+]], 2
-; PPC64-P8-DAG: mfvsrd [[HI:[0-9]+]], 1
-; PPC64-P8-DAG: li [[MASK_REG:[0-9]+]], 1
-; PPC64-P8-DAG: sldi [[SHIFT_REG:[0-9]+]], [[MASK_REG]], 63
-; PPC64-P8: and [[FLIP_BIT:[0-9]+]], [[HI]], [[SHIFT_REG]]
+; PPC64-P8-DAG: mffprd [[LO:[0-9]+]], 2
+; PPC64-P8-DAG: mffprd [[HI:[0-9]+]], 1
+; PPC64-P8-DAG: rldicr [[FLIP_BIT:[0-9]+]], [[HI]], 0, 0
; PPC64-P8-DAG: xor 3, [[HI]], [[FLIP_BIT]]
; PPC64-P8-DAG: xor 4, [[LO]], [[FLIP_BIT]]
; PPC64-P8: blr
@@ -63,10 +59,10 @@ entry:
; PPC64: blr
; PPC64-P8-LABEL: test_neg:
-; PPC64-P8-DAG: mfvsrd [[LO:[0-9]+]], 2
-; PPC64-P8-DAG: mfvsrd [[HI:[0-9]+]], 1
+; PPC64-P8-DAG: mffprd [[LO:[0-9]+]], 2
+; PPC64-P8-DAG: mffprd [[HI:[0-9]+]], 1
; PPC64-P8-DAG: li [[IMM1:[0-9]+]], 1
-; PPC64-P8-DAG: sldi [[FLIP_BIT]], [[IMM1]], 63
+; PPC64-P8-DAG: sldi [[FLIP_BIT:[0-9]+]], [[IMM1]], 63
; PPC64-P8-NOT: BARRIER
; PPC64-P8-DAG: xor 3, [[HI]], [[FLIP_BIT]]
; PPC64-P8-DAG: xor 4, [[LO]], [[FLIP_BIT]]
@@ -93,29 +89,25 @@ entry:
; PPC64-LABEL: test_copysign:
; PPC64-DAG: stxsdx 1, 0, [[ADDR_REG:[0-9]+]]
; PPC64-DAG: addi [[ADDR_REG]], 1, [[OFFSET:-?[0-9]+]]
-; PPC64-DAG: li [[SIGN:[0-9]+]], 1
-; PPC64-DAG: sldi [[SIGN]], [[SIGN]], 63
; PPC64-DAG: li [[HI_TMP:[0-9]+]], 16399
; PPC64-DAG: sldi [[CST_HI:[0-9]+]], [[HI_TMP]], 48
; PPC64-DAG: li [[LO_TMP:[0-9]+]], 3019
; PPC64-DAG: sldi [[CST_LO:[0-9]+]], [[LO_TMP]], 52
; PPC64-NOT: BARRIER
; PPC64-DAG: ld [[X_HI:[0-9]+]], [[OFFSET]](1)
-; PPC64-DAG: and [[NEW_HI_TMP:[0-9]+]], [[X_HI]], [[SIGN]]
+; PPC64-DAG: rldicr [[NEW_HI_TMP:[0-9]+]], [[X_HI]], 0, 0
; PPC64-DAG: or 3, [[NEW_HI_TMP]], [[CST_HI]]
-; PPC64-DAG: xor 4, [[SIGN]], [[CST_LO]]
+; PPC64-DAG: xor 4, [[NEW_HI_TMP]], [[CST_LO]]
; PPC64: blr
; PPC64-P8-LABEL: test_copysign:
-; PPC64-P8-DAG: mfvsrd [[X_HI:[0-9]+]], 1
-; PPC64-P8-DAG: li [[SIGN:[0-9]+]], 1
-; PPC64-P8-DAG: sldi [[SIGN]], [[SIGN]], 63
+; PPC64-P8-DAG: mffprd [[X_HI:[0-9]+]], 1
; PPC64-P8-DAG: li [[HI_TMP:[0-9]+]], 16399
; PPC64-P8-DAG: sldi [[CST_HI:[0-9]+]], [[HI_TMP]], 48
; PPC64-P8-DAG: li [[LO_TMP:[0-9]+]], 3019
; PPC64-P8-DAG: sldi [[CST_LO:[0-9]+]], [[LO_TMP]], 52
; PPC64-P8-NOT: BARRIER
-; PPC64-P8-DAG: and [[NEW_HI_TMP:[0-9]+]], [[X_HI]], [[SIGN]]
+; PPC64-P8-DAG: rldicr [[NEW_HI_TMP:[0-9]+]], [[X_HI]], 0, 0
; PPC64-P8-DAG: or 3, [[NEW_HI_TMP]], [[CST_HI]]
; PPC64-P8-DAG: xor 4, [[NEW_HI_TMP]], [[CST_LO]]
; PPC64-P8: blr
@@ -128,7 +120,7 @@ entry:
; PPC32-DAG: oris {{[0-9]+}}, [[FLIP_BIT]], 16399
; PPC32-DAG: xoris {{[0-9]+}}, [[FLIP_BIT]], 48304
; PPC32: blr
- %0 = tail call ppc_fp128 @llvm.copysign.ppcf128(ppc_fp128 0xMBCB0000000000000400F000000000000, ppc_fp128 %x)
+ %0 = tail call ppc_fp128 @llvm.copysign.ppcf128(ppc_fp128 0xM400F000000000000BCB0000000000000, ppc_fp128 %x)
%1 = bitcast ppc_fp128 %0 to i128
ret i128 %1
}
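The updated PPC64 checks reflect isolating the sign bit with a single rotate-and-mask rather than materializing a mask register, roughly:

    ; before: li r, 1  ;  sldi r, r, 63  ;  and t, hi, r
    ; after:  rldicr t, hi, 0, 0

rldicr with a shift of 0 and a mask end of 0 keeps only bit 0, which under PowerPC's big-endian bit numbering is the most significant bit, i.e. the sign bit of the high double.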
diff --git a/test/CodeGen/PowerPC/i1-ext-fold.ll b/test/CodeGen/PowerPC/i1-ext-fold.ll
index 9a71b7baa66b..877da486bcd0 100644
--- a/test/CodeGen/PowerPC/i1-ext-fold.ll
+++ b/test/CodeGen/PowerPC/i1-ext-fold.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -11,11 +12,19 @@ entry:
ret i32 %shl
; CHECK-LABEL: @foo
+; CHECK-NO-ISEL-LABEL: @foo
; CHECK-DAG: cmpw
; CHECK-DAG: li [[REG1:[0-9]+]], 0
; CHECK-DAG: li [[REG2:[0-9]+]], 16
; CHECK: isel 3, [[REG2]], [[REG1]],
; CHECK: blr
+
+; CHECK-NO-ISEL: bc 12, 0, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 5, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL-NEXT: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 12, 0
+; CHECK-NO-ISEL-NEXT: blr
}
; Function Attrs: nounwind readnone
@@ -28,11 +37,19 @@ entry:
ret i32 %add1
; CHECK-LABEL: @foo2
+; CHECK-NO-ISEL-LABEL: @foo2
; CHECK-DAG: cmpw
; CHECK-DAG: li [[REG1:[0-9]+]], 5
; CHECK-DAG: li [[REG2:[0-9]+]], 21
; CHECK: isel 3, [[REG2]], [[REG1]],
; CHECK: blr
+
+; CHECK-NO-ISEL: bc 12, 0, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 5, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL-NEXT: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 12, 0
+; CHECK-NO-ISEL-NEXT: blr
}
; Function Attrs: nounwind readnone
@@ -44,10 +61,18 @@ entry:
ret i32 %shl
; CHECK-LABEL: @foo3
+; CHECK-NO-ISEL-LABEL: @foo3
; CHECK-DAG: cmpw
; CHECK-DAG: li [[REG1:[0-9]+]], 16
; CHECK: isel 3, 0, [[REG1]],
; CHECK: blr
+
+; CHECK-NO-ISEL: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 5, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL-NEXT: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 0, 0
+; CHECK-NO-ISEL-NEXT: blr
}
attributes #0 = { nounwind readnone }
diff --git a/test/CodeGen/PowerPC/i1-to-double.ll b/test/CodeGen/PowerPC/i1-to-double.ll
index 4b13388ff460..7871ac7ae05b 100644
--- a/test/CodeGen/PowerPC/i1-to-double.ll
+++ b/test/CodeGen/PowerPC/i1-to-double.ll
@@ -7,15 +7,13 @@ define double @test(i1 %X) {
; CHECK-LABEL: @test
; CHECK: andi. {{[0-9]+}}, 3, 1
-; CHECK: bc 12, 1,
-
-; CHECK: li 3, .LCP[[L1:[A-Z0-9_]+]]@l
-; CHECK: addis 3, 3, .LCP[[L1]]@ha
-; CHECK: lfs 1, 0(3)
-; CHECK: blr
-
-; CHECK: li 3, .LCP[[L2:[A-Z0-9_]+]]@l
-; CHECK: addis 3, 3, .LCP[[L2]]@ha
-; CHECK: lfs 1, 0(3)
-; CHECK: blr
-
+; CHECK-NEXT: addis 4, 4, .LCPI
+; CHECK-NEXT: addis 5, 5, .LCPI
+; CHECK-NEXT: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK: ori 3, 4, 0
+; CHECK-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: addi 3, 5, 0
+; CHECK-NEXT: [[SUCCESSOR]]
+; CHECK-NEXT: lfs 1, 0(3)
+; CHECK-NEXT: blr
diff --git a/test/CodeGen/PowerPC/i64_fp_round.ll b/test/CodeGen/PowerPC/i64_fp_round.ll
index 1e95dfdec71a..9fe7a3bfcbb7 100644
--- a/test/CodeGen/PowerPC/i64_fp_round.ll
+++ b/test/CodeGen/PowerPC/i64_fp_round.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-fpcvt < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-fpcvt -ppc-gen-isel=false < %s | FileCheck %s --check-prefix=CHECK-NO-ISEL
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -12,10 +13,20 @@ entry:
; Note that only parts of the sequence are checked for here, to allow
; for minor code generation differences.
+; CHECK-LABEL: test
+; CHECK-NO-ISEL-LABEL: test
; CHECK: sradi [[REG1:[0-9]+]], 3, 53
; CHECK: addi [[REG2:[0-9]+]], [[REG1]], 1
; CHECK: cmpldi [[REG2]], 1
; CHECK: isel [[REG3:[0-9]+]], {{[0-9]+}}, 3, 1
+; CHECK-NO-ISEL: rldicr [[REG2:[0-9]+]], {{[0-9]+}}, 0, 52
+; CHECK-NO-ISEL: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori [[REG3:[0-9]+]], 3, 0
+; CHECK-NO-ISEL-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NO-ISEL-NEXT: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi [[REG3]], [[REG2]], 0
+; CHECK-NO-ISEL-NEXT: [[SUCCESSOR]]
+; CHECK-NO-ISEL: std [[REG3]], -{{[0-9]+}}(1)
; CHECK: std [[REG3]], -{{[0-9]+}}(1)
diff --git a/test/CodeGen/PowerPC/ifcvt.ll b/test/CodeGen/PowerPC/ifcvt.ll
index 9c966c95b72d..b9b594a68f12 100644
--- a/test/CodeGen/PowerPC/ifcvt.ll
+++ b/test/CodeGen/PowerPC/ifcvt.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -verify-machineinstrs -ppc-gen-isel=false | FileCheck --check-prefix=CHECK-NO-ISEL %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -18,10 +19,18 @@ cond.false: ; preds = %sw.epilog
%add37 = add nsw i32 %conv29, %a
br label %cond.end
-; CHECK: @test
+; CHECK-LABEL: @test
+; CHECK-NO-ISEL-LABEL: @test
; CHECK: add [[REG:[0-9]+]],
; CHECK: subf [[REG2:[0-9]+]],
; CHECK: isel {{[0-9]+}}, [[REG]], [[REG2]],
+; CHECK-NO-ISEL: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 5, 6, 0
+; CHECK-NO-ISEL: extsh 5, 5
+; CHECK-NO-ISEL-NEXT: add 3, 3, 5
+; CHECK-NO-ISEL-NEXT: blr
cond.end: ; preds = %cond.false, %cond.true
%cond = phi i32 [ %sub34, %cond.true ], [ %add37, %cond.false ]
diff --git a/test/CodeGen/PowerPC/indirectbr.ll b/test/CodeGen/PowerPC/indirectbr.ll
index d1e03ca7773a..c040d7859a8b 100644
--- a/test/CodeGen/PowerPC/indirectbr.ll
+++ b/test/CodeGen/PowerPC/indirectbr.ll
@@ -17,23 +17,35 @@ entry:
bb2: ; preds = %entry, %bb3
%gotovar.4.0 = phi i8* [ %gotovar.4.0.pre, %bb3 ], [ %0, %entry ] ; <i8*> [#uses=1]
; PIC: mtctr
-; PIC-NEXT: li
-; PIC-NEXT: li
-; PIC-NEXT: li
-; PIC-NEXT: li
; PIC-NEXT: bctr
+; PIC: li
+; PIC: b LBB
+; PIC: li
+; PIC: b LBB
+; PIC: li
+; PIC: b LBB
+; PIC: li
+; PIC: b LBB
; STATIC: mtctr
-; STATIC-NEXT: li
-; STATIC-NEXT: li
-; STATIC-NEXT: li
-; STATIC-NEXT: li
; STATIC-NEXT: bctr
+; STATIC: li
+; STATIC: b LBB
+; STATIC: li
+; STATIC: b LBB
+; STATIC: li
+; STATIC: b LBB
+; STATIC: li
+; STATIC: b LBB
; PPC64: mtctr
-; PPC64-NEXT: li
-; PPC64-NEXT: li
-; PPC64-NEXT: li
-; PPC64-NEXT: li
; PPC64-NEXT: bctr
+; PPC64: li
+; PPC64: b LBB
+; PPC64: li
+; PPC64: b LBB
+; PPC64: li
+; PPC64: b LBB
+; PPC64: li
+; PPC64: b LBB
indirectbr i8* %gotovar.4.0, [label %L5, label %L4, label %L3, label %L2, label %L1]
bb3: ; preds = %entry
diff --git a/test/CodeGen/PowerPC/isel.ll b/test/CodeGen/PowerPC/isel.ll
index 1dc55fcc40b2..c1cceb967018 100644
--- a/test/CodeGen/PowerPC/isel.ll
+++ b/test/CodeGen/PowerPC/isel.ll
@@ -2,14 +2,22 @@ target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
target triple = "powerpc64-unknown-linux-gnu"
; RUN: llc -verify-machineinstrs -mcpu=a2 < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
define i64 @test1(i64 %a, i64 %b, i64 %c, i64 %d) {
entry:
%p = icmp uge i64 %a, %b
%x = select i1 %p, i64 %c, i64 %d
ret i64 %x
-; CHECK: @test1
+; CHECK-LABEL: @test1
+; CHECK-NO-ISEL-LABEL: @test1
; CHECK: isel
+; CHECK-NO-ISEL: bc 12, 0, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 5, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 6, 0
+; CHECK-NO-ISEL-NEXT: blr
}
define i32 @test2(i32 %a, i32 %b, i32 %c, i32 %d) {
@@ -17,7 +25,14 @@ entry:
%p = icmp uge i32 %a, %b
%x = select i1 %p, i32 %c, i32 %d
ret i32 %x
-; CHECK: @test2
+; CHECK-LABEL: @test2
+; CHECK-NO-ISEL-LABEL: @test2
; CHECK: isel
+; CHECK-NO-ISEL: bc 12, 0, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 5, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 6, 0
+; CHECK-NO-ISEL-NEXT: blr
}
diff --git a/test/CodeGen/PowerPC/jaggedstructs.ll b/test/CodeGen/PowerPC/jaggedstructs.ll
index b28b34d7814f..6128316f45fa 100644
--- a/test/CodeGen/PowerPC/jaggedstructs.ll
+++ b/test/CodeGen/PowerPC/jaggedstructs.ll
@@ -18,31 +18,31 @@ entry:
ret void
}
-; CHECK: std 6, 184(1)
-; CHECK: std 5, 176(1)
-; CHECK: std 4, 168(1)
-; CHECK: std 3, 160(1)
-; CHECK: lbz {{[0-9]+}}, 167(1)
-; CHECK: lhz {{[0-9]+}}, 165(1)
-; CHECK: stb {{[0-9]+}}, 55(1)
-; CHECK: sth {{[0-9]+}}, 53(1)
-; CHECK: lbz {{[0-9]+}}, 175(1)
-; CHECK: lwz {{[0-9]+}}, 171(1)
-; CHECK: stb {{[0-9]+}}, 63(1)
-; CHECK: stw {{[0-9]+}}, 59(1)
-; CHECK: lhz {{[0-9]+}}, 182(1)
-; CHECK: lwz {{[0-9]+}}, 178(1)
-; CHECK: sth {{[0-9]+}}, 70(1)
-; CHECK: stw {{[0-9]+}}, 66(1)
-; CHECK: lbz {{[0-9]+}}, 191(1)
-; CHECK: lhz {{[0-9]+}}, 189(1)
-; CHECK: lwz {{[0-9]+}}, 185(1)
-; CHECK: stb {{[0-9]+}}, 79(1)
-; CHECK: sth {{[0-9]+}}, 77(1)
-; CHECK: stw {{[0-9]+}}, 73(1)
-; CHECK: ld 6, 72(1)
-; CHECK: ld 5, 64(1)
-; CHECK: ld 4, 56(1)
-; CHECK: ld 3, 48(1)
+; CHECK-DAG: std 3, 160(1)
+; CHECK-DAG: std 6, 184(1)
+; CHECK-DAG: std 5, 176(1)
+; CHECK-DAG: std 4, 168(1)
+; CHECK-DAG: lbz {{[0-9]+}}, 167(1)
+; CHECK-DAG: lhz {{[0-9]+}}, 165(1)
+; CHECK-DAG: stb {{[0-9]+}}, 55(1)
+; CHECK-DAG: sth {{[0-9]+}}, 53(1)
+; CHECK-DAG: lbz {{[0-9]+}}, 175(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 171(1)
+; CHECK-DAG: stb {{[0-9]+}}, 63(1)
+; CHECK-DAG: stw {{[0-9]+}}, 59(1)
+; CHECK-DAG: lhz {{[0-9]+}}, 182(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 178(1)
+; CHECK-DAG: sth {{[0-9]+}}, 70(1)
+; CHECK-DAG: stw {{[0-9]+}}, 66(1)
+; CHECK-DAG: lbz {{[0-9]+}}, 191(1)
+; CHECK-DAG: lhz {{[0-9]+}}, 189(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 185(1)
+; CHECK-DAG: stb {{[0-9]+}}, 79(1)
+; CHECK-DAG: sth {{[0-9]+}}, 77(1)
+; CHECK-DAG: stw {{[0-9]+}}, 73(1)
+; CHECK-DAG: ld 6, 72(1)
+; CHECK-DAG: ld 5, 64(1)
+; CHECK-DAG: ld 4, 56(1)
+; CHECK-DAG: ld 3, 48(1)
declare void @check(%struct.S3* byval, %struct.S5* byval, %struct.S6* byval, %struct.S7* byval)
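The switch from CHECK to CHECK-DAG relaxes ordering: a run of adjacent CHECK-DAG directives may match in any order, so the test no longer pins down the exact schedule of these loads and stores. A minimal hypothetical illustration of the semantics:

    ; CHECK-DAG: std 3, 160(1)
    ; CHECK-DAG: std 4, 168(1)

Output containing both stores, in either order, satisfies both directives.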
diff --git a/test/CodeGen/PowerPC/lsa.ll b/test/CodeGen/PowerPC/lsa.ll
index dc74b9dbca22..d0ebd473133c 100644
--- a/test/CodeGen/PowerPC/lsa.ll
+++ b/test/CodeGen/PowerPC/lsa.ll
@@ -8,11 +8,11 @@ entry:
%w = alloca [8200 x i32], align 4
%q = alloca [8200 x i32], align 4
%0 = bitcast [8200 x i32]* %v to i8*
- call void @llvm.lifetime.start(i64 32800, i8* %0) #0
+ call void @llvm.lifetime.start.p0i8(i64 32800, i8* %0) #0
%1 = bitcast [8200 x i32]* %w to i8*
- call void @llvm.lifetime.start(i64 32800, i8* %1) #0
+ call void @llvm.lifetime.start.p0i8(i64 32800, i8* %1) #0
%2 = bitcast [8200 x i32]* %q to i8*
- call void @llvm.lifetime.start(i64 32800, i8* %2) #0
+ call void @llvm.lifetime.start.p0i8(i64 32800, i8* %2) #0
%arraydecay = getelementptr inbounds [8200 x i32], [8200 x i32]* %q, i64 0, i64 0
%arraydecay1 = getelementptr inbounds [8200 x i32], [8200 x i32]* %v, i64 0, i64 0
%arraydecay2 = getelementptr inbounds [8200 x i32], [8200 x i32]* %w, i64 0, i64 0
@@ -28,16 +28,16 @@ entry:
; CHECK: blr
%add = add nsw i32 %4, %3
- call void @llvm.lifetime.end(i64 32800, i8* %2) #0
- call void @llvm.lifetime.end(i64 32800, i8* %1) #0
- call void @llvm.lifetime.end(i64 32800, i8* %0) #0
+ call void @llvm.lifetime.end.p0i8(i64 32800, i8* %2) #0
+ call void @llvm.lifetime.end.p0i8(i64 32800, i8* %1) #0
+ call void @llvm.lifetime.end.p0i8(i64 32800, i8* %0) #0
ret i32 %add
}
-declare void @llvm.lifetime.start(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
declare void @bar(i32*, i32*, i32*)
-declare void @llvm.lifetime.end(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
attributes #0 = { nounwind }
diff --git a/test/CodeGen/PowerPC/mature-mc-support.ll b/test/CodeGen/PowerPC/mature-mc-support.ll
index aa387f6e2666..543877d60cfa 100644
--- a/test/CodeGen/PowerPC/mature-mc-support.ll
+++ b/test/CodeGen/PowerPC/mature-mc-support.ll
@@ -28,4 +28,4 @@
module asm " .this_directive_is_very_unlikely_to_exist"
-; CHECK: LLVM ERROR: Error parsing inline asm
+; CHECK: error: unknown directive
diff --git a/test/CodeGen/PowerPC/mcm-obj.ll b/test/CodeGen/PowerPC/mcm-obj.ll
index 6b5b0c2b7425..fa899b5b3016 100644
--- a/test/CodeGen/PowerPC/mcm-obj.ll
+++ b/test/CodeGen/PowerPC/mcm-obj.ll
@@ -108,11 +108,10 @@ entry:
ret i32 %0
}
-; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO_DS for
-; accessing tentatively declared variable ti.
+; Verify generation of relocations for accessing variable ti.
;
; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM6:[^ ]+]]
-; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM6]]
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO [[SYM6]]
;
; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM6:[^ ]+]]
; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM6]]
diff --git a/test/CodeGen/PowerPC/misched-inorder-latency.ll b/test/CodeGen/PowerPC/misched-inorder-latency.ll
index ded3111da977..26663d81f357 100644
--- a/test/CodeGen/PowerPC/misched-inorder-latency.ll
+++ b/test/CodeGen/PowerPC/misched-inorder-latency.ll
@@ -17,7 +17,7 @@ entry:
%sum1 = add i32 %sumin, 1
%val1 = load i32, i32* %ptr
%p = icmp eq i32 %sumin, 0
- br i1 %p, label %true, label %end
+ br i1 %p, label %true, label %end, !prof !1
true:
%sum2 = add i32 %sum1, 1
%ptr2 = getelementptr i32, i32* %ptr, i32 1
@@ -53,3 +53,5 @@ end:
ret i32 %valmerge
}
declare void @llvm.prefetch(i8*, i32, i32, i32) nounwind
+
+!1 = !{!"branch_weights", i32 2, i32 1}
diff --git a/test/CodeGen/PowerPC/optcmp.ll b/test/CodeGen/PowerPC/optcmp.ll
index 5e8ca5a6a678..a1921452d620 100644
--- a/test/CodeGen/PowerPC/optcmp.ll
+++ b/test/CodeGen/PowerPC/optcmp.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 -mattr=-crbits -disable-ppc-cmp-opt=0 | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 -mattr=-crbits -disable-ppc-cmp-opt=0 -ppc-gen-isel=false | FileCheck --check-prefix=CHECK-NO-ISEL %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -34,9 +35,14 @@ entry:
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
-; CHECK: @fool
+; CHECK-LABEL: @fool
+; CHECK-NO-ISEL-LABEL: @fool
; CHECK: subf. [[REG:[0-9]+]], 4, 3
; CHECK: isel 3, 3, 4, 1
+; CHECK-NO-ISEL: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 4, 0
+; CHECK-NO-ISEL: b [[SUCCESSOR:.LBB[0-9]+]]
+
; CHECK: std [[REG]], 0(5)
}
@@ -48,9 +54,13 @@ entry:
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
-; CHECK: @foolb
+; CHECK-LABEL: @foolb
+; CHECK-NO-ISEL-LABEL: @foolb
; CHECK: subf. [[REG:[0-9]+]], 4, 3
; CHECK: isel 3, 4, 3, 1
+; CHECK-NO-ISEL: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL-NEXT: b .LBB
+; CHECK-NO-ISEL: addi 3, 4, 0
; CHECK: std [[REG]], 0(5)
}
@@ -62,9 +72,13 @@ entry:
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
-; CHECK: @foolc
+; CHECK-LABEL: @foolc
+; CHECK-NO-ISEL-LABEL: @foolc
; CHECK: subf. [[REG:[0-9]+]], 3, 4
; CHECK: isel 3, 3, 4, 0
+; CHECK-NO-ISEL: bc 12, 0, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 4, 0
+; CHECK-NO-ISEL: b [[SUCCESSOR:.LBB[0-9]+]]
; CHECK: std [[REG]], 0(5)
}
@@ -76,9 +90,13 @@ entry:
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
-; CHECK: @foold
+; CHECK-LABEL: @foold
+; CHECK-NO-ISEL-LABEL: @foold
; CHECK: subf. [[REG:[0-9]+]], 3, 4
; CHECK: isel 3, 3, 4, 1
+; CHECK-NO-ISEL: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 4, 0
+; CHECK-NO-ISEL: b [[SUCCESSOR:.LBB[0-9]+]]
; CHECK: std [[REG]], 0(5)
}
@@ -90,9 +108,13 @@ entry:
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
-; CHECK: @foold2
+; CHECK-LABEL: @foold2
+; CHECK-NO-ISEL-LABEL: @foold2
; CHECK: subf. [[REG:[0-9]+]], 4, 3
; CHECK: isel 3, 3, 4, 0
+; CHECK-NO-ISEL: bc 12, 0, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 4, 0
+; CHECK-NO-ISEL: b [[SUCCESSOR:.LBB[0-9]+]]
; CHECK: std [[REG]], 0(5)
}
diff --git a/test/CodeGen/PowerPC/p8-isel-sched.ll b/test/CodeGen/PowerPC/p8-isel-sched.ll
index 6fa5616dd42a..b45a123f0276 100644
--- a/test/CodeGen/PowerPC/p8-isel-sched.ll
+++ b/test/CodeGen/PowerPC/p8-isel-sched.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr8 -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -24,10 +25,20 @@ entry:
; Make sure that we don't schedule all of the isels together, they should be
; intermixed with the adds because each isel starts a new dispatch group.
; CHECK-LABEL: @foo
+; CHECK-NO-ISEL-LABEL: @foo
; CHECK: isel
+; CHECK-NO-ISEL: bc 12, 2, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 7, 12, 0
+; CHECK-NO-ISEL-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 7, 11, 0
; CHECK: addi
; CHECK: isel
+; CHECK-NO-ISEL: bc 12, 2, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 10, 11, 0
+; CHECK-NO-ISEL-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 10, 12, 0
; CHECK: blr
attributes #0 = { nounwind }
-
diff --git a/test/CodeGen/PowerPC/p8-scalar_vector_conversions.ll b/test/CodeGen/PowerPC/p8-scalar_vector_conversions.ll
index 1f317992a3b7..f399b2584d0b 100644
--- a/test/CodeGen/PowerPC/p8-scalar_vector_conversions.ll
+++ b/test/CodeGen/PowerPC/p8-scalar_vector_conversions.ll
@@ -7,13 +7,10 @@
@d = common global double 0.000000e+00, align 8
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define <16 x i8> @buildc(i8 zeroext %a) {
entry:
- %a.addr = alloca i8, align 1
- store i8 %a, i8* %a.addr, align 1
- %0 = load i8, i8* %a.addr, align 1
- %splat.splatinsert = insertelement <16 x i8> undef, i8 %0, i32 0
+ %splat.splatinsert = insertelement <16 x i8> undef, i8 %a, i32 0
%splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
ret <16 x i8> %splat.splat
; CHECK: sldi [[REG1:[0-9]+]], 3, 56
@@ -22,13 +19,10 @@ entry:
; CHECK-LE: xxswapd {{[0-9]+}}, [[REG1]]
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define <8 x i16> @builds(i16 zeroext %a) {
entry:
- %a.addr = alloca i16, align 2
- store i16 %a, i16* %a.addr, align 2
- %0 = load i16, i16* %a.addr, align 2
- %splat.splatinsert = insertelement <8 x i16> undef, i16 %0, i32 0
+ %splat.splatinsert = insertelement <8 x i16> undef, i16 %a, i32 0
%splat.splat = shufflevector <8 x i16> %splat.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
ret <8 x i16> %splat.splat
; CHECK: sldi [[REG1:[0-9]+]], 3, 48
@@ -37,13 +31,10 @@ entry:
; CHECK-LE: xxswapd {{[0-9]+}}, [[REG1]]
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define <4 x i32> @buildi(i32 zeroext %a) {
entry:
- %a.addr = alloca i32, align 4
- store i32 %a, i32* %a.addr, align 4
- %0 = load i32, i32* %a.addr, align 4
- %splat.splatinsert = insertelement <4 x i32> undef, i32 %0, i32 0
+ %splat.splatinsert = insertelement <4 x i32> undef, i32 %a, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
ret <4 x i32> %splat.splat
; CHECK: mtvsrwz [[REG1:[0-9]+]], 3
@@ -52,13 +43,10 @@ entry:
; CHECK-LE: xxspltw 34, [[REG1]]
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define <2 x i64> @buildl(i64 %a) {
entry:
- %a.addr = alloca i64, align 8
- store i64 %a, i64* %a.addr, align 8
- %0 = load i64, i64* %a.addr, align 8
- %splat.splatinsert = insertelement <2 x i64> undef, i64 %0, i32 0
+ %splat.splatinsert = insertelement <2 x i64> undef, i64 %a, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
ret <2 x i64> %splat.splat
; CHECK: mtvsrd {{[0-9]+}}, 3
@@ -66,13 +54,10 @@ entry:
; CHECK-LE: xxspltd 34, [[REG1]], 0
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define <4 x float> @buildf(float %a) {
entry:
- %a.addr = alloca float, align 4
- store float %a, float* %a.addr, align 4
- %0 = load float, float* %a.addr, align 4
- %splat.splatinsert = insertelement <4 x float> undef, float %0, i32 0
+ %splat.splatinsert = insertelement <4 x float> undef, float %a, i32 0
%splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
ret <4 x float> %splat.splat
; CHECK: xscvdpspn [[REG1:[0-9]+]], 1
@@ -83,8 +68,8 @@ entry:
; The optimization to remove stack operations from PPCDAGToDAGISel::Select
; should still trigger for v2f64, producing an lxvdsx.
-; Function Attrs: nounwind
-define <2 x double> @buildd() #0 {
+; Function Attrs: norecurse nounwind readonly
+define <2 x double> @buildd() {
entry:
%0 = load double, double* @d, align 8
%splat.splatinsert = insertelement <2 x double> undef, double %0, i32 0
@@ -96,13 +81,10 @@ entry:
; CHECK-LE: lxvdsx 34, 0, [[REG1]]
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc0(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 0
+ %vecext = extractelement <16 x i8> %vsc, i32 0
ret i8 %vecext
; CHECK-LABEL: @getsc0
; CHECK: mfvsrd 3, 34
@@ -114,13 +96,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc1(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 1
+ %vecext = extractelement <16 x i8> %vsc, i32 1
ret i8 %vecext
; CHECK-LABEL: @getsc1
; CHECK: mfvsrd 3, 34
@@ -132,13 +111,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc2(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 2
+ %vecext = extractelement <16 x i8> %vsc, i32 2
ret i8 %vecext
; CHECK-LABEL: @getsc2
; CHECK: mfvsrd 3, 34
@@ -150,13 +126,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc3(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 3
+ %vecext = extractelement <16 x i8> %vsc, i32 3
ret i8 %vecext
; CHECK-LABEL: @getsc3
; CHECK: mfvsrd 3, 34
@@ -168,13 +141,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc4(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 4
+ %vecext = extractelement <16 x i8> %vsc, i32 4
ret i8 %vecext
; CHECK-LABEL: @getsc4
; CHECK: mfvsrd 3, 34
@@ -186,13 +156,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc5(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 5
+ %vecext = extractelement <16 x i8> %vsc, i32 5
ret i8 %vecext
; CHECK-LABEL: @getsc5
; CHECK: mfvsrd 3, 34
@@ -204,13 +171,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc6(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 6
+ %vecext = extractelement <16 x i8> %vsc, i32 6
ret i8 %vecext
; CHECK-LABEL: @getsc6
; CHECK: mfvsrd 3, 34
@@ -222,13 +186,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc7(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 7
+ %vecext = extractelement <16 x i8> %vsc, i32 7
ret i8 %vecext
; CHECK-LABEL: @getsc7
; CHECK: mfvsrd 3, 34
@@ -240,13 +201,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc8(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 8
+ %vecext = extractelement <16 x i8> %vsc, i32 8
ret i8 %vecext
; CHECK-LABEL: @getsc8
; CHECK: mfvsrd 3,
@@ -258,13 +216,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc9(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 9
+ %vecext = extractelement <16 x i8> %vsc, i32 9
ret i8 %vecext
; CHECK-LABEL: @getsc9
; CHECK: mfvsrd 3,
@@ -276,13 +231,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc10(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 10
+ %vecext = extractelement <16 x i8> %vsc, i32 10
ret i8 %vecext
; CHECK-LABEL: @getsc10
; CHECK: mfvsrd 3,
@@ -294,13 +246,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc11(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 11
+ %vecext = extractelement <16 x i8> %vsc, i32 11
ret i8 %vecext
; CHECK-LABEL: @getsc11
; CHECK: mfvsrd 3,
@@ -312,13 +261,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc12(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 12
+ %vecext = extractelement <16 x i8> %vsc, i32 12
ret i8 %vecext
; CHECK-LABEL: @getsc12
; CHECK: mfvsrd 3,
@@ -330,13 +276,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc13(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 13
+ %vecext = extractelement <16 x i8> %vsc, i32 13
ret i8 %vecext
; CHECK-LABEL: @getsc13
; CHECK: mfvsrd 3,
@@ -348,13 +291,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc14(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 14
+ %vecext = extractelement <16 x i8> %vsc, i32 14
ret i8 %vecext
; CHECK-LABEL: @getsc14
; CHECK: mfvsrd 3,
@@ -366,13 +306,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc15(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 15
+ %vecext = extractelement <16 x i8> %vsc, i32 15
ret i8 %vecext
; CHECK-LABEL: @getsc15
; CHECK: mfvsrd 3,
@@ -383,13 +320,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc0(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 0
+ %vecext = extractelement <16 x i8> %vuc, i32 0
ret i8 %vecext
; CHECK-LABEL: @getuc0
; CHECK: mfvsrd 3, 34
@@ -400,13 +334,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc1(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 1
+ %vecext = extractelement <16 x i8> %vuc, i32 1
ret i8 %vecext
; CHECK-LABEL: @getuc1
; CHECK: mfvsrd 3, 34
@@ -418,13 +349,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc2(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 2
+ %vecext = extractelement <16 x i8> %vuc, i32 2
ret i8 %vecext
; CHECK-LABEL: @getuc2
; CHECK: mfvsrd 3, 34
@@ -436,13 +364,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc3(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 3
+ %vecext = extractelement <16 x i8> %vuc, i32 3
ret i8 %vecext
; CHECK-LABEL: @getuc3
; CHECK: mfvsrd 3, 34
@@ -454,13 +379,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc4(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 4
+ %vecext = extractelement <16 x i8> %vuc, i32 4
ret i8 %vecext
; CHECK-LABEL: @getuc4
; CHECK: mfvsrd 3, 34
@@ -472,13 +394,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc5(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 5
+ %vecext = extractelement <16 x i8> %vuc, i32 5
ret i8 %vecext
; CHECK-LABEL: @getuc5
; CHECK: mfvsrd 3, 34
@@ -490,13 +409,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc6(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 6
+ %vecext = extractelement <16 x i8> %vuc, i32 6
ret i8 %vecext
; CHECK-LABEL: @getuc6
; CHECK: mfvsrd 3, 34
@@ -508,13 +424,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc7(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 7
+ %vecext = extractelement <16 x i8> %vuc, i32 7
ret i8 %vecext
; CHECK-LABEL: @getuc7
; CHECK: mfvsrd 3, 34
@@ -525,13 +438,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc8(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 8
+ %vecext = extractelement <16 x i8> %vuc, i32 8
ret i8 %vecext
; CHECK-LABEL: @getuc8
; CHECK: mfvsrd 3,
@@ -542,13 +452,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc9(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 9
+ %vecext = extractelement <16 x i8> %vuc, i32 9
ret i8 %vecext
; CHECK-LABEL: @getuc9
; CHECK: mfvsrd 3,
@@ -560,13 +467,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc10(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 10
+ %vecext = extractelement <16 x i8> %vuc, i32 10
ret i8 %vecext
; CHECK-LABEL: @getuc10
; CHECK: mfvsrd 3,
@@ -578,13 +482,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc11(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 11
+ %vecext = extractelement <16 x i8> %vuc, i32 11
ret i8 %vecext
; CHECK-LABEL: @getuc11
; CHECK: mfvsrd 3,
@@ -596,13 +497,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc12(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 12
+ %vecext = extractelement <16 x i8> %vuc, i32 12
ret i8 %vecext
; CHECK-LABEL: @getuc12
; CHECK: mfvsrd 3,
@@ -614,13 +512,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc13(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 13
+ %vecext = extractelement <16 x i8> %vuc, i32 13
ret i8 %vecext
; CHECK-LABEL: @getuc13
; CHECK: mfvsrd 3,
@@ -632,13 +527,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc14(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 14
+ %vecext = extractelement <16 x i8> %vuc, i32 14
ret i8 %vecext
; CHECK-LABEL: @getuc14
; CHECK: mfvsrd 3,
@@ -650,13 +542,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc15(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 15
+ %vecext = extractelement <16 x i8> %vuc, i32 15
ret i8 %vecext
; CHECK-LABEL: @getuc15
; CHECK: mfvsrd 3,
@@ -667,16 +556,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getvelsc(<16 x i8> %vsc, i32 signext %i) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- %i.addr = alloca i32, align 4
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <16 x i8> %0, i32 %1
+ %vecext = extractelement <16 x i8> %vsc, i32 %i
ret i8 %vecext
; CHECK-LABEL: @getvelsc
; CHECK-DAG: andi. [[ANDI:[0-9]+]], {{[0-9]+}}, 8
@@ -701,16 +584,10 @@ entry:
; CHECK-DAG-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getveluc(<16 x i8> %vuc, i32 signext %i) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- %i.addr = alloca i32, align 4
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <16 x i8> %0, i32 %1
+ %vecext = extractelement <16 x i8> %vuc, i32 %i
ret i8 %vecext
; CHECK-LABEL: @getveluc
; CHECK-DAG: andi. [[ANDI:[0-9]+]], {{[0-9]+}}, 8
@@ -735,13 +612,10 @@ entry:
; CHECK-DAG-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i16 @getss0(<8 x i16> %vss) {
entry:
- %vss.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vss, <8 x i16>* %vss.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vss.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 0
+ %vecext = extractelement <8 x i16> %vss, i32 0
ret i16 %vecext
; CHECK-LABEL: @getss0
; CHECK: mfvsrd 3, 34
@@ -753,13 +627,10 @@ entry:
; CHECK-LE: extsh 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i16 @getss1(<8 x i16> %vss) {
entry:
- %vss.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vss, <8 x i16>* %vss.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vss.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 1
+ %vecext = extractelement <8 x i16> %vss, i32 1
ret i16 %vecext
; CHECK-LABEL: @getss1
; CHECK: mfvsrd 3, 34
@@ -771,13 +642,10 @@ entry:
; CHECK-LE: extsh 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i16 @getss2(<8 x i16> %vss) {
entry:
- %vss.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vss, <8 x i16>* %vss.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vss.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 2
+ %vecext = extractelement <8 x i16> %vss, i32 2
ret i16 %vecext
; CHECK-LABEL: @getss2
; CHECK: mfvsrd 3, 34
@@ -789,13 +657,10 @@ entry:
; CHECK-LE: extsh 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i16 @getss3(<8 x i16> %vss) {
entry:
- %vss.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vss, <8 x i16>* %vss.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vss.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 3
+ %vecext = extractelement <8 x i16> %vss, i32 3
ret i16 %vecext
; CHECK-LABEL: @getss3
; CHECK: mfvsrd 3, 34
@@ -807,13 +672,10 @@ entry:
; CHECK-LE: extsh 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i16 @getss4(<8 x i16> %vss) {
entry:
- %vss.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vss, <8 x i16>* %vss.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vss.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 4
+ %vecext = extractelement <8 x i16> %vss, i32 4
ret i16 %vecext
; CHECK-LABEL: @getss4
; CHECK: mfvsrd 3,
@@ -825,13 +687,10 @@ entry:
; CHECK-LE: extsh 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i16 @getss5(<8 x i16> %vss) {
entry:
- %vss.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vss, <8 x i16>* %vss.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vss.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 5
+ %vecext = extractelement <8 x i16> %vss, i32 5
ret i16 %vecext
; CHECK-LABEL: @getss5
; CHECK: mfvsrd 3,
@@ -843,13 +702,10 @@ entry:
; CHECK-LE: extsh 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i16 @getss6(<8 x i16> %vss) {
entry:
- %vss.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vss, <8 x i16>* %vss.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vss.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 6
+ %vecext = extractelement <8 x i16> %vss, i32 6
ret i16 %vecext
; CHECK-LABEL: @getss6
; CHECK: mfvsrd 3,
@@ -861,13 +717,10 @@ entry:
; CHECK-LE: extsh 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i16 @getss7(<8 x i16> %vss) {
entry:
- %vss.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vss, <8 x i16>* %vss.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vss.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 7
+ %vecext = extractelement <8 x i16> %vss, i32 7
ret i16 %vecext
; CHECK-LABEL: @getss7
; CHECK: mfvsrd 3,
@@ -878,13 +731,10 @@ entry:
; CHECK-LE: extsh 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i16 @getus0(<8 x i16> %vus) {
entry:
- %vus.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vus, <8 x i16>* %vus.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vus.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 0
+ %vecext = extractelement <8 x i16> %vus, i32 0
ret i16 %vecext
; CHECK-LABEL: @getus0
; CHECK: mfvsrd 3, 34
@@ -895,13 +745,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 48
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i16 @getus1(<8 x i16> %vus) {
entry:
- %vus.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vus, <8 x i16>* %vus.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vus.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 1
+ %vecext = extractelement <8 x i16> %vus, i32 1
ret i16 %vecext
; CHECK-LABEL: @getus1
; CHECK: mfvsrd 3, 34
@@ -913,13 +760,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 48
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i16 @getus2(<8 x i16> %vus) {
entry:
- %vus.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vus, <8 x i16>* %vus.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vus.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 2
+ %vecext = extractelement <8 x i16> %vus, i32 2
ret i16 %vecext
; CHECK-LABEL: @getus2
; CHECK: mfvsrd 3, 34
@@ -931,13 +775,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 48
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i16 @getus3(<8 x i16> %vus) {
entry:
- %vus.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vus, <8 x i16>* %vus.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vus.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 3
+ %vecext = extractelement <8 x i16> %vus, i32 3
ret i16 %vecext
; CHECK-LABEL: @getus3
; CHECK: mfvsrd 3, 34
@@ -948,13 +789,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 48
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i16 @getus4(<8 x i16> %vus) {
entry:
- %vus.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vus, <8 x i16>* %vus.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vus.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 4
+ %vecext = extractelement <8 x i16> %vus, i32 4
ret i16 %vecext
; CHECK-LABEL: @getus4
; CHECK: mfvsrd 3,
@@ -965,13 +803,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 48
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i16 @getus5(<8 x i16> %vus) {
entry:
- %vus.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vus, <8 x i16>* %vus.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vus.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 5
+ %vecext = extractelement <8 x i16> %vus, i32 5
ret i16 %vecext
; CHECK-LABEL: @getus5
; CHECK: mfvsrd 3,
@@ -983,13 +818,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 48
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i16 @getus6(<8 x i16> %vus) {
entry:
- %vus.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vus, <8 x i16>* %vus.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vus.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 6
+ %vecext = extractelement <8 x i16> %vus, i32 6
ret i16 %vecext
; CHECK-LABEL: @getus6
; CHECK: mfvsrd 3,
@@ -1001,13 +833,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 48
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i16 @getus7(<8 x i16> %vus) {
entry:
- %vus.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vus, <8 x i16>* %vus.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vus.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 7
+ %vecext = extractelement <8 x i16> %vus, i32 7
ret i16 %vecext
; CHECK-LABEL: @getus7
; CHECK: mfvsrd 3,
@@ -1018,16 +847,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 48
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i16 @getvelss(<8 x i16> %vss, i32 signext %i) {
entry:
- %vss.addr = alloca <8 x i16>, align 16
- %i.addr = alloca i32, align 4
- store <8 x i16> %vss, <8 x i16>* %vss.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <8 x i16>, <8 x i16>* %vss.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <8 x i16> %0, i32 %1
+ %vecext = extractelement <8 x i16> %vss, i32 %i
ret i16 %vecext
; CHECK-LABEL: @getvelss
; CHECK-DAG: andi. [[ANDI:[0-9]+]], {{[0-9]+}}, 4
@@ -1054,16 +877,10 @@ entry:
; CHECK-DAG-LE: extsh 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i16 @getvelus(<8 x i16> %vus, i32 signext %i) {
entry:
- %vus.addr = alloca <8 x i16>, align 16
- %i.addr = alloca i32, align 4
- store <8 x i16> %vus, <8 x i16>* %vus.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <8 x i16>, <8 x i16>* %vus.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <8 x i16> %0, i32 %1
+ %vecext = extractelement <8 x i16> %vus, i32 %i
ret i16 %vecext
; CHECK-LABEL: @getvelus
; CHECK-DAG: andi. [[ANDI:[0-9]+]], {{[0-9]+}}, 4
@@ -1090,13 +907,10 @@ entry:
; CHECK-DAG-LE: clrldi 3, 3, 48
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i32 @getsi0(<4 x i32> %vsi) {
entry:
- %vsi.addr = alloca <4 x i32>, align 16
- store <4 x i32> %vsi, <4 x i32>* %vsi.addr, align 16
- %0 = load <4 x i32>, <4 x i32>* %vsi.addr, align 16
- %vecext = extractelement <4 x i32> %0, i32 0
+ %vecext = extractelement <4 x i32> %vsi, i32 0
ret i32 %vecext
; CHECK-LABEL: @getsi0
; CHECK: xxsldwi [[SHL:[0-9]+]], 34, 34, 3
@@ -1108,13 +922,10 @@ entry:
; CHECK-LE: extsw 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i32 @getsi1(<4 x i32> %vsi) {
entry:
- %vsi.addr = alloca <4 x i32>, align 16
- store <4 x i32> %vsi, <4 x i32>* %vsi.addr, align 16
- %0 = load <4 x i32>, <4 x i32>* %vsi.addr, align 16
- %vecext = extractelement <4 x i32> %0, i32 1
+ %vecext = extractelement <4 x i32> %vsi, i32 1
ret i32 %vecext
; CHECK-LABEL: @getsi1
; CHECK: mfvsrwz 3, 34
@@ -1125,13 +936,10 @@ entry:
; CHECK-LE: extsw 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i32 @getsi2(<4 x i32> %vsi) {
entry:
- %vsi.addr = alloca <4 x i32>, align 16
- store <4 x i32> %vsi, <4 x i32>* %vsi.addr, align 16
- %0 = load <4 x i32>, <4 x i32>* %vsi.addr, align 16
- %vecext = extractelement <4 x i32> %0, i32 2
+ %vecext = extractelement <4 x i32> %vsi, i32 2
ret i32 %vecext
; CHECK-LABEL: @getsi2
; CHECK: xxsldwi [[SHL:[0-9]+]], 34, 34, 1
@@ -1142,13 +950,10 @@ entry:
; CHECK-LE: extsw 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i32 @getsi3(<4 x i32> %vsi) {
entry:
- %vsi.addr = alloca <4 x i32>, align 16
- store <4 x i32> %vsi, <4 x i32>* %vsi.addr, align 16
- %0 = load <4 x i32>, <4 x i32>* %vsi.addr, align 16
- %vecext = extractelement <4 x i32> %0, i32 3
+ %vecext = extractelement <4 x i32> %vsi, i32 3
ret i32 %vecext
; CHECK-LABEL: @getsi3
; CHECK: xxswapd [[SHL:[0-9]+]], 34
@@ -1160,13 +965,10 @@ entry:
; CHECK-LE: extsw 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i32 @getui0(<4 x i32> %vui) {
entry:
- %vui.addr = alloca <4 x i32>, align 16
- store <4 x i32> %vui, <4 x i32>* %vui.addr, align 16
- %0 = load <4 x i32>, <4 x i32>* %vui.addr, align 16
- %vecext = extractelement <4 x i32> %0, i32 0
+ %vecext = extractelement <4 x i32> %vui, i32 0
ret i32 %vecext
; CHECK-LABEL: @getui0
; CHECK: xxsldwi [[SHL:[0-9]+]], 34, 34, 3
@@ -1178,13 +980,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 32
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i32 @getui1(<4 x i32> %vui) {
entry:
- %vui.addr = alloca <4 x i32>, align 16
- store <4 x i32> %vui, <4 x i32>* %vui.addr, align 16
- %0 = load <4 x i32>, <4 x i32>* %vui.addr, align 16
- %vecext = extractelement <4 x i32> %0, i32 1
+ %vecext = extractelement <4 x i32> %vui, i32 1
ret i32 %vecext
; CHECK-LABEL: @getui1
; CHECK: mfvsrwz 3, 34
@@ -1195,13 +994,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 32
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i32 @getui2(<4 x i32> %vui) {
entry:
- %vui.addr = alloca <4 x i32>, align 16
- store <4 x i32> %vui, <4 x i32>* %vui.addr, align 16
- %0 = load <4 x i32>, <4 x i32>* %vui.addr, align 16
- %vecext = extractelement <4 x i32> %0, i32 2
+ %vecext = extractelement <4 x i32> %vui, i32 2
ret i32 %vecext
; CHECK-LABEL: @getui2
; CHECK: xxsldwi [[SHL:[0-9]+]], 34, 34, 1
@@ -1212,13 +1008,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 32
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i32 @getui3(<4 x i32> %vui) {
entry:
- %vui.addr = alloca <4 x i32>, align 16
- store <4 x i32> %vui, <4 x i32>* %vui.addr, align 16
- %0 = load <4 x i32>, <4 x i32>* %vui.addr, align 16
- %vecext = extractelement <4 x i32> %0, i32 3
+ %vecext = extractelement <4 x i32> %vui, i32 3
ret i32 %vecext
; CHECK-LABEL: @getui3
; CHECK: xxswapd [[SHL:[0-9]+]], 34
@@ -1230,45 +1023,30 @@ entry:
; CHECK-LE: clrldi 3, 3, 32
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i32 @getvelsi(<4 x i32> %vsi, i32 signext %i) {
entry:
- %vsi.addr = alloca <4 x i32>, align 16
- %i.addr = alloca i32, align 4
- store <4 x i32> %vsi, <4 x i32>* %vsi.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <4 x i32>, <4 x i32>* %vsi.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <4 x i32> %0, i32 %1
+ %vecext = extractelement <4 x i32> %vsi, i32 %i
ret i32 %vecext
; CHECK-LABEL: @getvelsi
; CHECK-LE-LABEL: @getvelsi
; FIXME: add check patterns when variable element extraction is implemented
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i32 @getvelui(<4 x i32> %vui, i32 signext %i) {
entry:
- %vui.addr = alloca <4 x i32>, align 16
- %i.addr = alloca i32, align 4
- store <4 x i32> %vui, <4 x i32>* %vui.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <4 x i32>, <4 x i32>* %vui.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <4 x i32> %0, i32 %1
+ %vecext = extractelement <4 x i32> %vui, i32 %i
ret i32 %vecext
; CHECK-LABEL: @getvelui
; CHECK-LE-LABEL: @getvelui
; FIXME: add check patterns when variable element extraction is implemented
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define i64 @getsl0(<2 x i64> %vsl) {
entry:
- %vsl.addr = alloca <2 x i64>, align 16
- store <2 x i64> %vsl, <2 x i64>* %vsl.addr, align 16
- %0 = load <2 x i64>, <2 x i64>* %vsl.addr, align 16
- %vecext = extractelement <2 x i64> %0, i32 0
+ %vecext = extractelement <2 x i64> %vsl, i32 0
ret i64 %vecext
; CHECK-LABEL: @getsl0
; CHECK: mfvsrd 3, 34
@@ -1277,13 +1055,10 @@ entry:
; CHECK-LE: mfvsrd 3, [[SWP]]
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define i64 @getsl1(<2 x i64> %vsl) {
entry:
- %vsl.addr = alloca <2 x i64>, align 16
- store <2 x i64> %vsl, <2 x i64>* %vsl.addr, align 16
- %0 = load <2 x i64>, <2 x i64>* %vsl.addr, align 16
- %vecext = extractelement <2 x i64> %0, i32 1
+ %vecext = extractelement <2 x i64> %vsl, i32 1
ret i64 %vecext
; CHECK-LABEL: @getsl1
; CHECK: xxswapd [[SWP:[0-9]+]], 34
@@ -1292,13 +1067,10 @@ entry:
; CHECK-LE: mfvsrd 3, 34
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define i64 @getul0(<2 x i64> %vul) {
entry:
- %vul.addr = alloca <2 x i64>, align 16
- store <2 x i64> %vul, <2 x i64>* %vul.addr, align 16
- %0 = load <2 x i64>, <2 x i64>* %vul.addr, align 16
- %vecext = extractelement <2 x i64> %0, i32 0
+ %vecext = extractelement <2 x i64> %vul, i32 0
ret i64 %vecext
; CHECK-LABEL: @getul0
; CHECK: mfvsrd 3, 34
@@ -1307,13 +1079,10 @@ entry:
; CHECK-LE: mfvsrd 3, [[SWP]]
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define i64 @getul1(<2 x i64> %vul) {
entry:
- %vul.addr = alloca <2 x i64>, align 16
- store <2 x i64> %vul, <2 x i64>* %vul.addr, align 16
- %0 = load <2 x i64>, <2 x i64>* %vul.addr, align 16
- %vecext = extractelement <2 x i64> %0, i32 1
+ %vecext = extractelement <2 x i64> %vul, i32 1
ret i64 %vecext
; CHECK-LABEL: @getul1
; CHECK: xxswapd [[SWP:[0-9]+]], 34
@@ -1322,45 +1091,30 @@ entry:
; CHECK-LE: mfvsrd 3, 34
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define i64 @getvelsl(<2 x i64> %vsl, i32 signext %i) {
entry:
- %vsl.addr = alloca <2 x i64>, align 16
- %i.addr = alloca i32, align 4
- store <2 x i64> %vsl, <2 x i64>* %vsl.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <2 x i64>, <2 x i64>* %vsl.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <2 x i64> %0, i32 %1
+ %vecext = extractelement <2 x i64> %vsl, i32 %i
ret i64 %vecext
; CHECK-LABEL: @getvelsl
; CHECK-LE-LABEL: @getvelsl
; FIXME: add check patterns when variable element extraction is implemented
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define i64 @getvelul(<2 x i64> %vul, i32 signext %i) {
entry:
- %vul.addr = alloca <2 x i64>, align 16
- %i.addr = alloca i32, align 4
- store <2 x i64> %vul, <2 x i64>* %vul.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <2 x i64>, <2 x i64>* %vul.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <2 x i64> %0, i32 %1
+ %vecext = extractelement <2 x i64> %vul, i32 %i
ret i64 %vecext
; CHECK-LABEL: @getvelul
; CHECK-LE-LABEL: @getvelul
; FIXME: add check patterns when variable element extraction is implemented
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define float @getf0(<4 x float> %vf) {
entry:
- %vf.addr = alloca <4 x float>, align 16
- store <4 x float> %vf, <4 x float>* %vf.addr, align 16
- %0 = load <4 x float>, <4 x float>* %vf.addr, align 16
- %vecext = extractelement <4 x float> %0, i32 0
+ %vecext = extractelement <4 x float> %vf, i32 0
ret float %vecext
; CHECK-LABEL: @getf0
; CHECK: xscvspdpn 1, 34
@@ -1369,13 +1123,10 @@ entry:
; CHECK-LE: xscvspdpn 1, [[SHL]]
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define float @getf1(<4 x float> %vf) {
entry:
- %vf.addr = alloca <4 x float>, align 16
- store <4 x float> %vf, <4 x float>* %vf.addr, align 16
- %0 = load <4 x float>, <4 x float>* %vf.addr, align 16
- %vecext = extractelement <4 x float> %0, i32 1
+ %vecext = extractelement <4 x float> %vf, i32 1
ret float %vecext
; CHECK-LABEL: @getf1
; CHECK: xxsldwi [[SHL:[0-9]+]], 34, 34, 1
@@ -1385,13 +1136,10 @@ entry:
; CHECK-LE: xscvspdpn 1, [[SHL]]
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define float @getf2(<4 x float> %vf) {
entry:
- %vf.addr = alloca <4 x float>, align 16
- store <4 x float> %vf, <4 x float>* %vf.addr, align 16
- %0 = load <4 x float>, <4 x float>* %vf.addr, align 16
- %vecext = extractelement <4 x float> %0, i32 2
+ %vecext = extractelement <4 x float> %vf, i32 2
ret float %vecext
; CHECK-LABEL: @getf2
; CHECK: xxswapd [[SHL:[0-9]+]], 34
@@ -1401,13 +1149,10 @@ entry:
; CHECK-LE: xscvspdpn 1, [[SHL]]
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define float @getf3(<4 x float> %vf) {
entry:
- %vf.addr = alloca <4 x float>, align 16
- store <4 x float> %vf, <4 x float>* %vf.addr, align 16
- %0 = load <4 x float>, <4 x float>* %vf.addr, align 16
- %vecext = extractelement <4 x float> %0, i32 3
+ %vecext = extractelement <4 x float> %vf, i32 3
ret float %vecext
; CHECK-LABEL: @getf3
; CHECK: xxsldwi [[SHL:[0-9]+]], 34, 34, 3
@@ -1416,29 +1161,20 @@ entry:
; CHECK-LE: xscvspdpn 1, 34
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define float @getvelf(<4 x float> %vf, i32 signext %i) {
entry:
- %vf.addr = alloca <4 x float>, align 16
- %i.addr = alloca i32, align 4
- store <4 x float> %vf, <4 x float>* %vf.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <4 x float>, <4 x float>* %vf.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <4 x float> %0, i32 %1
+ %vecext = extractelement <4 x float> %vf, i32 %i
ret float %vecext
; CHECK-LABEL: @getvelf
; CHECK-LE-LABEL: @getvelf
; FIXME: add check patterns when variable element extraction is implemented
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define double @getd0(<2 x double> %vd) {
entry:
- %vd.addr = alloca <2 x double>, align 16
- store <2 x double> %vd, <2 x double>* %vd.addr, align 16
- %0 = load <2 x double>, <2 x double>* %vd.addr, align 16
- %vecext = extractelement <2 x double> %0, i32 0
+ %vecext = extractelement <2 x double> %vd, i32 0
ret double %vecext
; CHECK-LABEL: @getd0
; CHECK: xxlor 1, 34, 34
@@ -1446,13 +1182,10 @@ entry:
; CHECK-LE: xxswapd 1, 34
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define double @getd1(<2 x double> %vd) {
entry:
- %vd.addr = alloca <2 x double>, align 16
- store <2 x double> %vd, <2 x double>* %vd.addr, align 16
- %0 = load <2 x double>, <2 x double>* %vd.addr, align 16
- %vecext = extractelement <2 x double> %0, i32 1
+ %vecext = extractelement <2 x double> %vd, i32 1
ret double %vecext
; CHECK-LABEL: @getd1
; CHECK: xxswapd 1, 34
@@ -1460,16 +1193,10 @@ entry:
; CHECK-LE: xxlor 1, 34, 34
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define double @getveld(<2 x double> %vd, i32 signext %i) {
entry:
- %vd.addr = alloca <2 x double>, align 16
- %i.addr = alloca i32, align 4
- store <2 x double> %vd, <2 x double>* %vd.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <2 x double>, <2 x double>* %vd.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <2 x double> %0, i32 %1
+ %vecext = extractelement <2 x double> %vd, i32 %i
ret double %vecext
; CHECK-LABEL: @getveld
; CHECK-LE-LABEL: @getveld
diff --git a/test/CodeGen/PowerPC/ppc-crbits-onoff.ll b/test/CodeGen/PowerPC/ppc-crbits-onoff.ll
index fbf69d5319be..0e7f8f1bc668 100644
--- a/test/CodeGen/PowerPC/ppc-crbits-onoff.ll
+++ b/test/CodeGen/PowerPC/ppc-crbits-onoff.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
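+; The second RUN line disables isel generation via -ppc-gen-isel=false; the
+; CHECK-NO-ISEL patterns below verify that each isel is instead expanded
+; into an equivalent conditional-branch sequence (bc/ori/addi).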
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -12,10 +13,16 @@ entry:
ret i32 %and
; CHECK-LABEL: @crbitsoff
+; CHECK-NO-ISEL-LABEL: @crbitsoff
; CHECK-DAG: cmplwi {{[0-9]+}}, 3, 0
; CHECK-DAG: li [[REG2:[0-9]+]], 1
; CHECK-DAG: cntlzw [[REG3:[0-9]+]],
; CHECK: isel [[REG4:[0-9]+]], 0, [[REG2]]
+; CHECK-NO-ISEL: bc 12, 2, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 4, 5, 0
+; CHECK-NO-ISEL-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 4, 0, 0
; CHECK: and 3, [[REG4]], [[REG3]]
; CHECK: blr
}
@@ -29,11 +36,17 @@ entry:
ret i32 %and
; CHECK-LABEL: @crbitson
+; CHECK-NO-ISEL-LABEL: @crbitson
; CHECK-DAG: cmpwi {{[0-9]+}}, 3, 0
; CHECK-DAG: cmpwi {{[0-9]+}}, 4, 0
; CHECK-DAG: li [[REG2:[0-9]+]], 1
; CHECK-DAG: crorc [[REG3:[0-9]+]],
; CHECK: isel 3, 0, [[REG2]], [[REG3]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 0, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
diff --git a/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll b/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
index 08e39ed05117..10edefb2e21d 100644
--- a/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
+++ b/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
@@ -403,7 +403,7 @@ entry:
; CHECK: [[ELSE_LABEL]]
; CHECK-NEXT: slwi 3, 4, 1
; DISABLE: ld 14, -[[STACK_OFFSET]](1) # 8-byte Folded Reload
-; CHECK-NEXT blr
+; CHECK-NEXT: blr
;
define i32 @inlineAsm(i32 %cond, i32 %N) {
entry:
diff --git a/test/CodeGen/PowerPC/ppc64-align-long-double.ll b/test/CodeGen/PowerPC/ppc64-align-long-double.ll
index c3cccd5b2935..d59dc64dcf85 100644
--- a/test/CodeGen/PowerPC/ppc64-align-long-double.ll
+++ b/test/CodeGen/PowerPC/ppc64-align-long-double.ll
@@ -1,6 +1,6 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -O0 -fast-isel=false -mattr=-vsx < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -O0 -fast-isel=false -mattr=+vsx < %s | FileCheck -check-prefix=CHECK-VSX %s
-; RUN: llc -verify-machineinstrs -mcpu=pwr9 -O0 -fast-isel=false -mattr=+vsx < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr9 -O0 -fast-isel=false -mattr=+vsx < %s | FileCheck -check-prefix=CHECK-P9 %s
; Verify internal alignment of long double in a struct. The double
; argument comes in GPR3; GPR4 is skipped; GPRs 5 and 6 contain
@@ -19,19 +19,44 @@ entry:
ret ppc_fp128 %0
}
+; The additional stores are caused because we forward the value in the
+; store->load->bitcast path to make a store and a bitcast of the same
+; value. Since the target does bitcasts through memory and we no longer
+; remember the original address, we need to do the store to a fresh
+; local address.
+
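+; A minimal sketch of the pattern described above (illustrative only, not
+; part of this test; %slot, %r, and %f are hypothetical names):
+;   store i64 %val, i64* %slot        ; original store
+;   %r = load i64, i64* %slot         ; forwarded to %val
+;   %f = bitcast i64 %r to double     ; lowered through memory, so the
+;                                     ; value is stored to a fresh slot
+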
; CHECK-DAG: std 6, 72(1)
; CHECK-DAG: std 5, 64(1)
; CHECK-DAG: std 4, 56(1)
; CHECK-DAG: std 3, 48(1)
-; CHECK: lfd 1, 64(1)
-; CHECK: lfd 2, 72(1)
+
+; CHECK-DAG: std 5, -16(1)
+; CHECK-DAG: std 6, -8(1)
+; CHECK-DAG: lfd 1, -16(1)
+; CHECK-DAG: lfd 2, -8(1)
+
+; FIXMECHECK: lfd 1, 64(1)
+; FIXMECHECK: lfd 2, 72(1)
; CHECK-VSX-DAG: std 6, 72(1)
; CHECK-VSX-DAG: std 5, 64(1)
; CHECK-VSX-DAG: std 4, 56(1)
; CHECK-VSX-DAG: std 3, 48(1)
-; CHECK-VSX: li 3, 16
-; CHECK-VSX: addi 4, 1, 48
-; CHECK-VSX: lxsdx 1, 4, 3
-; CHECK-VSX: li 3, 24
-; CHECK-VSX: lxsdx 2, 4, 3
+; CHECK-VSX-DAG: std 5, -16(1)
+; CHECK-VSX-DAG: std 6, -8(1)
+; CHECK-VSX: addi 3, 1, -16
+; CHECK-VSX: lxsdx 1, 0, 3
+; CHECK-VSX: addi 3, 1, -8
+; CHECK-VSX: lxsdx 2, 0, 3
+
+; FIXME-VSX: addi 4, 1, 48
+; FIXME-VSX: lxsdx 1, 4, 3
+; FIXME-VSX: li 3, 24
+; FIXME-VSX: lxsdx 2, 4, 3
+
+; CHECK-P9: std 6, 72(1)
+; CHECK-P9: std 5, 64(1)
+; CHECK-P9: std 4, 56(1)
+; CHECK-P9: std 3, 48(1)
+; CHECK-P9: mtvsrd 1, 5
+; CHECK-P9: mtvsrd 2, 6
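+
+; On pwr9 the two doublewords are moved directly from GPRs 5 and 6 into
+; floating-point registers with mtvsrd, so no load from the stack slots is
+; needed to produce the return values.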
diff --git a/test/CodeGen/PowerPC/ppc64-gep-opt.ll b/test/CodeGen/PowerPC/ppc64-gep-opt.ll
index 1a78310ddf32..d1ae1bcbd88c 100644
--- a/test/CodeGen/PowerPC/ppc64-gep-opt.ll
+++ b/test/CodeGen/PowerPC/ppc64-gep-opt.ll
@@ -84,9 +84,9 @@ exit:
; CHECK-NoAA: add i64 [[TMP:%[a-zA-Z0-9]+]], 528
; CHECK-NoAA: add i64 [[TMP]], 532
; CHECK-NoAA: if.true:
-; CHECK-NoAA: {{%sunk[a-zA-Z0-9]+}} = add i64 [[TMP]], 532
+; CHECK-NoAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8, i8* {{.*}}, i64 532
; CHECK-NoAA: exit:
-; CHECK-NoAA: {{%sunk[a-zA-Z0-9]+}} = add i64 [[TMP]], 528
+; CHECK-NoAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8, i8* {{.*}}, i64 528
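+; The sunk address computations are now emitted as i8 getelementptrs rather
+; than raw i64 adds, presumably so later passes can keep treating the sunk
+; values as pointers.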
; CHECK-UseAA-LABEL: test_GEP_across_BB(
; CHECK-UseAA: [[PTR0:%[a-zA-Z0-9]+]] = getelementptr
diff --git a/test/CodeGen/PowerPC/ppc64le-aggregates.ll b/test/CodeGen/PowerPC/ppc64le-aggregates.ll
index 25b3e5d89331..6fcbdda4e34f 100644
--- a/test/CodeGen/PowerPC/ppc64le-aggregates.ll
+++ b/test/CodeGen/PowerPC/ppc64le-aggregates.ll
@@ -284,10 +284,7 @@ entry:
; CHECK-DAG: lfs 12, 12({{[0-9]+}})
; CHECK-DAG: lfs 13, 16({{[0-9]+}})
-; CHECK-DAG: lwz [[REG0:[0-9]+]], 0({{[0-9]+}})
-; CHECK-DAG: lwz [[REG1:[0-9]+]], 4({{[0-9]+}})
-; CHECK-DAG: sldi [[REG2:[0-9]+]], [[REG1]], 32
-; CHECK-DAG: or 10, [[REG0]], [[REG2]]
+; CHECK-DAG: ld 10, 0({{[0-9]+}})
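+; The two 32-bit halves that were previously loaded with lwz and merged
+; with sldi/or are now fetched with a single 64-bit ld.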
; CHECK: bl test2
declare void @test2([8 x float], [5 x float], [2 x float])
diff --git a/test/CodeGen/PowerPC/pr30451.ll b/test/CodeGen/PowerPC/pr30451.ll
index 930553451cf8..9b07df00f9c3 100644
--- a/test/CodeGen/PowerPC/pr30451.ll
+++ b/test/CodeGen/PowerPC/pr30451.ll
@@ -3,11 +3,11 @@ define i8 @atomic_min_i8() {
top:
%0 = alloca i8, align 2
%1 = bitcast i8* %0 to i8*
- call void @llvm.lifetime.start(i64 2, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 2, i8* %1)
store i8 -1, i8* %0, align 2
%2 = atomicrmw min i8* %0, i8 0 acq_rel
%3 = load atomic i8, i8* %0 acquire, align 8
- call void @llvm.lifetime.end(i64 2, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 2, i8* %1)
ret i8 %3
; CHECK-LABEL: atomic_min_i8
; CHECK: lbarx [[DST:[0-9]+]],
@@ -19,11 +19,11 @@ define i16 @atomic_min_i16() {
top:
%0 = alloca i16, align 2
%1 = bitcast i16* %0 to i8*
- call void @llvm.lifetime.start(i64 2, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 2, i8* %1)
store i16 -1, i16* %0, align 2
%2 = atomicrmw min i16* %0, i16 0 acq_rel
%3 = load atomic i16, i16* %0 acquire, align 8
- call void @llvm.lifetime.end(i64 2, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 2, i8* %1)
ret i16 %3
; CHECK-LABEL: atomic_min_i16
; CHECK: lharx [[DST:[0-9]+]],
@@ -36,11 +36,11 @@ define i8 @atomic_max_i8() {
top:
%0 = alloca i8, align 2
%1 = bitcast i8* %0 to i8*
- call void @llvm.lifetime.start(i64 2, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 2, i8* %1)
store i8 -1, i8* %0, align 2
%2 = atomicrmw max i8* %0, i8 0 acq_rel
%3 = load atomic i8, i8* %0 acquire, align 8
- call void @llvm.lifetime.end(i64 2, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 2, i8* %1)
ret i8 %3
; CHECK-LABEL: atomic_max_i8
; CHECK: lbarx [[DST:[0-9]+]],
@@ -52,11 +52,11 @@ define i16 @atomic_max_i16() {
top:
%0 = alloca i16, align 2
%1 = bitcast i16* %0 to i8*
- call void @llvm.lifetime.start(i64 2, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 2, i8* %1)
store i16 -1, i16* %0, align 2
%2 = atomicrmw max i16* %0, i16 0 acq_rel
%3 = load atomic i16, i16* %0 acquire, align 8
- call void @llvm.lifetime.end(i64 2, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 2, i8* %1)
ret i16 %3
; CHECK-LABEL: atomic_max_i16
; CHECK: lharx [[DST:[0-9]+]],
@@ -65,5 +65,5 @@ define i16 @atomic_max_i16() {
; CHECK-NEXT: ble 0
}
-declare void @llvm.lifetime.start(i64, i8*)
-declare void @llvm.lifetime.end(i64, i8*)
+declare void @llvm.lifetime.start.p0i8(i64, i8*)
+declare void @llvm.lifetime.end.p0i8(i64, i8*)
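+; The lifetime intrinsics are now overloaded on the pointer operand, so the
+; declarations and calls carry the .p0i8 suffix (i8* in address space 0).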
diff --git a/test/CodeGen/PowerPC/pr32063.ll b/test/CodeGen/PowerPC/pr32063.ll
new file mode 100644
index 000000000000..f031ec83c55e
--- /dev/null
+++ b/test/CodeGen/PowerPC/pr32063.ll
@@ -0,0 +1,16 @@
+; RUN: llc -O2 < %s | FileCheck %s
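+; Check that a bswap of a value with only its high halfword set, truncated
+; to i16 and stored, becomes a shift plus a byte-reversed halfword store
+; (sthbrx) rather than a full-word stwbrx.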
+target triple = "powerpc64le-linux-gnu"
+
+define void @foo(i32 %v, i16* %p) {
+ %1 = and i32 %v, -65536
+ %2 = tail call i32 @llvm.bswap.i32(i32 %1)
+ %conv = trunc i32 %2 to i16
+ store i16 %conv, i16* %p
+ ret void
+
+; CHECK: srwi
+; CHECK: sthbrx
+; CHECK-NOT: stwbrx
+}
+
+declare i32 @llvm.bswap.i32(i32)
diff --git a/test/CodeGen/PowerPC/pr32140.ll b/test/CodeGen/PowerPC/pr32140.ll
new file mode 100644
index 000000000000..827a90404e4b
--- /dev/null
+++ b/test/CodeGen/PowerPC/pr32140.ll
@@ -0,0 +1,59 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=powerpc64le-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
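+
+; These functions check that a sign-extended load followed by a wider
+; bswap, truncate, and store folds into a rotate/shift plus a byte-reversed
+; store (stwbrx/sthbrx) instead of materializing the full-width bswap.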
+
+@as = common local_unnamed_addr global i16 0, align 2
+@bs = common local_unnamed_addr global i16 0, align 2
+@ai = common local_unnamed_addr global i32 0, align 4
+@bi = common local_unnamed_addr global i32 0, align 4
+
+define void @bswapStorei64Toi32() {
+; CHECK-LABEL: bswapStorei64Toi32:
+; CHECK: # BB#0: # %entry
+; CHECK: lwa 3, 0(3)
+; CHECK-NEXT: rldicl 3, 3, 32, 32
+; CHECK-NEXT: stwbrx 3, 0, 4
+; CHECK-NEXT: blr
+entry:
+ %0 = load i32, i32* @ai, align 4
+ %conv.i = sext i32 %0 to i64
+ %or26.i = tail call i64 @llvm.bswap.i64(i64 %conv.i)
+ %conv = trunc i64 %or26.i to i32
+ store i32 %conv, i32* @bi, align 4
+ ret void
+}
+
+define void @bswapStorei32Toi16() {
+; CHECK-LABEL: bswapStorei32Toi16:
+; CHECK: # BB#0: # %entry
+; CHECK: lha 3, 0(3)
+; CHECK-NEXT: srwi 3, 3, 16
+; CHECK-NEXT: sthbrx 3, 0, 4
+; CHECK-NEXT: blr
+entry:
+ %0 = load i16, i16* @as, align 2
+ %conv.i = sext i16 %0 to i32
+ %or26.i = tail call i32 @llvm.bswap.i32(i32 %conv.i)
+ %conv = trunc i32 %or26.i to i16
+ store i16 %conv, i16* @bs, align 2
+ ret void
+}
+
+define void @bswapStorei64Toi16() {
+; CHECK-LABEL: bswapStorei64Toi16:
+; CHECK: # BB#0: # %entry
+; CHECK: lha 3, 0(3)
+; CHECK-NEXT: rldicl 3, 3, 16, 48
+; CHECK-NEXT: sthbrx 3, 0, 4
+; CHECK-NEXT: blr
+entry:
+ %0 = load i16, i16* @as, align 2
+ %conv.i = sext i16 %0 to i64
+ %or26.i = tail call i64 @llvm.bswap.i64(i64 %conv.i)
+ %conv = trunc i64 %or26.i to i16
+ store i16 %conv, i16* @bs, align 2
+ ret void
+}
+
+declare i32 @llvm.bswap.i32(i32)
+declare i64 @llvm.bswap.i64(i64)
diff --git a/test/CodeGen/PowerPC/pristine-and-livein.mir b/test/CodeGen/PowerPC/pristine-and-livein.mir
new file mode 100644
index 000000000000..6d93bb68c102
--- /dev/null
+++ b/test/CodeGen/PowerPC/pristine-and-livein.mir
@@ -0,0 +1,330 @@
+# RUN: llc -run-pass=post-RA-sched %s -o - | FileCheck %s
+
+# CHECK: callee-saved-register: '[[REG:%x[0-9]+]]'
+# CHECK: callee-saved-register: '{{%x[0-9]+}}'
+# CHECK-NOT: [[REG]] = LI8 0
+# CHECK: STD killed [[REG]],
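+#
+# The CHECK lines above make sure the post-RA scheduler does not reuse the
+# callee-saved register captured as [[REG]] as a scratch (e.g. by rewriting
+# an LI8 0 into it, presumably via anti-dependency breaking) before that
+# register is spilled with STD.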
+--- |
+ ; ModuleID = '<stdin>'
+ source_filename = "bugpoint-output-4d91ae2.bc"
+ target datalayout = "e-m:e-i64:64-n32:64"
+ target triple = "powerpc64le--linux-gnu"
+
+ ; Function Attrs: norecurse nounwind readonly
+ define i64 @adler32_z(i64 %adler, i8* readonly %buf, i64 %len) local_unnamed_addr #0 {
+ entry:
+ %shr = lshr i64 %adler, 16
+ %and = and i64 %shr, 65535
+ %and1 = and i64 %adler, 65535
+ br i1 undef, label %if.then, label %if.end15
+
+ if.then: ; preds = %entry
+ %add5 = add nsw i64 %and1, %and
+ %sub9 = add nsw i64 %add5, 281474976645135
+ %shl = shl i64 %add5, 16
+ %or = or i64 %shl, %and1
+ br label %cleanup
+
+ if.end15: ; preds = %entry
+ br i1 undef, label %while.cond.preheader, label %while.cond30.preheader
+
+ while.cond30.preheader: ; preds = %if.end15
+ br i1 undef, label %while.body33.preheader, label %while.body109.preheader
+
+ while.body33.preheader: ; preds = %while.cond30.preheader
+ br label %while.body33
+
+ while.cond.preheader: ; preds = %if.end15
+ %sub25 = add i64 %and1, -65521
+ %rem = urem i64 %and, 65521
+ %shl27 = shl nuw nsw i64 %rem, 16
+ %or28 = or i64 %shl27, %and1
+ br label %cleanup
+
+ while.body33: ; preds = %do.end, %while.body33.preheader
+ %indvar = phi i64 [ %indvar.next, %do.end ], [ 0, %while.body33.preheader ]
+ %sum2.2385 = phi i64 [ %rem102, %do.end ], [ %and, %while.body33.preheader ]
+ %len.addr.1384 = phi i64 [ %sub34, %do.end ], [ %len, %while.body33.preheader ]
+ %buf.addr.1383 = phi i8* [ %scevgep390, %do.end ], [ %buf, %while.body33.preheader ]
+ %adler.addr.3382 = phi i64 [ %rem101, %do.end ], [ %and1, %while.body33.preheader ]
+ %0 = mul i64 %indvar, 5552
+ %1 = add i64 %0, -13
+ %scevgep2 = getelementptr i8, i8* %buf, i64 %1
+ %sub34 = add i64 %len.addr.1384, -5552
+ call void @llvm.ppc.mtctr.i64(i64 347)
+ br label %do.body
+
+ do.body: ; preds = %do.body, %while.body33
+ %adler.addr.4 = phi i64 [ %adler.addr.3382, %while.body33 ], [ %add49, %do.body ]
+ %sum2.3 = phi i64 [ %sum2.2385, %while.body33 ], [ %add98, %do.body ]
+ %tmp15.phi = phi i8* [ %scevgep2, %while.body33 ], [ %tmp15.inc, %do.body ]
+ %tmp15.inc = getelementptr i8, i8* %tmp15.phi, i64 16
+ %add38 = add i64 %adler.addr.4, %sum2.3
+ %add42 = add i64 %add38, %adler.addr.4
+ %add46 = add i64 %add42, %adler.addr.4
+ %tmp15 = load i8, i8* %tmp15.inc, align 1, !tbaa !1
+ %conv48 = zext i8 %tmp15 to i64
+ %add49 = add i64 %adler.addr.4, %conv48
+ %add50 = add i64 %add46, %add49
+ %add54 = add i64 %add50, %add49
+ %add58 = add i64 %add54, %add49
+ %add62 = add i64 %add58, %add49
+ %add66 = add i64 %add62, %add49
+ %add70 = add i64 %add66, %add49
+ %add74 = add i64 %add70, %add49
+ %add78 = add i64 %add74, %add49
+ %add82 = add i64 %add78, %add49
+ %add86 = add i64 %add82, %add49
+ %add90 = add i64 %add86, %add49
+ %add94 = add i64 %add90, %add49
+ %add98 = add i64 %add94, %add49
+ %2 = call i1 @llvm.ppc.is.decremented.ctr.nonzero()
+ br i1 %2, label %do.body, label %do.end
+
+ do.end: ; preds = %do.body
+ %scevgep390 = getelementptr i8, i8* %buf.addr.1383, i64 5552
+ %rem101 = urem i64 %add49, 65521
+ %rem102 = urem i64 %add98, 65521
+ %cmp31 = icmp ugt i64 %sub34, 5551
+ %indvar.next = add i64 %indvar, 1
+ br i1 %cmp31, label %while.body33, label %while.end103
+
+ while.end103: ; preds = %do.end
+ br i1 undef, label %if.end188, label %while.body109.preheader
+
+ while.body109.preheader: ; preds = %while.end103, %while.cond30.preheader
+ %buf.addr.1.lcssa394400 = phi i8* [ %buf, %while.cond30.preheader ], [ %scevgep390, %while.end103 ]
+ %arrayidx151 = getelementptr inbounds i8, i8* %buf.addr.1.lcssa394400, i64 10
+ %tmp45 = load i8, i8* %arrayidx151, align 1, !tbaa !1
+ %conv152 = zext i8 %tmp45 to i64
+ br label %while.body109
+
+ while.body109: ; preds = %while.body109, %while.body109.preheader
+ %adler.addr.5373 = phi i64 [ %add153, %while.body109 ], [ undef, %while.body109.preheader ]
+ %add153 = add i64 %adler.addr.5373, %conv152
+ br label %while.body109
+
+ if.end188: ; preds = %while.end103
+ %shl189 = shl nuw nsw i64 %rem102, 16
+ %or190 = or i64 %shl189, %rem101
+ br label %cleanup
+
+ cleanup: ; preds = %if.end188, %while.cond.preheader, %if.then
+ %retval.0 = phi i64 [ %or, %if.then ], [ %or28, %while.cond.preheader ], [ %or190, %if.end188 ]
+ ret i64 %retval.0
+ }
+
+ ; Function Attrs: nounwind
+ declare void @llvm.ppc.mtctr.i64(i64) #1
+
+ ; Function Attrs: nounwind
+ declare i1 @llvm.ppc.is.decremented.ctr.nonzero() #1
+
+ ; Function Attrs: nounwind
+ declare void @llvm.stackprotector(i8*, i8**) #1
+
+ attributes #0 = { norecurse nounwind readonly "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+power8-vector,+vsx,-power9-vector,-qpx" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #1 = { nounwind }
+
+ !llvm.ident = !{!0}
+
+ !0 = !{!"clang version 5.0.0 "}
+ !1 = !{!2, !2, i64 0}
+ !2 = !{!"omnipotent char", !3, i64 0}
+ !3 = !{!"Simple C/C++ TBAA"}
+
+...
+---
+name: adler32_z
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%x3' }
+ - { reg: '%x4' }
+ - { reg: '%x5' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+fixedStack:
+ - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16, callee-saved-register: '%x30' }
+ - { id: 1, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '%x29' }
+ - { id: 2, offset: -8, size: 8, alignment: 8, isImmutable: true, isAliased: false }
+body: |
+ bb.0.entry:
+ successors: %bb.1.if.then(0x40000000), %bb.3.if.end15(0x40000000)
+ liveins: %x3, %x4, %x5, %x29, %x30
+
+ %x6 = RLWINM8 %x3, 16, 16, 31
+ %x3 = RLDICL killed %x3, 0, 48
+ BC undef %cr5lt, %bb.3.if.end15
+
+ bb.1.if.then:
+ successors: %bb.2.if.then(0x80000000)
+ liveins: %x3, %x6, %x29, %x30
+
+ %x4 = ADD8 %x3, killed %x6
+
+ bb.2.if.then:
+ liveins: %lr8, %rm, %x3, %x4
+
+ %x4 = RLDICR killed %x4, 16, 47
+ %x3 = OR8 killed %x4, killed %x3
+ BLR8 implicit %lr8, implicit %rm, implicit %x3
+
+ bb.3.if.end15:
+ successors: %bb.6.while.cond.preheader(0x40000000), %bb.4.while.cond30.preheader(0x40000000)
+ liveins: %x3, %x4, %x5, %x6, %x29, %x30
+
+ BC undef %cr5lt, %bb.6.while.cond.preheader
+
+ bb.4.while.cond30.preheader:
+ successors: %bb.7.while.body33.preheader(0x40000000), %bb.5(0x40000000)
+ liveins: %x3, %x4, %x5, %x6, %x29, %x30
+
+ BCn undef %cr5lt, %bb.7.while.body33.preheader
+
+ bb.5:
+ successors: %bb.12.while.body109.preheader(0x80000000)
+ liveins: %x4, %x29, %x30
+
+ %x7 = OR8 %x4, killed %x4
+ B %bb.12.while.body109.preheader
+
+ bb.6.while.cond.preheader:
+ successors: %bb.2.if.then(0x80000000)
+ liveins: %x3, %x6, %x29, %x30
+
+ %x4 = LIS8 15
+ %x4 = ORI8 killed %x4, 225
+ %x4 = RLDICR killed %x4, 32, 31
+ %x4 = ORIS8 killed %x4, 3375
+ %x4 = ORI8 killed %x4, 50637
+ %x4 = MULHDU %x6, killed %x4
+ %x5 = SUBF8 %x4, %x6
+ %x5 = RLDICL killed %x5, 63, 1
+ %x4 = ADD8 killed %x5, killed %x4
+ %x5 = LI8 0
+ %x4 = RLDICL killed %x4, 49, 15
+ %x5 = ORI8 killed %x5, 65521
+ %x4 = MULLD killed %x4, killed %x5
+ %x4 = SUBF8 killed %x4, killed %x6
+ B %bb.2.if.then
+
+ bb.7.while.body33.preheader:
+ successors: %bb.8.while.body33(0x80000000)
+ liveins: %x3, %x4, %x5, %x6, %x29, %x30
+
+ STD killed %x29, -24, %x1 :: (store 8 into %fixed-stack.1)
+ STD killed %x30, -16, %x1 :: (store 8 into %fixed-stack.0, align 16)
+ %x7 = LIS8 15
+ %x7 = ORI8 killed %x7, 225
+ %x7 = RLDICR killed %x7, 32, 31
+ %x8 = LI8 0
+ %x7 = ORIS8 killed %x7, 3375
+ %x9 = LI8 347
+ %x10 = ORI8 killed %x7, 50637
+ %x11 = ORI8 %x8, 65521
+ %x7 = OR8 %x4, %x4
+
+ bb.8.while.body33:
+ successors: %bb.9.do.body(0x80000000)
+ liveins: %x3, %x4, %x5, %x6, %x7, %x8, %x9, %x10, %x11
+
+ %x12 = MULLI8 %x8, 5552
+ %x12 = ADD8 %x4, killed %x12
+ %x12 = ADDI8 killed %x12, -13
+ %x5 = ADDI8 killed %x5, -5552
+ MTCTR8loop %x9, implicit-def dead %ctr8
+
+ bb.9.do.body:
+ successors: %bb.9.do.body(0x7c000000), %bb.10.do.end(0x04000000)
+ liveins: %x3, %x4, %x5, %x6, %x7, %x8, %x9, %x10, %x11, %x12
+
+ %x0, %x12 = LBZU8 16, killed %x12 :: (load 1 from %ir.tmp15.inc, !tbaa !1)
+ %x6 = ADD8 %x3, killed %x6
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x3 = ADD8 killed %x3, killed %x0
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ BDNZ8 %bb.9.do.body, implicit-def %ctr8, implicit %ctr8
+
+ bb.10.do.end:
+ successors: %bb.8.while.body33(0x7c000000), %bb.11.while.end103(0x04000000)
+ liveins: %x3, %x4, %x5, %x6, %x7, %x8, %x9, %x10, %x11
+
+ %x12 = MULHDU %x3, %x10
+ %x0 = MULHDU %x6, %x10
+ %x30 = SUBF8 %x12, %x3
+ %x29 = SUBF8 %x0, %x6
+ %x30 = RLDICL killed %x30, 63, 1
+ %x29 = RLDICL killed %x29, 63, 1
+ %x12 = ADD8 killed %x30, killed %x12
+ %x0 = ADD8 killed %x29, killed %x0
+ %cr0 = CMPLDI %x5, 5551
+ %x12 = RLDICL killed %x12, 49, 15
+ %x0 = RLDICL killed %x0, 49, 15
+ %x12 = MULLD killed %x12, %x11
+ %x0 = MULLD killed %x0, %x11
+ %x7 = ADDI8 killed %x7, 5552
+ %x3 = SUBF8 killed %x12, killed %x3
+ %x6 = SUBF8 killed %x0, killed %x6
+ %x8 = ADDI8 killed %x8, 1
+ BCC 44, killed %cr0, %bb.8.while.body33
+
+ bb.11.while.end103:
+ successors: %bb.14.if.end188(0x40000000), %bb.12.while.body109.preheader(0x40000000)
+ liveins: %x3, %x6, %x7
+
+ %x30 = LD -16, %x1 :: (load 8 from %fixed-stack.0, align 16)
+ %x29 = LD -24, %x1 :: (load 8 from %fixed-stack.1)
+ BC undef %cr5lt, %bb.14.if.end188
+
+ bb.12.while.body109.preheader:
+ successors: %bb.13.while.body109(0x80000000)
+ liveins: %x7, %x29, %x30
+
+ %x3 = LBZ8 10, killed %x7 :: (load 1 from %ir.arrayidx151, !tbaa !1)
+ %x4 = IMPLICIT_DEF
+
+ bb.13.while.body109:
+ successors: %bb.13.while.body109(0x80000000)
+ liveins: %x3, %x4, %x29, %x30
+
+ %x4 = ADD8 killed %x4, %x3
+ B %bb.13.while.body109
+
+ bb.14.if.end188:
+ liveins: %x3, %x6, %x29, %x30
+
+ %x4 = RLDICR killed %x6, 16, 47
+ %x3 = OR8 killed %x4, killed %x3
+ BLR8 implicit %lr8, implicit %rm, implicit %x3
+
+...
diff --git a/test/CodeGen/PowerPC/select-i1-vs-i1.ll b/test/CodeGen/PowerPC/select-i1-vs-i1.ll
index a8f1ef1dd284..b7beb8165fdf 100644
--- a/test/CodeGen/PowerPC/select-i1-vs-i1.ll
+++ b/test/CodeGen/PowerPC/select-i1-vs-i1.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
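+; As in ppc-crbits-onoff.ll above, the -ppc-gen-isel=false run verifies via
+; the CHECK-NO-ISEL patterns that each isel is expanded into a bc/ori/addi
+; sequence.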
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -15,10 +16,17 @@ entry:
ret i32 %cond
; CHECK-LABEL: @testi32slt
+; CHECK-NO-ISEL-LABEL: @testi32slt
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -30,11 +38,18 @@ entry:
%cond = select i1 %cmp3, i32 %a1, i32 %a2
ret i32 %cond
; CHECK-LABEL: @testi32ult
+; CHECK-NO-ISEL-LABEL: @testi32ult
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -47,10 +61,17 @@ entry:
ret i32 %cond
; CHECK-LABEL: @testi32sle
+; CHECK-NO-ISEL-LABEL: @testi32sle
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -63,10 +84,17 @@ entry:
ret i32 %cond
; CHECK-LABEL: @testi32ule
+; CHECK-NO-ISEL-LABEL: @testi32ule
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -79,10 +107,17 @@ entry:
ret i32 %cond
; CHECK-LABEL: @testi32eq
+; CHECK-NO-ISEL-LABEL: @testi32eq
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: creqv [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -95,10 +130,17 @@ entry:
ret i32 %cond
; CHECK-LABEL: @testi32sge
+; CHECK-NO-ISEL-LABEL: @testi32sge
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -111,10 +153,17 @@ entry:
ret i32 %cond
; CHECK-LABEL: @testi32uge
+; CHECK-NO-ISEL-LABEL: @testi32uge
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -127,10 +176,17 @@ entry:
ret i32 %cond
; CHECK-LABEL: @testi32sgt
+; CHECK-NO-ISEL-LABEL: @testi32sgt
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -143,10 +199,17 @@ entry:
ret i32 %cond
; CHECK-LABEL: @testi32ugt
+; CHECK-NO-ISEL-LABEL: @testi32ugt
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -159,10 +222,17 @@ entry:
ret i32 %cond
; CHECK-LABEL: @testi32ne
+; CHECK-NO-ISEL-LABEL: @testi32ne
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: crxor [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -175,10 +245,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64slt
+; CHECK-NO-ISEL-LABEL: @testi64slt
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -191,10 +268,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64ult
+; CHECK-NO-ISEL-LABEL: @testi64ult
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -207,10 +291,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64sle
+; CHECK-NO-ISEL-LABEL: @testi64sle
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -223,10 +314,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64ule
+; CHECK-NO-ISEL-LABEL: @testi64ule
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -239,10 +337,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64eq
+; CHECK-NO-ISEL-LABEL: @testi64eq
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: creqv [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -255,10 +360,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64sge
+; CHECK-NO-ISEL-LABEL: @testi64sge
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -271,10 +383,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64uge
+; CHECK-NO-ISEL-LABEL: @testi64uge
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -287,10 +406,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64sgt
+; CHECK-NO-ISEL-LABEL: @testi64sgt
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -303,10 +429,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64ugt
+; CHECK-NO-ISEL-LABEL: @testi64ugt
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -319,10 +452,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64ne
+; CHECK-NO-ISEL-LABEL: @testi64ne
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: crxor [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -719,7 +859,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -736,7 +876,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -753,7 +893,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -770,7 +910,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -787,9 +927,9 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crxor [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bc 12, [[REG1]], .LBB[[BB1:[0-9_]+]]
-; CHECK: vor 3, 2, 2
+; CHECK: vmr 3, 2
; CHECK: .LBB[[BB1]]
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -806,7 +946,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -823,7 +963,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -840,7 +980,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -857,7 +997,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -874,7 +1014,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crxor [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -922,7 +1062,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -939,7 +1079,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -956,7 +1096,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -973,7 +1113,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -990,9 +1130,9 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crxor [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bc 12, [[REG1]], .LBB[[BB55:[0-9_]+]]
-; CHECK: vor 3, 2, 2
+; CHECK: vmr 3, 2
; CHECK: .LBB[[BB55]]
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -1009,7 +1149,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -1026,7 +1166,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -1043,7 +1183,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -1060,7 +1200,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -1077,7 +1217,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crxor [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
diff --git a/test/CodeGen/PowerPC/select_const.ll b/test/CodeGen/PowerPC/select_const.ll
new file mode 100644
index 000000000000..29548123be88
--- /dev/null
+++ b/test/CodeGen/PowerPC/select_const.ll
@@ -0,0 +1,789 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs -mattr=+isel | FileCheck %s --check-prefix=ALL --check-prefix=ISEL
+; RUN: llc < %s -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs -mattr=-isel | FileCheck %s --check-prefix=ALL --check-prefix=NO_ISEL
+
+; Select of constants: control flow / conditional moves can always be replaced by logic+math (but it may not always be worth it).
+; Test the zeroext/signext variants of each pattern to see if that makes a difference.
+
+; select Cond, 0, 1 --> zext (!Cond)
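+; (In the code generated below, "not" computes !Cond in the low bit and the
+; clrldi of that bit is the zext.)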
+
+define i32 @select_0_or_1(i1 %cond) {
+; ALL-LABEL: select_0_or_1:
+; ALL: # BB#0:
+; ALL-NEXT: not 3, 3
+; ALL-NEXT: clrldi 3, 3, 63
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 0, i32 1
+ ret i32 %sel
+}
+
+define i32 @select_0_or_1_zeroext(i1 zeroext %cond) {
+; ALL-LABEL: select_0_or_1_zeroext:
+; ALL: # BB#0:
+; ALL-NEXT: xori 3, 3, 1
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 0, i32 1
+ ret i32 %sel
+}
+
+define i32 @select_0_or_1_signext(i1 signext %cond) {
+; ALL-LABEL: select_0_or_1_signext:
+; ALL: # BB#0:
+; ALL-NEXT: not 3, 3
+; ALL-NEXT: clrldi 3, 3, 63
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 0, i32 1
+ ret i32 %sel
+}
+
+; select Cond, 1, 0 --> zext (Cond)
+
+define i32 @select_1_or_0(i1 %cond) {
+; ALL-LABEL: select_1_or_0:
+; ALL: # BB#0:
+; ALL-NEXT: clrldi 3, 3, 63
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 1, i32 0
+ ret i32 %sel
+}
+
+define i32 @select_1_or_0_zeroext(i1 zeroext %cond) {
+; ALL-LABEL: select_1_or_0_zeroext:
+; ALL: # BB#0:
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 1, i32 0
+ ret i32 %sel
+}
+
+define i32 @select_1_or_0_signext(i1 signext %cond) {
+; ALL-LABEL: select_1_or_0_signext:
+; ALL: # BB#0:
+; ALL-NEXT: clrldi 3, 3, 63
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 1, i32 0
+ ret i32 %sel
+}
+
+; select Cond, 0, -1 --> sext (!Cond)
+
+define i32 @select_0_or_neg1(i1 %cond) {
+; ISEL-LABEL: select_0_or_neg1:
+; ISEL: # BB#0:
+; ISEL-NEXT: li 4, 0
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: oris 3, 4, 65535
+; ISEL-NEXT: ori 3, 3, 65535
+; ISEL-NEXT: isel 3, 0, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: select_0_or_neg1:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: li 4, 0
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: oris 3, 4, 65535
+; NO_ISEL-NEXT: ori 3, 3, 65535
+; NO_ISEL-NEXT: bc 12, 1, .LBB6_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB6_1:
+; NO_ISEL-NEXT: addi 3, 0, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i32 0, i32 -1
+ ret i32 %sel
+}
+
+define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) {
+; ISEL-LABEL: select_0_or_neg1_zeroext:
+; ISEL: # BB#0:
+; ISEL-NEXT: li 4, 0
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: oris 3, 4, 65535
+; ISEL-NEXT: ori 3, 3, 65535
+; ISEL-NEXT: isel 3, 0, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: select_0_or_neg1_zeroext:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: li 4, 0
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: oris 3, 4, 65535
+; NO_ISEL-NEXT: ori 3, 3, 65535
+; NO_ISEL-NEXT: bc 12, 1, .LBB7_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB7_1:
+; NO_ISEL-NEXT: addi 3, 0, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i32 0, i32 -1
+ ret i32 %sel
+}
+
+define i32 @select_0_or_neg1_signext(i1 signext %cond) {
+; ISEL-LABEL: select_0_or_neg1_signext:
+; ISEL: # BB#0:
+; ISEL-NEXT: li 4, 0
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: oris 3, 4, 65535
+; ISEL-NEXT: ori 3, 3, 65535
+; ISEL-NEXT: isel 3, 0, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: select_0_or_neg1_signext:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: li 4, 0
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: oris 3, 4, 65535
+; NO_ISEL-NEXT: ori 3, 3, 65535
+; NO_ISEL-NEXT: bc 12, 1, .LBB8_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB8_1:
+; NO_ISEL-NEXT: addi 3, 0, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i32 0, i32 -1
+ ret i32 %sel
+}
+
+; select Cond, -1, 0 --> sext (Cond)
+
+define i32 @select_neg1_or_0(i1 %cond) {
+; ISEL-LABEL: select_neg1_or_0:
+; ISEL: # BB#0:
+; ISEL-NEXT: li 4, 0
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: oris 3, 4, 65535
+; ISEL-NEXT: ori 3, 3, 65535
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: select_neg1_or_0:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: li 4, 0
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: oris 3, 4, 65535
+; NO_ISEL-NEXT: ori 3, 3, 65535
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i32 -1, i32 0
+ ret i32 %sel
+}
+
+define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) {
+; ISEL-LABEL: select_neg1_or_0_zeroext:
+; ISEL: # BB#0:
+; ISEL-NEXT: li 4, 0
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: oris 3, 4, 65535
+; ISEL-NEXT: ori 3, 3, 65535
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: select_neg1_or_0_zeroext:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: li 4, 0
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: oris 3, 4, 65535
+; NO_ISEL-NEXT: ori 3, 3, 65535
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i32 -1, i32 0
+ ret i32 %sel
+}
+
+define i32 @select_neg1_or_0_signext(i1 signext %cond) {
+; ISEL-LABEL: select_neg1_or_0_signext:
+; ISEL: # BB#0:
+; ISEL-NEXT: li 4, 0
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: oris 3, 4, 65535
+; ISEL-NEXT: ori 3, 3, 65535
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: select_neg1_or_0_signext:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: li 4, 0
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: oris 3, 4, 65535
+; NO_ISEL-NEXT: ori 3, 3, 65535
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i32 -1, i32 0
+ ret i32 %sel
+}
+
+; select Cond, C+1, C --> add (zext Cond), C
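+; (A worked instance with the constants below, C = 41: zext Cond is 0 or 1,
+; so addi 3, 3, 41 yields 41 or 42.)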
+
+define i32 @select_Cplus1_C(i1 %cond) {
+; ALL-LABEL: select_Cplus1_C:
+; ALL: # BB#0:
+; ALL-NEXT: clrldi 3, 3, 63
+; ALL-NEXT: addi 3, 3, 41
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 42, i32 41
+ ret i32 %sel
+}
+
+define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) {
+; ALL-LABEL: select_Cplus1_C_zeroext:
+; ALL: # BB#0:
+; ALL-NEXT: addi 3, 3, 41
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 42, i32 41
+ ret i32 %sel
+}
+
+define i32 @select_Cplus1_C_signext(i1 signext %cond) {
+; ALL-LABEL: select_Cplus1_C_signext:
+; ALL: # BB#0:
+; ALL-NEXT: subfic 3, 3, 41
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 42, i32 41
+ ret i32 %sel
+}
+
+; select Cond, C, C+1 --> add (sext Cond), C
+
+define i32 @select_C_Cplus1(i1 %cond) {
+; ALL-LABEL: select_C_Cplus1:
+; ALL: # BB#0:
+; ALL-NEXT: clrldi 3, 3, 63
+; ALL-NEXT: subfic 3, 3, 42
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 41, i32 42
+ ret i32 %sel
+}
+
+define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) {
+; ALL-LABEL: select_C_Cplus1_zeroext:
+; ALL: # BB#0:
+; ALL-NEXT: subfic 3, 3, 42
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 41, i32 42
+ ret i32 %sel
+}
+
+define i32 @select_C_Cplus1_signext(i1 signext %cond) {
+; ALL-LABEL: select_C_Cplus1_signext:
+; ALL: # BB#0:
+; ALL-NEXT: addi 3, 3, 42
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 41, i32 42
+ ret i32 %sel
+}
+
+; In general, select of 2 constants could be:
+; select Cond, C1, C2 --> add (mul (zext Cond), C1-C2), C2 --> add (and (sext Cond), C1-C2), C2
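+; A worked instance of that formula with the constants from select_C1_C2 below
+; (C1 = 421, C2 = 42, so C1-C2 = 379):
+;   Cond = 1: add (mul 1, 379), 42 = 421;  Cond = 0: add (mul 0, 379), 42 = 42.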
+
+define i32 @select_C1_C2(i1 %cond) {
+; ISEL-LABEL: select_C1_C2:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: li 4, 421
+; ISEL-NEXT: li 3, 42
+; ISEL-NEXT: isel 3, 4, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: select_C1_C2:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 4, 421
+; NO_ISEL-NEXT: li 3, 42
+; NO_ISEL-NEXT: bc 12, 1, .LBB18_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB18_1:
+; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i32 421, i32 42
+ ret i32 %sel
+}
+
+define i32 @select_C1_C2_zeroext(i1 zeroext %cond) {
+; ISEL-LABEL: select_C1_C2_zeroext:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: li 4, 421
+; ISEL-NEXT: li 3, 42
+; ISEL-NEXT: isel 3, 4, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: select_C1_C2_zeroext:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 4, 421
+; NO_ISEL-NEXT: li 3, 42
+; NO_ISEL-NEXT: bc 12, 1, .LBB19_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB19_1:
+; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i32 421, i32 42
+ ret i32 %sel
+}
+
+define i32 @select_C1_C2_signext(i1 signext %cond) {
+; ISEL-LABEL: select_C1_C2_signext:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: li 4, 421
+; ISEL-NEXT: li 3, 42
+; ISEL-NEXT: isel 3, 4, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: select_C1_C2_signext:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 4, 421
+; NO_ISEL-NEXT: li 3, 42
+; NO_ISEL-NEXT: bc 12, 1, .LBB20_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB20_1:
+; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i32 421, i32 42
+ ret i32 %sel
+}
+
+; A binary operator with a constant operand applied after the select should always get folded into the select.
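+; For example, in sel_constants_add_constant below, (select Cond, -4, 23) + 5
+; folds to select Cond, 1, 28, so only the folded constants are materialized
+; (li 4, 1 and li 3, 28).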
+
+define i8 @sel_constants_add_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_add_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: li 4, 1
+; ISEL-NEXT: li 3, 28
+; ISEL-NEXT: isel 3, 4, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_add_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 4, 1
+; NO_ISEL-NEXT: li 3, 28
+; NO_ISEL-NEXT: bc 12, 1, .LBB21_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB21_1:
+; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = add i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_sub_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_sub_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: li 4, 0
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: oris 3, 4, 65535
+; ISEL-NEXT: li 4, 18
+; ISEL-NEXT: ori 3, 3, 65527
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_sub_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: li 4, 0
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: oris 3, 4, 65535
+; NO_ISEL-NEXT: li 4, 18
+; NO_ISEL-NEXT: ori 3, 3, 65527
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = sub i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_mul_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_mul_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: lis 4, 16383
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: ori 3, 4, 65531
+; ISEL-NEXT: li 4, 115
+; ISEL-NEXT: sldi 3, 3, 2
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_mul_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: lis 4, 16383
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: ori 3, 4, 65531
+; NO_ISEL-NEXT: li 4, 115
+; NO_ISEL-NEXT: sldi 3, 3, 2
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = mul i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_sdiv_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_sdiv_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: li 3, 4
+; ISEL-NEXT: isel 3, 0, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_sdiv_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 3, 4
+; NO_ISEL-NEXT: bc 12, 1, .LBB24_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB24_1:
+; NO_ISEL-NEXT: addi 3, 0, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = sdiv i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_udiv_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_udiv_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: li 4, 50
+; ISEL-NEXT: li 3, 4
+; ISEL-NEXT: isel 3, 4, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_udiv_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 4, 50
+; NO_ISEL-NEXT: li 3, 4
+; NO_ISEL-NEXT: bc 12, 1, .LBB25_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB25_1:
+; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = udiv i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_srem_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_srem_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: lis 4, 16383
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: ori 3, 4, 65535
+; ISEL-NEXT: li 4, 3
+; ISEL-NEXT: sldi 3, 3, 2
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_srem_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: lis 4, 16383
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: ori 3, 4, 65535
+; NO_ISEL-NEXT: li 4, 3
+; NO_ISEL-NEXT: sldi 3, 3, 2
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = srem i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_urem_constant(i1 %cond) {
+; ALL-LABEL: sel_constants_urem_constant:
+; ALL: # BB#0:
+; ALL-NEXT: rlwinm 3, 3, 0, 31, 31
+; ALL-NEXT: subfic 3, 3, 3
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = urem i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_and_constant(i1 %cond) {
+; ALL-LABEL: sel_constants_and_constant:
+; ALL: # BB#0:
+; ALL-NEXT: rlwinm 3, 3, 0, 31, 31
+; ALL-NEXT: subfic 3, 3, 5
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = and i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_or_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_or_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: li 4, 0
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: oris 3, 4, 65535
+; ISEL-NEXT: li 4, 23
+; ISEL-NEXT: ori 3, 3, 65533
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_or_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: li 4, 0
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: oris 3, 4, 65535
+; NO_ISEL-NEXT: li 4, 23
+; NO_ISEL-NEXT: ori 3, 3, 65533
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = or i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_xor_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_xor_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: li 4, 0
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: oris 3, 4, 65535
+; ISEL-NEXT: li 4, 18
+; ISEL-NEXT: ori 3, 3, 65529
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_xor_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: li 4, 0
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: oris 3, 4, 65535
+; NO_ISEL-NEXT: li 4, 18
+; NO_ISEL-NEXT: ori 3, 3, 65529
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = xor i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_shl_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_shl_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: lis 5, 511
+; ISEL-NEXT: lis 4, 2047
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: ori 3, 4, 65535
+; ISEL-NEXT: ori 12, 5, 65535
+; ISEL-NEXT: sldi 3, 3, 5
+; ISEL-NEXT: sldi 4, 12, 7
+; ISEL-NEXT: isel 3, 4, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_shl_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: lis 5, 511
+; NO_ISEL-NEXT: lis 4, 2047
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: ori 3, 4, 65535
+; NO_ISEL-NEXT: ori 12, 5, 65535
+; NO_ISEL-NEXT: sldi 3, 3, 5
+; NO_ISEL-NEXT: sldi 4, 12, 7
+; NO_ISEL-NEXT: bc 12, 1, .LBB31_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB31_1:
+; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = shl i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_lshr_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_lshr_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: li 4, 7
+; ISEL-NEXT: li 3, 0
+; ISEL-NEXT: isel 3, 4, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_lshr_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 4, 7
+; NO_ISEL-NEXT: li 3, 0
+; NO_ISEL-NEXT: bc 12, 1, .LBB32_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB32_1:
+; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = lshr i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_ashr_constant(i1 %cond) {
+; ALL-LABEL: sel_constants_ashr_constant:
+; ALL: # BB#0:
+; ALL-NEXT: clrldi 3, 3, 63
+; ALL-NEXT: neg 3, 3
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = ashr i8 %sel, 5
+ ret i8 %bo
+}
+
+define double @sel_constants_fadd_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_fadd_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: addis 4, 2, .LCPI34_0@toc@ha
+; ISEL-NEXT: addis 3, 2, .LCPI34_1@toc@ha
+; ISEL-NEXT: addi 4, 4, .LCPI34_0@toc@l
+; ISEL-NEXT: addi 3, 3, .LCPI34_1@toc@l
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: lxsdx 1, 0, 3
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_fadd_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: addis 4, 2, .LCPI34_0@toc@ha
+; NO_ISEL-NEXT: addis 3, 2, .LCPI34_1@toc@ha
+; NO_ISEL-NEXT: addi 4, 4, .LCPI34_0@toc@l
+; NO_ISEL-NEXT: addi 3, 3, .LCPI34_1@toc@l
+; NO_ISEL-NEXT: bc 12, 1, .LBB34_2
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: b .LBB34_2
+; NO_ISEL-NEXT: .LBB34_2:
+; NO_ISEL-NEXT: lxsdx 1, 0, 3
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, double -4.0, double 23.3
+ %bo = fadd double %sel, 5.1
+ ret double %bo
+}
+
+define double @sel_constants_fsub_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_fsub_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: addis 4, 2, .LCPI35_0@toc@ha
+; ISEL-NEXT: addis 3, 2, .LCPI35_1@toc@ha
+; ISEL-NEXT: addi 4, 4, .LCPI35_0@toc@l
+; ISEL-NEXT: addi 3, 3, .LCPI35_1@toc@l
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: lxsdx 1, 0, 3
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_fsub_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: addis 4, 2, .LCPI35_0@toc@ha
+; NO_ISEL-NEXT: addis 3, 2, .LCPI35_1@toc@ha
+; NO_ISEL-NEXT: addi 4, 4, .LCPI35_0@toc@l
+; NO_ISEL-NEXT: addi 3, 3, .LCPI35_1@toc@l
+; NO_ISEL-NEXT: bc 12, 1, .LBB35_2
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: b .LBB35_2
+; NO_ISEL-NEXT: .LBB35_2:
+; NO_ISEL-NEXT: lxsdx 1, 0, 3
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, double -4.0, double 23.3
+ %bo = fsub double %sel, 5.1
+ ret double %bo
+}
+
+define double @sel_constants_fmul_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_fmul_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: addis 4, 2, .LCPI36_0@toc@ha
+; ISEL-NEXT: addis 3, 2, .LCPI36_1@toc@ha
+; ISEL-NEXT: addi 4, 4, .LCPI36_0@toc@l
+; ISEL-NEXT: addi 3, 3, .LCPI36_1@toc@l
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: lxsdx 1, 0, 3
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_fmul_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: addis 4, 2, .LCPI36_0@toc@ha
+; NO_ISEL-NEXT: addis 3, 2, .LCPI36_1@toc@ha
+; NO_ISEL-NEXT: addi 4, 4, .LCPI36_0@toc@l
+; NO_ISEL-NEXT: addi 3, 3, .LCPI36_1@toc@l
+; NO_ISEL-NEXT: bc 12, 1, .LBB36_2
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: b .LBB36_2
+; NO_ISEL-NEXT: .LBB36_2:
+; NO_ISEL-NEXT: lxsdx 1, 0, 3
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, double -4.0, double 23.3
+ %bo = fmul double %sel, 5.1
+ ret double %bo
+}
+
+define double @sel_constants_fdiv_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_fdiv_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: addis 4, 2, .LCPI37_0@toc@ha
+; ISEL-NEXT: addis 3, 2, .LCPI37_1@toc@ha
+; ISEL-NEXT: addi 4, 4, .LCPI37_0@toc@l
+; ISEL-NEXT: addi 3, 3, .LCPI37_1@toc@l
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: lxsdx 1, 0, 3
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_fdiv_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: addis 4, 2, .LCPI37_0@toc@ha
+; NO_ISEL-NEXT: addis 3, 2, .LCPI37_1@toc@ha
+; NO_ISEL-NEXT: addi 4, 4, .LCPI37_0@toc@l
+; NO_ISEL-NEXT: addi 3, 3, .LCPI37_1@toc@l
+; NO_ISEL-NEXT: bc 12, 1, .LBB37_2
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: b .LBB37_2
+; NO_ISEL-NEXT: .LBB37_2:
+; NO_ISEL-NEXT: lxsdx 1, 0, 3
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, double -4.0, double 23.3
+ %bo = fdiv double %sel, 5.1
+ ret double %bo
+}
+
+define double @sel_constants_frem_constant(i1 %cond) {
+; ALL-LABEL: sel_constants_frem_constant:
+; ALL: # BB#0:
+; ALL-NEXT: andi. 3, 3, 1
+; ALL-NEXT: bc 12, 1, .LBB38_2
+; ALL-NEXT: # BB#1:
+; ALL-NEXT: addis 3, 2, .LCPI38_0@toc@ha
+; ALL-NEXT: addi 3, 3, .LCPI38_0@toc@l
+; ALL-NEXT: lxsdx 1, 0, 3
+; ALL-NEXT: blr
+; ALL-NEXT: .LBB38_2:
+; ALL-NEXT: addis 3, 2, .LCPI38_1@toc@ha
+; ALL-NEXT: addi 3, 3, .LCPI38_1@toc@l
+; ALL-NEXT: lxsspx 1, 0, 3
+; ALL-NEXT: blr
+ %sel = select i1 %cond, double -4.0, double 23.3
+ %bo = frem double %sel, 5.1
+ ret double %bo
+}
+
diff --git a/test/CodeGen/PowerPC/setcc-logic.ll b/test/CodeGen/PowerPC/setcc-logic.ll
new file mode 100644
index 000000000000..2ed08e2ae380
--- /dev/null
+++ b/test/CodeGen/PowerPC/setcc-logic.ll
@@ -0,0 +1,478 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -verify-machineinstrs -mtriple=powerpc64le-unknown-unknown | FileCheck %s
+
+define zeroext i1 @all_bits_clear(i32 %P, i32 %Q) {
+; CHECK-LABEL: all_bits_clear:
+; CHECK: # BB#0:
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: cntlzw 3, 3
+; CHECK-NEXT: rlwinm 3, 3, 27, 31, 31
+; CHECK-NEXT: blr
+ %a = icmp eq i32 %P, 0
+ %b = icmp eq i32 %Q, 0
+ %c = and i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @all_sign_bits_clear(i32 %P, i32 %Q) {
+; CHECK-LABEL: all_sign_bits_clear:
+; CHECK: # BB#0:
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: nor 3, 3, 3
+; CHECK-NEXT: srwi 3, 3, 31
+; CHECK-NEXT: blr
+ %a = icmp sgt i32 %P, -1
+ %b = icmp sgt i32 %Q, -1
+ %c = and i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @all_bits_set(i32 %P, i32 %Q) {
+; CHECK-LABEL: all_bits_set:
+; CHECK: # BB#0:
+; CHECK-NEXT: and 3, 3, 4
+; CHECK-NEXT: li 5, 0
+; CHECK-NEXT: li 12, 1
+; CHECK-NEXT: cmpwi 0, 3, -1
+; CHECK-NEXT: isel 3, 12, 5, 2
+; CHECK-NEXT: blr
+ %a = icmp eq i32 %P, -1
+ %b = icmp eq i32 %Q, -1
+ %c = and i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @all_sign_bits_set(i32 %P, i32 %Q) {
+; CHECK-LABEL: all_sign_bits_set:
+; CHECK: # BB#0:
+; CHECK-NEXT: and 3, 3, 4
+; CHECK-NEXT: srwi 3, 3, 31
+; CHECK-NEXT: blr
+ %a = icmp slt i32 %P, 0
+ %b = icmp slt i32 %Q, 0
+ %c = and i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @any_bits_set(i32 %P, i32 %Q) {
+; CHECK-LABEL: any_bits_set:
+; CHECK: # BB#0:
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: cntlzw 3, 3
+; CHECK-NEXT: nor 3, 3, 3
+; CHECK-NEXT: rlwinm 3, 3, 27, 31, 31
+; CHECK-NEXT: blr
+ %a = icmp ne i32 %P, 0
+ %b = icmp ne i32 %Q, 0
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @any_sign_bits_set(i32 %P, i32 %Q) {
+; CHECK-LABEL: any_sign_bits_set:
+; CHECK: # BB#0:
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: srwi 3, 3, 31
+; CHECK-NEXT: blr
+ %a = icmp slt i32 %P, 0
+ %b = icmp slt i32 %Q, 0
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @any_bits_clear(i32 %P, i32 %Q) {
+; CHECK-LABEL: any_bits_clear:
+; CHECK: # BB#0:
+; CHECK-NEXT: and 3, 3, 4
+; CHECK-NEXT: li 5, 1
+; CHECK-NEXT: cmpwi 0, 3, -1
+; CHECK-NEXT: isel 3, 0, 5, 2
+; CHECK-NEXT: blr
+ %a = icmp ne i32 %P, -1
+ %b = icmp ne i32 %Q, -1
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @any_sign_bits_clear(i32 %P, i32 %Q) {
+; CHECK-LABEL: any_sign_bits_clear:
+; CHECK: # BB#0:
+; CHECK-NEXT: and 3, 3, 4
+; CHECK-NEXT: nor 3, 3, 3
+; CHECK-NEXT: srwi 3, 3, 31
+; CHECK-NEXT: blr
+ %a = icmp sgt i32 %P, -1
+ %b = icmp sgt i32 %Q, -1
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; PR3351 - (P == 0) & (Q == 0) -> (P|Q) == 0
+define i32 @all_bits_clear_branch(i32* %P, i32* %Q) {
+; CHECK-LABEL: all_bits_clear_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: or. 3, 3, 4
+; CHECK-NEXT: bne 0, .LBB8_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: li 3, 4
+; CHECK-NEXT: blr
+; CHECK-NEXT: .LBB8_2: # %return
+; CHECK-NEXT: li 3, 192
+; CHECK-NEXT: blr
+entry:
+ %a = icmp eq i32* %P, null
+ %b = icmp eq i32* %Q, null
+ %c = and i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define i32 @all_sign_bits_clear_branch(i32 %P, i32 %Q) {
+; CHECK-LABEL: all_sign_bits_clear_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: cmpwi 0, 3, 0
+; CHECK-NEXT: blt 0, .LBB9_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: li 3, 4
+; CHECK-NEXT: blr
+; CHECK-NEXT: .LBB9_2: # %return
+; CHECK-NEXT: li 3, 192
+; CHECK-NEXT: blr
+entry:
+ %a = icmp sgt i32 %P, -1
+ %b = icmp sgt i32 %Q, -1
+ %c = and i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define i32 @all_bits_set_branch(i32 %P, i32 %Q) {
+; CHECK-LABEL: all_bits_set_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: and 3, 3, 4
+; CHECK-NEXT: cmpwi 0, 3, -1
+; CHECK-NEXT: bne 0, .LBB10_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: li 3, 4
+; CHECK-NEXT: blr
+; CHECK-NEXT: .LBB10_2: # %return
+; CHECK-NEXT: li 3, 192
+; CHECK-NEXT: blr
+entry:
+ %a = icmp eq i32 %P, -1
+ %b = icmp eq i32 %Q, -1
+ %c = and i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define i32 @all_sign_bits_set_branch(i32 %P, i32 %Q) {
+; CHECK-LABEL: all_sign_bits_set_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: and 3, 3, 4
+; CHECK-NEXT: cmpwi 0, 3, -1
+; CHECK-NEXT: bgt 0, .LBB11_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: li 3, 4
+; CHECK-NEXT: blr
+; CHECK-NEXT: .LBB11_2: # %return
+; CHECK-NEXT: li 3, 192
+; CHECK-NEXT: blr
+entry:
+ %a = icmp slt i32 %P, 0
+ %b = icmp slt i32 %Q, 0
+ %c = and i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+; PR3351 - (P != 0) | (Q != 0) -> (P|Q) != 0
+define i32 @any_bits_set_branch(i32* %P, i32* %Q) {
+; CHECK-LABEL: any_bits_set_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: or. 3, 3, 4
+; CHECK-NEXT: beq 0, .LBB12_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: li 3, 4
+; CHECK-NEXT: blr
+; CHECK-NEXT: .LBB12_2: # %return
+; CHECK-NEXT: li 3, 192
+; CHECK-NEXT: blr
+entry:
+ %a = icmp ne i32* %P, null
+ %b = icmp ne i32* %Q, null
+ %c = or i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define i32 @any_sign_bits_set_branch(i32 %P, i32 %Q) {
+; CHECK-LABEL: any_sign_bits_set_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: cmpwi 0, 3, -1
+; CHECK-NEXT: bgt 0, .LBB13_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: li 3, 4
+; CHECK-NEXT: blr
+; CHECK-NEXT: .LBB13_2: # %return
+; CHECK-NEXT: li 3, 192
+; CHECK-NEXT: blr
+entry:
+ %a = icmp slt i32 %P, 0
+ %b = icmp slt i32 %Q, 0
+ %c = or i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define i32 @any_bits_clear_branch(i32 %P, i32 %Q) {
+; CHECK-LABEL: any_bits_clear_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: and 3, 3, 4
+; CHECK-NEXT: cmpwi 0, 3, -1
+; CHECK-NEXT: beq 0, .LBB14_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: li 3, 4
+; CHECK-NEXT: blr
+; CHECK-NEXT: .LBB14_2: # %return
+; CHECK-NEXT: li 3, 192
+; CHECK-NEXT: blr
+entry:
+ %a = icmp ne i32 %P, -1
+ %b = icmp ne i32 %Q, -1
+ %c = or i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define i32 @any_sign_bits_clear_branch(i32 %P, i32 %Q) {
+; CHECK-LABEL: any_sign_bits_clear_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: and 3, 3, 4
+; CHECK-NEXT: cmpwi 0, 3, 0
+; CHECK-NEXT: blt 0, .LBB15_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: li 3, 4
+; CHECK-NEXT: blr
+; CHECK-NEXT: .LBB15_2: # %return
+; CHECK-NEXT: li 3, 192
+; CHECK-NEXT: blr
+entry:
+ %a = icmp sgt i32 %P, -1
+ %b = icmp sgt i32 %Q, -1
+ %c = or i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define <4 x i1> @all_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
+; CHECK-LABEL: all_bits_clear_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: xxlxor 36, 36, 36
+; CHECK-NEXT: xxlor 34, 34, 35
+; CHECK-NEXT: vcmpequw 2, 2, 4
+; CHECK-NEXT: blr
+ %a = icmp eq <4 x i32> %P, zeroinitializer
+ %b = icmp eq <4 x i32> %Q, zeroinitializer
+ %c = and <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @all_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
+; CHECK-LABEL: all_sign_bits_clear_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: vspltisb 4, -1
+; CHECK-NEXT: xxlor 34, 34, 35
+; CHECK-NEXT: vcmpgtsw 2, 2, 4
+; CHECK-NEXT: blr
+ %a = icmp sgt <4 x i32> %P, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %b = icmp sgt <4 x i32> %Q, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = and <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @all_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
+; CHECK-LABEL: all_bits_set_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: vspltisb 4, -1
+; CHECK-NEXT: xxland 34, 34, 35
+; CHECK-NEXT: vcmpequw 2, 2, 4
+; CHECK-NEXT: blr
+ %a = icmp eq <4 x i32> %P, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %b = icmp eq <4 x i32> %Q, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = and <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @all_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
+; CHECK-LABEL: all_sign_bits_set_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: xxlxor 36, 36, 36
+; CHECK-NEXT: xxland 34, 34, 35
+; CHECK-NEXT: vcmpgtsw 2, 4, 2
+; CHECK-NEXT: blr
+ %a = icmp slt <4 x i32> %P, zeroinitializer
+ %b = icmp slt <4 x i32> %Q, zeroinitializer
+ %c = and <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @any_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
+; CHECK-LABEL: any_bits_set_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: xxlxor 36, 36, 36
+; CHECK-NEXT: xxlor 34, 34, 35
+; CHECK-NEXT: vcmpequw 2, 2, 4
+; CHECK-NEXT: xxlnor 34, 34, 34
+; CHECK-NEXT: blr
+ %a = icmp ne <4 x i32> %P, zeroinitializer
+ %b = icmp ne <4 x i32> %Q, zeroinitializer
+ %c = or <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @any_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
+; CHECK-LABEL: any_sign_bits_set_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: xxlxor 36, 36, 36
+; CHECK-NEXT: xxlor 34, 34, 35
+; CHECK-NEXT: vcmpgtsw 2, 4, 2
+; CHECK-NEXT: blr
+ %a = icmp slt <4 x i32> %P, zeroinitializer
+ %b = icmp slt <4 x i32> %Q, zeroinitializer
+ %c = or <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @any_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
+; CHECK-LABEL: any_bits_clear_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: vspltisb 4, -1
+; CHECK-NEXT: xxland 34, 34, 35
+; CHECK-NEXT: vcmpequw 2, 2, 4
+; CHECK-NEXT: xxlnor 34, 34, 34
+; CHECK-NEXT: blr
+ %a = icmp ne <4 x i32> %P, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %b = icmp ne <4 x i32> %Q, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = or <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @any_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
+; CHECK-LABEL: any_sign_bits_clear_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: vspltisb 4, -1
+; CHECK-NEXT: xxland 34, 34, 35
+; CHECK-NEXT: vcmpgtsw 2, 2, 4
+; CHECK-NEXT: blr
+ %a = icmp sgt <4 x i32> %P, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %b = icmp sgt <4 x i32> %Q, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = or <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define zeroext i1 @ne_neg1_and_ne_zero(i64 %x) {
+; CHECK-LABEL: ne_neg1_and_ne_zero:
+; CHECK: # BB#0:
+; CHECK-NEXT: addi 3, 3, 1
+; CHECK-NEXT: li 4, 0
+; CHECK-NEXT: li 12, 1
+; CHECK-NEXT: cmpldi 3, 1
+; CHECK-NEXT: isel 3, 12, 4, 1
+; CHECK-NEXT: blr
+ %cmp1 = icmp ne i64 %x, -1
+ %cmp2 = icmp ne i64 %x, 0
+ %and = and i1 %cmp1, %cmp2
+ ret i1 %and
+}
+
+; PR32401 - https://bugs.llvm.org/show_bug.cgi?id=32401
+
+define zeroext i1 @and_eq(i16 zeroext %a, i16 zeroext %b, i16 zeroext %c, i16 zeroext %d) {
+; CHECK-LABEL: and_eq:
+; CHECK: # BB#0:
+; CHECK-NEXT: xor 5, 5, 6
+; CHECK-NEXT: xor 3, 3, 4
+; CHECK-NEXT: or 3, 3, 5
+; CHECK-NEXT: cntlzw 3, 3
+; CHECK-NEXT: rlwinm 3, 3, 27, 31, 31
+; CHECK-NEXT: blr
+ %cmp1 = icmp eq i16 %a, %b
+ %cmp2 = icmp eq i16 %c, %d
+ %and = and i1 %cmp1, %cmp2
+ ret i1 %and
+}
+
+define zeroext i1 @or_ne(i32 %a, i32 %b, i32 %c, i32 %d) {
+; CHECK-LABEL: or_ne:
+; CHECK: # BB#0:
+; CHECK-NEXT: xor 5, 5, 6
+; CHECK-NEXT: xor 3, 3, 4
+; CHECK-NEXT: or 3, 3, 5
+; CHECK-NEXT: cntlzw 3, 3
+; CHECK-NEXT: nor 3, 3, 3
+; CHECK-NEXT: rlwinm 3, 3, 27, 31, 31
+; CHECK-NEXT: blr
+ %cmp1 = icmp ne i32 %a, %b
+ %cmp2 = icmp ne i32 %c, %d
+ %or = or i1 %cmp1, %cmp2
+ ret i1 %or
+}
+
+; This should not be transformed because vector compares + bitwise logic are faster.
+
+define <4 x i1> @and_eq_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
+; CHECK-LABEL: and_eq_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: vcmpequw 2, 2, 3
+; CHECK-NEXT: vcmpequw 19, 4, 5
+; CHECK-NEXT: xxland 34, 34, 51
+; CHECK-NEXT: blr
+ %cmp1 = icmp eq <4 x i32> %a, %b
+ %cmp2 = icmp eq <4 x i32> %c, %d
+ %and = and <4 x i1> %cmp1, %cmp2
+ ret <4 x i1> %and
+}
+
diff --git a/test/CodeGen/PowerPC/setcc-to-sub.ll b/test/CodeGen/PowerPC/setcc-to-sub.ll
index 335bb403cd7f..752ebe0c9d8b 100644
--- a/test/CodeGen/PowerPC/setcc-to-sub.ll
+++ b/test/CodeGen/PowerPC/setcc-to-sub.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN: -mcpu=pwr8 < %s | FileCheck %s
@@ -6,6 +7,15 @@
; Function Attrs: norecurse nounwind readonly
define zeroext i1 @test1(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
+; CHECK-LABEL: test1:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: lwz 3, 0(3)
+; CHECK-NEXT: lwz 4, 0(4)
+; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28
+; CHECK-NEXT: rlwinm 4, 4, 0, 28, 28
+; CHECK-NEXT: sub 3, 3, 4
+; CHECK-NEXT: rldicl 3, 3, 1, 63
+; CHECK-NEXT: blr
entry:
%arrayidx.i6 = bitcast %class.PB2* %s_a to i32*
%0 = load i32, i32* %arrayidx.i6, align 8, !tbaa !1
@@ -15,18 +25,20 @@ entry:
%and.i4 = and i32 %1, 8
%cmp.i5 = icmp ult i32 %and.i, %and.i4
ret i1 %cmp.i5
-
-; CHECK-LABEL: @test1
-; CHECK: rlwinm [[REG1:[0-9]*]]
-; CHECK-NEXT: rlwinm [[REG2:[0-9]*]]
-; CHECK-NEXT: sub [[REG3:[0-9]*]], [[REG1]], [[REG2]]
-; CHECK-NEXT: rldicl 3, [[REG3]]
-; CHECK: blr
-
}
; Function Attrs: norecurse nounwind readonly
define zeroext i1 @test2(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
+; CHECK-LABEL: test2:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: lwz 3, 0(3)
+; CHECK-NEXT: lwz 4, 0(4)
+; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28
+; CHECK-NEXT: rlwinm 4, 4, 0, 28, 28
+; CHECK-NEXT: sub 3, 4, 3
+; CHECK-NEXT: rldicl 3, 3, 1, 63
+; CHECK-NEXT: xori 3, 3, 1
+; CHECK-NEXT: blr
entry:
%arrayidx.i6 = bitcast %class.PB2* %s_a to i32*
%0 = load i32, i32* %arrayidx.i6, align 8, !tbaa !1
@@ -36,19 +48,19 @@ entry:
%and.i4 = and i32 %1, 8
%cmp.i5 = icmp ule i32 %and.i, %and.i4
ret i1 %cmp.i5
-
-; CHECK-LABEL: @test2
-; CHECK: rlwinm [[REG1:[0-9]*]]
-; CHECK-NEXT: rlwinm [[REG2:[0-9]*]]
-; CHECK-NEXT: sub [[REG3:[0-9]*]], [[REG2]], [[REG1]]
-; CHECK-NEXT: rldicl [[REG4:[0-9]*]], [[REG3]]
-; CHECK-NEXT: xori 3, [[REG4]], 1
-; CHECK: blr
-
}
; Function Attrs: norecurse nounwind readonly
define zeroext i1 @test3(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
+; CHECK-LABEL: test3:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: lwz 3, 0(3)
+; CHECK-NEXT: lwz 4, 0(4)
+; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28
+; CHECK-NEXT: rlwinm 4, 4, 0, 28, 28
+; CHECK-NEXT: sub 3, 4, 3
+; CHECK-NEXT: rldicl 3, 3, 1, 63
+; CHECK-NEXT: blr
entry:
%arrayidx.i6 = bitcast %class.PB2* %s_a to i32*
%0 = load i32, i32* %arrayidx.i6, align 8, !tbaa !1
@@ -58,18 +70,20 @@ entry:
%and.i4 = and i32 %1, 8
%cmp.i5 = icmp ugt i32 %and.i, %and.i4
ret i1 %cmp.i5
-
-; CHECK-LABEL: @test3
-; CHECK: rlwinm [[REG1:[0-9]*]]
-; CHECK-NEXT: rlwinm [[REG2:[0-9]*]]
-; CHECK-NEXT: sub [[REG3:[0-9]*]], [[REG2]], [[REG1]]
-; CHECK-NEXT: rldicl 3, [[REG3]]
-; CHECK: blr
-
}
; Function Attrs: norecurse nounwind readonly
define zeroext i1 @test4(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
+; CHECK-LABEL: test4:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: lwz 3, 0(3)
+; CHECK-NEXT: lwz 4, 0(4)
+; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28
+; CHECK-NEXT: rlwinm 4, 4, 0, 28, 28
+; CHECK-NEXT: sub 3, 3, 4
+; CHECK-NEXT: rldicl 3, 3, 1, 63
+; CHECK-NEXT: xori 3, 3, 1
+; CHECK-NEXT: blr
entry:
%arrayidx.i6 = bitcast %class.PB2* %s_a to i32*
%0 = load i32, i32* %arrayidx.i6, align 8, !tbaa !1
@@ -79,15 +93,6 @@ entry:
%and.i4 = and i32 %1, 8
%cmp.i5 = icmp uge i32 %and.i, %and.i4
ret i1 %cmp.i5
-
-; CHECK-LABEL: @test4
-; CHECK: rlwinm [[REG1:[0-9]*]]
-; CHECK-NEXT: rlwinm [[REG2:[0-9]*]]
-; CHECK-NEXT: sub [[REG3:[0-9]*]], [[REG1]], [[REG2]]
-; CHECK-NEXT: rldicl [[REG4:[0-9]*]], [[REG3]]
-; CHECK-NEXT: xori 3, [[REG4]], 1
-; CHECK: blr
-
}
!1 = !{!2, !2, i64 0}
diff --git a/test/CodeGen/PowerPC/sjlj_no0x.ll b/test/CodeGen/PowerPC/sjlj_no0x.ll
new file mode 100644
index 000000000000..2018bcbbc931
--- /dev/null
+++ b/test/CodeGen/PowerPC/sjlj_no0x.ll
@@ -0,0 +1,29 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -verify-machineinstrs | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+; Function Attrs: noinline nounwind
+define void @_Z23BuiltinLongJmpFunc1_bufv() #0 {
+entry:
+ call void @llvm.eh.sjlj.longjmp(i8* bitcast (void ()* @_Z23BuiltinLongJmpFunc1_bufv to i8*))
+ unreachable
+
+; CHECK: @_Z23BuiltinLongJmpFunc1_bufv
+; CHECK: addis [[REG:[0-9]+]], 2, .LC0@toc@ha
+; CHECK: ld 31, 0([[REG]])
+; CHECK: ld [[REG2:[0-9]+]], 8([[REG]])
+; CHECK-DAG: ld 1, 16([[REG]])
+; CHECK-DAG: ld 30, 32([[REG]])
+; CHECK-DAG: ld 2, 24([[REG]])
+; CHECK-DAG: mtctr [[REG2]]
+; CHECK: bctr
+
+return: ; No predecessors!
+ ret void
+}
+
+; Function Attrs: noreturn nounwind
+declare void @llvm.eh.sjlj.longjmp(i8*) #1
diff --git a/test/CodeGen/PowerPC/srl-mask.ll b/test/CodeGen/PowerPC/srl-mask.ll
index e581eae0ee57..1a429b1bae36 100644
--- a/test/CodeGen/PowerPC/srl-mask.ll
+++ b/test/CodeGen/PowerPC/srl-mask.ll
@@ -12,5 +12,16 @@ entry:
; CHECK: blr
}
+; For an AND with an immediate like (x & ~0xFFFF)
+; we should use the rldicr instruction.
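+; (18446744073709486080 is 0xFFFFFFFFFFFF0000, i.e. ~0xFFFF as an i64, so
+; rldicr can keep the high 48 bits and clear the low 16.)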
+define i64 @bar(i64 %x) #0 {
+entry:
+; CHECK-LABEL: @bar
+ %a = and i64 %x, 18446744073709486080
+; CHECK: rldicr 3, 3, 0, 47
+ ret i64 %a
+; CHECK: blr
+}
+
attributes #0 = { nounwind }
diff --git a/test/CodeGen/PowerPC/stacksize.ll b/test/CodeGen/PowerPC/stacksize.ll
new file mode 100644
index 000000000000..947aaa0fa49e
--- /dev/null
+++ b/test/CodeGen/PowerPC/stacksize.ll
@@ -0,0 +1,86 @@
+; For the ELFv2 ABI, we can avoid allocating the parameter area in the caller's
+; stack frame if all of the arguments can be passed to the callee in registers.
+; For the ELFv1 ABI, we always need to allocate the parameter area.
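+; (In the checks below this shows up as a 32-byte ELFv2 frame, just the minimal
+; stack header, versus a 112-byte ELFv1 frame: its 48-byte fixed header plus
+; the always-present 64-byte parameter save area.)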
+
+; Tests for ELFv2 ABI
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -target-abi elfv2 < %s | FileCheck %s -check-prefix=PPC64-ELFV2
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -target-abi elfv2 < %s | FileCheck %s -check-prefix=PPC64-ELFV2
+
+; Tests for ELFv1 ABI
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -target-abi elfv1 < %s | FileCheck %s -check-prefix=PPC64-ELFV1
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -target-abi elfv1 < %s | FileCheck %s -check-prefix=PPC64-ELFV1
+
+; If the callee has at most eight integer args, the parameter area can be omitted for the ELFv2 ABI.
+
+; PPC64-ELFV2-LABEL: WithoutParamArea1:
+; PPC64-ELFV2-NOT: stw {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV2: stdu 1, -32(1)
+; PPC64-ELFV2: addi 1, 1, 32
+; PPC64-ELFV2-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV1-LABEL: WithoutParamArea1:
+; PPC64-ELFV1-NOT: stw {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV1: stdu 1, -112(1)
+; PPC64-ELFV1: addi 1, 1, 112
+; PPC64-ELFV1-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
+define signext i32 @WithoutParamArea1(i32 signext %a) local_unnamed_addr #0 {
+entry:
+ %call = tail call signext i32 @onearg(i32 signext %a) #2
+ ret i32 %call
+}
+
+; PPC64-ELFV2-LABEL: WithoutParamArea2:
+; PPC64-ELFV2-NOT: stw {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV2: stdu 1, -32(1)
+; PPC64-ELFV2: addi 1, 1, 32
+; PPC64-ELFV2-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV1-LABEL: WithoutParamArea2:
+; PPC64-ELFV1-NOT: stw {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV1: stdu 1, -112(1)
+; PPC64-ELFV1: addi 1, 1, 112
+; PPC64-ELFV1-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
+define signext i32 @WithoutParamArea2(i32 signext %a) local_unnamed_addr #0 {
+entry:
+ %call = tail call signext i32 @eightargs(i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a) #2
+ ret i32 %call
+}
+
+; If the callee has more than eight integer args or a variable number of args,
+; the parameter area cannot be omitted even for the ELFv2 ABI.
+
+; PPC64-ELFV2-LABEL: WithParamArea1:
+; PPC64-ELFV2-NOT: stw {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV2: stdu 1, -96(1)
+; PPC64-ELFV2: addi 1, 1, 96
+; PPC64-ELFV2-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV1-LABEL: WithParamArea1:
+; PPC64-ELFV1-NOT: stw {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV1: stdu 1, -112(1)
+; PPC64-ELFV1: addi 1, 1, 112
+; PPC64-ELFV1-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
+define signext i32 @WithParamArea1(i32 signext %a) local_unnamed_addr #0 {
+entry:
+ %call = tail call signext i32 (i32, ...) @varargs(i32 signext %a, i32 signext %a) #2
+ ret i32 %call
+}
+
+; PPC64-ELFV2-LABEL: WithParamArea2:
+; PPC64-ELFV2-NOT: stw {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV2: stdu 1, -112(1)
+; PPC64-ELFV2: addi 1, 1, 112
+; PPC64-ELFV2-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV1-LABEL: WithParamArea2:
+; PPC64-ELFV1-NOT: stw {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV1: stdu 1, -128(1)
+; PPC64-ELFV1: addi 1, 1, 128
+; PPC64-ELFV1-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
+define signext i32 @WithParamArea2(i32 signext %a) local_unnamed_addr #0 {
+entry:
+ %call = tail call signext i32 @nineargs(i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a) #2
+ ret i32 %call
+}
+
+declare signext i32 @onearg(i32 signext) local_unnamed_addr #1
+declare signext i32 @eightargs(i32 signext, i32 signext, i32 signext, i32 signext, i32 signext, i32 signext, i32 signext, i32 signext) local_unnamed_addr #1
+declare signext i32 @nineargs(i32 signext, i32 signext, i32 signext, i32 signext, i32 signext, i32 signext, i32 signext, i32 signext, i32 signext) local_unnamed_addr #1
+declare signext i32 @varargs(i32 signext, ...) local_unnamed_addr #1
+
diff --git a/test/CodeGen/PowerPC/structsinmem.ll b/test/CodeGen/PowerPC/structsinmem.ll
index 3777f3ec5bab..01b0848e7070 100644
--- a/test/CodeGen/PowerPC/structsinmem.ll
+++ b/test/CodeGen/PowerPC/structsinmem.ll
@@ -113,13 +113,13 @@ entry:
%add13 = add nsw i32 %add11, %6
ret i32 %add13
-; CHECK: lha {{[0-9]+}}, 126(1)
-; CHECK: lha {{[0-9]+}}, 132(1)
-; CHECK: lbz {{[0-9]+}}, 119(1)
-; CHECK: lwz {{[0-9]+}}, 140(1)
-; CHECK: lwz {{[0-9]+}}, 144(1)
-; CHECK: lwz {{[0-9]+}}, 152(1)
-; CHECK: lwz {{[0-9]+}}, 160(1)
+; CHECK-DAG: lha {{[0-9]+}}, 126(1)
+; CHECK-DAG: lha {{[0-9]+}}, 132(1)
+; CHECK-DAG: lbz {{[0-9]+}}, 119(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 140(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 144(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 152(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 160(1)
}
define i32 @caller2() nounwind {
@@ -205,11 +205,11 @@ entry:
%add13 = add nsw i32 %add11, %6
ret i32 %add13
-; CHECK: lha {{[0-9]+}}, 126(1)
-; CHECK: lha {{[0-9]+}}, 133(1)
-; CHECK: lbz {{[0-9]+}}, 119(1)
-; CHECK: lwz {{[0-9]+}}, 140(1)
-; CHECK: lwz {{[0-9]+}}, 147(1)
-; CHECK: lwz {{[0-9]+}}, 154(1)
-; CHECK: lwz {{[0-9]+}}, 161(1)
+; CHECK-DAG: lha {{[0-9]+}}, 126(1)
+; CHECK-DAG: lha {{[0-9]+}}, 133(1)
+; CHECK-DAG: lbz {{[0-9]+}}, 119(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 140(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 147(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 154(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 161(1)
}
diff --git a/test/CodeGen/PowerPC/structsinregs.ll b/test/CodeGen/PowerPC/structsinregs.ll
index e27041dd4c88..54679f259e9a 100644
--- a/test/CodeGen/PowerPC/structsinregs.ll
+++ b/test/CodeGen/PowerPC/structsinregs.ll
@@ -59,6 +59,7 @@ entry:
%call = call i32 @callee1(%struct.s1* byval %p1, %struct.s2* byval %p2, %struct.s3* byval %p3, %struct.s4* byval %p4, %struct.s5* byval %p5, %struct.s6* byval %p6, %struct.s7* byval %p7)
ret i32 %call
+; CHECK-LABEL: caller1
; CHECK: ld 9, 112(31)
; CHECK: ld 8, 120(31)
; CHECK: ld 7, 128(31)
@@ -97,20 +98,21 @@ entry:
%add13 = add nsw i32 %add11, %6
ret i32 %add13
-; CHECK: std 9, 96(1)
-; CHECK: std 8, 88(1)
-; CHECK: std 7, 80(1)
-; CHECK: stw 6, 76(1)
-; CHECK: stw 5, 68(1)
-; CHECK: sth 4, 62(1)
-; CHECK: stb 3, 55(1)
-; CHECK: lha {{[0-9]+}}, 62(1)
-; CHECK: lha {{[0-9]+}}, 68(1)
-; CHECK: lbz {{[0-9]+}}, 55(1)
-; CHECK: lwz {{[0-9]+}}, 76(1)
-; CHECK: lwz {{[0-9]+}}, 80(1)
-; CHECK: lwz {{[0-9]+}}, 88(1)
-; CHECK: lwz {{[0-9]+}}, 96(1)
+; CHECK-LABEL: callee1
+; CHECK-DAG: std 9, 96(1)
+; CHECK-DAG: std 8, 88(1)
+; CHECK-DAG: std 7, 80(1)
+; CHECK-DAG: stw 6, 76(1)
+; CHECK-DAG: stw 5, 68(1)
+; CHECK-DAG: sth 4, 62(1)
+; CHECK-DAG: stb 3, 55(1)
+; CHECK-DAG: lha {{[0-9]+}}, 62(1)
+; CHECK-DAG: lha {{[0-9]+}}, 68(1)
+; CHECK-DAG: lbz {{[0-9]+}}, 55(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 76(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 80(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 88(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 96(1)
}
define i32 @caller2() nounwind {
@@ -139,6 +141,7 @@ entry:
%call = call i32 @callee2(%struct.t1* byval %p1, %struct.t2* byval %p2, %struct.t3* byval %p3, %struct.t4* byval %p4, %struct.t5* byval %p5, %struct.t6* byval %p6, %struct.t7* byval %p7)
ret i32 %call
+; CHECK-LABEL: caller2
; CHECK: stb {{[0-9]+}}, 71(1)
; CHECK: sth {{[0-9]+}}, 69(1)
; CHECK: stb {{[0-9]+}}, 87(1)
@@ -184,18 +187,19 @@ entry:
%add13 = add nsw i32 %add11, %6
ret i32 %add13
-; CHECK: std 9, 96(1)
-; CHECK: std 8, 88(1)
-; CHECK: std 7, 80(1)
-; CHECK: stw 6, 76(1)
-; CHECK: std 5, 64(1)
-; CHECK: sth 4, 62(1)
-; CHECK: stb 3, 55(1)
-; CHECK: lha {{[0-9]+}}, 62(1)
-; CHECK: lha {{[0-9]+}}, 69(1)
-; CHECK: lbz {{[0-9]+}}, 55(1)
-; CHECK: lwz {{[0-9]+}}, 76(1)
-; CHECK: lwz {{[0-9]+}}, 83(1)
-; CHECK: lwz {{[0-9]+}}, 90(1)
-; CHECK: lwz {{[0-9]+}}, 97(1)
+; CHECK-LABEL: callee2
+; CHECK-DAG: std 9, 96(1)
+; CHECK-DAG: std 8, 88(1)
+; CHECK-DAG: std 7, 80(1)
+; CHECK-DAG: stw 6, 76(1)
+; CHECK-DAG: std 5, 64(1)
+; CHECK-DAG: sth 4, 62(1)
+; CHECK-DAG: stb 3, 55(1)
+; CHECK-DAG: lha {{[0-9]+}}, 62(1)
+; CHECK-DAG: lha {{[0-9]+}}, 69(1)
+; CHECK-DAG: lbz {{[0-9]+}}, 55(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 76(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 83(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 90(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 97(1)
}
diff --git a/test/CodeGen/PowerPC/subreg-postra-2.ll b/test/CodeGen/PowerPC/subreg-postra-2.ll
index fb33b9e35425..338000cd8bae 100644
--- a/test/CodeGen/PowerPC/subreg-postra-2.ll
+++ b/test/CodeGen/PowerPC/subreg-postra-2.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -31,10 +32,16 @@ while.end418: ; preds = %wait_on_buffer.exit
br i1 %tobool419, label %if.end421, label %if.then420
; CHECK-LABEL: @jbd2_journal_commit_transaction
+; CHECK-NO-ISEL-LABEL: @jbd2_journal_commit_transaction
; CHECK: andi.
; CHECK: crmove [[REG:[0-9]+]], 1
; CHECK: stdcx.
; CHECK: isel {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}, [[REG]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 4, 7, 0
+; CHECK-NO-ISEL-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 4, 3, 0
if.then420: ; preds = %while.end418
unreachable
diff --git a/test/CodeGen/PowerPC/subreg-postra.ll b/test/CodeGen/PowerPC/subreg-postra.ll
index 877ceccd918a..7557e4e9a467 100644
--- a/test/CodeGen/PowerPC/subreg-postra.ll
+++ b/test/CodeGen/PowerPC/subreg-postra.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -145,10 +146,15 @@ wait_on_buffer.exit1319: ; preds = %while.body392
br i1 %inp8, label %while.end418, label %while.body392
; CHECK-LABEL: @jbd2_journal_commit_transaction
+; CHECK-NO-ISEL-LABEL: @jbd2_journal_commit_transaction
; CHECK: andi.
; CHECK: crmove
; CHECK: stdcx.
; CHECK: isel {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}},
+; CHECK-NO-ISEL: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 30, 3, 0
+; CHECK-NO-ISEL: b [[SUCCESSOR:.LBB[0-9]+]]
+
while.end418: ; preds = %wait_on_buffer.exit1319, %do.body378
%err.4.lcssa = phi i32 [ %inp2, %do.body378 ], [ %.err.4, %wait_on_buffer.exit1319 ]
diff --git a/test/CodeGen/PowerPC/subtract_from_imm.ll b/test/CodeGen/PowerPC/subtract_from_imm.ll
new file mode 100644
index 000000000000..8fa07b671a3d
--- /dev/null
+++ b/test/CodeGen/PowerPC/subtract_from_imm.ll
@@ -0,0 +1,41 @@
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s
+
+; Make sure that subfic is generated if and only if the immediate fits
+; in its signed 16-bit field.
+
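+; (Hedged sketch of the assumed legality check, not the actual ISel
+; code: since subfic's immediate is a signed 16-bit field, a helper such
+; as
+;   bool fitsSubficImm(int64_t Imm) { return Imm >= -32768 && Imm <= 32767; }
+; explains why 32767 and -32768 fold below while 32768 and -32769 do
+; not. fitsSubficImm is a hypothetical name used only for illustration.)
+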
+define i64 @subtract_from_imm1(i64 %v) nounwind readnone {
+entry:
+; CHECK-LABEL: subtract_from_imm1
+; CHECK: subfic 3, 3, 32767
+; CHECK: blr
+ %sub = sub i64 32767, %v
+ ret i64 %sub
+}
+
+define i64 @subtract_from_imm2(i64 %v) nounwind readnone {
+entry:
+; CHECK-LABEL: subtract_from_imm2
+; CHECK-NOT: subfic
+; CHECK: blr
+ %sub = sub i64 32768, %v
+ ret i64 %sub
+}
+
+define i64 @subtract_from_imm3(i64 %v) nounwind readnone {
+entry:
+; CHECK-LABEL: subtract_from_imm3
+; CHECK: subfic 3, 3, -32768
+; CHECK: blr
+ %sub = sub i64 -32768, %v
+ ret i64 %sub
+}
+
+define i64 @subtract_from_imm4(i64 %v) nounwind readnone {
+entry:
+; CHECK-LABEL: subtract_from_imm4
+; CHECK-NOT: subfic
+; CHECK: blr
+ %sub = sub i64 -32769, %v
+ ret i64 %sub
+}
+
diff --git a/test/CodeGen/PowerPC/swaps-le-4.ll b/test/CodeGen/PowerPC/swaps-le-4.ll
index 87c6dac9630b..2bf684d9d614 100644
--- a/test/CodeGen/PowerPC/swaps-le-4.ll
+++ b/test/CodeGen/PowerPC/swaps-le-4.ll
@@ -8,11 +8,11 @@ define void @bar() {
entry:
%x = alloca <2 x i64>, align 16
%0 = bitcast <2 x i64>* %x to i8*
- call void @llvm.lifetime.start(i64 16, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 16, i8* %0)
%arrayidx = getelementptr inbounds <2 x i64>, <2 x i64>* %x, i64 0, i64 0
store <2 x i64> <i64 0, i64 1>, <2 x i64>* %x, align 16
call void @foo(i64* %arrayidx)
- call void @llvm.lifetime.end(i64 16, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 16, i8* %0)
ret void
}
@@ -21,7 +21,7 @@ entry:
; CHECK: stxvd2x
; CHECK-NOT: xxswapd
-declare void @llvm.lifetime.start(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare void @foo(i64*)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
diff --git a/test/CodeGen/PowerPC/swaps-le-7.ll b/test/CodeGen/PowerPC/swaps-le-7.ll
index dc3c49730700..1d5f50da398e 100644
--- a/test/CodeGen/PowerPC/swaps-le-7.ll
+++ b/test/CodeGen/PowerPC/swaps-le-7.ll
@@ -11,11 +11,11 @@
; CHECK-LABEL: @zg
; CHECK: xxspltd
; CHECK-NEXT: xxspltd
-; CHECK-NEXT: xxswapd
; CHECK-NEXT: xvmuldp
; CHECK-NEXT: xvmuldp
; CHECK-NEXT: xvsubdp
; CHECK-NEXT: xvadddp
+; CHECK-NEXT: xxswapd
; CHECK-NEXT: xxpermdi
; CHECK-NEXT: xvsubdp
; CHECK-NEXT: xxswapd
@@ -52,4 +52,4 @@ L.JA291:
ret void
}
-attributes #0 = { noinline }
\ No newline at end of file
+attributes #0 = { noinline }
diff --git a/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll b/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll
index 5d03af801fc6..0b1014571613 100644
--- a/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll
+++ b/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll
@@ -3,7 +3,7 @@ target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
declare void @f1()
declare void @f2()
@@ -54,11 +54,11 @@ if.else: ; preds = %sw.default
br label %dup2
dup1: ; preds = %sw.0, %sw.1
- call void @llvm.lifetime.end(i64 8, i8* nonnull undef) #0
+ call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull undef) #0
unreachable
dup2: ; preds = %if.then, %if.else
- call void @llvm.lifetime.end(i64 8, i8* nonnull undef) #0
+ call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull undef) #0
unreachable
}
diff --git a/test/CodeGen/PowerPC/tail-dup-break-cfg.ll b/test/CodeGen/PowerPC/tail-dup-break-cfg.ll
new file mode 100644
index 000000000000..f19b11f2ae4c
--- /dev/null
+++ b/test/CodeGen/PowerPC/tail-dup-break-cfg.ll
@@ -0,0 +1,140 @@
+; RUN: llc -O2 -o - %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-grtev4-linux-gnu"
+
+; Intended layout:
+; Tail duplication during layout should produce the layout:
+; test1
+; test2
+; body1 (with copy of test2)
+; body2
+; exit
+
+;CHECK-LABEL: tail_dup_break_cfg:
+;CHECK: mr [[TAGREG:[0-9]+]], 3
+;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
+;CHECK-NEXT: bc 12, 1, [[BODY1LABEL:[._0-9A-Za-z]+]]
+;CHECK-NEXT: # %test2
+;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
+;CHECK-NEXT: bne 0, [[BODY2LABEL:[._0-9A-Za-z]+]]
+;CHECK: [[EXITLABEL:[._0-9A-Za-z]+]]: # %exit
+;CHECK: blr
+;CHECK-NEXT: [[BODY1LABEL]]
+;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
+;CHECK-NEXT: beq 0, [[EXITLABEL]]
+;CHECK-NEXT: [[BODY2LABEL:[._0-9A-Za-z]+]]:
+;CHECK: b [[EXITLABEL]]
+define void @tail_dup_break_cfg(i32 %tag) {
+entry:
+ br label %test1
+test1:
+ %tagbit1 = and i32 %tag, 1
+ %tagbit1eq0 = icmp eq i32 %tagbit1, 0
+ br i1 %tagbit1eq0, label %test2, label %body1, !prof !1 ; %test2 more likely
+body1:
+ call void @a()
+ call void @a()
+ call void @a()
+ call void @a()
+ br label %test2
+test2:
+ %tagbit2 = and i32 %tag, 2
+ %tagbit2eq0 = icmp eq i32 %tagbit2, 0
+ br i1 %tagbit2eq0, label %exit, label %body2, !prof !1 ; %exit more likely
+body2:
+ call void @b()
+ call void @b()
+ call void @b()
+ call void @b()
+ br label %exit
+exit:
+ ret void
+}
+
+; The branch weights here hint that we shouldn't tail duplicate in this case.
+;CHECK-LABEL: tail_dup_dont_break_cfg:
+;CHECK: mr [[TAGREG:[0-9]+]], 3
+;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
+;CHECK-NEXT: bc 4, 1, [[TEST2LABEL:[._0-9A-Za-z]+]]
+;CHECK-NEXT: # %body1
+;CHECK: [[TEST2LABEL]]: # %test2
+;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
+;CHECK-NEXT: beq 0, [[EXITLABEL:[._0-9A-Za-z]+]]
+;CHECK-NEXT: # %body2
+;CHECK: [[EXITLABEL:[._0-9A-Za-z]+]]: # %exit
+;CHECK: blr
+define void @tail_dup_dont_break_cfg(i32 %tag) {
+entry:
+ br label %test1
+test1:
+ %tagbit1 = and i32 %tag, 1
+ %tagbit1eq0 = icmp eq i32 %tagbit1, 0
+ br i1 %tagbit1eq0, label %test2, label %body1, !prof !1 ; %test2 more likely
+body1:
+ call void @a()
+ call void @a()
+ call void @a()
+ call void @a()
+ br label %test2
+test2:
+ %tagbit2 = and i32 %tag, 2
+ %tagbit2eq0 = icmp ne i32 %tagbit2, 0
+ br i1 %tagbit2eq0, label %body2, label %exit, !prof !3 ; %body2 more likely
+body2:
+ call void @b()
+ call void @b()
+ call void @b()
+ call void @b()
+ br label %exit
+exit:
+ ret void
+}
+declare void @a()
+declare void @b()
+declare void @c()
+declare void @d()
+
+; This function arranges for the successors of %succ to have already been laid
+; out. When we consider whether to lay out succ after bb and to tail-duplicate
+; it, v and ret have already been placed, so we tail-duplicate as it removes a
+; branch and strictly increases fallthrough.
+; CHECK-LABEL: tail_dup_no_succ
+; CHECK: # %entry
+; CHECK: # %v
+; CHECK: # %ret
+; CHECK: # %bb
+; CHECK: # %succ
+; CHECK: # %c
+; CHECK: bl c
+; CHECK: rlwinm. {{[0-9]+}}, {{[0-9]+}}, 0, 29, 29
+; CHECK: beq
+; CHECK: b
+define void @tail_dup_no_succ(i32 %tag) {
+entry:
+ %tagbit1 = and i32 %tag, 1
+ %tagbit1eq0 = icmp eq i32 %tagbit1, 0
+ br i1 %tagbit1eq0, label %v, label %bb, !prof !2 ; %v very much more likely
+bb:
+ %tagbit2 = and i32 %tag, 2
+ %tagbit2eq0 = icmp eq i32 %tagbit2, 0
+ br i1 %tagbit2eq0, label %succ, label %c, !prof !3 ; %succ more likely
+c:
+ call void @c()
+ call void @c()
+ br label %succ
+succ:
+ %tagbit3 = and i32 %tag, 4
+ %tagbit3eq0 = icmp eq i32 %tagbit3, 0
+  br i1 %tagbit3eq0, label %ret, label %v, !prof !1 ; %ret more likely
+v:
+ call void @d()
+ call void @d()
+ br label %ret
+ret:
+ ret void
+}
+
+
+!1 = !{!"branch_weights", i32 5, i32 3}
+!2 = !{!"branch_weights", i32 95, i32 5}
+!3 = !{!"branch_weights", i32 8, i32 3}
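+
+; (A note on the metadata, based on the standard branch_weights
+; semantics: !{"branch_weights", a, b} gives the first successor
+; probability a/(a+b), so !1 is 5/8 = 62.5%, !2 is 95/100 = 95%, and
+; !3 is 8/11, roughly 73%.)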
diff --git a/test/CodeGen/PowerPC/tail-dup-layout.ll b/test/CodeGen/PowerPC/tail-dup-layout.ll
index 6790aa8e9441..c9b5bf8c9eeb 100644
--- a/test/CodeGen/PowerPC/tail-dup-layout.ll
+++ b/test/CodeGen/PowerPC/tail-dup-layout.ll
@@ -1,59 +1,59 @@
-; RUN: llc -outline-optional-branches -O2 < %s | FileCheck %s
+; RUN: llc -O2 < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-grtev4-linux-gnu"
; Intended layout:
-; The outlining flag produces the layout
+; The chain-based outlining produces the layout
; test1
; test2
; test3
; test4
-; exit
; optional1
; optional2
; optional3
; optional4
+; exit
; Tail duplication puts test n+1 at the end of optional n
; so optional1 includes a copy of test2 at the end, and branches
; to test3 (at the top) or falls through to optional2.
-; The CHECK statements check for the whole string of tests and exit block,
+; The CHECK statements check for the whole string of tests
; and then check that the correct test has been duplicated into the end of
; the optional blocks and that the optional blocks are in the correct order.
-;CHECK-LABEL: f:
+;CHECK-LABEL: straight_test:
; test1 may have been merged with entry
;CHECK: mr [[TAGREG:[0-9]+]], 3
;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
-;CHECK-NEXT: bc 12, 1, [[OPT1LABEL:[._0-9A-Za-z]+]]
-;CHECK-NEXT: [[TEST2LABEL:[._0-9A-Za-z]+]]: # %test2
+;CHECK-NEXT: bc 12, 1, .[[OPT1LABEL:[_0-9A-Za-z]+]]
+;CHECK-NEXT: # %test2
;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
-;CHECK-NEXT: bne 0, [[OPT2LABEL:[._0-9A-Za-z]+]]
-;CHECK-NEXT: [[TEST3LABEL:[._0-9A-Za-z]+]]: # %test3
+;CHECK-NEXT: bne 0, .[[OPT2LABEL:[_0-9A-Za-z]+]]
+;CHECK-NEXT: .[[TEST3LABEL:[_0-9A-Za-z]+]]: # %test3
;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 29, 29
-;CHECK-NEXT: bne 0, .[[OPT3LABEL:[._0-9A-Za-z]+]]
-;CHECK-NEXT: [[TEST4LABEL:[._0-9A-Za-z]+]]: # %test4
+;CHECK-NEXT: bne 0, .[[OPT3LABEL:[_0-9A-Za-z]+]]
+;CHECK-NEXT: .[[TEST4LABEL:[_0-9A-Za-z]+]]: # %test4
;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 28, 28
-;CHECK-NEXT: bne 0, .[[OPT4LABEL:[._0-9A-Za-z]+]]
-;CHECK-NEXT: [[EXITLABEL:[._0-9A-Za-z]+]]: # %exit
+;CHECK-NEXT: bne 0, .[[OPT4LABEL:[_0-9A-Za-z]+]]
+;CHECK-NEXT: .[[EXITLABEL:[_0-9A-Za-z]+]]: # %exit
;CHECK: blr
-;CHECK-NEXT: [[OPT1LABEL]]
+;CHECK-NEXT: .[[OPT1LABEL]]:
;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
-;CHECK-NEXT: beq 0, [[TEST3LABEL]]
-;CHECK-NEXT: [[OPT2LABEL]]
+;CHECK-NEXT: beq 0, .[[TEST3LABEL]]
+;CHECK-NEXT: .[[OPT2LABEL]]:
;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 29, 29
-;CHECK-NEXT: beq 0, [[TEST4LABEL]]
-;CHECK-NEXT: [[OPT3LABEL]]
+;CHECK-NEXT: beq 0, .[[TEST4LABEL]]
+;CHECK-NEXT: .[[OPT3LABEL]]:
;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 28, 28
-;CHECK-NEXT: beq 0, [[EXITLABEL]]
-;CHECK-NEXT: [[OPT4LABEL]]
-;CHECK: b [[EXITLABEL]]
+;CHECK-NEXT: beq 0, .[[EXITLABEL]]
+;CHECK-NEXT: .[[OPT4LABEL]]:
+;CHECK: b .[[EXITLABEL]]
-define void @f(i32 %tag) {
+define void @straight_test(i32 %tag) {
entry:
br label %test1
test1:
%tagbit1 = and i32 %tag, 1
%tagbit1eq0 = icmp eq i32 %tagbit1, 0
- br i1 %tagbit1eq0, label %test2, label %optional1
+ br i1 %tagbit1eq0, label %test2, label %optional1, !prof !1
optional1:
call void @a()
call void @a()
@@ -63,7 +63,7 @@ optional1:
test2:
%tagbit2 = and i32 %tag, 2
%tagbit2eq0 = icmp eq i32 %tagbit2, 0
- br i1 %tagbit2eq0, label %test3, label %optional2
+ br i1 %tagbit2eq0, label %test3, label %optional2, !prof !1
optional2:
call void @b()
call void @b()
@@ -73,7 +73,7 @@ optional2:
test3:
%tagbit3 = and i32 %tag, 4
%tagbit3eq0 = icmp eq i32 %tagbit3, 0
- br i1 %tagbit3eq0, label %test4, label %optional3
+ br i1 %tagbit3eq0, label %test4, label %optional3, !prof !1
optional3:
call void @c()
call void @c()
@@ -83,7 +83,7 @@ optional3:
test4:
%tagbit4 = and i32 %tag, 8
%tagbit4eq0 = icmp eq i32 %tagbit4, 0
- br i1 %tagbit4eq0, label %exit, label %optional4
+ br i1 %tagbit4eq0, label %exit, label %optional4, !prof !1
optional4:
call void @d()
call void @d()
@@ -94,7 +94,449 @@ exit:
ret void
}
+; Intended layout:
+; The chain-of-triangles based duplication produces the layout
+; test1
+; test2
+; test3
+; test4
+; optional1
+; optional2
+; optional3
+; optional4
+; exit
+; even for 50/50 branches.
+; Tail duplication puts test n+1 at the end of optional n
+; so optional1 includes a copy of test2 at the end, and branches
+; to test3 (at the top) or falls through to optional2.
+; The CHECK statements check for the whole string of tests
+; and then check that the correct test has been duplicated into the end of
+; the optional blocks and that the optional blocks are in the correct order.
+;CHECK-LABEL: straight_test_50:
+; test1 may have been merged with entry
+;CHECK: mr [[TAGREG:[0-9]+]], 3
+;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
+;CHECK-NEXT: bc 12, 1, .[[OPT1LABEL:[_0-9A-Za-z]+]]
+;CHECK-NEXT: # %test2
+;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
+;CHECK-NEXT: bne 0, .[[OPT2LABEL:[_0-9A-Za-z]+]]
+;CHECK-NEXT: .[[TEST3LABEL:[_0-9A-Za-z]+]]: # %test3
+;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 29, 29
+;CHECK-NEXT: bne 0, .[[OPT3LABEL:[_0-9A-Za-z]+]]
+;CHECK-NEXT: .[[EXITLABEL:[_0-9A-Za-z]+]]: # %exit
+;CHECK: blr
+;CHECK-NEXT: .[[OPT1LABEL]]:
+;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
+;CHECK-NEXT: beq 0, .[[TEST3LABEL]]
+;CHECK-NEXT: .[[OPT2LABEL]]:
+;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 29, 29
+;CHECK-NEXT: beq 0, .[[EXITLABEL]]
+;CHECK-NEXT: .[[OPT3LABEL]]:
+;CHECK: b .[[EXITLABEL]]
+
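+; (Here test1 and test2 branch with !2, the 50/50 case: each successor
+; gets probability 50/(50+50) = 1/2, yet the tests are still duplicated,
+; which is the point of this function.)
+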
+define void @straight_test_50(i32 %tag) {
+entry:
+ br label %test1
+test1:
+ %tagbit1 = and i32 %tag, 1
+ %tagbit1eq0 = icmp eq i32 %tagbit1, 0
+ br i1 %tagbit1eq0, label %test2, label %optional1, !prof !2
+optional1:
+ call void @a()
+ br label %test2
+test2:
+ %tagbit2 = and i32 %tag, 2
+ %tagbit2eq0 = icmp eq i32 %tagbit2, 0
+ br i1 %tagbit2eq0, label %test3, label %optional2, !prof !2
+optional2:
+ call void @b()
+ br label %test3
+test3:
+ %tagbit3 = and i32 %tag, 4
+ %tagbit3eq0 = icmp eq i32 %tagbit3, 0
+ br i1 %tagbit3eq0, label %exit, label %optional3, !prof !1
+optional3:
+ call void @c()
+ br label %exit
+exit:
+ ret void
+}
+
+; Intended layout:
+; The chain-based outlining produces the layout
+; entry
+; --- Begin loop ---
+; for.latch
+; for.check
+; test1
+; test2
+; test3
+; test4
+; optional1
+; optional2
+; optional3
+; optional4
+; --- End loop ---
+; exit
+; The CHECK statements check for the whole string of tests and exit block,
+; and then check that the correct test has been duplicated into the end of
+; the optional blocks and that the optional blocks are in the correct order.
+;CHECK-LABEL: loop_test:
+;CHECK: add [[TAGPTRREG:[0-9]+]], 3, 4
+;CHECK: .[[LATCHLABEL:[._0-9A-Za-z]+]]: # %for.latch
+;CHECK: addi
+;CHECK: .[[CHECKLABEL:[._0-9A-Za-z]+]]: # %for.check
+;CHECK: lwz [[TAGREG:[0-9]+]], 0([[TAGPTRREG]])
+;CHECK: # %test1
+;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
+;CHECK-NEXT: bc 12, 1, .[[OPT1LABEL:[._0-9A-Za-z]+]]
+;CHECK-NEXT: # %test2
+;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
+;CHECK-NEXT: bne 0, .[[OPT2LABEL:[._0-9A-Za-z]+]]
+;CHECK-NEXT: .[[TEST3LABEL:[._0-9A-Za-z]+]]: # %test3
+;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 29, 29
+;CHECK-NEXT: bne 0, .[[OPT3LABEL:[._0-9A-Za-z]+]]
+;CHECK-NEXT: .[[TEST4LABEL:[._0-9A-Za-z]+]]: # %{{(test4|optional3)}}
+;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 28, 28
+;CHECK-NEXT: beq 0, .[[LATCHLABEL]]
+;CHECK-NEXT: b .[[OPT4LABEL:[._0-9A-Za-z]+]]
+;CHECK: [[OPT1LABEL]]
+;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
+;CHECK-NEXT: beq 0, .[[TEST3LABEL]]
+;CHECK-NEXT: .[[OPT2LABEL]]
+;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 29, 29
+;CHECK-NEXT: beq 0, .[[TEST4LABEL]]
+;CHECK-NEXT: .[[OPT3LABEL]]
+;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 28, 28
+;CHECK-NEXT: beq 0, .[[LATCHLABEL]]
+;CHECK: [[OPT4LABEL]]:
+;CHECK: b .[[LATCHLABEL]]
+define void @loop_test(i32* %tags, i32 %count) {
+entry:
+ br label %for.check
+for.check:
+ %count.loop = phi i32 [%count, %entry], [%count.sub, %for.latch]
+ %done.count = icmp ugt i32 %count.loop, 0
+ %tag_ptr = getelementptr inbounds i32, i32* %tags, i32 %count
+ %tag = load i32, i32* %tag_ptr
+ %done.tag = icmp eq i32 %tag, 0
+ %done = and i1 %done.count, %done.tag
+ br i1 %done, label %test1, label %exit, !prof !1
+test1:
+ %tagbit1 = and i32 %tag, 1
+ %tagbit1eq0 = icmp eq i32 %tagbit1, 0
+ br i1 %tagbit1eq0, label %test2, label %optional1, !prof !1
+optional1:
+ call void @a()
+ call void @a()
+ call void @a()
+ call void @a()
+ br label %test2
+test2:
+ %tagbit2 = and i32 %tag, 2
+ %tagbit2eq0 = icmp eq i32 %tagbit2, 0
+ br i1 %tagbit2eq0, label %test3, label %optional2, !prof !1
+optional2:
+ call void @b()
+ call void @b()
+ call void @b()
+ call void @b()
+ br label %test3
+test3:
+ %tagbit3 = and i32 %tag, 4
+ %tagbit3eq0 = icmp eq i32 %tagbit3, 0
+ br i1 %tagbit3eq0, label %test4, label %optional3, !prof !1
+optional3:
+ call void @c()
+ call void @c()
+ call void @c()
+ call void @c()
+ br label %test4
+test4:
+ %tagbit4 = and i32 %tag, 8
+ %tagbit4eq0 = icmp eq i32 %tagbit4, 0
+ br i1 %tagbit4eq0, label %for.latch, label %optional4, !prof !1
+optional4:
+ call void @d()
+ call void @d()
+ call void @d()
+ call void @d()
+ br label %for.latch
+for.latch:
+ %count.sub = sub i32 %count.loop, 1
+ br label %for.check
+exit:
+ ret void
+}
+
+; The block then2 is not unavoidable, meaning it does not dominate the exit.
+; But since it can be tail-duplicated, it should be placed as a fallthrough from
+; test2 and copied. The purpose here is to make sure that the tail-duplication
+; code is independent of the outlining code, which works by choosing the
+; "unavoidable" blocks.
+; CHECK-LABEL: avoidable_test:
+; CHECK: # %entry
+; CHECK: andi.
+; CHECK: # %test2
+; Make sure then2 falls through from test2
+; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}}
+; CHECK: # %then2
+; CHECK: rlwinm. {{[0-9]+}}, {{[0-9]+}}, 0, 29, 29
+; CHECK: # %else1
+; CHECK: bl a
+; CHECK: bl a
+; Make sure then2 was copied into else1
+; CHECK: rlwinm. {{[0-9]+}}, {{[0-9]+}}, 0, 29, 29
+; CHECK: # %end1
+; CHECK: bl d
+; CHECK: # %else2
+; CHECK: bl c
+; CHECK: # %end2
+define void @avoidable_test(i32 %tag) {
+entry:
+ br label %test1
+test1:
+ %tagbit1 = and i32 %tag, 1
+ %tagbit1eq0 = icmp eq i32 %tagbit1, 0
+ br i1 %tagbit1eq0, label %test2, label %else1, !prof !1 ; %test2 more likely
+else1:
+ call void @a()
+ call void @a()
+ br label %then2
+test2:
+ %tagbit2 = and i32 %tag, 2
+ %tagbit2eq0 = icmp eq i32 %tagbit2, 0
+ br i1 %tagbit2eq0, label %then2, label %else2, !prof !1 ; %then2 more likely
+then2:
+ %tagbit3 = and i32 %tag, 4
+ %tagbit3eq0 = icmp eq i32 %tagbit3, 0
+ br i1 %tagbit3eq0, label %end2, label %end1, !prof !1 ; %end2 more likely
+else2:
+ call void @c()
+ br label %end2
+end2:
+ ret void
+end1:
+ call void @d()
+ ret void
+}
+
+; CHECK-LABEL: trellis_test
+; The number in the block labels is the expected block frequency given the
+; probabilities annotated. There is a conflict in the b;c->d;e trellis that
+; should be resolved as c->e;b->d.
+; The d;e->f;g trellis should be resolved as e->g;d->f.
+; The f;g->h;i trellis should be resolved as f->i;g->h.
+; The h;i->j;ret trellis contains a triangle edge, and should be resolved as
+; h->j->ret.
+; CHECK: # %entry
+; CHECK: # %c10
+; CHECK: # %e9
+; CHECK: # %g10
+; CHECK: # %h10
+; CHECK: # %j8
+; CHECK: # %ret
+; CHECK: # %b6
+; CHECK: # %d7
+; CHECK: # %f6
+; CHECK: # %i6
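+; (Worked example for the frequency suffixes, assuming %a16 runs 16
+; times: !1 = 5:3 sends 16*5/8 = 10 iterations to %c10 and 16*3/8 = 6 to
+; %b6; %e9 then receives 10*6/10 = 6 from %c10 (via !3) plus 6*3/6 = 3
+; from %b6 (via !2), for a total of 9.)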
+define void @trellis_test(i32 %tag) {
+entry:
+ br label %a16
+a16:
+ call void @a()
+ call void @a()
+ %tagbits.a = and i32 %tag, 3
+ %tagbits.a.eq0 = icmp eq i32 %tagbits.a, 0
+ br i1 %tagbits.a.eq0, label %c10, label %b6, !prof !1 ; 10 to 6
+c10:
+ call void @c()
+ call void @c()
+ %tagbits.c = and i32 %tag, 12
+ %tagbits.c.eq0 = icmp eq i32 %tagbits.c, 0
+ ; Both of these edges should be hotter than the other incoming edge
+ ; for e9 or d7
+ br i1 %tagbits.c.eq0, label %e9, label %d7, !prof !3 ; 6 to 4
+e9:
+ call void @e()
+ call void @e()
+ %tagbits.e = and i32 %tag, 48
+ %tagbits.e.eq0 = icmp eq i32 %tagbits.e, 0
+ br i1 %tagbits.e.eq0, label %g10, label %f6, !prof !4 ; 7 to 2
+g10:
+ call void @g()
+ call void @g()
+ %tagbits.g = and i32 %tag, 192
+ %tagbits.g.eq0 = icmp eq i32 %tagbits.g, 0
+ br i1 %tagbits.g.eq0, label %i6, label %h10, !prof !5 ; 2 to 8
+i6:
+ call void @i()
+ call void @i()
+ %tagbits.i = and i32 %tag, 768
+ %tagbits.i.eq0 = icmp eq i32 %tagbits.i, 0
+ br i1 %tagbits.i.eq0, label %ret, label %j8, !prof !2 ; balanced (3 to 3)
+b6:
+ call void @b()
+ call void @b()
+ %tagbits.b = and i32 %tag, 12
+ %tagbits.b.eq1 = icmp eq i32 %tagbits.b, 8
+ br i1 %tagbits.b.eq1, label %e9, label %d7, !prof !2 ; balanced (3 to 3)
+d7:
+ call void @d()
+ call void @d()
+ %tagbits.d = and i32 %tag, 48
+ %tagbits.d.eq1 = icmp eq i32 %tagbits.d, 32
+ br i1 %tagbits.d.eq1, label %g10, label %f6, !prof !6 ; 3 to 4
+f6:
+ call void @f()
+ call void @f()
+ %tagbits.f = and i32 %tag, 192
+ %tagbits.f.eq1 = icmp eq i32 %tagbits.f, 128
+ br i1 %tagbits.f.eq1, label %i6, label %h10, !prof !7 ; 4 to 2
+h10:
+ call void @h()
+ call void @h()
+ %tagbits.h = and i32 %tag, 768
+ %tagbits.h.eq1 = icmp eq i32 %tagbits.h, 512
+ br i1 %tagbits.h.eq1, label %ret, label %j8, !prof !2 ; balanced (5 to 5)
+j8:
+ call void @j()
+ call void @j()
+ br label %ret
+ret:
+ ret void
+}
+
+; Verify that we still consider tail-duplication opportunities if we find a
+; triangle trellis. Here D->F->G is the triangle, and D;E are both predecessors
+; of both F and G. The basic trellis algorithm picks the F->G edge, but after
+; checking, it's profitable to duplicate G into F. The weights here are not
+; really important. They are there to help make the test stable.
+; CHECK-LABEL: trellis_then_dup_test
+; CHECK: # %entry
+; CHECK: # %b
+; CHECK: # %d
+; CHECK: # %g
+; CHECK: # %ret1
+; CHECK: # %c
+; CHECK: # %e
+; CHECK: # %f
+; CHECK: # %ret2
+; CHECK: # %ret
+define void @trellis_then_dup_test(i32 %tag) {
+entry:
+ br label %a
+a:
+ call void @a()
+ call void @a()
+ %tagbits.a = and i32 %tag, 3
+ %tagbits.a.eq0 = icmp eq i32 %tagbits.a, 0
+ br i1 %tagbits.a.eq0, label %b, label %c, !prof !1 ; 5 to 3
+b:
+ call void @b()
+ call void @b()
+ %tagbits.b = and i32 %tag, 12
+ %tagbits.b.eq1 = icmp eq i32 %tagbits.b, 8
+ br i1 %tagbits.b.eq1, label %d, label %e, !prof !1 ; 5 to 3
+d:
+ call void @d()
+ call void @d()
+ %tagbits.d = and i32 %tag, 48
+ %tagbits.d.eq1 = icmp eq i32 %tagbits.d, 32
+ br i1 %tagbits.d.eq1, label %g, label %f, !prof !1 ; 5 to 3
+f:
+ call void @f()
+ call void @f()
+ br label %g
+g:
+ %tagbits.g = and i32 %tag, 192
+ %tagbits.g.eq0 = icmp eq i32 %tagbits.g, 0
+ br i1 %tagbits.g.eq0, label %ret1, label %ret2, !prof !2 ; balanced
+c:
+ call void @c()
+ call void @c()
+ %tagbits.c = and i32 %tag, 12
+ %tagbits.c.eq0 = icmp eq i32 %tagbits.c, 0
+ br i1 %tagbits.c.eq0, label %d, label %e, !prof !1 ; 5 to 3
+e:
+ call void @e()
+ call void @e()
+ %tagbits.e = and i32 %tag, 48
+ %tagbits.e.eq0 = icmp eq i32 %tagbits.e, 0
+ br i1 %tagbits.e.eq0, label %g, label %f, !prof !1 ; 5 to 3
+ret1:
+ call void @a()
+ br label %ret
+ret2:
+ call void @b()
+ br label %ret
+ret:
+ ret void
+}
+
+; Verify that we do not mis-identify a trellis as a triangle trellis
+; when it is not really a triangle.
+; CHECK-LABEL: trellis_no_triangle
+; CHECK: # %entry
+; CHECK: # %b
+; CHECK: # %d
+; CHECK: # %ret
+; CHECK: # %c
+; CHECK: # %e
+define void @trellis_no_triangle(i32 %tag) {
+entry:
+ br label %a
+a:
+ call void @a()
+ call void @a()
+ %tagbits.a = and i32 %tag, 3
+ %tagbits.a.eq0 = icmp eq i32 %tagbits.a, 0
+ br i1 %tagbits.a.eq0, label %b, label %c, !prof !8 ; 98 to 2
+b:
+ call void @b()
+ call void @b()
+ %tagbits.b = and i32 %tag, 12
+ %tagbits.b.eq1 = icmp eq i32 %tagbits.b, 8
+ br i1 %tagbits.b.eq1, label %d, label %e, !prof !9 ; 97 to 1
+d:
+ call void @d()
+ call void @d()
+ %tagbits.d = and i32 %tag, 48
+ %tagbits.d.eq1 = icmp eq i32 %tagbits.d, 32
+ br i1 %tagbits.d.eq1, label %ret, label %e, !prof !10 ; 96 to 2
+c:
+ call void @c()
+ call void @c()
+ %tagbits.c = and i32 %tag, 12
+ %tagbits.c.eq0 = icmp eq i32 %tagbits.c, 0
+ br i1 %tagbits.c.eq0, label %d, label %e, !prof !2 ; 1 to 1
+e:
+ call void @e()
+ call void @e()
+ br label %ret
+ret:
+ call void @f()
+ ret void
+}
+
declare void @a()
declare void @b()
declare void @c()
declare void @d()
+declare void @e()
+declare void @f()
+declare void @g()
+declare void @h()
+declare void @i()
+declare void @j()
+
+!1 = !{!"branch_weights", i32 5, i32 3}
+!2 = !{!"branch_weights", i32 50, i32 50}
+!3 = !{!"branch_weights", i32 6, i32 4}
+!4 = !{!"branch_weights", i32 7, i32 2}
+!5 = !{!"branch_weights", i32 2, i32 8}
+!6 = !{!"branch_weights", i32 3, i32 4}
+!7 = !{!"branch_weights", i32 4, i32 2}
+!8 = !{!"branch_weights", i32 98, i32 2}
+!9 = !{!"branch_weights", i32 97, i32 1}
+!10 = !{!"branch_weights", i32 96, i32 2}
diff --git a/test/CodeGen/PowerPC/toc-load-sched-bug.ll b/test/CodeGen/PowerPC/toc-load-sched-bug.ll
index e83124cbb990..21ccbf6f1ead 100644
--- a/test/CodeGen/PowerPC/toc-load-sched-bug.ll
+++ b/test/CodeGen/PowerPC/toc-load-sched-bug.ll
@@ -223,7 +223,7 @@ if.then: ; preds = %_ZNK4llvm7ErrorOrIS
%10 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i2.i, i64 0, i32 0
%11 = bitcast %"class.llvm::SMDiagnostic"* %ref.tmp to i8*
call void @llvm.memset.p0i8.i64(i8* %11, i8 0, i64 16, i32 8, i1 false) #3
- call void @llvm.lifetime.start(i64 1, i8* %10) #3
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %10) #3
%tobool.i.i4.i = icmp eq i8* %4, null
br i1 %tobool.i.i4.i, label %if.then.i.i6.i, label %if.end.i.i8.i
@@ -237,7 +237,7 @@ if.end.i.i8.i: ; preds = %if.then
br label %_ZNK4llvm9StringRefcvSsEv.exit9.i
_ZNK4llvm9StringRefcvSsEv.exit9.i: ; preds = %if.end.i.i8.i, %if.then.i.i6.i
- call void @llvm.lifetime.end(i64 1, i8* %10) #3
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %10) #3
%LineNo.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 3
store i32 -1, i32* %LineNo.i, align 8, !tbaa !14
%ColumnNo.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 4
@@ -246,7 +246,7 @@ _ZNK4llvm9StringRefcvSsEv.exit9.i: ; preds = %if.end.i.i8.i, %if.
store i32 0, i32* %Kind.i, align 8, !tbaa !22
%Message.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 6
%12 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i.i, i64 0, i32 0
- call void @llvm.lifetime.start(i64 1, i8* %12) #3
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %12) #3
%tobool.i.i.i = icmp eq i8* %8, null
br i1 %tobool.i.i.i, label %if.then.i.i.i, label %if.end.i.i.i
@@ -260,7 +260,7 @@ if.end.i.i.i: ; preds = %_ZNK4llvm9StringRef
br label %_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit
_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit: ; preds = %if.then.i.i.i, %if.end.i.i.i
- call void @llvm.lifetime.end(i64 1, i8* %12) #3
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %12) #3
%_M_p.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 7, i32 0, i32 0
store i8* bitcast (i64* getelementptr inbounds ([0 x i64], [0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p.i.i.i.i.i, align 8, !tbaa !13
%Ranges.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8
@@ -320,7 +320,7 @@ _ZN4llvm12SMDiagnosticaSEOS0_.exit: ; preds = %_ZN4llvm12SMDiagnos
%call2.i.i42 = call dereferenceable(48) %"class.llvm::SmallVectorImpl.85"* @_ZN4llvm15SmallVectorImplINS_7SMFixItEEaSEOS2_(%"class.llvm::SmallVectorImpl.85"* %24, %"class.llvm::SmallVectorImpl.85"* dereferenceable(48) %25) #3
call void @_ZN4llvm12SMDiagnosticD2Ev(%"class.llvm::SMDiagnostic"* %ref.tmp) #3
%26 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i, i64 0, i32 0
- call void @llvm.lifetime.start(i64 1, i8* %26) #3
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %26) #3
%27 = bitcast i8* %arrayidx.i.i.i36 to %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep"*
%cmp.i.i.i = icmp eq i8* %arrayidx.i.i.i36, bitcast ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE to i8*)
br i1 %cmp.i.i.i, label %_ZNSsD1Ev.exit, label %if.then.i.i.i45, !prof !28
@@ -332,11 +332,11 @@ if.then.i.i.i45: ; preds = %_ZN4llvm12SMDiagnos
if.then.i.i.i.i: ; preds = %if.then.i.i.i45
%.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast = bitcast i32* %.atomicdst.i.i.i.i.i to i8*
- call void @llvm.lifetime.start(i64 4, i8* %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
%29 = atomicrmw volatile add i32* %28, i32 -1 acq_rel
store i32 %29, i32* %.atomicdst.i.i.i.i.i, align 4
%.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i = load volatile i32, i32* %.atomicdst.i.i.i.i.i, align 4
- call void @llvm.lifetime.end(i64 4, i8* %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i
if.else.i.i.i.i: ; preds = %if.then.i.i.i45
@@ -355,9 +355,9 @@ if.then4.i.i.i: ; preds = %_ZN9__gnu_cxxL27__e
br label %_ZNSsD1Ev.exit
_ZNSsD1Ev.exit: ; preds = %_ZN4llvm12SMDiagnosticaSEOS0_.exit, %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i, %if.then4.i.i.i
- call void @llvm.lifetime.end(i64 1, i8* %26) #3
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %26) #3
%31 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i47, i64 0, i32 0
- call void @llvm.lifetime.start(i64 1, i8* %31) #3
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %31) #3
%_M_p.i.i.i.i48 = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %ref.tmp5, i64 0, i32 0, i32 0
%32 = load i8*, i8** %_M_p.i.i.i.i48, align 8, !tbaa !1
%arrayidx.i.i.i49 = getelementptr inbounds i8, i8* %32, i64 -24
@@ -372,11 +372,11 @@ if.then.i.i.i52: ; preds = %_ZNSsD1Ev.exit
if.then.i.i.i.i55: ; preds = %if.then.i.i.i52
%.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast = bitcast i32* %.atomicdst.i.i.i.i.i46 to i8*
- call void @llvm.lifetime.start(i64 4, i8* %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
%35 = atomicrmw volatile add i32* %34, i32 -1 acq_rel
store i32 %35, i32* %.atomicdst.i.i.i.i.i46, align 4
%.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i54 = load volatile i32, i32* %.atomicdst.i.i.i.i.i46, align 4
- call void @llvm.lifetime.end(i64 4, i8* %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60
if.else.i.i.i.i57: ; preds = %if.then.i.i.i52
@@ -395,7 +395,7 @@ if.then4.i.i.i61: ; preds = %_ZN9__gnu_cxxL27__e
br label %_ZNSsD1Ev.exit62
_ZNSsD1Ev.exit62: ; preds = %_ZNSsD1Ev.exit, %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60, %if.then4.i.i.i61
- call void @llvm.lifetime.end(i64 1, i8* %31) #3
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %31) #3
br label %cleanup
cond.false.i.i: ; preds = %_ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit
@@ -438,10 +438,10 @@ _ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEED2Ev.
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #3
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #3
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #3
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #3
; Function Attrs: noreturn nounwind
declare void @__assert_fail(i8*, i8*, i32 zeroext, i8*) #4
diff --git a/test/CodeGen/PowerPC/vec_absd.ll b/test/CodeGen/PowerPC/vec_absd.ll
index 37a3a5c94a33..268587bb2eaf 100644
--- a/test/CodeGen/PowerPC/vec_absd.ll
+++ b/test/CodeGen/PowerPC/vec_absd.ll
@@ -18,7 +18,7 @@ entry:
ret <16 x i8> %res
; CHECK-LABEL: @test_byte
; CHECK: vabsdub 2, 2, 3
-; CHECK blr
+; CHECK: blr
}
define <8 x i16> @test_half(<8 x i16> %a, <8 x i16> %b) {
@@ -27,7 +27,7 @@ entry:
ret <8 x i16> %res
; CHECK-LABEL: @test_half
; CHECK: vabsduh 2, 2, 3
-; CHECK blr
+; CHECK: blr
}
define <4 x i32> @test_word(<4 x i32> %a, <4 x i32> %b) {
diff --git a/test/CodeGen/PowerPC/vec_cmp.ll b/test/CodeGen/PowerPC/vec_cmp.ll
index 0eaac554aa4d..88de9a17d91e 100644
--- a/test/CodeGen/PowerPC/vec_cmp.ll
+++ b/test/CodeGen/PowerPC/vec_cmp.ll
@@ -54,7 +54,7 @@ entry:
}
; CHECK-LABEL: v16si8_cmp_ne:
; CHECK: vcmpequb [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <16 x i8> @v16si8_cmp_le(<16 x i8> %x, <16 x i8> %y) nounwind readnone {
entry:
@@ -64,7 +64,7 @@ entry:
}
; CHECK-LABEL: v16si8_cmp_le:
; CHECK: vcmpgtsb [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <16 x i8> @v16ui8_cmp_le(<16 x i8> %x, <16 x i8> %y) nounwind readnone {
entry:
@@ -74,7 +74,7 @@ entry:
}
; CHECK-LABEL: v16ui8_cmp_le:
; CHECK: vcmpgtub [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <16 x i8> @v16si8_cmp_lt(<16 x i8> %x, <16 x i8> %y) nounwind readnone {
entry:
@@ -120,7 +120,7 @@ entry:
}
; CHECK-LABEL: v16si8_cmp_ge:
; CHECK: vcmpgtsb [[RET:[0-9]+]], 3, 2
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <16 x i8> @v16ui8_cmp_ge(<16 x i8> %x, <16 x i8> %y) nounwind readnone {
entry:
@@ -130,7 +130,7 @@ entry:
}
; CHECK-LABEL: v16ui8_cmp_ge:
; CHECK: vcmpgtub [[RET:[0-9]+]], 3, 2
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <32 x i8> @v32si8_cmp(<32 x i8> %x, <32 x i8> %y) nounwind readnone {
@@ -180,7 +180,7 @@ entry:
}
; CHECK-LABEL: v8si16_cmp_ne:
; CHECK: vcmpequh [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <8 x i16> @v8si16_cmp_le(<8 x i16> %x, <8 x i16> %y) nounwind readnone {
entry:
@@ -190,7 +190,7 @@ entry:
}
; CHECK-LABEL: v8si16_cmp_le:
; CHECK: vcmpgtsh [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <8 x i16> @v8ui16_cmp_le(<8 x i16> %x, <8 x i16> %y) nounwind readnone {
entry:
@@ -200,7 +200,7 @@ entry:
}
; CHECK-LABEL: v8ui16_cmp_le:
; CHECK: vcmpgtuh [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <8 x i16> @v8si16_cmp_lt(<8 x i16> %x, <8 x i16> %y) nounwind readnone {
entry:
@@ -246,7 +246,7 @@ entry:
}
; CHECK-LABEL: v8si16_cmp_ge:
; CHECK: vcmpgtsh [[RET:[0-9]+]], 3, 2
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <8 x i16> @v8ui16_cmp_ge(<8 x i16> %x, <8 x i16> %y) nounwind readnone {
entry:
@@ -256,7 +256,7 @@ entry:
}
; CHECK-LABEL: v8ui16_cmp_ge:
; CHECK: vcmpgtuh [[RET:[0-9]+]], 3, 2
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <16 x i16> @v16si16_cmp(<16 x i16> %x, <16 x i16> %y) nounwind readnone {
@@ -309,7 +309,7 @@ entry:
}
; CHECK-LABEL: v4si32_cmp_ne:
; CHECK: vcmpequw [[RCMP:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RCMP]], [[RCMP]]
+; CHECK-NEXT: vnot 2, [[RCMP]]
define <4 x i32> @v4si32_cmp_le(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
entry:
@@ -319,7 +319,7 @@ entry:
}
; CHECK-LABEL: v4si32_cmp_le:
; CHECK: vcmpgtsw [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <4 x i32> @v4ui32_cmp_le(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
entry:
@@ -329,7 +329,7 @@ entry:
}
; CHECK-LABEL: v4ui32_cmp_le:
; CHECK: vcmpgtuw [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <4 x i32> @v4si32_cmp_lt(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
entry:
@@ -375,7 +375,7 @@ entry:
}
; CHECK-LABEL: v4si32_cmp_ge:
; CHECK: vcmpgtsw [[RET:[0-9]+]], 3, 2
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <4 x i32> @v4ui32_cmp_ge(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
entry:
@@ -385,7 +385,7 @@ entry:
}
; CHECK-LABEL: v4ui32_cmp_ge:
; CHECK: vcmpgtuw [[RET:[0-9]+]], 3, 2
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <8 x i32> @v8si32_cmp(<8 x i32> %x, <8 x i32> %y) nounwind readnone {
@@ -458,7 +458,7 @@ entry:
}
; CHECK-LABEL: v4f32_cmp_ne:
; CHECK: vcmpeqfp [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <4 x float> @v4f32_cmp_le(<4 x float> %x, <4 x float> %y) nounwind readnone {
entry:
@@ -509,7 +509,7 @@ entry:
}
; CHECK-LABEL: v4f32_cmp_ule:
; CHECK: vcmpgtfp [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <4 x float> @v4f32_cmp_ult(<4 x float> %x, <4 x float> %y) nounwind readnone {
entry:
@@ -520,7 +520,7 @@ entry:
}
; CHECK-LABEL: v4f32_cmp_ult:
; CHECK: vcmpgefp [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <4 x float> @v4f32_cmp_uge(<4 x float> %x, <4 x float> %y) nounwind readnone {
entry:
@@ -531,7 +531,7 @@ entry:
}
; CHECK-LABEL: v4f32_cmp_uge:
; CHECK: vcmpgtfp [[RET:[0-9]+]], 3, 2
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <4 x float> @v4f32_cmp_ugt(<4 x float> %x, <4 x float> %y) nounwind readnone {
entry:
@@ -542,7 +542,7 @@ entry:
}
; CHECK-LABEL: v4f32_cmp_ugt:
; CHECK: vcmpgefp [[RET:[0-9]+]], 3, 2
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <8 x float> @v8f32_cmp(<8 x float> %x, <8 x float> %y) nounwind readnone {
diff --git a/test/CodeGen/PowerPC/vsx-args.ll b/test/CodeGen/PowerPC/vsx-args.ll
index 252f9b360b96..7fa31aea84ba 100644
--- a/test/CodeGen/PowerPC/vsx-args.ll
+++ b/test/CodeGen/PowerPC/vsx-args.ll
@@ -13,10 +13,10 @@ entry:
ret <2 x double> %v
; CHECK-LABEL: @main
-; CHECK-DAG: vor [[V:[0-9]+]], 2, 2
-; CHECK-DAG: vor 2, 3, 3
-; CHECK-DAG: vor 3, 4, 4
-; CHECK-DAG: vor 4, [[V]], [[V]]
+; CHECK-DAG: vmr [[V:[0-9]+]], 2
+; CHECK-DAG: vmr 2, 3
+; CHECK-DAG: vmr 3, 4
+; CHECK-DAG: vmr 4, [[V]]
; CHECK: bl sv
; CHECK: lxvd2x [[VC:[0-9]+]],
; CHECK: xvadddp 34, 34, [[VC]]
@@ -24,8 +24,8 @@ entry:
; CHECK-FISL-LABEL: @main
; CHECK-FISL: stxvd2x 34
-; CHECK-FISL: vor 2, 3, 3
-; CHECK-FISL: vor 3, 4, 4
+; CHECK-FISL: vmr 2, 3
+; CHECK-FISL: vmr 3, 4
; CHECK-FISL: lxvd2x 36
; CHECK-FISL: bl sv
; CHECK-FISL: lxvd2x [[VC:[0-9]+]],
diff --git a/test/CodeGen/PowerPC/vsx-infl-copy1.ll b/test/CodeGen/PowerPC/vsx-infl-copy1.ll
index 592f85e2bcaf..1d6718279a0d 100644
--- a/test/CodeGen/PowerPC/vsx-infl-copy1.ll
+++ b/test/CodeGen/PowerPC/vsx-infl-copy1.ll
@@ -11,15 +11,15 @@ entry:
br label %vector.body
; CHECK-LABEL: @_Z8example9Pj
-; CHECK: vor
-; CHECK: vor
-; CHECK: vor
-; CHECK: vor
-; CHECK: vor
-; CHECK: vor
-; CHECK: vor
-; CHECK: vor
-; CHECK: vor
+; CHECK: vmr
+; CHECK: vmr
+; CHECK: vmr
+; CHECK: vmr
+; CHECK: vmr
+; CHECK: vmr
+; CHECK: vmr
+; CHECK: vmr
+; CHECK: vmr
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
diff --git a/test/CodeGen/PowerPC/vsx-p9.ll b/test/CodeGen/PowerPC/vsx-p9.ll
index e8a0a3bcf92a..ba359501ccc5 100644
--- a/test/CodeGen/PowerPC/vsx-p9.ll
+++ b/test/CodeGen/PowerPC/vsx-p9.ll
@@ -277,8 +277,8 @@ entry:
%0 = tail call <2 x i64> @llvm.ppc.vsx.xvxexpdp(<2 x double> %a)
ret <2 x i64> %0
; CHECK-LABEL: testXVXEXPDP
-; CHECK xvxexpdp 34, 34
-; CHECK blr
+; CHECK: xvxexpdp 34, 34
+; CHECK: blr
}
; Function Attrs: nounwind readnone
declare <2 x i64>@llvm.ppc.vsx.xvxexpdp(<2 x double>)
@@ -289,8 +289,8 @@ entry:
%0 = tail call <4 x i32> @llvm.ppc.vsx.xvxsigsp(<4 x float> %a)
ret <4 x i32> %0
; CHECK-LABEL: testXVXSIGSP
-; CHECK xvxsigsp 34, 34
-; CHECK blr
+; CHECK: xvxsigsp 34, 34
+; CHECK: blr
}
; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.vsx.xvxsigsp(<4 x float>)
@@ -301,8 +301,8 @@ entry:
%0 = tail call <2 x i64> @llvm.ppc.vsx.xvxsigdp(<2 x double> %a)
ret <2 x i64> %0
; CHECK-LABEL: testXVXSIGDP
-; CHECK xvxsigdp 34, 34
-; CHECK blr
+; CHECK: xvxsigdp 34, 34
+; CHECK: blr
}
; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.vsx.xvxsigdp(<2 x double>)
diff --git a/test/CodeGen/SPARC/mature-mc-support.ll b/test/CodeGen/SPARC/mature-mc-support.ll
index 4ed33098051d..3951ddd604c4 100644
--- a/test/CodeGen/SPARC/mature-mc-support.ll
+++ b/test/CodeGen/SPARC/mature-mc-support.ll
@@ -17,4 +17,4 @@
module asm " .this_directive_is_very_unlikely_to_exist"
-; CHECK: LLVM ERROR: Error parsing inline asm
+; CHECK: error: unknown directive
diff --git a/test/CodeGen/SPARC/register-clobber.ll b/test/CodeGen/SPARC/register-clobber.ll
new file mode 100644
index 000000000000..0ea36df6d015
--- /dev/null
+++ b/test/CodeGen/SPARC/register-clobber.ll
@@ -0,0 +1,35 @@
+; RUN: llc -march=sparc < %s | FileCheck %s
+
+;; Verify that g1 (the output of the first asm) is properly understood to
+;; be clobbered by the call instruction, and moved out of the way
+;; before it. (NOTE: remember delay slot; mov executes before call)
+
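+;; (Concretely, that is why the CHECK lines below expect "mov %g1, %i0"
+;; textually after "call dosomething": the mov sits in the call's delay
+;; slot, so it executes before control transfers to dosomething.)
+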
+; CHECK-LABEL: test1:
+; CHECK: ta 9
+; CHECK: call dosomething
+; CHECK: mov %g1, %i0
+
+define i32 @test1() nounwind {
+entry:
+ %0 = tail call i32 asm sideeffect "ta $1", "={r1},i"(i32 9) nounwind
+ tail call void @dosomething() nounwind
+ ret i32 %0
+}
+
+;; Also check using the value.
+; CHECK-LABEL: test2:
+; CHECK: ta 9
+; CHECK: call dosomething
+; CHECK: mov %g1, %i0
+; CHECK: mov %i0, %g1
+; CHECK: ta 10
+
+define void @test2() local_unnamed_addr nounwind {
+entry:
+ %0 = tail call i32 asm sideeffect "ta $1", "={r1},i"(i32 9) nounwind
+ tail call void @dosomething() nounwind
+ tail call void asm sideeffect "ta $0", "i,{r1}"(i32 10, i32 %0) nounwind
+ ret void
+}
+
+declare void @dosomething() local_unnamed_addr nounwind
diff --git a/test/CodeGen/SPARC/reserved-regs.ll b/test/CodeGen/SPARC/reserved-regs.ll
index fe208015827b..c5a124f538f9 100644
--- a/test/CodeGen/SPARC/reserved-regs.ll
+++ b/test/CodeGen/SPARC/reserved-regs.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=sparc < %s | FileCheck %s
+; RUN: llc -march=sparc -verify-machineinstrs < %s | FileCheck %s
@g = common global [32 x i32] zeroinitializer, align 16
@h = common global [16 x i64] zeroinitializer, align 16
@@ -6,6 +6,7 @@
;; Ensures that we don't use registers which are supposed to be reserved.
; CHECK-LABEL: use_all_i32_regs:
+; CHECK: save %sp
; CHECK-NOT: %g0
; CHECK-NOT: %g1
; CHECK-NOT: %g5
@@ -86,6 +87,7 @@ entry:
; CHECK-LABEL: use_all_i64_regs:
+; CHECK: save %sp
; CHECK-NOT: %g0
; CHECK-NOT: %g1
; CHECK-NOT: %g4
diff --git a/test/CodeGen/SPARC/sjlj.ll b/test/CodeGen/SPARC/sjlj.ll
index 3bf583aa4754..459630f9255f 100755
--- a/test/CodeGen/SPARC/sjlj.ll
+++ b/test/CodeGen/SPARC/sjlj.ll
@@ -66,13 +66,18 @@ return: ; preds = %if.end, %if.then
; CHECK: ba .LBB1_1
; CHECK: nop
; CHECK:.LBB1_1: ! %entry
-; CHECK: ba .LBB1_3
; CHECK: mov %g0, %i0
+; CHECK: ! %entry
+; CHECK: cmp %i0, 0
+; CHECK: be .LBB1_5
+; CHECK: nop
+; CHECK:.LBB1_4:
+; CHECK: mov 1, %i0
+; CHECK: ba .LBB1_6
; CHECK:.LBB1_2: ! Block address taken
; CHECK: mov 1, %i0
-; CHECK:.LBB1_3: ! %entry
; CHECK: cmp %i0, 0
-; CHECK: be .LBB1_5
+; CHECK: bne .LBB1_4
; CHECK: nop
}
declare i8* @llvm.frameaddress(i32) #2
diff --git a/test/CodeGen/SystemZ/DAGCombine_trunc_extract.ll b/test/CodeGen/SystemZ/DAGCombine_trunc_extract.ll
new file mode 100644
index 000000000000..63c1c6363189
--- /dev/null
+++ b/test/CodeGen/SystemZ/DAGCombine_trunc_extract.ll
@@ -0,0 +1,18 @@
+; RUN: llc -mtriple=s390x-linux-gnu -mcpu=zEC12 < %s | FileCheck %s
+;
+; Check that DAGCombiner doesn't crash in SystemZ combineTruncateExtract()
+; when handling EXTRACT_VECTOR_ELT without vector support.
+
+define void @autogen_SD21598(<2 x i8> %Arg) {
+; CHECK: stc %r3, 0(%r1)
+; CHECK: j .LBB0_1
+
+entry:
+ br label %loop
+
+loop: ; preds = %CF249, %CF247
+ %Shuff = shufflevector <2 x i8> undef, <2 x i8> %Arg, <2 x i32> <i32 3, i32 1>
+ %E = extractelement <2 x i8> %Shuff, i32 0
+ store i8 %E, i8* undef
+ br label %loop
+}
diff --git a/test/CodeGen/SystemZ/DAGCombiner_illegal_BUILD_VECTOR.ll b/test/CodeGen/SystemZ/DAGCombiner_illegal_BUILD_VECTOR.ll
new file mode 100644
index 000000000000..3e5757c902cc
--- /dev/null
+++ b/test/CodeGen/SystemZ/DAGCombiner_illegal_BUILD_VECTOR.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+;
+; Check that DAGCombiner does not crash after producing an illegal
+; BUILD_VECTOR node.
+
+
+define void @pr32422() {
+; CHECK: cdbr %f0, %f0
+; CHECK: jo .LBB0_1
+
+BB:
+ %I = insertelement <8 x i8> zeroinitializer, i8 -95, i32 3
+ %I8 = insertelement <8 x i8> zeroinitializer, i8 -119, i32 2
+ %FC = uitofp <8 x i8> %I8 to <8 x float>
+ %Cmp18 = fcmp uno <8 x float> zeroinitializer, %FC
+ %I22 = insertelement <8 x i1> %Cmp18, i1 true, i32 5
+ br label %CF
+
+CF: ; preds = %CF, %BB
+ %Cmp40 = fcmp uno double 0xC663C682E9619F00, undef
+ br i1 %Cmp40, label %CF, label %CF353
+
+CF353: ; preds = %CF
+ %E195 = extractelement <8 x i1> %I22, i32 4
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/expand-zext-pseudo.ll b/test/CodeGen/SystemZ/expand-zext-pseudo.ll
new file mode 100644
index 000000000000..1ee42885cb9c
--- /dev/null
+++ b/test/CodeGen/SystemZ/expand-zext-pseudo.ll
@@ -0,0 +1,132 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -verify-machineinstrs | FileCheck %s
+;
+; Test that a def operand of a super-register is not dropped during
+; post-RA pseudo expansion in expandZExtPseudo().
+
+define void @fun_llvm_stress_reduced(i8*, i32*, i64*, i32) {
+; CHECK: .text
+BB:
+ %A = alloca i32
+ %Sl24 = select i1 undef, i32* %1, i32* %1
+ %L26 = load i16, i16* undef
+ %L32 = load i32, i32* %Sl24
+ br label %CF847
+
+CF847: ; preds = %CF878, %BB
+ %L61 = load i16, i16* undef
+ br label %CF878
+
+CF878: ; preds = %CF847
+ %PC66 = bitcast i32* %Sl24 to double*
+ %Sl67 = select i1 undef, <2 x i32> undef, <2 x i32> undef
+ %Cmp68 = icmp ugt i32 undef, %3
+ br i1 %Cmp68, label %CF847, label %CF863
+
+CF863: ; preds = %CF878
+ %L84 = load i16, i16* undef
+ br label %CF825
+
+CF825: ; preds = %CF825, %CF863
+ %Sl105 = select i1 undef, i1 undef, i1 undef
+ br i1 %Sl105, label %CF825, label %CF856
+
+CF856: ; preds = %CF856, %CF825
+ %Cmp114 = icmp ult i16 -24837, %L61
+ br i1 %Cmp114, label %CF856, label %CF875
+
+CF875: ; preds = %CF856
+ %Shuff124 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 3>
+ %PC126 = bitcast i32* %A to i64*
+ br label %CF827
+
+CF827: ; preds = %CF923, %CF911, %CF875
+ %Sl142 = select i1 undef, i64 undef, i64 -1
+ %B148 = sdiv i32 409071, 409071
+ %E153 = extractelement <2 x i32> %Shuff124, i32 1
+ br label %CF911
+
+CF911: ; preds = %CF827
+ br i1 undef, label %CF827, label %CF867
+
+CF867: ; preds = %CF911
+ br label %CF870
+
+CF870: ; preds = %CF870, %CF867
+ store i8 0, i8* %0
+ %FC176 = fptoui double undef to i1
+ br i1 %FC176, label %CF870, label %CF923
+
+CF923: ; preds = %CF870
+ %L179 = load i16, i16* undef
+ %Sl191 = select i1 undef, i64* %PC126, i64* %PC126
+ br i1 false, label %CF827, label %CF828
+
+CF828: ; preds = %CF905, %CF923
+ %B205 = urem i16 -7553, undef
+ %E209 = extractelement <2 x i32> %Sl67, i32 1
+ %Cmp215 = icmp ugt i16 %L179, 0
+ br label %CF905
+
+CF905: ; preds = %CF828
+ %E231 = extractelement <4 x i1> undef, i32 1
+ br i1 %E231, label %CF828, label %CF829
+
+CF829: ; preds = %CF909, %CF829, %CF905
+ %B234 = udiv i16 %L26, %L84
+ br i1 undef, label %CF829, label %CF894
+
+CF894: ; preds = %CF894, %CF829
+ store i64 %Sl142, i64* %Sl191
+ %Sl241 = select i1 %Cmp114, i1 false, i1 %Cmp215
+ br i1 %Sl241, label %CF894, label %CF907
+
+CF907: ; preds = %CF894
+ %B247 = udiv i32 0, %E153
+ %PC248 = bitcast i64* %2 to i8*
+ br label %CF909
+
+CF909: ; preds = %CF907
+ store i1 %FC176, i1* undef
+ %Cmp263 = icmp ugt i1 undef, %Sl241
+ br i1 %Cmp263, label %CF829, label %CF830
+
+CF830: ; preds = %CF909
+ %B304 = urem i16 %L84, %B205
+ %I311 = insertelement <2 x i32> %Shuff124, i32 %B247, i32 1
+ store i8 0, i8* %0
+ %Sl373 = select i1 %Cmp68, i32 0, i32 %E153
+ br label %CF833
+
+CF833: ; preds = %CF880, %CF830
+ br label %CF880
+
+CF880: ; preds = %CF833
+ %Cmp412 = icmp ne i16 %B234, -18725
+ br i1 %Cmp412, label %CF833, label %CF865
+
+CF865: ; preds = %CF880
+ store double 0.000000e+00, double* %PC66
+ br label %CF860
+
+CF860: ; preds = %CF860, %CF865
+ store i8 0, i8* %PC248
+ %Cmp600 = icmp sge i32 %B148, undef
+ br i1 %Cmp600, label %CF860, label %CF913
+
+CF913: ; preds = %CF860
+ store i32 %E209, i32* undef
+ store i32 %Sl373, i32* undef
+ %Cmp771 = icmp ule i32 undef, %L32
+ br label %CF842
+
+CF842: ; preds = %CF925, %CF913
+ br label %CF925
+
+CF925: ; preds = %CF842
+ %Cmp778 = icmp sgt i1 %Cmp771, %Sl241
+ br i1 %Cmp778, label %CF842, label %CF898
+
+CF898: ; preds = %CF925
+ %Sl785 = select i1 %Cmp600, i16 undef, i16 %B304
+ unreachable
+}
diff --git a/test/CodeGen/SystemZ/extract-vector-elt-zEC12.ll b/test/CodeGen/SystemZ/extract-vector-elt-zEC12.ll
new file mode 100644
index 000000000000..7bfe5ac8c1a5
--- /dev/null
+++ b/test/CodeGen/SystemZ/extract-vector-elt-zEC12.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=zEC12 | FileCheck %s
+;
+; Test that <1 x i8> is legalized properly without vector support.
+
+define void @autogen_SD18500(i8*) {
+; CHECK: .text
+BB:
+ %L5 = load i8, i8* %0
+ %I22 = insertelement <1 x i8> undef, i8 %L5, i32 0
+ %Cmp53 = icmp ule i1 undef, undef
+ br label %CF244
+
+CF244: ; preds = %CF244, %BB
+ %Sl119 = select i1 %Cmp53, <1 x i8> %I22, <1 x i8> undef
+ %Cmp148 = fcmp une float 0x3E03A81780000000, 0x42D92DCD00000000
+ br i1 %Cmp148, label %CF244, label %CF241
+
+CF241: ; preds = %CF241, %CF244
+ %Sl199 = select i1 true, <1 x i8> %Sl119, <1 x i8> zeroinitializer
+ br label %CF241
+}
diff --git a/test/CodeGen/SystemZ/fold-memory-op-impl.ll b/test/CodeGen/SystemZ/fold-memory-op-impl.ll
new file mode 100644
index 000000000000..dda4df90d1b9
--- /dev/null
+++ b/test/CodeGen/SystemZ/fold-memory-op-impl.ll
@@ -0,0 +1,129 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -verify-machineinstrs | FileCheck %s
+;
+; Test that foldMemoryOperandImpl() doesn't drop subreg / read-undef flags.
+
+
+define void @fun_llvm_stress_reduced(i8*, i32*, i64*, i64, i8) {
+; CHECK: .text
+BB:
+ %A4 = alloca <4 x i64>
+ %A1 = alloca <8 x i1>
+ %E6 = extractelement <4 x i1> undef, i32 3
+ %L23 = load i8, i8* %0
+ %B27 = fmul double 0x59A989483BA7E0C6, undef
+ %L30 = load i16, i16* undef
+ store i16 -11933, i16* undef
+ %L46 = load i16, i16* undef
+ %L61 = load i16, i16* undef
+ %Sl74 = select i1 undef, i1 undef, i1 true
+ br label %CF846
+
+CF846: ; preds = %CF877, %BB
+ %I86 = insertelement <4 x i1> undef, i1 undef, i32 0
+ %Cmp89 = icmp ne i64 undef, %3
+ %L90 = load i16, i16* undef
+ %Shuff92 = shufflevector <4 x i16> zeroinitializer, <4 x i16> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 undef, i32 6>
+ br label %CF877
+
+CF877: ; preds = %CF846
+ store i16 %L61, i16* undef
+ %Cmp110 = icmp eq i16 %L61, undef
+ br i1 %Cmp110, label %CF846, label %CF862
+
+CF862: ; preds = %CF877
+ %I114 = insertelement <4 x i64> zeroinitializer, i64 0, i32 0
+ %B115 = shl <4 x i64> zeroinitializer, %I114
+ %Sl124 = select i1 true, <8 x i1>* %A1, <8 x i1>* %A1
+ %B130 = frem double %B27, 0x59A989483BA7E0C6
+ %E143 = extractelement <4 x i64> %B115, i32 1
+ %Sl148 = select i1 %Cmp89, <1 x i32> undef, <1 x i32> zeroinitializer
+ br label %CF855
+
+CF855: ; preds = %CF855, %CF862
+ %Sl171 = select i1 %Sl74, i1 %E6, i1 undef
+ br i1 %Sl171, label %CF855, label %CF874
+
+CF874: ; preds = %CF855
+ %PC186 = bitcast i32* %1 to i16*
+ %L196 = load i16, i16* undef
+ %B207 = or i8 %4, %L23
+ %L211 = load <8 x i1>, <8 x i1>* %Sl124
+ %B215 = fdiv double 0x8421A9C0D21F6D3E, %B130
+ %L218 = load i16, i16* %PC186
+ %Sl223 = select i1 %Sl171, <4 x i1> %I86, <4 x i1> undef
+ br label %CF826
+
+CF826: ; preds = %CF866, %CF910, %CF874
+ %B245 = ashr i16 -11933, %L46
+ br label %CF910
+
+CF910: ; preds = %CF826
+ %L257 = load i8, i8* %0
+ %BC262 = bitcast i64 %E143 to double
+ store i16 %L196, i16* %PC186
+ %E266 = extractelement <4 x i16> %Shuff92, i32 0
+ %Sl271 = select i1 %Cmp89, i1 %Cmp89, i1 %Cmp110
+ br i1 %Sl271, label %CF826, label %CF866
+
+CF866: ; preds = %CF910
+ store i64 %E143, i64* %2
+ %I276 = insertelement <4 x double> undef, double %BC262, i32 3
+ %L281 = load <8 x i1>, <8 x i1>* %Sl124
+ %E282 = extractelement <4 x i1> zeroinitializer, i32 2
+ br i1 %E282, label %CF826, label %CF848
+
+CF848: ; preds = %CF866
+ %Cmp288 = fcmp olt <4 x double> undef, %I276
+ %FC294 = fptosi double undef to i16
+ %Cmp296 = icmp ule i16 %FC294, %B245
+ store i16 %L218, i16* undef
+ store i8 %L23, i8* %0
+ %E320 = extractelement <4 x i1> %Sl223, i32 1
+ %PC337 = bitcast <8 x i1>* %Sl124 to i1*
+ %Cmp345 = icmp uge <1 x i32> undef, %Sl148
+ store i16 %L196, i16* %PC186
+ br label %CF893
+
+CF893: ; preds = %CF893, %CF848
+ %Cmp361 = fcmp uge float undef, undef
+ br i1 %Cmp361, label %CF893, label %CF906
+
+CF906: ; preds = %CF893
+ store i16 -11933, i16* undef
+ %Shuff379 = shufflevector <1 x i1> undef, <1 x i1> %Cmp345, <1 x i32> <i32 1>
+ br label %CF850
+
+CF850: ; preds = %CF850, %CF906
+ br i1 undef, label %CF850, label %CF925
+
+CF925: ; preds = %CF850
+ store i16 %E266, i16* %PC186
+ %Cmp413 = icmp ugt i8 %L257, undef
+ store i16 %L30, i16* %PC186
+ %Sl420 = select i1 %Sl171, <8 x i1> undef, <8 x i1> %L281
+ store i16 %L90, i16* undef
+ %FC469 = uitofp i1 %Cmp296 to float
+ store i1 %Cmp413, i1* %PC337
+ br label %CF833
+
+CF833: ; preds = %CF833, %CF925
+ store i8 %B207, i8* %0
+ %E509 = extractelement <8 x i1> %L211, i32 7
+ br i1 %E509, label %CF833, label %CF882
+
+CF882: ; preds = %CF833
+ store i1 %Sl271, i1* %PC337
+ br label %CF852
+
+CF852: ; preds = %CF896, %CF882
+ store i1 %Sl74, i1* %PC337
+ br label %CF896
+
+CF896: ; preds = %CF852
+ %E576 = extractelement <4 x i1> %Cmp288, i32 3
+ br i1 %E576, label %CF852, label %CF890
+
+CF890: ; preds = %CF896
+ %Sl581 = select i1 undef, float undef, float %FC469
+ unreachable
+}
diff --git a/test/CodeGen/SystemZ/fp-cmp-05.ll b/test/CodeGen/SystemZ/fp-cmp-05.ll
index 92b5056cfbbe..d25c8e78cc3e 100644
--- a/test/CodeGen/SystemZ/fp-cmp-05.ll
+++ b/test/CodeGen/SystemZ/fp-cmp-05.ll
@@ -9,7 +9,7 @@
; Test f32
define float @f1(float %a, float %b, float %f) {
; CHECK-LABEL: f1:
-; CHECK: lcebr
+; CHECK: ltebr
; CHECK-NEXT: ber %r14
%neg = fsub float -0.0, %f
%cond = fcmp oeq float %neg, 0.0
@@ -20,7 +20,7 @@ define float @f1(float %a, float %b, float %f) {
; Test f64
define double @f2(double %a, double %b, double %f) {
; CHECK-LABEL: f2:
-; CHECK: lcdbr
+; CHECK: ltdbr
; CHECK-NEXT: ber %r14
%neg = fsub double -0.0, %f
%cond = fcmp oeq double %neg, 0.0
@@ -33,7 +33,7 @@ define double @f2(double %a, double %b, double %f) {
declare float @llvm.fabs.f32(float %f)
define float @f3(float %a, float %b, float %f) {
; CHECK-LABEL: f3:
-; CHECK: lnebr
+; CHECK: lpebr
; CHECK-NEXT: ber %r14
%abs = call float @llvm.fabs.f32(float %f)
%neg = fsub float -0.0, %abs
@@ -46,7 +46,7 @@ define float @f3(float %a, float %b, float %f) {
declare double @llvm.fabs.f64(double %f)
define double @f4(double %a, double %b, double %f) {
; CHECK-LABEL: f4:
-; CHECK: lndbr
+; CHECK: lpdbr
; CHECK-NEXT: ber %r14
%abs = call double @llvm.fabs.f64(double %f)
%neg = fsub double -0.0, %abs
diff --git a/test/CodeGen/SystemZ/int-cmp-44.ll b/test/CodeGen/SystemZ/int-cmp-44.ll
index 1b9a4ae353fe..85a8788a3bdd 100644
--- a/test/CodeGen/SystemZ/int-cmp-44.ll
+++ b/test/CodeGen/SystemZ/int-cmp-44.ll
@@ -473,8 +473,8 @@ entry:
%xor = xor i32 %val, 1
%add = add i32 %xor, 1000000
call void @foo()
- %cmp = icmp ne i32 %add, 0
- br i1 %cmp, label %exit, label %store
+ %cmp = icmp eq i32 %add, 0
+ br i1 %cmp, label %store, label %exit, !prof !1
store:
store i32 %add, i32 *%ptr
@@ -888,3 +888,5 @@ store:
exit:
ret i64 %res
}
+
+!1 = !{!"branch_weights", i32 2, i32 1}
diff --git a/test/CodeGen/SystemZ/locr-legal-regclass.ll b/test/CodeGen/SystemZ/locr-legal-regclass.ll
new file mode 100644
index 000000000000..1f792439a49c
--- /dev/null
+++ b/test/CodeGen/SystemZ/locr-legal-regclass.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=zEC12 -verify-machineinstrs | FileCheck %s
+;
+; Test that early if-conversion produces LOCR with operands of the right
+; register classes.
+
+define void @autogen_SD4739(i8*) {
+; CHECK-NOT: Expected a GR32Bit register, but got a GRX32Bit register
+BB:
+ %L34 = load i8, i8* %0
+ %Cmp56 = icmp sgt i8 undef, %L34
+ br label %CF246
+
+CF246: ; preds = %CF246, %BB
+ %Sl163 = select i1 %Cmp56, i8 %L34, i8 undef
+ br i1 undef, label %CF246, label %CF248
+
+CF248: ; preds = %CF248, %CF246
+ store i8 %Sl163, i8* %0
+ br label %CF248
+}
diff --git a/test/CodeGen/SystemZ/mature-mc-support.ll b/test/CodeGen/SystemZ/mature-mc-support.ll
index 5520f55e1e29..a01716c27670 100644
--- a/test/CodeGen/SystemZ/mature-mc-support.ll
+++ b/test/CodeGen/SystemZ/mature-mc-support.ll
@@ -12,4 +12,4 @@
module asm " .this_directive_is_very_unlikely_to_exist"
-; CHECK: LLVM ERROR: Error parsing inline asm
+; CHECK: error: unknown directive
diff --git a/test/CodeGen/SystemZ/memchr-01.ll b/test/CodeGen/SystemZ/memchr-01.ll
index f4d381b37f26..0cfca2af1e98 100644
--- a/test/CodeGen/SystemZ/memchr-01.ll
+++ b/test/CodeGen/SystemZ/memchr-01.ll
@@ -1,21 +1,57 @@
-; Test memchr using SRST, with a weird but usable prototype.
+; Test memchr using SRST, with the correct prototype.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
-declare i8 *@memchr(i8 *%src, i16 %char, i32 %len)
+declare i8 *@memchr(i8 *%src, i32 %char, i64 %len)
; Test a simple forwarded call.
-define i8 *@f1(i8 *%src, i16 %char, i32 %len) {
+define i8 *@f1(i64 %len, i8 *%src, i32 %char) {
; CHECK-LABEL: f1:
-; CHECK-DAG: lgr [[REG:%r[1-5]]], %r2
-; CHECK-DAG: algfr %r2, %r4
-; CHECK-DAG: llcr %r0, %r3
+; CHECK-DAG: agr %r2, %r3
+; CHECK-DAG: llcr %r0, %r4
; CHECK: [[LABEL:\.[^:]*]]:
-; CHECK: srst %r2, [[REG]]
+; CHECK: srst %r2, %r3
; CHECK-NEXT: jo [[LABEL]]
; CHECK: blr %r14
; CHECK: lghi %r2, 0
; CHECK: br %r14
- %res = call i8 *@memchr(i8 *%src, i16 %char, i32 %len)
+ %res = call i8 *@memchr(i8 *%src, i32 %char, i64 %len)
ret i8 *%res
}
+
+; Test a doubled call with no use of %r0 in between. There should be a
+; single load of %r0.
+define i8 *@f2(i8 *%src, i8 *%charptr, i64 %len) {
+; CHECK-LABEL: f2:
+; CHECK: llc %r0, 0(%r3)
+; CHECK-NOT: %r0
+; CHECK: srst [[RES1:%r[1-5]]], %r2
+; CHECK-NOT: %r0
+; CHECK: srst %r2, [[RES1]]
+; CHECK: br %r14
+ %char = load volatile i8 , i8 *%charptr
+ %charext = zext i8 %char to i32
+ %res1 = call i8 *@memchr(i8 *%src, i32 %charext, i64 %len)
+ %res2 = call i8 *@memchr(i8 *%res1, i32 %charext, i64 %len)
+ ret i8 *%res2
+}
+
+; Test a doubled call with a use of %r0 in between. %r0 must be loaded
+; for each loop.
+define i8 *@f3(i8 *%src, i8 *%charptr, i64 %len) {
+; CHECK-LABEL: f3:
+; CHECK: llc [[CHAR:%r[1-5]]], 0(%r3)
+; CHECK: lr %r0, [[CHAR]]
+; CHECK: srst [[RES1:%r[1-5]]], %r2
+; CHECK: lhi %r0, 0
+; CHECK: blah %r0
+; CHECK: lr %r0, [[CHAR]]
+; CHECK: srst %r2, [[RES1]]
+; CHECK: br %r14
+ %char = load volatile i8 , i8 *%charptr
+ %charext = zext i8 %char to i32
+ %res1 = call i8 *@memchr(i8 *%src, i32 %charext, i64 %len)
+ call void asm sideeffect "blah $0", "{r0}" (i32 0)
+ %res2 = call i8 *@memchr(i8 *%res1, i32 %charext, i64 %len)
+ ret i8 *%res2
+}
diff --git a/test/CodeGen/SystemZ/memchr-02.ll b/test/CodeGen/SystemZ/memchr-02.ll
deleted file mode 100644
index 0cfca2af1e98..000000000000
--- a/test/CodeGen/SystemZ/memchr-02.ll
+++ /dev/null
@@ -1,57 +0,0 @@
-; Test memchr using SRST, with the correct prototype.
-;
-; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
-
-declare i8 *@memchr(i8 *%src, i32 %char, i64 %len)
-
-; Test a simple forwarded call.
-define i8 *@f1(i64 %len, i8 *%src, i32 %char) {
-; CHECK-LABEL: f1:
-; CHECK-DAG: agr %r2, %r3
-; CHECK-DAG: llcr %r0, %r4
-; CHECK: [[LABEL:\.[^:]*]]:
-; CHECK: srst %r2, %r3
-; CHECK-NEXT: jo [[LABEL]]
-; CHECK: blr %r14
-; CHECK: lghi %r2, 0
-; CHECK: br %r14
- %res = call i8 *@memchr(i8 *%src, i32 %char, i64 %len)
- ret i8 *%res
-}
-
-; Test a doubled call with no use of %r0 in between. There should be a
-; single load of %r0.
-define i8 *@f2(i8 *%src, i8 *%charptr, i64 %len) {
-; CHECK-LABEL: f2:
-; CHECK: llc %r0, 0(%r3)
-; CHECK-NOT: %r0
-; CHECK: srst [[RES1:%r[1-5]]], %r2
-; CHECK-NOT: %r0
-; CHECK: srst %r2, [[RES1]]
-; CHECK: br %r14
- %char = load volatile i8 , i8 *%charptr
- %charext = zext i8 %char to i32
- %res1 = call i8 *@memchr(i8 *%src, i32 %charext, i64 %len)
- %res2 = call i8 *@memchr(i8 *%res1, i32 %charext, i64 %len)
- ret i8 *%res2
-}
-
-; Test a doubled call with a use of %r0 in between. %r0 must be loaded
-; for each loop.
-define i8 *@f3(i8 *%src, i8 *%charptr, i64 %len) {
-; CHECK-LABEL: f3:
-; CHECK: llc [[CHAR:%r[1-5]]], 0(%r3)
-; CHECK: lr %r0, [[CHAR]]
-; CHECK: srst [[RES1:%r[1-5]]], %r2
-; CHECK: lhi %r0, 0
-; CHECK: blah %r0
-; CHECK: lr %r0, [[CHAR]]
-; CHECK: srst %r2, [[RES1]]
-; CHECK: br %r14
- %char = load volatile i8 , i8 *%charptr
- %charext = zext i8 %char to i32
- %res1 = call i8 *@memchr(i8 *%src, i32 %charext, i64 %len)
- call void asm sideeffect "blah $0", "{r0}" (i32 0)
- %res2 = call i8 *@memchr(i8 *%res1, i32 %charext, i64 %len)
- ret i8 *%res2
-}
diff --git a/test/CodeGen/SystemZ/memcmp-02.ll b/test/CodeGen/SystemZ/memcmp-02.ll
deleted file mode 100644
index da11170def79..000000000000
--- a/test/CodeGen/SystemZ/memcmp-02.ll
+++ /dev/null
@@ -1,139 +0,0 @@
-; Test memcmp using CLC, with i64 results.
-;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
-
-declare i64 @memcmp(i8 *%src1, i8 *%src2, i64 %size)
-
-; Zero-length comparisons should be optimized away.
-define i64 @f1(i8 *%src1, i8 *%src2) {
-; CHECK-LABEL: f1:
-; CHECK: lghi %r2, 0
-; CHECK: br %r14
- %res = call i64 @memcmp(i8 *%src1, i8 *%src2, i64 0)
- ret i64 %res
-}
-
-; Check a case where the result is used as an integer.
-define i64 @f2(i8 *%src1, i8 *%src2) {
-; CHECK-LABEL: f2:
-; CHECK: clc 0(2,%r2), 0(%r3)
-; CHECK: ipm [[REG:%r[0-5]]]
-; CHECK: srl [[REG]], 28
-; CHECK: rll [[REG]], [[REG]], 31
-; CHECK: lgfr %r2, [[REG]]
-; CHECK: br %r14
- %res = call i64 @memcmp(i8 *%src1, i8 *%src2, i64 2)
- ret i64 %res
-}
-
-; Check a case where the result is tested for equality.
-define void @f3(i8 *%src1, i8 *%src2, i64 *%dest) {
-; CHECK-LABEL: f3:
-; CHECK: clc 0(3,%r2), 0(%r3)
-; CHECK-NEXT: ber %r14
-; CHECK: br %r14
- %res = call i64 @memcmp(i8 *%src1, i8 *%src2, i64 3)
- %cmp = icmp eq i64 %res, 0
- br i1 %cmp, label %exit, label %store
-
-store:
- store i64 0, i64 *%dest
- br label %exit
-
-exit:
- ret void
-}
-
-; Check a case where the result is tested for inequality.
-define void @f4(i8 *%src1, i8 *%src2, i64 *%dest) {
-; CHECK-LABEL: f4:
-; CHECK: clc 0(4,%r2), 0(%r3)
-; CHECK-NEXT: blhr %r14
-; CHECK: br %r14
-entry:
- %res = call i64 @memcmp(i8 *%src1, i8 *%src2, i64 4)
- %cmp = icmp ne i64 %res, 0
- br i1 %cmp, label %exit, label %store
-
-store:
- store i64 0, i64 *%dest
- br label %exit
-
-exit:
- ret void
-}
-
-; Check a case where the result is tested via slt.
-define void @f5(i8 *%src1, i8 *%src2, i64 *%dest) {
-; CHECK-LABEL: f5:
-; CHECK: clc 0(5,%r2), 0(%r3)
-; CHECK-NEXT: blr %r14
-; CHECK: br %r14
-entry:
- %res = call i64 @memcmp(i8 *%src1, i8 *%src2, i64 5)
- %cmp = icmp slt i64 %res, 0
- br i1 %cmp, label %exit, label %store
-
-store:
- store i64 0, i64 *%dest
- br label %exit
-
-exit:
- ret void
-}
-
-; Check a case where the result is tested for sgt.
-define void @f6(i8 *%src1, i8 *%src2, i64 *%dest) {
-; CHECK-LABEL: f6:
-; CHECK: clc 0(6,%r2), 0(%r3)
-; CHECK-NEXT: bhr %r14
-; CHECK: br %r14
-entry:
- %res = call i64 @memcmp(i8 *%src1, i8 *%src2, i64 6)
- %cmp = icmp sgt i64 %res, 0
- br i1 %cmp, label %exit, label %store
-
-store:
- store i64 0, i64 *%dest
- br label %exit
-
-exit:
- ret void
-}
-
-; Check the upper end of the CLC range. Here the result is used both as
-; an integer and for branching.
-define i64 @f7(i8 *%src1, i8 *%src2, i64 *%dest) {
-; CHECK-LABEL: f7:
-; CHECK: clc 0(256,%r2), 0(%r3)
-; CHECK: ipm [[REG:%r[0-5]]]
-; CHECK: srl [[REG]], 28
-; CHECK: rll [[REG]], [[REG]], 31
-; CHECK: lgfr %r2, [[REG]]
-; CHECK: blr %r14
-; CHECK: br %r14
-entry:
- %res = call i64 @memcmp(i8 *%src1, i8 *%src2, i64 256)
- %cmp = icmp slt i64 %res, 0
- br i1 %cmp, label %exit, label %store
-
-store:
- store i64 0, i64 *%dest
- br label %exit
-
-exit:
- ret i64 %res
-}
-
-; 257 bytes needs two CLCs.
-define i64 @f8(i8 *%src1, i8 *%src2) {
-; CHECK-LABEL: f8:
-; CHECK: clc 0(256,%r2), 0(%r3)
-; CHECK: jlh [[LABEL:\..*]]
-; CHECK: clc 256(1,%r2), 256(%r3)
-; CHECK: [[LABEL]]:
-; CHECK: ipm [[REG:%r[0-5]]]
-; CHECK: br %r14
- %res = call i64 @memcmp(i8 *%src1, i8 *%src2, i64 257)
- ret i64 %res
-}
diff --git a/test/CodeGen/SystemZ/pr32372.ll b/test/CodeGen/SystemZ/pr32372.ll
new file mode 100644
index 000000000000..c18e238fbaf9
--- /dev/null
+++ b/test/CodeGen/SystemZ/pr32372.ll
@@ -0,0 +1,31 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc %s -o - -mtriple=s390x-linux-gnu | FileCheck %s
+
+define void @pr32372(i8*) {
+; CHECK-LABEL: pr32372:
+; CHECK: # BB#0: # %BB
+; CHECK-NEXT: llc %r1, 0(%r2)
+; CHECK-NEXT: mvhhi 0(%r1), -3825
+; CHECK-NEXT: llill %r0, 0
+; CHECK-NEXT: dlr %r0, %r1
+; CHECK-NEXT: .LBB0_1: # %CF251
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: j .LBB0_1
+BB:
+ %L = load i8, i8* %0
+ store i16 -3825, i16* undef
+ %L5 = load i8, i8* %0
+ %B9 = urem i8 %L5, %L
+ %I107 = insertelement <8 x i8> zeroinitializer, i8 %B9, i32 7
+ %ZE141 = zext i8 %L5 to i16
+ br label %CF251
+
+CF251: ; preds = %CF258, %CF251, %BB
+ %Shuff217 = shufflevector <8 x i8> zeroinitializer, <8 x i8> %I107, <8 x i32> <i32 0, i32 2, i32 undef, i32 6, i32 8, i32 undef, i32 12, i32 14>
+ %Cmp227 = icmp sge i16 %ZE141, 0
+ br i1 %Cmp227, label %CF251, label %CF258
+
+CF258: ; preds = %CF251
+ %Shuff230 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 1>
+ br label %CF251
+}
diff --git a/test/CodeGen/SystemZ/pr32505.ll b/test/CodeGen/SystemZ/pr32505.ll
new file mode 100644
index 000000000000..6abad0220164
--- /dev/null
+++ b/test/CodeGen/SystemZ/pr32505.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mcpu=zEC12 -o - %s | FileCheck %s
+
+target triple = "s390x-ibm-linux"
+
+define <2 x float> @pr32505(<2 x i8> * %a) {
+; CHECK-LABEL: pr32505:
+; CHECK: # BB#0:
+; CHECK-NEXT: lbh %r0, 0(%r2)
+; CHECK-NEXT: ldgr %f0, %r0
+; CHECK-NEXT: lbh %r0, 1(%r2)
+; CHECK-NEXT: ldgr %f2, %r0
+; CHECK-NEXT: # kill: %F0S<def> %F0S<kill> %F0D<kill>
+; CHECK-NEXT: # kill: %F2S<def> %F2S<kill> %F2D<kill>
+; CHECK-NEXT: br %r14
+ %L17 = load <2 x i8>, <2 x i8>* %a
+ %Se21 = sext <2 x i8> %L17 to <2 x i32>
+ %BC = bitcast <2 x i32> %Se21 to <2 x float>
+ ret <2 x float> %BC
+}
diff --git a/test/CodeGen/SystemZ/splitMove_undefReg_mverifier.ll b/test/CodeGen/SystemZ/splitMove_undefReg_mverifier.ll
new file mode 100644
index 000000000000..db6e3653b506
--- /dev/null
+++ b/test/CodeGen/SystemZ/splitMove_undefReg_mverifier.ll
@@ -0,0 +1,413 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -verify-machineinstrs | FileCheck %s
+;
+; Regression test for a machine verifier complaint discovered with llvm-stress.
+; Test that splitting a 128-bit store does not result in a use of an undef phys reg.
+
+define void @autogen_SD29355(i8*, i32*, i64*, i32, i64, i8) {
+; CHECK: .text
+BB:
+ %A4 = alloca double
+ %A3 = alloca float
+ %A2 = alloca i8
+ %A1 = alloca double
+ %A = alloca i64
+ %L = load i8, i8* %0
+ store i8 33, i8* %0
+ %E = extractelement <8 x i1> zeroinitializer, i32 2
+ br label %CF261
+
+CF261: ; preds = %BB
+ %Shuff = shufflevector <2 x i16> zeroinitializer, <2 x i16> zeroinitializer, <2 x i32> <i32 undef, i32 3>
+ %I = insertelement <8 x i8> zeroinitializer, i8 69, i32 3
+ %B = udiv i8 -99, 33
+ %Tr = trunc i64 -1 to i32
+ %Sl = select i1 true, i64* %2, i64* %2
+ %L5 = load i64, i64* %Sl
+ store i64 %L5, i64* %2
+ %E6 = extractelement <4 x i16> zeroinitializer, i32 3
+ %Shuff7 = shufflevector <4 x i16> zeroinitializer, <4 x i16> zeroinitializer, <4 x i32> <i32 6, i32 0, i32 2, i32 4>
+ %I8 = insertelement <4 x i16> %Shuff7, i16 27357, i32 0
+ %B9 = xor <4 x i16> %Shuff7, %Shuff7
+ %Tr10 = trunc i64 %4 to i1
+ br label %CF239
+
+CF239: ; preds = %CF261
+ %Sl11 = select i1 %Tr10, i16 -1, i16 27357
+ %L12 = load i8, i8* %0
+ store i64 %L5, i64* %A
+ %E13 = extractelement <8 x i1> zeroinitializer, i32 0
+ br label %CF238
+
+CF238: ; preds = %CF238, %CF239
+ %Shuff14 = shufflevector <4 x i16> zeroinitializer, <4 x i16> zeroinitializer, <4 x i32> <i32 undef, i32 5, i32 7, i32 1>
+ %I15 = insertelement <4 x i16> %Shuff7, i16 -1, i32 1
+ %B16 = fsub double 0xDACBFCEAC1C99968, 0xDACBFCEAC1C99968
+ %Sl17 = select i1 %E, i64* %Sl, i64* %Sl
+ %Cmp = icmp ugt i16 %E6, 27357
+ br i1 %Cmp, label %CF238, label %CF251
+
+CF251: ; preds = %CF238
+ %L18 = load i64, i64* %Sl17
+ store i64 0, i64* %Sl
+ %E19 = extractelement <4 x i16> zeroinitializer, i32 1
+ %Shuff20 = shufflevector <2 x i1> zeroinitializer, <2 x i1> zeroinitializer, <2 x i32> <i32 undef, i32 2>
+ %I21 = insertelement <2 x i1> zeroinitializer, i1 true, i32 0
+ %FC = fptoui float 0x3BE9BD7D80000000 to i1
+ br label %CF237
+
+CF237: ; preds = %CF237, %CF271, %CF268, %CF251
+ %Sl22 = select i1 true, i16 -1, i16 %E6
+ %Cmp23 = icmp sgt i1 %E13, true
+ br i1 %Cmp23, label %CF237, label %CF256
+
+CF256: ; preds = %CF256, %CF237
+ %L24 = load i64, i64* %A
+ store i64 %L5, i64* %Sl17
+ %E25 = extractelement <4 x i16> zeroinitializer, i32 3
+ %Shuff26 = shufflevector <4 x i16> %Shuff7, <4 x i16> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 undef>
+ %I27 = insertelement <4 x i16> zeroinitializer, i16 %Sl22, i32 0
+ %B28 = udiv i16 %Sl11, -1
+ %ZE = zext i1 true to i32
+ %Sl29 = select i1 true, i8 -99, i8 33
+ %Cmp30 = fcmp ord double 0xC275146F92573C4, 0x16FB351AF5F9C998
+ br i1 %Cmp30, label %CF256, label %CF271
+
+CF271: ; preds = %CF256
+ %L31 = load i8, i8* %0
+ store i64 %L5, i64* %Sl
+ %E32 = extractelement <4 x i16> zeroinitializer, i32 2
+ %Shuff33 = shufflevector <1 x i32> zeroinitializer, <1 x i32> zeroinitializer, <1 x i32> <i32 1>
+ %I34 = insertelement <4 x i16> zeroinitializer, i16 %Sl11, i32 1
+ %PC = bitcast double* %A4 to i1*
+ %Sl35 = select i1 %FC, i32* %1, i32* %1
+ %Cmp36 = icmp ult <2 x i1> %Shuff20, %Shuff20
+ %L37 = load i64, i64* %Sl
+ store i64 %L5, i64* %Sl
+ %E38 = extractelement <2 x i32> zeroinitializer, i32 0
+ %Shuff39 = shufflevector <4 x i16> zeroinitializer, <4 x i16> %Shuff7, <4 x i32> <i32 undef, i32 1, i32 3, i32 undef>
+ %I40 = insertelement <4 x i16> %Shuff7, i16 %E19, i32 1
+ %ZE41 = zext i1 true to i16
+ %Sl42 = select i1 true, i1 true, i1 true
+ br i1 %Sl42, label %CF237, label %CF246
+
+CF246: ; preds = %CF246, %CF271
+ %Cmp43 = icmp uge i64 %L37, %L18
+ br i1 %Cmp43, label %CF246, label %CF249
+
+CF249: ; preds = %CF249, %CF263, %CF246
+ %L44 = load i64, i64* %A
+ store i64 %L5, i64* %Sl17
+ %E45 = extractelement <4 x i16> %Shuff14, i32 2
+ %Shuff46 = shufflevector <1 x i32> zeroinitializer, <1 x i32> zeroinitializer, <1 x i32> <i32 1>
+ %I47 = insertelement <4 x i16> %Shuff7, i16 %E6, i32 1
+ %Sl48 = select i1 %FC, double 0xDACBFCEAC1C99968, double 0xDACBFCEAC1C99968
+ %Cmp49 = fcmp ult double 0x9E8F85AE4F8D6C2C, 0x5A7FED9E637D2C1C
+ br i1 %Cmp49, label %CF249, label %CF263
+
+CF263: ; preds = %CF249
+ %L50 = load i64, i64* %Sl
+ store i1 true, i1* %PC
+ %E51 = extractelement <2 x i1> zeroinitializer, i32 0
+ br i1 %E51, label %CF249, label %CF259
+
+CF259: ; preds = %CF259, %CF263
+ %Shuff52 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 5, i32 7, i32 1>
+ %I53 = insertelement <4 x i16> zeroinitializer, i16 -1, i32 1
+ %B54 = or <2 x i16> %Shuff, zeroinitializer
+ %Sl55 = select i1 %Sl42, i16 %Sl22, i16 27357
+ %Cmp56 = icmp uge i1 %Sl42, true
+ br i1 %Cmp56, label %CF259, label %CF268
+
+CF268: ; preds = %CF259
+ %L57 = load i8, i8* %0
+ store i64 %L5, i64* %Sl
+ %E58 = extractelement <4 x i16> %Shuff14, i32 1
+ %Shuff59 = shufflevector <1 x i32> %Shuff33, <1 x i32> %Shuff33, <1 x i32> zeroinitializer
+ %I60 = insertelement <2 x i1> %Shuff20, i1 true, i32 0
+ %B61 = frem double 0x5A7FED9E637D2C1C, %B16
+ %FC62 = sitofp i8 -99 to float
+ %Sl63 = select i1 true, i16 %E19, i16 -1
+ %Cmp64 = icmp slt i16 %Sl63, 27357
+ br i1 %Cmp64, label %CF237, label %CF241
+
+CF241: ; preds = %CF241, %CF265, %CF268
+ %L65 = load i1, i1* %PC
+ br i1 %L65, label %CF241, label %CF262
+
+CF262: ; preds = %CF262, %CF270, %CF241
+ store i64 %L37, i64* %Sl
+ %E66 = extractelement <4 x i16> %Shuff14, i32 2
+ %Shuff67 = shufflevector <4 x i16> %Shuff26, <4 x i16> %Shuff7, <4 x i32> <i32 1, i32 3, i32 undef, i32 7>
+ %I68 = insertelement <2 x i32> zeroinitializer, i32 454413, i32 1
+ %B69 = sub <4 x i16> %I8, %Shuff7
+ %Tr70 = trunc i16 %E32 to i1
+ br i1 %Tr70, label %CF262, label %CF270
+
+CF270: ; preds = %CF262
+ %Sl71 = select i1 %Sl42, <8 x i1> zeroinitializer, <8 x i1> zeroinitializer
+ %Cmp72 = icmp sge <2 x i16> %B54, zeroinitializer
+ %L73 = load i64, i64* %Sl
+ store i64 %L73, i64* %Sl
+ %E74 = extractelement <8 x i1> %Sl71, i32 5
+ br i1 %E74, label %CF262, label %CF265
+
+CF265: ; preds = %CF270
+ %Shuff75 = shufflevector <2 x i32> %I68, <2 x i32> zeroinitializer, <2 x i32> <i32 undef, i32 2>
+ %I76 = insertelement <2 x i1> %Cmp72, i1 %Sl42, i32 0
+ %B77 = xor i16 27357, %B28
+ %PC78 = bitcast i1* %PC to i32*
+ %Sl79 = select i1 %Cmp64, <4 x i16> %Shuff14, <4 x i16> %Shuff7
+ %Cmp80 = icmp slt <2 x i1> zeroinitializer, %Shuff20
+ %L81 = load i1, i1* %PC
+ br i1 %L81, label %CF241, label %CF245
+
+CF245: ; preds = %CF245, %CF265
+ store i1 true, i1* %PC
+ %E82 = extractelement <1 x i32> %Shuff33, i32 0
+ %Shuff83 = shufflevector <4 x i16> zeroinitializer, <4 x i16> %Shuff14, <4 x i32> <i32 2, i32 4, i32 6, i32 0>
+ %I84 = insertelement <2 x i1> %Shuff20, i1 %Sl42, i32 0
+ %FC85 = uitofp i1 %Cmp to float
+ %Sl86 = select i1 %Tr10, i16 -1, i16 %Sl63
+ %Cmp87 = icmp ugt <2 x i1> %I76, %I60
+ %L88 = load i32, i32* %PC78
+ store i8 33, i8* %0
+ %E89 = extractelement <2 x i32> zeroinitializer, i32 1
+ %Shuff90 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff52, <4 x i32> <i32 0, i32 undef, i32 4, i32 6>
+ %I91 = insertelement <2 x i32> %Shuff75, i32 %ZE, i32 0
+ %B92 = add i64 -1, %L73
+ %Tr93 = trunc i64 0 to i16
+ %Sl94 = select i1 %FC, i64 %L37, i64 %L5
+ %Cmp95 = icmp sge i64 454853, %B92
+ br i1 %Cmp95, label %CF245, label %CF257
+
+CF257: ; preds = %CF245
+ %L96 = load i64, i64* %Sl
+ store i1 true, i1* %PC
+ %E97 = extractelement <2 x i1> %Shuff20, i32 1
+ br label %CF
+
+CF: ; preds = %CF, %CF258, %CF257
+ %Shuff98 = shufflevector <2 x i1> %Cmp80, <2 x i1> zeroinitializer, <2 x i32> <i32 undef, i32 0>
+ %I99 = insertelement <2 x i1> %Shuff98, i1 %Cmp30, i32 0
+ %B100 = sub <8 x i8> zeroinitializer, zeroinitializer
+ %FC101 = uitofp <2 x i1> %I99 to <2 x double>
+ %Sl102 = select i1 %FC, i16 %Sl63, i16 %E58
+ %Cmp103 = fcmp ord double %B16, 0xDACBFCEAC1C99968
+ br i1 %Cmp103, label %CF, label %CF240
+
+CF240: ; preds = %CF240, %CF260, %CF
+ %L104 = load i32, i32* %1
+ store i1 true, i1* %PC
+ %E105 = extractelement <4 x i16> %I8, i32 1
+ %Shuff106 = shufflevector <4 x i16> %Shuff7, <4 x i16> %I34, <4 x i32> <i32 4, i32 undef, i32 undef, i32 2>
+ %I107 = insertelement <2 x i1> %Cmp87, i1 %FC, i32 0
+ %ZE108 = zext <4 x i16> %B69 to <4 x i64>
+ %Sl109 = select i1 %Cmp, i16 27357, i16 %Sl102
+ %Cmp110 = icmp sge <4 x i16> %B9, zeroinitializer
+ %L111 = load i64, i64* %Sl
+ store i8 %L57, i8* %0
+ %E112 = extractelement <2 x i1> %Shuff98, i32 0
+ br i1 %E112, label %CF240, label %CF254
+
+CF254: ; preds = %CF254, %CF267, %CF264, %CF240
+ %Shuff113 = shufflevector <2 x i32> %I68, <2 x i32> zeroinitializer, <2 x i32> undef
+ %I114 = insertelement <4 x i16> zeroinitializer, i16 27357, i32 3
+ %B115 = and i16 %Sl102, %Sl11
+ %FC116 = uitofp i16 %B115 to double
+ %Sl117 = select i1 %L81, i32* %1, i32* %1
+ %Cmp118 = icmp ne i64 %Sl94, %L50
+ br i1 %Cmp118, label %CF254, label %CF267
+
+CF267: ; preds = %CF254
+ %L119 = load i64, i64* %Sl
+ store i32 %ZE, i32* %PC78
+ %E120 = extractelement <4 x i16> zeroinitializer, i32 1
+ %Shuff121 = shufflevector <1 x i32> %Shuff33, <1 x i32> %Shuff33, <1 x i32> zeroinitializer
+ %I122 = insertelement <1 x i32> %Shuff121, i32 %E82, i32 0
+ %B123 = mul <4 x i16> %I40, %I34
+ %Sl124 = select i1 %FC, <4 x i1> %Cmp110, <4 x i1> %Cmp110
+ %Cmp125 = icmp ne <4 x i64> %ZE108, zeroinitializer
+ %L126 = load i64, i64* %Sl
+ store i32 %ZE, i32* %Sl117
+ %E127 = extractelement <2 x i1> %Cmp87, i32 1
+ br i1 %E127, label %CF254, label %CF264
+
+CF264: ; preds = %CF267
+ %Shuff128 = shufflevector <4 x i16> %Shuff83, <4 x i16> %I47, <4 x i32> <i32 undef, i32 2, i32 undef, i32 6>
+ %I129 = insertelement <4 x i16> %Shuff67, i16 %Sl109, i32 2
+ %B130 = add i32 %3, %E38
+ %FC131 = sitofp i32 %3 to float
+ %Sl132 = select i1 %Sl42, i64 %L24, i64 %L5
+ %Cmp133 = icmp eq <2 x i1> %I99, %Shuff20
+ %L134 = load i32, i32* %PC78
+ store i32 %L104, i32* %1
+ %E135 = extractelement <8 x i1> zeroinitializer, i32 4
+ br i1 %E135, label %CF254, label %CF260
+
+CF260: ; preds = %CF264
+ %Shuff136 = shufflevector <1 x i32> %Shuff59, <1 x i32> %Shuff121, <1 x i32> undef
+ %I137 = insertelement <4 x i16> %Shuff67, i16 %Sl55, i32 3
+ %B138 = lshr <1 x i32> %Shuff33, %Shuff59
+ %Sl139 = select i1 %E135, i64 %L119, i64 %L126
+ %Cmp140 = icmp slt i8 -99, %Sl29
+ br i1 %Cmp140, label %CF240, label %CF247
+
+CF247: ; preds = %CF247, %CF272, %CF260
+ %L141 = load i32, i32* %Sl117
+ store i8 %5, i8* %0
+ %E142 = extractelement <2 x i1> %Cmp36, i32 1
+ br i1 %E142, label %CF247, label %CF272
+
+CF272: ; preds = %CF247
+ %Shuff143 = shufflevector <4 x i64> %Shuff90, <4 x i64> %Shuff52, <4 x i32> <i32 6, i32 undef, i32 2, i32 undef>
+ %I144 = insertelement <1 x i32> %Shuff121, i32 %L88, i32 0
+ %Tr145 = trunc i64 %Sl139 to i16
+ %Sl146 = select i1 %Cmp49, i32 %L134, i32 %L104
+ %L147 = load i32, i32* %PC78
+ store i32 %Tr, i32* %Sl117
+ %E148 = extractelement <4 x i16> %Shuff67, i32 3
+ %Shuff149 = shufflevector <4 x i16> zeroinitializer, <4 x i16> %Shuff67, <4 x i32> <i32 2, i32 4, i32 6, i32 0>
+ %I150 = insertelement <2 x i1> zeroinitializer, i1 %E127, i32 0
+ %B151 = fdiv double 0x16FB351AF5F9C998, 0xC275146F92573C4
+ %FC152 = uitofp <1 x i32> %I144 to <1 x double>
+ %Sl153 = select i1 %Cmp118, <1 x i32> %Shuff136, <1 x i32> %Shuff121
+ %Cmp154 = icmp ule i8 %5, %Sl29
+ br i1 %Cmp154, label %CF247, label %CF253
+
+CF253: ; preds = %CF253, %CF269, %CF272
+ %L155 = load i32, i32* %Sl117
+ store i32 %L141, i32* %PC78
+ %E156 = extractelement <4 x i1> %Cmp125, i32 2
+ br i1 %E156, label %CF253, label %CF269
+
+CF269: ; preds = %CF253
+ %Shuff157 = shufflevector <1 x i32> %Shuff46, <1 x i32> %Shuff121, <1 x i32> <i32 1>
+ %I158 = insertelement <4 x i16> %Shuff128, i16 %E66, i32 1
+ %B159 = shl i64 %L119, %L73
+ %Se = sext i16 %B77 to i32
+ %Sl160 = select i1 %Cmp56, i16 %Sl63, i16 %B77
+ %L161 = load i64, i64* %Sl
+ store i32 %B130, i32* %Sl117
+ %E162 = extractelement <1 x i32> %Shuff59, i32 0
+ %Shuff163 = shufflevector <4 x i16> %Shuff7, <4 x i16> %Shuff67, <4 x i32> <i32 5, i32 7, i32 1, i32 3>
+ %I164 = insertelement <4 x i16> %Shuff106, i16 27357, i32 3
+ %Se165 = sext <4 x i1> %Sl124 to <4 x i8>
+ %Sl166 = select i1 true, i1 %Cmp, i1 %Tr70
+ br i1 %Sl166, label %CF253, label %CF255
+
+CF255: ; preds = %CF255, %CF266, %CF269
+ %Cmp167 = icmp sge i64 %4, %L24
+ br i1 %Cmp167, label %CF255, label %CF266
+
+CF266: ; preds = %CF255
+ %L168 = load i8, i8* %0
+ store i32 %E38, i32* %PC78
+ %E169 = extractelement <2 x i16> zeroinitializer, i32 1
+ %Shuff170 = shufflevector <4 x i16> %Sl79, <4 x i16> %I137, <4 x i32> <i32 6, i32 0, i32 2, i32 4>
+ %I171 = insertelement <4 x i16> %Shuff163, i16 %ZE41, i32 0
+ %Tr172 = trunc i16 %Tr145 to i1
+ br i1 %Tr172, label %CF255, label %CF258
+
+CF258: ; preds = %CF266
+ %Sl173 = select i1 true, <2 x i32> %I68, <2 x i32> %I91
+ %Cmp174 = icmp ugt <2 x i1> %Cmp72, %I150
+ %L175 = load i32, i32* %Sl117
+ store i32 %L104, i32* %Sl117
+ %E176 = extractelement <4 x i16> %Shuff67, i32 1
+ %Shuff177 = shufflevector <1 x i32> %Shuff121, <1 x i32> %Shuff33, <1 x i32> zeroinitializer
+ %I178 = insertelement <4 x i16> zeroinitializer, i16 27357, i32 0
+ %FC179 = sitofp <4 x i16> %I47 to <4 x float>
+ %Sl180 = select i1 %FC, i64 %L126, i64 %B92
+ %Cmp181 = fcmp ugt double %B61, %B16
+ br i1 %Cmp181, label %CF, label %CF236
+
+CF236: ; preds = %CF236, %CF258
+ %L182 = load i8, i8* %0
+ store i32 %E38, i32* %Sl117
+ %E183 = extractelement <1 x i32> %Shuff121, i32 0
+ %Shuff184 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff90, <4 x i32> <i32 7, i32 undef, i32 3, i32 5>
+ %I185 = insertelement <4 x i16> %Shuff106, i16 %Tr93, i32 1
+ %ZE186 = zext i32 %E162 to i64
+ %Sl187 = select i1 %Cmp95, <8 x i8> %B100, <8 x i8> %B100
+ %Cmp188 = icmp uge i16 %B115, %Sl11
+ br i1 %Cmp188, label %CF236, label %CF242
+
+CF242: ; preds = %CF242, %CF250, %CF248, %CF236
+ %L189 = load i8, i8* %0
+ store i8 %Sl29, i8* %0
+ %E190 = extractelement <4 x i16> %B9, i32 3
+ %Shuff191 = shufflevector <4 x i16> %Shuff26, <4 x i16> %Shuff26, <4 x i32> <i32 6, i32 0, i32 2, i32 4>
+ %I192 = insertelement <1 x i32> %I122, i32 %3, i32 0
+ %B193 = udiv i8 %5, %L168
+ %Se194 = sext <8 x i1> %Sl71 to <8 x i32>
+ %Sl195 = select i1 %Cmp188, i8 %L182, i8 %L168
+ %Cmp196 = icmp slt i16 %B77, %Sl102
+ br i1 %Cmp196, label %CF242, label %CF250
+
+CF250: ; preds = %CF242
+ %L197 = load i64, i64* %Sl
+ store i32 %ZE, i32* %Sl117
+ %E198 = extractelement <2 x i1> %Shuff20, i32 1
+ br i1 %E198, label %CF242, label %CF244
+
+CF244: ; preds = %CF244, %CF250
+ %Shuff199 = shufflevector <1 x i32> %Shuff46, <1 x i32> %Shuff177, <1 x i32> zeroinitializer
+ %I200 = insertelement <4 x i16> %Shuff191, i16 %Sl86, i32 0
+ %B201 = mul i16 %ZE41, %E169
+ %Se202 = sext <4 x i16> %I171 to <4 x i64>
+ %Sl203 = select i1 %Sl166, i32 %E162, i32 %E82
+ %Cmp204 = icmp ule i16 %E32, %E120
+ br i1 %Cmp204, label %CF244, label %CF248
+
+CF248: ; preds = %CF244
+ %L205 = load float, float* %A3
+ store i32 %Tr, i32* %PC78
+ %E206 = extractelement <2 x i1> %Shuff20, i32 1
+ br i1 %E206, label %CF242, label %CF243
+
+CF243: ; preds = %CF243, %CF273, %CF248
+ %Shuff207 = shufflevector <8 x i1> zeroinitializer, <8 x i1> %Sl71, <8 x i32> <i32 4, i32 6, i32 8, i32 undef, i32 12, i32 undef, i32 undef, i32 2>
+ %I208 = insertelement <2 x i1> %Shuff20, i1 %E198, i32 0
+ %B209 = xor <4 x i16> %I129, %I34
+ %FC210 = uitofp <8 x i8> zeroinitializer to <8 x double>
+ %Sl211 = select i1 %E74, i16 %Tr93, i16 %E19
+ %Cmp212 = icmp ugt i32 %Se, %E38
+ br i1 %Cmp212, label %CF243, label %CF273
+
+CF273: ; preds = %CF243
+ %L213 = load i32, i32* %PC78
+ store i8 %L168, i8* %0
+ %E214 = extractelement <2 x i32> %Shuff113, i32 1
+ %Shuff215 = shufflevector <4 x i16> %Shuff128, <4 x i16> %I137, <4 x i32> <i32 6, i32 0, i32 2, i32 4>
+ %I216 = insertelement <2 x i1> %Shuff20, i1 %Cmp30, i32 0
+ %B217 = sub <4 x i16> %Shuff83, %I185
+ %Tr218 = trunc <4 x i16> %B9 to <4 x i1>
+ %Sl219 = select i1 %Cmp154, i8 %B, i8 %5
+ %Cmp220 = icmp uge <4 x i64> %Shuff52, %Shuff52
+ %L221 = load i32, i32* %Sl117
+ store i8 %L168, i8* %0
+ %E222 = extractelement <4 x i16> %Shuff191, i32 0
+ %Shuff223 = shufflevector <4 x i16> %Shuff26, <4 x i16> %I34, <4 x i32> <i32 undef, i32 1, i32 3, i32 5>
+ %I224 = insertelement <4 x i16> %Shuff26, i16 %Tr145, i32 1
+ %FC225 = sitofp i1 %Cmp56 to float
+ %Sl226 = select i1 %E, i1 %Cmp154, i1 %Sl166
+ br i1 %Sl226, label %CF243, label %CF252
+
+CF252: ; preds = %CF273
+ %Cmp227 = icmp ugt <4 x i64> %Shuff143, zeroinitializer
+ %L228 = load i32, i32* %Sl117
+ store i32 %Tr, i32* %PC78
+ %E229 = extractelement <4 x i16> %Shuff163, i32 2
+ %Shuff230 = shufflevector <1 x i32> %Shuff199, <1 x i32> zeroinitializer, <1 x i32> <i32 1>
+ %I231 = insertelement <4 x i16> %Shuff106, i16 %E32, i32 1
+ %B232 = srem i32 %Sl203, %Sl203
+ %FC233 = fptoui double 0x5A7FED9E637D2C1C to i32
+ %Sl234 = select i1 %Cmp103, i8 %B193, i8 %L168
+ %Cmp235 = icmp uge <2 x i16> zeroinitializer, zeroinitializer
+ store i32 %ZE, i32* %PC78
+ store i64 %L5, i64* %Sl
+ store i8 33, i8* %0
+ store i8 %L168, i8* %0
+ store i1 %Sl226, i1* %PC
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/stack-guard.ll b/test/CodeGen/SystemZ/stack-guard.ll
index 0889e7ba941e..2908cbe92bbb 100644
--- a/test/CodeGen/SystemZ/stack-guard.ll
+++ b/test/CodeGen/SystemZ/stack-guard.ll
@@ -17,19 +17,19 @@ define i32 @test_stack_guard() #0 {
entry:
%a1 = alloca [256 x i32], align 4
%0 = bitcast [256 x i32]* %a1 to i8*
- call void @llvm.lifetime.start(i64 1024, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 1024, i8* %0)
%arraydecay = getelementptr inbounds [256 x i32], [256 x i32]* %a1, i64 0, i64 0
call void @foo3(i32* %arraydecay)
- call void @llvm.lifetime.end(i64 1024, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 1024, i8* %0)
ret i32 0
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare void @foo3(i32*)
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
attributes #0 = { sspstrong }
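The rename above reflects that the lifetime intrinsics are overloaded on their pointer operand, so the mangled suffix (.p0i8 for i8* in address space 0) appears in both the declarations and the call sites. A minimal sketch of typical usage, with hypothetical names, marking an alloca's live range:

define void @lifetime_sketch() {
  %buf = alloca [16 x i8]
  %p = bitcast [16 x i8]* %buf to i8*
  call void @llvm.lifetime.start.p0i8(i64 16, i8* %p)
  ; ... %buf is live in here ...
  call void @llvm.lifetime.end.p0i8(i64 16, i8* %p)
  ret void
}

declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)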
diff --git a/test/CodeGen/SystemZ/strcmp-02.ll b/test/CodeGen/SystemZ/strcmp-02.ll
deleted file mode 100644
index 99d7d9cfa692..000000000000
--- a/test/CodeGen/SystemZ/strcmp-02.ll
+++ /dev/null
@@ -1,72 +0,0 @@
-; Test strcmp using CLST, i64 version.
-;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
-
-declare i64 @strcmp(i8 *%src1, i8 *%src2)
-
-; Check a case where the result is used as an integer.
-define i64 @f1(i8 *%src1, i8 *%src2) {
-; CHECK-LABEL: f1:
-; CHECK: lhi %r0, 0
-; CHECK: [[LABEL:\.[^:]*]]:
-; CHECK: clst %r2, %r3
-; CHECK-NEXT: jo [[LABEL]]
-; CHECK-NEXT: BB#{{[0-9]+}}
-; CHECK-NEXT: ipm [[REG:%r[0-5]]]
-; CHECK: srl [[REG]], 28
-; CHECK: rll [[REG]], [[REG]], 31
-; CHECK: lgfr %r2, [[REG]]
-; CHECK: br %r14
- %res = call i64 @strcmp(i8 *%src1, i8 *%src2)
- ret i64 %res
-}
-
-; Check a case where the result is tested for equality.
-define void @f2(i8 *%src1, i8 *%src2, i64 *%dest) {
-; CHECK-LABEL: f2:
-; CHECK: lhi %r0, 0
-; CHECK: [[LABEL:\.[^:]*]]:
-; CHECK: clst %r2, %r3
-; CHECK-NEXT: jo [[LABEL]]
-; CHECK-NEXT: BB#{{[0-9]+}}
-; CHECK-NEXT: ber %r14
-; CHECK: br %r14
- %res = call i64 @strcmp(i8 *%src1, i8 *%src2)
- %cmp = icmp eq i64 %res, 0
- br i1 %cmp, label %exit, label %store
-
-store:
- store i64 0, i64 *%dest
- br label %exit
-
-exit:
- ret void
-}
-
-; Test a case where the result is used both as an integer and for
-; branching.
-define i64 @f3(i8 *%src1, i8 *%src2, i64 *%dest) {
-; CHECK-LABEL: f3:
-; CHECK: lhi %r0, 0
-; CHECK: [[LABEL:\.[^:]*]]:
-; CHECK: clst %r2, %r3
-; CHECK-NEXT: jo [[LABEL]]
-; CHECK-NEXT: BB#{{[0-9]+}}
-; CHECK-NEXT: ipm [[REG:%r[0-5]]]
-; CHECK: srl [[REG]], 28
-; CHECK: rll [[REG]], [[REG]], 31
-; CHECK: lgfr %r2, [[REG]]
-; CHECK: blr %r14
-; CHECK: br %r14
-entry:
- %res = call i64 @strcmp(i8 *%src1, i8 *%src2)
- %cmp = icmp slt i64 %res, 0
- br i1 %cmp, label %exit, label %store
-
-store:
- store i64 0, i64 *%dest
- br label %exit
-
-exit:
- ret i64 %res
-}
diff --git a/test/CodeGen/SystemZ/strlen-02.ll b/test/CodeGen/SystemZ/strlen-02.ll
deleted file mode 100644
index e1abbff4b4e0..000000000000
--- a/test/CodeGen/SystemZ/strlen-02.ll
+++ /dev/null
@@ -1,39 +0,0 @@
-; Test strlen using SRST, i32 version.
-;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
-
-declare i32 @strlen(i8 *%src)
-declare i32 @strnlen(i8 *%src, i32 %len)
-
-; Test strlen with an i32-based prototype. It would also be valid for
-; the uses of %r3 and REG after the LGR to be swapped.
-define i32 @f1(i32 %dummy, i8 *%src) {
-; CHECK-LABEL: f1:
-; CHECK-DAG: lhi %r0, 0
-; CHECK-DAG: lghi %r2, 0
-; CHECK-DAG: lgr [[REG:%r[145]]], %r3
-; CHECK: [[LABEL:\.[^:]*]]:
-; CHECK-NEXT: srst %r2, [[REG]]
-; CHECK-NEXT: jo [[LABEL]]
-; CHECK-NEXT: BB#{{[0-9]+}}
-; CHECK-NEXT: sgr %r2, %r3
-; CHECK: br %r14
- %res = call i32 @strlen(i8 *%src)
- ret i32 %res
-}
-
-; Test strnlen with an i32-based prototype.
-define i32 @f2(i32 zeroext %len, i8 *%src) {
-; CHECK-LABEL: f2:
-; CHECK-DAG: agr %r2, %r3
-; CHECK-DAG: lhi %r0, 0
-; CHECK-DAG: lgr [[REG:%r[145]]], %r3
-; CHECK: [[LABEL:\.[^:]*]]:
-; CHECK-NEXT: srst %r2, [[REG]]
-; CHECK-NEXT: jo [[LABEL]]
-; CHECK-NEXT: BB#{{[0-9]+}}
-; CHECK-NEXT: sgr %r2, %r3
-; CHECK: br %r14
- %res = call i32 @strnlen(i8 *%src, i32 %len)
- ret i32 %res
-}
diff --git a/test/CodeGen/SystemZ/unaligned-01.ll b/test/CodeGen/SystemZ/unaligned-01.ll
index 94cad0e1743a..2af1aa79a23f 100644
--- a/test/CodeGen/SystemZ/unaligned-01.ll
+++ b/test/CodeGen/SystemZ/unaligned-01.ll
@@ -1,10 +1,7 @@
; Check that unaligned accesses are allowed in general. We check the
; few exceptions (like CRL) in their respective test files.
;
-; FIXME: -combiner-alias-analysis (the default for SystemZ) stops
-; f1 from being optimized.
-; RUN: llc < %s -mtriple=s390x-linux-gnu -combiner-alias-analysis=false \
-; RUN: | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
; Check that these four byte stores become a single word store.
define void @f1(i8 *%ptr) {
diff --git a/test/CodeGen/SystemZ/undef-flag.ll b/test/CodeGen/SystemZ/undef-flag.ll
new file mode 100644
index 000000000000..0e6d87ec960f
--- /dev/null
+++ b/test/CodeGen/SystemZ/undef-flag.ll
@@ -0,0 +1,22 @@
+; Test that the backend does not corrupt the IR when it encounters a use of an
+; undef register. This typically happens while expanding a pseudo instruction or
+; otherwise replacing one instruction with another.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -verify-machineinstrs | FileCheck %s
+
+; LLCRMux
+define void @f1(i8*) {
+; CHECK-LABEL: f1:
+; CHECK-NOT: *** Bad machine code: Using an undefined physical register ***
+BB:
+ %L5 = load i8, i8* %0
+ %B9 = lshr i8 %L5, -1
+ br label %CF
+
+CF: ; preds = %CF, %BB
+ %Cmp25 = icmp ne i8 27, %B9
+ br i1 %Cmp25, label %CF, label %CF34
+
+CF34: ; preds = %CF
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/vec-cmp-cmp-logic-select.ll b/test/CodeGen/SystemZ/vec-cmp-cmp-logic-select.ll
new file mode 100644
index 000000000000..271513f2e9ed
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-cmp-cmp-logic-select.ll
@@ -0,0 +1,5784 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+;
+; Test that a vector select with a logic combination of two compares does not
+; produce any unnecessary pack, unpack or shift instructions.
+; And, Or and Xor are tested.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+
+define <2 x i8> @fun0(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x i8> %val5, <2 x i8> %val6) {
+; CHECK-LABEL: fun0:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v28, %v30
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = icmp eq <2 x i8> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i8> %val5, <2 x i8> %val6
+ ret <2 x i8> %sel
+}
+
+define <2 x i16> @fun1(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v28, %v30
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = icmp eq <2 x i8> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x i8> @fun2(<2 x i8> %val1, <2 x i8> %val2, <2 x i16> %val3, <2 x i16> %val4, <2 x i8> %val5, <2 x i8> %val6) {
+; CHECK-LABEL: fun2:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v1, %v28, %v30
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vpkh %v1, %v1, %v1
+; CHECK-NEXT: vn %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = icmp eq <2 x i16> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i8> %val5, <2 x i8> %val6
+ ret <2 x i8> %sel
+}
+
+define <2 x i32> @fun3(<2 x i8> %val1, <2 x i8> %val2, <2 x i32> %val3, <2 x i32> %val4, <2 x i32> %val5, <2 x i32> %val6) {
+; CHECK-LABEL: fun3:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = icmp eq <2 x i32> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i32> %val5, <2 x i32> %val6
+ ret <2 x i32> %sel
+}
+
+define <2 x i32> @fun4(<2 x i8> %val1, <2 x i8> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i32> %val5, <2 x i32> %val6) {
+; CHECK-LABEL: fun4:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vceqg %v0, %v28, %v30
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vpkg %v0, %v0, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = icmp eq <2 x i64> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i32> %val5, <2 x i32> %val6
+ ret <2 x i32> %sel
+}
+
+define <2 x i16> @fun5(<2 x i8> %val1, <2 x i8> %val2, <2 x float> %val3, <2 x float> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun5:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = fcmp ogt <2 x float> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x i64> @fun6(<2 x i8> %val1, <2 x i8> %val2, <2 x double> %val3, <2 x double> %val4, <2 x i64> %val5, <2 x i64> %val6) {
+; CHECK-LABEL: fun6:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v28, %v30
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = fcmp ogt <2 x double> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i64> %val5, <2 x i64> %val6
+ ret <2 x i64> %sel
+}
+
+define <2 x i8> @fun7(<2 x i16> %val1, <2 x i16> %val2, <2 x i16> %val3, <2 x i16> %val4, <2 x i8> %val5, <2 x i8> %val6) {
+; CHECK-LABEL: fun7:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v28, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vpkh %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = icmp eq <2 x i16> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i8> %val5, <2 x i8> %val6
+ ret <2 x i8> %sel
+}
+
+define <2 x i16> @fun8(<2 x i16> %val1, <2 x i16> %val2, <2 x i16> %val3, <2 x i16> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun8:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v28, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = icmp eq <2 x i16> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x i32> @fun9(<2 x i16> %val1, <2 x i16> %val2, <2 x i16> %val3, <2 x i16> %val4, <2 x i32> %val5, <2 x i32> %val6) {
+; CHECK-LABEL: fun9:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v28, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = icmp eq <2 x i16> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i32> %val5, <2 x i32> %val6
+ ret <2 x i32> %sel
+}
+
+define <2 x i8> @fun10(<2 x i16> %val1, <2 x i16> %val2, <2 x i32> %val3, <2 x i32> %val4, <2 x i8> %val5, <2 x i8> %val6) {
+; CHECK-LABEL: fun10:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v1, %v28, %v30
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vpkf %v1, %v1, %v1
+; CHECK-NEXT: vn %v0, %v0, %v1
+; CHECK-NEXT: vpkh %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = icmp eq <2 x i32> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i8> %val5, <2 x i8> %val6
+ ret <2 x i8> %sel
+}
+
+define <2 x i8> @fun11(<2 x i16> %val1, <2 x i16> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i8> %val5, <2 x i8> %val6) {
+; CHECK-LABEL: fun11:
+; CHECK: # BB#0:
+; CHECK-NEXT: larl %r1, .LCPI11_0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vceqg %v0, %v28, %v30
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vpkh %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = icmp eq <2 x i64> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i8> %val5, <2 x i8> %val6
+ ret <2 x i8> %sel
+}
+
+define <2 x double> @fun12(<2 x i16> %val1, <2 x i16> %val2, <2 x float> %val3, <2 x float> %val4, <2 x double> %val5, <2 x double> %val6) {
+; CHECK-LABEL: fun12:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = fcmp ogt <2 x float> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x double> %val5, <2 x double> %val6
+ ret <2 x double> %sel
+}
+
+define <2 x i16> @fun13(<2 x i16> %val1, <2 x i16> %val2, <2 x double> %val3, <2 x double> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun13:
+; CHECK: # BB#0:
+; CHECK-NEXT: larl %r1, .LCPI13_0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vfchdb %v0, %v28, %v30
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = fcmp ogt <2 x double> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x i16> @fun14(<2 x i32> %val1, <2 x i32> %val2, <2 x i32> %val3, <2 x i32> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun14:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i32> %val1, %val2
+ %cmp1 = icmp eq <2 x i32> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x i32> @fun15(<2 x i32> %val1, <2 x i32> %val2, <2 x i32> %val3, <2 x i32> %val4, <2 x i32> %val5, <2 x i32> %val6) {
+; CHECK-LABEL: fun15:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i32> %val1, %val2
+ %cmp1 = icmp eq <2 x i32> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i32> %val5, <2 x i32> %val6
+ ret <2 x i32> %sel
+}
+
+define <2 x i64> @fun16(<2 x i32> %val1, <2 x i32> %val2, <2 x i32> %val3, <2 x i32> %val4, <2 x i64> %val5, <2 x i64> %val6) {
+; CHECK-LABEL: fun16:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i32> %val1, %val2
+ %cmp1 = icmp eq <2 x i32> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i64> %val5, <2 x i64> %val6
+ ret <2 x i64> %sel
+}
+
+define <2 x i64> @fun17(<2 x i32> %val1, <2 x i32> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i64> %val5, <2 x i64> %val6) {
+; CHECK-LABEL: fun17:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vceqg %v0, %v28, %v30
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i32> %val1, %val2
+ %cmp1 = icmp eq <2 x i64> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i64> %val5, <2 x i64> %val6
+ ret <2 x i64> %sel
+}
+
+define <2 x i16> @fun18(<2 x i32> %val1, <2 x i32> %val2, <2 x float> %val3, <2 x float> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun18:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i32> %val1, %val2
+ %cmp1 = fcmp ogt <2 x float> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x float> @fun19(<2 x i32> %val1, <2 x i32> %val2, <2 x double> %val3, <2 x double> %val4, <2 x float> %val5, <2 x float> %val6) {
+; CHECK-LABEL: fun19:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v1, %v28, %v30
+; CHECK-NEXT: vceqf %v0, %v24, %v26
+; CHECK-NEXT: vpkg %v1, %v1, %v1
+; CHECK-NEXT: vn %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i32> %val1, %val2
+ %cmp1 = fcmp ogt <2 x double> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x float> %val5, <2 x float> %val6
+ ret <2 x float> %sel
+}
+
+define <2 x i16> @fun20(<2 x i64> %val1, <2 x i64> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun20:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v28, %v30
+; CHECK-NEXT: vceqg %v1, %v24, %v26
+; CHECK-NEXT: larl %r1, .LCPI20_0
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i64> %val1, %val2
+ %cmp1 = icmp eq <2 x i64> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x i64> @fun21(<2 x i64> %val1, <2 x i64> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i64> %val5, <2 x i64> %val6) {
+; CHECK-LABEL: fun21:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v28, %v30
+; CHECK-NEXT: vceqg %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i64> %val1, %val2
+ %cmp1 = icmp eq <2 x i64> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i64> %val5, <2 x i64> %val6
+ ret <2 x i64> %sel
+}
+
+define <2 x i64> @fun22(<2 x i64> %val1, <2 x i64> %val2, <2 x float> %val3, <2 x float> %val4, <2 x i64> %val5, <2 x i64> %val6) {
+; CHECK-LABEL: fun22:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vceqg %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i64> %val1, %val2
+ %cmp1 = fcmp ogt <2 x float> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i64> %val5, <2 x i64> %val6
+ ret <2 x i64> %sel
+}
+
+define <2 x i16> @fun23(<2 x i64> %val1, <2 x i64> %val2, <2 x double> %val3, <2 x double> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun23:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v28, %v30
+; CHECK-NEXT: vceqg %v1, %v24, %v26
+; CHECK-NEXT: larl %r1, .LCPI23_0
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i64> %val1, %val2
+ %cmp1 = fcmp ogt <2 x double> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x float> @fun24(<2 x float> %val1, <2 x float> %val2, <2 x float> %val3, <2 x float> %val4, <2 x float> %val5, <2 x float> %val6) {
+; CHECK-LABEL: fun24:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v24, %v24
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrlf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <2 x float> %val1, %val2
+ %cmp1 = fcmp ogt <2 x float> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x float> %val5, <2 x float> %val6
+ ret <2 x float> %sel
+}
+
+define <2 x i32> @fun25(<2 x float> %val1, <2 x float> %val2, <2 x double> %val3, <2 x double> %val4, <2 x i32> %val5, <2 x i32> %val6) {
+; CHECK-LABEL: fun25:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v26, %v26
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vfchdb %v1, %v28, %v30
+; CHECK-NEXT: vpkg %v1, %v1, %v1
+; CHECK-NEXT: vn %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <2 x float> %val1, %val2
+ %cmp1 = fcmp ogt <2 x double> %val3, %val4
+ %and = and <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i32> %val5, <2 x i32> %val6
+ ret <2 x i32> %sel
+}
+
+define <4 x i16> @fun26(<4 x i32> %val1, <4 x i32> %val2, <4 x i32> %val3, <4 x i32> %val4, <4 x i16> %val5, <4 x i16> %val6) {
+; CHECK-LABEL: fun26:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i32> %val1, %val2
+ %cmp1 = icmp eq <4 x i32> %val3, %val4
+ %and = and <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i16> %val5, <4 x i16> %val6
+ ret <4 x i16> %sel
+}
+
+define <4 x i32> @fun27(<4 x i32> %val1, <4 x i32> %val2, <4 x i32> %val3, <4 x i32> %val4, <4 x i32> %val5, <4 x i32> %val6) {
+; CHECK-LABEL: fun27:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i32> %val1, %val2
+ %cmp1 = icmp eq <4 x i32> %val3, %val4
+ %and = and <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i32> %val5, <4 x i32> %val6
+ ret <4 x i32> %sel
+}
+
+define <4 x i64> @fun28(<4 x i32> %val1, <4 x i32> %val2, <4 x i32> %val3, <4 x i32> %val4, <4 x i64> %val5, <4 x i64> %val6) {
+; CHECK-LABEL: fun28:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v1
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i32> %val1, %val2
+ %cmp1 = icmp eq <4 x i32> %val3, %val4
+ %and = and <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i64> %val5, <4 x i64> %val6
+ ret <4 x i64> %sel
+}
+
+define <4 x i32> @fun29(<4 x i32> %val1, <4 x i32> %val2, <4 x i64> %val3, <4 x i64> %val4, <4 x i32> %val5, <4 x i32> %val6) {
+; CHECK-LABEL: fun29:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v30, %v27
+; CHECK-NEXT: vceqg %v1, %v28, %v25
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v29, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i32> %val1, %val2
+ %cmp1 = icmp eq <4 x i64> %val3, %val4
+ %and = and <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i32> %val5, <4 x i32> %val6
+ ret <4 x i32> %sel
+}
+
+define <4 x i16> @fun30(<4 x i32> %val1, <4 x i32> %val2, <4 x float> %val3, <4 x float> %val4, <4 x i16> %val5, <4 x i16> %val6) {
+; CHECK-LABEL: fun30:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i32> %val1, %val2
+ %cmp1 = fcmp ogt <4 x float> %val3, %val4
+ %and = and <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i16> %val5, <4 x i16> %val6
+ ret <4 x i16> %sel
+}
+
+define <4 x i8> @fun31(<4 x i32> %val1, <4 x i32> %val2, <4 x double> %val3, <4 x double> %val4, <4 x i8> %val5, <4 x i8> %val6) {
+; CHECK-LABEL: fun31:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v30, %v27
+; CHECK-NEXT: vfchdb %v1, %v28, %v25
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: larl %r1, .LCPI31_0
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v29, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i32> %val1, %val2
+ %cmp1 = fcmp ogt <4 x double> %val3, %val4
+ %and = and <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i8> %val5, <4 x i8> %val6
+ ret <4 x i8> %sel
+}
+
+define <4 x i32> @fun32(<4 x i64> %val1, <4 x i64> %val2, <4 x i64> %val3, <4 x i64> %val4, <4 x i32> %val5, <4 x i32> %val6) {
+; CHECK-LABEL: fun32:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v27, %v31
+; CHECK-NEXT: vceqg %v1, %v26, %v30
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vceqg %v1, %v25, %v29
+; CHECK-NEXT: vceqg %v2, %v24, %v28
+; CHECK-NEXT: vn %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i64> %val1, %val2
+ %cmp1 = icmp eq <4 x i64> %val3, %val4
+ %and = and <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i32> %val5, <4 x i32> %val6
+ ret <4 x i32> %sel
+}
+
+define <4 x i64> @fun33(<4 x i64> %val1, <4 x i64> %val2, <4 x i64> %val3, <4 x i64> %val4, <4 x i64> %val5, <4 x i64> %val6) {
+; CHECK-LABEL: fun33:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v25, %v29
+; CHECK-NEXT: vceqg %v1, %v24, %v28
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vceqg %v0, %v27, %v31
+; CHECK-NEXT: vceqg %v1, %v26, %v30
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i64> %val1, %val2
+ %cmp1 = icmp eq <4 x i64> %val3, %val4
+ %and = and <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i64> %val5, <4 x i64> %val6
+ ret <4 x i64> %sel
+}
+
+define <4 x i64> @fun34(<4 x i64> %val1, <4 x i64> %val2, <4 x float> %val3, <4 x float> %val4, <4 x i64> %val5, <4 x i64> %val6) {
+; CHECK-LABEL: fun34:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v27, %v27
+; CHECK-NEXT: vmrlf %v1, %v25, %v25
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v27, %v27
+; CHECK-NEXT: vmrhf %v2, %v25, %v25
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vceqg %v2, %v24, %v28
+; CHECK-NEXT: vn %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v29, %v2, %v1
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vceqg %v1, %v26, %v30
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v31, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i64> %val1, %val2
+ %cmp1 = fcmp ogt <4 x float> %val3, %val4
+ %and = and <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i64> %val5, <4 x i64> %val6
+ ret <4 x i64> %sel
+}
+
+define <4 x float> @fun35(<4 x i64> %val1, <4 x i64> %val2, <4 x double> %val3, <4 x double> %val4, <4 x float> %val5, <4 x float> %val6) {
+; CHECK-LABEL: fun35:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v27, %v31
+; CHECK-NEXT: vceqg %v1, %v26, %v30
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vfchdb %v1, %v25, %v29
+; CHECK-NEXT: vceqg %v2, %v24, %v28
+; CHECK-NEXT: vn %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i64> %val1, %val2
+ %cmp1 = fcmp ogt <4 x double> %val3, %val4
+ %and = and <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x float> %val5, <4 x float> %val6
+ ret <4 x float> %sel
+}
+
+define <4 x i16> @fun36(<4 x float> %val1, <4 x float> %val2, <4 x float> %val3, <4 x float> %val4, <4 x i16> %val5, <4 x i16> %val6) {
+; CHECK-LABEL: fun36:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v24, %v24
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrlf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <4 x float> %val1, %val2
+ %cmp1 = fcmp ogt <4 x float> %val3, %val4
+ %and = and <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i16> %val5, <4 x i16> %val6
+ ret <4 x i16> %sel
+}
+
+define <4 x float> @fun37(<4 x float> %val1, <4 x float> %val2, <4 x float> %val3, <4 x float> %val4, <4 x float> %val5, <4 x float> %val6) {
+; CHECK-LABEL: fun37:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v24, %v24
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrlf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <4 x float> %val1, %val2
+ %cmp1 = fcmp ogt <4 x float> %val3, %val4
+ %and = and <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x float> %val5, <4 x float> %val6
+ ret <4 x float> %sel
+}
+
+define <4 x double> @fun38(<4 x float> %val1, <4 x float> %val2, <4 x float> %val3, <4 x float> %val4, <4 x double> %val5, <4 x double> %val6) {
+; CHECK-LABEL: fun38:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v24, %v24
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrlf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v1
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <4 x float> %val1, %val2
+ %cmp1 = fcmp ogt <4 x float> %val3, %val4
+ %and = and <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x double> %val5, <4 x double> %val6
+ ret <4 x double> %sel
+}
+
+define <4 x i8> @fun39(<4 x float> %val1, <4 x float> %val2, <4 x double> %val3, <4 x double> %val4, <4 x i8> %val5, <4 x i8> %val6) {
+; CHECK-LABEL: fun39:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v30, %v27
+; CHECK-NEXT: vfchdb %v1, %v28, %v25
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrlf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vmrhf %v3, %v24, %v24
+; CHECK-NEXT: larl %r1, .LCPI39_0
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v29, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <4 x float> %val1, %val2
+ %cmp1 = fcmp ogt <4 x double> %val3, %val4
+ %and = and <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i8> %val5, <4 x i8> %val6
+ ret <4 x i8> %sel
+}
+
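+; The same AND-of-compares patterns, with eight-element vector operands.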
+define <8 x i8> @fun40(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i16> %val4, <8 x i8> %val5, <8 x i8> %val6) {
+; CHECK-LABEL: fun40:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v28, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vpkh %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = icmp eq <8 x i16> %val3, %val4
+ %and = and <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x i8> %val5, <8 x i8> %val6
+ ret <8 x i8> %sel
+}
+
+define <8 x i16> @fun41(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i16> %val4, <8 x i16> %val5, <8 x i16> %val6) {
+; CHECK-LABEL: fun41:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v28, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = icmp eq <8 x i16> %val3, %val4
+ %and = and <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x i16> %val5, <8 x i16> %val6
+ ret <8 x i16> %sel
+}
+
+define <8 x i32> @fun42(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i16> %val4, <8 x i32> %val5, <8 x i32> %val6) {
+; CHECK-LABEL: fun42:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v28, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vuphh %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v1
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = icmp eq <8 x i16> %val3, %val4
+ %and = and <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x i32> %val5, <8 x i32> %val6
+ ret <8 x i32> %sel
+}
+
+define <8 x i64> @fun43(<8 x i16> %val1, <8 x i16> %val2, <8 x i32> %val3, <8 x i32> %val4, <8 x i64> %val5, <8 x i64> %val6) {
+; CHECK-LABEL: fun43:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vceqf %v0, %v28, %v25
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vn %v0, %v2, %v0
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vuphf %v2, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v29, %v3, %v2
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vsel %v26, %v31, %v2, %v0
+; CHECK-NEXT: vceqf %v0, %v30, %v27
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v28, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = icmp eq <8 x i32> %val3, %val4
+ %and = and <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x i64> %val5, <8 x i64> %val6
+ ret <8 x i64> %sel
+}
+
+define <8 x i8> @fun44(<8 x i16> %val1, <8 x i16> %val2, <8 x i64> %val3, <8 x i64> %val4, <8 x i8> %val5, <8 x i8> %val6) {
+; CHECK-LABEL: fun44:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vl %v1, 160(%r15)
+; CHECK-NEXT: vceqg %v0, %v27, %v0
+; CHECK-NEXT: vceqg %v1, %v25, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqg %v1, %v30, %v31
+; CHECK-NEXT: vceqg %v2, %v28, %v29
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vlrepg %v1, 200(%r15)
+; CHECK-NEXT: vlrepg %v2, 192(%r15)
+; CHECK-NEXT: vpkh %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = icmp eq <8 x i64> %val3, %val4
+ %and = and <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x i8> %val5, <8 x i8> %val6
+ ret <8 x i8> %sel
+}
+
+define <8 x i16> @fun45(<8 x i16> %val1, <8 x i16> %val2, <8 x float> %val3, <8 x float> %val4, <8 x i16> %val5, <8 x i16> %val6) {
+; CHECK-LABEL: fun45:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v27, %v27
+; CHECK-NEXT: vmrlf %v1, %v30, %v30
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v27, %v27
+; CHECK-NEXT: vmrhf %v2, %v30, %v30
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v28, %v28
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v25, %v25
+; CHECK-NEXT: vmrlf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v25, %v25
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v29, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = fcmp ogt <8 x float> %val3, %val4
+ %and = and <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x i16> %val5, <8 x i16> %val6
+ ret <8 x i16> %sel
+}
+
+define <8 x i32> @fun46(<8 x i16> %val1, <8 x i16> %val2, <8 x double> %val3, <8 x double> %val4, <8 x i32> %val5, <8 x i32> %val6) {
+; CHECK-LABEL: fun46:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v30, %v31
+; CHECK-NEXT: vfchdb %v1, %v28, %v29
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vn %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vfchdb %v0, %v27, %v0
+; CHECK-NEXT: vfchdb %v2, %v25, %v2
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vpkg %v0, %v2, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = fcmp ogt <8 x double> %val3, %val4
+ %and = and <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x i32> %val5, <8 x i32> %val6
+ ret <8 x i32> %sel
+}
+
+define <8 x i32> @fun47(<8 x i32> %val1, <8 x i32> %val2, <8 x i64> %val3, <8 x i64> %val4, <8 x i32> %val5, <8 x i32> %val6) {
+; CHECK-LABEL: fun47:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vl %v1, 160(%r15)
+; CHECK-NEXT: vceqg %v0, %v27, %v0
+; CHECK-NEXT: vceqg %v1, %v25, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v24, %v28
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vceqg %v0, %v31, %v0
+; CHECK-NEXT: vceqg %v1, %v29, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v26, %v30
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vl %v2, 240(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i32> %val1, %val2
+ %cmp1 = icmp eq <8 x i64> %val3, %val4
+ %and = and <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x i32> %val5, <8 x i32> %val6
+ ret <8 x i32> %sel
+}
+
+define <8 x double> @fun48(<8 x i32> %val1, <8 x i32> %val2, <8 x float> %val3, <8 x float> %val4, <8 x double> %val5, <8 x double> %val6) {
+; CHECK-LABEL: fun48:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v29, %v29
+; CHECK-NEXT: vmrlf %v1, %v25, %v25
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v29, %v29
+; CHECK-NEXT: vmrhf %v2, %v25, %v25
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vl %v4, 192(%r15)
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v24, %v28
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v1
+; CHECK-NEXT: vmrlf %v1, %v31, %v31
+; CHECK-NEXT: vmrlf %v2, %v27, %v27
+; CHECK-NEXT: vmrhf %v3, %v27, %v27
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v31, %v31
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vl %v3, 256(%r15)
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vceqf %v2, %v26, %v30
+; CHECK-NEXT: vn %v1, %v2, %v1
+; CHECK-NEXT: vuphf %v2, %v1
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 240(%r15)
+; CHECK-NEXT: vl %v3, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v3, %v2, %v0
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i32> %val1, %val2
+ %cmp1 = fcmp ogt <8 x float> %val3, %val4
+ %and = and <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x double> %val5, <8 x double> %val6
+ ret <8 x double> %sel
+}
+
+define <8 x double> @fun49(<8 x i32> %val1, <8 x i32> %val2, <8 x double> %val3, <8 x double> %val4, <8 x double> %val5, <8 x double> %val6) {
+; CHECK-LABEL: fun49:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 160(%r15)
+; CHECK-NEXT: vceqf %v1, %v24, %v28
+; CHECK-NEXT: vfchdb %v0, %v25, %v0
+; CHECK-NEXT: vuphf %v2, %v1
+; CHECK-NEXT: vn %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vl %v3, 224(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vl %v0, 192(%r15)
+; CHECK-NEXT: vceqf %v2, %v26, %v30
+; CHECK-NEXT: vfchdb %v0, %v29, %v0
+; CHECK-NEXT: vuphf %v3, %v2
+; CHECK-NEXT: vn %v0, %v3, %v0
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vl %v4, 256(%r15)
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v0
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v27, %v0
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 304(%r15)
+; CHECK-NEXT: vl %v3, 240(%r15)
+; CHECK-NEXT: vsel %v26, %v3, %v1, %v0
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vmrlg %v1, %v2, %v2
+; CHECK-NEXT: vfchdb %v0, %v31, %v0
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vl %v2, 272(%r15)
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 336(%r15)
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i32> %val1, %val2
+ %cmp1 = fcmp ogt <8 x double> %val3, %val4
+ %and = and <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x double> %val5, <8 x double> %val6
+ ret <8 x double> %sel
+}
+
+define <8 x i64> @fun50(<8 x float> %val1, <8 x float> %val2, <8 x double> %val3, <8 x double> %val4, <8 x i64> %val5, <8 x i64> %val6) {
+; CHECK-LABEL: fun50:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v28, %v28
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v28, %v28
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vl %v3, 224(%r15)
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vl %v4, 256(%r15)
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vfchdb %v2, %v25, %v2
+; CHECK-NEXT: vn %v1, %v1, %v2
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v1
+; CHECK-NEXT: vmrlf %v1, %v30, %v30
+; CHECK-NEXT: vmrlf %v2, %v26, %v26
+; CHECK-NEXT: vmrhf %v3, %v26, %v26
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v30, %v30
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vuphf %v2, %v1
+; CHECK-NEXT: vfchdb %v3, %v29, %v3
+; CHECK-NEXT: vn %v2, %v2, %v3
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vl %v3, 240(%r15)
+; CHECK-NEXT: vfchdb %v2, %v27, %v2
+; CHECK-NEXT: vn %v0, %v0, %v2
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vsel %v26, %v3, %v2, %v0
+; CHECK-NEXT: vl %v2, 272(%r15)
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vfchdb %v1, %v31, %v1
+; CHECK-NEXT: vn %v0, %v0, %v1
+; CHECK-NEXT: vl %v1, 336(%r15)
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <8 x float> %val1, %val2
+ %cmp1 = fcmp ogt <8 x double> %val3, %val4
+ %and = and <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x i64> %val5, <8 x i64> %val6
+ ret <8 x i64> %sel
+}
+
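+; The same AND-of-compares patterns, with sixteen-element vector operands.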
+define <16 x i8> @fun51(<16 x i8> %val1, <16 x i8> %val2, <16 x i8> %val3, <16 x i8> %val4, <16 x i8> %val5, <16 x i8> %val6) {
+; CHECK-LABEL: fun51:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v28, %v30
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = icmp eq <16 x i8> %val3, %val4
+ %and = and <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i8> %val5, <16 x i8> %val6
+ ret <16 x i8> %sel
+}
+
+define <16 x i16> @fun52(<16 x i8> %val1, <16 x i8> %val2, <16 x i8> %val3, <16 x i8> %val4, <16 x i16> %val5, <16 x i16> %val6) {
+; CHECK-LABEL: fun52:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v28, %v30
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vuphb %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v1
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = icmp eq <16 x i8> %val3, %val4
+ %and = and <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i16> %val5, <16 x i16> %val6
+ ret <16 x i16> %sel
+}
+
+define <16 x i64> @fun53(<16 x i8> %val1, <16 x i8> %val2, <16 x i16> %val3, <16 x i16> %val4, <16 x i64> %val5, <16 x i64> %val6) {
+; CHECK-LABEL: fun53:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vceqh %v0, %v28, %v25
+; CHECK-NEXT: vuphb %v2, %v1
+; CHECK-NEXT: vn %v0, %v2, %v0
+; CHECK-NEXT: vuphh %v2, %v0
+; CHECK-NEXT: vl %v3, 256(%r15)
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsel %v24, %v29, %v3, %v2
+; CHECK-NEXT: vpkg %v2, %v0, %v0
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vl %v3, 272(%r15)
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsel %v26, %v31, %v3, %v2
+; CHECK-NEXT: vmrlg %v2, %v0, %v0
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vsldb %v0, %v0, %v0, 12
+; CHECK-NEXT: vl %v3, 288(%r15)
+; CHECK-NEXT: vl %v4, 160(%r15)
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vl %v3, 176(%r15)
+; CHECK-NEXT: vl %v4, 192(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vsel %v0, %v3, %v2, %v0
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vceqh %v2, %v30, %v27
+; CHECK-NEXT: vlr %v30, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vn %v1, %v1, %v2
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsel %v25, %v4, %v3, %v2
+; CHECK-NEXT: vl %v3, 336(%r15)
+; CHECK-NEXT: vl %v4, 208(%r15)
+; CHECK-NEXT: vpkg %v2, %v1, %v1
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsel %v27, %v4, %v3, %v2
+; CHECK-NEXT: vl %v3, 352(%r15)
+; CHECK-NEXT: vl %v4, 224(%r15)
+; CHECK-NEXT: vmrlg %v2, %v1, %v1
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vsldb %v1, %v1, %v1, 12
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vsel %v29, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 368(%r15)
+; CHECK-NEXT: vl %v3, 240(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v31, %v3, %v2, %v1
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = icmp eq <16 x i16> %val3, %val4
+ %and = and <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i64> %val5, <16 x i64> %val6
+ ret <16 x i64> %sel
+}
+
+define <16 x i64> @fun54(<16 x i8> %val1, <16 x i8> %val2, <16 x i32> %val3, <16 x i32> %val4, <16 x i64> %val5, <16 x i64> %val6) {
+; CHECK-LABEL: fun54:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vuphb %v2, %v1
+; CHECK-NEXT: vceqf %v0, %v28, %v29
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vn %v0, %v2, %v0
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vl %v4, 192(%r15)
+; CHECK-NEXT: vuphf %v2, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 336(%r15)
+; CHECK-NEXT: vl %v3, 208(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v26, %v3, %v2, %v0
+; CHECK-NEXT: vpkg %v2, %v1, %v1
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vceqf %v0, %v30, %v31
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vn %v0, %v2, %v0
+; CHECK-NEXT: vl %v3, 352(%r15)
+; CHECK-NEXT: vl %v4, 224(%r15)
+; CHECK-NEXT: vuphf %v2, %v0
+; CHECK-NEXT: vl %v5, 256(%r15)
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vl %v4, 384(%r15)
+; CHECK-NEXT: vmrlg %v3, %v1, %v1
+; CHECK-NEXT: vuphb %v3, %v3
+; CHECK-NEXT: vceqf %v2, %v25, %v2
+; CHECK-NEXT: vuphh %v3, %v3
+; CHECK-NEXT: vn %v2, %v3, %v2
+; CHECK-NEXT: vuphf %v3, %v2
+; CHECK-NEXT: vsldb %v1, %v1, %v1, 12
+; CHECK-NEXT: vsel %v25, %v5, %v4, %v3
+; CHECK-NEXT: vl %v3, 176(%r15)
+; CHECK-NEXT: vl %v4, 416(%r15)
+; CHECK-NEXT: vl %v5, 288(%r15)
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vceqf %v3, %v27, %v3
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vn %v1, %v1, %v3
+; CHECK-NEXT: vuphf %v3, %v1
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v29, %v5, %v4, %v3
+; CHECK-NEXT: vl %v3, 368(%r15)
+; CHECK-NEXT: vl %v4, 240(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v4, %v3, %v0
+; CHECK-NEXT: vl %v3, 272(%r15)
+; CHECK-NEXT: vmrlg %v0, %v2, %v2
+; CHECK-NEXT: vl %v2, 400(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v27, %v3, %v2, %v0
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 432(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v31, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = icmp eq <16 x i32> %val3, %val4
+ %and = and <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i64> %val5, <16 x i64> %val6
+ ret <16 x i64> %sel
+}
+
+define <16 x i64> @fun55(<16 x i8> %val1, <16 x i8> %val2, <16 x i64> %val3, <16 x i64> %val4, <16 x i64> %val5, <16 x i64> %val6) {
+; CHECK-LABEL: fun55:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 192(%r15)
+; CHECK-NEXT: vceqg %v1, %v28, %v0
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v2, %v0
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vn %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 448(%r15)
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v1
+; CHECK-NEXT: vpkf %v2, %v0, %v0
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vceqg %v1, %v30, %v1
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vn %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 464(%r15)
+; CHECK-NEXT: vl %v3, 336(%r15)
+; CHECK-NEXT: vsel %v26, %v3, %v2, %v1
+; CHECK-NEXT: vpkg %v2, %v0, %v0
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vl %v3, 352(%r15)
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vceqg %v1, %v25, %v1
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vn %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 480(%r15)
+; CHECK-NEXT: vsel %v28, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v3, 368(%r15)
+; CHECK-NEXT: vsldb %v2, %v0, %v0, 6
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vceqg %v1, %v27, %v1
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vn %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 496(%r15)
+; CHECK-NEXT: vsel %v30, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vl %v3, 384(%r15)
+; CHECK-NEXT: vmrlg %v2, %v0, %v0
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vceqg %v1, %v29, %v1
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vn %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 512(%r15)
+; CHECK-NEXT: vsel %v25, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vl %v3, 400(%r15)
+; CHECK-NEXT: vsldb %v2, %v0, %v0, 10
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vceqg %v1, %v31, %v1
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vn %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 528(%r15)
+; CHECK-NEXT: vsel %v27, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 288(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vl %v3, 416(%r15)
+; CHECK-NEXT: vceqg %v1, %v2, %v1
+; CHECK-NEXT: vsldb %v2, %v0, %v0, 12
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsldb %v0, %v0, %v0, 14
+; CHECK-NEXT: vn %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 544(%r15)
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vsel %v29, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 304(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vceqg %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 432(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vn %v0, %v0, %v1
+; CHECK-NEXT: vl %v1, 560(%r15)
+; CHECK-NEXT: vsel %v31, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = icmp eq <16 x i64> %val3, %val4
+ %and = and <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i64> %val5, <16 x i64> %val6
+ ret <16 x i64> %sel
+}
+
+define <16 x i16> @fun56(<16 x i8> %val1, <16 x i8> %val2, <16 x float> %val3, <16 x float> %val4, <16 x i16> %val5, <16 x i16> %val6) {
+; CHECK-LABEL: fun56:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v31, %v31
+; CHECK-NEXT: vmrlf %v1, %v30, %v30
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v31, %v31
+; CHECK-NEXT: vmrhf %v2, %v30, %v30
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v28, %v28
+; CHECK-NEXT: vmrlf %v4, %v25, %v25
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v29, %v29
+; CHECK-NEXT: vmrlf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v29, %v29
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vuphb %v2, %v1
+; CHECK-NEXT: vn %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vmrlf %v2, %v0, %v0
+; CHECK-NEXT: vmrlf %v3, %v27, %v27
+; CHECK-NEXT: vmrhf %v0, %v0, %v0
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vmrhf %v3, %v27, %v27
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v0, %v3, %v0
+; CHECK-NEXT: vpkg %v0, %v0, %v2
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vmrlf %v3, %v2, %v2
+; CHECK-NEXT: vmrhf %v2, %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v3, %v4, %v3
+; CHECK-NEXT: vmrhf %v4, %v25, %v25
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v2, %v4, %v2
+; CHECK-NEXT: vpkg %v2, %v2, %v3
+; CHECK-NEXT: vpkf %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = fcmp ogt <16 x float> %val3, %val4
+ %and = and <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i16> %val5, <16 x i16> %val6
+ ret <16 x i16> %sel
+}
+
+define <16 x i8> @fun57(<16 x i8> %val1, <16 x i8> %val2, <16 x double> %val3, <16 x double> %val4, <16 x i8> %val5, <16 x i8> %val6) {
+; CHECK-LABEL: fun57:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 304(%r15)
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 288(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vl %v2, 256(%r15)
+; CHECK-NEXT: vfchdb %v1, %v31, %v1
+; CHECK-NEXT: vfchdb %v2, %v29, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vfchdb %v1, %v27, %v1
+; CHECK-NEXT: vfchdb %v2, %v25, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vfchdb %v2, %v30, %v2
+; CHECK-NEXT: vfchdb %v3, %v28, %v3
+; CHECK-NEXT: vpkg %v2, %v3, %v2
+; CHECK-NEXT: vpkf %v1, %v2, %v1
+; CHECK-NEXT: vpkh %v0, %v1, %v0
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 336(%r15)
+; CHECK-NEXT: vl %v2, 320(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = fcmp ogt <16 x double> %val3, %val4
+ %and = and <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i8> %val5, <16 x i8> %val6
+ ret <16 x i8> %sel
+}
+
+define <16 x i8> @fun58(<16 x i16> %val1, <16 x i16> %val2, <16 x i16> %val3, <16 x i16> %val4, <16 x i8> %val5, <16 x i8> %val6) {
+; CHECK-LABEL: fun58:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v27, %v31
+; CHECK-NEXT: vceqh %v1, %v26, %v30
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v25, %v29
+; CHECK-NEXT: vceqh %v2, %v24, %v28
+; CHECK-NEXT: vn %v1, %v2, %v1
+; CHECK-NEXT: vpkh %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = icmp eq <16 x i16> %val3, %val4
+ %and = and <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i8> %val5, <16 x i8> %val6
+ ret <16 x i8> %sel
+}
+
+define <16 x i16> @fun59(<16 x i16> %val1, <16 x i16> %val2, <16 x i16> %val3, <16 x i16> %val4, <16 x i16> %val5, <16 x i16> %val6) {
+; CHECK-LABEL: fun59:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v25, %v29
+; CHECK-NEXT: vceqh %v1, %v24, %v28
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vceqh %v0, %v27, %v31
+; CHECK-NEXT: vceqh %v1, %v26, %v30
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = icmp eq <16 x i16> %val3, %val4
+ %and = and <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i16> %val5, <16 x i16> %val6
+ ret <16 x i16> %sel
+}
+
+define <16 x i32> @fun60(<16 x i16> %val1, <16 x i16> %val2, <16 x i16> %val3, <16 x i16> %val4, <16 x i32> %val5, <16 x i32> %val6) {
+; CHECK-LABEL: fun60:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v25, %v29
+; CHECK-NEXT: vceqh %v1, %v24, %v28
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vuphh %v1, %v0
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v1
+; CHECK-NEXT: vceqh %v1, %v27, %v31
+; CHECK-NEXT: vceqh %v2, %v26, %v30
+; CHECK-NEXT: vn %v1, %v2, %v1
+; CHECK-NEXT: vl %v3, 256(%r15)
+; CHECK-NEXT: vl %v4, 192(%r15)
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 240(%r15)
+; CHECK-NEXT: vl %v3, 176(%r15)
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v26, %v3, %v2, %v0
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = icmp eq <16 x i16> %val3, %val4
+ %and = and <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i32> %val5, <16 x i32> %val6
+ ret <16 x i32> %sel
+}
+
+define <16 x i8> @fun61(<16 x i16> %val1, <16 x i16> %val2, <16 x i32> %val3, <16 x i32> %val4, <16 x i8> %val5, <16 x i8> %val6) {
+; CHECK-LABEL: fun61:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vceqf %v0, %v31, %v0
+; CHECK-NEXT: vceqf %v1, %v29, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v26, %v30
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vceqf %v1, %v27, %v1
+; CHECK-NEXT: vceqf %v2, %v25, %v2
+; CHECK-NEXT: vpkf %v1, %v2, %v1
+; CHECK-NEXT: vceqh %v2, %v24, %v28
+; CHECK-NEXT: vn %v1, %v2, %v1
+; CHECK-NEXT: vpkh %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = icmp eq <16 x i32> %val3, %val4
+ %and = and <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i8> %val5, <16 x i8> %val6
+ ret <16 x i8> %sel
+}
+
+define <16 x i32> @fun62(<16 x i16> %val1, <16 x i16> %val2, <16 x i64> %val3, <16 x i64> %val4, <16 x i32> %val5, <16 x i32> %val6) {
+; CHECK-LABEL: fun62:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 240(%r15)
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vceqg %v0, %v27, %v0
+; CHECK-NEXT: vceqg %v1, %v25, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v24, %v28
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vn %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 416(%r15)
+; CHECK-NEXT: vl %v3, 352(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vl %v0, 304(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vceqg %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vceqg %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v0, %v2, %v0
+; CHECK-NEXT: vceqh %v2, %v26, %v30
+; CHECK-NEXT: vuphh %v3, %v2
+; CHECK-NEXT: vn %v0, %v3, %v0
+; CHECK-NEXT: vl %v3, 448(%r15)
+; CHECK-NEXT: vl %v4, 384(%r15)
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v0
+; CHECK-NEXT: vl %v0, 272(%r15)
+; CHECK-NEXT: vl %v3, 256(%r15)
+; CHECK-NEXT: vceqg %v0, %v31, %v0
+; CHECK-NEXT: vceqg %v3, %v29, %v3
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vpkg %v0, %v3, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vl %v3, 368(%r15)
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 432(%r15)
+; CHECK-NEXT: vsel %v26, %v3, %v1, %v0
+; CHECK-NEXT: vl %v0, 336(%r15)
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vceqg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 320(%r15)
+; CHECK-NEXT: vceqg %v1, %v3, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlg %v1, %v2, %v2
+; CHECK-NEXT: vl %v2, 400(%r15)
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 464(%r15)
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = icmp eq <16 x i64> %val3, %val4
+ %and = and <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i32> %val5, <16 x i32> %val6
+ ret <16 x i32> %sel
+}
+
+define <16 x double> @fun63(<16 x i16> %val1, <16 x i16> %val2, <16 x float> %val3, <16 x float> %val4, <16 x double> %val5, <16 x double> %val6) {
+; CHECK-LABEL: fun63:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 160(%r15)
+; CHECK-NEXT: vmrlf %v1, %v0, %v0
+; CHECK-NEXT: vmrlf %v2, %v25, %v25
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v0, %v0, %v0
+; CHECK-NEXT: vmrhf %v2, %v25, %v25
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vl %v3, 352(%r15)
+; CHECK-NEXT: vl %v4, 224(%r15)
+; CHECK-NEXT: vl %v5, 416(%r15)
+; CHECK-NEXT: vl %v6, 288(%r15)
+; CHECK-NEXT: vfchdb %v0, %v2, %v0
+; CHECK-NEXT: vpkg %v0, %v0, %v1
+; CHECK-NEXT: vceqh %v1, %v24, %v28
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vn %v0, %v2, %v0
+; CHECK-NEXT: vuphf %v2, %v0
+; CHECK-NEXT: vsel %v24, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vmrlf %v3, %v2, %v2
+; CHECK-NEXT: vmrlf %v4, %v27, %v27
+; CHECK-NEXT: vmrhf %v2, %v2, %v2
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v3, %v4, %v3
+; CHECK-NEXT: vmrhf %v4, %v27, %v27
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v2, %v4, %v2
+; CHECK-NEXT: vl %v4, 256(%r15)
+; CHECK-NEXT: vpkg %v2, %v2, %v3
+; CHECK-NEXT: vl %v3, 384(%r15)
+; CHECK-NEXT: vn %v1, %v1, %v2
+; CHECK-NEXT: vuphf %v2, %v1
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 192(%r15)
+; CHECK-NEXT: vmrlf %v3, %v2, %v2
+; CHECK-NEXT: vmrlf %v4, %v29, %v29
+; CHECK-NEXT: vmrhf %v2, %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v3, %v4, %v3
+; CHECK-NEXT: vmrhf %v4, %v29, %v29
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v2, %v4, %v2
+; CHECK-NEXT: vpkg %v2, %v2, %v3
+; CHECK-NEXT: vceqh %v3, %v26, %v30
+; CHECK-NEXT: vuphh %v4, %v3
+; CHECK-NEXT: vn %v2, %v4, %v2
+; CHECK-NEXT: vuphf %v4, %v2
+; CHECK-NEXT: vsel %v25, %v6, %v5, %v4
+; CHECK-NEXT: vl %v4, 208(%r15)
+; CHECK-NEXT: vmrlf %v5, %v4, %v4
+; CHECK-NEXT: vmrlf %v6, %v31, %v31
+; CHECK-NEXT: vmrhf %v4, %v4, %v4
+; CHECK-NEXT: vmrlg %v3, %v3, %v3
+; CHECK-NEXT: vuphh %v3, %v3
+; CHECK-NEXT: vldeb %v5, %v5
+; CHECK-NEXT: vldeb %v6, %v6
+; CHECK-NEXT: vfchdb %v5, %v6, %v5
+; CHECK-NEXT: vmrhf %v6, %v31, %v31
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vldeb %v6, %v6
+; CHECK-NEXT: vfchdb %v4, %v6, %v4
+; CHECK-NEXT: vl %v6, 320(%r15)
+; CHECK-NEXT: vpkg %v4, %v4, %v5
+; CHECK-NEXT: vl %v5, 448(%r15)
+; CHECK-NEXT: vn %v3, %v3, %v4
+; CHECK-NEXT: vuphf %v4, %v3
+; CHECK-NEXT: vsel %v29, %v6, %v5, %v4
+; CHECK-NEXT: vl %v4, 368(%r15)
+; CHECK-NEXT: vl %v5, 240(%r15)
+; CHECK-NEXT: vsel %v26, %v5, %v4, %v0
+; CHECK-NEXT: vl %v4, 272(%r15)
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 400(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v4, %v1, %v0
+; CHECK-NEXT: vl %v1, 432(%r15)
+; CHECK-NEXT: vmrlg %v0, %v2, %v2
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v27, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 464(%r15)
+; CHECK-NEXT: vl %v2, 336(%r15)
+; CHECK-NEXT: vmrlg %v0, %v3, %v3
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v31, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = fcmp ogt <16 x float> %val3, %val4
+ %and = and <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x double> %val5, <16 x double> %val6
+ ret <16 x double> %sel
+}
+
+define <16 x i32> @fun64(<16 x i16> %val1, <16 x i16> %val2, <16 x double> %val3, <16 x double> %val4, <16 x i32> %val5, <16 x i32> %val6) {
+; CHECK-LABEL: fun64:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 240(%r15)
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vfchdb %v0, %v27, %v0
+; CHECK-NEXT: vfchdb %v1, %v25, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v24, %v28
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vn %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 416(%r15)
+; CHECK-NEXT: vl %v3, 352(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vl %v0, 304(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vfchdb %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v0, %v2, %v0
+; CHECK-NEXT: vceqh %v2, %v26, %v30
+; CHECK-NEXT: vuphh %v3, %v2
+; CHECK-NEXT: vn %v0, %v3, %v0
+; CHECK-NEXT: vl %v3, 448(%r15)
+; CHECK-NEXT: vl %v4, 384(%r15)
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v0
+; CHECK-NEXT: vl %v0, 272(%r15)
+; CHECK-NEXT: vl %v3, 256(%r15)
+; CHECK-NEXT: vfchdb %v0, %v31, %v0
+; CHECK-NEXT: vfchdb %v3, %v29, %v3
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vpkg %v0, %v3, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vl %v3, 368(%r15)
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 432(%r15)
+; CHECK-NEXT: vsel %v26, %v3, %v1, %v0
+; CHECK-NEXT: vl %v0, 336(%r15)
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 320(%r15)
+; CHECK-NEXT: vfchdb %v1, %v3, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlg %v1, %v2, %v2
+; CHECK-NEXT: vl %v2, 400(%r15)
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vn %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 464(%r15)
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = fcmp ogt <16 x double> %val3, %val4
+ %and = and <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i32> %val5, <16 x i32> %val6
+ ret <16 x i32> %sel
+}
+
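+; From here on, the two compares are combined with 'or' rather than 'and',
+; so a VO instruction is expected in place of VN.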
+define <2 x i8> @fun65(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x i8> %val5, <2 x i8> %val6) {
+; CHECK-LABEL: fun65:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v28, %v30
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = icmp eq <2 x i8> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i8> %val5, <2 x i8> %val6
+ ret <2 x i8> %sel
+}
+
+define <2 x i16> @fun66(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun66:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v28, %v30
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = icmp eq <2 x i8> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x i8> @fun67(<2 x i8> %val1, <2 x i8> %val2, <2 x i16> %val3, <2 x i16> %val4, <2 x i8> %val5, <2 x i8> %val6) {
+; CHECK-LABEL: fun67:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v1, %v28, %v30
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vpkh %v1, %v1, %v1
+; CHECK-NEXT: vo %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = icmp eq <2 x i16> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i8> %val5, <2 x i8> %val6
+ ret <2 x i8> %sel
+}
+
+define <2 x i32> @fun68(<2 x i8> %val1, <2 x i8> %val2, <2 x i32> %val3, <2 x i32> %val4, <2 x i32> %val5, <2 x i32> %val6) {
+; CHECK-LABEL: fun68:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = icmp eq <2 x i32> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i32> %val5, <2 x i32> %val6
+ ret <2 x i32> %sel
+}
+
+define <2 x i32> @fun69(<2 x i8> %val1, <2 x i8> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i32> %val5, <2 x i32> %val6) {
+; CHECK-LABEL: fun69:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vceqg %v0, %v28, %v30
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vpkg %v0, %v0, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = icmp eq <2 x i64> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i32> %val5, <2 x i32> %val6
+ ret <2 x i32> %sel
+}
+
+define <2 x i16> @fun70(<2 x i8> %val1, <2 x i8> %val2, <2 x float> %val3, <2 x float> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun70:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = fcmp ogt <2 x float> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x i64> @fun71(<2 x i8> %val1, <2 x i8> %val2, <2 x double> %val3, <2 x double> %val4, <2 x i64> %val5, <2 x i64> %val6) {
+; CHECK-LABEL: fun71:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v28, %v30
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = fcmp ogt <2 x double> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i64> %val5, <2 x i64> %val6
+ ret <2 x i64> %sel
+}
+
+define <2 x i8> @fun72(<2 x i16> %val1, <2 x i16> %val2, <2 x i16> %val3, <2 x i16> %val4, <2 x i8> %val5, <2 x i8> %val6) {
+; CHECK-LABEL: fun72:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v28, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vpkh %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = icmp eq <2 x i16> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i8> %val5, <2 x i8> %val6
+ ret <2 x i8> %sel
+}
+
+define <2 x i16> @fun73(<2 x i16> %val1, <2 x i16> %val2, <2 x i16> %val3, <2 x i16> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun73:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v28, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = icmp eq <2 x i16> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x i32> @fun74(<2 x i16> %val1, <2 x i16> %val2, <2 x i16> %val3, <2 x i16> %val4, <2 x i32> %val5, <2 x i32> %val6) {
+; CHECK-LABEL: fun74:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v28, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = icmp eq <2 x i16> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i32> %val5, <2 x i32> %val6
+ ret <2 x i32> %sel
+}
+
+define <2 x i8> @fun75(<2 x i16> %val1, <2 x i16> %val2, <2 x i32> %val3, <2 x i32> %val4, <2 x i8> %val5, <2 x i8> %val6) {
+; CHECK-LABEL: fun75:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v1, %v28, %v30
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vpkf %v1, %v1, %v1
+; CHECK-NEXT: vo %v0, %v0, %v1
+; CHECK-NEXT: vpkh %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = icmp eq <2 x i32> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i8> %val5, <2 x i8> %val6
+ ret <2 x i8> %sel
+}
+
+define <2 x i8> @fun76(<2 x i16> %val1, <2 x i16> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i8> %val5, <2 x i8> %val6) {
+; CHECK-LABEL: fun76:
+; CHECK: # BB#0:
+; CHECK-NEXT: larl %r1, .LCPI76_0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vceqg %v0, %v28, %v30
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vpkh %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = icmp eq <2 x i64> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i8> %val5, <2 x i8> %val6
+ ret <2 x i8> %sel
+}
+
+define <2 x double> @fun77(<2 x i16> %val1, <2 x i16> %val2, <2 x float> %val3, <2 x float> %val4, <2 x double> %val5, <2 x double> %val6) {
+; CHECK-LABEL: fun77:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = fcmp ogt <2 x float> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x double> %val5, <2 x double> %val6
+ ret <2 x double> %sel
+}
+
+define <2 x i16> @fun78(<2 x i16> %val1, <2 x i16> %val2, <2 x double> %val3, <2 x double> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun78:
+; CHECK: # BB#0:
+; CHECK-NEXT: larl %r1, .LCPI78_0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vfchdb %v0, %v28, %v30
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = fcmp ogt <2 x double> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x i16> @fun79(<2 x i32> %val1, <2 x i32> %val2, <2 x i32> %val3, <2 x i32> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun79:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i32> %val1, %val2
+ %cmp1 = icmp eq <2 x i32> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x i32> @fun80(<2 x i32> %val1, <2 x i32> %val2, <2 x i32> %val3, <2 x i32> %val4, <2 x i32> %val5, <2 x i32> %val6) {
+; CHECK-LABEL: fun80:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i32> %val1, %val2
+ %cmp1 = icmp eq <2 x i32> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i32> %val5, <2 x i32> %val6
+ ret <2 x i32> %sel
+}
+
+define <2 x i64> @fun81(<2 x i32> %val1, <2 x i32> %val2, <2 x i32> %val3, <2 x i32> %val4, <2 x i64> %val5, <2 x i64> %val6) {
+; CHECK-LABEL: fun81:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i32> %val1, %val2
+ %cmp1 = icmp eq <2 x i32> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i64> %val5, <2 x i64> %val6
+ ret <2 x i64> %sel
+}
+
+define <2 x i64> @fun82(<2 x i32> %val1, <2 x i32> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i64> %val5, <2 x i64> %val6) {
+; CHECK-LABEL: fun82:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vceqg %v0, %v28, %v30
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i32> %val1, %val2
+ %cmp1 = icmp eq <2 x i64> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i64> %val5, <2 x i64> %val6
+ ret <2 x i64> %sel
+}
+
+define <2 x i16> @fun83(<2 x i32> %val1, <2 x i32> %val2, <2 x float> %val3, <2 x float> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun83:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i32> %val1, %val2
+ %cmp1 = fcmp ogt <2 x float> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x float> @fun84(<2 x i32> %val1, <2 x i32> %val2, <2 x double> %val3, <2 x double> %val4, <2 x float> %val5, <2 x float> %val6) {
+; CHECK-LABEL: fun84:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v1, %v28, %v30
+; CHECK-NEXT: vceqf %v0, %v24, %v26
+; CHECK-NEXT: vpkg %v1, %v1, %v1
+; CHECK-NEXT: vo %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i32> %val1, %val2
+ %cmp1 = fcmp ogt <2 x double> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x float> %val5, <2 x float> %val6
+ ret <2 x float> %sel
+}
+
+define <2 x i16> @fun85(<2 x i64> %val1, <2 x i64> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun85:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v28, %v30
+; CHECK-NEXT: vceqg %v1, %v24, %v26
+; CHECK-NEXT: larl %r1, .LCPI85_0
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i64> %val1, %val2
+ %cmp1 = icmp eq <2 x i64> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x i64> @fun86(<2 x i64> %val1, <2 x i64> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i64> %val5, <2 x i64> %val6) {
+; CHECK-LABEL: fun86:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v28, %v30
+; CHECK-NEXT: vceqg %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i64> %val1, %val2
+ %cmp1 = icmp eq <2 x i64> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i64> %val5, <2 x i64> %val6
+ ret <2 x i64> %sel
+}
+
+define <2 x i64> @fun87(<2 x i64> %val1, <2 x i64> %val2, <2 x float> %val3, <2 x float> %val4, <2 x i64> %val5, <2 x i64> %val6) {
+; CHECK-LABEL: fun87:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vceqg %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i64> %val1, %val2
+ %cmp1 = fcmp ogt <2 x float> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i64> %val5, <2 x i64> %val6
+ ret <2 x i64> %sel
+}
+
+define <2 x i16> @fun88(<2 x i64> %val1, <2 x i64> %val2, <2 x double> %val3, <2 x double> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun88:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v28, %v30
+; CHECK-NEXT: vceqg %v1, %v24, %v26
+; CHECK-NEXT: larl %r1, .LCPI88_0
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i64> %val1, %val2
+ %cmp1 = fcmp ogt <2 x double> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x float> @fun89(<2 x float> %val1, <2 x float> %val2, <2 x float> %val3, <2 x float> %val4, <2 x float> %val5, <2 x float> %val6) {
+; CHECK-LABEL: fun89:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v24, %v24
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrlf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <2 x float> %val1, %val2
+ %cmp1 = fcmp ogt <2 x float> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x float> %val5, <2 x float> %val6
+ ret <2 x float> %sel
+}
+
+define <2 x i32> @fun90(<2 x float> %val1, <2 x float> %val2, <2 x double> %val3, <2 x double> %val4, <2 x i32> %val5, <2 x i32> %val6) {
+; CHECK-LABEL: fun90:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v26, %v26
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vfchdb %v1, %v28, %v30
+; CHECK-NEXT: vpkg %v1, %v1, %v1
+; CHECK-NEXT: vo %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <2 x float> %val1, %val2
+ %cmp1 = fcmp ogt <2 x double> %val3, %val4
+ %or = or <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %or, <2 x i32> %val5, <2 x i32> %val6
+ ret <2 x i32> %sel
+}
+
+define <4 x i16> @fun91(<4 x i32> %val1, <4 x i32> %val2, <4 x i32> %val3, <4 x i32> %val4, <4 x i16> %val5, <4 x i16> %val6) {
+; CHECK-LABEL: fun91:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i32> %val1, %val2
+ %cmp1 = icmp eq <4 x i32> %val3, %val4
+ %or = or <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %or, <4 x i16> %val5, <4 x i16> %val6
+ ret <4 x i16> %sel
+}
+
+define <4 x i32> @fun92(<4 x i32> %val1, <4 x i32> %val2, <4 x i32> %val3, <4 x i32> %val4, <4 x i32> %val5, <4 x i32> %val6) {
+; CHECK-LABEL: fun92:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i32> %val1, %val2
+ %cmp1 = icmp eq <4 x i32> %val3, %val4
+ %or = or <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %or, <4 x i32> %val5, <4 x i32> %val6
+ ret <4 x i32> %sel
+}
+
+define <4 x i64> @fun93(<4 x i32> %val1, <4 x i32> %val2, <4 x i32> %val3, <4 x i32> %val4, <4 x i64> %val5, <4 x i64> %val6) {
+; CHECK-LABEL: fun93:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v1
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i32> %val1, %val2
+ %cmp1 = icmp eq <4 x i32> %val3, %val4
+ %or = or <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %or, <4 x i64> %val5, <4 x i64> %val6
+ ret <4 x i64> %sel
+}
+
+define <4 x i32> @fun94(<4 x i32> %val1, <4 x i32> %val2, <4 x i64> %val3, <4 x i64> %val4, <4 x i32> %val5, <4 x i32> %val6) {
+; CHECK-LABEL: fun94:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v30, %v27
+; CHECK-NEXT: vceqg %v1, %v28, %v25
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v29, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i32> %val1, %val2
+ %cmp1 = icmp eq <4 x i64> %val3, %val4
+ %or = or <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %or, <4 x i32> %val5, <4 x i32> %val6
+ ret <4 x i32> %sel
+}
+
+define <4 x i16> @fun95(<4 x i32> %val1, <4 x i32> %val2, <4 x float> %val3, <4 x float> %val4, <4 x i16> %val5, <4 x i16> %val6) {
+; CHECK-LABEL: fun95:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i32> %val1, %val2
+ %cmp1 = fcmp ogt <4 x float> %val3, %val4
+ %or = or <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %or, <4 x i16> %val5, <4 x i16> %val6
+ ret <4 x i16> %sel
+}
+
+define <4 x i8> @fun96(<4 x i32> %val1, <4 x i32> %val2, <4 x double> %val3, <4 x double> %val4, <4 x i8> %val5, <4 x i8> %val6) {
+; CHECK-LABEL: fun96:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v30, %v27
+; CHECK-NEXT: vfchdb %v1, %v28, %v25
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: larl %r1, .LCPI96_0
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v29, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i32> %val1, %val2
+ %cmp1 = fcmp ogt <4 x double> %val3, %val4
+ %or = or <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %or, <4 x i8> %val5, <4 x i8> %val6
+ ret <4 x i8> %sel
+}
+
+define <4 x i32> @fun97(<4 x i64> %val1, <4 x i64> %val2, <4 x i64> %val3, <4 x i64> %val4, <4 x i32> %val5, <4 x i32> %val6) {
+; CHECK-LABEL: fun97:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v27, %v31
+; CHECK-NEXT: vceqg %v1, %v26, %v30
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vceqg %v1, %v25, %v29
+; CHECK-NEXT: vceqg %v2, %v24, %v28
+; CHECK-NEXT: vo %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i64> %val1, %val2
+ %cmp1 = icmp eq <4 x i64> %val3, %val4
+ %or = or <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %or, <4 x i32> %val5, <4 x i32> %val6
+ ret <4 x i32> %sel
+}
+
+define <4 x i64> @fun98(<4 x i64> %val1, <4 x i64> %val2, <4 x i64> %val3, <4 x i64> %val4, <4 x i64> %val5, <4 x i64> %val6) {
+; CHECK-LABEL: fun98:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v25, %v29
+; CHECK-NEXT: vceqg %v1, %v24, %v28
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vceqg %v0, %v27, %v31
+; CHECK-NEXT: vceqg %v1, %v26, %v30
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i64> %val1, %val2
+ %cmp1 = icmp eq <4 x i64> %val3, %val4
+ %or = or <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %or, <4 x i64> %val5, <4 x i64> %val6
+ ret <4 x i64> %sel
+}
+
+define <4 x i64> @fun99(<4 x i64> %val1, <4 x i64> %val2, <4 x float> %val3, <4 x float> %val4, <4 x i64> %val5, <4 x i64> %val6) {
+; CHECK-LABEL: fun99:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v27, %v27
+; CHECK-NEXT: vmrlf %v1, %v25, %v25
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v27, %v27
+; CHECK-NEXT: vmrhf %v2, %v25, %v25
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vceqg %v2, %v24, %v28
+; CHECK-NEXT: vo %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v29, %v2, %v1
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vceqg %v1, %v26, %v30
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v31, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i64> %val1, %val2
+ %cmp1 = fcmp ogt <4 x float> %val3, %val4
+ %or = or <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %or, <4 x i64> %val5, <4 x i64> %val6
+ ret <4 x i64> %sel
+}
+
+define <4 x float> @fun100(<4 x i64> %val1, <4 x i64> %val2, <4 x double> %val3, <4 x double> %val4, <4 x float> %val5, <4 x float> %val6) {
+; CHECK-LABEL: fun100:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v27, %v31
+; CHECK-NEXT: vceqg %v1, %v26, %v30
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vfchdb %v1, %v25, %v29
+; CHECK-NEXT: vceqg %v2, %v24, %v28
+; CHECK-NEXT: vo %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i64> %val1, %val2
+ %cmp1 = fcmp ogt <4 x double> %val3, %val4
+ %or = or <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %or, <4 x float> %val5, <4 x float> %val6
+ ret <4 x float> %sel
+}
+
+define <4 x i16> @fun101(<4 x float> %val1, <4 x float> %val2, <4 x float> %val3, <4 x float> %val4, <4 x i16> %val5, <4 x i16> %val6) {
+; CHECK-LABEL: fun101:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v24, %v24
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrlf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <4 x float> %val1, %val2
+ %cmp1 = fcmp ogt <4 x float> %val3, %val4
+ %or = or <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %or, <4 x i16> %val5, <4 x i16> %val6
+ ret <4 x i16> %sel
+}
+
+define <4 x float> @fun102(<4 x float> %val1, <4 x float> %val2, <4 x float> %val3, <4 x float> %val4, <4 x float> %val5, <4 x float> %val6) {
+; CHECK-LABEL: fun102:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v24, %v24
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrlf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <4 x float> %val1, %val2
+ %cmp1 = fcmp ogt <4 x float> %val3, %val4
+ %or = or <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %or, <4 x float> %val5, <4 x float> %val6
+ ret <4 x float> %sel
+}
+
+define <4 x double> @fun103(<4 x float> %val1, <4 x float> %val2, <4 x float> %val3, <4 x float> %val4, <4 x double> %val5, <4 x double> %val6) {
+; CHECK-LABEL: fun103:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v24, %v24
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrlf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v1
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <4 x float> %val1, %val2
+ %cmp1 = fcmp ogt <4 x float> %val3, %val4
+ %or = or <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %or, <4 x double> %val5, <4 x double> %val6
+ ret <4 x double> %sel
+}
+
+define <4 x i8> @fun104(<4 x float> %val1, <4 x float> %val2, <4 x double> %val3, <4 x double> %val4, <4 x i8> %val5, <4 x i8> %val6) {
+; CHECK-LABEL: fun104:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v30, %v27
+; CHECK-NEXT: vfchdb %v1, %v28, %v25
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrlf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vmrhf %v3, %v24, %v24
+; CHECK-NEXT: larl %r1, .LCPI104_0
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v29, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <4 x float> %val1, %val2
+ %cmp1 = fcmp ogt <4 x double> %val3, %val4
+ %or = or <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %or, <4 x i8> %val5, <4 x i8> %val6
+ ret <4 x i8> %sel
+}
+
+define <8 x i8> @fun105(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i16> %val4, <8 x i8> %val5, <8 x i8> %val6) {
+; CHECK-LABEL: fun105:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v28, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vpkh %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = icmp eq <8 x i16> %val3, %val4
+ %or = or <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %or, <8 x i8> %val5, <8 x i8> %val6
+ ret <8 x i8> %sel
+}
+
+define <8 x i16> @fun106(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i16> %val4, <8 x i16> %val5, <8 x i16> %val6) {
+; CHECK-LABEL: fun106:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v28, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = icmp eq <8 x i16> %val3, %val4
+ %or = or <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %or, <8 x i16> %val5, <8 x i16> %val6
+ ret <8 x i16> %sel
+}
+
+define <8 x i32> @fun107(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i16> %val4, <8 x i32> %val5, <8 x i32> %val6) {
+; CHECK-LABEL: fun107:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v28, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vuphh %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v1
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = icmp eq <8 x i16> %val3, %val4
+ %or = or <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %or, <8 x i32> %val5, <8 x i32> %val6
+ ret <8 x i32> %sel
+}
+
+define <8 x i64> @fun108(<8 x i16> %val1, <8 x i16> %val2, <8 x i32> %val3, <8 x i32> %val4, <8 x i64> %val5, <8 x i64> %val6) {
+; CHECK-LABEL: fun108:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vceqf %v0, %v28, %v25
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vo %v0, %v2, %v0
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vuphf %v2, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v29, %v3, %v2
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vsel %v26, %v31, %v2, %v0
+; CHECK-NEXT: vceqf %v0, %v30, %v27
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v28, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = icmp eq <8 x i32> %val3, %val4
+ %or = or <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %or, <8 x i64> %val5, <8 x i64> %val6
+ ret <8 x i64> %sel
+}
+
+define <8 x i8> @fun109(<8 x i16> %val1, <8 x i16> %val2, <8 x i64> %val3, <8 x i64> %val4, <8 x i8> %val5, <8 x i8> %val6) {
+; CHECK-LABEL: fun109:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vl %v1, 160(%r15)
+; CHECK-NEXT: vceqg %v0, %v27, %v0
+; CHECK-NEXT: vceqg %v1, %v25, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqg %v1, %v30, %v31
+; CHECK-NEXT: vceqg %v2, %v28, %v29
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vlrepg %v1, 200(%r15)
+; CHECK-NEXT: vlrepg %v2, 192(%r15)
+; CHECK-NEXT: vpkh %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = icmp eq <8 x i64> %val3, %val4
+ %or = or <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %or, <8 x i8> %val5, <8 x i8> %val6
+ ret <8 x i8> %sel
+}
+
+define <8 x i16> @fun110(<8 x i16> %val1, <8 x i16> %val2, <8 x float> %val3, <8 x float> %val4, <8 x i16> %val5, <8 x i16> %val6) {
+; CHECK-LABEL: fun110:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v27, %v27
+; CHECK-NEXT: vmrlf %v1, %v30, %v30
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v27, %v27
+; CHECK-NEXT: vmrhf %v2, %v30, %v30
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v28, %v28
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v25, %v25
+; CHECK-NEXT: vmrlf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v25, %v25
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v29, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = fcmp ogt <8 x float> %val3, %val4
+ %or = or <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %or, <8 x i16> %val5, <8 x i16> %val6
+ ret <8 x i16> %sel
+}
+
+define <8 x i32> @fun111(<8 x i16> %val1, <8 x i16> %val2, <8 x double> %val3, <8 x double> %val4, <8 x i32> %val5, <8 x i32> %val6) {
+; CHECK-LABEL: fun111:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v30, %v31
+; CHECK-NEXT: vfchdb %v1, %v28, %v29
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vo %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vfchdb %v0, %v27, %v0
+; CHECK-NEXT: vfchdb %v2, %v25, %v2
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vpkg %v0, %v2, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = fcmp ogt <8 x double> %val3, %val4
+ %or = or <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %or, <8 x i32> %val5, <8 x i32> %val6
+ ret <8 x i32> %sel
+}
+
+define <8 x i32> @fun112(<8 x i32> %val1, <8 x i32> %val2, <8 x i64> %val3, <8 x i64> %val4, <8 x i32> %val5, <8 x i32> %val6) {
+; CHECK-LABEL: fun112:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vl %v1, 160(%r15)
+; CHECK-NEXT: vceqg %v0, %v27, %v0
+; CHECK-NEXT: vceqg %v1, %v25, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v24, %v28
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vceqg %v0, %v31, %v0
+; CHECK-NEXT: vceqg %v1, %v29, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v26, %v30
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vl %v2, 240(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i32> %val1, %val2
+ %cmp1 = icmp eq <8 x i64> %val3, %val4
+ %or = or <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %or, <8 x i32> %val5, <8 x i32> %val6
+ ret <8 x i32> %sel
+}
+
+define <8 x double> @fun113(<8 x i32> %val1, <8 x i32> %val2, <8 x float> %val3, <8 x float> %val4, <8 x double> %val5, <8 x double> %val6) {
+; CHECK-LABEL: fun113:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v29, %v29
+; CHECK-NEXT: vmrlf %v1, %v25, %v25
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v29, %v29
+; CHECK-NEXT: vmrhf %v2, %v25, %v25
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vl %v4, 192(%r15)
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v24, %v28
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v1
+; CHECK-NEXT: vmrlf %v1, %v31, %v31
+; CHECK-NEXT: vmrlf %v2, %v27, %v27
+; CHECK-NEXT: vmrhf %v3, %v27, %v27
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v31, %v31
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vl %v3, 256(%r15)
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vceqf %v2, %v26, %v30
+; CHECK-NEXT: vo %v1, %v2, %v1
+; CHECK-NEXT: vuphf %v2, %v1
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 240(%r15)
+; CHECK-NEXT: vl %v3, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v3, %v2, %v0
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i32> %val1, %val2
+ %cmp1 = fcmp ogt <8 x float> %val3, %val4
+ %or = or <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %or, <8 x double> %val5, <8 x double> %val6
+ ret <8 x double> %sel
+}
+
+define <8 x double> @fun114(<8 x i32> %val1, <8 x i32> %val2, <8 x double> %val3, <8 x double> %val4, <8 x double> %val5, <8 x double> %val6) {
+; CHECK-LABEL: fun114:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 160(%r15)
+; CHECK-NEXT: vceqf %v1, %v24, %v28
+; CHECK-NEXT: vfchdb %v0, %v25, %v0
+; CHECK-NEXT: vuphf %v2, %v1
+; CHECK-NEXT: vo %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vl %v3, 224(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vl %v0, 192(%r15)
+; CHECK-NEXT: vceqf %v2, %v26, %v30
+; CHECK-NEXT: vfchdb %v0, %v29, %v0
+; CHECK-NEXT: vuphf %v3, %v2
+; CHECK-NEXT: vo %v0, %v3, %v0
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vl %v4, 256(%r15)
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v0
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v27, %v0
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 304(%r15)
+; CHECK-NEXT: vl %v3, 240(%r15)
+; CHECK-NEXT: vsel %v26, %v3, %v1, %v0
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vmrlg %v1, %v2, %v2
+; CHECK-NEXT: vfchdb %v0, %v31, %v0
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vl %v2, 272(%r15)
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 336(%r15)
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i32> %val1, %val2
+ %cmp1 = fcmp ogt <8 x double> %val3, %val4
+ %or = or <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %or, <8 x double> %val5, <8 x double> %val6
+ ret <8 x double> %sel
+}
+
+define <8 x i64> @fun115(<8 x float> %val1, <8 x float> %val2, <8 x double> %val3, <8 x double> %val4, <8 x i64> %val5, <8 x i64> %val6) {
+; CHECK-LABEL: fun115:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v28, %v28
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v28, %v28
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vl %v3, 224(%r15)
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vl %v4, 256(%r15)
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vfchdb %v2, %v25, %v2
+; CHECK-NEXT: vo %v1, %v1, %v2
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v1
+; CHECK-NEXT: vmrlf %v1, %v30, %v30
+; CHECK-NEXT: vmrlf %v2, %v26, %v26
+; CHECK-NEXT: vmrhf %v3, %v26, %v26
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v30, %v30
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vuphf %v2, %v1
+; CHECK-NEXT: vfchdb %v3, %v29, %v3
+; CHECK-NEXT: vo %v2, %v2, %v3
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vl %v3, 240(%r15)
+; CHECK-NEXT: vfchdb %v2, %v27, %v2
+; CHECK-NEXT: vo %v0, %v0, %v2
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vsel %v26, %v3, %v2, %v0
+; CHECK-NEXT: vl %v2, 272(%r15)
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vfchdb %v1, %v31, %v1
+; CHECK-NEXT: vo %v0, %v0, %v1
+; CHECK-NEXT: vl %v1, 336(%r15)
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <8 x float> %val1, %val2
+ %cmp1 = fcmp ogt <8 x double> %val3, %val4
+ %or = or <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %or, <8 x i64> %val5, <8 x i64> %val6
+ ret <8 x i64> %sel
+}
+
+define <16 x i8> @fun116(<16 x i8> %val1, <16 x i8> %val2, <16 x i8> %val3, <16 x i8> %val4, <16 x i8> %val5, <16 x i8> %val6) {
+; CHECK-LABEL: fun116:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v28, %v30
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = icmp eq <16 x i8> %val3, %val4
+ %or = or <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %or, <16 x i8> %val5, <16 x i8> %val6
+ ret <16 x i8> %sel
+}
+
+define <16 x i16> @fun117(<16 x i8> %val1, <16 x i8> %val2, <16 x i8> %val3, <16 x i8> %val4, <16 x i16> %val5, <16 x i16> %val6) {
+; CHECK-LABEL: fun117:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v28, %v30
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vuphb %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v1
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = icmp eq <16 x i8> %val3, %val4
+ %or = or <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %or, <16 x i16> %val5, <16 x i16> %val6
+ ret <16 x i16> %sel
+}
+
+define <16 x i64> @fun118(<16 x i8> %val1, <16 x i8> %val2, <16 x i16> %val3, <16 x i16> %val4, <16 x i64> %val5, <16 x i64> %val6) {
+; CHECK-LABEL: fun118:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vceqh %v0, %v28, %v25
+; CHECK-NEXT: vuphb %v2, %v1
+; CHECK-NEXT: vo %v0, %v2, %v0
+; CHECK-NEXT: vuphh %v2, %v0
+; CHECK-NEXT: vl %v3, 256(%r15)
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsel %v24, %v29, %v3, %v2
+; CHECK-NEXT: vpkg %v2, %v0, %v0
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vl %v3, 272(%r15)
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsel %v26, %v31, %v3, %v2
+; CHECK-NEXT: vmrlg %v2, %v0, %v0
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vsldb %v0, %v0, %v0, 12
+; CHECK-NEXT: vl %v3, 288(%r15)
+; CHECK-NEXT: vl %v4, 160(%r15)
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vl %v3, 176(%r15)
+; CHECK-NEXT: vl %v4, 192(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vsel %v0, %v3, %v2, %v0
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vceqh %v2, %v30, %v27
+; CHECK-NEXT: vlr %v30, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vo %v1, %v1, %v2
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsel %v25, %v4, %v3, %v2
+; CHECK-NEXT: vl %v3, 336(%r15)
+; CHECK-NEXT: vl %v4, 208(%r15)
+; CHECK-NEXT: vpkg %v2, %v1, %v1
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsel %v27, %v4, %v3, %v2
+; CHECK-NEXT: vl %v3, 352(%r15)
+; CHECK-NEXT: vl %v4, 224(%r15)
+; CHECK-NEXT: vmrlg %v2, %v1, %v1
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vsldb %v1, %v1, %v1, 12
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vsel %v29, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 368(%r15)
+; CHECK-NEXT: vl %v3, 240(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v31, %v3, %v2, %v1
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = icmp eq <16 x i16> %val3, %val4
+ %or = or <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %or, <16 x i64> %val5, <16 x i64> %val6
+ ret <16 x i64> %sel
+}
+
+define <16 x i64> @fun119(<16 x i8> %val1, <16 x i8> %val2, <16 x i32> %val3, <16 x i32> %val4, <16 x i64> %val5, <16 x i64> %val6) {
+; CHECK-LABEL: fun119:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vuphb %v2, %v1
+; CHECK-NEXT: vceqf %v0, %v28, %v29
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vo %v0, %v2, %v0
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vl %v4, 192(%r15)
+; CHECK-NEXT: vuphf %v2, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 336(%r15)
+; CHECK-NEXT: vl %v3, 208(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v26, %v3, %v2, %v0
+; CHECK-NEXT: vpkg %v2, %v1, %v1
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vceqf %v0, %v30, %v31
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vo %v0, %v2, %v0
+; CHECK-NEXT: vl %v3, 352(%r15)
+; CHECK-NEXT: vl %v4, 224(%r15)
+; CHECK-NEXT: vuphf %v2, %v0
+; CHECK-NEXT: vl %v5, 256(%r15)
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vl %v4, 384(%r15)
+; CHECK-NEXT: vmrlg %v3, %v1, %v1
+; CHECK-NEXT: vuphb %v3, %v3
+; CHECK-NEXT: vceqf %v2, %v25, %v2
+; CHECK-NEXT: vuphh %v3, %v3
+; CHECK-NEXT: vo %v2, %v3, %v2
+; CHECK-NEXT: vuphf %v3, %v2
+; CHECK-NEXT: vsldb %v1, %v1, %v1, 12
+; CHECK-NEXT: vsel %v25, %v5, %v4, %v3
+; CHECK-NEXT: vl %v3, 176(%r15)
+; CHECK-NEXT: vl %v4, 416(%r15)
+; CHECK-NEXT: vl %v5, 288(%r15)
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vceqf %v3, %v27, %v3
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vo %v1, %v1, %v3
+; CHECK-NEXT: vuphf %v3, %v1
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v29, %v5, %v4, %v3
+; CHECK-NEXT: vl %v3, 368(%r15)
+; CHECK-NEXT: vl %v4, 240(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v4, %v3, %v0
+; CHECK-NEXT: vl %v3, 272(%r15)
+; CHECK-NEXT: vmrlg %v0, %v2, %v2
+; CHECK-NEXT: vl %v2, 400(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v27, %v3, %v2, %v0
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 432(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v31, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = icmp eq <16 x i32> %val3, %val4
+ %or = or <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %or, <16 x i64> %val5, <16 x i64> %val6
+ ret <16 x i64> %sel
+}
+
+define <16 x i64> @fun120(<16 x i8> %val1, <16 x i8> %val2, <16 x i64> %val3, <16 x i64> %val4, <16 x i64> %val5, <16 x i64> %val6) {
+; CHECK-LABEL: fun120:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 192(%r15)
+; CHECK-NEXT: vceqg %v1, %v28, %v0
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v2, %v0
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vo %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 448(%r15)
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v1
+; CHECK-NEXT: vpkf %v2, %v0, %v0
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vceqg %v1, %v30, %v1
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vo %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 464(%r15)
+; CHECK-NEXT: vl %v3, 336(%r15)
+; CHECK-NEXT: vsel %v26, %v3, %v2, %v1
+; CHECK-NEXT: vpkg %v2, %v0, %v0
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vl %v3, 352(%r15)
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vceqg %v1, %v25, %v1
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vo %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 480(%r15)
+; CHECK-NEXT: vsel %v28, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v3, 368(%r15)
+; CHECK-NEXT: vsldb %v2, %v0, %v0, 6
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vceqg %v1, %v27, %v1
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vo %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 496(%r15)
+; CHECK-NEXT: vsel %v30, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vl %v3, 384(%r15)
+; CHECK-NEXT: vmrlg %v2, %v0, %v0
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vceqg %v1, %v29, %v1
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vo %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 512(%r15)
+; CHECK-NEXT: vsel %v25, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vl %v3, 400(%r15)
+; CHECK-NEXT: vsldb %v2, %v0, %v0, 10
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vceqg %v1, %v31, %v1
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vo %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 528(%r15)
+; CHECK-NEXT: vsel %v27, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 288(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vl %v3, 416(%r15)
+; CHECK-NEXT: vceqg %v1, %v2, %v1
+; CHECK-NEXT: vsldb %v2, %v0, %v0, 12
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsldb %v0, %v0, %v0, 14
+; CHECK-NEXT: vo %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 544(%r15)
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vsel %v29, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 304(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vceqg %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 432(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vo %v0, %v0, %v1
+; CHECK-NEXT: vl %v1, 560(%r15)
+; CHECK-NEXT: vsel %v31, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = icmp eq <16 x i64> %val3, %val4
+ %or = or <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %or, <16 x i64> %val5, <16 x i64> %val6
+ ret <16 x i64> %sel
+}
+
+define <16 x i16> @fun121(<16 x i8> %val1, <16 x i8> %val2, <16 x float> %val3, <16 x float> %val4, <16 x i16> %val5, <16 x i16> %val6) {
+; CHECK-LABEL: fun121:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v31, %v31
+; CHECK-NEXT: vmrlf %v1, %v30, %v30
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v31, %v31
+; CHECK-NEXT: vmrhf %v2, %v30, %v30
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v28, %v28
+; CHECK-NEXT: vmrlf %v4, %v25, %v25
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v29, %v29
+; CHECK-NEXT: vmrlf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v29, %v29
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vuphb %v2, %v1
+; CHECK-NEXT: vo %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vmrlf %v2, %v0, %v0
+; CHECK-NEXT: vmrlf %v3, %v27, %v27
+; CHECK-NEXT: vmrhf %v0, %v0, %v0
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vmrhf %v3, %v27, %v27
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v0, %v3, %v0
+; CHECK-NEXT: vpkg %v0, %v0, %v2
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vmrlf %v3, %v2, %v2
+; CHECK-NEXT: vmrhf %v2, %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v3, %v4, %v3
+; CHECK-NEXT: vmrhf %v4, %v25, %v25
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v2, %v4, %v2
+; CHECK-NEXT: vpkg %v2, %v2, %v3
+; CHECK-NEXT: vpkf %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = fcmp ogt <16 x float> %val3, %val4
+ %or = or <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %or, <16 x i16> %val5, <16 x i16> %val6
+ ret <16 x i16> %sel
+}
+
+define <16 x i8> @fun122(<16 x i8> %val1, <16 x i8> %val2, <16 x double> %val3, <16 x double> %val4, <16 x i8> %val5, <16 x i8> %val6) {
+; CHECK-LABEL: fun122:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 304(%r15)
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 288(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vl %v2, 256(%r15)
+; CHECK-NEXT: vfchdb %v1, %v31, %v1
+; CHECK-NEXT: vfchdb %v2, %v29, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vfchdb %v1, %v27, %v1
+; CHECK-NEXT: vfchdb %v2, %v25, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vfchdb %v2, %v30, %v2
+; CHECK-NEXT: vfchdb %v3, %v28, %v3
+; CHECK-NEXT: vpkg %v2, %v3, %v2
+; CHECK-NEXT: vpkf %v1, %v2, %v1
+; CHECK-NEXT: vpkh %v0, %v1, %v0
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 336(%r15)
+; CHECK-NEXT: vl %v2, 320(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = fcmp ogt <16 x double> %val3, %val4
+ %or = or <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %or, <16 x i8> %val5, <16 x i8> %val6
+ ret <16 x i8> %sel
+}
+
+define <16 x i8> @fun123(<16 x i16> %val1, <16 x i16> %val2, <16 x i16> %val3, <16 x i16> %val4, <16 x i8> %val5, <16 x i8> %val6) {
+; CHECK-LABEL: fun123:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v27, %v31
+; CHECK-NEXT: vceqh %v1, %v26, %v30
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v25, %v29
+; CHECK-NEXT: vceqh %v2, %v24, %v28
+; CHECK-NEXT: vo %v1, %v2, %v1
+; CHECK-NEXT: vpkh %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = icmp eq <16 x i16> %val3, %val4
+ %or = or <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %or, <16 x i8> %val5, <16 x i8> %val6
+ ret <16 x i8> %sel
+}
+
+define <16 x i16> @fun124(<16 x i16> %val1, <16 x i16> %val2, <16 x i16> %val3, <16 x i16> %val4, <16 x i16> %val5, <16 x i16> %val6) {
+; CHECK-LABEL: fun124:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v25, %v29
+; CHECK-NEXT: vceqh %v1, %v24, %v28
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vceqh %v0, %v27, %v31
+; CHECK-NEXT: vceqh %v1, %v26, %v30
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = icmp eq <16 x i16> %val3, %val4
+ %or = or <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %or, <16 x i16> %val5, <16 x i16> %val6
+ ret <16 x i16> %sel
+}
+
+define <16 x i32> @fun125(<16 x i16> %val1, <16 x i16> %val2, <16 x i16> %val3, <16 x i16> %val4, <16 x i32> %val5, <16 x i32> %val6) {
+; CHECK-LABEL: fun125:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v25, %v29
+; CHECK-NEXT: vceqh %v1, %v24, %v28
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vuphh %v1, %v0
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v1
+; CHECK-NEXT: vceqh %v1, %v27, %v31
+; CHECK-NEXT: vceqh %v2, %v26, %v30
+; CHECK-NEXT: vo %v1, %v2, %v1
+; CHECK-NEXT: vl %v3, 256(%r15)
+; CHECK-NEXT: vl %v4, 192(%r15)
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 240(%r15)
+; CHECK-NEXT: vl %v3, 176(%r15)
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v26, %v3, %v2, %v0
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = icmp eq <16 x i16> %val3, %val4
+ %or = or <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %or, <16 x i32> %val5, <16 x i32> %val6
+ ret <16 x i32> %sel
+}
+
+define <16 x i8> @fun126(<16 x i16> %val1, <16 x i16> %val2, <16 x i32> %val3, <16 x i32> %val4, <16 x i8> %val5, <16 x i8> %val6) {
+; CHECK-LABEL: fun126:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vceqf %v0, %v31, %v0
+; CHECK-NEXT: vceqf %v1, %v29, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v26, %v30
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vceqf %v1, %v27, %v1
+; CHECK-NEXT: vceqf %v2, %v25, %v2
+; CHECK-NEXT: vpkf %v1, %v2, %v1
+; CHECK-NEXT: vceqh %v2, %v24, %v28
+; CHECK-NEXT: vo %v1, %v2, %v1
+; CHECK-NEXT: vpkh %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = icmp eq <16 x i32> %val3, %val4
+ %or = or <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %or, <16 x i8> %val5, <16 x i8> %val6
+ ret <16 x i8> %sel
+}
+
+define <16 x i32> @fun127(<16 x i16> %val1, <16 x i16> %val2, <16 x i64> %val3, <16 x i64> %val4, <16 x i32> %val5, <16 x i32> %val6) {
+; CHECK-LABEL: fun127:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 240(%r15)
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vceqg %v0, %v27, %v0
+; CHECK-NEXT: vceqg %v1, %v25, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v24, %v28
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vo %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 416(%r15)
+; CHECK-NEXT: vl %v3, 352(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vl %v0, 304(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vceqg %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vceqg %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v0, %v2, %v0
+; CHECK-NEXT: vceqh %v2, %v26, %v30
+; CHECK-NEXT: vuphh %v3, %v2
+; CHECK-NEXT: vo %v0, %v3, %v0
+; CHECK-NEXT: vl %v3, 448(%r15)
+; CHECK-NEXT: vl %v4, 384(%r15)
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v0
+; CHECK-NEXT: vl %v0, 272(%r15)
+; CHECK-NEXT: vl %v3, 256(%r15)
+; CHECK-NEXT: vceqg %v0, %v31, %v0
+; CHECK-NEXT: vceqg %v3, %v29, %v3
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vpkg %v0, %v3, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vl %v3, 368(%r15)
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 432(%r15)
+; CHECK-NEXT: vsel %v26, %v3, %v1, %v0
+; CHECK-NEXT: vl %v0, 336(%r15)
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vceqg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 320(%r15)
+; CHECK-NEXT: vceqg %v1, %v3, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlg %v1, %v2, %v2
+; CHECK-NEXT: vl %v2, 400(%r15)
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 464(%r15)
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = icmp eq <16 x i64> %val3, %val4
+ %or = or <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %or, <16 x i32> %val5, <16 x i32> %val6
+ ret <16 x i32> %sel
+}
+
+define <16 x double> @fun128(<16 x i16> %val1, <16 x i16> %val2, <16 x float> %val3, <16 x float> %val4, <16 x double> %val5, <16 x double> %val6) {
+; CHECK-LABEL: fun128:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 160(%r15)
+; CHECK-NEXT: vmrlf %v1, %v0, %v0
+; CHECK-NEXT: vmrlf %v2, %v25, %v25
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v0, %v0, %v0
+; CHECK-NEXT: vmrhf %v2, %v25, %v25
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vl %v3, 352(%r15)
+; CHECK-NEXT: vl %v4, 224(%r15)
+; CHECK-NEXT: vl %v5, 416(%r15)
+; CHECK-NEXT: vl %v6, 288(%r15)
+; CHECK-NEXT: vfchdb %v0, %v2, %v0
+; CHECK-NEXT: vpkg %v0, %v0, %v1
+; CHECK-NEXT: vceqh %v1, %v24, %v28
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vo %v0, %v2, %v0
+; CHECK-NEXT: vuphf %v2, %v0
+; CHECK-NEXT: vsel %v24, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vmrlf %v3, %v2, %v2
+; CHECK-NEXT: vmrlf %v4, %v27, %v27
+; CHECK-NEXT: vmrhf %v2, %v2, %v2
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v3, %v4, %v3
+; CHECK-NEXT: vmrhf %v4, %v27, %v27
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v2, %v4, %v2
+; CHECK-NEXT: vl %v4, 256(%r15)
+; CHECK-NEXT: vpkg %v2, %v2, %v3
+; CHECK-NEXT: vl %v3, 384(%r15)
+; CHECK-NEXT: vo %v1, %v1, %v2
+; CHECK-NEXT: vuphf %v2, %v1
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 192(%r15)
+; CHECK-NEXT: vmrlf %v3, %v2, %v2
+; CHECK-NEXT: vmrlf %v4, %v29, %v29
+; CHECK-NEXT: vmrhf %v2, %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v3, %v4, %v3
+; CHECK-NEXT: vmrhf %v4, %v29, %v29
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v2, %v4, %v2
+; CHECK-NEXT: vpkg %v2, %v2, %v3
+; CHECK-NEXT: vceqh %v3, %v26, %v30
+; CHECK-NEXT: vuphh %v4, %v3
+; CHECK-NEXT: vo %v2, %v4, %v2
+; CHECK-NEXT: vuphf %v4, %v2
+; CHECK-NEXT: vsel %v25, %v6, %v5, %v4
+; CHECK-NEXT: vl %v4, 208(%r15)
+; CHECK-NEXT: vmrlf %v5, %v4, %v4
+; CHECK-NEXT: vmrlf %v6, %v31, %v31
+; CHECK-NEXT: vmrhf %v4, %v4, %v4
+; CHECK-NEXT: vmrlg %v3, %v3, %v3
+; CHECK-NEXT: vuphh %v3, %v3
+; CHECK-NEXT: vldeb %v5, %v5
+; CHECK-NEXT: vldeb %v6, %v6
+; CHECK-NEXT: vfchdb %v5, %v6, %v5
+; CHECK-NEXT: vmrhf %v6, %v31, %v31
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vldeb %v6, %v6
+; CHECK-NEXT: vfchdb %v4, %v6, %v4
+; CHECK-NEXT: vl %v6, 320(%r15)
+; CHECK-NEXT: vpkg %v4, %v4, %v5
+; CHECK-NEXT: vl %v5, 448(%r15)
+; CHECK-NEXT: vo %v3, %v3, %v4
+; CHECK-NEXT: vuphf %v4, %v3
+; CHECK-NEXT: vsel %v29, %v6, %v5, %v4
+; CHECK-NEXT: vl %v4, 368(%r15)
+; CHECK-NEXT: vl %v5, 240(%r15)
+; CHECK-NEXT: vsel %v26, %v5, %v4, %v0
+; CHECK-NEXT: vl %v4, 272(%r15)
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 400(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v4, %v1, %v0
+; CHECK-NEXT: vl %v1, 432(%r15)
+; CHECK-NEXT: vmrlg %v0, %v2, %v2
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v27, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 464(%r15)
+; CHECK-NEXT: vl %v2, 336(%r15)
+; CHECK-NEXT: vmrlg %v0, %v3, %v3
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v31, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = fcmp ogt <16 x float> %val3, %val4
+ %or = or <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %or, <16 x double> %val5, <16 x double> %val6
+ ret <16 x double> %sel
+}
+
+define <16 x i32> @fun129(<16 x i16> %val1, <16 x i16> %val2, <16 x double> %val3, <16 x double> %val4, <16 x i32> %val5, <16 x i32> %val6) {
+; CHECK-LABEL: fun129:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 240(%r15)
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vfchdb %v0, %v27, %v0
+; CHECK-NEXT: vfchdb %v1, %v25, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v24, %v28
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vo %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 416(%r15)
+; CHECK-NEXT: vl %v3, 352(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vl %v0, 304(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vfchdb %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v0, %v2, %v0
+; CHECK-NEXT: vceqh %v2, %v26, %v30
+; CHECK-NEXT: vuphh %v3, %v2
+; CHECK-NEXT: vo %v0, %v3, %v0
+; CHECK-NEXT: vl %v3, 448(%r15)
+; CHECK-NEXT: vl %v4, 384(%r15)
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v0
+; CHECK-NEXT: vl %v0, 272(%r15)
+; CHECK-NEXT: vl %v3, 256(%r15)
+; CHECK-NEXT: vfchdb %v0, %v31, %v0
+; CHECK-NEXT: vfchdb %v3, %v29, %v3
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vpkg %v0, %v3, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vl %v3, 368(%r15)
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 432(%r15)
+; CHECK-NEXT: vsel %v26, %v3, %v1, %v0
+; CHECK-NEXT: vl %v0, 336(%r15)
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 320(%r15)
+; CHECK-NEXT: vfchdb %v1, %v3, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlg %v1, %v2, %v2
+; CHECK-NEXT: vl %v2, 400(%r15)
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vo %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 464(%r15)
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = fcmp ogt <16 x double> %val3, %val4
+ %and = or <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i32> %val5, <16 x i32> %val6
+ ret <16 x i32> %sel
+}
+
+define <2 x i8> @fun130(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x i8> %val5, <2 x i8> %val6) {
+; CHECK-LABEL: fun130:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v28, %v30
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = icmp eq <2 x i8> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i8> %val5, <2 x i8> %val6
+ ret <2 x i8> %sel
+}
+
+define <2 x i16> @fun131(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun131:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v28, %v30
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = icmp eq <2 x i8> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x i8> @fun132(<2 x i8> %val1, <2 x i8> %val2, <2 x i16> %val3, <2 x i16> %val4, <2 x i8> %val5, <2 x i8> %val6) {
+; CHECK-LABEL: fun132:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v1, %v28, %v30
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vpkh %v1, %v1, %v1
+; CHECK-NEXT: vx %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = icmp eq <2 x i16> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i8> %val5, <2 x i8> %val6
+ ret <2 x i8> %sel
+}
+
+define <2 x i32> @fun133(<2 x i8> %val1, <2 x i8> %val2, <2 x i32> %val3, <2 x i32> %val4, <2 x i32> %val5, <2 x i32> %val6) {
+; CHECK-LABEL: fun133:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = icmp eq <2 x i32> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i32> %val5, <2 x i32> %val6
+ ret <2 x i32> %sel
+}
+
+define <2 x i32> @fun134(<2 x i8> %val1, <2 x i8> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i32> %val5, <2 x i32> %val6) {
+; CHECK-LABEL: fun134:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vceqg %v0, %v28, %v30
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vpkg %v0, %v0, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = icmp eq <2 x i64> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i32> %val5, <2 x i32> %val6
+ ret <2 x i32> %sel
+}
+
+define <2 x i16> @fun135(<2 x i8> %val1, <2 x i8> %val2, <2 x float> %val3, <2 x float> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun135:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = fcmp ogt <2 x float> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x i64> @fun136(<2 x i8> %val1, <2 x i8> %val2, <2 x double> %val3, <2 x double> %val4, <2 x i64> %val5, <2 x i64> %val6) {
+; CHECK-LABEL: fun136:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v28, %v30
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i8> %val1, %val2
+ %cmp1 = fcmp ogt <2 x double> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i64> %val5, <2 x i64> %val6
+ ret <2 x i64> %sel
+}
+
+define <2 x i8> @fun137(<2 x i16> %val1, <2 x i16> %val2, <2 x i16> %val3, <2 x i16> %val4, <2 x i8> %val5, <2 x i8> %val6) {
+; CHECK-LABEL: fun137:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v28, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vpkh %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = icmp eq <2 x i16> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i8> %val5, <2 x i8> %val6
+ ret <2 x i8> %sel
+}
+
+define <2 x i16> @fun138(<2 x i16> %val1, <2 x i16> %val2, <2 x i16> %val3, <2 x i16> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun138:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v28, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = icmp eq <2 x i16> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x i32> @fun139(<2 x i16> %val1, <2 x i16> %val2, <2 x i16> %val3, <2 x i16> %val4, <2 x i32> %val5, <2 x i32> %val6) {
+; CHECK-LABEL: fun139:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v28, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = icmp eq <2 x i16> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i32> %val5, <2 x i32> %val6
+ ret <2 x i32> %sel
+}
+
+define <2 x i8> @fun140(<2 x i16> %val1, <2 x i16> %val2, <2 x i32> %val3, <2 x i32> %val4, <2 x i8> %val5, <2 x i8> %val6) {
+; CHECK-LABEL: fun140:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v1, %v28, %v30
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vpkf %v1, %v1, %v1
+; CHECK-NEXT: vx %v0, %v0, %v1
+; CHECK-NEXT: vpkh %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = icmp eq <2 x i32> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i8> %val5, <2 x i8> %val6
+ ret <2 x i8> %sel
+}
+
+define <2 x i8> @fun141(<2 x i16> %val1, <2 x i16> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i8> %val5, <2 x i8> %val6) {
+; CHECK-LABEL: fun141:
+; CHECK: # BB#0:
+; CHECK-NEXT: larl %r1, .LCPI141_0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vceqg %v0, %v28, %v30
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vpkh %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = icmp eq <2 x i64> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i8> %val5, <2 x i8> %val6
+ ret <2 x i8> %sel
+}
+
+define <2 x double> @fun142(<2 x i16> %val1, <2 x i16> %val2, <2 x float> %val3, <2 x float> %val4, <2 x double> %val5, <2 x double> %val6) {
+; CHECK-LABEL: fun142:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = fcmp ogt <2 x float> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x double> %val5, <2 x double> %val6
+ ret <2 x double> %sel
+}
+
+define <2 x i16> @fun143(<2 x i16> %val1, <2 x i16> %val2, <2 x double> %val3, <2 x double> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun143:
+; CHECK: # BB#0:
+; CHECK-NEXT: larl %r1, .LCPI143_0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vfchdb %v0, %v28, %v30
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i16> %val1, %val2
+ %cmp1 = fcmp ogt <2 x double> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x i16> @fun144(<2 x i32> %val1, <2 x i32> %val2, <2 x i32> %val3, <2 x i32> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun144:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i32> %val1, %val2
+ %cmp1 = icmp eq <2 x i32> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x i32> @fun145(<2 x i32> %val1, <2 x i32> %val2, <2 x i32> %val3, <2 x i32> %val4, <2 x i32> %val5, <2 x i32> %val6) {
+; CHECK-LABEL: fun145:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i32> %val1, %val2
+ %cmp1 = icmp eq <2 x i32> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i32> %val5, <2 x i32> %val6
+ ret <2 x i32> %sel
+}
+
+define <2 x i64> @fun146(<2 x i32> %val1, <2 x i32> %val2, <2 x i32> %val3, <2 x i32> %val4, <2 x i64> %val5, <2 x i64> %val6) {
+; CHECK-LABEL: fun146:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i32> %val1, %val2
+ %cmp1 = icmp eq <2 x i32> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i64> %val5, <2 x i64> %val6
+ ret <2 x i64> %sel
+}
+
+define <2 x i64> @fun147(<2 x i32> %val1, <2 x i32> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i64> %val5, <2 x i64> %val6) {
+; CHECK-LABEL: fun147:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vceqg %v0, %v28, %v30
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i32> %val1, %val2
+ %cmp1 = icmp eq <2 x i64> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i64> %val5, <2 x i64> %val6
+ ret <2 x i64> %sel
+}
+
+define <2 x i16> @fun148(<2 x i32> %val1, <2 x i32> %val2, <2 x float> %val3, <2 x float> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun148:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i32> %val1, %val2
+ %cmp1 = fcmp ogt <2 x float> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x float> @fun149(<2 x i32> %val1, <2 x i32> %val2, <2 x double> %val3, <2 x double> %val4, <2 x float> %val5, <2 x float> %val6) {
+; CHECK-LABEL: fun149:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v1, %v28, %v30
+; CHECK-NEXT: vceqf %v0, %v24, %v26
+; CHECK-NEXT: vpkg %v1, %v1, %v1
+; CHECK-NEXT: vx %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i32> %val1, %val2
+ %cmp1 = fcmp ogt <2 x double> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x float> %val5, <2 x float> %val6
+ ret <2 x float> %sel
+}
+
+define <2 x i16> @fun150(<2 x i64> %val1, <2 x i64> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun150:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v28, %v30
+; CHECK-NEXT: vceqg %v1, %v24, %v26
+; CHECK-NEXT: larl %r1, .LCPI150_0
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i64> %val1, %val2
+ %cmp1 = icmp eq <2 x i64> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x i64> @fun151(<2 x i64> %val1, <2 x i64> %val2, <2 x i64> %val3, <2 x i64> %val4, <2 x i64> %val5, <2 x i64> %val6) {
+; CHECK-LABEL: fun151:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v28, %v30
+; CHECK-NEXT: vceqg %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i64> %val1, %val2
+ %cmp1 = icmp eq <2 x i64> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i64> %val5, <2 x i64> %val6
+ ret <2 x i64> %sel
+}
+
+define <2 x i64> @fun152(<2 x i64> %val1, <2 x i64> %val2, <2 x float> %val3, <2 x float> %val4, <2 x i64> %val5, <2 x i64> %val6) {
+; CHECK-LABEL: fun152:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vceqg %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i64> %val1, %val2
+ %cmp1 = fcmp ogt <2 x float> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i64> %val5, <2 x i64> %val6
+ ret <2 x i64> %sel
+}
+
+define <2 x i16> @fun153(<2 x i64> %val1, <2 x i64> %val2, <2 x double> %val3, <2 x double> %val4, <2 x i16> %val5, <2 x i16> %val6) {
+; CHECK-LABEL: fun153:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v28, %v30
+; CHECK-NEXT: vceqg %v1, %v24, %v26
+; CHECK-NEXT: larl %r1, .LCPI153_0
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <2 x i64> %val1, %val2
+ %cmp1 = fcmp ogt <2 x double> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i16> %val5, <2 x i16> %val6
+ ret <2 x i16> %sel
+}
+
+define <2 x float> @fun154(<2 x float> %val1, <2 x float> %val2, <2 x float> %val3, <2 x float> %val4, <2 x float> %val5, <2 x float> %val6) {
+; CHECK-LABEL: fun154:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v24, %v24
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrlf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <2 x float> %val1, %val2
+ %cmp1 = fcmp ogt <2 x float> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x float> %val5, <2 x float> %val6
+ ret <2 x float> %sel
+}
+
+define <2 x i32> @fun155(<2 x float> %val1, <2 x float> %val2, <2 x double> %val3, <2 x double> %val4, <2 x i32> %val5, <2 x i32> %val6) {
+; CHECK-LABEL: fun155:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v26, %v26
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vfchdb %v1, %v28, %v30
+; CHECK-NEXT: vpkg %v1, %v1, %v1
+; CHECK-NEXT: vx %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <2 x float> %val1, %val2
+ %cmp1 = fcmp ogt <2 x double> %val3, %val4
+ %and = xor <2 x i1> %cmp0, %cmp1
+ %sel = select <2 x i1> %and, <2 x i32> %val5, <2 x i32> %val6
+ ret <2 x i32> %sel
+}
+
+define <4 x i16> @fun156(<4 x i32> %val1, <4 x i32> %val2, <4 x i32> %val3, <4 x i32> %val4, <4 x i16> %val5, <4 x i16> %val6) {
+; CHECK-LABEL: fun156:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i32> %val1, %val2
+ %cmp1 = icmp eq <4 x i32> %val3, %val4
+ %and = xor <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i16> %val5, <4 x i16> %val6
+ ret <4 x i16> %sel
+}
+
+define <4 x i32> @fun157(<4 x i32> %val1, <4 x i32> %val2, <4 x i32> %val3, <4 x i32> %val4, <4 x i32> %val5, <4 x i32> %val6) {
+; CHECK-LABEL: fun157:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i32> %val1, %val2
+ %cmp1 = icmp eq <4 x i32> %val3, %val4
+ %and = xor <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i32> %val5, <4 x i32> %val6
+ ret <4 x i32> %sel
+}
+
+define <4 x i64> @fun158(<4 x i32> %val1, <4 x i32> %val2, <4 x i32> %val3, <4 x i32> %val4, <4 x i64> %val5, <4 x i64> %val6) {
+; CHECK-LABEL: fun158:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v28, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v1
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i32> %val1, %val2
+ %cmp1 = icmp eq <4 x i32> %val3, %val4
+ %and = xor <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i64> %val5, <4 x i64> %val6
+ ret <4 x i64> %sel
+}
+
+define <4 x i32> @fun159(<4 x i32> %val1, <4 x i32> %val2, <4 x i64> %val3, <4 x i64> %val4, <4 x i32> %val5, <4 x i32> %val6) {
+; CHECK-LABEL: fun159:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v30, %v27
+; CHECK-NEXT: vceqg %v1, %v28, %v25
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v29, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i32> %val1, %val2
+ %cmp1 = icmp eq <4 x i64> %val3, %val4
+ %and = xor <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i32> %val5, <4 x i32> %val6
+ ret <4 x i32> %sel
+}
+
+define <4 x i16> @fun160(<4 x i32> %val1, <4 x i32> %val2, <4 x float> %val3, <4 x float> %val4, <4 x i16> %val5, <4 x i16> %val6) {
+; CHECK-LABEL: fun160:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i32> %val1, %val2
+ %cmp1 = fcmp ogt <4 x float> %val3, %val4
+ %and = xor <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i16> %val5, <4 x i16> %val6
+ ret <4 x i16> %sel
+}
+
+define <4 x i8> @fun161(<4 x i32> %val1, <4 x i32> %val2, <4 x double> %val3, <4 x double> %val4, <4 x i8> %val5, <4 x i8> %val6) {
+; CHECK-LABEL: fun161:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v30, %v27
+; CHECK-NEXT: vfchdb %v1, %v28, %v25
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v24, %v26
+; CHECK-NEXT: larl %r1, .LCPI161_0
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v29, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i32> %val1, %val2
+ %cmp1 = fcmp ogt <4 x double> %val3, %val4
+ %and = xor <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i8> %val5, <4 x i8> %val6
+ ret <4 x i8> %sel
+}
+
+define <4 x i32> @fun162(<4 x i64> %val1, <4 x i64> %val2, <4 x i64> %val3, <4 x i64> %val4, <4 x i32> %val5, <4 x i32> %val6) {
+; CHECK-LABEL: fun162:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v27, %v31
+; CHECK-NEXT: vceqg %v1, %v26, %v30
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vceqg %v1, %v25, %v29
+; CHECK-NEXT: vceqg %v2, %v24, %v28
+; CHECK-NEXT: vx %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i64> %val1, %val2
+ %cmp1 = icmp eq <4 x i64> %val3, %val4
+ %and = xor <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i32> %val5, <4 x i32> %val6
+ ret <4 x i32> %sel
+}
+
+define <4 x i64> @fun163(<4 x i64> %val1, <4 x i64> %val2, <4 x i64> %val3, <4 x i64> %val4, <4 x i64> %val5, <4 x i64> %val6) {
+; CHECK-LABEL: fun163:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v25, %v29
+; CHECK-NEXT: vceqg %v1, %v24, %v28
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vceqg %v0, %v27, %v31
+; CHECK-NEXT: vceqg %v1, %v26, %v30
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i64> %val1, %val2
+ %cmp1 = icmp eq <4 x i64> %val3, %val4
+ %and = xor <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i64> %val5, <4 x i64> %val6
+ ret <4 x i64> %sel
+}
+
+define <4 x i64> @fun164(<4 x i64> %val1, <4 x i64> %val2, <4 x float> %val3, <4 x float> %val4, <4 x i64> %val5, <4 x i64> %val6) {
+; CHECK-LABEL: fun164:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v27, %v27
+; CHECK-NEXT: vmrlf %v1, %v25, %v25
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v27, %v27
+; CHECK-NEXT: vmrhf %v2, %v25, %v25
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vceqg %v2, %v24, %v28
+; CHECK-NEXT: vx %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v29, %v2, %v1
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vceqg %v1, %v26, %v30
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v31, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i64> %val1, %val2
+ %cmp1 = fcmp ogt <4 x float> %val3, %val4
+ %and = xor <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i64> %val5, <4 x i64> %val6
+ ret <4 x i64> %sel
+}
+
+define <4 x float> @fun165(<4 x i64> %val1, <4 x i64> %val2, <4 x double> %val3, <4 x double> %val4, <4 x float> %val5, <4 x float> %val6) {
+; CHECK-LABEL: fun165:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v27, %v31
+; CHECK-NEXT: vceqg %v1, %v26, %v30
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vfchdb %v1, %v25, %v29
+; CHECK-NEXT: vceqg %v2, %v24, %v28
+; CHECK-NEXT: vx %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <4 x i64> %val1, %val2
+ %cmp1 = fcmp ogt <4 x double> %val3, %val4
+ %and = xor <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x float> %val5, <4 x float> %val6
+ ret <4 x float> %sel
+}
+
+define <4 x i16> @fun166(<4 x float> %val1, <4 x float> %val2, <4 x float> %val3, <4 x float> %val4, <4 x i16> %val5, <4 x i16> %val6) {
+; CHECK-LABEL: fun166:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v24, %v24
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrlf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <4 x float> %val1, %val2
+ %cmp1 = fcmp ogt <4 x float> %val3, %val4
+ %and = xor <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i16> %val5, <4 x i16> %val6
+ ret <4 x i16> %sel
+}
+
+define <4 x float> @fun167(<4 x float> %val1, <4 x float> %val2, <4 x float> %val3, <4 x float> %val4, <4 x float> %val5, <4 x float> %val6) {
+; CHECK-LABEL: fun167:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v24, %v24
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrlf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <4 x float> %val1, %val2
+ %cmp1 = fcmp ogt <4 x float> %val3, %val4
+ %and = xor <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x float> %val5, <4 x float> %val6
+ ret <4 x float> %sel
+}
+
+define <4 x double> @fun168(<4 x float> %val1, <4 x float> %val2, <4 x float> %val3, <4 x float> %val4, <4 x double> %val5, <4 x double> %val6) {
+; CHECK-LABEL: fun168:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v24, %v24
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrlf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v1
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <4 x float> %val1, %val2
+ %cmp1 = fcmp ogt <4 x float> %val3, %val4
+ %and = xor <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x double> %val5, <4 x double> %val6
+ ret <4 x double> %sel
+}
+
+define <4 x i8> @fun169(<4 x float> %val1, <4 x float> %val2, <4 x double> %val3, <4 x double> %val4, <4 x i8> %val5, <4 x i8> %val6) {
+; CHECK-LABEL: fun169:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v30, %v27
+; CHECK-NEXT: vfchdb %v1, %v28, %v25
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrlf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vmrhf %v3, %v24, %v24
+; CHECK-NEXT: larl %r1, .LCPI169_0
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v29, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <4 x float> %val1, %val2
+ %cmp1 = fcmp ogt <4 x double> %val3, %val4
+ %and = xor <4 x i1> %cmp0, %cmp1
+ %sel = select <4 x i1> %and, <4 x i8> %val5, <4 x i8> %val6
+ ret <4 x i8> %sel
+}
+
+define <8 x i8> @fun170(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i16> %val4, <8 x i8> %val5, <8 x i8> %val6) {
+; CHECK-LABEL: fun170:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v28, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vpkh %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = icmp eq <8 x i16> %val3, %val4
+ %and = xor <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x i8> %val5, <8 x i8> %val6
+ ret <8 x i8> %sel
+}
+
+define <8 x i16> @fun171(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i16> %val4, <8 x i16> %val5, <8 x i16> %val6) {
+; CHECK-LABEL: fun171:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v28, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = icmp eq <8 x i16> %val3, %val4
+ %and = xor <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x i16> %val5, <8 x i16> %val6
+ ret <8 x i16> %sel
+}
+
+define <8 x i32> @fun172(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i16> %val4, <8 x i32> %val5, <8 x i32> %val6) {
+; CHECK-LABEL: fun172:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v28, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vuphh %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v1
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = icmp eq <8 x i16> %val3, %val4
+ %and = xor <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x i32> %val5, <8 x i32> %val6
+ ret <8 x i32> %sel
+}
+
+define <8 x i64> @fun173(<8 x i16> %val1, <8 x i16> %val2, <8 x i32> %val3, <8 x i32> %val4, <8 x i64> %val5, <8 x i64> %val6) {
+; CHECK-LABEL: fun173:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vceqf %v0, %v28, %v25
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vx %v0, %v2, %v0
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vuphf %v2, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v29, %v3, %v2
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vsel %v26, %v31, %v2, %v0
+; CHECK-NEXT: vceqf %v0, %v30, %v27
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v28, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = icmp eq <8 x i32> %val3, %val4
+ %and = xor <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x i64> %val5, <8 x i64> %val6
+ ret <8 x i64> %sel
+}
+
+define <8 x i8> @fun174(<8 x i16> %val1, <8 x i16> %val2, <8 x i64> %val3, <8 x i64> %val4, <8 x i8> %val5, <8 x i8> %val6) {
+; CHECK-LABEL: fun174:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vl %v1, 160(%r15)
+; CHECK-NEXT: vceqg %v0, %v27, %v0
+; CHECK-NEXT: vceqg %v1, %v25, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqg %v1, %v30, %v31
+; CHECK-NEXT: vceqg %v2, %v28, %v29
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vlrepg %v1, 200(%r15)
+; CHECK-NEXT: vlrepg %v2, 192(%r15)
+; CHECK-NEXT: vpkh %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = icmp eq <8 x i64> %val3, %val4
+ %and = xor <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x i8> %val5, <8 x i8> %val6
+ ret <8 x i8> %sel
+}
+
+define <8 x i16> @fun175(<8 x i16> %val1, <8 x i16> %val2, <8 x float> %val3, <8 x float> %val4, <8 x i16> %val5, <8 x i16> %val6) {
+; CHECK-LABEL: fun175:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v27, %v27
+; CHECK-NEXT: vmrlf %v1, %v30, %v30
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v27, %v27
+; CHECK-NEXT: vmrhf %v2, %v30, %v30
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v28, %v28
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v25, %v25
+; CHECK-NEXT: vmrlf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v25, %v25
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v29, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = fcmp ogt <8 x float> %val3, %val4
+ %and = xor <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x i16> %val5, <8 x i16> %val6
+ ret <8 x i16> %sel
+}
+
+define <8 x i32> @fun176(<8 x i16> %val1, <8 x i16> %val2, <8 x double> %val3, <8 x double> %val4, <8 x i32> %val5, <8 x i32> %val6) {
+; CHECK-LABEL: fun176:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v30, %v31
+; CHECK-NEXT: vfchdb %v1, %v28, %v29
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v24, %v26
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vx %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vfchdb %v0, %v27, %v0
+; CHECK-NEXT: vfchdb %v2, %v25, %v2
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vpkg %v0, %v2, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i16> %val1, %val2
+ %cmp1 = fcmp ogt <8 x double> %val3, %val4
+ %and = xor <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x i32> %val5, <8 x i32> %val6
+ ret <8 x i32> %sel
+}
+
+define <8 x i32> @fun177(<8 x i32> %val1, <8 x i32> %val2, <8 x i64> %val3, <8 x i64> %val4, <8 x i32> %val5, <8 x i32> %val6) {
+; CHECK-LABEL: fun177:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vl %v1, 160(%r15)
+; CHECK-NEXT: vceqg %v0, %v27, %v0
+; CHECK-NEXT: vceqg %v1, %v25, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v24, %v28
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vceqg %v0, %v31, %v0
+; CHECK-NEXT: vceqg %v1, %v29, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v26, %v30
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vl %v2, 240(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i32> %val1, %val2
+ %cmp1 = icmp eq <8 x i64> %val3, %val4
+ %and = xor <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x i32> %val5, <8 x i32> %val6
+ ret <8 x i32> %sel
+}
+
+define <8 x double> @fun178(<8 x i32> %val1, <8 x i32> %val2, <8 x float> %val3, <8 x float> %val4, <8 x double> %val5, <8 x double> %val6) {
+; CHECK-LABEL: fun178:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v29, %v29
+; CHECK-NEXT: vmrlf %v1, %v25, %v25
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v29, %v29
+; CHECK-NEXT: vmrhf %v2, %v25, %v25
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vl %v4, 192(%r15)
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v24, %v28
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v1
+; CHECK-NEXT: vmrlf %v1, %v31, %v31
+; CHECK-NEXT: vmrlf %v2, %v27, %v27
+; CHECK-NEXT: vmrhf %v3, %v27, %v27
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v31, %v31
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vl %v3, 256(%r15)
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vceqf %v2, %v26, %v30
+; CHECK-NEXT: vx %v1, %v2, %v1
+; CHECK-NEXT: vuphf %v2, %v1
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 240(%r15)
+; CHECK-NEXT: vl %v3, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v3, %v2, %v0
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i32> %val1, %val2
+ %cmp1 = fcmp ogt <8 x float> %val3, %val4
+ %and = xor <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x double> %val5, <8 x double> %val6
+ ret <8 x double> %sel
+}
+
+define <8 x double> @fun179(<8 x i32> %val1, <8 x i32> %val2, <8 x double> %val3, <8 x double> %val4, <8 x double> %val5, <8 x double> %val6) {
+; CHECK-LABEL: fun179:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 160(%r15)
+; CHECK-NEXT: vceqf %v1, %v24, %v28
+; CHECK-NEXT: vfchdb %v0, %v25, %v0
+; CHECK-NEXT: vuphf %v2, %v1
+; CHECK-NEXT: vx %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vl %v3, 224(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vl %v0, 192(%r15)
+; CHECK-NEXT: vceqf %v2, %v26, %v30
+; CHECK-NEXT: vfchdb %v0, %v29, %v0
+; CHECK-NEXT: vuphf %v3, %v2
+; CHECK-NEXT: vx %v0, %v3, %v0
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vl %v4, 256(%r15)
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v0
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v27, %v0
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 304(%r15)
+; CHECK-NEXT: vl %v3, 240(%r15)
+; CHECK-NEXT: vsel %v26, %v3, %v1, %v0
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vmrlg %v1, %v2, %v2
+; CHECK-NEXT: vfchdb %v0, %v31, %v0
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vl %v2, 272(%r15)
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 336(%r15)
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <8 x i32> %val1, %val2
+ %cmp1 = fcmp ogt <8 x double> %val3, %val4
+ %and = xor <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x double> %val5, <8 x double> %val6
+ ret <8 x double> %sel
+}
+
+define <8 x i64> @fun180(<8 x float> %val1, <8 x float> %val2, <8 x double> %val3, <8 x double> %val4, <8 x i64> %val5, <8 x i64> %val6) {
+; CHECK-LABEL: fun180:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v28, %v28
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v28, %v28
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vl %v3, 224(%r15)
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vl %v4, 256(%r15)
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vfchdb %v2, %v25, %v2
+; CHECK-NEXT: vx %v1, %v1, %v2
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v1
+; CHECK-NEXT: vmrlf %v1, %v30, %v30
+; CHECK-NEXT: vmrlf %v2, %v26, %v26
+; CHECK-NEXT: vmrhf %v3, %v26, %v26
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v30, %v30
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vuphf %v2, %v1
+; CHECK-NEXT: vfchdb %v3, %v29, %v3
+; CHECK-NEXT: vx %v2, %v2, %v3
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vl %v3, 240(%r15)
+; CHECK-NEXT: vfchdb %v2, %v27, %v2
+; CHECK-NEXT: vx %v0, %v0, %v2
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vsel %v26, %v3, %v2, %v0
+; CHECK-NEXT: vl %v2, 272(%r15)
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vfchdb %v1, %v31, %v1
+; CHECK-NEXT: vx %v0, %v0, %v1
+; CHECK-NEXT: vl %v1, 336(%r15)
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = fcmp ogt <8 x float> %val1, %val2
+ %cmp1 = fcmp ogt <8 x double> %val3, %val4
+ %and = xor <8 x i1> %cmp0, %cmp1
+ %sel = select <8 x i1> %and, <8 x i64> %val5, <8 x i64> %val6
+ ret <8 x i64> %sel
+}
+
+define <16 x i8> @fun181(<16 x i8> %val1, <16 x i8> %val2, <16 x i8> %val3, <16 x i8> %val4, <16 x i8> %val5, <16 x i8> %val6) {
+; CHECK-LABEL: fun181:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v28, %v30
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = icmp eq <16 x i8> %val3, %val4
+ %and = xor <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i8> %val5, <16 x i8> %val6
+ ret <16 x i8> %sel
+}
+
+define <16 x i16> @fun182(<16 x i8> %val1, <16 x i8> %val2, <16 x i8> %val3, <16 x i8> %val4, <16 x i16> %val5, <16 x i16> %val6) {
+; CHECK-LABEL: fun182:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v28, %v30
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vuphb %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v1
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = icmp eq <16 x i8> %val3, %val4
+ %and = xor <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i16> %val5, <16 x i16> %val6
+ ret <16 x i16> %sel
+}
+
+define <16 x i64> @fun183(<16 x i8> %val1, <16 x i8> %val2, <16 x i16> %val3, <16 x i16> %val4, <16 x i64> %val5, <16 x i64> %val6) {
+; CHECK-LABEL: fun183:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vceqh %v0, %v28, %v25
+; CHECK-NEXT: vuphb %v2, %v1
+; CHECK-NEXT: vx %v0, %v2, %v0
+; CHECK-NEXT: vuphh %v2, %v0
+; CHECK-NEXT: vl %v3, 256(%r15)
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsel %v24, %v29, %v3, %v2
+; CHECK-NEXT: vpkg %v2, %v0, %v0
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vl %v3, 272(%r15)
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsel %v26, %v31, %v3, %v2
+; CHECK-NEXT: vmrlg %v2, %v0, %v0
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vsldb %v0, %v0, %v0, 12
+; CHECK-NEXT: vl %v3, 288(%r15)
+; CHECK-NEXT: vl %v4, 160(%r15)
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vl %v3, 176(%r15)
+; CHECK-NEXT: vl %v4, 192(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vsel %v0, %v3, %v2, %v0
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vceqh %v2, %v30, %v27
+; CHECK-NEXT: vlr %v30, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vx %v1, %v1, %v2
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsel %v25, %v4, %v3, %v2
+; CHECK-NEXT: vl %v3, 336(%r15)
+; CHECK-NEXT: vl %v4, 208(%r15)
+; CHECK-NEXT: vpkg %v2, %v1, %v1
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsel %v27, %v4, %v3, %v2
+; CHECK-NEXT: vl %v3, 352(%r15)
+; CHECK-NEXT: vl %v4, 224(%r15)
+; CHECK-NEXT: vmrlg %v2, %v1, %v1
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vsldb %v1, %v1, %v1, 12
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vsel %v29, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 368(%r15)
+; CHECK-NEXT: vl %v3, 240(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v31, %v3, %v2, %v1
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = icmp eq <16 x i16> %val3, %val4
+ %and = xor <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i64> %val5, <16 x i64> %val6
+ ret <16 x i64> %sel
+}
+
+define <16 x i64> @fun184(<16 x i8> %val1, <16 x i8> %val2, <16 x i32> %val3, <16 x i32> %val4, <16 x i64> %val5, <16 x i64> %val6) {
+; CHECK-LABEL: fun184:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vuphb %v2, %v1
+; CHECK-NEXT: vceqf %v0, %v28, %v29
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vx %v0, %v2, %v0
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vl %v4, 192(%r15)
+; CHECK-NEXT: vuphf %v2, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 336(%r15)
+; CHECK-NEXT: vl %v3, 208(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v26, %v3, %v2, %v0
+; CHECK-NEXT: vpkg %v2, %v1, %v1
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vceqf %v0, %v30, %v31
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vx %v0, %v2, %v0
+; CHECK-NEXT: vl %v3, 352(%r15)
+; CHECK-NEXT: vl %v4, 224(%r15)
+; CHECK-NEXT: vuphf %v2, %v0
+; CHECK-NEXT: vl %v5, 256(%r15)
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vl %v4, 384(%r15)
+; CHECK-NEXT: vmrlg %v3, %v1, %v1
+; CHECK-NEXT: vuphb %v3, %v3
+; CHECK-NEXT: vceqf %v2, %v25, %v2
+; CHECK-NEXT: vuphh %v3, %v3
+; CHECK-NEXT: vx %v2, %v3, %v2
+; CHECK-NEXT: vuphf %v3, %v2
+; CHECK-NEXT: vsldb %v1, %v1, %v1, 12
+; CHECK-NEXT: vsel %v25, %v5, %v4, %v3
+; CHECK-NEXT: vl %v3, 176(%r15)
+; CHECK-NEXT: vl %v4, 416(%r15)
+; CHECK-NEXT: vl %v5, 288(%r15)
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vceqf %v3, %v27, %v3
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vx %v1, %v1, %v3
+; CHECK-NEXT: vuphf %v3, %v1
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v29, %v5, %v4, %v3
+; CHECK-NEXT: vl %v3, 368(%r15)
+; CHECK-NEXT: vl %v4, 240(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v4, %v3, %v0
+; CHECK-NEXT: vl %v3, 272(%r15)
+; CHECK-NEXT: vmrlg %v0, %v2, %v2
+; CHECK-NEXT: vl %v2, 400(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v27, %v3, %v2, %v0
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 432(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v31, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = icmp eq <16 x i32> %val3, %val4
+ %and = xor <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i64> %val5, <16 x i64> %val6
+ ret <16 x i64> %sel
+}
+
+define <16 x i64> @fun185(<16 x i8> %val1, <16 x i8> %val2, <16 x i64> %val3, <16 x i64> %val4, <16 x i64> %val5, <16 x i64> %val6) {
+; CHECK-LABEL: fun185:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 192(%r15)
+; CHECK-NEXT: vceqg %v1, %v28, %v0
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v2, %v0
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vx %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 448(%r15)
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v1
+; CHECK-NEXT: vpkf %v2, %v0, %v0
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vceqg %v1, %v30, %v1
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vx %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 464(%r15)
+; CHECK-NEXT: vl %v3, 336(%r15)
+; CHECK-NEXT: vsel %v26, %v3, %v2, %v1
+; CHECK-NEXT: vpkg %v2, %v0, %v0
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vl %v3, 352(%r15)
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vceqg %v1, %v25, %v1
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vx %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 480(%r15)
+; CHECK-NEXT: vsel %v28, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v3, 368(%r15)
+; CHECK-NEXT: vsldb %v2, %v0, %v0, 6
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vceqg %v1, %v27, %v1
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vx %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 496(%r15)
+; CHECK-NEXT: vsel %v30, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vl %v3, 384(%r15)
+; CHECK-NEXT: vmrlg %v2, %v0, %v0
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vceqg %v1, %v29, %v1
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vx %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 512(%r15)
+; CHECK-NEXT: vsel %v25, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vl %v3, 400(%r15)
+; CHECK-NEXT: vsldb %v2, %v0, %v0, 10
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vceqg %v1, %v31, %v1
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vx %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 528(%r15)
+; CHECK-NEXT: vsel %v27, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 288(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vl %v3, 416(%r15)
+; CHECK-NEXT: vceqg %v1, %v2, %v1
+; CHECK-NEXT: vsldb %v2, %v0, %v0, 12
+; CHECK-NEXT: vuphb %v2, %v2
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsldb %v0, %v0, %v0, 14
+; CHECK-NEXT: vx %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 544(%r15)
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vsel %v29, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 304(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vceqg %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 432(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vx %v0, %v0, %v1
+; CHECK-NEXT: vl %v1, 560(%r15)
+; CHECK-NEXT: vsel %v31, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = icmp eq <16 x i64> %val3, %val4
+ %and = xor <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i64> %val5, <16 x i64> %val6
+ ret <16 x i64> %sel
+}
+
+define <16 x i16> @fun186(<16 x i8> %val1, <16 x i8> %val2, <16 x float> %val3, <16 x float> %val4, <16 x i16> %val5, <16 x i16> %val6) {
+; CHECK-LABEL: fun186:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v31, %v31
+; CHECK-NEXT: vmrlf %v1, %v30, %v30
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v31, %v31
+; CHECK-NEXT: vmrhf %v2, %v30, %v30
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v28, %v28
+; CHECK-NEXT: vmrlf %v4, %v25, %v25
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v29, %v29
+; CHECK-NEXT: vmrlf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v29, %v29
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vuphb %v2, %v1
+; CHECK-NEXT: vx %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vmrlf %v2, %v0, %v0
+; CHECK-NEXT: vmrlf %v3, %v27, %v27
+; CHECK-NEXT: vmrhf %v0, %v0, %v0
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vmrhf %v3, %v27, %v27
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v0, %v3, %v0
+; CHECK-NEXT: vpkg %v0, %v0, %v2
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vmrlf %v3, %v2, %v2
+; CHECK-NEXT: vmrhf %v2, %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v3, %v4, %v3
+; CHECK-NEXT: vmrhf %v4, %v25, %v25
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v2, %v4, %v2
+; CHECK-NEXT: vpkg %v2, %v2, %v3
+; CHECK-NEXT: vpkf %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = fcmp ogt <16 x float> %val3, %val4
+ %and = xor <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i16> %val5, <16 x i16> %val6
+ ret <16 x i16> %sel
+}
+
+define <16 x i8> @fun187(<16 x i8> %val1, <16 x i8> %val2, <16 x double> %val3, <16 x double> %val4, <16 x i8> %val5, <16 x i8> %val6) {
+; CHECK-LABEL: fun187:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 304(%r15)
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 288(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vl %v2, 256(%r15)
+; CHECK-NEXT: vfchdb %v1, %v31, %v1
+; CHECK-NEXT: vfchdb %v2, %v29, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vfchdb %v1, %v27, %v1
+; CHECK-NEXT: vfchdb %v2, %v25, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vfchdb %v2, %v30, %v2
+; CHECK-NEXT: vfchdb %v3, %v28, %v3
+; CHECK-NEXT: vpkg %v2, %v3, %v2
+; CHECK-NEXT: vpkf %v1, %v2, %v1
+; CHECK-NEXT: vpkh %v0, %v1, %v0
+; CHECK-NEXT: vceqb %v1, %v24, %v26
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 336(%r15)
+; CHECK-NEXT: vl %v2, 320(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i8> %val1, %val2
+ %cmp1 = fcmp ogt <16 x double> %val3, %val4
+ %and = xor <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %and, <16 x i8> %val5, <16 x i8> %val6
+ ret <16 x i8> %sel
+}
+
+define <16 x i8> @fun188(<16 x i16> %val1, <16 x i16> %val2, <16 x i16> %val3, <16 x i16> %val4, <16 x i8> %val5, <16 x i8> %val6) {
+; CHECK-LABEL: fun188:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v27, %v31
+; CHECK-NEXT: vceqh %v1, %v26, %v30
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v25, %v29
+; CHECK-NEXT: vceqh %v2, %v24, %v28
+; CHECK-NEXT: vx %v1, %v2, %v1
+; CHECK-NEXT: vpkh %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = icmp eq <16 x i16> %val3, %val4
+ %xor = xor <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %xor, <16 x i8> %val5, <16 x i8> %val6
+ ret <16 x i8> %sel
+}
+
+define <16 x i16> @fun189(<16 x i16> %val1, <16 x i16> %val2, <16 x i16> %val3, <16 x i16> %val4, <16 x i16> %val5, <16 x i16> %val6) {
+; CHECK-LABEL: fun189:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v25, %v29
+; CHECK-NEXT: vceqh %v1, %v24, %v28
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vceqh %v0, %v27, %v31
+; CHECK-NEXT: vceqh %v1, %v26, %v30
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = icmp eq <16 x i16> %val3, %val4
+ %xor = xor <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %xor, <16 x i16> %val5, <16 x i16> %val6
+ ret <16 x i16> %sel
+}
+
+define <16 x i32> @fun190(<16 x i16> %val1, <16 x i16> %val2, <16 x i16> %val3, <16 x i16> %val4, <16 x i32> %val5, <16 x i32> %val6) {
+; CHECK-LABEL: fun190:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v25, %v29
+; CHECK-NEXT: vceqh %v1, %v24, %v28
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vuphh %v1, %v0
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v1
+; CHECK-NEXT: vceqh %v1, %v27, %v31
+; CHECK-NEXT: vceqh %v2, %v26, %v30
+; CHECK-NEXT: vx %v1, %v2, %v1
+; CHECK-NEXT: vl %v3, 256(%r15)
+; CHECK-NEXT: vl %v4, 192(%r15)
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 240(%r15)
+; CHECK-NEXT: vl %v3, 176(%r15)
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v26, %v3, %v2, %v0
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = icmp eq <16 x i16> %val3, %val4
+ %xor = xor <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %xor, <16 x i32> %val5, <16 x i32> %val6
+ ret <16 x i32> %sel
+}
+
+define <16 x i8> @fun191(<16 x i16> %val1, <16 x i16> %val2, <16 x i32> %val3, <16 x i32> %val4, <16 x i8> %val5, <16 x i8> %val6) {
+; CHECK-LABEL: fun191:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vceqf %v0, %v31, %v0
+; CHECK-NEXT: vceqf %v1, %v29, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v26, %v30
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vceqf %v1, %v27, %v1
+; CHECK-NEXT: vceqf %v2, %v25, %v2
+; CHECK-NEXT: vpkf %v1, %v2, %v1
+; CHECK-NEXT: vceqh %v2, %v24, %v28
+; CHECK-NEXT: vx %v1, %v2, %v1
+; CHECK-NEXT: vpkh %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = icmp eq <16 x i32> %val3, %val4
+ %xor = xor <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %xor, <16 x i8> %val5, <16 x i8> %val6
+ ret <16 x i8> %sel
+}
+
+define <16 x i32> @fun192(<16 x i16> %val1, <16 x i16> %val2, <16 x i64> %val3, <16 x i64> %val4, <16 x i32> %val5, <16 x i32> %val6) {
+; CHECK-LABEL: fun192:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 240(%r15)
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vceqg %v0, %v27, %v0
+; CHECK-NEXT: vceqg %v1, %v25, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v24, %v28
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vx %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 416(%r15)
+; CHECK-NEXT: vl %v3, 352(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vl %v0, 304(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vceqg %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vceqg %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v0, %v2, %v0
+; CHECK-NEXT: vceqh %v2, %v26, %v30
+; CHECK-NEXT: vuphh %v3, %v2
+; CHECK-NEXT: vx %v0, %v3, %v0
+; CHECK-NEXT: vl %v3, 448(%r15)
+; CHECK-NEXT: vl %v4, 384(%r15)
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v0
+; CHECK-NEXT: vl %v0, 272(%r15)
+; CHECK-NEXT: vl %v3, 256(%r15)
+; CHECK-NEXT: vceqg %v0, %v31, %v0
+; CHECK-NEXT: vceqg %v3, %v29, %v3
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vpkg %v0, %v3, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vl %v3, 368(%r15)
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 432(%r15)
+; CHECK-NEXT: vsel %v26, %v3, %v1, %v0
+; CHECK-NEXT: vl %v0, 336(%r15)
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vceqg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 320(%r15)
+; CHECK-NEXT: vceqg %v1, %v3, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlg %v1, %v2, %v2
+; CHECK-NEXT: vl %v2, 400(%r15)
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 464(%r15)
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = icmp eq <16 x i64> %val3, %val4
+ %xor = xor <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %xor, <16 x i32> %val5, <16 x i32> %val6
+ ret <16 x i32> %sel
+}
+
+define <16 x double> @fun193(<16 x i16> %val1, <16 x i16> %val2, <16 x float> %val3, <16 x float> %val4, <16 x double> %val5, <16 x double> %val6) {
+; CHECK-LABEL: fun193:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 160(%r15)
+; CHECK-NEXT: vmrlf %v1, %v0, %v0
+; CHECK-NEXT: vmrlf %v2, %v25, %v25
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v0, %v0, %v0
+; CHECK-NEXT: vmrhf %v2, %v25, %v25
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vl %v3, 352(%r15)
+; CHECK-NEXT: vl %v4, 224(%r15)
+; CHECK-NEXT: vl %v5, 416(%r15)
+; CHECK-NEXT: vl %v6, 288(%r15)
+; CHECK-NEXT: vfchdb %v0, %v2, %v0
+; CHECK-NEXT: vpkg %v0, %v0, %v1
+; CHECK-NEXT: vceqh %v1, %v24, %v28
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vx %v0, %v2, %v0
+; CHECK-NEXT: vuphf %v2, %v0
+; CHECK-NEXT: vsel %v24, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vmrlf %v3, %v2, %v2
+; CHECK-NEXT: vmrlf %v4, %v27, %v27
+; CHECK-NEXT: vmrhf %v2, %v2, %v2
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v3, %v4, %v3
+; CHECK-NEXT: vmrhf %v4, %v27, %v27
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v2, %v4, %v2
+; CHECK-NEXT: vl %v4, 256(%r15)
+; CHECK-NEXT: vpkg %v2, %v2, %v3
+; CHECK-NEXT: vl %v3, 384(%r15)
+; CHECK-NEXT: vx %v1, %v1, %v2
+; CHECK-NEXT: vuphf %v2, %v1
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v2
+; CHECK-NEXT: vl %v2, 192(%r15)
+; CHECK-NEXT: vmrlf %v3, %v2, %v2
+; CHECK-NEXT: vmrlf %v4, %v29, %v29
+; CHECK-NEXT: vmrhf %v2, %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v3, %v4, %v3
+; CHECK-NEXT: vmrhf %v4, %v29, %v29
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v2, %v4, %v2
+; CHECK-NEXT: vpkg %v2, %v2, %v3
+; CHECK-NEXT: vceqh %v3, %v26, %v30
+; CHECK-NEXT: vuphh %v4, %v3
+; CHECK-NEXT: vx %v2, %v4, %v2
+; CHECK-NEXT: vuphf %v4, %v2
+; CHECK-NEXT: vsel %v25, %v6, %v5, %v4
+; CHECK-NEXT: vl %v4, 208(%r15)
+; CHECK-NEXT: vmrlf %v5, %v4, %v4
+; CHECK-NEXT: vmrlf %v6, %v31, %v31
+; CHECK-NEXT: vmrhf %v4, %v4, %v4
+; CHECK-NEXT: vmrlg %v3, %v3, %v3
+; CHECK-NEXT: vuphh %v3, %v3
+; CHECK-NEXT: vldeb %v5, %v5
+; CHECK-NEXT: vldeb %v6, %v6
+; CHECK-NEXT: vfchdb %v5, %v6, %v5
+; CHECK-NEXT: vmrhf %v6, %v31, %v31
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vldeb %v6, %v6
+; CHECK-NEXT: vfchdb %v4, %v6, %v4
+; CHECK-NEXT: vl %v6, 320(%r15)
+; CHECK-NEXT: vpkg %v4, %v4, %v5
+; CHECK-NEXT: vl %v5, 448(%r15)
+; CHECK-NEXT: vx %v3, %v3, %v4
+; CHECK-NEXT: vuphf %v4, %v3
+; CHECK-NEXT: vsel %v29, %v6, %v5, %v4
+; CHECK-NEXT: vl %v4, 368(%r15)
+; CHECK-NEXT: vl %v5, 240(%r15)
+; CHECK-NEXT: vsel %v26, %v5, %v4, %v0
+; CHECK-NEXT: vl %v4, 272(%r15)
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 400(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v4, %v1, %v0
+; CHECK-NEXT: vl %v1, 432(%r15)
+; CHECK-NEXT: vmrlg %v0, %v2, %v2
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v27, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 464(%r15)
+; CHECK-NEXT: vl %v2, 336(%r15)
+; CHECK-NEXT: vmrlg %v0, %v3, %v3
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v31, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = fcmp ogt <16 x float> %val3, %val4
+ %xor = xor <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %xor, <16 x double> %val5, <16 x double> %val6
+ ret <16 x double> %sel
+}
+
+define <16 x i32> @fun194(<16 x i16> %val1, <16 x i16> %val2, <16 x double> %val3, <16 x double> %val4, <16 x i32> %val5, <16 x i32> %val6) {
+; CHECK-LABEL: fun194:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 240(%r15)
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vfchdb %v0, %v27, %v0
+; CHECK-NEXT: vfchdb %v1, %v25, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqh %v1, %v24, %v28
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vx %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 416(%r15)
+; CHECK-NEXT: vl %v3, 352(%r15)
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vl %v0, 304(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vfchdb %v0, %v2, %v0
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v0, %v2, %v0
+; CHECK-NEXT: vceqh %v2, %v26, %v30
+; CHECK-NEXT: vuphh %v3, %v2
+; CHECK-NEXT: vx %v0, %v3, %v0
+; CHECK-NEXT: vl %v3, 448(%r15)
+; CHECK-NEXT: vl %v4, 384(%r15)
+; CHECK-NEXT: vsel %v28, %v4, %v3, %v0
+; CHECK-NEXT: vl %v0, 272(%r15)
+; CHECK-NEXT: vl %v3, 256(%r15)
+; CHECK-NEXT: vfchdb %v0, %v31, %v0
+; CHECK-NEXT: vfchdb %v3, %v29, %v3
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vpkg %v0, %v3, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vl %v3, 368(%r15)
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 432(%r15)
+; CHECK-NEXT: vsel %v26, %v3, %v1, %v0
+; CHECK-NEXT: vl %v0, 336(%r15)
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 320(%r15)
+; CHECK-NEXT: vfchdb %v1, %v3, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlg %v1, %v2, %v2
+; CHECK-NEXT: vl %v2, 400(%r15)
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vx %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 464(%r15)
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp0 = icmp eq <16 x i16> %val1, %val2
+ %cmp1 = fcmp ogt <16 x double> %val3, %val4
+ %xor = xor <16 x i1> %cmp0, %cmp1
+ %sel = select <16 x i1> %xor, <16 x i32> %val5, <16 x i32> %val6
+ ret <16 x i32> %sel
+}
+
diff --git a/test/CodeGen/SystemZ/vec-cmpsel.ll b/test/CodeGen/SystemZ/vec-cmpsel.ll
new file mode 100644
index 000000000000..2d518a2cc838
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-cmpsel.ll
@@ -0,0 +1,3378 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+;
+; Test that vector compare / select combinations do not produce any
+; unnecessary pack / unpack / shift instructions.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
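+; A rough sketch of the expected lowering (informal commentary, not part of
+; the autogenerated assertions): the vceq*/vfch* compares produce a mask at
+; the element width of their inputs, while vsel needs a mask at the element
+; width of the selected operands.  When the two widths differ, the mask is
+; widened with vuph* (unpack high) or narrowed with vpk* (pack).  For
+; example, with the z13 lowering checked below,
+;
+;   %cmp = icmp eq <2 x i8> %val1, %val2
+;   %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+;
+; widens the i8 mask in three steps (vuphb, vuphh, vuphf) before the vsel,
+; as fun3 checks, while a same-width pair such as fun0 uses the mask directly.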
+
+define <2 x i8> @fun0(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %val4) {
+; CHECK-LABEL: fun0:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i8> %val3, <2 x i8> %val4
+ ret <2 x i8> %sel
+}
+
+define <2 x i16> @fun1(<2 x i8> %val1, <2 x i8> %val2, <2 x i16> %val3, <2 x i16> %val4) {
+; CHECK-LABEL: fun1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i16> %val3, <2 x i16> %val4
+ ret <2 x i16> %sel
+}
+
+define <2 x i32> @fun2(<2 x i8> %val1, <2 x i8> %val2, <2 x i32> %val3, <2 x i32> %val4) {
+; CHECK-LABEL: fun2:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i32> %val3, <2 x i32> %val4
+ ret <2 x i32> %sel
+}
+
+define <2 x i64> @fun3(<2 x i8> %val1, <2 x i8> %val2, <2 x i64> %val3, <2 x i64> %val4) {
+; CHECK-LABEL: fun3:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %sel
+}
+
+define <2 x float> @fun4(<2 x i8> %val1, <2 x i8> %val2, <2 x float> %val3, <2 x float> %val4) {
+; CHECK-LABEL: fun4:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x float> %val3, <2 x float> %val4
+ ret <2 x float> %sel
+}
+
+define <2 x double> @fun5(<2 x i8> %val1, <2 x i8> %val2, <2 x double> %val3, <2 x double> %val4) {
+; CHECK-LABEL: fun5:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i8> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
+ ret <2 x double> %sel
+}
+
+define <2 x i8> @fun6(<2 x i16> %val1, <2 x i16> %val2, <2 x i8> %val3, <2 x i8> %val4) {
+; CHECK-LABEL: fun6:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vpkh %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i8> %val3, <2 x i8> %val4
+ ret <2 x i8> %sel
+}
+
+define <2 x i16> @fun7(<2 x i16> %val1, <2 x i16> %val2, <2 x i16> %val3, <2 x i16> %val4) {
+; CHECK-LABEL: fun7:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i16> %val3, <2 x i16> %val4
+ ret <2 x i16> %sel
+}
+
+define <2 x i32> @fun8(<2 x i16> %val1, <2 x i16> %val2, <2 x i32> %val3, <2 x i32> %val4) {
+; CHECK-LABEL: fun8:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i32> %val3, <2 x i32> %val4
+ ret <2 x i32> %sel
+}
+
+define <2 x i64> @fun9(<2 x i16> %val1, <2 x i16> %val2, <2 x i64> %val3, <2 x i64> %val4) {
+; CHECK-LABEL: fun9:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %sel
+}
+
+define <2 x float> @fun10(<2 x i16> %val1, <2 x i16> %val2, <2 x float> %val3, <2 x float> %val4) {
+; CHECK-LABEL: fun10:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x float> %val3, <2 x float> %val4
+ ret <2 x float> %sel
+}
+
+define <2 x double> @fun11(<2 x i16> %val1, <2 x i16> %val2, <2 x double> %val3, <2 x double> %val4) {
+; CHECK-LABEL: fun11:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i16> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
+ ret <2 x double> %sel
+}
+
+define <2 x i8> @fun12(<2 x i32> %val1, <2 x i32> %val2, <2 x i8> %val3, <2 x i8> %val4) {
+; CHECK-LABEL: fun12:
+; CHECK: # BB#0:
+; CHECK-NEXT: larl %r1, .LCPI12_0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vceqf %v0, %v24, %v26
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i8> %val3, <2 x i8> %val4
+ ret <2 x i8> %sel
+}
+
+define <2 x i16> @fun13(<2 x i32> %val1, <2 x i32> %val2, <2 x i16> %val3, <2 x i16> %val4) {
+; CHECK-LABEL: fun13:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v24, %v26
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i16> %val3, <2 x i16> %val4
+ ret <2 x i16> %sel
+}
+
+define <2 x i32> @fun14(<2 x i32> %val1, <2 x i32> %val2, <2 x i32> %val3, <2 x i32> %val4) {
+; CHECK-LABEL: fun14:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i32> %val3, <2 x i32> %val4
+ ret <2 x i32> %sel
+}
+
+define <2 x i64> @fun15(<2 x i32> %val1, <2 x i32> %val2, <2 x i64> %val3, <2 x i64> %val4) {
+; CHECK-LABEL: fun15:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v24, %v26
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %sel
+}
+
+define <2 x float> @fun16(<2 x i32> %val1, <2 x i32> %val2, <2 x float> %val3, <2 x float> %val4) {
+; CHECK-LABEL: fun16:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x float> %val3, <2 x float> %val4
+ ret <2 x float> %sel
+}
+
+define <2 x double> @fun17(<2 x i32> %val1, <2 x i32> %val2, <2 x double> %val3, <2 x double> %val4) {
+; CHECK-LABEL: fun17:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v24, %v26
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i32> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
+ ret <2 x double> %sel
+}
+
+define <2 x i8> @fun18(<2 x i64> %val1, <2 x i64> %val2, <2 x i8> %val3, <2 x i8> %val4) {
+; CHECK-LABEL: fun18:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v24, %v26
+; CHECK-NEXT: vrepih %v1, 1807
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i8> %val3, <2 x i8> %val4
+ ret <2 x i8> %sel
+}
+
+define <2 x i16> @fun19(<2 x i64> %val1, <2 x i64> %val2, <2 x i16> %val3, <2 x i16> %val4) {
+; CHECK-LABEL: fun19:
+; CHECK: # BB#0:
+; CHECK-NEXT: larl %r1, .LCPI19_0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vceqg %v0, %v24, %v26
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i16> %val3, <2 x i16> %val4
+ ret <2 x i16> %sel
+}
+
+define <2 x i32> @fun20(<2 x i64> %val1, <2 x i64> %val2, <2 x i32> %val3, <2 x i32> %val4) {
+; CHECK-LABEL: fun20:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v24, %v26
+; CHECK-NEXT: vpkg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i32> %val3, <2 x i32> %val4
+ ret <2 x i32> %sel
+}
+
+define <2 x i64> @fun21(<2 x i64> %val1, <2 x i64> %val2, <2 x i64> %val3, <2 x i64> %val4) {
+; CHECK-LABEL: fun21:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %sel
+}
+
+define <2 x float> @fun22(<2 x i64> %val1, <2 x i64> %val2, <2 x float> %val3, <2 x float> %val4) {
+; CHECK-LABEL: fun22:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v24, %v26
+; CHECK-NEXT: vpkg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x float> %val3, <2 x float> %val4
+ ret <2 x float> %sel
+}
+
+define <2 x double> @fun23(<2 x i64> %val1, <2 x i64> %val2, <2 x double> %val3, <2 x double> %val4) {
+; CHECK-LABEL: fun23:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
+ ret <2 x double> %sel
+}
+
+define <4 x i8> @fun24(<4 x i8> %val1, <4 x i8> %val2, <4 x i8> %val3, <4 x i8> %val4) {
+; CHECK-LABEL: fun24:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i8> %val3, <4 x i8> %val4
+ ret <4 x i8> %sel
+}
+
+define <4 x i16> @fun25(<4 x i8> %val1, <4 x i8> %val2, <4 x i16> %val3, <4 x i16> %val4) {
+; CHECK-LABEL: fun25:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i16> %val3, <4 x i16> %val4
+ ret <4 x i16> %sel
+}
+
+define <4 x i32> @fun26(<4 x i8> %val1, <4 x i8> %val2, <4 x i32> %val3, <4 x i32> %val4) {
+; CHECK-LABEL: fun26:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %sel
+}
+
+define <4 x i64> @fun27(<4 x i8> %val1, <4 x i8> %val2, <4 x i64> %val3, <4 x i64> %val4) {
+; CHECK-LABEL: fun27:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v25, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i64> %val3, <4 x i64> %val4
+ ret <4 x i64> %sel
+}
+
+define <4 x float> @fun28(<4 x i8> %val1, <4 x i8> %val2, <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: fun28:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %sel
+}
+
+define <4 x double> @fun29(<4 x i8> %val1, <4 x i8> %val2, <4 x double> %val3, <4 x double> %val4) {
+; CHECK-LABEL: fun29:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v25, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i8> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x double> %val3, <4 x double> %val4
+ ret <4 x double> %sel
+}
+
+define <4 x i8> @fun30(<4 x i16> %val1, <4 x i16> %val2, <4 x i8> %val3, <4 x i8> %val4) {
+; CHECK-LABEL: fun30:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vpkh %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i8> %val3, <4 x i8> %val4
+ ret <4 x i8> %sel
+}
+
+define <4 x i16> @fun31(<4 x i16> %val1, <4 x i16> %val2, <4 x i16> %val3, <4 x i16> %val4) {
+; CHECK-LABEL: fun31:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i16> %val3, <4 x i16> %val4
+ ret <4 x i16> %sel
+}
+
+define <4 x i32> @fun32(<4 x i16> %val1, <4 x i16> %val2, <4 x i32> %val3, <4 x i32> %val4) {
+; CHECK-LABEL: fun32:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %sel
+}
+
+define <4 x i64> @fun33(<4 x i16> %val1, <4 x i16> %val2, <4 x i64> %val3, <4 x i64> %val4) {
+; CHECK-LABEL: fun33:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vuphh %v1, %v0
+; CHECK-NEXT: vpkg %v0, %v0, %v0
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v25, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i64> %val3, <4 x i64> %val4
+ ret <4 x i64> %sel
+}
+
+define <4 x float> @fun34(<4 x i16> %val1, <4 x i16> %val2, <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: fun34:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %sel
+}
+
+define <4 x double> @fun35(<4 x i16> %val1, <4 x i16> %val2, <4 x double> %val3, <4 x double> %val4) {
+; CHECK-LABEL: fun35:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vuphh %v1, %v0
+; CHECK-NEXT: vpkg %v0, %v0, %v0
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v25, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i16> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x double> %val3, <4 x double> %val4
+ ret <4 x double> %sel
+}
+
+define <4 x i8> @fun36(<4 x i32> %val1, <4 x i32> %val2, <4 x i8> %val3, <4 x i8> %val4) {
+; CHECK-LABEL: fun36:
+; CHECK: # BB#0:
+; CHECK-NEXT: larl %r1, .LCPI36_0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vceqf %v0, %v24, %v26
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i8> %val3, <4 x i8> %val4
+ ret <4 x i8> %sel
+}
+
+define <4 x i16> @fun37(<4 x i32> %val1, <4 x i32> %val2, <4 x i16> %val3, <4 x i16> %val4) {
+; CHECK-LABEL: fun37:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v24, %v26
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i16> %val3, <4 x i16> %val4
+ ret <4 x i16> %sel
+}
+
+define <4 x i32> @fun38(<4 x i32> %val1, <4 x i32> %val2, <4 x i32> %val3, <4 x i32> %val4) {
+; CHECK-LABEL: fun38:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %sel
+}
+
+define <4 x i64> @fun39(<4 x i32> %val1, <4 x i32> %val2, <4 x i64> %val3, <4 x i64> %val4) {
+; CHECK-LABEL: fun39:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v24, %v26
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v25, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i64> %val3, <4 x i64> %val4
+ ret <4 x i64> %sel
+}
+
+define <4 x float> @fun40(<4 x i32> %val1, <4 x i32> %val2, <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: fun40:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %sel
+}
+
+define <4 x double> @fun41(<4 x i32> %val1, <4 x i32> %val2, <4 x double> %val3, <4 x double> %val4) {
+; CHECK-LABEL: fun41:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v24, %v26
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v25, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x double> %val3, <4 x double> %val4
+ ret <4 x double> %sel
+}
+
+define <4 x i8> @fun42(<4 x i64> %val1, <4 x i64> %val2, <4 x i8> %val3, <4 x i8> %val4) {
+; CHECK-LABEL: fun42:
+; CHECK: # BB#0:
+; CHECK-NEXT: larl %r1, .LCPI42_0
+; CHECK-NEXT: vl %v2, 0(%r1)
+; CHECK-NEXT: vceqg %v0, %v26, %v30
+; CHECK-NEXT: vceqg %v1, %v24, %v28
+; CHECK-NEXT: vperm %v0, %v1, %v0, %v2
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i8> %val3, <4 x i8> %val4
+ ret <4 x i8> %sel
+}
+
+define <4 x i16> @fun43(<4 x i64> %val1, <4 x i64> %val2, <4 x i16> %val3, <4 x i16> %val4) {
+; CHECK-LABEL: fun43:
+; CHECK: # BB#0:
+; CHECK-NEXT: larl %r1, .LCPI43_0
+; CHECK-NEXT: vl %v2, 0(%r1)
+; CHECK-NEXT: vceqg %v0, %v26, %v30
+; CHECK-NEXT: vceqg %v1, %v24, %v28
+; CHECK-NEXT: vperm %v0, %v1, %v0, %v2
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i16> %val3, <4 x i16> %val4
+ ret <4 x i16> %sel
+}
+
+define <4 x i32> @fun44(<4 x i64> %val1, <4 x i64> %val2, <4 x i32> %val3, <4 x i32> %val4) {
+; CHECK-LABEL: fun44:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v26, %v30
+; CHECK-NEXT: vceqg %v1, %v24, %v28
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %sel
+}
+
+define <4 x i64> @fun45(<4 x i64> %val1, <4 x i64> %val2, <4 x i64> %val3, <4 x i64> %val4) {
+; CHECK-LABEL: fun45:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v24, %v28
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v0
+; CHECK-NEXT: vceqg %v0, %v26, %v30
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i64> %val3, <4 x i64> %val4
+ ret <4 x i64> %sel
+}
+
+define <4 x float> @fun46(<4 x i64> %val1, <4 x i64> %val2, <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: fun46:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v26, %v30
+; CHECK-NEXT: vceqg %v1, %v24, %v28
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %sel
+}
+
+define <4 x double> @fun47(<4 x i64> %val1, <4 x i64> %val2, <4 x double> %val3, <4 x double> %val4) {
+; CHECK-LABEL: fun47:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v24, %v28
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v0
+; CHECK-NEXT: vceqg %v0, %v26, %v30
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i64> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x double> %val3, <4 x double> %val4
+ ret <4 x double> %sel
+}
+
+define <8 x i8> @fun48(<8 x i8> %val1, <8 x i8> %val2, <8 x i8> %val3, <8 x i8> %val4) {
+; CHECK-LABEL: fun48:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i8> %val3, <8 x i8> %val4
+ ret <8 x i8> %sel
+}
+
+define <8 x i16> @fun49(<8 x i8> %val1, <8 x i8> %val2, <8 x i16> %val3, <8 x i16> %val4) {
+; CHECK-LABEL: fun49:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %sel
+}
+
+define <8 x i32> @fun50(<8 x i8> %val1, <8 x i8> %val2, <8 x i32> %val3, <8 x i32> %val4) {
+; CHECK-LABEL: fun50:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v1, %v0
+; CHECK-NEXT: vpkg %v0, %v0, %v0
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v25, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i32> %val3, <8 x i32> %val4
+ ret <8 x i32> %sel
+}
+
+define <8 x i64> @fun51(<8 x i8> %val1, <8 x i8> %val2, <8 x i64> %val3, <8 x i64> %val4) {
+; CHECK-LABEL: fun51:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v1, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v24, %v28, %v29, %v1
+; CHECK-NEXT: vpkf %v1, %v0, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v31, %v1
+; CHECK-NEXT: vpkg %v1, %v0, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vsldb %v0, %v0, %v0, 6
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v28, %v25, %v2, %v1
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v27, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i64> %val3, <8 x i64> %val4
+ ret <8 x i64> %sel
+}
+
+define <8 x float> @fun52(<8 x i8> %val1, <8 x i8> %val2, <8 x float> %val3, <8 x float> %val4) {
+; CHECK-LABEL: fun52:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v1, %v0
+; CHECK-NEXT: vpkg %v0, %v0, %v0
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v25, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x float> %val3, <8 x float> %val4
+ ret <8 x float> %sel
+}
+
+define <8 x double> @fun53(<8 x i8> %val1, <8 x i8> %val2, <8 x double> %val3, <8 x double> %val4) {
+; CHECK-LABEL: fun53:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v1, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v24, %v28, %v29, %v1
+; CHECK-NEXT: vpkf %v1, %v0, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v31, %v1
+; CHECK-NEXT: vpkg %v1, %v0, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vsldb %v0, %v0, %v0, 6
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v28, %v25, %v2, %v1
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v27, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i8> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x double> %val3, <8 x double> %val4
+ ret <8 x double> %sel
+}
+
+define <8 x i8> @fun54(<8 x i16> %val1, <8 x i16> %val2, <8 x i8> %val3, <8 x i8> %val4) {
+; CHECK-LABEL: fun54:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vpkh %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i8> %val3, <8 x i8> %val4
+ ret <8 x i8> %sel
+}
+
+define <8 x i16> @fun55(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3, <8 x i16> %val4) {
+; CHECK-LABEL: fun55:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %sel
+}
+
+define <8 x i32> @fun56(<8 x i16> %val1, <8 x i16> %val2, <8 x i32> %val3, <8 x i32> %val4) {
+; CHECK-LABEL: fun56:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vuphh %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v25, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i32> %val3, <8 x i32> %val4
+ ret <8 x i32> %sel
+}
+
+define <8 x i64> @fun57(<8 x i16> %val1, <8 x i16> %val2, <8 x i64> %val3, <8 x i64> %val4) {
+; CHECK-LABEL: fun57:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vuphh %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v24, %v28, %v29, %v1
+; CHECK-NEXT: vpkg %v1, %v0, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v31, %v1
+; CHECK-NEXT: vmrlg %v1, %v0, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vsldb %v0, %v0, %v0, 12
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v28, %v25, %v2, %v1
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v27, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i64> %val3, <8 x i64> %val4
+ ret <8 x i64> %sel
+}
+
+define <8 x float> @fun58(<8 x i16> %val1, <8 x i16> %val2, <8 x float> %val3, <8 x float> %val4) {
+; CHECK-LABEL: fun58:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vuphh %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v25, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x float> %val3, <8 x float> %val4
+ ret <8 x float> %sel
+}
+
+define <8 x double> @fun59(<8 x i16> %val1, <8 x i16> %val2, <8 x double> %val3, <8 x double> %val4) {
+; CHECK-LABEL: fun59:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v26
+; CHECK-NEXT: vuphh %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v24, %v28, %v29, %v1
+; CHECK-NEXT: vpkg %v1, %v0, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v31, %v1
+; CHECK-NEXT: vmrlg %v1, %v0, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vsldb %v0, %v0, %v0, 12
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v28, %v25, %v2, %v1
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v27, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x double> %val3, <8 x double> %val4
+ ret <8 x double> %sel
+}
+
+define <8 x i8> @fun60(<8 x i32> %val1, <8 x i32> %val2, <8 x i8> %val3, <8 x i8> %val4) {
+; CHECK-LABEL: fun60:
+; CHECK: # BB#0:
+; CHECK-NEXT: larl %r1, .LCPI60_0
+; CHECK-NEXT: vl %v2, 0(%r1)
+; CHECK-NEXT: vceqf %v0, %v26, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v28
+; CHECK-NEXT: vperm %v0, %v1, %v0, %v2
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i8> %val3, <8 x i8> %val4
+ ret <8 x i8> %sel
+}
+
+define <8 x i16> @fun61(<8 x i32> %val1, <8 x i32> %val2, <8 x i16> %val3, <8 x i16> %val4) {
+; CHECK-LABEL: fun61:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v26, %v30
+; CHECK-NEXT: vceqf %v1, %v24, %v28
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %sel
+}
+
+define <8 x i32> @fun62(<8 x i32> %val1, <8 x i32> %val2, <8 x i32> %val3, <8 x i32> %val4) {
+; CHECK-LABEL: fun62:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v24, %v28
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v0
+; CHECK-NEXT: vceqf %v0, %v26, %v30
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i32> %val3, <8 x i32> %val4
+ ret <8 x i32> %sel
+}
+
+define <8 x i64> @fun63(<8 x i32> %val1, <8 x i32> %val2, <8 x i64> %val3, <8 x i64> %val4) {
+; CHECK-LABEL: fun63:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v24, %v28
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v2, %v1
+; CHECK-NEXT: vceqf %v1, %v26, %v30
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vuphf %v2, %v1
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v28, %v29, %v3, %v2
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v26, %v27, %v2, %v0
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v31, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i64> %val3, <8 x i64> %val4
+ ret <8 x i64> %sel
+}
+
+define <8 x float> @fun64(<8 x i32> %val1, <8 x i32> %val2, <8 x float> %val3, <8 x float> %val4) {
+; CHECK-LABEL: fun64:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v24, %v28
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v0
+; CHECK-NEXT: vceqf %v0, %v26, %v30
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x float> %val3, <8 x float> %val4
+ ret <8 x float> %sel
+}
+
+define <8 x double> @fun65(<8 x i32> %val1, <8 x i32> %val2, <8 x double> %val3, <8 x double> %val4) {
+; CHECK-LABEL: fun65:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v24, %v28
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v2, %v1
+; CHECK-NEXT: vceqf %v1, %v26, %v30
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vuphf %v2, %v1
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v28, %v29, %v3, %v2
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v26, %v27, %v2, %v0
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v31, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i32> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x double> %val3, <8 x double> %val4
+ ret <8 x double> %sel
+}
+
+define <8 x i8> @fun66(<8 x i64> %val1, <8 x i64> %val2, <8 x i8> %val3, <8 x i8> %val4) {
+; CHECK-LABEL: fun66:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v30, %v31
+; CHECK-NEXT: vceqg %v1, %v28, %v29
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqg %v1, %v26, %v27
+; CHECK-NEXT: vceqg %v2, %v24, %v25
+; CHECK-NEXT: larl %r1, .LCPI66_0
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 0(%r1)
+; CHECK-NEXT: vperm %v0, %v1, %v0, %v2
+; CHECK-NEXT: vlrepg %v1, 168(%r15)
+; CHECK-NEXT: vlrepg %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i8> %val3, <8 x i8> %val4
+ ret <8 x i8> %sel
+}
+
+define <8 x i16> @fun67(<8 x i64> %val1, <8 x i64> %val2, <8 x i16> %val3, <8 x i16> %val4) {
+; CHECK-LABEL: fun67:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v30, %v31
+; CHECK-NEXT: vceqg %v1, %v28, %v29
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vceqg %v1, %v26, %v27
+; CHECK-NEXT: vceqg %v2, %v24, %v25
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %sel
+}
+
+define <8 x i32> @fun68(<8 x i64> %val1, <8 x i64> %val2, <8 x i32> %val3, <8 x i32> %val4) {
+; CHECK-LABEL: fun68:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v26, %v27
+; CHECK-NEXT: vceqg %v1, %v24, %v25
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vceqg %v0, %v30, %v31
+; CHECK-NEXT: vceqg %v1, %v28, %v29
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i32> %val3, <8 x i32> %val4
+ ret <8 x i32> %sel
+}
+
+define <8 x i64> @fun69(<8 x i64> %val1, <8 x i64> %val2, <8 x i64> %val3, <8 x i64> %val4) {
+; CHECK-LABEL: fun69:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vceqg %v0, %v24, %v25
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vceqg %v0, %v26, %v27
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vl %v2, 192(%r15)
+; CHECK-NEXT: vceqg %v0, %v28, %v29
+; CHECK-NEXT: vsel %v28, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vceqg %v0, %v30, %v31
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i64> %val3, <8 x i64> %val4
+ ret <8 x i64> %sel
+}
+
+define <8 x float> @fun70(<8 x i64> %val1, <8 x i64> %val2, <8 x float> %val3, <8 x float> %val4) {
+; CHECK-LABEL: fun70:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqg %v0, %v26, %v27
+; CHECK-NEXT: vceqg %v1, %v24, %v25
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vceqg %v0, %v30, %v31
+; CHECK-NEXT: vceqg %v1, %v28, %v29
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x float> %val3, <8 x float> %val4
+ ret <8 x float> %sel
+}
+
+define <8 x double> @fun71(<8 x i64> %val1, <8 x i64> %val2, <8 x double> %val3, <8 x double> %val4) {
+; CHECK-LABEL: fun71:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vceqg %v0, %v24, %v25
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vceqg %v0, %v26, %v27
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vl %v2, 192(%r15)
+; CHECK-NEXT: vceqg %v0, %v28, %v29
+; CHECK-NEXT: vsel %v28, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vceqg %v0, %v30, %v31
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i64> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x double> %val3, <8 x double> %val4
+ ret <8 x double> %sel
+}
+
+define <16 x i8> @fun72(<16 x i8> %val1, <16 x i8> %val2, <16 x i8> %val3, <16 x i8> %val4) {
+; CHECK-LABEL: fun72:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %sel
+}
+
+define <16 x i16> @fun73(<16 x i8> %val1, <16 x i8> %val2, <16 x i16> %val3, <16 x i16> %val4) {
+; CHECK-LABEL: fun73:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v25, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i16> %val3, <16 x i16> %val4
+ ret <16 x i16> %sel
+}
+
+define <16 x i32> @fun74(<16 x i8> %val1, <16 x i8> %val2, <16 x i32> %val3, <16 x i32> %val4) {
+; CHECK-LABEL: fun74:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v1, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vsel %v24, %v28, %v29, %v1
+; CHECK-NEXT: vpkg %v1, %v0, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v31, %v1
+; CHECK-NEXT: vmrlg %v1, %v0, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vsldb %v0, %v0, %v0, 12
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vsel %v28, %v25, %v2, %v1
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v30, %v27, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i32> %val3, <16 x i32> %val4
+ ret <16 x i32> %sel
+}
+
+define <16 x i64> @fun75(<16 x i8> %val1, <16 x i8> %val2, <16 x i64> %val3, <16 x i64> %val4) {
+; CHECK-LABEL: fun75:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v1, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vl %v2, 192(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v24, %v28, %v2, %v1
+; CHECK-NEXT: vpkf %v1, %v0, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v2, %v1
+; CHECK-NEXT: vpkg %v1, %v0, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vsel %v28, %v25, %v2, %v1
+; CHECK-NEXT: vl %v2, 240(%r15)
+; CHECK-NEXT: vsldb %v1, %v0, %v0, 6
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v30, %v27, %v2, %v1
+; CHECK-NEXT: vl %v2, 256(%r15)
+; CHECK-NEXT: vmrlg %v1, %v0, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v25, %v29, %v2, %v1
+; CHECK-NEXT: vl %v2, 272(%r15)
+; CHECK-NEXT: vsldb %v1, %v0, %v0, 10
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v27, %v31, %v2, %v1
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vsldb %v1, %v0, %v0, 12
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vsldb %v0, %v0, %v0, 14
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v29, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 304(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v31, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i64> %val3, <16 x i64> %val4
+ ret <16 x i64> %sel
+}
+
+define <16 x float> @fun76(<16 x i8> %val1, <16 x i8> %val2, <16 x float> %val3, <16 x float> %val4) {
+; CHECK-LABEL: fun76:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v1, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vsel %v24, %v28, %v29, %v1
+; CHECK-NEXT: vpkg %v1, %v0, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v31, %v1
+; CHECK-NEXT: vmrlg %v1, %v0, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vsldb %v0, %v0, %v0, 12
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vsel %v28, %v25, %v2, %v1
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v30, %v27, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x float> %val3, <16 x float> %val4
+ ret <16 x float> %sel
+}
+
+define <16 x double> @fun77(<16 x i8> %val1, <16 x i8> %val2, <16 x double> %val3, <16 x double> %val4) {
+; CHECK-LABEL: fun77:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqb %v0, %v24, %v26
+; CHECK-NEXT: vuphb %v1, %v0
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vl %v2, 192(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v24, %v28, %v2, %v1
+; CHECK-NEXT: vpkf %v1, %v0, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v2, %v1
+; CHECK-NEXT: vpkg %v1, %v0, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vsel %v28, %v25, %v2, %v1
+; CHECK-NEXT: vl %v2, 240(%r15)
+; CHECK-NEXT: vsldb %v1, %v0, %v0, 6
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v30, %v27, %v2, %v1
+; CHECK-NEXT: vl %v2, 256(%r15)
+; CHECK-NEXT: vmrlg %v1, %v0, %v0
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v25, %v29, %v2, %v1
+; CHECK-NEXT: vl %v2, 272(%r15)
+; CHECK-NEXT: vsldb %v1, %v0, %v0, 10
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v27, %v31, %v2, %v1
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vsldb %v1, %v0, %v0, 12
+; CHECK-NEXT: vuphb %v1, %v1
+; CHECK-NEXT: vsldb %v0, %v0, %v0, 14
+; CHECK-NEXT: vuphh %v1, %v1
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v29, %v3, %v2, %v1
+; CHECK-NEXT: vl %v1, 304(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v31, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x double> %val3, <16 x double> %val4
+ ret <16 x double> %sel
+}
+
+define <16 x i8> @fun78(<16 x i16> %val1, <16 x i16> %val2, <16 x i8> %val3, <16 x i8> %val4) {
+; CHECK-LABEL: fun78:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v26, %v30
+; CHECK-NEXT: vceqh %v1, %v24, %v28
+; CHECK-NEXT: vpkh %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %sel
+}
+
+define <16 x i16> @fun79(<16 x i16> %val1, <16 x i16> %val2, <16 x i16> %val3, <16 x i16> %val4) {
+; CHECK-LABEL: fun79:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v28
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v0
+; CHECK-NEXT: vceqh %v0, %v26, %v30
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i16> %val3, <16 x i16> %val4
+ ret <16 x i16> %sel
+}
+
+define <16 x i32> @fun80(<16 x i16> %val1, <16 x i16> %val2, <16 x i32> %val3, <16 x i32> %val4) {
+; CHECK-LABEL: fun80:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v28
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vuphh %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v2, %v1
+; CHECK-NEXT: vceqh %v1, %v26, %v30
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v28, %v29, %v3, %v2
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v26, %v27, %v2, %v0
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v30, %v31, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i32> %val3, <16 x i32> %val4
+ ret <16 x i32> %sel
+}
+
+define <16 x i64> @fun81(<16 x i16> %val1, <16 x i16> %val2, <16 x i64> %val3, <16 x i64> %val4) {
+; CHECK-LABEL: fun81:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v28
+; CHECK-NEXT: vuphh %v1, %v0
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v24, %v25, %v2, %v1
+; CHECK-NEXT: vceqh %v1, %v26, %v30
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vl %v3, 288(%r15)
+; CHECK-NEXT: vl %v4, 160(%r15)
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsel %v25, %v4, %v3, %v2
+; CHECK-NEXT: vpkg %v2, %v0, %v0
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vl %v3, 240(%r15)
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsel %v26, %v27, %v3, %v2
+; CHECK-NEXT: vmrlg %v2, %v0, %v0
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vsldb %v0, %v0, %v0, 12
+; CHECK-NEXT: vl %v3, 256(%r15)
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v28, %v29, %v3, %v2
+; CHECK-NEXT: vl %v2, 272(%r15)
+; CHECK-NEXT: vl %v3, 176(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v31, %v2, %v0
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vpkg %v0, %v1, %v1
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v27, %v3, %v2, %v0
+; CHECK-NEXT: vl %v2, 320(%r15)
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v29, %v3, %v2, %v0
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vsldb %v0, %v1, %v1, 12
+; CHECK-NEXT: vl %v1, 336(%r15)
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v31, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i64> %val3, <16 x i64> %val4
+ ret <16 x i64> %sel
+}
+
+define <16 x float> @fun82(<16 x i16> %val1, <16 x i16> %val2, <16 x float> %val3, <16 x float> %val4) {
+; CHECK-LABEL: fun82:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v28
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vuphh %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v2, %v1
+; CHECK-NEXT: vceqh %v1, %v26, %v30
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v28, %v29, %v3, %v2
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v26, %v27, %v2, %v0
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v30, %v31, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x float> %val3, <16 x float> %val4
+ ret <16 x float> %sel
+}
+
+define <16 x double> @fun83(<16 x i16> %val1, <16 x i16> %val2, <16 x double> %val3, <16 x double> %val4) {
+; CHECK-LABEL: fun83:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqh %v0, %v24, %v28
+; CHECK-NEXT: vuphh %v1, %v0
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v24, %v25, %v2, %v1
+; CHECK-NEXT: vceqh %v1, %v26, %v30
+; CHECK-NEXT: vuphh %v2, %v1
+; CHECK-NEXT: vl %v3, 288(%r15)
+; CHECK-NEXT: vl %v4, 160(%r15)
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsel %v25, %v4, %v3, %v2
+; CHECK-NEXT: vpkg %v2, %v0, %v0
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vl %v3, 240(%r15)
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vsel %v26, %v27, %v3, %v2
+; CHECK-NEXT: vmrlg %v2, %v0, %v0
+; CHECK-NEXT: vuphh %v2, %v2
+; CHECK-NEXT: vsldb %v0, %v0, %v0, 12
+; CHECK-NEXT: vl %v3, 256(%r15)
+; CHECK-NEXT: vuphf %v2, %v2
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vsel %v28, %v29, %v3, %v2
+; CHECK-NEXT: vl %v2, 272(%r15)
+; CHECK-NEXT: vl %v3, 176(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v31, %v2, %v0
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vpkg %v0, %v1, %v1
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v27, %v3, %v2, %v0
+; CHECK-NEXT: vl %v2, 320(%r15)
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v29, %v3, %v2, %v0
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vsldb %v0, %v1, %v1, 12
+; CHECK-NEXT: vl %v1, 336(%r15)
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v31, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i16> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x double> %val3, <16 x double> %val4
+ ret <16 x double> %sel
+}
+
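+; fun84-fun86: vsel selects bit by bit, so when the compared elements are wider
+; than the selected ones, the <4 x i32> masks from vceqf are first packed down
+; with vpkf (and vpkh for the i8 case) to the select's element width.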
+define <16 x i8> @fun84(<16 x i32> %val1, <16 x i32> %val2, <16 x i8> %val3, <16 x i8> %val4) {
+; CHECK-LABEL: fun84:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v30, %v31
+; CHECK-NEXT: vceqf %v1, %v28, %v29
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vceqf %v1, %v26, %v27
+; CHECK-NEXT: vceqf %v2, %v24, %v25
+; CHECK-NEXT: vpkf %v1, %v2, %v1
+; CHECK-NEXT: vpkh %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %sel
+}
+
+define <16 x i16> @fun85(<16 x i32> %val1, <16 x i32> %val2, <16 x i16> %val3, <16 x i16> %val4) {
+; CHECK-LABEL: fun85:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v0, %v26, %v27
+; CHECK-NEXT: vceqf %v1, %v24, %v25
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vceqf %v0, %v30, %v31
+; CHECK-NEXT: vceqf %v1, %v28, %v29
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i16> %val3, <16 x i16> %val4
+ ret <16 x i16> %sel
+}
+
+define <16 x i32> @fun86(<16 x i32> %val1, <16 x i32> %val2, <16 x i32> %val3, <16 x i32> %val4) {
+; CHECK-LABEL: fun86:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vceqf %v0, %v24, %v25
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vceqf %v0, %v26, %v27
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vl %v2, 192(%r15)
+; CHECK-NEXT: vceqf %v0, %v28, %v29
+; CHECK-NEXT: vsel %v28, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vceqf %v0, %v30, %v31
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i32> %val3, <16 x i32> %val4
+ ret <16 x i32> %sel
+}
+
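+; fun87: the word masks are sign-extended to doublewords with vuphf, after
+; vmrlg moves the low word pair into the high half where needed. The second
+; result is built in the scratch %v0 and only copied to %v28 with vlr once the
+; incoming argument value in %v28 has been consumed.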
+define <16 x i64> @fun87(<16 x i32> %val1, <16 x i32> %val2, <16 x i64> %val3, <16 x i64> %val4) {
+; CHECK-LABEL: fun87:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v1, %v24, %v25
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vuphf %v0, %v1
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vceqf %v2, %v26, %v27
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vl %v4, 192(%r15)
+; CHECK-NEXT: vuphf %v0, %v2
+; CHECK-NEXT: vsel %v0, %v4, %v3, %v0
+; CHECK-NEXT: vceqf %v3, %v28, %v29
+; CHECK-NEXT: vl %v5, 352(%r15)
+; CHECK-NEXT: vl %v6, 224(%r15)
+; CHECK-NEXT: vuphf %v4, %v3
+; CHECK-NEXT: vsel %v25, %v6, %v5, %v4
+; CHECK-NEXT: vceqf %v4, %v30, %v31
+; CHECK-NEXT: vl %v6, 384(%r15)
+; CHECK-NEXT: vl %v7, 256(%r15)
+; CHECK-NEXT: vuphf %v5, %v4
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vsel %v29, %v7, %v6, %v5
+; CHECK-NEXT: vl %v5, 304(%r15)
+; CHECK-NEXT: vl %v6, 176(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v26, %v6, %v5, %v1
+; CHECK-NEXT: vmrlg %v1, %v2, %v2
+; CHECK-NEXT: vl %v2, 336(%r15)
+; CHECK-NEXT: vl %v5, 208(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v30, %v5, %v2, %v1
+; CHECK-NEXT: vmrlg %v1, %v3, %v3
+; CHECK-NEXT: vl %v2, 368(%r15)
+; CHECK-NEXT: vl %v3, 240(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vlr %v28, %v0
+; CHECK-NEXT: vsel %v27, %v3, %v2, %v1
+; CHECK-NEXT: vl %v2, 400(%r15)
+; CHECK-NEXT: vl %v3, 272(%r15)
+; CHECK-NEXT: vmrlg %v1, %v4, %v4
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v31, %v3, %v2, %v1
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i64> %val3, <16 x i64> %val4
+ ret <16 x i64> %sel
+}
+
+define <16 x float> @fun88(<16 x i32> %val1, <16 x i32> %val2, <16 x float> %val3, <16 x float> %val4) {
+; CHECK-LABEL: fun88:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vceqf %v0, %v24, %v25
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vceqf %v0, %v26, %v27
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vl %v2, 192(%r15)
+; CHECK-NEXT: vceqf %v0, %v28, %v29
+; CHECK-NEXT: vsel %v28, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vceqf %v0, %v30, %v31
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x float> %val3, <16 x float> %val4
+ ret <16 x float> %sel
+}
+
+define <16 x double> @fun89(<16 x i32> %val1, <16 x i32> %val2, <16 x double> %val3, <16 x double> %val4) {
+; CHECK-LABEL: fun89:
+; CHECK: # BB#0:
+; CHECK-NEXT: vceqf %v1, %v24, %v25
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vuphf %v0, %v1
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vceqf %v2, %v26, %v27
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vl %v4, 192(%r15)
+; CHECK-NEXT: vuphf %v0, %v2
+; CHECK-NEXT: vsel %v0, %v4, %v3, %v0
+; CHECK-NEXT: vceqf %v3, %v28, %v29
+; CHECK-NEXT: vl %v5, 352(%r15)
+; CHECK-NEXT: vl %v6, 224(%r15)
+; CHECK-NEXT: vuphf %v4, %v3
+; CHECK-NEXT: vsel %v25, %v6, %v5, %v4
+; CHECK-NEXT: vceqf %v4, %v30, %v31
+; CHECK-NEXT: vl %v6, 384(%r15)
+; CHECK-NEXT: vl %v7, 256(%r15)
+; CHECK-NEXT: vuphf %v5, %v4
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vsel %v29, %v7, %v6, %v5
+; CHECK-NEXT: vl %v5, 304(%r15)
+; CHECK-NEXT: vl %v6, 176(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v26, %v6, %v5, %v1
+; CHECK-NEXT: vmrlg %v1, %v2, %v2
+; CHECK-NEXT: vl %v2, 336(%r15)
+; CHECK-NEXT: vl %v5, 208(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v30, %v5, %v2, %v1
+; CHECK-NEXT: vmrlg %v1, %v3, %v3
+; CHECK-NEXT: vl %v2, 368(%r15)
+; CHECK-NEXT: vl %v3, 240(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vlr %v28, %v0
+; CHECK-NEXT: vsel %v27, %v3, %v2, %v1
+; CHECK-NEXT: vl %v2, 400(%r15)
+; CHECK-NEXT: vl %v3, 272(%r15)
+; CHECK-NEXT: vmrlg %v1, %v4, %v4
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v31, %v3, %v2, %v1
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i32> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x double> %val3, <16 x double> %val4
+ ret <16 x double> %sel
+}
+
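+; fun90-fun92: a <16 x i64> operand occupies eight vector registers, so only
+; %val1 arrives in %v24-%v31; the remaining operands are passed on the stack
+; and reloaded with vl relative to the stack pointer %r15.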
+define <16 x i8> @fun90(<16 x i64> %val1, <16 x i64> %val2, <16 x i8> %val3, <16 x i8> %val4) {
+; CHECK-LABEL: fun90:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 272(%r15)
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vceqg %v0, %v31, %v0
+; CHECK-NEXT: vceqg %v1, %v29, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vceqg %v1, %v27, %v1
+; CHECK-NEXT: vceqg %v2, %v25, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v2, 192(%r15)
+; CHECK-NEXT: vceqg %v1, %v30, %v1
+; CHECK-NEXT: vceqg %v2, %v28, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vceqg %v2, %v26, %v2
+; CHECK-NEXT: vceqg %v3, %v24, %v3
+; CHECK-NEXT: vpkg %v2, %v3, %v2
+; CHECK-NEXT: vpkf %v1, %v2, %v1
+; CHECK-NEXT: vpkh %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 304(%r15)
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %sel
+}
+
+define <16 x i16> @fun91(<16 x i64> %val1, <16 x i64> %val2, <16 x i16> %val3, <16 x i16> %val4) {
+; CHECK-LABEL: fun91:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vceqg %v0, %v30, %v0
+; CHECK-NEXT: vceqg %v1, %v28, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vceqg %v1, %v26, %v1
+; CHECK-NEXT: vceqg %v2, %v24, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 320(%r15)
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 272(%r15)
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vceqg %v0, %v31, %v0
+; CHECK-NEXT: vceqg %v1, %v29, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vceqg %v1, %v27, %v1
+; CHECK-NEXT: vceqg %v2, %v25, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 336(%r15)
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i16> %val3, <16 x i16> %val4
+ ret <16 x i16> %sel
+}
+
+define <16 x i32> @fun92(<16 x i64> %val1, <16 x i64> %val2, <16 x i32> %val3, <16 x i32> %val4) {
+; CHECK-LABEL: fun92:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vl %v1, 160(%r15)
+; CHECK-NEXT: vceqg %v0, %v26, %v0
+; CHECK-NEXT: vceqg %v1, %v24, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 352(%r15)
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vceqg %v0, %v30, %v0
+; CHECK-NEXT: vceqg %v1, %v28, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 368(%r15)
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 240(%r15)
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vceqg %v0, %v27, %v0
+; CHECK-NEXT: vceqg %v1, %v25, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 384(%r15)
+; CHECK-NEXT: vl %v2, 320(%r15)
+; CHECK-NEXT: vsel %v28, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 272(%r15)
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vceqg %v0, %v31, %v0
+; CHECK-NEXT: vceqg %v1, %v29, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 400(%r15)
+; CHECK-NEXT: vl %v2, 336(%r15)
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i32> %val3, <16 x i32> %val4
+ ret <16 x i32> %sel
+}
+
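+; fun93 is the fully stack-bound case: besides %val1, all 24 remaining operand
+; vectors are loaded from 160(%r15) through 528(%r15).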
+define <16 x i64> @fun93(<16 x i64> %val1, <16 x i64> %val2, <16 x i64> %val3, <16 x i64> %val4) {
+; CHECK-LABEL: fun93:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 160(%r15)
+; CHECK-NEXT: vl %v1, 416(%r15)
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vceqg %v0, %v24, %v0
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vl %v1, 432(%r15)
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vceqg %v0, %v26, %v0
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 192(%r15)
+; CHECK-NEXT: vl %v1, 448(%r15)
+; CHECK-NEXT: vl %v2, 320(%r15)
+; CHECK-NEXT: vceqg %v0, %v28, %v0
+; CHECK-NEXT: vsel %v28, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vl %v1, 464(%r15)
+; CHECK-NEXT: vl %v2, 336(%r15)
+; CHECK-NEXT: vceqg %v0, %v30, %v0
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 224(%r15)
+; CHECK-NEXT: vl %v1, 480(%r15)
+; CHECK-NEXT: vl %v2, 352(%r15)
+; CHECK-NEXT: vceqg %v0, %v25, %v0
+; CHECK-NEXT: vsel %v25, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 240(%r15)
+; CHECK-NEXT: vl %v1, 496(%r15)
+; CHECK-NEXT: vl %v2, 368(%r15)
+; CHECK-NEXT: vceqg %v0, %v27, %v0
+; CHECK-NEXT: vsel %v27, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 256(%r15)
+; CHECK-NEXT: vceqg %v0, %v29, %v0
+; CHECK-NEXT: vl %v1, 512(%r15)
+; CHECK-NEXT: vl %v2, 384(%r15)
+; CHECK-NEXT: vsel %v29, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 272(%r15)
+; CHECK-NEXT: vceqg %v0, %v31, %v0
+; CHECK-NEXT: vl %v1, 528(%r15)
+; CHECK-NEXT: vl %v2, 400(%r15)
+; CHECK-NEXT: vsel %v31, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i64> %val3, <16 x i64> %val4
+ ret <16 x i64> %sel
+}
+
+define <16 x float> @fun94(<16 x i64> %val1, <16 x i64> %val2, <16 x float> %val3, <16 x float> %val4) {
+; CHECK-LABEL: fun94:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vl %v1, 160(%r15)
+; CHECK-NEXT: vceqg %v0, %v26, %v0
+; CHECK-NEXT: vceqg %v1, %v24, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 352(%r15)
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vceqg %v0, %v30, %v0
+; CHECK-NEXT: vceqg %v1, %v28, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 368(%r15)
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 240(%r15)
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vceqg %v0, %v27, %v0
+; CHECK-NEXT: vceqg %v1, %v25, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 384(%r15)
+; CHECK-NEXT: vl %v2, 320(%r15)
+; CHECK-NEXT: vsel %v28, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 272(%r15)
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vceqg %v0, %v31, %v0
+; CHECK-NEXT: vceqg %v1, %v29, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 400(%r15)
+; CHECK-NEXT: vl %v2, 336(%r15)
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x float> %val3, <16 x float> %val4
+ ret <16 x float> %sel
+}
+
+define <16 x double> @fun95(<16 x i64> %val1, <16 x i64> %val2, <16 x double> %val3, <16 x double> %val4) {
+; CHECK-LABEL: fun95:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 160(%r15)
+; CHECK-NEXT: vl %v1, 416(%r15)
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vceqg %v0, %v24, %v0
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vl %v1, 432(%r15)
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vceqg %v0, %v26, %v0
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 192(%r15)
+; CHECK-NEXT: vl %v1, 448(%r15)
+; CHECK-NEXT: vl %v2, 320(%r15)
+; CHECK-NEXT: vceqg %v0, %v28, %v0
+; CHECK-NEXT: vsel %v28, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vl %v1, 464(%r15)
+; CHECK-NEXT: vl %v2, 336(%r15)
+; CHECK-NEXT: vceqg %v0, %v30, %v0
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 224(%r15)
+; CHECK-NEXT: vl %v1, 480(%r15)
+; CHECK-NEXT: vl %v2, 352(%r15)
+; CHECK-NEXT: vceqg %v0, %v25, %v0
+; CHECK-NEXT: vsel %v25, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 240(%r15)
+; CHECK-NEXT: vl %v1, 496(%r15)
+; CHECK-NEXT: vl %v2, 368(%r15)
+; CHECK-NEXT: vceqg %v0, %v27, %v0
+; CHECK-NEXT: vsel %v27, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 256(%r15)
+; CHECK-NEXT: vceqg %v0, %v29, %v0
+; CHECK-NEXT: vl %v1, 512(%r15)
+; CHECK-NEXT: vl %v2, 384(%r15)
+; CHECK-NEXT: vsel %v29, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 272(%r15)
+; CHECK-NEXT: vceqg %v0, %v31, %v0
+; CHECK-NEXT: vl %v1, 528(%r15)
+; CHECK-NEXT: vl %v2, 400(%r15)
+; CHECK-NEXT: vsel %v31, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i64> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x double> %val3, <16 x double> %val4
+ ret <16 x double> %sel
+}
+
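+; fun96 onward: there is no single-precision vector compare at this ISA level,
+; so the float elements are split into register halves with vmrhf/vmrlf,
+; widened to double with vldeb, compared with vfchdb, and the two doubleword
+; masks repacked to words with vpkg.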
+define <2 x i8> @fun96(<2 x float> %val1, <2 x float> %val2, <2 x i8> %val3, <2 x i8> %val4) {
+; CHECK-LABEL: fun96:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v26, %v26
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: larl %r1, .LCPI96_0
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i8> %val3, <2 x i8> %val4
+ ret <2 x i8> %sel
+}
+
+define <2 x i16> @fun97(<2 x float> %val1, <2 x float> %val2, <2 x i16> %val3, <2 x i16> %val4) {
+; CHECK-LABEL: fun97:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v26, %v26
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i16> %val3, <2 x i16> %val4
+ ret <2 x i16> %sel
+}
+
+define <2 x i32> @fun98(<2 x float> %val1, <2 x float> %val2, <2 x i32> %val3, <2 x i32> %val4) {
+; CHECK-LABEL: fun98:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v26, %v26
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i32> %val3, <2 x i32> %val4
+ ret <2 x i32> %sel
+}
+
+define <2 x i64> @fun99(<2 x float> %val1, <2 x float> %val2, <2 x i64> %val3, <2 x i64> %val4) {
+; CHECK-LABEL: fun99:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v26, %v26
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %sel
+}
+
+define <2 x float> @fun100(<2 x float> %val1, <2 x float> %val2, <2 x float> %val3, <2 x float> %val4) {
+; CHECK-LABEL: fun100:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v26, %v26
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x float> %val3, <2 x float> %val4
+ ret <2 x float> %sel
+}
+
+define <2 x double> @fun101(<2 x float> %val1, <2 x float> %val2, <2 x double> %val3, <2 x double> %val4) {
+; CHECK-LABEL: fun101:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v26, %v26
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <2 x float> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
+ ret <2 x double> %sel
+}
+
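+; fun102-fun103: <2 x double> compares map directly to vfchdb; the doubleword
+; mask is narrowed with vperm, whose byte-selection control is materialized
+; either with vrepih when it fits a replicated halfword immediate (fun102) or
+; from the constant pool via larl/vl (fun103).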
+define <2 x i8> @fun102(<2 x double> %val1, <2 x double> %val2, <2 x i8> %val3, <2 x i8> %val4) {
+; CHECK-LABEL: fun102:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v24, %v26
+; CHECK-NEXT: vrepih %v1, 1807
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i8> %val3, <2 x i8> %val4
+ ret <2 x i8> %sel
+}
+
+define <2 x i16> @fun103(<2 x double> %val1, <2 x double> %val2, <2 x i16> %val3, <2 x i16> %val4) {
+; CHECK-LABEL: fun103:
+; CHECK: # BB#0:
+; CHECK-NEXT: larl %r1, .LCPI103_0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vfchdb %v0, %v24, %v26
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i16> %val3, <2 x i16> %val4
+ ret <2 x i16> %sel
+}
+
+define <2 x i32> @fun104(<2 x double> %val1, <2 x double> %val2, <2 x i32> %val3, <2 x i32> %val4) {
+; CHECK-LABEL: fun104:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v24, %v26
+; CHECK-NEXT: vpkg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i32> %val3, <2 x i32> %val4
+ ret <2 x i32> %sel
+}
+
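+; fun105/fun107: when compare and select element widths match, the vfchdb mask
+; feeds vsel directly with no repacking.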
+define <2 x i64> @fun105(<2 x double> %val1, <2 x double> %val2, <2 x i64> %val3, <2 x i64> %val4) {
+; CHECK-LABEL: fun105:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %sel
+}
+
+define <2 x float> @fun106(<2 x double> %val1, <2 x double> %val2, <2 x float> %val3, <2 x float> %val4) {
+; CHECK-LABEL: fun106:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v24, %v26
+; CHECK-NEXT: vpkg %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x float> %val3, <2 x float> %val4
+ ret <2 x float> %sel
+}
+
+define <2 x double> @fun107(<2 x double> %val1, <2 x double> %val2, <2 x double> %val3, <2 x double> %val4) {
+; CHECK-LABEL: fun107:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <2 x double> %val1, %val2
+ %sel = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
+ ret <2 x double> %sel
+}
+
+define <4 x i8> @fun108(<4 x float> %val1, <4 x float> %val2, <4 x i8> %val3, <4 x i8> %val4) {
+; CHECK-LABEL: fun108:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v26, %v26
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: larl %r1, .LCPI108_0
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 0(%r1)
+; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i8> %val3, <4 x i8> %val4
+ ret <4 x i8> %sel
+}
+
+define <4 x i16> @fun109(<4 x float> %val1, <4 x float> %val2, <4 x i16> %val3, <4 x i16> %val4) {
+; CHECK-LABEL: fun109:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v26, %v26
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vpkf %v0, %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i16> %val3, <4 x i16> %val4
+ ret <4 x i16> %sel
+}
+
+define <4 x i32> @fun110(<4 x float> %val1, <4 x float> %val2, <4 x i32> %val3, <4 x i32> %val4) {
+; CHECK-LABEL: fun110:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v26, %v26
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %sel
+}
+
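+; fun111/fun113: the packed word mask is widened back per half, vuphf for the
+; high pair and vmrlg followed by vuphf for the low pair, giving one doubleword
+; mask per 128-bit result register.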
+define <4 x i64> @fun111(<4 x float> %val1, <4 x float> %val2, <4 x i64> %val3, <4 x i64> %val4) {
+; CHECK-LABEL: fun111:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v26, %v26
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v25, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i64> %val3, <4 x i64> %val4
+ ret <4 x i64> %sel
+}
+
+define <4 x float> @fun112(<4 x float> %val1, <4 x float> %val2, <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: fun112:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v26, %v26
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v30, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %sel
+}
+
+define <4 x double> @fun113(<4 x float> %val1, <4 x float> %val2, <4 x double> %val3, <4 x double> %val4) {
+; CHECK-LABEL: fun113:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v26, %v26
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v24, %v28, %v25, %v1
+; CHECK-NEXT: vsel %v26, %v30, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x double> %val3, <4 x double> %val4
+ ret <4 x double> %sel
+}
+
+define <4 x i8> @fun114(<4 x double> %val1, <4 x double> %val2, <4 x i8> %val3, <4 x i8> %val4) {
+; CHECK-LABEL: fun114:
+; CHECK: # BB#0:
+; CHECK-NEXT: larl %r1, .LCPI114_0
+; CHECK-NEXT: vl %v2, 0(%r1)
+; CHECK-NEXT: vfchdb %v0, %v26, %v30
+; CHECK-NEXT: vfchdb %v1, %v24, %v28
+; CHECK-NEXT: vperm %v0, %v1, %v0, %v2
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i8> %val3, <4 x i8> %val4
+ ret <4 x i8> %sel
+}
+
+define <4 x i16> @fun115(<4 x double> %val1, <4 x double> %val2, <4 x i16> %val3, <4 x i16> %val4) {
+; CHECK-LABEL: fun115:
+; CHECK: # BB#0:
+; CHECK-NEXT: larl %r1, .LCPI115_0
+; CHECK-NEXT: vl %v2, 0(%r1)
+; CHECK-NEXT: vfchdb %v0, %v26, %v30
+; CHECK-NEXT: vfchdb %v1, %v24, %v28
+; CHECK-NEXT: vperm %v0, %v1, %v0, %v2
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i16> %val3, <4 x i16> %val4
+ ret <4 x i16> %sel
+}
+
+define <4 x i32> @fun116(<4 x double> %val1, <4 x double> %val2, <4 x i32> %val3, <4 x i32> %val4) {
+; CHECK-LABEL: fun116:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v26, %v30
+; CHECK-NEXT: vfchdb %v1, %v24, %v28
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %sel
+}
+
+define <4 x i64> @fun117(<4 x double> %val1, <4 x double> %val2, <4 x i64> %val3, <4 x i64> %val4) {
+; CHECK-LABEL: fun117:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v24, %v28
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v0
+; CHECK-NEXT: vfchdb %v0, %v26, %v30
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x i64> %val3, <4 x i64> %val4
+ ret <4 x i64> %sel
+}
+
+define <4 x float> @fun118(<4 x double> %val1, <4 x double> %val2, <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: fun118:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v26, %v30
+; CHECK-NEXT: vfchdb %v1, %v24, %v28
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %sel
+}
+
+define <4 x double> @fun119(<4 x double> %val1, <4 x double> %val2, <4 x double> %val3, <4 x double> %val4) {
+; CHECK-LABEL: fun119:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v24, %v28
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v0
+; CHECK-NEXT: vfchdb %v0, %v26, %v30
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <4 x double> %val1, %val2
+ %sel = select <4 x i1> %cmp, <4 x double> %val3, <4 x double> %val4
+ ret <4 x double> %sel
+}
+
+define <8 x i8> @fun120(<8 x float> %val1, <8 x float> %val2, <8 x i8> %val3, <8 x i8> %val4) {
+; CHECK-LABEL: fun120:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: larl %r1, .LCPI120_0
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vmrlf %v2, %v24, %v24
+; CHECK-NEXT: vmrhf %v3, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 0(%r1)
+; CHECK-NEXT: vperm %v0, %v1, %v0, %v2
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i8> %val3, <8 x i8> %val4
+ ret <8 x i8> %sel
+}
+
+define <8 x i16> @fun121(<8 x float> %val1, <8 x float> %val2, <8 x i16> %val3, <8 x i16> %val4) {
+; CHECK-LABEL: fun121:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v24, %v24
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vmrlf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v27, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %sel
+}
+
+define <8 x i32> @fun122(<8 x float> %val1, <8 x float> %val2, <8 x i32> %val3, <8 x i32> %val4) {
+; CHECK-LABEL: fun122:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v28, %v28
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v28, %v28
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v0
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i32> %val3, <8 x i32> %val4
+ ret <8 x i32> %sel
+}
+
+define <8 x i64> @fun123(<8 x float> %val1, <8 x float> %val2, <8 x i64> %val3, <8 x i64> %val4) {
+; CHECK-LABEL: fun123:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v28, %v28
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v28, %v28
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v26, %v26
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v2, %v1
+; CHECK-NEXT: vmrlf %v1, %v30, %v30
+; CHECK-NEXT: vmrlf %v2, %v26, %v26
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v30, %v30
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vuphf %v2, %v1
+; CHECK-NEXT: vsel %v28, %v29, %v3, %v2
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v27, %v2, %v0
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v31, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i64> %val3, <8 x i64> %val4
+ ret <8 x i64> %sel
+}
+
+define <8 x float> @fun124(<8 x float> %val1, <8 x float> %val2, <8 x float> %val3, <8 x float> %val4) {
+; CHECK-LABEL: fun124:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v28, %v28
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v28, %v28
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v29, %v0
+; CHECK-NEXT: vmrlf %v0, %v30, %v30
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v30, %v30
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vsel %v26, %v27, %v31, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x float> %val3, <8 x float> %val4
+ ret <8 x float> %sel
+}
+
+define <8 x double> @fun125(<8 x float> %val1, <8 x float> %val2, <8 x double> %val3, <8 x double> %val4) {
+; CHECK-LABEL: fun125:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v28, %v28
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v28, %v28
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v26, %v26
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vuphf %v1, %v0
+; CHECK-NEXT: vsel %v24, %v25, %v2, %v1
+; CHECK-NEXT: vmrlf %v1, %v30, %v30
+; CHECK-NEXT: vmrlf %v2, %v26, %v26
+; CHECK-NEXT: vmrlg %v0, %v0, %v0
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v30, %v30
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vl %v3, 192(%r15)
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vuphf %v2, %v1
+; CHECK-NEXT: vsel %v28, %v29, %v3, %v2
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v27, %v2, %v0
+; CHECK-NEXT: vmrlg %v0, %v1, %v1
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vuphf %v0, %v0
+; CHECK-NEXT: vsel %v30, %v31, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <8 x float> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x double> %val3, <8 x double> %val4
+ ret <8 x double> %sel
+}
+
+define <8 x i8> @fun126(<8 x double> %val1, <8 x double> %val2, <8 x i8> %val3, <8 x i8> %val4) {
+; CHECK-LABEL: fun126:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v30, %v31
+; CHECK-NEXT: vfchdb %v1, %v28, %v29
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vfchdb %v1, %v26, %v27
+; CHECK-NEXT: vfchdb %v2, %v24, %v25
+; CHECK-NEXT: larl %r1, .LCPI126_0
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 0(%r1)
+; CHECK-NEXT: vperm %v0, %v1, %v0, %v2
+; CHECK-NEXT: vlrepg %v1, 168(%r15)
+; CHECK-NEXT: vlrepg %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i8> %val3, <8 x i8> %val4
+ ret <8 x i8> %sel
+}
+
+define <8 x i16> @fun127(<8 x double> %val1, <8 x double> %val2, <8 x i16> %val3, <8 x i16> %val4) {
+; CHECK-LABEL: fun127:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v30, %v31
+; CHECK-NEXT: vfchdb %v1, %v28, %v29
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vfchdb %v1, %v26, %v27
+; CHECK-NEXT: vfchdb %v2, %v24, %v25
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %sel
+}
+
+define <8 x i32> @fun128(<8 x double> %val1, <8 x double> %val2, <8 x i32> %val3, <8 x i32> %val4) {
+; CHECK-LABEL: fun128:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v26, %v27
+; CHECK-NEXT: vfchdb %v1, %v24, %v25
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vfchdb %v0, %v30, %v31
+; CHECK-NEXT: vfchdb %v1, %v28, %v29
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i32> %val3, <8 x i32> %val4
+ ret <8 x i32> %sel
+}
+
+define <8 x i64> @fun129(<8 x double> %val1, <8 x double> %val2, <8 x i64> %val3, <8 x i64> %val4) {
+; CHECK-LABEL: fun129:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vfchdb %v0, %v24, %v25
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vfchdb %v0, %v26, %v27
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vl %v2, 192(%r15)
+; CHECK-NEXT: vfchdb %v0, %v28, %v29
+; CHECK-NEXT: vsel %v28, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vfchdb %v0, %v30, %v31
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x i64> %val3, <8 x i64> %val4
+ ret <8 x i64> %sel
+}
+
+define <8 x float> @fun130(<8 x double> %val1, <8 x double> %val2, <8 x float> %val3, <8 x float> %val4) {
+; CHECK-LABEL: fun130:
+; CHECK: # BB#0:
+; CHECK-NEXT: vfchdb %v0, %v26, %v27
+; CHECK-NEXT: vfchdb %v1, %v24, %v25
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vfchdb %v0, %v30, %v31
+; CHECK-NEXT: vfchdb %v1, %v28, %v29
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x float> %val3, <8 x float> %val4
+ ret <8 x float> %sel
+}
+
+define <8 x double> @fun131(<8 x double> %val1, <8 x double> %val2, <8 x double> %val3, <8 x double> %val4) {
+; CHECK-LABEL: fun131:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vfchdb %v0, %v24, %v25
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vfchdb %v0, %v26, %v27
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vl %v2, 192(%r15)
+; CHECK-NEXT: vfchdb %v0, %v28, %v29
+; CHECK-NEXT: vsel %v28, %v2, %v1, %v0
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vfchdb %v0, %v30, %v31
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <8 x double> %val1, %val2
+ %sel = select <8 x i1> %cmp, <8 x double> %val3, <8 x double> %val4
+ ret <8 x double> %sel
+}
+
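+; fun132-fun137: the <16 x float> cases combine both expansions above; every
+; 4-float register goes through the vmrhf/vmrlf + vldeb + vfchdb sequence and
+; the resulting word masks are then packed or unpacked to the select width.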
+define <16 x i8> @fun132(<16 x float> %val1, <16 x float> %val2, <16 x i8> %val3, <16 x i8> %val4) {
+; CHECK-LABEL: fun132:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v31, %v31
+; CHECK-NEXT: vmrlf %v1, %v30, %v30
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v31, %v31
+; CHECK-NEXT: vmrhf %v2, %v30, %v30
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v28, %v28
+; CHECK-NEXT: vmrhf %v4, %v24, %v24
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v29, %v29
+; CHECK-NEXT: vmrlf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v29, %v29
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v27, %v27
+; CHECK-NEXT: vmrlf %v2, %v26, %v26
+; CHECK-NEXT: vmrhf %v3, %v26, %v26
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v27, %v27
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vmrlf %v2, %v25, %v25
+; CHECK-NEXT: vmrlf %v3, %v24, %v24
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vmrhf %v3, %v25, %v25
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v3, %v4, %v3
+; CHECK-NEXT: vpkg %v2, %v3, %v2
+; CHECK-NEXT: vpkf %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vpkh %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %sel
+}
+
+define <16 x i16> @fun133(<16 x float> %val1, <16 x float> %val2, <16 x i16> %val3, <16 x i16> %val4) {
+; CHECK-LABEL: fun133:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v27, %v27
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v27, %v27
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vmrhf %v3, %v24, %v24
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v25, %v25
+; CHECK-NEXT: vmrlf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v25, %v25
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vmrlf %v0, %v31, %v31
+; CHECK-NEXT: vmrlf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v30, %v30
+; CHECK-NEXT: vmrhf %v3, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v31, %v31
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vmrlf %v1, %v29, %v29
+; CHECK-NEXT: vmrlf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vmrhf %v2, %v29, %v29
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i16> %val3, <16 x i16> %val4
+ ret <16 x i16> %sel
+}
+
+define <16 x i32> @fun134(<16 x float> %val1, <16 x float> %val2, <16 x i32> %val3, <16 x i32> %val4) {
+; CHECK-LABEL: fun134:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v25, %v25
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v25, %v25
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vmrlf %v0, %v27, %v27
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v27, %v27
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: vmrlf %v0, %v29, %v29
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v29, %v29
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 192(%r15)
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vsel %v28, %v2, %v1, %v0
+; CHECK-NEXT: vmrlf %v0, %v31, %v31
+; CHECK-NEXT: vmrlf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v30, %v30
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v31, %v31
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i32> %val3, <16 x i32> %val4
+ ret <16 x i32> %sel
+}
+
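+; fun135/fun137 also stress register pressure: eight result vectors stream in
+; from the stack while the masks are widened, and %v28 is again freed by
+; building its value in %v0 and copying it late with vlr.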
+define <16 x i64> @fun135(<16 x float> %val1, <16 x float> %val2, <16 x i64> %val3, <16 x i64> %val4) {
+; CHECK-LABEL: fun135:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v25, %v25
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v25, %v25
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vl %v4, 192(%r15)
+; CHECK-NEXT: vl %v6, 224(%r15)
+; CHECK-NEXT: vl %v7, 256(%r15)
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vpkg %v1, %v1, %v0
+; CHECK-NEXT: vuphf %v0, %v1
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vmrlf %v0, %v27, %v27
+; CHECK-NEXT: vmrlf %v2, %v26, %v26
+; CHECK-NEXT: vmrhf %v3, %v26, %v26
+; CHECK-NEXT: vmrhf %v5, %v28, %v28
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v0, %v2, %v0
+; CHECK-NEXT: vmrhf %v2, %v27, %v27
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vpkg %v2, %v2, %v0
+; CHECK-NEXT: vuphf %v0, %v2
+; CHECK-NEXT: vsel %v0, %v4, %v3, %v0
+; CHECK-NEXT: vmrlf %v3, %v29, %v29
+; CHECK-NEXT: vmrlf %v4, %v28, %v28
+; CHECK-NEXT: vlr %v28, %v0
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v3, %v4, %v3
+; CHECK-NEXT: vmrhf %v4, %v29, %v29
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vldeb %v5, %v5
+; CHECK-NEXT: vfchdb %v4, %v5, %v4
+; CHECK-NEXT: vl %v5, 352(%r15)
+; CHECK-NEXT: vpkg %v3, %v4, %v3
+; CHECK-NEXT: vuphf %v4, %v3
+; CHECK-NEXT: vsel %v25, %v6, %v5, %v4
+; CHECK-NEXT: vmrlf %v4, %v31, %v31
+; CHECK-NEXT: vmrlf %v5, %v30, %v30
+; CHECK-NEXT: vmrhf %v6, %v30, %v30
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vldeb %v5, %v5
+; CHECK-NEXT: vfchdb %v4, %v5, %v4
+; CHECK-NEXT: vmrhf %v5, %v31, %v31
+; CHECK-NEXT: vldeb %v5, %v5
+; CHECK-NEXT: vldeb %v6, %v6
+; CHECK-NEXT: vfchdb %v5, %v6, %v5
+; CHECK-NEXT: vl %v6, 384(%r15)
+; CHECK-NEXT: vpkg %v4, %v5, %v4
+; CHECK-NEXT: vuphf %v5, %v4
+; CHECK-NEXT: vsel %v29, %v7, %v6, %v5
+; CHECK-NEXT: vl %v5, 304(%r15)
+; CHECK-NEXT: vl %v6, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v6, %v5, %v1
+; CHECK-NEXT: vl %v5, 208(%r15)
+; CHECK-NEXT: vmrlg %v1, %v2, %v2
+; CHECK-NEXT: vl %v2, 336(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v30, %v5, %v2, %v1
+; CHECK-NEXT: vl %v2, 368(%r15)
+; CHECK-NEXT: vmrlg %v1, %v3, %v3
+; CHECK-NEXT: vl %v3, 240(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v27, %v3, %v2, %v1
+; CHECK-NEXT: vl %v2, 400(%r15)
+; CHECK-NEXT: vl %v3, 272(%r15)
+; CHECK-NEXT: vmrlg %v1, %v4, %v4
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v31, %v3, %v2, %v1
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i64> %val3, <16 x i64> %val4
+ ret <16 x i64> %sel
+}
+
+define <16 x float> @fun136(<16 x float> %val1, <16 x float> %val2, <16 x float> %val3, <16 x float> %val4) {
+; CHECK-LABEL: fun136:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v25, %v25
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v25, %v25
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vmrlf %v0, %v27, %v27
+; CHECK-NEXT: vmrlf %v1, %v26, %v26
+; CHECK-NEXT: vmrhf %v2, %v26, %v26
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v27, %v27
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: vmrlf %v0, %v29, %v29
+; CHECK-NEXT: vmrlf %v1, %v28, %v28
+; CHECK-NEXT: vmrhf %v2, %v28, %v28
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v29, %v29
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 192(%r15)
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vsel %v28, %v2, %v1, %v0
+; CHECK-NEXT: vmrlf %v0, %v31, %v31
+; CHECK-NEXT: vmrlf %v1, %v30, %v30
+; CHECK-NEXT: vmrhf %v2, %v30, %v30
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v31, %v31
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 208(%r15)
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 272(%r15)
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x float> %val3, <16 x float> %val4
+ ret <16 x float> %sel
+}
+
+define <16 x double> @fun137(<16 x float> %val1, <16 x float> %val2, <16 x double> %val3, <16 x double> %val4) {
+; CHECK-LABEL: fun137:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmrlf %v0, %v25, %v25
+; CHECK-NEXT: vmrlf %v1, %v24, %v24
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vfchdb %v0, %v1, %v0
+; CHECK-NEXT: vmrhf %v1, %v25, %v25
+; CHECK-NEXT: vmrhf %v2, %v24, %v24
+; CHECK-NEXT: vldeb %v1, %v1
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vl %v4, 192(%r15)
+; CHECK-NEXT: vl %v6, 224(%r15)
+; CHECK-NEXT: vl %v7, 256(%r15)
+; CHECK-NEXT: vfchdb %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vpkg %v1, %v1, %v0
+; CHECK-NEXT: vuphf %v0, %v1
+; CHECK-NEXT: vsel %v24, %v3, %v2, %v0
+; CHECK-NEXT: vmrlf %v0, %v27, %v27
+; CHECK-NEXT: vmrlf %v2, %v26, %v26
+; CHECK-NEXT: vmrhf %v3, %v26, %v26
+; CHECK-NEXT: vmrhf %v5, %v28, %v28
+; CHECK-NEXT: vmrlg %v1, %v1, %v1
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vldeb %v0, %v0
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vfchdb %v0, %v2, %v0
+; CHECK-NEXT: vmrhf %v2, %v27, %v27
+; CHECK-NEXT: vldeb %v2, %v2
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vfchdb %v2, %v3, %v2
+; CHECK-NEXT: vl %v3, 320(%r15)
+; CHECK-NEXT: vpkg %v2, %v2, %v0
+; CHECK-NEXT: vuphf %v0, %v2
+; CHECK-NEXT: vsel %v0, %v4, %v3, %v0
+; CHECK-NEXT: vmrlf %v3, %v29, %v29
+; CHECK-NEXT: vmrlf %v4, %v28, %v28
+; CHECK-NEXT: vlr %v28, %v0
+; CHECK-NEXT: vldeb %v3, %v3
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vfchdb %v3, %v4, %v3
+; CHECK-NEXT: vmrhf %v4, %v29, %v29
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vldeb %v5, %v5
+; CHECK-NEXT: vfchdb %v4, %v5, %v4
+; CHECK-NEXT: vl %v5, 352(%r15)
+; CHECK-NEXT: vpkg %v3, %v4, %v3
+; CHECK-NEXT: vuphf %v4, %v3
+; CHECK-NEXT: vsel %v25, %v6, %v5, %v4
+; CHECK-NEXT: vmrlf %v4, %v31, %v31
+; CHECK-NEXT: vmrlf %v5, %v30, %v30
+; CHECK-NEXT: vmrhf %v6, %v30, %v30
+; CHECK-NEXT: vldeb %v4, %v4
+; CHECK-NEXT: vldeb %v5, %v5
+; CHECK-NEXT: vfchdb %v4, %v5, %v4
+; CHECK-NEXT: vmrhf %v5, %v31, %v31
+; CHECK-NEXT: vldeb %v5, %v5
+; CHECK-NEXT: vldeb %v6, %v6
+; CHECK-NEXT: vfchdb %v5, %v6, %v5
+; CHECK-NEXT: vl %v6, 384(%r15)
+; CHECK-NEXT: vpkg %v4, %v5, %v4
+; CHECK-NEXT: vuphf %v5, %v4
+; CHECK-NEXT: vsel %v29, %v7, %v6, %v5
+; CHECK-NEXT: vl %v5, 304(%r15)
+; CHECK-NEXT: vl %v6, 176(%r15)
+; CHECK-NEXT: vsel %v26, %v6, %v5, %v1
+; CHECK-NEXT: vl %v5, 208(%r15)
+; CHECK-NEXT: vmrlg %v1, %v2, %v2
+; CHECK-NEXT: vl %v2, 336(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v30, %v5, %v2, %v1
+; CHECK-NEXT: vl %v2, 368(%r15)
+; CHECK-NEXT: vmrlg %v1, %v3, %v3
+; CHECK-NEXT: vl %v3, 240(%r15)
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v27, %v3, %v2, %v1
+; CHECK-NEXT: vl %v2, 400(%r15)
+; CHECK-NEXT: vl %v3, 272(%r15)
+; CHECK-NEXT: vmrlg %v1, %v4, %v4
+; CHECK-NEXT: vuphf %v1, %v1
+; CHECK-NEXT: vsel %v31, %v3, %v2, %v1
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <16 x float> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x double> %val3, <16 x double> %val4
+ ret <16 x double> %sel
+}
+
+define <16 x i8> @fun138(<16 x double> %val1, <16 x double> %val2, <16 x i8> %val3, <16 x i8> %val4) {
+; CHECK-LABEL: fun138:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 272(%r15)
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vfchdb %v0, %v31, %v0
+; CHECK-NEXT: vfchdb %v1, %v29, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vfchdb %v1, %v27, %v1
+; CHECK-NEXT: vfchdb %v2, %v25, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 208(%r15)
+; CHECK-NEXT: vl %v2, 192(%r15)
+; CHECK-NEXT: vfchdb %v1, %v30, %v1
+; CHECK-NEXT: vfchdb %v2, %v28, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vl %v2, 176(%r15)
+; CHECK-NEXT: vl %v3, 160(%r15)
+; CHECK-NEXT: vfchdb %v2, %v26, %v2
+; CHECK-NEXT: vfchdb %v3, %v24, %v3
+; CHECK-NEXT: vpkg %v2, %v3, %v2
+; CHECK-NEXT: vpkf %v1, %v2, %v1
+; CHECK-NEXT: vpkh %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 304(%r15)
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %sel
+}
+
+define <16 x i16> @fun139(<16 x double> %val1, <16 x double> %val2, <16 x i16> %val3, <16 x i16> %val4) {
+; CHECK-LABEL: fun139:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vfchdb %v0, %v30, %v0
+; CHECK-NEXT: vfchdb %v1, %v28, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 176(%r15)
+; CHECK-NEXT: vl %v2, 160(%r15)
+; CHECK-NEXT: vfchdb %v1, %v26, %v1
+; CHECK-NEXT: vfchdb %v2, %v24, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 320(%r15)
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 272(%r15)
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vfchdb %v0, %v31, %v0
+; CHECK-NEXT: vfchdb %v1, %v29, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 240(%r15)
+; CHECK-NEXT: vl %v2, 224(%r15)
+; CHECK-NEXT: vfchdb %v1, %v27, %v1
+; CHECK-NEXT: vfchdb %v2, %v25, %v2
+; CHECK-NEXT: vpkg %v1, %v2, %v1
+; CHECK-NEXT: vpkf %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 336(%r15)
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i16> %val3, <16 x i16> %val4
+ ret <16 x i16> %sel
+}
+
+define <16 x i32> @fun140(<16 x double> %val1, <16 x double> %val2, <16 x i32> %val3, <16 x i32> %val4) {
+; CHECK-LABEL: fun140:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vl %v1, 160(%r15)
+; CHECK-NEXT: vfchdb %v0, %v26, %v0
+; CHECK-NEXT: vfchdb %v1, %v24, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 352(%r15)
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vfchdb %v0, %v30, %v0
+; CHECK-NEXT: vfchdb %v1, %v28, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 368(%r15)
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 240(%r15)
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vfchdb %v0, %v27, %v0
+; CHECK-NEXT: vfchdb %v1, %v25, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 384(%r15)
+; CHECK-NEXT: vl %v2, 320(%r15)
+; CHECK-NEXT: vsel %v28, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 272(%r15)
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vfchdb %v0, %v31, %v0
+; CHECK-NEXT: vfchdb %v1, %v29, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 400(%r15)
+; CHECK-NEXT: vl %v2, 336(%r15)
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i32> %val3, <16 x i32> %val4
+ ret <16 x i32> %sel
+}
+
+define <16 x i64> @fun141(<16 x double> %val1, <16 x double> %val2, <16 x i64> %val3, <16 x i64> %val4) {
+; CHECK-LABEL: fun141:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 160(%r15)
+; CHECK-NEXT: vl %v1, 416(%r15)
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vfchdb %v0, %v24, %v0
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vl %v1, 432(%r15)
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vfchdb %v0, %v26, %v0
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 192(%r15)
+; CHECK-NEXT: vl %v1, 448(%r15)
+; CHECK-NEXT: vl %v2, 320(%r15)
+; CHECK-NEXT: vfchdb %v0, %v28, %v0
+; CHECK-NEXT: vsel %v28, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vl %v1, 464(%r15)
+; CHECK-NEXT: vl %v2, 336(%r15)
+; CHECK-NEXT: vfchdb %v0, %v30, %v0
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 224(%r15)
+; CHECK-NEXT: vl %v1, 480(%r15)
+; CHECK-NEXT: vl %v2, 352(%r15)
+; CHECK-NEXT: vfchdb %v0, %v25, %v0
+; CHECK-NEXT: vsel %v25, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 240(%r15)
+; CHECK-NEXT: vl %v1, 496(%r15)
+; CHECK-NEXT: vl %v2, 368(%r15)
+; CHECK-NEXT: vfchdb %v0, %v27, %v0
+; CHECK-NEXT: vsel %v27, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 256(%r15)
+; CHECK-NEXT: vfchdb %v0, %v29, %v0
+; CHECK-NEXT: vl %v1, 512(%r15)
+; CHECK-NEXT: vl %v2, 384(%r15)
+; CHECK-NEXT: vsel %v29, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 272(%r15)
+; CHECK-NEXT: vfchdb %v0, %v31, %v0
+; CHECK-NEXT: vl %v1, 528(%r15)
+; CHECK-NEXT: vl %v2, 400(%r15)
+; CHECK-NEXT: vsel %v31, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x i64> %val3, <16 x i64> %val4
+ ret <16 x i64> %sel
+}
+
+define <16 x float> @fun142(<16 x double> %val1, <16 x double> %val2, <16 x float> %val3, <16 x float> %val4) {
+; CHECK-LABEL: fun142:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vl %v1, 160(%r15)
+; CHECK-NEXT: vfchdb %v0, %v26, %v0
+; CHECK-NEXT: vfchdb %v1, %v24, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 352(%r15)
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vl %v1, 192(%r15)
+; CHECK-NEXT: vfchdb %v0, %v30, %v0
+; CHECK-NEXT: vfchdb %v1, %v28, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 368(%r15)
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 240(%r15)
+; CHECK-NEXT: vl %v1, 224(%r15)
+; CHECK-NEXT: vfchdb %v0, %v27, %v0
+; CHECK-NEXT: vfchdb %v1, %v25, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 384(%r15)
+; CHECK-NEXT: vl %v2, 320(%r15)
+; CHECK-NEXT: vsel %v28, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 272(%r15)
+; CHECK-NEXT: vl %v1, 256(%r15)
+; CHECK-NEXT: vfchdb %v0, %v31, %v0
+; CHECK-NEXT: vfchdb %v1, %v29, %v1
+; CHECK-NEXT: vpkg %v0, %v1, %v0
+; CHECK-NEXT: vl %v1, 400(%r15)
+; CHECK-NEXT: vl %v2, 336(%r15)
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x float> %val3, <16 x float> %val4
+ ret <16 x float> %sel
+}
+
+define <16 x double> @fun143(<16 x double> %val1, <16 x double> %val2, <16 x double> %val3, <16 x double> %val4) {
+; CHECK-LABEL: fun143:
+; CHECK: # BB#0:
+; CHECK-NEXT: vl %v0, 160(%r15)
+; CHECK-NEXT: vl %v1, 416(%r15)
+; CHECK-NEXT: vl %v2, 288(%r15)
+; CHECK-NEXT: vfchdb %v0, %v24, %v0
+; CHECK-NEXT: vsel %v24, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 176(%r15)
+; CHECK-NEXT: vl %v1, 432(%r15)
+; CHECK-NEXT: vl %v2, 304(%r15)
+; CHECK-NEXT: vfchdb %v0, %v26, %v0
+; CHECK-NEXT: vsel %v26, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 192(%r15)
+; CHECK-NEXT: vl %v1, 448(%r15)
+; CHECK-NEXT: vl %v2, 320(%r15)
+; CHECK-NEXT: vfchdb %v0, %v28, %v0
+; CHECK-NEXT: vsel %v28, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 208(%r15)
+; CHECK-NEXT: vl %v1, 464(%r15)
+; CHECK-NEXT: vl %v2, 336(%r15)
+; CHECK-NEXT: vfchdb %v0, %v30, %v0
+; CHECK-NEXT: vsel %v30, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 224(%r15)
+; CHECK-NEXT: vl %v1, 480(%r15)
+; CHECK-NEXT: vl %v2, 352(%r15)
+; CHECK-NEXT: vfchdb %v0, %v25, %v0
+; CHECK-NEXT: vsel %v25, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 240(%r15)
+; CHECK-NEXT: vl %v1, 496(%r15)
+; CHECK-NEXT: vl %v2, 368(%r15)
+; CHECK-NEXT: vfchdb %v0, %v27, %v0
+; CHECK-NEXT: vsel %v27, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 256(%r15)
+; CHECK-NEXT: vfchdb %v0, %v29, %v0
+; CHECK-NEXT: vl %v1, 512(%r15)
+; CHECK-NEXT: vl %v2, 384(%r15)
+; CHECK-NEXT: vsel %v29, %v2, %v1, %v0
+; CHECK-NEXT: vl %v0, 272(%r15)
+; CHECK-NEXT: vfchdb %v0, %v31, %v0
+; CHECK-NEXT: vl %v1, 528(%r15)
+; CHECK-NEXT: vl %v2, 400(%r15)
+; CHECK-NEXT: vsel %v31, %v2, %v1, %v0
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <16 x double> %val1, %val2
+ %sel = select <16 x i1> %cmp, <16 x double> %val3, <16 x double> %val4
+ ret <16 x double> %sel
+}
+
diff --git a/test/CodeGen/SystemZ/vec-sext.ll b/test/CodeGen/SystemZ/vec-sext.ll
new file mode 100644
index 000000000000..9831de52ee83
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-sext.ll
@@ -0,0 +1,91 @@
+; Test that vector sexts are done efficiently with unpack instructions, also
+; in the case of fewer elements than allowed, e.g. <2 x i32>.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+
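+; A rough sketch of the expected lowering (based on the documented z13
+; unpack semantics, not on anything added in this patch): vuphb, vuphh and
+; vuphf each sign-extend the high (leftmost) half of the source elements to
+; double width. Short vectors are left-justified in the register, so a chain
+; of high-unpacks covers all live elements, e.g. for <2 x i8> -> <2 x i64>:
+;   vuphb %v0, %v24   ; bytes (high half) -> halfwords
+;   vuphh %v0, %v0    ; halfwords (high half) -> words
+;   vuphf %v24, %v0   ; words (high half) -> doublewords
+;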
+define <2 x i16> @fun1(<2 x i8> %val1) {
+; CHECK-LABEL: fun1:
+; CHECK: vuphb %v24, %v24
+; CHECK-NEXT: br %r14
+ %z = sext <2 x i8> %val1 to <2 x i16>
+ ret <2 x i16> %z
+}
+
+define <2 x i32> @fun2(<2 x i8> %val1) {
+; CHECK-LABEL: fun2:
+; CHECK: vuphb %v0, %v24
+; CHECK-NEXT: vuphh %v24, %v0
+; CHECK-NEXT: br %r14
+ %z = sext <2 x i8> %val1 to <2 x i32>
+ ret <2 x i32> %z
+}
+
+define <2 x i64> @fun3(<2 x i8> %val1) {
+; CHECK-LABEL: fun3:
+; CHECK: vuphb %v0, %v24
+; CHECK-NEXT: vuphh %v0, %v0
+; CHECK-NEXT: vuphf %v24, %v0
+; CHECK-NEXT: br %r14
+ %z = sext <2 x i8> %val1 to <2 x i64>
+ ret <2 x i64> %z
+}
+
+define <2 x i32> @fun4(<2 x i16> %val1) {
+; CHECK-LABEL: fun4:
+; CHECK: vuphh %v24, %v24
+; CHECK-NEXT: br %r14
+ %z = sext <2 x i16> %val1 to <2 x i32>
+ ret <2 x i32> %z
+}
+
+define <2 x i64> @fun5(<2 x i16> %val1) {
+; CHECK-LABEL: fun5:
+; CHECK: vuphh %v0, %v24
+; CHECK-NEXT: vuphf %v24, %v0
+; CHECK-NEXT: br %r14
+ %z = sext <2 x i16> %val1 to <2 x i64>
+ ret <2 x i64> %z
+}
+
+define <2 x i64> @fun6(<2 x i32> %val1) {
+; CHECK-LABEL: fun6:
+; CHECK: vuphf %v24, %v24
+; CHECK-NEXT: br %r14
+ %z = sext <2 x i32> %val1 to <2 x i64>
+ ret <2 x i64> %z
+}
+
+define <4 x i16> @fun7(<4 x i8> %val1) {
+; CHECK-LABEL: fun7:
+; CHECK: vuphb %v24, %v24
+; CHECK-NEXT: br %r14
+ %z = sext <4 x i8> %val1 to <4 x i16>
+ ret <4 x i16> %z
+}
+
+define <4 x i32> @fun8(<4 x i8> %val1) {
+; CHECK-LABEL: fun8:
+; CHECK: vuphb %v0, %v24
+; CHECK-NEXT: vuphh %v24, %v0
+; CHECK-NEXT: br %r14
+ %z = sext <4 x i8> %val1 to <4 x i32>
+ ret <4 x i32> %z
+}
+
+define <4 x i32> @fun9(<4 x i16> %val1) {
+; CHECK-LABEL: fun9:
+; CHECK: vuphh %v24, %v24
+; CHECK-NEXT: br %r14
+ %z = sext <4 x i16> %val1 to <4 x i32>
+ ret <4 x i32> %z
+}
+
+define <8 x i16> @fun10(<8 x i8> %val1) {
+; CHECK-LABEL: fun10:
+; CHECK: vuphb %v24, %v24
+; CHECK-NEXT: br %r14
+ %z = sext <8 x i8> %val1 to <8 x i16>
+ ret <8 x i16> %z
+}
+
diff --git a/test/CodeGen/SystemZ/vec-trunc-to-i1.ll b/test/CodeGen/SystemZ/vec-trunc-to-i1.ll
new file mode 100644
index 000000000000..705fe3dbac90
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-trunc-to-i1.ll
@@ -0,0 +1,37 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+;
+; Check that a widening truncate to a vector of i1 elements can be handled.
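+; (<4 x i1> is not a legal type, so the truncate has to be widened; only
+; bit 0 of each widened lane is presumably significant, which is why the
+; checks below mask with a splat of 1 (vrepif + vn) before testing the
+; extracted element with tmll.)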
+
+
+define void @pr32275(<4 x i8> %B15) {
+; CHECK-LABEL: pr32275:
+; CHECK: # BB#0: # %BB
+; CHECK-NEXT: vrepif %v0, 1
+; CHECK-NEXT: .LBB0_1: # %CF34
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vlgvb %r0, %v24, 3
+; CHECK-NEXT: vlgvb %r1, %v24, 1
+; CHECK-NEXT: vlvgp %v1, %r1, %r0
+; CHECK-NEXT: vlgvb %r0, %v24, 0
+; CHECK-NEXT: vlvgf %v1, %r0, 0
+; CHECK-NEXT: vlgvb %r0, %v24, 2
+; CHECK-NEXT: vlvgf %v1, %r0, 2
+; CHECK-NEXT: vn %v1, %v1, %v0
+; CHECK-NEXT: vlgvf %r0, %v1, 3
+; CHECK-NEXT: tmll %r0, 1
+; CHECK-NEXT: jne .LBB0_1
+; CHECK-NEXT: # BB#2: # %CF36
+; CHECK-NEXT: br %r14
+BB:
+ br label %CF34
+
+CF34:
+ %Tr24 = trunc <4 x i8> %B15 to <4 x i1>
+ %E28 = extractelement <4 x i1> %Tr24, i32 3
+ br i1 %E28, label %CF34, label %CF36
+
+CF36:
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/vec-zext.ll b/test/CodeGen/SystemZ/vec-zext.ll
new file mode 100644
index 000000000000..831594d4020c
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-zext.ll
@@ -0,0 +1,91 @@
+; Test that vector zexts are done efficiently with unpack instructions, also
+; in the case of fewer elements than allowed, e.g. <2 x i32>.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+
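+; As with the sext tests, a rough sketch of the expected lowering: the
+; logical unpacks vuplhb, vuplhh and vuplhf zero-extend the high (leftmost)
+; half of the source elements to double width, so chaining them widens a
+; left-justified short vector, e.g. for <2 x i8> -> <2 x i64>:
+;   vuplhb %v0, %v24   ; bytes (high half) -> halfwords, zero-filled
+;   vuplhh %v0, %v0    ; halfwords (high half) -> words, zero-filled
+;   vuplhf %v24, %v0   ; words (high half) -> doublewords, zero-filled
+;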
+define <2 x i16> @fun1(<2 x i8> %val1) {
+; CHECK-LABEL: fun1:
+; CHECK: vuplhb %v24, %v24
+; CHECK-NEXT: br %r14
+ %z = zext <2 x i8> %val1 to <2 x i16>
+ ret <2 x i16> %z
+}
+
+define <2 x i32> @fun2(<2 x i8> %val1) {
+; CHECK-LABEL: fun2:
+; CHECK: vuplhb %v0, %v24
+; CHECK-NEXT: vuplhh %v24, %v0
+; CHECK-NEXT: br %r14
+ %z = zext <2 x i8> %val1 to <2 x i32>
+ ret <2 x i32> %z
+}
+
+define <2 x i64> @fun3(<2 x i8> %val1) {
+; CHECK-LABEL: fun3:
+; CHECK: vuplhb %v0, %v24
+; CHECK-NEXT: vuplhh %v0, %v0
+; CHECK-NEXT: vuplhf %v24, %v0
+; CHECK-NEXT: br %r14
+ %z = zext <2 x i8> %val1 to <2 x i64>
+ ret <2 x i64> %z
+}
+
+define <2 x i32> @fun4(<2 x i16> %val1) {
+; CHECK-LABEL: fun4:
+; CHECK: vuplhh %v24, %v24
+; CHECK-NEXT: br %r14
+ %z = zext <2 x i16> %val1 to <2 x i32>
+ ret <2 x i32> %z
+}
+
+define <2 x i64> @fun5(<2 x i16> %val1) {
+; CHECK-LABEL: fun5:
+; CHECK: vuplhh %v0, %v24
+; CHECK-NEXT: vuplhf %v24, %v0
+; CHECK-NEXT: br %r14
+ %z = zext <2 x i16> %val1 to <2 x i64>
+ ret <2 x i64> %z
+}
+
+define <2 x i64> @fun6(<2 x i32> %val1) {
+; CHECK-LABEL: fun6:
+; CHECK: vuplhf %v24, %v24
+; CHECK-NEXT: br %r14
+ %z = zext <2 x i32> %val1 to <2 x i64>
+ ret <2 x i64> %z
+}
+
+define <4 x i16> @fun7(<4 x i8> %val1) {
+; CHECK-LABEL: fun7:
+; CHECK: vuplhb %v24, %v24
+; CHECK-NEXT: br %r14
+ %z = zext <4 x i8> %val1 to <4 x i16>
+ ret <4 x i16> %z
+}
+
+define <4 x i32> @fun8(<4 x i8> %val1) {
+; CHECK-LABEL: fun8:
+; CHECK: vuplhb %v0, %v24
+; CHECK-NEXT: vuplhh %v24, %v0
+; CHECK-NEXT: br %r14
+ %z = zext <4 x i8> %val1 to <4 x i32>
+ ret <4 x i32> %z
+}
+
+define <4 x i32> @fun9(<4 x i16> %val1) {
+; CHECK-LABEL: fun9:
+; CHECK: vuplhh %v24, %v24
+; CHECK-NEXT: br %r14
+ %z = zext <4 x i16> %val1 to <4 x i32>
+ ret <4 x i32> %z
+}
+
+define <8 x i16> @fun10(<8 x i8> %val1) {
+; CHECK-LABEL: fun10:
+; CHECK: vuplhb %v24, %v24
+; CHECK-NEXT: br %r14
+ %z = zext <8 x i8> %val1 to <8 x i16>
+ ret <8 x i16> %z
+}
+
diff --git a/test/CodeGen/SystemZ/vectorizer-output-3xi32.ll b/test/CodeGen/SystemZ/vectorizer-output-3xi32.ll
new file mode 100644
index 000000000000..3e7ba6095926
--- /dev/null
+++ b/test/CodeGen/SystemZ/vectorizer-output-3xi32.ll
@@ -0,0 +1,10 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13
+;
+; This testcase originates from the BB-vectorizer output.
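+; Note there is no FileCheck run: the odd-sized <3 x i1>/<3 x i32> types
+; presumably have to be widened during type legalization, and the test only
+; checks that compiling the zext and extractelement does not crash.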
+
+define void @fun() {
+ %1 = zext <3 x i1> zeroinitializer to <3 x i32>
+ %2 = extractelement <3 x i32> %1, i32 2
+ store i32 %2, i32* undef, align 8
+ unreachable
+}
diff --git a/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll b/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll
index 2f8e36b66b87..08349a31dfa2 100644
--- a/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll
+++ b/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll
@@ -9,9 +9,9 @@
define void @_Z19getClosestDiagonal3ii(%0* noalias sret, i32, i32) nounwind {
; CHECK: bl ___muldf3
-; CHECK: bl ___muldf3
; CHECK: beq LBB0
; CHECK: bl ___muldf3
+; CHECK: bl ___muldf3
; <label>:3
switch i32 %1, label %4 [
i32 0, label %5
diff --git a/test/CodeGen/Thumb/PR17309.ll b/test/CodeGen/Thumb/PR17309.ll
index f1033e7d7418..8869537425b7 100644
--- a/test/CodeGen/Thumb/PR17309.ll
+++ b/test/CodeGen/Thumb/PR17309.ll
@@ -11,9 +11,9 @@ define void @pass_C() #0 {
entry:
%c = alloca %struct.C, align 1
%0 = getelementptr inbounds %struct.C, %struct.C* %c, i32 0, i32 0, i32 0
- call void @llvm.lifetime.start(i64 1000, i8* %0) #1
+ call void @llvm.lifetime.start.p0i8(i64 1000, i8* %0) #1
call void @use_C(%struct.C* byval %c) #3
- call void @llvm.lifetime.end(i64 1000, i8* %0) #1
+ call void @llvm.lifetime.end.p0i8(i64 1000, i8* %0) #1
ret void
}
@@ -24,9 +24,9 @@ define void @pass_S() #0 {
entry:
%s = alloca %struct.S, align 2
%0 = bitcast %struct.S* %s to i8*
- call void @llvm.lifetime.start(i64 2000, i8* %0) #1
+ call void @llvm.lifetime.start.p0i8(i64 2000, i8* %0) #1
call void @use_S(%struct.S* byval %s) #3
- call void @llvm.lifetime.end(i64 2000, i8* %0) #1
+ call void @llvm.lifetime.end.p0i8(i64 2000, i8* %0) #1
ret void
}
@@ -37,9 +37,9 @@ define void @pass_I() #0 {
entry:
%i = alloca %struct.I, align 4
%0 = bitcast %struct.I* %i to i8*
- call void @llvm.lifetime.start(i64 4000, i8* %0) #1
+ call void @llvm.lifetime.start.p0i8(i64 4000, i8* %0) #1
call void @use_I(%struct.I* byval %i) #3
- call void @llvm.lifetime.end(i64 4000, i8* %0) #1
+ call void @llvm.lifetime.end.p0i8(i64 4000, i8* %0) #1
ret void
}
@@ -47,8 +47,8 @@ declare void @use_C(%struct.C* byval) #2
declare void @use_S(%struct.S* byval) #2
declare void @use_I(%struct.I* byval) #2
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Thumb/cmp-add-fold.ll b/test/CodeGen/Thumb/cmp-add-fold.ll
index b0ad8ab93f8a..aa61b0825b0c 100644
--- a/test/CodeGen/Thumb/cmp-add-fold.ll
+++ b/test/CodeGen/Thumb/cmp-add-fold.ll
@@ -2,8 +2,9 @@
; RUN: llc -mtriple=thumbv7m-eabi -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK --check-prefix=T2 %s
; CHECK-LABEL: addri1:
-; CHECK: adds r0, #3
+; T1: adds r0, r0, #3
; T1-NEXT: b{{eq|ne}}
+; T2: adds r0, #3
; T2-NOT: cmp
define i32 @addri1(i32 %a, i32 %b) {
%c = add i32 %a, 3
diff --git a/test/CodeGen/Thumb/copy_thumb.ll b/test/CodeGen/Thumb/copy_thumb.ll
index 528f54bd84e6..008c31aba7aa 100644
--- a/test/CodeGen/Thumb/copy_thumb.ll
+++ b/test/CodeGen/Thumb/copy_thumb.ll
@@ -16,15 +16,9 @@
; RUN: llc -mtriple=thumbv4t-none--eabi < %s | FileCheck %s --check-prefix=CHECK-NOLOLOMOV
; RUN: llc -mtriple=thumbv5-none--eabi < %s | FileCheck %s --check-prefix=CHECK-NOLOLOMOV
; CHECK-NOLOLOMOV-LABEL: foo
-; CHECK-NOLOLOMOV-NOT: mov [[TMP:r[0-7]]], [[SRC1:r[01]]]
-; CHECK-NOLOLOMOV: push {[[SRC1:r[01]]]}
-; CHECK-NOLOLOMOV-NEXT: pop {[[TMP:r[0-7]]]}
-; CHECK-NOLOLOMOV-NOT: mov [[TMP:r[0-7]]], [[SRC1:r[01]]]
-; CHECK-NOLOLOMOV: push {[[SRC2:r[01]]]}
-; CHECK-NOLOLOMOV-NEXT: pop {[[SRC1]]}
-; CHECK-NOLOLOMOV-NOT: mov [[TMP:r[0-7]]], [[SRC1:r[01]]]
-; CHECK-NOLOLOMOV: push {[[TMP]]}
-; CHECK-NOLOLOMOV-NEXT: pop {[[SRC2]]}
+; CHECK-NOLOLOMOV: movs [[TMP:r[0-7]]], [[SRC1:r[01]]]
+; CHECK-NOLOLOMOV-NEXT: movs [[SRC1]], [[SRC2:r[01]]]
+; CHECK-NOLOLOMOV-NEXT: movs [[SRC2]], [[TMP]]
; CHECK-NOLOLOMOV-LABEL: bar
; CHECK-NOLOLOMOV-LABEL: fnend
diff --git a/test/CodeGen/Thumb/ispositive.ll b/test/CodeGen/Thumb/ispositive.ll
index 8d396878932b..a9b2c139797e 100644
--- a/test/CodeGen/Thumb/ispositive.ll
+++ b/test/CodeGen/Thumb/ispositive.ll
@@ -9,3 +9,12 @@ entry:
ret i32 %1
}
+define i32 @test2(i32 %X) {
+entry:
+; CHECK-LABEL: test2:
+; CHECK: lsls r1, r1, #31
+; CHECK-NEXT: adds
+ %tmp1 = sub i32 %X, 2147483648
+ ret i32 %tmp1
+}
+
diff --git a/test/CodeGen/Thumb/long.ll b/test/CodeGen/Thumb/long.ll
index 33f63892ec3f..c549bd425aaf 100644
--- a/test/CodeGen/Thumb/long.ll
+++ b/test/CodeGen/Thumb/long.ll
@@ -1,41 +1,88 @@
-; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s
-; RUN: llc -mtriple=thumb-apple-darwin %s -o - | FileCheck %s -check-prefix CHECK-DARWIN
+; RUN: llc -mtriple=thumb-eabi %s -verify-machineinstrs -o - | FileCheck %s
+; RUN: llc -mtriple=thumb-apple-darwin %s -verify-machineinstrs -o - | \
+; RUN: FileCheck %s -check-prefix CHECK -check-prefix CHECK-DARWIN
define i64 @f1() {
entry:
ret i64 0
+; CHECK-LABEL: f1:
+; CHECK: movs r0, #0
+; CHECK: movs r1, r0
}
define i64 @f2() {
entry:
ret i64 1
+; CHECK-LABEL: f2:
+; CHECK: movs r0, #1
+; CHECK: movs r1, #0
}
define i64 @f3() {
entry:
ret i64 2147483647
+; CHECK-LABEL: f3:
+; CHECK: ldr r0,
+; CHECK: movs r1, #0
}
define i64 @f4() {
entry:
ret i64 2147483648
+; CHECK-LABEL: f4:
+; CHECK: movs r0, #1
+; CHECK: lsls r0, r0, #31
+; CHECK: movs r1, #0
}
define i64 @f5() {
entry:
ret i64 9223372036854775807
+; CHECK-LABEL: f5:
+; CHECK: movs r0, #0
+; CHECK: mvns r0, r0
+; CHECK: ldr r1,
}
define i64 @f6(i64 %x, i64 %y) {
entry:
%tmp1 = add i64 %y, 1 ; <i64> [#uses=1]
ret i64 %tmp1
+; CHECK-LABEL: f6:
+; CHECK: movs r1, #0
+; CHECK: adds r0, r2, #1
+; CHECK: adcs r1, r3
+}
+
+define i64 @f6a(i64 %x, i64 %y) {
+entry:
+ %tmp1 = add i64 %y, 10
+ ret i64 %tmp1
+; CHECK-LABEL: f6a:
+; CHECK: movs r1, #0
+; CHECK: adds r2, #10
+; CHECK: adcs r1, r3
+; CHECK: movs r0, r2
+}
+
+define i64 @f6b(i64 %x, i64 %y) {
+entry:
+ %tmp1 = add i64 %y, 1000
+ ret i64 %tmp1
+; CHECK-LABEL: f6b:
+; CHECK: movs r0, #125
+; CHECK: lsls r0, r0, #3
+; CHECK: movs r1, #0
+; CHECK: adds r0, r2, r0
+; CHECK: adcs r1, r3
}
define void @f7() {
entry:
%tmp = call i64 @f8( ) ; <i64> [#uses=0]
ret void
+; CHECK-LABEL: f7:
+; CHECK: bl
}
declare i64 @f8()
@@ -44,6 +91,57 @@ define i64 @f9(i64 %a, i64 %b) {
entry:
%tmp = sub i64 %a, %b ; <i64> [#uses=1]
ret i64 %tmp
+; CHECK-LABEL: f9:
+; CHECK: subs r0, r0, r2
+; CHECK: sbcs r1, r3
+}
+
+define i64 @f9a(i64 %x, i64 %y) { ; ADDC with small negative imm => SUBS imm
+entry:
+ %tmp1 = sub i64 %y, 10
+ ret i64 %tmp1
+; CHECK-LABEL: f9a:
+; CHECK: movs r0, #0
+; CHECK: subs r2, #10
+; CHECK: sbcs r3, r0
+; CHECK: movs r0, r2
+; CHECK: movs r1, r3
+}
+
+define i64 @f9b(i64 %x, i64 %y) { ; ADDC with big negative imm => SUBS reg
+entry:
+ %tmp1 = sub i64 1000, %y
+ ret i64 %tmp1
+; CHECK-LABEL: f9b:
+; CHECK: movs r0, #125
+; CHECK: lsls r0, r0, #3
+; CHECK: movs r1, #0
+; CHECK: subs r0, r0, r2
+; CHECK: sbcs r1, r3
+}
+
+define i64 @f9c(i64 %x, i32 %y) { ; SUBS with small positive imm => SUBS imm
+entry:
+ %conv = sext i32 %y to i64
+ %shl = shl i64 %conv, 32
+ %or = or i64 %shl, 1
+ %sub = sub nsw i64 %x, %or
+ ret i64 %sub
+; CHECK-LABEL: f9c:
+; CHECK: subs r0, r0, #1
+; CHECK: sbcs r1, r2
+}
+
+define i64 @f9d(i64 %x, i32 %y) { ; SUBS with small negative imm => ADDS imm
+entry:
+ %conv = sext i32 %y to i64
+ %shl = shl i64 %conv, 32
+ %or = or i64 %shl, 4294967295
+ %sub = sub nsw i64 %x, %or
+ ret i64 %sub
+; CHECK-LABEL: f9d:
+; CHECK: adds r0, r0, #1
+; CHECK: sbcs r1, r2
}
define i64 @f(i32 %a, i32 %b) {
@@ -52,6 +150,9 @@ entry:
%tmp1 = sext i32 %b to i64 ; <i64> [#uses=1]
%tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1]
ret i64 %tmp2
+; CHECK-LABEL: f:
+; CHECK-V6: bl __aeabi_lmul
+; CHECK-DARWIN: __muldi3
}
define i64 @g(i32 %a, i32 %b) {
@@ -60,6 +161,9 @@ entry:
%tmp1 = zext i32 %b to i64 ; <i64> [#uses=1]
%tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1]
ret i64 %tmp2
+; CHECK-LABEL: g:
+; CHECK-V6: bl __aeabi_lmul
+; CHECK-DARWIN: __muldi3
}
define i64 @f10() {
@@ -67,16 +171,38 @@ entry:
%a = alloca i64, align 8 ; <i64*> [#uses=1]
%retval = load i64, i64* %a ; <i64> [#uses=1]
ret i64 %retval
+; CHECK-LABEL: f10:
+; CHECK: sub sp, #8
+; CHECK: ldr r0, [sp]
+; CHECK: ldr r1, [sp, #4]
+; CHECK: add sp, #8
}
-; CHECK: mvn
-; CHECK-NOT: mvn
-
-; CHECK: adc
-; CHECK-NOT: adc
-
-; CHECK: sbc
-; CHECK-NOT: sbc
-
-; CHECK-DARWIN: __muldi3
+define i64 @f11(i64 %x, i64 %y) {
+entry:
+ %tmp1 = add i64 -1000, %y
+ %tmp2 = add i64 %tmp1, -1000
+ ret i64 %tmp2
+; CHECK-LABEL: f11:
+; CHECK: movs r0, #125
+; CHECK: lsls r0, r0, #3
+; CHECK: movs r1, #0
+; CHECK: subs r2, r2, r0
+; CHECK: sbcs r3, r1
+; CHECK: subs r0, r2, r0
+; CHECK: sbcs r3, r1
+; CHECK: movs r1, r3
+}
+; "sub 2147483648" has to be lowered into "add -2147483648"
+define i64 @f12(i64 %x, i64 %y) {
+entry:
+ %tmp1 = sub i64 %x, 2147483648
+ ret i64 %tmp1
+; CHECK-LABEL: f12:
+; CHECK: movs r2, #1
+; CHECK: lsls r2, r2, #31
+; CHECK: movs r3, #0
+; CHECK: adds r0, r0, r2
+; CHECK: sbcs r1, r3
+}
diff --git a/test/CodeGen/Thumb/mature-mc-support.ll b/test/CodeGen/Thumb/mature-mc-support.ll
index d7f8ae6c6c4d..6a638d405069 100644
--- a/test/CodeGen/Thumb/mature-mc-support.ll
+++ b/test/CodeGen/Thumb/mature-mc-support.ll
@@ -9,4 +9,4 @@
module asm " .this_directive_is_very_unlikely_to_exist"
-; CHECK: LLVM ERROR: Error parsing inline asm
+; CHECK: error: unknown directive
diff --git a/test/CodeGen/Thumb/remove-unneeded-push-pop.ll b/test/CodeGen/Thumb/remove-unneeded-push-pop.ll
new file mode 100644
index 000000000000..054be2ea8587
--- /dev/null
+++ b/test/CodeGen/Thumb/remove-unneeded-push-pop.ll
@@ -0,0 +1,1052 @@
+; RUN: llc -O0 -mtriple thumbv6m-arm-none-eabi < %s | FileCheck %s
+
+@a = external hidden global i32*, align 4
+@f = external hidden global i32, align 4
+
+define hidden void @foo() {
+entry:
+; CHECK-NOT: push {lr}
+; CHECK-NOT: pop {pc}
+ store i32 24654, i32* @f, align 4
+ br label %if.end
+
+if.end: ; preds = %entry
+ %0 = load i32*, i32** @a, align 4
+ %arrayidx1 = getelementptr inbounds i32, i32* %0, i32 2
+ %1 = load i32, i32* %arrayidx1, align 4
+ %tobool2 = icmp ne i32 %1, 0
+ br i1 %tobool2, label %if.then3, label %if.end4
+
+if.then3: ; preds = %if.end
+ store i32 17785, i32* @f, align 4
+ br label %if.end4
+
+if.end4: ; preds = %if.then3, %if.end
+ %2 = load i32*, i32** @a, align 4
+ %arrayidx5 = getelementptr inbounds i32, i32* %2, i32 3
+ %3 = load i32, i32* %arrayidx5, align 4
+ %tobool6 = icmp ne i32 %3, 0
+ br i1 %tobool6, label %if.then7, label %if.end8
+
+if.then7: ; preds = %if.end4
+ store i32 10342, i32* @f, align 4
+ br label %if.end8
+
+if.end8: ; preds = %if.then7, %if.end4
+ %4 = load i32*, i32** @a, align 4
+ %arrayidx9 = getelementptr inbounds i32, i32* %4, i32 4
+ %5 = load i32, i32* %arrayidx9, align 4
+ %tobool10 = icmp ne i32 %5, 0
+ br i1 %tobool10, label %if.then11, label %if.end12
+
+if.then11: ; preds = %if.end8
+ store i32 29082, i32* @f, align 4
+ br label %if.end12
+
+if.end12: ; preds = %if.then11, %if.end8
+ %6 = load i32*, i32** @a, align 4
+ %arrayidx13 = getelementptr inbounds i32, i32* %6, i32 5
+ %7 = load i32, i32* %arrayidx13, align 4
+ %tobool14 = icmp ne i32 %7, 0
+ br i1 %tobool14, label %if.then15, label %if.end16
+
+if.then15: ; preds = %if.end12
+ store i32 29893, i32* @f, align 4
+ br label %if.end16
+
+if.end16: ; preds = %if.then15, %if.end12
+ %8 = load i32*, i32** @a, align 4
+ %arrayidx17 = getelementptr inbounds i32, i32* %8, i32 6
+ %9 = load i32, i32* %arrayidx17, align 4
+ %tobool18 = icmp ne i32 %9, 0
+ br i1 %tobool18, label %if.then19, label %if.end20
+
+if.then19: ; preds = %if.end16
+ store i32 19071, i32* @f, align 4
+ br label %if.end20
+
+if.end20: ; preds = %if.then19, %if.end16
+ %10 = load i32*, i32** @a, align 4
+ %arrayidx21 = getelementptr inbounds i32, i32* %10, i32 7
+ %11 = load i32, i32* %arrayidx21, align 4
+ %tobool22 = icmp ne i32 %11, 0
+ br i1 %tobool22, label %if.then23, label %if.end24
+
+if.then23: ; preds = %if.end20
+ store i32 6154, i32* @f, align 4
+ br label %if.end24
+
+if.end24: ; preds = %if.then23, %if.end20
+ %12 = load i32*, i32** @a, align 4
+ %arrayidx25 = getelementptr inbounds i32, i32* %12, i32 8
+ %13 = load i32, i32* %arrayidx25, align 4
+ %tobool26 = icmp ne i32 %13, 0
+ br i1 %tobool26, label %if.then27, label %if.end28
+
+if.then27: ; preds = %if.end24
+ store i32 30498, i32* @f, align 4
+ br label %if.end28
+
+if.end28: ; preds = %if.then27, %if.end24
+ %14 = load i32*, i32** @a, align 4
+ %arrayidx29 = getelementptr inbounds i32, i32* %14, i32 9
+ %15 = load i32, i32* %arrayidx29, align 4
+ %tobool30 = icmp ne i32 %15, 0
+ br i1 %tobool30, label %if.then31, label %if.end32
+
+if.then31: ; preds = %if.end28
+ store i32 16667, i32* @f, align 4
+ br label %if.end32
+
+if.end32: ; preds = %if.then31, %if.end28
+ %16 = load i32*, i32** @a, align 4
+ %arrayidx33 = getelementptr inbounds i32, i32* %16, i32 10
+ %17 = load i32, i32* %arrayidx33, align 4
+ %tobool34 = icmp ne i32 %17, 0
+ br i1 %tobool34, label %if.then35, label %if.end36
+
+if.then35: ; preds = %if.end32
+ store i32 195, i32* @f, align 4
+ br label %if.end36
+
+if.end36: ; preds = %if.then35, %if.end32
+ %18 = load i32*, i32** @a, align 4
+ %arrayidx37 = getelementptr inbounds i32, i32* %18, i32 11
+ %19 = load i32, i32* %arrayidx37, align 4
+ %tobool38 = icmp ne i32 %19, 0
+ br i1 %tobool38, label %if.then39, label %if.end40
+
+if.then39: ; preds = %if.end36
+ store i32 14665, i32* @f, align 4
+ br label %if.end40
+
+if.end40: ; preds = %if.then39, %if.end36
+ %20 = load i32*, i32** @a, align 4
+ %arrayidx41 = getelementptr inbounds i32, i32* %20, i32 12
+ %21 = load i32, i32* %arrayidx41, align 4
+ %tobool42 = icmp ne i32 %21, 0
+ br i1 %tobool42, label %if.then43, label %if.end44
+
+if.then43: ; preds = %if.end40
+ store i32 19305, i32* @f, align 4
+ br label %if.end44
+
+if.end44: ; preds = %if.then43, %if.end40
+ %22 = load i32*, i32** @a, align 4
+ %arrayidx45 = getelementptr inbounds i32, i32* %22, i32 13
+ %23 = load i32, i32* %arrayidx45, align 4
+ %tobool46 = icmp ne i32 %23, 0
+ br i1 %tobool46, label %if.then47, label %if.end48
+
+if.then47: ; preds = %if.end44
+ store i32 15133, i32* @f, align 4
+ br label %if.end48
+
+if.end48: ; preds = %if.then47, %if.end44
+ %24 = load i32*, i32** @a, align 4
+ %arrayidx49 = getelementptr inbounds i32, i32* %24, i32 14
+ %25 = load i32, i32* %arrayidx49, align 4
+ %tobool50 = icmp ne i32 %25, 0
+ br i1 %tobool50, label %if.then51, label %if.end52
+
+if.then51: ; preds = %if.end48
+ store i32 19173, i32* @f, align 4
+ br label %if.end52
+
+if.end52: ; preds = %if.then51, %if.end48
+ br label %if.then55
+
+if.then55: ; preds = %if.end52
+ store i32 14025, i32* @f, align 4
+ br label %if.end56
+
+if.end56: ; preds = %if.then55
+ %26 = load i32*, i32** @a, align 4
+ %arrayidx57 = getelementptr inbounds i32, i32* %26, i32 16
+ %27 = load i32, i32* %arrayidx57, align 4
+ %tobool58 = icmp ne i32 %27, 0
+ br i1 %tobool58, label %if.then59, label %if.end60
+
+if.then59: ; preds = %if.end56
+ store i32 8209, i32* @f, align 4
+ br label %if.end60
+
+if.end60: ; preds = %if.then59, %if.end56
+ %28 = load i32*, i32** @a, align 4
+ %arrayidx61 = getelementptr inbounds i32, i32* %28, i32 17
+ %29 = load i32, i32* %arrayidx61, align 4
+ %tobool62 = icmp ne i32 %29, 0
+ br i1 %tobool62, label %if.then63, label %if.end64
+
+if.then63: ; preds = %if.end60
+ store i32 29621, i32* @f, align 4
+ br label %if.end64
+
+if.end64: ; preds = %if.then63, %if.end60
+ %30 = load i32*, i32** @a, align 4
+ %arrayidx65 = getelementptr inbounds i32, i32* %30, i32 18
+ %31 = load i32, i32* %arrayidx65, align 4
+ %tobool66 = icmp ne i32 %31, 0
+ br i1 %tobool66, label %if.then67, label %if.end68
+
+if.then67: ; preds = %if.end64
+ store i32 14963, i32* @f, align 4
+ br label %if.end68
+
+if.end68: ; preds = %if.then67, %if.end64
+ %32 = load i32*, i32** @a, align 4
+ %arrayidx69 = getelementptr inbounds i32, i32* %32, i32 19
+ %33 = load i32, i32* %arrayidx69, align 4
+ %tobool70 = icmp ne i32 %33, 0
+ br i1 %tobool70, label %if.then71, label %if.end72
+
+if.then71: ; preds = %if.end68
+ store i32 32282, i32* @f, align 4
+ br label %if.end72
+
+if.end72: ; preds = %if.then71, %if.end68
+ %34 = load i32*, i32** @a, align 4
+ %arrayidx73 = getelementptr inbounds i32, i32* %34, i32 20
+ %35 = load i32, i32* %arrayidx73, align 4
+ %tobool74 = icmp ne i32 %35, 0
+ br i1 %tobool74, label %if.then75, label %if.end76
+
+if.then75: ; preds = %if.end72
+ store i32 3072, i32* @f, align 4
+ br label %if.end76
+
+if.end76: ; preds = %if.then75, %if.end72
+ %36 = load i32*, i32** @a, align 4
+ %arrayidx77 = getelementptr inbounds i32, i32* %36, i32 21
+ %37 = load i32, i32* %arrayidx77, align 4
+ %tobool78 = icmp ne i32 %37, 0
+ br i1 %tobool78, label %if.then79, label %if.end80
+
+if.then79: ; preds = %if.end76
+ store i32 1992, i32* @f, align 4
+ br label %if.end80
+
+if.end80: ; preds = %if.then79, %if.end76
+ %38 = load i32*, i32** @a, align 4
+ %arrayidx81 = getelementptr inbounds i32, i32* %38, i32 22
+ %39 = load i32, i32* %arrayidx81, align 4
+ %tobool82 = icmp ne i32 %39, 0
+ br i1 %tobool82, label %if.then83, label %if.end84
+
+if.then83: ; preds = %if.end80
+ store i32 9614, i32* @f, align 4
+ br label %if.end84
+
+if.end84: ; preds = %if.then83, %if.end80
+ %40 = load i32*, i32** @a, align 4
+ %arrayidx85 = getelementptr inbounds i32, i32* %40, i32 23
+ %41 = load i32, i32* %arrayidx85, align 4
+ %tobool86 = icmp ne i32 %41, 0
+ br i1 %tobool86, label %if.then87, label %if.end88
+
+if.then87: ; preds = %if.end84
+ store i32 25931, i32* @f, align 4
+ br label %if.end88
+
+if.end88: ; preds = %if.then87, %if.end84
+ %42 = load i32*, i32** @a, align 4
+ %arrayidx89 = getelementptr inbounds i32, i32* %42, i32 24
+ %43 = load i32, i32* %arrayidx89, align 4
+ %tobool90 = icmp ne i32 %43, 0
+ br i1 %tobool90, label %if.then91, label %if.end92
+
+if.then91: ; preds = %if.end88
+ store i32 22035, i32* @f, align 4
+ br label %if.end92
+
+if.end92: ; preds = %if.then91, %if.end88
+ %44 = load i32*, i32** @a, align 4
+ %arrayidx93 = getelementptr inbounds i32, i32* %44, i32 25
+ %45 = load i32, i32* %arrayidx93, align 4
+ %tobool94 = icmp ne i32 %45, 0
+ br i1 %tobool94, label %if.then95, label %if.end96
+
+if.then95: ; preds = %if.end92
+ store i32 10712, i32* @f, align 4
+ br label %if.end96
+
+if.end96: ; preds = %if.then95, %if.end92
+ %46 = load i32*, i32** @a, align 4
+ %arrayidx97 = getelementptr inbounds i32, i32* %46, i32 26
+ %47 = load i32, i32* %arrayidx97, align 4
+ %tobool98 = icmp ne i32 %47, 0
+ br i1 %tobool98, label %if.then99, label %if.end100
+
+if.then99: ; preds = %if.end96
+ store i32 18267, i32* @f, align 4
+ br label %if.end100
+
+if.end100: ; preds = %if.then99, %if.end96
+ %48 = load i32*, i32** @a, align 4
+ %arrayidx101 = getelementptr inbounds i32, i32* %48, i32 27
+ %49 = load i32, i32* %arrayidx101, align 4
+ %tobool102 = icmp ne i32 %49, 0
+ br i1 %tobool102, label %if.then103, label %if.end104
+
+if.then103: ; preds = %if.end100
+ store i32 30432, i32* @f, align 4
+ br label %if.end104
+
+if.end104: ; preds = %if.then103, %if.end100
+ %50 = load i32*, i32** @a, align 4
+ %arrayidx105 = getelementptr inbounds i32, i32* %50, i32 28
+ %51 = load i32, i32* %arrayidx105, align 4
+ %tobool106 = icmp ne i32 %51, 0
+ br i1 %tobool106, label %if.then107, label %if.end108
+
+if.then107: ; preds = %if.end104
+ store i32 5847, i32* @f, align 4
+ br label %if.end108
+
+if.end108: ; preds = %if.then107, %if.end104
+ %52 = load i32*, i32** @a, align 4
+ %arrayidx109 = getelementptr inbounds i32, i32* %52, i32 29
+ %53 = load i32, i32* %arrayidx109, align 4
+ %tobool110 = icmp ne i32 %53, 0
+ br i1 %tobool110, label %if.then111, label %if.end112
+
+if.then111: ; preds = %if.end108
+ store i32 14705, i32* @f, align 4
+ br label %if.end112
+
+if.end112: ; preds = %if.then111, %if.end108
+ %54 = load i32*, i32** @a, align 4
+ %arrayidx113 = getelementptr inbounds i32, i32* %54, i32 30
+ %55 = load i32, i32* %arrayidx113, align 4
+ %tobool114 = icmp ne i32 %55, 0
+ br i1 %tobool114, label %if.then115, label %if.end116
+
+if.then115: ; preds = %if.end112
+ store i32 28488, i32* @f, align 4
+ br label %if.end116
+
+if.end116: ; preds = %if.then115, %if.end112
+ %56 = load i32*, i32** @a, align 4
+ %arrayidx117 = getelementptr inbounds i32, i32* %56, i32 31
+ %57 = load i32, i32* %arrayidx117, align 4
+ %tobool118 = icmp ne i32 %57, 0
+ br i1 %tobool118, label %if.then119, label %if.end120
+
+if.then119: ; preds = %if.end116
+ store i32 13853, i32* @f, align 4
+ br label %if.end120
+
+if.end120: ; preds = %if.then119, %if.end116
+ %58 = load i32*, i32** @a, align 4
+ %arrayidx121 = getelementptr inbounds i32, i32* %58, i32 32
+ %59 = load i32, i32* %arrayidx121, align 4
+ %tobool122 = icmp ne i32 %59, 0
+ br i1 %tobool122, label %if.then123, label %if.end124
+
+if.then123: ; preds = %if.end120
+ store i32 31379, i32* @f, align 4
+ br label %if.end124
+
+if.end124: ; preds = %if.then123, %if.end120
+ %60 = load i32*, i32** @a, align 4
+ %arrayidx125 = getelementptr inbounds i32, i32* %60, i32 33
+ %61 = load i32, i32* %arrayidx125, align 4
+ %tobool126 = icmp ne i32 %61, 0
+ br i1 %tobool126, label %if.then127, label %if.end128
+
+if.then127: ; preds = %if.end124
+ store i32 7010, i32* @f, align 4
+ br label %if.end128
+
+if.end128: ; preds = %if.then127, %if.end124
+ br label %if.then131
+
+if.then131: ; preds = %if.end128
+ store i32 31840, i32* @f, align 4
+ br label %if.end132
+
+if.end132: ; preds = %if.then131
+ %62 = load i32*, i32** @a, align 4
+ %arrayidx133 = getelementptr inbounds i32, i32* %62, i32 35
+ %63 = load i32, i32* %arrayidx133, align 4
+ %tobool134 = icmp ne i32 %63, 0
+ br i1 %tobool134, label %if.then135, label %if.end136
+
+if.then135: ; preds = %if.end132
+ store i32 16119, i32* @f, align 4
+ br label %if.end136
+
+if.end136: ; preds = %if.then135, %if.end132
+ %64 = load i32*, i32** @a, align 4
+ %arrayidx137 = getelementptr inbounds i32, i32* %64, i32 36
+ %65 = load i32, i32* %arrayidx137, align 4
+ %tobool138 = icmp ne i32 %65, 0
+ br i1 %tobool138, label %if.then139, label %if.end140
+
+if.then139: ; preds = %if.end136
+ store i32 7119, i32* @f, align 4
+ br label %if.end140
+
+if.end140: ; preds = %if.then139, %if.end136
+ %66 = load i32*, i32** @a, align 4
+ %arrayidx141 = getelementptr inbounds i32, i32* %66, i32 37
+ %67 = load i32, i32* %arrayidx141, align 4
+ %tobool142 = icmp ne i32 %67, 0
+ br i1 %tobool142, label %if.then143, label %if.end144
+
+if.then143: ; preds = %if.end140
+ store i32 3333, i32* @f, align 4
+ br label %if.end144
+
+if.end144: ; preds = %if.then143, %if.end140
+ %68 = load i32*, i32** @a, align 4
+ %arrayidx145 = getelementptr inbounds i32, i32* %68, i32 38
+ %69 = load i32, i32* %arrayidx145, align 4
+ %tobool146 = icmp ne i32 %69, 0
+ br i1 %tobool146, label %if.then147, label %if.end148
+
+if.then147: ; preds = %if.end144
+ store i32 6430, i32* @f, align 4
+ br label %if.end148
+
+if.end148: ; preds = %if.then147, %if.end144
+ %70 = load i32*, i32** @a, align 4
+ %arrayidx149 = getelementptr inbounds i32, i32* %70, i32 39
+ %71 = load i32, i32* %arrayidx149, align 4
+ %tobool150 = icmp ne i32 %71, 0
+ br i1 %tobool150, label %if.then151, label %if.end152
+
+if.then151: ; preds = %if.end148
+ store i32 19857, i32* @f, align 4
+ br label %if.end152
+
+if.end152: ; preds = %if.then151, %if.end148
+ %72 = load i32*, i32** @a, align 4
+ %arrayidx153 = getelementptr inbounds i32, i32* %72, i32 40
+ %73 = load i32, i32* %arrayidx153, align 4
+ %tobool154 = icmp ne i32 %73, 0
+ br i1 %tobool154, label %if.then155, label %if.end156
+
+if.then155: ; preds = %if.end152
+ store i32 13237, i32* @f, align 4
+ br label %if.end156
+
+if.end156: ; preds = %if.then155, %if.end152
+ br label %if.then159
+
+if.then159: ; preds = %if.end156
+ store i32 163, i32* @f, align 4
+ br label %if.end160
+
+if.end160: ; preds = %if.then159
+ %74 = load i32*, i32** @a, align 4
+ %arrayidx161 = getelementptr inbounds i32, i32* %74, i32 42
+ %75 = load i32, i32* %arrayidx161, align 4
+ %tobool162 = icmp ne i32 %75, 0
+ br i1 %tobool162, label %if.then163, label %if.end164
+
+if.then163: ; preds = %if.end160
+ store i32 1961, i32* @f, align 4
+ br label %if.end164
+
+if.end164: ; preds = %if.then163, %if.end160
+ %76 = load i32*, i32** @a, align 4
+ %arrayidx165 = getelementptr inbounds i32, i32* %76, i32 43
+ %77 = load i32, i32* %arrayidx165, align 4
+ %tobool166 = icmp ne i32 %77, 0
+ br i1 %tobool166, label %if.then167, label %if.end168
+
+if.then167: ; preds = %if.end164
+ store i32 11325, i32* @f, align 4
+ br label %if.end168
+
+if.end168: ; preds = %if.then167, %if.end164
+ %78 = load i32*, i32** @a, align 4
+ %arrayidx169 = getelementptr inbounds i32, i32* %78, i32 44
+ %79 = load i32, i32* %arrayidx169, align 4
+ %tobool170 = icmp ne i32 %79, 0
+ br i1 %tobool170, label %if.then171, label %if.end172
+
+if.then171: ; preds = %if.end168
+ store i32 12189, i32* @f, align 4
+ br label %if.end172
+
+if.end172: ; preds = %if.then171, %if.end168
+ %80 = load i32*, i32** @a, align 4
+ %arrayidx173 = getelementptr inbounds i32, i32* %80, i32 45
+ %81 = load i32, i32* %arrayidx173, align 4
+ %tobool174 = icmp ne i32 %81, 0
+ br i1 %tobool174, label %if.then175, label %if.end176
+
+if.then175: ; preds = %if.end172
+ store i32 15172, i32* @f, align 4
+ br label %if.end176
+
+if.end176: ; preds = %if.then175, %if.end172
+ br label %if.then179
+
+if.then179: ; preds = %if.end176
+ store i32 13491, i32* @f, align 4
+ br label %if.end180
+
+if.end180: ; preds = %if.then179
+ %82 = load i32*, i32** @a, align 4
+ %arrayidx181 = getelementptr inbounds i32, i32* %82, i32 47
+ %83 = load i32, i32* %arrayidx181, align 4
+ %tobool182 = icmp ne i32 %83, 0
+ br i1 %tobool182, label %if.then183, label %if.end184
+
+if.then183: ; preds = %if.end180
+ store i32 9521, i32* @f, align 4
+ br label %if.end184
+
+if.end184: ; preds = %if.then183, %if.end180
+ %84 = load i32*, i32** @a, align 4
+ %arrayidx185 = getelementptr inbounds i32, i32* %84, i32 48
+ %85 = load i32, i32* %arrayidx185, align 4
+ %tobool186 = icmp ne i32 %85, 0
+ br i1 %tobool186, label %if.then187, label %if.end188
+
+if.then187: ; preds = %if.end184
+ store i32 448, i32* @f, align 4
+ br label %if.end188
+
+if.end188: ; preds = %if.then187, %if.end184
+ %86 = load i32*, i32** @a, align 4
+ %arrayidx189 = getelementptr inbounds i32, i32* %86, i32 49
+ %87 = load i32, i32* %arrayidx189, align 4
+ %tobool190 = icmp ne i32 %87, 0
+ br i1 %tobool190, label %if.then191, label %if.end192
+
+if.then191: ; preds = %if.end188
+ store i32 13468, i32* @f, align 4
+ br label %if.end192
+
+if.end192: ; preds = %if.then191, %if.end188
+ %88 = load i32*, i32** @a, align 4
+ %arrayidx193 = getelementptr inbounds i32, i32* %88, i32 50
+ %89 = load i32, i32* %arrayidx193, align 4
+ %tobool194 = icmp ne i32 %89, 0
+ br i1 %tobool194, label %if.then195, label %if.end196
+
+if.then195: ; preds = %if.end192
+ store i32 16190, i32* @f, align 4
+ br label %if.end196
+
+if.end196: ; preds = %if.then195, %if.end192
+ %90 = load i32*, i32** @a, align 4
+ %arrayidx197 = getelementptr inbounds i32, i32* %90, i32 51
+ %91 = load i32, i32* %arrayidx197, align 4
+ %tobool198 = icmp ne i32 %91, 0
+ br i1 %tobool198, label %if.then199, label %if.end200
+
+if.then199: ; preds = %if.end196
+ store i32 8602, i32* @f, align 4
+ br label %if.end200
+
+if.end200: ; preds = %if.then199, %if.end196
+ %92 = load i32*, i32** @a, align 4
+ %arrayidx201 = getelementptr inbounds i32, i32* %92, i32 52
+ %93 = load i32, i32* %arrayidx201, align 4
+ %tobool202 = icmp ne i32 %93, 0
+ br i1 %tobool202, label %if.then203, label %if.end204
+
+if.then203: ; preds = %if.end200
+ store i32 21083, i32* @f, align 4
+ br label %if.end204
+
+if.end204: ; preds = %if.then203, %if.end200
+ %94 = load i32*, i32** @a, align 4
+ %arrayidx205 = getelementptr inbounds i32, i32* %94, i32 53
+ %95 = load i32, i32* %arrayidx205, align 4
+ %tobool206 = icmp ne i32 %95, 0
+ br i1 %tobool206, label %if.then207, label %if.end208
+
+if.then207: ; preds = %if.end204
+ store i32 5172, i32* @f, align 4
+ br label %if.end208
+
+if.end208: ; preds = %if.then207, %if.end204
+ %96 = load i32*, i32** @a, align 4
+ %arrayidx209 = getelementptr inbounds i32, i32* %96, i32 54
+ %97 = load i32, i32* %arrayidx209, align 4
+ %tobool210 = icmp ne i32 %97, 0
+ br i1 %tobool210, label %if.then211, label %if.end212
+
+if.then211: ; preds = %if.end208
+ store i32 32505, i32* @f, align 4
+ br label %if.end212
+
+if.end212: ; preds = %if.then211, %if.end208
+ br label %if.then215
+
+if.then215: ; preds = %if.end212
+ store i32 23490, i32* @f, align 4
+ br label %if.end216
+
+if.end216: ; preds = %if.then215
+ %98 = load i32*, i32** @a, align 4
+ %arrayidx217 = getelementptr inbounds i32, i32* %98, i32 56
+ %99 = load i32, i32* %arrayidx217, align 4
+ %tobool218 = icmp ne i32 %99, 0
+ br i1 %tobool218, label %if.then219, label %if.end220
+
+if.then219: ; preds = %if.end216
+ store i32 30699, i32* @f, align 4
+ br label %if.end220
+
+if.end220: ; preds = %if.then219, %if.end216
+ %100 = load i32*, i32** @a, align 4
+ %arrayidx221 = getelementptr inbounds i32, i32* %100, i32 57
+ %101 = load i32, i32* %arrayidx221, align 4
+ %tobool222 = icmp ne i32 %101, 0
+ br i1 %tobool222, label %if.then223, label %if.end224
+
+if.then223: ; preds = %if.end220
+ store i32 16286, i32* @f, align 4
+ br label %if.end224
+
+if.end224: ; preds = %if.then223, %if.end220
+ %102 = load i32*, i32** @a, align 4
+ %arrayidx225 = getelementptr inbounds i32, i32* %102, i32 58
+ %103 = load i32, i32* %arrayidx225, align 4
+ %tobool226 = icmp ne i32 %103, 0
+ br i1 %tobool226, label %if.then227, label %if.end228
+
+if.then227: ; preds = %if.end224
+ store i32 17939, i32* @f, align 4
+ br label %if.end228
+
+if.end228: ; preds = %if.then227, %if.end224
+ %104 = load i32*, i32** @a, align 4
+ %arrayidx229 = getelementptr inbounds i32, i32* %104, i32 59
+ %105 = load i32, i32* %arrayidx229, align 4
+ %tobool230 = icmp ne i32 %105, 0
+ br i1 %tobool230, label %if.then231, label %if.end232
+
+if.then231: ; preds = %if.end228
+ store i32 25148, i32* @f, align 4
+ br label %if.end232
+
+if.end232: ; preds = %if.then231, %if.end228
+ %106 = load i32*, i32** @a, align 4
+ %arrayidx233 = getelementptr inbounds i32, i32* %106, i32 60
+ %107 = load i32, i32* %arrayidx233, align 4
+ %tobool234 = icmp ne i32 %107, 0
+ br i1 %tobool234, label %if.then235, label %if.end236
+
+if.then235: ; preds = %if.end232
+ store i32 644, i32* @f, align 4
+ br label %if.end236
+
+if.end236: ; preds = %if.then235, %if.end232
+ br label %if.then239
+
+if.then239: ; preds = %if.end236
+ store i32 23457, i32* @f, align 4
+ br label %if.end240
+
+if.end240: ; preds = %if.then239
+ %108 = load i32*, i32** @a, align 4
+ %arrayidx241 = getelementptr inbounds i32, i32* %108, i32 62
+ %109 = load i32, i32* %arrayidx241, align 4
+ %tobool242 = icmp ne i32 %109, 0
+ br i1 %tobool242, label %if.then243, label %if.end244
+
+if.then243: ; preds = %if.end240
+ store i32 21116, i32* @f, align 4
+ br label %if.end244
+
+if.end244: ; preds = %if.then243, %if.end240
+ br label %if.then247
+
+if.then247: ; preds = %if.end244
+ store i32 10066, i32* @f, align 4
+ br label %if.end248
+
+if.end248: ; preds = %if.then247
+ %110 = load i32*, i32** @a, align 4
+ %arrayidx249 = getelementptr inbounds i32, i32* %110, i32 64
+ %111 = load i32, i32* %arrayidx249, align 4
+ %tobool250 = icmp ne i32 %111, 0
+ br i1 %tobool250, label %if.then251, label %if.end252
+
+if.then251: ; preds = %if.end248
+ store i32 9058, i32* @f, align 4
+ br label %if.end252
+
+if.end252: ; preds = %if.then251, %if.end248
+ %112 = load i32*, i32** @a, align 4
+ %arrayidx253 = getelementptr inbounds i32, i32* %112, i32 65
+ %113 = load i32, i32* %arrayidx253, align 4
+ %tobool254 = icmp ne i32 %113, 0
+ br i1 %tobool254, label %if.then255, label %if.end256
+
+if.then255: ; preds = %if.end252
+ store i32 8383, i32* @f, align 4
+ br label %if.end256
+
+if.end256: ; preds = %if.then255, %if.end252
+ %114 = load i32*, i32** @a, align 4
+ %arrayidx257 = getelementptr inbounds i32, i32* %114, i32 66
+ %115 = load i32, i32* %arrayidx257, align 4
+ %tobool258 = icmp ne i32 %115, 0
+ br i1 %tobool258, label %if.then259, label %if.end260
+
+if.then259: ; preds = %if.end256
+ store i32 31069, i32* @f, align 4
+ br label %if.end260
+
+if.end260: ; preds = %if.then259, %if.end256
+ %116 = load i32*, i32** @a, align 4
+ %arrayidx261 = getelementptr inbounds i32, i32* %116, i32 67
+ %117 = load i32, i32* %arrayidx261, align 4
+ %tobool262 = icmp ne i32 %117, 0
+ br i1 %tobool262, label %if.then263, label %if.end264
+
+if.then263: ; preds = %if.end260
+ store i32 32280, i32* @f, align 4
+ br label %if.end264
+
+if.end264: ; preds = %if.then263, %if.end260
+ br label %if.then267
+
+if.then267: ; preds = %if.end264
+ store i32 1553, i32* @f, align 4
+ br label %if.end268
+
+if.end268: ; preds = %if.then267
+ %118 = load i32*, i32** @a, align 4
+ %arrayidx269 = getelementptr inbounds i32, i32* %118, i32 69
+ %119 = load i32, i32* %arrayidx269, align 4
+ %tobool270 = icmp ne i32 %119, 0
+ br i1 %tobool270, label %if.then271, label %if.end272
+
+if.then271: ; preds = %if.end268
+ store i32 8118, i32* @f, align 4
+ br label %if.end272
+
+if.end272: ; preds = %if.then271, %if.end268
+ %120 = load i32*, i32** @a, align 4
+ %arrayidx273 = getelementptr inbounds i32, i32* %120, i32 70
+ %121 = load i32, i32* %arrayidx273, align 4
+ %tobool274 = icmp ne i32 %121, 0
+ br i1 %tobool274, label %if.then275, label %if.end276
+
+if.then275: ; preds = %if.end272
+ store i32 12959, i32* @f, align 4
+ br label %if.end276
+
+if.end276: ; preds = %if.then275, %if.end272
+ %122 = load i32*, i32** @a, align 4
+ %arrayidx277 = getelementptr inbounds i32, i32* %122, i32 71
+ %123 = load i32, i32* %arrayidx277, align 4
+ %tobool278 = icmp ne i32 %123, 0
+ br i1 %tobool278, label %if.then279, label %if.end280
+
+if.then279: ; preds = %if.end276
+ store i32 675, i32* @f, align 4
+ br label %if.end280
+
+if.end280: ; preds = %if.then279, %if.end276
+ %124 = load i32*, i32** @a, align 4
+ %arrayidx281 = getelementptr inbounds i32, i32* %124, i32 72
+ %125 = load i32, i32* %arrayidx281, align 4
+ %tobool282 = icmp ne i32 %125, 0
+ br i1 %tobool282, label %if.then283, label %if.end284
+
+if.then283: ; preds = %if.end280
+ store i32 29144, i32* @f, align 4
+ br label %if.end284
+
+if.end284: ; preds = %if.then283, %if.end280
+ %126 = load i32*, i32** @a, align 4
+ %arrayidx285 = getelementptr inbounds i32, i32* %126, i32 73
+ %127 = load i32, i32* %arrayidx285, align 4
+ %tobool286 = icmp ne i32 %127, 0
+ br i1 %tobool286, label %if.then287, label %if.end288
+
+if.then287: ; preds = %if.end284
+ store i32 26130, i32* @f, align 4
+ br label %if.end288
+
+if.end288: ; preds = %if.then287, %if.end284
+ %128 = load i32*, i32** @a, align 4
+ %arrayidx289 = getelementptr inbounds i32, i32* %128, i32 74
+ %129 = load i32, i32* %arrayidx289, align 4
+ %tobool290 = icmp ne i32 %129, 0
+ br i1 %tobool290, label %if.then291, label %if.end292
+
+if.then291: ; preds = %if.end288
+ store i32 31934, i32* @f, align 4
+ br label %if.end292
+
+if.end292: ; preds = %if.then291, %if.end288
+ %130 = load i32*, i32** @a, align 4
+ %arrayidx293 = getelementptr inbounds i32, i32* %130, i32 75
+ %131 = load i32, i32* %arrayidx293, align 4
+ %tobool294 = icmp ne i32 %131, 0
+ br i1 %tobool294, label %if.then295, label %if.end296
+
+if.then295: ; preds = %if.end292
+ store i32 25862, i32* @f, align 4
+ br label %if.end296
+
+if.end296: ; preds = %if.then295, %if.end292
+ %132 = load i32*, i32** @a, align 4
+ %arrayidx297 = getelementptr inbounds i32, i32* %132, i32 76
+ %133 = load i32, i32* %arrayidx297, align 4
+ %tobool298 = icmp ne i32 %133, 0
+ br i1 %tobool298, label %if.then299, label %if.end300
+
+if.then299: ; preds = %if.end296
+ store i32 10642, i32* @f, align 4
+ br label %if.end300
+
+if.end300: ; preds = %if.then299, %if.end296
+ %134 = load i32*, i32** @a, align 4
+ %arrayidx301 = getelementptr inbounds i32, i32* %134, i32 77
+ %135 = load i32, i32* %arrayidx301, align 4
+ %tobool302 = icmp ne i32 %135, 0
+ br i1 %tobool302, label %if.then303, label %if.end304
+
+if.then303: ; preds = %if.end300
+ store i32 20209, i32* @f, align 4
+ br label %if.end304
+
+if.end304: ; preds = %if.then303, %if.end300
+ %136 = load i32*, i32** @a, align 4
+ %arrayidx305 = getelementptr inbounds i32, i32* %136, i32 78
+ %137 = load i32, i32* %arrayidx305, align 4
+ %tobool306 = icmp ne i32 %137, 0
+ br i1 %tobool306, label %if.then307, label %if.end308
+
+if.then307: ; preds = %if.end304
+ store i32 30889, i32* @f, align 4
+ br label %if.end308
+
+if.end308: ; preds = %if.then307, %if.end304
+ %138 = load i32*, i32** @a, align 4
+ %arrayidx309 = getelementptr inbounds i32, i32* %138, i32 79
+ %139 = load i32, i32* %arrayidx309, align 4
+ %tobool310 = icmp ne i32 %139, 0
+ br i1 %tobool310, label %if.then311, label %if.end312
+
+if.then311: ; preds = %if.end308
+ store i32 18688, i32* @f, align 4
+ br label %if.end312
+
+if.end312: ; preds = %if.then311, %if.end308
+ %140 = load i32*, i32** @a, align 4
+ %arrayidx313 = getelementptr inbounds i32, i32* %140, i32 80
+ %141 = load i32, i32* %arrayidx313, align 4
+ %tobool314 = icmp ne i32 %141, 0
+ br i1 %tobool314, label %if.then315, label %if.end316
+
+if.then315: ; preds = %if.end312
+ store i32 28726, i32* @f, align 4
+ br label %if.end316
+
+if.end316: ; preds = %if.then315, %if.end312
+ %142 = load i32*, i32** @a, align 4
+ %arrayidx317 = getelementptr inbounds i32, i32* %142, i32 81
+ %143 = load i32, i32* %arrayidx317, align 4
+ %tobool318 = icmp ne i32 %143, 0
+ br i1 %tobool318, label %if.then319, label %if.end320
+
+if.then319: ; preds = %if.end316
+ store i32 4266, i32* @f, align 4
+ br label %if.end320
+
+if.end320: ; preds = %if.then319, %if.end316
+ %144 = load i32*, i32** @a, align 4
+ %arrayidx321 = getelementptr inbounds i32, i32* %144, i32 82
+ %145 = load i32, i32* %arrayidx321, align 4
+ %tobool322 = icmp ne i32 %145, 0
+ br i1 %tobool322, label %if.then323, label %if.end324
+
+if.then323: ; preds = %if.end320
+ store i32 15461, i32* @f, align 4
+ br label %if.end324
+
+if.end324: ; preds = %if.then323, %if.end320
+ %146 = load i32*, i32** @a, align 4
+ %arrayidx325 = getelementptr inbounds i32, i32* %146, i32 83
+ %147 = load i32, i32* %arrayidx325, align 4
+ %tobool326 = icmp ne i32 %147, 0
+ br i1 %tobool326, label %if.then327, label %if.end328
+
+if.then327: ; preds = %if.end324
+ store i32 24716, i32* @f, align 4
+ br label %if.end328
+
+if.end328: ; preds = %if.then327, %if.end324
+ br label %if.then331
+
+if.then331: ; preds = %if.end328
+ store i32 18727, i32* @f, align 4
+ br label %if.end332
+
+if.end332: ; preds = %if.then331
+ %148 = load i32*, i32** @a, align 4
+ %arrayidx333 = getelementptr inbounds i32, i32* %148, i32 85
+ %149 = load i32, i32* %arrayidx333, align 4
+ %tobool334 = icmp ne i32 %149, 0
+ br i1 %tobool334, label %if.then335, label %if.end336
+
+if.then335: ; preds = %if.end332
+ store i32 29505, i32* @f, align 4
+ br label %if.end336
+
+if.end336: ; preds = %if.then335, %if.end332
+ %150 = load i32*, i32** @a, align 4
+ %arrayidx337 = getelementptr inbounds i32, i32* %150, i32 86
+ %151 = load i32, i32* %arrayidx337, align 4
+ %tobool338 = icmp ne i32 %151, 0
+ br i1 %tobool338, label %if.then339, label %if.end340
+
+if.then339: ; preds = %if.end336
+ store i32 27008, i32* @f, align 4
+ br label %if.end340
+
+if.end340: ; preds = %if.then339, %if.end336
+ %152 = load i32*, i32** @a, align 4
+ %arrayidx341 = getelementptr inbounds i32, i32* %152, i32 87
+ %153 = load i32, i32* %arrayidx341, align 4
+ %tobool342 = icmp ne i32 %153, 0
+ br i1 %tobool342, label %if.then343, label %if.end344
+
+if.then343: ; preds = %if.end340
+ store i32 6550, i32* @f, align 4
+ br label %if.end344
+
+if.end344: ; preds = %if.then343, %if.end340
+ br label %if.then347
+
+if.then347: ; preds = %if.end344
+ store i32 1117, i32* @f, align 4
+ br label %if.end348
+
+if.end348: ; preds = %if.then347
+ %154 = load i32*, i32** @a, align 4
+ %arrayidx349 = getelementptr inbounds i32, i32* %154, i32 89
+ %155 = load i32, i32* %arrayidx349, align 4
+ %tobool350 = icmp ne i32 %155, 0
+ br i1 %tobool350, label %if.then351, label %if.end352
+
+if.then351: ; preds = %if.end348
+ store i32 20118, i32* @f, align 4
+ br label %if.end352
+
+if.end352: ; preds = %if.then351, %if.end348
+ %156 = load i32*, i32** @a, align 4
+ %arrayidx353 = getelementptr inbounds i32, i32* %156, i32 90
+ %157 = load i32, i32* %arrayidx353, align 4
+ %tobool354 = icmp ne i32 %157, 0
+ br i1 %tobool354, label %if.then355, label %if.end356
+
+if.then355: ; preds = %if.end352
+ store i32 13650, i32* @f, align 4
+ br label %if.end356
+
+if.end356: ; preds = %if.then355, %if.end352
+ br label %if.then359
+
+if.then359: ; preds = %if.end356
+ store i32 18642, i32* @f, align 4
+ br label %if.end360
+
+if.end360: ; preds = %if.then359
+ %158 = load i32*, i32** @a, align 4
+ %arrayidx361 = getelementptr inbounds i32, i32* %158, i32 92
+ %159 = load i32, i32* %arrayidx361, align 4
+ %tobool362 = icmp ne i32 %159, 0
+ br i1 %tobool362, label %if.then363, label %if.end364
+
+if.then363: ; preds = %if.end360
+ store i32 30662, i32* @f, align 4
+ br label %if.end364
+
+if.end364: ; preds = %if.then363, %if.end360
+ %160 = load i32*, i32** @a, align 4
+ %arrayidx365 = getelementptr inbounds i32, i32* %160, i32 93
+ %161 = load i32, i32* %arrayidx365, align 4
+ %tobool366 = icmp ne i32 %161, 0
+ br i1 %tobool366, label %if.then367, label %if.end368
+
+if.then367: ; preds = %if.end364
+ store i32 8095, i32* @f, align 4
+ br label %if.end368
+
+if.end368: ; preds = %if.then367, %if.end364
+ %162 = load i32*, i32** @a, align 4
+ %arrayidx369 = getelementptr inbounds i32, i32* %162, i32 94
+ %163 = load i32, i32* %arrayidx369, align 4
+ %tobool370 = icmp ne i32 %163, 0
+ br i1 %tobool370, label %if.then371, label %if.end372
+
+if.then371: ; preds = %if.end368
+ store i32 8442, i32* @f, align 4
+ br label %if.end372
+
+if.end372: ; preds = %if.then371, %if.end368
+ %164 = load i32*, i32** @a, align 4
+ %arrayidx373 = getelementptr inbounds i32, i32* %164, i32 95
+ %165 = load i32, i32* %arrayidx373, align 4
+ %tobool374 = icmp ne i32 %165, 0
+ br i1 %tobool374, label %if.then375, label %if.end376
+
+if.then375: ; preds = %if.end372
+ store i32 8153, i32* @f, align 4
+ br label %if.end376
+
+if.end376: ; preds = %if.then375, %if.end372
+ br label %if.then379
+
+if.then379: ; preds = %if.end376
+ store i32 12965, i32* @f, align 4
+ br label %if.end380
+
+if.end380: ; preds = %if.then379
+ %166 = load i32*, i32** @a, align 4
+ %arrayidx381 = getelementptr inbounds i32, i32* %166, i32 97
+ %167 = load i32, i32* %arrayidx381, align 4
+ %tobool382 = icmp ne i32 %167, 0
+ br i1 %tobool382, label %if.then383, label %if.end384
+
+if.then383: ; preds = %if.end380
+ store i32 14277, i32* @f, align 4
+ br label %if.end384
+
+if.end384: ; preds = %if.then383, %if.end380
+ br label %if.then387
+
+if.then387: ; preds = %if.end384
+ store i32 1997, i32* @f, align 4
+ br label %if.end388
+
+if.end388: ; preds = %if.then387
+ %168 = load i32*, i32** @a, align 4
+ %arrayidx389 = getelementptr inbounds i32, i32* %168, i32 99
+ %169 = load i32, i32* %arrayidx389, align 4
+ %tobool390 = icmp ne i32 %169, 0
+ br i1 %tobool390, label %if.then391, label %if.end392
+
+if.then391: ; preds = %if.end388
+ store i32 31385, i32* @f, align 4
+ br label %if.end392
+
+if.end392: ; preds = %if.then391, %if.end388
+ %170 = load i32*, i32** @a, align 4
+ %arrayidx393 = getelementptr inbounds i32, i32* %170, i32 100
+ %171 = load i32, i32* %arrayidx393, align 4
+ %tobool394 = icmp ne i32 %171, 0
+ br i1 %tobool394, label %if.then395, label %if.end396
+
+if.then395: ; preds = %if.end392
+ store i32 8286, i32* @f, align 4
+ br label %if.end396
+
+if.end396: ; preds = %if.then395, %if.end392
+ ret void
+}
diff --git a/test/CodeGen/Thumb/stack-access.ll b/test/CodeGen/Thumb/stack-access.ll
index fded4104207c..44217aba62d5 100644
--- a/test/CodeGen/Thumb/stack-access.ll
+++ b/test/CodeGen/Thumb/stack-access.ll
@@ -74,15 +74,17 @@ define zeroext i16 @test6() {
}
; Accessing the bottom of a large array shouldn't require materializing a base
+;
+; CHECK: movs [[REG:r[0-9]+]], #1
+; CHECK: str [[REG]], [sp, #16]
+; CHECK: str [[REG]], [sp, #4]
+
define void @test7() {
%arr = alloca [200 x i32], align 4
- ; CHECK: movs [[REG:r[0-9]+]], #1
- ; CHECK: str [[REG]], [sp, #4]
%arrayidx = getelementptr inbounds [200 x i32], [200 x i32]* %arr, i32 0, i32 1
store i32 1, i32* %arrayidx, align 4
- ; CHECK: str [[REG]], [sp, #16]
%arrayidx1 = getelementptr inbounds [200 x i32], [200 x i32]* %arr, i32 0, i32 4
store i32 1, i32* %arrayidx1, align 4
@@ -96,30 +98,36 @@ define void @test8() {
%arr1 = alloca [224 x i32], align 4
; CHECK: movs [[REG:r[0-9]+]], #1
-; CHECK: str [[REG]], [sp]
+; CHECK-DAG: str [[REG]], [sp]
%arr1idx1 = getelementptr inbounds [224 x i32], [224 x i32]* %arr1, i32 0, i32 0
store i32 1, i32* %arr1idx1, align 4
; Offset in range for sp-based store, but not for non-sp-based store
-; CHECK: str [[REG]], [sp, #128]
+; CHECK-DAG: str [[REG]], [sp, #128]
%arr1idx2 = getelementptr inbounds [224 x i32], [224 x i32]* %arr1, i32 0, i32 32
store i32 1, i32* %arr1idx2, align 4
-; CHECK: str [[REG]], [sp, #896]
+; CHECK-DAG: str [[REG]], [sp, #896]
%arr2idx1 = getelementptr inbounds [224 x i32], [224 x i32]* %arr2, i32 0, i32 0
store i32 1, i32* %arr2idx1, align 4
; %arr2 is in range, but this element of it is not
-; CHECK: str [[REG]], [{{r[0-9]+}}]
+; CHECK-DAG: ldr [[RA:r[0-9]+]], .LCPI7_2
+; CHECK-DAG: add [[RA]], sp
+; CHECK-DAG: str [[REG]], [{{r[0-9]+}}]
%arr2idx2 = getelementptr inbounds [224 x i32], [224 x i32]* %arr2, i32 0, i32 32
store i32 1, i32* %arr2idx2, align 4
; %arr3 is not in range
-; CHECK: str [[REG]], [{{r[0-9]+}}]
+; CHECK-DAG: ldr [[RB:r[0-9]+]], .LCPI7_3
+; CHECK-DAG: add [[RB]], sp
+; CHECK-DAG: str [[REG]], [{{r[0-9]+}}]
%arr3idx1 = getelementptr inbounds [224 x i32], [224 x i32]* %arr3, i32 0, i32 0
store i32 1, i32* %arr3idx1, align 4
-; CHECK: str [[REG]], [{{r[0-9]+}}]
+; CHECK-DAG: ldr [[RC:r[0-9]+]], .LCPI7_4
+; CHECK-DAG: add [[RC]], sp
+; CHECK-DAG: str [[REG]], [{{r[0-9]+}}]
%arr3idx2 = getelementptr inbounds [224 x i32], [224 x i32]* %arr3, i32 0, i32 32
store i32 1, i32* %arr3idx2, align 4
diff --git a/test/CodeGen/Thumb/stack-coloring-without-frame-ptr.ll b/test/CodeGen/Thumb/stack-coloring-without-frame-ptr.ll
index 97c66d9dc865..6678f68c4e89 100644
--- a/test/CodeGen/Thumb/stack-coloring-without-frame-ptr.ll
+++ b/test/CodeGen/Thumb/stack-coloring-without-frame-ptr.ll
@@ -12,18 +12,18 @@ entry:
%0 = bitcast %deque* %var3 to i8*
%1 = bitcast %iterator* %var1 to i8*
- call void @llvm.lifetime.start(i64 16, i8* %1) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 16, i8* %1) nounwind
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %0, i32 16, i32 4, i1 false)
- call void @llvm.lifetime.end(i64 16, i8* %1) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 16, i8* %1) nounwind
%2 = bitcast %insert_iterator* %var2 to i8*
- call void @llvm.lifetime.start(i64 20, i8* %2) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 20, i8* %2) nounwind
ret i32 0
}
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
diff --git a/test/CodeGen/Thumb/stack_guard_remat.ll b/test/CodeGen/Thumb/stack_guard_remat.ll
index 41edef5a58e6..294c6a6bd454 100644
--- a/test/CodeGen/Thumb/stack_guard_remat.ll
+++ b/test/CodeGen/Thumb/stack_guard_remat.ll
@@ -27,20 +27,20 @@
define i32 @test_stack_guard_remat() #0 {
%a1 = alloca [256 x i32], align 4
%1 = bitcast [256 x i32]* %a1 to i8*
- call void @llvm.lifetime.start(i64 1024, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 1024, i8* %1)
%2 = getelementptr inbounds [256 x i32], [256 x i32]* %a1, i32 0, i32 0
call void @foo3(i32* %2) #3
call void asm sideeffect "foo2", "~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{sp},~{lr}"()
- call void @llvm.lifetime.end(i64 1024, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 1024, i8* %1)
ret i32 0
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare void @foo3(i32*)
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Thumb/stm-deprecated.ll b/test/CodeGen/Thumb/stm-deprecated.ll
new file mode 100644
index 000000000000..ffe2c0afd921
--- /dev/null
+++ b/test/CodeGen/Thumb/stm-deprecated.ll
@@ -0,0 +1,19 @@
+; RUN: llc -mtriple=thumbv6m-eabi -verify-machineinstrs %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv5e-linux-gnueabi -verify-machineinstrs %s -o - | FileCheck %s
+
+%0 = type { %0*, %0*, i32 }
+
+@x1 = external global %0, align 4
+@x2 = external global %0, align 4
+
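+; Forming an stm here would place the base register (r1) in the register list
+; of a writeback store, a deprecated encoding, so two plain strs are expected
+; instead (see the CHECKs below).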
+; CHECK: str r0, [r1]
+; CHECK-NEXT: str r1, [r1, #4]
+; CHECK-NOT: stm
+
+define void @foo(i32 %unused, %0* %x) {
+ %first = getelementptr inbounds %0, %0* %x, i32 0, i32 0
+ %second = getelementptr inbounds %0, %0* %x, i32 0, i32 1
+ store %0* @x1, %0** %first
+ store %0* %x, %0** %second
+ unreachable
+}
diff --git a/test/CodeGen/Thumb/tbb-reuse.mir b/test/CodeGen/Thumb/tbb-reuse.mir
new file mode 100644
index 000000000000..15b9fa184c38
--- /dev/null
+++ b/test/CodeGen/Thumb/tbb-reuse.mir
@@ -0,0 +1,151 @@
+# RUN: llc -run-pass arm-cp-islands %s -o - | FileCheck %s
+
+--- |
+ ; ModuleID = '<stdin>'
+ source_filename = "<stdin>"
+ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+ target triple = "thumbv6m--none-eabi"
+
+ declare void @exit0()
+
+ declare void @exit1(i32)
+
+ declare void @exit2()
+
+ declare void @exit3()
+
+ declare void @exit4()
+
+ define void @jump_table(i32 %val, i32 %arg2, i32 %arg3, i32 %arg4) {
+ entry:
+ switch i32 %val, label %default [
+ i32 1, label %lab1
+ i32 2, label %lab2
+ i32 3, label %lab3
+ i32 4, label %lab4
+ ]
+
+ default: ; preds = %entry
+ tail call void @exit0()
+ ret void
+
+ lab1: ; preds = %entry
+ %b = sub i32 %val, 1
+ %a = shl i32 %b, 2
+ tail call void @exit1(i32 %a)
+ ret void
+
+ lab2: ; preds = %entry
+ tail call void @exit2()
+ ret void
+
+ lab3: ; preds = %entry
+ tail call void @exit3()
+ ret void
+
+ lab4: ; preds = %entry
+ tail call void @exit4()
+ ret void
+ }
+
+ ; Function Attrs: nounwind
+ declare void @llvm.stackprotector(i8*, i8**) #0
+
+ attributes #0 = { nounwind }
+
+...
+---
+name: jump_table
+alignment: 1
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%r0' }
+calleeSavedRegisters: [ '%lr', '%d8', '%d9', '%d10', '%d11', '%d12', '%d13',
+ '%d14', '%d15', '%q4', '%q5', '%q6', '%q7', '%r4',
+ '%r5', '%r6', '%r7', '%r8', '%r9', '%r10', '%r11',
+ '%s16', '%s17', '%s18', '%s19', '%s20', '%s21',
+ '%s22', '%s23', '%s24', '%s25', '%s26', '%s27',
+ '%s28', '%s29', '%s30', '%s31', '%d8_d10', '%d9_d11',
+ '%d10_d12', '%d11_d13', '%d12_d14', '%d13_d15',
+ '%q4_q5', '%q5_q6', '%q6_q7', '%q4_q5_q6_q7', '%r4_r5',
+ '%r6_r7', '%r8_r9', '%r10_r11', '%d8_d9_d10', '%d9_d10_d11',
+ '%d10_d11_d12', '%d11_d12_d13', '%d12_d13_d14',
+ '%d13_d14_d15', '%d8_d10_d12', '%d9_d11_d13', '%d10_d12_d14',
+ '%d11_d13_d15', '%d8_d10_d12_d14', '%d9_d11_d13_d15',
+ '%d9_d10', '%d11_d12', '%d13_d14', '%d9_d10_d11_d12',
+ '%d11_d12_d13_d14' ]
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 8
+ offsetAdjustment: 0
+ maxAlignment: 4
+ adjustsStack: true
+ hasCalls: true
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+stack:
+ - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '%lr' }
+ - { id: 1, type: spill-slot, offset: -8, size: 4, alignment: 4, callee-saved-register: '%r7' }
+jumpTable:
+ kind: inline
+ entries:
+ - id: 0
+ blocks: [ '%bb.3.lab1', '%bb.4.lab2', '%bb.5.lab3', '%bb.6.lab4' ]
+# r1 is redefined in the middle of the recognizable jump sequence, so the
+# rewrite to a table branch must not fire: it would clobber r1's new value.
+# CHECK-NOT: tTBB_JT
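+# (The tLSLri between the jump-table load and tBR_JTr below is the redefinition.)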
+
+body: |
+ bb.0.entry:
+ successors: %bb.2.default(0x19999998), %bb.1.entry(0x66666668)
+ liveins: %r0, %r7, %lr
+
+ frame-setup tPUSH 14, _, killed %r7, killed %lr, implicit-def %sp, implicit %sp
+ frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ frame-setup CFI_INSTRUCTION offset %lr, -4
+ frame-setup CFI_INSTRUCTION offset %r7, -8
+ %r1, dead %cpsr = tSUBi3 %r0, 1, 14, _
+ tCMPi8 %r1, 3, 14, _, implicit-def %cpsr
+ tBcc %bb.2.default, 8, killed %cpsr
+
+ bb.1.entry:
+ successors: %bb.3.lab1(0x20000000), %bb.4.lab2(0x20000000), %bb.5.lab3(0x20000000), %bb.6.lab4(0x20000000)
+ liveins: %r0, %r1
+
+ %r1, dead %cpsr = tLSLri killed %r1, 2, 14, _
+ %r2 = tLEApcrelJT %jump-table.0, 14, _
+ %r2 = tLDRr killed %r1, killed %r2, 14, _ :: (load 4 from jump-table)
+ %r1, dead %cpsr = tLSLri %r2, 2, 14, _
+ tBR_JTr killed %r2, %jump-table.0
+
+ bb.2.default:
+ tBL 14, _, @exit0, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
+ tPOP_RET 14, _, def %r7, def %pc, implicit-def %sp, implicit %sp
+
+ bb.3.lab1:
+ liveins: %r0, %r1
+
+ tBL 14, _, @exit1, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit %r0, implicit-def %sp
+ tPOP_RET 14, _, def %r7, def %pc, implicit-def %sp, implicit %sp
+
+ bb.4.lab2:
+ tBL 14, _, @exit2, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
+ tPOP_RET 14, _, def %r7, def %pc, implicit-def %sp, implicit %sp
+
+ bb.5.lab3:
+ tBL 14, _, @exit3, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
+ tPOP_RET 14, _, def %r7, def %pc, implicit-def %sp, implicit %sp
+
+ bb.6.lab4:
+ tBL 14, _, @exit4, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
+ tPOP_RET 14, _, def %r7, def %pc, implicit-def %sp, implicit %sp
+
+...
diff --git a/test/CodeGen/Thumb/thumb-shrink-wrapping.ll b/test/CodeGen/Thumb/thumb-shrink-wrapping.ll
index 6114b72569e7..c571e351a1ef 100644
--- a/test/CodeGen/Thumb/thumb-shrink-wrapping.ll
+++ b/test/CodeGen/Thumb/thumb-shrink-wrapping.ll
@@ -1,11 +1,12 @@
-; RUN: llc %s -o - -enable-shrink-wrap=true -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -mtriple=thumb-macho \
+; RUN: llc %s -o - -enable-shrink-wrap=true -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -tail-dup-placement=0 -mtriple=thumb-macho \
; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=ENABLE --check-prefix=ENABLE-V4T
-; RUN: llc %s -o - -enable-shrink-wrap=true -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -mtriple=thumbv5-macho \
+; RUN: llc %s -o - -enable-shrink-wrap=true -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -tail-dup-placement=0 -mtriple=thumbv5-macho \
; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=ENABLE --check-prefix=ENABLE-V5T
-; RUN: llc %s -o - -enable-shrink-wrap=false -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -mtriple=thumb-macho \
+; RUN: llc %s -o - -enable-shrink-wrap=false -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -tail-dup-placement=0 -mtriple=thumb-macho \
; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=DISABLE --check-prefix=DISABLE-V4T
-; RUN: llc %s -o - -enable-shrink-wrap=false -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -mtriple=thumbv5-macho \
+; RUN: llc %s -o - -enable-shrink-wrap=false -ifcvt-fn-start=1 -ifcvt-fn-stop=0 -tail-dup-placement=0 -mtriple=thumbv5-macho \
; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=DISABLE --check-prefix=DISABLE-V5T
+
;
; Note: Lots of tests use inline asm instead of regular calls.
; This allows better control over what the allocation will do.
@@ -15,6 +16,8 @@
; edges.
; Also disable the late if-converter as it makes it harder to reason about
; the diffs.
+; Disable tail duplication during placement, as v4t and v5t produce different
+; results due to branches not being analyzable under v5.
; Initial motivating example: Simple diamond with a call just on one side.
; CHECK-LABEL: foo:
@@ -502,14 +505,9 @@ if.end: ; preds = %for.body, %if.else
; CHECK-NEXT: str r1, {{\[}}[[TMP_SP]]]
; CHECK-NEXT: str r1, {{\[}}[[TMP_SP]], #4]
; CHECK-NEXT: str r1, {{\[}}[[TMP_SP]], #8]
-; Thumb has quite a strange way for moving stuff
-; in around. Oh well, match the current sequence.
-; CHECK: push {r1}
-; CHECK-NEXT: pop {r0}
-; CHECK: push {r1}
-; CHECK-NEXT: pop {r2}
-; CHECK: push {r1}
-; CHECK-NEXT: pop {r3}
+; CHECK: movs r0, r1
+; CHECK-NEXT: movs r2, r1
+; CHECK-NEXT: movs r3, r1
; CHECK-NEXT: bl
; CHECK-NEXT: lsls r0, r0, #3
;
diff --git a/test/CodeGen/Thumb2/cbnz.ll b/test/CodeGen/Thumb2/cbnz.ll
index 5c0bb5bfe1cd..e11c4038678c 100644
--- a/test/CodeGen/Thumb2/cbnz.ll
+++ b/test/CodeGen/Thumb2/cbnz.ll
@@ -26,7 +26,7 @@ t:
call void @x()
call void @x()
call void @x()
- ; CHECK: cbnz
+ ; CHECK: cbz
%q = icmp eq i32 %y, 0
br i1 %q, label %t2, label %f
diff --git a/test/CodeGen/Thumb2/float-cmp.ll b/test/CodeGen/Thumb2/float-cmp.ll
index 77b0999337c6..834812cddd6d 100644
--- a/test/CodeGen/Thumb2/float-cmp.ll
+++ b/test/CodeGen/Thumb2/float-cmp.ll
@@ -15,7 +15,7 @@ define i1 @cmp_f_false(float %a, float %b) {
define i1 @cmp_f_oeq(float %a, float %b) {
; CHECK-LABEL: cmp_f_oeq:
; NONE: bl __aeabi_fcmpeq
-; HARD: vcmpe.f32
+; HARD: vcmp.f32
; HARD: moveq r0, #1
%1 = fcmp oeq float %a, %b
ret i1 %1
@@ -56,7 +56,7 @@ define i1 @cmp_f_one(float %a, float %b) {
; CHECK-LABEL: cmp_f_one:
; NONE: bl __aeabi_fcmpgt
; NONE: bl __aeabi_fcmplt
-; HARD: vcmpe.f32
+; HARD: vcmp.f32
; HARD: movmi r0, #1
; HARD: movgt r0, #1
%1 = fcmp one float %a, %b
@@ -73,7 +73,7 @@ define i1 @cmp_f_ord(float %a, float %b) {
; CHECK-LABEL: cmp_f_ueq:
; NONE: bl __aeabi_fcmpeq
; NONE: bl __aeabi_fcmpun
-; HARD: vcmpe.f32
+; HARD: vcmp.f32
; HARD: moveq r0, #1
; HARD: movvs r0, #1
%1 = fcmp ueq float %a, %b
@@ -122,7 +122,7 @@ define i1 @cmp_f_ule(float %a, float %b) {
define i1 @cmp_f_une(float %a, float %b) {
; CHECK-LABEL: cmp_f_une:
; NONE: bl __aeabi_fcmpeq
-; HARD: vcmpe.f32
+; HARD: vcmp.f32
; HARD: movne r0, #1
%1 = fcmp une float %a, %b
ret i1 %1
@@ -154,7 +154,7 @@ define i1 @cmp_d_oeq(double %a, double %b) {
; CHECK-LABEL: cmp_d_oeq:
; NONE: bl __aeabi_dcmpeq
; SP: bl __aeabi_dcmpeq
-; DP: vcmpe.f64
+; DP: vcmp.f64
; DP: moveq r0, #1
%1 = fcmp oeq double %a, %b
ret i1 %1
@@ -201,7 +201,7 @@ define i1 @cmp_d_one(double %a, double %b) {
; NONE: bl __aeabi_dcmplt
; SP: bl __aeabi_dcmpgt
; SP: bl __aeabi_dcmplt
-; DP: vcmpe.f64
+; DP: vcmp.f64
; DP: movmi r0, #1
; DP: movgt r0, #1
%1 = fcmp one double %a, %b
@@ -259,7 +259,7 @@ define i1 @cmp_d_ueq(double %a, double %b) {
; NONE: bl __aeabi_dcmpun
; SP: bl __aeabi_dcmpeq
; SP: bl __aeabi_dcmpun
-; DP: vcmpe.f64
+; DP: vcmp.f64
; DP: moveq r0, #1
; DP: movvs r0, #1
%1 = fcmp ueq double %a, %b
@@ -290,7 +290,7 @@ define i1 @cmp_d_une(double %a, double %b) {
; CHECK-LABEL: cmp_d_une:
; NONE: bl __aeabi_dcmpeq
; SP: bl __aeabi_dcmpeq
-; DP: vcmpe.f64
+; DP: vcmp.f64
; DP: movne r0, #1
%1 = fcmp une double %a, %b
ret i1 %1
diff --git a/test/CodeGen/Thumb2/ifcvt-compare.ll b/test/CodeGen/Thumb2/ifcvt-compare.ll
index 7b5ce4fa3f5f..688195f579eb 100644
--- a/test/CodeGen/Thumb2/ifcvt-compare.ll
+++ b/test/CodeGen/Thumb2/ifcvt-compare.ll
@@ -4,7 +4,7 @@ declare void @x()
define void @f0(i32 %x) optsize {
; CHECK-LABEL: f0:
- ; CHECK: cbnz
+ ; CHECK: cbz
%p = icmp eq i32 %x, 0
br i1 %p, label %t, label %f
diff --git a/test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll b/test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll
index ae3084dcc62e..65ee4283b3f7 100644
--- a/test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll
+++ b/test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll
@@ -3,7 +3,7 @@ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "thumbv7-unknown-linux-gnueabihf"
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
; Function Attrs: nounwind
declare void @_ZNSaIcEC2Ev() unnamed_addr #0 align 2
@@ -25,7 +25,7 @@ define hidden void @_ZN4llvm14DOTGraphTraitsIPNS_13ScheduleDAGMIEE17getEdgeAttri
br label %3
; <label>:2: ; preds = %0
- call void @llvm.lifetime.start(i64 1, i8* undef) #0
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* undef) #0
call void @_ZNSaIcEC2Ev() #0
br label %3
diff --git a/test/CodeGen/Thumb2/intrinsics-coprocessor.ll b/test/CodeGen/Thumb2/intrinsics-coprocessor.ll
new file mode 100644
index 000000000000..248ec223a61e
--- /dev/null
+++ b/test/CodeGen/Thumb2/intrinsics-coprocessor.ll
@@ -0,0 +1,93 @@
+; RUN: llc < %s -march=thumb -mtriple=thumbv7-eabi -mcpu=cortex-a8 -show-mc-encoding | FileCheck %s
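+
+; Test that the ARM coprocessor intrinsics (mrc/mcr, mrrc/mcrr, cdp/cdp2 and
+; the ldc/stc families) lower to the corresponding Thumb2 instructions.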
+define void @coproc(i8* %i) nounwind {
+entry:
+ ; CHECK: mrc p7, #1, r{{[0-9]+}}, c1, c1, #4
+ %0 = tail call i32 @llvm.arm.mrc(i32 7, i32 1, i32 1, i32 1, i32 4) nounwind
+ ; CHECK: mcr p7, #1, r{{[0-9]+}}, c1, c1, #4
+ tail call void @llvm.arm.mcr(i32 7, i32 1, i32 %0, i32 1, i32 1, i32 4) nounwind
+ ; CHECK: mrc2 p7, #1, r{{[0-9]+}}, c1, c1, #4
+ %1 = tail call i32 @llvm.arm.mrc2(i32 7, i32 1, i32 1, i32 1, i32 4) nounwind
+ ; CHECK: mcr2 p7, #1, r{{[0-9]+}}, c1, c1, #4
+ tail call void @llvm.arm.mcr2(i32 7, i32 1, i32 %1, i32 1, i32 1, i32 4) nounwind
+ ; CHECK: mcrr p7, #1, r{{[0-9]+}}, r{{[0-9]+}}, c1
+ tail call void @llvm.arm.mcrr(i32 7, i32 1, i32 %0, i32 %1, i32 1) nounwind
+ ; CHECK: mcrr2 p7, #1, r{{[0-9]+}}, r{{[0-9]+}}, c1
+ tail call void @llvm.arm.mcrr2(i32 7, i32 1, i32 %0, i32 %1, i32 1) nounwind
+ ; CHECK: cdp p7, #3, c1, c1, c1, #5
+ tail call void @llvm.arm.cdp(i32 7, i32 3, i32 1, i32 1, i32 1, i32 5) nounwind
+ ; CHECK: cdp2 p7, #3, c1, c1, c1, #5
+ tail call void @llvm.arm.cdp2(i32 7, i32 3, i32 1, i32 1, i32 1, i32 5) nounwind
+ ; CHECK: ldc p7, c3, [r{{[0-9]+}}]
+ tail call void @llvm.arm.ldc(i32 7, i32 3, i8* %i) nounwind
+ ; CHECK: ldcl p7, c3, [r{{[0-9]+}}]
+ tail call void @llvm.arm.ldcl(i32 7, i32 3, i8* %i) nounwind
+ ; CHECK: ldc2 p7, c3, [r{{[0-9]+}}]
+ tail call void @llvm.arm.ldc2(i32 7, i32 3, i8* %i) nounwind
+ ; CHECK: ldc2l p7, c3, [r{{[0-9]+}}]
+ tail call void @llvm.arm.ldc2l(i32 7, i32 3, i8* %i) nounwind
+ ; CHECK: stc p7, c3, [r{{[0-9]+}}]
+ tail call void @llvm.arm.stc(i32 7, i32 3, i8* %i) nounwind
+ ; CHECK: stcl p7, c3, [r{{[0-9]+}}]
+ tail call void @llvm.arm.stcl(i32 7, i32 3, i8* %i) nounwind
+ ; CHECK: stc2 p7, c3, [r{{[0-9]+}}]
+ tail call void @llvm.arm.stc2(i32 7, i32 3, i8* %i) nounwind
+ ; CHECK: stc2l p7, c3, [r{{[0-9]+}}]
+ tail call void @llvm.arm.stc2l(i32 7, i32 3, i8* %i) nounwind
+ ; CHECK: mrrc p1, #2, r{{[0-9]+}}, r{{[0-9]+}}, c3
+ %2 = tail call { i32, i32 } @llvm.arm.mrrc(i32 1, i32 2, i32 3) nounwind
+ ; CHECK: mrrc2 p1, #2, r{{[0-9]+}}, r{{[0-9]+}}, c3
+ %3 = tail call { i32, i32 } @llvm.arm.mrrc2(i32 1, i32 2, i32 3) nounwind
+ ret void
+}
+
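+; Check that cdp can be predicated when if-conversion kicks in.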
+define hidden void @cond_cdp(i32 %a) {
+; CHECK-LABEL: cond_cdp:
+entry:
+ %tobool = icmp eq i32 %a, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+; CHECK: it ne
+; CHECK: cdpne p15, #0, c0, c0, c0, #0 @ encoding: [0x00,0xee,0x00,0x0f]
+ tail call void @llvm.arm.cdp(i32 15, i32 0, i32 0, i32 0, i32 0, i32 0)
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+declare void @llvm.arm.ldc(i32, i32, i8*) nounwind
+
+declare void @llvm.arm.ldcl(i32, i32, i8*) nounwind
+
+declare void @llvm.arm.ldc2(i32, i32, i8*) nounwind
+
+declare void @llvm.arm.ldc2l(i32, i32, i8*) nounwind
+
+declare void @llvm.arm.stc(i32, i32, i8*) nounwind
+
+declare void @llvm.arm.stcl(i32, i32, i8*) nounwind
+
+declare void @llvm.arm.stc2(i32, i32, i8*) nounwind
+
+declare void @llvm.arm.stc2l(i32, i32, i8*) nounwind
+
+declare void @llvm.arm.cdp2(i32, i32, i32, i32, i32, i32) nounwind
+
+declare void @llvm.arm.cdp(i32, i32, i32, i32, i32, i32) nounwind
+
+declare void @llvm.arm.mcrr2(i32, i32, i32, i32, i32) nounwind
+
+declare void @llvm.arm.mcrr(i32, i32, i32, i32, i32) nounwind
+
+declare void @llvm.arm.mcr2(i32, i32, i32, i32, i32, i32) nounwind
+
+declare i32 @llvm.arm.mrc2(i32, i32, i32, i32, i32) nounwind
+
+declare void @llvm.arm.mcr(i32, i32, i32, i32, i32, i32) nounwind
+
+declare i32 @llvm.arm.mrc(i32, i32, i32, i32, i32) nounwind
+
+declare { i32, i32 } @llvm.arm.mrrc(i32, i32, i32) nounwind
+
+declare { i32, i32 } @llvm.arm.mrrc2(i32, i32, i32) nounwind
diff --git a/test/CodeGen/Thumb2/stack_guard_remat.ll b/test/CodeGen/Thumb2/stack_guard_remat.ll
index cf34e8c0c2fb..839a506b35e6 100644
--- a/test/CodeGen/Thumb2/stack_guard_remat.ll
+++ b/test/CodeGen/Thumb2/stack_guard_remat.ll
@@ -24,20 +24,20 @@
define i32 @test_stack_guard_remat() #0 {
%a1 = alloca [256 x i32], align 4
%1 = bitcast [256 x i32]* %a1 to i8*
- call void @llvm.lifetime.start(i64 1024, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 1024, i8* %1)
%2 = getelementptr inbounds [256 x i32], [256 x i32]* %a1, i32 0, i32 0
call void @foo3(i32* %2) #3
call void asm sideeffect "foo2", "~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{sp},~{lr}"()
- call void @llvm.lifetime.end(i64 1024, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 1024, i8* %1)
ret i32 0
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare void @foo3(i32*)
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Thumb2/tbb-removeadd.mir b/test/CodeGen/Thumb2/tbb-removeadd.mir
new file mode 100644
index 000000000000..89ed98720539
--- /dev/null
+++ b/test/CodeGen/Thumb2/tbb-removeadd.mir
@@ -0,0 +1,124 @@
+# RUN: llc -run-pass arm-cp-islands %s -o - | FileCheck %s
+
+--- |
+ ; ModuleID = 'test.ll'
+ source_filename = "test.c"
+ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+ target triple = "thumbv8r-arm-none-eabi"
+
+ define void @Func(i32 %i, i32* nocapture %p) local_unnamed_addr {
+ entry:
+ switch i32 %i, label %sw.epilog [
+ i32 0, label %sw.bb
+ i32 1, label %sw.bb1
+ i32 2, label %sw.epilog.sink.split
+ i32 4, label %sw.bb3
+ ]
+
+ sw.bb: ; preds = %entry
+ br label %sw.epilog.sink.split
+
+ sw.bb1: ; preds = %entry
+ store i32 0, i32* %p, align 4
+ br label %sw.epilog.sink.split
+
+ sw.bb3: ; preds = %entry
+ br label %sw.epilog.sink.split
+
+ sw.epilog.sink.split: ; preds = %sw.bb3, %sw.bb1, %sw.bb, %entry
+ %.sink = phi i32 [ 2, %sw.bb3 ], [ 0, %sw.bb ], [ 1, %entry ], [ 1, %sw.bb1 ]
+ store i32 %.sink, i32* %p, align 4
+ br label %sw.epilog
+
+ sw.epilog: ; preds = %sw.epilog.sink.split, %entry
+ ret void
+ }
+
+...
+---
+name: Func
+alignment: 1
+exposesReturnsTwice: false
+noVRegs: true
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%r0' }
+ - { reg: '%r1' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+jumpTable:
+ kind: inline
+ entries:
+ - id: 0
+ blocks: [ '%bb.2.sw.bb', '%bb.3.sw.bb1', '%bb.5.sw.epilog.sink.split',
+ '%bb.6.sw.epilog', '%bb.4.sw.bb3' ]
+# The ADD should be deleted along with the LEA
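+# (t2TBB_JT takes the table base and index as separate operands, so the
+# explicit address computation is unnecessary.)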
+# CHECK-NOT: t2LEApcrelJT
+# CHECK-NOT: t2ADDrs
+# CHECK: tMOVi8
+# CHECK: t2TBB_JT
+
+body: |
+ bb.0.entry:
+ successors: %bb.6.sw.epilog(0x0ccccccb), %bb.1.entry(0x73333335)
+ liveins: %r0, %r1
+
+ tCMPi8 %r0, 4, 14, _, implicit-def %cpsr
+ t2Bcc %bb.6.sw.epilog, 8, killed %cpsr
+
+ bb.1.entry:
+ successors: %bb.2.sw.bb(0x1c71c71c), %bb.3.sw.bb1(0x1c71c71c), %bb.5.sw.epilog.sink.split(0x1c71c71c), %bb.6.sw.epilog(0x0e38e38e), %bb.4.sw.bb3(0x1c71c71c)
+ liveins: %r0, %r1
+
+ %r2 = t2LEApcrelJT %jump-table.0, 14, _
+ %r3 = t2ADDrs killed %r2, %r0, 18, 14, _, _
+ %r2, dead %cpsr = tMOVi8 1, 14, _
+ t2BR_JT killed %r3, killed %r0, %jump-table.0
+
+ bb.2.sw.bb:
+ successors: %bb.5.sw.epilog.sink.split(0x80000000)
+ liveins: %r1
+
+ %r2, dead %cpsr = tMOVi8 0, 14, _
+ t2B %bb.5.sw.epilog.sink.split, 14, _
+
+ bb.3.sw.bb1:
+ successors: %bb.5.sw.epilog.sink.split(0x80000000)
+ liveins: %r1
+
+ %r0, dead %cpsr = tMOVi8 0, 14, _
+ %r2, dead %cpsr = tMOVi8 1, 14, _
+ tSTRi killed %r0, %r1, 0, 14, _ :: (store 4 into %ir.p)
+ t2B %bb.5.sw.epilog.sink.split, 14, _
+
+ bb.4.sw.bb3:
+ successors: %bb.5.sw.epilog.sink.split(0x80000000)
+ liveins: %r1
+
+ %r2, dead %cpsr = tMOVi8 2, 14, _
+
+ bb.5.sw.epilog.sink.split:
+ successors: %bb.6.sw.epilog(0x80000000)
+ liveins: %r1, %r2
+
+ tSTRi killed %r2, killed %r1, 0, 14, _ :: (store 4 into %ir.p)
+
+ bb.6.sw.epilog:
+ tBX_RET 14, _
+
+...
diff --git a/test/CodeGen/Thumb2/thumb2-pack.ll b/test/CodeGen/Thumb2/thumb2-pack.ll
index 4825628f3014..26b68ec443b9 100644
--- a/test/CodeGen/Thumb2/thumb2-pack.ll
+++ b/test/CodeGen/Thumb2/thumb2-pack.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+t2xtpk %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; CHECK: test1
; CHECK: pkhbt r0, r0, r1, lsl #16
diff --git a/test/CodeGen/Thumb2/thumb2-rev.ll b/test/CodeGen/Thumb2/thumb2-rev.ll
index 873a2d4cf7de..81d0822d500b 100644
--- a/test/CodeGen/Thumb2/thumb2-rev.ll
+++ b/test/CodeGen/Thumb2/thumb2-rev.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+v7,+t2xtpk %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+v7 %s -o - | FileCheck %s
define i32 @f1(i32 %a) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-smla.ll b/test/CodeGen/Thumb2/thumb2-smla.ll
index 5ddaf9353f92..f1850d460928 100644
--- a/test/CodeGen/Thumb2/thumb2-smla.ll
+++ b/test/CodeGen/Thumb2/thumb2-smla.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+t2xtpk,+dsp %s -o - | FileCheck %s
-; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+t2xtpk,+dsp -arm-use-mulops=false %s -o - | FileCheck %s -check-prefix=NO_MULOPS
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+dsp %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+dsp -arm-use-mulops=false %s -o - | FileCheck %s -check-prefix=NO_MULOPS
define i32 @f3(i32 %a, i16 %x, i32 %y) {
; CHECK: f3
diff --git a/test/CodeGen/Thumb2/thumb2-smul.ll b/test/CodeGen/Thumb2/thumb2-smul.ll
index a196a3c79ae9..53fca567af16 100644
--- a/test/CodeGen/Thumb2/thumb2-smul.ll
+++ b/test/CodeGen/Thumb2/thumb2-smul.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+t2xtpk,+dsp %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+dsp %s -o - | FileCheck %s
@x = weak global i16 0 ; <i16*> [#uses=1]
@y = weak global i16 0 ; <i16*> [#uses=0]
diff --git a/test/CodeGen/Thumb2/thumb2-sxt-uxt.ll b/test/CodeGen/Thumb2/thumb2-sxt-uxt.ll
index 693a8e4e99f7..c1170137c7fc 100644
--- a/test/CodeGen/Thumb2/thumb2-sxt-uxt.ll
+++ b/test/CodeGen/Thumb2/thumb2-sxt-uxt.ll
@@ -1,38 +1,45 @@
-; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-m3 %s -o - | FileCheck %s
-; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-m4 %s -o - | FileCheck %s --check-prefix=CHECK-M4
+; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-m3 %s -o - | FileCheck %s --check-prefix=CHECK-NO-DSP
+; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-m4 %s -o - | FileCheck %s --check-prefix=CHECK-DSP
+; RUN: llc -mtriple=thumbv7em-eabi %s -o - | FileCheck %s -check-prefix=CHECK-DSP
+; RUN: llc -mtriple=thumbv8m.main-none-eabi %s -o - | FileCheck %s -check-prefix=CHECK-NO-DSP
+; RUN: llc -mtriple=thumbv8m.main-none-eabi -mattr=+dsp %s -o - | FileCheck %s -check-prefix=CHECK-DSP
define i32 @test1(i16 zeroext %z) nounwind {
; CHECK-LABEL: test1:
-; CHECK: sxth
+; CHECK-DSP: sxth
+; CHECK-NO-DSP: sxth
%r = sext i16 %z to i32
ret i32 %r
}
define i32 @test2(i8 zeroext %z) nounwind {
; CHECK-LABEL: test2:
-; CHECK: sxtb
+; CHECK-DSP: sxtb
+; CHECK-NO-DSP: sxtb
%r = sext i8 %z to i32
ret i32 %r
}
define i32 @test3(i16 signext %z) nounwind {
; CHECK-LABEL: test3:
-; CHECK: uxth
+; CHECK-DSP: uxth
+; CHECK-NO-DSP: uxth
%r = zext i16 %z to i32
ret i32 %r
}
define i32 @test4(i8 signext %z) nounwind {
; CHECK-LABEL: test4:
-; CHECK: uxtb
+; CHECK-DSP: uxtb
+; CHECK-NO-DSP: uxtb
%r = zext i8 %z to i32
ret i32 %r
}
define i32 @test5(i32 %a, i8 %b) {
; CHECK-LABEL: test5:
-; CHECK-NOT: sxtab
-; CHECK-M4: sxtab r0, r0, r1
+; CHECK-DSP: sxtab r0, r0, r1
+; CHECK-NO-DSP-NOT: sxtab
%sext = sext i8 %b to i32
%add = add i32 %a, %sext
ret i32 %add
@@ -40,8 +47,8 @@ define i32 @test5(i32 %a, i8 %b) {
define i32 @test6(i32 %a, i32 %b) {
; CHECK-LABEL: test6:
-; CHECK-NOT: sxtab
-; CHECK-M4: sxtab r0, r0, r1
+; CHECK-DSP: sxtab r0, r0, r1
+; CHECK-NO-DSP-NOT: sxtab
%shl = shl i32 %b, 24
%ashr = ashr i32 %shl, 24
%add = add i32 %a, %ashr
@@ -50,8 +57,8 @@ define i32 @test6(i32 %a, i32 %b) {
define i32 @test7(i32 %a, i16 %b) {
; CHECK-LABEL: test7:
-; CHECK-NOT: sxtah
-; CHECK-M4: sxtah r0, r0, r1
+; CHECK-DSP: sxtah r0, r0, r1
+; CHECK-NO-DSP-NOT: sxtah
%sext = sext i16 %b to i32
%add = add i32 %a, %sext
ret i32 %add
@@ -59,8 +66,8 @@ define i32 @test7(i32 %a, i16 %b) {
define i32 @test8(i32 %a, i32 %b) {
; CHECK-LABEL: test8:
-; CHECK-NOT: sxtah
-; CHECK-M4: sxtah r0, r0, r1
+; CHECK-DSP: sxtah r0, r0, r1
+; CHECK-NO-DSP-NOT: sxtah
%shl = shl i32 %b, 16
%ashr = ashr i32 %shl, 16
%add = add i32 %a, %ashr
@@ -69,8 +76,8 @@ define i32 @test8(i32 %a, i32 %b) {
define i32 @test9(i32 %a, i8 %b) {
; CHECK-LABEL: test9:
-; CHECK-NOT: uxtab
-; CHECK-M4: uxtab r0, r0, r1
+; CHECK-DSP: uxtab r0, r0, r1
+; CHECK-NO-DSP-NOT: uxtab
%zext = zext i8 %b to i32
%add = add i32 %a, %zext
ret i32 %add
@@ -78,8 +85,8 @@ define i32 @test9(i32 %a, i8 %b) {
define i32 @test10(i32 %a, i32 %b) {
;CHECK-LABEL: test10:
-;CHECK-NOT: uxtab
-;CHECK-M4: uxtab r0, r0, r1
+;CHECK-DSP: uxtab r0, r0, r1
+;CHECK-NO-DSP-NOT: uxtab
%and = and i32 %b, 255
%add = add i32 %a, %and
ret i32 %add
@@ -87,8 +94,8 @@ define i32 @test10(i32 %a, i32 %b) {
define i32 @test11(i32 %a, i16 %b) {
; CHECK-LABEL: test11:
-; CHECK-NOT: uxtah
-; CHECK-M4: uxtah r0, r0, r1
+; CHECK-DSP: uxtah r0, r0, r1
+; CHECK-NO-DSP-NOT: uxtah
%zext = zext i16 %b to i32
%add = add i32 %a, %zext
ret i32 %add
@@ -96,8 +103,8 @@ define i32 @test11(i32 %a, i16 %b) {
define i32 @test12(i32 %a, i32 %b) {
;CHECK-LABEL: test12:
-;CHECK-NOT: uxtah
-;CHECK-M4: uxtah r0, r0, r1
+;CHECK-DSP: uxtah r0, r0, r1
+;CHECK-NO-DSP-NOT: uxtah
%and = and i32 %b, 65535
%add = add i32 %a, %and
ret i32 %add
diff --git a/test/CodeGen/Thumb2/thumb2-sxt_rot.ll b/test/CodeGen/Thumb2/thumb2-sxt_rot.ll
index a4f8aa0dbd03..c4af67a2f91d 100644
--- a/test/CodeGen/Thumb2/thumb2-sxt_rot.ll
+++ b/test/CodeGen/Thumb2/thumb2-sxt_rot.ll
@@ -1,18 +1,21 @@
-; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+t2xtpk %s -o - | FileCheck %s
-; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-m3 %s -o - | FileCheck %s --check-prefix=CHECK-M3
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s %s -o - | FileCheck %s --check-prefix=CHECK-DSP
+; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-m3 %s -o - | FileCheck %s --check-prefix=CHECK-NO-DSP
+; RUN: llc -mtriple=thumbv7em-eabi %s -o - | FileCheck %s -check-prefix=CHECK-DSP
+; RUN: llc -mtriple=thumbv8m.main-none-eabi %s -o - | FileCheck %s -check-prefix=CHECK-NO-DSP
+; RUN: llc -mtriple=thumbv8m.main-none-eabi -mattr=+dsp %s -o - | FileCheck %s -check-prefix=CHECK-DSP
define i32 @test0(i8 %A) {
; CHECK-LABEL: test0:
-; CHECK: sxtb r0, r0
-; CHECK-M3: sxtb r0, r0
+; CHECK-DSP: sxtb r0, r0
+; CHECK-NO-DSP: sxtb r0, r0
%B = sext i8 %A to i32
ret i32 %B
}
define signext i8 @test1(i32 %A) {
; CHECK-LABEL: test1:
-; CHECK: sbfx r0, r0, #8, #8
-; CHECK-M3: sbfx r0, r0, #8, #8
+; CHECK-DSP: sbfx r0, r0, #8, #8
+; CHECK-NO-DSP: sbfx r0, r0, #8, #8
%B = lshr i32 %A, 8
%C = shl i32 %A, 24
%D = or i32 %B, %C
@@ -22,8 +25,8 @@ define signext i8 @test1(i32 %A) {
define signext i32 @test2(i32 %A, i32 %X) {
; CHECK-LABEL: test2:
-; CHECK: sxtab r0, r1, r0, ror #8
-; CHECK-M3-NOT: sxtab
+; CHECK-DSP: sxtab r0, r1, r0, ror #8
+; CHECK-NO-DSP-NOT: sxtab
%B = lshr i32 %A, 8
%C = shl i32 %A, 24
%D = or i32 %B, %C
@@ -35,8 +38,8 @@ define signext i32 @test2(i32 %A, i32 %X) {
define i32 @test3(i32 %A, i32 %X) {
; CHECK-LABEL: test3:
-; CHECK: sxtah r0, r0, r1, ror #8
-; CHECK-M3-NOT: sxtah
+; CHECK-DSP: sxtah r0, r0, r1, ror #8
+; CHECK-NO-DSP-NOT: sxtah
%X.hi = lshr i32 %X, 8
%X.trunc = trunc i32 %X.hi to i16
%addend = sext i16 %X.trunc to i32
@@ -46,8 +49,8 @@ define i32 @test3(i32 %A, i32 %X) {
define signext i32 @test4(i32 %A, i32 %X) {
; CHECK-LABEL: test4:
-; CHECK: sxtab r0, r1, r0, ror #16
-; CHECK-M3-NOT: sxtab
+; CHECK-DSP: sxtab r0, r1, r0, ror #16
+; CHECK-NO-DSP-NOT: sxtab
%B = lshr i32 %A, 16
%C = shl i32 %A, 16
%D = or i32 %B, %C
@@ -59,8 +62,8 @@ define signext i32 @test4(i32 %A, i32 %X) {
define signext i32 @test5(i32 %A, i32 %X) {
; CHECK-LABEL: test5:
-; CHECK: sxtah r0, r1, r0, ror #24
-; CHECK-M3-NOT: sxtah
+; CHECK-DSP: sxtah r0, r1, r0, ror #24
+; CHECK-NO-DSP-NOT: sxtah
%B = lshr i32 %A, 24
%C = shl i32 %A, 8
%D = or i32 %B, %C
diff --git a/test/CodeGen/Thumb2/thumb2-uxt_rot.ll b/test/CodeGen/Thumb2/thumb2-uxt_rot.ll
index 891c706972c0..22740b715dcb 100644
--- a/test/CodeGen/Thumb2/thumb2-uxt_rot.ll
+++ b/test/CodeGen/Thumb2/thumb2-uxt_rot.ll
@@ -1,21 +1,22 @@
-; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s --check-prefix=A8
-; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-m3 %s -o - | FileCheck %s --check-prefix=M3
+; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s --check-prefix=CHECK-DSP
+; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-m3 %s -o - | FileCheck %s --check-prefix=CHECK-NO-DSP
+; RUN: llc -mtriple=thumbv7em-eabi %s -o - | FileCheck %s -check-prefix=CHECK-DSP
+; RUN: llc -mtriple=thumbv8m.main-none-eabi %s -o - | FileCheck %s -check-prefix=CHECK-NO-DSP
+; RUN: llc -mtriple=thumbv8m.main-none-eabi -mattr=+dsp %s -o - | FileCheck %s -check-prefix=CHECK-DSP
; rdar://11318438
define zeroext i8 @test1(i32 %A.u) {
; CHECK-LABEL: test1:
-; A8: uxtb r0, r0
+; CHECK-DSP: uxtb r0, r0
+; CHECK-NO-DSP: uxtb r0, r0
%B.u = trunc i32 %A.u to i8
ret i8 %B.u
}
define zeroext i32 @test2(i32 %A.u, i32 %B.u) {
; CHECK-LABEL: test2:
-; A8: uxtab r0, r0, r1
-
-; M3: uxtb r1, r1
-; M3-NOT: uxtab
-; M3: add r0, r1
+; CHECK-DSP: uxtab r0, r0, r1
+; CHECK-NO-DSP-NOT: uxtab
%C.u = trunc i32 %B.u to i8
%D.u = zext i8 %C.u to i32
%E.u = add i32 %A.u, %D.u
@@ -24,8 +25,8 @@ define zeroext i32 @test2(i32 %A.u, i32 %B.u) {
define zeroext i32 @test3(i32 %A.u) {
; CHECK-LABEL: test3:
-; A8: ubfx r0, r0, #8, #16
-; M3: ubfx r0, r0, #8, #16
+; CHECK-DSP: ubfx r0, r0, #8, #16
+; CHECK-NO-DSP: ubfx r0, r0, #8, #16
%B.u = lshr i32 %A.u, 8
%C.u = shl i32 %A.u, 24
%D.u = or i32 %B.u, %C.u
@@ -36,8 +37,8 @@ define zeroext i32 @test3(i32 %A.u) {
define i32 @test4(i32 %A, i32 %X) {
; CHECK-LABEL: test4:
-; A8: uxtab r0, r0, r1, ror #16
-; M3-NOT: uxtab
+; CHECK-DSP: uxtab r0, r0, r1, ror #16
+; CHECK-NO-DSP-NOT: uxtab
%X.hi = lshr i32 %X, 16
%X.trunc = trunc i32 %X.hi to i8
%addend = zext i8 %X.trunc to i32
@@ -47,8 +48,8 @@ define i32 @test4(i32 %A, i32 %X) {
define i32 @test5(i32 %A, i32 %X) {
; CHECK-LABEL: test5:
-; A8: uxtah r0, r0, r1, ror #8
-; M3-NOT: uxtah
+; CHECK-DSP: uxtah r0, r0, r1, ror #8
+; CHECK-NO-DSP-NOT: uxtah
%X.hi = lshr i32 %X, 8
%X.trunc = trunc i32 %X.hi to i16
%addend = zext i16 %X.trunc to i32
@@ -58,8 +59,8 @@ define i32 @test5(i32 %A, i32 %X) {
define i32 @test6(i32 %A, i32 %X) {
; CHECK-LABEL: test6:
-; A8: uxtab r0, r0, r1, ror #8
-; M3-NOT: uxtab
+; CHECK-DSP: uxtab r0, r0, r1, ror #8
+; CHECK-NO-DSP-NOT: uxtab
%X.hi = lshr i32 %X, 8
%X.trunc = trunc i32 %X.hi to i8
%addend = zext i8 %X.trunc to i32
@@ -69,8 +70,8 @@ define i32 @test6(i32 %A, i32 %X) {
define i32 @test7(i32 %A, i32 %X) {
; CHECK-LABEL: test7:
-; A8: uxtah r0, r0, r1, ror #24
-; M3-NOT: uxtah
+; CHECK-DSP: uxtah r0, r0, r1, ror #24
+; CHECK-NO-DSP-NOT: uxtah
%lshr = lshr i32 %X, 24
%shl = shl i32 %X, 8
%or = or i32 %lshr, %shl
@@ -82,8 +83,8 @@ define i32 @test7(i32 %A, i32 %X) {
define i32 @test8(i32 %A, i32 %X) {
; CHECK-LABEL: test8:
-; A8: uxtah r0, r0, r1, ror #24
-; M3-NOT: uxtah
+; CHECK-DSP: uxtah r0, r0, r1, ror #24
+; CHECK-NO-DSP-NOT: uxtah
%lshr = lshr i32 %X, 24
%shl = shl i32 %X, 8
%or = or i32 %lshr, %shl
diff --git a/test/CodeGen/Thumb2/thumb2-uxtb.ll b/test/CodeGen/Thumb2/thumb2-uxtb.ll
index b8b1bc832d96..af4532cf6f3d 100644
--- a/test/CodeGen/Thumb2/thumb2-uxtb.ll
+++ b/test/CodeGen/Thumb2/thumb2-uxtb.ll
@@ -1,72 +1,63 @@
-; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s -check-prefix=ARMv7A
-; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-m3 %s -o - | FileCheck %s -check-prefix=ARMv7M
+; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s -check-prefix=CHECK-DSP
+; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-m3 %s -o - | FileCheck %s -check-prefix=CHECK-NO-DSP
+; RUN: llc -mtriple=thumbv7em-eabi %s -o - | FileCheck %s -check-prefix=CHECK-DSP
+; RUN: llc -mtriple=thumbv8m.main-none-eabi %s -o - | FileCheck %s -check-prefix=CHECK-NO-DSP
+; RUN: llc -mtriple=thumbv8m.main-none-eabi -mattr=+dsp %s -o - | FileCheck %s -check-prefix=CHECK-DSP
define i32 @test1(i32 %x) {
-; ARMv7A: test1
-; ARMv7A: uxtb16 r0, r0
-
-; ARMv7M: test1
-; ARMv7M: bic r0, r0, #-16711936
+; CHECK-LABEL: test1
+; CHECK-DSP: uxtb16 r0, r0
+; CHECK-NO-DSP: bic r0, r0, #-16711936
%tmp1 = and i32 %x, 16711935 ; <i32> [#uses=1]
ret i32 %tmp1
}
; PR7503
define i32 @test2(i32 %x) {
-; ARMv7A: test2
-; ARMv7A: uxtb16 r0, r0, ror #8
-
-; ARMv7M: test2
-; ARMv7M: mov.w r1, #16711935
-; ARMv7M: and.w r0, r1, r0, lsr #8
+; CHECK-LABEL: test2
+; CHECK-DSP: uxtb16 r0, r0, ror #8
+; CHECK-NO-DSP: mov.w r1, #16711935
+; CHECK-NO-DSP: and.w r0, r1, r0, lsr #8
%tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
%tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
ret i32 %tmp2
}
define i32 @test3(i32 %x) {
-; ARMv7A: test3
-; ARMv7A: uxtb16 r0, r0, ror #8
-
-; ARMv7M: test3
-; ARMv7M: mov.w r1, #16711935
-; ARMv7M: and.w r0, r1, r0, lsr #8
+; CHECK-LABEL: test3
+; CHECK-DSP: uxtb16 r0, r0, ror #8
+; CHECK-NO-DSP: mov.w r1, #16711935
+; CHECK-NO-DSP: and.w r0, r1, r0, lsr #8
%tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
%tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
ret i32 %tmp2
}
define i32 @test4(i32 %x) {
-; ARMv7A: test4
-; ARMv7A: uxtb16 r0, r0, ror #8
-
-; ARMv7M: test4
-; ARMv7M: mov.w r1, #16711935
-; ARMv7M: and.w r0, r1, r0, lsr #8
+; CHECK-LABEL: test4
+; CHECK-DSP: uxtb16 r0, r0, ror #8
+; CHECK-NO-DSP: mov.w r1, #16711935
+; CHECK-NO-DSP: and.w r0, r1, r0, lsr #8
%tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
%tmp6 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
ret i32 %tmp6
}
define i32 @test5(i32 %x) {
-; ARMv7A: test5
-; ARMv7A: uxtb16 r0, r0, ror #8
-
-; ARMv7M: test5
-; ARMv7M: mov.w r1, #16711935
-; ARMv7M: and.w r0, r1, r0, lsr #8
+; CHECK-LABEL: test5
+; CHECK-DSP: uxtb16 r0, r0, ror #8
+; CHECK-NO-DSP: mov.w r1, #16711935
+; CHECK-NO-DSP: and.w r0, r1, r0, lsr #8
%tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
%tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
ret i32 %tmp2
}
define i32 @test6(i32 %x) {
-; ARMv7A: test6
-; ARMv7A: uxtb16 r0, r0, ror #16
-
-; ARMv7M: test6
-; ARMv7M: mov.w r1, #16711935
-; ARMv7M: and.w r0, r1, r0, ror #16
+; CHECK-LABEL: test6
+; CHECK-DSP: uxtb16 r0, r0, ror #16
+; CHECK-NO-DSP: mov.w r1, #16711935
+; CHECK-NO-DSP: and.w r0, r1, r0, ror #16
%tmp1 = lshr i32 %x, 16 ; <i32> [#uses=1]
%tmp2 = and i32 %tmp1, 255 ; <i32> [#uses=1]
%tmp4 = shl i32 %x, 16 ; <i32> [#uses=1]
@@ -76,12 +67,10 @@ define i32 @test6(i32 %x) {
}
define i32 @test7(i32 %x) {
-; ARMv7A: test7
-; ARMv7A: uxtb16 r0, r0, ror #16
-
-; ARMv7M: test7
-; ARMv7M: mov.w r1, #16711935
-; ARMv7M: and.w r0, r1, r0, ror #16
+; CHECK-LABEL: test7
+; CHECK-DSP: uxtb16 r0, r0, ror #16
+; CHECK-NO-DSP: mov.w r1, #16711935
+; CHECK-NO-DSP: and.w r0, r1, r0, ror #16
%tmp1 = lshr i32 %x, 16 ; <i32> [#uses=1]
%tmp2 = and i32 %tmp1, 255 ; <i32> [#uses=1]
%tmp4 = shl i32 %x, 16 ; <i32> [#uses=1]
@@ -91,12 +80,10 @@ define i32 @test7(i32 %x) {
}
define i32 @test8(i32 %x) {
-; ARMv7A: test8
-; ARMv7A: uxtb16 r0, r0, ror #24
-
-; ARMv7M: test8
-; ARMv7M: mov.w r1, #16711935
-; ARMv7M: and.w r0, r1, r0, ror #24
+; CHECK-LABEL: test8
+; CHECK-DSP: uxtb16 r0, r0, ror #24
+; CHECK-NO-DSP: mov.w r1, #16711935
+; CHECK-NO-DSP: and.w r0, r1, r0, ror #24
%tmp1 = shl i32 %x, 8 ; <i32> [#uses=1]
%tmp2 = and i32 %tmp1, 16711680 ; <i32> [#uses=1]
%tmp5 = lshr i32 %x, 24 ; <i32> [#uses=1]
@@ -105,12 +92,10 @@ define i32 @test8(i32 %x) {
}
define i32 @test9(i32 %x) {
-; ARMv7A: test9
-; ARMv7A: uxtb16 r0, r0, ror #24
-
-; ARMv7M: test9
-; ARMv7M: mov.w r1, #16711935
-; ARMv7M: and.w r0, r1, r0, ror #24
+; CHECK-LABEL: test9
+; CHECK-DSP: uxtb16 r0, r0, ror #24
+; CHECK-NO-DSP: mov.w r1, #16711935
+; CHECK-NO-DSP: and.w r0, r1, r0, ror #24
%tmp1 = lshr i32 %x, 24 ; <i32> [#uses=1]
%tmp4 = shl i32 %x, 8 ; <i32> [#uses=1]
%tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
@@ -119,19 +104,18 @@ define i32 @test9(i32 %x) {
}
define i32 @test10(i32 %p0) {
-; ARMv7A: test10
-; ARMv7A: mov.w r1, #16253176
-; ARMv7A: and.w r0, r1, r0, lsr #7
-; ARMv7A: lsrs r1, r0, #5
-; ARMv7A: uxtb16 r1, r1
-; ARMv7A: orrs r0, r1
-
-; ARMv7M: test10
-; ARMv7M: mov.w r1, #16253176
-; ARMv7M: and.w r0, r1, r0, lsr #7
-; ARMv7M: mov.w r1, #458759
-; ARMv7M: and.w r1, r1, r0, lsr #5
-; ARMv7M: orrs r0, r1
+; CHECK-LABEL: test10
+; CHECK-DSP: mov.w r1, #16253176
+; CHECK-DSP: and.w r0, r1, r0, lsr #7
+; CHECK-DSP: lsrs r1, r0, #5
+; CHECK-DSP: uxtb16 r1, r1
+; CHECK-DSP: orrs r0, r1
+
+; CHECK-NO-DSP: mov.w r1, #16253176
+; CHECK-NO-DSP: and.w r0, r1, r0, lsr #7
+; CHECK-NO-DSP: mov.w r1, #458759
+; CHECK-NO-DSP: and.w r1, r1, r0, lsr #5
+; CHECK-NO-DSP: orrs r0, r1
%tmp1 = lshr i32 %p0, 7 ; <i32> [#uses=1]
%tmp2 = and i32 %tmp1, 16253176 ; <i32> [#uses=2]
%tmp4 = lshr i32 %tmp2, 5 ; <i32> [#uses=1]
diff --git a/test/CodeGen/Thumb2/v8_IT_4.ll b/test/CodeGen/Thumb2/v8_IT_4.ll
index 5a80d8cd7b4e..5901a8e81caf 100644
--- a/test/CodeGen/Thumb2/v8_IT_4.ll
+++ b/test/CodeGen/Thumb2/v8_IT_4.ll
@@ -12,10 +12,11 @@
define weak arm_aapcs_vfpcc i32 @_ZNKSs7compareERKSs(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this, %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %__str) {
; CHECK-LABEL: _ZNKSs7compareERKSs:
-; CHECK: cbnz r0,
+; CHECK: cbz r0,
+; CHECK-NEXT: %bb1
+; CHECK-NEXT: pop.w
; CHECK-NEXT: %bb
; CHECK-NEXT: sub{{(.w)?}} r0, r{{[0-9]+}}, r{{[0-9]+}}
-; CHECK-NEXT: %bb1
; CHECK-NEXT: pop.w
entry:
%0 = tail call arm_aapcs_vfpcc i32 @_ZNKSs4sizeEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this) ; <i32> [#uses=3]
diff --git a/test/CodeGen/WebAssembly/address-offsets.ll b/test/CodeGen/WebAssembly/address-offsets.ll
index b9efec86f0da..da198978fc2f 100644
--- a/test/CodeGen/WebAssembly/address-offsets.ll
+++ b/test/CodeGen/WebAssembly/address-offsets.ll
@@ -1,10 +1,10 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
; Test folding constant offsets and symbols into load and store addresses under
; a variety of circumstances.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
@g = external global [0 x i32], align 4
diff --git a/test/CodeGen/WebAssembly/byval.ll b/test/CodeGen/WebAssembly/byval.ll
index 7a995769a8e7..907320d7977c 100644
--- a/test/CodeGen/WebAssembly/byval.ll
+++ b/test/CodeGen/WebAssembly/byval.ll
@@ -1,8 +1,8 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -verify-machineinstrs | FileCheck %s
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -verify-machineinstrs -fast-isel | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -verify-machineinstrs -fast-isel | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
%SmallStruct = type { i32 }
%OddStruct = type { i32, i8, i32 }
@@ -23,15 +23,13 @@ declare void @ext_byval_func_empty(%EmptyStruct* byval)
; CHECK-LABEL: byval_arg
define void @byval_arg(%SmallStruct* %ptr) {
; CHECK: .param i32
- ; CHECK: i32.const $push[[L4:.+]]=, 0
; Subtract 16 from SP (SP is 16-byte aligned)
- ; CHECK: i32.const $push[[L1:.+]]=, 0
- ; CHECK-NEXT: i32.load $push[[L2:.+]]=, __stack_pointer($pop[[L1]])
+ ; CHECK-NEXT: get_global $push[[L2:.+]]=, 0
; CHECK-NEXT: i32.const $push[[L3:.+]]=, 16
; CHECK-NEXT: i32.sub $push[[L11:.+]]=, $pop[[L2]], $pop[[L3]]
; Ensure SP is stored back before the call
; CHECK-NEXT: tee_local $push[[L10:.+]]=, $[[SP:.+]]=, $pop[[L11]]{{$}}
- ; CHECK-NEXT: i32.store __stack_pointer($pop[[L4]]), $pop[[L10]]{{$}}
+ ; CHECK-NEXT: set_global 0, $pop[[L10]]{{$}}
; Copy the SmallStruct argument to the stack (SP+12, original SP-4)
; CHECK-NEXT: i32.load $push[[L0:.+]]=, 0($0)
; CHECK-NEXT: i32.store 12($[[SP]]), $pop[[L0]]
@@ -41,10 +39,9 @@ define void @byval_arg(%SmallStruct* %ptr) {
; CHECK-NEXT: call ext_byval_func@FUNCTION, $pop[[ARG]]{{$}}
call void @ext_byval_func(%SmallStruct* byval %ptr)
; Restore the stack
- ; CHECK-NEXT: i32.const $push[[L7:.+]]=, 0
; CHECK-NEXT: i32.const $push[[L6:.+]]=, 16
; CHECK-NEXT: i32.add $push[[L8:.+]]=, $[[SP]], $pop[[L6]]
- ; CHECK-NEXT: i32.store __stack_pointer($pop[[L7]]), $pop[[L8]]
+ ; CHECK-NEXT: set_global 0, $pop[[L8]]
; CHECK-NEXT: return
ret void
}
@@ -56,7 +53,7 @@ define void @byval_arg_align8(%SmallStruct* %ptr) {
; CHECK: i32.const $push[[L1:.+]]=, 16
; CHECK-NEXT: i32.sub $push[[L11:.+]]=, {{.+}}, $pop[[L1]]
; CHECK-NEXT: tee_local $push[[L10:.+]]=, $[[SP:.+]]=, $pop[[L11]]{{$}}
- ; CHECK-NEXT: i32.store __stack_pointer($pop{{.+}}), $pop[[L10]]{{$}}
+ ; CHECK-NEXT: set_global 0, $pop[[L10]]{{$}}
; Copy the SmallStruct argument to the stack (SP+8, original SP-8)
; CHECK-NEXT: i32.load $push[[L0:.+]]=, 0($0){{$}}
; CHECK-NEXT: i32.store 8($[[SP]]), $pop[[L0]]{{$}}
@@ -75,7 +72,7 @@ define void @byval_arg_double(%AlignedStruct* %ptr) {
; CHECK: i32.const $push[[L1:.+]]=, 16
; CHECK-NEXT: i32.sub $push[[L14:.+]]=, {{.+}}, $pop[[L1]]
; CHECK-NEXT: tee_local $push[[L13:.+]]=, $[[SP:.+]]=, $pop[[L14]]
- ; CHECK-NEXT: i32.store {{.+}}, $pop[[L13]]
+ ; CHECK-NEXT: set_global 0, $pop[[L13]]
; Copy the AlignedStruct argument to the stack (SP+0, original SP-16)
; Just check the last load/store pair of the memcpy
; CHECK: i64.load $push[[L4:.+]]=, 0($0)
@@ -113,13 +110,11 @@ define void @byval_empty_callee(%EmptyStruct* byval %ptr) {
; Call memcpy for "big" byvals.
; CHECK-LABEL: big_byval:
-; CHECK: i32.const $push[[L4:.+]]=, 0
-; CHECK: i32.const $push[[L1:.+]]=, 0
-; CHECK-NEXT: i32.load $push[[L2:.+]]=, __stack_pointer($pop[[L1]])
+; CHECK: get_global $push[[L2:.+]]=, 0{{$}}
; CHECK-NEXT: i32.const $push[[L3:.+]]=, 131072
; CHECK-NEXT: i32.sub $push[[L11:.+]]=, $pop[[L2]], $pop[[L3]]
; CHECK-NEXT: tee_local $push[[L10:.+]]=, $[[SP:.+]]=, $pop[[L11]]{{$}}
-; CHECK-NEXT: i32.store __stack_pointer($pop[[L4]]), $pop[[L10]]{{$}}
+; CHECK-NEXT: set_global 0, $pop[[L10]]{{$}}
; CHECK-NEXT: i32.const $push[[L0:.+]]=, 131072
; CHECK-NEXT: i32.call $push[[L11:.+]]=, memcpy@FUNCTION, $[[SP]], ${{.+}}, $pop{{.+}}
; CHECK-NEXT: tee_local $push[[L9:.+]]=, $[[SP:.+]]=, $pop[[L11]]{{$}}
diff --git a/test/CodeGen/WebAssembly/call.ll b/test/CodeGen/WebAssembly/call.ll
index 1a9d5b8fb8e6..1cf42242a6cc 100644
--- a/test/CodeGen/WebAssembly/call.ll
+++ b/test/CodeGen/WebAssembly/call.ll
@@ -4,7 +4,7 @@
; Test that basic call operations assemble as expected.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
declare i32 @i32_nullary()
declare i32 @i32_unary(i32)
@@ -61,7 +61,8 @@ define void @call_void_nullary() {
; CHECK-LABEL: call_i32_unary:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: {{^}} i32.call $push[[NUM:[0-9]+]]=, i32_unary@FUNCTION, $0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: {{^}} i32.call $push[[NUM:[0-9]+]]=, i32_unary@FUNCTION, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @call_i32_unary(i32 %a) {
%r = call i32 @i32_unary(i32 %a)
@@ -71,7 +72,9 @@ define i32 @call_i32_unary(i32 %a) {
; CHECK-LABEL: call_i32_binary:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: {{^}} i32.call $push[[NUM:[0-9]+]]=, i32_binary@FUNCTION, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: {{^}} i32.call $push[[NUM:[0-9]+]]=, i32_binary@FUNCTION, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @call_i32_binary(i32 %a, i32 %b) {
%r = call i32 @i32_binary(i32 %a, i32 %b)
@@ -80,7 +83,8 @@ define i32 @call_i32_binary(i32 %a, i32 %b) {
; CHECK-LABEL: call_indirect_void:
; CHECK-NEXT: .param i32{{$}}
-; CHECK-NEXT: {{^}} call_indirect $0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: {{^}} call_indirect $pop[[L0]]{{$}}
; CHECK-NEXT: return{{$}}
define void @call_indirect_void(void ()* %callee) {
call void %callee()
@@ -90,7 +94,8 @@ define void @call_indirect_void(void ()* %callee) {
; CHECK-LABEL: call_indirect_i32:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: {{^}} i32.call_indirect $push[[NUM:[0-9]+]]=, $0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: {{^}} i32.call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @call_indirect_i32(i32 ()* %callee) {
%t = call i32 %callee()
@@ -99,7 +104,9 @@ define i32 @call_indirect_i32(i32 ()* %callee) {
; CHECK-LABEL: call_indirect_arg:
; CHECK-NEXT: .param i32, i32{{$}}
-; CHECK-NEXT: {{^}} call_indirect $1, $0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: {{^}} call_indirect $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return{{$}}
define void @call_indirect_arg(void (i32)* %callee, i32 %arg) {
call void %callee(i32 %arg)
@@ -108,7 +115,11 @@ define void @call_indirect_arg(void (i32)* %callee, i32 %arg) {
; CHECK-LABEL: call_indirect_arg_2:
; CHECK-NEXT: .param i32, i32, i32{{$}}
-; CHECK-NEXT: {{^}} i32.call_indirect $drop=, $1, $2, $0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 2{{$}}
+; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: {{^}} i32.call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]], $pop[[L2]]{{$}}
+; CHECK-NEXT: drop $pop[[NUM]]{{$}}
; CHECK-NEXT: return{{$}}
define void @call_indirect_arg_2(i32 (i32, i32)* %callee, i32 %arg, i32 %arg2) {
call i32 %callee(i32 %arg, i32 %arg2)
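For orientation, a minimal sketch (not part of the patch) of the pattern these updated checks encode: with locals kept explicit, every use of an argument is materialized by a get_local that pushes the value onto the wasm operand stack, so direct register references like $0 become get_local/$pop pairs. A hypothetical test in the same style:

; CHECK-LABEL: add_i32:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
; CHECK-NEXT: i32.add $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @add_i32(i32 %x, i32 %y) {
  %r = add i32 %x, %y
  ret i32 %r
}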
diff --git a/test/CodeGen/WebAssembly/cfg-stackify.ll b/test/CodeGen/WebAssembly/cfg-stackify.ll
index 3b42df190266..ae6dd7a34ef8 100644
--- a/test/CodeGen/WebAssembly/cfg-stackify.ll
+++ b/test/CodeGen/WebAssembly/cfg-stackify.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-block-placement -verify-machineinstrs -fast-isel=false -machine-sink-split-probability-threshold=0 -cgp-freq-ratio-to-skip-merge=1000 | FileCheck %s
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -tail-dup-placement=0 -verify-machineinstrs -fast-isel=false -machine-sink-split-probability-threshold=0 -cgp-freq-ratio-to-skip-merge=1000 | FileCheck -check-prefix=OPT %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -disable-block-placement -verify-machineinstrs -fast-isel=false -machine-sink-split-probability-threshold=0 -cgp-freq-ratio-to-skip-merge=1000 | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -tail-dup-placement=0 -verify-machineinstrs -fast-isel=false -machine-sink-split-probability-threshold=0 -cgp-freq-ratio-to-skip-merge=1000 | FileCheck -check-prefix=OPT %s
; Test the CFG stackifier pass.
@@ -7,7 +7,7 @@
; optnone test.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
declare void @something()
@@ -1144,7 +1144,6 @@ bb7:
; optnone to disable optimizations to test this case.
; CHECK-LABEL: test13:
-; CHECK-NEXT: .local i32{{$}}
; CHECK-NEXT: block {{$}}
; CHECK-NEXT: block {{$}}
; CHECK: br_if 0, $pop0{{$}}
@@ -1161,7 +1160,6 @@ bb7:
; CHECK-NEXT: end_block{{$}}
; CHECK-NEXT: unreachable{{$}}
; OPT-LABEL: test13:
-; OPT-NEXT: .local i32{{$}}
; OPT-NEXT: block {{$}}
; OPT-NEXT: block {{$}}
; OPT: br_if 0, $pop0{{$}}
diff --git a/test/CodeGen/WebAssembly/cfi.ll b/test/CodeGen/WebAssembly/cfi.ll
index e5664ba73a0d..992e0f0c63d8 100644
--- a/test/CodeGen/WebAssembly/cfi.ll
+++ b/test/CodeGen/WebAssembly/cfi.ll
@@ -3,7 +3,7 @@
; Tests that we correctly assign indexes for control flow integrity.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
@0 = private unnamed_addr constant [2 x void (...)*] [void (...)* bitcast (void ()* @f to void (...)*), void (...)* bitcast (void ()* @g to void (...)*)], align 16
diff --git a/test/CodeGen/WebAssembly/comparisons_f32.ll b/test/CodeGen/WebAssembly/comparisons_f32.ll
index 10e037d57a7a..8051b25689dd 100644
--- a/test/CodeGen/WebAssembly/comparisons_f32.ll
+++ b/test/CodeGen/WebAssembly/comparisons_f32.ll
@@ -4,13 +4,17 @@
; expected.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: ord_f32:
; CHECK-NEXT: .param f32, f32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: f32.eq $push[[NUM0:[0-9]+]]=, $0, $0{{$}}
-; CHECK-NEXT: f32.eq $push[[NUM1:[0-9]+]]=, $1, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: f32.eq $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f32.eq $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
; CHECK-NEXT: i32.and $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM1]]{{$}}
; CHECK-NEXT: return $pop[[NUM2]]{{$}}
define i32 @ord_f32(float %x, float %y) {
@@ -22,8 +26,12 @@ define i32 @ord_f32(float %x, float %y) {
; CHECK-LABEL: uno_f32:
; CHECK-NEXT: .param f32, f32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: f32.ne $push[[NUM0:[0-9]+]]=, $0, $0{{$}}
-; CHECK-NEXT: f32.ne $push[[NUM1:[0-9]+]]=, $1, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: f32.ne $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f32.ne $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM1]]{{$}}
; CHECK-NEXT: return $pop[[NUM2]]{{$}}
define i32 @uno_f32(float %x, float %y) {
@@ -35,7 +43,9 @@ define i32 @uno_f32(float %x, float %y) {
; CHECK-LABEL: oeq_f32:
; CHECK-NEXT: .param f32, f32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: f32.eq $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f32.eq $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @oeq_f32(float %x, float %y) {
%a = fcmp oeq float %x, %y
@@ -44,7 +54,7 @@ define i32 @oeq_f32(float %x, float %y) {
}
; CHECK-LABEL: une_f32:
-; CHECK: f32.ne $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: f32.ne $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @une_f32(float %x, float %y) {
%a = fcmp une float %x, %y
@@ -53,7 +63,7 @@ define i32 @une_f32(float %x, float %y) {
}
; CHECK-LABEL: olt_f32:
-; CHECK: f32.lt $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: f32.lt $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @olt_f32(float %x, float %y) {
%a = fcmp olt float %x, %y
@@ -62,7 +72,7 @@ define i32 @olt_f32(float %x, float %y) {
}
; CHECK-LABEL: ole_f32:
-; CHECK: f32.le $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: f32.le $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @ole_f32(float %x, float %y) {
%a = fcmp ole float %x, %y
@@ -71,7 +81,7 @@ define i32 @ole_f32(float %x, float %y) {
}
; CHECK-LABEL: ogt_f32:
-; CHECK: f32.gt $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: f32.gt $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @ogt_f32(float %x, float %y) {
%a = fcmp ogt float %x, %y
@@ -80,7 +90,7 @@ define i32 @ogt_f32(float %x, float %y) {
}
; CHECK-LABEL: oge_f32:
-; CHECK: f32.ge $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: f32.ge $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @oge_f32(float %x, float %y) {
%a = fcmp oge float %x, %y
@@ -93,9 +103,15 @@ define i32 @oge_f32(float %x, float %y) {
; CHECK-LABEL: ueq_f32:
; CHECK-NEXT: .param f32, f32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: f32.eq $push[[NUM0:[0-9]+]]=, $0, $1{{$}}
-; CHECK-NEXT: f32.ne $push[[NUM1:[0-9]+]]=, $0, $0{{$}}
-; CHECK-NEXT: f32.ne $push[[NUM2:[0-9]+]]=, $1, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f32.eq $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: f32.ne $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
+; CHECK-NEXT: get_local $push[[L4:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: get_local $push[[L5:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f32.ne $push[[NUM2:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM3:[0-9]+]]=, $pop[[NUM1]], $pop[[NUM2]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM4:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM3]]{{$}}
; CHECK-NEXT: return $pop[[NUM4]]{{$}}
@@ -108,9 +124,15 @@ define i32 @ueq_f32(float %x, float %y) {
; CHECK-LABEL: one_f32:
; CHECK-NEXT: .param f32, f32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: f32.ne $push[[NUM0:[0-9]+]]=, $0, $1{{$}}
-; CHECK-NEXT: f32.eq $push[[NUM1:[0-9]+]]=, $0, $0{{$}}
-; CHECK-NEXT: f32.eq $push[[NUM2:[0-9]+]]=, $1, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f32.ne $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: f32.eq $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
+; CHECK-NEXT: get_local $push[[L4:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: get_local $push[[L5:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f32.eq $push[[NUM2:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
; CHECK-NEXT: i32.and $push[[NUM3:[0-9]+]]=, $pop[[NUM1]], $pop[[NUM2]]{{$}}
; CHECK-NEXT: i32.and $push[[NUM4:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM3]]{{$}}
; CHECK-NEXT: return $pop[[NUM4]]
@@ -123,9 +145,15 @@ define i32 @one_f32(float %x, float %y) {
; CHECK-LABEL: ult_f32:
; CHECK-NEXT: .param f32, f32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: f32.lt $push[[NUM0:[0-9]+]]=, $0, $1{{$}}
-; CHECK-NEXT: f32.ne $push[[NUM1:[0-9]+]]=, $0, $0{{$}}
-; CHECK-NEXT: f32.ne $push[[NUM2:[0-9]+]]=, $1, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f32.lt $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: f32.ne $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
+; CHECK-NEXT: get_local $push[[L4:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: get_local $push[[L5:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f32.ne $push[[NUM2:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM3:[0-9]+]]=, $pop[[NUM1]], $pop[[NUM2]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM4:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM3]]{{$}}
; CHECK-NEXT: return $pop[[NUM4]]{{$}}
@@ -138,9 +166,15 @@ define i32 @ult_f32(float %x, float %y) {
; CHECK-LABEL: ule_f32:
; CHECK-NEXT: .param f32, f32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: f32.le $push[[NUM0:[0-9]+]]=, $0, $1{{$}}
-; CHECK-NEXT: f32.ne $push[[NUM1:[0-9]+]]=, $0, $0{{$}}
-; CHECK-NEXT: f32.ne $push[[NUM2:[0-9]+]]=, $1, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f32.le $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: f32.ne $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
+; CHECK-NEXT: get_local $push[[L4:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: get_local $push[[L5:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f32.ne $push[[NUM2:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM3:[0-9]+]]=, $pop[[NUM1]], $pop[[NUM2]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM4:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM3]]{{$}}
; CHECK-NEXT: return $pop[[NUM4]]{{$}}
@@ -153,9 +187,15 @@ define i32 @ule_f32(float %x, float %y) {
; CHECK-LABEL: ugt_f32:
; CHECK-NEXT: .param f32, f32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: f32.gt $push[[NUM0:[0-9]+]]=, $0, $1{{$}}
-; CHECK-NEXT: f32.ne $push[[NUM1:[0-9]+]]=, $0, $0{{$}}
-; CHECK-NEXT: f32.ne $push[[NUM2:[0-9]+]]=, $1, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f32.gt $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: f32.ne $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
+; CHECK-NEXT: get_local $push[[L4:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: get_local $push[[L5:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f32.ne $push[[NUM2:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM3:[0-9]+]]=, $pop[[NUM1]], $pop[[NUM2]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM4:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM3]]{{$}}
; CHECK-NEXT: return $pop[[NUM4]]{{$}}
@@ -168,9 +208,15 @@ define i32 @ugt_f32(float %x, float %y) {
; CHECK-LABEL: uge_f32:
; CHECK-NEXT: .param f32, f32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: f32.ge $push[[NUM0:[0-9]+]]=, $0, $1{{$}}
-; CHECK-NEXT: f32.ne $push[[NUM1:[0-9]+]]=, $0, $0{{$}}
-; CHECK-NEXT: f32.ne $push[[NUM2:[0-9]+]]=, $1, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f32.ge $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: f32.ne $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
+; CHECK-NEXT: get_local $push[[L4:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: get_local $push[[L5:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f32.ne $push[[NUM2:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM3:[0-9]+]]=, $pop[[NUM1]], $pop[[NUM2]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM4:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM3]]{{$}}
; CHECK-NEXT: return $pop[[NUM4]]{{$}}
diff --git a/test/CodeGen/WebAssembly/comparisons_f64.ll b/test/CodeGen/WebAssembly/comparisons_f64.ll
index 7d038a09ccbf..6694f989627f 100644
--- a/test/CodeGen/WebAssembly/comparisons_f64.ll
+++ b/test/CodeGen/WebAssembly/comparisons_f64.ll
@@ -4,13 +4,17 @@
; expected.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: ord_f64:
; CHECK-NEXT: .param f64, f64{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: f64.eq $push[[NUM0:[0-9]+]]=, $0, $0{{$}}
-; CHECK-NEXT: f64.eq $push[[NUM1:[0-9]+]]=, $1, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: f64.eq $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f64.eq $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
; CHECK-NEXT: i32.and $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM1]]{{$}}
; CHECK-NEXT: return $pop[[NUM2]]{{$}}
define i32 @ord_f64(double %x, double %y) {
@@ -22,8 +26,12 @@ define i32 @ord_f64(double %x, double %y) {
; CHECK-LABEL: uno_f64:
; CHECK-NEXT: .param f64, f64{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: f64.ne $push[[NUM0:[0-9]+]]=, $0, $0{{$}}
-; CHECK-NEXT: f64.ne $push[[NUM1:[0-9]+]]=, $1, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: f64.ne $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f64.ne $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM1]]{{$}}
; CHECK-NEXT: return $pop[[NUM2]]{{$}}
define i32 @uno_f64(double %x, double %y) {
@@ -35,7 +43,9 @@ define i32 @uno_f64(double %x, double %y) {
; CHECK-LABEL: oeq_f64:
; CHECK-NEXT: .param f64, f64{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: f64.eq $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f64.eq $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @oeq_f64(double %x, double %y) {
%a = fcmp oeq double %x, %y
@@ -44,7 +54,7 @@ define i32 @oeq_f64(double %x, double %y) {
}
; CHECK-LABEL: une_f64:
-; CHECK: f64.ne $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: f64.ne $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @une_f64(double %x, double %y) {
%a = fcmp une double %x, %y
@@ -53,7 +63,7 @@ define i32 @une_f64(double %x, double %y) {
}
; CHECK-LABEL: olt_f64:
-; CHECK: f64.lt $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: f64.lt $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @olt_f64(double %x, double %y) {
%a = fcmp olt double %x, %y
@@ -62,7 +72,7 @@ define i32 @olt_f64(double %x, double %y) {
}
; CHECK-LABEL: ole_f64:
-; CHECK: f64.le $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: f64.le $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @ole_f64(double %x, double %y) {
%a = fcmp ole double %x, %y
@@ -71,7 +81,7 @@ define i32 @ole_f64(double %x, double %y) {
}
; CHECK-LABEL: ogt_f64:
-; CHECK: f64.gt $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: f64.gt $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @ogt_f64(double %x, double %y) {
%a = fcmp ogt double %x, %y
@@ -80,7 +90,7 @@ define i32 @ogt_f64(double %x, double %y) {
}
; CHECK-LABEL: oge_f64:
-; CHECK: f64.ge $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: f64.ge $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @oge_f64(double %x, double %y) {
%a = fcmp oge double %x, %y
@@ -93,9 +103,15 @@ define i32 @oge_f64(double %x, double %y) {
; CHECK-LABEL: ueq_f64:
; CHECK-NEXT: .param f64, f64{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: f64.eq $push[[NUM0:[0-9]+]]=, $0, $1{{$}}
-; CHECK-NEXT: f64.ne $push[[NUM1:[0-9]+]]=, $0, $0{{$}}
-; CHECK-NEXT: f64.ne $push[[NUM2:[0-9]+]]=, $1, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f64.eq $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: f64.ne $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
+; CHECK-NEXT: get_local $push[[L4:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: get_local $push[[L5:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f64.ne $push[[NUM2:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM3:[0-9]+]]=, $pop[[NUM1]], $pop[[NUM2]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM4:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM3]]{{$}}
; CHECK-NEXT: return $pop[[NUM4]]{{$}}
@@ -108,9 +124,15 @@ define i32 @ueq_f64(double %x, double %y) {
; CHECK-LABEL: one_f64:
; CHECK-NEXT: .param f64, f64{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: f64.ne $push[[NUM0:[0-9]+]]=, $0, $1{{$}}
-; CHECK-NEXT: f64.eq $push[[NUM1:[0-9]+]]=, $0, $0{{$}}
-; CHECK-NEXT: f64.eq $push[[NUM2:[0-9]+]]=, $1, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f64.ne $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: f64.eq $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
+; CHECK-NEXT: get_local $push[[L4:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: get_local $push[[L5:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f64.eq $push[[NUM2:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
; CHECK-NEXT: i32.and $push[[NUM3:[0-9]+]]=, $pop[[NUM1]], $pop[[NUM2]]{{$}}
; CHECK-NEXT: i32.and $push[[NUM4:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM3]]{{$}}
; CHECK-NEXT: return $pop[[NUM4]]
@@ -123,9 +145,15 @@ define i32 @one_f64(double %x, double %y) {
; CHECK-LABEL: ult_f64:
; CHECK-NEXT: .param f64, f64{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: f64.lt $push[[NUM0:[0-9]+]]=, $0, $1{{$}}
-; CHECK-NEXT: f64.ne $push[[NUM1:[0-9]+]]=, $0, $0{{$}}
-; CHECK-NEXT: f64.ne $push[[NUM2:[0-9]+]]=, $1, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f64.lt $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: f64.ne $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
+; CHECK-NEXT: get_local $push[[L4:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: get_local $push[[L5:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f64.ne $push[[NUM2:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM3:[0-9]+]]=, $pop[[NUM1]], $pop[[NUM2]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM4:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM3]]{{$}}
; CHECK-NEXT: return $pop[[NUM4]]{{$}}
@@ -138,9 +166,15 @@ define i32 @ult_f64(double %x, double %y) {
; CHECK-LABEL: ule_f64:
; CHECK-NEXT: .param f64, f64{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: f64.le $push[[NUM0:[0-9]+]]=, $0, $1{{$}}
-; CHECK-NEXT: f64.ne $push[[NUM1:[0-9]+]]=, $0, $0{{$}}
-; CHECK-NEXT: f64.ne $push[[NUM2:[0-9]+]]=, $1, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f64.le $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: f64.ne $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
+; CHECK-NEXT: get_local $push[[L4:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: get_local $push[[L5:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f64.ne $push[[NUM2:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM3:[0-9]+]]=, $pop[[NUM1]], $pop[[NUM2]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM4:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM3]]{{$}}
; CHECK-NEXT: return $pop[[NUM4]]{{$}}
@@ -153,9 +187,15 @@ define i32 @ule_f64(double %x, double %y) {
; CHECK-LABEL: ugt_f64:
; CHECK-NEXT: .param f64, f64{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: f64.gt $push[[NUM0:[0-9]+]]=, $0, $1{{$}}
-; CHECK-NEXT: f64.ne $push[[NUM1:[0-9]+]]=, $0, $0{{$}}
-; CHECK-NEXT: f64.ne $push[[NUM2:[0-9]+]]=, $1, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f64.gt $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: f64.ne $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
+; CHECK-NEXT: get_local $push[[L4:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: get_local $push[[L5:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f64.ne $push[[NUM2:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM3:[0-9]+]]=, $pop[[NUM1]], $pop[[NUM2]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM4:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM3]]{{$}}
; CHECK-NEXT: return $pop[[NUM4]]{{$}}
@@ -168,9 +208,15 @@ define i32 @ugt_f64(double %x, double %y) {
; CHECK-LABEL: uge_f64:
; CHECK-NEXT: .param f64, f64{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: f64.ge $push[[NUM0:[0-9]+]]=, $0, $1{{$}}
-; CHECK-NEXT: f64.ne $push[[NUM1:[0-9]+]]=, $0, $0{{$}}
-; CHECK-NEXT: f64.ne $push[[NUM2:[0-9]+]]=, $1, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f64.ge $push[[NUM0:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: get_local $push[[L2:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L3:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: f64.ne $push[[NUM1:[0-9]+]]=, $pop[[L2]], $pop[[L3]]{{$}}
+; CHECK-NEXT: get_local $push[[L4:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: get_local $push[[L5:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f64.ne $push[[NUM2:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM3:[0-9]+]]=, $pop[[NUM1]], $pop[[NUM2]]{{$}}
; CHECK-NEXT: i32.or $push[[NUM4:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM3]]{{$}}
; CHECK-NEXT: return $pop[[NUM4]]{{$}}
diff --git a/test/CodeGen/WebAssembly/comparisons_i32.ll b/test/CodeGen/WebAssembly/comparisons_i32.ll
index d2ba73f79a3d..a9a79c24fb47 100644
--- a/test/CodeGen/WebAssembly/comparisons_i32.ll
+++ b/test/CodeGen/WebAssembly/comparisons_i32.ll
@@ -4,12 +4,14 @@
; Test that basic 32-bit integer comparison operations assemble as expected.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: eq_i32:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.eq $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.eq $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @eq_i32(i32 %x, i32 %y) {
%a = icmp eq i32 %x, %y
@@ -18,7 +20,7 @@ define i32 @eq_i32(i32 %x, i32 %y) {
}
; CHECK-LABEL: ne_i32:
-; CHECK: i32.ne $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: i32.ne $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @ne_i32(i32 %x, i32 %y) {
%a = icmp ne i32 %x, %y
@@ -27,7 +29,7 @@ define i32 @ne_i32(i32 %x, i32 %y) {
}
; CHECK-LABEL: slt_i32:
-; CHECK: i32.lt_s $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: i32.lt_s $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @slt_i32(i32 %x, i32 %y) {
%a = icmp slt i32 %x, %y
@@ -36,7 +38,7 @@ define i32 @slt_i32(i32 %x, i32 %y) {
}
; CHECK-LABEL: sle_i32:
-; CHECK: i32.le_s $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: i32.le_s $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @sle_i32(i32 %x, i32 %y) {
%a = icmp sle i32 %x, %y
@@ -45,7 +47,7 @@ define i32 @sle_i32(i32 %x, i32 %y) {
}
; CHECK-LABEL: ult_i32:
-; CHECK: i32.lt_u $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: i32.lt_u $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @ult_i32(i32 %x, i32 %y) {
%a = icmp ult i32 %x, %y
@@ -54,7 +56,7 @@ define i32 @ult_i32(i32 %x, i32 %y) {
}
; CHECK-LABEL: ule_i32:
-; CHECK: i32.le_u $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: i32.le_u $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @ule_i32(i32 %x, i32 %y) {
%a = icmp ule i32 %x, %y
@@ -63,7 +65,7 @@ define i32 @ule_i32(i32 %x, i32 %y) {
}
; CHECK-LABEL: sgt_i32:
-; CHECK: i32.gt_s $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: i32.gt_s $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @sgt_i32(i32 %x, i32 %y) {
%a = icmp sgt i32 %x, %y
@@ -72,7 +74,7 @@ define i32 @sgt_i32(i32 %x, i32 %y) {
}
; CHECK-LABEL: sge_i32:
-; CHECK: i32.ge_s $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: i32.ge_s $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @sge_i32(i32 %x, i32 %y) {
%a = icmp sge i32 %x, %y
@@ -81,7 +83,7 @@ define i32 @sge_i32(i32 %x, i32 %y) {
}
; CHECK-LABEL: ugt_i32:
-; CHECK: i32.gt_u $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: i32.gt_u $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @ugt_i32(i32 %x, i32 %y) {
%a = icmp ugt i32 %x, %y
@@ -90,7 +92,7 @@ define i32 @ugt_i32(i32 %x, i32 %y) {
}
; CHECK-LABEL: uge_i32:
-; CHECK: i32.ge_u $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: i32.ge_u $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @uge_i32(i32 %x, i32 %y) {
%a = icmp uge i32 %x, %y
diff --git a/test/CodeGen/WebAssembly/comparisons_i64.ll b/test/CodeGen/WebAssembly/comparisons_i64.ll
index 80950ae5cd9a..106520483c8f 100644
--- a/test/CodeGen/WebAssembly/comparisons_i64.ll
+++ b/test/CodeGen/WebAssembly/comparisons_i64.ll
@@ -4,12 +4,14 @@
; Test that basic 64-bit integer comparison operations assemble as expected.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: eq_i64:
; CHECK-NEXT: .param i64, i64{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i64.eq $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.eq $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @eq_i64(i64 %x, i64 %y) {
%a = icmp eq i64 %x, %y
@@ -18,7 +20,7 @@ define i32 @eq_i64(i64 %x, i64 %y) {
}
; CHECK-LABEL: ne_i64:
-; CHECK: i64.ne $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: i64.ne $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @ne_i64(i64 %x, i64 %y) {
%a = icmp ne i64 %x, %y
@@ -27,7 +29,7 @@ define i32 @ne_i64(i64 %x, i64 %y) {
}
; CHECK-LABEL: slt_i64:
-; CHECK: i64.lt_s $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: i64.lt_s $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @slt_i64(i64 %x, i64 %y) {
%a = icmp slt i64 %x, %y
@@ -36,7 +38,7 @@ define i32 @slt_i64(i64 %x, i64 %y) {
}
; CHECK-LABEL: sle_i64:
-; CHECK: i64.le_s $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: i64.le_s $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @sle_i64(i64 %x, i64 %y) {
%a = icmp sle i64 %x, %y
@@ -45,7 +47,7 @@ define i32 @sle_i64(i64 %x, i64 %y) {
}
; CHECK-LABEL: ult_i64:
-; CHECK: i64.lt_u $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: i64.lt_u $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @ult_i64(i64 %x, i64 %y) {
%a = icmp ult i64 %x, %y
@@ -54,7 +56,7 @@ define i32 @ult_i64(i64 %x, i64 %y) {
}
; CHECK-LABEL: ule_i64:
-; CHECK: i64.le_u $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: i64.le_u $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @ule_i64(i64 %x, i64 %y) {
%a = icmp ule i64 %x, %y
@@ -63,7 +65,7 @@ define i32 @ule_i64(i64 %x, i64 %y) {
}
; CHECK-LABEL: sgt_i64:
-; CHECK: i64.gt_s $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: i64.gt_s $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @sgt_i64(i64 %x, i64 %y) {
%a = icmp sgt i64 %x, %y
@@ -72,7 +74,7 @@ define i32 @sgt_i64(i64 %x, i64 %y) {
}
; CHECK-LABEL: sge_i64:
-; CHECK: i64.ge_s $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: i64.ge_s $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @sge_i64(i64 %x, i64 %y) {
%a = icmp sge i64 %x, %y
@@ -81,7 +83,7 @@ define i32 @sge_i64(i64 %x, i64 %y) {
}
; CHECK-LABEL: ugt_i64:
-; CHECK: i64.gt_u $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: i64.gt_u $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @ugt_i64(i64 %x, i64 %y) {
%a = icmp ugt i64 %x, %y
@@ -90,7 +92,7 @@ define i32 @ugt_i64(i64 %x, i64 %y) {
}
; CHECK-LABEL: uge_i64:
-; CHECK: i64.ge_u $push[[NUM:[0-9]+]]=, $0, $1{{$}}
+; CHECK: i64.ge_u $push[[NUM:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @uge_i64(i64 %x, i64 %y) {
%a = icmp uge i64 %x, %y
diff --git a/test/CodeGen/WebAssembly/conv.ll b/test/CodeGen/WebAssembly/conv.ll
index 27cebb117dd4..913c4b0b19ea 100644
--- a/test/CodeGen/WebAssembly/conv.ll
+++ b/test/CodeGen/WebAssembly/conv.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
; Test that basic conversion operations assemble as expected.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: i32_wrap_i64:
; CHECK-NEXT: .param i64{{$}}
diff --git a/test/CodeGen/WebAssembly/copysign-casts.ll b/test/CodeGen/WebAssembly/copysign-casts.ll
index f8e50d043ca9..7cd40efafcd5 100644
--- a/test/CodeGen/WebAssembly/copysign-casts.ll
+++ b/test/CodeGen/WebAssembly/copysign-casts.ll
@@ -4,14 +4,14 @@
; unfolded.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
declare double @copysign(double, double) nounwind readnone
declare float @copysignf(float, float) nounwind readnone
; CHECK-LABEL: fold_promote:
-; CHECK: f64.promote/f32 $push0=, $1{{$}}
-; CHECK: f64.copysign $push1=, $0, $pop0{{$}}
+; CHECK: f64.promote/f32 $push0=, $pop{{[0-9]+}}{{$}}
+; CHECK: f64.copysign $push1=, $pop{{[0-9]+}}, $pop0{{$}}
define double @fold_promote(double %a, float %b) {
%c = fpext float %b to double
%t = call double @copysign(double %a, double %c)
@@ -19,8 +19,8 @@ define double @fold_promote(double %a, float %b) {
}
; CHECK-LABEL: fold_demote:{{$}}
-; CHECK: f32.demote/f64 $push0=, $1{{$}}
-; CHECK: f32.copysign $push1=, $0, $pop0{{$}}
+; CHECK: f32.demote/f64 $push0=, $pop{{[0-9]+}}{{$}}
+; CHECK: f32.copysign $push1=, $pop{{[0-9]+}}, $pop0{{$}}
define float @fold_demote(float %a, double %b) {
%c = fptrunc double %b to float
%t = call float @copysignf(float %a, float %c)
diff --git a/test/CodeGen/WebAssembly/cpus.ll b/test/CodeGen/WebAssembly/cpus.ll
index 78aee0f59d92..9b4ac4425ca9 100644
--- a/test/CodeGen/WebAssembly/cpus.ll
+++ b/test/CodeGen/WebAssembly/cpus.ll
@@ -1,13 +1,13 @@
; This tests that llc accepts all valid WebAssembly CPUs.
-; RUN: llc < %s -asm-verbose=false -mtriple=wasm32-unknown-unknown -mcpu=mvp 2>&1 | FileCheck %s
-; RUN: llc < %s -asm-verbose=false -mtriple=wasm64-unknown-unknown -mcpu=mvp 2>&1 | FileCheck %s
-; RUN: llc < %s -asm-verbose=false -mtriple=wasm32-unknown-unknown -mcpu=generic 2>&1 | FileCheck %s
-; RUN: llc < %s -asm-verbose=false -mtriple=wasm64-unknown-unknown -mcpu=generic 2>&1 | FileCheck %s
-; RUN: llc < %s -asm-verbose=false -mtriple=wasm32-unknown-unknown -mcpu=bleeding-edge 2>&1 | FileCheck %s
-; RUN: llc < %s -asm-verbose=false -mtriple=wasm64-unknown-unknown -mcpu=bleeding-edge 2>&1 | FileCheck %s
-; RUN: llc < %s -asm-verbose=false -mtriple=wasm32-unknown-unknown -mcpu=invalidcpu 2>&1 | FileCheck %s --check-prefix=INVALID
-; RUN: llc < %s -asm-verbose=false -mtriple=wasm64-unknown-unknown -mcpu=invalidcpu 2>&1 | FileCheck %s --check-prefix=INVALID
+; RUN: llc < %s -asm-verbose=false -mtriple=wasm32-unknown-unknown-wasm -mcpu=mvp 2>&1 | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -mtriple=wasm64-unknown-unknown-wasm -mcpu=mvp 2>&1 | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -mtriple=wasm32-unknown-unknown-wasm -mcpu=generic 2>&1 | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -mtriple=wasm64-unknown-unknown-wasm -mcpu=generic 2>&1 | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -mtriple=wasm32-unknown-unknown-wasm -mcpu=bleeding-edge 2>&1 | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -mtriple=wasm64-unknown-unknown-wasm -mcpu=bleeding-edge 2>&1 | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -mtriple=wasm32-unknown-unknown-wasm -mcpu=invalidcpu 2>&1 | FileCheck %s --check-prefix=INVALID
+; RUN: llc < %s -asm-verbose=false -mtriple=wasm64-unknown-unknown-wasm -mcpu=invalidcpu 2>&1 | FileCheck %s --check-prefix=INVALID
; CHECK-NOT: is not a recognized processor for this target
; INVALID: {{.+}} is not a recognized processor for this target
diff --git a/test/CodeGen/WebAssembly/dbgvalue.ll b/test/CodeGen/WebAssembly/dbgvalue.ll
index c6a091bc78c8..eb39c6da1c99 100644
--- a/test/CodeGen/WebAssembly/dbgvalue.ll
+++ b/test/CodeGen/WebAssembly/dbgvalue.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -verify-machineinstrs -mtriple=wasm32-unknown-unknown | FileCheck %s
+; RUN: llc < %s -O0 -verify-machineinstrs -mtriple=wasm32-unknown-unknown-wasm | FileCheck %s
; CHECK: BB#0
; CHECK: #DEBUG_VALUE: usage:self <- %vreg4
@@ -6,7 +6,7 @@
; CHECK: DW_TAG_variable
source_filename = "test/CodeGen/WebAssembly/dbgvalue.ll"
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
@key = external local_unnamed_addr global [15 x i8], align 1
@.str = external unnamed_addr constant [33 x i8], align 1
diff --git a/test/CodeGen/WebAssembly/dead-vreg.ll b/test/CodeGen/WebAssembly/dead-vreg.ll
index 190a08564001..06487e4cd363 100644
--- a/test/CodeGen/WebAssembly/dead-vreg.ll
+++ b/test/CodeGen/WebAssembly/dead-vreg.ll
@@ -3,7 +3,7 @@
; Check that unused vregs aren't assigned registers.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
define void @foo(i32* nocapture %a, i32 %w, i32 %h) {
; CHECK-LABEL: foo:
diff --git a/test/CodeGen/WebAssembly/divrem-constant.ll b/test/CodeGen/WebAssembly/divrem-constant.ll
index 6150cab4d4fd..1b4d30ad9493 100644
--- a/test/CodeGen/WebAssembly/divrem-constant.ll
+++ b/test/CodeGen/WebAssembly/divrem-constant.ll
@@ -3,7 +3,7 @@
; Test that integer div and rem by constant are optimized appropriately.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: test_udiv_2:
; CHECK: i32.shr_u
diff --git a/test/CodeGen/WebAssembly/f16.ll b/test/CodeGen/WebAssembly/f16.ll
new file mode 100644
index 000000000000..6915f93e9b96
--- /dev/null
+++ b/test/CodeGen/WebAssembly/f16.ll
@@ -0,0 +1,29 @@
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -fast-isel | FileCheck %s
+
+; Test that f16 is expanded.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown-wasm"
+
+; CHECK-LABEL: demote:
+; CHECK-NEXT: .param f32{{$}}
+; CHECK-NEXT: .result f32{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i32.call $push[[L1:[0-9]+]]=, __gnu_f2h_ieee@FUNCTION, $pop[[L0]]{{$}}
+; CHECK-NEXT: f32.call $push[[L2:[0-9]+]]=, __gnu_h2f_ieee@FUNCTION, $pop[[L1]]{{$}}
+; CHECK-NEXT: return $pop[[L2]]{{$}}
+define half @demote(float %f) {
+ %t = fptrunc float %f to half
+ ret half %t
+}
+
+; CHECK-LABEL: promote:
+; CHECK-NEXT: .param f32{{$}}
+; CHECK-NEXT: .result f32{{$}}
+; CHECK-NEXT: get_local $push0=, 0{{$}}
+; CHECK-NEXT: return $pop0{{$}}
+define float @promote(half %f) {
+ %t = fpext half %f to float
+ ret float %t
+}
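Since WebAssembly has no native 16-bit float type, the legalizer expands half through the compiler-rt conversion helpers checked above: __gnu_f2h_ieee truncates an f32 to the 16-bit encoding, and __gnu_h2f_ieee widens it back. A hypothetical companion test in the same conventions (not part of the patch), assuming a half store lowers to the f2h helper followed by a 16-bit integer store:

; CHECK-LABEL: store_half:
; CHECK: i32.call $push{{[0-9]+}}=, __gnu_f2h_ieee@FUNCTION
; CHECK: i32.store16
define void @store_half(half* %p, float %f) {
  %t = fptrunc float %f to half
  store half %t, half* %p
  ret void
}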
diff --git a/test/CodeGen/WebAssembly/f32.ll b/test/CodeGen/WebAssembly/f32.ll
index 1c1d8191a987..45f00aa5a01f 100644
--- a/test/CodeGen/WebAssembly/f32.ll
+++ b/test/CodeGen/WebAssembly/f32.ll
@@ -3,7 +3,7 @@
; Test that basic 32-bit floating-point operations assemble as expected.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
declare float @llvm.fabs.f32(float)
declare float @llvm.copysign.f32(float, float)
@@ -18,104 +18,106 @@ declare float @llvm.fma.f32(float, float, float)
; CHECK-LABEL: fadd32:
; CHECK-NEXT: .param f32, f32{{$}}
; CHECK-NEXT: .result f32{{$}}
-; CHECK-NEXT: f32.add $push0=, $0, $1{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f32.add $push[[LR:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define float @fadd32(float %x, float %y) {
%a = fadd float %x, %y
ret float %a
}
; CHECK-LABEL: fsub32:
-; CHECK: f32.sub $push0=, $0, $1{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f32.sub $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define float @fsub32(float %x, float %y) {
%a = fsub float %x, %y
ret float %a
}
; CHECK-LABEL: fmul32:
-; CHECK: f32.mul $push0=, $0, $1{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f32.mul $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define float @fmul32(float %x, float %y) {
%a = fmul float %x, %y
ret float %a
}
; CHECK-LABEL: fdiv32:
-; CHECK: f32.div $push0=, $0, $1{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f32.div $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define float @fdiv32(float %x, float %y) {
%a = fdiv float %x, %y
ret float %a
}
; CHECK-LABEL: fabs32:
-; CHECK: f32.abs $push0=, $0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f32.abs $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define float @fabs32(float %x) {
%a = call float @llvm.fabs.f32(float %x)
ret float %a
}
; CHECK-LABEL: fneg32:
-; CHECK: f32.neg $push0=, $0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f32.neg $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define float @fneg32(float %x) {
%a = fsub float -0., %x
ret float %a
}
; CHECK-LABEL: copysign32:
-; CHECK: f32.copysign $push0=, $0, $1{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f32.copysign $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define float @copysign32(float %x, float %y) {
%a = call float @llvm.copysign.f32(float %x, float %y)
ret float %a
}
; CHECK-LABEL: sqrt32:
-; CHECK: f32.sqrt $push0=, $0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f32.sqrt $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define float @sqrt32(float %x) {
%a = call float @llvm.sqrt.f32(float %x)
ret float %a
}
; CHECK-LABEL: ceil32:
-; CHECK: f32.ceil $push0=, $0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f32.ceil $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define float @ceil32(float %x) {
%a = call float @llvm.ceil.f32(float %x)
ret float %a
}
; CHECK-LABEL: floor32:
-; CHECK: f32.floor $push0=, $0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f32.floor $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define float @floor32(float %x) {
%a = call float @llvm.floor.f32(float %x)
ret float %a
}
; CHECK-LABEL: trunc32:
-; CHECK: f32.trunc $push0=, $0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f32.trunc $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define float @trunc32(float %x) {
%a = call float @llvm.trunc.f32(float %x)
ret float %a
}
; CHECK-LABEL: nearest32:
-; CHECK: f32.nearest $push0=, $0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f32.nearest $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define float @nearest32(float %x) {
%a = call float @llvm.nearbyint.f32(float %x)
ret float %a
}
; CHECK-LABEL: nearest32_via_rint:
-; CHECK: f32.nearest $push0=, $0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f32.nearest $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define float @nearest32_via_rint(float %x) {
%a = call float @llvm.rint.f32(float %x)
ret float %a
@@ -128,7 +130,7 @@ define float @nearest32_via_rint(float %x) {
; tests.
; CHECK-LABEL: fmin32:
-; CHECK: f32.min $push1=, $0, $pop0{{$}}
+; CHECK: f32.min $push1=, $pop{{[0-9]+}}, $pop[[LR]]{{$}}
; CHECK-NEXT: return $pop1{{$}}
define float @fmin32(float %x) {
%a = fcmp ult float %x, 0.0
@@ -137,7 +139,7 @@ define float @fmin32(float %x) {
}
; CHECK-LABEL: fmax32:
-; CHECK: f32.max $push1=, $0, $pop0{{$}}
+; CHECK: f32.max $push1=, $pop{{[0-9]+}}, $pop[[LR]]{{$}}
; CHECK-NEXT: return $pop1{{$}}
define float @fmax32(float %x) {
%a = fcmp ugt float %x, 0.0
@@ -146,8 +148,8 @@ define float @fmax32(float %x) {
}
; CHECK-LABEL: fma32:
-; CHECK: {{^}} f32.call $push0=, fmaf@FUNCTION, $0, $1, $2{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: {{^}} f32.call $push[[LR:[0-9]+]]=, fmaf@FUNCTION, $pop{{[0-9]+}}, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define float @fma32(float %a, float %b, float %c) {
%d = call float @llvm.fma.f32(float %a, float %b, float %c)
ret float %d
diff --git a/test/CodeGen/WebAssembly/f64.ll b/test/CodeGen/WebAssembly/f64.ll
index 670f3f0b6978..fb52c3f92ad6 100644
--- a/test/CodeGen/WebAssembly/f64.ll
+++ b/test/CodeGen/WebAssembly/f64.ll
@@ -3,7 +3,7 @@
; Test that basic 64-bit floating-point operations assemble as expected.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
declare double @llvm.fabs.f64(double)
declare double @llvm.copysign.f64(double, double)
@@ -18,104 +18,106 @@ declare double @llvm.fma.f64(double, double, double)
; CHECK-LABEL: fadd64:
; CHECK-NEXT: .param f64, f64{{$}}
; CHECK-NEXT: .result f64{{$}}
-; CHECK-NEXT: f64.add $push0=, $0, $1{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f64.add $push[[LR:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define double @fadd64(double %x, double %y) {
%a = fadd double %x, %y
ret double %a
}
; CHECK-LABEL: fsub64:
-; CHECK: f64.sub $push0=, $0, $1{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f64.sub $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define double @fsub64(double %x, double %y) {
%a = fsub double %x, %y
ret double %a
}
; CHECK-LABEL: fmul64:
-; CHECK: f64.mul $push0=, $0, $1{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f64.mul $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define double @fmul64(double %x, double %y) {
%a = fmul double %x, %y
ret double %a
}
; CHECK-LABEL: fdiv64:
-; CHECK: f64.div $push0=, $0, $1{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f64.div $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define double @fdiv64(double %x, double %y) {
%a = fdiv double %x, %y
ret double %a
}
; CHECK-LABEL: fabs64:
-; CHECK: f64.abs $push0=, $0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f64.abs $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define double @fabs64(double %x) {
%a = call double @llvm.fabs.f64(double %x)
ret double %a
}
; CHECK-LABEL: fneg64:
-; CHECK: f64.neg $push0=, $0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f64.neg $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define double @fneg64(double %x) {
%a = fsub double -0., %x
ret double %a
}
; CHECK-LABEL: copysign64:
-; CHECK: f64.copysign $push0=, $0, $1{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f64.copysign $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define double @copysign64(double %x, double %y) {
%a = call double @llvm.copysign.f64(double %x, double %y)
ret double %a
}
; CHECK-LABEL: sqrt64:
-; CHECK: f64.sqrt $push0=, $0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f64.sqrt $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define double @sqrt64(double %x) {
%a = call double @llvm.sqrt.f64(double %x)
ret double %a
}
; CHECK-LABEL: ceil64:
-; CHECK: f64.ceil $push0=, $0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f64.ceil $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define double @ceil64(double %x) {
%a = call double @llvm.ceil.f64(double %x)
ret double %a
}
; CHECK-LABEL: floor64:
-; CHECK: f64.floor $push0=, $0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f64.floor $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define double @floor64(double %x) {
%a = call double @llvm.floor.f64(double %x)
ret double %a
}
; CHECK-LABEL: trunc64:
-; CHECK: f64.trunc $push0=, $0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f64.trunc $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define double @trunc64(double %x) {
%a = call double @llvm.trunc.f64(double %x)
ret double %a
}
; CHECK-LABEL: nearest64:
-; CHECK: f64.nearest $push0=, $0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f64.nearest $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define double @nearest64(double %x) {
%a = call double @llvm.nearbyint.f64(double %x)
ret double %a
}
; CHECK-LABEL: nearest64_via_rint:
-; CHECK: f64.nearest $push0=, $0{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: f64.nearest $push[[LR:[0-9]+]]=, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define double @nearest64_via_rint(double %x) {
%a = call double @llvm.rint.f64(double %x)
ret double %a
@@ -128,7 +130,7 @@ define double @nearest64_via_rint(double %x) {
; tests.
; CHECK-LABEL: fmin64:
-; CHECK: f64.min $push1=, $0, $pop0{{$}}
+; CHECK: f64.min $push1=, $pop{{[0-9]+}}, $pop[[LR]]{{$}}
; CHECK-NEXT: return $pop1{{$}}
define double @fmin64(double %x) {
%a = fcmp ult double %x, 0.0
@@ -137,7 +139,7 @@ define double @fmin64(double %x) {
}
; CHECK-LABEL: fmax64:
-; CHECK: f64.max $push1=, $0, $pop0{{$}}
+; CHECK: f64.max $push1=, $pop{{[0-9]+}}, $pop[[LR]]{{$}}
; CHECK-NEXT: return $pop1{{$}}
define double @fmax64(double %x) {
%a = fcmp ugt double %x, 0.0
@@ -146,8 +148,8 @@ define double @fmax64(double %x) {
}
; CHECK-LABEL: fma64:
-; CHECK: {{^}} f64.call $push0=, fma@FUNCTION, $0, $1, $2{{$}}
-; CHECK-NEXT: return $pop0{{$}}
+; CHECK: {{^}} f64.call $push[[LR:[0-9]+]]=, fma@FUNCTION, $pop{{[0-9]+}}, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
+; CHECK-NEXT: return $pop[[LR]]{{$}}
define double @fma64(double %a, double %b, double %c) {
%d = call double @llvm.fma.f64(double %a, double %b, double %c)
ret double %d
diff --git a/test/CodeGen/WebAssembly/fast-isel-noreg.ll b/test/CodeGen/WebAssembly/fast-isel-noreg.ll
index a2504822dd1c..229651d093f0 100644
--- a/test/CodeGen/WebAssembly/fast-isel-noreg.ll
+++ b/test/CodeGen/WebAssembly/fast-isel-noreg.ll
@@ -4,7 +4,7 @@
; Test that FastISel does not generate instructions with NoReg
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK: i32.const $push0=, 0
define hidden i32 @a() #0 {
diff --git a/test/CodeGen/WebAssembly/fast-isel.ll b/test/CodeGen/WebAssembly/fast-isel.ll
index 953bd610b1bc..457c5874e493 100644
--- a/test/CodeGen/WebAssembly/fast-isel.ll
+++ b/test/CodeGen/WebAssembly/fast-isel.ll
@@ -1,9 +1,10 @@
; RUN: llc < %s -asm-verbose=false \
; RUN: -fast-isel -fast-isel-abort=1 -verify-machineinstrs \
+; RUN: -disable-wasm-explicit-locals \
; RUN: | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; This tests very minimal fast-isel functionality.
diff --git a/test/CodeGen/WebAssembly/frem.ll b/test/CodeGen/WebAssembly/frem.ll
index b8745224ab82..1a9c13417b67 100644
--- a/test/CodeGen/WebAssembly/frem.ll
+++ b/test/CodeGen/WebAssembly/frem.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
; Test that the frem instruction works.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: frem32:
; CHECK-NEXT: .param f32, f32{{$}}
diff --git a/test/CodeGen/WebAssembly/func.ll b/test/CodeGen/WebAssembly/func.ll
index 71c00a46de86..994ef62bf54d 100644
--- a/test/CodeGen/WebAssembly/func.ll
+++ b/test/CodeGen/WebAssembly/func.ll
@@ -1,13 +1,13 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
; Test that basic functions assemble as expected.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: f0:
; CHECK: return{{$}}
-; CHECK: .endfunc{{$}}
+; CHECK: end_function{{$}}
; CHECK: .size f0,
define void @f0() {
ret void
diff --git a/test/CodeGen/WebAssembly/function-bitcasts.ll b/test/CodeGen/WebAssembly/function-bitcasts.ll
index e4f8f3fb6ca9..3f20aef08115 100644
--- a/test/CodeGen/WebAssembly/function-bitcasts.ll
+++ b/test/CodeGen/WebAssembly/function-bitcasts.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-explicit-locals | FileCheck %s
; Test that function pointer casts are replaced with wrappers.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: test:
; CHECK-NEXT: call .Lbitcast@FUNCTION{{$}}
@@ -20,31 +20,39 @@ target triple = "wasm32-unknown-unknown"
; CHECK-NEXT: call foo2@FUNCTION{{$}}
; CHECK-NEXT: call foo1@FUNCTION{{$}}
; CHECK-NEXT: call foo3@FUNCTION{{$}}
-; CHECK-NEXT: .endfunc
+; CHECK-NEXT: end_function
+
+; CHECK-LABEL: test_varargs:
+; CHECK: set_global
+; CHECK: i32.const $push[[L3:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: call vararg@FUNCTION, $pop[[L3]]{{$}}
+; CHECK-NEXT: i32.const $push[[L4:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i32.store 0($[[L5:[0-9]+]]), $pop[[L4]]{{$}}
+; CHECK-NEXT: call plain@FUNCTION, $[[L5]]{{$}}
; CHECK-LABEL: .Lbitcast:
-; CHECK-NEXT: .local i32
; CHECK-NEXT: call has_i32_arg@FUNCTION, $0{{$}}
-; CHECK-NEXT: .endfunc
+; CHECK-NEXT: end_function
; CHECK-LABEL: .Lbitcast.1:
; CHECK-NEXT: call $drop=, has_i32_ret@FUNCTION{{$}}
-; CHECK-NEXT: .endfunc
+; CHECK-NEXT: end_function
; CHECK-LABEL: .Lbitcast.2:
; CHECK-NEXT: .param i32
; CHECK-NEXT: call foo0@FUNCTION{{$}}
-; CHECK-NEXT: .endfunc
+; CHECK-NEXT: end_function
; CHECK-LABEL: .Lbitcast.3:
; CHECK-NEXT: .result i32
-; CHECK-NEXT: .local i32
; CHECK-NEXT: call foo1@FUNCTION{{$}}
; CHECK-NEXT: copy_local $push0=, $0
-; CHECK-NEXT: .endfunc
+; CHECK-NEXT: end_function
declare void @has_i32_arg(i32)
declare i32 @has_i32_ret()
+declare void @vararg(...)
+declare void @plain(i32)
declare void @foo0()
declare void @foo1()
@@ -70,3 +78,9 @@ entry:
ret void
}
+
+define void @test_varargs() {
+ call void bitcast (void (...)* @vararg to void (i32)*)(i32 0)
+ call void (...) bitcast (void (i32)* @plain to void (...)*)(i32 0)
+ ret void
+}
diff --git a/test/CodeGen/WebAssembly/global.ll b/test/CodeGen/WebAssembly/global.ll
index 1d24035d8dd4..599eb53b431b 100644
--- a/test/CodeGen/WebAssembly/global.ll
+++ b/test/CodeGen/WebAssembly/global.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
; Test that globals assemble as expected.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-NOT: llvm.used
; CHECK-NOT: llvm.metadata
@@ -42,15 +42,21 @@ define i8* @call_memcpy(i8* %p, i8* nocapture readonly %q, i32 %n) {
@ud = internal global i32 undef
; CHECK: .type nil,@object
-; CHECK-NEXT: .lcomm nil,4,2{{$}}
+; CHECK: .p2align 2
+; CHECK: nil:
+; CHECK: .int32 0
+; CHECK: .size nil, 4
@nil = internal global i32 zeroinitializer
; CHECK: .type z,@object
-; CHECK-NEXT: .lcomm z,4,2{{$}}
+; CHECK: .p2align 2
+; CHECK: z:
+; CHECK: .int32 0
+; CHECK: .size z, 4
@z = internal global i32 0
-; CHECK-NEXT: .type one,@object
-; CHECK-NEXT: .p2align 2{{$}}
+; CHECK: .type one,@object
+; CHECK: .p2align 2{{$}}
; CHECK-NEXT: one:
; CHECK-NEXT: .int32 1{{$}}
; CHECK-NEXT: .size one, 4{{$}}
@@ -78,11 +84,17 @@ define i8* @call_memcpy(i8* %p, i8* nocapture readonly %q, i32 %n) {
@ud64 = internal global i64 undef
; CHECK: .type nil64,@object
-; CHECK: .lcomm nil64,8,3{{$}}
+; CHECK: .p2align 3{{$}}
+; CHECK-NEXT: nil64:
+; CHECK-NEXT: .int64 0{{$}}
+; CHECK-NEXT: .size nil64, 8{{$}}
@nil64 = internal global i64 zeroinitializer
; CHECK: .type z64,@object
-; CHECK: .lcomm z64,8,3{{$}}
+; CHECK: .p2align 3{{$}}
+; CHECK-NEXT: z64:
+; CHECK-NEXT: .int64 0{{$}}
+; CHECK-NEXT: .size z64, 8{{$}}
@z64 = internal global i64 0
; CHECK: .type twoP32,@object
@@ -107,11 +119,17 @@ define i8* @call_memcpy(i8* %p, i8* nocapture readonly %q, i32 %n) {
@f32ud = internal global float undef
; CHECK: .type f32nil,@object
-; CHECK: .lcomm f32nil,4,2{{$}}
+; CHECK: .p2align 2{{$}}
+; CHECK-NEXT: f32nil:
+; CHECK-NEXT: .int32 0{{$}}
+; CHECK-NEXT: .size f32nil, 4{{$}}
@f32nil = internal global float zeroinitializer
; CHECK: .type f32z,@object
-; CHECK: .lcomm f32z,4,2{{$}}
+; CHECK: .p2align 2{{$}}
+; CHECK-NEXT: f32z:
+; CHECK-NEXT: .int32 0{{$}}
+; CHECK-NEXT: .size f32z, 4{{$}}
@f32z = internal global float 0.0
; CHECK: .type f32nz,@object
@@ -136,11 +154,17 @@ define i8* @call_memcpy(i8* %p, i8* nocapture readonly %q, i32 %n) {
@f64ud = internal global double undef
; CHECK: .type f64nil,@object
-; CHECK: .lcomm f64nil,8,3{{$}}
+; CHECK: .p2align 3{{$}}
+; CHECK-NEXT: f64nil:
+; CHECK-NEXT: .int64 0{{$}}
+; CHECK-NEXT: .size f64nil, 8{{$}}
@f64nil = internal global double zeroinitializer
; CHECK: .type f64z,@object
-; CHECK: .lcomm f64z,8,3{{$}}
+; CHECK: .p2align 3{{$}}
+; CHECK-NEXT: f64z:
+; CHECK-NEXT: .int64 0{{$}}
+; CHECK-NEXT: .size f64z, 8{{$}}
@f64z = internal global double 0.0
; CHECK: .type f64nz,@object
@@ -168,7 +192,7 @@ define i8* @call_memcpy(i8* %p, i8* nocapture readonly %q, i32 %n) {
; Constant global.
; CHECK: .type rom,@object{{$}}
-; CHECK: .section .rodata,"a",@progbits{{$}}
+; CHECK: .section .rodata.rom,
; CHECK: .globl rom{{$}}
; CHECK: .p2align 4{{$}}
; CHECK: rom:
@@ -177,11 +201,11 @@ define i8* @call_memcpy(i8* %p, i8* nocapture readonly %q, i32 %n) {
@rom = constant [128 x i32] zeroinitializer, align 16
; CHECK: .type array,@object
-; CHECK-NEXT: array:
+; CHECK: array:
; CHECK-NEXT: .skip 8
; CHECK-NEXT: .size array, 8
; CHECK: .type pointer_to_array,@object
-; CHECK-NEXT: .section .data.rel.ro,"aw",@progbits
+; CHECK-NEXT: .section .data.rel.ro.pointer_to_array,
; CHECK-NEXT: .globl pointer_to_array
; CHECK-NEXT: .p2align 2
; CHECK-NEXT: pointer_to_array:
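The emission change running through this file, sketched: zero-initialized globals were previously BSS-style common symbols via .lcomm; they are now written out explicitly with alignment, label, initializer, and size. For a hypothetical i32 global:

@zeroed = internal global i32 0
; old: .lcomm zeroed,4,2
; new: .p2align 2
;      zeroed:
;      .int32 0
;      .size zeroed, 4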
diff --git a/test/CodeGen/WebAssembly/globl.ll b/test/CodeGen/WebAssembly/globl.ll
index 3ebd3d88fb4e..ba9f6659d7d7 100644
--- a/test/CodeGen/WebAssembly/globl.ll
+++ b/test/CodeGen/WebAssembly/globl.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -asm-verbose=false | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK: .globl foo
; CHECK-LABEL: foo:
diff --git a/test/CodeGen/WebAssembly/i128.ll b/test/CodeGen/WebAssembly/i128.ll
index 29bf787863d5..2e44af9c5184 100644
--- a/test/CodeGen/WebAssembly/i128.ll
+++ b/test/CodeGen/WebAssembly/i128.ll
@@ -3,7 +3,7 @@
; Test that basic 128-bit integer operations assemble as expected.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
declare i128 @llvm.ctlz.i128(i128, i1)
declare i128 @llvm.cttz.i128(i128, i1)
diff --git a/test/CodeGen/WebAssembly/i32-load-store-alignment.ll b/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
index fb7deecff33a..661d1b7bfc3e 100644
--- a/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
+++ b/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
; Test loads and stores with custom alignment values.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: ldi32_a1:
; CHECK-NEXT: .param i32{{$}}
diff --git a/test/CodeGen/WebAssembly/i32.ll b/test/CodeGen/WebAssembly/i32.ll
index a07dd02beced..e451695d8903 100644
--- a/test/CodeGen/WebAssembly/i32.ll
+++ b/test/CodeGen/WebAssembly/i32.ll
@@ -3,7 +3,7 @@
; Test that basic 32-bit integer operations assemble as expected.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
declare i32 @llvm.ctlz.i32(i32, i1)
declare i32 @llvm.cttz.i32(i32, i1)
@@ -12,7 +12,9 @@ declare i32 @llvm.ctpop.i32(i32)
; CHECK-LABEL: add32:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.add $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.add $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @add32(i32 %x, i32 %y) {
%a = add i32 %x, %y
@@ -22,7 +24,9 @@ define i32 @add32(i32 %x, i32 %y) {
; CHECK-LABEL: sub32:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.sub $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.sub $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @sub32(i32 %x, i32 %y) {
%a = sub i32 %x, %y
@@ -32,7 +36,9 @@ define i32 @sub32(i32 %x, i32 %y) {
; CHECK-LABEL: mul32:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.mul $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.mul $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @mul32(i32 %x, i32 %y) {
%a = mul i32 %x, %y
@@ -42,7 +48,9 @@ define i32 @mul32(i32 %x, i32 %y) {
; CHECK-LABEL: sdiv32:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.div_s $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.div_s $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @sdiv32(i32 %x, i32 %y) {
%a = sdiv i32 %x, %y
@@ -52,7 +60,9 @@ define i32 @sdiv32(i32 %x, i32 %y) {
; CHECK-LABEL: udiv32:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.div_u $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.div_u $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @udiv32(i32 %x, i32 %y) {
%a = udiv i32 %x, %y
@@ -62,7 +72,9 @@ define i32 @udiv32(i32 %x, i32 %y) {
; CHECK-LABEL: srem32:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.rem_s $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.rem_s $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @srem32(i32 %x, i32 %y) {
%a = srem i32 %x, %y
@@ -72,7 +84,9 @@ define i32 @srem32(i32 %x, i32 %y) {
; CHECK-LABEL: urem32:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.rem_u $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.rem_u $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @urem32(i32 %x, i32 %y) {
%a = urem i32 %x, %y
@@ -82,7 +96,9 @@ define i32 @urem32(i32 %x, i32 %y) {
; CHECK-LABEL: and32:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.and $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.and $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @and32(i32 %x, i32 %y) {
%a = and i32 %x, %y
@@ -92,7 +108,9 @@ define i32 @and32(i32 %x, i32 %y) {
; CHECK-LABEL: or32:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.or $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.or $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @or32(i32 %x, i32 %y) {
%a = or i32 %x, %y
@@ -102,7 +120,9 @@ define i32 @or32(i32 %x, i32 %y) {
; CHECK-LABEL: xor32:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.xor $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.xor $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @xor32(i32 %x, i32 %y) {
%a = xor i32 %x, %y
@@ -112,7 +132,9 @@ define i32 @xor32(i32 %x, i32 %y) {
; CHECK-LABEL: shl32:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.shl $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.shl $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @shl32(i32 %x, i32 %y) {
%a = shl i32 %x, %y
@@ -122,7 +144,9 @@ define i32 @shl32(i32 %x, i32 %y) {
; CHECK-LABEL: shr32:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.shr_u $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.shr_u $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @shr32(i32 %x, i32 %y) {
%a = lshr i32 %x, %y
@@ -132,7 +156,9 @@ define i32 @shr32(i32 %x, i32 %y) {
; CHECK-LABEL: sar32:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.shr_s $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.shr_s $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @sar32(i32 %x, i32 %y) {
%a = ashr i32 %x, %y
@@ -142,7 +168,8 @@ define i32 @sar32(i32 %x, i32 %y) {
; CHECK-LABEL: clz32:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.clz $push0=, $0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i32.clz $push0=, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @clz32(i32 %x) {
%a = call i32 @llvm.ctlz.i32(i32 %x, i1 false)
@@ -152,7 +179,8 @@ define i32 @clz32(i32 %x) {
; CHECK-LABEL: clz32_zero_undef:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.clz $push0=, $0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i32.clz $push0=, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @clz32_zero_undef(i32 %x) {
%a = call i32 @llvm.ctlz.i32(i32 %x, i1 true)
@@ -162,7 +190,8 @@ define i32 @clz32_zero_undef(i32 %x) {
; CHECK-LABEL: ctz32:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.ctz $push0=, $0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i32.ctz $push0=, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @ctz32(i32 %x) {
%a = call i32 @llvm.cttz.i32(i32 %x, i1 false)
@@ -172,7 +201,8 @@ define i32 @ctz32(i32 %x) {
; CHECK-LABEL: ctz32_zero_undef:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.ctz $push0=, $0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i32.ctz $push0=, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @ctz32_zero_undef(i32 %x) {
%a = call i32 @llvm.cttz.i32(i32 %x, i1 true)
@@ -182,7 +212,8 @@ define i32 @ctz32_zero_undef(i32 %x) {
; CHECK-LABEL: popcnt32:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.popcnt $push0=, $0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i32.popcnt $push0=, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @popcnt32(i32 %x) {
%a = call i32 @llvm.ctpop.i32(i32 %x)
@@ -192,7 +223,8 @@ define i32 @popcnt32(i32 %x) {
; CHECK-LABEL: eqz32:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.eqz $push0=, $0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i32.eqz $push0=, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @eqz32(i32 %x) {
%a = icmp eq i32 %x, 0
@@ -203,7 +235,9 @@ define i32 @eqz32(i32 %x) {
; CHECK-LABEL: rotl:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.rotl $push0=, $0, $1
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.rotl $push0=, $pop[[L0]], $pop[[L1]]
; CHECK-NEXT: return $pop0{{$}}
define i32 @rotl(i32 %x, i32 %y) {
%z = sub i32 32, %y
@@ -216,7 +250,9 @@ define i32 @rotl(i32 %x, i32 %y) {
; CHECK-LABEL: masked_rotl:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.rotl $push0=, $0, $1
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.rotl $push0=, $pop[[L0]], $pop[[L1]]
; CHECK-NEXT: return $pop0{{$}}
define i32 @masked_rotl(i32 %x, i32 %y) {
%a = and i32 %y, 31
@@ -230,7 +266,9 @@ define i32 @masked_rotl(i32 %x, i32 %y) {
; CHECK-LABEL: rotr:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.rotr $push0=, $0, $1
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.rotr $push0=, $pop[[L0]], $pop[[L1]]
; CHECK-NEXT: return $pop0{{$}}
define i32 @rotr(i32 %x, i32 %y) {
%z = sub i32 32, %y
@@ -243,7 +281,9 @@ define i32 @rotr(i32 %x, i32 %y) {
; CHECK-LABEL: masked_rotr:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.rotr $push0=, $0, $1
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.rotr $push0=, $pop[[L0]], $pop[[L1]]
; CHECK-NEXT: return $pop0{{$}}
define i32 @masked_rotr(i32 %x, i32 %y) {
%a = and i32 %y, 31
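The pattern repeated for every operator in this file, sketched: without -disable-wasm-explicit-locals, incoming arguments are no longer referenced directly as registers $0 and $1; the explicit-locals pass materializes each with a get_local push first. Push/pop numbering below is illustrative only, not what the CHECK lines assert:

; before: i32.add $push0=, $0, $1
; after:  get_local $push0=, 0
;         get_local $push1=, 1
;         i32.add $push2=, $pop0, $pop1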
diff --git a/test/CodeGen/WebAssembly/i64-load-store-alignment.ll b/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
index a3901dfc079a..1ccb74cb9d28 100644
--- a/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
+++ b/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
; Test loads and stores with custom alignment values.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: ldi64_a1:
; CHECK-NEXT: .param i32{{$}}
diff --git a/test/CodeGen/WebAssembly/i64.ll b/test/CodeGen/WebAssembly/i64.ll
index 93e32bfc0e1d..4386bed4ebf7 100644
--- a/test/CodeGen/WebAssembly/i64.ll
+++ b/test/CodeGen/WebAssembly/i64.ll
@@ -3,7 +3,7 @@
; Test that basic 64-bit integer operations assemble as expected.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
declare i64 @llvm.ctlz.i64(i64, i1)
declare i64 @llvm.cttz.i64(i64, i1)
@@ -12,7 +12,9 @@ declare i64 @llvm.ctpop.i64(i64)
; CHECK-LABEL: add64:
; CHECK-NEXT: .param i64, i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.add $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.add $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @add64(i64 %x, i64 %y) {
%a = add i64 %x, %y
@@ -22,7 +24,9 @@ define i64 @add64(i64 %x, i64 %y) {
; CHECK-LABEL: sub64:
; CHECK-NEXT: .param i64, i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.sub $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.sub $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @sub64(i64 %x, i64 %y) {
%a = sub i64 %x, %y
@@ -32,7 +36,9 @@ define i64 @sub64(i64 %x, i64 %y) {
; CHECK-LABEL: mul64:
; CHECK-NEXT: .param i64, i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.mul $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.mul $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @mul64(i64 %x, i64 %y) {
%a = mul i64 %x, %y
@@ -42,7 +48,9 @@ define i64 @mul64(i64 %x, i64 %y) {
; CHECK-LABEL: sdiv64:
; CHECK-NEXT: .param i64, i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.div_s $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.div_s $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @sdiv64(i64 %x, i64 %y) {
%a = sdiv i64 %x, %y
@@ -52,7 +60,9 @@ define i64 @sdiv64(i64 %x, i64 %y) {
; CHECK-LABEL: udiv64:
; CHECK-NEXT: .param i64, i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.div_u $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.div_u $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @udiv64(i64 %x, i64 %y) {
%a = udiv i64 %x, %y
@@ -62,7 +72,9 @@ define i64 @udiv64(i64 %x, i64 %y) {
; CHECK-LABEL: srem64:
; CHECK-NEXT: .param i64, i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.rem_s $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.rem_s $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @srem64(i64 %x, i64 %y) {
%a = srem i64 %x, %y
@@ -72,7 +84,9 @@ define i64 @srem64(i64 %x, i64 %y) {
; CHECK-LABEL: urem64:
; CHECK-NEXT: .param i64, i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.rem_u $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.rem_u $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @urem64(i64 %x, i64 %y) {
%a = urem i64 %x, %y
@@ -82,7 +96,9 @@ define i64 @urem64(i64 %x, i64 %y) {
; CHECK-LABEL: and64:
; CHECK-NEXT: .param i64, i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.and $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.and $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @and64(i64 %x, i64 %y) {
%a = and i64 %x, %y
@@ -92,7 +108,9 @@ define i64 @and64(i64 %x, i64 %y) {
; CHECK-LABEL: or64:
; CHECK-NEXT: .param i64, i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.or $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.or $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @or64(i64 %x, i64 %y) {
%a = or i64 %x, %y
@@ -102,7 +120,9 @@ define i64 @or64(i64 %x, i64 %y) {
; CHECK-LABEL: xor64:
; CHECK-NEXT: .param i64, i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.xor $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.xor $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @xor64(i64 %x, i64 %y) {
%a = xor i64 %x, %y
@@ -112,7 +132,9 @@ define i64 @xor64(i64 %x, i64 %y) {
; CHECK-LABEL: shl64:
; CHECK-NEXT: .param i64, i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.shl $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.shl $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @shl64(i64 %x, i64 %y) {
%a = shl i64 %x, %y
@@ -122,7 +144,9 @@ define i64 @shl64(i64 %x, i64 %y) {
; CHECK-LABEL: shr64:
; CHECK-NEXT: .param i64, i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.shr_u $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.shr_u $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @shr64(i64 %x, i64 %y) {
%a = lshr i64 %x, %y
@@ -132,7 +156,9 @@ define i64 @shr64(i64 %x, i64 %y) {
; CHECK-LABEL: sar64:
; CHECK-NEXT: .param i64, i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.shr_s $push0=, $0, $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.shr_s $push0=, $pop[[L0]], $pop[[L1]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @sar64(i64 %x, i64 %y) {
%a = ashr i64 %x, %y
@@ -142,7 +168,8 @@ define i64 @sar64(i64 %x, i64 %y) {
; CHECK-LABEL: clz64:
; CHECK-NEXT: .param i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.clz $push0=, $0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i64.clz $push0=, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @clz64(i64 %x) {
%a = call i64 @llvm.ctlz.i64(i64 %x, i1 false)
@@ -152,7 +179,8 @@ define i64 @clz64(i64 %x) {
; CHECK-LABEL: clz64_zero_undef:
; CHECK-NEXT: .param i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.clz $push0=, $0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i64.clz $push0=, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @clz64_zero_undef(i64 %x) {
%a = call i64 @llvm.ctlz.i64(i64 %x, i1 true)
@@ -162,7 +190,8 @@ define i64 @clz64_zero_undef(i64 %x) {
; CHECK-LABEL: ctz64:
; CHECK-NEXT: .param i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.ctz $push0=, $0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i64.ctz $push0=, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @ctz64(i64 %x) {
%a = call i64 @llvm.cttz.i64(i64 %x, i1 false)
@@ -172,7 +201,8 @@ define i64 @ctz64(i64 %x) {
; CHECK-LABEL: ctz64_zero_undef:
; CHECK-NEXT: .param i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.ctz $push0=, $0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i64.ctz $push0=, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @ctz64_zero_undef(i64 %x) {
%a = call i64 @llvm.cttz.i64(i64 %x, i1 true)
@@ -182,7 +212,8 @@ define i64 @ctz64_zero_undef(i64 %x) {
; CHECK-LABEL: popcnt64:
; CHECK-NEXT: .param i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.popcnt $push0=, $0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i64.popcnt $push0=, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @popcnt64(i64 %x) {
%a = call i64 @llvm.ctpop.i64(i64 %x)
@@ -192,7 +223,8 @@ define i64 @popcnt64(i64 %x) {
; CHECK-LABEL: eqz64:
; CHECK-NEXT: .param i64{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i64.eqz $push0=, $0{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i64.eqz $push0=, $pop[[L0]]{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @eqz64(i64 %x) {
%a = icmp eq i64 %x, 0
@@ -203,7 +235,9 @@ define i32 @eqz64(i64 %x) {
; CHECK-LABEL: rotl:
; CHECK-NEXT: .param i64, i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.rotl $push0=, $0, $1
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.rotl $push0=, $pop[[L0]], $pop[[L1]]
; CHECK-NEXT: return $pop0{{$}}
define i64 @rotl(i64 %x, i64 %y) {
%z = sub i64 64, %y
@@ -216,7 +250,9 @@ define i64 @rotl(i64 %x, i64 %y) {
; CHECK-LABEL: masked_rotl:
; CHECK-NEXT: .param i64, i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.rotl $push0=, $0, $1
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.rotl $push0=, $pop[[L0]], $pop[[L1]]
; CHECK-NEXT: return $pop0{{$}}
define i64 @masked_rotl(i64 %x, i64 %y) {
%a = and i64 %y, 63
@@ -230,7 +266,9 @@ define i64 @masked_rotl(i64 %x, i64 %y) {
; CHECK-LABEL: rotr:
; CHECK-NEXT: .param i64, i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.rotr $push0=, $0, $1
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.rotr $push0=, $pop[[L0]], $pop[[L1]]
; CHECK-NEXT: return $pop0{{$}}
define i64 @rotr(i64 %x, i64 %y) {
%z = sub i64 64, %y
@@ -243,7 +281,9 @@ define i64 @rotr(i64 %x, i64 %y) {
; CHECK-LABEL: masked_rotr:
; CHECK-NEXT: .param i64, i64{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.rotr $push0=, $0, $1
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.rotr $push0=, $pop[[L0]], $pop[[L1]]
; CHECK-NEXT: return $pop0{{$}}
define i64 @masked_rotr(i64 %x, i64 %y) {
%a = and i64 %y, 63
diff --git a/test/CodeGen/WebAssembly/ident.ll b/test/CodeGen/WebAssembly/ident.ll
index 49c188ec2578..e5d85d090f11 100644
--- a/test/CodeGen/WebAssembly/ident.ll
+++ b/test/CodeGen/WebAssembly/ident.ll
@@ -3,7 +3,7 @@
; Test llvm.ident.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK: .ident "hello world"
diff --git a/test/CodeGen/WebAssembly/immediates.ll b/test/CodeGen/WebAssembly/immediates.ll
index 3d11f9410a79..1182423a594e 100644
--- a/test/CodeGen/WebAssembly/immediates.ll
+++ b/test/CodeGen/WebAssembly/immediates.ll
@@ -3,7 +3,7 @@
; Test that basic immediates assemble as expected.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: zero_i32:
; CHECK-NEXT: .result i32{{$}}
diff --git a/test/CodeGen/WebAssembly/implicit-def.ll b/test/CodeGen/WebAssembly/implicit-def.ll
index 01ee171b449b..1f9f74887e8a 100644
--- a/test/CodeGen/WebAssembly/implicit-def.ll
+++ b/test/CodeGen/WebAssembly/implicit-def.ll
@@ -1,6 +1,6 @@
; RUN: llc -o - %s | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; Test that stackified IMPLICIT_DEF instructions are converted into
; CONST_I32 to provide an explicit push.
diff --git a/test/CodeGen/WebAssembly/inline-asm.ll b/test/CodeGen/WebAssembly/inline-asm.ll
index 9b72eb65e0d5..56576305d9e2 100644
--- a/test/CodeGen/WebAssembly/inline-asm.ll
+++ b/test/CodeGen/WebAssembly/inline-asm.ll
@@ -1,10 +1,10 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -no-integrated-as | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -no-integrated-as | FileCheck %s
; Test basic inline assembly. Pass -no-integrated-as since these aren't
; actually valid assembly syntax.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: foo:
; CHECK-NEXT: .param i32{{$}}
@@ -33,7 +33,6 @@ entry:
; CHECK-LABEL: imm:
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: .local i32{{$}}
; CHECK-NEXT: #APP{{$}}
; CHECK-NEXT: # $0 = ccc(42){{$}}
; CHECK-NEXT: #NO_APP{{$}}
diff --git a/test/CodeGen/WebAssembly/irreducible-cfg.ll b/test/CodeGen/WebAssembly/irreducible-cfg.ll
index 8fe7d10c5f31..dd47b5827d5b 100644
--- a/test/CodeGen/WebAssembly/irreducible-cfg.ll
+++ b/test/CodeGen/WebAssembly/irreducible-cfg.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-block-placement | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-block-placement -disable-wasm-explicit-locals | FileCheck %s
; Test irreducible CFG handling.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; A simple loop with two entries.
diff --git a/test/CodeGen/WebAssembly/legalize.ll b/test/CodeGen/WebAssembly/legalize.ll
index 5cbfb8ace9ed..978e72b5b85b 100644
--- a/test/CodeGen/WebAssembly/legalize.ll
+++ b/test/CodeGen/WebAssembly/legalize.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
; Test various types and operators that need to be legalized.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: shl_i3:
; CHECK: i32.const $push0=, 7{{$}}
diff --git a/test/CodeGen/WebAssembly/load-ext.ll b/test/CodeGen/WebAssembly/load-ext.ll
index 48a7ce7c4bd2..a624995ea625 100644
--- a/test/CodeGen/WebAssembly/load-ext.ll
+++ b/test/CodeGen/WebAssembly/load-ext.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
; Test that extending loads are assembled properly.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: sext_i8_i32:
; CHECK: i32.load8_s $push0=, 0($0){{$}}
diff --git a/test/CodeGen/WebAssembly/load-store-i1.ll b/test/CodeGen/WebAssembly/load-store-i1.ll
index ea0ec717c7a0..9882609d773b 100644
--- a/test/CodeGen/WebAssembly/load-store-i1.ll
+++ b/test/CodeGen/WebAssembly/load-store-i1.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
; Test that i1 extending loads and truncating stores are assembled properly.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: load_u_i1_i32:
; CHECK: i32.load8_u $push[[NUM0:[0-9]+]]=, 0($0){{$}}
diff --git a/test/CodeGen/WebAssembly/load.ll b/test/CodeGen/WebAssembly/load.ll
index a8e174e914e1..165d145fde1a 100644
--- a/test/CodeGen/WebAssembly/load.ll
+++ b/test/CodeGen/WebAssembly/load.ll
@@ -4,12 +4,13 @@
; Test that basic loads are assembled properly.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: ldi32:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: i32.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i32.load $push[[NUM:[0-9]+]]=, 0($pop[[L0]]){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @ldi32(i32 *%p) {
%v = load i32, i32* %p
@@ -19,7 +20,8 @@ define i32 @ldi32(i32 *%p) {
; CHECK-LABEL: ldi64:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
-; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($pop[[L0]]){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi64(i64 *%p) {
%v = load i64, i64* %p
@@ -29,7 +31,8 @@ define i64 @ldi64(i64 *%p) {
; CHECK-LABEL: ldf32:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result f32{{$}}
-; CHECK-NEXT: f32.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: f32.load $push[[NUM:[0-9]+]]=, 0($pop[[L0]]){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define float @ldf32(float *%p) {
%v = load float, float* %p
@@ -39,7 +42,8 @@ define float @ldf32(float *%p) {
; CHECK-LABEL: ldf64:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result f64{{$}}
-; CHECK-NEXT: f64.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: f64.load $push[[NUM:[0-9]+]]=, 0($pop[[L0]]){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define double @ldf64(double *%p) {
%v = load double, double* %p
diff --git a/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll b/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll
index 8283b49cd584..91fde29ea59e 100644
--- a/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll
+++ b/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll
@@ -3,7 +3,7 @@
; RUN: llc < %s | FileCheck %s --check-prefix=NONE
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
%struct.__jmp_buf_tag = type { [6 x i32], i32, [32 x i32] }
diff --git a/test/CodeGen/WebAssembly/lower-em-exceptions-whitelist.ll b/test/CodeGen/WebAssembly/lower-em-exceptions-whitelist.ll
index 5fcc39909b05..3864e445f639 100644
--- a/test/CodeGen/WebAssembly/lower-em-exceptions-whitelist.ll
+++ b/test/CodeGen/WebAssembly/lower-em-exceptions-whitelist.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -wasm-lower-em-ehsjlj -emscripten-cxx-exceptions-whitelist=do_catch -S | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
define void @dont_catch() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
; CHECK-LABEL: @dont_catch(
diff --git a/test/CodeGen/WebAssembly/lower-em-exceptions.ll b/test/CodeGen/WebAssembly/lower-em-exceptions.ll
index 60953cdb6efe..060f481c3265 100644
--- a/test/CodeGen/WebAssembly/lower-em-exceptions.ll
+++ b/test/CodeGen/WebAssembly/lower-em-exceptions.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -wasm-lower-em-ehsjlj -S | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
@_ZTIi = external constant i8*
@_ZTIc = external constant i8*
diff --git a/test/CodeGen/WebAssembly/lower-em-sjlj.ll b/test/CodeGen/WebAssembly/lower-em-sjlj.ll
index 40b9d62a0360..cf42219c0114 100644
--- a/test/CodeGen/WebAssembly/lower-em-sjlj.ll
+++ b/test/CodeGen/WebAssembly/lower-em-sjlj.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -wasm-lower-em-ehsjlj -S | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
%struct.__jmp_buf_tag = type { [6 x i32], i32, [32 x i32] }
diff --git a/test/CodeGen/WebAssembly/mem-intrinsics.ll b/test/CodeGen/WebAssembly/mem-intrinsics.ll
index 0ac1e1e182cd..32a7117a1ea6 100644
--- a/test/CodeGen/WebAssembly/mem-intrinsics.ll
+++ b/test/CodeGen/WebAssembly/mem-intrinsics.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -tail-dup-placement=0| FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -tail-dup-placement=0 | FileCheck %s
; Test memcpy, memmove, and memset intrinsics.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1)
declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1)
diff --git a/test/CodeGen/WebAssembly/memory-addr32.ll b/test/CodeGen/WebAssembly/memory-addr32.ll
index 583201b15f99..ad599b1b3f17 100644
--- a/test/CodeGen/WebAssembly/memory-addr32.ll
+++ b/test/CodeGen/WebAssembly/memory-addr32.ll
@@ -1,12 +1,12 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
; Test that basic memory operations assemble as expected with 32-bit addresses.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
declare i32 @llvm.wasm.current.memory.i32() nounwind readonly
-declare void @llvm.wasm.grow.memory.i32(i32) nounwind
+declare i32 @llvm.wasm.grow.memory.i32(i32) nounwind
; CHECK-LABEL: current_memory:
; CHECK-NEXT: .result i32{{$}}
@@ -19,9 +19,10 @@ define i32 @current_memory() {
; CHECK-LABEL: grow_memory:
; CHECK-NEXT: .param i32{{$}}
-; CHECK: grow_memory $0{{$}}
-; CHECK-NEXT: return{{$}}
-define void @grow_memory(i32 %n) {
- call void @llvm.wasm.grow.memory.i32(i32 %n)
- ret void
+; CHECK-NEXT: .result i32{{$}}
+; CHECK: grow_memory $push0=, $0{{$}}
+; CHECK-NEXT: return $pop0{{$}}
+define i32 @grow_memory(i32 %n) {
+ %a = call i32 @llvm.wasm.grow.memory.i32(i32 %n)
+ ret i32 %a
}
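The intrinsic change above, sketched: llvm.wasm.grow.memory.i32 now returns i32 rather than void. Under wasm's grow_memory semantics that result is the previous memory size in pages (with -1 signalling failure; the failure value is background here, not something this test asserts). A hypothetical caller:

declare i32 @llvm.wasm.grow.memory.i32(i32)

define i32 @grow_two_pages() {
  %old = call i32 @llvm.wasm.grow.memory.i32(i32 2)
  ret i32 %old
}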
diff --git a/test/CodeGen/WebAssembly/non-executable-stack.ll b/test/CodeGen/WebAssembly/non-executable-stack.ll
index b81063724e9c..f1e1ba36a790 100644
--- a/test/CodeGen/WebAssembly/non-executable-stack.ll
+++ b/test/CodeGen/WebAssembly/non-executable-stack.ll
@@ -4,6 +4,6 @@
; because wasm's stack is always non-executable.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-NOT: .note.GNU-stack
diff --git a/test/CodeGen/WebAssembly/offset-folding.ll b/test/CodeGen/WebAssembly/offset-folding.ll
index 863549fc20fc..e8e98ecc3307 100644
--- a/test/CodeGen/WebAssembly/offset-folding.ll
+++ b/test/CodeGen/WebAssembly/offset-folding.ll
@@ -3,7 +3,7 @@
; Test that constant offsets can be folded into global addresses.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
@x = external global [0 x i32]
@y = global [50 x i32] zeroinitializer
diff --git a/test/CodeGen/WebAssembly/offset.ll b/test/CodeGen/WebAssembly/offset.ll
index 37f08abc9fa8..27c71873302a 100644
--- a/test/CodeGen/WebAssembly/offset.ll
+++ b/test/CodeGen/WebAssembly/offset.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-explicit-locals | FileCheck %s
; Test constant load and store address offsets.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; With an nuw add, we can fold an offset.
diff --git a/test/CodeGen/WebAssembly/phi.ll b/test/CodeGen/WebAssembly/phi.ll
index 747ae5cb15d4..4aae92df54d9 100644
--- a/test/CodeGen/WebAssembly/phi.ll
+++ b/test/CodeGen/WebAssembly/phi.ll
@@ -1,15 +1,16 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -verify-machineinstrs | FileCheck %s
; Test that phis are lowered.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; Basic phi triangle.
; CHECK-LABEL: test0:
-; CHECK: div_s $[[NUM0:[0-9]+]]=, $0, $pop[[NUM1:[0-9]+]]{{$}}
-; CHECK: return $[[NUM0]]{{$}}
+; CHECK: return $0
+; CHECK: div_s $push[[NUM0:[0-9]+]]=, $0, $pop[[NUM1:[0-9]+]]{{$}}
+; CHECK: return $pop[[NUM0]]{{$}}
define i32 @test0(i32 %p) {
entry:
%t = icmp slt i32 %p, 0
diff --git a/test/CodeGen/WebAssembly/reg-stackify.ll b/test/CodeGen/WebAssembly/reg-stackify.ll
index 00469132c953..d1423b5db395 100644
--- a/test/CodeGen/WebAssembly/reg-stackify.ll
+++ b/test/CodeGen/WebAssembly/reg-stackify.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -verify-machineinstrs | FileCheck %s
; Test the register stackifier pass.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; No because of pointer aliasing.
@@ -126,7 +126,6 @@ false:
; CHECK-LABEL: multiple_uses:
; CHECK: .param i32, i32, i32{{$}}
-; CHECK-NEXT: .local i32{{$}}
; CHECK-NEXT: block {{$}}
; CHECK-NEXT: i32.load $push[[NUM0:[0-9]+]]=, 0($2){{$}}
; CHECK-NEXT: tee_local $push[[NUM1:[0-9]+]]=, $3=, $pop[[NUM0]]{{$}}
@@ -449,8 +448,7 @@ bb10: ; preds = %bb9, %bb
; CHECK-LABEL: stackpointer_dependency:
; CHECK: call {{.+}}, stackpointer_callee@FUNCTION,
-; CHECK: i32.const $push[[L0:.+]]=, 0
-; CHECK-NEXT: i32.store __stack_pointer($pop[[L0]]),
+; CHECK-NEXT: set_global 0,
declare i32 @stackpointer_callee(i8* readnone, i8* readnone)
declare i8* @llvm.frameaddress(i32)
define i32 @stackpointer_dependency(i8* readnone) {
diff --git a/test/CodeGen/WebAssembly/return-int32.ll b/test/CodeGen/WebAssembly/return-int32.ll
index 9e663b969e14..a6634b740cfc 100644
--- a/test/CodeGen/WebAssembly/return-int32.ll
+++ b/test/CodeGen/WebAssembly/return-int32.ll
@@ -2,13 +2,13 @@
; RUN: llc < %s -asm-verbose=false -fast-isel -fast-isel-abort=1 | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: return_i32:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: copy_local $push0=, $0
-; CHECK-NEXT: .endfunc{{$}}
+; CHECK-NEXT: get_local $push0=, 0
+; CHECK-NEXT: end_function{{$}}
define i32 @return_i32(i32 %p) {
ret i32 %p
}
@@ -19,7 +19,7 @@ define i32 @return_i32(i32 %p) {
; CHECK-NEXT: return $pop[[L0]]{{$}}
; CHECK: store
; CHECK-NEXT: i32.const $push{{[^,]+}}=, 3{{$}}
-; CHECK-NEXT: .endfunc{{$}}
+; CHECK-NEXT: end_function{{$}}
define i32 @return_i32_twice(i32 %a) {
%b = icmp ne i32 %a, 0
br i1 %b, label %true, label %false
diff --git a/test/CodeGen/WebAssembly/return-void.ll b/test/CodeGen/WebAssembly/return-void.ll
index c3a600f7838d..90cf37fd2c69 100644
--- a/test/CodeGen/WebAssembly/return-void.ll
+++ b/test/CodeGen/WebAssembly/return-void.ll
@@ -2,10 +2,10 @@
; RUN: llc < %s -asm-verbose=false -fast-isel -fast-isel-abort=1 | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: return_void:
-; CHECK-NEXT: .endfunc{{$}}
+; CHECK-NEXT: end_function{{$}}
define void @return_void() {
ret void
}
@@ -14,7 +14,7 @@ define void @return_void() {
; CHECK: store
; CHECK-NEXT: return{{$}}
; CHECK: store
-; CHECK-NEXT: .endfunc{{$}}
+; CHECK-NEXT: end_function{{$}}
define void @return_void_twice(i32 %a) {
%b = icmp ne i32 %a, 0
br i1 %b, label %true, label %false
diff --git a/test/CodeGen/WebAssembly/returned.ll b/test/CodeGen/WebAssembly/returned.ll
index a277928ae400..b059fd8a5987 100644
--- a/test/CodeGen/WebAssembly/returned.ll
+++ b/test/CodeGen/WebAssembly/returned.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
; Test that the "returned" attribute is optimized effectively.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: _Z3foov:
; CHECK-NEXT: .result i32{{$}}
diff --git a/test/CodeGen/WebAssembly/select.ll b/test/CodeGen/WebAssembly/select.ll
index 06837e4c2368..b25f16c499a8 100644
--- a/test/CodeGen/WebAssembly/select.ll
+++ b/test/CodeGen/WebAssembly/select.ll
@@ -1,10 +1,10 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -fast-isel -fast-isel-abort=1 | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -fast-isel -fast-isel-abort=1 | FileCheck %s
; Test that wasm select instruction is selected from LLVM select instruction.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: select_i32_bool:
; CHECK-NEXT: .param i32, i32, i32{{$}}
diff --git a/test/CodeGen/WebAssembly/signext-zeroext.ll b/test/CodeGen/WebAssembly/signext-zeroext.ll
index f9561da5363d..b07c7f669c37 100644
--- a/test/CodeGen/WebAssembly/signext-zeroext.ll
+++ b/test/CodeGen/WebAssembly/signext-zeroext.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
; Test zeroext and signext ABI keywords
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: z2s_func:
; CHECK-NEXT: .param i32{{$}}
diff --git a/test/CodeGen/WebAssembly/simd-arith.ll b/test/CodeGen/WebAssembly/simd-arith.ll
index f0e71f2cc104..62c659b7c01c 100644
--- a/test/CodeGen/WebAssembly/simd-arith.ll
+++ b/test/CodeGen/WebAssembly/simd-arith.ll
@@ -1,12 +1,12 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -mattr=+simd128 | FileCheck %s --check-prefixes CHECK,SIMD128
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -mattr=+simd128 -fast-isel | FileCheck %s --check-prefixes CHECK,SIMD128
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -mattr=-simd128 | FileCheck %s --check-prefixes CHECK,NO-SIMD128
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -mattr=-simd128 -fast-isel | FileCheck %s --check-prefixes CHECK,NO-SIMD128
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -mattr=+simd128 | FileCheck %s --check-prefixes CHECK,SIMD128
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -mattr=+simd128 -fast-isel | FileCheck %s --check-prefixes CHECK,SIMD128
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -mattr=-simd128 | FileCheck %s --check-prefixes CHECK,NO-SIMD128
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -mattr=-simd128 -fast-isel | FileCheck %s --check-prefixes CHECK,NO-SIMD128
; Test that basic SIMD128 arithmetic operations assemble as expected.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
declare i32 @llvm.ctlz.i32(i32, i1)
declare i32 @llvm.cttz.i32(i32, i1)
diff --git a/test/CodeGen/WebAssembly/stack-alignment.ll b/test/CodeGen/WebAssembly/stack-alignment.ll
index 3bb6617f8779..95aa1f9dbf07 100644
--- a/test/CodeGen/WebAssembly/stack-alignment.ll
+++ b/test/CodeGen/WebAssembly/stack-alignment.ll
@@ -1,21 +1,23 @@
; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
declare void @somefunc(i32*)
; CHECK-LABEL: underalign:
-; CHECK: i32.load $push[[L1:.+]]=, __stack_pointer{{.+}}
+; CHECK: get_global $push[[L1:.+]]=, 0{{$}}
; CHECK-NEXT: i32.const $push[[L2:.+]]=, 16
; CHECK-NEXT: i32.sub $push[[L10:.+]]=, $pop[[L1]], $pop[[L2]]
-; CHECK-NEXT: tee_local $push{{.+}}=, $[[SP:.+]]=, $pop[[L10]]
+; CHECK-NEXT: tee_local $push{{.+}}=, [[SP:.+]], $pop[[L10]]
-; CHECK: i32.add $push[[underaligned:.+]]=, $[[SP]], $pop{{.+}}
+; CHECK: get_local $push[[L3:.+]]=, [[SP]]{{$}}
+; CHECK: i32.add $push[[underaligned:.+]]=, $pop[[L3]], $pop{{.+}}
; CHECK-NEXT: call somefunc@FUNCTION, $pop[[underaligned]]
-; CHECK: i32.add $push[[L5:.+]]=, $[[SP]], $pop{{.+}}
-; CHECK-NEXT: i32.store __stack_pointer($pop{{.+}}), $pop[[L5]]
+; CHECK: get_local $push[[M4:.+]]=, [[SP]]{{$}}
+; CHECK: i32.add $push[[L5:.+]]=, $pop[[M4]], $pop{{.+}}
+; CHECK-NEXT: set_global 0, $pop[[L5]]
define void @underalign() {
entry:
%underaligned = alloca i32, align 8
@@ -24,18 +26,19 @@ entry:
}
; CHECK-LABEL: overalign:
-; CHECK: i32.load $push[[L10:.+]]=, __stack_pointer
-; CHECK-NEXT: tee_local $push[[L9:.+]]=, $[[BP:.+]]=, $pop[[L10]]
+; CHECK: get_global $push[[L10:.+]]=, 0{{$}}
+; CHECK-NEXT: tee_local $push[[L9:.+]]=, [[BP:.+]], $pop[[L10]]
; CHECK-NEXT: i32.const $push[[L2:.+]]=, 32
; CHECK-NEXT: i32.sub $push[[L8:.+]]=, $pop[[L9]], $pop[[L2]]
; CHECK-NEXT: i32.const $push[[L3:.+]]=, -32
; CHECK-NEXT: i32.and $push[[L7:.+]]=, $pop[[L8]], $pop[[L3]]
-; CHECK-NEXT: tee_local $push{{.+}}=, $[[SP:.+]]=, $pop[[L7]]
+; CHECK-NEXT: tee_local $push{{.+}}=, [[SP:.+]], $pop[[L7]]
-; CHECK: call somefunc@FUNCTION, $[[SP]]
+; CHECK: get_local $push[[M5:.+]]=, [[SP]]{{$}}
+; CHECK: call somefunc@FUNCTION, $pop[[M5]]{{$}}
-; CHECK: copy_local $push[[L5:.+]]=, $[[BP]]
-; CHECK-NEXT: i32.store __stack_pointer($pop{{.+}}), $pop[[L5]]
+; CHECK: get_local $push[[M6:.+]]=, [[BP]]{{$}}
+; CHECK-NEXT: set_global 0, $pop[[M6]]
define void @overalign() {
entry:
%overaligned = alloca i32, align 32
@@ -44,19 +47,21 @@ entry:
}
; CHECK-LABEL: over_and_normal_align:
-; CHECK: i32.load $push[[L14:.+]]=, __stack_pointer
-; CHECK-NEXT: tee_local $push[[L13:.+]]=, $[[BP:.+]]=, $pop[[L14]]
+; CHECK: get_global $push[[L14:.+]]=, 0{{$}}
+; CHECK-NEXT: tee_local $push[[L13:.+]]=, [[BP:.+]], $pop[[L14]]
; CHECK: i32.sub $push[[L12:.+]]=, $pop[[L13]], $pop{{.+}}
; CHECK: i32.and $push[[L11:.+]]=, $pop[[L12]], $pop{{.+}}
-; CHECK-NEXT: tee_local $push{{.+}}=, $[[SP]]=, $pop[[L11]]
+; CHECK-NEXT: tee_local $push{{.+}}=, [[SP:.+]], $pop[[L11]]
-; CHECK: i32.add $push[[L6:.+]]=, $[[SP]], $pop{{.+}}
+; CHECK: get_local $push[[M6:.+]]=, [[SP]]{{$}}
+; CHECK: i32.add $push[[L6:.+]]=, $pop[[M6]], $pop{{.+}}
; CHECK-NEXT: call somefunc@FUNCTION, $pop[[L6]]
-; CHECK: i32.add $push[[L8:.+]]=, $[[SP]], $pop{{.+}}
+; CHECK: get_local $push[[M7:.+]]=, [[SP]]{{$}}
+; CHECK: i32.add $push[[L8:.+]]=, $pop[[M7]], $pop{{.+}}
; CHECK-NEXT: call somefunc@FUNCTION, $pop[[L8]]
-; CHECK: copy_local $push[[L9:.+]]=, $[[BP]]
-; CHECK-NEXT: i32.store __stack_pointer({{.+}}), $pop[[L9]]
+; CHECK: get_local $push[[L6:.+]]=, [[BP]]{{$}}
+; CHECK-NEXT: set_global 0, $pop[[L6]]
define void @over_and_normal_align() {
entry:
%over = alloca i32, align 32
@@ -67,14 +72,16 @@ entry:
}
; CHECK-LABEL: dynamic_overalign:
-; CHECK: i32.load $push[[L18:.+]]=, __stack_pointer
-; CHECK-NEXT: tee_local $push[[L17:.+]]=, $[[SP:.+]]=, $pop[[L18]]
-; CHECK-NEXT: copy_local $[[BP:.+]]=, $pop[[L17]]
-; CHECK: tee_local $push{{.+}}=, $[[SP_2:.+]]=, $pop{{.+}}
+; CHECK: get_global $push[[L18:.+]]=, 0{{$}}
+; CHECK-NEXT: tee_local $push[[L17:.+]]=, [[SP:.+]], $pop[[L18]]
+; CHECK-NEXT: set_local [[BP:.+]], $pop[[L17]]
+; CHECK: tee_local $push{{.+}}=, [[SP_2:.+]], $pop{{.+}}
-; CHECK: call somefunc@FUNCTION, $[[SP_2]]
+; CHECK: get_local $push[[M8:.+]]=, [[SP_2]]{{$}}
+; CHECK: call somefunc@FUNCTION, $pop[[M8]]
-; CHECK: i32.store __stack_pointer($pop{{.+}}), $[[BP]]
+; CHECK: get_local $push[[M9:.+]]=, [[BP]]{{$}}
+; CHECK-NEXT: set_global 0, $pop[[M9]]
define void @dynamic_overalign(i32 %num) {
entry:
%dynamic = alloca i32, i32 %num, align 32
@@ -83,20 +90,22 @@ entry:
}
; CHECK-LABEL: overalign_and_dynamic:
-; CHECK: i32.load $push[[L21:.+]]=, __stack_pointer
-; CHECK-NEXT: tee_local $push[[L20:.+]]=, $[[BP:.+]]=, $pop[[L21]]
+; CHECK: get_global $push[[L21:.+]]=, 0{{$}}
+; CHECK-NEXT: tee_local $push[[L20:.+]]=, [[BP:.+]], $pop[[L21]]
; CHECK: i32.sub $push[[L19:.+]]=, $pop[[L20]], $pop{{.+}}
; CHECK: i32.and $push[[L18:.+]]=, $pop[[L19]], $pop{{.+}}
-; CHECK: tee_local $push{{.+}}=, $[[FP:.+]]=, $pop[[L18]]
-; CHECK: i32.sub $push[[L16:.+]]=, $[[FP]], $pop{{.+}}
-; CHECK-NEXT: tee_local $push{{.+}}=, $[[SP:.+]]=, $pop[[L16]]
+; CHECK: tee_local $push{{.+}}=, [[FP:.+]], $pop[[L18]]
+; CHECK: get_local $push[[M10:.+]]=, [[FP]]{{$}}
+; CHECK: i32.sub $push[[L16:.+]]=, $pop[[M10]], $pop{{.+}}
+; CHECK-NEXT: tee_local $push{{.+}}=, [[SP:.+]], $pop[[L16]]
-; CHECK: copy_local $push[[over:.+]]=, $[[FP]]
+; CHECK: get_local $push[[over:.+]]=, [[FP]]
; CHECK-NEXT: call somefunc@FUNCTION, $pop[[over]]
-; CHECK-NEXT: call somefunc@FUNCTION, $[[SP]]
+; CHECK: get_local $push[[another:.+]]=, [[SP]]
+; CHECK-NEXT: call somefunc@FUNCTION, $pop[[another]]
-; CHECK: copy_local $push[[L12:.+]]=, $[[BP]]
-; CHECK-NEXT: i32.store __stack_pointer($pop{{.+}}), $pop[[L12]]
+; CHECK: get_local $push[[M11:.+]]=, [[BP]]{{$}}
+; CHECK-NEXT: set_global 0, $pop[[M11]]
define void @overalign_and_dynamic(i32 %num) {
entry:
%over = alloca i32, align 32
@@ -107,24 +116,27 @@ entry:
}
; CHECK-LABEL: overalign_static_and_dynamic:
-; CHECK: i32.load $push[[L26:.+]]=, __stack_pointer
-; CHECK-NEXT: tee_local $push[[L25:.+]]=, $[[BP:.+]]=, $pop[[L26]]
+; CHECK: get_global $push[[L26:.+]]=, 0{{$}}
+; CHECK-NEXT: tee_local $push[[L25:.+]]=, [[BP:.+]], $pop[[L26]]
; CHECK: i32.sub $push[[L24:.+]]=, $pop[[L25]], $pop{{.+}}
; CHECK: i32.and $push[[L23:.+]]=, $pop[[L24]], $pop{{.+}}
-; CHECK: tee_local $push{{.+}}=, $[[FP:.+]]=, $pop[[L23]]
-; CHECK: i32.sub $push[[L21:.+]]=, $[[FP]], $pop{{.+}}
-; CHECK-NEXT: tee_local $push{{.+}}=, $[[SP:.+]]=, $pop[[L21]]
+; CHECK: tee_local $push{{.+}}=, [[FP:.+]], $pop[[L23]]
+; CHECK: get_local $push[[M12:.+]]=, [[FP]]{{$}}
+; CHECK: i32.sub $push[[L21:.+]]=, $pop[[M12]], $pop{{.+}}
+; CHECK-NEXT: tee_local $push{{.+}}=, [[SP:.+]], $pop[[L21]]
-; CHECK: copy_local $push[[L19:.+]]=, $[[FP]]
-; CHECK: tee_local $push[[L18:.+]]=, $[[FP_2:.+]]=, $pop[[L19]]
+; CHECK: get_local $push[[L19:.+]]=, [[FP]]
+; CHECK: tee_local $push[[L18:.+]]=, [[FP_2:.+]], $pop[[L19]]
; CHECK: i32.add $push[[over:.+]]=, $pop[[L18]], $pop{{.+}}
; CHECK-NEXT: call somefunc@FUNCTION, $pop[[over]]
-; CHECK: call somefunc@FUNCTION, $[[SP]]
-; CHECK: i32.add $push[[static:.+]]=, $[[FP_2]], $pop{{.+}}
+; CHECK: get_local $push[[M12:.+]]=, [[SP]]
+; CHECK: call somefunc@FUNCTION, $pop[[M12]]
+; CHECK: get_local $push[[M13:.+]]=, [[FP_2]]
+; CHECK: i32.add $push[[static:.+]]=, $pop[[M13]], $pop{{.+}}
; CHECK-NEXT: call somefunc@FUNCTION, $pop[[static]]
-; CHECK: copy_local $push[[L16:.+]]=, $[[BP]]
-; CHECK-NEXT: i32.store __stack_pointer({{.+}}), $pop[[L16]]
+; CHECK: get_local $push[[M14:.+]]=, [[BP]]{{$}}
+; CHECK-NEXT: set_global 0, $pop[[M14]]
define void @overalign_static_and_dynamic(i32 %num) {
entry:
%over = alloca i32, align 32
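The stack-pointer change threaded through this file, sketched: __stack_pointer is no longer a memory cell accessed with i32.load and i32.store; the stack pointer lives in wasm global 0, read with get_global and written with set_global. Prologue and epilogue shape only, operands elided:

; prologue, before: i32.load $pushN=, __stack_pointer(...)
; prologue, after:  get_global $pushN=, 0
; epilogue, before: i32.store __stack_pointer(...), $popM
; epilogue, after:  set_global 0, $popM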
diff --git a/test/CodeGen/WebAssembly/store-trunc.ll b/test/CodeGen/WebAssembly/store-trunc.ll
index 436933880481..ff358227d987 100644
--- a/test/CodeGen/WebAssembly/store-trunc.ll
+++ b/test/CodeGen/WebAssembly/store-trunc.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-explicit-locals | FileCheck %s
; Test that truncating stores are assembled properly.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: trunc_i8_i32:
; CHECK: i32.store8 0($0), $1{{$}}
diff --git a/test/CodeGen/WebAssembly/store.ll b/test/CodeGen/WebAssembly/store.ll
index 3852b6e420ca..153d7d9addf7 100644
--- a/test/CodeGen/WebAssembly/store.ll
+++ b/test/CodeGen/WebAssembly/store.ll
@@ -4,11 +4,13 @@
; Test that basic stores are assembled properly.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: sti32:
; CHECK-NEXT: .param i32, i32{{$}}
-; CHECK-NEXT: i32.store 0($0), $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i32.store 0($pop[[L0]]), $pop[[L1]]{{$}}
; CHECK-NEXT: return{{$}}
define void @sti32(i32 *%p, i32 %v) {
store i32 %v, i32* %p
@@ -17,7 +19,9 @@ define void @sti32(i32 *%p, i32 %v) {
; CHECK-LABEL: sti64:
; CHECK-NEXT: .param i32, i64{{$}}
-; CHECK-NEXT: i64.store 0($0), $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: i64.store 0($pop[[L0]]), $pop[[L1]]{{$}}
; CHECK-NEXT: return{{$}}
define void @sti64(i64 *%p, i64 %v) {
store i64 %v, i64* %p
@@ -26,7 +30,9 @@ define void @sti64(i64 *%p, i64 %v) {
; CHECK-LABEL: stf32:
; CHECK-NEXT: .param i32, f32{{$}}
-; CHECK-NEXT: f32.store 0($0), $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f32.store 0($pop[[L0]]), $pop[[L1]]{{$}}
; CHECK-NEXT: return{{$}}
define void @stf32(float *%p, float %v) {
store float %v, float* %p
@@ -35,7 +41,9 @@ define void @stf32(float *%p, float %v) {
; CHECK-LABEL: stf64:
; CHECK-NEXT: .param i32, f64{{$}}
-; CHECK-NEXT: f64.store 0($0), $1{{$}}
+; CHECK-NEXT: get_local $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: get_local $push[[L1:[0-9]+]]=, 1{{$}}
+; CHECK-NEXT: f64.store 0($pop[[L0]]), $pop[[L1]]{{$}}
; CHECK-NEXT: return{{$}}
define void @stf64(double *%p, double %v) {
store double %v, double* %p
diff --git a/test/CodeGen/WebAssembly/switch.ll b/test/CodeGen/WebAssembly/switch.ll
index c6354baa57a6..18eac5534a45 100644
--- a/test/CodeGen/WebAssembly/switch.ll
+++ b/test/CodeGen/WebAssembly/switch.ll
@@ -4,7 +4,7 @@
; the blocks in a way that isn't interesting here.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
declare void @foo0()
declare void @foo1()
diff --git a/test/CodeGen/WebAssembly/unreachable.ll b/test/CodeGen/WebAssembly/unreachable.ll
index 77fda44d5ff3..de96b0927563 100644
--- a/test/CodeGen/WebAssembly/unreachable.ll
+++ b/test/CodeGen/WebAssembly/unreachable.ll
@@ -5,7 +5,7 @@
; wasm unreachable
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
declare void @llvm.trap()
declare void @llvm.debugtrap()
diff --git a/test/CodeGen/WebAssembly/unsupported-function-bitcasts.ll b/test/CodeGen/WebAssembly/unsupported-function-bitcasts.ll
index ef4318ec299b..c3d420a6ece6 100644
--- a/test/CodeGen/WebAssembly/unsupported-function-bitcasts.ll
+++ b/test/CodeGen/WebAssembly/unsupported-function-bitcasts.ll
@@ -5,13 +5,14 @@
; conversions are implemented.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: test:
; CHECK-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
; CHECK-NEXT: call has_i64_arg@FUNCTION, $pop[[L0]]{{$}}
-; CHECK-NEXT: i32.call $drop=, has_i64_ret@FUNCTION{{$}}
-; CHECK-NEXT: .endfunc
+; CHECK-NEXT: i32.call $push{{[0-9]+}}=, has_i64_ret@FUNCTION{{$}}
+; CHECK-NEXT: drop
+; CHECK-NEXT: end_function
; CHECK-NOT: .Lbitcast
diff --git a/test/CodeGen/WebAssembly/unused-argument.ll b/test/CodeGen/WebAssembly/unused-argument.ll
index ff943b215438..a70fc4bd2a46 100644
--- a/test/CodeGen/WebAssembly/unused-argument.ll
+++ b/test/CodeGen/WebAssembly/unused-argument.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
; Make sure that argument offsets are correct even if some arguments are unused.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; CHECK-LABEL: unused_first:
; CHECK-NEXT: .param i32, i32{{$}}
diff --git a/test/CodeGen/WebAssembly/userstack.ll b/test/CodeGen/WebAssembly/userstack.ll
index a163f879f6df..57ca75705e5e 100644
--- a/test/CodeGen/WebAssembly/userstack.ll
+++ b/test/CodeGen/WebAssembly/userstack.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
declare void @ext_func(i64* %ptr)
declare void @ext_func_i32(i32* %ptr)
@@ -10,39 +10,38 @@ declare void @ext_func_i32(i32* %ptr)
; Check that there is an extra local for the stack pointer.
; CHECK: .local i32{{$}}
define void @alloca32() noredzone {
- ; CHECK: i32.const $push[[L4:.+]]=, 0{{$}}
- ; CHECK: i32.const $push[[L1:.+]]=, 0{{$}}
- ; CHECK-NEXT: i32.load $push[[L2:.+]]=, __stack_pointer($pop[[L1]])
+ ; CHECK-NEXT: get_global $push[[L2:.+]]=, 0{{$}}
; CHECK-NEXT: i32.const $push[[L3:.+]]=, 16
; CHECK-NEXT: i32.sub $push[[L9:.+]]=, $pop[[L2]], $pop[[L3]]
- ; CHECK-NEXT: tee_local $push[[L8:.+]]=, $[[SP:.+]]=, $pop[[L9]]{{$}}
- ; CHECK-NEXT: i32.store __stack_pointer($pop[[L4]]), $pop[[L8]]{{$}}
+ ; CHECK-NEXT: tee_local $push[[L8:.+]]=, [[SP:.+]], $pop[[L9]]{{$}}
+ ; CHECK-NEXT: set_global 0, $pop[[L8]]{{$}}
%retval = alloca i32
+ ; CHECK: get_local $push[[L4:.+]]=, [[SP]]{{$}}
; CHECK: i32.const $push[[L0:.+]]=, 0
- ; CHECK: i32.store 12($[[SP]]), $pop[[L0]]
+ ; CHECK: i32.store 12($pop[[L4]]), $pop[[L0]]
store i32 0, i32* %retval
- ; CHECK: i32.const $push[[L6:.+]]=, 0
+ ; CHECK: get_local $push[[L6:.+]]=, [[SP]]{{$}}
; CHECK-NEXT: i32.const $push[[L5:.+]]=, 16
- ; CHECK-NEXT: i32.add $push[[L7:.+]]=, $[[SP]], $pop[[L5]]
- ; CHECK-NEXT: i32.store __stack_pointer($pop[[L6]]), $pop[[L7]]
+ ; CHECK-NEXT: i32.add $push[[L7:.+]]=, $pop[[L6]], $pop[[L5]]
+ ; CHECK-NEXT: set_global 0, $pop[[L7]]
ret void
}
; CHECK-LABEL: alloca3264:
; CHECK: .local i32{{$}}
define void @alloca3264() {
- ; CHECK: i32.const $push[[L2:.+]]=, 0{{$}}
- ; CHECK-NEXT: i32.load $push[[L3:.+]]=, __stack_pointer($pop[[L2]])
+ ; CHECK: get_global $push[[L3:.+]]=, 0{{$}}
; CHECK-NEXT: i32.const $push[[L4:.+]]=, 16
; CHECK-NEXT: i32.sub $push[[L6:.+]]=, $pop[[L3]], $pop[[L4]]
- ; CHECK-NEXT: tee_local $push[[L5:.+]]=, $[[SP:.+]]=, $pop[[L6]]
+ ; CHECK-NEXT: tee_local $push[[L5:.+]]=, [[SP:.+]], $pop[[L6]]
%r1 = alloca i32
%r2 = alloca double
; CHECK-NEXT: i32.const $push[[L0:.+]]=, 0
; CHECK-NEXT: i32.store 12($pop[[L5]]), $pop[[L0]]
store i32 0, i32* %r1
+ ; CHECK-NEXT: get_local $push[[L2:.+]]=, [[SP]]{{$}}
; CHECK-NEXT: i64.const $push[[L1:.+]]=, 0
- ; CHECK-NEXT: i64.store 0($[[SP]]), $pop[[L1]]
+ ; CHECK-NEXT: i64.store 0($pop[[L2]]), $pop[[L1]]
store double 0.0, double* %r2
; CHECK-NEXT: return
ret void
@@ -51,30 +50,29 @@ define void @alloca3264() {
; CHECK-LABEL: allocarray:
; CHECK: .local i32{{$}}
define void @allocarray() {
- ; CHECK: i32.const $push[[L6:.+]]=, 0{{$}}
- ; CHECK: i32.const $push[[L3:.+]]=, 0{{$}}
- ; CHECK-NEXT: i32.load $push[[L4:.+]]=, __stack_pointer($pop[[L3]])
+ ; CHECK-NEXT: get_global $push[[L4:.+]]=, 0{{$}}
; CHECK-NEXT: i32.const $push[[L5:.+]]=, 144{{$}}
; CHECK-NEXT: i32.sub $push[[L12:.+]]=, $pop[[L4]], $pop[[L5]]
- ; CHECK-NEXT: tee_local $push[[L11:.+]]=, $0=, $pop[[L12]]
- ; CHECK-NEXT: i32.store __stack_pointer($pop[[L6]]), $pop[[L11]]
+ ; CHECK-NEXT: tee_local $push[[L11:.+]]=, 0, $pop[[L12]]
+ ; CHECK-NEXT: set_global 0, $pop[[L11]]
%r = alloca [33 x i32]
; CHECK: i32.const $push{{.+}}=, 24
- ; CHECK-NEXT: i32.add $push[[L3:.+]]=, $[[SP]], $pop{{.+}}
+ ; CHECK-NEXT: i32.add $push[[L3:.+]]=, $pop{{.+}}, $pop{{.+}}
; CHECK-NEXT: i32.const $push[[L1:.+]]=, 1{{$}}
; CHECK-NEXT: i32.store 0($pop[[L3]]), $pop[[L1]]{{$}}
+ ; CHECK-NEXT: get_local $push[[L4:.+]]=, 0{{$}}
; CHECK-NEXT: i32.const $push[[L10:.+]]=, 1{{$}}
- ; CHECK-NEXT: i32.store 12(${{.+}}), $pop[[L10]]{{$}}
+ ; CHECK-NEXT: i32.store 12($pop[[L4]]), $pop[[L10]]{{$}}
%p = getelementptr [33 x i32], [33 x i32]* %r, i32 0, i32 0
store i32 1, i32* %p
%p2 = getelementptr [33 x i32], [33 x i32]* %r, i32 0, i32 3
store i32 1, i32* %p2
- ; CHECK: i32.const $push[[L9:.+]]=, 0{{$}}
+ ; CHECK-NEXT: get_local $push[[L2:.+]]=, [[SP]]{{$}}
; CHECK-NEXT: i32.const $push[[L7:.+]]=, 144
- ; CHECK-NEXT: i32.add $push[[L8:.+]]=, $[[SP]], $pop[[L7]]
- ; CHECK-NEXT: i32.store __stack_pointer($pop[[L9]]), $pop[[L8]]
+ ; CHECK-NEXT: i32.add $push[[L8:.+]]=, $pop[[L2]], $pop[[L7]]
+ ; CHECK-NEXT: set_global 0, $pop[[L8]]
ret void
}
@@ -82,24 +80,27 @@ define void @allocarray() {
define void @non_mem_use(i8** %addr) {
; CHECK: i32.const $push[[L2:.+]]=, 48
; CHECK-NEXT: i32.sub $push[[L12:.+]]=, {{.+}}, $pop[[L2]]
- ; CHECK-NEXT: tee_local $push[[L11:.+]]=, $[[SP:.+]]=, $pop[[L12]]
- ; CHECK-NEXT: i32.store {{.+}}, $pop[[L11]]
+ ; CHECK-NEXT: tee_local $push[[L11:.+]]=, [[SP:.+]], $pop[[L12]]
+ ; CHECK-NEXT: set_global 0, $pop[[L11]]
%buf = alloca [27 x i8], align 16
%r = alloca i64
%r2 = alloca i64
; %r is at SP+8
+ ; CHECK: get_local $push[[L3:.+]]=, [[SP]]
; CHECK: i32.const $push[[OFF:.+]]=, 8
- ; CHECK-NEXT: i32.add $push[[ARG1:.+]]=, $[[SP]], $pop[[OFF]]
+ ; CHECK-NEXT: i32.add $push[[ARG1:.+]]=, $pop[[L3]], $pop[[OFF]]
; CHECK-NEXT: call ext_func@FUNCTION, $pop[[ARG1]]
call void @ext_func(i64* %r)
; %r2 is at SP+0, no add needed
- ; CHECK-NEXT: call ext_func@FUNCTION, $[[SP]]
+ ; CHECK: get_local $push[[L4:.+]]=, [[SP]]
+ ; CHECK-NEXT: call ext_func@FUNCTION, $pop[[L4]]
call void @ext_func(i64* %r2)
; Use as a value, but in a store
; %buf is at SP+16
+ ; CHECK: get_local $push[[L5:.+]]=, [[SP]]
; CHECK: i32.const $push[[OFF:.+]]=, 16
- ; CHECK-NEXT: i32.add $push[[VAL:.+]]=, $[[SP]], $pop[[OFF]]
- ; CHECK-NEXT: i32.store 0($0), $pop[[VAL]]
+ ; CHECK-NEXT: i32.add $push[[VAL:.+]]=, $pop[[L5]], $pop[[OFF]]
+ ; CHECK-NEXT: i32.store 0($pop{{.+}}), $pop[[VAL]]
%gep = getelementptr inbounds [27 x i8], [27 x i8]* %buf, i32 0, i32 0
store i8* %gep, i8** %addr
ret void
@@ -108,13 +109,11 @@ define void @non_mem_use(i8** %addr) {
; CHECK-LABEL: allocarray_inbounds:
; CHECK: .local i32{{$}}
define void @allocarray_inbounds() {
- ; CHECK: i32.const $push[[L5:.+]]=, 0{{$}}
- ; CHECK: i32.const $push[[L2:.+]]=, 0{{$}}
- ; CHECK-NEXT: i32.load $push[[L3:.+]]=, __stack_pointer($pop[[L2]])
+ ; CHECK: get_global $push[[L3:.+]]=, 0{{$}}
; CHECK-NEXT: i32.const $push[[L4:.+]]=, 32{{$}}
; CHECK-NEXT: i32.sub $push[[L11:.+]]=, $pop[[L3]], $pop[[L4]]
- ; CHECK-NEXT: tee_local $push[[L10:.+]]=, $[[SP:.+]]=, $pop[[L11]]
- ; CHECK-NEXT: i32.store __stack_pointer($pop[[L5]]), $pop[[L10]]{{$}}
+ ; CHECK-NEXT: tee_local $push[[L10:.+]]=, [[SP:.+]], $pop[[L11]]
+ ; CHECK-NEXT: set_global 0, $pop[[L10]]{{$}}
%r = alloca [5 x i32]
; CHECK: i32.const $push[[L3:.+]]=, 1
; CHECK-DAG: i32.store 24(${{.+}}), $pop[[L3]]
@@ -126,45 +125,39 @@ define void @allocarray_inbounds() {
store i32 1, i32* %p2
call void @ext_func(i64* null);
; CHECK: call ext_func
- ; CHECK: i32.const $push[[L6:.+]]=, 0{{$}}
- ; CHECK-NEXT: i32.const $push[[L5:.+]]=, 32{{$}}
+ ; CHECK: i32.const $push[[L5:.+]]=, 32{{$}}
; CHECK-NEXT: i32.add $push[[L7:.+]]=, ${{.+}}, $pop[[L5]]
- ; CHECK-NEXT: i32.store __stack_pointer($pop[[L6]]), $pop[[L7]]
+ ; CHECK-NEXT: set_global 0, $pop[[L7]]
ret void
}
; CHECK-LABEL: dynamic_alloca:
define void @dynamic_alloca(i32 %alloc) {
- ; CHECK: i32.const $push[[L7:.+]]=, 0{{$}}
- ; CHECK: i32.const $push[[L1:.+]]=, 0{{$}}
- ; CHECK-NEXT: i32.load $push[[L13:.+]]=, __stack_pointer($pop[[L1]])
+ ; CHECK: get_global $push[[L13:.+]]=, 0{{$}}
; CHECK-NEXT: tee_local $push[[L12:.+]]=, [[SP:.+]], $pop[[L13]]{{$}}
; Target independent codegen bumps the stack pointer.
; CHECK: i32.sub
; Check that SP is written back to memory after decrement
- ; CHECK: i32.store __stack_pointer($pop{{.+}}),
+ ; CHECK: set_global 0,
%r = alloca i32, i32 %alloc
; Target-independent codegen also calculates the store addr
; CHECK: call ext_func_i32@FUNCTION
call void @ext_func_i32(i32* %r)
- ; CHECK: i32.const $push[[L3:.+]]=, 0{{$}}
- ; CHECK: i32.store __stack_pointer($pop[[L3]]), $pop{{.+}}
+ ; CHECK: set_global 0, $pop{{.+}}
ret void
}
; CHECK-LABEL: dynamic_alloca_redzone:
define void @dynamic_alloca_redzone(i32 %alloc) {
- ; CHECK: i32.const $push[[L8:.+]]=, 0{{$}}
- ; CHECK-NEXT: i32.load $push[[L13:.+]]=, __stack_pointer($pop[[L1]])
+ ; CHECK: get_global $push[[L13:.+]]=, 0{{$}}
; CHECK-NEXT: tee_local $push[[L12:.+]]=, [[SP:.+]], $pop[[L13]]{{$}}
- ; CHECK-NEXT: copy_local [[FP:.+]]=, $pop[[L12]]{{$}}
; Target independent codegen bumps the stack pointer
; CHECK: i32.sub
%r = alloca i32, i32 %alloc
- ; CHECK-NEXT: tee_local $push[[L8:.+]]=, $0=, $pop
- ; CHECK-NEXT: copy_local $drop=, $pop[[L8]]{{$}}
+ ; CHECK-NEXT: tee_local $push[[L8:.+]]=, {{.+}}, $pop
+ ; CHECK: get_local $push[[L7:.+]]=, 0{{$}}
; CHECK-NEXT: i32.const $push[[L6:.+]]=, 0{{$}}
- ; CHECK-NEXT: i32.store 0($0), $pop[[L6]]{{$}}
+ ; CHECK-NEXT: i32.store 0($pop[[L7]]), $pop[[L6]]{{$}}
store i32 0, i32* %r
; CHECK-NEXT: return
ret void
@@ -173,17 +166,15 @@ define void @dynamic_alloca_redzone(i32 %alloc) {
; CHECK-LABEL: dynamic_static_alloca:
define void @dynamic_static_alloca(i32 %alloc) noredzone {
; Decrement SP in the prolog by the static amount and writeback to memory.
- ; CHECK: i32.const $push[[L13:.+]]=, 0{{$}}
- ; CHECK: i32.const $push[[L10:.+]]=, 0{{$}}
- ; CHECK-NEXT: i32.load $push[[L11:.+]]=, __stack_pointer($pop[[L10]])
+ ; CHECK: get_global $push[[L11:.+]]=, 0{{$}}
; CHECK-NEXT: i32.const $push[[L12:.+]]=, 16
; CHECK-NEXT: i32.sub $push[[L23:.+]]=, $pop[[L11]], $pop[[L12]]
- ; CHECK-NEXT: tee_local $push[[L22:.+]]=, $[[SP:.+]]=, $pop[[L23]]
- ; CHECK-NEXT: i32.store __stack_pointer($pop{{.+}}), $pop[[L22]]
+ ; CHECK-NEXT: tee_local $push[[L22:.+]]=, [[SP:.+]], $pop[[L23]]
+ ; CHECK-NEXT: set_global 0, $pop[[L22]]
; Alloc and write to a static alloca
- ; CHECK: copy_local $push[[L21:.+]]=, $[[SP]]
- ; CHECK-NEXT: tee_local $push[[pushedFP:.+]]=, $[[FP:.+]]=, $pop[[L21]]
+ ; CHECK: get_local $push[[L21:.+]]=, [[SP:.+]]
+ ; CHECK-NEXT: tee_local $push[[pushedFP:.+]]=, [[FP:.+]], $pop[[L21]]
; CHECK-NEXT: i32.const $push[[L0:.+]]=, 101
; CHECK-NEXT: i32.store [[static_offset:.+]]($pop[[pushedFP]]), $pop[[L0]]
%static = alloca i32
@@ -191,44 +182,51 @@ define void @dynamic_static_alloca(i32 %alloc) noredzone {
; Decrement SP in the body by the dynamic amount.
; CHECK: i32.sub
- ; CHECK: tee_local $push{{.+}}=, $[[dynamic_local:.+]]=, $pop{{.+}}
- ; CHECK: i32.store __stack_pointer
+ ; CHECK: tee_local $push[[L16:.+]]=, [[dynamic_local:.+]], $pop{{.+}}
+ ; CHECK: tee_local $push[[L15:.+]]=, [[other:.+]], $pop[[L16]]{{$}}
+ ; CHECK: set_global 0, $pop[[L15]]{{$}}
%dynamic = alloca i32, i32 %alloc
; Ensure we don't modify the frame pointer after assigning it.
; CHECK-NOT: $[[FP]]=
; Ensure the static address doesn't change after modifying the stack pointer.
+ ; CHECK: get_local $push[[L17:.+]]=, [[FP]]
; CHECK: i32.const $push[[L7:.+]]=, 102
- ; CHECK-NEXT: i32.store [[static_offset]]($[[FP]]), $pop[[L7]]
+ ; CHECK-NEXT: i32.store [[static_offset]]($pop[[L17]]), $pop[[L7]]
+ ; CHECK-NEXT: get_local $push[[L9:.+]]=, [[dynamic_local]]{{$}}
; CHECK-NEXT: i32.const $push[[L8:.+]]=, 103
- ; CHECK-NEXT: i32.store 0($[[dynamic_local]]), $pop[[L8]]
+ ; CHECK-NEXT: i32.store 0($pop[[L9]]), $pop[[L8]]
store volatile i32 102, i32* %static
store volatile i32 103, i32* %dynamic
; Decrement SP in the body by the dynamic amount.
; CHECK: i32.sub
- ; CHECK: tee_local $push{{.+}}=, $[[dynamic2_local:.+]]=, $pop{{.+}}
+ ; CHECK: tee_local $push{{.+}}=, [[dynamic2_local:.+]], $pop{{.+}}
%dynamic.2 = alloca i32, i32 %alloc
; CHECK-NOT: $[[FP]]=
; Ensure neither the static nor dynamic address changes after the second
; modification of the stack pointer.
+ ; CHECK: get_local $push[[L22:.+]]=, [[FP]]
; CHECK: i32.const $push[[L9:.+]]=, 104
- ; CHECK-NEXT: i32.store [[static_offset]]($[[FP]]), $pop[[L9]]
+ ; CHECK-NEXT: i32.store [[static_offset]]($pop[[L22]]), $pop[[L9]]
+ ; CHECK-NEXT: get_local $push[[L23:.+]]=, [[dynamic_local]]
; CHECK-NEXT: i32.const $push[[L10:.+]]=, 105
- ; CHECK-NEXT: i32.store 0($[[dynamic_local]]), $pop[[L10]]
+ ; CHECK-NEXT: i32.store 0($pop[[L23]]), $pop[[L10]]
+ ; CHECK-NEXT: get_local $push[[L23:.+]]=, [[dynamic2_local]]
; CHECK-NEXT: i32.const $push[[L11:.+]]=, 106
- ; CHECK-NEXT: i32.store 0($[[dynamic2_local]]), $pop[[L11]]
+ ; CHECK-NEXT: i32.store 0($pop[[L23]]), $pop[[L11]]
store volatile i32 104, i32* %static
store volatile i32 105, i32* %dynamic
store volatile i32 106, i32* %dynamic.2
; Writeback to memory.
- ; CHECK: i32.const $push[[L17:.+]]=, 16
- ; CHECK-NEXT: i32.add $push[[L18:.+]]=, $[[FP]], $pop[[L17]]
- ; CHECK-NEXT: i32.store __stack_pointer($pop{{.+}}), $pop[[L18]]
+ ; CHECK: get_local $push[[L24:.+]]=, [[FP]]{{$}}
+ ; CHECK: i32.const $push[[L18:.+]]=, 16
+ ; CHECK-NEXT: i32.add $push[[L19:.+]]=, $pop[[L24]], $pop[[L18]]
+ ; CHECK-NEXT: set_global 0, $pop[[L19]]
ret void
}
@@ -237,16 +235,17 @@ declare void @llvm.stackrestore(i8*)
; CHECK-LABEL: llvm_stack_builtins:
define void @llvm_stack_builtins(i32 %alloc) noredzone {
- ; CHECK: i32.load $push[[L11:.+]]=, __stack_pointer($pop{{.+}})
- ; CHECK-NEXT: tee_local $push[[L10:.+]]=, ${{.+}}=, $pop[[L11]]
- ; CHECK-NEXT: copy_local $[[STACK:.+]]=, $pop[[L10]]
+ ; CHECK: get_global $push[[L11:.+]]=, 0{{$}}
+ ; CHECK-NEXT: tee_local $push[[L10:.+]]=, {{.+}}, $pop[[L11]]
+ ; CHECK-NEXT: set_local [[STACK:.+]], $pop[[L10]]
%stack = call i8* @llvm.stacksave()
; Ensure we don't reassign the stacksave local
- ; CHECK-NOT: $[[STACK]]=
+ ; CHECK-NOT: set_local [[STACK]],
%dynamic = alloca i32, i32 %alloc
- ; CHECK: i32.store __stack_pointer($pop{{.+}}), $[[STACK]]
+ ; CHECK: get_local $push[[L12:.+]]=, [[STACK]]
+ ; CHECK-NEXT: set_global 0, $pop[[L12]]
call void @llvm.stackrestore(i8* %stack)
ret void
@@ -257,14 +256,15 @@ define void @llvm_stack_builtins(i32 %alloc) noredzone {
; moved after the stack pointer was updated for the dynamic alloca.
; CHECK-LABEL: dynamic_alloca_nouse:
define void @dynamic_alloca_nouse(i32 %alloc) noredzone {
- ; CHECK: i32.load $push[[L11:.+]]=, __stack_pointer($pop{{.+}})
- ; CHECK-NEXT: tee_local $push[[L10:.+]]=, ${{.+}}=, $pop[[L11]]
- ; CHECK-NEXT: copy_local $[[FP:.+]]=, $pop[[L10]]
+ ; CHECK: get_global $push[[L11:.+]]=, 0{{$}}
+ ; CHECK-NEXT: tee_local $push[[L10:.+]]=, {{.+}}, $pop[[L11]]
+ ; CHECK-NEXT: set_local [[FP:.+]], $pop[[L10]]
%dynamic = alloca i32, i32 %alloc
- ; CHECK-NOT: $[[FP]]=,
+ ; CHECK-NOT: set_local [[FP]],
- ; CHECK: i32.store __stack_pointer($pop{{.+}}), $[[FP]]
+ ; CHECK: get_local $push[[L12:.+]]=, [[FP]]
+ ; CHECK-NEXT: set_global 0, $pop[[L12]]
ret void
}
@@ -278,12 +278,13 @@ entry:
%addr = alloca i32
; CHECK: i32.const $push[[OFF:.+]]=, 12
; CHECK-NEXT: i32.add $push[[ADDR:.+]]=, $pop[[L3]], $pop[[OFF]]
- ; CHECK-NEXT: copy_local [[COPY:.+]]=, $pop[[ADDR]]
+ ; CHECK-NEXT: set_local [[COPY:.+]], $pop[[ADDR]]
br label %body
body:
%a = phi i32* [%addr, %entry], [%b, %body]
store i32 1, i32* %a
- ; CHECK: i32.store 0([[COPY]]),
+ ; CHECK: get_local $push[[L12:.+]]=, [[COPY]]
+ ; CHECK: i32.store 0($pop[[L12]]),
br i1 %cond, label %body, label %exit
exit:
ret void
@@ -294,13 +295,11 @@ declare i8* @llvm.frameaddress(i32)
; Test __builtin_frame_address(0).
; CHECK-LABEL: frameaddress_0:
-; CHECK: i32.const $push[[L0:.+]]=, 0{{$}}
-; CHECK-NEXT: i32.load $push[[L3:.+]]=, __stack_pointer($pop[[L0]])
-; CHECK-NEXT: copy_local $push[[L4:.+]]=, $pop[[L3]]{{$}}
-; CHECK-NEXT: tee_local $push[[L2:.+]]=, $[[FP:.+]]=, $pop[[L4]]{{$}}
+; CHECK: get_global $push[[L3:.+]]=, 0{{$}}
+; CHECK-NEXT: tee_local $push[[L2:.+]]=, [[FP:.+]], $pop[[L3]]{{$}}
; CHECK-NEXT: call use_i8_star@FUNCTION, $pop[[L2]]
-; CHECK-NEXT: i32.const $push[[L1:.+]]=, 0{{$}}
-; CHECK-NEXT: i32.store __stack_pointer($pop[[L1]]), $[[FP]]
+; CHECK-NEXT: get_local $push[[L5:.+]]=, [[FP]]
+; CHECK-NEXT: set_global 0, $pop[[L5]]
define void @frameaddress_0() {
%t = call i8* @llvm.frameaddress(i32 0)
call void @use_i8_star(i8* %t)
@@ -321,7 +320,7 @@ define void @frameaddress_1() {
; Test a stack address passed to an inline asm.
; CHECK-LABEL: inline_asm:
-; CHECK: __stack_pointer
+; CHECK: get_global {{.+}}, 0{{$}}
; CHECK: #APP
; CHECK-NEXT: # %{{[0-9]+}}{{$}}
; CHECK-NEXT: #NO_APP
diff --git a/test/CodeGen/WebAssembly/varargs.ll b/test/CodeGen/WebAssembly/varargs.ll
index c77ed10c2584..3f04700131cc 100644
--- a/test/CodeGen/WebAssembly/varargs.ll
+++ b/test/CodeGen/WebAssembly/varargs.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -verify-machineinstrs | FileCheck %s
; Test varargs constructs.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
; Test va_start.
@@ -52,7 +52,6 @@ entry:
; CHECK-LABEL: arg_i8:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: .local i32{{$}}
; CHECK-NEXT: i32.load $push[[NUM0:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: tee_local $push[[NUM1:[0-9]+]]=, $1=, $pop[[NUM0]]{{$}}
; CHECK-NEXT: i32.const $push[[NUM2:[0-9]+]]=, 4{{$}}
@@ -71,7 +70,6 @@ entry:
; CHECK-LABEL: arg_i32:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i32{{$}}
-; CHECK-NEXT: .local i32{{$}}
; CHECK-NEXT: i32.load $push[[NUM0:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: i32.const $push[[NUM1:[0-9]+]]=, 3{{$}}
; CHECK-NEXT: i32.add $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM1]]{{$}}
@@ -93,7 +91,6 @@ entry:
; CHECK-LABEL: arg_i128:
; CHECK-NEXT: .param i32, i32{{$}}
-; CHECK-NEXT: .local
; CHECK: i32.and
; CHECK: i64.load
; CHECK: i64.load
@@ -123,8 +120,8 @@ define void @caller_none() {
; disabling it.
; CHECK-LABEL: caller_some
-; CHECK: i32.store
-; CHECK: i64.store
+; CHECK-DAG: i32.store
+; CHECK-DAG: i64.store
define void @caller_some() {
call void (...) @callee(i32 0, double 2.0)
ret void
diff --git a/test/CodeGen/WebAssembly/vtable.ll b/test/CodeGen/WebAssembly/vtable.ll
index 739ba2aaf5a5..b39e7bc0f7f2 100644
--- a/test/CodeGen/WebAssembly/vtable.ll
+++ b/test/CodeGen/WebAssembly/vtable.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -asm-verbose=false | FileCheck %s --check-prefix=TYPEINFONAME
-; RUN: llc < %s -asm-verbose=false | FileCheck %s --check-prefix=VTABLE
-; RUN: llc < %s -asm-verbose=false | FileCheck %s --check-prefix=TYPEINFO
+; RUN: llc < %s -asm-verbose=false -disable-wasm-explicit-locals | FileCheck %s --check-prefix=TYPEINFONAME
+; RUN: llc < %s -asm-verbose=false -disable-wasm-explicit-locals | FileCheck %s --check-prefix=VTABLE
+; RUN: llc < %s -asm-verbose=false -disable-wasm-explicit-locals | FileCheck %s --check-prefix=TYPEINFO
; Test that simple vtables assemble as expected.
;
@@ -12,7 +12,7 @@
; Each with a virtual dtor and method foo.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
%struct.A = type { i32 (...)** }
%struct.B = type { %struct.A }
@@ -36,7 +36,7 @@ target triple = "wasm32-unknown-unknown"
@_ZTS1D = constant [3 x i8] c"1D\00"
; VTABLE: .type _ZTV1A,@object
-; VTABLE-NEXT: .section .data.rel.ro,"aw",@progbits
+; VTABLE-NEXT: .section .data.rel.ro._ZTV1A,
; VTABLE-NEXT: .globl _ZTV1A
; VTABLE-LABEL: _ZTV1A:
; VTABLE-NEXT: .int32 0
@@ -47,6 +47,7 @@ target triple = "wasm32-unknown-unknown"
; VTABLE-NEXT: .size _ZTV1A, 20
@_ZTV1A = constant [5 x i8*] [i8* null, i8* bitcast ({ i8*, i8* }* @_ZTI1A to i8*), i8* bitcast (%struct.A* (%struct.A*)* @_ZN1AD2Ev to i8*), i8* bitcast (void (%struct.A*)* @_ZN1AD0Ev to i8*), i8* bitcast (void (%struct.A*)* @_ZN1A3fooEv to i8*)], align 4
; VTABLE: .type _ZTV1B,@object
+; VTABLE-NEXT: .section .data.rel.ro._ZTV1B,
; VTABLE-NEXT: .globl _ZTV1B
; VTABLE-LABEL: _ZTV1B:
; VTABLE-NEXT: .int32 0
@@ -57,6 +58,7 @@ target triple = "wasm32-unknown-unknown"
; VTABLE-NEXT: .size _ZTV1B, 20
@_ZTV1B = constant [5 x i8*] [i8* null, i8* bitcast ({ i8*, i8*, i8* }* @_ZTI1B to i8*), i8* bitcast (%struct.A* (%struct.A*)* @_ZN1AD2Ev to i8*), i8* bitcast (void (%struct.B*)* @_ZN1BD0Ev to i8*), i8* bitcast (void (%struct.B*)* @_ZN1B3fooEv to i8*)], align 4
; VTABLE: .type _ZTV1C,@object
+; VTABLE-NEXT: .section .data.rel.ro._ZTV1C,
; VTABLE-NEXT: .globl _ZTV1C
; VTABLE-LABEL: _ZTV1C:
; VTABLE-NEXT: .int32 0
@@ -67,6 +69,7 @@ target triple = "wasm32-unknown-unknown"
; VTABLE-NEXT: .size _ZTV1C, 20
@_ZTV1C = constant [5 x i8*] [i8* null, i8* bitcast ({ i8*, i8*, i8* }* @_ZTI1C to i8*), i8* bitcast (%struct.A* (%struct.A*)* @_ZN1AD2Ev to i8*), i8* bitcast (void (%struct.C*)* @_ZN1CD0Ev to i8*), i8* bitcast (void (%struct.C*)* @_ZN1C3fooEv to i8*)], align 4
; VTABLE: .type _ZTV1D,@object
+; VTABLE-NEXT: .section .data.rel.ro._ZTV1D,
; VTABLE-NEXT: .globl _ZTV1D
; VTABLE-LABEL: _ZTV1D:
; VTABLE-NEXT: .int32 0
diff --git a/test/CodeGen/X86/2003-11-03-GlobalBool.ll b/test/CodeGen/X86/2003-11-03-GlobalBool.ll
index f201b981a872..e0d4988abbf7 100644
--- a/test/CodeGen/X86/2003-11-03-GlobalBool.ll
+++ b/test/CodeGen/X86/2003-11-03-GlobalBool.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 | \
-; RUN: not grep ".byte[[:space:]]*true"
+; RUN: llc < %s -march=x86 | FileCheck %s
-@X = global i1 true ; <i1*> [#uses=0]
+@X = global i1 true
+; CHECK-NOT: .byte true
diff --git a/test/CodeGen/X86/2004-02-13-FrameReturnAddress.ll b/test/CodeGen/X86/2004-02-13-FrameReturnAddress.ll
index dde210b776af..bd3317a68b8c 100644
--- a/test/CodeGen/X86/2004-02-13-FrameReturnAddress.ll
+++ b/test/CodeGen/X86/2004-02-13-FrameReturnAddress.ll
@@ -1,18 +1,23 @@
-; RUN: llc < %s -march=x86 | grep "(%esp"
-; RUN: llc < %s -march=x86 | grep "pushl %ebp" | count 1
-; RUN: llc < %s -march=x86 | grep "popl %ebp" | count 1
+; RUN: llc < %s -march=x86 | FileCheck %s
declare i8* @llvm.returnaddress(i32)
declare i8* @llvm.frameaddress(i32)
define i8* @test1() {
- %X = call i8* @llvm.returnaddress( i32 0 ) ; <i8*> [#uses=1]
- ret i8* %X
+; CHECK-LABEL: test1:
+entry:
+ %X = call i8* @llvm.returnaddress( i32 0 )
+ ret i8* %X
+; CHECK: movl {{.*}}(%esp), %eax
}
define i8* @test2() {
- %X = call i8* @llvm.frameaddress( i32 0 ) ; <i8*> [#uses=1]
- ret i8* %X
+; CHECK-LABEL: test2:
+entry:
+ %X = call i8* @llvm.frameaddress( i32 0 )
+ ret i8* %X
+; CHECK: pushl %ebp
+; CHECK: popl %ebp
}
diff --git a/test/CodeGen/X86/2004-02-14-InefficientStackPointer.ll b/test/CodeGen/X86/2004-02-14-InefficientStackPointer.ll
index f986ebd35f85..d7f7e262d893 100644
--- a/test/CodeGen/X86/2004-02-14-InefficientStackPointer.ll
+++ b/test/CodeGen/X86/2004-02-14-InefficientStackPointer.ll
@@ -1,5 +1,10 @@
-; RUN: llc < %s -march=x86 | grep -i ESP | not grep sub
+; RUN: llc < %s -march=x86 | FileCheck %s
+
+target triple = "i686-unknown-unknown"
define i32 @test(i32 %X) {
- ret i32 %X
+; CHECK-LABEL: test:
+entry:
+ ret i32 %X
+; CHECK-NOT: subl %esp
}
diff --git a/test/CodeGen/X86/2005-01-17-CycleInDAG.ll b/test/CodeGen/X86/2005-01-17-CycleInDAG.ll
index 48236cd0c8fe..7bb634d97130 100644
--- a/test/CodeGen/X86/2005-01-17-CycleInDAG.ll
+++ b/test/CodeGen/X86/2005-01-17-CycleInDAG.ll
@@ -3,15 +3,18 @@
; is invalid code (there is no correct way to order the instruction). Check
; that we do not fold the load into the sub.
-; RUN: llc < %s -march=x86 | not grep sub.*GLOBAL
+; RUN: llc < %s -march=x86 | FileCheck %s
-@GLOBAL = external global i32 ; <i32*> [#uses=1]
+@GLOBAL = external global i32
define i32 @test(i32* %P1, i32* %P2, i32* %P3) nounwind {
- %L = load i32, i32* @GLOBAL ; <i32> [#uses=1]
- store i32 12, i32* %P2
- %Y = load i32, i32* %P3 ; <i32> [#uses=1]
- %Z = sub i32 %Y, %L ; <i32> [#uses=1]
- ret i32 %Z
+; CHECK-LABEL: test:
+entry:
+ %L = load i32, i32* @GLOBAL
+ store i32 12, i32* %P2
+ %Y = load i32, i32* %P3
+ %Z = sub i32 %Y, %L
+ ret i32 %Z
+; CHECK-NOT: {{sub.*GLOBAL}}
}
diff --git a/test/CodeGen/X86/2005-02-14-IllegalAssembler.ll b/test/CodeGen/X86/2005-02-14-IllegalAssembler.ll
index a05fc840922f..1e3a0937d5b1 100644
--- a/test/CodeGen/X86/2005-02-14-IllegalAssembler.ll
+++ b/test/CodeGen/X86/2005-02-14-IllegalAssembler.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -march=x86 | not grep 18446744073709551612
+; RUN: llc < %s -march=x86 | FileCheck %s
@A = external global i32 ; <i32*> [#uses=1]
@Y = global i32* getelementptr (i32, i32* @A, i32 -1) ; <i32**> [#uses=0]
+; CHECK-NOT: 18446744073709551612
diff --git a/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll b/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
index f6b5b2c103fe..48f5bc3e2986 100644
--- a/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
+++ b/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
@@ -1,20 +1,31 @@
-; RUN: llc < %s -march=x86 -mcpu=generic | \
-; RUN: grep shld | count 1
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s | FileCheck %s
;
; Check that the isel does not fold the shld, which already folds a load
; and has two uses, into a store.
-@A = external global i32 ; <i32*> [#uses=2]
+target triple = "i686-unknown-unknown"
+
+@A = external global i32
define i32 @test5(i32 %B, i8 %C) {
- %tmp.1 = load i32, i32* @A ; <i32> [#uses=1]
- %shift.upgrd.1 = zext i8 %C to i32 ; <i32> [#uses=1]
- %tmp.2 = shl i32 %tmp.1, %shift.upgrd.1 ; <i32> [#uses=1]
- %tmp.3 = sub i8 32, %C ; <i8> [#uses=1]
- %shift.upgrd.2 = zext i8 %tmp.3 to i32 ; <i32> [#uses=1]
- %tmp.4 = lshr i32 %B, %shift.upgrd.2 ; <i32> [#uses=1]
- %tmp.5 = or i32 %tmp.4, %tmp.2 ; <i32> [#uses=2]
- store i32 %tmp.5, i32* @A
- ret i32 %tmp.5
+; CHECK-LABEL: test5:
+; CHECK: # BB#0:
+; CHECK-NEXT: movb {{[0-9]+}}(%esp), %cl
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: movl A, %eax
+; CHECK-NEXT: shldl %cl, %edx, %eax
+; CHECK-NEXT: movl %eax, A
+; CHECK-NEXT: retl
+entry:
+ %tmp.1 = load i32, i32* @A
+ %shift.upgrd.1 = zext i8 %C to i32
+ %tmp.2 = shl i32 %tmp.1, %shift.upgrd.1
+ %tmp.3 = sub i8 32, %C
+ %shift.upgrd.2 = zext i8 %tmp.3 to i32
+ %tmp.4 = lshr i32 %B, %shift.upgrd.2
+ %tmp.5 = or i32 %tmp.4, %tmp.2
+ store i32 %tmp.5, i32* @A
+ ret i32 %tmp.5
}
diff --git a/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll b/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll
index f8bf0991fb14..ca3eb9cda372 100644
--- a/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll
+++ b/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll
@@ -1,12 +1,24 @@
-; RUN: llc < %s -march=x86 | not grep "subl.*%esp"
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s
define i32 @f(i32 %a, i32 %b) {
- %tmp.2 = mul i32 %a, %a ; <i32> [#uses=1]
- %tmp.5 = shl i32 %a, 1 ; <i32> [#uses=1]
- %tmp.6 = mul i32 %tmp.5, %b ; <i32> [#uses=1]
- %tmp.10 = mul i32 %b, %b ; <i32> [#uses=1]
- %tmp.7 = add i32 %tmp.10, %tmp.2 ; <i32> [#uses=1]
- %tmp.11 = add i32 %tmp.7, %tmp.6 ; <i32> [#uses=1]
- ret i32 %tmp.11
+; CHECK-LABEL: f:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movl %ecx, %edx
+; CHECK-NEXT: imull %edx, %edx
+; CHECK-NEXT: imull %eax, %ecx
+; CHECK-NEXT: imull %eax, %eax
+; CHECK-NEXT: addl %edx, %eax
+; CHECK-NEXT: leal (%eax,%ecx,2), %eax
+; CHECK-NEXT: retl
+ %tmp.2 = mul i32 %a, %a
+ %tmp.5 = shl i32 %a, 1
+ %tmp.6 = mul i32 %tmp.5, %b
+ %tmp.10 = mul i32 %b, %b
+ %tmp.7 = add i32 %tmp.10, %tmp.2
+ %tmp.11 = add i32 %tmp.7, %tmp.6
+ ret i32 %tmp.11
}
diff --git a/test/CodeGen/X86/2006-03-02-InstrSchedBug.ll b/test/CodeGen/X86/2006-03-02-InstrSchedBug.ll
index 7673124d5dda..6963b1d92f6c 100644
--- a/test/CodeGen/X86/2006-03-02-InstrSchedBug.ll
+++ b/test/CodeGen/X86/2006-03-02-InstrSchedBug.ll
@@ -1,13 +1,14 @@
; REQUIRES: asserts
-; RUN: llc < %s -march=x86 -stats 2>&1 | \
-; RUN: grep asm-printer | grep 7
+; RUN: llc < %s -march=x86 -stats 2>&1 | FileCheck %s
+; CHECK: 7 asm-printer
define i32 @g(i32 %a, i32 %b) nounwind {
- %tmp.1 = shl i32 %b, 1 ; <i32> [#uses=1]
- %tmp.3 = add i32 %tmp.1, %a ; <i32> [#uses=1]
- %tmp.5 = mul i32 %tmp.3, %a ; <i32> [#uses=1]
- %tmp.8 = mul i32 %b, %b ; <i32> [#uses=1]
- %tmp.9 = add i32 %tmp.5, %tmp.8 ; <i32> [#uses=1]
- ret i32 %tmp.9
+entry:
+ %tmp.1 = shl i32 %b, 1
+ %tmp.3 = add i32 %tmp.1, %a
+ %tmp.5 = mul i32 %tmp.3, %a
+ %tmp.8 = mul i32 %b, %b
+ %tmp.9 = add i32 %tmp.5, %tmp.8
+ ret i32 %tmp.9
}
diff --git a/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll b/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll
index f159bcdee134..645221fe299e 100644
--- a/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll
+++ b/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll
@@ -1,5 +1,4 @@
; RUN: llc < %s -march=x86 -mtriple=i686-darwin | FileCheck %s
-; RUN: llc < %s -march=x86 -mtriple=i686-darwin -addr-sink-using-gep=1 | FileCheck %s
define void @foo(i8** %buf, i32 %size, i32 %col, i8* %p) nounwind {
entry:
diff --git a/test/CodeGen/X86/2008-02-14-BitMiscompile.ll b/test/CodeGen/X86/2008-02-14-BitMiscompile.ll
index 259a3acd2db2..fdc1c3bb67ba 100644
--- a/test/CodeGen/X86/2008-02-14-BitMiscompile.ll
+++ b/test/CodeGen/X86/2008-02-14-BitMiscompile.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-unknown-unknown | FileCheck %s
define i32 @test(i1 %A) {
@@ -9,7 +9,6 @@ define i32 @test(i1 %A) {
; CHECK-NEXT: negl %eax
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: retl
-;
%B = zext i1 %A to i32
%C = sub i32 0, %B
%D = and i32 %C, 255
diff --git a/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll b/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
index fc5520e12ac0..24abb719b0f9 100644
--- a/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
+++ b/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
@@ -8,9 +8,10 @@ target triple = "i386-apple-darwin10.0.0"
@.str = internal constant [4 x i8] c"%p\0A\00" ; <[4 x i8]*> [#uses=1]
@llvm.used = appending global [1 x i8*] [i8* bitcast (i8* (%struct.S*, i32, %struct.S*)* @_Z4test1SiS_ to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-; Verify that %esi gets spilled before the call.
+; Verify that %s1 gets spilled before the call.
; CHECK: Z4test1SiS
-; CHECK: movl %esi,{{.*}}(%ebp)
+; CHECK: leal 8(%ebp), %[[reg:[^ ]*]]
+; CHECK: movl %[[reg]],{{.*}}(%ebp) ## 4-byte Spill
; CHECK: calll __Z6throwsv
define i8* @_Z4test1SiS_(%struct.S* byval %s1, i32 %n, %struct.S* byval %s2) ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
diff --git a/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll b/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll
index ab9715d22377..66d3f3108ec4 100644
--- a/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll
+++ b/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll
@@ -1,36 +1,58 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
; PR7814
-@g_16 = global i64 -3738643449681751625, align 8 ; <i64*> [#uses=1]
-@g_38 = global i32 0, align 4 ; <i32*> [#uses=2]
-@.str = private constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
+@g_16 = global i64 -3738643449681751625, align 8
+@g_38 = global i32 0, align 4
+@.str = private constant [4 x i8] c"%d\0A\00"
define i32 @main() nounwind {
+; CHECK-LABEL: main:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cmpq $0, {{.*}}(%rip)
+; CHECK-NEXT: movb $-106, %al
+; CHECK-NEXT: jne .LBB0_2
+; CHECK-NEXT: # BB#1: # %entry
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: .LBB0_2: # %entry
+; CHECK-NEXT: testb %al, %al
+; CHECK-NEXT: jle .LBB0_3
+; CHECK-NEXT: # BB#4: # %if.then
+; CHECK-NEXT: movl $1, {{.*}}(%rip)
+; CHECK-NEXT: movl $1, %esi
+; CHECK-NEXT: jmp .LBB0_5
+; CHECK-NEXT: .LBB0_3: # %entry.if.end_crit_edge
+; CHECK-NEXT: movl {{.*}}(%rip), %esi
+; CHECK-NEXT: .LBB0_5: # %if.end
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: movl $.L.str, %edi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: callq printf
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: retq
entry:
- %tmp = load i64, i64* @g_16 ; <i64> [#uses=1]
- %not.lnot = icmp ne i64 %tmp, 0 ; <i1> [#uses=1]
- %conv = sext i1 %not.lnot to i64 ; <i64> [#uses=1]
- %and = and i64 %conv, 150 ; <i64> [#uses=1]
- %conv.i = trunc i64 %and to i8 ; <i8> [#uses=1]
- %cmp = icmp sgt i8 %conv.i, 0 ; <i1> [#uses=1]
+ %tmp = load i64, i64* @g_16
+ %not.lnot = icmp ne i64 %tmp, 0
+ %conv = sext i1 %not.lnot to i64
+ %and = and i64 %conv, 150
+ %conv.i = trunc i64 %and to i8
+ %cmp = icmp sgt i8 %conv.i, 0
br i1 %cmp, label %if.then, label %entry.if.end_crit_edge
-; CHECK: andl $150
-; CHECK-NEXT: testb
-; CHECK-NEXT: jle
-
-entry.if.end_crit_edge: ; preds = %entry
- %tmp4.pre = load i32, i32* @g_38 ; <i32> [#uses=1]
+entry.if.end_crit_edge:
+ %tmp4.pre = load i32, i32* @g_38
br label %if.end
-if.then: ; preds = %entry
+if.then:
store i32 1, i32* @g_38
br label %if.end
-if.end: ; preds = %entry.if.end_crit_edge, %if.then
+if.end:
%tmp4 = phi i32 [ %tmp4.pre, %entry.if.end_crit_edge ], [ 1, %if.then ] ; <i32> [#uses=1]
%call5 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %tmp4) nounwind ; <i32> [#uses=0]
ret i32 0
}
declare i32 @printf(i8* nocapture, ...) nounwind
+
diff --git a/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll b/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll
index b7380196bd9b..54a7763eb696 100644
--- a/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll
+++ b/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -combiner-alias-analysis -march=x86-64 -mcpu=core2 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=core2 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.4"
diff --git a/test/CodeGen/X86/2011-10-19-widen_vselect.ll b/test/CodeGen/X86/2011-10-19-widen_vselect.ll
index b52726962405..ba5de8eb5fcb 100644
--- a/test/CodeGen/X86/2011-10-19-widen_vselect.ll
+++ b/test/CodeGen/X86/2011-10-19-widen_vselect.ll
@@ -1,23 +1,44 @@
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mcpu=corei7 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s --check-prefix=X64
; Make sure that we don't crash when legalizing vselect and vsetcc and that
; we are able to generate vector blend instructions.
-; CHECK-LABEL: simple_widen
-; CHECK-NOT: blend
-; CHECK: ret
define void @simple_widen(<2 x float> %a, <2 x float> %b) {
+; X32-LABEL: simple_widen:
+; X32: # BB#0: # %entry
+; X32-NEXT: extractps $1, %xmm1, (%eax)
+; X32-NEXT: movss %xmm1, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: simple_widen:
+; X64: # BB#0: # %entry
+; X64-NEXT: movlps %xmm1, (%rax)
+; X64-NEXT: retq
entry:
%0 = select <2 x i1> undef, <2 x float> %a, <2 x float> %b
store <2 x float> %0, <2 x float>* undef
ret void
}
-; CHECK-LABEL: complex_inreg_work
-; CHECK: blend
-; CHECK: ret
-
define void @complex_inreg_work(<2 x float> %a, <2 x float> %b) {
+; X32-LABEL: complex_inreg_work:
+; X32: # BB#0: # %entry
+; X32-NEXT: movaps %xmm0, %xmm2
+; X32-NEXT: cmpordps %xmm0, %xmm0
+; X32-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; X32-NEXT: extractps $1, %xmm1, (%eax)
+; X32-NEXT: movss %xmm1, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: complex_inreg_work:
+; X64: # BB#0: # %entry
+; X64-NEXT: movaps %xmm0, %xmm2
+; X64-NEXT: cmpordps %xmm0, %xmm0
+; X64-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; X64-NEXT: movlps %xmm1, (%rax)
+; X64-NEXT: retq
entry:
%0 = fcmp oeq <2 x float> undef, undef
%1 = select <2 x i1> %0, <2 x float> %a, <2 x float> %b
@@ -25,22 +46,67 @@ entry:
ret void
}
-; CHECK-LABEL: zero_test
-; CHECK: xorps %xmm0, %xmm0
-; CHECK: ret
-
define void @zero_test() {
+; X32-LABEL: zero_test:
+; X32: # BB#0: # %entry
+; X32-NEXT: pxor %xmm0, %xmm0
+; X32-NEXT: pextrd $1, %xmm0, (%eax)
+; X32-NEXT: movd %xmm0, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: zero_test:
+; X64: # BB#0: # %entry
+; X64-NEXT: xorps %xmm0, %xmm0
+; X64-NEXT: movlps %xmm0, (%rax)
+; X64-NEXT: retq
entry:
%0 = select <2 x i1> undef, <2 x float> undef, <2 x float> zeroinitializer
store <2 x float> %0, <2 x float>* undef
ret void
}
-; CHECK-LABEL: full_test
-; CHECK: blend
-; CHECK: ret
-
define void @full_test() {
+; X32-LABEL: full_test:
+; X32: # BB#0: # %entry
+; X32-NEXT: subl $60, %esp
+; X32-NEXT: .Lcfi0:
+; X32-NEXT: .cfi_def_cfa_offset 64
+; X32-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; X32-NEXT: cvttps2dq %xmm2, %xmm0
+; X32-NEXT: cvtdq2ps %xmm0, %xmm1
+; X32-NEXT: xorps %xmm0, %xmm0
+; X32-NEXT: cmpltps %xmm2, %xmm0
+; X32-NEXT: movaps {{.*#+}} xmm3 = <1,1,u,u>
+; X32-NEXT: addps %xmm1, %xmm3
+; X32-NEXT: movaps %xmm1, %xmm4
+; X32-NEXT: blendvps %xmm0, %xmm3, %xmm4
+; X32-NEXT: cmpeqps %xmm2, %xmm1
+; X32-NEXT: movaps %xmm1, %xmm0
+; X32-NEXT: blendvps %xmm0, %xmm2, %xmm4
+; X32-NEXT: extractps $1, %xmm4, {{[0-9]+}}(%esp)
+; X32-NEXT: movss %xmm4, {{[0-9]+}}(%esp)
+; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
+; X32-NEXT: addl $60, %esp
+; X32-NEXT: retl
+;
+; X64-LABEL: full_test:
+; X64: # BB#0: # %entry
+; X64-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; X64-NEXT: cvttps2dq %xmm2, %xmm0
+; X64-NEXT: cvtdq2ps %xmm0, %xmm1
+; X64-NEXT: xorps %xmm0, %xmm0
+; X64-NEXT: cmpltps %xmm2, %xmm0
+; X64-NEXT: movaps {{.*#+}} xmm3 = <1,1,u,u>
+; X64-NEXT: addps %xmm1, %xmm3
+; X64-NEXT: movaps %xmm1, %xmm4
+; X64-NEXT: blendvps %xmm0, %xmm3, %xmm4
+; X64-NEXT: cmpeqps %xmm2, %xmm1
+; X64-NEXT: movaps %xmm1, %xmm0
+; X64-NEXT: blendvps %xmm0, %xmm2, %xmm4
+; X64-NEXT: movlps %xmm4, -{{[0-9]+}}(%rsp)
+; X64-NEXT: movlps %xmm4, -{{[0-9]+}}(%rsp)
+; X64-NEXT: retq
entry:
%Cy300 = alloca <4 x float>
%Cy11a = alloca <2 x float>
@@ -62,5 +128,3 @@ define void @full_test() {
store <2 x float> %8, <2 x float>* %Cy11a
ret void
}
-
-
diff --git a/test/CodeGen/X86/2011-10-21-widen-cmp.ll b/test/CodeGen/X86/2011-10-21-widen-cmp.ll
index 9e6e2f70b0a7..9232eba213bf 100644
--- a/test/CodeGen/X86/2011-10-21-widen-cmp.ll
+++ b/test/CodeGen/X86/2011-10-21-widen-cmp.ll
@@ -9,9 +9,7 @@ define void @cmp_2_floats(<2 x float> %a, <2 x float> %b) {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: movaps %xmm0, %xmm2
; CHECK-NEXT: cmpordps %xmm0, %xmm0
-; CHECK-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],zero,xmm0[1]
-; CHECK-NEXT: pslld $31, %xmm0
-; CHECK-NEXT: blendvps %xmm2, %xmm1
+; CHECK-NEXT: blendvps %xmm0, %xmm2, %xmm1
; CHECK-NEXT: movlps %xmm1, (%rax)
; CHECK-NEXT: retq
entry:
@@ -26,7 +24,7 @@ define void @cmp_2_doubles(<2 x double> %a, <2 x double> %b) {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: movapd %xmm0, %xmm2
; CHECK-NEXT: cmpordpd %xmm0, %xmm0
-; CHECK-NEXT: blendvpd %xmm2, %xmm1
+; CHECK-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; CHECK-NEXT: movapd %xmm1, (%rax)
; CHECK-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/2011-11-30-or.ll b/test/CodeGen/X86/2011-11-30-or.ll
index 8378a022eab7..5c324a423923 100644
--- a/test/CodeGen/X86/2011-11-30-or.ll
+++ b/test/CodeGen/X86/2011-11-30-or.ll
@@ -5,7 +5,7 @@ target triple = "x86_64-apple-macosx10.6.6"
; Test that the order of operands is correct
; CHECK: select_func
-; CHECK: pblendvb {{LCPI0_[0-9]*}}(%rip), %xmm1
+; CHECK: pblendvb %xmm0, {{LCPI0_[0-9]*}}(%rip), %xmm1
; CHECK: ret
define void @select_func(<8 x i16> %in) {
diff --git a/test/CodeGen/X86/2011-12-15-vec_shift.ll b/test/CodeGen/X86/2011-12-15-vec_shift.ll
index 4d49b3af88ee..70783509bb7f 100644
--- a/test/CodeGen/X86/2011-12-15-vec_shift.ll
+++ b/test/CodeGen/X86/2011-12-15-vec_shift.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-apple-macosx10.7"
define <16 x i8> @shift(<16 x i8> %a, <16 x i8> %b) nounwind {
; Make sure operands to pblend are in the right order.
; CHECK-W-SSE4: psllw $4, [[REG1:%xmm.]]
- ; CHECK-W-SSE4: pblendvb [[REG1]],{{ %xmm.}}
+ ; CHECK-W-SSE4: pblendvb %xmm0, [[REG1]],{{ %xmm.}}
; CHECK-W-SSE4: psllw $2
; Make sure we're masking and pcmp'ing the VSELECT condition vector.
diff --git a/test/CodeGen/X86/2011-12-8-bitcastintprom.ll b/test/CodeGen/X86/2011-12-8-bitcastintprom.ll
index 0cae34c9dfca..e2ccaa1b8378 100644
--- a/test/CodeGen/X86/2011-12-8-bitcastintprom.ll
+++ b/test/CodeGen/X86/2011-12-8-bitcastintprom.ll
@@ -18,7 +18,6 @@ define void @prom_bug(<4 x i8> %t, i16* %p) {
; SSE41-LABEL: prom_bug:
; SSE41: ## BB#0:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
-; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE41-NEXT: pextrw $0, %xmm0, (%rdi)
; SSE41-NEXT: retq
%r = bitcast <4 x i8> %t to <2 x i16>
diff --git a/test/CodeGen/X86/2012-07-10-extload64.ll b/test/CodeGen/X86/2012-07-10-extload64.ll
index a366102fbd74..a41123e40a58 100644
--- a/test/CodeGen/X86/2012-07-10-extload64.ll
+++ b/test/CodeGen/X86/2012-07-10-extload64.ll
@@ -1,32 +1,42 @@
-; RUN: llc < %s -march=x86 -mcpu=corei7 -mtriple=i686-pc-win32 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-pc-win32 -mcpu=corei7 | FileCheck %s
-; CHECK: load_store
define void @load_store(<4 x i16>* %in) {
+; CHECK-LABEL: load_store:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; CHECK-NEXT: paddw %xmm0, %xmm0
+; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; CHECK-NEXT: movq %xmm0, (%eax)
+; CHECK-NEXT: retl
entry:
-; CHECK: pmovzxwd
%A27 = load <4 x i16>, <4 x i16>* %in, align 4
%A28 = add <4 x i16> %A27, %A27
-; CHECK: movq
store <4 x i16> %A28, <4 x i16>* %in, align 4
ret void
-; CHECK: ret
}
; Make sure that we store a 64-bit value, even on 32-bit systems.
-;CHECK-LABEL: store_64:
define void @store_64(<2 x i32>* %ptr) {
+; CHECK-LABEL: store_64:
+; CHECK: # BB#0: # %BB
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: xorps %xmm0, %xmm0
+; CHECK-NEXT: movlps %xmm0, (%eax)
+; CHECK-NEXT: retl
BB:
store <2 x i32> zeroinitializer, <2 x i32>* %ptr
ret void
-;CHECK: movlps
-;CHECK: ret
}
-;CHECK-LABEL: load_64:
define <2 x i32> @load_64(<2 x i32>* %ptr) {
+; CHECK-LABEL: load_64:
+; CHECK: # BB#0: # %BB
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; CHECK-NEXT: retl
BB:
%t = load <2 x i32>, <2 x i32>* %ptr
ret <2 x i32> %t
-;CHECK: pmovzxdq
-;CHECK: ret
}
diff --git a/test/CodeGen/X86/2012-11-28-merge-store-alias.ll b/test/CodeGen/X86/2012-11-28-merge-store-alias.ll
index c16deeff3d99..2e8206a75916 100644
--- a/test/CodeGen/X86/2012-11-28-merge-store-alias.ll
+++ b/test/CodeGen/X86/2012-11-28-merge-store-alias.ll
@@ -3,8 +3,8 @@
; CHECK: merge_stores_can
; CHECK: callq foo
; CHECK: xorps %xmm0, %xmm0
-; CHECK-NEXT: movl 36(%rsp), %ebp
; CHECK-NEXT: movups %xmm0
+; CHECK-NEXT: movl 36(%rsp), %ebp
; CHECK: callq foo
; CHECK: ret
declare i32 @foo([10 x i32]* )
diff --git a/test/CodeGen/X86/DynamicCalleeSavedRegisters.ll b/test/CodeGen/X86/DynamicCalleeSavedRegisters.ll
new file mode 100644
index 000000000000..9dff4e596caa
--- /dev/null
+++ b/test/CodeGen/X86/DynamicCalleeSavedRegisters.ll
@@ -0,0 +1,60 @@
+; RUN: llc < %s -mtriple=i386-linux-gnu | FileCheck --check-prefix=CHECK %s
+
+declare x86_regcallcc i32 @callee(i32 %a0, i32 %b0, i32 %c0, i32 %d0, i32 %e0);
+
+; In the RegCall calling convention, ESI and EDI are callee-saved registers.
+; One might think that the caller could assume that the ESI value is the same
+; before and after calling the callee.
+; However, RegCall also says that a register that was used for
+; passing/returning arguments can be assumed to be modified by the callee.
+; In other words, it is no longer a callee-saved register.
+; In this case we want to see that the EDX/ECX values are saved and EDI/ESI
+; are assumed to be modified by the callee.
+; This is a HiPE CC function that doesn't save any registers for the caller,
+; so we can be sure that there is no other reason to save EDX/ECX.
+; The caller arguments are expected to be passed (in the following order)
+; in registers: ESI, EBP, EAX, EDX and ECX.
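+; (Note: `cc 11` below is LLVM's numeric identifier for the HiPE calling
+; convention.)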
+define cc 11 i32 @caller(i32 %a0, i32 %b0, i32 %c0, i32 %d0, i32 %e0) nounwind {
+ %b1 = call x86_regcallcc i32 @callee(i32 %a0, i32 %b0, i32 %c0, i32 %d0, i32 %e0)
+ %b2 = add i32 %b1, %d0
+ %b3 = add i32 %b2, %e0
+ ret i32 %b3
+}
+; CHECK-LABEL: caller
+; CHECK: subl $12, %esp
+; CHECK-NEXT: movl %ecx, 8(%esp)
+; CHECK-NEXT: movl %edx, %ebx
+; CHECK-NEXT: movl %eax, %edx
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: movl %ebp, %ecx
+; CHECK-NEXT: movl %ebx, %edi
+; CHECK-NEXT: movl 8(%esp), %ebp
+; CHECK-NEXT: movl %ebp, %esi
+; CHECK-NEXT: calll callee
+; CHECK-NEXT: leal (%eax,%ebx), %esi
+; CHECK-NEXT: addl %ebp, %esi
+; CHECK-NEXT: addl $12, %esp
+; CHECK-NEXT: retl
+
+!hipe.literals = !{ !0, !1, !2 }
+!0 = !{ !"P_NSP_LIMIT", i32 120 }
+!1 = !{ !"X86_LEAF_WORDS", i32 24 }
+!2 = !{ !"AMD64_LEAF_WORDS", i32 18 }
+
+; Make sure that the callee doesn't save registers that were used to pass it
+; arguments.
+; The arguments are expected to be passed (in the following order)
+; in registers: EAX, ECX, EDX, EDI and ESI.
+; The result will be returned in EAX, ECX and EDX.
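+; (All three fields of the returned struct occupy the return registers, so
+; EAX, ECX and EDX are live-out; the CHECK-NOT lines below verify that ESI and
+; EDI, having carried the last two arguments, are not pushed in the prologue
+; since they are no longer treated as callee-saved.)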
+define x86_regcallcc {i32, i32, i32} @test_callee(i32 %a0, i32 %b0, i32 %c0, i32 %d0, i32 %e0) nounwind {
+ %b1 = mul i32 7, %e0
+ %b2 = udiv i32 5, %e0
+ %b3 = mul i32 7, %d0
+ %b4 = insertvalue {i32, i32, i32} undef, i32 %b1, 0
+ %b5 = insertvalue {i32, i32, i32} %b4, i32 %b2, 1
+ %b6 = insertvalue {i32, i32, i32} %b5, i32 %b3, 2
+ ret {i32, i32, i32} %b6
+}
+; CHECK-LABEL: test_callee
+; CHECK-NOT: pushl %esi
+; CHECK-NOT: pushl %edi
+; CHECK: retl
diff --git a/test/CodeGen/X86/GlobalISel/X86-regbankselect.mir b/test/CodeGen/X86/GlobalISel/X86-regbankselect.mir
new file mode 100644
index 000000000000..c4e5fb2d05fc
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/X86-regbankselect.mir
@@ -0,0 +1,634 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=FAST
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -regbankselect-greedy -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=GREEDY
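+# RegBankSelect assigns each generic virtual register to a register bank
+# ('gpr' or 'vecr' in the checks below). The second RUN line exercises the
+# greedy mapping mode via -regbankselect-greedy instead of the default fast
+# mode; checks common to both modes use the shared CHECK prefix, while the
+# FAST and GREEDY prefixes are available for mode-specific checks.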
+
+--- |
+ ; ModuleID = 'tmp.ll'
+ source_filename = "tmp.ll"
+ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+ target triple = "x86_64--linux-gnu"
+
+ define i8 @test_add_i8(i8 %arg1, i8 %arg2) {
+ %ret = add i8 %arg1, %arg2
+ ret i8 %ret
+ }
+
+ define i16 @test_add_i16(i16 %arg1, i16 %arg2) {
+ %ret = add i16 %arg1, %arg2
+ ret i16 %ret
+ }
+
+ define i32 @test_add_i32(i32 %arg1, i32 %arg2) {
+ %ret = add i32 %arg1, %arg2
+ ret i32 %ret
+ }
+
+ define i64 @test_add_i64(i64 %arg1, i64 %arg2) {
+ %ret = add i64 %arg1, %arg2
+ ret i64 %ret
+ }
+
+ define float @test_add_float(float %arg1, float %arg2) {
+ %ret = fadd float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_add_double(double %arg1, double %arg2) {
+ %ret = fadd double %arg1, %arg2
+ ret double %ret
+ }
+
+ define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
+ %ret = add <4 x i32> %arg1, %arg2
+ ret <4 x i32> %ret
+ }
+
+ define <4 x float> @test_add_v4f32(<4 x float> %arg1, <4 x float> %arg2) {
+ %ret = fadd <4 x float> %arg1, %arg2
+ ret <4 x float> %ret
+ }
+
+ define i8 @test_load_i8(i8* %p1) {
+ %r = load i8, i8* %p1
+ ret i8 %r
+ }
+
+ define i16 @test_load_i16(i16* %p1) {
+ %r = load i16, i16* %p1
+ ret i16 %r
+ }
+
+ define i32 @test_load_i32(i32* %p1) {
+ %r = load i32, i32* %p1
+ ret i32 %r
+ }
+
+ define i64 @test_load_i64(i64* %p1) {
+ %r = load i64, i64* %p1
+ ret i64 %r
+ }
+
+ define float @test_load_float(float* %p1) {
+ %r = load float, float* %p1
+ ret float %r
+ }
+
+ define double @test_load_double(double* %p1) {
+ %r = load double, double* %p1
+ ret double %r
+ }
+
+ define <4 x i32> @test_load_v4i32(<4 x i32>* %p1) {
+ %r = load <4 x i32>, <4 x i32>* %p1, align 16
+ ret <4 x i32> %r
+ }
+
+ define i32* @test_store_i32(i32 %val, i32* %p1) {
+ store i32 %val, i32* %p1
+ ret i32* %p1
+ }
+
+ define i64* @test_store_i64(i64 %val, i64* %p1) {
+ store i64 %val, i64* %p1
+ ret i64* %p1
+ }
+
+ define float* @test_store_float(float %val, float* %p1) {
+ store float %val, float* %p1
+ ret float* %p1
+ }
+
+ define double* @test_store_double(double %val, double* %p1) {
+ store double %val, double* %p1
+ ret double* %p1
+ }
+
+ define void @constInt_check() {
+ ret void
+ }
+
+...
+---
+name: test_add_i8
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+# CHECK-LABEL: name: test_add_i8
+# CHECK: registers:
+# CHECK: - { id: 0, class: gpr }
+# CHECK: - { id: 1, class: gpr }
+# CHECK: - { id: 2, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s8) = COPY %edi
+ %1(s8) = COPY %esi
+ %2(s8) = G_ADD %0, %1
+ %al = COPY %2(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_add_i16
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+# CHECK-LABEL: name: test_add_i16
+# CHECK: registers:
+# CHECK: - { id: 0, class: gpr }
+# CHECK: - { id: 1, class: gpr }
+# CHECK: - { id: 2, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s16) = COPY %edi
+ %1(s16) = COPY %esi
+ %2(s16) = G_ADD %0, %1
+ %ax = COPY %2(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: test_add_i32
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+# CHECK-LABEL: name: test_add_i32
+# CHECK: registers:
+# CHECK: - { id: 0, class: gpr }
+# CHECK: - { id: 1, class: gpr }
+# CHECK: - { id: 2, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s32) = G_ADD %0, %1
+ %eax = COPY %2(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_add_i64
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+# CHECK-LABEL: name: test_add_i64
+# CHECK: registers:
+# CHECK: - { id: 0, class: gpr }
+# CHECK: - { id: 1, class: gpr }
+# CHECK: - { id: 2, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %rsi
+
+ %0(s64) = COPY %rdi
+ %1(s64) = COPY %rsi
+ %2(s64) = G_ADD %0, %1
+ %rax = COPY %2(s64)
+ RET 0, implicit %rax
+
+...
+---
+name: test_add_float
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+# CHECK-LABEL: name: test_add_float
+# CHECK: registers:
+# CHECK: - { id: 0, class: vecr }
+# CHECK: - { id: 1, class: vecr }
+# CHECK: - { id: 2, class: vecr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FADD %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_add_double
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+# CHECK-LABEL: name: test_add_double
+# CHECK: registers:
+# CHECK: - { id: 0, class: vecr }
+# CHECK: - { id: 1, class: vecr }
+# CHECK: - { id: 2, class: vecr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FADD %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_add_v4i32
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+# CHECK-LABEL: name: test_add_v4i32
+# CHECK: registers:
+# CHECK: - { id: 0, class: vecr }
+# CHECK: - { id: 1, class: vecr }
+# CHECK: - { id: 2, class: vecr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<4 x s32>) = COPY %xmm0
+ %1(<4 x s32>) = COPY %xmm1
+ %2(<4 x s32>) = G_ADD %0, %1
+ %xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_add_v4f32
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+# CHECK-LABEL: name: test_add_v4f32
+# CHECK: registers:
+# CHECK: - { id: 0, class: vecr }
+# CHECK: - { id: 1, class: vecr }
+# CHECK: - { id: 2, class: vecr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<4 x s32>) = COPY %xmm0
+ %1(<4 x s32>) = COPY %xmm1
+ %2(<4 x s32>) = G_FADD %0, %1
+ %xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_load_i8
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK-LABEL: name: test_load_i8
+# CHECK: registers:
+# CHECK: - { id: 0, class: gpr }
+# CHECK: - { id: 1, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(s8) = G_LOAD %0(p0) :: (load 1 from %ir.p1)
+ %al = COPY %1(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_load_i16
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK-LABEL: name: test_load_i16
+# CHECK: registers:
+# CHECK: - { id: 0, class: gpr }
+# CHECK: - { id: 1, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(s16) = G_LOAD %0(p0) :: (load 2 from %ir.p1)
+ %ax = COPY %1(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: test_load_i32
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK-LABEL: name: test_load_i32
+# CHECK: registers:
+# CHECK: - { id: 0, class: gpr }
+# CHECK: - { id: 1, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
+ %eax = COPY %1(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_load_i64
+alignment: 4
+exposesReturnsTwice: false
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK-LABEL: name: test_load_i64
+# CHECK: registers:
+# CHECK: - { id: 0, class: gpr }
+# CHECK: - { id: 1, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
+ %rax = COPY %1(s64)
+ RET 0, implicit %rax
+
+...
+---
+name: test_load_float
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK-LABEL: name: test_load_float
+# CHECK: registers:
+# CHECK: - { id: 0, class: gpr }
+# CHECK: - { id: 1, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
+ %xmm0 = COPY %1(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_load_double
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK-LABEL: name: test_load_double
+# CHECK: registers:
+# CHECK: - { id: 0, class: gpr }
+# CHECK: - { id: 1, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
+ %xmm0 = COPY %1(s64)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_load_v4i32
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK-LABEL: name: test_load_v4i32
+# CHECK: registers:
+# CHECK: - { id: 0, class: gpr }
+# CHECK: - { id: 1, class: vecr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.p1, align 1)
+ %xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_store_i32
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK-LABEL: name: test_store_i32
+# CHECK: registers:
+# CHECK: - { id: 0, class: gpr }
+# CHECK: - { id: 1, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %rsi
+
+ %0(s32) = COPY %edi
+ %1(p0) = COPY %rsi
+ G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
+ %rax = COPY %1(p0)
+ RET 0, implicit %rax
+
+...
+---
+name: test_store_i64
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK-LABEL: name: test_store_i64
+# CHECK: registers:
+# CHECK: - { id: 0, class: gpr }
+# CHECK: - { id: 1, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %rsi
+
+ %0(s64) = COPY %rdi
+ %1(p0) = COPY %rsi
+ G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1)
+ %rax = COPY %1(p0)
+ RET 0, implicit %rax
+
+...
+---
+name: test_store_float
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK-LABEL: name: test_store_float
+# CHECK: registers:
+
+# FAST-NEXT: - { id: 0, class: vecr }
+# FAST-NEXT: - { id: 1, class: gpr }
+# FAST-NEXT: - { id: 2, class: gpr }
+
+# GREEDY-NEXT: - { id: 0, class: vecr }
+# GREEDY-NEXT: - { id: 1, class: gpr }
+
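+# Note: fast regbankselect conservatively puts the stored value on the gpr bank, adding a third vreg and a cross-bank copy; the greedy mode keeps the float in vecr and stores it directly.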
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %xmm0
+
+ %0(s32) = COPY %xmm0
+ %1(p0) = COPY %rdi
+ ; CHECK: %1(p0) = COPY %rdi
+
+ ; FAST-NEXT: %2(s32) = COPY %0(s32)
+ ; FAST-NEXT: G_STORE %2(s32), %1(p0) :: (store 4 into %ir.p1)
+
+ ; GREEDY-NEXT: G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
+
+ G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
+ %rax = COPY %1(p0)
+ RET 0, implicit %rax
+
+...
+---
+name: test_store_double
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK-LABEL: name: test_store_double
+# CHECK: registers:
+
+# FAST-NEXT: - { id: 0, class: vecr }
+# FAST-NEXT: - { id: 1, class: gpr }
+# FAST-NEXT: - { id: 2, class: gpr }
+
+# GREEDY-NEXT: - { id: 0, class: vecr }
+# GREEDY-NEXT: - { id: 1, class: gpr }
+
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %xmm0
+
+ %0(s64) = COPY %xmm0
+ %1(p0) = COPY %rdi
+
+ ; CHECK: %1(p0) = COPY %rdi
+
+ ; FAST-NEXT: %2(s64) = COPY %0(s64)
+ ; FAST-NEXT: G_STORE %2(s64), %1(p0) :: (store 8 into %ir.p1)
+
+ ; GREEDY-NEXT: G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1)
+
+ G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1)
+ %rax = COPY %1(p0)
+ RET 0, implicit %rax
+
+...
+---
+name: constInt_check
+alignment: 4
+legalized: true
+# CHECK-LABEL: name: constInt_check
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr }
+# CHECK-NEXT: - { id: 3, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0 (%ir-block.0):
+ %0(s8) = G_CONSTANT i8 8
+ %1(s16) = G_CONSTANT i16 16
+ %2(s32) = G_CONSTANT i32 32
+ %3(s64) = G_CONSTANT i64 64
+ RET 0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/binop-isel.ll b/test/CodeGen/X86/GlobalISel/binop-isel.ll
new file mode 100644
index 000000000000..8499dd958447
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/binop-isel.ll
@@ -0,0 +1,186 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX512F
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX512VL
+
+define i64 @test_add_i64(i64 %arg1, i64 %arg2) {
+; ALL-LABEL: test_add_i64:
+; ALL: # BB#0:
+; ALL-NEXT: leaq (%rsi,%rdi), %rax
+; ALL-NEXT: retq
+ %ret = add i64 %arg1, %arg2
+ ret i64 %ret
+}
+
+define i32 @test_add_i32(i32 %arg1, i32 %arg2) {
+; ALL-LABEL: test_add_i32:
+; ALL: # BB#0:
+; ALL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ALL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; ALL-NEXT: leal (%rsi,%rdi), %eax
+; ALL-NEXT: retq
+ %ret = add i32 %arg1, %arg2
+ ret i32 %ret
+}
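+; (The "kill" comments appear because leal above addresses with 64-bit registers, so the 32-bit arguments are read as the low halves of %rdi and %rsi.)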
+
+define i64 @test_sub_i64(i64 %arg1, i64 %arg2) {
+; ALL-LABEL: test_sub_i64:
+; ALL: # BB#0:
+; ALL-NEXT: subq %rsi, %rdi
+; ALL-NEXT: movq %rdi, %rax
+; ALL-NEXT: retq
+ %ret = sub i64 %arg1, %arg2
+ ret i64 %ret
+}
+
+define i32 @test_sub_i32(i32 %arg1, i32 %arg2) {
+; ALL-LABEL: test_sub_i32:
+; ALL: # BB#0:
+; ALL-NEXT: subl %esi, %edi
+; ALL-NEXT: movl %edi, %eax
+; ALL-NEXT: retq
+ %ret = sub i32 %arg1, %arg2
+ ret i32 %ret
+}
+
+define float @test_add_float(float %arg1, float %arg2) {
+; SSE-LABEL: test_add_float:
+; SSE: # BB#0:
+; SSE-NEXT: addss %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; ALL_AVX-LABEL: test_add_float:
+; ALL_AVX: # BB#0:
+; ALL_AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; ALL_AVX-NEXT: retq
+ %ret = fadd float %arg1, %arg2
+ ret float %ret
+}
+
+define double @test_add_double(double %arg1, double %arg2) {
+; SSE-LABEL: test_add_double:
+; SSE: # BB#0:
+; SSE-NEXT: addsd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; ALL_AVX-LABEL: test_add_double:
+; ALL_AVX: # BB#0:
+; ALL_AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; ALL_AVX-NEXT: retq
+ %ret = fadd double %arg1, %arg2
+ ret double %ret
+}
+
+define float @test_sub_float(float %arg1, float %arg2) {
+; SSE-LABEL: test_sub_float:
+; SSE: # BB#0:
+; SSE-NEXT: subss %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; ALL_AVX-LABEL: test_sub_float:
+; ALL_AVX: # BB#0:
+; ALL_AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; ALL_AVX-NEXT: retq
+ %ret = fsub float %arg1, %arg2
+ ret float %ret
+}
+
+define double @test_sub_double(double %arg1, double %arg2) {
+; SSE-LABEL: test_sub_double:
+; SSE: # BB#0:
+; SSE-NEXT: subsd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; ALL_AVX-LABEL: test_sub_double:
+; ALL_AVX: # BB#0:
+; ALL_AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
+; ALL_AVX-NEXT: retq
+ %ret = fsub double %arg1, %arg2
+ ret double %ret
+}
+
+define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
+; SSE-LABEL: test_add_v4i32:
+; SSE: # BB#0:
+; SSE-NEXT: paddd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; ALL_AVX-LABEL: test_add_v4i32:
+; ALL_AVX: # BB#0:
+; ALL_AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; ALL_AVX-NEXT: retq
+ %ret = add <4 x i32> %arg1, %arg2
+ ret <4 x i32> %ret
+}
+
+define <4 x i32> @test_sub_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
+; SSE-LABEL: test_sub_v4i32:
+; SSE: # BB#0:
+; SSE-NEXT: psubd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; ALL_AVX-LABEL: test_sub_v4i32:
+; ALL_AVX: # BB#0:
+; ALL_AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; ALL_AVX-NEXT: retq
+ %ret = sub <4 x i32> %arg1, %arg2
+ ret <4 x i32> %ret
+}
+
+define <4 x float> @test_add_v4f32(<4 x float> %arg1, <4 x float> %arg2) {
+; SSE-LABEL: test_add_v4f32:
+; SSE: # BB#0:
+; SSE-NEXT: addps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; ALL_AVX-LABEL: test_add_v4f32:
+; ALL_AVX: # BB#0:
+; ALL_AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; ALL_AVX-NEXT: retq
+ %ret = fadd <4 x float> %arg1, %arg2
+ ret <4 x float> %ret
+}
+
+define <4 x float> @test_sub_v4f32(<4 x float> %arg1, <4 x float> %arg2) {
+; SSE-LABEL: test_sub_v4f32:
+; SSE: # BB#0:
+; SSE-NEXT: subps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; ALL_AVX-LABEL: test_sub_v4f32:
+; ALL_AVX: # BB#0:
+; ALL_AVX-NEXT: vsubps %xmm1, %xmm0, %xmm0
+; ALL_AVX-NEXT: retq
+ %ret = fsub <4 x float> %arg1, %arg2
+ ret <4 x float> %ret
+}
+
+define i32 @test_copy_float(float %val) {
+; SSE-LABEL: test_copy_float:
+; SSE: # BB#0:
+; SSE-NEXT: movd %xmm0, %eax
+; SSE-NEXT: retq
+;
+; ALL_AVX-LABEL: test_copy_float:
+; ALL_AVX: # BB#0:
+; ALL_AVX-NEXT: vmovd %xmm0, %eax
+; ALL_AVX-NEXT: retq
+ %r = bitcast float %val to i32
+ ret i32 %r
+}
+
+define float @test_copy_i32(i32 %val) {
+; SSE-LABEL: test_copy_i32:
+; SSE: # BB#0:
+; SSE-NEXT: movd %edi, %xmm0
+; SSE-NEXT: retq
+;
+; ALL_AVX-LABEL: test_copy_i32:
+; ALL_AVX: # BB#0:
+; ALL_AVX-NEXT: vmovd %edi, %xmm0
+; ALL_AVX-NEXT: retq
+ %r = bitcast i32 %val to float
+ ret float %r
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/constant.ll b/test/CodeGen/X86/GlobalISel/constant.ll
new file mode 100644
index 000000000000..cab043a51f05
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/constant.ll
@@ -0,0 +1,54 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+
+define i8 @const_i8() {
+; ALL-LABEL: const_i8:
+; ALL: # BB#0:
+; ALL-NEXT: movb $2, %al
+; ALL-NEXT: retq
+ ret i8 2
+}
+
+define i16 @const_i16() {
+; ALL-LABEL: const_i16:
+; ALL: # BB#0:
+; ALL-NEXT: movw $3, %ax
+; ALL-NEXT: retq
+ ret i16 3
+}
+
+define i32 @const_i32() {
+; ALL-LABEL: const_i32:
+; ALL: # BB#0:
+; ALL-NEXT: movl $4, %eax
+; ALL-NEXT: retq
+ ret i32 4
+}
+
+define i64 @const_i64() {
+; ALL-LABEL: const_i64:
+; ALL: # BB#0:
+; ALL-NEXT: movabsq $68719476720, %rax # imm = 0xFFFFFFFF0
+; ALL-NEXT: retq
+ ret i64 68719476720
+}
+
+; i64 value that fits into u32
+define i64 @const_i64_u32() {
+; ALL-LABEL: const_i64_u32:
+; ALL: # BB#0:
+; ALL-NEXT: movq $1879048192, %rax # imm = 0x70000000
+; ALL-NEXT: retq
+ ret i64 1879048192
+}
+
+; i64 value that fits into i32
+define i64 @const_i64_i32() {
+; ALL-LABEL: const_i64_i32:
+; ALL: # BB#0:
+; ALL-NEXT: movq $-1, %rax
+; ALL-NEXT: retq
+ ret i64 -1
+}
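+; Judging by the checks, constants that fit in a sign-extended 32-bit immediate use the short movq form; only the full 64-bit value in const_i64 needs movabsq.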
+
+
diff --git a/test/CodeGen/X86/GlobalISel/frameIndex-instructionselect.mir b/test/CodeGen/X86/GlobalISel/frameIndex-instructionselect.mir
new file mode 100644
index 000000000000..2fa9ac23a7af
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/frameIndex-instructionselect.mir
@@ -0,0 +1,36 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64
+# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32
+# RUN: llc -mtriple=x86_64-linux-gnux32 -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32ABI
+
+--- |
+ define i32* @allocai32() {
+ %ptr1 = alloca i32
+ ret i32* %ptr1
+ }
+
+...
+---
+name: allocai32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK-LABEL: name: allocai32
+# CHECK: registers:
+# X32: - { id: 0, class: gr32 }
+# X32ABI: - { id: 0, class: gr32 }
+# X64: - { id: 0, class: gr64 }
+registers:
+ - { id: 0, class: gpr }
+stack:
+ - { id: 0, name: ptr1, offset: 0, size: 4, alignment: 4 }
+
+# X32: %0 = LEA32r %stack.0.ptr1, 1, _, 0, _
+# X32ABI: %0 = LEA64_32r %stack.0.ptr1, 1, _, 0, _
+# X64: %0 = LEA64r %stack.0.ptr1, 1, _, 0, _
+body: |
+ bb.1 (%ir-block.0):
+ %0(p0) = G_FRAME_INDEX %stack.0.ptr1
+ %eax = COPY %0(p0)
+ RET 0, implicit %eax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/frameIndex.ll b/test/CodeGen/X86/GlobalISel/frameIndex.ll
new file mode 100644
index 000000000000..2bb11adcc3b5
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/frameIndex.ll
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=X64
+; RUN: llc -mtriple=x86_64-linux-gnu < %s -o - | FileCheck %s --check-prefix=X64
+; RUN: llc -mtriple=i386-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=X32
+; RUN: llc -mtriple=i386-linux-gnu < %s -o - | FileCheck %s --check-prefix=X32
+; RUN: llc -mtriple=x86_64-linux-gnux32 -global-isel < %s -o - | FileCheck %s --check-prefix=X32ABI
+; RUN: llc -mtriple=x86_64-linux-gnux32 < %s -o - | FileCheck %s --check-prefix=X32ABI
+
+define i32* @allocai32() {
+; X64-LABEL: allocai32:
+; X64: # BB#0:
+; X64-NEXT: leaq -4(%rsp), %rax
+; X64-NEXT: retq
+;
+; X32-LABEL: allocai32:
+; X32: # BB#0:
+; X32-NEXT: pushl %eax
+; X32-NEXT: .Lcfi0:
+; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: movl %esp, %eax
+; X32-NEXT: popl %ecx
+; X32-NEXT: retl
+;
+; X32ABI-LABEL: allocai32:
+; X32ABI: # BB#0:
+; X32ABI-NEXT: leal -4(%rsp), %eax
+; X32ABI-NEXT: retq
+ %ptr1 = alloca i32
+ ret i32* %ptr1
+}
diff --git a/test/CodeGen/X86/GlobalISel/irtranslator-call.ll b/test/CodeGen/X86/GlobalISel/irtranslator-call.ll
index 425d2609380e..c1bf44417666 100644
--- a/test/CodeGen/X86/GlobalISel/irtranslator-call.ll
+++ b/test/CodeGen/X86/GlobalISel/irtranslator-call.ll
@@ -5,6 +5,7 @@ define void @test_void_return() {
; CHECK-LABEL: name: test_void_return
; CHECK: alignment: 4
; CHECK-NEXT: exposesReturnsTwice: false
+; CHECK-NEXT: noVRegs: false
; CHECK-NEXT: legalized: false
; CHECK-NEXT: regBankSelected: false
; CHECK-NEXT: selected: false
diff --git a/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll b/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
new file mode 100644
index 000000000000..616cb70652bb
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
@@ -0,0 +1,310 @@
+; RUN: llc -mtriple=i386-linux-gnu -global-isel -stop-after=irtranslator < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -stop-after=irtranslator < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+
+@a1_8bit = external global i8
+@a7_8bit = external global i8
+@a8_8bit = external global i8
+
+define i8 @test_i8_args_8(i8 %arg1, i8 %arg2, i8 %arg3, i8 %arg4,
+ i8 %arg5, i8 %arg6, i8 %arg7, i8 %arg8) {
+
+; ALL-LABEL: name: test_i8_args_8
+
+; X64: fixedStack:
+; X64: id: [[STACK8:[0-9]+]], offset: 8, size: 1, alignment: 8, isImmutable: true, isAliased: false
+; X64: id: [[STACK0:[0-9]+]], offset: 0, size: 1, alignment: 16, isImmutable: true, isAliased: false
+; X64: liveins: %ecx, %edi, %edx, %esi, %r8d, %r9d
+; X64: [[ARG1:%[0-9]+]](s8) = COPY %edi
+; X64-NEXT: %{{[0-9]+}}(s8) = COPY %esi
+; X64-NEXT: %{{[0-9]+}}(s8) = COPY %edx
+; X64-NEXT: %{{[0-9]+}}(s8) = COPY %ecx
+; X64-NEXT: %{{[0-9]+}}(s8) = COPY %r8d
+; X64-NEXT: %{{[0-9]+}}(s8) = COPY %r9d
+; X64-NEXT: [[ARG7_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK0]]
+; X64-NEXT: [[ARG7:%[0-9]+]](s8) = G_LOAD [[ARG7_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK0]], align 0)
+; X64-NEXT: [[ARG8_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK8]]
+; X64-NEXT: [[ARG8:%[0-9]+]](s8) = G_LOAD [[ARG8_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK8]], align 0)
+
+; X32: fixedStack:
+; X32: id: [[STACK28:[0-9]+]], offset: 28, size: 1, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK24:[0-9]+]], offset: 24, size: 1, alignment: 8, isImmutable: true, isAliased: false }
+; X32: id: [[STACK20:[0-9]+]], offset: 20, size: 1, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK16:[0-9]+]], offset: 16, size: 1, alignment: 16, isImmutable: true, isAliased: false }
+; X32: id: [[STACK12:[0-9]+]], offset: 12, size: 1, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK8:[0-9]+]], offset: 8, size: 1, alignment: 8, isImmutable: true, isAliased: false }
+; X32: id: [[STACK4:[0-9]+]], offset: 4, size: 1, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK0:[0-9]+]], offset: 0, size: 1, alignment: 16, isImmutable: true, isAliased: false }
+; X32: [[ARG1_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK0]]
+; X32-NEXT: [[ARG1:%[0-9]+]](s8) = G_LOAD [[ARG1_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK0]], align 0)
+; X32-NEXT: [[ARG2_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK4]]
+; X32-NEXT: [[ARG2:%[0-9]+]](s8) = G_LOAD [[ARG2_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK4]], align 0)
+; X32-NEXT: [[ARG3_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK8]]
+; X32-NEXT: [[ARG3:%[0-9]+]](s8) = G_LOAD [[ARG3_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK8]], align 0)
+; X32-NEXT: [[ARG4_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK12]]
+; X32-NEXT: [[ARG4:%[0-9]+]](s8) = G_LOAD [[ARG4_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK12]], align 0)
+; X32-NEXT: [[ARG5_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK16]]
+; X32-NEXT: [[ARG5:%[0-9]+]](s8) = G_LOAD [[ARG5_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK16]], align 0)
+; X32-NEXT: [[ARG6_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK20]]
+; X32-NEXT: [[ARG6:%[0-9]+]](s8) = G_LOAD [[ARG6_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK20]], align 0)
+; X32-NEXT: [[ARG7_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK24]]
+; X32-NEXT: [[ARG7:%[0-9]+]](s8) = G_LOAD [[ARG7_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK24]], align 0)
+; X32-NEXT: [[ARG8_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK28]]
+; X32-NEXT: [[ARG8:%[0-9]+]](s8) = G_LOAD [[ARG8_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK28]], align 0)
+
+; ALL-NEXT: [[GADDR_A1:%[0-9]+]](p0) = G_GLOBAL_VALUE @a1_8bit
+; ALL-NEXT: [[GADDR_A7:%[0-9]+]](p0) = G_GLOBAL_VALUE @a7_8bit
+; ALL-NEXT: [[GADDR_A8:%[0-9]+]](p0) = G_GLOBAL_VALUE @a8_8bit
+; ALL-NEXT: G_STORE [[ARG1]](s8), [[GADDR_A1]](p0) :: (store 1 into @a1_8bit)
+; ALL-NEXT: G_STORE [[ARG7]](s8), [[GADDR_A7]](p0) :: (store 1 into @a7_8bit)
+; ALL-NEXT: G_STORE [[ARG8]](s8), [[GADDR_A8]](p0) :: (store 1 into @a8_8bit)
+; ALL-NEXT: %al = COPY [[ARG1]](s8)
+; ALL-NEXT: RET 0, implicit %al
+
+entry:
+ store i8 %arg1, i8* @a1_8bit
+ store i8 %arg7, i8* @a7_8bit
+ store i8 %arg8, i8* @a8_8bit
+ ret i8 %arg1
+}
+
+@a1_32bit = external global i32
+@a7_32bit = external global i32
+@a8_32bit = external global i32
+
+define i32 @test_i32_args_8(i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4,
+ i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8) {
+
+; ALL-LABEL: name: test_i32_args_8
+
+; X64: fixedStack:
+; X64: id: [[STACK8:[0-9]+]], offset: 8, size: 4, alignment: 8, isImmutable: true, isAliased: false
+; X64: id: [[STACK0:[0-9]+]], offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false
+; X64: liveins: %ecx, %edi, %edx, %esi, %r8d, %r9d
+; X64: [[ARG1:%[0-9]+]](s32) = COPY %edi
+; X64-NEXT: %{{[0-9]+}}(s32) = COPY %esi
+; X64-NEXT: %{{[0-9]+}}(s32) = COPY %edx
+; X64-NEXT: %{{[0-9]+}}(s32) = COPY %ecx
+; X64-NEXT: %{{[0-9]+}}(s32) = COPY %r8d
+; X64-NEXT: %{{[0-9]+}}(s32) = COPY %r9d
+; X64-NEXT: [[ARG7_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK0]]
+; X64-NEXT: [[ARG7:%[0-9]+]](s32) = G_LOAD [[ARG7_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK0]], align 0)
+; X64-NEXT: [[ARG8_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK8]]
+; X64-NEXT: [[ARG8:%[0-9]+]](s32) = G_LOAD [[ARG8_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK8]], align 0)
+
+; X32: fixedStack:
+; X32: id: [[STACK28:[0-9]+]], offset: 28, size: 4, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK24:[0-9]+]], offset: 24, size: 4, alignment: 8, isImmutable: true, isAliased: false }
+; X32: id: [[STACK20:[0-9]+]], offset: 20, size: 4, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK16:[0-9]+]], offset: 16, size: 4, alignment: 16, isImmutable: true, isAliased: false }
+; X32: id: [[STACK12:[0-9]+]], offset: 12, size: 4, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK8:[0-9]+]], offset: 8, size: 4, alignment: 8, isImmutable: true, isAliased: false }
+; X32: id: [[STACK4:[0-9]+]], offset: 4, size: 4, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK0:[0-9]+]], offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
+; X32: [[ARG1_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK0]]
+; X32-NEXT: [[ARG1:%[0-9]+]](s32) = G_LOAD [[ARG1_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK0]], align 0)
+; X32-NEXT: [[ARG2_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK4]]
+; X32-NEXT: [[ARG2:%[0-9]+]](s32) = G_LOAD [[ARG2_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK4]], align 0)
+; X32-NEXT: [[ARG3_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK8]]
+; X32-NEXT: [[ARG3:%[0-9]+]](s32) = G_LOAD [[ARG3_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK8]], align 0)
+; X32-NEXT: [[ARG4_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK12]]
+; X32-NEXT: [[ARG4:%[0-9]+]](s32) = G_LOAD [[ARG4_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK12]], align 0)
+; X32-NEXT: [[ARG5_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK16]]
+; X32-NEXT: [[ARG5:%[0-9]+]](s32) = G_LOAD [[ARG5_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK16]], align 0)
+; X32-NEXT: [[ARG6_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK20]]
+; X32-NEXT: [[ARG6:%[0-9]+]](s32) = G_LOAD [[ARG6_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK20]], align 0)
+; X32-NEXT: [[ARG7_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK24]]
+; X32-NEXT: [[ARG7:%[0-9]+]](s32) = G_LOAD [[ARG7_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK24]], align 0)
+; X32-NEXT: [[ARG8_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK28]]
+; X32-NEXT: [[ARG8:%[0-9]+]](s32) = G_LOAD [[ARG8_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK28]], align 0)
+
+; ALL-NEXT: [[GADDR_A1:%[0-9]+]](p0) = G_GLOBAL_VALUE @a1_32bit
+; ALL-NEXT: [[GADDR_A7:%[0-9]+]](p0) = G_GLOBAL_VALUE @a7_32bit
+; ALL-NEXT: [[GADDR_A8:%[0-9]+]](p0) = G_GLOBAL_VALUE @a8_32bit
+; ALL-NEXT: G_STORE [[ARG1]](s32), [[GADDR_A1]](p0) :: (store 4 into @a1_32bit)
+; ALL-NEXT: G_STORE [[ARG7]](s32), [[GADDR_A7]](p0) :: (store 4 into @a7_32bit)
+; ALL-NEXT: G_STORE [[ARG8]](s32), [[GADDR_A8]](p0) :: (store 4 into @a8_32bit)
+; ALL-NEXT: %eax = COPY [[ARG1]](s32)
+; ALL-NEXT: RET 0, implicit %eax
+
+entry:
+ store i32 %arg1, i32* @a1_32bit
+ store i32 %arg7, i32* @a7_32bit
+ store i32 %arg8, i32* @a8_32bit
+ ret i32 %arg1
+}
+
+@a1_64bit = external global i64
+@a7_64bit = external global i64
+@a8_64bit = external global i64
+
+define i64 @test_i64_args_8(i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4,
+ i64 %arg5, i64 %arg6, i64 %arg7, i64 %arg8) {
+
+; ALL-LABEL: name: test_i64_args_8
+; X64: fixedStack:
+; X64: id: [[STACK8:[0-9]+]], offset: 8, size: 8, alignment: 8, isImmutable: true, isAliased: false
+; X64: id: [[STACK0:[0-9]+]], offset: 0, size: 8, alignment: 16, isImmutable: true, isAliased: false
+; X64: liveins: %rcx, %rdi, %rdx, %rsi, %r8, %r9
+; X64: [[ARG1:%[0-9]+]](s64) = COPY %rdi
+; X64-NEXT: %{{[0-9]+}}(s64) = COPY %rsi
+; X64-NEXT: %{{[0-9]+}}(s64) = COPY %rdx
+; X64-NEXT: %{{[0-9]+}}(s64) = COPY %rcx
+; X64-NEXT: %{{[0-9]+}}(s64) = COPY %r8
+; X64-NEXT: %{{[0-9]+}}(s64) = COPY %r9
+; X64-NEXT: [[ARG7_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK0]]
+; X64-NEXT: [[ARG7:%[0-9]+]](s64) = G_LOAD [[ARG7_ADDR]](p0) :: (invariant load 8 from %fixed-stack.[[STACK0]], align 0)
+; X64-NEXT: [[ARG8_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK8]]
+; X64-NEXT: [[ARG8:%[0-9]+]](s64) = G_LOAD [[ARG8_ADDR]](p0) :: (invariant load 8 from %fixed-stack.[[STACK8]], align 0)
+
+; X32: fixedStack:
+; X32: id: [[STACK60:[0-9]+]], offset: 60, size: 4, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK56:[0-9]+]], offset: 56, size: 4, alignment: 8, isImmutable: true, isAliased: false }
+; X32: id: [[STACK52:[0-9]+]], offset: 52, size: 4, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK48:[0-9]+]], offset: 48, size: 4, alignment: 16, isImmutable: true, isAliased: false }
+; X32: id: [[STACK44:[0-9]+]], offset: 44, size: 4, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK40:[0-9]+]], offset: 40, size: 4, alignment: 8, isImmutable: true, isAliased: false }
+; X32: id: [[STACK36:[0-9]+]], offset: 36, size: 4, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK32:[0-9]+]], offset: 32, size: 4, alignment: 16, isImmutable: true, isAliased: false }
+; X32: id: [[STACK28:[0-9]+]], offset: 28, size: 4, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK24:[0-9]+]], offset: 24, size: 4, alignment: 8, isImmutable: true, isAliased: false }
+; X32: id: [[STACK20:[0-9]+]], offset: 20, size: 4, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK16:[0-9]+]], offset: 16, size: 4, alignment: 16, isImmutable: true, isAliased: false }
+; X32: id: [[STACK12:[0-9]+]], offset: 12, size: 4, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK8:[0-9]+]], offset: 8, size: 4, alignment: 8, isImmutable: true, isAliased: false }
+; X32: id: [[STACK4:[0-9]+]], offset: 4, size: 4, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK0:[0-9]+]], offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
+
+; X32: [[ARG1L_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK0]]
+; X32-NEXT: [[ARG1L:%[0-9]+]](s32) = G_LOAD [[ARG1L_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK0]], align 0)
+; X32-NEXT: [[ARG1H_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK4]]
+; X32-NEXT: [[ARG1H:%[0-9]+]](s32) = G_LOAD [[ARG1H_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK4]], align 0)
+; X32-NEXT: %{{[0-9]+}}(p0) = G_FRAME_INDEX %fixed-stack.[[STACK8]]
+; X32-NEXT: %{{[0-9]+}}(s32) = G_LOAD %{{[0-9]+}}(p0) :: (invariant load 4 from %fixed-stack.[[STACK8]], align 0)
+; X32-NEXT: %{{[0-9]+}}(p0) = G_FRAME_INDEX %fixed-stack.[[STACK12]]
+; X32-NEXT: %{{[0-9]+}}(s32) = G_LOAD %{{[0-9]+}}(p0) :: (invariant load 4 from %fixed-stack.[[STACK12]], align 0)
+; X32-NEXT: %{{[0-9]+}}(p0) = G_FRAME_INDEX %fixed-stack.[[STACK16]]
+; X32-NEXT: %{{[0-9]+}}(s32) = G_LOAD %{{[0-9]+}}(p0) :: (invariant load 4 from %fixed-stack.[[STACK16]], align 0)
+; X32-NEXT: %{{[0-9]+}}(p0) = G_FRAME_INDEX %fixed-stack.[[STACK20]]
+; X32-NEXT: %{{[0-9]+}}(s32) = G_LOAD %{{[0-9]+}}(p0) :: (invariant load 4 from %fixed-stack.[[STACK20]], align 0)
+; X32-NEXT: %{{[0-9]+}}(p0) = G_FRAME_INDEX %fixed-stack.[[STACK24]]
+; X32-NEXT: %{{[0-9]+}}(s32) = G_LOAD %{{[0-9]+}}(p0) :: (invariant load 4 from %fixed-stack.[[STACK24]], align 0)
+; X32-NEXT: %{{[0-9]+}}(p0) = G_FRAME_INDEX %fixed-stack.[[STACK28]]
+; X32-NEXT: %{{[0-9]+}}(s32) = G_LOAD %{{[0-9]+}}(p0) :: (invariant load 4 from %fixed-stack.[[STACK28]], align 0)
+; X32-NEXT: %{{[0-9]+}}(p0) = G_FRAME_INDEX %fixed-stack.[[STACK32]]
+; X32-NEXT: %{{[0-9]+}}(s32) = G_LOAD %{{[0-9]+}}(p0) :: (invariant load 4 from %fixed-stack.[[STACK32]], align 0)
+; X32-NEXT: %{{[0-9]+}}(p0) = G_FRAME_INDEX %fixed-stack.[[STACK36]]
+; X32-NEXT: %{{[0-9]+}}(s32) = G_LOAD %{{[0-9]+}}(p0) :: (invariant load 4 from %fixed-stack.[[STACK36]], align 0)
+; X32-NEXT: %{{[0-9]+}}(p0) = G_FRAME_INDEX %fixed-stack.[[STACK40]]
+; X32-NEXT: %{{[0-9]+}}(s32) = G_LOAD %{{[0-9]+}}(p0) :: (invariant load 4 from %fixed-stack.[[STACK40]], align 0)
+; X32-NEXT: %{{[0-9]+}}(p0) = G_FRAME_INDEX %fixed-stack.[[STACK44]]
+; X32-NEXT: %{{[0-9]+}}(s32) = G_LOAD %{{[0-9]+}}(p0) :: (invariant load 4 from %fixed-stack.[[STACK44]], align 0)
+; X32-NEXT: [[ARG7L_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK48]]
+; X32-NEXT: [[ARG7L:%[0-9]+]](s32) = G_LOAD [[ARG7L_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK48]], align 0)
+; X32-NEXT: [[ARG7H_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK52]]
+; X32-NEXT: [[ARG7H:%[0-9]+]](s32) = G_LOAD [[ARG7H_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK52]], align 0)
+; X32-NEXT: [[ARG8L_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK56]]
+; X32-NEXT: [[ARG8L:%[0-9]+]](s32) = G_LOAD [[ARG8L_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK56]], align 0)
+; X32-NEXT: [[ARG8H_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK60]]
+; X32-NEXT: [[ARG8H:%[0-9]+]](s32) = G_LOAD [[ARG8H_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK60]], align 0)
+
+; X32-NEXT: [[UNDEF:%[0-9]+]](s64) = IMPLICIT_DEF
+; X32-NEXT: [[ARG1_TMP0:%[0-9]+]](s64) = G_INSERT [[UNDEF]], [[ARG1L]](s32), 0
+; X32-NEXT: [[ARG1_TMP1:%[0-9]+]](s64) = G_INSERT [[ARG1_TMP0]], [[ARG1H]](s32), 32
+; X32-NEXT: [[ARG1:%[0-9]+]](s64) = COPY [[ARG1_TMP1]]
+ ; ... a bunch more that we don't track ...
+ ; X32: IMPLICIT_DEF
+ ; X32: IMPLICIT_DEF
+ ; X32: IMPLICIT_DEF
+ ; X32: IMPLICIT_DEF
+ ; X32: IMPLICIT_DEF
+; X32: [[UNDEF:%[0-9]+]](s64) = IMPLICIT_DEF
+; X32-NEXT: [[ARG7_TMP0:%[0-9]+]](s64) = G_INSERT [[UNDEF]], [[ARG7L]](s32), 0
+; X32-NEXT: [[ARG7_TMP1:%[0-9]+]](s64) = G_INSERT [[ARG7_TMP0]], [[ARG7H]](s32), 32
+; X32-NEXT: [[ARG7:%[0-9]+]](s64) = COPY [[ARG7_TMP1]]
+; X32-NEXT: [[UNDEF:%[0-9]+]](s64) = IMPLICIT_DEF
+; X32-NEXT: [[ARG8_TMP0:%[0-9]+]](s64) = G_INSERT [[UNDEF]], [[ARG8L]](s32), 0
+; X32-NEXT: [[ARG8_TMP1:%[0-9]+]](s64) = G_INSERT [[ARG8_TMP0]], [[ARG8H]](s32), 32
+; X32-NEXT: [[ARG8:%[0-9]+]](s64) = COPY [[ARG8_TMP1]]
+
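+; In short, on 32-bit x86 each i64 argument arrives as two 4-byte stack slots; the low and high halves are loaded separately and recombined into an s64 with G_INSERT at bit offsets 0 and 32.
+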
+; ALL-NEXT: [[GADDR_A1:%[0-9]+]](p0) = G_GLOBAL_VALUE @a1_64bit
+; ALL-NEXT: [[GADDR_A7:%[0-9]+]](p0) = G_GLOBAL_VALUE @a7_64bit
+; ALL-NEXT: [[GADDR_A8:%[0-9]+]](p0) = G_GLOBAL_VALUE @a8_64bit
+; ALL-NEXT: G_STORE [[ARG1]](s64), [[GADDR_A1]](p0) :: (store 8 into @a1_64bit
+; ALL-NEXT: G_STORE [[ARG7]](s64), [[GADDR_A7]](p0) :: (store 8 into @a7_64bit
+; ALL-NEXT: G_STORE [[ARG8]](s64), [[GADDR_A8]](p0) :: (store 8 into @a8_64bit
+
+; X64-NEXT: %rax = COPY [[ARG1]](s64)
+; X64-NEXT: RET 0, implicit %rax
+
+; X32-NEXT: [[RETL:%[0-9]+]](s32) = G_EXTRACT [[ARG1:%[0-9]+]](s64), 0
+; X32-NEXT: [[RETH:%[0-9]+]](s32) = G_EXTRACT [[ARG1:%[0-9]+]](s64), 32
+; X32-NEXT: %eax = COPY [[RETL:%[0-9]+]](s32)
+; X32-NEXT: %edx = COPY [[RETH:%[0-9]+]](s32)
+; X32-NEXT: RET 0, implicit %eax, implicit %edx
+
+entry:
+ store i64 %arg1, i64* @a1_64bit
+ store i64 %arg7, i64* @a7_64bit
+ store i64 %arg8, i64* @a8_64bit
+ ret i64 %arg1
+}
+
+define float @test_float_args(float %arg1, float %arg2) {
+; ALL-LABEL: name: test_float_args
+
+; X64: liveins: %xmm0, %xmm1
+; X64: [[ARG1:%[0-9]+]](s32) = COPY %xmm0
+; X64-NEXT: [[ARG2:%[0-9]+]](s32) = COPY %xmm1
+; X64-NEXT: %xmm0 = COPY [[ARG2:%[0-9]+]](s32)
+; X64-NEXT: RET 0, implicit %xmm0
+
+; X32: fixedStack:
+; X32: id: [[STACK4:[0-9]+]], offset: 4, size: 4, alignment: 4, isImmutable: true, isAliased: false }
+; X32: id: [[STACK0:[0-9]+]], offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
+; X32: [[ARG1_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK0]]
+; X32-NEXT: [[ARG1:%[0-9]+]](s32) = G_LOAD [[ARG1_ADDR:%[0-9]+]](p0) :: (invariant load 4 from %fixed-stack.[[STACK0]], align 0)
+; X32-NEXT: [[ARG2_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK4]]
+; X32-NEXT: [[ARG2:%[0-9]+]](s32) = G_LOAD [[ARG2_ADDR:%[0-9]+]](p0) :: (invariant load 4 from %fixed-stack.[[STACK4]], align 0)
+; X32-NEXT: %fp0 = COPY [[ARG2:%[0-9]+]](s32)
+; X32-NEXT: RET 0, implicit %fp0
+
+ ret float %arg2
+}
+
+define double @test_double_args(double %arg1, double %arg2) {
+; ALL-LABEL: name: test_double_args
+; X64: liveins: %xmm0, %xmm1
+; X64: [[ARG1:%[0-9]+]](s64) = COPY %xmm0
+; X64-NEXT: [[ARG2:%[0-9]+]](s64) = COPY %xmm1
+; X64-NEXT: %xmm0 = COPY [[ARG2:%[0-9]+]](s64)
+; X64-NEXT: RET 0, implicit %xmm0
+
+; X32: fixedStack:
+; X32: id: [[STACK8:[0-9]+]], offset: 8, size: 8, alignment: 8, isImmutable: true, isAliased: false }
+; X32: id: [[STACK0:[0-9]+]], offset: 0, size: 8, alignment: 16, isImmutable: true, isAliased: false }
+; X32: [[ARG1_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK0]]
+; X32-NEXT: [[ARG1:%[0-9]+]](s64) = G_LOAD [[ARG1_ADDR:%[0-9]+]](p0) :: (invariant load 8 from %fixed-stack.[[STACK0]], align 0)
+; X32-NEXT: [[ARG2_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK8]]
+; X32-NEXT: [[ARG2:%[0-9]+]](s64) = G_LOAD [[ARG2_ADDR:%[0-9]+]](p0) :: (invariant load 8 from %fixed-stack.[[STACK4]], align 0)
+; X32-NEXT: %fp0 = COPY [[ARG2:%[0-9]+]](s64)
+; X32-NEXT: RET 0, implicit %fp0
+
+ ret double %arg2
+}
+
+define i32 * @test_memop_i32(i32 * %p1) {
+; ALL-LABEL: name: test_memop_i32
+;X64: liveins: %rdi
+;X64: %0(p0) = COPY %rdi
+;X64-NEXT: %rax = COPY %0(p0)
+;X64-NEXT: RET 0, implicit %rax
+
+;X32: fixedStack:
+;X32: id: [[STACK0:[0-9]+]], offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
+;X32: %1(p0) = G_FRAME_INDEX %fixed-stack.[[STACK0]]
+;X32-NEXT: %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.[[STACK0]], align 0)
+;X32-NEXT: %eax = COPY %0(p0)
+;X32-NEXT: RET 0, implicit %eax
+
+ ret i32 * %p1;
+}
\ No newline at end of file
diff --git a/test/CodeGen/X86/GlobalISel/irtranslator-callingconv_64bit.ll b/test/CodeGen/X86/GlobalISel/irtranslator-callingconv_64bit.ll
new file mode 100644
index 000000000000..e2d938550aea
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/irtranslator-callingconv_64bit.ll
@@ -0,0 +1,29 @@
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -stop-after=irtranslator < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+
+define <4 x i32> @test_v4i32_args(<4 x i32> %arg1, <4 x i32> %arg2) {
+; X64: name: test_v4i32_args
+; X64: liveins: %xmm0, %xmm1
+; X64: [[ARG1:%[0-9]+]](<4 x s32>) = COPY %xmm0
+; X64-NEXT: [[ARG2:%[0-9]+]](<4 x s32>) = COPY %xmm1
+; X64-NEXT: %xmm0 = COPY [[ARG2:%[0-9]+]](<4 x s32>)
+; X64-NEXT: RET 0, implicit %xmm0
+ ret <4 x i32> %arg2
+}
+
+define <8 x i32> @test_v8i32_args(<8 x i32> %arg1) {
+; X64: name: test_v8i32_args
+; X64: liveins: %xmm0, %xmm1
+; X64: [[ARG1L:%[0-9]+]](<4 x s32>) = COPY %xmm0
+; X64-NEXT: [[ARG1H:%[0-9]+]](<4 x s32>) = COPY %xmm1
+; X64-NEXT: [[UNDEF:%[0-9]+]](<8 x s32>) = IMPLICIT_DEF
+; X64-NEXT: [[ARG1_TMP0:%[0-9]+]](<8 x s32>) = G_INSERT [[UNDEF]], [[ARG1L]](<4 x s32>), 0
+; X64-NEXT: [[ARG1_TMP1:%[0-9]+]](<8 x s32>) = G_INSERT [[ARG1_TMP0]], [[ARG1H]](<4 x s32>), 128
+; X64-NEXT: [[ARG1:%[0-9]+]](<8 x s32>) = COPY [[ARG1_TMP1]]
+; X64-NEXT: [[RETL:%[0-9]+]](<4 x s32>) = G_EXTRACT [[ARG1:%[0-9]+]](<8 x s32>), 0
+; X64-NEXT: [[RETH:%[0-9]+]](<4 x s32>) = G_EXTRACT [[ARG1:%[0-9]+]](<8 x s32>), 128
+; X64-NEXT: %xmm0 = COPY [[RETL:%[0-9]+]](<4 x s32>)
+; X64-NEXT: %xmm1 = COPY [[RETH:%[0-9]+]](<4 x s32>)
+; X64-NEXT: RET 0, implicit %xmm0, implicit %xmm1
+
+ ret <8 x i32> %arg1
+}
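+
+; Since <8 x i32> does not fit in one 128-bit register, it is passed as two <4 x s32> halves in %xmm0 and %xmm1; the IRTranslator rebuilds it with G_INSERT at bit offsets 0 and 128 and splits it back with G_EXTRACT for the return.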
diff --git a/test/CodeGen/X86/GlobalISel/legalize-add.mir b/test/CodeGen/X86/GlobalISel/legalize-add.mir
new file mode 100644
index 000000000000..22619cc71033
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-add.mir
@@ -0,0 +1,40 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+
+--- |
+ ; ModuleID = '<stdin>'
+ source_filename = "<stdin>"
+ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+ target triple = "x86_64--linux-gnu"
+
+ define i32 @test_add_i32(i32 %arg1, i32 %arg2) {
+ %ret = add i32 %arg1, %arg2
+ ret i32 %ret
+ }
+
+...
+---
+name: test_add_i32
+alignment: 4
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+ ; CHECK-LABEL: name: test_add_i32
+ ; CHECK: [[VAL1:%.*]](s32) = COPY %edi
+ ; CHECK: [[VAL2:%.*]](s32) = COPY %esi
+ ; CHECK: [[RES:%.*]](s32) = G_ADD [[VAL1:%.*]], [[VAL2:%.*]]
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s32) = G_ADD %0, %1
+ %eax = COPY %2(s32)
+ RET 0, implicit %eax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-const.mir b/test/CodeGen/X86/GlobalISel/legalize-const.mir
new file mode 100644
index 000000000000..612d33a77fc9
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-const.mir
@@ -0,0 +1,43 @@
+# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+
+--- |
+ define void @constInt_check() {
+ ret void
+ }
+
+...
+---
+name: constInt_check
+# ALL-LABEL: name: constInt_check
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ ; ALL: %5(s8) = G_CONSTANT i8 -1
+ ; ALL: %0(s1) = G_TRUNC %5(s8)
+ %0(s1) = G_CONSTANT i1 1
+
+ ; ALL: %1(s8) = G_CONSTANT i8 8
+ %1(s8) = G_CONSTANT i8 8
+
+ ; ALL: %2(s16) = G_CONSTANT i16 16
+ %2(s16) = G_CONSTANT i16 16
+
+ ; ALL: %3(s32) = G_CONSTANT i32 32
+ %3(s32) = G_CONSTANT i32 32
+
+ ; X64: %4(s64) = G_CONSTANT i64 64
+
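+ ; On the 32-bit target there is no legal s64, so the constant is split into two i32 halves and merged back: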
+ ; X32: %6(s32) = G_CONSTANT i32 64
+ ; X32: %7(s32) = G_CONSTANT i32 0
+ ; X32: %4(s64) = G_MERGE_VALUES %6(s32), %7(s32)
+ %4(s64) = G_CONSTANT i64 64
+
+ RET 0
+...
+
diff --git a/test/CodeGen/X86/GlobalISel/legalize-sub.mir b/test/CodeGen/X86/GlobalISel/legalize-sub.mir
new file mode 100644
index 000000000000..26ef285929a6
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-sub.mir
@@ -0,0 +1,40 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+
+--- |
+ ; ModuleID = '<stdin>'
+ source_filename = "<stdin>"
+ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+ target triple = "x86_64--linux-gnu"
+
+ define i32 @test_sub_i32(i32 %arg1, i32 %arg2) {
+ %ret = sub i32 %arg1, %arg2
+ ret i32 %ret
+ }
+
+...
+---
+name: test_sub_i32
+alignment: 4
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+ ; CHECK-LABEL: name: test_sub_i32
+ ; CHECK: [[VAL1:%.*]](s32) = COPY %edi
+ ; CHECK: [[VAL2:%.*]](s32) = COPY %esi
+ ; CHECK: [[RES:%.*]](s32) = G_SUB [[VAL1:%.*]], [[VAL2:%.*]]
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s32) = G_SUB %0, %1
+ %eax = COPY %2(s32)
+ RET 0, implicit %eax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/memop-isel.ll b/test/CodeGen/X86/GlobalISel/memop-isel.ll
new file mode 100644
index 000000000000..6fe66436e4a8
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/memop-isel.ll
@@ -0,0 +1,189 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE_FAST
+; RUN: llc -mtriple=x86_64-linux-gnu -regbankselect-greedy -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE_GREEDY
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=ALL_AVX_FAST --check-prefix=AVX_FAST
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -regbankselect-greedy -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=ALL_AVX_GREEDY --check-prefix=AVX_GREEDY
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=ALL_AVX_FAST --check-prefix=AVX512F_FAST
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -regbankselect-greedy -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=ALL_AVX_GREEDY --check-prefix=AVX512F_GREEDY
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=ALL_AVX_FAST --check-prefix=AVX512VL_FAST
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -regbankselect-greedy -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=ALL_AVX_GREEDY --check-prefix=AVX512VL_GREEDY
+
+
+define i8 @test_load_i8(i8 * %p1) {
+; ALL-LABEL: test_load_i8:
+; ALL: # BB#0:
+; ALL-NEXT: movb (%rdi), %al
+; ALL-NEXT: retq
+ %r = load i8, i8* %p1
+ ret i8 %r
+}
+
+define i16 @test_load_i16(i16 * %p1) {
+; ALL-LABEL: test_load_i16:
+; ALL: # BB#0:
+; ALL-NEXT: movzwl (%rdi), %eax
+; ALL-NEXT: retq
+ %r = load i16, i16* %p1
+ ret i16 %r
+}
+
+define i32 @test_load_i32(i32 * %p1) {
+; ALL-LABEL: test_load_i32:
+; ALL: # BB#0:
+; ALL-NEXT: movl (%rdi), %eax
+; ALL-NEXT: retq
+ %r = load i32, i32* %p1
+ ret i32 %r
+}
+
+define i64 @test_load_i64(i64 * %p1) {
+; ALL-LABEL: test_load_i64:
+; ALL: # BB#0:
+; ALL-NEXT: movq (%rdi), %rax
+; ALL-NEXT: retq
+ %r = load i64, i64* %p1
+ ret i64 %r
+}
+
+define float @test_load_float(float * %p1) {
+; SSE-LABEL: test_load_float:
+; SSE: # BB#0:
+; SSE-NEXT: movl (%rdi), %eax
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: retq
+;
+; ALL_AVX-LABEL: test_load_float:
+; ALL_AVX: # BB#0:
+; ALL_AVX-NEXT: movl (%rdi), %eax
+; ALL_AVX-NEXT: vmovd %eax, %xmm0
+; ALL_AVX-NEXT: retq
+ %r = load float, float* %p1
+ ret float %r
+}
+
+define double @test_load_double(double * %p1) {
+; SSE-LABEL: test_load_double:
+; SSE: # BB#0:
+; SSE-NEXT: movq (%rdi), %rax
+; SSE-NEXT: movd %rax, %xmm0
+; SSE-NEXT: retq
+;
+; ALL_AVX-LABEL: test_load_double:
+; ALL_AVX: # BB#0:
+; ALL_AVX-NEXT: movq (%rdi), %rax
+; ALL_AVX-NEXT: vmovq %rax, %xmm0
+; ALL_AVX-NEXT: retq
+ %r = load double, double* %p1
+ ret double %r
+}
+
+define <4 x i32> @test_load_v4i32_noalign(<4 x i32> * %p1) {
+; SSE-LABEL: test_load_v4i32_noalign:
+; SSE: # BB#0:
+; SSE-NEXT: movups (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; ALL_AVX-LABEL: test_load_v4i32_noalign:
+; ALL_AVX: # BB#0:
+; ALL_AVX-NEXT: vmovups (%rdi), %xmm0
+; ALL_AVX-NEXT: retq
+ %r = load <4 x i32>, <4 x i32>* %p1, align 1
+ ret <4 x i32> %r
+}
+
+define <4 x i32> @test_load_v4i32_align(<4 x i32> * %p1) {
+; SSE-LABEL: test_load_v4i32_align:
+; SSE: # BB#0:
+; SSE-NEXT: movaps (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; ALL_AVX-LABEL: test_load_v4i32_align:
+; ALL_AVX: # BB#0:
+; ALL_AVX-NEXT: vmovaps (%rdi), %xmm0
+; ALL_AVX-NEXT: retq
+ %r = load <4 x i32>, <4 x i32>* %p1, align 16
+ ret <4 x i32> %r
+}
+
+define i32 * @test_store_i32(i32 %val, i32 * %p1) {
+; ALL-LABEL: test_store_i32:
+; ALL: # BB#0:
+; ALL-NEXT: movl %edi, (%rsi)
+; ALL-NEXT: movq %rsi, %rax
+; ALL-NEXT: retq
+ store i32 %val, i32* %p1
+ ret i32 * %p1;
+}
+
+define i64 * @test_store_i64(i64 %val, i64 * %p1) {
+; ALL-LABEL: test_store_i64:
+; ALL: # BB#0:
+; ALL-NEXT: movq %rdi, (%rsi)
+; ALL-NEXT: movq %rsi, %rax
+; ALL-NEXT: retq
+ store i64 %val, i64* %p1
+ ret i64 * %p1;
+}
+
+define float * @test_store_float(float %val, float * %p1) {
+;
+; SSE_FAST-LABEL: test_store_float:
+; SSE_FAST: # BB#0:
+; SSE_FAST-NEXT: movd %xmm0, %eax
+; SSE_FAST-NEXT: movl %eax, (%rdi)
+; SSE_FAST-NEXT: movq %rdi, %rax
+; SSE_FAST-NEXT: retq
+;
+; SSE_GREEDY-LABEL: test_store_float:
+; SSE_GREEDY: # BB#0:
+; SSE_GREEDY-NEXT: movss %xmm0, (%rdi)
+; SSE_GREEDY-NEXT: movq %rdi, %rax
+; SSE_GREEDY-NEXT: retq
+;
+; ALL_AVX_FAST-LABEL: test_store_float:
+; ALL_AVX_FAST: # BB#0:
+; ALL_AVX_FAST-NEXT: vmovd %xmm0, %eax
+; ALL_AVX_FAST-NEXT: movl %eax, (%rdi)
+; ALL_AVX_FAST-NEXT: movq %rdi, %rax
+; ALL_AVX_FAST-NEXT: retq
+;
+; ALL_AVX_GREEDY-LABEL: test_store_float:
+; ALL_AVX_GREEDY: # BB#0:
+; ALL_AVX_GREEDY-NEXT: vmovss %xmm0, (%rdi)
+; ALL_AVX_GREEDY-NEXT: movq %rdi, %rax
+; ALL_AVX_GREEDY-NEXT: retq
+ store float %val, float* %p1
+ ret float * %p1;
+}
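+; The FAST and GREEDY runs differ only in register-bank assignment: the fast mode routes the float through a GPR (movd plus movl), while the greedy mode stores straight from the XMM register (movss).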
+
+define double * @test_store_double(double %val, double * %p1) {
+;
+; SSE_FAST-LABEL: test_store_double:
+; SSE_FAST: # BB#0:
+; SSE_FAST-NEXT: movd %xmm0, %rax
+; SSE_FAST-NEXT: movq %rax, (%rdi)
+; SSE_FAST-NEXT: movq %rdi, %rax
+; SSE_FAST-NEXT: retq
+;
+; SSE_GREEDY-LABEL: test_store_double:
+; SSE_GREEDY: # BB#0:
+; SSE_GREEDY-NEXT: movsd %xmm0, (%rdi)
+; SSE_GREEDY-NEXT: movq %rdi, %rax
+; SSE_GREEDY-NEXT: retq
+;
+; ALL_AVX_FAST-LABEL: test_store_double:
+; ALL_AVX_FAST: # BB#0:
+; ALL_AVX_FAST-NEXT: vmovq %xmm0, %rax
+; ALL_AVX_FAST-NEXT: movq %rax, (%rdi)
+; ALL_AVX_FAST-NEXT: movq %rdi, %rax
+; ALL_AVX_FAST-NEXT: retq
+;
+; ALL_AVX_GREEDY-LABEL: test_store_double:
+; ALL_AVX_GREEDY: # BB#0:
+; ALL_AVX_GREEDY-NEXT: vmovsd %xmm0, (%rdi)
+; ALL_AVX_GREEDY-NEXT: movq %rdi, %rax
+; ALL_AVX_GREEDY-NEXT: retq
+ store double %val, double* %p1
+ ret double * %p1;
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/select-constant.mir b/test/CodeGen/X86/GlobalISel/select-constant.mir
new file mode 100644
index 000000000000..f6b97b578b92
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-constant.mir
@@ -0,0 +1,143 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK
+
+--- |
+ define i8 @const_i8() {
+ ret i8 2
+ }
+
+ define i16 @const_i16() {
+ ret i16 3
+ }
+
+ define i32 @const_i32() {
+ ret i32 4
+ }
+
+ define i64 @const_i64() {
+ ret i64 68719476720
+ }
+
+ define i64 @const_i64_u32() {
+ ret i64 1879048192
+ }
+
+ define i64 @const_i64_i32() {
+ ret i64 -1
+ }
+
+...
+---
+name: const_i8
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK-LABEL: name: const_i8
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr8 }
+registers:
+ - { id: 0, class: gpr }
+# CHECK: body:
+# CHECK: %0 = MOV8ri 2
+body: |
+ bb.1 (%ir-block.0):
+ %0(s8) = G_CONSTANT i8 2
+ %al = COPY %0(s8)
+ RET 0, implicit %al
+
+...
+---
+name: const_i16
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK-LABEL: name: const_i16
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr16 }
+registers:
+ - { id: 0, class: gpr }
+# CHECK: body:
+# CHECK: %0 = MOV16ri 3
+body: |
+ bb.1 (%ir-block.0):
+ %0(s16) = G_CONSTANT i16 3
+ %ax = COPY %0(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: const_i32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK-LABEL: name: const_i32
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr32 }
+registers:
+ - { id: 0, class: gpr }
+# CHECK: body:
+# CHECK: %0 = MOV32ri 4
+body: |
+ bb.1 (%ir-block.0):
+ %0(s32) = G_CONSTANT i32 4
+ %eax = COPY %0(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: const_i64
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK-LABEL: name: const_i64
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr64 }
+registers:
+ - { id: 0, class: gpr }
+# CHECK: body:
+# CHECK: %0 = MOV64ri 68719476720
+body: |
+ bb.1 (%ir-block.0):
+ %0(s64) = G_CONSTANT i64 68719476720
+ %rax = COPY %0(s64)
+ RET 0, implicit %rax
+
+...
+---
+name: const_i64_u32
+alignment: 4
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK-LABEL: name: const_i64_u32
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr64 }
+registers:
+ - { id: 0, class: gpr }
+# CHECK: body:
+# CHECK: %0 = MOV64ri32 1879048192
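+# (MOV64ri32 suffices here because the immediate fits in a sign-extended 32-bit field; compare const_i64 above, which needs the full-width MOV64ri.)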
+body: |
+ bb.1 (%ir-block.0):
+ %0(s64) = G_CONSTANT i64 1879048192
+ %rax = COPY %0(s64)
+ RET 0, implicit %rax
+
+...
+---
+name: const_i64_i32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK-LABEL: name: const_i64_i32
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr64 }
+registers:
+ - { id: 0, class: gpr }
+# CHECK: body:
+# CHECK: %0 = MOV64ri32 -1
+body: |
+ bb.1 (%ir-block.0):
+ %0(s64) = G_CONSTANT i64 -1
+ %rax = COPY %0(s64)
+ RET 0, implicit %rax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/x86_64-instructionselect.mir b/test/CodeGen/X86/GlobalISel/x86_64-instructionselect.mir
new file mode 100644
index 000000000000..17522c3cb45e
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/x86_64-instructionselect.mir
@@ -0,0 +1,1022 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=SSE
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=AVX
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=AVX512ALL --check-prefix=AVX512F
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512ALL --check-prefix=AVX512VL
+
+--- |
+ define i64 @test_add_i64(i64 %arg1, i64 %arg2) {
+ %ret = add i64 %arg1, %arg2
+ ret i64 %ret
+ }
+
+ define i32 @test_add_i32(i32 %arg1, i32 %arg2) {
+ %ret = add i32 %arg1, %arg2
+ ret i32 %ret
+ }
+
+ define i64 @test_sub_i64(i64 %arg1, i64 %arg2) {
+ %ret = sub i64 %arg1, %arg2
+ ret i64 %ret
+ }
+
+ define i32 @test_sub_i32(i32 %arg1, i32 %arg2) {
+ %ret = sub i32 %arg1, %arg2
+ ret i32 %ret
+ }
+
+ define float @test_add_float(float %arg1, float %arg2) {
+ %ret = fadd float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_add_double(double %arg1, double %arg2) {
+ %ret = fadd double %arg1, %arg2
+ ret double %ret
+ }
+
+ define float @test_sub_float(float %arg1, float %arg2) {
+ %ret = fsub float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_sub_double(double %arg1, double %arg2) {
+ %ret = fsub double %arg1, %arg2
+ ret double %ret
+ }
+
+ define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
+ %ret = add <4 x i32> %arg1, %arg2
+ ret <4 x i32> %ret
+ }
+
+ define <4 x i32> @test_sub_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
+ %ret = sub <4 x i32> %arg1, %arg2
+ ret <4 x i32> %ret
+ }
+
+ define <4 x float> @test_add_v4f32(<4 x float> %arg1, <4 x float> %arg2) {
+ %ret = fadd <4 x float> %arg1, %arg2
+ ret <4 x float> %ret
+ }
+
+ define <4 x float> @test_sub_v4f32(<4 x float> %arg1, <4 x float> %arg2) {
+ %ret = fsub <4 x float> %arg1, %arg2
+ ret <4 x float> %ret
+ }
+
+ define i8 @test_load_i8(i8* %p1) {
+ %r = load i8, i8* %p1
+ ret i8 %r
+ }
+
+ define i16 @test_load_i16(i16* %p1) {
+ %r = load i16, i16* %p1
+ ret i16 %r
+ }
+
+ define i32 @test_load_i32(i32* %p1) {
+ %r = load i32, i32* %p1
+ ret i32 %r
+ }
+
+ define i64 @test_load_i64(i64* %p1) {
+ %r = load i64, i64* %p1
+ ret i64 %r
+ }
+
+ define float @test_load_float(float* %p1) {
+ %r = load float, float* %p1
+ ret float %r
+ }
+
+ define float @test_load_float_vecreg(float* %p1) {
+ %r = load float, float* %p1
+ ret float %r
+ }
+
+
+ define double @test_load_double(double* %p1) {
+ %r = load double, double* %p1
+ ret double %r
+ }
+
+ define double @test_load_double_vecreg(double* %p1) {
+ %r = load double, double* %p1
+ ret double %r
+ }
+
+ define <4 x i32> @test_load_v4i32_noalign(<4 x i32>* %p1) {
+ %r = load <4 x i32>, <4 x i32>* %p1, align 1
+ ret <4 x i32> %r
+ }
+
+ define <4 x i32> @test_load_v4i32_align(<4 x i32>* %p1) {
+ %r = load <4 x i32>, <4 x i32>* %p1, align 16
+ ret <4 x i32> %r
+ }
+
+ define i32* @test_store_i32(i32 %val, i32* %p1) {
+ store i32 %val, i32* %p1
+ ret i32* %p1
+ }
+
+ define i64* @test_store_i64(i64 %val, i64* %p1) {
+ store i64 %val, i64* %p1
+ ret i64* %p1
+ }
+
+ define float* @test_store_float(float %val, float* %p1) {
+ store float %val, float* %p1
+ ret float* %p1
+ }
+
+ define float* @test_store_float_vec(float %val, float* %p1) {
+ store float %val, float* %p1
+ ret float* %p1
+ }
+
+ define double* @test_store_double(double %val, double* %p1) {
+ store double %val, double* %p1
+ ret double* %p1
+ }
+
+ define double* @test_store_double_vec(double %val, double* %p1) {
+ store double %val, double* %p1
+ ret double* %p1
+ }
+
+ define <4 x i32>* @test_store_v4i32_align(<4 x i32> %val, <4 x i32>* %p1) {
+ store <4 x i32> %val, <4 x i32>* %p1, align 16
+ ret <4 x i32>* %p1
+ }
+
+ define <4 x i32>* @test_store_v4i32_noalign(<4 x i32> %val, <4 x i32>* %p1) {
+ store <4 x i32> %val, <4 x i32>* %p1, align 1
+ ret <4 x i32>* %p1
+ }
+
+...
+
+---
+name: test_add_i64
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr64 }
+# ALL-NEXT: - { id: 1, class: gr64 }
+# ALL-NEXT: - { id: 2, class: gr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+# ALL: %0 = COPY %rdi
+# ALL-NEXT: %1 = COPY %rsi
+# ALL-NEXT: %2 = ADD64rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %rsi
+
+ %0(s64) = COPY %rdi
+ %1(s64) = COPY %rsi
+ %2(s64) = G_ADD %0, %1
+ %rax = COPY %2(s64)
+
+...
+
+---
+name: test_add_i32
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr32 }
+# ALL-NEXT: - { id: 1, class: gr32 }
+# ALL-NEXT: - { id: 2, class: gr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+# ALL: %0 = COPY %edi
+# ALL-NEXT: %1 = COPY %esi
+# ALL-NEXT: %2 = ADD32rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s32) = G_ADD %0, %1
+ %rax = COPY %2(s32)
+
+...
+
+---
+name: test_sub_i64
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr64 }
+# ALL-NEXT: - { id: 1, class: gr64 }
+# ALL-NEXT: - { id: 2, class: gr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+# ALL: %0 = COPY %rdi
+# ALL-NEXT: %1 = COPY %rsi
+# ALL-NEXT: %2 = SUB64rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+    liveins: %rdi, %rsi
+
+ %0(s64) = COPY %rdi
+ %1(s64) = COPY %rsi
+ %2(s64) = G_SUB %0, %1
+ %rax = COPY %2(s64)
+
+...
+
+---
+name: test_sub_i32
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr32 }
+# ALL-NEXT: - { id: 1, class: gr32 }
+# ALL-NEXT: - { id: 2, class: gr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+# ALL: %0 = COPY %edi
+# ALL-NEXT: %1 = COPY %esi
+# ALL-NEXT: %2 = SUB32rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s32) = G_SUB %0, %1
+    %eax = COPY %2(s32)
+
+...
+
+---
+name: test_add_float
+alignment: 4
+legalized: true
+regBankSelected: true
+selected: false
+tracksRegLiveness: true
+# ALL: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr32 }
+# NO_AVX512F-NEXT: - { id: 1, class: fr32 }
+# NO_AVX512F-NEXT: - { id: 2, class: fr32 }
+# AVX512ALL-NEXT: - { id: 0, class: fr32x }
+# AVX512ALL-NEXT: - { id: 1, class: fr32x }
+# AVX512ALL-NEXT: - { id: 2, class: fr32x }
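+# (Note: fr32x is the AVX-512 superset of fr32 that also covers xmm16-xmm31,
+# which is why the expected register class differs by subtarget.)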
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# ALL: %0 = COPY %xmm0
+# ALL-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = ADDSSrr %0, %1
+# AVX-NEXT: %2 = VADDSSrr %0, %1
+# AVX512F-NEXT: %2 = VADDSSZrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FADD %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_add_double
+alignment: 4
+legalized: true
+regBankSelected: true
+selected: false
+tracksRegLiveness: true
+# ALL: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr64 }
+# NO_AVX512F-NEXT: - { id: 1, class: fr64 }
+# NO_AVX512F-NEXT: - { id: 2, class: fr64 }
+# AVX512ALL-NEXT: - { id: 0, class: fr64x }
+# AVX512ALL-NEXT: - { id: 1, class: fr64x }
+# AVX512ALL-NEXT: - { id: 2, class: fr64x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# ALL: %0 = COPY %xmm0
+# ALL-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = ADDSDrr %0, %1
+# AVX-NEXT: %2 = VADDSDrr %0, %1
+# AVX512F-NEXT: %2 = VADDSDZrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FADD %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_sub_float
+alignment: 4
+legalized: true
+regBankSelected: true
+selected: false
+tracksRegLiveness: true
+# ALL: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr32 }
+# NO_AVX512F-NEXT: - { id: 1, class: fr32 }
+# NO_AVX512F-NEXT: - { id: 2, class: fr32 }
+# AVX512ALL-NEXT: - { id: 0, class: fr32x }
+# AVX512ALL-NEXT: - { id: 1, class: fr32x }
+# AVX512ALL-NEXT: - { id: 2, class: fr32x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# ALL: %0 = COPY %xmm0
+# ALL-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = SUBSSrr %0, %1
+# AVX-NEXT: %2 = VSUBSSrr %0, %1
+# AVX512F-NEXT: %2 = VSUBSSZrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FSUB %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_sub_double
+alignment: 4
+legalized: true
+regBankSelected: true
+selected: false
+tracksRegLiveness: true
+# ALL: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr64 }
+# NO_AVX512F-NEXT: - { id: 1, class: fr64 }
+# NO_AVX512F-NEXT: - { id: 2, class: fr64 }
+# AVX512ALL-NEXT: - { id: 0, class: fr64x }
+# AVX512ALL-NEXT: - { id: 1, class: fr64x }
+# AVX512ALL-NEXT: - { id: 2, class: fr64x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# ALL: %0 = COPY %xmm0
+# ALL-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = SUBSDrr %0, %1
+# AVX-NEXT: %2 = VSUBSDrr %0, %1
+# AVX512F-NEXT: %2 = VSUBSDZrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FSUB %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+...
+---
+name: test_add_v4i32
+alignment: 4
+legalized: true
+regBankSelected: true
+selected: false
+tracksRegLiveness: true
+# ALL: registers:
+# NO_AVX512VL-NEXT: - { id: 0, class: vr128 }
+# NO_AVX512VL-NEXT: - { id: 1, class: vr128 }
+# NO_AVX512VL-NEXT: - { id: 2, class: vr128 }
+# AVX512VL-NEXT: - { id: 0, class: vr128x }
+# AVX512VL-NEXT: - { id: 1, class: vr128x }
+# AVX512VL-NEXT: - { id: 2, class: vr128x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# ALL: %0 = COPY %xmm0
+# ALL-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = PADDDrr %0, %1
+# AVX-NEXT: %2 = VPADDDrr %0, %1
+# AVX512F-NEXT: %2 = VPADDDrr %0, %1
+# AVX512VL-NEXT: %2 = VPADDDZ128rr %0, %1
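+# (128-bit ops only get the EVEX Z128 forms when AVX512VL is available;
+# plain AVX512F falls back to the VEX encoding here.)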
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<4 x s32>) = COPY %xmm0
+ %1(<4 x s32>) = COPY %xmm1
+ %2(<4 x s32>) = G_ADD %0, %1
+ %xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_sub_v4i32
+alignment: 4
+legalized: true
+regBankSelected: true
+selected: false
+tracksRegLiveness: true
+# ALL: registers:
+# NO_AVX512VL-NEXT: - { id: 0, class: vr128 }
+# NO_AVX512VL-NEXT: - { id: 1, class: vr128 }
+# NO_AVX512VL-NEXT: - { id: 2, class: vr128 }
+# AVX512VL-NEXT: - { id: 0, class: vr128x }
+# AVX512VL-NEXT: - { id: 1, class: vr128x }
+# AVX512VL-NEXT: - { id: 2, class: vr128x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# ALL: %0 = COPY %xmm0
+# ALL-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = PSUBDrr %0, %1
+# AVX-NEXT: %2 = VPSUBDrr %0, %1
+# AVX512F-NEXT: %2 = VPSUBDrr %0, %1
+# AVX512VL-NEXT: %2 = VPSUBDZ128rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<4 x s32>) = COPY %xmm0
+ %1(<4 x s32>) = COPY %xmm1
+ %2(<4 x s32>) = G_SUB %0, %1
+ %xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_add_v4f32
+alignment: 4
+legalized: true
+regBankSelected: true
+selected: false
+tracksRegLiveness: true
+# ALL: registers:
+# NO_AVX512VL-NEXT: - { id: 0, class: vr128 }
+# NO_AVX512VL-NEXT: - { id: 1, class: vr128 }
+# NO_AVX512VL-NEXT: - { id: 2, class: vr128 }
+# AVX512VL-NEXT: - { id: 0, class: vr128x }
+# AVX512VL-NEXT: - { id: 1, class: vr128x }
+# AVX512VL-NEXT: - { id: 2, class: vr128x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# ALL: %0 = COPY %xmm0
+# ALL-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = ADDPSrr %0, %1
+# AVX-NEXT: %2 = VADDPSrr %0, %1
+# AVX512F-NEXT: %2 = VADDPSrr %0, %1
+# AVX512VL-NEXT: %2 = VADDPSZ128rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<4 x s32>) = COPY %xmm0
+ %1(<4 x s32>) = COPY %xmm1
+ %2(<4 x s32>) = G_FADD %0, %1
+ %xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_sub_v4f32
+alignment: 4
+legalized: true
+regBankSelected: true
+selected: false
+tracksRegLiveness: true
+# ALL: registers:
+# NO_AVX512VL-NEXT: - { id: 0, class: vr128 }
+# NO_AVX512VL-NEXT: - { id: 1, class: vr128 }
+# NO_AVX512VL-NEXT: - { id: 2, class: vr128 }
+# AVX512VL-NEXT: - { id: 0, class: vr128x }
+# AVX512VL-NEXT: - { id: 1, class: vr128x }
+# AVX512VL-NEXT: - { id: 2, class: vr128x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# ALL: %0 = COPY %xmm0
+# ALL-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = SUBPSrr %0, %1
+# AVX-NEXT: %2 = VSUBPSrr %0, %1
+# AVX512F-NEXT: %2 = VSUBPSrr %0, %1
+# AVX512VL-NEXT: %2 = VSUBPSZ128rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<4 x s32>) = COPY %xmm0
+ %1(<4 x s32>) = COPY %xmm1
+ %2(<4 x s32>) = G_FSUB %0, %1
+ %xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit %xmm0
+
+...
+---
+# ALL-LABEL: name: test_load_i8
+name: test_load_i8
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# ALL: - { id: 0, class: gr64 }
+# ALL: - { id: 1, class: gr8 }
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+# ALL: %0 = COPY %rdi
+# ALL: %1 = MOV8rm %0, 1, _, 0, _ :: (load 1 from %ir.p1)
+# ALL: %al = COPY %1
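+# (In the checks above, the operands after the base register follow X86's
+# memory-reference tuple: scale, index, displacement, segment; '_' marks an
+# unset register operand.)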
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(s8) = G_LOAD %0(p0) :: (load 1 from %ir.p1)
+ %al = COPY %1(s8)
+ RET 0, implicit %al
+
+...
+---
+# ALL-LABEL: name: test_load_i16
+name: test_load_i16
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# ALL: - { id: 0, class: gr64 }
+# ALL: - { id: 1, class: gr16 }
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+# ALL: %0 = COPY %rdi
+# ALL: %1 = MOV16rm %0, 1, _, 0, _ :: (load 2 from %ir.p1)
+# ALL: %ax = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(s16) = G_LOAD %0(p0) :: (load 2 from %ir.p1)
+ %ax = COPY %1(s16)
+ RET 0, implicit %ax
+
+...
+---
+# ALL-LABEL: name: test_load_i32
+name: test_load_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# ALL: - { id: 0, class: gr64 }
+# ALL: - { id: 1, class: gr32 }
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+# ALL: %0 = COPY %rdi
+# ALL: %1 = MOV32rm %0, 1, _, 0, _ :: (load 4 from %ir.p1)
+# ALL: %eax = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
+ %eax = COPY %1(s32)
+ RET 0, implicit %eax
+
+...
+---
+# ALL-LABEL: name: test_load_i64
+name: test_load_i64
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# ALL: - { id: 0, class: gr64 }
+# ALL: - { id: 1, class: gr64 }
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+# ALL: %0 = COPY %rdi
+# ALL: %1 = MOV64rm %0, 1, _, 0, _ :: (load 8 from %ir.p1)
+# ALL: %rax = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
+ %rax = COPY %1(s64)
+ RET 0, implicit %rax
+
+...
+---
+# ALL-LABEL: name: test_load_float
+name: test_load_float
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# ALL: - { id: 0, class: gr64 }
+# ALL: - { id: 1, class: gr32 }
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+# ALL: %0 = COPY %rdi
+# ALL: %1 = MOV32rm %0, 1, _, 0, _ :: (load 4 from %ir.p1)
+# ALL: %xmm0 = COPY %1
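+# (With the destination on the gpr bank, the float load selects to a plain
+# MOV32rm and only then moves to %xmm0; the _vecreg variant below loads
+# straight into a vector register.)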
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
+ %xmm0 = COPY %1(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+# ALL-LABEL: name: test_load_float_vecreg
+name: test_load_float_vecreg
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# ALL: - { id: 0, class: gr64 }
+# NO_AVX512F: - { id: 1, class: fr32 }
+# AVX512ALL: - { id: 1, class: fr32x }
+ - { id: 0, class: gpr }
+ - { id: 1, class: vecr }
+# ALL: %0 = COPY %rdi
+# SSE: %1 = MOVSSrm %0, 1, _, 0, _ :: (load 4 from %ir.p1)
+# AVX: %1 = VMOVSSrm %0, 1, _, 0, _ :: (load 4 from %ir.p1)
+# AVX512ALL: %1 = VMOVSSZrm %0, 1, _, 0, _ :: (load 4 from %ir.p1)
+# ALL: %xmm0 = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
+ %xmm0 = COPY %1(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+# ALL-LABEL: name: test_load_double
+name: test_load_double
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# ALL: - { id: 0, class: gr64 }
+# ALL: - { id: 1, class: gr64 }
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+# ALL: %0 = COPY %rdi
+# ALL: %1 = MOV64rm %0, 1, _, 0, _ :: (load 8 from %ir.p1)
+# ALL: %xmm0 = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
+ %xmm0 = COPY %1(s64)
+ RET 0, implicit %xmm0
+
+...
+---
+# ALL-LABEL: name: test_load_double_vecreg
+name: test_load_double_vecreg
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# ALL: - { id: 0, class: gr64 }
+# NO_AVX512F: - { id: 1, class: fr64 }
+# AVX512ALL: - { id: 1, class: fr64x }
+ - { id: 0, class: gpr }
+ - { id: 1, class: vecr }
+# ALL: %0 = COPY %rdi
+# SSE: %1 = MOVSDrm %0, 1, _, 0, _ :: (load 8 from %ir.p1)
+# AVX: %1 = VMOVSDrm %0, 1, _, 0, _ :: (load 8 from %ir.p1)
+# AVX512ALL: %1 = VMOVSDZrm %0, 1, _, 0, _ :: (load 8 from %ir.p1)
+# ALL: %xmm0 = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
+ %xmm0 = COPY %1(s64)
+ RET 0, implicit %xmm0
+
+...
+---
+# ALL-LABEL: name: test_load_v4i32_noalign
+name: test_load_v4i32_noalign
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# ALL: - { id: 0, class: gr64 }
+# NO_AVX512F: - { id: 1, class: vr128 }
+# AVX512ALL: - { id: 1, class: vr128x }
+ - { id: 0, class: gpr }
+ - { id: 1, class: vecr }
+# ALL: %0 = COPY %rdi
+# SSE: %1 = MOVUPSrm %0, 1, _, 0, _ :: (load 16 from %ir.p1, align 1)
+# AVX: %1 = VMOVUPSrm %0, 1, _, 0, _ :: (load 16 from %ir.p1, align 1)
+# AVX512F: %1 = VMOVUPSZ128rm_NOVLX %0, 1, _, 0, _ :: (load 16 from %ir.p1, align 1)
+# AVX512VL: %1 = VMOVUPSZ128rm %0, 1, _, 0, _ :: (load 16 from %ir.p1, align 1)
+# ALL: %xmm0 = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.p1, align 1)
+ %xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit %xmm0
+
+...
+---
+# ALL-LABEL: name: test_load_v4i32_align
+name: test_load_v4i32_align
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# ALL: - { id: 0, class: gr64 }
+# NO_AVX512F: - { id: 1, class: vr128 }
+# AVX512ALL: - { id: 1, class: vr128x }
+ - { id: 0, class: gpr }
+ - { id: 1, class: vecr }
+# ALL: %0 = COPY %rdi
+# SSE: %1 = MOVAPSrm %0, 1, _, 0, _ :: (load 16 from %ir.p1)
+# AVX: %1 = VMOVAPSrm %0, 1, _, 0, _ :: (load 16 from %ir.p1)
+# AVX512F: %1 = VMOVAPSZ128rm_NOVLX %0, 1, _, 0, _ :: (load 16 from %ir.p1)
+# AVX512VL: %1 = VMOVAPSZ128rm %0, 1, _, 0, _ :: (load 16 from %ir.p1)
+# ALL: %xmm0 = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.p1)
+ %xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit %xmm0
+
+...
+---
+# ALL-LABEL: name: test_store_i32
+name: test_store_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# ALL: - { id: 0, class: gr32 }
+# ALL: - { id: 1, class: gr64 }
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+# ALL: %0 = COPY %edi
+# ALL: %1 = COPY %rsi
+# ALL: MOV32mr %1, 1, _, 0, _, %0 :: (store 4 into %ir.p1)
+# ALL: %rax = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %rsi
+
+ %0(s32) = COPY %edi
+ %1(p0) = COPY %rsi
+ G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
+ %rax = COPY %1(p0)
+ RET 0, implicit %rax
+
+...
+---
+# ALL-LABEL: name: test_store_i64
+name: test_store_i64
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# ALL: - { id: 0, class: gr64 }
+# ALL: - { id: 1, class: gr64 }
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+# ALL: %0 = COPY %rdi
+# ALL: %1 = COPY %rsi
+# ALL: MOV64mr %1, 1, _, 0, _, %0 :: (store 8 into %ir.p1)
+# ALL: %rax = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %rsi
+
+ %0(s64) = COPY %rdi
+ %1(p0) = COPY %rsi
+ G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1)
+ %rax = COPY %1(p0)
+ RET 0, implicit %rax
+
+...
+---
+# ALL-LABEL: name: test_store_float
+name: test_store_float
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# ALL: - { id: 0, class: fr32x }
+# ALL: - { id: 1, class: gr64 }
+# ALL: - { id: 2, class: gr32 }
+ - { id: 0, class: vecr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+# ALL: %0 = COPY %xmm0
+# ALL: %1 = COPY %rdi
+# ALL: %2 = COPY %0
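+# (A cross-bank copy: the value leaves the vector bank so the scalar
+# MOV32mr below can store it from a GPR.)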
+# ALL: MOV32mr %1, 1, _, 0, _, %2 :: (store 4 into %ir.p1)
+# ALL: %rax = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %xmm0
+
+ %0(s32) = COPY %xmm0
+ %1(p0) = COPY %rdi
+ %2(s32) = COPY %0(s32)
+ G_STORE %2(s32), %1(p0) :: (store 4 into %ir.p1)
+ %rax = COPY %1(p0)
+ RET 0, implicit %rax
+
+...
+---
+# ALL-LABEL: name: test_store_float_vec
+name: test_store_float_vec
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# NO_AVX512F: - { id: 0, class: fr32 }
+# AVX512ALL: - { id: 0, class: fr32x }
+# ALL: - { id: 1, class: gr64 }
+ - { id: 0, class: vecr }
+ - { id: 1, class: gpr }
+# ALL: %0 = COPY %xmm0
+# ALL: %1 = COPY %rdi
+# SSE: MOVSSmr %1, 1, _, 0, _, %0 :: (store 4 into %ir.p1)
+# AVX: VMOVSSmr %1, 1, _, 0, _, %0 :: (store 4 into %ir.p1)
+# AVX512ALL: VMOVSSZmr %1, 1, _, 0, _, %0 :: (store 4 into %ir.p1)
+# ALL: %rax = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %xmm0
+
+ %0(s32) = COPY %xmm0
+ %1(p0) = COPY %rdi
+ G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
+ %rax = COPY %1(p0)
+ RET 0, implicit %rax
+
+...
+---
+# ALL-LABEL: name: test_store_double
+name: test_store_double
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# ALL: - { id: 0, class: fr64x }
+# ALL: - { id: 1, class: gr64 }
+# ALL: - { id: 2, class: gr64 }
+ - { id: 0, class: vecr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+# ALL: %0 = COPY %xmm0
+# ALL: %1 = COPY %rdi
+# ALL: %2 = COPY %0
+# ALL: MOV64mr %1, 1, _, 0, _, %2 :: (store 8 into %ir.p1)
+# ALL: %rax = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %xmm0
+
+ %0(s64) = COPY %xmm0
+ %1(p0) = COPY %rdi
+ %2(s64) = COPY %0(s64)
+ G_STORE %2(s64), %1(p0) :: (store 8 into %ir.p1)
+ %rax = COPY %1(p0)
+ RET 0, implicit %rax
+
+...
+---
+# ALL-LABEL: name: test_store_double_vec
+name: test_store_double_vec
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# NO_AVX512F: - { id: 0, class: fr64 }
+# AVX512ALL: - { id: 0, class: fr64x }
+# ALL: - { id: 1, class: gr64 }
+ - { id: 0, class: vecr }
+ - { id: 1, class: gpr }
+# ALL: %0 = COPY %xmm0
+# ALL: %1 = COPY %rdi
+# SSE: MOVSDmr %1, 1, _, 0, _, %0 :: (store 8 into %ir.p1)
+# AVX: VMOVSDmr %1, 1, _, 0, _, %0 :: (store 8 into %ir.p1)
+# AVX512ALL: VMOVSDZmr %1, 1, _, 0, _, %0 :: (store 8 into %ir.p1)
+# ALL: %rax = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %xmm0
+
+ %0(s64) = COPY %xmm0
+ %1(p0) = COPY %rdi
+ G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1)
+ %rax = COPY %1(p0)
+ RET 0, implicit %rax
+
+...
+---
+# ALL-LABEL: name: test_store_v4i32_align
+name: test_store_v4i32_align
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# NO_AVX512F: - { id: 0, class: vr128 }
+# AVX512ALL: - { id: 0, class: vr128x }
+# ALL: - { id: 1, class: gr64 }
+ - { id: 0, class: vecr }
+ - { id: 1, class: gpr }
+# ALL: %0 = COPY %xmm0
+# ALL: %1 = COPY %rdi
+# SSE: MOVAPSmr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1)
+# AVX: VMOVAPSmr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1)
+# AVX512F: VMOVAPSZ128mr_NOVLX %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1)
+# AVX512VL: VMOVAPSZ128mr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1)
+# ALL: %rax = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %xmm0
+
+ %0(<4 x s32>) = COPY %xmm0
+ %1(p0) = COPY %rdi
+ G_STORE %0(<4 x s32>), %1(p0) :: (store 16 into %ir.p1, align 16)
+ %rax = COPY %1(p0)
+ RET 0, implicit %rax
+
+...
+---
+# ALL-LABEL: name: test_store_v4i32_noalign
+name: test_store_v4i32_noalign
+alignment: 4
+legalized: true
+regBankSelected: true
+registers:
+# NO_AVX512F: - { id: 0, class: vr128 }
+# AVX512ALL: - { id: 0, class: vr128x }
+# ALL: - { id: 1, class: gr64 }
+ - { id: 0, class: vecr }
+ - { id: 1, class: gpr }
+# ALL: %0 = COPY %xmm0
+# ALL: %1 = COPY %rdi
+# SSE: MOVUPSmr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1, align 1)
+# AVX: VMOVUPSmr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1, align 1)
+# AVX512F: VMOVUPSZ128mr_NOVLX %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1, align 1)
+# AVX512VL: VMOVUPSZ128mr %1, 1, _, 0, _, %0 :: (store 16 into %ir.p1, align 1)
+# ALL: %rax = COPY %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %xmm0
+
+ %0(<4 x s32>) = COPY %xmm0
+ %1(p0) = COPY %rdi
+ G_STORE %0(<4 x s32>), %1(p0) :: (store 16 into %ir.p1, align 1)
+ %rax = COPY %1(p0)
+ RET 0, implicit %rax
+
+...
diff --git a/test/CodeGen/X86/MergeConsecutiveStores.ll b/test/CodeGen/X86/MergeConsecutiveStores.ll
index 4d7cb765d7b9..4303b6254464 100644
--- a/test/CodeGen/X86/MergeConsecutiveStores.ll
+++ b/test/CodeGen/X86/MergeConsecutiveStores.ll
@@ -1,6 +1,5 @@
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx -fixup-byte-word-insts=1 < %s | FileCheck -check-prefix=CHECK -check-prefix=BWON %s
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx -fixup-byte-word-insts=0 < %s | FileCheck -check-prefix=CHECK -check-prefix=BWOFF %s
-; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx -addr-sink-using-gep=1 < %s | FileCheck -check-prefix=CHECK -check-prefix=BWON %s
%struct.A = type { i8, i8, i8, i8, i8, i8, i8, i8 }
%struct.B = type { i32, i32, i32, i32, i32, i32, i32, i32 }
@@ -111,8 +110,7 @@ define void @merge_const_store_vec(i32 %count, %struct.B* nocapture %p) nounwind
; CHECK-LABEL: merge_nonconst_store:
; CHECK: movl $67305985
; CHECK: movb
-; CHECK: movb
-; CHECK: movb
+; CHECK: movw
; CHECK: movb
; CHECK: ret
define void @merge_nonconst_store(i32 %count, i8 %zz, %struct.A* nocapture %p) nounwind uwtable noinline ssp {
@@ -292,16 +290,12 @@ block4: ; preds = %4, %.lr.ph
ret void
}
-;; On x86, even unaligned copies should be merged to vector ops.
-;; TODO: however, this cannot happen at the moment, due to brokenness
-;; in MergeConsecutiveStores. See UseAA FIXME in DAGCombiner.cpp
-;; visitSTORE.
-
+;; On x86, even unaligned copies can be merged to vector ops.
; CHECK-LABEL: merge_loads_no_align:
; load:
-; CHECK-NOT: vmovups ;; TODO
+; CHECK: vmovups
; store:
-; CHECK-NOT: vmovups ;; TODO
+; CHECK: vmovups
; CHECK: ret
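+;; (With merging enabled, the copy collapses to unaligned vector load and
+;; store ops, hence the vmovups pair above.)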
define void @merge_loads_no_align(i32 %count, %struct.B* noalias nocapture %q, %struct.B* noalias nocapture %p) nounwind uwtable noinline ssp {
%a1 = icmp sgt i32 %count, 0
@@ -583,8 +577,8 @@ define void @merge_vec_element_and_scalar_load([6 x i64]* %array) {
; CHECK-LABEL: merge_vec_element_and_scalar_load
; CHECK: movq (%rdi), %rax
+; CHECK-NEXT: movq 8(%rdi), %rcx
; CHECK-NEXT: movq %rax, 32(%rdi)
-; CHECK-NEXT: movq 8(%rdi), %rax
-; CHECK-NEXT: movq %rax, 40(%rdi)
+; CHECK-NEXT: movq %rcx, 40(%rdi)
; CHECK-NEXT: retq
}
diff --git a/test/CodeGen/X86/StackColoring-dbg.ll b/test/CodeGen/X86/StackColoring-dbg.ll
index 15be7aa1029f..0ebd01d1c4ed 100644
--- a/test/CodeGen/X86/StackColoring-dbg.ll
+++ b/test/CodeGen/X86/StackColoring-dbg.ll
@@ -15,16 +15,16 @@ entry:
br label %for.body
for.body:
- call void @llvm.lifetime.end(i64 -1, i8* %0) nounwind
- call void @llvm.lifetime.start(i64 -1, i8* %x.i) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %0) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %x.i) nounwind
call void @llvm.dbg.declare(metadata i8* %x.i, metadata !22, metadata !DIExpression()) nounwind, !dbg !DILocation(scope: !2)
br label %for.body
}
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!23}
diff --git a/test/CodeGen/X86/StackColoring.ll b/test/CodeGen/X86/StackColoring.ll
index f974cdc30a21..93888c470e2d 100644
--- a/test/CodeGen/X86/StackColoring.ll
+++ b/test/CodeGen/X86/StackColoring.ll
@@ -15,14 +15,14 @@ entry:
%a2 = alloca [16 x i8*], align 8
%b = bitcast [17 x i8*]* %a to i8*
%b2 = bitcast [16 x i8*]* %a2 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %b)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b)
%t1 = call i32 @foo(i32 %in, i8* %b)
%t2 = call i32 @foo(i32 %in, i8* %b)
- call void @llvm.lifetime.end(i64 -1, i8* %b)
- call void @llvm.lifetime.start(i64 -1, i8* %b2)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b2)
%t3 = call i32 @foo(i32 %in, i8* %b2)
%t4 = call i32 @foo(i32 %in, i8* %b2)
- call void @llvm.lifetime.end(i64 -1, i8* %b2)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b2)
%t5 = add i32 %t1, %t2
%t6 = add i32 %t3, %t4
%t7 = add i32 %t5, %t6
@@ -40,22 +40,22 @@ entry:
%a2 = alloca [16 x i8*], align 8
%b = bitcast [17 x i8*]* %a to i8*
%b2 = bitcast [16 x i8*]* %a2 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %b)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b)
%t1 = call i32 @foo(i32 %in, i8* %b)
%t2 = call i32 @foo(i32 %in, i8* %b)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.start(i64 -1, i8* %b2)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b2)
%t3 = call i32 @foo(i32 %in, i8* %b2)
%t4 = call i32 @foo(i32 %in, i8* %b2)
- call void @llvm.lifetime.end(i64 -1, i8* %b2)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b2)
%t5 = add i32 %t1, %t2
%t6 = add i32 %t3, %t4
%t7 = add i32 %t5, %t6
- call void @llvm.lifetime.end(i64 -1, i8* %b)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b)
ret i32 %t7
bb3:
- call void @llvm.lifetime.end(i64 -1, i8* %b)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b)
ret i32 0
}
@@ -69,16 +69,16 @@ entry:
%a2 = alloca [16 x i8*], align 8
%b = bitcast [17 x i8*]* %a to i8*
%b2 = bitcast [16 x i8*]* %a2 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %b)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b)
%t1 = call i32 @foo(i32 %in, i8* %b)
%t2 = call i32 @foo(i32 %in, i8* %b)
- call void @llvm.lifetime.end(i64 -1, i8* %b)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.start(i64 -1, i8* %b2)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b2)
%t3 = call i32 @foo(i32 %in, i8* %b2)
%t4 = call i32 @foo(i32 %in, i8* %b2)
- call void @llvm.lifetime.end(i64 -1, i8* %b2)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b2)
%t5 = add i32 %t1, %t2
%t6 = add i32 %t3, %t4
%t7 = add i32 %t5, %t6
@@ -102,21 +102,21 @@ entry:
%b2 = bitcast [13 x i8*]* %a2 to i8*
%b3 = bitcast [12 x i8*]* %a3 to i8*
%b4 = bitcast [11 x i8*]* %a4 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %b4)
- call void @llvm.lifetime.start(i64 -1, i8* %b1)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b4)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b1)
%t1 = call i32 @foo(i32 %in, i8* %b1)
%t2 = call i32 @foo(i32 %in, i8* %b1)
- call void @llvm.lifetime.end(i64 -1, i8* %b1)
- call void @llvm.lifetime.start(i64 -1, i8* %b2)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b1)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b2)
%t9 = call i32 @foo(i32 %in, i8* %b2)
%t8 = call i32 @foo(i32 %in, i8* %b2)
- call void @llvm.lifetime.end(i64 -1, i8* %b2)
- call void @llvm.lifetime.start(i64 -1, i8* %b3)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b2)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b3)
%t3 = call i32 @foo(i32 %in, i8* %b3)
%t4 = call i32 @foo(i32 %in, i8* %b3)
- call void @llvm.lifetime.end(i64 -1, i8* %b3)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b3)
%t11 = call i32 @foo(i32 %in, i8* %b4)
- call void @llvm.lifetime.end(i64 -1, i8* %b4)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b4)
%t5 = add i32 %t1, %t2
%t6 = add i32 %t3, %t4
%t7 = add i32 %t5, %t6
@@ -137,23 +137,23 @@ entry:
%b2 = bitcast [13 x i8*]* %a2 to i8*
%b3 = bitcast [12 x i8*]* %a3 to i8*
%b4 = bitcast [11 x i8*]* %a4 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %b1)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b1)
%t1 = call i32 @foo(i32 %in, i8* %b1)
%t2 = call i32 @foo(i32 %in, i8* %b1)
- call void @llvm.lifetime.end(i64 -1, i8* %b1)
- call void @llvm.lifetime.start(i64 -1, i8* %b2)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b1)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b2)
%t9 = call i32 @foo(i32 %in, i8* %b2)
%t8 = call i32 @foo(i32 %in, i8* %b2)
- call void @llvm.lifetime.end(i64 -1, i8* %b2)
- call void @llvm.lifetime.start(i64 -1, i8* %b3)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b2)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b3)
%t3 = call i32 @foo(i32 %in, i8* %b3)
%t4 = call i32 @foo(i32 %in, i8* %b3)
- call void @llvm.lifetime.end(i64 -1, i8* %b3)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b3)
br i1 undef, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.start(i64 -1, i8* %b4)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b4)
%t11 = call i32 @foo(i32 %in, i8* %b4)
- call void @llvm.lifetime.end(i64 -1, i8* %b4)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b4)
%t5 = add i32 %t1, %t2
%t6 = add i32 %t3, %t4
%t7 = add i32 %t5, %t6
@@ -174,13 +174,13 @@ entry:
%a2 = alloca [16 x i8*], align 8
%b = bitcast [17 x i8*]* %a to i8*
%b2 = bitcast [16 x i8*]* %a2 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %b)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b)
%t1 = call i32 @foo(i32 %in, i8* %b)
%t2 = call i32 @foo(i32 %in, i8* %b)
- call void @llvm.lifetime.end(i64 -1, i8* %b)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.start(i64 -1, i8* %b2)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b2)
%t3 = call i32 @foo(i32 %in, i8* %b2)
%t4 = call i32 @foo(i32 %in, i8* %b2)
%t5 = add i32 %t1, %t2
@@ -200,13 +200,13 @@ entry:
%a2 = alloca [16 x i8*], align 8
%b = bitcast [17 x i8*]* %a to i8*
%b2 = bitcast [16 x i8*]* %a2 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %b)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b)
%t1 = call i32 @foo(i32 %in, i8* %b)
%t2 = call i32 @foo(i32 %in, i8* %b)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.end(i64 -1, i8* %b)
- call void @llvm.lifetime.start(i64 -1, i8* %b2)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b2)
%t3 = call i32 @foo(i32 %in, i8* %b2)
%t4 = call i32 @foo(i32 %in, i8* %b2)
%t5 = add i32 %t1, %t2
@@ -229,10 +229,10 @@ entry:
%b2 = bitcast [16 x i8*]* %a2 to i8*
%t1 = call i32 @foo(i32 %in, i8* %b)
%t2 = call i32 @foo(i32 %in, i8* %b)
- call void @llvm.lifetime.end(i64 -1, i8* %b)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.start(i64 -1, i8* %b2)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b2)
%t3 = call i32 @foo(i32 %in, i8* %b2)
%t4 = call i32 @foo(i32 %in, i8* %b2)
%t5 = add i32 %t1, %t2
@@ -254,19 +254,19 @@ entry:
%A.i = alloca [100 x i32], align 4
%B.i = alloca [100 x i32], align 4
%0 = bitcast [100 x i32]* %A.i to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %0) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %0) nounwind
%1 = bitcast [100 x i32]* %B.i to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %1) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %1) nounwind
call void @bar([100 x i32]* %A.i, [100 x i32]* %B.i) nounwind
- call void @llvm.lifetime.end(i64 -1, i8* %0) nounwind
- call void @llvm.lifetime.end(i64 -1, i8* %1) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %0) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %1) nounwind
%2 = bitcast [100 x i32]* %A.i1 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %2) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %2) nounwind
%3 = bitcast [100 x i32]* %B.i2 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %3) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %3) nounwind
call void @bar([100 x i32]* %A.i1, [100 x i32]* %B.i2) nounwind
- call void @llvm.lifetime.end(i64 -1, i8* %2) nounwind
- call void @llvm.lifetime.end(i64 -1, i8* %3) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %2) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %3) nounwind
ret void
}
@@ -281,7 +281,7 @@ entry:
%b2 = bitcast [16 x i8*]* %a2 to i8*
%t1 = call i32 @foo(i32 %in, i8* %b)
%t2 = call i32 @foo(i32 %in, i8* %b)
- call void @llvm.lifetime.end(i64 -1, i8* %b)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b)
br i1 %d, label %bb0, label %bb1
bb0:
@@ -294,13 +294,13 @@ bb1:
bb2:
%split = phi i8* [ %I1, %bb0 ], [ %I2, %bb1 ]
- call void @llvm.lifetime.start(i64 -1, i8* %split)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %split)
%t3 = call i32 @foo(i32 %in, i8* %b2)
%t4 = call i32 @foo(i32 %in, i8* %b2)
%t5 = add i32 %t1, %t2
%t6 = add i32 %t3, %t4
%t7 = add i32 %t5, %t6
- call void @llvm.lifetime.end(i64 -1, i8* %split)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %split)
ret i32 %t7
bb3:
ret i32 0
@@ -318,21 +318,21 @@ entry:
%A.i = alloca [100 x i32], align 4
%B.i = alloca [100 x i32], align 4
%0 = bitcast [100 x i32]* %A.i to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %0) nounwind ; <---- start #1
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %0) nounwind ; <---- start #1
%1 = bitcast [100 x i32]* %B.i to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %1) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %1) nounwind
call void @bar([100 x i32]* %A.i, [100 x i32]* %B.i) nounwind
- call void @llvm.lifetime.end(i64 -1, i8* %0) nounwind
- call void @llvm.lifetime.end(i64 -1, i8* %1) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %0) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %1) nounwind
%2 = bitcast [100 x i32]* %A.i1 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %2) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %2) nounwind
%3 = bitcast [100 x i32]* %B.i2 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %3) nounwind
- call void @llvm.lifetime.start(i64 -1, i8* %0) nounwind ; <---- start #2
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %3) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %0) nounwind ; <---- start #2
call void @bar([100 x i32]* %A.i1, [100 x i32]* %B.i2) nounwind
- call void @llvm.lifetime.end(i64 -1, i8* %2) nounwind
- call void @llvm.lifetime.end(i64 -1, i8* %0) nounwind
- call void @llvm.lifetime.end(i64 -1, i8* %3) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %2) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %0) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %3) nounwind
ret void
}
@@ -344,11 +344,11 @@ entry:
%b2 = bitcast [16 x i8*]* %a2 to i8*
%t1 = call i32 @foo(i32 %in, i8* %b)
%t2 = call i32 @foo(i32 %in, i8* %b)
- call void @llvm.lifetime.end(i64 -1, i8* %b)
- call void @llvm.lifetime.start(i64 -1, i8* %b)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.start(i64 -1, i8* %b2)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b2)
%t3 = call i32 @foo(i32 %in, i8* %b2)
%t4 = call i32 @foo(i32 %in, i8* %b2)
%t5 = add i32 %t1, %t2
@@ -369,11 +369,11 @@ define void @myCall_pr15707() {
%buf1 = alloca i8, i32 100000, align 16
%buf2 = alloca i8, i32 100000, align 16
- call void @llvm.lifetime.start(i64 -1, i8* %buf1)
- call void @llvm.lifetime.end(i64 -1, i8* %buf1)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %buf1)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %buf1)
- call void @llvm.lifetime.start(i64 -1, i8* %buf1)
- call void @llvm.lifetime.start(i64 -1, i8* %buf2)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %buf1)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %buf2)
%result1 = call i32 @foo(i32 0, i8* %buf1)
%result2 = call i32 @foo(i32 0, i8* %buf2)
ret void
@@ -390,12 +390,12 @@ entry:
%A.i = alloca [100 x i32], align 4
%B.i = alloca [100 x i32], align 4
%0 = bitcast [100 x i32]* %A.i to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %0) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %0) nounwind
%1 = bitcast [100 x i32]* %B.i to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %1) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %1) nounwind
call void @bar([100 x i32]* %A.i, [100 x i32]* %B.i) nounwind
- call void @llvm.lifetime.end(i64 -1, i8* %0) nounwind
- call void @llvm.lifetime.end(i64 -1, i8* %1) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %0) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %1) nounwind
br label %block2
block2:
@@ -418,13 +418,13 @@ define i32 @shady_range(i32 %argc, i8** nocapture %argv) uwtable {
%b8 = bitcast [4 x %struct.Klass]* %b.i to i8*
; I am used outside the lifetime zone below:
%z2 = getelementptr inbounds [4 x %struct.Klass], [4 x %struct.Klass]* %a.i, i64 0, i64 0, i32 0
- call void @llvm.lifetime.start(i64 -1, i8* %a8)
- call void @llvm.lifetime.start(i64 -1, i8* %b8)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %a8)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b8)
%z3 = load i32, i32* %z2, align 16
%r = call i32 @foo(i32 %z3, i8* %a8)
%r2 = call i32 @foo(i32 %z3, i8* %b8)
- call void @llvm.lifetime.end(i64 -1, i8* %a8)
- call void @llvm.lifetime.end(i64 -1, i8* %b8)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %a8)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b8)
ret i32 9
}
@@ -446,38 +446,38 @@ entry:
%b4 = alloca [128 x i32], align 16
%b5 = alloca [128 x i32], align 16
%tmp = bitcast [128 x i32]* %b1 to i8*
- call void @llvm.lifetime.start(i64 512, i8* %tmp)
+ call void @llvm.lifetime.start.p0i8(i64 512, i8* %tmp)
%tmp1 = bitcast [128 x i32]* %b2 to i8*
- call void @llvm.lifetime.start(i64 512, i8* %tmp1)
+ call void @llvm.lifetime.start.p0i8(i64 512, i8* %tmp1)
%and = and i32 %x, 1
%tobool = icmp eq i32 %and, 0
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
%tmp2 = bitcast [128 x i32]* %b3 to i8*
- call void @llvm.lifetime.start(i64 512, i8* %tmp2)
+ call void @llvm.lifetime.start.p0i8(i64 512, i8* %tmp2)
%a1 = getelementptr inbounds [128 x i32], [128 x i32]* %b1, i64 0, i64 0
%a2 = getelementptr inbounds [128 x i32], [128 x i32]* %b3, i64 0, i64 0
call void @initb(i32* %a1, i32* %a2, i32* null)
- call void @llvm.lifetime.end(i64 512, i8* %tmp2)
+ call void @llvm.lifetime.end.p0i8(i64 512, i8* %tmp2)
br label %if.end
if.else: ; preds = %entry
%tmp3 = bitcast [128 x i32]* %b4 to i8*
- call void @llvm.lifetime.start(i64 512, i8* %tmp3)
+ call void @llvm.lifetime.start.p0i8(i64 512, i8* %tmp3)
%tmp4 = bitcast [128 x i32]* %b5 to i8*
- call void @llvm.lifetime.start(i64 512, i8* %tmp4)
+ call void @llvm.lifetime.start.p0i8(i64 512, i8* %tmp4)
%a3 = getelementptr inbounds [128 x i32], [128 x i32]* %b2, i64 0, i64 0
%a4 = getelementptr inbounds [128 x i32], [128 x i32]* %b4, i64 0, i64 0
%a5 = getelementptr inbounds [128 x i32], [128 x i32]* %b5, i64 0, i64 0
call void @initb(i32* %a3, i32* %a4, i32* %a5) #3
- call void @llvm.lifetime.end(i64 512, i8* %tmp4)
- call void @llvm.lifetime.end(i64 512, i8* %tmp3)
+ call void @llvm.lifetime.end.p0i8(i64 512, i8* %tmp4)
+ call void @llvm.lifetime.end.p0i8(i64 512, i8* %tmp3)
br label %if.end
if.end: ; preds = %if.else, %if.then
- call void @llvm.lifetime.end(i64 512, i8* %tmp1)
- call void @llvm.lifetime.end(i64 512, i8* %tmp)
+ call void @llvm.lifetime.end.p0i8(i64 512, i8* %tmp1)
+ call void @llvm.lifetime.end.p0i8(i64 512, i8* %tmp)
ret i32 0
}
@@ -499,9 +499,9 @@ entry:
%b2 = alloca [128 x i32], align 16
%b3 = alloca [128 x i32], align 16
%tmp = bitcast [128 x i32]* %b1 to i8*
- call void @llvm.lifetime.start(i64 512, i8* %tmp) #3
+ call void @llvm.lifetime.start.p0i8(i64 512, i8* %tmp) #3
%tmp1 = bitcast [128 x i32]* %b2 to i8*
- call void @llvm.lifetime.start(i64 512, i8* %tmp1) #3
+ call void @llvm.lifetime.start.p0i8(i64 512, i8* %tmp1) #3
%and = and i32 %x, 1
%tobool = icmp eq i32 %and, 0
br i1 %tobool, label %if.else, label %if.then
@@ -526,9 +526,9 @@ while.body.lr.ph: ; preds = %if.else
while.body: ; preds = %while.body.lr.ph, %while.body
%x.addr.06 = phi i32 [ %x, %while.body.lr.ph ], [ %dec, %while.body ]
%dec = add nsw i32 %x.addr.06, -1
- call void @llvm.lifetime.start(i64 512, i8* %tmp2) #3
+ call void @llvm.lifetime.start.p0i8(i64 512, i8* %tmp2) #3
call void @inita(i32* %arraydecay3) #3
- call void @llvm.lifetime.end(i64 512, i8* %tmp2) #3
+ call void @llvm.lifetime.end.p0i8(i64 512, i8* %tmp2) #3
%tobool2 = icmp eq i32 %dec, 0
br i1 %tobool2, label %if.end.loopexit, label %while.body
@@ -536,8 +536,8 @@ if.end.loopexit: ; preds = %while.body
br label %if.end
if.end: ; preds = %if.end.loopexit, %if.else, %if.then
- call void @llvm.lifetime.end(i64 512, i8* %tmp1) #3
- call void @llvm.lifetime.end(i64 512, i8* %tmp) #3
+ call void @llvm.lifetime.end.p0i8(i64 512, i8* %tmp1) #3
+ call void @llvm.lifetime.end.p0i8(i64 512, i8* %tmp) #3
ret i32 0
}
@@ -556,25 +556,25 @@ entry:
%buffer.i = alloca [12 x i32], align 16
%abc = alloca [12 x i32], align 16
%tmp = bitcast [12 x i32]* %buffer.i to i8*
- call void @llvm.lifetime.start(i64 48, i8* %tmp)
+ call void @llvm.lifetime.start.p0i8(i64 48, i8* %tmp)
%idxprom.i = sext i32 %y to i64
%arrayidx.i = getelementptr inbounds [12 x i32], [12 x i32]* %buffer.i, i64 0, i64 %idxprom.i
call void @inita(i32* %arrayidx.i)
%add.i = add nsw i32 %x, %y
- call void @llvm.lifetime.end(i64 48, i8* %tmp)
+ call void @llvm.lifetime.end.p0i8(i64 48, i8* %tmp)
%tobool = icmp eq i32 %y, 0
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
%tmp1 = bitcast [12 x i32]* %abc to i8*
- call void @llvm.lifetime.start(i64 48, i8* %tmp1)
+ call void @llvm.lifetime.start.p0i8(i64 48, i8* %tmp1)
%arrayidx = getelementptr inbounds [12 x i32], [12 x i32]* %abc, i64 0, i64 %idxprom.i
call void @inita(i32* %arrayidx)
- call void @llvm.lifetime.start(i64 48, i8* %tmp)
+ call void @llvm.lifetime.start.p0i8(i64 48, i8* %tmp)
call void @inita(i32* %arrayidx.i)
%add.i9 = add nsw i32 %add.i, %y
- call void @llvm.lifetime.end(i64 48, i8* %tmp)
- call void @llvm.lifetime.end(i64 48, i8* %tmp1)
+ call void @llvm.lifetime.end.p0i8(i64 48, i8* %tmp)
+ call void @llvm.lifetime.end.p0i8(i64 48, i8* %tmp1)
br label %if.end
if.end: ; preds = %if.then, %entry
@@ -588,8 +588,8 @@ declare void @initb(i32*,i32*,i32*)
declare void @bar([100 x i32]* , [100 x i32]*) nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
declare i32 @foo(i32, i8*)
diff --git a/test/CodeGen/X86/absolute-cmp.ll b/test/CodeGen/X86/absolute-cmp.ll
new file mode 100644
index 000000000000..01e8a90177cc
--- /dev/null
+++ b/test/CodeGen/X86/absolute-cmp.ll
@@ -0,0 +1,39 @@
+; RUN: llc < %s | FileCheck %s
+; RUN: llc -relocation-model=pic < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@cmp8 = external hidden global i8, !absolute_symbol !0
+@cmp32 = external hidden global i8, !absolute_symbol !1
+
+declare void @f()
+
+define void @foo8(i64 %val) {
+ ; CHECK: cmpq $cmp8@ABS8, %rdi
+ %cmp = icmp ule i64 %val, ptrtoint (i8* @cmp8 to i64)
+ br i1 %cmp, label %t, label %f
+
+t:
+ call void @f()
+ ret void
+
+f:
+ ret void
+}
+
+define void @foo32(i64 %val) {
+ ; CHECK: cmpq $cmp32, %rdi
+ %cmp = icmp ule i64 %val, ptrtoint (i8* @cmp32 to i64)
+ br i1 %cmp, label %t, label %f
+
+t:
+ call void @f()
+ ret void
+
+f:
+ ret void
+}
+
+!0 = !{i64 0, i64 128}
+!1 = !{i64 0, i64 2147483648}
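+
+; (The !absolute_symbol ranges bound each symbol's value: [0, 128) fits a
+; signed 8-bit immediate, enabling the @ABS8 form in @foo8, while
+; [0, 2^31) needs the full 32-bit immediate in @foo32.)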
diff --git a/test/CodeGen/X86/absolute-rotate.ll b/test/CodeGen/X86/absolute-rotate.ll
index c0ecb82adc2f..6240e8d3f76f 100644
--- a/test/CodeGen/X86/absolute-rotate.ll
+++ b/test/CodeGen/X86/absolute-rotate.ll
@@ -11,7 +11,7 @@ declare void @f()
define void @foo(i64 %val) {
%shr = lshr i64 %val, zext (i8 ptrtoint (i8* @align to i8) to i64)
%shl = shl i64 %val, zext (i8 sub (i8 64, i8 ptrtoint (i8* @align to i8)) to i64)
- ; CHECK: rorq $align, %rdi
+ ; CHECK: rorq $align@ABS8, %rdi
%ror = or i64 %shr, %shl
%cmp = icmp ult i64 %ror, 109
br i1 %cmp, label %t, label %f
@@ -24,4 +24,4 @@ f:
ret void
}
-!0 = !{i64 0, i64 256}
+!0 = !{i64 0, i64 128}
diff --git a/test/CodeGen/X86/add-of-carry.ll b/test/CodeGen/X86/add-of-carry.ll
index 44b587af3aaa..b9f7fc68cf68 100644
--- a/test/CodeGen/X86/add-of-carry.ll
+++ b/test/CodeGen/X86/add-of-carry.ll
@@ -1,13 +1,18 @@
-; RUN: llc < %s -march=x86 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s
+
+; These tests use adc/sbb in place of set+add/sub. Should this transform
+; be enabled by micro-architecture rather than as part of generic lowering/isel?
+
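+; (For reference: 'icmp ult' of a sum against one of its addends is the
+; canonical unsigned-overflow check, which is what lets the zext'd carry
+; fold into a single adcl $0 below.)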
; <rdar://problem/8449754>
define i32 @test1(i32 %sum, i32 %x) nounwind readnone ssp {
-entry:
; CHECK-LABEL: test1:
-; CHECK: movl
-; CHECK-NEXT: addl
-; CHECK-NEXT: adcl $0
-; CHECK-NEXT: ret
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: adcl $0, %eax
+; CHECK-NEXT: retl
%add4 = add i32 %x, %sum
%cmp = icmp ult i32 %add4, %x
%inc = zext i1 %cmp to i32
@@ -16,14 +21,18 @@ entry:
}
; <rdar://problem/12579915>
+
define i32 @test2(i32 %x, i32 %y, i32 %res) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: test2:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: sbbl $0, %eax
+; CHECK-NEXT: retl
%cmp = icmp ugt i32 %x, %y
%dec = sext i1 %cmp to i32
%dec.res = add nsw i32 %dec, %res
ret i32 %dec.res
-; CHECK-LABEL: test2:
-; CHECK: cmpl
-; CHECK: sbbl
-; CHECK: ret
}
+
diff --git a/test/CodeGen/X86/adde-carry.ll b/test/CodeGen/X86/adde-carry.ll
index e86adf4b1784..9483a6b492c5 100644
--- a/test/CodeGen/X86/adde-carry.ll
+++ b/test/CodeGen/X86/adde-carry.ll
@@ -1,6 +1,14 @@
-; RUN: llc -march=x86-64 < %s | FileCheck %s -check-prefix=CHECK-64
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s
define void @a(i64* nocapture %s, i64* nocapture %t, i64 %a, i64 %b, i64 %c) nounwind {
+; CHECK-LABEL: a:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addq %rcx, %rdx
+; CHECK-NEXT: adcq $0, %r8
+; CHECK-NEXT: movq %r8, (%rdi)
+; CHECK-NEXT: movq %rdx, (%rsi)
+; CHECK-NEXT: retq
entry:
%0 = zext i64 %a to i128
%1 = zext i64 %b to i128
@@ -14,7 +22,173 @@ entry:
%8 = trunc i128 %2 to i64
store i64 %8, i64* %t, align 8
ret void
+}
+
+define void @b(i32* nocapture %r, i64 %a, i64 %b, i32 %c) nounwind {
+; CHECK-LABEL: b:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addq %rdx, %rsi
+; CHECK-NEXT: adcl $0, %ecx
+; CHECK-NEXT: movl %ecx, (%rdi)
+; CHECK-NEXT: retq
+entry:
+ %0 = zext i64 %a to i128
+ %1 = zext i64 %b to i128
+ %2 = zext i32 %c to i128
+ %3 = add i128 %1, %0
+ %4 = lshr i128 %3, 64
+ %5 = add i128 %4, %2
+ %6 = trunc i128 %5 to i32
+ store i32 %6, i32* %r, align 4
+ ret void
+}
+
+define void @c(i16* nocapture %r, i64 %a, i64 %b, i16 %c) nounwind {
+; CHECK-LABEL: c:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addq %rdx, %rsi
+; CHECK-NEXT: adcl $0, %ecx
+; CHECK-NEXT: movw %cx, (%rdi)
+; CHECK-NEXT: retq
+entry:
+ %0 = zext i64 %a to i128
+ %1 = zext i64 %b to i128
+ %2 = zext i16 %c to i128
+ %3 = add i128 %1, %0
+ %4 = lshr i128 %3, 64
+ %5 = add i128 %4, %2
+ %6 = trunc i128 %5 to i16
+ store i16 %6, i16* %r, align 4
+ ret void
+}
+
+define void @d(i8* nocapture %r, i64 %a, i64 %b, i8 %c) nounwind {
+; CHECK-LABEL: d:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addq %rdx, %rsi
+; CHECK-NEXT: adcl $0, %ecx
+; CHECK-NEXT: movb %cl, (%rdi)
+; CHECK-NEXT: retq
+entry:
+ %0 = zext i64 %a to i128
+ %1 = zext i64 %b to i128
+ %2 = zext i8 %c to i128
+ %3 = add i128 %1, %0
+ %4 = lshr i128 %3, 64
+ %5 = add i128 %4, %2
+ %6 = trunc i128 %5 to i8
+ store i8 %6, i8* %r, align 4
+ ret void
+}
+
+%scalar = type { [4 x i64] }
-; CHECK-64: addq
-; CHECK-64: adcq $0
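+
+; %scalar models a 256-bit integer as four 64-bit limbs; the add chain in
+; pr31719 must ripple the carry between limbs.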
+define %scalar @pr31719(%scalar* nocapture readonly %this, %scalar %arg.b) {
+; CHECK-LABEL: pr31719:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addq (%rsi), %rdx
+; CHECK-NEXT: sbbq %r10, %r10
+; CHECK-NEXT: andl $1, %r10d
+; CHECK-NEXT: addq 8(%rsi), %rcx
+; CHECK-NEXT: sbbq %r11, %r11
+; CHECK-NEXT: andl $1, %r11d
+; CHECK-NEXT: addq %r10, %rcx
+; CHECK-NEXT: adcq $0, %r11
+; CHECK-NEXT: addq 16(%rsi), %r8
+; CHECK-NEXT: sbbq %rax, %rax
+; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: addq %r11, %r8
+; CHECK-NEXT: adcq $0, %rax
+; CHECK-NEXT: addq 24(%rsi), %r9
+; CHECK-NEXT: addq %rax, %r9
+; CHECK-NEXT: movq %rdx, (%rdi)
+; CHECK-NEXT: movq %rcx, 8(%rdi)
+; CHECK-NEXT: movq %r8, 16(%rdi)
+; CHECK-NEXT: movq %r9, 24(%rdi)
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = extractvalue %scalar %arg.b, 0
+ %.elt = extractvalue [4 x i64] %0, 0
+ %.elt24 = extractvalue [4 x i64] %0, 1
+ %.elt26 = extractvalue [4 x i64] %0, 2
+ %.elt28 = extractvalue [4 x i64] %0, 3
+ %1 = getelementptr inbounds %scalar , %scalar* %this, i64 0, i32 0, i64 0
+ %2 = load i64, i64* %1, align 8
+ %3 = zext i64 %2 to i128
+ %4 = zext i64 %.elt to i128
+ %5 = add nuw nsw i128 %3, %4
+ %6 = trunc i128 %5 to i64
+ %7 = lshr i128 %5, 64
+ %8 = getelementptr inbounds %scalar , %scalar * %this, i64 0, i32 0, i64 1
+ %9 = load i64, i64* %8, align 8
+ %10 = zext i64 %9 to i128
+ %11 = zext i64 %.elt24 to i128
+ %12 = add nuw nsw i128 %10, %11
+ %13 = add nuw nsw i128 %12, %7
+ %14 = trunc i128 %13 to i64
+ %15 = lshr i128 %13, 64
+ %16 = getelementptr inbounds %scalar , %scalar* %this, i64 0, i32 0, i64 2
+ %17 = load i64, i64* %16, align 8
+ %18 = zext i64 %17 to i128
+ %19 = zext i64 %.elt26 to i128
+ %20 = add nuw nsw i128 %18, %19
+ %21 = add nuw nsw i128 %20, %15
+ %22 = trunc i128 %21 to i64
+ %23 = lshr i128 %21, 64
+ %24 = getelementptr inbounds %scalar , %scalar* %this, i64 0, i32 0, i64 3
+ %25 = load i64, i64* %24, align 8
+ %26 = zext i64 %25 to i128
+ %27 = zext i64 %.elt28 to i128
+ %28 = add nuw nsw i128 %26, %27
+ %29 = add nuw nsw i128 %28, %23
+ %30 = trunc i128 %29 to i64
+ %31 = insertvalue [4 x i64] undef, i64 %6, 0
+ %32 = insertvalue [4 x i64] %31, i64 %14, 1
+ %33 = insertvalue [4 x i64] %32, i64 %22, 2
+ %34 = insertvalue [4 x i64] %33, i64 %30, 3
+ %35 = insertvalue %scalar undef, [4 x i64] %34, 0
+ ret %scalar %35
+}
+
+%accumulator = type { i64, i64, i32 }
+
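+; muladd accumulates a 64x64->128-bit product into the three fields,
+; rippling the carry through the trailing i32.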
+define void @muladd(%accumulator* nocapture %this, i64 %arg.a, i64 %arg.b) {
+; CHECK-LABEL: muladd:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movq %rdx, %rax
+; CHECK-NEXT: mulq %rsi
+; CHECK-NEXT: addq (%rdi), %rax
+; CHECK-NEXT: adcq $0, %rdx
+; CHECK-NEXT: movq %rax, (%rdi)
+; CHECK-NEXT: addq 8(%rdi), %rdx
+; CHECK-NEXT: movq %rdx, 8(%rdi)
+; CHECK-NEXT: sbbl %eax, %eax
+; CHECK-NEXT: subl %eax, 16(%rdi)
+; CHECK-NEXT: retq
+entry:
+ %0 = zext i64 %arg.a to i128
+ %1 = zext i64 %arg.b to i128
+ %2 = mul nuw i128 %1, %0
+ %3 = getelementptr inbounds %accumulator, %accumulator* %this, i64 0, i32 0
+ %4 = load i64, i64* %3, align 8
+ %5 = zext i64 %4 to i128
+ %6 = add i128 %5, %2
+ %7 = trunc i128 %6 to i64
+ store i64 %7, i64* %3, align 8
+ %8 = lshr i128 %6, 64
+ %9 = getelementptr inbounds %accumulator, %accumulator* %this, i64 0, i32 1
+ %10 = load i64, i64* %9, align 8
+ %11 = zext i64 %10 to i128
+ %12 = add nuw nsw i128 %8, %11
+ %13 = trunc i128 %12 to i64
+ store i64 %13, i64* %9, align 8
+ %14 = lshr i128 %12, 64
+ %15 = getelementptr inbounds %accumulator, %accumulator* %this, i64 0, i32 2
+ %16 = load i32, i32* %15, align 4
+ %17 = zext i32 %16 to i128
+ %18 = add nuw nsw i128 %14, %17
+ %19 = trunc i128 %18 to i32
+ store i32 %19, i32* %15, align 4
+ ret void
}
diff --git a/test/CodeGen/X86/aes_intrinsics.ll b/test/CodeGen/X86/aes_intrinsics.ll
index fc1a2cc61289..fc3d55a05429 100644
--- a/test/CodeGen/X86/aes_intrinsics.ll
+++ b/test/CodeGen/X86/aes_intrinsics.ll
@@ -1,7 +1,17 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+aes,-avx | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+aes,-avx -show-mc-encoding | FileCheck %s
+; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+aes,+avx -show-mc-encoding | FileCheck %s --check-prefix=VCHECK
define <2 x i64> @test_x86_aesni_aesdec(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: aesdec
+; CHECK-LABEL: test_x86_aesni_aesdec:
+; CHECK: ## BB#0:
+; CHECK-NEXT: aesdec %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0xde,0xc1]
+; CHECK-NEXT: retl ## encoding: [0xc3]
+;
+; VCHECK-LABEL: test_x86_aesni_aesdec:
+; VCHECK: ## BB#0:
+; VCHECK-NEXT: vaesdec %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0xde,0xc1]
+; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
ret <2 x i64> %res
}
@@ -9,7 +19,15 @@ declare <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64>, <2 x i64>) nounwind readnone
define <2 x i64> @test_x86_aesni_aesdeclast(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: aesdeclast
+; CHECK-LABEL: test_x86_aesni_aesdeclast:
+; CHECK: ## BB#0:
+; CHECK-NEXT: aesdeclast %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0xdf,0xc1]
+; CHECK-NEXT: retl ## encoding: [0xc3]
+;
+; VCHECK-LABEL: test_x86_aesni_aesdeclast:
+; VCHECK: ## BB#0:
+; VCHECK-NEXT: vaesdeclast %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0xdf,0xc1]
+; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
ret <2 x i64> %res
}
@@ -17,7 +35,15 @@ declare <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64>, <2 x i64>) nounwind read
define <2 x i64> @test_x86_aesni_aesenc(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: aesenc
+; CHECK-LABEL: test_x86_aesni_aesenc:
+; CHECK: ## BB#0:
+; CHECK-NEXT: aesenc %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0xdc,0xc1]
+; CHECK-NEXT: retl ## encoding: [0xc3]
+;
+; VCHECK-LABEL: test_x86_aesni_aesenc:
+; VCHECK: ## BB#0:
+; VCHECK-NEXT: vaesenc %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0xdc,0xc1]
+; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
ret <2 x i64> %res
}
@@ -25,7 +51,15 @@ declare <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64>, <2 x i64>) nounwind readnone
define <2 x i64> @test_x86_aesni_aesenclast(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: aesenclast
+; CHECK-LABEL: test_x86_aesni_aesenclast:
+; CHECK: ## BB#0:
+; CHECK-NEXT: aesenclast %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0xdd,0xc1]
+; CHECK-NEXT: retl ## encoding: [0xc3]
+;
+; VCHECK-LABEL: test_x86_aesni_aesenclast:
+; VCHECK: ## BB#0:
+; VCHECK-NEXT: vaesenclast %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0xdd,0xc1]
+; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
ret <2 x i64> %res
}
@@ -33,7 +67,15 @@ declare <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64>, <2 x i64>) nounwind read
define <2 x i64> @test_x86_aesni_aesimc(<2 x i64> %a0) {
- ; CHECK: aesimc
+; CHECK-LABEL: test_x86_aesni_aesimc:
+; CHECK: ## BB#0:
+; CHECK-NEXT: aesimc %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0xdb,0xc0]
+; CHECK-NEXT: retl ## encoding: [0xc3]
+;
+; VCHECK-LABEL: test_x86_aesni_aesimc:
+; VCHECK: ## BB#0:
+; VCHECK-NEXT: vaesimc %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0xdb,0xc0]
+; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64> %a0) ; <<2 x i64>> [#uses=1]
ret <2 x i64> %res
}
@@ -41,7 +83,15 @@ declare <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64>) nounwind readnone
define <2 x i64> @test_x86_aesni_aeskeygenassist(<2 x i64> %a0) {
- ; CHECK: aeskeygenassist
+; CHECK-LABEL: test_x86_aesni_aeskeygenassist:
+; CHECK: ## BB#0:
+; CHECK-NEXT: aeskeygenassist $7, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0xdf,0xc0,0x07]
+; CHECK-NEXT: retl ## encoding: [0xc3]
+;
+; VCHECK-LABEL: test_x86_aesni_aeskeygenassist:
+; VCHECK: ## BB#0:
+; VCHECK-NEXT: vaeskeygenassist $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0xdf,0xc0,0x07]
+; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64> %a0, i8 7) ; <<2 x i64>> [#uses=1]
ret <2 x i64> %res
}
diff --git a/test/CodeGen/X86/and-sink.ll b/test/CodeGen/X86/and-sink.ll
new file mode 100644
index 000000000000..46e50f2a6a74
--- /dev/null
+++ b/test/CodeGen/X86/and-sink.ll
@@ -0,0 +1,181 @@
+; RUN: llc -mtriple=i686-unknown -verify-machineinstrs < %s | FileCheck %s
+; RUN: opt < %s -codegenprepare -S -mtriple=x86_64-unknown-unknown | FileCheck --check-prefix=CHECK-CGP %s
+
+@A = global i32 zeroinitializer
+@B = global i32 zeroinitializer
+@C = global i32 zeroinitializer
+
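+; In these tests, "sinking" means CodeGenPrepare duplicates the 'and' (and
+; its compare) into the block that branches on the result, so instruction
+; selection can fold them into a single test-and-branch.
+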
+; Test that 'and' is sunk into bb0.
+define i32 @and_sink1(i32 %a, i1 %c) {
+; CHECK-LABEL: and_sink1:
+; CHECK: testb $1,
+; CHECK: je
+; CHECK-NOT: andl $4,
+; CHECK: movl $0, A
+; CHECK: testb $4,
+; CHECK: jne
+
+; CHECK-CGP-LABEL: @and_sink1(
+; CHECK-CGP-NOT: and i32
+ %and = and i32 %a, 4
+ br i1 %c, label %bb0, label %bb2
+bb0:
+; CHECK-CGP-LABEL: bb0:
+; CHECK-CGP: and i32
+; CHECK-CGP-NEXT: icmp eq i32
+; CHECK-CGP-NEXT: store
+; CHECK-CGP-NEXT: br
+ %cmp = icmp eq i32 %and, 0
+ store i32 0, i32* @A
+ br i1 %cmp, label %bb1, label %bb2
+bb1:
+ ret i32 1
+bb2:
+ ret i32 0
+}
+
+; Test that both 'and' and cmp get sunk to bb1.
+define i32 @and_sink2(i32 %a, i1 %c, i1 %c2) {
+; CHECK-LABEL: and_sink2:
+; CHECK: movl $0, A
+; CHECK: testb $1,
+; CHECK: je
+; CHECK-NOT: andl $4,
+; CHECK: movl $0, B
+; CHECK: testb $1,
+; CHECK: je
+; CHECK: movl $0, C
+; CHECK: testb $4,
+; CHECK: jne
+
+; CHECK-CGP-LABEL: @and_sink2(
+; CHECK-CGP-NOT: and i32
+ %and = and i32 %a, 4
+ store i32 0, i32* @A
+ br i1 %c, label %bb0, label %bb3
+bb0:
+; CHECK-CGP-LABEL: bb0:
+; CHECK-CGP-NOT: and i32
+; CHECK-CGP-NOT: icmp
+ %cmp = icmp eq i32 %and, 0
+ store i32 0, i32* @B
+ br i1 %c2, label %bb1, label %bb3
+bb1:
+; CHECK-CGP-LABEL: bb1:
+; CHECK-CGP: and i32
+; CHECK-CGP-NEXT: icmp eq i32
+; CHECK-CGP-NEXT: store
+; CHECK-CGP-NEXT: br
+ store i32 0, i32* @C
+ br i1 %cmp, label %bb2, label %bb0
+bb2:
+ ret i32 1
+bb3:
+ ret i32 0
+}
+
+; Test that CodeGenPrepare doesn't get stuck in a loop, sinking and hoisting a masked load.
+define i32 @and_sink3(i1 %c, i32* %p) {
+; CHECK-LABEL: and_sink3:
+; CHECK: testb $1,
+; CHECK: je
+; CHECK: movzbl
+; CHECK-DAG: movl $0, A
+; CHECK-DAG: testl %
+; CHECK: je
+
+; CHECK-CGP-LABEL: @and_sink3(
+; CHECK-CGP: load i32
+; CHECK-CGP-NEXT: and i32
+ %load = load i32, i32* %p
+ %and = and i32 %load, 255
+ br i1 %c, label %bb0, label %bb2
+bb0:
+; CHECK-CGP-LABEL: bb0:
+; CHECK-CGP-NOT: and i32
+; CHECK-CGP: icmp eq i32
+ %cmp = icmp eq i32 %and, 0
+ store i32 0, i32* @A
+ br i1 %cmp, label %bb1, label %bb2
+bb1:
+ ret i32 1
+bb2:
+ ret i32 0
+}
+
+; Test that CodeGenPrepare sinks/duplicates a non-immediate 'and'.
+define i32 @and_sink4(i32 %a, i32 %b, i1 %c) {
+; CHECK-LABEL: and_sink4:
+; CHECK: testb $1,
+; CHECK: je
+; CHECK-NOT: andl
+; CHECK-DAG: movl $0, A
+; CHECK-DAG: testl [[REG1:%[a-z0-9]+]], [[REG2:%[a-z0-9]+]]
+; CHECK: jne
+; CHECK-DAG: movl {{%[a-z0-9]+}}, B
+; CHECK-DAG: testl [[REG1]], [[REG2]]
+; CHECK: je
+
+; CHECK-CGP-LABEL: @and_sink4(
+; CHECK-CGP-NOT: and i32
+; CHECK-CGP-NOT: icmp
+ %and = and i32 %a, %b
+ %cmp = icmp eq i32 %and, 0
+ br i1 %c, label %bb0, label %bb3
+bb0:
+; CHECK-CGP-LABEL: bb0:
+; CHECK-CGP: and i32
+; CHECK-CGP-NEXT: icmp eq i32
+ store i32 0, i32* @A
+ br i1 %cmp, label %bb1, label %bb3
+bb1:
+; CHECK-CGP-LABEL: bb1:
+; CHECK-CGP: and i32
+; CHECK-CGP-NEXT: icmp eq i32
+ %add = add i32 %a, %b
+ store i32 %add, i32* @B
+ br i1 %cmp, label %bb2, label %bb3
+bb2:
+ ret i32 1
+bb3:
+ ret i32 0
+}
+
+
+; Test that CodeGenPrepare doesn't sink/duplicate a non-immediate 'and'
+; when it would increase register pressure.
+define i32 @and_sink5(i32 %a, i32 %b, i32 %a2, i32 %b2, i1 %c) {
+; CHECK-LABEL: and_sink5:
+; CHECK: testb $1,
+; CHECK: je
+; CHECK-DAG: andl {{[0-9]+\(%[a-z0-9]+\)}}, [[REG:%[a-z0-9]+]]
+; CHECK-DAG: movl $0, A
+; CHECK: jne
+; CHECK-DAG: movl {{%[a-z0-9]+}}, B
+; CHECK-DAG: testl [[REG]], [[REG]]
+; CHECK: je
+
+; CHECK-CGP-LABEL: @and_sink5(
+; CHECK-CGP: and i32
+; CHECK-CGP-NOT: icmp
+ %and = and i32 %a, %b
+ %cmp = icmp eq i32 %and, 0
+ br i1 %c, label %bb0, label %bb3
+bb0:
+; CHECK-CGP-LABEL: bb0:
+; CHECK-CGP-NOT: and i32
+; CHECK-CGP: icmp eq i32
+ store i32 0, i32* @A
+ br i1 %cmp, label %bb1, label %bb3
+bb1:
+; CHECK-CGP-LABEL: bb1:
+; CHECK-CGP-NOT: and i32
+; CHECK-CGP: icmp eq i32
+ %add = add i32 %a2, %b2
+ store i32 %add, i32* @B
+ br i1 %cmp, label %bb2, label %bb3
+bb2:
+ ret i32 1
+bb3:
+ ret i32 0
+}
diff --git a/test/CodeGen/X86/arg-copy-elide.ll b/test/CodeGen/X86/arg-copy-elide.ll
new file mode 100644
index 000000000000..b9a2eeeb7f8f
--- /dev/null
+++ b/test/CodeGen/X86/arg-copy-elide.ll
@@ -0,0 +1,299 @@
+; RUN: llc -mtriple=i686-windows < %s | FileCheck %s
+
+declare void @addrof_i1(i1*)
+declare void @addrof_i32(i32*)
+declare void @addrof_i64(i64*)
+declare void @addrof_i128(i128*)
+declare void @addrof_i32_x3(i32*, i32*, i32*)
+
+define void @simple(i32 %x) {
+entry:
+ %x.addr = alloca i32
+ store i32 %x, i32* %x.addr
+ call void @addrof_i32(i32* %x.addr)
+ ret void
+}
+
+; CHECK-LABEL: _simple:
+; CHECK: leal 4(%esp), %[[reg:[^ ]*]]
+; CHECK: pushl %[[reg]]
+; CHECK: calll _addrof_i32
+; CHECK: retl
+
+
+; We now need to load %x before calling addrof_i32 because the call could
+; mutate %x in place.
+
+define i32 @use_arg(i32 %x) {
+entry:
+ %x.addr = alloca i32
+ store i32 %x, i32* %x.addr
+ call void @addrof_i32(i32* %x.addr)
+ ret i32 %x
+}
+
+; CHECK-LABEL: _use_arg:
+; CHECK: pushl %[[csr:[^ ]*]]
+; CHECK-DAG: movl 8(%esp), %[[csr]]
+; CHECK-DAG: leal 8(%esp), %[[reg:[^ ]*]]
+; CHECK: pushl %[[reg]]
+; CHECK: calll _addrof_i32
+; CHECK: movl %[[csr]], %eax
+; CHECK: popl %[[csr]]
+; CHECK: retl
+
+; We won't copy elide for types that need legalization, such as i64 or i1.
+
+define i64 @split_i64(i64 %x) {
+entry:
+ %x.addr = alloca i64, align 4
+ store i64 %x, i64* %x.addr, align 4
+ call void @addrof_i64(i64* %x.addr)
+ ret i64 %x
+}
+
+; CHECK-LABEL: _split_i64:
+; CHECK: pushl %ebp
+; CHECK: movl %esp, %ebp
+; CHECK: pushl %[[csr2:[^ ]*]]
+; CHECK: pushl %[[csr1:[^ ]*]]
+; CHECK: andl $-8, %esp
+; CHECK-DAG: movl 8(%ebp), %[[csr1]]
+; CHECK-DAG: movl 12(%ebp), %[[csr2]]
+; CHECK-DAG: leal 8(%ebp), %[[reg:[^ ]*]]
+; CHECK: pushl %[[reg]]
+; CHECK: calll _addrof_i64
+; CHECK-DAG: movl %[[csr1]], %eax
+; CHECK-DAG: movl %[[csr2]], %edx
+; CHECK: leal -8(%ebp), %esp
+; CHECK: popl %[[csr1]]
+; CHECK: popl %[[csr2]]
+; CHECK: popl %ebp
+; CHECK: retl
+
+define i1 @i1_arg(i1 %x) {
+ %x.addr = alloca i1
+ store i1 %x, i1* %x.addr
+ call void @addrof_i1(i1* %x.addr)
+ ret i1 %x
+}
+
+; CHECK-LABEL: _i1_arg:
+; CHECK: pushl %ebx
+; CHECK: movb 8(%esp), %bl
+; CHECK: leal 8(%esp), %eax
+; CHECK: pushl %eax
+; CHECK: calll _addrof_i1
+; CHECK: addl $4, %esp
+; CHECK: movl %ebx, %eax
+; CHECK: popl %ebx
+; CHECK: retl
+
+; We can't copy elide when an i64 is split between registers and memory in a
+; fastcc function.
+
+define fastcc i64 @fastcc_split_i64(i64* %p, i64 %x) {
+entry:
+ %x.addr = alloca i64, align 4
+ store i64 %x, i64* %x.addr, align 4
+ call void @addrof_i64(i64* %x.addr)
+ ret i64 %x
+}
+
+; CHECK-LABEL: _fastcc_split_i64:
+; CHECK: pushl %ebp
+; CHECK: movl %esp, %ebp
+; CHECK-DAG: movl %edx, %[[r1:[^ ]*]]
+; CHECK-DAG: movl 8(%ebp), %[[r2:[^ ]*]]
+; CHECK-DAG: movl %[[r2]], 4(%esp)
+; CHECK-DAG: movl %[[r1]], (%esp)
+; CHECK: movl %esp, %[[reg:[^ ]*]]
+; CHECK: pushl %[[reg]]
+; CHECK: calll _addrof_i64
+; CHECK: popl %ebp
+; CHECK: retl
+
+
+; We can't copy elide when it would reduce the user-requested alignment.
+
+define void @high_alignment(i32 %x) {
+entry:
+ %x.p = alloca i32, align 128
+ store i32 %x, i32* %x.p
+ call void @addrof_i32(i32* %x.p)
+ ret void
+}
+
+; CHECK-LABEL: _high_alignment:
+; CHECK: andl $-128, %esp
+; CHECK: movl 8(%ebp), %[[reg:[^ ]*]]
+; CHECK: movl %[[reg]], (%esp)
+; CHECK: movl %esp, %[[reg:[^ ]*]]
+; CHECK: pushl %[[reg]]
+; CHECK: calll _addrof_i32
+; CHECK: retl
+
+
+; We can't copy elide when it would reduce the ABI-required alignment.
+; FIXME: We should lower the ABI alignment of i64 on Windows, since MSVC
+; doesn't guarantee it.
+
+define void @abi_alignment(i64 %x) {
+entry:
+ %x.p = alloca i64
+ store i64 %x, i64* %x.p
+ call void @addrof_i64(i64* %x.p)
+ ret void
+}
+
+; CHECK-LABEL: _abi_alignment:
+; CHECK: andl $-8, %esp
+; CHECK: movl 8(%ebp), %[[reg:[^ ]*]]
+; CHECK: movl %[[reg]], (%esp)
+; CHECK: movl %esp, %[[reg:[^ ]*]]
+; CHECK: pushl %[[reg]]
+; CHECK: calll _addrof_i64
+; CHECK: retl
+
+
+; The code we generate for this is unimportant. This is mostly a crash test.
+
+define void @split_i128(i128* %sret, i128 %x) {
+entry:
+ %x.addr = alloca i128
+ store i128 %x, i128* %x.addr
+ call void @addrof_i128(i128* %x.addr)
+ store i128 %x, i128* %sret
+ ret void
+}
+
+; CHECK-LABEL: _split_i128:
+; CHECK: pushl %ebp
+; CHECK: calll _addrof_i128
+; CHECK: retl
+
+
+; Check that we load all of x, y, and z before the call.
+
+define i32 @three_args(i32 %x, i32 %y, i32 %z) {
+entry:
+ %z.addr = alloca i32, align 4
+ %y.addr = alloca i32, align 4
+ %x.addr = alloca i32, align 4
+ store i32 %z, i32* %z.addr, align 4
+ store i32 %y, i32* %y.addr, align 4
+ store i32 %x, i32* %x.addr, align 4
+ call void @addrof_i32_x3(i32* %x.addr, i32* %y.addr, i32* %z.addr)
+ %s1 = add i32 %x, %y
+ %sum = add i32 %s1, %z
+ ret i32 %sum
+}
+
+; CHECK-LABEL: _three_args:
+; CHECK: pushl %[[csr:[^ ]*]]
+; CHECK-DAG: movl {{[0-9]+}}(%esp), %[[csr]]
+; CHECK-DAG: addl {{[0-9]+}}(%esp), %[[csr]]
+; CHECK-DAG: addl {{[0-9]+}}(%esp), %[[csr]]
+; CHECK-DAG: leal 8(%esp), %[[x:[^ ]*]]
+; CHECK-DAG: leal 12(%esp), %[[y:[^ ]*]]
+; CHECK-DAG: leal 16(%esp), %[[z:[^ ]*]]
+; CHECK: pushl %[[z]]
+; CHECK: pushl %[[y]]
+; CHECK: pushl %[[x]]
+; CHECK: calll _addrof_i32_x3
+; CHECK: movl %[[csr]], %eax
+; CHECK: popl %[[csr]]
+; CHECK: retl
+
+
+define void @two_args_same_alloca(i32 %x, i32 %y) {
+entry:
+ %x.addr = alloca i32
+ store i32 %x, i32* %x.addr
+ store i32 %y, i32* %x.addr
+ call void @addrof_i32(i32* %x.addr)
+ ret void
+}
+
+; CHECK-LABEL: _two_args_same_alloca:
+; CHECK: movl 8(%esp), {{.*}}
+; CHECK: movl {{.*}}, 4(%esp)
+; CHECK: leal 4(%esp), %[[reg:[^ ]*]]
+; CHECK: pushl %[[reg]]
+; CHECK: calll _addrof_i32
+; CHECK: retl
+
+
+define void @avoid_byval(i32* byval %x) {
+entry:
+ %x.p.p = alloca i32*
+ store i32* %x, i32** %x.p.p
+ call void @addrof_i32(i32* %x)
+ ret void
+}
+
+; CHECK-LABEL: _avoid_byval:
+; CHECK: leal {{[0-9]+}}(%esp), %[[reg:[^ ]*]]
+; CHECK: pushl %[[reg]]
+; CHECK: calll _addrof_i32
+; CHECK: retl
+
+
+define void @avoid_inalloca(i32* inalloca %x) {
+entry:
+ %x.p.p = alloca i32*
+ store i32* %x, i32** %x.p.p
+ call void @addrof_i32(i32* %x)
+ ret void
+}
+
+; CHECK-LABEL: _avoid_inalloca:
+; CHECK: leal {{[0-9]+}}(%esp), %[[reg:[^ ]*]]
+; CHECK: pushl %[[reg]]
+; CHECK: calll _addrof_i32
+; CHECK: retl
+
+
+; Don't elide the copy when the alloca is escaped with a store.
+
+define void @escape_with_store(i32 %x) {
+ %x1 = alloca i32
+ %x2 = alloca i32*
+ store i32* %x1, i32** %x2
+ %x3 = load i32*, i32** %x2
+ store i32 0, i32* %x3
+ store i32 %x, i32* %x1
+ call void @addrof_i32(i32* %x1)
+ ret void
+}
+
+; CHECK-LABEL: _escape_with_store:
+; CHECK-DAG: movl {{.*}}(%esp), %[[reg:[^ ]*]]
+; CHECK-DAG: movl $0, [[offs:[0-9]*]](%esp)
+; CHECK: movl %[[reg]], [[offs]](%esp)
+; CHECK: calll _addrof_i32
+
+
+; This test case exposed issues with the use of TokenFactor.
+
+define void @sret_and_elide(i32* sret %sret, i32 %v) {
+ %v.p = alloca i32
+ store i32 %v, i32* %v.p
+ call void @addrof_i32(i32* %v.p)
+ store i32 %v, i32* %sret
+ ret void
+}
+
+; CHECK-LABEL: _sret_and_elide:
+; CHECK: pushl
+; CHECK: pushl
+; CHECK: movl 12(%esp), %[[sret:[^ ]*]]
+; CHECK: movl 16(%esp), %[[v:[^ ]*]]
+; CHECK: leal 16(%esp), %[[reg:[^ ]*]]
+; CHECK: pushl %[[reg]]
+; CHECK: calll _addrof_i32
+; CHECK: movl %[[v]], (%[[sret]])
+; CHECK: movl %[[sret]], %eax
+; CHECK: popl
+; CHECK: popl
+; CHECK: retl
diff --git a/test/CodeGen/X86/atomic128.ll b/test/CodeGen/X86/atomic128.ll
index 1bf7bfbfa260..77bbdec826a5 100644
--- a/test/CodeGen/X86/atomic128.ll
+++ b/test/CodeGen/X86/atomic128.ll
@@ -1,20 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.9 -verify-machineinstrs -mattr=cx16 | FileCheck %s
@var = global i128 0
-define i128 @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
-; CHECK-LABEL: val_compare_and_swap:
; Due to the scheduling right after isel for cmpxchg, and given that the
; machine scheduler and copy coalescer do not disturb physical
; register live-ranges, we end up with a useless copy.
-;
-; CHECK: movq %rcx, [[TMP:%r[0-9a-z]+]]
-; CHECK: movq %rsi, %rax
-; CHECK: movq %r8, %rcx
-; CHECK: movq [[TMP]], %rbx
-; CHECK: lock
-; CHECK: cmpxchg16b (%rdi)
-
+define i128 @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
+; CHECK-LABEL: val_compare_and_swap:
+; CHECK: ## BB#0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: Lcfi0:
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: Lcfi1:
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rcx, %r9
+; CHECK-NEXT: movq %rsi, %rax
+; CHECK-NEXT: movq %r8, %rcx
+; CHECK-NEXT: movq %r9, %rbx
+; CHECK-NEXT: lock cmpxchg16b (%rdi)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
%pair = cmpxchg i128* %p, i128 %oldval, i128 %newval acquire acquire
%val = extractvalue { i128, i1 } %pair, 0
ret i128 %val
@@ -22,24 +28,31 @@ define i128 @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
define void @fetch_and_nand(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_nand:
-; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
-; CHECK-DAG: movq (%rdi), %rax
-; CHECK-DAG: movq 8(%rdi), %rdx
-
-; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
-; CHECK: movq %rdx, %rcx
-; CHECK: andq [[INCHI]], %rcx
-; CHECK: movq %rax, %rbx
- ; INCLO equivalent comes in in %rsi, so it makes sense it stays there.
-; CHECK: andq %rsi, %rbx
-; CHECK: notq %rbx
-; CHECK: notq %rcx
-; CHECK: lock
-; CHECK: cmpxchg16b (%rdi)
-; CHECK: jne [[LOOP]]
-
-; CHECK: movq %rax, _var
-; CHECK: movq %rdx, _var+8
+; CHECK: ## BB#0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: Lcfi2:
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: Lcfi3:
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdx, %r8
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: movq 8(%rdi), %rdx
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: LBB1_1: ## %atomicrmw.start
+; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movq %rdx, %rcx
+; CHECK-NEXT: andq %r8, %rcx
+; CHECK-NEXT: movq %rax, %rbx
+; CHECK-NEXT: andq %rsi, %rbx
+; CHECK-NEXT: notq %rbx
+; CHECK-NEXT: notq %rcx
+; CHECK-NEXT: lock cmpxchg16b (%rdi)
+; CHECK-NEXT: jne LBB1_1
+; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: movq %rax, {{.*}}(%rip)
+; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
%val = atomicrmw nand i128* %p, i128 %bits release
store i128 %val, i128* @var, align 16
ret void
@@ -47,23 +60,29 @@ define void @fetch_and_nand(i128* %p, i128 %bits) {
define void @fetch_and_or(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_or:
-; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
-; CHECK-DAG: movq (%rdi), %rax
-; CHECK-DAG: movq 8(%rdi), %rdx
-
-; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
-; CHECK: movq %rax, %rbx
- ; INCLO equivalent comes in in %rsi, so it makes sense it stays there.
-; CHECK: orq %rsi, %rbx
-; CHECK: movq %rdx, %rcx
-; CHECK: orq [[INCHI]], %rcx
-; CHECK: lock
-; CHECK: cmpxchg16b (%rdi)
-; CHECK: jne [[LOOP]]
-
-; CHECK: movq %rax, _var
-; CHECK: movq %rdx, _var+8
-
+; CHECK: ## BB#0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: Lcfi4:
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: Lcfi5:
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdx, %r8
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: movq 8(%rdi), %rdx
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: LBB2_1: ## %atomicrmw.start
+; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movq %rax, %rbx
+; CHECK-NEXT: orq %rsi, %rbx
+; CHECK-NEXT: movq %rdx, %rcx
+; CHECK-NEXT: orq %r8, %rcx
+; CHECK-NEXT: lock cmpxchg16b (%rdi)
+; CHECK-NEXT: jne LBB2_1
+; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: movq %rax, {{.*}}(%rip)
+; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
%val = atomicrmw or i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -71,23 +90,29 @@ define void @fetch_and_or(i128* %p, i128 %bits) {
define void @fetch_and_add(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_add:
-; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
-; CHECK-DAG: movq (%rdi), %rax
-; CHECK-DAG: movq 8(%rdi), %rdx
-
-; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
-; CHECK: movq %rax, %rbx
- ; INCLO equivalent comes in in %rsi, so it makes sense it stays there.
-; CHECK: addq %rsi, %rbx
-; CHECK: movq %rdx, %rcx
-; CHECK: adcq [[INCHI]], %rcx
-; CHECK: lock
-; CHECK: cmpxchg16b (%rdi)
-; CHECK: jne [[LOOP]]
-
-; CHECK: movq %rax, _var
-; CHECK: movq %rdx, _var+8
-
+; CHECK: ## BB#0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: Lcfi6:
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: Lcfi7:
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdx, %r8
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: movq 8(%rdi), %rdx
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: LBB3_1: ## %atomicrmw.start
+; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movq %rax, %rbx
+; CHECK-NEXT: addq %rsi, %rbx
+; CHECK-NEXT: movq %rdx, %rcx
+; CHECK-NEXT: adcq %r8, %rcx
+; CHECK-NEXT: lock cmpxchg16b (%rdi)
+; CHECK-NEXT: jne LBB3_1
+; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: movq %rax, {{.*}}(%rip)
+; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
%val = atomicrmw add i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -95,23 +120,29 @@ define void @fetch_and_add(i128* %p, i128 %bits) {
define void @fetch_and_sub(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_sub:
-; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
-; CHECK-DAG: movq (%rdi), %rax
-; CHECK-DAG: movq 8(%rdi), %rdx
-
-; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
-; CHECK: movq %rax, %rbx
- ; INCLO equivalent comes in in %rsi, so it makes sense it stays there.
-; CHECK: subq %rsi, %rbx
-; CHECK: movq %rdx, %rcx
-; CHECK: sbbq [[INCHI]], %rcx
-; CHECK: lock
-; CHECK: cmpxchg16b (%rdi)
-; CHECK: jne [[LOOP]]
-
-; CHECK: movq %rax, _var
-; CHECK: movq %rdx, _var+8
-
+; CHECK: ## BB#0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: Lcfi8:
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: Lcfi9:
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdx, %r8
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: movq 8(%rdi), %rdx
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: LBB4_1: ## %atomicrmw.start
+; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movq %rax, %rbx
+; CHECK-NEXT: subq %rsi, %rbx
+; CHECK-NEXT: movq %rdx, %rcx
+; CHECK-NEXT: sbbq %r8, %rcx
+; CHECK-NEXT: lock cmpxchg16b (%rdi)
+; CHECK-NEXT: jne LBB4_1
+; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: movq %rax, {{.*}}(%rip)
+; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
%val = atomicrmw sub i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -119,24 +150,35 @@ define void @fetch_and_sub(i128* %p, i128 %bits) {
define void @fetch_and_min(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_min:
-; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
-; CHECK-DAG: movq (%rdi), %rax
-; CHECK-DAG: movq 8(%rdi), %rdx
-
-; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpq
-; CHECK: sbbq
-; CHECK: setg
-; CHECK: cmovneq %rax, %rbx
-; CHECK: movq [[INCHI]], %rcx
-; CHECK: cmovneq %rdx, %rcx
-; CHECK: lock
-; CHECK: cmpxchg16b (%rdi)
-; CHECK: jne [[LOOP]]
-
-; CHECK: movq %rax, _var
-; CHECK: movq %rdx, _var+8
-
+; CHECK: ## BB#0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: Lcfi10:
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: Lcfi11:
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdx, %r8
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: movq 8(%rdi), %rdx
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: LBB5_1: ## %atomicrmw.start
+; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: cmpq %rax, %rsi
+; CHECK-NEXT: movq %r8, %rcx
+; CHECK-NEXT: sbbq %rdx, %rcx
+; CHECK-NEXT: setge %cl
+; CHECK-NEXT: andb $1, %cl
+; CHECK-NEXT: movq %rsi, %rbx
+; CHECK-NEXT: cmovneq %rax, %rbx
+; CHECK-NEXT: testb %cl, %cl
+; CHECK-NEXT: movq %r8, %rcx
+; CHECK-NEXT: cmovneq %rdx, %rcx
+; CHECK-NEXT: lock cmpxchg16b (%rdi)
+; CHECK-NEXT: jne LBB5_1
+; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: movq %rax, {{.*}}(%rip)
+; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
%val = atomicrmw min i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -144,24 +186,35 @@ define void @fetch_and_min(i128* %p, i128 %bits) {
define void @fetch_and_max(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_max:
-; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
-; CHECK-DAG: movq (%rdi), %rax
-; CHECK-DAG: movq 8(%rdi), %rdx
-
-; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpq
-; CHECK: sbbq
-; CHECK: setge
-; CHECK: cmovneq %rax, %rbx
-; CHECK: movq [[INCHI]], %rcx
-; CHECK: cmovneq %rdx, %rcx
-; CHECK: lock
-; CHECK: cmpxchg16b (%rdi)
-; CHECK: jne [[LOOP]]
-
-; CHECK: movq %rax, _var
-; CHECK: movq %rdx, _var+8
-
+; CHECK: ## BB#0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: Lcfi12:
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: Lcfi13:
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdx, %r8
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: movq 8(%rdi), %rdx
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: LBB6_1: ## %atomicrmw.start
+; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: cmpq %rsi, %rax
+; CHECK-NEXT: movq %rdx, %rcx
+; CHECK-NEXT: sbbq %r8, %rcx
+; CHECK-NEXT: setge %cl
+; CHECK-NEXT: andb $1, %cl
+; CHECK-NEXT: movq %rsi, %rbx
+; CHECK-NEXT: cmovneq %rax, %rbx
+; CHECK-NEXT: testb %cl, %cl
+; CHECK-NEXT: movq %r8, %rcx
+; CHECK-NEXT: cmovneq %rdx, %rcx
+; CHECK-NEXT: lock cmpxchg16b (%rdi)
+; CHECK-NEXT: jne LBB6_1
+; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: movq %rax, {{.*}}(%rip)
+; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
%val = atomicrmw max i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -169,24 +222,35 @@ define void @fetch_and_max(i128* %p, i128 %bits) {
define void @fetch_and_umin(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_umin:
-; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
-; CHECK-DAG: movq (%rdi), %rax
-; CHECK-DAG: movq 8(%rdi), %rdx
-
-; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpq
-; CHECK: sbbq
-; CHECK: seta
-; CHECK: cmovneq %rax, %rbx
-; CHECK: movq [[INCHI]], %rcx
-; CHECK: cmovneq %rdx, %rcx
-; CHECK: lock
-; CHECK: cmpxchg16b (%rdi)
-; CHECK: jne [[LOOP]]
-
-; CHECK: movq %rax, _var
-; CHECK: movq %rdx, _var+8
-
+; CHECK: ## BB#0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: Lcfi14:
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: Lcfi15:
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdx, %r8
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: movq 8(%rdi), %rdx
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: LBB7_1: ## %atomicrmw.start
+; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: cmpq %rax, %rsi
+; CHECK-NEXT: movq %r8, %rcx
+; CHECK-NEXT: sbbq %rdx, %rcx
+; CHECK-NEXT: setae %cl
+; CHECK-NEXT: andb $1, %cl
+; CHECK-NEXT: movq %rsi, %rbx
+; CHECK-NEXT: cmovneq %rax, %rbx
+; CHECK-NEXT: testb %cl, %cl
+; CHECK-NEXT: movq %r8, %rcx
+; CHECK-NEXT: cmovneq %rdx, %rcx
+; CHECK-NEXT: lock cmpxchg16b (%rdi)
+; CHECK-NEXT: jne LBB7_1
+; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: movq %rax, {{.*}}(%rip)
+; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
%val = atomicrmw umin i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -194,24 +258,35 @@ define void @fetch_and_umin(i128* %p, i128 %bits) {
define void @fetch_and_umax(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_umax:
-; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
-; CHECK-DAG: movq (%rdi), %rax
-; CHECK-DAG: movq 8(%rdi), %rdx
-
-; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpq
-; CHECK: sbbq
-; CHECK: setb
-; CHECK: cmovneq %rax, %rbx
-; CHECK: movq [[INCHI]], %rcx
-; CHECK: cmovneq %rdx, %rcx
-; CHECK: lock
-; CHECK: cmpxchg16b (%rdi)
-; CHECK: jne [[LOOP]]
-
-; CHECK: movq %rax, _var
-; CHECK: movq %rdx, _var+8
-
+; CHECK: ## BB#0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: Lcfi16:
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: Lcfi17:
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdx, %r8
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: movq 8(%rdi), %rdx
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: LBB8_1: ## %atomicrmw.start
+; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: cmpq %rax, %rsi
+; CHECK-NEXT: movq %r8, %rcx
+; CHECK-NEXT: sbbq %rdx, %rcx
+; CHECK-NEXT: setb %cl
+; CHECK-NEXT: andb $1, %cl
+; CHECK-NEXT: movq %rsi, %rbx
+; CHECK-NEXT: cmovneq %rax, %rbx
+; CHECK-NEXT: testb %cl, %cl
+; CHECK-NEXT: movq %r8, %rcx
+; CHECK-NEXT: cmovneq %rdx, %rcx
+; CHECK-NEXT: lock cmpxchg16b (%rdi)
+; CHECK-NEXT: jne LBB8_1
+; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: movq %rax, {{.*}}(%rip)
+; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
%val = atomicrmw umax i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -219,75 +294,110 @@ define void @fetch_and_umax(i128* %p, i128 %bits) {
define i128 @atomic_load_seq_cst(i128* %p) {
; CHECK-LABEL: atomic_load_seq_cst:
-; CHECK: xorl %eax, %eax
-; CHECK: xorl %edx, %edx
-; CHECK: xorl %ecx, %ecx
-; CHECK: xorl %ebx, %ebx
-; CHECK: lock
-; CHECK: cmpxchg16b (%rdi)
-
+; CHECK: ## BB#0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: Lcfi18:
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: Lcfi19:
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: xorl %ebx, %ebx
+; CHECK-NEXT: lock cmpxchg16b (%rdi)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
%r = load atomic i128, i128* %p seq_cst, align 16
ret i128 %r
}
define i128 @atomic_load_relaxed(i128* %p) {
-; CHECK: atomic_load_relaxed:
-; CHECK: xorl %eax, %eax
-; CHECK: xorl %edx, %edx
-; CHECK: xorl %ecx, %ecx
-; CHECK: xorl %ebx, %ebx
-; CHECK: lock
-; CHECK: cmpxchg16b (%rdi)
-
+; CHECK-LABEL: atomic_load_relaxed:
+; CHECK: ## BB#0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: Lcfi20:
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: Lcfi21:
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: xorl %ebx, %ebx
+; CHECK-NEXT: lock cmpxchg16b (%rdi)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
%r = load atomic i128, i128* %p monotonic, align 16
ret i128 %r
}
define void @atomic_store_seq_cst(i128* %p, i128 %in) {
; CHECK-LABEL: atomic_store_seq_cst:
-; CHECK: movq %rdx, %rcx
-; CHECK: movq %rsi, %rbx
-; CHECK: movq (%rdi), %rax
-; CHECK: movq 8(%rdi), %rdx
-
-; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
-; CHECK: lock
-; CHECK: cmpxchg16b (%rdi)
-; CHECK: jne [[LOOP]]
-; CHECK-NOT: callq ___sync_lock_test_and_set_16
-
+; CHECK: ## BB#0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: Lcfi22:
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: Lcfi23:
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdx, %rcx
+; CHECK-NEXT: movq %rsi, %rbx
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: movq 8(%rdi), %rdx
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: LBB11_1: ## %atomicrmw.start
+; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: lock cmpxchg16b (%rdi)
+; CHECK-NEXT: jne LBB11_1
+; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
store atomic i128 %in, i128* %p seq_cst, align 16
ret void
}
define void @atomic_store_release(i128* %p, i128 %in) {
; CHECK-LABEL: atomic_store_release:
-; CHECK: movq %rdx, %rcx
-; CHECK: movq %rsi, %rbx
-; CHECK: movq (%rdi), %rax
-; CHECK: movq 8(%rdi), %rdx
-
-; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
-; CHECK: lock
-; CHECK: cmpxchg16b (%rdi)
-; CHECK: jne [[LOOP]]
-
+; CHECK: ## BB#0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: Lcfi24:
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: Lcfi25:
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdx, %rcx
+; CHECK-NEXT: movq %rsi, %rbx
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: movq 8(%rdi), %rdx
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: LBB12_1: ## %atomicrmw.start
+; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: lock cmpxchg16b (%rdi)
+; CHECK-NEXT: jne LBB12_1
+; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
store atomic i128 %in, i128* %p release, align 16
ret void
}
define void @atomic_store_relaxed(i128* %p, i128 %in) {
; CHECK-LABEL: atomic_store_relaxed:
-; CHECK: movq %rdx, %rcx
-; CHECK: movq %rsi, %rbx
-; CHECK: movq (%rdi), %rax
-; CHECK: movq 8(%rdi), %rdx
-
-; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
-; CHECK: lock
-; CHECK: cmpxchg16b (%rdi)
-; CHECK: jne [[LOOP]]
-
+; CHECK: ## BB#0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: Lcfi26:
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: Lcfi27:
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdx, %rcx
+; CHECK-NEXT: movq %rsi, %rbx
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: movq 8(%rdi), %rdx
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: LBB13_1: ## %atomicrmw.start
+; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: lock cmpxchg16b (%rdi)
+; CHECK-NEXT: jne LBB13_1
+; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
store atomic i128 %in, i128* %p unordered, align 16
ret void
}
diff --git a/test/CodeGen/X86/avg.ll b/test/CodeGen/X86/avg.ll
index 99d3f206f0c1..2aaf14001758 100644
--- a/test/CodeGen/X86/avg.ll
+++ b/test/CodeGen/X86/avg.ll
@@ -140,82 +140,82 @@ define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) {
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm8[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm8, %xmm2
+; SSE2-NEXT: movdqa %xmm8, %xmm10
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm4[8],xmm10[9],xmm4[9],xmm10[10],xmm4[10],xmm10[11],xmm4[11],xmm10[12],xmm4[12],xmm10[13],xmm4[13],xmm10[14],xmm4[14],xmm10[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm10, %xmm2
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3],xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm10, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm15 = xmm11[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3],xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm11, %xmm14
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm8, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm11, %xmm15
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm4[8],xmm15[9],xmm4[9],xmm15[10],xmm4[10],xmm15[11],xmm4[11],xmm15[12],xmm4[12],xmm15[13],xmm4[13],xmm15[14],xmm4[14],xmm15[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm15, %xmm14
; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm4[4],xmm14[5],xmm4[5],xmm14[6],xmm4[6],xmm14[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm4[0],xmm15[1],xmm4[1],xmm15[2],xmm4[2],xmm15[3],xmm4[3],xmm15[4],xmm4[4],xmm15[5],xmm4[5],xmm15[6],xmm4[6],xmm15[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm15, %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm4[0],xmm15[1],xmm4[1],xmm15[2],xmm4[2],xmm15[3],xmm4[3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm7
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3],xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm11, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm2, %xmm13
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm4[4],xmm13[5],xmm4[5],xmm13[6],xmm4[6],xmm13[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm15, %xmm2
-; SSE2-NEXT: paddd %xmm9, %xmm13
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm13
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm4[4],xmm13[5],xmm4[5],xmm13[6],xmm4[6],xmm13[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSE2-NEXT: paddd %xmm11, %xmm1
+; SSE2-NEXT: paddd %xmm9, %xmm13
+; SSE2-NEXT: paddd %xmm15, %xmm2
; SSE2-NEXT: paddd %xmm14, %xmm5
-; SSE2-NEXT: paddd %xmm10, %xmm3
-; SSE2-NEXT: paddd %xmm12, %xmm6
; SSE2-NEXT: paddd %xmm8, %xmm0
+; SSE2-NEXT: paddd %xmm12, %xmm6
+; SSE2-NEXT: paddd %xmm10, %xmm3
; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1]
; SSE2-NEXT: paddd %xmm4, %xmm7
-; SSE2-NEXT: paddd %xmm4, %xmm0
-; SSE2-NEXT: paddd %xmm4, %xmm6
; SSE2-NEXT: paddd %xmm4, %xmm3
+; SSE2-NEXT: paddd %xmm4, %xmm6
+; SSE2-NEXT: paddd %xmm4, %xmm0
; SSE2-NEXT: paddd %xmm4, %xmm5
-; SSE2-NEXT: paddd %xmm4, %xmm1
-; SSE2-NEXT: paddd %xmm4, %xmm13
; SSE2-NEXT: paddd %xmm4, %xmm2
-; SSE2-NEXT: psrld $1, %xmm0
+; SSE2-NEXT: paddd %xmm4, %xmm13
+; SSE2-NEXT: paddd %xmm4, %xmm1
+; SSE2-NEXT: psrld $1, %xmm3
; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE2-NEXT: pand %xmm4, %xmm7
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: packuswb %xmm7, %xmm0
-; SSE2-NEXT: psrld $1, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: packuswb %xmm7, %xmm3
+; SSE2-NEXT: psrld $1, %xmm0
; SSE2-NEXT: psrld $1, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm3
-; SSE2-NEXT: packuswb %xmm6, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm6, %xmm0
; SSE2-NEXT: packuswb %xmm3, %xmm0
-; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: packuswb %xmm5, %xmm1
-; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: packuswb %xmm5, %xmm2
+; SSE2-NEXT: psrld $1, %xmm1
; SSE2-NEXT: psrld $1, %xmm13
; SSE2-NEXT: pand %xmm4, %xmm13
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: packuswb %xmm13, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: packuswb %xmm13, %xmm1
; SSE2-NEXT: packuswb %xmm2, %xmm1
; SSE2-NEXT: movdqu %xmm1, (%rax)
; SSE2-NEXT: movdqu %xmm0, (%rax)
@@ -234,6 +234,7 @@ define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) {
; AVX512F-NEXT: vmovdqa (%rsi), %ymm0
; AVX512F-NEXT: vpavgb (%rdi), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v32i8:
@@ -241,6 +242,7 @@ define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) {
; AVX512BW-NEXT: vmovdqa (%rsi), %ymm0
; AVX512BW-NEXT: vpavgb (%rdi), %ymm0, %ymm0
; AVX512BW-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = load <32 x i8>, <32 x i8>* %a
%2 = load <32 x i8>, <32 x i8>* %b
@@ -261,194 +263,193 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
; SSE2-NEXT: .Lcfi0:
; SSE2-NEXT: .cfi_def_cfa_offset 160
; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: movdqa 16(%rdi), %xmm2
+; SSE2-NEXT: movdqa 16(%rdi), %xmm4
; SSE2-NEXT: movdqa 32(%rdi), %xmm5
; SSE2-NEXT: movdqa 48(%rdi), %xmm6
; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm3, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm3, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm4, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm4, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm3, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm4, %xmm2
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm2, (%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm4, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm5, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm2, (%rsp) # 16-byte Spill
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm6[2,3,0,1]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm6, %xmm8
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm0[8],xmm8[9],xmm0[9],xmm8[10],xmm0[10],xmm8[11],xmm0[11],xmm8[12],xmm0[12],xmm8[13],xmm0[13],xmm8[14],xmm0[14],xmm8[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm8, %xmm1
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
; SSE2-NEXT: movdqa %xmm6, %xmm1
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3],xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm9, %xmm1
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE2-NEXT: movdqa (%rsi), %xmm15
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm15[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm15, %xmm10
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3],xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm7, %xmm14
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
+; SSE2-NEXT: movdqa (%rsi), %xmm14
+; SSE2-NEXT: movdqa %xmm14, %xmm7
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm7, %xmm15
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
-; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm13
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3],xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm14, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3]
+; SSE2-NEXT: movdqa 16(%rsi), %xmm12
+; SSE2-NEXT: movdqa %xmm12, %xmm6
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm6, %xmm13
; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm6, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3],xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm12, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
; SSE2-NEXT: movdqa 32(%rsi), %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm2, %xmm11
+; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm5, %xmm11
; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm5, %xmm3
+; SSE2-NEXT: movdqa 48(%rsi), %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm4, %xmm3
; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
-; SSE2-NEXT: movdqa 48(%rsi), %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3],xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm8, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm9, %xmm8
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Folded Reload
-; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Folded Reload
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm9 # 16-byte Reload
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm9 # 16-byte Folded Reload
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Folded Reload
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Folded Reload
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: paddd %xmm8, %xmm4
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm8 # 16-byte Reload
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm8 # 16-byte Folded Reload
; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Folded Reload
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Folded Reload
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Folded Reload
; SSE2-NEXT: paddd (%rsp), %xmm11 # 16-byte Folded Reload
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm6 # 16-byte Folded Reload
; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm12 # 16-byte Folded Reload
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
+; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm10 # 16-byte Folded Reload
+; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm6 # 16-byte Folded Reload
; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm13 # 16-byte Folded Reload
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload
; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm14 # 16-byte Folded Reload
+; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm9 # 16-byte Folded Reload
+; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload
; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm15 # 16-byte Folded Reload
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm10 # 16-byte Folded Reload
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
-; SSE2-NEXT: paddd %xmm0, %xmm10
; SSE2-NEXT: paddd %xmm0, %xmm15
-; SSE2-NEXT: paddd %xmm0, %xmm14
; SSE2-NEXT: paddd %xmm0, %xmm7
+; SSE2-NEXT: paddd %xmm0, %xmm9
+; SSE2-NEXT: paddd %xmm0, %xmm14
; SSE2-NEXT: paddd %xmm0, %xmm13
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: paddd %xmm0, %xmm12
; SSE2-NEXT: paddd %xmm0, %xmm6
+; SSE2-NEXT: paddd %xmm0, %xmm10
+; SSE2-NEXT: paddd %xmm0, %xmm12
; SSE2-NEXT: paddd %xmm0, %xmm11
-; SSE2-NEXT: paddd %xmm0, %xmm2
-; SSE2-NEXT: paddd %xmm0, %xmm4
-; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: paddd %xmm0, %xmm5
-; SSE2-NEXT: paddd %xmm0, %xmm9
; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm0, %xmm4
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: paddd %xmm0, %xmm2
; SSE2-NEXT: paddd %xmm0, %xmm8
+; SSE2-NEXT: paddd %xmm0, %xmm4
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm0, %xmm3
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: psrld $1, %xmm15
-; SSE2-NEXT: psrld $1, %xmm10
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE2-NEXT: pand %xmm0, %xmm10
; SSE2-NEXT: pand %xmm0, %xmm15
-; SSE2-NEXT: packuswb %xmm10, %xmm15
-; SSE2-NEXT: psrld $1, %xmm7
+; SSE2-NEXT: pand %xmm0, %xmm7
+; SSE2-NEXT: packuswb %xmm15, %xmm7
; SSE2-NEXT: psrld $1, %xmm14
+; SSE2-NEXT: psrld $1, %xmm9
+; SSE2-NEXT: pand %xmm0, %xmm9
; SSE2-NEXT: pand %xmm0, %xmm14
-; SSE2-NEXT: pand %xmm0, %xmm7
-; SSE2-NEXT: packuswb %xmm14, %xmm7
-; SSE2-NEXT: packuswb %xmm7, %xmm15
-; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: packuswb %xmm9, %xmm14
+; SSE2-NEXT: packuswb %xmm7, %xmm14
+; SSE2-NEXT: psrld $1, %xmm6
; SSE2-NEXT: psrld $1, %xmm13
; SSE2-NEXT: pand %xmm0, %xmm13
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: packuswb %xmm13, %xmm1
-; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: pand %xmm0, %xmm6
+; SSE2-NEXT: packuswb %xmm13, %xmm6
; SSE2-NEXT: psrld $1, %xmm12
+; SSE2-NEXT: psrld $1, %xmm10
+; SSE2-NEXT: pand %xmm0, %xmm10
; SSE2-NEXT: pand %xmm0, %xmm12
-; SSE2-NEXT: pand %xmm0, %xmm6
-; SSE2-NEXT: packuswb %xmm12, %xmm6
-; SSE2-NEXT: packuswb %xmm6, %xmm1
-; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: packuswb %xmm10, %xmm12
+; SSE2-NEXT: packuswb %xmm6, %xmm12
+; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: psrld $1, %xmm11
; SSE2-NEXT: pand %xmm0, %xmm11
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: packuswb %xmm11, %xmm2
-; SSE2-NEXT: psrld $1, %xmm5
+; SSE2-NEXT: pand %xmm0, %xmm5
+; SSE2-NEXT: packuswb %xmm11, %xmm5
+; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload
; SSE2-NEXT: psrld $1, %xmm6
; SSE2-NEXT: pand %xmm0, %xmm6
-; SSE2-NEXT: pand %xmm0, %xmm5
-; SSE2-NEXT: packuswb %xmm6, %xmm5
+; SSE2-NEXT: pand %xmm0, %xmm2
+; SSE2-NEXT: packuswb %xmm6, %xmm2
; SSE2-NEXT: packuswb %xmm5, %xmm2
-; SSE2-NEXT: psrld $1, %xmm3
-; SSE2-NEXT: movdqa %xmm9, %xmm5
+; SSE2-NEXT: psrld $1, %xmm4
+; SSE2-NEXT: movdqa %xmm8, %xmm5
; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pand %xmm0, %xmm5
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: packuswb %xmm5, %xmm3
-; SSE2-NEXT: psrld $1, %xmm8
-; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm0, %xmm4
+; SSE2-NEXT: packuswb %xmm5, %xmm4
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pand %xmm0, %xmm5
-; SSE2-NEXT: pand %xmm0, %xmm8
-; SSE2-NEXT: packuswb %xmm5, %xmm8
-; SSE2-NEXT: packuswb %xmm8, %xmm3
-; SSE2-NEXT: movdqu %xmm3, (%rax)
-; SSE2-NEXT: movdqu %xmm2, (%rax)
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: packuswb %xmm5, %xmm1
+; SSE2-NEXT: packuswb %xmm4, %xmm1
; SSE2-NEXT: movdqu %xmm1, (%rax)
-; SSE2-NEXT: movdqu %xmm15, (%rax)
+; SSE2-NEXT: movdqu %xmm2, (%rax)
+; SSE2-NEXT: movdqu %xmm12, (%rax)
+; SSE2-NEXT: movdqu %xmm14, (%rax)
; SSE2-NEXT: addq $152, %rsp
; SSE2-NEXT: retq
;
@@ -495,7 +496,7 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
; AVX2-NEXT: vpsrld $1, %ymm2, %ymm7
; AVX2-NEXT: vpsrld $1, %ymm10, %ymm8
; AVX2-NEXT: vpsrld $1, %ymm9, %ymm3
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm3, %ymm3
; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm3[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -563,6 +564,7 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512F-NEXT: vmovdqu %ymm1, (%rax)
; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v64i8:
@@ -570,6 +572,7 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
; AVX512BW-NEXT: vmovdqu8 (%rsi), %zmm0
; AVX512BW-NEXT: vpavgb (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqu8 %zmm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = load <64 x i8>, <64 x i8>* %a
%2 = load <64 x i8>, <64 x i8>* %b
@@ -727,6 +730,7 @@ define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) {
; AVX512F-NEXT: vmovdqa (%rsi), %ymm0
; AVX512F-NEXT: vpavgw (%rdi), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v16i16:
@@ -734,6 +738,7 @@ define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) {
; AVX512BW-NEXT: vmovdqa (%rsi), %ymm0
; AVX512BW-NEXT: vpavgw (%rdi), %ymm0, %ymm0
; AVX512BW-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = load <16 x i16>, <16 x i16>* %a
%2 = load <16 x i16>, <16 x i16>* %b
@@ -858,7 +863,7 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) {
; AVX2-NEXT: vpsrld $1, %ymm2, %ymm2
; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpshufb %ymm4, %ymm1, %ymm1
@@ -889,6 +894,7 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) {
; AVX512F-NEXT: vpsrld $1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdw %zmm0, (%rax)
; AVX512F-NEXT: vpmovdw %zmm1, (%rax)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v32i16:
@@ -896,6 +902,7 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) {
; AVX512BW-NEXT: vmovdqu16 (%rsi), %zmm0
; AVX512BW-NEXT: vpavgw (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqu16 %zmm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = load <32 x i16>, <32 x i16>* %a
%2 = load <32 x i16>, <32 x i16>* %b
@@ -1045,82 +1052,82 @@ define void @avg_v32i8_2(<32 x i8>* %a, <32 x i8>* %b) {
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm8[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm8, %xmm2
+; SSE2-NEXT: movdqa %xmm8, %xmm10
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm4[8],xmm10[9],xmm4[9],xmm10[10],xmm4[10],xmm10[11],xmm4[11],xmm10[12],xmm4[12],xmm10[13],xmm4[13],xmm10[14],xmm4[14],xmm10[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm10, %xmm2
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3],xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm10, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm15 = xmm11[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3],xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm11, %xmm14
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm8, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm11, %xmm15
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm4[8],xmm15[9],xmm4[9],xmm15[10],xmm4[10],xmm15[11],xmm4[11],xmm15[12],xmm4[12],xmm15[13],xmm4[13],xmm15[14],xmm4[14],xmm15[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm15, %xmm14
; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm4[4],xmm14[5],xmm4[5],xmm14[6],xmm4[6],xmm14[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm4[0],xmm15[1],xmm4[1],xmm15[2],xmm4[2],xmm15[3],xmm4[3],xmm15[4],xmm4[4],xmm15[5],xmm4[5],xmm15[6],xmm4[6],xmm15[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm15, %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm4[0],xmm15[1],xmm4[1],xmm15[2],xmm4[2],xmm15[3],xmm4[3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm7
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3],xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm11, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm2, %xmm13
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm4[4],xmm13[5],xmm4[5],xmm13[6],xmm4[6],xmm13[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm15, %xmm2
-; SSE2-NEXT: paddd %xmm9, %xmm13
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm13
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm4[4],xmm13[5],xmm4[5],xmm13[6],xmm4[6],xmm13[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSE2-NEXT: paddd %xmm11, %xmm1
+; SSE2-NEXT: paddd %xmm9, %xmm13
+; SSE2-NEXT: paddd %xmm15, %xmm2
; SSE2-NEXT: paddd %xmm14, %xmm5
-; SSE2-NEXT: paddd %xmm10, %xmm3
-; SSE2-NEXT: paddd %xmm12, %xmm6
; SSE2-NEXT: paddd %xmm8, %xmm0
+; SSE2-NEXT: paddd %xmm12, %xmm6
+; SSE2-NEXT: paddd %xmm10, %xmm3
; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1]
; SSE2-NEXT: paddd %xmm4, %xmm7
-; SSE2-NEXT: paddd %xmm4, %xmm0
-; SSE2-NEXT: paddd %xmm4, %xmm6
; SSE2-NEXT: paddd %xmm4, %xmm3
+; SSE2-NEXT: paddd %xmm4, %xmm6
+; SSE2-NEXT: paddd %xmm4, %xmm0
; SSE2-NEXT: paddd %xmm4, %xmm5
-; SSE2-NEXT: paddd %xmm4, %xmm1
-; SSE2-NEXT: paddd %xmm4, %xmm13
; SSE2-NEXT: paddd %xmm4, %xmm2
-; SSE2-NEXT: psrld $1, %xmm0
+; SSE2-NEXT: paddd %xmm4, %xmm13
+; SSE2-NEXT: paddd %xmm4, %xmm1
+; SSE2-NEXT: psrld $1, %xmm3
; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE2-NEXT: pand %xmm4, %xmm7
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: packuswb %xmm7, %xmm0
-; SSE2-NEXT: psrld $1, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: packuswb %xmm7, %xmm3
+; SSE2-NEXT: psrld $1, %xmm0
; SSE2-NEXT: psrld $1, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm3
-; SSE2-NEXT: packuswb %xmm6, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm6, %xmm0
; SSE2-NEXT: packuswb %xmm3, %xmm0
-; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: packuswb %xmm5, %xmm1
-; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: packuswb %xmm5, %xmm2
+; SSE2-NEXT: psrld $1, %xmm1
; SSE2-NEXT: psrld $1, %xmm13
; SSE2-NEXT: pand %xmm4, %xmm13
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: packuswb %xmm13, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: packuswb %xmm13, %xmm1
; SSE2-NEXT: packuswb %xmm2, %xmm1
; SSE2-NEXT: movdqu %xmm1, (%rax)
; SSE2-NEXT: movdqu %xmm0, (%rax)
@@ -1139,6 +1146,7 @@ define void @avg_v32i8_2(<32 x i8>* %a, <32 x i8>* %b) {
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vpavgb (%rsi), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v32i8_2:
@@ -1146,6 +1154,7 @@ define void @avg_v32i8_2(<32 x i8>* %a, <32 x i8>* %b) {
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vpavgb (%rsi), %ymm0, %ymm0
; AVX512BW-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = load <32 x i8>, <32 x i8>* %a
%2 = load <32 x i8>, <32 x i8>* %b
@@ -1162,136 +1171,136 @@ define void @avg_v32i8_2(<32 x i8>* %a, <32 x i8>* %b) {
define void @avg_v64i8_2(<64 x i8>* %a, <64 x i8>* %b) {
; SSE2-LABEL: avg_v64i8_2:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rsi), %xmm15
-; SSE2-NEXT: movdqa 16(%rsi), %xmm13
+; SSE2-NEXT: movdqa (%rsi), %xmm14
+; SSE2-NEXT: movdqa 16(%rsi), %xmm12
; SSE2-NEXT: movdqa 32(%rsi), %xmm2
-; SSE2-NEXT: movdqa 48(%rsi), %xmm3
+; SSE2-NEXT: movdqa 48(%rsi), %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm15[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm15, %xmm8
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3],xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm7, %xmm14
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm14, %xmm7
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm7, %xmm15
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm13[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1],xmm13[2],xmm0[2],xmm13[3],xmm0[3],xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm13, %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1],xmm13[2],xmm0[2],xmm13[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm6, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3],xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm14, %xmm8
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm12, %xmm6
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm6, %xmm13
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm2, %xmm11
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3],xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm12, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm5, %xmm11
; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm5, %xmm10
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm2, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm4, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: paddd %xmm1, %xmm1
-; SSE2-NEXT: paddd %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: paddd %xmm3, %xmm3
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: paddd %xmm4, %xmm4
-; SSE2-NEXT: paddd %xmm5, %xmm5
-; SSE2-NEXT: paddd %xmm10, %xmm10
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm3, %xmm3
; SSE2-NEXT: paddd %xmm2, %xmm2
+; SSE2-NEXT: paddd %xmm10, %xmm10
+; SSE2-NEXT: paddd %xmm5, %xmm5
; SSE2-NEXT: paddd %xmm11, %xmm11
-; SSE2-NEXT: paddd %xmm6, %xmm6
; SSE2-NEXT: paddd %xmm12, %xmm12
-; SSE2-NEXT: paddd %xmm13, %xmm13
; SSE2-NEXT: paddd %xmm9, %xmm9
-; SSE2-NEXT: paddd %xmm7, %xmm7
+; SSE2-NEXT: paddd %xmm6, %xmm6
+; SSE2-NEXT: paddd %xmm13, %xmm13
; SSE2-NEXT: paddd %xmm14, %xmm14
-; SSE2-NEXT: paddd %xmm15, %xmm15
; SSE2-NEXT: paddd %xmm8, %xmm8
+; SSE2-NEXT: paddd %xmm7, %xmm7
+; SSE2-NEXT: paddd %xmm15, %xmm15
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
-; SSE2-NEXT: paddd %xmm0, %xmm8
; SSE2-NEXT: paddd %xmm0, %xmm15
-; SSE2-NEXT: paddd %xmm0, %xmm14
; SSE2-NEXT: paddd %xmm0, %xmm7
-; SSE2-NEXT: paddd %xmm0, %xmm9
+; SSE2-NEXT: paddd %xmm0, %xmm8
+; SSE2-NEXT: paddd %xmm0, %xmm14
; SSE2-NEXT: paddd %xmm0, %xmm13
-; SSE2-NEXT: paddd %xmm0, %xmm12
; SSE2-NEXT: paddd %xmm0, %xmm6
+; SSE2-NEXT: paddd %xmm0, %xmm9
+; SSE2-NEXT: paddd %xmm0, %xmm12
; SSE2-NEXT: paddd %xmm0, %xmm11
-; SSE2-NEXT: paddd %xmm0, %xmm2
-; SSE2-NEXT: paddd %xmm0, %xmm10
; SSE2-NEXT: paddd %xmm0, %xmm5
-; SSE2-NEXT: paddd %xmm0, %xmm4
-; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: paddd %xmm0, %xmm10
+; SSE2-NEXT: paddd %xmm0, %xmm2
; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: paddd %xmm0, %xmm4
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm0, %xmm3
; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: psrld $1, %xmm15
-; SSE2-NEXT: psrld $1, %xmm8
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE2-NEXT: pand %xmm0, %xmm8
; SSE2-NEXT: pand %xmm0, %xmm15
-; SSE2-NEXT: packuswb %xmm8, %xmm15
-; SSE2-NEXT: psrld $1, %xmm7
+; SSE2-NEXT: pand %xmm0, %xmm7
+; SSE2-NEXT: packuswb %xmm15, %xmm7
; SSE2-NEXT: psrld $1, %xmm14
+; SSE2-NEXT: psrld $1, %xmm8
+; SSE2-NEXT: pand %xmm0, %xmm8
; SSE2-NEXT: pand %xmm0, %xmm14
-; SSE2-NEXT: pand %xmm0, %xmm7
-; SSE2-NEXT: packuswb %xmm14, %xmm7
-; SSE2-NEXT: packuswb %xmm7, %xmm15
+; SSE2-NEXT: packuswb %xmm8, %xmm14
+; SSE2-NEXT: packuswb %xmm7, %xmm14
+; SSE2-NEXT: psrld $1, %xmm6
; SSE2-NEXT: psrld $1, %xmm13
-; SSE2-NEXT: psrld $1, %xmm9
-; SSE2-NEXT: pand %xmm0, %xmm9
; SSE2-NEXT: pand %xmm0, %xmm13
-; SSE2-NEXT: packuswb %xmm9, %xmm13
-; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: pand %xmm0, %xmm6
+; SSE2-NEXT: packuswb %xmm13, %xmm6
; SSE2-NEXT: psrld $1, %xmm12
+; SSE2-NEXT: psrld $1, %xmm9
+; SSE2-NEXT: pand %xmm0, %xmm9
; SSE2-NEXT: pand %xmm0, %xmm12
-; SSE2-NEXT: pand %xmm0, %xmm6
-; SSE2-NEXT: packuswb %xmm12, %xmm6
-; SSE2-NEXT: packuswb %xmm6, %xmm13
-; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: packuswb %xmm9, %xmm12
+; SSE2-NEXT: packuswb %xmm6, %xmm12
+; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: psrld $1, %xmm11
; SSE2-NEXT: pand %xmm0, %xmm11
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: packuswb %xmm11, %xmm2
-; SSE2-NEXT: psrld $1, %xmm5
+; SSE2-NEXT: pand %xmm0, %xmm5
+; SSE2-NEXT: packuswb %xmm11, %xmm5
+; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: psrld $1, %xmm10
; SSE2-NEXT: pand %xmm0, %xmm10
-; SSE2-NEXT: pand %xmm0, %xmm5
-; SSE2-NEXT: packuswb %xmm10, %xmm5
+; SSE2-NEXT: pand %xmm0, %xmm2
+; SSE2-NEXT: packuswb %xmm10, %xmm2
; SSE2-NEXT: packuswb %xmm5, %xmm2
-; SSE2-NEXT: psrld $1, %xmm3
+; SSE2-NEXT: psrld $1, %xmm4
; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pand %xmm0, %xmm5
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: packuswb %xmm5, %xmm3
+; SSE2-NEXT: pand %xmm0, %xmm4
+; SSE2-NEXT: packuswb %xmm5, %xmm4
; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pand %xmm0, %xmm5
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: packuswb %xmm5, %xmm1
-; SSE2-NEXT: packuswb %xmm1, %xmm3
-; SSE2-NEXT: movdqu %xmm3, (%rax)
+; SSE2-NEXT: packuswb %xmm4, %xmm1
+; SSE2-NEXT: movdqu %xmm1, (%rax)
; SSE2-NEXT: movdqu %xmm2, (%rax)
-; SSE2-NEXT: movdqu %xmm13, (%rax)
-; SSE2-NEXT: movdqu %xmm15, (%rax)
+; SSE2-NEXT: movdqu %xmm12, (%rax)
+; SSE2-NEXT: movdqu %xmm14, (%rax)
; SSE2-NEXT: retq
;
; AVX2-LABEL: avg_v64i8_2:
@@ -1329,7 +1338,7 @@ define void @avg_v64i8_2(<64 x i8>* %a, <64 x i8>* %b) {
; AVX2-NEXT: vpsrld $1, %ymm2, %ymm7
; AVX2-NEXT: vpsrld $1, %ymm10, %ymm8
; AVX2-NEXT: vpsrld $1, %ymm9, %ymm3
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm3, %ymm3
; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm3[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -1393,6 +1402,7 @@ define void @avg_v64i8_2(<64 x i8>* %a, <64 x i8>* %b) {
; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512F-NEXT: vmovdqu %ymm1, (%rax)
; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v64i8_2:
@@ -1400,6 +1410,7 @@ define void @avg_v64i8_2(<64 x i8>* %a, <64 x i8>* %b) {
; AVX512BW-NEXT: vmovdqu8 (%rsi), %zmm0
; AVX512BW-NEXT: vpavgb %zmm0, %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqu8 %zmm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = load <64 x i8>, <64 x i8>* %a
%2 = load <64 x i8>, <64 x i8>* %b
@@ -1558,6 +1569,7 @@ define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) {
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vpavgw (%rsi), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v16i16_2:
@@ -1565,6 +1577,7 @@ define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) {
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vpavgw (%rsi), %ymm0, %ymm0
; AVX512BW-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = load <16 x i16>, <16 x i16>* %a
%2 = load <16 x i16>, <16 x i16>* %b
@@ -1689,7 +1702,7 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) {
; AVX2-NEXT: vpsrld $1, %ymm2, %ymm2
; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpshufb %ymm4, %ymm1, %ymm1
@@ -1720,6 +1733,7 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) {
; AVX512F-NEXT: vpsrld $1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdw %zmm0, (%rax)
; AVX512F-NEXT: vpmovdw %zmm1, (%rax)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v32i16_2:
@@ -1727,6 +1741,7 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) {
; AVX512BW-NEXT: vmovdqu16 (%rdi), %zmm0
; AVX512BW-NEXT: vpavgw (%rsi), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqu16 %zmm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = load <32 x i16>, <32 x i16>* %a
%2 = load <32 x i16>, <32 x i16>* %b
@@ -1854,62 +1869,62 @@ define void @avg_v16i8_const(<16 x i8>* %a) {
define void @avg_v32i8_const(<32 x i8>* %a) {
; SSE2-LABEL: avg_v32i8_const:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm4
+; SSE2-NEXT: movdqa (%rdi), %xmm5
; SSE2-NEXT: movdqa 16(%rdi), %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm8
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3],xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
-; SSE2-NEXT: movdqa %xmm6, %xmm7
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; SSE2-NEXT: movdqa %xmm2, %xmm8
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm3[0],xmm8[1],xmm3[1],xmm8[2],xmm3[2],xmm8[3],xmm3[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
+; SSE2-NEXT: movdqa %xmm6, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15]
+; SSE2-NEXT: movdqa %xmm5, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [5,6,7,8]
-; SSE2-NEXT: paddd %xmm9, %xmm4
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,2,3,4]
-; SSE2-NEXT: paddd %xmm1, %xmm5
+; SSE2-NEXT: paddd %xmm9, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,2,3,4]
+; SSE2-NEXT: paddd %xmm3, %xmm7
; SSE2-NEXT: paddd %xmm9, %xmm6
-; SSE2-NEXT: paddd %xmm1, %xmm7
+; SSE2-NEXT: paddd %xmm3, %xmm4
; SSE2-NEXT: paddd %xmm9, %xmm2
-; SSE2-NEXT: paddd %xmm1, %xmm3
-; SSE2-NEXT: paddd %xmm9, %xmm0
-; SSE2-NEXT: paddd %xmm1, %xmm8
-; SSE2-NEXT: psrld $1, %xmm8
+; SSE2-NEXT: paddd %xmm3, %xmm8
+; SSE2-NEXT: paddd %xmm9, %xmm1
+; SSE2-NEXT: paddd %xmm3, %xmm0
; SSE2-NEXT: psrld $1, %xmm0
-; SSE2-NEXT: psrld $1, %xmm3
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: psrld $1, %xmm8
; SSE2-NEXT: psrld $1, %xmm2
-; SSE2-NEXT: psrld $1, %xmm7
+; SSE2-NEXT: psrld $1, %xmm4
; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: psrld $1, %xmm5
-; SSE2-NEXT: psrld $1, %xmm4
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE2-NEXT: pand %xmm1, %xmm4
-; SSE2-NEXT: pand %xmm1, %xmm5
-; SSE2-NEXT: packuswb %xmm4, %xmm5
-; SSE2-NEXT: pand %xmm1, %xmm6
-; SSE2-NEXT: pand %xmm1, %xmm7
-; SSE2-NEXT: packuswb %xmm6, %xmm7
-; SSE2-NEXT: packuswb %xmm7, %xmm5
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm3
-; SSE2-NEXT: packuswb %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm8
-; SSE2-NEXT: packuswb %xmm0, %xmm8
-; SSE2-NEXT: packuswb %xmm8, %xmm3
-; SSE2-NEXT: movdqu %xmm3, (%rax)
-; SSE2-NEXT: movdqu %xmm5, (%rax)
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm3, %xmm5
+; SSE2-NEXT: pand %xmm3, %xmm7
+; SSE2-NEXT: packuswb %xmm5, %xmm7
+; SSE2-NEXT: pand %xmm3, %xmm6
+; SSE2-NEXT: pand %xmm3, %xmm4
+; SSE2-NEXT: packuswb %xmm6, %xmm4
+; SSE2-NEXT: packuswb %xmm7, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm3, %xmm8
+; SSE2-NEXT: packuswb %xmm2, %xmm8
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: packuswb %xmm8, %xmm0
+; SSE2-NEXT: movdqu %xmm0, (%rax)
+; SSE2-NEXT: movdqu %xmm4, (%rax)
; SSE2-NEXT: retq
;
; AVX2-LABEL: avg_v32i8_const:
@@ -1925,6 +1940,7 @@ define void @avg_v32i8_const(<32 x i8>* %a) {
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vpavgb {{.*}}(%rip), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v32i8_const:
@@ -1932,6 +1948,7 @@ define void @avg_v32i8_const(<32 x i8>* %a) {
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vpavgb {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = load <32 x i8>, <32 x i8>* %a
%2 = zext <32 x i8> %1 to <32 x i32>
@@ -1945,121 +1962,121 @@ define void @avg_v32i8_const(<32 x i8>* %a) {
define void @avg_v64i8_const(<64 x i8>* %a) {
; SSE2-LABEL: avg_v64i8_const:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm7
-; SSE2-NEXT: movdqa 16(%rdi), %xmm1
-; SSE2-NEXT: movdqa 32(%rdi), %xmm14
+; SSE2-NEXT: movdqa (%rdi), %xmm5
+; SSE2-NEXT: movdqa 16(%rdi), %xmm6
+; SSE2-NEXT: movdqa 32(%rdi), %xmm15
; SSE2-NEXT: movdqa 48(%rdi), %xmm11
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm11[2,3,0,1]
; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm3, %xmm9
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3],xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm11, %xmm12
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm14[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1],xmm13[2],xmm0[2],xmm13[3],xmm0[3],xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm13, %xmm10
+; SSE2-NEXT: movdqa %xmm11, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm10
; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm9
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm0[8],xmm11[9],xmm0[9],xmm11[10],xmm0[10],xmm11[11],xmm0[11],xmm11[12],xmm0[12],xmm11[13],xmm0[13],xmm11[14],xmm0[14],xmm11[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm11, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm15, %xmm14
; SSE2-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3],xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm14, %xmm15
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm14, %xmm13
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1],xmm13[2],xmm0[2],xmm13[3],xmm0[3]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm0[8],xmm15[9],xmm0[9],xmm15[10],xmm0[10],xmm15[11],xmm0[11],xmm15[12],xmm0[12],xmm15[13],xmm0[13],xmm15[14],xmm0[14],xmm15[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm15, %xmm12
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm6, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm8
+; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm7[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3],xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm7, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm6, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm5, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [5,6,7,8]
-; SSE2-NEXT: paddd %xmm0, %xmm7
-; SSE2-NEXT: paddd %xmm0, %xmm4
-; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: paddd %xmm0, %xmm5
+; SSE2-NEXT: paddd %xmm0, %xmm2
+; SSE2-NEXT: paddd %xmm0, %xmm6
; SSE2-NEXT: paddd %xmm0, %xmm3
+; SSE2-NEXT: paddd %xmm0, %xmm15
; SSE2-NEXT: paddd %xmm0, %xmm14
-; SSE2-NEXT: paddd %xmm0, %xmm13
; SSE2-NEXT: paddd %xmm0, %xmm11
; SSE2-NEXT: paddd %xmm0, %xmm9
; SSE2-NEXT: movdqa %xmm9, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,2,3,4]
-; SSE2-NEXT: paddd %xmm0, %xmm2
-; SSE2-NEXT: paddd %xmm0, %xmm5
+; SSE2-NEXT: paddd %xmm0, %xmm7
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: paddd %xmm0, %xmm4
; SSE2-NEXT: paddd %xmm0, %xmm8
-; SSE2-NEXT: paddd %xmm0, %xmm6
-; SSE2-NEXT: paddd %xmm0, %xmm15
-; SSE2-NEXT: paddd %xmm0, %xmm10
; SSE2-NEXT: paddd %xmm0, %xmm12
+; SSE2-NEXT: paddd %xmm0, %xmm13
; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm9 # 16-byte Reload
; SSE2-NEXT: paddd %xmm0, %xmm9
; SSE2-NEXT: movdqa %xmm9, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: paddd %xmm0, %xmm10
; SSE2-NEXT: psrld $1, %xmm7
+; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm0, %xmm5
; SSE2-NEXT: pand %xmm0, %xmm7
+; SSE2-NEXT: packuswb %xmm5, %xmm7
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: packuswb %xmm7, %xmm2
-; SSE2-NEXT: psrld $1, %xmm5
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: packuswb %xmm2, %xmm1
+; SSE2-NEXT: packuswb %xmm7, %xmm1
; SSE2-NEXT: psrld $1, %xmm4
+; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: pand %xmm0, %xmm6
; SSE2-NEXT: pand %xmm0, %xmm4
-; SSE2-NEXT: pand %xmm0, %xmm5
-; SSE2-NEXT: packuswb %xmm4, %xmm5
-; SSE2-NEXT: packuswb %xmm5, %xmm2
+; SSE2-NEXT: packuswb %xmm6, %xmm4
; SSE2-NEXT: psrld $1, %xmm8
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm0, %xmm8
-; SSE2-NEXT: packuswb %xmm1, %xmm8
-; SSE2-NEXT: psrld $1, %xmm6
; SSE2-NEXT: psrld $1, %xmm3
; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: pand %xmm0, %xmm6
-; SSE2-NEXT: packuswb %xmm3, %xmm6
-; SSE2-NEXT: packuswb %xmm6, %xmm8
+; SSE2-NEXT: pand %xmm0, %xmm8
+; SSE2-NEXT: packuswb %xmm3, %xmm8
+; SSE2-NEXT: packuswb %xmm4, %xmm8
+; SSE2-NEXT: psrld $1, %xmm12
; SSE2-NEXT: psrld $1, %xmm15
-; SSE2-NEXT: psrld $1, %xmm14
-; SSE2-NEXT: pand %xmm0, %xmm14
; SSE2-NEXT: pand %xmm0, %xmm15
-; SSE2-NEXT: packuswb %xmm14, %xmm15
-; SSE2-NEXT: psrld $1, %xmm10
+; SSE2-NEXT: pand %xmm0, %xmm12
+; SSE2-NEXT: packuswb %xmm15, %xmm12
; SSE2-NEXT: psrld $1, %xmm13
+; SSE2-NEXT: psrld $1, %xmm14
+; SSE2-NEXT: pand %xmm0, %xmm14
; SSE2-NEXT: pand %xmm0, %xmm13
-; SSE2-NEXT: pand %xmm0, %xmm10
-; SSE2-NEXT: packuswb %xmm13, %xmm10
-; SSE2-NEXT: packuswb %xmm10, %xmm15
-; SSE2-NEXT: psrld $1, %xmm12
+; SSE2-NEXT: packuswb %xmm14, %xmm13
+; SSE2-NEXT: packuswb %xmm12, %xmm13
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: psrld $1, %xmm11
; SSE2-NEXT: pand %xmm0, %xmm11
-; SSE2-NEXT: pand %xmm0, %xmm12
-; SSE2-NEXT: packuswb %xmm11, %xmm12
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: pand %xmm0, %xmm2
+; SSE2-NEXT: packuswb %xmm11, %xmm2
+; SSE2-NEXT: psrld $1, %xmm10
; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
; SSE2-NEXT: psrld $1, %xmm3
; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: packuswb %xmm3, %xmm1
-; SSE2-NEXT: packuswb %xmm1, %xmm12
-; SSE2-NEXT: movdqu %xmm12, (%rax)
-; SSE2-NEXT: movdqu %xmm15, (%rax)
+; SSE2-NEXT: pand %xmm0, %xmm10
+; SSE2-NEXT: packuswb %xmm3, %xmm10
+; SSE2-NEXT: packuswb %xmm2, %xmm10
+; SSE2-NEXT: movdqu %xmm10, (%rax)
+; SSE2-NEXT: movdqu %xmm13, (%rax)
; SSE2-NEXT: movdqu %xmm8, (%rax)
-; SSE2-NEXT: movdqu %xmm2, (%rax)
+; SSE2-NEXT: movdqu %xmm1, (%rax)
; SSE2-NEXT: retq
;
; AVX2-LABEL: avg_v64i8_const:
@@ -2089,7 +2106,7 @@ define void @avg_v64i8_const(<64 x i8>* %a) {
; AVX2-NEXT: vpsrld $1, %ymm5, %ymm5
; AVX2-NEXT: vpsrld $1, %ymm6, %ymm6
; AVX2-NEXT: vpsrld $1, %ymm7, %ymm3
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm3, %ymm3
; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm3[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -2149,6 +2166,7 @@ define void @avg_v64i8_const(<64 x i8>* %a) {
; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
; AVX512F-NEXT: vmovdqu %ymm2, (%rax)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v64i8_const:
@@ -2156,6 +2174,7 @@ define void @avg_v64i8_const(<64 x i8>* %a) {
; AVX512BW-NEXT: vmovdqu8 (%rdi), %zmm0
; AVX512BW-NEXT: vpavgb {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqu8 %zmm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = load <64 x i8>, <64 x i8>* %a
%2 = zext <64 x i8> %1 to <64 x i32>
@@ -2289,6 +2308,7 @@ define void @avg_v16i16_const(<16 x i16>* %a) {
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vpavgw {{.*}}(%rip), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v16i16_const:
@@ -2296,6 +2316,7 @@ define void @avg_v16i16_const(<16 x i16>* %a) {
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vpavgw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = load <16 x i16>, <16 x i16>* %a
%2 = zext <16 x i16> %1 to <16 x i32>
@@ -2385,7 +2406,7 @@ define void @avg_v32i16_const(<32 x i16>* %a) {
; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
; AVX2-NEXT: vpsrld $1, %ymm2, %ymm2
; AVX2-NEXT: vpsrld $1, %ymm3, %ymm3
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-NEXT: vpshufb %ymm4, %ymm2, %ymm2
@@ -2412,6 +2433,7 @@ define void @avg_v32i16_const(<32 x i16>* %a) {
; AVX512F-NEXT: vpsrld $1, %zmm1, %zmm1
; AVX512F-NEXT: vpmovdw %zmm1, (%rax)
; AVX512F-NEXT: vpmovdw %zmm0, (%rax)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: avg_v32i16_const:
@@ -2419,6 +2441,7 @@ define void @avg_v32i16_const(<32 x i16>* %a) {
; AVX512BW-NEXT: vmovdqu16 (%rdi), %zmm0
; AVX512BW-NEXT: vpavgw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqu16 %zmm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = load <32 x i16>, <32 x i16>* %a
%2 = zext <32 x i16> %1 to <32 x i32>
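
Taken together, the avg.ll churn above amounts to three independent changes: vzeroupper is now emitted before ret on every AVX512 path that touched ymm/zmm state (avoiding AVX-to-SSE transition penalties in callers), the AVX2 truncation mask trades its zeroing entries (128) for in-range byte indices in lanes that the following vpermq discards anyway, and the SSE2 zero-extension now uses punpckhbw for the high half instead of pshufd plus punpcklbw, which reshuffles all the downstream register allocation. The IR being compiled is the rounding-average idiom; here is a minimal sketch, assuming the elided test bodies continue the visible load/zext tails (4 lanes purely for brevity, the real tests use 16 to 64):

define void @avg_sketch(<4 x i8>* %a, <4 x i8>* %b, <4 x i8>* %out) {
  %x = load <4 x i8>, <4 x i8>* %a
  %y = load <4 x i8>, <4 x i8>* %b
  %zx = zext <4 x i8> %x to <4 x i32>     ; widen so the sum cannot wrap
  %zy = zext <4 x i8> %y to <4 x i32>
  %sum = add nuw nsw <4 x i32> %zx, %zy
  %rnd = add nuw nsw <4 x i32> %sum, <i32 1, i32 1, i32 1, i32 1>
  %avg = lshr <4 x i32> %rnd, <i32 1, i32 1, i32 1, i32 1>
  %res = trunc <4 x i32> %avg to <4 x i8> ; wide versions of this chain are matched to pavgb/pavgw
  store <4 x i8> %res, <4 x i8>* %out
  ret void
}

The SSE2 blocks above are the same chain done by hand: punpcklbw/punpckhbw and punpcklwd/punpckhwd for the zext, paddd against the [1,1,1,1] splat, psrld $1, then packuswb to truncate, hence the heavy spill traffic once sixteen accumulators are live.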
diff --git a/test/CodeGen/X86/avx-cvt-3.ll b/test/CodeGen/X86/avx-cvt-3.ll
new file mode 100644
index 000000000000..066719b3bfe8
--- /dev/null
+++ b/test/CodeGen/X86/avx-cvt-3.ll
@@ -0,0 +1,148 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=X64
+
+; Insertions/shuffles of all-zero/all-bits/constant elements into a v8i32->v8f32 sitofp conversion.
+
+define <8 x float> @sitofp_insert_zero_v8i32(<8 x i32> %a0) {
+; X86-LABEL: sitofp_insert_zero_v8i32:
+; X86: # BB#0:
+; X86-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
+; X86-NEXT: vcvtdq2ps %ymm0, %ymm0
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_insert_zero_v8i32:
+; X64: # BB#0:
+; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
+; X64-NEXT: vcvtdq2ps %ymm0, %ymm0
+; X64-NEXT: retq
+ %1 = insertelement <8 x i32> %a0, i32 0, i32 0
+ %2 = insertelement <8 x i32> %1, i32 0, i32 2
+ %3 = insertelement <8 x i32> %2, i32 0, i32 4
+ %4 = insertelement <8 x i32> %3, i32 0, i32 5
+ %5 = sitofp <8 x i32> %4 to <8 x float>
+ ret <8 x float> %5
+}
+
+define <8 x float> @sitofp_shuffle_zero_v8i32(<8 x i32> %a0) {
+; X86-LABEL: sitofp_shuffle_zero_v8i32:
+; X86: # BB#0:
+; X86-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
+; X86-NEXT: vcvtdq2ps %ymm0, %ymm0
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_shuffle_zero_v8i32:
+; X64: # BB#0:
+; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
+; X64-NEXT: vcvtdq2ps %ymm0, %ymm0
+; X64-NEXT: retq
+ %1 = shufflevector <8 x i32> %a0, <8 x i32> <i32 0, i32 undef, i32 0, i32 undef, i32 0, i32 undef, i32 0, i32 undef>, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
+ %2 = sitofp <8 x i32> %1 to <8 x float>
+ ret <8 x float> %2
+}
+
+define <8 x float> @sitofp_insert_allbits_v8i32(<8 x i32> %a0) {
+; X86-LABEL: sitofp_insert_allbits_v8i32:
+; X86: # BB#0:
+; X86-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; X86-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
+; X86-NEXT: vcvtdq2ps %ymm0, %ymm0
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_insert_allbits_v8i32:
+; X64: # BB#0:
+; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
+; X64-NEXT: vcvtdq2ps %ymm0, %ymm0
+; X64-NEXT: retq
+ %1 = insertelement <8 x i32> %a0, i32 -1, i32 0
+ %2 = insertelement <8 x i32> %1, i32 -1, i32 2
+ %3 = insertelement <8 x i32> %2, i32 -1, i32 4
+ %4 = insertelement <8 x i32> %3, i32 -1, i32 5
+ %5 = sitofp <8 x i32> %4 to <8 x float>
+ ret <8 x float> %5
+}
+
+define <8 x float> @sitofp_shuffle_allbits_v8i32(<8 x i32> %a0) {
+; X86-LABEL: sitofp_shuffle_allbits_v8i32:
+; X86: # BB#0:
+; X86-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; X86-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
+; X86-NEXT: vcvtdq2ps %ymm0, %ymm0
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_shuffle_allbits_v8i32:
+; X64: # BB#0:
+; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
+; X64-NEXT: vcvtdq2ps %ymm0, %ymm0
+; X64-NEXT: retq
+ %1 = shufflevector <8 x i32> %a0, <8 x i32> <i32 -1, i32 undef, i32 -1, i32 undef, i32 -1, i32 undef, i32 -1, i32 undef>, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
+ %2 = sitofp <8 x i32> %1 to <8 x float>
+ ret <8 x float> %2
+}
+
+define <8 x float> @sitofp_insert_constants_v8i32(<8 x i32> %a0) {
+; X86-LABEL: sitofp_insert_constants_v8i32:
+; X86: # BB#0:
+; X86-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
+; X86-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; X86-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X86-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6,7]
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: movl $2, %eax
+; X86-NEXT: vpinsrd $0, %eax, %xmm1, %xmm1
+; X86-NEXT: movl $-3, %eax
+; X86-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; X86-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X86-NEXT: vcvtdq2ps %ymm0, %ymm0
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_insert_constants_v8i32:
+; X64: # BB#0:
+; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
+; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6,7]
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT: movl $2, %eax
+; X64-NEXT: vpinsrd $0, %eax, %xmm1, %xmm1
+; X64-NEXT: movl $-3, %eax
+; X64-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X64-NEXT: vcvtdq2ps %ymm0, %ymm0
+; X64-NEXT: retq
+ %1 = insertelement <8 x i32> %a0, i32 0, i32 0
+ %2 = insertelement <8 x i32> %1, i32 -1, i32 2
+ %3 = insertelement <8 x i32> %2, i32 2, i32 4
+ %4 = insertelement <8 x i32> %3, i32 -3, i32 5
+ %5 = sitofp <8 x i32> %4 to <8 x float>
+ ret <8 x float> %5
+}
+
+define <8 x float> @sitofp_shuffle_constants_v8i32(<8 x i32> %a0) {
+; X86-LABEL: sitofp_shuffle_constants_v8i32:
+; X86: # BB#0:
+; X86-NEXT: vblendps {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7]
+; X86-NEXT: vcvtdq2ps %ymm0, %ymm0
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_shuffle_constants_v8i32:
+; X64: # BB#0:
+; X64-NEXT: vblendps {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7]
+; X64-NEXT: vcvtdq2ps %ymm0, %ymm0
+; X64-NEXT: retq
+ %1 = shufflevector <8 x i32> %a0, <8 x i32> <i32 0, i32 undef, i32 -1, i32 undef, i32 2, i32 undef, i32 -3, i32 undef>, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
+ %2 = sitofp <8 x i32> %1 to <8 x float>
+ ret <8 x float> %2
+}
diff --git a/test/CodeGen/X86/avx-cvt.ll b/test/CodeGen/X86/avx-cvt.ll
index a7cd8cf23984..f2900dba938a 100644
--- a/test/CodeGen/X86/avx-cvt.ll
+++ b/test/CodeGen/X86/avx-cvt.ll
@@ -136,7 +136,8 @@ define float @funcD(i64* nocapture %e) nounwind uwtable readonly ssp {
define void @fpext() nounwind uwtable {
; CHECK-LABEL: fpext:
; CHECK: # BB#0:
-; CHECK-NEXT: vcvtss2sd -{{[0-9]+}}(%rsp), %xmm0, %xmm0
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: retq
%f = alloca float, align 4
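
The fpext change unfolds the load: vcvtss2sd writes only the low 64 bits of its destination and merges the rest from its first source, so converting straight from memory would leave the result tied to whatever %xmm0 last held; loading with vmovss first, which zeroes the upper lanes, presumably breaks that false dependence. A hypothetical equivalent of the pattern @fpext exercises, written as plain pointer IR rather than the test's alloca form:

define void @fpext_sketch(float* %src, double* %dst) {
  %f = load float, float* %src        ; becomes vmovss, upper lanes zeroed
  %d = fpext float %f to double       ; register-to-register vcvtss2sd
  store double %d, double* %dst
  ret void
}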
diff --git a/test/CodeGen/X86/avx-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
index fe4fc65ef715..4a86fa22f081 100644
--- a/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
@@ -3310,16 +3310,16 @@ define <8 x float> @test_mm256_sub_ps(<8 x float> %a0, <8 x float> %a1) nounwind
define i32 @test_mm_testc_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm_testc_pd:
; X32: # BB#0:
+; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vtestpd %xmm1, %xmm0
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
+; X32-NEXT: setb %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_testc_pd:
; X64: # BB#0:
+; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vtestpd %xmm1, %xmm0
-; X64-NEXT: sbbl %eax, %eax
-; X64-NEXT: andl $1, %eax
+; X64-NEXT: setb %al
; X64-NEXT: retq
%res = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %a1)
ret i32 %res
@@ -3329,17 +3329,17 @@ declare i32 @llvm.x86.avx.vtestc.pd(<2 x double>, <2 x double>) nounwind readnon
define i32 @test_mm256_testc_pd(<4 x double> %a0, <4 x double> %a1) nounwind {
; X32-LABEL: test_mm256_testc_pd:
; X32: # BB#0:
+; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vtestpd %ymm1, %ymm0
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
+; X32-NEXT: setb %al
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_testc_pd:
; X64: # BB#0:
+; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vtestpd %ymm1, %ymm0
-; X64-NEXT: sbbl %eax, %eax
-; X64-NEXT: andl $1, %eax
+; X64-NEXT: setb %al
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %a1)
@@ -3350,16 +3350,16 @@ declare i32 @llvm.x86.avx.vtestc.pd.256(<4 x double>, <4 x double>) nounwind rea
define i32 @test_mm_testc_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_testc_ps:
; X32: # BB#0:
+; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vtestps %xmm1, %xmm0
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
+; X32-NEXT: setb %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_testc_ps:
; X64: # BB#0:
+; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vtestps %xmm1, %xmm0
-; X64-NEXT: sbbl %eax, %eax
-; X64-NEXT: andl $1, %eax
+; X64-NEXT: setb %al
; X64-NEXT: retq
%res = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %a1)
ret i32 %res
@@ -3369,17 +3369,17 @@ declare i32 @llvm.x86.avx.vtestc.ps(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_mm256_testc_ps(<8 x float> %a0, <8 x float> %a1) nounwind {
; X32-LABEL: test_mm256_testc_ps:
; X32: # BB#0:
+; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vtestps %ymm1, %ymm0
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
+; X32-NEXT: setb %al
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_testc_ps:
; X64: # BB#0:
+; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vtestps %ymm1, %ymm0
-; X64-NEXT: sbbl %eax, %eax
-; X64-NEXT: andl $1, %eax
+; X64-NEXT: setb %al
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %a1)
@@ -3390,17 +3390,17 @@ declare i32 @llvm.x86.avx.vtestc.ps.256(<8 x float>, <8 x float>) nounwind readn
define i32 @test_mm256_testc_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_testc_si256:
; X32: # BB#0:
+; X32-NEXT: xorl %eax, %eax
; X32-NEXT: vptest %ymm1, %ymm0
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
+; X32-NEXT: setb %al
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_testc_si256:
; X64: # BB#0:
+; X64-NEXT: xorl %eax, %eax
; X64-NEXT: vptest %ymm1, %ymm0
-; X64-NEXT: sbbl %eax, %eax
-; X64-NEXT: andl $1, %eax
+; X64-NEXT: setb %al
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = call i32 @llvm.x86.avx.ptestc.256(<4 x i64> %a0, <4 x i64> %a1)
diff --git a/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll b/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
index 1db0256e8e38..27aeb77468ce 100644
--- a/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
+++ b/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
@@ -403,8 +403,8 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_sse2_storeu_pd:
; CHECK: ## BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
+; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vmovupd %xmm0, (%eax)
; CHECK-NEXT: retl
diff --git a/test/CodeGen/X86/avx-intrinsics-x86.ll b/test/CodeGen/X86/avx-intrinsics-x86.ll
index a8befa8b0e1d..70e31771071f 100644
--- a/test/CodeGen/X86/avx-intrinsics-x86.ll
+++ b/test/CodeGen/X86/avx-intrinsics-x86.ll
@@ -1,2631 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=avx,aes,pclmul -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
+; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=avx,pclmul -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL
-define <2 x i64> @test_x86_aesni_aesdec(<2 x i64> %a0, <2 x i64> %a1) {
-; CHECK-LABEL: test_x86_aesni_aesdec:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vaesdec %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0xde,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64>, <2 x i64>) nounwind readnone
-
-
-define <2 x i64> @test_x86_aesni_aesdeclast(<2 x i64> %a0, <2 x i64> %a1) {
-; CHECK-LABEL: test_x86_aesni_aesdeclast:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vaesdeclast %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0xdf,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64>, <2 x i64>) nounwind readnone
-
-
-define <2 x i64> @test_x86_aesni_aesenc(<2 x i64> %a0, <2 x i64> %a1) {
-; CHECK-LABEL: test_x86_aesni_aesenc:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vaesenc %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0xdc,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64>, <2 x i64>) nounwind readnone
-
-
-define <2 x i64> @test_x86_aesni_aesenclast(<2 x i64> %a0, <2 x i64> %a1) {
-; CHECK-LABEL: test_x86_aesni_aesenclast:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vaesenclast %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0xdd,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64>, <2 x i64>) nounwind readnone
-
-
-define <2 x i64> @test_x86_aesni_aesimc(<2 x i64> %a0) {
-; CHECK-LABEL: test_x86_aesni_aesimc:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vaesimc %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0xdb,0xc0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64> %a0) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64>) nounwind readnone
-
-
-define <2 x i64> @test_x86_aesni_aeskeygenassist(<2 x i64> %a0) {
-; CHECK-LABEL: test_x86_aesni_aeskeygenassist:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vaeskeygenassist $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0xdf,0xc0,0x07]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64> %a0, i8 7) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64>, i8) nounwind readnone
-
-
-define <2 x double> @test_x86_sse2_cmp_pd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse2_cmp_pd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vcmpordpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xc2,0xc1,0x07]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 7) ; <<2 x double>> [#uses=1]
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double>, <2 x double>, i8) nounwind readnone
-
-
-define <2 x double> @test_x86_sse2_cmp_sd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse2_cmp_sd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vcmpordsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0xc2,0xc1,0x07]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 7) ; <<2 x double>> [#uses=1]
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounwind readnone
-
-
-define i32 @test_x86_sse2_comieq_sd(<2 x double> %a0, <2 x double> %a1) {
-; AVX-LABEL: test_x86_sse2_comieq_sd:
-; AVX: ## BB#0:
-; AVX-NEXT: vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
-; AVX-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
-; AVX-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
-; AVX-NEXT: andb %al, %cl ## encoding: [0x20,0xc1]
-; AVX-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_comieq_sd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
-; AVX512VL-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
-; AVX512VL-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
-; AVX512VL-NEXT: andb %al, %cl ## encoding: [0x20,0xc1]
-; AVX512VL-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse2.comieq.sd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define i32 @test_x86_sse2_comige_sd(<2 x double> %a0, <2 x double> %a1) {
-; AVX-LABEL: test_x86_sse2_comige_sd:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
-; AVX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_comige_sd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
-; AVX512VL-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse2.comige.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse2.comige.sd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define i32 @test_x86_sse2_comigt_sd(<2 x double> %a0, <2 x double> %a1) {
-; AVX-LABEL: test_x86_sse2_comigt_sd:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
-; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_comigt_sd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
-; AVX512VL-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse2.comigt.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse2.comigt.sd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define i32 @test_x86_sse2_comile_sd(<2 x double> %a0, <2 x double> %a1) {
-; AVX-LABEL: test_x86_sse2_comile_sd:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vcomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2f,0xc8]
-; AVX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_comile_sd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vcomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc8]
-; AVX512VL-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse2.comile.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse2.comile.sd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define i32 @test_x86_sse2_comilt_sd(<2 x double> %a0, <2 x double> %a1) {
-; AVX-LABEL: test_x86_sse2_comilt_sd:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vcomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2f,0xc8]
-; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_comilt_sd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vcomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc8]
-; AVX512VL-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse2.comilt.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse2.comilt.sd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define i32 @test_x86_sse2_comineq_sd(<2 x double> %a0, <2 x double> %a1) {
-; AVX-LABEL: test_x86_sse2_comineq_sd:
-; AVX: ## BB#0:
-; AVX-NEXT: vcomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2f,0xc1]
-; AVX-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
-; AVX-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
-; AVX-NEXT: orb %al, %cl ## encoding: [0x08,0xc1]
-; AVX-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_comineq_sd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vcomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
-; AVX512VL-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
-; AVX512VL-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
-; AVX512VL-NEXT: orb %al, %cl ## encoding: [0x08,0xc1]
-; AVX512VL-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse2.comineq.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse2.comineq.sd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse2_cvtdq2ps(<4 x i32> %a0) {
-; AVX-LABEL: test_x86_sse2_cvtdq2ps:
-; AVX: ## BB#0:
-; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5b,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_cvtdq2ps:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vcvtdq2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5b,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %a0) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone
-
-
-define <4 x i32> @test_x86_sse2_cvtpd2dq(<2 x double> %a0) {
-; AVX-LABEL: test_x86_sse2_cvtpd2dq:
-; AVX: ## BB#0:
-; AVX-NEXT: vcvtpd2dq %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0xe6,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_cvtpd2dq:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vcvtpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0xe6,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse2_cvtpd2ps(<2 x double> %a0) {
-; AVX-LABEL: test_x86_sse2_cvtpd2ps:
-; AVX: ## BB#0:
-; AVX-NEXT: vcvtpd2ps %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5a,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_cvtpd2ps:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vcvtpd2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5a,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double>) nounwind readnone
-
-
-define <4 x i32> @test_x86_sse2_cvtps2dq(<4 x float> %a0) {
-; CHECK-LABEL: test_x86_sse2_cvtps2dq:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtps2dq %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5b,0xc0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float>) nounwind readnone
-
-
-define i32 @test_x86_sse2_cvtsd2si(<2 x double> %a0) {
-; AVX-LABEL: test_x86_sse2_cvtsd2si:
-; AVX: ## BB#0:
-; AVX-NEXT: vcvtsd2si %xmm0, %eax ## encoding: [0xc5,0xfb,0x2d,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_cvtsd2si:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vcvtsd2si %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2d,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %a0) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse2_cvtsd2ss(<4 x float> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse2_cvtsd2ss:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtsd2ss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5a,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a0, <2 x double> %a1) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float>, <2 x double>) nounwind readnone
-
-
-define <2 x double> @test_x86_sse2_cvtsi2sd(<2 x double> %a0, i32 %a1) {
-; AVX-LABEL: test_x86_sse2_cvtsi2sd:
-; AVX: ## BB#0:
-; AVX-NEXT: vcvtsi2sdl {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x2a,0x44,0x24,0x04]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_cvtsi2sd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vcvtsi2sdl {{[0-9]+}}(%esp), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2a,0x44,0x24,0x04]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double> %a0, i32 %a1) ; <<2 x double>> [#uses=1]
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double>, i32) nounwind readnone
-
-
-define <2 x double> @test_x86_sse2_cvtss2sd(<2 x double> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test_x86_sse2_cvtss2sd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtss2sd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5a,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double> %a0, <4 x float> %a1) ; <<2 x double>> [#uses=1]
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double>, <4 x float>) nounwind readnone
-
-
-define <4 x i32> @test_x86_sse2_cvttpd2dq(<2 x double> %a0) {
-; AVX-LABEL: test_x86_sse2_cvttpd2dq:
-; AVX: ## BB#0:
-; AVX-NEXT: vcvttpd2dq %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe6,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_cvttpd2dq:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vcvttpd2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe6,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double>) nounwind readnone
-
-
-define <4 x i32> @test_x86_sse2_cvttps2dq(<4 x float> %a0) {
-; AVX-LABEL: test_x86_sse2_cvttps2dq:
-; AVX: ## BB#0:
-; AVX-NEXT: vcvttps2dq %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5b,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_cvttps2dq:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vcvttps2dq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x5b,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %a0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float>) nounwind readnone
-
-
-define i32 @test_x86_sse2_cvttsd2si(<2 x double> %a0) {
-; AVX-LABEL: test_x86_sse2_cvttsd2si:
-; AVX: ## BB#0:
-; AVX-NEXT: vcvttsd2si %xmm0, %eax ## encoding: [0xc5,0xfb,0x2c,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_cvttsd2si:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vcvttsd2si %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2c,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %a0) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone
-
-
-
-define <2 x double> @test_x86_sse2_max_pd(<2 x double> %a0, <2 x double> %a1) {
-; AVX-LABEL: test_x86_sse2_max_pd:
-; AVX: ## BB#0:
-; AVX-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5f,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_max_pd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5f,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define <2 x double> @test_x86_sse2_max_sd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse2_max_sd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5f,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define <2 x double> @test_x86_sse2_min_pd(<2 x double> %a0, <2 x double> %a1) {
-; AVX-LABEL: test_x86_sse2_min_pd:
-; AVX: ## BB#0:
-; AVX-NEXT: vminpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x5d,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_min_pd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vminpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5d,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define <2 x double> @test_x86_sse2_min_sd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse2_min_sd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vminsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5d,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define i32 @test_x86_sse2_movmsk_pd(<2 x double> %a0) {
-; CHECK-LABEL: test_x86_sse2_movmsk_pd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vmovmskpd %xmm0, %eax ## encoding: [0xc5,0xf9,0x50,0xc0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone
-
-
-
-
-define <8 x i16> @test_x86_sse2_packssdw_128(<4 x i32> %a0, <4 x i32> %a1) {
-; AVX-LABEL: test_x86_sse2_packssdw_128:
-; AVX: ## BB#0:
-; AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x6b,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_packssdw_128:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6b,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a0, <4 x i32> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind readnone
-
-
-define <16 x i8> @test_x86_sse2_packsswb_128(<8 x i16> %a0, <8 x i16> %a1) {
-; AVX-LABEL: test_x86_sse2_packsswb_128:
-; AVX: ## BB#0:
-; AVX-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x63,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_packsswb_128:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x63,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a1) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <16 x i8> @test_x86_sse2_packuswb_128(<8 x i16> %a0, <8 x i16> %a1) {
-; AVX-LABEL: test_x86_sse2_packuswb_128:
-; AVX: ## BB#0:
-; AVX-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x67,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_packuswb_128:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x67,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <16 x i8> @test_x86_sse2_padds_b(<16 x i8> %a0, <16 x i8> %a1) {
-; AVX-LABEL: test_x86_sse2_padds_b:
-; AVX: ## BB#0:
-; AVX-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xec,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_padds_b:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse2_padds_w(<8 x i16> %a0, <8 x i16> %a1) {
-; AVX-LABEL: test_x86_sse2_padds_w:
-; AVX: ## BB#0:
-; AVX-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xed,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_padds_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xed,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <16 x i8> @test_x86_sse2_paddus_b(<16 x i8> %a0, <16 x i8> %a1) {
-; AVX-LABEL: test_x86_sse2_paddus_b:
-; AVX: ## BB#0:
-; AVX-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdc,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_paddus_b:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8>, <16 x i8>) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse2_paddus_w(<8 x i16> %a0, <8 x i16> %a1) {
-; AVX-LABEL: test_x86_sse2_paddus_w:
-; AVX: ## BB#0:
-; AVX-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdd,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_paddus_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <16 x i8> @test_x86_sse2_pavg_b(<16 x i8> %a0, <16 x i8> %a1) {
-; AVX-LABEL: test_x86_sse2_pavg_b:
-; AVX: ## BB#0:
-; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe0,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_pavg_b:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpavgb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe0,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse2_pavg_w(<8 x i16> %a0, <8 x i16> %a1) {
-; AVX-LABEL: test_x86_sse2_pavg_w:
-; AVX: ## BB#0:
-; AVX-NEXT: vpavgw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe3,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_pavg_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpavgw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe3,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <4 x i32> @test_x86_sse2_pmadd_wd(<8 x i16> %a0, <8 x i16> %a1) {
-; AVX-LABEL: test_x86_sse2_pmadd_wd:
-; AVX: ## BB#0:
-; AVX-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf5,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_pmadd_wd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf5,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse2_pmaxs_w(<8 x i16> %a0, <8 x i16> %a1) {
-; AVX-LABEL: test_x86_sse2_pmaxs_w:
-; AVX: ## BB#0:
-; AVX-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xee,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_pmaxs_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xee,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <16 x i8> @test_x86_sse2_pmaxu_b(<16 x i8> %a0, <16 x i8> %a1) {
-; AVX-LABEL: test_x86_sse2_pmaxu_b:
-; AVX: ## BB#0:
-; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xde,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_pmaxu_b:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xde,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8>, <16 x i8>) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse2_pmins_w(<8 x i16> %a0, <8 x i16> %a1) {
-; AVX-LABEL: test_x86_sse2_pmins_w:
-; AVX: ## BB#0:
-; AVX-NEXT: vpminsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xea,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_pmins_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpminsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xea,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <16 x i8> @test_x86_sse2_pminu_b(<16 x i8> %a0, <16 x i8> %a1) {
-; AVX-LABEL: test_x86_sse2_pminu_b:
-; AVX: ## BB#0:
-; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xda,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_pminu_b:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpminub %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xda,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8>, <16 x i8>) nounwind readnone
-
-
-define i32 @test_x86_sse2_pmovmskb_128(<16 x i8> %a0) {
-; CHECK-LABEL: test_x86_sse2_pmovmskb_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpmovmskb %xmm0, %eax ## encoding: [0xc5,0xf9,0xd7,0xc0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse2_pmulh_w(<8 x i16> %a0, <8 x i16> %a1) {
-; AVX-LABEL: test_x86_sse2_pmulh_w:
-; AVX: ## BB#0:
-; AVX-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe5,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_pmulh_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe5,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse2_pmulhu_w(<8 x i16> %a0, <8 x i16> %a1) {
-; AVX-LABEL: test_x86_sse2_pmulhu_w:
-; AVX: ## BB#0:
-; AVX-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe4,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_pmulhu_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe4,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <2 x i64> @test_x86_sse2_pmulu_dq(<4 x i32> %a0, <4 x i32> %a1) {
-; AVX-LABEL: test_x86_sse2_pmulu_dq:
-; AVX: ## BB#0:
-; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf4,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_pmulu_dq:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf4,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %a0, <4 x i32> %a1) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32>, <4 x i32>) nounwind readnone
-
-
-define <2 x i64> @test_x86_sse2_psad_bw(<16 x i8> %a0, <16 x i8> %a1) {
-; AVX-LABEL: test_x86_sse2_psad_bw:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf6,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_psad_bw:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf6,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %a0, <16 x i8> %a1) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
-
-
-define <4 x i32> @test_x86_sse2_psll_d(<4 x i32> %a0, <4 x i32> %a1) {
-; AVX-LABEL: test_x86_sse2_psll_d:
-; AVX: ## BB#0:
-; AVX-NEXT: vpslld %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf2,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_psll_d:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpslld %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf2,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone
-
-
-define <2 x i64> @test_x86_sse2_psll_q(<2 x i64> %a0, <2 x i64> %a1) {
-; AVX-LABEL: test_x86_sse2_psll_q:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsllq %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf3,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_psll_q:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsllq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf3,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse2_psll_w(<8 x i16> %a0, <8 x i16> %a1) {
-; AVX-LABEL: test_x86_sse2_psll_w:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsllw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf1,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_psll_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsllw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf1,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <4 x i32> @test_x86_sse2_pslli_d(<4 x i32> %a0) {
-; AVX-LABEL: test_x86_sse2_pslli_d:
-; AVX: ## BB#0:
-; AVX-NEXT: vpslld $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x72,0xf0,0x07]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_pslli_d:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpslld $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x07]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %a0, i32 7) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32) nounwind readnone
-
-
-define <2 x i64> @test_x86_sse2_pslli_q(<2 x i64> %a0) {
-; AVX-LABEL: test_x86_sse2_pslli_q:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsllq $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x73,0xf0,0x07]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_pslli_q:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsllq $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x07]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse2_pslli_w(<8 x i16> %a0) {
-; AVX-LABEL: test_x86_sse2_pslli_w:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsllw $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xf0,0x07]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_pslli_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsllw $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x07]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %a0, i32 7) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone
-
-
-define <4 x i32> @test_x86_sse2_psra_d(<4 x i32> %a0, <4 x i32> %a1) {
-; AVX-LABEL: test_x86_sse2_psra_d:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsrad %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe2,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_psra_d:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsrad %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe2,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32>, <4 x i32>) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse2_psra_w(<8 x i16> %a0, <8 x i16> %a1) {
-; AVX-LABEL: test_x86_sse2_psra_w:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsraw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe1,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_psra_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsraw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe1,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <4 x i32> @test_x86_sse2_psrai_d(<4 x i32> %a0) {
-; AVX-LABEL: test_x86_sse2_psrai_d:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsrad $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x72,0xe0,0x07]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_psrai_d:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsrad $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xe0,0x07]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %a0, i32 7) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32>, i32) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse2_psrai_w(<8 x i16> %a0) {
-; AVX-LABEL: test_x86_sse2_psrai_w:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsraw $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xe0,0x07]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_psrai_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsraw $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xe0,0x07]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %a0, i32 7) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32) nounwind readnone
-
-
-define <4 x i32> @test_x86_sse2_psrl_d(<4 x i32> %a0, <4 x i32> %a1) {
-; AVX-LABEL: test_x86_sse2_psrl_d:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsrld %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd2,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_psrl_d:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsrld %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd2,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone
-
-
-define <2 x i64> @test_x86_sse2_psrl_q(<2 x i64> %a0, <2 x i64> %a1) {
-; AVX-LABEL: test_x86_sse2_psrl_q:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd3,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_psrl_q:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd3,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse2_psrl_w(<8 x i16> %a0, <8 x i16> %a1) {
-; AVX-LABEL: test_x86_sse2_psrl_w:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd1,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_psrl_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd1,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <4 x i32> @test_x86_sse2_psrli_d(<4 x i32> %a0) {
-; AVX-LABEL: test_x86_sse2_psrli_d:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsrld $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x72,0xd0,0x07]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_psrli_d:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsrld $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xd0,0x07]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %a0, i32 7) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32>, i32) nounwind readnone
-
-
-define <2 x i64> @test_x86_sse2_psrli_q(<2 x i64> %a0) {
-; AVX-LABEL: test_x86_sse2_psrli_q:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsrlq $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x73,0xd0,0x07]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_psrli_q:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsrlq $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xd0,0x07]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64>, i32) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse2_psrli_w(<8 x i16> %a0) {
-; AVX-LABEL: test_x86_sse2_psrli_w:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsrlw $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x71,0xd0,0x07]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_psrli_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsrlw $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xd0,0x07]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> %a0, i32 7) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32) nounwind readnone
-
-
-define <16 x i8> @test_x86_sse2_psubs_b(<16 x i8> %a0, <16 x i8> %a1) {
-; AVX-LABEL: test_x86_sse2_psubs_b:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe8,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_psubs_b:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe8,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse2_psubs_w(<8 x i16> %a0, <8 x i16> %a1) {
-; AVX-LABEL: test_x86_sse2_psubs_w:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe9,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_psubs_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe9,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <16 x i8> @test_x86_sse2_psubus_b(<16 x i8> %a0, <16 x i8> %a1) {
-; AVX-LABEL: test_x86_sse2_psubus_b:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd8,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_psubus_b:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8>, <16 x i8>) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse2_psubus_w(<8 x i16> %a0, <8 x i16> %a1) {
-; AVX-LABEL: test_x86_sse2_psubus_w:
-; AVX: ## BB#0:
-; AVX-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd9,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_psubus_w:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <2 x double> @test_x86_sse2_sqrt_pd(<2 x double> %a0) {
-; CHECK-LABEL: test_x86_sse2_sqrt_pd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vsqrtpd %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x51,0xc0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %a0) ; <<2 x double>> [#uses=1]
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double>) nounwind readnone
-
-
-define <2 x double> @test_x86_sse2_sqrt_sd(<2 x double> %a0) {
-; CHECK-LABEL: test_x86_sse2_sqrt_sd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %a0) ; <<2 x double>> [#uses=1]
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone
-
-
-define i32 @test_x86_sse2_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) {
-; AVX-LABEL: test_x86_sse2_ucomieq_sd:
-; AVX: ## BB#0:
-; AVX-NEXT: vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
-; AVX-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
-; AVX-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
-; AVX-NEXT: andb %al, %cl ## encoding: [0x20,0xc1]
-; AVX-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_ucomieq_sd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
-; AVX512VL-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
-; AVX512VL-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
-; AVX512VL-NEXT: andb %al, %cl ## encoding: [0x20,0xc1]
-; AVX512VL-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse2.ucomieq.sd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define i32 @test_x86_sse2_ucomige_sd(<2 x double> %a0, <2 x double> %a1) {
-; AVX-LABEL: test_x86_sse2_ucomige_sd:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
-; AVX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_ucomige_sd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
-; AVX512VL-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse2.ucomige.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse2.ucomige.sd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define i32 @test_x86_sse2_ucomigt_sd(<2 x double> %a0, <2 x double> %a1) {
-; AVX-LABEL: test_x86_sse2_ucomigt_sd:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
-; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_ucomigt_sd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
-; AVX512VL-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse2.ucomigt.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse2.ucomigt.sd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define i32 @test_x86_sse2_ucomile_sd(<2 x double> %a0, <2 x double> %a1) {
-; AVX-LABEL: test_x86_sse2_ucomile_sd:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vucomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2e,0xc8]
-; AVX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_ucomile_sd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vucomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc8]
-; AVX512VL-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse2.ucomile.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse2.ucomile.sd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define i32 @test_x86_sse2_ucomilt_sd(<2 x double> %a0, <2 x double> %a1) {
-; AVX-LABEL: test_x86_sse2_ucomilt_sd:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vucomisd %xmm0, %xmm1 ## encoding: [0xc5,0xf9,0x2e,0xc8]
-; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_ucomilt_sd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vucomisd %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc8]
-; AVX512VL-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse2.ucomilt.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse2.ucomilt.sd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define i32 @test_x86_sse2_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) {
-; AVX-LABEL: test_x86_sse2_ucomineq_sd:
-; AVX: ## BB#0:
-; AVX-NEXT: vucomisd %xmm1, %xmm0 ## encoding: [0xc5,0xf9,0x2e,0xc1]
-; AVX-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
-; AVX-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
-; AVX-NEXT: orb %al, %cl ## encoding: [0x08,0xc1]
-; AVX-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse2_ucomineq_sd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vucomisd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
-; AVX512VL-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
-; AVX512VL-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
-; AVX512VL-NEXT: orb %al, %cl ## encoding: [0x08,0xc1]
-; AVX512VL-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse2.ucomineq.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse2.ucomineq.sd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define <2 x double> @test_x86_sse3_addsub_pd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse3_addsub_pd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vaddsubpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd0,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse3_addsub_ps(<4 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test_x86_sse3_addsub_ps:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vaddsubps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0xd0,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float>, <4 x float>) nounwind readnone
-
-
-define <2 x double> @test_x86_sse3_hadd_pd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse3_hadd_pd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x7c,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse3_hadd_ps(<4 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test_x86_sse3_hadd_ps:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vhaddps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x7c,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float>, <4 x float>) nounwind readnone
-
-
-define <2 x double> @test_x86_sse3_hsub_pd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse3_hsub_pd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x7d,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse3_hsub_ps(<4 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test_x86_sse3_hsub_ps:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vhsubps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x7d,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>) nounwind readnone
-
-
-define <16 x i8> @test_x86_sse3_ldu_dq(i8* %a0) {
-; CHECK-LABEL: test_x86_sse3_ldu_dq:
-; CHECK: ## BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: vlddqu (%eax), %xmm0 ## encoding: [0xc5,0xfb,0xf0,0x00]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %a0) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8*) nounwind readonly
-
-
-define <2 x double> @test_x86_sse41_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
-; CHECK-LABEL: test_x86_sse41_blendvpd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4b,0xc1,0x20]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) ; <<2 x double>> [#uses=1]
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse41_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
-; CHECK-LABEL: test_x86_sse41_blendvps:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4a,0xc1,0x20]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
-
-
-define <2 x double> @test_x86_sse41_dppd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse41_dppd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x41,0xc1,0x07]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7) ; <<2 x double>> [#uses=1]
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i8) nounwind readnone
-
-
-define <4 x float> @test_x86_sse41_dpps(<4 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test_x86_sse41_dpps:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x40,0xc1,0x07]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse41.dpps(<4 x float>, <4 x float>, i8) nounwind readnone
-
-
-define <4 x float> @test_x86_sse41_insertps(<4 x float> %a0, <4 x float> %a1) {
-; AVX-LABEL: test_x86_sse41_insertps:
-; AVX: ## BB#0:
-; AVX-NEXT: vinsertps $21, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x15]
-; AVX-NEXT: ## xmm0 = zero,xmm1[0],zero,xmm0[3]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse41_insertps:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vinsertps $21, %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0x7d,0x08,0x21,0xc1,0x15]
-; AVX512VL-NEXT: ## xmm0 = zero,xmm1[0],zero,xmm0[3]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 21) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse41_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) {
-; CHECK-LABEL: test_x86_sse41_mpsadbw:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x42,0xc1,0x07]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8>, <16 x i8>, i8) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse41_packusdw(<4 x i32> %a0, <4 x i32> %a1) {
-; AVX-LABEL: test_x86_sse41_packusdw:
-; AVX: ## BB#0:
-; AVX-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse41_packusdw:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32>, <4 x i32>) nounwind readnone
-
-
-define <16 x i8> @test_x86_sse41_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) {
-; CHECK-LABEL: test_x86_sse41_pblendvb:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4c,0xc1,0x20]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse41_phminposuw(<8 x i16> %a0) {
-; CHECK-LABEL: test_x86_sse41_phminposuw:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vphminposuw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x41,0xc0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %a0) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16>) nounwind readnone
-
-
-define <16 x i8> @test_x86_sse41_pmaxsb(<16 x i8> %a0, <16 x i8> %a1) {
-; AVX-LABEL: test_x86_sse41_pmaxsb:
-; AVX: ## BB#0:
-; AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3c,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse41_pmaxsb:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3c,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8>, <16 x i8>) nounwind readnone
-
-
-define <4 x i32> @test_x86_sse41_pmaxsd(<4 x i32> %a0, <4 x i32> %a1) {
-; AVX-LABEL: test_x86_sse41_pmaxsd:
-; AVX: ## BB#0:
-; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3d,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse41_pmaxsd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3d,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone
-
-
-define <4 x i32> @test_x86_sse41_pmaxud(<4 x i32> %a0, <4 x i32> %a1) {
-; AVX-LABEL: test_x86_sse41_pmaxud:
-; AVX: ## BB#0:
-; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3f,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse41_pmaxud:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3f,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse41_pmaxuw(<8 x i16> %a0, <8 x i16> %a1) {
-; AVX-LABEL: test_x86_sse41_pmaxuw:
-; AVX: ## BB#0:
-; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse41_pmaxuw:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <16 x i8> @test_x86_sse41_pminsb(<16 x i8> %a0, <16 x i8> %a1) {
-; AVX-LABEL: test_x86_sse41_pminsb:
-; AVX: ## BB#0:
-; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x38,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse41_pminsb:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpminsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x38,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8>, <16 x i8>) nounwind readnone
-
-
-define <4 x i32> @test_x86_sse41_pminsd(<4 x i32> %a0, <4 x i32> %a1) {
-; AVX-LABEL: test_x86_sse41_pminsd:
-; AVX: ## BB#0:
-; AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x39,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse41_pminsd:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpminsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x39,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone
-
-
-define <4 x i32> @test_x86_sse41_pminud(<4 x i32> %a0, <4 x i32> %a1) {
-; AVX-LABEL: test_x86_sse41_pminud:
-; AVX: ## BB#0:
-; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3b,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse41_pminud:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpminud %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3b,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readnone
-
-
-define <8 x i16> @test_x86_sse41_pminuw(<8 x i16> %a0, <8 x i16> %a1) {
-; AVX-LABEL: test_x86_sse41_pminuw:
-; AVX: ## BB#0:
-; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3a,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse41_pminuw:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpminuw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3a,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <2 x i64> @test_x86_sse41_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
-; AVX-LABEL: test_x86_sse41_pmuldq:
-; AVX: ## BB#0:
-; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x28,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse41_pmuldq:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x28,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %a0, <4 x i32> %a1) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32>, <4 x i32>) nounwind readnone
-
-
-define i32 @test_x86_sse41_ptestc(<2 x i64> %a0, <2 x i64> %a1) {
-; CHECK-LABEL: test_x86_sse41_ptestc:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
-; CHECK-NEXT: sbbl %eax, %eax ## encoding: [0x19,0xc0]
-; CHECK-NEXT: andl $1, %eax ## encoding: [0x83,0xe0,0x01]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone
-
-
-define i32 @test_x86_sse41_ptestnzc(<2 x i64> %a0, <2 x i64> %a1) {
-; CHECK-LABEL: test_x86_sse41_ptestnzc:
-; CHECK: ## BB#0:
-; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; CHECK-NEXT: vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
-; CHECK-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse41.ptestnzc(<2 x i64> %a0, <2 x i64> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse41.ptestnzc(<2 x i64>, <2 x i64>) nounwind readnone
-
-
-define i32 @test_x86_sse41_ptestz(<2 x i64> %a0, <2 x i64> %a1) {
-; CHECK-LABEL: test_x86_sse41_ptestz:
-; CHECK: ## BB#0:
-; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; CHECK-NEXT: vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
-; CHECK-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %a0, <2 x i64> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse41.ptestz(<2 x i64>, <2 x i64>) nounwind readnone
-
-
-define <2 x double> @test_x86_sse41_round_pd(<2 x double> %a0) {
-; CHECK-LABEL: test_x86_sse41_round_pd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vroundpd $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x09,0xc0,0x07]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 7) ; <<2 x double>> [#uses=1]
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse41.round.pd(<2 x double>, i32) nounwind readnone
-
-
-define <4 x float> @test_x86_sse41_round_ps(<4 x float> %a0) {
-; CHECK-LABEL: test_x86_sse41_round_ps:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vroundps $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x08,0xc0,0x07]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 7) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone
-
-
-define <2 x double> @test_x86_sse41_round_sd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse41_round_sd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0b,0xc1,0x07]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 7) ; <<2 x double>> [#uses=1]
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) nounwind readnone
-
-
-define <4 x float> @test_x86_sse41_round_ss(<4 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test_x86_sse41_round_ss:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vroundss $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0a,0xc1,0x07]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1, i32 7) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse41.round.ss(<4 x float>, <4 x float>, i32) nounwind readnone
-
-
-define i32 @test_x86_sse42_pcmpestri128(<16 x i8> %a0, <16 x i8> %a2) {
-; CHECK-LABEL: test_x86_sse42_pcmpestri128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; CHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; CHECK-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
-; CHECK-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse42.pcmpestri128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
-
-
-define i32 @test_x86_sse42_pcmpestri128_load(<16 x i8>* %a0, <16 x i8>* %a2) {
-; AVX-LABEL: test_x86_sse42_pcmpestri128_load:
-; AVX: ## BB#0:
-; AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
-; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX-NEXT: vmovdqa (%eax), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x00]
-; AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; AVX-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; AVX-NEXT: vpcmpestri $7, (%ecx), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0x01,0x07]
-; AVX-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse42_pcmpestri128_load:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
-; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX512VL-NEXT: vmovdqu (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x6f,0x00]
-; AVX512VL-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; AVX512VL-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; AVX512VL-NEXT: vpcmpestri $7, (%ecx), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0x01,0x07]
-; AVX512VL-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %1 = load <16 x i8>, <16 x i8>* %a0
- %2 = load <16 x i8>, <16 x i8>* %a2
- %res = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %1, i32 7, <16 x i8> %2, i32 7, i8 7) ; <i32> [#uses=1]
- ret i32 %res
-}
-
-
-define i32 @test_x86_sse42_pcmpestria128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
-; CHECK-LABEL: test_x86_sse42_pcmpestria128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: pushl %ebx ## encoding: [0x53]
-; CHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; CHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; CHECK-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
-; CHECK-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
-; CHECK-NEXT: seta %bl ## encoding: [0x0f,0x97,0xc3]
-; CHECK-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
-; CHECK-NEXT: popl %ebx ## encoding: [0x5b]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse42.pcmpestria128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse42.pcmpestria128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
-
-
-define i32 @test_x86_sse42_pcmpestric128(<16 x i8> %a0, <16 x i8> %a2) {
-; CHECK-LABEL: test_x86_sse42_pcmpestric128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; CHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; CHECK-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
-; CHECK-NEXT: sbbl %eax, %eax ## encoding: [0x19,0xc0]
-; CHECK-NEXT: andl $1, %eax ## encoding: [0x83,0xe0,0x01]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse42.pcmpestric128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse42.pcmpestric128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
-
-
-define i32 @test_x86_sse42_pcmpestrio128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
-; CHECK-LABEL: test_x86_sse42_pcmpestrio128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: pushl %ebx ## encoding: [0x53]
-; CHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; CHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; CHECK-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
-; CHECK-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
-; CHECK-NEXT: seto %bl ## encoding: [0x0f,0x90,0xc3]
-; CHECK-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
-; CHECK-NEXT: popl %ebx ## encoding: [0x5b]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse42.pcmpestrio128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse42.pcmpestrio128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
-
-
-define i32 @test_x86_sse42_pcmpestris128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
-; CHECK-LABEL: test_x86_sse42_pcmpestris128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: pushl %ebx ## encoding: [0x53]
-; CHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; CHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; CHECK-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
-; CHECK-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
-; CHECK-NEXT: sets %bl ## encoding: [0x0f,0x98,0xc3]
-; CHECK-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
-; CHECK-NEXT: popl %ebx ## encoding: [0x5b]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse42.pcmpestris128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse42.pcmpestris128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
-
-
-define i32 @test_x86_sse42_pcmpestriz128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
-; CHECK-LABEL: test_x86_sse42_pcmpestriz128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: pushl %ebx ## encoding: [0x53]
-; CHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; CHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; CHECK-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
-; CHECK-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
-; CHECK-NEXT: sete %bl ## encoding: [0x0f,0x94,0xc3]
-; CHECK-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
-; CHECK-NEXT: popl %ebx ## encoding: [0x5b]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse42.pcmpestriz128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse42.pcmpestriz128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
-
-
-define <16 x i8> @test_x86_sse42_pcmpestrm128(<16 x i8> %a0, <16 x i8> %a2) {
-; CHECK-LABEL: test_x86_sse42_pcmpestrm128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; CHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; CHECK-NEXT: vpcmpestrm $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x60,0xc1,0x07]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
-
-
-define <16 x i8> @test_x86_sse42_pcmpestrm128_load(<16 x i8> %a0, <16 x i8>* %a2) {
-; CHECK-LABEL: test_x86_sse42_pcmpestrm128_load:
-; CHECK: ## BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
-; CHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; CHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
-; CHECK-NEXT: vpcmpestrm $7, (%ecx), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x60,0x01,0x07]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %1 = load <16 x i8>, <16 x i8>* %a2
- %res = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %1, i32 7, i8 7) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-
-
-define i32 @test_x86_sse42_pcmpistri128(<16 x i8> %a0, <16 x i8> %a1) {
-; CHECK-LABEL: test_x86_sse42_pcmpistri128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
-; CHECK-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse42.pcmpistri128(<16 x i8>, <16 x i8>, i8) nounwind readnone
-
-
-define i32 @test_x86_sse42_pcmpistri128_load(<16 x i8>* %a0, <16 x i8>* %a1) {
-; AVX-LABEL: test_x86_sse42_pcmpistri128_load:
-; AVX: ## BB#0:
-; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
-; AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
-; AVX-NEXT: vmovdqa (%ecx), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x01]
-; AVX-NEXT: vpcmpistri $7, (%eax), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0x00,0x07]
-; AVX-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse42_pcmpistri128_load:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
-; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
-; AVX512VL-NEXT: vmovdqu (%ecx), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x6f,0x01]
-; AVX512VL-NEXT: vpcmpistri $7, (%eax), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0x00,0x07]
-; AVX512VL-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %1 = load <16 x i8>, <16 x i8>* %a0
- %2 = load <16 x i8>, <16 x i8>* %a1
- %res = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %1, <16 x i8> %2, i8 7) ; <i32> [#uses=1]
- ret i32 %res
-}
-
-
-define i32 @test_x86_sse42_pcmpistria128(<16 x i8> %a0, <16 x i8> %a1) {
-; CHECK-LABEL: test_x86_sse42_pcmpistria128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; CHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
-; CHECK-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse42.pcmpistria128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse42.pcmpistria128(<16 x i8>, <16 x i8>, i8) nounwind readnone
-
-
-define i32 @test_x86_sse42_pcmpistric128(<16 x i8> %a0, <16 x i8> %a1) {
-; CHECK-LABEL: test_x86_sse42_pcmpistric128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
-; CHECK-NEXT: sbbl %eax, %eax ## encoding: [0x19,0xc0]
-; CHECK-NEXT: andl $1, %eax ## encoding: [0x83,0xe0,0x01]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse42.pcmpistric128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse42.pcmpistric128(<16 x i8>, <16 x i8>, i8) nounwind readnone
-
-
-define i32 @test_x86_sse42_pcmpistrio128(<16 x i8> %a0, <16 x i8> %a1) {
-; CHECK-LABEL: test_x86_sse42_pcmpistrio128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; CHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
-; CHECK-NEXT: seto %al ## encoding: [0x0f,0x90,0xc0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse42.pcmpistrio128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse42.pcmpistrio128(<16 x i8>, <16 x i8>, i8) nounwind readnone
-
-
-define i32 @test_x86_sse42_pcmpistris128(<16 x i8> %a0, <16 x i8> %a1) {
-; CHECK-LABEL: test_x86_sse42_pcmpistris128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; CHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
-; CHECK-NEXT: sets %al ## encoding: [0x0f,0x98,0xc0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse42.pcmpistris128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse42.pcmpistris128(<16 x i8>, <16 x i8>, i8) nounwind readnone
-
-
-define i32 @test_x86_sse42_pcmpistriz128(<16 x i8> %a0, <16 x i8> %a1) {
-; CHECK-LABEL: test_x86_sse42_pcmpistriz128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; CHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
-; CHECK-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse42.pcmpistriz128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse42.pcmpistriz128(<16 x i8>, <16 x i8>, i8) nounwind readnone
-
-
-define <16 x i8> @test_x86_sse42_pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1) {
-; CHECK-LABEL: test_x86_sse42_pcmpistrm128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpistrm $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x62,0xc1,0x07]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8>, <16 x i8>, i8) nounwind readnone
-
-
-define <16 x i8> @test_x86_sse42_pcmpistrm128_load(<16 x i8> %a0, <16 x i8>* %a1) {
-; CHECK-LABEL: test_x86_sse42_pcmpistrm128_load:
-; CHECK: ## BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: vpcmpistrm $7, (%eax), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x62,0x00,0x07]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %1 = load <16 x i8>, <16 x i8>* %a1
- %res = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %1, i8 7) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-
-
-define <4 x float> @test_x86_sse_cmp_ps(<4 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test_x86_sse_cmp_ps:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vcmpordps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0xc2,0xc1,0x07]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone
-
-
-define <4 x float> @test_x86_sse_cmp_ss(<4 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test_x86_sse_cmp_ss:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vcmpordss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0xc2,0xc1,0x07]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse.cmp.ss(<4 x float>, <4 x float>, i8) nounwind readnone
-
-
-define i32 @test_x86_sse_comieq_ss(<4 x float> %a0, <4 x float> %a1) {
-; AVX-LABEL: test_x86_sse_comieq_ss:
-; AVX: ## BB#0:
-; AVX-NEXT: vcomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2f,0xc1]
-; AVX-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
-; AVX-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
-; AVX-NEXT: andb %al, %cl ## encoding: [0x20,0xc1]
-; AVX-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_comieq_ss:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vcomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc1]
-; AVX512VL-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
-; AVX512VL-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
-; AVX512VL-NEXT: andb %al, %cl ## encoding: [0x20,0xc1]
-; AVX512VL-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse.comieq.ss(<4 x float>, <4 x float>) nounwind readnone
-
-
-define i32 @test_x86_sse_comige_ss(<4 x float> %a0, <4 x float> %a1) {
-; AVX-LABEL: test_x86_sse_comige_ss:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vcomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2f,0xc1]
-; AVX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_comige_ss:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vcomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc1]
-; AVX512VL-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse.comige.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse.comige.ss(<4 x float>, <4 x float>) nounwind readnone
-
-
-define i32 @test_x86_sse_comigt_ss(<4 x float> %a0, <4 x float> %a1) {
-; AVX-LABEL: test_x86_sse_comigt_ss:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vcomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2f,0xc1]
-; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_comigt_ss:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vcomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc1]
-; AVX512VL-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse.comigt.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse.comigt.ss(<4 x float>, <4 x float>) nounwind readnone
-
-
-define i32 @test_x86_sse_comile_ss(<4 x float> %a0, <4 x float> %a1) {
-; AVX-LABEL: test_x86_sse_comile_ss:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vcomiss %xmm0, %xmm1 ## encoding: [0xc5,0xf8,0x2f,0xc8]
-; AVX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_comile_ss:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vcomiss %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc8]
-; AVX512VL-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse.comile.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse.comile.ss(<4 x float>, <4 x float>) nounwind readnone
-
-
-define i32 @test_x86_sse_comilt_ss(<4 x float> %a0, <4 x float> %a1) {
-; AVX-LABEL: test_x86_sse_comilt_ss:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vcomiss %xmm0, %xmm1 ## encoding: [0xc5,0xf8,0x2f,0xc8]
-; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_comilt_ss:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vcomiss %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc8]
-; AVX512VL-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse.comilt.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse.comilt.ss(<4 x float>, <4 x float>) nounwind readnone
-
-
-define i32 @test_x86_sse_comineq_ss(<4 x float> %a0, <4 x float> %a1) {
-; AVX-LABEL: test_x86_sse_comineq_ss:
-; AVX: ## BB#0:
-; AVX-NEXT: vcomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2f,0xc1]
-; AVX-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
-; AVX-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
-; AVX-NEXT: orb %al, %cl ## encoding: [0x08,0xc1]
-; AVX-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_comineq_ss:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vcomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc1]
-; AVX512VL-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
-; AVX512VL-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
-; AVX512VL-NEXT: orb %al, %cl ## encoding: [0x08,0xc1]
-; AVX512VL-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse.comineq.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse.comineq.ss(<4 x float>, <4 x float>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse_cvtsi2ss(<4 x float> %a0) {
-; AVX-LABEL: test_x86_sse_cvtsi2ss:
-; AVX: ## BB#0:
-; AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; AVX-NEXT: vcvtsi2ssl %eax, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x2a,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_cvtsi2ss:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
-; AVX512VL-NEXT: vcvtsi2ssl %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x2a,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse.cvtsi2ss(<4 x float> %a0, i32 7) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse.cvtsi2ss(<4 x float>, i32) nounwind readnone
-
-
-define i32 @test_x86_sse_cvtss2si(<4 x float> %a0) {
-; AVX-LABEL: test_x86_sse_cvtss2si:
-; AVX: ## BB#0:
-; AVX-NEXT: vcvtss2si %xmm0, %eax ## encoding: [0xc5,0xfa,0x2d,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_cvtss2si:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vcvtss2si %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x2d,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %a0) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse.cvtss2si(<4 x float>) nounwind readnone
-
-
-define i32 @test_x86_sse_cvttss2si(<4 x float> %a0) {
-; AVX-LABEL: test_x86_sse_cvttss2si:
-; AVX: ## BB#0:
-; AVX-NEXT: vcvttss2si %xmm0, %eax ## encoding: [0xc5,0xfa,0x2c,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_cvttss2si:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vcvttss2si %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x2c,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %a0) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse.cvttss2si(<4 x float>) nounwind readnone
-
-
-define void @test_x86_sse_ldmxcsr(i8* %a0) {
-; CHECK-LABEL: test_x86_sse_ldmxcsr:
-; CHECK: ## BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: vldmxcsr (%eax) ## encoding: [0xc5,0xf8,0xae,0x10]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- call void @llvm.x86.sse.ldmxcsr(i8* %a0)
- ret void
-}
-declare void @llvm.x86.sse.ldmxcsr(i8*) nounwind
-
-
-define <4 x float> @test_x86_sse_max_ps(<4 x float> %a0, <4 x float> %a1) {
-; AVX-LABEL: test_x86_sse_max_ps:
-; AVX: ## BB#0:
-; AVX-NEXT: vmaxps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5f,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_max_ps:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vmaxps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5f,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse_max_ss(<4 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test_x86_sse_max_ss:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vmaxss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5f,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse_min_ps(<4 x float> %a0, <4 x float> %a1) {
-; AVX-LABEL: test_x86_sse_min_ps:
-; AVX: ## BB#0:
-; AVX-NEXT: vminps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5d,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_min_ps:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vminps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5d,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse_min_ss(<4 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test_x86_sse_min_ss:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vminss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5d,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>) nounwind readnone
-
-
-define i32 @test_x86_sse_movmsk_ps(<4 x float> %a0) {
-; CHECK-LABEL: test_x86_sse_movmsk_ps:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vmovmskps %xmm0, %eax ## encoding: [0xc5,0xf8,0x50,0xc0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse_rcp_ps(<4 x float> %a0) {
-; AVX-LABEL: test_x86_sse_rcp_ps:
-; AVX: ## BB#0:
-; AVX-NEXT: vrcpps %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x53,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_rcp_ps:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vrcp14ps %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7d,0x08,0x4c,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse_rcp_ss(<4 x float> %a0) {
-; CHECK-LABEL: test_x86_sse_rcp_ss:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vrcpss %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x53,0xc0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse_rsqrt_ps(<4 x float> %a0) {
-; AVX-LABEL: test_x86_sse_rsqrt_ps:
-; AVX: ## BB#0:
-; AVX-NEXT: vrsqrtps %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x52,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_rsqrt_ps:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vrsqrt14ps %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7d,0x08,0x4e,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse_rsqrt_ss(<4 x float> %a0) {
-; CHECK-LABEL: test_x86_sse_rsqrt_ss:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x52,0xc0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse_sqrt_ps(<4 x float> %a0) {
-; CHECK-LABEL: test_x86_sse_sqrt_ps:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vsqrtps %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x51,0xc0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %a0) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse_sqrt_ss(<4 x float> %a0) {
-; CHECK-LABEL: test_x86_sse_sqrt_ss:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x51,0xc0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone
-
-
-define void @test_x86_sse_stmxcsr(i8* %a0) {
-; CHECK-LABEL: test_x86_sse_stmxcsr:
-; CHECK: ## BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: vstmxcsr (%eax) ## encoding: [0xc5,0xf8,0xae,0x18]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- call void @llvm.x86.sse.stmxcsr(i8* %a0)
- ret void
-}
-declare void @llvm.x86.sse.stmxcsr(i8*) nounwind
-
-
-define i32 @test_x86_sse_ucomieq_ss(<4 x float> %a0, <4 x float> %a1) {
-; AVX-LABEL: test_x86_sse_ucomieq_ss:
-; AVX: ## BB#0:
-; AVX-NEXT: vucomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2e,0xc1]
-; AVX-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
-; AVX-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
-; AVX-NEXT: andb %al, %cl ## encoding: [0x20,0xc1]
-; AVX-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_ucomieq_ss:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]
-; AVX512VL-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
-; AVX512VL-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
-; AVX512VL-NEXT: andb %al, %cl ## encoding: [0x20,0xc1]
-; AVX512VL-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse.ucomieq.ss(<4 x float>, <4 x float>) nounwind readnone
-
-
-define i32 @test_x86_sse_ucomige_ss(<4 x float> %a0, <4 x float> %a1) {
-; AVX-LABEL: test_x86_sse_ucomige_ss:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vucomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2e,0xc1]
-; AVX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_ucomige_ss:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]
-; AVX512VL-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse.ucomige.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse.ucomige.ss(<4 x float>, <4 x float>) nounwind readnone
-
-
-define i32 @test_x86_sse_ucomigt_ss(<4 x float> %a0, <4 x float> %a1) {
-; AVX-LABEL: test_x86_sse_ucomigt_ss:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vucomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2e,0xc1]
-; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_ucomigt_ss:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]
-; AVX512VL-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse.ucomigt.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse.ucomigt.ss(<4 x float>, <4 x float>) nounwind readnone
-
-
-define i32 @test_x86_sse_ucomile_ss(<4 x float> %a0, <4 x float> %a1) {
-; AVX-LABEL: test_x86_sse_ucomile_ss:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vucomiss %xmm0, %xmm1 ## encoding: [0xc5,0xf8,0x2e,0xc8]
-; AVX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_ucomile_ss:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vucomiss %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc8]
-; AVX512VL-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse.ucomile.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse.ucomile.ss(<4 x float>, <4 x float>) nounwind readnone
-
-
-define i32 @test_x86_sse_ucomilt_ss(<4 x float> %a0, <4 x float> %a1) {
-; AVX-LABEL: test_x86_sse_ucomilt_ss:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vucomiss %xmm0, %xmm1 ## encoding: [0xc5,0xf8,0x2e,0xc8]
-; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_ucomilt_ss:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vucomiss %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc8]
-; AVX512VL-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse.ucomilt.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse.ucomilt.ss(<4 x float>, <4 x float>) nounwind readnone
-
-
-define i32 @test_x86_sse_ucomineq_ss(<4 x float> %a0, <4 x float> %a1) {
-; AVX-LABEL: test_x86_sse_ucomineq_ss:
-; AVX: ## BB#0:
-; AVX-NEXT: vucomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2e,0xc1]
-; AVX-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
-; AVX-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
-; AVX-NEXT: orb %al, %cl ## encoding: [0x08,0xc1]
-; AVX-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_sse_ucomineq_ss:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]
-; AVX512VL-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
-; AVX512VL-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
-; AVX512VL-NEXT: orb %al, %cl ## encoding: [0x08,0xc1]
-; AVX512VL-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call i32 @llvm.x86.sse.ucomineq.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
- ret i32 %res
-}
-declare i32 @llvm.x86.sse.ucomineq.ss(<4 x float>, <4 x float>) nounwind readnone
-
-
-define <16 x i8> @test_x86_ssse3_pabs_b_128(<16 x i8> %a0) {
-; AVX-LABEL: test_x86_ssse3_pabs_b_128:
-; AVX: ## BB#0:
-; AVX-NEXT: vpabsb %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x1c,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_ssse3_pabs_b_128:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpabsb %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1c,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %a0) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8>) nounwind readnone
-
-
-define <4 x i32> @test_x86_ssse3_pabs_d_128(<4 x i32> %a0) {
-; AVX-LABEL: test_x86_ssse3_pabs_d_128:
-; AVX: ## BB#0:
-; AVX-NEXT: vpabsd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x1e,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_ssse3_pabs_d_128:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpabsd %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1e,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %a0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32>) nounwind readnone
-
-
-define <8 x i16> @test_x86_ssse3_pabs_w_128(<8 x i16> %a0) {
-; AVX-LABEL: test_x86_ssse3_pabs_w_128:
-; AVX: ## BB#0:
-; AVX-NEXT: vpabsw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x1d,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_ssse3_pabs_w_128:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpabsw %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1d,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a0) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16>) nounwind readnone
-
-
-define <4 x i32> @test_x86_ssse3_phadd_d_128(<4 x i32> %a0, <4 x i32> %a1) {
-; CHECK-LABEL: test_x86_ssse3_phadd_d_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vphaddd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x02,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32>, <4 x i32>) nounwind readnone
-
-
-define <8 x i16> @test_x86_ssse3_phadd_sw_128(<8 x i16> %a0, <8 x i16> %a1) {
-; CHECK-LABEL: test_x86_ssse3_phadd_sw_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x03,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <8 x i16> @test_x86_ssse3_phadd_w_128(<8 x i16> %a0, <8 x i16> %a1) {
-; CHECK-LABEL: test_x86_ssse3_phadd_w_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vphaddw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x01,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <4 x i32> @test_x86_ssse3_phsub_d_128(<4 x i32> %a0, <4 x i32> %a1) {
-; CHECK-LABEL: test_x86_ssse3_phsub_d_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vphsubd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x06,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32>, <4 x i32>) nounwind readnone
-
-
-define <8 x i16> @test_x86_ssse3_phsub_sw_128(<8 x i16> %a0, <8 x i16> %a1) {
-; CHECK-LABEL: test_x86_ssse3_phsub_sw_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x07,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <8 x i16> @test_x86_ssse3_phsub_w_128(<8 x i16> %a0, <8 x i16> %a1) {
-; CHECK-LABEL: test_x86_ssse3_phsub_w_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vphsubw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x05,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <8 x i16> @test_x86_ssse3_pmadd_ub_sw_128(<16 x i8> %a0, <16 x i8> %a1) {
-; AVX-LABEL: test_x86_ssse3_pmadd_ub_sw_128:
-; AVX: ## BB#0:
-; AVX-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x04,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_ssse3_pmadd_ub_sw_128:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x04,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8>, <16 x i8>) nounwind readnone
-
-
-; Make sure we don't commute this operation.
-define <8 x i16> @test_x86_ssse3_pmadd_ub_sw_128_load_op0(<16 x i8>* %ptr, <16 x i8> %a1) {
-; AVX-LABEL: test_x86_ssse3_pmadd_ub_sw_128_load_op0:
-; AVX: ## BB#0:
-; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX-NEXT: vmovdqa (%eax), %xmm1 ## encoding: [0xc5,0xf9,0x6f,0x08]
-; AVX-NEXT: vpmaddubsw %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0x04,0xc0]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_ssse3_pmadd_ub_sw_128_load_op0:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX512VL-NEXT: vmovdqu (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x6f,0x08]
-; AVX512VL-NEXT: vpmaddubsw %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x04,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %a0 = load <16 x i8>, <16 x i8>* %ptr
- %res = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-
-
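The no-commute comment above guards pmaddubsw's asymmetric operands: the first source is read as unsigned bytes and the second as signed bytes, so folding the loaded operand 0 by commuting the operands would change the result. A minimal standalone sketch (hypothetical, not part of this patch) of why the swap is illegal:

; Per i16 lane, pmaddubsw(u, s) = saturate(u0*s0 + u1*s1), u unsigned, s signed.
; For bytes u = 0xFF, s = 0x01:
;   pmaddubsw(u, s) = 255*1 + 255*1 = 510
;   pmaddubsw(s, u) = 1*(-1) + 1*(-1) = -2
define <8 x i16> @pmaddubsw_swapped(<16 x i8> %u, <16 x i8> %s) {
  %r = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %s, <16 x i8> %u)
  ret <8 x i16> %r
}
declare <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8>, <16 x i8>) nounwind readnone
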
-define <8 x i16> @test_x86_ssse3_pmul_hr_sw_128(<8 x i16> %a0, <8 x i16> %a1) {
-; AVX-LABEL: test_x86_ssse3_pmul_hr_sw_128:
-; AVX: ## BB#0:
-; AVX-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x0b,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_ssse3_pmul_hr_sw_128:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0b,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16>, <8 x i16>) nounwind readnone
-
-
-define <16 x i8> @test_x86_ssse3_pshuf_b_128(<16 x i8> %a0, <16 x i8> %a1) {
-; AVX-LABEL: test_x86_ssse3_pshuf_b_128:
-; AVX: ## BB#0:
-; AVX-NEXT: vpshufb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x00,0xc1]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_ssse3_pshuf_b_128:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpshufb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x00,0xc1]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>) nounwind readnone
-
-
-define <16 x i8> @test_x86_ssse3_psign_b_128(<16 x i8> %a0, <16 x i8> %a1) {
-; CHECK-LABEL: test_x86_ssse3_psign_b_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpsignb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x08,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8>, <16 x i8>) nounwind readnone
-
-
-define <4 x i32> @test_x86_ssse3_psign_d_128(<4 x i32> %a0, <4 x i32> %a1) {
-; CHECK-LABEL: test_x86_ssse3_psign_d_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpsignd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x0a,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32>, <4 x i32>) nounwind readnone
-
-
-define <8 x i16> @test_x86_ssse3_psign_w_128(<8 x i16> %a0, <8 x i16> %a1) {
-; CHECK-LABEL: test_x86_ssse3_psign_w_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpsignw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x09,0xc1]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16>, <8 x i16>) nounwind readnone
-
-
define <4 x double> @test_x86_avx_addsub_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK-LABEL: test_x86_avx_addsub_pd_256:
; CHECK: ## BB#0:
@@ -2773,6 +149,7 @@ define <4 x float> @test_x86_avx_cvt_pd2_ps_256(<4 x double> %a0) {
; AVX512VL-LABEL: test_x86_avx_cvt_pd2_ps_256:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: vcvtpd2ps %ymm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x5a,0xc0]
+; AVX512VL-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx.cvt.pd2.ps.256(<4 x double> %a0) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
@@ -2790,6 +167,7 @@ define <4 x i32> @test_x86_avx_cvt_pd2dq_256(<4 x double> %a0) {
; AVX512VL-LABEL: test_x86_avx_cvt_pd2dq_256:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: vcvtpd2dq %ymm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xff,0xe6,0xc0]
+; AVX512VL-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double> %a0) ; <<4 x i32>> [#uses=1]
ret <4 x i32> %res
@@ -2834,6 +212,7 @@ define <4 x i32> @test_x86_avx_cvtt_pd2dq_256(<4 x double> %a0) {
; AVX512VL-LABEL: test_x86_avx_cvtt_pd2dq_256:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: vcvttpd2dq %ymm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe6,0xc0]
+; AVX512VL-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx.cvtt.pd2dq.256(<4 x double> %a0) ; <<4 x i32>> [#uses=1]
ret <4 x i32> %res
@@ -2985,18 +364,12 @@ declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x i64>, <2 x double>) nounwind
define void @test_x86_avx_maskstore_pd_256(i8* %a0, <4 x i64> %mask, <4 x double> %a2) {
-; AVX-LABEL: test_x86_avx_maskstore_pd_256:
-; AVX: ## BB#0:
-; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX-NEXT: vmaskmovpd %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0x7d,0x2f,0x08]
-; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx_maskstore_pd_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX512VL-NEXT: vmaskmovpd %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0x7d,0x2f,0x08]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_avx_maskstore_pd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; CHECK-NEXT: vmaskmovpd %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0x7d,0x2f,0x08]
+; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT: retl ## encoding: [0xc3]
call void @llvm.x86.avx.maskstore.pd.256(i8* %a0, <4 x i64> %mask, <4 x double> %a2)
ret void
}
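
Throughout this file the AVX512VL outputs gain a trailing vzeroupper, matching the plain-AVX outputs; where the two sequences become identical, the separate AVX/AVX512VL prefixes collapse into a shared CHECK block, as here. A minimal sketch of the pattern that triggers the insertion (hypothetical function, not from this patch):

define void @dirty_upper(<8 x float> %a, <8 x float>* %p) nounwind {
  %s = fadd <8 x float> %a, %a
  store <8 x float> %s, <8 x float>* %p
  ; vzeroupper is emitted before this ret: the upper ymm halves are dirty
  ; and no ymm value is live out of the function.
  ret void
}
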
@@ -3016,18 +389,12 @@ declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x i32>, <4 x float>) nounwind
define void @test_x86_avx_maskstore_ps_256(i8* %a0, <8 x i32> %mask, <8 x float> %a2) {
-; AVX-LABEL: test_x86_avx_maskstore_ps_256:
-; AVX: ## BB#0:
-; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX-NEXT: vmaskmovps %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0x7d,0x2e,0x08]
-; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx_maskstore_ps_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX512VL-NEXT: vmaskmovps %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0x7d,0x2e,0x08]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_avx_maskstore_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; CHECK-NEXT: vmaskmovps %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0x7d,0x2e,0x08]
+; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT: retl ## encoding: [0xc3]
call void @llvm.x86.avx.maskstore.ps.256(i8* %a0, <8 x i32> %mask, <8 x float> %a2)
ret void
}
@@ -3099,16 +466,11 @@ declare <8 x float> @llvm.x86.avx.min.ps.256(<8 x float>, <8 x float>) nounwind
define i32 @test_x86_avx_movmsk_pd_256(<4 x double> %a0) {
-; AVX-LABEL: test_x86_avx_movmsk_pd_256:
-; AVX: ## BB#0:
-; AVX-NEXT: vmovmskpd %ymm0, %eax ## encoding: [0xc5,0xfd,0x50,0xc0]
-; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx_movmsk_pd_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vmovmskpd %ymm0, %eax ## encoding: [0xc5,0xfd,0x50,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_avx_movmsk_pd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmovmskpd %ymm0, %eax ## encoding: [0xc5,0xfd,0x50,0xc0]
+; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0) ; <i32> [#uses=1]
ret i32 %res
}
@@ -3116,16 +478,11 @@ declare i32 @llvm.x86.avx.movmsk.pd.256(<4 x double>) nounwind readnone
define i32 @test_x86_avx_movmsk_ps_256(<8 x float> %a0) {
-; AVX-LABEL: test_x86_avx_movmsk_ps_256:
-; AVX: ## BB#0:
-; AVX-NEXT: vmovmskps %ymm0, %eax ## encoding: [0xc5,0xfc,0x50,0xc0]
-; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx_movmsk_ps_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vmovmskps %ymm0, %eax ## encoding: [0xc5,0xfc,0x50,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_avx_movmsk_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmovmskps %ymm0, %eax ## encoding: [0xc5,0xfc,0x50,0xc0]
+; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0) ; <i32> [#uses=1]
ret i32 %res
}
@@ -3138,20 +495,13 @@ declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) nounwind readnone
define i32 @test_x86_avx_ptestc_256(<4 x i64> %a0, <4 x i64> %a1) {
-; AVX-LABEL: test_x86_avx_ptestc_256:
-; AVX: ## BB#0:
-; AVX-NEXT: vptest %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x17,0xc1]
-; AVX-NEXT: sbbl %eax, %eax ## encoding: [0x19,0xc0]
-; AVX-NEXT: andl $1, %eax ## encoding: [0x83,0xe0,0x01]
-; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx_ptestc_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vptest %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x17,0xc1]
-; AVX512VL-NEXT: sbbl %eax, %eax ## encoding: [0x19,0xc0]
-; AVX512VL-NEXT: andl $1, %eax ## encoding: [0x83,0xe0,0x01]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_avx_ptestc_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
+; CHECK-NEXT: vptest %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x17,0xc1]
+; CHECK-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
+; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.avx.ptestc.256(<4 x i64> %a0, <4 x i64> %a1) ; <i32> [#uses=1]
ret i32 %res
}
@@ -3159,20 +509,13 @@ declare i32 @llvm.x86.avx.ptestc.256(<4 x i64>, <4 x i64>) nounwind readnone
define i32 @test_x86_avx_ptestnzc_256(<4 x i64> %a0, <4 x i64> %a1) {
-; AVX-LABEL: test_x86_avx_ptestnzc_256:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vptest %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x17,0xc1]
-; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx_ptestnzc_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vptest %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x17,0xc1]
-; AVX512VL-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_avx_ptestnzc_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
+; CHECK-NEXT: vptest %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x17,0xc1]
+; CHECK-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
+; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.avx.ptestnzc.256(<4 x i64> %a0, <4 x i64> %a1) ; <i32> [#uses=1]
ret i32 %res
}
@@ -3180,20 +523,13 @@ declare i32 @llvm.x86.avx.ptestnzc.256(<4 x i64>, <4 x i64>) nounwind readnone
define i32 @test_x86_avx_ptestz_256(<4 x i64> %a0, <4 x i64> %a1) {
-; AVX-LABEL: test_x86_avx_ptestz_256:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vptest %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x17,0xc1]
-; AVX-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
-; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx_ptestz_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vptest %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x17,0xc1]
-; AVX512VL-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_avx_ptestz_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
+; CHECK-NEXT: vptest %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x17,0xc1]
+; CHECK-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
+; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %a0, <4 x i64> %a1) ; <i32> [#uses=1]
ret i32 %res
}
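
The ptest tests also switch how the carry flag is materialized: vptest sets CF when (~a & b) is all zeroes, and the old sbbl/andl pair (eax = -CF, then mask to bit 0) is replaced by xorl+setb, which zeroes eax up front and so avoids sbb's dependency on the previous eax value. A minimal sketch of the underlying pattern (128-bit variant, hypothetical test):

define i32 @ptest_carry(<2 x i64> %a, <2 x i64> %b) {
  ; The i32 result is just CF after the test instruction, now produced as:
  ;   xorl %eax, %eax ; ptest ; setb %al
  %r = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a, <2 x i64> %b)
  ret i32 %r
}
declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone
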
@@ -3410,9 +746,9 @@ declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>) noun
define i32 @test_x86_avx_vtestc_pd(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx_vtestc_pd:
; CHECK: ## BB#0:
+; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; CHECK-NEXT: vtestpd %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x0f,0xc1]
-; CHECK-NEXT: sbbl %eax, %eax ## encoding: [0x19,0xc0]
-; CHECK-NEXT: andl $1, %eax ## encoding: [0x83,0xe0,0x01]
+; CHECK-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
ret i32 %res
@@ -3421,20 +757,13 @@ declare i32 @llvm.x86.avx.vtestc.pd(<2 x double>, <2 x double>) nounwind readnon
define i32 @test_x86_avx_vtestc_pd_256(<4 x double> %a0, <4 x double> %a1) {
-; AVX-LABEL: test_x86_avx_vtestc_pd_256:
-; AVX: ## BB#0:
-; AVX-NEXT: vtestpd %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0f,0xc1]
-; AVX-NEXT: sbbl %eax, %eax ## encoding: [0x19,0xc0]
-; AVX-NEXT: andl $1, %eax ## encoding: [0x83,0xe0,0x01]
-; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx_vtestc_pd_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vtestpd %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0f,0xc1]
-; AVX512VL-NEXT: sbbl %eax, %eax ## encoding: [0x19,0xc0]
-; AVX512VL-NEXT: andl $1, %eax ## encoding: [0x83,0xe0,0x01]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_avx_vtestc_pd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
+; CHECK-NEXT: vtestpd %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0f,0xc1]
+; CHECK-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
+; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %a1) ; <i32> [#uses=1]
ret i32 %res
}
@@ -3444,9 +773,9 @@ declare i32 @llvm.x86.avx.vtestc.pd.256(<4 x double>, <4 x double>) nounwind rea
define i32 @test_x86_avx_vtestc_ps(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_x86_avx_vtestc_ps:
; CHECK: ## BB#0:
+; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; CHECK-NEXT: vtestps %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x0e,0xc1]
-; CHECK-NEXT: sbbl %eax, %eax ## encoding: [0x19,0xc0]
-; CHECK-NEXT: andl $1, %eax ## encoding: [0x83,0xe0,0x01]
+; CHECK-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
@@ -3455,20 +784,13 @@ declare i32 @llvm.x86.avx.vtestc.ps(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_avx_vtestc_ps_256(<8 x float> %a0, <8 x float> %a1) {
-; AVX-LABEL: test_x86_avx_vtestc_ps_256:
-; AVX: ## BB#0:
-; AVX-NEXT: vtestps %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0e,0xc1]
-; AVX-NEXT: sbbl %eax, %eax ## encoding: [0x19,0xc0]
-; AVX-NEXT: andl $1, %eax ## encoding: [0x83,0xe0,0x01]
-; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx_vtestc_ps_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vtestps %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0e,0xc1]
-; AVX512VL-NEXT: sbbl %eax, %eax ## encoding: [0x19,0xc0]
-; AVX512VL-NEXT: andl $1, %eax ## encoding: [0x83,0xe0,0x01]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_avx_vtestc_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
+; CHECK-NEXT: vtestps %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0e,0xc1]
+; CHECK-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
+; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
@@ -3489,20 +811,13 @@ declare i32 @llvm.x86.avx.vtestnzc.pd(<2 x double>, <2 x double>) nounwind readn
define i32 @test_x86_avx_vtestnzc_pd_256(<4 x double> %a0, <4 x double> %a1) {
-; AVX-LABEL: test_x86_avx_vtestnzc_pd_256:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vtestpd %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0f,0xc1]
-; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx_vtestnzc_pd_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vtestpd %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0f,0xc1]
-; AVX512VL-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_avx_vtestnzc_pd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
+; CHECK-NEXT: vtestpd %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0f,0xc1]
+; CHECK-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
+; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double> %a0, <4 x double> %a1) ; <i32> [#uses=1]
ret i32 %res
}
@@ -3523,20 +838,13 @@ declare i32 @llvm.x86.avx.vtestnzc.ps(<4 x float>, <4 x float>) nounwind readnon
define i32 @test_x86_avx_vtestnzc_ps_256(<8 x float> %a0, <8 x float> %a1) {
-; AVX-LABEL: test_x86_avx_vtestnzc_ps_256:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vtestps %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0e,0xc1]
-; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx_vtestnzc_ps_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vtestps %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0e,0xc1]
-; AVX512VL-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_avx_vtestnzc_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
+; CHECK-NEXT: vtestps %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0e,0xc1]
+; CHECK-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
+; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float> %a0, <8 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
@@ -3557,20 +865,13 @@ declare i32 @llvm.x86.avx.vtestz.pd(<2 x double>, <2 x double>) nounwind readnon
define i32 @test_x86_avx_vtestz_pd_256(<4 x double> %a0, <4 x double> %a1) {
-; AVX-LABEL: test_x86_avx_vtestz_pd_256:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vtestpd %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0f,0xc1]
-; AVX-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
-; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx_vtestz_pd_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vtestpd %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0f,0xc1]
-; AVX512VL-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_avx_vtestz_pd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
+; CHECK-NEXT: vtestpd %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0f,0xc1]
+; CHECK-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
+; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.avx.vtestz.pd.256(<4 x double> %a0, <4 x double> %a1) ; <i32> [#uses=1]
ret i32 %res
}
@@ -3591,20 +892,13 @@ declare i32 @llvm.x86.avx.vtestz.ps(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_avx_vtestz_ps_256(<8 x float> %a0, <8 x float> %a1) {
-; AVX-LABEL: test_x86_avx_vtestz_ps_256:
-; AVX: ## BB#0:
-; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX-NEXT: vtestps %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0e,0xc1]
-; AVX-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
-; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; AVX-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx_vtestz_ps_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; AVX512VL-NEXT: vtestps %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0e,0xc1]
-; AVX512VL-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_avx_vtestz_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
+; CHECK-NEXT: vtestps %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0e,0xc1]
+; CHECK-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
+; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.avx.vtestz.ps.256(<8 x float> %a0, <8 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
@@ -3632,114 +926,12 @@ define void @test_x86_avx_vzeroupper() {
}
declare void @llvm.x86.avx.vzeroupper() nounwind
-; Make sure instructions that have no AVX equivalents but are associated with SSEX feature flags still work
-
-define void @monitor(i8* %P, i32 %E, i32 %H) nounwind {
-; CHECK-LABEL: monitor:
-; CHECK: ## BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx ## encoding: [0x8b,0x54,0x24,0x0c]
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: leal (%eax), %eax ## encoding: [0x8d,0x00]
-; CHECK-NEXT: monitor ## encoding: [0x0f,0x01,0xc8]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- tail call void @llvm.x86.sse3.monitor(i8* %P, i32 %E, i32 %H)
- ret void
-}
-declare void @llvm.x86.sse3.monitor(i8*, i32, i32) nounwind
-
-define void @mwait(i32 %E, i32 %H) nounwind {
-; CHECK-LABEL: mwait:
-; CHECK: ## BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
-; CHECK-NEXT: mwait ## encoding: [0x0f,0x01,0xc9]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- tail call void @llvm.x86.sse3.mwait(i32 %E, i32 %H)
- ret void
-}
-declare void @llvm.x86.sse3.mwait(i32, i32) nounwind
-
-define void @sfence() nounwind {
-; CHECK-LABEL: sfence:
-; CHECK: ## BB#0:
-; CHECK-NEXT: sfence ## encoding: [0x0f,0xae,0xf8]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- tail call void @llvm.x86.sse.sfence()
- ret void
-}
-declare void @llvm.x86.sse.sfence() nounwind
-
-define void @lfence() nounwind {
-; CHECK-LABEL: lfence:
-; CHECK: ## BB#0:
-; CHECK-NEXT: lfence ## encoding: [0x0f,0xae,0xe8]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- tail call void @llvm.x86.sse2.lfence()
- ret void
-}
-declare void @llvm.x86.sse2.lfence() nounwind
-
-define void @mfence() nounwind {
-; CHECK-LABEL: mfence:
-; CHECK: ## BB#0:
-; CHECK-NEXT: mfence ## encoding: [0x0f,0xae,0xf0]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- tail call void @llvm.x86.sse2.mfence()
- ret void
-}
-declare void @llvm.x86.sse2.mfence() nounwind
-
-define void @clflush(i8* %p) nounwind {
-; CHECK-LABEL: clflush:
-; CHECK: ## BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: clflush (%eax) ## encoding: [0x0f,0xae,0x38]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- tail call void @llvm.x86.sse2.clflush(i8* %p)
- ret void
-}
-declare void @llvm.x86.sse2.clflush(i8*) nounwind
-
-define i32 @crc32_32_8(i32 %a, i8 %b) nounwind {
-; CHECK-LABEL: crc32_32_8:
-; CHECK: ## BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: crc32b {{[0-9]+}}(%esp), %eax ## encoding: [0xf2,0x0f,0x38,0xf0,0x44,0x24,0x08]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %tmp = call i32 @llvm.x86.sse42.crc32.32.8(i32 %a, i8 %b)
- ret i32 %tmp
-}
-declare i32 @llvm.x86.sse42.crc32.32.8(i32, i8) nounwind
-
-define i32 @crc32_32_16(i32 %a, i16 %b) nounwind {
-; CHECK-LABEL: crc32_32_16:
-; CHECK: ## BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: crc32w {{[0-9]+}}(%esp), %eax ## encoding: [0x66,0xf2,0x0f,0x38,0xf1,0x44,0x24,0x08]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %tmp = call i32 @llvm.x86.sse42.crc32.32.16(i32 %a, i16 %b)
- ret i32 %tmp
-}
-declare i32 @llvm.x86.sse42.crc32.32.16(i32, i16) nounwind
-
-define i32 @crc32_32_32(i32 %a, i32 %b) nounwind {
-; CHECK-LABEL: crc32_32_32:
-; CHECK: ## BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: crc32l {{[0-9]+}}(%esp), %eax ## encoding: [0xf2,0x0f,0x38,0xf1,0x44,0x24,0x08]
-; CHECK-NEXT: retl ## encoding: [0xc3]
- %tmp = call i32 @llvm.x86.sse42.crc32.32.32(i32 %a, i32 %b)
- ret i32 %tmp
-}
-declare i32 @llvm.x86.sse42.crc32.32.32(i32, i32) nounwind
-
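The block deleted above exercised legacy instructions (monitor/mwait, the fences, clflush, crc32) that have no VEX-encoded AVX forms but hang off SSE feature flags, so they must remain selectable when AVX is enabled. A minimal sketch of that kind of check (hypothetical file, mirroring one of the deleted tests):

; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+avx | FileCheck %s
define i32 @crc32_under_avx(i32 %crc, i8 %b) nounwind {
; CHECK-LABEL: crc32_under_avx:
; CHECK: crc32b
  %r = tail call i32 @llvm.x86.sse42.crc32.32.8(i32 %crc, i8 %b)
  ret i32 %r
}
declare i32 @llvm.x86.sse42.crc32.32.8(i32, i8) nounwind
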
define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind {
; AVX-LABEL: movnt_dq:
; AVX: ## BB#0:
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX-NEXT: vpaddq LCPI247_0, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd4,0x05,A,A,A,A]
-; AVX-NEXT: ## fixup A - offset: 4, value: LCPI247_0, kind: FK_Data_4
+; AVX-NEXT: vpaddq LCPI65_0, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd4,0x05,A,A,A,A]
+; AVX-NEXT: ## fixup A - offset: 4, value: LCPI65_0, kind: FK_Data_4
; AVX-NEXT: vmovntdq %ymm0, (%eax) ## encoding: [0xc5,0xfd,0xe7,0x00]
; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; AVX-NEXT: retl ## encoding: [0xc3]
@@ -3747,9 +939,10 @@ define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind {
; AVX512VL-LABEL: movnt_dq:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX512VL-NEXT: vpaddq LCPI247_0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0x05,A,A,A,A]
-; AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI247_0, kind: FK_Data_4
+; AVX512VL-NEXT: vpaddq LCPI65_0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0x05,A,A,A,A]
+; AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI65_0, kind: FK_Data_4
; AVX512VL-NEXT: vmovntdq %ymm0, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe7,0x00]
+; AVX512VL-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%a2 = add <2 x i64> %a1, <i64 1, i64 1>
%a3 = shufflevector <2 x i64> %a2, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
@@ -3770,6 +963,7 @@ define void @movnt_ps(i8* %p, <8 x float> %a) nounwind {
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; AVX512VL-NEXT: vmovntps %ymm0, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x2b,0x00]
+; AVX512VL-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
tail call void @llvm.x86.avx.movnt.ps.256(i8* %p, <8 x float> %a) nounwind
ret void
@@ -3793,6 +987,7 @@ define void @movnt_pd(i8* %p, <4 x double> %a1) nounwind {
; AVX512VL-NEXT: vxorpd %ymm1, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x57,0xc9]
; AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
; AVX512VL-NEXT: vmovntpd %ymm0, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x2b,0x00]
+; AVX512VL-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%a2 = fadd <4 x double> %a1, <double 0x0, double 0x0, double 0x0, double 0x0>
tail call void @llvm.x86.avx.movnt.pd.256(i8* %p, <4 x double> %a2) nounwind
diff --git a/test/CodeGen/X86/avx-intrinsics-x86_64.ll b/test/CodeGen/X86/avx-intrinsics-x86_64.ll
index 252574d84d8f..909c69cb9a17 100644
--- a/test/CodeGen/X86/avx-intrinsics-x86_64.ll
+++ b/test/CodeGen/X86/avx-intrinsics-x86_64.ll
@@ -1,51 +1,45 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -march=x86-64 -mcpu=corei7 -mattr=avx | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -march=x86-64 -mcpu=corei7 -mattr=avx512vl | FileCheck %s
-
-define i64 @test_x86_sse2_cvtsd2si64(<2 x double> %a0) {
- ; CHECK: vcvtsd2si
- %res = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %a0) ; <i64> [#uses=1]
- ret i64 %res
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -march=x86-64 -mcpu=corei7 -mattr=avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -march=x86-64 -mcpu=corei7 -mattr=avx512vl | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL
+
+define <4 x double> @test_x86_avx_vzeroall(<4 x double> %a, <4 x double> %b) {
+; AVX-LABEL: test_x86_avx_vzeroall:
+; AVX: ## BB#0:
+; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vmovupd %ymm0, -{{[0-9]+}}(%rsp) ## 32-byte Spill
+; AVX-NEXT: vzeroall
+; AVX-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0 ## 32-byte Reload
+; AVX-NEXT: retq
+;
+; AVX512VL-LABEL: test_x86_avx_vzeroall:
+; AVX512VL: ## BB#0:
+; AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm16
+; AVX512VL-NEXT: vzeroall
+; AVX512VL-NEXT: vmovapd %ymm16, %ymm0
+; AVX512VL-NEXT: retq
+ %c = fadd <4 x double> %a, %b
+ call void @llvm.x86.avx.vzeroall()
+ ret <4 x double> %c
}
-declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone
-
-
-define <2 x double> @test_x86_sse2_cvtsi642sd(<2 x double> %a0, i64 %a1) {
- ; CHECK: vcvtsi2sd
- %res = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> %a0, i64 %a1) ; <<2 x double>> [#uses=1]
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double>, i64) nounwind readnone
-
-
-define i64 @test_x86_sse2_cvttsd2si64(<2 x double> %a0) {
- ; CHECK: vcvttsd2si
- %res = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %a0) ; <i64> [#uses=1]
- ret i64 %res
-}
-declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>) nounwind readnone
-
-
-define i64 @test_x86_sse_cvtss2si64(<4 x float> %a0) {
- ; CHECK: vcvtss2si
- %res = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0) ; <i64> [#uses=1]
- ret i64 %res
+declare void @llvm.x86.avx.vzeroall() nounwind
+
+define <4 x double> @test_x86_avx_vzeroupper(<4 x double> %a, <4 x double> %b) {
+; AVX-LABEL: test_x86_avx_vzeroupper:
+; AVX: ## BB#0:
+; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vmovupd %ymm0, -{{[0-9]+}}(%rsp) ## 32-byte Spill
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0 ## 32-byte Reload
+; AVX-NEXT: retq
+;
+; AVX512VL-LABEL: test_x86_avx_vzeroupper:
+; AVX512VL: ## BB#0:
+; AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm16
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: vmovapd %ymm16, %ymm0
+; AVX512VL-NEXT: retq
+ %c = fadd <4 x double> %a, %b
+ call void @llvm.x86.avx.vzeroupper()
+ ret <4 x double> %c
}
-declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse_cvtsi642ss(<4 x float> %a0, i64 %a1) {
- ; CHECK: vcvtsi2ss
- %res = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> %a0, i64 %a1) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float>, i64) nounwind readnone
-
-
-define i64 @test_x86_sse_cvttss2si64(<4 x float> %a0) {
- ; CHECK: vcvttss2si
- %res = call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %a0) ; <i64> [#uses=1]
- ret i64 %res
-}
-declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone
-
-
+declare void @llvm.x86.avx.vzeroupper() nounwind
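
The rewritten x86_64 file now checks a subtler point: a 256-bit value kept live across vzeroall/vzeroupper. Those instructions only touch ymm0-ymm15, so with plain AVX the value has to be spilled and reloaded around them, while the AVX512VL run can simply park it in %ymm16, as the checks above show. A minimal sketch of the live-across pattern (hypothetical, distilled from the tests above):

define <4 x double> @live_across(<4 x double> %a, <4 x double> %b) {
  %c = fadd <4 x double> %a, %b
  ; %c must survive this call: spill/reload on AVX, ymm16 on AVX-512VL.
  call void @llvm.x86.avx.vzeroall()
  ret <4 x double> %c
}
declare void @llvm.x86.avx.vzeroall() nounwind
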
diff --git a/test/CodeGen/X86/avx-shuffle-x86_32.ll b/test/CodeGen/X86/avx-shuffle-x86_32.ll
index 3fe0784c5201..6defe7efb941 100755
--- a/test/CodeGen/X86/avx-shuffle-x86_32.ll
+++ b/test/CodeGen/X86/avx-shuffle-x86_32.ll
@@ -16,8 +16,7 @@ define <8 x i16> @test2(<4 x i16>* %v) nounwind {
; CHECK-LABEL: test2:
; CHECK: # BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: retl
%v9 = load <4 x i16>, <4 x i16> * %v, align 8
%v10 = shufflevector <4 x i16> %v9, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
diff --git a/test/CodeGen/X86/avx-trunc.ll b/test/CodeGen/X86/avx-trunc.ll
index 789ca2413940..1a9acd007778 100755
--- a/test/CodeGen/X86/avx-trunc.ll
+++ b/test/CodeGen/X86/avx-trunc.ll
@@ -39,3 +39,5 @@ define <16 x i8> @trunc_16_8(<16 x i16> %A) nounwind uwtable readnone ssp {
%B = trunc <16 x i16> %A to <16 x i8>
ret <16 x i8> %B
}
+
+
diff --git a/test/CodeGen/X86/avx-vbroadcast.ll b/test/CodeGen/X86/avx-vbroadcast.ll
index 0cd236da24ac..41ea2a8c3677 100644
--- a/test/CodeGen/X86/avx-vbroadcast.ll
+++ b/test/CodeGen/X86/avx-vbroadcast.ll
@@ -28,6 +28,40 @@ entry:
ret <4 x i64> %vecinit6.i
}
+define <4 x i64> @A2(i64* %ptr, i64* %ptr2) nounwind uwtable readnone ssp {
+; X32-LABEL: A2:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl (%ecx), %edx
+; X32-NEXT: movl 4(%ecx), %ecx
+; X32-NEXT: movl %ecx, 4(%eax)
+; X32-NEXT: movl %edx, (%eax)
+; X32-NEXT: vmovd %edx, %xmm0
+; X32-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
+; X32-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0
+; X32-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: A2:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: vmovq %rax, %xmm0
+; X64-NEXT: movq %rax, (%rsi)
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: retq
+entry:
+ %q = load i64, i64* %ptr, align 8
+ store i64 %q, i64* %ptr2, align 8 ; to create a chain to prevent broadcast
+ %vecinit.i = insertelement <4 x i64> undef, i64 %q, i32 0
+ %vecinit2.i = insertelement <4 x i64> %vecinit.i, i64 %q, i32 1
+ %vecinit4.i = insertelement <4 x i64> %vecinit2.i, i64 %q, i32 2
+ %vecinit6.i = insertelement <4 x i64> %vecinit4.i, i64 %q, i32 3
+ ret <4 x i64> %vecinit6.i
+}
+
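The "; to create a chain to prevent broadcast" stores give the loaded scalar a second user. vbroadcastss/vbroadcastsd can only fold a load whose sole use is the splat, so once the store keeps the scalar load alive, the backend builds the splat with moves and shuffles instead, as @A2 above (and the @B2/@B3 pair below) demonstrates. A minimal sketch of the single-use case that does fold (hypothetical, equivalent to @B2 in splat form):

define <8 x i32> @splat_single_use_load(i32* %p) nounwind {
  ; The load's only user is the splat, so this selects a memory vbroadcastss.
  %q = load i32, i32* %p, align 4
  %v = insertelement <8 x i32> undef, i32 %q, i32 0
  %s = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> zeroinitializer
  ret <8 x i32> %s
}
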
define <8 x i32> @B(i32* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: B:
; X32: ## BB#0: ## %entry
@@ -48,6 +82,64 @@ entry:
ret <8 x i32> %vecinit6.i
}
+define <8 x i32> @B2(i32* %ptr) nounwind uwtable readnone ssp {
+; X32-LABEL: B2:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss (%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: B2:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss (%rdi), %ymm0
+; X64-NEXT: retq
+entry:
+ %q = load i32, i32* %ptr, align 4
+ %vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0
+ %vecinit2.i = insertelement <8 x i32> %vecinit.i, i32 %q, i32 1
+ %vecinit4.i = insertelement <8 x i32> %vecinit2.i, i32 %q, i32 2
+ %vecinit6.i = insertelement <8 x i32> %vecinit4.i, i32 %q, i32 3
+ %vecinit8.i = insertelement <8 x i32> %vecinit6.i, i32 %q, i32 4
+ %vecinit10.i = insertelement <8 x i32> %vecinit8.i, i32 %q, i32 5
+ %vecinit12.i = insertelement <8 x i32> %vecinit10.i, i32 %q, i32 6
+ %vecinit14.i = insertelement <8 x i32> %vecinit12.i, i32 %q, i32 7
+ ret <8 x i32> %vecinit14.i
+}
+
+define <8 x i32> @B3(i32* %ptr, i32* %ptr2) nounwind uwtable readnone ssp {
+; X32-LABEL: B3:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl (%ecx), %ecx
+; X32-NEXT: vmovd %ecx, %xmm0
+; X32-NEXT: movl %ecx, (%eax)
+; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: B3:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: vmovd %eax, %xmm0
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: retq
+entry:
+ %q = load i32, i32* %ptr, align 4
+ store i32 %q, i32* %ptr2, align 4 ; to create a chain to prevent broadcast
+ %vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0
+ %vecinit2.i = insertelement <8 x i32> %vecinit.i, i32 %q, i32 1
+ %vecinit4.i = insertelement <8 x i32> %vecinit2.i, i32 %q, i32 2
+ %vecinit6.i = insertelement <8 x i32> %vecinit4.i, i32 %q, i32 3
+ %vecinit8.i = insertelement <8 x i32> %vecinit6.i, i32 %q, i32 4
+ %vecinit10.i = insertelement <8 x i32> %vecinit8.i, i32 %q, i32 5
+ %vecinit12.i = insertelement <8 x i32> %vecinit10.i, i32 %q, i32 6
+ %vecinit14.i = insertelement <8 x i32> %vecinit12.i, i32 %q, i32 7
+ ret <8 x i32> %vecinit14.i
+}
+
define <4 x double> @C(double* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: C:
; X32: ## BB#0: ## %entry
@@ -68,6 +160,34 @@ entry:
ret <4 x double> %vecinit6.i
}
+define <4 x double> @C2(double* %ptr, double* %ptr2) nounwind uwtable readnone ssp {
+; X32-LABEL: C2:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: vmovsd %xmm0, (%eax)
+; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: C2:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: vmovsd %xmm0, (%rsi)
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: retq
+entry:
+ %q = load double, double* %ptr, align 8
+ store double %q, double* %ptr2, align 8 ; to create a chain to prevent broadcast
+ %vecinit.i = insertelement <4 x double> undef, double %q, i32 0
+ %vecinit2.i = insertelement <4 x double> %vecinit.i, double %q, i32 1
+ %vecinit4.i = insertelement <4 x double> %vecinit2.i, double %q, i32 2
+ %vecinit6.i = insertelement <4 x double> %vecinit4.i, double %q, i32 3
+ ret <4 x double> %vecinit6.i
+}
+
define <8 x float> @D(float* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: D:
; X32: ## BB#0: ## %entry
@@ -88,6 +208,62 @@ entry:
ret <8 x float> %vecinit6.i
}
+define <8 x float> @D2(float* %ptr) nounwind uwtable readnone ssp {
+; X32-LABEL: D2:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss (%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: D2:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss (%rdi), %ymm0
+; X64-NEXT: retq
+entry:
+ %q = load float, float* %ptr, align 4
+ %vecinit.i = insertelement <8 x float> undef, float %q, i32 0
+ %vecinit2.i = insertelement <8 x float> %vecinit.i, float %q, i32 1
+ %vecinit4.i = insertelement <8 x float> %vecinit2.i, float %q, i32 2
+ %vecinit6.i = insertelement <8 x float> %vecinit4.i, float %q, i32 3
+ %vecinit8.i = insertelement <8 x float> %vecinit6.i, float %q, i32 4
+ %vecinit10.i = insertelement <8 x float> %vecinit8.i, float %q, i32 5
+ %vecinit12.i = insertelement <8 x float> %vecinit10.i, float %q, i32 6
+ %vecinit14.i = insertelement <8 x float> %vecinit12.i, float %q, i32 7
+ ret <8 x float> %vecinit14.i
+}
+
+define <8 x float> @D3(float* %ptr, float* %ptr2) nounwind uwtable readnone ssp {
+; X32-LABEL: D3:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss %xmm0, (%eax)
+; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: D3:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: vmovss %xmm0, (%rsi)
+; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: retq
+entry:
+ %q = load float, float* %ptr, align 4
+ store float %q, float* %ptr2, align 4 ; to create a chain to prevent broadcast
+ %vecinit.i = insertelement <8 x float> undef, float %q, i32 0
+ %vecinit2.i = insertelement <8 x float> %vecinit.i, float %q, i32 1
+ %vecinit4.i = insertelement <8 x float> %vecinit2.i, float %q, i32 2
+ %vecinit6.i = insertelement <8 x float> %vecinit4.i, float %q, i32 3
+ %vecinit8.i = insertelement <8 x float> %vecinit6.i, float %q, i32 4
+ %vecinit10.i = insertelement <8 x float> %vecinit8.i, float %q, i32 5
+ %vecinit12.i = insertelement <8 x float> %vecinit10.i, float %q, i32 6
+ %vecinit14.i = insertelement <8 x float> %vecinit12.i, float %q, i32 7
+ ret <8 x float> %vecinit14.i
+}
+
;;;; 128-bit versions
define <4 x float> @e(float* %ptr) nounwind uwtable readnone ssp {
@@ -110,6 +286,32 @@ entry:
ret <4 x float> %vecinit6.i
}
+define <4 x float> @e2(float* %ptr, float* %ptr2) nounwind uwtable readnone ssp {
+; X32-LABEL: e2:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss %xmm0, (%eax)
+; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X32-NEXT: retl
+;
+; X64-LABEL: e2:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: vmovss %xmm0, (%rsi)
+; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X64-NEXT: retq
+entry:
+ %q = load float, float* %ptr, align 4
+ store float %q, float* %ptr2, align 4 ; to create a chain to prevent broadcast
+ %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
+ %vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
+ %vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
+ %vecinit6.i = insertelement <4 x float> %vecinit4.i, float %q, i32 3
+ ret <4 x float> %vecinit6.i
+}
+
; Don't broadcast constants on pre-AVX2 hardware.
define <4 x float> @_e2(float* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: _e2:
@@ -150,6 +352,34 @@ entry:
ret <4 x i32> %vecinit6.i
}
+define <4 x i32> @F2(i32* %ptr, i32* %ptr2) nounwind uwtable readnone ssp {
+; X32-LABEL: F2:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl (%ecx), %ecx
+; X32-NEXT: movl %ecx, (%eax)
+; X32-NEXT: vmovd %ecx, %xmm0
+; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X32-NEXT: retl
+;
+; X64-LABEL: F2:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: movl %eax, (%rsi)
+; X64-NEXT: vmovd %eax, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X64-NEXT: retq
+entry:
+ %q = load i32, i32* %ptr, align 4
+ store i32 %q, i32* %ptr2, align 4 ; to create a chain to prevent broadcast
+ %vecinit.i = insertelement <4 x i32> undef, i32 %q, i32 0
+ %vecinit2.i = insertelement <4 x i32> %vecinit.i, i32 %q, i32 1
+ %vecinit4.i = insertelement <4 x i32> %vecinit2.i, i32 %q, i32 2
+ %vecinit6.i = insertelement <4 x i32> %vecinit4.i, i32 %q, i32 3
+ ret <4 x i32> %vecinit6.i
+}
+
; FIXME: Pointer adjusted broadcasts
define <4 x i32> @load_splat_4i32_4i32_1111(<4 x i32>* %ptr) nounwind uwtable readnone ssp {
@@ -382,6 +612,36 @@ entry:
ret <2 x i64> %vecinit2.i
}
+define <2 x i64> @G2(i64* %ptr, i64* %ptr2) nounwind uwtable readnone ssp {
+; X32-LABEL: G2:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl (%ecx), %edx
+; X32-NEXT: movl 4(%ecx), %ecx
+; X32-NEXT: movl %ecx, 4(%eax)
+; X32-NEXT: movl %edx, (%eax)
+; X32-NEXT: vmovd %edx, %xmm0
+; X32-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
+; X32-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0
+; X32-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: G2:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: movq %rax, (%rsi)
+; X64-NEXT: vmovq %rax, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; X64-NEXT: retq
+entry:
+ %q = load i64, i64* %ptr, align 8
+ store i64 %q, i64* %ptr2, align 8 ; to create a chain to prevent broadcast
+ %vecinit.i = insertelement <2 x i64> undef, i64 %q, i32 0
+ %vecinit2.i = insertelement <2 x i64> %vecinit.i, i64 %q, i32 1
+ ret <2 x i64> %vecinit2.i
+}
+
define <4 x i32> @H(<4 x i32> %a) {
; X32-LABEL: H:
; X32: ## BB#0: ## %entry
@@ -415,6 +675,30 @@ entry:
ret <2 x double> %vecinit2.i
}
+define <2 x double> @I2(double* %ptr, double* %ptr2) nounwind uwtable readnone ssp {
+; X32-LABEL: I2:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: vmovsd %xmm0, (%eax)
+; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X32-NEXT: retl
+;
+; X64-LABEL: I2:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: vmovsd %xmm0, (%rsi)
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64-NEXT: retq
+entry:
+ %q = load double, double* %ptr, align 4
+ store double %q, double* %ptr2, align 4 ; to create a chain to prevent broadcast
+ %vecinit.i = insertelement <2 x double> undef, double %q, i32 0
+ %vecinit2.i = insertelement <2 x double> %vecinit.i, double %q, i32 1
+ ret <2 x double> %vecinit2.i
+}
+
define <4 x float> @_RR(float* %ptr, i32* %k) nounwind uwtable readnone ssp {
; X32-LABEL: _RR:
; X32: ## BB#0: ## %entry
@@ -558,12 +842,15 @@ define float @broadcast_lifetime() nounwind {
; X32-NEXT: leal {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl %esi, (%esp)
; X32-NEXT: calll _gfunc
-; X32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm0
+; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp) ## 16-byte Spill
; X32-NEXT: movl %esi, (%esp)
; X32-NEXT: calll _gfunc
-; X32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm0
-; X32-NEXT: vsubss {{[0-9]+}}(%esp), %xmm0, %xmm0 ## 16-byte Folded Reload
+; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT: vpermilps $0, {{[0-9]+}}(%esp), %xmm1 ## 16-byte Folded Reload
+; X32-NEXT: ## xmm1 = mem[0,0,0,0]
+; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X32-NEXT: vsubss %xmm1, %xmm0, %xmm0
; X32-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
; X32-NEXT: flds {{[0-9]+}}(%esp)
; X32-NEXT: addl $56, %esp
@@ -575,12 +862,15 @@ define float @broadcast_lifetime() nounwind {
; X64-NEXT: subq $40, %rsp
; X64-NEXT: movq %rsp, %rdi
; X64-NEXT: callq _gfunc
-; X64-NEXT: vbroadcastss {{[0-9]+}}(%rsp), %xmm0
+; X64-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
; X64-NEXT: movq %rsp, %rdi
; X64-NEXT: callq _gfunc
-; X64-NEXT: vbroadcastss {{[0-9]+}}(%rsp), %xmm0
-; X64-NEXT: vsubss {{[0-9]+}}(%rsp), %xmm0, %xmm0 ## 16-byte Folded Reload
+; X64-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: vpermilps $0, {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Folded Reload
+; X64-NEXT: ## xmm1 = mem[0,0,0,0]
+; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X64-NEXT: vsubss %xmm1, %xmm0, %xmm0
; X64-NEXT: addq $40, %rsp
; X64-NEXT: retq
%1 = alloca <4 x float>, align 16
@@ -588,15 +878,15 @@ define float @broadcast_lifetime() nounwind {
%3 = bitcast <4 x float>* %1 to i8*
%4 = bitcast <4 x float>* %2 to i8*
- call void @llvm.lifetime.start(i64 16, i8* %3)
+ call void @llvm.lifetime.start.p0i8(i64 16, i8* %3)
call void @gfunc(<4 x float>* %1)
%5 = load <4 x float>, <4 x float>* %1, align 16
- call void @llvm.lifetime.end(i64 16, i8* %3)
+ call void @llvm.lifetime.end.p0i8(i64 16, i8* %3)
- call void @llvm.lifetime.start(i64 16, i8* %4)
+ call void @llvm.lifetime.start.p0i8(i64 16, i8* %4)
call void @gfunc(<4 x float>* %2)
%6 = load <4 x float>, <4 x float>* %2, align 16
- call void @llvm.lifetime.end(i64 16, i8* %4)
+ call void @llvm.lifetime.end.p0i8(i64 16, i8* %4)
%7 = extractelement <4 x float> %5, i32 1
%8 = extractelement <4 x float> %6, i32 1
@@ -605,5 +895,5 @@ define float @broadcast_lifetime() nounwind {
}
declare void @gfunc(<4 x float>*)
-declare void @llvm.lifetime.start(i64, i8*)
-declare void @llvm.lifetime.end(i64, i8*)
+declare void @llvm.lifetime.start.p0i8(i64, i8*)
+declare void @llvm.lifetime.end.p0i8(i64, i8*)
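
The lifetime markers in broadcast_lifetime pick up their mangled names: llvm.lifetime.start/end are overloaded on the pointer argument, and the suffix .p0i8 spells out "pointer, address space 0, to i8". A minimal sketch of the mangled form (hypothetical, not from this patch):

declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)

define void @lifetime_demo() {
  %buf = alloca [16 x i8]
  %p = bitcast [16 x i8]* %buf to i8*
  call void @llvm.lifetime.start.p0i8(i64 16, i8* %p)
  ; ... %buf is live only between the two markers ...
  call void @llvm.lifetime.end.p0i8(i64 16, i8* %p)
  ret void
}
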
diff --git a/test/CodeGen/X86/avx-vperm2x128.ll b/test/CodeGen/X86/avx-vperm2x128.ll
index f7f54a01d7ff..f4a77c370db5 100644
--- a/test/CodeGen/X86/avx-vperm2x128.ll
+++ b/test/CodeGen/X86/avx-vperm2x128.ll
@@ -466,8 +466,7 @@ define <4 x i64> @shuffle_v4i64_67zz(<4 x i64> %a, <4 x i64> %b) {
; AVX1: ## BB#0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
; AVX1-NEXT: vpaddq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_67zz:
diff --git a/test/CodeGen/X86/avx-vzeroupper.ll b/test/CodeGen/X86/avx-vzeroupper.ll
index 3c52aaf71adc..cf514d7aeb31 100644
--- a/test/CodeGen/X86/avx-vzeroupper.ll
+++ b/test/CodeGen/X86/avx-vzeroupper.ll
@@ -1,8 +1,9 @@
; RUN: llc < %s -x86-use-vzeroupper -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s
-; RUN: llc < %s -x86-use-vzeroupper -mtriple=x86_64-apple-darwin -mattr=+avx,+fast-partial-ymm-write | FileCheck --check-prefix=FASTYMM %s
+; RUN: llc < %s -x86-use-vzeroupper -mtriple=x86_64-apple-darwin -mattr=+avx512f | FileCheck %s
+; RUN: llc < %s -x86-use-vzeroupper -mtriple=x86_64-apple-darwin -mattr=+avx,+fast-partial-ymm-or-zmm-write | FileCheck --check-prefix=FAST-YMM-ZMM %s
; RUN: llc < %s -x86-use-vzeroupper -mtriple=x86_64-apple-darwin -mcpu=btver2 | FileCheck --check-prefix=BTVER2 %s
-; FASTYMM-NOT: vzeroupper
+; FAST-YMM-ZMM-NOT: vzeroupper
; BTVER2-NOT: vzeroupper
declare i32 @foo()
diff --git a/test/CodeGen/X86/avx2-conversions.ll b/test/CodeGen/X86/avx2-conversions.ll
index f0fb58ff7c8a..26edafbdb64f 100755
--- a/test/CodeGen/X86/avx2-conversions.ll
+++ b/test/CodeGen/X86/avx2-conversions.ll
@@ -25,7 +25,7 @@ define <4 x i32> @trunc4(<4 x i64> %A) nounwind {
define <8 x i16> @trunc8(<8 x i32> %A) nounwind {
; X32-LABEL: trunc8:
; X32: ## BB#0:
-; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
@@ -33,7 +33,7 @@ define <8 x i16> @trunc8(<8 x i32> %A) nounwind {
;
; X64-LABEL: trunc8:
; X64: ## BB#0:
-; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
diff --git a/test/CodeGen/X86/avx2-gather.ll b/test/CodeGen/X86/avx2-gather.ll
index cd8c354e9960..d162b4755ee1 100644
--- a/test/CodeGen/X86/avx2-gather.ll
+++ b/test/CodeGen/X86/avx2-gather.ll
@@ -9,12 +9,14 @@ define <4 x float> @test_x86_avx2_gather_d_ps(i8* %a1, <4 x i32> %idx, <4 x floa
; X32-LABEL: test_x86_avx2_gather_d_ps:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherdps %xmm1, (%eax,%xmm0,2), %xmm2
; X32-NEXT: vmovaps %xmm2, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_ps:
; X64: ## BB#0:
+; X64-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherdps %xmm1, (%rdi,%xmm0,2), %xmm2
; X64-NEXT: vmovaps %xmm2, %xmm0
; X64-NEXT: retq
@@ -30,12 +32,14 @@ define <2 x double> @test_x86_avx2_gather_d_pd(i8* %a1, <4 x i32> %idx, <2 x dou
; X32-LABEL: test_x86_avx2_gather_d_pd:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherdpd %xmm1, (%eax,%xmm0,2), %xmm2
; X32-NEXT: vmovapd %xmm2, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_pd:
; X64: ## BB#0:
+; X64-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherdpd %xmm1, (%rdi,%xmm0,2), %xmm2
; X64-NEXT: vmovapd %xmm2, %xmm0
; X64-NEXT: retq
@@ -51,12 +55,14 @@ define <8 x float> @test_x86_avx2_gather_d_ps_256(i8* %a1, <8 x i32> %idx, <8 x
; X32-LABEL: test_x86_avx2_gather_d_ps_256:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vxorps %ymm2, %ymm2, %ymm2
; X32-NEXT: vgatherdps %ymm1, (%eax,%ymm0,4), %ymm2
; X32-NEXT: vmovaps %ymm2, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_ps_256:
; X64: ## BB#0:
+; X64-NEXT: vxorps %ymm2, %ymm2, %ymm2
; X64-NEXT: vgatherdps %ymm1, (%rdi,%ymm0,4), %ymm2
; X64-NEXT: vmovaps %ymm2, %ymm0
; X64-NEXT: retq
@@ -72,12 +78,14 @@ define <4 x double> @test_x86_avx2_gather_d_pd_256(i8* %a1, <4 x i32> %idx, <4 x
; X32-LABEL: test_x86_avx2_gather_d_pd_256:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vxorpd %ymm2, %ymm2, %ymm2
; X32-NEXT: vgatherdpd %ymm1, (%eax,%xmm0,8), %ymm2
; X32-NEXT: vmovapd %ymm2, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_pd_256:
; X64: ## BB#0:
+; X64-NEXT: vxorpd %ymm2, %ymm2, %ymm2
; X64-NEXT: vgatherdpd %ymm1, (%rdi,%xmm0,8), %ymm2
; X64-NEXT: vmovapd %ymm2, %ymm0
; X64-NEXT: retq
@@ -85,3 +93,55 @@ define <4 x double> @test_x86_avx2_gather_d_pd_256(i8* %a1, <4 x i32> %idx, <4 x
i8* %a1, <4 x i32> %idx, <4 x double> %mask, i8 8) ;
ret <4 x double> %res
}
+
+define <2 x i64> @test_mm_i32gather_epi32(i32 *%a0, <2 x i64> %a1) {
+; X32-LABEL: test_mm_i32gather_epi32:
+; X32: ## BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X32-NEXT: vpgatherdd %xmm2, (%eax,%xmm0,2), %xmm1
+; X32-NEXT: vmovdqa %xmm1, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_i32gather_epi32:
+; X64: ## BB#0:
+; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-NEXT: vpgatherdd %xmm2, (%rdi,%xmm0,2), %xmm1
+; X64-NEXT: vmovdqa %xmm1, %xmm0
+; X64-NEXT: retq
+ %arg0 = bitcast i32 *%a0 to i8*
+ %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
+ %mask = bitcast <2 x i64> <i64 -1, i64 -1> to <4 x i32>
+ %call = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> zeroinitializer, i8* %arg0, <4 x i32> %arg1, <4 x i32> %mask, i8 2)
+ %bc = bitcast <4 x i32> %call to <2 x i64>
+ ret <2 x i64> %bc
+}
+declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, i8*, <4 x i32>, <4 x i32>, i8) nounwind readonly
+
+define <2 x double> @test_mm_i32gather_pd(double *%a0, <2 x i64> %a1) {
+; X32-LABEL: test_mm_i32gather_pd:
+; X32: ## BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; X32-NEXT: vgatherdpd %xmm2, (%eax,%xmm0,2), %xmm1
+; X32-NEXT: vmovapd %xmm1, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_i32gather_pd:
+; X64: ## BB#0:
+; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; X64-NEXT: vgatherdpd %xmm2, (%rdi,%xmm0,2), %xmm1
+; X64-NEXT: vmovapd %xmm1, %xmm0
+; X64-NEXT: retq
+ %arg0 = bitcast double *%a0 to i8*
+ %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
+ %cmp = fcmp oeq <2 x double> zeroinitializer, zeroinitializer
+ %sext = sext <2 x i1> %cmp to <2 x i64>
+ %mask = bitcast <2 x i64> %sext to <2 x double>
+ %res = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> zeroinitializer, i8* %arg0, <4 x i32> %arg1, <2 x double> %mask, i8 2)
+ ret <2 x double> %res
+}
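
The common thread in the gather hunks above: VPGATHER* only writes the lanes selected by the mask and leaves the remaining lanes of the destination untouched, so the destination register is also an input and must be defined before the gather executes; the added vpxor/vxorps/vxorpd lines zero it first, matching the zeroinitializer passthrough operand in the _mm_ tests. A minimal standalone sketch of that contract (a hypothetical test, not part of the patch; scale and element width are illustrative):

; Hypothetical reduction, not from the patch: with an all-ones mask every
; lane is gathered, but the zeroinitializer passthrough still forces the
; backend to materialize a zeroed destination register before the gather.
declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, i8*, <4 x i32>, <4 x i32>, i8) nounwind readonly

define <4 x i32> @gather_all_lanes(i8* %base, <4 x i32> %idx) {
  %res = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> zeroinitializer, i8* %base, <4 x i32> %idx, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i8 4)
  ret <4 x i32> %res
}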
diff --git a/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
index d7a1422e992f..cb0abf3b137f 100644
--- a/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
@@ -1068,6 +1068,7 @@ define <2 x i64> @test_mm_i32gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpgatherdd %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT: vmovdqa %xmm1, %xmm0
; X32-NEXT: retl
@@ -1075,6 +1076,7 @@ define <2 x i64> @test_mm_i32gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X64-LABEL: test_mm_i32gather_epi32:
; X64: # BB#0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherdd %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT: vmovdqa %xmm1, %xmm0
; X64-NEXT: retq
@@ -1112,6 +1114,7 @@ define <4 x i64> @test_mm256_i32gather_epi32(i32 *%a0, <4 x i64> %a1) {
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; X32-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X32-NEXT: vpgatherdd %ymm2, (%eax,%ymm0,2), %ymm1
; X32-NEXT: vmovdqa %ymm1, %ymm0
; X32-NEXT: retl
@@ -1119,6 +1122,7 @@ define <4 x i64> @test_mm256_i32gather_epi32(i32 *%a0, <4 x i64> %a1) {
; X64-LABEL: test_mm256_i32gather_epi32:
; X64: # BB#0:
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; X64-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X64-NEXT: vpgatherdd %ymm2, (%rdi,%ymm0,2), %ymm1
; X64-NEXT: vmovdqa %ymm1, %ymm0
; X64-NEXT: retq
@@ -1156,6 +1160,7 @@ define <2 x i64> @test_mm_i32gather_epi64(i64 *%a0, <2 x i64> %a1) {
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpgatherdq %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT: vmovdqa %xmm1, %xmm0
; X32-NEXT: retl
@@ -1163,6 +1168,7 @@ define <2 x i64> @test_mm_i32gather_epi64(i64 *%a0, <2 x i64> %a1) {
; X64-LABEL: test_mm_i32gather_epi64:
; X64: # BB#0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherdq %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT: vmovdqa %xmm1, %xmm0
; X64-NEXT: retq
@@ -1195,6 +1201,7 @@ define <4 x i64> @test_mm256_i32gather_epi64(i64 *%a0, <2 x i64> %a1) {
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; X32-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X32-NEXT: vpgatherdq %ymm2, (%eax,%xmm0,2), %ymm1
; X32-NEXT: vmovdqa %ymm1, %ymm0
; X32-NEXT: retl
@@ -1202,6 +1209,7 @@ define <4 x i64> @test_mm256_i32gather_epi64(i64 *%a0, <2 x i64> %a1) {
; X64-LABEL: test_mm256_i32gather_epi64:
; X64: # BB#0:
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; X64-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X64-NEXT: vpgatherdq %ymm2, (%rdi,%xmm0,2), %ymm1
; X64-NEXT: vmovdqa %ymm1, %ymm0
; X64-NEXT: retq
@@ -1234,6 +1242,7 @@ define <2 x double> @test_mm_i32gather_pd(double *%a0, <2 x i64> %a1) {
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X32-NEXT: vgatherdpd %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT: vmovapd %xmm1, %xmm0
; X32-NEXT: retl
@@ -1241,6 +1250,7 @@ define <2 x double> @test_mm_i32gather_pd(double *%a0, <2 x i64> %a1) {
; X64-LABEL: test_mm_i32gather_pd:
; X64: # BB#0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherdpd %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT: vmovapd %xmm1, %xmm0
; X64-NEXT: retq
@@ -1318,6 +1328,7 @@ define <4 x float> @test_mm_i32gather_ps(float *%a0, <2 x i64> %a1) {
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT: vgatherdps %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT: vmovaps %xmm1, %xmm0
; X32-NEXT: retl
@@ -1325,6 +1336,7 @@ define <4 x float> @test_mm_i32gather_ps(float *%a0, <2 x i64> %a1) {
; X64-LABEL: test_mm_i32gather_ps:
; X64: # BB#0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherdps %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT: vmovaps %xmm1, %xmm0
; X64-NEXT: retq
@@ -1402,6 +1414,7 @@ define <2 x i64> @test_mm_i64gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpgatherqd %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT: vmovdqa %xmm1, %xmm0
; X32-NEXT: retl
@@ -1409,6 +1422,7 @@ define <2 x i64> @test_mm_i64gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X64-LABEL: test_mm_i64gather_epi32:
; X64: # BB#0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherqd %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT: vmovdqa %xmm1, %xmm0
; X64-NEXT: retq
@@ -1444,6 +1458,7 @@ define <2 x i64> @test_mm256_i64gather_epi32(i32 *%a0, <4 x i64> %a1) {
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpgatherqd %xmm2, (%eax,%ymm0,2), %xmm1
; X32-NEXT: vmovdqa %xmm1, %xmm0
; X32-NEXT: vzeroupper
@@ -1452,6 +1467,7 @@ define <2 x i64> @test_mm256_i64gather_epi32(i32 *%a0, <4 x i64> %a1) {
; X64-LABEL: test_mm256_i64gather_epi32:
; X64: # BB#0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherqd %xmm2, (%rdi,%ymm0,2), %xmm1
; X64-NEXT: vmovdqa %xmm1, %xmm0
; X64-NEXT: vzeroupper
@@ -1490,6 +1506,7 @@ define <2 x i64> @test_mm_i64gather_epi64(i64 *%a0, <2 x i64> %a1) {
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpgatherqq %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT: vmovdqa %xmm1, %xmm0
; X32-NEXT: retl
@@ -1497,6 +1514,7 @@ define <2 x i64> @test_mm_i64gather_epi64(i64 *%a0, <2 x i64> %a1) {
; X64-LABEL: test_mm_i64gather_epi64:
; X64: # BB#0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherqq %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT: vmovdqa %xmm1, %xmm0
; X64-NEXT: retq
@@ -1527,6 +1545,7 @@ define <4 x i64> @test_mm256_i64gather_epi64(i64 *%a0, <4 x i64> %a1) {
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; X32-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X32-NEXT: vpgatherqq %ymm2, (%eax,%ymm0,2), %ymm1
; X32-NEXT: vmovdqa %ymm1, %ymm0
; X32-NEXT: retl
@@ -1534,6 +1553,7 @@ define <4 x i64> @test_mm256_i64gather_epi64(i64 *%a0, <4 x i64> %a1) {
; X64-LABEL: test_mm256_i64gather_epi64:
; X64: # BB#0:
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; X64-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X64-NEXT: vpgatherqq %ymm2, (%rdi,%ymm0,2), %ymm1
; X64-NEXT: vmovdqa %ymm1, %ymm0
; X64-NEXT: retq
@@ -1564,6 +1584,7 @@ define <2 x double> @test_mm_i64gather_pd(double *%a0, <2 x i64> %a1) {
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X32-NEXT: vgatherqpd %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT: vmovapd %xmm1, %xmm0
; X32-NEXT: retl
@@ -1571,6 +1592,7 @@ define <2 x double> @test_mm_i64gather_pd(double *%a0, <2 x i64> %a1) {
; X64-LABEL: test_mm_i64gather_pd:
; X64: # BB#0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherqpd %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT: vmovapd %xmm1, %xmm0
; X64-NEXT: retq
@@ -1644,6 +1666,7 @@ define <4 x float> @test_mm_i64gather_ps(float *%a0, <2 x i64> %a1) {
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT: vgatherqps %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT: vmovaps %xmm1, %xmm0
; X32-NEXT: retl
@@ -1651,6 +1674,7 @@ define <4 x float> @test_mm_i64gather_ps(float *%a0, <2 x i64> %a1) {
; X64-LABEL: test_mm_i64gather_ps:
; X64: # BB#0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherqps %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT: vmovaps %xmm1, %xmm0
; X64-NEXT: retq
@@ -1684,6 +1708,7 @@ define <4 x float> @test_mm256_i64gather_ps(float *%a0, <4 x i64> %a1) {
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT: vgatherqps %xmm2, (%eax,%ymm0,2), %xmm1
; X32-NEXT: vmovaps %xmm1, %xmm0
; X32-NEXT: vzeroupper
@@ -1692,6 +1717,7 @@ define <4 x float> @test_mm256_i64gather_ps(float *%a0, <4 x i64> %a1) {
; X64-LABEL: test_mm256_i64gather_ps:
; X64: # BB#0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherqps %xmm2, (%rdi,%ymm0,2), %xmm1
; X64-NEXT: vmovaps %xmm1, %xmm0
; X64-NEXT: vzeroupper
diff --git a/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll b/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll
index b6b8447beda1..25b0a9a1a725 100644
--- a/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll
+++ b/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll
@@ -1,5 +1,5 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -march=x86 -mattr=avx2 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=avx2 | FileCheck %s
define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_pblendw:
@@ -34,6 +34,18 @@ define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
declare <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32>, <8 x i32>, i32) nounwind readnone
+define <4 x i64> @test_x86_avx2_movntdqa(i8* %a0) {
+; CHECK-LABEL: test_x86_avx2_movntdqa:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: vmovntdqa (%eax), %ymm0
+; CHECK-NEXT: retl
+ %res = call <4 x i64> @llvm.x86.avx2.movntdqa(i8* %a0) ; <<4 x i64>> [#uses=1]
+ ret <4 x i64> %res
+}
+declare <4 x i64> @llvm.x86.avx2.movntdqa(i8*) nounwind readonly
+
+
define <16 x i16> @test_x86_avx2_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: test_x86_avx2_mpsadbw:
; CHECK: ## BB#0:
@@ -370,7 +382,7 @@ define void @test_x86_avx_storeu_dq_256(i8* %a0, <32 x i8> %a1) {
; CHECK-LABEL: test_x86_avx_storeu_dq_256:
; CHECK: ## BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: vpaddb LCPI33_0, %ymm0, %ymm0
+; CHECK-NEXT: vpaddb LCPI34_0, %ymm0, %ymm0
; CHECK-NEXT: vmovdqu %ymm0, (%eax)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retl
@@ -385,7 +397,6 @@ define <32 x i8> @mm256_max_epi8(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retl
-;
%res = call <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8> %a0, <32 x i8> %a1)
ret <32 x i8> %res
}
@@ -396,7 +407,6 @@ define <16 x i16> @mm256_max_epi16(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retl
-;
%res = call <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16> %a0, <16 x i16> %a1)
ret <16 x i16> %res
}
@@ -407,7 +417,6 @@ define <8 x i32> @mm256_max_epi32(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retl
-;
%res = call <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32> %a0, <8 x i32> %a1)
ret <8 x i32> %res
}
@@ -418,7 +427,6 @@ define <32 x i8> @mm256_max_epu8(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retl
-;
%res = call <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8> %a0, <32 x i8> %a1)
ret <32 x i8> %res
}
@@ -429,7 +437,6 @@ define <16 x i16> @mm256_max_epu16(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retl
-;
%res = call <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16> %a0, <16 x i16> %a1)
ret <16 x i16> %res
}
@@ -440,7 +447,6 @@ define <8 x i32> @mm256_max_epu32(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retl
-;
%res = call <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32> %a0, <8 x i32> %a1)
ret <8 x i32> %res
}
@@ -451,7 +457,6 @@ define <32 x i8> @mm256_min_epi8(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retl
-;
%res = call <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8> %a0, <32 x i8> %a1)
ret <32 x i8> %res
}
@@ -462,7 +467,6 @@ define <16 x i16> @mm256_min_epi16(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retl
-;
%res = call <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16> %a0, <16 x i16> %a1)
ret <16 x i16> %res
}
@@ -473,7 +477,6 @@ define <8 x i32> @mm256_min_epi32(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retl
-;
%res = call <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32> %a0, <8 x i32> %a1)
ret <8 x i32> %res
}
@@ -484,7 +487,6 @@ define <32 x i8> @mm256_min_epu8(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpminub %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retl
-;
%res = call <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8> %a0, <32 x i8> %a1)
ret <32 x i8> %res
}
@@ -495,7 +497,6 @@ define <16 x i16> @mm256_min_epu16(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retl
-;
%res = call <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16> %a0, <16 x i16> %a1)
ret <16 x i16> %res
}
@@ -506,7 +507,6 @@ define <8 x i32> @mm256_min_epu32(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpminud %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retl
-;
%res = call <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32> %a0, <8 x i32> %a1)
ret <8 x i32> %res
}
diff --git a/test/CodeGen/X86/avx2-intrinsics-x86.ll b/test/CodeGen/X86/avx2-intrinsics-x86.ll
index 830e68fb1e34..52e37dbf2696 100644
--- a/test/CodeGen/X86/avx2-intrinsics-x86.ll
+++ b/test/CodeGen/X86/avx2-intrinsics-x86.ll
@@ -227,16 +227,11 @@ declare <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8>, <32 x i8>) nounwind readnone
define i32 @test_x86_avx2_pmovmskb(<32 x i8> %a0) {
-; AVX2-LABEL: test_x86_avx2_pmovmskb:
-; AVX2: ## BB#0:
-; AVX2-NEXT: vpmovmskb %ymm0, %eax ## encoding: [0xc5,0xfd,0xd7,0xc0]
-; AVX2-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; AVX2-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_pmovmskb:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vpmovmskb %ymm0, %eax ## encoding: [0xc5,0xfd,0xd7,0xc0]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_avx2_pmovmskb:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpmovmskb %ymm0, %eax ## encoding: [0xc5,0xfd,0xd7,0xc0]
+; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.avx2.pmovmskb(<32 x i8> %a0) ; <i32> [#uses=1]
ret i32 %res
}
@@ -841,24 +836,6 @@ define <16 x i16> @test_x86_avx2_psign_w(<16 x i16> %a0, <16 x i16> %a1) {
declare <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16>, <16 x i16>) nounwind readnone
-define <4 x i64> @test_x86_avx2_movntdqa(i8* %a0) {
-; AVX2-LABEL: test_x86_avx2_movntdqa:
-; AVX2: ## BB#0:
-; AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX2-NEXT: vmovntdqa (%eax), %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x2a,0x00]
-; AVX2-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_movntdqa:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX512VL-NEXT: vmovntdqa (%eax), %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x2a,0x00]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
- %res = call <4 x i64> @llvm.x86.avx2.movntdqa(i8* %a0) ; <<4 x i64>> [#uses=1]
- ret <4 x i64> %res
-}
-declare <4 x i64> @llvm.x86.avx2.movntdqa(i8*) nounwind readonly
-
-
define <16 x i16> @test_x86_avx2_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: test_x86_avx2_mpsadbw:
; CHECK: ## BB#0:
@@ -1179,18 +1156,12 @@ declare void @llvm.x86.avx2.maskstore.q(i8*, <2 x i64>, <2 x i64>) nounwind
define void @test_x86_avx2_maskstore_q_256(i8* %a0, <4 x i64> %a1, <4 x i64> %a2) {
-; AVX2-LABEL: test_x86_avx2_maskstore_q_256:
-; AVX2: ## BB#0:
-; AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX2-NEXT: vpmaskmovq %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0xfd,0x8e,0x08]
-; AVX2-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; AVX2-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_maskstore_q_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX512VL-NEXT: vpmaskmovq %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0xfd,0x8e,0x08]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_avx2_maskstore_q_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; CHECK-NEXT: vpmaskmovq %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0xfd,0x8e,0x08]
+; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT: retl ## encoding: [0xc3]
call void @llvm.x86.avx2.maskstore.q.256(i8* %a0, <4 x i64> %a1, <4 x i64> %a2)
ret void
}
@@ -1210,18 +1181,12 @@ declare void @llvm.x86.avx2.maskstore.d(i8*, <4 x i32>, <4 x i32>) nounwind
define void @test_x86_avx2_maskstore_d_256(i8* %a0, <8 x i32> %a1, <8 x i32> %a2) {
-; AVX2-LABEL: test_x86_avx2_maskstore_d_256:
-; AVX2: ## BB#0:
-; AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX2-NEXT: vpmaskmovd %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0x7d,0x8e,0x08]
-; AVX2-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; AVX2-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_maskstore_d_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX512VL-NEXT: vpmaskmovd %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0x7d,0x8e,0x08]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_avx2_maskstore_d_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; CHECK-NEXT: vpmaskmovd %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0x7d,0x8e,0x08]
+; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT: retl ## encoding: [0xc3]
call void @llvm.x86.avx2.maskstore.d.256(i8* %a0, <8 x i32> %a1, <8 x i32> %a2)
ret void
}
@@ -1375,18 +1340,18 @@ define <4 x i32> @test_x86_avx2_psrav_d_const(<4 x i32> %a0, <4 x i32> %a1) {
; AVX2: ## BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23]
; AVX2-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; AVX2-NEXT: ## fixup A - offset: 4, value: LCPI91_0, kind: FK_Data_4
-; AVX2-NEXT: vpsravd LCPI91_1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
-; AVX2-NEXT: ## fixup A - offset: 5, value: LCPI91_1, kind: FK_Data_4
+; AVX2-NEXT: ## fixup A - offset: 4, value: LCPI90_0, kind: FK_Data_4
+; AVX2-NEXT: vpsravd LCPI90_1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
+; AVX2-NEXT: ## fixup A - offset: 5, value: LCPI90_1, kind: FK_Data_4
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrav_d_const:
; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vmovdqa LCPI91_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
+; AVX512VL-NEXT: vmovdqa LCPI90_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
; AVX512VL-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI91_0, kind: FK_Data_4
-; AVX512VL-NEXT: vpsravd LCPI91_1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
-; AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI91_1, kind: FK_Data_4
+; AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI90_0, kind: FK_Data_4
+; AVX512VL-NEXT: vpsravd LCPI90_1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
+; AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI90_1, kind: FK_Data_4
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> <i32 2, i32 9, i32 -12, i32 23>, <4 x i32> <i32 1, i32 18, i32 35, i32 52>)
ret <4 x i32> %res
@@ -1412,18 +1377,18 @@ define <8 x i32> @test_x86_avx2_psrav_d_256_const(<8 x i32> %a0, <8 x i32> %a1)
; AVX2: ## BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; AVX2-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; AVX2-NEXT: ## fixup A - offset: 4, value: LCPI93_0, kind: FK_Data_4
-; AVX2-NEXT: vpsravd LCPI93_1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
-; AVX2-NEXT: ## fixup A - offset: 5, value: LCPI93_1, kind: FK_Data_4
+; AVX2-NEXT: ## fixup A - offset: 4, value: LCPI92_0, kind: FK_Data_4
+; AVX2-NEXT: vpsravd LCPI92_1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
+; AVX2-NEXT: ## fixup A - offset: 5, value: LCPI92_1, kind: FK_Data_4
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const:
; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: vmovdqa LCPI93_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
+; AVX512VL-NEXT: vmovdqa LCPI92_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; AVX512VL-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI93_0, kind: FK_Data_4
-; AVX512VL-NEXT: vpsravd LCPI93_1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
-; AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI93_1, kind: FK_Data_4
+; AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI92_0, kind: FK_Data_4
+; AVX512VL-NEXT: vpsravd LCPI92_1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
+; AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI92_1, kind: FK_Data_4
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> <i32 2, i32 9, i32 -12, i32 23, i32 -26, i32 37, i32 -40, i32 51>, <8 x i32> <i32 1, i32 18, i32 35, i32 52, i32 69, i32 15, i32 32, i32 49>)
ret <8 x i32> %res
@@ -1522,18 +1487,12 @@ declare <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float>, i8*,
<2 x i64>, <4 x float>, i8) nounwind readonly
define <4 x float> @test_x86_avx2_gather_q_ps_256(<4 x float> %a0, i8* %a1, <4 x i64> %idx, <4 x float> %mask) {
-; AVX2-LABEL: test_x86_avx2_gather_q_ps_256:
-; AVX2: ## BB#0:
-; AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX2-NEXT: vgatherqps %xmm2, (%eax,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x93,0x04,0x48]
-; AVX2-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; AVX2-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_gather_q_ps_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX512VL-NEXT: vgatherqps %xmm2, (%eax,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x93,0x04,0x48]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_avx2_gather_q_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; CHECK-NEXT: vgatherqps %xmm2, (%eax,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x93,0x04,0x48]
+; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> %a0,
i8* %a1, <4 x i64> %idx, <4 x float> %mask, i8 2) ;
ret <4 x float> %res
@@ -1633,18 +1592,12 @@ declare <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32>, i8*,
<2 x i64>, <4 x i32>, i8) nounwind readonly
define <4 x i32> @test_x86_avx2_gather_q_d_256(<4 x i32> %a0, i8* %a1, <4 x i64> %idx, <4 x i32> %mask) {
-; AVX2-LABEL: test_x86_avx2_gather_q_d_256:
-; AVX2: ## BB#0:
-; AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX2-NEXT: vpgatherqd %xmm2, (%eax,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x91,0x04,0x48]
-; AVX2-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
-; AVX2-NEXT: retl ## encoding: [0xc3]
-;
-; AVX512VL-LABEL: test_x86_avx2_gather_q_d_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX512VL-NEXT: vpgatherqd %xmm2, (%eax,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x91,0x04,0x48]
-; AVX512VL-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_avx2_gather_q_d_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; CHECK-NEXT: vpgatherqd %xmm2, (%eax,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x91,0x04,0x48]
+; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> %a0,
i8* %a1, <4 x i64> %idx, <4 x i32> %mask, i8 2) ;
ret <4 x i32> %res
diff --git a/test/CodeGen/X86/avx2-shift.ll b/test/CodeGen/X86/avx2-shift.ll
index 887fef113e72..4345bd6f7926 100644
--- a/test/CodeGen/X86/avx2-shift.ll
+++ b/test/CodeGen/X86/avx2-shift.ll
@@ -530,7 +530,7 @@ define <8 x i16> @variable_shl16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
-; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
@@ -541,7 +541,7 @@ define <8 x i16> @variable_shl16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
-; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
@@ -556,7 +556,7 @@ define <8 x i16> @variable_ashr16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovsxwd %xmm0, %ymm0
; X32-NEXT: vpsravd %ymm1, %ymm0, %ymm0
-; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
@@ -567,7 +567,7 @@ define <8 x i16> @variable_ashr16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovsxwd %xmm0, %ymm0
; X64-NEXT: vpsravd %ymm1, %ymm0, %ymm0
-; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
@@ -582,7 +582,7 @@ define <8 x i16> @variable_lshr16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
@@ -593,7 +593,7 @@ define <8 x i16> @variable_lshr16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
diff --git a/test/CodeGen/X86/avx2-vbroadcast.ll b/test/CodeGen/X86/avx2-vbroadcast.ll
index f65f485cc62c..ba47e2ba15c2 100644
--- a/test/CodeGen/X86/avx2-vbroadcast.ll
+++ b/test/CodeGen/X86/avx2-vbroadcast.ll
@@ -1133,97 +1133,52 @@ eintry:
}
define void @isel_crash_32b(i8* %cV_R.addr) {
-; X32-AVX2-LABEL: isel_crash_32b:
-; X32-AVX2: ## BB#0: ## %eintry
-; X32-AVX2-NEXT: pushl %ebp
-; X32-AVX2-NEXT: Lcfi1:
-; X32-AVX2-NEXT: .cfi_def_cfa_offset 8
-; X32-AVX2-NEXT: Lcfi2:
-; X32-AVX2-NEXT: .cfi_offset %ebp, -8
-; X32-AVX2-NEXT: movl %esp, %ebp
-; X32-AVX2-NEXT: Lcfi3:
-; X32-AVX2-NEXT: .cfi_def_cfa_register %ebp
-; X32-AVX2-NEXT: andl $-32, %esp
-; X32-AVX2-NEXT: subl $128, %esp
-; X32-AVX2-NEXT: movl 8(%ebp), %eax
-; X32-AVX2-NEXT: vxorps %ymm0, %ymm0, %ymm0
-; X32-AVX2-NEXT: vmovaps %ymm0, (%esp)
-; X32-AVX2-NEXT: vpbroadcastb (%eax), %ymm1
-; X32-AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X32-AVX2-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%esp)
-; X32-AVX2-NEXT: movl %ebp, %esp
-; X32-AVX2-NEXT: popl %ebp
-; X32-AVX2-NEXT: vzeroupper
-; X32-AVX2-NEXT: retl
-;
-; X64-AVX2-LABEL: isel_crash_32b:
-; X64-AVX2: ## BB#0: ## %eintry
-; X64-AVX2-NEXT: pushq %rbp
-; X64-AVX2-NEXT: Lcfi0:
-; X64-AVX2-NEXT: .cfi_def_cfa_offset 16
-; X64-AVX2-NEXT: Lcfi1:
-; X64-AVX2-NEXT: .cfi_offset %rbp, -16
-; X64-AVX2-NEXT: movq %rsp, %rbp
-; X64-AVX2-NEXT: Lcfi2:
-; X64-AVX2-NEXT: .cfi_def_cfa_register %rbp
-; X64-AVX2-NEXT: andq $-32, %rsp
-; X64-AVX2-NEXT: subq $128, %rsp
-; X64-AVX2-NEXT: vxorps %ymm0, %ymm0, %ymm0
-; X64-AVX2-NEXT: vmovaps %ymm0, (%rsp)
-; X64-AVX2-NEXT: movb (%rdi), %al
-; X64-AVX2-NEXT: vmovd %eax, %xmm1
-; X64-AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
-; X64-AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
-; X64-AVX2-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%rsp)
-; X64-AVX2-NEXT: movq %rbp, %rsp
-; X64-AVX2-NEXT: popq %rbp
-; X64-AVX2-NEXT: vzeroupper
-; X64-AVX2-NEXT: retq
-;
-; X32-AVX512VL-LABEL: isel_crash_32b:
-; X32-AVX512VL: ## BB#0: ## %eintry
-; X32-AVX512VL-NEXT: pushl %ebp
-; X32-AVX512VL-NEXT: Lcfi1:
-; X32-AVX512VL-NEXT: .cfi_def_cfa_offset 8
-; X32-AVX512VL-NEXT: Lcfi2:
-; X32-AVX512VL-NEXT: .cfi_offset %ebp, -8
-; X32-AVX512VL-NEXT: movl %esp, %ebp
-; X32-AVX512VL-NEXT: Lcfi3:
-; X32-AVX512VL-NEXT: .cfi_def_cfa_register %ebp
-; X32-AVX512VL-NEXT: andl $-32, %esp
-; X32-AVX512VL-NEXT: subl $128, %esp
-; X32-AVX512VL-NEXT: movl 8(%ebp), %eax
-; X32-AVX512VL-NEXT: vxorps %ymm0, %ymm0, %ymm0
-; X32-AVX512VL-NEXT: vmovaps %ymm0, (%esp)
-; X32-AVX512VL-NEXT: vpbroadcastb (%eax), %ymm1
-; X32-AVX512VL-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X32-AVX512VL-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%esp)
-; X32-AVX512VL-NEXT: movl %ebp, %esp
-; X32-AVX512VL-NEXT: popl %ebp
-; X32-AVX512VL-NEXT: retl
+; X32-LABEL: isel_crash_32b:
+; X32: ## BB#0: ## %eintry
+; X32-NEXT: pushl %ebp
+; X32-NEXT: Lcfi1:
+; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: Lcfi2:
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: Lcfi3:
+; X32-NEXT: .cfi_def_cfa_register %ebp
+; X32-NEXT: andl $-32, %esp
+; X32-NEXT: subl $128, %esp
+; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-NEXT: vmovaps %ymm0, (%esp)
+; X32-NEXT: vpbroadcastb (%eax), %ymm1
+; X32-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
+; X32-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%esp)
+; X32-NEXT: movl %ebp, %esp
+; X32-NEXT: popl %ebp
+; X32-NEXT: vzeroupper
+; X32-NEXT: retl
;
-; X64-AVX512VL-LABEL: isel_crash_32b:
-; X64-AVX512VL: ## BB#0: ## %eintry
-; X64-AVX512VL-NEXT: pushq %rbp
-; X64-AVX512VL-NEXT: Lcfi0:
-; X64-AVX512VL-NEXT: .cfi_def_cfa_offset 16
-; X64-AVX512VL-NEXT: Lcfi1:
-; X64-AVX512VL-NEXT: .cfi_offset %rbp, -16
-; X64-AVX512VL-NEXT: movq %rsp, %rbp
-; X64-AVX512VL-NEXT: Lcfi2:
-; X64-AVX512VL-NEXT: .cfi_def_cfa_register %rbp
-; X64-AVX512VL-NEXT: andq $-32, %rsp
-; X64-AVX512VL-NEXT: subq $128, %rsp
-; X64-AVX512VL-NEXT: vxorps %ymm0, %ymm0, %ymm0
-; X64-AVX512VL-NEXT: vmovaps %ymm0, (%rsp)
-; X64-AVX512VL-NEXT: movb (%rdi), %al
-; X64-AVX512VL-NEXT: vmovd %eax, %xmm1
-; X64-AVX512VL-NEXT: vpbroadcastb %xmm1, %ymm1
-; X64-AVX512VL-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
-; X64-AVX512VL-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%rsp)
-; X64-AVX512VL-NEXT: movq %rbp, %rsp
-; X64-AVX512VL-NEXT: popq %rbp
-; X64-AVX512VL-NEXT: retq
+; X64-LABEL: isel_crash_32b:
+; X64: ## BB#0: ## %eintry
+; X64-NEXT: pushq %rbp
+; X64-NEXT: Lcfi0:
+; X64-NEXT: .cfi_def_cfa_offset 16
+; X64-NEXT: Lcfi1:
+; X64-NEXT: .cfi_offset %rbp, -16
+; X64-NEXT: movq %rsp, %rbp
+; X64-NEXT: Lcfi2:
+; X64-NEXT: .cfi_def_cfa_register %rbp
+; X64-NEXT: andq $-32, %rsp
+; X64-NEXT: subq $128, %rsp
+; X64-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X64-NEXT: vmovaps %ymm0, (%rsp)
+; X64-NEXT: movb (%rdi), %al
+; X64-NEXT: vmovd %eax, %xmm1
+; X64-NEXT: vpbroadcastb %xmm1, %ymm1
+; X64-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; X64-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%rsp)
+; X64-NEXT: movq %rbp, %rsp
+; X64-NEXT: popq %rbp
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
eintry:
%__a.addr.i = alloca <4 x i64>, align 16
%__b.addr.i = alloca <4 x i64>, align 16
@@ -1280,97 +1235,52 @@ entry:
}
define void @isel_crash_16w(i16* %cV_R.addr) {
-; X32-AVX2-LABEL: isel_crash_16w:
-; X32-AVX2: ## BB#0: ## %eintry
-; X32-AVX2-NEXT: pushl %ebp
-; X32-AVX2-NEXT: Lcfi5:
-; X32-AVX2-NEXT: .cfi_def_cfa_offset 8
-; X32-AVX2-NEXT: Lcfi6:
-; X32-AVX2-NEXT: .cfi_offset %ebp, -8
-; X32-AVX2-NEXT: movl %esp, %ebp
-; X32-AVX2-NEXT: Lcfi7:
-; X32-AVX2-NEXT: .cfi_def_cfa_register %ebp
-; X32-AVX2-NEXT: andl $-32, %esp
-; X32-AVX2-NEXT: subl $128, %esp
-; X32-AVX2-NEXT: movl 8(%ebp), %eax
-; X32-AVX2-NEXT: vxorps %ymm0, %ymm0, %ymm0
-; X32-AVX2-NEXT: vmovaps %ymm0, (%esp)
-; X32-AVX2-NEXT: vpbroadcastw (%eax), %ymm1
-; X32-AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X32-AVX2-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%esp)
-; X32-AVX2-NEXT: movl %ebp, %esp
-; X32-AVX2-NEXT: popl %ebp
-; X32-AVX2-NEXT: vzeroupper
-; X32-AVX2-NEXT: retl
-;
-; X64-AVX2-LABEL: isel_crash_16w:
-; X64-AVX2: ## BB#0: ## %eintry
-; X64-AVX2-NEXT: pushq %rbp
-; X64-AVX2-NEXT: Lcfi3:
-; X64-AVX2-NEXT: .cfi_def_cfa_offset 16
-; X64-AVX2-NEXT: Lcfi4:
-; X64-AVX2-NEXT: .cfi_offset %rbp, -16
-; X64-AVX2-NEXT: movq %rsp, %rbp
-; X64-AVX2-NEXT: Lcfi5:
-; X64-AVX2-NEXT: .cfi_def_cfa_register %rbp
-; X64-AVX2-NEXT: andq $-32, %rsp
-; X64-AVX2-NEXT: subq $128, %rsp
-; X64-AVX2-NEXT: vxorps %ymm0, %ymm0, %ymm0
-; X64-AVX2-NEXT: vmovaps %ymm0, (%rsp)
-; X64-AVX2-NEXT: movw (%rdi), %ax
-; X64-AVX2-NEXT: vmovd %eax, %xmm1
-; X64-AVX2-NEXT: vpbroadcastw %xmm1, %ymm1
-; X64-AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
-; X64-AVX2-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%rsp)
-; X64-AVX2-NEXT: movq %rbp, %rsp
-; X64-AVX2-NEXT: popq %rbp
-; X64-AVX2-NEXT: vzeroupper
-; X64-AVX2-NEXT: retq
-;
-; X32-AVX512VL-LABEL: isel_crash_16w:
-; X32-AVX512VL: ## BB#0: ## %eintry
-; X32-AVX512VL-NEXT: pushl %ebp
-; X32-AVX512VL-NEXT: Lcfi5:
-; X32-AVX512VL-NEXT: .cfi_def_cfa_offset 8
-; X32-AVX512VL-NEXT: Lcfi6:
-; X32-AVX512VL-NEXT: .cfi_offset %ebp, -8
-; X32-AVX512VL-NEXT: movl %esp, %ebp
-; X32-AVX512VL-NEXT: Lcfi7:
-; X32-AVX512VL-NEXT: .cfi_def_cfa_register %ebp
-; X32-AVX512VL-NEXT: andl $-32, %esp
-; X32-AVX512VL-NEXT: subl $128, %esp
-; X32-AVX512VL-NEXT: movl 8(%ebp), %eax
-; X32-AVX512VL-NEXT: vxorps %ymm0, %ymm0, %ymm0
-; X32-AVX512VL-NEXT: vmovaps %ymm0, (%esp)
-; X32-AVX512VL-NEXT: vpbroadcastw (%eax), %ymm1
-; X32-AVX512VL-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X32-AVX512VL-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%esp)
-; X32-AVX512VL-NEXT: movl %ebp, %esp
-; X32-AVX512VL-NEXT: popl %ebp
-; X32-AVX512VL-NEXT: retl
+; X32-LABEL: isel_crash_16w:
+; X32: ## BB#0: ## %eintry
+; X32-NEXT: pushl %ebp
+; X32-NEXT: Lcfi5:
+; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: Lcfi6:
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: Lcfi7:
+; X32-NEXT: .cfi_def_cfa_register %ebp
+; X32-NEXT: andl $-32, %esp
+; X32-NEXT: subl $128, %esp
+; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-NEXT: vmovaps %ymm0, (%esp)
+; X32-NEXT: vpbroadcastw (%eax), %ymm1
+; X32-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
+; X32-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%esp)
+; X32-NEXT: movl %ebp, %esp
+; X32-NEXT: popl %ebp
+; X32-NEXT: vzeroupper
+; X32-NEXT: retl
;
-; X64-AVX512VL-LABEL: isel_crash_16w:
-; X64-AVX512VL: ## BB#0: ## %eintry
-; X64-AVX512VL-NEXT: pushq %rbp
-; X64-AVX512VL-NEXT: Lcfi3:
-; X64-AVX512VL-NEXT: .cfi_def_cfa_offset 16
-; X64-AVX512VL-NEXT: Lcfi4:
-; X64-AVX512VL-NEXT: .cfi_offset %rbp, -16
-; X64-AVX512VL-NEXT: movq %rsp, %rbp
-; X64-AVX512VL-NEXT: Lcfi5:
-; X64-AVX512VL-NEXT: .cfi_def_cfa_register %rbp
-; X64-AVX512VL-NEXT: andq $-32, %rsp
-; X64-AVX512VL-NEXT: subq $128, %rsp
-; X64-AVX512VL-NEXT: vxorps %ymm0, %ymm0, %ymm0
-; X64-AVX512VL-NEXT: vmovaps %ymm0, (%rsp)
-; X64-AVX512VL-NEXT: movw (%rdi), %ax
-; X64-AVX512VL-NEXT: vmovd %eax, %xmm1
-; X64-AVX512VL-NEXT: vpbroadcastw %xmm1, %ymm1
-; X64-AVX512VL-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
-; X64-AVX512VL-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%rsp)
-; X64-AVX512VL-NEXT: movq %rbp, %rsp
-; X64-AVX512VL-NEXT: popq %rbp
-; X64-AVX512VL-NEXT: retq
+; X64-LABEL: isel_crash_16w:
+; X64: ## BB#0: ## %eintry
+; X64-NEXT: pushq %rbp
+; X64-NEXT: Lcfi3:
+; X64-NEXT: .cfi_def_cfa_offset 16
+; X64-NEXT: Lcfi4:
+; X64-NEXT: .cfi_offset %rbp, -16
+; X64-NEXT: movq %rsp, %rbp
+; X64-NEXT: Lcfi5:
+; X64-NEXT: .cfi_def_cfa_register %rbp
+; X64-NEXT: andq $-32, %rsp
+; X64-NEXT: subq $128, %rsp
+; X64-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X64-NEXT: vmovaps %ymm0, (%rsp)
+; X64-NEXT: movw (%rdi), %ax
+; X64-NEXT: vmovd %eax, %xmm1
+; X64-NEXT: vpbroadcastw %xmm1, %ymm1
+; X64-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; X64-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%rsp)
+; X64-NEXT: movq %rbp, %rsp
+; X64-NEXT: popq %rbp
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
eintry:
%__a.addr.i = alloca <4 x i64>, align 16
%__b.addr.i = alloca <4 x i64>, align 16
@@ -1419,7 +1329,7 @@ define void @isel_crash_4d(i32* %cV_R.addr) {
; X64-AVX512VL-NEXT: movl (%rdi), %eax
; X64-AVX512VL-NEXT: vpbroadcastd %eax, %xmm1
; X64-AVX512VL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; X64-AVX512VL-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-AVX512VL-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp)
; X64-AVX512VL-NEXT: retq
entry:
%__a.addr.i = alloca <2 x i64>, align 16
@@ -1437,28 +1347,28 @@ entry:
}
define void @isel_crash_8d(i32* %cV_R.addr) {
-; X32-AVX2-LABEL: isel_crash_8d:
-; X32-AVX2: ## BB#0: ## %eintry
-; X32-AVX2-NEXT: pushl %ebp
-; X32-AVX2-NEXT: Lcfi9:
-; X32-AVX2-NEXT: .cfi_def_cfa_offset 8
-; X32-AVX2-NEXT: Lcfi10:
-; X32-AVX2-NEXT: .cfi_offset %ebp, -8
-; X32-AVX2-NEXT: movl %esp, %ebp
-; X32-AVX2-NEXT: Lcfi11:
-; X32-AVX2-NEXT: .cfi_def_cfa_register %ebp
-; X32-AVX2-NEXT: andl $-32, %esp
-; X32-AVX2-NEXT: subl $128, %esp
-; X32-AVX2-NEXT: movl 8(%ebp), %eax
-; X32-AVX2-NEXT: vxorps %ymm0, %ymm0, %ymm0
-; X32-AVX2-NEXT: vmovaps %ymm0, (%esp)
-; X32-AVX2-NEXT: vbroadcastss (%eax), %ymm1
-; X32-AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X32-AVX2-NEXT: vmovaps %ymm1, {{[0-9]+}}(%esp)
-; X32-AVX2-NEXT: movl %ebp, %esp
-; X32-AVX2-NEXT: popl %ebp
-; X32-AVX2-NEXT: vzeroupper
-; X32-AVX2-NEXT: retl
+; X32-LABEL: isel_crash_8d:
+; X32: ## BB#0: ## %eintry
+; X32-NEXT: pushl %ebp
+; X32-NEXT: Lcfi9:
+; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: Lcfi10:
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: Lcfi11:
+; X32-NEXT: .cfi_def_cfa_register %ebp
+; X32-NEXT: andl $-32, %esp
+; X32-NEXT: subl $128, %esp
+; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-NEXT: vmovaps %ymm0, (%esp)
+; X32-NEXT: vbroadcastss (%eax), %ymm1
+; X32-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
+; X32-NEXT: vmovaps %ymm1, {{[0-9]+}}(%esp)
+; X32-NEXT: movl %ebp, %esp
+; X32-NEXT: popl %ebp
+; X32-NEXT: vzeroupper
+; X32-NEXT: retl
;
; X64-AVX2-LABEL: isel_crash_8d:
; X64-AVX2: ## BB#0: ## %eintry
@@ -1484,28 +1394,6 @@ define void @isel_crash_8d(i32* %cV_R.addr) {
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
-; X32-AVX512VL-LABEL: isel_crash_8d:
-; X32-AVX512VL: ## BB#0: ## %eintry
-; X32-AVX512VL-NEXT: pushl %ebp
-; X32-AVX512VL-NEXT: Lcfi9:
-; X32-AVX512VL-NEXT: .cfi_def_cfa_offset 8
-; X32-AVX512VL-NEXT: Lcfi10:
-; X32-AVX512VL-NEXT: .cfi_offset %ebp, -8
-; X32-AVX512VL-NEXT: movl %esp, %ebp
-; X32-AVX512VL-NEXT: Lcfi11:
-; X32-AVX512VL-NEXT: .cfi_def_cfa_register %ebp
-; X32-AVX512VL-NEXT: andl $-32, %esp
-; X32-AVX512VL-NEXT: subl $128, %esp
-; X32-AVX512VL-NEXT: movl 8(%ebp), %eax
-; X32-AVX512VL-NEXT: vxorps %ymm0, %ymm0, %ymm0
-; X32-AVX512VL-NEXT: vmovaps %ymm0, (%esp)
-; X32-AVX512VL-NEXT: vbroadcastss (%eax), %ymm1
-; X32-AVX512VL-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X32-AVX512VL-NEXT: vmovaps %ymm1, {{[0-9]+}}(%esp)
-; X32-AVX512VL-NEXT: movl %ebp, %esp
-; X32-AVX512VL-NEXT: popl %ebp
-; X32-AVX512VL-NEXT: retl
-;
; X64-AVX512VL-LABEL: isel_crash_8d:
; X64-AVX512VL: ## BB#0: ## %eintry
; X64-AVX512VL-NEXT: pushq %rbp
@@ -1523,9 +1411,10 @@ define void @isel_crash_8d(i32* %cV_R.addr) {
; X64-AVX512VL-NEXT: movl (%rdi), %eax
; X64-AVX512VL-NEXT: vpbroadcastd %eax, %ymm1
; X64-AVX512VL-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
-; X64-AVX512VL-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; X64-AVX512VL-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%rsp)
; X64-AVX512VL-NEXT: movq %rbp, %rsp
; X64-AVX512VL-NEXT: popq %rbp
+; X64-AVX512VL-NEXT: vzeroupper
; X64-AVX512VL-NEXT: retq
eintry:
%__a.addr.i = alloca <4 x i64>, align 16
@@ -1580,7 +1469,7 @@ define void @isel_crash_2q(i64* %cV_R.addr) {
; X64-AVX512VL-NEXT: movq (%rdi), %rax
; X64-AVX512VL-NEXT: vpbroadcastq %rax, %xmm1
; X64-AVX512VL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; X64-AVX512VL-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-AVX512VL-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp)
; X64-AVX512VL-NEXT: retq
entry:
%__a.addr.i = alloca <2 x i64>, align 16
@@ -1597,34 +1486,34 @@ entry:
}
define void @isel_crash_4q(i64* %cV_R.addr) {
-; X32-AVX2-LABEL: isel_crash_4q:
-; X32-AVX2: ## BB#0: ## %eintry
-; X32-AVX2-NEXT: pushl %ebp
-; X32-AVX2-NEXT: Lcfi13:
-; X32-AVX2-NEXT: .cfi_def_cfa_offset 8
-; X32-AVX2-NEXT: Lcfi14:
-; X32-AVX2-NEXT: .cfi_offset %ebp, -8
-; X32-AVX2-NEXT: movl %esp, %ebp
-; X32-AVX2-NEXT: Lcfi15:
-; X32-AVX2-NEXT: .cfi_def_cfa_register %ebp
-; X32-AVX2-NEXT: andl $-32, %esp
-; X32-AVX2-NEXT: subl $128, %esp
-; X32-AVX2-NEXT: movl 8(%ebp), %eax
-; X32-AVX2-NEXT: vxorps %ymm0, %ymm0, %ymm0
-; X32-AVX2-NEXT: vmovaps %ymm0, (%esp)
-; X32-AVX2-NEXT: movl (%eax), %ecx
-; X32-AVX2-NEXT: movl 4(%eax), %eax
-; X32-AVX2-NEXT: vmovd %ecx, %xmm1
-; X32-AVX2-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
-; X32-AVX2-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1
-; X32-AVX2-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
-; X32-AVX2-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm1
-; X32-AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X32-AVX2-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%esp)
-; X32-AVX2-NEXT: movl %ebp, %esp
-; X32-AVX2-NEXT: popl %ebp
-; X32-AVX2-NEXT: vzeroupper
-; X32-AVX2-NEXT: retl
+; X32-LABEL: isel_crash_4q:
+; X32: ## BB#0: ## %eintry
+; X32-NEXT: pushl %ebp
+; X32-NEXT: Lcfi13:
+; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: Lcfi14:
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: Lcfi15:
+; X32-NEXT: .cfi_def_cfa_register %ebp
+; X32-NEXT: andl $-32, %esp
+; X32-NEXT: subl $128, %esp
+; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-NEXT: vmovaps %ymm0, (%esp)
+; X32-NEXT: movl (%eax), %ecx
+; X32-NEXT: movl 4(%eax), %eax
+; X32-NEXT: vmovd %ecx, %xmm1
+; X32-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; X32-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1
+; X32-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
+; X32-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm1
+; X32-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
+; X32-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%esp)
+; X32-NEXT: movl %ebp, %esp
+; X32-NEXT: popl %ebp
+; X32-NEXT: vzeroupper
+; X32-NEXT: retl
;
; X64-AVX2-LABEL: isel_crash_4q:
; X64-AVX2: ## BB#0: ## %eintry
@@ -1650,34 +1539,6 @@ define void @isel_crash_4q(i64* %cV_R.addr) {
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
-; X32-AVX512VL-LABEL: isel_crash_4q:
-; X32-AVX512VL: ## BB#0: ## %eintry
-; X32-AVX512VL-NEXT: pushl %ebp
-; X32-AVX512VL-NEXT: Lcfi13:
-; X32-AVX512VL-NEXT: .cfi_def_cfa_offset 8
-; X32-AVX512VL-NEXT: Lcfi14:
-; X32-AVX512VL-NEXT: .cfi_offset %ebp, -8
-; X32-AVX512VL-NEXT: movl %esp, %ebp
-; X32-AVX512VL-NEXT: Lcfi15:
-; X32-AVX512VL-NEXT: .cfi_def_cfa_register %ebp
-; X32-AVX512VL-NEXT: andl $-32, %esp
-; X32-AVX512VL-NEXT: subl $128, %esp
-; X32-AVX512VL-NEXT: movl 8(%ebp), %eax
-; X32-AVX512VL-NEXT: vxorps %ymm0, %ymm0, %ymm0
-; X32-AVX512VL-NEXT: vmovaps %ymm0, (%esp)
-; X32-AVX512VL-NEXT: movl (%eax), %ecx
-; X32-AVX512VL-NEXT: movl 4(%eax), %eax
-; X32-AVX512VL-NEXT: vmovd %ecx, %xmm1
-; X32-AVX512VL-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
-; X32-AVX512VL-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1
-; X32-AVX512VL-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
-; X32-AVX512VL-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm1
-; X32-AVX512VL-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X32-AVX512VL-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%esp)
-; X32-AVX512VL-NEXT: movl %ebp, %esp
-; X32-AVX512VL-NEXT: popl %ebp
-; X32-AVX512VL-NEXT: retl
-;
; X64-AVX512VL-LABEL: isel_crash_4q:
; X64-AVX512VL: ## BB#0: ## %eintry
; X64-AVX512VL-NEXT: pushq %rbp
@@ -1695,9 +1556,10 @@ define void @isel_crash_4q(i64* %cV_R.addr) {
; X64-AVX512VL-NEXT: movq (%rdi), %rax
; X64-AVX512VL-NEXT: vpbroadcastq %rax, %ymm1
; X64-AVX512VL-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
-; X64-AVX512VL-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; X64-AVX512VL-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%rsp)
; X64-AVX512VL-NEXT: movq %rbp, %rsp
; X64-AVX512VL-NEXT: popq %rbp
+; X64-AVX512VL-NEXT: vzeroupper
; X64-AVX512VL-NEXT: retq
eintry:
%__a.addr.i = alloca <4 x i64>, align 16
diff --git a/test/CodeGen/X86/avx2-vector-shifts.ll b/test/CodeGen/X86/avx2-vector-shifts.ll
index d509046cccd5..45a1cd975038 100644
--- a/test/CodeGen/X86/avx2-vector-shifts.ll
+++ b/test/CodeGen/X86/avx2-vector-shifts.ll
@@ -407,7 +407,7 @@ define <8 x i16> @shl_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
-; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
@@ -418,7 +418,7 @@ define <8 x i16> @shl_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
-; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
@@ -499,7 +499,7 @@ define <8 x i16> @ashr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovsxwd %xmm0, %ymm0
; X32-NEXT: vpsravd %ymm1, %ymm0, %ymm0
-; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
@@ -510,7 +510,7 @@ define <8 x i16> @ashr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovsxwd %xmm0, %ymm0
; X64-NEXT: vpsravd %ymm1, %ymm0, %ymm0
-; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
@@ -617,7 +617,7 @@ define <8 x i16> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
@@ -628,7 +628,7 @@ define <8 x i16> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
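
The vpshufb mask changes in this file and in avx2-shift.ll reflect one observation: the following vpermq keeps only qwords 0 and 2 of the ymm register, so bytes 8-15 and 24-31 are dead after the permute and no longer need to be zeroed by the shuffle. A hypothetical IR reduction of the pattern being lowered (promote, shift, truncate), not taken from the patch:

; Hypothetical reduction, not from the patch: AVX2 has no variable i16
; shift, so the operands are promoted to i32, shifted with vpsllvd, and
; truncated back -- the truncation is the vpshufb + vpermq pair above.
define <8 x i16> @var_shl_8i16(<8 x i16> %x, <8 x i16> %amt) {
  %xe = zext <8 x i16> %x to <8 x i32>
  %ae = zext <8 x i16> %amt to <8 x i32>
  %sh = shl <8 x i32> %xe, %ae
  %tr = trunc <8 x i32> %sh to <8 x i16>
  ret <8 x i16> %tr
}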
diff --git a/test/CodeGen/X86/avx512-adc-sbb.ll b/test/CodeGen/X86/avx512-adc-sbb.ll
new file mode 100644
index 000000000000..c994fdef6919
--- /dev/null
+++ b/test/CodeGen/X86/avx512-adc-sbb.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=avx512f %s -o - | FileCheck %s
+
+; This asserted because we didn't account for a zext of a non-SETCC node:
+; https://bugs.llvm.org/show_bug.cgi?id=32316
+
+define i8 @PR32316(i8 %t1, i32 %t5, i8 %t8) {
+; CHECK-LABEL: PR32316:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: cmpl %esi, %eax
+; CHECK-NEXT: seta %al
+; CHECK-NEXT: cmpb $1, %dl
+; CHECK-NEXT: sbbb $-1, %al
+; CHECK-NEXT: retq
+ %t2 = icmp eq i8 %t1, 0
+ %t3 = zext i1 %t2 to i32
+ %t6 = icmp ugt i32 %t3, %t5
+ %t7 = zext i1 %t6 to i8
+ %t9 = icmp ne i8 %t8, 0
+ %t10 = zext i1 %t9 to i8
+ %t11 = add i8 %t7, %t10
+ ret i8 %t11
+}
+
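A reading of the new checks, for reference: cmpb $1, %dl sets CF exactly when %dl is zero, and sbbb $-1, %al computes %al - (-1) - CF = %al + 1 - CF, that is %al + (%dl != 0). So the add of the two zexts (%t7 + %t10) is folded into a single carry-using instruction with no second setcc.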
diff --git a/test/CodeGen/X86/avx512-any_extend_load.ll b/test/CodeGen/X86/avx512-any_extend_load.ll
index 87f8cc9a418e..f6ab0044ee80 100644
--- a/test/CodeGen/X86/avx512-any_extend_load.ll
+++ b/test/CodeGen/X86/avx512-any_extend_load.ll
@@ -4,12 +4,20 @@
define void @any_extend_load_v8i64(<8 x i8> * %ptr) {
-; ALL-LABEL: any_extend_load_v8i64:
-; ALL: # BB#0:
-; ALL-NEXT: vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
-; ALL-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
-; ALL-NEXT: vpmovqb %zmm0, (%rdi)
-; ALL-NEXT: retq
+; KNL-LABEL: any_extend_load_v8i64:
+; KNL: # BB#0:
+; KNL-NEXT: vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
+; KNL-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; KNL-NEXT: vpmovqb %zmm0, (%rdi)
+; KNL-NEXT: retq
+;
+; SKX-LABEL: any_extend_load_v8i64:
+; SKX: # BB#0:
+; SKX-NEXT: vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
+; SKX-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; SKX-NEXT: vpmovqb %zmm0, (%rdi)
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
%wide.load = load <8 x i8>, <8 x i8>* %ptr, align 1
%1 = zext <8 x i8> %wide.load to <8 x i64>
%2 = add nuw nsw <8 x i64> %1, <i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 4>
@@ -33,6 +41,7 @@ define void @any_extend_load_v8i32(<8 x i8> * %ptr) {
; SKX-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; SKX-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0
; SKX-NEXT: vpmovdb %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%wide.load = load <8 x i8>, <8 x i8>* %ptr, align 1
%1 = zext <8 x i8> %wide.load to <8 x i32>
diff --git a/test/CodeGen/X86/avx512-arith.ll b/test/CodeGen/X86/avx512-arith.ll
index 5bb21ef5aa25..26be20840563 100644
--- a/test/CodeGen/X86/avx512-arith.ll
+++ b/test/CodeGen/X86/avx512-arith.ll
@@ -233,6 +233,7 @@ define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: imulq128:
diff --git a/test/CodeGen/X86/avx512-bugfix-26264.ll b/test/CodeGen/X86/avx512-bugfix-26264.ll
index b15d28a649b3..b29b6ee0658d 100644
--- a/test/CodeGen/X86/avx512-bugfix-26264.ll
+++ b/test/CodeGen/X86/avx512-bugfix-26264.ll
@@ -6,17 +6,14 @@ define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32
; AVX512BW: ## BB#0:
; AVX512BW-NEXT: vpsllw $7, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovb2m %zmm0, %k1
-; AVX512BW-NEXT: vmovupd (%rdi), %zmm1 {%k1}
+; AVX512BW-NEXT: vblendmpd (%rdi), %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: kshiftrd $16, %k1, %k2
-; AVX512BW-NEXT: vmovupd 128(%rdi), %zmm3 {%k2}
+; AVX512BW-NEXT: vblendmpd 128(%rdi), %zmm3, %zmm5 {%k2}
; AVX512BW-NEXT: kshiftrw $8, %k1, %k1
-; AVX512BW-NEXT: vmovupd 64(%rdi), %zmm2 {%k1}
+; AVX512BW-NEXT: vblendmpd 64(%rdi), %zmm2, %zmm1 {%k1}
; AVX512BW-NEXT: kshiftrw $8, %k2, %k1
-; AVX512BW-NEXT: vmovupd 192(%rdi), %zmm4 {%k1}
-; AVX512BW-NEXT: vmovapd %zmm1, %zmm0
-; AVX512BW-NEXT: vmovapd %zmm2, %zmm1
-; AVX512BW-NEXT: vmovapd %zmm3, %zmm2
-; AVX512BW-NEXT: vmovapd %zmm4, %zmm3
+; AVX512BW-NEXT: vblendmpd 192(%rdi), %zmm4, %zmm3 {%k1}
+; AVX512BW-NEXT: vmovapd %zmm5, %zmm2
; AVX512BW-NEXT: retq
%res = call <32 x double> @llvm.masked.load.v32f64.p0v32f64(<32 x double>* %ptrs, i32 4, <32 x i1> %mask, <32 x double> %src0)
ret <32 x double> %res
@@ -27,17 +24,14 @@ define <32 x i64> @test_load_32i64(<32 x i64>* %ptrs, <32 x i1> %mask, <32 x i64
; AVX512BW: ## BB#0:
; AVX512BW-NEXT: vpsllw $7, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovb2m %zmm0, %k1
-; AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm1 {%k1}
+; AVX512BW-NEXT: vpblendmq (%rdi), %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: kshiftrd $16, %k1, %k2
-; AVX512BW-NEXT: vmovdqu64 128(%rdi), %zmm3 {%k2}
+; AVX512BW-NEXT: vpblendmq 128(%rdi), %zmm3, %zmm5 {%k2}
; AVX512BW-NEXT: kshiftrw $8, %k1, %k1
-; AVX512BW-NEXT: vmovdqu64 64(%rdi), %zmm2 {%k1}
+; AVX512BW-NEXT: vpblendmq 64(%rdi), %zmm2, %zmm1 {%k1}
; AVX512BW-NEXT: kshiftrw $8, %k2, %k1
-; AVX512BW-NEXT: vmovdqu64 192(%rdi), %zmm4 {%k1}
-; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
-; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm1
-; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm2
-; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm3
+; AVX512BW-NEXT: vpblendmq 192(%rdi), %zmm4, %zmm3 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm2
; AVX512BW-NEXT: retq
%res = call <32 x i64> @llvm.masked.load.v32i64.p0v32i64(<32 x i64>* %ptrs, i32 4, <32 x i1> %mask, <32 x i64> %src0)
ret <32 x i64> %res
diff --git a/test/CodeGen/X86/avx512-calling-conv.ll b/test/CodeGen/X86/avx512-calling-conv.ll
index 1a91bc1dee9a..138b8750633c 100644
--- a/test/CodeGen/X86/avx512-calling-conv.ll
+++ b/test/CodeGen/X86/avx512-calling-conv.ll
@@ -140,6 +140,7 @@ define <8 x i32> @test5(<8 x i32>%a, <8 x i32>%b) {
; SKX-NEXT: .cfi_def_cfa_offset 16
; SKX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; SKX-NEXT: vpmovm2w %k0, %xmm0
+; SKX-NEXT: vzeroupper
; SKX-NEXT: callq _func8xi1
; SKX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SKX-NEXT: vpslld $31, %ymm0, %ymm0
@@ -192,6 +193,7 @@ define <16 x i32> @test6(<16 x i32>%a, <16 x i32>%b) {
; SKX-NEXT: .cfi_def_cfa_offset 16
; SKX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; SKX-NEXT: vpmovm2b %k0, %xmm0
+; SKX-NEXT: vzeroupper
; SKX-NEXT: callq _func16xi1
; SKX-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; SKX-NEXT: vpslld $31, %zmm0, %zmm0
@@ -291,11 +293,12 @@ define <8 x i1> @test7a(<8 x i32>%a, <8 x i32>%b) {
; SKX-NEXT: .cfi_def_cfa_offset 16
; SKX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; SKX-NEXT: vpmovm2w %k0, %xmm0
+; SKX-NEXT: vzeroupper
; SKX-NEXT: callq _func8xi1
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k0
; SKX-NEXT: movb $85, %al
-; SKX-NEXT: kmovb %eax, %k1
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: kandb %k1, %k0, %k0
; SKX-NEXT: vpmovm2w %k0, %xmm0
; SKX-NEXT: popq %rax
diff --git a/test/CodeGen/X86/avx512-cmp-kor-sequence.ll b/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
index 9775c79796f7..63b0281a7339 100644
--- a/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
+++ b/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
@@ -19,6 +19,7 @@ define zeroext i16 @cmp_kor_seq_16(<16 x float> %a, <16 x float> %b, <16 x float
; CHECK-NEXT: korw %k3, %k2, %k1
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq
entry:
%0 = tail call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %a, <16 x float> %x, i32 13, i16 -1, i32 4)
diff --git a/test/CodeGen/X86/avx512-cmp.ll b/test/CodeGen/X86/avx512-cmp.ll
index 78df51be5c3e..c1b64743f898 100644
--- a/test/CodeGen/X86/avx512-cmp.ll
+++ b/test/CodeGen/X86/avx512-cmp.ll
@@ -69,13 +69,14 @@ define float @test5(float %p) #0 {
; ALL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; ALL-NEXT: vucomiss %xmm1, %xmm0
; ALL-NEXT: jne LBB3_1
-; ALL-NEXT: jnp LBB3_2
+; ALL-NEXT: jp LBB3_1
+; ALL-NEXT: ## BB#2: ## %return
+; ALL-NEXT: retq
; ALL-NEXT: LBB3_1: ## %if.end
; ALL-NEXT: seta %al
; ALL-NEXT: movzbl %al, %eax
; ALL-NEXT: leaq {{.*}}(%rip), %rcx
; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; ALL-NEXT: LBB3_2: ## %return
; ALL-NEXT: retq
entry:
%cmp = fcmp oeq float %p, 0.000000e+00
@@ -119,12 +120,12 @@ entry:
define i32 @test8(i32 %a1, i32 %a2, i32 %a3) {
; ALL-LABEL: test8:
; ALL: ## BB#0:
+; ALL-NEXT: notl %edi
+; ALL-NEXT: xorl $-2147483648, %esi ## imm = 0x80000000
; ALL-NEXT: testl %edx, %edx
; ALL-NEXT: movl $1, %eax
; ALL-NEXT: cmovel %eax, %edx
-; ALL-NEXT: cmpl $-2147483648, %esi ## imm = 0x80000000
-; ALL-NEXT: cmovnel %edx, %eax
-; ALL-NEXT: cmpl $-1, %edi
+; ALL-NEXT: orl %edi, %esi
; ALL-NEXT: cmovnel %edx, %eax
; ALL-NEXT: retq
%tmp1 = icmp eq i32 %a1, -1
@@ -157,26 +158,47 @@ B:
}
define i32 @test10(i64 %b, i64 %c, i1 %d) {
-; ALL-LABEL: test10:
-; ALL: ## BB#0:
-; ALL-NEXT: andl $1, %edx
-; ALL-NEXT: kmovw %edx, %k0
-; ALL-NEXT: cmpq %rsi, %rdi
-; ALL-NEXT: sete %al
-; ALL-NEXT: andl $1, %eax
-; ALL-NEXT: kmovw %eax, %k1
-; ALL-NEXT: korw %k1, %k0, %k1
-; ALL-NEXT: kxorw %k1, %k0, %k0
-; ALL-NEXT: kmovw %k0, %eax
-; ALL-NEXT: andl $1, %eax
-; ALL-NEXT: testb %al, %al
-; ALL-NEXT: je LBB8_1
-; ALL-NEXT: ## BB#2: ## %if.end.i
-; ALL-NEXT: movl $6, %eax
-; ALL-NEXT: retq
-; ALL-NEXT: LBB8_1: ## %if.then.i
-; ALL-NEXT: movl $5, %eax
-; ALL-NEXT: retq
+; KNL-LABEL: test10:
+; KNL: ## BB#0:
+; KNL-NEXT: andl $1, %edx
+; KNL-NEXT: kmovw %edx, %k0
+; KNL-NEXT: cmpq %rsi, %rdi
+; KNL-NEXT: sete %al
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: korw %k1, %k0, %k1
+; KNL-NEXT: kxorw %k1, %k0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: testb %al, %al
+; KNL-NEXT: je LBB8_1
+; KNL-NEXT: ## BB#2: ## %if.end.i
+; KNL-NEXT: movl $6, %eax
+; KNL-NEXT: retq
+; KNL-NEXT: LBB8_1: ## %if.then.i
+; KNL-NEXT: movl $5, %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test10:
+; SKX: ## BB#0:
+; SKX-NEXT: andl $1, %edx
+; SKX-NEXT: kmovd %edx, %k0
+; SKX-NEXT: cmpq %rsi, %rdi
+; SKX-NEXT: sete %al
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: kmovd %eax, %k1
+; SKX-NEXT: korw %k1, %k0, %k1
+; SKX-NEXT: kxorw %k1, %k0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: testb %al, %al
+; SKX-NEXT: je LBB8_1
+; SKX-NEXT: ## BB#2: ## %if.end.i
+; SKX-NEXT: movl $6, %eax
+; SKX-NEXT: retq
+; SKX-NEXT: LBB8_1: ## %if.then.i
+; SKX-NEXT: movl $5, %eax
+; SKX-NEXT: retq
%cmp8.i = icmp eq i64 %b, %c
%or1 = or i1 %d, %cmp8.i
diff --git a/test/CodeGen/X86/avx512-cvt.ll b/test/CodeGen/X86/avx512-cvt.ll
index 87deeb9e16c0..2b55372f3066 100644
--- a/test/CodeGen/X86/avx512-cvt.ll
+++ b/test/CodeGen/X86/avx512-cvt.ll
@@ -1,11 +1,11 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=NODQ --check-prefix=NOVLDQ --check-prefix=KNL
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s --check-prefix=ALL --check-prefix=DQ --check-prefix=VL --check-prefix=VLDQ --check-prefix=VLBW --check-prefix=SKX
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=NODQ --check-prefix=VL --check-prefix=VLNODQ --check-prefix=VLNOBW --check-prefix=AVX512VL
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=DQ --check-prefix=AVX512DQ
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=NODQ --check-prefix=NOVLDQ --check-prefix=AVX512BW
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512vl,avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=DQ --check-prefix=VL --check-prefix=VLDQ --check-prefix=VLNOBW --check-prefix=AVX512VLDQ
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512vl,avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=NODQ --check-prefix=VL --check-prefix=VLNODQ --check-prefix=VLBW --check-prefix=AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=DQ --check-prefix=VL --check-prefix=VLDQ --check-prefix=VLBW --check-prefix=SKX
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=NODQ --check-prefix=VL --check-prefix=VLNODQ --check-prefix=VLNOBW --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=NOVL --check-prefix=DQ --check-prefix=AVX512DQ
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=NOVL --check-prefix=NODQ --check-prefix=NOVLDQ --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512vl,avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=DQ --check-prefix=VL --check-prefix=VLDQ --check-prefix=VLNOBW --check-prefix=AVX512VLDQ
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512vl,avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=NODQ --check-prefix=VL --check-prefix=VLNODQ --check-prefix=VLBW --check-prefix=AVX512VLBW
define <16 x float> @sitof32(<16 x i32> %a) nounwind {
@@ -110,40 +110,78 @@ define <2 x float> @sltof2f32(<2 x i64> %a) {
; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = sitofp <2 x i64> %a to <2 x float>
ret <2 x float>%b
}
define <4 x float> @sltof4f32_mem(<4 x i64>* %a) {
-; NODQ-LABEL: sltof4f32_mem:
-; NODQ: ## BB#0:
-; NODQ-NEXT: vmovdqu (%rdi), %ymm0
-; NODQ-NEXT: vpextrq $1, %xmm0, %rax
-; NODQ-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
-; NODQ-NEXT: vmovq %xmm0, %rax
-; NODQ-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2
-; NODQ-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
-; NODQ-NEXT: vextracti128 $1, %ymm0, %xmm0
-; NODQ-NEXT: vmovq %xmm0, %rax
-; NODQ-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2
-; NODQ-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
-; NODQ-NEXT: vpextrq $1, %xmm0, %rax
-; NODQ-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
-; NODQ-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; NODQ-NEXT: retq
+; KNL-LABEL: sltof4f32_mem:
+; KNL: ## BB#0:
+; KNL-NEXT: vmovdqu (%rdi), %ymm0
+; KNL-NEXT: vpextrq $1, %xmm0, %rax
+; KNL-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
+; KNL-NEXT: vmovq %xmm0, %rax
+; KNL-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2
+; KNL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
+; KNL-NEXT: vmovq %xmm0, %rax
+; KNL-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2
+; KNL-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; KNL-NEXT: vpextrq $1, %xmm0, %rax
+; KNL-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
+; KNL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; KNL-NEXT: retq
;
; VLDQ-LABEL: sltof4f32_mem:
; VLDQ: ## BB#0:
; VLDQ-NEXT: vcvtqq2psy (%rdi), %xmm0
; VLDQ-NEXT: retq
;
+; VLNODQ-LABEL: sltof4f32_mem:
+; VLNODQ: ## BB#0:
+; VLNODQ-NEXT: vmovdqu (%rdi), %ymm0
+; VLNODQ-NEXT: vpextrq $1, %xmm0, %rax
+; VLNODQ-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
+; VLNODQ-NEXT: vmovq %xmm0, %rax
+; VLNODQ-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2
+; VLNODQ-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; VLNODQ-NEXT: vextracti128 $1, %ymm0, %xmm0
+; VLNODQ-NEXT: vmovq %xmm0, %rax
+; VLNODQ-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2
+; VLNODQ-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; VLNODQ-NEXT: vpextrq $1, %xmm0, %rax
+; VLNODQ-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
+; VLNODQ-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; VLNODQ-NEXT: vzeroupper
+; VLNODQ-NEXT: retq
+;
; AVX512DQ-LABEL: sltof4f32_mem:
; AVX512DQ: ## BB#0:
; AVX512DQ-NEXT: vmovups (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: sltof4f32_mem:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vmovdqu (%rdi), %ymm0
+; AVX512BW-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512BW-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX512BW-NEXT: vmovq %xmm0, %rax
+; AVX512BW-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX512BW-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vmovq %xmm0, %rax
+; AVX512BW-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX512BW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX512BW-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512BW-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX512BW-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
%a1 = load <4 x i64>, <4 x i64>* %a, align 8
%b = sitofp <4 x i64> %a1 to <4 x float>
ret <4 x float>%b
@@ -218,65 +256,137 @@ define <4 x i64> @f32tosl(<4 x float> %a) {
}
define <4 x float> @sltof432(<4 x i64> %a) {
-; NODQ-LABEL: sltof432:
-; NODQ: ## BB#0:
-; NODQ-NEXT: vpextrq $1, %xmm0, %rax
-; NODQ-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
-; NODQ-NEXT: vmovq %xmm0, %rax
-; NODQ-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2
-; NODQ-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
-; NODQ-NEXT: vextracti128 $1, %ymm0, %xmm0
-; NODQ-NEXT: vmovq %xmm0, %rax
-; NODQ-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2
-; NODQ-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
-; NODQ-NEXT: vpextrq $1, %xmm0, %rax
-; NODQ-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
-; NODQ-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; NODQ-NEXT: retq
+; KNL-LABEL: sltof432:
+; KNL: ## BB#0:
+; KNL-NEXT: vpextrq $1, %xmm0, %rax
+; KNL-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
+; KNL-NEXT: vmovq %xmm0, %rax
+; KNL-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2
+; KNL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
+; KNL-NEXT: vmovq %xmm0, %rax
+; KNL-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2
+; KNL-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; KNL-NEXT: vpextrq $1, %xmm0, %rax
+; KNL-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
+; KNL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; KNL-NEXT: retq
;
; VLDQ-LABEL: sltof432:
; VLDQ: ## BB#0:
; VLDQ-NEXT: vcvtqq2ps %ymm0, %xmm0
+; VLDQ-NEXT: vzeroupper
; VLDQ-NEXT: retq
;
+; VLNODQ-LABEL: sltof432:
+; VLNODQ: ## BB#0:
+; VLNODQ-NEXT: vpextrq $1, %xmm0, %rax
+; VLNODQ-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
+; VLNODQ-NEXT: vmovq %xmm0, %rax
+; VLNODQ-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2
+; VLNODQ-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; VLNODQ-NEXT: vextracti128 $1, %ymm0, %xmm0
+; VLNODQ-NEXT: vmovq %xmm0, %rax
+; VLNODQ-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2
+; VLNODQ-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; VLNODQ-NEXT: vpextrq $1, %xmm0, %rax
+; VLNODQ-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
+; VLNODQ-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; VLNODQ-NEXT: vzeroupper
+; VLNODQ-NEXT: retq
+;
; AVX512DQ-LABEL: sltof432:
; AVX512DQ: ## BB#0:
; AVX512DQ-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: sltof432:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512BW-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX512BW-NEXT: vmovq %xmm0, %rax
+; AVX512BW-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX512BW-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vmovq %xmm0, %rax
+; AVX512BW-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX512BW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX512BW-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512BW-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX512BW-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
%b = sitofp <4 x i64> %a to <4 x float>
ret <4 x float> %b
}
define <4 x float> @ultof432(<4 x i64> %a) {
-; NODQ-LABEL: ultof432:
-; NODQ: ## BB#0:
-; NODQ-NEXT: vpextrq $1, %xmm0, %rax
-; NODQ-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
-; NODQ-NEXT: vmovq %xmm0, %rax
-; NODQ-NEXT: vcvtusi2ssq %rax, %xmm2, %xmm2
-; NODQ-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
-; NODQ-NEXT: vextracti128 $1, %ymm0, %xmm0
-; NODQ-NEXT: vmovq %xmm0, %rax
-; NODQ-NEXT: vcvtusi2ssq %rax, %xmm3, %xmm2
-; NODQ-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
-; NODQ-NEXT: vpextrq $1, %xmm0, %rax
-; NODQ-NEXT: vcvtusi2ssq %rax, %xmm3, %xmm0
-; NODQ-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; NODQ-NEXT: retq
+; KNL-LABEL: ultof432:
+; KNL: ## BB#0:
+; KNL-NEXT: vpextrq $1, %xmm0, %rax
+; KNL-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
+; KNL-NEXT: vmovq %xmm0, %rax
+; KNL-NEXT: vcvtusi2ssq %rax, %xmm2, %xmm2
+; KNL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
+; KNL-NEXT: vmovq %xmm0, %rax
+; KNL-NEXT: vcvtusi2ssq %rax, %xmm3, %xmm2
+; KNL-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; KNL-NEXT: vpextrq $1, %xmm0, %rax
+; KNL-NEXT: vcvtusi2ssq %rax, %xmm3, %xmm0
+; KNL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; KNL-NEXT: retq
;
; VLDQ-LABEL: ultof432:
; VLDQ: ## BB#0:
; VLDQ-NEXT: vcvtuqq2ps %ymm0, %xmm0
+; VLDQ-NEXT: vzeroupper
; VLDQ-NEXT: retq
;
+; VLNODQ-LABEL: ultof432:
+; VLNODQ: ## BB#0:
+; VLNODQ-NEXT: vpextrq $1, %xmm0, %rax
+; VLNODQ-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
+; VLNODQ-NEXT: vmovq %xmm0, %rax
+; VLNODQ-NEXT: vcvtusi2ssq %rax, %xmm2, %xmm2
+; VLNODQ-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; VLNODQ-NEXT: vextracti128 $1, %ymm0, %xmm0
+; VLNODQ-NEXT: vmovq %xmm0, %rax
+; VLNODQ-NEXT: vcvtusi2ssq %rax, %xmm3, %xmm2
+; VLNODQ-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; VLNODQ-NEXT: vpextrq $1, %xmm0, %rax
+; VLNODQ-NEXT: vcvtusi2ssq %rax, %xmm3, %xmm0
+; VLNODQ-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; VLNODQ-NEXT: vzeroupper
+; VLNODQ-NEXT: retq
+;
; AVX512DQ-LABEL: ultof432:
; AVX512DQ: ## BB#0:
; AVX512DQ-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: ultof432:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512BW-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
+; AVX512BW-NEXT: vmovq %xmm0, %rax
+; AVX512BW-NEXT: vcvtusi2ssq %rax, %xmm2, %xmm2
+; AVX512BW-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vmovq %xmm0, %rax
+; AVX512BW-NEXT: vcvtusi2ssq %rax, %xmm3, %xmm2
+; AVX512BW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX512BW-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512BW-NEXT: vcvtusi2ssq %rax, %xmm3, %xmm0
+; AVX512BW-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
%b = uitofp <4 x i64> %a to <4 x float>
ret <4 x float> %b
}
@@ -355,17 +465,33 @@ define <8 x i32> @fptoui_256(<8 x float> %a) nounwind {
}
define <4 x i32> @fptoui_128(<4 x float> %a) nounwind {
-; NOVL-LABEL: fptoui_128:
-; NOVL: ## BB#0:
-; NOVL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; NOVL-NEXT: vcvttps2udq %zmm0, %zmm0
-; NOVL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; NOVL-NEXT: retq
+; KNL-LABEL: fptoui_128:
+; KNL: ## BB#0:
+; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL-NEXT: vcvttps2udq %zmm0, %zmm0
+; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; KNL-NEXT: retq
;
; VL-LABEL: fptoui_128:
; VL: ## BB#0:
; VL-NEXT: vcvttps2udq %xmm0, %xmm0
; VL-NEXT: retq
+;
+; AVX512DQ-LABEL: fptoui_128:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0
+; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: fptoui_128:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vcvttps2udq %zmm0, %zmm0
+; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
%b = fptoui <4 x float> %a to <4 x i32>
ret <4 x i32> %b
}
@@ -380,17 +506,34 @@ define <8 x i32> @fptoui01(<8 x double> %a) nounwind {
}
define <4 x i32> @fptoui_256d(<4 x double> %a) nounwind {
-; NOVL-LABEL: fptoui_256d:
-; NOVL: ## BB#0:
-; NOVL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; NOVL-NEXT: vcvttpd2udq %zmm0, %ymm0
-; NOVL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
-; NOVL-NEXT: retq
+; KNL-LABEL: fptoui_256d:
+; KNL: ## BB#0:
+; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: vcvttpd2udq %zmm0, %ymm0
+; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL-NEXT: retq
;
; VL-LABEL: fptoui_256d:
; VL: ## BB#0:
; VL-NEXT: vcvttpd2udq %ymm0, %xmm0
+; VL-NEXT: vzeroupper
; VL-NEXT: retq
+;
+; AVX512DQ-LABEL: fptoui_256d:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
+; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: fptoui_256d:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vcvttpd2udq %zmm0, %ymm0
+; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
%b = fptoui <4 x double> %a to <4 x i32>
ret <4 x i32> %b
}
@@ -404,34 +547,70 @@ define <8 x double> @sitof64(<8 x i32> %a) {
ret <8 x double> %b
}
define <8 x double> @sitof64_mask(<8 x double> %a, <8 x i32> %b, i8 %c) nounwind {
-; NODQ-LABEL: sitof64_mask:
-; NODQ: ## BB#0:
-; NODQ-NEXT: kmovw %edi, %k1
-; NODQ-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1}
-; NODQ-NEXT: retq
+; KNL-LABEL: sitof64_mask:
+; KNL: ## BB#0:
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1}
+; KNL-NEXT: retq
;
-; DQ-LABEL: sitof64_mask:
-; DQ: ## BB#0:
-; DQ-NEXT: kmovb %edi, %k1
-; DQ-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1}
-; DQ-NEXT: retq
+; VLBW-LABEL: sitof64_mask:
+; VLBW: ## BB#0:
+; VLBW-NEXT: kmovd %edi, %k1
+; VLBW-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1}
+; VLBW-NEXT: retq
+;
+; VLNOBW-LABEL: sitof64_mask:
+; VLNOBW: ## BB#0:
+; VLNOBW-NEXT: kmovw %edi, %k1
+; VLNOBW-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1}
+; VLNOBW-NEXT: retq
+;
+; AVX512DQ-LABEL: sitof64_mask:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw %edi, %k1
+; AVX512DQ-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1}
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: sitof64_mask:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1}
+; AVX512BW-NEXT: retq
%1 = bitcast i8 %c to <8 x i1>
%2 = sitofp <8 x i32> %b to <8 x double>
%3 = select <8 x i1> %1, <8 x double> %2, <8 x double> %a
ret <8 x double> %3
}
define <8 x double> @sitof64_maskz(<8 x i32> %a, i8 %b) nounwind {
-; NODQ-LABEL: sitof64_maskz:
-; NODQ: ## BB#0:
-; NODQ-NEXT: kmovw %edi, %k1
-; NODQ-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
-; NODQ-NEXT: retq
+; KNL-LABEL: sitof64_maskz:
+; KNL: ## BB#0:
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
+; KNL-NEXT: retq
;
-; DQ-LABEL: sitof64_maskz:
-; DQ: ## BB#0:
-; DQ-NEXT: kmovb %edi, %k1
-; DQ-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
-; DQ-NEXT: retq
+; VLBW-LABEL: sitof64_maskz:
+; VLBW: ## BB#0:
+; VLBW-NEXT: kmovd %edi, %k1
+; VLBW-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
+; VLBW-NEXT: retq
+;
+; VLNOBW-LABEL: sitof64_maskz:
+; VLNOBW: ## BB#0:
+; VLNOBW-NEXT: kmovw %edi, %k1
+; VLNOBW-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
+; VLNOBW-NEXT: retq
+;
+; AVX512DQ-LABEL: sitof64_maskz:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw %edi, %k1
+; AVX512DQ-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: sitof64_maskz:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
%1 = bitcast i8 %b to <8 x i1>
%2 = sitofp <8 x i32> %a to <8 x double>
%3 = select <8 x i1> %1, <8 x double> %2, <8 x double> zeroinitializer
@@ -448,10 +627,16 @@ define <8 x i32> @fptosi01(<8 x double> %a) {
}
define <4 x i32> @fptosi03(<4 x double> %a) {
-; ALL-LABEL: fptosi03:
-; ALL: ## BB#0:
-; ALL-NEXT: vcvttpd2dq %ymm0, %xmm0
-; ALL-NEXT: retq
+; KNL-LABEL: fptosi03:
+; KNL: ## BB#0:
+; KNL-NEXT: vcvttpd2dq %ymm0, %xmm0
+; KNL-NEXT: retq
+;
+; AVX512-LABEL: fptosi03:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vcvttpd2dq %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
%b = fptosi <4 x double> %a to <4 x i32>
ret <4 x i32> %b
}
@@ -475,29 +660,54 @@ define <16 x float> @fptrunc00(<16 x double> %b) nounwind {
}
define <4 x float> @fptrunc01(<4 x double> %b) {
-; ALL-LABEL: fptrunc01:
-; ALL: ## BB#0:
-; ALL-NEXT: vcvtpd2ps %ymm0, %xmm0
-; ALL-NEXT: retq
+; KNL-LABEL: fptrunc01:
+; KNL: ## BB#0:
+; KNL-NEXT: vcvtpd2ps %ymm0, %xmm0
+; KNL-NEXT: retq
+;
+; AVX512-LABEL: fptrunc01:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vcvtpd2ps %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
%a = fptrunc <4 x double> %b to <4 x float>
ret <4 x float> %a
}
define <4 x float> @fptrunc02(<4 x double> %b, <4 x i1> %mask) {
-; NOVL-LABEL: fptrunc02:
-; NOVL: ## BB#0:
-; NOVL-NEXT: vpslld $31, %xmm1, %xmm1
-; NOVL-NEXT: vpsrad $31, %xmm1, %xmm1
-; NOVL-NEXT: vcvtpd2ps %ymm0, %xmm0
-; NOVL-NEXT: vpand %xmm0, %xmm1, %xmm0
-; NOVL-NEXT: retq
+; KNL-LABEL: fptrunc02:
+; KNL: ## BB#0:
+; KNL-NEXT: vpslld $31, %xmm1, %xmm1
+; KNL-NEXT: vpsrad $31, %xmm1, %xmm1
+; KNL-NEXT: vcvtpd2ps %ymm0, %xmm0
+; KNL-NEXT: vpand %xmm0, %xmm1, %xmm0
+; KNL-NEXT: retq
;
; VL-LABEL: fptrunc02:
; VL: ## BB#0:
; VL-NEXT: vpslld $31, %xmm1, %xmm1
; VL-NEXT: vptestmd %xmm1, %xmm1, %k1
; VL-NEXT: vcvtpd2ps %ymm0, %xmm0 {%k1} {z}
+; VL-NEXT: vzeroupper
; VL-NEXT: retq
+;
+; AVX512DQ-LABEL: fptrunc02:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX512DQ-NEXT: vcvtpd2ps %ymm0, %xmm0
+; AVX512DQ-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: fptrunc02:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512BW-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX512BW-NEXT: vcvtpd2ps %ymm0, %xmm0
+; AVX512BW-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
%a = fptrunc <4 x double> %b to <4 x float>
%c = select <4 x i1>%mask, <4 x float>%a, <4 x float> zeroinitializer
ret <4 x float> %c
@@ -685,34 +895,70 @@ define <16 x double> @uitof64(<16 x i32> %a) nounwind {
ret <16 x double> %b
}
define <8 x double> @uitof64_mask(<8 x double> %a, <8 x i32> %b, i8 %c) nounwind {
-; NODQ-LABEL: uitof64_mask:
-; NODQ: ## BB#0:
-; NODQ-NEXT: kmovw %edi, %k1
-; NODQ-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1}
-; NODQ-NEXT: retq
+; KNL-LABEL: uitof64_mask:
+; KNL: ## BB#0:
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1}
+; KNL-NEXT: retq
;
-; DQ-LABEL: uitof64_mask:
-; DQ: ## BB#0:
-; DQ-NEXT: kmovb %edi, %k1
-; DQ-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1}
-; DQ-NEXT: retq
+; VLBW-LABEL: uitof64_mask:
+; VLBW: ## BB#0:
+; VLBW-NEXT: kmovd %edi, %k1
+; VLBW-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1}
+; VLBW-NEXT: retq
+;
+; VLNOBW-LABEL: uitof64_mask:
+; VLNOBW: ## BB#0:
+; VLNOBW-NEXT: kmovw %edi, %k1
+; VLNOBW-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1}
+; VLNOBW-NEXT: retq
+;
+; AVX512DQ-LABEL: uitof64_mask:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw %edi, %k1
+; AVX512DQ-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1}
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: uitof64_mask:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1}
+; AVX512BW-NEXT: retq
%1 = bitcast i8 %c to <8 x i1>
%2 = uitofp <8 x i32> %b to <8 x double>
%3 = select <8 x i1> %1, <8 x double> %2, <8 x double> %a
ret <8 x double> %3
}
define <8 x double> @uitof64_maskz(<8 x i32> %a, i8 %b) nounwind {
-; NODQ-LABEL: uitof64_maskz:
-; NODQ: ## BB#0:
-; NODQ-NEXT: kmovw %edi, %k1
-; NODQ-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
-; NODQ-NEXT: retq
+; KNL-LABEL: uitof64_maskz:
+; KNL: ## BB#0:
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
+; KNL-NEXT: retq
;
-; DQ-LABEL: uitof64_maskz:
-; DQ: ## BB#0:
-; DQ-NEXT: kmovb %edi, %k1
-; DQ-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
-; DQ-NEXT: retq
+; VLBW-LABEL: uitof64_maskz:
+; VLBW: ## BB#0:
+; VLBW-NEXT: kmovd %edi, %k1
+; VLBW-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
+; VLBW-NEXT: retq
+;
+; VLNOBW-LABEL: uitof64_maskz:
+; VLNOBW: ## BB#0:
+; VLNOBW-NEXT: kmovw %edi, %k1
+; VLNOBW-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
+; VLNOBW-NEXT: retq
+;
+; AVX512DQ-LABEL: uitof64_maskz:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw %edi, %k1
+; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: uitof64_maskz:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
%1 = bitcast i8 %b to <8 x i1>
%2 = uitofp <8 x i32> %a to <8 x double>
%3 = select <8 x i1> %1, <8 x double> %2, <8 x double> zeroinitializer
@@ -761,17 +1007,33 @@ define <8 x float> @uitof32_256(<8 x i32> %a) nounwind {
}
define <4 x float> @uitof32_128(<4 x i32> %a) nounwind {
-; NOVL-LABEL: uitof32_128:
-; NOVL: ## BB#0:
-; NOVL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; NOVL-NEXT: vcvtudq2ps %zmm0, %zmm0
-; NOVL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; NOVL-NEXT: retq
+; KNL-LABEL: uitof32_128:
+; KNL: ## BB#0:
+; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL-NEXT: vcvtudq2ps %zmm0, %zmm0
+; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; KNL-NEXT: retq
;
; VL-LABEL: uitof32_128:
; VL: ## BB#0:
; VL-NEXT: vcvtudq2ps %xmm0, %xmm0
; VL-NEXT: retq
+;
+; AVX512DQ-LABEL: uitof32_128:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
+; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: uitof32_128:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vcvtudq2ps %zmm0, %zmm0
+; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
%b = uitofp <4 x i32> %a to <4 x float>
ret <4 x float> %b
}
@@ -917,11 +1179,9 @@ define <16 x double> @sitofp_16i1_double(<16 x double> %a) {
; AVX512DQ-NEXT: vxorpd %zmm2, %zmm2, %zmm2
; AVX512DQ-NEXT: vcmpltpd %zmm1, %zmm2, %k0
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm2, %k1
-; AVX512DQ-NEXT: vpmovm2q %k1, %zmm0
-; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512DQ-NEXT: vpmovm2d %k1, %zmm0
; AVX512DQ-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT: vpmovm2q %k0, %zmm1
-; AVX512DQ-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512DQ-NEXT: vpmovm2d %k0, %zmm1
; AVX512DQ-NEXT: vcvtdq2pd %ymm1, %zmm1
; AVX512DQ-NEXT: retq
%cmpres = fcmp ogt <16 x double> %a, zeroinitializer
@@ -960,8 +1220,7 @@ define <8 x double> @sitofp_8i1_double(<8 x double> %a) {
; AVX512DQ: ## BB#0:
; AVX512DQ-NEXT: vxorpd %zmm1, %zmm1, %zmm1
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0
-; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
-; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512DQ-NEXT: retq
%cmpres = fcmp ogt <8 x double> %a, zeroinitializer
@@ -1002,8 +1261,7 @@ define <8 x float> @sitofp_8i1_float(<8 x float> %a) {
; AVX512DQ-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vxorps %ymm1, %ymm1, %ymm1
; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0
-; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
-; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512DQ-NEXT: retq
%cmpres = fcmp ogt <8 x float> %a, zeroinitializer
@@ -1075,7 +1333,6 @@ define <2 x float> @sitofp_2i1_float(<2 x float> %a) {
; NOVL: ## BB#0:
; NOVL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
-; NOVL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],zero,xmm0[1]
; NOVL-NEXT: vcvtdq2ps %xmm0, %xmm0
; NOVL-NEXT: retq
;
diff --git a/test/CodeGen/X86/avx512-ext.ll b/test/CodeGen/X86/avx512-ext.ll
index 03d6127ae5dc..796ee83b6fa7 100644
--- a/test/CodeGen/X86/avx512-ext.ll
+++ b/test/CodeGen/X86/avx512-ext.ll
@@ -491,8 +491,7 @@ define <2 x i64> @zext_2x8mem_to_2x64(<2 x i8> *%i , <2 x i1> %mask) nounwind re
; KNL-LABEL: zext_2x8mem_to_2x64:
; KNL: ## BB#0:
; KNL-NEXT: vpsllq $63, %xmm0, %xmm0
-; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
-; KNL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; KNL-NEXT: vpsraq $63, %zmm0, %zmm0
; KNL-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; KNL-NEXT: vpand %xmm1, %xmm0, %xmm0
; KNL-NEXT: retq
@@ -512,8 +511,7 @@ define <2 x i64> @sext_2x8mem_to_2x64mask(<2 x i8> *%i , <2 x i1> %mask) nounwin
; KNL-LABEL: sext_2x8mem_to_2x64mask:
; KNL: ## BB#0:
; KNL-NEXT: vpsllq $63, %xmm0, %xmm0
-; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
-; KNL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; KNL-NEXT: vpsraq $63, %zmm0, %zmm0
; KNL-NEXT: vpmovsxbq (%rdi), %xmm1
; KNL-NEXT: vpand %xmm1, %xmm0, %xmm0
; KNL-NEXT: retq
@@ -872,8 +870,7 @@ define <2 x i64> @zext_2x16mem_to_2x64(<2 x i16> *%i , <2 x i1> %mask) nounwind
; KNL-LABEL: zext_2x16mem_to_2x64:
; KNL: ## BB#0:
; KNL-NEXT: vpsllq $63, %xmm0, %xmm0
-; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
-; KNL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; KNL-NEXT: vpsraq $63, %zmm0, %zmm0
; KNL-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; KNL-NEXT: vpand %xmm1, %xmm0, %xmm0
; KNL-NEXT: retq
@@ -894,8 +891,7 @@ define <2 x i64> @sext_2x16mem_to_2x64mask(<2 x i16> *%i , <2 x i1> %mask) nounw
; KNL-LABEL: sext_2x16mem_to_2x64mask:
; KNL: ## BB#0:
; KNL-NEXT: vpsllq $63, %xmm0, %xmm0
-; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
-; KNL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; KNL-NEXT: vpsraq $63, %zmm0, %zmm0
; KNL-NEXT: vpmovsxwq (%rdi), %xmm1
; KNL-NEXT: vpand %xmm1, %xmm0, %xmm0
; KNL-NEXT: retq
@@ -1061,8 +1057,7 @@ define <2 x i64> @zext_2x32mem_to_2x64(<2 x i32> *%i , <2 x i1> %mask) nounwind
; KNL-LABEL: zext_2x32mem_to_2x64:
; KNL: ## BB#0:
; KNL-NEXT: vpsllq $63, %xmm0, %xmm0
-; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
-; KNL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; KNL-NEXT: vpsraq $63, %zmm0, %zmm0
; KNL-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; KNL-NEXT: vpand %xmm1, %xmm0, %xmm0
; KNL-NEXT: retq
@@ -1083,8 +1078,7 @@ define <2 x i64> @sext_2x32mem_to_2x64mask(<2 x i32> *%i , <2 x i1> %mask) nounw
; KNL-LABEL: sext_2x32mem_to_2x64mask:
; KNL: ## BB#0:
; KNL-NEXT: vpsllq $63, %xmm0, %xmm0
-; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
-; KNL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; KNL-NEXT: vpsraq $63, %zmm0, %zmm0
; KNL-NEXT: vpmovsxdq (%rdi), %xmm1
; KNL-NEXT: vpand %xmm1, %xmm0, %xmm0
; KNL-NEXT: retq
@@ -1294,11 +1288,17 @@ define <8 x double> @fpext_test(<8 x float> %a) nounwind readnone {
}
define <16 x i32> @zext_16i1_to_16xi32(i16 %b) {
-; ALL-LABEL: zext_16i1_to_16xi32:
-; ALL: ## BB#0:
-; ALL-NEXT: kmovw %edi, %k1
-; ALL-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
-; ALL-NEXT: retq
+; KNL-LABEL: zext_16i1_to_16xi32:
+; KNL: ## BB#0:
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
+; KNL-NEXT: retq
+;
+; SKX-LABEL: zext_16i1_to_16xi32:
+; SKX: ## BB#0:
+; SKX-NEXT: kmovd %edi, %k1
+; SKX-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
+; SKX-NEXT: retq
%a = bitcast i16 %b to <16 x i1>
%c = zext <16 x i1> %a to <16 x i32>
ret <16 x i32> %c
@@ -1313,7 +1313,7 @@ define <8 x i64> @zext_8i1_to_8xi64(i8 %b) {
;
; SKX-LABEL: zext_8i1_to_8xi64:
; SKX: ## BB#0:
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
; SKX-NEXT: retq
%a = bitcast i8 %b to <8 x i1>
@@ -1328,13 +1328,15 @@ define i16 @trunc_16i8_to_16i1(<16 x i8> %a) {
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_16i8_to_16i1:
; SKX: ## BB#0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k0
-; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; SKX-NEXT: retq
%mask_b = trunc <16 x i8>%a to <16 x i1>
%mask = bitcast <16 x i1> %mask_b to i16
@@ -1342,12 +1344,22 @@ define i16 @trunc_16i8_to_16i1(<16 x i8> %a) {
}
define i16 @trunc_16i32_to_16i1(<16 x i32> %a) {
-; ALL-LABEL: trunc_16i32_to_16i1:
-; ALL: ## BB#0:
-; ALL-NEXT: vpslld $31, %zmm0, %zmm0
-; ALL-NEXT: vptestmd %zmm0, %zmm0, %k0
-; ALL-NEXT: kmovw %k0, %eax
-; ALL-NEXT: retq
+; KNL-LABEL: trunc_16i32_to_16i1:
+; KNL: ## BB#0:
+; KNL-NEXT: vpslld $31, %zmm0, %zmm0
+; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: retq
+;
+; SKX-LABEL: trunc_16i32_to_16i1:
+; SKX: ## BB#0:
+; SKX-NEXT: vpslld $31, %zmm0, %zmm0
+; SKX-NEXT: vptestmd %zmm0, %zmm0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
%mask_b = trunc <16 x i32>%a to <16 x i1>
%mask = bitcast <16 x i1> %mask_b to i16
ret i16 %mask
@@ -1384,13 +1396,15 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %a) {
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_8i16_to_8i1:
; SKX: ## BB#0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k0
-; SKX-NEXT: kmovb %k0, %eax
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; SKX-NEXT: retq
%mask_b = trunc <8 x i16>%a to <8 x i1>
%mask = bitcast <8 x i1> %mask_b to i8
@@ -1418,17 +1432,31 @@ define <8 x i32> @sext_8i1_8i32(<8 x i32> %a1, <8 x i32> %a2) nounwind {
define i16 @trunc_i32_to_i1(i32 %a) {
-; ALL-LABEL: trunc_i32_to_i1:
-; ALL: ## BB#0:
-; ALL-NEXT: andl $1, %edi
-; ALL-NEXT: kmovw %edi, %k0
-; ALL-NEXT: movw $-4, %ax
-; ALL-NEXT: kmovw %eax, %k1
-; ALL-NEXT: kshiftrw $1, %k1, %k1
-; ALL-NEXT: kshiftlw $1, %k1, %k1
-; ALL-NEXT: korw %k0, %k1, %k0
-; ALL-NEXT: kmovw %k0, %eax
-; ALL-NEXT: retq
+; KNL-LABEL: trunc_i32_to_i1:
+; KNL: ## BB#0:
+; KNL-NEXT: andl $1, %edi
+; KNL-NEXT: kmovw %edi, %k0
+; KNL-NEXT: movw $-4, %ax
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: kshiftrw $1, %k1, %k1
+; KNL-NEXT: kshiftlw $1, %k1, %k1
+; KNL-NEXT: korw %k0, %k1, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: retq
+;
+; SKX-LABEL: trunc_i32_to_i1:
+; SKX: ## BB#0:
+; SKX-NEXT: andl $1, %edi
+; SKX-NEXT: kmovd %edi, %k0
+; SKX-NEXT: movw $-4, %ax
+; SKX-NEXT: kmovd %eax, %k1
+; SKX-NEXT: kshiftrw $1, %k1, %k1
+; SKX-NEXT: kshiftlw $1, %k1, %k1
+; SKX-NEXT: korw %k0, %k1, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: retq
%a_i = trunc i32 %a to i1
%maskv = insertelement <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i1 %a_i, i32 0
%res = bitcast <16 x i1> %maskv to i16
@@ -1447,6 +1475,7 @@ define <8 x i16> @sext_8i1_8i16(<8 x i32> %a1, <8 x i32> %a2) nounwind {
; SKX: ## BB#0:
; SKX-NEXT: vpcmpgtd %ymm0, %ymm1, %k0
; SKX-NEXT: vpmovm2w %k0, %xmm0
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = icmp slt <8 x i32> %a1, %a2
%y = sext <8 x i1> %x to <8 x i16>
@@ -1488,11 +1517,18 @@ define <8 x i64> @sext_8i1_8i64(<8 x i32> %a1, <8 x i32> %a2) nounwind {
}
define void @extload_v8i64(<8 x i8>* %a, <8 x i64>* %res) {
-; ALL-LABEL: extload_v8i64:
-; ALL: ## BB#0:
-; ALL-NEXT: vpmovsxbq (%rdi), %zmm0
-; ALL-NEXT: vmovdqa64 %zmm0, (%rsi)
-; ALL-NEXT: retq
+; KNL-LABEL: extload_v8i64:
+; KNL: ## BB#0:
+; KNL-NEXT: vpmovsxbq (%rdi), %zmm0
+; KNL-NEXT: vmovdqa64 %zmm0, (%rsi)
+; KNL-NEXT: retq
+;
+; SKX-LABEL: extload_v8i64:
+; SKX: ## BB#0:
+; SKX-NEXT: vpmovsxbq (%rdi), %zmm0
+; SKX-NEXT: vmovdqa64 %zmm0, (%rsi)
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
%sign_load = load <8 x i8>, <8 x i8>* %a
%c = sext <8 x i8> %sign_load to <8 x i64>
store <8 x i64> %c, <8 x i64>* %res
@@ -1502,301 +1538,22 @@ define void @extload_v8i64(<8 x i8>* %a, <8 x i64>* %res) {
define <64 x i16> @test21(<64 x i16> %x , <64 x i1> %mask) nounwind readnone {
; KNL-LABEL: test21:
; KNL: ## BB#0:
-; KNL-NEXT: pushq %rbp
-; KNL-NEXT: pushq %r15
-; KNL-NEXT: pushq %r14
-; KNL-NEXT: pushq %r13
-; KNL-NEXT: pushq %r12
-; KNL-NEXT: pushq %rbx
-; KNL-NEXT: vpmovsxbd %xmm7, %zmm7
-; KNL-NEXT: vpslld $31, %zmm7, %zmm7
-; KNL-NEXT: vpmovsxbd %xmm6, %zmm6
-; KNL-NEXT: vpslld $31, %zmm6, %zmm6
-; KNL-NEXT: vpmovsxbd %xmm5, %zmm5
-; KNL-NEXT: vpslld $31, %zmm5, %zmm5
-; KNL-NEXT: vpmovsxbd %xmm4, %zmm4
-; KNL-NEXT: vpslld $31, %zmm4, %zmm4
-; KNL-NEXT: vptestmd %zmm4, %zmm4, %k0
-; KNL-NEXT: kshiftlw $14, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: kshiftlw $15, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %r13d
-; KNL-NEXT: kshiftlw $13, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %ecx
-; KNL-NEXT: kshiftlw $12, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %r11d
-; KNL-NEXT: kshiftlw $11, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %r8d
-; KNL-NEXT: kshiftlw $10, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %edi
-; KNL-NEXT: kshiftlw $9, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %edx
-; KNL-NEXT: kshiftlw $8, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %esi
-; KNL-NEXT: kshiftlw $7, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %ebx
-; KNL-NEXT: kshiftlw $6, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %ebp
-; KNL-NEXT: kshiftlw $5, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %r14d
-; KNL-NEXT: kshiftlw $4, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %r15d
-; KNL-NEXT: kshiftlw $3, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %r12d
-; KNL-NEXT: kshiftlw $2, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %r10d
-; KNL-NEXT: kshiftlw $1, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %r9d
-; KNL-NEXT: vptestmd %zmm5, %zmm5, %k1
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vmovd %r13d, %xmm4
-; KNL-NEXT: kmovw %k0, %r13d
-; KNL-NEXT: kshiftlw $14, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $1, %eax, %xmm4, %xmm4
-; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: kshiftlw $15, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $2, %ecx, %xmm4, %xmm4
-; KNL-NEXT: kmovw %k0, %ecx
-; KNL-NEXT: kshiftlw $13, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $3, %r11d, %xmm4, %xmm4
-; KNL-NEXT: kmovw %k0, %r11d
-; KNL-NEXT: kshiftlw $12, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $4, %r8d, %xmm4, %xmm4
-; KNL-NEXT: kmovw %k0, %r8d
-; KNL-NEXT: kshiftlw $11, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $5, %edi, %xmm4, %xmm4
-; KNL-NEXT: kmovw %k0, %edi
-; KNL-NEXT: movl %edi, -{{[0-9]+}}(%rsp) ## 4-byte Spill
-; KNL-NEXT: kshiftlw $10, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $6, %edx, %xmm4, %xmm4
-; KNL-NEXT: kmovw %k0, %edx
-; KNL-NEXT: kshiftlw $9, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $7, %esi, %xmm4, %xmm4
-; KNL-NEXT: kmovw %k0, %esi
-; KNL-NEXT: kshiftlw $8, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $8, %ebx, %xmm4, %xmm4
-; KNL-NEXT: kmovw %k0, %ebx
-; KNL-NEXT: kshiftlw $7, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $9, %ebp, %xmm4, %xmm4
-; KNL-NEXT: kmovw %k0, %ebp
-; KNL-NEXT: kshiftlw $6, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $10, %r14d, %xmm4, %xmm4
-; KNL-NEXT: kmovw %k0, %r14d
-; KNL-NEXT: kshiftlw $5, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $11, %r15d, %xmm4, %xmm4
-; KNL-NEXT: kmovw %k0, %r15d
-; KNL-NEXT: kshiftlw $4, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $12, %r12d, %xmm4, %xmm4
-; KNL-NEXT: kmovw %k0, %edi
-; KNL-NEXT: kshiftlw $3, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $13, %r10d, %xmm4, %xmm4
-; KNL-NEXT: kmovw %k0, %r10d
-; KNL-NEXT: kshiftlw $2, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $14, %r9d, %xmm4, %xmm4
-; KNL-NEXT: kmovw %k0, %r9d
-; KNL-NEXT: kshiftlw $1, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $15, %r13d, %xmm4, %xmm4
-; KNL-NEXT: kmovw %k0, %r12d
-; KNL-NEXT: vptestmd %zmm6, %zmm6, %k0
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: vmovd %ecx, %xmm5
-; KNL-NEXT: kmovw %k1, %r13d
-; KNL-NEXT: kshiftlw $14, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: vpinsrb $1, %eax, %xmm5, %xmm5
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: movl %eax, -{{[0-9]+}}(%rsp) ## 4-byte Spill
-; KNL-NEXT: kshiftlw $15, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: vpinsrb $2, %r11d, %xmm5, %xmm5
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: kshiftlw $13, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: vpinsrb $3, %r8d, %xmm5, %xmm5
-; KNL-NEXT: kmovw %k1, %ecx
-; KNL-NEXT: movl %ecx, -{{[0-9]+}}(%rsp) ## 4-byte Spill
-; KNL-NEXT: kshiftlw $12, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: vpinsrb $4, -{{[0-9]+}}(%rsp), %xmm5, %xmm5 ## 4-byte Folded Reload
-; KNL-NEXT: kmovw %k1, %ecx
-; KNL-NEXT: kshiftlw $11, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: vpinsrb $5, %edx, %xmm5, %xmm5
-; KNL-NEXT: kmovw %k1, %r8d
-; KNL-NEXT: kshiftlw $10, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: vpinsrb $6, %esi, %xmm5, %xmm5
-; KNL-NEXT: kmovw %k1, %edx
-; KNL-NEXT: kshiftlw $9, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: vpinsrb $7, %ebx, %xmm5, %xmm5
-; KNL-NEXT: kmovw %k1, %esi
-; KNL-NEXT: kshiftlw $8, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: vpinsrb $8, %ebp, %xmm5, %xmm5
-; KNL-NEXT: kmovw %k1, %ebp
-; KNL-NEXT: kshiftlw $7, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: vpinsrb $9, %r14d, %xmm5, %xmm5
-; KNL-NEXT: kmovw %k1, %ebx
-; KNL-NEXT: kshiftlw $6, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: vpinsrb $10, %r15d, %xmm5, %xmm5
-; KNL-NEXT: kmovw %k1, %r11d
-; KNL-NEXT: kshiftlw $5, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: vpinsrb $11, %edi, %xmm5, %xmm5
-; KNL-NEXT: kmovw %k1, %edi
-; KNL-NEXT: kshiftlw $4, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: vpinsrb $12, %r10d, %xmm5, %xmm5
-; KNL-NEXT: kmovw %k1, %r10d
-; KNL-NEXT: kshiftlw $3, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: vpinsrb $13, %r9d, %xmm5, %xmm5
-; KNL-NEXT: kmovw %k1, %r9d
-; KNL-NEXT: kshiftlw $2, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: vpinsrb $14, %r12d, %xmm5, %xmm5
-; KNL-NEXT: kmovw %k1, %r14d
-; KNL-NEXT: kshiftlw $1, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: vpinsrb $15, %r13d, %xmm5, %xmm5
-; KNL-NEXT: kmovw %k1, %r15d
-; KNL-NEXT: vptestmd %zmm7, %zmm7, %k1
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vmovd %eax, %xmm6
-; KNL-NEXT: kmovw %k0, %r12d
-; KNL-NEXT: kshiftlw $14, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $1, -{{[0-9]+}}(%rsp), %xmm6, %xmm6 ## 4-byte Folded Reload
-; KNL-NEXT: kmovw %k0, %r13d
-; KNL-NEXT: kshiftlw $15, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $2, -{{[0-9]+}}(%rsp), %xmm6, %xmm6 ## 4-byte Folded Reload
-; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: kshiftlw $13, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $3, %ecx, %xmm6, %xmm6
-; KNL-NEXT: kmovw %k0, %ecx
-; KNL-NEXT: kshiftlw $12, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $4, %r8d, %xmm6, %xmm6
-; KNL-NEXT: kmovw %k0, %r8d
-; KNL-NEXT: kshiftlw $11, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $5, %edx, %xmm6, %xmm6
-; KNL-NEXT: kmovw %k0, %edx
-; KNL-NEXT: kshiftlw $10, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $6, %esi, %xmm6, %xmm6
-; KNL-NEXT: kmovw %k0, %esi
-; KNL-NEXT: kshiftlw $9, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $7, %ebp, %xmm6, %xmm6
-; KNL-NEXT: kmovw %k0, %ebp
-; KNL-NEXT: kshiftlw $8, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $8, %ebx, %xmm6, %xmm6
-; KNL-NEXT: kmovw %k0, %ebx
-; KNL-NEXT: kshiftlw $7, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $9, %r11d, %xmm6, %xmm6
-; KNL-NEXT: kmovw %k0, %r11d
-; KNL-NEXT: kshiftlw $6, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $10, %edi, %xmm6, %xmm6
-; KNL-NEXT: kmovw %k0, %edi
-; KNL-NEXT: kshiftlw $5, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $11, %r10d, %xmm6, %xmm6
-; KNL-NEXT: kmovw %k0, %r10d
-; KNL-NEXT: kshiftlw $4, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $12, %r9d, %xmm6, %xmm6
-; KNL-NEXT: kmovw %k0, %r9d
-; KNL-NEXT: kshiftlw $3, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $13, %r14d, %xmm6, %xmm6
-; KNL-NEXT: kmovw %k0, %r14d
-; KNL-NEXT: kshiftlw $2, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $14, %r15d, %xmm6, %xmm6
-; KNL-NEXT: kmovw %k0, %r15d
-; KNL-NEXT: kshiftlw $1, %k1, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: vpinsrb $15, %r12d, %xmm6, %xmm6
-; KNL-NEXT: kmovw %k0, %r12d
-; KNL-NEXT: kshiftrw $15, %k1, %k0
-; KNL-NEXT: vmovd %eax, %xmm7
-; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: vpinsrb $1, %r13d, %xmm7, %xmm7
-; KNL-NEXT: vpinsrb $2, %ecx, %xmm7, %xmm7
-; KNL-NEXT: vpinsrb $3, %r8d, %xmm7, %xmm7
-; KNL-NEXT: vpinsrb $4, %edx, %xmm7, %xmm7
-; KNL-NEXT: vpinsrb $5, %esi, %xmm7, %xmm7
-; KNL-NEXT: vpinsrb $6, %ebp, %xmm7, %xmm7
-; KNL-NEXT: vpinsrb $7, %ebx, %xmm7, %xmm7
-; KNL-NEXT: vpinsrb $8, %r11d, %xmm7, %xmm7
-; KNL-NEXT: vpinsrb $9, %edi, %xmm7, %xmm7
-; KNL-NEXT: vpinsrb $10, %r10d, %xmm7, %xmm7
-; KNL-NEXT: vpinsrb $11, %r9d, %xmm7, %xmm7
-; KNL-NEXT: vpinsrb $12, %r14d, %xmm7, %xmm7
-; KNL-NEXT: vpinsrb $13, %r15d, %xmm7, %xmm7
+; KNL-NEXT: vpmovzxbw {{.*#+}} ymm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero,xmm7[8],zero,xmm7[9],zero,xmm7[10],zero,xmm7[11],zero,xmm7[12],zero,xmm7[13],zero,xmm7[14],zero,xmm7[15],zero
+; KNL-NEXT: vpmovzxbw {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero,xmm6[8],zero,xmm6[9],zero,xmm6[10],zero,xmm6[11],zero,xmm6[12],zero,xmm6[13],zero,xmm6[14],zero,xmm6[15],zero
+; KNL-NEXT: vpmovzxbw {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
; KNL-NEXT: vpsllw $15, %ymm4, %ymm4
; KNL-NEXT: vpsraw $15, %ymm4, %ymm4
; KNL-NEXT: vpand %ymm0, %ymm4, %ymm0
-; KNL-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
-; KNL-NEXT: vpsllw $15, %ymm4, %ymm4
+; KNL-NEXT: vpsllw $15, %ymm5, %ymm4
; KNL-NEXT: vpsraw $15, %ymm4, %ymm4
; KNL-NEXT: vpand %ymm1, %ymm4, %ymm1
-; KNL-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero,xmm6[8],zero,xmm6[9],zero,xmm6[10],zero,xmm6[11],zero,xmm6[12],zero,xmm6[13],zero,xmm6[14],zero,xmm6[15],zero
-; KNL-NEXT: vpsllw $15, %ymm4, %ymm4
+; KNL-NEXT: vpsllw $15, %ymm6, %ymm4
; KNL-NEXT: vpsraw $15, %ymm4, %ymm4
; KNL-NEXT: vpand %ymm2, %ymm4, %ymm2
-; KNL-NEXT: vpinsrb $14, %r12d, %xmm7, %xmm4
-; KNL-NEXT: vpinsrb $15, %eax, %xmm4, %xmm4
-; KNL-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
-; KNL-NEXT: vpsllw $15, %ymm4, %ymm4
+; KNL-NEXT: vpsllw $15, %ymm7, %ymm4
; KNL-NEXT: vpsraw $15, %ymm4, %ymm4
; KNL-NEXT: vpand %ymm3, %ymm4, %ymm3
-; KNL-NEXT: popq %rbx
-; KNL-NEXT: popq %r12
-; KNL-NEXT: popq %r13
-; KNL-NEXT: popq %r14
-; KNL-NEXT: popq %r15
-; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
;
; SKX-LABEL: test21:
diff --git a/test/CodeGen/X86/avx512-extract-subvector.ll b/test/CodeGen/X86/avx512-extract-subvector.ll
index 391bf6ba4554..2d0a81046b4e 100644
--- a/test/CodeGen/X86/avx512-extract-subvector.ll
+++ b/test/CodeGen/X86/avx512-extract-subvector.ll
@@ -6,6 +6,7 @@ define <8 x i16> @extract_subvector128_v32i16(<32 x i16> %x) nounwind {
; SKX-LABEL: extract_subvector128_v32i16:
; SKX: ## BB#0:
; SKX-NEXT: vextracti32x4 $2, %zmm0, %xmm0
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = shufflevector <32 x i16> %x, <32 x i16> undef, <8 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
ret <8 x i16> %r1
@@ -15,6 +16,7 @@ define <8 x i16> @extract_subvector128_v32i16_first_element(<32 x i16> %x) nounw
; SKX-LABEL: extract_subvector128_v32i16_first_element:
; SKX: ## BB#0:
; SKX-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = shufflevector <32 x i16> %x, <32 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret <8 x i16> %r1
@@ -24,6 +26,7 @@ define <16 x i8> @extract_subvector128_v64i8(<64 x i8> %x) nounwind {
; SKX-LABEL: extract_subvector128_v64i8:
; SKX: ## BB#0:
; SKX-NEXT: vextracti32x4 $2, %zmm0, %xmm0
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = shufflevector <64 x i8> %x, <64 x i8> undef, <16 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38,i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
ret <16 x i8> %r1
@@ -33,6 +36,7 @@ define <16 x i8> @extract_subvector128_v64i8_first_element(<64 x i8> %x) nounwin
; SKX-LABEL: extract_subvector128_v64i8_first_element:
; SKX: ## BB#0:
; SKX-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = shufflevector <64 x i8> %x, <64 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
ret <16 x i8> %r1
@@ -61,6 +65,7 @@ define void @extract_subvector256_v8f64_store(double* nocapture %addr, <4 x doub
; SKX-LABEL: extract_subvector256_v8f64_store:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vextractf128 $1, %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <4 x double> %a, <4 x double> undef, <2 x i32> <i32 2, i32 3>
@@ -73,6 +78,7 @@ define void @extract_subvector256_v8f32_store(float* nocapture %addr, <8 x float
; SKX-LABEL: extract_subvector256_v8f32_store:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vextractf128 $1, %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -85,6 +91,7 @@ define void @extract_subvector256_v4i64_store(i64* nocapture %addr, <4 x i64> %a
; SKX-LABEL: extract_subvector256_v4i64_store:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vextracti128 $1, %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <4 x i64> %a, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
@@ -97,6 +104,7 @@ define void @extract_subvector256_v8i32_store(i32* nocapture %addr, <8 x i32> %a
; SKX-LABEL: extract_subvector256_v8i32_store:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vextracti128 $1, %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -109,6 +117,7 @@ define void @extract_subvector256_v16i16_store(i16* nocapture %addr, <16 x i16>
; SKX-LABEL: extract_subvector256_v16i16_store:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vextracti128 $1, %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <16 x i16> %a, <16 x i16> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -121,6 +130,7 @@ define void @extract_subvector256_v32i8_store(i8* nocapture %addr, <32 x i8> %a)
; SKX-LABEL: extract_subvector256_v32i8_store:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vextracti128 $1, %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <32 x i8> %a, <32 x i8> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -133,6 +143,7 @@ define void @extract_subvector256_v4f64_store_lo(double* nocapture %addr, <4 x d
; SKX-LABEL: extract_subvector256_v4f64_store_lo:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <4 x double> %a, <4 x double> undef, <2 x i32> <i32 0, i32 1>
@@ -145,6 +156,7 @@ define void @extract_subvector256_v4f64_store_lo_align_16(double* nocapture %add
; SKX-LABEL: extract_subvector256_v4f64_store_lo_align_16:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <4 x double> %a, <4 x double> undef, <2 x i32> <i32 0, i32 1>
@@ -157,6 +169,7 @@ define void @extract_subvector256_v4f32_store_lo(float* nocapture %addr, <8 x fl
; SKX-LABEL: extract_subvector256_v4f32_store_lo:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -169,6 +182,7 @@ define void @extract_subvector256_v4f32_store_lo_align_16(float* nocapture %addr
; SKX-LABEL: extract_subvector256_v4f32_store_lo_align_16:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -181,6 +195,7 @@ define void @extract_subvector256_v2i64_store_lo(i64* nocapture %addr, <4 x i64>
; SKX-LABEL: extract_subvector256_v2i64_store_lo:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <4 x i64> %a, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
@@ -193,6 +208,7 @@ define void @extract_subvector256_v2i64_store_lo_align_16(i64* nocapture %addr,
; SKX-LABEL: extract_subvector256_v2i64_store_lo_align_16:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <4 x i64> %a, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
@@ -205,6 +221,7 @@ define void @extract_subvector256_v4i32_store_lo(i32* nocapture %addr, <8 x i32>
; SKX-LABEL: extract_subvector256_v4i32_store_lo:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -217,6 +234,7 @@ define void @extract_subvector256_v4i32_store_lo_align_16(i32* nocapture %addr,
; SKX-LABEL: extract_subvector256_v4i32_store_lo_align_16:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -229,6 +247,7 @@ define void @extract_subvector256_v8i16_store_lo(i16* nocapture %addr, <16 x i16
; SKX-LABEL: extract_subvector256_v8i16_store_lo:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <16 x i16> %a, <16 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -241,6 +260,7 @@ define void @extract_subvector256_v8i16_store_lo_align_16(i16* nocapture %addr,
; SKX-LABEL: extract_subvector256_v8i16_store_lo_align_16:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <16 x i16> %a, <16 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -253,6 +273,7 @@ define void @extract_subvector256_v16i8_store_lo(i8* nocapture %addr, <32 x i8>
; SKX-LABEL: extract_subvector256_v16i8_store_lo:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <32 x i8> %a, <32 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -265,6 +286,7 @@ define void @extract_subvector256_v16i8_store_lo_align_16(i8* nocapture %addr, <
; SKX-LABEL: extract_subvector256_v16i8_store_lo_align_16:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <32 x i8> %a, <32 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -277,6 +299,7 @@ define void @extract_subvector512_v2f64_store_lo(double* nocapture %addr, <8 x d
; SKX-LABEL: extract_subvector512_v2f64_store_lo:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <8 x double> %a, <8 x double> undef, <2 x i32> <i32 0, i32 1>
@@ -289,6 +312,7 @@ define void @extract_subvector512_v2f64_store_lo_align_16(double* nocapture %add
; SKX-LABEL: extract_subvector512_v2f64_store_lo_align_16:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <8 x double> %a, <8 x double> undef, <2 x i32> <i32 0, i32 1>
@@ -301,6 +325,7 @@ define void @extract_subvector512_v4f32_store_lo(float* nocapture %addr, <16 x f
; SKX-LABEL: extract_subvector512_v4f32_store_lo:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <16 x float> %a, <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -313,6 +338,7 @@ define void @extract_subvector512_v4f32_store_lo_align_16(float* nocapture %addr
; SKX-LABEL: extract_subvector512_v4f32_store_lo_align_16:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <16 x float> %a, <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -325,6 +351,7 @@ define void @extract_subvector512_v2i64_store_lo(i64* nocapture %addr, <8 x i64>
; SKX-LABEL: extract_subvector512_v2i64_store_lo:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <8 x i64> %a, <8 x i64> undef, <2 x i32> <i32 0, i32 1>
@@ -337,6 +364,7 @@ define void @extract_subvector512_v2i64_store_lo_align_16(i64* nocapture %addr,
; SKX-LABEL: extract_subvector512_v2i64_store_lo_align_16:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <8 x i64> %a, <8 x i64> undef, <2 x i32> <i32 0, i32 1>
@@ -349,6 +377,7 @@ define void @extract_subvector512_v4i32_store_lo(i32* nocapture %addr, <16 x i32
; SKX-LABEL: extract_subvector512_v4i32_store_lo:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <16 x i32> %a, <16 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -361,6 +390,7 @@ define void @extract_subvector512_v4i32_store_lo_align_16(i32* nocapture %addr,
; SKX-LABEL: extract_subvector512_v4i32_store_lo_align_16:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <16 x i32> %a, <16 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -373,6 +403,7 @@ define void @extract_subvector512_v8i16_store_lo(i16* nocapture %addr, <32 x i16
; SKX-LABEL: extract_subvector512_v8i16_store_lo:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <32 x i16> %a, <32 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -385,6 +416,7 @@ define void @extract_subvector512_v16i8_store_lo(i8* nocapture %addr, <64 x i8>
; SKX-LABEL: extract_subvector512_v16i8_store_lo:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <64 x i8> %a, <64 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -397,6 +429,7 @@ define void @extract_subvector512_v16i8_store_lo_align_16(i8* nocapture %addr, <
; SKX-LABEL: extract_subvector512_v16i8_store_lo_align_16:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <64 x i8> %a, <64 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -409,6 +442,7 @@ define void @extract_subvector512_v4f64_store_lo(double* nocapture %addr, <8 x d
; SKX-LABEL: extract_subvector512_v4f64_store_lo:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <8 x double> %a, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -421,6 +455,7 @@ define void @extract_subvector512_v4f64_store_lo_align_16(double* nocapture %add
; SKX-LABEL: extract_subvector512_v4f64_store_lo_align_16:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <8 x double> %a, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -433,6 +468,7 @@ define void @extract_subvector512_v4f64_store_lo_align_32(double* nocapture %add
; SKX-LABEL: extract_subvector512_v4f64_store_lo_align_32:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <8 x double> %a, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -445,6 +481,7 @@ define void @extract_subvector512_v8f32_store_lo(float* nocapture %addr, <16 x f
; SKX-LABEL: extract_subvector512_v8f32_store_lo:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <16 x float> %a, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -457,6 +494,7 @@ define void @extract_subvector512_v8f32_store_lo_align_16(float* nocapture %addr
; SKX-LABEL: extract_subvector512_v8f32_store_lo_align_16:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <16 x float> %a, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -469,6 +507,7 @@ define void @extract_subvector512_v8f32_store_lo_align_32(float* nocapture %addr
; SKX-LABEL: extract_subvector512_v8f32_store_lo_align_32:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <16 x float> %a, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -481,6 +520,7 @@ define void @extract_subvector512_v4i64_store_lo(i64* nocapture %addr, <8 x i64>
; SKX-LABEL: extract_subvector512_v4i64_store_lo:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <8 x i64> %a, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -493,6 +533,7 @@ define void @extract_subvector512_v4i64_store_lo_align_16(i64* nocapture %addr,
; SKX-LABEL: extract_subvector512_v4i64_store_lo_align_16:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <8 x i64> %a, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -505,6 +546,7 @@ define void @extract_subvector512_v4i64_store_lo_align_32(i64* nocapture %addr,
; SKX-LABEL: extract_subvector512_v4i64_store_lo_align_32:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <8 x i64> %a, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -517,6 +559,7 @@ define void @extract_subvector512_v8i32_store_lo(i32* nocapture %addr, <16 x i32
; SKX-LABEL: extract_subvector512_v8i32_store_lo:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <16 x i32> %a, <16 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -529,6 +572,7 @@ define void @extract_subvector512_v8i32_store_lo_align_16(i32* nocapture %addr,
; SKX-LABEL: extract_subvector512_v8i32_store_lo_align_16:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <16 x i32> %a, <16 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -541,6 +585,7 @@ define void @extract_subvector512_v8i32_store_lo_align_32(i32* nocapture %addr,
; SKX-LABEL: extract_subvector512_v8i32_store_lo_align_32:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <16 x i32> %a, <16 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -553,6 +598,7 @@ define void @extract_subvector512_v16i16_store_lo(i16* nocapture %addr, <32 x i1
; SKX-LABEL: extract_subvector512_v16i16_store_lo:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <32 x i16> %a, <32 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -565,6 +611,7 @@ define void @extract_subvector512_v16i16_store_lo_align_16(i16* nocapture %addr,
; SKX-LABEL: extract_subvector512_v16i16_store_lo_align_16:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <32 x i16> %a, <32 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -577,6 +624,7 @@ define void @extract_subvector512_v16i16_store_lo_align_32(i16* nocapture %addr,
; SKX-LABEL: extract_subvector512_v16i16_store_lo_align_32:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <32 x i16> %a, <32 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -589,6 +637,7 @@ define void @extract_subvector512_v32i8_store_lo(i8* nocapture %addr, <64 x i8>
; SKX-LABEL: extract_subvector512_v32i8_store_lo:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <64 x i8> %a, <64 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -601,6 +650,7 @@ define void @extract_subvector512_v32i8_store_lo_align_16(i8* nocapture %addr, <
; SKX-LABEL: extract_subvector512_v32i8_store_lo_align_16:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <64 x i8> %a, <64 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -613,6 +663,7 @@ define void @extract_subvector512_v32i8_store_lo_align_32(i8* nocapture %addr, <
; SKX-LABEL: extract_subvector512_v32i8_store_lo_align_32:
; SKX: ## BB#0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = shufflevector <64 x i8> %a, <64 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -624,7 +675,7 @@ entry:
define <4 x double> @test_mm512_mask_extractf64x4_pd(<4 x double> %__W, i8 %__U, <8 x double> %__A) {
; SKX-LABEL: test_mm512_mask_extractf64x4_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf64x4 $1, %zmm1, %ymm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -638,7 +689,7 @@ entry:
define <4 x double> @test_mm512_maskz_extractf64x4_pd(i8 %__U, <8 x double> %__A) {
; SKX-LABEL: test_mm512_maskz_extractf64x4_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf64x4 $1, %zmm0, %ymm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -652,8 +703,9 @@ entry:
define <4 x float> @test_mm512_mask_extractf32x4_ps(<4 x float> %__W, i8 %__U, <8 x double> %__A) {
; SKX-LABEL: test_mm512_mask_extractf32x4_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf32x4 $1, %zmm1, %xmm0 {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = bitcast <8 x double> %__A to <16 x float>
@@ -667,8 +719,9 @@ entry:
define <4 x float> @test_mm512_maskz_extractf32x4_ps(i8 %__U, <8 x double> %__A) {
; SKX-LABEL: test_mm512_maskz_extractf32x4_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf32x4 $1, %zmm0, %xmm0 {%k1} {z}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = bitcast <8 x double> %__A to <16 x float>
@@ -682,8 +735,9 @@ entry:
define <2 x double> @test_mm256_mask_extractf64x2_pd(<2 x double> %__W, i8 %__U, <4 x double> %__A) {
; SKX-LABEL: test_mm256_mask_extractf64x2_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf64x2 $1, %ymm1, %xmm0 {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%shuffle = shufflevector <4 x double> %__A, <4 x double> undef, <2 x i32> <i32 2, i32 3>
@@ -696,8 +750,9 @@ entry:
define <2 x double> @test_mm256_maskz_extractf64x2_pd(i8 %__U, <4 x double> %__A) {
; SKX-LABEL: test_mm256_maskz_extractf64x2_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf64x2 $1, %ymm0, %xmm0 {%k1} {z}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%shuffle = shufflevector <4 x double> %__A, <4 x double> undef, <2 x i32> <i32 2, i32 3>
@@ -710,8 +765,9 @@ entry:
define <2 x i64> @test_mm256_mask_extracti64x2_epi64(<2 x i64> %__W, i8 %__U, <4 x i64> %__A) {
; SKX-LABEL: test_mm256_mask_extracti64x2_epi64:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextracti64x2 $1, %ymm1, %xmm0 {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%shuffle = shufflevector <4 x i64> %__A, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
@@ -724,8 +780,9 @@ entry:
define <2 x i64> @test_mm256_maskz_extracti64x2_epi64(i8 %__U, <4 x i64> %__A) {
; SKX-LABEL: test_mm256_maskz_extracti64x2_epi64:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextracti64x2 $1, %ymm0, %xmm0 {%k1} {z}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%shuffle = shufflevector <4 x i64> %__A, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
@@ -738,8 +795,9 @@ entry:
define <4 x float> @test_mm256_mask_extractf32x4_ps(<4 x float> %__W, i8 %__U, <8 x float> %__A) {
; SKX-LABEL: test_mm256_mask_extractf32x4_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf32x4 $1, %ymm1, %xmm0 {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%shuffle = shufflevector <8 x float> %__A, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -752,8 +810,9 @@ entry:
define <4 x float> @test_mm256_maskz_extractf32x4_ps(i8 %__U, <8 x float> %__A) {
; SKX-LABEL: test_mm256_maskz_extractf32x4_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf32x4 $1, %ymm0, %xmm0 {%k1} {z}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%shuffle = shufflevector <8 x float> %__A, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -766,8 +825,9 @@ entry:
define <2 x i64> @test_mm256_mask_extracti32x4_epi32(<2 x i64> %__W, i8 %__U, <4 x i64> %__A) {
; SKX-LABEL: test_mm256_mask_extracti32x4_epi32:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextracti32x4 $1, %ymm1, %xmm0 {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__A to <8 x i32>
@@ -783,8 +843,9 @@ entry:
define <2 x i64> @test_mm256_maskz_extracti32x4_epi32(i8 %__U, <4 x i64> %__A) {
; SKX-LABEL: test_mm256_maskz_extracti32x4_epi32:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextracti32x4 $1, %ymm0, %xmm0 {%k1} {z}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%0 = bitcast <4 x i64> %__A to <8 x i32>
@@ -799,7 +860,7 @@ entry:
define <8 x float> @test_mm512_mask_extractf32x8_ps(<8 x float> %__W, i8 %__U, <16 x float> %__A) {
; SKX-LABEL: test_mm512_mask_extractf32x8_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf32x8 $1, %zmm1, %ymm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -812,7 +873,7 @@ entry:
define <8 x float> @test_mm512_maskz_extractf32x8_ps(i8 %__U, <16 x float> %__A) {
; SKX-LABEL: test_mm512_maskz_extractf32x8_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf32x8 $1, %zmm0, %ymm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -825,8 +886,9 @@ entry:
define <2 x double> @test_mm512_mask_extractf64x2_pd(<2 x double> %__W, i8 %__U, <8 x double> %__A) {
; SKX-LABEL: test_mm512_mask_extractf64x2_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf64x2 $3, %zmm1, %xmm0 {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%shuffle = shufflevector <8 x double> %__A, <8 x double> undef, <2 x i32> <i32 6, i32 7>
@@ -839,8 +901,9 @@ entry:
define <2 x double> @test_mm512_maskz_extractf64x2_pd(i8 %__U, <8 x double> %__A) {
; SKX-LABEL: test_mm512_maskz_extractf64x2_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf64x2 $3, %zmm0, %xmm0 {%k1} {z}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
entry:
%shuffle = shufflevector <8 x double> %__A, <8 x double> undef, <2 x i32> <i32 6, i32 7>
diff --git a/test/CodeGen/X86/avx512-fsel.ll b/test/CodeGen/X86/avx512-fsel.ll
index c6f2da6ff60b..a9b8914ee1fe 100644
--- a/test/CodeGen/X86/avx512-fsel.ll
+++ b/test/CodeGen/X86/avx512-fsel.ll
@@ -10,25 +10,24 @@ define i32 @test(float %a, float %b) {
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: Lcfi0:
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movb $1, %al
; CHECK-NEXT: vucomiss %xmm1, %xmm0
-; CHECK-NEXT: setnp %cl
-; CHECK-NEXT: sete %dl
-; CHECK-NEXT: setp %sil
-; CHECK-NEXT: setne %dil
-; CHECK-NEXT: andb %cl, %dl
-; CHECK-NEXT: ## implicit-def: %R8D
-; CHECK-NEXT: movb %dl, %r8b
-; CHECK-NEXT: andl $1, %r8d
-; CHECK-NEXT: kmovw %r8d, %k0
-; CHECK-NEXT: orb %sil, %dil
-; CHECK-NEXT: ## implicit-def: %R8D
-; CHECK-NEXT: movb %dil, %r8b
-; CHECK-NEXT: andl $1, %r8d
-; CHECK-NEXT: kmovw %r8d, %k1
-; CHECK-NEXT: kmovw %k1, %ecx
-; CHECK-NEXT: testb $1, %cl
-; CHECK-NEXT: movb %al, {{[0-9]+}}(%rsp) ## 1-byte Spill
+; CHECK-NEXT: setp %al
+; CHECK-NEXT: setne %cl
+; CHECK-NEXT: setnp %dl
+; CHECK-NEXT: sete %sil
+; CHECK-NEXT: andb %dl, %sil
+; CHECK-NEXT: ## implicit-def: %EDI
+; CHECK-NEXT: movb %sil, %dil
+; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: kmovw %edi, %k0
+; CHECK-NEXT: orb %al, %cl
+; CHECK-NEXT: ## implicit-def: %EDI
+; CHECK-NEXT: movb %cl, %dil
+; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: kmovw %k1, %edi
+; CHECK-NEXT: movb %dil, %al
+; CHECK-NEXT: testb $1, %al
; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
; CHECK-NEXT: jne LBB0_1
; CHECK-NEXT: jmp LBB0_2
diff --git a/test/CodeGen/X86/avx512-gather-scatter-intrin.ll b/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
index 7c763d95ade6..4890afec2164 100644
--- a/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
+++ b/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
@@ -14,11 +14,12 @@ declare void @llvm.x86.avx512.scatter.qpd.512 (i8*, i8, <8 x i64>, <8 x double>,
define void @gather_mask_dps(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_dps:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vgatherdps (%rsi,%zmm0,4), %zmm1 {%k2}
; CHECK-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: vscatterdps %zmm1, (%rdx,%zmm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%x = call <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float> %src, i8* %base, <16 x i32>%ind, i16 %mask, i32 4)
%ind2 = add <16 x i32> %ind, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -29,11 +30,12 @@ define void @gather_mask_dps(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8*
define void @gather_mask_dpd(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_dpd:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vgatherdpd (%rsi,%ymm0,4), %zmm1 {%k2}
; CHECK-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT: vscatterdpd %zmm1, (%rdx,%ymm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%x = call <8 x double> @llvm.x86.avx512.gather.dpd.512 (<8 x double> %src, i8* %base, <8 x i32>%ind, i8 %mask, i32 4)
%ind2 = add <8 x i32> %ind, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -44,11 +46,12 @@ define void @gather_mask_dpd(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %b
define void @gather_mask_qps(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_qps:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vgatherqps (%rsi,%zmm0,4), %ymm1 {%k2}
; CHECK-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: vscatterqps %ymm1, (%rdx,%zmm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%x = call <8 x float> @llvm.x86.avx512.gather.qps.512 (<8 x float> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
%ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
@@ -59,11 +62,12 @@ define void @gather_mask_qps(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %ba
define void @gather_mask_qpd(<8 x i64> %ind, <8 x double> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_qpd:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vgatherqpd (%rsi,%zmm0,4), %zmm1 {%k2}
; CHECK-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: vscatterqpd %zmm1, (%rdx,%zmm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%x = call <8 x double> @llvm.x86.avx512.gather.qpd.512 (<8 x double> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
%ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
@@ -86,11 +90,12 @@ declare void @llvm.x86.avx512.scatter.qpq.512 (i8*, i8, <8 x i64>, <8 x i64>, i3
define void @gather_mask_dd(<16 x i32> %ind, <16 x i32> %src, i16 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_dd:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vpgatherdd (%rsi,%zmm0,4), %zmm1 {%k2}
; CHECK-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: vpscatterdd %zmm1, (%rdx,%zmm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%x = call <16 x i32> @llvm.x86.avx512.gather.dpi.512 (<16 x i32> %src, i8* %base, <16 x i32>%ind, i16 %mask, i32 4)
%ind2 = add <16 x i32> %ind, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -101,11 +106,12 @@ define void @gather_mask_dd(<16 x i32> %ind, <16 x i32> %src, i16 %mask, i8* %ba
define void @gather_mask_qd(<8 x i64> %ind, <8 x i32> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_qd:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vpgatherqd (%rsi,%zmm0,4), %ymm1 {%k2}
; CHECK-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: vpscatterqd %ymm1, (%rdx,%zmm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%x = call <8 x i32> @llvm.x86.avx512.gather.qpi.512 (<8 x i32> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
%ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
@@ -116,11 +122,12 @@ define void @gather_mask_qd(<8 x i64> %ind, <8 x i32> %src, i8 %mask, i8* %base,
define void @gather_mask_qq(<8 x i64> %ind, <8 x i64> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_qq:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vpgatherqq (%rsi,%zmm0,4), %zmm1 {%k2}
; CHECK-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: vpscatterqq %zmm1, (%rdx,%zmm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%x = call <8 x i64> @llvm.x86.avx512.gather.qpq.512 (<8 x i64> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
%ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
@@ -131,11 +138,12 @@ define void @gather_mask_qq(<8 x i64> %ind, <8 x i64> %src, i8 %mask, i8* %base,
define void @gather_mask_dq(<8 x i32> %ind, <8 x i64> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_dq:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vpgatherdq (%rsi,%ymm0,4), %zmm1 {%k2}
; CHECK-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT: vpscatterdq %zmm1, (%rdx,%ymm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%x = call <8 x i64> @llvm.x86.avx512.gather.dpq.512 (<8 x i64> %src, i8* %base, <8 x i32>%ind, i8 %mask, i32 4)
%ind2 = add <8 x i32> %ind, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -146,9 +154,10 @@ define void @gather_mask_dq(<8 x i32> %ind, <8 x i64> %src, i8 %mask, i8* %base,
define void @gather_mask_dpd_execdomain(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %base, <8 x double>* %stbuf) {
; CHECK-LABEL: gather_mask_dpd_execdomain:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vgatherdpd (%rsi,%ymm0,4), %zmm1 {%k1}
; CHECK-NEXT: vmovapd %zmm1, (%rdx)
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%x = call <8 x double> @llvm.x86.avx512.gather.dpd.512 (<8 x double> %src, i8* %base, <8 x i32>%ind, i8 %mask, i32 4)
store <8 x double> %x, <8 x double>* %stbuf
@@ -158,9 +167,10 @@ define void @gather_mask_dpd_execdomain(<8 x i32> %ind, <8 x double> %src, i8 %m
define void @gather_mask_qpd_execdomain(<8 x i64> %ind, <8 x double> %src, i8 %mask, i8* %base, <8 x double>* %stbuf) {
; CHECK-LABEL: gather_mask_qpd_execdomain:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vgatherqpd (%rsi,%zmm0,4), %zmm1 {%k1}
; CHECK-NEXT: vmovapd %zmm1, (%rdx)
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%x = call <8 x double> @llvm.x86.avx512.gather.qpd.512 (<8 x double> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
store <8 x double> %x, <8 x double>* %stbuf
@@ -170,7 +180,7 @@ define void @gather_mask_qpd_execdomain(<8 x i64> %ind, <8 x double> %src, i8 %m
define <16 x float> @gather_mask_dps_execdomain(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8* %base) {
; CHECK-LABEL: gather_mask_dps_execdomain:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vgatherdps (%rsi,%zmm0,4), %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -181,7 +191,7 @@ define <16 x float> @gather_mask_dps_execdomain(<16 x i32> %ind, <16 x float> %s
define <8 x float> @gather_mask_qps_execdomain(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %base) {
; CHECK-LABEL: gather_mask_qps_execdomain:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vgatherqps (%rsi,%zmm0,4), %ymm1 {%k1}
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -192,9 +202,10 @@ define <8 x float> @gather_mask_qps_execdomain(<8 x i64> %ind, <8 x float> %src,
define void @scatter_mask_dpd_execdomain(<8 x i32> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: scatter_mask_dpd_execdomain:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vscatterdpd %zmm1, (%rcx,%ymm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%x = load <8 x double>, <8 x double>* %src, align 64
call void @llvm.x86.avx512.scatter.dpd.512 (i8* %stbuf, i8 %mask, <8 x i32>%ind, <8 x double> %x, i32 4)
@@ -204,9 +215,10 @@ define void @scatter_mask_dpd_execdomain(<8 x i32> %ind, <8 x double>* %src, i8
define void @scatter_mask_qpd_execdomain(<8 x i64> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: scatter_mask_qpd_execdomain:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vscatterqpd %zmm1, (%rcx,%zmm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%x = load <8 x double>, <8 x double>* %src, align 64
call void @llvm.x86.avx512.scatter.qpd.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind, <8 x double> %x, i32 4)
@@ -216,9 +228,10 @@ define void @scatter_mask_qpd_execdomain(<8 x i64> %ind, <8 x double>* %src, i8
define void @scatter_mask_dps_execdomain(<16 x i32> %ind, <16 x float>* %src, i16 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: scatter_mask_dps_execdomain:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovaps (%rdi), %zmm1
; CHECK-NEXT: vscatterdps %zmm1, (%rcx,%zmm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%x = load <16 x float>, <16 x float>* %src, align 64
call void @llvm.x86.avx512.scatter.dps.512 (i8* %stbuf, i16 %mask, <16 x i32>%ind, <16 x float> %x, i32 4)
@@ -228,9 +241,10 @@ define void @scatter_mask_dps_execdomain(<16 x i32> %ind, <16 x float>* %src, i1
define void @scatter_mask_qps_execdomain(<8 x i64> %ind, <8 x float>* %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: scatter_mask_qps_execdomain:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovaps (%rdi), %ymm1
; CHECK-NEXT: vscatterqps %ymm1, (%rcx,%zmm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%x = load <8 x float>, <8 x float>* %src, align 32
call void @llvm.x86.avx512.scatter.qps.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind, <8 x float> %x, i32 4)
@@ -240,11 +254,13 @@ define void @scatter_mask_qps_execdomain(<8 x i64> %ind, <8 x float>* %src, i8 %
define void @gather_qps(<8 x i64> %ind, <8 x float> %src, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_qps:
; CHECK: ## BB#0:
+; CHECK-NEXT: vxorps %ymm1, %ymm1, %ymm1
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vgatherqps (%rdi,%zmm0,4), %ymm1 {%k2}
; CHECK-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: vscatterqps %ymm1, (%rsi,%zmm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%x = call <8 x float> @llvm.x86.avx512.gather.qps.512 (<8 x float> %src, i8* %base, <8 x i64>%ind, i8 -1, i32 4)
%ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
@@ -262,16 +278,17 @@ define void @prefetch(<8 x i64> %ind, i8* %base) {
; CHECK-NEXT: kxorw %k0, %k0, %k1
; CHECK-NEXT: vgatherpf1qps (%rdi,%zmm0,4) {%k1}
; CHECK-NEXT: movb $1, %al
-; CHECK-NEXT: kmovb %eax, %k1
+; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: vscatterpf0qps (%rdi,%zmm0,2) {%k1}
; CHECK-NEXT: movb $120, %al
-; CHECK-NEXT: kmovb %eax, %k1
+; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: vscatterpf1qps (%rdi,%zmm0,2) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
- call void @llvm.x86.avx512.gatherpf.qps.512(i8 -1, <8 x i64> %ind, i8* %base, i32 4, i32 0)
- call void @llvm.x86.avx512.gatherpf.qps.512(i8 0, <8 x i64> %ind, i8* %base, i32 4, i32 1)
- call void @llvm.x86.avx512.scatterpf.qps.512(i8 1, <8 x i64> %ind, i8* %base, i32 2, i32 0)
- call void @llvm.x86.avx512.scatterpf.qps.512(i8 120, <8 x i64> %ind, i8* %base, i32 2, i32 1)
+ call void @llvm.x86.avx512.gatherpf.qps.512(i8 -1, <8 x i64> %ind, i8* %base, i32 4, i32 3)
+ call void @llvm.x86.avx512.gatherpf.qps.512(i8 0, <8 x i64> %ind, i8* %base, i32 4, i32 2)
+ call void @llvm.x86.avx512.scatterpf.qps.512(i8 1, <8 x i64> %ind, i8* %base, i32 2, i32 3)
+ call void @llvm.x86.avx512.scatterpf.qps.512(i8 120, <8 x i64> %ind, i8* %base, i32 2, i32 2)
ret void
}
@@ -280,12 +297,12 @@ declare <2 x double> @llvm.x86.avx512.gather3div2.df(<2 x double>, i8*, <2 x i64
define <2 x double>@test_int_x86_avx512_gather3div2_df(<2 x double> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div2_df:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
-; CHECK-NEXT: vmovapd %xmm0, %xmm2
-; CHECK-NEXT: vgatherqpd (%rdi,%xmm1,4), %xmm2 {%k1}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vgatherqpd (%rdi,%xmm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
-; CHECK-NEXT: vgatherqpd (%rdi,%xmm1,2), %xmm0 {%k1}
-; CHECK-NEXT: vaddpd %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vgatherqpd (%rdi,%xmm1,2), %xmm2 {%k1}
+; CHECK-NEXT: vaddpd %xmm2, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.gather3div2.df(<2 x double> %x0, i8* %x1, <2 x i64> %x2, i8 %x3, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.gather3div2.df(<2 x double> %x0, i8* %x1, <2 x i64> %x2, i8 -1, i32 2)
@@ -298,9 +315,9 @@ declare <2 x i64> @llvm.x86.avx512.gather3div2.di(<2 x i64>, i8*, <2 x i64>, i8,
define <2 x i64>@test_int_x86_avx512_gather3div2_di(<2 x i64> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div2_di:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpgatherqq (%rdi,%xmm1,8), %xmm0 {%k1}
-; CHECK-NEXT: vpaddq %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpaddq %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.avx512.gather3div2.di(<2 x i64> %x0, i8* %x1, <2 x i64> %x2, i8 %x3, i32 8)
%res1 = call <2 x i64> @llvm.x86.avx512.gather3div2.di(<2 x i64> %x0, i8* %x1, <2 x i64> %x2, i8 %x3, i32 8)
@@ -313,12 +330,12 @@ declare <4 x double> @llvm.x86.avx512.gather3div4.df(<4 x double>, i8*, <4 x i64
define <4 x double>@test_int_x86_avx512_gather3div4_df(<4 x double> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div4_df:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
-; CHECK-NEXT: vmovapd %ymm0, %ymm2
-; CHECK-NEXT: vgatherqpd (%rdi,%ymm1,4), %ymm2 {%k1}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vgatherqpd (%rdi,%ymm1,4), %ymm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
-; CHECK-NEXT: vgatherqpd (%rdi,%ymm1,2), %ymm0 {%k1}
-; CHECK-NEXT: vaddpd %ymm0, %ymm2, %ymm0
+; CHECK-NEXT: vxorpd %ymm2, %ymm2, %ymm2
+; CHECK-NEXT: vgatherqpd (%rdi,%ymm1,2), %ymm2 {%k1}
+; CHECK-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; CHECK-NEXT: retq
%res = call <4 x double> @llvm.x86.avx512.gather3div4.df(<4 x double> %x0, i8* %x1, <4 x i64> %x2, i8 %x3, i32 4)
%res1 = call <4 x double> @llvm.x86.avx512.gather3div4.df(<4 x double> %x0, i8* %x1, <4 x i64> %x2, i8 -1, i32 2)
@@ -331,12 +348,12 @@ declare <4 x i64> @llvm.x86.avx512.gather3div4.di(<4 x i64>, i8*, <4 x i64>, i8,
define <4 x i64>@test_int_x86_avx512_gather3div4_di(<4 x i64> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div4_di:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
-; CHECK-NEXT: vmovdqa %ymm0, %ymm2
-; CHECK-NEXT: vpgatherqq (%rdi,%ymm1,8), %ymm2 {%k1}
-; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpgatherqq (%rdi,%ymm1,8), %ymm0 {%k1}
-; CHECK-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
+; CHECK-NEXT: vpgatherqq (%rdi,%ymm1,8), %ymm2 {%k1}
+; CHECK-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; CHECK-NEXT: retq
%res = call <4 x i64> @llvm.x86.avx512.gather3div4.di(<4 x i64> %x0, i8* %x1, <4 x i64> %x2, i8 %x3, i32 8)
%res1 = call <4 x i64> @llvm.x86.avx512.gather3div4.di(<4 x i64> %x0, i8* %x1, <4 x i64> %x2, i8 -1, i32 8)
@@ -349,12 +366,12 @@ declare <4 x float> @llvm.x86.avx512.gather3div4.sf(<4 x float>, i8*, <2 x i64>,
define <4 x float>@test_int_x86_avx512_gather3div4_sf(<4 x float> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div4_sf:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
-; CHECK-NEXT: vmovaps %xmm0, %xmm2
-; CHECK-NEXT: vgatherqps (%rdi,%xmm1,4), %xmm2 {%k1}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vgatherqps (%rdi,%xmm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
-; CHECK-NEXT: vgatherqps (%rdi,%xmm1,2), %xmm0 {%k1}
-; CHECK-NEXT: vaddps %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vgatherqps (%rdi,%xmm1,2), %xmm2 {%k1}
+; CHECK-NEXT: vaddps %xmm2, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.gather3div4.sf(<4 x float> %x0, i8* %x1, <2 x i64> %x2, i8 %x3, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.gather3div4.sf(<4 x float> %x0, i8* %x1, <2 x i64> %x2, i8 -1, i32 2)
@@ -367,9 +384,9 @@ declare <4 x i32> @llvm.x86.avx512.gather3div4.si(<4 x i32>, i8*, <2 x i64>, i8,
define <4 x i32>@test_int_x86_avx512_gather3div4_si(<4 x i32> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div4_si:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
-; CHECK-NEXT: vmovdqa %xmm0, %xmm2
+; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpgatherqd (%rdi,%xmm1,4), %xmm2 {%k2}
; CHECK-NEXT: vpgatherqd (%rdi,%xmm1,4), %xmm0 {%k1}
; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0
@@ -385,12 +402,13 @@ declare <4 x float> @llvm.x86.avx512.gather3div8.sf(<4 x float>, i8*, <4 x i64>,
define <4 x float>@test_int_x86_avx512_gather3div8_sf(<4 x float> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div8_sf:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
-; CHECK-NEXT: vmovaps %xmm0, %xmm2
-; CHECK-NEXT: vgatherqps (%rdi,%ymm1,4), %xmm2 {%k1}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vgatherqps (%rdi,%ymm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
-; CHECK-NEXT: vgatherqps (%rdi,%ymm1,2), %xmm0 {%k1}
-; CHECK-NEXT: vaddps %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vgatherqps (%rdi,%ymm1,2), %xmm2 {%k1}
+; CHECK-NEXT: vaddps %xmm2, %xmm0, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.gather3div8.sf(<4 x float> %x0, i8* %x1, <4 x i64> %x2, i8 %x3, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.gather3div8.sf(<4 x float> %x0, i8* %x1, <4 x i64> %x2, i8 -1, i32 2)
@@ -403,12 +421,13 @@ declare <4 x i32> @llvm.x86.avx512.gather3div8.si(<4 x i32>, i8*, <4 x i64>, i8,
define <4 x i32>@test_int_x86_avx512_gather3div8_si(<4 x i32> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div8_si:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm2
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vpgatherqd (%rdi,%ymm1,4), %xmm2 {%k2}
; CHECK-NEXT: vpgatherqd (%rdi,%ymm1,2), %xmm0 {%k1}
; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.avx512.gather3div8.si(<4 x i32> %x0, i8* %x1, <4 x i64> %x2, i8 %x3, i32 4)
%res1 = call <4 x i32> @llvm.x86.avx512.gather3div8.si(<4 x i32> %x0, i8* %x1, <4 x i64> %x2, i8 %x3, i32 2)
@@ -421,12 +440,12 @@ declare <2 x double> @llvm.x86.avx512.gather3siv2.df(<2 x double>, i8*, <4 x i32
define <2 x double>@test_int_x86_avx512_gather3siv2_df(<2 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv2_df:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
-; CHECK-NEXT: vmovapd %xmm0, %xmm2
-; CHECK-NEXT: vgatherdpd (%rdi,%xmm1,4), %xmm2 {%k1}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vgatherdpd (%rdi,%xmm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
-; CHECK-NEXT: vgatherdpd (%rdi,%xmm1,2), %xmm0 {%k1}
-; CHECK-NEXT: vaddpd %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vgatherdpd (%rdi,%xmm1,2), %xmm2 {%k1}
+; CHECK-NEXT: vaddpd %xmm2, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.gather3siv2.df(<2 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.gather3siv2.df(<2 x double> %x0, i8* %x1, <4 x i32> %x2, i8 -1, i32 2)
@@ -439,7 +458,7 @@ declare <2 x i64> @llvm.x86.avx512.gather3siv2.di(<2 x i64>, i8*, <4 x i32>, i8,
define <2 x i64>@test_int_x86_avx512_gather3siv2_di(<2 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv2_di:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpgatherdq (%rdi,%xmm1,8), %xmm0 {%k1}
; CHECK-NEXT: vpaddq %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -454,12 +473,12 @@ declare <4 x double> @llvm.x86.avx512.gather3siv4.df(<4 x double>, i8*, <4 x i32
define <4 x double>@test_int_x86_avx512_gather3siv4_df(<4 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv4_df:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
-; CHECK-NEXT: vmovapd %ymm0, %ymm2
-; CHECK-NEXT: vgatherdpd (%rdi,%xmm1,4), %ymm2 {%k1}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vgatherdpd (%rdi,%xmm1,4), %ymm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
-; CHECK-NEXT: vgatherdpd (%rdi,%xmm1,2), %ymm0 {%k1}
-; CHECK-NEXT: vaddpd %ymm0, %ymm2, %ymm0
+; CHECK-NEXT: vxorpd %ymm2, %ymm2, %ymm2
+; CHECK-NEXT: vgatherdpd (%rdi,%xmm1,2), %ymm2 {%k1}
+; CHECK-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; CHECK-NEXT: retq
%res = call <4 x double> @llvm.x86.avx512.gather3siv4.df(<4 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 4)
%res1 = call <4 x double> @llvm.x86.avx512.gather3siv4.df(<4 x double> %x0, i8* %x1, <4 x i32> %x2, i8 -1, i32 2)
@@ -472,9 +491,9 @@ declare <4 x i64> @llvm.x86.avx512.gather3siv4.di(<4 x i64>, i8*, <4 x i32>, i8,
define <4 x i64>@test_int_x86_avx512_gather3siv4_di(<4 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv4_di:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpgatherdq (%rdi,%xmm1,8), %ymm0 {%k1}
-; CHECK-NEXT: vpaddq %ymm0, %ymm0, %ymm0
+; CHECK-NEXT: vpaddq %ymm0, %ymm0, %ymm0
; CHECK-NEXT: retq
%res = call <4 x i64> @llvm.x86.avx512.gather3siv4.di(<4 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 8)
%res1 = call <4 x i64> @llvm.x86.avx512.gather3siv4.di(<4 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 8)
@@ -487,12 +506,12 @@ declare <4 x float> @llvm.x86.avx512.gather3siv4.sf(<4 x float>, i8*, <4 x i32>,
define <4 x float>@test_int_x86_avx512_gather3siv4_sf(<4 x float> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv4_sf:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
-; CHECK-NEXT: vmovaps %xmm0, %xmm2
-; CHECK-NEXT: vgatherdps (%rdi,%xmm1,4), %xmm2 {%k1}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vgatherdps (%rdi,%xmm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
-; CHECK-NEXT: vgatherdps (%rdi,%xmm1,2), %xmm0 {%k1}
-; CHECK-NEXT: vaddps %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vgatherdps (%rdi,%xmm1,2), %xmm2 {%k1}
+; CHECK-NEXT: vaddps %xmm2, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.gather3siv4.sf(<4 x float> %x0, i8* %x1, <4 x i32> %x2, i8 %x3, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.gather3siv4.sf(<4 x float> %x0, i8* %x1, <4 x i32> %x2, i8 -1, i32 2)
@@ -505,9 +524,9 @@ declare <4 x i32> @llvm.x86.avx512.gather3siv4.si(<4 x i32>, i8*, <4 x i32>, i8,
define <4 x i32>@test_int_x86_avx512_gather3siv4_si(<4 x i32> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv4_si:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
-; CHECK-NEXT: vmovdqa %xmm0, %xmm2
+; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpgatherdd (%rdi,%xmm1,4), %xmm2 {%k2}
; CHECK-NEXT: vpgatherdd (%rdi,%xmm1,2), %xmm0 {%k1}
; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0
@@ -523,12 +542,12 @@ declare <8 x float> @llvm.x86.avx512.gather3siv8.sf(<8 x float>, i8*, <8 x i32>,
define <8 x float>@test_int_x86_avx512_gather3siv8_sf(<8 x float> %x0, i8* %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv8_sf:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
-; CHECK-NEXT: vmovaps %ymm0, %ymm2
-; CHECK-NEXT: vgatherdps (%rdi,%ymm1,4), %ymm2 {%k1}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vgatherdps (%rdi,%ymm1,4), %ymm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
-; CHECK-NEXT: vgatherdps (%rdi,%ymm1,2), %ymm0 {%k1}
-; CHECK-NEXT: vaddps %ymm0, %ymm2, %ymm0
+; CHECK-NEXT: vxorps %ymm2, %ymm2, %ymm2
+; CHECK-NEXT: vgatherdps (%rdi,%ymm1,2), %ymm2 {%k1}
+; CHECK-NEXT: vaddps %ymm2, %ymm0, %ymm0
; CHECK-NEXT: retq
%res = call <8 x float> @llvm.x86.avx512.gather3siv8.sf(<8 x float> %x0, i8* %x1, <8 x i32> %x2, i8 %x3, i32 4)
%res1 = call <8 x float> @llvm.x86.avx512.gather3siv8.sf(<8 x float> %x0, i8* %x1, <8 x i32> %x2, i8 -1, i32 2)
@@ -541,7 +560,7 @@ declare <8 x i32> @llvm.x86.avx512.gather3siv8.si(<8 x i32>, i8*, <8 x i32>, i8,
define <8 x i32>@test_int_x86_avx512_gather3siv8_si(<8 x i32> %x0, i8* %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv8_si:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm2
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vpgatherdd (%rdi,%ymm1,4), %ymm2 {%k2}
@@ -559,7 +578,7 @@ declare void @llvm.x86.avx512.scatterdiv2.df(i8*, i8, <2 x i64>, <2 x double>, i
define void@test_int_x86_avx512_scatterdiv2_df(i8* %x0, i8 %x1, <2 x i64> %x2, <2 x double> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv2_df:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vscatterqpd %xmm1, (%rdi,%xmm0,2) {%k2}
; CHECK-NEXT: vscatterqpd %xmm1, (%rdi,%xmm0,4) {%k1}
@@ -574,7 +593,7 @@ declare void @llvm.x86.avx512.scatterdiv2.di(i8*, i8, <2 x i64>, <2 x i64>, i32)
define void@test_int_x86_avx512_scatterdiv2_di(i8* %x0, i8 %x1, <2 x i64> %x2, <2 x i64> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv2_di:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterqq %xmm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vpscatterqq %xmm1, (%rdi,%xmm0,4) {%k1}
@@ -589,10 +608,11 @@ declare void @llvm.x86.avx512.scatterdiv4.df(i8*, i8, <4 x i64>, <4 x double>, i
define void@test_int_x86_avx512_scatterdiv4_df(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x double> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_df:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterqpd %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vscatterqpd %ymm1, (%rdi,%ymm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
call void @llvm.x86.avx512.scatterdiv4.df(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x double> %x3, i32 2)
call void @llvm.x86.avx512.scatterdiv4.df(i8* %x0, i8 -1, <4 x i64> %x2, <4 x double> %x3, i32 4)
@@ -604,10 +624,11 @@ declare void @llvm.x86.avx512.scatterdiv4.di(i8*, i8, <4 x i64>, <4 x i64>, i32)
define void@test_int_x86_avx512_scatterdiv4_di(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i64> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_di:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterqq %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vpscatterqq %ymm1, (%rdi,%ymm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
call void @llvm.x86.avx512.scatterdiv4.di(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i64> %x3, i32 2)
call void @llvm.x86.avx512.scatterdiv4.di(i8* %x0, i8 -1, <4 x i64> %x2, <4 x i64> %x3, i32 4)
@@ -619,7 +640,7 @@ declare void @llvm.x86.avx512.scatterdiv4.sf(i8*, i8, <2 x i64>, <4 x float>, i3
define void@test_int_x86_avx512_scatterdiv4_sf(i8* %x0, i8 %x1, <2 x i64> %x2, <4 x float> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_sf:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterqps %xmm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vscatterqps %xmm1, (%rdi,%xmm0,4) {%k1}
@@ -634,7 +655,7 @@ declare void @llvm.x86.avx512.scatterdiv4.si(i8*, i8, <2 x i64>, <4 x i32>, i32)
define void@test_int_x86_avx512_scatterdiv4_si(i8* %x0, i8 %x1, <2 x i64> %x2, <4 x i32> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_si:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpscatterqd %xmm1, (%rdi,%xmm0,2) {%k2}
; CHECK-NEXT: vpscatterqd %xmm1, (%rdi,%xmm0,4) {%k1}
@@ -649,10 +670,11 @@ declare void @llvm.x86.avx512.scatterdiv8.sf(i8*, i8, <4 x i64>, <4 x float>, i3
define void@test_int_x86_avx512_scatterdiv8_sf(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x float> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv8_sf:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterqps %xmm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vscatterqps %xmm1, (%rdi,%ymm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
call void @llvm.x86.avx512.scatterdiv8.sf(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x float> %x3, i32 2)
call void @llvm.x86.avx512.scatterdiv8.sf(i8* %x0, i8 -1, <4 x i64> %x2, <4 x float> %x3, i32 4)
@@ -664,10 +686,11 @@ declare void @llvm.x86.avx512.scatterdiv8.si(i8*, i8, <4 x i64>, <4 x i32>, i32)
define void@test_int_x86_avx512_scatterdiv8_si(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i32> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv8_si:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterqd %xmm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vpscatterqd %xmm1, (%rdi,%ymm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
call void @llvm.x86.avx512.scatterdiv8.si(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i32> %x3, i32 2)
call void @llvm.x86.avx512.scatterdiv8.si(i8* %x0, i8 -1, <4 x i64> %x2, <4 x i32> %x3, i32 4)
@@ -679,7 +702,7 @@ declare void @llvm.x86.avx512.scattersiv2.df(i8*, i8, <4 x i32>, <2 x double>, i
define void@test_int_x86_avx512_scattersiv2_df(i8* %x0, i8 %x1, <4 x i32> %x2, <2 x double> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv2_df:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vscatterdpd %xmm1, (%rdi,%xmm0,2) {%k2}
; CHECK-NEXT: vscatterdpd %xmm1, (%rdi,%xmm0,4) {%k1}
@@ -694,7 +717,7 @@ declare void @llvm.x86.avx512.scattersiv2.di(i8*, i8, <4 x i32>, <2 x i64>, i32)
define void@test_int_x86_avx512_scattersiv2_di(i8* %x0, i8 %x1, <4 x i32> %x2, <2 x i64> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv2_di:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpscatterdq %xmm1, (%rdi,%xmm0,2) {%k2}
; CHECK-NEXT: vpscatterdq %xmm1, (%rdi,%xmm0,4) {%k1}
@@ -709,10 +732,11 @@ declare void @llvm.x86.avx512.scattersiv4.df(i8*, i8, <4 x i32>, <4 x double>, i
define void@test_int_x86_avx512_scattersiv4_df(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x double> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv4_df:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterdpd %ymm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vscatterdpd %ymm1, (%rdi,%xmm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
call void @llvm.x86.avx512.scattersiv4.df(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x double> %x3, i32 2)
call void @llvm.x86.avx512.scattersiv4.df(i8* %x0, i8 -1, <4 x i32> %x2, <4 x double> %x3, i32 4)
@@ -724,10 +748,11 @@ declare void @llvm.x86.avx512.scattersiv4.di(i8*, i8, <4 x i32>, <4 x i64>, i32)
define void@test_int_x86_avx512_scattersiv4_di(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i64> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv4_di:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpscatterdq %ymm1, (%rdi,%xmm0,2) {%k2}
; CHECK-NEXT: vpscatterdq %ymm1, (%rdi,%xmm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
call void @llvm.x86.avx512.scattersiv4.di(i8* %x0, i8 -1, <4 x i32> %x2, <4 x i64> %x3, i32 2)
call void @llvm.x86.avx512.scattersiv4.di(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i64> %x3, i32 4)
@@ -739,7 +764,7 @@ declare void @llvm.x86.avx512.scattersiv4.sf(i8*, i8, <4 x i32>, <4 x float>, i3
define void@test_int_x86_avx512_scattersiv4_sf(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x float> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv4_sf:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterdps %xmm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vscatterdps %xmm1, (%rdi,%xmm0,4) {%k1}
@@ -754,7 +779,7 @@ declare void @llvm.x86.avx512.scattersiv4.si(i8*, i8, <4 x i32>, <4 x i32>, i32)
define void@test_int_x86_avx512_scattersiv4_si(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i32> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv4_si:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterdd %xmm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vpscatterdd %xmm1, (%rdi,%xmm0,4) {%k1}
@@ -769,10 +794,11 @@ declare void @llvm.x86.avx512.scattersiv8.sf(i8*, i8, <8 x i32>, <8 x float>, i3
define void@test_int_x86_avx512_scattersiv8_sf(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x float> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv8_sf:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterdps %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vscatterdps %ymm1, (%rdi,%ymm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
call void @llvm.x86.avx512.scattersiv8.sf(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x float> %x3, i32 2)
call void @llvm.x86.avx512.scattersiv8.sf(i8* %x0, i8 -1, <8 x i32> %x2, <8 x float> %x3, i32 4)
@@ -784,10 +810,11 @@ declare void @llvm.x86.avx512.scattersiv8.si(i8*, i8, <8 x i32>, <8 x i32>, i32)
define void@test_int_x86_avx512_scattersiv8_si(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x i32> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv8_si:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterdd %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vpscatterdd %ymm1, (%rdi,%ymm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
call void @llvm.x86.avx512.scattersiv8.si(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x i32> %x3, i32 2)
call void @llvm.x86.avx512.scattersiv8.si(i8* %x0, i8 -1, <8 x i32> %x2, <8 x i32> %x3, i32 4)
@@ -802,11 +829,12 @@ define void @scatter_mask_test(i8* %x0, <8 x i32> %x2, <8 x i32> %x3) {
; CHECK-NEXT: kxorw %k0, %k0, %k1
; CHECK-NEXT: vpscatterdd %ymm1, (%rdi,%ymm0,4) {%k1}
; CHECK-NEXT: movb $1, %al
-; CHECK-NEXT: kmovb %eax, %k1
+; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: vpscatterdd %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: movb $96, %al
-; CHECK-NEXT: kmovb %eax, %k1
+; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: vpscatterdd %ymm1, (%rdi,%ymm0,4) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
call void @llvm.x86.avx512.scattersiv8.si(i8* %x0, i8 -1, <8 x i32> %x2, <8 x i32> %x3, i32 2)
call void @llvm.x86.avx512.scattersiv8.si(i8* %x0, i8 0, <8 x i32> %x2, <8 x i32> %x3, i32 4)
@@ -819,17 +847,17 @@ define <16 x float> @gather_mask_test(<16 x i32> %ind, <16 x float> %src, i8* %b
; CHECK-LABEL: gather_mask_test:
; CHECK: ## BB#0:
; CHECK-NEXT: kxnorw %k0, %k0, %k1
-; CHECK-NEXT: vmovaps %zmm1, %zmm2
+; CHECK-NEXT: vxorps %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm2 {%k1}
; CHECK-NEXT: kxorw %k0, %k0, %k1
; CHECK-NEXT: vmovaps %zmm1, %zmm3
; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm3 {%k1}
; CHECK-NEXT: movw $1, %ax
-; CHECK-NEXT: kmovw %eax, %k1
+; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: vmovaps %zmm1, %zmm4
; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm4 {%k1}
; CHECK-NEXT: movw $220, %ax
-; CHECK-NEXT: kmovw %eax, %k1
+; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
; CHECK-NEXT: vaddps %zmm3, %zmm2, %zmm0
; CHECK-NEXT: vaddps %zmm4, %zmm1, %zmm1
diff --git a/test/CodeGen/X86/avx512-insert-extract.ll b/test/CodeGen/X86/avx512-insert-extract.ll
index cb8ed0e59a3a..87928348a851 100644
--- a/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/test/CodeGen/X86/avx512-insert-extract.ll
@@ -1,24 +1,25 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck --check-prefix=KNL %s
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck --check-prefix=SKX %s
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck --check-prefix=SKX --check-prefix=SKX_ONLY %s
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=skx -mattr=avx512vbmi | FileCheck --check-prefix=SKX --check-prefix=SKX_VBMI %s
define <16 x float> @test1(<16 x float> %x, float* %br, float %y) nounwind {
; KNL-LABEL: test1:
; KNL: ## BB#0:
; KNL-NEXT: vinsertps {{.*#+}} xmm2 = xmm0[0],mem[0],xmm0[2,3]
-; KNL-NEXT: vinsertf32x4 $0, %xmm2, %zmm0, %zmm0
-; KNL-NEXT: vextractf32x4 $3, %zmm0, %xmm2
-; KNL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
-; KNL-NEXT: vinsertf32x4 $3, %xmm1, %zmm0, %zmm0
+; KNL-NEXT: vinsertf32x4 $0, %xmm2, %zmm0, %zmm2
+; KNL-NEXT: vextractf32x4 $3, %zmm0, %xmm0
+; KNL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; KNL-NEXT: vinsertf32x4 $3, %xmm0, %zmm2, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test1:
; SKX: ## BB#0:
; SKX-NEXT: vinsertps {{.*#+}} xmm2 = xmm0[0],mem[0],xmm0[2,3]
-; SKX-NEXT: vinsertf32x4 $0, %xmm2, %zmm0, %zmm0
-; SKX-NEXT: vextractf32x4 $3, %zmm0, %xmm2
-; SKX-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
-; SKX-NEXT: vinsertf32x4 $3, %xmm1, %zmm0, %zmm0
+; SKX-NEXT: vinsertf32x4 $0, %xmm2, %zmm0, %zmm2
+; SKX-NEXT: vextractf32x4 $3, %zmm0, %xmm0
+; SKX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; SKX-NEXT: vinsertf32x4 $3, %xmm0, %zmm2, %zmm0
; SKX-NEXT: retq
%rrr = load float, float* %br
%rrr2 = insertelement <16 x float> %x, float %rrr, i32 1
@@ -30,19 +31,19 @@ define <8 x double> @test2(<8 x double> %x, double* %br, double %y) nounwind {
; KNL-LABEL: test2:
; KNL: ## BB#0:
; KNL-NEXT: vmovhpd {{.*#+}} xmm2 = xmm0[0],mem[0]
-; KNL-NEXT: vinsertf32x4 $0, %xmm2, %zmm0, %zmm0
-; KNL-NEXT: vextractf32x4 $3, %zmm0, %xmm2
-; KNL-NEXT: vmovsd {{.*#+}} xmm1 = xmm1[0],xmm2[1]
-; KNL-NEXT: vinsertf32x4 $3, %xmm1, %zmm0, %zmm0
+; KNL-NEXT: vinsertf32x4 $0, %xmm2, %zmm0, %zmm2
+; KNL-NEXT: vextractf32x4 $3, %zmm0, %xmm0
+; KNL-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; KNL-NEXT: vinsertf32x4 $3, %xmm0, %zmm2, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test2:
; SKX: ## BB#0:
; SKX-NEXT: vmovhpd {{.*#+}} xmm2 = xmm0[0],mem[0]
-; SKX-NEXT: vinsertf64x2 $0, %xmm2, %zmm0, %zmm0
-; SKX-NEXT: vextractf64x2 $3, %zmm0, %xmm2
-; SKX-NEXT: vmovsd {{.*#+}} xmm1 = xmm1[0],xmm2[1]
-; SKX-NEXT: vinsertf64x2 $3, %xmm1, %zmm0, %zmm0
+; SKX-NEXT: vinsertf64x2 $0, %xmm2, %zmm0, %zmm2
+; SKX-NEXT: vextractf64x2 $3, %zmm0, %xmm0
+; SKX-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SKX-NEXT: vinsertf64x2 $3, %xmm0, %zmm2, %zmm0
; SKX-NEXT: retq
%rrr = load double, double* %br
%rrr2 = insertelement <8 x double> %x, double %rrr, i32 1
@@ -123,16 +124,31 @@ define void @test6(<4 x float> %x, float* %out) nounwind {
define float @test7(<16 x float> %x, i32 %ind) nounwind {
; KNL-LABEL: test7:
; KNL: ## BB#0:
-; KNL-NEXT: vmovd %edi, %xmm1
-; KNL-NEXT: vpermps %zmm0, %zmm1, %zmm0
-; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: andq $-64, %rsp
+; KNL-NEXT: subq $128, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %zmm0, (%rsp)
+; KNL-NEXT: andl $15, %edi
+; KNL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
;
; SKX-LABEL: test7:
; SKX: ## BB#0:
-; SKX-NEXT: vmovd %edi, %xmm1
-; SKX-NEXT: vpermps %zmm0, %zmm1, %zmm0
-; SKX-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: andq $-64, %rsp
+; SKX-NEXT: subq $128, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovaps %zmm0, (%rsp)
+; SKX-NEXT: andl $15, %edi
+; SKX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%e = extractelement <16 x float> %x, i32 %ind
ret float %e
@@ -141,18 +157,31 @@ define float @test7(<16 x float> %x, i32 %ind) nounwind {
define double @test8(<8 x double> %x, i32 %ind) nounwind {
; KNL-LABEL: test8:
; KNL: ## BB#0:
-; KNL-NEXT: movslq %edi, %rax
-; KNL-NEXT: vmovq %rax, %xmm1
-; KNL-NEXT: vpermpd %zmm0, %zmm1, %zmm0
-; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: andq $-64, %rsp
+; KNL-NEXT: subq $128, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %zmm0, (%rsp)
+; KNL-NEXT: andl $7, %edi
+; KNL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
;
; SKX-LABEL: test8:
; SKX: ## BB#0:
-; SKX-NEXT: movslq %edi, %rax
-; SKX-NEXT: vmovq %rax, %xmm1
-; SKX-NEXT: vpermpd %zmm0, %zmm1, %zmm0
-; SKX-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: andq $-64, %rsp
+; SKX-NEXT: subq $128, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovaps %zmm0, (%rsp)
+; SKX-NEXT: andl $7, %edi
+; SKX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%e = extractelement <8 x double> %x, i32 %ind
ret double %e
@@ -161,16 +190,31 @@ define double @test8(<8 x double> %x, i32 %ind) nounwind {
define float @test9(<8 x float> %x, i32 %ind) nounwind {
; KNL-LABEL: test9:
; KNL: ## BB#0:
-; KNL-NEXT: vmovd %edi, %xmm1
-; KNL-NEXT: vpermps %ymm0, %ymm1, %ymm0
-; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: andq $-32, %rsp
+; KNL-NEXT: subq $64, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %ymm0, (%rsp)
+; KNL-NEXT: andl $7, %edi
+; KNL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
;
; SKX-LABEL: test9:
; SKX: ## BB#0:
-; SKX-NEXT: vmovd %edi, %xmm1
-; SKX-NEXT: vpermps %ymm0, %ymm1, %ymm0
-; SKX-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: andq $-32, %rsp
+; SKX-NEXT: subq $64, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovaps %ymm0, (%rsp)
+; SKX-NEXT: andl $7, %edi
+; SKX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%e = extractelement <8 x float> %x, i32 %ind
ret float %e
@@ -179,16 +223,31 @@ define float @test9(<8 x float> %x, i32 %ind) nounwind {
define i32 @test10(<16 x i32> %x, i32 %ind) nounwind {
; KNL-LABEL: test10:
; KNL: ## BB#0:
-; KNL-NEXT: vmovd %edi, %xmm1
-; KNL-NEXT: vpermd %zmm0, %zmm1, %zmm0
-; KNL-NEXT: vmovd %xmm0, %eax
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: andq $-64, %rsp
+; KNL-NEXT: subq $128, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %zmm0, (%rsp)
+; KNL-NEXT: andl $15, %edi
+; KNL-NEXT: movl (%rsp,%rdi,4), %eax
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
;
; SKX-LABEL: test10:
; SKX: ## BB#0:
-; SKX-NEXT: vmovd %edi, %xmm1
-; SKX-NEXT: vpermd %zmm0, %zmm1, %zmm0
-; SKX-NEXT: vmovd %xmm0, %eax
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: andq $-64, %rsp
+; SKX-NEXT: subq $128, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovaps %zmm0, (%rsp)
+; SKX-NEXT: andl $15, %edi
+; SKX-NEXT: movl (%rsp,%rdi,4), %eax
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%e = extractelement <16 x i32> %x, i32 %ind
ret i32 %e
@@ -216,7 +275,7 @@ define <16 x i32> @test11(<16 x i32>%a, <16 x i32>%b) {
; SKX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; SKX-NEXT: kshiftlw $11, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
-; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: testb %al, %al
; SKX-NEXT: je LBB10_2
@@ -258,11 +317,12 @@ define i64 @test12(<16 x i64>%a, <16 x i64>%b, i64 %a1, i64 %b1) {
; SKX-NEXT: kunpckbw %k0, %k1, %k0
; SKX-NEXT: kshiftlw $15, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
-; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: testb %al, %al
; SKX-NEXT: cmoveq %rsi, %rdi
; SKX-NEXT: movq %rdi, %rax
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%cmpvector_func.i = icmp slt <16 x i64> %a, %b
%extract24vector_func.i = extractelement <16 x i1> %cmpvector_func.i, i32 0
@@ -283,6 +343,7 @@ define i16 @test13(i32 %a, i32 %b) {
; KNL-NEXT: kshiftlw $1, %k1, %k1
; KNL-NEXT: korw %k0, %k1, %k0
; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test13:
@@ -290,13 +351,14 @@ define i16 @test13(i32 %a, i32 %b) {
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovw %eax, %k0
+; SKX-NEXT: kmovd %eax, %k0
; SKX-NEXT: movw $-4, %ax
-; SKX-NEXT: kmovw %eax, %k1
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: kshiftrw $1, %k1, %k1
; SKX-NEXT: kshiftlw $1, %k1, %k1
; SKX-NEXT: korw %k0, %k1, %k0
-; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; SKX-NEXT: retq
%cmp_res = icmp ult i32 %a, %b
%maskv = insertelement <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i1 %cmp_res, i32 0
@@ -322,11 +384,12 @@ define i64 @test14(<8 x i64>%a, <8 x i64>%b, i64 %a1, i64 %b1) {
; SKX-NEXT: vpcmpgtq %zmm0, %zmm1, %k0
; SKX-NEXT: kshiftlb $3, %k0, %k0
; SKX-NEXT: kshiftrb $7, %k0, %k0
-; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: testb %al, %al
; SKX-NEXT: cmoveq %rsi, %rdi
; SKX-NEXT: movq %rdi, %rax
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%cmpvector_func.i = icmp slt <8 x i64> %a, %b
%extract24vector_func.i = extractelement <8 x i1> %cmpvector_func.i, i32 4
@@ -372,6 +435,7 @@ define i16 @test16(i1 *%addr, i16 %a) {
; KNL-NEXT: vpslld $31, %zmm2, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test16:
@@ -379,13 +443,15 @@ define i16 @test16(i1 *%addr, i16 %a) {
; SKX-NEXT: movzbl (%rdi), %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: kmovw %esi, %k1
+; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: vpmovm2d %k1, %zmm0
; SKX-NEXT: vpmovm2d %k0, %zmm1
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,16,11,12,13,14,15]
; SKX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; SKX-NEXT: vpmovd2m %zmm2, %k0
-; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = load i1 , i1 * %addr, align 128
%a1 = bitcast i16 %a to <16 x i1>
@@ -408,6 +474,7 @@ define i8 @test17(i1 *%addr, i8 %a) {
; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test17:
@@ -415,13 +482,15 @@ define i8 @test17(i1 *%addr, i8 %a) {
; SKX-NEXT: movzbl (%rdi), %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: kmovb %esi, %k1
+; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: vpmovm2q %k1, %zmm0
; SKX-NEXT: vpmovm2q %k0, %zmm1
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,8,5,6,7]
; SKX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; SKX-NEXT: vpmovq2m %zmm2, %k0
-; SKX-NEXT: kmovb %k0, %eax
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = load i1 , i1 * %addr, align 128
%a1 = bitcast i8 %a to <8 x i1>
@@ -443,6 +512,7 @@ define i64 @extract_v8i64(<8 x i64> %x, i64* %dst) {
; SKX-NEXT: vpextrq $1, %xmm0, %rax
; SKX-NEXT: vextracti64x2 $1, %zmm0, %xmm0
; SKX-NEXT: vpextrq $1, %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = extractelement <8 x i64> %x, i32 1
%r2 = extractelement <8 x i64> %x, i32 3
@@ -463,6 +533,7 @@ define i64 @extract_v4i64(<4 x i64> %x, i64* %dst) {
; SKX-NEXT: vpextrq $1, %xmm0, %rax
; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0
; SKX-NEXT: vpextrq $1, %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = extractelement <4 x i64> %x, i32 1
%r2 = extractelement <4 x i64> %x, i32 3
@@ -501,6 +572,7 @@ define i32 @extract_v16i32(<16 x i32> %x, i32* %dst) {
; SKX-NEXT: vpextrd $1, %xmm0, %eax
; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm0
; SKX-NEXT: vpextrd $1, %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = extractelement <16 x i32> %x, i32 1
%r2 = extractelement <16 x i32> %x, i32 5
@@ -521,6 +593,7 @@ define i32 @extract_v8i32(<8 x i32> %x, i32* %dst) {
; SKX-NEXT: vpextrd $1, %xmm0, %eax
; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0
; SKX-NEXT: vpextrd $1, %xmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = extractelement <8 x i32> %x, i32 1
%r2 = extractelement <8 x i32> %x, i32 5
@@ -561,6 +634,7 @@ define i16 @extract_v32i16(<32 x i16> %x, i16* %dst) {
; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm0
; SKX-NEXT: vpextrw $1, %xmm0, (%rdi)
; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = extractelement <32 x i16> %x, i32 1
%r2 = extractelement <32 x i16> %x, i32 9
@@ -583,6 +657,7 @@ define i16 @extract_v16i16(<16 x i16> %x, i16* %dst) {
; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0
; SKX-NEXT: vpextrw $1, %xmm0, (%rdi)
; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = extractelement <16 x i16> %x, i32 1
%r2 = extractelement <16 x i16> %x, i32 9
@@ -625,6 +700,7 @@ define i8 @extract_v64i8(<64 x i8> %x, i8* %dst) {
; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm0
; SKX-NEXT: vpextrb $1, %xmm0, (%rdi)
; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = extractelement <64 x i8> %x, i32 1
%r2 = extractelement <64 x i8> %x, i32 17
@@ -647,6 +723,7 @@ define i8 @extract_v32i8(<32 x i8> %x, i8* %dst) {
; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0
; SKX-NEXT: vpextrb $1, %xmm0, (%rdi)
; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = extractelement <32 x i8> %x, i32 1
%r2 = extractelement <32 x i8> %x, i32 17
@@ -678,19 +755,19 @@ define <8 x i64> @insert_v8i64(<8 x i64> %x, i64 %y , i64* %ptr) {
; KNL-LABEL: insert_v8i64:
; KNL: ## BB#0:
; KNL-NEXT: vpinsrq $1, (%rsi), %xmm0, %xmm1
-; KNL-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0
-; KNL-NEXT: vextracti32x4 $1, %zmm0, %xmm1
-; KNL-NEXT: vpinsrq $1, %rdi, %xmm1, %xmm1
-; KNL-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm0
+; KNL-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm1
+; KNL-NEXT: vextracti32x4 $1, %zmm0, %xmm0
+; KNL-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0
+; KNL-NEXT: vinserti32x4 $1, %xmm0, %zmm1, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: insert_v8i64:
; SKX: ## BB#0:
; SKX-NEXT: vpinsrq $1, (%rsi), %xmm0, %xmm1
-; SKX-NEXT: vinserti64x2 $0, %xmm1, %zmm0, %zmm0
-; SKX-NEXT: vextracti64x2 $1, %zmm0, %xmm1
-; SKX-NEXT: vpinsrq $1, %rdi, %xmm1, %xmm1
-; SKX-NEXT: vinserti64x2 $1, %xmm1, %zmm0, %zmm0
+; SKX-NEXT: vinserti64x2 $0, %xmm1, %zmm0, %zmm1
+; SKX-NEXT: vextracti64x2 $1, %zmm0, %xmm0
+; SKX-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0
+; SKX-NEXT: vinserti64x2 $1, %xmm0, %zmm1, %zmm0
; SKX-NEXT: retq
%val = load i64, i64* %ptr
%r1 = insertelement <8 x i64> %x, i64 %val, i32 1
@@ -702,19 +779,19 @@ define <4 x i64> @insert_v4i64(<4 x i64> %x, i64 %y , i64* %ptr) {
; KNL-LABEL: insert_v4i64:
; KNL: ## BB#0:
; KNL-NEXT: vpinsrq $1, (%rsi), %xmm0, %xmm1
-; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; KNL-NEXT: vpinsrq $1, %rdi, %xmm1, %xmm1
-; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; KNL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
+; KNL-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0
+; KNL-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: insert_v4i64:
; SKX: ## BB#0:
; SKX-NEXT: vpinsrq $1, (%rsi), %xmm0, %xmm1
-; SKX-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; SKX-NEXT: vextracti128 $1, %ymm0, %xmm1
-; SKX-NEXT: vpinsrq $1, %rdi, %xmm1, %xmm1
-; SKX-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; SKX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0
+; SKX-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0
+; SKX-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; SKX-NEXT: retq
%val = load i64, i64* %ptr
%r1 = insertelement <4 x i64> %x, i64 %val, i32 1
@@ -744,19 +821,19 @@ define <16 x i32> @insert_v16i32(<16 x i32> %x, i32 %y, i32* %ptr) {
; KNL-LABEL: insert_v16i32:
; KNL: ## BB#0:
; KNL-NEXT: vpinsrd $1, (%rsi), %xmm0, %xmm1
-; KNL-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0
-; KNL-NEXT: vextracti32x4 $1, %zmm0, %xmm1
-; KNL-NEXT: vpinsrd $1, %edi, %xmm1, %xmm1
-; KNL-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm0
+; KNL-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm1
+; KNL-NEXT: vextracti32x4 $1, %zmm0, %xmm0
+; KNL-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0
+; KNL-NEXT: vinserti32x4 $1, %xmm0, %zmm1, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: insert_v16i32:
; SKX: ## BB#0:
; SKX-NEXT: vpinsrd $1, (%rsi), %xmm0, %xmm1
-; SKX-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0
-; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm1
-; SKX-NEXT: vpinsrd $1, %edi, %xmm1, %xmm1
-; SKX-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm0
+; SKX-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm1
+; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm0
+; SKX-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0
+; SKX-NEXT: vinserti32x4 $1, %xmm0, %zmm1, %zmm0
; SKX-NEXT: retq
%val = load i32, i32* %ptr
%r1 = insertelement <16 x i32> %x, i32 %val, i32 1
@@ -768,19 +845,19 @@ define <8 x i32> @insert_v8i32(<8 x i32> %x, i32 %y, i32* %ptr) {
; KNL-LABEL: insert_v8i32:
; KNL: ## BB#0:
; KNL-NEXT: vpinsrd $1, (%rsi), %xmm0, %xmm1
-; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; KNL-NEXT: vpinsrd $1, %edi, %xmm1, %xmm1
-; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; KNL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
+; KNL-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0
+; KNL-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: insert_v8i32:
; SKX: ## BB#0:
; SKX-NEXT: vpinsrd $1, (%rsi), %xmm0, %xmm1
-; SKX-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; SKX-NEXT: vextracti128 $1, %ymm0, %xmm1
-; SKX-NEXT: vpinsrd $1, %edi, %xmm1, %xmm1
-; SKX-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; SKX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0
+; SKX-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0
+; SKX-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; SKX-NEXT: retq
%val = load i32, i32* %ptr
%r1 = insertelement <8 x i32> %x, i32 %val, i32 1
@@ -810,19 +887,19 @@ define <32 x i16> @insert_v32i16(<32 x i16> %x, i16 %y, i16* %ptr) {
; KNL-LABEL: insert_v32i16:
; KNL: ## BB#0:
; KNL-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm2
-; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; KNL-NEXT: vextracti128 $1, %ymm0, %xmm2
-; KNL-NEXT: vpinsrw $1, %edi, %xmm2, %xmm2
-; KNL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; KNL-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
+; KNL-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0
+; KNL-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: insert_v32i16:
; SKX: ## BB#0:
; SKX-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm1
-; SKX-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0
-; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm1
-; SKX-NEXT: vpinsrw $1, %edi, %xmm1, %xmm1
-; SKX-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm0
+; SKX-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm1
+; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm0
+; SKX-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0
+; SKX-NEXT: vinserti32x4 $1, %xmm0, %zmm1, %zmm0
; SKX-NEXT: retq
%val = load i16, i16* %ptr
%r1 = insertelement <32 x i16> %x, i16 %val, i32 1
@@ -834,19 +911,19 @@ define <16 x i16> @insert_v16i16(<16 x i16> %x, i16 %y, i16* %ptr) {
; KNL-LABEL: insert_v16i16:
; KNL: ## BB#0:
; KNL-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm1
-; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; KNL-NEXT: vpinsrw $1, %edi, %xmm1, %xmm1
-; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; KNL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
+; KNL-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0
+; KNL-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: insert_v16i16:
; SKX: ## BB#0:
; SKX-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm1
-; SKX-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; SKX-NEXT: vextracti128 $1, %ymm0, %xmm1
-; SKX-NEXT: vpinsrw $1, %edi, %xmm1, %xmm1
-; SKX-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; SKX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0
+; SKX-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0
+; SKX-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; SKX-NEXT: retq
%val = load i16, i16* %ptr
%r1 = insertelement <16 x i16> %x, i16 %val, i32 1
@@ -885,10 +962,10 @@ define <64 x i8> @insert_v64i8(<64 x i8> %x, i8 %y, i8* %ptr) {
; SKX-LABEL: insert_v64i8:
; SKX: ## BB#0:
; SKX-NEXT: vpinsrb $1, (%rsi), %xmm0, %xmm1
-; SKX-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0
-; SKX-NEXT: vextracti32x4 $3, %zmm0, %xmm1
-; SKX-NEXT: vpinsrb $2, %edi, %xmm1, %xmm1
-; SKX-NEXT: vinserti32x4 $3, %xmm1, %zmm0, %zmm0
+; SKX-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm1
+; SKX-NEXT: vextracti32x4 $3, %zmm0, %xmm0
+; SKX-NEXT: vpinsrb $2, %edi, %xmm0, %xmm0
+; SKX-NEXT: vinserti32x4 $3, %xmm0, %zmm1, %zmm0
; SKX-NEXT: retq
%val = load i8, i8* %ptr
%r1 = insertelement <64 x i8> %x, i8 %val, i32 1
@@ -900,19 +977,19 @@ define <32 x i8> @insert_v32i8(<32 x i8> %x, i8 %y, i8* %ptr) {
; KNL-LABEL: insert_v32i8:
; KNL: ## BB#0:
; KNL-NEXT: vpinsrb $1, (%rsi), %xmm0, %xmm1
-; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; KNL-NEXT: vpinsrb $1, %edi, %xmm1, %xmm1
-; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; KNL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
+; KNL-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0
+; KNL-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: insert_v32i8:
; SKX: ## BB#0:
; SKX-NEXT: vpinsrb $1, (%rsi), %xmm0, %xmm1
-; SKX-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; SKX-NEXT: vextracti128 $1, %ymm0, %xmm1
-; SKX-NEXT: vpinsrb $1, %edi, %xmm1, %xmm1
-; SKX-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; SKX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0
+; SKX-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0
+; SKX-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; SKX-NEXT: retq
%val = load i8, i8* %ptr
%r1 = insertelement <32 x i8> %x, i8 %val, i32 1
@@ -1051,149 +1128,148 @@ define i32 @test_insertelement_v32i1(i32 %a, i32 %b, <32 x i32> %x , <32 x i32>
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-32, %rsp
; KNL-NEXT: subq $32, %rsp
+; KNL-NEXT: xorl %eax, %eax
; KNL-NEXT: cmpl %esi, %edi
+; KNL-NEXT: setb %al
; KNL-NEXT: vpcmpltud %zmm3, %zmm1, %k0
; KNL-NEXT: kshiftlw $14, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: kmovw %k1, %ecx
; KNL-NEXT: kshiftlw $15, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %ecx
-; KNL-NEXT: vmovd %ecx, %xmm1
-; KNL-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; KNL-NEXT: kmovw %k1, %edx
+; KNL-NEXT: vmovd %edx, %xmm1
+; KNL-NEXT: vpinsrb $1, %ecx, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $13, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $12, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $3, %ecx, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $11, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $4, %ecx, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $10, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $5, %ecx, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $9, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $6, %ecx, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $8, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $7, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $8, %ecx, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $6, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $9, %ecx, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $5, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $10, %ecx, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $4, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $11, %ecx, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $3, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $12, %ecx, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $2, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $13, %ecx, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $1, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $14, %ecx, %xmm1, %xmm1
; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; KNL-NEXT: kmovw %k0, %ecx
+; KNL-NEXT: vpinsrb $15, %ecx, %xmm1, %xmm1
; KNL-NEXT: vpcmpltud %zmm2, %zmm0, %k0
; KNL-NEXT: kshiftlw $14, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
+; KNL-NEXT: kmovw %k1, %ecx
; KNL-NEXT: kshiftlw $15, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %ecx
-; KNL-NEXT: vmovd %ecx, %xmm0
-; KNL-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; KNL-NEXT: kmovw %k1, %edx
+; KNL-NEXT: vmovd %edx, %xmm0
+; KNL-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $13, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $12, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $11, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $10, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $9, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $8, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $7, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $6, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $5, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $4, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $3, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $2, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $1, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kmovw %k1, %eax
-; KNL-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; KNL-NEXT: kmovw %k1, %ecx
+; KNL-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; KNL-NEXT: kmovw %k0, %ecx
+; KNL-NEXT: vpinsrb $15, %ecx, %xmm0, %xmm0
; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; KNL-NEXT: vpsllw $7, %ymm0, %ymm0
; KNL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; KNL-NEXT: vpxor %ymm1, %ymm1, %ymm1
; KNL-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
-; KNL-NEXT: sbbl %eax, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: vpinsrb $4, %eax, %xmm0, %xmm1
-; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1
; KNL-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL-NEXT: vpslld $31, %zmm1, %zmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; KNL-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
@@ -1208,7 +1284,7 @@ define i32 @test_insertelement_v32i1(i32 %a, i32 %b, <32 x i32> %x , <32 x i32>
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovw %eax, %k0
+; SKX-NEXT: kmovd %eax, %k0
; SKX-NEXT: vpcmpltud %zmm2, %zmm0, %k1
; SKX-NEXT: vpcmpltud %zmm3, %zmm1, %k2
; SKX-NEXT: kunpckwd %k1, %k2, %k1
@@ -1218,6 +1294,7 @@ define i32 @test_insertelement_v32i1(i32 %a, i32 %b, <32 x i32> %x , <32 x i32>
; SKX-NEXT: vpermi2w %zmm1, %zmm0, %zmm2
; SKX-NEXT: vpmovw2m %zmm2, %k0
; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%cmp_res_i1 = icmp ult i32 %a, %b
%cmp_cmp_vec = icmp ult <32 x i32> %x, %y
@@ -1265,6 +1342,7 @@ define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y)
; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test_iinsertelement_v4i1:
@@ -1272,14 +1350,15 @@ define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y)
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovw %eax, %k0
+; SKX-NEXT: kmovd %eax, %k0
; SKX-NEXT: vpcmpltud %xmm1, %xmm0, %k1
; SKX-NEXT: vpmovm2d %k1, %xmm0
; SKX-NEXT: vpmovm2d %k0, %xmm1
; SKX-NEXT: vpbroadcastq %xmm1, %xmm1
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; SKX-NEXT: vpmovd2m %xmm0, %k0
-; SKX-NEXT: kmovb %k0, %eax
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; SKX-NEXT: retq
%cmp_res_i1 = icmp ult i32 %a, %b
%cmp_cmp_vec = icmp ult <4 x i32> %x, %y
@@ -1310,6 +1389,7 @@ define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y)
; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test_iinsertelement_v2i1:
@@ -1317,13 +1397,14 @@ define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y)
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovw %eax, %k0
+; SKX-NEXT: kmovd %eax, %k0
; SKX-NEXT: vpcmpltuq %xmm1, %xmm0, %k1
; SKX-NEXT: kshiftlw $1, %k1, %k1
; SKX-NEXT: kshiftrw $1, %k1, %k1
; SKX-NEXT: kshiftlw $1, %k0, %k0
; SKX-NEXT: korw %k0, %k1, %k0
-; SKX-NEXT: kmovb %k0, %eax
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; SKX-NEXT: retq
%cmp_res_i1 = icmp ult i32 %a, %b
%cmp_cmp_vec = icmp ult <2 x i64> %x, %y
@@ -1340,10 +1421,8 @@ define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) {
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
-; KNL-NEXT: vmovq %xmm0, %rax
-; KNL-NEXT: testb $1, %al
-; KNL-NEXT: sete %al
-; KNL-NEXT: addb $3, %al
+; KNL-NEXT: vpextrb $0, %xmm0, %eax
+; KNL-NEXT: addb $4, %al
; KNL-NEXT: movzbl %al, %eax
; KNL-NEXT: retq
;
@@ -1352,11 +1431,11 @@ define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) {
; SKX-NEXT: vpcmpnleuq %xmm1, %xmm0, %k0
; SKX-NEXT: kshiftlw $15, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
-; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: testb %al, %al
-; SKX-NEXT: sete %al
-; SKX-NEXT: addb $3, %al
+; SKX-NEXT: cmpb $1, %al
+; SKX-NEXT: movb $3, %al
+; SKX-NEXT: adcb $0, %al
; SKX-NEXT: movzbl %al, %eax
; SKX-NEXT: retq
%t1 = icmp ugt <2 x i64> %a, %b
@@ -1365,6 +1444,37 @@ define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) {
ret i8 %res
}
+define zeroext i8 @extractelement_v2i1_alt(<2 x i64> %a, <2 x i64> %b) {
+; KNL-LABEL: extractelement_v2i1_alt:
+; KNL: ## BB#0:
+; KNL-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; KNL-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; KNL-NEXT: vpextrb $0, %xmm0, %eax
+; KNL-NEXT: addb $4, %al
+; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: extractelement_v2i1_alt:
+; SKX: ## BB#0:
+; SKX-NEXT: vpcmpnleuq %xmm1, %xmm0, %k0
+; SKX-NEXT: kshiftlw $15, %k0, %k0
+; SKX-NEXT: kshiftrw $15, %k0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: cmpb $1, %al
+; SKX-NEXT: movb $3, %al
+; SKX-NEXT: adcb $0, %al
+; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: retq
+ %t1 = icmp ugt <2 x i64> %a, %b
+ %t2 = extractelement <2 x i1> %t1, i32 0
+ %sext = sext i1 %t2 to i8
+ %res = add i8 %sext, 4
+ ret i8 %res
+}
+
define zeroext i8 @test_extractelement_v4i1(<4 x i32> %a, <4 x i32> %b) {
; KNL-LABEL: test_extractelement_v4i1:
; KNL: ## BB#0:
@@ -1381,7 +1491,7 @@ define zeroext i8 @test_extractelement_v4i1(<4 x i32> %a, <4 x i32> %b) {
; SKX-NEXT: vpcmpnleud %xmm1, %xmm0, %k0
; SKX-NEXT: kshiftlw $12, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
-; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: retq
%t1 = icmp ugt <4 x i32> %a, %b
@@ -1406,8 +1516,9 @@ define zeroext i8 @test_extractelement_v32i1(<32 x i8> %a, <32 x i8> %b) {
; SKX-NEXT: vpcmpnleub %ymm1, %ymm0, %k0
; SKX-NEXT: kshiftld $29, %k0, %k0
; SKX-NEXT: kshiftrd $31, %k0, %k0
-; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%t1 = icmp ugt <32 x i8> %a, %b
%t2 = extractelement <32 x i1> %t1, i32 2
@@ -1424,9 +1535,7 @@ define zeroext i8 @test_extractelement_v64i1(<64 x i8> %a, <64 x i8> %b) {
; KNL-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
; KNL-NEXT: vpextrb $15, %xmm0, %eax
-; KNL-NEXT: testb $1, %al
-; KNL-NEXT: sete %al
-; KNL-NEXT: addb $3, %al
+; KNL-NEXT: addb $4, %al
; KNL-NEXT: movzbl %al, %eax
; KNL-NEXT: retq
;
@@ -1434,15 +1543,996 @@ define zeroext i8 @test_extractelement_v64i1(<64 x i8> %a, <64 x i8> %b) {
; SKX: ## BB#0:
; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
; SKX-NEXT: kshiftrq $63, %k0, %k0
-; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: testb %al, %al
-; SKX-NEXT: sete %al
-; SKX-NEXT: addb $3, %al
+; SKX-NEXT: cmpb $1, %al
+; SKX-NEXT: movb $3, %al
+; SKX-NEXT: adcb $0, %al
; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%t1 = icmp ugt <64 x i8> %a, %b
%t2 = extractelement <64 x i1> %t1, i32 63
%res = select i1 %t2, i8 3, i8 4
ret i8 %res
}
+
+define zeroext i8 @extractelement_v64i1_alt(<64 x i8> %a, <64 x i8> %b) {
+; KNL-LABEL: extractelement_v64i1_alt:
+; KNL: ## BB#0:
+; KNL-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; KNL-NEXT: vpxor %ymm0, %ymm3, %ymm2
+; KNL-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; KNL-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
+; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
+; KNL-NEXT: vpextrb $15, %xmm0, %eax
+; KNL-NEXT: addb $4, %al
+; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: extractelement_v64i1_alt:
+; SKX: ## BB#0:
+; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
+; SKX-NEXT: kshiftrq $63, %k0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: cmpb $1, %al
+; SKX-NEXT: movb $3, %al
+; SKX-NEXT: adcb $0, %al
+; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %t1 = icmp ugt <64 x i8> %a, %b
+ %t2 = extractelement <64 x i1> %t1, i32 63
+ %sext = sext i1 %t2 to i8
+ %res = add i8 %sext, 4
+ ret i8 %res
+}
+
+define i64 @test_extractelement_variable_v2i64(<2 x i64> %t1, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v2i64:
+; KNL: ## BB#0:
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; KNL-NEXT: andl $1, %edi
+; KNL-NEXT: movq -24(%rsp,%rdi,8), %rax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v2i64:
+; SKX: ## BB#0:
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; SKX-NEXT: andl $1, %edi
+; SKX-NEXT: movq -24(%rsp,%rdi,8), %rax
+; SKX-NEXT: retq
+ %t2 = extractelement <2 x i64> %t1, i32 %index
+ ret i64 %t2
+}
+
+define i64 @test_extractelement_variable_v4i64(<4 x i64> %t1, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v4i64:
+; KNL: ## BB#0:
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: Lcfi3:
+; KNL-NEXT: .cfi_def_cfa_offset 16
+; KNL-NEXT: Lcfi4:
+; KNL-NEXT: .cfi_offset %rbp, -16
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: Lcfi5:
+; KNL-NEXT: .cfi_def_cfa_register %rbp
+; KNL-NEXT: andq $-32, %rsp
+; KNL-NEXT: subq $64, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %ymm0, (%rsp)
+; KNL-NEXT: andl $3, %edi
+; KNL-NEXT: movq (%rsp,%rdi,8), %rax
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v4i64:
+; SKX: ## BB#0:
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: Lcfi0:
+; SKX-NEXT: .cfi_def_cfa_offset 16
+; SKX-NEXT: Lcfi1:
+; SKX-NEXT: .cfi_offset %rbp, -16
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: Lcfi2:
+; SKX-NEXT: .cfi_def_cfa_register %rbp
+; SKX-NEXT: andq $-32, %rsp
+; SKX-NEXT: subq $64, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovaps %ymm0, (%rsp)
+; SKX-NEXT: andl $3, %edi
+; SKX-NEXT: movq (%rsp,%rdi,8), %rax
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %t2 = extractelement <4 x i64> %t1, i32 %index
+ ret i64 %t2
+}
+
+define i64 @test_extractelement_variable_v8i64(<8 x i64> %t1, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v8i64:
+; KNL: ## BB#0:
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: Lcfi6:
+; KNL-NEXT: .cfi_def_cfa_offset 16
+; KNL-NEXT: Lcfi7:
+; KNL-NEXT: .cfi_offset %rbp, -16
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: Lcfi8:
+; KNL-NEXT: .cfi_def_cfa_register %rbp
+; KNL-NEXT: andq $-64, %rsp
+; KNL-NEXT: subq $128, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %zmm0, (%rsp)
+; KNL-NEXT: andl $7, %edi
+; KNL-NEXT: movq (%rsp,%rdi,8), %rax
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v8i64:
+; SKX: ## BB#0:
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: Lcfi3:
+; SKX-NEXT: .cfi_def_cfa_offset 16
+; SKX-NEXT: Lcfi4:
+; SKX-NEXT: .cfi_offset %rbp, -16
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: Lcfi5:
+; SKX-NEXT: .cfi_def_cfa_register %rbp
+; SKX-NEXT: andq $-64, %rsp
+; SKX-NEXT: subq $128, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovaps %zmm0, (%rsp)
+; SKX-NEXT: andl $7, %edi
+; SKX-NEXT: movq (%rsp,%rdi,8), %rax
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %t2 = extractelement <8 x i64> %t1, i32 %index
+ ret i64 %t2
+}
+
+define double @test_extractelement_variable_v2f64(<2 x double> %t1, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v2f64:
+; KNL: ## BB#0:
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; KNL-NEXT: andl $1, %edi
+; KNL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v2f64:
+; SKX: ## BB#0:
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; SKX-NEXT: andl $1, %edi
+; SKX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; SKX-NEXT: retq
+ %t2 = extractelement <2 x double> %t1, i32 %index
+ ret double %t2
+}
+
+define double @test_extractelement_variable_v4f64(<4 x double> %t1, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v4f64:
+; KNL: ## BB#0:
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: Lcfi9:
+; KNL-NEXT: .cfi_def_cfa_offset 16
+; KNL-NEXT: Lcfi10:
+; KNL-NEXT: .cfi_offset %rbp, -16
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: Lcfi11:
+; KNL-NEXT: .cfi_def_cfa_register %rbp
+; KNL-NEXT: andq $-32, %rsp
+; KNL-NEXT: subq $64, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %ymm0, (%rsp)
+; KNL-NEXT: andl $3, %edi
+; KNL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v4f64:
+; SKX: ## BB#0:
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: Lcfi6:
+; SKX-NEXT: .cfi_def_cfa_offset 16
+; SKX-NEXT: Lcfi7:
+; SKX-NEXT: .cfi_offset %rbp, -16
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: Lcfi8:
+; SKX-NEXT: .cfi_def_cfa_register %rbp
+; SKX-NEXT: andq $-32, %rsp
+; SKX-NEXT: subq $64, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovaps %ymm0, (%rsp)
+; SKX-NEXT: andl $3, %edi
+; SKX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %t2 = extractelement <4 x double> %t1, i32 %index
+ ret double %t2
+}
+
+define double @test_extractelement_variable_v8f64(<8 x double> %t1, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v8f64:
+; KNL: ## BB#0:
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: Lcfi12:
+; KNL-NEXT: .cfi_def_cfa_offset 16
+; KNL-NEXT: Lcfi13:
+; KNL-NEXT: .cfi_offset %rbp, -16
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: Lcfi14:
+; KNL-NEXT: .cfi_def_cfa_register %rbp
+; KNL-NEXT: andq $-64, %rsp
+; KNL-NEXT: subq $128, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %zmm0, (%rsp)
+; KNL-NEXT: andl $7, %edi
+; KNL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v8f64:
+; SKX: ## BB#0:
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: Lcfi9:
+; SKX-NEXT: .cfi_def_cfa_offset 16
+; SKX-NEXT: Lcfi10:
+; SKX-NEXT: .cfi_offset %rbp, -16
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: Lcfi11:
+; SKX-NEXT: .cfi_def_cfa_register %rbp
+; SKX-NEXT: andq $-64, %rsp
+; SKX-NEXT: subq $128, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovaps %zmm0, (%rsp)
+; SKX-NEXT: andl $7, %edi
+; SKX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %t2 = extractelement <8 x double> %t1, i32 %index
+ ret double %t2
+}
+
+define i32 @test_extractelement_variable_v4i32(<4 x i32> %t1, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v4i32:
+; KNL: ## BB#0:
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; KNL-NEXT: andl $3, %edi
+; KNL-NEXT: movl -24(%rsp,%rdi,4), %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v4i32:
+; SKX: ## BB#0:
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; SKX-NEXT: andl $3, %edi
+; SKX-NEXT: movl -24(%rsp,%rdi,4), %eax
+; SKX-NEXT: retq
+ %t2 = extractelement <4 x i32> %t1, i32 %index
+ ret i32 %t2
+}
+
+define i32 @test_extractelement_variable_v8i32(<8 x i32> %t1, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v8i32:
+; KNL: ## BB#0:
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: Lcfi15:
+; KNL-NEXT: .cfi_def_cfa_offset 16
+; KNL-NEXT: Lcfi16:
+; KNL-NEXT: .cfi_offset %rbp, -16
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: Lcfi17:
+; KNL-NEXT: .cfi_def_cfa_register %rbp
+; KNL-NEXT: andq $-32, %rsp
+; KNL-NEXT: subq $64, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %ymm0, (%rsp)
+; KNL-NEXT: andl $7, %edi
+; KNL-NEXT: movl (%rsp,%rdi,4), %eax
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v8i32:
+; SKX: ## BB#0:
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: Lcfi12:
+; SKX-NEXT: .cfi_def_cfa_offset 16
+; SKX-NEXT: Lcfi13:
+; SKX-NEXT: .cfi_offset %rbp, -16
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: Lcfi14:
+; SKX-NEXT: .cfi_def_cfa_register %rbp
+; SKX-NEXT: andq $-32, %rsp
+; SKX-NEXT: subq $64, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovaps %ymm0, (%rsp)
+; SKX-NEXT: andl $7, %edi
+; SKX-NEXT: movl (%rsp,%rdi,4), %eax
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %t2 = extractelement <8 x i32> %t1, i32 %index
+ ret i32 %t2
+}
+
+define i32 @test_extractelement_variable_v16i32(<16 x i32> %t1, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v16i32:
+; KNL: ## BB#0:
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: Lcfi18:
+; KNL-NEXT: .cfi_def_cfa_offset 16
+; KNL-NEXT: Lcfi19:
+; KNL-NEXT: .cfi_offset %rbp, -16
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: Lcfi20:
+; KNL-NEXT: .cfi_def_cfa_register %rbp
+; KNL-NEXT: andq $-64, %rsp
+; KNL-NEXT: subq $128, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %zmm0, (%rsp)
+; KNL-NEXT: andl $15, %edi
+; KNL-NEXT: movl (%rsp,%rdi,4), %eax
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v16i32:
+; SKX: ## BB#0:
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: Lcfi15:
+; SKX-NEXT: .cfi_def_cfa_offset 16
+; SKX-NEXT: Lcfi16:
+; SKX-NEXT: .cfi_offset %rbp, -16
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: Lcfi17:
+; SKX-NEXT: .cfi_def_cfa_register %rbp
+; SKX-NEXT: andq $-64, %rsp
+; SKX-NEXT: subq $128, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovaps %zmm0, (%rsp)
+; SKX-NEXT: andl $15, %edi
+; SKX-NEXT: movl (%rsp,%rdi,4), %eax
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %t2 = extractelement <16 x i32> %t1, i32 %index
+ ret i32 %t2
+}
+
+define float @test_extractelement_variable_v4f32(<4 x float> %t1, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v4f32:
+; KNL: ## BB#0:
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; KNL-NEXT: andl $3, %edi
+; KNL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v4f32:
+; SKX: ## BB#0:
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; SKX-NEXT: andl $3, %edi
+; SKX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SKX-NEXT: retq
+ %t2 = extractelement <4 x float> %t1, i32 %index
+ ret float %t2
+}
+
+define float @test_extractelement_variable_v8f32(<8 x float> %t1, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v8f32:
+; KNL: ## BB#0:
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: Lcfi21:
+; KNL-NEXT: .cfi_def_cfa_offset 16
+; KNL-NEXT: Lcfi22:
+; KNL-NEXT: .cfi_offset %rbp, -16
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: Lcfi23:
+; KNL-NEXT: .cfi_def_cfa_register %rbp
+; KNL-NEXT: andq $-32, %rsp
+; KNL-NEXT: subq $64, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %ymm0, (%rsp)
+; KNL-NEXT: andl $7, %edi
+; KNL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v8f32:
+; SKX: ## BB#0:
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: Lcfi18:
+; SKX-NEXT: .cfi_def_cfa_offset 16
+; SKX-NEXT: Lcfi19:
+; SKX-NEXT: .cfi_offset %rbp, -16
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: Lcfi20:
+; SKX-NEXT: .cfi_def_cfa_register %rbp
+; SKX-NEXT: andq $-32, %rsp
+; SKX-NEXT: subq $64, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovaps %ymm0, (%rsp)
+; SKX-NEXT: andl $7, %edi
+; SKX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %t2 = extractelement <8 x float> %t1, i32 %index
+ ret float %t2
+}
+
+define float @test_extractelement_variable_v16f32(<16 x float> %t1, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v16f32:
+; KNL: ## BB#0:
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: Lcfi24:
+; KNL-NEXT: .cfi_def_cfa_offset 16
+; KNL-NEXT: Lcfi25:
+; KNL-NEXT: .cfi_offset %rbp, -16
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: Lcfi26:
+; KNL-NEXT: .cfi_def_cfa_register %rbp
+; KNL-NEXT: andq $-64, %rsp
+; KNL-NEXT: subq $128, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %zmm0, (%rsp)
+; KNL-NEXT: andl $15, %edi
+; KNL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v16f32:
+; SKX: ## BB#0:
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: Lcfi21:
+; SKX-NEXT: .cfi_def_cfa_offset 16
+; SKX-NEXT: Lcfi22:
+; SKX-NEXT: .cfi_offset %rbp, -16
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: Lcfi23:
+; SKX-NEXT: .cfi_def_cfa_register %rbp
+; SKX-NEXT: andq $-64, %rsp
+; SKX-NEXT: subq $128, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovaps %zmm0, (%rsp)
+; SKX-NEXT: andl $15, %edi
+; SKX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %t2 = extractelement <16 x float> %t1, i32 %index
+ ret float %t2
+}
+
+define i16 @test_extractelement_variable_v8i16(<8 x i16> %t1, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v8i16:
+; KNL: ## BB#0:
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; KNL-NEXT: andl $7, %edi
+; KNL-NEXT: movzwl -24(%rsp,%rdi,2), %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v8i16:
+; SKX: ## BB#0:
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovdqu %xmm0, -{{[0-9]+}}(%rsp)
+; SKX-NEXT: andl $7, %edi
+; SKX-NEXT: movzwl -24(%rsp,%rdi,2), %eax
+; SKX-NEXT: retq
+ %t2 = extractelement <8 x i16> %t1, i32 %index
+ ret i16 %t2
+}
+
+define i16 @test_extractelement_variable_v16i16(<16 x i16> %t1, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v16i16:
+; KNL: ## BB#0:
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: Lcfi27:
+; KNL-NEXT: .cfi_def_cfa_offset 16
+; KNL-NEXT: Lcfi28:
+; KNL-NEXT: .cfi_offset %rbp, -16
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: Lcfi29:
+; KNL-NEXT: .cfi_def_cfa_register %rbp
+; KNL-NEXT: andq $-32, %rsp
+; KNL-NEXT: subq $64, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %ymm0, (%rsp)
+; KNL-NEXT: andl $15, %edi
+; KNL-NEXT: movzwl (%rsp,%rdi,2), %eax
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v16i16:
+; SKX: ## BB#0:
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: Lcfi24:
+; SKX-NEXT: .cfi_def_cfa_offset 16
+; SKX-NEXT: Lcfi25:
+; SKX-NEXT: .cfi_offset %rbp, -16
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: Lcfi26:
+; SKX-NEXT: .cfi_def_cfa_register %rbp
+; SKX-NEXT: andq $-32, %rsp
+; SKX-NEXT: subq $64, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovdqu %ymm0, (%rsp)
+; SKX-NEXT: andl $15, %edi
+; SKX-NEXT: movzwl (%rsp,%rdi,2), %eax
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %t2 = extractelement <16 x i16> %t1, i32 %index
+ ret i16 %t2
+}
+
+define i16 @test_extractelement_variable_v32i16(<32 x i16> %t1, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v32i16:
+; KNL: ## BB#0:
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: Lcfi30:
+; KNL-NEXT: .cfi_def_cfa_offset 16
+; KNL-NEXT: Lcfi31:
+; KNL-NEXT: .cfi_offset %rbp, -16
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: Lcfi32:
+; KNL-NEXT: .cfi_def_cfa_register %rbp
+; KNL-NEXT: andq $-64, %rsp
+; KNL-NEXT: subq $128, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; KNL-NEXT: vmovaps %ymm0, (%rsp)
+; KNL-NEXT: andl $31, %edi
+; KNL-NEXT: movzwl (%rsp,%rdi,2), %eax
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v32i16:
+; SKX: ## BB#0:
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: Lcfi27:
+; SKX-NEXT: .cfi_def_cfa_offset 16
+; SKX-NEXT: Lcfi28:
+; SKX-NEXT: .cfi_offset %rbp, -16
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: Lcfi29:
+; SKX-NEXT: .cfi_def_cfa_register %rbp
+; SKX-NEXT: andq $-64, %rsp
+; SKX-NEXT: subq $128, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovdqu16 %zmm0, (%rsp)
+; SKX-NEXT: andl $31, %edi
+; SKX-NEXT: movzwl (%rsp,%rdi,2), %eax
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %t2 = extractelement <32 x i16> %t1, i32 %index
+ ret i16 %t2
+}
+
+define i8 @test_extractelement_variable_v16i8(<16 x i8> %t1, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v16i8:
+; KNL: ## BB#0:
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; KNL-NEXT: andl $15, %edi
+; KNL-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
+; KNL-NEXT: movb (%rdi,%rax), %al
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v16i8:
+; SKX: ## BB#0:
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovdqu %xmm0, -{{[0-9]+}}(%rsp)
+; SKX-NEXT: andl $15, %edi
+; SKX-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
+; SKX-NEXT: movb (%rdi,%rax), %al
+; SKX-NEXT: retq
+ %t2 = extractelement <16 x i8> %t1, i32 %index
+ ret i8 %t2
+}
+
+define i8 @test_extractelement_variable_v32i8(<32 x i8> %t1, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v32i8:
+; KNL: ## BB#0:
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: Lcfi33:
+; KNL-NEXT: .cfi_def_cfa_offset 16
+; KNL-NEXT: Lcfi34:
+; KNL-NEXT: .cfi_offset %rbp, -16
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: Lcfi35:
+; KNL-NEXT: .cfi_def_cfa_register %rbp
+; KNL-NEXT: andq $-32, %rsp
+; KNL-NEXT: subq $64, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %ymm0, (%rsp)
+; KNL-NEXT: andl $31, %edi
+; KNL-NEXT: movq %rsp, %rax
+; KNL-NEXT: movb (%rdi,%rax), %al
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v32i8:
+; SKX: ## BB#0:
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: Lcfi30:
+; SKX-NEXT: .cfi_def_cfa_offset 16
+; SKX-NEXT: Lcfi31:
+; SKX-NEXT: .cfi_offset %rbp, -16
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: Lcfi32:
+; SKX-NEXT: .cfi_def_cfa_register %rbp
+; SKX-NEXT: andq $-32, %rsp
+; SKX-NEXT: subq $64, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovdqu %ymm0, (%rsp)
+; SKX-NEXT: andl $31, %edi
+; SKX-NEXT: movq %rsp, %rax
+; SKX-NEXT: movb (%rdi,%rax), %al
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+
+ %t2 = extractelement <32 x i8> %t1, i32 %index
+ ret i8 %t2
+}
+
+define i8 @test_extractelement_variable_v64i8(<64 x i8> %t1, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v64i8:
+; KNL: ## BB#0:
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: Lcfi36:
+; KNL-NEXT: .cfi_def_cfa_offset 16
+; KNL-NEXT: Lcfi37:
+; KNL-NEXT: .cfi_offset %rbp, -16
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: Lcfi38:
+; KNL-NEXT: .cfi_def_cfa_register %rbp
+; KNL-NEXT: andq $-64, %rsp
+; KNL-NEXT: subq $128, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; KNL-NEXT: vmovaps %ymm0, (%rsp)
+; KNL-NEXT: andl $63, %edi
+; KNL-NEXT: movq %rsp, %rax
+; KNL-NEXT: movb (%rdi,%rax), %al
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v64i8:
+; SKX: ## BB#0:
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: Lcfi33:
+; SKX-NEXT: .cfi_def_cfa_offset 16
+; SKX-NEXT: Lcfi34:
+; SKX-NEXT: .cfi_offset %rbp, -16
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: Lcfi35:
+; SKX-NEXT: .cfi_def_cfa_register %rbp
+; SKX-NEXT: andq $-64, %rsp
+; SKX-NEXT: subq $128, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vmovdqu8 %zmm0, (%rsp)
+; SKX-NEXT: andl $63, %edi
+; SKX-NEXT: movq %rsp, %rax
+; SKX-NEXT: movb (%rdi,%rax), %al
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+
+ %t2 = extractelement <64 x i8> %t1, i32 %index
+ ret i8 %t2
+}
+
+define i8 @test_extractelement_variable_v64i8_indexi8(<64 x i8> %t1, i8 %index) {
+; KNL-LABEL: test_extractelement_variable_v64i8_indexi8:
+; KNL: ## BB#0:
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: Lcfi39:
+; KNL-NEXT: .cfi_def_cfa_offset 16
+; KNL-NEXT: Lcfi40:
+; KNL-NEXT: .cfi_offset %rbp, -16
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: Lcfi41:
+; KNL-NEXT: .cfi_def_cfa_register %rbp
+; KNL-NEXT: andq $-64, %rsp
+; KNL-NEXT: subq $128, %rsp
+; KNL-NEXT: addb %dil, %dil
+; KNL-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; KNL-NEXT: vmovaps %ymm0, (%rsp)
+; KNL-NEXT: movzbl %dil, %eax
+; KNL-NEXT: andl $63, %eax
+; KNL-NEXT: movq %rsp, %rcx
+; KNL-NEXT: movb (%rax,%rcx), %al
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v64i8_indexi8:
+; SKX: ## BB#0:
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: Lcfi36:
+; SKX-NEXT: .cfi_def_cfa_offset 16
+; SKX-NEXT: Lcfi37:
+; SKX-NEXT: .cfi_offset %rbp, -16
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: Lcfi38:
+; SKX-NEXT: .cfi_def_cfa_register %rbp
+; SKX-NEXT: andq $-64, %rsp
+; SKX-NEXT: subq $128, %rsp
+; SKX-NEXT: addb %dil, %dil
+; SKX-NEXT: vmovdqu8 %zmm0, (%rsp)
+; SKX-NEXT: movzbl %dil, %eax
+; SKX-NEXT: andl $63, %eax
+; SKX-NEXT: movq %rsp, %rcx
+; SKX-NEXT: movb (%rax,%rcx), %al
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+
+ %i = add i8 %index, %index
+ %t2 = extractelement <64 x i8> %t1, i8 %i
+ ret i8 %t2
+}
+
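+; All of the variable-index cases above share one lowering: spill the vector
+; to a stack slot aligned to its own size, mask the index down to the element
+; count, and do a scalar load from the slot. As a rough IR-level sketch of
+; that expansion (illustrative only; the %slot/%safe names are made up and
+; not part of this patch):
+;
+;   %slot = alloca <8 x i32>, align 32       ; size-aligned spill slot
+;   store <8 x i32> %vec, <8 x i32>* %slot
+;   %safe = and i32 %index, 7                ; clamp the index to 0..7
+;   %p = getelementptr inbounds <8 x i32>, <8 x i32>* %slot, i32 0, i32 %safe
+;   %elt = load i32, i32* %p                 ; the selected element
+;
+; The 'andq $-32, %rsp' realignment and 'andl $7, %edi' in the checks above
+; are exactly this pattern at the assembly level.
+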
+define zeroext i8 @test_extractelement_variable_v2i1(<2 x i64> %a, <2 x i64> %b, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v2i1:
+; KNL: ## BB#0:
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; KNL-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; KNL-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
+; KNL-NEXT: andl $1, %edi
+; KNL-NEXT: movl -24(%rsp,%rdi,8), %eax
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v2i1:
+; SKX: ## BB#0:
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vpcmpnleuq %xmm1, %xmm0, %k0
+; SKX-NEXT: vpmovm2q %k0, %xmm0
+; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SKX-NEXT: andl $1, %edi
+; SKX-NEXT: movl -24(%rsp,%rdi,8), %eax
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: retq
+ %t1 = icmp ugt <2 x i64> %a, %b
+ %t2 = extractelement <2 x i1> %t1, i32 %index
+ %res = zext i1 %t2 to i8
+ ret i8 %res
+}
+
+define zeroext i8 @test_extractelement_variable_v4i1(<4 x i32> %a, <4 x i32> %b, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v4i1:
+; KNL: ## BB#0:
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; KNL-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; KNL-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
+; KNL-NEXT: andl $3, %edi
+; KNL-NEXT: movl -24(%rsp,%rdi,4), %eax
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v4i1:
+; SKX: ## BB#0:
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vpcmpnleud %xmm1, %xmm0, %k0
+; SKX-NEXT: vpmovm2d %k0, %xmm0
+; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SKX-NEXT: andl $3, %edi
+; SKX-NEXT: movl -24(%rsp,%rdi,4), %eax
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: retq
+ %t1 = icmp ugt <4 x i32> %a, %b
+ %t2 = extractelement <4 x i1> %t1, i32 %index
+ %res = zext i1 %t2 to i8
+ ret i8 %res
+}
+
+define zeroext i8 @test_extractelement_variable_v8i1(<8 x i32> %a, <8 x i32> %b, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v8i1:
+; KNL: ## BB#0:
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: Lcfi42:
+; KNL-NEXT: .cfi_def_cfa_offset 16
+; KNL-NEXT: Lcfi43:
+; KNL-NEXT: .cfi_offset %rbp, -16
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: Lcfi44:
+; KNL-NEXT: .cfi_def_cfa_register %rbp
+; KNL-NEXT: andq $-64, %rsp
+; KNL-NEXT: subq $128, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
+; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vmovdqa64 %zmm0, (%rsp)
+; KNL-NEXT: andl $7, %edi
+; KNL-NEXT: movl (%rsp,%rdi,8), %eax
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v8i1:
+; SKX: ## BB#0:
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: Lcfi39:
+; SKX-NEXT: .cfi_def_cfa_offset 16
+; SKX-NEXT: Lcfi40:
+; SKX-NEXT: .cfi_offset %rbp, -16
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: Lcfi41:
+; SKX-NEXT: .cfi_def_cfa_register %rbp
+; SKX-NEXT: andq $-64, %rsp
+; SKX-NEXT: subq $128, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vpcmpnleud %ymm1, %ymm0, %k0
+; SKX-NEXT: vpmovm2q %k0, %zmm0
+; SKX-NEXT: vmovdqa64 %zmm0, (%rsp)
+; SKX-NEXT: andl $7, %edi
+; SKX-NEXT: movl (%rsp,%rdi,8), %eax
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %t1 = icmp ugt <8 x i32> %a, %b
+ %t2 = extractelement <8 x i1> %t1, i32 %index
+ %res = zext i1 %t2 to i8
+ ret i8 %res
+}
+
+define zeroext i8 @test_extractelement_variable_v16i1(<16 x i32> %a, <16 x i32> %b, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v16i1:
+; KNL: ## BB#0:
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: Lcfi45:
+; KNL-NEXT: .cfi_def_cfa_offset 16
+; KNL-NEXT: Lcfi46:
+; KNL-NEXT: .cfi_offset %rbp, -16
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: Lcfi47:
+; KNL-NEXT: .cfi_def_cfa_register %rbp
+; KNL-NEXT: andq $-64, %rsp
+; KNL-NEXT: subq $128, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vmovdqa32 %zmm0, (%rsp)
+; KNL-NEXT: andl $15, %edi
+; KNL-NEXT: movl (%rsp,%rdi,4), %eax
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v16i1:
+; SKX: ## BB#0:
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: Lcfi42:
+; SKX-NEXT: .cfi_def_cfa_offset 16
+; SKX-NEXT: Lcfi43:
+; SKX-NEXT: .cfi_offset %rbp, -16
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: Lcfi44:
+; SKX-NEXT: .cfi_def_cfa_register %rbp
+; SKX-NEXT: andq $-64, %rsp
+; SKX-NEXT: subq $128, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
+; SKX-NEXT: vpmovm2d %k0, %zmm0
+; SKX-NEXT: vmovdqa32 %zmm0, (%rsp)
+; SKX-NEXT: andl $15, %edi
+; SKX-NEXT: movl (%rsp,%rdi,4), %eax
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %t1 = icmp ugt <16 x i32> %a, %b
+ %t2 = extractelement <16 x i1> %t1, i32 %index
+ %res = zext i1 %t2 to i8
+ ret i8 %res
+}
+
+define zeroext i8 @test_extractelement_variable_v32i1(<32 x i8> %a, <32 x i8> %b, i32 %index) {
+; KNL-LABEL: test_extractelement_variable_v32i1:
+; KNL: ## BB#0:
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: Lcfi48:
+; KNL-NEXT: .cfi_def_cfa_offset 16
+; KNL-NEXT: Lcfi49:
+; KNL-NEXT: .cfi_offset %rbp, -16
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: Lcfi50:
+; KNL-NEXT: .cfi_def_cfa_register %rbp
+; KNL-NEXT: andq $-32, %rsp
+; KNL-NEXT: subq $64, %rsp
+; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; KNL-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; KNL-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; KNL-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
+; KNL-NEXT: vmovdqa %ymm0, (%rsp)
+; KNL-NEXT: andl $31, %edi
+; KNL-NEXT: movq %rsp, %rax
+; KNL-NEXT: movb (%rdi,%rax), %al
+; KNL-NEXT: andb $1, %al
+; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_extractelement_variable_v32i1:
+; SKX: ## BB#0:
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: Lcfi45:
+; SKX-NEXT: .cfi_def_cfa_offset 16
+; SKX-NEXT: Lcfi46:
+; SKX-NEXT: .cfi_offset %rbp, -16
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: Lcfi47:
+; SKX-NEXT: .cfi_def_cfa_register %rbp
+; SKX-NEXT: andq $-64, %rsp
+; SKX-NEXT: subq $128, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vpcmpnleub %ymm1, %ymm0, %k0
+; SKX-NEXT: vpmovm2w %k0, %zmm0
+; SKX-NEXT: vmovdqu16 %zmm0, (%rsp)
+; SKX-NEXT: andl $31, %edi
+; SKX-NEXT: movzwl (%rsp,%rdi,2), %eax
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %t1 = icmp ugt <32 x i8> %a, %b
+ %t2 = extractelement <32 x i1> %t1, i32 %index
+ %res = zext i1 %t2 to i8
+ ret i8 %res
+}
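+
+; For the i1-vector cases the same spill-and-index scheme applies, but the
+; compare result must first exist as a real in-memory vector: SKX widens the
+; k-register with vpmovm2{b,w,d,q}, while KNL either keeps the compare in a
+; plain xmm/ymm vector or materializes all-ones under the mask with
+; vpternlog. The loaded element is then truncated back to i1 by the final
+; 'and $1'.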
+
diff --git a/test/CodeGen/X86/avx512-insert-extract_i1.ll b/test/CodeGen/X86/avx512-insert-extract_i1.ll
new file mode 100644
index 000000000000..a1d1a7dae190
--- /dev/null
+++ b/test/CodeGen/X86/avx512-insert-extract_i1.ll
@@ -0,0 +1,37 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck --check-prefix=SKX --check-prefix=SKX_ONLY %s
+
+; TODO - fix the failure on KNL and move this test to avx512-insert-extract.ll
+
+define zeroext i8 @test_extractelement_variable_v64i1(<64 x i8> %a, <64 x i8> %b, i32 %index) {
+; SKX-LABEL: test_extractelement_variable_v64i1:
+; SKX: ## BB#0:
+; SKX-NEXT: pushq %rbp
+; SKX-NEXT: Lcfi0:
+; SKX-NEXT: .cfi_def_cfa_offset 16
+; SKX-NEXT: Lcfi1:
+; SKX-NEXT: .cfi_offset %rbp, -16
+; SKX-NEXT: movq %rsp, %rbp
+; SKX-NEXT: Lcfi2:
+; SKX-NEXT: .cfi_def_cfa_register %rbp
+; SKX-NEXT: andq $-64, %rsp
+; SKX-NEXT: subq $128, %rsp
+; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
+; SKX-NEXT: vpmovm2b %k0, %zmm0
+; SKX-NEXT: vmovdqu8 %zmm0, (%rsp)
+; SKX-NEXT: andl $63, %edi
+; SKX-NEXT: movq %rsp, %rax
+; SKX-NEXT: movb (%rdi,%rax), %al
+; SKX-NEXT: andb $1, %al
+; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: movq %rbp, %rsp
+; SKX-NEXT: popq %rbp
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %t1 = icmp ugt <64 x i8> %a, %b
+ %t2 = extractelement <64 x i1> %t1, i32 %index
+ %res = zext i1 %t2 to i8
+ ret i8 %res
+}
+
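+; Note: the SKX check lines above are machine-generated (see the NOTE at the
+; top of the file); when the expected assembly changes they are refreshed by
+; rerunning utils/update_llc_test_checks.py on this file rather than edited
+; by hand.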
diff --git a/test/CodeGen/X86/avx512-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
index 8590d641a4c5..0e7a8d25c56f 100644
--- a/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
@@ -504,6 +504,7 @@ define i16 @test_pcmpeq_d(<16 x i32> %a, <16 x i32> %b) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32> %a, <16 x i32> %b, i16 -1)
ret i16 %res
@@ -515,6 +516,7 @@ define i16 @test_mask_pcmpeq_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
ret i16 %res
@@ -527,6 +529,7 @@ define i8 @test_pcmpeq_q(<8 x i64> %a, <8 x i64> %b) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64> %a, <8 x i64> %b, i8 -1)
ret i8 %res
@@ -538,6 +541,7 @@ define i8 @test_mask_pcmpeq_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
ret i8 %res
@@ -550,6 +554,7 @@ define i16 @test_pcmpgt_d(<16 x i32> %a, <16 x i32> %b) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32> %a, <16 x i32> %b, i16 -1)
ret i16 %res
@@ -561,6 +566,7 @@ define i16 @test_mask_pcmpgt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
ret i16 %res
@@ -573,6 +579,7 @@ define i8 @test_pcmpgt_q(<8 x i64> %a, <8 x i64> %b) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64> %a, <8 x i64> %b, i8 -1)
ret i8 %res
@@ -584,6 +591,7 @@ define i8 @test_mask_pcmpgt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
ret i8 %res
@@ -3053,3 +3061,14 @@ define <8 x i64>@test_int_x86_avx512_mask_inserti64x4_512(<8 x i64> %x0, <4 x i6
%res4 = add <8 x i64> %res2, %res3
ret <8 x i64> %res4
}
+
+define <8 x i64> @test_x86_avx512_movntdqa(i8* %a0) {
+; CHECK-LABEL: test_x86_avx512_movntdqa:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmovntdqa (%rdi), %zmm0
+; CHECK-NEXT: retq
+ %res = call <8 x i64> @llvm.x86.avx512.movntdqa(i8* %a0)
+ ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.movntdqa(i8*) nounwind readonly
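+
+; llvm.x86.avx512.movntdqa maps one-to-one onto VMOVNTDQA, the non-temporal
+; (streaming) aligned load: a hint to bypass the cache hierarchy for data
+; that will not be reused. As with the narrower vmovntdqa forms, the 512-bit
+; variant requires its memory operand to be 64-byte aligned.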
diff --git a/test/CodeGen/X86/avx512-intrinsics.ll b/test/CodeGen/X86/avx512-intrinsics.ll
index 3015a2b499ff..cc5e9e038e0b 100644
--- a/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/test/CodeGen/X86/avx512-intrinsics.ll
@@ -21,9 +21,9 @@ define i32 @test_kortestc(i16 %a0, i16 %a1) {
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %esi, %k0
; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: kortestw %k0, %k1
-; CHECK-NEXT: sbbl %eax, %eax
-; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: setb %al
; CHECK-NEXT: retq
%res = call i32 @llvm.x86.avx512.kortestc.w(i16 %a0, i16 %a1)
ret i32 %res
@@ -33,19 +33,38 @@ declare i16 @llvm.x86.avx512.kand.w(i16, i16) nounwind readnone
define i16 @test_kand(i16 %a0, i16 %a1) {
; CHECK-LABEL: test_kand:
; CHECK: ## BB#0:
-; CHECK-NEXT: movw $8, %ax
-; CHECK-NEXT: kmovw %eax, %k0
+; CHECK-NEXT: kmovw %esi, %k0
; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: movw $8, %ax
+; CHECK-NEXT: kmovw %eax, %k2
; CHECK-NEXT: kandw %k0, %k1, %k0
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: kandw %k1, %k0, %k0
+; CHECK-NEXT: kandw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kand.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kand.w(i16 %t1, i16 %a1)
ret i16 %t2
}
+declare i16 @llvm.x86.avx512.kandn.w(i16, i16) nounwind readnone
+define i16 @test_kandn(i16 %a0, i16 %a1) {
+; CHECK-LABEL: test_kandn:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k0
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: movw $8, %ax
+; CHECK-NEXT: kmovw %eax, %k2
+; CHECK-NEXT: kandnw %k2, %k1, %k1
+; CHECK-NEXT: kandnw %k0, %k1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: retq
+ %t1 = call i16 @llvm.x86.avx512.kandn.w(i16 %a0, i16 8)
+ %t2 = call i16 @llvm.x86.avx512.kandn.w(i16 %t1, i16 %a1)
+ ret i16 %t2
+}
+
declare i16 @llvm.x86.avx512.knot.w(i16) nounwind readnone
define i16 @test_knot(i16 %a0) {
; CHECK-LABEL: test_knot:
@@ -53,11 +72,30 @@ define i16 @test_knot(i16 %a0) {
; CHECK-NEXT: kmovw %edi, %k0
; CHECK-NEXT: knotw %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.knot.w(i16 %a0)
ret i16 %res
}
+declare i16 @llvm.x86.avx512.kor.w(i16, i16) nounwind readnone
+define i16 @test_kor(i16 %a0, i16 %a1) {
+; CHECK-LABEL: test_kor:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k0
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: movw $8, %ax
+; CHECK-NEXT: kmovw %eax, %k2
+; CHECK-NEXT: korw %k0, %k1, %k0
+; CHECK-NEXT: korw %k0, %k2, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: retq
+ %t1 = call i16 @llvm.x86.avx512.kor.w(i16 %a0, i16 8)
+ %t2 = call i16 @llvm.x86.avx512.kor.w(i16 %t1, i16 %a1)
+ ret i16 %t2
+}
+
declare i16 @llvm.x86.avx512.kunpck.bw(i16, i16) nounwind readnone
define i16 @unpckbw_test(i16 %a0, i16 %a1) {
@@ -67,11 +105,48 @@ define i16 @unpckbw_test(i16 %a0, i16 %a1) {
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: kunpckbw %k1, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.kunpck.bw(i16 %a0, i16 %a1)
ret i16 %res
}
+declare i16 @llvm.x86.avx512.kxnor.w(i16, i16) nounwind readnone
+define i16 @test_kxnor(i16 %a0, i16 %a1) {
+; CHECK-LABEL: test_kxnor:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k0
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: movw $8, %ax
+; CHECK-NEXT: kmovw %eax, %k2
+; CHECK-NEXT: kxorw %k0, %k1, %k0
+; CHECK-NEXT: kxorw %k0, %k2, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: retq
+ %t1 = call i16 @llvm.x86.avx512.kxnor.w(i16 %a0, i16 8)
+ %t2 = call i16 @llvm.x86.avx512.kxnor.w(i16 %t1, i16 %a1)
+ ret i16 %t2
+}
+
+declare i16 @llvm.x86.avx512.kxor.w(i16, i16) nounwind readnone
+define i16 @test_kxor(i16 %a0, i16 %a1) {
+; CHECK-LABEL: test_kxor:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k0
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: movw $8, %ax
+; CHECK-NEXT: kmovw %eax, %k2
+; CHECK-NEXT: kxorw %k0, %k1, %k0
+; CHECK-NEXT: kxorw %k0, %k2, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: retq
+ %t1 = call i16 @llvm.x86.avx512.kxor.w(i16 %a0, i16 8)
+ %t2 = call i16 @llvm.x86.avx512.kxor.w(i16 %t1, i16 %a1)
+ ret i16 %t2
+}
+
define <16 x float> @test_rcp_ps_512(<16 x float> %a0) {
; CHECK-LABEL: test_rcp_ps_512:
; CHECK: ## BB#0:
@@ -223,7 +298,7 @@ define <2 x double> @test_sqrt_sd(<2 x double> %a0, <2 x double> %a1, <2 x doubl
; CHECK: ## BB#0:
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovaps %xmm2, %xmm3
+; CHECK-NEXT: vmovapd %xmm2, %xmm3
; CHECK-NEXT: vsqrtsd %xmm1, %xmm0, %xmm3 {%k1}
; CHECK-NEXT: vsqrtsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vsqrtsd {ru-sae}, %xmm1, %xmm0, %xmm4 {%k1} {z}
@@ -641,99 +716,12 @@ define <8 x double> @test_x86_vbroadcast_sd_512(i8* %a0) {
}
declare <8 x double> @llvm.x86.avx512.vbroadcast.sd.512(i8*) nounwind readonly
-define <16 x i32> @test_conflict_d(<16 x i32> %a) {
-; CHECK-LABEL: test_conflict_d:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpconflictd %zmm0, %zmm0
-; CHECK-NEXT: retq
- %res = call <16 x i32> @llvm.x86.avx512.mask.conflict.d.512(<16 x i32> %a, <16 x i32> zeroinitializer, i16 -1)
- ret <16 x i32> %res
-}
-
-declare <16 x i32> @llvm.x86.avx512.mask.conflict.d.512(<16 x i32>, <16 x i32>, i16) nounwind readonly
-
-define <8 x i64> @test_conflict_q(<8 x i64> %a) {
-; CHECK-LABEL: test_conflict_q:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpconflictq %zmm0, %zmm0
-; CHECK-NEXT: retq
- %res = call <8 x i64> @llvm.x86.avx512.mask.conflict.q.512(<8 x i64> %a, <8 x i64> zeroinitializer, i8 -1)
- ret <8 x i64> %res
-}
-
-declare <8 x i64> @llvm.x86.avx512.mask.conflict.q.512(<8 x i64>, <8 x i64>, i8) nounwind readonly
-
-define <16 x i32> @test_maskz_conflict_d(<16 x i32> %a, i16 %mask) {
-; CHECK-LABEL: test_maskz_conflict_d:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpconflictd %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT: retq
- %res = call <16 x i32> @llvm.x86.avx512.mask.conflict.d.512(<16 x i32> %a, <16 x i32> zeroinitializer, i16 %mask)
- ret <16 x i32> %res
-}
-
-define <8 x i64> @test_mask_conflict_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
-; CHECK-LABEL: test_mask_conflict_q:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpconflictq %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT: retq
- %res = call <8 x i64> @llvm.x86.avx512.mask.conflict.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
- ret <8 x i64> %res
-}
-
-define <16 x i32> @test_lzcnt_d(<16 x i32> %a) {
-; CHECK-LABEL: test_lzcnt_d:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vplzcntd %zmm0, %zmm0
-; CHECK-NEXT: retq
- %res = call <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32> %a, <16 x i32> zeroinitializer, i16 -1)
- ret <16 x i32> %res
-}
-
-declare <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32>, <16 x i32>, i16) nounwind readonly
-
-define <8 x i64> @test_lzcnt_q(<8 x i64> %a) {
-; CHECK-LABEL: test_lzcnt_q:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vplzcntq %zmm0, %zmm0
-; CHECK-NEXT: retq
- %res = call <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64> %a, <8 x i64> zeroinitializer, i8 -1)
- ret <8 x i64> %res
-}
-
-declare <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64>, <8 x i64>, i8) nounwind readonly
-
-
-define <16 x i32> @test_mask_lzcnt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
-; CHECK-LABEL: test_mask_lzcnt_d:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vplzcntd %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT: retq
- %res = call <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
- ret <16 x i32> %res
-}
-
-define <8 x i64> @test_mask_lzcnt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
-; CHECK-LABEL: test_mask_lzcnt_q:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vplzcntq %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT: retq
- %res = call <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
- ret <8 x i64> %res
-}
-
define i16 @test_cmpps(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: test_cmpps:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %a, <16 x float> %b, i32 2, i16 -1, i32 8)
ret i16 %res
@@ -745,6 +733,7 @@ define <8 x i64> @test_mask_lzcnt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpneqpd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %a, <8 x double> %b, i32 4, i8 -1, i32 4)
ret i8 %res
@@ -812,11 +801,12 @@ define i8 @test_vptestmq(<8 x i64> %a0, <8 x i64> %a1, i8 %m) {
; CHECK-LABEL: test_vptestmq:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vptestmq %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: vptestmq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %ecx
+; CHECK-NEXT: vptestmq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 -1)
%res1 = call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 %m)
@@ -829,9 +819,9 @@ define i16 @test_vptestmd(<16 x i32> %a0, <16 x i32> %a1, i16 %m) {
; CHECK-LABEL: test_vptestmd:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vptestmd %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: vptestmd %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %ecx
+; CHECK-NEXT: vptestmd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
@@ -858,29 +848,29 @@ declare void @llvm.x86.avx512.mask.store.ss(i8*, <4 x float>, i8 )
define <8 x i16> @test_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_cmp_d_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k3
-; CHECK-NEXT: vpcmpltd %zmm1, %zmm0, %k4
-; CHECK-NEXT: vpcmpled %zmm1, %zmm0, %k5
-; CHECK-NEXT: vpcmpunordd %zmm1, %zmm0, %k6
-; CHECK-NEXT: vpcmpneqd %zmm1, %zmm0, %k7
-; CHECK-NEXT: vpcmpnltd %zmm1, %zmm0, %k2
-; CHECK-NEXT: vpcmpnled %zmm1, %zmm0, %k1
-; CHECK-NEXT: vpcmpordd %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovw %k4, %eax
-; CHECK-NEXT: kmovw %k3, %ecx
+; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; CHECK-NEXT: vpcmpltd %zmm1, %zmm0, %k1
+; CHECK-NEXT: vpcmpled %zmm1, %zmm0, %k2
+; CHECK-NEXT: vpcmpunordd %zmm1, %zmm0, %k3
+; CHECK-NEXT: vpcmpneqd %zmm1, %zmm0, %k4
+; CHECK-NEXT: vpcmpnltd %zmm1, %zmm0, %k5
+; CHECK-NEXT: vpcmpnled %zmm1, %zmm0, %k6
+; CHECK-NEXT: vpcmpordd %zmm1, %zmm0, %k7
+; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: vmovd %ecx, %xmm0
; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k5, %eax
+; CHECK-NEXT: kmovw %k2, %eax
; CHECK-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k6, %eax
+; CHECK-NEXT: kmovw %k3, %eax
; CHECK-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k7, %eax
+; CHECK-NEXT: kmovw %k4, %eax
; CHECK-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k2, %eax
+; CHECK-NEXT: kmovw %k5, %eax
; CHECK-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: kmovw %k6, %eax
; CHECK-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: kmovw %k7, %eax
; CHECK-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
; CHECK-NEXT: retq
%res0 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 0, i16 -1)
@@ -905,30 +895,30 @@ define <8 x i16> @test_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1) {
define <8 x i16> @test_mask_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_cmp_d_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k3
-; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k4 {%k3}
-; CHECK-NEXT: vpcmpltd %zmm1, %zmm0, %k5 {%k3}
-; CHECK-NEXT: vpcmpled %zmm1, %zmm0, %k6 {%k3}
-; CHECK-NEXT: vpcmpunordd %zmm1, %zmm0, %k7 {%k3}
-; CHECK-NEXT: vpcmpneqd %zmm1, %zmm0, %k0 {%k3}
-; CHECK-NEXT: vpcmpnltd %zmm1, %zmm0, %k2 {%k3}
-; CHECK-NEXT: vpcmpnled %zmm1, %zmm0, %k1 {%k3}
-; CHECK-NEXT: vpcmpordd %zmm1, %zmm0, %k3 {%k3}
-; CHECK-NEXT: kmovw %k5, %eax
-; CHECK-NEXT: kmovw %k4, %ecx
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: vpcmpltd %zmm1, %zmm0, %k2 {%k1}
+; CHECK-NEXT: vpcmpled %zmm1, %zmm0, %k3 {%k1}
+; CHECK-NEXT: vpcmpunordd %zmm1, %zmm0, %k4 {%k1}
+; CHECK-NEXT: vpcmpneqd %zmm1, %zmm0, %k5 {%k1}
+; CHECK-NEXT: vpcmpnltd %zmm1, %zmm0, %k6 {%k1}
+; CHECK-NEXT: vpcmpnled %zmm1, %zmm0, %k7 {%k1}
+; CHECK-NEXT: vpcmpordd %zmm1, %zmm0, %k1 {%k1}
+; CHECK-NEXT: kmovw %k2, %eax
+; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: vmovd %ecx, %xmm0
; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k6, %eax
+; CHECK-NEXT: kmovw %k3, %eax
; CHECK-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k7, %eax
+; CHECK-NEXT: kmovw %k4, %eax
; CHECK-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: kmovw %k5, %eax
; CHECK-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k2, %eax
+; CHECK-NEXT: kmovw %k6, %eax
; CHECK-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: kmovw %k7, %eax
; CHECK-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k3, %eax
+; CHECK-NEXT: kmovw %k1, %eax
; CHECK-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
; CHECK-NEXT: retq
%res0 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 0, i16 %mask)
@@ -955,29 +945,29 @@ declare i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32>, <16 x i32>, i32, i16) no
define <8 x i16> @test_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_ucmp_d_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpequd %zmm1, %zmm0, %k3
-; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k4
-; CHECK-NEXT: vpcmpleud %zmm1, %zmm0, %k5
-; CHECK-NEXT: vpcmpunordud %zmm1, %zmm0, %k6
-; CHECK-NEXT: vpcmpnequd %zmm1, %zmm0, %k7
-; CHECK-NEXT: vpcmpnltud %zmm1, %zmm0, %k2
-; CHECK-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
-; CHECK-NEXT: vpcmpordud %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovw %k4, %eax
-; CHECK-NEXT: kmovw %k3, %ecx
+; CHECK-NEXT: vpcmpequd %zmm1, %zmm0, %k0
+; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k1
+; CHECK-NEXT: vpcmpleud %zmm1, %zmm0, %k2
+; CHECK-NEXT: vpcmpunordud %zmm1, %zmm0, %k3
+; CHECK-NEXT: vpcmpnequd %zmm1, %zmm0, %k4
+; CHECK-NEXT: vpcmpnltud %zmm1, %zmm0, %k5
+; CHECK-NEXT: vpcmpnleud %zmm1, %zmm0, %k6
+; CHECK-NEXT: vpcmpordud %zmm1, %zmm0, %k7
+; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: vmovd %ecx, %xmm0
; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k5, %eax
+; CHECK-NEXT: kmovw %k2, %eax
; CHECK-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k6, %eax
+; CHECK-NEXT: kmovw %k3, %eax
; CHECK-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k7, %eax
+; CHECK-NEXT: kmovw %k4, %eax
; CHECK-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k2, %eax
+; CHECK-NEXT: kmovw %k5, %eax
; CHECK-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: kmovw %k6, %eax
; CHECK-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: kmovw %k7, %eax
; CHECK-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
; CHECK-NEXT: retq
%res0 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 0, i16 -1)
@@ -1002,30 +992,30 @@ define <8 x i16> @test_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1) {
define <8 x i16> @test_mask_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_ucmp_d_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k3
-; CHECK-NEXT: vpcmpequd %zmm1, %zmm0, %k4 {%k3}
-; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k5 {%k3}
-; CHECK-NEXT: vpcmpleud %zmm1, %zmm0, %k6 {%k3}
-; CHECK-NEXT: vpcmpunordud %zmm1, %zmm0, %k7 {%k3}
-; CHECK-NEXT: vpcmpnequd %zmm1, %zmm0, %k0 {%k3}
-; CHECK-NEXT: vpcmpnltud %zmm1, %zmm0, %k2 {%k3}
-; CHECK-NEXT: vpcmpnleud %zmm1, %zmm0, %k1 {%k3}
-; CHECK-NEXT: vpcmpordud %zmm1, %zmm0, %k3 {%k3}
-; CHECK-NEXT: kmovw %k5, %eax
-; CHECK-NEXT: kmovw %k4, %ecx
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpcmpequd %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k2 {%k1}
+; CHECK-NEXT: vpcmpleud %zmm1, %zmm0, %k3 {%k1}
+; CHECK-NEXT: vpcmpunordud %zmm1, %zmm0, %k4 {%k1}
+; CHECK-NEXT: vpcmpnequd %zmm1, %zmm0, %k5 {%k1}
+; CHECK-NEXT: vpcmpnltud %zmm1, %zmm0, %k6 {%k1}
+; CHECK-NEXT: vpcmpnleud %zmm1, %zmm0, %k7 {%k1}
+; CHECK-NEXT: vpcmpordud %zmm1, %zmm0, %k1 {%k1}
+; CHECK-NEXT: kmovw %k2, %eax
+; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: vmovd %ecx, %xmm0
; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k6, %eax
+; CHECK-NEXT: kmovw %k3, %eax
; CHECK-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k7, %eax
+; CHECK-NEXT: kmovw %k4, %eax
; CHECK-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: kmovw %k5, %eax
; CHECK-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k2, %eax
+; CHECK-NEXT: kmovw %k6, %eax
; CHECK-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: kmovw %k7, %eax
; CHECK-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k3, %eax
+; CHECK-NEXT: kmovw %k1, %eax
; CHECK-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
; CHECK-NEXT: retq
%res0 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 0, i16 %mask)
@@ -1052,29 +1042,29 @@ declare i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32>, <16 x i32>, i32, i16) n
define <8 x i8> @test_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_cmp_q_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k3
-; CHECK-NEXT: vpcmpltq %zmm1, %zmm0, %k4
-; CHECK-NEXT: vpcmpleq %zmm1, %zmm0, %k5
-; CHECK-NEXT: vpcmpunordq %zmm1, %zmm0, %k6
-; CHECK-NEXT: vpcmpneqq %zmm1, %zmm0, %k7
-; CHECK-NEXT: vpcmpnltq %zmm1, %zmm0, %k2
-; CHECK-NEXT: vpcmpnleq %zmm1, %zmm0, %k1
-; CHECK-NEXT: vpcmpordq %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovw %k4, %eax
-; CHECK-NEXT: kmovw %k3, %ecx
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0
+; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
+; CHECK-NEXT: vpcmpltq %zmm1, %zmm0, %k1
+; CHECK-NEXT: vpcmpleq %zmm1, %zmm0, %k2
+; CHECK-NEXT: vpcmpunordq %zmm1, %zmm0, %k3
+; CHECK-NEXT: vpcmpneqq %zmm1, %zmm0, %k4
+; CHECK-NEXT: vpcmpnltq %zmm1, %zmm0, %k5
+; CHECK-NEXT: vpcmpnleq %zmm1, %zmm0, %k6
+; CHECK-NEXT: vpcmpordq %zmm1, %zmm0, %k7
+; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: kmovw %k0, %ecx
+; CHECK-NEXT: vmovd %ecx, %xmm0
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k5, %eax
+; CHECK-NEXT: kmovw %k2, %eax
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k6, %eax
+; CHECK-NEXT: kmovw %k3, %eax
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k7, %eax
+; CHECK-NEXT: kmovw %k4, %eax
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k2, %eax
+; CHECK-NEXT: kmovw %k5, %eax
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: kmovw %k6, %eax
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: kmovw %k7, %eax
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
; CHECK-NEXT: retq
%res0 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 0, i8 -1)
@@ -1099,30 +1089,30 @@ define <8 x i8> @test_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i8> @test_mask_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_q_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k3
-; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k4 {%k3}
-; CHECK-NEXT: vpcmpltq %zmm1, %zmm0, %k5 {%k3}
-; CHECK-NEXT: vpcmpleq %zmm1, %zmm0, %k6 {%k3}
-; CHECK-NEXT: vpcmpunordq %zmm1, %zmm0, %k7 {%k3}
-; CHECK-NEXT: vpcmpneqq %zmm1, %zmm0, %k0 {%k3}
-; CHECK-NEXT: vpcmpnltq %zmm1, %zmm0, %k2 {%k3}
-; CHECK-NEXT: vpcmpnleq %zmm1, %zmm0, %k1 {%k3}
-; CHECK-NEXT: vpcmpordq %zmm1, %zmm0, %k3 {%k3}
-; CHECK-NEXT: kmovw %k5, %eax
-; CHECK-NEXT: kmovw %k4, %ecx
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: vpcmpltq %zmm1, %zmm0, %k2 {%k1}
+; CHECK-NEXT: vpcmpleq %zmm1, %zmm0, %k3 {%k1}
+; CHECK-NEXT: vpcmpunordq %zmm1, %zmm0, %k4 {%k1}
+; CHECK-NEXT: vpcmpneqq %zmm1, %zmm0, %k5 {%k1}
+; CHECK-NEXT: vpcmpnltq %zmm1, %zmm0, %k6 {%k1}
+; CHECK-NEXT: vpcmpnleq %zmm1, %zmm0, %k7 {%k1}
+; CHECK-NEXT: vpcmpordq %zmm1, %zmm0, %k1 {%k1}
+; CHECK-NEXT: kmovw %k2, %eax
+; CHECK-NEXT: kmovw %k0, %ecx
+; CHECK-NEXT: vmovd %ecx, %xmm0
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k6, %eax
+; CHECK-NEXT: kmovw %k3, %eax
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k7, %eax
+; CHECK-NEXT: kmovw %k4, %eax
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: kmovw %k5, %eax
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k2, %eax
+; CHECK-NEXT: kmovw %k6, %eax
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: kmovw %k7, %eax
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k3, %eax
+; CHECK-NEXT: kmovw %k1, %eax
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
; CHECK-NEXT: retq
%res0 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 0, i8 %mask)
@@ -1149,29 +1139,29 @@ declare i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64>, <8 x i64>, i32, i8) nounwi
define <8 x i8> @test_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_ucmp_q_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpequq %zmm1, %zmm0, %k3
-; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k4
-; CHECK-NEXT: vpcmpleuq %zmm1, %zmm0, %k5
-; CHECK-NEXT: vpcmpunorduq %zmm1, %zmm0, %k6
-; CHECK-NEXT: vpcmpnequq %zmm1, %zmm0, %k7
-; CHECK-NEXT: vpcmpnltuq %zmm1, %zmm0, %k2
-; CHECK-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
-; CHECK-NEXT: vpcmporduq %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovw %k4, %eax
-; CHECK-NEXT: kmovw %k3, %ecx
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0
+; CHECK-NEXT: vpcmpequq %zmm1, %zmm0, %k0
+; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k1
+; CHECK-NEXT: vpcmpleuq %zmm1, %zmm0, %k2
+; CHECK-NEXT: vpcmpunorduq %zmm1, %zmm0, %k3
+; CHECK-NEXT: vpcmpnequq %zmm1, %zmm0, %k4
+; CHECK-NEXT: vpcmpnltuq %zmm1, %zmm0, %k5
+; CHECK-NEXT: vpcmpnleuq %zmm1, %zmm0, %k6
+; CHECK-NEXT: vpcmporduq %zmm1, %zmm0, %k7
+; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: kmovw %k0, %ecx
+; CHECK-NEXT: vmovd %ecx, %xmm0
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k5, %eax
+; CHECK-NEXT: kmovw %k2, %eax
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k6, %eax
+; CHECK-NEXT: kmovw %k3, %eax
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k7, %eax
+; CHECK-NEXT: kmovw %k4, %eax
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k2, %eax
+; CHECK-NEXT: kmovw %k5, %eax
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: kmovw %k6, %eax
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: kmovw %k7, %eax
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
; CHECK-NEXT: retq
%res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 0, i8 -1)
@@ -1196,30 +1186,30 @@ define <8 x i8> @test_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i8> @test_mask_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_q_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k3
-; CHECK-NEXT: vpcmpequq %zmm1, %zmm0, %k4 {%k3}
-; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k5 {%k3}
-; CHECK-NEXT: vpcmpleuq %zmm1, %zmm0, %k6 {%k3}
-; CHECK-NEXT: vpcmpunorduq %zmm1, %zmm0, %k7 {%k3}
-; CHECK-NEXT: vpcmpnequq %zmm1, %zmm0, %k0 {%k3}
-; CHECK-NEXT: vpcmpnltuq %zmm1, %zmm0, %k2 {%k3}
-; CHECK-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1 {%k3}
-; CHECK-NEXT: vpcmporduq %zmm1, %zmm0, %k3 {%k3}
-; CHECK-NEXT: kmovw %k5, %eax
-; CHECK-NEXT: kmovw %k4, %ecx
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpcmpequq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k2 {%k1}
+; CHECK-NEXT: vpcmpleuq %zmm1, %zmm0, %k3 {%k1}
+; CHECK-NEXT: vpcmpunorduq %zmm1, %zmm0, %k4 {%k1}
+; CHECK-NEXT: vpcmpnequq %zmm1, %zmm0, %k5 {%k1}
+; CHECK-NEXT: vpcmpnltuq %zmm1, %zmm0, %k6 {%k1}
+; CHECK-NEXT: vpcmpnleuq %zmm1, %zmm0, %k7 {%k1}
+; CHECK-NEXT: vpcmporduq %zmm1, %zmm0, %k1 {%k1}
+; CHECK-NEXT: kmovw %k2, %eax
+; CHECK-NEXT: kmovw %k0, %ecx
+; CHECK-NEXT: vmovd %ecx, %xmm0
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k6, %eax
+; CHECK-NEXT: kmovw %k3, %eax
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k7, %eax
+; CHECK-NEXT: kmovw %k4, %eax
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: kmovw %k5, %eax
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k2, %eax
+; CHECK-NEXT: kmovw %k6, %eax
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: kmovw %k7, %eax
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; CHECK-NEXT: kmovw %k3, %eax
+; CHECK-NEXT: kmovw %k1, %eax
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
; CHECK-NEXT: retq
%res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 0, i8 %mask)
@@ -2301,6 +2291,39 @@ define <4 x float> @test_add_ss_rn(<4 x float> %a0, <4 x float> %a1) {
ret <4 x float> %res
}
+define <4 x float> @test_mask_add_ss_current_memfold(<4 x float> %a0, float* %a1, <4 x float> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_add_ss_current_memfold:
+; CHECK: ## BB#0:
+; CHECK-NEXT: andl $1, %esi
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vaddss (%rdi), %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %a1.val = load float, float* %a1
+ %a1v0 = insertelement <4 x float> undef, float %a1.val, i32 0
+ %a1v1 = insertelement <4 x float> %a1v0, float 0.000000e+00, i32 1
+ %a1v2 = insertelement <4 x float> %a1v1, float 0.000000e+00, i32 2
+ %a1v = insertelement <4 x float> %a1v2, float 0.000000e+00, i32 3
+ %res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1v, <4 x float> %a2, i8 %mask, i32 4)
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_maskz_add_ss_current_memfold(<4 x float> %a0, float* %a1, i8 %mask) {
+; CHECK-LABEL: test_maskz_add_ss_current_memfold:
+; CHECK: ## BB#0:
+; CHECK-NEXT: andl $1, %esi
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vaddss (%rdi), %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %a1.val = load float, float* %a1
+ %a1v0 = insertelement <4 x float> undef, float %a1.val, i32 0
+ %a1v1 = insertelement <4 x float> %a1v0, float 0.000000e+00, i32 1
+ %a1v2 = insertelement <4 x float> %a1v1, float 0.000000e+00, i32 2
+ %a1v = insertelement <4 x float> %a1v2, float 0.000000e+00, i32 3
+ %res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1v, <4 x float> zeroinitializer, i8 %mask, i32 4)
+ ret <4 x float> %res
+}
+
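+; The *_memfold tests above and below pin down load folding for the masked
+; scalar ops: when the second operand is assembled as a load into lane 0
+; with the remaining lanes zeroed, only lane 0 is observable by the scalar
+; instruction, so the load may legally fold into the instruction itself
+; (e.g. 'vaddss (%rdi), ...') instead of being materialized first.
+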
declare <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>, <2 x double>, <2 x double>, i8, i32) nounwind readnone
define <2 x double> @test_mask_add_sd_rn(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
@@ -2383,6 +2406,35 @@ define <2 x double> @test_add_sd_rn(<2 x double> %a0, <2 x double> %a1) {
ret <2 x double> %res
}
+define <2 x double> @test_mask_add_sd_current_memfold(<2 x double> %a0, double* %a1, <2 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_add_sd_current_memfold:
+; CHECK: ## BB#0:
+; CHECK-NEXT: andl $1, %esi
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vaddsd (%rdi), %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %a1.val = load double, double* %a1
+ %a1v0 = insertelement <2 x double> undef, double %a1.val, i32 0
+ %a1v = insertelement <2 x double> %a1v0, double 0.000000e+00, i32 1
+ %res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1v, <2 x double> %a2, i8 %mask, i32 4)
+ ret <2 x double> %res
+}
+
+define <2 x double> @test_maskz_add_sd_current_memfold(<2 x double> %a0, double* %a1, i8 %mask) {
+; CHECK-LABEL: test_maskz_add_sd_current_memfold:
+; CHECK: ## BB#0:
+; CHECK-NEXT: andl $1, %esi
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vaddsd (%rdi), %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %a1.val = load double, double* %a1
+ %a1v0 = insertelement <2 x double> undef, double %a1.val, i32 0
+ %a1v = insertelement <2 x double> %a1v0, double 0.000000e+00, i32 1
+ %res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1v, <2 x double> zeroinitializer, i8 %mask, i32 4)
+ ret <2 x double> %res
+}
+
declare <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
define <4 x float> @test_mask_max_ss_sae(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
@@ -2448,6 +2500,39 @@ define <4 x float> @test_max_ss(<4 x float> %a0, <4 x float> %a1) {
%res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 -1, i32 4)
ret <4 x float> %res
}
+
+define <4 x float> @test_mask_max_ss_memfold(<4 x float> %a0, float* %a1, <4 x float> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_max_ss_memfold:
+; CHECK: ## BB#0:
+; CHECK-NEXT: andl $1, %esi
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmaxss (%rdi), %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %a1.val = load float, float* %a1
+ %a1v0 = insertelement <4 x float> undef, float %a1.val, i32 0
+ %a1v1 = insertelement <4 x float> %a1v0, float 0.000000e+00, i32 1
+ %a1v2 = insertelement <4 x float> %a1v1, float 0.000000e+00, i32 2
+ %a1v = insertelement <4 x float> %a1v2, float 0.000000e+00, i32 3
+ %res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1v, <4 x float> %a2, i8 %mask, i32 4)
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_maskz_max_ss_memfold(<4 x float> %a0, float* %a1, i8 %mask) {
+; CHECK-LABEL: test_maskz_max_ss_memfold:
+; CHECK: ## BB#0:
+; CHECK-NEXT: andl $1, %esi
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmaxss (%rdi), %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %a1.val = load float, float* %a1
+ %a1v0 = insertelement <4 x float> undef, float %a1.val, i32 0
+ %a1v1 = insertelement <4 x float> %a1v0, float 0.000000e+00, i32 1
+ %a1v2 = insertelement <4 x float> %a1v1, float 0.000000e+00, i32 2
+ %a1v = insertelement <4 x float> %a1v2, float 0.000000e+00, i32 3
+ %res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1v, <4 x float> zeroinitializer, i8 %mask, i32 4)
+ ret <4 x float> %res
+}
declare <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>, <2 x double>, <2 x double>, i8, i32) nounwind readnone
define <2 x double> @test_mask_max_sd_sae(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
@@ -2514,6 +2599,35 @@ define <2 x double> @test_max_sd(<2 x double> %a0, <2 x double> %a1) {
ret <2 x double> %res
}
+define <2 x double> @test_mask_max_sd_memfold(<2 x double> %a0, double* %a1, <2 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_max_sd_memfold:
+; CHECK: ## BB#0:
+; CHECK-NEXT: andl $1, %esi
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmaxsd (%rdi), %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %a1.val = load double, double* %a1
+ %a1v0 = insertelement <2 x double> undef, double %a1.val, i32 0
+ %a1v = insertelement <2 x double> %a1v0, double 0.000000e+00, i32 1
+ %res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1v, <2 x double> %a2, i8 %mask, i32 4)
+ ret <2 x double> %res
+}
+
+define <2 x double> @test_maskz_max_sd_memfold(<2 x double> %a0, double* %a1, i8 %mask) {
+; CHECK-LABEL: test_maskz_max_sd_memfold:
+; CHECK: ## BB#0:
+; CHECK-NEXT: andl $1, %esi
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %a1.val = load double, double* %a1
+ %a1v0 = insertelement <2 x double> undef, double %a1.val, i32 0
+ %a1v = insertelement <2 x double> %a1v0, double 0.000000e+00, i32 1
+ %res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1v, <2 x double> zeroinitializer, i8 %mask, i32 4)
+ ret <2 x double> %res
+}
+
define <2 x double> @test_x86_avx512_cvtsi2sd64(<2 x double> %a, i64 %b) {
; CHECK-LABEL: test_x86_avx512_cvtsi2sd64:
; CHECK: ## BB#0:
@@ -2666,9 +2780,9 @@ define <8 x double>@test_int_x86_avx512_mask_vpermi2var_pd_512(<8 x double> %x0,
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm1, %zmm3
-; CHECK-NEXT: vpermi2pd %zmm2, %zmm0, %zmm3 {%k1}
-; CHECK-NEXT: vpermi2pd %zmm2, %zmm0, %zmm1
-; CHECK-NEXT: vaddpd %zmm1, %zmm3, %zmm0
+; CHECK-NEXT: vpermi2pd %zmm2, %zmm0, %zmm3
+; CHECK-NEXT: vpermi2pd %zmm2, %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vaddpd %zmm3, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vpermi2var.pd.512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3)
%res1 = call <8 x double> @llvm.x86.avx512.mask.vpermi2var.pd.512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 -1)
@@ -2683,9 +2797,9 @@ define <16 x float>@test_int_x86_avx512_mask_vpermi2var_ps_512(<16 x float> %x0,
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm1, %zmm3
-; CHECK-NEXT: vpermi2ps %zmm2, %zmm0, %zmm3 {%k1}
-; CHECK-NEXT: vpermi2ps %zmm2, %zmm0, %zmm1
-; CHECK-NEXT: vaddps %zmm1, %zmm3, %zmm0
+; CHECK-NEXT: vpermi2ps %zmm2, %zmm0, %zmm3
+; CHECK-NEXT: vpermi2ps %zmm2, %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vaddps %zmm3, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vpermi2var.ps.512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3)
%res1 = call <16 x float> @llvm.x86.avx512.mask.vpermi2var.ps.512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 -1)
@@ -2700,9 +2814,9 @@ define <8 x i64>@test_int_x86_avx512_mask_vpermi2var_q_512(<8 x i64> %x0, <8 x i
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm3
-; CHECK-NEXT: vpermi2q %zmm2, %zmm0, %zmm3 {%k1}
-; CHECK-NEXT: vpermi2q %zmm2, %zmm0, %zmm1
-; CHECK-NEXT: vpaddq %zmm1, %zmm3, %zmm0
+; CHECK-NEXT: vpermi2q %zmm2, %zmm0, %zmm3
+; CHECK-NEXT: vpermi2q %zmm2, %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vpaddq %zmm3, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.vpermi2var.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.vpermi2var.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
@@ -2755,9 +2869,9 @@ define <16 x float>@test_int_x86_avx512_maskz_vpermt2var_ps_512(<16 x i32> %x0,
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm1, %zmm3
-; CHECK-NEXT: vpermt2ps %zmm2, %zmm0, %zmm3 {%k1} {z}
-; CHECK-NEXT: vpermt2ps %zmm2, %zmm0, %zmm1
-; CHECK-NEXT: vaddps %zmm1, %zmm3, %zmm0
+; CHECK-NEXT: vpermt2ps %zmm2, %zmm0, %zmm3
+; CHECK-NEXT: vpermt2ps %zmm2, %zmm0, %zmm1 {%k1} {z}
+; CHECK-NEXT: vaddps %zmm3, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3)
%res1 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> %x0, <16 x float> %x1, <16 x float> %x2, i16 -1)
@@ -2773,9 +2887,9 @@ define <8 x i64>@test_int_x86_avx512_maskz_vpermt2var_q_512(<8 x i64> %x0, <8 x
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm3
-; CHECK-NEXT: vpermt2q %zmm2, %zmm0, %zmm3 {%k1} {z}
-; CHECK-NEXT: vpermt2q %zmm2, %zmm0, %zmm1
-; CHECK-NEXT: vpaddq %zmm1, %zmm3, %zmm0
+; CHECK-NEXT: vpermt2q %zmm2, %zmm0, %zmm3
+; CHECK-NEXT: vpermt2q %zmm2, %zmm0, %zmm1 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm3, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
@@ -2790,9 +2904,9 @@ define <16 x i32>@test_int_x86_avx512_mask_vpermt2var_d_512(<16 x i32> %x0, <16
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm3
-; CHECK-NEXT: vpermt2d %zmm2, %zmm0, %zmm3 {%k1}
-; CHECK-NEXT: vpermt2d %zmm2, %zmm0, %zmm1
-; CHECK-NEXT: vpaddd %zmm1, %zmm3, %zmm0
+; CHECK-NEXT: vpermt2d %zmm2, %zmm0, %zmm3
+; CHECK-NEXT: vpermt2d %zmm2, %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vpaddd %zmm3, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.vpermt2var.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.vpermt2var.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1)
@@ -2836,8 +2950,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmov_qb_512(<8 x i64> %x0, <16 x i8> %
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qb_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmovqb %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vpmovqb %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT: vpmovqb %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vpmovqb %zmm0, %xmm0
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0
@@ -2870,8 +2984,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovs_qb_512(<8 x i64> %x0, <16 x i8>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qb_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmovsqb %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vpmovsqb %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT: vpmovsqb %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vpmovsqb %zmm0, %xmm0
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0
@@ -2904,8 +3018,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovus_qb_512(<8 x i64> %x0, <16 x i8>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qb_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmovusqb %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vpmovusqb %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT: vpmovusqb %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vpmovusqb %zmm0, %xmm0
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0
@@ -2938,8 +3052,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmov_qw_512(<8 x i64> %x0, <8 x i16> %
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qw_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmovqw %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vpmovqw %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT: vpmovqw %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vpmovqw %zmm0, %xmm0
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpaddw %xmm2, %xmm0, %xmm0
@@ -2972,8 +3086,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovs_qw_512(<8 x i64> %x0, <8 x i16>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qw_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmovsqw %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vpmovsqw %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT: vpmovsqw %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vpmovsqw %zmm0, %xmm0
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpaddw %xmm2, %xmm0, %xmm0
@@ -3006,8 +3120,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovus_qw_512(<8 x i64> %x0, <8 x i16>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qw_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmovusqw %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vpmovusqw %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT: vpmovusqw %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vpmovusqw %zmm0, %xmm0
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpaddw %xmm2, %xmm0, %xmm0
@@ -3040,8 +3154,8 @@ define <8 x i32>@test_int_x86_avx512_mask_pmov_qd_512(<8 x i64> %x0, <8 x i32> %
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qd_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmovqd %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vpmovqd %zmm0, %ymm2 {%k1} {z}
+; CHECK-NEXT: vpmovqd %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vpmovqd %zmm0, %ymm0
; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vpaddd %ymm2, %ymm0, %ymm0
@@ -3074,8 +3188,8 @@ define <8 x i32>@test_int_x86_avx512_mask_pmovs_qd_512(<8 x i64> %x0, <8 x i32>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qd_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmovsqd %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vpmovsqd %zmm0, %ymm2 {%k1} {z}
+; CHECK-NEXT: vpmovsqd %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vpmovsqd %zmm0, %ymm0
; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vpaddd %ymm2, %ymm0, %ymm0
@@ -3108,8 +3222,8 @@ define <8 x i32>@test_int_x86_avx512_mask_pmovus_qd_512(<8 x i64> %x0, <8 x i32>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qd_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmovusqd %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vpmovusqd %zmm0, %ymm2 {%k1} {z}
+; CHECK-NEXT: vpmovusqd %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vpmovusqd %zmm0, %ymm0
; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vpaddd %ymm2, %ymm0, %ymm0
@@ -3142,8 +3256,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmov_db_512(<16 x i32> %x0, <16 x i8>
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_db_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmovdb %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vpmovdb %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT: vpmovdb %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vpmovdb %zmm0, %xmm0
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0
@@ -3176,8 +3290,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovs_db_512(<16 x i32> %x0, <16 x i8>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_db_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmovsdb %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vpmovsdb %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT: vpmovsdb %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vpmovsdb %zmm0, %xmm0
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0
@@ -3210,8 +3324,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovus_db_512(<16 x i32> %x0, <16 x i8
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_db_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmovusdb %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vpmovusdb %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT: vpmovusdb %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vpmovusdb %zmm0, %xmm0
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0
@@ -3244,8 +3358,8 @@ define <16 x i16>@test_int_x86_avx512_mask_pmov_dw_512(<16 x i32> %x0, <16 x i16
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_dw_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmovdw %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vpmovdw %zmm0, %ymm2 {%k1} {z}
+; CHECK-NEXT: vpmovdw %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vpmovdw %zmm0, %ymm0
; CHECK-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vpaddw %ymm2, %ymm0, %ymm0
@@ -3278,8 +3392,8 @@ define <16 x i16>@test_int_x86_avx512_mask_pmovs_dw_512(<16 x i32> %x0, <16 x i1
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_dw_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmovsdw %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vpmovsdw %zmm0, %ymm2 {%k1} {z}
+; CHECK-NEXT: vpmovsdw %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vpmovsdw %zmm0, %ymm0
; CHECK-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vpaddw %ymm2, %ymm0, %ymm0
@@ -3312,8 +3426,8 @@ define <16 x i16>@test_int_x86_avx512_mask_pmovus_dw_512(<16 x i32> %x0, <16 x i
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_dw_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmovusdw %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vpmovusdw %zmm0, %ymm2 {%k1} {z}
+; CHECK-NEXT: vpmovusdw %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vpmovusdw %zmm0, %ymm0
; CHECK-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vpaddw %ymm2, %ymm0, %ymm0
@@ -3566,7 +3680,7 @@ define <2 x double> @test_getexp_sd(<2 x double> %a0, <2 x double> %a1, <2 x dou
; CHECK: ## BB#0:
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovaps %xmm2, %xmm3
+; CHECK-NEXT: vmovapd %xmm2, %xmm3
; CHECK-NEXT: vgetexpsd %xmm1, %xmm0, %xmm3 {%k1}
; CHECK-NEXT: vgetexpsd %xmm1, %xmm0, %xmm4
; CHECK-NEXT: vgetexpsd {sae}, %xmm1, %xmm0, %xmm2 {%k1}
@@ -3700,8 +3814,8 @@ define <8 x double>@test_int_x86_avx512_mask_shuf_f64x2(<8 x double> %x0, <8 x d
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_f64x2:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[4,5,2,3],zmm1[2,3,0,1]
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm3 {%k1} {z} = zmm0[4,5,2,3],zmm1[2,3,0,1]
+; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[4,5,2,3],zmm1[2,3,0,1]
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[4,5,2,3],zmm1[2,3,0,1]
; CHECK-NEXT: vaddpd %zmm0, %zmm2, %zmm0
; CHECK-NEXT: vaddpd %zmm3, %zmm0, %zmm0
@@ -3978,9 +4092,9 @@ define <16 x i32>@test_int_x86_avx512_mask_pternlog_d_512(<16 x i32> %x0, <16 x
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
-; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm3 {%k1}
-; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm0
-; CHECK-NEXT: vpaddd %zmm0, %zmm3, %zmm0
+; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm3
+; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vpaddd %zmm3, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 %x4)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
@@ -3995,9 +4109,9 @@ define <16 x i32>@test_int_x86_avx512_maskz_pternlog_d_512(<16 x i32> %x0, <16 x
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
-; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm3 {%k1} {z}
-; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm0
-; CHECK-NEXT: vpaddd %zmm0, %zmm3, %zmm0
+; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm3
+; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpaddd %zmm3, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 %x4)
%res1 = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
@@ -4012,9 +4126,9 @@ define <8 x i64>@test_int_x86_avx512_mask_pternlog_q_512(<8 x i64> %x0, <8 x i64
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
-; CHECK-NEXT: vpternlogq $33, %zmm2, %zmm1, %zmm3 {%k1}
-; CHECK-NEXT: vpternlogq $33, %zmm2, %zmm1, %zmm0
-; CHECK-NEXT: vpaddq %zmm0, %zmm3, %zmm0
+; CHECK-NEXT: vpternlogq $33, %zmm2, %zmm1, %zmm3
+; CHECK-NEXT: vpternlogq $33, %zmm2, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vpaddq %zmm3, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.pternlog.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i32 33, i8 %x4)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.pternlog.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i32 33, i8 -1)
@@ -4029,9 +4143,9 @@ define <8 x i64>@test_int_x86_avx512_maskz_pternlog_q_512(<8 x i64> %x0, <8 x i6
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
-; CHECK-NEXT: vpternlogq $33, %zmm2, %zmm1, %zmm3 {%k1} {z}
-; CHECK-NEXT: vpternlogq $33, %zmm2, %zmm1, %zmm0
-; CHECK-NEXT: vpaddq %zmm0, %zmm3, %zmm0
+; CHECK-NEXT: vpternlogq $33, %zmm2, %zmm1, %zmm3
+; CHECK-NEXT: vpternlogq $33, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm3, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.maskz.pternlog.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i32 33, i8 %x4)
%res1 = call <8 x i64> @llvm.x86.avx512.maskz.pternlog.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i32 33, i8 -1)
@@ -4155,6 +4269,18 @@ define <16 x float>@test_int_x86_avx512_mask_broadcastf32x4_512(<4 x float> %x0,
ret <16 x float> %res5
}
+define <16 x float>@test_int_x86_avx512_mask_broadcastf32x4_512_load(<4 x float>* %x0ptr, <16 x float> %x2, i16 %mask) {
+; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_512_load:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovaps (%rdi), %xmm1
+; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; CHECK-NEXT: retq
+ %x0 = load <4 x float>, <4 x float>* %x0ptr
+ %res = call <16 x float> @llvm.x86.avx512.mask.broadcastf32x4.512(<4 x float> %x0, <16 x float> %x2, i16 %mask)
+ ret <16 x float> %res
+}
+
declare <8 x double> @llvm.x86.avx512.mask.broadcastf64x4.512(<4 x double>, <8 x double>, i8)
define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512(<4 x double> %x0, <8 x double> %x2, i8 %mask) {
@@ -4177,6 +4303,19 @@ define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512(<4 x double> %x0
ret <8 x double> %res5
}
+define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512_load(<4 x double>* %x0ptr, <8 x double> %x2, i8 %mask) {
+; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x4_512_load:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovapd (%rdi), %ymm1
+; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3,0,1,2,3]
+; CHECK-NEXT: retq
+
+ %x0 = load <4 x double>, <4 x double>* %x0ptr
+ %res = call <8 x double> @llvm.x86.avx512.mask.broadcastf64x4.512(<4 x double> %x0, <8 x double> %x2, i8 %mask)
+ ret <8 x double> %res
+}
+
declare <16 x i32> @llvm.x86.avx512.mask.broadcasti32x4.512(<4 x i32>, <16 x i32>, i16)
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x4_512(<4 x i32> %x0, <16 x i32> %x2, i16 %mask) {
@@ -4199,6 +4338,19 @@ define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x4_512(<4 x i32> %x0, <16
ret <16 x i32> %res5
}
+define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x4_512_load(<4 x i32>* %x0ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_512_load:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa (%rdi), %xmm1
+; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; CHECK-NEXT: retq
+
+ %x0 = load <4 x i32>, <4 x i32>* %x0ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.broadcasti32x4.512(<4 x i32> %x0, <16 x i32> %x2, i16 %mask)
+ ret <16 x i32> %res
+}
+
declare <8 x i64> @llvm.x86.avx512.mask.broadcasti64x4.512(<4 x i64>, <8 x i64>, i8)
define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512(<4 x i64> %x0, <8 x i64> %x2, i8 %mask) {
@@ -4221,17 +4373,30 @@ define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512(<4 x i64> %x0, <8 x
ret <8 x i64> %res5
}
+define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512_load(<4 x i64>* %x0ptr, <8 x i64> %x2, i8 %mask) {
+; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x4_512_load:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa (%rdi), %ymm1
+; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3,0,1,2,3]
+; CHECK-NEXT: retq
+
+ %x0 = load <4 x i64>, <4 x i64>* %x0ptr
+ %res = call <8 x i64> @llvm.x86.avx512.mask.broadcasti64x4.512(<4 x i64> %x0, <8 x i64> %x2, i8 %mask)
+ ret <8 x i64> %res
+}
+
declare <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
define <16 x i32>@test_int_x86_avx512_mask_prorv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prorv_d_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vprorvd %zmm1, %zmm0, %zmm3
; CHECK-NEXT: vprorvd %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT: vprorvd %zmm1, %zmm0, %zmm3 {%k1} {z}
-; CHECK-NEXT: vprorvd %zmm1, %zmm0, %zmm0
-; CHECK-NEXT: vpaddd %zmm3, %zmm2, %zmm1
-; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: vprorvd %zmm1, %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpaddd %zmm0, %zmm2, %zmm0
+; CHECK-NEXT: vpaddd %zmm3, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> zeroinitializer, i16 %x3)
@@ -4247,11 +4412,11 @@ define <8 x i64>@test_int_x86_avx512_mask_prorv_q_512(<8 x i64> %x0, <8 x i64> %
; CHECK-LABEL: test_int_x86_avx512_mask_prorv_q_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vprorvq %zmm1, %zmm0, %zmm3
; CHECK-NEXT: vprorvq %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT: vprorvq %zmm1, %zmm0, %zmm3 {%k1} {z}
-; CHECK-NEXT: vprorvq %zmm1, %zmm0, %zmm0
-; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm1
-; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: vprorvq %zmm1, %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm0, %zmm2, %zmm0
+; CHECK-NEXT: vpaddq %zmm3, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> zeroinitializer, i8 %x3)
@@ -4307,11 +4472,11 @@ define <8 x double>@test_int_x86_avx512_mask_permvar_df_512(<8 x double> %x0, <8
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_df_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpermpd %zmm0, %zmm1, %zmm3
; CHECK-NEXT: vpermpd %zmm0, %zmm1, %zmm2 {%k1}
-; CHECK-NEXT: vpermpd %zmm0, %zmm1, %zmm3 {%k1} {z}
-; CHECK-NEXT: vpermpd %zmm0, %zmm1, %zmm0
-; CHECK-NEXT: vaddpd %zmm3, %zmm2, %zmm1
-; CHECK-NEXT: vaddpd %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: vpermpd %zmm0, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: vaddpd %zmm0, %zmm2, %zmm0
+; CHECK-NEXT: vaddpd %zmm3, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3)
%res1 = call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %x0, <8 x i64> %x1, <8 x double> zeroinitializer, i8 %x3)
@@ -4327,11 +4492,11 @@ define <8 x i64>@test_int_x86_avx512_mask_permvar_di_512(<8 x i64> %x0, <8 x i64
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_di_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpermq %zmm0, %zmm1, %zmm3
; CHECK-NEXT: vpermq %zmm0, %zmm1, %zmm2 {%k1}
-; CHECK-NEXT: vpermq %zmm0, %zmm1, %zmm3 {%k1} {z}
-; CHECK-NEXT: vpermq %zmm0, %zmm1, %zmm0
-; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm1
-; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: vpermq %zmm0, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm0, %zmm2, %zmm0
+; CHECK-NEXT: vpaddq %zmm3, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> zeroinitializer, i8 %x3)
@@ -4347,11 +4512,11 @@ define <16 x float>@test_int_x86_avx512_mask_permvar_sf_512(<16 x float> %x0, <1
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_sf_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm3
; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm2 {%k1}
-; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm3 {%k1} {z}
-; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0
-; CHECK-NEXT: vaddps %zmm3, %zmm2, %zmm1
-; CHECK-NEXT: vaddps %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: vaddps %zmm0, %zmm2, %zmm0
+; CHECK-NEXT: vaddps %zmm3, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3)
%res1 = call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %x0, <16 x i32> %x1, <16 x float> zeroinitializer, i16 %x3)
@@ -4367,11 +4532,11 @@ define <16 x i32>@test_int_x86_avx512_mask_permvar_si_512(<16 x i32> %x0, <16 x
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_si_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm3
; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm2 {%k1}
-; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm3 {%k1} {z}
-; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm0
-; CHECK-NEXT: vpaddd %zmm3, %zmm2, %zmm1
-; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpaddd %zmm0, %zmm2, %zmm0
+; CHECK-NEXT: vpaddd %zmm3, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> zeroinitializer, i16 %x3)
@@ -4504,13 +4669,13 @@ define <16 x float>@test_int_x86_avx512_maskz_fixupimm_ps_512(<16 x float> %x0,
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm0, %zmm3
-; CHECK-NEXT: vfixupimmps $5, %zmm2, %zmm1, %zmm3 {%k1} {z}
+; CHECK-NEXT: vfixupimmps $5, %zmm2, %zmm1, %zmm3
; CHECK-NEXT: vmovaps %zmm0, %zmm4
-; CHECK-NEXT: vfixupimmps $5, %zmm2, %zmm1, %zmm4
+; CHECK-NEXT: vfixupimmps $5, %zmm2, %zmm1, %zmm4 {%k1} {z}
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vfixupimmps $5, {sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT: vaddps %zmm0, %zmm3, %zmm0
-; CHECK-NEXT: vaddps %zmm4, %zmm0, %zmm0
+; CHECK-NEXT: vaddps %zmm0, %zmm4, %zmm0
+; CHECK-NEXT: vaddps %zmm3, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.maskz.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i32 5, i16 %x4, i32 4)
%res1 = call <16 x float> @llvm.x86.avx512.maskz.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> zeroinitializer, i32 5, i16 %x4, i32 8)
@@ -4574,9 +4739,9 @@ define i16@test_int_x86_avx512_ptestnm_d_512(<16 x i32> %x0, <16 x i32> %x1, i16
; CHECK-LABEL: test_int_x86_avx512_ptestnm_d_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vptestnmd %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: vptestnmd %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %ecx
+; CHECK-NEXT: vptestnmd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
@@ -4593,11 +4758,12 @@ define i8@test_int_x86_avx512_ptestnm_q_512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2
; CHECK-LABEL: test_int_x86_avx512_ptestnm_q_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vptestnmq %zmm1, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: vptestnmq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %ecx
+; CHECK-NEXT: vptestnmq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.ptestnm.q.512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.q.512(<8 x i64> %x0, <8 x i64> %x1, i8-1)
@@ -4609,8 +4775,8 @@ define <16 x i32>@test_int_x86_avx512_mask_pbroadcastd_gpr_512(i32 %x0, <16 x i3
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcastd_gpr_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1}
; CHECK-NEXT: vpbroadcastd %edi, %zmm1 {%k1} {z}
+; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1}
; CHECK-NEXT: vpbroadcastd %edi, %zmm2
; CHECK-NEXT: vpaddd %zmm0, %zmm2, %zmm0
; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
@@ -4629,8 +4795,8 @@ define <8 x i64>@test_int_x86_avx512_mask_pbroadcastq_gpr_512(i64 %x0, <8 x i64>
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcastq_gpr_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1}
; CHECK-NEXT: vpbroadcastq %rdi, %zmm1 {%k1} {z}
+; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1}
; CHECK-NEXT: vpbroadcastq %rdi, %zmm2
; CHECK-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
@@ -4651,11 +4817,11 @@ define <2 x double>@test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %x0, <2 x do
; CHECK: ## BB#0:
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovaps %xmm0, %xmm3
+; CHECK-NEXT: vmovapd %xmm0, %xmm3
; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm3 {%k1}
-; CHECK-NEXT: vmovaps %xmm0, %xmm4
+; CHECK-NEXT: vmovapd %xmm0, %xmm4
; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm4
-; CHECK-NEXT: vmovaps %xmm0, %xmm5
+; CHECK-NEXT: vmovapd %xmm0, %xmm5
; CHECK-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm5 {%k1}
; CHECK-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0
; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm1
@@ -4707,7 +4873,7 @@ define <2 x double>@test_int_x86_avx512_maskz_vfmadd_sd(<2 x double> %x0, <2 x d
; CHECK: ## BB#0:
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovaps %xmm0, %xmm3
+; CHECK-NEXT: vmovapd %xmm0, %xmm3
; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm3 {%k1} {z}
; CHECK-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
@@ -4739,11 +4905,11 @@ define <2 x double>@test_int_x86_avx512_mask3_vfmadd_sd(<2 x double> %x0, <2 x d
; CHECK: ## BB#0:
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovaps %xmm2, %xmm3
+; CHECK-NEXT: vmovapd %xmm2, %xmm3
; CHECK-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vmovaps %xmm2, %xmm4
+; CHECK-NEXT: vmovapd %xmm2, %xmm4
; CHECK-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vmovaps %xmm2, %xmm5
+; CHECK-NEXT: vmovapd %xmm2, %xmm5
; CHECK-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm5 {%k1}
; CHECK-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm2
; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm0
@@ -4788,6 +4954,110 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss(<4 x float> %x0, <4 x flo
ret <4 x float> %res6
}
+define void @fmadd_ss_mask_memfold(float* %a, float* %b, i8 %c) {
+; CHECK-LABEL: fmadd_ss_mask_memfold:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: andl $1, %edx
+; CHECK-NEXT: kmovw %edx, %k1
+; CHECK-NEXT: vfmadd132ss (%rsi), %xmm0, %xmm0 {%k1}
+; CHECK-NEXT: vmovss %xmm0, (%rdi)
+; CHECK-NEXT: retq
+ %a.val = load float, float* %a
+ %av0 = insertelement <4 x float> undef, float %a.val, i32 0
+ %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
+ %av2 = insertelement <4 x float> %av1, float 0.000000e+00, i32 2
+ %av = insertelement <4 x float> %av2, float 0.000000e+00, i32 3
+
+ %b.val = load float, float* %b
+ %bv0 = insertelement <4 x float> undef, float %b.val, i32 0
+ %bv1 = insertelement <4 x float> %bv0, float 0.000000e+00, i32 1
+ %bv2 = insertelement <4 x float> %bv1, float 0.000000e+00, i32 2
+ %bv = insertelement <4 x float> %bv2, float 0.000000e+00, i32 3
+
+ %vr = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %av, <4 x float> %bv, <4 x float> %av, i8 %c, i32 4)
+
+ %sr = extractelement <4 x float> %vr, i32 0
+ store float %sr, float* %a
+ ret void
+}
+
+define void @fmadd_ss_maskz_memfold(float* %a, float* %b, i8 %c) {
+; CHECK-LABEL: fmadd_ss_maskz_memfold:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: andl $1, %edx
+; CHECK-NEXT: kmovw %edx, %k1
+; CHECK-NEXT: vfmadd132ss (%rsi), %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovss %xmm0, (%rdi)
+; CHECK-NEXT: retq
+ %a.val = load float, float* %a
+ %av0 = insertelement <4 x float> undef, float %a.val, i32 0
+ %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
+ %av2 = insertelement <4 x float> %av1, float 0.000000e+00, i32 2
+ %av = insertelement <4 x float> %av2, float 0.000000e+00, i32 3
+
+ %b.val = load float, float* %b
+ %bv0 = insertelement <4 x float> undef, float %b.val, i32 0
+ %bv1 = insertelement <4 x float> %bv0, float 0.000000e+00, i32 1
+ %bv2 = insertelement <4 x float> %bv1, float 0.000000e+00, i32 2
+ %bv = insertelement <4 x float> %bv2, float 0.000000e+00, i32 3
+
+ %vr = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %av, <4 x float> %bv, <4 x float> %av, i8 %c, i32 4)
+
+ %sr = extractelement <4 x float> %vr, i32 0
+ store float %sr, float* %a
+ ret void
+}
+
+define void @fmadd_sd_mask_memfold(double* %a, double* %b, i8 %c) {
+; CHECK-LABEL: fmadd_sd_mask_memfold:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: andl $1, %edx
+; CHECK-NEXT: kmovw %edx, %k1
+; CHECK-NEXT: vfmadd132sd (%rsi), %xmm0, %xmm0 {%k1}
+; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
+; CHECK-NEXT: retq
+ %a.val = load double, double* %a
+ %av0 = insertelement <2 x double> undef, double %a.val, i32 0
+ %av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
+
+ %b.val = load double, double* %b
+ %bv0 = insertelement <2 x double> undef, double %b.val, i32 0
+ %bv = insertelement <2 x double> %bv0, double 0.000000e+00, i32 1
+
+ %vr = call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %av, <2 x double> %bv, <2 x double> %av, i8 %c, i32 4)
+
+ %sr = extractelement <2 x double> %vr, i32 0
+ store double %sr, double* %a
+ ret void
+}
+
+define void @fmadd_sd_maskz_memfold(double* %a, double* %b, i8 %c) {
+; CHECK-LABEL: fmadd_sd_maskz_memfold:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: andl $1, %edx
+; CHECK-NEXT: kmovw %edx, %k1
+; CHECK-NEXT: vfmadd132sd (%rsi), %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
+; CHECK-NEXT: retq
+ %a.val = load double, double* %a
+ %av0 = insertelement <2 x double> undef, double %a.val, i32 0
+ %av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
+
+ %b.val = load double, double* %b
+ %bv0 = insertelement <2 x double> undef, double %b.val, i32 0
+ %bv = insertelement <2 x double> %bv0, double 0.000000e+00, i32 1
+
+ %vr = call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %av, <2 x double> %bv, <2 x double> %av, i8 %c, i32 4)
+
+ %sr = extractelement <2 x double> %vr, i32 0
+ store double %sr, double* %a
+ ret void
+}
+
declare <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32)
define <2 x double>@test_int_x86_avx512_mask3_vfmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
@@ -4795,11 +5065,11 @@ define <2 x double>@test_int_x86_avx512_mask3_vfmsub_sd(<2 x double> %x0, <2 x d
; CHECK: ## BB#0:
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovaps %xmm2, %xmm3
+; CHECK-NEXT: vmovapd %xmm2, %xmm3
; CHECK-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vmovaps %xmm2, %xmm4
+; CHECK-NEXT: vmovapd %xmm2, %xmm4
; CHECK-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vmovaps %xmm2, %xmm5
+; CHECK-NEXT: vmovapd %xmm2, %xmm5
; CHECK-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm5 {%k1}
; CHECK-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2
; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm0
@@ -4851,11 +5121,11 @@ define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_sd(<2 x double> %x0, <2 x
; CHECK: ## BB#0:
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovaps %xmm2, %xmm3
+; CHECK-NEXT: vmovapd %xmm2, %xmm3
; CHECK-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vmovaps %xmm2, %xmm4
+; CHECK-NEXT: vmovapd %xmm2, %xmm4
; CHECK-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vmovaps %xmm2, %xmm5
+; CHECK-NEXT: vmovapd %xmm2, %xmm5
; CHECK-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm5 {%k1}
; CHECK-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2
; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm0
diff --git a/test/CodeGen/X86/avx512-load-store.ll b/test/CodeGen/X86/avx512-load-store.ll
index fe1003e8b739..3295c66c6d42 100644
--- a/test/CodeGen/X86/avx512-load-store.ll
+++ b/test/CodeGen/X86/avx512-load-store.ll
@@ -1,12 +1,22 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -O2 -mattr=avx512f -mtriple=x86_64-unknown | FileCheck %s
+; RUN: llc < %s -O2 -mattr=avx512f -mtriple=x86_64-unknown | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK64
+; RUN: llc < %s -O2 -mattr=avx512f -mtriple=i386-unknown | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK32
define <4 x float> @test_mm_mask_move_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) local_unnamed_addr #0 {
-; CHECK-LABEL: test_mm_mask_move_ss:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovss %xmm2, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; CHECK64-LABEL: test_mm_mask_move_ss:
+; CHECK64: # BB#0: # %entry
+; CHECK64-NEXT: kmovw %edi, %k1
+; CHECK64-NEXT: vmovss %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK64-NEXT: retq
+;
+; CHECK32-LABEL: test_mm_mask_move_ss:
+; CHECK32: # BB#0: # %entry
+; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
+; CHECK32-NEXT: andl $1, %eax
+; CHECK32-NEXT: kmovw %eax, %k1
+; CHECK32-NEXT: vmovss %xmm2, %xmm0, %xmm0 {%k1}
+; CHECK32-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; CHECK32-NEXT: retl
entry:
%0 = and i8 %__U, 1
%tobool.i = icmp ne i8 %0, 0
@@ -18,11 +28,21 @@ entry:
}
define <4 x float> @test_mm_maskz_move_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) local_unnamed_addr #0 {
-; CHECK-LABEL: test_mm_maskz_move_ss:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT: retq
+; CHECK64-LABEL: test_mm_maskz_move_ss:
+; CHECK64: # BB#0: # %entry
+; CHECK64-NEXT: kmovw %edi, %k1
+; CHECK64-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} {z}
+; CHECK64-NEXT: retq
+;
+; CHECK32-LABEL: test_mm_maskz_move_ss:
+; CHECK32: # BB#0: # %entry
+; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
+; CHECK32-NEXT: andl $1, %eax
+; CHECK32-NEXT: kmovw %eax, %k1
+; CHECK32-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK32-NEXT: vmovss %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK32-NEXT: vmovss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
+; CHECK32-NEXT: retl
entry:
%0 = and i8 %__U, 1
%tobool.i = icmp ne i8 %0, 0
@@ -33,11 +53,20 @@ entry:
}
define <2 x double> @test_mm_mask_move_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) local_unnamed_addr #0 {
-; CHECK-LABEL: test_mm_mask_move_sd:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovsd %xmm2, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; CHECK64-LABEL: test_mm_mask_move_sd:
+; CHECK64: # BB#0: # %entry
+; CHECK64-NEXT: kmovw %edi, %k1
+; CHECK64-NEXT: vmovsd %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK64-NEXT: retq
+;
+; CHECK32-LABEL: test_mm_mask_move_sd:
+; CHECK32: # BB#0: # %entry
+; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
+; CHECK32-NEXT: andl $1, %eax
+; CHECK32-NEXT: kmovw %eax, %k1
+; CHECK32-NEXT: vmovsd %xmm2, %xmm0, %xmm0 {%k1}
+; CHECK32-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; CHECK32-NEXT: retl
entry:
%0 = and i8 %__U, 1
%tobool.i = icmp ne i8 %0, 0
@@ -49,11 +78,21 @@ entry:
}
define <2 x double> @test_mm_maskz_move_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) local_unnamed_addr #0 {
-; CHECK-LABEL: test_mm_maskz_move_sd:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT: retq
+; CHECK64-LABEL: test_mm_maskz_move_sd:
+; CHECK64: # BB#0: # %entry
+; CHECK64-NEXT: kmovw %edi, %k1
+; CHECK64-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1} {z}
+; CHECK64-NEXT: retq
+;
+; CHECK32-LABEL: test_mm_maskz_move_sd:
+; CHECK32: # BB#0: # %entry
+; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
+; CHECK32-NEXT: andl $1, %eax
+; CHECK32-NEXT: kmovw %eax, %k1
+; CHECK32-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; CHECK32-NEXT: vmovsd %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK32-NEXT: vmovsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
+; CHECK32-NEXT: retl
entry:
%0 = and i8 %__U, 1
%tobool.i = icmp ne i8 %0, 0
@@ -64,11 +103,19 @@ entry:
}
define void @test_mm_mask_store_ss(float* %__W, i8 zeroext %__U, <4 x float> %__A) local_unnamed_addr #1 {
-; CHECK-LABEL: test_mm_mask_store_ss:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vmovss %xmm0, (%rdi) {%k1}
-; CHECK-NEXT: retq
+; CHECK64-LABEL: test_mm_mask_store_ss:
+; CHECK64: # BB#0: # %entry
+; CHECK64-NEXT: kmovw %esi, %k1
+; CHECK64-NEXT: vmovss %xmm0, (%rdi) {%k1}
+; CHECK64-NEXT: retq
+;
+; CHECK32-LABEL: test_mm_mask_store_ss:
+; CHECK32: # BB#0: # %entry
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; CHECK32-NEXT: kmovw %ecx, %k1
+; CHECK32-NEXT: vmovss %xmm0, (%eax) {%k1}
+; CHECK32-NEXT: retl
entry:
%0 = bitcast float* %__W to <16 x float>*
%shuffle.i.i = shufflevector <4 x float> %__A, <4 x float> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -80,11 +127,19 @@ entry:
}
define void @test_mm_mask_store_sd(double* %__W, i8 zeroext %__U, <2 x double> %__A) local_unnamed_addr #1 {
-; CHECK-LABEL: test_mm_mask_store_sd:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vmovsd %xmm0, (%rdi) {%k1}
-; CHECK-NEXT: retq
+; CHECK64-LABEL: test_mm_mask_store_sd:
+; CHECK64: # BB#0: # %entry
+; CHECK64-NEXT: kmovw %esi, %k1
+; CHECK64-NEXT: vmovsd %xmm0, (%rdi) {%k1}
+; CHECK64-NEXT: retq
+;
+; CHECK32-LABEL: test_mm_mask_store_sd:
+; CHECK32: # BB#0: # %entry
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %cl
+; CHECK32-NEXT: kmovw %ecx, %k1
+; CHECK32-NEXT: vmovsd %xmm0, (%eax) {%k1}
+; CHECK32-NEXT: retl
entry:
%0 = bitcast double* %__W to <8 x double>*
%shuffle.i.i = shufflevector <2 x double> %__A, <2 x double> undef, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -95,11 +150,19 @@ entry:
}
define <4 x float> @test_mm_mask_load_ss(<4 x float> %__A, i8 zeroext %__U, float* %__W) local_unnamed_addr #2 {
-; CHECK-LABEL: test_mm_mask_load_ss:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovss (%rsi), %xmm0 {%k1}
-; CHECK-NEXT: retq
+; CHECK64-LABEL: test_mm_mask_load_ss:
+; CHECK64: # BB#0: # %entry
+; CHECK64-NEXT: kmovw %edi, %k1
+; CHECK64-NEXT: vmovss (%rsi), %xmm0 {%k1}
+; CHECK64-NEXT: retq
+;
+; CHECK32-LABEL: test_mm_mask_load_ss:
+; CHECK32: # BB#0: # %entry
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; CHECK32-NEXT: kmovw %ecx, %k1
+; CHECK32-NEXT: vmovss (%eax), %xmm0 {%k1}
+; CHECK32-NEXT: retl
entry:
%shuffle.i = shufflevector <4 x float> %__A, <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <4 x i32> <i32 0, i32 4, i32 4, i32 4>
%0 = bitcast float* %__W to <16 x float>*
@@ -113,11 +176,19 @@ entry:
}
define <2 x double> @test_mm_mask_load_sd(<2 x double> %__A, i8 zeroext %__U, double* %__W) local_unnamed_addr #2 {
-; CHECK-LABEL: test_mm_mask_load_sd:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovsd (%rsi), %xmm0 {%k1}
-; CHECK-NEXT: retq
+; CHECK64-LABEL: test_mm_mask_load_sd:
+; CHECK64: # BB#0: # %entry
+; CHECK64-NEXT: kmovw %edi, %k1
+; CHECK64-NEXT: vmovsd (%rsi), %xmm0 {%k1}
+; CHECK64-NEXT: retq
+;
+; CHECK32-LABEL: test_mm_mask_load_sd:
+; CHECK32: # BB#0: # %entry
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %cl
+; CHECK32-NEXT: kmovw %ecx, %k1
+; CHECK32-NEXT: vmovsd (%eax), %xmm0 {%k1}
+; CHECK32-NEXT: retl
entry:
%shuffle5.i = insertelement <2 x double> %__A, double 0.000000e+00, i32 1
%0 = bitcast double* %__W to <8 x double>*
@@ -130,11 +201,19 @@ entry:
}
define <4 x float> @test_mm_maskz_load_ss(i8 zeroext %__U, float* %__W) local_unnamed_addr #2 {
-; CHECK-LABEL: test_mm_maskz_load_ss:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovss (%rsi), %xmm0 {%k1} {z}
-; CHECK-NEXT: retq
+; CHECK64-LABEL: test_mm_maskz_load_ss:
+; CHECK64: # BB#0: # %entry
+; CHECK64-NEXT: kmovw %edi, %k1
+; CHECK64-NEXT: vmovss (%rsi), %xmm0 {%k1} {z}
+; CHECK64-NEXT: retq
+;
+; CHECK32-LABEL: test_mm_maskz_load_ss:
+; CHECK32: # BB#0: # %entry
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; CHECK32-NEXT: kmovw %ecx, %k1
+; CHECK32-NEXT: vmovss (%eax), %xmm0 {%k1} {z}
+; CHECK32-NEXT: retl
entry:
%0 = bitcast float* %__W to <16 x float>*
%1 = and i8 %__U, 1
@@ -146,11 +225,19 @@ entry:
}
define <2 x double> @test_mm_maskz_load_sd(i8 zeroext %__U, double* %__W) local_unnamed_addr #2 {
-; CHECK-LABEL: test_mm_maskz_load_sd:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovsd (%rsi), %xmm0 {%k1} {z}
-; CHECK-NEXT: retq
+; CHECK64-LABEL: test_mm_maskz_load_sd:
+; CHECK64: # BB#0: # %entry
+; CHECK64-NEXT: kmovw %edi, %k1
+; CHECK64-NEXT: vmovsd (%rsi), %xmm0 {%k1} {z}
+; CHECK64-NEXT: retq
+;
+; CHECK32-LABEL: test_mm_maskz_load_sd:
+; CHECK32: # BB#0: # %entry
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %cl
+; CHECK32-NEXT: kmovw %ecx, %k1
+; CHECK32-NEXT: vmovsd (%eax), %xmm0 {%k1} {z}
+; CHECK32-NEXT: retl
entry:
%0 = bitcast double* %__W to <8 x double>*
%1 = and i8 %__U, 1
diff --git a/test/CodeGen/X86/avx512-logic.ll b/test/CodeGen/X86/avx512-logic.ll
index 119e03dc19da..7153c1ffaaa6 100644
--- a/test/CodeGen/X86/avx512-logic.ll
+++ b/test/CodeGen/X86/avx512-logic.ll
@@ -299,7 +299,7 @@ define <16 x float> @masked_and_v16f32(<16 x float> %a, <16 x float> %b, <16 x f
;
; SKX-LABEL: masked_and_v16f32:
; SKX: ## BB#0:
-; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %zmm1, %zmm0, %zmm2 {%k1}
; SKX-NEXT: vaddps %zmm2, %zmm3, %zmm0
; SKX-NEXT: retq
@@ -324,7 +324,7 @@ define <16 x float> @masked_or_v16f32(<16 x float> %a, <16 x float> %b, <16 x fl
;
; SKX-LABEL: masked_or_v16f32:
; SKX: ## BB#0:
-; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %zmm1, %zmm0, %zmm2 {%k1}
; SKX-NEXT: vaddps %zmm2, %zmm3, %zmm0
; SKX-NEXT: retq
@@ -349,7 +349,7 @@ define <16 x float> @masked_xor_v16f32(<16 x float> %a, <16 x float> %b, <16 x f
;
; SKX-LABEL: masked_xor_v16f32:
; SKX: ## BB#0:
-; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %zmm1, %zmm0, %zmm2 {%k1}
; SKX-NEXT: vaddps %zmm2, %zmm3, %zmm0
; SKX-NEXT: retq
@@ -374,7 +374,7 @@ define <8 x double> @masked_and_v8f64(<8 x double> %a, <8 x double> %b, <8 x dou
;
; SKX-LABEL: masked_and_v8f64:
; SKX: ## BB#0:
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandpd %zmm1, %zmm0, %zmm2 {%k1}
; SKX-NEXT: vaddpd %zmm2, %zmm3, %zmm0
; SKX-NEXT: retq
@@ -399,7 +399,7 @@ define <8 x double> @masked_or_v8f64(<8 x double> %a, <8 x double> %b, <8 x doub
;
; SKX-LABEL: masked_or_v8f64:
; SKX: ## BB#0:
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandpd %zmm1, %zmm0, %zmm2 {%k1}
; SKX-NEXT: vaddpd %zmm2, %zmm3, %zmm0
; SKX-NEXT: retq
@@ -424,7 +424,7 @@ define <8 x double> @masked_xor_v8f64(<8 x double> %a, <8 x double> %b, <8 x dou
;
; SKX-LABEL: masked_xor_v8f64:
; SKX: ## BB#0:
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandpd %zmm1, %zmm0, %zmm2 {%k1}
; SKX-NEXT: vaddpd %zmm2, %zmm3, %zmm0
; SKX-NEXT: retq
@@ -448,7 +448,7 @@ define <8 x i64> @test_mm512_mask_and_epi32(<8 x i64> %__src, i16 zeroext %__k,
;
; SKX-LABEL: test_mm512_mask_and_epi32:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %zmm2, %zmm1, %zmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -470,7 +470,7 @@ define <8 x i64> @test_mm512_mask_or_epi32(<8 x i64> %__src, i16 zeroext %__k, <
;
; SKX-LABEL: test_mm512_mask_or_epi32:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorps %zmm2, %zmm1, %zmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -492,7 +492,7 @@ define <8 x i64> @test_mm512_mask_xor_epi32(<8 x i64> %__src, i16 zeroext %__k,
;
; SKX-LABEL: test_mm512_mask_xor_epi32:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorps %zmm2, %zmm1, %zmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -514,7 +514,7 @@ define <8 x double> @test_mm512_mask_xor_pd(<8 x double> %__W, i8 zeroext %__U,
;
; SKX-LABEL: test_mm512_mask_xor_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorpd %zmm2, %zmm1, %zmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -536,7 +536,7 @@ define <8 x double> @test_mm512_maskz_xor_pd(i8 zeroext %__U, <8 x double> %__A,
;
; SKX-LABEL: test_mm512_maskz_xor_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorpd %zmm1, %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -558,7 +558,7 @@ define <16 x float> @test_mm512_mask_xor_ps(<16 x float> %__W, i16 zeroext %__U,
;
; SKX-LABEL: test_mm512_mask_xor_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorps %zmm2, %zmm1, %zmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -580,7 +580,7 @@ define <16 x float> @test_mm512_maskz_xor_ps(i16 zeroext %__U, <16 x float> %__A
;
; SKX-LABEL: test_mm512_maskz_xor_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorps %zmm1, %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -602,7 +602,7 @@ define <8 x double> @test_mm512_mask_or_pd(<8 x double> %__W, i8 zeroext %__U, <
;
; SKX-LABEL: test_mm512_mask_or_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorpd %zmm1, %zmm2, %zmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -624,7 +624,7 @@ define <8 x double> @test_mm512_maskz_or_pd(i8 zeroext %__U, <8 x double> %__A,
;
; SKX-LABEL: test_mm512_maskz_or_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorpd %zmm0, %zmm1, %zmm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -646,7 +646,7 @@ define <16 x float> @test_mm512_mask_or_ps(<16 x float> %__W, i16 zeroext %__U,
;
; SKX-LABEL: test_mm512_mask_or_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorps %zmm1, %zmm2, %zmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -668,7 +668,7 @@ define <16 x float> @test_mm512_maskz_or_ps(i16 zeroext %__U, <16 x float> %__A,
;
; SKX-LABEL: test_mm512_maskz_or_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorps %zmm0, %zmm1, %zmm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -690,7 +690,7 @@ define <8 x double> @test_mm512_mask_and_pd(<8 x double> %__W, i8 zeroext %__U,
;
; SKX-LABEL: test_mm512_mask_and_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandpd %zmm1, %zmm2, %zmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -712,7 +712,7 @@ define <8 x double> @test_mm512_maskz_and_pd(i8 zeroext %__U, <8 x double> %__A,
;
; SKX-LABEL: test_mm512_maskz_and_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandpd %zmm0, %zmm1, %zmm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -734,7 +734,7 @@ define <16 x float> @test_mm512_mask_and_ps(<16 x float> %__W, i16 zeroext %__U,
;
; SKX-LABEL: test_mm512_mask_and_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %zmm1, %zmm2, %zmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -756,7 +756,7 @@ define <16 x float> @test_mm512_maskz_and_ps(i16 zeroext %__U, <16 x float> %__A
;
; SKX-LABEL: test_mm512_maskz_and_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %zmm0, %zmm1, %zmm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -778,7 +778,7 @@ define <8 x double> @test_mm512_mask_andnot_pd(<8 x double> %__W, i8 zeroext %__
;
; SKX-LABEL: test_mm512_mask_andnot_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnpd %zmm2, %zmm1, %zmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -801,7 +801,7 @@ define <8 x double> @test_mm512_maskz_andnot_pd(i8 zeroext %__U, <8 x double> %_
;
; SKX-LABEL: test_mm512_maskz_andnot_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnpd %zmm1, %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -824,7 +824,7 @@ define <16 x float> @test_mm512_mask_andnot_ps(<16 x float> %__W, i16 zeroext %_
;
; SKX-LABEL: test_mm512_mask_andnot_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnps %zmm2, %zmm1, %zmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -847,7 +847,7 @@ define <16 x float> @test_mm512_maskz_andnot_ps(i16 zeroext %__U, <16 x float> %
;
; SKX-LABEL: test_mm512_maskz_andnot_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnps %zmm1, %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/avx512-mask-op.ll b/test/CodeGen/X86/avx512-mask-op.ll
index b127585dc87b..aec1339d653d 100644
--- a/test/CodeGen/X86/avx512-mask-op.ll
+++ b/test/CodeGen/X86/avx512-mask-op.ll
@@ -1,14 +1,42 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -stack-symbol-ordering=0 -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s --check-prefix=CHECK --check-prefix=KNL
; RUN: llc < %s -stack-symbol-ordering=0 -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s --check-prefix=CHECK --check-prefix=SKX
+; RUN: llc < %s -stack-symbol-ordering=0 -march=x86-64 -mtriple=x86_64-apple-darwin -mattr=+avx512bw | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512BW
+; RUN: llc < %s -stack-symbol-ordering=0 -march=x86-64 -mtriple=x86_64-apple-darwin -mattr=+avx512dq | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512DQ
+
define i16 @mask16(i16 %x) {
-; CHECK-LABEL: mask16:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k0
-; CHECK-NEXT: knotw %k0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; KNL-LABEL: mask16:
+; KNL: ## BB#0:
+; KNL-NEXT: kmovw %edi, %k0
+; KNL-NEXT: knotw %k0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: retq
+;
+; SKX-LABEL: mask16:
+; SKX: ## BB#0:
+; SKX-NEXT: kmovd %edi, %k0
+; SKX-NEXT: knotw %k0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: mask16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k0
+; AVX512BW-NEXT: knotw %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: mask16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw %edi, %k0
+; AVX512DQ-NEXT: knotw %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = xor <16 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
%ret = bitcast <16 x i1> %m1 to i16
@@ -16,12 +44,33 @@ define i16 @mask16(i16 %x) {
}
define i32 @mask16_zext(i16 %x) {
-; CHECK-LABEL: mask16_zext:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k0
-; CHECK-NEXT: knotw %k0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; KNL-LABEL: mask16_zext:
+; KNL: ## BB#0:
+; KNL-NEXT: kmovw %edi, %k0
+; KNL-NEXT: knotw %k0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: mask16_zext:
+; SKX: ## BB#0:
+; SKX-NEXT: kmovd %edi, %k0
+; SKX-NEXT: knotw %k0, %k0
+; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: mask16_zext:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k0
+; AVX512BW-NEXT: knotw %k0, %k0
+; AVX512BW-NEXT: kmovw %k0, %eax
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: mask16_zext:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw %edi, %k0
+; AVX512DQ-NEXT: knotw %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = xor <16 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
%m2 = bitcast <16 x i1> %m1 to i16
@@ -35,14 +84,32 @@ define i8 @mask8(i8 %x) {
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: knotw %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: mask8:
; SKX: ## BB#0:
-; SKX-NEXT: kmovb %edi, %k0
+; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: knotb %k0, %k0
-; SKX-NEXT: kmovb %k0, %eax
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: mask8:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k0
+; AVX512BW-NEXT: knotw %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: mask8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw %edi, %k0
+; AVX512DQ-NEXT: knotb %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
%ret = bitcast <8 x i1> %m1 to i8
@@ -60,10 +127,25 @@ define i32 @mask8_zext(i8 %x) {
;
; SKX-LABEL: mask8_zext:
; SKX: ## BB#0:
-; SKX-NEXT: kmovb %edi, %k0
+; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: knotb %k0, %k0
; SKX-NEXT: kmovb %k0, %eax
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: mask8_zext:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k0
+; AVX512BW-NEXT: knotw %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: movzbl %al, %eax
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: mask8_zext:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw %edi, %k0
+; AVX512DQ-NEXT: knotb %k0, %k0
+; AVX512DQ-NEXT: kmovb %k0, %eax
+; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
%m2 = bitcast <8 x i1> %m1 to i8
@@ -102,6 +184,22 @@ define void @mask8_mem(i8* %ptr) {
; SKX-NEXT: knotb %k0, %k0
; SKX-NEXT: kmovb %k0, (%rdi)
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: mask8_mem:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: movzbl (%rdi), %eax
+; AVX512BW-NEXT: kmovd %eax, %k0
+; AVX512BW-NEXT: knotw %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: movb %al, (%rdi)
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: mask8_mem:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovb (%rdi), %k0
+; AVX512DQ-NEXT: knotb %k0, %k0
+; AVX512DQ-NEXT: kmovb %k0, (%rdi)
+; AVX512DQ-NEXT: retq
%x = load i8, i8* %ptr, align 4
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
@@ -129,15 +227,49 @@ define i16 @mand16(i16 %x, i16 %y) {
}
define i16 @mand16_mem(<16 x i1>* %x, <16 x i1>* %y) {
-; CHECK-LABEL: mand16_mem:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw (%rdi), %k0
-; CHECK-NEXT: kmovw (%rsi), %k1
-; CHECK-NEXT: kandw %k1, %k0, %k2
-; CHECK-NEXT: kxorw %k1, %k0, %k0
-; CHECK-NEXT: korw %k0, %k2, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; KNL-LABEL: mand16_mem:
+; KNL: ## BB#0:
+; KNL-NEXT: kmovw (%rdi), %k0
+; KNL-NEXT: kmovw (%rsi), %k1
+; KNL-NEXT: kandw %k1, %k0, %k2
+; KNL-NEXT: kxorw %k1, %k0, %k0
+; KNL-NEXT: korw %k0, %k2, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: retq
+;
+; SKX-LABEL: mand16_mem:
+; SKX: ## BB#0:
+; SKX-NEXT: kmovw (%rdi), %k0
+; SKX-NEXT: kmovw (%rsi), %k1
+; SKX-NEXT: kandw %k1, %k0, %k2
+; SKX-NEXT: kxorw %k1, %k0, %k0
+; SKX-NEXT: korw %k0, %k2, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: mand16_mem:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovw (%rdi), %k0
+; AVX512BW-NEXT: kmovw (%rsi), %k1
+; AVX512BW-NEXT: kandw %k1, %k0, %k2
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: korw %k0, %k2, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: mand16_mem:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw (%rdi), %k0
+; AVX512DQ-NEXT: kmovw (%rsi), %k1
+; AVX512DQ-NEXT: kandw %k1, %k0, %k2
+; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
+; AVX512DQ-NEXT: korw %k0, %k2, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512DQ-NEXT: retq
%ma = load <16 x i1>, <16 x i1>* %x
%mb = load <16 x i1>, <16 x i1>* %y
%mc = and <16 x i1> %ma, %mb
@@ -153,14 +285,32 @@ define i8 @shuf_test1(i16 %v) nounwind {
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: kshiftrw $8, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: shuf_test1:
; SKX: ## BB#0:
-; SKX-NEXT: kmovw %edi, %k0
+; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: kshiftrw $8, %k0, %k0
-; SKX-NEXT: kmovb %k0, %eax
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: shuf_test1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k0
+; AVX512BW-NEXT: kshiftrw $8, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: shuf_test1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw %edi, %k0
+; AVX512DQ-NEXT: kshiftrw $8, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512DQ-NEXT: retq
%v1 = bitcast i16 %v to <16 x i1>
%mask = shufflevector <16 x i1> %v1, <16 x i1> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%mask1 = bitcast <8 x i1> %mask to i8
@@ -168,14 +318,44 @@ define i8 @shuf_test1(i16 %v) nounwind {
}
define i32 @zext_test1(<16 x i32> %a, <16 x i32> %b) {
-; CHECK-LABEL: zext_test1:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
-; CHECK-NEXT: kshiftlw $10, %k0, %k0
-; CHECK-NEXT: kshiftrw $15, %k0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andl $1, %eax
-; CHECK-NEXT: retq
+; KNL-LABEL: zext_test1:
+; KNL: ## BB#0:
+; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
+; KNL-NEXT: kshiftlw $10, %k0, %k0
+; KNL-NEXT: kshiftrw $15, %k0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: zext_test1:
+; SKX: ## BB#0:
+; SKX-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
+; SKX-NEXT: kshiftlw $10, %k0, %k0
+; SKX-NEXT: kshiftrw $15, %k0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: zext_test1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
+; AVX512BW-NEXT: kshiftlw $10, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: andl $1, %eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: zext_test1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
+; AVX512DQ-NEXT: kshiftlw $10, %k0, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: andl $1, %eax
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%cmp_res = icmp ugt <16 x i32> %a, %b
%cmp_res.i1 = extractelement <16 x i1> %cmp_res, i32 5
%res = zext i1 %cmp_res.i1 to i32
@@ -183,15 +363,48 @@ define i32 @zext_test1(<16 x i32> %a, <16 x i32> %b) {
}
define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
-; CHECK-LABEL: zext_test2:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
-; CHECK-NEXT: kshiftlw $10, %k0, %k0
-; CHECK-NEXT: kshiftrw $15, %k0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andl $1, %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
-; CHECK-NEXT: retq
+; KNL-LABEL: zext_test2:
+; KNL: ## BB#0:
+; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
+; KNL-NEXT: kshiftlw $10, %k0, %k0
+; KNL-NEXT: kshiftrw $15, %k0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: retq
+;
+; SKX-LABEL: zext_test2:
+; SKX: ## BB#0:
+; SKX-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
+; SKX-NEXT: kshiftlw $10, %k0, %k0
+; SKX-NEXT: kshiftrw $15, %k0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: zext_test2:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
+; AVX512BW-NEXT: kshiftlw $10, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: andl $1, %eax
+; AVX512BW-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: zext_test2:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
+; AVX512DQ-NEXT: kshiftlw $10, %k0, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: andl $1, %eax
+; AVX512DQ-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%cmp_res = icmp ugt <16 x i32> %a, %b
%cmp_res.i1 = extractelement <16 x i1> %cmp_res, i32 5
%res = zext i1 %cmp_res.i1 to i16
@@ -199,15 +412,48 @@ define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
}
define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
-; CHECK-LABEL: zext_test3:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
-; CHECK-NEXT: kshiftlw $10, %k0, %k0
-; CHECK-NEXT: kshiftrw $15, %k0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andl $1, %eax
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
-; CHECK-NEXT: retq
+; KNL-LABEL: zext_test3:
+; KNL: ## BB#0:
+; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
+; KNL-NEXT: kshiftlw $10, %k0, %k0
+; KNL-NEXT: kshiftrw $15, %k0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; KNL-NEXT: retq
+;
+; SKX-LABEL: zext_test3:
+; SKX: ## BB#0:
+; SKX-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
+; SKX-NEXT: kshiftlw $10, %k0, %k0
+; SKX-NEXT: kshiftrw $15, %k0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: zext_test3:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
+; AVX512BW-NEXT: kshiftlw $10, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: andl $1, %eax
+; AVX512BW-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: zext_test3:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
+; AVX512DQ-NEXT: kshiftlw $10, %k0, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: andl $1, %eax
+; AVX512DQ-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%cmp_res = icmp ugt <16 x i32> %a, %b
%cmp_res.i1 = extractelement <16 x i1> %cmp_res, i32 5
%res = zext i1 %cmp_res.i1 to i8
@@ -231,6 +477,23 @@ define i8 @conv1(<8 x i1>* %R) {
; SKX-NEXT: movb $-2, -{{[0-9]+}}(%rsp)
; SKX-NEXT: movb $-2, %al
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: conv1:
+; AVX512BW: ## BB#0: ## %entry
+; AVX512BW-NEXT: kxnorw %k0, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: movb %al, (%rdi)
+; AVX512BW-NEXT: movb $-2, -{{[0-9]+}}(%rsp)
+; AVX512BW-NEXT: movb $-2, %al
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: conv1:
+; AVX512DQ: ## BB#0: ## %entry
+; AVX512DQ-NEXT: kxnorw %k0, %k0, %k0
+; AVX512DQ-NEXT: kmovb %k0, (%rdi)
+; AVX512DQ-NEXT: movb $-2, -{{[0-9]+}}(%rsp)
+; AVX512DQ-NEXT: movb $-2, %al
+; AVX512DQ-NEXT: retq
entry:
store <8 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, <8 x i1>* %R
@@ -257,7 +520,28 @@ define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1
; SKX-NEXT: vpcmpgtq %ymm3, %ymm2, %k1
; SKX-NEXT: kandnw %k0, %k1, %k0
; SKX-NEXT: vpmovm2d %k0, %xmm0
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test4:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm1
+; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512BW-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test4:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512DQ-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm1
+; AVX512DQ-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512DQ-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%x_gt_y = icmp sgt <4 x i64> %x, %y
%x1_gt_y1 = icmp sgt <4 x i64> %x1, %y1
%res = icmp sgt <4 x i1>%x_gt_y, %x1_gt_y1
@@ -280,6 +564,20 @@ define <2 x i64> @test5(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64> %y1
; SKX-NEXT: kandnw %k1, %k0, %k0
; SKX-NEXT: vpmovm2q %k0, %xmm0
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test5:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm1
+; AVX512BW-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test5:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; AVX512DQ-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm1
+; AVX512DQ-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; AVX512DQ-NEXT: retq
%x_gt_y = icmp slt <2 x i64> %x, %y
%x1_gt_y1 = icmp sgt <2 x i64> %x1, %y1
%res = icmp slt <2 x i1>%x_gt_y, %x1_gt_y1
@@ -316,10 +614,34 @@ define void @test7(<8 x i1> %mask) {
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k0
; SKX-NEXT: movb $85, %al
-; SKX-NEXT: kmovb %eax, %k1
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: korb %k1, %k0, %k0
; SKX-NEXT: ktestb %k0, %k0
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test7:
+; AVX512BW: ## BB#0: ## %allocas
+; AVX512BW-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX512BW-NEXT: vpmovw2m %zmm0, %k0
+; AVX512BW-NEXT: movb $85, %al
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: testb %al, %al
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test7:
+; AVX512DQ: ## BB#0: ## %allocas
+; AVX512DQ-NEXT: vpmovsxwq %xmm0, %zmm0
+; AVX512DQ-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT: movb $85, %al
+; AVX512DQ-NEXT: kmovw %eax, %k1
+; AVX512DQ-NEXT: korb %k1, %k0, %k0
+; AVX512DQ-NEXT: ktestb %k0, %k0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
allocas:
%a= or <8 x i1> %mask, <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>
%b = bitcast <8 x i1> %a to i8
@@ -356,11 +678,45 @@ define <16 x i8> @test8(<16 x i32>%a, <16 x i32>%b, i32 %a1, i32 %b1) {
; SKX-NEXT: ## BB#2:
; SKX-NEXT: vpcmpltud %zmm2, %zmm1, %k0
; SKX-NEXT: vpmovm2b %k0, %xmm0
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
; SKX-NEXT: LBB17_1:
; SKX-NEXT: vpcmpgtd %zmm2, %zmm0, %k0
; SKX-NEXT: vpmovm2b %k0, %xmm0
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test8:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpxord %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT: cmpl %esi, %edi
+; AVX512BW-NEXT: jg LBB17_1
+; AVX512BW-NEXT: ## BB#2:
+; AVX512BW-NEXT: vpcmpltud %zmm2, %zmm1, %k0
+; AVX512BW-NEXT: jmp LBB17_3
+; AVX512BW-NEXT: LBB17_1:
+; AVX512BW-NEXT: vpcmpgtd %zmm2, %zmm0, %k0
+; AVX512BW-NEXT: LBB17_3:
+; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
+; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpxord %zmm2, %zmm2, %zmm2
+; AVX512DQ-NEXT: cmpl %esi, %edi
+; AVX512DQ-NEXT: jg LBB17_1
+; AVX512DQ-NEXT: ## BB#2:
+; AVX512DQ-NEXT: vpcmpltud %zmm2, %zmm1, %k0
+; AVX512DQ-NEXT: jmp LBB17_3
+; AVX512DQ-NEXT: LBB17_1:
+; AVX512DQ-NEXT: vpcmpgtd %zmm2, %zmm0, %k0
+; AVX512DQ-NEXT: LBB17_3:
+; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%cond = icmp sgt i32 %a1, %b1
%cmp1 = icmp sgt <16 x i32> %a, zeroinitializer
%cmp2 = icmp ult <16 x i32> %b, zeroinitializer
@@ -398,6 +754,39 @@ define <16 x i1> @test9(<16 x i1>%a, <16 x i1>%b, i32 %a1, i32 %b1) {
; SKX-NEXT: vpmovb2m %xmm0, %k0
; SKX-NEXT: vpmovm2b %k0, %xmm0
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test9:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: cmpl %esi, %edi
+; AVX512BW-NEXT: jg LBB18_1
+; AVX512BW-NEXT: ## BB#2:
+; AVX512BW-NEXT: vpsllw $7, %xmm1, %xmm0
+; AVX512BW-NEXT: jmp LBB18_3
+; AVX512BW-NEXT: LBB18_1:
+; AVX512BW-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX512BW-NEXT: LBB18_3:
+; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
+; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
+; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test9:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: cmpl %esi, %edi
+; AVX512DQ-NEXT: jg LBB18_1
+; AVX512DQ-NEXT: ## BB#2:
+; AVX512DQ-NEXT: vpmovsxbd %xmm1, %zmm0
+; AVX512DQ-NEXT: jmp LBB18_3
+; AVX512DQ-NEXT: LBB18_1:
+; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512DQ-NEXT: LBB18_3:
+; AVX512DQ-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%mask = icmp sgt i32 %a1, %b1
%c = select i1 %mask, <16 x i1>%a, <16 x i1>%b
ret <16 x i1>%c
@@ -430,6 +819,24 @@ define <4 x i1> @test11(<4 x i1>%a, <4 x i1>%b, i32 %a1, i32 %b1) {
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %xmm0
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test11:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: cmpl %esi, %edi
+; AVX512BW-NEXT: jg LBB20_2
+; AVX512BW-NEXT: ## BB#1:
+; AVX512BW-NEXT: vmovaps %xmm1, %xmm0
+; AVX512BW-NEXT: LBB20_2:
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test11:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: cmpl %esi, %edi
+; AVX512DQ-NEXT: jg LBB20_2
+; AVX512DQ-NEXT: ## BB#1:
+; AVX512DQ-NEXT: vmovaps %xmm1, %xmm0
+; AVX512DQ-NEXT: LBB20_2:
+; AVX512DQ-NEXT: retq
%mask = icmp sgt i32 %a1, %b1
%c = select i1 %mask, <4 x i1>%a, <4 x i1>%b
ret <4 x i1>%c
@@ -480,9 +887,33 @@ define <16 x i1> @test15(i32 %x, i32 %y) {
; SKX-NEXT: movw $21845, %ax ## imm = 0x5555
; SKX-NEXT: movw $1, %cx
; SKX-NEXT: cmovgw %ax, %cx
-; SKX-NEXT: kmovw %ecx, %k0
+; SKX-NEXT: kmovd %ecx, %k0
; SKX-NEXT: vpmovm2b %k0, %xmm0
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test15:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: cmpl %esi, %edi
+; AVX512BW-NEXT: movw $21845, %ax ## imm = 0x5555
+; AVX512BW-NEXT: movw $1, %cx
+; AVX512BW-NEXT: cmovgw %ax, %cx
+; AVX512BW-NEXT: kmovd %ecx, %k0
+; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
+; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test15:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: cmpl %esi, %edi
+; AVX512DQ-NEXT: movw $21845, %ax ## imm = 0x5555
+; AVX512DQ-NEXT: movw $1, %cx
+; AVX512DQ-NEXT: cmovgw %ax, %cx
+; AVX512DQ-NEXT: kmovw %ecx, %k0
+; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%a = bitcast i16 21845 to <16 x i1>
%b = bitcast i16 1 to <16 x i1>
%mask = icmp sgt i32 %x, %y
@@ -509,18 +940,13 @@ define <64 x i8> @test16(i64 %x) {
; KNL-NEXT: movl %edi, {{[0-9]+}}(%rsp)
; KNL-NEXT: kmovw (%rsp), %k1
; KNL-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
; KNL-NEXT: vpmovdb %zmm0, %xmm0
-; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vpmovdb %zmm1, %xmm1
-; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; KNL-NEXT: movl $1, %eax
-; KNL-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; KNL-NEXT: vpsllw $7, %ymm0, %ymm0
-; KNL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; KNL-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; KNL-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
+; KNL-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; KNL-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; KNL-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
; KNL-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
@@ -528,6 +954,10 @@ define <64 x i8> @test16(i64 %x) {
; KNL-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
; KNL-NEXT: vpmovdb %zmm2, %xmm2
; KNL-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; KNL-NEXT: vpsllw $7, %ymm0, %ymm0
+; KNL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; KNL-NEXT: vpxor %ymm2, %ymm2, %ymm2
+; KNL-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm0
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
@@ -540,13 +970,67 @@ define <64 x i8> @test16(i64 %x) {
; SKX-NEXT: vpmovm2b %k1, %zmm0
; SKX-NEXT: vpsllq $40, %xmm0, %xmm0
; SKX-NEXT: vpmovm2b %k0, %zmm1
-; SKX-NEXT: vmovdqu {{.*#+}} ymm2 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; SKX-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
-; SKX-NEXT: vextracti64x4 $1, %zmm1, %ymm1
-; SKX-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; SKX-NEXT: movl $32, %eax
+; SKX-NEXT: kmovd %eax, %k1
+; SKX-NEXT: vpblendmb %ymm0, %ymm1, %ymm0 {%k1}
+; SKX-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[4,5,6,7]
; SKX-NEXT: vpmovb2m %zmm0, %k0
; SKX-NEXT: vpmovm2b %k0, %zmm0
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovq %rdi, %k0
+; AVX512BW-NEXT: kxnorw %k0, %k0, %k1
+; AVX512BW-NEXT: kshiftrw $15, %k1, %k1
+; AVX512BW-NEXT: vpmovm2b %k1, %zmm0
+; AVX512BW-NEXT: vpsllq $40, %xmm0, %xmm0
+; AVX512BW-NEXT: vpmovm2b %k0, %zmm1
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512BW-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[4,5,6,7]
+; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
+; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: pushq %rbp
+; AVX512DQ-NEXT: Lcfi0:
+; AVX512DQ-NEXT: .cfi_def_cfa_offset 16
+; AVX512DQ-NEXT: Lcfi1:
+; AVX512DQ-NEXT: .cfi_offset %rbp, -16
+; AVX512DQ-NEXT: movq %rsp, %rbp
+; AVX512DQ-NEXT: Lcfi2:
+; AVX512DQ-NEXT: .cfi_def_cfa_register %rbp
+; AVX512DQ-NEXT: andq $-32, %rsp
+; AVX512DQ-NEXT: subq $64, %rsp
+; AVX512DQ-NEXT: movl %edi, (%rsp)
+; AVX512DQ-NEXT: shrq $32, %rdi
+; AVX512DQ-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; AVX512DQ-NEXT: kmovw (%rsp), %k0
+; AVX512DQ-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; AVX512DQ-NEXT: vpmovm2d %k1, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vpmovm2d %k0, %zmm1
+; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512DQ-NEXT: movl $1, %eax
+; AVX512DQ-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512DQ-NEXT: kmovw {{[0-9]+}}(%rsp), %k0
+; AVX512DQ-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; AVX512DQ-NEXT: vpmovm2d %k0, %zmm1
+; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512DQ-NEXT: vpmovm2d %k1, %zmm2
+; AVX512DQ-NEXT: vpmovdb %zmm2, %xmm2
+; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsllw $7, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512DQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm0
+; AVX512DQ-NEXT: movq %rbp, %rsp
+; AVX512DQ-NEXT: popq %rbp
+; AVX512DQ-NEXT: retq
%a = bitcast i64 %x to <64 x i1>
%b = insertelement <64 x i1>%a, i1 true, i32 5
%c = sext <64 x i1>%b to <64 x i8>
@@ -572,20 +1056,15 @@ define <64 x i8> @test17(i64 %x, i32 %y, i32 %z) {
; KNL-NEXT: movl %edi, {{[0-9]+}}(%rsp)
; KNL-NEXT: kmovw (%rsp), %k1
; KNL-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
; KNL-NEXT: vpmovdb %zmm0, %xmm0
-; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vpmovdb %zmm1, %xmm1
-; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; KNL-NEXT: xorl %eax, %eax
; KNL-NEXT: cmpl %edx, %esi
; KNL-NEXT: setg %al
-; KNL-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; KNL-NEXT: vpsllw $7, %ymm0, %ymm0
-; KNL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; KNL-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; KNL-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
+; KNL-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; KNL-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; KNL-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
; KNL-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
@@ -593,6 +1072,10 @@ define <64 x i8> @test17(i64 %x, i32 %y, i32 %z) {
; KNL-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
; KNL-NEXT: vpmovdb %zmm2, %xmm2
; KNL-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; KNL-NEXT: vpsllw $7, %ymm0, %ymm0
+; KNL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; KNL-NEXT: vpxor %ymm2, %ymm2, %ymm2
+; KNL-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm0
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
@@ -603,17 +1086,75 @@ define <64 x i8> @test17(i64 %x, i32 %y, i32 %z) {
; SKX-NEXT: cmpl %edx, %esi
; SKX-NEXT: setg %al
; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovw %eax, %k1
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpmovm2b %k1, %zmm0
; SKX-NEXT: vpsllq $40, %xmm0, %xmm0
; SKX-NEXT: vpmovm2b %k0, %zmm1
-; SKX-NEXT: vmovdqu {{.*#+}} ymm2 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; SKX-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
-; SKX-NEXT: vextracti64x4 $1, %zmm1, %ymm1
-; SKX-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; SKX-NEXT: movl $32, %eax
+; SKX-NEXT: kmovd %eax, %k1
+; SKX-NEXT: vpblendmb %ymm0, %ymm1, %ymm0 {%k1}
+; SKX-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[4,5,6,7]
; SKX-NEXT: vpmovb2m %zmm0, %k0
; SKX-NEXT: vpmovm2b %k0, %zmm0
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test17:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovq %rdi, %k0
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: setg %al
+; AVX512BW-NEXT: andl $1, %eax
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: vpmovm2b %k1, %zmm0
+; AVX512BW-NEXT: vpsllq $40, %xmm0, %xmm0
+; AVX512BW-NEXT: vpmovm2b %k0, %zmm1
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512BW-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[4,5,6,7]
+; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
+; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test17:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: pushq %rbp
+; AVX512DQ-NEXT: Lcfi3:
+; AVX512DQ-NEXT: .cfi_def_cfa_offset 16
+; AVX512DQ-NEXT: Lcfi4:
+; AVX512DQ-NEXT: .cfi_offset %rbp, -16
+; AVX512DQ-NEXT: movq %rsp, %rbp
+; AVX512DQ-NEXT: Lcfi5:
+; AVX512DQ-NEXT: .cfi_def_cfa_register %rbp
+; AVX512DQ-NEXT: andq $-32, %rsp
+; AVX512DQ-NEXT: subq $64, %rsp
+; AVX512DQ-NEXT: movl %edi, (%rsp)
+; AVX512DQ-NEXT: shrq $32, %rdi
+; AVX512DQ-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; AVX512DQ-NEXT: kmovw (%rsp), %k0
+; AVX512DQ-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; AVX512DQ-NEXT: vpmovm2d %k1, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vpmovm2d %k0, %zmm1
+; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512DQ-NEXT: xorl %eax, %eax
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: setg %al
+; AVX512DQ-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512DQ-NEXT: kmovw {{[0-9]+}}(%rsp), %k0
+; AVX512DQ-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; AVX512DQ-NEXT: vpmovm2d %k0, %zmm1
+; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512DQ-NEXT: vpmovm2d %k1, %zmm2
+; AVX512DQ-NEXT: vpmovdb %zmm2, %xmm2
+; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsllw $7, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512DQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm0
+; AVX512DQ-NEXT: movq %rbp, %rsp
+; AVX512DQ-NEXT: popq %rbp
+; AVX512DQ-NEXT: retq
%a = bitcast i64 %x to <64 x i1>
%b = icmp sgt i32 %y, %z
%c = insertelement <64 x i1>%a, i1 %b, i32 5
@@ -646,8 +1187,8 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
;
; SKX-LABEL: test18:
; SKX: ## BB#0:
-; SKX-NEXT: kmovb %edi, %k0
-; SKX-NEXT: kmovw %esi, %k1
+; SKX-NEXT: kmovd %edi, %k0
+; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kshiftlw $7, %k1, %k2
; SKX-NEXT: kshiftrw $15, %k2, %k2
; SKX-NEXT: kshiftlw $6, %k1, %k1
@@ -662,7 +1203,53 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; SKX-NEXT: kshiftlb $7, %k2, %k1
; SKX-NEXT: korb %k1, %k0, %k0
; SKX-NEXT: vpmovm2w %k0, %xmm0
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test18:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: kmovd %esi, %k2
+; AVX512BW-NEXT: kshiftlw $7, %k2, %k0
+; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
+; AVX512BW-NEXT: kshiftlw $6, %k2, %k2
+; AVX512BW-NEXT: kshiftrw $15, %k2, %k2
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
+; AVX512BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT: vpsllq $63, %zmm2, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: kshiftlw $1, %k1, %k1
+; AVX512BW-NEXT: kshiftrw $1, %k1, %k1
+; AVX512BW-NEXT: kshiftlw $7, %k0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k0
+; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
+; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test18:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw %edi, %k0
+; AVX512DQ-NEXT: kmovw %esi, %k1
+; AVX512DQ-NEXT: kshiftlw $7, %k1, %k2
+; AVX512DQ-NEXT: kshiftrw $15, %k2, %k2
+; AVX512DQ-NEXT: kshiftlw $6, %k1, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
+; AVX512DQ-NEXT: vpmovm2q %k1, %zmm1
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
+; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; AVX512DQ-NEXT: vpmovq2m %zmm2, %k0
+; AVX512DQ-NEXT: kshiftlb $1, %k0, %k0
+; AVX512DQ-NEXT: kshiftrb $1, %k0, %k0
+; AVX512DQ-NEXT: kshiftlb $7, %k2, %k1
+; AVX512DQ-NEXT: korb %k1, %k0, %k0
+; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
+; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
%b1 = bitcast i16 %y to <16 x i1>
%el1 = extractelement <16 x i1>%b1, i32 8
@@ -691,6 +1278,26 @@ define <32 x i16> @test21(<32 x i16> %x , <32 x i1> %mask) nounwind readnone {
; SKX-NEXT: vpmovb2m %ymm1, %k1
; SKX-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test21:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsllw $7, %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test21:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX512DQ-NEXT: vpsllw $15, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsraw $15, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpand %ymm0, %ymm3, %ymm0
+; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX512DQ-NEXT: vpsllw $15, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpsraw $15, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpand %ymm1, %ymm2, %ymm1
+; AVX512DQ-NEXT: retq
%ret = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> zeroinitializer
ret <32 x i16> %ret
}
@@ -711,6 +1318,25 @@ define void @test22(<4 x i1> %a, <4 x i1>* %addr) {
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k0
; SKX-NEXT: kmovb %k0, (%rdi)
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test22:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512BW-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: movb %al, (%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test22:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512DQ-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT: kmovb %k0, (%rdi)
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
store <4 x i1> %a, <4 x i1>* %addr
ret void
}
@@ -731,6 +1357,25 @@ define void @test23(<2 x i1> %a, <2 x i1>* %addr) {
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k0
; SKX-NEXT: kmovb %k0, (%rdi)
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test23:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: movb %al, (%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test23:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT: kmovb %k0, (%rdi)
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
store <2 x i1> %a, <2 x i1>* %addr
ret void
}
@@ -750,12 +1395,33 @@ define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
; SKX-LABEL: store_v1i1:
; SKX: ## BB#0:
; SKX-NEXT: andl $1, %edi
-; SKX-NEXT: kmovw %edi, %k0
+; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: kxnorw %k0, %k0, %k1
; SKX-NEXT: kshiftrw $15, %k1, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: kmovb %k0, (%rsi)
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: store_v1i1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: andl $1, %edi
+; AVX512BW-NEXT: kmovd %edi, %k0
+; AVX512BW-NEXT: kxnorw %k0, %k0, %k1
+; AVX512BW-NEXT: kshiftrw $15, %k1, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: movb %al, (%rsi)
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: store_v1i1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: andl $1, %edi
+; AVX512DQ-NEXT: kmovw %edi, %k0
+; AVX512DQ-NEXT: kxnorw %k0, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
+; AVX512DQ-NEXT: kmovb %k0, (%rsi)
+; AVX512DQ-NEXT: retq
%x = xor <1 x i1> %c, <i1 1>
store <1 x i1> %x, <1 x i1>* %ptr, align 4
ret void
@@ -778,6 +1444,25 @@ define void @store_v2i1(<2 x i1> %c , <2 x i1>* %ptr) {
; SKX-NEXT: knotw %k0, %k0
; SKX-NEXT: kmovb %k0, (%rdi)
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: store_v2i1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: movb %al, (%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: store_v2i1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT: kmovb %k0, (%rdi)
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%x = xor <2 x i1> %c, <i1 1, i1 1>
store <2 x i1> %x, <2 x i1>* %ptr, align 4
ret void
@@ -801,6 +1486,27 @@ define void @store_v4i1(<4 x i1> %c , <4 x i1>* %ptr) {
; SKX-NEXT: knotw %k0, %k0
; SKX-NEXT: kmovb %k0, (%rdi)
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: store_v4i1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX512BW-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: movb %al, (%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: store_v4i1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX512DQ-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT: kmovb %k0, (%rdi)
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%x = xor <4 x i1> %c, <i1 1, i1 1, i1 1, i1 1>
store <4 x i1> %x, <4 x i1>* %ptr, align 4
ret void
@@ -824,6 +1530,26 @@ define void @store_v8i1(<8 x i1> %c , <8 x i1>* %ptr) {
; SKX-NEXT: knotb %k0, %k0
; SKX-NEXT: kmovb %k0, (%rdi)
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: store_v8i1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX512BW-NEXT: vpmovw2m %zmm0, %k0
+; AVX512BW-NEXT: knotw %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: movb %al, (%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: store_v8i1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpmovsxwq %xmm0, %zmm0
+; AVX512DQ-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT: knotb %k0, %k0
+; AVX512DQ-NEXT: kmovb %k0, (%rdi)
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%x = xor <8 x i1> %c, <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>
store <8 x i1> %x, <8 x i1>* %ptr, align 4
ret void
@@ -846,6 +1572,25 @@ define void @store_v16i1(<16 x i1> %c , <16 x i1>* %ptr) {
; SKX-NEXT: knotw %k0, %k0
; SKX-NEXT: kmovw %k0, (%rdi)
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: store_v16i1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
+; AVX512BW-NEXT: knotw %k0, %k0
+; AVX512BW-NEXT: kmovw %k0, (%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: store_v16i1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512DQ-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT: knotw %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, (%rdi)
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%x = xor <16 x i1> %c, <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>
store <16 x i1> %x, <16 x i1>* %ptr, align 4
ret void
@@ -884,13 +1629,40 @@ define void @f1(i32 %c) {
; SKX-NEXT: movzbl {{.*}}(%rip), %edi
; SKX-NEXT: movl %edi, %eax
; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovw %eax, %k0
+; SKX-NEXT: kmovd %eax, %k0
; SKX-NEXT: kxnorw %k0, %k0, %k1
; SKX-NEXT: kshiftrw $15, %k1, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: kmovb %k0, {{.*}}(%rip)
; SKX-NEXT: xorl $1, %edi
; SKX-NEXT: jmp _f2 ## TAILCALL
+;
+; AVX512BW-LABEL: f1:
+; AVX512BW: ## BB#0: ## %entry
+; AVX512BW-NEXT: movzbl {{.*}}(%rip), %edi
+; AVX512BW-NEXT: movl %edi, %eax
+; AVX512BW-NEXT: andl $1, %eax
+; AVX512BW-NEXT: kmovd %eax, %k0
+; AVX512BW-NEXT: kxnorw %k0, %k0, %k1
+; AVX512BW-NEXT: kshiftrw $15, %k1, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: movb %al, {{.*}}(%rip)
+; AVX512BW-NEXT: xorl $1, %edi
+; AVX512BW-NEXT: jmp _f2 ## TAILCALL
+;
+; AVX512DQ-LABEL: f1:
+; AVX512DQ: ## BB#0: ## %entry
+; AVX512DQ-NEXT: movzbl {{.*}}(%rip), %edi
+; AVX512DQ-NEXT: movl %edi, %eax
+; AVX512DQ-NEXT: andl $1, %eax
+; AVX512DQ-NEXT: kmovw %eax, %k0
+; AVX512DQ-NEXT: kxnorw %k0, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
+; AVX512DQ-NEXT: kmovb %k0, {{.*}}(%rip)
+; AVX512DQ-NEXT: xorl $1, %edi
+; AVX512DQ-NEXT: jmp _f2 ## TAILCALL
entry:
%.b1 = load i1, i1* @f1.v, align 4
%not..b1 = xor i1 %.b1, true
@@ -927,14 +1699,8 @@ define void @store_i8_i1(i8 %x, i1 *%y) {
define <32 x i16> @test_build_vec_v32i1(<32 x i16> %x) {
; KNL-LABEL: test_build_vec_v32i1:
; KNL: ## BB#0:
-; KNL-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; KNL-NEXT: vpsllw $15, %ymm2, %ymm2
-; KNL-NEXT: vpsraw $15, %ymm2, %ymm2
-; KNL-NEXT: vpand %ymm0, %ymm2, %ymm0
-; KNL-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; KNL-NEXT: vpsllw $15, %ymm2, %ymm2
-; KNL-NEXT: vpsraw $15, %ymm2, %ymm2
-; KNL-NEXT: vpand %ymm1, %ymm2, %ymm1
+; KNL-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; KNL-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: test_build_vec_v32i1:
@@ -943,6 +1709,19 @@ define <32 x i16> @test_build_vec_v32i1(<32 x i16> %x) {
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test_build_vec_v32i1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: movl $1497715861, %eax ## imm = 0x59455495
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test_build_vec_v32i1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX512DQ-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
%ret = select <32 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 true, i1 false>, <32 x i16> %x, <32 x i16> zeroinitializer
ret <32 x i16> %ret
}
@@ -960,6 +1739,19 @@ define <64 x i8> @test_build_vec_v64i1(<64 x i8> %x) {
; SKX-NEXT: kmovq %rax, %k1
; SKX-NEXT: vmovdqu8 %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test_build_vec_v64i1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: movabsq $6432645796886517060, %rax ## imm = 0x5945594549549544
+; AVX512BW-NEXT: kmovq %rax, %k1
+; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test_build_vec_v64i1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX512DQ-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
%ret = select <64 x i1> <i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 true, i1 false>, <64 x i8> %x, <64 x i8> zeroinitializer
ret <64 x i8> %ret
}
@@ -991,10 +1783,47 @@ define void @ktest_1(<8 x double> %in, double * %base) {
; SKX-NEXT: je LBB41_2
; SKX-NEXT: ## BB#1: ## %L1
; SKX-NEXT: vmovapd %zmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
; SKX-NEXT: LBB41_2: ## %L2
; SKX-NEXT: vmovapd %zmm0, 8(%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: ktest_1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vmovupd (%rdi), %zmm1
+; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k1
+; AVX512BW-NEXT: vmovupd 8(%rdi), %zmm1 {%k1} {z}
+; AVX512BW-NEXT: vcmpltpd %zmm1, %zmm0, %k0 {%k1}
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: testb %al, %al
+; AVX512BW-NEXT: je LBB41_2
+; AVX512BW-NEXT: ## BB#1: ## %L1
+; AVX512BW-NEXT: vmovapd %zmm0, (%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+; AVX512BW-NEXT: LBB41_2: ## %L2
+; AVX512BW-NEXT: vmovapd %zmm0, 8(%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: ktest_1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vmovupd (%rdi), %zmm1
+; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k1
+; AVX512DQ-NEXT: vmovupd 8(%rdi), %zmm1 {%k1} {z}
+; AVX512DQ-NEXT: vcmpltpd %zmm1, %zmm0, %k0 {%k1}
+; AVX512DQ-NEXT: ktestb %k0, %k0
+; AVX512DQ-NEXT: je LBB41_2
+; AVX512DQ-NEXT: ## BB#1: ## %L1
+; AVX512DQ-NEXT: vmovapd %zmm0, (%rdi)
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+; AVX512DQ-NEXT: LBB41_2: ## %L2
+; AVX512DQ-NEXT: vmovapd %zmm0, 8(%rdi)
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%addr1 = getelementptr double, double * %base, i64 0
%addr2 = getelementptr double, double * %base, i64 1
@@ -1167,10 +1996,6 @@ define void @ktest_2(<32 x float> %in, float * %base) {
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
; KNL-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; KNL-NEXT: vpsllw $7, %ymm2, %ymm2
-; KNL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; KNL-NEXT: vpxor %ymm3, %ymm3, %ymm3
-; KNL-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm2
; KNL-NEXT: vmovups 4(%rdi), %zmm3 {%k2} {z}
; KNL-NEXT: vmovups 68(%rdi), %zmm4 {%k1} {z}
; KNL-NEXT: vcmpltps %zmm4, %zmm1, %k0
@@ -1344,11 +2169,338 @@ define void @ktest_2(<32 x float> %in, float * %base) {
; SKX-NEXT: ## BB#1: ## %L1
; SKX-NEXT: vmovaps %zmm0, (%rdi)
; SKX-NEXT: vmovaps %zmm1, 64(%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
; SKX-NEXT: LBB42_2: ## %L2
; SKX-NEXT: vmovaps %zmm0, 4(%rdi)
; SKX-NEXT: vmovaps %zmm1, 68(%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: ktest_2:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vmovups (%rdi), %zmm2
+; AVX512BW-NEXT: vmovups 64(%rdi), %zmm3
+; AVX512BW-NEXT: vcmpltps %zmm0, %zmm2, %k1
+; AVX512BW-NEXT: vcmpltps %zmm1, %zmm3, %k2
+; AVX512BW-NEXT: kunpckwd %k1, %k2, %k0
+; AVX512BW-NEXT: vmovups 68(%rdi), %zmm2 {%k2} {z}
+; AVX512BW-NEXT: vmovups 4(%rdi), %zmm3 {%k1} {z}
+; AVX512BW-NEXT: vcmpltps %zmm3, %zmm0, %k1
+; AVX512BW-NEXT: vcmpltps %zmm2, %zmm1, %k2
+; AVX512BW-NEXT: kunpckwd %k1, %k2, %k1
+; AVX512BW-NEXT: kord %k1, %k0, %k0
+; AVX512BW-NEXT: ktestd %k0, %k0
+; AVX512BW-NEXT: je LBB42_2
+; AVX512BW-NEXT: ## BB#1: ## %L1
+; AVX512BW-NEXT: vmovaps %zmm0, (%rdi)
+; AVX512BW-NEXT: vmovaps %zmm1, 64(%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+; AVX512BW-NEXT: LBB42_2: ## %L2
+; AVX512BW-NEXT: vmovaps %zmm0, 4(%rdi)
+; AVX512BW-NEXT: vmovaps %zmm1, 68(%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: ktest_2:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: pushq %rbp
+; AVX512DQ-NEXT: Lcfi6:
+; AVX512DQ-NEXT: .cfi_def_cfa_offset 16
+; AVX512DQ-NEXT: Lcfi7:
+; AVX512DQ-NEXT: .cfi_offset %rbp, -16
+; AVX512DQ-NEXT: movq %rsp, %rbp
+; AVX512DQ-NEXT: Lcfi8:
+; AVX512DQ-NEXT: .cfi_def_cfa_register %rbp
+; AVX512DQ-NEXT: andq $-32, %rsp
+; AVX512DQ-NEXT: subq $32, %rsp
+; AVX512DQ-NEXT: vmovups (%rdi), %zmm2
+; AVX512DQ-NEXT: vmovups 64(%rdi), %zmm3
+; AVX512DQ-NEXT: vcmpltps %zmm1, %zmm3, %k1
+; AVX512DQ-NEXT: kshiftlw $14, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: kshiftlw $15, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %ecx
+; AVX512DQ-NEXT: vmovd %ecx, %xmm3
+; AVX512DQ-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $13, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $12, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $11, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $10, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $9, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $8, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $7, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $6, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $5, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $4, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $3, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $2, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $1, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $15, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm2, %k2
+; AVX512DQ-NEXT: kshiftlw $14, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: kshiftlw $15, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %ecx
+; AVX512DQ-NEXT: vmovd %ecx, %xmm2
+; AVX512DQ-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $13, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $12, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $11, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $10, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $9, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $8, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $7, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $6, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $5, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $4, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $3, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $2, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $1, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftrw $15, %k2, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vmovups 4(%rdi), %zmm3 {%k2} {z}
+; AVX512DQ-NEXT: vmovups 68(%rdi), %zmm4 {%k1} {z}
+; AVX512DQ-NEXT: vcmpltps %zmm4, %zmm1, %k0
+; AVX512DQ-NEXT: kshiftlw $14, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: kshiftlw $15, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %ecx
+; AVX512DQ-NEXT: vmovd %ecx, %xmm4
+; AVX512DQ-NEXT: vpinsrb $1, %eax, %xmm4, %xmm4
+; AVX512DQ-NEXT: kshiftlw $13, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $2, %eax, %xmm4, %xmm4
+; AVX512DQ-NEXT: kshiftlw $12, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $3, %eax, %xmm4, %xmm4
+; AVX512DQ-NEXT: kshiftlw $11, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $4, %eax, %xmm4, %xmm4
+; AVX512DQ-NEXT: kshiftlw $10, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $5, %eax, %xmm4, %xmm4
+; AVX512DQ-NEXT: kshiftlw $9, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $6, %eax, %xmm4, %xmm4
+; AVX512DQ-NEXT: kshiftlw $8, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $7, %eax, %xmm4, %xmm4
+; AVX512DQ-NEXT: kshiftlw $7, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $8, %eax, %xmm4, %xmm4
+; AVX512DQ-NEXT: kshiftlw $6, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $9, %eax, %xmm4, %xmm4
+; AVX512DQ-NEXT: kshiftlw $5, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $10, %eax, %xmm4, %xmm4
+; AVX512DQ-NEXT: kshiftlw $4, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $11, %eax, %xmm4, %xmm4
+; AVX512DQ-NEXT: kshiftlw $3, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $12, %eax, %xmm4, %xmm4
+; AVX512DQ-NEXT: kshiftlw $2, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4
+; AVX512DQ-NEXT: kshiftlw $1, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $14, %eax, %xmm4, %xmm4
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $15, %eax, %xmm4, %xmm4
+; AVX512DQ-NEXT: vcmpltps %zmm3, %zmm0, %k0
+; AVX512DQ-NEXT: kshiftlw $14, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: kshiftlw $15, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %ecx
+; AVX512DQ-NEXT: vmovd %ecx, %xmm3
+; AVX512DQ-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $13, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $12, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $11, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $10, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $9, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $8, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $7, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $6, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $5, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $4, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $3, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $2, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $1, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $15, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpor %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX512DQ-NEXT: vpmovsxbd %xmm3, %zmm3
+; AVX512DQ-NEXT: vpslld $31, %zmm3, %zmm3
+; AVX512DQ-NEXT: vptestmd %zmm3, %zmm3, %k0
+; AVX512DQ-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
+; AVX512DQ-NEXT: vpmovsxbd %xmm2, %zmm2
+; AVX512DQ-NEXT: vpslld $31, %zmm2, %zmm2
+; AVX512DQ-NEXT: vptestmd %zmm2, %zmm2, %k0
+; AVX512DQ-NEXT: kmovw %k0, (%rsp)
+; AVX512DQ-NEXT: cmpl $0, (%rsp)
+; AVX512DQ-NEXT: je LBB42_2
+; AVX512DQ-NEXT: ## BB#1: ## %L1
+; AVX512DQ-NEXT: vmovaps %zmm0, (%rdi)
+; AVX512DQ-NEXT: vmovaps %zmm1, 64(%rdi)
+; AVX512DQ-NEXT: jmp LBB42_3
+; AVX512DQ-NEXT: LBB42_2: ## %L2
+; AVX512DQ-NEXT: vmovaps %zmm0, 4(%rdi)
+; AVX512DQ-NEXT: vmovaps %zmm1, 68(%rdi)
+; AVX512DQ-NEXT: LBB42_3: ## %End
+; AVX512DQ-NEXT: movq %rbp, %rsp
+; AVX512DQ-NEXT: popq %rbp
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%addr1 = getelementptr float, float * %base, i64 0
%addr2 = getelementptr float, float * %base, i64 1
@@ -1389,6 +2541,19 @@ define <8 x i64> @load_8i1(<8 x i1>* %a) {
; SKX-NEXT: kmovb (%rdi), %k0
; SKX-NEXT: vpmovm2q %k0, %zmm0
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: load_8i1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: movzbl (%rdi), %eax
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: load_8i1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovb (%rdi), %k0
+; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
+; AVX512DQ-NEXT: retq
%b = load <8 x i1>, <8 x i1>* %a
%c = sext <8 x i1> %b to <8 x i64>
ret <8 x i64> %c
@@ -1406,6 +2571,18 @@ define <16 x i32> @load_16i1(<16 x i1>* %a) {
; SKX-NEXT: kmovw (%rdi), %k0
; SKX-NEXT: vpmovm2d %k0, %zmm0
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: load_16i1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovw (%rdi), %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: load_16i1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw (%rdi), %k0
+; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
+; AVX512DQ-NEXT: retq
%b = load <16 x i1>, <16 x i1>* %a
%c = sext <16 x i1> %b to <16 x i32>
ret <16 x i32> %c
@@ -1425,6 +2602,23 @@ define <2 x i16> @load_2i1(<2 x i1>* %a) {
; SKX-NEXT: kmovb (%rdi), %k0
; SKX-NEXT: vpmovm2q %k0, %xmm0
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: load_2i1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: movzbl (%rdi), %eax
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: load_2i1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovb (%rdi), %k0
+; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
+; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%b = load <2 x i1>, <2 x i1>* %a
%c = sext <2 x i1> %b to <2 x i16>
ret <2 x i16> %c
@@ -1445,6 +2639,24 @@ define <4 x i16> @load_4i1(<4 x i1>* %a) {
; SKX-NEXT: kmovb (%rdi), %k0
; SKX-NEXT: vpmovm2d %k0, %xmm0
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: load_4i1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: movzbl (%rdi), %eax
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: load_4i1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovb (%rdi), %k0
+; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
+; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%b = load <4 x i1>, <4 x i1>* %a
%c = sext <4 x i1> %b to <4 x i16>
ret <4 x i16> %c
@@ -1466,6 +2678,22 @@ define <32 x i16> @load_32i1(<32 x i1>* %a) {
; SKX-NEXT: kmovd (%rdi), %k0
; SKX-NEXT: vpmovm2w %k0, %zmm0
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: load_32i1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd (%rdi), %k0
+; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: load_32i1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw (%rdi), %k0
+; AVX512DQ-NEXT: kmovw 2(%rdi), %k1
+; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
+; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512DQ-NEXT: vpmovm2d %k1, %zmm1
+; AVX512DQ-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512DQ-NEXT: retq
%b = load <32 x i1>, <32 x i1>* %a
%c = sext <32 x i1> %b to <32 x i16>
ret <32 x i16> %c
@@ -1495,6 +2723,30 @@ define <64 x i8> @load_64i1(<64 x i1>* %a) {
; SKX-NEXT: kmovq (%rdi), %k0
; SKX-NEXT: vpmovm2b %k0, %zmm0
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: load_64i1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovq (%rdi), %k0
+; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: load_64i1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw (%rdi), %k0
+; AVX512DQ-NEXT: kmovw 2(%rdi), %k1
+; AVX512DQ-NEXT: kmovw 4(%rdi), %k2
+; AVX512DQ-NEXT: kmovw 6(%rdi), %k3
+; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vpmovm2d %k1, %zmm1
+; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovm2d %k2, %zmm1
+; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512DQ-NEXT: vpmovm2d %k3, %zmm2
+; AVX512DQ-NEXT: vpmovdb %zmm2, %xmm2
+; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
%b = load <64 x i1>, <64 x i1>* %a
%c = sext <64 x i1> %b to <64 x i8>
ret <64 x i8> %c
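
; kmovd/kmovq mask moves and the byte/word vpmovm2* forms require AVX512BW,
; so the AVX512DQ checks above (here and in load_32i1) legalize these wide
; i1 loads as 16-bit kmovw chunks that are widened with vpmovm2d and
; narrowed back with vpmovdw/vpmovdb.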
@@ -1516,6 +2768,24 @@ define void @store_8i1(<8 x i1>* %a, <8 x i1> %v) {
; SKX-NEXT: vpmovw2m %xmm0, %k0
; SKX-NEXT: kmovb %k0, (%rdi)
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: store_8i1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX512BW-NEXT: vpmovw2m %zmm0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: movb %al, (%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: store_8i1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpmovsxwq %xmm0, %zmm0
+; AVX512DQ-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT: kmovb %k0, (%rdi)
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
store <8 x i1> %v, <8 x i1>* %a
ret void
}
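
; vpmovw2m is an AVX512BW instruction, so the AVX512DQ sequence above must
; rebuild the mask the long way: sign-extend the <8 x i16> lanes to qwords,
; move each i1 into the sign bit with vpsllq $63, and recover a k-register
; with vptestmq before the DQ-only kmovb store.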
@@ -1536,6 +2806,24 @@ define void @store_8i1_1(<8 x i1>* %a, <8 x i16> %v) {
; SKX-NEXT: vpmovw2m %xmm0, %k0
; SKX-NEXT: kmovb %k0, (%rdi)
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: store_8i1_1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX512BW-NEXT: vpmovw2m %zmm0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: movb %al, (%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: store_8i1_1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpmovsxwq %xmm0, %zmm0
+; AVX512DQ-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT: kmovb %k0, (%rdi)
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%v1 = trunc <8 x i16> %v to <8 x i1>
store <8 x i1> %v1, <8 x i1>* %a
ret void
@@ -1556,6 +2844,23 @@ define void @store_16i1(<16 x i1>* %a, <16 x i1> %v) {
; SKX-NEXT: vpmovb2m %xmm0, %k0
; SKX-NEXT: kmovw %k0, (%rdi)
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: store_16i1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
+; AVX512BW-NEXT: kmovw %k0, (%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: store_16i1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512DQ-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT: kmovw %k0, (%rdi)
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
store <16 x i1> %v, <16 x i1>* %a
ret void
}
@@ -1579,7 +2884,30 @@ define void @store_32i1(<32 x i1>* %a, <32 x i1> %v) {
; SKX-NEXT: vpsllw $7, %ymm0, %ymm0
; SKX-NEXT: vpmovb2m %ymm0, %k0
; SKX-NEXT: kmovd %k0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: store_32i1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsllw $7, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
+; AVX512BW-NEXT: kmovd %k0, (%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: store_32i1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512DQ-NEXT: vpmovsxbd %xmm1, %zmm1
+; AVX512DQ-NEXT: vpslld $31, %zmm1, %zmm1
+; AVX512DQ-NEXT: vptestmd %zmm1, %zmm1, %k0
+; AVX512DQ-NEXT: kmovw %k0, 2(%rdi)
+; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512DQ-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT: kmovw %k0, (%rdi)
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
store <32 x i1> %v, <32 x i1>* %a
ret void
}
@@ -1606,7 +2934,33 @@ define void @store_32i1_1(<32 x i1>* %a, <32 x i16> %v) {
; SKX-NEXT: vpsllw $15, %zmm0, %zmm0
; SKX-NEXT: vpmovw2m %zmm0, %k0
; SKX-NEXT: kmovd %k0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: store_32i1_1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsllw $15, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovw2m %zmm0, %k0
+; AVX512BW-NEXT: kmovd %k0, (%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: store_32i1_1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vpmovsxwd %ymm1, %zmm1
+; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512DQ-NEXT: vpmovsxbd %xmm1, %zmm1
+; AVX512DQ-NEXT: vpslld $31, %zmm1, %zmm1
+; AVX512DQ-NEXT: vptestmd %zmm1, %zmm1, %k0
+; AVX512DQ-NEXT: kmovw %k0, 2(%rdi)
+; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512DQ-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT: kmovw %k0, (%rdi)
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%v1 = trunc <32 x i16> %v to <32 x i1>
store <32 x i1> %v1, <32 x i1>* %a
ret void
@@ -1940,7 +3294,337 @@ define void @store_64i1(<64 x i1>* %a, <64 x i1> %v) {
; SKX-NEXT: vpsllw $7, %zmm0, %zmm0
; SKX-NEXT: vpmovb2m %zmm0, %k0
; SKX-NEXT: kmovq %k0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: store_64i1:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsllw $7, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
+; AVX512BW-NEXT: kmovq %k0, (%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: store_64i1:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: pushq %rbp
+; AVX512DQ-NEXT: Lcfi9:
+; AVX512DQ-NEXT: .cfi_def_cfa_offset 16
+; AVX512DQ-NEXT: pushq %r15
+; AVX512DQ-NEXT: Lcfi10:
+; AVX512DQ-NEXT: .cfi_def_cfa_offset 24
+; AVX512DQ-NEXT: pushq %r14
+; AVX512DQ-NEXT: Lcfi11:
+; AVX512DQ-NEXT: .cfi_def_cfa_offset 32
+; AVX512DQ-NEXT: pushq %r13
+; AVX512DQ-NEXT: Lcfi12:
+; AVX512DQ-NEXT: .cfi_def_cfa_offset 40
+; AVX512DQ-NEXT: pushq %r12
+; AVX512DQ-NEXT: Lcfi13:
+; AVX512DQ-NEXT: .cfi_def_cfa_offset 48
+; AVX512DQ-NEXT: pushq %rbx
+; AVX512DQ-NEXT: Lcfi14:
+; AVX512DQ-NEXT: .cfi_def_cfa_offset 56
+; AVX512DQ-NEXT: Lcfi15:
+; AVX512DQ-NEXT: .cfi_offset %rbx, -56
+; AVX512DQ-NEXT: Lcfi16:
+; AVX512DQ-NEXT: .cfi_offset %r12, -48
+; AVX512DQ-NEXT: Lcfi17:
+; AVX512DQ-NEXT: .cfi_offset %r13, -40
+; AVX512DQ-NEXT: Lcfi18:
+; AVX512DQ-NEXT: .cfi_offset %r14, -32
+; AVX512DQ-NEXT: Lcfi19:
+; AVX512DQ-NEXT: .cfi_offset %r15, -24
+; AVX512DQ-NEXT: Lcfi20:
+; AVX512DQ-NEXT: .cfi_offset %rbp, -16
+; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512DQ-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovsxbd %xmm1, %zmm1
+; AVX512DQ-NEXT: vpslld $31, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpmovsxbd %xmm2, %zmm2
+; AVX512DQ-NEXT: vpslld $31, %zmm2, %zmm2
+; AVX512DQ-NEXT: vpmovsxbd %xmm3, %zmm3
+; AVX512DQ-NEXT: vpslld $31, %zmm3, %zmm3
+; AVX512DQ-NEXT: vptestmd %zmm3, %zmm3, %k0
+; AVX512DQ-NEXT: kshiftlw $14, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %r8d
+; AVX512DQ-NEXT: kshiftlw $15, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %r9d
+; AVX512DQ-NEXT: kshiftlw $13, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %r10d
+; AVX512DQ-NEXT: kshiftlw $12, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %r11d
+; AVX512DQ-NEXT: kshiftlw $11, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %r14d
+; AVX512DQ-NEXT: kshiftlw $10, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %r15d
+; AVX512DQ-NEXT: kshiftlw $9, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %r12d
+; AVX512DQ-NEXT: kshiftlw $8, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %r13d
+; AVX512DQ-NEXT: kshiftlw $7, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %ebx
+; AVX512DQ-NEXT: kshiftlw $6, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %ebp
+; AVX512DQ-NEXT: kshiftlw $5, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: kshiftlw $4, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %ecx
+; AVX512DQ-NEXT: kshiftlw $3, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %edx
+; AVX512DQ-NEXT: kshiftlw $2, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %esi
+; AVX512DQ-NEXT: kshiftlw $1, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: vmovd %r9d, %xmm3
+; AVX512DQ-NEXT: kmovw %k1, %r9d
+; AVX512DQ-NEXT: vptestmd %zmm2, %zmm2, %k2
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: vpinsrb $1, %r8d, %xmm3, %xmm2
+; AVX512DQ-NEXT: vpinsrb $2, %r10d, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpinsrb $3, %r11d, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpinsrb $4, %r14d, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpinsrb $5, %r15d, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpinsrb $6, %r12d, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpinsrb $7, %r13d, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpinsrb $8, %ebx, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpinsrb $9, %ebp, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpinsrb $11, %ecx, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpinsrb $12, %edx, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpinsrb $13, %esi, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpinsrb $14, %r9d, %xmm2, %xmm2
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpmovsxbd %xmm2, %zmm2
+; AVX512DQ-NEXT: vpslld $31, %zmm2, %zmm2
+; AVX512DQ-NEXT: vptestmd %zmm2, %zmm2, %k0
+; AVX512DQ-NEXT: kmovw %k0, 6(%rdi)
+; AVX512DQ-NEXT: kshiftlw $14, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %r8d
+; AVX512DQ-NEXT: kshiftlw $15, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %r10d
+; AVX512DQ-NEXT: kshiftlw $13, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %r9d
+; AVX512DQ-NEXT: kshiftlw $12, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %r11d
+; AVX512DQ-NEXT: kshiftlw $11, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %r14d
+; AVX512DQ-NEXT: kshiftlw $10, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %r15d
+; AVX512DQ-NEXT: kshiftlw $9, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %r12d
+; AVX512DQ-NEXT: kshiftlw $8, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %r13d
+; AVX512DQ-NEXT: kshiftlw $7, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %ecx
+; AVX512DQ-NEXT: kshiftlw $6, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %esi
+; AVX512DQ-NEXT: kshiftlw $5, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %ebp
+; AVX512DQ-NEXT: kshiftlw $4, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %ebx
+; AVX512DQ-NEXT: kshiftlw $3, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: kshiftlw $2, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %edx
+; AVX512DQ-NEXT: kshiftlw $1, %k2, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: vmovd %r10d, %xmm2
+; AVX512DQ-NEXT: kmovw %k0, %r10d
+; AVX512DQ-NEXT: vptestmd %zmm1, %zmm1, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k2, %k0
+; AVX512DQ-NEXT: vpinsrb $1, %r8d, %xmm2, %xmm1
+; AVX512DQ-NEXT: vpinsrb $2, %r9d, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpinsrb $3, %r11d, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpinsrb $4, %r14d, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpinsrb $5, %r15d, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpinsrb $6, %r12d, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpinsrb $7, %r13d, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpinsrb $8, %ecx, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpinsrb $9, %esi, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpinsrb $10, %ebp, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpinsrb $11, %ebx, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpinsrb $13, %edx, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpinsrb $14, %r10d, %xmm1, %xmm1
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpmovsxbd %xmm1, %zmm1
+; AVX512DQ-NEXT: vpslld $31, %zmm1, %zmm1
+; AVX512DQ-NEXT: vptestmd %zmm1, %zmm1, %k0
+; AVX512DQ-NEXT: kmovw %k0, 4(%rdi)
+; AVX512DQ-NEXT: kshiftlw $14, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %r8d
+; AVX512DQ-NEXT: kshiftlw $15, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %r10d
+; AVX512DQ-NEXT: kshiftlw $13, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %r9d
+; AVX512DQ-NEXT: kshiftlw $12, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %r11d
+; AVX512DQ-NEXT: kshiftlw $11, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %r14d
+; AVX512DQ-NEXT: kshiftlw $10, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %r15d
+; AVX512DQ-NEXT: kshiftlw $9, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %r12d
+; AVX512DQ-NEXT: kshiftlw $8, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %r13d
+; AVX512DQ-NEXT: kshiftlw $7, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %ecx
+; AVX512DQ-NEXT: kshiftlw $6, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %esi
+; AVX512DQ-NEXT: kshiftlw $5, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %ebp
+; AVX512DQ-NEXT: kshiftlw $4, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %ebx
+; AVX512DQ-NEXT: kshiftlw $3, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: kshiftlw $2, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %edx
+; AVX512DQ-NEXT: kshiftlw $1, %k1, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: vmovd %r10d, %xmm1
+; AVX512DQ-NEXT: kmovw %k0, %r10d
+; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: vpinsrb $1, %r8d, %xmm1, %xmm0
+; AVX512DQ-NEXT: vpinsrb $2, %r9d, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $9, %esi, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $10, %ebp, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $13, %edx, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $14, %r10d, %xmm0, %xmm0
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512DQ-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k1
+; AVX512DQ-NEXT: kmovw %k1, 2(%rdi)
+; AVX512DQ-NEXT: kshiftlw $14, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %r8d
+; AVX512DQ-NEXT: kshiftlw $15, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %r9d
+; AVX512DQ-NEXT: kshiftlw $13, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %r10d
+; AVX512DQ-NEXT: kshiftlw $12, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %r11d
+; AVX512DQ-NEXT: kshiftlw $11, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %r14d
+; AVX512DQ-NEXT: kshiftlw $10, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %r15d
+; AVX512DQ-NEXT: kshiftlw $9, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %r12d
+; AVX512DQ-NEXT: kshiftlw $8, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %r13d
+; AVX512DQ-NEXT: kshiftlw $7, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %edx
+; AVX512DQ-NEXT: kshiftlw $6, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %esi
+; AVX512DQ-NEXT: kshiftlw $5, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %ebp
+; AVX512DQ-NEXT: kshiftlw $4, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %ebx
+; AVX512DQ-NEXT: kshiftlw $3, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: kshiftlw $2, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %ecx
+; AVX512DQ-NEXT: kshiftlw $1, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: vmovd %r9d, %xmm0
+; AVX512DQ-NEXT: kmovw %k1, %r9d
+; AVX512DQ-NEXT: vpinsrb $1, %r8d, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $2, %r10d, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $3, %r11d, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $8, %edx, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $9, %esi, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $10, %ebp, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpinsrb $14, %r9d, %xmm0, %xmm0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512DQ-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT: kmovw %k0, (%rdi)
+; AVX512DQ-NEXT: popq %rbx
+; AVX512DQ-NEXT: popq %r12
+; AVX512DQ-NEXT: popq %r13
+; AVX512DQ-NEXT: popq %r14
+; AVX512DQ-NEXT: popq %r15
+; AVX512DQ-NEXT: popq %rbp
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
store <64 x i1> %v, <64 x i1>* %a
ret void
}
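
; Storing <64 x i1> without AVX512BW is the worst case: there is no kmovq
; for mask registers, so each 16-bit chunk is taken apart bit by bit with
; kshiftlw/kshiftrw + kmovw into GPRs, reassembled through vpinsrb, turned
; back into a mask via vpmovsxbd/vpslld/vptestmd, and stored as four 2-byte
; kmovw writes at offsets 0, 2, 4 and 6 of %rdi - expensive, but correct.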
@@ -1961,7 +3645,27 @@ define i32 @test_bitcast_v8i1_zext(<16 x i32> %a) {
; SKX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; SKX-NEXT: kmovb %k0, %eax
; SKX-NEXT: addl %eax, %eax
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test_bitcast_v8i1_zext:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: movzbl %al, %eax
+; AVX512BW-NEXT: addl %eax, %eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test_bitcast_v8i1_zext:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; AVX512DQ-NEXT: kmovb %k0, %eax
+; AVX512DQ-NEXT: addl %eax, %eax
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%v1 = icmp eq <16 x i32> %a, zeroinitializer
%mask = shufflevector <16 x i1> %v1, <16 x i1> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%mask1 = bitcast <8 x i1> %mask to i8
@@ -1971,16 +3675,301 @@ define i32 @test_bitcast_v8i1_zext(<16 x i32> %a) {
}
define i32 @test_bitcast_v16i1_zext(<16 x i32> %a) {
-; CHECK-LABEL: test_bitcast_v16i1_zext:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpxord %zmm1, %zmm1, %zmm1
-; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: addl %eax, %eax
-; CHECK-NEXT: retq
+; KNL-LABEL: test_bitcast_v16i1_zext:
+; KNL: ## BB#0:
+; KNL-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; KNL-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: addl %eax, %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_bitcast_v16i1_zext:
+; SKX: ## BB#0:
+; SKX-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; SKX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: addl %eax, %eax
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test_bitcast_v16i1_zext:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; AVX512BW-NEXT: kmovw %k0, %eax
+; AVX512BW-NEXT: addl %eax, %eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test_bitcast_v16i1_zext:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: addl %eax, %eax
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
%v1 = icmp eq <16 x i32> %a, zeroinitializer
%mask1 = bitcast <16 x i1> %v1 to i16
%val = zext i16 %mask1 to i32
%val1 = add i32 %val, %val
ret i32 %val1
}
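
; The zext of the i16 mask is free here: kmovw into a 32-bit GPR already
; zero-extends, so all four prefixes (KNL/SKX/AVX512BW/AVX512DQ) fold
; "zext i16 %mask1 to i32" into the kmovw %k0, %eax and go straight to the
; add.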
+
+define i16 @test_v16i1_add(i16 %x, i16 %y) {
+; KNL-LABEL: test_v16i1_add:
+; KNL: ## BB#0:
+; KNL-NEXT: kmovw %edi, %k0
+; KNL-NEXT: kmovw %esi, %k1
+; KNL-NEXT: kxorw %k1, %k0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_v16i1_add:
+; SKX: ## BB#0:
+; SKX-NEXT: kmovd %edi, %k0
+; SKX-NEXT: kmovd %esi, %k1
+; SKX-NEXT: kxorw %k1, %k0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test_v16i1_add:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k0
+; AVX512BW-NEXT: kmovd %esi, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test_v16i1_add:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw %edi, %k0
+; AVX512DQ-NEXT: kmovw %esi, %k1
+; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512DQ-NEXT: retq
+ %m0 = bitcast i16 %x to <16 x i1>
+ %m1 = bitcast i16 %y to <16 x i1>
+ %m2 = add <16 x i1> %m0, %m1
+ %ret = bitcast <16 x i1> %m2 to i16
+ ret i16 %ret
+}
+
+define i16 @test_v16i1_sub(i16 %x, i16 %y) {
+; KNL-LABEL: test_v16i1_sub:
+; KNL: ## BB#0:
+; KNL-NEXT: kmovw %edi, %k0
+; KNL-NEXT: kmovw %esi, %k1
+; KNL-NEXT: kxorw %k1, %k0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_v16i1_sub:
+; SKX: ## BB#0:
+; SKX-NEXT: kmovd %edi, %k0
+; SKX-NEXT: kmovd %esi, %k1
+; SKX-NEXT: kxorw %k1, %k0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test_v16i1_sub:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k0
+; AVX512BW-NEXT: kmovd %esi, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test_v16i1_sub:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw %edi, %k0
+; AVX512DQ-NEXT: kmovw %esi, %k1
+; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512DQ-NEXT: retq
+ %m0 = bitcast i16 %x to <16 x i1>
+ %m1 = bitcast i16 %y to <16 x i1>
+ %m2 = sub <16 x i1> %m0, %m1
+ %ret = bitcast <16 x i1> %m2 to i16
+ ret i16 %ret
+}
+
+define i16 @test_v16i1_mul(i16 %x, i16 %y) {
+; KNL-LABEL: test_v16i1_mul:
+; KNL: ## BB#0:
+; KNL-NEXT: kmovw %edi, %k0
+; KNL-NEXT: kmovw %esi, %k1
+; KNL-NEXT: kandw %k1, %k0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_v16i1_mul:
+; SKX: ## BB#0:
+; SKX-NEXT: kmovd %edi, %k0
+; SKX-NEXT: kmovd %esi, %k1
+; SKX-NEXT: kandw %k1, %k0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test_v16i1_mul:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k0
+; AVX512BW-NEXT: kmovd %esi, %k1
+; AVX512BW-NEXT: kandw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test_v16i1_mul:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw %edi, %k0
+; AVX512DQ-NEXT: kmovw %esi, %k1
+; AVX512DQ-NEXT: kandw %k1, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512DQ-NEXT: retq
+ %m0 = bitcast i16 %x to <16 x i1>
+ %m1 = bitcast i16 %y to <16 x i1>
+ %m2 = mul <16 x i1> %m0, %m1
+ %ret = bitcast <16 x i1> %m2 to i16
+ ret i16 %ret
+}
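+;
+; In <N x i1> arithmetic each lane is a single bit, so add and sub are both
+; addition modulo 2 (i.e. XOR) and mul is AND: for %x = 5 (0b0101) and
+; %y = 3 (0b0011), add/sub give 6 (0b0110) and mul gives 1 (0b0001). That
+; identity is why the three tests above, and the v8i1 variants below, each
+; lower to a single mask logic op (kxor/kand) between two kmovs.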
+
+define i8 @test_v8i1_add(i8 %x, i8 %y) {
+; KNL-LABEL: test_v8i1_add:
+; KNL: ## BB#0:
+; KNL-NEXT: kmovw %edi, %k0
+; KNL-NEXT: kmovw %esi, %k1
+; KNL-NEXT: kxorw %k1, %k0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_v8i1_add:
+; SKX: ## BB#0:
+; SKX-NEXT: kmovd %edi, %k0
+; SKX-NEXT: kmovd %esi, %k1
+; SKX-NEXT: kxorb %k1, %k0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test_v8i1_add:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k0
+; AVX512BW-NEXT: kmovd %esi, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test_v8i1_add:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw %edi, %k0
+; AVX512DQ-NEXT: kmovw %esi, %k1
+; AVX512DQ-NEXT: kxorb %k1, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512DQ-NEXT: retq
+ %m0 = bitcast i8 %x to <8 x i1>
+ %m1 = bitcast i8 %y to <8 x i1>
+ %m2 = add <8 x i1> %m0, %m1
+ %ret = bitcast <8 x i1> %m2 to i8
+ ret i8 %ret
+}
+
+define i8 @test_v8i1_sub(i8 %x, i8 %y) {
+; KNL-LABEL: test_v8i1_sub:
+; KNL: ## BB#0:
+; KNL-NEXT: kmovw %edi, %k0
+; KNL-NEXT: kmovw %esi, %k1
+; KNL-NEXT: kxorw %k1, %k0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_v8i1_sub:
+; SKX: ## BB#0:
+; SKX-NEXT: kmovd %edi, %k0
+; SKX-NEXT: kmovd %esi, %k1
+; SKX-NEXT: kxorb %k1, %k0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test_v8i1_sub:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k0
+; AVX512BW-NEXT: kmovd %esi, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test_v8i1_sub:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw %edi, %k0
+; AVX512DQ-NEXT: kmovw %esi, %k1
+; AVX512DQ-NEXT: kxorb %k1, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512DQ-NEXT: retq
+ %m0 = bitcast i8 %x to <8 x i1>
+ %m1 = bitcast i8 %y to <8 x i1>
+ %m2 = sub <8 x i1> %m0, %m1
+ %ret = bitcast <8 x i1> %m2 to i8
+ ret i8 %ret
+}
+
+define i8 @test_v8i1_mul(i8 %x, i8 %y) {
+; KNL-LABEL: test_v8i1_mul:
+; KNL: ## BB#0:
+; KNL-NEXT: kmovw %edi, %k0
+; KNL-NEXT: kmovw %esi, %k1
+; KNL-NEXT: kandw %k1, %k0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_v8i1_mul:
+; SKX: ## BB#0:
+; SKX-NEXT: kmovd %edi, %k0
+; SKX-NEXT: kmovd %esi, %k1
+; SKX-NEXT: kandb %k1, %k0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: retq
+;
+; AVX512BW-LABEL: test_v8i1_mul:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k0
+; AVX512BW-NEXT: kmovd %esi, %k1
+; AVX512BW-NEXT: kandw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: test_v8i1_mul:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: kmovw %edi, %k0
+; AVX512DQ-NEXT: kmovw %esi, %k1
+; AVX512DQ-NEXT: kandb %k1, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512DQ-NEXT: retq
+ %m0 = bitcast i8 %x to <8 x i1>
+ %m1 = bitcast i8 %y to <8 x i1>
+ %m2 = mul <8 x i1> %m0, %m1
+ %ret = bitcast <8 x i1> %m2 to i8
+ ret i8 %ret
+}
diff --git a/test/CodeGen/X86/avx512-mask-spills.ll b/test/CodeGen/X86/avx512-mask-spills.ll
index d00d9bfdfcd7..4ef88ac495c3 100644
--- a/test/CodeGen/X86/avx512-mask-spills.ll
+++ b/test/CodeGen/X86/avx512-mask-spills.ll
@@ -37,6 +37,7 @@ define <8 x i1> @test_8i1(<8 x i32> %a, <8 x i32> %b) {
; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq _f
; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k0 ## 2-byte Reload
; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k1 ## 2-byte Reload
@@ -62,6 +63,7 @@ define <16 x i1> @test_16i1(<16 x i32> %a, <16 x i32> %b) {
; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq _f
; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k0 ## 2-byte Reload
; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k1 ## 2-byte Reload
@@ -86,6 +88,7 @@ define <32 x i1> @test_32i1(<32 x i16> %a, <32 x i16> %b) {
; CHECK-NEXT: kmovd %k0, {{[0-9]+}}(%rsp) ## 4-byte Spill
; CHECK-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovd %k0, (%rsp) ## 4-byte Spill
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq _f
; CHECK-NEXT: kmovd {{[0-9]+}}(%rsp), %k0 ## 4-byte Reload
; CHECK-NEXT: kmovd (%rsp), %k1 ## 4-byte Reload
@@ -110,6 +113,7 @@ define <64 x i1> @test_64i1(<64 x i8> %a, <64 x i8> %b) {
; CHECK-NEXT: kmovq %k0, {{[0-9]+}}(%rsp) ## 8-byte Spill
; CHECK-NEXT: vpcmpgtb %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovq %k0, {{[0-9]+}}(%rsp) ## 8-byte Spill
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq _f
; CHECK-NEXT: kmovq {{[0-9]+}}(%rsp), %k0 ## 8-byte Reload
; CHECK-NEXT: kmovq {{[0-9]+}}(%rsp), %k1 ## 8-byte Reload
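
; The vzeroupper added before each callq _f here (and before returns
; throughout this patch) clears the dirty upper ymm/zmm state so that the
; callee does not pay the AVX-to-SSE transition penalty; the spilled
; k-registers are unaffected.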
diff --git a/test/CodeGen/X86/avx512-masked-memop-64-32.ll b/test/CodeGen/X86/avx512-masked-memop-64-32.ll
index 95ce212ab5a3..607c4f4ade6f 100644
--- a/test/CodeGen/X86/avx512-masked-memop-64-32.ll
+++ b/test/CodeGen/X86/avx512-masked-memop-64-32.ll
@@ -32,6 +32,7 @@ define void @test3(<16 x i32> %trigger, <16 x i32>* %addr, <16 x i32> %val) {
; AVX512-NEXT: vpxord %zmm2, %zmm2, %zmm2
; AVX512-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
; AVX512-NEXT: vmovdqu32 %zmm1, (%rdi) {%k1}
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%mask = icmp eq <16 x i32> %trigger, zeroinitializer
call void @llvm.masked.store.v16i32.p0v16i32(<16 x i32>%val, <16 x i32>* %addr, i32 4, <16 x i1>%mask)
@@ -43,8 +44,7 @@ define <16 x float> @test4(<16 x i32> %trigger, <16 x float>* %addr, <16 x float
; AVX512: ## BB#0:
; AVX512-NEXT: vpxord %zmm2, %zmm2, %zmm2
; AVX512-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
-; AVX512-NEXT: vmovups (%rdi), %zmm1 {%k1}
-; AVX512-NEXT: vmovaps %zmm1, %zmm0
+; AVX512-NEXT: vblendmps (%rdi), %zmm1, %zmm0 {%k1}
; AVX512-NEXT: retq
%mask = icmp eq <16 x i32> %trigger, zeroinitializer
%res = call <16 x float> @llvm.masked.load.v16f32.p0v16f32(<16 x float>* %addr, i32 4, <16 x i1>%mask, <16 x float> %dst)
@@ -57,6 +57,7 @@ define void @test13(<16 x i32> %trigger, <16 x float>* %addr, <16 x float> %val)
; AVX512-NEXT: vpxord %zmm2, %zmm2, %zmm2
; AVX512-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
; AVX512-NEXT: vmovups %zmm1, (%rdi) {%k1}
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%mask = icmp eq <16 x i32> %trigger, zeroinitializer
call void @llvm.masked.store.v16f32.p0v16f32(<16 x float>%val, <16 x float>* %addr, i32 4, <16 x i1>%mask)
@@ -68,6 +69,7 @@ define void @one_mask_bit_set5(<8 x double>* %addr, <8 x double> %val) {
; AVX512: ## BB#0:
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vmovlps %xmm0, 48(%rdi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
call void @llvm.masked.store.v8f64.p0v8f64(<8 x double> %val, <8 x double>* %addr, i32 4, <8 x i1><i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 false>)
ret void
@@ -144,6 +146,7 @@ define void @test_store_16i64(<16 x i64>* %ptrs, <16 x i1> %mask, <16 x i64> %sr
; AVX512F-NEXT: vmovdqu64 %zmm1, (%rdi) {%k1}
; AVX512F-NEXT: kshiftrw $8, %k1, %k1
; AVX512F-NEXT: vmovdqu64 %zmm2, 64(%rdi) {%k1}
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; SKX-LABEL: test_store_16i64:
@@ -153,6 +156,7 @@ define void @test_store_16i64(<16 x i64>* %ptrs, <16 x i1> %mask, <16 x i64> %sr
; SKX-NEXT: vmovdqu64 %zmm1, (%rdi) {%k1}
; SKX-NEXT: kshiftrw $8, %k1, %k1
; SKX-NEXT: vmovdqu64 %zmm2, 64(%rdi) {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
call void @llvm.masked.store.v16i64.p0v16i64(<16 x i64> %src0, <16 x i64>* %ptrs, i32 4, <16 x i1> %mask)
ret void
@@ -168,6 +172,7 @@ define void @test_store_16f64(<16 x double>* %ptrs, <16 x i1> %mask, <16 x doubl
; AVX512F-NEXT: vmovupd %zmm1, (%rdi) {%k1}
; AVX512F-NEXT: kshiftrw $8, %k1, %k1
; AVX512F-NEXT: vmovupd %zmm2, 64(%rdi) {%k1}
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; SKX-LABEL: test_store_16f64:
@@ -177,6 +182,7 @@ define void @test_store_16f64(<16 x double>* %ptrs, <16 x i1> %mask, <16 x doubl
; SKX-NEXT: vmovupd %zmm1, (%rdi) {%k1}
; SKX-NEXT: kshiftrw $8, %k1, %k1
; SKX-NEXT: vmovupd %zmm2, 64(%rdi) {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
call void @llvm.masked.store.v16f64.p0v16f64(<16 x double> %src0, <16 x double>* %ptrs, i32 4, <16 x i1> %mask)
ret void
@@ -189,22 +195,18 @@ define <16 x i64> @test_load_16i64(<16 x i64>* %ptrs, <16 x i1> %mask, <16 x i64
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k1
-; AVX512F-NEXT: vmovdqu64 (%rdi), %zmm1 {%k1}
+; AVX512F-NEXT: vpblendmq (%rdi), %zmm1, %zmm0 {%k1}
; AVX512F-NEXT: kshiftrw $8, %k1, %k1
-; AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm2 {%k1}
-; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm0
-; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm1
+; AVX512F-NEXT: vpblendmq 64(%rdi), %zmm2, %zmm1 {%k1}
; AVX512F-NEXT: retq
;
; SKX-LABEL: test_load_16i64:
; SKX: ## BB#0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
-; SKX-NEXT: vmovdqu64 (%rdi), %zmm1 {%k1}
+; SKX-NEXT: vpblendmq (%rdi), %zmm1, %zmm0 {%k1}
; SKX-NEXT: kshiftrw $8, %k1, %k1
-; SKX-NEXT: vmovdqu64 64(%rdi), %zmm2 {%k1}
-; SKX-NEXT: vmovdqa64 %zmm1, %zmm0
-; SKX-NEXT: vmovdqa64 %zmm2, %zmm1
+; SKX-NEXT: vpblendmq 64(%rdi), %zmm2, %zmm1 {%k1}
; SKX-NEXT: retq
%res = call <16 x i64> @llvm.masked.load.v16i64.p0v16i64(<16 x i64>* %ptrs, i32 4, <16 x i1> %mask, <16 x i64> %src0)
ret <16 x i64> %res
@@ -217,22 +219,18 @@ define <16 x double> @test_load_16f64(<16 x double>* %ptrs, <16 x i1> %mask, <16
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k1
-; AVX512F-NEXT: vmovupd (%rdi), %zmm1 {%k1}
+; AVX512F-NEXT: vblendmpd (%rdi), %zmm1, %zmm0 {%k1}
; AVX512F-NEXT: kshiftrw $8, %k1, %k1
-; AVX512F-NEXT: vmovupd 64(%rdi), %zmm2 {%k1}
-; AVX512F-NEXT: vmovapd %zmm1, %zmm0
-; AVX512F-NEXT: vmovapd %zmm2, %zmm1
+; AVX512F-NEXT: vblendmpd 64(%rdi), %zmm2, %zmm1 {%k1}
; AVX512F-NEXT: retq
;
; SKX-LABEL: test_load_16f64:
; SKX: ## BB#0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
-; SKX-NEXT: vmovupd (%rdi), %zmm1 {%k1}
+; SKX-NEXT: vblendmpd (%rdi), %zmm1, %zmm0 {%k1}
; SKX-NEXT: kshiftrw $8, %k1, %k1
-; SKX-NEXT: vmovupd 64(%rdi), %zmm2 {%k1}
-; SKX-NEXT: vmovapd %zmm1, %zmm0
-; SKX-NEXT: vmovapd %zmm2, %zmm1
+; SKX-NEXT: vblendmpd 64(%rdi), %zmm2, %zmm1 {%k1}
; SKX-NEXT: retq
%res = call <16 x double> @llvm.masked.load.v16f64.p0v16f64(<16 x double>* %ptrs, i32 4, <16 x i1> %mask, <16 x double> %src0)
ret <16 x double> %res
@@ -246,36 +244,30 @@ define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32
; AVX512F-NEXT: vpmovsxbd %xmm5, %zmm5
; AVX512F-NEXT: vpslld $31, %zmm5, %zmm5
; AVX512F-NEXT: vptestmd %zmm5, %zmm5, %k1
-; AVX512F-NEXT: vmovupd 128(%rdi), %zmm3 {%k1}
+; AVX512F-NEXT: vblendmpd 128(%rdi), %zmm3, %zmm5 {%k1}
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k2
-; AVX512F-NEXT: vmovupd (%rdi), %zmm1 {%k2}
+; AVX512F-NEXT: vblendmpd (%rdi), %zmm1, %zmm0 {%k2}
; AVX512F-NEXT: kshiftrw $8, %k1, %k1
-; AVX512F-NEXT: vmovupd 192(%rdi), %zmm4 {%k1}
+; AVX512F-NEXT: vblendmpd 192(%rdi), %zmm4, %zmm3 {%k1}
; AVX512F-NEXT: kshiftrw $8, %k2, %k1
-; AVX512F-NEXT: vmovupd 64(%rdi), %zmm2 {%k1}
-; AVX512F-NEXT: vmovapd %zmm1, %zmm0
-; AVX512F-NEXT: vmovapd %zmm2, %zmm1
-; AVX512F-NEXT: vmovapd %zmm3, %zmm2
-; AVX512F-NEXT: vmovapd %zmm4, %zmm3
+; AVX512F-NEXT: vblendmpd 64(%rdi), %zmm2, %zmm1 {%k1}
+; AVX512F-NEXT: vmovapd %zmm5, %zmm2
; AVX512F-NEXT: retq
;
; SKX-LABEL: test_load_32f64:
; SKX: ## BB#0:
; SKX-NEXT: vpsllw $7, %ymm0, %ymm0
; SKX-NEXT: vpmovb2m %ymm0, %k1
-; SKX-NEXT: vmovupd (%rdi), %zmm1 {%k1}
+; SKX-NEXT: vblendmpd (%rdi), %zmm1, %zmm0 {%k1}
; SKX-NEXT: kshiftrd $16, %k1, %k2
-; SKX-NEXT: vmovupd 128(%rdi), %zmm3 {%k2}
+; SKX-NEXT: vblendmpd 128(%rdi), %zmm3, %zmm5 {%k2}
; SKX-NEXT: kshiftrw $8, %k1, %k1
-; SKX-NEXT: vmovupd 64(%rdi), %zmm2 {%k1}
+; SKX-NEXT: vblendmpd 64(%rdi), %zmm2, %zmm1 {%k1}
; SKX-NEXT: kshiftrw $8, %k2, %k1
-; SKX-NEXT: vmovupd 192(%rdi), %zmm4 {%k1}
-; SKX-NEXT: vmovapd %zmm1, %zmm0
-; SKX-NEXT: vmovapd %zmm2, %zmm1
-; SKX-NEXT: vmovapd %zmm3, %zmm2
-; SKX-NEXT: vmovapd %zmm4, %zmm3
+; SKX-NEXT: vblendmpd 192(%rdi), %zmm4, %zmm3 {%k1}
+; SKX-NEXT: vmovapd %zmm5, %zmm2
; SKX-NEXT: retq
%res = call <32 x double> @llvm.masked.load.v32f64.p0v32f64(<32 x double>* %ptrs, i32 4, <32 x i1> %mask, <32 x double> %src0)
ret <32 x double> %res
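
; The masked-load hunks above trade "vmovupd (%rdi), %zmmN {%k1}" plus a
; vmovapd copy for a single vblendmpd/vpblendmq: the blendm forms take a
; separate destination register, so merging with the pass-through value no
; longer clobbers it and the trailing register-to-register moves disappear.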
diff --git a/test/CodeGen/X86/avx512-masked_memop-16-8.ll b/test/CodeGen/X86/avx512-masked_memop-16-8.ll
index bab8b96d9b8e..aedfbf7dbd65 100644
--- a/test/CodeGen/X86/avx512-masked_memop-16-8.ll
+++ b/test/CodeGen/X86/avx512-masked_memop-16-8.ll
@@ -20,8 +20,7 @@ define <32 x i8> @test_mask_load_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x
; CHECK: ## BB#0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
-; CHECK-NEXT: vmovdqu8 (%rdi), %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmb (%rdi), %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%res = call <32 x i8> @llvm.masked.load.v32i8.p0v32i8(<32 x i8>* %addr, i32 4, <32 x i1>%mask, <32 x i8> %val)
ret <32 x i8> %res
@@ -33,8 +32,7 @@ define <64 x i8> @test_mask_load_64xi8(<64 x i1> %mask, <64 x i8>* %addr, <64 x
; CHECK: ## BB#0:
; CHECK-NEXT: vpsllw $7, %zmm0, %zmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k1
-; CHECK-NEXT: vmovdqu8 (%rdi), %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmb (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%res = call <64 x i8> @llvm.masked.load.v64i8.p0v64i8(<64 x i8>* %addr, i32 4, <64 x i1>%mask, <64 x i8> %val)
ret <64 x i8> %res
@@ -70,8 +68,7 @@ define <32 x i16> @test_mask_load_32xi16(<32 x i1> %mask, <32 x i16>* %addr, <32
; CHECK: ## BB#0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
-; CHECK-NEXT: vmovdqu16 (%rdi), %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmw (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%res = call <32 x i16> @llvm.masked.load.v32i16.p0v32i16(<32 x i16>* %addr, i32 4, <32 x i1>%mask, <32 x i16> %val)
ret <32 x i16> %res
@@ -96,6 +93,7 @@ define void @test_mask_store_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8>
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
; CHECK-NEXT: vmovdqu8 %ymm1, (%rdi) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
call void @llvm.masked.store.v32i8.p0v32i8(<32 x i8> %val, <32 x i8>* %addr, i32 4, <32 x i1>%mask)
ret void
@@ -108,6 +106,7 @@ define void @test_mask_store_64xi8(<64 x i1> %mask, <64 x i8>* %addr, <64 x i8>
; CHECK-NEXT: vpsllw $7, %zmm0, %zmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k1
; CHECK-NEXT: vmovdqu8 %zmm1, (%rdi) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
call void @llvm.masked.store.v64i8.p0v64i8(<64 x i8> %val, <64 x i8>* %addr, i32 4, <64 x i1>%mask)
ret void
@@ -132,6 +131,7 @@ define void @test_mask_store_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i1
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 %ymm1, (%rdi) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
call void @llvm.masked.store.v16i16.p0v16i16(<16 x i16> %val, <16 x i16>* %addr, i32 4, <16 x i1>%mask)
ret void
@@ -144,6 +144,7 @@ define void @test_mask_store_32xi16(<32 x i1> %mask, <32 x i16>* %addr, <32 x i1
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
; CHECK-NEXT: vmovdqu16 %zmm1, (%rdi) {%k1}
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
call void @llvm.masked.store.v32i16.p0v32i16(<32 x i16> %val, <32 x i16>* %addr, i32 4, <32 x i1>%mask)
ret void
diff --git a/test/CodeGen/X86/avx512-memfold.ll b/test/CodeGen/X86/avx512-memfold.ll
new file mode 100644
index 000000000000..d754b2b78f6c
--- /dev/null
+++ b/test/CodeGen/X86/avx512-memfold.ll
@@ -0,0 +1,73 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+
+define i8 @test_int_x86_avx512_mask_cmp_ss(<4 x float> %a, float* %b, i8 %mask) {
+; CHECK-LABEL: test_int_x86_avx512_mask_cmp_ss:
+; CHECK: ## BB#0:
+; CHECK-NEXT: andl $1, %esi
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vcmpunordss (%rdi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: retq
+ %b.val = load float, float* %b
+ %bv0 = insertelement <4 x float> undef, float %b.val, i32 0
+ %bv1 = insertelement <4 x float> %bv0, float 0.000000e+00, i32 1
+ %bv2 = insertelement <4 x float> %bv1, float 0.000000e+00, i32 2
+ %bv = insertelement <4 x float> %bv2, float 0.000000e+00, i32 3
+ %res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %a, <4 x float> %bv, i32 3, i8 %mask, i32 4)
+ ret i8 %res2
+}
+declare i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float>, <4 x float>, i32, i8, i32)
+
+define <4 x float> @test_mask_max_ss(<4 x float> %a, float* %b, i8 %mask) {
+; CHECK-LABEL: test_mask_max_ss:
+; CHECK: ## BB#0:
+; CHECK-NEXT: andl $1, %esi
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmaxss (%rdi), %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %b.val = load float, float* %b
+ %bv0 = insertelement <4 x float> undef, float %b.val, i32 0
+ %bv1 = insertelement <4 x float> %bv0, float 0.000000e+00, i32 1
+ %bv2 = insertelement <4 x float> %bv1, float 0.000000e+00, i32 2
+ %bv = insertelement <4 x float> %bv2, float 0.000000e+00, i32 3
+ %res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a, <4 x float> %bv, <4 x float> zeroinitializer, i8 %mask, i32 4)
+ ret <4 x float> %res
+}
+declare <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
+
+define <4 x float> @test_maskz_add_ss(<4 x float> %a, float* %b, i8 %mask) {
+; CHECK-LABEL: test_maskz_add_ss:
+; CHECK: ## BB#0:
+; CHECK-NEXT: andl $1, %esi
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vaddss (%rdi), %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %b.val = load float, float* %b
+ %bv0 = insertelement <4 x float> undef, float %b.val, i32 0
+ %bv1 = insertelement <4 x float> %bv0, float 0.000000e+00, i32 1
+ %bv2 = insertelement <4 x float> %bv1, float 0.000000e+00, i32 2
+ %bv = insertelement <4 x float> %bv2, float 0.000000e+00, i32 3
+ %res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a, <4 x float> %bv, <4 x float> zeroinitializer, i8 %mask, i32 4)
+ ret <4 x float> %res
+}
+
+declare <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
+
+declare <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32)
+
+define <2 x double> @test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %a, <2 x double> %b, double* %c, i8 %mask){
+; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_sd:
+; CHECK: ## BB#0:
+; CHECK-NEXT: andl $1, %esi
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vfmadd213sd (%rdi), %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: retq
+ %c.val = load double, double* %c
+ %cv0 = insertelement <2 x double> undef, double %c.val, i32 0
+ %cv = insertelement <2 x double> %cv0, double 0.000000e+00, i32 1
+ %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %a, <2 x double> %b, <2 x double> %cv, i8 %mask, i32 4)
+ ret <2 x double> %res
+}
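+;
+; These autogenerated checks pin down memory-operand folding for masked
+; scalar AVX512 intrinsics; -disable-peephole is presumably there so the
+; fold has to come from instruction selection rather than the peephole
+; pass, and the "andl $1, %esi" lines reflect that only bit 0 of the i8
+; mask matters for an ss/sd operation.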
diff --git a/test/CodeGen/X86/avx512-mov.ll b/test/CodeGen/X86/avx512-mov.ll
index 9234ae838cff..df988185efc5 100644
--- a/test/CodeGen/X86/avx512-mov.ll
+++ b/test/CodeGen/X86/avx512-mov.ll
@@ -4,7 +4,7 @@
define i32 @test1(float %x) {
; CHECK-LABEL: test1:
; CHECK: ## BB#0:
-; CHECK-NEXT: vmovd %xmm0, %eax ## encoding: [0x62,0xf1,0x7d,0x08,0x7e,0xc0]
+; CHECK-NEXT: vmovd %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = bitcast float %x to i32
ret i32 %res
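
; The encoding change above is EVEX-to-VEX compression: when an AVX512
; instruction uses no zmm, mask, or broadcast features, re-emitting it with
; a VEX prefix (0xc5 ... instead of 0x62 ...) saves bytes, and the CHECK
; line now records the shorter encoding.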
diff --git a/test/CodeGen/X86/avx512-pmovxrm.ll b/test/CodeGen/X86/avx512-pmovxrm.ll
index 7c3965e08632..ab3f32091fcb 100644
--- a/test/CodeGen/X86/avx512-pmovxrm.ll
+++ b/test/CodeGen/X86/avx512-pmovxrm.ll
@@ -135,14 +135,12 @@ define <8 x i64> @test_llvm_x86_avx512_pmovzxbq(<16 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx512_pmovzxbq:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovdqu (%eax), %xmm0
-; X32-NEXT: vpmovzxbq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
+; X32-NEXT: vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx512_pmovzxbq:
; X64: ## BB#0:
-; X64-NEXT: vmovdqu (%rdi), %xmm0
-; X64-NEXT: vpmovzxbq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
+; X64-NEXT: vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = shufflevector <16 x i8> %1, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
diff --git a/test/CodeGen/X86/avx512-regcall-Mask.ll b/test/CodeGen/X86/avx512-regcall-Mask.ll
index 325097ee9510..781112866ca5 100644
--- a/test/CodeGen/X86/avx512-regcall-Mask.ll
+++ b/test/CodeGen/X86/avx512-regcall-Mask.ll
@@ -251,9 +251,9 @@ entry:
}
; CHECK-LABEL: test_argv16i1:
-; CHECK: kmovw %edx, %k{{[0-9]+}}
-; CHECK: kmovw %ecx, %k{{[0-9]+}}
-; CHECK: kmovw %eax, %k{{[0-9]+}}
+; CHECK: kmovd %edx, %k{{[0-9]+}}
+; CHECK: kmovd %ecx, %k{{[0-9]+}}
+; CHECK: kmovd %eax, %k{{[0-9]+}}
; CHECK: ret{{l|q}}
; Test regcall when receiving arguments of v16i1 type
@@ -301,9 +301,9 @@ entry:
}
; CHECK-LABEL: test_argv8i1:
-; CHECK: kmovw %edx, %k{{[0-9]+}}
-; CHECK: kmovw %ecx, %k{{[0-9]+}}
-; CHECK: kmovw %eax, %k{{[0-9]+}}
+; CHECK: kmovd %edx, %k{{[0-9]+}}
+; CHECK: kmovd %ecx, %k{{[0-9]+}}
+; CHECK: kmovd %eax, %k{{[0-9]+}}
; CHECK: ret{{l|q}}
; Test regcall when receiving arguments of v8i1 type
@@ -339,7 +339,7 @@ define x86_regcallcc <8 x i1> @test_retv8i1() {
; CHECK-LABEL: caller_retv8i1:
; CHECK: call{{l|q}} {{_*}}test_retv8i1
-; CHECK: kmovw %eax, %k{{[0-9]+}}
+; CHECK: kmovd %eax, %k{{[0-9]+}}
; CHECK: ret{{l|q}}
; Test regcall when processing result of v8i1 type
diff --git a/test/CodeGen/X86/avx512-regcall-NoMask.ll b/test/CodeGen/X86/avx512-regcall-NoMask.ll
index a29c1e4628a1..334097917853 100644
--- a/test/CodeGen/X86/avx512-regcall-NoMask.ll
+++ b/test/CodeGen/X86/avx512-regcall-NoMask.ll
@@ -325,13 +325,11 @@ define x86_regcallcc [4 x i32]* @test_CallargRetPointer([4 x i32]* %a) {
}
; X32-LABEL: test_argRet128Vector:
-; X32: vmovdqa{{.*}} %xmm0, %xmm1
-; X32: vmovdqa{{.*}} %xmm1, %xmm0
+; X32: vpblend{{.*}} %xmm0, %xmm1, %xmm0
; X32: ret{{.*}}
; WIN64-LABEL: test_argRet128Vector:
-; WIN64: vmovdqa{{.*}} %xmm0, %xmm1
-; WIN64: vmovdqa{{.*}} %xmm1, %xmm0
+; WIN64: vpblend{{.*}} %xmm0, %xmm1, %xmm0
; WIN64: ret{{.*}}
; Test regcall when receiving/returning a 128-bit vector
@@ -360,13 +358,11 @@ define x86_regcallcc <4 x i32> @test_CallargRet128Vector(<4 x i32> %a) {
}
; X32-LABEL: test_argRet256Vector:
-; X32: vmovdqa{{.*}} %ymm0, %ymm1
-; X32: vmovdqa{{.*}} %ymm1, %ymm0
+; X32: vpblend{{.*}} %ymm0, %ymm1, %ymm0
; X32: ret{{.*}}
; WIN64-LABEL: test_argRet256Vector:
-; WIN64: vmovdqa{{.*}} %ymm0, %ymm1
-; WIN64: vmovdqa{{.*}} %ymm1, %ymm0
+; WIN64: vpblend{{.*}} %ymm0, %ymm1, %ymm0
; WIN64: ret{{.*}}
; Test regcall when receiving/returning a 256-bit vector
@@ -395,13 +391,11 @@ define x86_regcallcc <8 x i32> @test_CallargRet256Vector(<8 x i32> %a) {
}
; X32-LABEL: test_argRet512Vector:
-; X32: vmovdqa{{.*}} %zmm0, %zmm1
-; X32: vmovdqa{{.*}} %zmm1, %zmm0
+; X32: vpblend{{.*}} %zmm0, %zmm1, %zmm0
; X32: ret{{.*}}
; WIN64-LABEL: test_argRet512Vector:
-; WIN64: vmovdqa{{.*}} %zmm0, %zmm1
-; WIN64: vmovdqa{{.*}} %zmm1, %zmm0
+; WIN64: vpblend{{.*}} %zmm0, %zmm1, %zmm0
; WIN64: ret{{.*}}
; Test regcall when receiving/returning a 512-bit vector
@@ -475,32 +469,27 @@ define x86_regcallcc <32 x float> @testf32_inp(<32 x float> %a, <32 x float> %b,
ret <32 x float> %x4
}
-; X32-LABEL: pushl {{%e(si|di|bx|bp)}}
-; X32: pushl {{%e(si|di|bx|bp)}}
-; X32: pushl {{%e(si|di|bx|bp)}}
-; X32: pushl {{%e(si|di|bx|bp)}}
-; X32: popl {{%e(si|di|bx|bp)}}
-; X32: popl {{%e(si|di|bx|bp)}}
-; X32: popl {{%e(si|di|bx|bp)}}
-; X32: popl {{%e(si|di|bx|bp)}}
+; X32-LABEL: testi32_inp
+; X32: pushl {{%e(bx|bp)}}
+; X32: pushl {{%e(bx|bp)}}
+; X32: popl {{%e(bx|bp)}}
+; X32: popl {{%e(bx|bp)}}
; X32: retl
-; WIN64-LABEL: pushq {{%r(bp|bx|1[0-5])}}
+; WIN64-LABEL: testi32_inp
; WIN64: pushq {{%r(bp|bx|1[0-5])}}
; WIN64: pushq {{%r(bp|bx|1[0-5])}}
; WIN64: pushq {{%r(bp|bx|1[0-5])}}
; WIN64: popq {{%r(bp|bx|1[0-5])}}
; WIN64: popq {{%r(bp|bx|1[0-5])}}
; WIN64: popq {{%r(bp|bx|1[0-5])}}
-; WIN64: popq {{%r(bp|bx|1[0-5])}}
; WIN64: retq
-; LINUXOSX64-LABEL: pushq {{%r(bp|bx|1[2-5])}}
+; LINUXOSX64-LABEL: testi32_inp
; LINUXOSX64: pushq {{%r(bp|bx|1[2-5])}}
; LINUXOSX64: pushq {{%r(bp|bx|1[2-5])}}
; LINUXOSX64: popq {{%r(bp|bx|1[2-5])}}
; LINUXOSX64: popq {{%r(bp|bx|1[2-5])}}
-; LINUXOSX64: popq {{%r(bp|bx|1[2-5])}}
; LINUXOSX64: retq
; Test regcall when passing multiple input parameters - callee-saved GPRs
diff --git a/test/CodeGen/X86/avx512-select.ll b/test/CodeGen/X86/avx512-select.ll
index 3f427298c177..1859b1bcfaf6 100644
--- a/test/CodeGen/X86/avx512-select.ll
+++ b/test/CodeGen/X86/avx512-select.ll
@@ -90,6 +90,7 @@ define i8 @select05_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%mask = load <8 x i1> , <8 x i1>* %m
%a = load <8 x i1> , <8 x i1>* %a.0
@@ -120,6 +121,7 @@ define i8 @select06_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: kandw %k1, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%mask = load <8 x i1> , <8 x i1>* %m
%a = load <8 x i1> , <8 x i1>* %a.0
@@ -137,6 +139,7 @@ define i8 @select07(i8 %a.0, i8 %b.0, i8 %m) {
; CHECK-NEXT: kandw %k0, %k1, %k0
; CHECK-NEXT: korw %k2, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%mask = bitcast i8 %m to <8 x i1>
%a = bitcast i8 %a.0 to <8 x i1>
@@ -149,10 +152,7 @@ define i8 @select07(i8 %a.0, i8 %b.0, i8 %m) {
define i64 @pr30249() {
; CHECK-LABEL: pr30249:
; CHECK: ## BB#0:
-; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: cmpb $1, %cl
-; CHECK-NEXT: movl $1, %eax
-; CHECK-NEXT: adcxq %rcx, %rax
+; CHECK-NEXT: movl $2, %eax
; CHECK-NEXT: retq
%v = select i1 undef , i64 1, i64 2
ret i64 %v
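
; With an undef condition, "select i1 undef, i64 1, i64 2" may fold to
; either arm; the updated check shows the constant 2 being returned
; directly instead of the old xorl/cmpb/adcxq dance that computed 1 + carry.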
diff --git a/test/CodeGen/X86/avx512-skx-insert-subvec.ll b/test/CodeGen/X86/avx512-skx-insert-subvec.ll
index 2200f1159880..b7f80ec97150 100644
--- a/test/CodeGen/X86/avx512-skx-insert-subvec.ll
+++ b/test/CodeGen/X86/avx512-skx-insert-subvec.ll
@@ -30,11 +30,12 @@ define <8 x i1> @test2(<2 x i1> %a) {
; CHECK: # BB#0:
; CHECK-NEXT: vpsllq $63, %xmm0, %xmm0
; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k0
-; CHECK-NEXT: vpxord %zmm0, %zmm0, %zmm0
-; CHECK-NEXT: vpmovm2q %k0, %zmm1
-; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; CHECK-NEXT: vpmovm2q %k0, %zmm0
+; CHECK-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; CHECK-NEXT: vpmovq2m %zmm0, %k0
; CHECK-NEXT: vpmovm2w %k0, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%res = shufflevector <2 x i1> %a, <2 x i1> zeroinitializer, <8 x i32> <i32 3, i32 3, i32 undef, i32 undef, i32 0, i32 1, i32 undef, i32 undef>
ret <8 x i1> %res
@@ -167,3 +168,15 @@ define <2 x i1> @test10(<4 x i1> %a, <4 x i1> %b) {
%res = shufflevector <4 x i1> %a, <4 x i1> %b, <2 x i32> <i32 2, i32 3>
ret <2 x i1> %res
}
+
+define <8 x i1> @test11(<4 x i1> %a, <4 x i1>%b) {
+; CHECK-LABEL: test11:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpslld $31, %xmm0, %xmm0
+; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k0
+; CHECK-NEXT: kshiftlb $4, %k0, %k0
+; CHECK-NEXT: vpmovm2w %k0, %xmm0
+; CHECK-NEXT: retq
+ %res = shufflevector <4 x i1> %a, <4 x i1> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 1, i32 2, i32 3>
+ ret <8 x i1> %res
+}
diff --git a/test/CodeGen/X86/avx512-trunc.ll b/test/CodeGen/X86/avx512-trunc.ll
index 646697b82c2d..1c88ce6eb2f7 100644
--- a/test/CodeGen/X86/avx512-trunc.ll
+++ b/test/CodeGen/X86/avx512-trunc.ll
@@ -8,6 +8,7 @@ define <16 x i8> @trunc_16x32_to_16x8(<16 x i32> %i) #0 {
; ALL-LABEL: trunc_16x32_to_16x8:
; ALL: ## BB#0:
; ALL-NEXT: vpmovdb %zmm0, %xmm0
+; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
%x = trunc <16 x i32> %i to <16 x i8>
ret <16 x i8> %x
@@ -17,6 +18,7 @@ define <8 x i16> @trunc_8x64_to_8x16(<8 x i64> %i) #0 {
; ALL-LABEL: trunc_8x64_to_8x16:
; ALL: ## BB#0:
; ALL-NEXT: vpmovqw %zmm0, %xmm0
+; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
%x = trunc <8 x i64> %i to <8 x i16>
ret <8 x i16> %x
@@ -35,6 +37,7 @@ define <8 x i8> @trunc_qb_512(<8 x i64> %i) #0 {
; ALL-LABEL: trunc_qb_512:
; ALL: ## BB#0:
; ALL-NEXT: vpmovqw %zmm0, %xmm0
+; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
%x = trunc <8 x i64> %i to <8 x i8>
ret <8 x i8> %x
@@ -44,6 +47,7 @@ define void @trunc_qb_512_mem(<8 x i64> %i, <8 x i8>* %res) #0 {
; ALL-LABEL: trunc_qb_512_mem:
; ALL: ## BB#0:
; ALL-NEXT: vpmovqb %zmm0, (%rdi)
+; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
%x = trunc <8 x i64> %i to <8 x i8>
store <8 x i8> %x, <8 x i8>* %res
@@ -56,11 +60,13 @@ define <4 x i8> @trunc_qb_256(<4 x i64> %i) #0 {
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_qb_256:
; SKX: ## BB#0:
; SKX-NEXT: vpmovqd %ymm0, %xmm0
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = trunc <4 x i64> %i to <4 x i8>
ret <4 x i8> %x
@@ -73,11 +79,13 @@ define void @trunc_qb_256_mem(<4 x i64> %i, <4 x i8>* %res) #0 {
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; KNL-NEXT: vmovd %xmm0, (%rdi)
+; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_qb_256_mem:
; SKX: ## BB#0:
; SKX-NEXT: vpmovqb %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = trunc <4 x i64> %i to <4 x i8>
store <4 x i8> %x, <4 x i8>* %res
@@ -112,6 +120,7 @@ define <8 x i16> @trunc_qw_512(<8 x i64> %i) #0 {
; ALL-LABEL: trunc_qw_512:
; ALL: ## BB#0:
; ALL-NEXT: vpmovqw %zmm0, %xmm0
+; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
%x = trunc <8 x i64> %i to <8 x i16>
ret <8 x i16> %x
@@ -121,6 +130,7 @@ define void @trunc_qw_512_mem(<8 x i64> %i, <8 x i16>* %res) #0 {
; ALL-LABEL: trunc_qw_512_mem:
; ALL: ## BB#0:
; ALL-NEXT: vpmovqw %zmm0, (%rdi)
+; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
%x = trunc <8 x i64> %i to <8 x i16>
store <8 x i16> %x, <8 x i16>* %res
@@ -133,11 +143,13 @@ define <4 x i16> @trunc_qw_256(<4 x i64> %i) #0 {
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_qw_256:
; SKX: ## BB#0:
; SKX-NEXT: vpmovqd %ymm0, %xmm0
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = trunc <4 x i64> %i to <4 x i16>
ret <4 x i16> %x
@@ -150,11 +162,13 @@ define void @trunc_qw_256_mem(<4 x i64> %i, <4 x i16>* %res) #0 {
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; KNL-NEXT: vmovq %xmm0, (%rdi)
+; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_qw_256_mem:
; SKX: ## BB#0:
; SKX-NEXT: vpmovqw %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = trunc <4 x i64> %i to <4 x i16>
store <4 x i16> %x, <4 x i16>* %res
@@ -199,6 +213,7 @@ define void @trunc_qd_512_mem(<8 x i64> %i, <8 x i32>* %res) #0 {
; ALL-LABEL: trunc_qd_512_mem:
; ALL: ## BB#0:
; ALL-NEXT: vpmovqd %zmm0, (%rdi)
+; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
%x = trunc <8 x i64> %i to <8 x i32>
store <8 x i32> %x, <8 x i32>* %res
@@ -211,11 +226,13 @@ define <4 x i32> @trunc_qd_256(<4 x i64> %i) #0 {
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_qd_256:
; SKX: ## BB#0:
; SKX-NEXT: vpmovqd %ymm0, %xmm0
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = trunc <4 x i64> %i to <4 x i32>
ret <4 x i32> %x
@@ -227,11 +244,13 @@ define void @trunc_qd_256_mem(<4 x i64> %i, <4 x i32>* %res) #0 {
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vmovdqa %xmm0, (%rdi)
+; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_qd_256_mem:
; SKX: ## BB#0:
; SKX-NEXT: vpmovqd %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = trunc <4 x i64> %i to <4 x i32>
store <4 x i32> %x, <4 x i32>* %res
@@ -266,6 +285,7 @@ define <16 x i8> @trunc_db_512(<16 x i32> %i) #0 {
; ALL-LABEL: trunc_db_512:
; ALL: ## BB#0:
; ALL-NEXT: vpmovdb %zmm0, %xmm0
+; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
%x = trunc <16 x i32> %i to <16 x i8>
ret <16 x i8> %x
@@ -275,6 +295,7 @@ define void @trunc_db_512_mem(<16 x i32> %i, <16 x i8>* %res) #0 {
; ALL-LABEL: trunc_db_512_mem:
; ALL: ## BB#0:
; ALL-NEXT: vpmovdb %zmm0, (%rdi)
+; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
%x = trunc <16 x i32> %i to <16 x i8>
store <16 x i8> %x, <16 x i8>* %res
@@ -287,11 +308,13 @@ define <8 x i8> @trunc_db_256(<8 x i32> %i) #0 {
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vpmovdw %zmm0, %ymm0
; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_db_256:
; SKX: ## BB#0:
; SKX-NEXT: vpmovdw %ymm0, %xmm0
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = trunc <8 x i32> %i to <8 x i8>
ret <8 x i8> %x
@@ -304,11 +327,13 @@ define void @trunc_db_256_mem(<8 x i32> %i, <8 x i8>* %res) #0 {
; KNL-NEXT: vpmovdw %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; KNL-NEXT: vmovq %xmm0, (%rdi)
+; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_db_256_mem:
; SKX: ## BB#0:
; SKX-NEXT: vpmovdb %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = trunc <8 x i32> %i to <8 x i8>
store <8 x i8> %x, <8 x i8>* %res
@@ -352,6 +377,7 @@ define void @trunc_dw_512_mem(<16 x i32> %i, <16 x i16>* %res) #0 {
; ALL-LABEL: trunc_dw_512_mem:
; ALL: ## BB#0:
; ALL-NEXT: vpmovdw %zmm0, (%rdi)
+; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
%x = trunc <16 x i32> %i to <16 x i16>
store <16 x i16> %x, <16 x i16>* %res
@@ -364,11 +390,13 @@ define <8 x i16> @trunc_dw_256(<8 x i32> %i) #0 {
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vpmovdw %zmm0, %ymm0
; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_dw_256:
; SKX: ## BB#0:
; SKX-NEXT: vpmovdw %ymm0, %xmm0
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = trunc <8 x i32> %i to <8 x i16>
ret <8 x i16> %x
@@ -380,11 +408,13 @@ define void @trunc_dw_256_mem(<8 x i32> %i, <8 x i16>* %res) #0 {
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vpmovdw %zmm0, %ymm0
; KNL-NEXT: vmovdqa %xmm0, (%rdi)
+; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_dw_256_mem:
; SKX: ## BB#0:
; SKX-NEXT: vpmovdw %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = trunc <8 x i32> %i to <8 x i16>
store <8 x i16> %x, <8 x i16>* %res
@@ -434,11 +464,13 @@ define void @trunc_wb_512_mem(<32 x i16> %i, <32 x i8>* %res) #0 {
; KNL-NEXT: vpmovdb %zmm1, %xmm1
; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; KNL-NEXT: vmovdqa %ymm0, (%rdi)
+; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_wb_512_mem:
; SKX: ## BB#0:
; SKX-NEXT: vpmovwb %zmm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = trunc <32 x i16> %i to <32 x i8>
store <32 x i8> %x, <32 x i8>* %res
@@ -450,11 +482,13 @@ define <16 x i8> @trunc_wb_256(<16 x i16> %i) #0 {
; KNL: ## BB#0:
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vpmovdb %zmm0, %xmm0
+; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_wb_256:
; SKX: ## BB#0:
; SKX-NEXT: vpmovwb %ymm0, %xmm0
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = trunc <16 x i16> %i to <16 x i8>
ret <16 x i8> %x
@@ -466,11 +500,13 @@ define void @trunc_wb_256_mem(<16 x i16> %i, <16 x i8>* %res) #0 {
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vpmovdb %zmm0, %xmm0
; KNL-NEXT: vmovdqa %xmm0, (%rdi)
+; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_wb_256_mem:
; SKX: ## BB#0:
; SKX-NEXT: vpmovwb %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = trunc <16 x i16> %i to <16 x i8>
store <16 x i8> %x, <16 x i8>* %res
@@ -500,3 +536,244 @@ define void @trunc_wb_128_mem(<8 x i16> %i, <8 x i8>* %res) #0 {
store <8 x i8> %x, <8 x i8>* %res
ret void
}
+
+
+define void @usat_trunc_wb_256_mem(<16 x i16> %i, <16 x i8>* %res) {
+; KNL-LABEL: usat_trunc_wb_256_mem:
+; KNL: ## BB#0:
+; KNL-NEXT: vpminuw {{.*}}(%rip), %ymm0, %ymm0
+; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
+; KNL-NEXT: vpmovdb %zmm0, %xmm0
+; KNL-NEXT: vmovdqu %xmm0, (%rdi)
+; KNL-NEXT: vzeroupper
+; KNL-NEXT: retq
+;
+; SKX-LABEL: usat_trunc_wb_256_mem:
+; SKX: ## BB#0:
+; SKX-NEXT: vpmovuswb %ymm0, (%rdi)
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %x3 = icmp ult <16 x i16> %i, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ %x5 = select <16 x i1> %x3, <16 x i16> %i, <16 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ %x6 = trunc <16 x i16> %x5 to <16 x i8>
+ store <16 x i8> %x6, <16 x i8>* %res, align 1
+ ret void
+}
+
+define <16 x i8> @usat_trunc_wb_256(<16 x i16> %i) {
+; KNL-LABEL: usat_trunc_wb_256:
+; KNL: ## BB#0:
+; KNL-NEXT: vpminuw {{.*}}(%rip), %ymm0, %ymm0
+; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
+; KNL-NEXT: vpmovdb %zmm0, %xmm0
+; KNL-NEXT: vzeroupper
+; KNL-NEXT: retq
+;
+; SKX-LABEL: usat_trunc_wb_256:
+; SKX: ## BB#0:
+; SKX-NEXT: vpmovuswb %ymm0, %xmm0
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %x3 = icmp ult <16 x i16> %i, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ %x5 = select <16 x i1> %x3, <16 x i16> %i, <16 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ %x6 = trunc <16 x i16> %x5 to <16 x i8>
+ ret <16 x i8> %x6
+}
+
+define void @usat_trunc_wb_128_mem(<8 x i16> %i, <8 x i8>* %res) {
+; KNL-LABEL: usat_trunc_wb_128_mem:
+; KNL: ## BB#0:
+; KNL-NEXT: vpminuw {{.*}}(%rip), %xmm0, %xmm0
+; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; KNL-NEXT: vmovq %xmm0, (%rdi)
+; KNL-NEXT: retq
+;
+; SKX-LABEL: usat_trunc_wb_128_mem:
+; SKX: ## BB#0:
+; SKX-NEXT: vpmovuswb %xmm0, (%rdi)
+; SKX-NEXT: retq
+ %x3 = icmp ult <8 x i16> %i, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ %x5 = select <8 x i1> %x3, <8 x i16> %i, <8 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ %x6 = trunc <8 x i16> %x5 to <8 x i8>
+ store <8 x i8> %x6, <8 x i8>* %res, align 1
+ ret void
+}
+
+define void @usat_trunc_db_512_mem(<16 x i32> %i, <16 x i8>* %res) {
+; ALL-LABEL: usat_trunc_db_512_mem:
+; ALL: ## BB#0:
+; ALL-NEXT: vpmovusdb %zmm0, (%rdi)
+; ALL-NEXT: vzeroupper
+; ALL-NEXT: retq
+ %x3 = icmp ult <16 x i32> %i, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %x5 = select <16 x i1> %x3, <16 x i32> %i, <16 x i32> <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %x6 = trunc <16 x i32> %x5 to <16 x i8>
+ store <16 x i8> %x6, <16 x i8>* %res, align 1
+ ret void
+}
+
+define void @usat_trunc_qb_512_mem(<8 x i64> %i, <8 x i8>* %res) {
+; ALL-LABEL: usat_trunc_qb_512_mem:
+; ALL: ## BB#0:
+; ALL-NEXT: vpmovusqb %zmm0, (%rdi)
+; ALL-NEXT: vzeroupper
+; ALL-NEXT: retq
+ %x3 = icmp ult <8 x i64> %i, <i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255>
+ %x5 = select <8 x i1> %x3, <8 x i64> %i, <8 x i64> <i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255>
+ %x6 = trunc <8 x i64> %x5 to <8 x i8>
+ store <8 x i8> %x6, <8 x i8>* %res, align 1
+ ret void
+}
+
+define void @usat_trunc_qd_512_mem(<8 x i64> %i, <8 x i32>* %res) {
+; ALL-LABEL: usat_trunc_qd_512_mem:
+; ALL: ## BB#0:
+; ALL-NEXT: vpmovusqd %zmm0, (%rdi)
+; ALL-NEXT: vzeroupper
+; ALL-NEXT: retq
+ %x3 = icmp ult <8 x i64> %i, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+ %x5 = select <8 x i1> %x3, <8 x i64> %i, <8 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+ %x6 = trunc <8 x i64> %x5 to <8 x i32>
+ store <8 x i32> %x6, <8 x i32>* %res, align 1
+ ret void
+}
+
+define void @usat_trunc_qw_512_mem(<8 x i64> %i, <8 x i16>* %res) {
+; ALL-LABEL: usat_trunc_qw_512_mem:
+; ALL: ## BB#0:
+; ALL-NEXT: vpmovusqw %zmm0, (%rdi)
+; ALL-NEXT: vzeroupper
+; ALL-NEXT: retq
+ %x3 = icmp ult <8 x i64> %i, <i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535>
+ %x5 = select <8 x i1> %x3, <8 x i64> %i, <8 x i64> <i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535>
+ %x6 = trunc <8 x i64> %x5 to <8 x i16>
+ store <8 x i16> %x6, <8 x i16>* %res, align 1
+ ret void
+}
+
+define <32 x i8> @usat_trunc_db_1024(<32 x i32> %i) {
+; KNL-LABEL: usat_trunc_db_1024:
+; KNL: ## BB#0:
+; KNL-NEXT: vpmovusdb %zmm0, %xmm0
+; KNL-NEXT: vpmovusdb %zmm1, %xmm1
+; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; KNL-NEXT: retq
+;
+; SKX-LABEL: usat_trunc_db_1024:
+; SKX: ## BB#0:
+; SKX-NEXT: vpbroadcastd {{.*}}(%rip), %zmm2
+; SKX-NEXT: vpminud %zmm2, %zmm1, %zmm1
+; SKX-NEXT: vpminud %zmm2, %zmm0, %zmm0
+; SKX-NEXT: vpmovdw %zmm0, %ymm0
+; SKX-NEXT: vpmovdw %zmm1, %ymm1
+; SKX-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; SKX-NEXT: vpmovwb %zmm0, %ymm0
+; SKX-NEXT: retq
+ %x3 = icmp ult <32 x i32> %i, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %x5 = select <32 x i1> %x3, <32 x i32> %i, <32 x i32> <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %x6 = trunc <32 x i32> %x5 to <32 x i8>
+ ret <32 x i8> %x6
+}
+
+define void @usat_trunc_db_1024_mem(<32 x i32> %i, <32 x i8>* %p) {
+; KNL-LABEL: usat_trunc_db_1024_mem:
+; KNL: ## BB#0:
+; KNL-NEXT: vpmovusdb %zmm0, %xmm0
+; KNL-NEXT: vpmovusdb %zmm1, %xmm1
+; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; KNL-NEXT: vmovdqu %ymm0, (%rdi)
+; KNL-NEXT: vzeroupper
+; KNL-NEXT: retq
+;
+; SKX-LABEL: usat_trunc_db_1024_mem:
+; SKX: ## BB#0:
+; SKX-NEXT: vpbroadcastd {{.*}}(%rip), %zmm2
+; SKX-NEXT: vpminud %zmm2, %zmm1, %zmm1
+; SKX-NEXT: vpminud %zmm2, %zmm0, %zmm0
+; SKX-NEXT: vpmovdw %zmm0, %ymm0
+; SKX-NEXT: vpmovdw %zmm1, %ymm1
+; SKX-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; SKX-NEXT: vpmovwb %zmm0, (%rdi)
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %x3 = icmp ult <32 x i32> %i, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %x5 = select <32 x i1> %x3, <32 x i32> %i, <32 x i32> <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %x6 = trunc <32 x i32> %x5 to <32 x i8>
+ store <32 x i8>%x6, <32 x i8>* %p, align 1
+ ret void
+}
+
+define <16 x i16> @usat_trunc_dw_512(<16 x i32> %i) {
+; ALL-LABEL: usat_trunc_dw_512:
+; ALL: ## BB#0:
+; ALL-NEXT: vpmovusdw %zmm0, %ymm0
+; ALL-NEXT: retq
+ %x3 = icmp ult <16 x i32> %i, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+ %x5 = select <16 x i1> %x3, <16 x i32> %i, <16 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+ %x6 = trunc <16 x i32> %x5 to <16 x i16>
+ ret <16 x i16> %x6
+}
+
+define <8 x i8> @usat_trunc_wb_128(<8 x i16> %i) {
+; ALL-LABEL: usat_trunc_wb_128:
+; ALL: ## BB#0:
+; ALL-NEXT: vpminuw {{.*}}(%rip), %xmm0, %xmm0
+; ALL-NEXT: retq
+ %x3 = icmp ult <8 x i16> %i, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ %x5 = select <8 x i1> %x3, <8 x i16> %i, <8 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ %x6 = trunc <8 x i16> %x5 to <8 x i8>
+ ret <8 x i8>%x6
+}
+
+define <16 x i16> @usat_trunc_qw_1024(<16 x i64> %i) {
+; KNL-LABEL: usat_trunc_qw_1024:
+; KNL: ## BB#0:
+; KNL-NEXT: vpbroadcastq {{.*}}(%rip), %zmm2
+; KNL-NEXT: vpminuq %zmm2, %zmm1, %zmm1
+; KNL-NEXT: vpminuq %zmm2, %zmm0, %zmm0
+; KNL-NEXT: vpmovqd %zmm0, %ymm0
+; KNL-NEXT: vpmovqd %zmm1, %ymm1
+; KNL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; KNL-NEXT: vpmovdw %zmm0, %ymm0
+; KNL-NEXT: retq
+;
+; SKX-LABEL: usat_trunc_qw_1024:
+; SKX: ## BB#0:
+; SKX-NEXT: vpbroadcastq {{.*}}(%rip), %zmm2
+; SKX-NEXT: vpminuq %zmm2, %zmm1, %zmm1
+; SKX-NEXT: vpminuq %zmm2, %zmm0, %zmm0
+; SKX-NEXT: vpmovqd %zmm0, %ymm0
+; SKX-NEXT: vpmovqd %zmm1, %ymm1
+; SKX-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0
+; SKX-NEXT: vpmovdw %zmm0, %ymm0
+; SKX-NEXT: retq
+ %x3 = icmp ult <16 x i64> %i, <i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535>
+ %x5 = select <16 x i1> %x3, <16 x i64> %i, <16 x i64> <i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535>
+ %x6 = trunc <16 x i64> %x5 to <16 x i16>
+ ret <16 x i16> %x6
+}
+
+define <16 x i8> @usat_trunc_db_256(<8 x i32> %x) {
+; KNL-LABEL: usat_trunc_db_256:
+; KNL: ## BB#0:
+; KNL-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; KNL-NEXT: vpminud %ymm1, %ymm0, %ymm0
+; KNL-NEXT: vpmovdw %zmm0, %ymm0
+; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; KNL-NEXT: vzeroupper
+; KNL-NEXT: retq
+;
+; SKX-LABEL: usat_trunc_db_256:
+; SKX: ## BB#0:
+; SKX-NEXT: vpminud {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; SKX-NEXT: vpmovdw %ymm0, %xmm0
+; SKX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+ %tmp1 = icmp ult <8 x i32> %x, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %tmp2 = select <8 x i1> %tmp1, <8 x i32> %x, <8 x i32> <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %tmp3 = trunc <8 x i32> %tmp2 to <8 x i8>
+ %tmp4 = shufflevector <8 x i8> %tmp3, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %tmp4
+}
+
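
The usat_trunc_* tests added above all encode the same idiom: icmp ult against the destination type's maximum, select, then trunc. Per lane that is an unsigned saturating narrow, which is why most of the SKX checks expect a single vpmovus* instruction. A minimal scalar restatement in C of the 16-to-8-bit case (the helper name is ours, not from the test):

#include <stdint.h>

/* Scalar model of one lane of the usat_trunc pattern: clamp to the
   destination type's maximum, then truncate.  The vector tests do
   exactly this lane-wise via icmp ult + select + trunc. */
static uint8_t usat_u16_to_u8(uint16_t x) {
  return (uint8_t)(x < 255 ? x : 255);
}
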
diff --git a/test/CodeGen/X86/avx512-vbroadcast.ll b/test/CodeGen/X86/avx512-vbroadcast.ll
index 1991ee4f3376..350c0d7873ea 100644
--- a/test/CodeGen/X86/avx512-vbroadcast.ll
+++ b/test/CodeGen/X86/avx512-vbroadcast.ll
@@ -128,7 +128,7 @@ define <8 x double> @_sd8xdouble_mask(double %a, <8 x double> %i, <8 x i32> %m
; ALL-NEXT: vpxor %ymm3, %ymm3, %ymm3
; ALL-NEXT: vpcmpneqd %zmm3, %zmm2, %k1
; ALL-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1}
-; ALL-NEXT: vmovaps %zmm1, %zmm0
+; ALL-NEXT: vmovapd %zmm1, %zmm0
; ALL-NEXT: retq
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%b = insertelement <8 x double> undef, double %a, i32 0
@@ -406,14 +406,14 @@ declare void @func_f32(float)
define <16 x float> @broadcast_ss_spill(float %x) {
; ALL-LABEL: broadcast_ss_spill:
; ALL: # BB#0:
-; ALL-NEXT: pushq %rax
+; ALL-NEXT: subq $24, %rsp
; ALL-NEXT: .Lcfi0:
-; ALL-NEXT: .cfi_def_cfa_offset 16
+; ALL-NEXT: .cfi_def_cfa_offset 32
; ALL-NEXT: vaddss %xmm0, %xmm0, %xmm0
-; ALL-NEXT: vmovss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
+; ALL-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; ALL-NEXT: callq func_f32
-; ALL-NEXT: vbroadcastss {{[0-9]+}}(%rsp), %zmm0 # 4-byte Folded Reload
-; ALL-NEXT: popq %rax
+; ALL-NEXT: vbroadcastss (%rsp), %zmm0 # 16-byte Folded Reload
+; ALL-NEXT: addq $24, %rsp
; ALL-NEXT: retq
%a = fadd float %x, %x
call void @func_f32(float %a)
@@ -426,14 +426,14 @@ declare void @func_f64(double)
define <8 x double> @broadcast_sd_spill(double %x) {
; ALL-LABEL: broadcast_sd_spill:
; ALL: # BB#0:
-; ALL-NEXT: pushq %rax
+; ALL-NEXT: subq $24, %rsp
; ALL-NEXT: .Lcfi1:
-; ALL-NEXT: .cfi_def_cfa_offset 16
+; ALL-NEXT: .cfi_def_cfa_offset 32
; ALL-NEXT: vaddsd %xmm0, %xmm0, %xmm0
-; ALL-NEXT: vmovsd %xmm0, (%rsp) # 8-byte Spill
+; ALL-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; ALL-NEXT: callq func_f64
-; ALL-NEXT: vbroadcastsd (%rsp), %zmm0 # 8-byte Folded Reload
-; ALL-NEXT: popq %rax
+; ALL-NEXT: vbroadcastsd (%rsp), %zmm0 # 16-byte Folded Reload
+; ALL-NEXT: addq $24, %rsp
; ALL-NEXT: retq
%a = fadd double %x, %x
call void @func_f64(double %a)
diff --git a/test/CodeGen/X86/avx512-vbroadcasti128.ll b/test/CodeGen/X86/avx512-vbroadcasti128.ll
index ad8a29cacd82..ed19324df995 100644
--- a/test/CodeGen/X86/avx512-vbroadcasti128.ll
+++ b/test/CodeGen/X86/avx512-vbroadcasti128.ll
@@ -10,13 +10,13 @@
define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_2f64_4f64:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512VL-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512VL-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_2f64_4f64:
; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512BWVL-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512BWVL-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: retq
;
@@ -34,13 +34,13 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_2i64_4i64:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512VL-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_2i64_4i64:
; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512BWVL-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512BWVL-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: retq
;
@@ -58,7 +58,7 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_4f32_8f32:
; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
%1 = load <4 x float>, <4 x float> *%p
@@ -70,7 +70,7 @@ define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_4i32_8i32:
; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
%1 = load <4 x i32>, <4 x i32> *%p
@@ -82,7 +82,7 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_8i16_16i16:
; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
%1 = load <8 x i16>, <8 x i16> *%p
@@ -94,7 +94,7 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_16i8_32i8:
; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
%1 = load <16 x i8>, <16 x i8> *%p
@@ -182,7 +182,7 @@ define <16 x i32> @test_broadcast_4i32_16i32(<4 x i32> *%p) nounwind {
define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_8i16_32i16:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcasti32x4 {{.*#+}} ymm1 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512VL-NEXT: retq
@@ -195,7 +195,7 @@ define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
;
; X64-AVX512DQVL-LABEL: test_broadcast_8i16_32i16:
; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vbroadcasti32x4 {{.*#+}} ymm1 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512DQVL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512DQVL-NEXT: retq
@@ -208,7 +208,7 @@ define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_16i8_64i8:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcasti32x4 {{.*#+}} ymm1 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512VL-NEXT: retq
@@ -221,7 +221,7 @@ define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind {
;
; X64-AVX512DQVL-LABEL: test_broadcast_16i8_64i8:
; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vbroadcasti32x4 {{.*#+}} ymm1 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512DQVL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512DQVL-NEXT: retq
diff --git a/test/CodeGen/X86/avx512-vec-cmp.ll b/test/CodeGen/X86/avx512-vec-cmp.ll
index 361ee1ddbf9d..2b04b9229b3d 100644
--- a/test/CodeGen/X86/avx512-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512-vec-cmp.ll
@@ -6,8 +6,7 @@ define <16 x float> @test1(<16 x float> %x, <16 x float> %y) nounwind {
; CHECK-LABEL: test1:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpleps %zmm1, %zmm0, %k1
-; CHECK-NEXT: vmovaps %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovaps %zmm1, %zmm0
+; CHECK-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = fcmp ole <16 x float> %x, %y
%max = select <16 x i1> %mask, <16 x float> %x, <16 x float> %y
@@ -18,8 +17,7 @@ define <8 x double> @test2(<8 x double> %x, <8 x double> %y) nounwind {
; CHECK-LABEL: test2:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmplepd %zmm1, %zmm0, %k1
-; CHECK-NEXT: vmovapd %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovapd %zmm1, %zmm0
+; CHECK-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = fcmp ole <8 x double> %x, %y
%max = select <8 x i1> %mask, <8 x double> %x, <8 x double> %y
@@ -30,8 +28,7 @@ define <16 x i32> @test3(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %yp) nounwin
; CHECK-LABEL: test3:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd (%rdi), %zmm0, %k1
-; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%y = load <16 x i32>, <16 x i32>* %yp, align 4
%mask = icmp eq <16 x i32> %x, %y
@@ -43,8 +40,7 @@ define <16 x i32> @test4_unsigned(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1)
; CHECK-LABEL: test4_unsigned:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnltud %zmm1, %zmm0, %k1
-; CHECK-NEXT: vmovdqa32 %zmm2, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmd %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp uge <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x1, <16 x i32> %y
@@ -55,8 +51,7 @@ define <8 x i64> @test5(<8 x i64> %x, <8 x i64> %y) nounwind {
; CHECK-LABEL: test5:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
-; CHECK-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp eq <8 x i64> %x, %y
%max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %y
@@ -67,8 +62,7 @@ define <8 x i64> @test6_unsigned(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1) noun
; CHECK-LABEL: test6_unsigned:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
-; CHECK-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmq %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp ugt <8 x i64> %x, %y
%max = select <8 x i1> %mask, <8 x i64> %x1, <8 x i64> %y
@@ -87,8 +81,7 @@ define <4 x float> @test7(<4 x float> %a, <4 x float> %b) {
; SKX: ## BB#0:
; SKX-NEXT: vxorps %xmm2, %xmm2, %xmm2
; SKX-NEXT: vcmpltps %xmm2, %xmm0, %k1
-; SKX-NEXT: vmovaps %xmm0, %xmm1 {%k1}
-; SKX-NEXT: vmovaps %xmm1, %xmm0
+; SKX-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
%mask = fcmp olt <4 x float> %a, zeroinitializer
@@ -108,8 +101,7 @@ define <2 x double> @test8(<2 x double> %a, <2 x double> %b) {
; SKX: ## BB#0:
; SKX-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; SKX-NEXT: vcmpltpd %xmm2, %xmm0, %k1
-; SKX-NEXT: vmovapd %xmm0, %xmm1 {%k1}
-; SKX-NEXT: vmovapd %xmm1, %xmm0
+; SKX-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
%mask = fcmp olt <2 x double> %a, zeroinitializer
%c = select <2 x i1>%mask, <2 x double>%a, <2 x double>%b
@@ -122,15 +114,14 @@ define <8 x i32> @test9(<8 x i32> %x, <8 x i32> %y) nounwind {
; KNL-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
-; KNL-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
-; KNL-NEXT: vmovdqa %ymm1, %ymm0
+; KNL-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test9:
; SKX: ## BB#0:
; SKX-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
-; SKX-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
-; SKX-NEXT: vmovdqa %ymm1, %ymm0
+; SKX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
%mask = icmp eq <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %y
@@ -143,15 +134,14 @@ define <8 x float> @test10(<8 x float> %x, <8 x float> %y) nounwind {
; KNL-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vcmpeqps %zmm1, %zmm0, %k1
-; KNL-NEXT: vmovaps %zmm0, %zmm1 {%k1}
-; KNL-NEXT: vmovaps %ymm1, %ymm0
+; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
+; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test10:
; SKX: ## BB#0:
; SKX-NEXT: vcmpeqps %ymm1, %ymm0, %k1
-; SKX-NEXT: vmovaps %ymm0, %ymm1 {%k1}
-; SKX-NEXT: vmovaps %ymm1, %ymm0
+; SKX-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
%mask = fcmp oeq <8 x float> %x, %y
@@ -170,13 +160,24 @@ define <8 x i32> @test11_unsigned(<8 x i32> %x, <8 x i32> %y) nounwind {
}
define i16 @test12(<16 x i64> %a, <16 x i64> %b) nounwind {
-; CHECK-LABEL: test12:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqq %zmm2, %zmm0, %k0
-; CHECK-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
-; CHECK-NEXT: kunpckbw %k0, %k1, %k0
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: retq
+; KNL-LABEL: test12:
+; KNL: ## BB#0:
+; KNL-NEXT: vpcmpeqq %zmm2, %zmm0, %k0
+; KNL-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
+; KNL-NEXT: kunpckbw %k0, %k1, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test12:
+; SKX: ## BB#0:
+; SKX-NEXT: vpcmpeqq %zmm2, %zmm0, %k0
+; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
+; SKX-NEXT: kunpckbw %k0, %k1, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
%res = icmp eq <16 x i64> %a, %b
%res1 = bitcast <16 x i1> %res to i16
ret i16 %res1
@@ -336,6 +337,7 @@ define i32 @test12_v32i32(<32 x i32> %a, <32 x i32> %b) nounwind {
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
; SKX-NEXT: kunpckwd %k0, %k1, %k0
; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%res = icmp eq <32 x i32> %a, %b
%res1 = bitcast <32 x i1> %res to i32
@@ -647,6 +649,7 @@ define i64 @test12_v64i16(<64 x i16> %a, <64 x i16> %b) nounwind {
; SKX-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
; SKX-NEXT: kunpckdq %k0, %k1, %k0
; SKX-NEXT: kmovq %k0, %rax
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%res = icmp eq <64 x i16> %a, %b
%res1 = bitcast <64 x i1> %res to i64
@@ -699,8 +702,7 @@ define <16 x i32> @test16(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1) nounwind
; CHECK-LABEL: test16:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k1
-; CHECK-NEXT: vmovdqa32 %zmm2, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmd %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp sge <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x1, <16 x i32> %y
@@ -711,8 +713,7 @@ define <16 x i32> @test17(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nou
; CHECK-LABEL: test17:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd (%rdi), %zmm0, %k1
-; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask = icmp sgt <16 x i32> %x, %y
@@ -724,8 +725,7 @@ define <16 x i32> @test18(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nou
; CHECK-LABEL: test18:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi), %zmm0, %k1
-; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask = icmp sle <16 x i32> %x, %y
@@ -737,8 +737,7 @@ define <16 x i32> @test19(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nou
; CHECK-LABEL: test19:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleud (%rdi), %zmm0, %k1
-; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask = icmp ule <16 x i32> %x, %y
@@ -751,8 +750,7 @@ define <16 x i32> @test20(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1, <16 x i3
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 {%k1}
-; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp eq <16 x i32> %x1, %y1
%mask0 = icmp eq <16 x i32> %x, %y
@@ -766,8 +764,7 @@ define <8 x i64> @test21(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1, <8 x i64> %y
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpleq %zmm2, %zmm3, %k1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: vpblendmq %zmm0, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <8 x i64> %x1, %y1
%mask0 = icmp sle <8 x i64> %x, %y
@@ -781,8 +778,7 @@ define <8 x i64> @test22(<8 x i64> %x, <8 x i64>* %y.ptr, <8 x i64> %x1, <8 x i6
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpcmpgtq (%rdi), %zmm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sgt <8 x i64> %x1, %y1
%y = load <8 x i64>, <8 x i64>* %y.ptr, align 4
@@ -797,8 +793,7 @@ define <16 x i32> @test23(<16 x i32> %x, <16 x i32>* %y.ptr, <16 x i32> %x1, <16
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %zmm1, %zmm2, %k1
; CHECK-NEXT: vpcmpleud (%rdi), %zmm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <16 x i32> %x1, %y1
%y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
@@ -812,8 +807,7 @@ define <8 x i64> @test24(<8 x i64> %x, <8 x i64> %x1, i64* %yb.ptr) nounwind {
; CHECK-LABEL: test24:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k1
-; CHECK-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <8 x i64> undef, i64 %yb, i32 0
@@ -827,8 +821,7 @@ define <16 x i32> @test25(<16 x i32> %x, i32* %yb.ptr, <16 x i32> %x1) nounwind
; CHECK-LABEL: test25:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi){1to16}, %zmm0, %k1
-; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <16 x i32> undef, i32 %yb, i32 0
@@ -843,8 +836,7 @@ define <16 x i32> @test26(<16 x i32> %x, i32* %yb.ptr, <16 x i32> %x1, <16 x i32
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %zmm1, %zmm2, %k1
; CHECK-NEXT: vpcmpgtd (%rdi){1to16}, %zmm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <16 x i32> %x1, %y1
%yb = load i32, i32* %yb.ptr, align 4
@@ -861,8 +853,7 @@ define <8 x i64> @test27(<8 x i64> %x, i64* %yb.ptr, <8 x i64> %x1, <8 x i64> %y
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleq %zmm1, %zmm2, %k1
; CHECK-NEXT: vpcmpleq (%rdi){1to8}, %zmm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <8 x i64> %x1, %y1
%yb = load i64, i64* %yb.ptr, align 4
@@ -914,6 +905,7 @@ define <16 x i8>@test29(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1, <16 x i32>
; SKX-NEXT: vpcmpgtd %zmm3, %zmm2, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: vpmovm2b %k0, %xmm0
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x_gt_y = icmp sgt <16 x i32> %x, %y
%x1_gt_y1 = icmp sgt <16 x i32> %x1, %y1
@@ -932,8 +924,7 @@ define <4 x double> @test30(<4 x double> %x, <4 x double> %y) nounwind {
; SKX-LABEL: test30:
; SKX: ## BB#0:
; SKX-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
-; SKX-NEXT: vmovapd %ymm0, %ymm1 {%k1}
-; SKX-NEXT: vmovapd %ymm1, %ymm0
+; SKX-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
%mask = fcmp oeq <4 x double> %x, %y
@@ -951,8 +942,7 @@ define <2 x double> @test31(<2 x double> %x, <2 x double> %x1, <2 x double>* %yp
; SKX-LABEL: test31:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltpd (%rdi), %xmm0, %k1
-; SKX-NEXT: vmovapd %xmm0, %xmm1 {%k1}
-; SKX-NEXT: vmovapd %xmm1, %xmm0
+; SKX-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
%y = load <2 x double>, <2 x double>* %yp, align 4
@@ -971,8 +961,7 @@ define <4 x double> @test32(<4 x double> %x, <4 x double> %x1, <4 x double>* %yp
; SKX-LABEL: test32:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltpd (%rdi), %ymm0, %k1
-; SKX-NEXT: vmovapd %ymm0, %ymm1 {%k1}
-; SKX-NEXT: vmovapd %ymm1, %ymm0
+; SKX-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
%y = load <4 x double>, <4 x double>* %yp, align 4
@@ -985,8 +974,7 @@ define <8 x double> @test33(<8 x double> %x, <8 x double> %x1, <8 x double>* %yp
; CHECK-LABEL: test33:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpltpd (%rdi), %zmm0, %k1
-; CHECK-NEXT: vmovapd %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovapd %zmm1, %zmm0
+; CHECK-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%y = load <8 x double>, <8 x double>* %yp, align 4
%mask = fcmp olt <8 x double> %x, %y
@@ -1004,8 +992,7 @@ define <4 x float> @test34(<4 x float> %x, <4 x float> %x1, <4 x float>* %yp) no
; SKX-LABEL: test34:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltps (%rdi), %xmm0, %k1
-; SKX-NEXT: vmovaps %xmm0, %xmm1 {%k1}
-; SKX-NEXT: vmovaps %xmm1, %xmm0
+; SKX-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
%y = load <4 x float>, <4 x float>* %yp, align 4
%mask = fcmp olt <4 x float> %x, %y
@@ -1020,15 +1007,14 @@ define <8 x float> @test35(<8 x float> %x, <8 x float> %x1, <8 x float>* %yp) no
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vmovups (%rdi), %ymm2
; KNL-NEXT: vcmpltps %zmm2, %zmm0, %k1
-; KNL-NEXT: vmovaps %zmm0, %zmm1 {%k1}
-; KNL-NEXT: vmovaps %ymm1, %ymm0
+; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
+; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test35:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltps (%rdi), %ymm0, %k1
-; SKX-NEXT: vmovaps %ymm0, %ymm1 {%k1}
-; SKX-NEXT: vmovaps %ymm1, %ymm0
+; SKX-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
%y = load <8 x float>, <8 x float>* %yp, align 4
@@ -1041,8 +1027,7 @@ define <16 x float> @test36(<16 x float> %x, <16 x float> %x1, <16 x float>* %yp
; CHECK-LABEL: test36:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpltps (%rdi), %zmm0, %k1
-; CHECK-NEXT: vmovaps %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovaps %zmm1, %zmm0
+; CHECK-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%y = load <16 x float>, <16 x float>* %yp, align 4
%mask = fcmp olt <16 x float> %x, %y
@@ -1054,8 +1039,7 @@ define <8 x double> @test37(<8 x double> %x, <8 x double> %x1, double* %ptr) nou
; CHECK-LABEL: test37:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpltpd (%rdi){1to8}, %zmm0, %k1
-; CHECK-NEXT: vmovapd %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovapd %zmm1, %zmm0
+; CHECK-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%a = load double, double* %ptr
@@ -1078,8 +1062,7 @@ define <4 x double> @test38(<4 x double> %x, <4 x double> %x1, double* %ptr) nou
; SKX-LABEL: test38:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltpd (%rdi){1to4}, %ymm0, %k1
-; SKX-NEXT: vmovapd %ymm0, %ymm1 {%k1}
-; SKX-NEXT: vmovapd %ymm1, %ymm0
+; SKX-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
%a = load double, double* %ptr
@@ -1102,8 +1085,7 @@ define <2 x double> @test39(<2 x double> %x, <2 x double> %x1, double* %ptr) nou
; SKX-LABEL: test39:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltpd (%rdi){1to2}, %xmm0, %k1
-; SKX-NEXT: vmovapd %xmm0, %xmm1 {%k1}
-; SKX-NEXT: vmovapd %xmm1, %xmm0
+; SKX-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
%a = load double, double* %ptr
@@ -1120,8 +1102,7 @@ define <16 x float> @test40(<16 x float> %x, <16 x float> %x1, float* %ptr) n
; CHECK-LABEL: test40:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpltps (%rdi){1to16}, %zmm0, %k1
-; CHECK-NEXT: vmovaps %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovaps %zmm1, %zmm0
+; CHECK-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%a = load float, float* %ptr
@@ -1140,15 +1121,14 @@ define <8 x float> @test41(<8 x float> %x, <8 x float> %x1, float* %ptr) noun
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vbroadcastss (%rdi), %ymm2
; KNL-NEXT: vcmpltps %zmm2, %zmm0, %k1
-; KNL-NEXT: vmovaps %zmm0, %zmm1 {%k1}
-; KNL-NEXT: vmovaps %ymm1, %ymm0
+; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
+; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test41:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltps (%rdi){1to8}, %ymm0, %k1
-; SKX-NEXT: vmovaps %ymm0, %ymm1 {%k1}
-; SKX-NEXT: vmovaps %ymm1, %ymm0
+; SKX-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
%a = load float, float* %ptr
@@ -1171,8 +1151,7 @@ define <4 x float> @test42(<4 x float> %x, <4 x float> %x1, float* %ptr) noun
; SKX-LABEL: test42:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltps (%rdi){1to4}, %xmm0, %k1
-; SKX-NEXT: vmovaps %xmm0, %xmm1 {%k1}
-; SKX-NEXT: vmovaps %xmm1, %xmm0
+; SKX-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
%a = load float, float* %ptr
@@ -1191,8 +1170,7 @@ define <8 x double> @test43(<8 x double> %x, <8 x double> %x1, double* %ptr,<8 x
; KNL-NEXT: vpsllq $63, %zmm2, %zmm2
; KNL-NEXT: vptestmq %zmm2, %zmm2, %k1
; KNL-NEXT: vcmpltpd (%rdi){1to8}, %zmm0, %k1 {%k1}
-; KNL-NEXT: vmovapd %zmm0, %zmm1 {%k1}
-; KNL-NEXT: vmovapd %zmm1, %zmm0
+; KNL-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test43:
@@ -1200,8 +1178,7 @@ define <8 x double> @test43(<8 x double> %x, <8 x double> %x1, double* %ptr,<8 x
; SKX-NEXT: vpsllw $15, %xmm2, %xmm2
; SKX-NEXT: vpmovw2m %xmm2, %k1
; SKX-NEXT: vcmpltpd (%rdi){1to8}, %zmm0, %k1 {%k1}
-; SKX-NEXT: vmovapd %zmm0, %zmm1 {%k1}
-; SKX-NEXT: vmovapd %zmm1, %zmm0
+; SKX-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; SKX-NEXT: retq
%a = load double, double* %ptr
@@ -1263,11 +1240,7 @@ define <2 x i64> @test46(<2 x float> %x, <2 x float> %y) #0 {
; KNL-LABEL: test46:
; KNL: ## BB#0:
; KNL-NEXT: vcmpeqps %xmm1, %xmm0, %xmm0
-; KNL-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; KNL-NEXT: vpsllq $32, %xmm0, %xmm0
-; KNL-NEXT: vpsrad $31, %xmm0, %xmm1
-; KNL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; KNL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; KNL-NEXT: vpmovsxdq %xmm0, %xmm0
; KNL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; KNL-NEXT: retq
;
diff --git a/test/CodeGen/X86/avx512-vpermv3-commute.ll b/test/CodeGen/X86/avx512-vpermv3-commute.ll
index 471a8ea49f69..2827f471762f 100644
--- a/test/CodeGen/X86/avx512-vpermv3-commute.ll
+++ b/test/CodeGen/X86/avx512-vpermv3-commute.ll
@@ -53,7 +53,7 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32>, <16 x i32
define <16 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_d_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpermi2d (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2p
@@ -66,7 +66,7 @@ declare <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64>, <8 x do
define <8 x double>@test_int_x86_avx512_maskz_vpermt2var_pd_512(<8 x i64> %x0, <8 x double> %x1, double* %x2ptr, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_pd_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpermi2pd (%rdi){1to8}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x2s = load double, double* %x2ptr
@@ -81,7 +81,7 @@ declare <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32>, <16 x
define <16 x float>@test_int_x86_avx512_maskz_vpermt2var_ps_512(<16 x i32> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_ps_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpermi2ps %zmm2, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3)
@@ -94,7 +94,7 @@ declare <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64>, <8 x i64>,
define <8 x i64>@test_int_x86_avx512_maskz_vpermt2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_q_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpermi2q %zmm2, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
@@ -128,7 +128,7 @@ declare <4 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.128(<4 x i32>, <4 x i32>,
define <4 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_d_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpermi2d %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
@@ -138,7 +138,7 @@ define <4 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_128(<4 x i32> %x0, <4 x
define <4 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_128_broadcast(<4 x i32> %x0, <4 x i32> %x1, i32* %x2ptr, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_d_128_broadcast:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpermi2d (%rdi){1to4}, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%x2s = load i32, i32* %x2ptr
@@ -164,7 +164,7 @@ declare <8 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.256(<8 x i32>, <8 x i32>,
define <8 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_d_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpermi2d %ymm2, %ymm1, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <8 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3)
@@ -296,7 +296,7 @@ declare <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8>, <16 x i8>,
define <16 x i8>@test_int_x86_avx512_maskz_vpermt2var_qi_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_qi_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpermi2b %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3)
@@ -306,7 +306,7 @@ define <16 x i8>@test_int_x86_avx512_maskz_vpermt2var_qi_128(<16 x i8> %x0, <16
define <16 x i8>@test_int_x86_avx512_maskz_vpermt2var_qi_128_load(<16 x i8> %x0, <16 x i8> %x1, <16 x i8>* %x2p, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_qi_128_load:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpermi2b (%rdi), %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%x2 = load <16 x i8>, <16 x i8>* %x2p
diff --git a/test/CodeGen/X86/avx512-vpternlog-commute.ll b/test/CodeGen/X86/avx512-vpternlog-commute.ll
index 9cb82bcd66f7..c917e0b17f1c 100644
--- a/test/CodeGen/X86/avx512-vpternlog-commute.ll
+++ b/test/CodeGen/X86/avx512-vpternlog-commute.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s
; These test cases demonstrate cases where vpternlog could benefit from being commuted.
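
The immediate rewrites below follow from how vpternlog encodes its operation: imm8 is a truth table whose bit i gives the result for source bits (A,B,C) with i = A<<2 | B<<1 | C, so reordering the sources permutes the table's index bits and yields a different immediate for the same logic. A small C sketch of that recomputation (our helper, not LLVM's):

/* Recompute a vpternlog imm8 after reordering the three sources.
   p0/p1/p2 name the old source read by each new operand position;
   each set bit of the old table moves to the permuted index. */
static unsigned commute_ternlog_imm(unsigned imm, int p0, int p1, int p2) {
  unsigned out = 0;
  for (int i = 0; i < 8; ++i) {
    int v[3] = { (i >> 2) & 1, (i >> 1) & 1, i & 1 }; /* A, B, C */
    int j = (v[p0] << 2) | (v[p1] << 1) | v[p2];
    if (imm & (1u << i))
      out |= 1u << j;
  }
  return out;
}

For the swapped-operand tests below, commute_ternlog_imm(33, 1, 0, 2) yields 9 and commute_ternlog_imm(114, 1, 0, 2) yields 78, matching the $33/$9 and $114/$78 pairs in the CHECK lines.
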
@@ -9,485 +9,1060 @@ declare <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32>, <16 x i32>,
define <16 x i32> @vpternlog_v16i32_012(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_012:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT: vpternlogd $114, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_102(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_102:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpternlogd $9, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT: vpternlogd $78, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 -1)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_210(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_210:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpternlogd $9, %zmm0, %zmm2, %zmm1
+; CHECK-NEXT: vpternlogd $78, %zmm0, %zmm2, %zmm1
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 -1)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 -1)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_012_load0(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_012_load0:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpternlogd $9, (%rdi), %zmm1, %zmm0
+; CHECK-NEXT: vpternlogd $46, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_012_load1(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_012_load1:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpternlogd $65, (%rdi), %zmm1, %zmm0
+; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_012_load2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr) {
; CHECK-LABEL: vpternlog_v16i32_012_load2:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm1, %zmm0
+; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_102_load0(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_102_load0:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpternlogd $65, (%rdi), %zmm1, %zmm0
+; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 -1)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_102_load1(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_102_load1:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpternlogd $9, (%rdi), %zmm1, %zmm0
+; CHECK-NEXT: vpternlogd $46, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 -1)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_102_load2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr) {
; CHECK-LABEL: vpternlog_v16i32_102_load2:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpternlogd $9, (%rdi), %zmm1, %zmm0
+; CHECK-NEXT: vpternlogd $78, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 -1)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_210_load0(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_210_load0:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpternlogd $9, (%rdi), %zmm1, %zmm0
+; CHECK-NEXT: vpternlogd $78, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 -1)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 -1)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_210_load1(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_210_load1:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpternlogd $65, (%rdi), %zmm1, %zmm0
+; CHECK-NEXT: vpternlogd $92, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 -1)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 -1)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_210_load2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr) {
; CHECK-LABEL: vpternlog_v16i32_210_load2:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm1, %zmm0
+; CHECK-NEXT: vpternlogd $58, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 -1)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 -1)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_021_load0(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_021_load0:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm1, %zmm0
+; CHECK-NEXT: vpternlogd $58, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 33, i16 -1)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 -1)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_021_load1(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_021_load1:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm1, %zmm0
+; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 33, i16 -1)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 -1)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_021_load2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr) {
; CHECK-LABEL: vpternlog_v16i32_021_load2:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpternlogd $65, (%rdi), %zmm1, %zmm0
+; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 33, i16 -1)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 -1)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_012_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpternlogd $114, %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_102_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpternlogd $114, %zmm2, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_210_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpternlogd $33, %zmm0, %zmm1, %zmm2 {%k1}
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpternlogd $114, %zmm0, %zmm1, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}
+define <16 x i32> @vpternlog_v16i32_012_mask1(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_mask1:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpternlogd $78, %zmm2, %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
+ %mask.cast = bitcast i16 %mask to <16 x i1>
+ %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x1
+ ret <16 x i32> %res2
+}
+
+define <16 x i32> @vpternlog_v16i32_012_mask2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_mask2:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpternlogd $58, %zmm0, %zmm1, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
+ %mask.cast = bitcast i16 %mask to <16 x i1>
+ %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x2
+ ret <16 x i32> %res2
+}
+
define <16 x i32> @vpternlog_v16i32_012_load0_mask(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load0_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
-; CHECK-NEXT: vpternlogd $33, %zmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vpternlogd $114, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}
+define <16 x i32> @vpternlog_v16i32_012_load0_mask1(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_load0_mask1:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $65, (%rdi), %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: retq
+ %x0 = load <16 x i32>, <16 x i32>* %x0ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
+ %mask.cast = bitcast i16 %mask to <16 x i1>
+ %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x1
+ ret <16 x i32> %res2
+}
+
+define <16 x i32> @vpternlog_v16i32_012_load0_mask2(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_load0_mask2:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x0 = load <16 x i32>, <16 x i32>* %x0ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
+ %mask.cast = bitcast i16 %mask to <16 x i1>
+ %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x2
+ ret <16 x i32> %res2
+}
+
define <16 x i32> @vpternlog_v16i32_012_load1_mask(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load1_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $65, (%rdi), %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}
+define <16 x i32> @vpternlog_v16i32_012_load1_mask2(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_load1_mask2:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $9, (%rdi), %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x1 = load <16 x i32>, <16 x i32>* %x1ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
+ %mask.cast = bitcast i16 %mask to <16 x i1>
+ %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x2
+ ret <16 x i32> %res2
+}
+
define <16 x i32> @vpternlog_v16i32_012_load2_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load2_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}
+define <16 x i32> @vpternlog_v16i32_012_load2_mask1(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_load2_mask1:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $9, (%rdi), %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x2 = load <16 x i32>, <16 x i32>* %x2ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
+ %mask.cast = bitcast i16 %mask to <16 x i1>
+ %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x1
+ ret <16 x i32> %res2
+}
+
define <16 x i32> @vpternlog_v16i32_102_load0_mask(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_load0_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $65, (%rdi), %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_102_load1_mask(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_load1_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
-; CHECK-NEXT: vpternlogd $33, %zmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vpternlogd $114, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_102_load2_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_load2_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_210_load0_mask(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_load0_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_210_load1_mask(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_load1_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $65, (%rdi), %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_210_load2_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_load2_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
-; CHECK-NEXT: vpternlogd $33, %zmm0, %zmm1, %zmm2 {%k1}
+; CHECK-NEXT: vpternlogd $114, %zmm0, %zmm1, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_021_load0_mask(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_load0_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
-; CHECK-NEXT: vpternlogd $33, %zmm0, %zmm1, %zmm2 {%k1}
+; CHECK-NEXT: vpternlogd $114, %zmm0, %zmm1, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_021_load1_mask(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_load1_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_021_load2_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_load2_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $65, (%rdi), %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_012_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpternlogd $114, %zmm2, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
- %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_102_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpternlogd $9, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpternlogd $78, %zmm2, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
- %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_210_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpternlogd $9, %zmm0, %zmm2, %zmm1 {%k1} {z}
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpternlogd $78, %zmm0, %zmm2, %zmm1 {%k1} {z}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
- %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_012_load0_maskz(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load0_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $9, (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $46, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
- %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_012_load1_maskz(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load1_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $65, (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
- %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_012_load2_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load2_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
- %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_102_load0_maskz(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_load0_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $65, (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
- %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_102_load1_maskz(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_load1_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $9, (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $46, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
- %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_102_load2_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_load2_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $9, (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $78, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
- %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_210_load0_maskz(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_load0_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $9, (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $78, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
- %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_210_load1_maskz(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_load1_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $65, (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $92, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
- %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_210_load2_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_load2_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $58, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
- %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_021_load0_maskz(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_load0_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $58, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
- %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_021_load1_maskz(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_load1_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
- %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
ret <16 x i32> %res
}
define <16 x i32> @vpternlog_v16i32_021_load2_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_load2_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpternlogd $65, (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
- %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 33, i16 %mask)
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_broadcast0(i32* %ptr_x0, <16 x i32> %x1, <16 x i32> %x2) {
+; CHECK-LABEL: vpternlog_v16i32_012_broadcast0:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpternlogd $46, (%rdi){1to16}, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x0_scalar = load i32, i32* %ptr_x0
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x0_scalar, i32 0
+ %x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_broadcast1(<16 x i32> %x0, i32* %ptr_x1, <16 x i32> %x2) {
+; CHECK-LABEL: vpternlog_v16i32_012_broadcast1:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x1_scalar = load i32, i32* %ptr_x1
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x1_scalar, i32 0
+ %x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_broadcast2(<16 x i32> %x0, <16 x i32> %x1, i32* %ptr_x2) {
+; CHECK-LABEL: vpternlog_v16i32_012_broadcast2:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x2_scalar = load i32, i32* %ptr_x2
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x2_scalar, i32 0
+ %x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_broadcast0(i32* %ptr_x0, <16 x i32> %x1, <16 x i32> %x2) {
+; CHECK-LABEL: vpternlog_v16i32_102_broadcast0:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x0_scalar = load i32, i32* %ptr_x0
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x0_scalar, i32 0
+ %x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_broadcast1(<16 x i32> %x0, i32* %ptr_x1, <16 x i32> %x2) {
+; CHECK-LABEL: vpternlog_v16i32_102_broadcast1:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpternlogd $46, (%rdi){1to16}, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x1_scalar = load i32, i32* %ptr_x1
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x1_scalar, i32 0
+ %x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_broadcast2(<16 x i32> %x0, <16 x i32> %x1, i32* %ptr_x2) {
+; CHECK-LABEL: vpternlog_v16i32_102_broadcast2:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpternlogd $78, (%rdi){1to16}, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x2_scalar = load i32, i32* %ptr_x2
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x2_scalar, i32 0
+ %x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_broadcast0(i32* %ptr_x0, <16 x i32> %x1, <16 x i32> %x2) {
+; CHECK-LABEL: vpternlog_v16i32_210_broadcast0:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpternlogd $78, (%rdi){1to16}, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x0_scalar = load i32, i32* %ptr_x0
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x0_scalar, i32 0
+ %x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_broadcast1(<16 x i32> %x0, i32* %ptr_x1, <16 x i32> %x2) {
+; CHECK-LABEL: vpternlog_v16i32_210_broadcast1:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpternlogd $92, (%rdi){1to16}, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x1_scalar = load i32, i32* %ptr_x1
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x1_scalar, i32 0
+ %x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_broadcast2(<16 x i32> %x0, <16 x i32> %x1, i32* %ptr_x2) {
+; CHECK-LABEL: vpternlog_v16i32_210_broadcast2:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpternlogd $58, (%rdi){1to16}, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x2_scalar = load i32, i32* %ptr_x2
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x2_scalar, i32 0
+ %x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_broadcast0_mask(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_broadcast0_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpbroadcastd (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $114, %zmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x0scalar = load i32, i32* %x0ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
+ %x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_broadcast1_mask(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_broadcast1_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: retq
+ %x1scalar = load i32, i32* %x1ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x1scalar, i32 0
+ %x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_broadcast2_mask(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_broadcast2_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: retq
+ %x2scalar = load i32, i32* %x2ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x2scalar, i32 0
+ %x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_broadcast0_mask(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_102_broadcast0_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: retq
+ %x0scalar = load i32, i32* %x0ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
+ %x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_broadcast1_mask(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_102_broadcast1_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpbroadcastd (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $114, %zmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x1scalar = load i32, i32* %x1ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x1scalar, i32 0
+ %x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}
+
+define <16 x i32> @vpternlog_v16i32_102_broadcast2_mask(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_102_broadcast2_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x2scalar = load i32, i32* %x2ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x2scalar, i32 0
+ %x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_broadcast0_mask(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_210_broadcast0_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x0scalar = load i32, i32* %x0ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
+ %x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_broadcast1_mask(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_210_broadcast1_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x1scalar = load i32, i32* %x1ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x1scalar, i32 0
+ %x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_broadcast2_mask(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_210_broadcast2_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpbroadcastd (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $114, %zmm0, %zmm1, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x2scalar = load i32, i32* %x2ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x2scalar, i32 0
+ %x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_021_broadcast0_mask(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_021_broadcast0_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpbroadcastd (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $114, %zmm0, %zmm1, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x0scalar = load i32, i32* %x0ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
+ %x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_021_broadcast1_mask(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_021_broadcast1_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: retq
+ %x1scalar = load i32, i32* %x1ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x1scalar, i32 0
+ %x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_021_broadcast2_mask(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_021_broadcast2_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: retq
+ %x2scalar = load i32, i32* %x2ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x2scalar, i32 0
+ %x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_broadcast0_maskz(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_broadcast0_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $46, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %x0scalar = load i32, i32* %x0ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
+ %x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_broadcast1_maskz(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_broadcast1_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %x1scalar = load i32, i32* %x1ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x1scalar, i32 0
+ %x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_broadcast2_maskz(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_broadcast2_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %x2scalar = load i32, i32* %x2ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x2scalar, i32 0
+ %x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_broadcast0_maskz(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_102_broadcast0_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %x0scalar = load i32, i32* %x0ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
+ %x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_broadcast1_maskz(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_102_broadcast1_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $46, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %x1scalar = load i32, i32* %x1ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x1scalar, i32 0
+ %x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_broadcast2_maskz(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_102_broadcast2_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $78, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %x2scalar = load i32, i32* %x2ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x2scalar, i32 0
+ %x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_broadcast0_maskz(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_210_broadcast0_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $78, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %x0scalar = load i32, i32* %x0ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
+ %x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_broadcast1_maskz(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_210_broadcast1_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $92, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %x1scalar = load i32, i32* %x1ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x1scalar, i32 0
+ %x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_broadcast2_maskz(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_210_broadcast2_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $58, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %x2scalar = load i32, i32* %x2ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x2scalar, i32 0
+ %x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_021_broadcast0_maskz(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_021_broadcast0_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $58, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %x0scalar = load i32, i32* %x0ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
+ %x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_021_broadcast1_maskz(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_021_broadcast1_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %x1scalar = load i32, i32* %x1ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x1scalar, i32 0
+ %x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_021_broadcast2_maskz(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_021_broadcast2_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %x2scalar = load i32, i32* %x2ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x2scalar, i32 0
+ %x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_broadcast0_mask1(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_broadcast0_mask1:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $92, (%rdi), %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: retq
+ %x0scalar = load i32, i32* %x0ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
+ %x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
+ %mask.cast = bitcast i16 %mask to <16 x i1>
+ %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x1
+ ret <16 x i32> %res2
+}
+
+define <16 x i32> @vpternlog_v16i32_012_broadcast0_mask2(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_broadcast0_mask2:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $58, (%rdi), %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x0scalar = load i32, i32* %x0ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
+ %x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
+ %mask.cast = bitcast i16 %mask to <16 x i1>
+ %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x2
+ ret <16 x i32> %res2
+}
+
+define <16 x i32> @vpternlog_v16i32_012_broadcast1_mask2(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_broadcast1_mask2:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $46, (%rdi), %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x1scalar = load i32, i32* %x1ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x1scalar, i32 0
+ %x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
+ %mask.cast = bitcast i16 %mask to <16 x i1>
+ %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x2
+ ret <16 x i32> %res2
+}
+
+define <16 x i32> @vpternlog_v16i32_012_broadcast2_mask1(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_broadcast2_mask1:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: vpternlogd $78, (%rdi), %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x2scalar = load i32, i32* %x2ptr
+ %vecinit.i = insertelement <16 x i32> undef, i32 %x2scalar, i32 0
+ %x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
+ %mask.cast = bitcast i16 %mask to <16 x i1>
+ %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x1
+ ret <16 x i32> %res2
+}
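
The block above exercises VPTERNLOG source commuting: when codegen reorders
the three sources (to fold a full load or a {1to16} broadcast into the third
source, or to tie the masked destination to a select operand), the 8-bit
truth-table immediate has to be permuted to match. The change from immediate
33 to 114 appears deliberate: 33 is symmetric under swapping sources 0 and 2,
so a wrong permutation could go unnoticed, while 114 changes under every
source swap. The kmovw -> kmovd updates likewise look like a consequence of
running the test on a target with AVX512BW, where the kmovd form is available
for mask moves.

As a minimal sketch (not part of the commit), the following Python reproduces
the immediates in the CHECK lines above. Bit i of the immediate is the
function value for inputs (src1, src2, src3) with i = src1*4 + src2*2 + src3,
which is why 0xF0, 0xCC and 0xAA select src1, src2 and src3 respectively.

# Sketch for reading this test diff; not code from the LLVM tree.
def commute_imm(imm, perm):
    """Immediate for sources reordered by perm; perm[k] = j means the
    new source k is the old source j."""
    out = 0
    for i in range(8):
        new = ((i >> 2) & 1, (i >> 1) & 1, i & 1)  # new src1, src2, src3
        old = [0, 0, 0]
        for k, j in enumerate(perm):
            old[j] = new[k]                        # undo the permutation
        if imm & (1 << (old[0] * 4 + old[1] * 2 + old[2])):
            out |= 1 << i
    return out

for name, perm in [("012", (0, 1, 2)), ("102", (1, 0, 2)),
                   ("210", (2, 1, 0)), ("021", (0, 2, 1)),
                   ("120", (1, 2, 0)), ("201", (2, 0, 1))]:
    print(name, commute_imm(114, perm))
# Prints 114, 78, 58, 116, 46 and 92 -- the $imm values seen in the
# vpternlogd CHECK lines once the loaded operand is moved to source 3
# or the destination is retied for merge masking.
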
diff --git a/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
index 507205ceb4f9..9b4e73a18fc2 100644
--- a/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
@@ -992,3 +992,575 @@ define <64 x i8>@test_int_x86_avx512_mask_pshuf_b_512(<64 x i8> %x0, <64 x i8> %
ret <64 x i8> %res2
}
+
+declare <64 x i8> @llvm.x86.avx512.cvtmask2b.512(i64)
+
+define <64 x i8>@test_int_x86_avx512_cvtmask2b_512(i64 %x0) {
+; AVX512BW-LABEL: test_int_x86_avx512_cvtmask2b_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovq %rdi, %k0
+; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_int_x86_avx512_cvtmask2b_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k0
+; AVX512F-32-NEXT: vpmovm2b %k0, %zmm0
+; AVX512F-32-NEXT: retl
+ %res = call <64 x i8> @llvm.x86.avx512.cvtmask2b.512(i64 %x0)
+ ret <64 x i8> %res
+}
+
+declare <32 x i16> @llvm.x86.avx512.cvtmask2w.512(i32)
+
+define <32 x i16>@test_int_x86_avx512_cvtmask2w_512(i32 %x0) {
+; AVX512BW-LABEL: test_int_x86_avx512_cvtmask2w_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k0
+; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_int_x86_avx512_cvtmask2w_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
+; AVX512F-32-NEXT: vpmovm2w %k0, %zmm0
+; AVX512F-32-NEXT: retl
+ %res = call <32 x i16> @llvm.x86.avx512.cvtmask2w.512(i32 %x0)
+ ret <32 x i16> %res
+}
+define <32 x i16> @test_mask_packs_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) {
+; AVX512BW-LABEL: test_mask_packs_epi32_rr_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpackssdw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packs_epi32_rr_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: vpackssdw %zmm1, %zmm0, %zmm0
+; AVX512F-32-NEXT: retl
+ %res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 -1)
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_packs_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask) {
+; AVX512BW-LABEL: test_mask_packs_epi32_rrk_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: vpackssdw %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packs_epi32_rrk_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpackssdw %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512F-32-NEXT: retl
+ %res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask)
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_packs_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i32 %mask) {
+; AVX512BW-LABEL: test_mask_packs_epi32_rrkz_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: vpackssdw %zmm1, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packs_epi32_rrkz_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpackssdw %zmm1, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT: retl
+ %res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 %mask)
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_packs_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr_b) {
+; AVX512BW-LABEL: test_mask_packs_epi32_rm_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpackssdw (%rdi), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packs_epi32_rm_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: vpackssdw (%eax), %zmm0, %zmm0
+; AVX512F-32-NEXT: retl
+ %b = load <16 x i32>, <16 x i32>* %ptr_b
+ %res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 -1)
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_packs_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
+; AVX512BW-LABEL: test_mask_packs_epi32_rmk_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %esi, %k1
+; AVX512BW-NEXT: vpackssdw (%rdi), %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packs_epi32_rmk_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpackssdw (%eax), %zmm0, %zmm1 {%k1}
+; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm0
+; AVX512F-32-NEXT: retl
+ %b = load <16 x i32>, <16 x i32>* %ptr_b
+ %res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask)
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_packs_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %ptr_b, i32 %mask) {
+; AVX512BW-LABEL: test_mask_packs_epi32_rmkz_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %esi, %k1
+; AVX512BW-NEXT: vpackssdw (%rdi), %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packs_epi32_rmkz_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpackssdw (%eax), %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT: retl
+ %b = load <16 x i32>, <16 x i32>* %ptr_b
+ %res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 %mask)
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_packs_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
+; AVX512BW-LABEL: test_mask_packs_epi32_rmb_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpackssdw (%rdi){1to16}, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packs_epi32_rmb_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: vpackssdw (%eax){1to16}, %zmm0, %zmm0
+; AVX512F-32-NEXT: retl
+ %q = load i32, i32* %ptr_b
+ %vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
+ %b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 -1)
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_packs_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <32 x i16> %passThru, i32 %mask) {
+; AVX512BW-LABEL: test_mask_packs_epi32_rmbk_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %esi, %k1
+; AVX512BW-NEXT: vpackssdw (%rdi){1to16}, %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packs_epi32_rmbk_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpackssdw (%eax){1to16}, %zmm0, %zmm1 {%k1}
+; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm0
+; AVX512F-32-NEXT: retl
+ %q = load i32, i32* %ptr_b
+ %vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
+ %b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask)
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_packs_epi32_rmbkz_512(<16 x i32> %a, i32* %ptr_b, i32 %mask) {
+; AVX512BW-LABEL: test_mask_packs_epi32_rmbkz_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %esi, %k1
+; AVX512BW-NEXT: vpackssdw (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packs_epi32_rmbkz_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpackssdw (%eax){1to16}, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT: retl
+ %q = load i32, i32* %ptr_b
+ %vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
+ %b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 %mask)
+ ret <32 x i16> %res
+}
+
+declare <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32>, <16 x i32>, <32 x i16>, i32)
+
+define <64 x i8> @test_mask_packs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
+; AVX512BW-LABEL: test_mask_packs_epi16_rr_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpacksswb %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packs_epi16_rr_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: vpacksswb %zmm1, %zmm0, %zmm0
+; AVX512F-32-NEXT: retl
+ %res = call <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> zeroinitializer, i64 -1)
+ ret <64 x i8> %res
+}
+
+define <64 x i8> @test_mask_packs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask) {
+; AVX512BW-LABEL: test_mask_packs_epi16_rrk_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovq %rdi, %k1
+; AVX512BW-NEXT: vpacksswb %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packs_epi16_rrk_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpacksswb %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512F-32-NEXT: retl
+ %res = call <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask)
+ ret <64 x i8> %res
+}
+
+define <64 x i8> @test_mask_packs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i64 %mask) {
+; AVX512BW-LABEL: test_mask_packs_epi16_rrkz_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovq %rdi, %k1
+; AVX512BW-NEXT: vpacksswb %zmm1, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packs_epi16_rrkz_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpacksswb %zmm1, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT: retl
+ %res = call <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> zeroinitializer, i64 %mask)
+ ret <64 x i8> %res
+}
+
+define <64 x i8> @test_mask_packs_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
+; AVX512BW-LABEL: test_mask_packs_epi16_rm_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpacksswb (%rdi), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packs_epi16_rm_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: vpacksswb (%eax), %zmm0, %zmm0
+; AVX512F-32-NEXT: retl
+ %b = load <32 x i16>, <32 x i16>* %ptr_b
+ %res = call <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> zeroinitializer, i64 -1)
+ ret <64 x i8> %res
+}
+
+define <64 x i8> @test_mask_packs_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <64 x i8> %passThru, i64 %mask) {
+; AVX512BW-LABEL: test_mask_packs_epi16_rmk_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovq %rsi, %k1
+; AVX512BW-NEXT: vpacksswb (%rdi), %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packs_epi16_rmk_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpacksswb (%eax), %zmm0, %zmm1 {%k1}
+; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm0
+; AVX512F-32-NEXT: retl
+ %b = load <32 x i16>, <32 x i16>* %ptr_b
+ %res = call <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask)
+ ret <64 x i8> %res
+}
+
+define <64 x i8> @test_mask_packs_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i64 %mask) {
+; AVX512BW-LABEL: test_mask_packs_epi16_rmkz_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovq %rsi, %k1
+; AVX512BW-NEXT: vpacksswb (%rdi), %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packs_epi16_rmkz_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpacksswb (%eax), %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT: retl
+ %b = load <32 x i16>, <32 x i16>* %ptr_b
+ %res = call <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> zeroinitializer, i64 %mask)
+ ret <64 x i8> %res
+}
+
+declare <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16>, <32 x i16>, <64 x i8>, i64)
+
+
+define <32 x i16> @test_mask_packus_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) {
+; AVX512BW-LABEL: test_mask_packus_epi32_rr_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpackusdw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packus_epi32_rr_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: vpackusdw %zmm1, %zmm0, %zmm0
+; AVX512F-32-NEXT: retl
+ %res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 -1)
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_packus_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask) {
+; AVX512BW-LABEL: test_mask_packus_epi32_rrk_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: vpackusdw %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packus_epi32_rrk_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpackusdw %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512F-32-NEXT: retl
+ %res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask)
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_packus_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i32 %mask) {
+; AVX512BW-LABEL: test_mask_packus_epi32_rrkz_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: vpackusdw %zmm1, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packus_epi32_rrkz_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpackusdw %zmm1, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT: retl
+ %res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 %mask)
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_packus_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr_b) {
+; AVX512BW-LABEL: test_mask_packus_epi32_rm_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpackusdw (%rdi), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packus_epi32_rm_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: vpackusdw (%eax), %zmm0, %zmm0
+; AVX512F-32-NEXT: retl
+ %b = load <16 x i32>, <16 x i32>* %ptr_b
+ %res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 -1)
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_packus_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
+; AVX512BW-LABEL: test_mask_packus_epi32_rmk_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %esi, %k1
+; AVX512BW-NEXT: vpackusdw (%rdi), %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packus_epi32_rmk_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpackusdw (%eax), %zmm0, %zmm1 {%k1}
+; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm0
+; AVX512F-32-NEXT: retl
+ %b = load <16 x i32>, <16 x i32>* %ptr_b
+ %res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask)
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_packus_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %ptr_b, i32 %mask) {
+; AVX512BW-LABEL: test_mask_packus_epi32_rmkz_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %esi, %k1
+; AVX512BW-NEXT: vpackusdw (%rdi), %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packus_epi32_rmkz_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpackusdw (%eax), %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT: retl
+ %b = load <16 x i32>, <16 x i32>* %ptr_b
+ %res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 %mask)
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_packus_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
+; AVX512BW-LABEL: test_mask_packus_epi32_rmb_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpackusdw (%rdi){1to16}, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packus_epi32_rmb_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: vpackusdw (%eax){1to16}, %zmm0, %zmm0
+; AVX512F-32-NEXT: retl
+ %q = load i32, i32* %ptr_b
+ %vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
+ %b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 -1)
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_packus_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <32 x i16> %passThru, i32 %mask) {
+; AVX512BW-LABEL: test_mask_packus_epi32_rmbk_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %esi, %k1
+; AVX512BW-NEXT: vpackusdw (%rdi){1to16}, %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packus_epi32_rmbk_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpackusdw (%eax){1to16}, %zmm0, %zmm1 {%k1}
+; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm0
+; AVX512F-32-NEXT: retl
+ %q = load i32, i32* %ptr_b
+ %vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
+ %b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask)
+ ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_packus_epi32_rmbkz_512(<16 x i32> %a, i32* %ptr_b, i32 %mask) {
+; AVX512BW-LABEL: test_mask_packus_epi32_rmbkz_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovd %esi, %k1
+; AVX512BW-NEXT: vpackusdw (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packus_epi32_rmbkz_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpackusdw (%eax){1to16}, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT: retl
+ %q = load i32, i32* %ptr_b
+ %vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
+ %b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
+ %res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 %mask)
+ ret <32 x i16> %res
+}
+
+declare <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32>, <16 x i32>, <32 x i16>, i32)
+
+define <64 x i8> @test_mask_packus_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
+; AVX512BW-LABEL: test_mask_packus_epi16_rr_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packus_epi16_rr_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
+; AVX512F-32-NEXT: retl
+ %res = call <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> zeroinitializer, i64 -1)
+ ret <64 x i8> %res
+}
+
+define <64 x i8> @test_mask_packus_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask) {
+; AVX512BW-LABEL: test_mask_packus_epi16_rrk_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovq %rdi, %k1
+; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packus_epi16_rrk_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpackuswb %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512F-32-NEXT: retl
+ %res = call <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask)
+ ret <64 x i8> %res
+}
+
+define <64 x i8> @test_mask_packus_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i64 %mask) {
+; AVX512BW-LABEL: test_mask_packus_epi16_rrkz_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovq %rdi, %k1
+; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packus_epi16_rrkz_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT: retl
+ %res = call <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> zeroinitializer, i64 %mask)
+ ret <64 x i8> %res
+}
+
+define <64 x i8> @test_mask_packus_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
+; AVX512BW-LABEL: test_mask_packus_epi16_rm_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpackuswb (%rdi), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packus_epi16_rm_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: vpackuswb (%eax), %zmm0, %zmm0
+; AVX512F-32-NEXT: retl
+ %b = load <32 x i16>, <32 x i16>* %ptr_b
+ %res = call <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> zeroinitializer, i64 -1)
+ ret <64 x i8> %res
+}
+
+define <64 x i8> @test_mask_packus_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <64 x i8> %passThru, i64 %mask) {
+; AVX512BW-LABEL: test_mask_packus_epi16_rmk_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovq %rsi, %k1
+; AVX512BW-NEXT: vpackuswb (%rdi), %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packus_epi16_rmk_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpackuswb (%eax), %zmm0, %zmm1 {%k1}
+; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm0
+; AVX512F-32-NEXT: retl
+ %b = load <32 x i16>, <32 x i16>* %ptr_b
+ %res = call <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask)
+ ret <64 x i8> %res
+}
+
+define <64 x i8> @test_mask_packus_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i64 %mask) {
+; AVX512BW-LABEL: test_mask_packus_epi16_rmkz_512:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: kmovq %rsi, %k1
+; AVX512BW-NEXT: vpackuswb (%rdi), %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_packus_epi16_rmkz_512:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpackuswb (%eax), %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT: retl
+ %b = load <32 x i16>, <32 x i16>* %ptr_b
+ %res = call <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> zeroinitializer, i64 %mask)
+ ret <64 x i8> %res
+}
+
+declare <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16>, <32 x i16>, <64 x i8>, i64)
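
The tests added to this -upgrade file keep calling the legacy masked pack intrinsics (explicit passthru and integer mask operands), which the IR auto-upgrader rewrites before codegen. A minimal sketch of the rewritten form, mirroring the corresponding changes in avx512bw-intrinsics.ll below (function name is hypothetical): the mask argument is lowered to a bitcast plus an explicit vector select around the unmasked intrinsic.

define <32 x i16> @upgrade_sketch(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask) {
  ; replaces: call @llvm.x86.avx512.mask.packssdw.512(%a, %b, %passThru, %mask)
  %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> %a, <16 x i32> %b)
  %2 = bitcast i32 %mask to <32 x i1>
  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
  ret <32 x i16> %3
}
declare <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32>, <16 x i32>)
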
diff --git a/test/CodeGen/X86/avx512bw-intrinsics.ll b/test/CodeGen/X86/avx512bw-intrinsics.ll
index d9a9a2d655b4..3337f42eb142 100644
--- a/test/CodeGen/X86/avx512bw-intrinsics.ll
+++ b/test/CodeGen/X86/avx512bw-intrinsics.ll
@@ -660,8 +660,8 @@ define <32 x i16> @test_mask_packs_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) {
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: vpackssdw %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
- %res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 -1)
- ret <32 x i16> %res
+ %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> %a, <16 x i32> %b)
+ ret <32 x i16> %1
}
define <32 x i16> @test_mask_packs_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask) {
@@ -678,8 +678,10 @@ define <32 x i16> @test_mask_packs_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <
; AVX512F-32-NEXT: vpackssdw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512F-32-NEXT: retl
- %res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask)
- ret <32 x i16> %res
+ %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> %a, <16 x i32> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
+ ret <32 x i16> %3
}
define <32 x i16> @test_mask_packs_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i32 %mask) {
@@ -694,8 +696,10 @@ define <32 x i16> @test_mask_packs_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b,
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackssdw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
- %res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 %mask)
- ret <32 x i16> %res
+ %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> %a, <16 x i32> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
+ ret <32 x i16> %3
}
define <32 x i16> @test_mask_packs_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr_b) {
@@ -710,8 +714,8 @@ define <32 x i16> @test_mask_packs_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr_
; AVX512F-32-NEXT: vpackssdw (%eax), %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%b = load <16 x i32>, <16 x i32>* %ptr_b
- %res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 -1)
- ret <32 x i16> %res
+ %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> %a, <16 x i32> %b)
+ ret <32 x i16> %1
}
define <32 x i16> @test_mask_packs_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
@@ -730,8 +734,10 @@ define <32 x i16> @test_mask_packs_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %ptr
; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512F-32-NEXT: retl
%b = load <16 x i32>, <16 x i32>* %ptr_b
- %res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask)
- ret <32 x i16> %res
+ %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> %a, <16 x i32> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
+ ret <32 x i16> %3
}
define <32 x i16> @test_mask_packs_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %ptr_b, i32 %mask) {
@@ -748,8 +754,10 @@ define <32 x i16> @test_mask_packs_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %pt
; AVX512F-32-NEXT: vpackssdw (%eax), %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
%b = load <16 x i32>, <16 x i32>* %ptr_b
- %res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 %mask)
- ret <32 x i16> %res
+ %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> %a, <16 x i32> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
+ ret <32 x i16> %3
}
define <32 x i16> @test_mask_packs_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
@@ -766,8 +774,8 @@ define <32 x i16> @test_mask_packs_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
%b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
- %res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 -1)
- ret <32 x i16> %res
+ %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> %a, <16 x i32> %b)
+ ret <32 x i16> %1
}
define <32 x i16> @test_mask_packs_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <32 x i16> %passThru, i32 %mask) {
@@ -788,8 +796,10 @@ define <32 x i16> @test_mask_packs_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <3
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
%b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
- %res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask)
- ret <32 x i16> %res
+ %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> %a, <16 x i32> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
+ ret <32 x i16> %3
}
define <32 x i16> @test_mask_packs_epi32_rmbkz_512(<16 x i32> %a, i32* %ptr_b, i32 %mask) {
@@ -808,11 +818,13 @@ define <32 x i16> @test_mask_packs_epi32_rmbkz_512(<16 x i32> %a, i32* %ptr_b, i
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
%b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
- %res = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 %mask)
- ret <32 x i16> %res
+ %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> %a, <16 x i32> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
+ ret <32 x i16> %3
}
-declare <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32>, <16 x i32>, <32 x i16>, i32)
+declare <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32>, <16 x i32>)
define <64 x i8> @test_mask_packs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
; AVX512BW-LABEL: test_mask_packs_epi16_rr_512:
@@ -824,8 +836,8 @@ define <64 x i8> @test_mask_packs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: vpacksswb %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
- %res = call <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> zeroinitializer, i64 -1)
- ret <64 x i8> %res
+ %1 = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> %a, <32 x i16> %b)
+ ret <64 x i8> %1
}
define <64 x i8> @test_mask_packs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask) {
@@ -838,14 +850,14 @@ define <64 x i8> @test_mask_packs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <6
;
; AVX512F-32-LABEL: test_mask_packs_epi16_rrk_512:
; AVX512F-32: # BB#0:
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
-; AVX512F-32-NEXT: kunpckdq %k0, %k1, %k1
+; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpacksswb %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512F-32-NEXT: retl
- %res = call <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask)
- ret <64 x i8> %res
+ %1 = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> %a, <32 x i16> %b)
+ %2 = bitcast i64 %mask to <64 x i1>
+ %3 = select <64 x i1> %2, <64 x i8> %1, <64 x i8> %passThru
+ ret <64 x i8> %3
}
define <64 x i8> @test_mask_packs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i64 %mask) {
@@ -857,13 +869,13 @@ define <64 x i8> @test_mask_packs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i
;
; AVX512F-32-LABEL: test_mask_packs_epi16_rrkz_512:
; AVX512F-32: # BB#0:
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
-; AVX512F-32-NEXT: kunpckdq %k0, %k1, %k1
+; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpacksswb %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
- %res = call <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> zeroinitializer, i64 %mask)
- ret <64 x i8> %res
+ %1 = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> %a, <32 x i16> %b)
+ %2 = bitcast i64 %mask to <64 x i1>
+ %3 = select <64 x i1> %2, <64 x i8> %1, <64 x i8> zeroinitializer
+ ret <64 x i8> %3
}
define <64 x i8> @test_mask_packs_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
@@ -878,8 +890,8 @@ define <64 x i8> @test_mask_packs_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b
; AVX512F-32-NEXT: vpacksswb (%eax), %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%b = load <32 x i16>, <32 x i16>* %ptr_b
- %res = call <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> zeroinitializer, i64 -1)
- ret <64 x i8> %res
+ %1 = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> %a, <32 x i16> %b)
+ ret <64 x i8> %1
}
define <64 x i8> @test_mask_packs_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <64 x i8> %passThru, i64 %mask) {
@@ -893,15 +905,15 @@ define <64 x i8> @test_mask_packs_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_
; AVX512F-32-LABEL: test_mask_packs_epi16_rmk_512:
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
-; AVX512F-32-NEXT: kunpckdq %k0, %k1, %k1
+; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpacksswb (%eax), %zmm0, %zmm1 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512F-32-NEXT: retl
%b = load <32 x i16>, <32 x i16>* %ptr_b
- %res = call <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask)
- ret <64 x i8> %res
+ %1 = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> %a, <32 x i16> %b)
+ %2 = bitcast i64 %mask to <64 x i1>
+ %3 = select <64 x i1> %2, <64 x i8> %1, <64 x i8> %passThru
+ ret <64 x i8> %3
}
define <64 x i8> @test_mask_packs_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i64 %mask) {
@@ -914,17 +926,17 @@ define <64 x i8> @test_mask_packs_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr
; AVX512F-32-LABEL: test_mask_packs_epi16_rmkz_512:
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
-; AVX512F-32-NEXT: kunpckdq %k0, %k1, %k1
+; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpacksswb (%eax), %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
%b = load <32 x i16>, <32 x i16>* %ptr_b
- %res = call <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> zeroinitializer, i64 %mask)
- ret <64 x i8> %res
+ %1 = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> %a, <32 x i16> %b)
+ %2 = bitcast i64 %mask to <64 x i1>
+ %3 = select <64 x i1> %2, <64 x i8> %1, <64 x i8> zeroinitializer
+ ret <64 x i8> %3
}
-declare <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16>, <32 x i16>, <64 x i8>, i64)
+declare <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16>, <32 x i16>)
define <32 x i16> @test_mask_packus_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) {
@@ -937,8 +949,8 @@ define <32 x i16> @test_mask_packus_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) {
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: vpackusdw %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
- %res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 -1)
- ret <32 x i16> %res
+ %1 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> %a, <16 x i32> %b)
+ ret <32 x i16> %1
}
define <32 x i16> @test_mask_packus_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask) {
@@ -955,8 +967,10 @@ define <32 x i16> @test_mask_packus_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b,
; AVX512F-32-NEXT: vpackusdw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512F-32-NEXT: retl
- %res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask)
- ret <32 x i16> %res
+ %1 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> %a, <16 x i32> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
+ ret <32 x i16> %3
}
define <32 x i16> @test_mask_packus_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i32 %mask) {
@@ -971,8 +985,10 @@ define <32 x i16> @test_mask_packus_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b,
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackusdw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
- %res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 %mask)
- ret <32 x i16> %res
+ %1 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> %a, <16 x i32> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
+ ret <32 x i16> %3
}
define <32 x i16> @test_mask_packus_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr_b) {
@@ -987,8 +1003,8 @@ define <32 x i16> @test_mask_packus_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr
; AVX512F-32-NEXT: vpackusdw (%eax), %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%b = load <16 x i32>, <16 x i32>* %ptr_b
- %res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 -1)
- ret <32 x i16> %res
+ %1 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> %a, <16 x i32> %b)
+ ret <32 x i16> %1
}
define <32 x i16> @test_mask_packus_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
@@ -1007,8 +1023,10 @@ define <32 x i16> @test_mask_packus_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %pt
; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512F-32-NEXT: retl
%b = load <16 x i32>, <16 x i32>* %ptr_b
- %res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask)
- ret <32 x i16> %res
+ %1 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> %a, <16 x i32> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
+ ret <32 x i16> %3
}
define <32 x i16> @test_mask_packus_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %ptr_b, i32 %mask) {
@@ -1025,8 +1043,10 @@ define <32 x i16> @test_mask_packus_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %p
; AVX512F-32-NEXT: vpackusdw (%eax), %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
%b = load <16 x i32>, <16 x i32>* %ptr_b
- %res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 %mask)
- ret <32 x i16> %res
+ %1 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> %a, <16 x i32> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
+ ret <32 x i16> %3
}
define <32 x i16> @test_mask_packus_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
@@ -1043,8 +1063,8 @@ define <32 x i16> @test_mask_packus_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
%b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
- %res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 -1)
- ret <32 x i16> %res
+ %1 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> %a, <16 x i32> %b)
+ ret <32 x i16> %1
}
define <32 x i16> @test_mask_packus_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <32 x i16> %passThru, i32 %mask) {
@@ -1065,8 +1085,10 @@ define <32 x i16> @test_mask_packus_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
%b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
- %res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask)
- ret <32 x i16> %res
+ %1 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> %a, <16 x i32> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
+ ret <32 x i16> %3
}
define <32 x i16> @test_mask_packus_epi32_rmbkz_512(<16 x i32> %a, i32* %ptr_b, i32 %mask) {
@@ -1085,11 +1107,13 @@ define <32 x i16> @test_mask_packus_epi32_rmbkz_512(<16 x i32> %a, i32* %ptr_b,
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
%b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
- %res = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a, <16 x i32> %b, <32 x i16> zeroinitializer, i32 %mask)
- ret <32 x i16> %res
+ %1 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> %a, <16 x i32> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
+ ret <32 x i16> %3
}
-declare <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32>, <16 x i32>, <32 x i16>, i32)
+declare <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32>, <16 x i32>)
define <64 x i8> @test_mask_packus_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
; AVX512BW-LABEL: test_mask_packus_epi16_rr_512:
@@ -1101,8 +1125,8 @@ define <64 x i8> @test_mask_packus_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
- %res = call <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> zeroinitializer, i64 -1)
- ret <64 x i8> %res
+ %1 = call <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16> %a, <32 x i16> %b)
+ ret <64 x i8> %1
}
define <64 x i8> @test_mask_packus_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask) {
@@ -1115,14 +1139,14 @@ define <64 x i8> @test_mask_packus_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <
;
; AVX512F-32-LABEL: test_mask_packus_epi16_rrk_512:
; AVX512F-32: # BB#0:
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
-; AVX512F-32-NEXT: kunpckdq %k0, %k1, %k1
+; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackuswb %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512F-32-NEXT: retl
- %res = call <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask)
- ret <64 x i8> %res
+ %1 = call <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16> %a, <32 x i16> %b)
+ %2 = bitcast i64 %mask to <64 x i1>
+ %3 = select <64 x i1> %2, <64 x i8> %1, <64 x i8> %passThru
+ ret <64 x i8> %3
}
define <64 x i8> @test_mask_packus_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i64 %mask) {
@@ -1134,13 +1158,13 @@ define <64 x i8> @test_mask_packus_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b,
;
; AVX512F-32-LABEL: test_mask_packus_epi16_rrkz_512:
; AVX512F-32: # BB#0:
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
-; AVX512F-32-NEXT: kunpckdq %k0, %k1, %k1
+; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
- %res = call <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> zeroinitializer, i64 %mask)
- ret <64 x i8> %res
+ %1 = call <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16> %a, <32 x i16> %b)
+ %2 = bitcast i64 %mask to <64 x i1>
+ %3 = select <64 x i1> %2, <64 x i8> %1, <64 x i8> zeroinitializer
+ ret <64 x i8> %3
}
define <64 x i8> @test_mask_packus_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
@@ -1155,8 +1179,8 @@ define <64 x i8> @test_mask_packus_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_
; AVX512F-32-NEXT: vpackuswb (%eax), %zmm0, %zmm0
; AVX512F-32-NEXT: retl
%b = load <32 x i16>, <32 x i16>* %ptr_b
- %res = call <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> zeroinitializer, i64 -1)
- ret <64 x i8> %res
+ %1 = call <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16> %a, <32 x i16> %b)
+ ret <64 x i8> %1
}
define <64 x i8> @test_mask_packus_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <64 x i8> %passThru, i64 %mask) {
@@ -1170,15 +1194,15 @@ define <64 x i8> @test_mask_packus_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr
; AVX512F-32-LABEL: test_mask_packus_epi16_rmk_512:
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
-; AVX512F-32-NEXT: kunpckdq %k0, %k1, %k1
+; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackuswb (%eax), %zmm0, %zmm1 {%k1}
; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512F-32-NEXT: retl
%b = load <32 x i16>, <32 x i16>* %ptr_b
- %res = call <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask)
- ret <64 x i8> %res
+ %1 = call <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16> %a, <32 x i16> %b)
+ %2 = bitcast i64 %mask to <64 x i1>
+ %3 = select <64 x i1> %2, <64 x i8> %1, <64 x i8> %passThru
+ ret <64 x i8> %3
}
define <64 x i8> @test_mask_packus_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i64 %mask) {
@@ -1191,17 +1215,17 @@ define <64 x i8> @test_mask_packus_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %pt
; AVX512F-32-LABEL: test_mask_packus_epi16_rmkz_512:
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
-; AVX512F-32-NEXT: kunpckdq %k0, %k1, %k1
+; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpackuswb (%eax), %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: retl
%b = load <32 x i16>, <32 x i16>* %ptr_b
- %res = call <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16> %a, <32 x i16> %b, <64 x i8> zeroinitializer, i64 %mask)
- ret <64 x i8> %res
+ %1 = call <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16> %a, <32 x i16> %b)
+ %2 = bitcast i64 %mask to <64 x i1>
+ %3 = select <64 x i1> %2, <64 x i8> %1, <64 x i8> zeroinitializer
+ ret <64 x i8> %3
}
-declare <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16>, <32 x i16>, <64 x i8>, i64)
+declare <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16>, <32 x i16>)
define <32 x i16> @test_mask_adds_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
; AVX512BW-LABEL: test_mask_adds_epi16_rr_512:
@@ -2108,7 +2132,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pmaddw.d.512(<32 x i16>, <32 x i16>, <1
define <16 x i32>@test_int_x86_avx512_mask_pmaddw_d_512(<32 x i16> %x0, <32 x i16> %x1, <16 x i32> %x2, i16 %x3) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_pmaddw_d_512:
; AVX512BW: ## BB#0:
-; AVX512BW-NEXT: kmovw %edi, %k1
+; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpmaddwd %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vpmaddwd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpaddd %zmm0, %zmm2, %zmm0
@@ -2271,44 +2295,6 @@ define i32@test_int_x86_avx512_cvtw2mask_512(<32 x i16> %x0) {
ret i32 %res
}
-declare <64 x i8> @llvm.x86.avx512.cvtmask2b.512(i64)
-
-define <64 x i8>@test_int_x86_avx512_cvtmask2b_512(i64 %x0) {
-; AVX512BW-LABEL: test_int_x86_avx512_cvtmask2b_512:
-; AVX512BW: ## BB#0:
-; AVX512BW-NEXT: kmovq %rdi, %k0
-; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: retq
-;
-; AVX512F-32-LABEL: test_int_x86_avx512_cvtmask2b_512:
-; AVX512F-32: # BB#0:
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
-; AVX512F-32-NEXT: kunpckdq %k0, %k1, %k0
-; AVX512F-32-NEXT: vpmovm2b %k0, %zmm0
-; AVX512F-32-NEXT: retl
- %res = call <64 x i8> @llvm.x86.avx512.cvtmask2b.512(i64 %x0)
- ret <64 x i8> %res
-}
-
-declare <32 x i16> @llvm.x86.avx512.cvtmask2w.512(i32)
-
-define <32 x i16>@test_int_x86_avx512_cvtmask2w_512(i32 %x0) {
-; AVX512BW-LABEL: test_int_x86_avx512_cvtmask2w_512:
-; AVX512BW: ## BB#0:
-; AVX512BW-NEXT: kmovd %edi, %k0
-; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: retq
-;
-; AVX512F-32-LABEL: test_int_x86_avx512_cvtmask2w_512:
-; AVX512F-32: # BB#0:
-; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
-; AVX512F-32-NEXT: vpmovm2w %k0, %zmm0
-; AVX512F-32-NEXT: retl
- %res = call <32 x i16> @llvm.x86.avx512.cvtmask2w.512(i32 %x0)
- ret <32 x i16> %res
-}
-
declare <32 x i16> @llvm.x86.avx512.mask.psrlv32hi(<32 x i16>, <32 x i16>, <32 x i16>, i32)
define <32 x i16>@test_int_x86_avx512_mask_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) {
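
Besides switching to the unmasked intrinsics, the AVX512F-32 checks above also show a codegen improvement for i64 masks on 32-bit targets: the stack-passed mask is now loaded with a single kmovq instead of two kmovd loads glued together by kunpckdq. A minimal sketch of the pattern that triggers this (function name is hypothetical):

define <64 x i8> @mask64_sketch(<32 x i16> %a, <32 x i16> %b, i64 %mask) {
  %p = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> %a, <32 x i16> %b)
  %m = bitcast i64 %mask to <64 x i1>
  ; on 32-bit AVX512BW targets the expected lowering of %m is now
  ;   kmovq {{[0-9]+}}(%esp), %k1
  %r = select <64 x i1> %m, <64 x i8> %p, <64 x i8> zeroinitializer
  ret <64 x i8> %r
}
declare <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16>, <32 x i16>)
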
diff --git a/test/CodeGen/X86/avx512bw-mask-op.ll b/test/CodeGen/X86/avx512bw-mask-op.ll
index 619c42494e2d..e000ef4068f6 100644
--- a/test/CodeGen/X86/avx512bw-mask-op.ll
+++ b/test/CodeGen/X86/avx512bw-mask-op.ll
@@ -150,3 +150,93 @@ define i64 @mand64_mem(<64 x i1>* %x, <64 x i1>* %y) {
%ret = bitcast <64 x i1> %me to i64
ret i64 %ret
}
+
+define i32 @test_v32i1_add(i32 %x, i32 %y) {
+; CHECK-LABEL: test_v32i1_add:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k0
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: kxord %k1, %k0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+ %m0 = bitcast i32 %x to <32 x i1>
+ %m1 = bitcast i32 %y to <32 x i1>
+ %m2 = add <32 x i1> %m0, %m1
+ %ret = bitcast <32 x i1> %m2 to i32
+ ret i32 %ret
+}
+
+define i32 @test_v32i1_sub(i32 %x, i32 %y) {
+; CHECK-LABEL: test_v32i1_sub:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k0
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: kxord %k1, %k0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+ %m0 = bitcast i32 %x to <32 x i1>
+ %m1 = bitcast i32 %y to <32 x i1>
+ %m2 = sub <32 x i1> %m0, %m1
+ %ret = bitcast <32 x i1> %m2 to i32
+ ret i32 %ret
+}
+
+define i32 @test_v32i1_mul(i32 %x, i32 %y) {
+; CHECK-LABEL: test_v32i1_mul:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k0
+; CHECK-NEXT: kmovd %esi, %k1
+; CHECK-NEXT: kandd %k1, %k0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+ %m0 = bitcast i32 %x to <32 x i1>
+ %m1 = bitcast i32 %y to <32 x i1>
+ %m2 = mul <32 x i1> %m0, %m1
+ %ret = bitcast <32 x i1> %m2 to i32
+ ret i32 %ret
+}
+
+define i64 @test_v64i1_add(i64 %x, i64 %y) {
+; CHECK-LABEL: test_v64i1_add:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovq %rdi, %k0
+; CHECK-NEXT: kmovq %rsi, %k1
+; CHECK-NEXT: kxorq %k1, %k0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+ %m0 = bitcast i64 %x to <64 x i1>
+ %m1 = bitcast i64 %y to <64 x i1>
+ %m2 = add <64 x i1> %m0, %m1
+ %ret = bitcast <64 x i1> %m2 to i64
+ ret i64 %ret
+}
+
+define i64 @test_v64i1_sub(i64 %x, i64 %y) {
+; CHECK-LABEL: test_v64i1_sub:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovq %rdi, %k0
+; CHECK-NEXT: kmovq %rsi, %k1
+; CHECK-NEXT: kxorq %k1, %k0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+ %m0 = bitcast i64 %x to <64 x i1>
+ %m1 = bitcast i64 %y to <64 x i1>
+ %m2 = sub <64 x i1> %m0, %m1
+ %ret = bitcast <64 x i1> %m2 to i64
+ ret i64 %ret
+}
+
+define i64 @test_v64i1_mul(i64 %x, i64 %y) {
+; CHECK-LABEL: test_v64i1_mul:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovq %rdi, %k0
+; CHECK-NEXT: kmovq %rsi, %k1
+; CHECK-NEXT: kandq %k1, %k0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+ %m0 = bitcast i64 %x to <64 x i1>
+ %m1 = bitcast i64 %y to <64 x i1>
+ %m2 = mul <64 x i1> %m0, %m1
+ %ret = bitcast <64 x i1> %m2 to i64
+ ret i64 %ret
+}
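
The mask-op tests added above lower add/sub of i1 vectors to kxord/kxorq and mul to kandd/kandq: each lane is a single bit, so addition and subtraction are carry-less (x + y = x - y = x xor y mod 2) and multiplication is x and y. A minimal sketch of the scalar equivalence the lowering relies on (function name is hypothetical):

define i32 @v32i1_add_as_xor(i32 %x, i32 %y) {
  ; bitcast to <32 x i1>, add, and bitcast back yields the same value
  %r = xor i32 %x, %y
  ret i32 %r
}
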
diff --git a/test/CodeGen/X86/avx512bw-vec-cmp.ll b/test/CodeGen/X86/avx512bw-vec-cmp.ll
index 34432468921b..016837e61307 100644
--- a/test/CodeGen/X86/avx512bw-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512bw-vec-cmp.ll
@@ -5,8 +5,7 @@ define <64 x i8> @test1(<64 x i8> %x, <64 x i8> %y) nounwind {
; CHECK-LABEL: test1:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqb %zmm1, %zmm0, %k1
-; CHECK-NEXT: vmovdqu8 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmb %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp eq <64 x i8> %x, %y
%max = select <64 x i1> %mask, <64 x i8> %x, <64 x i8> %y
@@ -17,8 +16,7 @@ define <64 x i8> @test2(<64 x i8> %x, <64 x i8> %y, <64 x i8> %x1) nounwind {
; CHECK-LABEL: test2:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
-; CHECK-NEXT: vmovdqu8 %zmm2, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmb %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp sgt <64 x i8> %x, %y
%max = select <64 x i1> %mask, <64 x i8> %x1, <64 x i8> %y
@@ -29,8 +27,7 @@ define <32 x i16> @test3(<32 x i16> %x, <32 x i16> %y, <32 x i16> %x1) nounwind
; CHECK-LABEL: test3:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmplew %zmm0, %zmm1, %k1
-; CHECK-NEXT: vmovdqu16 %zmm2, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmw %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp sge <32 x i16> %x, %y
%max = select <32 x i1> %mask, <32 x i16> %x1, <32 x i16> %y
@@ -41,8 +38,7 @@ define <64 x i8> @test4(<64 x i8> %x, <64 x i8> %y, <64 x i8> %x1) nounwind {
; CHECK-LABEL: test4:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnleub %zmm1, %zmm0, %k1
-; CHECK-NEXT: vmovdqu8 %zmm2, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmb %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp ugt <64 x i8> %x, %y
%max = select <64 x i1> %mask, <64 x i8> %x1, <64 x i8> %y
@@ -53,8 +49,7 @@ define <32 x i16> @test5(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %yp) nounwin
; CHECK-LABEL: test5:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqw (%rdi), %zmm0, %k1
-; CHECK-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%y = load <32 x i16>, <32 x i16>* %yp, align 4
%mask = icmp eq <32 x i16> %x, %y
@@ -66,8 +61,7 @@ define <32 x i16> @test6(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) noun
; CHECK-LABEL: test6:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtw (%rdi), %zmm0, %k1
-; CHECK-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%y = load <32 x i16>, <32 x i16>* %y.ptr, align 4
%mask = icmp sgt <32 x i16> %x, %y
@@ -79,8 +73,7 @@ define <32 x i16> @test7(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) noun
; CHECK-LABEL: test7:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmplew (%rdi), %zmm0, %k1
-; CHECK-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%y = load <32 x i16>, <32 x i16>* %y.ptr, align 4
%mask = icmp sle <32 x i16> %x, %y
@@ -92,8 +85,7 @@ define <32 x i16> @test8(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) noun
; CHECK-LABEL: test8:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleuw (%rdi), %zmm0, %k1
-; CHECK-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%y = load <32 x i16>, <32 x i16>* %y.ptr, align 4
%mask = icmp ule <32 x i16> %x, %y
@@ -106,8 +98,7 @@ define <32 x i16> @test9(<32 x i16> %x, <32 x i16> %y, <32 x i16> %x1, <32 x i16
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm2, %k1 {%k1}
-; CHECK-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp eq <32 x i16> %x1, %y1
%mask0 = icmp eq <32 x i16> %x, %y
@@ -121,8 +112,7 @@ define <64 x i8> @test10(<64 x i8> %x, <64 x i8> %y, <64 x i8> %x1, <64 x i8> %y
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleb %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpleb %zmm2, %zmm3, %k1 {%k1}
-; CHECK-NEXT: vmovdqu8 %zmm0, %zmm2 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: vpblendmb %zmm0, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <64 x i8> %x1, %y1
%mask0 = icmp sle <64 x i8> %x, %y
@@ -136,8 +126,7 @@ define <64 x i8> @test11(<64 x i8> %x, <64 x i8>* %y.ptr, <64 x i8> %x1, <64 x i
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtb %zmm2, %zmm1, %k1
; CHECK-NEXT: vpcmpgtb (%rdi), %zmm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqu8 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmb %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sgt <64 x i8> %x1, %y1
%y = load <64 x i8>, <64 x i8>* %y.ptr, align 4
@@ -152,8 +141,7 @@ define <32 x i16> @test12(<32 x i16> %x, <32 x i16>* %y.ptr, <32 x i16> %x1, <32
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmplew %zmm1, %zmm2, %k1
; CHECK-NEXT: vpcmpleuw (%rdi), %zmm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <32 x i16> %x1, %y1
%y = load <32 x i16>, <32 x i16>* %y.ptr, align 4
diff --git a/test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll
index 7cd0da9564ff..98b346a2d733 100644
--- a/test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll
@@ -24,13 +24,13 @@ define <2 x i64> @test_mm_mask_broadcastb_epi8(<2 x i64> %a0, i16 %a1, <2 x i64>
; X32-LABEL: test_mm_mask_broadcastb_epi8:
; X32: # BB#0:
; X32-NEXT: movw {{[0-9]+}}(%esp), %ax
-; X32-NEXT: kmovw %eax, %k1
+; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vpbroadcastb %xmm1, %xmm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_broadcastb_epi8:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastb %xmm1, %xmm0 {%k1}
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -46,13 +46,13 @@ define <2 x i64> @test_mm_maskz_broadcastb_epi8(i16 %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_maskz_broadcastb_epi8:
; X32: # BB#0:
; X32-NEXT: movw {{[0-9]+}}(%esp), %ax
-; X32-NEXT: kmovw %eax, %k1
+; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vpbroadcastb %xmm0, %xmm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_broadcastb_epi8:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastb %xmm0, %xmm0 {%k1} {z}
; X64-NEXT: retq
%arg0 = bitcast i16 %a0 to <16 x i1>
@@ -142,13 +142,13 @@ define <2 x i64> @test_mm_mask_broadcastw_epi16(<2 x i64> %a0, i8 %a1, <2 x i64>
; X32-LABEL: test_mm_mask_broadcastw_epi16:
; X32: # BB#0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
-; X32-NEXT: kmovw %eax, %k1
+; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vpbroadcastw %xmm1, %xmm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_broadcastw_epi16:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastw %xmm1, %xmm0 {%k1}
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -164,13 +164,13 @@ define <2 x i64> @test_mm_maskz_broadcastw_epi16(i8 %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_maskz_broadcastw_epi16:
; X32: # BB#0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
-; X32-NEXT: kmovw %eax, %k1
+; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vpbroadcastw %xmm0, %xmm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_broadcastw_epi16:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastw %xmm0, %xmm0 {%k1} {z}
; X64-NEXT: retq
%arg0 = bitcast i8 %a0 to <8 x i1>
@@ -201,13 +201,13 @@ define <4 x i64> @test_mm256_mask_broadcastw_epi16(<4 x i64> %a0, i16 %a1, <2 x
; X32-LABEL: test_mm256_mask_broadcastw_epi16:
; X32: # BB#0:
; X32-NEXT: movw {{[0-9]+}}(%esp), %ax
-; X32-NEXT: kmovw %eax, %k1
+; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vpbroadcastw %xmm1, %ymm0 {%k1}
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_broadcastw_epi16:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastw %xmm1, %ymm0 {%k1}
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -223,13 +223,13 @@ define <4 x i64> @test_mm256_maskz_broadcastw_epi16(i16 %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm256_maskz_broadcastw_epi16:
; X32: # BB#0:
; X32-NEXT: movw {{[0-9]+}}(%esp), %ax
-; X32-NEXT: kmovw %eax, %k1
+; X32-NEXT: kmovd %eax, %k1
; X32-NEXT: vpbroadcastw %xmm0, %ymm0 {%k1} {z}
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_broadcastw_epi16:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpbroadcastw %xmm0, %ymm0 {%k1} {z}
; X64-NEXT: retq
%arg0 = bitcast i16 %a0 to <16 x i1>
diff --git a/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
index 0a8e1445be88..7df07b0413ed 100644
--- a/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
@@ -27,7 +27,7 @@ define <16 x i8>@test_int_x86_avx512_pbroadcastb_128(<16 x i8> %x0, <16 x i8> %x
; CHECK-LABEL: test_int_x86_avx512_pbroadcastb_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpbroadcastb %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x78,0xd0]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x78,0xc8]
; CHECK-NEXT: vpbroadcastb %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x78,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc9]
@@ -47,7 +47,7 @@ define <16 x i16>@test_int_x86_avx512_pbroadcastw_256(<8 x i16> %x0, <16 x i16>
; CHECK-LABEL: test_int_x86_avx512_pbroadcastw_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpbroadcastw %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x79,0xd0]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastw %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x79,0xc8]
; CHECK-NEXT: vpbroadcastw %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x79,0xc0]
; CHECK-NEXT: vpaddw %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc9]
@@ -67,7 +67,7 @@ define <8 x i16>@test_int_x86_avx512_pbroadcastw_128(<8 x i16> %x0, <8 x i16> %x
; CHECK-LABEL: test_int_x86_avx512_pbroadcastw_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpbroadcastw %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x79,0xd0]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x79,0xc8]
; CHECK-NEXT: vpbroadcastw %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x79,0xc0]
; CHECK-NEXT: vpaddw %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc9]
@@ -126,7 +126,7 @@ declare void @llvm.x86.avx512.mask.storeu.b.128(i8*, <16 x i8>, i16)
define void@test_int_x86_avx512_mask_storeu_b_128(i8* %ptr1, i8* %ptr2, <16 x i8> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_b_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
+; CHECK-NEXT: kmovd %edx, %k1 ## encoding: [0xc5,0xfb,0x92,0xca]
; CHECK-NEXT: vmovdqu8 %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0x7f,0x09,0x7f,0x07]
; CHECK-NEXT: vmovdqu %xmm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7f,0x06]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -154,7 +154,7 @@ declare void @llvm.x86.avx512.mask.storeu.w.128(i8*, <8 x i16>, i8)
define void@test_int_x86_avx512_mask_storeu_w_128(i8* %ptr1, i8* %ptr2, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_w_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
+; CHECK-NEXT: kmovd %edx, %k1 ## encoding: [0xc5,0xfb,0x92,0xca]
; CHECK-NEXT: vmovdqu16 %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0xff,0x09,0x7f,0x07]
; CHECK-NEXT: vmovdqu %xmm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7f,0x06]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -168,7 +168,7 @@ declare void @llvm.x86.avx512.mask.storeu.w.256(i8*, <16 x i16>, i16)
define void@test_int_x86_avx512_mask_storeu_w_256(i8* %ptr1, i8* %ptr2, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_w_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
+; CHECK-NEXT: kmovd %edx, %k1 ## encoding: [0xc5,0xfb,0x92,0xca]
; CHECK-NEXT: vmovdqu16 %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf1,0xff,0x29,0x7f,0x07]
; CHECK-NEXT: vmovdqu %ymm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfe,0x7f,0x06]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -183,7 +183,7 @@ define <8 x i16>@test_int_x86_avx512_mask_loadu_w_128(i8* %ptr, i8* %ptr2, <8 x
; CHECK-LABEL: test_int_x86_avx512_mask_loadu_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vmovdqu (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x6f,0x07]
-; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
+; CHECK-NEXT: kmovd %edx, %k1 ## encoding: [0xc5,0xfb,0x92,0xca]
; CHECK-NEXT: vmovdqu16 (%rsi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xff,0x09,0x6f,0x06]
; CHECK-NEXT: vmovdqu16 (%rdi), %xmm1 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0x89,0x6f,0x0f]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc1]
@@ -201,7 +201,7 @@ define <16 x i16>@test_int_x86_avx512_mask_loadu_w_256(i8* %ptr, i8* %ptr2, <16
; CHECK-LABEL: test_int_x86_avx512_mask_loadu_w_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vmovdqu (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfe,0x6f,0x07]
-; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
+; CHECK-NEXT: kmovd %edx, %k1 ## encoding: [0xc5,0xfb,0x92,0xca]
; CHECK-NEXT: vmovdqu16 (%rsi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xff,0x29,0x6f,0x06]
; CHECK-NEXT: vmovdqu16 (%rdi), %ymm1 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0xa9,0x6f,0x0f]
; CHECK-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfd,0xc1]
@@ -219,7 +219,7 @@ define <16 x i8>@test_int_x86_avx512_mask_loadu_b_128(i8* %ptr, i8* %ptr2, <16 x
; CHECK-LABEL: test_int_x86_avx512_mask_loadu_b_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vmovdqu (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x6f,0x07]
-; CHECK-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
+; CHECK-NEXT: kmovd %edx, %k1 ## encoding: [0xc5,0xfb,0x92,0xca]
; CHECK-NEXT: vmovdqu8 (%rsi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7f,0x09,0x6f,0x06]
; CHECK-NEXT: vmovdqu8 (%rdi), %xmm1 {%k1} {z} ## encoding: [0x62,0xf1,0x7f,0x89,0x6f,0x0f]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc1]
@@ -256,7 +256,7 @@ define <16 x i8>@test_int_x86_avx512_mask_palignr_128(<16 x i8> %x0, <16 x i8> %
; CHECK: ## BB#0:
; CHECK-NEXT: vpalignr $2, %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0f,0xd9,0x02]
; CHECK-NEXT: ## xmm3 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpalignr $2, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x0f,0xd1,0x02]
; CHECK-NEXT: ## xmm2 {%k1} = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1]
; CHECK-NEXT: vpalignr $2, %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0x89,0x0f,0xc1,0x02]
@@ -302,7 +302,7 @@ define <8 x i16>@test_int_x86_avx512_mask_pshufh_w_128(<8 x i16> %x0, i32 %x1, <
; CHECK: ## BB#0:
; CHECK-NEXT: vpshufhw $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x70,0xd0,0x03]
; CHECK-NEXT: ## xmm2 = xmm0[0,1,2,3,7,4,4,4]
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpshufhw $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x09,0x70,0xc8,0x03]
; CHECK-NEXT: ## xmm1 {%k1} = xmm0[0,1,2,3,7,4,4,4]
; CHECK-NEXT: vpshufhw $3, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0x89,0x70,0xc0,0x03]
@@ -325,7 +325,7 @@ define <16 x i16>@test_int_x86_avx512_mask_pshufh_w_256(<16 x i16> %x0, i32 %x1,
; CHECK: ## BB#0:
; CHECK-NEXT: vpshufhw $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfe,0x70,0xd0,0x03]
; CHECK-NEXT: ## ymm2 = ymm0[0,1,2,3,7,4,4,4,8,9,10,11,15,12,12,12]
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpshufhw $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x29,0x70,0xc8,0x03]
; CHECK-NEXT: ## ymm1 {%k1} = ymm0[0,1,2,3,7,4,4,4,8,9,10,11,15,12,12,12]
; CHECK-NEXT: vpshufhw $3, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0xa9,0x70,0xc0,0x03]
@@ -348,7 +348,7 @@ define <8 x i16>@test_int_x86_avx512_mask_pshufl_w_128(<8 x i16> %x0, i32 %x1, <
; CHECK: ## BB#0:
; CHECK-NEXT: vpshuflw $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x70,0xd0,0x03]
; CHECK-NEXT: ## xmm2 = xmm0[3,0,0,0,4,5,6,7]
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpshuflw $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7f,0x09,0x70,0xc8,0x03]
; CHECK-NEXT: ## xmm1 {%k1} = xmm0[3,0,0,0,4,5,6,7]
; CHECK-NEXT: vpshuflw $3, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7f,0x89,0x70,0xc0,0x03]
@@ -371,7 +371,7 @@ define <16 x i16>@test_int_x86_avx512_mask_pshufl_w_256(<16 x i16> %x0, i32 %x1,
; CHECK: ## BB#0:
; CHECK-NEXT: vpshuflw $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xff,0x70,0xd0,0x03]
; CHECK-NEXT: ## ymm2 = ymm0[3,0,0,0,4,5,6,7,11,8,8,8,12,13,14,15]
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpshuflw $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7f,0x29,0x70,0xc8,0x03]
; CHECK-NEXT: ## ymm1 {%k1} = ymm0[3,0,0,0,4,5,6,7,11,8,8,8,12,13,14,15]
; CHECK-NEXT: vpshuflw $3, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7f,0xa9,0x70,0xc0,0x03]
@@ -414,7 +414,8 @@ define i16 @test_pcmpeq_w_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: test_pcmpeq_w_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x75,0xc1]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.w.256(<16 x i16> %a, <16 x i16> %b, i16 -1)
ret i16 %res
@@ -423,9 +424,10 @@ define i16 @test_pcmpeq_w_256(<16 x i16> %a, <16 x i16> %b) {
define i16 @test_mask_pcmpeq_w_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_pcmpeq_w_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x75,0xc1]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.w.256(<16 x i16> %a, <16 x i16> %b, i16 %mask)
ret i16 %res
@@ -460,7 +462,8 @@ define i16 @test_pcmpgt_w_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: test_pcmpgt_w_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x65,0xc1]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.w.256(<16 x i16> %a, <16 x i16> %b, i16 -1)
ret i16 %res
@@ -469,9 +472,10 @@ define i16 @test_pcmpgt_w_256(<16 x i16> %a, <16 x i16> %b) {
define i16 @test_mask_pcmpgt_w_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_pcmpgt_w_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x65,0xc1]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.w.256(<16 x i16> %a, <16 x i16> %b, i16 %mask)
ret i16 %res
@@ -486,7 +490,7 @@ define <16 x i8>@test_int_x86_avx512_mask_punpckhb_w_128(<16 x i8> %x0, <16 x i8
; CHECK: ## BB#0:
; CHECK-NEXT: vpunpckhbw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x68,0xd9]
; CHECK-NEXT: ## xmm3 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpunpckhbw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x68,0xd1]
; CHECK-NEXT: ## xmm2 {%k1} = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; CHECK-NEXT: vpaddb %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc3]
@@ -504,7 +508,7 @@ define <16 x i8>@test_int_x86_avx512_mask_punpcklb_w_128(<16 x i8> %x0, <16 x i8
; CHECK: ## BB#0:
; CHECK-NEXT: vpunpcklbw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x60,0xd9]
; CHECK-NEXT: ## xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpunpcklbw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x60,0xd1]
; CHECK-NEXT: ## xmm2 {%k1} = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; CHECK-NEXT: vpaddb %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc3]
@@ -558,7 +562,7 @@ define <8 x i16>@test_int_x86_avx512_mask_punpcklw_d_128(<8 x i16> %x0, <8 x i16
; CHECK: ## BB#0:
; CHECK-NEXT: vpunpcklwd %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x61,0xd9]
; CHECK-NEXT: ## xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpunpcklwd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x61,0xd1]
; CHECK-NEXT: ## xmm2 {%k1} = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; CHECK-NEXT: vpaddw %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc3]
@@ -576,7 +580,7 @@ define <8 x i16>@test_int_x86_avx512_mask_punpckhw_d_128(<8 x i16> %x0, <8 x i16
; CHECK: ## BB#0:
; CHECK-NEXT: vpunpckhwd %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x69,0xd9]
; CHECK-NEXT: ## xmm3 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpunpckhwd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x69,0xd1]
; CHECK-NEXT: ## xmm2 {%k1} = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; CHECK-NEXT: vpaddw %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc3]
@@ -594,7 +598,7 @@ define <16 x i16>@test_int_x86_avx512_mask_punpcklw_d_256(<16 x i16> %x0, <16 x
; CHECK: ## BB#0:
; CHECK-NEXT: vpunpcklwd %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x61,0xd9]
; CHECK-NEXT: ## ymm3 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpunpcklwd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x61,0xd1]
; CHECK-NEXT: ## ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
; CHECK-NEXT: vpaddw %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc3]
@@ -612,7 +616,7 @@ define <16 x i16>@test_int_x86_avx512_mask_punpckhw_d_256(<16 x i16> %x0, <16 x
; CHECK: ## BB#0:
; CHECK-NEXT: vpunpckhwd %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x69,0xd9]
; CHECK-NEXT: ## ymm3 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpunpckhwd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x69,0xd1]
; CHECK-NEXT: ## ymm2 {%k1} = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
; CHECK-NEXT: vpaddw %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc3]
@@ -635,7 +639,7 @@ define <8 x i16> @test_mask_add_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test_mask_add_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfd,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -646,7 +650,7 @@ define <8 x i16> @test_mask_add_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i
define <8 x i16> @test_mask_add_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.padd.w.128(<8 x i16> %a, <8 x i16> %b, <8 x i16> zeroinitializer, i8 %mask)
@@ -666,7 +670,7 @@ define <8 x i16> @test_mask_add_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
define <8 x i16> @test_mask_add_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfd,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -678,7 +682,7 @@ define <8 x i16> @test_mask_add_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <
define <8 x i16> @test_mask_add_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfd,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
@@ -700,7 +704,7 @@ define <16 x i16> @test_mask_add_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
define <16 x i16> @test_mask_add_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rrk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfd,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -711,7 +715,7 @@ define <16 x i16> @test_mask_add_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16
define <16 x i16> @test_mask_add_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rrkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.padd.w.256(<16 x i16> %a, <16 x i16> %b, <16 x i16> zeroinitializer, i16 %mask)
@@ -731,7 +735,7 @@ define <16 x i16> @test_mask_add_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b)
define <16 x i16> @test_mask_add_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rmk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfd,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -743,7 +747,7 @@ define <16 x i16> @test_mask_add_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b
define <16 x i16> @test_mask_add_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_add_epi16_rmkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfd,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
@@ -765,7 +769,7 @@ define <8 x i16> @test_mask_sub_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test_mask_sub_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xf9,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -776,7 +780,7 @@ define <8 x i16> @test_mask_sub_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i
define <8 x i16> @test_mask_sub_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xf9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.psub.w.128(<8 x i16> %a, <8 x i16> %b, <8 x i16> zeroinitializer, i8 %mask)
@@ -796,7 +800,7 @@ define <8 x i16> @test_mask_sub_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
define <8 x i16> @test_mask_sub_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xf9,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -808,7 +812,7 @@ define <8 x i16> @test_mask_sub_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <
define <8 x i16> @test_mask_sub_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xf9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
@@ -830,7 +834,7 @@ define <16 x i16> @test_mask_sub_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
define <16 x i16> @test_mask_sub_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rrk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xf9,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -841,7 +845,7 @@ define <16 x i16> @test_mask_sub_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16
define <16 x i16> @test_mask_sub_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rrkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xf9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.psub.w.256(<16 x i16> %a, <16 x i16> %b, <16 x i16> zeroinitializer, i16 %mask)
@@ -861,7 +865,7 @@ define <16 x i16> @test_mask_sub_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b)
define <16 x i16> @test_mask_sub_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rmk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xf9,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -873,7 +877,7 @@ define <16 x i16> @test_mask_sub_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b
define <16 x i16> @test_mask_sub_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi16_rmkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xf9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
@@ -1090,7 +1094,7 @@ define <8 x i16> @test_mask_mullo_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test_mask_mullo_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmullw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd5,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1101,7 +1105,7 @@ define <8 x i16> @test_mask_mullo_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x
define <8 x i16> @test_mask_mullo_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmullw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd5,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.pmull.w.128(<8 x i16> %a, <8 x i16> %b, <8 x i16> zeroinitializer, i8 %mask)
@@ -1121,7 +1125,7 @@ define <8 x i16> @test_mask_mullo_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b)
define <8 x i16> @test_mask_mullo_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmullw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd5,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1133,7 +1137,7 @@ define <8 x i16> @test_mask_mullo_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b,
define <8 x i16> @test_mask_mullo_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmullw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd5,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
@@ -1155,7 +1159,7 @@ define <16 x i16> @test_mask_mullo_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
define <16 x i16> @test_mask_mullo_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rrk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmullw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd5,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1166,7 +1170,7 @@ define <16 x i16> @test_mask_mullo_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <
define <16 x i16> @test_mask_mullo_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rrkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmullw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd5,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.pmull.w.256(<16 x i16> %a, <16 x i16> %b, <16 x i16> zeroinitializer, i16 %mask)
@@ -1186,7 +1190,7 @@ define <16 x i16> @test_mask_mullo_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_
define <16 x i16> @test_mask_mullo_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rmk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmullw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd5,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1198,7 +1202,7 @@ define <16 x i16> @test_mask_mullo_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr
define <16 x i16> @test_mask_mullo_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi16_rmkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmullw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd5,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
@@ -1213,7 +1217,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmaxs.b.128(<16 x i8>, <16 x i8>, <16 x
define <16 x i8>@test_int_x86_avx512_mask_pmaxs_b_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxs_b_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x3c,0xd1]
; CHECK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x3c,0xc1]
; CHECK-NEXT: vpaddb %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
@@ -1246,7 +1250,7 @@ define <8 x i16>@test_int_x86_avx512_mask_pmaxs_w_128(<8 x i16> %x0, <8 x i16> %
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxs_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xee,0xd9]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xee,0xd1]
; CHECK-NEXT: vpaddw %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1261,7 +1265,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmaxs.w.256(<16 x i16>, <16 x i16>, <16
define <16 x i16>@test_int_x86_avx512_mask_pmaxs_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxs_w_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xee,0xd1]
; CHECK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xee,0xc1]
; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
@@ -1277,7 +1281,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmaxu.b.128(<16 x i8>, <16 x i8>, <16 x
define <16 x i8>@test_int_x86_avx512_mask_pmaxu_b_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2,i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxu_b_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaxub %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xde,0xd1]
; CHECK-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xde,0xc1]
; CHECK-NEXT: vpaddb %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
@@ -1310,7 +1314,7 @@ define <8 x i16>@test_int_x86_avx512_mask_pmaxu_w_128(<8 x i16> %x0, <8 x i16> %
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxu_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpmaxuw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3e,0xd9]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaxuw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x3e,0xd1]
; CHECK-NEXT: vpaddw %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1325,7 +1329,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmaxu.w.256(<16 x i16>, <16 x i16>, <16
define <16 x i16>@test_int_x86_avx512_mask_pmaxu_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxu_w_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaxuw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x3e,0xd1]
; CHECK-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x3e,0xc1]
; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
@@ -1341,7 +1345,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmins.b.128(<16 x i8>, <16 x i8>, <16 x
define <16 x i8>@test_int_x86_avx512_mask_pmins_b_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmins_b_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpminsb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x38,0xd1]
; CHECK-NEXT: vpminsb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x38,0xc1]
; CHECK-NEXT: vpaddb %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
@@ -1374,7 +1378,7 @@ define <8 x i16>@test_int_x86_avx512_mask_pmins_w_128(<8 x i16> %x0, <8 x i16> %
; CHECK-LABEL: test_int_x86_avx512_mask_pmins_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpminsw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xea,0xd9]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpminsw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xea,0xd1]
; CHECK-NEXT: vpaddw %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1389,7 +1393,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmins.w.256(<16 x i16>, <16 x i16>, <16
define <16 x i16>@test_int_x86_avx512_mask_pmins_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmins_w_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpminsw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xea,0xd1]
; CHECK-NEXT: vpminsw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xea,0xc1]
; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
@@ -1405,7 +1409,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pminu.b.128(<16 x i8>, <16 x i8>, <16 x
define <16 x i8>@test_int_x86_avx512_mask_pminu_b_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pminu_b_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpminub %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xda,0xd1]
; CHECK-NEXT: vpminub %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xda,0xc1]
; CHECK-NEXT: vpaddb %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
@@ -1438,7 +1442,7 @@ define <8 x i16>@test_int_x86_avx512_mask_pminu_w_128(<8 x i16> %x0, <8 x i16> %
; CHECK-LABEL: test_int_x86_avx512_mask_pminu_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpminuw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3a,0xd9]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpminuw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x3a,0xd1]
; CHECK-NEXT: vpaddw %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1453,7 +1457,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pminu.w.256(<16 x i16>, <16 x i16>, <16
define <16 x i16>@test_int_x86_avx512_mask_pminu_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pminu_w_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpminuw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x3a,0xd1]
; CHECK-NEXT: vpminuw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x3a,0xc1]
; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
@@ -1470,7 +1474,7 @@ define <8 x i16>@test_int_x86_avx512_mask_psrl_w_128(<8 x i16> %x0, <8 x i16> %x
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsrlw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd1,0xd9]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsrlw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd1,0xd1]
; CHECK-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd1,0xc1]
; CHECK-NEXT: vpaddw %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xcb]
@@ -1490,7 +1494,7 @@ define <16 x i16>@test_int_x86_avx512_mask_psrl_w_256(<16 x i16> %x0, <8 x i16>
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_w_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsrlw %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0xd9]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsrlw %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd1,0xd1]
; CHECK-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd1,0xc1]
; CHECK-NEXT: vpaddw %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xcb]
@@ -1510,7 +1514,7 @@ define <8 x i16>@test_int_x86_avx512_mask_psra_w_128(<8 x i16> %x0, <8 x i16> %x
; CHECK-LABEL: test_int_x86_avx512_mask_psra_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsraw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe1,0xd9]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsraw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe1,0xd1]
; CHECK-NEXT: vpsraw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xe1,0xc1]
; CHECK-NEXT: vpaddw %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
@@ -1530,7 +1534,7 @@ define <16 x i16>@test_int_x86_avx512_mask_psra_w_256(<16 x i16> %x0, <8 x i16>
; CHECK-LABEL: test_int_x86_avx512_mask_psra_w_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsraw %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe1,0xd9]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsraw %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe1,0xd1]
; CHECK-NEXT: vpsraw %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xe1,0xc1]
; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
@@ -1550,7 +1554,7 @@ define <8 x i16>@test_int_x86_avx512_mask_psll_w_128(<8 x i16> %x0, <8 x i16> %x
; CHECK-LABEL: test_int_x86_avx512_mask_psll_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsllw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf1,0xd9]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsllw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xf1,0xd1]
; CHECK-NEXT: vpsllw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xf1,0xc1]
; CHECK-NEXT: vpaddw %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
@@ -1570,7 +1574,7 @@ define <16 x i16>@test_int_x86_avx512_mask_psll_w_256(<16 x i16> %x0, <8 x i16>
; CHECK-LABEL: test_int_x86_avx512_mask_psll_w_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsllw %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf1,0xd9]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsllw %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xf1,0xd1]
; CHECK-NEXT: vpsllw %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xf1,0xc1]
; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
@@ -1590,7 +1594,7 @@ define <8 x i16>@test_int_x86_avx512_mask_psrl_wi_128(<8 x i16> %x0, i32 %x1, <8
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_wi_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsrlw $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x71,0xd0,0x03]
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsrlw $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x71,0xd0,0x03]
; CHECK-NEXT: vpsrlw $3, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x71,0xd0,0x03]
; CHECK-NEXT: vpaddw %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfd,0xca]
@@ -1610,7 +1614,7 @@ define <16 x i16>@test_int_x86_avx512_mask_psrl_wi_256(<16 x i16> %x0, i32 %x1,
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_wi_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsrlw $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x71,0xd0,0x03]
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsrlw $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x71,0xd0,0x03]
; CHECK-NEXT: vpsrlw $3, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x71,0xd0,0x03]
; CHECK-NEXT: vpaddw %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xca]
@@ -1630,7 +1634,7 @@ define <8 x i16>@test_int_x86_avx512_mask_psra_wi_128(<8 x i16> %x0, i32 %x1, <8
; CHECK-LABEL: test_int_x86_avx512_mask_psra_wi_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsraw $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x71,0xe0,0x03]
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsraw $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x71,0xe0,0x03]
; CHECK-NEXT: vpsraw $3, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x71,0xe0,0x03]
; CHECK-NEXT: vpaddw %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfd,0xc0]
@@ -1650,7 +1654,7 @@ define <16 x i16>@test_int_x86_avx512_mask_psra_wi_256(<16 x i16> %x0, i32 %x1,
; CHECK-LABEL: test_int_x86_avx512_mask_psra_wi_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsraw $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x71,0xe0,0x03]
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsraw $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x71,0xe0,0x03]
; CHECK-NEXT: vpsraw $3, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x71,0xe0,0x03]
; CHECK-NEXT: vpaddw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xc0]
@@ -1670,7 +1674,7 @@ define <8 x i16>@test_int_x86_avx512_mask_psll_wi_128(<8 x i16> %x0, i32 %x1, <8
; CHECK-LABEL: test_int_x86_avx512_mask_psll_wi_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsllw $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x71,0xf0,0x03]
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsllw $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x71,0xf0,0x03]
; CHECK-NEXT: vpsllw $3, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x71,0xf0,0x03]
; CHECK-NEXT: vpaddw %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfd,0xc0]
@@ -1690,7 +1694,7 @@ define <16 x i16>@test_int_x86_avx512_mask_psll_wi_256(<16 x i16> %x0, i32 %x1,
; CHECK-LABEL: test_int_x86_avx512_mask_psll_wi_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsllw $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x71,0xf0,0x03]
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsllw $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x71,0xf0,0x03]
; CHECK-NEXT: vpsllw $3, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x71,0xf0,0x03]
; CHECK-NEXT: vpaddw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xc0]
@@ -1710,7 +1714,7 @@ define <16 x i8>@test_int_x86_avx512_mask_pshuf_b_128(<16 x i8> %x0, <16 x i8> %
; CHECK-LABEL: test_int_x86_avx512_mask_pshuf_b_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpshufb %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x00,0xd9]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpshufb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x00,0xd1]
; CHECK-NEXT: vpaddb %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1743,7 +1747,7 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovzxb_w_128(<16 x i8> %x0, <8 x i16>
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovzxbw %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x30,0xd0]
; CHECK-NEXT: ## xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovzxbw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x30,0xc8]
; CHECK-NEXT: ## xmm1 {%k1} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; CHECK-NEXT: vpmovzxbw %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x30,0xc0]
@@ -1766,7 +1770,7 @@ define <16 x i16>@test_int_x86_avx512_mask_pmovzxb_w_256(<16 x i8> %x0, <16 x i1
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovzxbw %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x30,0xd0]
; CHECK-NEXT: ## ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovzxbw %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x30,0xc8]
; CHECK-NEXT: ## ymm1 {%k1} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; CHECK-NEXT: vpmovzxbw %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x30,0xc0]
@@ -1789,7 +1793,7 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovsxb_w_128(<16 x i8> %x0, <8 x i16>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxb_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovsxbw %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x20,0xd0]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovsxbw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x20,0xc8]
; CHECK-NEXT: vpmovsxbw %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x20,0xc0]
; CHECK-NEXT: vpaddw %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfd,0xc0]
@@ -1809,7 +1813,7 @@ define <16 x i16>@test_int_x86_avx512_mask_pmovsxb_w_256(<16 x i8> %x0, <16 x i1
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxb_w_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovsxbw %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x20,0xd0]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovsxbw %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x20,0xc8]
; CHECK-NEXT: vpmovsxbw %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x20,0xc0]
; CHECK-NEXT: vpaddw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xc0]
@@ -1829,7 +1833,7 @@ define <2 x i64>@test_int_x86_avx512_mask_pmovsxd_q_128(<4 x i32> %x0, <2 x i64>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxd_q_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovsxdq %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x25,0xd0]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovsxdq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x25,0xc8]
; CHECK-NEXT: vpmovsxdq %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x25,0xc0]
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
@@ -1849,7 +1853,7 @@ define <4 x i64>@test_int_x86_avx512_mask_pmovsxd_q_256(<4 x i32> %x0, <4 x i64>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxd_q_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovsxdq %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x25,0xd0]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovsxdq %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x25,0xc8]
; CHECK-NEXT: vpmovsxdq %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x25,0xc0]
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
@@ -1863,3 +1867,728 @@ define <4 x i64>@test_int_x86_avx512_mask_pmovsxd_q_256(<4 x i32> %x0, <4 x i64>
ret <4 x i64> %res4
}
+
+declare <16 x i8> @llvm.x86.avx512.cvtmask2b.128(i16)
+
+define <16 x i8>@test_int_x86_avx512_cvtmask2b_128(i16 %x0) {
+; CHECK-LABEL: test_int_x86_avx512_cvtmask2b_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k0 ## encoding: [0xc5,0xfb,0x92,0xc7]
+; CHECK-NEXT: vpmovm2b %k0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x28,0xc0]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <16 x i8> @llvm.x86.avx512.cvtmask2b.128(i16 %x0)
+ ret <16 x i8> %res
+}
+
+declare <32 x i8> @llvm.x86.avx512.cvtmask2b.256(i32)
+
+define <32 x i8>@test_int_x86_avx512_cvtmask2b_256(i32 %x0) {
+; CHECK-LABEL: test_int_x86_avx512_cvtmask2b_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k0 ## encoding: [0xc5,0xfb,0x92,0xc7]
+; CHECK-NEXT: vpmovm2b %k0, %ymm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x28,0xc0]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <32 x i8> @llvm.x86.avx512.cvtmask2b.256(i32 %x0)
+ ret <32 x i8> %res
+}
+
+declare <8 x i16> @llvm.x86.avx512.cvtmask2w.128(i8)
+
+define <8 x i16>@test_int_x86_avx512_cvtmask2w_128(i8 %x0) {
+; CHECK-LABEL: test_int_x86_avx512_cvtmask2w_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k0 ## encoding: [0xc5,0xfb,0x92,0xc7]
+; CHECK-NEXT: vpmovm2w %k0, %xmm0 ## encoding: [0x62,0xf2,0xfe,0x08,0x28,0xc0]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x i16> @llvm.x86.avx512.cvtmask2w.128(i8 %x0)
+ ret <8 x i16> %res
+}
+
+declare <16 x i16> @llvm.x86.avx512.cvtmask2w.256(i16)
+
+define <16 x i16>@test_int_x86_avx512_cvtmask2w_256(i16 %x0) {
+; CHECK-LABEL: test_int_x86_avx512_cvtmask2w_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k0 ## encoding: [0xc5,0xfb,0x92,0xc7]
+; CHECK-NEXT: vpmovm2w %k0, %ymm0 ## encoding: [0x62,0xf2,0xfe,0x28,0x28,0xc0]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <16 x i16> @llvm.x86.avx512.cvtmask2w.256(i16 %x0)
+ ret <16 x i16> %res
+}
+define <8 x i16> @test_mask_packs_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_mask_packs_epi32_rr_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6b,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 -1)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_packs_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_packs_epi32_rrk_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpackssdw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x6b,0xd1]
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_packs_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
+; CHECK-LABEL: test_mask_packs_epi32_rrkz_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x6b,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 %mask)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_packs_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
+; CHECK-LABEL: test_mask_packs_epi32_rm_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6b,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <4 x i32>, <4 x i32>* %ptr_b
+ %res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 -1)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_packs_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_packs_epi32_rmk_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackssdw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x6b,0x0f]
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <4 x i32>, <4 x i32>* %ptr_b
+ %res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_packs_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b, i8 %mask) {
+; CHECK-LABEL: test_mask_packs_epi32_rmkz_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x6b,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <4 x i32>, <4 x i32>* %ptr_b
+ %res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 %mask)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_packs_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
+; CHECK-LABEL: test_mask_packs_epi32_rmb_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpackssdw (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x18,0x6b,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %q = load i32, i32* %ptr_b
+ %vecinit.i = insertelement <4 x i32> undef, i32 %q, i32 0
+ %b = shufflevector <4 x i32> %vecinit.i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 -1)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_packs_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <8 x i16> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_packs_epi32_rmbk_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackssdw (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x19,0x6b,0x0f]
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %q = load i32, i32* %ptr_b
+ %vecinit.i = insertelement <4 x i32> undef, i32 %q, i32 0
+ %b = shufflevector <4 x i32> %vecinit.i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_packs_epi32_rmbkz_128(<4 x i32> %a, i32* %ptr_b, i8 %mask) {
+; CHECK-LABEL: test_mask_packs_epi32_rmbkz_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackssdw (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x99,0x6b,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %q = load i32, i32* %ptr_b
+ %vecinit.i = insertelement <4 x i32> undef, i32 %q, i32 0
+ %b = shufflevector <4 x i32> %vecinit.i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 %mask)
+ ret <8 x i16> %res
+}
+
+declare <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32>, <4 x i32>, <8 x i16>, i8)
+
+define <16 x i16> @test_mask_packs_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: test_mask_packs_epi32_rr_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6b,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 -1)
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_packs_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_packs_epi32_rrk_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpackssdw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x6b,0xd1]
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask)
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_packs_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i16 %mask) {
+; CHECK-LABEL: test_mask_packs_epi32_rrkz_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x6b,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 %mask)
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_packs_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
+; CHECK-LABEL: test_mask_packs_epi32_rm_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6b,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <8 x i32>, <8 x i32>* %ptr_b
+ %res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 -1)
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_packs_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_packs_epi32_rmk_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackssdw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x6b,0x0f]
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <8 x i32>, <8 x i32>* %ptr_b
+ %res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask)
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_packs_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b, i16 %mask) {
+; CHECK-LABEL: test_mask_packs_epi32_rmkz_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x6b,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <8 x i32>, <8 x i32>* %ptr_b
+ %res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 %mask)
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_packs_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
+; CHECK-LABEL: test_mask_packs_epi32_rmb_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpackssdw (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x38,0x6b,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %q = load i32, i32* %ptr_b
+ %vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0
+ %b = shufflevector <8 x i32> %vecinit.i, <8 x i32> undef, <8 x i32> zeroinitializer
+ %res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 -1)
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_packs_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <16 x i16> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_packs_epi32_rmbk_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackssdw (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x39,0x6b,0x0f]
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %q = load i32, i32* %ptr_b
+ %vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0
+ %b = shufflevector <8 x i32> %vecinit.i, <8 x i32> undef, <8 x i32> zeroinitializer
+ %res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask)
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_packs_epi32_rmbkz_256(<8 x i32> %a, i32* %ptr_b, i16 %mask) {
+; CHECK-LABEL: test_mask_packs_epi32_rmbkz_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackssdw (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xb9,0x6b,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %q = load i32, i32* %ptr_b
+ %vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0
+ %b = shufflevector <8 x i32> %vecinit.i, <8 x i32> undef, <8 x i32> zeroinitializer
+ %res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 %mask)
+ ret <16 x i16> %res
+}
+
+declare <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32>, <8 x i32>, <16 x i16>, i16)
+
+define <16 x i8> @test_mask_packs_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_mask_packs_epi16_rr_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x63,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <16 x i8> @llvm.x86.avx512.mask.packsswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> zeroinitializer, i16 -1)
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_mask_packs_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <16 x i8> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_packs_epi16_rrk_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpacksswb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x63,0xd1]
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <16 x i8> @llvm.x86.avx512.mask.packsswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> %passThru, i16 %mask)
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_mask_packs_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i16 %mask) {
+; CHECK-LABEL: test_mask_packs_epi16_rrkz_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x63,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <16 x i8> @llvm.x86.avx512.mask.packsswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> zeroinitializer, i16 %mask)
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_mask_packs_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
+; CHECK-LABEL: test_mask_packs_epi16_rm_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x63,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <8 x i16>, <8 x i16>* %ptr_b
+ %res = call <16 x i8> @llvm.x86.avx512.mask.packsswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> zeroinitializer, i16 -1)
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_mask_packs_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_packs_epi16_rmk_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpacksswb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x63,0x0f]
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <8 x i16>, <8 x i16>* %ptr_b
+ %res = call <16 x i8> @llvm.x86.avx512.mask.packsswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> %passThru, i16 %mask)
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_mask_packs_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i16 %mask) {
+; CHECK-LABEL: test_mask_packs_epi16_rmkz_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x63,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <8 x i16>, <8 x i16>* %ptr_b
+ %res = call <16 x i8> @llvm.x86.avx512.mask.packsswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> zeroinitializer, i16 %mask)
+ ret <16 x i8> %res
+}
+
+declare <16 x i8> @llvm.x86.avx512.mask.packsswb.128(<8 x i16>, <8 x i16>, <16 x i8>, i16)
+
+define <32 x i8> @test_mask_packs_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-LABEL: test_mask_packs_epi16_rr_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x63,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <32 x i8> @llvm.x86.avx512.mask.packsswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> zeroinitializer, i32 -1)
+ ret <32 x i8> %res
+}
+
+define <32 x i8> @test_mask_packs_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <32 x i8> %passThru, i32 %mask) {
+; CHECK-LABEL: test_mask_packs_epi16_rrk_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpacksswb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x63,0xd1]
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <32 x i8> @llvm.x86.avx512.mask.packsswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> %passThru, i32 %mask)
+ ret <32 x i8> %res
+}
+
+define <32 x i8> @test_mask_packs_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i32 %mask) {
+; CHECK-LABEL: test_mask_packs_epi16_rrkz_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x63,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <32 x i8> @llvm.x86.avx512.mask.packsswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> zeroinitializer, i32 %mask)
+ ret <32 x i8> %res
+}
+
+define <32 x i8> @test_mask_packs_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
+; CHECK-LABEL: test_mask_packs_epi16_rm_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x63,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <16 x i16>, <16 x i16>* %ptr_b
+ %res = call <32 x i8> @llvm.x86.avx512.mask.packsswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> zeroinitializer, i32 -1)
+ ret <32 x i8> %res
+}
+
+define <32 x i8> @test_mask_packs_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
+; CHECK-LABEL: test_mask_packs_epi16_rmk_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpacksswb (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x63,0x0f]
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <16 x i16>, <16 x i16>* %ptr_b
+ %res = call <32 x i8> @llvm.x86.avx512.mask.packsswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> %passThru, i32 %mask)
+ ret <32 x i8> %res
+}
+
+define <32 x i8> @test_mask_packs_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i32 %mask) {
+; CHECK-LABEL: test_mask_packs_epi16_rmkz_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x63,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <16 x i16>, <16 x i16>* %ptr_b
+ %res = call <32 x i8> @llvm.x86.avx512.mask.packsswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> zeroinitializer, i32 %mask)
+ ret <32 x i8> %res
+}
+
+declare <32 x i8> @llvm.x86.avx512.mask.packsswb.256(<16 x i16>, <16 x i16>, <32 x i8>, i32)
+
+
+define <8 x i16> @test_mask_packus_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_mask_packus_epi32_rr_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 -1)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_packus_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_packus_epi32_rrk_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpackusdw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x2b,0xd1]
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_packus_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
+; CHECK-LABEL: test_mask_packus_epi32_rrkz_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x2b,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 %mask)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_packus_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
+; CHECK-LABEL: test_mask_packus_epi32_rm_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x2b,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <4 x i32>, <4 x i32>* %ptr_b
+ %res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 -1)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_packus_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_packus_epi32_rmk_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackusdw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x2b,0x0f]
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <4 x i32>, <4 x i32>* %ptr_b
+ %res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_packus_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b, i8 %mask) {
+; CHECK-LABEL: test_mask_packus_epi32_rmkz_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x2b,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <4 x i32>, <4 x i32>* %ptr_b
+ %res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 %mask)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_packus_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
+; CHECK-LABEL: test_mask_packus_epi32_rmb_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpackusdw (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7d,0x18,0x2b,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %q = load i32, i32* %ptr_b
+ %vecinit.i = insertelement <4 x i32> undef, i32 %q, i32 0
+ %b = shufflevector <4 x i32> %vecinit.i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 -1)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_packus_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <8 x i16> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_packus_epi32_rmbk_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackusdw (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0x2b,0x0f]
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %q = load i32, i32* %ptr_b
+ %vecinit.i = insertelement <4 x i32> undef, i32 %q, i32 0
+ %b = shufflevector <4 x i32> %vecinit.i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_packus_epi32_rmbkz_128(<4 x i32> %a, i32* %ptr_b, i8 %mask) {
+; CHECK-LABEL: test_mask_packus_epi32_rmbkz_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackusdw (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x99,0x2b,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %q = load i32, i32* %ptr_b
+ %vecinit.i = insertelement <4 x i32> undef, i32 %q, i32 0
+ %b = shufflevector <4 x i32> %vecinit.i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 %mask)
+ ret <8 x i16> %res
+}
+
+declare <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32>, <4 x i32>, <8 x i16>, i8)
+
+define <16 x i16> @test_mask_packus_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: test_mask_packus_epi32_rr_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x2b,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 -1)
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_packus_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_packus_epi32_rrk_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpackusdw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x2b,0xd1]
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask)
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_packus_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i16 %mask) {
+; CHECK-LABEL: test_mask_packus_epi32_rrkz_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x2b,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 %mask)
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_packus_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
+; CHECK-LABEL: test_mask_packus_epi32_rm_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x2b,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <8 x i32>, <8 x i32>* %ptr_b
+ %res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 -1)
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_packus_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_packus_epi32_rmk_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackusdw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x2b,0x0f]
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <8 x i32>, <8 x i32>* %ptr_b
+ %res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask)
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_packus_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b, i16 %mask) {
+; CHECK-LABEL: test_mask_packus_epi32_rmkz_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x2b,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <8 x i32>, <8 x i32>* %ptr_b
+ %res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 %mask)
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_packus_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
+; CHECK-LABEL: test_mask_packus_epi32_rmb_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpackusdw (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf2,0x7d,0x38,0x2b,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %q = load i32, i32* %ptr_b
+ %vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0
+ %b = shufflevector <8 x i32> %vecinit.i, <8 x i32> undef, <8 x i32> zeroinitializer
+ %res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 -1)
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_packus_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <16 x i16> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_packus_epi32_rmbk_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackusdw (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x39,0x2b,0x0f]
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %q = load i32, i32* %ptr_b
+ %vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0
+ %b = shufflevector <8 x i32> %vecinit.i, <8 x i32> undef, <8 x i32> zeroinitializer
+ %res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask)
+ ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_packus_epi32_rmbkz_256(<8 x i32> %a, i32* %ptr_b, i16 %mask) {
+; CHECK-LABEL: test_mask_packus_epi32_rmbkz_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackusdw (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xb9,0x2b,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %q = load i32, i32* %ptr_b
+ %vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0
+ %b = shufflevector <8 x i32> %vecinit.i, <8 x i32> undef, <8 x i32> zeroinitializer
+ %res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 %mask)
+ ret <16 x i16> %res
+}
+
+declare <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32>, <8 x i32>, <16 x i16>, i16)
+
+define <16 x i8> @test_mask_packus_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_mask_packus_epi16_rr_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x67,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <16 x i8> @llvm.x86.avx512.mask.packuswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> zeroinitializer, i16 -1)
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_mask_packus_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <16 x i8> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_packus_epi16_rrk_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x67,0xd1]
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <16 x i8> @llvm.x86.avx512.mask.packuswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> %passThru, i16 %mask)
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_mask_packus_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i16 %mask) {
+; CHECK-LABEL: test_mask_packus_epi16_rrkz_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x67,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <16 x i8> @llvm.x86.avx512.mask.packuswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> zeroinitializer, i16 %mask)
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_mask_packus_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
+; CHECK-LABEL: test_mask_packus_epi16_rm_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x67,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <8 x i16>, <8 x i16>* %ptr_b
+ %res = call <16 x i8> @llvm.x86.avx512.mask.packuswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> zeroinitializer, i16 -1)
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_mask_packus_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_packus_epi16_rmk_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackuswb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x67,0x0f]
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <8 x i16>, <8 x i16>* %ptr_b
+ %res = call <16 x i8> @llvm.x86.avx512.mask.packuswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> %passThru, i16 %mask)
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_mask_packus_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i16 %mask) {
+; CHECK-LABEL: test_mask_packus_epi16_rmkz_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x67,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <8 x i16>, <8 x i16>* %ptr_b
+ %res = call <16 x i8> @llvm.x86.avx512.mask.packuswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> zeroinitializer, i16 %mask)
+ ret <16 x i8> %res
+}
+
+declare <16 x i8> @llvm.x86.avx512.mask.packuswb.128(<8 x i16>, <8 x i16>, <16 x i8>, i16)
+
+define <32 x i8> @test_mask_packus_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-LABEL: test_mask_packus_epi16_rr_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x67,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <32 x i8> @llvm.x86.avx512.mask.packuswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> zeroinitializer, i32 -1)
+ ret <32 x i8> %res
+}
+
+define <32 x i8> @test_mask_packus_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <32 x i8> %passThru, i32 %mask) {
+; CHECK-LABEL: test_mask_packus_epi16_rrk_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpackuswb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x67,0xd1]
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <32 x i8> @llvm.x86.avx512.mask.packuswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> %passThru, i32 %mask)
+ ret <32 x i8> %res
+}
+
+define <32 x i8> @test_mask_packus_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i32 %mask) {
+; CHECK-LABEL: test_mask_packus_epi16_rrkz_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x67,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <32 x i8> @llvm.x86.avx512.mask.packuswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> zeroinitializer, i32 %mask)
+ ret <32 x i8> %res
+}
+
+define <32 x i8> @test_mask_packus_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
+; CHECK-LABEL: test_mask_packus_epi16_rm_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x67,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <16 x i16>, <16 x i16>* %ptr_b
+ %res = call <32 x i8> @llvm.x86.avx512.mask.packuswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> zeroinitializer, i32 -1)
+ ret <32 x i8> %res
+}
+
+define <32 x i8> @test_mask_packus_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
+; CHECK-LABEL: test_mask_packus_epi16_rmk_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackuswb (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x67,0x0f]
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <16 x i16>, <16 x i16>* %ptr_b
+ %res = call <32 x i8> @llvm.x86.avx512.mask.packuswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> %passThru, i32 %mask)
+ ret <32 x i8> %res
+}
+
+define <32 x i8> @test_mask_packus_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i32 %mask) {
+; CHECK-LABEL: test_mask_packus_epi16_rmkz_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x67,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %b = load <16 x i16>, <16 x i16>* %ptr_b
+ %res = call <32 x i8> @llvm.x86.avx512.mask.packuswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> zeroinitializer, i32 %mask)
+ ret <32 x i8> %res
+}
+
+declare <32 x i8> @llvm.x86.avx512.mask.packuswb.256(<16 x i16>, <16 x i16>, <32 x i8>, i32)
+
diff --git a/test/CodeGen/X86/avx512bwvl-intrinsics.ll b/test/CodeGen/X86/avx512bwvl-intrinsics.ll
index 7a9d7d7885ff..1d0a3be06943 100644
--- a/test/CodeGen/X86/avx512bwvl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512bwvl-intrinsics.ll
@@ -204,29 +204,29 @@ declare i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8>, <32 x i8>, i32, i32) nou
define <8 x i16> @test_cmp_w_256(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_cmp_w_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xd9,0x00]
-; CHECK-NEXT: vpcmpltw %ymm1, %ymm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xe1,0x01]
-; CHECK-NEXT: vpcmplew %ymm1, %ymm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xe9,0x02]
-; CHECK-NEXT: vpcmpunordw %ymm1, %ymm0, %k6 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xf1,0x03]
-; CHECK-NEXT: vpcmpneqw %ymm1, %ymm0, %k7 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xf9,0x04]
-; CHECK-NEXT: vpcmpnltw %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xd1,0x05]
-; CHECK-NEXT: vpcmpnlew %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xc9,0x06]
-; CHECK-NEXT: vpcmpordw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xc1,0x07]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
-; CHECK-NEXT: kmovw %k3, %ecx ## encoding: [0xc5,0xf8,0x93,0xcb]
+; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xc1,0x00]
+; CHECK-NEXT: vpcmpltw %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xc9,0x01]
+; CHECK-NEXT: vpcmplew %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xd1,0x02]
+; CHECK-NEXT: vpcmpunordw %ymm1, %ymm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xd9,0x03]
+; CHECK-NEXT: vpcmpneqw %ymm1, %ymm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xe1,0x04]
+; CHECK-NEXT: vpcmpnltw %ymm1, %ymm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xe9,0x05]
+; CHECK-NEXT: vpcmpnlew %ymm1, %ymm0, %k6 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xf1,0x06]
+; CHECK-NEXT: vpcmpordw %ymm1, %ymm0, %k7 ## encoding: [0x62,0xf3,0xfd,0x28,0x3f,0xf9,0x07]
+; CHECK-NEXT: kmovd %k1, %eax ## encoding: [0xc5,0xfb,0x93,0xc1]
+; CHECK-NEXT: kmovd %k0, %ecx ## encoding: [0xc5,0xfb,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovd %k2, %eax ## encoding: [0xc5,0xfb,0x93,0xc2]
; CHECK-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovd %k3, %eax ## encoding: [0xc5,0xfb,0x93,0xc3]
; CHECK-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x03]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovd %k4, %eax ## encoding: [0xc5,0xfb,0x93,0xc4]
; CHECK-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x04]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovd %k5, %eax ## encoding: [0xc5,0xfb,0x93,0xc5]
; CHECK-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x05]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovd %k6, %eax ## encoding: [0xc5,0xfb,0x93,0xc6]
; CHECK-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k7, %eax ## encoding: [0xc5,0xfb,0x93,0xc7]
; CHECK-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 0, i16 -1)
@@ -251,30 +251,30 @@ define <8 x i16> @test_cmp_w_256(<16 x i16> %a0, <16 x i16> %a1) {
define <8 x i16> @test_mask_cmp_w_256(<16 x i16> %a0, <16 x i16> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_cmp_w_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k3 ## encoding: [0xc5,0xf8,0x92,0xdf]
-; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k4 {%k3} ## encoding: [0x62,0xf3,0xfd,0x2b,0x3f,0xe1,0x00]
-; CHECK-NEXT: vpcmpltw %ymm1, %ymm0, %k5 {%k3} ## encoding: [0x62,0xf3,0xfd,0x2b,0x3f,0xe9,0x01]
-; CHECK-NEXT: vpcmplew %ymm1, %ymm0, %k6 {%k3} ## encoding: [0x62,0xf3,0xfd,0x2b,0x3f,0xf1,0x02]
-; CHECK-NEXT: vpcmpunordw %ymm1, %ymm0, %k7 {%k3} ## encoding: [0x62,0xf3,0xfd,0x2b,0x3f,0xf9,0x03]
-; CHECK-NEXT: vpcmpneqw %ymm1, %ymm0, %k0 {%k3} ## encoding: [0x62,0xf3,0xfd,0x2b,0x3f,0xc1,0x04]
-; CHECK-NEXT: vpcmpnltw %ymm1, %ymm0, %k2 {%k3} ## encoding: [0x62,0xf3,0xfd,0x2b,0x3f,0xd1,0x05]
-; CHECK-NEXT: vpcmpnlew %ymm1, %ymm0, %k1 {%k3} ## encoding: [0x62,0xf3,0xfd,0x2b,0x3f,0xc9,0x06]
-; CHECK-NEXT: vpcmpordw %ymm1, %ymm0, %k3 {%k3} ## encoding: [0x62,0xf3,0xfd,0x2b,0x3f,0xd9,0x07]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
-; CHECK-NEXT: kmovw %k4, %ecx ## encoding: [0xc5,0xf8,0x93,0xcc]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x3f,0xc1,0x00]
+; CHECK-NEXT: vpcmpltw %ymm1, %ymm0, %k2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x3f,0xd1,0x01]
+; CHECK-NEXT: vpcmplew %ymm1, %ymm0, %k3 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x3f,0xd9,0x02]
+; CHECK-NEXT: vpcmpunordw %ymm1, %ymm0, %k4 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x3f,0xe1,0x03]
+; CHECK-NEXT: vpcmpneqw %ymm1, %ymm0, %k5 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x3f,0xe9,0x04]
+; CHECK-NEXT: vpcmpnltw %ymm1, %ymm0, %k6 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x3f,0xf1,0x05]
+; CHECK-NEXT: vpcmpnlew %ymm1, %ymm0, %k7 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x3f,0xf9,0x06]
+; CHECK-NEXT: vpcmpordw %ymm1, %ymm0, %k1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x3f,0xc9,0x07]
+; CHECK-NEXT: kmovd %k2, %eax ## encoding: [0xc5,0xfb,0x93,0xc2]
+; CHECK-NEXT: kmovd %k0, %ecx ## encoding: [0xc5,0xfb,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovd %k3, %eax ## encoding: [0xc5,0xfb,0x93,0xc3]
; CHECK-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x02]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovd %k4, %eax ## encoding: [0xc5,0xfb,0x93,0xc4]
; CHECK-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x03]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k5, %eax ## encoding: [0xc5,0xfb,0x93,0xc5]
; CHECK-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x04]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovd %k6, %eax ## encoding: [0xc5,0xfb,0x93,0xc6]
; CHECK-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x05]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovd %k7, %eax ## encoding: [0xc5,0xfb,0x93,0xc7]
; CHECK-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovd %k1, %eax ## encoding: [0xc5,0xfb,0x93,0xc1]
; CHECK-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 0, i16 %mask)
@@ -301,29 +301,29 @@ declare i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16>, <16 x i16>, i32, i16) no
define <8 x i16> @test_ucmp_w_256(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_ucmp_w_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpequw %ymm1, %ymm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x28,0x3e,0xd9,0x00]
-; CHECK-NEXT: vpcmpltuw %ymm1, %ymm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x28,0x3e,0xe1,0x01]
-; CHECK-NEXT: vpcmpleuw %ymm1, %ymm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x28,0x3e,0xe9,0x02]
-; CHECK-NEXT: vpcmpunorduw %ymm1, %ymm0, %k6 ## encoding: [0x62,0xf3,0xfd,0x28,0x3e,0xf1,0x03]
-; CHECK-NEXT: vpcmpnequw %ymm1, %ymm0, %k7 ## encoding: [0x62,0xf3,0xfd,0x28,0x3e,0xf9,0x04]
-; CHECK-NEXT: vpcmpnltuw %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x28,0x3e,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleuw %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x3e,0xc9,0x06]
-; CHECK-NEXT: vpcmporduw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x28,0x3e,0xc1,0x07]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
-; CHECK-NEXT: kmovw %k3, %ecx ## encoding: [0xc5,0xf8,0x93,0xcb]
+; CHECK-NEXT: vpcmpequw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x28,0x3e,0xc1,0x00]
+; CHECK-NEXT: vpcmpltuw %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x3e,0xc9,0x01]
+; CHECK-NEXT: vpcmpleuw %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x28,0x3e,0xd1,0x02]
+; CHECK-NEXT: vpcmpunorduw %ymm1, %ymm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x28,0x3e,0xd9,0x03]
+; CHECK-NEXT: vpcmpnequw %ymm1, %ymm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x28,0x3e,0xe1,0x04]
+; CHECK-NEXT: vpcmpnltuw %ymm1, %ymm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x28,0x3e,0xe9,0x05]
+; CHECK-NEXT: vpcmpnleuw %ymm1, %ymm0, %k6 ## encoding: [0x62,0xf3,0xfd,0x28,0x3e,0xf1,0x06]
+; CHECK-NEXT: vpcmporduw %ymm1, %ymm0, %k7 ## encoding: [0x62,0xf3,0xfd,0x28,0x3e,0xf9,0x07]
+; CHECK-NEXT: kmovd %k1, %eax ## encoding: [0xc5,0xfb,0x93,0xc1]
+; CHECK-NEXT: kmovd %k0, %ecx ## encoding: [0xc5,0xfb,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovd %k2, %eax ## encoding: [0xc5,0xfb,0x93,0xc2]
; CHECK-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovd %k3, %eax ## encoding: [0xc5,0xfb,0x93,0xc3]
; CHECK-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x03]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovd %k4, %eax ## encoding: [0xc5,0xfb,0x93,0xc4]
; CHECK-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x04]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovd %k5, %eax ## encoding: [0xc5,0xfb,0x93,0xc5]
; CHECK-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x05]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovd %k6, %eax ## encoding: [0xc5,0xfb,0x93,0xc6]
; CHECK-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k7, %eax ## encoding: [0xc5,0xfb,0x93,0xc7]
; CHECK-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 0, i16 -1)
@@ -348,30 +348,30 @@ define <8 x i16> @test_ucmp_w_256(<16 x i16> %a0, <16 x i16> %a1) {
define <8 x i16> @test_mask_ucmp_w_256(<16 x i16> %a0, <16 x i16> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_ucmp_w_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k3 ## encoding: [0xc5,0xf8,0x92,0xdf]
-; CHECK-NEXT: vpcmpequw %ymm1, %ymm0, %k4 {%k3} ## encoding: [0x62,0xf3,0xfd,0x2b,0x3e,0xe1,0x00]
-; CHECK-NEXT: vpcmpltuw %ymm1, %ymm0, %k5 {%k3} ## encoding: [0x62,0xf3,0xfd,0x2b,0x3e,0xe9,0x01]
-; CHECK-NEXT: vpcmpleuw %ymm1, %ymm0, %k6 {%k3} ## encoding: [0x62,0xf3,0xfd,0x2b,0x3e,0xf1,0x02]
-; CHECK-NEXT: vpcmpunorduw %ymm1, %ymm0, %k7 {%k3} ## encoding: [0x62,0xf3,0xfd,0x2b,0x3e,0xf9,0x03]
-; CHECK-NEXT: vpcmpnequw %ymm1, %ymm0, %k0 {%k3} ## encoding: [0x62,0xf3,0xfd,0x2b,0x3e,0xc1,0x04]
-; CHECK-NEXT: vpcmpnltuw %ymm1, %ymm0, %k2 {%k3} ## encoding: [0x62,0xf3,0xfd,0x2b,0x3e,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleuw %ymm1, %ymm0, %k1 {%k3} ## encoding: [0x62,0xf3,0xfd,0x2b,0x3e,0xc9,0x06]
-; CHECK-NEXT: vpcmporduw %ymm1, %ymm0, %k3 {%k3} ## encoding: [0x62,0xf3,0xfd,0x2b,0x3e,0xd9,0x07]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
-; CHECK-NEXT: kmovw %k4, %ecx ## encoding: [0xc5,0xf8,0x93,0xcc]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpcmpequw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x3e,0xc1,0x00]
+; CHECK-NEXT: vpcmpltuw %ymm1, %ymm0, %k2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x3e,0xd1,0x01]
+; CHECK-NEXT: vpcmpleuw %ymm1, %ymm0, %k3 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x3e,0xd9,0x02]
+; CHECK-NEXT: vpcmpunorduw %ymm1, %ymm0, %k4 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x3e,0xe1,0x03]
+; CHECK-NEXT: vpcmpnequw %ymm1, %ymm0, %k5 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x3e,0xe9,0x04]
+; CHECK-NEXT: vpcmpnltuw %ymm1, %ymm0, %k6 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x3e,0xf1,0x05]
+; CHECK-NEXT: vpcmpnleuw %ymm1, %ymm0, %k7 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x3e,0xf9,0x06]
+; CHECK-NEXT: vpcmporduw %ymm1, %ymm0, %k1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x3e,0xc9,0x07]
+; CHECK-NEXT: kmovd %k2, %eax ## encoding: [0xc5,0xfb,0x93,0xc2]
+; CHECK-NEXT: kmovd %k0, %ecx ## encoding: [0xc5,0xfb,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovd %k3, %eax ## encoding: [0xc5,0xfb,0x93,0xc3]
; CHECK-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x02]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovd %k4, %eax ## encoding: [0xc5,0xfb,0x93,0xc4]
; CHECK-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x03]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k5, %eax ## encoding: [0xc5,0xfb,0x93,0xc5]
; CHECK-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x04]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovd %k6, %eax ## encoding: [0xc5,0xfb,0x93,0xc6]
; CHECK-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x05]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovd %k7, %eax ## encoding: [0xc5,0xfb,0x93,0xc7]
; CHECK-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovd %k1, %eax ## encoding: [0xc5,0xfb,0x93,0xc1]
; CHECK-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 0, i16 %mask)
@@ -401,7 +401,8 @@ define i16 @test_pcmpeq_b_128(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: test_pcmpeq_b_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x74,0xc1]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.b.128(<16 x i8> %a, <16 x i8> %b, i16 -1)
ret i16 %res
@@ -410,9 +411,10 @@ define i16 @test_pcmpeq_b_128(<16 x i8> %a, <16 x i8> %b) {
define i16 @test_mask_pcmpeq_b_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-LABEL: test_mask_pcmpeq_b_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x74,0xc1]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.b.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
ret i16 %res
@@ -424,7 +426,8 @@ define i8 @test_pcmpeq_w_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_pcmpeq_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x75,0xc1]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.w.128(<8 x i16> %a, <8 x i16> %b, i8 -1)
ret i8 %res
@@ -433,9 +436,10 @@ define i8 @test_pcmpeq_w_128(<8 x i16> %a, <8 x i16> %b) {
define i8 @test_mask_pcmpeq_w_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-LABEL: test_mask_pcmpeq_w_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x75,0xc1]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.w.128(<8 x i16> %a, <8 x i16> %b, i8 %mask)
ret i8 %res
@@ -447,7 +451,8 @@ define i16 @test_pcmpgt_b_128(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: test_pcmpgt_b_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x64,0xc1]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.b.128(<16 x i8> %a, <16 x i8> %b, i16 -1)
ret i16 %res
@@ -456,9 +461,10 @@ define i16 @test_pcmpgt_b_128(<16 x i8> %a, <16 x i8> %b) {
define i16 @test_mask_pcmpgt_b_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-LABEL: test_mask_pcmpgt_b_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x64,0xc1]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.b.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
ret i16 %res
@@ -470,7 +476,8 @@ define i8 @test_pcmpgt_w_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_pcmpgt_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x65,0xc1]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.w.128(<8 x i16> %a, <8 x i16> %b, i8 -1)
ret i8 %res
@@ -479,9 +486,10 @@ define i8 @test_pcmpgt_w_128(<8 x i16> %a, <8 x i16> %b) {
define i8 @test_mask_pcmpgt_w_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-LABEL: test_mask_pcmpgt_w_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x65,0xc1]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.w.128(<8 x i16> %a, <8 x i16> %b, i8 %mask)
ret i8 %res
@@ -492,29 +500,29 @@ declare i8 @llvm.x86.avx512.mask.pcmpgt.w.128(<8 x i16>, <8 x i16>, i8)
define <8 x i16> @test_cmp_b_128(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_cmp_b_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0x7d,0x08,0x3f,0xd9,0x00]
-; CHECK-NEXT: vpcmpltb %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0x7d,0x08,0x3f,0xe1,0x01]
-; CHECK-NEXT: vpcmpleb %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0x7d,0x08,0x3f,0xe9,0x02]
-; CHECK-NEXT: vpcmpunordb %xmm1, %xmm0, %k6 ## encoding: [0x62,0xf3,0x7d,0x08,0x3f,0xf1,0x03]
-; CHECK-NEXT: vpcmpneqb %xmm1, %xmm0, %k7 ## encoding: [0x62,0xf3,0x7d,0x08,0x3f,0xf9,0x04]
-; CHECK-NEXT: vpcmpnltb %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x08,0x3f,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleb %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x3f,0xc9,0x06]
-; CHECK-NEXT: vpcmpordb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x08,0x3f,0xc1,0x07]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
-; CHECK-NEXT: kmovw %k3, %ecx ## encoding: [0xc5,0xf8,0x93,0xcb]
+; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x08,0x3f,0xc1,0x00]
+; CHECK-NEXT: vpcmpltb %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x3f,0xc9,0x01]
+; CHECK-NEXT: vpcmpleb %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x08,0x3f,0xd1,0x02]
+; CHECK-NEXT: vpcmpunordb %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0x7d,0x08,0x3f,0xd9,0x03]
+; CHECK-NEXT: vpcmpneqb %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0x7d,0x08,0x3f,0xe1,0x04]
+; CHECK-NEXT: vpcmpnltb %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0x7d,0x08,0x3f,0xe9,0x05]
+; CHECK-NEXT: vpcmpnleb %xmm1, %xmm0, %k6 ## encoding: [0x62,0xf3,0x7d,0x08,0x3f,0xf1,0x06]
+; CHECK-NEXT: vpcmpordb %xmm1, %xmm0, %k7 ## encoding: [0x62,0xf3,0x7d,0x08,0x3f,0xf9,0x07]
+; CHECK-NEXT: kmovd %k1, %eax ## encoding: [0xc5,0xfb,0x93,0xc1]
+; CHECK-NEXT: kmovd %k0, %ecx ## encoding: [0xc5,0xfb,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovd %k2, %eax ## encoding: [0xc5,0xfb,0x93,0xc2]
; CHECK-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovd %k3, %eax ## encoding: [0xc5,0xfb,0x93,0xc3]
; CHECK-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x03]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovd %k4, %eax ## encoding: [0xc5,0xfb,0x93,0xc4]
; CHECK-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x04]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovd %k5, %eax ## encoding: [0xc5,0xfb,0x93,0xc5]
; CHECK-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x05]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovd %k6, %eax ## encoding: [0xc5,0xfb,0x93,0xc6]
; CHECK-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k7, %eax ## encoding: [0xc5,0xfb,0x93,0xc7]
; CHECK-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 0, i16 -1)
@@ -539,30 +547,30 @@ define <8 x i16> @test_cmp_b_128(<16 x i8> %a0, <16 x i8> %a1) {
define <8 x i16> @test_mask_cmp_b_128(<16 x i8> %a0, <16 x i8> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_cmp_b_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k3 ## encoding: [0xc5,0xf8,0x92,0xdf]
-; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k4 {%k3} ## encoding: [0x62,0xf3,0x7d,0x0b,0x3f,0xe1,0x00]
-; CHECK-NEXT: vpcmpltb %xmm1, %xmm0, %k5 {%k3} ## encoding: [0x62,0xf3,0x7d,0x0b,0x3f,0xe9,0x01]
-; CHECK-NEXT: vpcmpleb %xmm1, %xmm0, %k6 {%k3} ## encoding: [0x62,0xf3,0x7d,0x0b,0x3f,0xf1,0x02]
-; CHECK-NEXT: vpcmpunordb %xmm1, %xmm0, %k7 {%k3} ## encoding: [0x62,0xf3,0x7d,0x0b,0x3f,0xf9,0x03]
-; CHECK-NEXT: vpcmpneqb %xmm1, %xmm0, %k0 {%k3} ## encoding: [0x62,0xf3,0x7d,0x0b,0x3f,0xc1,0x04]
-; CHECK-NEXT: vpcmpnltb %xmm1, %xmm0, %k2 {%k3} ## encoding: [0x62,0xf3,0x7d,0x0b,0x3f,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleb %xmm1, %xmm0, %k1 {%k3} ## encoding: [0x62,0xf3,0x7d,0x0b,0x3f,0xc9,0x06]
-; CHECK-NEXT: vpcmpordb %xmm1, %xmm0, %k3 {%k3} ## encoding: [0x62,0xf3,0x7d,0x0b,0x3f,0xd9,0x07]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
-; CHECK-NEXT: kmovw %k4, %ecx ## encoding: [0xc5,0xf8,0x93,0xcc]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x3f,0xc1,0x00]
+; CHECK-NEXT: vpcmpltb %xmm1, %xmm0, %k2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x3f,0xd1,0x01]
+; CHECK-NEXT: vpcmpleb %xmm1, %xmm0, %k3 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x3f,0xd9,0x02]
+; CHECK-NEXT: vpcmpunordb %xmm1, %xmm0, %k4 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x3f,0xe1,0x03]
+; CHECK-NEXT: vpcmpneqb %xmm1, %xmm0, %k5 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x3f,0xe9,0x04]
+; CHECK-NEXT: vpcmpnltb %xmm1, %xmm0, %k6 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x3f,0xf1,0x05]
+; CHECK-NEXT: vpcmpnleb %xmm1, %xmm0, %k7 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x3f,0xf9,0x06]
+; CHECK-NEXT: vpcmpordb %xmm1, %xmm0, %k1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x3f,0xc9,0x07]
+; CHECK-NEXT: kmovd %k2, %eax ## encoding: [0xc5,0xfb,0x93,0xc2]
+; CHECK-NEXT: kmovd %k0, %ecx ## encoding: [0xc5,0xfb,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovd %k3, %eax ## encoding: [0xc5,0xfb,0x93,0xc3]
; CHECK-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x02]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovd %k4, %eax ## encoding: [0xc5,0xfb,0x93,0xc4]
; CHECK-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x03]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k5, %eax ## encoding: [0xc5,0xfb,0x93,0xc5]
; CHECK-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x04]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovd %k6, %eax ## encoding: [0xc5,0xfb,0x93,0xc6]
; CHECK-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x05]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovd %k7, %eax ## encoding: [0xc5,0xfb,0x93,0xc7]
; CHECK-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovd %k1, %eax ## encoding: [0xc5,0xfb,0x93,0xc1]
; CHECK-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 0, i16 %mask)
@@ -589,29 +597,29 @@ declare i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8>, <16 x i8>, i32, i16) noun
define <8 x i16> @test_ucmp_b_128(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_ucmp_b_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpequb %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0x7d,0x08,0x3e,0xd9,0x00]
-; CHECK-NEXT: vpcmpltub %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0x7d,0x08,0x3e,0xe1,0x01]
-; CHECK-NEXT: vpcmpleub %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0x7d,0x08,0x3e,0xe9,0x02]
-; CHECK-NEXT: vpcmpunordub %xmm1, %xmm0, %k6 ## encoding: [0x62,0xf3,0x7d,0x08,0x3e,0xf1,0x03]
-; CHECK-NEXT: vpcmpnequb %xmm1, %xmm0, %k7 ## encoding: [0x62,0xf3,0x7d,0x08,0x3e,0xf9,0x04]
-; CHECK-NEXT: vpcmpnltub %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x08,0x3e,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleub %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x3e,0xc9,0x06]
-; CHECK-NEXT: vpcmpordub %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x08,0x3e,0xc1,0x07]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
-; CHECK-NEXT: kmovw %k3, %ecx ## encoding: [0xc5,0xf8,0x93,0xcb]
+; CHECK-NEXT: vpcmpequb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x08,0x3e,0xc1,0x00]
+; CHECK-NEXT: vpcmpltub %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x3e,0xc9,0x01]
+; CHECK-NEXT: vpcmpleub %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x08,0x3e,0xd1,0x02]
+; CHECK-NEXT: vpcmpunordub %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0x7d,0x08,0x3e,0xd9,0x03]
+; CHECK-NEXT: vpcmpnequb %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0x7d,0x08,0x3e,0xe1,0x04]
+; CHECK-NEXT: vpcmpnltub %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0x7d,0x08,0x3e,0xe9,0x05]
+; CHECK-NEXT: vpcmpnleub %xmm1, %xmm0, %k6 ## encoding: [0x62,0xf3,0x7d,0x08,0x3e,0xf1,0x06]
+; CHECK-NEXT: vpcmpordub %xmm1, %xmm0, %k7 ## encoding: [0x62,0xf3,0x7d,0x08,0x3e,0xf9,0x07]
+; CHECK-NEXT: kmovd %k1, %eax ## encoding: [0xc5,0xfb,0x93,0xc1]
+; CHECK-NEXT: kmovd %k0, %ecx ## encoding: [0xc5,0xfb,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovd %k2, %eax ## encoding: [0xc5,0xfb,0x93,0xc2]
; CHECK-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovd %k3, %eax ## encoding: [0xc5,0xfb,0x93,0xc3]
; CHECK-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x03]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovd %k4, %eax ## encoding: [0xc5,0xfb,0x93,0xc4]
; CHECK-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x04]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovd %k5, %eax ## encoding: [0xc5,0xfb,0x93,0xc5]
; CHECK-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x05]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovd %k6, %eax ## encoding: [0xc5,0xfb,0x93,0xc6]
; CHECK-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k7, %eax ## encoding: [0xc5,0xfb,0x93,0xc7]
; CHECK-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 0, i16 -1)
@@ -636,30 +644,30 @@ define <8 x i16> @test_ucmp_b_128(<16 x i8> %a0, <16 x i8> %a1) {
define <8 x i16> @test_mask_ucmp_b_128(<16 x i8> %a0, <16 x i8> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_ucmp_b_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k3 ## encoding: [0xc5,0xf8,0x92,0xdf]
-; CHECK-NEXT: vpcmpequb %xmm1, %xmm0, %k4 {%k3} ## encoding: [0x62,0xf3,0x7d,0x0b,0x3e,0xe1,0x00]
-; CHECK-NEXT: vpcmpltub %xmm1, %xmm0, %k5 {%k3} ## encoding: [0x62,0xf3,0x7d,0x0b,0x3e,0xe9,0x01]
-; CHECK-NEXT: vpcmpleub %xmm1, %xmm0, %k6 {%k3} ## encoding: [0x62,0xf3,0x7d,0x0b,0x3e,0xf1,0x02]
-; CHECK-NEXT: vpcmpunordub %xmm1, %xmm0, %k7 {%k3} ## encoding: [0x62,0xf3,0x7d,0x0b,0x3e,0xf9,0x03]
-; CHECK-NEXT: vpcmpnequb %xmm1, %xmm0, %k0 {%k3} ## encoding: [0x62,0xf3,0x7d,0x0b,0x3e,0xc1,0x04]
-; CHECK-NEXT: vpcmpnltub %xmm1, %xmm0, %k2 {%k3} ## encoding: [0x62,0xf3,0x7d,0x0b,0x3e,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleub %xmm1, %xmm0, %k1 {%k3} ## encoding: [0x62,0xf3,0x7d,0x0b,0x3e,0xc9,0x06]
-; CHECK-NEXT: vpcmpordub %xmm1, %xmm0, %k3 {%k3} ## encoding: [0x62,0xf3,0x7d,0x0b,0x3e,0xd9,0x07]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
-; CHECK-NEXT: kmovw %k4, %ecx ## encoding: [0xc5,0xf8,0x93,0xcc]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpcmpequb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x3e,0xc1,0x00]
+; CHECK-NEXT: vpcmpltub %xmm1, %xmm0, %k2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x3e,0xd1,0x01]
+; CHECK-NEXT: vpcmpleub %xmm1, %xmm0, %k3 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x3e,0xd9,0x02]
+; CHECK-NEXT: vpcmpunordub %xmm1, %xmm0, %k4 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x3e,0xe1,0x03]
+; CHECK-NEXT: vpcmpnequb %xmm1, %xmm0, %k5 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x3e,0xe9,0x04]
+; CHECK-NEXT: vpcmpnltub %xmm1, %xmm0, %k6 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x3e,0xf1,0x05]
+; CHECK-NEXT: vpcmpnleub %xmm1, %xmm0, %k7 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x3e,0xf9,0x06]
+; CHECK-NEXT: vpcmpordub %xmm1, %xmm0, %k1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x3e,0xc9,0x07]
+; CHECK-NEXT: kmovd %k2, %eax ## encoding: [0xc5,0xfb,0x93,0xc2]
+; CHECK-NEXT: kmovd %k0, %ecx ## encoding: [0xc5,0xfb,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovd %k3, %eax ## encoding: [0xc5,0xfb,0x93,0xc3]
; CHECK-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x02]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovd %k4, %eax ## encoding: [0xc5,0xfb,0x93,0xc4]
; CHECK-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x03]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k5, %eax ## encoding: [0xc5,0xfb,0x93,0xc5]
; CHECK-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x04]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovd %k6, %eax ## encoding: [0xc5,0xfb,0x93,0xc6]
; CHECK-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x05]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovd %k7, %eax ## encoding: [0xc5,0xfb,0x93,0xc7]
; CHECK-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovd %k1, %eax ## encoding: [0xc5,0xfb,0x93,0xc1]
; CHECK-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 0, i16 %mask)
@@ -686,29 +694,29 @@ declare i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8>, <16 x i8>, i32, i16) nou
define <8 x i8> @test_cmp_w_128(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_cmp_w_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x08,0x3f,0xd9,0x00]
-; CHECK-NEXT: vpcmpltw %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x08,0x3f,0xe1,0x01]
-; CHECK-NEXT: vpcmplew %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x08,0x3f,0xe9,0x02]
-; CHECK-NEXT: vpcmpunordw %xmm1, %xmm0, %k6 ## encoding: [0x62,0xf3,0xfd,0x08,0x3f,0xf1,0x03]
-; CHECK-NEXT: vpcmpneqw %xmm1, %xmm0, %k7 ## encoding: [0x62,0xf3,0xfd,0x08,0x3f,0xf9,0x04]
-; CHECK-NEXT: vpcmpnltw %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x08,0x3f,0xd1,0x05]
-; CHECK-NEXT: vpcmpnlew %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x3f,0xc9,0x06]
-; CHECK-NEXT: vpcmpordw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x08,0x3f,0xc1,0x07]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
-; CHECK-NEXT: kmovw %k3, %ecx ## encoding: [0xc5,0xf8,0x93,0xcb]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x08,0x3f,0xc1,0x00]
+; CHECK-NEXT: vpcmpltw %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x3f,0xc9,0x01]
+; CHECK-NEXT: vpcmplew %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x08,0x3f,0xd1,0x02]
+; CHECK-NEXT: vpcmpunordw %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x08,0x3f,0xd9,0x03]
+; CHECK-NEXT: vpcmpneqw %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x08,0x3f,0xe1,0x04]
+; CHECK-NEXT: vpcmpnltw %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x08,0x3f,0xe9,0x05]
+; CHECK-NEXT: vpcmpnlew %xmm1, %xmm0, %k6 ## encoding: [0x62,0xf3,0xfd,0x08,0x3f,0xf1,0x06]
+; CHECK-NEXT: vpcmpordw %xmm1, %xmm0, %k7 ## encoding: [0x62,0xf3,0xfd,0x08,0x3f,0xf9,0x07]
+; CHECK-NEXT: kmovd %k1, %eax ## encoding: [0xc5,0xfb,0x93,0xc1]
+; CHECK-NEXT: kmovd %k0, %ecx ## encoding: [0xc5,0xfb,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovd %k2, %eax ## encoding: [0xc5,0xfb,0x93,0xc2]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovd %k3, %eax ## encoding: [0xc5,0xfb,0x93,0xc3]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovd %k4, %eax ## encoding: [0xc5,0xfb,0x93,0xc4]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovd %k5, %eax ## encoding: [0xc5,0xfb,0x93,0xc5]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovd %k6, %eax ## encoding: [0xc5,0xfb,0x93,0xc6]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k7, %eax ## encoding: [0xc5,0xfb,0x93,0xc7]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 0, i8 -1)
@@ -733,30 +741,30 @@ define <8 x i8> @test_cmp_w_128(<8 x i16> %a0, <8 x i16> %a1) {
define <8 x i8> @test_mask_cmp_w_128(<8 x i16> %a0, <8 x i16> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_w_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k3 ## encoding: [0xc5,0xf8,0x92,0xdf]
-; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k4 {%k3} ## encoding: [0x62,0xf3,0xfd,0x0b,0x3f,0xe1,0x00]
-; CHECK-NEXT: vpcmpltw %xmm1, %xmm0, %k5 {%k3} ## encoding: [0x62,0xf3,0xfd,0x0b,0x3f,0xe9,0x01]
-; CHECK-NEXT: vpcmplew %xmm1, %xmm0, %k6 {%k3} ## encoding: [0x62,0xf3,0xfd,0x0b,0x3f,0xf1,0x02]
-; CHECK-NEXT: vpcmpunordw %xmm1, %xmm0, %k7 {%k3} ## encoding: [0x62,0xf3,0xfd,0x0b,0x3f,0xf9,0x03]
-; CHECK-NEXT: vpcmpneqw %xmm1, %xmm0, %k0 {%k3} ## encoding: [0x62,0xf3,0xfd,0x0b,0x3f,0xc1,0x04]
-; CHECK-NEXT: vpcmpnltw %xmm1, %xmm0, %k2 {%k3} ## encoding: [0x62,0xf3,0xfd,0x0b,0x3f,0xd1,0x05]
-; CHECK-NEXT: vpcmpnlew %xmm1, %xmm0, %k1 {%k3} ## encoding: [0x62,0xf3,0xfd,0x0b,0x3f,0xc9,0x06]
-; CHECK-NEXT: vpcmpordw %xmm1, %xmm0, %k3 {%k3} ## encoding: [0x62,0xf3,0xfd,0x0b,0x3f,0xd9,0x07]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
-; CHECK-NEXT: kmovw %k4, %ecx ## encoding: [0xc5,0xf8,0x93,0xcc]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x3f,0xc1,0x00]
+; CHECK-NEXT: vpcmpltw %xmm1, %xmm0, %k2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x3f,0xd1,0x01]
+; CHECK-NEXT: vpcmplew %xmm1, %xmm0, %k3 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x3f,0xd9,0x02]
+; CHECK-NEXT: vpcmpunordw %xmm1, %xmm0, %k4 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x3f,0xe1,0x03]
+; CHECK-NEXT: vpcmpneqw %xmm1, %xmm0, %k5 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x3f,0xe9,0x04]
+; CHECK-NEXT: vpcmpnltw %xmm1, %xmm0, %k6 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x3f,0xf1,0x05]
+; CHECK-NEXT: vpcmpnlew %xmm1, %xmm0, %k7 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x3f,0xf9,0x06]
+; CHECK-NEXT: vpcmpordw %xmm1, %xmm0, %k1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x3f,0xc9,0x07]
+; CHECK-NEXT: kmovd %k2, %eax ## encoding: [0xc5,0xfb,0x93,0xc2]
+; CHECK-NEXT: kmovd %k0, %ecx ## encoding: [0xc5,0xfb,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovd %k3, %eax ## encoding: [0xc5,0xfb,0x93,0xc3]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovd %k4, %eax ## encoding: [0xc5,0xfb,0x93,0xc4]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k5, %eax ## encoding: [0xc5,0xfb,0x93,0xc5]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovd %k6, %eax ## encoding: [0xc5,0xfb,0x93,0xc6]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovd %k7, %eax ## encoding: [0xc5,0xfb,0x93,0xc7]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovd %k1, %eax ## encoding: [0xc5,0xfb,0x93,0xc1]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 0, i8 %mask)
@@ -783,29 +791,29 @@ declare i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16>, <8 x i16>, i32, i8) nounwi
define <8 x i8> @test_ucmp_w_128(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_ucmp_w_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpequw %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x08,0x3e,0xd9,0x00]
-; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x08,0x3e,0xe1,0x01]
-; CHECK-NEXT: vpcmpleuw %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x08,0x3e,0xe9,0x02]
-; CHECK-NEXT: vpcmpunorduw %xmm1, %xmm0, %k6 ## encoding: [0x62,0xf3,0xfd,0x08,0x3e,0xf1,0x03]
-; CHECK-NEXT: vpcmpnequw %xmm1, %xmm0, %k7 ## encoding: [0x62,0xf3,0xfd,0x08,0x3e,0xf9,0x04]
-; CHECK-NEXT: vpcmpnltuw %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x08,0x3e,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleuw %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x3e,0xc9,0x06]
-; CHECK-NEXT: vpcmporduw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x08,0x3e,0xc1,0x07]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
-; CHECK-NEXT: kmovw %k3, %ecx ## encoding: [0xc5,0xf8,0x93,0xcb]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: vpcmpequw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x08,0x3e,0xc1,0x00]
+; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x3e,0xc9,0x01]
+; CHECK-NEXT: vpcmpleuw %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x08,0x3e,0xd1,0x02]
+; CHECK-NEXT: vpcmpunorduw %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x08,0x3e,0xd9,0x03]
+; CHECK-NEXT: vpcmpnequw %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x08,0x3e,0xe1,0x04]
+; CHECK-NEXT: vpcmpnltuw %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x08,0x3e,0xe9,0x05]
+; CHECK-NEXT: vpcmpnleuw %xmm1, %xmm0, %k6 ## encoding: [0x62,0xf3,0xfd,0x08,0x3e,0xf1,0x06]
+; CHECK-NEXT: vpcmporduw %xmm1, %xmm0, %k7 ## encoding: [0x62,0xf3,0xfd,0x08,0x3e,0xf9,0x07]
+; CHECK-NEXT: kmovd %k1, %eax ## encoding: [0xc5,0xfb,0x93,0xc1]
+; CHECK-NEXT: kmovd %k0, %ecx ## encoding: [0xc5,0xfb,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovd %k2, %eax ## encoding: [0xc5,0xfb,0x93,0xc2]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovd %k3, %eax ## encoding: [0xc5,0xfb,0x93,0xc3]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovd %k4, %eax ## encoding: [0xc5,0xfb,0x93,0xc4]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovd %k5, %eax ## encoding: [0xc5,0xfb,0x93,0xc5]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovd %k6, %eax ## encoding: [0xc5,0xfb,0x93,0xc6]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k7, %eax ## encoding: [0xc5,0xfb,0x93,0xc7]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 0, i8 -1)
@@ -830,30 +838,30 @@ define <8 x i8> @test_ucmp_w_128(<8 x i16> %a0, <8 x i16> %a1) {
define <8 x i8> @test_mask_ucmp_w_128(<8 x i16> %a0, <8 x i16> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_w_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k3 ## encoding: [0xc5,0xf8,0x92,0xdf]
-; CHECK-NEXT: vpcmpequw %xmm1, %xmm0, %k4 {%k3} ## encoding: [0x62,0xf3,0xfd,0x0b,0x3e,0xe1,0x00]
-; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k5 {%k3} ## encoding: [0x62,0xf3,0xfd,0x0b,0x3e,0xe9,0x01]
-; CHECK-NEXT: vpcmpleuw %xmm1, %xmm0, %k6 {%k3} ## encoding: [0x62,0xf3,0xfd,0x0b,0x3e,0xf1,0x02]
-; CHECK-NEXT: vpcmpunorduw %xmm1, %xmm0, %k7 {%k3} ## encoding: [0x62,0xf3,0xfd,0x0b,0x3e,0xf9,0x03]
-; CHECK-NEXT: vpcmpnequw %xmm1, %xmm0, %k0 {%k3} ## encoding: [0x62,0xf3,0xfd,0x0b,0x3e,0xc1,0x04]
-; CHECK-NEXT: vpcmpnltuw %xmm1, %xmm0, %k2 {%k3} ## encoding: [0x62,0xf3,0xfd,0x0b,0x3e,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleuw %xmm1, %xmm0, %k1 {%k3} ## encoding: [0x62,0xf3,0xfd,0x0b,0x3e,0xc9,0x06]
-; CHECK-NEXT: vpcmporduw %xmm1, %xmm0, %k3 {%k3} ## encoding: [0x62,0xf3,0xfd,0x0b,0x3e,0xd9,0x07]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
-; CHECK-NEXT: kmovw %k4, %ecx ## encoding: [0xc5,0xf8,0x93,0xcc]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpcmpequw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x3e,0xc1,0x00]
+; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x3e,0xd1,0x01]
+; CHECK-NEXT: vpcmpleuw %xmm1, %xmm0, %k3 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x3e,0xd9,0x02]
+; CHECK-NEXT: vpcmpunorduw %xmm1, %xmm0, %k4 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x3e,0xe1,0x03]
+; CHECK-NEXT: vpcmpnequw %xmm1, %xmm0, %k5 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x3e,0xe9,0x04]
+; CHECK-NEXT: vpcmpnltuw %xmm1, %xmm0, %k6 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x3e,0xf1,0x05]
+; CHECK-NEXT: vpcmpnleuw %xmm1, %xmm0, %k7 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x3e,0xf9,0x06]
+; CHECK-NEXT: vpcmporduw %xmm1, %xmm0, %k1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x3e,0xc9,0x07]
+; CHECK-NEXT: kmovd %k2, %eax ## encoding: [0xc5,0xfb,0x93,0xc2]
+; CHECK-NEXT: kmovd %k0, %ecx ## encoding: [0xc5,0xfb,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovd %k3, %eax ## encoding: [0xc5,0xfb,0x93,0xc3]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovd %k4, %eax ## encoding: [0xc5,0xfb,0x93,0xc4]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k5, %eax ## encoding: [0xc5,0xfb,0x93,0xc5]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovd %k6, %eax ## encoding: [0xc5,0xfb,0x93,0xc6]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovd %k7, %eax ## encoding: [0xc5,0xfb,0x93,0xc7]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovd %k1, %eax ## encoding: [0xc5,0xfb,0x93,0xc1]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 0, i8 %mask)
@@ -877,1145 +885,38 @@ define <8 x i8> @test_mask_ucmp_w_128(<8 x i16> %a0, <8 x i16> %a1, i8 %mask) {
declare i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16>, <8 x i16>, i32, i8) nounwind readnone
-declare <8 x float> @llvm.x86.avx512.mask.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone
-
-define <8 x float> @test_mask_vfmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfmadd256_ps:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfmadd132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x98,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask.vfmadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) nounwind
- ret <8 x float> %res
-}
-
-declare <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
-
-define <4 x float> @test_mask_vfmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfmadd128_ps:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfmadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x98,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
- ret <4 x float> %res
-}
-
-declare <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)
-
-define <4 x double> @test_mask_fmadd256_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c, i8 %mask) {
-; CHECK-LABEL: test_mask_fmadd256_pd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfmadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x98,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a, <4 x double> %b, <4 x double> %c, i8 %mask)
- ret <4 x double> %res
-}
-
-declare <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)
-
-define <2 x double> @test_mask_fmadd128_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
-; CHECK-LABEL: test_mask_fmadd128_pd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfmadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x98,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask)
- ret <2 x double> %res
-}
-
-define <2 x double>@test_int_x86_avx512_mask_vfmadd_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_pd_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd8]
-; CHECK-NEXT: vfmadd132pd %xmm1, %xmm2, %xmm3 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x98,0xd9]
-; CHECK-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa8,0xca]
-; CHECK-NEXT: vaddpd %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
- %res1 = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
- %res2 = fadd <2 x double> %res, %res1
- ret <2 x double> %res2
-}
-
-declare <2 x double> @llvm.x86.avx512.mask3.vfmadd.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)
-
-define <2 x double>@test_int_x86_avx512_mask3_vfmadd_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_pd_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %xmm2, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xda]
-; CHECK-NEXT: vfmadd231pd %xmm1, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xb8,0xd9]
-; CHECK-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa8,0xca]
-; CHECK-NEXT: vaddpd %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.mask3.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
- %res1 = call <2 x double> @llvm.x86.avx512.mask3.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
- %res2 = fadd <2 x double> %res, %res1
- ret <2 x double> %res2
-}
-
-declare <2 x double> @llvm.x86.avx512.maskz.vfmadd.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)
-
-define <2 x double>@test_int_x86_avx512_maskz_vfmadd_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_pd_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
-; CHECK-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm3 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0xa8,0xda]
-; CHECK-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa8,0xca]
-; CHECK-NEXT: vaddpd %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.maskz.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
- %res1 = call <2 x double> @llvm.x86.avx512.maskz.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
- %res2 = fadd <2 x double> %res, %res1
- ret <2 x double> %res2
-}
-
-define <4 x double>@test_int_x86_avx512_mask_vfmadd_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_pd_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd8]
-; CHECK-NEXT: vfmadd132pd %ymm1, %ymm2, %ymm3 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x98,0xd9]
-; CHECK-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa8,0xca]
-; CHECK-NEXT: vaddpd %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2 = fadd <4 x double> %res, %res1
- ret <4 x double> %res2
-}
-
-declare <4 x double> @llvm.x86.avx512.mask3.vfmadd.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)
-
-define <4 x double>@test_int_x86_avx512_mask3_vfmadd_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_pd_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xda]
-; CHECK-NEXT: vfmadd231pd %ymm1, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xb8,0xd9]
-; CHECK-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa8,0xca]
-; CHECK-NEXT: vaddpd %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.mask3.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.mask3.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2 = fadd <4 x double> %res, %res1
- ret <4 x double> %res2
-}
-
-declare <4 x double> @llvm.x86.avx512.maskz.vfmadd.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)
-
-define <4 x double>@test_int_x86_avx512_maskz_vfmadd_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_pd_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
-; CHECK-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm3 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0xa8,0xda]
-; CHECK-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa8,0xca]
-; CHECK-NEXT: vaddpd %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.maskz.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.maskz.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2 = fadd <4 x double> %res, %res1
- ret <4 x double> %res2
-}
-
-define <4 x float>@test_int_x86_avx512_mask_vfmadd_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ps_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd8]
-; CHECK-NEXT: vfmadd132ps %xmm1, %xmm2, %xmm3 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x98,0xd9]
-; CHECK-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa8,0xca]
-; CHECK-NEXT: vaddps %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2 = fadd <4 x float> %res, %res1
- ret <4 x float> %res2
-}
-
-declare <4 x float> @llvm.x86.avx512.mask3.vfmadd.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
-
-define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ps_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %xmm2, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xda]
-; CHECK-NEXT: vfmadd231ps %xmm1, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xb8,0xd9]
-; CHECK-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa8,0xca]
-; CHECK-NEXT: vaddps %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2 = fadd <4 x float> %res, %res1
- ret <4 x float> %res2
-}
-
-declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
-
-define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ps_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
-; CHECK-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm3 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0xa8,0xda]
-; CHECK-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa8,0xca]
-; CHECK-NEXT: vaddps %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2 = fadd <4 x float> %res, %res1
- ret <4 x float> %res2
-}
-
-define <8 x float>@test_int_x86_avx512_mask_vfmadd_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ps_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd8]
-; CHECK-NEXT: vfmadd132ps %ymm1, %ymm2, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x98,0xd9]
-; CHECK-NEXT: vfmadd213ps %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa8,0xca]
-; CHECK-NEXT: vaddps %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
- %res1 = call <8 x float> @llvm.x86.avx512.mask.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
- %res2 = fadd <8 x float> %res, %res1
- ret <8 x float> %res2
-}
-
-declare <8 x float> @llvm.x86.avx512.mask3.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)
-
-define <8 x float>@test_int_x86_avx512_mask3_vfmadd_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ps_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
-; CHECK-NEXT: vfmadd231ps %ymm1, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xb8,0xd9]
-; CHECK-NEXT: vfmadd213ps %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa8,0xca]
-; CHECK-NEXT: vaddps %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask3.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
- %res1 = call <8 x float> @llvm.x86.avx512.mask3.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
- %res2 = fadd <8 x float> %res, %res1
- ret <8 x float> %res2
-}
-
-declare <8 x float> @llvm.x86.avx512.maskz.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)
-
-define <8 x float>@test_int_x86_avx512_maskz_vfmadd_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ps_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
-; CHECK-NEXT: vfmadd213ps %ymm2, %ymm0, %ymm3 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0xa8,0xda]
-; CHECK-NEXT: vfmadd213ps %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa8,0xca]
-; CHECK-NEXT: vaddps %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.maskz.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
- %res1 = call <8 x float> @llvm.x86.avx512.maskz.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
- %res2 = fadd <8 x float> %res, %res1
- ret <8 x float> %res2
-}
-
-
-declare <2 x double> @llvm.x86.avx512.mask3.vfmsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)
-
-define <2 x double>@test_int_x86_avx512_mask3_vfmsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_pd_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %xmm2, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xda]
-; CHECK-NEXT: vfmsub231pd %xmm1, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xba,0xd9]
-; CHECK-NEXT: vfmsub213pd %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xaa,0xca]
-; CHECK-NEXT: vaddpd %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.mask3.vfmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
- %res1 = call <2 x double> @llvm.x86.avx512.mask3.vfmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
- %res2 = fadd <2 x double> %res, %res1
- ret <2 x double> %res2
-}
-
-
-declare <4 x double> @llvm.x86.avx512.mask3.vfmsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)
-
-define <4 x double>@test_int_x86_avx512_mask3_vfmsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_pd_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xda]
-; CHECK-NEXT: vfmsub231pd %ymm1, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xba,0xd9]
-; CHECK-NEXT: vfmsub213pd %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xaa,0xca]
-; CHECK-NEXT: vaddpd %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.mask3.vfmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.mask3.vfmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2 = fadd <4 x double> %res, %res1
- ret <4 x double> %res2
-}
-
-declare <4 x float> @llvm.x86.avx512.mask3.vfmsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
-
-define <4 x float>@test_int_x86_avx512_mask3_vfmsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ps_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %xmm2, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xda]
-; CHECK-NEXT: vfmsub231ps %xmm1, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xba,0xd9]
-; CHECK-NEXT: vfmsub213ps %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xaa,0xca]
-; CHECK-NEXT: vaddps %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2 = fadd <4 x float> %res, %res1
- ret <4 x float> %res2
-}
-
-declare <8 x float> @llvm.x86.avx512.mask3.vfmsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)
-
-define <8 x float>@test_int_x86_avx512_mask3_vfmsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ps_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
-; CHECK-NEXT: vfmsub231ps %ymm1, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xba,0xd9]
-; CHECK-NEXT: vfmsub213ps %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xaa,0xca]
-; CHECK-NEXT: vaddps %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask3.vfmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
- %res1 = call <8 x float> @llvm.x86.avx512.mask3.vfmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
- %res2 = fadd <8 x float> %res, %res1
- ret <8 x float> %res2
-}
-
-declare <8 x float> @llvm.x86.avx512.mask.vfnmadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone
-
-define <8 x float> @test_mask_vfnmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfnmadd256_ps:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfnmadd132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x9c,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask.vfnmadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) nounwind
- ret <8 x float> %res
-}
-
-declare <4 x float> @llvm.x86.avx512.mask.vfnmadd.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
-
-define <4 x float> @test_mask_vfnmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfnmadd128_ps:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfnmadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x9c,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.vfnmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
- ret <4 x float> %res
-}
-
-declare <4 x double> @llvm.x86.avx512.mask.vfnmadd.pd.256(<4 x double>, <4 x double>, <4 x double>, i8) nounwind readnone
-
-define <4 x double> @test_mask_vfnmadd256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfnmadd256_pd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfnmadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x9c,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.mask.vfnmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
- ret <4 x double> %res
-}
-
-declare <2 x double> @llvm.x86.avx512.mask.vfnmadd.pd.128(<2 x double>, <2 x double>, <2 x double>, i8) nounwind readnone
-
-define <2 x double> @test_mask_vfnmadd128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfnmadd128_pd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfnmadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x9c,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.mask.vfnmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
- ret <2 x double> %res
-}
-
-declare <8 x float> @llvm.x86.avx512.mask.vfnmsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone
-
-define <8 x float> @test_mask_vfnmsub256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfnmsub256_ps:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfnmsub132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x9e,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask.vfnmsub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) nounwind
- ret <8 x float> %res
-}
-
-declare <4 x float> @llvm.x86.avx512.mask.vfnmsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
-
-define <4 x float> @test_mask_vfnmsub128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfnmsub128_ps:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfnmsub132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x9e,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.vfnmsub.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
- ret <4 x float> %res
-}
-
-declare <4 x double> @llvm.x86.avx512.mask.vfnmsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8) nounwind readnone
-
-define <4 x double> @test_mask_vfnmsub256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfnmsub256_pd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfnmsub132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x9e,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.mask.vfnmsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
- ret <4 x double> %res
-}
-
-declare <2 x double> @llvm.x86.avx512.mask.vfnmsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8) nounwind readnone
-
-define <2 x double> @test_mask_vfnmsub128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfnmsub128_pd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfnmsub132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x9e,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.mask.vfnmsub.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
- ret <2 x double> %res
-}
-
-
-define <2 x double>@test_int_x86_avx512_mask_vfnmsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vfnmsub_pd_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd8]
-; CHECK-NEXT: vfnmsub132pd %xmm1, %xmm2, %xmm3 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x9e,0xd9]
-; CHECK-NEXT: vfnmsub213pd %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xae,0xca]
-; CHECK-NEXT: vaddpd %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.mask.vfnmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
- %res1 = call <2 x double> @llvm.x86.avx512.mask.vfnmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
- %res2 = fadd <2 x double> %res, %res1
- ret <2 x double> %res2
-}
-
-declare <2 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)
-
-define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_pd_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %xmm2, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xda]
-; CHECK-NEXT: vfnmsub231pd %xmm1, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbe,0xd9]
-; CHECK-NEXT: vfnmsub213pd %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xae,0xca]
-; CHECK-NEXT: vaddpd %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
- %res1 = call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
- %res2 = fadd <2 x double> %res, %res1
- ret <2 x double> %res2
-}
-
-define <4 x double>@test_int_x86_avx512_mask_vfnmsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vfnmsub_pd_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd8]
-; CHECK-NEXT: vfnmsub132pd %ymm1, %ymm2, %ymm3 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x9e,0xd9]
-; CHECK-NEXT: vfnmsub213pd %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xae,0xca]
-; CHECK-NEXT: vaddpd %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.mask.vfnmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.mask.vfnmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2 = fadd <4 x double> %res, %res1
- ret <4 x double> %res2
-}
-
-declare <4 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)
-
-define <4 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_pd_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xda]
-; CHECK-NEXT: vfnmsub231pd %ymm1, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xbe,0xd9]
-; CHECK-NEXT: vfnmsub213pd %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xae,0xca]
-; CHECK-NEXT: vaddpd %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2 = fadd <4 x double> %res, %res1
- ret <4 x double> %res2
-}
-
-define <4 x float>@test_int_x86_avx512_mask_vfnmsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vfnmsub_ps_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd8]
-; CHECK-NEXT: vfnmsub132ps %xmm1, %xmm2, %xmm3 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x9e,0xd9]
-; CHECK-NEXT: vfnmsub213ps %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xae,0xca]
-; CHECK-NEXT: vaddps %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.vfnmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.mask.vfnmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2 = fadd <4 x float> %res, %res1
- ret <4 x float> %res2
-}
-
-declare <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
-
-define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_ps_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %xmm2, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xda]
-; CHECK-NEXT: vfnmsub231ps %xmm1, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbe,0xd9]
-; CHECK-NEXT: vfnmsub213ps %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xae,0xca]
-; CHECK-NEXT: vaddps %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2 = fadd <4 x float> %res, %res1
- ret <4 x float> %res2
-}
-
-define <8 x float>@test_int_x86_avx512_mask_vfnmsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vfnmsub_ps_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd8]
-; CHECK-NEXT: vfnmsub132ps %ymm1, %ymm2, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x9e,0xd9]
-; CHECK-NEXT: vfnmsub213ps %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xae,0xca]
-; CHECK-NEXT: vaddps %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask.vfnmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
- %res1 = call <8 x float> @llvm.x86.avx512.mask.vfnmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
- %res2 = fadd <8 x float> %res, %res1
- ret <8 x float> %res2
-}
-
-declare <8 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)
-
-define <8 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_ps_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
-; CHECK-NEXT: vfnmsub231ps %ymm1, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xbe,0xd9]
-; CHECK-NEXT: vfnmsub213ps %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xae,0xca]
-; CHECK-NEXT: vaddps %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
- %res1 = call <8 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
- %res2 = fadd <8 x float> %res, %res1
- ret <8 x float> %res2
-}
-
-define <2 x double>@test_int_x86_avx512_mask_vfnmadd_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vfnmadd_pd_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd8]
-; CHECK-NEXT: vfnmadd132pd %xmm1, %xmm2, %xmm3 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x9c,0xd9]
-; CHECK-NEXT: vfnmadd213pd %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xac,0xca]
-; CHECK-NEXT: vaddpd %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.mask.vfnmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
- %res1 = call <2 x double> @llvm.x86.avx512.mask.vfnmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
- %res2 = fadd <2 x double> %res, %res1
- ret <2 x double> %res2
-}
-
-define <4 x double>@test_int_x86_avx512_mask_vfnmadd_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vfnmadd_pd_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd8]
-; CHECK-NEXT: vfnmadd132pd %ymm1, %ymm2, %ymm3 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x9c,0xd9]
-; CHECK-NEXT: vfnmadd213pd %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xac,0xca]
-; CHECK-NEXT: vaddpd %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.mask.vfnmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.mask.vfnmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2 = fadd <4 x double> %res, %res1
- ret <4 x double> %res2
-}
-
-define <4 x float>@test_int_x86_avx512_mask_vfnmadd_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vfnmadd_ps_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd8]
-; CHECK-NEXT: vfnmadd132ps %xmm1, %xmm2, %xmm3 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x9c,0xd9]
-; CHECK-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xac,0xca]
-; CHECK-NEXT: vaddps %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.vfnmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.mask.vfnmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2 = fadd <4 x float> %res, %res1
- ret <4 x float> %res2
-}
-
-define <8 x float>@test_int_x86_avx512_mask_vfnmadd_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vfnmadd_ps_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd8]
-; CHECK-NEXT: vfnmadd132ps %ymm1, %ymm2, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x9c,0xd9]
-; CHECK-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xac,0xca]
-; CHECK-NEXT: vaddps %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask.vfnmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
- %res1 = call <8 x float> @llvm.x86.avx512.mask.vfnmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
- %res2 = fadd <8 x float> %res, %res1
- ret <8 x float> %res2
-}
-
-declare <8 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone
-
-define <8 x float> @test_mask_fmaddsub256_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c, i8 %mask) {
-; CHECK-LABEL: test_mask_fmaddsub256_ps:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfmaddsub132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x96,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> %c, i8 %mask)
- ret <8 x float> %res
-}
-
-declare <4 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
-
-define <4 x float> @test_mask_fmaddsub128_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
-; CHECK-LABEL: test_mask_fmaddsub128_ps:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfmaddsub132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x96,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.128(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask)
- ret <4 x float> %res
-}
-
-declare <4 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8) nounwind readnone
-
-define <4 x double> @test_mask_vfmaddsub256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfmaddsub256_pd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfmaddsub132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x96,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
- ret <4 x double> %res
-}
-
-declare <2 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8) nounwind readnone
-
-define <2 x double> @test_mask_vfmaddsub128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfmaddsub128_pd:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfmaddsub132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x96,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
- ret <2 x double> %res
-}
-
-define <2 x double>@test_int_x86_avx512_mask_vfmaddsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vfmaddsub_pd_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd8]
-; CHECK-NEXT: vfmaddsub132pd %xmm1, %xmm2, %xmm3 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x96,0xd9]
-; CHECK-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa6,0xca]
-; CHECK-NEXT: vaddpd %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
- %res1 = call <2 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
- %res2 = fadd <2 x double> %res, %res1
- ret <2 x double> %res2
-}
-
-declare <2 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)
-
-define <2 x double>@test_int_x86_avx512_mask3_vfmaddsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmaddsub_pd_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %xmm2, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xda]
-; CHECK-NEXT: vfmaddsub231pd %xmm1, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xb6,0xd9]
-; CHECK-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa6,0xca]
-; CHECK-NEXT: vaddpd %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
- %res1 = call <2 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
- %res2 = fadd <2 x double> %res, %res1
- ret <2 x double> %res2
-}
-
-declare <2 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)
-
-define <2 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_maskz_vfmaddsub_pd_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
-; CHECK-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm3 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0xa6,0xda]
-; CHECK-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa6,0xca]
-; CHECK-NEXT: vaddpd %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
- %res1 = call <2 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
- %res2 = fadd <2 x double> %res, %res1
- ret <2 x double> %res2
-}
-
-define <4 x double>@test_int_x86_avx512_mask_vfmaddsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vfmaddsub_pd_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd8]
-; CHECK-NEXT: vfmaddsub132pd %ymm1, %ymm2, %ymm3 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x96,0xd9]
-; CHECK-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa6,0xca]
-; CHECK-NEXT: vaddpd %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2 = fadd <4 x double> %res, %res1
- ret <4 x double> %res2
-}
-
-declare <4 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)
-
-define <4 x double>@test_int_x86_avx512_mask3_vfmaddsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmaddsub_pd_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xda]
-; CHECK-NEXT: vfmaddsub231pd %ymm1, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xb6,0xd9]
-; CHECK-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa6,0xca]
-; CHECK-NEXT: vaddpd %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2 = fadd <4 x double> %res, %res1
- ret <4 x double> %res2
-}
-
-declare <4 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)
-
-define <4 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_maskz_vfmaddsub_pd_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
-; CHECK-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm3 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0xa6,0xda]
-; CHECK-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa6,0xca]
-; CHECK-NEXT: vaddpd %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2 = fadd <4 x double> %res, %res1
- ret <4 x double> %res2
-}
-
-define <4 x float>@test_int_x86_avx512_mask_vfmaddsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vfmaddsub_ps_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd8]
-; CHECK-NEXT: vfmaddsub132ps %xmm1, %xmm2, %xmm3 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x96,0xd9]
-; CHECK-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa6,0xca]
-; CHECK-NEXT: vaddps %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2 = fadd <4 x float> %res, %res1
- ret <4 x float> %res2
-}
-
-declare <4 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
-
-define <4 x float>@test_int_x86_avx512_mask3_vfmaddsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ps_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %xmm2, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xda]
-; CHECK-NEXT: vfmaddsub231ps %xmm1, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xb6,0xd9]
-; CHECK-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa6,0xca]
-; CHECK-NEXT: vaddps %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2 = fadd <4 x float> %res, %res1
- ret <4 x float> %res2
-}
-
-declare <4 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
-
-define <4 x float>@test_int_x86_avx512_maskz_vfmaddsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ps_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
-; CHECK-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm3 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0xa6,0xda]
-; CHECK-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa6,0xca]
-; CHECK-NEXT: vaddps %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2 = fadd <4 x float> %res, %res1
- ret <4 x float> %res2
-}
-
-define <8 x float>@test_int_x86_avx512_mask_vfmaddsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vfmaddsub_ps_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd8]
-; CHECK-NEXT: vfmaddsub132ps %ymm1, %ymm2, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x96,0xd9]
-; CHECK-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa6,0xca]
-; CHECK-NEXT: vaddps %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
- %res1 = call <8 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
- %res2 = fadd <8 x float> %res, %res1
- ret <8 x float> %res2
-}
-
-declare <8 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)
-
-define <8 x float>@test_int_x86_avx512_mask3_vfmaddsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ps_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
-; CHECK-NEXT: vfmaddsub231ps %ymm1, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xb6,0xd9]
-; CHECK-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa6,0xca]
-; CHECK-NEXT: vaddps %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
- %res1 = call <8 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
- %res2 = fadd <8 x float> %res, %res1
- ret <8 x float> %res2
-}
-
-declare <8 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)
-
-define <8 x float>@test_int_x86_avx512_maskz_vfmaddsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ps_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
-; CHECK-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm3 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0xa6,0xda]
-; CHECK-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa6,0xca]
-; CHECK-NEXT: vaddps %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
- %res1 = call <8 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
- %res2 = fadd <8 x float> %res, %res1
- ret <8 x float> %res2
-}
-
-declare <2 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)
-
-define <2 x double>@test_int_x86_avx512_mask3_vfmsubadd_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsubadd_pd_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %xmm2, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xda]
-; CHECK-NEXT: vfmsubadd231pd %xmm1, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xb7,0xd9]
-; CHECK-NEXT: vfmsubadd213pd %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa7,0xca]
-; CHECK-NEXT: vaddpd %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
- %res1 = call <2 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
- %res2 = fadd <2 x double> %res, %res1
- ret <2 x double> %res2
-}
-
-declare <4 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)
-
-define <4 x double>@test_int_x86_avx512_mask3_vfmsubadd_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsubadd_pd_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovapd %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xda]
-; CHECK-NEXT: vfmsubadd231pd %ymm1, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xb7,0xd9]
-; CHECK-NEXT: vfmsubadd213pd %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa7,0xca]
-; CHECK-NEXT: vaddpd %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2 = fadd <4 x double> %res, %res1
- ret <4 x double> %res2
-}
-
-declare <4 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
-
-define <4 x float>@test_int_x86_avx512_mask3_vfmsubadd_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ps_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %xmm2, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xda]
-; CHECK-NEXT: vfmsubadd231ps %xmm1, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xb7,0xd9]
-; CHECK-NEXT: vfmsubadd213ps %xmm2, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa7,0xca]
-; CHECK-NEXT: vaddps %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2 = fadd <4 x float> %res, %res1
- ret <4 x float> %res2
-}
-
-declare <8 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)
-
-define <8 x float>@test_int_x86_avx512_mask3_vfmsubadd_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ps_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vmovaps %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
-; CHECK-NEXT: vfmsubadd231ps %ymm1, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xb7,0xd9]
-; CHECK-NEXT: vfmsubadd213ps %ymm2, %ymm0, %ymm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa7,0xca]
-; CHECK-NEXT: vaddps %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
- %res1 = call <8 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
- %res2 = fadd <8 x float> %res, %res1
- ret <8 x float> %res2
-}
-
-
-define <4 x float> @test_mask_vfmadd128_ps_r(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfmadd128_ps_r:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfmadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x98,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
- ret <4 x float> %res
-}
-
-define <4 x float> @test_mask_vfmadd128_ps_rz(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
-; CHECK-LABEL: test_mask_vfmadd128_ps_rz:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
- ret <4 x float> %res
-}
-
-define <4 x float> @test_mask_vfmadd128_ps_rmk(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfmadd128_ps_rmk:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
-; CHECK-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa8,0x07]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %a2 = load <4 x float>, <4 x float>* %ptr_a2
- %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
- ret <4 x float> %res
-}
-
-define <4 x float> @test_mask_vfmadd128_ps_rmka(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfmadd128_ps_rmka:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
-; CHECK-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa8,0x07]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %a2 = load <4 x float>, <4 x float>* %ptr_a2, align 8
- %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
- ret <4 x float> %res
-}
-
-define <4 x float> @test_mask_vfmadd128_ps_rmkz(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2) {
-; CHECK-LABEL: test_mask_vfmadd128_ps_rmkz:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0x07]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %a2 = load <4 x float>, <4 x float>* %ptr_a2
- %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
- ret <4 x float> %res
-}
-
-define <4 x float> @test_mask_vfmadd128_ps_rmkza(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2) {
-; CHECK-LABEL: test_mask_vfmadd128_ps_rmkza:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0x07]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %a2 = load <4 x float>, <4 x float>* %ptr_a2, align 4
- %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
- ret <4 x float> %res
-}
-
-define <4 x float> @test_mask_vfmadd128_ps_rmb(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfmadd128_ps_rmb:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
-; CHECK-NEXT: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x19,0xa8,0x07]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %q = load float, float* %ptr_a2
- %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
- %vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
- %vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
- %vecinit6.i = insertelement <4 x float> %vecinit4.i, float %q, i32 3
- %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %vecinit6.i, i8 %mask) nounwind
- ret <4 x float> %res
-}
-
-define <4 x float> @test_mask_vfmadd128_ps_rmba(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfmadd128_ps_rmba:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
-; CHECK-NEXT: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x19,0xa8,0x07]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %q = load float, float* %ptr_a2, align 4
- %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
- %vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
- %vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
- %vecinit6.i = insertelement <4 x float> %vecinit4.i, float %q, i32 3
- %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %vecinit6.i, i8 %mask) nounwind
- ret <4 x float> %res
-}
-
-define <4 x float> @test_mask_vfmadd128_ps_rmbz(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2) {
-; CHECK-LABEL: test_mask_vfmadd128_ps_rmbz:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa8,0x07]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %q = load float, float* %ptr_a2
- %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
- %vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
- %vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
- %vecinit6.i = insertelement <4 x float> %vecinit4.i, float %q, i32 3
- %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %vecinit6.i, i8 -1) nounwind
- ret <4 x float> %res
-}
-
-define <4 x float> @test_mask_vfmadd128_ps_rmbza(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2) {
-; CHECK-LABEL: test_mask_vfmadd128_ps_rmbza:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa8,0x07]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %q = load float, float* %ptr_a2, align 4
- %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
- %vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
- %vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
- %vecinit6.i = insertelement <4 x float> %vecinit4.i, float %q, i32 3
- %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %vecinit6.i, i8 -1) nounwind
- ret <4 x float> %res
-}
-
-define <2 x double> @test_mask_vfmadd128_pd_r(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfmadd128_pd_r:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfmadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x98,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
- ret <2 x double> %res
-}
-
-define <2 x double> @test_mask_vfmadd128_pd_rz(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
-; CHECK-LABEL: test_mask_vfmadd128_pd_rz:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 -1) nounwind
- ret <2 x double> %res
-}
-
-define <2 x double> @test_mask_vfmadd128_pd_rmk(<2 x double> %a0, <2 x double> %a1, <2 x double>* %ptr_a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfmadd128_pd_rmk:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
-; CHECK-NEXT: vfmadd213pd (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa8,0x07]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %a2 = load <2 x double>, <2 x double>* %ptr_a2
- %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
- ret <2 x double> %res
-}
-
-define <2 x double> @test_mask_vfmadd128_pd_rmkz(<2 x double> %a0, <2 x double> %a1, <2 x double>* %ptr_a2) {
-; CHECK-LABEL: test_mask_vfmadd128_pd_rmkz:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vfmadd213pd (%rdi), %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0x07]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %a2 = load <2 x double>, <2 x double>* %ptr_a2
- %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 -1) nounwind
- ret <2 x double> %res
-}
-
-define <4 x double> @test_mask_vfmadd256_pd_r(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfmadd256_pd_r:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vfmadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x98,0xc1]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
- ret <4 x double> %res
-}
-
-define <4 x double> @test_mask_vfmadd256_pd_rz(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
-; CHECK-LABEL: test_mask_vfmadd256_pd_rz:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 -1) nounwind
- ret <4 x double> %res
-}
-
-define <4 x double> @test_mask_vfmadd256_pd_rmk(<4 x double> %a0, <4 x double> %a1, <4 x double>* %ptr_a2, i8 %mask) {
-; CHECK-LABEL: test_mask_vfmadd256_pd_rmk:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
-; CHECK-NEXT: vfmadd213pd (%rdi), %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0xa8,0x07]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %a2 = load <4 x double>, <4 x double>* %ptr_a2
- %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
- ret <4 x double> %res
-}
-
-define <4 x double> @test_mask_vfmadd256_pd_rmkz(<4 x double> %a0, <4 x double> %a1, <4 x double>* %ptr_a2) {
-; CHECK-LABEL: test_mask_vfmadd256_pd_rmkz:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vfmadd213pd (%rdi), %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0x07]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %a2 = load <4 x double>, <4 x double>* %ptr_a2
- %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 -1) nounwind
- ret <4 x double> %res
-}
-
define <8 x i16> @test_mask_packs_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_mask_packs_epi32_rr_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 -1)
- ret <8 x i16> %res
+ %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a, <4 x i32> %b)
+ ret <8 x i16> %1
}
define <8 x i16> @test_mask_packs_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackssdw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x6b,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask)
- ret <8 x i16> %res
+ %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a, <4 x i32> %b)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
+ ret <8 x i16> %3
}
define <8 x i16> @test_mask_packs_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x6b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 %mask)
- ret <8 x i16> %res
+ %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a, <4 x i32> %b)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
+ ret <8 x i16> %3
}
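; Editorial sketch (an illustration, not part of the checked-in test): in the
; rewritten rrk/rrkz tests above, masking is modeled in generic IR instead of
; being baked into the intrinsic. On a little-endian target such as x86,
; bitcasting the i8 mask to <8 x i1> maps bit i of the mask (LSB first) to
; vector lane i, so for a hypothetical mask value of 5 (0b00000101) the select
; keeps the packed result in lanes 0 and 2 and takes the pass-through value
; (or zero, in the {z} variant) in every other lane:
;   %m = bitcast i8 5 to <8 x i1>   ; <i1 1, i1 0, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0>
;   %r = select <8 x i1> %m, <8 x i16> %1, <8 x i16> %passThru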
define <8 x i16> @test_mask_packs_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
@@ -2024,31 +925,35 @@ define <8 x i16> @test_mask_packs_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b)
; CHECK-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i32>, <4 x i32>* %ptr_b
- %res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 -1)
- ret <8 x i16> %res
+ %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a, <4 x i32> %b)
+ ret <8 x i16> %1
}
define <8 x i16> @test_mask_packs_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x6b,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i32>, <4 x i32>* %ptr_b
- %res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask)
- ret <8 x i16> %res
+ %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a, <4 x i32> %b)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
+ ret <8 x i16> %3
}
define <8 x i16> @test_mask_packs_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i32>, <4 x i32>* %ptr_b
- %res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 %mask)
- ret <8 x i16> %res
+ %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a, <4 x i32> %b)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
+ ret <8 x i16> %3
}
define <8 x i16> @test_mask_packs_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
@@ -2059,67 +964,75 @@ define <8 x i16> @test_mask_packs_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <4 x i32> undef, i32 %q, i32 0
%b = shufflevector <4 x i32> %vecinit.i, <4 x i32> undef, <4 x i32> zeroinitializer
- %res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 -1)
- ret <8 x i16> %res
+ %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a, <4 x i32> %b)
+ ret <8 x i16> %1
}
define <8 x i16> @test_mask_packs_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmbk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x19,0x6b,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <4 x i32> undef, i32 %q, i32 0
%b = shufflevector <4 x i32> %vecinit.i, <4 x i32> undef, <4 x i32> zeroinitializer
- %res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask)
- ret <8 x i16> %res
+ %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a, <4 x i32> %b)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
+ ret <8 x i16> %3
}
define <8 x i16> @test_mask_packs_epi32_rmbkz_128(<4 x i32> %a, i32* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmbkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x99,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <4 x i32> undef, i32 %q, i32 0
%b = shufflevector <4 x i32> %vecinit.i, <4 x i32> undef, <4 x i32> zeroinitializer
- %res = call <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 %mask)
- ret <8 x i16> %res
+ %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a, <4 x i32> %b)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
+ ret <8 x i16> %3
}
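; Note on the rmb/rmbk/rmbkz variants above: the splat built from a scalar i32
; load (insertelement followed by shufflevector with a zero mask) is the
; canonical IR idiom the backend folds into the instruction's embedded
; broadcast form; that is what the (%rdi){1to4} memory operand in the CHECK
; lines encodes (one dword loaded and replicated to all four lanes).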
-declare <8 x i16> @llvm.x86.avx512.mask.packssdw.128(<4 x i32>, <4 x i32>, <8 x i16>, i8)
+declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>)
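; For reference, @llvm.x86.sse2.packssdw.128 is a signed saturating pack:
; result lanes 0-3 are %a's i32 elements clamped to [-32768, 32767] and
; truncated to i16, and lanes 4-7 come from %b. A minimal sketch with made-up
; constants:
;   %p = call <8 x i16> @llvm.x86.sse2.packssdw.128(
;          <4 x i32> <i32 70000, i32 -70000, i32 1, i32 2>,
;          <4 x i32> zeroinitializer)
;   ; %p = <i16 32767, i16 -32768, i16 1, i16 2, i16 0, i16 0, i16 0, i16 0>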
define <16 x i16> @test_mask_packs_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: test_mask_packs_epi32_rr_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 -1)
- ret <16 x i16> %res
+ %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a, <8 x i32> %b)
+ ret <16 x i16> %1
}
define <16 x i16> @test_mask_packs_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rrk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackssdw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x6b,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask)
- ret <16 x i16> %res
+ %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a, <8 x i32> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
+ ret <16 x i16> %3
}
define <16 x i16> @test_mask_packs_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rrkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x6b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 %mask)
- ret <16 x i16> %res
+ %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a, <8 x i32> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
+ ret <16 x i16> %3
}
define <16 x i16> @test_mask_packs_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
@@ -2128,31 +1041,35 @@ define <16 x i16> @test_mask_packs_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b)
; CHECK-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i32>, <8 x i32>* %ptr_b
- %res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 -1)
- ret <16 x i16> %res
+ %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a, <8 x i32> %b)
+ ret <16 x i16> %1
}
define <16 x i16> @test_mask_packs_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x6b,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i32>, <8 x i32>* %ptr_b
- %res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask)
- ret <16 x i16> %res
+ %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a, <8 x i32> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
+ ret <16 x i16> %3
}
define <16 x i16> @test_mask_packs_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i32>, <8 x i32>* %ptr_b
- %res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 %mask)
- ret <16 x i16> %res
+ %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a, <8 x i32> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
+ ret <16 x i16> %3
}
define <16 x i16> @test_mask_packs_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
@@ -2163,67 +1080,75 @@ define <16 x i16> @test_mask_packs_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0
%b = shufflevector <8 x i32> %vecinit.i, <8 x i32> undef, <8 x i32> zeroinitializer
- %res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 -1)
- ret <16 x i16> %res
+ %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a, <8 x i32> %b)
+ ret <16 x i16> %1
}
define <16 x i16> @test_mask_packs_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmbk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x39,0x6b,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0
%b = shufflevector <8 x i32> %vecinit.i, <8 x i32> undef, <8 x i32> zeroinitializer
- %res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask)
- ret <16 x i16> %res
+ %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a, <8 x i32> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
+ ret <16 x i16> %3
}
define <16 x i16> @test_mask_packs_epi32_rmbkz_256(<8 x i32> %a, i32* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi32_rmbkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackssdw (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xb9,0x6b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0
%b = shufflevector <8 x i32> %vecinit.i, <8 x i32> undef, <8 x i32> zeroinitializer
- %res = call <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 %mask)
- ret <16 x i16> %res
+ %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a, <8 x i32> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
+ ret <16 x i16> %3
}
-declare <16 x i16> @llvm.x86.avx512.mask.packssdw.256(<8 x i32>, <8 x i32>, <16 x i16>, i16)
+declare <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32>, <8 x i32>)
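; The hunks above and below all perform the same upgrade: the removed
; @llvm.x86.avx512.mask.* pack intrinsics took the passthru vector and the
; integer mask as extra operands, while the replacement IR calls the plain
; SSE/AVX2 pack intrinsic and expresses the masking as a bitcast of the mask
; to a vector of i1 followed by a select. A minimal sketch of that idiom,
; reusing the @llvm.x86.avx2.packssdw declaration just above (the function
; name @masked_packssdw_sketch is illustrative and not part of this patch):
define <16 x i16> @masked_packssdw_sketch(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask) {
  %r = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a, <8 x i32> %b)
  ; one mask bit per result lane
  %m = bitcast i16 %mask to <16 x i1>
  ; merge-masking shown here; the zero-masking ({z}) tests use zeroinitializer in place of %passThru
  %sel = select <16 x i1> %m, <16 x i16> %r, <16 x i16> %passThru
  ret <16 x i16> %sel
}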
define <16 x i8> @test_mask_packs_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_mask_packs_epi16_rr_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x63,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.avx512.mask.packsswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> zeroinitializer, i16 -1)
- ret <16 x i8> %res
+ %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a, <8 x i16> %b)
+ ret <16 x i8> %1
}
define <16 x i8> @test_mask_packs_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpacksswb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x63,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.avx512.mask.packsswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> %passThru, i16 %mask)
- ret <16 x i8> %res
+ %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a, <8 x i16> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
+ ret <16 x i8> %3
}
define <16 x i8> @test_mask_packs_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x63,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.avx512.mask.packsswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> zeroinitializer, i16 %mask)
- ret <16 x i8> %res
+ %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a, <8 x i16> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
+ ret <16 x i8> %3
}
define <16 x i8> @test_mask_packs_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
@@ -2232,42 +1157,46 @@ define <16 x i8> @test_mask_packs_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b)
; CHECK-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x63,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
- %res = call <16 x i8> @llvm.x86.avx512.mask.packsswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> zeroinitializer, i16 -1)
- ret <16 x i8> %res
+ %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a, <8 x i16> %b)
+ ret <16 x i8> %1
}
define <16 x i8> @test_mask_packs_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpacksswb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x63,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
- %res = call <16 x i8> @llvm.x86.avx512.mask.packsswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> %passThru, i16 %mask)
- ret <16 x i8> %res
+ %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a, <8 x i16> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
+ ret <16 x i8> %3
}
define <16 x i8> @test_mask_packs_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_packs_epi16_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x63,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
- %res = call <16 x i8> @llvm.x86.avx512.mask.packsswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> zeroinitializer, i16 %mask)
- ret <16 x i8> %res
+ %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a, <8 x i16> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
+ ret <16 x i8> %3
}
-declare <16 x i8> @llvm.x86.avx512.mask.packsswb.128(<8 x i16>, <8 x i16>, <16 x i8>, i16)
+declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>)
define <32 x i8> @test_mask_packs_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: test_mask_packs_epi16_rr_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x63,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <32 x i8> @llvm.x86.avx512.mask.packsswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> zeroinitializer, i32 -1)
- ret <32 x i8> %res
+ %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a, <16 x i16> %b)
+ ret <32 x i8> %1
}
define <32 x i8> @test_mask_packs_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <32 x i8> %passThru, i32 %mask) {
@@ -2277,8 +1206,10 @@ define <32 x i8> @test_mask_packs_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <3
; CHECK-NEXT: vpacksswb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x63,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <32 x i8> @llvm.x86.avx512.mask.packsswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> %passThru, i32 %mask)
- ret <32 x i8> %res
+ %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a, <16 x i16> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
+ ret <32 x i8> %3
}
define <32 x i8> @test_mask_packs_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i32 %mask) {
@@ -2287,8 +1218,10 @@ define <32 x i8> @test_mask_packs_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x63,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <32 x i8> @llvm.x86.avx512.mask.packsswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> zeroinitializer, i32 %mask)
- ret <32 x i8> %res
+ %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a, <16 x i16> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
+ ret <32 x i8> %3
}
define <32 x i8> @test_mask_packs_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
@@ -2297,8 +1230,8 @@ define <32 x i8> @test_mask_packs_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b
; CHECK-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x63,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
- %res = call <32 x i8> @llvm.x86.avx512.mask.packsswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> zeroinitializer, i32 -1)
- ret <32 x i8> %res
+ %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a, <16 x i16> %b)
+ ret <32 x i8> %1
}
define <32 x i8> @test_mask_packs_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
@@ -2309,8 +1242,10 @@ define <32 x i8> @test_mask_packs_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
- %res = call <32 x i8> @llvm.x86.avx512.mask.packsswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> %passThru, i32 %mask)
- ret <32 x i8> %res
+ %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a, <16 x i16> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
+ ret <32 x i8> %3
}
define <32 x i8> @test_mask_packs_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i32 %mask) {
@@ -2320,11 +1255,13 @@ define <32 x i8> @test_mask_packs_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr
; CHECK-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x63,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
- %res = call <32 x i8> @llvm.x86.avx512.mask.packsswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> zeroinitializer, i32 %mask)
- ret <32 x i8> %res
+ %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a, <16 x i16> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
+ ret <32 x i8> %3
}
-declare <32 x i8> @llvm.x86.avx512.mask.packsswb.256(<16 x i16>, <16 x i16>, <32 x i8>, i32)
+declare <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16>, <16 x i16>)
define <8 x i16> @test_mask_packus_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
@@ -2332,29 +1269,33 @@ define <8 x i16> @test_mask_packus_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 -1)
- ret <8 x i16> %res
+ %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a, <4 x i32> %b)
+ ret <8 x i16> %1
}
define <8 x i16> @test_mask_packus_epi32_rrk_128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackusdw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x2b,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask)
- ret <8 x i16> %res
+ %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a, <4 x i32> %b)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
+ ret <8 x i16> %3
}
define <8 x i16> @test_mask_packus_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x2b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 %mask)
- ret <8 x i16> %res
+ %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a, <4 x i32> %b)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
+ ret <8 x i16> %3
}
define <8 x i16> @test_mask_packus_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
@@ -2363,31 +1304,35 @@ define <8 x i16> @test_mask_packus_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b)
; CHECK-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i32>, <4 x i32>* %ptr_b
- %res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 -1)
- ret <8 x i16> %res
+ %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a, <4 x i32> %b)
+ ret <8 x i16> %1
}
define <8 x i16> @test_mask_packus_epi32_rmk_128(<4 x i32> %a, <4 x i32>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x2b,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i32>, <4 x i32>* %ptr_b
- %res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask)
- ret <8 x i16> %res
+ %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a, <4 x i32> %b)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
+ ret <8 x i16> %3
}
define <8 x i16> @test_mask_packus_epi32_rmkz_128(<4 x i32> %a, <4 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i32>, <4 x i32>* %ptr_b
- %res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 %mask)
- ret <8 x i16> %res
+ %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a, <4 x i32> %b)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
+ ret <8 x i16> %3
}
define <8 x i16> @test_mask_packus_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
@@ -2398,67 +1343,75 @@ define <8 x i16> @test_mask_packus_epi32_rmb_128(<4 x i32> %a, i32* %ptr_b) {
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <4 x i32> undef, i32 %q, i32 0
%b = shufflevector <4 x i32> %vecinit.i, <4 x i32> undef, <4 x i32> zeroinitializer
- %res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 -1)
- ret <8 x i16> %res
+ %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a, <4 x i32> %b)
+ ret <8 x i16> %1
}
define <8 x i16> @test_mask_packus_epi32_rmbk_128(<4 x i32> %a, i32* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmbk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0x2b,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <4 x i32> undef, i32 %q, i32 0
%b = shufflevector <4 x i32> %vecinit.i, <4 x i32> undef, <4 x i32> zeroinitializer
- %res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> %passThru, i8 %mask)
- ret <8 x i16> %res
+ %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a, <4 x i32> %b)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
+ ret <8 x i16> %3
}
define <8 x i16> @test_mask_packus_epi32_rmbkz_128(<4 x i32> %a, i32* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmbkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x99,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <4 x i32> undef, i32 %q, i32 0
%b = shufflevector <4 x i32> %vecinit.i, <4 x i32> undef, <4 x i32> zeroinitializer
- %res = call <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32> %a, <4 x i32> %b, <8 x i16> zeroinitializer, i8 %mask)
- ret <8 x i16> %res
+ %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a, <4 x i32> %b)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
+ ret <8 x i16> %3
}
-declare <8 x i16> @llvm.x86.avx512.mask.packusdw.128(<4 x i32>, <4 x i32>, <8 x i16>, i8)
+declare <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32>, <4 x i32>)
define <16 x i16> @test_mask_packus_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: test_mask_packus_epi32_rr_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x2b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 -1)
- ret <16 x i16> %res
+ %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a, <8 x i32> %b)
+ ret <16 x i16> %1
}
define <16 x i16> @test_mask_packus_epi32_rrk_256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rrk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackusdw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x2b,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask)
- ret <16 x i16> %res
+ %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a, <8 x i32> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
+ ret <16 x i16> %3
}
define <16 x i16> @test_mask_packus_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rrkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x2b,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 %mask)
- ret <16 x i16> %res
+ %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a, <8 x i32> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
+ ret <16 x i16> %3
}
define <16 x i16> @test_mask_packus_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
@@ -2467,31 +1420,35 @@ define <16 x i16> @test_mask_packus_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b
; CHECK-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i32>, <8 x i32>* %ptr_b
- %res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 -1)
- ret <16 x i16> %res
+ %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a, <8 x i32> %b)
+ ret <16 x i16> %1
}
define <16 x i16> @test_mask_packus_epi32_rmk_256(<8 x i32> %a, <8 x i32>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x2b,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i32>, <8 x i32>* %ptr_b
- %res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask)
- ret <16 x i16> %res
+ %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a, <8 x i32> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
+ ret <16 x i16> %3
}
define <16 x i16> @test_mask_packus_epi32_rmkz_256(<8 x i32> %a, <8 x i32>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i32>, <8 x i32>* %ptr_b
- %res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 %mask)
- ret <16 x i16> %res
+ %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a, <8 x i32> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
+ ret <16 x i16> %3
}
define <16 x i16> @test_mask_packus_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
@@ -2502,67 +1459,75 @@ define <16 x i16> @test_mask_packus_epi32_rmb_256(<8 x i32> %a, i32* %ptr_b) {
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0
%b = shufflevector <8 x i32> %vecinit.i, <8 x i32> undef, <8 x i32> zeroinitializer
- %res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 -1)
- ret <16 x i16> %res
+ %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a, <8 x i32> %b)
+ ret <16 x i16> %1
}
define <16 x i16> @test_mask_packus_epi32_rmbk_256(<8 x i32> %a, i32* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmbk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x39,0x2b,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0
%b = shufflevector <8 x i32> %vecinit.i, <8 x i32> undef, <8 x i32> zeroinitializer
- %res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> %passThru, i16 %mask)
- ret <16 x i16> %res
+ %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a, <8 x i32> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
+ ret <16 x i16> %3
}
define <16 x i16> @test_mask_packus_epi32_rmbkz_256(<8 x i32> %a, i32* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi32_rmbkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackusdw (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xb9,0x2b,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0
%b = shufflevector <8 x i32> %vecinit.i, <8 x i32> undef, <8 x i32> zeroinitializer
- %res = call <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32> %a, <8 x i32> %b, <16 x i16> zeroinitializer, i16 %mask)
- ret <16 x i16> %res
+ %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a, <8 x i32> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
+ ret <16 x i16> %3
}
-declare <16 x i16> @llvm.x86.avx512.mask.packusdw.256(<8 x i32>, <8 x i32>, <16 x i16>, i16)
+declare <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32>, <8 x i32>)
define <16 x i8> @test_mask_packus_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_mask_packus_epi16_rr_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x67,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.avx512.mask.packuswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> zeroinitializer, i16 -1)
- ret <16 x i8> %res
+ %1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a, <8 x i16> %b)
+ ret <16 x i8> %1
}
define <16 x i8> @test_mask_packus_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x67,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.avx512.mask.packuswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> %passThru, i16 %mask)
- ret <16 x i8> %res
+ %1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a, <8 x i16> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
+ ret <16 x i8> %3
}
define <16 x i8> @test_mask_packus_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x67,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.avx512.mask.packuswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> zeroinitializer, i16 %mask)
- ret <16 x i8> %res
+ %1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a, <8 x i16> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
+ ret <16 x i8> %3
}
define <16 x i8> @test_mask_packus_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
@@ -2571,42 +1536,46 @@ define <16 x i8> @test_mask_packus_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b)
; CHECK-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x67,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
- %res = call <16 x i8> @llvm.x86.avx512.mask.packuswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> zeroinitializer, i16 -1)
- ret <16 x i8> %res
+ %1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a, <8 x i16> %b)
+ ret <16 x i8> %1
}
define <16 x i8> @test_mask_packus_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackuswb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x67,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
- %res = call <16 x i8> @llvm.x86.avx512.mask.packuswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> %passThru, i16 %mask)
- ret <16 x i8> %res
+ %1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a, <8 x i16> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
+ ret <16 x i8> %3
}
define <16 x i8> @test_mask_packus_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_packus_epi16_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x67,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
- %res = call <16 x i8> @llvm.x86.avx512.mask.packuswb.128(<8 x i16> %a, <8 x i16> %b, <16 x i8> zeroinitializer, i16 %mask)
- ret <16 x i8> %res
+ %1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a, <8 x i16> %b)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
+ ret <16 x i8> %3
}
-declare <16 x i8> @llvm.x86.avx512.mask.packuswb.128(<8 x i16>, <8 x i16>, <16 x i8>, i16)
+declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>)
define <32 x i8> @test_mask_packus_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: test_mask_packus_epi16_rr_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x67,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <32 x i8> @llvm.x86.avx512.mask.packuswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> zeroinitializer, i32 -1)
- ret <32 x i8> %res
+ %1 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a, <16 x i16> %b)
+ ret <32 x i8> %1
}
define <32 x i8> @test_mask_packus_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <32 x i8> %passThru, i32 %mask) {
@@ -2616,8 +1585,10 @@ define <32 x i8> @test_mask_packus_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <
; CHECK-NEXT: vpackuswb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x67,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <32 x i8> @llvm.x86.avx512.mask.packuswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> %passThru, i32 %mask)
- ret <32 x i8> %res
+ %1 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a, <16 x i16> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
+ ret <32 x i8> %3
}
define <32 x i8> @test_mask_packus_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i32 %mask) {
@@ -2626,8 +1597,10 @@ define <32 x i8> @test_mask_packus_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b,
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x67,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <32 x i8> @llvm.x86.avx512.mask.packuswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> zeroinitializer, i32 %mask)
- ret <32 x i8> %res
+ %1 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a, <16 x i16> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
+ ret <32 x i8> %3
}
define <32 x i8> @test_mask_packus_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
@@ -2636,8 +1609,8 @@ define <32 x i8> @test_mask_packus_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_
; CHECK-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x67,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
- %res = call <32 x i8> @llvm.x86.avx512.mask.packuswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> zeroinitializer, i32 -1)
- ret <32 x i8> %res
+ %1 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a, <16 x i16> %b)
+ ret <32 x i8> %1
}
define <32 x i8> @test_mask_packus_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
@@ -2648,8 +1621,10 @@ define <32 x i8> @test_mask_packus_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
- %res = call <32 x i8> @llvm.x86.avx512.mask.packuswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> %passThru, i32 %mask)
- ret <32 x i8> %res
+ %1 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a, <16 x i16> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
+ ret <32 x i8> %3
}
define <32 x i8> @test_mask_packus_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i32 %mask) {
@@ -2659,11 +1634,13 @@ define <32 x i8> @test_mask_packus_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %pt
; CHECK-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x67,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
- %res = call <32 x i8> @llvm.x86.avx512.mask.packuswb.256(<16 x i16> %a, <16 x i16> %b, <32 x i8> zeroinitializer, i32 %mask)
- ret <32 x i8> %res
+ %1 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a, <16 x i16> %b)
+ %2 = bitcast i32 %mask to <32 x i1>
+ %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
+ ret <32 x i8> %3
}
-declare <32 x i8> @llvm.x86.avx512.mask.packuswb.256(<16 x i16>, <16 x i16>, <32 x i8>, i32)
+declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>)
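; From here on the saturating add/subtract tests keep their masked
; intrinsics; the only change in each hunk is the expected mask-register
; move. Both kmovw and kmovd fill %k1 from a 32-bit GPR, and the masked
; instruction consumes only as many mask bits as it has lanes, so the
; CHECK lines differ only in the mnemonic and the VEX prefix byte
; (0xf8 for kmovw versus 0xfb, i.e. the F2 prefix, for kmovd).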
define <8 x i16> @test_mask_adds_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_mask_adds_epi16_rr_128:
@@ -2677,7 +1654,7 @@ define <8 x i16> @test_mask_adds_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test_mask_adds_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_adds_epi16_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddsw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xed,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2688,7 +1665,7 @@ define <8 x i16> @test_mask_adds_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x
define <8 x i16> @test_mask_adds_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-LABEL: test_mask_adds_epi16_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xed,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.padds.w.128(<8 x i16> %a, <8 x i16> %b, <8 x i16> zeroinitializer, i8 %mask)
@@ -2708,7 +1685,7 @@ define <8 x i16> @test_mask_adds_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
define <8 x i16> @test_mask_adds_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_adds_epi16_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddsw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xed,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2720,7 +1697,7 @@ define <8 x i16> @test_mask_adds_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b,
define <8 x i16> @test_mask_adds_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_adds_epi16_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xed,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
@@ -2742,7 +1719,7 @@ define <16 x i16> @test_mask_adds_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
define <16 x i16> @test_mask_adds_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epi16_rrk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddsw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xed,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2753,7 +1730,7 @@ define <16 x i16> @test_mask_adds_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <1
define <16 x i16> @test_mask_adds_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epi16_rrkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xed,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.padds.w.256(<16 x i16> %a, <16 x i16> %b, <16 x i16> zeroinitializer, i16 %mask)
@@ -2773,7 +1750,7 @@ define <16 x i16> @test_mask_adds_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b
define <16 x i16> @test_mask_adds_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epi16_rmk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddsw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xed,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2785,7 +1762,7 @@ define <16 x i16> @test_mask_adds_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_
define <16 x i16> @test_mask_adds_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epi16_rmkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xed,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
@@ -2807,7 +1784,7 @@ define <8 x i16> @test_mask_subs_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test_mask_subs_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_subs_epi16_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubsw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe9,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2818,7 +1795,7 @@ define <8 x i16> @test_mask_subs_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x
define <8 x i16> @test_mask_subs_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-LABEL: test_mask_subs_epi16_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xe9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.psubs.w.128(<8 x i16> %a, <8 x i16> %b, <8 x i16> zeroinitializer, i8 %mask)
@@ -2838,7 +1815,7 @@ define <8 x i16> @test_mask_subs_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
define <8 x i16> @test_mask_subs_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_subs_epi16_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubsw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe9,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2850,7 +1827,7 @@ define <8 x i16> @test_mask_subs_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b,
define <8 x i16> @test_mask_subs_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_subs_epi16_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xe9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
@@ -2872,7 +1849,7 @@ define <16 x i16> @test_mask_subs_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
define <16 x i16> @test_mask_subs_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epi16_rrk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubsw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe9,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2883,7 +1860,7 @@ define <16 x i16> @test_mask_subs_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <1
define <16 x i16> @test_mask_subs_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epi16_rrkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xe9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.psubs.w.256(<16 x i16> %a, <16 x i16> %b, <16 x i16> zeroinitializer, i16 %mask)
@@ -2903,7 +1880,7 @@ define <16 x i16> @test_mask_subs_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b
define <16 x i16> @test_mask_subs_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epi16_rmk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubsw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe9,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2915,7 +1892,7 @@ define <16 x i16> @test_mask_subs_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_
define <16 x i16> @test_mask_subs_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epi16_rmkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xe9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
@@ -2937,7 +1914,7 @@ define <8 x i16> @test_mask_adds_epu16_rr_128(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test_mask_adds_epu16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_adds_epu16_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddusw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xdd,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2948,7 +1925,7 @@ define <8 x i16> @test_mask_adds_epu16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x
define <8 x i16> @test_mask_adds_epu16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-LABEL: test_mask_adds_epu16_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.paddus.w.128(<8 x i16> %a, <8 x i16> %b, <8 x i16> zeroinitializer, i8 %mask)
@@ -2968,7 +1945,7 @@ define <8 x i16> @test_mask_adds_epu16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
define <8 x i16> @test_mask_adds_epu16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_adds_epu16_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddusw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xdd,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2980,7 +1957,7 @@ define <8 x i16> @test_mask_adds_epu16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b,
define <8 x i16> @test_mask_adds_epu16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_adds_epu16_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdd,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
@@ -3002,7 +1979,7 @@ define <16 x i16> @test_mask_adds_epu16_rr_256(<16 x i16> %a, <16 x i16> %b) {
define <16 x i16> @test_mask_adds_epu16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epu16_rrk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddusw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xdd,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3013,7 +1990,7 @@ define <16 x i16> @test_mask_adds_epu16_rrk_256(<16 x i16> %a, <16 x i16> %b, <1
define <16 x i16> @test_mask_adds_epu16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epu16_rrkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xdd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.paddus.w.256(<16 x i16> %a, <16 x i16> %b, <16 x i16> zeroinitializer, i16 %mask)
@@ -3033,7 +2010,7 @@ define <16 x i16> @test_mask_adds_epu16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b
define <16 x i16> @test_mask_adds_epu16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epu16_rmk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddusw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xdd,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3045,7 +2022,7 @@ define <16 x i16> @test_mask_adds_epu16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_
define <16 x i16> @test_mask_adds_epu16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epu16_rmkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xdd,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
@@ -3067,7 +2044,7 @@ define <8 x i16> @test_mask_subs_epu16_rr_128(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test_mask_subs_epu16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_subs_epu16_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubusw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd9,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3078,7 +2055,7 @@ define <8 x i16> @test_mask_subs_epu16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x
define <8 x i16> @test_mask_subs_epu16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-LABEL: test_mask_subs_epu16_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.psubus.w.128(<8 x i16> %a, <8 x i16> %b, <8 x i16> zeroinitializer, i8 %mask)
@@ -3098,7 +2075,7 @@ define <8 x i16> @test_mask_subs_epu16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
define <8 x i16> @test_mask_subs_epu16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_subs_epu16_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubusw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd9,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3110,7 +2087,7 @@ define <8 x i16> @test_mask_subs_epu16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b,
define <8 x i16> @test_mask_subs_epu16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_subs_epu16_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i16>, <8 x i16>* %ptr_b
@@ -3132,7 +2109,7 @@ define <16 x i16> @test_mask_subs_epu16_rr_256(<16 x i16> %a, <16 x i16> %b) {
define <16 x i16> @test_mask_subs_epu16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epu16_rrk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubusw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd9,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3143,7 +2120,7 @@ define <16 x i16> @test_mask_subs_epu16_rrk_256(<16 x i16> %a, <16 x i16> %b, <1
define <16 x i16> @test_mask_subs_epu16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epu16_rrkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd9,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.psubus.w.256(<16 x i16> %a, <16 x i16> %b, <16 x i16> zeroinitializer, i16 %mask)
@@ -3163,7 +2140,7 @@ define <16 x i16> @test_mask_subs_epu16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b
define <16 x i16> @test_mask_subs_epu16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epu16_rmk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubusw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd9,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3175,7 +2152,7 @@ define <16 x i16> @test_mask_subs_epu16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_
define <16 x i16> @test_mask_subs_epu16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epu16_rmkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd9,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i16>, <16 x i16>* %ptr_b
@@ -3197,7 +2174,7 @@ define <16 x i8> @test_mask_adds_epi8_rr_128(<16 x i8> %a, <16 x i8> %b) {
define <16 x i8> @test_mask_adds_epi8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epi8_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddsb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xec,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3208,7 +2185,7 @@ define <16 x i8> @test_mask_adds_epi8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x
define <16 x i8> @test_mask_adds_epi8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epi8_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xec,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.avx512.mask.padds.b.128(<16 x i8> %a, <16 x i8> %b, <16 x i8> zeroinitializer, i16 %mask)
@@ -3228,7 +2205,7 @@ define <16 x i8> @test_mask_adds_epi8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
define <16 x i8> @test_mask_adds_epi8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epi8_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddsb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xec,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3240,7 +2217,7 @@ define <16 x i8> @test_mask_adds_epi8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <
define <16 x i8> @test_mask_adds_epi8_rmkz_128(<16 x i8> %a, <16 x i8>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epi8_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xec,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i8>, <16 x i8>* %ptr_b
@@ -3327,7 +2304,7 @@ define <16 x i8> @test_mask_subs_epi8_rr_128(<16 x i8> %a, <16 x i8> %b) {
define <16 x i8> @test_mask_subs_epi8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epi8_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubsb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe8,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3338,7 +2315,7 @@ define <16 x i8> @test_mask_subs_epi8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x
define <16 x i8> @test_mask_subs_epi8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epi8_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xe8,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.avx512.mask.psubs.b.128(<16 x i8> %a, <16 x i8> %b, <16 x i8> zeroinitializer, i16 %mask)
@@ -3358,7 +2335,7 @@ define <16 x i8> @test_mask_subs_epi8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
define <16 x i8> @test_mask_subs_epi8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epi8_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubsb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe8,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3370,7 +2347,7 @@ define <16 x i8> @test_mask_subs_epi8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <
define <16 x i8> @test_mask_subs_epi8_rmkz_128(<16 x i8> %a, <16 x i8>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epi8_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xe8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i8>, <16 x i8>* %ptr_b
@@ -3457,7 +2434,7 @@ define <16 x i8> @test_mask_adds_epu8_rr_128(<16 x i8> %a, <16 x i8> %b) {
define <16 x i8> @test_mask_adds_epu8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epu8_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddusb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xdc,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3468,7 +2445,7 @@ define <16 x i8> @test_mask_adds_epu8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x
define <16 x i8> @test_mask_adds_epu8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epu8_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdc,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.avx512.mask.paddus.b.128(<16 x i8> %a, <16 x i8> %b, <16 x i8> zeroinitializer, i16 %mask)
@@ -3488,7 +2465,7 @@ define <16 x i8> @test_mask_adds_epu8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
define <16 x i8> @test_mask_adds_epu8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epu8_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddusb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xdc,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3500,7 +2477,7 @@ define <16 x i8> @test_mask_adds_epu8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <
define <16 x i8> @test_mask_adds_epu8_rmkz_128(<16 x i8> %a, <16 x i8>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_adds_epu8_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xdc,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i8>, <16 x i8>* %ptr_b
@@ -3587,7 +2564,7 @@ define <16 x i8> @test_mask_subs_epu8_rr_128(<16 x i8> %a, <16 x i8> %b) {
define <16 x i8> @test_mask_subs_epu8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epu8_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubusb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd8,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3598,7 +2575,7 @@ define <16 x i8> @test_mask_subs_epu8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x
define <16 x i8> @test_mask_subs_epu8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epu8_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd8,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.avx512.mask.psubus.b.128(<16 x i8> %a, <16 x i8> %b, <16 x i8> zeroinitializer, i16 %mask)
@@ -3618,7 +2595,7 @@ define <16 x i8> @test_mask_subs_epu8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
define <16 x i8> @test_mask_subs_epu8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epu8_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubusb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd8,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -3630,7 +2607,7 @@ define <16 x i8> @test_mask_subs_epu8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <
define <16 x i8> @test_mask_subs_epu8_rmkz_128(<16 x i8> %a, <16 x i8>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_subs_epu8_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd8,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <16 x i8>, <16 x i8>* %ptr_b
@@ -3710,11 +2687,11 @@ declare <8 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.128(<8 x i16>, <8 x i16>,
define <8 x i16>@test_int_x86_avx512_mask_vpermt2var_hi_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_hi_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd9]
-; CHECK-NEXT: vpermt2w %xmm2, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x7d,0xda]
-; CHECK-NEXT: vpermt2w %xmm2, %xmm0, %xmm1 ## encoding: [0x62,0xf2,0xfd,0x08,0x7d,0xca]
-; CHECK-NEXT: vpaddw %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfd,0xc1]
+; CHECK-NEXT: vpermt2w %xmm2, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x7d,0xda]
+; CHECK-NEXT: vpermt2w %xmm2, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x7d,0xca]
+; CHECK-NEXT: vpaddw %xmm3, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfd,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3)
%res1 = call <8 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1)
@@ -3727,11 +2704,11 @@ declare <8 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.128(<8 x i16>, <8 x i16>,
define <8 x i16>@test_int_x86_avx512_maskz_vpermt2var_hi_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_hi_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd9]
-; CHECK-NEXT: vpermt2w %xmm2, %xmm0, %xmm3 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x7d,0xda]
-; CHECK-NEXT: vpermt2w %xmm2, %xmm0, %xmm1 ## encoding: [0x62,0xf2,0xfd,0x08,0x7d,0xca]
-; CHECK-NEXT: vpaddw %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfd,0xc1]
+; CHECK-NEXT: vpermt2w %xmm2, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x7d,0xda]
+; CHECK-NEXT: vpermt2w %xmm2, %xmm0, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x7d,0xca]
+; CHECK-NEXT: vpaddw %xmm3, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfd,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3)
%res1 = call <8 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1)
@@ -3744,11 +2721,11 @@ declare <16 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.256(<16 x i16>, <16 x i16
define <16 x i16>@test_int_x86_avx512_mask_vpermt2var_hi_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_hi_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd9]
-; CHECK-NEXT: vpermt2w %ymm2, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x7d,0xda]
-; CHECK-NEXT: vpermt2w %ymm2, %ymm0, %ymm1 ## encoding: [0x62,0xf2,0xfd,0x28,0x7d,0xca]
-; CHECK-NEXT: vpaddw %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfd,0xc1]
+; CHECK-NEXT: vpermt2w %ymm2, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x7d,0xda]
+; CHECK-NEXT: vpermt2w %ymm2, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x7d,0xca]
+; CHECK-NEXT: vpaddw %ymm3, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3)
%res1 = call <16 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 -1)
@@ -3761,11 +2738,11 @@ declare <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16>, <16 x i1
define <16 x i16>@test_int_x86_avx512_maskz_vpermt2var_hi_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_hi_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd9]
-; CHECK-NEXT: vpermt2w %ymm2, %ymm0, %ymm3 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x7d,0xda]
-; CHECK-NEXT: vpermt2w %ymm2, %ymm0, %ymm1 ## encoding: [0x62,0xf2,0xfd,0x28,0x7d,0xca]
-; CHECK-NEXT: vpaddw %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfd,0xc1]
+; CHECK-NEXT: vpermt2w %ymm2, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x7d,0xda]
+; CHECK-NEXT: vpermt2w %ymm2, %ymm0, %ymm1 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x7d,0xca]
+; CHECK-NEXT: vpaddw %ymm3, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3)
%res1 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 -1)
@@ -3778,11 +2755,11 @@ declare <8 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.128(<8 x i16>, <8 x i16>,
define <8 x i16>@test_int_x86_avx512_mask_vpermi2var_hi_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_hi_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd9]
-; CHECK-NEXT: vpermi2w %xmm2, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x75,0xda]
-; CHECK-NEXT: vpermi2w %xmm2, %xmm0, %xmm1 ## encoding: [0x62,0xf2,0xfd,0x08,0x75,0xca]
-; CHECK-NEXT: vpaddw %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfd,0xc1]
+; CHECK-NEXT: vpermi2w %xmm2, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x75,0xda]
+; CHECK-NEXT: vpermi2w %xmm2, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x75,0xca]
+; CHECK-NEXT: vpaddw %xmm3, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfd,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3)
%res1 = call <8 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1)
@@ -3795,11 +2772,11 @@ declare <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16>, <16 x i16
define <16 x i16>@test_int_x86_avx512_mask_vpermi2var_hi_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_hi_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd9]
-; CHECK-NEXT: vpermi2w %ymm2, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x75,0xda]
-; CHECK-NEXT: vpermi2w %ymm2, %ymm0, %ymm1 ## encoding: [0x62,0xf2,0xfd,0x28,0x75,0xca]
-; CHECK-NEXT: vpaddw %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfd,0xc1]
+; CHECK-NEXT: vpermi2w %ymm2, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x75,0xda]
+; CHECK-NEXT: vpermi2w %ymm2, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x75,0xca]
+; CHECK-NEXT: vpaddw %ymm3, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3)
%res1 = call <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 -1)
@@ -3812,7 +2789,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pavg.b.128(<16 x i8>, <16 x i8>, <16 x i
define <16 x i8>@test_int_x86_avx512_mask_pavg_b_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pavg_b_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpavgb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe0,0xd1]
; CHECK-NEXT: vpavgb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe0,0xc1]
; CHECK-NEXT: vpaddb %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
@@ -3844,7 +2821,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pavg.w.128(<8 x i16>, <8 x i16>, <8 x i1
define <8 x i16>@test_int_x86_avx512_mask_pavg_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pavg_w_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpavgw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe3,0xd1]
; CHECK-NEXT: vpavgw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe3,0xc1]
; CHECK-NEXT: vpaddw %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
@@ -3860,7 +2837,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pavg.w.256(<16 x i16>, <16 x i16>, <16
define <16 x i16>@test_int_x86_avx512_mask_pavg_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pavg_w_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpavgw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe3,0xd1]
; CHECK-NEXT: vpavgw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe3,0xc1]
; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
@@ -3876,7 +2853,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.pabs.b.128(<16 x i8>, <16 x i8>, i16)
define <16 x i8>@test_int_x86_avx512_mask_pabs_b_128(<16 x i8> %x0, <16 x i8> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pabs_b_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpabsb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x1c,0xc8]
; CHECK-NEXT: vpabsb %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1c,0xc0]
; CHECK-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfc,0xc0]
@@ -3908,7 +2885,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pabs.w.128(<8 x i16>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pabs_w_128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pabs_w_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpabsw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x1d,0xc8]
; CHECK-NEXT: vpabsw %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x1d,0xc0]
; CHECK-NEXT: vpaddw %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfd,0xc0]
@@ -3924,7 +2901,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pabs.w.256(<16 x i16>, <16 x i16>, i16)
define <16 x i16>@test_int_x86_avx512_mask_pabs_w_256(<16 x i16> %x0, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pabs_w_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpabsw %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x1d,0xc8]
; CHECK-NEXT: vpabsw %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x1d,0xc0]
; CHECK-NEXT: vpaddw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xc0]
@@ -3940,7 +2917,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmulhu.w.128(<8 x i16>, <8 x i16>, <8 x
define <8 x i16>@test_int_x86_avx512_mask_pmulhu_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmulhu_w_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmulhuw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe4,0xd1]
; CHECK-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe4,0xc1]
; CHECK-NEXT: vpaddw %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
@@ -3956,7 +2933,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmulhu.w.256(<16 x i16>, <16 x i16>, <1
define <16 x i16>@test_int_x86_avx512_mask_pmulhu_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmulhu_w_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmulhuw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe4,0xd1]
; CHECK-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe4,0xc1]
; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
@@ -3972,7 +2949,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmulh.w.128(<8 x i16>, <8 x i16>, <8 x i
define <8 x i16>@test_int_x86_avx512_mask_pmulh_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmulh_w_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmulhw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe5,0xd1]
; CHECK-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe5,0xc1]
; CHECK-NEXT: vpaddw %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
@@ -3988,7 +2965,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmulh.w.256(<16 x i16>, <16 x i16>, <16
define <16 x i16>@test_int_x86_avx512_mask_pmulh_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmulh_w_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmulhw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe5,0xd1]
; CHECK-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe5,0xc1]
; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
@@ -4004,7 +2981,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmul.hr.sw.128(<8 x i16>, <8 x i16>, <8
define <8 x i16>@test_int_x86_avx512_mask_pmulhr_sw_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmulhr_sw_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x0b,0xd1]
; CHECK-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x0b,0xc1]
; CHECK-NEXT: vpaddw %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
@@ -4020,7 +2997,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmul.hr.sw.256(<16 x i16>, <16 x i16>,
define <16 x i16>@test_int_x86_avx512_mask_pmulhr_sw_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmulhr_sw_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x0b,0xd1]
; CHECK-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0b,0xc1]
; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
@@ -4036,9 +3013,9 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmov.wb.128(<8 x i16>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmov_wb_128(<8 x i16> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_wb_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovwb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x30,0xc1]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovwb %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x30,0xc2]
+; CHECK-NEXT: vpmovwb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x30,0xc1]
; CHECK-NEXT: vpmovwb %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x30,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc2]
@@ -4056,7 +3033,7 @@ declare void @llvm.x86.avx512.mask.pmov.wb.mem.128(i8* %ptr, <8 x i16>, i8)
define void @test_int_x86_avx512_mask_pmov_wb_mem_128(i8* %ptr, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_wb_mem_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmovwb %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x30,0x07]
; CHECK-NEXT: vpmovwb %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x30,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4070,9 +3047,9 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovs.wb.128(<8 x i16>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmovs_wb_128(<8 x i16> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_wb_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovswb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x20,0xc1]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovswb %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x20,0xc2]
+; CHECK-NEXT: vpmovswb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x20,0xc1]
; CHECK-NEXT: vpmovswb %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x20,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc2]
@@ -4090,7 +3067,7 @@ declare void @llvm.x86.avx512.mask.pmovs.wb.mem.128(i8* %ptr, <8 x i16>, i8)
define void @test_int_x86_avx512_mask_pmovs_wb_mem_128(i8* %ptr, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_wb_mem_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmovswb %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x20,0x07]
; CHECK-NEXT: vpmovswb %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x20,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4104,9 +3081,9 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovus.wb.128(<8 x i16>, <16 x i8>, i8)
define <16 x i8>@test_int_x86_avx512_mask_pmovus_wb_128(<8 x i16> %x0, <16 x i8> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_wb_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovuswb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x10,0xc1]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovuswb %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x10,0xc2]
+; CHECK-NEXT: vpmovuswb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x10,0xc1]
; CHECK-NEXT: vpmovuswb %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x10,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc2]
@@ -4124,7 +3101,7 @@ declare void @llvm.x86.avx512.mask.pmovus.wb.mem.128(i8* %ptr, <8 x i16>, i8)
define void @test_int_x86_avx512_mask_pmovus_wb_mem_128(i8* %ptr, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_wb_mem_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmovuswb %xmm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x08,0x10,0x07]
; CHECK-NEXT: vpmovuswb %xmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4138,9 +3115,9 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmov.wb.256(<16 x i16>, <16 x i8>, i16)
define <16 x i8>@test_int_x86_avx512_mask_pmov_wb_256(<16 x i16> %x0, <16 x i8> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_wb_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovwb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x30,0xc1]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovwb %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x30,0xc2]
+; CHECK-NEXT: vpmovwb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x30,0xc1]
; CHECK-NEXT: vpmovwb %ymm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x30,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc2]
@@ -4158,7 +3135,7 @@ declare void @llvm.x86.avx512.mask.pmov.wb.mem.256(i8* %ptr, <16 x i16>, i16)
define void @test_int_x86_avx512_mask_pmov_wb_mem_256(i8* %ptr, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_wb_mem_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmovwb %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x30,0x07]
; CHECK-NEXT: vpmovwb %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x30,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4172,9 +3149,9 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovs.wb.256(<16 x i16>, <16 x i8>, i16)
define <16 x i8>@test_int_x86_avx512_mask_pmovs_wb_256(<16 x i16> %x0, <16 x i8> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_wb_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovswb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x20,0xc1]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovswb %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x20,0xc2]
+; CHECK-NEXT: vpmovswb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x20,0xc1]
; CHECK-NEXT: vpmovswb %ymm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x20,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc2]
@@ -4192,7 +3169,7 @@ declare void @llvm.x86.avx512.mask.pmovs.wb.mem.256(i8* %ptr, <16 x i16>, i16)
define void @test_int_x86_avx512_mask_pmovs_wb_mem_256(i8* %ptr, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_wb_mem_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmovswb %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x20,0x07]
; CHECK-NEXT: vpmovswb %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x20,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4206,9 +3183,9 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovus.wb.256(<16 x i16>, <16 x i8>, i16
define <16 x i8>@test_int_x86_avx512_mask_pmovus_wb_256(<16 x i16> %x0, <16 x i8> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_wb_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovuswb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x10,0xc1]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmovuswb %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x10,0xc2]
+; CHECK-NEXT: vpmovuswb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x10,0xc1]
; CHECK-NEXT: vpmovuswb %ymm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x10,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc2]
@@ -4226,7 +3203,7 @@ declare void @llvm.x86.avx512.mask.pmovus.wb.mem.256(i8* %ptr, <16 x i16>, i16)
define void @test_int_x86_avx512_mask_pmovus_wb_mem_256(i8* %ptr, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_wb_mem_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpmovuswb %ymm0, (%rdi) ## encoding: [0x62,0xf2,0x7e,0x28,0x10,0x07]
; CHECK-NEXT: vpmovuswb %ymm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4240,7 +3217,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pmaddw.d.128(<8 x i16>, <8 x i16>, <4 x
define <4 x i32>@test_int_x86_avx512_mask_pmaddw_d_128(<8 x i16> %x0, <8 x i16> %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaddw_d_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaddwd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xf5,0xd1]
; CHECK-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf5,0xc1]
; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
@@ -4256,7 +3233,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pmaddw.d.256(<16 x i16>, <16 x i16>, <8
define <8 x i32>@test_int_x86_avx512_mask_pmaddw_d_256(<16 x i16> %x0, <16 x i16> %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaddw_d_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaddwd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xf5,0xd1]
; CHECK-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf5,0xc1]
; CHECK-NEXT: vpaddd %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
@@ -4272,7 +3249,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmaddubs.w.128(<16 x i8>, <16 x i8>, <8
define <8 x i16>@test_int_x86_avx512_mask_pmaddubs_w_128(<16 x i8> %x0, <16 x i8> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaddubs_w_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x04,0xd1]
; CHECK-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x04,0xc1]
; CHECK-NEXT: vpaddw %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
@@ -4288,7 +3265,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmaddubs.w.256(<32 x i8>, <32 x i8>, <1
define <16 x i16>@test_int_x86_avx512_mask_pmaddubs_w_256(<32 x i8> %x0, <32 x i8> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaddubs_w_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x04,0xd1]
; CHECK-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x04,0xc1]
; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
@@ -4304,12 +3281,12 @@ declare <8 x i16> @llvm.x86.avx512.mask.dbpsadbw.128(<16 x i8>, <16 x i8>, i32,
define <8 x i16>@test_int_x86_avx512_mask_dbpsadbw_128(<16 x i8> %x0, <16 x i8> %x1, <8 x i16> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_dbpsadbw_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vdbpsadbw $2, %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf3,0x7d,0x08,0x42,0xd9,0x02]
; CHECK-NEXT: vdbpsadbw $2, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x42,0xd1,0x02]
-; CHECK-NEXT: vdbpsadbw $2, %xmm1, %xmm0, %xmm3 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0x89,0x42,0xd9,0x02]
-; CHECK-NEXT: vdbpsadbw $2, %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0x7d,0x08,0x42,0xc1,0x02]
-; CHECK-NEXT: vpaddw %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xcb]
-; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc1]
+; CHECK-NEXT: vdbpsadbw $2, %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0x89,0x42,0xc1,0x02]
+; CHECK-NEXT: vpaddw %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
+; CHECK-NEXT: vpaddw %xmm0, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfd,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.dbpsadbw.128(<16 x i8> %x0, <16 x i8> %x1, i32 2, <8 x i16> %x3, i8 %x4)
%res1 = call <8 x i16> @llvm.x86.avx512.mask.dbpsadbw.128(<16 x i8> %x0, <16 x i8> %x1, i32 2, <8 x i16> zeroinitializer, i8 %x4)
@@ -4324,12 +3301,12 @@ declare <16 x i16> @llvm.x86.avx512.mask.dbpsadbw.256(<32 x i8>, <32 x i8>, i32,
define <16 x i16>@test_int_x86_avx512_mask_dbpsadbw_256(<32 x i8> %x0, <32 x i8> %x1, <16 x i16> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_dbpsadbw_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vdbpsadbw $2, %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf3,0x7d,0x28,0x42,0xd9,0x02]
; CHECK-NEXT: vdbpsadbw $2, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x42,0xd1,0x02]
-; CHECK-NEXT: vdbpsadbw $2, %ymm1, %ymm0, %ymm3 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0xa9,0x42,0xd9,0x02]
-; CHECK-NEXT: vdbpsadbw $2, %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0x7d,0x28,0x42,0xc1,0x02]
-; CHECK-NEXT: vpaddw %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xcb]
-; CHECK-NEXT: vpaddw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xc0]
+; CHECK-NEXT: vdbpsadbw $2, %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0xa9,0x42,0xc1,0x02]
+; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
+; CHECK-NEXT: vpaddw %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfd,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.dbpsadbw.256(<32 x i8> %x0, <32 x i8> %x1, i32 2, <16 x i16> %x3, i16 %x4)
%res1 = call <16 x i16> @llvm.x86.avx512.mask.dbpsadbw.256(<32 x i8> %x0, <32 x i8> %x1, i32 2, <16 x i16> zeroinitializer, i16 %x4)
@@ -4345,7 +3322,8 @@ define i16@test_int_x86_avx512_cvtb2mask_128(<16 x i8> %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtb2mask_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovb2m %xmm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x08,0x29,0xc0]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.cvtb2mask.128(<16 x i8> %x0)
ret i16 %res
@@ -4369,7 +3347,8 @@ define i8@test_int_x86_avx512_cvtw2mask_128(<8 x i16> %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtw2mask_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovw2m %xmm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x08,0x29,0xc0]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtw2mask.128(<8 x i16> %x0)
ret i8 %res
@@ -4381,67 +3360,20 @@ define i16@test_int_x86_avx512_cvtw2mask_256(<16 x i16> %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtw2mask_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovw2m %ymm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x28,0x29,0xc0]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.cvtw2mask.256(<16 x i16> %x0)
ret i16 %res
}
-declare <16 x i8> @llvm.x86.avx512.cvtmask2b.128(i16)
-
-define <16 x i8>@test_int_x86_avx512_cvtmask2b_128(i16 %x0) {
-; CHECK-LABEL: test_int_x86_avx512_cvtmask2b_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k0 ## encoding: [0xc5,0xf8,0x92,0xc7]
-; CHECK-NEXT: vpmovm2b %k0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x28,0xc0]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <16 x i8> @llvm.x86.avx512.cvtmask2b.128(i16 %x0)
- ret <16 x i8> %res
-}
-
-declare <32 x i8> @llvm.x86.avx512.cvtmask2b.256(i32)
-
-define <32 x i8>@test_int_x86_avx512_cvtmask2b_256(i32 %x0) {
-; CHECK-LABEL: test_int_x86_avx512_cvtmask2b_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovd %edi, %k0 ## encoding: [0xc5,0xfb,0x92,0xc7]
-; CHECK-NEXT: vpmovm2b %k0, %ymm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x28,0xc0]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <32 x i8> @llvm.x86.avx512.cvtmask2b.256(i32 %x0)
- ret <32 x i8> %res
-}
-
-declare <8 x i16> @llvm.x86.avx512.cvtmask2w.128(i8)
-
-define <8 x i16>@test_int_x86_avx512_cvtmask2w_128(i8 %x0) {
-; CHECK-LABEL: test_int_x86_avx512_cvtmask2w_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k0 ## encoding: [0xc5,0xf8,0x92,0xc7]
-; CHECK-NEXT: vpmovm2w %k0, %xmm0 ## encoding: [0x62,0xf2,0xfe,0x08,0x28,0xc0]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x i16> @llvm.x86.avx512.cvtmask2w.128(i8 %x0)
- ret <8 x i16> %res
-}
-
-declare <16 x i16> @llvm.x86.avx512.cvtmask2w.256(i16)
-
-define <16 x i16>@test_int_x86_avx512_cvtmask2w_256(i16 %x0) {
-; CHECK-LABEL: test_int_x86_avx512_cvtmask2w_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k0 ## encoding: [0xc5,0xf8,0x92,0xc7]
-; CHECK-NEXT: vpmovm2w %k0, %ymm0 ## encoding: [0x62,0xf2,0xfe,0x28,0x28,0xc0]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <16 x i16> @llvm.x86.avx512.cvtmask2w.256(i16 %x0)
- ret <16 x i16> %res
-}
-
declare <16 x i16> @llvm.x86.avx512.mask.psrlv16.hi(<16 x i16>, <16 x i16>, <16 x i16>, i16)
define <16 x i16>@test_int_x86_avx512_mask_psrlv16_hi(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrlv16_hi:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsrlvw %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x10,0xd9]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsrlvw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x10,0xd1]
; CHECK-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x10,0xc1]
; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
@@ -4461,7 +3393,7 @@ define <8 x i16>@test_int_x86_avx512_mask_psrlv8_hi(<8 x i16> %x0, <8 x i16> %x1
; CHECK-LABEL: test_int_x86_avx512_mask_psrlv8_hi:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsrlvw %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x10,0xd9]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsrlvw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x10,0xd1]
; CHECK-NEXT: vpsrlvw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x10,0xc1]
; CHECK-NEXT: vpaddw %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
@@ -4481,7 +3413,7 @@ define <16 x i16>@test_int_x86_avx512_mask_psrav16_hi(<16 x i16> %x0, <16 x i16>
; CHECK-LABEL: test_int_x86_avx512_mask_psrav16_hi:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsravw %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x11,0xd9]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsravw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x11,0xd1]
; CHECK-NEXT: vpsravw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x11,0xc1]
; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
@@ -4501,7 +3433,7 @@ define <8 x i16>@test_int_x86_avx512_mask_psrav8_hi(<8 x i16> %x0, <8 x i16> %x1
; CHECK-LABEL: test_int_x86_avx512_mask_psrav8_hi:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsravw %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x11,0xd9]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsravw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x11,0xd1]
; CHECK-NEXT: vpsravw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x11,0xc1]
; CHECK-NEXT: vpaddw %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
@@ -4521,7 +3453,7 @@ define <16 x i16>@test_int_x86_avx512_mask_psllv16_hi(<16 x i16> %x0, <16 x i16>
; CHECK-LABEL: test_int_x86_avx512_mask_psllv16_hi:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsllvw %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x12,0xd9]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsllvw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x12,0xd1]
; CHECK-NEXT: vpsllvw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x12,0xc1]
; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
@@ -4541,7 +3473,7 @@ define <8 x i16>@test_int_x86_avx512_mask_psllv8_hi(<8 x i16> %x0, <8 x i16> %x1
; CHECK-LABEL: test_int_x86_avx512_mask_psllv8_hi:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsllvw %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x12,0xd9]
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsllvw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x12,0xd1]
; CHECK-NEXT: vpsllvw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x12,0xc1]
; CHECK-NEXT: vpaddw %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
@@ -4560,12 +3492,12 @@ declare <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16>, <8 x i16>, <8
define <8 x i16>@test_int_x86_avx512_mask_permvar_hi_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_hi_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpermw %xmm0, %xmm1, %xmm3 ## encoding: [0x62,0xf2,0xf5,0x08,0x8d,0xd8]
; CHECK-NEXT: vpermw %xmm0, %xmm1, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0x8d,0xd0]
-; CHECK-NEXT: vpermw %xmm0, %xmm1, %xmm3 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0x8d,0xd8]
-; CHECK-NEXT: vpermw %xmm0, %xmm1, %xmm0 ## encoding: [0x62,0xf2,0xf5,0x08,0x8d,0xc0]
-; CHECK-NEXT: vpaddw %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xcb]
-; CHECK-NEXT: vpaddw %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfd,0xc0]
+; CHECK-NEXT: vpermw %xmm0, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0x8d,0xc0]
+; CHECK-NEXT: vpaddw %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
+; CHECK-NEXT: vpaddw %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3)
%res1 = call <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> zeroinitializer, i8 %x3)
@@ -4580,12 +3512,12 @@ declare <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16>, <16 x i16>,
define <16 x i16>@test_int_x86_avx512_mask_permvar_hi_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_hi_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpermw %ymm0, %ymm1, %ymm3 ## encoding: [0x62,0xf2,0xf5,0x28,0x8d,0xd8]
; CHECK-NEXT: vpermw %ymm0, %ymm1, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0x8d,0xd0]
-; CHECK-NEXT: vpermw %ymm0, %ymm1, %ymm3 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xa9,0x8d,0xd8]
-; CHECK-NEXT: vpermw %ymm0, %ymm1, %ymm0 ## encoding: [0x62,0xf2,0xf5,0x28,0x8d,0xc0]
-; CHECK-NEXT: vpaddw %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xcb]
-; CHECK-NEXT: vpaddw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xc0]
+; CHECK-NEXT: vpermw %ymm0, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xa9,0x8d,0xc0]
+; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
+; CHECK-NEXT: vpaddw %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfd,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3)
%res1 = call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> zeroinitializer, i16 %x3)
@@ -4600,11 +3532,11 @@ declare i16 @llvm.x86.avx512.ptestm.b.128(<16 x i8>, <16 x i8>, i16)
define i16@test_int_x86_avx512_ptestm_b_128(<16 x i8> %x0, <16 x i8> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestm_b_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vptestmb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x26,0xc1]
-; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vptestmb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0x7d,0x08,0x26,0xc1]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %ecx ## encoding: [0xc5,0xfb,0x93,0xc8]
+; CHECK-NEXT: vptestmb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x26,0xc1]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4637,12 +3569,13 @@ declare i8 @llvm.x86.avx512.ptestm.w.128(<8 x i16>, <8 x i16>, i8)
define i8@test_int_x86_avx512_ptestm_w_128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestm_w_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vptestmw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x26,0xc1]
-; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vptestmw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x26,0xc1]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %ecx ## encoding: [0xc5,0xfb,0x93,0xc8]
+; CHECK-NEXT: vptestmw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x26,0xc1]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8-1)
@@ -4655,11 +3588,11 @@ declare i16 @llvm.x86.avx512.ptestm.w.256(<16 x i16>, <16 x i16>, i16)
define i16@test_int_x86_avx512_ptestm_w_256(<16 x i16> %x0, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestm_w_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vptestmw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x26,0xc1]
-; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vptestmw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x26,0xc1]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %ecx ## encoding: [0xc5,0xfb,0x93,0xc8]
+; CHECK-NEXT: vptestmw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x26,0xc1]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4674,11 +3607,11 @@ declare i16 @llvm.x86.avx512.ptestnm.b.128(<16 x i8>, <16 x i8>, i16)
define i16@test_int_x86_avx512_ptestnm_b_128(<16 x i8> %x0, <16 x i8> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestnm_b_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vptestnmb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x26,0xc1]
-; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vptestnmb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x08,0x26,0xc1]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %ecx ## encoding: [0xc5,0xfb,0x93,0xc8]
+; CHECK-NEXT: vptestnmb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x26,0xc1]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4711,12 +3644,13 @@ declare i8 @llvm.x86.avx512.ptestnm.w.128(<8 x i16>, <8 x i16>, i8 %x2)
define i8@test_int_x86_avx512_ptestnm_w_128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestnm_w_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vptestnmw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfe,0x09,0x26,0xc1]
-; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vptestnmw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x08,0x26,0xc1]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %ecx ## encoding: [0xc5,0xfb,0x93,0xc8]
+; CHECK-NEXT: vptestnmw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfe,0x09,0x26,0xc1]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8-1)
@@ -4729,11 +3663,11 @@ declare i16 @llvm.x86.avx512.ptestnm.w.256(<16 x i16>, <16 x i16>, i16 %x2)
define i16@test_int_x86_avx512_ptestnm_w_256(<16 x i16> %x0, <16 x i16> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestnm_w_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vptestnmw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfe,0x29,0x26,0xc1]
-; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vptestnmw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x28,0x26,0xc1]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovd %k0, %ecx ## encoding: [0xc5,0xfb,0x93,0xc8]
+; CHECK-NEXT: vptestnmw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfe,0x29,0x26,0xc1]
+; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -4768,9 +3702,9 @@ declare <16 x i8> @llvm.x86.avx512.mask.pbroadcast.b.gpr.128(i8, <16 x i8>, i16)
define <16 x i8>@test_int_x86_avx512_mask_pbroadcast_b_gpr_128(i8 %x0, <16 x i8> %x1, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_b_gpr_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
-; CHECK-NEXT: vpbroadcastb %dil, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7a,0xc7]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpbroadcastb %dil, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x7a,0xcf]
+; CHECK-NEXT: vpbroadcastb %dil, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7a,0xc7]
; CHECK-NEXT: vpbroadcastb %dil, %xmm2 ## encoding: [0x62,0xf2,0x7d,0x08,0x7a,0xd7]
; CHECK-NEXT: vpaddb %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
; CHECK-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfc,0xc0]
@@ -4788,9 +3722,9 @@ declare <16 x i16> @llvm.x86.avx512.mask.pbroadcast.w.gpr.256(i16, <16 x i16>, i
define <16 x i16>@test_int_x86_avx512_mask_pbroadcast_w_gpr_256(i16 %x0, <16 x i16> %x1, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_w_gpr_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
-; CHECK-NEXT: vpbroadcastw %di, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7b,0xc7]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpbroadcastw %di, %ymm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x7b,0xcf]
+; CHECK-NEXT: vpbroadcastw %di, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7b,0xc7]
; CHECK-NEXT: vpbroadcastw %di, %ymm2 ## encoding: [0x62,0xf2,0x7d,0x28,0x7b,0xd7]
; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
; CHECK-NEXT: vpaddw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xc0]
@@ -4808,9 +3742,9 @@ declare <8 x i16> @llvm.x86.avx512.mask.pbroadcast.w.gpr.128(i16, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_pbroadcast_w_gpr_128(i16 %x0, <8 x i16> %x1, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_w_gpr_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
-; CHECK-NEXT: vpbroadcastw %di, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7b,0xc7]
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpbroadcastw %di, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x7b,0xcf]
+; CHECK-NEXT: vpbroadcastw %di, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7b,0xc7]
; CHECK-NEXT: vpbroadcastw %di, %xmm2 ## encoding: [0x62,0xf2,0x7d,0x08,0x7b,0xd7]
; CHECK-NEXT: vpaddw %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
; CHECK-NEXT: vpaddw %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfd,0xc0]
diff --git a/test/CodeGen/X86/avx512bwvl-vec-cmp.ll b/test/CodeGen/X86/avx512bwvl-vec-cmp.ll
index 3e7f0acae78b..17e581bbb501 100644
--- a/test/CodeGen/X86/avx512bwvl-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512bwvl-vec-cmp.ll
@@ -5,8 +5,7 @@ define <32 x i8> @test256_1(<32 x i8> %x, <32 x i8> %y) nounwind {
; CHECK-LABEL: test256_1:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k1
-; CHECK-NEXT: vmovdqu8 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmb %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp eq <32 x i8> %x, %y
%max = select <32 x i1> %mask, <32 x i8> %x, <32 x i8> %y
@@ -17,8 +16,7 @@ define <32 x i8> @test256_2(<32 x i8> %x, <32 x i8> %y, <32 x i8> %x1) nounwind
; CHECK-LABEL: test256_2:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtb %ymm1, %ymm0, %k1
-; CHECK-NEXT: vmovdqu8 %ymm0, %ymm2 {%k1}
-; CHECK-NEXT: vmovdqa %ymm2, %ymm0
+; CHECK-NEXT: vpblendmb %ymm0, %ymm2, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp sgt <32 x i8> %x, %y
%max = select <32 x i1> %mask, <32 x i8> %x, <32 x i8> %x1
@@ -29,8 +27,7 @@ define <16 x i16> @test256_3(<16 x i16> %x, <16 x i16> %y, <16 x i16> %x1) nounw
; CHECK-LABEL: test256_3:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmplew %ymm0, %ymm1, %k1
-; CHECK-NEXT: vmovdqu16 %ymm2, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmw %ymm2, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp sge <16 x i16> %x, %y
%max = select <16 x i1> %mask, <16 x i16> %x1, <16 x i16> %y
@@ -41,8 +38,7 @@ define <32 x i8> @test256_4(<32 x i8> %x, <32 x i8> %y, <32 x i8> %x1) nounwind
; CHECK-LABEL: test256_4:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnleub %ymm1, %ymm0, %k1
-; CHECK-NEXT: vmovdqu8 %ymm0, %ymm2 {%k1}
-; CHECK-NEXT: vmovdqa %ymm2, %ymm0
+; CHECK-NEXT: vpblendmb %ymm0, %ymm2, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp ugt <32 x i8> %x, %y
%max = select <32 x i1> %mask, <32 x i8> %x, <32 x i8> %x1
@@ -53,8 +49,7 @@ define <16 x i16> @test256_5(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %yp) nou
; CHECK-LABEL: test256_5:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqw (%rdi), %ymm0, %k1
-; CHECK-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%y = load <16 x i16>, <16 x i16>* %yp, align 4
%mask = icmp eq <16 x i16> %x, %y
@@ -66,8 +61,7 @@ define <16 x i16> @test256_6(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr)
; CHECK-LABEL: test256_6:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtw (%rdi), %ymm0, %k1
-; CHECK-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%y = load <16 x i16>, <16 x i16>* %y.ptr, align 4
%mask = icmp sgt <16 x i16> %x, %y
@@ -79,8 +73,7 @@ define <16 x i16> @test256_7(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr)
; CHECK-LABEL: test256_7:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmplew (%rdi), %ymm0, %k1
-; CHECK-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%y = load <16 x i16>, <16 x i16>* %y.ptr, align 4
%mask = icmp sle <16 x i16> %x, %y
@@ -92,8 +85,7 @@ define <16 x i16> @test256_8(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr)
; CHECK-LABEL: test256_8:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleuw (%rdi), %ymm0, %k1
-; CHECK-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%y = load <16 x i16>, <16 x i16>* %y.ptr, align 4
%mask = icmp ule <16 x i16> %x, %y
@@ -106,8 +98,7 @@ define <16 x i16> @test256_9(<16 x i16> %x, <16 x i16> %y, <16 x i16> %x1, <16 x
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm2, %k1 {%k1}
-; CHECK-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp eq <16 x i16> %x1, %y1
%mask0 = icmp eq <16 x i16> %x, %y
@@ -121,8 +112,7 @@ define <32 x i8> @test256_10(<32 x i8> %x, <32 x i8> %y, <32 x i8> %x1, <32 x i8
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleb %ymm1, %ymm0, %k1
; CHECK-NEXT: vpcmpleb %ymm2, %ymm3, %k1 {%k1}
-; CHECK-NEXT: vmovdqu8 %ymm0, %ymm2 {%k1}
-; CHECK-NEXT: vmovdqa %ymm2, %ymm0
+; CHECK-NEXT: vpblendmb %ymm0, %ymm2, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <32 x i8> %x1, %y1
%mask0 = icmp sle <32 x i8> %x, %y
@@ -136,8 +126,7 @@ define <32 x i8> @test256_11(<32 x i8> %x, <32 x i8>* %y.ptr, <32 x i8> %x1, <32
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtb %ymm2, %ymm1, %k1
; CHECK-NEXT: vpcmpgtb (%rdi), %ymm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqu8 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmb %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sgt <32 x i8> %x1, %y1
%y = load <32 x i8>, <32 x i8>* %y.ptr, align 4
@@ -152,8 +141,7 @@ define <16 x i16> @test256_12(<16 x i16> %x, <16 x i16>* %y.ptr, <16 x i16> %x1,
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmplew %ymm1, %ymm2, %k1
; CHECK-NEXT: vpcmpleuw (%rdi), %ymm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <16 x i16> %x1, %y1
%y = load <16 x i16>, <16 x i16>* %y.ptr, align 4
@@ -167,8 +155,7 @@ define <16 x i8> @test128_1(<16 x i8> %x, <16 x i8> %y) nounwind {
; CHECK-LABEL: test128_1:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k1
-; CHECK-NEXT: vmovdqu8 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmb %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp eq <16 x i8> %x, %y
%max = select <16 x i1> %mask, <16 x i8> %x, <16 x i8> %y
@@ -179,8 +166,7 @@ define <16 x i8> @test128_2(<16 x i8> %x, <16 x i8> %y, <16 x i8> %x1) nounwind
; CHECK-LABEL: test128_2:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
-; CHECK-NEXT: vmovdqu8 %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vmovdqa %xmm2, %xmm0
+; CHECK-NEXT: vpblendmb %xmm0, %xmm2, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp sgt <16 x i8> %x, %y
%max = select <16 x i1> %mask, <16 x i8> %x, <16 x i8> %x1
@@ -191,8 +177,7 @@ define <8 x i16> @test128_3(<8 x i16> %x, <8 x i16> %y, <8 x i16> %x1) nounwind
; CHECK-LABEL: test128_3:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k1
-; CHECK-NEXT: vmovdqu16 %xmm2, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmw %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp sge <8 x i16> %x, %y
%max = select <8 x i1> %mask, <8 x i16> %x1, <8 x i16> %y
@@ -203,8 +188,7 @@ define <16 x i8> @test128_4(<16 x i8> %x, <16 x i8> %y, <16 x i8> %x1) nounwind
; CHECK-LABEL: test128_4:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnleub %xmm1, %xmm0, %k1
-; CHECK-NEXT: vmovdqu8 %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vmovdqa %xmm2, %xmm0
+; CHECK-NEXT: vpblendmb %xmm0, %xmm2, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp ugt <16 x i8> %x, %y
%max = select <16 x i1> %mask, <16 x i8> %x, <16 x i8> %x1
@@ -215,8 +199,7 @@ define <8 x i16> @test128_5(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %yp) nounwin
; CHECK-LABEL: test128_5:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqw (%rdi), %xmm0, %k1
-; CHECK-NEXT: vmovdqu16 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%y = load <8 x i16>, <8 x i16>* %yp, align 4
%mask = icmp eq <8 x i16> %x, %y
@@ -228,8 +211,7 @@ define <8 x i16> @test128_6(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) noun
; CHECK-LABEL: test128_6:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtw (%rdi), %xmm0, %k1
-; CHECK-NEXT: vmovdqu16 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%y = load <8 x i16>, <8 x i16>* %y.ptr, align 4
%mask = icmp sgt <8 x i16> %x, %y
@@ -241,8 +223,7 @@ define <8 x i16> @test128_7(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) noun
; CHECK-LABEL: test128_7:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmplew (%rdi), %xmm0, %k1
-; CHECK-NEXT: vmovdqu16 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%y = load <8 x i16>, <8 x i16>* %y.ptr, align 4
%mask = icmp sle <8 x i16> %x, %y
@@ -254,8 +235,7 @@ define <8 x i16> @test128_8(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) noun
; CHECK-LABEL: test128_8:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleuw (%rdi), %xmm0, %k1
-; CHECK-NEXT: vmovdqu16 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%y = load <8 x i16>, <8 x i16>* %y.ptr, align 4
%mask = icmp ule <8 x i16> %x, %y
@@ -268,8 +248,7 @@ define <8 x i16> @test128_9(<8 x i16> %x, <8 x i16> %y, <8 x i16> %x1, <8 x i16>
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpcmpeqw %xmm3, %xmm2, %k1 {%k1}
-; CHECK-NEXT: vmovdqu16 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp eq <8 x i16> %x1, %y1
%mask0 = icmp eq <8 x i16> %x, %y
@@ -283,8 +262,7 @@ define <16 x i8> @test128_10(<16 x i8> %x, <16 x i8> %y, <16 x i8> %x1, <16 x i8
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleb %xmm1, %xmm0, %k1
; CHECK-NEXT: vpcmpleb %xmm2, %xmm3, %k1 {%k1}
-; CHECK-NEXT: vmovdqu8 %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vmovdqa %xmm2, %xmm0
+; CHECK-NEXT: vpblendmb %xmm0, %xmm2, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <16 x i8> %x1, %y1
%mask0 = icmp sle <16 x i8> %x, %y
@@ -298,8 +276,7 @@ define <16 x i8> @test128_11(<16 x i8> %x, <16 x i8>* %y.ptr, <16 x i8> %x1, <16
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtb %xmm2, %xmm1, %k1
; CHECK-NEXT: vpcmpgtb (%rdi), %xmm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqu8 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmb %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sgt <16 x i8> %x1, %y1
%y = load <16 x i8>, <16 x i8>* %y.ptr, align 4
@@ -314,8 +291,7 @@ define <8 x i16> @test128_12(<8 x i16> %x, <8 x i16>* %y.ptr, <8 x i16> %x1, <8
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmplew %xmm1, %xmm2, %k1
; CHECK-NEXT: vpcmpleuw (%rdi), %xmm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqu16 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <8 x i16> %x1, %y1
%y = load <8 x i16>, <8 x i16>* %y.ptr, align 4
diff --git a/test/CodeGen/X86/avx512cd-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512cd-intrinsics-upgrade.ll
new file mode 100644
index 000000000000..e5dbff9ac515
--- /dev/null
+++ b/test/CodeGen/X86/avx512cd-intrinsics-upgrade.ll
@@ -0,0 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd | FileCheck %s
+
+define <16 x i32> @test_lzcnt_d(<16 x i32> %a) {
+; CHECK-LABEL: test_lzcnt_d:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vplzcntd %zmm0, %zmm0
+; CHECK-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32> %a, <16 x i32> zeroinitializer, i16 -1)
+ ret <16 x i32> %res
+}
+
+declare <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32>, <16 x i32>, i16) nounwind readonly
+
+define <8 x i64> @test_lzcnt_q(<8 x i64> %a) {
+; CHECK-LABEL: test_lzcnt_q:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vplzcntq %zmm0, %zmm0
+; CHECK-NEXT: retq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64> %a, <8 x i64> zeroinitializer, i8 -1)
+ ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64>, <8 x i64>, i8) nounwind readonly
+
+
+define <16 x i32> @test_mask_lzcnt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
+; CHECK-LABEL: test_mask_lzcnt_d:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vplzcntd %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <8 x i64> @test_mask_lzcnt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
+; CHECK-LABEL: test_mask_lzcnt_q:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vplzcntq %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
+ ret <8 x i64> %res
+}
diff --git a/test/CodeGen/X86/avx512cd-intrinsics.ll b/test/CodeGen/X86/avx512cd-intrinsics.ll
index febd3d69dd18..7e5a3e8fe25d 100644
--- a/test/CodeGen/X86/avx512cd-intrinsics.ll
+++ b/test/CodeGen/X86/avx512cd-intrinsics.ll
@@ -1,18 +1,104 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd | FileCheck %s
define <16 x i32> @test_x86_vbroadcastmw_512(i16 %a0) {
- ; CHECK: test_x86_vbroadcastmw_512
- ; CHECK: vpbroadcastmw2d %k0, %zmm0
- %res = call <16 x i32> @llvm.x86.avx512.broadcastmw.512(i16 %a0) ;
+; CHECK-LABEL: test_x86_vbroadcastmw_512:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k0
+; CHECK-NEXT: vpbroadcastmw2d %k0, %zmm0
+; CHECK-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.broadcastmw.512(i16 %a0)
ret <16 x i32> %res
}
declare <16 x i32> @llvm.x86.avx512.broadcastmw.512(i16)
define <8 x i64> @test_x86_broadcastmb_512(i8 %a0) {
- ; CHECK: test_x86_broadcastmb_512
- ; CHECK: vpbroadcastmb2q %k0, %zmm0
- %res = call <8 x i64> @llvm.x86.avx512.broadcastmb.512(i8 %a0) ;
+; CHECK-LABEL: test_x86_broadcastmb_512:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k0
+; CHECK-NEXT: vpbroadcastmb2q %k0, %zmm0
+; CHECK-NEXT: retq
+ %res = call <8 x i64> @llvm.x86.avx512.broadcastmb.512(i8 %a0)
ret <8 x i64> %res
}
declare <8 x i64> @llvm.x86.avx512.broadcastmb.512(i8)
+declare <16 x i32> @llvm.x86.avx512.mask.conflict.d.512(<16 x i32>, <16 x i32>, i16) nounwind readonly
+
+define <8 x i64> @test_conflict_q(<8 x i64> %a) {
+; CHECK-LABEL: test_conflict_q:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpconflictq %zmm0, %zmm0
+; CHECK-NEXT: retq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.conflict.q.512(<8 x i64> %a, <8 x i64> zeroinitializer, i8 -1)
+ ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.conflict.q.512(<8 x i64>, <8 x i64>, i8) nounwind readonly
+
+define <16 x i32> @test_maskz_conflict_d(<16 x i32> %a, i16 %mask) {
+; CHECK-LABEL: test_maskz_conflict_d:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpconflictd %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.mask.conflict.d.512(<16 x i32> %a, <16 x i32> zeroinitializer, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <8 x i64> @test_mask_conflict_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
+; CHECK-LABEL: test_mask_conflict_q:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpconflictq %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.conflict.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
+ ret <8 x i64> %res
+}
+
+define <16 x i32> @test_lzcnt_d(<16 x i32> %a) {
+; CHECK-LABEL: test_lzcnt_d:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vplzcntd %zmm0, %zmm0
+; CHECK-NEXT: retq
+ %1 = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %a, i1 false)
+ ret <16 x i32> %1
+}
+declare <16 x i32> @llvm.ctlz.v16i32(<16 x i32>, i1) #0
+
+define <8 x i64> @test_lzcnt_q(<8 x i64> %a) {
+; CHECK-LABEL: test_lzcnt_q:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vplzcntq %zmm0, %zmm0
+; CHECK-NEXT: retq
+ %1 = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %a, i1 false)
+ ret <8 x i64> %1
+}
+declare <8 x i64> @llvm.ctlz.v8i64(<8 x i64>, i1) #0
+
+define <16 x i32> @test_mask_lzcnt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
+; CHECK-LABEL: test_mask_lzcnt_d:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vplzcntd %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %1 = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %a, i1 false)
+ %2 = bitcast i16 %mask to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %b
+ ret <16 x i32> %3
+}
+
+define <8 x i64> @test_mask_lzcnt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
+; CHECK-LABEL: test_mask_lzcnt_q:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vplzcntq %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %1 = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %a, i1 false)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %b
+ ret <8 x i64> %3
+}
diff --git a/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll
new file mode 100644
index 000000000000..8f528394f5bd
--- /dev/null
+++ b/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll
@@ -0,0 +1,71 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd -mattr=+avx512vl | FileCheck %s
+
+declare <4 x i32> @llvm.x86.avx512.mask.lzcnt.d.128(<4 x i32>, <4 x i32>, i8)
+
+define <4 x i32>@test_int_x86_avx512_mask_vplzcnt_d_128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vplzcnt_d_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vplzcntd %xmm0, %xmm2
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vplzcntd %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vplzcntd %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %res = call <4 x i32> @llvm.x86.avx512.mask.lzcnt.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
+ %res1 = call <4 x i32> @llvm.x86.avx512.mask.lzcnt.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 -1)
+ %res3 = call <4 x i32> @llvm.x86.avx512.mask.lzcnt.d.128(<4 x i32> %x0, <4 x i32> zeroinitializer, i8 %x2)
+ %res2 = add <4 x i32> %res, %res1
+ %res4 = add <4 x i32> %res2, %res3
+ ret <4 x i32> %res4
+}
+
+declare <8 x i32> @llvm.x86.avx512.mask.lzcnt.d.256(<8 x i32>, <8 x i32>, i8)
+
+define <8 x i32>@test_int_x86_avx512_mask_vplzcnt_d_256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vplzcnt_d_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vplzcntd %ymm0, %ymm2
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vplzcntd %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vpaddd %ymm2, %ymm1, %ymm0
+; CHECK-NEXT: retq
+ %res = call <8 x i32> @llvm.x86.avx512.mask.lzcnt.d.256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2)
+ %res1 = call <8 x i32> @llvm.x86.avx512.mask.lzcnt.d.256(<8 x i32> %x0, <8 x i32> %x1, i8 -1)
+ %res2 = add <8 x i32> %res, %res1
+ ret <8 x i32> %res2
+}
+
+declare <2 x i64> @llvm.x86.avx512.mask.lzcnt.q.128(<2 x i64>, <2 x i64>, i8)
+
+define <2 x i64>@test_int_x86_avx512_mask_vplzcnt_q_128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vplzcnt_q_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vplzcntq %xmm0, %xmm2
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vplzcntq %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vpaddq %xmm2, %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %res = call <2 x i64> @llvm.x86.avx512.mask.lzcnt.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
+ %res1 = call <2 x i64> @llvm.x86.avx512.mask.lzcnt.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 -1)
+ %res2 = add <2 x i64> %res, %res1
+ ret <2 x i64> %res2
+}
+
+declare <4 x i64> @llvm.x86.avx512.mask.lzcnt.q.256(<4 x i64>, <4 x i64>, i8)
+
+define <4 x i64>@test_int_x86_avx512_mask_vplzcnt_q_256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vplzcnt_q_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vplzcntq %ymm0, %ymm2
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vplzcntq %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vpaddq %ymm2, %ymm1, %ymm0
+; CHECK-NEXT: retq
+ %res = call <4 x i64> @llvm.x86.avx512.mask.lzcnt.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2)
+ %res1 = call <4 x i64> @llvm.x86.avx512.mask.lzcnt.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 -1)
+ %res2 = add <4 x i64> %res, %res1
+ ret <4 x i64> %res2
+}
+
diff --git a/test/CodeGen/X86/avx512cdvl-intrinsics.ll b/test/CodeGen/X86/avx512cdvl-intrinsics.ll
index b27b795b4409..37aea45e6107 100644
--- a/test/CodeGen/X86/avx512cdvl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512cdvl-intrinsics.ll
@@ -1,75 +1,83 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd -mattr=+avx512vl | FileCheck %s
-declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1) nounwind readonly
-
-declare <4 x i32> @llvm.x86.avx512.mask.lzcnt.d.128(<4 x i32>, <4 x i32>, i8)
-
-define <4 x i32>@test_int_x86_avx512_mask_vplzcnt_d_128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2) {
+define <4 x i32> @test_int_x86_avx512_mask_vplzcnt_d_128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vplzcnt_d_128:
; CHECK: ## BB#0:
+; CHECK-NEXT: vplzcntd %xmm0, %xmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntd %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vplzcntd %xmm0, %xmm2 {%k1} {z}
-; CHECK-NEXT: vplzcntd %xmm0, %xmm0
+; CHECK-NEXT: vplzcntd %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; CHECK-NEXT: vpaddd %xmm2, %xmm0, %xmm0
; CHECK-NEXT: retq
- %res = call <4 x i32> @llvm.x86.avx512.mask.lzcnt.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
- %res1 = call <4 x i32> @llvm.x86.avx512.mask.lzcnt.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 -1)
- %res3 = call <4 x i32> @llvm.x86.avx512.mask.lzcnt.d.128(<4 x i32> %x0, <4 x i32> zeroinitializer, i8 %x2)
- %res2 = add <4 x i32> %res, %res1
- %res4 = add <4 x i32> %res2, %res3
+ %1 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %x0, i1 false)
+ %2 = bitcast i8 %x2 to <8 x i1>
+ %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x1
+ %4 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %x0, i1 false)
+ %5 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %x0, i1 false)
+ %6 = bitcast i8 %x2 to <8 x i1>
+ %extract = shufflevector <8 x i1> %6, <8 x i1> %6, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %7 = select <4 x i1> %extract, <4 x i32> %5, <4 x i32> zeroinitializer
+ %res2 = add <4 x i32> %3, %4
+ %res4 = add <4 x i32> %res2, %7
ret <4 x i32> %res4
}
+declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) #0
-declare <8 x i32> @llvm.x86.avx512.mask.lzcnt.d.256(<8 x i32>, <8 x i32>, i8)
-
-define <8 x i32>@test_int_x86_avx512_mask_vplzcnt_d_256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2) {
+define <8 x i32> @test_int_x86_avx512_mask_vplzcnt_d_256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vplzcnt_d_256:
; CHECK: ## BB#0:
+; CHECK-NEXT: vplzcntd %ymm0, %ymm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntd %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vplzcntd %ymm0, %ymm0
-; CHECK-NEXT: vpaddd %ymm0, %ymm1, %ymm0
+; CHECK-NEXT: vpaddd %ymm2, %ymm1, %ymm0
; CHECK-NEXT: retq
- %res = call <8 x i32> @llvm.x86.avx512.mask.lzcnt.d.256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2)
- %res1 = call <8 x i32> @llvm.x86.avx512.mask.lzcnt.d.256(<8 x i32> %x0, <8 x i32> %x1, i8 -1)
- %res2 = add <8 x i32> %res, %res1
+ %1 = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %x0, i1 false)
+ %2 = bitcast i8 %x2 to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x1
+ %4 = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %x0, i1 false)
+ %res2 = add <8 x i32> %3, %4
ret <8 x i32> %res2
}
+declare <8 x i32> @llvm.ctlz.v8i32(<8 x i32>, i1) #0
-declare <2 x i64> @llvm.x86.avx512.mask.lzcnt.q.128(<2 x i64>, <2 x i64>, i8)
-
-define <2 x i64>@test_int_x86_avx512_mask_vplzcnt_q_128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2) {
+define <2 x i64> @test_int_x86_avx512_mask_vplzcnt_q_128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vplzcnt_q_128:
; CHECK: ## BB#0:
+; CHECK-NEXT: vplzcntq %xmm0, %xmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntq %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vplzcntq %xmm0, %xmm0
-; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vpaddq %xmm2, %xmm1, %xmm0
; CHECK-NEXT: retq
- %res = call <2 x i64> @llvm.x86.avx512.mask.lzcnt.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
- %res1 = call <2 x i64> @llvm.x86.avx512.mask.lzcnt.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 -1)
- %res2 = add <2 x i64> %res, %res1
+ %1 = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %x0, i1 false)
+ %2 = bitcast i8 %x2 to <8 x i1>
+ %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
+ %3 = select <2 x i1> %extract, <2 x i64> %1, <2 x i64> %x1
+ %4 = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %x0, i1 false)
+ %res2 = add <2 x i64> %3, %4
ret <2 x i64> %res2
}
+declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1) #0
-declare <4 x i64> @llvm.x86.avx512.mask.lzcnt.q.256(<4 x i64>, <4 x i64>, i8)
-
-define <4 x i64>@test_int_x86_avx512_mask_vplzcnt_q_256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2) {
+define <4 x i64> @test_int_x86_avx512_mask_vplzcnt_q_256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_vplzcnt_q_256:
; CHECK: ## BB#0:
+; CHECK-NEXT: vplzcntq %ymm0, %ymm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntq %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vplzcntq %ymm0, %ymm0
-; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0
+; CHECK-NEXT: vpaddq %ymm2, %ymm1, %ymm0
; CHECK-NEXT: retq
- %res = call <4 x i64> @llvm.x86.avx512.mask.lzcnt.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2)
- %res1 = call <4 x i64> @llvm.x86.avx512.mask.lzcnt.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 -1)
- %res2 = add <4 x i64> %res, %res1
+ %1 = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %x0, i1 false)
+ %2 = bitcast i8 %x2 to <8 x i1>
+ %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = select <4 x i1> %extract, <4 x i64> %1, <4 x i64> %x1
+ %4 = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %x0, i1 false)
+ %res2 = add <4 x i64> %3, %4
ret <4 x i64> %res2
}
+declare <4 x i64> @llvm.ctlz.v4i64(<4 x i64>, i1) #0
declare <4 x i32> @llvm.x86.avx512.mask.conflict.d.128(<4 x i32>, <4 x i32>, i8)
@@ -77,8 +85,8 @@ define <4 x i32>@test_int_x86_avx512_mask_vpconflict_d_128(<4 x i32> %x0, <4 x i
; CHECK-LABEL: test_int_x86_avx512_mask_vpconflict_d_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpconflictd %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vpconflictd %xmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT: vpconflictd %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vpconflictd %xmm0, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; CHECK-NEXT: vpaddd %xmm2, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
index f4cf22c5ed3a..c5478dad4224 100644
--- a/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
@@ -7,7 +7,7 @@ define <2 x double>@test_int_x86_avx512_mask_vextractf64x2_512(<8 x double> %x0,
; CHECK-LABEL: test_int_x86_avx512_mask_vextractf64x2_512:
; CHECK: ## BB#0:
; CHECK-NEXT: vextractf64x2 $1, %zmm0, %xmm0
-; CHECK-NEXT: kmovb %edi, %k0
+; CHECK-NEXT: kmovw %edi, %k0
; CHECK-NEXT: kshiftlb $7, %k0, %k1
; CHECK-NEXT: kshiftrb $7, %k1, %k1
; CHECK-NEXT: kshiftlb $6, %k0, %k0
@@ -18,8 +18,7 @@ define <2 x double>@test_int_x86_avx512_mask_vextractf64x2_512(<8 x double> %x0,
; CHECK-NEXT: vmovq %rax, %xmm3
; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; CHECK-NEXT: vpsllq $63, %xmm2, %xmm2
-; CHECK-NEXT: vpsrad $31, %xmm2, %xmm2
-; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; CHECK-NEXT: vpsraq $63, %zmm2, %zmm2
; CHECK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; CHECK-NEXT: vandpd %xmm0, %xmm2, %xmm2
; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0
@@ -39,7 +38,7 @@ define <8 x float>@test_int_x86_avx512_mask_vextractf32x8(<16 x float> %x0, <8 x
; CHECK-LABEL: test_int_x86_avx512_mask_vextractf32x8:
; CHECK: ## BB#0:
; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm2
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm0 {%k1} {z}
; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1
@@ -78,9 +77,8 @@ declare <8 x double> @llvm.x86.avx512.mask.insertf64x2.512(<8 x double>, <2 x do
define <8 x double>@test_int_x86_avx512_mask_insertf64x2_512(<8 x double> %x0, <2 x double> %x1,<8 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_insertf64x2_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
; CHECK-NEXT: vinsertf64x2 $1, %xmm1, %zmm0, %zmm3
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf64x2 $1, %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vinsertf64x2 $1, %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vaddpd %zmm0, %zmm2, %zmm0
@@ -119,9 +117,8 @@ declare <8 x i64> @llvm.x86.avx512.mask.inserti64x2.512(<8 x i64>, <2 x i64>, i3
define <8 x i64>@test_int_x86_avx512_mask_inserti64x2_512(<8 x i64> %x0, <2 x i64> %x1, <8 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_inserti64x2_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
; CHECK-NEXT: vinserti64x2 $1, %xmm1, %zmm0, %zmm3
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti64x2 $1, %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vinserti64x2 $1, %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm0, %zmm2, %zmm0
@@ -134,3 +131,28 @@ define <8 x i64>@test_int_x86_avx512_mask_inserti64x2_512(<8 x i64> %x0, <2 x i6
%res4 = add <8 x i64> %res2, %res3
ret <8 x i64> %res4
}
+
+
+declare <16 x i32> @llvm.x86.avx512.cvtmask2d.512(i16)
+
+define <16 x i32>@test_int_x86_avx512_cvtmask2d_512(i16 %x0) {
+; CHECK-LABEL: test_int_x86_avx512_cvtmask2d_512:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k0
+; CHECK-NEXT: vpmovm2d %k0, %zmm0
+; CHECK-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.cvtmask2d.512(i16 %x0)
+ ret <16 x i32> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.cvtmask2q.512(i8)
+
+define <8 x i64>@test_int_x86_avx512_cvtmask2q_512(i8 %x0) {
+; CHECK-LABEL: test_int_x86_avx512_cvtmask2q_512:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k0
+; CHECK-NEXT: vpmovm2q %k0, %zmm0
+; CHECK-NEXT: retq
+ %res = call <8 x i64> @llvm.x86.avx512.cvtmask2q.512(i8 %x0)
+ ret <8 x i64> %res
+}
diff --git a/test/CodeGen/X86/avx512dq-intrinsics.ll b/test/CodeGen/X86/avx512dq-intrinsics.ll
index 375d63264517..000390404b54 100644
--- a/test/CodeGen/X86/avx512dq-intrinsics.ll
+++ b/test/CodeGen/X86/avx512dq-intrinsics.ll
@@ -6,7 +6,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.cvtpd2qq.512(<8 x double>, <8 x i64>, i8
define <8 x i64>@test_int_x86_avx512_mask_cvt_pd2qq_512(<8 x double> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2qq_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtpd2qq {ru-sae}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vcvtpd2qq {rn-sae}, %zmm0, %zmm0
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
@@ -22,7 +22,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.cvtpd2uqq.512(<8 x double>, <8 x i64>, i
define <8 x i64>@test_int_x86_avx512_mask_cvt_pd2uqq_512(<8 x double> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2uqq_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtpd2uqq {ru-sae}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vcvtpd2uqq {rn-sae}, %zmm0, %zmm0
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
@@ -38,7 +38,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.cvtps2qq.512(<8 x float>, <8 x i64>, i8,
define <8 x i64>@test_int_x86_avx512_mask_cvt_ps2qq_512(<8 x float> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2qq_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtps2qq {ru-sae}, %ymm0, %zmm1 {%k1}
; CHECK-NEXT: vcvtps2qq {rn-sae}, %ymm0, %zmm0
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
@@ -54,7 +54,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.cvtps2uqq.512(<8 x float>, <8 x i64>, i8
define <8 x i64>@test_int_x86_avx512_mask_cvt_ps2uqq_512(<8 x float> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2uqq_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtps2uqq {ru-sae}, %ymm0, %zmm1 {%k1}
; CHECK-NEXT: vcvtps2uqq {rn-sae}, %ymm0, %zmm0
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
@@ -70,7 +70,7 @@ declare <8 x double> @llvm.x86.avx512.mask.cvtqq2pd.512(<8 x i64>, <8 x double>,
define <8 x double>@test_int_x86_avx512_mask_cvt_qq2pd_512(<8 x i64> %x0, <8 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_qq2pd_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtqq2pd %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vcvtqq2pd {rn-sae}, %zmm0, %zmm0
; CHECK-NEXT: vaddpd %zmm0, %zmm1, %zmm0
@@ -86,7 +86,7 @@ declare <8 x float> @llvm.x86.avx512.mask.cvtqq2ps.512(<8 x i64>, <8 x float>, i
define <8 x float>@test_int_x86_avx512_mask_cvt_qq2ps_512(<8 x i64> %x0, <8 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_qq2ps_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtqq2ps %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vcvtqq2ps {rn-sae}, %zmm0, %ymm0
; CHECK-NEXT: vaddps %ymm0, %ymm1, %ymm0
@@ -102,7 +102,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.cvttpd2qq.512(<8 x double>, <8 x i64>, i
define <8 x i64>@test_int_x86_avx512_mask_cvtt_pd2qq_512(<8 x double> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2qq_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvttpd2qq %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vcvttpd2qq {sae}, %zmm0, %zmm0
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
@@ -118,7 +118,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.cvttpd2uqq.512(<8 x double>, <8 x i64>,
define <8 x i64>@test_int_x86_avx512_mask_cvtt_pd2uqq_512(<8 x double> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2uqq_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvttpd2uqq %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vcvttpd2uqq {sae}, %zmm0, %zmm0
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
@@ -134,7 +134,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.cvttps2qq.512(<8 x float>, <8 x i64>, i8
define <8 x i64>@test_int_x86_avx512_mask_cvtt_ps2qq_512(<8 x float> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2qq_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvttps2qq %ymm0, %zmm1 {%k1}
; CHECK-NEXT: vcvttps2qq {sae}, %ymm0, %zmm0
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
@@ -150,7 +150,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.cvttps2uqq.512(<8 x float>, <8 x i64>, i
define <8 x i64>@test_int_x86_avx512_mask_cvtt_ps2uqq_512(<8 x float> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2uqq_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvttps2uqq %ymm0, %zmm1 {%k1}
; CHECK-NEXT: vcvttps2uqq {sae}, %ymm0, %zmm0
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
@@ -166,7 +166,7 @@ declare <8 x double> @llvm.x86.avx512.mask.cvtuqq2pd.512(<8 x i64>, <8 x double>
define <8 x double>@test_int_x86_avx512_mask_cvt_uqq2pd_512(<8 x i64> %x0, <8 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_uqq2pd_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtuqq2pd %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vcvtuqq2pd {rn-sae}, %zmm0, %zmm0
; CHECK-NEXT: vaddpd %zmm0, %zmm1, %zmm0
@@ -182,7 +182,7 @@ declare <8 x float> @llvm.x86.avx512.mask.cvtuqq2ps.512(<8 x i64>, <8 x float>,
define <8 x float>@test_int_x86_avx512_mask_cvt_uqq2ps_512(<8 x i64> %x0, <8 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_uqq2ps_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtuqq2ps %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vcvtuqq2ps {rn-sae}, %zmm0, %ymm0
; CHECK-NEXT: vaddps %ymm0, %ymm1, %ymm0
@@ -198,7 +198,7 @@ declare <8 x double> @llvm.x86.avx512.mask.reduce.pd.512(<8 x double>, i32, <8 x
define <8 x double>@test_int_x86_avx512_mask_reduce_pd_512(<8 x double> %x0, <8 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_pd_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vreducepd $8, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vreducepd $4, {sae}, %zmm0, %zmm0
; CHECK-NEXT: vaddpd %zmm0, %zmm1, %zmm0
@@ -230,7 +230,7 @@ declare <8 x double> @llvm.x86.avx512.mask.range.pd.512(<8 x double>, <8 x doubl
define <8 x double>@test_int_x86_avx512_mask_range_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_range_pd_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vrangepd $8, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vrangepd $4, {sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: vaddpd %zmm0, %zmm2, %zmm0
@@ -330,12 +330,13 @@ declare i8 @llvm.x86.avx512.mask.fpclass.pd.512(<8 x double>, i32, i8)
define i8 @test_int_x86_avx512_mask_fpclass_pd_512(<8 x double> %x0, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_pd_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfpclasspd $2, %zmm0, %k0 {%k1}
-; CHECK-NEXT: kmovb %k0, %ecx
+; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: vfpclasspd $4, %zmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.fpclass.pd.512(<8 x double> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.pd.512(<8 x double> %x0, i32 4, i8 -1)
@@ -348,9 +349,9 @@ define i16@test_int_x86_avx512_mask_fpclass_ps_512(<16 x float> %x0, i16 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_ps_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vfpclassps $4, %zmm0, %k0
-; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: vfpclassps $4, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %ecx
+; CHECK-NEXT: vfpclassps $4, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
@@ -452,6 +453,7 @@ define i16@test_int_x86_avx512_cvtd2mask_512(<16 x i32> %x0) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovd2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.cvtd2mask.512(<16 x i32> %x0)
ret i16 %res
@@ -463,36 +465,13 @@ define i8@test_int_x86_avx512_cvtq2mask_512(<8 x i64> %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtq2mask_512:
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovq2m %zmm0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.cvtq2mask.512(<8 x i64> %x0)
ret i8 %res
}
-declare <16 x i32> @llvm.x86.avx512.cvtmask2d.512(i16)
-
-define <16 x i32>@test_int_x86_avx512_cvtmask2d_512(i16 %x0) {
-; CHECK-LABEL: test_int_x86_avx512_cvtmask2d_512:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k0
-; CHECK-NEXT: vpmovm2d %k0, %zmm0
-; CHECK-NEXT: retq
- %res = call <16 x i32> @llvm.x86.avx512.cvtmask2d.512(i16 %x0)
- ret <16 x i32> %res
-}
-
-declare <8 x i64> @llvm.x86.avx512.cvtmask2q.512(i8)
-
-define <8 x i64>@test_int_x86_avx512_cvtmask2q_512(i8 %x0) {
-; CHECK-LABEL: test_int_x86_avx512_cvtmask2q_512:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k0
-; CHECK-NEXT: vpmovm2q %k0, %zmm0
-; CHECK-NEXT: retq
- %res = call <8 x i64> @llvm.x86.avx512.cvtmask2q.512(i8 %x0)
- ret <8 x i64> %res
-}
-
declare <16 x float> @llvm.x86.avx512.mask.broadcastf32x8.512(<8 x float>, <16 x float>, i16)
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x8_512(<8 x float> %x0, <16 x float> %x2, i16 %mask) {
@@ -515,13 +494,26 @@ define <16 x float>@test_int_x86_avx512_mask_broadcastf32x8_512(<8 x float> %x0,
ret <16 x float> %res5
}
+define <16 x float>@test_int_x86_avx512_mask_broadcastf32x8_512_load(<8 x float>* %x0ptr, <16 x float> %x2, i16 %mask) {
+; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x8_512_load:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovaps (%rdi), %ymm1
+; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; CHECK-NEXT: retq
+
+ %x0 = load <8 x float>, <8 x float>* %x0ptr
+ %res = call <16 x float> @llvm.x86.avx512.mask.broadcastf32x8.512(<8 x float> %x0, <16 x float> %x2, i16 %mask)
+ ret <16 x float> %res
+}
+
declare <8 x double> @llvm.x86.avx512.mask.broadcastf64x2.512(<2 x double>, <8 x double>, i8)
define <8 x double>@test_int_x86_avx512_mask_broadcastf64x2_512(<2 x double> %x0, <8 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_512:
; CHECK: ## BB#0:
; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} {z} = zmm0[0,1,0,1,0,1,0,1]
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm1 {%k1} = zmm0[0,1,0,1,0,1,0,1]
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1,0,1,0,1]
@@ -537,6 +529,19 @@ define <8 x double>@test_int_x86_avx512_mask_broadcastf64x2_512(<2 x double> %x0
ret <8 x double> %res5
}
+define <8 x double>@test_int_x86_avx512_mask_broadcastf64x2_512_load(<2 x double>* %x0ptr, <8 x double> %x2, i8 %mask) {
+; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_512_load:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovapd (%rdi), %xmm1
+; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} = zmm1[0,1,0,1,0,1,0,1]
+; CHECK-NEXT: retq
+
+ %x0 = load <2 x double>, <2 x double>* %x0ptr
+ %res = call <8 x double> @llvm.x86.avx512.mask.broadcastf64x2.512(<2 x double> %x0, <8 x double> %x2, i8 %mask)
+ ret <8 x double> %res
+}
+
declare <16 x i32> @llvm.x86.avx512.mask.broadcasti32x8.512(<8 x i32>, <16 x i32>, i16)
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x8_512(<8 x i32> %x0, <16 x i32> %x2, i16 %mask) {
@@ -559,13 +564,26 @@ define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x8_512(<8 x i32> %x0, <16
ret <16 x i32> %res5
}
+define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x8_512_load(<8 x i32>* %x0ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x8_512_load:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa (%rdi), %ymm1
+; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} = zmm1[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; CHECK-NEXT: retq
+
+ %x0 = load <8 x i32>, <8 x i32>* %x0ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.broadcasti32x8.512(<8 x i32> %x0, <16 x i32> %x2, i16 %mask)
+ ret <16 x i32> %res
+}
+
declare <8 x i64> @llvm.x86.avx512.mask.broadcasti64x2.512(<2 x i64>, <8 x i64>, i8)
define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x2_512(<2 x i64> %x0, <8 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_512:
; CHECK: ## BB#0:
; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} {z} = zmm0[0,1,0,1,0,1,0,1]
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm0[0,1,0,1,0,1,0,1]
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1,0,1,0,1]
@@ -580,3 +598,16 @@ define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x2_512(<2 x i64> %x0, <8 x
%res5 = add <8 x i64> %res3, %res4
ret <8 x i64> %res5
}
+
+define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x2_512_load(<2 x i64>* %x0ptr, <8 x i64> %x2, i8 %mask) {
+; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_512_load:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa (%rdi), %xmm1
+; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} = zmm1[0,1,0,1,0,1,0,1]
+; CHECK-NEXT: retq
+
+ %x0 = load <2 x i64>, <2 x i64>* %x0ptr
+ %res = call <8 x i64> @llvm.x86.avx512.mask.broadcasti64x2.512(<2 x i64> %x0, <8 x i64> %x2, i8 %mask)
+ ret <8 x i64> %res
+}
diff --git a/test/CodeGen/X86/avx512dq-mask-op.ll b/test/CodeGen/X86/avx512dq-mask-op.ll
index e83aa14d35e3..f0ae1b0129a8 100644
--- a/test/CodeGen/X86/avx512dq-mask-op.ll
+++ b/test/CodeGen/X86/avx512dq-mask-op.ll
@@ -4,9 +4,10 @@
define i8 @mask8(i8 %x) {
; CHECK-LABEL: mask8:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k0
+; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: knotb %k0, %k0
-; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
@@ -55,7 +56,8 @@ define i8 @mand8_mem(<8 x i1>* %x, <8 x i1>* %y) {
; CHECK-NEXT: kandb %k1, %k0, %k2
; CHECK-NEXT: kxorb %k1, %k0, %k0
; CHECK-NEXT: korb %k0, %k2, %k0
-; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%ma = load <8 x i1>, <8 x i1>* %x
%mb = load <8 x i1>, <8 x i1>* %y
diff --git a/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
index f8460bf880f9..52a84deebf51 100644
--- a/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
@@ -13,7 +13,7 @@ define <4 x float> @test_mask_andnot_ps_rr_128(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_mask_andnot_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandnps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x55,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -24,7 +24,7 @@ define <4 x float> @test_mask_andnot_ps_rrk_128(<4 x float> %a, <4 x float> %b,
define <4 x float> @test_mask_andnot_ps_rrkz_128(<4 x float> %a, <4 x float> %b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandnps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x55,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.andn.ps.128(<4 x float> %a, <4 x float> %b, <4 x float> zeroinitializer, i8 %mask)
@@ -44,7 +44,7 @@ define <4 x float> @test_mask_andnot_ps_rm_128(<4 x float> %a, <4 x float>* %ptr
define <4 x float> @test_mask_andnot_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x55,0x0f]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -56,7 +56,7 @@ define <4 x float> @test_mask_andnot_ps_rmk_128(<4 x float> %a, <4 x float>* %pt
define <4 x float> @test_mask_andnot_ps_rmkz_128(<4 x float> %a, <4 x float>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x55,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x float>, <4 x float>* %ptr_b
@@ -79,7 +79,7 @@ define <4 x float> @test_mask_andnot_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
define <4 x float> @test_mask_andnot_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmbk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x19,0x55,0x0f]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -93,7 +93,7 @@ define <4 x float> @test_mask_andnot_ps_rmbk_128(<4 x float> %a, float* %ptr_b,
define <4 x float> @test_mask_andnot_ps_rmbkz_128(<4 x float> %a, float* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmbkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x99,0x55,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -117,7 +117,7 @@ define <8 x float> @test_mask_andnot_ps_rr_256(<8 x float> %a, <8 x float> %b) {
define <8 x float> @test_mask_andnot_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rrk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandnps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x55,0xd1]
; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -128,7 +128,7 @@ define <8 x float> @test_mask_andnot_ps_rrk_256(<8 x float> %a, <8 x float> %b,
define <8 x float> @test_mask_andnot_ps_rrkz_256(<8 x float> %a, <8 x float> %b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rrkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandnps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x55,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.andn.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> zeroinitializer, i8 %mask)
@@ -148,7 +148,7 @@ define <8 x float> @test_mask_andnot_ps_rm_256(<8 x float> %a, <8 x float>* %ptr
define <8 x float> @test_mask_andnot_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x55,0x0f]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -160,7 +160,7 @@ define <8 x float> @test_mask_andnot_ps_rmk_256(<8 x float> %a, <8 x float>* %pt
define <8 x float> @test_mask_andnot_ps_rmkz_256(<8 x float> %a, <8 x float>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x55,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x float>, <8 x float>* %ptr_b
@@ -183,7 +183,7 @@ define <8 x float> @test_mask_andnot_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
define <8 x float> @test_mask_andnot_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmbk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x39,0x55,0x0f]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -197,7 +197,7 @@ define <8 x float> @test_mask_andnot_ps_rmbk_256(<8 x float> %a, float* %ptr_b,
define <8 x float> @test_mask_andnot_ps_rmbkz_256(<8 x float> %a, float* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_andnot_ps_rmbkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandnps (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xb9,0x55,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -325,7 +325,7 @@ define <4 x float> @test_mask_and_ps_rr_128(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_mask_and_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x54,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -336,7 +336,7 @@ define <4 x float> @test_mask_and_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4
define <4 x float> @test_mask_and_ps_rrkz_128(<4 x float> %a, <4 x float> %b, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x54,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.and.ps.128(<4 x float> %a, <4 x float> %b, <4 x float> zeroinitializer, i8 %mask)
@@ -356,7 +356,7 @@ define <4 x float> @test_mask_and_ps_rm_128(<4 x float> %a, <4 x float>* %ptr_b)
define <4 x float> @test_mask_and_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x54,0x0f]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -368,7 +368,7 @@ define <4 x float> @test_mask_and_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b
define <4 x float> @test_mask_and_ps_rmkz_128(<4 x float> %a, <4 x float>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x54,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x float>, <4 x float>* %ptr_b
@@ -391,7 +391,7 @@ define <4 x float> @test_mask_and_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
define <4 x float> @test_mask_and_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmbk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x19,0x54,0x0f]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -405,7 +405,7 @@ define <4 x float> @test_mask_and_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4
define <4 x float> @test_mask_and_ps_rmbkz_128(<4 x float> %a, float* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmbkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x99,0x54,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -429,7 +429,7 @@ define <8 x float> @test_mask_and_ps_rr_256(<8 x float> %a, <8 x float> %b) {
define <8 x float> @test_mask_and_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rrk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x54,0xd1]
; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -440,7 +440,7 @@ define <8 x float> @test_mask_and_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8
define <8 x float> @test_mask_and_ps_rrkz_256(<8 x float> %a, <8 x float> %b, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rrkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vandps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x54,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.and.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> zeroinitializer, i8 %mask)
@@ -460,7 +460,7 @@ define <8 x float> @test_mask_and_ps_rm_256(<8 x float> %a, <8 x float>* %ptr_b)
define <8 x float> @test_mask_and_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x54,0x0f]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -472,7 +472,7 @@ define <8 x float> @test_mask_and_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b
define <8 x float> @test_mask_and_ps_rmkz_256(<8 x float> %a, <8 x float>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x54,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x float>, <8 x float>* %ptr_b
@@ -495,7 +495,7 @@ define <8 x float> @test_mask_and_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
define <8 x float> @test_mask_and_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmbk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x39,0x54,0x0f]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -509,7 +509,7 @@ define <8 x float> @test_mask_and_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8
define <8 x float> @test_mask_and_ps_rmbkz_256(<8 x float> %a, float* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_and_ps_rmbkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vandps (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xb9,0x54,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -637,7 +637,7 @@ define <4 x float> @test_mask_or_ps_rr_128(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_mask_or_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vorps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x56,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -648,7 +648,7 @@ define <4 x float> @test_mask_or_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4 x
define <4 x float> @test_mask_or_ps_rrkz_128(<4 x float> %a, <4 x float> %b, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vorps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x56,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.or.ps.128(<4 x float> %a, <4 x float> %b, <4 x float> zeroinitializer, i8 %mask)
@@ -668,7 +668,7 @@ define <4 x float> @test_mask_or_ps_rm_128(<4 x float> %a, <4 x float>* %ptr_b)
define <4 x float> @test_mask_or_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x56,0x0f]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -680,7 +680,7 @@ define <4 x float> @test_mask_or_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b,
define <4 x float> @test_mask_or_ps_rmkz_128(<4 x float> %a, <4 x float>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x56,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x float>, <4 x float>* %ptr_b
@@ -703,7 +703,7 @@ define <4 x float> @test_mask_or_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
define <4 x float> @test_mask_or_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmbk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x19,0x56,0x0f]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -717,7 +717,7 @@ define <4 x float> @test_mask_or_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4 x
define <4 x float> @test_mask_or_ps_rmbkz_128(<4 x float> %a, float* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmbkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x99,0x56,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -741,7 +741,7 @@ define <8 x float> @test_mask_or_ps_rr_256(<8 x float> %a, <8 x float> %b) {
define <8 x float> @test_mask_or_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rrk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vorps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x56,0xd1]
; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -752,7 +752,7 @@ define <8 x float> @test_mask_or_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8 x
define <8 x float> @test_mask_or_ps_rrkz_256(<8 x float> %a, <8 x float> %b, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rrkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vorps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x56,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.or.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> zeroinitializer, i8 %mask)
@@ -772,7 +772,7 @@ define <8 x float> @test_mask_or_ps_rm_256(<8 x float> %a, <8 x float>* %ptr_b)
define <8 x float> @test_mask_or_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x56,0x0f]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -784,7 +784,7 @@ define <8 x float> @test_mask_or_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b,
define <8 x float> @test_mask_or_ps_rmkz_256(<8 x float> %a, <8 x float>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x56,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x float>, <8 x float>* %ptr_b
@@ -807,7 +807,7 @@ define <8 x float> @test_mask_or_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
define <8 x float> @test_mask_or_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmbk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x39,0x56,0x0f]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -821,7 +821,7 @@ define <8 x float> @test_mask_or_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8 x
define <8 x float> @test_mask_or_ps_rmbkz_256(<8 x float> %a, float* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_or_ps_rmbkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vorps (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xb9,0x56,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -949,7 +949,7 @@ define <4 x float> @test_mask_xor_ps_rr_128(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_mask_xor_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vxorps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x57,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -960,7 +960,7 @@ define <4 x float> @test_mask_xor_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4
define <4 x float> @test_mask_xor_ps_rrkz_128(<4 x float> %a, <4 x float> %b, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vxorps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x57,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.xor.ps.128(<4 x float> %a, <4 x float> %b, <4 x float> zeroinitializer, i8 %mask)
@@ -980,7 +980,7 @@ define <4 x float> @test_mask_xor_ps_rm_128(<4 x float> %a, <4 x float>* %ptr_b)
define <4 x float> @test_mask_xor_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x57,0x0f]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -992,7 +992,7 @@ define <4 x float> @test_mask_xor_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b
define <4 x float> @test_mask_xor_ps_rmkz_128(<4 x float> %a, <4 x float>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x57,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x float>, <4 x float>* %ptr_b
@@ -1015,7 +1015,7 @@ define <4 x float> @test_mask_xor_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
define <4 x float> @test_mask_xor_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmbk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x19,0x57,0x0f]
; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1029,7 +1029,7 @@ define <4 x float> @test_mask_xor_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4
define <4 x float> @test_mask_xor_ps_rmbkz_128(<4 x float> %a, float* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmbkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x99,0x57,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -1053,7 +1053,7 @@ define <8 x float> @test_mask_xor_ps_rr_256(<8 x float> %a, <8 x float> %b) {
define <8 x float> @test_mask_xor_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rrk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x57,0xd1]
; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1064,7 +1064,7 @@ define <8 x float> @test_mask_xor_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8
define <8 x float> @test_mask_xor_ps_rrkz_256(<8 x float> %a, <8 x float> %b, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rrkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x57,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.xor.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> zeroinitializer, i8 %mask)
@@ -1084,7 +1084,7 @@ define <8 x float> @test_mask_xor_ps_rm_256(<8 x float> %a, <8 x float>* %ptr_b)
define <8 x float> @test_mask_xor_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x57,0x0f]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1096,7 +1096,7 @@ define <8 x float> @test_mask_xor_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b
define <8 x float> @test_mask_xor_ps_rmkz_256(<8 x float> %a, <8 x float>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x57,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x float>, <8 x float>* %ptr_b
@@ -1119,7 +1119,7 @@ define <8 x float> @test_mask_xor_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
define <8 x float> @test_mask_xor_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8 x float> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmbk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x39,0x57,0x0f]
; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1133,7 +1133,7 @@ define <8 x float> @test_mask_xor_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8
define <8 x float> @test_mask_xor_ps_rmbkz_256(<8 x float> %a, float* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_xor_ps_rmbkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vxorps (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xb9,0x57,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load float, float* %ptr_b
@@ -1261,7 +1261,7 @@ define <8 x i64> @test_mask_mullo_epi64_rr_512(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @test_mask_mullo_epi64_rrk_512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rrk_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmullq %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x40,0xd1]
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1272,7 +1272,7 @@ define <8 x i64> @test_mask_mullo_epi64_rrk_512(<8 x i64> %a, <8 x i64> %b, <8 x
define <8 x i64> @test_mask_mullo_epi64_rrkz_512(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rrkz_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmullq %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x40,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i64> @llvm.x86.avx512.mask.pmull.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 %mask)
@@ -1292,7 +1292,7 @@ define <8 x i64> @test_mask_mullo_epi64_rm_512(<8 x i64> %a, <8 x i64>* %ptr_b)
define <8 x i64> @test_mask_mullo_epi64_rmk_512(<8 x i64> %a, <8 x i64>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmk_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x40,0x0f]
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1304,7 +1304,7 @@ define <8 x i64> @test_mask_mullo_epi64_rmk_512(<8 x i64> %a, <8 x i64>* %ptr_b,
define <8 x i64> @test_mask_mullo_epi64_rmkz_512(<8 x i64> %a, <8 x i64>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmkz_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x40,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i64>, <8 x i64>* %ptr_b
@@ -1327,7 +1327,7 @@ define <8 x i64> @test_mask_mullo_epi64_rmb_512(<8 x i64> %a, i64* %ptr_b) {
define <8 x i64> @test_mask_mullo_epi64_rmbk_512(<8 x i64> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmbk_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi){1to8}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x59,0x40,0x0f]
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1341,7 +1341,7 @@ define <8 x i64> @test_mask_mullo_epi64_rmbk_512(<8 x i64> %a, i64* %ptr_b, <8 x
define <8 x i64> @test_mask_mullo_epi64_rmbkz_512(<8 x i64> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmbkz_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xd9,0x40,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i64, i64* %ptr_b
@@ -1364,7 +1364,7 @@ define <4 x i64> @test_mask_mullo_epi64_rr_256(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @test_mask_mullo_epi64_rrk_256(<4 x i64> %a, <4 x i64> %b, <4 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rrk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmullq %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x40,0xd1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1375,7 +1375,7 @@ define <4 x i64> @test_mask_mullo_epi64_rrk_256(<4 x i64> %a, <4 x i64> %b, <4 x
define <4 x i64> @test_mask_mullo_epi64_rrkz_256(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rrkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmullq %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x40,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.pmull.q.256(<4 x i64> %a, <4 x i64> %b, <4 x i64> zeroinitializer, i8 %mask)
@@ -1395,7 +1395,7 @@ define <4 x i64> @test_mask_mullo_epi64_rm_256(<4 x i64> %a, <4 x i64>* %ptr_b)
define <4 x i64> @test_mask_mullo_epi64_rmk_256(<4 x i64> %a, <4 x i64>* %ptr_b, <4 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x40,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1407,7 +1407,7 @@ define <4 x i64> @test_mask_mullo_epi64_rmk_256(<4 x i64> %a, <4 x i64>* %ptr_b,
define <4 x i64> @test_mask_mullo_epi64_rmkz_256(<4 x i64> %a, <4 x i64>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x40,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i64>, <4 x i64>* %ptr_b
@@ -1430,7 +1430,7 @@ define <4 x i64> @test_mask_mullo_epi64_rmb_256(<4 x i64> %a, i64* %ptr_b) {
define <4 x i64> @test_mask_mullo_epi64_rmbk_256(<4 x i64> %a, i64* %ptr_b, <4 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmbk_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi){1to4}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x39,0x40,0x0f]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1444,7 +1444,7 @@ define <4 x i64> @test_mask_mullo_epi64_rmbk_256(<4 x i64> %a, i64* %ptr_b, <4 x
define <4 x i64> @test_mask_mullo_epi64_rmbkz_256(<4 x i64> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmbkz_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi){1to4}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xb9,0x40,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i64, i64* %ptr_b
@@ -1468,7 +1468,7 @@ define <2 x i64> @test_mask_mullo_epi64_rr_128(<2 x i64> %a, <2 x i64> %b) {
define <2 x i64> @test_mask_mullo_epi64_rrk_128(<2 x i64> %a, <2 x i64> %b, <2 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rrk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmullq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x40,0xd1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1479,7 +1479,7 @@ define <2 x i64> @test_mask_mullo_epi64_rrk_128(<2 x i64> %a, <2 x i64> %b, <2 x
define <2 x i64> @test_mask_mullo_epi64_rrkz_128(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rrkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpmullq %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x40,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.pmull.q.128(<2 x i64> %a, <2 x i64> %b, <2 x i64> zeroinitializer, i8 %mask)
@@ -1499,7 +1499,7 @@ define <2 x i64> @test_mask_mullo_epi64_rm_128(<2 x i64> %a, <2 x i64>* %ptr_b)
define <2 x i64> @test_mask_mullo_epi64_rmk_128(<2 x i64> %a, <2 x i64>* %ptr_b, <2 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x40,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1511,7 +1511,7 @@ define <2 x i64> @test_mask_mullo_epi64_rmk_128(<2 x i64> %a, <2 x i64>* %ptr_b,
define <2 x i64> @test_mask_mullo_epi64_rmkz_128(<2 x i64> %a, <2 x i64>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x40,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <2 x i64>, <2 x i64>* %ptr_b
@@ -1534,7 +1534,7 @@ define <2 x i64> @test_mask_mullo_epi64_rmb_128(<2 x i64> %a, i64* %ptr_b) {
define <2 x i64> @test_mask_mullo_epi64_rmbk_128(<2 x i64> %a, i64* %ptr_b, <2 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmbk_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi){1to2}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0x40,0x0f]
; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1548,7 +1548,7 @@ define <2 x i64> @test_mask_mullo_epi64_rmbk_128(<2 x i64> %a, i64* %ptr_b, <2 x
define <2 x i64> @test_mask_mullo_epi64_rmbkz_128(<2 x i64> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mullo_epi64_rmbkz_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1 ## encoding: [0xc5,0xf9,0x92,0xce]
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpmullq (%rdi){1to2}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x99,0x40,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%q = load i64, i64* %ptr_b
@@ -1566,7 +1566,7 @@ define <2 x double>@test_int_x86_avx512_mask_vextractf64x2_256(<4 x double> %x0,
; CHECK-LABEL: test_int_x86_avx512_mask_vextractf64x2_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x19,0xc2,0x01]
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vextractf64x2 $1, %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x19,0xc1,0x01]
; CHECK-NEXT: vextractf64x2 $1, %ymm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0x19,0xc0,0x01]
; CHECK-NEXT: vaddpd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xca]
@@ -1586,7 +1586,7 @@ define <4 x double>@test_int_x86_avx512_mask_insertf64x2_256(<4 x double> %x0, <
; CHECK-LABEL: test_int_x86_avx512_mask_insertf64x2_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd9,0x01]
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf64x2 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x18,0xd1,0x01]
; CHECK-NEXT: vinsertf64x2 $1, %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0x18,0xc1,0x01]
; CHECK-NEXT: vaddpd %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xcb]
@@ -1606,7 +1606,7 @@ define <4 x i64>@test_int_x86_avx512_mask_inserti64x2_256(<4 x i64> %x0, <2 x i6
; CHECK-LABEL: test_int_x86_avx512_mask_inserti64x2_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd9,0x01]
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinserti64x2 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x38,0xd1,0x01]
; CHECK-NEXT: vinserti64x2 $1, %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0x38,0xc1,0x01]
; CHECK-NEXT: vpaddq %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xcb]
@@ -1619,3 +1619,51 @@ define <4 x i64>@test_int_x86_avx512_mask_inserti64x2_256(<4 x i64> %x0, <2 x i6
%res4 = add <4 x i64> %res3, %res2
ret <4 x i64> %res4
}
+
+declare <4 x i32> @llvm.x86.avx512.cvtmask2d.128(i8)
+
+define <4 x i32>@test_int_x86_avx512_cvtmask2d_128(i8 %x0) {
+; CHECK-LABEL: test_int_x86_avx512_cvtmask2d_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k0 ## encoding: [0xc5,0xf8,0x92,0xc7]
+; CHECK-NEXT: vpmovm2d %k0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x38,0xc0]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x i32> @llvm.x86.avx512.cvtmask2d.128(i8 %x0)
+ ret <4 x i32> %res
+}
+
+declare <8 x i32> @llvm.x86.avx512.cvtmask2d.256(i8)
+
+define <8 x i32>@test_int_x86_avx512_cvtmask2d_256(i8 %x0) {
+; CHECK-LABEL: test_int_x86_avx512_cvtmask2d_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k0 ## encoding: [0xc5,0xf8,0x92,0xc7]
+; CHECK-NEXT: vpmovm2d %k0, %ymm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x38,0xc0]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x i32> @llvm.x86.avx512.cvtmask2d.256(i8 %x0)
+ ret <8 x i32> %res
+}
+
+declare <2 x i64> @llvm.x86.avx512.cvtmask2q.128(i8)
+
+define <2 x i64>@test_int_x86_avx512_cvtmask2q_128(i8 %x0) {
+; CHECK-LABEL: test_int_x86_avx512_cvtmask2q_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k0 ## encoding: [0xc5,0xf8,0x92,0xc7]
+; CHECK-NEXT: vpmovm2q %k0, %xmm0 ## encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x i64> @llvm.x86.avx512.cvtmask2q.128(i8 %x0)
+ ret <2 x i64> %res
+}
+
+declare <4 x i64> @llvm.x86.avx512.cvtmask2q.256(i8)
+
+define <4 x i64>@test_int_x86_avx512_cvtmask2q_256(i8 %x0) {
+; CHECK-LABEL: test_int_x86_avx512_cvtmask2q_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k0 ## encoding: [0xc5,0xf8,0x92,0xc7]
+; CHECK-NEXT: vpmovm2q %k0, %ymm0 ## encoding: [0x62,0xf2,0xfe,0x28,0x38,0xc0]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x i64> @llvm.x86.avx512.cvtmask2q.256(i8 %x0)
+ ret <4 x i64> %res
+}
diff --git a/test/CodeGen/X86/avx512dqvl-intrinsics.ll b/test/CodeGen/X86/avx512dqvl-intrinsics.ll
index 3430c5715376..ad9ea93c2031 100644
--- a/test/CodeGen/X86/avx512dqvl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512dqvl-intrinsics.ll
@@ -6,7 +6,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.cvtpd2qq.128(<2 x double>, <2 x i64>, i8
define <2 x i64>@test_int_x86_avx512_mask_cvt_pd2qq_128(<2 x double> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2qq_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtpd2qq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x7b,0xc8]
; CHECK-NEXT: vcvtpd2qq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0x7b,0xc0]
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
@@ -22,7 +22,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.cvtpd2qq.256(<4 x double>, <4 x i64>, i8
define <4 x i64>@test_int_x86_avx512_mask_cvt_pd2qq_256(<4 x double> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2qq_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtpd2qq %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x7b,0xc8]
; CHECK-NEXT: vcvtpd2qq %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0x7b,0xc0]
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
@@ -38,7 +38,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.cvtpd2uqq.128(<2 x double>, <2 x i64>, i
define <2 x i64>@test_int_x86_avx512_mask_cvt_pd2uqq_128(<2 x double> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2uqq_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtpd2uqq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x79,0xc8]
; CHECK-NEXT: vcvtpd2uqq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0x79,0xc0]
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
@@ -54,7 +54,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.cvtpd2uqq.256(<4 x double>, <4 x i64>, i
define <4 x i64>@test_int_x86_avx512_mask_cvt_pd2uqq_256(<4 x double> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2uqq_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtpd2uqq %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x79,0xc8]
; CHECK-NEXT: vcvtpd2uqq %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0x79,0xc0]
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
@@ -70,7 +70,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.cvtps2qq.128(<4 x float>, <2 x i64>, i8)
define <2 x i64>@test_int_x86_avx512_mask_cvt_ps2qq_128(<4 x float> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2qq_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtps2qq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x7b,0xc8]
; CHECK-NEXT: vcvtps2qq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x7b,0xc0]
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
@@ -86,7 +86,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.cvtps2qq.256(<4 x float>, <4 x i64>, i8)
define <4 x i64>@test_int_x86_avx512_mask_cvt_ps2qq_256(<4 x float> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2qq_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtps2qq %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x7b,0xc8]
; CHECK-NEXT: vcvtps2qq %xmm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0x7b,0xc0]
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
@@ -102,7 +102,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.cvtps2uqq.128(<4 x float>, <2 x i64>, i8
define <2 x i64>@test_int_x86_avx512_mask_cvt_ps2uqq_128(<4 x float> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2uqq_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtps2uqq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x79,0xc8]
; CHECK-NEXT: vcvtps2uqq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x79,0xc0]
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
@@ -118,7 +118,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.cvtps2uqq.256(<4 x float>, <4 x i64>, i8
define <4 x i64>@test_int_x86_avx512_mask_cvt_ps2uqq_256(<4 x float> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2uqq_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtps2uqq %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x79,0xc8]
; CHECK-NEXT: vcvtps2uqq %xmm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0x79,0xc0]
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
@@ -134,7 +134,7 @@ declare <2 x double> @llvm.x86.avx512.mask.cvtqq2pd.128(<2 x i64>, <2 x double>,
define <2 x double>@test_int_x86_avx512_mask_cvt_qq2pd_128(<2 x i64> %x0, <2 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_qq2pd_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtqq2pd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfe,0x09,0xe6,0xc8]
; CHECK-NEXT: vcvtqq2pd %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfe,0x08,0xe6,0xc0]
; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc0]
@@ -150,7 +150,7 @@ declare <4 x double> @llvm.x86.avx512.mask.cvtqq2pd.256(<4 x i64>, <4 x double>,
define <4 x double>@test_int_x86_avx512_mask_cvt_qq2pd_256(<4 x i64> %x0, <4 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_qq2pd_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtqq2pd %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xfe,0x29,0xe6,0xc8]
; CHECK-NEXT: vcvtqq2pd %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfe,0x28,0xe6,0xc0]
; CHECK-NEXT: vaddpd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc0]
@@ -166,7 +166,7 @@ declare <4 x float> @llvm.x86.avx512.mask.cvtqq2ps.128(<2 x i64>, <4 x float>, i
define <4 x float>@test_int_x86_avx512_mask_cvt_qq2ps_128(<2 x i64> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_qq2ps_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtqq2ps %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfc,0x09,0x5b,0xc8]
; CHECK-NEXT: vcvtqq2ps %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfc,0x08,0x5b,0xc0]
; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xc0]
@@ -180,7 +180,7 @@ define <4 x float>@test_int_x86_avx512_mask_cvt_qq2ps_128(<2 x i64> %x0, <4 x fl
define <4 x float>@test_int_x86_avx512_mask_cvt_qq2ps_128_zext(<2 x i64> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_qq2ps_128_zext:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtqq2ps %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfc,0x09,0x5b,0xc8]
; CHECK-NEXT: vmovq %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc9]
; CHECK-NEXT: ## xmm1 = xmm1[0],zero
@@ -200,7 +200,7 @@ declare <4 x float> @llvm.x86.avx512.mask.cvtqq2ps.256(<4 x i64>, <4 x float>, i
define <4 x float>@test_int_x86_avx512_mask_cvt_qq2ps_256(<4 x i64> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_qq2ps_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtqq2ps %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfc,0x29,0x5b,0xc8]
; CHECK-NEXT: vcvtqq2ps %ymm0, %xmm0 ## encoding: [0x62,0xf1,0xfc,0x28,0x5b,0xc0]
; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xc0]
@@ -216,7 +216,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.cvttpd2qq.128(<2 x double>, <2 x i64>, i
define <2 x i64>@test_int_x86_avx512_mask_cvtt_pd2qq_128(<2 x double> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2qq_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttpd2qq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x7a,0xc8]
; CHECK-NEXT: vcvttpd2qq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0x7a,0xc0]
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
@@ -232,7 +232,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.cvttpd2qq.256(<4 x double>, <4 x i64>, i
define <4 x i64>@test_int_x86_avx512_mask_cvtt_pd2qq_256(<4 x double> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2qq_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttpd2qq %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x7a,0xc8]
; CHECK-NEXT: vcvttpd2qq %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0x7a,0xc0]
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
@@ -248,7 +248,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.cvttpd2uqq.128(<2 x double>, <2 x i64>,
define <2 x i64>@test_int_x86_avx512_mask_cvtt_pd2uqq_128(<2 x double> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2uqq_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttpd2uqq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x78,0xc8]
; CHECK-NEXT: vcvttpd2uqq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0x78,0xc0]
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
@@ -264,7 +264,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.cvttpd2uqq.256(<4 x double>, <4 x i64>,
define <4 x i64>@test_int_x86_avx512_mask_cvtt_pd2uqq_256(<4 x double> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2uqq_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttpd2uqq %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x78,0xc8]
; CHECK-NEXT: vcvttpd2uqq %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0x78,0xc0]
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
@@ -280,7 +280,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.cvttps2qq.128(<4 x float>, <2 x i64>, i8
define <2 x i64>@test_int_x86_avx512_mask_cvtt_ps2qq_128(<4 x float> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2qq_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttps2qq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x7a,0xc8]
; CHECK-NEXT: vcvttps2qq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x7a,0xc0]
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
@@ -296,7 +296,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.cvttps2qq.256(<4 x float>, <4 x i64>, i8
define <4 x i64>@test_int_x86_avx512_mask_cvtt_ps2qq_256(<4 x float> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2qq_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttps2qq %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x7a,0xc8]
; CHECK-NEXT: vcvttps2qq %xmm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0x7a,0xc0]
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
@@ -312,7 +312,7 @@ declare <2 x double> @llvm.x86.avx512.mask.cvtuqq2pd.128(<2 x i64>, <2 x double>
define <2 x double>@test_int_x86_avx512_mask_cvt_uqq2pd_128(<2 x i64> %x0, <2 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_uqq2pd_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtuqq2pd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xfe,0x09,0x7a,0xc8]
; CHECK-NEXT: vcvtuqq2pd %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfe,0x08,0x7a,0xc0]
; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc0]
@@ -328,7 +328,7 @@ declare <4 x double> @llvm.x86.avx512.mask.cvtuqq2pd.256(<4 x i64>, <4 x double>
define <4 x double>@test_int_x86_avx512_mask_cvt_uqq2pd_256(<4 x i64> %x0, <4 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_uqq2pd_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtuqq2pd %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xfe,0x29,0x7a,0xc8]
; CHECK-NEXT: vcvtuqq2pd %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfe,0x28,0x7a,0xc0]
; CHECK-NEXT: vaddpd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc0]
@@ -344,7 +344,7 @@ declare <4 x float> @llvm.x86.avx512.mask.cvtuqq2ps.128(<2 x i64>, <4 x float>,
define <4 x float>@test_int_x86_avx512_mask_cvt_uqq2ps_128(<2 x i64> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_uqq2ps_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtuqq2ps %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xff,0x09,0x7a,0xc8]
; CHECK-NEXT: vcvtuqq2ps %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xff,0x08,0x7a,0xc0]
; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xc0]
@@ -358,7 +358,7 @@ define <4 x float>@test_int_x86_avx512_mask_cvt_uqq2ps_128(<2 x i64> %x0, <4 x f
define <4 x float>@test_int_x86_avx512_mask_cvt_uqq2ps_128_zext(<2 x i64> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_uqq2ps_128_zext:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtuqq2ps %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xff,0x09,0x7a,0xc8]
; CHECK-NEXT: vmovq %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc9]
; CHECK-NEXT: ## xmm1 = xmm1[0],zero
@@ -378,7 +378,7 @@ declare <4 x float> @llvm.x86.avx512.mask.cvtuqq2ps.256(<4 x i64>, <4 x float>,
define <4 x float>@test_int_x86_avx512_mask_cvt_uqq2ps_256(<4 x i64> %x0, <4 x float> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_uqq2ps_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvtuqq2ps %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xff,0x29,0x7a,0xc8]
; CHECK-NEXT: vcvtuqq2ps %ymm0, %xmm0 ## encoding: [0x62,0xf1,0xff,0x28,0x7a,0xc0]
; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xc0]
@@ -394,7 +394,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.cvttps2uqq.128(<4 x float>, <2 x i64>, i
define <2 x i64>@test_int_x86_avx512_mask_cvtt_ps2uqq_128(<4 x float> %x0, <2 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2uqq_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttps2uqq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x78,0xc8]
; CHECK-NEXT: vcvttps2uqq %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x78,0xc0]
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
@@ -410,7 +410,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.cvttps2uqq.256(<4 x float>, <4 x i64>, i
define <4 x i64>@test_int_x86_avx512_mask_cvtt_ps2uqq_256(<4 x float> %x0, <4 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2uqq_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vcvttps2uqq %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x78,0xc8]
; CHECK-NEXT: vcvttps2uqq %xmm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0x78,0xc0]
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
@@ -426,7 +426,7 @@ declare <2 x double> @llvm.x86.avx512.mask.reduce.pd.128(<2 x double>, i32, <2 x
define <2 x double>@test_int_x86_avx512_mask_reduce_pd_128(<2 x double> %x0, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_pd_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vreducepd $4, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x56,0xc8,0x04]
; CHECK-NEXT: vreducepd $8, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0xfd,0x08,0x56,0xc0,0x08]
; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc0]
@@ -442,7 +442,7 @@ declare <4 x double> @llvm.x86.avx512.mask.reduce.pd.256(<4 x double>, i32, <4 x
define <4 x double>@test_int_x86_avx512_mask_reduce_pd_256(<4 x double> %x0, <4 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_pd_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vreducepd $4, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x56,0xc8,0x04]
; CHECK-NEXT: vreducepd $0, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0xfd,0x28,0x56,0xc0,0x00]
; CHECK-NEXT: vaddpd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc0]
@@ -458,7 +458,7 @@ declare <4 x float> @llvm.x86.avx512.mask.reduce.ps.128(<4 x float>, i32, <4 x f
define <4 x float>@test_int_x86_avx512_mask_reduce_ps_128(<4 x float> %x0, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_ps_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vreduceps $4, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x56,0xc8,0x04]
; CHECK-NEXT: vreduceps $88, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0x7d,0x08,0x56,0xc0,0x58]
; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xc0]
@@ -474,7 +474,7 @@ declare <8 x float> @llvm.x86.avx512.mask.reduce.ps.256(<8 x float>, i32, <8 x f
define <8 x float>@test_int_x86_avx512_mask_reduce_ps_256(<8 x float> %x0, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_ps_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vreduceps $11, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x56,0xc8,0x0b]
; CHECK-NEXT: vreduceps $11, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0x7d,0x28,0x56,0xc0,0x0b]
; CHECK-NEXT: vaddps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xc0]
@@ -490,7 +490,7 @@ declare <2 x double> @llvm.x86.avx512.mask.range.pd.128(<2 x double>, <2 x doubl
define <2 x double>@test_int_x86_avx512_mask_range_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_range_pd_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrangepd $4, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x50,0xd1,0x04]
; CHECK-NEXT: vrangepd $8, %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0xfd,0x08,0x50,0xc1,0x08]
; CHECK-NEXT: vaddpd %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc0]
@@ -506,7 +506,7 @@ declare <4 x double> @llvm.x86.avx512.mask.range.pd.256(<4 x double>, <4 x doubl
define <4 x double>@test_int_x86_avx512_mask_range_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_range_pd_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrangepd $4, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x50,0xd1,0x04]
; CHECK-NEXT: vrangepd $88, %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0xfd,0x28,0x50,0xc1,0x58]
; CHECK-NEXT: vaddpd %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc0]
@@ -522,7 +522,7 @@ declare <4 x float> @llvm.x86.avx512.mask.range.ps.128(<4 x float>, <4 x float>,
define <4 x float>@test_int_x86_avx512_mask_range_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_range_ps_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrangeps $4, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x50,0xd1,0x04]
; CHECK-NEXT: vrangeps $88, %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0x7d,0x08,0x50,0xc1,0x58]
; CHECK-NEXT: vaddps %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc0]
@@ -538,7 +538,7 @@ declare <8 x float> @llvm.x86.avx512.mask.range.ps.256(<8 x float>, <8 x float>,
define <8 x float>@test_int_x86_avx512_mask_range_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_range_ps_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrangeps $4, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x50,0xd1,0x04]
; CHECK-NEXT: vrangeps $88, %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0x7d,0x28,0x50,0xc1,0x58]
; CHECK-NEXT: vaddps %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc0]
@@ -554,12 +554,13 @@ declare i8 @llvm.x86.avx512.mask.fpclass.ps.128(<4 x float>, i32, i8)
define i8 @test_int_x86_avx512_mask_fpclass_ps_128(<4 x float> %x0, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_ps_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfpclassps $2, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x66,0xc0,0x02]
-; CHECK-NEXT: kmovb %k0, %ecx ## encoding: [0xc5,0xf9,0x93,0xc8]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
; CHECK-NEXT: vfpclassps $4, %xmm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x08,0x66,0xc0,0x04]
-; CHECK-NEXT: kmovb %k0, %eax ## encoding: [0xc5,0xf9,0x93,0xc0]
+; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.ps.128(<4 x float> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.ps.128(<4 x float> %x0, i32 4, i8 -1)
@@ -572,12 +573,13 @@ declare i8 @llvm.x86.avx512.mask.fpclass.ps.256(<8 x float>, i32, i8)
define i8 @test_int_x86_avx512_mask_fpclass_ps_256(<8 x float> %x0, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_ps_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfpclassps $2, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x66,0xc0,0x02]
-; CHECK-NEXT: kmovb %k0, %ecx ## encoding: [0xc5,0xf9,0x93,0xc8]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
; CHECK-NEXT: vfpclassps $4, %ymm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x28,0x66,0xc0,0x04]
-; CHECK-NEXT: kmovb %k0, %eax ## encoding: [0xc5,0xf9,0x93,0xc0]
+; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.ps.256(<8 x float> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.ps.256(<8 x float> %x0, i32 4, i8 -1)
@@ -590,12 +592,13 @@ declare i8 @llvm.x86.avx512.mask.fpclass.pd.128(<2 x double>, i32, i8)
define i8 @test_int_x86_avx512_mask_fpclass_pd_128(<2 x double> %x0, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_pd_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfpclasspd $4, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x66,0xc0,0x04]
-; CHECK-NEXT: kmovb %k0, %ecx ## encoding: [0xc5,0xf9,0x93,0xc8]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
; CHECK-NEXT: vfpclasspd $2, %xmm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x08,0x66,0xc0,0x02]
-; CHECK-NEXT: kmovb %k0, %eax ## encoding: [0xc5,0xf9,0x93,0xc0]
+; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.pd.128(<2 x double> %x0, i32 4, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.pd.128(<2 x double> %x0, i32 2, i8 -1)
@@ -608,12 +611,13 @@ declare i8 @llvm.x86.avx512.mask.fpclass.pd.256(<4 x double>, i32, i8)
define i8 @test_int_x86_avx512_mask_fpclass_pd_256(<4 x double> %x0, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_pd_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfpclasspd $2, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x66,0xc0,0x02]
-; CHECK-NEXT: kmovb %k0, %ecx ## encoding: [0xc5,0xf9,0x93,0xc8]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
; CHECK-NEXT: vfpclasspd $4, %ymm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x28,0x66,0xc0,0x04]
-; CHECK-NEXT: kmovb %k0, %eax ## encoding: [0xc5,0xf9,0x93,0xc0]
+; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.pd.256(<4 x double> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.pd.256(<4 x double> %x0, i32 4, i8 -1)
@@ -626,13 +630,12 @@ declare <8 x float> @llvm.x86.avx512.mask.broadcastf32x2.256(<4 x float>, <8 x f
define <8 x float>@test_int_x86_avx512_mask_broadcastf32x2_256(<4 x float> %x0, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x2_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vbroadcastf32x2 %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x19,0xc8]
; CHECK-NEXT: ## ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1]
; CHECK-NEXT: vbroadcastf32x2 %xmm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x19,0xd0]
; CHECK-NEXT: ## ymm2 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1]
-; CHECK-NEXT: vbroadcastf32x2 %xmm0, %ymm0 ## encoding: [0x62,0xf2,0x7d,0x28,0x19,0xc0]
-; CHECK-NEXT: ## ymm0 = xmm0[0,1,0,1,0,1,0,1]
+; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x19,0xc0]
; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xca]
; CHECK-NEXT: vaddps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -649,13 +652,12 @@ declare <8 x i32> @llvm.x86.avx512.mask.broadcasti32x2.256(<4 x i32>, <8 x i32>,
define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x2_256(<4 x i32> %x0, <8 x i32> %x2, i8 %x3, i64 * %y_ptr) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x2_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vbroadcasti32x2 (%rsi), %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x59,0x0e]
; CHECK-NEXT: ## ymm1 {%k1} = mem[0,1,0,1,0,1,0,1]
; CHECK-NEXT: vbroadcasti32x2 %xmm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x59,0xd0]
; CHECK-NEXT: ## ymm2 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1]
-; CHECK-NEXT: vbroadcasti32x2 %xmm0, %ymm0 ## encoding: [0x62,0xf2,0x7d,0x28,0x59,0xc0]
-; CHECK-NEXT: ## ymm0 = xmm0[0,1,0,1,0,1,0,1]
+; CHECK-NEXT: vpbroadcastq %xmm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x59,0xc0]
; CHECK-NEXT: vpaddd %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
; CHECK-NEXT: vpaddd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -675,10 +677,10 @@ declare <4 x i32> @llvm.x86.avx512.mask.broadcasti32x2.128(<4 x i32>, <4 x i32>,
define <4 x i32>@test_int_x86_avx512_mask_broadcasti32x2_128(<4 x i32> %x0, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x2_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vbroadcasti32x2 %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x59,0xc8]
; CHECK-NEXT: vbroadcasti32x2 %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x59,0xd0]
-; CHECK-NEXT: vbroadcasti32x2 %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7d,0x08,0x59,0xc0]
+; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x59,0xc0]
; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -696,7 +698,8 @@ define i8@test_int_x86_avx512_cvtd2mask_128(<4 x i32> %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtd2mask_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovd2m %xmm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x08,0x39,0xc0]
-; CHECK-NEXT: kmovb %k0, %eax ## encoding: [0xc5,0xf9,0x93,0xc0]
+; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtd2mask.128(<4 x i32> %x0)
ret i8 %res
@@ -708,7 +711,8 @@ define i8@test_int_x86_avx512_cvtd2mask_256(<8 x i32> %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtd2mask_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovd2m %ymm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x28,0x39,0xc0]
-; CHECK-NEXT: kmovb %k0, %eax ## encoding: [0xc5,0xf9,0x93,0xc0]
+; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtd2mask.256(<8 x i32> %x0)
ret i8 %res
@@ -720,7 +724,8 @@ define i8@test_int_x86_avx512_cvtq2mask_128(<2 x i64> %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtq2mask_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovq2m %xmm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x08,0x39,0xc0]
-; CHECK-NEXT: kmovb %k0, %eax ## encoding: [0xc5,0xf9,0x93,0xc0]
+; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtq2mask.128(<2 x i64> %x0)
ret i8 %res
@@ -732,66 +737,20 @@ define i8@test_int_x86_avx512_cvtq2mask_256(<4 x i64> %x0) {
; CHECK-LABEL: test_int_x86_avx512_cvtq2mask_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovq2m %ymm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x28,0x39,0xc0]
-; CHECK-NEXT: kmovb %k0, %eax ## encoding: [0xc5,0xf9,0x93,0xc0]
+; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtq2mask.256(<4 x i64> %x0)
ret i8 %res
}
-declare <4 x i32> @llvm.x86.avx512.cvtmask2d.128(i8)
-
-define <4 x i32>@test_int_x86_avx512_cvtmask2d_128(i8 %x0) {
-; CHECK-LABEL: test_int_x86_avx512_cvtmask2d_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k0 ## encoding: [0xc5,0xf9,0x92,0xc7]
-; CHECK-NEXT: vpmovm2d %k0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x38,0xc0]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.avx512.cvtmask2d.128(i8 %x0)
- ret <4 x i32> %res
-}
-
-declare <8 x i32> @llvm.x86.avx512.cvtmask2d.256(i8)
-
-define <8 x i32>@test_int_x86_avx512_cvtmask2d_256(i8 %x0) {
-; CHECK-LABEL: test_int_x86_avx512_cvtmask2d_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k0 ## encoding: [0xc5,0xf9,0x92,0xc7]
-; CHECK-NEXT: vpmovm2d %k0, %ymm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x38,0xc0]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x i32> @llvm.x86.avx512.cvtmask2d.256(i8 %x0)
- ret <8 x i32> %res
-}
-
-declare <2 x i64> @llvm.x86.avx512.cvtmask2q.128(i8)
-
-define <2 x i64>@test_int_x86_avx512_cvtmask2q_128(i8 %x0) {
-; CHECK-LABEL: test_int_x86_avx512_cvtmask2q_128:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k0 ## encoding: [0xc5,0xf9,0x92,0xc7]
-; CHECK-NEXT: vpmovm2q %k0, %xmm0 ## encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.avx512.cvtmask2q.128(i8 %x0)
- ret <2 x i64> %res
-}
-
-declare <4 x i64> @llvm.x86.avx512.cvtmask2q.256(i8)
-
-define <4 x i64>@test_int_x86_avx512_cvtmask2q_256(i8 %x0) {
-; CHECK-LABEL: test_int_x86_avx512_cvtmask2q_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k0 ## encoding: [0xc5,0xf9,0x92,0xc7]
-; CHECK-NEXT: vpmovm2q %k0, %ymm0 ## encoding: [0x62,0xf2,0xfe,0x28,0x38,0xc0]
-; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x i64> @llvm.x86.avx512.cvtmask2q.256(i8 %x0)
- ret <4 x i64> %res
-}
declare <4 x double> @llvm.x86.avx512.mask.broadcastf64x2.256(<2 x double>, <4 x double>, i8)
define <4 x double>@test_int_x86_avx512_mask_broadcastf64x2_256(<2 x double> %x0, <4 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_256:
; CHECK: ## BB#0:
; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vshuff64x2 $0, %ymm0, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0x23,0xd0,0x00]
; CHECK-NEXT: ## ymm2 {%k1} {z} = ymm0[0,1,0,1]
; CHECK-NEXT: vshuff64x2 $0, %ymm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x23,0xc8,0x00]
@@ -810,13 +769,27 @@ define <4 x double>@test_int_x86_avx512_mask_broadcastf64x2_256(<2 x double> %x0
ret <4 x double> %res5
}
+define <4 x double>@test_int_x86_avx512_mask_broadcastf64x2_256_load(<2 x double>* %x0ptr, <4 x double> %x2, i8 %mask) {
+; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_256_load:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: vmovapd (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0x0f]
+; CHECK-NEXT: vshuff64x2 $0, %ymm1, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf3,0xf5,0x29,0x23,0xc1,0x00]
+; CHECK-NEXT: ## ymm0 {%k1} = ymm1[0,1,0,1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+
+ %x0 = load <2 x double>, <2 x double>* %x0ptr
+ %res = call <4 x double> @llvm.x86.avx512.mask.broadcastf64x2.256(<2 x double> %x0, <4 x double> %x2, i8 %mask)
+ ret <4 x double> %res
+}
+
declare <4 x i64> @llvm.x86.avx512.mask.broadcasti64x2.256(<2 x i64>, <4 x i64>, i8)
define <4 x i64>@test_int_x86_avx512_mask_broadcasti64x2_256(<2 x i64> %x0, <4 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_256:
; CHECK: ## BB#0:
; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vshufi64x2 $0, %ymm0, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0x43,0xd0,0x00]
; CHECK-NEXT: ## ymm2 {%k1} {z} = ymm0[0,1,0,1]
; CHECK-NEXT: vshufi64x2 $0, %ymm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x43,0xc8,0x00]
@@ -834,3 +807,17 @@ define <4 x i64>@test_int_x86_avx512_mask_broadcasti64x2_256(<2 x i64> %x0, <4 x
%res5 = add <4 x i64> %res3, %res4
ret <4 x i64> %res5
}
+
+define <4 x i64>@test_int_x86_avx512_mask_broadcasti64x2_256_load(<2 x i64>* %x0ptr, <4 x i64> %x2, i8 %mask) {
+; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_256_load:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: vmovdqa (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x0f]
+; CHECK-NEXT: vshufi64x2 $0, %ymm1, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf3,0xf5,0x29,0x43,0xc1,0x00]
+; CHECK-NEXT: ## ymm0 {%k1} = ymm1[0,1,0,1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+
+ %x0 = load <2 x i64>, <2 x i64>* %x0ptr
+ %res = call <4 x i64> @llvm.x86.avx512.mask.broadcasti64x2.256(<2 x i64> %x0, <4 x i64> %x2, i8 %mask)
+ ret <4 x i64> %res
+}
diff --git a/test/CodeGen/X86/avx512er-intrinsics.ll b/test/CodeGen/X86/avx512er-intrinsics.ll
index 827a56d76ae1..ca130bd2b676 100644
--- a/test/CodeGen/X86/avx512er-intrinsics.ll
+++ b/test/CodeGen/X86/avx512er-intrinsics.ll
@@ -1,34 +1,57 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=knl --show-mc-encoding| FileCheck %s
define <16 x float> @test_rsqrt28_ps(<16 x float> %a0) {
- ; CHECK: vrsqrt28ps {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xcc,0xc0]
+; CHECK-LABEL: test_rsqrt28_ps:
+; CHECK: # BB#0:
+; CHECK-NEXT: vrsqrt28ps {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xcc,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
ret <16 x float> %res
}
define <16 x float> @test1_rsqrt28_ps(<16 x float> %a0, <16 x float> %a1) {
- ; CHECK: kmovw
- ; CHECK: vrsqrt28ps {sae}, %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf2,0x7d,0x19,0xcc,0xc8]
+; CHECK-LABEL: test1_rsqrt28_ps:
+; CHECK: # BB#0:
+; CHECK-NEXT: movw $6, %ax # encoding: [0x66,0xb8,0x06,0x00]
+; CHECK-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; CHECK-NEXT: vrsqrt28ps {sae}, %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf2,0x7d,0x19,0xcc,0xc8]
+; CHECK-NEXT: vmovaps %zmm1, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1]
+; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float> %a0, <16 x float> %a1, i16 6, i32 8)
ret <16 x float> %res
}
define <16 x float> @test2_rsqrt28_ps(<16 x float> %a0) {
- ; CHECK: vrsqrt28ps %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xc9,0xcc,0xc0]
+; CHECK-LABEL: test2_rsqrt28_ps:
+; CHECK: # BB#0:
+; CHECK-NEXT: movw $6, %ax # encoding: [0x66,0xb8,0x06,0x00]
+; CHECK-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; CHECK-NEXT: vrsqrt28ps %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xc9,0xcc,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float> %a0, <16 x float> undef, i16 6, i32 4)
ret <16 x float> %res
}
define <16 x float> @test3_rsqrt28_ps(<16 x float> %a0) {
- ; CHECK: kmovw
- ; CHECK: vrsqrt28ps %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xc9,0xcc,0xc0]
+; CHECK-LABEL: test3_rsqrt28_ps:
+; CHECK: # BB#0:
+; CHECK-NEXT: movw $6, %ax # encoding: [0x66,0xb8,0x06,0x00]
+; CHECK-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; CHECK-NEXT: vrsqrt28ps %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xc9,0xcc,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float> %a0, <16 x float> zeroinitializer, i16 6, i32 4)
ret <16 x float> %res
}
define <16 x float> @test4_rsqrt28_ps(<16 x float> %a0) {
- ; CHECK: vrsqrt28ps {sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x99,0xcc,0xc0]
- %res = call <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float> %a0, <16 x float> undef, i16 6, i32 8)
+; CHECK-LABEL: test4_rsqrt28_ps:
+; CHECK: # BB#0:
+; CHECK-NEXT: movw $6, %ax # encoding: [0x66,0xb8,0x06,0x00]
+; CHECK-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; CHECK-NEXT: vrsqrt28ps {sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x99,0xcc,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+ %res = call <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float> %a0, <16 x float> undef, i16 6, i32 8)
ret <16 x float> %res
}
@@ -36,77 +59,133 @@ define <16 x float> @test4_rsqrt28_ps(<16 x float> %a0) {
declare <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float>, <16 x float>, i16, i32) nounwind readnone
define <16 x float> @test_rcp28_ps_512(<16 x float> %a0) {
- ; CHECK: vrcp28ps {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xca,0xc0]
+; CHECK-LABEL: test_rcp28_ps_512:
+; CHECK: # BB#0:
+; CHECK-NEXT: vrcp28ps {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xca,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <16 x float> @llvm.x86.avx512.rcp28.ps(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
ret <16 x float> %res
}
declare <16 x float> @llvm.x86.avx512.rcp28.ps(<16 x float>, <16 x float>, i16, i32) nounwind readnone
define <8 x double> @test_rcp28_pd_512(<8 x double> %a0) {
- ; CHECK: vrcp28pd {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x18,0xca,0xc0]
- %res = call <8 x double> @llvm.x86.avx512.rcp28.pd(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 8)
+; CHECK-LABEL: test_rcp28_pd_512:
+; CHECK: # BB#0:
+; CHECK-NEXT: vrcp28pd {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x18,0xca,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+ %res = call <8 x double> @llvm.x86.avx512.rcp28.pd(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 8)
ret <8 x double> %res
}
declare <8 x double> @llvm.x86.avx512.rcp28.pd(<8 x double>, <8 x double>, i8, i32) nounwind readnone
define <16 x float> @test_exp2_ps_512(<16 x float> %a0) {
- ; CHECK: vexp2ps {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xc8,0xc0]
+; CHECK-LABEL: test_exp2_ps_512:
+; CHECK: # BB#0:
+; CHECK-NEXT: vexp2ps {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xc8,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <16 x float> @llvm.x86.avx512.exp2.ps(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
ret <16 x float> %res
}
declare <16 x float> @llvm.x86.avx512.exp2.ps(<16 x float>, <16 x float>, i16, i32) nounwind readnone
define <8 x double> @test_exp2_pd_512(<8 x double> %a0) {
- ; CHECK: vexp2pd {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x18,0xc8,0xc0]
+; CHECK-LABEL: test_exp2_pd_512:
+; CHECK: # BB#0:
+; CHECK-NEXT: vexp2pd {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x18,0xc8,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <8 x double> @llvm.x86.avx512.exp2.pd(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 8)
ret <8 x double> %res
}
declare <8 x double> @llvm.x86.avx512.exp2.pd(<8 x double>, <8 x double>, i8, i32) nounwind readnone
define <4 x float> @test_rsqrt28_ss(<4 x float> %a0) {
- ; CHECK: vrsqrt28ss {sae}, %xmm0, %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xcd,0xc0]
+; CHECK-LABEL: test_rsqrt28_ss:
+; CHECK: # BB#0:
+; CHECK-NEXT: vrsqrt28ss {sae}, %xmm0, %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xcd,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 -1, i32 8) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
define <4 x float> @test_rcp28_ss(<4 x float> %a0) {
- ; CHECK: vrcp28ss {sae}, %xmm0, %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xcb,0xc0]
+; CHECK-LABEL: test_rcp28_ss:
+; CHECK: # BB#0:
+; CHECK-NEXT: vrcp28ss {sae}, %xmm0, %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xcb,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.rcp28.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 -1, i32 8) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.avx512.rcp28.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
define <4 x float> @test_rsqrt28_ss_maskz(<4 x float> %a0) {
- ; CHECK: vrsqrt28ss {sae}, %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x99,0xcd,0xc0]
- %res = call <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 7, i32 8) ;
+; CHECK-LABEL: test_rsqrt28_ss_maskz:
+; CHECK: # BB#0:
+; CHECK-NEXT: kxnorw %k0, %k0, %k0 # encoding: [0xc5,0xfc,0x46,0xc0]
+; CHECK-NEXT: kshiftrw $15, %k0, %k1 # encoding: [0xc4,0xe3,0xf9,0x30,0xc8,0x0f]
+; CHECK-NEXT: vrsqrt28ss {sae}, %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x99,0xcd,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 7, i32 8)
ret <4 x float> %res
}
define <4 x float> @test_rsqrt28_ss_mask(<4 x float> %a0, <4 x float> %b0, <4 x float> %c0) {
- ; CHECK: vrsqrt28ss {sae}, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x19,0xcd,0xd1]
+; CHECK-LABEL: test_rsqrt28_ss_mask:
+; CHECK: # BB#0:
+; CHECK-NEXT: kxnorw %k0, %k0, %k0 # encoding: [0xc5,0xfc,0x46,0xc0]
+; CHECK-NEXT: kshiftrw $15, %k0, %k1 # encoding: [0xc4,0xe3,0xf9,0x30,0xc8,0x0f]
+; CHECK-NEXT: vrsqrt28ss {sae}, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x19,0xcd,0xd1]
+; CHECK-NEXT: vmovaps %xmm2, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc2]
+; CHECK-NEXT: retq # encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float> %a0, <4 x float> %b0, <4 x float> %c0, i8 7, i32 8) ;
ret <4 x float> %res
}
define <2 x double> @test_rsqrt28_sd_maskz(<2 x double> %a0) {
- ; CHECK: vrsqrt28sd {sae}, %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x99,0xcd,0xc0]
- %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %a0, <2 x double> zeroinitializer, i8 7, i32 8) ;
+; CHECK-LABEL: test_rsqrt28_sd_maskz:
+; CHECK: # BB#0:
+; CHECK-NEXT: kxnorw %k0, %k0, %k0 # encoding: [0xc5,0xfc,0x46,0xc0]
+; CHECK-NEXT: kshiftrw $15, %k0, %k1 # encoding: [0xc4,0xe3,0xf9,0x30,0xc8,0x0f]
+; CHECK-NEXT: vrsqrt28sd {sae}, %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x99,0xcd,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %a0, <2 x double> zeroinitializer, i8 7, i32 8)
+ ret <2 x double> %res
+}
+
+define <2 x double> @test_rsqrt28_sd_mask(<2 x double> %a0, <2 x double> %b0, <2 x double> %c0) {
+; CHECK-LABEL: test_rsqrt28_sd_mask:
+; CHECK: # BB#0:
+; CHECK-NEXT: kxnorw %k0, %k0, %k0 # encoding: [0xc5,0xfc,0x46,0xc0]
+; CHECK-NEXT: kshiftrw $15, %k0, %k1 # encoding: [0xc4,0xe3,0xf9,0x30,0xc8,0x0f]
+; CHECK-NEXT: vrsqrt28sd {sae}, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x19,0xcd,0xd1]
+; CHECK-NEXT: vmovapd %xmm2, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc2]
+; CHECK-NEXT: retq # encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %b0, <2 x double> %c0, i8 7, i32 8)
ret <2 x double> %res
}
declare <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) nounwind readnone
define <2 x double> @test_rsqrt28_sd_maskz_mem(<2 x double> %a0, double* %ptr ) {
- ; CHECK: vrsqrt28sd (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x07]
+; CHECK-LABEL: test_rsqrt28_sd_maskz_mem:
+; CHECK: # BB#0:
+; CHECK-NEXT: kxnorw %k0, %k0, %k0 # encoding: [0xc5,0xfc,0x46,0xc0]
+; CHECK-NEXT: kshiftrw $15, %k0, %k1 # encoding: [0xc4,0xe3,0xf9,0x30,0xc8,0x0f]
+; CHECK-NEXT: vrsqrt28sd (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x07]
+; CHECK-NEXT: retq # encoding: [0xc3]
%mem = load double , double * %ptr, align 8
%mem_v = insertelement <2 x double> undef, double %mem, i32 0
- %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %mem_v, <2 x double> zeroinitializer, i8 7, i32 4) ;
+ %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %mem_v, <2 x double> zeroinitializer, i8 7, i32 4)
ret <2 x double> %res
}
define <2 x double> @test_rsqrt28_sd_maskz_mem_offset(<2 x double> %a0, double* %ptr ) {
- ; CHECK: vrsqrt28sd 144(%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x47,0x12]
+; CHECK-LABEL: test_rsqrt28_sd_maskz_mem_offset:
+; CHECK: # BB#0:
+; CHECK-NEXT: kxnorw %k0, %k0, %k0 # encoding: [0xc5,0xfc,0x46,0xc0]
+; CHECK-NEXT: kshiftrw $15, %k0, %k1 # encoding: [0xc4,0xe3,0xf9,0x30,0xc8,0x0f]
+; CHECK-NEXT: vrsqrt28sd 144(%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x47,0x12]
+; CHECK-NEXT: retq # encoding: [0xc3]
%ptr1 = getelementptr double, double* %ptr, i32 18
%mem = load double , double * %ptr1, align 8
%mem_v = insertelement <2 x double> undef, double %mem, i32 0
diff --git a/test/CodeGen/X86/avx512ifma-intrinsics.ll b/test/CodeGen/X86/avx512ifma-intrinsics.ll
index 727a0dce334e..30ecc0d2e49e 100644
--- a/test/CodeGen/X86/avx512ifma-intrinsics.ll
+++ b/test/CodeGen/X86/avx512ifma-intrinsics.ll
@@ -1,20 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512ifma | FileCheck %s
declare <8 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
define <8 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52h_uq_512:
-; CHECK: kmovw %edi, %k1
-; CHECK: vmovaps %zmm0, %zmm3
-; CHECK: vpmadd52huq %zmm2, %zmm1, %zmm3 {%k1}
-; CHECK: vmovaps %zmm0, %zmm4
-; CHECK: vpmadd52huq %zmm2, %zmm1, %zmm4
-; CHECK: vpxord %zmm2, %zmm2, %zmm2
-; CHECK: vpmadd52huq %zmm2, %zmm1, %zmm0 {%k1}
-; CHECK: vpmadd52huq %zmm2, %zmm1, %zmm2 {%k1} {z}
-; CHECK: vpaddq %zmm0, %zmm3, %zmm0
-; CHECK: vpaddq %zmm2, %zmm4, %zmm1
-; CHECK: vpaddq %zmm0, %zmm1, %zmm0
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
+; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm3
+; CHECK-NEXT: vmovdqa64 %zmm0, %zmm4
+; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm4 {%k1}
+; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
+; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
+; CHECK-NEXT: vpaddq %zmm2, %zmm3, %zmm1
+; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.vpmadd52h.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> zeroinitializer, i8 %x3)
@@ -30,17 +33,19 @@ declare <8 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.512(<8 x i64>, <8 x i64>,
define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52h_uq_512:
-; CHECK: kmovw %edi, %k1
-; CHECK: vmovaps %zmm0, %zmm3
-; CHECK: vpmadd52huq %zmm2, %zmm1, %zmm3 {%k1} {z}
-; CHECK: vmovaps %zmm0, %zmm4
-; CHECK: vpmadd52huq %zmm2, %zmm1, %zmm4
-; CHECK: vpxord %zmm2, %zmm2, %zmm2
-; CHECK: vpmadd52huq %zmm2, %zmm1, %zmm0 {%k1} {z}
-; CHECK: vpmadd52huq %zmm2, %zmm1, %zmm2 {%k1} {z}
-; CHECK: vpaddq %zmm0, %zmm3, %zmm0
-; CHECK: vpaddq %zmm2, %zmm4, %zmm1
-; CHECK: vpaddq %zmm0, %zmm1, %zmm0
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
+; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm3
+; CHECK-NEXT: vmovdqa64 %zmm0, %zmm4
+; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm4 {%k1} {z}
+; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
+; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
+; CHECK-NEXT: vpaddq %zmm2, %zmm3, %zmm1
+; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.maskz.vpmadd52h.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> zeroinitializer, i8 %x3)
@@ -56,17 +61,19 @@ declare <8 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.512(<8 x i64>, <8 x i64>, <
define <8 x i64>@test_int_x86_avx512_mask_vpmadd52l_uq_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52l_uq_512:
-; CHECK: kmovw %edi, %k1
-; CHECK: vmovaps %zmm0, %zmm3
-; CHECK: vpmadd52luq %zmm2, %zmm1, %zmm3 {%k1}
-; CHECK: vmovaps %zmm0, %zmm4
-; CHECK: vpmadd52luq %zmm2, %zmm1, %zmm4
-; CHECK: vpxord %zmm2, %zmm2, %zmm2
-; CHECK: vpmadd52luq %zmm2, %zmm1, %zmm0 {%k1}
-; CHECK: vpmadd52luq %zmm2, %zmm1, %zmm2 {%k1} {z}
-; CHECK: vpaddq %zmm0, %zmm3, %zmm0
-; CHECK: vpaddq %zmm2, %zmm4, %zmm1
-; CHECK: vpaddq %zmm0, %zmm1, %zmm0
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
+; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm3
+; CHECK-NEXT: vmovdqa64 %zmm0, %zmm4
+; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm4 {%k1}
+; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
+; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
+; CHECK-NEXT: vpaddq %zmm2, %zmm3, %zmm1
+; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.vpmadd52l.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> zeroinitializer, i8 %x3)
@@ -82,17 +89,19 @@ declare <8 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.512(<8 x i64>, <8 x i64>,
define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52l_uq_512:
-; CHECK: kmovw %edi, %k1
-; CHECK: vmovaps %zmm0, %zmm3
-; CHECK: vpmadd52luq %zmm2, %zmm1, %zmm3 {%k1} {z}
-; CHECK: vmovaps %zmm0, %zmm4
-; CHECK: vpmadd52luq %zmm2, %zmm1, %zmm4
-; CHECK: vpxord %zmm2, %zmm2, %zmm2
-; CHECK: vpmadd52luq %zmm2, %zmm1, %zmm0 {%k1} {z}
-; CHECK: vpmadd52luq %zmm2, %zmm1, %zmm2 {%k1} {z}
-; CHECK: vpaddq %zmm0, %zmm3, %zmm0
-; CHECK: vpaddq %zmm2, %zmm4, %zmm1
-; CHECK: vpaddq %zmm0, %zmm1, %zmm0
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
+; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm3
+; CHECK-NEXT: vmovdqa64 %zmm0, %zmm4
+; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm4 {%k1} {z}
+; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
+; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
+; CHECK-NEXT: vpaddq %zmm2, %zmm3, %zmm1
+; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.maskz.vpmadd52l.uq.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> zeroinitializer, i8 %x3)
diff --git a/test/CodeGen/X86/avx512ifmavl-intrinsics.ll b/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
index 8ba45aa38197..3ca686cef3bf 100644
--- a/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512vl -mattr=+avx512ifma | FileCheck %s
@@ -7,15 +8,15 @@ define <2 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_128(<2 x i64> %x0, <2 x i
; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52h_uq_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovaps %xmm0, %xmm3
-; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm3 {%k1}
-; CHECK-NEXT: vmovaps %xmm0, %xmm4
-; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm4
+; CHECK-NEXT: vmovdqa %xmm0, %xmm3
+; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm3
+; CHECK-NEXT: vmovdqa %xmm0, %xmm4
+; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm4 {%k1}
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm2 {%k1} {z}
-; CHECK-NEXT: vpaddq %xmm0, %xmm3, %xmm0
-; CHECK-NEXT: vpaddq %xmm2, %xmm4, %xmm1
+; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
+; CHECK-NEXT: vpaddq %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -35,15 +36,15 @@ define <4 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_256(<4 x i64> %x0, <4 x i
; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52h_uq_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovaps %ymm0, %ymm3
-; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm3 {%k1}
-; CHECK-NEXT: vmovaps %ymm0, %ymm4
-; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm4
+; CHECK-NEXT: vmovdqa %ymm0, %ymm3
+; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm3
+; CHECK-NEXT: vmovdqa %ymm0, %ymm4
+; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm4 {%k1}
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm2 {%k1} {z}
-; CHECK-NEXT: vpaddq %ymm0, %ymm3, %ymm0
-; CHECK-NEXT: vpaddq %ymm2, %ymm4, %ymm1
+; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
+; CHECK-NEXT: vpaddq %ymm2, %ymm3, %ymm1
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -63,15 +64,15 @@ define <2 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_128(<2 x i64> %x0, <2 x
; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52h_uq_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovaps %xmm0, %xmm3
-; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm3 {%k1} {z}
-; CHECK-NEXT: vmovaps %xmm0, %xmm4
-; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm4
+; CHECK-NEXT: vmovdqa %xmm0, %xmm3
+; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm3
+; CHECK-NEXT: vmovdqa %xmm0, %xmm4
+; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm4 {%k1} {z}
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm2 {%k1} {z}
-; CHECK-NEXT: vpaddq %xmm0, %xmm3, %xmm0
-; CHECK-NEXT: vpaddq %xmm2, %xmm4, %xmm1
+; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
+; CHECK-NEXT: vpaddq %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -91,15 +92,15 @@ define <4 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_256(<4 x i64> %x0, <4 x
; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52h_uq_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovaps %ymm0, %ymm3
-; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm3 {%k1} {z}
-; CHECK-NEXT: vmovaps %ymm0, %ymm4
-; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm4
+; CHECK-NEXT: vmovdqa %ymm0, %ymm3
+; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm3
+; CHECK-NEXT: vmovdqa %ymm0, %ymm4
+; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm4 {%k1} {z}
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm0 {%k1} {z}
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm2 {%k1} {z}
-; CHECK-NEXT: vpaddq %ymm0, %ymm3, %ymm0
-; CHECK-NEXT: vpaddq %ymm2, %ymm4, %ymm1
+; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
+; CHECK-NEXT: vpaddq %ymm2, %ymm3, %ymm1
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -119,15 +120,15 @@ define <2 x i64>@test_int_x86_avx512_mask_vpmadd52l_uq_128(<2 x i64> %x0, <2 x i
; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52l_uq_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovaps %xmm0, %xmm3
-; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm3 {%k1}
-; CHECK-NEXT: vmovaps %xmm0, %xmm4
-; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm4
+; CHECK-NEXT: vmovdqa %xmm0, %xmm3
+; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm3
+; CHECK-NEXT: vmovdqa %xmm0, %xmm4
+; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm4 {%k1}
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm2 {%k1} {z}
-; CHECK-NEXT: vpaddq %xmm0, %xmm3, %xmm0
-; CHECK-NEXT: vpaddq %xmm2, %xmm4, %xmm1
+; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
+; CHECK-NEXT: vpaddq %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -147,15 +148,15 @@ define <4 x i64>@test_int_x86_avx512_mask_vpmadd52l_uq_256(<4 x i64> %x0, <4 x i
; CHECK-LABEL: test_int_x86_avx512_mask_vpmadd52l_uq_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovaps %ymm0, %ymm3
-; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm3 {%k1}
-; CHECK-NEXT: vmovaps %ymm0, %ymm4
-; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm4
+; CHECK-NEXT: vmovdqa %ymm0, %ymm3
+; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm3
+; CHECK-NEXT: vmovdqa %ymm0, %ymm4
+; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm4 {%k1}
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm2 {%k1} {z}
-; CHECK-NEXT: vpaddq %ymm0, %ymm3, %ymm0
-; CHECK-NEXT: vpaddq %ymm2, %ymm4, %ymm1
+; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
+; CHECK-NEXT: vpaddq %ymm2, %ymm3, %ymm1
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -175,15 +176,15 @@ define <2 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_128(<2 x i64> %x0, <2 x
; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52l_uq_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovaps %xmm0, %xmm3
-; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm3 {%k1} {z}
-; CHECK-NEXT: vmovaps %xmm0, %xmm4
-; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm4
+; CHECK-NEXT: vmovdqa %xmm0, %xmm3
+; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm3
+; CHECK-NEXT: vmovdqa %xmm0, %xmm4
+; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm4 {%k1} {z}
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm2 {%k1} {z}
-; CHECK-NEXT: vpaddq %xmm0, %xmm3, %xmm0
-; CHECK-NEXT: vpaddq %xmm2, %xmm4, %xmm1
+; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
+; CHECK-NEXT: vpaddq %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -203,15 +204,15 @@ define <4 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_256(<4 x i64> %x0, <4 x
; CHECK-LABEL: test_int_x86_avx512_maskz_vpmadd52l_uq_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovaps %ymm0, %ymm3
-; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm3 {%k1} {z}
-; CHECK-NEXT: vmovaps %ymm0, %ymm4
-; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm4
+; CHECK-NEXT: vmovdqa %ymm0, %ymm3
+; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm3
+; CHECK-NEXT: vmovdqa %ymm0, %ymm4
+; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm4 {%k1} {z}
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm0 {%k1} {z}
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm2 {%k1} {z}
-; CHECK-NEXT: vpaddq %ymm0, %ymm3, %ymm0
-; CHECK-NEXT: vpaddq %ymm2, %ymm4, %ymm1
+; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
+; CHECK-NEXT: vpaddq %ymm2, %ymm3, %ymm1
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512vbmivl-intrinsics.ll b/test/CodeGen/X86/avx512vbmivl-intrinsics.ll
index adb419d58789..22edbcc8e157 100644
--- a/test/CodeGen/X86/avx512vbmivl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vbmivl-intrinsics.ll
@@ -6,11 +6,11 @@ declare <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8>, <16 x i8>, <16
define <16 x i8>@test_int_x86_avx512_mask_permvar_qi_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_qi_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpermb %xmm0, %xmm1, %xmm3 ## encoding: [0x62,0xf2,0x75,0x08,0x8d,0xd8]
; CHECK-NEXT: vpermb %xmm0, %xmm1, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0x8d,0xd0]
-; CHECK-NEXT: vpermb %xmm0, %xmm1, %xmm3 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0x8d,0xd8]
-; CHECK-NEXT: vpermb %xmm0, %xmm1, %xmm0 ## encoding: [0x62,0xf2,0x75,0x08,0x8d,0xc0]
-; CHECK-NEXT: vpaddb %xmm0, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfc,0xc0]
+; CHECK-NEXT: vpermb %xmm0, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0x8d,0xc0]
+; CHECK-NEXT: vpaddb %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc3]
; CHECK-NEXT: vpaddb %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3)
@@ -46,11 +46,11 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmultishift.qb.128(<16 x i8>, <16 x i8>,
define <16 x i8>@test_int_x86_avx512_mask_pmultishift_qb_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmultishift_qb_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpmultishiftqb %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x83,0xd9]
; CHECK-NEXT: vpmultishiftqb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x83,0xd1]
-; CHECK-NEXT: vpmultishiftqb %xmm1, %xmm0, %xmm3 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x83,0xd9]
-; CHECK-NEXT: vpmultishiftqb %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf2,0xfd,0x08,0x83,0xc1]
-; CHECK-NEXT: vpaddb %xmm0, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfc,0xc0]
+; CHECK-NEXT: vpmultishiftqb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x83,0xc1]
+; CHECK-NEXT: vpaddb %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc3]
; CHECK-NEXT: vpaddb %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.avx512.mask.pmultishift.qb.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3)
@@ -86,14 +86,14 @@ declare <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8>, <16 x i8>,
define <16 x i8>@test_int_x86_avx512_mask_vpermi2var_qi_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_qi_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd9]
-; CHECK-NEXT: vpermi2b %xmm2, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x75,0xda]
-; CHECK-NEXT: vpermi2b %xmm2, %xmm0, %xmm1 ## encoding: [0x62,0xf2,0x7d,0x08,0x75,0xca]
+; CHECK-NEXT: vpermi2b %xmm2, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0x7d,0x08,0x75,0xda]
+; CHECK-NEXT: vpermi2b %xmm2, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x75,0xca]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpermi2b %xmm2, %xmm0, %xmm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x75,0xe2]
-; CHECK-NEXT: vpaddb %xmm1, %xmm4, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xfc,0xc1]
-; CHECK-NEXT: vpaddb %xmm0, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfc,0xc0]
+; CHECK-NEXT: vpaddb %xmm3, %xmm4, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xfc,0xc3]
+; CHECK-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfc,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3)
%res1 = call <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8> %x0, <16 x i8> zeroinitializer, <16 x i8> %x2, i16 %x3)
@@ -130,14 +130,14 @@ declare <16 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.128(<16 x i8>, <16 x i8>,
define <16 x i8>@test_int_x86_avx512_mask_vpermt2var_qi_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_qi_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd9]
-; CHECK-NEXT: vpermt2b %xmm2, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7d,0xda]
-; CHECK-NEXT: vpermt2b %xmm2, %xmm0, %xmm1 ## encoding: [0x62,0xf2,0x7d,0x08,0x7d,0xca]
+; CHECK-NEXT: vpermt2b %xmm2, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0x7d,0x08,0x7d,0xda]
+; CHECK-NEXT: vpermt2b %xmm2, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7d,0xca]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vpermt2b %xmm2, %xmm0, %xmm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x7d,0xe2]
-; CHECK-NEXT: vpaddb %xmm1, %xmm4, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xfc,0xc1]
-; CHECK-NEXT: vpaddb %xmm0, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfc,0xc0]
+; CHECK-NEXT: vpaddb %xmm3, %xmm4, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xfc,0xc3]
+; CHECK-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfc,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3)
%res1 = call <16 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.128(<16 x i8> %x0, <16 x i8> zeroinitializer, <16 x i8> %x2, i16 %x3)
@@ -174,7 +174,7 @@ declare <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8>, <16 x i8>,
define <16 x i8>@test_int_x86_avx512_maskz_vpermt2var_qi_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_qi_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpermi2b %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0x75,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3)
diff --git a/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
index c63d47d780d1..4d906a4fd29a 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
@@ -980,6 +980,7 @@ define i8 @test_pcmpeq_d_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.256(<8 x i32> %a, <8 x i32> %b, i8 -1)
ret i8 %res
@@ -991,6 +992,7 @@ define i8 @test_mask_pcmpeq_d_256(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.256(<8 x i32> %a, <8 x i32> %b, i8 %mask)
ret i8 %res
@@ -1005,6 +1007,7 @@ define i8 @test_pcmpeq_q_256(<4 x i64> %a, <4 x i64> %b) {
; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.256(<4 x i64> %a, <4 x i64> %b, i8 -1)
ret i8 %res
@@ -1018,6 +1021,7 @@ define i8 @test_mask_pcmpeq_q_256(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.256(<4 x i64> %a, <4 x i64> %b, i8 %mask)
ret i8 %res
@@ -1030,6 +1034,7 @@ define i8 @test_pcmpgt_d_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.256(<8 x i32> %a, <8 x i32> %b, i8 -1)
ret i8 %res
@@ -1041,6 +1046,7 @@ define i8 @test_mask_pcmpgt_d_256(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.256(<8 x i32> %a, <8 x i32> %b, i8 %mask)
ret i8 %res
@@ -1055,6 +1061,7 @@ define i8 @test_pcmpgt_q_256(<4 x i64> %a, <4 x i64> %b) {
; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.256(<4 x i64> %a, <4 x i64> %b, i8 -1)
ret i8 %res
@@ -1068,6 +1075,7 @@ define i8 @test_mask_pcmpgt_q_256(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.256(<4 x i64> %a, <4 x i64> %b, i8 %mask)
ret i8 %res
@@ -1082,6 +1090,7 @@ define i8 @test_pcmpeq_d_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.128(<4 x i32> %a, <4 x i32> %b, i8 -1)
ret i8 %res
@@ -1095,6 +1104,7 @@ define i8 @test_mask_pcmpeq_d_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.128(<4 x i32> %a, <4 x i32> %b, i8 %mask)
ret i8 %res
@@ -1111,6 +1121,7 @@ define i8 @test_pcmpeq_q_128(<2 x i64> %a, <2 x i64> %b) {
; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.128(<2 x i64> %a, <2 x i64> %b, i8 -1)
ret i8 %res
@@ -1126,6 +1137,7 @@ define i8 @test_mask_pcmpeq_q_128(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.128(<2 x i64> %a, <2 x i64> %b, i8 %mask)
ret i8 %res
@@ -1140,6 +1152,7 @@ define i8 @test_pcmpgt_d_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.128(<4 x i32> %a, <4 x i32> %b, i8 -1)
ret i8 %res
@@ -1153,6 +1166,7 @@ define i8 @test_mask_pcmpgt_d_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.128(<4 x i32> %a, <4 x i32> %b, i8 %mask)
ret i8 %res
@@ -1169,6 +1183,7 @@ define i8 @test_pcmpgt_q_128(<2 x i64> %a, <2 x i64> %b) {
; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.128(<2 x i64> %a, <2 x i64> %b, i8 -1)
ret i8 %res
@@ -1184,6 +1199,7 @@ define i8 @test_mask_pcmpgt_q_128(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.128(<2 x i64> %a, <2 x i64> %b, i8 %mask)
ret i8 %res
@@ -2106,7 +2122,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pxor.d.256(<8 x i32>, <8 x i32>, <8 x i3
define <4 x i32> @test_mask_andnot_epi32_rr_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_mask_andnot_epi32_rr_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpandnd %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0xdf,0xc1]
+; CHECK-NEXT: vpandn %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdf,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.pandn.d.128(<4 x i32> %a, <4 x i32> %b, <4 x i32> zeroinitializer, i8 -1)
ret <4 x i32> %res
@@ -2136,7 +2152,7 @@ define <4 x i32> @test_mask_andnot_epi32_rrkz_128(<4 x i32> %a, <4 x i32> %b, i8
define <4 x i32> @test_mask_andnot_epi32_rm_128(<4 x i32> %a, <4 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_andnot_epi32_rm_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpandnd (%rdi), %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0xdf,0x07]
+; CHECK-NEXT: vpandn (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i32>, <4 x i32>* %ptr_b
%res = call <4 x i32> @llvm.x86.avx512.mask.pandn.d.128(<4 x i32> %a, <4 x i32> %b, <4 x i32> zeroinitializer, i8 -1)
@@ -2210,7 +2226,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.pandn.d.128(<4 x i32>, <4 x i32>, <4 x i
define <8 x i32> @test_mask_andnot_epi32_rr_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: test_mask_andnot_epi32_rr_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpandnd %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0xdf,0xc1]
+; CHECK-NEXT: vpandn %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdf,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.pandn.d.256(<8 x i32> %a, <8 x i32> %b, <8 x i32> zeroinitializer, i8 -1)
ret <8 x i32> %res
@@ -2240,7 +2256,7 @@ define <8 x i32> @test_mask_andnot_epi32_rrkz_256(<8 x i32> %a, <8 x i32> %b, i8
define <8 x i32> @test_mask_andnot_epi32_rm_256(<8 x i32> %a, <8 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_andnot_epi32_rm_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpandnd (%rdi), %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0xdf,0x07]
+; CHECK-NEXT: vpandn (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <8 x i32>, <8 x i32>* %ptr_b
%res = call <8 x i32> @llvm.x86.avx512.mask.pandn.d.256(<8 x i32> %a, <8 x i32> %b, <8 x i32> zeroinitializer, i8 -1)
@@ -2314,7 +2330,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pandn.d.256(<8 x i32>, <8 x i32>, <8 x i
define <2 x i64> @test_mask_andnot_epi64_rr_128(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_mask_andnot_epi64_rr_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpandnq %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0xdf,0xc1]
+; CHECK-NEXT: vpandn %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdf,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.pandn.q.128(<2 x i64> %a, <2 x i64> %b, <2 x i64> zeroinitializer, i8 -1)
ret <2 x i64> %res
@@ -2344,7 +2360,7 @@ define <2 x i64> @test_mask_andnot_epi64_rrkz_128(<2 x i64> %a, <2 x i64> %b, i8
define <2 x i64> @test_mask_andnot_epi64_rm_128(<2 x i64> %a, <2 x i64>* %ptr_b) {
; CHECK-LABEL: test_mask_andnot_epi64_rm_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpandnq (%rdi), %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0xdf,0x07]
+; CHECK-NEXT: vpandn (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <2 x i64>, <2 x i64>* %ptr_b
%res = call <2 x i64> @llvm.x86.avx512.mask.pandn.q.128(<2 x i64> %a, <2 x i64> %b, <2 x i64> zeroinitializer, i8 -1)
@@ -2418,7 +2434,7 @@ declare <2 x i64> @llvm.x86.avx512.mask.pandn.q.128(<2 x i64>, <2 x i64>, <2 x i
define <4 x i64> @test_mask_andnot_epi64_rr_256(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: test_mask_andnot_epi64_rr_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpandnq %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0xdf,0xc1]
+; CHECK-NEXT: vpandn %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdf,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.pandn.q.256(<4 x i64> %a, <4 x i64> %b, <4 x i64> zeroinitializer, i8 -1)
ret <4 x i64> %res
@@ -2448,7 +2464,7 @@ define <4 x i64> @test_mask_andnot_epi64_rrkz_256(<4 x i64> %a, <4 x i64> %b, i8
define <4 x i64> @test_mask_andnot_epi64_rm_256(<4 x i64> %a, <4 x i64>* %ptr_b) {
; CHECK-LABEL: test_mask_andnot_epi64_rm_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpandnq (%rdi), %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0xdf,0x07]
+; CHECK-NEXT: vpandn (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdf,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%b = load <4 x i64>, <4 x i64>* %ptr_b
%res = call <4 x i64> @llvm.x86.avx512.mask.pandn.q.256(<4 x i64> %a, <4 x i64> %b, <4 x i64> zeroinitializer, i8 -1)
@@ -4833,3 +4849,128 @@ define <8 x i32>@test_int_x86_avx512_mask_inserti32x4_256(<8 x i32> %x0, <4 x i3
%res4 = add <8 x i32> %res2, %res3
ret <8 x i32> %res4
}
+
+define <8 x float> @test_mm512_maskz_max_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
+; CHECK-LABEL: test_mm512_maskz_max_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmaxps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x5f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask.max.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float>zeroinitializer, i8 %mask)
+ ret <8 x float> %res
+}
+
+define <8 x float> @test_mm512_mask_max_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %src, i8 %mask) {
+; CHECK-LABEL: test_mm512_mask_max_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmaxps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x5f,0xd1]
+; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask.max.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %src, i8 %mask)
+ ret <8 x float> %res
+}
+
+define <8 x float> @test_mm512_max_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
+; CHECK-LABEL: test_mm512_max_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmaxps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask.max.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float>zeroinitializer, i8 -1)
+ ret <8 x float> %res
+}
+declare <8 x float> @llvm.x86.avx512.mask.max.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)
+
+define <4 x float> @test_mm512_maskz_max_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
+; CHECK-LABEL: test_mm512_maskz_max_ps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmaxps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x5f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.max.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float>zeroinitializer, i8 %mask)
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mm512_mask_max_ps_128(<4 x float> %a0, <4 x float> %a1, <4 x float> %src, i8 %mask) {
+; CHECK-LABEL: test_mm512_mask_max_ps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmaxps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x5f,0xd1]
+; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.max.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %src, i8 %mask)
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mm512_max_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
+; CHECK-LABEL: test_mm512_max_ps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmaxps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.max.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float>zeroinitializer, i8 -1)
+ ret <4 x float> %res
+}
+declare <4 x float> @llvm.x86.avx512.mask.max.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
+
+define <8 x float> @test_mm512_maskz_min_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
+; CHECK-LABEL: test_mm512_maskz_min_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vminps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x5d,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask.min.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float>zeroinitializer, i8 %mask)
+ ret <8 x float> %res
+}
+
+define <8 x float> @test_mm512_mask_min_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %src, i8 %mask) {
+; CHECK-LABEL: test_mm512_mask_min_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vminps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x5d,0xd1]
+; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask.min.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %src, i8 %mask)
+ ret <8 x float> %res
+}
+
+define <8 x float> @test_mm512_min_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
+; CHECK-LABEL: test_mm512_min_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vminps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5d,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask.min.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float>zeroinitializer, i8 -1)
+ ret <8 x float> %res
+}
+declare <8 x float> @llvm.x86.avx512.mask.min.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)
+
+define <4 x float> @test_mm512_maskz_min_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
+; CHECK-LABEL: test_mm512_maskz_min_ps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vminps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x5d,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.min.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float>zeroinitializer, i8 %mask)
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mm512_mask_min_ps_128(<4 x float> %a0, <4 x float> %a1, <4 x float> %src, i8 %mask) {
+; CHECK-LABEL: test_mm512_mask_min_ps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vminps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x5d,0xd1]
+; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.min.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %src, i8 %mask)
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mm512_min_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
+; CHECK-LABEL: test_mm512_min_ps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vminps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5d,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.min.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float>zeroinitializer, i8 -1)
+ ret <4 x float> %res
+}
+declare <4 x float> @llvm.x86.avx512.mask.min.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
+
diff --git a/test/CodeGen/X86/avx512vl-intrinsics.ll b/test/CodeGen/X86/avx512vl-intrinsics.ll
index 82014283246e..1f324d679564 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics.ll
@@ -6,29 +6,29 @@
define <8 x i8> @test_cmp_d_256(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_cmp_d_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k3 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xd9,0x00]
-; CHECK-NEXT: vpcmpltd %ymm1, %ymm0, %k4 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xe1,0x01]
-; CHECK-NEXT: vpcmpled %ymm1, %ymm0, %k5 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xe9,0x02]
-; CHECK-NEXT: vpcmpunordd %ymm1, %ymm0, %k6 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xf1,0x03]
-; CHECK-NEXT: vpcmpneqd %ymm1, %ymm0, %k7 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xf9,0x04]
-; CHECK-NEXT: vpcmpnltd %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xd1,0x05]
-; CHECK-NEXT: vpcmpnled %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xc9,0x06]
-; CHECK-NEXT: vpcmpordd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xc1,0x07]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
-; CHECK-NEXT: kmovw %k3, %ecx ## encoding: [0xc5,0xf8,0x93,0xcb]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xc1,0x00]
+; CHECK-NEXT: vpcmpltd %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xc9,0x01]
+; CHECK-NEXT: vpcmpled %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xd1,0x02]
+; CHECK-NEXT: vpcmpunordd %ymm1, %ymm0, %k3 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xd9,0x03]
+; CHECK-NEXT: vpcmpneqd %ymm1, %ymm0, %k4 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xe1,0x04]
+; CHECK-NEXT: vpcmpnltd %ymm1, %ymm0, %k5 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xe9,0x05]
+; CHECK-NEXT: vpcmpnled %ymm1, %ymm0, %k6 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xf1,0x06]
+; CHECK-NEXT: vpcmpordd %ymm1, %ymm0, %k7 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xf9,0x07]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 0, i8 -1)
@@ -53,30 +53,30 @@ define <8 x i8> @test_cmp_d_256(<8 x i32> %a0, <8 x i32> %a1) {
define <8 x i8> @test_mask_cmp_d_256(<8 x i32> %a0, <8 x i32> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_d_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k3 ## encoding: [0xc5,0xf8,0x92,0xdf]
-; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k4 {%k3} ## encoding: [0x62,0xf3,0x7d,0x2b,0x1f,0xe1,0x00]
-; CHECK-NEXT: vpcmpltd %ymm1, %ymm0, %k5 {%k3} ## encoding: [0x62,0xf3,0x7d,0x2b,0x1f,0xe9,0x01]
-; CHECK-NEXT: vpcmpled %ymm1, %ymm0, %k6 {%k3} ## encoding: [0x62,0xf3,0x7d,0x2b,0x1f,0xf1,0x02]
-; CHECK-NEXT: vpcmpunordd %ymm1, %ymm0, %k7 {%k3} ## encoding: [0x62,0xf3,0x7d,0x2b,0x1f,0xf9,0x03]
-; CHECK-NEXT: vpcmpneqd %ymm1, %ymm0, %k0 {%k3} ## encoding: [0x62,0xf3,0x7d,0x2b,0x1f,0xc1,0x04]
-; CHECK-NEXT: vpcmpnltd %ymm1, %ymm0, %k2 {%k3} ## encoding: [0x62,0xf3,0x7d,0x2b,0x1f,0xd1,0x05]
-; CHECK-NEXT: vpcmpnled %ymm1, %ymm0, %k1 {%k3} ## encoding: [0x62,0xf3,0x7d,0x2b,0x1f,0xc9,0x06]
-; CHECK-NEXT: vpcmpordd %ymm1, %ymm0, %k3 {%k3} ## encoding: [0x62,0xf3,0x7d,0x2b,0x1f,0xd9,0x07]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
-; CHECK-NEXT: kmovw %k4, %ecx ## encoding: [0xc5,0xf8,0x93,0xcc]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x1f,0xc1,0x00]
+; CHECK-NEXT: vpcmpltd %ymm1, %ymm0, %k2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x1f,0xd1,0x01]
+; CHECK-NEXT: vpcmpled %ymm1, %ymm0, %k3 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x1f,0xd9,0x02]
+; CHECK-NEXT: vpcmpunordd %ymm1, %ymm0, %k4 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x1f,0xe1,0x03]
+; CHECK-NEXT: vpcmpneqd %ymm1, %ymm0, %k5 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x1f,0xe9,0x04]
+; CHECK-NEXT: vpcmpnltd %ymm1, %ymm0, %k6 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x1f,0xf1,0x05]
+; CHECK-NEXT: vpcmpnled %ymm1, %ymm0, %k7 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x1f,0xf9,0x06]
+; CHECK-NEXT: vpcmpordd %ymm1, %ymm0, %k1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x1f,0xc9,0x07]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 0, i8 %mask)
@@ -103,29 +103,29 @@ declare i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32>, <8 x i32>, i32, i8) nounwi
define <8 x i8> @test_ucmp_d_256(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_ucmp_d_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpequd %ymm1, %ymm0, %k3 ## encoding: [0x62,0xf3,0x7d,0x28,0x1e,0xd9,0x00]
-; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k4 ## encoding: [0x62,0xf3,0x7d,0x28,0x1e,0xe1,0x01]
-; CHECK-NEXT: vpcmpleud %ymm1, %ymm0, %k5 ## encoding: [0x62,0xf3,0x7d,0x28,0x1e,0xe9,0x02]
-; CHECK-NEXT: vpcmpunordud %ymm1, %ymm0, %k6 ## encoding: [0x62,0xf3,0x7d,0x28,0x1e,0xf1,0x03]
-; CHECK-NEXT: vpcmpnequd %ymm1, %ymm0, %k7 ## encoding: [0x62,0xf3,0x7d,0x28,0x1e,0xf9,0x04]
-; CHECK-NEXT: vpcmpnltud %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x28,0x1e,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleud %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x28,0x1e,0xc9,0x06]
-; CHECK-NEXT: vpcmpordud %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x28,0x1e,0xc1,0x07]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
-; CHECK-NEXT: kmovw %k3, %ecx ## encoding: [0xc5,0xf8,0x93,0xcb]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: vpcmpequd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x28,0x1e,0xc1,0x00]
+; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x28,0x1e,0xc9,0x01]
+; CHECK-NEXT: vpcmpleud %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x28,0x1e,0xd1,0x02]
+; CHECK-NEXT: vpcmpunordud %ymm1, %ymm0, %k3 ## encoding: [0x62,0xf3,0x7d,0x28,0x1e,0xd9,0x03]
+; CHECK-NEXT: vpcmpnequd %ymm1, %ymm0, %k4 ## encoding: [0x62,0xf3,0x7d,0x28,0x1e,0xe1,0x04]
+; CHECK-NEXT: vpcmpnltud %ymm1, %ymm0, %k5 ## encoding: [0x62,0xf3,0x7d,0x28,0x1e,0xe9,0x05]
+; CHECK-NEXT: vpcmpnleud %ymm1, %ymm0, %k6 ## encoding: [0x62,0xf3,0x7d,0x28,0x1e,0xf1,0x06]
+; CHECK-NEXT: vpcmpordud %ymm1, %ymm0, %k7 ## encoding: [0x62,0xf3,0x7d,0x28,0x1e,0xf9,0x07]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 0, i8 -1)
@@ -150,30 +150,30 @@ define <8 x i8> @test_ucmp_d_256(<8 x i32> %a0, <8 x i32> %a1) {
define <8 x i8> @test_mask_ucmp_d_256(<8 x i32> %a0, <8 x i32> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_d_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k3 ## encoding: [0xc5,0xf8,0x92,0xdf]
-; CHECK-NEXT: vpcmpequd %ymm1, %ymm0, %k4 {%k3} ## encoding: [0x62,0xf3,0x7d,0x2b,0x1e,0xe1,0x00]
-; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k5 {%k3} ## encoding: [0x62,0xf3,0x7d,0x2b,0x1e,0xe9,0x01]
-; CHECK-NEXT: vpcmpleud %ymm1, %ymm0, %k6 {%k3} ## encoding: [0x62,0xf3,0x7d,0x2b,0x1e,0xf1,0x02]
-; CHECK-NEXT: vpcmpunordud %ymm1, %ymm0, %k7 {%k3} ## encoding: [0x62,0xf3,0x7d,0x2b,0x1e,0xf9,0x03]
-; CHECK-NEXT: vpcmpnequd %ymm1, %ymm0, %k0 {%k3} ## encoding: [0x62,0xf3,0x7d,0x2b,0x1e,0xc1,0x04]
-; CHECK-NEXT: vpcmpnltud %ymm1, %ymm0, %k2 {%k3} ## encoding: [0x62,0xf3,0x7d,0x2b,0x1e,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleud %ymm1, %ymm0, %k1 {%k3} ## encoding: [0x62,0xf3,0x7d,0x2b,0x1e,0xc9,0x06]
-; CHECK-NEXT: vpcmpordud %ymm1, %ymm0, %k3 {%k3} ## encoding: [0x62,0xf3,0x7d,0x2b,0x1e,0xd9,0x07]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
-; CHECK-NEXT: kmovw %k4, %ecx ## encoding: [0xc5,0xf8,0x93,0xcc]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vpcmpequd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x1e,0xc1,0x00]
+; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x1e,0xd1,0x01]
+; CHECK-NEXT: vpcmpleud %ymm1, %ymm0, %k3 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x1e,0xd9,0x02]
+; CHECK-NEXT: vpcmpunordud %ymm1, %ymm0, %k4 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x1e,0xe1,0x03]
+; CHECK-NEXT: vpcmpnequd %ymm1, %ymm0, %k5 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x1e,0xe9,0x04]
+; CHECK-NEXT: vpcmpnltud %ymm1, %ymm0, %k6 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x1e,0xf1,0x05]
+; CHECK-NEXT: vpcmpnleud %ymm1, %ymm0, %k7 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x1e,0xf9,0x06]
+; CHECK-NEXT: vpcmpordud %ymm1, %ymm0, %k1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x1e,0xc9,0x07]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 0, i8 %mask)
@@ -200,29 +200,29 @@ declare i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32>, <8 x i32>, i32, i8) nounw
define <8 x i8> @test_cmp_q_256(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_cmp_q_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xe9,0x00]
-; CHECK-NEXT: vpcmpltq %ymm1, %ymm0, %k7 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xf9,0x01]
-; CHECK-NEXT: vpcmpleq %ymm1, %ymm0, %k6 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xf1,0x02]
-; CHECK-NEXT: vpcmpunordq %ymm1, %ymm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xe1,0x03]
-; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xd9,0x04]
-; CHECK-NEXT: vpcmpnltq %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xc9,0x06]
-; CHECK-NEXT: vpcmpordq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xc1,0x07]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
-; CHECK-NEXT: kmovw %k5, %ecx ## encoding: [0xc5,0xf8,0x93,0xcd]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xc1,0x00]
+; CHECK-NEXT: vpcmpltq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xc9,0x01]
+; CHECK-NEXT: vpcmpleq %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xd1,0x02]
+; CHECK-NEXT: vpcmpunordq %ymm1, %ymm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xd9,0x03]
+; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xe1,0x04]
+; CHECK-NEXT: vpcmpnltq %ymm1, %ymm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xe9,0x05]
+; CHECK-NEXT: vpcmpnleq %ymm1, %ymm0, %k6 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xf1,0x06]
+; CHECK-NEXT: vpcmpordq %ymm1, %ymm0, %k7 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xf9,0x07]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
-; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 0, i8 -1)
@@ -247,30 +247,30 @@ define <8 x i8> @test_cmp_q_256(<4 x i64> %a0, <4 x i64> %a1) {
define <8 x i8> @test_mask_cmp_q_256(<4 x i64> %a0, <4 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_q_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k7 ## encoding: [0xc5,0xf8,0x92,0xff]
-; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k5 {%k7} ## encoding: [0x62,0xf3,0xfd,0x2f,0x1f,0xe9,0x00]
-; CHECK-NEXT: vpcmpltq %ymm1, %ymm0, %k0 {%k7} ## encoding: [0x62,0xf3,0xfd,0x2f,0x1f,0xc1,0x01]
-; CHECK-NEXT: vpcmpleq %ymm1, %ymm0, %k6 {%k7} ## encoding: [0x62,0xf3,0xfd,0x2f,0x1f,0xf1,0x02]
-; CHECK-NEXT: vpcmpunordq %ymm1, %ymm0, %k4 {%k7} ## encoding: [0x62,0xf3,0xfd,0x2f,0x1f,0xe1,0x03]
-; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k3 {%k7} ## encoding: [0x62,0xf3,0xfd,0x2f,0x1f,0xd9,0x04]
-; CHECK-NEXT: vpcmpnltq %ymm1, %ymm0, %k2 {%k7} ## encoding: [0x62,0xf3,0xfd,0x2f,0x1f,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleq %ymm1, %ymm0, %k1 {%k7} ## encoding: [0x62,0xf3,0xfd,0x2f,0x1f,0xc9,0x06]
-; CHECK-NEXT: vpcmpordq %ymm1, %ymm0, %k7 {%k7} ## encoding: [0x62,0xf3,0xfd,0x2f,0x1f,0xf9,0x07]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: kmovw %k5, %ecx ## encoding: [0xc5,0xf8,0x93,0xcd]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1f,0xc1,0x00]
+; CHECK-NEXT: vpcmpltq %ymm1, %ymm0, %k2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1f,0xd1,0x01]
+; CHECK-NEXT: vpcmpleq %ymm1, %ymm0, %k3 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1f,0xd9,0x02]
+; CHECK-NEXT: vpcmpunordq %ymm1, %ymm0, %k4 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1f,0xe1,0x03]
+; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k5 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1f,0xe9,0x04]
+; CHECK-NEXT: vpcmpnltq %ymm1, %ymm0, %k6 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1f,0xf1,0x05]
+; CHECK-NEXT: vpcmpnleq %ymm1, %ymm0, %k7 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1f,0xf9,0x06]
+; CHECK-NEXT: vpcmpordq %ymm1, %ymm0, %k1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1f,0xc9,0x07]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
-; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 0, i8 %mask)
@@ -297,29 +297,29 @@ declare i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64>, <4 x i64>, i32, i8) nounwi
define <8 x i8> @test_ucmp_q_256(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_ucmp_q_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpequq %ymm1, %ymm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xe9,0x00]
-; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k7 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xf9,0x01]
-; CHECK-NEXT: vpcmpleuq %ymm1, %ymm0, %k6 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xf1,0x02]
-; CHECK-NEXT: vpcmpunorduq %ymm1, %ymm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xe1,0x03]
-; CHECK-NEXT: vpcmpnequq %ymm1, %ymm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xd9,0x04]
-; CHECK-NEXT: vpcmpnltuq %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleuq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xc9,0x06]
-; CHECK-NEXT: vpcmporduq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xc1,0x07]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
-; CHECK-NEXT: kmovw %k5, %ecx ## encoding: [0xc5,0xf8,0x93,0xcd]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: vpcmpequq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xc1,0x00]
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xc9,0x01]
+; CHECK-NEXT: vpcmpleuq %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xd1,0x02]
+; CHECK-NEXT: vpcmpunorduq %ymm1, %ymm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xd9,0x03]
+; CHECK-NEXT: vpcmpnequq %ymm1, %ymm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xe1,0x04]
+; CHECK-NEXT: vpcmpnltuq %ymm1, %ymm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xe9,0x05]
+; CHECK-NEXT: vpcmpnleuq %ymm1, %ymm0, %k6 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xf1,0x06]
+; CHECK-NEXT: vpcmporduq %ymm1, %ymm0, %k7 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xf9,0x07]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
-; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 0, i8 -1)
@@ -344,30 +344,30 @@ define <8 x i8> @test_ucmp_q_256(<4 x i64> %a0, <4 x i64> %a1) {
define <8 x i8> @test_mask_ucmp_q_256(<4 x i64> %a0, <4 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_q_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k7 ## encoding: [0xc5,0xf8,0x92,0xff]
-; CHECK-NEXT: vpcmpequq %ymm1, %ymm0, %k5 {%k7} ## encoding: [0x62,0xf3,0xfd,0x2f,0x1e,0xe9,0x00]
-; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k7} ## encoding: [0x62,0xf3,0xfd,0x2f,0x1e,0xc1,0x01]
-; CHECK-NEXT: vpcmpleuq %ymm1, %ymm0, %k6 {%k7} ## encoding: [0x62,0xf3,0xfd,0x2f,0x1e,0xf1,0x02]
-; CHECK-NEXT: vpcmpunorduq %ymm1, %ymm0, %k4 {%k7} ## encoding: [0x62,0xf3,0xfd,0x2f,0x1e,0xe1,0x03]
-; CHECK-NEXT: vpcmpnequq %ymm1, %ymm0, %k3 {%k7} ## encoding: [0x62,0xf3,0xfd,0x2f,0x1e,0xd9,0x04]
-; CHECK-NEXT: vpcmpnltuq %ymm1, %ymm0, %k2 {%k7} ## encoding: [0x62,0xf3,0xfd,0x2f,0x1e,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleuq %ymm1, %ymm0, %k1 {%k7} ## encoding: [0x62,0xf3,0xfd,0x2f,0x1e,0xc9,0x06]
-; CHECK-NEXT: vpcmporduq %ymm1, %ymm0, %k7 {%k7} ## encoding: [0x62,0xf3,0xfd,0x2f,0x1e,0xf9,0x07]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: kmovw %k5, %ecx ## encoding: [0xc5,0xf8,0x93,0xcd]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vpcmpequq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1e,0xc1,0x00]
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1e,0xd1,0x01]
+; CHECK-NEXT: vpcmpleuq %ymm1, %ymm0, %k3 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1e,0xd9,0x02]
+; CHECK-NEXT: vpcmpunorduq %ymm1, %ymm0, %k4 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1e,0xe1,0x03]
+; CHECK-NEXT: vpcmpnequq %ymm1, %ymm0, %k5 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1e,0xe9,0x04]
+; CHECK-NEXT: vpcmpnltuq %ymm1, %ymm0, %k6 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1e,0xf1,0x05]
+; CHECK-NEXT: vpcmpnleuq %ymm1, %ymm0, %k7 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1e,0xf9,0x06]
+; CHECK-NEXT: vpcmporduq %ymm1, %ymm0, %k1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1e,0xc9,0x07]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
-; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 0, i8 %mask)
@@ -396,29 +396,29 @@ declare i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64>, <4 x i64>, i32, i8) nounw
define <8 x i8> @test_cmp_d_128(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_cmp_d_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xe9,0x00]
-; CHECK-NEXT: vpcmpltd %xmm1, %xmm0, %k7 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xf9,0x01]
-; CHECK-NEXT: vpcmpled %xmm1, %xmm0, %k6 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xf1,0x02]
-; CHECK-NEXT: vpcmpunordd %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xe1,0x03]
-; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xd9,0x04]
-; CHECK-NEXT: vpcmpnltd %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xd1,0x05]
-; CHECK-NEXT: vpcmpnled %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xc9,0x06]
-; CHECK-NEXT: vpcmpordd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xc1,0x07]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
-; CHECK-NEXT: kmovw %k5, %ecx ## encoding: [0xc5,0xf8,0x93,0xcd]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xc1,0x00]
+; CHECK-NEXT: vpcmpltd %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xc9,0x01]
+; CHECK-NEXT: vpcmpled %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xd1,0x02]
+; CHECK-NEXT: vpcmpunordd %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xd9,0x03]
+; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xe1,0x04]
+; CHECK-NEXT: vpcmpnltd %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xe9,0x05]
+; CHECK-NEXT: vpcmpnled %xmm1, %xmm0, %k6 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xf1,0x06]
+; CHECK-NEXT: vpcmpordd %xmm1, %xmm0, %k7 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xf9,0x07]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
-; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 0, i8 -1)
@@ -443,30 +443,30 @@ define <8 x i8> @test_cmp_d_128(<4 x i32> %a0, <4 x i32> %a1) {
define <8 x i8> @test_mask_cmp_d_128(<4 x i32> %a0, <4 x i32> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_d_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k7 ## encoding: [0xc5,0xf8,0x92,0xff]
-; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k5 {%k7} ## encoding: [0x62,0xf3,0x7d,0x0f,0x1f,0xe9,0x00]
-; CHECK-NEXT: vpcmpltd %xmm1, %xmm0, %k0 {%k7} ## encoding: [0x62,0xf3,0x7d,0x0f,0x1f,0xc1,0x01]
-; CHECK-NEXT: vpcmpled %xmm1, %xmm0, %k6 {%k7} ## encoding: [0x62,0xf3,0x7d,0x0f,0x1f,0xf1,0x02]
-; CHECK-NEXT: vpcmpunordd %xmm1, %xmm0, %k4 {%k7} ## encoding: [0x62,0xf3,0x7d,0x0f,0x1f,0xe1,0x03]
-; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k3 {%k7} ## encoding: [0x62,0xf3,0x7d,0x0f,0x1f,0xd9,0x04]
-; CHECK-NEXT: vpcmpnltd %xmm1, %xmm0, %k2 {%k7} ## encoding: [0x62,0xf3,0x7d,0x0f,0x1f,0xd1,0x05]
-; CHECK-NEXT: vpcmpnled %xmm1, %xmm0, %k1 {%k7} ## encoding: [0x62,0xf3,0x7d,0x0f,0x1f,0xc9,0x06]
-; CHECK-NEXT: vpcmpordd %xmm1, %xmm0, %k7 {%k7} ## encoding: [0x62,0xf3,0x7d,0x0f,0x1f,0xf9,0x07]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: kmovw %k5, %ecx ## encoding: [0xc5,0xf8,0x93,0xcd]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1f,0xc1,0x00]
+; CHECK-NEXT: vpcmpltd %xmm1, %xmm0, %k2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1f,0xd1,0x01]
+; CHECK-NEXT: vpcmpled %xmm1, %xmm0, %k3 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1f,0xd9,0x02]
+; CHECK-NEXT: vpcmpunordd %xmm1, %xmm0, %k4 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1f,0xe1,0x03]
+; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k5 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1f,0xe9,0x04]
+; CHECK-NEXT: vpcmpnltd %xmm1, %xmm0, %k6 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1f,0xf1,0x05]
+; CHECK-NEXT: vpcmpnled %xmm1, %xmm0, %k7 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1f,0xf9,0x06]
+; CHECK-NEXT: vpcmpordd %xmm1, %xmm0, %k1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1f,0xc9,0x07]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
-; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 0, i8 %mask)
@@ -493,29 +493,29 @@ declare i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32>, <4 x i32>, i32, i8) nounwi
define <8 x i8> @test_ucmp_d_128(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_ucmp_d_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpequd %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xe9,0x00]
-; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k7 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xf9,0x01]
-; CHECK-NEXT: vpcmpleud %xmm1, %xmm0, %k6 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xf1,0x02]
-; CHECK-NEXT: vpcmpunordud %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xe1,0x03]
-; CHECK-NEXT: vpcmpnequd %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xd9,0x04]
-; CHECK-NEXT: vpcmpnltud %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleud %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xc9,0x06]
-; CHECK-NEXT: vpcmpordud %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xc1,0x07]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
-; CHECK-NEXT: kmovw %k5, %ecx ## encoding: [0xc5,0xf8,0x93,0xcd]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: vpcmpequd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xc1,0x00]
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xc9,0x01]
+; CHECK-NEXT: vpcmpleud %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xd1,0x02]
+; CHECK-NEXT: vpcmpunordud %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xd9,0x03]
+; CHECK-NEXT: vpcmpnequd %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xe1,0x04]
+; CHECK-NEXT: vpcmpnltud %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xe9,0x05]
+; CHECK-NEXT: vpcmpnleud %xmm1, %xmm0, %k6 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xf1,0x06]
+; CHECK-NEXT: vpcmpordud %xmm1, %xmm0, %k7 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xf9,0x07]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
-; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 0, i8 -1)
@@ -540,30 +540,30 @@ define <8 x i8> @test_ucmp_d_128(<4 x i32> %a0, <4 x i32> %a1) {
define <8 x i8> @test_mask_ucmp_d_128(<4 x i32> %a0, <4 x i32> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_d_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k7 ## encoding: [0xc5,0xf8,0x92,0xff]
-; CHECK-NEXT: vpcmpequd %xmm1, %xmm0, %k5 {%k7} ## encoding: [0x62,0xf3,0x7d,0x0f,0x1e,0xe9,0x00]
-; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k7} ## encoding: [0x62,0xf3,0x7d,0x0f,0x1e,0xc1,0x01]
-; CHECK-NEXT: vpcmpleud %xmm1, %xmm0, %k6 {%k7} ## encoding: [0x62,0xf3,0x7d,0x0f,0x1e,0xf1,0x02]
-; CHECK-NEXT: vpcmpunordud %xmm1, %xmm0, %k4 {%k7} ## encoding: [0x62,0xf3,0x7d,0x0f,0x1e,0xe1,0x03]
-; CHECK-NEXT: vpcmpnequd %xmm1, %xmm0, %k3 {%k7} ## encoding: [0x62,0xf3,0x7d,0x0f,0x1e,0xd9,0x04]
-; CHECK-NEXT: vpcmpnltud %xmm1, %xmm0, %k2 {%k7} ## encoding: [0x62,0xf3,0x7d,0x0f,0x1e,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleud %xmm1, %xmm0, %k1 {%k7} ## encoding: [0x62,0xf3,0x7d,0x0f,0x1e,0xc9,0x06]
-; CHECK-NEXT: vpcmpordud %xmm1, %xmm0, %k7 {%k7} ## encoding: [0x62,0xf3,0x7d,0x0f,0x1e,0xf9,0x07]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: kmovw %k5, %ecx ## encoding: [0xc5,0xf8,0x93,0xcd]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vpcmpequd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1e,0xc1,0x00]
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1e,0xd1,0x01]
+; CHECK-NEXT: vpcmpleud %xmm1, %xmm0, %k3 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1e,0xd9,0x02]
+; CHECK-NEXT: vpcmpunordud %xmm1, %xmm0, %k4 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1e,0xe1,0x03]
+; CHECK-NEXT: vpcmpnequd %xmm1, %xmm0, %k5 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1e,0xe9,0x04]
+; CHECK-NEXT: vpcmpnltud %xmm1, %xmm0, %k6 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1e,0xf1,0x05]
+; CHECK-NEXT: vpcmpnleud %xmm1, %xmm0, %k7 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1e,0xf9,0x06]
+; CHECK-NEXT: vpcmpordud %xmm1, %xmm0, %k1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1e,0xc9,0x07]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
-; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 0, i8 %mask)
@@ -590,29 +590,29 @@ declare i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32>, <4 x i32>, i32, i8) nounw
define <8 x i8> @test_cmp_q_128(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_cmp_q_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xe9,0x00]
-; CHECK-NEXT: vpcmpltq %xmm1, %xmm0, %k7 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xf9,0x01]
-; CHECK-NEXT: vpcmpleq %xmm1, %xmm0, %k6 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xf1,0x02]
-; CHECK-NEXT: vpcmpunordq %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xe1,0x03]
-; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xd9,0x04]
-; CHECK-NEXT: vpcmpnltq %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleq %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xc9,0x06]
-; CHECK-NEXT: vpcmpordq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xc1,0x07]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
-; CHECK-NEXT: kmovw %k5, %ecx ## encoding: [0xc5,0xf8,0x93,0xcd]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xc1,0x00]
+; CHECK-NEXT: vpcmpltq %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xc9,0x01]
+; CHECK-NEXT: vpcmpleq %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xd1,0x02]
+; CHECK-NEXT: vpcmpunordq %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xd9,0x03]
+; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xe1,0x04]
+; CHECK-NEXT: vpcmpnltq %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xe9,0x05]
+; CHECK-NEXT: vpcmpnleq %xmm1, %xmm0, %k6 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xf1,0x06]
+; CHECK-NEXT: vpcmpordq %xmm1, %xmm0, %k7 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xf9,0x07]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
-; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 0, i8 -1)
@@ -637,30 +637,30 @@ define <8 x i8> @test_cmp_q_128(<2 x i64> %a0, <2 x i64> %a1) {
define <8 x i8> @test_mask_cmp_q_128(<2 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_q_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k7 ## encoding: [0xc5,0xf8,0x92,0xff]
-; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k5 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1f,0xe9,0x00]
-; CHECK-NEXT: vpcmpltq %xmm1, %xmm0, %k0 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1f,0xc1,0x01]
-; CHECK-NEXT: vpcmpleq %xmm1, %xmm0, %k6 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1f,0xf1,0x02]
-; CHECK-NEXT: vpcmpunordq %xmm1, %xmm0, %k4 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1f,0xe1,0x03]
-; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k3 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1f,0xd9,0x04]
-; CHECK-NEXT: vpcmpnltq %xmm1, %xmm0, %k2 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1f,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleq %xmm1, %xmm0, %k1 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1f,0xc9,0x06]
-; CHECK-NEXT: vpcmpordq %xmm1, %xmm0, %k7 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1f,0xf9,0x07]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: kmovw %k5, %ecx ## encoding: [0xc5,0xf8,0x93,0xcd]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1f,0xc1,0x00]
+; CHECK-NEXT: vpcmpltq %xmm1, %xmm0, %k2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1f,0xd1,0x01]
+; CHECK-NEXT: vpcmpleq %xmm1, %xmm0, %k3 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1f,0xd9,0x02]
+; CHECK-NEXT: vpcmpunordq %xmm1, %xmm0, %k4 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1f,0xe1,0x03]
+; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k5 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1f,0xe9,0x04]
+; CHECK-NEXT: vpcmpnltq %xmm1, %xmm0, %k6 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1f,0xf1,0x05]
+; CHECK-NEXT: vpcmpnleq %xmm1, %xmm0, %k7 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1f,0xf9,0x06]
+; CHECK-NEXT: vpcmpordq %xmm1, %xmm0, %k1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1f,0xc9,0x07]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
-; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 0, i8 %mask)
@@ -687,29 +687,29 @@ declare i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64>, <2 x i64>, i32, i8) nounwi
define <8 x i8> @test_ucmp_q_128(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_ucmp_q_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpcmpequq %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xe9,0x00]
-; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k7 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xf9,0x01]
-; CHECK-NEXT: vpcmpleuq %xmm1, %xmm0, %k6 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xf1,0x02]
-; CHECK-NEXT: vpcmpunorduq %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xe1,0x03]
-; CHECK-NEXT: vpcmpnequq %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xd9,0x04]
-; CHECK-NEXT: vpcmpnltuq %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleuq %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xc9,0x06]
-; CHECK-NEXT: vpcmporduq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xc1,0x07]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
-; CHECK-NEXT: kmovw %k5, %ecx ## encoding: [0xc5,0xf8,0x93,0xcd]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: vpcmpequq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xc1,0x00]
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xc9,0x01]
+; CHECK-NEXT: vpcmpleuq %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xd1,0x02]
+; CHECK-NEXT: vpcmpunorduq %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xd9,0x03]
+; CHECK-NEXT: vpcmpnequq %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xe1,0x04]
+; CHECK-NEXT: vpcmpnltuq %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xe9,0x05]
+; CHECK-NEXT: vpcmpnleuq %xmm1, %xmm0, %k6 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xf1,0x06]
+; CHECK-NEXT: vpcmporduq %xmm1, %xmm0, %k7 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xf9,0x07]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
-; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 0, i8 -1)
@@ -734,30 +734,30 @@ define <8 x i8> @test_ucmp_q_128(<2 x i64> %a0, <2 x i64> %a1) {
define <8 x i8> @test_mask_ucmp_q_128(<2 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_q_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k7 ## encoding: [0xc5,0xf8,0x92,0xff]
-; CHECK-NEXT: vpcmpequq %xmm1, %xmm0, %k5 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1e,0xe9,0x00]
-; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1e,0xc1,0x01]
-; CHECK-NEXT: vpcmpleuq %xmm1, %xmm0, %k6 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1e,0xf1,0x02]
-; CHECK-NEXT: vpcmpunorduq %xmm1, %xmm0, %k4 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1e,0xe1,0x03]
-; CHECK-NEXT: vpcmpnequq %xmm1, %xmm0, %k3 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1e,0xd9,0x04]
-; CHECK-NEXT: vpcmpnltuq %xmm1, %xmm0, %k2 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1e,0xd1,0x05]
-; CHECK-NEXT: vpcmpnleuq %xmm1, %xmm0, %k1 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1e,0xc9,0x06]
-; CHECK-NEXT: vpcmporduq %xmm1, %xmm0, %k7 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1e,0xf9,0x07]
-; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: kmovw %k5, %ecx ## encoding: [0xc5,0xf8,0x93,0xcd]
-; CHECK-NEXT: vpinsrb $0, %ecx, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x00]
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vpcmpequq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1e,0xc1,0x00]
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1e,0xd1,0x01]
+; CHECK-NEXT: vpcmpleuq %xmm1, %xmm0, %k3 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1e,0xd9,0x02]
+; CHECK-NEXT: vpcmpunorduq %xmm1, %xmm0, %k4 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1e,0xe1,0x03]
+; CHECK-NEXT: vpcmpnequq %xmm1, %xmm0, %k5 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1e,0xe9,0x04]
+; CHECK-NEXT: vpcmpnltuq %xmm1, %xmm0, %k6 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1e,0xf1,0x05]
+; CHECK-NEXT: vpcmpnleuq %xmm1, %xmm0, %k7 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1e,0xf9,0x06]
+; CHECK-NEXT: vpcmporduq %xmm1, %xmm0, %k1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1e,0xc9,0x07]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
-; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 0, i8 %mask)
@@ -1498,6 +1498,7 @@ define i8 @test_cmpps_256(<8 x float> %a, <8 x float> %b) {
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpleps %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7c,0x28,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.cmp.ps.256(<8 x float> %a, <8 x float> %b, i32 2, i8 -1)
ret i8 %res
@@ -1509,6 +1510,7 @@ define i8 @test_cmpps_128(<4 x float> %a, <4 x float> %b) {
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpleps %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7c,0x08,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.cmp.ps.128(<4 x float> %a, <4 x float> %b, i32 2, i8 -1)
ret i8 %res
@@ -1520,6 +1522,7 @@ define i8 @test_cmppd_256(<4 x double> %a, <4 x double> %b) {
; CHECK: ## BB#0:
; CHECK-NEXT: vcmplepd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0xfd,0x28,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.cmp.pd.256(<4 x double> %a, <4 x double> %b, i32 2, i8 -1)
ret i8 %res
@@ -1531,6 +1534,7 @@ define i8 @test_cmppd_128(<2 x double> %a, <2 x double> %b) {
; CHECK: ## BB#0:
; CHECK-NEXT: vcmplepd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> %a, <2 x double> %b, i32 2, i8 -1)
ret i8 %res
@@ -1543,8 +1547,10 @@ define <8 x float> @test_mm512_maskz_max_ps_256(<8 x float> %a0, <8 x float> %a1
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmaxps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x5f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask.max.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float>zeroinitializer, i8 %mask)
- ret <8 x float> %res
+ %1 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x float> %1, <8 x float> zeroinitializer
+ ret <8 x float> %3
}
define <8 x float> @test_mm512_mask_max_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %src, i8 %mask) {
@@ -1554,8 +1560,10 @@ define <8 x float> @test_mm512_mask_max_ps_256(<8 x float> %a0, <8 x float> %a1,
; CHECK-NEXT: vmaxps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x5f,0xd1]
; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask.max.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %src, i8 %mask)
- ret <8 x float> %res
+ %1 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x float> %1, <8 x float> %src
+ ret <8 x float> %3
}
define <8 x float> @test_mm512_max_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
@@ -1563,10 +1571,10 @@ define <8 x float> @test_mm512_max_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %
; CHECK: ## BB#0:
; CHECK-NEXT: vmaxps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask.max.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float>zeroinitializer, i8 -1)
- ret <8 x float> %res
+ %1 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1)
+ ret <8 x float> %1
}
-declare <8 x float> @llvm.x86.avx512.mask.max.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)
+declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>)
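+; The max/min tests in this hunk no longer go through the
+; llvm.x86.avx512.mask.max.ps.* intrinsics; masking is expressed directly in
+; IR: call the unmasked intrinsic, bitcast the i8 mask to <8 x i1>, and apply
+; it with a select. A minimal sketch of that pattern, assuming only the
+; llvm.x86.avx.max.ps.256 declaration above (the function name here is
+; hypothetical, not part of the test file):
+;
+; define <8 x float> @masked_max_sketch(<8 x float> %a, <8 x float> %b, i8 %mask) {
+;   ; Compute the unmasked result first.
+;   %r = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a, <8 x float> %b)
+;   ; Reinterpret the i8 mask as one bit per lane and select lane-wise.
+;   ; zeroinitializer gives maskz semantics; a passthru operand gives merge semantics.
+;   %m = bitcast i8 %mask to <8 x i1>
+;   %sel = select <8 x i1> %m, <8 x float> %r, <8 x float> zeroinitializer
+;   ret <8 x float> %sel
+; }
+;
+; For the 128-bit variants only the low four mask bits apply, so those tests
+; additionally shufflevector the <8 x i1> down to <4 x i1> before the select,
+; as in the test_mm512_*_ps_128 functions below.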
define <4 x float> @test_mm512_maskz_max_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_max_ps_128:
@@ -1574,8 +1582,11 @@ define <4 x float> @test_mm512_maskz_max_ps_128(<4 x float> %a0, <4 x float> %a1
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmaxps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x5f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.max.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float>zeroinitializer, i8 %mask)
- ret <4 x float> %res
+ %1 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %extract = shufflevector <8 x i1> %2, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = select <4 x i1> %extract, <4 x float> %1, <4 x float> zeroinitializer
+ ret <4 x float> %3
}
define <4 x float> @test_mm512_mask_max_ps_128(<4 x float> %a0, <4 x float> %a1, <4 x float> %src, i8 %mask) {
@@ -1585,8 +1596,11 @@ define <4 x float> @test_mm512_mask_max_ps_128(<4 x float> %a0, <4 x float> %a1,
; CHECK-NEXT: vmaxps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x5f,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.max.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %src, i8 %mask)
- ret <4 x float> %res
+ %1 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %extract = shufflevector <8 x i1> %2, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = select <4 x i1> %extract, <4 x float> %1, <4 x float> %src
+ ret <4 x float> %3
}
define <4 x float> @test_mm512_max_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
@@ -1594,10 +1608,10 @@ define <4 x float> @test_mm512_max_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %
; CHECK: ## BB#0:
; CHECK-NEXT: vmaxps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5f,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.max.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float>zeroinitializer, i8 -1)
- ret <4 x float> %res
+ %1 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %1
}
-declare <4 x float> @llvm.x86.avx512.mask.max.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
+declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>)
define <8 x float> @test_mm512_maskz_min_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_min_ps_256:
@@ -1605,8 +1619,10 @@ define <8 x float> @test_mm512_maskz_min_ps_256(<8 x float> %a0, <8 x float> %a1
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vminps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x5d,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask.min.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float>zeroinitializer, i8 %mask)
- ret <8 x float> %res
+ %1 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x float> %1, <8 x float> zeroinitializer
+ ret <8 x float> %3
}
define <8 x float> @test_mm512_mask_min_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %src, i8 %mask) {
@@ -1616,8 +1632,10 @@ define <8 x float> @test_mm512_mask_min_ps_256(<8 x float> %a0, <8 x float> %a1,
; CHECK-NEXT: vminps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x5d,0xd1]
; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask.min.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %src, i8 %mask)
- ret <8 x float> %res
+ %1 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x float> %1, <8 x float> %src
+ ret <8 x float> %3
}
define <8 x float> @test_mm512_min_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %mask) {
@@ -1625,10 +1643,10 @@ define <8 x float> @test_mm512_min_ps_256(<8 x float> %a0, <8 x float> %a1, i8 %
; CHECK: ## BB#0:
; CHECK-NEXT: vminps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5d,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask.min.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float>zeroinitializer, i8 -1)
- ret <8 x float> %res
+ %1 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1)
+ ret <8 x float> %1
}
-declare <8 x float> @llvm.x86.avx512.mask.min.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)
+declare <8 x float> @llvm.x86.avx.min.ps.256(<8 x float>, <8 x float>)
define <4 x float> @test_mm512_maskz_min_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_mm512_maskz_min_ps_128:
@@ -1636,8 +1654,11 @@ define <4 x float> @test_mm512_maskz_min_ps_128(<4 x float> %a0, <4 x float> %a1
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vminps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x5d,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.min.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float>zeroinitializer, i8 %mask)
- ret <4 x float> %res
+ %1 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %extract = shufflevector <8 x i1> %2, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = select <4 x i1> %extract, <4 x float> %1, <4 x float> zeroinitializer
+ ret <4 x float> %3
}
define <4 x float> @test_mm512_mask_min_ps_128(<4 x float> %a0, <4 x float> %a1, <4 x float> %src, i8 %mask) {
@@ -1647,8 +1668,11 @@ define <4 x float> @test_mm512_mask_min_ps_128(<4 x float> %a0, <4 x float> %a1,
; CHECK-NEXT: vminps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x5d,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.min.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %src, i8 %mask)
- ret <4 x float> %res
+ %1 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
+ %2 = bitcast i8 %mask to <8 x i1>
+ %extract = shufflevector <8 x i1> %2, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = select <4 x i1> %extract, <4 x float> %1, <4 x float> %src
+ ret <4 x float> %3
}
define <4 x float> @test_mm512_min_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
@@ -1656,10 +1680,10 @@ define <4 x float> @test_mm512_min_ps_128(<4 x float> %a0, <4 x float> %a1, i8 %
; CHECK: ## BB#0:
; CHECK-NEXT: vminps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5d,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.min.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float>zeroinitializer, i8 -1)
- ret <4 x float> %res
+ %1 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %1
}
-declare <4 x float> @llvm.x86.avx512.mask.min.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
+declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>)
define <4 x double> @test_sqrt_pd_256(<4 x double> %a0, i8 %mask) {
; CHECK-LABEL: test_sqrt_pd_256:
@@ -1712,9 +1736,9 @@ define <4 x i32>@test_int_x86_avx512_mask_vpermt2var_d_128(<4 x i32> %x0, <4 x i
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd9]
-; CHECK-NEXT: vpermt2d %xmm2, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7e,0xda]
-; CHECK-NEXT: vpermt2d %xmm2, %xmm0, %xmm1 ## encoding: [0x62,0xf2,0x7d,0x08,0x7e,0xca]
-; CHECK-NEXT: vpaddd %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc1]
+; CHECK-NEXT: vpermt2d %xmm2, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0x7d,0x08,0x7e,0xda]
+; CHECK-NEXT: vpermt2d %xmm2, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7e,0xca]
+; CHECK-NEXT: vpaddd %xmm3, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.vpermt2var.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
%res1 = call <4 x i32> @llvm.x86.avx512.mask.vpermt2var.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 -1)
@@ -1729,9 +1753,9 @@ define <4 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_128(<4 x i32> %x0, <4 x
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd9]
-; CHECK-NEXT: vpermt2d %xmm2, %xmm0, %xmm3 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x7e,0xda]
-; CHECK-NEXT: vpermt2d %xmm2, %xmm0, %xmm1 ## encoding: [0x62,0xf2,0x7d,0x08,0x7e,0xca]
-; CHECK-NEXT: vpaddd %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc1]
+; CHECK-NEXT: vpermt2d %xmm2, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0x7d,0x08,0x7e,0xda]
+; CHECK-NEXT: vpermt2d %xmm2, %xmm0, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x7e,0xca]
+; CHECK-NEXT: vpaddd %xmm3, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
%res1 = call <4 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 -1)
@@ -1746,9 +1770,9 @@ define <8 x i32>@test_int_x86_avx512_mask_vpermt2var_d_256(<8 x i32> %x0, <8 x i
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd9]
-; CHECK-NEXT: vpermt2d %ymm2, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7e,0xda]
-; CHECK-NEXT: vpermt2d %ymm2, %ymm0, %ymm1 ## encoding: [0x62,0xf2,0x7d,0x28,0x7e,0xca]
-; CHECK-NEXT: vpaddd %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc1]
+; CHECK-NEXT: vpermt2d %ymm2, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0x7d,0x28,0x7e,0xda]
+; CHECK-NEXT: vpermt2d %ymm2, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7e,0xca]
+; CHECK-NEXT: vpaddd %ymm3, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.vpermt2var.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3)
%res1 = call <8 x i32> @llvm.x86.avx512.mask.vpermt2var.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 -1)
@@ -1763,9 +1787,9 @@ define <8 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_256(<8 x i32> %x0, <8 x
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd9]
-; CHECK-NEXT: vpermt2d %ymm2, %ymm0, %ymm3 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x7e,0xda]
-; CHECK-NEXT: vpermt2d %ymm2, %ymm0, %ymm1 ## encoding: [0x62,0xf2,0x7d,0x28,0x7e,0xca]
-; CHECK-NEXT: vpaddd %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc1]
+; CHECK-NEXT: vpermt2d %ymm2, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0x7d,0x28,0x7e,0xda]
+; CHECK-NEXT: vpermt2d %ymm2, %ymm0, %ymm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x7e,0xca]
+; CHECK-NEXT: vpaddd %ymm3, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3)
%res1 = call <8 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 -1)
@@ -1780,9 +1804,9 @@ define <2 x double>@test_int_x86_avx512_mask_vpermi2var_pd_128(<2 x double> %x0,
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
-; CHECK-NEXT: vpermi2pd %xmm2, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x77,0xda]
-; CHECK-NEXT: vpermi2pd %xmm2, %xmm0, %xmm1 ## encoding: [0x62,0xf2,0xfd,0x08,0x77,0xca]
-; CHECK-NEXT: vaddpd %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
+; CHECK-NEXT: vpermi2pd %xmm2, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x77,0xda]
+; CHECK-NEXT: vpermi2pd %xmm2, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x77,0xca]
+; CHECK-NEXT: vaddpd %xmm3, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx512.mask.vpermi2var.pd.128(<2 x double> %x0, <2 x i64> %x1, <2 x double> %x2, i8 %x3)
%res1 = call <2 x double> @llvm.x86.avx512.mask.vpermi2var.pd.128(<2 x double> %x0, <2 x i64> %x1, <2 x double> %x2, i8 -1)
@@ -1797,9 +1821,9 @@ define <4 x double>@test_int_x86_avx512_mask_vpermi2var_pd_256(<4 x double> %x0,
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
-; CHECK-NEXT: vpermi2pd %ymm2, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x77,0xda]
-; CHECK-NEXT: vpermi2pd %ymm2, %ymm0, %ymm1 ## encoding: [0x62,0xf2,0xfd,0x28,0x77,0xca]
-; CHECK-NEXT: vaddpd %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
+; CHECK-NEXT: vpermi2pd %ymm2, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x77,0xda]
+; CHECK-NEXT: vpermi2pd %ymm2, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x77,0xca]
+; CHECK-NEXT: vaddpd %ymm3, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.mask.vpermi2var.pd.256(<4 x double> %x0, <4 x i64> %x1, <4 x double> %x2, i8 %x3)
%res1 = call <4 x double> @llvm.x86.avx512.mask.vpermi2var.pd.256(<4 x double> %x0, <4 x i64> %x1, <4 x double> %x2, i8 -1)
@@ -1814,9 +1838,9 @@ define <4 x float>@test_int_x86_avx512_mask_vpermi2var_ps_128(<4 x float> %x0, <
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
-; CHECK-NEXT: vpermi2ps %xmm2, %xmm0, %xmm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x77,0xda]
-; CHECK-NEXT: vpermi2ps %xmm2, %xmm0, %xmm1 ## encoding: [0x62,0xf2,0x7d,0x08,0x77,0xca]
-; CHECK-NEXT: vaddps %xmm1, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
+; CHECK-NEXT: vpermi2ps %xmm2, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0x7d,0x08,0x77,0xda]
+; CHECK-NEXT: vpermi2ps %xmm2, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x77,0xca]
+; CHECK-NEXT: vaddps %xmm3, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.vpermi2var.ps.128(<4 x float> %x0, <4 x i32> %x1, <4 x float> %x2, i8 %x3)
%res1 = call <4 x float> @llvm.x86.avx512.mask.vpermi2var.ps.128(<4 x float> %x0, <4 x i32> %x1, <4 x float> %x2, i8 -1)
@@ -1843,9 +1867,9 @@ define <8 x float>@test_int_x86_avx512_mask_vpermi2var_ps_256(<8 x float> %x0, <
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
-; CHECK-NEXT: vpermi2ps %ymm2, %ymm0, %ymm3 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x77,0xda]
-; CHECK-NEXT: vpermi2ps %ymm2, %ymm0, %ymm1 ## encoding: [0x62,0xf2,0x7d,0x28,0x77,0xca]
-; CHECK-NEXT: vaddps %ymm1, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
+; CHECK-NEXT: vpermi2ps %ymm2, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0x7d,0x28,0x77,0xda]
+; CHECK-NEXT: vpermi2ps %ymm2, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x77,0xca]
+; CHECK-NEXT: vaddps %ymm3, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.vpermi2var.ps.256(<8 x float> %x0, <8 x i32> %x1, <8 x float> %x2, i8 %x3)
%res1 = call <8 x float> @llvm.x86.avx512.mask.vpermi2var.ps.256(<8 x float> %x0, <8 x i32> %x1, <8 x float> %x2, i8 -1)
@@ -1987,8 +2011,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmov_qb_128(<2 x i64> %x0, <16 x i8> %
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qb_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovqb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x32,0xc1]
; CHECK-NEXT: vpmovqb %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x32,0xc2]
+; CHECK-NEXT: vpmovqb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x32,0xc1]
; CHECK-NEXT: vpmovqb %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x32,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc2]
@@ -2021,8 +2045,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovs_qb_128(<2 x i64> %x0, <16 x i8>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qb_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovsqb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x22,0xc1]
; CHECK-NEXT: vpmovsqb %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x22,0xc2]
+; CHECK-NEXT: vpmovsqb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x22,0xc1]
; CHECK-NEXT: vpmovsqb %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x22,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc2]
@@ -2055,8 +2079,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovus_qb_128(<2 x i64> %x0, <16 x i8>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qb_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovusqb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x12,0xc1]
; CHECK-NEXT: vpmovusqb %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x12,0xc2]
+; CHECK-NEXT: vpmovusqb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x12,0xc1]
; CHECK-NEXT: vpmovusqb %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x12,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc2]
@@ -2089,8 +2113,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmov_qb_256(<4 x i64> %x0, <16 x i8> %
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qb_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovqb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x32,0xc1]
; CHECK-NEXT: vpmovqb %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x32,0xc2]
+; CHECK-NEXT: vpmovqb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x32,0xc1]
; CHECK-NEXT: vpmovqb %ymm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x32,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc2]
@@ -2123,8 +2147,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovs_qb_256(<4 x i64> %x0, <16 x i8>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qb_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovsqb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x22,0xc1]
; CHECK-NEXT: vpmovsqb %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x22,0xc2]
+; CHECK-NEXT: vpmovsqb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x22,0xc1]
; CHECK-NEXT: vpmovsqb %ymm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x22,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc2]
@@ -2157,8 +2181,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovus_qb_256(<4 x i64> %x0, <16 x i8>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qb_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovusqb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x12,0xc1]
; CHECK-NEXT: vpmovusqb %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x12,0xc2]
+; CHECK-NEXT: vpmovusqb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x12,0xc1]
; CHECK-NEXT: vpmovusqb %ymm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x12,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc2]
@@ -2191,8 +2215,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmov_qw_128(<2 x i64> %x0, <8 x i16> %
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qw_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovqw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x34,0xc1]
; CHECK-NEXT: vpmovqw %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x34,0xc2]
+; CHECK-NEXT: vpmovqw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x34,0xc1]
; CHECK-NEXT: vpmovqw %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x34,0xc0]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc2]
@@ -2225,8 +2249,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovs_qw_128(<2 x i64> %x0, <8 x i16>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qw_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovsqw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x24,0xc1]
; CHECK-NEXT: vpmovsqw %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x24,0xc2]
+; CHECK-NEXT: vpmovsqw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x24,0xc1]
; CHECK-NEXT: vpmovsqw %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x24,0xc0]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc2]
@@ -2259,8 +2283,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovus_qw_128(<2 x i64> %x0, <8 x i16>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qw_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovusqw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x14,0xc1]
; CHECK-NEXT: vpmovusqw %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x14,0xc2]
+; CHECK-NEXT: vpmovusqw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x14,0xc1]
; CHECK-NEXT: vpmovusqw %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x14,0xc0]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc2]
@@ -2293,8 +2317,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmov_qw_256(<4 x i64> %x0, <8 x i16> %
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qw_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovqw %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x34,0xc1]
; CHECK-NEXT: vpmovqw %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x34,0xc2]
+; CHECK-NEXT: vpmovqw %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x34,0xc1]
; CHECK-NEXT: vpmovqw %ymm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x34,0xc0]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc2]
@@ -2327,8 +2351,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovs_qw_256(<4 x i64> %x0, <8 x i16>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qw_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovsqw %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x24,0xc1]
; CHECK-NEXT: vpmovsqw %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x24,0xc2]
+; CHECK-NEXT: vpmovsqw %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x24,0xc1]
; CHECK-NEXT: vpmovsqw %ymm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x24,0xc0]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc2]
@@ -2361,8 +2385,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovus_qw_256(<4 x i64> %x0, <8 x i16>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qw_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovusqw %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x14,0xc1]
; CHECK-NEXT: vpmovusqw %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x14,0xc2]
+; CHECK-NEXT: vpmovusqw %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x14,0xc1]
; CHECK-NEXT: vpmovusqw %ymm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x14,0xc0]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc2]
@@ -2395,8 +2419,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pmov_qd_128(<2 x i64> %x0, <4 x i32> %
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qd_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovqd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x35,0xc1]
; CHECK-NEXT: vpmovqd %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x35,0xc2]
+; CHECK-NEXT: vpmovqd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x35,0xc1]
; CHECK-NEXT: vpmovqd %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x35,0xc0]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; CHECK-NEXT: vpaddd %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
@@ -2429,8 +2453,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pmovs_qd_128(<2 x i64> %x0, <4 x i32>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qd_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovsqd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x25,0xc1]
; CHECK-NEXT: vpmovsqd %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x25,0xc2]
+; CHECK-NEXT: vpmovsqd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x25,0xc1]
; CHECK-NEXT: vpmovsqd %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x25,0xc0]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; CHECK-NEXT: vpaddd %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
@@ -2463,8 +2487,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pmovus_qd_128(<2 x i64> %x0, <4 x i32>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qd_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovusqd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x15,0xc1]
; CHECK-NEXT: vpmovusqd %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x15,0xc2]
+; CHECK-NEXT: vpmovusqd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x15,0xc1]
; CHECK-NEXT: vpmovusqd %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x15,0xc0]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; CHECK-NEXT: vpaddd %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
@@ -2497,8 +2521,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pmov_qd_256(<4 x i64> %x0, <4 x i32> %
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qd_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovqd %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x35,0xc1]
; CHECK-NEXT: vpmovqd %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x35,0xc2]
+; CHECK-NEXT: vpmovqd %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x35,0xc1]
; CHECK-NEXT: vpmovqd %ymm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x35,0xc0]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; CHECK-NEXT: vpaddd %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
@@ -2531,8 +2555,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pmovs_qd_256(<4 x i64> %x0, <4 x i32>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qd_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovsqd %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x25,0xc1]
; CHECK-NEXT: vpmovsqd %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x25,0xc2]
+; CHECK-NEXT: vpmovsqd %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x25,0xc1]
; CHECK-NEXT: vpmovsqd %ymm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x25,0xc0]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; CHECK-NEXT: vpaddd %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
@@ -2565,8 +2589,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pmovus_qd_256(<4 x i64> %x0, <4 x i32>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qd_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovusqd %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x15,0xc1]
; CHECK-NEXT: vpmovusqd %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x15,0xc2]
+; CHECK-NEXT: vpmovusqd %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x15,0xc1]
; CHECK-NEXT: vpmovusqd %ymm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x15,0xc0]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; CHECK-NEXT: vpaddd %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
@@ -2599,8 +2623,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmov_db_128(<4 x i32> %x0, <16 x i8> %
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_db_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovdb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x31,0xc1]
; CHECK-NEXT: vpmovdb %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x31,0xc2]
+; CHECK-NEXT: vpmovdb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x31,0xc1]
; CHECK-NEXT: vpmovdb %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x31,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc2]
@@ -2633,8 +2657,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovs_db_128(<4 x i32> %x0, <16 x i8>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_db_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovsdb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x21,0xc1]
; CHECK-NEXT: vpmovsdb %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x21,0xc2]
+; CHECK-NEXT: vpmovsdb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x21,0xc1]
; CHECK-NEXT: vpmovsdb %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x21,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc2]
@@ -2667,8 +2691,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovus_db_128(<4 x i32> %x0, <16 x i8>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_db_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovusdb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x11,0xc1]
; CHECK-NEXT: vpmovusdb %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x11,0xc2]
+; CHECK-NEXT: vpmovusdb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x11,0xc1]
; CHECK-NEXT: vpmovusdb %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x11,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc2]
@@ -2701,8 +2725,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmov_db_256(<8 x i32> %x0, <16 x i8> %
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_db_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovdb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x31,0xc1]
; CHECK-NEXT: vpmovdb %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x31,0xc2]
+; CHECK-NEXT: vpmovdb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x31,0xc1]
; CHECK-NEXT: vpmovdb %ymm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x31,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc2]
@@ -2735,8 +2759,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovs_db_256(<8 x i32> %x0, <16 x i8>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_db_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovsdb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x21,0xc1]
; CHECK-NEXT: vpmovsdb %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x21,0xc2]
+; CHECK-NEXT: vpmovsdb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x21,0xc1]
; CHECK-NEXT: vpmovsdb %ymm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x21,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc2]
@@ -2769,8 +2793,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovus_db_256(<8 x i32> %x0, <16 x i8>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_db_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovusdb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x11,0xc1]
; CHECK-NEXT: vpmovusdb %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x11,0xc2]
+; CHECK-NEXT: vpmovusdb %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x11,0xc1]
; CHECK-NEXT: vpmovusdb %ymm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x11,0xc0]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfc,0xc2]
@@ -2803,8 +2827,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmov_dw_128(<4 x i32> %x0, <8 x i16> %
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_dw_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovdw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x33,0xc1]
; CHECK-NEXT: vpmovdw %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x33,0xc2]
+; CHECK-NEXT: vpmovdw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x33,0xc1]
; CHECK-NEXT: vpmovdw %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x33,0xc0]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc2]
@@ -2837,8 +2861,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovs_dw_128(<4 x i32> %x0, <8 x i16>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_dw_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovsdw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x23,0xc1]
; CHECK-NEXT: vpmovsdw %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x23,0xc2]
+; CHECK-NEXT: vpmovsdw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x23,0xc1]
; CHECK-NEXT: vpmovsdw %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x23,0xc0]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc2]
@@ -2871,8 +2895,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovus_dw_128(<4 x i32> %x0, <8 x i16>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_dw_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovusdw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x13,0xc1]
; CHECK-NEXT: vpmovusdw %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0x89,0x13,0xc2]
+; CHECK-NEXT: vpmovusdw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x09,0x13,0xc1]
; CHECK-NEXT: vpmovusdw %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x13,0xc0]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc2]
@@ -2905,8 +2929,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmov_dw_256(<8 x i32> %x0, <8 x i16> %
; CHECK-LABEL: test_int_x86_avx512_mask_pmov_dw_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovdw %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x33,0xc1]
; CHECK-NEXT: vpmovdw %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x33,0xc2]
+; CHECK-NEXT: vpmovdw %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x33,0xc1]
; CHECK-NEXT: vpmovdw %ymm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x33,0xc0]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc2]
@@ -2939,8 +2963,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovs_dw_256(<8 x i32> %x0, <8 x i16>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_dw_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovsdw %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x23,0xc1]
; CHECK-NEXT: vpmovsdw %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x23,0xc2]
+; CHECK-NEXT: vpmovsdw %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x23,0xc1]
; CHECK-NEXT: vpmovsdw %ymm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x23,0xc0]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc2]
@@ -2973,8 +2997,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovus_dw_256(<8 x i32> %x0, <8 x i16>
; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_dw_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpmovusdw %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x13,0xc1]
; CHECK-NEXT: vpmovusdw %ymm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7e,0xa9,0x13,0xc2]
+; CHECK-NEXT: vpmovusdw %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x13,0xc1]
; CHECK-NEXT: vpmovusdw %ymm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x13,0xc0]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfd,0xc2]
@@ -3545,10 +3569,10 @@ define <8 x float>@test_int_x86_avx512_mask_shuf_f32x4_256(<8 x float> %x0, <8 x
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_f32x4_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vshuff32x4 $22, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x23,0xd1,0x16]
-; CHECK-NEXT: ## ymm2 {%k1} = ymm0[0,1,2,3],ymm1[4,5,6,7]
; CHECK-NEXT: vshuff32x4 $22, %ymm1, %ymm0, %ymm3 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0xa9,0x23,0xd9,0x16]
; CHECK-NEXT: ## ymm3 {%k1} {z} = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; CHECK-NEXT: vshuff32x4 $22, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x23,0xd1,0x16]
+; CHECK-NEXT: ## ymm2 {%k1} = ymm0[0,1,2,3],ymm1[4,5,6,7]
; CHECK-NEXT: vshuff32x4 $22, %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0x7d,0x28,0x23,0xc1,0x16]
; CHECK-NEXT: ## ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; CHECK-NEXT: vaddps %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc0]
@@ -3568,10 +3592,10 @@ define <4 x double>@test_int_x86_avx512_mask_shuf_f64x2_256(<4 x double> %x0, <4
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_f64x2_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vshuff64x2 $22, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x23,0xd1,0x16]
-; CHECK-NEXT: ## ymm2 {%k1} = ymm0[0,1],ymm1[2,3]
; CHECK-NEXT: vshuff64x2 $22, %ymm1, %ymm0, %ymm3 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0x23,0xd9,0x16]
; CHECK-NEXT: ## ymm3 {%k1} {z} = ymm0[0,1],ymm1[2,3]
+; CHECK-NEXT: vshuff64x2 $22, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x23,0xd1,0x16]
+; CHECK-NEXT: ## ymm2 {%k1} = ymm0[0,1],ymm1[2,3]
; CHECK-NEXT: vshuff64x2 $22, %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0xfd,0x28,0x23,0xc1,0x16]
; CHECK-NEXT: ## ymm0 = ymm0[0,1],ymm1[2,3]
; CHECK-NEXT: vaddpd %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc0]
@@ -3627,8 +3651,8 @@ define <2 x double>@test_int_x86_avx512_mask_getmant_pd_128(<2 x double> %x0, <2
; CHECK-LABEL: test_int_x86_avx512_mask_getmant_pd_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vgetmantpd $11, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x26,0xc8,0x0b]
; CHECK-NEXT: vgetmantpd $11, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0x89,0x26,0xd0,0x0b]
+; CHECK-NEXT: vgetmantpd $11, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x26,0xc8,0x0b]
; CHECK-NEXT: vgetmantpd $11, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0xfd,0x08,0x26,0xc0,0x0b]
; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc0]
; CHECK-NEXT: vaddpd %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc0]
@@ -3696,9 +3720,9 @@ define <4 x i32>@test_int_x86_avx512_mask_pternlog_d_128(<4 x i32> %x0, <4 x i32
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
-; CHECK-NEXT: vpternlogd $33, %xmm2, %xmm1, %xmm3 {%k1} ## encoding: [0x62,0xf3,0x75,0x09,0x25,0xda,0x21]
-; CHECK-NEXT: vpternlogd $33, %xmm2, %xmm1, %xmm0 ## encoding: [0x62,0xf3,0x75,0x08,0x25,0xc2,0x21]
-; CHECK-NEXT: vpaddd %xmm0, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
+; CHECK-NEXT: vpternlogd $33, %xmm2, %xmm1, %xmm3 ## encoding: [0x62,0xf3,0x75,0x08,0x25,0xda,0x21]
+; CHECK-NEXT: vpternlogd $33, %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf3,0x75,0x09,0x25,0xc2,0x21]
+; CHECK-NEXT: vpaddd %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.pternlog.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i32 33, i8 %x4)
%res1 = call <4 x i32> @llvm.x86.avx512.mask.pternlog.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i32 33, i8 -1)
@@ -3713,9 +3737,9 @@ define <4 x i32>@test_int_x86_avx512_maskz_pternlog_d_128(<4 x i32> %x0, <4 x i3
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
-; CHECK-NEXT: vpternlogd $33, %xmm2, %xmm1, %xmm3 {%k1} {z} ## encoding: [0x62,0xf3,0x75,0x89,0x25,0xda,0x21]
-; CHECK-NEXT: vpternlogd $33, %xmm2, %xmm1, %xmm0 ## encoding: [0x62,0xf3,0x75,0x08,0x25,0xc2,0x21]
-; CHECK-NEXT: vpaddd %xmm0, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
+; CHECK-NEXT: vpternlogd $33, %xmm2, %xmm1, %xmm3 ## encoding: [0x62,0xf3,0x75,0x08,0x25,0xda,0x21]
+; CHECK-NEXT: vpternlogd $33, %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf3,0x75,0x89,0x25,0xc2,0x21]
+; CHECK-NEXT: vpaddd %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.maskz.pternlog.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i32 33, i8 %x4)
%res1 = call <4 x i32> @llvm.x86.avx512.maskz.pternlog.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i32 33, i8 -1)
@@ -3730,9 +3754,9 @@ define <8 x i32>@test_int_x86_avx512_mask_pternlog_d_256(<8 x i32> %x0, <8 x i32
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
-; CHECK-NEXT: vpternlogd $33, %ymm2, %ymm1, %ymm3 {%k1} ## encoding: [0x62,0xf3,0x75,0x29,0x25,0xda,0x21]
-; CHECK-NEXT: vpternlogd $33, %ymm2, %ymm1, %ymm0 ## encoding: [0x62,0xf3,0x75,0x28,0x25,0xc2,0x21]
-; CHECK-NEXT: vpaddd %ymm0, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0]
+; CHECK-NEXT: vpternlogd $33, %ymm2, %ymm1, %ymm3 ## encoding: [0x62,0xf3,0x75,0x28,0x25,0xda,0x21]
+; CHECK-NEXT: vpternlogd $33, %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf3,0x75,0x29,0x25,0xc2,0x21]
+; CHECK-NEXT: vpaddd %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.pternlog.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i32 33, i8 %x4)
%res1 = call <8 x i32> @llvm.x86.avx512.mask.pternlog.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i32 33, i8 -1)
@@ -3747,9 +3771,9 @@ define <8 x i32>@test_int_x86_avx512_maskz_pternlog_d_256(<8 x i32> %x0, <8 x i3
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
-; CHECK-NEXT: vpternlogd $33, %ymm2, %ymm1, %ymm3 {%k1} {z} ## encoding: [0x62,0xf3,0x75,0xa9,0x25,0xda,0x21]
-; CHECK-NEXT: vpternlogd $33, %ymm2, %ymm1, %ymm0 ## encoding: [0x62,0xf3,0x75,0x28,0x25,0xc2,0x21]
-; CHECK-NEXT: vpaddd %ymm0, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0]
+; CHECK-NEXT: vpternlogd $33, %ymm2, %ymm1, %ymm3 ## encoding: [0x62,0xf3,0x75,0x28,0x25,0xda,0x21]
+; CHECK-NEXT: vpternlogd $33, %ymm2, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0x75,0xa9,0x25,0xc2,0x21]
+; CHECK-NEXT: vpaddd %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.maskz.pternlog.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i32 33, i8 %x4)
%res1 = call <8 x i32> @llvm.x86.avx512.maskz.pternlog.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i32 33, i8 -1)
@@ -3764,9 +3788,9 @@ define <2 x i64>@test_int_x86_avx512_mask_pternlog_q_128(<2 x i64> %x0, <2 x i64
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
-; CHECK-NEXT: vpternlogq $33, %xmm2, %xmm1, %xmm3 {%k1} ## encoding: [0x62,0xf3,0xf5,0x09,0x25,0xda,0x21]
-; CHECK-NEXT: vpternlogq $33, %xmm2, %xmm1, %xmm0 ## encoding: [0x62,0xf3,0xf5,0x08,0x25,0xc2,0x21]
-; CHECK-NEXT: vpaddq %xmm0, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xd4,0xc0]
+; CHECK-NEXT: vpternlogq $33, %xmm2, %xmm1, %xmm3 ## encoding: [0x62,0xf3,0xf5,0x08,0x25,0xda,0x21]
+; CHECK-NEXT: vpternlogq $33, %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf3,0xf5,0x09,0x25,0xc2,0x21]
+; CHECK-NEXT: vpaddq %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.pternlog.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i32 33, i8 %x4)
%res1 = call <2 x i64> @llvm.x86.avx512.mask.pternlog.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i32 33, i8 -1)
@@ -3781,9 +3805,9 @@ define <2 x i64>@test_int_x86_avx512_maskz_pternlog_q_128(<2 x i64> %x0, <2 x i6
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xd8]
-; CHECK-NEXT: vpternlogq $33, %xmm2, %xmm1, %xmm3 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0x89,0x25,0xda,0x21]
-; CHECK-NEXT: vpternlogq $33, %xmm2, %xmm1, %xmm0 ## encoding: [0x62,0xf3,0xf5,0x08,0x25,0xc2,0x21]
-; CHECK-NEXT: vpaddq %xmm0, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xd4,0xc0]
+; CHECK-NEXT: vpternlogq $33, %xmm2, %xmm1, %xmm3 ## encoding: [0x62,0xf3,0xf5,0x08,0x25,0xda,0x21]
+; CHECK-NEXT: vpternlogq $33, %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0x89,0x25,0xc2,0x21]
+; CHECK-NEXT: vpaddq %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.maskz.pternlog.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i32 33, i8 %x4)
%res1 = call <2 x i64> @llvm.x86.avx512.maskz.pternlog.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i32 33, i8 -1)
@@ -3798,9 +3822,9 @@ define <4 x i64>@test_int_x86_avx512_mask_pternlog_q_256(<4 x i64> %x0, <4 x i64
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
-; CHECK-NEXT: vpternlogq $33, %ymm2, %ymm1, %ymm3 {%k1} ## encoding: [0x62,0xf3,0xf5,0x29,0x25,0xda,0x21]
-; CHECK-NEXT: vpternlogq $33, %ymm2, %ymm1, %ymm0 ## encoding: [0x62,0xf3,0xf5,0x28,0x25,0xc2,0x21]
-; CHECK-NEXT: vpaddq %ymm0, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xd4,0xc0]
+; CHECK-NEXT: vpternlogq $33, %ymm2, %ymm1, %ymm3 ## encoding: [0x62,0xf3,0xf5,0x28,0x25,0xda,0x21]
+; CHECK-NEXT: vpternlogq $33, %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf3,0xf5,0x29,0x25,0xc2,0x21]
+; CHECK-NEXT: vpaddq %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.pternlog.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i32 33, i8 %x4)
%res1 = call <4 x i64> @llvm.x86.avx512.mask.pternlog.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i32 33, i8 -1)
@@ -3815,9 +3839,9 @@ define <4 x i64>@test_int_x86_avx512_maskz_pternlog_q_256(<4 x i64> %x0, <4 x i6
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovdqa %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xd8]
-; CHECK-NEXT: vpternlogq $33, %ymm2, %ymm1, %ymm3 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0xa9,0x25,0xda,0x21]
-; CHECK-NEXT: vpternlogq $33, %ymm2, %ymm1, %ymm0 ## encoding: [0x62,0xf3,0xf5,0x28,0x25,0xc2,0x21]
-; CHECK-NEXT: vpaddq %ymm0, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xd4,0xc0]
+; CHECK-NEXT: vpternlogq $33, %ymm2, %ymm1, %ymm3 ## encoding: [0x62,0xf3,0xf5,0x28,0x25,0xda,0x21]
+; CHECK-NEXT: vpternlogq $33, %ymm2, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0xa9,0x25,0xc2,0x21]
+; CHECK-NEXT: vpaddq %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.maskz.pternlog.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i32 33, i8 %x4)
%res1 = call <4 x i64> @llvm.x86.avx512.maskz.pternlog.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i32 33, i8 -1)
@@ -4080,7 +4104,7 @@ define <4 x double> @test_rsqrt_pd_256_rrk(<4 x double> %a0, <4 x double> %a1, i
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt14pd %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x4e,0xc8]
-; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
+; CHECK-NEXT: vmovapd %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.rsqrt14.pd.256(<4 x double> %a0, <4 x double> %a1, i8 %mask)
ret <4 x double> %res
@@ -4110,7 +4134,7 @@ define <2 x double> @test_rsqrt_pd_128_rrk(<2 x double> %a0, <2 x double> %a1, i
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt14pd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x4e,0xc8]
-; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
+; CHECK-NEXT: vmovapd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx512.rsqrt14.pd.128(<2 x double> %a0, <2 x double> %a1, i8 %mask)
ret <2 x double> %res
@@ -4143,7 +4167,7 @@ define <4 x double> @test_rcp_pd_256_rrk(<4 x double> %a0, <4 x double> %a1, i8
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrcp14pd %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x4c,0xc8]
-; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
+; CHECK-NEXT: vmovapd %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.rcp14.pd.256(<4 x double> %a0, <4 x double> %a1, i8 %mask)
ret <4 x double> %res
@@ -4173,7 +4197,7 @@ define <2 x double> @test_rcp_pd_128_rrk(<2 x double> %a0, <2 x double> %a1, i8
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrcp14pd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x4c,0xc8]
-; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
+; CHECK-NEXT: vmovapd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx512.rcp14.pd.128(<2 x double> %a0, <2 x double> %a1, i8 %mask)
ret <2 x double> %res
@@ -4206,6 +4230,19 @@ define <8 x float>@test_int_x86_avx512_mask_broadcastf32x4_256(<4 x float> %x0,
ret <8 x float> %res5
}
+define <8 x float>@test_int_x86_avx512_mask_broadcastf32x4_256_load(<4 x float>* %x0ptr, <8 x float> %x2, i8 %mask) {
+; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_256_load:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: vmovaps (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x0f]
+; CHECK-NEXT: vshuff32x4 $0, %ymm1, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf3,0x75,0x29,0x23,0xc1,0x00]
+; CHECK-NEXT: ## ymm0 {%k1} = ymm1[0,1,2,3,0,1,2,3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %x0 = load <4 x float>, <4 x float>* %x0ptr
+ %res = call <8 x float> @llvm.x86.avx512.mask.broadcastf32x4.256(<4 x float> %x0, <8 x float> %x2, i8 %mask)
+ ret <8 x float> %res
+}
+
declare <8 x i32> @llvm.x86.avx512.mask.broadcasti32x4.256(<4 x i32>, <8 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x4_256(<4 x i32> %x0, <8 x i32> %x2, i8 %mask) {
@@ -4230,17 +4267,30 @@ define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x4_256(<4 x i32> %x0, <8 x
ret <8 x i32> %res5
}
+define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x4_256_load(<4 x i32>* %x0ptr, <8 x i32> %x2, i8 %mask) {
+; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_256_load:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: vmovdqa (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x0f]
+; CHECK-NEXT: vshufi32x4 $0, %ymm1, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf3,0x75,0x29,0x43,0xc1,0x00]
+; CHECK-NEXT: ## ymm0 {%k1} = ymm1[0,1,2,3,0,1,2,3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %x0 = load <4 x i32>, <4 x i32>* %x0ptr
+ %res = call <8 x i32> @llvm.x86.avx512.mask.broadcasti32x4.256(<4 x i32> %x0, <8 x i32> %x2, i8 %mask)
+ ret <8 x i32> %res
+}
+
declare <4 x i32> @llvm.x86.avx512.mask.prorv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_mask_prorv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prorv_d_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vprorvd %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0x7d,0x08,0x14,0xd9]
; CHECK-NEXT: vprorvd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x14,0xd1]
-; CHECK-NEXT: vprorvd %xmm1, %xmm0, %xmm3 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x14,0xd9]
-; CHECK-NEXT: vprorvd %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7d,0x08,0x14,0xc1]
-; CHECK-NEXT: vpaddd %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xcb]
-; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
+; CHECK-NEXT: vprorvd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x14,0xc1]
+; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; CHECK-NEXT: vpaddd %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
%res1 = call <4 x i32> @llvm.x86.avx512.mask.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> zeroinitializer, i8 %x3)
@@ -4256,11 +4306,11 @@ define <8 x i32>@test_int_x86_avx512_mask_prorv_d_256(<8 x i32> %x0, <8 x i32> %
; CHECK-LABEL: test_int_x86_avx512_mask_prorv_d_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vprorvd %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0x7d,0x28,0x14,0xd9]
; CHECK-NEXT: vprorvd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x14,0xd1]
-; CHECK-NEXT: vprorvd %ymm1, %ymm0, %ymm3 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x14,0xd9]
-; CHECK-NEXT: vprorvd %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf2,0x7d,0x28,0x14,0xc1]
-; CHECK-NEXT: vpaddd %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xcb]
-; CHECK-NEXT: vpaddd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
+; CHECK-NEXT: vprorvd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x14,0xc1]
+; CHECK-NEXT: vpaddd %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
+; CHECK-NEXT: vpaddd %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3)
%res1 = call <8 x i32> @llvm.x86.avx512.mask.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> zeroinitializer, i8 %x3)
@@ -4276,11 +4326,11 @@ define <2 x i64>@test_int_x86_avx512_mask_prorv_q_128(<2 x i64> %x0, <2 x i64> %
; CHECK-LABEL: test_int_x86_avx512_mask_prorv_q_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vprorvq %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x14,0xd9]
; CHECK-NEXT: vprorvq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x14,0xd1]
-; CHECK-NEXT: vprorvq %xmm1, %xmm0, %xmm3 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x14,0xd9]
-; CHECK-NEXT: vprorvq %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf2,0xfd,0x08,0x14,0xc1]
-; CHECK-NEXT: vpaddq %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xcb]
-; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
+; CHECK-NEXT: vprorvq %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x14,0xc1]
+; CHECK-NEXT: vpaddq %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
+; CHECK-NEXT: vpaddq %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
%res1 = call <2 x i64> @llvm.x86.avx512.mask.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> zeroinitializer, i8 %x3)
@@ -4296,11 +4346,11 @@ define <4 x i64>@test_int_x86_avx512_mask_prorv_q_256(<4 x i64> %x0, <4 x i64> %
; CHECK-LABEL: test_int_x86_avx512_mask_prorv_q_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vprorvq %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x14,0xd9]
; CHECK-NEXT: vprorvq %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x14,0xd1]
-; CHECK-NEXT: vprorvq %ymm1, %ymm0, %ymm3 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x14,0xd9]
-; CHECK-NEXT: vprorvq %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf2,0xfd,0x28,0x14,0xc1]
-; CHECK-NEXT: vpaddq %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xcb]
-; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
+; CHECK-NEXT: vprorvq %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x14,0xc1]
+; CHECK-NEXT: vpaddq %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; CHECK-NEXT: vpaddq %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3)
%res1 = call <4 x i64> @llvm.x86.avx512.mask.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %x3)
@@ -4396,11 +4446,11 @@ define <4 x i32>@test_int_x86_avx512_mask_prolv_d_128(<4 x i32> %x0, <4 x i32> %
; CHECK-LABEL: test_int_x86_avx512_mask_prolv_d_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vprolvd %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0x7d,0x08,0x15,0xd9]
; CHECK-NEXT: vprolvd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x15,0xd1]
-; CHECK-NEXT: vprolvd %xmm1, %xmm0, %xmm3 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x15,0xd9]
-; CHECK-NEXT: vprolvd %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7d,0x08,0x15,0xc1]
-; CHECK-NEXT: vpaddd %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xcb]
-; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
+; CHECK-NEXT: vprolvd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x15,0xc1]
+; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; CHECK-NEXT: vpaddd %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.prolv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
%res1 = call <4 x i32> @llvm.x86.avx512.mask.prolv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> zeroinitializer, i8 %x3)
@@ -4416,11 +4466,11 @@ define <8 x i32>@test_int_x86_avx512_mask_prolv_d_256(<8 x i32> %x0, <8 x i32> %
; CHECK-LABEL: test_int_x86_avx512_mask_prolv_d_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vprolvd %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0x7d,0x28,0x15,0xd9]
; CHECK-NEXT: vprolvd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x15,0xd1]
-; CHECK-NEXT: vprolvd %ymm1, %ymm0, %ymm3 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x15,0xd9]
-; CHECK-NEXT: vprolvd %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf2,0x7d,0x28,0x15,0xc1]
-; CHECK-NEXT: vpaddd %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xcb]
-; CHECK-NEXT: vpaddd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
+; CHECK-NEXT: vprolvd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x15,0xc1]
+; CHECK-NEXT: vpaddd %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
+; CHECK-NEXT: vpaddd %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.prolv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3)
%res1 = call <8 x i32> @llvm.x86.avx512.mask.prolv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> zeroinitializer, i8 %x3)
@@ -4436,11 +4486,11 @@ define <2 x i64>@test_int_x86_avx512_mask_prolv_q_128(<2 x i64> %x0, <2 x i64> %
; CHECK-LABEL: test_int_x86_avx512_mask_prolv_q_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vprolvq %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf2,0xfd,0x08,0x15,0xd9]
; CHECK-NEXT: vprolvq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x15,0xd1]
-; CHECK-NEXT: vprolvq %xmm1, %xmm0, %xmm3 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x15,0xd9]
-; CHECK-NEXT: vprolvq %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf2,0xfd,0x08,0x15,0xc1]
-; CHECK-NEXT: vpaddq %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xcb]
-; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
+; CHECK-NEXT: vprolvq %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x15,0xc1]
+; CHECK-NEXT: vpaddq %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
+; CHECK-NEXT: vpaddq %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.prolv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
%res1 = call <2 x i64> @llvm.x86.avx512.mask.prolv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> zeroinitializer, i8 %x3)
@@ -4456,11 +4506,11 @@ define <4 x i64>@test_int_x86_avx512_mask_prolv_q_256(<4 x i64> %x0, <4 x i64> %
; CHECK-LABEL: test_int_x86_avx512_mask_prolv_q_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vprolvq %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf2,0xfd,0x28,0x15,0xd9]
; CHECK-NEXT: vprolvq %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x15,0xd1]
-; CHECK-NEXT: vprolvq %ymm1, %ymm0, %ymm3 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x15,0xd9]
-; CHECK-NEXT: vprolvq %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf2,0xfd,0x28,0x15,0xc1]
-; CHECK-NEXT: vpaddq %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xcb]
-; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
+; CHECK-NEXT: vprolvq %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x15,0xc1]
+; CHECK-NEXT: vpaddq %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; CHECK-NEXT: vpaddq %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.prolv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3)
%res1 = call <4 x i64> @llvm.x86.avx512.mask.prolv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %x3)
@@ -4556,11 +4606,11 @@ define <4 x double>@test_int_x86_avx512_mask_permvar_df_256(<4 x double> %x0, <4
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_df_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vpermpd %ymm0, %ymm1, %ymm3 ## encoding: [0x62,0xf2,0xf5,0x28,0x16,0xd8]
; CHECK-NEXT: vpermpd %ymm0, %ymm1, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0x16,0xd0]
-; CHECK-NEXT: vpermpd %ymm0, %ymm1, %ymm3 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xa9,0x16,0xd8]
-; CHECK-NEXT: vpermpd %ymm0, %ymm1, %ymm0 ## encoding: [0x62,0xf2,0xf5,0x28,0x16,0xc0]
-; CHECK-NEXT: vaddpd %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xcb]
-; CHECK-NEXT: vaddpd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc0]
+; CHECK-NEXT: vpermpd %ymm0, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xa9,0x16,0xc0]
+; CHECK-NEXT: vaddpd %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc0]
+; CHECK-NEXT: vaddpd %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %x0, <4 x i64> %x1, <4 x double> %x2, i8 %x3)
%res1 = call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %x0, <4 x i64> %x1, <4 x double> zeroinitializer, i8 %x3)
@@ -4576,11 +4626,11 @@ define <4 x i64>@test_int_x86_avx512_mask_permvar_di_256(<4 x i64> %x0, <4 x i64
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_di_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vpermq %ymm0, %ymm1, %ymm3 ## encoding: [0x62,0xf2,0xf5,0x28,0x36,0xd8]
; CHECK-NEXT: vpermq %ymm0, %ymm1, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0x36,0xd0]
-; CHECK-NEXT: vpermq %ymm0, %ymm1, %ymm3 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xa9,0x36,0xd8]
-; CHECK-NEXT: vpermq %ymm0, %ymm1, %ymm0 ## encoding: [0x62,0xf2,0xf5,0x28,0x36,0xc0]
-; CHECK-NEXT: vpaddq %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xcb]
-; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
+; CHECK-NEXT: vpermq %ymm0, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xa9,0x36,0xc0]
+; CHECK-NEXT: vpaddq %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; CHECK-NEXT: vpaddq %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3)
%res1 = call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %x3)
@@ -4596,11 +4646,11 @@ define <8 x float>@test_int_x86_avx512_mask_permvar_sf_256(<8 x float> %x0, <8 x
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_sf_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xd8]
; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x75,0x29,0x16,0xd0]
-; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm3 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xa9,0x16,0xd8]
-; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xc0]
-; CHECK-NEXT: vaddps %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xcb]
-; CHECK-NEXT: vaddps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xc0]
+; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xa9,0x16,0xc0]
+; CHECK-NEXT: vaddps %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc0]
+; CHECK-NEXT: vaddps %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %x0, <8 x i32> %x1, <8 x float> %x2, i8 %x3)
%res1 = call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %x0, <8 x i32> %x1, <8 x float> zeroinitializer, i8 %x3)
@@ -4616,11 +4666,11 @@ define <8 x i32>@test_int_x86_avx512_mask_permvar_si_256(<8 x i32> %x0, <8 x i32
; CHECK-LABEL: test_int_x86_avx512_mask_permvar_si_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x36,0xd8]
; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x75,0x29,0x36,0xd0]
-; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm3 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xa9,0x36,0xd8]
-; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x36,0xc0]
-; CHECK-NEXT: vpaddd %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xcb]
-; CHECK-NEXT: vpaddd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
+; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xa9,0x36,0xc0]
+; CHECK-NEXT: vpaddd %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
+; CHECK-NEXT: vpaddd %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3)
%res1 = call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> zeroinitializer, i8 %x3)
@@ -4724,13 +4774,13 @@ define <4 x float>@test_int_x86_avx512_mask_fixupimm_ps_128(<4 x float> %x0, <4
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd8]
-; CHECK-NEXT: vfixupimmps $5, %xmm2, %xmm1, %xmm3 {%k1} ## encoding: [0x62,0xf3,0x75,0x09,0x54,0xda,0x05]
+; CHECK-NEXT: vfixupimmps $5, %xmm2, %xmm1, %xmm3 ## encoding: [0x62,0xf3,0x75,0x08,0x54,0xda,0x05]
; CHECK-NEXT: vmovaps %xmm0, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xe0]
-; CHECK-NEXT: vfixupimmps $5, %xmm2, %xmm1, %xmm4 ## encoding: [0x62,0xf3,0x75,0x08,0x54,0xe2,0x05]
+; CHECK-NEXT: vfixupimmps $5, %xmm2, %xmm1, %xmm4 {%k1} ## encoding: [0x62,0xf3,0x75,0x09,0x54,0xe2,0x05]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vfixupimmps $5, %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf3,0x75,0x09,0x54,0xc2,0x05]
-; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc0]
-; CHECK-NEXT: vaddps %xmm4, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc4]
+; CHECK-NEXT: vaddps %xmm0, %xmm4, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xd8,0x58,0xc0]
+; CHECK-NEXT: vaddps %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.fixupimm.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 %x4)
%res1 = call <4 x float> @llvm.x86.avx512.mask.fixupimm.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x i32> zeroinitializer, i32 5, i8 %x4)
@@ -4747,13 +4797,13 @@ define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ps_128(<4 x float> %x0, <4
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd8]
-; CHECK-NEXT: vfixupimmps $5, %xmm2, %xmm1, %xmm3 {%k1} {z} ## encoding: [0x62,0xf3,0x75,0x89,0x54,0xda,0x05]
+; CHECK-NEXT: vfixupimmps $5, %xmm2, %xmm1, %xmm3 ## encoding: [0x62,0xf3,0x75,0x08,0x54,0xda,0x05]
; CHECK-NEXT: vmovaps %xmm0, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xe0]
-; CHECK-NEXT: vfixupimmps $5, %xmm2, %xmm1, %xmm4 ## encoding: [0x62,0xf3,0x75,0x08,0x54,0xe2,0x05]
+; CHECK-NEXT: vfixupimmps $5, %xmm2, %xmm1, %xmm4 {%k1} {z} ## encoding: [0x62,0xf3,0x75,0x89,0x54,0xe2,0x05]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vfixupimmps $5, %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf3,0x75,0x89,0x54,0xc2,0x05]
-; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc0]
-; CHECK-NEXT: vaddps %xmm4, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc4]
+; CHECK-NEXT: vaddps %xmm0, %xmm4, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xd8,0x58,0xc0]
+; CHECK-NEXT: vaddps %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.maskz.fixupimm.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 %x4)
%res1 = call <4 x float> @llvm.x86.avx512.maskz.fixupimm.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x i32> zeroinitializer, i32 5, i8 %x4)
@@ -4770,13 +4820,13 @@ define <8 x float>@test_int_x86_avx512_mask_fixupimm_ps_256(<8 x float> %x0, <8
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd8]
-; CHECK-NEXT: vfixupimmps $5, %ymm2, %ymm1, %ymm3 {%k1} ## encoding: [0x62,0xf3,0x75,0x29,0x54,0xda,0x05]
+; CHECK-NEXT: vfixupimmps $5, %ymm2, %ymm1, %ymm3 ## encoding: [0x62,0xf3,0x75,0x28,0x54,0xda,0x05]
; CHECK-NEXT: vmovaps %ymm0, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xe0]
-; CHECK-NEXT: vfixupimmps $5, %ymm2, %ymm1, %ymm4 ## encoding: [0x62,0xf3,0x75,0x28,0x54,0xe2,0x05]
+; CHECK-NEXT: vfixupimmps $5, %ymm2, %ymm1, %ymm4 {%k1} ## encoding: [0x62,0xf3,0x75,0x29,0x54,0xe2,0x05]
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
; CHECK-NEXT: vfixupimmps $5, %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf3,0x75,0x29,0x54,0xc2,0x05]
-; CHECK-NEXT: vaddps %ymm0, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc0]
-; CHECK-NEXT: vaddps %ymm4, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc4]
+; CHECK-NEXT: vaddps %ymm0, %ymm4, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xdc,0x58,0xc0]
+; CHECK-NEXT: vaddps %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.fixupimm.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x i32> %x2, i32 5, i8 %x4)
%res1 = call <8 x float> @llvm.x86.avx512.mask.fixupimm.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x i32> zeroinitializer, i32 5, i8 %x4)
@@ -4793,13 +4843,13 @@ define <8 x float>@test_int_x86_avx512_maskz_fixupimm_ps_256(<8 x float> %x0, <8
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovaps %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd8]
-; CHECK-NEXT: vfixupimmps $5, %ymm2, %ymm1, %ymm3 {%k1} {z} ## encoding: [0x62,0xf3,0x75,0xa9,0x54,0xda,0x05]
+; CHECK-NEXT: vfixupimmps $5, %ymm2, %ymm1, %ymm3 ## encoding: [0x62,0xf3,0x75,0x28,0x54,0xda,0x05]
; CHECK-NEXT: vmovaps %ymm0, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xe0]
-; CHECK-NEXT: vfixupimmps $5, %ymm2, %ymm1, %ymm4 ## encoding: [0x62,0xf3,0x75,0x28,0x54,0xe2,0x05]
+; CHECK-NEXT: vfixupimmps $5, %ymm2, %ymm1, %ymm4 {%k1} {z} ## encoding: [0x62,0xf3,0x75,0xa9,0x54,0xe2,0x05]
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
; CHECK-NEXT: vfixupimmps $5, %ymm2, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0x75,0xa9,0x54,0xc2,0x05]
-; CHECK-NEXT: vaddps %ymm0, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc0]
-; CHECK-NEXT: vaddps %ymm4, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc4]
+; CHECK-NEXT: vaddps %ymm0, %ymm4, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xdc,0x58,0xc0]
+; CHECK-NEXT: vaddps %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.maskz.fixupimm.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x i32> %x2, i32 5, i8 %x4)
%res1 = call <8 x float> @llvm.x86.avx512.maskz.fixupimm.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x i32> zeroinitializer, i32 5, i8 %x4)
@@ -4820,6 +4870,7 @@ define i8@test_int_x86_avx512_ptestm_d_128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
; CHECK-NEXT: vptestmd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0x7d,0x08,0x27,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 -1)
@@ -4833,11 +4884,12 @@ define i8@test_int_x86_avx512_ptestm_d_256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2)
; CHECK-LABEL: test_int_x86_avx512_ptestm_d_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vptestmd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x27,0xc1]
-; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
; CHECK-NEXT: vptestmd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0x7d,0x28,0x27,0xc1]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: vptestmd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x27,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8 -1)
@@ -4856,6 +4908,7 @@ define i8@test_int_x86_avx512_ptestm_q_128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
; CHECK-NEXT: vptestmq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x27,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 -1)
@@ -4874,6 +4927,7 @@ define i8@test_int_x86_avx512_ptestm_q_256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2)
; CHECK-NEXT: vptestmq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x27,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 -1)
@@ -4892,6 +4946,7 @@ define i8@test_int_x86_avx512_ptestnm_d_128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2
; CHECK-NEXT: vptestnmd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x08,0x27,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 -1)
@@ -4905,11 +4960,12 @@ define i8@test_int_x86_avx512_ptestnm_d_256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2
; CHECK-LABEL: test_int_x86_avx512_ptestnm_d_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vptestnmd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x27,0xc1]
-; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
; CHECK-NEXT: vptestnmd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x28,0x27,0xc1]
+; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
+; CHECK-NEXT: vptestnmd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0x7e,0x29,0x27,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8 -1)
@@ -4928,6 +4984,7 @@ define i8@test_int_x86_avx512_ptestnm_q_128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2
; CHECK-NEXT: vptestnmq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x08,0x27,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 -1)
@@ -4946,6 +5003,7 @@ define i8@test_int_x86_avx512_ptestnm_q_256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2
; CHECK-NEXT: vptestnmq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x28,0x27,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
+; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 -1)
@@ -4959,8 +5017,8 @@ define <8 x i32>@test_int_x86_avx512_mask_pbroadcast_d_gpr_256(i32 %x0, <8 x i32
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_d_gpr_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
-; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7c,0xc7]
; CHECK-NEXT: vpbroadcastd %edi, %ymm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x7c,0xcf]
+; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7c,0xc7]
; CHECK-NEXT: vpbroadcastd %edi, %ymm2 ## encoding: [0x62,0xf2,0x7d,0x28,0x7c,0xd7]
; CHECK-NEXT: vpaddd %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
; CHECK-NEXT: vpaddd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
@@ -4979,8 +5037,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pbroadcast_d_gpr_128(i32 %x0, <4 x i32
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_d_gpr_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
-; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7c,0xc7]
; CHECK-NEXT: vpbroadcastd %edi, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x7c,0xcf]
+; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7c,0xc7]
; CHECK-NEXT: vpbroadcastd %edi, %xmm2 ## encoding: [0x62,0xf2,0x7d,0x08,0x7c,0xd7]
; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
@@ -4999,8 +5057,8 @@ define <4 x i64>@test_int_x86_avx512_mask_pbroadcast_q_gpr_256(i64 %x0, <4 x i64
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_q_gpr_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
-; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x7c,0xc7]
; CHECK-NEXT: vpbroadcastq %rdi, %ymm1 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x7c,0xcf]
+; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x7c,0xc7]
; CHECK-NEXT: vpbroadcastq %rdi, %ymm2 ## encoding: [0x62,0xf2,0xfd,0x28,0x7c,0xd7]
; CHECK-NEXT: vpaddq %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
@@ -5019,8 +5077,8 @@ define <2 x i64>@test_int_x86_avx512_mask_pbroadcast_q_gpr_128(i64 %x0, <2 x i64
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_q_gpr_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
-; CHECK-NEXT: vpbroadcastq %rdi, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x7c,0xc7]
; CHECK-NEXT: vpbroadcastq %rdi, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x7c,0xcf]
+; CHECK-NEXT: vpbroadcastq %rdi, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x7c,0xc7]
; CHECK-NEXT: vpbroadcastq %rdi, %xmm2 ## encoding: [0x62,0xf2,0xfd,0x08,0x7c,0xd7]
; CHECK-NEXT: vpaddq %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
@@ -5252,3 +5310,1115 @@ define <4 x i64> @test_x86_avx512_maskz_psrav_q_256(<4 x i64> %a0, <4 x i64> %a1
}
declare <4 x i64> @llvm.x86.avx512.psrav.q.256(<4 x i64>, <4 x i64>) nounwind readnone
+
+declare <8 x float> @llvm.x86.avx512.mask.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone
+
+define <8 x float> @test_mask_vfmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfmadd256_ps:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmadd132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x98,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask.vfmadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) nounwind
+ ret <8 x float> %res
+}
+
+declare <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
+
+define <4 x float> @test_mask_vfmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfmadd128_ps:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x98,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
+ ret <4 x float> %res
+}
+
+declare <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)
+
+define <4 x double> @test_mask_fmadd256_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c, i8 %mask) {
+; CHECK-LABEL: test_mask_fmadd256_pd:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x98,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a, <4 x double> %b, <4 x double> %c, i8 %mask)
+ ret <4 x double> %res
+}
+
+declare <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)
+
+define <2 x double> @test_mask_fmadd128_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
+; CHECK-LABEL: test_mask_fmadd128_pd:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x98,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask)
+ ret <2 x double> %res
+}
+
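+; Each test_int_* case below calls the intrinsic twice, once with the given
+; mask and once with an all-ones mask (-1), then adds the two results so a
+; single function exercises both the masked and the unmasked selection.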
+define <2 x double>@test_int_x86_avx512_mask_vfmadd_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_pd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
+; CHECK-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa8,0xda]
+; CHECK-NEXT: vfmadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x98,0xc1]
+; CHECK-NEXT: vaddpd %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
+ %res1 = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
+ %res2 = fadd <2 x double> %res, %res1
+ ret <2 x double> %res2
+}
+
+declare <2 x double> @llvm.x86.avx512.mask3.vfmadd.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)
+
+define <2 x double>@test_int_x86_avx512_mask3_vfmadd_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_pd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
+; CHECK-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa8,0xda]
+; CHECK-NEXT: vfmadd231pd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xb8,0xd1]
+; CHECK-NEXT: vaddpd %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.mask3.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
+ %res1 = call <2 x double> @llvm.x86.avx512.mask3.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
+ %res2 = fadd <2 x double> %res, %res1
+ ret <2 x double> %res2
+}
+
+declare <2 x double> @llvm.x86.avx512.maskz.vfmadd.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)
+
+define <2 x double>@test_int_x86_avx512_maskz_vfmadd_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_pd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
+; CHECK-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa8,0xda]
+; CHECK-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0xa8,0xca]
+; CHECK-NEXT: vaddpd %xmm3, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.maskz.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
+ %res1 = call <2 x double> @llvm.x86.avx512.maskz.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
+ %res2 = fadd <2 x double> %res, %res1
+ ret <2 x double> %res2
+}
+
+define <4 x double>@test_int_x86_avx512_mask_vfmadd_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_pd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
+; CHECK-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa8,0xda]
+; CHECK-NEXT: vfmadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x98,0xc1]
+; CHECK-NEXT: vaddpd %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
+ %res1 = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
+ %res2 = fadd <4 x double> %res, %res1
+ ret <4 x double> %res2
+}
+
+declare <4 x double> @llvm.x86.avx512.mask3.vfmadd.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)
+
+define <4 x double>@test_int_x86_avx512_mask3_vfmadd_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_pd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
+; CHECK-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa8,0xda]
+; CHECK-NEXT: vfmadd231pd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xb8,0xd1]
+; CHECK-NEXT: vaddpd %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.mask3.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
+ %res1 = call <4 x double> @llvm.x86.avx512.mask3.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
+ %res2 = fadd <4 x double> %res, %res1
+ ret <4 x double> %res2
+}
+
+declare <4 x double> @llvm.x86.avx512.maskz.vfmadd.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)
+
+define <4 x double>@test_int_x86_avx512_maskz_vfmadd_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_pd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
+; CHECK-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa8,0xda]
+; CHECK-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm1 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0xa8,0xca]
+; CHECK-NEXT: vaddpd %ymm3, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.maskz.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
+ %res1 = call <4 x double> @llvm.x86.avx512.maskz.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
+ %res2 = fadd <4 x double> %res, %res1
+ ret <4 x double> %res2
+}
+
+define <4 x float>@test_int_x86_avx512_mask_vfmadd_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
+; CHECK-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa8,0xda]
+; CHECK-NEXT: vfmadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x98,0xc1]
+; CHECK-NEXT: vaddps %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
+ %res1 = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
+ %res2 = fadd <4 x float> %res, %res1
+ ret <4 x float> %res2
+}
+
+declare <4 x float> @llvm.x86.avx512.mask3.vfmadd.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
+
+define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
+; CHECK-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa8,0xda]
+; CHECK-NEXT: vfmadd231ps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xb8,0xd1]
+; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
+ %res1 = call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
+ %res2 = fadd <4 x float> %res, %res1
+ ret <4 x float> %res2
+}
+
+declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
+
+define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
+; CHECK-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa8,0xda]
+; CHECK-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0xa8,0xca]
+; CHECK-NEXT: vaddps %xmm3, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
+ %res1 = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
+ %res2 = fadd <4 x float> %res, %res1
+ ret <4 x float> %res2
+}
+
+define <8 x float>@test_int_x86_avx512_mask_vfmadd_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
+; CHECK-NEXT: vfmadd213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa8,0xda]
+; CHECK-NEXT: vfmadd132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x98,0xc1]
+; CHECK-NEXT: vaddps %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
+ %res1 = call <8 x float> @llvm.x86.avx512.mask.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
+ %res2 = fadd <8 x float> %res, %res1
+ ret <8 x float> %res2
+}
+
+declare <8 x float> @llvm.x86.avx512.mask3.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)
+
+define <8 x float>@test_int_x86_avx512_mask3_vfmadd_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
+; CHECK-NEXT: vfmadd213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa8,0xda]
+; CHECK-NEXT: vfmadd231ps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xb8,0xd1]
+; CHECK-NEXT: vaddps %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask3.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
+ %res1 = call <8 x float> @llvm.x86.avx512.mask3.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
+ %res2 = fadd <8 x float> %res, %res1
+ ret <8 x float> %res2
+}
+
+declare <8 x float> @llvm.x86.avx512.maskz.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)
+
+define <8 x float>@test_int_x86_avx512_maskz_vfmadd_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
+; CHECK-NEXT: vfmadd213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa8,0xda]
+; CHECK-NEXT: vfmadd213ps %ymm2, %ymm0, %ymm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0xa8,0xca]
+; CHECK-NEXT: vaddps %ymm3, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.maskz.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
+ %res1 = call <8 x float> @llvm.x86.avx512.maskz.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
+ %res2 = fadd <8 x float> %res, %res1
+ ret <8 x float> %res2
+}
+
+
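+; mask3 vfmsub variants: the mask merges into the third operand, so the
+; masked call selects the 231-form with {%k1} on that register.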
+declare <2 x double> @llvm.x86.avx512.mask3.vfmsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)
+
+define <2 x double>@test_int_x86_avx512_mask3_vfmsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_pd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
+; CHECK-NEXT: vfmsub213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xaa,0xda]
+; CHECK-NEXT: vfmsub231pd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xba,0xd1]
+; CHECK-NEXT: vaddpd %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.mask3.vfmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
+ %res1 = call <2 x double> @llvm.x86.avx512.mask3.vfmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
+ %res2 = fadd <2 x double> %res, %res1
+ ret <2 x double> %res2
+}
+
+
+declare <4 x double> @llvm.x86.avx512.mask3.vfmsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)
+
+define <4 x double>@test_int_x86_avx512_mask3_vfmsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_pd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
+; CHECK-NEXT: vfmsub213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xaa,0xda]
+; CHECK-NEXT: vfmsub231pd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xba,0xd1]
+; CHECK-NEXT: vaddpd %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.mask3.vfmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
+ %res1 = call <4 x double> @llvm.x86.avx512.mask3.vfmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
+ %res2 = fadd <4 x double> %res, %res1
+ ret <4 x double> %res2
+}
+
+declare <4 x float> @llvm.x86.avx512.mask3.vfmsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
+
+define <4 x float>@test_int_x86_avx512_mask3_vfmsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
+; CHECK-NEXT: vfmsub213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xaa,0xda]
+; CHECK-NEXT: vfmsub231ps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xba,0xd1]
+; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
+ %res1 = call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
+ %res2 = fadd <4 x float> %res, %res1
+ ret <4 x float> %res2
+}
+
+declare <8 x float> @llvm.x86.avx512.mask3.vfmsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)
+
+define <8 x float>@test_int_x86_avx512_mask3_vfmsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
+; CHECK-NEXT: vfmsub213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xaa,0xda]
+; CHECK-NEXT: vfmsub231ps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xba,0xd1]
+; CHECK-NEXT: vaddps %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask3.vfmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
+ %res1 = call <8 x float> @llvm.x86.avx512.mask3.vfmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
+ %res2 = fadd <8 x float> %res, %res1
+ ret <8 x float> %res2
+}
+
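+; Simple masked tests for the negated forms, vfnmadd and vfnmsub, mirroring
+; the vfmadd cases above.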
+declare <8 x float> @llvm.x86.avx512.mask.vfnmadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone
+
+define <8 x float> @test_mask_vfnmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfnmadd256_ps:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmadd132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x9c,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask.vfnmadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) nounwind
+ ret <8 x float> %res
+}
+
+declare <4 x float> @llvm.x86.avx512.mask.vfnmadd.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
+
+define <4 x float> @test_mask_vfnmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfnmadd128_ps:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x9c,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfnmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
+ ret <4 x float> %res
+}
+
+declare <4 x double> @llvm.x86.avx512.mask.vfnmadd.pd.256(<4 x double>, <4 x double>, <4 x double>, i8) nounwind readnone
+
+define <4 x double> @test_mask_vfnmadd256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfnmadd256_pd:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x9c,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.mask.vfnmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
+ ret <4 x double> %res
+}
+
+declare <2 x double> @llvm.x86.avx512.mask.vfnmadd.pd.128(<2 x double>, <2 x double>, <2 x double>, i8) nounwind readnone
+
+define <2 x double> @test_mask_vfnmadd128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfnmadd128_pd:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x9c,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.mask.vfnmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
+ ret <2 x double> %res
+}
+
+declare <8 x float> @llvm.x86.avx512.mask.vfnmsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone
+
+define <8 x float> @test_mask_vfnmsub256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfnmsub256_ps:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmsub132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x9e,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask.vfnmsub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) nounwind
+ ret <8 x float> %res
+}
+
+declare <4 x float> @llvm.x86.avx512.mask.vfnmsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
+
+define <4 x float> @test_mask_vfnmsub128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfnmsub128_ps:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmsub132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x9e,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfnmsub.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
+ ret <4 x float> %res
+}
+
+declare <4 x double> @llvm.x86.avx512.mask.vfnmsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8) nounwind readnone
+
+define <4 x double> @test_mask_vfnmsub256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfnmsub256_pd:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmsub132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x9e,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.mask.vfnmsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
+ ret <4 x double> %res
+}
+
+declare <2 x double> @llvm.x86.avx512.mask.vfnmsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8) nounwind readnone
+
+define <2 x double> @test_mask_vfnmsub128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfnmsub128_pd:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmsub132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x9e,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.mask.vfnmsub.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
+ ret <2 x double> %res
+}
+
+
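+; test_int_* coverage for the negated forms: vfnmsub in mask (132) and
+; mask3 (231) variants, followed by vfnmadd in mask (132) variants.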
+define <2 x double>@test_int_x86_avx512_mask_vfnmsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vfnmsub_pd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
+; CHECK-NEXT: vfnmsub213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xae,0xda]
+; CHECK-NEXT: vfnmsub132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x9e,0xc1]
+; CHECK-NEXT: vaddpd %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.mask.vfnmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
+ %res1 = call <2 x double> @llvm.x86.avx512.mask.vfnmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
+ %res2 = fadd <2 x double> %res, %res1
+ ret <2 x double> %res2
+}
+
+declare <2 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)
+
+define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_pd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
+; CHECK-NEXT: vfnmsub213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xae,0xda]
+; CHECK-NEXT: vfnmsub231pd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbe,0xd1]
+; CHECK-NEXT: vaddpd %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
+ %res1 = call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
+ %res2 = fadd <2 x double> %res, %res1
+ ret <2 x double> %res2
+}
+
+define <4 x double>@test_int_x86_avx512_mask_vfnmsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vfnmsub_pd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
+; CHECK-NEXT: vfnmsub213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xae,0xda]
+; CHECK-NEXT: vfnmsub132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x9e,0xc1]
+; CHECK-NEXT: vaddpd %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.mask.vfnmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
+ %res1 = call <4 x double> @llvm.x86.avx512.mask.vfnmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
+ %res2 = fadd <4 x double> %res, %res1
+ ret <4 x double> %res2
+}
+
+declare <4 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)
+
+define <4 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_pd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
+; CHECK-NEXT: vfnmsub213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xae,0xda]
+; CHECK-NEXT: vfnmsub231pd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xbe,0xd1]
+; CHECK-NEXT: vaddpd %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
+ %res1 = call <4 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
+ %res2 = fadd <4 x double> %res, %res1
+ ret <4 x double> %res2
+}
+
+define <4 x float>@test_int_x86_avx512_mask_vfnmsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vfnmsub_ps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
+; CHECK-NEXT: vfnmsub213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xae,0xda]
+; CHECK-NEXT: vfnmsub132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x9e,0xc1]
+; CHECK-NEXT: vaddps %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfnmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
+ %res1 = call <4 x float> @llvm.x86.avx512.mask.vfnmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
+ %res2 = fadd <4 x float> %res, %res1
+ ret <4 x float> %res2
+}
+
+declare <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
+
+define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_ps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
+; CHECK-NEXT: vfnmsub213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xae,0xda]
+; CHECK-NEXT: vfnmsub231ps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbe,0xd1]
+; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
+ %res1 = call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
+ %res2 = fadd <4 x float> %res, %res1
+ ret <4 x float> %res2
+}
+
+define <8 x float>@test_int_x86_avx512_mask_vfnmsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vfnmsub_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
+; CHECK-NEXT: vfnmsub213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xae,0xda]
+; CHECK-NEXT: vfnmsub132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x9e,0xc1]
+; CHECK-NEXT: vaddps %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask.vfnmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
+ %res1 = call <8 x float> @llvm.x86.avx512.mask.vfnmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
+ %res2 = fadd <8 x float> %res, %res1
+ ret <8 x float> %res2
+}
+
+declare <8 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)
+
+define <8 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
+; CHECK-NEXT: vfnmsub213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xae,0xda]
+; CHECK-NEXT: vfnmsub231ps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xbe,0xd1]
+; CHECK-NEXT: vaddps %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
+ %res1 = call <8 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
+ %res2 = fadd <8 x float> %res, %res1
+ ret <8 x float> %res2
+}
+
+define <2 x double>@test_int_x86_avx512_mask_vfnmadd_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vfnmadd_pd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
+; CHECK-NEXT: vfnmadd213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xac,0xda]
+; CHECK-NEXT: vfnmadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x9c,0xc1]
+; CHECK-NEXT: vaddpd %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.mask.vfnmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
+ %res1 = call <2 x double> @llvm.x86.avx512.mask.vfnmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
+ %res2 = fadd <2 x double> %res, %res1
+ ret <2 x double> %res2
+}
+
+define <4 x double>@test_int_x86_avx512_mask_vfnmadd_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vfnmadd_pd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
+; CHECK-NEXT: vfnmadd213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xac,0xda]
+; CHECK-NEXT: vfnmadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x9c,0xc1]
+; CHECK-NEXT: vaddpd %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.mask.vfnmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
+ %res1 = call <4 x double> @llvm.x86.avx512.mask.vfnmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
+ %res2 = fadd <4 x double> %res, %res1
+ ret <4 x double> %res2
+}
+
+define <4 x float>@test_int_x86_avx512_mask_vfnmadd_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vfnmadd_ps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
+; CHECK-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xac,0xda]
+; CHECK-NEXT: vfnmadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x9c,0xc1]
+; CHECK-NEXT: vaddps %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfnmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
+ %res1 = call <4 x float> @llvm.x86.avx512.mask.vfnmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
+ %res2 = fadd <4 x float> %res, %res1
+ ret <4 x float> %res2
+}
+
+define <8 x float>@test_int_x86_avx512_mask_vfnmadd_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vfnmadd_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
+; CHECK-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xac,0xda]
+; CHECK-NEXT: vfnmadd132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x9c,0xc1]
+; CHECK-NEXT: vaddps %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask.vfnmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
+ %res1 = call <8 x float> @llvm.x86.avx512.mask.vfnmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
+ %res2 = fadd <8 x float> %res, %res1
+ ret <8 x float> %res2
+}
+
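+; vfmaddsub performs a fused multiply with alternating add and subtract
+; across lanes; the simple masked tests again select the 132-form under {%k1}.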
+declare <8 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone
+
+define <8 x float> @test_mask_fmaddsub256_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c, i8 %mask) {
+; CHECK-LABEL: test_mask_fmaddsub256_ps:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmaddsub132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x96,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> %c, i8 %mask)
+ ret <8 x float> %res
+}
+
+declare <4 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
+
+define <4 x float> @test_mask_fmaddsub128_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
+; CHECK-LABEL: test_mask_fmaddsub128_ps:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmaddsub132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x96,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.128(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask)
+ ret <4 x float> %res
+}
+
+declare <4 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8) nounwind readnone
+
+define <4 x double> @test_mask_vfmaddsub256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfmaddsub256_pd:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmaddsub132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x96,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
+ ret <4 x double> %res
+}
+
+declare <2 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8) nounwind readnone
+
+define <2 x double> @test_mask_vfmaddsub128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfmaddsub128_pd:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmaddsub132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x96,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
+ ret <2 x double> %res
+}
+
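+; test_int_* vfmaddsub coverage in mask (132), mask3 (231), and maskz
+; (213 with {z} zeroing) variants for pd/ps at 128 and 256 bits.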
+define <2 x double>@test_int_x86_avx512_mask_vfmaddsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vfmaddsub_pd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
+; CHECK-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa6,0xda]
+; CHECK-NEXT: vfmaddsub132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x96,0xc1]
+; CHECK-NEXT: vaddpd %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
+ %res1 = call <2 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
+ %res2 = fadd <2 x double> %res, %res1
+ ret <2 x double> %res2
+}
+
+declare <2 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)
+
+define <2 x double>@test_int_x86_avx512_mask3_vfmaddsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmaddsub_pd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
+; CHECK-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa6,0xda]
+; CHECK-NEXT: vfmaddsub231pd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xb6,0xd1]
+; CHECK-NEXT: vaddpd %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
+ %res1 = call <2 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
+ %res2 = fadd <2 x double> %res, %res1
+ ret <2 x double> %res2
+}
+
+declare <2 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)
+
+define <2 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_maskz_vfmaddsub_pd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
+; CHECK-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa6,0xda]
+; CHECK-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0xa6,0xca]
+; CHECK-NEXT: vaddpd %xmm3, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
+ %res1 = call <2 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
+ %res2 = fadd <2 x double> %res, %res1
+ ret <2 x double> %res2
+}
+
+define <4 x double>@test_int_x86_avx512_mask_vfmaddsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vfmaddsub_pd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
+; CHECK-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa6,0xda]
+; CHECK-NEXT: vfmaddsub132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x96,0xc1]
+; CHECK-NEXT: vaddpd %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
+ %res1 = call <4 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
+ %res2 = fadd <4 x double> %res, %res1
+ ret <4 x double> %res2
+}
+
+declare <4 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)
+
+define <4 x double>@test_int_x86_avx512_mask3_vfmaddsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmaddsub_pd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
+; CHECK-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa6,0xda]
+; CHECK-NEXT: vfmaddsub231pd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xb6,0xd1]
+; CHECK-NEXT: vaddpd %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
+ %res1 = call <4 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
+ %res2 = fadd <4 x double> %res, %res1
+ ret <4 x double> %res2
+}
+
+declare <4 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)
+
+define <4 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_maskz_vfmaddsub_pd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
+; CHECK-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa6,0xda]
+; CHECK-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm1 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0xa6,0xca]
+; CHECK-NEXT: vaddpd %ymm3, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
+ %res1 = call <4 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
+ %res2 = fadd <4 x double> %res, %res1
+ ret <4 x double> %res2
+}
+
+define <4 x float>@test_int_x86_avx512_mask_vfmaddsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vfmaddsub_ps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
+; CHECK-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa6,0xda]
+; CHECK-NEXT: vfmaddsub132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x96,0xc1]
+; CHECK-NEXT: vaddps %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
+ %res1 = call <4 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
+ %res2 = fadd <4 x float> %res, %res1
+ ret <4 x float> %res2
+}
+
+declare <4 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
+
+define <4 x float>@test_int_x86_avx512_mask3_vfmaddsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
+; CHECK-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa6,0xda]
+; CHECK-NEXT: vfmaddsub231ps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xb6,0xd1]
+; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
+ %res1 = call <4 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
+ %res2 = fadd <4 x float> %res, %res1
+ ret <4 x float> %res2
+}
+
+declare <4 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
+
+define <4 x float>@test_int_x86_avx512_maskz_vfmaddsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
+; CHECK-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa6,0xda]
+; CHECK-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0xa6,0xca]
+; CHECK-NEXT: vaddps %xmm3, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
+ %res1 = call <4 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
+ %res2 = fadd <4 x float> %res, %res1
+ ret <4 x float> %res2
+}
+
+define <8 x float>@test_int_x86_avx512_mask_vfmaddsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vfmaddsub_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
+; CHECK-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa6,0xda]
+; CHECK-NEXT: vfmaddsub132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x96,0xc1]
+; CHECK-NEXT: vaddps %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
+ %res1 = call <8 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
+ %res2 = fadd <8 x float> %res, %res1
+ ret <8 x float> %res2
+}
+
+declare <8 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)
+
+define <8 x float>@test_int_x86_avx512_mask3_vfmaddsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
+; CHECK-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa6,0xda]
+; CHECK-NEXT: vfmaddsub231ps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xb6,0xd1]
+; CHECK-NEXT: vaddps %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
+ %res1 = call <8 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
+ %res2 = fadd <8 x float> %res, %res1
+ ret <8 x float> %res2
+}
+
+declare <8 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)
+
+define <8 x float>@test_int_x86_avx512_maskz_vfmaddsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
+; CHECK-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa6,0xda]
+; CHECK-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0xa6,0xca]
+; CHECK-NEXT: vaddps %ymm3, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
+ %res1 = call <8 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
+ %res2 = fadd <8 x float> %res, %res1
+ ret <8 x float> %res2
+}
+
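+; vfmsubadd, the complementary alternation to vfmaddsub, tested in its
+; mask3 (231) variants only.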
+declare <2 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)
+
+define <2 x double>@test_int_x86_avx512_mask3_vfmsubadd_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsubadd_pd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
+; CHECK-NEXT: vfmsubadd213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa7,0xda]
+; CHECK-NEXT: vfmsubadd231pd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xb7,0xd1]
+; CHECK-NEXT: vaddpd %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
+ %res1 = call <2 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
+  %res2 = fadd <2 x double> %res, %res1
+ ret <2 x double> %res2
+}
+
+declare <4 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)
+
+define <4 x double>@test_int_x86_avx512_mask3_vfmsubadd_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsubadd_pd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
+; CHECK-NEXT: vfmsubadd213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa7,0xda]
+; CHECK-NEXT: vfmsubadd231pd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xb7,0xd1]
+; CHECK-NEXT: vaddpd %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
+ %res1 = call <4 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
+  %res2 = fadd <4 x double> %res, %res1
+ ret <4 x double> %res2
+}
+
+declare <4 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)
+
+define <4 x float>@test_int_x86_avx512_mask3_vfmsubadd_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
+; CHECK-NEXT: vfmsubadd213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa7,0xda]
+; CHECK-NEXT: vfmsubadd231ps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xb7,0xd1]
+; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
+ %res1 = call <4 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
+  %res2 = fadd <4 x float> %res, %res1
+ ret <4 x float> %res2
+}
+
+declare <8 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)
+
+define <8 x float>@test_int_x86_avx512_mask3_vfmsubadd_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
+; CHECK-NEXT: vfmsubadd213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa7,0xda]
+; CHECK-NEXT: vfmsubadd231ps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xb7,0xd1]
+; CHECK-NEXT: vaddps %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc3]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
+ %res1 = call <8 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
+  %res2 = fadd <8 x float> %res, %res1
+ ret <8 x float> %res2
+}
+
+
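+; Operand-form tests for the 128-bit ps fmadd intrinsic: register (_r), an
+; all-ones mask folding to the unmasked VEX encoding (_rz), memory operands
+; with varying alignment (_rmk*), and an embedded {1to4} broadcast (_rmb).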
+define <4 x float> @test_mask_vfmadd128_ps_r(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfmadd128_ps_r:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x98,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mask_vfmadd128_ps_rz(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
+; CHECK-LABEL: test_mask_vfmadd128_ps_rz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mask_vfmadd128_ps_rmk(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfmadd128_ps_rmk:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa8,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %a2 = load <4 x float>, <4 x float>* %ptr_a2
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mask_vfmadd128_ps_rmka(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfmadd128_ps_rmka:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa8,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %a2 = load <4 x float>, <4 x float>* %ptr_a2, align 8
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mask_vfmadd128_ps_rmkz(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2) {
+; CHECK-LABEL: test_mask_vfmadd128_ps_rmkz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %a2 = load <4 x float>, <4 x float>* %ptr_a2
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mask_vfmadd128_ps_rmkza(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2) {
+; CHECK-LABEL: test_mask_vfmadd128_ps_rmkza:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vfmadd213ps (%rdi), %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %a2 = load <4 x float>, <4 x float>* %ptr_a2, align 4
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
+ ret <4 x float> %res
+}
+
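+; The splats in the _rmb tests are built from a scalar load followed by a
+; chain of insertelements; the backend is expected to recognize the pattern
+; and fold the load into the EVEX embedded-broadcast operand, (%rdi){1to4},
+; checked below.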
+define <4 x float> @test_mask_vfmadd128_ps_rmb(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfmadd128_ps_rmb:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x19,0xa8,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %q = load float, float* %ptr_a2
+ %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
+ %vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
+ %vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
+ %vecinit6.i = insertelement <4 x float> %vecinit4.i, float %q, i32 3
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %vecinit6.i, i8 %mask) nounwind
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mask_vfmadd128_ps_rmba(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfmadd128_ps_rmba:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x19,0xa8,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %q = load float, float* %ptr_a2, align 4
+ %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
+ %vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
+ %vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
+ %vecinit6.i = insertelement <4 x float> %vecinit4.i, float %q, i32 3
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %vecinit6.i, i8 %mask) nounwind
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mask_vfmadd128_ps_rmbz(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2) {
+; CHECK-LABEL: test_mask_vfmadd128_ps_rmbz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa8,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %q = load float, float* %ptr_a2
+ %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
+ %vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
+ %vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
+ %vecinit6.i = insertelement <4 x float> %vecinit4.i, float %q, i32 3
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %vecinit6.i, i8 -1) nounwind
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mask_vfmadd128_ps_rmbza(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2) {
+; CHECK-LABEL: test_mask_vfmadd128_ps_rmbza:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa8,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %q = load float, float* %ptr_a2, align 4
+ %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
+ %vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
+ %vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
+ %vecinit6.i = insertelement <4 x float> %vecinit4.i, float %q, i32 3
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %vecinit6.i, i8 -1) nounwind
+ ret <4 x float> %res
+}
+
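+; The same register/memory/masked operand forms are repeated below for the
+; <2 x double> and <4 x double> variants of the intrinsic.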
+define <2 x double> @test_mask_vfmadd128_pd_r(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfmadd128_pd_r:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x98,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
+ ret <2 x double> %res
+}
+
+define <2 x double> @test_mask_vfmadd128_pd_rz(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
+; CHECK-LABEL: test_mask_vfmadd128_pd_rz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 -1) nounwind
+ ret <2 x double> %res
+}
+
+define <2 x double> @test_mask_vfmadd128_pd_rmk(<2 x double> %a0, <2 x double> %a1, <2 x double>* %ptr_a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfmadd128_pd_rmk:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: vfmadd213pd (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa8,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %a2 = load <2 x double>, <2 x double>* %ptr_a2
+ %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
+ ret <2 x double> %res
+}
+
+define <2 x double> @test_mask_vfmadd128_pd_rmkz(<2 x double> %a0, <2 x double> %a1, <2 x double>* %ptr_a2) {
+; CHECK-LABEL: test_mask_vfmadd128_pd_rmkz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vfmadd213pd (%rdi), %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %a2 = load <2 x double>, <2 x double>* %ptr_a2
+ %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 -1) nounwind
+ ret <2 x double> %res
+}
+
+define <4 x double> @test_mask_vfmadd256_pd_r(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfmadd256_pd_r:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x98,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
+ ret <4 x double> %res
+}
+
+define <4 x double> @test_mask_vfmadd256_pd_rz(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
+; CHECK-LABEL: test_mask_vfmadd256_pd_rz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 -1) nounwind
+ ret <4 x double> %res
+}
+
+define <4 x double> @test_mask_vfmadd256_pd_rmk(<4 x double> %a0, <4 x double> %a1, <4 x double>* %ptr_a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfmadd256_pd_rmk:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; CHECK-NEXT: vfmadd213pd (%rdi), %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0xa8,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %a2 = load <4 x double>, <4 x double>* %ptr_a2
+ %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
+ ret <4 x double> %res
+}
+
+define <4 x double> @test_mask_vfmadd256_pd_rmkz(<4 x double> %a0, <4 x double> %a1, <4 x double>* %ptr_a2) {
+; CHECK-LABEL: test_mask_vfmadd256_pd_rmkz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vfmadd213pd (%rdi), %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %a2 = load <4 x double>, <4 x double>* %ptr_a2
+ %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 -1) nounwind
+ ret <4 x double> %res
+}
+
diff --git a/test/CodeGen/X86/avx512vl-logic.ll b/test/CodeGen/X86/avx512vl-logic.ll
index e820bc4ca824..83fa8d4c34cd 100644
--- a/test/CodeGen/X86/avx512vl-logic.ll
+++ b/test/CodeGen/X86/avx512vl-logic.ll
@@ -21,7 +21,7 @@ define <8 x i32> @vpandnd256(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readno
; CHECK-LABEL: vpandnd256:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm1
-; CHECK-NEXT: vpandnd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vpandn %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
@@ -74,7 +74,7 @@ define <4 x i64> @vpandnq256(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readno
; CHECK-LABEL: vpandnq256:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: vpaddq {{.*}}(%rip){1to4}, %ymm0, %ymm0
-; CHECK-NEXT: vpandnq %ymm0, %ymm1, %ymm0
+; CHECK-NEXT: vpandn %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
@@ -129,7 +129,7 @@ define <4 x i32> @vpandnd128(<4 x i32> %a, <4 x i32> %b) nounwind uwtable readno
; CHECK-LABEL: vpandnd128:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
-; CHECK-NEXT: vpandnd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vpandn %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
@@ -182,7 +182,7 @@ define <2 x i64> @vpandnq128(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readno
; CHECK-LABEL: vpandnq128:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm0
-; CHECK-NEXT: vpandnq %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vpandn %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
@@ -228,7 +228,7 @@ define <4 x double> @test_mm256_mask_andnot_pd(<4 x double> %__W, i8 zeroext %__
;
; SKX-LABEL: test_mm256_mask_andnot_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnpd %ymm2, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -252,7 +252,7 @@ define <4 x double> @test_mm256_maskz_andnot_pd(i8 zeroext %__U, <4 x double> %_
;
; SKX-LABEL: test_mm256_maskz_andnot_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnpd %ymm1, %ymm0, %ymm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -276,7 +276,7 @@ define <2 x double> @test_mm_mask_andnot_pd(<2 x double> %__W, i8 zeroext %__U,
;
; SKX-LABEL: test_mm_mask_andnot_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnpd %xmm2, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -300,7 +300,7 @@ define <2 x double> @test_mm_maskz_andnot_pd(i8 zeroext %__U, <2 x double> %__A,
;
; SKX-LABEL: test_mm_maskz_andnot_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnpd %xmm1, %xmm0, %xmm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -324,7 +324,7 @@ define <8 x float> @test_mm256_mask_andnot_ps(<8 x float> %__W, i8 zeroext %__U,
;
; SKX-LABEL: test_mm256_mask_andnot_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnps %ymm2, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -347,7 +347,7 @@ define <8 x float> @test_mm256_maskz_andnot_ps(i8 zeroext %__U, <8 x float> %__A
;
; SKX-LABEL: test_mm256_maskz_andnot_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnps %ymm1, %ymm0, %ymm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -370,7 +370,7 @@ define <4 x float> @test_mm_mask_andnot_ps(<4 x float> %__W, i8 zeroext %__U, <4
;
; SKX-LABEL: test_mm_mask_andnot_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnps %xmm2, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -394,7 +394,7 @@ define <4 x float> @test_mm_maskz_andnot_ps(i8 zeroext %__U, <4 x float> %__A, <
;
; SKX-LABEL: test_mm_maskz_andnot_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandnps %xmm1, %xmm0, %xmm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -418,7 +418,7 @@ define <4 x double> @test_mm256_mask_and_pd(<4 x double> %__W, i8 zeroext %__U,
;
; SKX-LABEL: test_mm256_mask_and_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandpd %ymm1, %ymm2, %ymm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -441,7 +441,7 @@ define <4 x double> @test_mm256_maskz_and_pd(i8 zeroext %__U, <4 x double> %__A,
;
; SKX-LABEL: test_mm256_maskz_and_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandpd %ymm0, %ymm1, %ymm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -464,7 +464,7 @@ define <2 x double> @test_mm_mask_and_pd(<2 x double> %__W, i8 zeroext %__U, <2
;
; SKX-LABEL: test_mm_mask_and_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandpd %xmm1, %xmm2, %xmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -487,7 +487,7 @@ define <2 x double> @test_mm_maskz_and_pd(i8 zeroext %__U, <2 x double> %__A, <2
;
; SKX-LABEL: test_mm_maskz_and_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandpd %xmm0, %xmm1, %xmm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -510,7 +510,7 @@ define <8 x float> @test_mm256_mask_and_ps(<8 x float> %__W, i8 zeroext %__U, <8
;
; SKX-LABEL: test_mm256_mask_and_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %ymm1, %ymm2, %ymm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -532,7 +532,7 @@ define <8 x float> @test_mm256_maskz_and_ps(i8 zeroext %__U, <8 x float> %__A, <
;
; SKX-LABEL: test_mm256_maskz_and_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %ymm0, %ymm1, %ymm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -554,7 +554,7 @@ define <4 x float> @test_mm_mask_and_ps(<4 x float> %__W, i8 zeroext %__U, <4 x
;
; SKX-LABEL: test_mm_mask_and_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %xmm1, %xmm2, %xmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -577,7 +577,7 @@ define <4 x float> @test_mm_maskz_and_ps(i8 zeroext %__U, <4 x float> %__A, <4 x
;
; SKX-LABEL: test_mm_maskz_and_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vandps %xmm0, %xmm1, %xmm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -600,7 +600,7 @@ define <4 x double> @test_mm256_mask_xor_pd(<4 x double> %__W, i8 zeroext %__U,
;
; SKX-LABEL: test_mm256_mask_xor_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorpd %ymm2, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -623,7 +623,7 @@ define <4 x double> @test_mm256_maskz_xor_pd(i8 zeroext %__U, <4 x double> %__A,
;
; SKX-LABEL: test_mm256_maskz_xor_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorpd %ymm1, %ymm0, %ymm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -646,7 +646,7 @@ define <2 x double> @test_mm_mask_xor_pd(<2 x double> %__W, i8 zeroext %__U, <2
;
; SKX-LABEL: test_mm_mask_xor_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorpd %xmm2, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -669,7 +669,7 @@ define <2 x double> @test_mm_maskz_xor_pd(i8 zeroext %__U, <2 x double> %__A, <2
;
; SKX-LABEL: test_mm_maskz_xor_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorpd %xmm1, %xmm0, %xmm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -692,7 +692,7 @@ define <8 x float> @test_mm256_mask_xor_ps(<8 x float> %__W, i8 zeroext %__U, <8
;
; SKX-LABEL: test_mm256_mask_xor_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorps %ymm2, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -714,7 +714,7 @@ define <8 x float> @test_mm256_maskz_xor_ps(i8 zeroext %__U, <8 x float> %__A, <
;
; SKX-LABEL: test_mm256_maskz_xor_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorps %ymm1, %ymm0, %ymm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -736,7 +736,7 @@ define <4 x float> @test_mm_mask_xor_ps(<4 x float> %__W, i8 zeroext %__U, <4 x
;
; SKX-LABEL: test_mm_mask_xor_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorps %xmm2, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -759,7 +759,7 @@ define <4 x float> @test_mm_maskz_xor_ps(i8 zeroext %__U, <4 x float> %__A, <4 x
;
; SKX-LABEL: test_mm_maskz_xor_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorps %xmm1, %xmm0, %xmm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -782,7 +782,7 @@ define <4 x double> @test_mm256_mask_or_pd(<4 x double> %__W, i8 zeroext %__U, <
;
; SKX-LABEL: test_mm256_mask_or_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorpd %ymm1, %ymm2, %ymm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -805,7 +805,7 @@ define <4 x double> @test_mm256_maskz_or_pd(i8 zeroext %__U, <4 x double> %__A,
;
; SKX-LABEL: test_mm256_maskz_or_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorpd %ymm0, %ymm1, %ymm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -828,7 +828,7 @@ define <2 x double> @test_mm_mask_or_pd(<2 x double> %__W, i8 zeroext %__U, <2 x
;
; SKX-LABEL: test_mm_mask_or_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorpd %xmm1, %xmm2, %xmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -851,7 +851,7 @@ define <2 x double> @test_mm_maskz_or_pd(i8 zeroext %__U, <2 x double> %__A, <2
;
; SKX-LABEL: test_mm_maskz_or_pd:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorpd %xmm0, %xmm1, %xmm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -874,7 +874,7 @@ define <8 x float> @test_mm256_mask_or_ps(<8 x float> %__W, i8 zeroext %__U, <8
;
; SKX-LABEL: test_mm256_mask_or_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorps %ymm1, %ymm2, %ymm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -896,7 +896,7 @@ define <8 x float> @test_mm256_maskz_or_ps(i8 zeroext %__U, <8 x float> %__A, <8
;
; SKX-LABEL: test_mm256_maskz_or_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorps %ymm0, %ymm1, %ymm0 {%k1} {z}
; SKX-NEXT: retq
entry:
@@ -918,7 +918,7 @@ define <4 x float> @test_mm_mask_or_ps(<4 x float> %__W, i8 zeroext %__U, <4 x f
;
; SKX-LABEL: test_mm_mask_or_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorps %xmm1, %xmm2, %xmm0 {%k1}
; SKX-NEXT: retq
entry:
@@ -941,7 +941,7 @@ define <4 x float> @test_mm_maskz_or_ps(i8 zeroext %__U, <4 x float> %__A, <4 x
;
; SKX-LABEL: test_mm_maskz_or_ps:
; SKX: ## BB#0: ## %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vorps %xmm0, %xmm1, %xmm0 {%k1} {z}
; SKX-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/avx512vl-vbroadcast.ll b/test/CodeGen/X86/avx512vl-vbroadcast.ll
index d52041220002..38a461ff0be2 100644
--- a/test/CodeGen/X86/avx512vl-vbroadcast.ll
+++ b/test/CodeGen/X86/avx512vl-vbroadcast.ll
@@ -5,14 +5,14 @@ declare void @func_f32(float)
define <8 x float> @_256_broadcast_ss_spill(float %x) {
; CHECK-LABEL: _256_broadcast_ss_spill:
; CHECK: # BB#0:
-; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .Lcfi0:
-; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: vaddss %xmm0, %xmm0, %xmm0
-; CHECK-NEXT: vmovss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: callq func_f32
-; CHECK-NEXT: vbroadcastss {{[0-9]+}}(%rsp), %ymm0 # 4-byte Folded Reload
-; CHECK-NEXT: popq %rax
+; CHECK-NEXT: vbroadcastss (%rsp), %ymm0 # 16-byte Folded Reload
+; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: retq
%a = fadd float %x, %x
call void @func_f32(float %a)
@@ -24,14 +24,14 @@ define <8 x float> @_256_broadcast_ss_spill(float %x) {
define <4 x float> @_128_broadcast_ss_spill(float %x) {
; CHECK-LABEL: _128_broadcast_ss_spill:
; CHECK: # BB#0:
-; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .Lcfi1:
-; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: vaddss %xmm0, %xmm0, %xmm0
-; CHECK-NEXT: vmovss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: callq func_f32
-; CHECK-NEXT: vbroadcastss {{[0-9]+}}(%rsp), %xmm0 # 4-byte Folded Reload
-; CHECK-NEXT: popq %rax
+; CHECK-NEXT: vbroadcastss (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: retq
%a = fadd float %x, %x
call void @func_f32(float %a)
@@ -44,14 +44,14 @@ declare void @func_f64(double)
define <4 x double> @_256_broadcast_sd_spill(double %x) {
; CHECK-LABEL: _256_broadcast_sd_spill:
; CHECK: # BB#0:
-; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .Lcfi2:
-; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: vaddsd %xmm0, %xmm0, %xmm0
-; CHECK-NEXT: vmovsd %xmm0, (%rsp) # 8-byte Spill
+; CHECK-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: callq func_f64
-; CHECK-NEXT: vbroadcastsd (%rsp), %ymm0 # 8-byte Folded Reload
-; CHECK-NEXT: popq %rax
+; CHECK-NEXT: vbroadcastsd (%rsp), %ymm0 # 16-byte Folded Reload
+; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: retq
%a = fadd double %x, %x
call void @func_f64(double %a)
diff --git a/test/CodeGen/X86/avx512vl-vec-cmp.ll b/test/CodeGen/X86/avx512vl-vec-cmp.ll
index 25b9cc79096f..e0acf2be653e 100644
--- a/test/CodeGen/X86/avx512vl-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512vl-vec-cmp.ll
@@ -5,8 +5,7 @@ define <4 x i64> @test256_1(<4 x i64> %x, <4 x i64> %y) nounwind {
; CHECK-LABEL: test256_1:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
-; CHECK-NEXT: vmovdqa64 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp eq <4 x i64> %x, %y
%max = select <4 x i1> %mask, <4 x i64> %x, <4 x i64> %y
@@ -17,8 +16,7 @@ define <4 x i64> @test256_2(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1) nounwind
; CHECK-LABEL: test256_2:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
-; CHECK-NEXT: vmovdqa64 %ymm2, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmq %ymm2, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp sgt <4 x i64> %x, %y
%max = select <4 x i1> %mask, <4 x i64> %x1, <4 x i64> %y
@@ -29,8 +27,7 @@ define <8 x i32> @test256_3(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1) nounwind
; CHECK-LABEL: test256_3:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k1
-; CHECK-NEXT: vmovdqa32 %ymm2, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm2, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp sge <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x1, <8 x i32> %y
@@ -41,8 +38,7 @@ define <4 x i64> @test256_4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1) nounwind
; CHECK-LABEL: test256_4:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnleuq %ymm1, %ymm0, %k1
-; CHECK-NEXT: vmovdqa64 %ymm2, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmq %ymm2, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp ugt <4 x i64> %x, %y
%max = select <4 x i1> %mask, <4 x i64> %x1, <4 x i64> %y
@@ -53,8 +49,7 @@ define <8 x i32> @test256_5(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwin
; CHECK-LABEL: test256_5:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd (%rdi), %ymm0, %k1
-; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp eq <8 x i32> %x, %y
@@ -66,8 +61,7 @@ define <8 x i32> @test256_5b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
; CHECK-LABEL: test256_5b:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd (%rdi), %ymm0, %k1
-; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp eq <8 x i32> %y, %x
@@ -79,8 +73,7 @@ define <8 x i32> @test256_6(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) noun
; CHECK-LABEL: test256_6:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd (%rdi), %ymm0, %k1
-; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sgt <8 x i32> %x, %y
@@ -92,8 +85,7 @@ define <8 x i32> @test256_6b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nou
; CHECK-LABEL: test256_6b:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd (%rdi), %ymm0, %k1
-; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp slt <8 x i32> %y, %x
@@ -105,8 +97,7 @@ define <8 x i32> @test256_7(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) noun
; CHECK-LABEL: test256_7:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi), %ymm0, %k1
-; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sle <8 x i32> %x, %y
@@ -118,8 +109,7 @@ define <8 x i32> @test256_7b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nou
; CHECK-LABEL: test256_7b:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi), %ymm0, %k1
-; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sge <8 x i32> %y, %x
@@ -131,8 +121,7 @@ define <8 x i32> @test256_8(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) noun
; CHECK-LABEL: test256_8:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleud (%rdi), %ymm0, %k1
-; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp ule <8 x i32> %x, %y
@@ -144,8 +133,7 @@ define <8 x i32> @test256_8b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nou
; CHECK-LABEL: test256_8b:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleud (%rdi), %ymm0, %k1
-; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp uge <8 x i32> %y, %x
@@ -158,8 +146,7 @@ define <8 x i32> @test256_9(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1, <8 x i32>
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 {%k1}
-; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp eq <8 x i32> %x1, %y1
%mask0 = icmp eq <8 x i32> %x, %y
@@ -173,8 +160,7 @@ define <4 x i64> @test256_10(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleq %ymm1, %ymm0, %k1
; CHECK-NEXT: vpcmpleq %ymm2, %ymm3, %k1 {%k1}
-; CHECK-NEXT: vmovdqa64 %ymm0, %ymm2 {%k1}
-; CHECK-NEXT: vmovdqa %ymm2, %ymm0
+; CHECK-NEXT: vpblendmq %ymm0, %ymm2, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <4 x i64> %x1, %y1
%mask0 = icmp sle <4 x i64> %x, %y
@@ -188,8 +174,7 @@ define <4 x i64> @test256_11(<4 x i64> %x, <4 x i64>* %y.ptr, <4 x i64> %x1, <4
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpcmpgtq (%rdi), %ymm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqa64 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sgt <4 x i64> %x1, %y1
%y = load <4 x i64>, <4 x i64>* %y.ptr, align 4
@@ -204,8 +189,7 @@ define <8 x i32> @test256_12(<8 x i32> %x, <8 x i32>* %y.ptr, <8 x i32> %x1, <8
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %ymm1, %ymm2, %k1
; CHECK-NEXT: vpcmpleud (%rdi), %ymm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <8 x i32> %x1, %y1
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
@@ -219,8 +203,7 @@ define <4 x i64> @test256_13(<4 x i64> %x, <4 x i64> %x1, i64* %yb.ptr) nounwind
; CHECK-LABEL: test256_13:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k1
-; CHECK-NEXT: vmovdqa64 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <4 x i64> undef, i64 %yb, i32 0
@@ -234,8 +217,7 @@ define <8 x i32> @test256_14(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1) nounwind
; CHECK-LABEL: test256_14:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi){1to8}, %ymm0, %k1
-; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <8 x i32> undef, i32 %yb, i32 0
@@ -250,8 +232,7 @@ define <8 x i32> @test256_15(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1, <8 x i32
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %ymm1, %ymm2, %k1
; CHECK-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <8 x i32> %x1, %y1
%yb = load i32, i32* %yb.ptr, align 4
@@ -268,8 +249,7 @@ define <4 x i64> @test256_16(<4 x i64> %x, i64* %yb.ptr, <4 x i64> %x1, <4 x i64
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleq %ymm1, %ymm2, %k1
; CHECK-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqa64 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <4 x i64> %x1, %y1
%yb = load i64, i64* %yb.ptr, align 4
@@ -285,8 +265,7 @@ define <8 x i32> @test256_17(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
; CHECK-LABEL: test256_17:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpneqd (%rdi), %ymm0, %k1
-; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp ne <8 x i32> %x, %y
@@ -298,8 +277,7 @@ define <8 x i32> @test256_18(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
; CHECK-LABEL: test256_18:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpneqd (%rdi), %ymm0, %k1
-; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp ne <8 x i32> %y, %x
@@ -311,8 +289,7 @@ define <8 x i32> @test256_19(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
; CHECK-LABEL: test256_19:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnltud (%rdi), %ymm0, %k1
-; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp uge <8 x i32> %x, %y
@@ -324,8 +301,7 @@ define <8 x i32> @test256_20(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
; CHECK-LABEL: test256_20:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleud (%rdi), %ymm0, %k1
-; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp uge <8 x i32> %y, %x
@@ -337,8 +313,7 @@ define <2 x i64> @test128_1(<2 x i64> %x, <2 x i64> %y) nounwind {
; CHECK-LABEL: test128_1:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k1
-; CHECK-NEXT: vmovdqa64 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp eq <2 x i64> %x, %y
%max = select <2 x i1> %mask, <2 x i64> %x, <2 x i64> %y
@@ -349,8 +324,7 @@ define <2 x i64> @test128_2(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1) nounwind
; CHECK-LABEL: test128_2:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
-; CHECK-NEXT: vmovdqa64 %xmm2, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmq %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp sgt <2 x i64> %x, %y
%max = select <2 x i1> %mask, <2 x i64> %x1, <2 x i64> %y
@@ -361,8 +335,7 @@ define <4 x i32> @test128_3(<4 x i32> %x, <4 x i32> %y, <4 x i32> %x1) nounwind
; CHECK-LABEL: test128_3:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k1
-; CHECK-NEXT: vmovdqa32 %xmm2, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp sge <4 x i32> %x, %y
%max = select <4 x i1> %mask, <4 x i32> %x1, <4 x i32> %y
@@ -373,8 +346,7 @@ define <2 x i64> @test128_4(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1) nounwind
; CHECK-LABEL: test128_4:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnleuq %xmm1, %xmm0, %k1
-; CHECK-NEXT: vmovdqa64 %xmm2, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmq %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp ugt <2 x i64> %x, %y
%max = select <2 x i1> %mask, <2 x i64> %x1, <2 x i64> %y
@@ -385,8 +357,7 @@ define <4 x i32> @test128_5(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %yp) nounwin
; CHECK-LABEL: test128_5:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd (%rdi), %xmm0, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %yp, align 4
%mask = icmp eq <4 x i32> %x, %y
@@ -398,8 +369,7 @@ define <4 x i32> @test128_5b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %yp) nounwi
; CHECK-LABEL: test128_5b:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd (%rdi), %xmm0, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %yp, align 4
%mask = icmp eq <4 x i32> %y, %x
@@ -411,8 +381,7 @@ define <4 x i32> @test128_6(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) noun
; CHECK-LABEL: test128_6:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd (%rdi), %xmm0, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp sgt <4 x i32> %x, %y
@@ -424,8 +393,7 @@ define <4 x i32> @test128_6b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
; CHECK-LABEL: test128_6b:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd (%rdi), %xmm0, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp slt <4 x i32> %y, %x
@@ -437,8 +405,7 @@ define <4 x i32> @test128_7(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) noun
; CHECK-LABEL: test128_7:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi), %xmm0, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp sle <4 x i32> %x, %y
@@ -450,8 +417,7 @@ define <4 x i32> @test128_7b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
; CHECK-LABEL: test128_7b:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi), %xmm0, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp sge <4 x i32> %y, %x
@@ -463,8 +429,7 @@ define <4 x i32> @test128_8(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) noun
; CHECK-LABEL: test128_8:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleud (%rdi), %xmm0, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp ule <4 x i32> %x, %y
@@ -476,8 +441,7 @@ define <4 x i32> @test128_8b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
; CHECK-LABEL: test128_8b:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleud (%rdi), %xmm0, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp uge <4 x i32> %y, %x
@@ -490,8 +454,7 @@ define <4 x i32> @test128_9(<4 x i32> %x, <4 x i32> %y, <4 x i32> %x1, <4 x i32>
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; CHECK-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 {%k1}
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp eq <4 x i32> %x1, %y1
%mask0 = icmp eq <4 x i32> %x, %y
@@ -505,8 +468,7 @@ define <2 x i64> @test128_10(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleq %xmm1, %xmm0, %k1
; CHECK-NEXT: vpcmpleq %xmm2, %xmm3, %k1 {%k1}
-; CHECK-NEXT: vmovdqa64 %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vmovdqa %xmm2, %xmm0
+; CHECK-NEXT: vpblendmq %xmm0, %xmm2, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <2 x i64> %x1, %y1
%mask0 = icmp sle <2 x i64> %x, %y
@@ -520,8 +482,7 @@ define <2 x i64> @test128_11(<2 x i64> %x, <2 x i64>* %y.ptr, <2 x i64> %x1, <2
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %xmm2, %xmm1, %k1
; CHECK-NEXT: vpcmpgtq (%rdi), %xmm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqa64 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sgt <2 x i64> %x1, %y1
%y = load <2 x i64>, <2 x i64>* %y.ptr, align 4
@@ -536,8 +497,7 @@ define <4 x i32> @test128_12(<4 x i32> %x, <4 x i32>* %y.ptr, <4 x i32> %x1, <4
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %xmm1, %xmm2, %k1
; CHECK-NEXT: vpcmpleud (%rdi), %xmm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <4 x i32> %x1, %y1
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
@@ -551,8 +511,7 @@ define <2 x i64> @test128_13(<2 x i64> %x, <2 x i64> %x1, i64* %yb.ptr) nounwind
; CHECK-LABEL: test128_13:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k1
-; CHECK-NEXT: vmovdqa64 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <2 x i64> undef, i64 %yb, i32 0
@@ -566,8 +525,7 @@ define <4 x i32> @test128_14(<4 x i32> %x, i32* %yb.ptr, <4 x i32> %x1) nounwind
; CHECK-LABEL: test128_14:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi){1to4}, %xmm0, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <4 x i32> undef, i32 %yb, i32 0
@@ -582,8 +540,7 @@ define <4 x i32> @test128_15(<4 x i32> %x, i32* %yb.ptr, <4 x i32> %x1, <4 x i32
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %xmm1, %xmm2, %k1
; CHECK-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <4 x i32> %x1, %y1
%yb = load i32, i32* %yb.ptr, align 4
@@ -600,8 +557,7 @@ define <2 x i64> @test128_16(<2 x i64> %x, i64* %yb.ptr, <2 x i64> %x1, <2 x i64
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleq %xmm1, %xmm2, %k1
; CHECK-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqa64 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <2 x i64> %x1, %y1
%yb = load i64, i64* %yb.ptr, align 4
@@ -617,8 +573,7 @@ define <4 x i32> @test128_17(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
; CHECK-LABEL: test128_17:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpneqd (%rdi), %xmm0, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp ne <4 x i32> %x, %y
@@ -630,8 +585,7 @@ define <4 x i32> @test128_18(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
; CHECK-LABEL: test128_18:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpneqd (%rdi), %xmm0, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp ne <4 x i32> %y, %x
@@ -643,8 +597,7 @@ define <4 x i32> @test128_19(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
; CHECK-LABEL: test128_19:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnltud (%rdi), %xmm0, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp uge <4 x i32> %x, %y
@@ -656,8 +609,7 @@ define <4 x i32> @test128_20(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
; CHECK-LABEL: test128_20:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleud (%rdi), %xmm0, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp uge <4 x i32> %y, %x
diff --git a/test/CodeGen/X86/bc-extract.ll b/test/CodeGen/X86/bc-extract.ll
index a1c0f5ae527c..b43c70e303a1 100644
--- a/test/CodeGen/X86/bc-extract.ll
+++ b/test/CodeGen/X86/bc-extract.ll
@@ -1,25 +1,50 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse4.2 | FileCheck %s
-
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64
define float @extractFloat1() nounwind {
+; X32-LABEL: extractFloat1:
+; X32: # BB#0: # %entry
+; X32-NEXT: fld1
+; X32-NEXT: retl
+;
+; X64-LABEL: extractFloat1:
+; X64: # BB#0: # %entry
+; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: retq
entry:
- ; CHECK: 1065353216
%tmp0 = bitcast <1 x double> <double 0x000000003F800000> to <2 x float>
- %tmp1 = extractelement <2 x float> %tmp0, i32 0
+ %tmp1 = extractelement <2 x float> %tmp0, i32 0
ret float %tmp1
}
define float @extractFloat2() nounwind {
+; X32-LABEL: extractFloat2:
+; X32: # BB#0: # %entry
+; X32-NEXT: fldz
+; X32-NEXT: retl
+;
+; X64-LABEL: extractFloat2:
+; X64: # BB#0: # %entry
+; X64-NEXT: xorps %xmm0, %xmm0
+; X64-NEXT: retq
entry:
- ; CHECK: xorps %xmm0, %xmm0
%tmp4 = bitcast <1 x double> <double 0x000000003F800000> to <2 x float>
%tmp5 = extractelement <2 x float> %tmp4, i32 1
ret float %tmp5
}
define i32 @extractInt2() nounwind {
+; X32-LABEL: extractInt2:
+; X32: # BB#0: # %entry
+; X32-NEXT: xorl %eax, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: extractInt2:
+; X64: # BB#0: # %entry
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: retq
entry:
- ; CHECK: xorl %eax, %eax
%tmp4 = bitcast <1 x i64> <i64 256> to <2 x i32>
%tmp5 = extractelement <2 x i32> %tmp4, i32 1
ret i32 %tmp5
diff --git a/test/CodeGen/X86/bitcast-mmx.ll b/test/CodeGen/X86/bitcast-mmx.ll
index 4107f3914f81..f0318ede531a 100644
--- a/test/CodeGen/X86/bitcast-mmx.ll
+++ b/test/CodeGen/X86/bitcast-mmx.ll
@@ -1,12 +1,20 @@
-; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X64
-define i32 @t0(i64 %x) {
-; CHECK-LABEL: t0:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movd %[[REG1:[a-z]+]], %mm0
-; CHECK-NEXT: pshufw $238, %mm0, %mm0
-; CHECK-NEXT: movd %mm0, %eax
-; CHECK-NEXT: retq
+define i32 @t0(i64 %x) nounwind {
+; X86-LABEL: t0:
+; X86: # BB#0: # %entry
+; X86-NEXT: pshufw $238, {{[0-9]+}}(%esp), %mm0 # mm0 = mem[2,3,2,3]
+; X86-NEXT: movd %mm0, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: t0:
+; X64: # BB#0: # %entry
+; X64-NEXT: movd %rdi, %mm0
+; X64-NEXT: pshufw $238, %mm0, %mm0 # mm0 = mm0[2,3,2,3]
+; X64-NEXT: movd %mm0, %eax
+; X64-NEXT: retq
entry:
%0 = bitcast i64 %x to <4 x i16>
%1 = bitcast <4 x i16> %0 to x86_mmx
@@ -19,14 +27,30 @@ entry:
ret i32 %7
}
-define i64 @t1(i64 %x, i32 %n) {
-; CHECK-LABEL: t1:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movd %[[REG2:[a-z]+]], %mm0
-; CHECK-NEXT: movd %[[REG1]], %mm1
-; CHECK-NEXT: psllq %mm0, %mm1
-; CHECK-NEXT: movd %mm1, %rax
-; CHECK-NEXT: retq
+define i64 @t1(i64 %x, i32 %n) nounwind {
+; X86-LABEL: t1:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movd 16(%ebp), %mm0
+; X86-NEXT: movq 8(%ebp), %mm1
+; X86-NEXT: psllq %mm0, %mm1
+; X86-NEXT: movq %mm1, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t1:
+; X64: # BB#0: # %entry
+; X64-NEXT: movd %esi, %mm0
+; X64-NEXT: movd %rdi, %mm1
+; X64-NEXT: psllq %mm0, %mm1
+; X64-NEXT: movd %mm1, %rax
+; X64-NEXT: retq
entry:
%0 = bitcast i64 %x to x86_mmx
%1 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %0, i32 %n)
@@ -34,16 +58,33 @@ entry:
ret i64 %2
}
-define i64 @t2(i64 %x, i32 %n, i32 %w) {
-; CHECK-LABEL: t2:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movd %[[REG4:[a-z]+]], %mm0
-; CHECK-NEXT: movd %[[REG6:[a-z0-9]+]], %mm1
-; CHECK-NEXT: psllq %mm0, %mm1
-; CHECK-NEXT: movd %[[REG1]], %mm0
-; CHECK-NEXT: por %mm1, %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t2(i64 %x, i32 %n, i32 %w) nounwind {
+; X86-LABEL: t2:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movd 16(%ebp), %mm0
+; X86-NEXT: movd 20(%ebp), %mm1
+; X86-NEXT: psllq %mm0, %mm1
+; X86-NEXT: por 8(%ebp), %mm1
+; X86-NEXT: movq %mm1, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t2:
+; X64: # BB#0: # %entry
+; X64-NEXT: movd %esi, %mm0
+; X64-NEXT: movd %edx, %mm1
+; X64-NEXT: psllq %mm0, %mm1
+; X64-NEXT: movd %rdi, %mm0
+; X64-NEXT: por %mm1, %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
entry:
%0 = insertelement <2 x i32> undef, i32 %w, i32 0
%1 = insertelement <2 x i32> %0, i32 0, i32 1
@@ -55,13 +96,32 @@ entry:
ret i64 %6
}
-define i64 @t3(<1 x i64>* %y, i32* %n) {
-; CHECK-LABEL: t3:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movq (%[[REG1]]), %mm0
-; CHECK-NEXT: psllq (%[[REG3:[a-z]+]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t3(<1 x i64>* %y, i32* %n) nounwind {
+; X86-LABEL: t3:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movq (%ecx), %mm0
+; X86-NEXT: movd (%eax), %mm1
+; X86-NEXT: psllq %mm1, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t3:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: movd (%rsi), %mm1
+; X64-NEXT: psllq %mm1, %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %y to x86_mmx*
%1 = load x86_mmx, x86_mmx* %0, align 8
diff --git a/test/CodeGen/X86/bitreverse.ll b/test/CodeGen/X86/bitreverse.ll
index 35cbbdafb464..06daf014c151 100644
--- a/test/CodeGen/X86/bitreverse.ll
+++ b/test/CodeGen/X86/bitreverse.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
; These tests just check that the plumbing is in place for @llvm.bitreverse. The
; actual output is massive at the moment as llvm.bitreverse is not yet legal.
@@ -7,100 +8,354 @@
declare <2 x i16> @llvm.bitreverse.v2i16(<2 x i16>) readnone
define <2 x i16> @test_bitreverse_v2i16(<2 x i16> %a) nounwind {
-; CHECK-LABEL: test_bitreverse_v2i16:
-; CHECK: # BB#0:
-; CHECK-NEXT: movw {{[0-9]+}}(%esp), %cx
-; CHECK-NEXT: movw {{[0-9]+}}(%esp), %ax
-; CHECK-NEXT: rolw $8, %ax
-; CHECK-NEXT: movl %eax, %edx
-; CHECK-NEXT: andl $3855, %edx # imm = 0xF0F
-; CHECK-NEXT: shll $4, %edx
-; CHECK-NEXT: andl $61680, %eax # imm = 0xF0F0
-; CHECK-NEXT: shrl $4, %eax
-; CHECK-NEXT: orl %edx, %eax
-; CHECK-NEXT: movl %eax, %edx
-; CHECK-NEXT: andl $13107, %edx # imm = 0x3333
-; CHECK-NEXT: andl $52428, %eax # imm = 0xCCCC
-; CHECK-NEXT: shrl $2, %eax
-; CHECK-NEXT: leal (%eax,%edx,4), %eax
-; CHECK-NEXT: movl %eax, %edx
-; CHECK-NEXT: andl $21845, %edx # imm = 0x5555
-; CHECK-NEXT: andl $43690, %eax # imm = 0xAAAA
-; CHECK-NEXT: shrl %eax
-; CHECK-NEXT: leal (%eax,%edx,2), %eax
-; CHECK-NEXT: rolw $8, %cx
-; CHECK-NEXT: movl %ecx, %edx
-; CHECK-NEXT: andl $3855, %edx # imm = 0xF0F
-; CHECK-NEXT: shll $4, %edx
-; CHECK-NEXT: andl $61680, %ecx # imm = 0xF0F0
-; CHECK-NEXT: shrl $4, %ecx
-; CHECK-NEXT: orl %edx, %ecx
-; CHECK-NEXT: movl %ecx, %edx
-; CHECK-NEXT: andl $13107, %edx # imm = 0x3333
-; CHECK-NEXT: andl $52428, %ecx # imm = 0xCCCC
-; CHECK-NEXT: shrl $2, %ecx
-; CHECK-NEXT: leal (%ecx,%edx,4), %ecx
-; CHECK-NEXT: movl %ecx, %edx
-; CHECK-NEXT: andl $21845, %edx # imm = 0x5555
-; CHECK-NEXT: andl $43690, %ecx # imm = 0xAAAA
-; CHECK-NEXT: shrl %ecx
-; CHECK-NEXT: leal (%ecx,%edx,2), %edx
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; CHECK-NEXT: # kill: %DX<def> %DX<kill> %EDX<kill>
-; CHECK-NEXT: retl
+; X86-LABEL: test_bitreverse_v2i16:
+; X86: # BB#0:
+; X86-NEXT: movw {{[0-9]+}}(%esp), %cx
+; X86-NEXT: movw {{[0-9]+}}(%esp), %ax
+; X86-NEXT: rolw $8, %ax
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: andl $3855, %edx # imm = 0xF0F
+; X86-NEXT: shll $4, %edx
+; X86-NEXT: andl $61680, %eax # imm = 0xF0F0
+; X86-NEXT: shrl $4, %eax
+; X86-NEXT: orl %edx, %eax
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: andl $13107, %edx # imm = 0x3333
+; X86-NEXT: andl $52428, %eax # imm = 0xCCCC
+; X86-NEXT: shrl $2, %eax
+; X86-NEXT: leal (%eax,%edx,4), %eax
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: andl $21845, %edx # imm = 0x5555
+; X86-NEXT: andl $43690, %eax # imm = 0xAAAA
+; X86-NEXT: shrl %eax
+; X86-NEXT: leal (%eax,%edx,2), %eax
+; X86-NEXT: rolw $8, %cx
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: andl $3855, %edx # imm = 0xF0F
+; X86-NEXT: shll $4, %edx
+; X86-NEXT: andl $61680, %ecx # imm = 0xF0F0
+; X86-NEXT: shrl $4, %ecx
+; X86-NEXT: orl %edx, %ecx
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: andl $13107, %edx # imm = 0x3333
+; X86-NEXT: andl $52428, %ecx # imm = 0xCCCC
+; X86-NEXT: shrl $2, %ecx
+; X86-NEXT: leal (%ecx,%edx,4), %ecx
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: andl $21845, %edx # imm = 0x5555
+; X86-NEXT: andl $43690, %ecx # imm = 0xAAAA
+; X86-NEXT: shrl %ecx
+; X86-NEXT: leal (%ecx,%edx,2), %edx
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %DX<def> %DX<kill> %EDX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_bitreverse_v2i16:
+; X64: # BB#0:
+; X64-NEXT: pxor %xmm1, %xmm1
+; X64-NEXT: movdqa %xmm0, %xmm2
+; X64-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; X64-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
+; X64-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
+; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; X64-NEXT: packuswb %xmm2, %xmm0
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X64-NEXT: movdqa %xmm0, %xmm2
+; X64-NEXT: pand %xmm1, %xmm2
+; X64-NEXT: psllw $4, %xmm2
+; X64-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; X64-NEXT: pand %xmm3, %xmm2
+; X64-NEXT: pand %xmm3, %xmm0
+; X64-NEXT: psrlw $4, %xmm0
+; X64-NEXT: pand %xmm1, %xmm0
+; X64-NEXT: por %xmm2, %xmm0
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
+; X64-NEXT: pand %xmm0, %xmm1
+; X64-NEXT: psllw $2, %xmm1
+; X64-NEXT: pand {{.*}}(%rip), %xmm1
+; X64-NEXT: pand {{.*}}(%rip), %xmm0
+; X64-NEXT: psrlw $2, %xmm0
+; X64-NEXT: pand {{.*}}(%rip), %xmm0
+; X64-NEXT: por %xmm1, %xmm0
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
+; X64-NEXT: pand %xmm0, %xmm1
+; X64-NEXT: paddb %xmm1, %xmm1
+; X64-NEXT: pand {{.*}}(%rip), %xmm0
+; X64-NEXT: psrlw $1, %xmm0
+; X64-NEXT: pand {{.*}}(%rip), %xmm0
+; X64-NEXT: por %xmm1, %xmm0
+; X64-NEXT: psrlq $48, %xmm0
+; X64-NEXT: retq
%b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %a)
ret <2 x i16> %b
}
+declare i64 @llvm.bitreverse.i64(i64) readnone
+
+define i64 @test_bitreverse_i64(i64 %a) nounwind {
+; X86-LABEL: test_bitreverse_i64:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: bswapl %eax
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: andl $252645135, %edx # imm = 0xF0F0F0F
+; X86-NEXT: shll $4, %edx
+; X86-NEXT: andl $-252645136, %eax # imm = 0xF0F0F0F0
+; X86-NEXT: shrl $4, %eax
+; X86-NEXT: orl %edx, %eax
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: andl $858993459, %edx # imm = 0x33333333
+; X86-NEXT: andl $-858993460, %eax # imm = 0xCCCCCCCC
+; X86-NEXT: shrl $2, %eax
+; X86-NEXT: leal (%eax,%edx,4), %eax
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: andl $1431655765, %edx # imm = 0x55555555
+; X86-NEXT: andl $-1431655766, %eax # imm = 0xAAAAAAAA
+; X86-NEXT: shrl %eax
+; X86-NEXT: leal (%eax,%edx,2), %eax
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: andl $252645135, %edx # imm = 0xF0F0F0F
+; X86-NEXT: shll $4, %edx
+; X86-NEXT: andl $-252645136, %ecx # imm = 0xF0F0F0F0
+; X86-NEXT: shrl $4, %ecx
+; X86-NEXT: orl %edx, %ecx
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: andl $858993459, %edx # imm = 0x33333333
+; X86-NEXT: andl $-858993460, %ecx # imm = 0xCCCCCCCC
+; X86-NEXT: shrl $2, %ecx
+; X86-NEXT: leal (%ecx,%edx,4), %ecx
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: andl $1431655765, %edx # imm = 0x55555555
+; X86-NEXT: andl $-1431655766, %ecx # imm = 0xAAAAAAAA
+; X86-NEXT: shrl %ecx
+; X86-NEXT: leal (%ecx,%edx,2), %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_bitreverse_i64:
+; X64: # BB#0:
+; X64-NEXT: bswapq %rdi
+; X64-NEXT: movabsq $1085102592571150095, %rax # imm = 0xF0F0F0F0F0F0F0F
+; X64-NEXT: andq %rdi, %rax
+; X64-NEXT: shlq $4, %rax
+; X64-NEXT: movabsq $-1085102592571150096, %rcx # imm = 0xF0F0F0F0F0F0F0F0
+; X64-NEXT: andq %rdi, %rcx
+; X64-NEXT: shrq $4, %rcx
+; X64-NEXT: orq %rax, %rcx
+; X64-NEXT: movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
+; X64-NEXT: andq %rcx, %rax
+; X64-NEXT: movabsq $-3689348814741910324, %rdx # imm = 0xCCCCCCCCCCCCCCCC
+; X64-NEXT: andq %rcx, %rdx
+; X64-NEXT: shrq $2, %rdx
+; X64-NEXT: leaq (%rdx,%rax,4), %rax
+; X64-NEXT: movabsq $6148914691236517205, %rcx # imm = 0x5555555555555555
+; X64-NEXT: andq %rax, %rcx
+; X64-NEXT: movabsq $-6148914691236517206, %rdx # imm = 0xAAAAAAAAAAAAAAAA
+; X64-NEXT: andq %rax, %rdx
+; X64-NEXT: shrq %rdx
+; X64-NEXT: leaq (%rdx,%rcx,2), %rax
+; X64-NEXT: retq
+ %b = call i64 @llvm.bitreverse.i64(i64 %a)
+ ret i64 %b
+}
+
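+; The scalar expansions in these tests implement the classic mask-and-swap bit
+; reversal. As a rough C sketch of what the lowered i64 sequence computes (the
+; masks below match the movabsq immediates in the X64 checks; narrower widths
+; use the analogous sequence with correspondingly sized masks):
+;
+;   uint64_t bitreverse64(uint64_t v) {
+;     v = __builtin_bswap64(v);                 /* reverse the bytes */
+;     v = ((v & 0x0F0F0F0F0F0F0F0FULL) << 4) |
+;         ((v & 0xF0F0F0F0F0F0F0F0ULL) >> 4);   /* swap nibbles within bytes */
+;     v = ((v & 0x3333333333333333ULL) << 2) |
+;         ((v & 0xCCCCCCCCCCCCCCCCULL) >> 2);   /* swap bit pairs within nibbles */
+;     v = ((v & 0x5555555555555555ULL) << 1) |
+;         ((v & 0xAAAAAAAAAAAAAAAAULL) >> 1);   /* swap adjacent bits */
+;     return v;
+;   }
+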
+declare i32 @llvm.bitreverse.i32(i32) readnone
+
+define i32 @test_bitreverse_i32(i32 %a) nounwind {
+; X86-LABEL: test_bitreverse_i32:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: bswapl %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
+; X86-NEXT: shll $4, %ecx
+; X86-NEXT: andl $-252645136, %eax # imm = 0xF0F0F0F0
+; X86-NEXT: shrl $4, %eax
+; X86-NEXT: orl %ecx, %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $858993459, %ecx # imm = 0x33333333
+; X86-NEXT: andl $-858993460, %eax # imm = 0xCCCCCCCC
+; X86-NEXT: shrl $2, %eax
+; X86-NEXT: leal (%eax,%ecx,4), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $1431655765, %ecx # imm = 0x55555555
+; X86-NEXT: andl $-1431655766, %eax # imm = 0xAAAAAAAA
+; X86-NEXT: shrl %eax
+; X86-NEXT: leal (%eax,%ecx,2), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_bitreverse_i32:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: bswapl %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
+; X64-NEXT: shll $4, %eax
+; X64-NEXT: andl $-252645136, %edi # imm = 0xF0F0F0F0
+; X64-NEXT: shrl $4, %edi
+; X64-NEXT: orl %eax, %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $858993459, %eax # imm = 0x33333333
+; X64-NEXT: andl $-858993460, %edi # imm = 0xCCCCCCCC
+; X64-NEXT: shrl $2, %edi
+; X64-NEXT: leal (%rdi,%rax,4), %eax
+; X64-NEXT: movl %eax, %ecx
+; X64-NEXT: andl $1431655765, %ecx # imm = 0x55555555
+; X64-NEXT: andl $-1431655766, %eax # imm = 0xAAAAAAAA
+; X64-NEXT: shrl %eax
+; X64-NEXT: leal (%rax,%rcx,2), %eax
+; X64-NEXT: retq
+ %b = call i32 @llvm.bitreverse.i32(i32 %a)
+ ret i32 %b
+}
+
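; Non-byte-multiple widths (i24, i4) are promoted to the next legal type and
; reversed there; the final-round masks are pre-shifted (0x55555500/0xAAAAAA00
; for i24, 0x50/0xA0 for i4) and a trailing right shift by the 8 (resp. 4)
; padding bits realigns the result.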
declare i24 @llvm.bitreverse.i24(i24) readnone
define i24 @test_bitreverse_i24(i24 %a) nounwind {
-; CHECK-LABEL: test_bitreverse_i24:
-; CHECK: # BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: bswapl %eax
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
-; CHECK-NEXT: shll $4, %ecx
-; CHECK-NEXT: andl $-252645136, %eax # imm = 0xF0F0F0F0
-; CHECK-NEXT: shrl $4, %eax
-; CHECK-NEXT: orl %ecx, %eax
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: andl $858993459, %ecx # imm = 0x33333333
-; CHECK-NEXT: andl $-858993460, %eax # imm = 0xCCCCCCCC
-; CHECK-NEXT: shrl $2, %eax
-; CHECK-NEXT: leal (%eax,%ecx,4), %eax
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: andl $1431655680, %ecx # imm = 0x55555500
-; CHECK-NEXT: andl $-1431655936, %eax # imm = 0xAAAAAA00
-; CHECK-NEXT: shrl %eax
-; CHECK-NEXT: leal (%eax,%ecx,2), %eax
-; CHECK-NEXT: shrl $8, %eax
-; CHECK-NEXT: retl
+; X86-LABEL: test_bitreverse_i24:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: bswapl %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
+; X86-NEXT: shll $4, %ecx
+; X86-NEXT: andl $-252645136, %eax # imm = 0xF0F0F0F0
+; X86-NEXT: shrl $4, %eax
+; X86-NEXT: orl %ecx, %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $858993459, %ecx # imm = 0x33333333
+; X86-NEXT: andl $-858993460, %eax # imm = 0xCCCCCCCC
+; X86-NEXT: shrl $2, %eax
+; X86-NEXT: leal (%eax,%ecx,4), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $1431655680, %ecx # imm = 0x55555500
+; X86-NEXT: andl $-1431655936, %eax # imm = 0xAAAAAA00
+; X86-NEXT: shrl %eax
+; X86-NEXT: leal (%eax,%ecx,2), %eax
+; X86-NEXT: shrl $8, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_bitreverse_i24:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: bswapl %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
+; X64-NEXT: shll $4, %eax
+; X64-NEXT: andl $-252645136, %edi # imm = 0xF0F0F0F0
+; X64-NEXT: shrl $4, %edi
+; X64-NEXT: orl %eax, %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $858993459, %eax # imm = 0x33333333
+; X64-NEXT: andl $-858993460, %edi # imm = 0xCCCCCCCC
+; X64-NEXT: shrl $2, %edi
+; X64-NEXT: leal (%rdi,%rax,4), %eax
+; X64-NEXT: movl %eax, %ecx
+; X64-NEXT: andl $1431655680, %ecx # imm = 0x55555500
+; X64-NEXT: andl $-1431655936, %eax # imm = 0xAAAAAA00
+; X64-NEXT: shrl %eax
+; X64-NEXT: leal (%rax,%rcx,2), %eax
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: retq
%b = call i24 @llvm.bitreverse.i24(i24 %a)
ret i24 %b
}
+declare i16 @llvm.bitreverse.i16(i16) readnone
+
+define i16 @test_bitreverse_i16(i16 %a) nounwind {
+; X86-LABEL: test_bitreverse_i16:
+; X86: # BB#0:
+; X86-NEXT: movw {{[0-9]+}}(%esp), %ax
+; X86-NEXT: rolw $8, %ax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $3855, %ecx # imm = 0xF0F
+; X86-NEXT: shll $4, %ecx
+; X86-NEXT: andl $61680, %eax # imm = 0xF0F0
+; X86-NEXT: shrl $4, %eax
+; X86-NEXT: orl %ecx, %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $13107, %ecx # imm = 0x3333
+; X86-NEXT: andl $52428, %eax # imm = 0xCCCC
+; X86-NEXT: shrl $2, %eax
+; X86-NEXT: leal (%eax,%ecx,4), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $21845, %ecx # imm = 0x5555
+; X86-NEXT: andl $43690, %eax # imm = 0xAAAA
+; X86-NEXT: shrl %eax
+; X86-NEXT: leal (%eax,%ecx,2), %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_bitreverse_i16:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: rolw $8, %di
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $3855, %eax # imm = 0xF0F
+; X64-NEXT: shll $4, %eax
+; X64-NEXT: andl $61680, %edi # imm = 0xF0F0
+; X64-NEXT: shrl $4, %edi
+; X64-NEXT: orl %eax, %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $13107, %eax # imm = 0x3333
+; X64-NEXT: andl $52428, %edi # imm = 0xCCCC
+; X64-NEXT: shrl $2, %edi
+; X64-NEXT: leal (%rdi,%rax,4), %eax
+; X64-NEXT: movl %eax, %ecx
+; X64-NEXT: andl $21845, %ecx # imm = 0x5555
+; X64-NEXT: andl $43690, %eax # imm = 0xAAAA
+; X64-NEXT: shrl %eax
+; X64-NEXT: leal (%rax,%rcx,2), %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %b = call i16 @llvm.bitreverse.i16(i16 %a)
+ ret i16 %b
+}
+
declare i8 @llvm.bitreverse.i8(i8) readnone
define i8 @test_bitreverse_i8(i8 %a) {
-; CHECK-LABEL: test_bitreverse_i8:
-; CHECK: # BB#0:
-; CHECK-NEXT: movb {{[0-9]+}}(%esp), %al
-; CHECK-NEXT: rolb $4, %al
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: andb $51, %cl
-; CHECK-NEXT: shlb $2, %cl
-; CHECK-NEXT: andb $-52, %al
-; CHECK-NEXT: shrb $2, %al
-; CHECK-NEXT: orb %cl, %al
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: andb $85, %cl
-; CHECK-NEXT: addb %cl, %cl
-; CHECK-NEXT: andb $-86, %al
-; CHECK-NEXT: shrb %al
-; CHECK-NEXT: orb %cl, %al
-; CHECK-NEXT: retl
+; X86-LABEL: test_bitreverse_i8:
+; X86: # BB#0:
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: rolb $4, %al
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andb $51, %cl
+; X86-NEXT: shlb $2, %cl
+; X86-NEXT: andb $-52, %al
+; X86-NEXT: shrb $2, %al
+; X86-NEXT: orb %cl, %al
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andb $85, %cl
+; X86-NEXT: addb %cl, %cl
+; X86-NEXT: andb $-86, %al
+; X86-NEXT: shrb %al
+; X86-NEXT: orb %cl, %al
+; X86-NEXT: retl
+;
+; X64-LABEL: test_bitreverse_i8:
+; X64: # BB#0:
+; X64-NEXT: rolb $4, %dil
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andb $51, %al
+; X64-NEXT: shlb $2, %al
+; X64-NEXT: andb $-52, %dil
+; X64-NEXT: shrb $2, %dil
+; X64-NEXT: orb %al, %dil
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andb $85, %al
+; X64-NEXT: addb %al, %al
+; X64-NEXT: andb $-86, %dil
+; X64-NEXT: shrb %dil
+; X64-NEXT: orb %al, %dil
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
%b = call i8 @llvm.bitreverse.i8(i8 %a)
ret i8 %b
}
@@ -108,24 +363,43 @@ define i8 @test_bitreverse_i8(i8 %a) {
declare i4 @llvm.bitreverse.i4(i4) readnone
define i4 @test_bitreverse_i4(i4 %a) {
-; CHECK-LABEL: test_bitreverse_i4:
-; CHECK: # BB#0:
-; CHECK-NEXT: movb {{[0-9]+}}(%esp), %al
-; CHECK-NEXT: rolb $4, %al
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: andb $51, %cl
-; CHECK-NEXT: shlb $2, %cl
-; CHECK-NEXT: andb $-52, %al
-; CHECK-NEXT: shrb $2, %al
-; CHECK-NEXT: orb %cl, %al
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: andb $80, %cl
-; CHECK-NEXT: addb %cl, %cl
-; CHECK-NEXT: andb $-96, %al
-; CHECK-NEXT: shrb %al
-; CHECK-NEXT: orb %cl, %al
-; CHECK-NEXT: shrb $4, %al
-; CHECK-NEXT: retl
+; X86-LABEL: test_bitreverse_i4:
+; X86: # BB#0:
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: rolb $4, %al
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andb $51, %cl
+; X86-NEXT: shlb $2, %cl
+; X86-NEXT: andb $-52, %al
+; X86-NEXT: shrb $2, %al
+; X86-NEXT: orb %cl, %al
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andb $80, %cl
+; X86-NEXT: addb %cl, %cl
+; X86-NEXT: andb $-96, %al
+; X86-NEXT: shrb %al
+; X86-NEXT: orb %cl, %al
+; X86-NEXT: shrb $4, %al
+; X86-NEXT: retl
+;
+; X64-LABEL: test_bitreverse_i4:
+; X64: # BB#0:
+; X64-NEXT: rolb $4, %dil
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andb $51, %al
+; X64-NEXT: shlb $2, %al
+; X64-NEXT: andb $-52, %dil
+; X64-NEXT: shrb $2, %dil
+; X64-NEXT: orb %al, %dil
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andb $80, %al
+; X64-NEXT: addb %al, %al
+; X64-NEXT: andb $-96, %dil
+; X64-NEXT: shrb %dil
+; X64-NEXT: orb %al, %dil
+; X64-NEXT: shrb $4, %dil
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
%b = call i4 @llvm.bitreverse.i4(i4 %a)
ret i4 %b
}
@@ -133,38 +407,58 @@ define i4 @test_bitreverse_i4(i4 %a) {
; These tests check that bitreverse(constant) calls are folded
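; e.g. bitreverse(i16 15 = 0x000F) = 0xF000 = 61440, bitreverse(i16 3840 =
; 0x0F00) = 0x00F0 = 240, bitreverse(i24 4096 = 1<<12) = 1<<11 = 2048,
; bitreverse(i8 15) = 0xF0 = -16, and bitreverse(i4 8) = 1, matching the
; immediates below.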
define <2 x i16> @fold_v2i16() {
-; CHECK-LABEL: fold_v2i16:
-; CHECK: # BB#0:
-; CHECK-NEXT: movw $-4096, %ax # imm = 0xF000
-; CHECK-NEXT: movw $240, %dx
-; CHECK-NEXT: retl
+; X86-LABEL: fold_v2i16:
+; X86: # BB#0:
+; X86-NEXT: movw $-4096, %ax # imm = 0xF000
+; X86-NEXT: movw $240, %dx
+; X86-NEXT: retl
+;
+; X64-LABEL: fold_v2i16:
+; X64: # BB#0:
+; X64-NEXT: movaps {{.*#+}} xmm0 = [61440,240]
+; X64-NEXT: retq
%b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> <i16 15, i16 3840>)
ret <2 x i16> %b
}
define i24 @fold_i24() {
-; CHECK-LABEL: fold_i24:
-; CHECK: # BB#0:
-; CHECK-NEXT: movl $2048, %eax # imm = 0x800
-; CHECK-NEXT: retl
+; X86-LABEL: fold_i24:
+; X86: # BB#0:
+; X86-NEXT: movl $2048, %eax # imm = 0x800
+; X86-NEXT: retl
+;
+; X64-LABEL: fold_i24:
+; X64: # BB#0:
+; X64-NEXT: movl $2048, %eax # imm = 0x800
+; X64-NEXT: retq
%b = call i24 @llvm.bitreverse.i24(i24 4096)
ret i24 %b
}
define i8 @fold_i8() {
-; CHECK-LABEL: fold_i8:
-; CHECK: # BB#0:
-; CHECK-NEXT: movb $-16, %al
-; CHECK-NEXT: retl
+; X86-LABEL: fold_i8:
+; X86: # BB#0:
+; X86-NEXT: movb $-16, %al
+; X86-NEXT: retl
+;
+; X64-LABEL: fold_i8:
+; X64: # BB#0:
+; X64-NEXT: movb $-16, %al
+; X64-NEXT: retq
%b = call i8 @llvm.bitreverse.i8(i8 15)
ret i8 %b
}
define i4 @fold_i4() {
-; CHECK-LABEL: fold_i4:
-; CHECK: # BB#0:
-; CHECK-NEXT: movb $1, %al
-; CHECK-NEXT: retl
+; X86-LABEL: fold_i4:
+; X86: # BB#0:
+; X86-NEXT: movb $1, %al
+; X86-NEXT: retl
+;
+; X64-LABEL: fold_i4:
+; X64: # BB#0:
+; X64-NEXT: movb $1, %al
+; X64-NEXT: retq
%b = call i4 @llvm.bitreverse.i4(i4 8)
ret i4 %b
}
@@ -172,21 +466,30 @@ define i4 @fold_i4() {
; These tests check that bitreverse(bitreverse()) calls are removed
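; (bitreverse is an involution, so back-to-back calls cancel: the X64 versions
; reduce to a plain register move or nothing at all.)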
define i8 @identity_i8(i8 %a) {
-; CHECK-LABEL: identity_i8:
-; CHECK: # BB#0:
-; CHECK-NEXT: movb {{[0-9]+}}(%esp), %al
-; CHECK-NEXT: retl
+; X86-LABEL: identity_i8:
+; X86: # BB#0:
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: retl
+;
+; X64-LABEL: identity_i8:
+; X64: # BB#0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
%b = call i8 @llvm.bitreverse.i8(i8 %a)
%c = call i8 @llvm.bitreverse.i8(i8 %b)
ret i8 %c
}
define <2 x i16> @identity_v2i16(<2 x i16> %a) {
-; CHECK-LABEL: identity_v2i16:
-; CHECK: # BB#0:
-; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %edx
-; CHECK-NEXT: retl
+; X86-LABEL: identity_v2i16:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: identity_v2i16:
+; X64: # BB#0:
+; X64-NEXT: retq
%b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %a)
%c = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %b)
ret <2 x i16> %c
@@ -195,17 +498,25 @@ define <2 x i16> @identity_v2i16(<2 x i16> %a) {
; These tests check that bitreverse(undef) calls are removed
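; (reversing the bits of undef yields undef, so the calls fold away entirely.)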
define i8 @undef_i8() {
-; CHECK-LABEL: undef_i8:
-; CHECK: # BB#0:
-; CHECK-NEXT: retl
+; X86-LABEL: undef_i8:
+; X86: # BB#0:
+; X86-NEXT: retl
+;
+; X64-LABEL: undef_i8:
+; X64: # BB#0:
+; X64-NEXT: retq
%b = call i8 @llvm.bitreverse.i8(i8 undef)
ret i8 %b
}
define <2 x i16> @undef_v2i16() {
-; CHECK-LABEL: undef_v2i16:
-; CHECK: # BB#0:
-; CHECK-NEXT: retl
+; X86-LABEL: undef_v2i16:
+; X86: # BB#0:
+; X86-NEXT: retl
+;
+; X64-LABEL: undef_v2i16:
+; X64: # BB#0:
+; X64-NEXT: retq
%b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> undef)
ret <2 x i16> %b
}
diff --git a/test/CodeGen/X86/block-placement.ll b/test/CodeGen/X86/block-placement.ll
index 807dfe464cbe..c7de65d84507 100644
--- a/test/CodeGen/X86/block-placement.ll
+++ b/test/CodeGen/X86/block-placement.ll
@@ -314,7 +314,7 @@ exit:
define void @unnatural_cfg1() {
; Test that we can handle a loop with an inner unnatural loop at the end of
; a function. This is a gross CFG reduced out of the single source GCC.
-; CHECK: unnatural_cfg1
+; CHECK-LABEL: unnatural_cfg1
; CHECK: %entry
; CHECK: %loop.body1
; CHECK: %loop.body2
@@ -352,17 +352,15 @@ define void @unnatural_cfg2() {
; Test that we can handle a loop with a nested natural loop *and* an unnatural
; loop. This was reduced from a crash on block placement when run over
; single-source GCC.
-; CHECK: unnatural_cfg2
+; CHECK-LABEL: unnatural_cfg2
; CHECK: %entry
; CHECK: %loop.body1
; CHECK: %loop.body2
-; CHECK: %loop.body3
-; CHECK: %loop.inner1.begin
-; The end block is folded with %loop.body3...
-; CHECK-NOT: %loop.inner1.end
; CHECK: %loop.body4
; CHECK: %loop.inner2.begin
-; The loop.inner2.end block is folded
+; CHECK: %loop.inner2.begin
+; CHECK: %loop.body3
+; CHECK: %loop.inner1.begin
; CHECK: %loop.header
; CHECK: %bail
@@ -559,7 +557,7 @@ define void @test_eh_lpad_successor() personality i8* bitcast (i32 (...)* @__gxx
; didn't correctly locate the fallthrough successor, assuming blindly that the
; first one was the fallthrough successor. As a result, we would add an
; erroneous jump to the landing pad thinking *that* was the default successor.
-; CHECK: test_eh_lpad_successor
+; CHECK-LABEL: test_eh_lpad_successor
; CHECK: %entry
; CHECK-NOT: jmp
; CHECK: %loop
@@ -587,7 +585,7 @@ define void @test_eh_throw() personality i8* bitcast (i32 (...)* @__gxx_personal
; fallthrough simply won't occur. Make sure we don't crash trying to update
; terminators for such constructs.
;
-; CHECK: test_eh_throw
+; CHECK-LABEL: test_eh_throw
; CHECK: %entry
; CHECK: %cleanup
@@ -609,7 +607,7 @@ define void @test_unnatural_cfg_backwards_inner_loop() {
; attempt to merge onto the wrong end of the inner loop just because we find it
; first. This was reduced from a crasher in GCC's single source.
;
-; CHECK: test_unnatural_cfg_backwards_inner_loop
+; CHECK-LABEL: test_unnatural_cfg_backwards_inner_loop
; CHECK: %entry
; CHECK: %loop2b
; CHECK: %loop1
@@ -649,7 +647,7 @@ define void @unanalyzable_branch_to_loop_header() {
; fallthrough because that happens to always produce unanalyzable branches on
; x86.
;
-; CHECK: unanalyzable_branch_to_loop_header
+; CHECK-LABEL: unanalyzable_branch_to_loop_header
; CHECK: %entry
; CHECK: %loop
; CHECK: %exit
@@ -673,7 +671,7 @@ define void @unanalyzable_branch_to_best_succ(i1 %cond) {
; This branch is now analyzable and hence the destination block becomes the
; hotter one. The right order is entry->bar->exit->foo.
;
-; CHECK: unanalyzable_branch_to_best_succ
+; CHECK-LABEL: unanalyzable_branch_to_best_succ
; CHECK: %entry
; CHECK: %bar
; CHECK: %exit
@@ -699,7 +697,7 @@ define void @unanalyzable_branch_to_free_block(float %x) {
; Ensure that we can handle unanalyzable branches where the destination block
; gets selected as the best free block in the CFG.
;
-; CHECK: unanalyzable_branch_to_free_block
+; CHECK-LABEL: unanalyzable_branch_to_free_block
; CHECK: %entry
; CHECK: %a
; CHECK: %b
@@ -729,7 +727,7 @@ define void @many_unanalyzable_branches() {
; Ensure that we don't crash as we're building up many unanalyzable branches,
; blocks, and loops.
;
-; CHECK: many_unanalyzable_branches
+; CHECK-LABEL: many_unanalyzable_branches
; CHECK: %entry
; CHECK: %exit
@@ -948,7 +946,7 @@ define void @benchmark_heapsort(i32 %n, double* nocapture %ra) {
; strange layouts that are significantly less efficient, oftentimes making
; it discontiguous.
;
-; CHECK: @benchmark_heapsort
+; CHECK-LABEL: @benchmark_heapsort
; CHECK: %entry
; First rotated loop top.
; CHECK: .p2align
@@ -1456,9 +1454,50 @@ exit:
ret void
}
+; Because %endif has a higher frequency than %if, the calculations show we
+; shouldn't tail-duplicate %endif so that we can place it after %if. We were
+; previously undercounting the cost by ignoring execution frequency that didn't
+; come from the %if->%endif path.
+; CHECK-LABEL: higher_frequency_succ_tail_dup
+; CHECK: %entry
+; CHECK: %elseif
+; CHECK: %else
+; CHECK: %endif
+; CHECK: %then
+; CHECK: %ret
+define void @higher_frequency_succ_tail_dup(i1 %a, i1 %b, i1 %c) {
+entry:
+ br label %if
+if: ; preds = %entry
+ call void @effect(i32 0)
+ br i1 %a, label %elseif, label %endif, !prof !11 ; even
+
+elseif: ; preds = %if
+ call void @effect(i32 1)
+ br i1 %b, label %else, label %endif, !prof !11 ; even
+
+else: ; preds = %elseif
+ call void @effect(i32 2)
+ br label %endif
+
+endif: ; preds = %if, %elseif, %else
+ br i1 %c, label %then, label %ret, !prof !12 ; 5 to 3
+
+then: ; preds = %endif
+ call void @effect(i32 3)
+ br label %ret
+
+ret: ; preds = %endif, %then
+ ret void
+}
+
+declare void @effect(i32)
+
!5 = !{!"branch_weights", i32 84, i32 16}
!6 = !{!"function_entry_count", i32 10}
!7 = !{!"branch_weights", i32 60, i32 40}
!8 = !{!"branch_weights", i32 5001, i32 4999}
!9 = !{!"branch_weights", i32 85, i32 15}
!10 = !{!"branch_weights", i32 90, i32 10}
+!11 = !{!"branch_weights", i32 1, i32 1}
+!12 = !{!"branch_weights", i32 5, i32 3}
diff --git a/test/CodeGen/X86/block-placement.mir b/test/CodeGen/X86/block-placement.mir
index 7d13c3e529ca..c0cd7057d5c6 100644
--- a/test/CodeGen/X86/block-placement.mir
+++ b/test/CodeGen/X86/block-placement.mir
@@ -46,7 +46,7 @@ liveins:
- { reg: '%rdi' }
- { reg: '%esi' }
-# CHECK: %eax = FAULTING_LOAD_OP %bb.3.null, 1684, killed %rdi, 1, _, 0, _ :: (load 4 from %ir.ptr)
+# CHECK: %eax = FAULTING_OP 1, %bb.3.null, 1684, killed %rdi, 1, _, 0, _ :: (load 4 from %ir.ptr)
# CHECK-NEXT: JMP_1 %bb.2.not_null
# CHECK: bb.3.null:
# CHECK: bb.4.right:
@@ -66,7 +66,7 @@ body: |
successors: %bb.2.null(0x7ffff800), %bb.4.not_null(0x00000800)
liveins: %rdi
- %eax = FAULTING_LOAD_OP %bb.2.null, 1684, killed %rdi, 1, _, 0, _ :: (load 4 from %ir.ptr)
+ %eax = FAULTING_OP 1, %bb.2.null, 1684, killed %rdi, 1, _, 0, _ :: (load 4 from %ir.ptr)
JMP_1 %bb.4.not_null
bb.4.not_null:
diff --git a/test/CodeGen/X86/bool-ext-inc.ll b/test/CodeGen/X86/bool-ext-inc.ll
new file mode 100644
index 000000000000..d0967c102149
--- /dev/null
+++ b/test/CodeGen/X86/bool-ext-inc.ll
@@ -0,0 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+
+; FIXME: add (sext i1 X), 1 -> zext (not i1 X)
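+; (check: x=0 gives sext+1 = 0+1 = 1 = zext(not 0); x=1 gives sext+1 = -1+1 = 0
+; = zext(not 1))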
+
+define i32 @sext_inc(i1 zeroext %x) nounwind {
+; CHECK-LABEL: sext_inc:
+; CHECK: # BB#0:
+; CHECK-NEXT: movzbl %dil, %ecx
+; CHECK-NEXT: movl $1, %eax
+; CHECK-NEXT: subl %ecx, %eax
+; CHECK-NEXT: retq
+ %ext = sext i1 %x to i32
+ %add = add i32 %ext, 1
+ ret i32 %add
+}
+
+; FIXME: add (sext i1 X), 1 -> zext (not i1 X)
+
+define <4 x i32> @sext_inc_vec(<4 x i1> %x) nounwind {
+; CHECK-LABEL: sext_inc_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: pslld $31, %xmm0
+; CHECK-NEXT: psrad $31, %xmm0
+; CHECK-NEXT: paddd {{.*}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %ext = sext <4 x i1> %x to <4 x i32>
+ %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %add
+}
+
+
diff --git a/test/CodeGen/X86/branchfolding-debugloc.ll b/test/CodeGen/X86/branchfolding-debugloc.ll
new file mode 100644
index 000000000000..3ad8315f083d
--- /dev/null
+++ b/test/CodeGen/X86/branchfolding-debugloc.ll
@@ -0,0 +1,83 @@
+; RUN: llc < %s | FileCheck %s
+;
+; The test code is generated from the following source code:
+;
+; 1 extern int bar(int x);
+; 2
+; 3 int foo(int *begin, int *end) {
+; 4 int *i;
+; 5 int ret = 0;
+; 6 for (
+; 7 i = begin ;
+; 8 i != end ;
+; 9 i++)
+; 10 {
+; 11 ret += bar(*i);
+; 12 }
+; 13 return ret;
+; 14 }
+;
+; CHECK: # %entry
+; CHECK-NOT: # %for.body
+; CHECK: .loc 1 6 3
+; CHECK-NEXT: je [[BB:.LBB[^ ]+]]
+; CHECK: [[BB]]:{{.}}# %for.end
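+; (i.e. the conditional branch produced by branch folding must keep the
+; location of the "for" at line 6, column 3 rather than dropping it)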
+
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @foo(i32* readonly %begin, i32* readnone %end) !dbg !4 {
+entry:
+ %cmp6 = icmp eq i32* %begin, %end, !dbg !9
+ br i1 %cmp6, label %for.end, label %for.body.preheader, !dbg !12
+
+for.body.preheader: ; preds = %entry
+ br label %for.body, !dbg !13
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %ret.08 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+ %i.07 = phi i32* [ %incdec.ptr, %for.body ], [ %begin, %for.body.preheader ]
+ %0 = load i32, i32* %i.07, align 4, !dbg !13, !tbaa !15
+ %call = tail call i32 @bar(i32 %0), !dbg !19
+ %add = add nsw i32 %call, %ret.08, !dbg !20
+ %incdec.ptr = getelementptr inbounds i32, i32* %i.07, i64 1, !dbg !21
+ %cmp = icmp eq i32* %incdec.ptr, %end, !dbg !9
+ br i1 %cmp, label %for.end.loopexit, label %for.body, !dbg !12, !llvm.loop !22
+
+for.end.loopexit: ; preds = %for.body
+ br label %for.end, !dbg !24
+
+for.end: ; preds = %for.end.loopexit, %entry
+ %ret.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.end.loopexit ]
+ ret i32 %ret.0.lcssa, !dbg !24
+}
+
+declare i32 @bar(i32)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, emissionKind: FullDebug)
+!1 = !DIFile(filename: "foo.c", directory: "b/")
+!2 = !{i32 2, !"Dwarf Version", i32 4}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 3, type: !5, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !0)
+!5 = !DISubroutineType(types: !6)
+!6 = !{!7, !8, !8}
+!7 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!8 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 64)
+!9 = !DILocation(line: 8, column: 9, scope: !10)
+!10 = distinct !DILexicalBlock(scope: !11, file: !1, line: 6, column: 3)
+!11 = distinct !DILexicalBlock(scope: !4, file: !1, line: 6, column: 3)
+!12 = !DILocation(line: 6, column: 3, scope: !11)
+!13 = !DILocation(line: 11, column: 18, scope: !14)
+!14 = distinct !DILexicalBlock(scope: !10, file: !1, line: 10, column: 3)
+!15 = !{!16, !16, i64 0}
+!16 = !{!"int", !17, i64 0}
+!17 = !{!"omnipotent char", !18, i64 0}
+!18 = !{!"Simple C/C++ TBAA"}
+!19 = !DILocation(line: 11, column: 14, scope: !14)
+!20 = !DILocation(line: 11, column: 11, scope: !14)
+!21 = !DILocation(line: 9, column: 8, scope: !10)
+!22 = distinct !{!22, !12, !23}
+!23 = !DILocation(line: 12, column: 3, scope: !11)
+!24 = !DILocation(line: 13, column: 3, scope: !4)
diff --git a/test/CodeGen/X86/brcond.ll b/test/CodeGen/X86/brcond.ll
index f4db3ba7fecb..ce8a0dab98ce 100644
--- a/test/CodeGen/X86/brcond.ll
+++ b/test/CodeGen/X86/brcond.ll
@@ -30,45 +30,6 @@ declare i32 @foo(...)
declare i32 @bar(...)
-
-; PR3351 - (P == 0) & (Q == 0) -> (P|Q) == 0
-define i32 @test2(i32* %P, i32* %Q) nounwind ssp {
-entry:
- %a = icmp eq i32* %P, null ; <i1> [#uses=1]
- %b = icmp eq i32* %Q, null ; <i1> [#uses=1]
- %c = and i1 %a, %b
- br i1 %c, label %bb1, label %return
-
-bb1: ; preds = %entry
- ret i32 4
-
-return: ; preds = %entry
- ret i32 192
-; CHECK-LABEL: test2:
-; CHECK: movl 4(%esp), %eax
-; CHECK-NEXT: orl 8(%esp), %eax
-; CHECK-NEXT: jne LBB1_2
-}
-
-; PR3351 - (P != 0) | (Q != 0) -> (P|Q) != 0
-define i32 @test3(i32* %P, i32* %Q) nounwind ssp {
-entry:
- %a = icmp ne i32* %P, null ; <i1> [#uses=1]
- %b = icmp ne i32* %Q, null ; <i1> [#uses=1]
- %c = or i1 %a, %b
- br i1 %c, label %bb1, label %return
-
-bb1: ; preds = %entry
- ret i32 4
-
-return: ; preds = %entry
- ret i32 192
-; CHECK-LABEL: test3:
-; CHECK: movl 4(%esp), %eax
-; CHECK-NEXT: orl 8(%esp), %eax
-; CHECK-NEXT: je LBB2_2
-}
-
; <rdar://problem/7598384>:
;
; jCC L1
diff --git a/test/CodeGen/X86/break-false-dep.ll b/test/CodeGen/X86/break-false-dep.ll
index 4c5e747f9ca7..4388a8f6e084 100644
--- a/test/CodeGen/X86/break-false-dep.ll
+++ b/test/CodeGen/X86/break-false-dep.ll
@@ -277,3 +277,60 @@ ret:
;AVX: vcvtsi2sdq {{.*}}, [[XMM4_7:%xmm[4-7]]], {{%xmm[0-9]+}}
;AVX-NOT: [[XMM4_7]]
}
+
+; Make sure we are making a smart choice regarding undef registers even for more
+; complicated loop structures. This example is the inner loop from
+; julia> a = falses(10000); a[1:4:end] = true
+; julia> linspace(1.0,2.0,10000)[a]
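+;
+; vcvtsi2sdq writes only the low lane of its destination, leaving a false
+; dependence on whatever last wrote that xmm register. The dependency-breaking
+; pass should therefore pick the undef operand register with the most
+; clearance; here that can only be xmm6, since xmm7-xmm15 are clobbered by the
+; asm below and the remaining registers are used in the loop.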
+define void @loopclearance2(double* nocapture %y, i64* %x, double %c1, double %c2, double %c3, double %c4, i64 %size) {
+entry:
+ tail call void asm sideeffect "", "~{xmm7},~{dirflag},~{fpsr},~{flags}"()
+ tail call void asm sideeffect "", "~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{dirflag},~{fpsr},~{flags}"()
+ tail call void asm sideeffect "", "~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{dirflag},~{fpsr},~{flags}"()
+ br label %loop
+
+loop:
+ %phi_i = phi i64 [ 1, %entry ], [ %nexti, %loop_end ]
+ %phi_j = phi i64 [ 1, %entry ], [ %nextj, %loop_end ]
+ %phi_k = phi i64 [ 0, %entry ], [ %nextk, %loop_end ]
+ br label %inner_loop
+
+inner_loop:
+ %phi = phi i64 [ %phi_k, %loop ], [ %nextk, %inner_loop ]
+ %idx = lshr i64 %phi, 6
+ %inputptr = getelementptr i64, i64* %x, i64 %idx
+ %input = load i64, i64* %inputptr, align 8
+ %masked = and i64 %phi, 63
+ %shiftedmasked = shl i64 1, %masked
+ %maskedinput = and i64 %input, %shiftedmasked
+ %cmp = icmp eq i64 %maskedinput, 0
+ %nextk = add i64 %phi, 1
+ br i1 %cmp, label %inner_loop, label %loop_end
+
+loop_end:
+ %nexti = add i64 %phi_i, 1
+ %nextj = add i64 %phi_j, 1
+ ; Register use, plus our clobbering of xmm7-xmm15 above, basically forces xmm6 here as
+ ; the only reasonable choice. The primary thing we care about is that it's
+ ; not one of the registers used in the loop (e.g. not the output reg here)
+;AVX-NOT: %xmm6
+;AVX: vcvtsi2sdq {{.*}}, %xmm6, {{%xmm[0-9]+}}
+;AVX-NOT: %xmm6
+ %nexti_f = sitofp i64 %nexti to double
+ %sub = fsub double %c1, %nexti_f
+ %mul = fmul double %sub, %c2
+;AVX: vcvtsi2sdq {{.*}}, %xmm6, {{%xmm[0-9]+}}
+;AVX-NOT: %xmm6
+ %phi_f = sitofp i64 %phi to double
+ %mul2 = fmul double %phi_f, %c3
+ %add2 = fadd double %mul, %mul2
+ %div = fdiv double %add2, %c4
+ %prev_j = add i64 %phi_j, -1
+ %outptr = getelementptr double, double* %y, i64 %prev_j
+ store double %div, double* %outptr, align 8
+ %done = icmp slt i64 %size, %nexti
+ br i1 %done, label %loopdone, label %loop
+
+loopdone:
+ ret void
+}
diff --git a/test/CodeGen/X86/bt.ll b/test/CodeGen/X86/bt.ll
index 6576f33a5b9c..cebcba38bd4f 100644
--- a/test/CodeGen/X86/bt.ll
+++ b/test/CodeGen/X86/bt.ll
@@ -43,7 +43,7 @@ define void @test2b(i32 %x, i32 %n) nounwind {
; CHECK-LABEL: test2b:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
-; CHECK-NEXT: jb .LBB1_2
+; CHECK-NEXT: jae .LBB1_1
;
entry:
%tmp29 = lshr i32 %x, %n
@@ -83,7 +83,7 @@ define void @atest2b(i32 %x, i32 %n) nounwind {
; CHECK-LABEL: atest2b:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
-; CHECK-NEXT: jb .LBB3_2
+; CHECK-NEXT: jae .LBB3_1
;
entry:
%tmp29 = ashr i32 %x, %n
@@ -103,7 +103,7 @@ define void @test3(i32 %x, i32 %n) nounwind {
; CHECK-LABEL: test3:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
-; CHECK-NEXT: jb .LBB4_2
+; CHECK-NEXT: jae .LBB4_1
;
entry:
%tmp29 = shl i32 1, %n
@@ -123,7 +123,7 @@ define void @test3b(i32 %x, i32 %n) nounwind {
; CHECK-LABEL: test3b:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
-; CHECK-NEXT: jb .LBB5_2
+; CHECK-NEXT: jae .LBB5_1
;
entry:
%tmp29 = shl i32 1, %n
diff --git a/test/CodeGen/X86/buildvec-insertvec.ll b/test/CodeGen/X86/buildvec-insertvec.ll
index 616d352a75d3..730376acdc93 100644
--- a/test/CodeGen/X86/buildvec-insertvec.ll
+++ b/test/CodeGen/X86/buildvec-insertvec.ll
@@ -1,15 +1,29 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE41
define void @foo(<3 x float> %in, <4 x i8>* nocapture %out) nounwind {
-; CHECK-LABEL: foo:
-; CHECK: # BB#0:
-; CHECK-NEXT: cvttps2dq %xmm0, %xmm0
-; CHECK-NEXT: movl $255, %eax
-; CHECK-NEXT: pinsrd $3, %eax, %xmm0
-; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
-; CHECK-NEXT: movd %xmm0, (%rdi)
-; CHECK-NEXT: retq
+; SSE2-LABEL: foo:
+; SSE2: # BB#0:
+; SSE2-NEXT: cvttps2dq %xmm0, %xmm0
+; SSE2-NEXT: movl $255, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
+; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
+; SSE2-NEXT: packuswb %xmm0, %xmm0
+; SSE2-NEXT: packuswb %xmm0, %xmm0
+; SSE2-NEXT: movd %xmm0, (%rdi)
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: foo:
+; SSE41: # BB#0:
+; SSE41-NEXT: cvttps2dq %xmm0, %xmm0
+; SSE41-NEXT: movl $255, %eax
+; SSE41-NEXT: pinsrd $3, %eax, %xmm0
+; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; SSE41-NEXT: movd %xmm0, (%rdi)
+; SSE41-NEXT: retq
%t0 = fptoui <3 x float> %in to <3 x i8>
%t1 = shufflevector <3 x i8> %t0, <3 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
%t2 = insertelement <4 x i8> %t1, i8 -1, i32 3
@@ -21,10 +35,21 @@ define void @foo(<3 x float> %in, <4 x i8>* nocapture %out) nounwind {
; blend with a zero vector if the build_vector contains negative zero.
define <4 x float> @test_negative_zero_1(<4 x float> %A) {
-; CHECK-LABEL: test_negative_zero_1:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2],zero
-; CHECK-NEXT: retq
+; SSE2-LABEL: test_negative_zero_1:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
+; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_negative_zero_1:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2],zero
+; SSE41-NEXT: retq
entry:
%0 = extractelement <4 x float> %A, i32 0
%1 = insertelement <4 x float> undef, float %0, i32 0
@@ -48,12 +73,19 @@ entry:
}
define <4 x float> @test_buildvector_v4f32_register(float %f0, float %f1, float %f2, float %f3) {
-; CHECK-LABEL: test_buildvector_v4f32_register:
-; CHECK: # BB#0:
-; CHECK-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; CHECK-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
-; CHECK-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
-; CHECK-NEXT: retq
+; SSE2-LABEL: test_buildvector_v4f32_register:
+; SSE2: # BB#0:
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_buildvector_v4f32_register:
+; SSE41: # BB#0:
+; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
+; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
+; SSE41-NEXT: retq
%ins0 = insertelement <4 x float> undef, float %f0, i32 0
%ins1 = insertelement <4 x float> %ins0, float %f1, i32 1
%ins2 = insertelement <4 x float> %ins1, float %f2, i32 2
@@ -62,13 +94,24 @@ define <4 x float> @test_buildvector_v4f32_register(float %f0, float %f1, float
}
define <4 x float> @test_buildvector_v4f32_load(float* %p0, float* %p1, float* %p2, float* %p3) {
-; CHECK-LABEL: test_buildvector_v4f32_load:
-; CHECK: # BB#0:
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
-; CHECK-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; CHECK-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
-; CHECK-NEXT: retq
+; SSE2-LABEL: test_buildvector_v4f32_load:
+; SSE2: # BB#0:
+; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_buildvector_v4f32_load:
+; SSE41: # BB#0:
+; SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
+; SSE41-NEXT: retq
%f0 = load float, float* %p0, align 4
%f1 = load float, float* %p1, align 4
%f2 = load float, float* %p2, align 4
@@ -81,12 +124,20 @@ define <4 x float> @test_buildvector_v4f32_load(float* %p0, float* %p1, float* %
}
define <4 x float> @test_buildvector_v4f32_partial_load(float %f0, float %f1, float %f2, float* %p3) {
-; CHECK-LABEL: test_buildvector_v4f32_partial_load:
-; CHECK: # BB#0:
-; CHECK-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; CHECK-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
-; CHECK-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
-; CHECK-NEXT: retq
+; SSE2-LABEL: test_buildvector_v4f32_partial_load:
+; SSE2: # BB#0:
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_buildvector_v4f32_partial_load:
+; SSE41: # BB#0:
+; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
+; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
+; SSE41-NEXT: retq
%f3 = load float, float* %p3, align 4
%ins0 = insertelement <4 x float> undef, float %f0, i32 0
%ins1 = insertelement <4 x float> %ins0, float %f1, i32 1
@@ -94,3 +145,405 @@ define <4 x float> @test_buildvector_v4f32_partial_load(float %f0, float %f1, fl
%ins3 = insertelement <4 x float> %ins2, float %f3, i32 3
ret <4 x float> %ins3
}
+
+define <4 x i32> @test_buildvector_v4i32_register(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+; SSE2-LABEL: test_buildvector_v4i32_register:
+; SSE2: # BB#0:
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: movd %esi, %xmm1
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: movd %edx, %xmm2
+; SSE2-NEXT: movd %edi, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_buildvector_v4i32_register:
+; SSE41: # BB#0:
+; SSE41-NEXT: movd %edi, %xmm0
+; SSE41-NEXT: pinsrd $1, %esi, %xmm0
+; SSE41-NEXT: pinsrd $2, %edx, %xmm0
+; SSE41-NEXT: pinsrd $3, %ecx, %xmm0
+; SSE41-NEXT: retq
+ %ins0 = insertelement <4 x i32> undef, i32 %a0, i32 0
+ %ins1 = insertelement <4 x i32> %ins0, i32 %a1, i32 1
+ %ins2 = insertelement <4 x i32> %ins1, i32 %a2, i32 2
+ %ins3 = insertelement <4 x i32> %ins2, i32 %a3, i32 3
+ ret <4 x i32> %ins3
+}
+
+define <4 x i32> @test_buildvector_v4i32_partial(i32 %a0, i32 %a3) {
+; SSE2-LABEL: test_buildvector_v4i32_partial:
+; SSE2: # BB#0:
+; SSE2-NEXT: movd %edi, %xmm0
+; SSE2-NEXT: movd %esi, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_buildvector_v4i32_partial:
+; SSE41: # BB#0:
+; SSE41-NEXT: movd %edi, %xmm0
+; SSE41-NEXT: pinsrd $3, %esi, %xmm0
+; SSE41-NEXT: retq
+ %ins0 = insertelement <4 x i32> undef, i32 %a0, i32 0
+ %ins1 = insertelement <4 x i32> %ins0, i32 undef, i32 1
+ %ins2 = insertelement <4 x i32> %ins1, i32 undef, i32 2
+ %ins3 = insertelement <4 x i32> %ins2, i32 %a3, i32 3
+ ret <4 x i32> %ins3
+}
+
+define <4 x i32> @test_buildvector_v4i32_register_zero(i32 %a0, i32 %a2, i32 %a3) {
+; CHECK-LABEL: test_buildvector_v4i32_register_zero:
+; CHECK: # BB#0:
+; CHECK-NEXT: movd %edx, %xmm0
+; CHECK-NEXT: movd %esi, %xmm1
+; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT: movd %edi, %xmm0
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retq
+ %ins0 = insertelement <4 x i32> undef, i32 %a0, i32 0
+ %ins1 = insertelement <4 x i32> %ins0, i32 0, i32 1
+ %ins2 = insertelement <4 x i32> %ins1, i32 %a2, i32 2
+ %ins3 = insertelement <4 x i32> %ins2, i32 %a3, i32 3
+ ret <4 x i32> %ins3
+}
+
+define <4 x i32> @test_buildvector_v4i32_register_zero_2(i32 %a1, i32 %a2, i32 %a3) {
+; CHECK-LABEL: test_buildvector_v4i32_register_zero_2:
+; CHECK: # BB#0:
+; CHECK-NEXT: movd %edx, %xmm0
+; CHECK-NEXT: movd %esi, %xmm1
+; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT: movd %edi, %xmm0
+; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,1]
+; CHECK-NEXT: retq
+ %ins0 = insertelement <4 x i32> undef, i32 0, i32 0
+ %ins1 = insertelement <4 x i32> %ins0, i32 %a1, i32 1
+ %ins2 = insertelement <4 x i32> %ins1, i32 %a2, i32 2
+ %ins3 = insertelement <4 x i32> %ins2, i32 %a3, i32 3
+ ret <4 x i32> %ins3
+}
+
+define <8 x i16> @test_buildvector_v8i16_register(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) {
+; SSE2-LABEL: test_buildvector_v8i16_register:
+; SSE2: # BB#0:
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: movd %r9d, %xmm1
+; SSE2-NEXT: movd %esi, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT: movd %edx, %xmm1
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: movd %r8d, %xmm3
+; SSE2-NEXT: movd %edi, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_buildvector_v8i16_register:
+; SSE41: # BB#0:
+; SSE41-NEXT: movd %edi, %xmm0
+; SSE41-NEXT: pinsrw $1, %esi, %xmm0
+; SSE41-NEXT: pinsrw $2, %edx, %xmm0
+; SSE41-NEXT: pinsrw $3, %ecx, %xmm0
+; SSE41-NEXT: pinsrw $4, %r8d, %xmm0
+; SSE41-NEXT: pinsrw $5, %r9d, %xmm0
+; SSE41-NEXT: pinsrw $6, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-NEXT: pinsrw $7, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-NEXT: retq
+ %ins0 = insertelement <8 x i16> undef, i16 %a0, i32 0
+ %ins1 = insertelement <8 x i16> %ins0, i16 %a1, i32 1
+ %ins2 = insertelement <8 x i16> %ins1, i16 %a2, i32 2
+ %ins3 = insertelement <8 x i16> %ins2, i16 %a3, i32 3
+ %ins4 = insertelement <8 x i16> %ins3, i16 %a4, i32 4
+ %ins5 = insertelement <8 x i16> %ins4, i16 %a5, i32 5
+ %ins6 = insertelement <8 x i16> %ins5, i16 %a6, i32 6
+ %ins7 = insertelement <8 x i16> %ins6, i16 %a7, i32 7
+ ret <8 x i16> %ins7
+}
+
+define <8 x i16> @test_buildvector_v8i16_partial(i16 %a1, i16 %a3, i16 %a4, i16 %a5) {
+; CHECK-LABEL: test_buildvector_v8i16_partial:
+; CHECK: # BB#0:
+; CHECK-NEXT: pxor %xmm0, %xmm0
+; CHECK-NEXT: pinsrw $1, %edi, %xmm0
+; CHECK-NEXT: pinsrw $3, %esi, %xmm0
+; CHECK-NEXT: pinsrw $4, %edx, %xmm0
+; CHECK-NEXT: pinsrw $5, %ecx, %xmm0
+; CHECK-NEXT: retq
+ %ins0 = insertelement <8 x i16> undef, i16 undef, i32 0
+ %ins1 = insertelement <8 x i16> %ins0, i16 %a1, i32 1
+ %ins2 = insertelement <8 x i16> %ins1, i16 undef, i32 2
+ %ins3 = insertelement <8 x i16> %ins2, i16 %a3, i32 3
+ %ins4 = insertelement <8 x i16> %ins3, i16 %a4, i32 4
+ %ins5 = insertelement <8 x i16> %ins4, i16 %a5, i32 5
+ %ins6 = insertelement <8 x i16> %ins5, i16 undef, i32 6
+ %ins7 = insertelement <8 x i16> %ins6, i16 undef, i32 7
+ ret <8 x i16> %ins7
+}
+
+define <8 x i16> @test_buildvector_v8i16_register_zero(i16 %a0, i16 %a3, i16 %a4, i16 %a5) {
+; CHECK-LABEL: test_buildvector_v8i16_register_zero:
+; CHECK: # BB#0:
+; CHECK-NEXT: pxor %xmm0, %xmm0
+; CHECK-NEXT: pinsrw $0, %edi, %xmm0
+; CHECK-NEXT: pinsrw $3, %esi, %xmm0
+; CHECK-NEXT: pinsrw $4, %edx, %xmm0
+; CHECK-NEXT: pinsrw $5, %ecx, %xmm0
+; CHECK-NEXT: retq
+ %ins0 = insertelement <8 x i16> undef, i16 %a0, i32 0
+ %ins1 = insertelement <8 x i16> %ins0, i16 0, i32 1
+ %ins2 = insertelement <8 x i16> %ins1, i16 0, i32 2
+ %ins3 = insertelement <8 x i16> %ins2, i16 %a3, i32 3
+ %ins4 = insertelement <8 x i16> %ins3, i16 %a4, i32 4
+ %ins5 = insertelement <8 x i16> %ins4, i16 %a5, i32 5
+ %ins6 = insertelement <8 x i16> %ins5, i16 0, i32 6
+ %ins7 = insertelement <8 x i16> %ins6, i16 0, i32 7
+ ret <8 x i16> %ins7
+}
+
+define <8 x i16> @test_buildvector_v8i16_register_zero_2(i16 %a1, i16 %a3, i16 %a4, i16 %a5) {
+; CHECK-LABEL: test_buildvector_v8i16_register_zero_2:
+; CHECK: # BB#0:
+; CHECK-NEXT: pxor %xmm0, %xmm0
+; CHECK-NEXT: pinsrw $1, %edi, %xmm0
+; CHECK-NEXT: pinsrw $3, %esi, %xmm0
+; CHECK-NEXT: pinsrw $4, %edx, %xmm0
+; CHECK-NEXT: pinsrw $5, %ecx, %xmm0
+; CHECK-NEXT: retq
+ %ins0 = insertelement <8 x i16> undef, i16 0, i32 0
+ %ins1 = insertelement <8 x i16> %ins0, i16 %a1, i32 1
+ %ins2 = insertelement <8 x i16> %ins1, i16 0, i32 2
+ %ins3 = insertelement <8 x i16> %ins2, i16 %a3, i32 3
+ %ins4 = insertelement <8 x i16> %ins3, i16 %a4, i32 4
+ %ins5 = insertelement <8 x i16> %ins4, i16 %a5, i32 5
+ %ins6 = insertelement <8 x i16> %ins5, i16 0, i32 6
+ %ins7 = insertelement <8 x i16> %ins6, i16 0, i32 7
+ ret <8 x i16> %ins7
+}
+
+define <16 x i8> @test_buildvector_v16i8_register(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15) {
+; SSE2-LABEL: test_buildvector_v16i8_register:
+; SSE2: # BB#0:
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: movd %r9d, %xmm1
+; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: movd %esi, %xmm2
+; SSE2-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movd %edx, %xmm3
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-NEXT: movd %r8d, %xmm1
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movd %edi, %xmm0
+; SSE2-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_buildvector_v16i8_register:
+; SSE41: # BB#0:
+; SSE41-NEXT: movd %edi, %xmm0
+; SSE41-NEXT: pinsrb $1, %esi, %xmm0
+; SSE41-NEXT: pinsrb $2, %edx, %xmm0
+; SSE41-NEXT: pinsrb $3, %ecx, %xmm0
+; SSE41-NEXT: pinsrb $4, %r8d, %xmm0
+; SSE41-NEXT: pinsrb $5, %r9d, %xmm0
+; SSE41-NEXT: pinsrb $6, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-NEXT: pinsrb $7, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-NEXT: pinsrb $8, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-NEXT: pinsrb $9, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-NEXT: pinsrb $10, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-NEXT: pinsrb $11, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-NEXT: pinsrb $12, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-NEXT: pinsrb $13, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-NEXT: pinsrb $14, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-NEXT: pinsrb $15, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-NEXT: retq
+ %ins0 = insertelement <16 x i8> undef, i8 %a0, i32 0
+ %ins1 = insertelement <16 x i8> %ins0, i8 %a1, i32 1
+ %ins2 = insertelement <16 x i8> %ins1, i8 %a2, i32 2
+ %ins3 = insertelement <16 x i8> %ins2, i8 %a3, i32 3
+ %ins4 = insertelement <16 x i8> %ins3, i8 %a4, i32 4
+ %ins5 = insertelement <16 x i8> %ins4, i8 %a5, i32 5
+ %ins6 = insertelement <16 x i8> %ins5, i8 %a6, i32 6
+ %ins7 = insertelement <16 x i8> %ins6, i8 %a7, i32 7
+ %ins8 = insertelement <16 x i8> %ins7, i8 %a8, i32 8
+ %ins9 = insertelement <16 x i8> %ins8, i8 %a9, i32 9
+ %ins10 = insertelement <16 x i8> %ins9, i8 %a10, i32 10
+ %ins11 = insertelement <16 x i8> %ins10, i8 %a11, i32 11
+ %ins12 = insertelement <16 x i8> %ins11, i8 %a12, i32 12
+ %ins13 = insertelement <16 x i8> %ins12, i8 %a13, i32 13
+ %ins14 = insertelement <16 x i8> %ins13, i8 %a14, i32 14
+ %ins15 = insertelement <16 x i8> %ins14, i8 %a15, i32 15
+ ret <16 x i8> %ins15
+}
+
+define <16 x i8> @test_buildvector_v16i8_partial(i8 %a2, i8 %a6, i8 %a8, i8 %a11, i8 %a12, i8 %a15) {
+; SSE2-LABEL: test_buildvector_v16i8_partial:
+; SSE2: # BB#0:
+; SSE2-NEXT: movzbl %dil, %eax
+; SSE2-NEXT: pinsrw $1, %eax, %xmm0
+; SSE2-NEXT: movzbl %sil, %eax
+; SSE2-NEXT: pinsrw $3, %eax, %xmm0
+; SSE2-NEXT: movzbl %dl, %eax
+; SSE2-NEXT: pinsrw $4, %eax, %xmm0
+; SSE2-NEXT: shll $8, %ecx
+; SSE2-NEXT: pinsrw $5, %ecx, %xmm0
+; SSE2-NEXT: movzbl %r8b, %eax
+; SSE2-NEXT: pinsrw $6, %eax, %xmm0
+; SSE2-NEXT: shll $8, %r9d
+; SSE2-NEXT: pinsrw $7, %r9d, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_buildvector_v16i8_partial:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm0, %xmm0
+; SSE41-NEXT: pinsrb $2, %edi, %xmm0
+; SSE41-NEXT: pinsrb $6, %esi, %xmm0
+; SSE41-NEXT: pinsrb $8, %edx, %xmm0
+; SSE41-NEXT: pinsrb $11, %ecx, %xmm0
+; SSE41-NEXT: pinsrb $12, %r8d, %xmm0
+; SSE41-NEXT: pinsrb $15, %r9d, %xmm0
+; SSE41-NEXT: retq
+ %ins0 = insertelement <16 x i8> undef, i8 undef, i32 0
+ %ins1 = insertelement <16 x i8> %ins0, i8 undef, i32 1
+ %ins2 = insertelement <16 x i8> %ins1, i8 %a2, i32 2
+ %ins3 = insertelement <16 x i8> %ins2, i8 undef, i32 3
+ %ins4 = insertelement <16 x i8> %ins3, i8 undef, i32 4
+ %ins5 = insertelement <16 x i8> %ins4, i8 undef, i32 5
+ %ins6 = insertelement <16 x i8> %ins5, i8 %a6, i32 6
+ %ins7 = insertelement <16 x i8> %ins6, i8 undef, i32 7
+ %ins8 = insertelement <16 x i8> %ins7, i8 %a8, i32 8
+ %ins9 = insertelement <16 x i8> %ins8, i8 undef, i32 9
+ %ins10 = insertelement <16 x i8> %ins9, i8 undef, i32 10
+ %ins11 = insertelement <16 x i8> %ins10, i8 %a11, i32 11
+ %ins12 = insertelement <16 x i8> %ins11, i8 %a12, i32 12
+ %ins13 = insertelement <16 x i8> %ins12, i8 undef, i32 13
+ %ins14 = insertelement <16 x i8> %ins13, i8 undef, i32 14
+ %ins15 = insertelement <16 x i8> %ins14, i8 %a15, i32 15
+ ret <16 x i8> %ins15
+}
+
+define <16 x i8> @test_buildvector_v16i8_register_zero(i8 %a0, i8 %a4, i8 %a6, i8 %a8, i8 %a11, i8 %a12, i8 %a15) {
+; SSE2-LABEL: test_buildvector_v16i8_register_zero:
+; SSE2: # BB#0:
+; SSE2-NEXT: movzbl %sil, %eax
+; SSE2-NEXT: movzbl %dil, %esi
+; SSE2-NEXT: movd %esi, %xmm0
+; SSE2-NEXT: pinsrw $2, %eax, %xmm0
+; SSE2-NEXT: movzbl %dl, %eax
+; SSE2-NEXT: pinsrw $3, %eax, %xmm0
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: pinsrw $4, %eax, %xmm0
+; SSE2-NEXT: shll $8, %r8d
+; SSE2-NEXT: pinsrw $5, %r8d, %xmm0
+; SSE2-NEXT: movzbl %r9b, %eax
+; SSE2-NEXT: pinsrw $6, %eax, %xmm0
+; SSE2-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: shll $8, %eax
+; SSE2-NEXT: pinsrw $7, %eax, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_buildvector_v16i8_register_zero:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm0, %xmm0
+; SSE41-NEXT: pinsrb $0, %edi, %xmm0
+; SSE41-NEXT: pinsrb $4, %esi, %xmm0
+; SSE41-NEXT: pinsrb $6, %edx, %xmm0
+; SSE41-NEXT: pinsrb $8, %ecx, %xmm0
+; SSE41-NEXT: pinsrb $11, %r8d, %xmm0
+; SSE41-NEXT: pinsrb $12, %r9d, %xmm0
+; SSE41-NEXT: pinsrb $15, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-NEXT: retq
+ %ins0 = insertelement <16 x i8> undef, i8 %a0, i32 0
+ %ins1 = insertelement <16 x i8> %ins0, i8 0, i32 1
+ %ins2 = insertelement <16 x i8> %ins1, i8 0, i32 2
+ %ins3 = insertelement <16 x i8> %ins2, i8 0, i32 3
+ %ins4 = insertelement <16 x i8> %ins3, i8 %a4, i32 4
+ %ins5 = insertelement <16 x i8> %ins4, i8 0, i32 5
+ %ins6 = insertelement <16 x i8> %ins5, i8 %a6, i32 6
+ %ins7 = insertelement <16 x i8> %ins6, i8 0, i32 7
+ %ins8 = insertelement <16 x i8> %ins7, i8 %a8, i32 8
+ %ins9 = insertelement <16 x i8> %ins8, i8 0, i32 9
+ %ins10 = insertelement <16 x i8> %ins9, i8 0, i32 10
+ %ins11 = insertelement <16 x i8> %ins10, i8 %a11, i32 11
+ %ins12 = insertelement <16 x i8> %ins11, i8 %a12, i32 12
+ %ins13 = insertelement <16 x i8> %ins12, i8 0, i32 13
+ %ins14 = insertelement <16 x i8> %ins13, i8 0, i32 14
+ %ins15 = insertelement <16 x i8> %ins14, i8 %a15, i32 15
+ ret <16 x i8> %ins15
+}
+
+define <16 x i8> @test_buildvector_v16i8_register_zero_2(i8 %a2, i8 %a3, i8 %a6, i8 %a8, i8 %a11, i8 %a12, i8 %a15) {
+; SSE2-LABEL: test_buildvector_v16i8_register_zero_2:
+; SSE2: # BB#0:
+; SSE2-NEXT: shll $8, %esi
+; SSE2-NEXT: movzbl %dil, %eax
+; SSE2-NEXT: orl %esi, %eax
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: pinsrw $1, %eax, %xmm0
+; SSE2-NEXT: movzbl %dl, %eax
+; SSE2-NEXT: pinsrw $3, %eax, %xmm0
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: pinsrw $4, %eax, %xmm0
+; SSE2-NEXT: shll $8, %r8d
+; SSE2-NEXT: pinsrw $5, %r8d, %xmm0
+; SSE2-NEXT: movzbl %r9b, %eax
+; SSE2-NEXT: pinsrw $6, %eax, %xmm0
+; SSE2-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: shll $8, %eax
+; SSE2-NEXT: pinsrw $7, %eax, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_buildvector_v16i8_register_zero_2:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm0, %xmm0
+; SSE41-NEXT: pinsrb $2, %edi, %xmm0
+; SSE41-NEXT: pinsrb $3, %esi, %xmm0
+; SSE41-NEXT: pinsrb $6, %edx, %xmm0
+; SSE41-NEXT: pinsrb $8, %ecx, %xmm0
+; SSE41-NEXT: pinsrb $11, %r8d, %xmm0
+; SSE41-NEXT: pinsrb $12, %r9d, %xmm0
+; SSE41-NEXT: pinsrb $15, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-NEXT: retq
+ %ins0 = insertelement <16 x i8> undef, i8 0, i32 0
+ %ins1 = insertelement <16 x i8> %ins0, i8 0, i32 1
+ %ins2 = insertelement <16 x i8> %ins1, i8 %a2, i32 2
+ %ins3 = insertelement <16 x i8> %ins2, i8 %a3, i32 3
+ %ins4 = insertelement <16 x i8> %ins3, i8 0, i32 4
+ %ins5 = insertelement <16 x i8> %ins4, i8 0, i32 5
+ %ins6 = insertelement <16 x i8> %ins5, i8 %a6, i32 6
+ %ins7 = insertelement <16 x i8> %ins6, i8 0, i32 7
+ %ins8 = insertelement <16 x i8> %ins7, i8 %a8, i32 8
+ %ins9 = insertelement <16 x i8> %ins8, i8 0, i32 9
+ %ins10 = insertelement <16 x i8> %ins9, i8 0, i32 10
+ %ins11 = insertelement <16 x i8> %ins10, i8 %a11, i32 11
+ %ins12 = insertelement <16 x i8> %ins11, i8 %a12, i32 12
+ %ins13 = insertelement <16 x i8> %ins12, i8 0, i32 13
+ %ins14 = insertelement <16 x i8> %ins13, i8 0, i32 14
+ %ins15 = insertelement <16 x i8> %ins14, i8 %a15, i32 15
+ ret <16 x i8> %ins15
+}
diff --git a/test/CodeGen/X86/bypass-slow-division-32.ll b/test/CodeGen/X86/bypass-slow-division-32.ll
index ea545d22385c..9f266647d8aa 100644
--- a/test/CodeGen/X86/bypass-slow-division-32.ll
+++ b/test/CodeGen/X86/bypass-slow-division-32.ll
@@ -95,20 +95,19 @@ define i32 @Test_use_div_and_idiv(i32 %a, i32 %b) nounwind {
; CHECK-NEXT: idivl %ebx
; CHECK-NEXT: movl %eax, %esi
; CHECK-NEXT: testl $-256, %edi
-; CHECK-NEXT: jne .LBB3_5
-; CHECK-NEXT: jmp .LBB3_4
-; CHECK-NEXT: .LBB3_1:
-; CHECK-NEXT: movzbl %cl, %eax
-; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
-; CHECK-NEXT: divb %bl
-; CHECK-NEXT: movzbl %al, %esi
-; CHECK-NEXT: testl $-256, %edi
; CHECK-NEXT: je .LBB3_4
; CHECK-NEXT: .LBB3_5:
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: divl %ebx
; CHECK-NEXT: jmp .LBB3_6
+; CHECK-NEXT: .LBB3_1:
+; CHECK-NEXT: movzbl %cl, %eax
+; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; CHECK-NEXT: divb %bl
+; CHECK-NEXT: movzbl %al, %esi
+; CHECK-NEXT: testl $-256, %edi
+; CHECK-NEXT: jne .LBB3_5
; CHECK-NEXT: .LBB3_4:
; CHECK-NEXT: movzbl %cl, %eax
; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
diff --git a/test/CodeGen/X86/catchpad-lifetime.ll b/test/CodeGen/X86/catchpad-lifetime.ll
index 77d3f25057cf..d85adec360c8 100644
--- a/test/CodeGen/X86/catchpad-lifetime.ll
+++ b/test/CodeGen/X86/catchpad-lifetime.ll
@@ -26,9 +26,9 @@ catch.pad: ; preds = %catch.dispatch
%cp = catchpad within %cs [i8* null, i32 0, i8** %alloca1]
store volatile i8* null, i8** %alloca1
%bc1 = bitcast i8** %alloca1 to i8*
- call void @llvm.lifetime.end(i64 4, i8* nonnull %bc1)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %bc1)
%bc2 = bitcast i8** %alloca2 to i8*
- call void @llvm.lifetime.start(i64 4, i8* %bc2)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %bc2)
store volatile i8* null, i8** %alloca1
unreachable
@@ -63,9 +63,9 @@ catch.pad: ; preds = %catch.dispatch
%cp = catchpad within %cs [i8* null, i32 0, i8** null]
store volatile i8* null, i8** %alloca1
%bc1 = bitcast i8** %alloca1 to i8*
- call void @llvm.lifetime.end(i64 4, i8* nonnull %bc1)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %bc1)
%bc2 = bitcast i8** %alloca2 to i8*
- call void @llvm.lifetime.start(i64 4, i8* %bc2)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %bc2)
store volatile i8* null, i8** %alloca1
unreachable
@@ -83,9 +83,9 @@ unreachable: ; preds = %entry
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
attributes #0 = { argmemonly nounwind }
diff --git a/test/CodeGen/X86/catchpad-weight.ll b/test/CodeGen/X86/catchpad-weight.ll
index 60939bc6b03e..6caf0c6012f7 100644
--- a/test/CodeGen/X86/catchpad-weight.ll
+++ b/test/CodeGen/X86/catchpad-weight.ll
@@ -26,7 +26,7 @@ define i32 @main() #0 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to
entry:
%o = alloca %struct.HasDtor, align 1
%0 = getelementptr inbounds %struct.HasDtor, %struct.HasDtor* %o, i64 0, i32 0
- call void @llvm.lifetime.start(i64 1, i8* %0) #4
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %0) #4
invoke void @"\01?may_throw@@YAXXZ"()
to label %try.cont unwind label %catch.dispatch
@@ -39,7 +39,7 @@ catch.5: ; preds = %catch.dispatch
try.cont: ; preds = %entry, %catch, %catch.3, %catch.5
call void @"\01??1HasDtor@@QEAA@XZ"(%struct.HasDtor* nonnull %o) #4
- call void @llvm.lifetime.end(i64 1, i8* %0) #4
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %0) #4
ret i32 0
catch.dispatch.1: ; preds = %catch.dispatch
@@ -63,7 +63,7 @@ ehcleanup: ; preds = %catchendblock
}
; Function Attrs: nounwind argmemonly
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare void @"\01?may_throw@@YAXXZ"() #2
@@ -73,7 +73,7 @@ declare i32 @__CxxFrameHandler3(...)
declare void @"\01??1HasDtor@@QEAA@XZ"(%struct.HasDtor*) #3
; Function Attrs: nounwind argmemonly
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
attributes #0 = { uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind argmemonly }
diff --git a/test/CodeGen/X86/chain_order.ll b/test/CodeGen/X86/chain_order.ll
index 8c3aa6e15156..cc48e5b6149c 100644
--- a/test/CodeGen/X86/chain_order.ll
+++ b/test/CodeGen/X86/chain_order.ll
@@ -11,9 +11,9 @@ define void @cftx020(double* nocapture %a) {
; CHECK-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vmovupd (%rdi), %xmm1
-; CHECK-NEXT: vsubpd 16(%rdi), %xmm1, %xmm1
; CHECK-NEXT: vmovupd %xmm0, (%rdi)
-; CHECK-NEXT: vmovupd %xmm1, 16(%rdi)
+; CHECK-NEXT: vsubpd 16(%rdi), %xmm1, %xmm0
+; CHECK-NEXT: vmovupd %xmm0, 16(%rdi)
; CHECK-NEXT: retq
entry:
%0 = load double, double* %a, align 8
diff --git a/test/CodeGen/X86/clear_upper_vector_element_bits.ll b/test/CodeGen/X86/clear_upper_vector_element_bits.ll
index 693bf2e17d51..c425e3a92d17 100644
--- a/test/CodeGen/X86/clear_upper_vector_element_bits.ll
+++ b/test/CodeGen/X86/clear_upper_vector_element_bits.ll
@@ -35,6 +35,44 @@ define <2 x i64> @_clearupper2xi64a(<2 x i64>) nounwind {
ret <2 x i64> %v1
}
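+; Clear the upper 32 bits of each i64 lane via per-element trunc/zext; this
+; should fold to a single constant AND (SSE) or a blend with zero (AVX).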
+define <4 x i64> @_clearupper4xi64a(<4 x i64>) nounwind {
+; SSE-LABEL: _clearupper4xi64a:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [4294967295,4294967295]
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper4xi64a:
+; AVX1: # BB#0:
+; AVX1-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper4xi64a:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX2-NEXT: retq
+ %x0 = extractelement <4 x i64> %0, i32 0
+ %x1 = extractelement <4 x i64> %0, i32 1
+ %x2 = extractelement <4 x i64> %0, i32 2
+ %x3 = extractelement <4 x i64> %0, i32 3
+ %trunc0 = trunc i64 %x0 to i32
+ %trunc1 = trunc i64 %x1 to i32
+ %trunc2 = trunc i64 %x2 to i32
+ %trunc3 = trunc i64 %x3 to i32
+ %ext0 = zext i32 %trunc0 to i64
+ %ext1 = zext i32 %trunc1 to i64
+ %ext2 = zext i32 %trunc2 to i64
+ %ext3 = zext i32 %trunc3 to i64
+ %v0 = insertelement <4 x i64> undef, i64 %ext0, i32 0
+ %v1 = insertelement <4 x i64> %v0, i64 %ext1, i32 1
+ %v2 = insertelement <4 x i64> %v1, i64 %ext2, i32 2
+ %v3 = insertelement <4 x i64> %v2, i64 %ext3, i32 3
+ ret <4 x i64> %v3
+}
+
define <4 x i32> @_clearupper4xi32a(<4 x i32>) nounwind {
; SSE-LABEL: _clearupper4xi32a:
; SSE: # BB#0:
@@ -65,6 +103,59 @@ define <4 x i32> @_clearupper4xi32a(<4 x i32>) nounwind {
ret <4 x i32> %v3
}
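+; <8 x i32> variant clearing the upper 16 bits of every lane, again ideally a
+; single AND or zero-blend rather than per-element inserts.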
+define <8 x i32> @_clearupper8xi32a(<8 x i32>) nounwind {
+; SSE-LABEL: _clearupper8xi32a:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [65535,65535,65535,65535]
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper8xi32a:
+; AVX1: # BB#0:
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper8xi32a:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
+; AVX2-NEXT: retq
+ %x0 = extractelement <8 x i32> %0, i32 0
+ %x1 = extractelement <8 x i32> %0, i32 1
+ %x2 = extractelement <8 x i32> %0, i32 2
+ %x3 = extractelement <8 x i32> %0, i32 3
+ %x4 = extractelement <8 x i32> %0, i32 4
+ %x5 = extractelement <8 x i32> %0, i32 5
+ %x6 = extractelement <8 x i32> %0, i32 6
+ %x7 = extractelement <8 x i32> %0, i32 7
+ %trunc0 = trunc i32 %x0 to i16
+ %trunc1 = trunc i32 %x1 to i16
+ %trunc2 = trunc i32 %x2 to i16
+ %trunc3 = trunc i32 %x3 to i16
+ %trunc4 = trunc i32 %x4 to i16
+ %trunc5 = trunc i32 %x5 to i16
+ %trunc6 = trunc i32 %x6 to i16
+ %trunc7 = trunc i32 %x7 to i16
+ %ext0 = zext i16 %trunc0 to i32
+ %ext1 = zext i16 %trunc1 to i32
+ %ext2 = zext i16 %trunc2 to i32
+ %ext3 = zext i16 %trunc3 to i32
+ %ext4 = zext i16 %trunc4 to i32
+ %ext5 = zext i16 %trunc5 to i32
+ %ext6 = zext i16 %trunc6 to i32
+ %ext7 = zext i16 %trunc7 to i32
+ %v0 = insertelement <8 x i32> undef, i32 %ext0, i32 0
+ %v1 = insertelement <8 x i32> %v0, i32 %ext1, i32 1
+ %v2 = insertelement <8 x i32> %v1, i32 %ext2, i32 2
+ %v3 = insertelement <8 x i32> %v2, i32 %ext3, i32 3
+ %v4 = insertelement <8 x i32> %v3, i32 %ext4, i32 4
+ %v5 = insertelement <8 x i32> %v4, i32 %ext5, i32 5
+ %v6 = insertelement <8 x i32> %v5, i32 %ext6, i32 6
+ %v7 = insertelement <8 x i32> %v6, i32 %ext7, i32 7
+ ret <8 x i32> %v7
+}
+
define <8 x i16> @_clearupper8xi16a(<8 x i16>) nounwind {
; SSE-LABEL: _clearupper8xi16a:
; SSE: # BB#0:
@@ -94,21 +185,7 @@ define <8 x i16> @_clearupper8xi16a(<8 x i16>) nounwind {
;
; AVX-LABEL: _clearupper8xi16a:
; AVX: # BB#0:
-; AVX-NEXT: vpextrw $1, %xmm0, %eax
-; AVX-NEXT: vpextrw $2, %xmm0, %ecx
-; AVX-NEXT: vpextrw $3, %xmm0, %edx
-; AVX-NEXT: vpextrw $4, %xmm0, %esi
-; AVX-NEXT: vpextrw $5, %xmm0, %edi
-; AVX-NEXT: vpextrw $6, %xmm0, %r8d
-; AVX-NEXT: vpextrw $7, %xmm0, %r9d
-; AVX-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
-; AVX-NEXT: vpinsrw $3, %edx, %xmm0, %xmm0
-; AVX-NEXT: vpinsrw $4, %esi, %xmm0, %xmm0
-; AVX-NEXT: vpinsrw $5, %edi, %xmm0, %xmm0
-; AVX-NEXT: vpinsrw $6, %r8d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrw $7, %r9d, %xmm0, %xmm0
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <8 x i16> %0, i32 0
%x1 = extractelement <8 x i16> %0, i32 1
@@ -145,90 +222,194 @@ define <8 x i16> @_clearupper8xi16a(<8 x i16>) nounwind {
ret <8 x i16> %v7
}
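+; <16 x i16> variant clearing the upper 8 bits of every lane. AVX folds this to
+; one vandps, but SSE currently scalarizes through pextrw/punpcklwd before the mask.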
+define <16 x i16> @_clearupper16xi16a(<16 x i16>) nounwind {
+; SSE-LABEL: _clearupper16xi16a:
+; SSE: # BB#0:
+; SSE-NEXT: pushq %rbp
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %r12
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: pextrw $1, %xmm0, %edi
+; SSE-NEXT: pextrw $2, %xmm0, %eax
+; SSE-NEXT: pextrw $3, %xmm0, %ecx
+; SSE-NEXT: pextrw $4, %xmm0, %edx
+; SSE-NEXT: pextrw $5, %xmm0, %esi
+; SSE-NEXT: pextrw $6, %xmm0, %ebx
+; SSE-NEXT: pextrw $7, %xmm0, %ebp
+; SSE-NEXT: pextrw $1, %xmm1, %r10d
+; SSE-NEXT: pextrw $2, %xmm1, %r9d
+; SSE-NEXT: pextrw $3, %xmm1, %r14d
+; SSE-NEXT: pextrw $4, %xmm1, %r8d
+; SSE-NEXT: pextrw $5, %xmm1, %r15d
+; SSE-NEXT: pextrw $6, %xmm1, %r11d
+; SSE-NEXT: pextrw $7, %xmm1, %r12d
+; SSE-NEXT: movd %ebp, %xmm2
+; SSE-NEXT: movd %ecx, %xmm3
+; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE-NEXT: movd %esi, %xmm2
+; SSE-NEXT: movd %edi, %xmm4
+; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE-NEXT: movd %ebx, %xmm2
+; SSE-NEXT: movd %eax, %xmm3
+; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE-NEXT: movd %edx, %xmm2
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: movd %r12d, %xmm3
+; SSE-NEXT: movd %r14d, %xmm4
+; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE-NEXT: movd %r15d, %xmm3
+; SSE-NEXT: movd %r10d, %xmm5
+; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; SSE-NEXT: movd %r11d, %xmm3
+; SSE-NEXT: movd %r9d, %xmm4
+; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE-NEXT: movd %r8d, %xmm3
+; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r12
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: popq %rbp
+; SSE-NEXT: retq
+;
+; AVX-LABEL: _clearupper16xi16a:
+; AVX: # BB#0:
+; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: retq
+ %x0 = extractelement <16 x i16> %0, i32 0
+ %x1 = extractelement <16 x i16> %0, i32 1
+ %x2 = extractelement <16 x i16> %0, i32 2
+ %x3 = extractelement <16 x i16> %0, i32 3
+ %x4 = extractelement <16 x i16> %0, i32 4
+ %x5 = extractelement <16 x i16> %0, i32 5
+ %x6 = extractelement <16 x i16> %0, i32 6
+ %x7 = extractelement <16 x i16> %0, i32 7
+ %x8 = extractelement <16 x i16> %0, i32 8
+ %x9 = extractelement <16 x i16> %0, i32 9
+ %x10 = extractelement <16 x i16> %0, i32 10
+ %x11 = extractelement <16 x i16> %0, i32 11
+ %x12 = extractelement <16 x i16> %0, i32 12
+ %x13 = extractelement <16 x i16> %0, i32 13
+ %x14 = extractelement <16 x i16> %0, i32 14
+ %x15 = extractelement <16 x i16> %0, i32 15
+ %trunc0 = trunc i16 %x0 to i8
+ %trunc1 = trunc i16 %x1 to i8
+ %trunc2 = trunc i16 %x2 to i8
+ %trunc3 = trunc i16 %x3 to i8
+ %trunc4 = trunc i16 %x4 to i8
+ %trunc5 = trunc i16 %x5 to i8
+ %trunc6 = trunc i16 %x6 to i8
+ %trunc7 = trunc i16 %x7 to i8
+ %trunc8 = trunc i16 %x8 to i8
+ %trunc9 = trunc i16 %x9 to i8
+ %trunc10 = trunc i16 %x10 to i8
+ %trunc11 = trunc i16 %x11 to i8
+ %trunc12 = trunc i16 %x12 to i8
+ %trunc13 = trunc i16 %x13 to i8
+ %trunc14 = trunc i16 %x14 to i8
+ %trunc15 = trunc i16 %x15 to i8
+ %ext0 = zext i8 %trunc0 to i16
+ %ext1 = zext i8 %trunc1 to i16
+ %ext2 = zext i8 %trunc2 to i16
+ %ext3 = zext i8 %trunc3 to i16
+ %ext4 = zext i8 %trunc4 to i16
+ %ext5 = zext i8 %trunc5 to i16
+ %ext6 = zext i8 %trunc6 to i16
+ %ext7 = zext i8 %trunc7 to i16
+ %ext8 = zext i8 %trunc8 to i16
+ %ext9 = zext i8 %trunc9 to i16
+ %ext10 = zext i8 %trunc10 to i16
+ %ext11 = zext i8 %trunc11 to i16
+ %ext12 = zext i8 %trunc12 to i16
+ %ext13 = zext i8 %trunc13 to i16
+ %ext14 = zext i8 %trunc14 to i16
+ %ext15 = zext i8 %trunc15 to i16
+ %v0 = insertelement <16 x i16> undef, i16 %ext0, i32 0
+ %v1 = insertelement <16 x i16> %v0, i16 %ext1, i32 1
+ %v2 = insertelement <16 x i16> %v1, i16 %ext2, i32 2
+ %v3 = insertelement <16 x i16> %v2, i16 %ext3, i32 3
+ %v4 = insertelement <16 x i16> %v3, i16 %ext4, i32 4
+ %v5 = insertelement <16 x i16> %v4, i16 %ext5, i32 5
+ %v6 = insertelement <16 x i16> %v5, i16 %ext6, i32 6
+ %v7 = insertelement <16 x i16> %v6, i16 %ext7, i32 7
+ %v8 = insertelement <16 x i16> %v7, i16 %ext8, i32 8
+ %v9 = insertelement <16 x i16> %v8, i16 %ext9, i32 9
+ %v10 = insertelement <16 x i16> %v9, i16 %ext10, i32 10
+ %v11 = insertelement <16 x i16> %v10, i16 %ext11, i32 11
+ %v12 = insertelement <16 x i16> %v11, i16 %ext12, i32 12
+ %v13 = insertelement <16 x i16> %v12, i16 %ext13, i32 13
+ %v14 = insertelement <16 x i16> %v13, i16 %ext14, i32 14
+ %v15 = insertelement <16 x i16> %v14, i16 %ext15, i32 15
+ ret <16 x i16> %v15
+}
+
define <16 x i8> @_clearupper16xi8a(<16 x i8>) nounwind {
; SSE-LABEL: _clearupper16xi8a:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE-NEXT: movd %eax, %xmm0
-; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
-; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
-; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
-; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
-; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE-NEXT: movd %eax, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE-NEXT: movd %esi, %xmm0
; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
-; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE-NEXT: movd %ecx, %xmm2
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm2
; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE-NEXT: movd %edx, %xmm0
-; SSE-NEXT: movd %esi, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE-NEXT: movd %edi, %xmm0
-; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
-; SSE-NEXT: movd %edx, %xmm3
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm3
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE-NEXT: movd %r9d, %xmm0
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE-NEXT: movd %eax, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE-NEXT: movd %r8d, %xmm0
-; SSE-NEXT: movd %ecx, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; SSE-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm2
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm2
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm4
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper16xi8a:
; AVX: # BB#0:
; AVX-NEXT: vpextrb $0, %xmm0, %eax
+; AVX-NEXT: vpextrb $1, %xmm0, %ecx
; AVX-NEXT: vmovd %eax, %xmm1
-; AVX-NEXT: vpextrb $1, %xmm0, %eax
-; AVX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $2, %xmm0, %eax
-; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $3, %xmm0, %eax
-; AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $4, %xmm0, %eax
-; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $5, %xmm0, %eax
-; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $6, %xmm0, %eax
-; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $7, %xmm0, %eax
-; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $8, %xmm0, %eax
-; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $9, %xmm0, %eax
-; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $10, %xmm0, %eax
-; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $11, %xmm0, %eax
-; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $12, %xmm0, %eax
-; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $13, %xmm0, %eax
-; AVX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $14, %xmm0, %eax
-; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $15, %xmm0, %eax
-; AVX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX-NEXT: vpinsrb $1, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <16 x i8> %0, i32 0
@@ -298,17 +479,270 @@ define <16 x i8> @_clearupper16xi8a(<16 x i8>) nounwind {
ret <16 x i8> %v15
}
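+; <32 x i8> variant clearing the upper nibble of every byte; SSE round-trips the
+; vector through the stack, while AVX reinserts only the first two bytes of each
+; half before a single AND.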
+define <32 x i8> @_clearupper32xi8a(<32 x i8>) nounwind {
+; SSE-LABEL: _clearupper32xi8a:
+; SSE: # BB#0:
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm2
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm2
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm4
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm4
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm5
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; SSE-NEXT: movd {{.*#+}} xmm5 = mem[0],zero,zero,zero
+; SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm4
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm5
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm4
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm6
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3],xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper32xi8a:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: vpextrb $1, %xmm0, %ecx
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrb $0, %xmm1, %edx
+; AVX1-NEXT: vpextrb $1, %xmm1, %esi
+; AVX1-NEXT: vmovd %edx, %xmm2
+; AVX1-NEXT: vpinsrb $1, %esi, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3,4,5,6,7]
+; AVX1-NEXT: vmovd %eax, %xmm2
+; AVX1-NEXT: vpinsrb $1, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4,5,6,7]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper32xi8a:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: vpextrb $1, %xmm0, %ecx
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrb $0, %xmm1, %edx
+; AVX2-NEXT: vpextrb $1, %xmm1, %esi
+; AVX2-NEXT: vmovd %edx, %xmm2
+; AVX2-NEXT: vpinsrb $1, %esi, %xmm2, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3,4,5,6,7]
+; AVX2-NEXT: vmovd %eax, %xmm2
+; AVX2-NEXT: vpinsrb $1, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4,5,6,7]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %x0 = extractelement <32 x i8> %0, i32 0
+ %x1 = extractelement <32 x i8> %0, i32 1
+ %x2 = extractelement <32 x i8> %0, i32 2
+ %x3 = extractelement <32 x i8> %0, i32 3
+ %x4 = extractelement <32 x i8> %0, i32 4
+ %x5 = extractelement <32 x i8> %0, i32 5
+ %x6 = extractelement <32 x i8> %0, i32 6
+ %x7 = extractelement <32 x i8> %0, i32 7
+ %x8 = extractelement <32 x i8> %0, i32 8
+ %x9 = extractelement <32 x i8> %0, i32 9
+ %x10 = extractelement <32 x i8> %0, i32 10
+ %x11 = extractelement <32 x i8> %0, i32 11
+ %x12 = extractelement <32 x i8> %0, i32 12
+ %x13 = extractelement <32 x i8> %0, i32 13
+ %x14 = extractelement <32 x i8> %0, i32 14
+ %x15 = extractelement <32 x i8> %0, i32 15
+ %x16 = extractelement <32 x i8> %0, i32 16
+ %x17 = extractelement <32 x i8> %0, i32 17
+ %x18 = extractelement <32 x i8> %0, i32 18
+ %x19 = extractelement <32 x i8> %0, i32 19
+ %x20 = extractelement <32 x i8> %0, i32 20
+ %x21 = extractelement <32 x i8> %0, i32 21
+ %x22 = extractelement <32 x i8> %0, i32 22
+ %x23 = extractelement <32 x i8> %0, i32 23
+ %x24 = extractelement <32 x i8> %0, i32 24
+ %x25 = extractelement <32 x i8> %0, i32 25
+ %x26 = extractelement <32 x i8> %0, i32 26
+ %x27 = extractelement <32 x i8> %0, i32 27
+ %x28 = extractelement <32 x i8> %0, i32 28
+ %x29 = extractelement <32 x i8> %0, i32 29
+ %x30 = extractelement <32 x i8> %0, i32 30
+ %x31 = extractelement <32 x i8> %0, i32 31
+ %trunc0 = trunc i8 %x0 to i4
+ %trunc1 = trunc i8 %x1 to i4
+ %trunc2 = trunc i8 %x2 to i4
+ %trunc3 = trunc i8 %x3 to i4
+ %trunc4 = trunc i8 %x4 to i4
+ %trunc5 = trunc i8 %x5 to i4
+ %trunc6 = trunc i8 %x6 to i4
+ %trunc7 = trunc i8 %x7 to i4
+ %trunc8 = trunc i8 %x8 to i4
+ %trunc9 = trunc i8 %x9 to i4
+ %trunc10 = trunc i8 %x10 to i4
+ %trunc11 = trunc i8 %x11 to i4
+ %trunc12 = trunc i8 %x12 to i4
+ %trunc13 = trunc i8 %x13 to i4
+ %trunc14 = trunc i8 %x14 to i4
+ %trunc15 = trunc i8 %x15 to i4
+ %trunc16 = trunc i8 %x16 to i4
+ %trunc17 = trunc i8 %x17 to i4
+ %trunc18 = trunc i8 %x18 to i4
+ %trunc19 = trunc i8 %x19 to i4
+ %trunc20 = trunc i8 %x20 to i4
+ %trunc21 = trunc i8 %x21 to i4
+ %trunc22 = trunc i8 %x22 to i4
+ %trunc23 = trunc i8 %x23 to i4
+ %trunc24 = trunc i8 %x24 to i4
+ %trunc25 = trunc i8 %x25 to i4
+ %trunc26 = trunc i8 %x26 to i4
+ %trunc27 = trunc i8 %x27 to i4
+ %trunc28 = trunc i8 %x28 to i4
+ %trunc29 = trunc i8 %x29 to i4
+ %trunc30 = trunc i8 %x30 to i4
+ %trunc31 = trunc i8 %x31 to i4
+ %ext0 = zext i4 %trunc0 to i8
+ %ext1 = zext i4 %trunc1 to i8
+ %ext2 = zext i4 %trunc2 to i8
+ %ext3 = zext i4 %trunc3 to i8
+ %ext4 = zext i4 %trunc4 to i8
+ %ext5 = zext i4 %trunc5 to i8
+ %ext6 = zext i4 %trunc6 to i8
+ %ext7 = zext i4 %trunc7 to i8
+ %ext8 = zext i4 %trunc8 to i8
+ %ext9 = zext i4 %trunc9 to i8
+ %ext10 = zext i4 %trunc10 to i8
+ %ext11 = zext i4 %trunc11 to i8
+ %ext12 = zext i4 %trunc12 to i8
+ %ext13 = zext i4 %trunc13 to i8
+ %ext14 = zext i4 %trunc14 to i8
+ %ext15 = zext i4 %trunc15 to i8
+ %ext16 = zext i4 %trunc16 to i8
+ %ext17 = zext i4 %trunc17 to i8
+ %ext18 = zext i4 %trunc18 to i8
+ %ext19 = zext i4 %trunc19 to i8
+ %ext20 = zext i4 %trunc20 to i8
+ %ext21 = zext i4 %trunc21 to i8
+ %ext22 = zext i4 %trunc22 to i8
+ %ext23 = zext i4 %trunc23 to i8
+ %ext24 = zext i4 %trunc24 to i8
+ %ext25 = zext i4 %trunc25 to i8
+ %ext26 = zext i4 %trunc26 to i8
+ %ext27 = zext i4 %trunc27 to i8
+ %ext28 = zext i4 %trunc28 to i8
+ %ext29 = zext i4 %trunc29 to i8
+ %ext30 = zext i4 %trunc30 to i8
+ %ext31 = zext i4 %trunc31 to i8
+ %v0 = insertelement <32 x i8> undef, i8 %ext0, i32 0
+ %v1 = insertelement <32 x i8> %v0, i8 %ext1, i32 1
+ %v2 = insertelement <32 x i8> %v1, i8 %ext2, i32 2
+ %v3 = insertelement <32 x i8> %v2, i8 %ext3, i32 3
+ %v4 = insertelement <32 x i8> %v3, i8 %ext4, i32 4
+ %v5 = insertelement <32 x i8> %v4, i8 %ext5, i32 5
+ %v6 = insertelement <32 x i8> %v5, i8 %ext6, i32 6
+ %v7 = insertelement <32 x i8> %v6, i8 %ext7, i32 7
+ %v8 = insertelement <32 x i8> %v7, i8 %ext8, i32 8
+ %v9 = insertelement <32 x i8> %v8, i8 %ext9, i32 9
+ %v10 = insertelement <32 x i8> %v9, i8 %ext10, i32 10
+ %v11 = insertelement <32 x i8> %v10, i8 %ext11, i32 11
+ %v12 = insertelement <32 x i8> %v11, i8 %ext12, i32 12
+ %v13 = insertelement <32 x i8> %v12, i8 %ext13, i32 13
+ %v14 = insertelement <32 x i8> %v13, i8 %ext14, i32 14
+ %v15 = insertelement <32 x i8> %v14, i8 %ext15, i32 15
+ %v16 = insertelement <32 x i8> %v15, i8 %ext16, i32 16
+ %v17 = insertelement <32 x i8> %v16, i8 %ext17, i32 17
+ %v18 = insertelement <32 x i8> %v17, i8 %ext18, i32 18
+ %v19 = insertelement <32 x i8> %v18, i8 %ext19, i32 19
+ %v20 = insertelement <32 x i8> %v19, i8 %ext20, i32 20
+ %v21 = insertelement <32 x i8> %v20, i8 %ext21, i32 21
+ %v22 = insertelement <32 x i8> %v21, i8 %ext22, i32 22
+ %v23 = insertelement <32 x i8> %v22, i8 %ext23, i32 23
+ %v24 = insertelement <32 x i8> %v23, i8 %ext24, i32 24
+ %v25 = insertelement <32 x i8> %v24, i8 %ext25, i32 25
+ %v26 = insertelement <32 x i8> %v25, i8 %ext26, i32 26
+ %v27 = insertelement <32 x i8> %v26, i8 %ext27, i32 27
+ %v28 = insertelement <32 x i8> %v27, i8 %ext28, i32 28
+ %v29 = insertelement <32 x i8> %v28, i8 %ext29, i32 29
+ %v30 = insertelement <32 x i8> %v29, i8 %ext30, i32 30
+ %v31 = insertelement <32 x i8> %v30, i8 %ext31, i32 31
+ ret <32 x i8> %v31
+}
+
define <2 x i64> @_clearupper2xi64b(<2 x i64>) nounwind {
; SSE-LABEL: _clearupper2xi64b:
; SSE: # BB#0:
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: movd %eax, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[2,0]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,0]
-; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: _clearupper2xi64b:
@@ -329,14 +763,38 @@ define <2 x i64> @_clearupper2xi64b(<2 x i64>) nounwind {
ret <2 x i64> %r
}
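+; Same effect as _clearupper4xi64a, expressed as a bitcast to <8 x i32> with
+; zeros inserted into the odd lanes.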
+define <4 x i64> @_clearupper4xi64b(<4 x i64>) nounwind {
+; SSE-LABEL: _clearupper4xi64b:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper4xi64b:
+; AVX1: # BB#0:
+; AVX1-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper4xi64b:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX2-NEXT: retq
+ %x32 = bitcast <4 x i64> %0 to <8 x i32>
+ %r0 = insertelement <8 x i32> %x32, i32 zeroinitializer, i32 1
+ %r1 = insertelement <8 x i32> %r0, i32 zeroinitializer, i32 3
+ %r2 = insertelement <8 x i32> %r1, i32 zeroinitializer, i32 5
+ %r3 = insertelement <8 x i32> %r2, i32 zeroinitializer, i32 7
+ %r = bitcast <8 x i32> %r3 to <4 x i64>
+ ret <4 x i64> %r
+}
+
define <4 x i32> @_clearupper4xi32b(<4 x i32>) nounwind {
; SSE-LABEL: _clearupper4xi32b:
; SSE: # BB#0:
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: pinsrw $1, %eax, %xmm0
-; SSE-NEXT: pinsrw $3, %eax, %xmm0
-; SSE-NEXT: pinsrw $5, %eax, %xmm0
-; SSE-NEXT: pinsrw $7, %eax, %xmm0
+; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper4xi32b:
@@ -353,71 +811,55 @@ define <4 x i32> @_clearupper4xi32b(<4 x i32>) nounwind {
ret <4 x i32> %r
}
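+; <8 x i32> via a <16 x i16> bitcast with zeroed odd lanes; note AVX still
+; applies vpblendw per 128-bit half instead of one 256-bit AND.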
+define <8 x i32> @_clearupper8xi32b(<8 x i32>) nounwind {
+; SSE-LABEL: _clearupper8xi32b:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0]
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper8xi32b:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper8xi32b:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+ %x16 = bitcast <8 x i32> %0 to <16 x i16>
+ %r0 = insertelement <16 x i16> %x16, i16 zeroinitializer, i32 1
+ %r1 = insertelement <16 x i16> %r0, i16 zeroinitializer, i32 3
+ %r2 = insertelement <16 x i16> %r1, i16 zeroinitializer, i32 5
+ %r3 = insertelement <16 x i16> %r2, i16 zeroinitializer, i32 7
+ %r4 = insertelement <16 x i16> %r3, i16 zeroinitializer, i32 9
+ %r5 = insertelement <16 x i16> %r4, i16 zeroinitializer, i32 11
+ %r6 = insertelement <16 x i16> %r5, i16 zeroinitializer, i32 13
+ %r7 = insertelement <16 x i16> %r6, i16 zeroinitializer, i32 15
+ %r = bitcast <16 x i16> %r7 to <8 x i32>
+ ret <8 x i32> %r
+}
+
define <8 x i16> @_clearupper8xi16b(<8 x i16>) nounwind {
; SSE-LABEL: _clearupper8xi16b:
; SSE: # BB#0:
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: movd %eax, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: psllw $8, %xmm3
-; SSE-NEXT: pandn %xmm3, %xmm2
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255]
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: pslld $24, %xmm3
-; SSE-NEXT: pandn %xmm3, %xmm2
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255]
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: psllq $40, %xmm3
-; SSE-NEXT: pandn %xmm3, %xmm2
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255]
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: psllq $56, %xmm3
-; SSE-NEXT: pandn %xmm3, %xmm2
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1,2,3,4,5,6]
-; SSE-NEXT: pandn %xmm3, %xmm2
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255]
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1,2,3,4]
-; SSE-NEXT: pandn %xmm3, %xmm2
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255]
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1,2]
-; SSE-NEXT: pandn %xmm3, %xmm2
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0]
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: por %xmm2, %xmm0
+; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper8xi16b:
; AVX: # BB#0:
-; AVX-NEXT: xorl %eax, %eax
-; AVX-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%x8 = bitcast <8 x i16> %0 to <16 x i8>
%r0 = insertelement <16 x i8> %x8, i8 zeroinitializer, i32 1
@@ -432,6 +874,54 @@ define <8 x i16> @_clearupper8xi16b(<8 x i16>) nounwind {
ret <8 x i16> %r
}
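+; <16 x i16> via a <32 x i8> bitcast with zeroed odd bytes; AVX masks each
+; 128-bit half separately rather than using a single 256-bit AND.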
+define <16 x i16> @_clearupper16xi16b(<16 x i16>) nounwind {
+; SSE-LABEL: _clearupper16xi16b:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper16xi16b:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper16xi16b:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+ %x8 = bitcast <16 x i16> %0 to <32 x i8>
+ %r0 = insertelement <32 x i8> %x8, i8 zeroinitializer, i32 1
+ %r1 = insertelement <32 x i8> %r0, i8 zeroinitializer, i32 3
+ %r2 = insertelement <32 x i8> %r1, i8 zeroinitializer, i32 5
+ %r3 = insertelement <32 x i8> %r2, i8 zeroinitializer, i32 7
+ %r4 = insertelement <32 x i8> %r3, i8 zeroinitializer, i32 9
+ %r5 = insertelement <32 x i8> %r4, i8 zeroinitializer, i32 11
+ %r6 = insertelement <32 x i8> %r5, i8 zeroinitializer, i32 13
+ %r7 = insertelement <32 x i8> %r6, i8 zeroinitializer, i32 15
+ %r8 = insertelement <32 x i8> %r7, i8 zeroinitializer, i32 17
+ %r9 = insertelement <32 x i8> %r8, i8 zeroinitializer, i32 19
+ %r10 = insertelement <32 x i8> %r9, i8 zeroinitializer, i32 21
+ %r11 = insertelement <32 x i8> %r10, i8 zeroinitializer, i32 23
+ %r12 = insertelement <32 x i8> %r11, i8 zeroinitializer, i32 25
+ %r13 = insertelement <32 x i8> %r12, i8 zeroinitializer, i32 27
+ %r14 = insertelement <32 x i8> %r13, i8 zeroinitializer, i32 29
+ %r15 = insertelement <32 x i8> %r14, i8 zeroinitializer, i32 31
+ %r = bitcast <32 x i8> %r15 to <16 x i16>
+ ret <16 x i16> %r
+}
+
define <16 x i8> @_clearupper16xi8b(<16 x i8>) nounwind {
; SSE-LABEL: _clearupper16xi8b:
; SSE: # BB#0:
@@ -610,6 +1100,447 @@ define <16 x i8> @_clearupper16xi8b(<16 x i8>) nounwind {
ret <16 x i8> %r
}
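+; <32 x i8> counterpart of _clearupper16xi8b; codegen currently scalarizes
+; heavily through GPRs and the stack on all subtargets.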
+define <32 x i8> @_clearupper32xi8b(<32 x i8>) nounwind {
+; SSE-LABEL: _clearupper32xi8b:
+; SSE: # BB#0:
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE-NEXT: movd %xmm0, %rcx
+; SSE-NEXT: movq %rcx, %r8
+; SSE-NEXT: movq %rcx, %r9
+; SSE-NEXT: movq %rcx, %r10
+; SSE-NEXT: movq %rcx, %rax
+; SSE-NEXT: movq %rcx, %rdx
+; SSE-NEXT: movq %rcx, %rsi
+; SSE-NEXT: movq %rcx, %rdi
+; SSE-NEXT: andb $15, %cl
+; SSE-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movd %xmm2, %rcx
+; SSE-NEXT: shrq $56, %rdi
+; SSE-NEXT: andb $15, %dil
+; SSE-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movq %rcx, %r11
+; SSE-NEXT: shrq $48, %rsi
+; SSE-NEXT: andb $15, %sil
+; SSE-NEXT: movb %sil, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movq %rcx, %r14
+; SSE-NEXT: shrq $40, %rdx
+; SSE-NEXT: andb $15, %dl
+; SSE-NEXT: movb %dl, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movq %rcx, %rdx
+; SSE-NEXT: shrq $32, %rax
+; SSE-NEXT: andb $15, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movq %rcx, %rax
+; SSE-NEXT: shrq $24, %r10
+; SSE-NEXT: andb $15, %r10b
+; SSE-NEXT: movb %r10b, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movq %rcx, %rdi
+; SSE-NEXT: shrq $16, %r9
+; SSE-NEXT: andb $15, %r9b
+; SSE-NEXT: movb %r9b, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movq %rcx, %rsi
+; SSE-NEXT: shrq $8, %r8
+; SSE-NEXT: andb $15, %r8b
+; SSE-NEXT: movb %r8b, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movq %rcx, %rbx
+; SSE-NEXT: movb $0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andb $15, %cl
+; SSE-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: shrq $56, %rbx
+; SSE-NEXT: andb $15, %bl
+; SSE-NEXT: movb %bl, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: shrq $48, %rsi
+; SSE-NEXT: andb $15, %sil
+; SSE-NEXT: movb %sil, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: shrq $40, %rdi
+; SSE-NEXT: andb $15, %dil
+; SSE-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: shrq $32, %rax
+; SSE-NEXT: andb $15, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: shrq $24, %rdx
+; SSE-NEXT: andb $15, %dl
+; SSE-NEXT: movb %dl, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: shrq $16, %r14
+; SSE-NEXT: andb $15, %r14b
+; SSE-NEXT: movb %r14b, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: shrq $8, %r11
+; SSE-NEXT: andb $15, %r11b
+; SSE-NEXT: movb %r11b, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movb $0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r14
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper32xi8b:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: vpextrq $1, %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vmovq %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %r14
+; AVX1-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX1-NEXT: movq %rdx, %r8
+; AVX1-NEXT: movq %rdx, %r9
+; AVX1-NEXT: movq %rdx, %r11
+; AVX1-NEXT: movq %rdx, %rsi
+; AVX1-NEXT: movq %rdx, %rdi
+; AVX1-NEXT: movq %rdx, %rcx
+; AVX1-NEXT: movq %rdx, %rax
+; AVX1-NEXT: andb $15, %dl
+; AVX1-NEXT: movb %dl, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $56, %rax
+; AVX1-NEXT: andb $15, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movq %r14, %r10
+; AVX1-NEXT: shrq $48, %rcx
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movq %r14, %rdx
+; AVX1-NEXT: shrq $40, %rdi
+; AVX1-NEXT: andb $15, %dil
+; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movq %r14, %rax
+; AVX1-NEXT: shrq $32, %rsi
+; AVX1-NEXT: andb $15, %sil
+; AVX1-NEXT: movb %sil, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movq %r14, %rcx
+; AVX1-NEXT: shrq $24, %r11
+; AVX1-NEXT: andb $15, %r11b
+; AVX1-NEXT: movb %r11b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movq %r14, %rsi
+; AVX1-NEXT: shrq $16, %r9
+; AVX1-NEXT: andb $15, %r9b
+; AVX1-NEXT: movb %r9b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movq %r14, %rdi
+; AVX1-NEXT: shrq $8, %r8
+; AVX1-NEXT: andb $15, %r8b
+; AVX1-NEXT: movb %r8b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movq %r14, %rbx
+; AVX1-NEXT: andb $15, %r14b
+; AVX1-NEXT: movb %r14b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $8, %r10
+; AVX1-NEXT: shrq $16, %rdx
+; AVX1-NEXT: shrq $24, %rax
+; AVX1-NEXT: shrq $32, %rcx
+; AVX1-NEXT: shrq $40, %rsi
+; AVX1-NEXT: shrq $48, %rdi
+; AVX1-NEXT: shrq $56, %rbx
+; AVX1-NEXT: andb $15, %bl
+; AVX1-NEXT: movb %bl, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: andb $15, %dil
+; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: andb $15, %sil
+; AVX1-NEXT: movb %sil, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: andb $15, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: andb $15, %dl
+; AVX1-NEXT: movb %dl, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: andb $15, %r10b
+; AVX1-NEXT: movb %r10b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: movq %rax, %r8
+; AVX1-NEXT: movq %rax, %rdx
+; AVX1-NEXT: movq %rax, %rsi
+; AVX1-NEXT: movq %rax, %rdi
+; AVX1-NEXT: movl %eax, %ebx
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: vmovd %eax, %xmm1
+; AVX1-NEXT: shrl $8, %eax
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX1-NEXT: shrl $16, %ecx
+; AVX1-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: shrl $24, %ebx
+; AVX1-NEXT: vpinsrb $3, %ebx, %xmm1, %xmm1
+; AVX1-NEXT: shrq $32, %rdi
+; AVX1-NEXT: vpinsrb $4, %edi, %xmm1, %xmm1
+; AVX1-NEXT: shrq $40, %rsi
+; AVX1-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
+; AVX1-NEXT: movb $0, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm2
+; AVX1-NEXT: shrq $48, %rdx
+; AVX1-NEXT: vpinsrb $6, %edx, %xmm1, %xmm1
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: shrq $56, %r8
+; AVX1-NEXT: vpinsrb $7, %r8d, %xmm1, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $8, %ecx
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $16, %ecx
+; AVX1-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $24, %ecx
+; AVX1-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $32, %rcx
+; AVX1-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $40, %rcx
+; AVX1-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $48, %rcx
+; AVX1-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: vmovq %xmm2, %rcx
+; AVX1-NEXT: shrq $56, %rax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %ecx, %eax
+; AVX1-NEXT: shrl $8, %eax
+; AVX1-NEXT: vmovd %ecx, %xmm1
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %ecx, %eax
+; AVX1-NEXT: shrl $16, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %ecx, %eax
+; AVX1-NEXT: shrl $24, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rcx, %rax
+; AVX1-NEXT: shrq $32, %rax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rcx, %rax
+; AVX1-NEXT: shrq $40, %rax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rcx, %rax
+; AVX1-NEXT: shrq $48, %rax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpextrq $1, %xmm2, %rax
+; AVX1-NEXT: shrq $56, %rcx
+; AVX1-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $8, %ecx
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpinsrb $9, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $16, %ecx
+; AVX1-NEXT: vpinsrb $10, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $24, %ecx
+; AVX1-NEXT: vpinsrb $11, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $32, %rcx
+; AVX1-NEXT: vpinsrb $12, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $40, %rcx
+; AVX1-NEXT: vpinsrb $13, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $48, %rcx
+; AVX1-NEXT: vpinsrb $14, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: shrq $56, %rax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper32xi8b:
+; AVX2: # BB#0:
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: vpextrq $1, %xmm0, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vmovq %xmm0, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %r14
+; AVX2-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX2-NEXT: movq %rdx, %r8
+; AVX2-NEXT: movq %rdx, %r9
+; AVX2-NEXT: movq %rdx, %r11
+; AVX2-NEXT: movq %rdx, %rsi
+; AVX2-NEXT: movq %rdx, %rdi
+; AVX2-NEXT: movq %rdx, %rcx
+; AVX2-NEXT: movq %rdx, %rax
+; AVX2-NEXT: andb $15, %dl
+; AVX2-NEXT: movb %dl, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: shrq $56, %rax
+; AVX2-NEXT: andb $15, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movq %r14, %r10
+; AVX2-NEXT: shrq $48, %rcx
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movq %r14, %rdx
+; AVX2-NEXT: shrq $40, %rdi
+; AVX2-NEXT: andb $15, %dil
+; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movq %r14, %rax
+; AVX2-NEXT: shrq $32, %rsi
+; AVX2-NEXT: andb $15, %sil
+; AVX2-NEXT: movb %sil, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movq %r14, %rcx
+; AVX2-NEXT: shrq $24, %r11
+; AVX2-NEXT: andb $15, %r11b
+; AVX2-NEXT: movb %r11b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movq %r14, %rsi
+; AVX2-NEXT: shrq $16, %r9
+; AVX2-NEXT: andb $15, %r9b
+; AVX2-NEXT: movb %r9b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movq %r14, %rdi
+; AVX2-NEXT: shrq $8, %r8
+; AVX2-NEXT: andb $15, %r8b
+; AVX2-NEXT: movb %r8b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movq %r14, %rbx
+; AVX2-NEXT: andb $15, %r14b
+; AVX2-NEXT: movb %r14b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: shrq $8, %r10
+; AVX2-NEXT: shrq $16, %rdx
+; AVX2-NEXT: shrq $24, %rax
+; AVX2-NEXT: shrq $32, %rcx
+; AVX2-NEXT: shrq $40, %rsi
+; AVX2-NEXT: shrq $48, %rdi
+; AVX2-NEXT: shrq $56, %rbx
+; AVX2-NEXT: andb $15, %bl
+; AVX2-NEXT: movb %bl, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: andb $15, %dil
+; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: andb $15, %sil
+; AVX2-NEXT: movb %sil, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: andb $15, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: andb $15, %dl
+; AVX2-NEXT: movb %dl, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: andb $15, %r10b
+; AVX2-NEXT: movb %r10b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: movq %rax, %r8
+; AVX2-NEXT: movq %rax, %rdx
+; AVX2-NEXT: movq %rax, %rsi
+; AVX2-NEXT: movq %rax, %rdi
+; AVX2-NEXT: movl %eax, %ebx
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: shrl $8, %eax
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX2-NEXT: shrl $16, %ecx
+; AVX2-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: shrl $24, %ebx
+; AVX2-NEXT: vpinsrb $3, %ebx, %xmm1, %xmm1
+; AVX2-NEXT: shrq $32, %rdi
+; AVX2-NEXT: vpinsrb $4, %edi, %xmm1, %xmm1
+; AVX2-NEXT: shrq $40, %rsi
+; AVX2-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
+; AVX2-NEXT: movb $0, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm2
+; AVX2-NEXT: shrq $48, %rdx
+; AVX2-NEXT: vpinsrb $6, %edx, %xmm1, %xmm1
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: shrq $56, %r8
+; AVX2-NEXT: vpinsrb $7, %r8d, %xmm1, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $8, %ecx
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $16, %ecx
+; AVX2-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $24, %ecx
+; AVX2-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $32, %rcx
+; AVX2-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $40, %rcx
+; AVX2-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $48, %rcx
+; AVX2-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: vmovq %xmm2, %rcx
+; AVX2-NEXT: shrq $56, %rax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %ecx, %eax
+; AVX2-NEXT: shrl $8, %eax
+; AVX2-NEXT: vmovd %ecx, %xmm1
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %ecx, %eax
+; AVX2-NEXT: shrl $16, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %ecx, %eax
+; AVX2-NEXT: shrl $24, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rcx, %rax
+; AVX2-NEXT: shrq $32, %rax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rcx, %rax
+; AVX2-NEXT: shrq $40, %rax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rcx, %rax
+; AVX2-NEXT: shrq $48, %rax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpextrq $1, %xmm2, %rax
+; AVX2-NEXT: shrq $56, %rcx
+; AVX2-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $8, %ecx
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrb $9, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $16, %ecx
+; AVX2-NEXT: vpinsrb $10, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $24, %ecx
+; AVX2-NEXT: vpinsrb $11, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $32, %rcx
+; AVX2-NEXT: vpinsrb $12, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $40, %rcx
+; AVX2-NEXT: vpinsrb $13, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $48, %rcx
+; AVX2-NEXT: vpinsrb $14, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: shrq $56, %rax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: retq
+ %x4 = bitcast <32 x i8> %0 to <64 x i4>
+ %r0 = insertelement <64 x i4> %x4, i4 zeroinitializer, i32 1
+ %r1 = insertelement <64 x i4> %r0, i4 zeroinitializer, i32 3
+ %r2 = insertelement <64 x i4> %r1, i4 zeroinitializer, i32 5
+ %r3 = insertelement <64 x i4> %r2, i4 zeroinitializer, i32 7
+ %r4 = insertelement <64 x i4> %r3, i4 zeroinitializer, i32 9
+ %r5 = insertelement <64 x i4> %r4, i4 zeroinitializer, i32 11
+ %r6 = insertelement <64 x i4> %r5, i4 zeroinitializer, i32 13
+ %r7 = insertelement <64 x i4> %r6, i4 zeroinitializer, i32 15
+ %r8 = insertelement <64 x i4> %r7, i4 zeroinitializer, i32 17
+ %r9 = insertelement <64 x i4> %r8, i4 zeroinitializer, i32 19
+ %r10 = insertelement <64 x i4> %r9, i4 zeroinitializer, i32 21
+ %r11 = insertelement <64 x i4> %r10, i4 zeroinitializer, i32 23
+ %r12 = insertelement <64 x i4> %r11, i4 zeroinitializer, i32 25
+ %r13 = insertelement <64 x i4> %r12, i4 zeroinitializer, i32 27
+ %r14 = insertelement <64 x i4> %r13, i4 zeroinitializer, i32 29
+ %r15 = insertelement <64 x i4> %r14, i4 zeroinitializer, i32 31
+ %r16 = insertelement <64 x i4> %r15, i4 zeroinitializer, i32 33
+ %r17 = insertelement <64 x i4> %r16, i4 zeroinitializer, i32 35
+ %r18 = insertelement <64 x i4> %r17, i4 zeroinitializer, i32 37
+ %r19 = insertelement <64 x i4> %r18, i4 zeroinitializer, i32 39
+ %r20 = insertelement <64 x i4> %r19, i4 zeroinitializer, i32 41
+ %r21 = insertelement <64 x i4> %r20, i4 zeroinitializer, i32 43
+ %r22 = insertelement <64 x i4> %r21, i4 zeroinitializer, i32 45
+ %r23 = insertelement <64 x i4> %r22, i4 zeroinitializer, i32 47
+ %r24 = insertelement <64 x i4> %r23, i4 zeroinitializer, i32 49
+ %r25 = insertelement <64 x i4> %r24, i4 zeroinitializer, i32 51
+ %r26 = insertelement <64 x i4> %r25, i4 zeroinitializer, i32 53
+ %r27 = insertelement <64 x i4> %r26, i4 zeroinitializer, i32 55
+ %r28 = insertelement <64 x i4> %r27, i4 zeroinitializer, i32 57
+ %r29 = insertelement <64 x i4> %r28, i4 zeroinitializer, i32 59
+ %r30 = insertelement <64 x i4> %r29, i4 zeroinitializer, i32 61
+ %r31 = insertelement <64 x i4> %r30, i4 zeroinitializer, i32 63
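+ ; Note that the bitcast below uses %r15, so the inserts into %r16..%r31 are
+ ; dead and only the low 16 bytes have their upper nibbles cleared; the
+ ; generated code above accordingly masks just one 128-bit half.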
+ %r = bitcast <64 x i4> %r15 to <32 x i8>
+ ret <32 x i8> %r
+}
+
define <2 x i64> @_clearupper2xi64c(<2 x i64>) nounwind {
; SSE-LABEL: _clearupper2xi64c:
; SSE: # BB#0:
@@ -631,6 +1562,29 @@ define <2 x i64> @_clearupper2xi64c(<2 x i64>) nounwind {
ret <2 x i64> %r
}
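+; AVX1 lacks 256-bit integer blends, so the mask is applied with a
+; float-domain vblendps; AVX2 can stay in the integer domain with vpblendd.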
+define <4 x i64> @_clearupper4xi64c(<4 x i64>) nounwind {
+; SSE-LABEL: _clearupper4xi64c:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper4xi64c:
+; AVX1: # BB#0:
+; AVX1-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper4xi64c:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX2-NEXT: retq
+ %r = and <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>, %0
+ ret <4 x i64> %r
+}
+
define <4 x i32> @_clearupper4xi32c(<4 x i32>) nounwind {
; SSE-LABEL: _clearupper4xi32c:
; SSE: # BB#0:
@@ -646,6 +1600,28 @@ define <4 x i32> @_clearupper4xi32c(<4 x i32>) nounwind {
ret <4 x i32> %r
}
+define <8 x i32> @_clearupper8xi32c(<8 x i32>) nounwind {
+; SSE-LABEL: _clearupper8xi32c:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0]
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper8xi32c:
+; AVX1: # BB#0:
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper8xi32c:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
+; AVX2-NEXT: retq
+ %r = and <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>, %0
+ ret <8 x i32> %r
+}
+
define <8 x i16> @_clearupper8xi16c(<8 x i16>) nounwind {
; SSE-LABEL: _clearupper8xi16c:
; SSE: # BB#0:
@@ -660,6 +1636,22 @@ define <8 x i16> @_clearupper8xi16c(<8 x i16>) nounwind {
ret <8 x i16> %r
}
+define <16 x i16> @_clearupper16xi16c(<16 x i16>) nounwind {
+; SSE-LABEL: _clearupper16xi16c:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: _clearupper16xi16c:
+; AVX: # BB#0:
+; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: retq
+ %r = and <16 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>, %0
+ ret <16 x i16> %r
+}
+
define <16 x i8> @_clearupper16xi8c(<16 x i8>) nounwind {
; SSE-LABEL: _clearupper16xi8c:
; SSE: # BB#0:
@@ -673,3 +1665,19 @@ define <16 x i8> @_clearupper16xi8c(<16 x i8>) nounwind {
%r = and <16 x i8> <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>, %0
ret <16 x i8> %r
}
+
+define <32 x i8> @_clearupper32xi8c(<32 x i8>) nounwind {
+; SSE-LABEL: _clearupper32xi8c:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: _clearupper32xi8c:
+; AVX: # BB#0:
+; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: retq
+ %r = and <32 x i8> <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>, %0
+ ret <32 x i8> %r
+}
diff --git a/test/CodeGen/X86/clflushopt.ll b/test/CodeGen/X86/clflushopt.ll
new file mode 100644
index 000000000000..ee416eb96c5e
--- /dev/null
+++ b/test/CodeGen/X86/clflushopt.ll
@@ -0,0 +1,13 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=clflushopt | FileCheck %s
+
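+; clflushopt takes a byte memory operand and flushes the cache line that
+; contains it, hence the (%eax) memory reference in the expected output.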
+define void @clflushopt(i8* %p) nounwind {
+; CHECK-LABEL: clflushopt:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: clflushopt (%eax)
+; CHECK-NEXT: retl
+ tail call void @llvm.x86.clflushopt(i8* %p)
+ ret void
+}
+declare void @llvm.x86.clflushopt(i8*) nounwind
diff --git a/test/CodeGen/X86/clzero.ll b/test/CodeGen/X86/clzero.ll
new file mode 100644
index 000000000000..f15d4deedeff
--- /dev/null
+++ b/test/CodeGen/X86/clzero.ll
@@ -0,0 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+clzero | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i386-pc-linux -mattr=+clzero | FileCheck %s --check-prefix=X32
+
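+; clzero zeroes the cache line addressed implicitly by rAX/eAX, so the pointer
+; argument is expected to be materialized into the AX register first.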
+define void @foo(i8* %p) #0 {
+; X64-LABEL: foo:
+; X64: # BB#0: # %entry
+; X64-NEXT: leaq (%rdi), %rax
+; X64-NEXT: clzero
+; X64-NEXT: retq
+;
+; X32-LABEL: foo:
+; X32: # BB#0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: leal (%eax), %eax
+; X32-NEXT: clzero
+; X32-NEXT: retl
+entry:
+ tail call void @llvm.x86.clzero(i8* %p) #1
+ ret void
+}
+
+declare void @llvm.x86.clzero(i8*) #1
diff --git a/test/CodeGen/X86/cmov.ll b/test/CodeGen/X86/cmov.ll
index 0060539c691f..d901f16e5c73 100644
--- a/test/CodeGen/X86/cmov.ll
+++ b/test/CodeGen/X86/cmov.ll
@@ -70,7 +70,7 @@ define void @test3(i64 %a, i64 %b, i1 %p) nounwind {
@g_100 = external global i8 ; <i8*> [#uses=2]
@_2E_str = external constant [15 x i8], align 1 ; <[15 x i8]*> [#uses=1]
-define i32 @test4() nounwind {
+define i1 @test4() nounwind {
entry:
%0 = load i8, i8* @g_3, align 1 ; <i8> [#uses=2]
%1 = sext i8 %0 to i32 ; <i32> [#uses=1]
@@ -107,10 +107,11 @@ bb.i.i: ; preds = %func_4.exit.i
func_1.exit: ; preds = %bb.i.i, %func_4.exit.i
%g_96.tmp.0.i = phi i8 [ %g_96.promoted.i, %bb.i.i ], [ %.mux.i, %func_4.exit.i ] ; <i8> [#uses=2]
+ %ret = phi i1 [ 0, %bb.i.i ], [ %.not.i, %func_4.exit.i ]
store i8 %g_96.tmp.0.i, i8* @g_96
%6 = zext i8 %g_96.tmp.0.i to i32 ; <i32> [#uses=1]
%7 = tail call i32 (i8*, ...) @printf(i8* noalias getelementptr ([15 x i8], [15 x i8]* @_2E_str, i64 0, i64 0), i32 %6) nounwind ; <i32> [#uses=0]
- ret i32 0
+ ret i1 %ret
}
declare i32 @printf(i8* nocapture, ...) nounwind
diff --git a/test/CodeGen/X86/cmovcmov.ll b/test/CodeGen/X86/cmovcmov.ll
index 38ba308ecff5..5b984d27249b 100644
--- a/test/CodeGen/X86/cmovcmov.ll
+++ b/test/CodeGen/X86/cmovcmov.ll
@@ -249,16 +249,23 @@ attributes #0 = { nounwind }
; CMOV-DAG: cmpl %edx, %esi
; CMOV-DAG: movb $20, %al
; CMOV-DAG: movb $20, %dl
-; CMOV: jl [[BB0:.LBB[0-9_]+]]
+; CMOV: jge [[BB2:.LBB[0-9_]+]]
+; CMOV: jle [[BB3:.LBB[0-9_]+]]
+; CMOV: [[BB0:.LBB[0-9_]+]]
+; CMOV: testl %edi, %edi
+; CMOV: jne [[BB4:.LBB[0-9_]+]]
+; CMOV: [[BB1:.LBB[0-9_]+]]
+; CMOV: movb %al, g8(%rip)
+; CMOV: retq
+; CMOV: [[BB2]]:
; CMOV: movl %ecx, %edx
-; CMOV: [[BB0]]:
-; CMOV: jg [[BB1:.LBB[0-9_]+]]
+; CMOV: jg [[BB0]]
+; CMOV: [[BB3]]:
; CMOV: movl %edx, %eax
-; CMOV: [[BB1]]:
; CMOV: testl %edi, %edi
-; CMOV: je [[BB2:.LBB[0-9_]+]]
+; CMOV: je [[BB1]]
+; CMOV: [[BB4]]:
; CMOV: movl %edx, %eax
-; CMOV: [[BB2]]:
; CMOV: movb %al, g8(%rip)
; CMOV: retq
define void @no_cascade_opt(i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
diff --git a/test/CodeGen/X86/code_placement_outline_optional_branches.ll b/test/CodeGen/X86/code_placement_outline_optional_branches.ll
deleted file mode 100644
index 5624d435215a..000000000000
--- a/test/CodeGen/X86/code_placement_outline_optional_branches.ll
+++ /dev/null
@@ -1,77 +0,0 @@
-; RUN: llc -mcpu=corei7 -mtriple=x86_64-linux < %s | FileCheck %s
-; RUN: llc -mcpu=corei7 -mtriple=x86_64-linux -outline-optional-branches < %s | FileCheck %s -check-prefix=CHECK-OUTLINE
-
-define void @foo(i32 %t1, i32 %t2, i32 %t3) {
-; Test that we lift the call to 'c' up to immediately follow the call to 'b'
-; when we disable the cfg conflict check.
-;
-; CHECK-LABEL: foo:
-; CHECK: callq a
-; CHECK: callq a
-; CHECK: callq a
-; CHECK: callq a
-; CHECK: callq b
-; CHECK: callq c
-; CHECK: callq d
-; CHECK: callq e
-; CHECK: callq f
-;
-; CHECK-OUTLINE-LABEL: foo:
-; CHECK-OUTLINE: callq b
-; CHECK-OUTLINE: callq c
-; CHECK-OUTLINE: callq d
-; CHECK-OUTLINE: callq e
-; CHECK-OUTLINE: callq f
-; CHECK-OUTLINE: callq a
-; CHECK-OUTLINE: callq a
-; CHECK-OUTLINE: callq a
-; CHECK-OUTLINE: callq a
-
-entry:
- %cmp = icmp eq i32 %t1, 0
- br i1 %cmp, label %if.then, label %if.end
-
-if.then:
- call void @a()
- call void @a()
- call void @a()
- call void @a()
- br label %if.end
-
-if.end:
- call void @b()
- br label %hotbranch
-
-hotbranch:
- %cmp2 = icmp eq i32 %t2, 0
- br i1 %cmp2, label %if.then2, label %if.end2, !prof !1
-
-if.then2:
- call void @c()
- br label %if.end2
-
-if.end2:
- call void @d()
- br label %shortbranch
-
-shortbranch:
- %cmp3 = icmp eq i32 %t3, 0
- br i1 %cmp3, label %if.then3, label %if.end3
-
-if.then3:
- call void @e()
- br label %if.end3
-
-if.end3:
- call void @f()
- ret void
-}
-
-declare void @a()
-declare void @b()
-declare void @c()
-declare void @d()
-declare void @e()
-declare void @f()
-
-!1 = !{!"branch_weights", i32 64, i32 4}
diff --git a/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll b/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll
index f00c40ba3a92..1f4578c95314 100644
--- a/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll
+++ b/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll
@@ -1,5 +1,4 @@
; RUN: opt -S -codegenprepare %s -o - | FileCheck %s
-; RUN: opt -S -codegenprepare -addr-sink-using-gep=1 %s -o - | FileCheck -check-prefix=CHECK-GEP %s
; This file tests the different cases that are involved when codegen prepare
; tries to get sign/zero extension out of the way of addressing mode.
; These tests require an actual target as addressing mode decisions depend
@@ -309,33 +308,18 @@ define i8 @twoArgsNoPromotionRemove(i1 %arg1, i8 %arg2, i8* %base) {
; CHECK: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i32 [[SHL]], %arg2
; CHECK: [[SEXTADD:%[a-zA-Z_0-9-]+]] = sext i32 [[ADD]] to i64
; BB then
-; CHECK: [[BASE1:%[a-zA-Z_0-9-]+]] = add i64 [[SEXTADD]], 48
-; CHECK: [[ADDR1:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[BASE1]] to i32*
+; CHECK: [[BASE1:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[SEXTADD]] to i32*
+; CHECK: [[BCC1:%[a-zA-Z_0-9-]+]] = bitcast i32* [[BASE1]] to i8*
+; CHECK: [[FULL1:%[a-zA-Z_0-9-]+]] = getelementptr i8, i8* [[BCC1]], i64 48
+; CHECK: [[ADDR1:%[a-zA-Z_0-9-]+]] = bitcast i8* [[FULL1]] to i32*
; CHECK: load i32, i32* [[ADDR1]]
; BB else
-; CHECK: [[BASE2:%[a-zA-Z_0-9-]+]] = add i64 [[SEXTADD]], 48
-; CHECK: [[ADDR2:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[BASE2]] to i32*
+; CHECK: [[BASE2:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[SEXTADD]] to i32*
+; CHECK: [[BCC2:%[a-zA-Z_0-9-]+]] = bitcast i32* [[BASE2]] to i8*
+; CHECK: [[FULL2:%[a-zA-Z_0-9-]+]] = getelementptr i8, i8* [[BCC2]], i64 48
+; CHECK: [[ADDR2:%[a-zA-Z_0-9-]+]] = bitcast i8* [[FULL2]] to i32*
; CHECK: load i32, i32* [[ADDR2]]
; CHECK: ret
-; CHECK-GEP-LABEL: @checkProfitability
-; CHECK-GEP-NOT: {{%[a-zA-Z_0-9-]+}} = sext i32 %arg1 to i64
-; CHECK-GEP-NOT: {{%[a-zA-Z_0-9-]+}} = sext i32 %arg2 to i64
-; CHECK-GEP: [[SHL:%[a-zA-Z_0-9-]+]] = shl nsw i32 %arg1, 1
-; CHECK-GEP: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i32 [[SHL]], %arg2
-; CHECK-GEP: [[SEXTADD:%[a-zA-Z_0-9-]+]] = sext i32 [[ADD]] to i64
-; BB then
-; CHECK-GEP: [[BASE1:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[SEXTADD]] to i32*
-; CHECK-GEP: [[BCC1:%[a-zA-Z_0-9-]+]] = bitcast i32* [[BASE1]] to i8*
-; CHECK-GEP: [[FULL1:%[a-zA-Z_0-9-]+]] = getelementptr i8, i8* [[BCC1]], i64 48
-; CHECK-GEP: [[ADDR1:%[a-zA-Z_0-9-]+]] = bitcast i8* [[FULL1]] to i32*
-; CHECK-GEP: load i32, i32* [[ADDR1]]
-; BB else
-; CHECK-GEP: [[BASE2:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[SEXTADD]] to i32*
-; CHECK-GEP: [[BCC2:%[a-zA-Z_0-9-]+]] = bitcast i32* [[BASE2]] to i8*
-; CHECK-GEP: [[FULL2:%[a-zA-Z_0-9-]+]] = getelementptr i8, i8* [[BCC2]], i64 48
-; CHECK-GEP: [[ADDR2:%[a-zA-Z_0-9-]+]] = bitcast i8* [[FULL2]] to i32*
-; CHECK-GEP: load i32, i32* [[ADDR2]]
-; CHECK-GEP: ret
define i32 @checkProfitability(i32 %arg1, i32 %arg2, i1 %test) {
%shl = shl nsw i32 %arg1, 1
%add1 = add nsw i32 %shl, %arg2
@@ -371,11 +355,10 @@ end:
; Use it as the starting point for the matching.
; CHECK: %conv.i = zext i16 [[PLAIN_OPND:%[.a-zA-Z_0-9-]+]] to i32
; CHECK-NEXT: [[PROMOTED_CONV:%[.a-zA-Z_0-9-]+]] = zext i16 [[PLAIN_OPND]] to i64
-; CHECK-NEXT: [[BASE:%[a-zA-Z_0-9-]+]] = ptrtoint %struct.dns_packet* %P to i64
-; CHECK-NEXT: [[ADD:%[a-zA-Z_0-9-]+]] = add i64 [[BASE]], [[PROMOTED_CONV]]
-; CHECK-NEXT: [[ADDR:%[a-zA-Z_0-9-]+]] = add i64 [[ADD]], 7
-; CHECK-NEXT: [[CAST:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[ADDR]] to i8*
-; CHECK-NEXT: load i8, i8* [[CAST]], align 1
+; CHECK-NEXT: [[BASE:%[a-zA-Z_0-9-]+]] = bitcast %struct.dns_packet* %P to i8*
+; CHECK-NEXT: [[ADD:%[a-zA-Z_0-9-]+]] = getelementptr i8, i8* [[BASE]], i64 [[PROMOTED_CONV]]
+; CHECK-NEXT: [[ADDR:%[a-zA-Z_0-9-]+]] = getelementptr i8, i8* [[ADD]], i64 7
+; CHECK-NEXT: load i8, i8* [[ADDR]], align 1
define signext i16 @fn3(%struct.dns_packet* nocapture readonly %P) {
entry:
%tmp = getelementptr inbounds %struct.dns_packet, %struct.dns_packet* %P, i64 0, i32 2
diff --git a/test/CodeGen/X86/codegen-prepare-extload.ll b/test/CodeGen/X86/codegen-prepare-extload.ll
index c5c761ee63ef..db5476ae1fe7 100644
--- a/test/CodeGen/X86/codegen-prepare-extload.ll
+++ b/test/CodeGen/X86/codegen-prepare-extload.ll
@@ -264,8 +264,7 @@ false:
; => We have one zext of %zextld left and we created one sext of %ld2.
; 2. We try to promote the operand of %sextaddza.
; a. This creates one sext of %zexta and one of %zextld
-; b. The sext of %zexta does not lead to any load, it stays here, even if it
-; could have been combine with the zext of %a.
+; b. The sext of %zexta can be combined with the zext of %a.
; c. The sext of %zextld leads to %ld and can be combined with it. This is
; done by promoting %zextld. This is fine with the current heuristic:
; neutral.
@@ -287,16 +286,14 @@ false:
; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %addr1
; OPT-NEXT: [[ZEXTLD1_1:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
; OPT-NEXT: [[ZEXTLD1_2:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
-; OPT-NEXT: [[ZEXTLD1_3:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
; OPT-NEXT: [[LD2:%[a-zA-Z_0-9-]+]] = load i32, i32* %addr2
; OPT-NEXT: [[SEXTLD2:%[a-zA-Z_0-9-]+]] = sext i32 [[LD2]] to i64
-; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nsw i64 [[SEXTLD2]], [[ZEXTLD1_1]]
-; We do not combine this one: see 2.b.
-; OPT-NEXT: [[ZEXTA:%[a-zA-Z_0-9-]+]] = zext i8 %a to i32
-; OPT-NEXT: [[SEXTZEXTA:%[a-zA-Z_0-9-]+]] = sext i32 [[ZEXTA]] to i64
-; OPT-NEXT: [[RESZA:%[a-zA-Z_0-9-]+]] = add nsw i64 [[SEXTZEXTA]], [[ZEXTLD1_3]]
+; OPT-NEXT: [[ZEXTLD1_3:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
+; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nsw i64 [[SEXTLD2]], [[ZEXTLD1_3]]
+; OPT-NEXT: [[ZEXTLD1_4:%[a-zA-Z_0-9-]+]] = zext i8 %a to i64
+; OPT-NEXT: [[RESZA:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ZEXTLD1_4]], [[ZEXTLD1_2]]
; OPT-NEXT: [[SEXTB:%[a-zA-Z_0-9-]+]] = sext i32 %b to i64
-; OPT-NEXT: [[RESB:%[a-zA-Z_0-9-]+]] = add nsw i64 [[SEXTB]], [[ZEXTLD1_2]]
+; OPT-NEXT: [[RESB:%[a-zA-Z_0-9-]+]] = add nsw i64 [[SEXTB]], [[ZEXTLD1_1]]
;
; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i32
; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = sext i32 [[ADD]] to i64
diff --git a/test/CodeGen/X86/codegen-prepare.ll b/test/CodeGen/X86/codegen-prepare.ll
index e58bc22ef142..9d7d3d376cdc 100644
--- a/test/CodeGen/X86/codegen-prepare.ll
+++ b/test/CodeGen/X86/codegen-prepare.ll
@@ -1,5 +1,4 @@
; RUN: llc < %s -mtriple=x86_64-pc-linux | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-pc-linux -addr-sink-using-gep=1 | FileCheck %s
; Check that the CodeGenPrepare Pass
; does not wrongly rewrite the address computed by Instruction %4
diff --git a/test/CodeGen/X86/combine-abs.ll b/test/CodeGen/X86/combine-abs.ll
new file mode 100644
index 000000000000..ac8f790a2ead
--- /dev/null
+++ b/test/CodeGen/X86/combine-abs.ll
@@ -0,0 +1,99 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s
+
+; fold (abs c1) -> c2
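+; abs of INT_MIN wraps: abs(-2147483648) is still -2147483648, which prints as
+; the unsigned value 2147483648 in the folded constant below.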
+define <4 x i32> @combine_v4i32_abs_constant() {
+; CHECK-LABEL: combine_v4i32_abs_constant:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovaps {{.*#+}} xmm0 = [0,1,3,2147483648]
+; CHECK-NEXT: retq
+ %1 = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> <i32 0, i32 -1, i32 3, i32 -2147483648>)
+ ret <4 x i32> %1
+}
+
+define <16 x i16> @combine_v16i16_abs_constant() {
+; CHECK-LABEL: combine_v16i16_abs_constant:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [0,1,1,3,3,7,7,255,255,4096,4096,32767,32767,32768,32768,0]
+; CHECK-NEXT: retq
+ %1 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> <i16 0, i16 1, i16 -1, i16 3, i16 -3, i16 7, i16 -7, i16 255, i16 -255, i16 4096, i16 -4096, i16 32767, i16 -32767, i16 -32768, i16 32768, i16 65536>)
+ ret <16 x i16> %1
+}
+
+; fold (abs (abs x)) -> (abs x)
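+; The second abs is written as the sub/icmp slt/select idiom, which should be
+; recognized as an integer abs so that the pair folds to a single pabs.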
+define <8 x i16> @combine_v8i16_abs_abs(<8 x i16> %a) {
+; CHECK-LABEL: combine_v8i16_abs_abs:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpabsw %xmm0, %xmm0
+; CHECK-NEXT: retq
+ %a1 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a)
+ %n2 = sub <8 x i16> zeroinitializer, %a1
+ %c2 = icmp slt <8 x i16> %a1, zeroinitializer
+ %a2 = select <8 x i1> %c2, <8 x i16> %n2, <8 x i16> %a1
+ ret <8 x i16> %a2
+}
+
+define <32 x i8> @combine_v32i8_abs_abs(<32 x i8> %a) {
+; CHECK-LABEL: combine_v32i8_abs_abs:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpabsb %ymm0, %ymm0
+; CHECK-NEXT: retq
+ %n1 = sub <32 x i8> zeroinitializer, %a
+ %b1 = icmp slt <32 x i8> %a, zeroinitializer
+ %a1 = select <32 x i1> %b1, <32 x i8> %n1, <32 x i8> %a
+ %a2 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %a1)
+ ret <32 x i8> %a2
+}
+
+define <4 x i64> @combine_v4i64_abs_abs(<4 x i64> %a) {
+; CHECK-LABEL: combine_v4i64_abs_abs:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpsrad $31, %ymm0, %ymm1
+; CHECK-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
+; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vpsrad $31, %ymm0, %ymm1
+; CHECK-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
+; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: retq
+ %n1 = sub <4 x i64> zeroinitializer, %a
+ %b1 = icmp slt <4 x i64> %a, zeroinitializer
+ %a1 = select <4 x i1> %b1, <4 x i64> %n1, <4 x i64> %a
+ %n2 = sub <4 x i64> zeroinitializer, %a1
+ %b2 = icmp sgt <4 x i64> %a1, zeroinitializer
+ %a2 = select <4 x i1> %b2, <4 x i64> %a1, <4 x i64> %n2
+ ret <4 x i64> %a2
+}
+
+; fold (abs x) -> x iff not-negative
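+; A logical shift right by 1 clears the sign bit, so the value is provably
+; non-negative; the and-mask variant below is apparently not caught and still
+; emits a vpabsb.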
+define <16 x i8> @combine_v16i8_abs_constant(<16 x i8> %a) {
+; CHECK-LABEL: combine_v16i8_abs_constant:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT: vpabsb %xmm0, %xmm0
+; CHECK-NEXT: retq
+ %1 = insertelement <16 x i8> undef, i8 15, i32 0
+ %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> zeroinitializer
+ %3 = and <16 x i8> %a, %2
+ %4 = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %3)
+ ret <16 x i8> %4
+}
+
+define <8 x i32> @combine_v8i32_abs_pos(<8 x i32> %a) {
+; CHECK-LABEL: combine_v8i32_abs_pos:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpsrld $1, %ymm0, %ymm0
+; CHECK-NEXT: retq
+ %1 = lshr <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %2 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %1)
+ ret <8 x i32> %2
+}
+
+declare <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8>) nounwind readnone
+declare <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32>) nounwind readnone
+declare <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16>) nounwind readnone
+
+declare <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8>) nounwind readnone
+declare <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32>) nounwind readnone
+declare <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16>) nounwind readnone
diff --git a/test/CodeGen/X86/combine-and.ll b/test/CodeGen/X86/combine-and.ll
index 6f310d9b7b12..f30fa61bbfbe 100644
--- a/test/CodeGen/X86/combine-and.ll
+++ b/test/CodeGen/X86/combine-and.ll
@@ -245,3 +245,28 @@ define <4 x i32> @and_or_zext_v4i16(<4 x i16> %a0) {
%3 = and <4 x i32> %2, <i32 65536, i32 65536, i32 65536, i32 65536>
ret <4 x i32> %3
}
+
+;
+; known sign bits folding
+;
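+; An ashr by width-1 smears the sign bit across the element, so masking the
+; result with a low constant can be rewritten as a cheaper shift sequence.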
+
+define <8 x i16> @ashr_mask1_v8i16(<8 x i16> %a0) {
+; CHECK-LABEL: ashr_mask1_v8i16:
+; CHECK: # BB#0:
+; CHECK-NEXT: psrlw $15, %xmm0
+; CHECK-NEXT: retq
+ %1 = ashr <8 x i16> %a0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+ %2 = and <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @ashr_mask7_v4i32(<4 x i32> %a0) {
+; CHECK-LABEL: ashr_mask7_v4i32:
+; CHECK: # BB#0:
+; CHECK-NEXT: psrad $31, %xmm0
+; CHECK-NEXT: psrld $29, %xmm0
+; CHECK-NEXT: retq
+ %1 = ashr <4 x i32> %a0, <i32 31, i32 31, i32 31, i32 31>
+ %2 = and <4 x i32> %1, <i32 7, i32 7, i32 7, i32 7>
+ ret <4 x i32> %2
+}
diff --git a/test/CodeGen/X86/combine-fcopysign.ll b/test/CodeGen/X86/combine-fcopysign.ll
index 807ac4e3fc6b..43e09bfe5fea 100644
--- a/test/CodeGen/X86/combine-fcopysign.ll
+++ b/test/CodeGen/X86/combine-fcopysign.ll
@@ -292,7 +292,7 @@ define <4 x float> @combine_vec_fcopysign_fptrunc_sgn(<4 x float> %x, <4 x doubl
; SSE-NEXT: cvtsd2ss %xmm1, %xmm1
; SSE-NEXT: andps %xmm4, %xmm1
; SSE-NEXT: orps %xmm6, %xmm1
-; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movaps %xmm3, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: andps %xmm5, %xmm1
diff --git a/test/CodeGen/X86/combine-shl.ll b/test/CodeGen/X86/combine-shl.ll
index 5cc7312de47f..3ad38f2717d9 100644
--- a/test/CodeGen/X86/combine-shl.ll
+++ b/test/CodeGen/X86/combine-shl.ll
@@ -243,11 +243,11 @@ define <8 x i32> @combine_vec_shl_ext_shl1(<8 x i16> %x) {
define <8 x i32> @combine_vec_shl_zext_lshr0(<8 x i16> %x) {
; SSE-LABEL: combine_vec_shl_zext_lshr0:
; SSE: # BB#0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_zext_lshr0:
@@ -270,15 +270,15 @@ define <8 x i32> @combine_vec_shl_zext_lshr1(<8 x i16> %x) {
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: psrlw $4, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6],xmm1[7]
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrlw $2, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1,2],xmm0[3,4],xmm2[5,6],xmm0[7]
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: psrlw $1, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrlw $2, %xmm1
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2],xmm0[3,4],xmm1[5,6],xmm0[7]
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm1
; SSE-NEXT: retq
@@ -288,7 +288,7 @@ define <8 x i32> @combine_vec_shl_zext_lshr1(<8 x i16> %x) {
; AVX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,3,4,5,6,7,8]
; AVX-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
diff --git a/test/CodeGen/X86/combine-testm-and.ll b/test/CodeGen/X86/combine-testm-and.ll
index 2b95a114540d..b10a4b5ed298 100644
--- a/test/CodeGen/X86/combine-testm-and.ll
+++ b/test/CodeGen/X86/combine-testm-and.ll
@@ -6,6 +6,7 @@ define i32 @combineTESTM_AND_1(<8 x i64> %a, <8 x i64> %b) {
; CHECK: ## BB#0:
; CHECK-NEXT: vptestmq %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%and.i = and <8 x i64> %b, %a
%test.i = tail call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %and.i, <8 x i64> %and.i, i8 -1)
@@ -16,9 +17,10 @@ define i32 @combineTESTM_AND_1(<8 x i64> %a, <8 x i64> %b) {
define i32 @combineTESTM_AND_2(<8 x i64> %a, <8 x i64> %b , i8 %mask) {
; CHECK-LABEL: combineTESTM_AND_2:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmq %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%and.i = and <8 x i64> %b, %a
%test.i = tail call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %and.i, <8 x i64> %and.i, i8 %mask)
@@ -29,9 +31,10 @@ define i32 @combineTESTM_AND_2(<8 x i64> %a, <8 x i64> %b , i8 %mask) {
define i32 @combineTESTM_AND_mask_3(<8 x i64> %a, <8 x i64>* %bptr , i8 %mask) {
; CHECK-LABEL: combineTESTM_AND_mask_3:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vptestmq (%rdi), %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%b = load <8 x i64>, <8 x i64>* %bptr
%and.i = and <8 x i64> %a, %b
@@ -43,9 +46,10 @@ define i32 @combineTESTM_AND_mask_3(<8 x i64> %a, <8 x i64>* %bptr , i8 %mask) {
define i32 @combineTESTM_AND_mask_4(<8 x i64> %a, <8 x i64>* %bptr , i8 %mask) {
; CHECK-LABEL: combineTESTM_AND_mask_4:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vptestmq (%rdi), %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%b = load <8 x i64>, <8 x i64>* %bptr
%and.i = and <8 x i64> %b, %a
diff --git a/test/CodeGen/X86/combiner-aa-0.ll b/test/CodeGen/X86/combiner-aa-0.ll
deleted file mode 100644
index 403059d90ab1..000000000000
--- a/test/CodeGen/X86/combiner-aa-0.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86-64 -combiner-global-alias-analysis -combiner-alias-analysis
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
- %struct.Hash_Key = type { [4 x i32], i32 }
-@g_flipV_hashkey = external global %struct.Hash_Key, align 16 ; <%struct.Hash_Key*> [#uses=1]
-
-define void @foo() nounwind {
- %t0 = load i32, i32* undef, align 16 ; <i32> [#uses=1]
- %t1 = load i32, i32* null, align 4 ; <i32> [#uses=1]
- %t2 = srem i32 %t0, 32 ; <i32> [#uses=1]
- %t3 = shl i32 1, %t2 ; <i32> [#uses=1]
- %t4 = xor i32 %t3, %t1 ; <i32> [#uses=1]
- store i32 %t4, i32* null, align 4
- %t5 = getelementptr %struct.Hash_Key, %struct.Hash_Key* @g_flipV_hashkey, i64 0, i32 0, i64 0 ; <i32*> [#uses=2]
- %t6 = load i32, i32* %t5, align 4 ; <i32> [#uses=1]
- %t7 = shl i32 1, undef ; <i32> [#uses=1]
- %t8 = xor i32 %t7, %t6 ; <i32> [#uses=1]
- store i32 %t8, i32* %t5, align 4
- unreachable
-}
diff --git a/test/CodeGen/X86/combiner-aa-1.ll b/test/CodeGen/X86/combiner-aa-1.ll
deleted file mode 100644
index cc3e5ca12602..000000000000
--- a/test/CodeGen/X86/combiner-aa-1.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s --combiner-alias-analysis --combiner-global-alias-analysis
-; PR4880
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-pc-linux-gnu"
-
-%struct.alst_node = type { %struct.node }
-%struct.arg_node = type { %struct.node, i8*, %struct.alst_node* }
-%struct.arglst_node = type { %struct.alst_node, %struct.arg_node*, %struct.arglst_node* }
-%struct.lam_node = type { %struct.alst_node, %struct.arg_node*, %struct.alst_node* }
-%struct.node = type { i32 (...)**, %struct.node* }
-
-define i32 @._ZN8lam_node18resolve_name_clashEP8arg_nodeP9alst_node._ZNK8lam_nodeeqERK8exp_node._ZN11arglst_nodeD0Ev(%struct.lam_node* %this.this, %struct.arg_node* %outer_arg, %struct.alst_node* %env.cmp, %struct.arglst_node* %this, i32 %functionID) {
-comb_entry:
- %.SV59 = alloca %struct.node* ; <%struct.node**> [#uses=1]
- %0 = load i32 (...)**, i32 (...)*** null, align 4 ; <i32 (...)**> [#uses=1]
- %1 = getelementptr inbounds i32 (...)*, i32 (...)** %0, i32 3 ; <i32 (...)**> [#uses=1]
- %2 = load i32 (...)*, i32 (...)** %1, align 4 ; <i32 (...)*> [#uses=1]
- store %struct.node* undef, %struct.node** %.SV59
- %3 = bitcast i32 (...)* %2 to i32 (%struct.node*)* ; <i32 (%struct.node*)*> [#uses=1]
- %4 = tail call i32 %3(%struct.node* undef) ; <i32> [#uses=0]
- unreachable
-}
diff --git a/test/CodeGen/X86/commute-3dnow.ll b/test/CodeGen/X86/commute-3dnow.ll
new file mode 100644
index 000000000000..b7a01efe2d3a
--- /dev/null
+++ b/test/CodeGen/X86/commute-3dnow.ll
@@ -0,0 +1,270 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+mmx,+3dnow | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+3dnow | FileCheck %s --check-prefix=X64
+
+define void @commute_m_pfadd(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
+; X32-LABEL: commute_m_pfadd:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movq (%edx), %mm0
+; X32-NEXT: pfadd (%eax), %mm0
+; X32-NEXT: pfadd (%ecx), %mm0
+; X32-NEXT: movq %mm0, (%ecx)
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_m_pfadd:
+; X64: # BB#0:
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: pfadd (%rsi), %mm0
+; X64-NEXT: pfadd (%rdx), %mm0
+; X64-NEXT: movq %mm0, (%rdx)
+; X64-NEXT: retq
+ %1 = load x86_mmx, x86_mmx* %a0
+ %2 = load x86_mmx, x86_mmx* %a1
+ %3 = load x86_mmx, x86_mmx* %a2
+ %4 = tail call x86_mmx @llvm.x86.3dnow.pfadd(x86_mmx %1, x86_mmx %2)
+ %5 = tail call x86_mmx @llvm.x86.3dnow.pfadd(x86_mmx %3, x86_mmx %4)
+ store x86_mmx %5, x86_mmx* %a2
+ ret void
+}
+declare x86_mmx @llvm.x86.3dnow.pfadd(x86_mmx, x86_mmx)
+
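+; pfsub isn't commutative, but a swapped pfsub can still fold its memory
+; operand by rewriting to pfsubr (reverse subtract), and vice versa.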
+define void @commute_m_pfsub(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
+; X32-LABEL: commute_m_pfsub:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movq (%edx), %mm0
+; X32-NEXT: pfsub (%eax), %mm0
+; X32-NEXT: pfsubr (%ecx), %mm0
+; X32-NEXT: movq %mm0, (%ecx)
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_m_pfsub:
+; X64: # BB#0:
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: pfsub (%rsi), %mm0
+; X64-NEXT: pfsubr (%rdx), %mm0
+; X64-NEXT: movq %mm0, (%rdx)
+; X64-NEXT: retq
+ %1 = load x86_mmx, x86_mmx* %a0
+ %2 = load x86_mmx, x86_mmx* %a1
+ %3 = load x86_mmx, x86_mmx* %a2
+ %4 = tail call x86_mmx @llvm.x86.3dnow.pfsub(x86_mmx %1, x86_mmx %2)
+ %5 = tail call x86_mmx @llvm.x86.3dnow.pfsub(x86_mmx %3, x86_mmx %4)
+ store x86_mmx %5, x86_mmx* %a2
+ ret void
+}
+declare x86_mmx @llvm.x86.3dnow.pfsub(x86_mmx, x86_mmx)
+
+define void @commute_m_pfsubr(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
+; X32-LABEL: commute_m_pfsubr:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movq (%edx), %mm0
+; X32-NEXT: pfsubr (%eax), %mm0
+; X32-NEXT: pfsub (%ecx), %mm0
+; X32-NEXT: movq %mm0, (%ecx)
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_m_pfsubr:
+; X64: # BB#0:
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: pfsubr (%rsi), %mm0
+; X64-NEXT: pfsub (%rdx), %mm0
+; X64-NEXT: movq %mm0, (%rdx)
+; X64-NEXT: retq
+ %1 = load x86_mmx, x86_mmx* %a0
+ %2 = load x86_mmx, x86_mmx* %a1
+ %3 = load x86_mmx, x86_mmx* %a2
+ %4 = tail call x86_mmx @llvm.x86.3dnow.pfsubr(x86_mmx %1, x86_mmx %2)
+ %5 = tail call x86_mmx @llvm.x86.3dnow.pfsubr(x86_mmx %3, x86_mmx %4)
+ store x86_mmx %5, x86_mmx* %a2
+ ret void
+}
+declare x86_mmx @llvm.x86.3dnow.pfsubr(x86_mmx, x86_mmx)
+
+define void @commute_m_pfmul(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
+; X32-LABEL: commute_m_pfmul:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movq (%edx), %mm0
+; X32-NEXT: pfmul (%eax), %mm0
+; X32-NEXT: pfmul (%ecx), %mm0
+; X32-NEXT: movq %mm0, (%ecx)
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_m_pfmul:
+; X64: # BB#0:
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: pfmul (%rsi), %mm0
+; X64-NEXT: pfmul (%rdx), %mm0
+; X64-NEXT: movq %mm0, (%rdx)
+; X64-NEXT: retq
+ %1 = load x86_mmx, x86_mmx* %a0
+ %2 = load x86_mmx, x86_mmx* %a1
+ %3 = load x86_mmx, x86_mmx* %a2
+ %4 = tail call x86_mmx @llvm.x86.3dnow.pfmul(x86_mmx %1, x86_mmx %2)
+ %5 = tail call x86_mmx @llvm.x86.3dnow.pfmul(x86_mmx %3, x86_mmx %4)
+ store x86_mmx %5, x86_mmx* %a2
+ ret void
+}
+declare x86_mmx @llvm.x86.3dnow.pfmul(x86_mmx, x86_mmx)
+
+; PFMAX can't commute without fast-math.
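+; pfmax(x, y) and pfmax(y, x) may differ for NaN or signed-zero inputs, so the
+; operands stay in order at the cost of an extra movq.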
+define void @commute_m_pfmax(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
+; X32-LABEL: commute_m_pfmax:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movq (%edx), %mm0
+; X32-NEXT: movq (%ecx), %mm1
+; X32-NEXT: pfmax (%eax), %mm0
+; X32-NEXT: pfmax %mm0, %mm1
+; X32-NEXT: movq %mm1, (%ecx)
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_m_pfmax:
+; X64: # BB#0:
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: movq (%rdx), %mm1
+; X64-NEXT: pfmax (%rsi), %mm0
+; X64-NEXT: pfmax %mm0, %mm1
+; X64-NEXT: movq %mm1, (%rdx)
+; X64-NEXT: retq
+ %1 = load x86_mmx, x86_mmx* %a0
+ %2 = load x86_mmx, x86_mmx* %a1
+ %3 = load x86_mmx, x86_mmx* %a2
+ %4 = tail call x86_mmx @llvm.x86.3dnow.pfmax(x86_mmx %1, x86_mmx %2)
+ %5 = tail call x86_mmx @llvm.x86.3dnow.pfmax(x86_mmx %3, x86_mmx %4)
+ store x86_mmx %5, x86_mmx* %a2
+ ret void
+}
+declare x86_mmx @llvm.x86.3dnow.pfmax(x86_mmx, x86_mmx)
+
+; PFMIN can't commute without fast-math.
+define void @commute_m_pfmin(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
+; X32-LABEL: commute_m_pfmin:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movq (%edx), %mm0
+; X32-NEXT: movq (%ecx), %mm1
+; X32-NEXT: pfmin (%eax), %mm0
+; X32-NEXT: pfmin %mm0, %mm1
+; X32-NEXT: movq %mm1, (%ecx)
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_m_pfmin:
+; X64: # BB#0:
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: movq (%rdx), %mm1
+; X64-NEXT: pfmin (%rsi), %mm0
+; X64-NEXT: pfmin %mm0, %mm1
+; X64-NEXT: movq %mm1, (%rdx)
+; X64-NEXT: retq
+ %1 = load x86_mmx, x86_mmx* %a0
+ %2 = load x86_mmx, x86_mmx* %a1
+ %3 = load x86_mmx, x86_mmx* %a2
+ %4 = tail call x86_mmx @llvm.x86.3dnow.pfmin(x86_mmx %1, x86_mmx %2)
+ %5 = tail call x86_mmx @llvm.x86.3dnow.pfmin(x86_mmx %3, x86_mmx %4)
+ store x86_mmx %5, x86_mmx* %a2
+ ret void
+}
+declare x86_mmx @llvm.x86.3dnow.pfmin(x86_mmx, x86_mmx)
+
+define void @commute_m_pfcmpeq(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
+; X32-LABEL: commute_m_pfcmpeq:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movq (%edx), %mm0
+; X32-NEXT: pfcmpeq (%eax), %mm0
+; X32-NEXT: pfcmpeq (%ecx), %mm0
+; X32-NEXT: movq %mm0, (%ecx)
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_m_pfcmpeq:
+; X64: # BB#0:
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: pfcmpeq (%rsi), %mm0
+; X64-NEXT: pfcmpeq (%rdx), %mm0
+; X64-NEXT: movq %mm0, (%rdx)
+; X64-NEXT: retq
+ %1 = load x86_mmx, x86_mmx* %a0
+ %2 = load x86_mmx, x86_mmx* %a1
+ %3 = load x86_mmx, x86_mmx* %a2
+ %4 = tail call x86_mmx @llvm.x86.3dnow.pfcmpeq(x86_mmx %1, x86_mmx %2)
+ %5 = tail call x86_mmx @llvm.x86.3dnow.pfcmpeq(x86_mmx %3, x86_mmx %4)
+ store x86_mmx %5, x86_mmx* %a2
+ ret void
+}
+declare x86_mmx @llvm.x86.3dnow.pfcmpeq(x86_mmx, x86_mmx)
+
+define void @commute_m_pavgusb(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
+; X32-LABEL: commute_m_pavgusb:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movq (%edx), %mm0
+; X32-NEXT: pavgusb (%eax), %mm0
+; X32-NEXT: pavgusb (%ecx), %mm0
+; X32-NEXT: movq %mm0, (%ecx)
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_m_pavgusb:
+; X64: # BB#0:
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: pavgusb (%rsi), %mm0
+; X64-NEXT: pavgusb (%rdx), %mm0
+; X64-NEXT: movq %mm0, (%rdx)
+; X64-NEXT: retq
+ %1 = load x86_mmx, x86_mmx* %a0
+ %2 = load x86_mmx, x86_mmx* %a1
+ %3 = load x86_mmx, x86_mmx* %a2
+ %4 = tail call x86_mmx @llvm.x86.3dnow.pavgusb(x86_mmx %1, x86_mmx %2)
+ %5 = tail call x86_mmx @llvm.x86.3dnow.pavgusb(x86_mmx %3, x86_mmx %4)
+ store x86_mmx %5, x86_mmx* %a2
+ ret void
+}
+declare x86_mmx @llvm.x86.3dnow.pavgusb(x86_mmx, x86_mmx)
+
+define void @commute_m_pmulhrw(x86_mmx *%a0, x86_mmx *%a1, x86_mmx *%a2) nounwind {
+; X32-LABEL: commute_m_pmulhrw:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movq (%edx), %mm0
+; X32-NEXT: pmulhrw (%eax), %mm0
+; X32-NEXT: pmulhrw (%ecx), %mm0
+; X32-NEXT: movq %mm0, (%ecx)
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_m_pmulhrw:
+; X64: # BB#0:
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: pmulhrw (%rsi), %mm0
+; X64-NEXT: pmulhrw (%rdx), %mm0
+; X64-NEXT: movq %mm0, (%rdx)
+; X64-NEXT: retq
+ %1 = load x86_mmx, x86_mmx* %a0
+ %2 = load x86_mmx, x86_mmx* %a1
+ %3 = load x86_mmx, x86_mmx* %a2
+ %4 = tail call x86_mmx @llvm.x86.3dnow.pmulhrw(x86_mmx %1, x86_mmx %2)
+ %5 = tail call x86_mmx @llvm.x86.3dnow.pmulhrw(x86_mmx %3, x86_mmx %4)
+ store x86_mmx %5, x86_mmx* %a2
+ ret void
+}
+declare x86_mmx @llvm.x86.3dnow.pmulhrw(x86_mmx, x86_mmx)
diff --git a/test/CodeGen/X86/commute-clmul.ll b/test/CodeGen/X86/commute-clmul.ll
index d13911abc864..84d9a914c9bb 100644
--- a/test/CodeGen/X86/commute-clmul.ll
+++ b/test/CodeGen/X86/commute-clmul.ll
@@ -1,59 +1,64 @@
-; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=x86-64 -mattr=+sse2,+pclmul < %s | FileCheck %s --check-prefix=SSE
-; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=x86-64 -mattr=+avx2,+pclmul < %s | FileCheck %s --check-prefix=AVX
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2,+pclmul | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2,+pclmul | FileCheck %s --check-prefix=AVX
declare <2 x i64> @llvm.x86.pclmulqdq(<2 x i64>, <2 x i64>, i8) nounwind readnone
define <2 x i64> @commute_lq_lq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
- ;SSE-LABEL: commute_lq_lq
- ;SSE: pclmulqdq $0, (%rdi), %xmm0
- ;SSE-NEXT: retq
-
- ;AVX-LABEL: commute_lq_lq
- ;AVX: vpclmulqdq $0, (%rdi), %xmm0, %xmm0
- ;AVX-NEXT: retq
-
+; SSE-LABEL: commute_lq_lq:
+; SSE: # BB#0:
+; SSE-NEXT: pclmulqdq $0, (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_lq_lq:
+; AVX: # BB#0:
+; AVX-NEXT: vpclmulqdq $0, (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
%1 = load <2 x i64>, <2 x i64>* %a0
%2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 0)
ret <2 x i64> %2
}
define <2 x i64> @commute_lq_hq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
- ;SSE-LABEL: commute_lq_hq
- ;SSE: pclmulqdq $1, (%rdi), %xmm0
- ;SSE-NEXT: retq
-
- ;AVX-LABEL: commute_lq_hq
- ;AVX: vpclmulqdq $1, (%rdi), %xmm0, %xmm0
- ;AVX-NEXT: retq
-
+; SSE-LABEL: commute_lq_hq:
+; SSE: # BB#0:
+; SSE-NEXT: pclmulqdq $1, (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_lq_hq:
+; AVX: # BB#0:
+; AVX-NEXT: vpclmulqdq $1, (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
%1 = load <2 x i64>, <2 x i64>* %a0
%2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 16)
ret <2 x i64> %2
}
define <2 x i64> @commute_hq_lq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
- ;SSE-LABEL: commute_hq_lq
- ;SSE: pclmulqdq $16, (%rdi), %xmm0
- ;SSE-NEXT: retq
-
- ;AVX-LABEL: commute_hq_lq
- ;AVX: vpclmulqdq $16, (%rdi), %xmm0, %xmm0
- ;AVX-NEXT: retq
-
+; SSE-LABEL: commute_hq_lq:
+; SSE: # BB#0:
+; SSE-NEXT: pclmulqdq $16, (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_hq_lq:
+; AVX: # BB#0:
+; AVX-NEXT: vpclmulqdq $16, (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
%1 = load <2 x i64>, <2 x i64>* %a0
%2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 1)
ret <2 x i64> %2
}
define <2 x i64> @commute_hq_hq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
- ;SSE-LABEL: commute_hq_hq
- ;SSE: pclmulqdq $17, (%rdi), %xmm0
- ;SSE-NEXT: retq
-
- ;AVX-LABEL: commute_hq_hq
- ;AVX: vpclmulqdq $17, (%rdi), %xmm0, %xmm0
- ;AVX-NEXT: retq
-
+; SSE-LABEL: commute_hq_hq:
+; SSE: # BB#0:
+; SSE-NEXT: pclmulqdq $17, (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_hq_hq:
+; AVX: # BB#0:
+; AVX-NEXT: vpclmulqdq $17, (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
%1 = load <2 x i64>, <2 x i64>* %a0
%2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 17)
ret <2 x i64> %2
diff --git a/test/CodeGen/X86/commute-fcmp.ll b/test/CodeGen/X86/commute-fcmp.ll
index 4274d1feaa3b..f05fb805b411 100644
--- a/test/CodeGen/X86/commute-fcmp.ll
+++ b/test/CodeGen/X86/commute-fcmp.ll
@@ -1,6 +1,6 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=x86-64 -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE
-; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefix=AVX
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
;
; Float Comparisons
@@ -17,7 +17,6 @@ define <4 x i32> @commute_cmpps_eq(<4 x float>* %a0, <4 x float> %a1) {
; AVX: # BB#0:
; AVX-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp oeq <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
@@ -34,7 +33,6 @@ define <4 x i32> @commute_cmpps_ne(<4 x float>* %a0, <4 x float> %a1) {
; AVX: # BB#0:
; AVX-NEXT: vcmpneqps (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp une <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
@@ -51,7 +49,6 @@ define <4 x i32> @commute_cmpps_ord(<4 x float>* %a0, <4 x float> %a1) {
; AVX: # BB#0:
; AVX-NEXT: vcmpordps (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp ord <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
@@ -68,7 +65,6 @@ define <4 x i32> @commute_cmpps_uno(<4 x float>* %a0, <4 x float> %a1) {
; AVX: # BB#0:
; AVX-NEXT: vcmpunordps (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp uno <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
@@ -92,7 +88,6 @@ define <4 x i32> @commute_cmpps_ueq(<4 x float>* %a0, <4 x float> %a1) {
; AVX-NEXT: vcmpunordps %xmm0, %xmm1, %xmm0
; AVX-NEXT: vorps %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp ueq <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
@@ -116,7 +111,6 @@ define <4 x i32> @commute_cmpps_one(<4 x float>* %a0, <4 x float> %a1) {
; AVX-NEXT: vcmpordps %xmm0, %xmm1, %xmm0
; AVX-NEXT: vandps %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp one <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
@@ -136,7 +130,6 @@ define <4 x i32> @commute_cmpps_lt(<4 x float>* %a0, <4 x float> %a1) {
; AVX-NEXT: vmovaps (%rdi), %xmm1
; AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
-;
%1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp olt <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
@@ -156,7 +149,6 @@ define <4 x i32> @commute_cmpps_le(<4 x float>* %a0, <4 x float> %a1) {
; AVX-NEXT: vmovaps (%rdi), %xmm1
; AVX-NEXT: vcmpleps %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
-;
%1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp ole <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
@@ -174,7 +166,6 @@ define <8 x i32> @commute_cmpps_eq_ymm(<8 x float>* %a0, <8 x float> %a1) {
; AVX: # BB#0:
; AVX-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp oeq <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
@@ -192,7 +183,6 @@ define <8 x i32> @commute_cmpps_ne_ymm(<8 x float>* %a0, <8 x float> %a1) {
; AVX: # BB#0:
; AVX-NEXT: vcmpneqps (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp une <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
@@ -210,7 +200,6 @@ define <8 x i32> @commute_cmpps_ord_ymm(<8 x float>* %a0, <8 x float> %a1) {
; AVX: # BB#0:
; AVX-NEXT: vcmpordps (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp ord <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
@@ -228,7 +217,6 @@ define <8 x i32> @commute_cmpps_uno_ymm(<8 x float>* %a0, <8 x float> %a1) {
; AVX: # BB#0:
; AVX-NEXT: vcmpunordps (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp uno <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
@@ -257,7 +245,6 @@ define <8 x i32> @commute_cmpps_ueq_ymm(<8 x float>* %a0, <8 x float> %a1) {
; AVX-NEXT: vcmpunordps %ymm0, %ymm1, %ymm0
; AVX-NEXT: vorps %ymm2, %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp ueq <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
@@ -286,7 +273,6 @@ define <8 x i32> @commute_cmpps_one_ymm(<8 x float>* %a0, <8 x float> %a1) {
; AVX-NEXT: vcmpordps %ymm0, %ymm1, %ymm0
; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp one <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
@@ -309,7 +295,6 @@ define <8 x i32> @commute_cmpps_lt_ymm(<8 x float>* %a0, <8 x float> %a1) {
; AVX-NEXT: vmovaps (%rdi), %ymm1
; AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX-NEXT: retq
-;
%1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp olt <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
@@ -332,7 +317,6 @@ define <8 x i32> @commute_cmpps_le_ymm(<8 x float>* %a0, <8 x float> %a1) {
; AVX-NEXT: vmovaps (%rdi), %ymm1
; AVX-NEXT: vcmpleps %ymm0, %ymm1, %ymm0
; AVX-NEXT: retq
-;
%1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp ole <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
@@ -354,7 +338,6 @@ define <2 x i64> @commute_cmppd_eq(<2 x double>* %a0, <2 x double> %a1) {
; AVX: # BB#0:
; AVX-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp oeq <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
@@ -371,7 +354,6 @@ define <2 x i64> @commute_cmppd_ne(<2 x double>* %a0, <2 x double> %a1) {
; AVX: # BB#0:
; AVX-NEXT: vcmpneqpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp une <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
@@ -388,7 +370,6 @@ define <2 x i64> @commute_cmppd_ord(<2 x double>* %a0, <2 x double> %a1) {
; AVX: # BB#0:
; AVX-NEXT: vcmpordpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp ord <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
@@ -412,7 +393,6 @@ define <2 x i64> @commute_cmppd_ueq(<2 x double>* %a0, <2 x double> %a1) {
; AVX-NEXT: vcmpunordpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vorpd %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp ueq <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
@@ -436,7 +416,6 @@ define <2 x i64> @commute_cmppd_one(<2 x double>* %a0, <2 x double> %a1) {
; AVX-NEXT: vcmpordpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp one <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
@@ -453,7 +432,6 @@ define <2 x i64> @commute_cmppd_uno(<2 x double>* %a0, <2 x double> %a1) {
; AVX: # BB#0:
; AVX-NEXT: vcmpunordpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
-;
%1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp uno <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
@@ -473,7 +451,6 @@ define <2 x i64> @commute_cmppd_lt(<2 x double>* %a0, <2 x double> %a1) {
; AVX-NEXT: vmovapd (%rdi), %xmm1
; AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
-;
%1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp olt <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
@@ -493,7 +470,6 @@ define <2 x i64> @commute_cmppd_le(<2 x double>* %a0, <2 x double> %a1) {
; AVX-NEXT: vmovapd (%rdi), %xmm1
; AVX-NEXT: vcmplepd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
-;
%1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp ole <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
@@ -511,7 +487,6 @@ define <4 x i64> @commute_cmppd_eq_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; AVX: # BB#0:
; AVX-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp oeq <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
@@ -529,7 +504,6 @@ define <4 x i64> @commute_cmppd_ne_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; AVX: # BB#0:
; AVX-NEXT: vcmpneqpd (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp une <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
@@ -547,7 +521,6 @@ define <4 x i64> @commute_cmppd_ord_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; AVX: # BB#0:
; AVX-NEXT: vcmpordpd (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp ord <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
@@ -565,7 +538,6 @@ define <4 x i64> @commute_cmppd_uno_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; AVX: # BB#0:
; AVX-NEXT: vcmpunordpd (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp uno <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
@@ -594,7 +566,6 @@ define <4 x i64> @commute_cmppd_ueq_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; AVX-NEXT: vcmpunordpd %ymm0, %ymm1, %ymm0
; AVX-NEXT: vorpd %ymm2, %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp ueq <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
@@ -623,7 +594,6 @@ define <4 x i64> @commute_cmppd_one_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; AVX-NEXT: vcmpordpd %ymm0, %ymm1, %ymm0
; AVX-NEXT: vandpd %ymm2, %ymm0, %ymm0
; AVX-NEXT: retq
-;
%1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp one <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
@@ -646,7 +616,6 @@ define <4 x i64> @commute_cmppd_lt_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; AVX-NEXT: vmovapd (%rdi), %ymm1
; AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX-NEXT: retq
-;
%1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp olt <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
@@ -669,7 +638,6 @@ define <4 x i64> @commute_cmppd_le_ymmm(<4 x double>* %a0, <4 x double> %a1) {
; AVX-NEXT: vmovapd (%rdi), %ymm1
; AVX-NEXT: vcmplepd %ymm0, %ymm1, %ymm0
; AVX-NEXT: retq
-;
%1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp ole <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
diff --git a/test/CodeGen/X86/commute-xop.ll b/test/CodeGen/X86/commute-xop.ll
index e551d9bfc78f..4043155ba8d4 100644
--- a/test/CodeGen/X86/commute-xop.ll
+++ b/test/CodeGen/X86/commute-xop.ll
@@ -1,8 +1,18 @@
-; RUN: llc -O3 -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx,+xop < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefix=X64
define <16 x i8> @commute_fold_vpcomb(<16 x i8>* %a0, <16 x i8> %a1) {
- ;CHECK-LABEL: commute_fold_vpcomb
- ;CHECK: vpcomgtb (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpcomb:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpcomgtb (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpcomb:
+; X64: # BB#0:
+; X64-NEXT: vpcomgtb (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a0
%2 = call <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8> %1, <16 x i8> %a1, i8 0) ; vpcomltb
ret <16 x i8> %2
@@ -10,8 +20,16 @@ define <16 x i8> @commute_fold_vpcomb(<16 x i8>* %a0, <16 x i8> %a1) {
declare <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8>, <16 x i8>, i8) nounwind readnone
define <4 x i32> @commute_fold_vpcomd(<4 x i32>* %a0, <4 x i32> %a1) {
- ;CHECK-LABEL: commute_fold_vpcomd
- ;CHECK: vpcomged (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpcomd:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpcomged (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpcomd:
+; X64: # BB#0:
+; X64-NEXT: vpcomged (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32> %1, <4 x i32> %a1, i8 1) ; vpcomled
ret <4 x i32> %2
@@ -19,8 +37,16 @@ define <4 x i32> @commute_fold_vpcomd(<4 x i32>* %a0, <4 x i32> %a1) {
declare <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32>, <4 x i32>, i8) nounwind readnone
define <2 x i64> @commute_fold_vpcomq(<2 x i64>* %a0, <2 x i64> %a1) {
- ;CHECK-LABEL: commute_fold_vpcomq
- ;CHECK: vpcomltq (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpcomq:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpcomltq (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpcomq:
+; X64: # BB#0:
+; X64-NEXT: vpcomltq (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <2 x i64>, <2 x i64>* %a0
%2 = call <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64> %1, <2 x i64> %a1, i8 2) ; vpcomgtq
ret <2 x i64> %2
@@ -28,8 +54,16 @@ define <2 x i64> @commute_fold_vpcomq(<2 x i64>* %a0, <2 x i64> %a1) {
declare <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64>, <2 x i64>, i8) nounwind readnone
define <16 x i8> @commute_fold_vpcomub(<16 x i8>* %a0, <16 x i8> %a1) {
- ;CHECK-LABEL: commute_fold_vpcomub
- ;CHECK: vpcomleub (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpcomub:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpcomleub (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpcomub:
+; X64: # BB#0:
+; X64-NEXT: vpcomleub (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a0
%2 = call <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8> %1, <16 x i8> %a1, i8 3) ; vpcomgeub
ret <16 x i8> %2
@@ -37,8 +71,16 @@ define <16 x i8> @commute_fold_vpcomub(<16 x i8>* %a0, <16 x i8> %a1) {
declare <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8>, <16 x i8>, i8) nounwind readnone
define <4 x i32> @commute_fold_vpcomud(<4 x i32>* %a0, <4 x i32> %a1) {
- ;CHECK-LABEL: commute_fold_vpcomud
- ;CHECK: vpcomequd (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpcomud:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpcomequd (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpcomud:
+; X64: # BB#0:
+; X64-NEXT: vpcomequd (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32> %1, <4 x i32> %a1, i8 4) ; vpcomequd
ret <4 x i32> %2
@@ -46,8 +88,16 @@ define <4 x i32> @commute_fold_vpcomud(<4 x i32>* %a0, <4 x i32> %a1) {
declare <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32>, <4 x i32>, i8) nounwind readnone
define <2 x i64> @commute_fold_vpcomuq(<2 x i64>* %a0, <2 x i64> %a1) {
- ;CHECK-LABEL: commute_fold_vpcomuq
- ;CHECK: vpcomnequq (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpcomuq:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpcomnequq (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpcomuq:
+; X64: # BB#0:
+; X64-NEXT: vpcomnequq (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <2 x i64>, <2 x i64>* %a0
%2 = call <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64> %1, <2 x i64> %a1, i8 5) ; vpcomnequq
ret <2 x i64> %2
@@ -55,8 +105,16 @@ define <2 x i64> @commute_fold_vpcomuq(<2 x i64>* %a0, <2 x i64> %a1) {
declare <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64>, <2 x i64>, i8) nounwind readnone
define <8 x i16> @commute_fold_vpcomuw(<8 x i16>* %a0, <8 x i16> %a1) {
- ;CHECK-LABEL: commute_fold_vpcomuw
- ;CHECK: vpcomfalseuw (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpcomuw:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpcomfalseuw (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpcomuw:
+; X64: # BB#0:
+; X64-NEXT: vpcomfalseuw (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16> %1, <8 x i16> %a1, i8 6) ; vpcomfalseuw
ret <8 x i16> %2
@@ -64,8 +122,16 @@ define <8 x i16> @commute_fold_vpcomuw(<8 x i16>* %a0, <8 x i16> %a1) {
declare <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16>, <8 x i16>, i8) nounwind readnone
define <8 x i16> @commute_fold_vpcomw(<8 x i16>* %a0, <8 x i16> %a1) {
- ;CHECK-LABEL: commute_fold_vpcomw
- ;CHECK: vpcomtruew (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpcomw:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpcomtruew (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpcomw:
+; X64: # BB#0:
+; X64-NEXT: vpcomtruew (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16> %1, <8 x i16> %a1, i8 7) ; vpcomtruew
ret <8 x i16> %2
@@ -73,8 +139,16 @@ define <8 x i16> @commute_fold_vpcomw(<8 x i16>* %a0, <8 x i16> %a1) {
declare <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16>, <8 x i16>, i8) nounwind readnone
define <4 x i32> @commute_fold_vpmacsdd(<4 x i32>* %a0, <4 x i32> %a1, <4 x i32> %a2) {
- ;CHECK-LABEL: commute_fold_vpmacsdd
- ;CHECK: vpmacsdd %xmm1, (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpmacsdd:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpmacsdd %xmm1, (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpmacsdd:
+; X64: # BB#0:
+; X64-NEXT: vpmacsdd %xmm1, (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpmacsdd(<4 x i32> %1, <4 x i32> %a1, <4 x i32> %a2)
ret <4 x i32> %2
@@ -82,8 +156,16 @@ define <4 x i32> @commute_fold_vpmacsdd(<4 x i32>* %a0, <4 x i32> %a1, <4 x i32>
declare <4 x i32> @llvm.x86.xop.vpmacsdd(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @commute_fold_vpmacsdqh(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
- ;CHECK-LABEL: commute_fold_vpmacsdqh
- ;CHECK: vpmacsdqh %xmm1, (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpmacsdqh:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpmacsdqh %xmm1, (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpmacsdqh:
+; X64: # BB#0:
+; X64-NEXT: vpmacsdqh %xmm1, (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <2 x i64> @llvm.x86.xop.vpmacsdqh(<4 x i32> %1, <4 x i32> %a1, <2 x i64> %a2)
ret <2 x i64> %2
@@ -91,8 +173,16 @@ define <2 x i64> @commute_fold_vpmacsdqh(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64
declare <2 x i64> @llvm.x86.xop.vpmacsdqh(<4 x i32>, <4 x i32>, <2 x i64>) nounwind readnone
define <2 x i64> @commute_fold_vpmacsdql(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
- ;CHECK-LABEL: commute_fold_vpmacsdql
- ;CHECK: vpmacsdql %xmm1, (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpmacsdql:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpmacsdql %xmm1, (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpmacsdql:
+; X64: # BB#0:
+; X64-NEXT: vpmacsdql %xmm1, (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <2 x i64> @llvm.x86.xop.vpmacsdql(<4 x i32> %1, <4 x i32> %a1, <2 x i64> %a2)
ret <2 x i64> %2
@@ -100,8 +190,16 @@ define <2 x i64> @commute_fold_vpmacsdql(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64
declare <2 x i64> @llvm.x86.xop.vpmacsdql(<4 x i32>, <4 x i32>, <2 x i64>) nounwind readnone
define <4 x i32> @commute_fold_vpmacssdd(<4 x i32>* %a0, <4 x i32> %a1, <4 x i32> %a2) {
- ;CHECK-LABEL: commute_fold_vpmacssdd
- ;CHECK: vpmacssdd %xmm1, (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpmacssdd:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpmacssdd %xmm1, (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpmacssdd:
+; X64: # BB#0:
+; X64-NEXT: vpmacssdd %xmm1, (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpmacssdd(<4 x i32> %1, <4 x i32> %a1, <4 x i32> %a2)
ret <4 x i32> %2
@@ -109,8 +207,16 @@ define <4 x i32> @commute_fold_vpmacssdd(<4 x i32>* %a0, <4 x i32> %a1, <4 x i32
declare <4 x i32> @llvm.x86.xop.vpmacssdd(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @commute_fold_vpmacssdqh(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
- ;CHECK-LABEL: commute_fold_vpmacssdqh
- ;CHECK: vpmacssdqh %xmm1, (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpmacssdqh:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpmacssdqh %xmm1, (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpmacssdqh:
+; X64: # BB#0:
+; X64-NEXT: vpmacssdqh %xmm1, (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <2 x i64> @llvm.x86.xop.vpmacssdqh(<4 x i32> %1, <4 x i32> %a1, <2 x i64> %a2)
ret <2 x i64> %2
@@ -118,8 +224,16 @@ define <2 x i64> @commute_fold_vpmacssdqh(<4 x i32>* %a0, <4 x i32> %a1, <2 x i6
declare <2 x i64> @llvm.x86.xop.vpmacssdqh(<4 x i32>, <4 x i32>, <2 x i64>) nounwind readnone
define <2 x i64> @commute_fold_vpmacssdql(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
- ;CHECK-LABEL: commute_fold_vpmacssdql
- ;CHECK: vpmacssdql %xmm1, (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpmacssdql:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpmacssdql %xmm1, (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpmacssdql:
+; X64: # BB#0:
+; X64-NEXT: vpmacssdql %xmm1, (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <2 x i64> @llvm.x86.xop.vpmacssdql(<4 x i32> %1, <4 x i32> %a1, <2 x i64> %a2)
ret <2 x i64> %2
@@ -127,8 +241,16 @@ define <2 x i64> @commute_fold_vpmacssdql(<4 x i32>* %a0, <4 x i32> %a1, <2 x i6
declare <2 x i64> @llvm.x86.xop.vpmacssdql(<4 x i32>, <4 x i32>, <2 x i64>) nounwind readnone
define <4 x i32> @commute_fold_vpmacsswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
- ;CHECK-LABEL: commute_fold_vpmacsswd
- ;CHECK: vpmacsswd %xmm1, (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpmacsswd:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpmacsswd %xmm1, (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpmacsswd:
+; X64: # BB#0:
+; X64-NEXT: vpmacsswd %xmm1, (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpmacsswd(<8 x i16> %1, <8 x i16> %a1, <4 x i32> %a2)
ret <4 x i32> %2
@@ -136,8 +258,16 @@ define <4 x i32> @commute_fold_vpmacsswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32
declare <4 x i32> @llvm.x86.xop.vpmacsswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwind readnone
define <8 x i16> @commute_fold_vpmacssww(<8 x i16>* %a0, <8 x i16> %a1, <8 x i16> %a2) {
- ;CHECK-LABEL: commute_fold_vpmacssww
- ;CHECK: vpmacssww %xmm1, (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpmacssww:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpmacssww %xmm1, (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpmacssww:
+; X64: # BB#0:
+; X64-NEXT: vpmacssww %xmm1, (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <8 x i16> @llvm.x86.xop.vpmacssww(<8 x i16> %1, <8 x i16> %a1, <8 x i16> %a2)
ret <8 x i16> %2
@@ -145,8 +275,16 @@ define <8 x i16> @commute_fold_vpmacssww(<8 x i16>* %a0, <8 x i16> %a1, <8 x i16
declare <8 x i16> @llvm.x86.xop.vpmacssww(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
define <4 x i32> @commute_fold_vpmacswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
- ;CHECK-LABEL: commute_fold_vpmacswd
- ;CHECK: vpmacswd %xmm1, (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpmacswd:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpmacswd %xmm1, (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpmacswd:
+; X64: # BB#0:
+; X64-NEXT: vpmacswd %xmm1, (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpmacswd(<8 x i16> %1, <8 x i16> %a1, <4 x i32> %a2)
ret <4 x i32> %2
@@ -154,8 +292,16 @@ define <4 x i32> @commute_fold_vpmacswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32>
declare <4 x i32> @llvm.x86.xop.vpmacswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwind readnone
define <8 x i16> @commute_fold_vpmacsww(<8 x i16>* %a0, <8 x i16> %a1, <8 x i16> %a2) {
- ;CHECK-LABEL: commute_fold_vpmacsww
- ;CHECK: vpmacsww %xmm1, (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpmacsww:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpmacsww %xmm1, (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpmacsww:
+; X64: # BB#0:
+; X64-NEXT: vpmacsww %xmm1, (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <8 x i16> @llvm.x86.xop.vpmacsww(<8 x i16> %1, <8 x i16> %a1, <8 x i16> %a2)
ret <8 x i16> %2
@@ -163,8 +309,16 @@ define <8 x i16> @commute_fold_vpmacsww(<8 x i16>* %a0, <8 x i16> %a1, <8 x i16>
declare <8 x i16> @llvm.x86.xop.vpmacsww(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
define <4 x i32> @commute_fold_vpmadcsswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
- ;CHECK-LABEL: commute_fold_vpmadcsswd
- ;CHECK: vpmadcsswd %xmm1, (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpmadcsswd:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpmadcsswd %xmm1, (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpmadcsswd:
+; X64: # BB#0:
+; X64-NEXT: vpmadcsswd %xmm1, (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpmadcsswd(<8 x i16> %1, <8 x i16> %a1, <4 x i32> %a2)
ret <4 x i32> %2
@@ -172,13 +326,18 @@ define <4 x i32> @commute_fold_vpmadcsswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i3
declare <4 x i32> @llvm.x86.xop.vpmadcsswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwind readnone
define <4 x i32> @commute_fold_vpmadcswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
- ;CHECK-LABEL: commute_fold_vpmadcswd
- ;CHECK: vpmadcswd %xmm1, (%rdi), %xmm0, %xmm0
+; X32-LABEL: commute_fold_vpmadcswd:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpmadcswd %xmm1, (%eax), %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: commute_fold_vpmadcswd:
+; X64: # BB#0:
+; X64-NEXT: vpmadcswd %xmm1, (%rdi), %xmm0, %xmm0
+; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpmadcswd(<8 x i16> %1, <8 x i16> %a1, <4 x i32> %a2)
ret <4 x i32> %2
}
declare <4 x i32> @llvm.x86.xop.vpmadcswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwind readnone
-
-
-
diff --git a/test/CodeGen/X86/compare-global.ll b/test/CodeGen/X86/compare-global.ll
index 8e3d3a93a564..747595c1a89c 100644
--- a/test/CodeGen/X86/compare-global.ll
+++ b/test/CodeGen/X86/compare-global.ll
@@ -7,7 +7,7 @@ target triple = "i686-pc-windows-msvc18.0.0"
define void @f(i8* %c) {
entry:
- ; CHECK: subl $_foo, %eax
+ ; CHECK: cmpl $_foo, 4(%esp)
%cmp = icmp eq i8* %c, @foo
br i1 %cmp, label %if.then, label %if.end
diff --git a/test/CodeGen/X86/complex-fastmath.ll b/test/CodeGen/X86/complex-fastmath.ll
new file mode 100644
index 000000000000..d31707260a0a
--- /dev/null
+++ b/test/CodeGen/X86/complex-fastmath.ll
@@ -0,0 +1,215 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fma | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=FMA
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=FMA
+
+; PR31866
+; complex float complex_square_f32(complex float x) {
+; return x*x;
+; }
+
+define <2 x float> @complex_square_f32(<2 x float>) #0 {
+; SSE-LABEL: complex_square_f32:
+; SSE: # BB#0:
+; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-NEXT: movaps %xmm0, %xmm2
+; SSE-NEXT: addss %xmm2, %xmm2
+; SSE-NEXT: mulss %xmm1, %xmm2
+; SSE-NEXT: mulss %xmm0, %xmm0
+; SSE-NEXT: mulss %xmm1, %xmm1
+; SSE-NEXT: subss %xmm1, %xmm0
+; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: complex_square_f32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-NEXT: vaddss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vmulss %xmm2, %xmm1, %xmm2
+; AVX1-NEXT: vmulss %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vmulss %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
+; AVX1-NEXT: retq
+;
+; FMA-LABEL: complex_square_f32:
+; FMA: # BB#0:
+; FMA-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; FMA-NEXT: vaddss %xmm0, %xmm0, %xmm2
+; FMA-NEXT: vmulss %xmm2, %xmm1, %xmm2
+; FMA-NEXT: vmulss %xmm1, %xmm1, %xmm1
+; FMA-NEXT: vfmsub231ss %xmm0, %xmm0, %xmm1
+; FMA-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm2[0],xmm1[2,3]
+; FMA-NEXT: retq
+ %2 = extractelement <2 x float> %0, i32 0
+ %3 = extractelement <2 x float> %0, i32 1
+ %4 = fmul fast float %3, 2.000000e+00
+ %5 = fmul fast float %4, %2
+ %6 = fmul fast float %2, %2
+ %7 = fmul fast float %3, %3
+ %8 = fsub fast float %6, %7
+ %9 = insertelement <2 x float> undef, float %8, i32 0
+ %10 = insertelement <2 x float> %9, float %5, i32 1
+ ret <2 x float> %10
+}
+
+define <2 x double> @complex_square_f64(<2 x double>) #0 {
+; SSE-LABEL: complex_square_f64:
+; SSE: # BB#0:
+; SSE-NEXT: movaps %xmm0, %xmm1
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT: movaps %xmm0, %xmm2
+; SSE-NEXT: addsd %xmm2, %xmm2
+; SSE-NEXT: mulsd %xmm1, %xmm2
+; SSE-NEXT: mulsd %xmm0, %xmm0
+; SSE-NEXT: mulsd %xmm1, %xmm1
+; SSE-NEXT: subsd %xmm1, %xmm0
+; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: complex_square_f64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX1-NEXT: vaddsd %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vmulsd %xmm2, %xmm1, %xmm2
+; AVX1-NEXT: vmulsd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vmulsd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vsubsd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT: retq
+;
+; FMA-LABEL: complex_square_f64:
+; FMA: # BB#0:
+; FMA-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; FMA-NEXT: vaddsd %xmm0, %xmm0, %xmm2
+; FMA-NEXT: vmulsd %xmm2, %xmm1, %xmm2
+; FMA-NEXT: vmulsd %xmm1, %xmm1, %xmm1
+; FMA-NEXT: vfmsub231sd %xmm0, %xmm0, %xmm1
+; FMA-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm2[0]
+; FMA-NEXT: retq
+ %2 = extractelement <2 x double> %0, i32 0
+ %3 = extractelement <2 x double> %0, i32 1
+ %4 = fmul fast double %3, 2.000000e+00
+ %5 = fmul fast double %4, %2
+ %6 = fmul fast double %2, %2
+ %7 = fmul fast double %3, %3
+ %8 = fsub fast double %6, %7
+ %9 = insertelement <2 x double> undef, double %8, i32 0
+ %10 = insertelement <2 x double> %9, double %5, i32 1
+ ret <2 x double> %10
+}
+
+; complex float complex_mul_f32(complex float x, complex float y) {
+; return x*y;
+; }
+
+define <2 x float> @complex_mul_f32(<2 x float>, <2 x float>) #0 {
+; SSE-LABEL: complex_mul_f32:
+; SSE: # BB#0:
+; SSE-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; SSE-NEXT: movaps %xmm3, %xmm4
+; SSE-NEXT: mulss %xmm0, %xmm4
+; SSE-NEXT: mulss %xmm1, %xmm0
+; SSE-NEXT: mulss %xmm2, %xmm1
+; SSE-NEXT: addss %xmm4, %xmm1
+; SSE-NEXT: mulss %xmm2, %xmm3
+; SSE-NEXT: subss %xmm3, %xmm0
+; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: complex_mul_f32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX1-NEXT: vmulss %xmm0, %xmm3, %xmm4
+; AVX1-NEXT: vmulss %xmm2, %xmm1, %xmm5
+; AVX1-NEXT: vaddss %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vmulss %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vmulss %xmm2, %xmm3, %xmm1
+; AVX1-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[2,3]
+; AVX1-NEXT: retq
+;
+; FMA-LABEL: complex_mul_f32:
+; FMA: # BB#0:
+; FMA-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; FMA-NEXT: vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; FMA-NEXT: vmulss %xmm2, %xmm1, %xmm4
+; FMA-NEXT: vfmadd231ss %xmm0, %xmm3, %xmm4
+; FMA-NEXT: vmulss %xmm2, %xmm3, %xmm2
+; FMA-NEXT: vfmsub231ss %xmm0, %xmm1, %xmm2
+; FMA-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0],xmm4[0],xmm2[2,3]
+; FMA-NEXT: retq
+ %3 = extractelement <2 x float> %0, i32 0
+ %4 = extractelement <2 x float> %0, i32 1
+ %5 = extractelement <2 x float> %1, i32 0
+ %6 = extractelement <2 x float> %1, i32 1
+ %7 = fmul fast float %6, %3
+ %8 = fmul fast float %5, %4
+ %9 = fadd fast float %7, %8
+ %10 = fmul fast float %5, %3
+ %11 = fmul fast float %6, %4
+ %12 = fsub fast float %10, %11
+ %13 = insertelement <2 x float> undef, float %12, i32 0
+ %14 = insertelement <2 x float> %13, float %9, i32 1
+ ret <2 x float> %14
+}
+
+define <2 x double> @complex_mul_f64(<2 x double>, <2 x double>) #0 {
+; SSE-LABEL: complex_mul_f64:
+; SSE: # BB#0:
+; SSE-NEXT: movaps %xmm0, %xmm2
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: movaps %xmm1, %xmm3
+; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
+; SSE-NEXT: movaps %xmm3, %xmm4
+; SSE-NEXT: mulsd %xmm0, %xmm4
+; SSE-NEXT: mulsd %xmm1, %xmm0
+; SSE-NEXT: mulsd %xmm2, %xmm1
+; SSE-NEXT: addsd %xmm4, %xmm1
+; SSE-NEXT: mulsd %xmm2, %xmm3
+; SSE-NEXT: subsd %xmm3, %xmm0
+; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: complex_mul_f64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
+; AVX1-NEXT: vmulsd %xmm0, %xmm3, %xmm4
+; AVX1-NEXT: vmulsd %xmm2, %xmm1, %xmm5
+; AVX1-NEXT: vaddsd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vmulsd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vmulsd %xmm2, %xmm3, %xmm1
+; AVX1-NEXT: vsubsd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; AVX1-NEXT: retq
+;
+; FMA-LABEL: complex_mul_f64:
+; FMA: # BB#0:
+; FMA-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; FMA-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
+; FMA-NEXT: vmulsd %xmm2, %xmm1, %xmm4
+; FMA-NEXT: vfmadd231sd %xmm0, %xmm3, %xmm4
+; FMA-NEXT: vmulsd %xmm2, %xmm3, %xmm2
+; FMA-NEXT: vfmsub231sd %xmm0, %xmm1, %xmm2
+; FMA-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm2[0],xmm4[0]
+; FMA-NEXT: retq
+ %3 = extractelement <2 x double> %0, i32 0
+ %4 = extractelement <2 x double> %0, i32 1
+ %5 = extractelement <2 x double> %1, i32 0
+ %6 = extractelement <2 x double> %1, i32 1
+ %7 = fmul fast double %6, %3
+ %8 = fmul fast double %5, %4
+ %9 = fadd fast double %7, %8
+ %10 = fmul fast double %5, %3
+ %11 = fmul fast double %6, %4
+ %12 = fsub fast double %10, %11
+ %13 = insertelement <2 x double> undef, double %12, i32 0
+ %14 = insertelement <2 x double> %13, double %9, i32 1
+ ret <2 x double> %14
+}
+
+attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "less-precise-fpmad"="false" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "unsafe-fp-math"="true" }
diff --git a/test/CodeGen/X86/compress_expand.ll b/test/CodeGen/X86/compress_expand.ll
index c1a3a1b92bbc..e09fcf2a336e 100644
--- a/test/CodeGen/X86/compress_expand.ll
+++ b/test/CodeGen/X86/compress_expand.ll
@@ -8,23 +8,37 @@ target triple = "x86_64-unknown-linux-gnu"
define <16 x float> @test1(float* %base) {
-; ALL-LABEL: test1:
-; ALL: # BB#0:
-; ALL-NEXT: movw $-2049, %ax # imm = 0xF7FF
-; ALL-NEXT: kmovw %eax, %k1
-; ALL-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
-; ALL-NEXT: retq
+; SKX-LABEL: test1:
+; SKX: # BB#0:
+; SKX-NEXT: movw $-2049, %ax # imm = 0xF7FF
+; SKX-NEXT: kmovd %eax, %k1
+; SKX-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
+; SKX-NEXT: retq
+;
+; KNL-LABEL: test1:
+; KNL: # BB#0:
+; KNL-NEXT: movw $-2049, %ax # imm = 0xF7FF
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
+; KNL-NEXT: retq
%res = call <16 x float> @llvm.masked.expandload.v16f32(float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true>, <16 x float> undef)
ret <16 x float>%res
}
define <16 x float> @test2(float* %base, <16 x float> %src0) {
-; ALL-LABEL: test2:
-; ALL: # BB#0:
-; ALL-NEXT: movw $30719, %ax # imm = 0x77FF
-; ALL-NEXT: kmovw %eax, %k1
-; ALL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
-; ALL-NEXT: retq
+; SKX-LABEL: test2:
+; SKX: # BB#0:
+; SKX-NEXT: movw $30719, %ax # imm = 0x77FF
+; SKX-NEXT: kmovd %eax, %k1
+; SKX-NEXT: vexpandps (%rdi), %zmm0 {%k1}
+; SKX-NEXT: retq
+;
+; KNL-LABEL: test2:
+; KNL: # BB#0:
+; KNL-NEXT: movw $30719, %ax # imm = 0x77FF
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
+; KNL-NEXT: retq
%res = call <16 x float> @llvm.masked.expandload.v16f32(float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <16 x float> %src0)
ret <16 x float>%res
}
@@ -52,7 +66,7 @@ define <4 x float> @test4(float* %base, <4 x float> %src0) {
; SKX-LABEL: test4:
; SKX: # BB#0:
; SKX-NEXT: movb $7, %al
-; SKX-NEXT: kmovb %eax, %k1
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vexpandps (%rdi), %xmm0 {%k1}
; SKX-NEXT: retq
;
@@ -72,7 +86,7 @@ define <2 x i64> @test5(i64* %base, <2 x i64> %src0) {
; SKX-LABEL: test5:
; SKX: # BB#0:
; SKX-NEXT: movb $2, %al
-; SKX-NEXT: kmovb %eax, %k1
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpexpandq (%rdi), %xmm0 {%k1}
; SKX-NEXT: retq
;
@@ -94,12 +108,20 @@ declare <4 x float> @llvm.masked.expandload.v4f32(float*, <4 x i1>, <4 x float>
declare <2 x i64> @llvm.masked.expandload.v2i64(i64*, <2 x i1>, <2 x i64>)
define void @test6(float* %base, <16 x float> %V) {
-; ALL-LABEL: test6:
-; ALL: # BB#0:
-; ALL-NEXT: movw $-2049, %ax # imm = 0xF7FF
-; ALL-NEXT: kmovw %eax, %k1
-; ALL-NEXT: vcompressps %zmm0, (%rdi) {%k1}
-; ALL-NEXT: retq
+; SKX-LABEL: test6:
+; SKX: # BB#0:
+; SKX-NEXT: movw $-2049, %ax # imm = 0xF7FF
+; SKX-NEXT: kmovd %eax, %k1
+; SKX-NEXT: vcompressps %zmm0, (%rdi) {%k1}
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+;
+; KNL-LABEL: test6:
+; KNL: # BB#0:
+; KNL-NEXT: movw $-2049, %ax # imm = 0xF7FF
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: vcompressps %zmm0, (%rdi) {%k1}
+; KNL-NEXT: retq
call void @llvm.masked.compressstore.v16f32(<16 x float> %V, float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true>)
ret void
}
@@ -110,6 +132,7 @@ define void @test7(float* %base, <8 x float> %V, <8 x i1> %mask) {
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vcompressps %ymm0, (%rdi) {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; KNL-LABEL: test7:
@@ -132,6 +155,7 @@ define void @test8(double* %base, <8 x double> %V, <8 x i1> %mask) {
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vcompresspd %zmm0, (%rdi) {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; KNL-LABEL: test8:
@@ -151,6 +175,7 @@ define void @test9(i64* %base, <8 x i64> %V, <8 x i1> %mask) {
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vpcompressq %zmm0, (%rdi) {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; KNL-LABEL: test9:
@@ -170,6 +195,7 @@ define void @test10(i64* %base, <4 x i64> %V, <4 x i1> %mask) {
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1
; SKX-NEXT: vpcompressq %ymm0, (%rdi) {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; KNL-LABEL: test10:
@@ -200,8 +226,7 @@ define void @test11(i64* %base, <2 x i64> %V, <2 x i1> %mask) {
; KNL: # BB#0:
; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; KNL-NEXT: vpsllq $63, %xmm1, %xmm1
-; KNL-NEXT: vpsrad $31, %xmm1, %xmm1
-; KNL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; KNL-NEXT: vpsraq $63, %zmm1, %zmm1
; KNL-NEXT: vpxord %zmm2, %zmm2, %zmm2
; KNL-NEXT: vinserti32x4 $0, %xmm1, %zmm2, %zmm1
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
@@ -341,16 +366,28 @@ define <16 x double> @test16(double* %base, <16 x double> %src0, <16 x i32> %tri
}
define void @test17(float* %base, <32 x float> %V, <32 x i32> %trigger) {
-; ALL-LABEL: test17:
-; ALL: # BB#0:
-; ALL-NEXT: vpxord %zmm4, %zmm4, %zmm4
-; ALL-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
-; ALL-NEXT: vpcmpeqd %zmm4, %zmm2, %k2
-; ALL-NEXT: kmovw %k2, %eax
-; ALL-NEXT: popcntl %eax, %eax
-; ALL-NEXT: vcompressps %zmm1, (%rdi,%rax,4) {%k1}
-; ALL-NEXT: vcompressps %zmm0, (%rdi) {%k2}
-; ALL-NEXT: retq
+; SKX-LABEL: test17:
+; SKX: # BB#0:
+; SKX-NEXT: vpxord %zmm4, %zmm4, %zmm4
+; SKX-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
+; SKX-NEXT: vpcmpeqd %zmm4, %zmm2, %k2
+; SKX-NEXT: kmovw %k2, %eax
+; SKX-NEXT: popcntl %eax, %eax
+; SKX-NEXT: vcompressps %zmm1, (%rdi,%rax,4) {%k1}
+; SKX-NEXT: vcompressps %zmm0, (%rdi) {%k2}
+; SKX-NEXT: vzeroupper
+; SKX-NEXT: retq
+;
+; KNL-LABEL: test17:
+; KNL: # BB#0:
+; KNL-NEXT: vpxord %zmm4, %zmm4, %zmm4
+; KNL-NEXT: vpcmpeqd %zmm4, %zmm3, %k1
+; KNL-NEXT: vpcmpeqd %zmm4, %zmm2, %k2
+; KNL-NEXT: kmovw %k2, %eax
+; KNL-NEXT: popcntl %eax, %eax
+; KNL-NEXT: vcompressps %zmm1, (%rdi,%rax,4) {%k1}
+; KNL-NEXT: vcompressps %zmm0, (%rdi) {%k2}
+; KNL-NEXT: retq
%mask = icmp eq <32 x i32> %trigger, zeroinitializer
call void @llvm.masked.compressstore.v32f32(<32 x float> %V, float* %base, <32 x i1> %mask)
ret void
@@ -366,6 +403,7 @@ define void @test18(double* %base, <16 x double> %V, <16 x i1> %mask) {
; SKX-NEXT: popcntl %eax, %eax
; SKX-NEXT: vcompresspd %zmm1, (%rdi,%rax,8) {%k2}
; SKX-NEXT: vcompresspd %zmm0, (%rdi) {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; KNL-LABEL: test18:
diff --git a/test/CodeGen/X86/conditional-indecrement.ll b/test/CodeGen/X86/conditional-indecrement.ll
index c3e71180bb18..f9e18f626972 100644
--- a/test/CodeGen/X86/conditional-indecrement.ll
+++ b/test/CodeGen/X86/conditional-indecrement.ll
@@ -1,89 +1,119 @@
-; RUN: llc -march=x86 < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-unknown-unknown < %s | FileCheck %s
define i32 @test1(i32 %a, i32 %b) nounwind readnone {
+; CHECK-LABEL: test1:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpl $1, %edi
+; CHECK-NEXT: sbbl $-1, %esi
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
%not.cmp = icmp ne i32 %a, 0
%inc = zext i1 %not.cmp to i32
%retval.0 = add i32 %inc, %b
ret i32 %retval.0
-; CHECK-LABEL: test1:
-; CHECK: cmpl $1
-; CHECK: sbbl $-1
-; CHECK: ret
+}
+
+define i32 @test1_commute(i32 %a, i32 %b) nounwind readnone {
+; CHECK-LABEL: test1_commute:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpl $1, %edi
+; CHECK-NEXT: sbbl $-1, %esi
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
+ %cmp = icmp ne i32 %a, 0
+ %inc = zext i1 %cmp to i32
+ %ret = add i32 %b, %inc
+ ret i32 %ret
}
define i32 @test2(i32 %a, i32 %b) nounwind readnone {
+; CHECK-LABEL: test2:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpl $1, %edi
+; CHECK-NEXT: adcl $0, %esi
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
%cmp = icmp eq i32 %a, 0
%inc = zext i1 %cmp to i32
%retval.0 = add i32 %inc, %b
ret i32 %retval.0
-; CHECK-LABEL: test2:
-; CHECK: cmpl $1
-; CHECK: adcl $0
-; CHECK: ret
}
define i32 @test3(i32 %a, i32 %b) nounwind readnone {
+; CHECK-LABEL: test3:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpl $1, %edi
+; CHECK-NEXT: adcl $0, %esi
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
%cmp = icmp eq i32 %a, 0
%inc = zext i1 %cmp to i32
%retval.0 = add i32 %inc, %b
ret i32 %retval.0
-; CHECK-LABEL: test3:
-; CHECK: cmpl $1
-; CHECK: adcl $0
-; CHECK: ret
}
define i32 @test4(i32 %a, i32 %b) nounwind readnone {
+; CHECK-LABEL: test4:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpl $1, %edi
+; CHECK-NEXT: sbbl $-1, %esi
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
%not.cmp = icmp ne i32 %a, 0
%inc = zext i1 %not.cmp to i32
%retval.0 = add i32 %inc, %b
ret i32 %retval.0
-; CHECK-LABEL: test4:
-; CHECK: cmpl $1
-; CHECK: sbbl $-1
-; CHECK: ret
}
define i32 @test5(i32 %a, i32 %b) nounwind readnone {
+; CHECK-LABEL: test5:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpl $1, %edi
+; CHECK-NEXT: adcl $-1, %esi
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
%not.cmp = icmp ne i32 %a, 0
%inc = zext i1 %not.cmp to i32
%retval.0 = sub i32 %b, %inc
ret i32 %retval.0
-; CHECK-LABEL: test5:
-; CHECK: cmpl $1
-; CHECK: adcl $-1
-; CHECK: ret
}
define i32 @test6(i32 %a, i32 %b) nounwind readnone {
+; CHECK-LABEL: test6:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpl $1, %edi
+; CHECK-NEXT: sbbl $0, %esi
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
%cmp = icmp eq i32 %a, 0
%inc = zext i1 %cmp to i32
%retval.0 = sub i32 %b, %inc
ret i32 %retval.0
-; CHECK-LABEL: test6:
-; CHECK: cmpl $1
-; CHECK: sbbl $0
-; CHECK: ret
}
define i32 @test7(i32 %a, i32 %b) nounwind readnone {
+; CHECK-LABEL: test7:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpl $1, %edi
+; CHECK-NEXT: sbbl $0, %esi
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
%cmp = icmp eq i32 %a, 0
%inc = zext i1 %cmp to i32
%retval.0 = sub i32 %b, %inc
ret i32 %retval.0
-; CHECK-LABEL: test7:
-; CHECK: cmpl $1
-; CHECK: sbbl $0
-; CHECK: ret
}
define i32 @test8(i32 %a, i32 %b) nounwind readnone {
+; CHECK-LABEL: test8:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpl $1, %edi
+; CHECK-NEXT: adcl $-1, %esi
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
%not.cmp = icmp ne i32 %a, 0
%inc = zext i1 %not.cmp to i32
%retval.0 = sub i32 %b, %inc
ret i32 %retval.0
-; CHECK-LABEL: test8:
-; CHECK: cmpl $1
-; CHECK: adcl $-1
-; CHECK: ret
}
diff --git a/test/CodeGen/X86/conditional-tailcall.ll b/test/CodeGen/X86/conditional-tailcall.ll
new file mode 100644
index 000000000000..c00ce75b26de
--- /dev/null
+++ b/test/CodeGen/X86/conditional-tailcall.ll
@@ -0,0 +1,163 @@
+; RUN: llc < %s -mtriple=i686-linux -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK32
+; RUN: llc < %s -mtriple=x86_64-linux -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK64
+; RUN: llc < %s -mtriple=x86_64-win32 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=WIN64
+
+declare void @foo()
+declare void @bar()
+
+define void @f(i32 %x, i32 %y) optsize {
+entry:
+ %p = icmp eq i32 %x, %y
+ br i1 %p, label %bb1, label %bb2
+bb1:
+ tail call void @foo()
+ ret void
+bb2:
+ tail call void @bar()
+ ret void
+
+; CHECK-LABEL: f:
+; CHECK: cmp
+; CHECK: jne bar
+; Check that the asm doesn't just look good, but uses the correct encoding.
+; CHECK: encoding: [0x75,A]
+; CHECK: jmp foo
+}
+
+define void @f_non_leaf(i32 %x, i32 %y) optsize {
+entry:
+ ; Force %ebx to be spilled on the stack, turning this into
+ ; not a "leaf" function for Win64.
+ tail call void asm sideeffect "", "~{ebx}"()
+
+ %p = icmp eq i32 %x, %y
+ br i1 %p, label %bb1, label %bb2
+bb1:
+ tail call void @foo()
+ ret void
+bb2:
+ tail call void @bar()
+ ret void
+
+; CHECK-LABEL: f_non_leaf:
+; WIN64-NOT: je foo
+; WIN64-NOT: jne bar
+; WIN64: jne
+; WIN64: jmp foo
+; WIN64: jmp bar
+}
+
+declare x86_thiscallcc zeroext i1 @baz(i8*, i32)
+define x86_thiscallcc zeroext i1 @BlockPlacementTest(i8* %this, i32 %x) optsize {
+entry:
+ %and = and i32 %x, 42
+ %tobool = icmp eq i32 %and, 0
+ br i1 %tobool, label %land.end, label %land.rhs
+
+land.rhs:
+ %and6 = and i32 %x, 44
+ %tobool7 = icmp eq i32 %and6, 0
+ br i1 %tobool7, label %lor.rhs, label %land.end
+
+lor.rhs:
+ %call = tail call x86_thiscallcc zeroext i1 @baz(i8* %this, i32 %x) #2
+ br label %land.end
+
+land.end:
+ %0 = phi i1 [ false, %entry ], [ true, %land.rhs ], [ %call, %lor.rhs ]
+ ret i1 %0
+
+; Make sure machine block placement isn't confused by the conditional tail call,
+; but sees that it can fall through to the next block.
+; CHECK-LABEL: BlockPlacementTest
+; CHECK: je baz
+; CHECK-NOT: xor
+; CHECK: ret
+}
+
+
+
+%"class.std::basic_string" = type { %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" }
+%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" = type { i8* }
+declare zeroext i1 @_Z20isValidIntegerSuffixN9__gnu_cxx17__normal_iteratorIPKcSsEES3_(i8*, i8*)
+
+define zeroext i1 @pr31257(%"class.std::basic_string"* nocapture readonly dereferenceable(8) %s) minsize {
+; CHECK-LABEL: pr31257
+entry:
+ %_M_p.i.i = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %s, i64 0, i32 0, i32 0
+ %0 = load i8*, i8** %_M_p.i.i, align 8
+ %arrayidx.i.i.i54 = getelementptr inbounds i8, i8* %0, i64 -24
+ %_M_length.i.i55 = bitcast i8* %arrayidx.i.i.i54 to i64*
+ %1 = load i64, i64* %_M_length.i.i55, align 8
+ %add.ptr.i56 = getelementptr inbounds i8, i8* %0, i64 %1
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %it.sroa.0.0 = phi i8* [ %0, %entry ], [ %incdec.ptr.i, %for.inc ]
+ %state.0 = phi i32 [ 0, %entry ], [ %state.1, %for.inc ]
+ %cmp.i = icmp eq i8* %it.sroa.0.0, %add.ptr.i56
+ br i1 %cmp.i, label %5, label %for.body
+
+for.body: ; preds = %for.cond
+ switch i32 %state.0, label %for.inc [
+ i32 0, label %sw.bb
+ i32 1, label %sw.bb14
+ i32 2, label %sw.bb22
+ ]
+
+sw.bb: ; preds = %for.body
+ %2 = load i8, i8* %it.sroa.0.0, align 1
+ switch i8 %2, label %if.else [
+ i8 43, label %for.inc
+ i8 45, label %for.inc
+ ]
+
+if.else: ; preds = %sw.bb
+ %conv9 = zext i8 %2 to i32
+ %isdigittmp45 = add nsw i32 %conv9, -48
+ %isdigit46 = icmp ult i32 %isdigittmp45, 10
+ br i1 %isdigit46, label %for.inc, label %cleanup.thread.loopexit
+
+sw.bb14: ; preds = %for.body
+ %3 = load i8, i8* %it.sroa.0.0, align 1
+ %conv16 = zext i8 %3 to i32
+ %isdigittmp43 = add nsw i32 %conv16, -48
+ %isdigit44 = icmp ult i32 %isdigittmp43, 10
+ br i1 %isdigit44, label %for.inc, label %cleanup.thread.loopexit
+
+sw.bb22: ; preds = %for.body
+ %4 = load i8, i8* %it.sroa.0.0, align 1
+ %conv24 = zext i8 %4 to i32
+ %isdigittmp = add nsw i32 %conv24, -48
+ %isdigit = icmp ult i32 %isdigittmp, 10
+ br i1 %isdigit, label %for.inc, label %if.else28
+
+; Make sure Machine Copy Propagation doesn't delete the mov to %ecx because it
+; thinks the conditional tail call clobbers it.
+; CHECK64-LABEL: .LBB3_11:
+; CHECK64: movzbl (%rdi), %ecx
+; CHECK64-NEXT: addl $-48, %ecx
+; CHECK64-NEXT: cmpl $10, %ecx
+; CHECK64-NEXT: movl %r9d, %ecx
+; CHECK64-NEXT: jae _Z20isValidIntegerSuffixN9__gnu_cxx17__normal_iteratorIPKcSsEE
+
+if.else28: ; preds = %sw.bb22
+ %call34 = tail call zeroext i1 @_Z20isValidIntegerSuffixN9__gnu_cxx17__normal_iteratorIPKcSsEES3_(i8* nonnull %it.sroa.0.0, i8* %add.ptr.i56)
+ br label %cleanup.thread
+
+for.inc: ; preds = %sw.bb, %sw.bb, %sw.bb22, %sw.bb14, %if.else, %for.body
+ %state.1 = phi i32 [ %state.0, %for.body ], [ 1, %sw.bb ], [ 2, %if.else ], [ 2, %sw.bb14 ], [ 2, %sw.bb22 ], [ 1, %sw.bb ]
+ %incdec.ptr.i = getelementptr inbounds i8, i8* %it.sroa.0.0, i64 1
+ br label %for.cond
+
+; <label>:5: ; preds = %for.cond
+ %cmp37 = icmp eq i32 %state.0, 2
+ br label %cleanup.thread
+
+cleanup.thread.loopexit: ; preds = %if.else, %sw.bb14
+ br label %cleanup.thread
+
+cleanup.thread: ; preds = %cleanup.thread.loopexit, %if.else28, %5
+ %6 = phi i1 [ %cmp37, %5 ], [ %call34, %if.else28 ], [ false, %cleanup.thread.loopexit ]
+ ret i1 %6
+}
diff --git a/test/CodeGen/X86/copy-eflags.ll b/test/CodeGen/X86/copy-eflags.ll
index 796c1ecd8c71..d98d8a7839b1 100644
--- a/test/CodeGen/X86/copy-eflags.ll
+++ b/test/CodeGen/X86/copy-eflags.ll
@@ -9,19 +9,22 @@ target triple = "i686-unknown-linux-gnu"
@.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
; CHECK-LABEL: func:
-; This tests whether eax is properly saved/restored around the lahf/sahf
-; instruction sequences.
+; This tests whether eax is properly saved/restored around the
+; lahf/sahf instruction sequences. We make the memory operations
+; volatile to prevent the reordering that would avoid the spills.
+
+
define i32 @func() {
entry:
%bval = load i8, i8* @b
%inc = add i8 %bval, 1
- store i8 %inc, i8* @b
- %cval = load i32, i32* @c
+ store volatile i8 %inc, i8* @b
+ %cval = load volatile i32, i32* @c
%inc1 = add nsw i32 %cval, 1
- store i32 %inc1, i32* @c
- %aval = load i8, i8* @a
+ store volatile i32 %inc1, i32* @c
+ %aval = load volatile i8, i8* @a
%inc2 = add i8 %aval, 1
- store i8 %inc2, i8* @a
+ store volatile i8 %inc2, i8* @a
; Copy flags produced by the incb of %inc1 to a register, need to save+restore
; eax around it. The flags will be reused by %tobool.
; CHECK: pushl %eax
diff --git a/test/CodeGen/X86/copy-propagation.ll b/test/CodeGen/X86/copy-propagation.ll
index dac46c173825..4d8b8462b5fa 100644
--- a/test/CodeGen/X86/copy-propagation.ll
+++ b/test/CodeGen/X86/copy-propagation.ll
@@ -1,38 +1,25 @@
; RUN: llc %s -mattr=+avx -o - | FileCheck %s
-; PR21743.
+; Originally from http://llvm.org/PR21743.
target triple = "x86_64-pc-win32-elf"
-; Check that copy propagation conservatively assumes that undef register
-; can be rewritten by the backend to break false dependencies for the
-; hardware.
-; In this function we are in this situation:
-; reg1 = copy reg2
-; = inst reg2<undef>
-; reg2 = copy reg1
-; Copy propagation used to remove the last copy.
-; This is incorrect because the undef flag on reg2 in inst, allows next
-; passes to put whatever trashed value in reg2 that may help.
-; In practice we end up with this code:
-; reg1 = copy reg2
-; reg2 = 0
-; = inst reg2<undef>
-; reg2 = copy reg1
-; Therefore, removing the last copy is wrong.
+; Copy propagation may remove COPYs if the result is only used by undef
+; operands.
;
; CHECK-LABEL: foo:
; CHECK: movl $339752784, %e[[INDIRECT_CALL1:[a-z]+]]
; CHECK: callq *%r[[INDIRECT_CALL1]]
; Copy the result in a temporary.
-; Note: Technically the regalloc could have been smarter and this move not required,
-; which would have hidden the bug.
+; Note: Technically the regalloc could have been smarter and this move not
+; required, which would have hidden the bug.
; CHECK: vmovapd %xmm0, [[TMP:%xmm[0-9]+]]
-; Crush xmm0.
-; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NOT: vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vcvtsi2sdq %rsi, %xmm0, %xmm6
; CHECK: movl $339772768, %e[[INDIRECT_CALL2:[a-z]+]]
+; CHECK-NOT: vmovapd %xmm7, %xmm0
+; CHECK-NEXT: vmovapd %xmm6, %xmm1
; Set TMP in the first argument of the second call.
-; CHECK-NEXT: vmovapd [[TMP]], %xmm0
-; CHECK: callq *%r[[INDIRECT_CALL2]]
+; CHECK-NEXT: callq *%r[[INDIRECT_CALL2]]
; CHECK: retq
define double @foo(i64 %arg) {
top:
diff --git a/test/CodeGen/X86/crash.ll b/test/CodeGen/X86/crash.ll
index a95b84d4c3b0..4bdb2ddfab62 100644
--- a/test/CodeGen/X86/crash.ll
+++ b/test/CodeGen/X86/crash.ll
@@ -314,9 +314,9 @@ declare %t14* @_ZN4llvm9MCContext16CreateTempSymbolEv(%t2*)
declare void @_ZNSt6vectorIN4llvm11MachineMoveESaIS1_EE13_M_insert_auxEN9__gnu_cxx17__normal_iteratorIPS1_S3_EERKS1_(%t10*, %t21* byval align 4, %t13*)
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
; PR10463
; Spilling a virtual register with <undef> uses.
diff --git a/test/CodeGen/X86/ctpop-combine.ll b/test/CodeGen/X86/ctpop-combine.ll
index 435401639f05..b7031a817e82 100644
--- a/test/CodeGen/X86/ctpop-combine.ll
+++ b/test/CodeGen/X86/ctpop-combine.ll
@@ -36,11 +36,11 @@ define i32 @test2(i64 %x) nounwind readnone {
define i32 @test3(i64 %x) nounwind readnone {
; CHECK-LABEL: test3:
; CHECK: # BB#0:
-; CHECK-NEXT: popcntq %rdi, %rax
-; CHECK-NEXT: andb $63, %al
-; CHECK-NEXT: cmpb $2, %al
-; CHECK-NEXT: sbbl %eax, %eax
-; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: popcntq %rdi, %rcx
+; CHECK-NEXT: andb $63, %cl
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpb $2, %cl
+; CHECK-NEXT: setb %al
; CHECK-NEXT: retq
%count = tail call i64 @llvm.ctpop.i64(i64 %x)
%cast = trunc i64 %count to i6 ; Too small for 0-64
diff --git a/test/CodeGen/X86/dag-fmf-cse.ll b/test/CodeGen/X86/dag-fmf-cse.ll
index ac8c5000aba4..c12c49d0f40b 100644
--- a/test/CodeGen/X86/dag-fmf-cse.ll
+++ b/test/CodeGen/X86/dag-fmf-cse.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=fma -enable-unsafe-fp-math -enable-fmf-dag=1 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=fma -enable-unsafe-fp-math | FileCheck %s
; If fast-math-flags are propagated correctly, the mul1 expression
; should be recognized as a factor in the last fsub, so we should
diff --git a/test/CodeGen/X86/dag-merge-fast-accesses.ll b/test/CodeGen/X86/dag-merge-fast-accesses.ll
index 867881d83d3f..e5dfccb278ce 100644
--- a/test/CodeGen/X86/dag-merge-fast-accesses.ll
+++ b/test/CodeGen/X86/dag-merge-fast-accesses.ll
@@ -51,19 +51,11 @@ define void @merge_vec_element_store(<4 x double> %v, double* %ptr) {
}
-;; TODO: FAST *should* be:
-;; movups (%rdi), %xmm0
-;; movups %xmm0, 40(%rdi)
-;; ..but is not currently. See the UseAA FIXME in DAGCombiner.cpp
-;; visitSTORE.
-
define void @merge_vec_load_and_stores(i64 *%ptr) {
; FAST-LABEL: merge_vec_load_and_stores:
; FAST: # BB#0:
-; FAST-NEXT: movq (%rdi), %rax
-; FAST-NEXT: movq 8(%rdi), %rcx
-; FAST-NEXT: movq %rax, 40(%rdi)
-; FAST-NEXT: movq %rcx, 48(%rdi)
+; FAST-NEXT: movups (%rdi), %xmm0
+; FAST-NEXT: movups %xmm0, 40(%rdi)
; FAST-NEXT: retq
;
; SLOW-LABEL: merge_vec_load_and_stores:
diff --git a/test/CodeGen/X86/dagcombine-and-setcc.ll b/test/CodeGen/X86/dagcombine-and-setcc.ll
index 57adc8bc5daa..f7302aee65cf 100644
--- a/test/CodeGen/X86/dagcombine-and-setcc.ll
+++ b/test/CodeGen/X86/dagcombine-and-setcc.ll
@@ -12,10 +12,11 @@ declare i32 @printf(i8* nocapture readonly, ...)
;CHECK: cmpl
-;CHECK: setg
+;CHECK: setl
;CHECK: cmpl
-;CHECK: setg
-;CHECK: andb
+;CHECK: setl
+;CHECK: orb
+;CHECK: je
@.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
; Function Attrs: optsize ssp uwtable
diff --git a/test/CodeGen/X86/dagcombine-cse.ll b/test/CodeGen/X86/dagcombine-cse.ll
index bff0e64910bf..a283bcc6d460 100644
--- a/test/CodeGen/X86/dagcombine-cse.ll
+++ b/test/CodeGen/X86/dagcombine-cse.ll
@@ -1,7 +1,40 @@
-; REQUIRES: asserts
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mtriple=i386-apple-darwin -stats 2>&1 | grep asm-printer | grep 13
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+sse2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse2 | FileCheck %s --check-prefix=X64
define i32 @t(i8* %ref_frame_ptr, i32 %ref_frame_stride, i32 %idxX, i32 %idxY) nounwind {
+; X32-LABEL: t:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: imull {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movzwl 4(%eax,%ecx), %edx
+; X32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT: movd %edx, %xmm1
+; X32-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X32-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7]
+; X32-NEXT: movd %xmm0, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: t:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: ## kill: %EDX<def> %EDX<kill> %RDX<def>
+; X64-NEXT: ## kill: %ESI<def> %ESI<kill> %RSI<def>
+; X64-NEXT: imull %ecx, %esi
+; X64-NEXT: leal (%rsi,%rdx), %eax
+; X64-NEXT: cltq
+; X64-NEXT: leal 4(%rsi,%rdx), %ecx
+; X64-NEXT: movslq %ecx, %rcx
+; X64-NEXT: movzwl (%rdi,%rcx), %ecx
+; X64-NEXT: shlq $32, %rcx
+; X64-NEXT: movl (%rdi,%rax), %eax
+; X64-NEXT: orq %rcx, %rax
+; X64-NEXT: movd %rax, %xmm0
+; X64-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7]
+; X64-NEXT: movd %xmm0, %eax
+; X64-NEXT: retq
entry:
%tmp7 = mul i32 %idxY, %ref_frame_stride ; <i32> [#uses=2]
%tmp9 = add i32 %tmp7, %idxX ; <i32> [#uses=1]
diff --git a/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll b/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
index 7968777b0d88..7a19dd2a98d1 100644
--- a/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
+++ b/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
@@ -65,13 +65,13 @@ if.then: ; preds = %entry
if.end: ; preds = %entry, %if.then
%0 = getelementptr inbounds %struct.AAA3, %struct.AAA3* %var1, i64 0, i32 0, i64 0, !dbg !56
- call void @llvm.lifetime.start(i64 4, i8* %0) #4, !dbg !56
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #4, !dbg !56
tail call void @llvm.dbg.value(metadata %struct.AAA3* %var1, i64 0, metadata !32, metadata !57), !dbg !58
tail call void @llvm.dbg.value(metadata %struct.AAA3* %var1, i64 0, metadata !36, metadata !46), !dbg !59
tail call void @llvm.dbg.value(metadata i8* getelementptr inbounds ([1 x i8], [1 x i8]* @.str, i64 0, i64 0), i64 0, metadata !38, metadata !46), !dbg !62
call void @_Z3fooPcjPKc(i8* %0, i32 4, i8* nonnull getelementptr inbounds ([1 x i8], [1 x i8]* @.str, i64 0, i64 0)), !dbg !63
%1 = getelementptr inbounds %struct.AAA3, %struct.AAA3* %var2, i64 0, i32 0, i64 0, !dbg !65
- call void @llvm.lifetime.start(i64 4, i8* %1) #4, !dbg !65
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %1) #4, !dbg !65
call void @llvm.dbg.value(metadata %struct.AAA3* %var2, i64 0, metadata !33, metadata !57), !dbg !66
call void @llvm.dbg.value(metadata %struct.AAA3* %var2, i64 0, metadata !36, metadata !46), !dbg !67
call void @llvm.dbg.value(metadata i8* getelementptr inbounds ([1 x i8], [1 x i8]* @.str, i64 0, i64 0), i64 0, metadata !38, metadata !46), !dbg !69
@@ -96,18 +96,18 @@ if.end3: ; preds = %if.else, %if.then2
call void @llvm.dbg.value(metadata %struct.AAA3* %var1, i64 0, metadata !41, metadata !46), !dbg !82
call void @llvm.dbg.value(metadata i8* getelementptr inbounds ([1 x i8], [1 x i8]* @.str, i64 0, i64 0), i64 0, metadata !42, metadata !46), !dbg !84
call void @_Z3fooPcjPKc(i8* %0, i32 4, i8* nonnull getelementptr inbounds ([1 x i8], [1 x i8]* @.str, i64 0, i64 0)), !dbg !85
- call void @llvm.lifetime.end(i64 4, i8* %1) #4, !dbg !86
- call void @llvm.lifetime.end(i64 4, i8* %0) #4, !dbg !87
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %1) #4, !dbg !86
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #4, !dbg !87
ret void, !dbg !86
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare i8* @_Z5i2stri(i32) #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
declare void @_Z3fooPcjPKc(i8*, i32, i8*) #2
diff --git a/test/CodeGen/X86/div-rem-simplify.ll b/test/CodeGen/X86/div-rem-simplify.ll
new file mode 100644
index 000000000000..04cf439dc155
--- /dev/null
+++ b/test/CodeGen/X86/div-rem-simplify.ll
@@ -0,0 +1,187 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+
+; Div/rem by zero is undef.
+
+define i32 @srem0(i32 %x) {
+; CHECK-LABEL: srem0:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %rem = srem i32 %x, 0
+ ret i32 %rem
+}
+
+define i32 @urem0(i32 %x) {
+; CHECK-LABEL: urem0:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %rem = urem i32 %x, 0
+ ret i32 %rem
+}
+
+define i32 @sdiv0(i32 %x) {
+; CHECK-LABEL: sdiv0:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %div = sdiv i32 %x, 0
+ ret i32 %div
+}
+
+define i32 @udiv0(i32 %x) {
+; CHECK-LABEL: udiv0:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %div = udiv i32 %x, 0
+ ret i32 %div
+}
+
+; Div/rem by zero vectors is undef.
+
+define <4 x i32> @srem_vec0(<4 x i32> %x) {
+; CHECK-LABEL: srem_vec0:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %rem = srem <4 x i32> %x, zeroinitializer
+ ret <4 x i32> %rem
+}
+
+define <4 x i32> @urem_vec0(<4 x i32> %x) {
+; CHECK-LABEL: urem_vec0:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %rem = urem <4 x i32> %x, zeroinitializer
+ ret <4 x i32> %rem
+}
+
+define <4 x i32> @sdiv_vec0(<4 x i32> %x) {
+; CHECK-LABEL: sdiv_vec0:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %div = sdiv <4 x i32> %x, zeroinitializer
+ ret <4 x i32> %div
+}
+
+define <4 x i32> @udiv_vec0(<4 x i32> %x) {
+; CHECK-LABEL: udiv_vec0:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %div = udiv <4 x i32> %x, zeroinitializer
+ ret <4 x i32> %div
+}
+
+; Make sure we handle undef before we try to fold constants from the select with the 0.
+; These used to assert because we can't fold div/rem-by-0 into APInt.
+
+define i32 @sel_urem0(i1 %cond) {
+; CHECK-LABEL: sel_urem0:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 23, i32 234
+ %rem = urem i32 %sel, 0
+ ret i32 %rem
+}
+
+define i32 @sel_srem0(i1 %cond) {
+; CHECK-LABEL: sel_srem0:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 23, i32 234
+ %rem = srem i32 %sel, 0
+ ret i32 %rem
+}
+
+define i32 @sel_udiv0(i1 %cond) {
+; CHECK-LABEL: sel_udiv0:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 23, i32 234
+ %div = udiv i32 %sel, 0
+ ret i32 %div
+}
+
+define i32 @sel_sdiv0(i1 %cond) {
+; CHECK-LABEL: sel_sdiv0:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 23, i32 234
+ %div = sdiv i32 %sel, 0
+ ret i32 %div
+}
+
+; Make sure we handle undef before we try to fold constants from the select with the vector 0.
+; These used to assert because we can't fold div/rem-by-0 into APInt.
+
+define <4 x i32> @sel_urem0_vec(i1 %cond) {
+; CHECK-LABEL: sel_urem0_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, <4 x i32> <i32 -1, i32 0, i32 1, i32 2>, <4 x i32> <i32 11, i32 12, i32 13, i32 14>
+ %rem = urem <4 x i32> %sel, zeroinitializer
+ ret <4 x i32> %rem
+}
+
+define <4 x i32> @sel_srem0_vec(i1 %cond) {
+; CHECK-LABEL: sel_srem0_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, <4 x i32> <i32 -1, i32 0, i32 1, i32 2>, <4 x i32> <i32 11, i32 12, i32 13, i32 14>
+ %rem = srem <4 x i32> %sel, zeroinitializer
+ ret <4 x i32> %rem
+}
+
+define <4 x i32> @sel_udiv0_vec(i1 %cond) {
+; CHECK-LABEL: sel_udiv0_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, <4 x i32> <i32 -1, i32 0, i32 1, i32 2>, <4 x i32> <i32 11, i32 12, i32 13, i32 14>
+ %div = udiv <4 x i32> %sel, zeroinitializer
+ ret <4 x i32> %div
+}
+
+define <4 x i32> @sel_sdiv0_vec(i1 %cond) {
+; CHECK-LABEL: sel_sdiv0_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, <4 x i32> <i32 -1, i32 0, i32 1, i32 2>, <4 x i32> <i32 11, i32 12, i32 13, i32 14>
+ %div = sdiv <4 x i32> %sel, zeroinitializer
+ ret <4 x i32> %div
+}
+
+; If any element of a constant divisor vector is zero, the whole op is undef.
+
+define <4 x i32> @sdiv0elt_vec(<4 x i32> %x) {
+; CHECK-LABEL: sdiv0elt_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %zero = and <4 x i32> %x, <i32 0, i32 0, i32 0, i32 0>
+ %some_ones = or <4 x i32> %zero, <i32 0, i32 -1, i32 0, i32 3>
+ %div = sdiv <4 x i32> <i32 -11, i32 -12, i32 -13, i32 -14>, %some_ones
+ ret <4 x i32> %div
+}
+
+define <4 x i32> @udiv0elt_vec(<4 x i32> %x) {
+; CHECK-LABEL: udiv0elt_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %div = udiv <4 x i32> <i32 11, i32 12, i32 13, i32 14>, <i32 0, i32 3, i32 4, i32 0>
+ ret <4 x i32> %div
+}
+
+define <4 x i32> @urem0elt_vec(<4 x i32> %x) {
+; CHECK-LABEL: urem0elt_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %zero = and <4 x i32> %x, <i32 0, i32 0, i32 0, i32 0>
+ %some_ones = or <4 x i32> %zero, <i32 0, i32 0, i32 0, i32 3>
+ %rem = urem <4 x i32> <i32 11, i32 12, i32 13, i32 14>, %some_ones
+ ret <4 x i32> %rem
+}
+
+define <4 x i32> @srem0elt_vec(<4 x i32> %x) {
+; CHECK-LABEL: srem0elt_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
+ %rem = srem <4 x i32> <i32 -11, i32 -12, i32 -13, i32 -14>, <i32 -3, i32 -3, i32 0, i32 2>
+ ret <4 x i32> %rem
+}
+
diff --git a/test/CodeGen/X86/divrem8_ext.ll b/test/CodeGen/X86/divrem8_ext.ll
index fc516001aa59..7521156a370e 100644
--- a/test/CodeGen/X86/divrem8_ext.ll
+++ b/test/CodeGen/X86/divrem8_ext.ll
@@ -206,8 +206,7 @@ define i64 @pr25754(i8 %a, i8 %c) {
; X32-NEXT: movzbl %ah, %ecx # NOREX
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: sbbl %edx, %edx
-; X32-NEXT: andl $1, %edx
+; X32-NEXT: xorl %edx, %edx
; X32-NEXT: retl
;
; X64-LABEL: pr25754:
diff --git a/test/CodeGen/X86/dont-trunc-store-double-to-float.ll b/test/CodeGen/X86/dont-trunc-store-double-to-float.ll
index 8a334d21631a..05245d0d9e1e 100644
--- a/test/CodeGen/X86/dont-trunc-store-double-to-float.ll
+++ b/test/CodeGen/X86/dont-trunc-store-double-to-float.ll
@@ -1,9 +1,9 @@
; RUN: llc -march=x86 < %s | FileCheck %s
; CHECK-LABEL: @bar
-; CHECK: movl $1074339512,
-; CHECK: movl $1374389535,
-; CHECK: movl $1078523331,
+; CHECK-DAG: movl $1074339512,
+; CHECK-DAG: movl $1374389535,
+; CHECK-DAG: movl $1078523331,
define void @bar() unnamed_addr {
entry-block:
%a = alloca double
diff --git a/test/CodeGen/X86/dropped_constructor.ll b/test/CodeGen/X86/dropped_constructor.ll
new file mode 100644
index 000000000000..3478422a34fd
--- /dev/null
+++ b/test/CodeGen/X86/dropped_constructor.ll
@@ -0,0 +1,19 @@
+; Test to ensure that a global value that was dropped to a declaration
+; (e.g. ThinLTO will drop non-prevailing weak symbols to declarations) does not
+; provoke creation of a comdat when it originally had an initializer.
+; RUN: llc -mtriple x86_64-unknown-linux-gnu < %s | FileCheck %s
+; CHECK-NOT: comdat
+
+; ModuleID = 'dropped_constructor.o'
+source_filename = "dropped_constructor.c"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@fv = external global i8, align 8
+@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @__cxx_global_var_init.33, i8* @fv }]
+
+; Function Attrs: norecurse nounwind
+define internal void @__cxx_global_var_init.33() section ".text.startup" {
+ store i8 1, i8* @fv, align 8
+ ret void
+}
diff --git a/test/CodeGen/X86/dwarf-headers.ll b/test/CodeGen/X86/dwarf-headers.ll
new file mode 100644
index 000000000000..612807dd8123
--- /dev/null
+++ b/test/CodeGen/X86/dwarf-headers.ll
@@ -0,0 +1,109 @@
+; RUN: llc -split-dwarf=Disable -dwarf-version=4 -generate-type-units \
+; RUN: -filetype=obj -O0 -mtriple=x86_64-unknown-linux-gnu < %s \
+; RUN: | llvm-dwarfdump - | FileCheck %s --check-prefix=SINGLE-4
+
+; RUN: llc -split-dwarf=Enable -dwarf-version=4 -generate-type-units \
+; RUN: -filetype=obj -O0 -mtriple=x86_64-unknown-linux-gnu < %s \
+; RUN: | llvm-dwarfdump - | FileCheck %s --check-prefix=SPLIT-4
+
+; RUN: llc -split-dwarf=Disable -dwarf-version=5 -generate-type-units \
+; RUN: -filetype=obj -O0 -mtriple=x86_64-unknown-linux-gnu < %s \
+; RUN: | llvm-dwarfdump - | FileCheck %s --check-prefix=SINGLE-5
+
+; RUN: llc -split-dwarf=Enable -dwarf-version=5 -generate-type-units \
+; RUN: -filetype=obj -O0 -mtriple=x86_64-unknown-linux-gnu < %s \
+; RUN: | llvm-dwarfdump - | FileCheck %s --check-prefix=SPLIT-5
+
+; Verify that DWARF unit headers are generated correctly.
+; There are 7 variants: v4 CU, v4 TU, v5 (normal/skeleton/split) CU,
+; v5 (normal/split) TU. The v5 CU variants and TU variants differ
+; only in the unit_type code.
+; (v2 through v4 CUs are all the same, and TUs were introduced in v4,
+; so we don't bother checking older versions.)
+
+; Test case built from:
+;struct S {
+; int s1;
+;};
+;
+;S s;
+
+; Verify the v4 non-split headers.
+; Note that we check the exact offset of the DIEs because that tells us
+; the length of the header.
+;
+; SINGLE-4: .debug_info contents:
+; SINGLE-4: 0x00000000: Compile Unit: {{.*}} version = 0x0004 abbr_offset
+; SINGLE-4: 0x0000000b: DW_TAG_compile_unit
+;
+; SINGLE-4: .debug_types contents:
+; SINGLE-4: 0x00000000: Type Unit: {{.*}} version = 0x0004 abbr_offset
+; SINGLE-4: 0x00000017: DW_TAG_type_unit
+
+; Verify the v4 split headers.
+;
+; SPLIT-4: .debug_info contents:
+; SPLIT-4: 0x00000000: Compile Unit: {{.*}} version = 0x0004 abbr_offset
+; SPLIT-4: 0x0000000b: DW_TAG_compile_unit
+;
+; SPLIT-4: .debug_info.dwo contents:
+; SPLIT-4: 0x00000000: Compile Unit: {{.*}} version = 0x0004 abbr_offset
+; SPLIT-4: 0x0000000b: DW_TAG_compile_unit
+;
+; SPLIT-4: .debug_types.dwo contents:
+; SPLIT-4: 0x00000000: Type Unit: {{.*}} version = 0x0004 abbr_offset
+; SPLIT-4: 0x00000017: DW_TAG_type_unit
+
+; Verify the v5 non-split headers.
+;
+; SINGLE-5: .debug_info contents:
+; SINGLE-5: 0x00000000: Compile Unit: {{.*}} version = 0x0005 unit_type = DW_UT_compile abbr_offset
+; SINGLE-5: 0x0000000c: DW_TAG_compile_unit
+;
+; FIXME: V5 wants type units in .debug_info not .debug_types.
+; SINGLE-5: .debug_types contents:
+; SINGLE-5: 0x00000000: Type Unit: {{.*}} version = 0x0005 unit_type = DW_UT_type abbr_offset
+; SINGLE-5: 0x00000018: DW_TAG_type_unit
+
+; Verify the v5 split headers.
+;
+; SPLIT-5: .debug_info contents:
+; SPLIT-5: 0x00000000: Compile Unit: {{.*}} version = 0x0005 unit_type = DW_UT_skeleton abbr_offset
+; SPLIT-5: 0x0000000c: DW_TAG_compile_unit
+;
+; SPLIT-5: .debug_info.dwo contents:
+; SPLIT-5: 0x00000000: Compile Unit: {{.*}} version = 0x0005 unit_type = DW_UT_split_compile abbr_offset
+; SPLIT-5: 0x0000000c: DW_TAG_compile_unit
+;
+; FIXME: V5 wants type units in .debug_info.dwo not .debug_types.dwo.
+; SPLIT-5: .debug_types.dwo contents:
+; SPLIT-5: 0x00000000: Type Unit: {{.*}} version = 0x0005 unit_type = DW_UT_split_type abbr_offset
+; SPLIT-5: 0x00000018: DW_TAG_type_unit
+
+
+; ModuleID = 't.cpp'
+source_filename = "t.cpp"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+%struct.S = type { i32 }
+
+@s = global %struct.S zeroinitializer, align 4, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!10, !11}
+!llvm.ident = !{!12}
+
+!0 = !DIGlobalVariableExpression(var: !1)
+!1 = distinct !DIGlobalVariable(name: "s", scope: !2, file: !3, line: 5, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 5.0.0 (trunk 295942)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
+!3 = !DIFile(filename: "t.cpp", directory: "/home/probinson/projects/scratch")
+!4 = !{}
+!5 = !{!0}
+!6 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "S", file: !3, line: 1, size: 32, elements: !7, identifier: "_ZTS1S")
+!7 = !{!8}
+!8 = !DIDerivedType(tag: DW_TAG_member, name: "s1", scope: !6, file: !3, line: 2, baseType: !9, size: 32)
+!9 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!10 = !{i32 2, !"Dwarf Version", i32 4}
+!11 = !{i32 2, !"Debug Info Version", i32 3}
+!12 = !{!"clang version 5.0.0 (trunk 295942)"}
diff --git a/test/CodeGen/X86/dynamic-alloca-lifetime.ll b/test/CodeGen/X86/dynamic-alloca-lifetime.ll
index 034b074ef9bd..996eec05163d 100644
--- a/test/CodeGen/X86/dynamic-alloca-lifetime.ll
+++ b/test/CodeGen/X86/dynamic-alloca-lifetime.ll
@@ -10,10 +10,10 @@ target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
target triple = "i386-apple-macosx10.10.0"
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
; Function Attrs: ssp
define void @foo(i1 %cond1, i1 %cond2) #1 {
@@ -30,11 +30,11 @@ end1:
if.else130: ; preds = %bb1
%tmp = getelementptr inbounds [8192 x i8], [8192 x i8]* %bitmapBuffer, i32 0, i32 0
- call void @llvm.lifetime.start(i64 8192, i8* %tmp) #0
- call void @llvm.lifetime.end(i64 8192, i8* %tmp) #0
+ call void @llvm.lifetime.start.p0i8(i64 8192, i8* %tmp) #0
+ call void @llvm.lifetime.end.p0i8(i64 8192, i8* %tmp) #0
%tmp25 = getelementptr inbounds [8192 x i8], [8192 x i8]* %bitmapBuffer229, i32 0, i32 0
- call void @llvm.lifetime.start(i64 8192, i8* %tmp25) #0
- call void @llvm.lifetime.end(i64 8192, i8* %tmp25) #0
+ call void @llvm.lifetime.start.p0i8(i64 8192, i8* %tmp25) #0
+ call void @llvm.lifetime.end.p0i8(i64 8192, i8* %tmp25) #0
br label %end1
}
diff --git a/test/CodeGen/X86/elf-associated.ll b/test/CodeGen/X86/elf-associated.ll
new file mode 100644
index 000000000000..361cf66cce72
--- /dev/null
+++ b/test/CodeGen/X86/elf-associated.ll
@@ -0,0 +1,39 @@
+; RUN: llc -data-sections=1 -mtriple x86_64-pc-linux-gnu < %s | FileCheck %s
+; RUN: llc -data-sections=0 -mtriple x86_64-pc-linux-gnu < %s | FileCheck %s
+
+@a = global i32 1
+@b = global i32 2, !associated !0
+!0 = !{i32* @a}
+; CHECK-DAG: .section .data.b,"awo",@progbits,a
+
+; A loop of !associated references is OK. Also, normally -data-sections=0 would place @c and @d in the same section; !associated prevents that.
+@c = global i32 2, !associated !2
+@d = global i32 2, !associated !1
+!1 = !{i32* @c}
+!2 = !{i32* @d}
+; CHECK-DAG: .section .data.c,"awo",@progbits,d
+; CHECK-DAG: .section .data.d,"awo",@progbits,c
+
+; BSS is OK.
+@e = global i32 0
+@f = global i32 0, !associated !3
+@g = global i32 1, !associated !3
+!3 = !{i32* @e}
+; CHECK-DAG: .section .bss.f,"awo",@nobits,e
+; CHECK-DAG: .section .data.g,"awo",@progbits,e
+
+; Explicit sections.
+@h = global i32 1, section "aaa"
+@i = global i32 1, section "bbb", !associated !4
+@j = global i32 1, section "bbb", !associated !4
+@k = global i32 1, !associated !4
+!4 = !{i32* @h}
+; CHECK-DAG: .section aaa,"aw",@progbits
+; CHECK-DAG: .section bbb,"awo",@progbits,h,unique,1
+; CHECK-DAG: .section bbb,"awo",@progbits,h,unique,2
+; CHECK-DAG: .section .data.k,"awo",@progbits,h
+
+; Non-GlobalObject metadata.
+@l = global i32 1, section "ccc", !associated !5
+!5 = !{i32* null}
+; CHECK-DAG: .section ccc,"aw",@progbits
diff --git a/test/CodeGen/X86/evex-to-vex-compress.mir b/test/CodeGen/X86/evex-to-vex-compress.mir
index 043f3a38aa70..2295ddb5b2b9 100755
--- a/test/CodeGen/X86/evex-to-vex-compress.mir
+++ b/test/CodeGen/X86/evex-to-vex-compress.mir
@@ -119,6 +119,14 @@ body: |
%ymm0 = VPANDQZ256rm %ymm0, %rip, 1, _, %rax, _
; CHECK: %ymm0 = VPANDYrr %ymm0, %ymm1
%ymm0 = VPANDQZ256rr %ymm0, %ymm1
+ ; CHECK: %ymm0 = VPANDNYrm %ymm0, %rip, 1, _, %rax, _
+ %ymm0 = VPANDNDZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPANDNYrr %ymm0, %ymm1
+ %ymm0 = VPANDNDZ256rr %ymm0, %ymm1
+ ; CHECK: %ymm0 = VPANDNYrm %ymm0, %rip, 1, _, %rax, _
+ %ymm0 = VPANDNQZ256rm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPANDNYrr %ymm0, %ymm1
+ %ymm0 = VPANDNQZ256rr %ymm0, %ymm1
; CHECK: %ymm0 = VPAVGBYrm %ymm0, %rip, 1, _, %rax, _
%ymm0 = VPAVGBZ256rm %ymm0, %rip, 1, _, %rax, _
; CHECK: %ymm0 = VPAVGBYrr %ymm0, %ymm1
@@ -347,13 +355,13 @@ body: |
%ymm0 = VMAXCPSZ256rm %ymm0, %rip, 1, _, %rax, _
; CHECK: %ymm0 = VMAXCPSYrr %ymm0, %ymm1
%ymm0 = VMAXCPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMAXPDYrm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VMAXCPDYrm %ymm0, %rip, 1, _, %rax, _
%ymm0 = VMAXPDZ256rm %ymm0, %rip, 1, _, %rax, _
- ; CHECK: %ymm0 = VMAXPDYrr %ymm0, %ymm1
+ ; CHECK: %ymm0 = VMAXCPDYrr %ymm0, %ymm1
%ymm0 = VMAXPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMAXPSYrm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VMAXCPSYrm %ymm0, %rip, 1, _, %rax, _
%ymm0 = VMAXPSZ256rm %ymm0, %rip, 1, _, %rax, _
- ; CHECK: %ymm0 = VMAXPSYrr %ymm0, %ymm1
+ ; CHECK: %ymm0 = VMAXCPSYrr %ymm0, %ymm1
%ymm0 = VMAXPSZ256rr %ymm0, %ymm1
; CHECK: %ymm0 = VMINCPDYrm %ymm0, %rip, 1, _, %rax, _
%ymm0 = VMINCPDZ256rm %ymm0, %rip, 1, _, %rax, _
@@ -363,13 +371,13 @@ body: |
%ymm0 = VMINCPSZ256rm %ymm0, %rip, 1, _, %rax, _
; CHECK: %ymm0 = VMINCPSYrr %ymm0, %ymm1
%ymm0 = VMINCPSZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMINPDYrm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VMINCPDYrm %ymm0, %rip, 1, _, %rax, _
%ymm0 = VMINPDZ256rm %ymm0, %rip, 1, _, %rax, _
- ; CHECK: %ymm0 = VMINPDYrr %ymm0, %ymm1
+ ; CHECK: %ymm0 = VMINCPDYrr %ymm0, %ymm1
%ymm0 = VMINPDZ256rr %ymm0, %ymm1
- ; CHECK: %ymm0 = VMINPSYrm %ymm0, %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VMINCPSYrm %ymm0, %rip, 1, _, %rax, _
%ymm0 = VMINPSZ256rm %ymm0, %rip, 1, _, %rax, _
- ; CHECK: %ymm0 = VMINPSYrr %ymm0, %ymm1
+ ; CHECK: %ymm0 = VMINCPSYrr %ymm0, %ymm1
%ymm0 = VMINPSZ256rr %ymm0, %ymm1
; CHECK: %ymm0 = VXORPDYrm %ymm0, %rip, 1, _, %rax, _
%ymm0 = VXORPDZ256rm %ymm0, %rip, 1, _, %rax, _
@@ -687,18 +695,20 @@ body: |
%ymm0 = VPMOVZXWQZ256rm %rip, 1, _, %rax, _
; CHECK: %ymm0 = VPMOVZXWQYrr %xmm0
%ymm0 = VPMOVZXWQZ256rr %xmm0
+ ; CHECK: %ymm0 = VBROADCASTF128 %rip, 1, _, %rax, _
+ %ymm0 = VBROADCASTF32X4Z256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VBROADCASTSDYrm %rip, 1, _, %rax, _
+ %ymm0 = VBROADCASTF32X2Z256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VBROADCASTSDYrr %xmm0
+ %ymm0 = VBROADCASTF32X2Z256r %xmm0
; CHECK: %ymm0 = VBROADCASTSDYrm %rip, 1, _, %rax, _
%ymm0 = VBROADCASTSDZ256m %rip, 1, _, %rax, _
; CHECK: %ymm0 = VBROADCASTSDYrr %xmm0
%ymm0 = VBROADCASTSDZ256r %xmm0
- ; CHECK: %ymm0 = VBROADCASTSDYrr %xmm0
- %ymm0 = VBROADCASTSDZ256r_s %xmm0
; CHECK: %ymm0 = VBROADCASTSSYrm %rip, 1, _, %rax, _
%ymm0 = VBROADCASTSSZ256m %rip, 1, _, %rax, _
; CHECK: %ymm0 = VBROADCASTSSYrr %xmm0
%ymm0 = VBROADCASTSSZ256r %xmm0
- ; CHECK: %ymm0 = VBROADCASTSSYrr %xmm0
- %ymm0 = VBROADCASTSSZ256r_s %xmm0
; CHECK: %ymm0 = VPBROADCASTBYrm %rip, 1, _, %rax, _
%ymm0 = VPBROADCASTBZ256m %rip, 1, _, %rax, _
; CHECK: %ymm0 = VPBROADCASTBYrr %xmm0
@@ -711,6 +721,12 @@ body: |
%ymm0 = VPBROADCASTWZ256m %rip, 1, _, %rax, _
; CHECK: %ymm0 = VPBROADCASTWYrr %xmm0
%ymm0 = VPBROADCASTWZ256r %xmm0
+ ; CHECK: %ymm0 = VBROADCASTI128 %rip, 1, _, %rax, _
+ %ymm0 = VBROADCASTI32X4Z256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPBROADCASTQYrm %rip, 1, _, %rax, _
+ %ymm0 = VBROADCASTI32X2Z256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm0 = VPBROADCASTQYrr %xmm0
+ %ymm0 = VBROADCASTI32X2Z256r %xmm0
; CHECK: %ymm0 = VPBROADCASTQYrm %rip, 1, _, %rax, _
%ymm0 = VPBROADCASTQZ256m %rip, 1, _, %rax, _
; CHECK: %ymm0 = VPBROADCASTQYrr %xmm0
@@ -1043,13 +1059,13 @@ body: |
%xmm0 = VMAXCPSZ128rm %xmm0, %rip, 1, _, %rax, _
; CHECK: %xmm0 = VMAXCPSrr %xmm0, %xmm1
%xmm0 = VMAXCPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXPDrm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMAXCPDrm %xmm0, %rip, 1, _, %rax, _
%xmm0 = VMAXPDZ128rm %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VMAXPDrr %xmm0, %xmm1
+ ; CHECK: %xmm0 = VMAXCPDrr %xmm0, %xmm1
%xmm0 = VMAXPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXPSrm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMAXCPSrm %xmm0, %rip, 1, _, %rax, _
%xmm0 = VMAXPSZ128rm %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VMAXPSrr %xmm0, %xmm1
+ ; CHECK: %xmm0 = VMAXCPSrr %xmm0, %xmm1
%xmm0 = VMAXPSZ128rr %xmm0, %xmm1
; CHECK: %xmm0 = VMINCPDrm %xmm0, %rip, 1, _, %rax, _
%xmm0 = VMINCPDZ128rm %xmm0, %rip, 1, _, %rax, _
@@ -1059,13 +1075,13 @@ body: |
%xmm0 = VMINCPSZ128rm %xmm0, %rip, 1, _, %rax, _
; CHECK: %xmm0 = VMINCPSrr %xmm0, %xmm1
%xmm0 = VMINCPSZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINPDrm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMINCPDrm %xmm0, %rip, 1, _, %rax, _
%xmm0 = VMINPDZ128rm %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VMINPDrr %xmm0, %xmm1
+ ; CHECK: %xmm0 = VMINCPDrr %xmm0, %xmm1
%xmm0 = VMINPDZ128rr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINPSrm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMINCPSrm %xmm0, %rip, 1, _, %rax, _
%xmm0 = VMINPSZ128rm %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VMINPSrr %xmm0, %xmm1
+ ; CHECK: %xmm0 = VMINCPSrr %xmm0, %xmm1
%xmm0 = VMINPSZ128rr %xmm0, %xmm1
; CHECK: %xmm0 = VMULPDrm %xmm0, %rip, 1, _, %rax, _
%xmm0 = VMULPDZ128rm %xmm0, %rip, 1, _, %rax, _
@@ -1123,6 +1139,14 @@ body: |
%xmm0 = VPANDQZ128rm %xmm0, %rip, 1, _, %rax, _
; CHECK: %xmm0 = VPANDrr %xmm0, %xmm1
%xmm0 = VPANDQZ128rr %xmm0, %xmm1
+ ; CHECK: %xmm0 = VPANDNrm %xmm0, %rip, 1, _, %rax, _
+ %xmm0 = VPANDNDZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPANDNrr %xmm0, %xmm1
+ %xmm0 = VPANDNDZ128rr %xmm0, %xmm1
+ ; CHECK: %xmm0 = VPANDNrm %xmm0, %rip, 1, _, %rax, _
+ %xmm0 = VPANDNQZ128rm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VPANDNrr %xmm0, %xmm1
+ %xmm0 = VPANDNQZ128rr %xmm0, %xmm1
; CHECK: %xmm0 = VPAVGBrm %xmm0, %rip, 1, _, %rax, _
%xmm0 = VPAVGBZ128rm %xmm0, %rip, 1, _, %rax, _
; CHECK: %xmm0 = VPAVGBrr %xmm0, %xmm1
@@ -1695,8 +1719,6 @@ body: |
%xmm0 = VBROADCASTSSZ128m %rip, _, _, _, _
; CHECK: %xmm0 = VBROADCASTSSrr %xmm0
%xmm0 = VBROADCASTSSZ128r %xmm0
- ; CHECK: %xmm0 = VBROADCASTSSrr %xmm0
- %xmm0 = VBROADCASTSSZ128r_s %xmm0
; CHECK: %xmm0 = VPBROADCASTBrm %rip, _, _, _, _
%xmm0 = VPBROADCASTBZ128m %rip, _, _, _, _
; CHECK: %xmm0 = VPBROADCASTBrr %xmm0
@@ -1713,6 +1735,10 @@ body: |
%xmm0 = VPBROADCASTWZ128m %rip, _, _, _, _
; CHECK: %xmm0 = VPBROADCASTWrr %xmm0
%xmm0 = VPBROADCASTWZ128r %xmm0
+ ; CHECK: %xmm0 = VPBROADCASTQrm %rip, _, _, _, _
+ %xmm0 = VBROADCASTI32X2Z128m %rip, _, _, _, _
+ ; CHECK: %xmm0 = VPBROADCASTQrr %xmm0
+ %xmm0 = VBROADCASTI32X2Z128r %xmm0
; CHECK: %xmm0 = VCVTPS2PHrr %xmm0, 2
%xmm0 = VCVTPS2PHZ128rr %xmm0, 2
; CHECK: VCVTPS2PHmr %rdi, %xmm0, 1, _, 0, _, _
@@ -1784,19 +1810,19 @@ body: |
%xmm0 = VMAXCSSZrm %xmm0, %rip, 1, _, %rax, _
; CHECK: %xmm0 = VMAXCSSrr %xmm0, %xmm1
%xmm0 = VMAXCSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXSDrm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMAXCSDrm %xmm0, %rip, 1, _, %rax, _
%xmm0 = VMAXSDZrm %xmm0, %rip, 1, _, %rax, _
; CHECK: %xmm0 = VMAXSDrm_Int %xmm0, %rip, 1, _, %rax, _
%xmm0 = VMAXSDZrm_Int %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VMAXSDrr %xmm0, %xmm1
+ ; CHECK: %xmm0 = VMAXCSDrr %xmm0, %xmm1
%xmm0 = VMAXSDZrr %xmm0, %xmm1
; CHECK: %xmm0 = VMAXSDrr_Int %xmm0, %xmm1
%xmm0 = VMAXSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMAXSSrm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMAXCSSrm %xmm0, %rip, 1, _, %rax, _
%xmm0 = VMAXSSZrm %xmm0, %rip, 1, _, %rax, _
; CHECK: %xmm0 = VMAXSSrm_Int %xmm0, %rip, 1, _, %rax, _
%xmm0 = VMAXSSZrm_Int %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VMAXSSrr %xmm0, %xmm1
+ ; CHECK: %xmm0 = VMAXCSSrr %xmm0, %xmm1
%xmm0 = VMAXSSZrr %xmm0, %xmm1
; CHECK: %xmm0 = VMAXSSrr_Int %xmm0, %xmm1
%xmm0 = VMAXSSZrr_Int %xmm0, %xmm1
@@ -1808,19 +1834,19 @@ body: |
%xmm0 = VMINCSSZrm %xmm0, %rip, 1, _, %rax, _
; CHECK: %xmm0 = VMINCSSrr %xmm0, %xmm1
%xmm0 = VMINCSSZrr %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINSDrm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMINCSDrm %xmm0, %rip, 1, _, %rax, _
%xmm0 = VMINSDZrm %xmm0, %rip, 1, _, %rax, _
; CHECK: %xmm0 = VMINSDrm_Int %xmm0, %rip, 1, _, %rax, _
%xmm0 = VMINSDZrm_Int %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VMINSDrr %xmm0, %xmm1
+ ; CHECK: %xmm0 = VMINCSDrr %xmm0, %xmm1
%xmm0 = VMINSDZrr %xmm0, %xmm1
; CHECK: %xmm0 = VMINSDrr_Int %xmm0, %xmm1
%xmm0 = VMINSDZrr_Int %xmm0, %xmm1
- ; CHECK: %xmm0 = VMINSSrm %xmm0, %rip, 1, _, %rax, _
+ ; CHECK: %xmm0 = VMINCSSrm %xmm0, %rip, 1, _, %rax, _
%xmm0 = VMINSSZrm %xmm0, %rip, 1, _, %rax, _
; CHECK: %xmm0 = VMINSSrm_Int %xmm0, %rip, 1, _, %rax, _
%xmm0 = VMINSSZrm_Int %xmm0, %rip, 1, _, %rax, _
- ; CHECK: %xmm0 = VMINSSrr %xmm0, %xmm1
+ ; CHECK: %xmm0 = VMINCSSrr %xmm0, %xmm1
%xmm0 = VMINSSZrr %xmm0, %xmm1
; CHECK: %xmm0 = VMINSSrr_Int %xmm0, %xmm1
%xmm0 = VMINSSZrr_Int %xmm0, %xmm1
@@ -2064,6 +2090,8 @@ body: |
VPEXTRWZmr %rdi, 1, _, 0, _, %xmm0, 3
; CHECK: %eax = VPEXTRWri %xmm0, 1
%eax = VPEXTRWZrr %xmm0, 1
+ ; CHECK: %eax = VPEXTRWrr_REV %xmm0, 1
+ %eax = VPEXTRWZrr_REV %xmm0, 1
; CHECK: %xmm0 = VPINSRBrm %xmm0, %rsi, 1, _, 0, _, 3
%xmm0 = VPINSRBZrm %xmm0, %rsi, 1, _, 0, _, 3
; CHECK: %xmm0 = VPINSRBrr %xmm0, %edi, 5
@@ -2096,18 +2124,18 @@ body: |
%xmm0 = VSQRTSSZr %xmm0, _
; CHECK: %xmm0 = VSQRTSSr_Int %xmm0, _
%xmm0 = VSQRTSSZr_Int %xmm0, _
- ; CHECK: %rdi = VCVTSD2SI64rm %rdi, %xmm0, 1, _, 0
- %rdi = VCVTSD2SI64Zrm %rdi, %xmm0, 1, _, 0
; CHECK: %rdi = VCVTSD2SI64rr %xmm0
%rdi = VCVTSD2SI64Zrr %xmm0
- ; CHECK: %edi = VCVTSD2SIrm %rdi, %xmm0, 1, _, 0
- %edi = VCVTSD2SIZrm %rdi, %xmm0, 1, _, 0
; CHECK: %edi = VCVTSD2SIrr %xmm0
%edi = VCVTSD2SIZrr %xmm0
; CHECK: %xmm0 = VCVTSD2SSrm %xmm0, %rdi, 1, _, 0, _
%xmm0 = VCVTSD2SSZrm %xmm0, %rdi, 1, _, 0, _
+ ; CHECK: %xmm0 = Int_VCVTSD2SSrm %xmm0, %rdi, 1, _, 0, _
+ %xmm0 = VCVTSD2SSZrm_Int %xmm0, %rdi, 1, _, 0, _
; CHECK: %xmm0 = VCVTSD2SSrr %xmm0, _
%xmm0 = VCVTSD2SSZrr %xmm0, _
+ ; CHECK: %xmm0 = Int_VCVTSD2SSrr %xmm0, _
+ %xmm0 = VCVTSD2SSZrr_Int %xmm0, _
; CHECK: %xmm0 = VCVTSI2SDrm %xmm0, %rdi, 1, _, 0, _
%xmm0 = VCVTSI2SDZrm %xmm0, %rdi, 1, _, 0, _
; CHECK: %xmm0 = Int_VCVTSI2SDrm %xmm0, %rdi, 1, _, 0, _
@@ -2124,10 +2152,30 @@ body: |
%xmm0 = VCVTSI2SSZrr %xmm0, _
; CHECK: %xmm0 = Int_VCVTSI2SSrr %xmm0, _
%xmm0 = VCVTSI2SSZrr_Int %xmm0, _
+ ; CHECK: %xmm0 = VCVTSI2SD64rm %xmm0, %rdi, 1, _, 0, _
+ %xmm0 = VCVTSI642SDZrm %xmm0, %rdi, 1, _, 0, _
+ ; CHECK: %xmm0 = Int_VCVTSI2SD64rm %xmm0, %rdi, 1, _, 0, _
+ %xmm0 = VCVTSI642SDZrm_Int %xmm0, %rdi, 1, _, 0, _
+ ; CHECK: %xmm0 = VCVTSI2SD64rr %xmm0, _
+ %xmm0 = VCVTSI642SDZrr %xmm0, _
+ ; CHECK: %xmm0 = Int_VCVTSI2SD64rr %xmm0, _
+ %xmm0 = VCVTSI642SDZrr_Int %xmm0, _
+ ; CHECK: %xmm0 = VCVTSI2SS64rm %xmm0, %rdi, 1, _, 0, _
+ %xmm0 = VCVTSI642SSZrm %xmm0, %rdi, 1, _, 0, _
+ ; CHECK: %xmm0 = Int_VCVTSI2SS64rm %xmm0, %rdi, 1, _, 0, _
+ %xmm0 = VCVTSI642SSZrm_Int %xmm0, %rdi, 1, _, 0, _
+ ; CHECK: %xmm0 = VCVTSI2SS64rr %xmm0, _
+ %xmm0 = VCVTSI642SSZrr %xmm0, _
+ ; CHECK: %xmm0 = Int_VCVTSI2SS64rr %xmm0, _
+ %xmm0 = VCVTSI642SSZrr_Int %xmm0, _
; CHECK: %xmm0 = VCVTSS2SDrm %xmm0, %rdi, 1, _, 0, _
%xmm0 = VCVTSS2SDZrm %xmm0, %rdi, 1, _, 0, _
+ ; CHECK: %xmm0 = Int_VCVTSS2SDrm %xmm0, %rdi, 1, _, 0, _
+ %xmm0 = VCVTSS2SDZrm_Int %xmm0, %rdi, 1, _, 0, _
; CHECK: %xmm0 = VCVTSS2SDrr %xmm0, _
%xmm0 = VCVTSS2SDZrr %xmm0, _
+ ; CHECK: %xmm0 = Int_VCVTSS2SDrr %xmm0, _
+ %xmm0 = VCVTSS2SDZrr_Int %xmm0, _
; CHECK: %rdi = VCVTSS2SI64rm %rdi, %xmm0, 1, _, 0
%rdi = VCVTSS2SI64Zrm %rdi, %xmm0, 1, _, 0
; CHECK: %rdi = VCVTSS2SI64rr %xmm0
@@ -2180,6 +2228,12 @@ body: |
%xmm0 = VMOVSDZrm %rip, _, _, _, _
; CHECK: %xmm0 = VMOVSDrr %xmm0, _
%xmm0 = VMOVSDZrr %xmm0, _
+ ; CHECK: %xmm0 = VMOVSDrr_REV %xmm0, _
+ %xmm0 = VMOVSDZrr_REV %xmm0, _
+ ; CHECK: %rax = VMOVSDto64rr %xmm0
+ %rax = VMOVSDto64Zrr %xmm0
+ ; CHECK: VMOVSDto64mr %rdi, %xmm0, _, _, _, _
+ VMOVSDto64Zmr %rdi, %xmm0, _, _, _, _
; CHECK: VMOVSSmr %rdi, %xmm0, _, _, _, _
VMOVSSZmr %rdi, %xmm0, _, _, _, _
; CHECK: %xmm0 = VMOVSSrm %rip, _, _, _, _
@@ -2188,8 +2242,14 @@ body: |
%xmm0 = VMOVSSZrr %xmm0, _
; CHECK: %xmm0 = VMOVSSrr_REV %xmm0, _
%xmm0 = VMOVSSZrr_REV %xmm0, _
+ ; CHECK: VMOVSS2DImr %rdi, %xmm0, _, _, _, _
+ VMOVSS2DIZmr %rdi, %xmm0, _, _, _, _
+ ; CHECK: %eax = VMOVSS2DIrr %xmm0
+ %eax = VMOVSS2DIZrr %xmm0
; CHECK: %xmm0 = VMOV64toPQIrr %rdi
%xmm0 = VMOV64toPQIZrr %rdi
+ ; CHECK: %xmm0 = VMOV64toPQIrm %rdi, _, _, _, _
+ %xmm0 = VMOV64toPQIZrm %rdi, _, _, _, _
; CHECK: %xmm0 = VMOV64toSDrr %rdi
%xmm0 = VMOV64toSDZrr %rdi
; CHECK: %xmm0 = VMOVDI2PDIrm %rip, _, _, _, _
@@ -2203,11 +2263,15 @@ body: |
; CHECK: VMOVPDI2DImr %rdi, %xmm0, _, _, _, _
VMOVPDI2DIZmr %rdi, %xmm0, _, _, _, _
; CHECK: %edi = VMOVPDI2DIrr %xmm0
- %edi = VMOVPDI2DIZrr %xmm0
+ %edi = VMOVPDI2DIZrr %xmm0
+ ; CHECK: %xmm0 = VMOVPQI2QIrr %xmm0
+ %xmm0 = VMOVPQI2QIZrr %xmm0
; CHECK: VMOVPQI2QImr %rdi, %xmm0, _, _, _, _
VMOVPQI2QIZmr %rdi, %xmm0, _, _, _, _
; CHECK: %rdi = VMOVPQIto64rr %xmm0
%rdi = VMOVPQIto64Zrr %xmm0
+ ; CHECK: VMOVPQIto64mr %rdi, %xmm0, _, _, _, _
+ VMOVPQIto64Zmr %rdi, %xmm0, _, _, _, _
; CHECK: %xmm0 = VMOVQI2PQIrm %rip, _, _, _, _
%xmm0 = VMOVQI2PQIZrm %rip, _, _, _, _
; CHECK: %xmm0 = VMOVZPQILo2PQIrr %xmm0
@@ -2244,6 +2308,14 @@ body: |
VUCOMISSZrm %xmm0, %rdi, _, _, _, _, implicit-def %eflags
; CHECK: VUCOMISSrr %xmm0, %xmm1, implicit-def %eflags
VUCOMISSZrr %xmm0, %xmm1, implicit-def %eflags
+ ; CHECK: VEXTRACTPSmr %rdi, 1, _, 0, _, %xmm0, _
+ VEXTRACTPSZmr %rdi, 1, _, 0, _, %xmm0, _
+ ; CHECK: %eax = VEXTRACTPSrr %xmm0, _
+ %eax = VEXTRACTPSZrr %xmm0, _
+ ; CHECK: %xmm0 = VINSERTPSrm %xmm0, %rdi, _, _, _, _, _
+ %xmm0 = VINSERTPSZrm %xmm0, %rdi, _, _, _, _, _
+ ; CHECK: %xmm0 = VINSERTPSrr %xmm0, %xmm0, _
+ %xmm0 = VINSERTPSZrr %xmm0, %xmm0, _
RET 0, %zmm0, %zmm1
...
@@ -2356,6 +2428,14 @@ body: |
%ymm16 = VPANDQZ256rm %ymm16, %rip, 1, _, %rax, _
; CHECK: %ymm16 = VPANDQZ256rr %ymm16, %ymm1
%ymm16 = VPANDQZ256rr %ymm16, %ymm1
+ ; CHECK: %ymm16 = VPANDNDZ256rm %ymm16, %rip, 1, _, %rax, _
+ %ymm16 = VPANDNDZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPANDNDZ256rr %ymm16, %ymm1
+ %ymm16 = VPANDNDZ256rr %ymm16, %ymm1
+ ; CHECK: %ymm16 = VPANDNQZ256rm %ymm16, %rip, 1, _, %rax, _
+ %ymm16 = VPANDNQZ256rm %ymm16, %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VPANDNQZ256rr %ymm16, %ymm1
+ %ymm16 = VPANDNQZ256rr %ymm16, %ymm1
; CHECK: %ymm16 = VPAVGBZ256rm %ymm16, %rip, 1, _, %rax, _
%ymm16 = VPAVGBZ256rm %ymm16, %rip, 1, _, %rax, _
; CHECK: %ymm16 = VPAVGBZ256rr %ymm16, %ymm1
@@ -2924,18 +3004,20 @@ body: |
%ymm16 = VPMOVZXWQZ256rm %rip, 1, _, %rax, _
; CHECK: %ymm16 = VPMOVZXWQZ256rr %xmm0
%ymm16 = VPMOVZXWQZ256rr %xmm0
+ ; CHECK: %ymm16 = VBROADCASTF32X2Z256m %rip, 1, _, %rax, _
+ %ymm16 = VBROADCASTF32X2Z256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VBROADCASTF32X2Z256r %xmm16
+ %ymm16 = VBROADCASTF32X2Z256r %xmm16
+ ; CHECK: %ymm16 = VBROADCASTF32X4Z256rm %rip, 1, _, %rax, _
+ %ymm16 = VBROADCASTF32X4Z256rm %rip, 1, _, %rax, _
; CHECK: %ymm16 = VBROADCASTSDZ256m %rip, 1, _, %rax, _
%ymm16 = VBROADCASTSDZ256m %rip, 1, _, %rax, _
; CHECK: %ymm16 = VBROADCASTSDZ256r %xmm0
%ymm16 = VBROADCASTSDZ256r %xmm0
- ; CHECK: %ymm16 = VBROADCASTSDZ256r_s %xmm0
- %ymm16 = VBROADCASTSDZ256r_s %xmm0
; CHECK: %ymm16 = VBROADCASTSSZ256m %rip, 1, _, %rax, _
%ymm16 = VBROADCASTSSZ256m %rip, 1, _, %rax, _
; CHECK: %ymm16 = VBROADCASTSSZ256r %xmm0
%ymm16 = VBROADCASTSSZ256r %xmm0
- ; CHECK: %ymm16 = VBROADCASTSSZ256r_s %xmm0
- %ymm16 = VBROADCASTSSZ256r_s %xmm0
; CHECK: %ymm16 = VPBROADCASTBZ256m %rip, 1, _, %rax, _
%ymm16 = VPBROADCASTBZ256m %rip, 1, _, %rax, _
; CHECK: %ymm16 = VPBROADCASTBZ256r %xmm0
@@ -2948,6 +3030,12 @@ body: |
%ymm16 = VPBROADCASTWZ256m %rip, 1, _, %rax, _
; CHECK: %ymm16 = VPBROADCASTWZ256r %xmm0
%ymm16 = VPBROADCASTWZ256r %xmm0
+ ; CHECK: %ymm16 = VBROADCASTI32X4Z256rm %rip, 1, _, %rax, _
+ %ymm16 = VBROADCASTI32X4Z256rm %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VBROADCASTI32X2Z256m %rip, 1, _, %rax, _
+ %ymm16 = VBROADCASTI32X2Z256m %rip, 1, _, %rax, _
+ ; CHECK: %ymm16 = VBROADCASTI32X2Z256r %xmm16
+ %ymm16 = VBROADCASTI32X2Z256r %xmm16
; CHECK: %ymm16 = VPBROADCASTQZ256m %rip, 1, _, %rax, _
%ymm16 = VPBROADCASTQZ256m %rip, 1, _, %rax, _
; CHECK: %ymm16 = VPBROADCASTQZ256r %xmm0
@@ -3360,6 +3448,14 @@ body: |
%xmm16 = VPANDQZ128rm %xmm16, %rip, 1, _, %rax, _
; CHECK: %xmm16 = VPANDQZ128rr %xmm16, %xmm1
%xmm16 = VPANDQZ128rr %xmm16, %xmm1
+ ; CHECK: %xmm16 = VPANDNDZ128rm %xmm16, %rip, 1, _, %rax, _
+ %xmm16 = VPANDNDZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPANDNDZ128rr %xmm16, %xmm1
+ %xmm16 = VPANDNDZ128rr %xmm16, %xmm1
+ ; CHECK: %xmm16 = VPANDNQZ128rm %xmm16, %rip, 1, _, %rax, _
+ %xmm16 = VPANDNQZ128rm %xmm16, %rip, 1, _, %rax, _
+ ; CHECK: %xmm16 = VPANDNQZ128rr %xmm16, %xmm1
+ %xmm16 = VPANDNQZ128rr %xmm16, %xmm1
; CHECK: %xmm16 = VPAVGBZ128rm %xmm16, %rip, 1, _, %rax, _
%xmm16 = VPAVGBZ128rm %xmm16, %rip, 1, _, %rax, _
; CHECK: %xmm16 = VPAVGBZ128rr %xmm16, %xmm1
@@ -3932,8 +4028,6 @@ body: |
%xmm16 = VBROADCASTSSZ128m %rip, _, _, _, _
; CHECK: %xmm16 = VBROADCASTSSZ128r %xmm16
%xmm16 = VBROADCASTSSZ128r %xmm16
- ; CHECK: %xmm16 = VBROADCASTSSZ128r_s %xmm16
- %xmm16 = VBROADCASTSSZ128r_s %xmm16
; CHECK: %xmm16 = VPBROADCASTBZ128m %rip, _, _, _, _
%xmm16 = VPBROADCASTBZ128m %rip, _, _, _, _
; CHECK: %xmm16 = VPBROADCASTBZ128r %xmm16
@@ -3950,6 +4044,10 @@ body: |
%xmm16 = VPBROADCASTWZ128m %rip, _, _, _, _
; CHECK: %xmm16 = VPBROADCASTWZ128r %xmm16
%xmm16 = VPBROADCASTWZ128r %xmm16
+ ; CHECK: %xmm16 = VBROADCASTI32X2Z128m %rip, _, _, _, _
+ %xmm16 = VBROADCASTI32X2Z128m %rip, _, _, _, _
+ ; CHECK: %xmm16 = VBROADCASTI32X2Z128r %xmm0
+ %xmm16 = VBROADCASTI32X2Z128r %xmm0
; CHECK: %xmm16 = VCVTPS2PHZ128rr %xmm16, 2
%xmm16 = VCVTPS2PHZ128rr %xmm16, 2
; CHECK: VCVTPS2PHZ128mr %rdi, %xmm16, 1, _, 0, _, _
@@ -3970,6 +4068,14 @@ body: |
%xmm16 = VPALIGNRZ128rmi %xmm16, _, _, _, _, _, _
; CHECK: %xmm16 = VPALIGNRZ128rri %xmm16, %xmm1, 15
%xmm16 = VPALIGNRZ128rri %xmm16, %xmm1, 15
+ ; CHECK: VEXTRACTPSZmr %rdi, 1, _, 0, _, %xmm16, _
+ VEXTRACTPSZmr %rdi, 1, _, 0, _, %xmm16, _
+ ; CHECK: %eax = VEXTRACTPSZrr %xmm16, _
+ %eax = VEXTRACTPSZrr %xmm16, _
+ ; CHECK: %xmm16 = VINSERTPSZrm %xmm16, %rdi, _, _, _, _, _
+ %xmm16 = VINSERTPSZrm %xmm16, %rdi, _, _, _, _, _
+ ; CHECK: %xmm16 = VINSERTPSZrr %xmm16, %xmm16, _
+ %xmm16 = VINSERTPSZrr %xmm16, %xmm16, _
RET 0, %zmm0, %zmm1
...
@@ -4300,6 +4406,8 @@ body: |
VPEXTRWZmr %rdi, 1, _, 0, _, %xmm16, 3
; CHECK: %eax = VPEXTRWZrr %xmm16, 1
%eax = VPEXTRWZrr %xmm16, 1
+ ; CHECK: %eax = VPEXTRWZrr_REV %xmm16, 1
+ %eax = VPEXTRWZrr_REV %xmm16, 1
; CHECK: %xmm16 = VPINSRBZrm %xmm16, %rsi, 1, _, 0, _, 3
%xmm16 = VPINSRBZrm %xmm16, %rsi, 1, _, 0, _, 3
; CHECK: %xmm16 = VPINSRBZrr %xmm16, %edi, 5
@@ -4342,8 +4450,12 @@ body: |
%edi = VCVTSD2SIZrr %xmm16
; CHECK: %xmm16 = VCVTSD2SSZrm %xmm16, %rdi, 1, _, 0, _
%xmm16 = VCVTSD2SSZrm %xmm16, %rdi, 1, _, 0, _
+ ; CHECK: %xmm16 = VCVTSD2SSZrm_Int %xmm16, %rdi, 1, _, 0, _
+ %xmm16 = VCVTSD2SSZrm_Int %xmm16, %rdi, 1, _, 0, _
; CHECK: %xmm16 = VCVTSD2SSZrr %xmm16, _
%xmm16 = VCVTSD2SSZrr %xmm16, _
+ ; CHECK: %xmm16 = VCVTSD2SSZrr_Int %xmm16, _
+ %xmm16 = VCVTSD2SSZrr_Int %xmm16, _
; CHECK: %xmm16 = VCVTSI2SDZrm %xmm16, %rdi, 1, _, 0, _
%xmm16 = VCVTSI2SDZrm %xmm16, %rdi, 1, _, 0, _
; CHECK: %xmm16 = VCVTSI2SDZrm_Int %xmm16, %rdi, 1, _, 0, _
@@ -4360,10 +4472,30 @@ body: |
%xmm16 = VCVTSI2SSZrr %xmm16, _
; CHECK: %xmm16 = VCVTSI2SSZrr_Int %xmm16, _
%xmm16 = VCVTSI2SSZrr_Int %xmm16, _
+ ; CHECK: %xmm16 = VCVTSI642SDZrm %xmm16, %rdi, 1, _, 0, _
+ %xmm16 = VCVTSI642SDZrm %xmm16, %rdi, 1, _, 0, _
+ ; CHECK: %xmm16 = VCVTSI642SDZrm_Int %xmm16, %rdi, 1, _, 0, _
+ %xmm16 = VCVTSI642SDZrm_Int %xmm16, %rdi, 1, _, 0, _
+ ; CHECK: %xmm16 = VCVTSI642SDZrr %xmm16, _
+ %xmm16 = VCVTSI642SDZrr %xmm16, _
+ ; CHECK: %xmm16 = VCVTSI642SDZrr_Int %xmm16, _
+ %xmm16 = VCVTSI642SDZrr_Int %xmm16, _
+ ; CHECK: %xmm16 = VCVTSI642SSZrm %xmm16, %rdi, 1, _, 0, _
+ %xmm16 = VCVTSI642SSZrm %xmm16, %rdi, 1, _, 0, _
+ ; CHECK: %xmm16 = VCVTSI642SSZrm_Int %xmm16, %rdi, 1, _, 0, _
+ %xmm16 = VCVTSI642SSZrm_Int %xmm16, %rdi, 1, _, 0, _
+ ; CHECK: %xmm16 = VCVTSI642SSZrr %xmm16, _
+ %xmm16 = VCVTSI642SSZrr %xmm16, _
+ ; CHECK: %xmm16 = VCVTSI642SSZrr_Int %xmm16, _
+ %xmm16 = VCVTSI642SSZrr_Int %xmm16, _
; CHECK: %xmm16 = VCVTSS2SDZrm %xmm16, %rdi, 1, _, 0, _
%xmm16 = VCVTSS2SDZrm %xmm16, %rdi, 1, _, 0, _
+ ; CHECK: %xmm16 = VCVTSS2SDZrm_Int %xmm16, %rdi, 1, _, 0, _
+ %xmm16 = VCVTSS2SDZrm_Int %xmm16, %rdi, 1, _, 0, _
; CHECK: %xmm16 = VCVTSS2SDZrr %xmm16, _
%xmm16 = VCVTSS2SDZrr %xmm16, _
+ ; CHECK: %xmm16 = VCVTSS2SDZrr_Int %xmm16, _
+ %xmm16 = VCVTSS2SDZrr_Int %xmm16, _
; CHECK: %rdi = VCVTSS2SI64Zrm %rdi, %xmm16, 1, _, 0
%rdi = VCVTSS2SI64Zrm %rdi, %xmm16, 1, _, 0
; CHECK: %rdi = VCVTSS2SI64Zrr %xmm16
@@ -4416,6 +4548,12 @@ body: |
%xmm16 = VMOVSDZrm %rip, _, _, _, _
; CHECK: %xmm16 = VMOVSDZrr %xmm16, _
%xmm16 = VMOVSDZrr %xmm16, _
+ ; CHECK: %xmm16 = VMOVSDZrr_REV %xmm16, _
+ %xmm16 = VMOVSDZrr_REV %xmm16, _
+ ; CHECK: %rax = VMOVSDto64Zrr %xmm16
+ %rax = VMOVSDto64Zrr %xmm16
+ ; CHECK: VMOVSDto64Zmr %rdi, %xmm16, _, _, _, _
+ VMOVSDto64Zmr %rdi, %xmm16, _, _, _, _
; CHECK: VMOVSSZmr %rdi, %xmm16, _, _, _, _
VMOVSSZmr %rdi, %xmm16, _, _, _, _
; CHECK: %xmm16 = VMOVSSZrm %rip, _, _, _, _
@@ -4424,8 +4562,14 @@ body: |
%xmm16 = VMOVSSZrr %xmm16, _
; CHECK: %xmm16 = VMOVSSZrr_REV %xmm16, _
%xmm16 = VMOVSSZrr_REV %xmm16, _
+ ; CHECK: VMOVSS2DIZmr %rdi, %xmm16, _, _, _, _
+ VMOVSS2DIZmr %rdi, %xmm16, _, _, _, _
+ ; CHECK: %eax = VMOVSS2DIZrr %xmm16
+ %eax = VMOVSS2DIZrr %xmm16
; CHECK: %xmm16 = VMOV64toPQIZrr %rdi
%xmm16 = VMOV64toPQIZrr %rdi
+ ; CHECK: %xmm16 = VMOV64toPQIZrm %rdi, _, _, _, _
+ %xmm16 = VMOV64toPQIZrm %rdi, _, _, _, _
; CHECK: %xmm16 = VMOV64toSDZrr %rdi
%xmm16 = VMOV64toSDZrr %rdi
; CHECK: %xmm16 = VMOVDI2PDIZrm %rip, _, _, _, _
@@ -4440,10 +4584,14 @@ body: |
VMOVPDI2DIZmr %rdi, %xmm16, _, _, _, _
; CHECK: %edi = VMOVPDI2DIZrr %xmm16
%edi = VMOVPDI2DIZrr %xmm16
+ ; CHECK: %xmm16 = VMOVPQI2QIZrr %xmm16
+ %xmm16 = VMOVPQI2QIZrr %xmm16
; CHECK: VMOVPQI2QIZmr %rdi, %xmm16, _, _, _, _
VMOVPQI2QIZmr %rdi, %xmm16, _, _, _, _
; CHECK: %rdi = VMOVPQIto64Zrr %xmm16
%rdi = VMOVPQIto64Zrr %xmm16
+ ; CHECK: VMOVPQIto64Zmr %rdi, %xmm16, _, _, _, _
+ VMOVPQIto64Zmr %rdi, %xmm16, _, _, _, _
; CHECK: %xmm16 = VMOVQI2PQIZrm %rip, _, _, _, _
%xmm16 = VMOVQI2PQIZrm %rip, _, _, _, _
; CHECK: %xmm16 = VMOVZPQILo2PQIZrr %xmm16
diff --git a/test/CodeGen/X86/extract-store.ll b/test/CodeGen/X86/extract-store.ll
index f0e4d1407728..1751f03731d3 100644
--- a/test/CodeGen/X86/extract-store.ll
+++ b/test/CodeGen/X86/extract-store.ll
@@ -1,116 +1,537 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX
-
-define void @extract_i8_0(i8* nocapture %dst, <16 x i8> %foo) {
-; SSE2-LABEL: extract_i8_0:
-; SSE2: # BB#0:
-; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: movb %al, (%rdi)
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: extract_i8_0:
-; SSE41: # BB#0:
-; SSE41-NEXT: pextrb $0, %xmm0, (%rdi)
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: extract_i8_0:
-; AVX: # BB#0:
-; AVX-NEXT: vpextrb $0, %xmm0, (%rdi)
-; AVX-NEXT: retq
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32 --check-prefix=SSE-X32 --check-prefix=SSE2-X32
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64 --check-prefix=SSE-X64 --check-prefix=SSE2-X64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X32 --check-prefix=SSE-X32 --check-prefix=SSE41-X32
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64 --check-prefix=SSE-X64 --check-prefix=SSE41-X64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X32 --check-prefix=AVX-X32
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64 --check-prefix=AVX-X64
+
+define void @extract_i8_0(i8* nocapture %dst, <16 x i8> %foo) nounwind {
+; SSE2-X32-LABEL: extract_i8_0:
+; SSE2-X32: # BB#0:
+; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE2-X32-NEXT: movd %xmm0, %ecx
+; SSE2-X32-NEXT: movb %cl, (%eax)
+; SSE2-X32-NEXT: retl
+;
+; SSE2-X64-LABEL: extract_i8_0:
+; SSE2-X64: # BB#0:
+; SSE2-X64-NEXT: movd %xmm0, %eax
+; SSE2-X64-NEXT: movb %al, (%rdi)
+; SSE2-X64-NEXT: retq
+;
+; SSE41-X32-LABEL: extract_i8_0:
+; SSE41-X32: # BB#0:
+; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE41-X32-NEXT: pextrb $0, %xmm0, (%eax)
+; SSE41-X32-NEXT: retl
+;
+; SSE41-X64-LABEL: extract_i8_0:
+; SSE41-X64: # BB#0:
+; SSE41-X64-NEXT: pextrb $0, %xmm0, (%rdi)
+; SSE41-X64-NEXT: retq
+;
+; AVX-X32-LABEL: extract_i8_0:
+; AVX-X32: # BB#0:
+; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-X32-NEXT: vpextrb $0, %xmm0, (%eax)
+; AVX-X32-NEXT: retl
+;
+; AVX-X64-LABEL: extract_i8_0:
+; AVX-X64: # BB#0:
+; AVX-X64-NEXT: vpextrb $0, %xmm0, (%rdi)
+; AVX-X64-NEXT: retq
%vecext = extractelement <16 x i8> %foo, i32 0
store i8 %vecext, i8* %dst, align 1
ret void
}
-define void @extract_i8_15(i8* nocapture %dst, <16 x i8> %foo) {
-; SSE2-LABEL: extract_i8_15:
-; SSE2: # BB#0:
-; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: movb %al, (%rdi)
-; SSE2-NEXT: retq
+define void @extract_i8_3(i8* nocapture %dst, <16 x i8> %foo) nounwind {
+; SSE2-X32-LABEL: extract_i8_3:
+; SSE2-X32: # BB#0:
+; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE2-X32-NEXT: movd %xmm0, %ecx
+; SSE2-X32-NEXT: shrl $24, %ecx
+; SSE2-X32-NEXT: movb %cl, (%eax)
+; SSE2-X32-NEXT: retl
+;
+; SSE2-X64-LABEL: extract_i8_3:
+; SSE2-X64: # BB#0:
+; SSE2-X64-NEXT: movd %xmm0, %eax
+; SSE2-X64-NEXT: shrl $24, %eax
+; SSE2-X64-NEXT: movb %al, (%rdi)
+; SSE2-X64-NEXT: retq
+;
+; SSE41-X32-LABEL: extract_i8_3:
+; SSE41-X32: # BB#0:
+; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE41-X32-NEXT: pextrb $3, %xmm0, (%eax)
+; SSE41-X32-NEXT: retl
+;
+; SSE41-X64-LABEL: extract_i8_3:
+; SSE41-X64: # BB#0:
+; SSE41-X64-NEXT: pextrb $3, %xmm0, (%rdi)
+; SSE41-X64-NEXT: retq
+;
+; AVX-X32-LABEL: extract_i8_3:
+; AVX-X32: # BB#0:
+; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-X32-NEXT: vpextrb $3, %xmm0, (%eax)
+; AVX-X32-NEXT: retl
+;
+; AVX-X64-LABEL: extract_i8_3:
+; AVX-X64: # BB#0:
+; AVX-X64-NEXT: vpextrb $3, %xmm0, (%rdi)
+; AVX-X64-NEXT: retq
+ %vecext = extractelement <16 x i8> %foo, i32 3
+ store i8 %vecext, i8* %dst, align 1
+ ret void
+}
+
+define void @extract_i8_15(i8* nocapture %dst, <16 x i8> %foo) nounwind {
+; SSE2-X32-LABEL: extract_i8_15:
+; SSE2-X32: # BB#0:
+; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE2-X32-NEXT: pextrw $7, %xmm0, %ecx
+; SSE2-X32-NEXT: movb %ch, (%eax)
+; SSE2-X32-NEXT: retl
+;
+; SSE2-X64-LABEL: extract_i8_15:
+; SSE2-X64: # BB#0:
+; SSE2-X64-NEXT: pextrw $7, %xmm0, %eax
+; SSE2-X64-NEXT: movb %ah, (%rdi) # NOREX
+; SSE2-X64-NEXT: retq
+;
+; SSE41-X32-LABEL: extract_i8_15:
+; SSE41-X32: # BB#0:
+; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE41-X32-NEXT: pextrb $15, %xmm0, (%eax)
+; SSE41-X32-NEXT: retl
+;
+; SSE41-X64-LABEL: extract_i8_15:
+; SSE41-X64: # BB#0:
+; SSE41-X64-NEXT: pextrb $15, %xmm0, (%rdi)
+; SSE41-X64-NEXT: retq
;
-; SSE41-LABEL: extract_i8_15:
-; SSE41: # BB#0:
-; SSE41-NEXT: pextrb $15, %xmm0, (%rdi)
-; SSE41-NEXT: retq
+; AVX-X32-LABEL: extract_i8_15:
+; AVX-X32: # BB#0:
+; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-X32-NEXT: vpextrb $15, %xmm0, (%eax)
+; AVX-X32-NEXT: retl
;
-; AVX-LABEL: extract_i8_15:
-; AVX: # BB#0:
-; AVX-NEXT: vpextrb $15, %xmm0, (%rdi)
-; AVX-NEXT: retq
+; AVX-X64-LABEL: extract_i8_15:
+; AVX-X64: # BB#0:
+; AVX-X64-NEXT: vpextrb $15, %xmm0, (%rdi)
+; AVX-X64-NEXT: retq
%vecext = extractelement <16 x i8> %foo, i32 15
store i8 %vecext, i8* %dst, align 1
ret void
}
-define void @extract_i16_0(i16* nocapture %dst, <8 x i16> %foo) {
-; SSE2-LABEL: extract_i16_0:
-; SSE2: # BB#0:
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: movw %ax, (%rdi)
-; SSE2-NEXT: retq
+define void @extract_i16_0(i16* nocapture %dst, <8 x i16> %foo) nounwind {
+; SSE2-X32-LABEL: extract_i16_0:
+; SSE2-X32: # BB#0:
+; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE2-X32-NEXT: movd %xmm0, %ecx
+; SSE2-X32-NEXT: movw %cx, (%eax)
+; SSE2-X32-NEXT: retl
;
-; SSE41-LABEL: extract_i16_0:
-; SSE41: # BB#0:
-; SSE41-NEXT: pextrw $0, %xmm0, (%rdi)
-; SSE41-NEXT: retq
+; SSE2-X64-LABEL: extract_i16_0:
+; SSE2-X64: # BB#0:
+; SSE2-X64-NEXT: movd %xmm0, %eax
+; SSE2-X64-NEXT: movw %ax, (%rdi)
+; SSE2-X64-NEXT: retq
;
-; AVX-LABEL: extract_i16_0:
-; AVX: # BB#0:
-; AVX-NEXT: vpextrw $0, %xmm0, (%rdi)
-; AVX-NEXT: retq
+; SSE41-X32-LABEL: extract_i16_0:
+; SSE41-X32: # BB#0:
+; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE41-X32-NEXT: pextrw $0, %xmm0, (%eax)
+; SSE41-X32-NEXT: retl
+;
+; SSE41-X64-LABEL: extract_i16_0:
+; SSE41-X64: # BB#0:
+; SSE41-X64-NEXT: pextrw $0, %xmm0, (%rdi)
+; SSE41-X64-NEXT: retq
+;
+; AVX-X32-LABEL: extract_i16_0:
+; AVX-X32: # BB#0:
+; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-X32-NEXT: vpextrw $0, %xmm0, (%eax)
+; AVX-X32-NEXT: retl
+;
+; AVX-X64-LABEL: extract_i16_0:
+; AVX-X64: # BB#0:
+; AVX-X64-NEXT: vpextrw $0, %xmm0, (%rdi)
+; AVX-X64-NEXT: retq
%vecext = extractelement <8 x i16> %foo, i32 0
store i16 %vecext, i16* %dst, align 1
ret void
}
-define void @extract_i16_7(i16* nocapture %dst, <8 x i16> %foo) {
-; SSE2-LABEL: extract_i16_7:
-; SSE2: # BB#0:
-; SSE2-NEXT: pextrw $7, %xmm0, %eax
-; SSE2-NEXT: movw %ax, (%rdi)
-; SSE2-NEXT: retq
+define void @extract_i16_7(i16* nocapture %dst, <8 x i16> %foo) nounwind {
+; SSE2-X32-LABEL: extract_i16_7:
+; SSE2-X32: # BB#0:
+; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE2-X32-NEXT: pextrw $7, %xmm0, %ecx
+; SSE2-X32-NEXT: movw %cx, (%eax)
+; SSE2-X32-NEXT: retl
+;
+; SSE2-X64-LABEL: extract_i16_7:
+; SSE2-X64: # BB#0:
+; SSE2-X64-NEXT: pextrw $7, %xmm0, %eax
+; SSE2-X64-NEXT: movw %ax, (%rdi)
+; SSE2-X64-NEXT: retq
+;
+; SSE41-X32-LABEL: extract_i16_7:
+; SSE41-X32: # BB#0:
+; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE41-X32-NEXT: pextrw $7, %xmm0, (%eax)
+; SSE41-X32-NEXT: retl
+;
+; SSE41-X64-LABEL: extract_i16_7:
+; SSE41-X64: # BB#0:
+; SSE41-X64-NEXT: pextrw $7, %xmm0, (%rdi)
+; SSE41-X64-NEXT: retq
;
-; SSE41-LABEL: extract_i16_7:
-; SSE41: # BB#0:
-; SSE41-NEXT: pextrw $7, %xmm0, (%rdi)
-; SSE41-NEXT: retq
+; AVX-X32-LABEL: extract_i16_7:
+; AVX-X32: # BB#0:
+; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-X32-NEXT: vpextrw $7, %xmm0, (%eax)
+; AVX-X32-NEXT: retl
;
-; AVX-LABEL: extract_i16_7:
-; AVX: # BB#0:
-; AVX-NEXT: vpextrw $7, %xmm0, (%rdi)
-; AVX-NEXT: retq
+; AVX-X64-LABEL: extract_i16_7:
+; AVX-X64: # BB#0:
+; AVX-X64-NEXT: vpextrw $7, %xmm0, (%rdi)
+; AVX-X64-NEXT: retq
%vecext = extractelement <8 x i16> %foo, i32 7
store i16 %vecext, i16* %dst, align 1
ret void
}
-define void @extract_i8_undef(i8* nocapture %dst, <16 x i8> %foo) {
-; SSE-LABEL: extract_i8_undef:
-; SSE: # BB#0:
-; SSE-NEXT: retq
+define void @extract_i32_0(i32* nocapture %dst, <4 x i32> %foo) nounwind {
+; SSE-X32-LABEL: extract_i32_0:
+; SSE-X32: # BB#0:
+; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE-X32-NEXT: movss %xmm0, (%eax)
+; SSE-X32-NEXT: retl
+;
+; SSE-X64-LABEL: extract_i32_0:
+; SSE-X64: # BB#0:
+; SSE-X64-NEXT: movss %xmm0, (%rdi)
+; SSE-X64-NEXT: retq
+;
+; AVX-X32-LABEL: extract_i32_0:
+; AVX-X32: # BB#0:
+; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-X32-NEXT: vmovss %xmm0, (%eax)
+; AVX-X32-NEXT: retl
+;
+; AVX-X64-LABEL: extract_i32_0:
+; AVX-X64: # BB#0:
+; AVX-X64-NEXT: vmovss %xmm0, (%rdi)
+; AVX-X64-NEXT: retq
+ %vecext = extractelement <4 x i32> %foo, i32 0
+ store i32 %vecext, i32* %dst, align 1
+ ret void
+}
+
+define void @extract_i32_3(i32* nocapture %dst, <4 x i32> %foo) nounwind {
+; SSE2-X32-LABEL: extract_i32_3:
+; SSE2-X32: # BB#0:
+; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE2-X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-X32-NEXT: movd %xmm0, (%eax)
+; SSE2-X32-NEXT: retl
+;
+; SSE2-X64-LABEL: extract_i32_3:
+; SSE2-X64: # BB#0:
+; SSE2-X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-X64-NEXT: movd %xmm0, (%rdi)
+; SSE2-X64-NEXT: retq
+;
+; SSE41-X32-LABEL: extract_i32_3:
+; SSE41-X32: # BB#0:
+; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE41-X32-NEXT: pextrd $3, %xmm0, (%eax)
+; SSE41-X32-NEXT: retl
+;
+; SSE41-X64-LABEL: extract_i32_3:
+; SSE41-X64: # BB#0:
+; SSE41-X64-NEXT: pextrd $3, %xmm0, (%rdi)
+; SSE41-X64-NEXT: retq
+;
+; AVX-X32-LABEL: extract_i32_3:
+; AVX-X32: # BB#0:
+; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-X32-NEXT: vpextrd $3, %xmm0, (%eax)
+; AVX-X32-NEXT: retl
+;
+; AVX-X64-LABEL: extract_i32_3:
+; AVX-X64: # BB#0:
+; AVX-X64-NEXT: vpextrd $3, %xmm0, (%rdi)
+; AVX-X64-NEXT: retq
+ %vecext = extractelement <4 x i32> %foo, i32 3
+ store i32 %vecext, i32* %dst, align 1
+ ret void
+}
+
+define void @extract_i64_0(i64* nocapture %dst, <2 x i64> %foo) nounwind {
+; SSE-X32-LABEL: extract_i64_0:
+; SSE-X32: # BB#0:
+; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE-X32-NEXT: movlps %xmm0, (%eax)
+; SSE-X32-NEXT: retl
+;
+; SSE-X64-LABEL: extract_i64_0:
+; SSE-X64: # BB#0:
+; SSE-X64-NEXT: movlps %xmm0, (%rdi)
+; SSE-X64-NEXT: retq
+;
+; AVX-X32-LABEL: extract_i64_0:
+; AVX-X32: # BB#0:
+; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-X32-NEXT: vmovlps %xmm0, (%eax)
+; AVX-X32-NEXT: retl
+;
+; AVX-X64-LABEL: extract_i64_0:
+; AVX-X64: # BB#0:
+; AVX-X64-NEXT: vmovlps %xmm0, (%rdi)
+; AVX-X64-NEXT: retq
+ %vecext = extractelement <2 x i64> %foo, i32 0
+ store i64 %vecext, i64* %dst, align 1
+ ret void
+}
+
+define void @extract_i64_1(i64* nocapture %dst, <2 x i64> %foo) nounwind {
+; SSE-X32-LABEL: extract_i64_1:
+; SSE-X32: # BB#0:
+; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE-X32-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-X32-NEXT: movq %xmm0, (%eax)
+; SSE-X32-NEXT: retl
+;
+; SSE2-X64-LABEL: extract_i64_1:
+; SSE2-X64: # BB#0:
+; SSE2-X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-X64-NEXT: movq %xmm0, (%rdi)
+; SSE2-X64-NEXT: retq
+;
+; SSE41-X64-LABEL: extract_i64_1:
+; SSE41-X64: # BB#0:
+; SSE41-X64-NEXT: pextrq $1, %xmm0, (%rdi)
+; SSE41-X64-NEXT: retq
+;
+; AVX-X32-LABEL: extract_i64_1:
+; AVX-X32: # BB#0:
+; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX-X32-NEXT: vmovq %xmm0, (%eax)
+; AVX-X32-NEXT: retl
;
-; AVX-LABEL: extract_i8_undef:
-; AVX: # BB#0:
-; AVX-NEXT: retq
+; AVX-X64-LABEL: extract_i64_1:
+; AVX-X64: # BB#0:
+; AVX-X64-NEXT: vpextrq $1, %xmm0, (%rdi)
+; AVX-X64-NEXT: retq
+ %vecext = extractelement <2 x i64> %foo, i32 1
+ store i64 %vecext, i64* %dst, align 1
+ ret void
+}
+
+define void @extract_f32_0(float* nocapture %dst, <4 x float> %foo) nounwind {
+; SSE-X32-LABEL: extract_f32_0:
+; SSE-X32: # BB#0:
+; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE-X32-NEXT: movss %xmm0, (%eax)
+; SSE-X32-NEXT: retl
+;
+; SSE-X64-LABEL: extract_f32_0:
+; SSE-X64: # BB#0:
+; SSE-X64-NEXT: movss %xmm0, (%rdi)
+; SSE-X64-NEXT: retq
+;
+; AVX-X32-LABEL: extract_f32_0:
+; AVX-X32: # BB#0:
+; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-X32-NEXT: vmovss %xmm0, (%eax)
+; AVX-X32-NEXT: retl
+;
+; AVX-X64-LABEL: extract_f32_0:
+; AVX-X64: # BB#0:
+; AVX-X64-NEXT: vmovss %xmm0, (%rdi)
+; AVX-X64-NEXT: retq
+ %vecext = extractelement <4 x float> %foo, i32 0
+ store float %vecext, float* %dst, align 1
+ ret void
+}
+
+define void @extract_f32_3(float* nocapture %dst, <4 x float> %foo) nounwind {
+; SSE2-X32-LABEL: extract_f32_3:
+; SSE2-X32: # BB#0:
+; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE2-X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-X32-NEXT: movss %xmm0, (%eax)
+; SSE2-X32-NEXT: retl
+;
+; SSE2-X64-LABEL: extract_f32_3:
+; SSE2-X64: # BB#0:
+; SSE2-X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-X64-NEXT: movss %xmm0, (%rdi)
+; SSE2-X64-NEXT: retq
+;
+; SSE41-X32-LABEL: extract_f32_3:
+; SSE41-X32: # BB#0:
+; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE41-X32-NEXT: extractps $3, %xmm0, (%eax)
+; SSE41-X32-NEXT: retl
+;
+; SSE41-X64-LABEL: extract_f32_3:
+; SSE41-X64: # BB#0:
+; SSE41-X64-NEXT: extractps $3, %xmm0, (%rdi)
+; SSE41-X64-NEXT: retq
+;
+; AVX-X32-LABEL: extract_f32_3:
+; AVX-X32: # BB#0:
+; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-X32-NEXT: vextractps $3, %xmm0, (%eax)
+; AVX-X32-NEXT: retl
+;
+; AVX-X64-LABEL: extract_f32_3:
+; AVX-X64: # BB#0:
+; AVX-X64-NEXT: vextractps $3, %xmm0, (%rdi)
+; AVX-X64-NEXT: retq
+ %vecext = extractelement <4 x float> %foo, i32 3
+ store float %vecext, float* %dst, align 1
+ ret void
+}
+
+define void @extract_f64_0(double* nocapture %dst, <2 x double> %foo) nounwind {
+; SSE-X32-LABEL: extract_f64_0:
+; SSE-X32: # BB#0:
+; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE-X32-NEXT: movlps %xmm0, (%eax)
+; SSE-X32-NEXT: retl
+;
+; SSE-X64-LABEL: extract_f64_0:
+; SSE-X64: # BB#0:
+; SSE-X64-NEXT: movlps %xmm0, (%rdi)
+; SSE-X64-NEXT: retq
+;
+; AVX-X32-LABEL: extract_f64_0:
+; AVX-X32: # BB#0:
+; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-X32-NEXT: vmovlps %xmm0, (%eax)
+; AVX-X32-NEXT: retl
+;
+; AVX-X64-LABEL: extract_f64_0:
+; AVX-X64: # BB#0:
+; AVX-X64-NEXT: vmovlps %xmm0, (%rdi)
+; AVX-X64-NEXT: retq
+ %vecext = extractelement <2 x double> %foo, i32 0
+ store double %vecext, double* %dst, align 1
+ ret void
+}
+
+define void @extract_f64_1(double* nocapture %dst, <2 x double> %foo) nounwind {
+; SSE-X32-LABEL: extract_f64_1:
+; SSE-X32: # BB#0:
+; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE-X32-NEXT: movhpd %xmm0, (%eax)
+; SSE-X32-NEXT: retl
+;
+; SSE-X64-LABEL: extract_f64_1:
+; SSE-X64: # BB#0:
+; SSE-X64-NEXT: movhpd %xmm0, (%rdi)
+; SSE-X64-NEXT: retq
+;
+; AVX-X32-LABEL: extract_f64_1:
+; AVX-X32: # BB#0:
+; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-X32-NEXT: vmovhpd %xmm0, (%eax)
+; AVX-X32-NEXT: retl
+;
+; AVX-X64-LABEL: extract_f64_1:
+; AVX-X64: # BB#0:
+; AVX-X64-NEXT: vmovhpd %xmm0, (%rdi)
+; AVX-X64-NEXT: retq
+ %vecext = extractelement <2 x double> %foo, i32 1
+ store double %vecext, double* %dst, align 1
+ ret void
+}
+
+define void @extract_i8_undef(i8* nocapture %dst, <16 x i8> %foo) nounwind {
+; X32-LABEL: extract_i8_undef:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: extract_i8_undef:
+; X64: # BB#0:
+; X64-NEXT: retq
%vecext = extractelement <16 x i8> %foo, i32 16 ; undef
store i8 %vecext, i8* %dst, align 1
ret void
}
-define void @extract_i16_undef(i16* nocapture %dst, <8 x i16> %foo) {
-; SSE-LABEL: extract_i16_undef:
-; SSE: # BB#0:
-; SSE-NEXT: retq
+define void @extract_i16_undef(i16* nocapture %dst, <8 x i16> %foo) nounwind {
+; X32-LABEL: extract_i16_undef:
+; X32: # BB#0:
+; X32-NEXT: retl
;
-; AVX-LABEL: extract_i16_undef:
-; AVX: # BB#0:
-; AVX-NEXT: retq
+; X64-LABEL: extract_i16_undef:
+; X64: # BB#0:
+; X64-NEXT: retq
%vecext = extractelement <8 x i16> %foo, i32 9 ; undef
store i16 %vecext, i16* %dst, align 1
ret void
}
+
+define void @extract_i32_undef(i32* nocapture %dst, <4 x i32> %foo) nounwind {
+; X32-LABEL: extract_i32_undef:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: extract_i32_undef:
+; X64: # BB#0:
+; X64-NEXT: retq
+ %vecext = extractelement <4 x i32> %foo, i32 6 ; undef
+ store i32 %vecext, i32* %dst, align 1
+ ret void
+}
+
+define void @extract_i64_undef(i64* nocapture %dst, <2 x i64> %foo) nounwind {
+; X32-LABEL: extract_i64_undef:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: extract_i64_undef:
+; X64: # BB#0:
+; X64-NEXT: retq
+ %vecext = extractelement <2 x i64> %foo, i32 2 ; undef
+ store i64 %vecext, i64* %dst, align 1
+ ret void
+}
+
+define void @extract_f32_undef(float* nocapture %dst, <4 x float> %foo) nounwind {
+; X32-LABEL: extract_f32_undef:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: extract_f32_undef:
+; X64: # BB#0:
+; X64-NEXT: retq
+ %vecext = extractelement <4 x float> %foo, i32 6 ; undef
+ store float %vecext, float* %dst, align 1
+ ret void
+}
+
+define void @extract_f64_undef(double* nocapture %dst, <2 x double> %foo) nounwind {
+; X32-LABEL: extract_f64_undef:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: extract_f64_undef:
+; X64: # BB#0:
+; X64-NEXT: retq
+ %vecext = extractelement <2 x double> %foo, i32 2 ; undef
+ store double %vecext, double* %dst, align 1
+ ret void
+}
diff --git a/test/CodeGen/X86/extractelement-index.ll b/test/CodeGen/X86/extractelement-index.ll
index 8c12e7148aa7..e36e33ffe66b 100644
--- a/test/CodeGen/X86/extractelement-index.ll
+++ b/test/CodeGen/X86/extractelement-index.ll
@@ -11,8 +11,9 @@
define i8 @extractelement_v16i8_1(<16 x i8> %a) nounwind {
; SSE2-LABEL: extractelement_v16i8_1:
; SSE2: # BB#0:
-; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v16i8_1:
@@ -33,8 +34,9 @@ define i8 @extractelement_v16i8_1(<16 x i8> %a) nounwind {
define i8 @extractelement_v16i8_11(<16 x i8> %a) nounwind {
; SSE2-LABEL: extractelement_v16i8_11:
; SSE2: # BB#0:
-; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: pextrw $5, %xmm0, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v16i8_11:
@@ -55,8 +57,8 @@ define i8 @extractelement_v16i8_11(<16 x i8> %a) nounwind {
define i8 @extractelement_v16i8_14(<16 x i8> %a) nounwind {
; SSE2-LABEL: extractelement_v16i8_14:
; SSE2: # BB#0:
-; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: pextrw $7, %xmm0, %eax
+; SSE2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v16i8_14:
@@ -77,8 +79,9 @@ define i8 @extractelement_v16i8_14(<16 x i8> %a) nounwind {
define i8 @extractelement_v32i8_1(<32 x i8> %a) nounwind {
; SSE2-LABEL: extractelement_v32i8_1:
; SSE2: # BB#0:
-; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v32i8_1:
@@ -100,8 +103,9 @@ define i8 @extractelement_v32i8_1(<32 x i8> %a) nounwind {
define i8 @extractelement_v32i8_17(<32 x i8> %a) nounwind {
; SSE2-LABEL: extractelement_v32i8_17:
; SSE2: # BB#0:
-; SSE2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v32i8_17:
@@ -538,27 +542,19 @@ define i32 @extractelement_v8i32_var(<8 x i32> %a, i256 %i) nounwind {
; SSE-NEXT: popq %rbp
; SSE-NEXT: retq
;
-; AVX1-LABEL: extractelement_v8i32_var:
-; AVX1: # BB#0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: movq %rsp, %rbp
-; AVX1-NEXT: andq $-32, %rsp
-; AVX1-NEXT: subq $64, %rsp
-; AVX1-NEXT: andl $7, %edi
-; AVX1-NEXT: vmovaps %ymm0, (%rsp)
-; AVX1-NEXT: movl (%rsp,%rdi,4), %eax
-; AVX1-NEXT: movq %rbp, %rsp
-; AVX1-NEXT: popq %rbp
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: extractelement_v8i32_var:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovd %edi, %xmm1
-; AVX2-NEXT: vpermd %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
+; AVX-LABEL: extractelement_v8i32_var:
+; AVX: # BB#0:
+; AVX-NEXT: pushq %rbp
+; AVX-NEXT: movq %rsp, %rbp
+; AVX-NEXT: andq $-32, %rsp
+; AVX-NEXT: subq $64, %rsp
+; AVX-NEXT: andl $7, %edi
+; AVX-NEXT: vmovaps %ymm0, (%rsp)
+; AVX-NEXT: movl (%rsp,%rdi,4), %eax
+; AVX-NEXT: movq %rbp, %rsp
+; AVX-NEXT: popq %rbp
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
%b = extractelement <8 x i32> %a, i256 %i
ret i32 %b
}
diff --git a/test/CodeGen/X86/extractelement-legalization-store-ordering.ll b/test/CodeGen/X86/extractelement-legalization-store-ordering.ll
index c418e67ecb67..5d5cbc76f92e 100644
--- a/test/CodeGen/X86/extractelement-legalization-store-ordering.ll
+++ b/test/CodeGen/X86/extractelement-legalization-store-ordering.ll
@@ -16,19 +16,20 @@ target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
; CHECK-NEXT: movl 20(%esp), %edx
; CHECK-NEXT: paddd (%edx), %xmm0
; CHECK-NEXT: movdqa %xmm0, (%edx)
-; CHECK-NEXT: movl (%edx), %esi
-; CHECK-NEXT: movl 12(%edx), %edi
-; CHECK-NEXT: movl 8(%edx), %ebx
-; CHECK-NEXT: movl 4(%edx), %edx
-; CHECK-NEXT: shll $4, %ecx
+; CHECK-NEXT: movl (%edx), %esi
+; CHECK-NEXT: movl 4(%edx), %edi
+; CHECK-NEXT: shll $4, %ecx
+; CHECK-NEXT: movl 8(%edx), %ebx
+; CHECK-NEXT: movl 12(%edx), %edx
; CHECK-NEXT: movl %esi, 12(%eax,%ecx)
-; CHECK-NEXT: movl %edx, (%eax,%ecx)
+; CHECK-NEXT: movl %edi, (%eax,%ecx)
; CHECK-NEXT: movl %ebx, 8(%eax,%ecx)
-; CHECK-NEXT: movl %edi, 4(%eax,%ecx)
+; CHECK-NEXT: movl %edx, 4(%eax,%ecx)
; CHECK-NEXT: popl %esi
; CHECK-NEXT: popl %edi
; CHECK-NEXT: popl %ebx
; CHECK-NEXT: retl
+
define void @test_extractelement_legalization_storereuse(<4 x i32> %a, i32* nocapture %x, i32* nocapture readonly %y, i32 %i) #0 {
entry:
%0 = bitcast i32* %y to <4 x i32>*
diff --git a/test/CodeGen/X86/fadd-combines.ll b/test/CodeGen/X86/fadd-combines.ll
index 2df0e06dc252..28f72f42d01d 100644
--- a/test/CodeGen/X86/fadd-combines.ll
+++ b/test/CodeGen/X86/fadd-combines.ll
@@ -221,4 +221,4 @@ define <4 x float> @fadd_fadd_x_x_fadd_x_x_4f32(<4 x float> %x) #0 {
ret <4 x float> %z
}
-attributes #0 = { "less-precise-fpmad"="true" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "unsafe-fp-math"="true" }
+attributes #0 = { "less-precise-fpmad"="true" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "unsafe-fp-math"="true" "no-signed-zeros-fp-math"="true" }
diff --git a/test/CodeGen/X86/fast-isel-abort-warm.ll b/test/CodeGen/X86/fast-isel-abort-warm.ll
new file mode 100644
index 000000000000..3caa91b11ec6
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-abort-warm.ll
@@ -0,0 +1,14 @@
+; RUN: llc -fast-isel -o - %s -fast-isel-report-on-fallback 2>&1 | FileCheck %s
+; Make sure FastISel reports a warning when we ask it to do so.
+; Note: This test needs to use something that is not supported by FastISel.
+; Thus, this test may start failing if inline asm becomes supported by FastISel.
+; To fix this, use something else that's not supported (e.g., weird types).
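+; As a hypothetical alternative (an assumption for illustration, not part of
+; the original test): an operation on an unusual integer width, e.g.
+;   %wide = mul i128 %a, %b
+; should also be rejected, since FastISel only handles simple, legal value
+; types and would likely take the same fallback path.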
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx"
+
+; CHECK: warning: Instruction selection used fallback path for foo
+define void @foo() {
+entry:
+ call void asm sideeffect "nop", "~{dirflag},~{fpsr},~{flags}"()
+ ret void
+}
diff --git a/test/CodeGen/X86/fast-isel-cmp.ll b/test/CodeGen/X86/fast-isel-cmp.ll
index a4833a7d66d6..59c536369849 100644
--- a/test/CodeGen/X86/fast-isel-cmp.ll
+++ b/test/CodeGen/X86/fast-isel-cmp.ll
@@ -1,688 +1,1128 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s --check-prefix=SDAG
-; RUN: llc < %s -fast-isel -fast-isel-abort=1 -mtriple=x86_64-apple-darwin10 | FileCheck %s --check-prefix=FAST
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s --check-prefix=ALL --check-prefix=SDAG
+; RUN: llc < %s -fast-isel -fast-isel-abort=1 -mtriple=x86_64-apple-darwin10 | FileCheck %s --check-prefix=ALL --check-prefix=FAST
define zeroext i1 @fcmp_oeq(float %x, float %y) {
-; SDAG-LABEL: fcmp_oeq
-; SDAG: cmpeqss %xmm1, %xmm0
-; SDAG-NEXT: movd %xmm0, %eax
-; SDAG-NEXT: andl $1, %eax
-; FAST-LABEL: fcmp_oeq
-; FAST: ucomiss %xmm1, %xmm0
-; FAST-NEXT: sete %al
-; FAST-NEXT: setnp %cl
-; FAST-NEXT: andb %al, %cl
+; SDAG-LABEL: fcmp_oeq:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpeqss %xmm1, %xmm0
+; SDAG-NEXT: movd %xmm0, %eax
+; SDAG-NEXT: andl $1, %eax
+; SDAG-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_oeq:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: sete %al
+; FAST-NEXT: setnp %cl
+; FAST-NEXT: andb %al, %cl
+; FAST-NEXT: andb $1, %cl
+; FAST-NEXT: movzbl %cl, %eax
+; FAST-NEXT: retq
%1 = fcmp oeq float %x, %y
ret i1 %1
}
define zeroext i1 @fcmp_ogt(float %x, float %y) {
-; SDAG-LABEL: fcmp_ogt
-; SDAG: ucomiss %xmm1, %xmm0
-; SDAG-NEXT: seta %al
-; FAST: ucomiss %xmm1, %xmm0
-; FAST-NEXT: seta %al
+; SDAG-LABEL: fcmp_ogt:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: seta %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ogt:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: seta %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ogt float %x, %y
ret i1 %1
}
define zeroext i1 @fcmp_oge(float %x, float %y) {
-; SDAG-LABEL: fcmp_oge
-; SDAG: ucomiss %xmm1, %xmm0
-; SDAG-NEXT: setae %al
-; FAST-LABEL: fcmp_oge
-; FAST: ucomiss %xmm1, %xmm0
-; FAST-NEXT: setae %al
+; SDAG-LABEL: fcmp_oge:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setae %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_oge:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setae %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp oge float %x, %y
ret i1 %1
}
define zeroext i1 @fcmp_olt(float %x, float %y) {
-; SDAG-LABEL: fcmp_olt
-; SDAG: ucomiss %xmm0, %xmm1
-; SDAG-NEXT: seta %al
-; FAST-LABEL: fcmp_olt
-; FAST: ucomiss %xmm0, %xmm1
-; FAST-NEXT: seta %al
+; SDAG-LABEL: fcmp_olt:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: seta %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_olt:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: seta %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp olt float %x, %y
ret i1 %1
}
define zeroext i1 @fcmp_ole(float %x, float %y) {
-; SDAG-LABEL: fcmp_ole
-; SDAG: ucomiss %xmm0, %xmm1
-; SDAG-NEXT: setae %al
-; FAST-LABEL: fcmp_ole
-; FAST: ucomiss %xmm0, %xmm1
-; FAST-NEXT: setae %al
+; SDAG-LABEL: fcmp_ole:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setae %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ole:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setae %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ole float %x, %y
ret i1 %1
}
define zeroext i1 @fcmp_one(float %x, float %y) {
-; SDAG-LABEL: fcmp_one
-; SDAG: ucomiss %xmm1, %xmm0
-; SDAG-NEXT: setne %al
-; FAST-LABEL: fcmp_one
-; FAST: ucomiss %xmm1, %xmm0
-; FAST-NEXT: setne %al
+; SDAG-LABEL: fcmp_one:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setne %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_one:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setne %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp one float %x, %y
ret i1 %1
}
define zeroext i1 @fcmp_ord(float %x, float %y) {
-; SDAG-LABEL: fcmp_ord
-; SDAG: ucomiss %xmm1, %xmm0
-; SDAG-NEXT: setnp %al
-; FAST-LABEL: fcmp_ord
-; FAST: ucomiss %xmm1, %xmm0
-; FAST-NEXT: setnp %al
+; SDAG-LABEL: fcmp_ord:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setnp %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ord:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setnp %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ord float %x, %y
ret i1 %1
}
define zeroext i1 @fcmp_uno(float %x, float %y) {
-; SDAG-LABEL: fcmp_uno
-; SDAG: ucomiss %xmm1, %xmm0
-; SDAG-NEXT: setp %al
-; FAST-LABEL: fcmp_uno
-; FAST: ucomiss %xmm1, %xmm0
-; FAST-NEXT: setp %al
+; SDAG-LABEL: fcmp_uno:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setp %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_uno:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setp %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp uno float %x, %y
ret i1 %1
}
define zeroext i1 @fcmp_ueq(float %x, float %y) {
-; SDAG-LABEL: fcmp_ueq
-; SDAG: ucomiss %xmm1, %xmm0
-; SDAG-NEXT: sete %al
-; FAST-LABEL: fcmp_ueq
-; FAST: ucomiss %xmm1, %xmm0
-; FAST-NEXT: sete %al
+; SDAG-LABEL: fcmp_ueq:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: sete %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ueq:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: sete %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ueq float %x, %y
ret i1 %1
}
define zeroext i1 @fcmp_ugt(float %x, float %y) {
-; SDAG-LABEL: fcmp_ugt
-; SDAG: ucomiss %xmm0, %xmm1
-; SDAG-NEXT: setb %al
-; FAST-LABEL: fcmp_ugt
-; FAST: ucomiss %xmm0, %xmm1
-; FAST-NEXT: setb %al
+; SDAG-LABEL: fcmp_ugt:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setb %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ugt:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setb %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ugt float %x, %y
ret i1 %1
}
define zeroext i1 @fcmp_uge(float %x, float %y) {
-; SDAG-LABEL: fcmp_uge
-; SDAG: ucomiss %xmm0, %xmm1
-; SDAG-NEXT: setbe %al
-; FAST-LABEL: fcmp_uge
-; FAST: ucomiss %xmm0, %xmm1
-; FAST-NEXT: setbe %al
+; SDAG-LABEL: fcmp_uge:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setbe %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_uge:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setbe %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp uge float %x, %y
ret i1 %1
}
define zeroext i1 @fcmp_ult(float %x, float %y) {
-; SDAG-LABEL: fcmp_ult
-; SDAG: ucomiss %xmm1, %xmm0
-; SDAG-NEXT: setb %al
-; FAST-LABEL: fcmp_ult
-; FAST: ucomiss %xmm1, %xmm0
-; FAST-NEXT: setb %al
+; SDAG-LABEL: fcmp_ult:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setb %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ult:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setb %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ult float %x, %y
ret i1 %1
}
define zeroext i1 @fcmp_ule(float %x, float %y) {
-; SDAG-LABEL: fcmp_ule
-; SDAG: ucomiss %xmm1, %xmm0
-; SDAG-NEXT: setbe %al
-; FAST-LABEL: fcmp_ule
-; FAST: ucomiss %xmm1, %xmm0
-; FAST-NEXT: setbe %al
+; SDAG-LABEL: fcmp_ule:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setbe %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ule:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setbe %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ule float %x, %y
ret i1 %1
}
define zeroext i1 @fcmp_une(float %x, float %y) {
-; SDAG-LABEL: fcmp_une
-; SDAG: cmpneqss %xmm1, %xmm0
-; SDAG-NEXT: movd %xmm0, %eax
-; SDAG-NEXT: andl $1, %eax
-; FAST-LABEL: fcmp_une
-; FAST: ucomiss %xmm1, %xmm0
-; FAST-NEXT: setne %al
-; FAST-NEXT: setp %cl
-; FAST-NEXT: orb %al, %cl
+; SDAG-LABEL: fcmp_une:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpneqss %xmm1, %xmm0
+; SDAG-NEXT: movd %xmm0, %eax
+; SDAG-NEXT: andl $1, %eax
+; SDAG-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_une:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setne %al
+; FAST-NEXT: setp %cl
+; FAST-NEXT: orb %al, %cl
+; FAST-NEXT: andb $1, %cl
+; FAST-NEXT: movzbl %cl, %eax
+; FAST-NEXT: retq
%1 = fcmp une float %x, %y
ret i1 %1
}
define zeroext i1 @icmp_eq(i32 %x, i32 %y) {
-; SDAG-LABEL: icmp_eq
-; SDAG: cmpl %esi, %edi
-; SDAG-NEXT: sete %al
-; FAST-LABEL: icmp_eq
-; FAST: cmpl %esi, %edi
-; FAST-NEXT: sete %al
+; SDAG-LABEL: icmp_eq:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpl %esi, %edi
+; SDAG-NEXT: sete %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_eq:
+; FAST: ## BB#0:
+; FAST-NEXT: cmpl %esi, %edi
+; FAST-NEXT: sete %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp eq i32 %x, %y
ret i1 %1
}
define zeroext i1 @icmp_ne(i32 %x, i32 %y) {
-; SDAG-LABEL: icmp_ne
-; SDAG: cmpl %esi, %edi
-; SDAG-NEXT: setne %al
-; FAST-LABEL: icmp_ne
-; FAST: cmpl %esi, %edi
-; FAST-NEXT: setne %al
+; SDAG-LABEL: icmp_ne:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpl %esi, %edi
+; SDAG-NEXT: setne %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_ne:
+; FAST: ## BB#0:
+; FAST-NEXT: cmpl %esi, %edi
+; FAST-NEXT: setne %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp ne i32 %x, %y
ret i1 %1
}
define zeroext i1 @icmp_ugt(i32 %x, i32 %y) {
-; SDAG-LABEL: icmp_ugt
-; SDAG: cmpl %edi, %esi
-; SDAG-NEXT: setb %al
-; FAST-LABEL: icmp_ugt
-; FAST: cmpl %esi, %edi
-; FAST-NEXT: seta %al
+; SDAG-LABEL: icmp_ugt:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpl %esi, %edi
+; SDAG-NEXT: seta %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_ugt:
+; FAST: ## BB#0:
+; FAST-NEXT: cmpl %esi, %edi
+; FAST-NEXT: seta %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp ugt i32 %x, %y
ret i1 %1
}
define zeroext i1 @icmp_uge(i32 %x, i32 %y) {
-; SDAG-LABEL: icmp_uge
-; SDAG: cmpl %esi, %edi
-; SDAG-NEXT: setae %al
-; FAST-LABEL: icmp_uge
-; FAST: cmpl %esi, %edi
-; FAST-NEXT: setae %al
+; SDAG-LABEL: icmp_uge:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpl %esi, %edi
+; SDAG-NEXT: setae %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_uge:
+; FAST: ## BB#0:
+; FAST-NEXT: cmpl %esi, %edi
+; FAST-NEXT: setae %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp uge i32 %x, %y
ret i1 %1
}
define zeroext i1 @icmp_ult(i32 %x, i32 %y) {
-; SDAG-LABEL: icmp_ult
-; SDAG: cmpl %esi, %edi
-; SDAG-NEXT: setb %al
-; FAST-LABEL: icmp_ult
-; FAST: cmpl %esi, %edi
-; FAST-NEXT: setb %al
+; SDAG-LABEL: icmp_ult:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpl %esi, %edi
+; SDAG-NEXT: setb %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_ult:
+; FAST: ## BB#0:
+; FAST-NEXT: cmpl %esi, %edi
+; FAST-NEXT: setb %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp ult i32 %x, %y
ret i1 %1
}
define zeroext i1 @icmp_ule(i32 %x, i32 %y) {
-; SDAG-LABEL: icmp_ule
-; SDAG: cmpl %esi, %edi
-; SDAG-NEXT: setbe %al
-; FAST-LABEL: icmp_ule
-; FAST: cmpl %esi, %edi
-; FAST-NEXT: setbe %al
+; SDAG-LABEL: icmp_ule:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpl %esi, %edi
+; SDAG-NEXT: setbe %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_ule:
+; FAST: ## BB#0:
+; FAST-NEXT: cmpl %esi, %edi
+; FAST-NEXT: setbe %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp ule i32 %x, %y
ret i1 %1
}
define zeroext i1 @icmp_sgt(i32 %x, i32 %y) {
-; SDAG-LABEL: icmp_sgt
-; SDAG: cmpl %esi, %edi
-; SDAG-NEXT: setg %al
-; FAST-LABEL: icmp_sgt
-; FAST: cmpl %esi, %edi
-; FAST-NEXT: setg %al
+; SDAG-LABEL: icmp_sgt:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpl %esi, %edi
+; SDAG-NEXT: setg %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_sgt:
+; FAST: ## BB#0:
+; FAST-NEXT: cmpl %esi, %edi
+; FAST-NEXT: setg %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp sgt i32 %x, %y
ret i1 %1
}
define zeroext i1 @icmp_sge(i32 %x, i32 %y) {
-; SDAG-LABEL: icmp_sge
-; SDAG: cmpl %esi, %edi
-; SDAG-NEXT: setge %al
-; FAST-LABEL: icmp_sge
-; FAST: cmpl %esi, %edi
-; FAST-NEXT: setge %al
+; SDAG-LABEL: icmp_sge:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpl %esi, %edi
+; SDAG-NEXT: setge %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_sge:
+; FAST: ## BB#0:
+; FAST-NEXT: cmpl %esi, %edi
+; FAST-NEXT: setge %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp sge i32 %x, %y
ret i1 %1
}
define zeroext i1 @icmp_slt(i32 %x, i32 %y) {
-; SDAG-LABEL: icmp_slt
-; SDAG: cmpl %esi, %edi
-; SDAG-NEXT: setl %al
-; FAST-LABEL: icmp_slt
-; FAST: cmpl %esi, %edi
-; FAST-NEXT: setl %al
+; SDAG-LABEL: icmp_slt:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpl %esi, %edi
+; SDAG-NEXT: setl %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_slt:
+; FAST: ## BB#0:
+; FAST-NEXT: cmpl %esi, %edi
+; FAST-NEXT: setl %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp slt i32 %x, %y
ret i1 %1
}
define zeroext i1 @icmp_sle(i32 %x, i32 %y) {
-; SDAG-LABEL: icmp_sle
-; SDAG: cmpl %esi, %edi
-; SDAG-NEXT: setle %al
-; FAST-LABEL: icmp_sle
-; FAST: cmpl %esi, %edi
-; FAST-NEXT: setle %al
+; SDAG-LABEL: icmp_sle:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpl %esi, %edi
+; SDAG-NEXT: setle %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_sle:
+; FAST: ## BB#0:
+; FAST-NEXT: cmpl %esi, %edi
+; FAST-NEXT: setle %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp sle i32 %x, %y
ret i1 %1
}
; Test cmp folding and condition optimization.
define zeroext i1 @fcmp_oeq2(float %x) {
-; SDAG-LABEL: fcmp_oeq2
-; SDAG: ucomiss %xmm0, %xmm0
-; SDAG-NEXT: setnp %al
-; FAST-LABEL: fcmp_oeq2
-; FAST: ucomiss %xmm0, %xmm0
-; FAST-NEXT: setnp %al
+; SDAG-LABEL: fcmp_oeq2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_oeq2:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp oeq float %x, %x
ret i1 %1
}
define zeroext i1 @fcmp_oeq3(float %x) {
-; SDAG-LABEL: fcmp_oeq3
-; SDAG: xorps %xmm1, %xmm1
-; SDAG-NEXT: cmpeqss %xmm0, %xmm1
-; SDAG-NEXT: movd %xmm1, %eax
-; SDAG-NEXT: andl $1, %eax
-; FAST-LABEL: fcmp_oeq3
-; FAST: xorps %xmm1, %xmm1
-; FAST-NEXT: ucomiss %xmm1, %xmm0
-; FAST-NEXT: sete %al
-; FAST-NEXT: setnp %cl
-; FAST-NEXT: andb %al, %cl
+; SDAG-LABEL: fcmp_oeq3:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorps %xmm1, %xmm1
+; SDAG-NEXT: cmpeqss %xmm0, %xmm1
+; SDAG-NEXT: movd %xmm1, %eax
+; SDAG-NEXT: andl $1, %eax
+; SDAG-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_oeq3:
+; FAST: ## BB#0:
+; FAST-NEXT: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: sete %al
+; FAST-NEXT: setnp %cl
+; FAST-NEXT: andb %al, %cl
+; FAST-NEXT: andb $1, %cl
+; FAST-NEXT: movzbl %cl, %eax
+; FAST-NEXT: retq
%1 = fcmp oeq float %x, 0.000000e+00
ret i1 %1
}
define zeroext i1 @fcmp_ogt2(float %x) {
-; SDAG-LABEL: fcmp_ogt2
-; SDAG: xorl %eax, %eax
-; FAST-LABEL: fcmp_ogt2
-; FAST: xorl %eax, %eax
+; SDAG-LABEL: fcmp_ogt2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ogt2:
+; FAST: ## BB#0:
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ogt float %x, %x
ret i1 %1
}
define zeroext i1 @fcmp_ogt3(float %x) {
-; SDAG-LABEL: fcmp_ogt3
-; SDAG: xorps %xmm1, %xmm1
-; SDAG-NEXT: ucomiss %xmm1, %xmm0
-; SDAG-NEXT: seta %al
-; FAST-LABEL: fcmp_ogt3
-; FAST: xorps %xmm1, %xmm1
-; FAST-NEXT: ucomiss %xmm1, %xmm0
-; FAST-NEXT: seta %al
+; SDAG-LABEL: fcmp_ogt3:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: seta %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ogt3:
+; FAST: ## BB#0:
+; FAST-NEXT: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: seta %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ogt float %x, 0.000000e+00
ret i1 %1
}
define zeroext i1 @fcmp_oge2(float %x) {
-; SDAG-LABEL: fcmp_oge2
-; SDAG: ucomiss %xmm0, %xmm0
-; SDAG-NEXT: setnp %al
-; FAST-LABEL: fcmp_oge2
-; FAST: ucomiss %xmm0, %xmm0
-; FAST-NEXT: setnp %al
+; SDAG-LABEL: fcmp_oge2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_oge2:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp oge float %x, %x
ret i1 %1
}
define zeroext i1 @fcmp_oge3(float %x) {
-; SDAG-LABEL: fcmp_oge3
-; SDAG: xorps %xmm1, %xmm1
-; SDAG-NEXT: ucomiss %xmm1, %xmm0
-; SDAG-NEXT: setae %al
-; FAST-LABEL: fcmp_oge3
-; FAST: xorps %xmm1, %xmm1
-; FAST-NEXT: ucomiss %xmm1, %xmm0
-; FAST-NEXT: setae %al
+; SDAG-LABEL: fcmp_oge3:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setae %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_oge3:
+; FAST: ## BB#0:
+; FAST-NEXT: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setae %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp oge float %x, 0.000000e+00
ret i1 %1
}
define zeroext i1 @fcmp_olt2(float %x) {
-; SDAG-LABEL: fcmp_olt2
-; SDAG: xorl %eax, %eax
-; FAST-LABEL: fcmp_olt2
-; FAST: xorl %eax, %eax
+; SDAG-LABEL: fcmp_olt2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_olt2:
+; FAST: ## BB#0:
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp olt float %x, %x
ret i1 %1
}
define zeroext i1 @fcmp_olt3(float %x) {
-; SDAG-LABEL: fcmp_olt3
-; SDAG: xorps %xmm1, %xmm1
-; SDAG-NEXT: ucomiss %xmm0, %xmm1
-; SDAG-NEXT: seta %al
-; FAST-LABEL: fcmp_olt3
-; FAST: xorps %xmm1, %xmm1
-; FAST-NEXT: ucomiss %xmm0, %xmm1
-; FAST-NEXT: seta %al
+; SDAG-LABEL: fcmp_olt3:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: seta %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_olt3:
+; FAST: ## BB#0:
+; FAST-NEXT: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: seta %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp olt float %x, 0.000000e+00
ret i1 %1
}
define zeroext i1 @fcmp_ole2(float %x) {
-; SDAG-LABEL: fcmp_ole2
-; SDAG: ucomiss %xmm0, %xmm0
-; SDAG-NEXT: setnp %al
-; FAST-LABEL: fcmp_ole2
-; FAST: ucomiss %xmm0, %xmm0
-; FAST-NEXT: setnp %al
+; SDAG-LABEL: fcmp_ole2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ole2:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ole float %x, %x
ret i1 %1
}
define zeroext i1 @fcmp_ole3(float %x) {
-; SDAG-LABEL: fcmp_ole3
-; SDAG: xorps %xmm1, %xmm1
-; SDAG-NEXT: ucomiss %xmm0, %xmm1
-; SDAG-NEXT: setae %al
-; FAST-LABEL: fcmp_ole3
-; FAST: xorps %xmm1, %xmm1
-; FAST-NEXT: ucomiss %xmm0, %xmm1
-; FAST-NEXT: setae %al
+; SDAG-LABEL: fcmp_ole3:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setae %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ole3:
+; FAST: ## BB#0:
+; FAST-NEXT: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setae %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ole float %x, 0.000000e+00
ret i1 %1
}
define zeroext i1 @fcmp_one2(float %x) {
-; SDAG-LABEL: fcmp_one2
-; SDAG: xorl %eax, %eax
-; FAST-LABEL: fcmp_one2
-; FAST: xorl %eax, %eax
+; SDAG-LABEL: fcmp_one2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_one2:
+; FAST: ## BB#0:
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp one float %x, %x
ret i1 %1
}
define zeroext i1 @fcmp_one3(float %x) {
-; SDAG-LABEL: fcmp_one3
-; SDAG: xorps %xmm1, %xmm1
-; SDAG-NEXT: ucomiss %xmm1, %xmm0
-; SDAG-NEXT: setne %al
-; FAST-LABEL: fcmp_one3
-; FAST: xorps %xmm1, %xmm1
-; FAST-NEXT: ucomiss %xmm1, %xmm0
-; FAST-NEXT: setne %al
+; SDAG-LABEL: fcmp_one3:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setne %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_one3:
+; FAST: ## BB#0:
+; FAST-NEXT: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setne %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp one float %x, 0.000000e+00
ret i1 %1
}
define zeroext i1 @fcmp_ord2(float %x) {
-; SDAG-LABEL: fcmp_ord2
-; SDAG: ucomiss %xmm0, %xmm0
-; SDAG-NEXT: setnp %al
-; FAST-LABEL: fcmp_ord2
-; FAST: ucomiss %xmm0, %xmm0
-; FAST-NEXT: setnp %al
+; SDAG-LABEL: fcmp_ord2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ord2:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ord float %x, %x
ret i1 %1
}
define zeroext i1 @fcmp_ord3(float %x) {
-; SDAG-LABEL: fcmp_ord3
-; SDAG: ucomiss %xmm0, %xmm0
-; SDAG-NEXT: setnp %al
-; FAST-LABEL: fcmp_ord3
-; FAST: ucomiss %xmm0, %xmm0
-; FAST-NEXT: setnp %al
+; SDAG-LABEL: fcmp_ord3:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ord3:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ord float %x, 0.000000e+00
ret i1 %1
}
define zeroext i1 @fcmp_uno2(float %x) {
-; SDAG-LABEL: fcmp_uno2
-; SDAG: ucomiss %xmm0, %xmm0
-; SDAG-NEXT: setp %al
-; FAST-LABEL: fcmp_uno2
-; FAST: ucomiss %xmm0, %xmm0
-; FAST-NEXT: setp %al
+; SDAG-LABEL: fcmp_uno2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_uno2:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp uno float %x, %x
ret i1 %1
}
define zeroext i1 @fcmp_uno3(float %x) {
-; SDAG-LABEL: fcmp_uno3
-; SDAG: ucomiss %xmm0, %xmm0
-; SDAG-NEXT: setp %al
-; FAST-LABEL: fcmp_uno3
-; FAST: ucomiss %xmm0, %xmm0
-; FAST-NEXT: setp %al
+; SDAG-LABEL: fcmp_uno3:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_uno3:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp uno float %x, 0.000000e+00
ret i1 %1
}
define zeroext i1 @fcmp_ueq2(float %x) {
-; SDAG-LABEL: fcmp_ueq2
-; SDAG: movb $1, %al
-; FAST-LABEL: fcmp_ueq2
-; FAST: movb $1, %al
+; SDAG-LABEL: fcmp_ueq2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ueq2:
+; FAST: ## BB#0:
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ueq float %x, %x
ret i1 %1
}
define zeroext i1 @fcmp_ueq3(float %x) {
-; SDAG-LABEL: fcmp_ueq3
-; SDAG: xorps %xmm1, %xmm1
-; SDAG-NEXT: ucomiss %xmm1, %xmm0
-; SDAG-NEXT: sete %al
-; FAST-LABEL: fcmp_ueq3
-; FAST: xorps %xmm1, %xmm1
-; FAST-NEXT: ucomiss %xmm1, %xmm0
-; FAST-NEXT: sete %al
+; SDAG-LABEL: fcmp_ueq3:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: sete %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ueq3:
+; FAST: ## BB#0:
+; FAST-NEXT: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: sete %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ueq float %x, 0.000000e+00
ret i1 %1
}
define zeroext i1 @fcmp_ugt2(float %x) {
-; SDAG-LABEL: fcmp_ugt2
-; SDAG: ucomiss %xmm0, %xmm0
-; SDAG-NEXT: setp %al
-; FAST-LABEL: fcmp_ugt2
-; FAST: ucomiss %xmm0, %xmm0
-; FAST-NEXT: setp %al
+; SDAG-LABEL: fcmp_ugt2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ugt2:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ugt float %x, %x
ret i1 %1
}
define zeroext i1 @fcmp_ugt3(float %x) {
-; SDAG-LABEL: fcmp_ugt3
-; SDAG: xorps %xmm1, %xmm1
-; SDAG-NEXT: ucomiss %xmm0, %xmm1
-; SDAG-NEXT: setb %al
-; FAST-LABEL: fcmp_ugt3
-; FAST: xorps %xmm1, %xmm1
-; FAST-NEXT: ucomiss %xmm0, %xmm1
-; FAST-NEXT: setb %al
+; SDAG-LABEL: fcmp_ugt3:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setb %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ugt3:
+; FAST: ## BB#0:
+; FAST-NEXT: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setb %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ugt float %x, 0.000000e+00
ret i1 %1
}
define zeroext i1 @fcmp_uge2(float %x) {
-; SDAG-LABEL: fcmp_uge2
-; SDAG: movb $1, %al
-; FAST-LABEL: fcmp_uge2
-; FAST: movb $1, %al
+; SDAG-LABEL: fcmp_uge2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_uge2:
+; FAST: ## BB#0:
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp uge float %x, %x
ret i1 %1
}
define zeroext i1 @fcmp_uge3(float %x) {
-; SDAG-LABEL: fcmp_uge3
-; SDAG: xorps %xmm1, %xmm1
-; SDAG-NEXT: ucomiss %xmm0, %xmm1
-; SDAG-NEXT: setbe %al
-; FAST-LABEL: fcmp_uge3
-; FAST: xorps %xmm1, %xmm1
-; FAST-NEXT: ucomiss %xmm0, %xmm1
-; FAST-NEXT: setbe %al
+; SDAG-LABEL: fcmp_uge3:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setbe %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_uge3:
+; FAST: ## BB#0:
+; FAST-NEXT: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setbe %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp uge float %x, 0.000000e+00
ret i1 %1
}
define zeroext i1 @fcmp_ult2(float %x) {
-; SDAG-LABEL: fcmp_ult2
-; SDAG: ucomiss %xmm0, %xmm0
-; SDAG-NEXT: setp %al
-; FAST-LABEL: fcmp_ult2
-; FAST: ucomiss %xmm0, %xmm0
-; FAST-NEXT: setp %al
+; SDAG-LABEL: fcmp_ult2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ult2:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ult float %x, %x
ret i1 %1
}
define zeroext i1 @fcmp_ult3(float %x) {
-; SDAG-LABEL: fcmp_ult3
-; SDAG: xorps %xmm1, %xmm1
-; SDAG-NEXT: ucomiss %xmm1, %xmm0
-; SDAG-NEXT: setb %al
-; FAST-LABEL: fcmp_ult3
-; FAST: xorps %xmm1, %xmm1
-; FAST-NEXT: ucomiss %xmm1, %xmm0
-; FAST-NEXT: setb %al
+; SDAG-LABEL: fcmp_ult3:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setb %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ult3:
+; FAST: ## BB#0:
+; FAST-NEXT: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setb %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ult float %x, 0.000000e+00
ret i1 %1
}
define zeroext i1 @fcmp_ule2(float %x) {
-; SDAG-LABEL: fcmp_ule2
-; SDAG: movb $1, %al
-; FAST-LABEL: fcmp_ule2
-; FAST: movb $1, %al
+; SDAG-LABEL: fcmp_ule2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ule2:
+; FAST: ## BB#0:
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ule float %x, %x
ret i1 %1
}
define zeroext i1 @fcmp_ule3(float %x) {
-; SDAG-LABEL: fcmp_ule3
-; SDAG: xorps %xmm1, %xmm1
-; SDAG-NEXT: ucomiss %xmm1, %xmm0
-; SDAG-NEXT: setbe %al
-; FAST-LABEL: fcmp_ule3
-; FAST: xorps %xmm1, %xmm1
-; FAST-NEXT: ucomiss %xmm1, %xmm0
-; FAST-NEXT: setbe %al
+; SDAG-LABEL: fcmp_ule3:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setbe %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_ule3:
+; FAST: ## BB#0:
+; FAST-NEXT: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setbe %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp ule float %x, 0.000000e+00
ret i1 %1
}
define zeroext i1 @fcmp_une2(float %x) {
-; SDAG-LABEL: fcmp_une2
-; SDAG: ucomiss %xmm0, %xmm0
-; SDAG-NEXT: setp %al
-; FAST-LABEL: fcmp_une2
-; FAST: ucomiss %xmm0, %xmm0
-; FAST-NEXT: setp %al
+; SDAG-LABEL: fcmp_une2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_une2:
+; FAST: ## BB#0:
+; FAST-NEXT: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = fcmp une float %x, %x
ret i1 %1
}
define zeroext i1 @fcmp_une3(float %x) {
-; SDAG-LABEL: fcmp_une3
-; SDAG: xorps %xmm1, %xmm1
-; SDAG-NEXT: cmpneqss %xmm0, %xmm1
-; SDAG-NEXT: movd %xmm1, %eax
-; SDAG-NEXT: andl $1, %eax
-; FAST-LABEL: fcmp_une3
-; FAST: xorps %xmm1, %xmm1
-; FAST-NEXT: ucomiss %xmm1, %xmm0
-; FAST-NEXT: setne %al
-; FAST-NEXT: setp %cl
-; FAST-NEXT: orb %al, %cl
+; SDAG-LABEL: fcmp_une3:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorps %xmm1, %xmm1
+; SDAG-NEXT: cmpneqss %xmm0, %xmm1
+; SDAG-NEXT: movd %xmm1, %eax
+; SDAG-NEXT: andl $1, %eax
+; SDAG-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: fcmp_une3:
+; FAST: ## BB#0:
+; FAST-NEXT: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setne %al
+; FAST-NEXT: setp %cl
+; FAST-NEXT: orb %al, %cl
+; FAST-NEXT: andb $1, %cl
+; FAST-NEXT: movzbl %cl, %eax
+; FAST-NEXT: retq
%1 = fcmp une float %x, 0.000000e+00
ret i1 %1
}
define zeroext i1 @icmp_eq2(i32 %x) {
-; SDAG-LABEL: icmp_eq2
-; SDAG: movb $1, %al
-; FAST-LABEL: icmp_eq2
-; FAST: movb $1, %al
+; SDAG-LABEL: icmp_eq2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_eq2:
+; FAST: ## BB#0:
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp eq i32 %x, %x
ret i1 %1
}
define zeroext i1 @icmp_ne2(i32 %x) {
-; SDAG-LABEL: icmp_ne2
-; SDAG: xorl %eax, %eax
-; FAST-LABEL: icmp_ne2
-; FAST: xorl %eax, %eax
+; SDAG-LABEL: icmp_ne2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_ne2:
+; FAST: ## BB#0:
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp ne i32 %x, %x
ret i1 %1
}
define zeroext i1 @icmp_ugt2(i32 %x) {
-; SDAG-LABEL: icmp_ugt2
-; SDAG: xorl %eax, %eax
-; FAST-LABEL: icmp_ugt2
-; FAST: xorl %eax, %eax
+; SDAG-LABEL: icmp_ugt2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_ugt2:
+; FAST: ## BB#0:
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp ugt i32 %x, %x
ret i1 %1
}
define zeroext i1 @icmp_uge2(i32 %x) {
-; SDAG-LABEL: icmp_uge2
-; SDAG: movb $1, %al
-; FAST-LABEL: icmp_uge2
-; FAST: movb $1, %al
+; SDAG-LABEL: icmp_uge2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_uge2:
+; FAST: ## BB#0:
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp uge i32 %x, %x
ret i1 %1
}
define zeroext i1 @icmp_ult2(i32 %x) {
-; SDAG-LABEL: icmp_ult2
-; SDAG: xorl %eax, %eax
-; FAST-LABEL: icmp_ult2
-; FAST: xorl %eax, %eax
+; SDAG-LABEL: icmp_ult2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_ult2:
+; FAST: ## BB#0:
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp ult i32 %x, %x
ret i1 %1
}
define zeroext i1 @icmp_ule2(i32 %x) {
-; SDAG-LABEL: icmp_ule2
-; SDAG: movb $1, %al
-; FAST-LABEL: icmp_ule2
-; FAST: movb $1, %al
+; SDAG-LABEL: icmp_ule2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_ule2:
+; FAST: ## BB#0:
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp ule i32 %x, %x
ret i1 %1
}
define zeroext i1 @icmp_sgt2(i32 %x) {
-; SDAG-LABEL: icmp_sgt2
-; SDAG: xorl %eax, %eax
-; FAST-LABEL: icmp_sgt2
-; FAST: xorl %eax, %eax
+; SDAG-LABEL: icmp_sgt2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_sgt2:
+; FAST: ## BB#0:
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp sgt i32 %x, %x
ret i1 %1
}
define zeroext i1 @icmp_sge2(i32 %x) {
-; SDAG-LABEL: icmp_sge2
-; SDAG: movb $1, %al
-; FAST-LABEL: icmp_sge2
-; FAST: movb $1, %al
+; SDAG-LABEL: icmp_sge2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_sge2:
+; FAST: ## BB#0:
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp sge i32 %x, %x
ret i1 %1
}
define zeroext i1 @icmp_slt2(i32 %x) {
-; SDAG-LABEL: icmp_slt2
-; SDAG: xorl %eax, %eax
-; FAST-LABEL: icmp_slt2
-; FAST: xorl %eax, %eax
+; SDAG-LABEL: icmp_slt2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_slt2:
+; FAST: ## BB#0:
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp slt i32 %x, %x
ret i1 %1
}
define zeroext i1 @icmp_sle2(i32 %x) {
-; SDAG-LABEL: icmp_sle2
-; SDAG: movb $1, %al
-; FAST-LABEL: icmp_sle2
-; FAST: movb $1, %al
+; SDAG-LABEL: icmp_sle2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: icmp_sle2:
+; FAST: ## BB#0:
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
%1 = icmp sle i32 %x, %x
ret i1 %1
}
diff --git a/test/CodeGen/X86/fast-isel-deadcode.ll b/test/CodeGen/X86/fast-isel-deadcode.ll
index 0a53d60f8352..5381dc4858af 100644
--- a/test/CodeGen/X86/fast-isel-deadcode.ll
+++ b/test/CodeGen/X86/fast-isel-deadcode.ll
@@ -83,7 +83,7 @@ entry:
%tmp = alloca { <2 x float>, float }, align 8
store i32 0, i32* %retval, align 4
%0 = bitcast %struct.FVector* %v to i8*
- call void @llvm.lifetime.start(i64 12, i8* %0) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 12, i8* %0) nounwind
%x.i = getelementptr inbounds %struct.FVector, %struct.FVector* %v, i64 0, i32 0
store float 1.000000e+00, float* %x.i, align 4
%y.i = getelementptr inbounds %struct.FVector, %struct.FVector* %v, i64 0, i32 1
@@ -136,12 +136,12 @@ func.exit: ; preds = %if.then.i, %if.else.i, %if.end.5.i
%5 = bitcast %struct.FVector* %ref.tmp to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* %5, i64 12, i32 4, i1 false)
%6 = bitcast %struct.FVector* %v to i8*
- call void @llvm.lifetime.end(i64 12, i8* %6) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 12, i8* %6) nounwind
ret i32 0
}
-declare void @llvm.lifetime.start(i64, i8* nocapture) argmemonly nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) argmemonly nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) argmemonly nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) argmemonly nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) argmemonly nounwind
diff --git a/test/CodeGen/X86/fast-isel-load-i1.ll b/test/CodeGen/X86/fast-isel-load-i1.ll
index 1b2e3c5b9bbf..2f3c6c4b84b9 100644
--- a/test/CodeGen/X86/fast-isel-load-i1.ll
+++ b/test/CodeGen/X86/fast-isel-load-i1.ll
@@ -1,9 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f | FileCheck %s
define i1 @test_i1(i1* %b) {
; CHECK-LABEL: test_i1:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: testb $1, (%rdi)
+; CHECK-NEXT: movzbl (%rdi), %eax
+; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: testb $1, %al
+; CHECK-NEXT: je .LBB0_2
+; CHECK-NEXT: # BB#1: # %in
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB0_2: # %out
+; CHECK-NEXT: movb $1, %al
+; CHECK-NEXT: retq
entry:
%0 = load i1, i1* %b, align 1
br i1 %0, label %in, label %out
diff --git a/test/CodeGen/X86/fast-isel-nontemporal.ll b/test/CodeGen/X86/fast-isel-nontemporal.ll
index 2fc08fb4135d..4140721bd5f3 100644
--- a/test/CodeGen/X86/fast-isel-nontemporal.ll
+++ b/test/CodeGen/X86/fast-isel-nontemporal.ll
@@ -1,11 +1,11 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+sse2 -fast-isel -O0 < %s | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+sse4a -fast-isel -O0 < %s | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE4A
-; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 -fast-isel -O0 < %s | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+avx -fast-isel -O0 < %s | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+avx2 -fast-isel -O0 < %s | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+avx512f -fast-isel -O0 < %s | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512F
-; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+avx512bw -fast-isel -O0 < %s | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse2 -fast-isel -O0 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse4a -fast-isel -O0 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE4A
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse4.1 -fast-isel -O0 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+mmx,+avx -fast-isel -O0 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+mmx,+avx2 -fast-isel -O0 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+mmx,+avx512f -fast-isel -O0 | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+mmx,+avx512bw -fast-isel -O0 | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
;
; Scalar Stores
@@ -92,6 +92,25 @@ entry:
}
;
+; MMX Store
+;
+
+define void @test_mmx(x86_mmx* nocapture %a0, x86_mmx* nocapture %a1) {
+; ALL-LABEL: test_mmx:
+; ALL: # BB#0: # %entry
+; ALL-NEXT: movq (%rdi), %mm0
+; ALL-NEXT: psrlq $3, %mm0
+; ALL-NEXT: movntq %mm0, (%rsi)
+; ALL-NEXT: retq
+entry:
+ %0 = load x86_mmx, x86_mmx* %a0
+ %1 = call x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx %0, i32 3)
+ store x86_mmx %1, x86_mmx* %a1, align 8, !nontemporal !1
+ ret void
+}
+declare x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx, i32) nounwind readnone
+
+;
; 128-bit Vector Stores
;
@@ -379,6 +398,7 @@ define void @test_nt8xfloat(<8 x float>* nocapture %ptr, <8 x float> %X) {
; AVX512-LABEL: test_nt8xfloat:
; AVX512: # BB#0: # %entry
; AVX512-NEXT: vmovntps %ymm0, (%rdi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
store <8 x float> %X, <8 x float>* %ptr, align 32, !nontemporal !1
@@ -401,6 +421,7 @@ define void @test_nt4xdouble(<4 x double>* nocapture %ptr, <4 x double> %X) {
; AVX512-LABEL: test_nt4xdouble:
; AVX512: # BB#0: # %entry
; AVX512-NEXT: vmovntpd %ymm0, (%rdi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
store <4 x double> %X, <4 x double>* %ptr, align 32, !nontemporal !1
@@ -423,6 +444,7 @@ define void @test_nt32xi8(<32 x i8>* nocapture %ptr, <32 x i8> %X) {
; AVX512-LABEL: test_nt32xi8:
; AVX512: # BB#0: # %entry
; AVX512-NEXT: vmovntdq %ymm0, (%rdi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
store <32 x i8> %X, <32 x i8>* %ptr, align 32, !nontemporal !1
@@ -445,6 +467,7 @@ define void @test_nt16xi16(<16 x i16>* nocapture %ptr, <16 x i16> %X) {
; AVX512-LABEL: test_nt16xi16:
; AVX512: # BB#0: # %entry
; AVX512-NEXT: vmovntdq %ymm0, (%rdi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
store <16 x i16> %X, <16 x i16>* %ptr, align 32, !nontemporal !1
@@ -467,6 +490,7 @@ define void @test_nt8xi32(<8 x i32>* nocapture %ptr, <8 x i32> %X) {
; AVX512-LABEL: test_nt8xi32:
; AVX512: # BB#0: # %entry
; AVX512-NEXT: vmovntdq %ymm0, (%rdi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
store <8 x i32> %X, <8 x i32>* %ptr, align 32, !nontemporal !1
@@ -489,6 +513,7 @@ define void @test_nt4xi64(<4 x i64>* nocapture %ptr, <4 x i64> %X) {
; AVX512-LABEL: test_nt4xi64:
; AVX512: # BB#0: # %entry
; AVX512-NEXT: vmovntdq %ymm0, (%rdi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
store <4 x i64> %X, <4 x i64>* %ptr, align 32, !nontemporal !1
@@ -750,6 +775,7 @@ define void @test_nt16xfloat(<16 x float>* nocapture %ptr, <16 x float> %X) {
; AVX512-LABEL: test_nt16xfloat:
; AVX512: # BB#0: # %entry
; AVX512-NEXT: vmovntps %zmm0, (%rdi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
store <16 x float> %X, <16 x float>* %ptr, align 64, !nontemporal !1
@@ -775,6 +801,7 @@ define void @test_nt8xdouble(<8 x double>* nocapture %ptr, <8 x double> %X) {
; AVX512-LABEL: test_nt8xdouble:
; AVX512: # BB#0: # %entry
; AVX512-NEXT: vmovntpd %zmm0, (%rdi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
store <8 x double> %X, <8 x double>* %ptr, align 64, !nontemporal !1
@@ -801,11 +828,13 @@ define void @test_nt64xi8(<64 x i8>* nocapture %ptr, <64 x i8> %X) {
; AVX512F: # BB#0: # %entry
; AVX512F-NEXT: vmovntdq %ymm0, (%rdi)
; AVX512F-NEXT: vmovntdq %ymm1, 32(%rdi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_nt64xi8:
; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: vmovntdq %zmm0, (%rdi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
entry:
store <64 x i8> %X, <64 x i8>* %ptr, align 64, !nontemporal !1
@@ -832,11 +861,13 @@ define void @test_nt32xi16(<32 x i16>* nocapture %ptr, <32 x i16> %X) {
; AVX512F: # BB#0: # %entry
; AVX512F-NEXT: vmovntdq %ymm0, (%rdi)
; AVX512F-NEXT: vmovntdq %ymm1, 32(%rdi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_nt32xi16:
; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: vmovntdq %zmm0, (%rdi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
entry:
store <32 x i16> %X, <32 x i16>* %ptr, align 64, !nontemporal !1
@@ -862,6 +893,7 @@ define void @test_nt16xi32(<16 x i32>* nocapture %ptr, <16 x i32> %X) {
; AVX512-LABEL: test_nt16xi32:
; AVX512: # BB#0: # %entry
; AVX512-NEXT: vmovntdq %zmm0, (%rdi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
store <16 x i32> %X, <16 x i32>* %ptr, align 64, !nontemporal !1
@@ -887,6 +919,7 @@ define void @test_nt8xi64(<8 x i64>* nocapture %ptr, <8 x i64> %X) {
; AVX512-LABEL: test_nt8xi64:
; AVX512: # BB#0: # %entry
; AVX512-NEXT: vmovntdq %zmm0, (%rdi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
store <8 x i64> %X, <8 x i64>* %ptr, align 64, !nontemporal !1
diff --git a/test/CodeGen/X86/fast-isel-select-cmov.ll b/test/CodeGen/X86/fast-isel-select-cmov.ll
index a9b2dd841f20..e40e917e11e9 100644
--- a/test/CodeGen/X86/fast-isel-select-cmov.ll
+++ b/test/CodeGen/X86/fast-isel-select-cmov.ll
@@ -6,21 +6,12 @@
; condition input (argument or cmp). Currently i8 is not supported.
define zeroext i16 @select_cmov_i16(i1 zeroext %cond, i16 zeroext %a, i16 zeroext %b) {
-; NOAVX512-LABEL: select_cmov_i16:
-; NOAVX512: ## BB#0:
-; NOAVX512-NEXT: testb $1, %dil
-; NOAVX512-NEXT: cmovew %dx, %si
-; NOAVX512-NEXT: movzwl %si, %eax
-; NOAVX512-NEXT: retq
-;
-; AVX512-LABEL: select_cmov_i16:
-; AVX512: ## BB#0:
-; AVX512-NEXT: kmovw %edi, %k0
-; AVX512-NEXT: kmovw %k0, %eax
-; AVX512-NEXT: testb $1, %al
-; AVX512-NEXT: cmovew %dx, %si
-; AVX512-NEXT: movzwl %si, %eax
-; AVX512-NEXT: retq
+; CHECK-LABEL: select_cmov_i16:
+; CHECK: ## BB#0:
+; CHECK-NEXT: testb $1, %dil
+; CHECK-NEXT: cmovew %dx, %si
+; CHECK-NEXT: movzwl %si, %eax
+; CHECK-NEXT: retq
%1 = select i1 %cond, i16 %a, i16 %b
ret i16 %1
}
@@ -38,21 +29,12 @@ define zeroext i16 @select_cmp_cmov_i16(i16 zeroext %a, i16 zeroext %b) {
}
define i32 @select_cmov_i32(i1 zeroext %cond, i32 %a, i32 %b) {
-; NOAVX512-LABEL: select_cmov_i32:
-; NOAVX512: ## BB#0:
-; NOAVX512-NEXT: testb $1, %dil
-; NOAVX512-NEXT: cmovel %edx, %esi
-; NOAVX512-NEXT: movl %esi, %eax
-; NOAVX512-NEXT: retq
-;
-; AVX512-LABEL: select_cmov_i32:
-; AVX512: ## BB#0:
-; AVX512-NEXT: kmovw %edi, %k0
-; AVX512-NEXT: kmovw %k0, %eax
-; AVX512-NEXT: testb $1, %al
-; AVX512-NEXT: cmovel %edx, %esi
-; AVX512-NEXT: movl %esi, %eax
-; AVX512-NEXT: retq
+; CHECK-LABEL: select_cmov_i32:
+; CHECK: ## BB#0:
+; CHECK-NEXT: testb $1, %dil
+; CHECK-NEXT: cmovel %edx, %esi
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
%1 = select i1 %cond, i32 %a, i32 %b
ret i32 %1
}
@@ -70,21 +52,12 @@ define i32 @select_cmp_cmov_i32(i32 %a, i32 %b) {
}
define i64 @select_cmov_i64(i1 zeroext %cond, i64 %a, i64 %b) {
-; NOAVX512-LABEL: select_cmov_i64:
-; NOAVX512: ## BB#0:
-; NOAVX512-NEXT: testb $1, %dil
-; NOAVX512-NEXT: cmoveq %rdx, %rsi
-; NOAVX512-NEXT: movq %rsi, %rax
-; NOAVX512-NEXT: retq
-;
-; AVX512-LABEL: select_cmov_i64:
-; AVX512: ## BB#0:
-; AVX512-NEXT: kmovw %edi, %k0
-; AVX512-NEXT: kmovw %k0, %eax
-; AVX512-NEXT: testb $1, %al
-; AVX512-NEXT: cmoveq %rdx, %rsi
-; AVX512-NEXT: movq %rsi, %rax
-; AVX512-NEXT: retq
+; CHECK-LABEL: select_cmov_i64:
+; CHECK: ## BB#0:
+; CHECK-NEXT: testb $1, %dil
+; CHECK-NEXT: cmoveq %rdx, %rsi
+; CHECK-NEXT: movq %rsi, %rax
+; CHECK-NEXT: retq
%1 = select i1 %cond, i64 %a, i64 %b
ret i64 %1
}
diff --git a/test/CodeGen/X86/fast-isel-select-sse.ll b/test/CodeGen/X86/fast-isel-select-sse.ll
index 502260d03f5a..499fe5ba54a2 100644
--- a/test/CodeGen/X86/fast-isel-select-sse.ll
+++ b/test/CodeGen/X86/fast-isel-select-sse.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -fast-isel -fast-isel-abort=1 -mattr=avx | FileCheck %s --check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefix=AVX512
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -fast-isel -fast-isel-abort=1 -mattr=avx512f | FileCheck %s --check-prefix=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs -mattr=avx | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs -fast-isel -fast-isel-abort=1 -mattr=avx | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs -mattr=avx512f | FileCheck %s --check-prefix=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs -fast-isel -fast-isel-abort=1 -mattr=avx512f | FileCheck %s --check-prefix=AVX512
; Test all cmp predicates that can be used with SSE.
@@ -39,9 +39,9 @@ define double @select_fcmp_oeq_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_oeq_f64:
; SSE: # BB#0:
; SSE-NEXT: cmpeqsd %xmm1, %xmm0
-; SSE-NEXT: andps %xmm0, %xmm2
-; SSE-NEXT: andnps %xmm3, %xmm0
-; SSE-NEXT: orps %xmm2, %xmm0
+; SSE-NEXT: andpd %xmm0, %xmm2
+; SSE-NEXT: andnpd %xmm3, %xmm0
+; SSE-NEXT: orpd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_oeq_f64:
@@ -94,10 +94,10 @@ define double @select_fcmp_ogt_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_ogt_f64:
; SSE: # BB#0:
; SSE-NEXT: cmpltsd %xmm0, %xmm1
-; SSE-NEXT: andps %xmm1, %xmm2
-; SSE-NEXT: andnps %xmm3, %xmm1
-; SSE-NEXT: orps %xmm2, %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: andpd %xmm1, %xmm2
+; SSE-NEXT: andnpd %xmm3, %xmm1
+; SSE-NEXT: orpd %xmm2, %xmm1
+; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_ogt_f64:
@@ -150,10 +150,10 @@ define double @select_fcmp_oge_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_oge_f64:
; SSE: # BB#0:
; SSE-NEXT: cmplesd %xmm0, %xmm1
-; SSE-NEXT: andps %xmm1, %xmm2
-; SSE-NEXT: andnps %xmm3, %xmm1
-; SSE-NEXT: orps %xmm2, %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: andpd %xmm1, %xmm2
+; SSE-NEXT: andnpd %xmm3, %xmm1
+; SSE-NEXT: orpd %xmm2, %xmm1
+; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_oge_f64:
@@ -205,9 +205,9 @@ define double @select_fcmp_olt_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_olt_f64:
; SSE: # BB#0:
; SSE-NEXT: cmpltsd %xmm1, %xmm0
-; SSE-NEXT: andps %xmm0, %xmm2
-; SSE-NEXT: andnps %xmm3, %xmm0
-; SSE-NEXT: orps %xmm2, %xmm0
+; SSE-NEXT: andpd %xmm0, %xmm2
+; SSE-NEXT: andnpd %xmm3, %xmm0
+; SSE-NEXT: orpd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_olt_f64:
@@ -259,9 +259,9 @@ define double @select_fcmp_ole_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_ole_f64:
; SSE: # BB#0:
; SSE-NEXT: cmplesd %xmm1, %xmm0
-; SSE-NEXT: andps %xmm0, %xmm2
-; SSE-NEXT: andnps %xmm3, %xmm0
-; SSE-NEXT: orps %xmm2, %xmm0
+; SSE-NEXT: andpd %xmm0, %xmm2
+; SSE-NEXT: andnpd %xmm3, %xmm0
+; SSE-NEXT: orpd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_ole_f64:
@@ -313,9 +313,9 @@ define double @select_fcmp_ord_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_ord_f64:
; SSE: # BB#0:
; SSE-NEXT: cmpordsd %xmm1, %xmm0
-; SSE-NEXT: andps %xmm0, %xmm2
-; SSE-NEXT: andnps %xmm3, %xmm0
-; SSE-NEXT: orps %xmm2, %xmm0
+; SSE-NEXT: andpd %xmm0, %xmm2
+; SSE-NEXT: andnpd %xmm3, %xmm0
+; SSE-NEXT: orpd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_ord_f64:
@@ -367,9 +367,9 @@ define double @select_fcmp_uno_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_uno_f64:
; SSE: # BB#0:
; SSE-NEXT: cmpunordsd %xmm1, %xmm0
-; SSE-NEXT: andps %xmm0, %xmm2
-; SSE-NEXT: andnps %xmm3, %xmm0
-; SSE-NEXT: orps %xmm2, %xmm0
+; SSE-NEXT: andpd %xmm0, %xmm2
+; SSE-NEXT: andnpd %xmm3, %xmm0
+; SSE-NEXT: orpd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_uno_f64:
@@ -421,9 +421,9 @@ define double @select_fcmp_ugt_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_ugt_f64:
; SSE: # BB#0:
; SSE-NEXT: cmpnlesd %xmm1, %xmm0
-; SSE-NEXT: andps %xmm0, %xmm2
-; SSE-NEXT: andnps %xmm3, %xmm0
-; SSE-NEXT: orps %xmm2, %xmm0
+; SSE-NEXT: andpd %xmm0, %xmm2
+; SSE-NEXT: andnpd %xmm3, %xmm0
+; SSE-NEXT: orpd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_ugt_f64:
@@ -475,9 +475,9 @@ define double @select_fcmp_uge_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_uge_f64:
; SSE: # BB#0:
; SSE-NEXT: cmpnltsd %xmm1, %xmm0
-; SSE-NEXT: andps %xmm0, %xmm2
-; SSE-NEXT: andnps %xmm3, %xmm0
-; SSE-NEXT: orps %xmm2, %xmm0
+; SSE-NEXT: andpd %xmm0, %xmm2
+; SSE-NEXT: andnpd %xmm3, %xmm0
+; SSE-NEXT: orpd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_uge_f64:
@@ -530,10 +530,10 @@ define double @select_fcmp_ult_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_ult_f64:
; SSE: # BB#0:
; SSE-NEXT: cmpnlesd %xmm0, %xmm1
-; SSE-NEXT: andps %xmm1, %xmm2
-; SSE-NEXT: andnps %xmm3, %xmm1
-; SSE-NEXT: orps %xmm2, %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: andpd %xmm1, %xmm2
+; SSE-NEXT: andnpd %xmm3, %xmm1
+; SSE-NEXT: orpd %xmm2, %xmm1
+; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_ult_f64:
@@ -586,10 +586,10 @@ define double @select_fcmp_ule_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_ule_f64:
; SSE: # BB#0:
; SSE-NEXT: cmpnltsd %xmm0, %xmm1
-; SSE-NEXT: andps %xmm1, %xmm2
-; SSE-NEXT: andnps %xmm3, %xmm1
-; SSE-NEXT: orps %xmm2, %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: andpd %xmm1, %xmm2
+; SSE-NEXT: andnpd %xmm3, %xmm1
+; SSE-NEXT: orpd %xmm2, %xmm1
+; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_ule_f64:
@@ -641,9 +641,9 @@ define double @select_fcmp_une_f64(double %a, double %b, double %c, double %d) {
; SSE-LABEL: select_fcmp_une_f64:
; SSE: # BB#0:
; SSE-NEXT: cmpneqsd %xmm1, %xmm0
-; SSE-NEXT: andps %xmm0, %xmm2
-; SSE-NEXT: andnps %xmm3, %xmm0
-; SSE-NEXT: orps %xmm2, %xmm0
+; SSE-NEXT: andpd %xmm0, %xmm2
+; SSE-NEXT: andnpd %xmm3, %xmm0
+; SSE-NEXT: orpd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: select_fcmp_une_f64:
diff --git a/test/CodeGen/X86/fast-isel-x86-64.ll b/test/CodeGen/X86/fast-isel-x86-64.ll
index 8288fd6f1a9a..3d5c12c03484 100644
--- a/test/CodeGen/X86/fast-isel-x86-64.ll
+++ b/test/CodeGen/X86/fast-isel-x86-64.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -mattr=-avx -fast-isel -mcpu=core2 -O0 -regalloc=fast -asm-verbose=0 -fast-isel-abort=1 | FileCheck %s
-; RUN: llc < %s -mattr=-avx -fast-isel -mcpu=core2 -O0 -regalloc=fast -asm-verbose=0 -fast-isel-verbose 2>&1 >/dev/null | FileCheck %s --check-prefix=STDERR --allow-empty
+; RUN: llc < %s -mattr=-avx -fast-isel -mcpu=core2 -O0 -regalloc=fast -asm-verbose=0 -pass-remarks-missed=isel 2>&1 >/dev/null | FileCheck %s --check-prefix=STDERR --allow-empty
; RUN: llc < %s -mattr=+avx -fast-isel -mcpu=core2 -O0 -regalloc=fast -asm-verbose=0 -fast-isel-abort=1 | FileCheck %s --check-prefix=AVX
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/test/CodeGen/X86/fast-isel-x86.ll b/test/CodeGen/X86/fast-isel-x86.ll
index 643b77638f45..aa6d9b7cf056 100644
--- a/test/CodeGen/X86/fast-isel-x86.ll
+++ b/test/CodeGen/X86/fast-isel-x86.ll
@@ -1,5 +1,5 @@
; RUN: llc -fast-isel -O0 -mcpu=generic -mtriple=i386-apple-darwin10 -relocation-model=pic < %s | FileCheck %s
-; RUN: llc -fast-isel -O0 -mcpu=generic -mtriple=i386-apple-darwin10 -relocation-model=pic < %s -fast-isel-verbose 2>&1 >/dev/null | FileCheck -check-prefix=STDERR -allow-empty %s
+; RUN: llc -fast-isel -O0 -mcpu=generic -mtriple=i386-apple-darwin10 -relocation-model=pic < %s -pass-remarks-missed=isel 2>&1 >/dev/null | FileCheck -check-prefix=STDERR -allow-empty %s
; This should use flds to set the return value.
; CHECK-LABEL: test0:
diff --git a/test/CodeGen/X86/fast-isel.ll b/test/CodeGen/X86/fast-isel.ll
index 36183e48c299..375814c8afcd 100644
--- a/test/CodeGen/X86/fast-isel.ll
+++ b/test/CodeGen/X86/fast-isel.ll
@@ -107,12 +107,12 @@ define void @crash_test1() nounwind ssp {
ret void
}
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
define i64* @life() nounwind {
%a1 = alloca i64*, align 8
%a2 = bitcast i64** %a1 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %a2) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %a2) nounwind
%a3 = load i64*, i64** %a1, align 8
ret i64* %a3
}
diff --git a/test/CodeGen/X86/fentry-insertion.ll b/test/CodeGen/X86/fentry-insertion.ll
new file mode 100644
index 000000000000..a585d96b209c
--- /dev/null
+++ b/test/CodeGen/X86/fentry-insertion.ll
@@ -0,0 +1,16 @@
+; RUN: llc %s -o - | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @test1() #0 {
+entry:
+ ret void
+
+; CHECK-LABEL: @test1
+; CHECK: callq __fentry__
+; CHECK-NOT: mcount
+; CHECK: retq
+}
+
+attributes #0 = { "fentry-call"="true" }
+
diff --git a/test/CodeGen/X86/file-source-filename.ll b/test/CodeGen/X86/file-source-filename.ll
new file mode 100644
index 000000000000..146da9e16c95
--- /dev/null
+++ b/test/CodeGen/X86/file-source-filename.ll
@@ -0,0 +1,4 @@
+; RUN: llc -mtriple=x86_64-linux-gnu < %s | FileCheck %s
+; CHECK: .file "foobar"
+
+source_filename = "foobar"
diff --git a/test/CodeGen/X86/fma-fneg-combine.ll b/test/CodeGen/X86/fma-fneg-combine.ll
index 5329f5b216a4..bb332f7282a8 100644
--- a/test/CodeGen/X86/fma-fneg-combine.ll
+++ b/test/CodeGen/X86/fma-fneg-combine.ll
@@ -127,7 +127,7 @@ define <2 x double> @test10(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test10:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0
-; CHECK-NEXT: vxorps {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
%0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 -1, i32 4) #2
@@ -142,7 +142,7 @@ define <4 x float> @test11(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 ze
; SKX: # BB#0: # %entry
; SKX-NEXT: vxorps {{.*}}(%rip){1to4}, %xmm2, %xmm0
; SKX-NEXT: andl $1, %edi
-; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vfmadd231ss %xmm1, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
;
@@ -165,7 +165,7 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float>, <4 x float>, <
define <8 x double> @test12(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; SKX-LABEL: test12:
; SKX: # BB#0: # %entry
-; SKX-NEXT: kmovb %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vfmadd132pd %zmm1, %zmm2, %zmm0 {%k1}
; SKX-NEXT: vxorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; SKX-NEXT: retq
@@ -183,13 +183,21 @@ entry:
}
define <2 x double> @test13(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
-; CHECK-LABEL: test13:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: vxorps {{.*}}(%rip), %xmm0, %xmm0
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: retq
+; SKX-LABEL: test13:
+; SKX: # BB#0: # %entry
+; SKX-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0
+; SKX-NEXT: andl $1, %edi
+; SKX-NEXT: kmovd %edi, %k1
+; SKX-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1}
+; SKX-NEXT: retq
+;
+; KNL-LABEL: test13:
+; KNL: # BB#0: # %entry
+; KNL-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0
+; KNL-NEXT: andl $1, %edi
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1}
+; KNL-NEXT: retq
entry:
%sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a
%0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %sub.i, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4)
@@ -199,7 +207,7 @@ entry:
define <16 x float> @test14(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; SKX-LABEL: test14:
; SKX: # BB#0: # %entry
-; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vfnmsub132ps {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; SKX-NEXT: vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; SKX-NEXT: retq
@@ -219,7 +227,7 @@ entry:
define <16 x float> @test15(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; SKX-LABEL: test15:
; SKX: # BB#0: # %entry
-; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm3
; SKX-NEXT: vfnmadd213ps {ru-sae}, %zmm2, %zmm0, %zmm1
; SKX-NEXT: vmovaps %zmm1, %zmm3 {%k1}
diff --git a/test/CodeGen/X86/fma.ll b/test/CodeGen/X86/fma.ll
index b91479cda871..2c942347d54c 100644
--- a/test/CodeGen/X86/fma.ll
+++ b/test/CodeGen/X86/fma.ll
@@ -1,47 +1,413 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin10 -mattr=+fma,-fma4 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-FMA-INST
-; RUN: llc < %s -mtriple=i386-apple-darwin10 -mattr=-fma,-fma4 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-FMA-CALL
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mattr=+fma,-fma4 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-FMA-INST
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mattr=-fma,-fma4 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-FMA-CALL
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mattr=+avx512f,-fma,-fma4 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-FMA-INST
-; RUN: llc < %s -march=x86 -mcpu=bdver2 -mattr=-fma4 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-FMA-INST
-; RUN: llc < %s -march=x86 -mcpu=bdver2 -mattr=-fma,-fma4 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-FMA-CALL
-
-; CHECK-LABEL: test_f32:
-; CHECK-FMA-INST: vfmadd213ss
-; CHECK-FMA-CALL: fmaf
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i386-apple-darwin10 -mattr=+avx,+fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=FMA32
+; RUN: llc < %s -mtriple=i386-apple-darwin10 -mattr=+avx,-fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=FMACALL32
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mattr=+fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=FMA64
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mattr=-fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=FMACALL64
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mattr=+avx512f,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX51264
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mattr=+avx512vl,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=i386-apple-darwin10 -mcpu=bdver2 -mattr=-fma4 -show-mc-encoding | FileCheck %s --check-prefix=FMA32
+; RUN: llc < %s -mtriple=i386-apple-darwin10 -mcpu=bdver2 -mattr=-fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=FMACALL32
+
define float @test_f32(float %a, float %b, float %c) #0 {
+; FMA32-LABEL: test_f32:
+; FMA32: ## BB#0: ## %entry
+; FMA32-NEXT: pushl %eax ## encoding: [0x50]
+; FMA32-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfa,0x10,0x44,0x24,0x08]
+; FMA32-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; FMA32-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x0c]
+; FMA32-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; FMA32-NEXT: vfmadd213ss {{[0-9]+}}(%esp), %xmm0, %xmm1 ## encoding: [0xc4,0xe2,0x79,0xa9,0x4c,0x24,0x10]
+; FMA32-NEXT: vmovss %xmm1, (%esp) ## encoding: [0xc5,0xfa,0x11,0x0c,0x24]
+; FMA32-NEXT: flds (%esp) ## encoding: [0xd9,0x04,0x24]
+; FMA32-NEXT: popl %eax ## encoding: [0x58]
+; FMA32-NEXT: retl ## encoding: [0xc3]
+;
+; FMACALL32-LABEL: test_f32:
+; FMACALL32: ## BB#0: ## %entry
+; FMACALL32-NEXT: jmp _fmaf ## TAILCALL
+; FMACALL32-NEXT: ## encoding: [0xeb,A]
+; FMACALL32-NEXT: ## fixup A - offset: 1, value: _fmaf-1, kind: FK_PCRel_1
+;
+; FMA64-LABEL: test_f32:
+; FMA64: ## BB#0: ## %entry
+; FMA64-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
+; FMA64-NEXT: retq ## encoding: [0xc3]
+;
+; FMACALL64-LABEL: test_f32:
+; FMACALL64: ## BB#0: ## %entry
+; FMACALL64-NEXT: jmp _fmaf ## TAILCALL
+; FMACALL64-NEXT: ## encoding: [0xeb,A]
+; FMACALL64-NEXT: ## fixup A - offset: 1, value: _fmaf-1, kind: FK_PCRel_1
+;
+; AVX512-LABEL: test_f32:
+; AVX512: ## BB#0: ## %entry
+; AVX512-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
+; AVX512-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_f32:
+; AVX512VL: ## BB#0: ## %entry
+; AVX512VL-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
+; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
%call = call float @llvm.fma.f32(float %a, float %b, float %c)
ret float %call
}
-; CHECK-LABEL: test_f64:
-; CHECK-FMA-INST: vfmadd213sd
-; CHECK-FMA-CALL: fma
define double @test_f64(double %a, double %b, double %c) #0 {
+; FMA32-LABEL: test_f64:
+; FMA32: ## BB#0: ## %entry
+; FMA32-NEXT: subl $12, %esp ## encoding: [0x83,0xec,0x0c]
+; FMA32-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfb,0x10,0x44,0x24,0x10]
+; FMA32-NEXT: ## xmm0 = mem[0],zero
+; FMA32-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x18]
+; FMA32-NEXT: ## xmm1 = mem[0],zero
+; FMA32-NEXT: vfmadd213sd {{[0-9]+}}(%esp), %xmm0, %xmm1 ## encoding: [0xc4,0xe2,0xf9,0xa9,0x4c,0x24,0x20]
+; FMA32-NEXT: vmovsd %xmm1, (%esp) ## encoding: [0xc5,0xfb,0x11,0x0c,0x24]
+; FMA32-NEXT: fldl (%esp) ## encoding: [0xdd,0x04,0x24]
+; FMA32-NEXT: addl $12, %esp ## encoding: [0x83,0xc4,0x0c]
+; FMA32-NEXT: retl ## encoding: [0xc3]
+;
+; FMACALL32-LABEL: test_f64:
+; FMACALL32: ## BB#0: ## %entry
+; FMACALL32-NEXT: jmp _fma ## TAILCALL
+; FMACALL32-NEXT: ## encoding: [0xeb,A]
+; FMACALL32-NEXT: ## fixup A - offset: 1, value: _fma-1, kind: FK_PCRel_1
+;
+; FMA64-LABEL: test_f64:
+; FMA64: ## BB#0: ## %entry
+; FMA64-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
+; FMA64-NEXT: retq ## encoding: [0xc3]
+;
+; FMACALL64-LABEL: test_f64:
+; FMACALL64: ## BB#0: ## %entry
+; FMACALL64-NEXT: jmp _fma ## TAILCALL
+; FMACALL64-NEXT: ## encoding: [0xeb,A]
+; FMACALL64-NEXT: ## fixup A - offset: 1, value: _fma-1, kind: FK_PCRel_1
+;
+; AVX512-LABEL: test_f64:
+; AVX512: ## BB#0: ## %entry
+; AVX512-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
+; AVX512-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_f64:
+; AVX512VL: ## BB#0: ## %entry
+; AVX512VL-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
+; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
%call = call double @llvm.fma.f64(double %a, double %b, double %c)
ret double %call
}
-; CHECK-LABEL: test_f80:
-; CHECK: fmal
define x86_fp80 @test_f80(x86_fp80 %a, x86_fp80 %b, x86_fp80 %c) #0 {
+; FMA32-LABEL: test_f80:
+; FMA32: ## BB#0: ## %entry
+; FMA32-NEXT: subl $60, %esp ## encoding: [0x83,0xec,0x3c]
+; FMA32-NEXT: fldt {{[0-9]+}}(%esp) ## encoding: [0xdb,0x6c,0x24,0x40]
+; FMA32-NEXT: fldt {{[0-9]+}}(%esp) ## encoding: [0xdb,0x6c,0x24,0x50]
+; FMA32-NEXT: fldt {{[0-9]+}}(%esp) ## encoding: [0xdb,0x6c,0x24,0x60]
+; FMA32-NEXT: fstpt {{[0-9]+}}(%esp) ## encoding: [0xdb,0x7c,0x24,0x20]
+; FMA32-NEXT: fstpt {{[0-9]+}}(%esp) ## encoding: [0xdb,0x7c,0x24,0x10]
+; FMA32-NEXT: fstpt (%esp) ## encoding: [0xdb,0x3c,0x24]
+; FMA32-NEXT: calll _fmal ## encoding: [0xe8,A,A,A,A]
+; FMA32-NEXT: ## fixup A - offset: 1, value: _fmal-4, kind: FK_PCRel_4
+; FMA32-NEXT: addl $60, %esp ## encoding: [0x83,0xc4,0x3c]
+; FMA32-NEXT: retl ## encoding: [0xc3]
+;
+; FMACALL32-LABEL: test_f80:
+; FMACALL32: ## BB#0: ## %entry
+; FMACALL32-NEXT: subl $60, %esp ## encoding: [0x83,0xec,0x3c]
+; FMACALL32-NEXT: fldt {{[0-9]+}}(%esp) ## encoding: [0xdb,0x6c,0x24,0x40]
+; FMACALL32-NEXT: fldt {{[0-9]+}}(%esp) ## encoding: [0xdb,0x6c,0x24,0x50]
+; FMACALL32-NEXT: fldt {{[0-9]+}}(%esp) ## encoding: [0xdb,0x6c,0x24,0x60]
+; FMACALL32-NEXT: fstpt {{[0-9]+}}(%esp) ## encoding: [0xdb,0x7c,0x24,0x20]
+; FMACALL32-NEXT: fstpt {{[0-9]+}}(%esp) ## encoding: [0xdb,0x7c,0x24,0x10]
+; FMACALL32-NEXT: fstpt (%esp) ## encoding: [0xdb,0x3c,0x24]
+; FMACALL32-NEXT: calll _fmal ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT: ## fixup A - offset: 1, value: _fmal-4, kind: FK_PCRel_4
+; FMACALL32-NEXT: addl $60, %esp ## encoding: [0x83,0xc4,0x3c]
+; FMACALL32-NEXT: retl ## encoding: [0xc3]
+;
+; FMA64-LABEL: test_f80:
+; FMA64: ## BB#0: ## %entry
+; FMA64-NEXT: subq $56, %rsp ## encoding: [0x48,0x83,0xec,0x38]
+; FMA64-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x40]
+; FMA64-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x50]
+; FMA64-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x60]
+; FMA64-NEXT: fstpt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x7c,0x24,0x20]
+; FMA64-NEXT: fstpt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x7c,0x24,0x10]
+; FMA64-NEXT: fstpt (%rsp) ## encoding: [0xdb,0x3c,0x24]
+; FMA64-NEXT: callq _fmal ## encoding: [0xe8,A,A,A,A]
+; FMA64-NEXT: ## fixup A - offset: 1, value: _fmal-4, kind: FK_PCRel_4
+; FMA64-NEXT: addq $56, %rsp ## encoding: [0x48,0x83,0xc4,0x38]
+; FMA64-NEXT: retq ## encoding: [0xc3]
+;
+; FMACALL64-LABEL: test_f80:
+; FMACALL64: ## BB#0: ## %entry
+; FMACALL64-NEXT: subq $56, %rsp ## encoding: [0x48,0x83,0xec,0x38]
+; FMACALL64-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x40]
+; FMACALL64-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x50]
+; FMACALL64-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x60]
+; FMACALL64-NEXT: fstpt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x7c,0x24,0x20]
+; FMACALL64-NEXT: fstpt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x7c,0x24,0x10]
+; FMACALL64-NEXT: fstpt (%rsp) ## encoding: [0xdb,0x3c,0x24]
+; FMACALL64-NEXT: callq _fmal ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT: ## fixup A - offset: 1, value: _fmal-4, kind: FK_PCRel_4
+; FMACALL64-NEXT: addq $56, %rsp ## encoding: [0x48,0x83,0xc4,0x38]
+; FMACALL64-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_f80:
+; AVX512: ## BB#0: ## %entry
+; AVX512-NEXT: subq $56, %rsp ## encoding: [0x48,0x83,0xec,0x38]
+; AVX512-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x40]
+; AVX512-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x50]
+; AVX512-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x60]
+; AVX512-NEXT: fstpt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x7c,0x24,0x20]
+; AVX512-NEXT: fstpt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x7c,0x24,0x10]
+; AVX512-NEXT: fstpt (%rsp) ## encoding: [0xdb,0x3c,0x24]
+; AVX512-NEXT: callq _fmal ## encoding: [0xe8,A,A,A,A]
+; AVX512-NEXT: ## fixup A - offset: 1, value: _fmal-4, kind: FK_PCRel_4
+; AVX512-NEXT: addq $56, %rsp ## encoding: [0x48,0x83,0xc4,0x38]
+; AVX512-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_f80:
+; AVX512VL: ## BB#0: ## %entry
+; AVX512VL-NEXT: subq $56, %rsp ## encoding: [0x48,0x83,0xec,0x38]
+; AVX512VL-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x40]
+; AVX512VL-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x50]
+; AVX512VL-NEXT: fldt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x6c,0x24,0x60]
+; AVX512VL-NEXT: fstpt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x7c,0x24,0x20]
+; AVX512VL-NEXT: fstpt {{[0-9]+}}(%rsp) ## encoding: [0xdb,0x7c,0x24,0x10]
+; AVX512VL-NEXT: fstpt (%rsp) ## encoding: [0xdb,0x3c,0x24]
+; AVX512VL-NEXT: callq _fmal ## encoding: [0xe8,A,A,A,A]
+; AVX512VL-NEXT: ## fixup A - offset: 1, value: _fmal-4, kind: FK_PCRel_4
+; AVX512VL-NEXT: addq $56, %rsp ## encoding: [0x48,0x83,0xc4,0x38]
+; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
%call = call x86_fp80 @llvm.fma.f80(x86_fp80 %a, x86_fp80 %b, x86_fp80 %c)
ret x86_fp80 %call
}
-; CHECK-LABEL: test_f32_cst:
-; CHECK-NOT: vfmadd
define float @test_f32_cst() #0 {
+; FMA32-LABEL: test_f32_cst:
+; FMA32: ## BB#0: ## %entry
+; FMA32-NEXT: flds LCPI3_0 ## encoding: [0xd9,0x05,A,A,A,A]
+; FMA32-NEXT: ## fixup A - offset: 2, value: LCPI3_0, kind: FK_Data_4
+; FMA32-NEXT: retl ## encoding: [0xc3]
+;
+; FMACALL32-LABEL: test_f32_cst:
+; FMACALL32: ## BB#0: ## %entry
+; FMACALL32-NEXT: flds LCPI3_0 ## encoding: [0xd9,0x05,A,A,A,A]
+; FMACALL32-NEXT: ## fixup A - offset: 2, value: LCPI3_0, kind: FK_Data_4
+; FMACALL32-NEXT: retl ## encoding: [0xc3]
+;
+; FMA64-LABEL: test_f32_cst:
+; FMA64: ## BB#0: ## %entry
+; FMA64-NEXT: vmovss {{.*}}(%rip), %xmm0 ## encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
+; FMA64-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
+; FMA64-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; FMA64-NEXT: retq ## encoding: [0xc3]
+;
+; FMACALL64-LABEL: test_f32_cst:
+; FMACALL64: ## BB#0: ## %entry
+; FMACALL64-NEXT: movss {{.*}}(%rip), %xmm0 ## encoding: [0xf3,0x0f,0x10,0x05,A,A,A,A]
+; FMACALL64-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
+; FMACALL64-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; FMACALL64-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_f32_cst:
+; AVX512: ## BB#0: ## %entry
+; AVX512-NEXT: vmovss {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
+; AVX512-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
+; AVX512-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_f32_cst:
+; AVX512VL: ## BB#0: ## %entry
+; AVX512VL-NEXT: vmovss {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
+; AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
+; AVX512VL-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
%call = call float @llvm.fma.f32(float 3.0, float 3.0, float 3.0)
ret float %call
}
+define <4 x float> @test_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #0 {
+; FMA32-LABEL: test_v4f32:
+; FMA32: ## BB#0: ## %entry
+; FMA32-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
+; FMA32-NEXT: retl ## encoding: [0xc3]
+;
+; FMA64-LABEL: test_v4f32:
+; FMA64: ## BB#0: ## %entry
+; FMA64-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
+; FMA64-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_v4f32:
+; AVX512: ## BB#0: ## %entry
+; AVX512-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
+; AVX512-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_v4f32:
+; AVX512VL: ## BB#0: ## %entry
+; AVX512VL-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
+; AVX512VL-NEXT: retq ## encoding: [0xc3]
+entry:
+ %call = call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c)
+ ret <4 x float> %call
+}
+
+define <8 x float> @test_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) #0 {
+; FMA32-LABEL: test_v8f32:
+; FMA32: ## BB#0: ## %entry
+; FMA32-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
+; FMA32-NEXT: retl ## encoding: [0xc3]
+;
+; FMA64-LABEL: test_v8f32:
+; FMA64: ## BB#0: ## %entry
+; FMA64-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
+; FMA64-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_v8f32:
+; AVX512: ## BB#0: ## %entry
+; AVX512-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
+; AVX512-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_v8f32:
+; AVX512VL: ## BB#0: ## %entry
+; AVX512VL-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
+; AVX512VL-NEXT: retq ## encoding: [0xc3]
+entry:
+ %call = call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c)
+ ret <8 x float> %call
+}
+
+define <16 x float> @test_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c) #0 {
+; FMA32-LABEL: test_v16f32:
+; FMA32: ## BB#0: ## %entry
+; FMA32-NEXT: pushl %ebp ## encoding: [0x55]
+; FMA32-NEXT: movl %esp, %ebp ## encoding: [0x89,0xe5]
+; FMA32-NEXT: andl $-32, %esp ## encoding: [0x83,0xe4,0xe0]
+; FMA32-NEXT: subl $32, %esp ## encoding: [0x83,0xec,0x20]
+; FMA32-NEXT: vfmadd213ps 8(%ebp), %ymm2, %ymm0 ## encoding: [0xc4,0xe2,0x6d,0xa8,0x45,0x08]
+; FMA32-NEXT: vfmadd213ps 40(%ebp), %ymm3, %ymm1 ## encoding: [0xc4,0xe2,0x65,0xa8,0x4d,0x28]
+; FMA32-NEXT: movl %ebp, %esp ## encoding: [0x89,0xec]
+; FMA32-NEXT: popl %ebp ## encoding: [0x5d]
+; FMA32-NEXT: retl ## encoding: [0xc3]
+;
+; FMA64-LABEL: test_v16f32:
+; FMA64: ## BB#0: ## %entry
+; FMA64-NEXT: vfmadd213ps %ymm4, %ymm2, %ymm0 ## encoding: [0xc4,0xe2,0x6d,0xa8,0xc4]
+; FMA64-NEXT: vfmadd213ps %ymm5, %ymm3, %ymm1 ## encoding: [0xc4,0xe2,0x65,0xa8,0xcd]
+; FMA64-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_v16f32:
+; AVX512: ## BB#0: ## %entry
+; AVX512-NEXT: vfmadd213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xa8,0xc2]
+; AVX512-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_v16f32:
+; AVX512VL: ## BB#0: ## %entry
+; AVX512VL-NEXT: vfmadd213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xa8,0xc2]
+; AVX512VL-NEXT: retq ## encoding: [0xc3]
+entry:
+ %call = call <16 x float> @llvm.fma.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c)
+ ret <16 x float> %call
+}
+
+define <2 x double> @test_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) #0 {
+; FMA32-LABEL: test_v2f64:
+; FMA32: ## BB#0: ## %entry
+; FMA32-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
+; FMA32-NEXT: retl ## encoding: [0xc3]
+;
+; FMA64-LABEL: test_v2f64:
+; FMA64: ## BB#0: ## %entry
+; FMA64-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
+; FMA64-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_v2f64:
+; AVX512: ## BB#0: ## %entry
+; AVX512-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
+; AVX512-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_v2f64:
+; AVX512VL: ## BB#0: ## %entry
+; AVX512VL-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
+; AVX512VL-NEXT: retq ## encoding: [0xc3]
+entry:
+ %call = call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
+ ret <2 x double> %call
+}
+
+define <4 x double> @test_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #0 {
+; FMA32-LABEL: test_v4f64:
+; FMA32: ## BB#0: ## %entry
+; FMA32-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
+; FMA32-NEXT: retl ## encoding: [0xc3]
+;
+; FMA64-LABEL: test_v4f64:
+; FMA64: ## BB#0: ## %entry
+; FMA64-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
+; FMA64-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_v4f64:
+; AVX512: ## BB#0: ## %entry
+; AVX512-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
+; AVX512-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_v4f64:
+; AVX512VL: ## BB#0: ## %entry
+; AVX512VL-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
+; AVX512VL-NEXT: retq ## encoding: [0xc3]
+entry:
+ %call = call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c)
+ ret <4 x double> %call
+}
+
+define <8 x double> @test_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c) #0 {
+; FMA32-LABEL: test_v8f64:
+; FMA32: ## BB#0: ## %entry
+; FMA32-NEXT: pushl %ebp ## encoding: [0x55]
+; FMA32-NEXT: movl %esp, %ebp ## encoding: [0x89,0xe5]
+; FMA32-NEXT: andl $-32, %esp ## encoding: [0x83,0xe4,0xe0]
+; FMA32-NEXT: subl $32, %esp ## encoding: [0x83,0xec,0x20]
+; FMA32-NEXT: vfmadd213pd 8(%ebp), %ymm2, %ymm0 ## encoding: [0xc4,0xe2,0xed,0xa8,0x45,0x08]
+; FMA32-NEXT: vfmadd213pd 40(%ebp), %ymm3, %ymm1 ## encoding: [0xc4,0xe2,0xe5,0xa8,0x4d,0x28]
+; FMA32-NEXT: movl %ebp, %esp ## encoding: [0x89,0xec]
+; FMA32-NEXT: popl %ebp ## encoding: [0x5d]
+; FMA32-NEXT: retl ## encoding: [0xc3]
+;
+; FMA64-LABEL: test_v8f64:
+; FMA64: ## BB#0: ## %entry
+; FMA64-NEXT: vfmadd213pd %ymm4, %ymm2, %ymm0 ## encoding: [0xc4,0xe2,0xed,0xa8,0xc4]
+; FMA64-NEXT: vfmadd213pd %ymm5, %ymm3, %ymm1 ## encoding: [0xc4,0xe2,0xe5,0xa8,0xcd]
+; FMA64-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_v8f64:
+; AVX512: ## BB#0: ## %entry
+; AVX512-NEXT: vfmadd213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xa8,0xc2]
+; AVX512-NEXT: retq ## encoding: [0xc3]
+;
+; AVX512VL-LABEL: test_v8f64:
+; AVX512VL: ## BB#0: ## %entry
+; AVX512VL-NEXT: vfmadd213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xa8,0xc2]
+; AVX512VL-NEXT: retq ## encoding: [0xc3]
+entry:
+ %call = call <8 x double> @llvm.fma.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c)
+ ret <8 x double> %call
+}
+
declare float @llvm.fma.f32(float, float, float)
declare double @llvm.fma.f64(double, double, double)
declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80)
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
+declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>)
+declare <16 x float> @llvm.fma.v16f32(<16 x float>, <16 x float>, <16 x float>)
+
+declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
+declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>)
+declare <8 x double> @llvm.fma.v8f64(<8 x double>, <8 x double>, <8 x double>)
+
attributes #0 = { nounwind }
diff --git a/test/CodeGen/X86/fma_patterns.ll b/test/CodeGen/X86/fma_patterns.ll
index 2554b0201a66..002b0746d3c3 100644
--- a/test/CodeGen/X86/fma_patterns.ll
+++ b/test/CodeGen/X86/fma_patterns.ll
@@ -1483,7 +1483,7 @@ define double @test_f64_fneg_fmul(double %x, double %y) #0 {
;
; AVX512-LABEL: test_f64_fneg_fmul:
; AVX512: # BB#0:
-; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0
; AVX512-NEXT: retq
%m = fmul nsz double %x, %y
diff --git a/test/CodeGen/X86/fold-vector-sext-zext.ll b/test/CodeGen/X86/fold-vector-sext-zext.ll
index 3f502efa753e..575bd5897e47 100644
--- a/test/CodeGen/X86/fold-vector-sext-zext.ll
+++ b/test/CodeGen/X86/fold-vector-sext-zext.ll
@@ -245,9 +245,8 @@ define <4 x i32> @test_zext_4i8_4i32() {
define <4 x i64> @test_zext_4i8_4i64() {
; X32-LABEL: test_zext_4i8_4i64:
; X32: # BB#0:
-; X32-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
-; X32-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
-; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,255,0]
+; X32-NEXT: vinsertf128 $1, {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_zext_4i8_4i64:
@@ -301,11 +300,9 @@ define <4 x i32> @test_zext_4i8_4i32_undef() {
define <4 x i64> @test_zext_4i8_4i64_undef() {
; X32-LABEL: test_zext_4i8_4i64_undef:
; X32: # BB#0:
-; X32-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; X32-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; X32-NEXT: vmovaps {{.*#+}} xmm0 = <u,u,255,0>
; X32-NEXT: movl $2, %eax
; X32-NEXT: vmovd %eax, %xmm1
-; X32-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
diff --git a/test/CodeGen/X86/fp-intrinsics.ll b/test/CodeGen/X86/fp-intrinsics.ll
new file mode 100644
index 000000000000..88aef6bb0659
--- /dev/null
+++ b/test/CodeGen/X86/fp-intrinsics.ll
@@ -0,0 +1,111 @@
+; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s
+
+; Verify that constants aren't folded to inexact results when the rounding mode
+; is unknown.
+;
+; double f1() {
+; // Because 0.1 cannot be represented exactly, this shouldn't be folded.
+; return 1.0/10.0;
+; }
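+; (Under "round.dynamic" the correctly rounded quotient of 1.0/10.0 differs
+; between rounding directions, so a compile-time fold would have to commit to
+; one mode's result; the test therefore requires a runtime divsd.)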
+;
+; CHECK-LABEL: f1:
+; CHECK: divsd
+define double @f1() {
+entry:
+ %div = call double @llvm.experimental.constrained.fdiv.f64(
+ double 1.000000e+00,
+ double 1.000000e+01,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict")
+ ret double %div
+}
+
+; Verify that 'a - 0' isn't simplified to 'a' when the rounding mode is unknown.
+;
+; double f2(double a) {
+; // Because the result of '0 - 0' is negative zero if the rounding mode is
+; // downward, this shouldn't be simplified.
+; return a - 0;
+; }
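+; (Concretely: under round-toward-negative, +0.0 - 0.0 yields -0.0, so the
+; rewrite a - 0 -> a would be wrong for a == +0.0.)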
+;
+; CHECK-LABEL: f2:
+; CHECK: subsd
+define double @f2(double %a) {
+entry:
+ %sub = call double @llvm.experimental.constrained.fsub.f64(
+ double %a,
+ double 0.000000e+00,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict")
+ ret double %sub
+}
+
+; Verify that '-((-a)*b)' isn't simplified to 'a*b' when the rounding mode is
+; unknown.
+;
+; double f3(double a, double b) {
+; // Because the intermediate value involved in this calculation may require
+; // rounding, this shouldn't be simplified.
+; return -((-a)*b);
+; }
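+; (Each negation below is expressed as a constrained fsub from -0.0, so the
+; checks expect the literal subsd/mulsd/subsd sequence instead of a folded
+; a*b.)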
+;
+; CHECK-LABEL: f3:
+; CHECK: subsd
+; CHECK: mulsd
+; CHECK: subsd
+define double @f3(double %a, double %b) {
+entry:
+ %sub = call double @llvm.experimental.constrained.fsub.f64(
+ double -0.000000e+00, double %a,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict")
+ %mul = call double @llvm.experimental.constrained.fmul.f64(
+ double %sub, double %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict")
+ %ret = call double @llvm.experimental.constrained.fsub.f64(
+ double -0.000000e+00,
+ double %mul,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict")
+ ret double %ret
+}
+
+; Verify that FP operations are not performed speculatively when FP exceptions
+; are not being ignored.
+;
+; double f4(int n, double a) {
+; // Because a + 1 may overflow, this should not be simplified.
+; if (n > 0)
+; return a + 1.0;
+; return a;
+; }
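+; (Hoisting the fadd above the branch could raise a spurious overflow or
+; inexact exception when n <= 0, so the addsd must stay guarded by the jle.)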
+;
+; CHECK-LABEL: f4:
+; CHECK: testl
+; CHECK: jle
+; CHECK: addsd
+define double @f4(i32 %n, double %a) {
+entry:
+ %cmp = icmp sgt i32 %n, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %add = call double @llvm.experimental.constrained.fadd.f64(
+ double 1.000000e+00, double %a,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict")
+ br label %if.end
+
+if.end:
+ %a.0 = phi double [%add, %if.then], [ %a, %entry ]
+ ret double %a.0
+}
+
+@llvm.fp.env = thread_local global i8 zeroinitializer, section "llvm.metadata"
+declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
+declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
+declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
+declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata)
diff --git a/test/CodeGen/X86/fp-select-cmp-and.ll b/test/CodeGen/X86/fp-select-cmp-and.ll
index c9c8922c97f3..e012809cf480 100644
--- a/test/CodeGen/X86/fp-select-cmp-and.ll
+++ b/test/CodeGen/X86/fp-select-cmp-and.ll
@@ -5,7 +5,7 @@ define double @test1(double %a, double %b, double %eps) {
; CHECK-LABEL: test1:
; CHECK: # BB#0:
; CHECK-NEXT: cmpltsd %xmm2, %xmm0
-; CHECK-NEXT: andps %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm1, %xmm0
; CHECK-NEXT: retq
;
%cmp = fcmp olt double %a, %eps
@@ -17,7 +17,7 @@ define double @test2(double %a, double %b, double %eps) {
; CHECK-LABEL: test2:
; CHECK: # BB#0:
; CHECK-NEXT: cmplesd %xmm2, %xmm0
-; CHECK-NEXT: andps %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm1, %xmm0
; CHECK-NEXT: retq
;
%cmp = fcmp ole double %a, %eps
@@ -29,8 +29,8 @@ define double @test3(double %a, double %b, double %eps) {
; CHECK-LABEL: test3:
; CHECK: # BB#0:
; CHECK-NEXT: cmpltsd %xmm0, %xmm2
-; CHECK-NEXT: andps %xmm1, %xmm2
-; CHECK-NEXT: movaps %xmm2, %xmm0
+; CHECK-NEXT: andpd %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
; CHECK-NEXT: retq
;
%cmp = fcmp ogt double %a, %eps
@@ -42,8 +42,8 @@ define double @test4(double %a, double %b, double %eps) {
; CHECK-LABEL: test4:
; CHECK: # BB#0:
; CHECK-NEXT: cmplesd %xmm0, %xmm2
-; CHECK-NEXT: andps %xmm1, %xmm2
-; CHECK-NEXT: movaps %xmm2, %xmm0
+; CHECK-NEXT: andpd %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
; CHECK-NEXT: retq
;
%cmp = fcmp oge double %a, %eps
@@ -55,7 +55,7 @@ define double @test5(double %a, double %b, double %eps) {
; CHECK-LABEL: test5:
; CHECK: # BB#0:
; CHECK-NEXT: cmpltsd %xmm2, %xmm0
-; CHECK-NEXT: andnps %xmm1, %xmm0
+; CHECK-NEXT: andnpd %xmm1, %xmm0
; CHECK-NEXT: retq
;
%cmp = fcmp olt double %a, %eps
@@ -67,7 +67,7 @@ define double @test6(double %a, double %b, double %eps) {
; CHECK-LABEL: test6:
; CHECK: # BB#0:
; CHECK-NEXT: cmplesd %xmm2, %xmm0
-; CHECK-NEXT: andnps %xmm1, %xmm0
+; CHECK-NEXT: andnpd %xmm1, %xmm0
; CHECK-NEXT: retq
;
%cmp = fcmp ole double %a, %eps
@@ -79,8 +79,8 @@ define double @test7(double %a, double %b, double %eps) {
; CHECK-LABEL: test7:
; CHECK: # BB#0:
; CHECK-NEXT: cmpltsd %xmm0, %xmm2
-; CHECK-NEXT: andnps %xmm1, %xmm2
-; CHECK-NEXT: movaps %xmm2, %xmm0
+; CHECK-NEXT: andnpd %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
; CHECK-NEXT: retq
;
%cmp = fcmp ogt double %a, %eps
@@ -92,8 +92,8 @@ define double @test8(double %a, double %b, double %eps) {
; CHECK-LABEL: test8:
; CHECK: # BB#0:
; CHECK-NEXT: cmplesd %xmm0, %xmm2
-; CHECK-NEXT: andnps %xmm1, %xmm2
-; CHECK-NEXT: movaps %xmm2, %xmm0
+; CHECK-NEXT: andnpd %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
; CHECK-NEXT: retq
;
%cmp = fcmp oge double %a, %eps
@@ -220,10 +220,10 @@ define double @test18(double %a, double %b, double %c, double %eps) {
; CHECK-LABEL: test18:
; CHECK: # BB#0:
; CHECK-NEXT: cmplesd %xmm0, %xmm3
-; CHECK-NEXT: andps %xmm3, %xmm2
-; CHECK-NEXT: andnps %xmm1, %xmm3
-; CHECK-NEXT: orps %xmm2, %xmm3
-; CHECK-NEXT: movaps %xmm3, %xmm0
+; CHECK-NEXT: andpd %xmm3, %xmm2
+; CHECK-NEXT: andnpd %xmm1, %xmm3
+; CHECK-NEXT: orpd %xmm2, %xmm3
+; CHECK-NEXT: movapd %xmm3, %xmm0
; CHECK-NEXT: retq
;
%cmp = fcmp oge double %a, %eps
diff --git a/test/CodeGen/X86/fp-une-cmp.ll b/test/CodeGen/X86/fp-une-cmp.ll
index e3b2a04060ba..1b5af5aba366 100644
--- a/test/CodeGen/X86/fp-une-cmp.ll
+++ b/test/CodeGen/X86/fp-une-cmp.ll
@@ -36,8 +36,8 @@ define double @rdar_7859988(double %x, double %y) nounwind readnone optsize ssp
entry:
%mul = fmul double %x, %y
- %cmp = fcmp une double %mul, 0.000000e+00
- br i1 %cmp, label %bb2, label %bb1
+ %cmp = fcmp oeq double %mul, 0.000000e+00
+ br i1 %cmp, label %bb1, label %bb2
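+ ; (oeq is the exact inverse of the une predicate it replaces, so swapping
+ ; the branch successors preserves the original semantics.)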
bb1:
%add = fadd double %mul, -1.000000e+00
diff --git a/test/CodeGen/X86/fp128-cast.ll b/test/CodeGen/X86/fp128-cast.ll
index 9408437ecc8a..6568f73029e0 100644
--- a/test/CodeGen/X86/fp128-cast.ll
+++ b/test/CodeGen/X86/fp128-cast.ll
@@ -152,7 +152,7 @@ entry:
; X32: retl
;
; X64-LABEL: TestFPTruncF128_F64:
-; X64: movapd vf128(%rip), %xmm0
+; X64: movaps vf128(%rip), %xmm0
; X64-NEXT: callq __trunctfdf2
; X64-NEXT: movsd %xmm0, vf64(%rip)
; X64: retq
diff --git a/test/CodeGen/X86/fp128-compare.ll b/test/CodeGen/X86/fp128-compare.ll
index 6ad3b74aeafa..7ee2e90657c0 100644
--- a/test/CodeGen/X86/fp128-compare.ll
+++ b/test/CodeGen/X86/fp128-compare.ll
@@ -1,5 +1,7 @@
-; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx | FileCheck %s
-; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+mmx | FileCheck %s
+; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx \
+; RUN: -enable-legalize-types-checking | FileCheck %s
+; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+mmx \
+; RUN: -enable-legalize-types-checking | FileCheck %s
define i32 @TestComp128GT(fp128 %d1, fp128 %d2) {
entry:
diff --git a/test/CodeGen/X86/fp128-g.ll b/test/CodeGen/X86/fp128-g.ll
index 192ac7af39ff..5eeef0cb77c4 100644
--- a/test/CodeGen/X86/fp128-g.ll
+++ b/test/CodeGen/X86/fp128-g.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx \
+; RUN: -enable-legalize-types-checking | FileCheck %s --check-prefix=X64
;
; These cases check if x86_64-linux-android works with -O2 -g,
; especially CSE matching needed by SoftenFloatRes_LOAD.
diff --git a/test/CodeGen/X86/fp128-i128.ll b/test/CodeGen/X86/fp128-i128.ll
index 77160674ab20..98082ec611d4 100644
--- a/test/CodeGen/X86/fp128-i128.ll
+++ b/test/CodeGen/X86/fp128-i128.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx | FileCheck %s
-; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+mmx | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx -enable-legalize-types-checking | FileCheck %s
+; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+mmx -enable-legalize-types-checking | FileCheck %s
; These tests were generated from simplified libm C code.
; When compiled for the x86_64-linux-android target,
@@ -41,6 +42,19 @@
; foo(w);
; }
define void @TestUnionLD1(fp128 %s, i64 %n) #0 {
+; CHECK-LABEL: TestUnionLD1:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq -{{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT: movabsq $281474976710655, %rcx # imm = 0xFFFFFFFFFFFF
+; CHECK-NEXT: andq %rdi, %rcx
+; CHECK-NEXT: movabsq $-281474976710656, %rdx # imm = 0xFFFF000000000000
+; CHECK-NEXT: andq -{{[0-9]+}}(%rsp), %rdx
+; CHECK-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: orq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; CHECK-NEXT: jmp foo # TAILCALL
entry:
%0 = bitcast fp128 %s to i128
%1 = zext i64 %n to i128
@@ -51,18 +65,6 @@ entry:
%2 = bitcast i128 %bf.set to fp128
tail call void @foo(fp128 %2) #2
ret void
-; CHECK-LABEL: TestUnionLD1:
-; CHECK: movaps %xmm0, -24(%rsp)
-; CHECK-NEXT: movq -24(%rsp), %rax
-; CHECK-NEXT: movabsq $281474976710655, %rcx
-; CHECK-NEXT: andq %rdi, %rcx
-; CHECK-NEXT: movabsq $-281474976710656, %rdx
-; CHECK-NEXT: andq -16(%rsp), %rdx
-; CHECK-NEXT: movq %rax, -40(%rsp)
-; CHECK-NEXT: orq %rcx, %rdx
-; CHECK-NEXT: movq %rdx, -32(%rsp)
-; CHECK-NEXT: movaps -40(%rsp), %xmm0
-; CHECK-NEXT: jmp foo
}
; C code:
@@ -75,18 +77,19 @@ entry:
; return w;
; }
define fp128 @TestUnionLD2(fp128 %s) #0 {
+; CHECK-LABEL: TestUnionLD2:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq -{{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq $0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; CHECK-NEXT: retq
entry:
%0 = bitcast fp128 %s to i128
%bf.clear = and i128 %0, -18446744073709551616
%1 = bitcast i128 %bf.clear to fp128
ret fp128 %1
-; CHECK-LABEL: TestUnionLD2:
-; CHECK: movaps %xmm0, -24(%rsp)
-; CHECK-NEXT: movq -16(%rsp), %rax
-; CHECK-NEXT: movq %rax, -32(%rsp)
-; CHECK-NEXT: movq $0, -40(%rsp)
-; CHECK-NEXT: movaps -40(%rsp), %xmm0
-; CHECK-NEXT: retq
}
; C code:
@@ -98,6 +101,25 @@ entry:
; return (z.e < 0.1L) ? 1.0L : 2.0L;
; }
define fp128 @TestI128_1(fp128 %x) #0 {
+; CHECK-LABEL: TestI128_1:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: subq $40, %rsp
+; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT: movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF
+; CHECK-NEXT: andq {{[0-9]+}}(%rsp), %rcx
+; CHECK-NEXT: movq %rcx, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq %rax, (%rsp)
+; CHECK-NEXT: movaps (%rsp), %xmm0
+; CHECK-NEXT: movaps {{.*}}(%rip), %xmm1
+; CHECK-NEXT: callq __lttf2
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: testl %eax, %eax
+; CHECK-NEXT: sets %cl
+; CHECK-NEXT: shlq $4, %rcx
+; CHECK-NEXT: movaps {{\.LCPI.*}}(%rcx), %xmm0
+; CHECK-NEXT: addq $40, %rsp
+; CHECK-NEXT: retq
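+; (The select between the two fp128 constants is lowered branchlessly: the
+; sign of the __lttf2 result, materialized by sets, is scaled by 16 via
+; shlq $4 to index one of two adjacent 16-byte constant-pool entries.)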
entry:
%0 = bitcast fp128 %x to i128
%bf.clear = and i128 %0, 170141183460469231731687303715884105727
@@ -105,13 +127,6 @@ entry:
%cmp = fcmp olt fp128 %1, 0xL999999999999999A3FFB999999999999
%cond = select i1 %cmp, fp128 0xL00000000000000003FFF000000000000, fp128 0xL00000000000000004000000000000000
ret fp128 %cond
-; CHECK-LABEL: TestI128_1:
-; CHECK: movaps %xmm0,
-; CHECK: movabsq $9223372036854775807,
-; CHECK: callq __lttf2
-; CHECK: testl %eax, %eax
-; CHECK: movaps {{.*}}, %xmm0
-; CHECK: retq
}
; C code:
@@ -124,17 +139,20 @@ entry:
; return (hx & 0x8000) == 0 ? x : y;
; }
define fp128 @TestI128_2(fp128 %x, fp128 %y) #0 {
+; CHECK-LABEL: TestI128_2:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: cmpq $0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: jns .LBB3_2
+; CHECK-NEXT: # BB#1: # %entry
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: .LBB3_2: # %entry
+; CHECK-NEXT: retq
entry:
%0 = bitcast fp128 %x to i128
%cmp = icmp sgt i128 %0, -1
%cond = select i1 %cmp, fp128 %x, fp128 %y
ret fp128 %cond
-; CHECK-LABEL: TestI128_2:
-; CHECK: movaps %xmm0, -24(%rsp)
-; CHECK-NEXT: cmpq $0, -16(%rsp)
-; CHECK-NEXT: jns
-; CHECK: movaps %xmm1, %xmm0
-; CHECK: retq
}
; C code:
@@ -149,6 +167,32 @@ entry:
; return (u.e);
; }
define fp128 @TestI128_3(fp128 %x, i32* nocapture readnone %ex) #0 {
+; CHECK-LABEL: TestI128_3:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: subq $56, %rsp
+; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT: movabsq $9223090561878065152, %rcx # imm = 0x7FFF000000000000
+; CHECK-NEXT: testq %rcx, %rax
+; CHECK-NEXT: je .LBB4_2
+; CHECK-NEXT: # BB#1:
+; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; CHECK-NEXT: jmp .LBB4_3
+; CHECK-NEXT: .LBB4_2: # %if.then
+; CHECK-NEXT: movaps {{.*}}(%rip), %xmm1
+; CHECK-NEXT: callq __multf3
+; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; CHECK-NEXT: movabsq $-9223090561878065153, %rdx # imm = 0x8000FFFFFFFFFFFF
+; CHECK-NEXT: andq {{[0-9]+}}(%rsp), %rdx
+; CHECK-NEXT: movabsq $4611123068473966592, %rax # imm = 0x3FFE000000000000
+; CHECK-NEXT: orq %rdx, %rax
+; CHECK-NEXT: .LBB4_3: # %if.end
+; CHECK-NEXT: movq %rcx, (%rsp)
+; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps (%rsp), %xmm0
+; CHECK-NEXT: addq $56, %rsp
+; CHECK-NEXT: retq
entry:
%0 = bitcast fp128 %x to i128
%bf.cast = and i128 %0, 170135991163610696904058773219554885632
@@ -166,15 +210,6 @@ if.end: ; preds = %if.then, %entry
%u.sroa.0.0 = phi i128 [ %bf.set, %if.then ], [ %0, %entry ]
%2 = bitcast i128 %u.sroa.0.0 to fp128
ret fp128 %2
-; CHECK-LABEL: TestI128_3:
-; CHECK: movaps %xmm0,
-; CHECK: movabsq $9223090561878065152,
-; CHECK: testq
-; CHECK: callq __multf3
-; CHECK-NEXT: movaps %xmm0
-; CHECK: movabsq $-9223090561878065153,
-; CHECK: movabsq $4611123068473966592,
-; CHECK: retq
}
; C code:
@@ -188,21 +223,24 @@ if.end: ; preds = %if.then, %entry
; return x + df;
; }
define fp128 @TestI128_4(fp128 %x) #0 {
+; CHECK-LABEL: TestI128_4:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: subq $40, %rsp
+; CHECK-NEXT: movaps %xmm0, %xmm1
+; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq $0, (%rsp)
+; CHECK-NEXT: movaps (%rsp), %xmm0
+; CHECK-NEXT: callq __addtf3
+; CHECK-NEXT: addq $40, %rsp
+; CHECK-NEXT: retq
entry:
%0 = bitcast fp128 %x to i128
%bf.clear = and i128 %0, -18446744073709551616
%1 = bitcast i128 %bf.clear to fp128
%add = fadd fp128 %1, %x
ret fp128 %add
-; CHECK-LABEL: TestI128_4:
-; CHECK: movaps %xmm0, %xmm1
-; CHECK-NEXT: movaps %xmm1, 16(%rsp)
-; CHECK-NEXT: movq 24(%rsp), %rax
-; CHECK-NEXT: movq %rax, 8(%rsp)
-; CHECK-NEXT: movq $0, (%rsp)
-; CHECK-NEXT: movaps (%rsp), %xmm0
-; CHECK-NEXT: callq __addtf3
-; CHECK: retq
}
@v128 = common global i128 0, align 16
@@ -214,6 +252,15 @@ entry:
; v128 = ((v128 << 96) | v128_2);
; }
define void @TestShift128_2() #2 {
+; CHECK-LABEL: TestShift128_2:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movq {{.*}}(%rip), %rax
+; CHECK-NEXT: shlq $32, %rax
+; CHECK-NEXT: movq {{.*}}(%rip), %rcx
+; CHECK-NEXT: orq v128_2+{{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rcx, {{.*}}(%rip)
+; CHECK-NEXT: movq %rax, v128+{{.*}}(%rip)
+; CHECK-NEXT: retq
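+; (A 128-bit left shift by 96 keeps only bits [0,31] of v128, which land in
+; the top half of the result, so the lowering needs just one shlq $32 on the
+; low qword before it is stored to v128+8.)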
entry:
%0 = load i128, i128* @v128, align 16
%shl = shl i128 %0, 96
@@ -221,59 +268,58 @@ entry:
%or = or i128 %shl, %1
store i128 %or, i128* @v128, align 16
ret void
-; CHECK-LABEL: TestShift128_2:
-; CHECK: movq v128(%rip), %rax
-; CHECK-NEXT: shlq $32, %rax
-; CHECK-NEXT: movq v128_2(%rip), %rcx
-; CHECK-NEXT: orq v128_2+8(%rip), %rax
-; CHECK-NEXT: movq %rcx, v128(%rip)
-; CHECK-NEXT: movq %rax, v128+8(%rip)
-; CHECK-NEXT: retq
}
define fp128 @acosl(fp128 %x) #0 {
+; CHECK-LABEL: acosl:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: subq $40, %rsp
+; CHECK-NEXT: movaps %xmm0, %xmm1
+; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq $0, (%rsp)
+; CHECK-NEXT: movaps (%rsp), %xmm0
+; CHECK-NEXT: callq __addtf3
+; CHECK-NEXT: addq $40, %rsp
+; CHECK-NEXT: retq
entry:
%0 = bitcast fp128 %x to i128
%bf.clear = and i128 %0, -18446744073709551616
%1 = bitcast i128 %bf.clear to fp128
%add = fadd fp128 %1, %x
ret fp128 %add
-; CHECK-LABEL: acosl:
-; CHECK: movaps %xmm0, %xmm1
-; CHECK-NEXT: movaps %xmm1, 16(%rsp)
-; CHECK-NEXT: movq 24(%rsp), %rax
-; CHECK-NEXT: movq %rax, 8(%rsp)
-; CHECK-NEXT: movq $0, (%rsp)
-; CHECK-NEXT: movaps (%rsp), %xmm0
-; CHECK-NEXT: callq __addtf3
-; CHECK: retq
}
; Compare i128 values and check i128 constants.
define fp128 @TestComp(fp128 %x, fp128 %y) #0 {
+; CHECK-LABEL: TestComp:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: cmpq $0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: jns .LBB8_2
+; CHECK-NEXT: # BB#1: # %entry
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: .LBB8_2: # %entry
+; CHECK-NEXT: retq
entry:
%0 = bitcast fp128 %x to i128
%cmp = icmp sgt i128 %0, -1
%cond = select i1 %cmp, fp128 %x, fp128 %y
ret fp128 %cond
-; CHECK-LABEL: TestComp:
-; CHECK: movaps %xmm0, -24(%rsp)
-; CHECK-NEXT: cmpq $0, -16(%rsp)
-; CHECK-NEXT: jns
-; CHECK: movaps %xmm1, %xmm0
-; CHECK: retq
}
declare void @foo(fp128) #1
; Test logical operations on fp128 values.
define fp128 @TestFABS_LD(fp128 %x) #0 {
+; CHECK-LABEL: TestFABS_LD:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
+; CHECK-NEXT: retq
entry:
%call = tail call fp128 @fabsl(fp128 %x) #2
ret fp128 %call
-; CHECK-LABEL: TestFABS_LD
-; CHECK: andps {{.*}}, %xmm0
-; CHECK-NEXT: retq
}
declare fp128 @fabsl(fp128) #1
@@ -282,6 +328,43 @@ declare fp128 @copysignl(fp128, fp128) #1
; Test more complicated logical operations generated from copysignl.
define void @TestCopySign({ fp128, fp128 }* noalias nocapture sret %agg.result, { fp128, fp128 }* byval nocapture readonly align 16 %z) #0 {
+; CHECK-LABEL: TestCopySign:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: subq $40, %rsp
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0
+; CHECK-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1
+; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: callq __gttf2
+; CHECK-NEXT: movl %eax, %ebp
+; CHECK-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: movaps %xmm0, %xmm1
+; CHECK-NEXT: callq __subtf3
+; CHECK-NEXT: testl %ebp, %ebp
+; CHECK-NEXT: jle .LBB10_1
+; CHECK-NEXT: # BB#2: # %if.then
+; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
+; CHECK-NEXT: movaps %xmm0, %xmm1
+; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: movaps %xmm1, %xmm2
+; CHECK-NEXT: jmp .LBB10_3
+; CHECK-NEXT: .LBB10_1:
+; CHECK-NEXT: movaps (%rsp), %xmm2 # 16-byte Reload
+; CHECK-NEXT: .LBB10_3: # %cleanup
+; CHECK-NEXT: movaps {{.*}}(%rip), %xmm1
+; CHECK-NEXT: andps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
+; CHECK-NEXT: orps %xmm1, %xmm0
+; CHECK-NEXT: movaps %xmm2, (%rbx)
+; CHECK-NEXT: movaps %xmm0, 16(%rbx)
+; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: addq $40, %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: retq
entry:
%z.realp = getelementptr inbounds { fp128, fp128 }, { fp128, fp128 }* %z, i64 0, i32 0
%z.real = load fp128, fp128* %z.realp, align 16
@@ -304,17 +387,9 @@ cleanup: ; preds = %entry, %if.then
store fp128 %call.sink, fp128* %0, align 16
store fp128 %call5, fp128* %1, align 16
ret void
-; CHECK-LABEL: TestCopySign
-; CHECK-NOT: call
-; CHECK: callq __subtf3
-; CHECK-NOT: call
-; CHECK: callq __gttf2
-; CHECK-NOT: call
-; CHECK: andps {{.*}}, %xmm0
-; CHECK: retq
}
-attributes #0 = { nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+ssse3,+sse3,+popcnt,+sse,+sse2,+sse4.1,+sse4.2" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+ssse3,+sse3,+popcnt,+sse,+sse2,+sse4.1,+sse4.2" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+ssse3,+sse3,+popcnt,+sse,+sse2,+sse4.1,+sse4.2" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #2 = { nounwind readnone }
diff --git a/test/CodeGen/X86/fp128-libcalls.ll b/test/CodeGen/X86/fp128-libcalls.ll
index ee5fa447448c..09bda890fa8c 100644
--- a/test/CodeGen/X86/fp128-libcalls.ll
+++ b/test/CodeGen/X86/fp128-libcalls.ll
@@ -1,5 +1,7 @@
-; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx | FileCheck %s
-; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+mmx | FileCheck %s
+; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx \
+; RUN: -enable-legalize-types-checking | FileCheck %s
+; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+mmx \
+; RUN: -enable-legalize-types-checking | FileCheck %s
; Check all soft floating point library function calls.
diff --git a/test/CodeGen/X86/fp128-load.ll b/test/CodeGen/X86/fp128-load.ll
index 73bacf87275e..bd70ab5a1ac7 100644
--- a/test/CodeGen/X86/fp128-load.ll
+++ b/test/CodeGen/X86/fp128-load.ll
@@ -1,5 +1,7 @@
-; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx | FileCheck %s
-; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+mmx | FileCheck %s
+; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx \
+; RUN: -enable-legalize-types-checking | FileCheck %s
+; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+mmx \
+; RUN: -enable-legalize-types-checking | FileCheck %s
; __float128 myFP128 = 1.0L; // x86_64-linux-android
@my_fp128 = global fp128 0xL00000000000000003FFF000000000000, align 16
diff --git a/test/CodeGen/X86/fp128-select.ll b/test/CodeGen/X86/fp128-select.ll
index dc41d5095a71..c02db1fcdde8 100644
--- a/test/CodeGen/X86/fp128-select.ll
+++ b/test/CodeGen/X86/fp128-select.ll
@@ -1,8 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx | FileCheck %s --check-prefix=MMX
-; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+mmx | FileCheck %s --check-prefix=MMX
-; RUN: llc < %s -O2 -mtriple=x86_64-linux-android | FileCheck %s
-; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu | FileCheck %s
+; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx \
+; RUN: -enable-legalize-types-checking | FileCheck %s --check-prefix=MMX
+; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+mmx \
+; RUN: -enable-legalize-types-checking | FileCheck %s --check-prefix=MMX
+; RUN: llc < %s -O2 -mtriple=x86_64-linux-android \
+; RUN: -enable-legalize-types-checking | FileCheck %s
+; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu \
+; RUN: -enable-legalize-types-checking | FileCheck %s
define void @test_select(fp128* %p, fp128* %q, i1 zeroext %c) {
; MMX-LABEL: test_select:
diff --git a/test/CodeGen/X86/huge-stack-offset2.ll b/test/CodeGen/X86/huge-stack-offset2.ll
new file mode 100644
index 000000000000..9ac85b618dbd
--- /dev/null
+++ b/test/CodeGen/X86/huge-stack-offset2.ll
@@ -0,0 +1,62 @@
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefix=CHECK
+
+; Test how we handle pathologically large stack frames when RAX is live through
+; the prologue and epilogue.
+
+declare void @bar(i8*)
+declare void @llvm.va_start(i8*)
+
+; For stack frames between 2GB and 16GB, do multiple adjustments.
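+; (Each stack adjustment immediate is capped at the largest 32-bit signed
+; value, 0x7FFFFFFF, so an ~8GB frame takes four maximal adjustments plus a
+; remainder in both the prologue and the epilogue, as checked below.)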
+
+define i32 @stack_frame_8gb(i32 %x, ...) nounwind {
+; CHECK-LABEL: stack_frame_8gb:
+; CHECK: subq ${{.*}}, %rsp # imm = 0x7FFFFFFF
+; CHECK: subq ${{.*}}, %rsp # imm = 0x7FFFFFFF
+; CHECK: subq ${{.*}}, %rsp # imm = 0x7FFFFFFF
+; CHECK: subq ${{.*}}, %rsp # imm = 0x7FFFFFFF
+; CHECK: subq ${{.*}}, %rsp
+; CHECK: callq bar
+; CHECK: addq ${{.*}}, %rsp # imm = 0x7FFFFFFF
+; CHECK: addq ${{.*}}, %rsp # imm = 0x7FFFFFFF
+; CHECK: addq ${{.*}}, %rsp # imm = 0x7FFFFFFF
+; CHECK: addq ${{.*}}, %rsp # imm = 0x7FFFFFFF
+; CHECK: addq ${{.*}}, %rsp
+; CHECK: retq
+ %1 = alloca [u0x200000000 x i8]
+ %va = alloca i8, i32 24
+ call void @llvm.va_start(i8* %va)
+ %2 = getelementptr inbounds [u0x200000000 x i8], [u0x200000000 x i8]* %1, i32 0, i32 0
+ call void @bar(i8* %2)
+ ret i32 %x
+}
+
+; For stack frames larger than 16GB, spill RAX instead of doing a linear number
+; of adjustments.
+
+; This function should have a frame size of 0x4000000D0. The 0xD0 is 208 bytes
+; from 24 bytes of va_list, 176 bytes of spilled varargs regparms, and 8 bytes
+; of alignment. We subtract 8 less and add 8 more in the prologue and epilogue
+; respectively to account for the PUSH.
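+; (Worked out: 0x400000000 + 0xD0 = 0x4000000D0. The prologue subtracts
+; 0x4000000D0 - 8 = 0x4000000C8 (the movabsq immediate 0xFFFFFFFBFFFFFF38 is
+; its two's-complement negation) and the epilogue adds 0x4000000D0 + 8 =
+; 0x4000000D8. The pushq/movabsq/xchgq/movq sequence computes the new %rsp in
+; %rax while keeping the live %rax value intact.)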
+
+define i32 @stack_frame_16gb(i32 %x, ...) nounwind {
+; CHECK-LABEL: stack_frame_16gb:
+; CHECK: pushq %rax
+; CHECK-NEXT: movabsq ${{.*}}, %rax # imm = 0xFFFFFFFBFFFFFF38
+; CHECK-NEXT: addq %rsp, %rax
+; CHECK-NEXT: xchgq %rax, (%rsp)
+; CHECK-NEXT: movq (%rsp), %rsp
+; CHECK: callq bar
+; CHECK: pushq %rax
+; CHECK-NEXT: movabsq ${{.*}}, %rax # imm = 0x4000000D8
+; CHECK-NEXT: addq %rsp, %rax
+; CHECK-NEXT: xchgq %rax, (%rsp)
+; CHECK-NEXT: movq (%rsp), %rsp
+; CHECK: retq
+ %1 = alloca [u0x400000000 x i8]
+ %va = alloca i8, i32 24
+ call void @llvm.va_start(i8* %va)
+ %2 = getelementptr inbounds [u0x400000000 x i8], [u0x400000000 x i8]* %1, i32 0, i32 0
+ call void @bar(i8* %2)
+ ret i32 %x
+}
+
diff --git a/test/CodeGen/X86/i256-add.ll b/test/CodeGen/X86/i256-add.ll
index 6164d898ca11..a745f652d065 100644
--- a/test/CodeGen/X86/i256-add.ll
+++ b/test/CodeGen/X86/i256-add.ll
@@ -1,8 +1,67 @@
-; RUN: llc < %s -march=x86 > %t
-; RUN: grep adcl %t | count 7
-; RUN: grep sbbl %t | count 7
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i386-unknown | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
define void @add(i256* %p, i256* %q) nounwind {
+; X32-LABEL: add:
+; X32: # BB#0:
+; X32-NEXT: pushl %ebp
+; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %edi
+; X32-NEXT: pushl %esi
+; X32-NEXT: subl $12, %esp
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl 8(%ecx), %edx
+; X32-NEXT: movl (%ecx), %ebx
+; X32-NEXT: movl 4(%ecx), %edi
+; X32-NEXT: movl 28(%eax), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl 24(%eax), %ebp
+; X32-NEXT: addl (%eax), %ebx
+; X32-NEXT: adcl 4(%eax), %edi
+; X32-NEXT: adcl 8(%eax), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl 20(%eax), %esi
+; X32-NEXT: movl 12(%eax), %edx
+; X32-NEXT: movl 16(%eax), %eax
+; X32-NEXT: adcl 12(%ecx), %edx
+; X32-NEXT: adcl 16(%ecx), %eax
+; X32-NEXT: adcl 20(%ecx), %esi
+; X32-NEXT: adcl 24(%ecx), %ebp
+; X32-NEXT: movl %ebp, (%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebp # 4-byte Reload
+; X32-NEXT: adcl %ebp, 28(%ecx)
+; X32-NEXT: movl %ebx, (%ecx)
+; X32-NEXT: movl %edi, 4(%ecx)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, 8(%ecx)
+; X32-NEXT: movl %edx, 12(%ecx)
+; X32-NEXT: movl %eax, 16(%ecx)
+; X32-NEXT: movl %esi, 20(%ecx)
+; X32-NEXT: movl (%esp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, 24(%ecx)
+; X32-NEXT: addl $12, %esp
+; X32-NEXT: popl %esi
+; X32-NEXT: popl %edi
+; X32-NEXT: popl %ebx
+; X32-NEXT: popl %ebp
+; X32-NEXT: retl
+;
+; X64-LABEL: add:
+; X64: # BB#0:
+; X64-NEXT: movq 16(%rdi), %rax
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq 8(%rdi), %rdx
+; X64-NEXT: movq 24(%rsi), %r8
+; X64-NEXT: addq (%rsi), %rcx
+; X64-NEXT: adcq 8(%rsi), %rdx
+; X64-NEXT: adcq 16(%rsi), %rax
+; X64-NEXT: adcq %r8, 24(%rdi)
+; X64-NEXT: movq %rcx, (%rdi)
+; X64-NEXT: movq %rdx, 8(%rdi)
+; X64-NEXT: movq %rax, 16(%rdi)
+; X64-NEXT: retq
%a = load i256, i256* %p
%b = load i256, i256* %q
%c = add i256 %a, %b
@@ -10,6 +69,63 @@ define void @add(i256* %p, i256* %q) nounwind {
ret void
}
define void @sub(i256* %p, i256* %q) nounwind {
+; X32-LABEL: sub:
+; X32: # BB#0:
+; X32-NEXT: pushl %ebp
+; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %edi
+; X32-NEXT: pushl %esi
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl 16(%ecx), %eax
+; X32-NEXT: movl 12(%ecx), %edx
+; X32-NEXT: movl 8(%ecx), %edi
+; X32-NEXT: movl (%ecx), %ebx
+; X32-NEXT: movl 4(%ecx), %ebp
+; X32-NEXT: subl (%esi), %ebx
+; X32-NEXT: sbbl 4(%esi), %ebp
+; X32-NEXT: sbbl 8(%esi), %edi
+; X32-NEXT: sbbl 12(%esi), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: sbbl 16(%esi), %eax
+; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X32-NEXT: movl 20(%ecx), %edx
+; X32-NEXT: sbbl 20(%esi), %edx
+; X32-NEXT: movl 24(%ecx), %eax
+; X32-NEXT: sbbl 24(%esi), %eax
+; X32-NEXT: movl 28(%esi), %esi
+; X32-NEXT: sbbl %esi, 28(%ecx)
+; X32-NEXT: movl %ebx, (%ecx)
+; X32-NEXT: movl %ebp, 4(%ecx)
+; X32-NEXT: movl %edi, 8(%ecx)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 12(%ecx)
+; X32-NEXT: movl (%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 16(%ecx)
+; X32-NEXT: movl %edx, 20(%ecx)
+; X32-NEXT: movl %eax, 24(%ecx)
+; X32-NEXT: addl $8, %esp
+; X32-NEXT: popl %esi
+; X32-NEXT: popl %edi
+; X32-NEXT: popl %ebx
+; X32-NEXT: popl %ebp
+; X32-NEXT: retl
+;
+; X64-LABEL: sub:
+; X64: # BB#0:
+; X64-NEXT: movq 16(%rdi), %rax
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq 8(%rdi), %rdx
+; X64-NEXT: movq 24(%rsi), %r8
+; X64-NEXT: subq (%rsi), %rcx
+; X64-NEXT: sbbq 8(%rsi), %rdx
+; X64-NEXT: sbbq 16(%rsi), %rax
+; X64-NEXT: sbbq %r8, 24(%rdi)
+; X64-NEXT: movq %rcx, (%rdi)
+; X64-NEXT: movq %rdx, 8(%rdi)
+; X64-NEXT: movq %rax, 16(%rdi)
+; X64-NEXT: retq
%a = load i256, i256* %p
%b = load i256, i256* %q
%c = sub i256 %a, %b
diff --git a/test/CodeGen/X86/i386-shrink-wrapping.ll b/test/CodeGen/X86/i386-shrink-wrapping.ll
index 2c3e384b70a6..d4e099ac6558 100644
--- a/test/CodeGen/X86/i386-shrink-wrapping.ll
+++ b/test/CodeGen/X86/i386-shrink-wrapping.ll
@@ -55,8 +55,7 @@ target triple = "i386-apple-macosx10.5"
;
; CHECK-NEXT: L_e$non_lazy_ptr, [[E:%[a-z]+]]
; CHECK-NEXT: movb [[D]], ([[E]])
-; CHECK-NEXT: L_f$non_lazy_ptr, [[F:%[a-z]+]]
-; CHECK-NEXT: movsbl ([[F]]), [[CONV:%[a-z]+]]
+; CHECK-NEXT: movsbl ([[E]]), [[CONV:%[a-z]+]]
; CHECK-NEXT: movl $6, [[CONV:%[a-z]+]]
; The eflags register is used in the next instruction.
; If that instruction disappears, we are not exercising the bug
@@ -96,7 +95,7 @@ for.end: ; preds = %for.cond.preheader
%.b3 = load i1, i1* @d, align 1
%tmp2 = select i1 %.b3, i8 0, i8 6
store i8 %tmp2, i8* @e, align 1
- %tmp3 = load i8, i8* @f, align 1
+ %tmp3 = load i8, i8* @e, align 1
%conv = sext i8 %tmp3 to i32
%add = add nsw i32 %conv, 1
%rem = srem i32 %tmp1, %add
diff --git a/test/CodeGen/X86/illegal-bitfield-loadstore.ll b/test/CodeGen/X86/illegal-bitfield-loadstore.ll
new file mode 100644
index 000000000000..ceb465711906
--- /dev/null
+++ b/test/CodeGen/X86/illegal-bitfield-loadstore.ll
@@ -0,0 +1,141 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s
+
+define void @i24_or(i24* %a) {
+; CHECK-LABEL: i24_or:
+; CHECK: # BB#0:
+; CHECK-NEXT: movzwl (%rdi), %eax
+; CHECK-NEXT: movzbl 2(%rdi), %ecx
+; CHECK-NEXT: movb %cl, 2(%rdi)
+; CHECK-NEXT: shll $16, %ecx
+; CHECK-NEXT: orl %eax, %ecx
+; CHECK-NEXT: orl $384, %ecx # imm = 0x180
+; CHECK-NEXT: movw %cx, (%rdi)
+; CHECK-NEXT: retq
+ %aa = load i24, i24* %a, align 1
+ %b = or i24 %aa, 384
+ store i24 %b, i24* %a, align 1
+ ret void
+}
+
+define void @i24_and_or(i24* %a) {
+; CHECK-LABEL: i24_and_or:
+; CHECK: # BB#0:
+; CHECK-NEXT: movzwl (%rdi), %eax
+; CHECK-NEXT: movzbl 2(%rdi), %ecx
+; CHECK-NEXT: movb %cl, 2(%rdi)
+; CHECK-NEXT: shll $16, %ecx
+; CHECK-NEXT: orl %eax, %ecx
+; CHECK-NEXT: orl $384, %ecx # imm = 0x180
+; CHECK-NEXT: andl $16777088, %ecx # imm = 0xFFFF80
+; CHECK-NEXT: movw %cx, (%rdi)
+; CHECK-NEXT: retq
+ %b = load i24, i24* %a, align 1
+ %c = and i24 %b, -128
+ %d = or i24 %c, 384
+ store i24 %d, i24* %a, align 1
+ ret void
+}
+
+define void @i24_insert_bit(i24* %a, i1 zeroext %bit) {
+; CHECK-LABEL: i24_insert_bit:
+; CHECK: # BB#0:
+; CHECK-NEXT: movzbl %sil, %eax
+; CHECK-NEXT: movzwl (%rdi), %ecx
+; CHECK-NEXT: movzbl 2(%rdi), %edx
+; CHECK-NEXT: movb %dl, 2(%rdi)
+; CHECK-NEXT: shll $16, %edx
+; CHECK-NEXT: orl %ecx, %edx
+; CHECK-NEXT: shll $13, %eax
+; CHECK-NEXT: andl $16769023, %edx # imm = 0xFFDFFF
+; CHECK-NEXT: orl %eax, %edx
+; CHECK-NEXT: movw %dx, (%rdi)
+; CHECK-NEXT: retq
+ %extbit = zext i1 %bit to i24
+ %b = load i24, i24* %a, align 1
+ %extbit.shl = shl nuw nsw i24 %extbit, 13
+ %c = and i24 %b, -8193
+ %d = or i24 %c, %extbit.shl
+ store i24 %d, i24* %a, align 1
+ ret void
+}
+
+define void @i56_or(i56* %a) {
+; CHECK-LABEL: i56_or:
+; CHECK: # BB#0:
+; CHECK-NEXT: movzwl 4(%rdi), %eax
+; CHECK-NEXT: movzbl 6(%rdi), %ecx
+; CHECK-NEXT: movl (%rdi), %edx
+; CHECK-NEXT: movb %cl, 6(%rdi)
+; CHECK-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<kill> %RCX<def>
+; CHECK-NEXT: shll $16, %ecx
+; CHECK-NEXT: orl %eax, %ecx
+; CHECK-NEXT: shlq $32, %rcx
+; CHECK-NEXT: orq %rcx, %rdx
+; CHECK-NEXT: orq $384, %rdx # imm = 0x180
+; CHECK-NEXT: movl %edx, (%rdi)
+; CHECK-NEXT: shrq $32, %rdx
+; CHECK-NEXT: movw %dx, 4(%rdi)
+; CHECK-NEXT: retq
+ %aa = load i56, i56* %a, align 1
+ %b = or i56 %aa, 384
+ store i56 %b, i56* %a, align 1
+ ret void
+}
+
+define void @i56_and_or(i56* %a) {
+; CHECK-LABEL: i56_and_or:
+; CHECK: # BB#0:
+; CHECK-NEXT: movzwl 4(%rdi), %eax
+; CHECK-NEXT: movzbl 6(%rdi), %ecx
+; CHECK-NEXT: movl (%rdi), %edx
+; CHECK-NEXT: movb %cl, 6(%rdi)
+; CHECK-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<kill> %RCX<def>
+; CHECK-NEXT: shll $16, %ecx
+; CHECK-NEXT: orl %eax, %ecx
+; CHECK-NEXT: shlq $32, %rcx
+; CHECK-NEXT: orq %rcx, %rdx
+; CHECK-NEXT: orq $384, %rdx # imm = 0x180
+; CHECK-NEXT: movabsq $72057594037927808, %rax # imm = 0xFFFFFFFFFFFF80
+; CHECK-NEXT: andq %rdx, %rax
+; CHECK-NEXT: movl %eax, (%rdi)
+; CHECK-NEXT: shrq $32, %rax
+; CHECK-NEXT: movw %ax, 4(%rdi)
+; CHECK-NEXT: retq
+ %b = load i56, i56* %a, align 1
+ %c = and i56 %b, -128
+ %d = or i56 %c, 384
+ store i56 %d, i56* %a, align 1
+ ret void
+}
+
+define void @i56_insert_bit(i56* %a, i1 zeroext %bit) {
+; CHECK-LABEL: i56_insert_bit:
+; CHECK: # BB#0:
+; CHECK-NEXT: movzbl %sil, %eax
+; CHECK-NEXT: movzwl 4(%rdi), %ecx
+; CHECK-NEXT: movzbl 6(%rdi), %edx
+; CHECK-NEXT: movl (%rdi), %esi
+; CHECK-NEXT: movb %dl, 6(%rdi)
+; CHECK-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<kill> %RDX<def>
+; CHECK-NEXT: shll $16, %edx
+; CHECK-NEXT: orl %ecx, %edx
+; CHECK-NEXT: shlq $32, %rdx
+; CHECK-NEXT: orq %rdx, %rsi
+; CHECK-NEXT: shlq $13, %rax
+; CHECK-NEXT: movabsq $72057594037919743, %rcx # imm = 0xFFFFFFFFFFDFFF
+; CHECK-NEXT: andq %rsi, %rcx
+; CHECK-NEXT: orq %rax, %rcx
+; CHECK-NEXT: movl %ecx, (%rdi)
+; CHECK-NEXT: shrq $32, %rcx
+; CHECK-NEXT: movw %cx, 4(%rdi)
+; CHECK-NEXT: retq
+ %extbit = zext i1 %bit to i56
+ %b = load i56, i56* %a, align 1
+ %extbit.shl = shl nuw nsw i56 %extbit, 13
+ %c = and i56 %b, -8193
+ %d = or i56 %c, %extbit.shl
+ store i56 %d, i56* %a, align 1
+ ret void
+}
+
diff --git a/test/CodeGen/X86/implicit-null-check.ll b/test/CodeGen/X86/implicit-null-check.ll
index 9a8a3a4369d3..ee795667cdb1 100644
--- a/test/CodeGen/X86/implicit-null-check.ll
+++ b/test/CodeGen/X86/implicit-null-check.ll
@@ -135,6 +135,53 @@ define i32 @imp_null_check_via_mem_comparision(i32* %x, i32 %val) {
ret i32 200
}
+define i32 @imp_null_check_gep_load_with_use_dep(i32* %x, i32 %a) {
+; CHECK-LABEL: imp_null_check_gep_load_with_use_dep:
+; CHECK: [[BB0_imp_null_check_gep_load_with_use_dep:L[^:]+]]:
+; CHECK: movl (%rdi), %eax
+; CHECK: addl %edi, %esi
+; CHECK: leal 4(%rax,%rsi), %eax
+; CHECK: retq
+; CHECK: [[BB1_imp_null_check_gep_load_with_use_dep:LBB5_[0-9]+]]:
+; CHECK: movl $42, %eax
+; CHECK: retq
+
+ entry:
+ %c = icmp eq i32* %x, null
+ br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null:
+ ret i32 42
+
+ not_null:
+ %x.loc = getelementptr i32, i32* %x, i32 1
+ %y = ptrtoint i32* %x.loc to i32
+ %b = add i32 %a, %y
+ %t = load i32, i32* %x
+ %z = add i32 %t, %b
+ ret i32 %z
+}
+
+define void @imp_null_check_store(i32* %x) {
+; CHECK-LABEL: _imp_null_check_store:
+; CHECK: [[BB0_imp_null_check_store:L[^:]+]]:
+; CHECK: movl $1, (%rdi)
+; CHECK: retq
+; CHECK: [[BB1_imp_null_check_store:LBB6_[0-9]+]]:
+; CHECK: retq
+
+ entry:
+ %c = icmp eq i32* %x, null
+ br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null:
+ ret void
+
+ not_null:
+ store i32 1, i32* %x
+ ret void
+}
+
!0 = !{}
; CHECK-LABEL: __LLVM_FaultMaps:
@@ -147,7 +194,7 @@ define i32 @imp_null_check_via_mem_comparision(i32* %x, i32 %val) {
; CHECK-NEXT: .short 0
; # functions:
-; CHECK-NEXT: .long 5
+; CHECK-NEXT: .long 7
; FunctionAddr:
; CHECK-NEXT: .quad _imp_null_check_add_result
@@ -176,6 +223,19 @@ define i32 @imp_null_check_via_mem_comparision(i32* %x, i32 %val) {
; CHECK-NEXT: .long [[BB1_imp_null_check_gep_load]]-_imp_null_check_gep_load
; FunctionAddr:
+; CHECK-NEXT: .quad _imp_null_check_gep_load_with_use_dep
+; NumFaultingPCs
+; CHECK-NEXT: .long 1
+; Reserved:
+; CHECK-NEXT: .long 0
+; Fault[0].Type:
+; CHECK-NEXT: .long 1
+; Fault[0].FaultOffset:
+; CHECK-NEXT: .long [[BB0_imp_null_check_gep_load_with_use_dep]]-_imp_null_check_gep_load_with_use_dep
+; Fault[0].HandlerOffset:
+; CHECK-NEXT: .long [[BB1_imp_null_check_gep_load_with_use_dep]]-_imp_null_check_gep_load_with_use_dep
+
+; FunctionAddr:
; CHECK-NEXT: .quad _imp_null_check_hoist_over_unrelated_load
; NumFaultingPCs
; CHECK-NEXT: .long 1
@@ -202,6 +262,19 @@ define i32 @imp_null_check_via_mem_comparision(i32* %x, i32 %val) {
; CHECK-NEXT: .long [[BB1_imp_null_check_load]]-_imp_null_check_load
; FunctionAddr:
+; CHECK-NEXT: .quad _imp_null_check_store
+; NumFaultingPCs
+; CHECK-NEXT: .long 1
+; Reserved:
+; CHECK-NEXT: .long 0
+; Fault[0].Type:
+; CHECK-NEXT: .long 3
+; Fault[0].FaultOffset:
+; CHECK-NEXT: .long [[BB0_imp_null_check_store]]-_imp_null_check_store
+; Fault[0].HandlerOffset:
+; CHECK-NEXT: .long [[BB1_imp_null_check_store]]-_imp_null_check_store
+
+; FunctionAddr:
; CHECK-NEXT: .quad _imp_null_check_via_mem_comparision
; NumFaultingPCs
; CHECK-NEXT: .long 1
@@ -216,12 +289,18 @@ define i32 @imp_null_check_via_mem_comparision(i32* %x, i32 %val) {
; OBJDUMP: FaultMap table:
; OBJDUMP-NEXT: Version: 0x1
-; OBJDUMP-NEXT: NumFunctions: 5
+; OBJDUMP-NEXT: NumFunctions: 7
; OBJDUMP-NEXT: FunctionAddress: 0x000000, NumFaultingPCs: 1
; OBJDUMP-NEXT: Fault kind: FaultingLoad, faulting PC offset: 0, handling PC offset: 5
; OBJDUMP-NEXT: FunctionAddress: 0x000000, NumFaultingPCs: 1
; OBJDUMP-NEXT: Fault kind: FaultingLoad, faulting PC offset: 0, handling PC offset: 7
; OBJDUMP-NEXT: FunctionAddress: 0x000000, NumFaultingPCs: 1
+; OBJDUMP-NEXT: Fault kind: FaultingLoad, faulting PC offset: 0, handling PC offset: 9
+; OBJDUMP-NEXT: FunctionAddress: 0x000000, NumFaultingPCs: 1
; OBJDUMP-NEXT: Fault kind: FaultingLoad, faulting PC offset: 0, handling PC offset: 7
; OBJDUMP-NEXT: FunctionAddress: 0x000000, NumFaultingPCs: 1
; OBJDUMP-NEXT: Fault kind: FaultingLoad, faulting PC offset: 0, handling PC offset: 3
+; OBJDUMP-NEXT: FunctionAddress: 0x000000, NumFaultingPCs: 1
+; OBJDUMP-NEXT: Fault kind: FaultingStore, faulting PC offset: 0, handling PC offset: 7
+; OBJDUMP-NEXT: FunctionAddress: 0x000000, NumFaultingPCs: 1
+; OBJDUMP-NEXT: Fault kind: FaultingLoad, faulting PC offset: 0, handling PC offset: 11
diff --git a/test/CodeGen/X86/implicit-null-checks.mir b/test/CodeGen/X86/implicit-null-checks.mir
index 81351511374c..39bfedaa7814 100644
--- a/test/CodeGen/X86/implicit-null-checks.mir
+++ b/test/CodeGen/X86/implicit-null-checks.mir
@@ -131,6 +131,240 @@
ret i32 0
}
+ define i32 @use_alternate_load_op(i32* %ptr, i32* %ptr2) {
+ entry:
+ %ptr_is_null = icmp eq i32* %ptr, null
+ br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+ not_null:
+ ret i32 undef
+
+ is_null:
+ ret i32 0
+ }
+
+ define i32 @imp_null_check_gep_load_with_use_dep(i32* %x, i32 %a) {
+ entry:
+ %c = icmp eq i32* %x, null
+ br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null: ; preds = %entry
+ ret i32 42
+
+ not_null: ; preds = %entry
+ %x.loc = getelementptr i32, i32* %x, i32 1
+ %y = ptrtoint i32* %x.loc to i32
+ %b = add i32 %a, %y
+ %t = load i32, i32* %x
+ %z = add i32 %t, %b
+ ret i32 %z
+ }
+
+ define i32 @imp_null_check_load_with_base_sep(i32* %x, i32 %a) {
+ entry:
+ %c = icmp eq i32* %x, null
+ br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null: ; preds = %entry
+ ret i32 42
+
+ not_null: ; preds = %entry
+ ret i32 undef
+ }
+
+ define void @inc_store(i32* %ptr, i32 %val) {
+ entry:
+ %ptr_is_null = icmp eq i32* %ptr, null
+ br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+ not_null:
+ ret void
+
+ is_null:
+ ret void
+ }
+
+ define void @inc_store_plus_offset(i32* %ptr, i32 %val) {
+ entry:
+ %ptr_is_null = icmp eq i32* %ptr, null
+ br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+ not_null:
+ ret void
+
+ is_null:
+ ret void
+ }
+
+ define void @inc_store_with_dep(i32* %ptr, i32 %val) {
+ entry:
+ %ptr_is_null = icmp eq i32* %ptr, null
+ br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+ not_null:
+ ret void
+
+ is_null:
+ ret void
+ }
+
+ define i32 @inc_store_with_dep_in_null(i32* %ptr, i32 %val) {
+ entry:
+ %ptr_is_null = icmp eq i32* %ptr, null
+ br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+ not_null:
+ ret i32 undef
+
+ is_null:
+ ret i32 undef
+ }
+
+ define void @inc_store_with_volatile(i32* %ptr, i32 %val) {
+ entry:
+ %ptr_is_null = icmp eq i32* %ptr, null
+ br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+ not_null:
+ ret void
+
+ is_null:
+ ret void
+ }
+
+ define void @inc_store_with_two_dep(i32* %ptr, i32 %val) {
+ entry:
+ %ptr_is_null = icmp eq i32* %ptr, null
+ br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+ not_null:
+ ret void
+
+ is_null:
+ ret void
+ }
+
+ define void @inc_store_with_redefined_base(i32* %ptr, i32 %val) {
+ entry:
+ %ptr_is_null = icmp eq i32* %ptr, null
+ br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+ not_null:
+ ret void
+
+ is_null:
+ ret void
+ }
+
+ define i32 @inc_store_with_reused_base(i32* %ptr, i32 %val) {
+ entry:
+ %ptr_is_null = icmp eq i32* %ptr, null
+ br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+ not_null:
+ ret i32 undef
+
+ is_null:
+ ret i32 undef
+ }
+
+ define i32 @inc_store_across_call(i32* %ptr) {
+ entry:
+ %ptr_is_null = icmp eq i32* %ptr, null
+ br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+ not_null:
+ call void @f()
+ ret i32 undef
+
+ is_null:
+ ret i32 undef
+ }
+
+ define i32 @inc_store_with_dep_in_dep(i32* %ptr, i32 %val) {
+ entry:
+ %ptr_is_null = icmp eq i32* %ptr, null
+ br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+ not_null:
+ ret i32 undef
+
+ is_null:
+ ret i32 undef
+ }
+
+ define i32 @inc_store_with_load_over_store(i32* %ptr, i32* %ptr2) {
+ entry:
+ %ptr_is_null = icmp eq i32* %ptr, null
+ br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+ not_null:
+ ret i32 undef
+
+ is_null:
+ ret i32 undef
+ }
+
+ define i32 @inc_store_with_store_over_load(i32* %ptr, i32* %ptr2) {
+ entry:
+ %ptr_is_null = icmp eq i32* %ptr, null
+ br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+ not_null:
+ ret i32 undef
+
+ is_null:
+ ret i32 undef
+ }
+
+ define void @inc_store_with_store_over_store(i32* %ptr, i32* %ptr2) {
+ entry:
+ %ptr_is_null = icmp eq i32* %ptr, null
+ br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+ not_null:
+ ret void
+
+ is_null:
+ ret void
+ }
+
+ define void @inc_store_with_load_and_store(i32* %ptr, i32* %ptr2) {
+ entry:
+ %ptr_is_null = icmp eq i32* %ptr, null
+ br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+ not_null:
+ ret void
+
+ is_null:
+ ret void
+ }
+
+ define i32 @inc_store_and_load_no_alias(i32* noalias %ptr, i32* noalias %ptr2) {
+ entry:
+ %ptr_is_null = icmp eq i32* %ptr, null
+ br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+ not_null:
+ ret i32 undef
+
+ is_null:
+ ret i32 undef
+ }
+
+ define i32 @inc_store_and_load_alias(i32* %ptr, i32* %ptr2) {
+ entry:
+ %ptr_is_null = icmp eq i32* %ptr, null
+ br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+ not_null:
+ ret i32 undef
+
+ is_null:
+ ret i32 undef
+ }
+
attributes #0 = { "target-features"="+bmi,+bmi2" }
!0 = !{}
@@ -145,7 +379,7 @@ liveins:
- { reg: '%esi' }
# CHECK: bb.0.entry:
# CHECK: %eax = MOV32ri 2200000
-# CHECK-NEXT: %eax = FAULTING_LOAD_OP %bb.3.is_null, {{[0-9]+}}, killed %eax, killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (load 4 from %ir.x)
+# CHECK-NEXT: %eax = FAULTING_OP 1, %bb.3.is_null, {{[0-9]+}}, killed %eax, killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (load 4 from %ir.x)
# CHECK-NEXT: JMP_1 %bb.1.not_null
body: |
@@ -167,15 +401,15 @@ body: |
bb.2.ret_200:
%eax = MOV32ri 200
- RET 0, %eax
+ RETQ %eax
bb.3.is_null:
%eax = MOV32ri 42
- RET 0, %eax
+ RETQ %eax
bb.4.ret_100:
%eax = MOV32ri 100
- RET 0, %eax
+ RETQ %eax
...
---
@@ -217,11 +451,11 @@ body: |
bb.3.is_null:
liveins: %eax, %ah, %al, %ax, %bh, %bl, %bp, %bpl, %bx, %eax, %ebp, %ebx, %rax, %rbp, %rbx, %r12, %r13, %r14, %r15, %r12b, %r13b, %r14b, %r15b, %r12d, %r13d, %r14d, %r15d, %r12w, %r13w, %r14w, %r15w
- RET 0, %eax
+ RETQ %eax
bb.4.ret_100:
%eax = MOV32ri 100
- RET 0, %eax
+ RETQ %eax
...
---
@@ -256,15 +490,15 @@ body: |
bb.2.ret_200:
%eax = MOV32ri 200
- RET 0, %eax
+ RETQ %eax
bb.3.is_null:
%eax = MOV32ri 42
- RET 0, %eax
+ RETQ %eax
bb.4.ret_100:
%eax = MOV32ri 100
- RET 0, %eax
+ RETQ %eax
...
---
@@ -298,15 +532,15 @@ body: |
bb.2.ret_200:
%eax = MOV32ri 200
- RET 0, %eax
+ RETQ %eax
bb.3.is_null:
%eax = MOV32ri 42
- RET 0, %eax
+ RETQ %eax
bb.4.ret_100:
%eax = MOV32ri 100
- RET 0, %eax
+ RETQ %eax
...
---
@@ -319,7 +553,7 @@ liveins:
- { reg: '%rsi' }
# CHECK: bb.0.entry:
# CHECK: %rbx = MOV64rr %rdx
-# CHECK-NEXT: %rdi = FAULTING_LOAD_OP %bb.3.is_null, {{[0-9]+}}, killed %rbx, killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (load 4 from %ir.x)
+# CHECK-NEXT: %rdi = FAULTING_OP 1, %bb.3.is_null, {{[0-9]+}}, killed %rbx, killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (load 4 from %ir.x)
body: |
bb.0.entry:
@@ -341,15 +575,15 @@ body: |
bb.2.ret_200:
%eax = MOV32ri 200
- RET 0, %eax
+ RETQ %eax
bb.3.is_null:
%eax = MOV32ri 42
- RET 0, %eax
+ RETQ %eax
bb.4.ret_100:
%eax = MOV32ri 100
- RET 0, %eax
+ RETQ %eax
...
---
@@ -364,7 +598,7 @@ calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx',
'%r12b', '%r13b', '%r14b', '%r15b', '%r12d', '%r13d',
'%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ]
# CHECK: body:
-# CHECK-NOT: FAULTING_LOAD_OP
+# CHECK-NOT: FAULTING_OP
# CHECK: bb.1.stay:
# CHECK: CALL64pcrel32
body: |
@@ -397,7 +631,7 @@ body: |
name: dependency_live_in_hazard
# CHECK-LABEL: name: dependency_live_in_hazard
# CHECK: bb.0.entry:
-# CHECK-NOT: FAULTING_LOAD_OP
+# CHECK-NOT: FAULTING_OP
# CHECK: bb.1.not_null:
# Make sure that the BEXTR32rm instruction below is not used to emit
@@ -431,3 +665,635 @@ body: |
RETQ %eax
...
+---
+name: use_alternate_load_op
+# CHECK-LABEL: name: use_alternate_load_op
+# CHECK: bb.0.entry:
+# CHECK: %rax = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, killed %rdi, 1, _, 0, _
+# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0.entry:
+ successors: %bb.2.is_null, %bb.1.not_null
+ liveins: %rdi, %rsi
+
+ TEST64rr %rdi, %rdi, implicit-def %eflags
+ JE_1 %bb.2.is_null, implicit killed %eflags
+
+ bb.1.not_null:
+ liveins: %rdi, %rsi
+
+ %rcx = MOV64rm killed %rsi, 1, _, 0, _
+ %rcx = AND64rm killed %rcx, %rdi, 1, _, 0, _, implicit-def dead %eflags
+ %rax = MOV64rm killed %rdi, 1, _, 0, _
+ RETQ %eax
+
+ bb.2.is_null:
+ %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
+ RETQ %eax
+
+...
+---
+name: imp_null_check_gep_load_with_use_dep
+# CHECK-LABEL: name: imp_null_check_gep_load_with_use_dep
+# CHECK: bb.0.entry:
+# CHECK: %eax = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, killed %rdi, 1, _, 0, _, implicit-def %rax :: (load 4 from %ir.x)
+# CHECK-NEXT: JMP_1 %bb.1.not_null
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0.entry:
+ successors: %bb.1.is_null(0x30000000), %bb.2.not_null(0x50000000)
+ liveins: %rsi, %rdi
+
+ TEST64rr %rdi, %rdi, implicit-def %eflags
+ JE_1 %bb.1.is_null, implicit %eflags
+
+ bb.2.not_null:
+ liveins: %rdi, %rsi
+
+ %rsi = ADD64rr %rsi, %rdi, implicit-def dead %eflags
+ %eax = MOV32rm killed %rdi, 1, _, 0, _, implicit-def %rax :: (load 4 from %ir.x)
+ %eax = LEA64_32r killed %rax, 1, killed %rsi, 4, _
+ RETQ %eax
+
+ bb.1.is_null:
+ %eax = MOV32ri 42
+ RETQ %eax
+
+...
+---
+name: imp_null_check_load_with_base_sep
+# CHECK-LABEL: name: imp_null_check_load_with_base_sep
+# CHECK: bb.0.entry:
+# CHECK: %rsi = ADD64rr %rsi, %rdi, implicit-def dead %eflags
+# CHECK-NEXT: %esi = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, killed %esi, %rdi, 1, _, 0, _, implicit-def dead %eflags
+# CHECK-NEXT: JMP_1 %bb.1.not_null
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0.entry:
+ successors: %bb.1.is_null(0x30000000), %bb.2.not_null(0x50000000)
+ liveins: %rsi, %rdi
+
+ TEST64rr %rdi, %rdi, implicit-def %eflags
+ JE_1 %bb.1.is_null, implicit %eflags
+
+ bb.2.not_null:
+ liveins: %rdi, %rsi
+
+ %rsi = ADD64rr %rsi, %rdi, implicit-def dead %eflags
+ %esi = AND32rm killed %esi, %rdi, 1, _, 0, _, implicit-def dead %eflags
+ %eax = MOV32rr %esi
+ RETQ %eax
+
+ bb.1.is_null:
+ %eax = MOV32ri 42
+ RETQ %eax
+
+...
+---
+name: inc_store
+# CHECK-LABEL: name: inc_store
+# CHECK: bb.0.entry:
+# CHECK: _ = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, killed %rdi, 1, _, 0, _, killed %rsi
+# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0.entry:
+ successors: %bb.2.is_null, %bb.1.not_null
+ liveins: %rdi, %rsi
+
+ TEST64rr %rdi, %rdi, implicit-def %eflags
+ JE_1 %bb.2.is_null, implicit killed %eflags
+
+ bb.1.not_null:
+ liveins: %rdi, %rsi
+
+ MOV64mr killed %rdi, 1, _, 0, _, killed %rsi
+ RETQ
+
+ bb.2.is_null:
+ RETQ
+
+...
+---
+name: inc_store_plus_offset
+# CHECK-LABEL: inc_store_plus_offset
+# CHECK: bb.0.entry:
+# CHECK: _ = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, killed %rdi, 1, _, 16, _, killed %rsi
+# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0.entry:
+ successors: %bb.2.is_null, %bb.1.not_null
+ liveins: %rdi, %rsi
+
+ TEST64rr %rdi, %rdi, implicit-def %eflags
+ JE_1 %bb.2.is_null, implicit killed %eflags
+
+ bb.1.not_null:
+ liveins: %rdi, %rsi
+
+ MOV64mr killed %rdi, 1, _, 16, _, killed %rsi
+ RETQ
+
+ bb.2.is_null:
+ RETQ
+
+...
+---
+name: inc_store_with_dep
+# CHECK-LABEL: inc_store_with_dep
+# CHECK: bb.0.entry:
+# CHECK: %esi = ADD32rr killed %esi, killed %esi, implicit-def dead %eflags
+# CHECK-NEXT: _ = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, killed %rdi, 1, _, 16, _, killed %esi
+# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0.entry:
+ successors: %bb.2.is_null, %bb.1.not_null
+ liveins: %rdi, %rsi
+
+ TEST64rr %rdi, %rdi, implicit-def %eflags
+ JE_1 %bb.2.is_null, implicit killed %eflags
+
+ bb.1.not_null:
+ liveins: %rdi, %rsi
+
+ %esi = ADD32rr killed %esi, killed %esi, implicit-def dead %eflags
+ MOV32mr killed %rdi, 1, _, 16, _, killed %esi
+ RETQ
+
+ bb.2.is_null:
+ RETQ
+
+...
+---
+name: inc_store_with_dep_in_null
+# CHECK-LABEL: inc_store_with_dep_in_null
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0.entry:
+ successors: %bb.2.is_null, %bb.1.not_null
+ liveins: %rdi, %rsi
+
+ TEST64rr %rdi, %rdi, implicit-def %eflags
+ JE_1 %bb.2.is_null, implicit killed %eflags
+
+ bb.1.not_null:
+ liveins: %rdi, %rsi
+
+ %esi = ADD32rr %esi, %esi, implicit-def dead %eflags
+ MOV32mr killed %rdi, 1, _, 0, _, %esi
+ %eax = MOV32rr killed %esi
+ RETQ %eax
+
+ bb.2.is_null:
+ liveins: %rsi
+
+ %eax = MOV32rr killed %esi
+ RETQ %eax
+
+...
+---
+name: inc_store_with_volatile
+# CHECK-LABEL: inc_store_with_volatile
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0.entry:
+ successors: %bb.2.is_null, %bb.1.not_null
+ liveins: %rdi, %rsi
+
+ TEST64rr %rdi, %rdi, implicit-def %eflags
+ JE_1 %bb.2.is_null, implicit killed %eflags
+
+ bb.1.not_null:
+ liveins: %rdi, %rsi
+
+ MOV32mr killed %rdi, 1, _, 0, _, killed %esi :: (volatile store 4 into %ir.ptr)
+ RETQ
+
+ bb.2.is_null:
+ RETQ
+
+...
+---
+name: inc_store_with_two_dep
+# CHECK-LABEL: inc_store_with_two_dep
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0.entry:
+ successors: %bb.2.is_null, %bb.1.not_null
+ liveins: %rdi, %rsi
+
+ TEST64rr %rdi, %rdi, implicit-def %eflags
+ JE_1 %bb.2.is_null, implicit killed %eflags
+
+ bb.1.not_null:
+ liveins: %rdi, %rsi
+
+ %esi = ADD32rr killed %esi, killed %esi, implicit-def dead %eflags
+ %esi = ADD32ri killed %esi, 15, implicit-def dead %eflags
+ MOV32mr killed %rdi, 1, _, 16, _, killed %esi
+ RETQ
+
+ bb.2.is_null:
+ RETQ
+
+...
+---
+name: inc_store_with_redefined_base
+# CHECK-LABEL: inc_store_with_redefined_base
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0.entry:
+ successors: %bb.2.is_null, %bb.1.not_null
+ liveins: %rdi, %rsi
+
+ TEST64rr %rdi, %rdi, implicit-def %eflags
+ JE_1 %bb.2.is_null, implicit killed %eflags
+
+ bb.1.not_null:
+ liveins: %rdi, %rsi
+
+ %rdi = ADD64rr killed %rdi, killed %rdi, implicit-def dead %eflags
+ MOV32mr killed %rdi, 1, _, 16, _, killed %esi
+ RETQ
+
+ bb.2.is_null:
+ RETQ
+
+...
+---
+name: inc_store_with_reused_base
+# CHECK-LABEL: inc_store_with_reused_base
+# CHECK: bb.0.entry:
+# CHECK: _ = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, killed %rdi, 1, _, 16, _, killed %esi
+# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0.entry:
+ successors: %bb.2.is_null, %bb.1.not_null
+ liveins: %rdi, %rsi
+
+ TEST64rr %rdi, %rdi, implicit-def %eflags
+ JE_1 %bb.2.is_null, implicit killed %eflags
+
+ bb.1.not_null:
+ liveins: %rdi, %rsi
+
+ %rax = MOV64rr %rdi
+ MOV32mr killed %rdi, 1, _, 16, _, killed %esi
+ RETQ %eax
+
+ bb.2.is_null:
+ %rax = XOR64rr undef %rax, undef %rax, implicit-def dead %eflags
+ RETQ %eax
+
+...
+---
+name: inc_store_across_call
+# CHECK-LABEL: inc_store_across_call
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rbx, %rbx, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx',
+ '%rbp', '%rbx', '%r12', '%r13', '%r14', '%r15',
+ '%r12b', '%r13b', '%r14b', '%r15b', '%r12d', '%r13d',
+ '%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ]
+body: |
+ bb.0.entry:
+ successors: %bb.2.is_null, %bb.1.not_null
+ liveins: %rdi, %rbx
+
+ frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp
+ CFI_INSTRUCTION def_cfa_offset 16
+ CFI_INSTRUCTION offset %rbx, -16
+ %rbx = MOV64rr killed %rdi
+ TEST64rr %rbx, %rbx, implicit-def %eflags
+ JE_1 %bb.2.is_null, implicit killed %eflags
+
+ bb.1.not_null:
+ liveins: %rbx
+
+ CALL64pcrel32 @f, csr_64, implicit %rsp, implicit-def %rsp
+ MOV32mi %rbx, 1, _, 0, _, 20
+ %rax = MOV64rr killed %rbx
+ %rbx = POP64r implicit-def %rsp, implicit %rsp
+ RETQ %eax
+
+ bb.2.is_null:
+ %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
+ %rbx = POP64r implicit-def %rsp, implicit %rsp
+ RETQ %eax
+
+...
+---
+name: inc_store_with_dep_in_dep
+# CHECK-LABEL: inc_store_with_dep_in_dep
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0.entry:
+ successors: %bb.2.is_null, %bb.1.not_null
+ liveins: %rdi, %rsi
+
+ TEST64rr %rdi, %rdi, implicit-def %eflags
+ JE_1 %bb.2.is_null, implicit killed %eflags
+
+ bb.1.not_null:
+ liveins: %rdi, %rsi
+
+ %eax = MOV32rr %esi
+ %esi = ADD32ri killed %esi, 15, implicit-def dead %eflags
+ MOV32mr killed %rdi, 1, _, 0, _, killed %esi
+ RETQ %eax
+
+ bb.2.is_null:
+ %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
+ RETQ %eax
+
+...
+---
+name: inc_store_with_load_over_store
+# CHECK-LABEL: inc_store_with_load_over_store
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0.entry:
+ successors: %bb.2.is_null, %bb.1.not_null
+ liveins: %rdi, %rsi
+
+ TEST64rr %rdi, %rdi, implicit-def %eflags
+ JE_1 %bb.2.is_null, implicit killed %eflags
+
+ bb.1.not_null:
+ liveins: %rdi, %rsi
+
+ MOV32mi killed %rsi, 1, _, 0, _, 2
+ %eax = MOV32rm killed %rdi, 1, _, 0, _
+ RETQ %eax
+
+ bb.2.is_null:
+ %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
+ RETQ %eax
+
+...
+---
+name: inc_store_with_store_over_load
+# CHECK-LABEL: inc_store_with_store_over_load
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0.entry:
+ successors: %bb.2.is_null, %bb.1.not_null
+ liveins: %rdi, %rsi
+
+ TEST64rr %rdi, %rdi, implicit-def %eflags
+ JE_1 %bb.2.is_null, implicit killed %eflags
+
+ bb.1.not_null:
+ liveins: %rdi, %rsi
+
+ %eax = MOV32rm killed %rsi, 1, _, 0, _
+ MOV32mi killed %rdi, 1, _, 0, _, 2
+ RETQ %eax
+
+ bb.2.is_null:
+ %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
+ RETQ %eax
+
+...
+---
+name: inc_store_with_store_over_store
+# CHECK-LABEL: inc_store_with_store_over_store
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0.entry:
+ successors: %bb.2.is_null, %bb.1.not_null
+ liveins: %rdi, %rsi
+
+ TEST64rr %rdi, %rdi, implicit-def %eflags
+ JE_1 %bb.2.is_null, implicit killed %eflags
+
+ bb.1.not_null:
+ liveins: %rdi, %rsi
+
+ MOV32mi killed %rsi, 1, _, 0, _, 3
+ MOV32mi killed %rdi, 1, _, 0, _, 2
+ RETQ
+
+ bb.2.is_null:
+ RETQ
+
+...
+---
+name: inc_store_with_load_and_store
+# CHECK-LABEL: inc_store_with_load_and_store
+# CHECK: bb.0.entry:
+# CHECK: _ = FAULTING_OP 2, %bb.2.is_null, {{[0-9]+}}, killed %rdi, 1, _, 0, _, killed %esi, implicit-def dead %eflags
+# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0.entry:
+ successors: %bb.2.is_null, %bb.1.not_null
+ liveins: %rdi, %rsi
+
+ TEST64rr %rdi, %rdi, implicit-def %eflags
+ JE_1 %bb.2.is_null, implicit killed %eflags
+
+ bb.1.not_null:
+ liveins: %rdi, %rsi
+
+ %esi = ADD32rr %esi, %esi, implicit-def dead %eflags
+ ADD32mr killed %rdi, 1, _, 0, _, killed %esi, implicit-def dead %eflags
+ RETQ
+
+ bb.2.is_null:
+ RETQ
+
+...
+---
+name: inc_store_and_load_no_alias
+# CHECK-LABEL: inc_store_and_load_no_alias
+# CHECK: bb.0.entry:
+# CHECK: %eax = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, killed %rdi, 1, _, 0, _ :: (load 4 from %ir.ptr)
+# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0.entry:
+ successors: %bb.2.is_null, %bb.1.not_null
+ liveins: %rdi, %rsi
+
+ TEST64rr %rdi, %rdi, implicit-def %eflags
+ JE_1 %bb.2.is_null, implicit killed %eflags
+
+ bb.1.not_null:
+ liveins: %rdi, %rsi
+
+ MOV32mi killed %rsi, 1, _, 0, _, 3 :: (store 4 into %ir.ptr2)
+ %eax = MOV32rm killed %rdi, 1, _, 0, _ :: (load 4 from %ir.ptr)
+ RETQ %eax
+
+ bb.2.is_null:
+ %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
+ RETQ %eax
+
+...
+---
+name: inc_store_and_load_alias
+# CHECK-LABEL: inc_store_and_load_alias
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0.entry:
+ successors: %bb.2.is_null, %bb.1.not_null
+ liveins: %rdi, %rsi
+
+ TEST64rr %rdi, %rdi, implicit-def %eflags
+ JE_1 %bb.2.is_null, implicit killed %eflags
+
+ bb.1.not_null:
+ liveins: %rdi, %rsi
+
+ MOV32mi killed %rsi, 1, _, 0, _, 3 :: (store 4 into %ir.ptr2)
+ %eax = MOV32rm killed %rdi, 1, _, 0, _ :: (load 4 from %ir.ptr)
+ RETQ %eax
+
+ bb.2.is_null:
+ %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
+ RETQ %eax
+
+...
diff --git a/test/CodeGen/X86/implicit-use-spill.mir b/test/CodeGen/X86/implicit-use-spill.mir
index 827f0f186ced..94bdd47b4470 100644
--- a/test/CodeGen/X86/implicit-use-spill.mir
+++ b/test/CodeGen/X86/implicit-use-spill.mir
@@ -1,4 +1,4 @@
-# RUN: llc -run-pass=greedy -mtriple=x86_64-apple-macosx -o - %s 2>&1 | FileCheck %s
+# RUN: llc -run-pass=greedy -mtriple=x86_64-apple-macosx -o - %s | FileCheck %s
# Make sure we don't assert when we try to reload a value that is just implicitly used.
---
diff --git a/test/CodeGen/X86/imul.ll b/test/CodeGen/X86/imul.ll
index 9d4d19332dbb..45a83cc5dfd9 100644
--- a/test/CodeGen/X86/imul.ll
+++ b/test/CodeGen/X86/imul.ll
@@ -171,3 +171,233 @@ define i64 @mul18446744073709551615_64(i64 %A) {
%mul = mul i64 %A, 18446744073709551615
ret i64 %mul
}
+
+define i32 @test(i32 %a) {
+; X64-LABEL: test:
+; X64: # BB#0: # %entry
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $5, %eax
+; X64-NEXT: subl %edi, %eax
+; X64-NEXT: retq
+;
+; X86-LABEL: test:
+; X86: # BB#0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: shll $5, %eax
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: retl
+entry:
+ %tmp3 = mul i32 %a, 31
+ ret i32 %tmp3
+}
+
+define i32 @test1(i32 %a) {
+; X64-LABEL: test1:
+; X64: # BB#0: # %entry
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $5, %eax
+; X64-NEXT: subl %edi, %eax
+; X64-NEXT: negl %eax
+; X64-NEXT: retq
+;
+; X86-LABEL: test1:
+; X86: # BB#0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: shll $5, %eax
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: negl %eax
+; X86-NEXT: retl
+entry:
+ %tmp3 = mul i32 %a, -31
+ ret i32 %tmp3
+}
+
+
+define i32 @test2(i32 %a) {
+; X64-LABEL: test2:
+; X64: # BB#0: # %entry
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $5, %eax
+; X64-NEXT: leal (%rax,%rdi), %eax
+; X64-NEXT: retq
+;
+; X86-LABEL: test2:
+; X86: # BB#0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: shll $5, %eax
+; X86-NEXT: addl %ecx, %eax
+; X86-NEXT: retl
+entry:
+ %tmp3 = mul i32 %a, 33
+ ret i32 %tmp3
+}
+
+define i32 @test3(i32 %a) {
+; X64-LABEL: test3:
+; X64: # BB#0: # %entry
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $5, %eax
+; X64-NEXT: leal (%rax,%rdi), %eax
+; X64-NEXT: negl %eax
+; X64-NEXT: retq
+;
+; X86-LABEL: test3:
+; X86: # BB#0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: shll $5, %eax
+; X86-NEXT: addl %ecx, %eax
+; X86-NEXT: negl %eax
+; X86-NEXT: retl
+entry:
+ %tmp3 = mul i32 %a, -33
+ ret i32 %tmp3
+}
+
+define i64 @test4(i64 %a) {
+; X64-LABEL: test4:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: shlq $5, %rax
+; X64-NEXT: subq %rdi, %rax
+; X64-NEXT: retq
+;
+; X86-LABEL: test4:
+; X86: # BB#0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: shll $5, %ecx
+; X86-NEXT: subl %eax, %ecx
+; X86-NEXT: movl $31, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+entry:
+ %tmp3 = mul i64 %a, 31
+ ret i64 %tmp3
+}
+
+define i64 @test5(i64 %a) {
+; X64-LABEL: test5:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: shlq $5, %rax
+; X64-NEXT: subq %rdi, %rax
+; X64-NEXT: negq %rax
+; X64-NEXT: retq
+;
+; X86-LABEL: test5:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %esi
+; X86-NEXT: .Lcfi0:
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .Lcfi1:
+; X86-NEXT: .cfi_offset %esi, -8
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %esi
+; X86-NEXT: shll $5, %esi
+; X86-NEXT: subl %eax, %esi
+; X86-NEXT: movl $-31, %edx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: mull %edx
+; X86-NEXT: subl %ecx, %edx
+; X86-NEXT: subl %esi, %edx
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+entry:
+ %tmp3 = mul i64 %a, -31
+ ret i64 %tmp3
+}
+
+
+define i64 @test6(i64 %a) {
+; X64-LABEL: test6:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: shlq $5, %rax
+; X64-NEXT: leaq (%rax,%rdi), %rax
+; X64-NEXT: retq
+;
+; X86-LABEL: test6:
+; X86: # BB#0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: shll $5, %ecx
+; X86-NEXT: addl %eax, %ecx
+; X86-NEXT: movl $33, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+entry:
+ %tmp3 = mul i64 %a, 33
+ ret i64 %tmp3
+}
+
+define i64 @test7(i64 %a) {
+; X64-LABEL: test7:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: shlq $5, %rax
+; X64-NEXT: leaq (%rax,%rdi), %rax
+; X64-NEXT: negq %rax
+; X64-NEXT: retq
+;
+; X86-LABEL: test7:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %esi
+; X86-NEXT: .Lcfi2:
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .Lcfi3:
+; X86-NEXT: .cfi_offset %esi, -8
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %esi
+; X86-NEXT: shll $5, %esi
+; X86-NEXT: addl %eax, %esi
+; X86-NEXT: movl $-33, %edx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: mull %edx
+; X86-NEXT: subl %ecx, %edx
+; X86-NEXT: subl %esi, %edx
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+entry:
+ %tmp3 = mul i64 %a, -33
+ ret i64 %tmp3
+}
+
+define i64 @testOverflow(i64 %a) {
+; X64-LABEL: testOverflow:
+; X64: # BB#0: # %entry
+; X64-NEXT: movabsq $9223372036854775807, %rax # imm = 0x7FFFFFFFFFFFFFFF
+; X64-NEXT: imulq %rdi, %rax
+; X64-NEXT: retq
+;
+; X86-LABEL: testOverflow:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %esi
+; X86-NEXT: .Lcfi4:
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .Lcfi5:
+; X86-NEXT: .cfi_offset %esi, -8
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $-1, %edx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: mull %edx
+; X86-NEXT: movl %ecx, %esi
+; X86-NEXT: shll $31, %esi
+; X86-NEXT: subl %ecx, %esi
+; X86-NEXT: addl %esi, %edx
+; X86-NEXT: subl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+entry:
+ %tmp3 = mul i64 %a, 9223372036854775807
+ ret i64 %tmp3
+}
diff --git a/test/CodeGen/X86/inline-asm-A-constraint.ll b/test/CodeGen/X86/inline-asm-A-constraint.ll
new file mode 100644
index 000000000000..2ad011e88e0d
--- /dev/null
+++ b/test/CodeGen/X86/inline-asm-A-constraint.ll
@@ -0,0 +1,35 @@
+; RUN: llc -mtriple=x86_64-- < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64--"
+
+; Function Attrs: nounwind uwtable
+define { i64, i64 } @foo(i8* %ptr, i128* nocapture readonly %src, i128* nocapture readonly %dst) local_unnamed_addr #0 {
+entry:
+ %0 = load i128, i128* %dst, align 16, !tbaa !1
+ %shr = lshr i128 %0, 64
+ %conv = trunc i128 %shr to i64
+ %conv1 = trunc i128 %0 to i64
+ %1 = load i128, i128* %src, align 16, !tbaa !1
+ %2 = tail call i128 asm sideeffect "lock; cmpxchg16b $1", "=A,=*m,{cx},{bx},0,*m,~{dirflag},~{fpsr},~{flags}"(i8* %ptr, i64 %conv, i64 %conv1, i128 %1, i8* %ptr) #1, !srcloc !5
+ %retval.sroa.0.0.extract.trunc = trunc i128 %2 to i64
+ %retval.sroa.2.0.extract.shift = lshr i128 %2, 64
+ %retval.sroa.2.0.extract.trunc = trunc i128 %retval.sroa.2.0.extract.shift to i64
+ %.fca.0.insert = insertvalue { i64, i64 } undef, i64 %retval.sroa.0.0.extract.trunc, 0
+ %.fca.1.insert = insertvalue { i64, i64 } %.fca.0.insert, i64 %retval.sroa.2.0.extract.trunc, 1
+ ret { i64, i64 } %.fca.1.insert
+}
+; CHECK: lock
+; CHECK-NEXT: cmpxchg16b
+
+attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"clang version 5.0.0 (trunk 300088)"}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"__int128", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
+!5 = !{i32 269}
diff --git a/test/CodeGen/X86/inline-asm-tied.ll b/test/CodeGen/X86/inline-asm-tied.ll
index 25853579a4b7..db63a8048836 100644
--- a/test/CodeGen/X86/inline-asm-tied.ll
+++ b/test/CodeGen/X86/inline-asm-tied.ll
@@ -1,31 +1,27 @@
; RUN: llc < %s -mtriple=i386-apple-darwin9 -O0 -optimize-regalloc -regalloc=basic -no-integrated-as | FileCheck %s
; rdar://6992609
-; CHECK: movl %ecx, 4([[ESP:%e..]])
-; CHECK: movl 4([[ESP]]), [[EDX:%e..]]
-; CHECK: movl [[EDX]], 4([[ESP]])
target triple = "i386-apple-darwin9.0"
-@llvm.used = appending global [1 x i8*] [i8* bitcast (i64 (i64)* @_OSSwapInt64 to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
define i64 @_OSSwapInt64(i64 %_data) nounwind {
entry:
- %retval = alloca i64 ; <i64*> [#uses=2]
- %_data.addr = alloca i64 ; <i64*> [#uses=4]
- store i64 %_data, i64* %_data.addr
- %tmp = load i64, i64* %_data.addr ; <i64> [#uses=1]
- %0 = call i64 asm "bswap %eax\0A\09bswap %edx\0A\09xchgl %eax, %edx", "=A,0,~{dirflag},~{fpsr},~{flags}"(i64 %tmp) nounwind ; <i64> [#uses=1]
- store i64 %0, i64* %_data.addr
- %tmp1 = load i64, i64* %_data.addr ; <i64> [#uses=1]
- store i64 %tmp1, i64* %retval
- %1 = load i64, i64* %retval ; <i64> [#uses=1]
- ret i64 %1
+ %0 = call i64 asm "bswap %eax\0A\09bswap %edx\0A\09xchgl %eax, %%edx", "=A,0,~{dirflag},~{fpsr},~{flags}"(i64 %_data) nounwind
+ ret i64 %0
}
+; CHECK-LABEL: __OSSwapInt64:
+; CHECK-DAG: movl 8(%esp), %edx
+; CHECK-DAG: movl 4(%esp), %eax
+; CHECK: ## InlineAsm Start
+; CHECK: ## InlineAsm End
+; Everything is set up in EAX:EDX; return immediately.
+; CHECK-NEXT: retl
+
; The tied operands are not necessarily in the same order as the defs.
; PR13742
define i64 @swapped(i64 %x, i64 %y) nounwind {
entry:
- %x0 = call { i64, i64 } asm "foo", "=r,=r,1,0,~{dirflag},~{fpsr},~{flags}"(i64 %x, i64 %y) nounwind
- %x1 = extractvalue { i64, i64 } %x0, 0
- ret i64 %x1
+ %x0 = call { i64, i64 } asm "foo", "=r,=r,1,0,~{dirflag},~{fpsr},~{flags}"(i64 %x, i64 %y) nounwind
+ %x1 = extractvalue { i64, i64 } %x0, 0
+ ret i64 %x1
}
diff --git a/test/CodeGen/X86/insertelement-zero.ll b/test/CodeGen/X86/insertelement-zero.ll
index ac27bb7d8af9..ea780a2fa68c 100644
--- a/test/CodeGen/X86/insertelement-zero.ll
+++ b/test/CodeGen/X86/insertelement-zero.ll
@@ -46,22 +46,22 @@ define <2 x double> @insert_v2f64_z1(<2 x double> %a) {
define <4 x double> @insert_v4f64_0zz3(<4 x double> %a) {
; SSE2-LABEL: insert_v4f64_0zz3:
; SSE2: # BB#0:
+; SSE2-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE2-NEXT: xorpd %xmm2, %xmm2
-; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v4f64_0zz3:
; SSE3: # BB#0:
+; SSE3-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE3-NEXT: xorpd %xmm2, %xmm2
-; SSE3-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE3-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v4f64_0zz3:
; SSSE3: # BB#0:
+; SSSE3-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSSE3-NEXT: xorpd %xmm2, %xmm2
-; SSSE3-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSSE3-NEXT: retq
;
@@ -244,24 +244,21 @@ define <8 x float> @insert_v8f32_z12345z7(<8 x float> %a) {
define <4 x i32> @insert_v4i32_01z3(<4 x i32> %a) {
; SSE2-LABEL: insert_v4i32_01z3:
; SSE2: # BB#0:
-; SSE2-NEXT: xorl %eax, %eax
-; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v4i32_01z3:
; SSE3: # BB#0:
-; SSE3-NEXT: xorl %eax, %eax
-; SSE3-NEXT: movd %eax, %xmm1
+; SSE3-NEXT: xorps %xmm1, %xmm1
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v4i32_01z3:
; SSSE3: # BB#0:
-; SSSE3-NEXT: xorl %eax, %eax
-; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSSE3-NEXT: retq
@@ -292,8 +289,7 @@ define <8 x i32> @insert_v8i32_z12345z7(<8 x i32> %a) {
; SSE2: # BB#0:
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
-; SSE2-NEXT: xorl %eax, %eax
-; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,2]
; SSE2-NEXT: retq
@@ -302,8 +298,7 @@ define <8 x i32> @insert_v8i32_z12345z7(<8 x i32> %a) {
; SSE3: # BB#0:
; SSE3-NEXT: xorps %xmm2, %xmm2
; SSE3-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
-; SSE3-NEXT: xorl %eax, %eax
-; SSE3-NEXT: movd %eax, %xmm2
+; SSE3-NEXT: xorps %xmm2, %xmm2
; SSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0]
; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,2]
; SSE3-NEXT: retq
@@ -312,8 +307,7 @@ define <8 x i32> @insert_v8i32_z12345z7(<8 x i32> %a) {
; SSSE3: # BB#0:
; SSSE3-NEXT: xorps %xmm2, %xmm2
; SSSE3-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
-; SSSE3-NEXT: xorl %eax, %eax
-; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: xorps %xmm2, %xmm2
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[3,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,2]
; SSSE3-NEXT: retq
@@ -414,25 +408,21 @@ define <16 x i16> @insert_v16i16_z12345z789ABZDEz(<16 x i16> %a) {
; AVX1-LABEL: insert_v16i16_z12345z789ABZDEz:
; AVX1: # BB#0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0],xmm0[1,2,3,4,5,6,7]
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,5],xmm1[6],xmm0[7]
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5,6],xmm1[7]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_v16i16_z12345z789ABZDEz:
; AVX2: # BB#0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0],xmm0[1,2,3,4,5,6,7]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,5],xmm1[6],xmm0[7]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5,6],xmm1[7]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
; AVX2-NEXT: retq
%1 = insertelement <16 x i16> %a, i16 0, i32 0
%2 = insertelement <16 x i16> %1, i16 0, i32 6
@@ -440,58 +430,30 @@ define <16 x i16> @insert_v16i16_z12345z789ABZDEz(<16 x i16> %a) {
ret <16 x i16> %3
}
-define <16 x i8> @insert_v16i8_z123456789ABZDEz(<16 x i8> %a) {
-; SSE2-LABEL: insert_v16i8_z123456789ABZDEz:
+define <16 x i8> @insert_v16i8_z123456789ABCDEz(<16 x i8> %a) {
+; SSE2-LABEL: insert_v16i8_z123456789ABCDEz:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: xorl %eax, %eax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: pandn %xmm2, %xmm1
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0]
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
-; SSE2-NEXT: pandn %xmm2, %xmm1
-; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
-; SSE3-LABEL: insert_v16i8_z123456789ABZDEz:
+; SSE3-LABEL: insert_v16i8_z123456789ABCDEz:
; SSE3: # BB#0:
-; SSE3-NEXT: movdqa {{.*#+}} xmm1 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; SSE3-NEXT: pand %xmm1, %xmm0
-; SSE3-NEXT: xorl %eax, %eax
-; SSE3-NEXT: movd %eax, %xmm2
-; SSE3-NEXT: pandn %xmm2, %xmm1
-; SSE3-NEXT: por %xmm1, %xmm0
-; SSE3-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0]
-; SSE3-NEXT: pand %xmm1, %xmm0
-; SSE3-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
-; SSE3-NEXT: pandn %xmm2, %xmm1
-; SSE3-NEXT: por %xmm1, %xmm0
+; SSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSE3-NEXT: retq
;
-; SSSE3-LABEL: insert_v16i8_z123456789ABZDEz:
+; SSSE3-LABEL: insert_v16i8_z123456789ABCDEz:
; SSSE3: # BB#0:
-; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = zero,xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; SSSE3-NEXT: xorl %eax, %eax
-; SSSE3-NEXT: movd %eax, %xmm1
-; SSSE3-NEXT: movdqa %xmm1, %xmm2
-; SSSE3-NEXT: pshufb {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSSE3-NEXT: por %xmm2, %xmm0
-; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],zero
-; SSSE3-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; SSSE3-NEXT: por %xmm1, %xmm0
+; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
-; SSE41-LABEL: insert_v16i8_z123456789ABZDEz:
+; SSE41-LABEL: insert_v16i8_z123456789ABCDEz:
; SSE41: # BB#0:
; SSE41-NEXT: xorl %eax, %eax
; SSE41-NEXT: pinsrb $0, %eax, %xmm0
; SSE41-NEXT: pinsrb $15, %eax, %xmm0
; SSE41-NEXT: retq
;
-; AVX-LABEL: insert_v16i8_z123456789ABZDEz:
+; AVX-LABEL: insert_v16i8_z123456789ABCDEz:
; AVX: # BB#0:
; AVX-NEXT: xorl %eax, %eax
; AVX-NEXT: vpinsrb $0, %eax, %xmm0, %xmm0
@@ -505,68 +467,20 @@ define <16 x i8> @insert_v16i8_z123456789ABZDEz(<16 x i8> %a) {
define <32 x i8> @insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz(<32 x i8> %a) {
; SSE2-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: xorl %eax, %eax
-; SSE2-NEXT: movd %eax, %xmm3
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0]
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0]
-; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255]
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1]
-; SSE2-NEXT: pandn %xmm3, %xmm5
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm4, %xmm2
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
+; SSE2-NEXT: andps {{.*}}(%rip), %xmm1
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
; SSE3: # BB#0:
-; SSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; SSE3-NEXT: pand %xmm2, %xmm0
-; SSE3-NEXT: xorl %eax, %eax
-; SSE3-NEXT: movd %eax, %xmm3
-; SSE3-NEXT: pandn %xmm3, %xmm2
-; SSE3-NEXT: por %xmm2, %xmm0
-; SSE3-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0]
-; SSE3-NEXT: pand %xmm2, %xmm0
-; SSE3-NEXT: movdqa %xmm3, %xmm4
-; SSE3-NEXT: pslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0]
-; SSE3-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255]
-; SSE3-NEXT: pand %xmm5, %xmm1
-; SSE3-NEXT: pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1]
-; SSE3-NEXT: pandn %xmm3, %xmm5
-; SSE3-NEXT: por %xmm5, %xmm1
-; SSE3-NEXT: pand %xmm2, %xmm1
-; SSE3-NEXT: pandn %xmm4, %xmm2
-; SSE3-NEXT: por %xmm2, %xmm0
-; SSE3-NEXT: por %xmm2, %xmm1
+; SSE3-NEXT: andps {{.*}}(%rip), %xmm0
+; SSE3-NEXT: andps {{.*}}(%rip), %xmm1
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
; SSSE3: # BB#0:
-; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = zero,xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; SSSE3-NEXT: xorl %eax, %eax
-; SSSE3-NEXT: movd %eax, %xmm2
-; SSSE3-NEXT: movdqa %xmm2, %xmm3
-; SSSE3-NEXT: pshufb {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSSE3-NEXT: por %xmm3, %xmm0
-; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,128]
-; SSSE3-NEXT: pshufb %xmm3, %xmm0
-; SSSE3-NEXT: movdqa %xmm2, %xmm4
-; SSSE3-NEXT: pslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0]
-; SSSE3-NEXT: por %xmm4, %xmm0
-; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13],zero,xmm1[15]
-; SSSE3-NEXT: pshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0],zero
-; SSSE3-NEXT: por %xmm2, %xmm1
-; SSSE3-NEXT: pshufb %xmm3, %xmm1
-; SSSE3-NEXT: por %xmm4, %xmm1
+; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
+; SSSE3-NEXT: andps {{.*}}(%rip), %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
@@ -574,34 +488,32 @@ define <32 x i8> @insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz(<32 x i8> %a) {
; SSE41-NEXT: xorl %eax, %eax
; SSE41-NEXT: pinsrb $0, %eax, %xmm0
; SSE41-NEXT: pinsrb $15, %eax, %xmm0
-; SSE41-NEXT: pinsrb $14, %eax, %xmm1
-; SSE41-NEXT: pinsrb $15, %eax, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
; AVX1: # BB#0:
; AVX1-NEXT: xorl %eax, %eax
; AVX1-NEXT: vpinsrb $0, %eax, %xmm0, %xmm1
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm2[7]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
; AVX2: # BB#0:
; AVX2-NEXT: xorl %eax, %eax
; AVX2-NEXT: vpinsrb $0, %eax, %xmm0, %xmm1
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm2[7]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
%1 = insertelement <32 x i8> %a, i8 0, i32 0
%2 = insertelement <32 x i8> %1, i8 0, i32 15
diff --git a/test/CodeGen/X86/isel-sink.ll b/test/CodeGen/X86/isel-sink.ll
index 27abe051a9b3..2f32097a09b2 100644
--- a/test/CodeGen/X86/isel-sink.ll
+++ b/test/CodeGen/X86/isel-sink.ll
@@ -1,5 +1,4 @@
; RUN: llc < %s -march=x86 | FileCheck %s
-; RUN: llc < %s -march=x86 -addr-sink-using-gep=1 | FileCheck %s
define i32 @test(i32* %X, i32 %B) {
; CHECK-LABEL: test:
diff --git a/test/CodeGen/X86/jump_sign.ll b/test/CodeGen/X86/jump_sign.ll
index ca3e8bf71eba..5d6baad7068d 100644
--- a/test/CodeGen/X86/jump_sign.ll
+++ b/test/CodeGen/X86/jump_sign.ll
@@ -6,7 +6,7 @@ entry:
; CHECK: jns
%tmp1 = add i32 %X, 1 ; <i32> [#uses=1]
%tmp = icmp slt i32 %tmp1, 0 ; <i1> [#uses=1]
- br i1 %tmp, label %cond_true, label %cond_next
+ br i1 %tmp, label %cond_true, label %cond_next, !prof !1
cond_true: ; preds = %entry
%tmp2 = tail call i32 (...) @bar( ) ; <i32> [#uses=0]
@@ -303,3 +303,5 @@ if.then:
if.end:
ret i32 undef
}
+
+!1 = !{!"branch_weights", i32 2, i32 1}
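For context, !prof branch-weights metadata like the annotation added above usually comes from profile data or from source-level hints. A minimal, hypothetical C sketch using the GCC/Clang __builtin_expect builtin, which Clang lowers to branch weights (with its own default ratios, not necessarily the 2:1 used here):

```c
/* Hypothetical C analogue of the test's entry block: the __builtin_expect
 * hint makes the compiler attach !prof branch-weights metadata to the
 * conditional branch (exact weights are compiler defaults, not 2:1). */
extern int bar(void);

int test(int x) {
  if (__builtin_expect(x + 1 < 0, 1)) /* hint: branch is likely taken */
    return bar();
  return 0;
}
```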
diff --git a/test/CodeGen/X86/known-bits-vector.ll b/test/CodeGen/X86/known-bits-vector.ll
index 5f15fb2b7315..eee466a5a60a 100644
--- a/test/CodeGen/X86/known-bits-vector.ll
+++ b/test/CodeGen/X86/known-bits-vector.ll
@@ -23,18 +23,14 @@ define i32 @knownbits_mask_extract_sext(<8 x i16> %a0) nounwind {
define float @knownbits_mask_extract_uitofp(<2 x i64> %a0) nounwind {
; X32-LABEL: knownbits_mask_extract_uitofp:
; X32: # BB#0:
-; X32-NEXT: pushl %ebp
-; X32-NEXT: movl %esp, %ebp
-; X32-NEXT: andl $-8, %esp
-; X32-NEXT: subl $16, %esp
+; X32-NEXT: pushl %eax
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4,5,6,7]
-; X32-NEXT: vmovq %xmm0, {{[0-9]+}}(%esp)
-; X32-NEXT: fildll {{[0-9]+}}(%esp)
-; X32-NEXT: fstps {{[0-9]+}}(%esp)
-; X32-NEXT: flds {{[0-9]+}}(%esp)
-; X32-NEXT: movl %ebp, %esp
-; X32-NEXT: popl %ebp
+; X32-NEXT: vmovd %xmm0, %eax
+; X32-NEXT: vcvtsi2ssl %eax, %xmm2, %xmm0
+; X32-NEXT: vmovss %xmm0, (%esp)
+; X32-NEXT: flds (%esp)
+; X32-NEXT: popl %eax
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_extract_uitofp:
@@ -42,7 +38,7 @@ define float @knownbits_mask_extract_uitofp(<2 x i64> %a0) nounwind {
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4,5,6,7]
; X64-NEXT: vmovq %xmm0, %rax
-; X64-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm0
+; X64-NEXT: vcvtsi2ssl %eax, %xmm2, %xmm0
; X64-NEXT: retq
%1 = and <2 x i64> %a0, <i64 65535, i64 -1>
%2 = extractelement <2 x i64> %1, i32 0
@@ -83,15 +79,15 @@ define <4 x i32> @knownbits_mask_shuffle_sext(<8 x i16> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_sext:
; X32: # BB#0:
; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; X32-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X32-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_shuffle_sext:
; X64: # BB#0:
; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; X64-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-NEXT: retq
%1 = and <8 x i16> %a0, <i16 -1, i16 -1, i16 -1, i16 -1, i16 15, i16 15, i16 15, i16 15>
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -103,15 +99,15 @@ define <4 x i32> @knownbits_mask_shuffle_shuffle_sext(<8 x i16> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_shuffle_sext:
; X32: # BB#0:
; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; X32-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X32-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_shuffle_shuffle_sext:
; X64: # BB#0:
; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; X64-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-NEXT: retq
%1 = and <8 x i16> %a0, <i16 -1, i16 -1, i16 -1, i16 -1, i16 15, i16 15, i16 15, i16 15>
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -476,39 +472,46 @@ define <4 x float> @knownbits_smax_smin_shuffle_uitofp(<4 x i32> %a0) {
declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone
declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone
-define <4 x float> @knownbits_umax_umin_shuffle_uitofp(<4 x i32> %a0) {
-; X32-LABEL: knownbits_umax_umin_shuffle_uitofp:
+define <4 x float> @knownbits_umin_shuffle_uitofp(<4 x i32> %a0) {
+; X32-LABEL: knownbits_umin_shuffle_uitofp:
; X32: # BB#0:
-; X32-NEXT: vpmaxud {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpminud {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
-; X32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X32-NEXT: vpsrld $16, %xmm0, %xmm0
-; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X32-NEXT: vaddps {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-NEXT: retl
;
-; X64-LABEL: knownbits_umax_umin_shuffle_uitofp:
+; X64-LABEL: knownbits_umin_shuffle_uitofp:
; X64: # BB#0:
-; X64-NEXT: vpmaxud {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
-; X64-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X64-NEXT: vpsrld $16, %xmm0, %xmm0
-; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X64-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
-; X64-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; X64-NEXT: vcvtdq2ps %xmm0, %xmm0
; X64-NEXT: retq
- %1 = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> <i32 255, i32 -1, i32 -1, i32 1023>)
- %2 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %1, <4 x i32> <i32 65535, i32 -1, i32 -1, i32 262143>)
- %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
- %4 = uitofp <4 x i32> %3 to <4 x float>
- ret <4 x float> %4
+ %1 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> <i32 65535, i32 -1, i32 -1, i32 262143>)
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
+ %3 = uitofp <4 x i32> %2 to <4 x float>
+ ret <4 x float> %3
}
declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone
declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readnone
+define <4 x i32> @knownbits_umax_shuffle_ashr(<4 x i32> %a0) {
+; X32-LABEL: knownbits_umax_shuffle_ashr:
+; X32: # BB#0:
+; X32-NEXT: vpmaxud {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
+; X32-NEXT: retl
+;
+; X64-LABEL: knownbits_umax_shuffle_ashr:
+; X64: # BB#0:
+; X64-NEXT: vpmaxud {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
+; X64-NEXT: retq
+ %1 = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> <i32 65535, i32 -1, i32 -1, i32 262143>)
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 2, i32 2>
+ %3 = ashr <4 x i32> %2, <i32 31, i32 31, i32 31, i32 31>
+ ret <4 x i32> %3
+}
+
define <4 x float> @knownbits_mask_umax_shuffle_uitofp(<4 x i32> %a0) {
; X32-LABEL: knownbits_mask_umax_shuffle_uitofp:
; X32: # BB#0:
@@ -531,3 +534,73 @@ define <4 x float> @knownbits_mask_umax_shuffle_uitofp(<4 x i32> %a0) {
%4 = uitofp <4 x i32> %3 to <4 x float>
ret <4 x float> %4
}
+
+define <4 x i32> @knownbits_mask_bitreverse_ashr(<4 x i32> %a0) {
+; X32-LABEL: knownbits_mask_bitreverse_ashr:
+; X32: # BB#0:
+; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: knownbits_mask_bitreverse_ashr:
+; X64: # BB#0:
+; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; X64-NEXT: retq
+ %1 = and <4 x i32> %a0, <i32 -2, i32 -2, i32 -2, i32 -2>
+ %2 = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %1)
+ %3 = ashr <4 x i32> %2, <i32 31, i32 31, i32 31, i32 31>
+ ret <4 x i32> %3
+}
+declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>) nounwind readnone
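A plain-C sketch of why this folds to zero; the helper bitreverse32 is a stand-in for the llvm.bitreverse intrinsic, applied per lane:

```c
#include <stdint.h>

/* Portable bit reverse standing in for llvm.bitreverse.i32. */
static uint32_t bitreverse32(uint32_t x) {
  uint32_t r = 0;
  for (int i = 0; i < 32; ++i, x >>= 1)
    r = (r << 1) | (x & 1u);
  return r;
}

/* Clearing bit 0 before the reverse makes bit 31 of the result known
 * zero, so the arithmetic shift right by 31 always yields 0. */
int32_t knownbits_mask_bitreverse_ashr(uint32_t x) {
  return (int32_t)bitreverse32(x & ~1u) >> 31;
}
```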
+
+; If we don't know that the input isn't INT_MIN we can't combine to sitofp
+define <4 x float> @knownbits_abs_uitofp(<4 x i32> %a0) {
+; X32-LABEL: knownbits_abs_uitofp:
+; X32: # BB#0:
+; X32-NEXT: vpabsd %xmm0, %xmm0
+; X32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
+; X32-NEXT: vpsrld $16, %xmm0, %xmm0
+; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
+; X32-NEXT: vaddps {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: knownbits_abs_uitofp:
+; X64: # BB#0:
+; X64-NEXT: vpabsd %xmm0, %xmm0
+; X64-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
+; X64-NEXT: vpsrld $16, %xmm0, %xmm0
+; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
+; X64-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; X64-NEXT: retq
+ %1 = sub <4 x i32> zeroinitializer, %a0
+ %2 = icmp slt <4 x i32> %a0, zeroinitializer
+ %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> %a0
+ %4 = uitofp <4 x i32> %3 to <4 x float>
+ ret <4 x float> %4
+}
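A C-level sketch of the abs-then-uitofp pattern encoded by the IR above; because x == INT_MIN maps to 0x80000000, the absolute value can still have its sign bit set, so the unsigned conversion cannot be relaxed to a signed one:

```c
#include <stdint.h>

float knownbits_abs_uitofp(int32_t x) {
  /* Negate through unsigned arithmetic so INT_MIN stays well defined. */
  uint32_t a = x < 0 ? (uint32_t)0 - (uint32_t)x : (uint32_t)x;
  return (float)a; /* unsigned conversion: the sign bit may be set */
}
```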
+
+define <4 x float> @knownbits_or_abs_uitofp(<4 x i32> %a0) {
+; X32-LABEL: knownbits_or_abs_uitofp:
+; X32: # BB#0:
+; X32-NEXT: vpor {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,2]
+; X32-NEXT: vpabsd %xmm0, %xmm0
+; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: knownbits_or_abs_uitofp:
+; X64: # BB#0:
+; X64-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,2]
+; X64-NEXT: vpabsd %xmm0, %xmm0
+; X64-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X64-NEXT: retq
+ %1 = or <4 x i32> %a0, <i32 1, i32 0, i32 3, i32 0>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 0, i32 2>
+ %3 = sub <4 x i32> zeroinitializer, %2
+ %4 = icmp slt <4 x i32> %2, zeroinitializer
+ %5 = select <4 x i1> %4, <4 x i32> %3, <4 x i32> %2
+ %6 = uitofp <4 x i32> %5 to <4 x float>
+ ret <4 x float> %6
+}
diff --git a/test/CodeGen/X86/known-bits.ll b/test/CodeGen/X86/known-bits.ll
index 46451f21d8d6..81a60cdee3ac 100644
--- a/test/CodeGen/X86/known-bits.ll
+++ b/test/CodeGen/X86/known-bits.ll
@@ -103,3 +103,173 @@ CF246: ; preds = %CF237
%E156 = extractelement <4 x i1> %Cmp117, i32 2
br label %CF
}
+
+define i32 @knownbits_mask_add_lshr(i32 %a0, i32 %a1) nounwind {
+; X32-LABEL: knownbits_mask_add_lshr:
+; X32: # BB#0:
+; X32-NEXT: xorl %eax, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: knownbits_mask_add_lshr:
+; X64: # BB#0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: retq
+ %1 = and i32 %a0, 32767
+ %2 = and i32 %a1, 32766
+ %3 = add i32 %1, %2
+ %4 = lshr i32 %3, 17
+ ret i32 %4
+}
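The fold rests on simple range arithmetic: (a0 & 32767) + (a1 & 32766) is at most 65533, which is below 2^17, so every bit at position 17 and above is known zero. A C sketch of the same computation, for illustration only:

```c
#include <stdint.h>

/* (a0 & 32767) <= 32767 and (a1 & 32766) <= 32766, so their sum is at
 * most 65533 < 2^17; shifting right by 17 therefore always yields 0,
 * which is why the checks above expect a bare xorl. */
uint32_t knownbits_mask_add_lshr(uint32_t a0, uint32_t a1) {
  return ((a0 & 32767u) + (a1 & 32766u)) >> 17;
}
```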
+
+define i128 @knownbits_mask_addc_shl(i64 %a0, i64 %a1, i64 %a2) nounwind {
+; X32-LABEL: knownbits_mask_addc_shl:
+; X32: # BB#0:
+; X32-NEXT: pushl %edi
+; X32-NEXT: pushl %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl $-1024, %esi # imm = 0xFC00
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: andl %esi, %edi
+; X32-NEXT: andl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: addl %edi, %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: shldl $22, %edx, %ecx
+; X32-NEXT: shldl $22, %esi, %edx
+; X32-NEXT: movl %edx, 8(%eax)
+; X32-NEXT: movl %ecx, 12(%eax)
+; X32-NEXT: movl $0, 4(%eax)
+; X32-NEXT: movl $0, (%eax)
+; X32-NEXT: popl %esi
+; X32-NEXT: popl %edi
+; X32-NEXT: retl $4
+;
+; X64-LABEL: knownbits_mask_addc_shl:
+; X64: # BB#0:
+; X64-NEXT: andq $-1024, %rdi # imm = 0xFC00
+; X64-NEXT: andq $-1024, %rsi # imm = 0xFC00
+; X64-NEXT: addq %rdi, %rsi
+; X64-NEXT: adcl $0, %edx
+; X64-NEXT: shldq $54, %rsi, %rdx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: retq
+ %1 = and i64 %a0, -1024
+ %2 = zext i64 %1 to i128
+ %3 = and i64 %a1, -1024
+ %4 = zext i64 %3 to i128
+ %5 = add i128 %2, %4
+ %6 = zext i64 %a2 to i128
+ %7 = shl i128 %6, 64
+ %8 = add i128 %5, %7
+ %9 = shl i128 %8, 54
+ ret i128 %9
+}
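The interesting part here is the carry chain: both masked addends are multiples of 1024, so the low 10 bits of the 128-bit sum are zero, and after the shift left by 54 the entire low 64-bit half is known zero (hence the xorl %eax, %eax). A C sketch using the GCC/Clang unsigned __int128 extension:

```c
#include <stdint.h>

/* Both addends are multiples of 1024 (low 10 bits zero); after << 54 the
 * low 64 bits of the 128-bit result are provably zero. */
unsigned __int128 knownbits_mask_addc_shl(uint64_t a0, uint64_t a1,
                                          uint64_t a2) {
  unsigned __int128 sum = (unsigned __int128)(a0 & ~1023ULL)
                        + (unsigned __int128)(a1 & ~1023ULL)
                        + ((unsigned __int128)a2 << 64);
  return sum << 54;
}
```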
+
+define {i32, i1} @knownbits_uaddo_saddo(i64 %a0, i64 %a1) nounwind {
+; X32-LABEL: knownbits_uaddo_saddo:
+; X32: # BB#0:
+; X32-NEXT: pushl %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: leal (%ecx,%eax), %edx
+; X32-NEXT: cmpl %ecx, %edx
+; X32-NEXT: setb %bl
+; X32-NEXT: testl %eax, %eax
+; X32-NEXT: setns %al
+; X32-NEXT: testl %ecx, %ecx
+; X32-NEXT: setns %cl
+; X32-NEXT: cmpb %al, %cl
+; X32-NEXT: sete %al
+; X32-NEXT: testl %edx, %edx
+; X32-NEXT: setns %dl
+; X32-NEXT: cmpb %dl, %cl
+; X32-NEXT: setne %dl
+; X32-NEXT: andb %al, %dl
+; X32-NEXT: orb %bl, %dl
+; X32-NEXT: xorl %eax, %eax
+; X32-NEXT: popl %ebx
+; X32-NEXT: retl
+;
+; X64-LABEL: knownbits_uaddo_saddo:
+; X64: # BB#0:
+; X64-NEXT: shlq $32, %rdi
+; X64-NEXT: shlq $32, %rsi
+; X64-NEXT: addq %rdi, %rsi
+; X64-NEXT: setb %al
+; X64-NEXT: seto %dl
+; X64-NEXT: orb %al, %dl
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: retq
+ %1 = shl i64 %a0, 32
+ %2 = shl i64 %a1, 32
+ %u = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %1, i64 %2)
+ %uval = extractvalue {i64, i1} %u, 0
+ %uovf = extractvalue {i64, i1} %u, 1
+ %s = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %1, i64 %2)
+ %sval = extractvalue {i64, i1} %s, 0
+ %sovf = extractvalue {i64, i1} %s, 1
+ %sum = add i64 %uval, %sval
+ %3 = trunc i64 %sum to i32
+ %4 = or i1 %uovf, %sovf
+ %ret0 = insertvalue {i32, i1} undef, i32 %3, 0
+ %ret1 = insertvalue {i32, i1} %ret0, i1 %4, 1
+ ret {i32, i1} %ret1
+}
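Roughly the same computation at the C level, using the GCC/Clang __builtin_add_overflow builtins in place of the IR intrinsics; since both operands have their low 32 bits shifted out, the truncated sum is known to be zero and only the overflow flags survive:

```c
#include <stdbool.h>
#include <stdint.h>

bool knownbits_uaddo_saddo(uint64_t a0, uint64_t a1, uint32_t *out) {
  uint64_t x = a0 << 32, y = a1 << 32;
  uint64_t usum;
  int64_t ssum;
  bool uovf = __builtin_add_overflow(x, y, &usum);
  bool sovf = __builtin_add_overflow((int64_t)x, (int64_t)y, &ssum);
  /* Low 32 bits of both sums are zero, so the truncated result is 0. */
  *out = (uint32_t)(usum + (uint64_t)ssum);
  return uovf || sovf;
}
```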
+
+define {i32, i1} @knownbits_usubo_ssubo(i64 %a0, i64 %a1) nounwind {
+; X32-LABEL: knownbits_usubo_ssubo:
+; X32: # BB#0:
+; X32-NEXT: pushl %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl %ecx, %edx
+; X32-NEXT: subl %eax, %edx
+; X32-NEXT: setns %bl
+; X32-NEXT: cmpl %edx, %ecx
+; X32-NEXT: setb %dh
+; X32-NEXT: testl %ecx, %ecx
+; X32-NEXT: setns %cl
+; X32-NEXT: cmpb %bl, %cl
+; X32-NEXT: setne %ch
+; X32-NEXT: testl %eax, %eax
+; X32-NEXT: setns %al
+; X32-NEXT: cmpb %al, %cl
+; X32-NEXT: setne %dl
+; X32-NEXT: andb %ch, %dl
+; X32-NEXT: orb %dh, %dl
+; X32-NEXT: xorl %eax, %eax
+; X32-NEXT: popl %ebx
+; X32-NEXT: retl
+;
+; X64-LABEL: knownbits_usubo_ssubo:
+; X64: # BB#0:
+; X64-NEXT: shlq $32, %rdi
+; X64-NEXT: shlq $32, %rsi
+; X64-NEXT: cmpq %rsi, %rdi
+; X64-NEXT: setb %al
+; X64-NEXT: seto %dl
+; X64-NEXT: orb %al, %dl
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: retq
+ %1 = shl i64 %a0, 32
+ %2 = shl i64 %a1, 32
+ %u = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %1, i64 %2)
+ %uval = extractvalue {i64, i1} %u, 0
+ %uovf = extractvalue {i64, i1} %u, 1
+ %s = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %1, i64 %2)
+ %sval = extractvalue {i64, i1} %s, 0
+ %sovf = extractvalue {i64, i1} %s, 1
+ %sum = add i64 %uval, %sval
+ %3 = trunc i64 %sum to i32
+ %4 = or i1 %uovf, %sovf
+ %ret0 = insertvalue {i32, i1} undef, i32 %3, 0
+ %ret1 = insertvalue {i32, i1} %ret0, i1 %4, 1
+ ret {i32, i1} %ret1
+}
+
+declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
+declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
+declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
+declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
diff --git a/test/CodeGen/X86/known-signbits-vector.ll b/test/CodeGen/X86/known-signbits-vector.ll
new file mode 100644
index 000000000000..cea9ac26edbc
--- /dev/null
+++ b/test/CodeGen/X86/known-signbits-vector.ll
@@ -0,0 +1,139 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64
+
+define <2 x double> @signbits_sext_v2i64_sitofp_v2f64(i32 %a0, i32 %a1) nounwind {
+; X32-LABEL: signbits_sext_v2i64_sitofp_v2f64:
+; X32: # BB#0:
+; X32-NEXT: vcvtdq2pd {{[0-9]+}}(%esp), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: signbits_sext_v2i64_sitofp_v2f64:
+; X64: # BB#0:
+; X64-NEXT: vmovd %edi, %xmm0
+; X64-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
+; X64-NEXT: vcvtdq2pd %xmm0, %xmm0
+; X64-NEXT: retq
+ %1 = sext i32 %a0 to i64
+ %2 = sext i32 %a1 to i64
+ %3 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %4 = insertelement <2 x i64> %3, i64 %2, i32 1
+ %5 = sitofp <2 x i64> %4 to <2 x double>
+ ret <2 x double> %5
+}
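The point of the test: each i64 lane is a sign-extended i32, so converting the i64 vector to double is equivalent to converting the original 32-bit values, which is exactly what vcvtdq2pd does. A scalar C sketch of the equivalence:

```c
#include <stdint.h>

void signbits_sext_sitofp(int32_t a0, int32_t a1, double out[2]) {
  int64_t v0 = a0, v1 = a1; /* sext i32 -> i64 */
  out[0] = (double)v0;      /* same value as (double)a0 */
  out[1] = (double)v1;
}
```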
+
+define <4 x float> @signbits_sext_v4i64_sitofp_v4f32(i8 signext %a0, i16 signext %a1, i32 %a2, i32 %a3) nounwind {
+; X32-LABEL: signbits_sext_v4i64_sitofp_v4f32:
+; X32: # BB#0:
+; X32-NEXT: movsbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movswl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: vmovd %eax, %xmm0
+; X32-NEXT: sarl $31, %eax
+; X32-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; X32-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
+; X32-NEXT: sarl $31, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: vmovd %eax, %xmm1
+; X32-NEXT: sarl $31, %eax
+; X32-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; X32-NEXT: vpinsrd $2, %edx, %xmm1, %xmm1
+; X32-NEXT: sarl $31, %edx
+; X32-NEXT: vpinsrd $3, %edx, %xmm1, %xmm1
+; X32-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0
+; X32-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: signbits_sext_v4i64_sitofp_v4f32:
+; X64: # BB#0:
+; X64-NEXT: movslq %edi, %rax
+; X64-NEXT: movslq %esi, %rsi
+; X64-NEXT: movslq %edx, %rdx
+; X64-NEXT: movslq %ecx, %rcx
+; X64-NEXT: vmovq %rcx, %xmm0
+; X64-NEXT: vmovq %rdx, %xmm1
+; X64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; X64-NEXT: vmovq %rsi, %xmm1
+; X64-NEXT: vmovq %rax, %xmm2
+; X64-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; X64-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[0,2]
+; X64-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X64-NEXT: retq
+ %1 = sext i8 %a0 to i64
+ %2 = sext i16 %a1 to i64
+ %3 = sext i32 %a2 to i64
+ %4 = sext i32 %a3 to i64
+ %5 = insertelement <4 x i64> undef, i64 %1, i32 0
+ %6 = insertelement <4 x i64> %5, i64 %2, i32 1
+ %7 = insertelement <4 x i64> %6, i64 %3, i32 2
+ %8 = insertelement <4 x i64> %7, i64 %4, i32 3
+ %9 = sitofp <4 x i64> %8 to <4 x float>
+ ret <4 x float> %9
+}
+
+define float @signbits_ashr_extract_sitofp(<2 x i64> %a0) nounwind {
+; X32-LABEL: signbits_ashr_extract_sitofp:
+; X32: # BB#0:
+; X32-NEXT: pushl %eax
+; X32-NEXT: vpextrd $1, %xmm0, %eax
+; X32-NEXT: vcvtsi2ssl %eax, %xmm1, %xmm0
+; X32-NEXT: vmovss %xmm0, (%esp)
+; X32-NEXT: flds (%esp)
+; X32-NEXT: popl %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: signbits_ashr_extract_sitofp:
+; X64: # BB#0:
+; X64-NEXT: vpsrad $31, %xmm0, %xmm1
+; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: vcvtsi2ssl %eax, %xmm2, %xmm0
+; X64-NEXT: retq
+ %1 = ashr <2 x i64> %a0, <i64 32, i64 32>
+ %2 = extractelement <2 x i64> %1, i32 0
+ %3 = sitofp i64 %2 to float
+ ret float %3
+}
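The scalar essence of this test: after an arithmetic shift right by 32, the extracted i64 fits in 32 bits, so the conversion to float can use the 32-bit cvtsi2ssl form instead of the 64-bit one. A C sketch:

```c
#include <stdint.h>

float signbits_ashr_extract_sitofp(int64_t v0) {
  int64_t s = v0 >> 32;   /* at least 33 sign bits remain */
  return (float)s;        /* same value as (float)(int32_t)s */
}
```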
+
+define float @signbits_ashr_insert_ashr_extract_sitofp(i64 %a0, i64 %a1) nounwind {
+; X32-LABEL: signbits_ashr_insert_ashr_extract_sitofp:
+; X32: # BB#0:
+; X32-NEXT: pushl %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: shrdl $30, %ecx, %eax
+; X32-NEXT: sarl $30, %ecx
+; X32-NEXT: vmovd %eax, %xmm0
+; X32-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
+; X32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X32-NEXT: vpsrlq $3, %xmm0, %xmm0
+; X32-NEXT: vmovd %xmm0, %eax
+; X32-NEXT: vcvtsi2ssl %eax, %xmm1, %xmm0
+; X32-NEXT: vmovss %xmm0, (%esp)
+; X32-NEXT: flds (%esp)
+; X32-NEXT: popl %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: signbits_ashr_insert_ashr_extract_sitofp:
+; X64: # BB#0:
+; X64-NEXT: sarq $30, %rdi
+; X64-NEXT: vmovq %rsi, %xmm0
+; X64-NEXT: vmovq %rdi, %xmm1
+; X64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; X64-NEXT: vpsrad $3, %xmm0, %xmm1
+; X64-NEXT: vpsrlq $3, %xmm0, %xmm0
+; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; X64-NEXT: vmovq %xmm0, %rax
+; X64-NEXT: vcvtsi2ssl %eax, %xmm2, %xmm0
+; X64-NEXT: retq
+ %1 = ashr i64 %a0, 30
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 %a1, i32 1
+ %4 = ashr <2 x i64> %3, <i64 3, i64 3>
+ %5 = extractelement <2 x i64> %4, i32 0
+ %6 = sitofp i64 %5 to float
+ ret float %6
+}
diff --git a/test/CodeGen/X86/lea-opt-with-debug.mir b/test/CodeGen/X86/lea-opt-with-debug.mir
new file mode 100644
index 000000000000..ebf86ff718db
--- /dev/null
+++ b/test/CodeGen/X86/lea-opt-with-debug.mir
@@ -0,0 +1,122 @@
+# RUN: llc -mtriple=x86_64-unknown-unknown -start-after peephole-opt -stop-before detect-dead-lanes -o - %s | FileCheck %s
+
+# Test that the LEA optimization pass can remove a redundant LEA even when it
+# is also used by a DBG_VALUE.
+
+--- |
+ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+ %struct.A = type { i32, i32, i32 }
+
+ @c = common local_unnamed_addr global %struct.A* null, align 8
+ @a = common local_unnamed_addr global i32 0, align 4
+ @d = common local_unnamed_addr global i32 0, align 4
+ @b = common local_unnamed_addr global i32 0, align 4
+
+ define i32 @fn1() local_unnamed_addr !dbg !8 {
+ %1 = load %struct.A*, %struct.A** @c, align 8, !dbg !13
+ %2 = load i32, i32* @a, align 4, !dbg !13
+ %3 = sext i32 %2 to i64, !dbg !13
+ %4 = getelementptr inbounds %struct.A, %struct.A* %1, i64 %3, !dbg !13
+ %5 = ptrtoint %struct.A* %4 to i64, !dbg !13
+ %6 = trunc i64 %5 to i32, !dbg !13
+ store i32 %6, i32* @d, align 4, !dbg !13
+ %7 = getelementptr inbounds %struct.A, %struct.A* %1, i64 %3, i32 2, !dbg !14
+ tail call void @llvm.dbg.value(metadata i32* %7, i64 0, metadata !11, metadata !15), !dbg !16
+ br label %8, !dbg !17
+
+ ; <label>:8: ; preds = %8, %0
+ %9 = load i32, i32* %7, align 4, !dbg !18
+ store i32 %9, i32* @d, align 4, !dbg !18
+ br label %8, !dbg !19
+ }
+
+ ; Function Attrs: nounwind readnone
+ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #0
+
+ attributes #0 = { nounwind readnone }
+
+ !llvm.dbg.cu = !{!0}
+ !llvm.module.flags = !{!5, !6, !7}
+
+ !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !3, globals: !2)
+ !1 = !DIFile(filename: "test.c", directory: "")
+ !2 = !{}
+ !3 = !{!4}
+ !4 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+ !5 = !{i32 2, !"Dwarf Version", i32 4}
+ !6 = !{i32 2, !"Debug Info Version", i32 3}
+ !7 = !{i32 1, !"PIC Level", i32 2}
+ !8 = distinct !DISubprogram(name: "fn1", scope: !1, file: !1, line: 7, type: !9, isLocal: false, isDefinition: true, scopeLine: 7, isOptimized: true, unit: !0, variables: !10)
+ !9 = !DISubroutineType(types: !3)
+ !10 = !{!11}
+ !11 = !DILocalVariable(name: "e", scope: !8, file: !1, line: 8, type: !12)
+ !12 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !4, size: 64)
+ !13 = !DILocation(line: 9, scope: !8)
+ !14 = !DILocation(line: 10, scope: !8)
+ !15 = !DIExpression()
+ !16 = !DILocation(line: 8, scope: !8)
+ !17 = !DILocation(line: 11, scope: !8)
+ !18 = !DILocation(line: 13, scope: !8)
+ !19 = !DILocation(line: 14, scope: !8)
+
+...
+---
+name: fn1
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr64 }
+ - { id: 1, class: gr64 }
+ - { id: 2, class: gr64_nosp }
+ - { id: 3, class: gr64_nosp }
+ - { id: 4, class: gr64 }
+ - { id: 5, class: gr32 }
+ - { id: 6, class: gr32 }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ successors: %bb.1(0x80000000)
+
+ ; CHECK: %3 = LEA64r %2, 2, %2, 0, _, debug-location !13
+ ; CHECK-NEXT: %4 = LEA64r %1, 4, %3, 0, _, debug-location !13
+ ; CHECK-NOT: %0 = LEA64r %1, 4, %3, 8, _, debug-location !14
+ ; CHECK: DBG_VALUE debug-use _, debug-use _, !11, !15, debug-location !16
+
+ %1 = MOV64rm %rip, 1, _, @c, _, debug-location !13 :: (dereferenceable load 8 from @c)
+ %2 = MOVSX64rm32 %rip, 1, _, @a, _, debug-location !13 :: (dereferenceable load 4 from @a)
+ %3 = LEA64r %2, 2, %2, 0, _, debug-location !13
+ %4 = LEA64r %1, 4, %3, 0, _, debug-location !13
+ %5 = COPY %4.sub_32bit, debug-location !13
+ MOV32mr %rip, 1, _, @d, _, killed %5, debug-location !13 :: (store 4 into @d)
+ %0 = LEA64r %1, 4, %3, 8, _, debug-location !14
+ DBG_VALUE debug-use %0, debug-use _, !11, !15, debug-location !16
+
+ ; CHECK-LABEL: bb.1 (%ir-block.8):
+ ; CHECK: %6 = MOV32rm %4, 1, _, 8, _, debug-location !18 :: (load 4 from %ir.7)
+
+ bb.1 (%ir-block.8):
+ successors: %bb.1(0x80000000)
+
+ %6 = MOV32rm %0, 1, _, 0, _, debug-location !18 :: (load 4 from %ir.7)
+ MOV32mr %rip, 1, _, @d, _, killed %6, debug-location !18 :: (store 4 into @d)
+ JMP_1 %bb.1, debug-location !19
+
+...
diff --git a/test/CodeGen/X86/lfence.ll b/test/CodeGen/X86/lfence.ll
deleted file mode 100644
index 1903a1e31b5c..000000000000
--- a/test/CodeGen/X86/lfence.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep lfence
-
-declare void @llvm.x86.sse2.lfence() nounwind
-
-define void @test() {
- call void @llvm.x86.sse2.lfence()
- ret void
-}
diff --git a/test/CodeGen/X86/licm-nested.ll b/test/CodeGen/X86/licm-nested.ll
index 42e6d12ec1e0..63e3c5c3b6b2 100644
--- a/test/CodeGen/X86/licm-nested.ll
+++ b/test/CodeGen/X86/licm-nested.ll
@@ -1,5 +1,5 @@
; REQUIRES: asserts
-; RUN: llc -mtriple=x86_64-apple-darwin -march=x86-64 < %s -o /dev/null -stats -info-output-file - | grep "hoisted out of loops" | grep 4
+; RUN: llc -mtriple=x86_64-apple-darwin -march=x86-64 < %s -o /dev/null -stats -info-output-file - | grep "hoisted out of loops" | grep 5
; MachineLICM should be able to hoist the symbolic addresses out of
; the inner loops.
diff --git a/test/CodeGen/X86/live-range-nosubreg.ll b/test/CodeGen/X86/live-range-nosubreg.ll
index f28d59237b42..899a375221c4 100644
--- a/test/CodeGen/X86/live-range-nosubreg.ll
+++ b/test/CodeGen/X86/live-range-nosubreg.ll
@@ -1,7 +1,6 @@
-; RUN: llc -march=x86-64 < %s | FileCheck %s
+; RUN: llc -march=x86-64 < %s
-; Check for a sane output. This testcase used to crash. See PR29132.
-; CHECK: leal -1
+; This testcase used to crash. See PR29132.
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/CodeGen/X86/load-combine.ll b/test/CodeGen/X86/load-combine.ll
new file mode 100644
index 000000000000..e737a51cf405
--- /dev/null
+++ b/test/CodeGen/X86/load-combine.ll
@@ -0,0 +1,1314 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefix=CHECK --check-prefix=BSWAP
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+movbe | FileCheck %s --check-prefix=CHECK --check-prefix=MOVBE
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=CHECK64 --check-prefix=BSWAP64
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+movbe | FileCheck %s --check-prefix=CHECK64 --check-prefix=MOVBE64
+
+; i8* p;
+; (i32) p[0] | ((i32) p[1] << 8) | ((i32) p[2] << 16) | ((i32) p[3] << 24)
+define i32 @load_i32_by_i8(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl (%eax), %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: load_i32_by_i8:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movl (%rdi), %eax
+; CHECK64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 1
+ %tmp2 = zext i8 %tmp1 to i32
+ %tmp3 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp4 = load i8, i8* %tmp3, align 1
+ %tmp5 = zext i8 %tmp4 to i32
+ %tmp6 = shl nuw nsw i32 %tmp5, 8
+ %tmp7 = or i32 %tmp6, %tmp2
+ %tmp8 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp9 = load i8, i8* %tmp8, align 1
+ %tmp10 = zext i8 %tmp9 to i32
+ %tmp11 = shl nuw nsw i32 %tmp10, 16
+ %tmp12 = or i32 %tmp7, %tmp11
+ %tmp13 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp14 = load i8, i8* %tmp13, align 1
+ %tmp15 = zext i8 %tmp14 to i32
+ %tmp16 = shl nuw nsw i32 %tmp15, 24
+ %tmp17 = or i32 %tmp12, %tmp16
+ ret i32 %tmp17
+}
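These tests all revolve around one idea: a chain of adjacent byte loads merged with shifts and ors should be recognized and folded into a single wide load. A hand-written C sketch of the little-endian source pattern (the first test above is its IR form):

```c
#include <stdint.h>

/* Little-endian 32-bit read, byte by byte; the load-combine optimization
 * may fold this into a single movl. */
uint32_t read_le32(const uint8_t *p) {
  return (uint32_t)p[0]
       | ((uint32_t)p[1] << 8)
       | ((uint32_t)p[2] << 16)
       | ((uint32_t)p[3] << 24);
}
```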
+
+; i8* p;
+; ((i32) p[0] << 24) | ((i32) p[1] << 16) | ((i32) p[2] << 8) | (i32) p[3]
+define i32 @load_i32_by_i8_bswap(i32* %arg) {
+; BSWAP-LABEL: load_i32_by_i8_bswap:
+; BSWAP: # BB#0:
+; BSWAP-NEXT: movl {{[0-9]+}}(%esp), %eax
+; BSWAP-NEXT: movl (%eax), %eax
+; BSWAP-NEXT: bswapl %eax
+; BSWAP-NEXT: retl
+;
+; MOVBE-LABEL: load_i32_by_i8_bswap:
+; MOVBE: # BB#0:
+; MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; MOVBE-NEXT: movbel (%eax), %eax
+; MOVBE-NEXT: retl
+;
+; BSWAP64-LABEL: load_i32_by_i8_bswap:
+; BSWAP64: # BB#0:
+; BSWAP64-NEXT: movl (%rdi), %eax
+; BSWAP64-NEXT: bswapl %eax
+; BSWAP64-NEXT: retq
+;
+; MOVBE64-LABEL: load_i32_by_i8_bswap:
+; MOVBE64: # BB#0:
+; MOVBE64-NEXT: movbel (%rdi), %eax
+; MOVBE64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 1
+ %tmp2 = zext i8 %tmp1 to i32
+ %tmp3 = shl nuw nsw i32 %tmp2, 24
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 16
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 8
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = or i32 %tmp13, %tmp16
+ ret i32 %tmp17
+}
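The big-endian counterpart, shown here in C for comparison; on a little-endian x86 target it folds to a single load plus bswapl, or to one movbel when MOVBE is available:

```c
#include <stdint.h>

/* Big-endian 32-bit read on a little-endian target. */
uint32_t read_be32(const uint8_t *p) {
  return ((uint32_t)p[0] << 24)
       | ((uint32_t)p[1] << 16)
       | ((uint32_t)p[2] << 8)
       |  (uint32_t)p[3];
}
```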
+
+; i16* p;
+; (i32) p[0] | ((i32) p[1] << 16)
+define i32 @load_i32_by_i16(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i16:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl (%eax), %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: load_i32_by_i16:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movl (%rdi), %eax
+; CHECK64-NEXT: retq
+ %tmp = bitcast i32* %arg to i16*
+ %tmp1 = load i16, i16* %tmp, align 1
+ %tmp2 = zext i16 %tmp1 to i32
+ %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
+ %tmp4 = load i16, i16* %tmp3, align 1
+ %tmp5 = zext i16 %tmp4 to i32
+ %tmp6 = shl nuw nsw i32 %tmp5, 16
+ %tmp7 = or i32 %tmp6, %tmp2
+ ret i32 %tmp7
+}
+
+; i16* p_16;
+; i8* p_8 = (i8*) p_16;
+; (i32) p_16[0] | ((i32) p_8[2] << 16) | ((i32) p_8[3] << 24)
+define i32 @load_i32_by_i16_i8(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i16_i8:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl (%eax), %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: load_i32_by_i16_i8:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movl (%rdi), %eax
+; CHECK64-NEXT: retq
+ %tmp = bitcast i32* %arg to i16*
+ %tmp1 = bitcast i32* %arg to i8*
+ %tmp2 = load i16, i16* %tmp, align 1
+ %tmp3 = zext i16 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp1, i32 2
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 16
+ %tmp8 = getelementptr inbounds i8, i8* %tmp1, i32 3
+ %tmp9 = load i8, i8* %tmp8, align 1
+ %tmp10 = zext i8 %tmp9 to i32
+ %tmp11 = shl nuw nsw i32 %tmp10, 24
+ %tmp12 = or i32 %tmp7, %tmp11
+ %tmp13 = or i32 %tmp12, %tmp3
+ ret i32 %tmp13
+}
+
+
+; i8* p;
+; (i32) ((i16) p[0] | ((i16) p[1] << 8)) | ((i32) ((i16) p[2] | ((i16) p[3] << 8)) << 16)
+define i32 @load_i32_by_i16_by_i8(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i16_by_i8:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl (%eax), %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: load_i32_by_i16_by_i8:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movl (%rdi), %eax
+; CHECK64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 1
+ %tmp2 = zext i8 %tmp1 to i16
+ %tmp3 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp4 = load i8, i8* %tmp3, align 1
+ %tmp5 = zext i8 %tmp4 to i16
+ %tmp6 = shl nuw nsw i16 %tmp5, 8
+ %tmp7 = or i16 %tmp6, %tmp2
+ %tmp8 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp9 = load i8, i8* %tmp8, align 1
+ %tmp10 = zext i8 %tmp9 to i16
+ %tmp11 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp12 = load i8, i8* %tmp11, align 1
+ %tmp13 = zext i8 %tmp12 to i16
+ %tmp14 = shl nuw nsw i16 %tmp13, 8
+ %tmp15 = or i16 %tmp14, %tmp10
+ %tmp16 = zext i16 %tmp7 to i32
+ %tmp17 = zext i16 %tmp15 to i32
+ %tmp18 = shl nuw nsw i32 %tmp17, 16
+ %tmp19 = or i32 %tmp18, %tmp16
+ ret i32 %tmp19
+}
+
+; i8* p;
+; ((i32) (((i16) p[0] << 8) | (i16) p[1]) << 16) | (i32) (((i16) p[2] << 8) | (i16) p[3])
+define i32 @load_i32_by_i16_by_i8_bswap(i32* %arg) {
+; BSWAP-LABEL: load_i32_by_i16_by_i8_bswap:
+; BSWAP: # BB#0:
+; BSWAP-NEXT: movl {{[0-9]+}}(%esp), %eax
+; BSWAP-NEXT: movl (%eax), %eax
+; BSWAP-NEXT: bswapl %eax
+; BSWAP-NEXT: retl
+;
+; MOVBE-LABEL: load_i32_by_i16_by_i8_bswap:
+; MOVBE: # BB#0:
+; MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; MOVBE-NEXT: movbel (%eax), %eax
+; MOVBE-NEXT: retl
+;
+; BSWAP64-LABEL: load_i32_by_i16_by_i8_bswap:
+; BSWAP64: # BB#0:
+; BSWAP64-NEXT: movl (%rdi), %eax
+; BSWAP64-NEXT: bswapl %eax
+; BSWAP64-NEXT: retq
+;
+; MOVBE64-LABEL: load_i32_by_i16_by_i8_bswap:
+; MOVBE64: # BB#0:
+; MOVBE64-NEXT: movbel (%rdi), %eax
+; MOVBE64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 1
+ %tmp2 = zext i8 %tmp1 to i16
+ %tmp3 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp4 = load i8, i8* %tmp3, align 1
+ %tmp5 = zext i8 %tmp4 to i16
+ %tmp6 = shl nuw nsw i16 %tmp2, 8
+ %tmp7 = or i16 %tmp6, %tmp5
+ %tmp8 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp9 = load i8, i8* %tmp8, align 1
+ %tmp10 = zext i8 %tmp9 to i16
+ %tmp11 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp12 = load i8, i8* %tmp11, align 1
+ %tmp13 = zext i8 %tmp12 to i16
+ %tmp14 = shl nuw nsw i16 %tmp10, 8
+ %tmp15 = or i16 %tmp14, %tmp13
+ %tmp16 = zext i16 %tmp7 to i32
+ %tmp17 = zext i16 %tmp15 to i32
+ %tmp18 = shl nuw nsw i32 %tmp16, 16
+ %tmp19 = or i32 %tmp18, %tmp17
+ ret i32 %tmp19
+}
+
+; i8* p;
+; (i64) p[0] | ((i64) p[1] << 8) | ((i64) p[2] << 16) | ((i64) p[3] << 24) | ((i64) p[4] << 32) | ((i64) p[5] << 40) | ((i64) p[6] << 48) | ((i64) p[7] << 56)
+define i64 @load_i64_by_i8(i64* %arg) {
+; CHECK-LABEL: load_i64_by_i8:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movl (%ecx), %eax
+; CHECK-NEXT: movl 4(%ecx), %edx
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: load_i64_by_i8:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movq (%rdi), %rax
+; CHECK64-NEXT: retq
+ %tmp = bitcast i64* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 1
+ %tmp2 = zext i8 %tmp1 to i64
+ %tmp3 = getelementptr inbounds i8, i8* %tmp, i64 1
+ %tmp4 = load i8, i8* %tmp3, align 1
+ %tmp5 = zext i8 %tmp4 to i64
+ %tmp6 = shl nuw nsw i64 %tmp5, 8
+ %tmp7 = or i64 %tmp6, %tmp2
+ %tmp8 = getelementptr inbounds i8, i8* %tmp, i64 2
+ %tmp9 = load i8, i8* %tmp8, align 1
+ %tmp10 = zext i8 %tmp9 to i64
+ %tmp11 = shl nuw nsw i64 %tmp10, 16
+ %tmp12 = or i64 %tmp7, %tmp11
+ %tmp13 = getelementptr inbounds i8, i8* %tmp, i64 3
+ %tmp14 = load i8, i8* %tmp13, align 1
+ %tmp15 = zext i8 %tmp14 to i64
+ %tmp16 = shl nuw nsw i64 %tmp15, 24
+ %tmp17 = or i64 %tmp12, %tmp16
+ %tmp18 = getelementptr inbounds i8, i8* %tmp, i64 4
+ %tmp19 = load i8, i8* %tmp18, align 1
+ %tmp20 = zext i8 %tmp19 to i64
+ %tmp21 = shl nuw nsw i64 %tmp20, 32
+ %tmp22 = or i64 %tmp17, %tmp21
+ %tmp23 = getelementptr inbounds i8, i8* %tmp, i64 5
+ %tmp24 = load i8, i8* %tmp23, align 1
+ %tmp25 = zext i8 %tmp24 to i64
+ %tmp26 = shl nuw nsw i64 %tmp25, 40
+ %tmp27 = or i64 %tmp22, %tmp26
+ %tmp28 = getelementptr inbounds i8, i8* %tmp, i64 6
+ %tmp29 = load i8, i8* %tmp28, align 1
+ %tmp30 = zext i8 %tmp29 to i64
+ %tmp31 = shl nuw nsw i64 %tmp30, 48
+ %tmp32 = or i64 %tmp27, %tmp31
+ %tmp33 = getelementptr inbounds i8, i8* %tmp, i64 7
+ %tmp34 = load i8, i8* %tmp33, align 1
+ %tmp35 = zext i8 %tmp34 to i64
+ %tmp36 = shl nuw i64 %tmp35, 56
+ %tmp37 = or i64 %tmp32, %tmp36
+ ret i64 %tmp37
+}
+
+; i8* p;
+; ((i64) p[0] << 56) | ((i64) p[1] << 48) | ((i64) p[2] << 40) | ((i64) p[3] << 32) | ((i64) p[4] << 24) | ((i64) p[5] << 16) | ((i64) p[6] << 8) | (i64) p[7]
+define i64 @load_i64_by_i8_bswap(i64* %arg) {
+; BSWAP-LABEL: load_i64_by_i8_bswap:
+; BSWAP: # BB#0:
+; BSWAP-NEXT: movl {{[0-9]+}}(%esp), %eax
+; BSWAP-NEXT: movl (%eax), %edx
+; BSWAP-NEXT: movl 4(%eax), %eax
+; BSWAP-NEXT: bswapl %eax
+; BSWAP-NEXT: bswapl %edx
+; BSWAP-NEXT: retl
+;
+; MOVBE-LABEL: load_i64_by_i8_bswap:
+; MOVBE: # BB#0:
+; MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; MOVBE-NEXT: movbel 4(%ecx), %eax
+; MOVBE-NEXT: movbel (%ecx), %edx
+; MOVBE-NEXT: retl
+;
+; BSWAP64-LABEL: load_i64_by_i8_bswap:
+; BSWAP64: # BB#0:
+; BSWAP64-NEXT: movq (%rdi), %rax
+; BSWAP64-NEXT: bswapq %rax
+; BSWAP64-NEXT: retq
+;
+; MOVBE64-LABEL: load_i64_by_i8_bswap:
+; MOVBE64: # BB#0:
+; MOVBE64-NEXT: movbeq (%rdi), %rax
+; MOVBE64-NEXT: retq
+ %tmp = bitcast i64* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 1
+ %tmp2 = zext i8 %tmp1 to i64
+ %tmp3 = shl nuw i64 %tmp2, 56
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i64 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i64
+ %tmp7 = shl nuw nsw i64 %tmp6, 48
+ %tmp8 = or i64 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i64 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i64
+ %tmp12 = shl nuw nsw i64 %tmp11, 40
+ %tmp13 = or i64 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i64 3
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i64
+ %tmp17 = shl nuw nsw i64 %tmp16, 32
+ %tmp18 = or i64 %tmp13, %tmp17
+ %tmp19 = getelementptr inbounds i8, i8* %tmp, i64 4
+ %tmp20 = load i8, i8* %tmp19, align 1
+ %tmp21 = zext i8 %tmp20 to i64
+ %tmp22 = shl nuw nsw i64 %tmp21, 24
+ %tmp23 = or i64 %tmp18, %tmp22
+ %tmp24 = getelementptr inbounds i8, i8* %tmp, i64 5
+ %tmp25 = load i8, i8* %tmp24, align 1
+ %tmp26 = zext i8 %tmp25 to i64
+ %tmp27 = shl nuw nsw i64 %tmp26, 16
+ %tmp28 = or i64 %tmp23, %tmp27
+ %tmp29 = getelementptr inbounds i8, i8* %tmp, i64 6
+ %tmp30 = load i8, i8* %tmp29, align 1
+ %tmp31 = zext i8 %tmp30 to i64
+ %tmp32 = shl nuw nsw i64 %tmp31, 8
+ %tmp33 = or i64 %tmp28, %tmp32
+ %tmp34 = getelementptr inbounds i8, i8* %tmp, i64 7
+ %tmp35 = load i8, i8* %tmp34, align 1
+ %tmp36 = zext i8 %tmp35 to i64
+ %tmp37 = or i64 %tmp33, %tmp36
+ ret i64 %tmp37
+}
+
+; Part of the load-by-bytes pattern is also used outside of the pattern
+; i8* p;
+; i32 x = (i32) p[1]
+; res = ((i32) p[0] << 24) | (x << 16) | ((i32) p[2] << 8) | (i32) p[3]
+; x | res
+define i32 @load_i32_by_i8_bswap_uses(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_bswap_uses:
+; CHECK: # BB#0:
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: .Lcfi0:
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: .Lcfi1:
+; CHECK-NEXT: .cfi_offset %esi, -8
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movzbl (%eax), %ecx
+; CHECK-NEXT: shll $24, %ecx
+; CHECK-NEXT: movzbl 1(%eax), %edx
+; CHECK-NEXT: movl %edx, %esi
+; CHECK-NEXT: shll $16, %esi
+; CHECK-NEXT: orl %ecx, %esi
+; CHECK-NEXT: movzbl 2(%eax), %ecx
+; CHECK-NEXT: shll $8, %ecx
+; CHECK-NEXT: orl %esi, %ecx
+; CHECK-NEXT: movzbl 3(%eax), %eax
+; CHECK-NEXT: orl %ecx, %eax
+; CHECK-NEXT: orl %edx, %eax
+; CHECK-NEXT: popl %esi
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: load_i32_by_i8_bswap_uses:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movzbl (%rdi), %eax
+; CHECK64-NEXT: shll $24, %eax
+; CHECK64-NEXT: movzbl 1(%rdi), %ecx
+; CHECK64-NEXT: movl %ecx, %edx
+; CHECK64-NEXT: shll $16, %edx
+; CHECK64-NEXT: orl %eax, %edx
+; CHECK64-NEXT: movzbl 2(%rdi), %esi
+; CHECK64-NEXT: shll $8, %esi
+; CHECK64-NEXT: orl %edx, %esi
+; CHECK64-NEXT: movzbl 3(%rdi), %eax
+; CHECK64-NEXT: orl %esi, %eax
+; CHECK64-NEXT: orl %ecx, %eax
+; CHECK64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = load i8, i8* %tmp, align 1
+ %tmp2 = zext i8 %tmp1 to i32
+ %tmp3 = shl nuw nsw i32 %tmp2, 24
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 16
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 8
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = or i32 %tmp13, %tmp16
+ ; Use individual part of the pattern outside of the pattern
+ %tmp18 = or i32 %tmp6, %tmp17
+ ret i32 %tmp18
+}
+
+; One of the loads is volatile
+; i8* p;
+; p0 = volatile *p;
+; ((i32) p0 << 24) | ((i32) p[1] << 16) | ((i32) p[2] << 8) | (i32) p[3]
+define i32 @load_i32_by_i8_bswap_volatile(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_bswap_volatile:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movzbl (%eax), %ecx
+; CHECK-NEXT: shll $24, %ecx
+; CHECK-NEXT: movzbl 1(%eax), %edx
+; CHECK-NEXT: shll $16, %edx
+; CHECK-NEXT: orl %ecx, %edx
+; CHECK-NEXT: movzbl 2(%eax), %ecx
+; CHECK-NEXT: shll $8, %ecx
+; CHECK-NEXT: orl %edx, %ecx
+; CHECK-NEXT: movzbl 3(%eax), %eax
+; CHECK-NEXT: orl %ecx, %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: load_i32_by_i8_bswap_volatile:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movzbl (%rdi), %eax
+; CHECK64-NEXT: shll $24, %eax
+; CHECK64-NEXT: movzbl 1(%rdi), %ecx
+; CHECK64-NEXT: shll $16, %ecx
+; CHECK64-NEXT: orl %eax, %ecx
+; CHECK64-NEXT: movzbl 2(%rdi), %edx
+; CHECK64-NEXT: shll $8, %edx
+; CHECK64-NEXT: orl %ecx, %edx
+; CHECK64-NEXT: movzbl 3(%rdi), %eax
+; CHECK64-NEXT: orl %edx, %eax
+; CHECK64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = load volatile i8, i8* %tmp, align 1
+ %tmp2 = zext i8 %tmp1 to i32
+ %tmp3 = shl nuw nsw i32 %tmp2, 24
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 16
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 8
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = or i32 %tmp13, %tmp16
+ ret i32 %tmp17
+}
+
+; There is a store in between the individual loads
+; i8* p, q;
+; res1 = ((i32) p[0] << 24) | ((i32) p[1] << 16)
+; *q = 0;
+; res2 = ((i32) p[2] << 8) | (i32) p[3]
+; res1 | res2
+define i32 @load_i32_by_i8_bswap_store_in_between(i32* %arg, i32* %arg1) {
+; CHECK-LABEL: load_i32_by_i8_bswap_store_in_between:
+; CHECK: # BB#0:
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: .Lcfi2:
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: .Lcfi3:
+; CHECK-NEXT: .cfi_offset %esi, -8
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movzbl (%ecx), %edx
+; CHECK-NEXT: shll $24, %edx
+; CHECK-NEXT: movzbl 1(%ecx), %esi
+; CHECK-NEXT: movl $0, (%eax)
+; CHECK-NEXT: shll $16, %esi
+; CHECK-NEXT: orl %edx, %esi
+; CHECK-NEXT: movzbl 2(%ecx), %edx
+; CHECK-NEXT: shll $8, %edx
+; CHECK-NEXT: orl %esi, %edx
+; CHECK-NEXT: movzbl 3(%ecx), %eax
+; CHECK-NEXT: orl %edx, %eax
+; CHECK-NEXT: popl %esi
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: load_i32_by_i8_bswap_store_in_between:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movzbl (%rdi), %eax
+; CHECK64-NEXT: shll $24, %eax
+; CHECK64-NEXT: movzbl 1(%rdi), %ecx
+; CHECK64-NEXT: movl $0, (%rsi)
+; CHECK64-NEXT: shll $16, %ecx
+; CHECK64-NEXT: orl %eax, %ecx
+; CHECK64-NEXT: movzbl 2(%rdi), %edx
+; CHECK64-NEXT: shll $8, %edx
+; CHECK64-NEXT: orl %ecx, %edx
+; CHECK64-NEXT: movzbl 3(%rdi), %eax
+; CHECK64-NEXT: orl %edx, %eax
+; CHECK64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp2 = load i8, i8* %tmp, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = shl nuw nsw i32 %tmp3, 24
+ %tmp5 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp6 = load i8, i8* %tmp5, align 1
+ ; This store will prevent folding of the pattern
+ store i32 0, i32* %arg1
+ %tmp7 = zext i8 %tmp6 to i32
+ %tmp8 = shl nuw nsw i32 %tmp7, 16
+ %tmp9 = or i32 %tmp8, %tmp4
+ %tmp10 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp11 = load i8, i8* %tmp10, align 1
+ %tmp12 = zext i8 %tmp11 to i32
+ %tmp13 = shl nuw nsw i32 %tmp12, 8
+ %tmp14 = or i32 %tmp9, %tmp13
+ %tmp15 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp16 = load i8, i8* %tmp15, align 1
+ %tmp17 = zext i8 %tmp16 to i32
+ %tmp18 = or i32 %tmp14, %tmp17
+ ret i32 %tmp18
+}
+
+; One of the loads is from an unrelated location
+; i8* p, q;
+; ((i32) p[0] << 24) | ((i32) q[1] << 16) | ((i32) p[2] << 8) | (i32) p[3]
+define i32 @load_i32_by_i8_bswap_unrelated_load(i32* %arg, i32* %arg1) {
+; CHECK-LABEL: load_i32_by_i8_bswap_unrelated_load:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movzbl (%ecx), %edx
+; CHECK-NEXT: shll $24, %edx
+; CHECK-NEXT: movzbl 1(%eax), %eax
+; CHECK-NEXT: shll $16, %eax
+; CHECK-NEXT: orl %edx, %eax
+; CHECK-NEXT: movzbl 2(%ecx), %edx
+; CHECK-NEXT: shll $8, %edx
+; CHECK-NEXT: orl %eax, %edx
+; CHECK-NEXT: movzbl 3(%ecx), %eax
+; CHECK-NEXT: orl %edx, %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: load_i32_by_i8_bswap_unrelated_load:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movzbl (%rdi), %eax
+; CHECK64-NEXT: shll $24, %eax
+; CHECK64-NEXT: movzbl 1(%rsi), %ecx
+; CHECK64-NEXT: shll $16, %ecx
+; CHECK64-NEXT: orl %eax, %ecx
+; CHECK64-NEXT: movzbl 2(%rdi), %edx
+; CHECK64-NEXT: shll $8, %edx
+; CHECK64-NEXT: orl %ecx, %edx
+; CHECK64-NEXT: movzbl 3(%rdi), %eax
+; CHECK64-NEXT: orl %edx, %eax
+; CHECK64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp2 = bitcast i32* %arg1 to i8*
+ %tmp3 = load i8, i8* %tmp, align 1
+ %tmp4 = zext i8 %tmp3 to i32
+ %tmp5 = shl nuw nsw i32 %tmp4, 24
+ ; Load from an unrelated address
+ %tmp6 = getelementptr inbounds i8, i8* %tmp2, i32 1
+ %tmp7 = load i8, i8* %tmp6, align 1
+ %tmp8 = zext i8 %tmp7 to i32
+ %tmp9 = shl nuw nsw i32 %tmp8, 16
+ %tmp10 = or i32 %tmp9, %tmp5
+ %tmp11 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp12 = load i8, i8* %tmp11, align 1
+ %tmp13 = zext i8 %tmp12 to i32
+ %tmp14 = shl nuw nsw i32 %tmp13, 8
+ %tmp15 = or i32 %tmp10, %tmp14
+ %tmp16 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp17 = load i8, i8* %tmp16, align 1
+ %tmp18 = zext i8 %tmp17 to i32
+ %tmp19 = or i32 %tmp15, %tmp18
+ ret i32 %tmp19
+}
+
+; i8* p;
+; (i32) p[1] | ((i32) p[2] << 8) | ((i32) p[3] << 16) | ((i32) p[4] << 24)
+define i32 @load_i32_by_i8_nonzero_offset(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_nonzero_offset:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl 1(%eax), %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: load_i32_by_i8_nonzero_offset:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movl 1(%rdi), %eax
+; CHECK64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 4
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+; i8* p;
+; (i32) p[-4] | ((i32) p[-3] << 8) | ((i32) p[-2] << 16) | ((i32) p[-1] << 24)
+define i32 @load_i32_by_i8_neg_offset(i32* %arg) {
+; CHECK-LABEL: load_i32_by_i8_neg_offset:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl -4(%eax), %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: load_i32_by_i8_neg_offset:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movl -4(%rdi), %eax
+; CHECK64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 -4
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 -3
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 -2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 -1
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+; i8* p;
+; (i32) p[4] | ((i32) p[3] << 8) | ((i32) p[2] << 16) | ((i32) p[1] << 24)
+define i32 @load_i32_by_i8_nonzero_offset_bswap(i32* %arg) {
+; BSWAP-LABEL: load_i32_by_i8_nonzero_offset_bswap:
+; BSWAP: # BB#0:
+; BSWAP-NEXT: movl {{[0-9]+}}(%esp), %eax
+; BSWAP-NEXT: movl 1(%eax), %eax
+; BSWAP-NEXT: bswapl %eax
+; BSWAP-NEXT: retl
+;
+; MOVBE-LABEL: load_i32_by_i8_nonzero_offset_bswap:
+; MOVBE: # BB#0:
+; MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; MOVBE-NEXT: movbel 1(%eax), %eax
+; MOVBE-NEXT: retl
+;
+; BSWAP64-LABEL: load_i32_by_i8_nonzero_offset_bswap:
+; BSWAP64: # BB#0:
+; BSWAP64-NEXT: movl 1(%rdi), %eax
+; BSWAP64-NEXT: bswapl %eax
+; BSWAP64-NEXT: retq
+;
+; MOVBE64-LABEL: load_i32_by_i8_nonzero_offset_bswap:
+; MOVBE64: # BB#0:
+; MOVBE64-NEXT: movbel 1(%rdi), %eax
+; MOVBE64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 4
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 3
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+; i8* p;
+; (i32) p[-1] | ((i32) p[-2] << 8) | ((i32) p[-3] << 16) | ((i32) p[-4] << 24)
+define i32 @load_i32_by_i8_neg_offset_bswap(i32* %arg) {
+; BSWAP-LABEL: load_i32_by_i8_neg_offset_bswap:
+; BSWAP: # BB#0:
+; BSWAP-NEXT: movl {{[0-9]+}}(%esp), %eax
+; BSWAP-NEXT: movl -4(%eax), %eax
+; BSWAP-NEXT: bswapl %eax
+; BSWAP-NEXT: retl
+;
+; MOVBE-LABEL: load_i32_by_i8_neg_offset_bswap:
+; MOVBE: # BB#0:
+; MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; MOVBE-NEXT: movbel -4(%eax), %eax
+; MOVBE-NEXT: retl
+;
+; BSWAP64-LABEL: load_i32_by_i8_neg_offset_bswap:
+; BSWAP64: # BB#0:
+; BSWAP64-NEXT: movl -4(%rdi), %eax
+; BSWAP64-NEXT: bswapl %eax
+; BSWAP64-NEXT: retq
+;
+; MOVBE64-LABEL: load_i32_by_i8_neg_offset_bswap:
+; MOVBE64: # BB#0:
+; MOVBE64-NEXT: movbel -4(%rdi), %eax
+; MOVBE64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 -1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 -2
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 -3
+ %tmp10 = load i8, i8* %tmp9, align 1
+ %tmp11 = zext i8 %tmp10 to i32
+ %tmp12 = shl nuw nsw i32 %tmp11, 16
+ %tmp13 = or i32 %tmp8, %tmp12
+ %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 -4
+ %tmp15 = load i8, i8* %tmp14, align 1
+ %tmp16 = zext i8 %tmp15 to i32
+ %tmp17 = shl nuw nsw i32 %tmp16, 24
+ %tmp18 = or i32 %tmp13, %tmp17
+ ret i32 %tmp18
+}
+
+; i8* p; i32 i;
+; ((i32) p[i] << 24) | ((i32) p[i + 1] << 16) | ((i32) p[i + 2] << 8) | (i32) p[i + 3]
+define i32 @load_i32_by_i8_bswap_base_index_offset(i32* %arg, i32 %arg1) {
+; BSWAP-LABEL: load_i32_by_i8_bswap_base_index_offset:
+; BSWAP: # BB#0:
+; BSWAP-NEXT: movl {{[0-9]+}}(%esp), %eax
+; BSWAP-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; BSWAP-NEXT: movl (%ecx,%eax), %eax
+; BSWAP-NEXT: bswapl %eax
+; BSWAP-NEXT: retl
+;
+; MOVBE-LABEL: load_i32_by_i8_bswap_base_index_offset:
+; MOVBE: # BB#0:
+; MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; MOVBE-NEXT: movbel (%ecx,%eax), %eax
+; MOVBE-NEXT: retl
+;
+; BSWAP64-LABEL: load_i32_by_i8_bswap_base_index_offset:
+; BSWAP64: # BB#0:
+; BSWAP64-NEXT: movslq %esi, %rax
+; BSWAP64-NEXT: movl (%rdi,%rax), %eax
+; BSWAP64-NEXT: bswapl %eax
+; BSWAP64-NEXT: retq
+;
+; MOVBE64-LABEL: load_i32_by_i8_bswap_base_index_offset:
+; MOVBE64: # BB#0:
+; MOVBE64-NEXT: movslq %esi, %rax
+; MOVBE64-NEXT: movbel (%rdi,%rax), %eax
+; MOVBE64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp2 = getelementptr inbounds i8, i8* %tmp, i32 %arg1
+ %tmp3 = load i8, i8* %tmp2, align 1
+ %tmp4 = zext i8 %tmp3 to i32
+ %tmp5 = shl nuw nsw i32 %tmp4, 24
+ %tmp6 = add nuw nsw i32 %arg1, 1
+ %tmp7 = getelementptr inbounds i8, i8* %tmp, i32 %tmp6
+ %tmp8 = load i8, i8* %tmp7, align 1
+ %tmp9 = zext i8 %tmp8 to i32
+ %tmp10 = shl nuw nsw i32 %tmp9, 16
+ %tmp11 = or i32 %tmp10, %tmp5
+ %tmp12 = add nuw nsw i32 %arg1, 2
+ %tmp13 = getelementptr inbounds i8, i8* %tmp, i32 %tmp12
+ %tmp14 = load i8, i8* %tmp13, align 1
+ %tmp15 = zext i8 %tmp14 to i32
+ %tmp16 = shl nuw nsw i32 %tmp15, 8
+ %tmp17 = or i32 %tmp11, %tmp16
+ %tmp18 = add nuw nsw i32 %arg1, 3
+ %tmp19 = getelementptr inbounds i8, i8* %tmp, i32 %tmp18
+ %tmp20 = load i8, i8* %tmp19, align 1
+ %tmp21 = zext i8 %tmp20 to i32
+ %tmp22 = or i32 %tmp17, %tmp21
+ ret i32 %tmp22
+}
+
+; Verify that we don't crash handling shl i32 %conv57, 32
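+; In LLVM IR a shift amount equal to the bit width (shl i32 x, 32) produces
+; poison, so the load-combine logic must tolerate out-of-range shift amounts
+; instead of assuming every byte lands at a valid position.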
+define void @shift_i32_by_32(i8* %src1, i8* %src2, i64* %dst) {
+; CHECK-LABEL: shift_i32_by_32:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl $-1, 4(%eax)
+; CHECK-NEXT: movl $-1, (%eax)
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: shift_i32_by_32:
+; CHECK64: # BB#0: # %entry
+; CHECK64-NEXT: movq $-1, (%rdx)
+; CHECK64-NEXT: retq
+entry:
+ %load1 = load i8, i8* %src1, align 1
+ %conv46 = zext i8 %load1 to i32
+ %shl47 = shl i32 %conv46, 56
+ %or55 = or i32 %shl47, 0
+ %load2 = load i8, i8* %src2, align 1
+ %conv57 = zext i8 %load2 to i32
+ %shl58 = shl i32 %conv57, 32
+ %or59 = or i32 %or55, %shl58
+ %or74 = or i32 %or59, 0
+ %conv75 = sext i32 %or74 to i64
+ store i64 %conv75, i64* %dst, align 8
+ ret void
+}
+
+declare i16 @llvm.bswap.i16(i16)
+
+; i16* p;
+; (i32) bswap(p[1]) | ((i32) bswap(p[0]) << 16)
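+; Two i16 halves that are individually byte-swapped and concatenated
+; high-to-low amount to one big-endian 32-bit load, so this folds to a plain
+; movl plus bswapl (or a single movbel).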
+define i32 @load_i32_by_bswap_i16(i32* %arg) {
+; BSWAP-LABEL: load_i32_by_bswap_i16:
+; BSWAP: # BB#0:
+; BSWAP-NEXT: movl {{[0-9]+}}(%esp), %eax
+; BSWAP-NEXT: movl (%eax), %eax
+; BSWAP-NEXT: bswapl %eax
+; BSWAP-NEXT: retl
+;
+; MOVBE-LABEL: load_i32_by_bswap_i16:
+; MOVBE: # BB#0:
+; MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; MOVBE-NEXT: movbel (%eax), %eax
+; MOVBE-NEXT: retl
+;
+; BSWAP64-LABEL: load_i32_by_bswap_i16:
+; BSWAP64: # BB#0:
+; BSWAP64-NEXT: movl (%rdi), %eax
+; BSWAP64-NEXT: bswapl %eax
+; BSWAP64-NEXT: retq
+;
+; MOVBE64-LABEL: load_i32_by_bswap_i16:
+; MOVBE64: # BB#0:
+; MOVBE64-NEXT: movbel (%rdi), %eax
+; MOVBE64-NEXT: retq
+ %tmp = bitcast i32* %arg to i16*
+ %tmp1 = load i16, i16* %tmp, align 4
+ %tmp11 = call i16 @llvm.bswap.i16(i16 %tmp1)
+ %tmp2 = zext i16 %tmp11 to i32
+ %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
+ %tmp4 = load i16, i16* %tmp3, align 1
+ %tmp41 = call i16 @llvm.bswap.i16(i16 %tmp4)
+ %tmp5 = zext i16 %tmp41 to i32
+ %tmp6 = shl nuw nsw i32 %tmp2, 16
+ %tmp7 = or i32 %tmp6, %tmp5
+ ret i32 %tmp7
+}
+
+; i16* p;
+; (i32) p[0] | ((sext(p[1]) to i32) << 16)
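+; The sign extension of the high half does not block the fold: after the
+; shift by 16 only bits 0-15 of the sext result are observable, so the whole
+; pattern is still a single 32-bit load.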
+define i32 @load_i32_by_sext_i16(i32* %arg) {
+; CHECK-LABEL: load_i32_by_sext_i16:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl (%eax), %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: load_i32_by_sext_i16:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movl (%rdi), %eax
+; CHECK64-NEXT: retq
+ %tmp = bitcast i32* %arg to i16*
+ %tmp1 = load i16, i16* %tmp, align 1
+ %tmp2 = zext i16 %tmp1 to i32
+ %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
+ %tmp4 = load i16, i16* %tmp3, align 1
+ %tmp5 = sext i16 %tmp4 to i32
+ %tmp6 = shl nuw nsw i32 %tmp5, 16
+ %tmp7 = or i32 %tmp6, %tmp2
+ ret i32 %tmp7
+}
+
+; i8* arg; i32 i;
+; p = arg + 12;
+; (i32) p[i] | ((i32) p[i + 1] << 8) | ((i32) p[i + 2] << 16) | ((i32) p[i + 3] << 24)
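+; The constant offset and the variable index must be reassociated before the
+; four byte loads are recognized as consecutive; once they are, the pattern
+; folds to a single movl 12(%base,%index) load.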
+define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) {
+; CHECK-LABEL: load_i32_by_i8_base_offset_index:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movl 12(%eax,%ecx), %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: load_i32_by_i8_base_offset_index:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movl %esi, %eax
+; CHECK64-NEXT: movl 12(%rdi,%rax), %eax
+; CHECK64-NEXT: retq
+ %tmp = add nuw nsw i32 %i, 3
+ %tmp2 = add nuw nsw i32 %i, 2
+ %tmp3 = add nuw nsw i32 %i, 1
+ %tmp4 = getelementptr inbounds i8, i8* %arg, i64 12
+ %tmp5 = zext i32 %i to i64
+ %tmp6 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp5
+ %tmp7 = load i8, i8* %tmp6, align 1
+ %tmp8 = zext i8 %tmp7 to i32
+ %tmp9 = zext i32 %tmp3 to i64
+ %tmp10 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp9
+ %tmp11 = load i8, i8* %tmp10, align 1
+ %tmp12 = zext i8 %tmp11 to i32
+ %tmp13 = shl nuw nsw i32 %tmp12, 8
+ %tmp14 = or i32 %tmp13, %tmp8
+ %tmp15 = zext i32 %tmp2 to i64
+ %tmp16 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp15
+ %tmp17 = load i8, i8* %tmp16, align 1
+ %tmp18 = zext i8 %tmp17 to i32
+ %tmp19 = shl nuw nsw i32 %tmp18, 16
+ %tmp20 = or i32 %tmp14, %tmp19
+ %tmp21 = zext i32 %tmp to i64
+ %tmp22 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp21
+ %tmp23 = load i8, i8* %tmp22, align 1
+ %tmp24 = zext i8 %tmp23 to i32
+ %tmp25 = shl nuw i32 %tmp24, 24
+ %tmp26 = or i32 %tmp20, %tmp25
+ ret i32 %tmp26
+}
+
+; i8* arg; i32 i;
+; p = arg + 12;
+; (i32) p[i + 1] | ((i32) p[i + 2] << 8) | ((i32) p[i + 3] << 16) | ((i32) p[i + 4] << 24)
+define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
+; CHECK-LABEL: load_i32_by_i8_base_offset_index_2:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movl 13(%eax,%ecx), %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: load_i32_by_i8_base_offset_index_2:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movl %esi, %eax
+; CHECK64-NEXT: movl 13(%rdi,%rax), %eax
+; CHECK64-NEXT: retq
+ %tmp = add nuw nsw i32 %i, 4
+ %tmp2 = add nuw nsw i32 %i, 3
+ %tmp3 = add nuw nsw i32 %i, 2
+ %tmp4 = getelementptr inbounds i8, i8* %arg, i64 12
+ %tmp5 = add nuw nsw i32 %i, 1
+ %tmp27 = zext i32 %tmp5 to i64
+ %tmp28 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp27
+ %tmp29 = load i8, i8* %tmp28, align 1
+ %tmp30 = zext i8 %tmp29 to i32
+ %tmp31 = zext i32 %tmp3 to i64
+ %tmp32 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp31
+ %tmp33 = load i8, i8* %tmp32, align 1
+ %tmp34 = zext i8 %tmp33 to i32
+ %tmp35 = shl nuw nsw i32 %tmp34, 8
+ %tmp36 = or i32 %tmp35, %tmp30
+ %tmp37 = zext i32 %tmp2 to i64
+ %tmp38 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp37
+ %tmp39 = load i8, i8* %tmp38, align 1
+ %tmp40 = zext i8 %tmp39 to i32
+ %tmp41 = shl nuw nsw i32 %tmp40, 16
+ %tmp42 = or i32 %tmp36, %tmp41
+ %tmp43 = zext i32 %tmp to i64
+ %tmp44 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp43
+ %tmp45 = load i8, i8* %tmp44, align 1
+ %tmp46 = zext i8 %tmp45 to i32
+ %tmp47 = shl nuw i32 %tmp46, 24
+ %tmp48 = or i32 %tmp42, %tmp47
+ ret i32 %tmp48
+}
+
+; i8* arg; i32 i;
+;
+; p0 = arg;
+; p1 = arg + i + 1;
+; p2 = arg + i + 2;
+; p3 = arg + i + 3;
+;
+; (i32) p0[12] | ((i32) p1[12] << 8) | ((i32) p2[12] << 16) | ((i32) p3[12] << 24)
+;
+; This test exercises zero- and any-extend loads as part of the load combine
+; pattern. In order to fold the pattern above we need to reassociate the
+; address computation first. By the time the address computation is
+; reassociated, the loads have been combined into zext and aext loads.
+define i32 @load_i32_by_i8_zaext_loads(i8* %arg, i32 %arg1) {
+; CHECK-LABEL: load_i32_by_i8_zaext_loads:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movl 12(%eax,%ecx), %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: load_i32_by_i8_zaext_loads:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movl %esi, %eax
+; CHECK64-NEXT: movl 12(%rdi,%rax), %eax
+; CHECK64-NEXT: retq
+ %tmp = add nuw nsw i32 %arg1, 3
+ %tmp2 = add nuw nsw i32 %arg1, 2
+ %tmp3 = add nuw nsw i32 %arg1, 1
+ %tmp4 = zext i32 %tmp to i64
+ %tmp5 = zext i32 %tmp2 to i64
+ %tmp6 = zext i32 %tmp3 to i64
+ %tmp24 = getelementptr inbounds i8, i8* %arg, i64 %tmp4
+ %tmp30 = getelementptr inbounds i8, i8* %arg, i64 %tmp5
+ %tmp31 = getelementptr inbounds i8, i8* %arg, i64 %tmp6
+ %tmp32 = getelementptr inbounds i8, i8* %arg, i64 12
+ %tmp33 = zext i32 %arg1 to i64
+ %tmp34 = getelementptr inbounds i8, i8* %tmp32, i64 %tmp33
+ %tmp35 = load i8, i8* %tmp34, align 1
+ %tmp36 = zext i8 %tmp35 to i32
+ %tmp37 = getelementptr inbounds i8, i8* %tmp31, i64 12
+ %tmp38 = load i8, i8* %tmp37, align 1
+ %tmp39 = zext i8 %tmp38 to i32
+ %tmp40 = shl nuw nsw i32 %tmp39, 8
+ %tmp41 = or i32 %tmp40, %tmp36
+ %tmp42 = getelementptr inbounds i8, i8* %tmp30, i64 12
+ %tmp43 = load i8, i8* %tmp42, align 1
+ %tmp44 = zext i8 %tmp43 to i32
+ %tmp45 = shl nuw nsw i32 %tmp44, 16
+ %tmp46 = or i32 %tmp41, %tmp45
+ %tmp47 = getelementptr inbounds i8, i8* %tmp24, i64 12
+ %tmp48 = load i8, i8* %tmp47, align 1
+ %tmp49 = zext i8 %tmp48 to i32
+ %tmp50 = shl nuw i32 %tmp49, 24
+ %tmp51 = or i32 %tmp46, %tmp50
+ ret i32 %tmp51
+}
+
+; The same as load_i32_by_i8_zaext_loads but the last load is combined to
+; a sext load.
+;
+; i8* arg; i32 i;
+;
+; p0 = arg;
+; p1 = arg + i + 1;
+; p2 = arg + i + 2;
+; p3 = arg + i + 3;
+;
+; (i32) p0[12] | ((i32) p1[12] << 8) | ((i32) p2[12] << 16) | ((i32) p3[12] << 24)
+define i32 @load_i32_by_i8_zsext_loads(i8* %arg, i32 %arg1) {
+; CHECK-LABEL: load_i32_by_i8_zsext_loads:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movl 12(%eax,%ecx), %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: load_i32_by_i8_zsext_loads:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movl %esi, %eax
+; CHECK64-NEXT: movl 12(%rdi,%rax), %eax
+; CHECK64-NEXT: retq
+ %tmp = add nuw nsw i32 %arg1, 3
+ %tmp2 = add nuw nsw i32 %arg1, 2
+ %tmp3 = add nuw nsw i32 %arg1, 1
+ %tmp4 = zext i32 %tmp to i64
+ %tmp5 = zext i32 %tmp2 to i64
+ %tmp6 = zext i32 %tmp3 to i64
+ %tmp24 = getelementptr inbounds i8, i8* %arg, i64 %tmp4
+ %tmp30 = getelementptr inbounds i8, i8* %arg, i64 %tmp5
+ %tmp31 = getelementptr inbounds i8, i8* %arg, i64 %tmp6
+ %tmp32 = getelementptr inbounds i8, i8* %arg, i64 12
+ %tmp33 = zext i32 %arg1 to i64
+ %tmp34 = getelementptr inbounds i8, i8* %tmp32, i64 %tmp33
+ %tmp35 = load i8, i8* %tmp34, align 1
+ %tmp36 = zext i8 %tmp35 to i32
+ %tmp37 = getelementptr inbounds i8, i8* %tmp31, i64 12
+ %tmp38 = load i8, i8* %tmp37, align 1
+ %tmp39 = zext i8 %tmp38 to i32
+ %tmp40 = shl nuw nsw i32 %tmp39, 8
+ %tmp41 = or i32 %tmp40, %tmp36
+ %tmp42 = getelementptr inbounds i8, i8* %tmp30, i64 12
+ %tmp43 = load i8, i8* %tmp42, align 1
+ %tmp44 = zext i8 %tmp43 to i32
+ %tmp45 = shl nuw nsw i32 %tmp44, 16
+ %tmp46 = or i32 %tmp41, %tmp45
+ %tmp47 = getelementptr inbounds i8, i8* %tmp24, i64 12
+ %tmp48 = load i8, i8* %tmp47, align 1
+ %tmp49 = sext i8 %tmp48 to i16
+ %tmp50 = zext i16 %tmp49 to i32
+ %tmp51 = shl nuw i32 %tmp50, 24
+ %tmp52 = or i32 %tmp46, %tmp51
+ ret i32 %tmp52
+}
+
+; i8* p;
+; (i32) p[0] | ((i32) p[1] << 8)
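+; This and the following tests assemble only 16 of the 32 result bits at
+; various positions; the checks document that these patterns are currently
+; left as two separate byte loads rather than being narrowed to one load.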
+define i32 @zext_load_i32_by_i8(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movzbl (%eax), %ecx
+; CHECK-NEXT: movzbl 1(%eax), %eax
+; CHECK-NEXT: shll $8, %eax
+; CHECK-NEXT: orl %ecx, %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: zext_load_i32_by_i8:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movzbl (%rdi), %ecx
+; CHECK64-NEXT: movzbl 1(%rdi), %eax
+; CHECK64-NEXT: shll $8, %eax
+; CHECK64-NEXT: orl %ecx, %eax
+; CHECK64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ ret i32 %tmp8
+}
+
+; i8* p;
+; ((i32) p[0] << 8) | ((i32) p[1] << 16)
+define i32 @zext_load_i32_by_i8_shl_8(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_shl_8:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movzbl (%eax), %ecx
+; CHECK-NEXT: shll $8, %ecx
+; CHECK-NEXT: movzbl 1(%eax), %eax
+; CHECK-NEXT: shll $16, %eax
+; CHECK-NEXT: orl %ecx, %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: zext_load_i32_by_i8_shl_8:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movzbl (%rdi), %ecx
+; CHECK64-NEXT: shll $8, %ecx
+; CHECK64-NEXT: movzbl 1(%rdi), %eax
+; CHECK64-NEXT: shll $16, %eax
+; CHECK64-NEXT: orl %ecx, %eax
+; CHECK64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 8
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 16
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
+
+; i8* p;
+; ((i32) p[0] << 16) | ((i32) p[1] << 24)
+define i32 @zext_load_i32_by_i8_shl_16(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_shl_16:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movzbl (%eax), %ecx
+; CHECK-NEXT: shll $16, %ecx
+; CHECK-NEXT: movzbl 1(%eax), %eax
+; CHECK-NEXT: shll $24, %eax
+; CHECK-NEXT: orl %ecx, %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: zext_load_i32_by_i8_shl_16:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movzbl (%rdi), %ecx
+; CHECK64-NEXT: shll $16, %ecx
+; CHECK64-NEXT: movzbl 1(%rdi), %eax
+; CHECK64-NEXT: shll $24, %eax
+; CHECK64-NEXT: orl %ecx, %eax
+; CHECK64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 16
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 24
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
+
+; i8* p;
+; (i32) p[1] | ((i32) p[0] << 8)
+define i32 @zext_load_i32_by_i8_bswap(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_bswap:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movzbl 1(%eax), %ecx
+; CHECK-NEXT: movzbl (%eax), %eax
+; CHECK-NEXT: shll $8, %eax
+; CHECK-NEXT: orl %ecx, %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: zext_load_i32_by_i8_bswap:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movzbl 1(%rdi), %ecx
+; CHECK64-NEXT: movzbl (%rdi), %eax
+; CHECK64-NEXT: shll $8, %eax
+; CHECK64-NEXT: orl %ecx, %eax
+; CHECK64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 8
+ %tmp8 = or i32 %tmp7, %tmp3
+ ret i32 %tmp8
+}
+
+; i8* p;
+; ((i32) p[1] << 8) | ((i32) p[0] << 16)
+define i32 @zext_load_i32_by_i8_bswap_shl_8(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_bswap_shl_8:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movzbl 1(%eax), %ecx
+; CHECK-NEXT: shll $8, %ecx
+; CHECK-NEXT: movzbl (%eax), %eax
+; CHECK-NEXT: shll $16, %eax
+; CHECK-NEXT: orl %ecx, %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: zext_load_i32_by_i8_bswap_shl_8:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movzbl 1(%rdi), %ecx
+; CHECK64-NEXT: shll $8, %ecx
+; CHECK64-NEXT: movzbl (%rdi), %eax
+; CHECK64-NEXT: shll $16, %eax
+; CHECK64-NEXT: orl %ecx, %eax
+; CHECK64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 8
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 16
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
+
+; i8* p;
+; ((i32) p[1] << 16) | ((i32) p[0] << 24)
+define i32 @zext_load_i32_by_i8_bswap_shl_16(i32* %arg) {
+; CHECK-LABEL: zext_load_i32_by_i8_bswap_shl_16:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movzbl 1(%eax), %ecx
+; CHECK-NEXT: shll $16, %ecx
+; CHECK-NEXT: movzbl (%eax), %eax
+; CHECK-NEXT: shll $24, %eax
+; CHECK-NEXT: orl %ecx, %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: zext_load_i32_by_i8_bswap_shl_16:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movzbl 1(%rdi), %ecx
+; CHECK64-NEXT: shll $16, %ecx
+; CHECK64-NEXT: movzbl (%rdi), %eax
+; CHECK64-NEXT: shll $24, %eax
+; CHECK64-NEXT: orl %ecx, %eax
+; CHECK64-NEXT: retq
+ %tmp = bitcast i32* %arg to i8*
+ %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
+ %tmp2 = load i8, i8* %tmp1, align 1
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp30 = shl nuw nsw i32 %tmp3, 16
+ %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
+ %tmp5 = load i8, i8* %tmp4, align 1
+ %tmp6 = zext i8 %tmp5 to i32
+ %tmp7 = shl nuw nsw i32 %tmp6, 24
+ %tmp8 = or i32 %tmp7, %tmp30
+ ret i32 %tmp8
+}
diff --git a/test/CodeGen/X86/load-slice.ll b/test/CodeGen/X86/load-slice.ll
index 2f90f819d47e..8803512eec09 100644
--- a/test/CodeGen/X86/load-slice.ll
+++ b/test/CodeGen/X86/load-slice.ll
@@ -19,10 +19,10 @@
; STRESS-LABEL: t1:
; Load out[out_start + 8].real, this is base + 8 * 8 + 0.
; STRESS: vmovss 64([[BASE:[^(]+]]), [[OUT_Real:%xmm[0-9]+]]
-; Add low slice: out[out_start].real, this is base + 0.
-; STRESS-NEXT: vaddss ([[BASE]]), [[OUT_Real]], [[RES_Real:%xmm[0-9]+]]
; Load out[out_start + 8].imm, this is base + 8 * 8 + 4.
; STRESS-NEXT: vmovss 68([[BASE]]), [[OUT_Imm:%xmm[0-9]+]]
+; Add low slice: out[out_start].real, this is base + 0.
+; STRESS-NEXT: vaddss ([[BASE]]), [[OUT_Real]], [[RES_Real:%xmm[0-9]+]]
; Add high slice: out[out_start].imm, this is base + 4.
; STRESS-NEXT: vaddss 4([[BASE]]), [[OUT_Imm]], [[RES_Imm:%xmm[0-9]+]]
; Swap Imm and Real.
@@ -34,10 +34,10 @@
; REGULAR-LABEL: t1:
; Load out[out_start + 8].real, this is base + 8 * 8 + 0.
; REGULAR: vmovss 64([[BASE:[^)]+]]), [[OUT_Real:%xmm[0-9]+]]
-; Add low slice: out[out_start].real, this is base + 0.
-; REGULAR-NEXT: vaddss ([[BASE]]), [[OUT_Real]], [[RES_Real:%xmm[0-9]+]]
; Load out[out_start + 8].imm, this is base + 8 * 8 + 4.
; REGULAR-NEXT: vmovss 68([[BASE]]), [[OUT_Imm:%xmm[0-9]+]]
+; Add low slice: out[out_start].real, this is base + 0.
+; REGULAR-NEXT: vaddss ([[BASE]]), [[OUT_Real]], [[RES_Real:%xmm[0-9]+]]
; Add high slice: out[out_start].imm, this is base + 4.
; REGULAR-NEXT: vaddss 4([[BASE]]), [[OUT_Imm]], [[RES_Imm:%xmm[0-9]+]]
; Swap Imm and Real.
@@ -73,10 +73,10 @@ entry:
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
; Check that we do not read outside of the chunk of bits of the original loads.
;
diff --git a/test/CodeGen/X86/local_stack_symbol_ordering.ll b/test/CodeGen/X86/local_stack_symbol_ordering.ll
index 1893eeec2f1f..1cd4d6c26c35 100644
--- a/test/CodeGen/X86/local_stack_symbol_ordering.ll
+++ b/test/CodeGen/X86/local_stack_symbol_ordering.ll
@@ -115,21 +115,21 @@ entry:
%d = alloca i32, align 4
%aaa = alloca [1000 x i32], align 16
%0 = bitcast i32* %f to i8*
- call void @llvm.lifetime.start(i64 4, i8* %0) #1
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #1
%1 = bitcast [30 x i32]* %a to i8*
- call void @llvm.lifetime.start(i64 120, i8* %1) #1
+ call void @llvm.lifetime.start.p0i8(i64 120, i8* %1) #1
%2 = bitcast [1000 x i32]* %aa to i8*
- call void @llvm.lifetime.start(i64 4000, i8* %2) #1
+ call void @llvm.lifetime.start.p0i8(i64 4000, i8* %2) #1
%3 = bitcast i32* %e to i8*
- call void @llvm.lifetime.start(i64 4, i8* %3) #1
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %3) #1
%4 = bitcast [1000 x i32]* %cc to i8*
- call void @llvm.lifetime.start(i64 4000, i8* %4) #1
+ call void @llvm.lifetime.start.p0i8(i64 4000, i8* %4) #1
%5 = bitcast i32* %b to i8*
- call void @llvm.lifetime.start(i64 4, i8* %5) #1
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %5) #1
%6 = bitcast i32* %d to i8*
- call void @llvm.lifetime.start(i64 4, i8* %6) #1
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %6) #1
%7 = bitcast [1000 x i32]* %aaa to i8*
- call void @llvm.lifetime.start(i64 4000, i8* %7) #1
+ call void @llvm.lifetime.start.p0i8(i64 4000, i8* %7) #1
%call = call i32 ([30 x i32]*, ...) bitcast (i32 (...)* @check_a to i32 ([30 x i32]*, ...)*)([30 x i32]* %a)
%call1 = call i32 ([1000 x i32]*, ...) bitcast (i32 (...)* @bar1 to i32 ([1000 x i32]*, ...)*)([1000 x i32]* %aaa)
call void asm sideeffect "", "~{esi},~{edi},~{ebp},~{ebx},~{rbx},~{r12},~{r13},~{r14},~{r15},~{rbp},~{dirflag},~{fpsr},~{flags}"() #1
@@ -156,19 +156,19 @@ entry:
%call15 = call i32 (i32*, i32*, i32*, ...) bitcast (i32 (...)* @bar3 to i32 (i32*, i32*, i32*, ...)*)(i32* %d, i32* %e, i32* %f)
call void asm sideeffect "", "~{esi},~{edi},~{ebp},~{ebx},~{rbx},~{r12},~{r13},~{r14},~{r15},~{rbp},~{dirflag},~{fpsr},~{flags}"() #1
%call16 = call i32 ([30 x i32]*, ...) bitcast (i32 (...)* @bar1 to i32 ([30 x i32]*, ...)*)([30 x i32]* %a)
- call void @llvm.lifetime.end(i64 4000, i8* %7) #1
- call void @llvm.lifetime.end(i64 4, i8* %6) #1
- call void @llvm.lifetime.end(i64 4, i8* %5) #1
- call void @llvm.lifetime.end(i64 4000, i8* %4) #1
- call void @llvm.lifetime.end(i64 4, i8* %3) #1
- call void @llvm.lifetime.end(i64 4000, i8* %2) #1
- call void @llvm.lifetime.end(i64 120, i8* %1) #1
- call void @llvm.lifetime.end(i64 4, i8* %0) #1
+ call void @llvm.lifetime.end.p0i8(i64 4000, i8* %7) #1
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %6) #1
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %5) #1
+ call void @llvm.lifetime.end.p0i8(i64 4000, i8* %4) #1
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %3) #1
+ call void @llvm.lifetime.end.p0i8(i64 4000, i8* %2) #1
+ call void @llvm.lifetime.end.p0i8(i64 120, i8* %1) #1
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #1
ret void
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare i32 @check_a(...) #2
declare i32 @bar1(...) #2
@@ -180,5 +180,5 @@ declare i32 @check_e(...) #2
declare i32 @check_d(...) #2
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
diff --git a/test/CodeGen/X86/logical-load-fold.ll b/test/CodeGen/X86/logical-load-fold.ll
index 73930ca8bca1..5f06fce1b7b6 100644
--- a/test/CodeGen/X86/logical-load-fold.ll
+++ b/test/CodeGen/X86/logical-load-fold.ll
@@ -15,14 +15,14 @@ define double @load_double_no_fold(double %x, double %y) {
; SSE2: # BB#0:
; SSE2-NEXT: cmplesd %xmm0, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: andpd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: load_double_no_fold:
; AVX: # BB#0:
; AVX-NEXT: vcmplesd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%cmp = fcmp oge double %x, %y
diff --git a/test/CodeGen/X86/longlong-deadload.ll b/test/CodeGen/X86/longlong-deadload.ll
index 3adaf49e372b..01888f07306a 100644
--- a/test/CodeGen/X86/longlong-deadload.ll
+++ b/test/CodeGen/X86/longlong-deadload.ll
@@ -1,14 +1,20 @@
-; RUN: llc < %s -march=x86 | FileCheck %s
-; This should not load or store the top part of *P.
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-pc-linux-gnu | FileCheck %s
+; FIXME: This should not load or store the top part of *P.
define void @test(i64* %P) nounwind {
; CHECK-LABEL: test:
-; CHECK: movl 4(%esp), %[[REGISTER:.*]]
-; CHECK-NOT: 4(%[[REGISTER]])
-; CHECK: ret
- %tmp1 = load i64, i64* %P, align 8 ; <i64> [#uses=1]
- %tmp2 = xor i64 %tmp1, 1 ; <i64> [#uses=1]
- store i64 %tmp2, i64* %P, align 8
- ret void
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl (%eax), %ecx
+; CHECK-NEXT: xorl $1, %ecx
+; CHECK-NEXT: orl $2, %ecx
+; CHECK-NEXT: movl %ecx, (%eax)
+; CHECK-NEXT: retl
+ %tmp1 = load i64, i64* %P, align 8
+ %tmp2 = xor i64 %tmp1, 1
+ %tmp3 = or i64 %tmp2, 2
+ store i64 %tmp3, i64* %P, align 8
+ ret void
}
diff --git a/test/CodeGen/X86/lzcnt-zext-cmp.ll b/test/CodeGen/X86/lzcnt-zext-cmp.ll
index c69dbf573f46..7c961a98ad55 100644
--- a/test/CodeGen/X86/lzcnt-zext-cmp.ll
+++ b/test/CodeGen/X86/lzcnt-zext-cmp.ll
@@ -1,26 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; Test patterns which generate lzcnt instructions.
; E.g.: zext(or(setcc(cmp), setcc(cmp))) -> shr(or(lzcnt, lzcnt))
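; The trick: lzcnt of an N-bit zero is N, and at most N-1 otherwise, so for
; i32 bit 5 of the result (32 = 1 << 5) is set iff the input was zero; or-ing
; two lzcnt results and shifting right by 5 (6 for i64) merges two compares.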
-; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=btver2 | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=btver2 -mattr=-fast-lzcnt | FileCheck --check-prefix=NOFASTLZCNT %s
-; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=znver1 | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=znver1 -mattr=-fast-lzcnt | FileCheck --check-prefix=NOFASTLZCNT %s
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=btver2 | FileCheck --check-prefix=ALL --check-prefix=FASTLZCNT %s
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=btver2 -mattr=-fast-lzcnt | FileCheck --check-prefix=ALL --check-prefix=NOFASTLZCNT %s
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=znver1 | FileCheck --check-prefix=ALL --check-prefix=FASTLZCNT %s
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=znver1 -mattr=-fast-lzcnt | FileCheck --check-prefix=ALL --check-prefix=NOFASTLZCNT %s
; Test one 32-bit input, output is 32-bit, no transformations expected.
define i32 @test_zext_cmp0(i32 %a) {
-; CHECK-LABEL: test_zext_cmp0:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: sete %al
-; CHECK-NEXT: retq
-;
-; NOFASTLZCNT-LABEL: test_zext_cmp0:
-; NOFASTLZCNT: # BB#0: # %entry
-; NOFASTLZCNT-NEXT: xorl %eax, %eax
-; NOFASTLZCNT-NEXT: testl %edi, %edi
-; NOFASTLZCNT-NEXT: sete %al
-; NOFASTLZCNT-NEXT: retq
+; ALL-LABEL: test_zext_cmp0:
+; ALL: # BB#0: # %entry
+; ALL-NEXT: xorl %eax, %eax
+; ALL-NEXT: testl %edi, %edi
+; ALL-NEXT: sete %al
+; ALL-NEXT: retq
entry:
%cmp = icmp eq i32 %a, 0
%conv = zext i1 %cmp to i32
@@ -29,13 +22,13 @@ entry:
; Test two 32-bit inputs, output is 32-bit.
define i32 @test_zext_cmp1(i32 %a, i32 %b) {
-; CHECK-LABEL: test_zext_cmp1:
-; CHECK: # BB#0:
-; CHECK-NEXT: lzcntl %edi, %ecx
-; CHECK-NEXT: lzcntl %esi, %eax
-; CHECK-NEXT: orl %ecx, %eax
-; CHECK-NEXT: shrl $5, %eax
-; CHECK-NEXT: retq
+; FASTLZCNT-LABEL: test_zext_cmp1:
+; FASTLZCNT: # BB#0:
+; FASTLZCNT-NEXT: lzcntl %edi, %ecx
+; FASTLZCNT-NEXT: lzcntl %esi, %eax
+; FASTLZCNT-NEXT: orl %ecx, %eax
+; FASTLZCNT-NEXT: shrl $5, %eax
+; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp1:
; NOFASTLZCNT: # BB#0:
@@ -55,13 +48,13 @@ define i32 @test_zext_cmp1(i32 %a, i32 %b) {
; Test two 64-bit inputs, output is 64-bit.
define i64 @test_zext_cmp2(i64 %a, i64 %b) {
-; CHECK-LABEL: test_zext_cmp2:
-; CHECK: # BB#0:
-; CHECK-NEXT: lzcntq %rdi, %rcx
-; CHECK-NEXT: lzcntq %rsi, %rax
-; CHECK-NEXT: orl %ecx, %eax
-; CHECK-NEXT: shrl $6, %eax
-; CHECK-NEXT: retq
+; FASTLZCNT-LABEL: test_zext_cmp2:
+; FASTLZCNT: # BB#0:
+; FASTLZCNT-NEXT: lzcntq %rdi, %rcx
+; FASTLZCNT-NEXT: lzcntq %rsi, %rax
+; FASTLZCNT-NEXT: orl %ecx, %eax
+; FASTLZCNT-NEXT: shrl $6, %eax
+; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp2:
; NOFASTLZCNT: # BB#0:
@@ -83,27 +76,16 @@ define i64 @test_zext_cmp2(i64 %a, i64 %b) {
; The transform is disabled for the 16-bit case, as we still have to clear the
; upper 16 bits, adding one more instruction.
define i16 @test_zext_cmp3(i16 %a, i16 %b) {
-; CHECK-LABEL: test_zext_cmp3:
-; CHECK: # BB#0:
-; CHECK-NEXT: testw %di, %di
-; CHECK-NEXT: sete %al
-; CHECK-NEXT: testw %si, %si
-; CHECK-NEXT: sete %cl
-; CHECK-NEXT: orb %al, %cl
-; CHECK-NEXT: movzbl %cl, %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; CHECK-NEXT: retq
-;
-; NOFASTLZCNT-LABEL: test_zext_cmp3:
-; NOFASTLZCNT: # BB#0:
-; NOFASTLZCNT-NEXT: testw %di, %di
-; NOFASTLZCNT-NEXT: sete %al
-; NOFASTLZCNT-NEXT: testw %si, %si
-; NOFASTLZCNT-NEXT: sete %cl
-; NOFASTLZCNT-NEXT: orb %al, %cl
-; NOFASTLZCNT-NEXT: movzbl %cl, %eax
-; NOFASTLZCNT-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; NOFASTLZCNT-NEXT: retq
+; ALL-LABEL: test_zext_cmp3:
+; ALL: # BB#0:
+; ALL-NEXT: testw %di, %di
+; ALL-NEXT: sete %al
+; ALL-NEXT: testw %si, %si
+; ALL-NEXT: sete %cl
+; ALL-NEXT: orb %al, %cl
+; ALL-NEXT: movzbl %cl, %eax
+; ALL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ALL-NEXT: retq
%cmp = icmp eq i16 %a, 0
%cmp1 = icmp eq i16 %b, 0
%or = or i1 %cmp, %cmp1
@@ -113,13 +95,13 @@ define i16 @test_zext_cmp3(i16 %a, i16 %b) {
; Test two 32-bit inputs, output is 64-bit.
define i64 @test_zext_cmp4(i32 %a, i32 %b) {
-; CHECK-LABEL: test_zext_cmp4:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: lzcntl %edi, %ecx
-; CHECK-NEXT: lzcntl %esi, %eax
-; CHECK-NEXT: orl %ecx, %eax
-; CHECK-NEXT: shrl $5, %eax
-; CHECK-NEXT: retq
+; FASTLZCNT-LABEL: test_zext_cmp4:
+; FASTLZCNT: # BB#0: # %entry
+; FASTLZCNT-NEXT: lzcntl %edi, %ecx
+; FASTLZCNT-NEXT: lzcntl %esi, %eax
+; FASTLZCNT-NEXT: orl %ecx, %eax
+; FASTLZCNT-NEXT: shrl $5, %eax
+; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp4:
; NOFASTLZCNT: # BB#0: # %entry
@@ -140,14 +122,14 @@ entry:
; Test two 64-bit inputs, output is 32-bit.
define i32 @test_zext_cmp5(i64 %a, i64 %b) {
-; CHECK-LABEL: test_zext_cmp5:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: lzcntq %rdi, %rcx
-; CHECK-NEXT: lzcntq %rsi, %rax
-; CHECK-NEXT: orl %ecx, %eax
-; CHECK-NEXT: shrl $6, %eax
-; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
-; CHECK-NEXT: retq
+; FASTLZCNT-LABEL: test_zext_cmp5:
+; FASTLZCNT: # BB#0: # %entry
+; FASTLZCNT-NEXT: lzcntq %rdi, %rcx
+; FASTLZCNT-NEXT: lzcntq %rsi, %rax
+; FASTLZCNT-NEXT: orl %ecx, %eax
+; FASTLZCNT-NEXT: shrl $6, %eax
+; FASTLZCNT-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp5:
; NOFASTLZCNT: # BB#0: # %entry
@@ -168,15 +150,15 @@ entry:
; Test three 32-bit inputs, output is 32-bit.
define i32 @test_zext_cmp6(i32 %a, i32 %b, i32 %c) {
-; CHECK-LABEL: test_zext_cmp6:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: lzcntl %edi, %eax
-; CHECK-NEXT: lzcntl %esi, %ecx
-; CHECK-NEXT: orl %eax, %ecx
-; CHECK-NEXT: lzcntl %edx, %eax
-; CHECK-NEXT: orl %ecx, %eax
-; CHECK-NEXT: shrl $5, %eax
-; CHECK-NEXT: retq
+; FASTLZCNT-LABEL: test_zext_cmp6:
+; FASTLZCNT: # BB#0: # %entry
+; FASTLZCNT-NEXT: lzcntl %edi, %eax
+; FASTLZCNT-NEXT: lzcntl %esi, %ecx
+; FASTLZCNT-NEXT: orl %eax, %ecx
+; FASTLZCNT-NEXT: lzcntl %edx, %eax
+; FASTLZCNT-NEXT: orl %ecx, %eax
+; FASTLZCNT-NEXT: shrl $5, %eax
+; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp6:
; NOFASTLZCNT: # BB#0: # %entry
@@ -203,15 +185,15 @@ entry:
; Test three 32-bit inputs, output is 32-bit; compared to the test_zext_cmp6
; test, the order of the %.cmp2 inputs is inverted.
define i32 @test_zext_cmp7(i32 %a, i32 %b, i32 %c) {
-; CHECK-LABEL: test_zext_cmp7:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: lzcntl %edi, %eax
-; CHECK-NEXT: lzcntl %esi, %ecx
-; CHECK-NEXT: orl %eax, %ecx
-; CHECK-NEXT: lzcntl %edx, %eax
-; CHECK-NEXT: orl %ecx, %eax
-; CHECK-NEXT: shrl $5, %eax
-; CHECK-NEXT: retq
+; FASTLZCNT-LABEL: test_zext_cmp7:
+; FASTLZCNT: # BB#0: # %entry
+; FASTLZCNT-NEXT: lzcntl %edi, %eax
+; FASTLZCNT-NEXT: lzcntl %esi, %ecx
+; FASTLZCNT-NEXT: orl %eax, %ecx
+; FASTLZCNT-NEXT: lzcntl %edx, %eax
+; FASTLZCNT-NEXT: orl %ecx, %eax
+; FASTLZCNT-NEXT: shrl $5, %eax
+; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp7:
; NOFASTLZCNT: # BB#0: # %entry
@@ -237,17 +219,17 @@ entry:
; Test four 32-bit inputs, output is 32-bit.
define i32 @test_zext_cmp8(i32 %a, i32 %b, i32 %c, i32 %d) {
-; CHECK-LABEL: test_zext_cmp8:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: lzcntl %edi, %eax
-; CHECK-NEXT: lzcntl %esi, %esi
-; CHECK-NEXT: lzcntl %edx, %edx
-; CHECK-NEXT: orl %eax, %esi
-; CHECK-NEXT: lzcntl %ecx, %eax
-; CHECK-NEXT: orl %edx, %eax
-; CHECK-NEXT: orl %esi, %eax
-; CHECK-NEXT: shrl $5, %eax
-; CHECK-NEXT: retq
+; FASTLZCNT-LABEL: test_zext_cmp8:
+; FASTLZCNT: # BB#0: # %entry
+; FASTLZCNT-NEXT: lzcntl %edi, %eax
+; FASTLZCNT-NEXT: lzcntl %esi, %esi
+; FASTLZCNT-NEXT: lzcntl %edx, %edx
+; FASTLZCNT-NEXT: orl %eax, %esi
+; FASTLZCNT-NEXT: lzcntl %ecx, %eax
+; FASTLZCNT-NEXT: orl %edx, %eax
+; FASTLZCNT-NEXT: orl %esi, %eax
+; FASTLZCNT-NEXT: shrl $5, %eax
+; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp8:
; NOFASTLZCNT: # BB#0: # %entry
@@ -278,15 +260,15 @@ entry:
; Test one 32-bit input, one 64-bit input, output is 32-bit.
define i32 @test_zext_cmp9(i32 %a, i64 %b) {
-; CHECK-LABEL: test_zext_cmp9:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: lzcntq %rsi, %rax
-; CHECK-NEXT: lzcntl %edi, %ecx
-; CHECK-NEXT: shrl $5, %ecx
-; CHECK-NEXT: shrl $6, %eax
-; CHECK-NEXT: orl %ecx, %eax
-; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
-; CHECK-NEXT: retq
+; FASTLZCNT-LABEL: test_zext_cmp9:
+; FASTLZCNT: # BB#0: # %entry
+; FASTLZCNT-NEXT: lzcntq %rsi, %rax
+; FASTLZCNT-NEXT: lzcntl %edi, %ecx
+; FASTLZCNT-NEXT: shrl $5, %ecx
+; FASTLZCNT-NEXT: shrl $6, %eax
+; FASTLZCNT-NEXT: orl %ecx, %eax
+; FASTLZCNT-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp9:
; NOFASTLZCNT: # BB#0: # %entry
@@ -307,25 +289,15 @@ entry:
; Test 2 128-bit inputs, output is 32-bit, no transformations expected.
define i32 @test_zext_cmp10(i64 %a.coerce0, i64 %a.coerce1, i64 %b.coerce0, i64 %b.coerce1) {
-; CHECK-LABEL: test_zext_cmp10:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: orq %rsi, %rdi
-; CHECK-NEXT: sete %al
-; CHECK-NEXT: orq %rcx, %rdx
-; CHECK-NEXT: sete %cl
-; CHECK-NEXT: orb %al, %cl
-; CHECK-NEXT: movzbl %cl, %eax
-; CHECK-NEXT: retq
-;
-; NOFASTLZCNT-LABEL: test_zext_cmp10:
-; NOFASTLZCNT: # BB#0: # %entry
-; NOFASTLZCNT-NEXT: orq %rsi, %rdi
-; NOFASTLZCNT-NEXT: sete %al
-; NOFASTLZCNT-NEXT: orq %rcx, %rdx
-; NOFASTLZCNT-NEXT: sete %cl
-; NOFASTLZCNT-NEXT: orb %al, %cl
-; NOFASTLZCNT-NEXT: movzbl %cl, %eax
-; NOFASTLZCNT-NEXT: retq
+; ALL-LABEL: test_zext_cmp10:
+; ALL: # BB#0: # %entry
+; ALL-NEXT: orq %rsi, %rdi
+; ALL-NEXT: sete %al
+; ALL-NEXT: orq %rcx, %rdx
+; ALL-NEXT: sete %cl
+; ALL-NEXT: orb %al, %cl
+; ALL-NEXT: movzbl %cl, %eax
+; ALL-NEXT: retq
entry:
%a.sroa.2.0.insert.ext = zext i64 %a.coerce1 to i128
%a.sroa.2.0.insert.shift = shl nuw i128 %a.sroa.2.0.insert.ext, 64
@@ -341,3 +313,24 @@ entry:
%lor.ext = zext i1 %0 to i32
ret i32 %lor.ext
}
+
+; PR31902 Fix a crash in combineOrCmpEqZeroToCtlzSrl under fast math.
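+; The or(setcc, setcc) here is fed by floating-point compares, which have no
+; lzcnt-based form, so the combine must bail out rather than assume integer
+; operands.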
+define i32 @test_zext_cmp11(double %a, double %b) "no-nans-fp-math"="true" {
+;
+; ALL-LABEL: test_zext_cmp11:
+; ALL: # BB#0: # %entry
+; ALL-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; ALL-NEXT: vucomisd %xmm2, %xmm0
+; ALL-NEXT: sete %al
+; ALL-NEXT: vucomisd %xmm2, %xmm1
+; ALL-NEXT: sete %cl
+; ALL-NEXT: orb %al, %cl
+; ALL-NEXT: movzbl %cl, %eax
+; ALL-NEXT: retq
+entry:
+ %cmp = fcmp fast oeq double %a, 0.000000e+00
+ %cmp1 = fcmp fast oeq double %b, 0.000000e+00
+ %0 = or i1 %cmp, %cmp1
+ %conv = zext i1 %0 to i32
+ ret i32 %conv
+}
diff --git a/test/CodeGen/X86/machine-outliner-debuginfo.ll b/test/CodeGen/X86/machine-outliner-debuginfo.ll
new file mode 100644
index 000000000000..26a194764086
--- /dev/null
+++ b/test/CodeGen/X86/machine-outliner-debuginfo.ll
@@ -0,0 +1,75 @@
+; RUN: llc -enable-machine-outliner -mtriple=x86_64-apple-darwin < %s | FileCheck %s
+
+@x = global i32 0, align 4, !dbg !0
+
+define i32 @main() #0 !dbg !11 {
+ ; CHECK-LABEL: _main:
+ %1 = alloca i32, align 4
+ %2 = alloca i32, align 4
+ %3 = alloca i32, align 4
+ %4 = alloca i32, align 4
+ %5 = alloca i32, align 4
+  ; There is a debug value in the middle of this section; make sure debug values are ignored.
+ ; CHECK: callq l_OUTLINED_FUNCTION_0
+ store i32 1, i32* %2, align 4
+ store i32 2, i32* %3, align 4
+ store i32 3, i32* %4, align 4
+ call void @llvm.dbg.value(metadata i32 10, i64 0, metadata !15, metadata !16), !dbg !17
+ store i32 4, i32* %5, align 4
+ store i32 0, i32* @x, align 4, !dbg !24
+ ; This is the same sequence of instructions without a debug value. It should be outlined
+ ; in the same way.
+ ; CHECK: callq l_OUTLINED_FUNCTION_0
+ store i32 1, i32* %2, align 4
+ store i32 2, i32* %3, align 4
+ store i32 3, i32* %4, align 4
+ store i32 4, i32* %5, align 4
+ store i32 1, i32* @x, align 4, !dbg !14
+ ret i32 0, !dbg !25
+}
+
+; CHECK-LABEL: l_OUTLINED_FUNCTION_0:
+; CHECK-NOT: .loc {{[0-9]+}} {{[0-9]+}} {{[0-9]+}} {{^(is_stmt)}}
+; CHECK-NOT: ##DEBUG_VALUE: main:{{[a-z]}} <- {{[0-9]+}}
+; CHECK: movl $1, -{{[0-9]+}}(%rbp)
+; CHECK-NEXT: movl $2, -{{[0-9]+}}(%rbp)
+; CHECK-NEXT: movl $3, -{{[0-9]+}}(%rbp)
+; CHECK-NEXT: movl $4, -{{[0-9]+}}(%rbp)
+; CHECK-NEXT: retq
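+; No .loc directives or DEBUG_VALUE comments may appear in the outlined body,
+; since it merges instructions taken from call sites with different debug
+; locations.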
+
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
+
+attributes #0 = { noredzone nounwind ssp uwtable "no-frame-pointer-elim"="true" }
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!7, !8, !9}
+!llvm.ident = !{!10}
+
+!0 = !DIGlobalVariableExpression(var: !1)
+!1 = distinct !DIGlobalVariable(name: "x", scope: !2, file: !3, line: 2, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 5.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
+!3 = !DIFile(filename: "debug-test.c", directory: "dir")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!7 = !{i32 2, !"Dwarf Version", i32 4}
+!8 = !{i32 2, !"Debug Info Version", i32 3}
+!9 = !{i32 1, !"PIC Level", i32 2}
+!10 = !{!"clang version 5.0.0"}
+!11 = distinct !DISubprogram(name: "main", scope: !3, file: !3, line: 4, type: !12, isLocal: false, isDefinition: true, scopeLine: 4, flags: DIFlagPrototyped, isOptimized: false, unit: !2, variables: !4)
+!12 = !DISubroutineType(types: !13)
+!13 = !{!6}
+!14 = !DILocation(line: 7, column: 4, scope: !11)
+!15 = !DILocalVariable(name: "a", scope: !11, file: !3, line: 5, type: !6)
+!16 = !DIExpression()
+!17 = !DILocation(line: 5, column: 6, scope: !11)
+!18 = !DILocalVariable(name: "b", scope: !11, file: !3, line: 5, type: !6)
+!19 = !DILocation(line: 5, column: 9, scope: !11)
+!20 = !DILocalVariable(name: "c", scope: !11, file: !3, line: 5, type: !6)
+!21 = !DILocation(line: 5, column: 12, scope: !11)
+!22 = !DILocalVariable(name: "d", scope: !11, file: !3, line: 5, type: !6)
+!23 = !DILocation(line: 5, column: 15, scope: !11)
+!24 = !DILocation(line: 14, column: 4, scope: !11)
+!25 = !DILocation(line: 21, column: 2, scope: !11)
diff --git a/test/CodeGen/X86/machine-outliner-tailcalls.ll b/test/CodeGen/X86/machine-outliner-tailcalls.ll
new file mode 100644
index 000000000000..020f7eeaaff3
--- /dev/null
+++ b/test/CodeGen/X86/machine-outliner-tailcalls.ll
@@ -0,0 +1,35 @@
+; RUN: llc -enable-machine-outliner -mtriple=x86_64-apple-darwin < %s | FileCheck %s
+
+@x = common local_unnamed_addr global i32 0, align 4
+
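+; When the candidate sequence ends in a tail call, the call to the outlined
+; function is itself emitted as a tail call (jmp), and the outlined body ends
+; by tail-calling the original target instead of returning.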
+define i32 @foo0(i32) local_unnamed_addr #0 {
+; CHECK-LABEL: _foo0:
+; CHECK: jmp l_OUTLINED_FUNCTION_0
+; CHECK-NEXT: .cfi_endproc
+ store i32 0, i32* @x, align 4, !tbaa !2
+ %2 = tail call i32 @ext(i32 1) #2
+ ret i32 undef
+}
+
+declare i32 @ext(i32) local_unnamed_addr #1
+
+define i32 @foo1(i32) local_unnamed_addr #0 {
+; CHECK-LABEL: _foo1:
+; CHECK: jmp l_OUTLINED_FUNCTION_0
+; CHECK-NEXT: .cfi_endproc
+ store i32 0, i32* @x, align 4, !tbaa !2
+ %2 = tail call i32 @ext(i32 1) #2
+ ret i32 undef
+}
+
+attributes #0 = { noredzone nounwind ssp uwtable "no-frame-pointer-elim"="false" }
+
+!2 = !{!3, !3, i64 0}
+!3 = !{!"int", !4, i64 0}
+!4 = !{!"omnipotent char", !5, i64 0}
+!5 = !{!"Simple C/C++ TBAA"}
+
+; CHECK-LABEL: l_OUTLINED_FUNCTION_0:
+; CHECK: movl $0, (%rax)
+; CHECK-NEXT: movl $1, %edi
+; CHECK-NEXT: jmp _ext
\ No newline at end of file
diff --git a/test/CodeGen/X86/machine-outliner.ll b/test/CodeGen/X86/machine-outliner.ll
new file mode 100644
index 000000000000..9f8e6ec298f4
--- /dev/null
+++ b/test/CodeGen/X86/machine-outliner.ll
@@ -0,0 +1,110 @@
+; RUN: llc -enable-machine-outliner -mtriple=x86_64-apple-darwin < %s | FileCheck %s
+
+@x = global i32 0, align 4
+
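+; Each repeated run of four stores below is outlined into a local
+; l_OUTLINED_FUNCTION_N and replaced by a call; functions with nothing
+; profitable to outline (empty_1, empty_2, no_empty_outlining) stay unchanged.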
+define i32 @check_boundaries() #0 {
+ ; CHECK-LABEL: _check_boundaries:
+ %1 = alloca i32, align 4
+ %2 = alloca i32, align 4
+ %3 = alloca i32, align 4
+ %4 = alloca i32, align 4
+ %5 = alloca i32, align 4
+ store i32 0, i32* %1, align 4
+ store i32 0, i32* %2, align 4
+ %6 = load i32, i32* %2, align 4
+ %7 = icmp ne i32 %6, 0
+ br i1 %7, label %9, label %8
+
+ ; CHECK: callq [[OFUNC1:l_OUTLINED_FUNCTION_[0-9]+]]
+ ; CHECK: cmpl $0, -{{[0-9]+}}(%rbp)
+ store i32 1, i32* %2, align 4
+ store i32 2, i32* %3, align 4
+ store i32 3, i32* %4, align 4
+ store i32 4, i32* %5, align 4
+ br label %10
+
+ store i32 1, i32* %4, align 4
+ br label %10
+
+ %11 = load i32, i32* %2, align 4
+ %12 = icmp ne i32 %11, 0
+ br i1 %12, label %14, label %13
+
+ ; CHECK: callq [[OFUNC1]]
+ store i32 1, i32* %2, align 4
+ store i32 2, i32* %3, align 4
+ store i32 3, i32* %4, align 4
+ store i32 4, i32* %5, align 4
+ br label %15
+
+ store i32 1, i32* %4, align 4
+ br label %15
+
+ ret i32 0
+}
+
+define i32 @empty_1() #0 {
+ ; CHECK-LABEL: _empty_1:
+ ; CHECK-NOT: callq l_OUTLINED_FUNCTION_{{[0-9]+}}
+ ret i32 1
+}
+
+define i32 @empty_2() #0 {
+  ; CHECK-LABEL: _empty_2:
+ ; CHECK-NOT: callq l_OUTLINED_FUNCTION_{{[0-9]+}}
+ ret i32 1
+}
+
+define i32 @no_empty_outlining() #0 {
+ ; CHECK-LABEL: _no_empty_outlining:
+ %1 = alloca i32, align 4
+ store i32 0, i32* %1, align 4
+ ; CHECK-NOT: callq l_OUTLINED_FUNCTION_{{[0-9]+}}
+ %2 = call i32 @empty_1() #1
+ %3 = call i32 @empty_2() #1
+ %4 = call i32 @empty_1() #1
+ %5 = call i32 @empty_2() #1
+ %6 = call i32 @empty_1() #1
+ %7 = call i32 @empty_2() #1
+ ret i32 0
+}
+
+define i32 @main() #0 {
+ ; CHECK-LABEL: _main:
+ %1 = alloca i32, align 4
+ %2 = alloca i32, align 4
+ %3 = alloca i32, align 4
+ %4 = alloca i32, align 4
+ %5 = alloca i32, align 4
+
+ store i32 0, i32* %1, align 4
+ store i32 0, i32* @x, align 4
+ ; CHECK: callq [[OFUNC2:l_OUTLINED_FUNCTION_[0-9]+]]
+ store i32 1, i32* %2, align 4
+ store i32 2, i32* %3, align 4
+ store i32 3, i32* %4, align 4
+ store i32 4, i32* %5, align 4
+ store i32 1, i32* @x, align 4
+ ; CHECK: callq [[OFUNC2]]
+ store i32 1, i32* %2, align 4
+ store i32 2, i32* %3, align 4
+ store i32 3, i32* %4, align 4
+ store i32 4, i32* %5, align 4
+ ret i32 0
+}
+
+attributes #0 = { noredzone nounwind ssp uwtable "no-frame-pointer-elim"="true" }
+
+; CHECK-LABEL: l_OUTLINED_FUNCTION_1:
+; CHECK: movl $1, -{{[0-9]+}}(%rbp)
+; CHECK-NEXT: movl $2, -{{[0-9]+}}(%rbp)
+; CHECK-NEXT: movl $3, -{{[0-9]+}}(%rbp)
+; CHECK-NEXT: movl $4, -{{[0-9]+}}(%rbp)
+; CHECK-NEXT: retq
+
+; CHECK-LABEL: l_OUTLINED_FUNCTION_0:
+; CHECK: movl $1, -{{[0-9]+}}(%rbp)
+; CHECK-NEXT: movl $2, -{{[0-9]+}}(%rbp)
+; CHECK-NEXT: movl $3, -{{[0-9]+}}(%rbp)
+; CHECK-NEXT: movl $4, -{{[0-9]+}}(%rbp)
+; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/machine-region-info.mir b/test/CodeGen/X86/machine-region-info.mir
new file mode 100644
index 000000000000..0998fe97c235
--- /dev/null
+++ b/test/CodeGen/X86/machine-region-info.mir
@@ -0,0 +1,83 @@
+# RUN: llc -mtriple=x86_64-- -run-pass=machine-region-info %s -debug-only=machine-region-info -o /dev/null 2>&1 | FileCheck %s
+# REQUIRES: asserts
+---
+name: fun
+body: |
+ bb.0:
+ successors: %bb.1, %bb.7
+
+ CMP32ri8 %edi, 40, implicit-def %eflags
+ JNE_1 %bb.7, implicit killed %eflags
+ JMP_1 %bb.1
+
+ bb.1:
+ successors: %bb.2, %bb.11
+
+ CMP32ri8 %edi, 1, implicit-def %eflags
+ JNE_1 %bb.11, implicit killed %eflags
+ JMP_1 %bb.2
+
+ bb.2:
+ successors: %bb.3, %bb.5
+
+ CMP32ri8 %edi, 2, implicit-def %eflags
+ JNE_1 %bb.5, implicit killed %eflags
+ JMP_1 %bb.3
+
+ bb.3:
+ successors: %bb.4, %bb.5
+
+ CMP32ri8 %edi, 90, implicit-def %eflags
+ JNE_1 %bb.5, implicit killed %eflags
+ JMP_1 %bb.4
+
+ bb.4:
+ successors: %bb.5
+
+ bb.5:
+ successors: %bb.6, %bb.11
+
+ CMP32ri8 %edi, 4, implicit-def %eflags
+ JNE_1 %bb.11, implicit killed %eflags
+ JMP_1 %bb.6
+
+ bb.6:
+ successors: %bb.11
+
+ JMP_1 %bb.11
+
+ bb.7:
+ successors: %bb.9, %bb.8
+
+ CMP32ri8 %edi, 5, implicit-def %eflags
+ JE_1 %bb.9, implicit killed %eflags
+ JMP_1 %bb.8
+
+ bb.8:
+ successors: %bb.9
+
+ bb.9:
+ successors: %bb.11, %bb.10
+
+ CMP32ri8 %edi, 6, implicit-def %eflags
+ JE_1 %bb.11, implicit killed %eflags
+ JMP_1 %bb.10
+
+ bb.10:
+ successors: %bb.11
+
+ bb.11:
+ RET 0
+
+...
+
+# CHECK: Region tree:
+# CHECK-NEXT: [0] BB#0 => <Function Return>
+# CHECK-NEXT: [1] BB#0 => BB#11
+# CHECK-NEXT: [2] BB#1 => BB#11
+# CHECK-NEXT: [3] BB#2 => BB#5
+# CHECK-NEXT: [4] BB#3 => BB#5
+# CHECK-NEXT: [3] BB#5 => BB#11
+# CHECK-NEXT: [2] BB#7 => BB#9
+# CHECK-NEXT: [2] BB#9 => BB#11
+# CHECK-NEXT: End region tree
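+#
+# Each dump line reads "[depth] entry => exit": the region is entered at its
+# entry block and left through its exit block, with nesting shown by the
+# bracketed depth.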
diff --git a/test/CodeGen/X86/machine-trace-metrics-crash.ll b/test/CodeGen/X86/machine-trace-metrics-crash.ll
index 5b7c5445316c..6369ee4eb0ef 100644
--- a/test/CodeGen/X86/machine-trace-metrics-crash.ll
+++ b/test/CodeGen/X86/machine-trace-metrics-crash.ll
@@ -31,7 +31,7 @@ if.end:
%add.i = fadd fast float %add, %n0
store float %add.i, float* undef, align 4
%n1 = bitcast %struct.A* %i to i8*
- call void @llvm.lifetime.start(i64 16, i8* %n1)
+ call void @llvm.lifetime.start.p0i8(i64 16, i8* %n1)
%n2 = load <2 x float>, <2 x float>* undef, align 8
%conv = uitofp i1 %tobool to float
%bitcast = extractelement <2 x float> %n2, i32 0
@@ -45,7 +45,7 @@ if.end:
declare void @bar(float)
declare void @foo(%struct.A*)
-declare void @llvm.lifetime.start(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare void @llvm.dbg.value(metadata, i64, metadata, metadata)
!llvm.dbg.cu = !{!0}
diff --git a/test/CodeGen/X86/madd.ll b/test/CodeGen/X86/madd.ll
new file mode 100644
index 000000000000..fdc5ace8d9bc
--- /dev/null
+++ b/test/CodeGen/X86/madd.ll
@@ -0,0 +1,103 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX512
+
+;SSE2-LABEL: @_Z10test_shortPsS_i
+;SSE2: movdqu
+;SSE2-NEXT: movdqu
+;SSE2-NEXT: pmaddwd
+;SSE2-NEXT: paddd
+
+;AVX2-LABEL: @_Z10test_shortPsS_i
+;AVX2: vmovdqu
+;AVX2-NEXT: vpmaddwd
+;AVX2-NEXT: vinserti128
+;AVX2-NEXT: vpaddd
+
+;AVX512-LABEL: @_Z10test_shortPsS_i
+;AVX512: vmovdqu
+;AVX512-NEXT: vpmaddwd
+;AVX512-NEXT: vinserti128
+;AVX512-NEXT: vpaddd
+
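+; The sext-to-i32, multiply, and accumulate over adjacent i16 elements is
+; exactly the pmaddwd idiom (multiply packed signed words, add adjacent pairs
+; into dwords), which is why the vectorized loop bodies select pmaddwd.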
+define i32 @_Z10test_shortPsS_i(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
+entry:
+ %3 = zext i32 %2 to i64
+ br label %vector.body
+
+vector.body:
+ %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
+ %vec.phi = phi <8 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
+ %4 = getelementptr inbounds i16, i16* %0, i64 %index
+ %5 = bitcast i16* %4 to <8 x i16>*
+ %wide.load = load <8 x i16>, <8 x i16>* %5, align 2
+ %6 = sext <8 x i16> %wide.load to <8 x i32>
+ %7 = getelementptr inbounds i16, i16* %1, i64 %index
+ %8 = bitcast i16* %7 to <8 x i16>*
+ %wide.load14 = load <8 x i16>, <8 x i16>* %8, align 2
+ %9 = sext <8 x i16> %wide.load14 to <8 x i32>
+ %10 = mul nsw <8 x i32> %9, %6
+ %11 = add nsw <8 x i32> %10, %vec.phi
+ %index.next = add i64 %index, 8
+ %12 = icmp eq i64 %index.next, %3
+ br i1 %12, label %middle.block, label %vector.body
+
+middle.block:
+ %rdx.shuf = shufflevector <8 x i32> %11, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx = add <8 x i32> %11, %rdx.shuf
+ %rdx.shuf15 = shufflevector <8 x i32> %bin.rdx, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx16 = add <8 x i32> %bin.rdx, %rdx.shuf15
+ %rdx.shuf17 = shufflevector <8 x i32> %bin.rdx16, <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx18 = add <8 x i32> %bin.rdx16, %rdx.shuf17
+ %13 = extractelement <8 x i32> %bin.rdx18, i32 0
+ ret i32 %13
+}
+
+;AVX2-LABEL: @_Z9test_charPcS_i
+;AVX2: vpmovsxbw
+;AVX2-NEXT: vpmovsxbw
+;AVX2-NEXT: vpmaddwd
+;AVX2-NEXT: vpaddd
+
+;AVX512-LABEL: @_Z9test_charPcS_i
+;AVX512: vpmovsxbw
+;AVX512-NEXT: vpmovsxbw
+;AVX512-NEXT: vpmaddwd
+;AVX512-NEXT: vinserti64x4
+;AVX512-NEXT: vpaddd
+
+define i32 @_Z9test_charPcS_i(i8* nocapture readonly, i8* nocapture readonly, i32) local_unnamed_addr #0 {
+entry:
+ %3 = zext i32 %2 to i64
+ br label %vector.body
+
+vector.body:
+ %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
+ %vec.phi = phi <16 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
+ %4 = getelementptr inbounds i8, i8* %0, i64 %index
+ %5 = bitcast i8* %4 to <16 x i8>*
+ %wide.load = load <16 x i8>, <16 x i8>* %5, align 1
+ %6 = sext <16 x i8> %wide.load to <16 x i32>
+ %7 = getelementptr inbounds i8, i8* %1, i64 %index
+ %8 = bitcast i8* %7 to <16 x i8>*
+ %wide.load14 = load <16 x i8>, <16 x i8>* %8, align 1
+ %9 = sext <16 x i8> %wide.load14 to <16 x i32>
+ %10 = mul nsw <16 x i32> %9, %6
+ %11 = add nsw <16 x i32> %10, %vec.phi
+ %index.next = add i64 %index, 16
+ %12 = icmp eq i64 %index.next, %3
+ br i1 %12, label %middle.block, label %vector.body
+
+middle.block:
+ %rdx.shuf = shufflevector <16 x i32> %11, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx = add <16 x i32> %11, %rdx.shuf
+ %rdx.shuf15 = shufflevector <16 x i32> %bin.rdx, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx16 = add <16 x i32> %bin.rdx, %rdx.shuf15
+ %rdx.shuf17 = shufflevector <16 x i32> %bin.rdx16, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx18 = add <16 x i32> %bin.rdx16, %rdx.shuf17
+ %rdx.shuf19 = shufflevector <16 x i32> %bin.rdx18, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx20 = add <16 x i32> %bin.rdx18, %rdx.shuf19
+ %13 = extractelement <16 x i32> %bin.rdx20, i32 0
+ ret i32 %13
+}
diff --git a/test/CodeGen/X86/masked_gather_scatter.ll b/test/CodeGen/X86/masked_gather_scatter.ll
index 6ef291a8a7a5..1a15cab97e2e 100644
--- a/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/test/CodeGen/X86/masked_gather_scatter.ll
@@ -233,6 +233,7 @@ define void @test5(i32* %base, <16 x i32> %ind, i16 %mask, <16 x i32>%val) {
; KNL_64-NEXT: kmovw %k1, %k2
; KNL_64-NEXT: vpscatterdd %zmm1, (%rdi,%zmm0,4) {%k2}
; KNL_64-NEXT: vpscatterdd %zmm1, (%rdi,%zmm0,4) {%k1}
+; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test5:
@@ -242,6 +243,7 @@ define void @test5(i32* %base, <16 x i32> %ind, i16 %mask, <16 x i32>%val) {
; KNL_32-NEXT: kmovw %k1, %k2
; KNL_32-NEXT: vpscatterdd %zmm1, (%eax,%zmm0,4) {%k2}
; KNL_32-NEXT: vpscatterdd %zmm1, (%eax,%zmm0,4) {%k1}
+; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
; SKX-LABEL: test5:
@@ -250,6 +252,7 @@ define void @test5(i32* %base, <16 x i32> %ind, i16 %mask, <16 x i32>%val) {
; SKX-NEXT: kmovw %k1, %k2
; SKX-NEXT: vpscatterdd %zmm1, (%rdi,%zmm0,4) {%k2}
; SKX-NEXT: vpscatterdd %zmm1, (%rdi,%zmm0,4) {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; SKX_32-LABEL: test5:
@@ -259,6 +262,7 @@ define void @test5(i32* %base, <16 x i32> %ind, i16 %mask, <16 x i32>%val) {
; SKX_32-NEXT: kmovw %k1, %k2
; SKX_32-NEXT: vpscatterdd %zmm1, (%eax,%zmm0,4) {%k2}
; SKX_32-NEXT: vpscatterdd %zmm1, (%eax,%zmm0,4) {%k1}
+; SKX_32-NEXT: vzeroupper
; SKX_32-NEXT: retl
%broadcast.splatinsert = insertelement <16 x i32*> undef, i32* %base, i32 0
@@ -356,7 +360,7 @@ define <8 x i32> @test7(i32* %base, <8 x i32> %ind, i8 %mask) {
;
; SKX-LABEL: test7:
; SKX: # BB#0:
-; SKX-NEXT: kmovb %esi, %k1
+; SKX-NEXT: kmovw %esi, %k1
; SKX-NEXT: kmovw %k1, %k2
; SKX-NEXT: vpgatherdd (%rdi,%ymm0,4), %ymm1 {%k2}
; SKX-NEXT: vmovdqa %ymm1, %ymm2
@@ -675,6 +679,7 @@ define <16 x float> @test12(float* %base, <16 x i32> %ind) {
define <16 x float> @test13(float* %base, <16 x i32> %ind) {
; KNL_64-LABEL: test13:
; KNL_64: # BB#0:
+; KNL_64-NEXT: kxnorw %k0, %k0, %k1
; KNL_64-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
; KNL_64-NEXT: vmovaps %zmm1, %zmm0
; KNL_64-NEXT: retq
@@ -682,12 +687,14 @@ define <16 x float> @test13(float* %base, <16 x i32> %ind) {
; KNL_32-LABEL: test13:
; KNL_32: # BB#0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; KNL_32-NEXT: kxnorw %k0, %k0, %k1
; KNL_32-NEXT: vgatherdps (%eax,%zmm0,4), %zmm1 {%k1}
; KNL_32-NEXT: vmovaps %zmm1, %zmm0
; KNL_32-NEXT: retl
;
; SKX-LABEL: test13:
; SKX: # BB#0:
+; SKX-NEXT: kxnorw %k0, %k0, %k1
; SKX-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
; SKX-NEXT: vmovaps %zmm1, %zmm0
; SKX-NEXT: retq
@@ -695,6 +702,7 @@ define <16 x float> @test13(float* %base, <16 x i32> %ind) {
; SKX_32-LABEL: test13:
; SKX_32: # BB#0:
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SKX_32-NEXT: kxnorw %k0, %k0, %k1
; SKX_32-NEXT: vgatherdps (%eax,%zmm0,4), %zmm1 {%k1}
; SKX_32-NEXT: vmovaps %zmm1, %zmm0
; SKX_32-NEXT: retl
@@ -702,7 +710,7 @@ define <16 x float> @test13(float* %base, <16 x i32> %ind) {
%sext_ind = sext <16 x i32> %ind to <16 x i64>
%gep.random = getelementptr float, float *%base, <16 x i64> %sext_ind
- %res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> undef, <16 x float> undef)
+ %res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x float> undef)
ret <16 x float>%res
}
@@ -710,52 +718,52 @@ define <16 x float> @test13(float* %base, <16 x i32> %ind) {
define <16 x float> @test14(float* %base, i32 %ind, <16 x float*> %vec) {
; KNL_64-LABEL: test14:
; KNL_64: # BB#0:
-; KNL_64-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm1
-; KNL_64-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0
+; KNL_64-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0
; KNL_64-NEXT: vpbroadcastq %xmm0, %zmm0
; KNL_64-NEXT: vmovd %esi, %xmm1
; KNL_64-NEXT: vpbroadcastd %xmm1, %ymm1
; KNL_64-NEXT: vpmovsxdq %ymm1, %zmm1
; KNL_64-NEXT: vpsllq $2, %zmm1, %zmm1
; KNL_64-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; KNL_64-NEXT: kshiftrw $8, %k0, %k1
-; KNL_64-NEXT: vgatherqps (,%zmm0), %ymm1 {%k1}
+; KNL_64-NEXT: kxnorw %k0, %k0, %k1
+; KNL_64-NEXT: kshiftrw $8, %k1, %k2
+; KNL_64-NEXT: vgatherqps (,%zmm0), %ymm1 {%k2}
; KNL_64-NEXT: vgatherqps (,%zmm0), %ymm2 {%k1}
; KNL_64-NEXT: vinsertf64x4 $1, %ymm1, %zmm2, %zmm0
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test14:
; KNL_32: # BB#0:
-; KNL_32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm1
-; KNL_32-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0
+; KNL_32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; KNL_32-NEXT: vpbroadcastd %xmm0, %zmm0
; KNL_32-NEXT: vpslld $2, {{[0-9]+}}(%esp){1to16}, %zmm1
; KNL_32-NEXT: vpaddd %zmm1, %zmm0, %zmm1
+; KNL_32-NEXT: kxnorw %k0, %k0, %k1
; KNL_32-NEXT: vgatherdps (,%zmm1), %zmm0 {%k1}
; KNL_32-NEXT: retl
;
; SKX-LABEL: test14:
; SKX: # BB#0:
-; SKX-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm1
-; SKX-NEXT: vinserti64x2 $0, %xmm1, %zmm0, %zmm0
+; SKX-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0
; SKX-NEXT: vpbroadcastq %xmm0, %zmm0
; SKX-NEXT: vpbroadcastd %esi, %ymm1
; SKX-NEXT: vpmovsxdq %ymm1, %zmm1
; SKX-NEXT: vpsllq $2, %zmm1, %zmm1
; SKX-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; SKX-NEXT: kshiftrw $8, %k0, %k1
-; SKX-NEXT: vgatherqps (,%zmm0), %ymm1 {%k1}
+; SKX-NEXT: kxnorw %k0, %k0, %k1
+; SKX-NEXT: kshiftrw $8, %k1, %k2
+; SKX-NEXT: vgatherqps (,%zmm0), %ymm1 {%k2}
; SKX-NEXT: vgatherqps (,%zmm0), %ymm2 {%k1}
; SKX-NEXT: vinsertf32x8 $1, %ymm1, %zmm2, %zmm0
; SKX-NEXT: retq
;
; SKX_32-LABEL: test14:
; SKX_32: # BB#0:
-; SKX_32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm1
-; SKX_32-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0
+; SKX_32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; SKX_32-NEXT: vpbroadcastd %xmm0, %zmm0
; SKX_32-NEXT: vpslld $2, {{[0-9]+}}(%esp){1to16}, %zmm1
; SKX_32-NEXT: vpaddd %zmm1, %zmm0, %zmm1
+; SKX_32-NEXT: kxnorw %k0, %k0, %k1
; SKX_32-NEXT: vgatherdps (,%zmm1), %zmm0 {%k1}
; SKX_32-NEXT: retl
@@ -764,7 +772,7 @@ define <16 x float> @test14(float* %base, i32 %ind, <16 x float*> %vec) {
%gep.random = getelementptr float, <16 x float*> %broadcast.splat, i32 %ind
- %res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> undef, <16 x float> undef)
+ %res = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %gep.random, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x float> undef)
ret <16 x float>%res
}
@@ -786,6 +794,7 @@ define <4 x float> @test15(float* %base, <4 x i32> %ind, <4 x i1> %mask) {
; KNL_64-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL_64-NEXT: vgatherqps (%rdi,%zmm2,4), %ymm0 {%k1}
; KNL_64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test15:
@@ -800,6 +809,7 @@ define <4 x float> @test15(float* %base, <4 x i32> %ind, <4 x i1> %mask) {
; KNL_32-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL_32-NEXT: vgatherqps (%eax,%zmm2,4), %ymm0 {%k1}
; KNL_32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
; SKX-LABEL: test15:
@@ -896,6 +906,7 @@ define <2 x double> @test17(double* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x
; KNL_64-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL_64-NEXT: vgatherqpd (%rdi,%zmm0,8), %zmm2 {%k1}
; KNL_64-NEXT: vmovapd %xmm2, %xmm0
+; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test17:
@@ -909,6 +920,7 @@ define <2 x double> @test17(double* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x
; KNL_32-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL_32-NEXT: vgatherqpd (%eax,%zmm0,8), %zmm2 {%k1}
; KNL_32-NEXT: vmovapd %xmm2, %xmm0
+; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
; SKX-LABEL: test17:
@@ -952,6 +964,7 @@ define void @test18(<4 x i32>%a1, <4 x i32*> %ptr, <4 x i1>%mask) {
; KNL_64-NEXT: vpslld $31, %ymm2, %ymm2
; KNL_64-NEXT: vptestmd %zmm2, %zmm2, %k1
; KNL_64-NEXT: vpscatterqd %ymm0, (,%zmm1) {%k1}
+; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test18:
@@ -965,6 +978,7 @@ define void @test18(<4 x i32>%a1, <4 x i32*> %ptr, <4 x i1>%mask) {
; KNL_32-NEXT: vpslld $31, %ymm2, %ymm2
; KNL_32-NEXT: vptestmd %zmm2, %zmm2, %k1
; KNL_32-NEXT: vpscatterqd %ymm0, (,%zmm1) {%k1}
+; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
; SKX-LABEL: test18:
@@ -972,6 +986,7 @@ define void @test18(<4 x i32>%a1, <4 x i32*> %ptr, <4 x i1>%mask) {
; SKX-NEXT: vpslld $31, %xmm2, %xmm2
; SKX-NEXT: vptestmd %xmm2, %xmm2, %k1
; SKX-NEXT: vpscatterqd %xmm0, (,%ymm1) {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; SKX_32-LABEL: test18:
@@ -998,6 +1013,7 @@ define void @test19(<4 x double>%a1, double* %ptr, <4 x i1>%mask, <4 x i64> %ind
; KNL_64-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL_64-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL_64-NEXT: vscatterqpd %zmm0, (%rdi,%zmm2,8) {%k1}
+; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test19:
@@ -1013,6 +1029,7 @@ define void @test19(<4 x double>%a1, double* %ptr, <4 x i1>%mask, <4 x i64> %ind
; KNL_32-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL_32-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL_32-NEXT: vscatterqpd %zmm0, (%eax,%zmm2,8) {%k1}
+; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
; SKX-LABEL: test19:
@@ -1020,6 +1037,7 @@ define void @test19(<4 x double>%a1, double* %ptr, <4 x i1>%mask, <4 x i64> %ind
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1
; SKX-NEXT: vscatterqpd %ymm0, (%rdi,%ymm2,8) {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; SKX_32-LABEL: test19:
@@ -1028,6 +1046,7 @@ define void @test19(<4 x double>%a1, double* %ptr, <4 x i1>%mask, <4 x i64> %ind
; SKX_32-NEXT: vptestmd %xmm1, %xmm1, %k1
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: vscatterqpd %ymm0, (%eax,%ymm2,8) {%k1}
+; SKX_32-NEXT: vzeroupper
; SKX_32-NEXT: retl
%gep = getelementptr double, double* %ptr, <4 x i64> %ind
call void @llvm.masked.scatter.v4f64(<4 x double> %a1, <4 x double*> %gep, i32 8, <4 x i1> %mask)
@@ -1047,6 +1066,7 @@ define void @test20(<2 x float>%a1, <2 x float*> %ptr, <2 x i1> %mask) {
; KNL_64-NEXT: vpslld $31, %ymm2, %ymm2
; KNL_64-NEXT: vptestmd %zmm2, %zmm2, %k1
; KNL_64-NEXT: vscatterqps %ymm0, (,%zmm1) {%k1}
+; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test20:
@@ -1060,6 +1080,7 @@ define void @test20(<2 x float>%a1, <2 x float*> %ptr, <2 x i1> %mask) {
; KNL_32-NEXT: vpslld $31, %ymm2, %ymm2
; KNL_32-NEXT: vptestmd %zmm2, %zmm2, %k1
; KNL_32-NEXT: vscatterqps %ymm0, (,%zmm1) {%k1}
+; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
; SKX-LABEL: test20:
@@ -1070,6 +1091,7 @@ define void @test20(<2 x float>%a1, <2 x float*> %ptr, <2 x i1> %mask) {
; SKX-NEXT: kshiftlb $6, %k0, %k0
; SKX-NEXT: kshiftrb $6, %k0, %k1
; SKX-NEXT: vscatterqps %xmm0, (,%ymm1) {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; SKX_32-LABEL: test20:
@@ -1097,6 +1119,7 @@ define void @test21(<2 x i32>%a1, <2 x i32*> %ptr, <2 x i1>%mask) {
; KNL_64-NEXT: vpsllq $63, %zmm2, %zmm2
; KNL_64-NEXT: vptestmq %zmm2, %zmm2, %k1
; KNL_64-NEXT: vpscatterqd %ymm0, (,%zmm1) {%k1}
+; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test21:
@@ -1108,6 +1131,7 @@ define void @test21(<2 x i32>%a1, <2 x i32*> %ptr, <2 x i1>%mask) {
; KNL_32-NEXT: vpsllq $63, %zmm2, %zmm2
; KNL_32-NEXT: vptestmq %zmm2, %zmm2, %k1
; KNL_32-NEXT: vpscatterqd %ymm0, (,%zmm1) {%k1}
+; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
; SKX-LABEL: test21:
@@ -1119,6 +1143,7 @@ define void @test21(<2 x i32>%a1, <2 x i32*> %ptr, <2 x i1>%mask) {
; SKX-NEXT: kshiftrb $6, %k0, %k1
; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SKX-NEXT: vpscatterqd %xmm0, (,%ymm1) {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; SKX_32-LABEL: test21:
@@ -1130,6 +1155,7 @@ define void @test21(<2 x i32>%a1, <2 x i32*> %ptr, <2 x i1>%mask) {
; SKX_32-NEXT: kshiftrb $6, %k0, %k1
; SKX_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SKX_32-NEXT: vpscatterqd %xmm0, (,%ymm1) {%k1}
+; SKX_32-NEXT: vzeroupper
; SKX_32-NEXT: retl
call void @llvm.masked.scatter.v2i32(<2 x i32> %a1, <2 x i32*> %ptr, i32 4, <2 x i1> %mask)
ret void
@@ -1153,6 +1179,7 @@ define <2 x float> @test22(float* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x fl
; KNL_64-NEXT: vptestmd %zmm1, %zmm1, %k1
; KNL_64-NEXT: vgatherqps (%rdi,%zmm0,4), %ymm2 {%k1}
; KNL_64-NEXT: vmovaps %xmm2, %xmm0
+; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test22:
@@ -1168,6 +1195,7 @@ define <2 x float> @test22(float* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x fl
; KNL_32-NEXT: vptestmd %zmm1, %zmm1, %k1
; KNL_32-NEXT: vgatherqps (%eax,%zmm0,4), %ymm2 {%k1}
; KNL_32-NEXT: vmovaps %xmm2, %xmm0
+; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
; SKX-LABEL: test22:
@@ -1213,6 +1241,7 @@ define <2 x i32> @test23(i32* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i32> %
; KNL_64-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL_64-NEXT: vpgatherqq (%rdi,%zmm0,8), %zmm2 {%k1}
; KNL_64-NEXT: vmovdqa %xmm2, %xmm0
+; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test23:
@@ -1226,6 +1255,7 @@ define <2 x i32> @test23(i32* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i32> %
; KNL_32-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL_32-NEXT: vpgatherqq (%eax,%zmm0,8), %zmm2 {%k1}
; KNL_32-NEXT: vmovdqa %xmm2, %xmm0
+; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
; SKX-LABEL: test23:
@@ -1258,6 +1288,7 @@ define <2 x i32> @test24(i32* %base, <2 x i32> %ind) {
; KNL_64-NEXT: kmovw %eax, %k1
; KNL_64-NEXT: vpgatherqq (%rdi,%zmm0,8), %zmm1 {%k1}
; KNL_64-NEXT: vmovdqa %xmm1, %xmm0
+; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test24:
@@ -1270,6 +1301,7 @@ define <2 x i32> @test24(i32* %base, <2 x i32> %ind) {
; KNL_32-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL_32-NEXT: vpgatherqq (%eax,%zmm0,8), %zmm1 {%k1}
; KNL_32-NEXT: vmovdqa %xmm1, %xmm0
+; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
; SKX-LABEL: test24:
@@ -1304,6 +1336,7 @@ define <2 x i64> @test25(i64* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i64> %
; KNL_64-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL_64-NEXT: vpgatherqq (%rdi,%zmm0,8), %zmm2 {%k1}
; KNL_64-NEXT: vmovdqa %xmm2, %xmm0
+; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test25:
@@ -1317,6 +1350,7 @@ define <2 x i64> @test25(i64* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i64> %
; KNL_32-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL_32-NEXT: vpgatherqq (%eax,%zmm0,8), %zmm2 {%k1}
; KNL_32-NEXT: vmovdqa %xmm2, %xmm0
+; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
; SKX-LABEL: test25:
@@ -1351,6 +1385,7 @@ define <2 x i64> @test26(i64* %base, <2 x i32> %ind, <2 x i64> %src0) {
; KNL_64-NEXT: kmovw %eax, %k1
; KNL_64-NEXT: vpgatherqq (%rdi,%zmm0,8), %zmm1 {%k1}
; KNL_64-NEXT: vmovdqa %xmm1, %xmm0
+; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test26:
@@ -1364,6 +1399,7 @@ define <2 x i64> @test26(i64* %base, <2 x i32> %ind, <2 x i64> %src0) {
; KNL_32-NEXT: vptestmq %zmm2, %zmm2, %k1
; KNL_32-NEXT: vpgatherqq (%eax,%zmm0,8), %zmm1 {%k1}
; KNL_32-NEXT: vmovdqa %xmm1, %xmm0
+; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
; SKX-LABEL: test26:
@@ -1397,6 +1433,7 @@ define <2 x float> @test27(float* %base, <2 x i32> %ind) {
; KNL_64-NEXT: kmovw %eax, %k1
; KNL_64-NEXT: vgatherqps (%rdi,%zmm1,4), %ymm0 {%k1}
; KNL_64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test27:
@@ -1408,13 +1445,14 @@ define <2 x float> @test27(float* %base, <2 x i32> %ind) {
; KNL_32-NEXT: kmovw %ecx, %k1
; KNL_32-NEXT: vgatherqps (%eax,%zmm1,4), %ymm0 {%k1}
; KNL_32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
; SKX-LABEL: test27:
; SKX: # BB#0:
; SKX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; SKX-NEXT: movb $3, %al
-; SKX-NEXT: kmovb %eax, %k1
+; SKX-NEXT: kmovw %eax, %k1
; SKX-NEXT: vgatherdps (%rdi,%xmm1,4), %xmm0 {%k1}
; SKX-NEXT: retq
;
@@ -1423,7 +1461,7 @@ define <2 x float> @test27(float* %base, <2 x i32> %ind) {
; SKX_32-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; SKX_32-NEXT: movb $3, %cl
-; SKX_32-NEXT: kmovb %ecx, %k1
+; SKX_32-NEXT: kmovw %ecx, %k1
; SKX_32-NEXT: vgatherdps (%eax,%xmm1,4), %xmm0 {%k1}
; SKX_32-NEXT: retl
%sext_ind = sext <2 x i32> %ind to <2 x i64>
@@ -1443,6 +1481,7 @@ define void @test28(<2 x i32>%a1, <2 x i32*> %ptr) {
; KNL_64-NEXT: movb $3, %al
; KNL_64-NEXT: kmovw %eax, %k1
; KNL_64-NEXT: vpscatterqd %ymm0, (,%zmm1) {%k1}
+; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test28:
@@ -1454,24 +1493,27 @@ define void @test28(<2 x i32>%a1, <2 x i32*> %ptr) {
; KNL_32-NEXT: vpsllq $63, %zmm2, %zmm2
; KNL_32-NEXT: vptestmq %zmm2, %zmm2, %k1
; KNL_32-NEXT: vpscatterqd %ymm0, (,%zmm1) {%k1}
+; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
; SKX-LABEL: test28:
; SKX: # BB#0:
; SKX-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
-; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SKX-NEXT: movb $3, %al
-; SKX-NEXT: kmovb %eax, %k1
+; SKX-NEXT: kmovw %eax, %k1
+; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SKX-NEXT: vpscatterqd %xmm0, (,%ymm1) {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; SKX_32-LABEL: test28:
; SKX_32: # BB#0:
; SKX_32-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
-; SKX_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SKX_32-NEXT: movb $3, %al
-; SKX_32-NEXT: kmovb %eax, %k1
+; SKX_32-NEXT: kmovw %eax, %k1
+; SKX_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SKX_32-NEXT: vpscatterqd %xmm0, (,%ymm1) {%k1}
+; SKX_32-NEXT: vzeroupper
; SKX_32-NEXT: retl
call void @llvm.masked.scatter.v2i32(<2 x i32> %a1, <2 x i32*> %ptr, i32 4, <2 x i1> <i1 true, i1 true>)
ret void
@@ -1649,12 +1691,12 @@ define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i
; KNL_32-LABEL: test_gather_16i64:
; KNL_32: # BB#0:
; KNL_32-NEXT: pushl %ebp
-; KNL_32-NEXT: .Lcfi0:
+; KNL_32-NEXT: .Lcfi4:
; KNL_32-NEXT: .cfi_def_cfa_offset 8
-; KNL_32-NEXT: .Lcfi1:
+; KNL_32-NEXT: .Lcfi5:
; KNL_32-NEXT: .cfi_offset %ebp, -8
; KNL_32-NEXT: movl %esp, %ebp
-; KNL_32-NEXT: .Lcfi2:
+; KNL_32-NEXT: .Lcfi6:
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-64, %esp
; KNL_32-NEXT: subl $64, %esp
@@ -1772,12 +1814,12 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <
; KNL_32-LABEL: test_gather_16f64:
; KNL_32: # BB#0:
; KNL_32-NEXT: pushl %ebp
-; KNL_32-NEXT: .Lcfi3:
+; KNL_32-NEXT: .Lcfi7:
; KNL_32-NEXT: .cfi_def_cfa_offset 8
-; KNL_32-NEXT: .Lcfi4:
+; KNL_32-NEXT: .Lcfi8:
; KNL_32-NEXT: .cfi_offset %ebp, -8
; KNL_32-NEXT: movl %esp, %ebp
-; KNL_32-NEXT: .Lcfi5:
+; KNL_32-NEXT: .Lcfi9:
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-64, %esp
; KNL_32-NEXT: subl $64, %esp
@@ -1844,6 +1886,7 @@ define void @test_scatter_16i32(<16 x i32*> %ptrs, <16 x i1> %mask, <16 x i32> %
; KNL_64-NEXT: vpscatterqd %ymm3, (,%zmm0) {%k1}
; KNL_64-NEXT: vextracti64x4 $1, %zmm3, %ymm0
; KNL_64-NEXT: vpscatterqd %ymm0, (,%zmm1) {%k2}
+; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test_scatter_16i32:
@@ -1852,6 +1895,7 @@ define void @test_scatter_16i32(<16 x i32*> %ptrs, <16 x i1> %mask, <16 x i32> %
; KNL_32-NEXT: vpslld $31, %zmm1, %zmm1
; KNL_32-NEXT: vptestmd %zmm1, %zmm1, %k1
; KNL_32-NEXT: vpscatterdd %zmm2, (,%zmm0) {%k1}
+; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
; SKX-LABEL: test_scatter_16i32:
@@ -1863,6 +1907,7 @@ define void @test_scatter_16i32(<16 x i32*> %ptrs, <16 x i1> %mask, <16 x i32> %
; SKX-NEXT: vpscatterqd %ymm3, (,%zmm0) {%k1}
; SKX-NEXT: vextracti32x8 $1, %zmm3, %ymm0
; SKX-NEXT: vpscatterqd %ymm0, (,%zmm1) {%k2}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; SKX_32-LABEL: test_scatter_16i32:
@@ -1871,6 +1916,7 @@ define void @test_scatter_16i32(<16 x i32*> %ptrs, <16 x i1> %mask, <16 x i32> %
; SKX_32-NEXT: vpslld $31, %zmm1, %zmm1
; SKX_32-NEXT: vptestmd %zmm1, %zmm1, %k1
; SKX_32-NEXT: vpscatterdd %zmm2, (,%zmm0) {%k1}
+; SKX_32-NEXT: vzeroupper
; SKX_32-NEXT: retl
call void @llvm.masked.scatter.v16i32(<16 x i32> %src0, <16 x i32*> %ptrs, i32 4, <16 x i1> %mask)
ret void
@@ -1884,17 +1930,18 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> %
; KNL_64-NEXT: kshiftrw $8, %k1, %k2
; KNL_64-NEXT: vpscatterqq %zmm3, (,%zmm0) {%k1}
; KNL_64-NEXT: vpscatterqq %zmm4, (,%zmm1) {%k2}
+; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test_scatter_16i64:
; KNL_32: # BB#0:
; KNL_32-NEXT: pushl %ebp
-; KNL_32-NEXT: .Lcfi6:
+; KNL_32-NEXT: .Lcfi10:
; KNL_32-NEXT: .cfi_def_cfa_offset 8
-; KNL_32-NEXT: .Lcfi7:
+; KNL_32-NEXT: .Lcfi11:
; KNL_32-NEXT: .cfi_offset %ebp, -8
; KNL_32-NEXT: movl %esp, %ebp
-; KNL_32-NEXT: .Lcfi8:
+; KNL_32-NEXT: .Lcfi12:
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-64, %esp
; KNL_32-NEXT: subl $64, %esp
@@ -1908,6 +1955,7 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> %
; KNL_32-NEXT: vpscatterdq %zmm1, (,%ymm0) {%k2}
; KNL_32-NEXT: movl %ebp, %esp
; KNL_32-NEXT: popl %ebp
+; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
; SKX-LABEL: test_scatter_16i64:
@@ -1918,6 +1966,7 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> %
; SKX-NEXT: kshiftrw $8, %k1, %k2
; SKX-NEXT: vpscatterqq %zmm3, (,%zmm0) {%k1}
; SKX-NEXT: vpscatterqq %zmm4, (,%zmm1) {%k2}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; SKX_32-LABEL: test_scatter_16i64:
@@ -1942,6 +1991,7 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> %
; SKX_32-NEXT: vpscatterdq %zmm1, (,%ymm0) {%k2}
; SKX_32-NEXT: movl %ebp, %esp
; SKX_32-NEXT: popl %ebp
+; SKX_32-NEXT: vzeroupper
; SKX_32-NEXT: retl
call void @llvm.masked.scatter.v16i64(<16 x i64> %src0, <16 x i64*> %ptrs, i32 4, <16 x i1> %mask)
ret void
@@ -1957,6 +2007,7 @@ define void @test_scatter_16f32(<16 x float*> %ptrs, <16 x i1> %mask, <16 x floa
; KNL_64-NEXT: vscatterqps %ymm3, (,%zmm0) {%k1}
; KNL_64-NEXT: vextractf64x4 $1, %zmm3, %ymm0
; KNL_64-NEXT: vscatterqps %ymm0, (,%zmm1) {%k2}
+; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test_scatter_16f32:
@@ -1965,6 +2016,7 @@ define void @test_scatter_16f32(<16 x float*> %ptrs, <16 x i1> %mask, <16 x floa
; KNL_32-NEXT: vpslld $31, %zmm1, %zmm1
; KNL_32-NEXT: vptestmd %zmm1, %zmm1, %k1
; KNL_32-NEXT: vscatterdps %zmm2, (,%zmm0) {%k1}
+; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
; SKX-LABEL: test_scatter_16f32:
@@ -1976,6 +2028,7 @@ define void @test_scatter_16f32(<16 x float*> %ptrs, <16 x i1> %mask, <16 x floa
; SKX-NEXT: vscatterqps %ymm3, (,%zmm0) {%k1}
; SKX-NEXT: vextractf32x8 $1, %zmm3, %ymm0
; SKX-NEXT: vscatterqps %ymm0, (,%zmm1) {%k2}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; SKX_32-LABEL: test_scatter_16f32:
@@ -1984,6 +2037,7 @@ define void @test_scatter_16f32(<16 x float*> %ptrs, <16 x i1> %mask, <16 x floa
; SKX_32-NEXT: vpslld $31, %zmm1, %zmm1
; SKX_32-NEXT: vptestmd %zmm1, %zmm1, %k1
; SKX_32-NEXT: vscatterdps %zmm2, (,%zmm0) {%k1}
+; SKX_32-NEXT: vzeroupper
; SKX_32-NEXT: retl
call void @llvm.masked.scatter.v16f32(<16 x float> %src0, <16 x float*> %ptrs, i32 4, <16 x i1> %mask)
ret void
@@ -1998,17 +2052,18 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou
; KNL_64-NEXT: kshiftrw $8, %k1, %k2
; KNL_64-NEXT: vscatterqpd %zmm3, (,%zmm0) {%k1}
; KNL_64-NEXT: vscatterqpd %zmm4, (,%zmm1) {%k2}
+; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test_scatter_16f64:
; KNL_32: # BB#0:
; KNL_32-NEXT: pushl %ebp
-; KNL_32-NEXT: .Lcfi9:
+; KNL_32-NEXT: .Lcfi13:
; KNL_32-NEXT: .cfi_def_cfa_offset 8
-; KNL_32-NEXT: .Lcfi10:
+; KNL_32-NEXT: .Lcfi14:
; KNL_32-NEXT: .cfi_offset %ebp, -8
; KNL_32-NEXT: movl %esp, %ebp
-; KNL_32-NEXT: .Lcfi11:
+; KNL_32-NEXT: .Lcfi15:
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-64, %esp
; KNL_32-NEXT: subl $64, %esp
@@ -2022,6 +2077,7 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou
; KNL_32-NEXT: vscatterdpd %zmm1, (,%ymm0) {%k2}
; KNL_32-NEXT: movl %ebp, %esp
; KNL_32-NEXT: popl %ebp
+; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
; SKX-LABEL: test_scatter_16f64:
@@ -2032,6 +2088,7 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou
; SKX-NEXT: kshiftrw $8, %k1, %k2
; SKX-NEXT: vscatterqpd %zmm3, (,%zmm0) {%k1}
; SKX-NEXT: vscatterqpd %zmm4, (,%zmm1) {%k2}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; SKX_32-LABEL: test_scatter_16f64:
@@ -2056,6 +2113,7 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou
; SKX_32-NEXT: vscatterdpd %zmm1, (,%ymm0) {%k2}
; SKX_32-NEXT: movl %ebp, %esp
; SKX_32-NEXT: popl %ebp
+; SKX_32-NEXT: vzeroupper
; SKX_32-NEXT: retl
call void @llvm.masked.scatter.v16f64(<16 x double> %src0, <16 x double*> %ptrs, i32 4, <16 x i1> %mask)
ret void
@@ -2078,6 +2136,34 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6
; KNL_64-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; KNL_64-NEXT: retq
;
+; KNL_32-LABEL: test_pr28312:
+; KNL_32: # BB#0:
+; KNL_32-NEXT: pushl %ebp
+; KNL_32-NEXT: .Lcfi16:
+; KNL_32-NEXT: .cfi_def_cfa_offset 8
+; KNL_32-NEXT: .Lcfi17:
+; KNL_32-NEXT: .cfi_offset %ebp, -8
+; KNL_32-NEXT: movl %esp, %ebp
+; KNL_32-NEXT: .Lcfi18:
+; KNL_32-NEXT: .cfi_def_cfa_register %ebp
+; KNL_32-NEXT: andl $-32, %esp
+; KNL_32-NEXT: subl $32, %esp
+; KNL_32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL_32-NEXT: vpslld $31, %xmm1, %xmm1
+; KNL_32-NEXT: vpsrad $31, %xmm1, %xmm1
+; KNL_32-NEXT: vpmovsxdq %xmm1, %ymm1
+; KNL_32-NEXT: vpxord %zmm2, %zmm2, %zmm2
+; KNL_32-NEXT: vinserti64x4 $0, %ymm1, %zmm2, %zmm1
+; KNL_32-NEXT: vpmovsxdq %ymm0, %zmm0
+; KNL_32-NEXT: vpsllq $63, %zmm1, %zmm1
+; KNL_32-NEXT: vptestmq %zmm1, %zmm1, %k1
+; KNL_32-NEXT: vpgatherqq (,%zmm0), %zmm1 {%k1}
+; KNL_32-NEXT: vpaddq %ymm1, %ymm1, %ymm0
+; KNL_32-NEXT: vpaddq %ymm0, %ymm1, %ymm0
+; KNL_32-NEXT: movl %ebp, %esp
+; KNL_32-NEXT: popl %ebp
+; KNL_32-NEXT: retl
+;
; SKX-LABEL: test_pr28312:
; SKX: # BB#0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
@@ -2086,6 +2172,27 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6
; SKX-NEXT: vpaddq %ymm1, %ymm1, %ymm0
; SKX-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; SKX-NEXT: retq
+;
+; SKX_32-LABEL: test_pr28312:
+; SKX_32: # BB#0:
+; SKX_32-NEXT: pushl %ebp
+; SKX_32-NEXT: .Lcfi13:
+; SKX_32-NEXT: .cfi_def_cfa_offset 8
+; SKX_32-NEXT: .Lcfi14:
+; SKX_32-NEXT: .cfi_offset %ebp, -8
+; SKX_32-NEXT: movl %esp, %ebp
+; SKX_32-NEXT: .Lcfi15:
+; SKX_32-NEXT: .cfi_def_cfa_register %ebp
+; SKX_32-NEXT: andl $-32, %esp
+; SKX_32-NEXT: subl $32, %esp
+; SKX_32-NEXT: vpslld $31, %xmm1, %xmm1
+; SKX_32-NEXT: vptestmd %xmm1, %xmm1, %k1
+; SKX_32-NEXT: vpgatherdq (,%xmm0), %ymm1 {%k1}
+; SKX_32-NEXT: vpaddq %ymm1, %ymm1, %ymm0
+; SKX_32-NEXT: vpaddq %ymm0, %ymm1, %ymm0
+; SKX_32-NEXT: movl %ebp, %esp
+; SKX_32-NEXT: popl %ebp
+; SKX_32-NEXT: retl
%g1 = call <4 x i64> @llvm.masked.gather.v4i64(<4 x i64*> %p1, i32 8, <4 x i1> %k, <4 x i64> undef)
%g2 = call <4 x i64> @llvm.masked.gather.v4i64(<4 x i64*> %p1, i32 8, <4 x i1> %k, <4 x i64> undef)
%g3 = call <4 x i64> @llvm.masked.gather.v4i64(<4 x i64*> %p1, i32 8, <4 x i1> %k, <4 x i64> undef)
diff --git a/test/CodeGen/X86/masked_memop.ll b/test/CodeGen/X86/masked_memop.ll
index 4e65b169c7e6..3c616e8a9f43 100644
--- a/test/CodeGen/X86/masked_memop.ll
+++ b/test/CodeGen/X86/masked_memop.ll
@@ -29,8 +29,7 @@ define <2 x double> @test6(<2 x i64> %trigger, <2 x double>* %addr, <2 x double>
; SKX: ## BB#0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k1
-; SKX-NEXT: vmovupd (%rdi), %xmm1 {%k1}
-; SKX-NEXT: vmovapd %xmm1, %xmm0
+; SKX-NEXT: vblendmpd (%rdi), %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
%mask = icmp eq <2 x i64> %trigger, zeroinitializer
%res = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %addr, i32 4, <2 x i1>%mask, <2 x double>%dst)
@@ -58,8 +57,7 @@ define <4 x float> @test7(<4 x i32> %trigger, <4 x float>* %addr, <4 x float> %d
; SKX: ## BB#0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpcmpeqd %xmm2, %xmm0, %k1
-; SKX-NEXT: vmovups (%rdi), %xmm1 {%k1}
-; SKX-NEXT: vmovaps %xmm1, %xmm0
+; SKX-NEXT: vblendmps (%rdi), %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
%mask = icmp eq <4 x i32> %trigger, zeroinitializer
%res = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %addr, i32 4, <4 x i1>%mask, <4 x float>%dst)
@@ -95,8 +93,7 @@ define <4 x i32> @test8(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %dst) {
; SKX: ## BB#0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpcmpeqd %xmm2, %xmm0, %k1
-; SKX-NEXT: vmovdqu32 (%rdi), %xmm1 {%k1}
-; SKX-NEXT: vmovdqa %xmm1, %xmm0
+; SKX-NEXT: vpblendmd (%rdi), %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
%mask = icmp eq <4 x i32> %trigger, zeroinitializer
%res = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr, i32 4, <4 x i1>%mask, <4 x i32>%dst)
@@ -171,8 +168,7 @@ define <4 x double> @test10(<4 x i32> %trigger, <4 x double>* %addr, <4 x double
; SKX: ## BB#0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpcmpeqd %xmm2, %xmm0, %k1
-; SKX-NEXT: vmovapd (%rdi), %ymm1 {%k1}
-; SKX-NEXT: vmovapd %ymm1, %ymm0
+; SKX-NEXT: vblendmpd (%rdi), %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
%mask = icmp eq <4 x i32> %trigger, zeroinitializer
%res = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %addr, i32 32, <4 x i1>%mask, <4 x double>%dst)
@@ -246,16 +242,15 @@ define <8 x float> @test11a(<8 x i32> %trigger, <8 x float>* %addr, <8 x float>
; AVX512F-NEXT: vpcmpeqd %zmm2, %zmm0, %k0
; AVX512F-NEXT: kshiftlw $8, %k0, %k0
; AVX512F-NEXT: kshiftrw $8, %k0, %k1
-; AVX512F-NEXT: vmovups (%rdi), %zmm1 {%k1}
-; AVX512F-NEXT: vmovaps %ymm1, %ymm0
+; AVX512F-NEXT: vblendmps (%rdi), %zmm1, %zmm0 {%k1}
+; AVX512F-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512F-NEXT: retq
;
; SKX-LABEL: test11a:
; SKX: ## BB#0:
; SKX-NEXT: vpxor %ymm2, %ymm2, %ymm2
; SKX-NEXT: vpcmpeqd %ymm2, %ymm0, %k1
-; SKX-NEXT: vmovaps (%rdi), %ymm1 {%k1}
-; SKX-NEXT: vmovaps %ymm1, %ymm0
+; SKX-NEXT: vblendmps (%rdi), %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
%mask = icmp eq <8 x i32> %trigger, zeroinitializer
%res = call <8 x float> @llvm.masked.load.v8f32.p0v8f32(<8 x float>* %addr, i32 32, <8 x i1>%mask, <8 x float>%dst)
@@ -293,16 +288,15 @@ define <8 x i32> @test11b(<8 x i1> %mask, <8 x i32>* %addr, <8 x i32> %dst) {
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kshiftlw $8, %k0, %k0
; AVX512F-NEXT: kshiftrw $8, %k0, %k1
-; AVX512F-NEXT: vmovdqu32 (%rdi), %zmm1 {%k1}
-; AVX512F-NEXT: vmovdqa %ymm1, %ymm0
+; AVX512F-NEXT: vpblendmd (%rdi), %zmm1, %zmm0 {%k1}
+; AVX512F-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512F-NEXT: retq
;
; SKX-LABEL: test11b:
; SKX: ## BB#0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
-; SKX-NEXT: vmovdqu32 (%rdi), %ymm1 {%k1}
-; SKX-NEXT: vmovdqa %ymm1, %ymm0
+; SKX-NEXT: vpblendmd (%rdi), %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
%res = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* %addr, i32 4, <8 x i1>%mask, <8 x i32>%dst)
ret <8 x i32> %res
@@ -421,6 +415,7 @@ define void @test12(<8 x i32> %trigger, <8 x i32>* %addr, <8 x i32> %val) {
; AVX512F-NEXT: kshiftlw $8, %k0, %k0
; AVX512F-NEXT: kshiftrw $8, %k0, %k1
; AVX512F-NEXT: vmovdqu32 %zmm1, (%rdi) {%k1}
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; SKX-LABEL: test12:
@@ -428,6 +423,7 @@ define void @test12(<8 x i32> %trigger, <8 x i32>* %addr, <8 x i32> %val) {
; SKX-NEXT: vpxor %ymm2, %ymm2, %ymm2
; SKX-NEXT: vpcmpeqd %ymm2, %ymm0, %k1
; SKX-NEXT: vmovdqu32 %ymm1, (%rdi) {%k1}
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%mask = icmp eq <8 x i32> %trigger, zeroinitializer
call void @llvm.masked.store.v8i32.p0v8i32(<8 x i32>%val, <8 x i32>* %addr, i32 4, <8 x i1>%mask)
@@ -557,8 +553,7 @@ define <2 x float> @test16(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %
; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k0
; SKX-NEXT: kshiftlw $14, %k0, %k0
; SKX-NEXT: kshiftrw $14, %k0, %k1
-; SKX-NEXT: vmovups (%rdi), %xmm1 {%k1}
-; SKX-NEXT: vmovaps %xmm1, %xmm0
+; SKX-NEXT: vblendmps (%rdi), %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
%mask = icmp eq <2 x i32> %trigger, zeroinitializer
%res = call <2 x float> @llvm.masked.load.v2f32.p0v2f32(<2 x float>* %addr, i32 4, <2 x i1>%mask, <2 x float>%dst)
@@ -702,7 +697,7 @@ define <4 x float> @mload_constmask_v4f32(<4 x float>* %addr, <4 x float> %dst)
; SKX-LABEL: mload_constmask_v4f32:
; SKX: ## BB#0:
; SKX-NEXT: movb $13, %al
-; SKX-NEXT: kmovw %eax, %k1
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovups (%rdi), %xmm0 {%k1}
; SKX-NEXT: retq
%res = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %addr, i32 4, <4 x i1> <i1 1, i1 0, i1 1, i1 1>, <4 x float> %dst)
@@ -736,7 +731,7 @@ define <4 x i32> @mload_constmask_v4i32(<4 x i32>* %addr, <4 x i32> %dst) {
; SKX-LABEL: mload_constmask_v4i32:
; SKX: ## BB#0:
; SKX-NEXT: movb $14, %al
-; SKX-NEXT: kmovw %eax, %k1
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1}
; SKX-NEXT: retq
%res = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr, i32 4, <4 x i1> <i1 0, i1 1, i1 1, i1 1>, <4 x i32> %dst)
@@ -765,7 +760,7 @@ define <8 x float> @mload_constmask_v8f32(<8 x float>* %addr, <8 x float> %dst)
; SKX-LABEL: mload_constmask_v8f32:
; SKX: ## BB#0:
; SKX-NEXT: movb $7, %al
-; SKX-NEXT: kmovw %eax, %k1
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovups (%rdi), %ymm0 {%k1}
; SKX-NEXT: retq
%res = call <8 x float> @llvm.masked.load.v8f32.p0v8f32(<8 x float>* %addr, i32 4, <8 x i1> <i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0>, <8 x float> %dst)
@@ -790,7 +785,7 @@ define <4 x double> @mload_constmask_v4f64(<4 x double>* %addr, <4 x double> %ds
; SKX-LABEL: mload_constmask_v4f64:
; SKX: ## BB#0:
; SKX-NEXT: movb $7, %al
-; SKX-NEXT: kmovw %eax, %k1
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovupd (%rdi), %ymm0 {%k1}
; SKX-NEXT: retq
%res = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %addr, i32 4, <4 x i1> <i1 1, i1 1, i1 1, i1 0>, <4 x double> %dst)
@@ -822,7 +817,7 @@ define <8 x i32> @mload_constmask_v8i32(<8 x i32>* %addr, <8 x i32> %dst) {
; SKX-LABEL: mload_constmask_v8i32:
; SKX: ## BB#0:
; SKX-NEXT: movb $-121, %al
-; SKX-NEXT: kmovw %eax, %k1
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1}
; SKX-NEXT: retq
%res = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* %addr, i32 4, <8 x i1> <i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 1>, <8 x i32> %dst)
@@ -850,7 +845,7 @@ define <4 x i64> @mload_constmask_v4i64(<4 x i64>* %addr, <4 x i64> %dst) {
; SKX-LABEL: mload_constmask_v4i64:
; SKX: ## BB#0:
; SKX-NEXT: movb $9, %al
-; SKX-NEXT: kmovw %eax, %k1
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1}
; SKX-NEXT: retq
%res = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* %addr, i32 4, <4 x i1> <i1 1, i1 0, i1 0, i1 1>, <4 x i64> %dst)
@@ -866,12 +861,19 @@ define <8 x double> @mload_constmask_v8f64(<8 x double>* %addr, <8 x double> %ds
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1,2],ymm0[3]
; AVX-NEXT: retq
;
-; AVX512-LABEL: mload_constmask_v8f64:
-; AVX512: ## BB#0:
-; AVX512-NEXT: movb $-121, %al
-; AVX512-NEXT: kmovw %eax, %k1
-; AVX512-NEXT: vmovupd (%rdi), %zmm0 {%k1}
-; AVX512-NEXT: retq
+; AVX512F-LABEL: mload_constmask_v8f64:
+; AVX512F: ## BB#0:
+; AVX512F-NEXT: movb $-121, %al
+; AVX512F-NEXT: kmovw %eax, %k1
+; AVX512F-NEXT: vmovupd (%rdi), %zmm0 {%k1}
+; AVX512F-NEXT: retq
+;
+; SKX-LABEL: mload_constmask_v8f64:
+; SKX: ## BB#0:
+; SKX-NEXT: movb $-121, %al
+; SKX-NEXT: kmovd %eax, %k1
+; SKX-NEXT: vmovupd (%rdi), %zmm0 {%k1}
+; SKX-NEXT: retq
%res = call <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>* %addr, i32 4, <8 x i1> <i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 1>, <8 x double> %dst)
ret <8 x double> %res
}
@@ -894,7 +896,7 @@ define <4 x double> @mload_constmask_v4f64_undef_passthrough(<4 x double>* %addr
; SKX-LABEL: mload_constmask_v4f64_undef_passthrough:
; SKX: ## BB#0:
; SKX-NEXT: movb $7, %al
-; SKX-NEXT: kmovw %eax, %k1
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovupd (%rdi), %ymm0 {%k1} {z}
; SKX-NEXT: retq
%res = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %addr, i32 4, <4 x i1> <i1 1, i1 1, i1 1, i1 0>, <4 x double> undef)
@@ -923,7 +925,7 @@ define <4 x i64> @mload_constmask_v4i64_undef_passthrough(<4 x i64>* %addr) {
; SKX-LABEL: mload_constmask_v4i64_undef_passthrough:
; SKX: ## BB#0:
; SKX-NEXT: movb $6, %al
-; SKX-NEXT: kmovw %eax, %k1
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1} {z}
; SKX-NEXT: retq
%res = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* %addr, i32 4, <4 x i1> <i1 0, i1 1, i1 1, i1 0>, <4 x i64> undef)
@@ -1005,12 +1007,14 @@ define void @one_mask_bit_set3(<4 x i64>* %addr, <4 x i64> %val) {
; AVX512F: ## BB#0:
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vmovlps %xmm0, 16(%rdi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; SKX-LABEL: one_mask_bit_set3:
; SKX: ## BB#0:
; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0
; SKX-NEXT: vmovq %xmm0, 16(%rdi)
+; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %val, <4 x i64>* %addr, i32 4, <4 x i1><i1 false, i1 false, i1 true, i1 false>)
ret void
@@ -1030,6 +1034,7 @@ define void @one_mask_bit_set4(<4 x double>* %addr, <4 x double> %val) {
; AVX512: ## BB#0:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovhpd %xmm0, 24(%rdi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
call void @llvm.masked.store.v4f64.p0v4f64(<4 x double> %val, <4 x double>* %addr, i32 4, <4 x i1><i1 false, i1 false, i1 false, i1 true>)
ret void
@@ -1049,6 +1054,7 @@ define void @one_mask_bit_set5(<8 x double>* %addr, <8 x double> %val) {
; AVX512: ## BB#0:
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vmovlps %xmm0, 48(%rdi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
call void @llvm.masked.store.v8f64.p0v8f64(<8 x double> %val, <8 x double>* %addr, i32 4, <8 x i1><i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 false>)
ret void
diff --git a/test/CodeGen/X86/mature-mc-support.ll b/test/CodeGen/X86/mature-mc-support.ll
index 9d956f46beca..3d6f0f66c187 100644
--- a/test/CodeGen/X86/mature-mc-support.ll
+++ b/test/CodeGen/X86/mature-mc-support.ll
@@ -15,4 +15,4 @@
module asm " .this_directive_is_very_unlikely_to_exist"
-; CHECK: LLVM ERROR: Error parsing inline asm
+; CHECK: error: unknown directive
diff --git a/test/CodeGen/X86/memcmp.ll b/test/CodeGen/X86/memcmp.ll
index 6a51d60f636c..ce1bb3b06ce5 100644
--- a/test/CodeGen/X86/memcmp.ll
+++ b/test/CodeGen/X86/memcmp.ll
@@ -1,130 +1,368 @@
-; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
-; RUN: llc < %s -disable-simplify-libcalls -mtriple=x86_64-linux | FileCheck %s --check-prefix=NOBUILTIN
-; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64 --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=AVX2 | FileCheck %s --check-prefix=X64 --check-prefix=AVX2
; This tests codegen-time inlining/optimization of memcmp
; rdar://6480398
-@.str = private constant [23 x i8] c"fooooooooooooooooooooo\00", align 1 ; <[23 x i8]*> [#uses=1]
+@.str = private constant [65 x i8] c"0123456789012345678901234567890123456789012345678901234567890123\00", align 1
-declare i32 @memcmp(...)
+declare i32 @memcmp(i8*, i8*, i64)
-define void @memcmp2(i8* %X, i8* %Y, i32* nocapture %P) nounwind {
-entry:
- %0 = tail call i32 (...) @memcmp(i8* %X, i8* %Y, i32 2) nounwind ; <i32> [#uses=1]
- %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
- br i1 %1, label %return, label %bb
+define i1 @length2(i8* %X, i8* %Y, i32* nocapture %P) nounwind {
+; X32-LABEL: length2:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movzwl (%ecx), %ecx
+; X32-NEXT: cmpw (%eax), %cx
+; X32-NEXT: sete %al
+; X32-NEXT: retl
+;
+; X64-LABEL: length2:
+; X64: # BB#0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: cmpw (%rsi), %ax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
-bb: ; preds = %entry
- store i32 4, i32* %P, align 4
- ret void
+define i1 @length2_const(i8* %X, i32* nocapture %P) nounwind {
+; X32-LABEL: length2_const:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movzwl (%eax), %eax
+; X32-NEXT: cmpl $12849, %eax # imm = 0x3231
+; X32-NEXT: setne %al
+; X32-NEXT: retl
+;
+; X64-LABEL: length2_const:
+; X64: # BB#0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: cmpl $12849, %eax # imm = 0x3231
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 1), i64 2) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
-return: ; preds = %entry
- ret void
-; CHECK-LABEL: memcmp2:
-; CHECK: movzwl
-; CHECK-NEXT: cmpw
-; NOBUILTIN-LABEL: memcmp2:
-; NOBUILTIN: callq
+define i1 @length2_nobuiltin_attr(i8* %X, i8* %Y, i32* nocapture %P) nounwind {
+; X32-LABEL: length2_nobuiltin_attr:
+; X32: # BB#0:
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $2
+; X32-NEXT: pushl {{[0-9]+}}(%esp)
+; X32-NEXT: pushl {{[0-9]+}}(%esp)
+; X32-NEXT: calll memcmp
+; X32-NEXT: addl $16, %esp
+; X32-NEXT: testl %eax, %eax
+; X32-NEXT: sete %al
+; X32-NEXT: retl
+;
+; X64-LABEL: length2_nobuiltin_attr:
+; X64: # BB#0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl $2, %edx
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: sete %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind nobuiltin
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
}
-define void @memcmp2a(i8* %X, i32* nocapture %P) nounwind {
-entry:
- %0 = tail call i32 (...) @memcmp(i8* %X, i8* getelementptr inbounds ([23 x i8], [23 x i8]* @.str, i32 0, i32 1), i32 2) nounwind ; <i32> [#uses=1]
- %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
- br i1 %1, label %return, label %bb
-
-bb: ; preds = %entry
- store i32 4, i32* %P, align 4
- ret void
-
-return: ; preds = %entry
- ret void
-; CHECK-LABEL: memcmp2a:
-; CHECK: movzwl
-; CHECK-NEXT: cmpl $28527,
+define i1 @length4(i8* %X, i8* %Y, i32* nocapture %P) nounwind {
+; X32-LABEL: length4:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl (%ecx), %ecx
+; X32-NEXT: cmpl (%eax), %ecx
+; X32-NEXT: setne %al
+; X32-NEXT: retl
+;
+; X64-LABEL: length4:
+; X64: # BB#0:
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: cmpl (%rsi), %eax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
}
-define void @memcmp2nb(i8* %X, i8* %Y, i32* nocapture %P) nounwind {
-entry:
- %0 = tail call i32 (...) @memcmp(i8* %X, i8* %Y, i32 2) nounwind nobuiltin ; <i32> [#uses=1]
- %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
- br i1 %1, label %return, label %bb
+define i1 @length4_const(i8* %X, i32* nocapture %P) nounwind {
+; X32-LABEL: length4_const:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: cmpl $875770417, (%eax) # imm = 0x34333231
+; X32-NEXT: sete %al
+; X32-NEXT: retl
+;
+; X64-LABEL: length4_const:
+; X64: # BB#0:
+; X64-NEXT: cmpl $875770417, (%rdi) # imm = 0x34333231
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 1), i64 4) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
-bb: ; preds = %entry
- store i32 4, i32* %P, align 4
- ret void
+define i1 @length8(i8* %X, i8* %Y, i32* nocapture %P) nounwind {
+; X32-LABEL: length8:
+; X32: # BB#0:
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $8
+; X32-NEXT: pushl {{[0-9]+}}(%esp)
+; X32-NEXT: pushl {{[0-9]+}}(%esp)
+; X32-NEXT: calll memcmp
+; X32-NEXT: addl $16, %esp
+; X32-NEXT: testl %eax, %eax
+; X32-NEXT: sete %al
+; X32-NEXT: retl
+;
+; X64-LABEL: length8:
+; X64: # BB#0:
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: cmpq (%rsi), %rax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 8) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
-return: ; preds = %entry
- ret void
-; CHECK-LABEL: memcmp2nb:
-; CHECK: callq
+define i1 @length8_const(i8* %X, i32* nocapture %P) nounwind {
+; X32-LABEL: length8_const:
+; X32: # BB#0:
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $8
+; X32-NEXT: pushl $.L.str
+; X32-NEXT: pushl {{[0-9]+}}(%esp)
+; X32-NEXT: calll memcmp
+; X32-NEXT: addl $16, %esp
+; X32-NEXT: testl %eax, %eax
+; X32-NEXT: setne %al
+; X32-NEXT: retl
+;
+; X64-LABEL: length8_const:
+; X64: # BB#0:
+; X64-NEXT: movabsq $3978425819141910832, %rax # imm = 0x3736353433323130
+; X64-NEXT: cmpq %rax, (%rdi)
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 8) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
}
-define void @memcmp4(i8* %X, i8* %Y, i32* nocapture %P) nounwind {
-entry:
- %0 = tail call i32 (...) @memcmp(i8* %X, i8* %Y, i32 4) nounwind ; <i32> [#uses=1]
- %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
- br i1 %1, label %return, label %bb
-
-bb: ; preds = %entry
- store i32 4, i32* %P, align 4
- ret void
-
-return: ; preds = %entry
- ret void
-; CHECK-LABEL: memcmp4:
-; CHECK: movl
-; CHECK-NEXT: cmpl
+define i1 @length16(i8* %x, i8* %y) nounwind {
+; X32-LABEL: length16:
+; X32: # BB#0:
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $16
+; X32-NEXT: pushl {{[0-9]+}}(%esp)
+; X32-NEXT: pushl {{[0-9]+}}(%esp)
+; X32-NEXT: calll memcmp
+; X32-NEXT: addl $16, %esp
+; X32-NEXT: testl %eax, %eax
+; X32-NEXT: setne %al
+; X32-NEXT: retl
+;
+; SSE2-LABEL: length16:
+; SSE2: # BB#0:
+; SSE2-NEXT: movdqu (%rsi), %xmm0
+; SSE2-NEXT: movdqu (%rdi), %xmm1
+; SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; SSE2-NEXT: pmovmskb %xmm1, %eax
+; SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: length16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; AVX2-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; AVX2-NEXT: setne %al
+; AVX2-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16) nounwind
+ %cmp = icmp ne i32 %call, 0
+ ret i1 %cmp
}
-define void @memcmp4a(i8* %X, i32* nocapture %P) nounwind {
-entry:
- %0 = tail call i32 (...) @memcmp(i8* %X, i8* getelementptr inbounds ([23 x i8], [23 x i8]* @.str, i32 0, i32 1), i32 4) nounwind ; <i32> [#uses=1]
- %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
- br i1 %1, label %return, label %bb
+define i1 @length16_const(i8* %X, i32* nocapture %P) nounwind {
+; X32-LABEL: length16_const:
+; X32: # BB#0:
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $16
+; X32-NEXT: pushl $.L.str
+; X32-NEXT: pushl {{[0-9]+}}(%esp)
+; X32-NEXT: calll memcmp
+; X32-NEXT: addl $16, %esp
+; X32-NEXT: testl %eax, %eax
+; X32-NEXT: sete %al
+; X32-NEXT: retl
+;
+; SSE2-LABEL: length16_const:
+; SSE2: # BB#0:
+; SSE2-NEXT: movdqu (%rdi), %xmm0
+; SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
+; SSE2-NEXT: pmovmskb %xmm0, %eax
+; SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: length16_const:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 16) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
-bb: ; preds = %entry
- store i32 4, i32* %P, align 4
- ret void
+define i1 @length32(i8* %x, i8* %y) nounwind {
+; X32-LABEL: length32:
+; X32: # BB#0:
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $32
+; X32-NEXT: pushl {{[0-9]+}}(%esp)
+; X32-NEXT: pushl {{[0-9]+}}(%esp)
+; X32-NEXT: calll memcmp
+; X32-NEXT: addl $16, %esp
+; X32-NEXT: testl %eax, %eax
+; X32-NEXT: sete %al
+; X32-NEXT: retl
+;
+; SSE2-LABEL: length32:
+; SSE2: # BB#0:
+; SSE2-NEXT: pushq %rax
+; SSE2-NEXT: movl $32, %edx
+; SSE2-NEXT: callq memcmp
+; SSE2-NEXT: testl %eax, %eax
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: popq %rcx
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: length32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; AVX2-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
+; AVX2-NEXT: cmpl $-1, %eax
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind
+ %cmp = icmp eq i32 %call, 0
+ ret i1 %cmp
+}
-return: ; preds = %entry
- ret void
-; CHECK-LABEL: memcmp4a:
-; CHECK: cmpl $1869573999,
+define i1 @length32_const(i8* %X, i32* nocapture %P) nounwind {
+; X32-LABEL: length32_const:
+; X32: # BB#0:
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $32
+; X32-NEXT: pushl $.L.str
+; X32-NEXT: pushl {{[0-9]+}}(%esp)
+; X32-NEXT: calll memcmp
+; X32-NEXT: addl $16, %esp
+; X32-NEXT: testl %eax, %eax
+; X32-NEXT: setne %al
+; X32-NEXT: retl
+;
+; SSE2-LABEL: length32_const:
+; SSE2: # BB#0:
+; SSE2-NEXT: pushq %rax
+; SSE2-NEXT: movl $.L.str, %esi
+; SSE2-NEXT: movl $32, %edx
+; SSE2-NEXT: callq memcmp
+; SSE2-NEXT: testl %eax, %eax
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: popq %rcx
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: length32_const:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
+; AVX2-NEXT: cmpl $-1, %eax
+; AVX2-NEXT: setne %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 32) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
}
-define void @memcmp8(i8* %X, i8* %Y, i32* nocapture %P) nounwind {
-entry:
- %0 = tail call i32 (...) @memcmp(i8* %X, i8* %Y, i32 8) nounwind ; <i32> [#uses=1]
- %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
- br i1 %1, label %return, label %bb
-
-bb: ; preds = %entry
- store i32 4, i32* %P, align 4
- ret void
-
-return: ; preds = %entry
- ret void
-; CHECK-LABEL: memcmp8:
-; CHECK: movq
-; CHECK: cmpq
+define i1 @length64(i8* %x, i8* %y) nounwind {
+; X32-LABEL: length64:
+; X32: # BB#0:
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $64
+; X32-NEXT: pushl {{[0-9]+}}(%esp)
+; X32-NEXT: pushl {{[0-9]+}}(%esp)
+; X32-NEXT: calll memcmp
+; X32-NEXT: addl $16, %esp
+; X32-NEXT: testl %eax, %eax
+; X32-NEXT: setne %al
+; X32-NEXT: retl
+;
+; X64-LABEL: length64:
+; X64: # BB#0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl $64, %edx
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 64) nounwind
+ %cmp = icmp ne i32 %call, 0
+ ret i1 %cmp
}
-define void @memcmp8a(i8* %X, i32* nocapture %P) nounwind {
-entry:
- %0 = tail call i32 (...) @memcmp(i8* %X, i8* getelementptr inbounds ([23 x i8], [23 x i8]* @.str, i32 0, i32 0), i32 8) nounwind ; <i32> [#uses=1]
- %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
- br i1 %1, label %return, label %bb
-
-bb: ; preds = %entry
- store i32 4, i32* %P, align 4
- ret void
-
-return: ; preds = %entry
- ret void
-; CHECK-LABEL: memcmp8a:
-; CHECK: movabsq $8029759185026510694,
-; CHECK: cmpq
+define i1 @length64_const(i8* %X, i32* nocapture %P) nounwind {
+; X32-LABEL: length64_const:
+; X32: # BB#0:
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $64
+; X32-NEXT: pushl $.L.str
+; X32-NEXT: pushl {{[0-9]+}}(%esp)
+; X32-NEXT: calll memcmp
+; X32-NEXT: addl $16, %esp
+; X32-NEXT: testl %eax, %eax
+; X32-NEXT: sete %al
+; X32-NEXT: retl
+;
+; X64-LABEL: length64_const:
+; X64: # BB#0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl $.L.str, %esi
+; X64-NEXT: movl $64, %edx
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: sete %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 64) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
}
diff --git a/test/CodeGen/X86/mempcpy-32.ll b/test/CodeGen/X86/mempcpy-32.ll
new file mode 100644
index 000000000000..108442f6b648
--- /dev/null
+++ b/test/CodeGen/X86/mempcpy-32.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -mtriple=i686-unknown-linux -O2 | FileCheck %s
+
+; This tests the i686 lowering of mempcpy.
+; Also see mempcpy.ll
+
+@G = common global i8* null, align 8
+
+; CHECK-LABEL: RET_MEMPCPY:
+; CHECK: movl [[REG:%e[a-z0-9]+]], {{.*}}G
+; CHECK: calll {{.*}}memcpy
+; CHECK: movl [[REG]], %eax
+;
+define i8* @RET_MEMPCPY(i8* %DST, i8* %SRC, i32 %N) {
+ %add.ptr = getelementptr inbounds i8, i8* %DST, i32 %N
+ store i8* %add.ptr, i8** @G, align 8
+ %call = tail call i8* @mempcpy(i8* %DST, i8* %SRC, i32 %N)
+ ret i8* %call
+}
+
+declare i8* @mempcpy(i8*, i8*, i32)
diff --git a/test/CodeGen/X86/mempcpy.ll b/test/CodeGen/X86/mempcpy.ll
index 1c737b644021..f8db255c1a4b 100644
--- a/test/CodeGen/X86/mempcpy.ll
+++ b/test/CodeGen/X86/mempcpy.ll
@@ -1,5 +1,4 @@
; RUN: llc < %s -mtriple=x86_64-unknown-linux -O2 | FileCheck %s
-; RUN: llc < %s -mtriple=i686-unknown-linux -O2 | FileCheck %s
; This test checks that:
; (1) mempcpy is lowered as memcpy, and
@@ -11,12 +10,15 @@
; the first instance to be reused as the return value. This allows the check for
; (2) to be expressed as verifying that the MOV to store DST+N to G and
; the MOV to copy DST+N to %rax use the same source register.
+
+; Also see mempcpy-32.ll
+
@G = common global i8* null, align 8
; CHECK-LABEL: RET_MEMPCPY:
-; CHECK: mov{{.*}} [[REG:%[er][a-z0-9]+]], {{.*}}G
-; CHECK: call{{.*}} {{.*}}memcpy
-; CHECK: mov{{.*}} [[REG]], %{{[er]}}ax
+; CHECK: movq [[REG:%r[a-z0-9]+]], {{.*}}G
+; CHECK: callq {{.*}}memcpy
+; CHECK: movq [[REG]], %rax
;
define i8* @RET_MEMPCPY(i8* %DST, i8* %SRC, i64 %N) {
%add.ptr = getelementptr inbounds i8, i8* %DST, i64 %N
diff --git a/test/CodeGen/X86/merge-consecutive-loads-128.ll b/test/CodeGen/X86/merge-consecutive-loads-128.ll
index 003e2e60521b..71417694b0d4 100644
--- a/test/CodeGen/X86/merge-consecutive-loads-128.ll
+++ b/test/CodeGen/X86/merge-consecutive-loads-128.ll
@@ -1037,12 +1037,12 @@ define <2 x i64> @merge_2i64_i64_12_volatile(i64* %ptr) nounwind uwtable noinlin
define <4 x float> @merge_4f32_f32_2345_volatile(float* %ptr) nounwind uwtable noinline ssp {
; SSE2-LABEL: merge_4f32_f32_2345_volatile:
; SSE2: # BB#0:
-; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: retq
;
@@ -1065,12 +1065,12 @@ define <4 x float> @merge_4f32_f32_2345_volatile(float* %ptr) nounwind uwtable n
; X32-SSE1-LABEL: merge_4f32_f32_2345_volatile:
; X32-SSE1: # BB#0:
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE1-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-SSE1-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X32-SSE1-NEXT: retl
;
@@ -1132,3 +1132,41 @@ define <4 x float> @merge_4f32_f32_X0YY(float* %ptr0, float* %ptr1) nounwind uwt
%res3 = insertelement <4 x float> %res2, float %val1, i32 3
ret <4 x float> %res3
}
+
+;
+; Extension tests.
+;
+
+; PR31309
+define <4 x i32> @load_i32_zext_i128_v4i32(i32* %ptr) {
+; SSE-LABEL: load_i32_zext_i128_v4i32:
+; SSE: # BB#0:
+; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: retq
+;
+; AVX-LABEL: load_i32_zext_i128_v4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: retq
+;
+; X32-SSE1-LABEL: load_i32_zext_i128_v4i32:
+; X32-SSE1: # BB#0:
+; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-SSE1-NEXT: movl (%ecx), %ecx
+; X32-SSE1-NEXT: movl %ecx, (%eax)
+; X32-SSE1-NEXT: movl $0, 12(%eax)
+; X32-SSE1-NEXT: movl $0, 8(%eax)
+; X32-SSE1-NEXT: movl $0, 4(%eax)
+; X32-SSE1-NEXT: retl $4
+;
+; X32-SSE41-LABEL: load_i32_zext_i128_v4i32:
+; X32-SSE41: # BB#0:
+; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-SSE41-NEXT: retl
+ %1 = load i32, i32* %ptr
+ %2 = zext i32 %1 to i128
+ %3 = bitcast i128 %2 to <4 x i32>
+ ret <4 x i32> %3
+}
diff --git a/test/CodeGen/X86/merge-consecutive-loads-256.ll b/test/CodeGen/X86/merge-consecutive-loads-256.ll
index c18a98e05d1d..b00d732889e3 100644
--- a/test/CodeGen/X86/merge-consecutive-loads-256.ll
+++ b/test/CodeGen/X86/merge-consecutive-loads-256.ll
@@ -625,7 +625,7 @@ define <4 x double> @merge_4f64_f64_34uz_volatile(double* %ptr) nounwind uwtable
; AVX1: # BB#0:
; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -641,7 +641,7 @@ define <4 x double> @merge_4f64_f64_34uz_volatile(double* %ptr) nounwind uwtable
; AVX512F: # BB#0:
; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
@@ -650,7 +650,7 @@ define <4 x double> @merge_4f64_f64_34uz_volatile(double* %ptr) nounwind uwtable
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; X32-AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; X32-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-AVX-NEXT: retl
%ptr0 = getelementptr inbounds double, double* %ptr, i64 3
@@ -668,10 +668,10 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile(i16* %ptr) nounwind
; AVX1: # BB#0:
; AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm1
-; AVX1-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1
; AVX1-NEXT: vpinsrw $4, 24(%rdi), %xmm0, %xmm0
; AVX1-NEXT: vpinsrw $6, 28(%rdi), %xmm0, %xmm0
; AVX1-NEXT: vpinsrw $7, 30(%rdi), %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
@@ -679,10 +679,10 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile(i16* %ptr) nounwind
; AVX2: # BB#0:
; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm1
-; AVX2-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1
; AVX2-NEXT: vpinsrw $4, 24(%rdi), %xmm0, %xmm0
; AVX2-NEXT: vpinsrw $6, 28(%rdi), %xmm0, %xmm0
; AVX2-NEXT: vpinsrw $7, 30(%rdi), %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
@@ -690,10 +690,10 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile(i16* %ptr) nounwind
; AVX512F: # BB#0:
; AVX512F-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm1
-; AVX512F-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1
; AVX512F-NEXT: vpinsrw $4, 24(%rdi), %xmm0, %xmm0
; AVX512F-NEXT: vpinsrw $6, 28(%rdi), %xmm0, %xmm0
; AVX512F-NEXT: vpinsrw $7, 30(%rdi), %xmm0, %xmm0
+; AVX512F-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1
; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX512F-NEXT: retq
;
@@ -702,10 +702,10 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile(i16* %ptr) nounwind
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: vpinsrw $0, (%eax), %xmm0, %xmm1
-; X32-AVX-NEXT: vpinsrw $3, 6(%eax), %xmm1, %xmm1
; X32-AVX-NEXT: vpinsrw $4, 24(%eax), %xmm0, %xmm0
; X32-AVX-NEXT: vpinsrw $6, 28(%eax), %xmm0, %xmm0
; X32-AVX-NEXT: vpinsrw $7, 30(%eax), %xmm0, %xmm0
+; X32-AVX-NEXT: vpinsrw $3, 6(%eax), %xmm1, %xmm1
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-AVX-NEXT: retl
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 0
diff --git a/test/CodeGen/X86/merge-consecutive-loads-512.ll b/test/CodeGen/X86/merge-consecutive-loads-512.ll
index 8259a780c8fb..c3500f0ad399 100644
--- a/test/CodeGen/X86/merge-consecutive-loads-512.ll
+++ b/test/CodeGen/X86/merge-consecutive-loads-512.ll
@@ -136,21 +136,29 @@ define <8 x double> @merge_8f64_f64_12zzuuzz(double* %ptr) nounwind uwtable noin
}
define <8 x double> @merge_8f64_f64_1u3u5zu8(double* %ptr) nounwind uwtable noinline ssp {
-; ALL-LABEL: merge_8f64_f64_1u3u5zu8:
-; ALL: # BB#0:
-; ALL-NEXT: vmovupd 8(%rdi), %zmm1
-; ALL-NEXT: vpxord %zmm2, %zmm2, %zmm2
-; ALL-NEXT: vmovapd {{.*#+}} zmm0 = <0,u,2,u,4,13,u,7>
-; ALL-NEXT: vpermi2pd %zmm2, %zmm1, %zmm0
-; ALL-NEXT: retq
+; AVX512F-LABEL: merge_8f64_f64_1u3u5zu8:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: movb $32, %al
+; AVX512F-NEXT: kmovw %eax, %k0
+; AVX512F-NEXT: knotw %k0, %k1
+; AVX512F-NEXT: vmovupd 8(%rdi), %zmm0 {%k1} {z}
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: merge_8f64_f64_1u3u5zu8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: movb $32, %al
+; AVX512BW-NEXT: kmovd %eax, %k0
+; AVX512BW-NEXT: knotw %k0, %k1
+; AVX512BW-NEXT: vmovupd 8(%rdi), %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8f64_f64_1u3u5zu8:
; X32-AVX512F: # BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vmovupd 8(%eax), %zmm1
-; X32-AVX512F-NEXT: vpxord %zmm2, %zmm2, %zmm2
-; X32-AVX512F-NEXT: vmovapd {{.*#+}} zmm0 = <0,0,u,u,2,0,u,u,4,0,13,0,u,u,7,0>
-; X32-AVX512F-NEXT: vpermi2pd %zmm2, %zmm1, %zmm0
+; X32-AVX512F-NEXT: movb $32, %cl
+; X32-AVX512F-NEXT: kmovw %ecx, %k0
+; X32-AVX512F-NEXT: knotw %k0, %k1
+; X32-AVX512F-NEXT: vmovupd 8(%eax), %zmm0 {%k1} {z}
; X32-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds double, double* %ptr, i64 1
%ptr2 = getelementptr inbounds double, double* %ptr, i64 3
@@ -223,21 +231,29 @@ define <8 x i64> @merge_8i64_i64_56zz9uzz(i64* %ptr) nounwind uwtable noinline s
}
define <8 x i64> @merge_8i64_i64_1u3u5zu8(i64* %ptr) nounwind uwtable noinline ssp {
-; ALL-LABEL: merge_8i64_i64_1u3u5zu8:
-; ALL: # BB#0:
-; ALL-NEXT: vmovdqu64 8(%rdi), %zmm1
-; ALL-NEXT: vpxord %zmm2, %zmm2, %zmm2
-; ALL-NEXT: vmovdqa64 {{.*#+}} zmm0 = <0,u,2,u,4,13,u,7>
-; ALL-NEXT: vpermi2q %zmm2, %zmm1, %zmm0
-; ALL-NEXT: retq
+; AVX512F-LABEL: merge_8i64_i64_1u3u5zu8:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: movb $32, %al
+; AVX512F-NEXT: kmovw %eax, %k0
+; AVX512F-NEXT: knotw %k0, %k1
+; AVX512F-NEXT: vmovdqu64 8(%rdi), %zmm0 {%k1} {z}
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: merge_8i64_i64_1u3u5zu8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: movb $32, %al
+; AVX512BW-NEXT: kmovd %eax, %k0
+; AVX512BW-NEXT: knotw %k0, %k1
+; AVX512BW-NEXT: vmovdqu64 8(%rdi), %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8i64_i64_1u3u5zu8:
; X32-AVX512F: # BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vmovdqu64 8(%eax), %zmm1
-; X32-AVX512F-NEXT: vpxord %zmm2, %zmm2, %zmm2
-; X32-AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm0 = <0,0,u,u,2,0,u,u,4,0,13,0,u,u,7,0>
-; X32-AVX512F-NEXT: vpermi2q %zmm2, %zmm1, %zmm0
+; X32-AVX512F-NEXT: movb $32, %cl
+; X32-AVX512F-NEXT: kmovw %ecx, %k0
+; X32-AVX512F-NEXT: knotw %k0, %k1
+; X32-AVX512F-NEXT: vmovdqu64 8(%eax), %zmm0 {%k1} {z}
; X32-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1
%ptr2 = getelementptr inbounds i64, i64* %ptr, i64 3
@@ -446,21 +462,29 @@ define <16 x i32> @merge_16i32_i32_0uu3uuuuuuuuCuEF(i32* %ptr) nounwind uwtable
}
define <16 x i32> @merge_16i32_i32_0uu3zzuuuuuzCuEF(i32* %ptr) nounwind uwtable noinline ssp {
-; ALL-LABEL: merge_16i32_i32_0uu3zzuuuuuzCuEF:
-; ALL: # BB#0:
-; ALL-NEXT: vmovdqu32 (%rdi), %zmm1
-; ALL-NEXT: vpxord %zmm2, %zmm2, %zmm2
-; ALL-NEXT: vmovdqa32 {{.*#+}} zmm0 = <0,u,u,3,20,21,u,u,u,u,u,u,12,29,14,15>
-; ALL-NEXT: vpermi2d %zmm2, %zmm1, %zmm0
-; ALL-NEXT: retq
+; AVX512F-LABEL: merge_16i32_i32_0uu3zzuuuuuzCuEF:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: movw $8240, %ax # imm = 0x2030
+; AVX512F-NEXT: kmovw %eax, %k0
+; AVX512F-NEXT: knotw %k0, %k1
+; AVX512F-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} {z}
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: merge_16i32_i32_0uu3zzuuuuuzCuEF:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: movw $8240, %ax # imm = 0x2030
+; AVX512BW-NEXT: kmovd %eax, %k0
+; AVX512BW-NEXT: knotw %k0, %k1
+; AVX512BW-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16i32_i32_0uu3zzuuuuuzCuEF:
; X32-AVX512F: # BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vmovdqu32 (%eax), %zmm1
-; X32-AVX512F-NEXT: vpxord %zmm2, %zmm2, %zmm2
-; X32-AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm0 = <0,u,u,3,20,21,u,u,u,u,u,u,12,29,14,15>
-; X32-AVX512F-NEXT: vpermi2d %zmm2, %zmm1, %zmm0
+; X32-AVX512F-NEXT: movw $8240, %cx # imm = 0x2030
+; X32-AVX512F-NEXT: kmovw %ecx, %k0
+; X32-AVX512F-NEXT: knotw %k0, %k1
+; X32-AVX512F-NEXT: vmovdqu32 (%eax), %zmm0 {%k1} {z}
; X32-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i32, i32* %ptr, i64 0
%ptr3 = getelementptr inbounds i32, i32* %ptr, i64 3
diff --git a/test/CodeGen/X86/merge-store-partially-alias-loads.ll b/test/CodeGen/X86/merge-store-partially-alias-loads.ll
index 735e64a076d0..6ca964be9570 100644
--- a/test/CodeGen/X86/merge-store-partially-alias-loads.ll
+++ b/test/CodeGen/X86/merge-store-partially-alias-loads.ll
@@ -21,11 +21,11 @@
; DBGDAG-DAG: [[LD2:t[0-9]+]]: i16,ch = load<LD2[%tmp81](align=1)> [[ENTRYTOKEN]], [[BASEPTR]], undef:i64
; DBGDAG-DAG: [[LD1:t[0-9]+]]: i8,ch = load<LD1[%tmp12]> [[ENTRYTOKEN]], [[ADDPTR]], undef:i64
-; DBGDAG: [[LOADTOKEN:t[0-9]+]]: ch = TokenFactor [[LD2]]:1, [[LD1]]:1
-
+; DBGDAG-DAG: [[ST1:t[0-9]+]]: ch = store<ST1[%tmp14]> [[ENTRYTOKEN]], [[LD1]], t{{[0-9]+}}, undef:i64
+; DBGDAG-DAG: [[LOADTOKEN:t[0-9]+]]: ch = TokenFactor [[LD2]]:1, [[LD1]]:1
; DBGDAG-DAG: [[ST2:t[0-9]+]]: ch = store<ST2[%tmp10](align=1)> [[LOADTOKEN]], [[LD2]], t{{[0-9]+}}, undef:i64
-; DBGDAG-DAG: [[ST1:t[0-9]+]]: ch = store<ST1[%tmp14]> [[ST2]], [[LD1]], t{{[0-9]+}}, undef:i64
-; DBGDAG: X86ISD::RET_FLAG [[ST1]],
+
+; DBGDAG: X86ISD::RET_FLAG t{{[0-9]+}},
; DBGDAG: Type-legalized selection DAG: BB#0 'merge_store_partial_overlap_load:'
define void @merge_store_partial_overlap_load([4 x i8]* %tmp) {
diff --git a/test/CodeGen/X86/merge_store.ll b/test/CodeGen/X86/merge_store.ll
index 2701f369bcde..31c1f6582426 100644
--- a/test/CodeGen/X86/merge_store.ll
+++ b/test/CodeGen/X86/merge_store.ll
@@ -1,5 +1,4 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -addr-sink-using-gep=1 | FileCheck %s
define void @merge_store(i32* nocapture %a) {
; CHECK-LABEL: merge_store:
diff --git a/test/CodeGen/X86/merge_store_duplicated_loads.ll b/test/CodeGen/X86/merge_store_duplicated_loads.ll
new file mode 100644
index 000000000000..cfc39035e403
--- /dev/null
+++ b/test/CodeGen/X86/merge_store_duplicated_loads.ll
@@ -0,0 +1,88 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -o - | FileCheck %s
+
+
+target triple = "x86_64-unknown-linux-gnu"
+
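+; These checks pin the current lowering: values that are loaded once but
+; stored several times are kept as individual scalar stores, exercising the
+; store-merging logic for duplicated loaded values.
+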
+define void @merge_double(double* noalias nocapture %st, double* noalias nocapture readonly %ld) #0 {
+; CHECK-LABEL: merge_double:
+; CHECK: # BB#0:
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd %xmm0, (%rdi)
+; CHECK-NEXT: movsd %xmm1, 8(%rdi)
+; CHECK-NEXT: movsd %xmm0, 16(%rdi)
+; CHECK-NEXT: movsd %xmm1, 24(%rdi)
+; CHECK-NEXT: retq
+ %ld_idx1 = getelementptr inbounds double, double* %ld, i64 1
+ %ld0 = load double, double* %ld, align 8, !tbaa !2
+ %ld1 = load double, double* %ld_idx1, align 8, !tbaa !2
+
+ %st_idx1 = getelementptr inbounds double, double* %st, i64 1
+ %st_idx2 = getelementptr inbounds double, double* %st, i64 2
+ %st_idx3 = getelementptr inbounds double, double* %st, i64 3
+
+ store double %ld0, double* %st, align 8, !tbaa !2
+ store double %ld1, double* %st_idx1, align 8, !tbaa !2
+ store double %ld0, double* %st_idx2, align 8, !tbaa !2
+ store double %ld1, double* %st_idx3, align 8, !tbaa !2
+ ret void
+}
+
+define void @merge_loadstore_int(i64* noalias nocapture readonly %p, i64* noalias nocapture %q) local_unnamed_addr #0 {
+; CHECK-LABEL: merge_loadstore_int:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: movq 8(%rdi), %rcx
+; CHECK-NEXT: movq %rax, (%rsi)
+; CHECK-NEXT: movq %rcx, 8(%rsi)
+; CHECK-NEXT: movq %rax, 16(%rsi)
+; CHECK-NEXT: movq %rcx, 24(%rsi)
+; CHECK-NEXT: retq
+entry:
+ %0 = load i64, i64* %p, align 8, !tbaa !1
+ %arrayidx1 = getelementptr inbounds i64, i64* %p, i64 1
+ %1 = load i64, i64* %arrayidx1, align 8, !tbaa !1
+ store i64 %0, i64* %q, align 8, !tbaa !1
+ %arrayidx3 = getelementptr inbounds i64, i64* %q, i64 1
+ store i64 %1, i64* %arrayidx3, align 8, !tbaa !1
+ %arrayidx4 = getelementptr inbounds i64, i64* %q, i64 2
+ store i64 %0, i64* %arrayidx4, align 8, !tbaa !1
+ %arrayidx5 = getelementptr inbounds i64, i64* %q, i64 3
+ store i64 %1, i64* %arrayidx5, align 8, !tbaa !1
+ ret void
+}
+
+define i64 @merge_loadstore_int_with_extra_use(i64* noalias nocapture readonly %p, i64* noalias nocapture %q) local_unnamed_addr #0 {
+; CHECK-LABEL: merge_loadstore_int_with_extra_use:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: movq 8(%rdi), %rcx
+; CHECK-NEXT: movq %rax, (%rsi)
+; CHECK-NEXT: movq %rcx, 8(%rsi)
+; CHECK-NEXT: movq %rax, 16(%rsi)
+; CHECK-NEXT: movq %rcx, 24(%rsi)
+; CHECK-NEXT: retq
+entry:
+ %0 = load i64, i64* %p, align 8, !tbaa !1
+ %arrayidx1 = getelementptr inbounds i64, i64* %p, i64 1
+ %1 = load i64, i64* %arrayidx1, align 8, !tbaa !1
+ store i64 %0, i64* %q, align 8, !tbaa !1
+ %arrayidx3 = getelementptr inbounds i64, i64* %q, i64 1
+ store i64 %1, i64* %arrayidx3, align 8, !tbaa !1
+ %arrayidx4 = getelementptr inbounds i64, i64* %q, i64 2
+ store i64 %0, i64* %arrayidx4, align 8, !tbaa !1
+ %arrayidx5 = getelementptr inbounds i64, i64* %q, i64 3
+ store i64 %1, i64* %arrayidx5, align 8, !tbaa !1
+ ret i64 %0
+
+}
+
+attributes #0 = { "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" }
+
+
+!0 = !{!"clang version 5.0.0 (trunk 296467) (llvm/trunk 296476)"}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"double", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/X86/misched-aa-colored.ll b/test/CodeGen/X86/misched-aa-colored.ll
index 9f8f3a946e66..e118b00fd098 100644
--- a/test/CodeGen/X86/misched-aa-colored.ll
+++ b/test/CodeGen/X86/misched-aa-colored.ll
@@ -143,10 +143,10 @@ declare { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, i32 } @_ZN4llvm
declare void @__assert_fail(i8*, i8*, i32, i8*) #0
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
; Function Attrs: nounwind uwtable
define hidden { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, i32 } @_ZN4llvm16DAGTypeLegalizer18WidenVecRes_BinaryEPNS_6SDNodeE(%"class.llvm::DAGTypeLegalizer.117.717.1077.2037.2157.2397.4197"* %this, %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"* %N) #2 align 2 {
@@ -155,13 +155,13 @@ entry:
%ref.tmp.i = alloca %"struct.std::pair.112.119.719.1079.2039.2159.2399.4199", align 8
%Op.i = alloca %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083", align 8
%0 = bitcast %"struct.std::pair.112.119.719.1079.2039.2159.2399.4199"* %ref.tmp.i to i8*
- call void @llvm.lifetime.start(i64 24, i8* %0) #1
+ call void @llvm.lifetime.start.p0i8(i64 24, i8* %0) #1
%retval.sroa.0.0.idx.i36 = getelementptr inbounds %"struct.std::pair.112.119.719.1079.2039.2159.2399.4199", %"struct.std::pair.112.119.719.1079.2039.2159.2399.4199"* %ref.tmp.i, i64 0, i32 1, i32 0, i32 0
%retval.sroa.0.0.copyload.i37 = load i32, i32* %retval.sroa.0.0.idx.i36, align 8
- call void @llvm.lifetime.end(i64 24, i8* %0) #1
+ call void @llvm.lifetime.end.p0i8(i64 24, i8* %0) #1
%agg.tmp8.sroa.2.0.copyload = load i32, i32* undef, align 8
%1 = bitcast %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* %Op.i to i8*
- call void @llvm.lifetime.start(i64 16, i8* %1) #1
+ call void @llvm.lifetime.start.p0i8(i64 16, i8* %1) #1
%2 = getelementptr %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083", %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* %Op.i, i64 0, i32 1
store i32 %agg.tmp8.sroa.2.0.copyload, i32* %2, align 8
diff --git a/test/CodeGen/X86/mmx-cvt.ll b/test/CodeGen/X86/mmx-cvt.ll
new file mode 100644
index 000000000000..8f2da9535399
--- /dev/null
+++ b/test/CodeGen/X86/mmx-cvt.ll
@@ -0,0 +1,369 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X64
+
+; If we are transferring XMM conversion results to MMX registers, we could use the MMX equivalents
+; (CVTPD2PI/CVTTPD2PI and CVTPS2PI/CVTTPS2PI) without affecting rounding, exceptions, etc.
+
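+; As an illustrative sketch (not checked below), the desired fold would turn
+; the XMM-to-MMX transfer sequence, e.g.
+;   cvtpd2dq %xmm0, %xmm0  # SSE2: convert 2 x f64 -> 2 x i32 in the low qword
+;   movdq2q  %xmm0, %mm0   # move the low 64 bits into an MMX register
+; into the single MMX conversion
+;   cvtpd2pi %xmm0, %mm0
+; assuming both forms honor MXCSR rounding and exception behavior identically.
+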
+define void @cvt_v2f64_v2i32(<2 x double>, <1 x i64>*) nounwind {
+; X86-LABEL: cvt_v2f64_v2i32:
+; X86: # BB#0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: cvtpd2pi %xmm0, %mm0
+; X86-NEXT: paddd %mm0, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, 4(%eax)
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: cvt_v2f64_v2i32:
+; X64: # BB#0:
+; X64-NEXT: cvtpd2pi %xmm0, %mm0
+; X64-NEXT: paddd %mm0, %mm0
+; X64-NEXT: movq %mm0, (%rdi)
+; X64-NEXT: retq
+ %3 = tail call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %0)
+ %4 = bitcast <4 x i32> %3 to <2 x i64>
+ %5 = extractelement <2 x i64> %4, i32 0
+ %6 = bitcast i64 %5 to x86_mmx
+ %7 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %6, x86_mmx %6)
+ %8 = bitcast x86_mmx %7 to i64
+ %9 = insertelement <1 x i64> undef, i64 %8, i32 0
+ store <1 x i64> %9, <1 x i64>* %1
+ ret void
+}
+
+define void @cvtt_v2f64_v2i32(<2 x double>, <1 x i64>*) nounwind {
+; X86-LABEL: cvtt_v2f64_v2i32:
+; X86: # BB#0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: cvttpd2pi %xmm0, %mm0
+; X86-NEXT: paddd %mm0, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, 4(%eax)
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: cvtt_v2f64_v2i32:
+; X64: # BB#0:
+; X64-NEXT: cvttpd2pi %xmm0, %mm0
+; X64-NEXT: paddd %mm0, %mm0
+; X64-NEXT: movq %mm0, (%rdi)
+; X64-NEXT: retq
+ %3 = tail call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %0)
+ %4 = bitcast <4 x i32> %3 to <2 x i64>
+ %5 = extractelement <2 x i64> %4, i32 0
+ %6 = bitcast i64 %5 to x86_mmx
+ %7 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %6, x86_mmx %6)
+ %8 = bitcast x86_mmx %7 to i64
+ %9 = insertelement <1 x i64> undef, i64 %8, i32 0
+ store <1 x i64> %9, <1 x i64>* %1
+ ret void
+}
+
+define void @fptosi_v2f64_v2i32(<2 x double>, <1 x i64>*) nounwind {
+; X86-LABEL: fptosi_v2f64_v2i32:
+; X86: # BB#0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: cvttpd2pi %xmm0, %mm0
+; X86-NEXT: paddd %mm0, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, 4(%eax)
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: fptosi_v2f64_v2i32:
+; X64: # BB#0:
+; X64-NEXT: cvttpd2pi %xmm0, %mm0
+; X64-NEXT: paddd %mm0, %mm0
+; X64-NEXT: movq %mm0, (%rdi)
+; X64-NEXT: retq
+ %3 = fptosi <2 x double> %0 to <2 x i32>
+ %4 = bitcast <2 x i32> %3 to x86_mmx
+ %5 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %4, x86_mmx %4)
+ %6 = bitcast x86_mmx %5 to i64
+ %7 = insertelement <1 x i64> undef, i64 %6, i32 0
+ store <1 x i64> %7, <1 x i64>* %1
+ ret void
+}
+
+define void @cvt_v2f32_v2i32(<4 x float>, <1 x i64>*) nounwind {
+; X86-LABEL: cvt_v2f32_v2i32:
+; X86: # BB#0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: cvtps2pi %xmm0, %mm0
+; X86-NEXT: paddd %mm0, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, 4(%eax)
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: cvt_v2f32_v2i32:
+; X64: # BB#0:
+; X64-NEXT: cvtps2pi %xmm0, %mm0
+; X64-NEXT: paddd %mm0, %mm0
+; X64-NEXT: movq %mm0, (%rdi)
+; X64-NEXT: retq
+ %3 = tail call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %0)
+ %4 = bitcast <4 x i32> %3 to <2 x i64>
+ %5 = extractelement <2 x i64> %4, i32 0
+ %6 = bitcast i64 %5 to x86_mmx
+ %7 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %6, x86_mmx %6)
+ %8 = bitcast x86_mmx %7 to i64
+ %9 = insertelement <1 x i64> undef, i64 %8, i32 0
+ store <1 x i64> %9, <1 x i64>* %1
+ ret void
+}
+
+define void @cvtt_v2f32_v2i32(<4 x float>, <1 x i64>*) nounwind {
+; X86-LABEL: cvtt_v2f32_v2i32:
+; X86: # BB#0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: cvttps2pi %xmm0, %mm0
+; X86-NEXT: paddd %mm0, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, 4(%eax)
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: cvtt_v2f32_v2i32:
+; X64: # BB#0:
+; X64-NEXT: cvttps2pi %xmm0, %mm0
+; X64-NEXT: paddd %mm0, %mm0
+; X64-NEXT: movq %mm0, (%rdi)
+; X64-NEXT: retq
+ %3 = tail call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %0)
+ %4 = bitcast <4 x i32> %3 to <2 x i64>
+ %5 = extractelement <2 x i64> %4, i32 0
+ %6 = bitcast i64 %5 to x86_mmx
+ %7 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %6, x86_mmx %6)
+ %8 = bitcast x86_mmx %7 to i64
+ %9 = insertelement <1 x i64> undef, i64 %8, i32 0
+ store <1 x i64> %9, <1 x i64>* %1
+ ret void
+}
+
+define void @fptosi_v4f32_v4i32(<4 x float>, <1 x i64>*) nounwind {
+; X86-LABEL: fptosi_v4f32_v4i32:
+; X86: # BB#0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: cvttps2pi %xmm0, %mm0
+; X86-NEXT: paddd %mm0, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, 4(%eax)
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: fptosi_v4f32_v4i32:
+; X64: # BB#0:
+; X64-NEXT: cvttps2pi %xmm0, %mm0
+; X64-NEXT: paddd %mm0, %mm0
+; X64-NEXT: movq %mm0, (%rdi)
+; X64-NEXT: retq
+ %3 = fptosi <4 x float> %0 to <4 x i32>
+ %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+ %5 = bitcast <2 x i32> %4 to x86_mmx
+ %6 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %5, x86_mmx %5)
+ %7 = bitcast x86_mmx %6 to i64
+ %8 = insertelement <1 x i64> undef, i64 %7, i32 0
+ store <1 x i64> %8, <1 x i64>* %1
+ ret void
+}
+
+define void @fptosi_v2f32_v2i32(<4 x float>, <1 x i64>*) nounwind {
+; X86-LABEL: fptosi_v2f32_v2i32:
+; X86: # BB#0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: cvttps2pi %xmm0, %mm0
+; X86-NEXT: paddd %mm0, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, 4(%eax)
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: fptosi_v2f32_v2i32:
+; X64: # BB#0:
+; X64-NEXT: cvttps2pi %xmm0, %mm0
+; X64-NEXT: paddd %mm0, %mm0
+; X64-NEXT: movq %mm0, (%rdi)
+; X64-NEXT: retq
+ %3 = fptosi <4 x float> %0 to <4 x i32>
+ %4 = bitcast <4 x i32> %3 to <2 x i64>
+ %5 = extractelement <2 x i64> %4, i32 0
+ %6 = bitcast i64 %5 to x86_mmx
+ %7 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %6, x86_mmx %6)
+ %8 = bitcast x86_mmx %7 to i64
+ %9 = insertelement <1 x i64> undef, i64 %8, i32 0
+ store <1 x i64> %9, <1 x i64>* %1
+ ret void
+}
+
+; FIXME: If we are transferring MMX registers to XMM for conversion, we could use the MMX equivalents
+; (CVTPI2PD and CVTPI2PS) without affecting rounding, exceptions, etc.
+
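+; An illustrative sketch of the missing fold (assuming identical MXCSR
+; rounding/exception behavior): the MMX-to-XMM transfer sequence
+;   movq2dq  %mm0, %xmm0   # move the MMX value into an XMM register
+;   cvtdq2pd %xmm0, %xmm0  # SSE2: convert 2 x i32 -> 2 x f64
+; could be replaced by the single MMX conversion
+;   cvtpi2pd %mm0, %xmm0
+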
+define <2 x double> @sitofp_v2i32_v2f64(<1 x i64>*) nounwind {
+; X86-LABEL: sitofp_v2i32_v2f64:
+; X86: # BB#0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: movq (%eax), %mm0
+; X86-NEXT: paddd %mm0, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: cvtdq2pd (%esp), %xmm0
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_v2i32_v2f64:
+; X64: # BB#0:
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: paddd %mm0, %mm0
+; X64-NEXT: movq2dq %mm0, %xmm0
+; X64-NEXT: cvtdq2pd %xmm0, %xmm0
+; X64-NEXT: retq
+ %2 = bitcast <1 x i64>* %0 to x86_mmx*
+ %3 = load x86_mmx, x86_mmx* %2, align 8
+ %4 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %3, x86_mmx %3)
+ %5 = bitcast x86_mmx %4 to i64
+ %6 = insertelement <2 x i64> undef, i64 %5, i32 0
+ %7 = bitcast <2 x i64> %6 to <4 x i32>
+ %8 = shufflevector <4 x i32> %7, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+ %9 = sitofp <2 x i32> %8 to <2 x double>
+ ret <2 x double> %9
+}
+
+define <4 x float> @sitofp_v2i32_v2f32(<1 x i64>*) nounwind {
+; X86-LABEL: sitofp_v2i32_v2f32:
+; X86: # BB#0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: movq (%eax), %mm0
+; X86-NEXT: paddd %mm0, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: cvtdq2ps %xmm0, %xmm0
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: sitofp_v2i32_v2f32:
+; X64: # BB#0:
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: paddd %mm0, %mm0
+; X64-NEXT: movq %mm0, -{{[0-9]+}}(%rsp)
+; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: cvtdq2ps %xmm0, %xmm0
+; X64-NEXT: retq
+ %2 = bitcast <1 x i64>* %0 to x86_mmx*
+ %3 = load x86_mmx, x86_mmx* %2, align 8
+ %4 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %3, x86_mmx %3)
+ %5 = bitcast x86_mmx %4 to <2 x i32>
+ %6 = shufflevector <2 x i32> %5, <2 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %7 = sitofp <4 x i32> %6 to <4 x float>
+ ret <4 x float> %7
+}
+
+define <4 x float> @cvt_v2i32_v2f32(<1 x i64>*) nounwind {
+; X86-LABEL: cvt_v2i32_v2f32:
+; X86: # BB#0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: movq (%eax), %mm0
+; X86-NEXT: paddd %mm0, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: cvtdq2ps %xmm0, %xmm0
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: cvt_v2i32_v2f32:
+; X64: # BB#0:
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: paddd %mm0, %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: movd %rax, %xmm0
+; X64-NEXT: cvtdq2ps %xmm0, %xmm0
+; X64-NEXT: retq
+ %2 = bitcast <1 x i64>* %0 to x86_mmx*
+ %3 = load x86_mmx, x86_mmx* %2, align 8
+ %4 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %3, x86_mmx %3)
+ %5 = bitcast x86_mmx %4 to i64
+ %6 = insertelement <2 x i64> undef, i64 %5, i32 0
+ %7 = insertelement <2 x i64> %6, i64 0, i32 1
+ %8 = bitcast <2 x i64> %7 to <4 x i32>
+ %9 = tail call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %8)
+ ret <4 x float> %9
+}
+
+declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
+declare <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double>)
+declare <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double>)
+declare <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float>)
+declare <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float>)
+declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>)
diff --git a/test/CodeGen/X86/mmx-fold-load.ll b/test/CodeGen/X86/mmx-fold-load.ll
index 2b9d30f59fd5..832743870fb4 100644
--- a/test/CodeGen/X86/mmx-fold-load.ll
+++ b/test/CodeGen/X86/mmx-fold-load.ll
@@ -1,12 +1,33 @@
-; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X64
-define i64 @t0(<1 x i64>* %a, i32* %b) {
-; CHECK-LABEL: t0:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movq (%[[REG1:[a-z]+]]), %mm0
-; CHECK-NEXT: psllq (%[[REG2:[a-z]+]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t0(<1 x i64>* %a, i32* %b) nounwind {
+; X86-LABEL: t0:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movq (%ecx), %mm0
+; X86-NEXT: movd (%eax), %mm1
+; X86-NEXT: psllq %mm1, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t0:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: movd (%rsi), %mm1
+; X64-NEXT: psllq %mm1, %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
%1 = load x86_mmx, x86_mmx* %0, align 8
@@ -17,13 +38,32 @@ entry:
}
declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32)
-define i64 @t1(<1 x i64>* %a, i32* %b) {
-; CHECK-LABEL: t1:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movq (%[[REG1]]), %mm0
-; CHECK-NEXT: psrlq (%[[REG2]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t1(<1 x i64>* %a, i32* %b) nounwind {
+; X86-LABEL: t1:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movq (%ecx), %mm0
+; X86-NEXT: movd (%eax), %mm1
+; X86-NEXT: psrlq %mm1, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t1:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: movd (%rsi), %mm1
+; X64-NEXT: psrlq %mm1, %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
%1 = load x86_mmx, x86_mmx* %0, align 8
@@ -34,13 +74,32 @@ entry:
}
declare x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx, i32)
-define i64 @t2(<1 x i64>* %a, i32* %b) {
-; CHECK-LABEL: t2:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movq (%[[REG1]]), %mm0
-; CHECK-NEXT: psllw (%[[REG2]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t2(<1 x i64>* %a, i32* %b) nounwind {
+; X86-LABEL: t2:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movq (%ecx), %mm0
+; X86-NEXT: movd (%eax), %mm1
+; X86-NEXT: psllw %mm1, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t2:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: movd (%rsi), %mm1
+; X64-NEXT: psllw %mm1, %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
%1 = load x86_mmx, x86_mmx* %0, align 8
@@ -51,13 +110,32 @@ entry:
}
declare x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx, i32)
-define i64 @t3(<1 x i64>* %a, i32* %b) {
-; CHECK-LABEL: t3:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movq (%[[REG1]]), %mm0
-; CHECK-NEXT: psrlw (%[[REG2]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t3(<1 x i64>* %a, i32* %b) nounwind {
+; X86-LABEL: t3:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movq (%ecx), %mm0
+; X86-NEXT: movd (%eax), %mm1
+; X86-NEXT: psrlw %mm1, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t3:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: movd (%rsi), %mm1
+; X64-NEXT: psrlw %mm1, %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
%1 = load x86_mmx, x86_mmx* %0, align 8
@@ -68,13 +146,32 @@ entry:
}
declare x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx, i32)
-define i64 @t4(<1 x i64>* %a, i32* %b) {
-; CHECK-LABEL: t4:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movq (%[[REG1]]), %mm0
-; CHECK-NEXT: pslld (%[[REG2]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t4(<1 x i64>* %a, i32* %b) nounwind {
+; X86-LABEL: t4:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movq (%ecx), %mm0
+; X86-NEXT: movd (%eax), %mm1
+; X86-NEXT: pslld %mm1, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t4:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: movd (%rsi), %mm1
+; X64-NEXT: pslld %mm1, %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
%1 = load x86_mmx, x86_mmx* %0, align 8
@@ -85,13 +182,32 @@ entry:
}
declare x86_mmx @llvm.x86.mmx.pslli.d(x86_mmx, i32)
-define i64 @t5(<1 x i64>* %a, i32* %b) {
-; CHECK-LABEL: t5:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movq (%[[REG1]]), %mm0
-; CHECK-NEXT: psrld (%[[REG2]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t5(<1 x i64>* %a, i32* %b) nounwind {
+; X86-LABEL: t5:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movq (%ecx), %mm0
+; X86-NEXT: movd (%eax), %mm1
+; X86-NEXT: psrld %mm1, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t5:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: movd (%rsi), %mm1
+; X64-NEXT: psrld %mm1, %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
%1 = load x86_mmx, x86_mmx* %0, align 8
@@ -102,13 +218,32 @@ entry:
}
declare x86_mmx @llvm.x86.mmx.psrli.d(x86_mmx, i32)
-define i64 @t6(<1 x i64>* %a, i32* %b) {
-; CHECK-LABEL: t6:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movq (%[[REG1]]), %mm0
-; CHECK-NEXT: psraw (%[[REG2]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t6(<1 x i64>* %a, i32* %b) nounwind {
+; X86-LABEL: t6:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movq (%ecx), %mm0
+; X86-NEXT: movd (%eax), %mm1
+; X86-NEXT: psraw %mm1, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t6:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: movd (%rsi), %mm1
+; X64-NEXT: psraw %mm1, %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
%1 = load x86_mmx, x86_mmx* %0, align 8
@@ -119,13 +254,32 @@ entry:
}
declare x86_mmx @llvm.x86.mmx.psrai.w(x86_mmx, i32)
-define i64 @t7(<1 x i64>* %a, i32* %b) {
-; CHECK-LABEL: t7:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movq (%[[REG1]]), %mm0
-; CHECK-NEXT: psrad (%[[REG2]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t7(<1 x i64>* %a, i32* %b) nounwind {
+; X86-LABEL: t7:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movq (%ecx), %mm0
+; X86-NEXT: movd (%eax), %mm1
+; X86-NEXT: psrad %mm1, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t7:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: movd (%rsi), %mm1
+; X64-NEXT: psrad %mm1, %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
%1 = load x86_mmx, x86_mmx* %0, align 8
@@ -136,13 +290,29 @@ entry:
}
declare x86_mmx @llvm.x86.mmx.psrai.d(x86_mmx, i32)
-define i64 @tt0(x86_mmx %t, x86_mmx* %q) {
-; CHECK-LABEL: tt0:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: paddb (%[[REG3:[a-z]+]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i64 @tt0(x86_mmx %t, x86_mmx* %q) nounwind {
+; X86-LABEL: tt0:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: paddb (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: emms
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: tt0:
+; X64: # BB#0: # %entry
+; X64-NEXT: paddb (%rdi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: emms
+; X64-NEXT: retq
entry:
%v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %v)
@@ -153,13 +323,29 @@ entry:
declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
declare void @llvm.x86.mmx.emms()
-define i64 @tt1(x86_mmx %t, x86_mmx* %q) {
-; CHECK-LABEL: tt1:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: paddw (%[[REG3]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i64 @tt1(x86_mmx %t, x86_mmx* %q) nounwind {
+; X86-LABEL: tt1:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: paddw (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: emms
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: tt1:
+; X64: # BB#0: # %entry
+; X64-NEXT: paddw (%rdi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: emms
+; X64-NEXT: retq
entry:
%v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %v)
@@ -169,13 +355,29 @@ entry:
}
declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
-define i64 @tt2(x86_mmx %t, x86_mmx* %q) {
-; CHECK-LABEL: tt2:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: paddd (%[[REG3]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i64 @tt2(x86_mmx %t, x86_mmx* %q) nounwind {
+; X86-LABEL: tt2:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: paddd (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: emms
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: tt2:
+; X64: # BB#0: # %entry
+; X64-NEXT: paddd (%rdi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: emms
+; X64-NEXT: retq
entry:
%v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %v)
@@ -185,13 +387,29 @@ entry:
}
declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
-define i64 @tt3(x86_mmx %t, x86_mmx* %q) {
-; CHECK-LABEL: tt3:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: paddq (%[[REG3]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i64 @tt3(x86_mmx %t, x86_mmx* %q) nounwind {
+; X86-LABEL: tt3:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: paddq (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: emms
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: tt3:
+; X64: # BB#0: # %entry
+; X64-NEXT: paddq (%rdi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: emms
+; X64-NEXT: retq
entry:
%v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %v)
@@ -201,13 +419,29 @@ entry:
}
declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
-define i64 @tt4(x86_mmx %t, x86_mmx* %q) {
-; CHECK-LABEL: tt4:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: paddusb (%[[REG3]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i64 @tt4(x86_mmx %t, x86_mmx* %q) nounwind {
+; X86-LABEL: tt4:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: paddusb (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: emms
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: tt4:
+; X64: # BB#0: # %entry
+; X64-NEXT: paddusb (%rdi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: emms
+; X64-NEXT: retq
entry:
%v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx %t, x86_mmx %v)
@@ -217,13 +451,29 @@ entry:
}
declare x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx, x86_mmx)
-define i64 @tt5(x86_mmx %t, x86_mmx* %q) {
-; CHECK-LABEL: tt5:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: paddusw (%[[REG3]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i64 @tt5(x86_mmx %t, x86_mmx* %q) nounwind {
+; X86-LABEL: tt5:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: paddusw (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: emms
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: tt5:
+; X64: # BB#0: # %entry
+; X64-NEXT: paddusw (%rdi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: emms
+; X64-NEXT: retq
entry:
%v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %t, x86_mmx %v)
@@ -233,13 +483,29 @@ entry:
}
declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
-define i64 @tt6(x86_mmx %t, x86_mmx* %q) {
-; CHECK-LABEL: tt6:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: psrlw (%[[REG3]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i64 @tt6(x86_mmx %t, x86_mmx* %q) nounwind {
+; X86-LABEL: tt6:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: psrlw (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: emms
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: tt6:
+; X64: # BB#0: # %entry
+; X64-NEXT: psrlw (%rdi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: emms
+; X64-NEXT: retq
entry:
%v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx %t, x86_mmx %v)
@@ -249,13 +515,29 @@ entry:
}
declare x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx, x86_mmx)
-define i64 @tt7(x86_mmx %t, x86_mmx* %q) {
-; CHECK-LABEL: tt7:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: psrld (%[[REG3]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i64 @tt7(x86_mmx %t, x86_mmx* %q) nounwind {
+; X86-LABEL: tt7:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: psrld (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: emms
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: tt7:
+; X64: # BB#0: # %entry
+; X64-NEXT: psrld (%rdi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: emms
+; X64-NEXT: retq
entry:
%v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx %t, x86_mmx %v)
@@ -265,13 +547,29 @@ entry:
}
declare x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx, x86_mmx)
-define i64 @tt8(x86_mmx %t, x86_mmx* %q) {
-; CHECK-LABEL: tt8:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: psrlq (%[[REG3]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i64 @tt8(x86_mmx %t, x86_mmx* %q) nounwind {
+; X86-LABEL: tt8:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: psrlq (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: emms
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: tt8:
+; X64: # BB#0: # %entry
+; X64-NEXT: psrlq (%rdi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: emms
+; X64-NEXT: retq
entry:
%v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx %t, x86_mmx %v)
@@ -280,3 +578,46 @@ entry:
ret i64 %s
}
declare x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx, x86_mmx)
+
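+; The shift amount below comes from a volatile load, so it must be performed
+; exactly once and must not be folded away into the psrlq.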
+define void @test_psrlq_by_volatile_shift_amount(x86_mmx* %t) nounwind {
+; X86-LABEL: test_psrlq_by_volatile_shift_amount:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: movl $1, {{[0-9]+}}(%esp)
+; X86-NEXT: movd {{[0-9]+}}(%esp), %mm0
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $255, {{[0-9]+}}(%esp)
+; X86-NEXT: movq {{[0-9]+}}(%esp), %mm1
+; X86-NEXT: psrlq %mm0, %mm1
+; X86-NEXT: movq %mm1, (%eax)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: test_psrlq_by_volatile_shift_amount:
+; X64: # BB#0: # %entry
+; X64-NEXT: movl $1, -{{[0-9]+}}(%rsp)
+; X64-NEXT: movd -{{[0-9]+}}(%rsp), %mm0
+; X64-NEXT: movl $255, %eax
+; X64-NEXT: movd %rax, %mm1
+; X64-NEXT: psrlq %mm0, %mm1
+; X64-NEXT: movq %mm1, (%rdi)
+; X64-NEXT: retq
+entry:
+ %0 = alloca i32, align 4
+ %1 = bitcast i32* %0 to i8*
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %1)
+ store volatile i32 1, i32* %0, align 4
+ %2 = load volatile i32, i32* %0, align 4
+ %3 = tail call x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx bitcast (<1 x i64> <i64 255> to x86_mmx), i32 %2)
+ store x86_mmx %3, x86_mmx* %t, align 8
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %1)
+ ret void
+}
+
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
diff --git a/test/CodeGen/X86/mul-constant-i16.ll b/test/CodeGen/X86/mul-constant-i16.ll
new file mode 100644
index 000000000000..e3e2737cf3e6
--- /dev/null
+++ b/test/CodeGen/X86/mul-constant-i16.ll
@@ -0,0 +1,589 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
+
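+; Check how small constant multiplies are lowered: powers of two use
+; SHL/ADD/LEA, 3/5/9-style factors use LEA chains (sometimes combined with
+; SHL or SUB), and the remaining constants fall back to IMUL.
+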
+define i16 @test_mul_by_1(i16 %x) {
+; X86-LABEL: test_mul_by_1:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_1:
+; X64: # BB#0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 1
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_2(i16 %x) {
+; X86-LABEL: test_mul_by_2:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl %eax, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_2:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (%rdi,%rdi), %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 2
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_3(i16 %x) {
+; X86-LABEL: test_mul_by_3:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal (%eax,%eax,2), %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_3:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (%rdi,%rdi,2), %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 3
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_4(i16 %x) {
+; X86-LABEL: test_mul_by_4:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $2, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_4:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (,%rdi,4), %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 4
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_5(i16 %x) {
+; X86-LABEL: test_mul_by_5:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal (%eax,%eax,4), %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_5:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (%rdi,%rdi,4), %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 5
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_6(i16 %x) {
+; X86-LABEL: test_mul_by_6:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl %eax, %eax
+; X86-NEXT: leal (%eax,%eax,2), %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_6:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: addl %edi, %edi
+; X64-NEXT: leal (%rdi,%rdi,2), %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 6
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_7(i16 %x) {
+; X86-LABEL: test_mul_by_7:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: leal (,%ecx,8), %eax
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_7:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (,%rdi,8), %eax
+; X64-NEXT: subl %edi, %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 7
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_8(i16 %x) {
+; X86-LABEL: test_mul_by_8:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $3, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_8:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (,%rdi,8), %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 8
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_9(i16 %x) {
+; X86-LABEL: test_mul_by_9:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal (%eax,%eax,8), %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_9:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (%rdi,%rdi,8), %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 9
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_10(i16 %x) {
+; X86-LABEL: test_mul_by_10:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl %eax, %eax
+; X86-NEXT: leal (%eax,%eax,4), %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_10:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: addl %edi, %edi
+; X64-NEXT: leal (%rdi,%rdi,4), %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 10
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_11(i16 %x) {
+; X86-LABEL: test_mul_by_11:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull $11, %eax, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_11:
+; X64: # BB#0:
+; X64-NEXT: imull $11, %edi, %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 11
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_12(i16 %x) {
+; X86-LABEL: test_mul_by_12:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $2, %eax
+; X86-NEXT: leal (%eax,%eax,2), %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_12:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: shll $2, %edi
+; X64-NEXT: leal (%rdi,%rdi,2), %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 12
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_13(i16 %x) {
+; X86-LABEL: test_mul_by_13:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull $13, %eax, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_13:
+; X64: # BB#0:
+; X64-NEXT: imull $13, %edi, %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 13
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_14(i16 %x) {
+; X86-LABEL: test_mul_by_14:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull $14, %eax, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_14:
+; X64: # BB#0:
+; X64-NEXT: imull $14, %edi, %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 14
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_15(i16 %x) {
+; X86-LABEL: test_mul_by_15:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal (%eax,%eax,4), %eax
+; X86-NEXT: leal (%eax,%eax,2), %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_15:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (%rdi,%rdi,4), %eax
+; X64-NEXT: leal (%rax,%rax,2), %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 15
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_16(i16 %x) {
+; X86-LABEL: test_mul_by_16:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $4, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_16:
+; X64: # BB#0:
+; X64-NEXT: shll $4, %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 16
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_17(i16 %x) {
+; X86-LABEL: test_mul_by_17:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: shll $4, %eax
+; X86-NEXT: addl %ecx, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_17:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $4, %eax
+; X64-NEXT: leal (%rax,%rdi), %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 17
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_18(i16 %x) {
+; X86-LABEL: test_mul_by_18:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl %eax, %eax
+; X86-NEXT: leal (%eax,%eax,8), %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_18:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: addl %edi, %edi
+; X64-NEXT: leal (%rdi,%rdi,8), %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 18
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_19(i16 %x) {
+; X86-LABEL: test_mul_by_19:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull $19, %eax, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_19:
+; X64: # BB#0:
+; X64-NEXT: imull $19, %edi, %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 19
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_20(i16 %x) {
+; X86-LABEL: test_mul_by_20:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $2, %eax
+; X86-NEXT: leal (%eax,%eax,4), %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_20:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: shll $2, %edi
+; X64-NEXT: leal (%rdi,%rdi,4), %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 20
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_21(i16 %x) {
+; X86-LABEL: test_mul_by_21:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull $21, %eax, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_21:
+; X64: # BB#0:
+; X64-NEXT: imull $21, %edi, %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 21
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_22(i16 %x) {
+; X86-LABEL: test_mul_by_22:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull $22, %eax, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_22:
+; X64: # BB#0:
+; X64-NEXT: imull $22, %edi, %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 22
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_23(i16 %x) {
+; X86-LABEL: test_mul_by_23:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull $23, %eax, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_23:
+; X64: # BB#0:
+; X64-NEXT: imull $23, %edi, %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 23
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_24(i16 %x) {
+; X86-LABEL: test_mul_by_24:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $3, %eax
+; X86-NEXT: leal (%eax,%eax,2), %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_24:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: shll $3, %edi
+; X64-NEXT: leal (%rdi,%rdi,2), %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 24
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_25(i16 %x) {
+; X86-LABEL: test_mul_by_25:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal (%eax,%eax,4), %eax
+; X86-NEXT: leal (%eax,%eax,4), %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_25:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (%rdi,%rdi,4), %eax
+; X64-NEXT: leal (%rax,%rax,4), %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 25
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_26(i16 %x) {
+; X86-LABEL: test_mul_by_26:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull $26, %eax, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_26:
+; X64: # BB#0:
+; X64-NEXT: imull $26, %edi, %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 26
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_27(i16 %x) {
+; X86-LABEL: test_mul_by_27:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal (%eax,%eax,8), %eax
+; X86-NEXT: leal (%eax,%eax,2), %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_27:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (%rdi,%rdi,8), %eax
+; X64-NEXT: leal (%rax,%rax,2), %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 27
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_28(i16 %x) {
+; X86-LABEL: test_mul_by_28:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull $28, %eax, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_28:
+; X64: # BB#0:
+; X64-NEXT: imull $28, %edi, %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 28
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_29(i16 %x) {
+; X86-LABEL: test_mul_by_29:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull $29, %eax, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_29:
+; X64: # BB#0:
+; X64-NEXT: imull $29, %edi, %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 29
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_30(i16 %x) {
+; X86-LABEL: test_mul_by_30:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull $30, %eax, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_30:
+; X64: # BB#0:
+; X64-NEXT: imull $30, %edi, %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 30
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_31(i16 %x) {
+; X86-LABEL: test_mul_by_31:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: shll $5, %eax
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_31:
+; X64: # BB#0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $5, %eax
+; X64-NEXT: subl %edi, %eax
+; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 31
+ ret i16 %mul
+}
+
+define i16 @test_mul_by_32(i16 %x) {
+; X86-LABEL: test_mul_by_32:
+; X86: # BB#0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $5, %eax
+; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_32:
+; X64: # BB#0:
+; X64-NEXT: shll $5, %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i16 %x, 32
+ ret i16 %mul
+}
diff --git a/test/CodeGen/X86/mul-constant-i32.ll b/test/CodeGen/X86/mul-constant-i32.ll
new file mode 100644
index 000000000000..76e46e1f1b09
--- /dev/null
+++ b/test/CodeGen/X86/mul-constant-i32.ll
@@ -0,0 +1,515 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
+
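+; A sketch of the strength reduction the checks below exercise (not itself
+; part of the autogenerated assertions): LEA folds base + index*{1,2,4,8},
+; so small constant multiplies decompose into shift/LEA/sub chains, e.g.:
+;   9*x  = x + 8*x    -> leal (%rdi,%rdi,8), %eax
+;   20*x = 5*(4*x)    -> shll $2, %edi ; leal (%rdi,%rdi,4), %eax
+;   31*x = 32*x - x   -> shll $5, %eax ; subl %edi, %eax
+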
+define i32 @test_mul_by_1(i32 %x) {
+; X86-LABEL: test_mul_by_1:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_1:
+; X64: # BB#0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 1
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_2(i32 %x) {
+; X86-LABEL: test_mul_by_2:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_2:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (%rdi,%rdi), %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 2
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_3(i32 %x) {
+; X86-LABEL: test_mul_by_3:
+; X86: # BB#0:
+; X86-NEXT: imull $3, {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_3:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (%rdi,%rdi,2), %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 3
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_4(i32 %x) {
+; X86-LABEL: test_mul_by_4:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $2, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_4:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (,%rdi,4), %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 4
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_5(i32 %x) {
+; X86-LABEL: test_mul_by_5:
+; X86: # BB#0:
+; X86-NEXT: imull $5, {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_5:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (%rdi,%rdi,4), %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 5
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_6(i32 %x) {
+; X86-LABEL: test_mul_by_6:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl %eax, %eax
+; X86-NEXT: leal (%eax,%eax,2), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_6:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: addl %edi, %edi
+; X64-NEXT: leal (%rdi,%rdi,2), %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 6
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_7(i32 %x) {
+; X86-LABEL: test_mul_by_7:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: leal (,%ecx,8), %eax
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_7:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (,%rdi,8), %eax
+; X64-NEXT: subl %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 7
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_8(i32 %x) {
+; X86-LABEL: test_mul_by_8:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $3, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_8:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (,%rdi,8), %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 8
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_9(i32 %x) {
+; X86-LABEL: test_mul_by_9:
+; X86: # BB#0:
+; X86-NEXT: imull $9, {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_9:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (%rdi,%rdi,8), %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 9
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_10(i32 %x) {
+; X86-LABEL: test_mul_by_10:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl %eax, %eax
+; X86-NEXT: leal (%eax,%eax,4), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_10:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: addl %edi, %edi
+; X64-NEXT: leal (%rdi,%rdi,4), %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 10
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_11(i32 %x) {
+; X86-LABEL: test_mul_by_11:
+; X86: # BB#0:
+; X86-NEXT: imull $11, {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_11:
+; X64: # BB#0:
+; X64-NEXT: imull $11, %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 11
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_12(i32 %x) {
+; X86-LABEL: test_mul_by_12:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $2, %eax
+; X86-NEXT: leal (%eax,%eax,2), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_12:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: shll $2, %edi
+; X64-NEXT: leal (%rdi,%rdi,2), %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 12
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_13(i32 %x) {
+; X86-LABEL: test_mul_by_13:
+; X86: # BB#0:
+; X86-NEXT: imull $13, {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_13:
+; X64: # BB#0:
+; X64-NEXT: imull $13, %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 13
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_14(i32 %x) {
+; X86-LABEL: test_mul_by_14:
+; X86: # BB#0:
+; X86-NEXT: imull $14, {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_14:
+; X64: # BB#0:
+; X64-NEXT: imull $14, %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 14
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_15(i32 %x) {
+; X86-LABEL: test_mul_by_15:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal (%eax,%eax,4), %eax
+; X86-NEXT: leal (%eax,%eax,2), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_15:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (%rdi,%rdi,4), %eax
+; X64-NEXT: leal (%rax,%rax,2), %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 15
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_16(i32 %x) {
+; X86-LABEL: test_mul_by_16:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $4, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_16:
+; X64: # BB#0:
+; X64-NEXT: shll $4, %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 16
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_17(i32 %x) {
+; X86-LABEL: test_mul_by_17:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: shll $4, %eax
+; X86-NEXT: addl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_17:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $4, %eax
+; X64-NEXT: leal (%rax,%rdi), %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 17
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_18(i32 %x) {
+; X86-LABEL: test_mul_by_18:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl %eax, %eax
+; X86-NEXT: leal (%eax,%eax,8), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_18:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: addl %edi, %edi
+; X64-NEXT: leal (%rdi,%rdi,8), %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 18
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_19(i32 %x) {
+; X86-LABEL: test_mul_by_19:
+; X86: # BB#0:
+; X86-NEXT: imull $19, {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_19:
+; X64: # BB#0:
+; X64-NEXT: imull $19, %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 19
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_20(i32 %x) {
+; X86-LABEL: test_mul_by_20:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $2, %eax
+; X86-NEXT: leal (%eax,%eax,4), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_20:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: shll $2, %edi
+; X64-NEXT: leal (%rdi,%rdi,4), %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 20
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_21(i32 %x) {
+; X86-LABEL: test_mul_by_21:
+; X86: # BB#0:
+; X86-NEXT: imull $21, {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_21:
+; X64: # BB#0:
+; X64-NEXT: imull $21, %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 21
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_22(i32 %x) {
+; X86-LABEL: test_mul_by_22:
+; X86: # BB#0:
+; X86-NEXT: imull $22, {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_22:
+; X64: # BB#0:
+; X64-NEXT: imull $22, %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 22
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_23(i32 %x) {
+; X86-LABEL: test_mul_by_23:
+; X86: # BB#0:
+; X86-NEXT: imull $23, {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_23:
+; X64: # BB#0:
+; X64-NEXT: imull $23, %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 23
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_24(i32 %x) {
+; X86-LABEL: test_mul_by_24:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $3, %eax
+; X86-NEXT: leal (%eax,%eax,2), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_24:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: shll $3, %edi
+; X64-NEXT: leal (%rdi,%rdi,2), %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 24
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_25(i32 %x) {
+; X86-LABEL: test_mul_by_25:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal (%eax,%eax,4), %eax
+; X86-NEXT: leal (%eax,%eax,4), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_25:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (%rdi,%rdi,4), %eax
+; X64-NEXT: leal (%rax,%rax,4), %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 25
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_26(i32 %x) {
+; X86-LABEL: test_mul_by_26:
+; X86: # BB#0:
+; X86-NEXT: imull $26, {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_26:
+; X64: # BB#0:
+; X64-NEXT: imull $26, %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 26
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_27(i32 %x) {
+; X86-LABEL: test_mul_by_27:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal (%eax,%eax,8), %eax
+; X86-NEXT: leal (%eax,%eax,2), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_27:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: leal (%rdi,%rdi,8), %eax
+; X64-NEXT: leal (%rax,%rax,2), %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 27
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_28(i32 %x) {
+; X86-LABEL: test_mul_by_28:
+; X86: # BB#0:
+; X86-NEXT: imull $28, {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_28:
+; X64: # BB#0:
+; X64-NEXT: imull $28, %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 28
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_29(i32 %x) {
+; X86-LABEL: test_mul_by_29:
+; X86: # BB#0:
+; X86-NEXT: imull $29, {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_29:
+; X64: # BB#0:
+; X64-NEXT: imull $29, %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 29
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_30(i32 %x) {
+; X86-LABEL: test_mul_by_30:
+; X86: # BB#0:
+; X86-NEXT: imull $30, {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_30:
+; X64: # BB#0:
+; X64-NEXT: imull $30, %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 30
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_31(i32 %x) {
+; X86-LABEL: test_mul_by_31:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: shll $5, %eax
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_31:
+; X64: # BB#0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $5, %eax
+; X64-NEXT: subl %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 31
+ ret i32 %mul
+}
+
+define i32 @test_mul_by_32(i32 %x) {
+; X86-LABEL: test_mul_by_32:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $5, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_32:
+; X64: # BB#0:
+; X64-NEXT: shll $5, %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+ %mul = mul nsw i32 %x, 32
+ ret i32 %mul
+}
diff --git a/test/CodeGen/X86/mul-constant-i64.ll b/test/CodeGen/X86/mul-constant-i64.ll
new file mode 100644
index 000000000000..8579179a8231
--- /dev/null
+++ b/test/CodeGen/X86/mul-constant-i64.ll
@@ -0,0 +1,581 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
+
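+; On the i686 side the 64-bit multiply splits at the 32-bit boundary. A
+; sketch of the arithmetic, writing x = 2^32*hi + lo:
+;   c*x mod 2^64 = c*lo + 2^32*((c*hi) mod 2^32)
+; so 'mull' yields the full 64-bit c*lo in edx:eax, while the low half of
+; c*hi is formed with imull or lea and folded into %edx.
+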
+define i64 @test_mul_by_1(i64 %x) {
+; X86-LABEL: test_mul_by_1:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_1:
+; X64: # BB#0:
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 1
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_2(i64 %x) {
+; X86-LABEL: test_mul_by_2:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: shldl $1, %eax, %edx
+; X86-NEXT: addl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_2:
+; X64: # BB#0:
+; X64-NEXT: leaq (%rdi,%rdi), %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 2
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_3(i64 %x) {
+; X86-LABEL: test_mul_by_3:
+; X86: # BB#0:
+; X86-NEXT: movl $3, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: imull $3, {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_3:
+; X64: # BB#0:
+; X64-NEXT: leaq (%rdi,%rdi,2), %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 3
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_4(i64 %x) {
+; X86-LABEL: test_mul_by_4:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: shldl $2, %eax, %edx
+; X86-NEXT: shll $2, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_4:
+; X64: # BB#0:
+; X64-NEXT: leaq (,%rdi,4), %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 4
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_5(i64 %x) {
+; X86-LABEL: test_mul_by_5:
+; X86: # BB#0:
+; X86-NEXT: movl $5, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: imull $5, {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_5:
+; X64: # BB#0:
+; X64-NEXT: leaq (%rdi,%rdi,4), %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 5
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_6(i64 %x) {
+; X86-LABEL: test_mul_by_6:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal (%eax,%eax,2), %ecx
+; X86-NEXT: movl $6, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: leal (%edx,%ecx,2), %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_6:
+; X64: # BB#0:
+; X64-NEXT: addq %rdi, %rdi
+; X64-NEXT: leaq (%rdi,%rdi,2), %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 6
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_7(i64 %x) {
+; X86-LABEL: test_mul_by_7:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal (,%eax,8), %ecx
+; X86-NEXT: subl %eax, %ecx
+; X86-NEXT: movl $7, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_7:
+; X64: # BB#0:
+; X64-NEXT: leaq (,%rdi,8), %rax
+; X64-NEXT: subq %rdi, %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 7
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_8(i64 %x) {
+; X86-LABEL: test_mul_by_8:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: shldl $3, %eax, %edx
+; X86-NEXT: shll $3, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_8:
+; X64: # BB#0:
+; X64-NEXT: leaq (,%rdi,8), %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 8
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_9(i64 %x) {
+; X86-LABEL: test_mul_by_9:
+; X86: # BB#0:
+; X86-NEXT: movl $9, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: imull $9, {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_9:
+; X64: # BB#0:
+; X64-NEXT: leaq (%rdi,%rdi,8), %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 9
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_10(i64 %x) {
+; X86-LABEL: test_mul_by_10:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal (%eax,%eax,4), %ecx
+; X86-NEXT: movl $10, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: leal (%edx,%ecx,2), %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_10:
+; X64: # BB#0:
+; X64-NEXT: addq %rdi, %rdi
+; X64-NEXT: leaq (%rdi,%rdi,4), %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 10
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_11(i64 %x) {
+; X86-LABEL: test_mul_by_11:
+; X86: # BB#0:
+; X86-NEXT: movl $11, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: imull $11, {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_11:
+; X64: # BB#0:
+; X64-NEXT: imulq $11, %rdi, %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 11
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_12(i64 %x) {
+; X86-LABEL: test_mul_by_12:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal (%eax,%eax,2), %ecx
+; X86-NEXT: movl $12, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: leal (%edx,%ecx,4), %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_12:
+; X64: # BB#0:
+; X64-NEXT: shlq $2, %rdi
+; X64-NEXT: leaq (%rdi,%rdi,2), %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 12
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_13(i64 %x) {
+; X86-LABEL: test_mul_by_13:
+; X86: # BB#0:
+; X86-NEXT: movl $13, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: imull $13, {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_13:
+; X64: # BB#0:
+; X64-NEXT: imulq $13, %rdi, %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 13
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_14(i64 %x) {
+; X86-LABEL: test_mul_by_14:
+; X86: # BB#0:
+; X86-NEXT: movl $14, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: imull $14, {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_14:
+; X64: # BB#0:
+; X64-NEXT: imulq $14, %rdi, %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 14
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_15(i64 %x) {
+; X86-LABEL: test_mul_by_15:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $15, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: leal (%ecx,%ecx,4), %ecx
+; X86-NEXT: leal (%ecx,%ecx,2), %ecx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_15:
+; X64: # BB#0:
+; X64-NEXT: leaq (%rdi,%rdi,4), %rax
+; X64-NEXT: leaq (%rax,%rax,2), %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 15
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_16(i64 %x) {
+; X86-LABEL: test_mul_by_16:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: shldl $4, %eax, %edx
+; X86-NEXT: shll $4, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_16:
+; X64: # BB#0:
+; X64-NEXT: shlq $4, %rdi
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 16
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_17(i64 %x) {
+; X86-LABEL: test_mul_by_17:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: shll $4, %ecx
+; X86-NEXT: addl %eax, %ecx
+; X86-NEXT: movl $17, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_17:
+; X64: # BB#0:
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: shlq $4, %rax
+; X64-NEXT: leaq (%rax,%rdi), %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 17
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_18(i64 %x) {
+; X86-LABEL: test_mul_by_18:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal (%eax,%eax,8), %ecx
+; X86-NEXT: movl $18, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: leal (%edx,%ecx,2), %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_18:
+; X64: # BB#0:
+; X64-NEXT: addq %rdi, %rdi
+; X64-NEXT: leaq (%rdi,%rdi,8), %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 18
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_19(i64 %x) {
+; X86-LABEL: test_mul_by_19:
+; X86: # BB#0:
+; X86-NEXT: movl $19, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: imull $19, {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_19:
+; X64: # BB#0:
+; X64-NEXT: imulq $19, %rdi, %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 19
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_20(i64 %x) {
+; X86-LABEL: test_mul_by_20:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal (%eax,%eax,4), %ecx
+; X86-NEXT: movl $20, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: leal (%edx,%ecx,4), %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_20:
+; X64: # BB#0:
+; X64-NEXT: shlq $2, %rdi
+; X64-NEXT: leaq (%rdi,%rdi,4), %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 20
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_21(i64 %x) {
+; X86-LABEL: test_mul_by_21:
+; X86: # BB#0:
+; X86-NEXT: movl $21, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: imull $21, {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_21:
+; X64: # BB#0:
+; X64-NEXT: imulq $21, %rdi, %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 21
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_22(i64 %x) {
+; X86-LABEL: test_mul_by_22:
+; X86: # BB#0:
+; X86-NEXT: movl $22, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: imull $22, {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_22:
+; X64: # BB#0:
+; X64-NEXT: imulq $22, %rdi, %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 22
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_23(i64 %x) {
+; X86-LABEL: test_mul_by_23:
+; X86: # BB#0:
+; X86-NEXT: movl $23, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: imull $23, {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_23:
+; X64: # BB#0:
+; X64-NEXT: imulq $23, %rdi, %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 23
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_24(i64 %x) {
+; X86-LABEL: test_mul_by_24:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal (%eax,%eax,2), %ecx
+; X86-NEXT: movl $24, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: leal (%edx,%ecx,8), %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_24:
+; X64: # BB#0:
+; X64-NEXT: shlq $3, %rdi
+; X64-NEXT: leaq (%rdi,%rdi,2), %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 24
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_25(i64 %x) {
+; X86-LABEL: test_mul_by_25:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $25, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: leal (%ecx,%ecx,4), %ecx
+; X86-NEXT: leal (%ecx,%ecx,4), %ecx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_25:
+; X64: # BB#0:
+; X64-NEXT: leaq (%rdi,%rdi,4), %rax
+; X64-NEXT: leaq (%rax,%rax,4), %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 25
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_26(i64 %x) {
+; X86-LABEL: test_mul_by_26:
+; X86: # BB#0:
+; X86-NEXT: movl $26, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: imull $26, {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_26:
+; X64: # BB#0:
+; X64-NEXT: imulq $26, %rdi, %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 26
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_27(i64 %x) {
+; X86-LABEL: test_mul_by_27:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $27, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: leal (%ecx,%ecx,8), %ecx
+; X86-NEXT: leal (%ecx,%ecx,2), %ecx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_27:
+; X64: # BB#0:
+; X64-NEXT: leaq (%rdi,%rdi,8), %rax
+; X64-NEXT: leaq (%rax,%rax,2), %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 27
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_28(i64 %x) {
+; X86-LABEL: test_mul_by_28:
+; X86: # BB#0:
+; X86-NEXT: movl $28, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: imull $28, {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_28:
+; X64: # BB#0:
+; X64-NEXT: imulq $28, %rdi, %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 28
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_29(i64 %x) {
+; X86-LABEL: test_mul_by_29:
+; X86: # BB#0:
+; X86-NEXT: movl $29, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: imull $29, {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_29:
+; X64: # BB#0:
+; X64-NEXT: imulq $29, %rdi, %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 29
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_30(i64 %x) {
+; X86-LABEL: test_mul_by_30:
+; X86: # BB#0:
+; X86-NEXT: movl $30, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: imull $30, {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_30:
+; X64: # BB#0:
+; X64-NEXT: imulq $30, %rdi, %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 30
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_31(i64 %x) {
+; X86-LABEL: test_mul_by_31:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: shll $5, %ecx
+; X86-NEXT: subl %eax, %ecx
+; X86-NEXT: movl $31, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_31:
+; X64: # BB#0:
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: shlq $5, %rax
+; X64-NEXT: subq %rdi, %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 31
+ ret i64 %mul
+}
+
+define i64 @test_mul_by_32(i64 %x) {
+; X86-LABEL: test_mul_by_32:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: shldl $5, %eax, %edx
+; X86-NEXT: shll $5, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mul_by_32:
+; X64: # BB#0:
+; X64-NEXT: shlq $5, %rdi
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: retq
+ %mul = mul nsw i64 %x, 32
+ ret i64 %mul
+}
diff --git a/test/CodeGen/X86/mul-i256.ll b/test/CodeGen/X86/mul-i256.ll
index 8f207b8dd086..bb2989b9298e 100644
--- a/test/CodeGen/X86/mul-i256.ll
+++ b/test/CodeGen/X86/mul-i256.ll
@@ -1,8 +1,284 @@
-; RUN: llc < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i386-unknown | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
+
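+; A sketch of the lowering, writing a = 2^128*a1 + a0 and b = 2^128*b1 + b0:
+;   a*b mod 2^256 = a0*b0 + 2^128*(a0*b1 + a1*b0)
+; On i386 the partial products are computed through the compiler-rt libcall
+; __multi3; on x86-64 the whole product is open-coded with mulq/imulq.
+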
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
define void @test(i256* %a, i256* %b, i256* %out) #0 {
+; X32-LABEL: test:
+; X32: # BB#0: # %entry
+; X32-NEXT: pushl %ebp
+; X32-NEXT: .Lcfi0:
+; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: .Lcfi1:
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: .Lcfi2:
+; X32-NEXT: .cfi_def_cfa_register %ebp
+; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %edi
+; X32-NEXT: pushl %esi
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $168, %esp
+; X32-NEXT: .Lcfi3:
+; X32-NEXT: .cfi_offset %esi, -20
+; X32-NEXT: .Lcfi4:
+; X32-NEXT: .cfi_offset %edi, -16
+; X32-NEXT: .Lcfi5:
+; X32-NEXT: .cfi_offset %ebx, -12
+; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: movl 16(%eax), %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl 20(%eax), %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl 24(%eax), %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl 28(%eax), %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl 8(%eax), %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl 12(%eax), %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl (%eax), %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl 4(%eax), %ebx
+; X32-NEXT: movl 12(%ebp), %eax
+; X32-NEXT: movl 16(%eax), %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl 20(%eax), %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl 24(%eax), %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl 28(%eax), %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl (%eax), %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl 4(%eax), %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl 8(%eax), %esi
+; X32-NEXT: movl 12(%eax), %edi
+; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: pushl %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl %eax
+; X32-NEXT: calll __multi3
+; X32-NEXT: addl $32, %esp
+; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl %edi
+; X32-NEXT: pushl %esi
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $0
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: pushl %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: pushl %edi
+; X32-NEXT: pushl %eax
+; X32-NEXT: calll __multi3
+; X32-NEXT: addl $32, %esp
+; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl %eax
+; X32-NEXT: calll __multi3
+; X32-NEXT: addl $32, %esp
+; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X32-NEXT: pushl %esi
+; X32-NEXT: pushl %edi
+; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl %eax
+; X32-NEXT: calll __multi3
+; X32-NEXT: addl $32, %esp
+; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl %esi
+; X32-NEXT: pushl %edi
+; X32-NEXT: pushl %eax
+; X32-NEXT: calll __multi3
+; X32-NEXT: addl $32, %esp
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: xorl %edx, %edx
+; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: adcl %eax, %ebx
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: sbbl %eax, %eax
+; X32-NEXT: andl $1, %eax
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl %eax
+; X32-NEXT: calll __multi3
+; X32-NEXT: addl $32, %esp
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: addl %edi, %esi
+; X32-NEXT: adcl %ebx, %ecx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl 16(%ebp), %edi
+; X32-NEXT: movl %ebx, 4(%edi)
+; X32-NEXT: movl 16(%ebp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, (%ebx)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, 8(%ebx)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, 12(%ebx)
+; X32-NEXT: movl %esi, 16(%ebx)
+; X32-NEXT: movl %ecx, 20(%ebx)
+; X32-NEXT: movl %edx, 24(%ebx)
+; X32-NEXT: movl %eax, 28(%ebx)
+; X32-NEXT: leal -12(%ebp), %esp
+; X32-NEXT: popl %esi
+; X32-NEXT: popl %edi
+; X32-NEXT: popl %ebx
+; X32-NEXT: popl %ebp
+; X32-NEXT: retl
+;
+; X64-LABEL: test:
+; X64: # BB#0: # %entry
+; X64-NEXT: pushq %r15
+; X64-NEXT: .Lcfi0:
+; X64-NEXT: .cfi_def_cfa_offset 16
+; X64-NEXT: pushq %r14
+; X64-NEXT: .Lcfi1:
+; X64-NEXT: .cfi_def_cfa_offset 24
+; X64-NEXT: pushq %r12
+; X64-NEXT: .Lcfi2:
+; X64-NEXT: .cfi_def_cfa_offset 32
+; X64-NEXT: pushq %rbx
+; X64-NEXT: .Lcfi3:
+; X64-NEXT: .cfi_def_cfa_offset 40
+; X64-NEXT: .Lcfi4:
+; X64-NEXT: .cfi_offset %rbx, -40
+; X64-NEXT: .Lcfi5:
+; X64-NEXT: .cfi_offset %r12, -32
+; X64-NEXT: .Lcfi6:
+; X64-NEXT: .cfi_offset %r14, -24
+; X64-NEXT: .Lcfi7:
+; X64-NEXT: .cfi_offset %r15, -16
+; X64-NEXT: movq %rdx, %r10
+; X64-NEXT: movq (%rdi), %r14
+; X64-NEXT: movq 8(%rdi), %r8
+; X64-NEXT: movq 16(%rdi), %rcx
+; X64-NEXT: movq 16(%rsi), %rbx
+; X64-NEXT: movq (%rsi), %r12
+; X64-NEXT: movq 8(%rsi), %r15
+; X64-NEXT: movq 24(%rdi), %rdi
+; X64-NEXT: imulq %r12, %rdi
+; X64-NEXT: movq %r12, %rax
+; X64-NEXT: mulq %rcx
+; X64-NEXT: movq %rax, %r9
+; X64-NEXT: addq %rdi, %rdx
+; X64-NEXT: imulq %r15, %rcx
+; X64-NEXT: addq %rdx, %rcx
+; X64-NEXT: movq %rbx, %rdi
+; X64-NEXT: imulq %r8, %rdi
+; X64-NEXT: movq %rbx, %rax
+; X64-NEXT: mulq %r14
+; X64-NEXT: movq %rax, %r11
+; X64-NEXT: addq %rdi, %rdx
+; X64-NEXT: movq 24(%rsi), %rbx
+; X64-NEXT: imulq %r14, %rbx
+; X64-NEXT: addq %rdx, %rbx
+; X64-NEXT: addq %r9, %r11
+; X64-NEXT: adcq %rcx, %rbx
+; X64-NEXT: movq %r14, %rax
+; X64-NEXT: mulq %r12
+; X64-NEXT: movq %rdx, %rsi
+; X64-NEXT: movq %rax, %r9
+; X64-NEXT: movq %r8, %rax
+; X64-NEXT: mulq %r12
+; X64-NEXT: movq %rdx, %rcx
+; X64-NEXT: movq %rax, %rdi
+; X64-NEXT: addq %rsi, %rdi
+; X64-NEXT: adcq $0, %rcx
+; X64-NEXT: movq %r14, %rax
+; X64-NEXT: mulq %r15
+; X64-NEXT: movq %rdx, %rsi
+; X64-NEXT: movq %rax, %r14
+; X64-NEXT: addq %rdi, %r14
+; X64-NEXT: adcq $0, %rsi
+; X64-NEXT: addq %rcx, %rsi
+; X64-NEXT: sbbq %rcx, %rcx
+; X64-NEXT: andl $1, %ecx
+; X64-NEXT: movq %r8, %rax
+; X64-NEXT: mulq %r15
+; X64-NEXT: addq %rsi, %rax
+; X64-NEXT: adcq %rcx, %rdx
+; X64-NEXT: addq %r11, %rax
+; X64-NEXT: adcq %rbx, %rdx
+; X64-NEXT: movq %r9, (%r10)
+; X64-NEXT: movq %r14, 8(%r10)
+; X64-NEXT: movq %rax, 16(%r10)
+; X64-NEXT: movq %rdx, 24(%r10)
+; X64-NEXT: popq %rbx
+; X64-NEXT: popq %r12
+; X64-NEXT: popq %r14
+; X64-NEXT: popq %r15
+; X64-NEXT: retq
entry:
%av = load i256, i256* %a
%bv = load i256, i256* %b
@@ -11,22 +287,4 @@ entry:
ret void
}
-; CHECK-LABEL: @test
-; There is a lot of inter-register motion, and so matching the instruction
-; sequence will be fragile. There should be 6 underlying multiplications.
-; CHECK: imulq
-; CHECK: mulq
-; CHECK: imulq
-; CHECK: imulq
-; CHECK: mulq
-; CHECK: imulq
-; CHECK: mulq
-; CHECK: mulq
-; CHECK: mulq
-; CHECK: mulq
-; CHECK-NOT: imulq
-; CHECK-NOT: mulq
-; CHECK: retq
-
attributes #0 = { norecurse nounwind uwtable "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" }
-
diff --git a/test/CodeGen/X86/mulx32.ll b/test/CodeGen/X86/mulx32.ll
index 42ef2eb6f647..9ebd380170d3 100644
--- a/test/CodeGen/X86/mulx32.ll
+++ b/test/CodeGen/X86/mulx32.ll
@@ -1,22 +1,29 @@
-; RUN: llc -mcpu=core-avx2 -march=x86 < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+bmi2 | FileCheck %s
+; RUN: llc < %s -mtriple=i686-unknown -mcpu=core-avx2 | FileCheck %s
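+; MULX (BMI2) multiplies its implicit source %edx by the explicit operand and
+; writes the low/high halves to two destinations without touching EFLAGS,
+; which is why the checks below first load one operand into %edx.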
define i64 @f1(i32 %a, i32 %b) {
+; CHECK-LABEL: f1:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: mulxl {{[0-9]+}}(%esp), %eax, %edx
+; CHECK-NEXT: retl
%x = zext i32 %a to i64
%y = zext i32 %b to i64
%r = mul i64 %x, %y
-; CHECK: f1
-; CHECK: mulxl
-; CHECK: ret
ret i64 %r
}
define i64 @f2(i32 %a, i32* %p) {
+; CHECK-LABEL: f2:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: mulxl (%eax), %eax, %edx
+; CHECK-NEXT: retl
%b = load i32, i32* %p
%x = zext i32 %a to i64
%y = zext i32 %b to i64
%r = mul i64 %x, %y
-; CHECK: f2
-; CHECK: mulxl ({{.+}}), %{{.+}}, %{{.+}}
-; CHECK: ret
ret i64 %r
}
diff --git a/test/CodeGen/X86/mulx64.ll b/test/CodeGen/X86/mulx64.ll
index 808c02290b7c..7cc10e017fc6 100644
--- a/test/CodeGen/X86/mulx64.ll
+++ b/test/CodeGen/X86/mulx64.ll
@@ -1,22 +1,28 @@
-; RUN: llc -mcpu=core-avx2 -march=x86-64 < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+bmi2 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=core-avx2 | FileCheck %s
define i128 @f1(i64 %a, i64 %b) {
+; CHECK-LABEL: f1:
+; CHECK: # BB#0:
+; CHECK-NEXT: movq %rdi, %rdx
+; CHECK-NEXT: mulxq %rsi, %rax, %rdx
+; CHECK-NEXT: retq
%x = zext i64 %a to i128
%y = zext i64 %b to i128
%r = mul i128 %x, %y
-; CHECK: f1
-; CHECK: mulxq
-; CHECK: ret
ret i128 %r
}
define i128 @f2(i64 %a, i64* %p) {
+; CHECK-LABEL: f2:
+; CHECK: # BB#0:
+; CHECK-NEXT: movq %rdi, %rdx
+; CHECK-NEXT: mulxq (%rsi), %rax, %rdx
+; CHECK-NEXT: retq
%b = load i64, i64* %p
%x = zext i64 %a to i128
%y = zext i64 %b to i128
%r = mul i128 %x, %y
-; CHECK: f2
-; CHECK: mulxq ({{.+}}), %{{.+}}, %{{.+}}
-; CHECK: ret
ret i128 %r
}
diff --git a/test/CodeGen/X86/neg_cmp.ll b/test/CodeGen/X86/neg_cmp.ll
index 79050720d8e7..cc82857706c0 100644
--- a/test/CodeGen/X86/neg_cmp.ll
+++ b/test/CodeGen/X86/neg_cmp.ll
@@ -1,22 +1,50 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
; rdar://11245199
; PR12545
-define void @f(i32 %x, i32 %y) nounwind uwtable ssp {
-entry:
-; CHECK-LABEL: f:
-; CHECK-NOT: neg
-; CHECK: add
+
+declare void @g()
+
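+; The compare against a negated operand is rewritten as a compare-with-sum:
+; in two's complement, x == -y iff x + y == 0, so a single 'addl' sets ZF
+; and no 'negl' is needed.
+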
+define void @neg_cmp(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: neg_cmp:
+; CHECK: # BB#0:
+; CHECK-NEXT: addl %esi, %edi
+; CHECK-NEXT: jne .LBB0_1
+; CHECK-NEXT: # BB#2: # %if.then
+; CHECK-NEXT: jmp g # TAILCALL
+; CHECK-NEXT: .LBB0_1: # %if.end
+; CHECK-NEXT: retq
%sub = sub i32 0, %y
%cmp = icmp eq i32 %x, %sub
br i1 %cmp, label %if.then, label %if.end
-if.then: ; preds = %entry
+if.then:
tail call void @g() nounwind
br label %if.end
-if.end: ; preds = %if.then, %entry
+if.end:
+ ret void
+}
+
+define void @neg_cmp_commuted(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: neg_cmp_commuted:
+; CHECK: # BB#0:
+; CHECK-NEXT: addl %esi, %edi
+; CHECK-NEXT: jne .LBB1_1
+; CHECK-NEXT: # BB#2: # %if.then
+; CHECK-NEXT: jmp g # TAILCALL
+; CHECK-NEXT: .LBB1_1: # %if.end
+; CHECK-NEXT: retq
+ %sub = sub i32 0, %y
+ %cmp = icmp eq i32 %sub, %x
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @g() nounwind
+ br label %if.end
+
+if.end:
ret void
}
-declare void @g()
diff --git a/test/CodeGen/X86/negative-sin.ll b/test/CodeGen/X86/negative-sin.ll
index 16258f479402..bc38021b5620 100644
--- a/test/CodeGen/X86/negative-sin.ll
+++ b/test/CodeGen/X86/negative-sin.ll
@@ -101,5 +101,5 @@ define double @fn_attr(double %e) nounwind #0 {
ret double %h
}
-attributes #0 = { "unsafe-fp-math"="true" }
+attributes #0 = { "unsafe-fp-math"="true" "no-signed-zeros-fp-math"="true" }
diff --git a/test/CodeGen/X86/nontemporal-2.ll b/test/CodeGen/X86/nontemporal-2.ll
index 92a35436d90d..d1bb8d3e923b 100644
--- a/test/CodeGen/X86/nontemporal-2.ll
+++ b/test/CodeGen/X86/nontemporal-2.ll
@@ -255,6 +255,7 @@ define void @test_zero_v8f32(<8 x float>* %dst) {
; VLX: # BB#0:
; VLX-NEXT: vpxor %ymm0, %ymm0, %ymm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
store <8 x float> zeroinitializer, <8 x float>* %dst, align 32, !nontemporal !1
ret void
@@ -279,6 +280,7 @@ define void @test_zero_v8i32(<8 x i32>* %dst) {
; VLX: # BB#0:
; VLX-NEXT: vpxor %ymm0, %ymm0, %ymm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
store <8 x i32> zeroinitializer, <8 x i32>* %dst, align 32, !nontemporal !1
ret void
@@ -303,6 +305,7 @@ define void @test_zero_v4f64(<4 x double>* %dst) {
; VLX: # BB#0:
; VLX-NEXT: vpxor %ymm0, %ymm0, %ymm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
store <4 x double> zeroinitializer, <4 x double>* %dst, align 32, !nontemporal !1
ret void
@@ -327,6 +330,7 @@ define void @test_zero_v4i64(<4 x i64>* %dst) {
; VLX: # BB#0:
; VLX-NEXT: vpxor %ymm0, %ymm0, %ymm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
store <4 x i64> zeroinitializer, <4 x i64>* %dst, align 32, !nontemporal !1
ret void
@@ -351,6 +355,7 @@ define void @test_zero_v16i16(<16 x i16>* %dst) {
; VLX: # BB#0:
; VLX-NEXT: vpxor %ymm0, %ymm0, %ymm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
store <16 x i16> zeroinitializer, <16 x i16>* %dst, align 32, !nontemporal !1
ret void
@@ -375,6 +380,7 @@ define void @test_zero_v32i8(<32 x i8>* %dst) {
; VLX: # BB#0:
; VLX-NEXT: vpxor %ymm0, %ymm0, %ymm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
store <32 x i8> zeroinitializer, <32 x i8>* %dst, align 32, !nontemporal !1
ret void
@@ -757,6 +763,7 @@ define void @test_arg_v8f32(<8 x float> %arg, <8 x float>* %dst) {
; VLX-LABEL: test_arg_v8f32:
; VLX: # BB#0:
; VLX-NEXT: vmovntps %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
store <8 x float> %arg, <8 x float>* %dst, align 32, !nontemporal !1
ret void
@@ -777,7 +784,8 @@ define void @test_arg_v8i32(<8 x i32> %arg, <8 x i32>* %dst) {
;
; VLX-LABEL: test_arg_v8i32:
; VLX: # BB#0:
-; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: vmovntps %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
store <8 x i32> %arg, <8 x i32>* %dst, align 32, !nontemporal !1
ret void
@@ -798,7 +806,8 @@ define void @test_arg_v4f64(<4 x double> %arg, <4 x double>* %dst) {
;
; VLX-LABEL: test_arg_v4f64:
; VLX: # BB#0:
-; VLX-NEXT: vmovntpd %ymm0, (%rdi)
+; VLX-NEXT: vmovntps %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
store <4 x double> %arg, <4 x double>* %dst, align 32, !nontemporal !1
ret void
@@ -819,7 +828,8 @@ define void @test_arg_v4i64(<4 x i64> %arg, <4 x i64>* %dst) {
;
; VLX-LABEL: test_arg_v4i64:
; VLX: # BB#0:
-; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: vmovntps %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
store <4 x i64> %arg, <4 x i64>* %dst, align 32, !nontemporal !1
ret void
@@ -840,7 +850,8 @@ define void @test_arg_v16i16(<16 x i16> %arg, <16 x i16>* %dst) {
;
; VLX-LABEL: test_arg_v16i16:
; VLX: # BB#0:
-; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: vmovntps %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
store <16 x i16> %arg, <16 x i16>* %dst, align 32, !nontemporal !1
ret void
@@ -861,7 +872,8 @@ define void @test_arg_v32i8(<32 x i8> %arg, <32 x i8>* %dst) {
;
; VLX-LABEL: test_arg_v32i8:
; VLX: # BB#0:
-; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: vmovntps %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
store <32 x i8> %arg, <32 x i8>* %dst, align 32, !nontemporal !1
ret void
@@ -1031,6 +1043,7 @@ define void @test_op_v8f32(<8 x float> %a, <8 x float> %b, <8 x float>* %dst) {
; VLX: # BB#0:
; VLX-NEXT: vaddps %ymm1, %ymm0, %ymm0
; VLX-NEXT: vmovntps %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
%r = fadd <8 x float> %a, %b
store <8 x float> %r, <8 x float>* %dst, align 32, !nontemporal !1
@@ -1068,6 +1081,7 @@ define void @test_op_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %dst) {
; VLX: # BB#0:
; VLX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
%r = add <8 x i32> %a, %b
store <8 x i32> %r, <8 x i32>* %dst, align 32, !nontemporal !1
@@ -1094,6 +1108,7 @@ define void @test_op_v4f64(<4 x double> %a, <4 x double> %b, <4 x double>* %dst)
; VLX: # BB#0:
; VLX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; VLX-NEXT: vmovntpd %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
%r = fadd <4 x double> %a, %b
store <4 x double> %r, <4 x double>* %dst, align 32, !nontemporal !1
@@ -1131,6 +1146,7 @@ define void @test_op_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %dst) {
; VLX: # BB#0:
; VLX-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
%r = add <4 x i64> %a, %b
store <4 x i64> %r, <4 x i64>* %dst, align 32, !nontemporal !1
@@ -1168,6 +1184,7 @@ define void @test_op_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %dst) {
; VLX: # BB#0:
; VLX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
%r = add <16 x i16> %a, %b
store <16 x i16> %r, <16 x i16>* %dst, align 32, !nontemporal !1
@@ -1205,6 +1222,7 @@ define void @test_op_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %dst) {
; VLX: # BB#0:
; VLX-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
%r = add <32 x i8> %a, %b
store <32 x i8> %r, <32 x i8>* %dst, align 32, !nontemporal !1
@@ -1235,6 +1253,7 @@ define void @test_unaligned_v8f32(<8 x float> %a, <8 x float> %b, <8 x float>* %
; VLX: # BB#0:
; VLX-NEXT: vaddps %ymm1, %ymm0, %ymm0
; VLX-NEXT: vmovups %ymm0, (%rdi)
+; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
%r = fadd <8 x float> %a, %b
store <8 x float> %r, <8 x float>* %dst, align 16, !nontemporal !1
diff --git a/test/CodeGen/X86/nontemporal-loads.ll b/test/CodeGen/X86/nontemporal-loads.ll
index 53f4e8d04374..eaab26ef9547 100644
--- a/test/CodeGen/X86/nontemporal-loads.ll
+++ b/test/CodeGen/X86/nontemporal-loads.ll
@@ -752,7 +752,7 @@ define <8 x i32> @test_arg_v8i32(<8 x i32> %arg, <8 x i32>* %src) {
;
; AVX1-LABEL: test_arg_v8i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm1
+; AVX1-NEXT: vmovdqa (%rdi), %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
@@ -804,7 +804,7 @@ define <4 x i64> @test_arg_v4i64(<4 x i64> %arg, <4 x i64>* %src) {
;
; AVX1-LABEL: test_arg_v4i64:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm1
+; AVX1-NEXT: vmovdqa (%rdi), %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
@@ -835,7 +835,7 @@ define <16 x i16> @test_arg_v16i16(<16 x i16> %arg, <16 x i16>* %src) {
;
; AVX1-LABEL: test_arg_v16i16:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm1
+; AVX1-NEXT: vmovdqa (%rdi), %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
@@ -866,7 +866,7 @@ define <32 x i8> @test_arg_v32i8(<32 x i8> %arg, <32 x i8>* %src) {
;
; AVX1-LABEL: test_arg_v32i8:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm1
+; AVX1-NEXT: vmovdqa (%rdi), %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddb %xmm3, %xmm2, %xmm2
@@ -925,8 +925,8 @@ define <16 x i32> @test_arg_v16i32(<16 x i32> %arg, <16 x i32>* %src) {
;
; AVX1-LABEL: test_arg_v16i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm2
-; AVX1-NEXT: vmovaps 32(%rdi), %ymm3
+; AVX1-NEXT: vmovdqa (%rdi), %ymm2
+; AVX1-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
@@ -989,8 +989,8 @@ define <8 x i64> @test_arg_v8i64(<8 x i64> %arg, <8 x i64>* %src) {
;
; AVX1-LABEL: test_arg_v8i64:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm2
-; AVX1-NEXT: vmovaps 32(%rdi), %ymm3
+; AVX1-NEXT: vmovdqa (%rdi), %ymm2
+; AVX1-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
; AVX1-NEXT: vpaddq %xmm5, %xmm4, %xmm4
@@ -1029,8 +1029,8 @@ define <32 x i16> @test_arg_v32i16(<32 x i16> %arg, <32 x i16>* %src) {
;
; AVX1-LABEL: test_arg_v32i16:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm2
-; AVX1-NEXT: vmovaps 32(%rdi), %ymm3
+; AVX1-NEXT: vmovdqa (%rdi), %ymm2
+; AVX1-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
; AVX1-NEXT: vpaddw %xmm5, %xmm4, %xmm4
@@ -1081,8 +1081,8 @@ define <64 x i8> @test_arg_v64i8(<64 x i8> %arg, <64 x i8>* %src) {
;
; AVX1-LABEL: test_arg_v64i8:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm2
-; AVX1-NEXT: vmovaps 32(%rdi), %ymm3
+; AVX1-NEXT: vmovdqa (%rdi), %ymm2
+; AVX1-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
; AVX1-NEXT: vpaddb %xmm5, %xmm4, %xmm4
diff --git a/test/CodeGen/X86/oddshuffles.ll b/test/CodeGen/X86/oddshuffles.ll
index 952db42842ef..d26cf02dd942 100644
--- a/test/CodeGen/X86/oddshuffles.ll
+++ b/test/CodeGen/X86/oddshuffles.ll
@@ -244,40 +244,34 @@ define void @v7i8(<4 x i8> %a, <4 x i8> %b, <7 x i8>* %p) nounwind {
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,1,3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: movb %al, 6(%rdi)
-; SSE2-NEXT: movd %xmm1, (%rdi)
-; SSE2-NEXT: pextrw $4, %xmm0, %eax
+; SSE2-NEXT: movd %xmm0, (%rdi)
+; SSE2-NEXT: pextrw $2, %xmm0, %eax
; SSE2-NEXT: movw %ax, 4(%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v7i8:
; SSE42: # BB#0:
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,1,3]
+; SSE42-NEXT: pextrb $0, %xmm1, 6(%rdi)
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[8,9,8,9,4,5,8,9,0,1,12,13,0,1,14,15]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5,6,7]
-; SSE42-NEXT: pextrb $12, %xmm1, 6(%rdi)
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
-; SSE42-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
-; SSE42-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE42-NEXT: pextrw $4, %xmm1, 4(%rdi)
-; SSE42-NEXT: movd %xmm0, (%rdi)
+; SSE42-NEXT: pextrw $2, %xmm1, 4(%rdi)
+; SSE42-NEXT: movd %xmm1, (%rdi)
; SSE42-NEXT: retq
;
; AVX-LABEL: v7i8:
; AVX: # BB#0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,1,3]
-; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[8,9,8,9,4,5,8,9,0,1,12,13,0,1,14,15]
-; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5,6,7]
-; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
-; AVX-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; AVX-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX-NEXT: vpextrb $12, %xmm0, 6(%rdi)
-; AVX-NEXT: vpextrw $4, %xmm1, 4(%rdi)
-; AVX-NEXT: vmovd %xmm2, (%rdi)
+; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[8,9,8,9,4,5,8,9,0,1,12,13,0,1,14,15]
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5,6,7]
+; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; AVX-NEXT: vpextrb $0, %xmm1, 6(%rdi)
+; AVX-NEXT: vpextrw $2, %xmm0, 4(%rdi)
+; AVX-NEXT: vmovd %xmm0, (%rdi)
; AVX-NEXT: retq
%r = shufflevector <4 x i8> %a, <4 x i8> %b, <7 x i32> <i32 0, i32 6, i32 3, i32 6, i32 1, i32 7, i32 4>
store <7 x i8> %r, <7 x i8>* %p
@@ -923,7 +917,7 @@ define void @interleave_24i16_out(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2
; AVX1-LABEL: interleave_24i16_out:
; AVX1: # BB#0:
; AVX1-NEXT: vmovdqu 32(%rdi), %xmm0
-; AVX1-NEXT: vmovups (%rdi), %ymm1
+; AVX1-NEXT: vmovdqu (%rdi), %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6],xmm2[7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[0,1,6,7,12,13,2,3,8,9,14,15,12,13,14,15]
@@ -1445,8 +1439,8 @@ define <2 x double> @wrongorder(<4 x double> %A, <8 x double>* %P) #0 {
; AVX1: # BB#0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
-; AVX1-NEXT: vmovapd %ymm1, 32(%rdi)
-; AVX1-NEXT: vmovapd %ymm1, (%rdi)
+; AVX1-NEXT: vmovaps %ymm1, 32(%rdi)
+; AVX1-NEXT: vmovaps %ymm1, (%rdi)
; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
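The v7i8 checks earlier in this file reflect how an odd-sized vector store is legalized: there is no 7-byte store instruction, so the shuffled value is written as a 4-byte movd, a 2-byte pextrw, and a 1-byte piece at offsets 0, 4, and 6. A minimal standalone sketch of the same situation (illustrative only, not part of the patch; the function name is invented):

define void @store_v3i8(<3 x i8> %v, <3 x i8>* %p) nounwind {
  ; No 3-byte store exists, so the legalizer must emit narrower
  ; scalar stores (e.g. a 2-byte piece plus a 1-byte piece).
  store <3 x i8> %v, <3 x i8>* %p
  ret void
}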
diff --git a/test/CodeGen/X86/overflow.ll b/test/CodeGen/X86/overflow.ll
new file mode 100644
index 000000000000..ff25b5de4933
--- /dev/null
+++ b/test/CodeGen/X86/overflow.ll
@@ -0,0 +1,64 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64
+
+define i128 @mulhioverflow(i64 %a, i64 %b, i64 %c) nounwind {
+; X32-LABEL: mulhioverflow:
+; X32: # BB#0:
+; X32-NEXT: pushl %ebp
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: pushl %edi
+; X32-NEXT: pushl %esi
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $16, %esp
+; X32-NEXT: movl 8(%ebp), %esi
+; X32-NEXT: movl 28(%ebp), %edi
+; X32-NEXT: movl %esp, %eax
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl 24(%ebp)
+; X32-NEXT: pushl 20(%ebp)
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl $0
+; X32-NEXT: pushl 16(%ebp)
+; X32-NEXT: pushl 12(%ebp)
+; X32-NEXT: pushl %eax
+; X32-NEXT: calll __multi3
+; X32-NEXT: addl $32, %esp
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: andl $1, %edi
+; X32-NEXT: xorl %ecx, %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: sbbl %edx, %edx
+; X32-NEXT: andl $1, %edx
+; X32-NEXT: movl %edi, (%esi)
+; X32-NEXT: movl %eax, 4(%esi)
+; X32-NEXT: movl %ecx, 8(%esi)
+; X32-NEXT: movl %edx, 12(%esi)
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: leal -8(%ebp), %esp
+; X32-NEXT: popl %esi
+; X32-NEXT: popl %edi
+; X32-NEXT: popl %ebp
+; X32-NEXT: retl $4
+;
+; X64-LABEL: mulhioverflow:
+; X64: # BB#0:
+; X64-NEXT: movq %rdx, %rcx
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: mulq %rsi
+; X64-NEXT: andl $1, %ecx
+; X64-NEXT: leaq (%rcx,%rdx), %rax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: retq
+ %1 = zext i64 %a to i128
+ %2 = zext i64 %b to i128
+ %3 = mul i128 %1, %2
+ %4 = lshr i128 %3, 64
+ %5 = and i64 %c, 1
+ %6 = zext i64 %5 to i128
+ %7 = add i128 %4, %6
+ ret i128 %7
+}
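The new test above checks the lowering of a 64x64->128-bit multiply-high plus a masked carry. On x86-64 the whole i128 product comes from a single mulq (low half in %rax, high half in %rdx), so the function collapses to a lea of (c & 1) plus the high half; i686 has no native i128 multiply and routes it through the compiler-rt __multi3 call seen in the X32 checks. A minimal sketch isolating just the mulhi idiom (illustrative, not part of the patch; the function name is invented):

define i64 @mulhi_u64(i64 %a, i64 %b) nounwind {
  %wa = zext i64 %a to i128
  %wb = zext i64 %b to i128
  %p  = mul i128 %wa, %wb
  %hi = lshr i128 %p, 64        ; exactly the high half mulq leaves in %rdx
  %r  = trunc i128 %hi to i64
  ret i64 %r
}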
diff --git a/test/CodeGen/X86/peep-setb.ll b/test/CodeGen/X86/peep-setb.ll
index adae8acd0432..01e445a86221 100644
--- a/test/CodeGen/X86/peep-setb.ll
+++ b/test/CodeGen/X86/peep-setb.ll
@@ -1,82 +1,123 @@
-; RUN: llc -march=x86-64 < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-unknown-unknown < %s | FileCheck %s
+
+; These tests use cmp+adc/sbb in place of test+set+add/sub. Should this transform
+; be enabled by micro-architecture rather than as part of generic lowering/isel?
define i8 @test1(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: test1:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpb %sil, %dil
+; CHECK-NEXT: adcb $0, %sil
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
%cmp = icmp ult i8 %a, %b
%cond = zext i1 %cmp to i8
%add = add i8 %cond, %b
ret i8 %add
-; CHECK-LABEL: test1:
-; CHECK: adcb $0
}
define i32 @test2(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: test2:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: adcl $0, %esi
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
%cmp = icmp ult i32 %a, %b
%cond = zext i1 %cmp to i32
%add = add i32 %cond, %b
ret i32 %add
-; CHECK-LABEL: test2:
-; CHECK: adcl $0
}
define i64 @test3(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: test3:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpq %rsi, %rdi
+; CHECK-NEXT: adcq $0, %rsi
+; CHECK-NEXT: movq %rsi, %rax
+; CHECK-NEXT: retq
%cmp = icmp ult i64 %a, %b
%conv = zext i1 %cmp to i64
%add = add i64 %conv, %b
ret i64 %add
-; CHECK-LABEL: test3:
-; CHECK: adcq $0
}
define i8 @test4(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: test4:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpb %sil, %dil
+; CHECK-NEXT: sbbb $0, %sil
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
%cmp = icmp ult i8 %a, %b
%cond = zext i1 %cmp to i8
%sub = sub i8 %b, %cond
ret i8 %sub
-; CHECK-LABEL: test4:
-; CHECK: sbbb $0
}
define i32 @test5(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: test5:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: sbbl $0, %esi
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
%cmp = icmp ult i32 %a, %b
%cond = zext i1 %cmp to i32
%sub = sub i32 %b, %cond
ret i32 %sub
-; CHECK-LABEL: test5:
-; CHECK: sbbl $0
}
define i64 @test6(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: test6:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpq %rsi, %rdi
+; CHECK-NEXT: sbbq $0, %rsi
+; CHECK-NEXT: movq %rsi, %rax
+; CHECK-NEXT: retq
%cmp = icmp ult i64 %a, %b
%conv = zext i1 %cmp to i64
%sub = sub i64 %b, %conv
ret i64 %sub
-; CHECK-LABEL: test6:
-; CHECK: sbbq $0
}
define i8 @test7(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: test7:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpb %sil, %dil
+; CHECK-NEXT: adcb $0, %sil
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
%cmp = icmp ult i8 %a, %b
%cond = sext i1 %cmp to i8
%sub = sub i8 %b, %cond
ret i8 %sub
-; CHECK-LABEL: test7:
-; CHECK: adcb $0
}
define i32 @test8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: test8:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: adcl $0, %esi
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
%cmp = icmp ult i32 %a, %b
%cond = sext i1 %cmp to i32
%sub = sub i32 %b, %cond
ret i32 %sub
-; CHECK-LABEL: test8:
-; CHECK: adcl $0
}
define i64 @test9(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: test9:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpq %rsi, %rdi
+; CHECK-NEXT: adcq $0, %rsi
+; CHECK-NEXT: movq %rsi, %rax
+; CHECK-NEXT: retq
%cmp = icmp ult i64 %a, %b
%conv = sext i1 %cmp to i64
%sub = sub i64 %b, %conv
ret i64 %sub
-; CHECK-LABEL: test9:
-; CHECK: adcq $0
}
+
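A note on why test7-test9 above also lower to adc even though their IR subtracts: sext of an i1 is 0 or -1, and b - (-1) == b + 1, so subtracting a sign-extended compare is the same as adding its zero extension. A standalone restatement of that identity (illustrative only; the function name is invented):

define i32 @sub_sext_is_add_zext(i32 %a, i32 %b) nounwind {
  %cmp = icmp ult i32 %a, %b
  %s   = sext i1 %cmp to i32    ; 0 or -1
  %sub = sub i32 %b, %s         ; b - (-1) == b + 1 == b + zext(%cmp)
  ret i32 %sub                  ; expected to match the cmp + adc $0 pattern above
}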
diff --git a/test/CodeGen/X86/peep-test-4.ll b/test/CodeGen/X86/peep-test-4.ll
index 1ae621fb1f58..832262aba7e4 100644
--- a/test/CodeGen/X86/peep-test-4.ll
+++ b/test/CodeGen/X86/peep-test-4.ll
@@ -1,14 +1,18 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+bmi,+bmi2,+popcnt,+lzcnt | FileCheck %s
declare void @foo(i32)
declare void @foo32(i32)
declare void @foo64(i64)
-; CHECK-LABEL: neg:
-; CHECK: negl %edi
-; CHECK-NEXT: je
-; CHECK: jmp foo
-; CHECK: ret
define void @neg(i32 %x) nounwind {
+; CHECK-LABEL: neg:
+; CHECK: # BB#0:
+; CHECK-NEXT: negl %edi
+; CHECK-NEXT: je .LBB0_1
+; CHECK-NEXT: # BB#2: # %bb
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB0_1: # %return
+; CHECK-NEXT: retq
%sub = sub i32 0, %x
%cmp = icmp eq i32 %sub, 0
br i1 %cmp, label %return, label %bb
@@ -21,12 +25,15 @@ return:
ret void
}
-; CHECK-LABEL: sar:
-; CHECK: sarl %edi
-; CHECK-NEXT: je
-; CHECK: jmp foo
-; CHECK: ret
define void @sar(i32 %x) nounwind {
+; CHECK-LABEL: sar:
+; CHECK: # BB#0:
+; CHECK-NEXT: sarl %edi
+; CHECK-NEXT: je .LBB1_1
+; CHECK-NEXT: # BB#2: # %bb
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB1_1: # %return
+; CHECK-NEXT: retq
%ashr = ashr i32 %x, 1
%cmp = icmp eq i32 %ashr, 0
br i1 %cmp, label %return, label %bb
@@ -39,12 +46,15 @@ return:
ret void
}
-; CHECK-LABEL: shr:
-; CHECK: shrl %edi
-; CHECK-NEXT: je
-; CHECK: jmp foo
-; CHECK: ret
define void @shr(i32 %x) nounwind {
+; CHECK-LABEL: shr:
+; CHECK: # BB#0:
+; CHECK-NEXT: shrl %edi
+; CHECK-NEXT: je .LBB2_1
+; CHECK-NEXT: # BB#2: # %bb
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB2_1: # %return
+; CHECK-NEXT: retq
%ashr = lshr i32 %x, 1
%cmp = icmp eq i32 %ashr, 0
br i1 %cmp, label %return, label %bb
@@ -57,12 +67,15 @@ return:
ret void
}
-; CHECK-LABEL: shri:
-; CHECK: shrl $3, %edi
-; CHECK-NEXT: je
-; CHECK: jmp foo
-; CHECK: ret
define void @shri(i32 %x) nounwind {
+; CHECK-LABEL: shri:
+; CHECK: # BB#0:
+; CHECK-NEXT: shrl $3, %edi
+; CHECK-NEXT: je .LBB3_1
+; CHECK-NEXT: # BB#2: # %bb
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB3_1: # %return
+; CHECK-NEXT: retq
%ashr = lshr i32 %x, 3
%cmp = icmp eq i32 %ashr, 0
br i1 %cmp, label %return, label %bb
@@ -75,12 +88,15 @@ return:
ret void
}
-; CHECK-LABEL: shl:
-; CHECK: addl %edi, %edi
-; CHECK-NEXT: je
-; CHECK: jmp foo
-; CHECK: ret
define void @shl(i32 %x) nounwind {
+; CHECK-LABEL: shl:
+; CHECK: # BB#0:
+; CHECK-NEXT: addl %edi, %edi
+; CHECK-NEXT: je .LBB4_1
+; CHECK-NEXT: # BB#2: # %bb
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB4_1: # %return
+; CHECK-NEXT: retq
%shl = shl i32 %x, 1
%cmp = icmp eq i32 %shl, 0
br i1 %cmp, label %return, label %bb
@@ -93,12 +109,15 @@ return:
ret void
}
-; CHECK-LABEL: shli:
-; CHECK: shll $4, %edi
-; CHECK-NEXT: je
-; CHECK: jmp foo
-; CHECK: ret
define void @shli(i32 %x) nounwind {
+; CHECK-LABEL: shli:
+; CHECK: # BB#0:
+; CHECK-NEXT: shll $4, %edi
+; CHECK-NEXT: je .LBB5_1
+; CHECK-NEXT: # BB#2: # %bb
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB5_1: # %return
+; CHECK-NEXT: retq
%shl = shl i32 %x, 4
%cmp = icmp eq i32 %shl, 0
br i1 %cmp, label %return, label %bb
@@ -111,35 +130,40 @@ return:
ret void
}
-; CHECK-LABEL: adc:
-; CHECK: movabsq $-9223372036854775808, %rax
-; CHECK-NEXT: addq %rdi, %rax
-; CHECK-NEXT: adcq $0, %rsi
-; CHECK-NEXT: sete %al
-; CHECK: ret
define zeroext i1 @adc(i128 %x) nounwind {
+; CHECK-LABEL: adc:
+; CHECK: # BB#0:
+; CHECK-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
+; CHECK-NEXT: addq %rdi, %rax
+; CHECK-NEXT: adcq $0, %rsi
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: retq
%add = add i128 %x, 9223372036854775808
%cmp = icmp ult i128 %add, 18446744073709551616
ret i1 %cmp
}
-; CHECK-LABEL: sbb:
-; CHECK: cmpq %rdx, %rdi
-; CHECK-NEXT: sbbq %rcx, %rsi
-; CHECK-NEXT: setns %al
-; CHECK: ret
define zeroext i1 @sbb(i128 %x, i128 %y) nounwind {
+; CHECK-LABEL: sbb:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpq %rdx, %rdi
+; CHECK-NEXT: sbbq %rcx, %rsi
+; CHECK-NEXT: setns %al
+; CHECK-NEXT: retq
%sub = sub i128 %x, %y
%cmp = icmp sge i128 %sub, 0
ret i1 %cmp
}
-; CHECK-LABEL: andn:
-; CHECK: andnl %esi, %edi, %edi
-; CHECK-NEXT: je
-; CHECK: jmp foo
-; CHECK: ret
define void @andn(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: andn:
+; CHECK: # BB#0:
+; CHECK-NEXT: andnl %esi, %edi, %edi
+; CHECK-NEXT: je .LBB8_1
+; CHECK-NEXT: # BB#2: # %bb
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB8_1: # %return
+; CHECK-NEXT: retq
%not = xor i32 %x, -1
%andn = and i32 %y, %not
%cmp = icmp eq i32 %andn, 0
@@ -153,13 +177,16 @@ return:
ret void
}
-; CHECK-LABEL: bextr:
-; CHECK: bextrl %esi, %edi, %edi
-; CHECK-NEXT: je
-; CHECK: jmp foo
-; CHECK: ret
declare i32 @llvm.x86.bmi.bextr.32(i32, i32) nounwind readnone
define void @bextr(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: bextr:
+; CHECK: # BB#0:
+; CHECK-NEXT: bextrl %esi, %edi, %edi
+; CHECK-NEXT: je .LBB9_1
+; CHECK-NEXT: # BB#2: # %bb
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB9_1: # %return
+; CHECK-NEXT: retq
%bextr = tail call i32 @llvm.x86.bmi.bextr.32(i32 %x, i32 %y)
%cmp = icmp eq i32 %bextr, 0
br i1 %cmp, label %return, label %bb
@@ -172,43 +199,54 @@ return:
ret void
}
-; CHECK-LABEL: popcnt:
-; CHECK: popcntl
-; CHECK-NEXT: je
-; CHECK: jmp foo
-; CHECK: ret
declare i32 @llvm.ctpop.i32(i32) nounwind readnone
define void @popcnt(i32 %x) nounwind {
+; CHECK-LABEL: popcnt:
+; CHECK: # BB#0:
+; CHECK-NEXT: popcntl %edi, %edi
+; CHECK-NEXT: je .LBB10_1
+; CHECK-NEXT: # BB#2: # %bb
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB10_1: # %return
+; CHECK-NEXT: retq
%popcnt = tail call i32 @llvm.ctpop.i32(i32 %x)
%cmp = icmp eq i32 %popcnt, 0
br i1 %cmp, label %return, label %bb
-;
bb:
tail call void @foo(i32 %popcnt)
br label %return
-;
return:
ret void
}
-; CHECK-LABEL: testCTZ
-; CHECK: tzcntq
-; CHECK-NOT: test
-; CHECK: cmovaeq
declare i64 @llvm.cttz.i64(i64, i1)
define i64 @testCTZ(i64 %v) nounwind {
+; CHECK-LABEL: testCTZ:
+; CHECK: # BB#0:
+; CHECK-NEXT: tzcntq %rdi, %rcx
+; CHECK-NEXT: movl $255, %eax
+; CHECK-NEXT: cmovaeq %rcx, %rax
+; CHECK-NEXT: retq
%cnt = tail call i64 @llvm.cttz.i64(i64 %v, i1 true)
%tobool = icmp eq i64 %v, 0
%cond = select i1 %tobool, i64 255, i64 %cnt
ret i64 %cond
}
-; CHECK-LABEL: testCTZ2
-; CHECK: tzcntl
-; CHECK-NEXT: jb
-; CHECK: jmp foo
declare i32 @llvm.cttz.i32(i32, i1)
define void @testCTZ2(i32 %v) nounwind {
+; CHECK-LABEL: testCTZ2:
+; CHECK: # BB#0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: tzcntl %edi, %ebx
+; CHECK-NEXT: jb .LBB12_2
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: movl %ebx, %edi
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: .LBB12_2: # %return
+; CHECK-NEXT: movl %ebx, %edi
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: jmp foo32 # TAILCALL
%cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true)
%cmp = icmp eq i32 %v, 0
br i1 %cmp, label %return, label %bb
@@ -222,11 +260,19 @@ return:
ret void
}
-; CHECK-LABEL: testCTZ3
-; CHECK: tzcntl
-; CHECK-NEXT: jae
-; CHECK: jmp foo
define void @testCTZ3(i32 %v) nounwind {
+; CHECK-LABEL: testCTZ3:
+; CHECK: # BB#0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: tzcntl %edi, %ebx
+; CHECK-NEXT: jae .LBB13_2
+; CHECK-NEXT: # BB#1: # %bb
+; CHECK-NEXT: movl %ebx, %edi
+; CHECK-NEXT: callq foo
+; CHECK-NEXT: .LBB13_2: # %return
+; CHECK-NEXT: movl %ebx, %edi
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: jmp foo32 # TAILCALL
%cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true)
%cmp = icmp ne i32 %v, 0
br i1 %cmp, label %return, label %bb
@@ -240,24 +286,28 @@ return:
ret void
}
-; CHECK-LABEL: testCLZ
-; CHECK: lzcntq
-; CHECK-NOT: test
-; CHECK: cmovaeq
declare i64 @llvm.ctlz.i64(i64, i1)
define i64 @testCLZ(i64 %v) nounwind {
+; CHECK-LABEL: testCLZ:
+; CHECK: # BB#0:
+; CHECK-NEXT: lzcntq %rdi, %rcx
+; CHECK-NEXT: movl $255, %eax
+; CHECK-NEXT: cmovaeq %rcx, %rax
+; CHECK-NEXT: retq
%cnt = tail call i64 @llvm.ctlz.i64(i64 %v, i1 true)
%tobool = icmp ne i64 %v, 0
%cond = select i1 %tobool, i64 %cnt, i64 255
ret i64 %cond
}
-; CHECK-LABEL: testPOPCNT
-; CHECK: popcntq
-; CHECK-NOT: test
-; CHECK: cmovneq
declare i64 @llvm.ctpop.i64(i64)
define i64 @testPOPCNT(i64 %v) nounwind {
+; CHECK-LABEL: testPOPCNT:
+; CHECK: # BB#0:
+; CHECK-NEXT: popcntq %rdi, %rcx
+; CHECK-NEXT: movl $255, %eax
+; CHECK-NEXT: cmovneq %rcx, %rax
+; CHECK-NEXT: retq
%cnt = tail call i64 @llvm.ctpop.i64(i64 %v)
%tobool = icmp ne i64 %v, 0
%cond = select i1 %tobool, i64 %cnt, i64 255
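These peephole tests all rely on the compare-with-zero being folded into a preceding flag-setting instruction: neg, the shifts, andn, bextr, and popcnt already define ZF, so the branch consumes the flags directly, and tzcnt/lzcnt additionally set CF when the source is zero, which is what the cmovae/jb/jae forms exploit. A minimal sketch of the basic ZF folding (illustrative, not from the patch; names are invented):

declare void @use(i32)

define void @flags_reuse(i32 %x) nounwind {
  %s = lshr i32 %x, 5           ; shrl already sets ZF, so no separate testl is needed
  %z = icmp eq i32 %s, 0
  br i1 %z, label %done, label %call

call:
  tail call void @use(i32 %s)
  br label %done

done:
  ret void
}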
diff --git a/test/CodeGen/X86/pmul.ll b/test/CodeGen/X86/pmul.ll
index 7d9ef28a090f..88cb7a6d5825 100644
--- a/test/CodeGen/X86/pmul.ll
+++ b/test/CodeGen/X86/pmul.ll
@@ -8,19 +8,18 @@
define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
; SSE2-LABEL: mul_v16i8c:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psraw $8, %xmm2
-; SSE2-NEXT: pmullw %xmm1, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117]
+; SSE2-NEXT: pmullw %xmm2, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm0
-; SSE2-NEXT: pmullw %xmm1, %xmm0
+; SSE2-NEXT: pmullw %xmm2, %xmm0
; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v16i8c:
@@ -56,6 +55,7 @@ define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
; AVX512F-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v16i8c:
@@ -64,6 +64,7 @@ define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
entry:
%A = mul <16 x i8> %i, < i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117 >
@@ -196,6 +197,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
; AVX512F-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v16i8:
@@ -205,6 +207,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
entry:
%A = mul <16 x i8> %i, %j
@@ -382,29 +385,28 @@ entry:
define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
; SSE2-LABEL: mul_v32i8c:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psraw $8, %xmm3
-; SSE2-NEXT: pmullw %xmm2, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [117,117,117,117,117,117,117,117]
+; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm0
-; SSE2-NEXT: pmullw %xmm2, %xmm0
+; SSE2-NEXT: pmullw %xmm3, %xmm0
; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: packuswb %xmm3, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psraw $8, %xmm3
-; SSE2-NEXT: pmullw %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: pmullw %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm1
-; SSE2-NEXT: pmullw %xmm2, %xmm1
+; SSE2-NEXT: pmullw %xmm3, %xmm1
; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: packuswb %xmm3, %xmm1
+; SSE2-NEXT: packuswb %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v32i8c:
@@ -771,11 +773,10 @@ entry:
define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE2-LABEL: mul_v64i8c:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
-; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm6
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117]
; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm5, %xmm6
@@ -1160,35 +1161,26 @@ define <4 x i32> @mul_v4i64_zero_upper(<4 x i32> %val1, <4 x i32> %val2) {
;
; SSE41-LABEL: mul_v4i64_zero_upper:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE41-NEXT: pmuludq %xmm0, %xmm1
-; SSE41-NEXT: pmuludq %xmm4, %xmm2
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[1,3]
-; SSE41-NEXT: movaps %xmm2, %xmm0
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT: pmuludq %xmm3, %xmm0
+; SSE41-NEXT: pmuludq %xmm2, %xmm4
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm4[1,3]
; SSE41-NEXT: retq
;
-; AVX2-LABEL: mul_v4i64_zero_upper:
-; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: mul_v4i64_zero_upper:
-; AVX512: # BB#0: # %entry
-; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX512-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
-; AVX512-NEXT: retq
+; AVX-LABEL: mul_v4i64_zero_upper:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
entry:
%val1a = zext <4 x i32> %val1 to <4 x i64>
%val2a = zext <4 x i32> %val2 to <4 x i64>
@@ -1222,48 +1214,36 @@ define <4 x i32> @mul_v4i64_zero_upper_left(<4 x i32> %val1, <4 x i64> %val2) {
;
; SSE41-LABEL: mul_v4i64_zero_upper_left:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: pmuludq %xmm2, %xmm3
-; SSE41-NEXT: psrlq $32, %xmm2
-; SSE41-NEXT: pmuludq %xmm0, %xmm2
-; SSE41-NEXT: psllq $32, %xmm2
-; SSE41-NEXT: paddq %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm4, %xmm0
; SSE41-NEXT: pmuludq %xmm1, %xmm0
; SSE41-NEXT: psrlq $32, %xmm1
; SSE41-NEXT: pmuludq %xmm4, %xmm1
; SSE41-NEXT: psllq $32, %xmm1
; SSE41-NEXT: paddq %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm3, %xmm1
+; SSE41-NEXT: pmuludq %xmm2, %xmm1
+; SSE41-NEXT: psrlq $32, %xmm2
+; SSE41-NEXT: pmuludq %xmm3, %xmm2
+; SSE41-NEXT: psllq $32, %xmm2
+; SSE41-NEXT: paddq %xmm1, %xmm2
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; SSE41-NEXT: retq
;
-; AVX2-LABEL: mul_v4i64_zero_upper_left:
-; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm1
-; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsllq $32, %ymm0, %ymm0
-; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: mul_v4i64_zero_upper_left:
-; AVX512: # BB#0: # %entry
-; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX512-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
-; AVX512-NEXT: vpsrlq $32, %ymm1, %ymm1
-; AVX512-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsllq $32, %ymm0, %ymm0
-; AVX512-NEXT: vpaddq %ymm0, %ymm2, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
-; AVX512-NEXT: retq
+; AVX-LABEL: mul_v4i64_zero_upper_left:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
+; AVX-NEXT: vpsrlq $32, %ymm1, %ymm1
+; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vpsllq $32, %ymm0, %ymm0
+; AVX-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
entry:
%val1a = zext <4 x i32> %val1 to <4 x i64>
%res64 = mul <4 x i64> %val1a, %val2
@@ -1291,39 +1271,28 @@ define <4 x i32> @mul_v4i64_zero_lower(<4 x i32> %val1, <4 x i64> %val2) {
;
; SSE41-LABEL: mul_v4i64_zero_lower:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pxor %xmm4, %xmm4
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: psrlq $32, %xmm1
+; SSE41-NEXT: pmuludq %xmm1, %xmm0
+; SSE41-NEXT: psllq $32, %xmm0
; SSE41-NEXT: psrlq $32, %xmm2
-; SSE41-NEXT: pmuludq %xmm0, %xmm2
+; SSE41-NEXT: pmuludq %xmm3, %xmm2
; SSE41-NEXT: psllq $32, %xmm2
-; SSE41-NEXT: psrlq $32, %xmm1
-; SSE41-NEXT: pmuludq %xmm1, %xmm3
-; SSE41-NEXT: psllq $32, %xmm3
-; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm2[1,3]
-; SSE41-NEXT: movaps %xmm3, %xmm0
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; SSE41-NEXT: retq
;
-; AVX2-LABEL: mul_v4i64_zero_lower:
-; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm1
-; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsllq $32, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: mul_v4i64_zero_lower:
-; AVX512: # BB#0: # %entry
-; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX512-NEXT: vpsrlq $32, %ymm1, %ymm1
-; AVX512-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsllq $32, %ymm0, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
-; AVX512-NEXT: retq
+; AVX-LABEL: mul_v4i64_zero_lower:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX-NEXT: vpsrlq $32, %ymm1, %ymm1
+; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vpsllq $32, %ymm0, %ymm0
+; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
entry:
%val1a = zext <4 x i32> %val1 to <4 x i64>
%val2a = and <4 x i64> %val2, <i64 -4294967296, i64 -4294967296, i64 -4294967296, i64 -4294967296>
@@ -1361,23 +1330,24 @@ define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
;
; SSE41-LABEL: mul_v8i64_zero_upper:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pxor %xmm6, %xmm6
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm8 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm8 = xmm4[0],zero,xmm4[1],zero
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm7 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm5 = xmm3[0],zero,xmm3[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
-; SSE41-NEXT: pmuludq %xmm1, %xmm3
-; SSE41-NEXT: pmuludq %xmm0, %xmm2
-; SSE41-NEXT: pmuludq %xmm7, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero
+; SSE41-NEXT: pmuludq %xmm7, %xmm1
+; SSE41-NEXT: pmuludq %xmm6, %xmm2
+; SSE41-NEXT: pmuludq %xmm5, %xmm0
; SSE41-NEXT: pmuludq %xmm8, %xmm4
-; SSE41-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,3],xmm2[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,3],xmm3[1,3]
-; SSE41-NEXT: movaps %xmm4, %xmm0
-; SSE41-NEXT: movaps %xmm5, %xmm1
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm4[1,3]
+; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v8i64_zero_upper:
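The SSE41 rewrites above follow from pmuludq's semantics: it multiplies the even-numbered 32-bit lanes of its inputs into full 64-bit products, so a zero-extended 32x32->64 vector multiply is set up with pmovzxdq/pshufd and the high 32 bits of each product are then gathered with the shufps {1,3} lane selects. Conceptually, per pair of lanes (illustrative sketch, not part of the patch):

define <2 x i64> @widening_mul(<2 x i32> %a, <2 x i32> %b) nounwind {
  %za = zext <2 x i32> %a to <2 x i64>
  %zb = zext <2 x i32> %b to <2 x i64>
  %p  = mul <2 x i64> %za, %zb  ; maps onto pmuludq's even-lane multiply
  ret <2 x i64> %p
}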
diff --git a/test/CodeGen/X86/pointer-vector.ll b/test/CodeGen/X86/pointer-vector.ll
index 9a0271aa7f00..d5297b9c70ce 100644
--- a/test/CodeGen/X86/pointer-vector.ll
+++ b/test/CodeGen/X86/pointer-vector.ll
@@ -133,7 +133,7 @@ define <4 x i32> @ICMP0(<4 x i8*>* %p0, <4 x i8*>* %p1) nounwind {
; CHECK-NEXT: movdqa (%ecx), %xmm0
; CHECK-NEXT: pcmpgtd (%eax), %xmm0
; CHECK-NEXT: movaps {{.*#+}} xmm1 = [9,8,7,6]
-; CHECK-NEXT: blendvps {{\.LCPI.*}}, %xmm1
+; CHECK-NEXT: blendvps %xmm0, {{\.LCPI.*}}, %xmm1
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: retl
entry:
@@ -152,7 +152,7 @@ define <4 x i32> @ICMP1(<4 x i8*>* %p0, <4 x i8*>* %p1) nounwind {
; CHECK-NEXT: movdqa (%ecx), %xmm0
; CHECK-NEXT: pcmpeqd (%eax), %xmm0
; CHECK-NEXT: movaps {{.*#+}} xmm1 = [9,8,7,6]
-; CHECK-NEXT: blendvps {{\.LCPI.*}}, %xmm1
+; CHECK-NEXT: blendvps %xmm0, {{\.LCPI.*}}, %xmm1
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: retl
entry:
diff --git a/test/CodeGen/X86/pr11334.ll b/test/CodeGen/X86/pr11334.ll
index 7d3d7aaac82b..8a154653414a 100644
--- a/test/CodeGen/X86/pr11334.ll
+++ b/test/CodeGen/X86/pr11334.ll
@@ -85,15 +85,15 @@ entry:
define void @test_vector_creation() nounwind {
; SSE-LABEL: test_vector_creation:
; SSE: # BB#0:
-; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, (%rax)
+; SSE-NEXT: xorpd %xmm0, %xmm0
+; SSE-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
+; SSE-NEXT: movapd %xmm0, (%rax)
; SSE-NEXT: retq
;
; AVX-LABEL: test_vector_creation:
; AVX: # BB#0:
-; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; AVX-NEXT: vxorpd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX-NEXT: vmovaps %ymm0, (%rax)
; AVX-NEXT: vzeroupper
diff --git a/test/CodeGen/X86/pr12312.ll b/test/CodeGen/X86/pr12312.ll
index 81aaf91f2688..6575d2a73d9c 100644
--- a/test/CodeGen/X86/pr12312.ll
+++ b/test/CodeGen/X86/pr12312.ll
@@ -1,155 +1,243 @@
-; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1,-avx < %s | FileCheck %s --check-prefix SSE41
-; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx,-avx2 < %s | FileCheck %s --check-prefix AVX
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1,-avx < %s | FileCheck %s --check-prefix=SSE41
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx,-avx2 < %s | FileCheck %s --check-prefix=AVX
define i32 @veccond128(<4 x i32> %input) {
+; SSE41-LABEL: veccond128:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: ptest %xmm0, %xmm0
+; SSE41-NEXT: je .LBB0_2
+; SSE41-NEXT: # BB#1: # %if-true-block
+; SSE41-NEXT: xorl %eax, %eax
+; SSE41-NEXT: retq
+; SSE41-NEXT: .LBB0_2: # %endif-block
+; SSE41-NEXT: movl $1, %eax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: veccond128:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vptest %xmm0, %xmm0
+; AVX-NEXT: je .LBB0_2
+; AVX-NEXT: # BB#1: # %if-true-block
+; AVX-NEXT: xorl %eax, %eax
+; AVX-NEXT: retq
+; AVX-NEXT: .LBB0_2: # %endif-block
+; AVX-NEXT: movl $1, %eax
+; AVX-NEXT: retq
entry:
%0 = bitcast <4 x i32> %input to i128
%1 = icmp ne i128 %0, 0
br i1 %1, label %if-true-block, label %endif-block
-
-if-true-block: ; preds = %entry
+if-true-block:
ret i32 0
-endif-block: ; preds = %entry,
+endif-block:
ret i32 1
-; SSE41: veccond128
-; SSE41: ptest
-; SSE41: ret
-; AVX: veccond128
-; AVX: vptest %xmm{{.*}}, %xmm{{.*}}
-; AVX: ret
}
define i32 @veccond256(<8 x i32> %input) {
+; SSE41-LABEL: veccond256:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: por %xmm1, %xmm0
+; SSE41-NEXT: ptest %xmm0, %xmm0
+; SSE41-NEXT: je .LBB1_2
+; SSE41-NEXT: # BB#1: # %if-true-block
+; SSE41-NEXT: xorl %eax, %eax
+; SSE41-NEXT: retq
+; SSE41-NEXT: .LBB1_2: # %endif-block
+; SSE41-NEXT: movl $1, %eax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: veccond256:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vptest %ymm0, %ymm0
+; AVX-NEXT: je .LBB1_2
+; AVX-NEXT: # BB#1: # %if-true-block
+; AVX-NEXT: xorl %eax, %eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+; AVX-NEXT: .LBB1_2: # %endif-block
+; AVX-NEXT: movl $1, %eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
entry:
%0 = bitcast <8 x i32> %input to i256
%1 = icmp ne i256 %0, 0
br i1 %1, label %if-true-block, label %endif-block
-
-if-true-block: ; preds = %entry
+if-true-block:
ret i32 0
-endif-block: ; preds = %entry,
+endif-block:
ret i32 1
-; SSE41: veccond256
-; SSE41: por
-; SSE41: ptest
-; SSE41: ret
-; AVX: veccond256
-; AVX: vptest %ymm{{.*}}, %ymm{{.*}}
-; AVX: ret
}
define i32 @veccond512(<16 x i32> %input) {
+; SSE41-LABEL: veccond512:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: por %xmm3, %xmm1
+; SSE41-NEXT: por %xmm2, %xmm1
+; SSE41-NEXT: por %xmm0, %xmm1
+; SSE41-NEXT: ptest %xmm1, %xmm1
+; SSE41-NEXT: je .LBB2_2
+; SSE41-NEXT: # BB#1: # %if-true-block
+; SSE41-NEXT: xorl %eax, %eax
+; SSE41-NEXT: retq
+; SSE41-NEXT: .LBB2_2: # %endif-block
+; SSE41-NEXT: movl $1, %eax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: veccond512:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vptest %ymm0, %ymm0
+; AVX-NEXT: je .LBB2_2
+; AVX-NEXT: # BB#1: # %if-true-block
+; AVX-NEXT: xorl %eax, %eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+; AVX-NEXT: .LBB2_2: # %endif-block
+; AVX-NEXT: movl $1, %eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
entry:
%0 = bitcast <16 x i32> %input to i512
%1 = icmp ne i512 %0, 0
br i1 %1, label %if-true-block, label %endif-block
-
-if-true-block: ; preds = %entry
+if-true-block:
ret i32 0
-endif-block: ; preds = %entry,
+endif-block:
ret i32 1
-; SSE41: veccond512
-; SSE41: por
-; SSE41: por
-; SSE41: por
-; SSE41: ptest
-; SSE41: ret
-; AVX: veccond512
-; AVX: vorps
-; AVX: vptest %ymm{{.*}}, %ymm{{.*}}
-; AVX: ret
}
define i32 @vectest128(<4 x i32> %input) {
-entry:
- %0 = bitcast <4 x i32> %input to i128
- %1 = icmp ne i128 %0, 0
- %2 = zext i1 %1 to i32
- ret i32 %2
-; SSE41: vectest128
-; SSE41: ptest
-; SSE41: ret
-; AVX: vectest128
-; AVX: vptest %xmm{{.*}}, %xmm{{.*}}
-; AVX: ret
+; SSE41-LABEL: vectest128:
+; SSE41: # BB#0:
+; SSE41-NEXT: xorl %eax, %eax
+; SSE41-NEXT: ptest %xmm0, %xmm0
+; SSE41-NEXT: setne %al
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: vectest128:
+; AVX: # BB#0:
+; AVX-NEXT: xorl %eax, %eax
+; AVX-NEXT: vptest %xmm0, %xmm0
+; AVX-NEXT: setne %al
+; AVX-NEXT: retq
+ %t0 = bitcast <4 x i32> %input to i128
+ %t1 = icmp ne i128 %t0, 0
+ %t2 = zext i1 %t1 to i32
+ ret i32 %t2
}
define i32 @vectest256(<8 x i32> %input) {
-entry:
- %0 = bitcast <8 x i32> %input to i256
- %1 = icmp ne i256 %0, 0
- %2 = zext i1 %1 to i32
- ret i32 %2
-; SSE41: vectest256
-; SSE41: por
-; SSE41: ptest
-; SSE41: ret
-; AVX: vectest256
-; AVX: vptest %ymm{{.*}}, %ymm{{.*}}
-; AVX: ret
+; SSE41-LABEL: vectest256:
+; SSE41: # BB#0:
+; SSE41-NEXT: por %xmm1, %xmm0
+; SSE41-NEXT: xorl %eax, %eax
+; SSE41-NEXT: ptest %xmm0, %xmm0
+; SSE41-NEXT: setne %al
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: vectest256:
+; AVX: # BB#0:
+; AVX-NEXT: xorl %eax, %eax
+; AVX-NEXT: vptest %ymm0, %ymm0
+; AVX-NEXT: setne %al
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %t0 = bitcast <8 x i32> %input to i256
+ %t1 = icmp ne i256 %t0, 0
+ %t2 = zext i1 %t1 to i32
+ ret i32 %t2
}
define i32 @vectest512(<16 x i32> %input) {
-entry:
- %0 = bitcast <16 x i32> %input to i512
- %1 = icmp ne i512 %0, 0
- %2 = zext i1 %1 to i32
- ret i32 %2
-; SSE41: vectest512
-; SSE41: por
-; SSE41: por
-; SSE41: por
-; SSE41: ptest
-; SSE41: ret
-; AVX: vectest512
-; AVX: vorps
-; AVX: vptest %ymm{{.*}}, %ymm{{.*}}
-; AVX: ret
+; SSE41-LABEL: vectest512:
+; SSE41: # BB#0:
+; SSE41-NEXT: por %xmm3, %xmm1
+; SSE41-NEXT: por %xmm2, %xmm1
+; SSE41-NEXT: por %xmm0, %xmm1
+; SSE41-NEXT: xorl %eax, %eax
+; SSE41-NEXT: ptest %xmm1, %xmm1
+; SSE41-NEXT: setne %al
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: vectest512:
+; AVX: # BB#0:
+; AVX-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: xorl %eax, %eax
+; AVX-NEXT: vptest %ymm0, %ymm0
+; AVX-NEXT: setne %al
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %t0 = bitcast <16 x i32> %input to i512
+ %t1 = icmp ne i512 %t0, 0
+ %t2 = zext i1 %t1 to i32
+ ret i32 %t2
}
define i32 @vecsel128(<4 x i32> %input, i32 %a, i32 %b) {
-entry:
- %0 = bitcast <4 x i32> %input to i128
- %1 = icmp ne i128 %0, 0
- %2 = select i1 %1, i32 %a, i32 %b
- ret i32 %2
-; SSE41: vecsel128
-; SSE41: ptest
-; SSE41: ret
-; AVX: vecsel128
-; AVX: vptest %xmm{{.*}}, %xmm{{.*}}
-; AVX: ret
+; SSE41-LABEL: vecsel128:
+; SSE41: # BB#0:
+; SSE41-NEXT: ptest %xmm0, %xmm0
+; SSE41-NEXT: cmovel %esi, %edi
+; SSE41-NEXT: movl %edi, %eax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: vecsel128:
+; AVX: # BB#0:
+; AVX-NEXT: vptest %xmm0, %xmm0
+; AVX-NEXT: cmovel %esi, %edi
+; AVX-NEXT: movl %edi, %eax
+; AVX-NEXT: retq
+ %t0 = bitcast <4 x i32> %input to i128
+ %t1 = icmp ne i128 %t0, 0
+ %t2 = select i1 %t1, i32 %a, i32 %b
+ ret i32 %t2
}
define i32 @vecsel256(<8 x i32> %input, i32 %a, i32 %b) {
-entry:
- %0 = bitcast <8 x i32> %input to i256
- %1 = icmp ne i256 %0, 0
- %2 = select i1 %1, i32 %a, i32 %b
- ret i32 %2
-; SSE41: vecsel256
-; SSE41: por
-; SSE41: ptest
-; SSE41: ret
-; AVX: vecsel256
-; AVX: vptest %ymm{{.*}}, %ymm{{.*}}
-; AVX: ret
+; SSE41-LABEL: vecsel256:
+; SSE41: # BB#0:
+; SSE41-NEXT: por %xmm1, %xmm0
+; SSE41-NEXT: ptest %xmm0, %xmm0
+; SSE41-NEXT: cmovel %esi, %edi
+; SSE41-NEXT: movl %edi, %eax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: vecsel256:
+; AVX: # BB#0:
+; AVX-NEXT: vptest %ymm0, %ymm0
+; AVX-NEXT: cmovel %esi, %edi
+; AVX-NEXT: movl %edi, %eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %t0 = bitcast <8 x i32> %input to i256
+ %t1 = icmp ne i256 %t0, 0
+ %t2 = select i1 %t1, i32 %a, i32 %b
+ ret i32 %t2
}
define i32 @vecsel512(<16 x i32> %input, i32 %a, i32 %b) {
-entry:
- %0 = bitcast <16 x i32> %input to i512
- %1 = icmp ne i512 %0, 0
- %2 = select i1 %1, i32 %a, i32 %b
- ret i32 %2
-; SSE41: vecsel512
-; SSE41: por
-; SSE41: por
-; SSE41: por
-; SSE41: ptest
-; SSE41: ret
-; AVX: vecsel512
-; AVX: vorps
-; AVX: vptest %ymm{{.*}}, %ymm{{.*}}
-; AVX: ret
+; SSE41-LABEL: vecsel512:
+; SSE41: # BB#0:
+; SSE41-NEXT: por %xmm3, %xmm1
+; SSE41-NEXT: por %xmm2, %xmm1
+; SSE41-NEXT: por %xmm0, %xmm1
+; SSE41-NEXT: ptest %xmm1, %xmm1
+; SSE41-NEXT: cmovel %esi, %edi
+; SSE41-NEXT: movl %edi, %eax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: vecsel512:
+; AVX: # BB#0:
+; AVX-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vptest %ymm0, %ymm0
+; AVX-NEXT: cmovel %esi, %edi
+; AVX-NEXT: movl %edi, %eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %t0 = bitcast <16 x i32> %input to i512
+ %t1 = icmp ne i512 %t0, 0
+ %t2 = select i1 %t1, i32 %a, i32 %b
+ ret i32 %t2
}
+
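All of these tests reduce a wide integer compare-with-zero to ptest: `ptest %x, %x` sets ZF exactly when every bit of the vector is zero, so an i128 test is a single ptest, an i256 test on SSE4.1 first ORs the two halves with por (AVX can vptest a full ymm), and i512 ORs additional halves down first. A minimal sketch of the core pattern (illustrative, not part of the patch; the function name is invented):

define i1 @vec_is_nonzero(<4 x i32> %v) nounwind {
  %i = bitcast <4 x i32> %v to i128
  %c = icmp ne i128 %i, 0       ; selected as ptest + setne with SSE4.1
  ret i1 %c
}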
diff --git a/test/CodeGen/X86/pr14204.ll b/test/CodeGen/X86/pr14204.ll
index 3b60ad885d55..ab467d6ad96d 100644
--- a/test/CodeGen/X86/pr14204.ll
+++ b/test/CodeGen/X86/pr14204.ll
@@ -1,9 +1,6 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s
-; FIXME: vpmovsxwd should be generated instead of vpmovzxwd followed by
-; SLL/SRA.
-
define <8 x i32> @foo(<8 x i1> %bar) nounwind readnone {
; CHECK-LABEL: foo:
; CHECK: # BB#0:
@@ -11,7 +8,6 @@ define <8 x i32> @foo(<8 x i1> %bar) nounwind readnone {
; CHECK-NEXT: vpslld $31, %ymm0, %ymm0
; CHECK-NEXT: vpsrad $31, %ymm0, %ymm0
; CHECK-NEXT: retq
-;
%s = sext <8 x i1> %bar to <8 x i32>
ret <8 x i32> %s
}
diff --git a/test/CodeGen/X86/pr14314.ll b/test/CodeGen/X86/pr14314.ll
index 0832702244e5..10733a476995 100644
--- a/test/CodeGen/X86/pr14314.ll
+++ b/test/CodeGen/X86/pr14314.ll
@@ -1,13 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-pc-linux -mcpu=corei7 | FileCheck %s
define i64 @atomicSub(i64* %a, i64 %b) nounwind {
+; CHECK-LABEL: atomicSub:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: pushl %ebp
+; CHECK-NEXT: pushl %ebx
+; CHECK-NEXT: pushl %edi
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edi
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; CHECK-NEXT: movl (%ebp), %eax
+; CHECK-NEXT: movl 4(%ebp), %edx
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB0_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movl %eax, %ebx
+; CHECK-NEXT: subl %edi, %ebx
+; CHECK-NEXT: movl %edx, %ecx
+; CHECK-NEXT: sbbl %esi, %ecx
+; CHECK-NEXT: lock cmpxchg8b (%ebp)
+; CHECK-NEXT: jne .LBB0_1
+; CHECK-NEXT: # BB#2: # %atomicrmw.end
+; CHECK-NEXT: popl %esi
+; CHECK-NEXT: popl %edi
+; CHECK-NEXT: popl %ebx
+; CHECK-NEXT: popl %ebp
+; CHECK-NEXT: retl
entry:
%0 = atomicrmw sub i64* %a, i64 %b seq_cst
ret i64 %0
-; CHECK: atomicSub
-; CHECK: movl %eax, %ebx
-; CHECK: subl {{%[a-z]+}}, %ebx
-; CHECK: movl %edx, %ecx
-; CHECK: sbbl {{%[a-z]+}}, %ecx
-; CHECK: ret
}
diff --git a/test/CodeGen/X86/pr16031.ll b/test/CodeGen/X86/pr16031.ll
index dc16fd9671ad..01bc38a243a5 100644
--- a/test/CodeGen/X86/pr16031.ll
+++ b/test/CodeGen/X86/pr16031.ll
@@ -1,20 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mcpu=corei7-avx -enable-misched=false | FileCheck %s
-; CHECK-LABEL: main:
-; CHECK: pushl %esi
-; CHECK-NEXT: testb $1, 8(%esp)
-; CHECK-NEXT: movl $-12, %eax
-; CHECK-NEXT: movl $-1, %edx
-; CHECK-NEXT: cmovel %edx, %eax
-; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: movl %eax, %esi
-; CHECK-NEXT: addl $-1, %esi
-; CHECK-NEXT: movl $-1, %esi
-; CHECK-NEXT: adcl $-1, %esi
-; CHECK-NEXT: cmovsl %ecx, %eax
-; CHECK-NEXT: cmovsl %ecx, %edx
-; CHECK-NEXT: popl %esi
define i64 @main(i1 %tobool1) nounwind {
+; CHECK-LABEL: main:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: testb $1, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl $-12, %eax
+; CHECK-NEXT: movl $-1, %edx
+; CHECK-NEXT: cmovel %edx, %eax
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: movl %eax, %esi
+; CHECK-NEXT: addl $-1, %esi
+; CHECK-NEXT: movl $-1, %esi
+; CHECK-NEXT: adcl $-1, %esi
+; CHECK-NEXT: cmovsl %ecx, %eax
+; CHECK-NEXT: cmovsl %ecx, %edx
+; CHECK-NEXT: popl %esi
+; CHECK-NEXT: retl
entry:
%0 = zext i1 %tobool1 to i32
%. = xor i32 %0, 1
diff --git a/test/CodeGen/X86/pr17764.ll b/test/CodeGen/X86/pr17764.ll
index a44248ff3f59..ccfdb5b58344 100644
--- a/test/CodeGen/X86/pr17764.ll
+++ b/test/CodeGen/X86/pr17764.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s
define <16 x i16> @foo(<16 x i1> %mask, <16 x i16> %x, <16 x i16> %y) {
@@ -9,7 +9,6 @@ define <16 x i16> @foo(<16 x i1> %mask, <16 x i16> %x, <16 x i16> %y) {
; CHECK-NEXT: vpsraw $15, %ymm0, %ymm0
; CHECK-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
; CHECK-NEXT: retq
-;
%ret = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> %y
ret <16 x i16> %ret
}
diff --git a/test/CodeGen/X86/pr18014.ll b/test/CodeGen/X86/pr18014.ll
index dc9d53fff173..bb3b9c23f1e3 100644
--- a/test/CodeGen/X86/pr18014.ll
+++ b/test/CodeGen/X86/pr18014.ll
@@ -9,7 +9,7 @@ define <4 x i32> @foo(<4 x i32>* %p, <4 x i1> %cond, <4 x i32> %v1, <4 x i32> %v
; CHECK: # BB#0:
; CHECK-NEXT: pslld $31, %xmm0
; CHECK-NEXT: psrad $31, %xmm0
-; CHECK-NEXT: blendvps %xmm1, %xmm2
+; CHECK-NEXT: blendvps %xmm0, %xmm1, %xmm2
; CHECK-NEXT: paddd %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm2, (%rdi)
; CHECK-NEXT: movdqa %xmm1, %xmm0
diff --git a/test/CodeGen/X86/pr18023.ll b/test/CodeGen/X86/pr18023.ll
deleted file mode 100644
index c7ea20c281ba..000000000000
--- a/test/CodeGen/X86/pr18023.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s -mtriple x86_64-apple-macosx10.9.0 | FileCheck %s
-; PR18023
-
-; CHECK: movabsq $4294967296, %rcx
-; CHECK: movq %rcx, (%rax)
-; CHECK: movl $1, 4(%rax)
-; CHECK: movl $0, 4(%rax)
-; CHECK: movq $1, 4(%rax)
-
-@c = common global i32 0, align 4
-@a = common global [3 x i32] zeroinitializer, align 4
-@b = common global i32 0, align 4
-@.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
-
-define void @func() {
- store i32 1, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 1), align 4
- store i32 0, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 0), align 4
- %1 = load volatile i32, i32* @b, align 4
- store i32 1, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 1), align 4
- store i32 0, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 1), align 4
- %2 = load volatile i32, i32* @b, align 4
- store i32 1, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 1), align 4
- store i32 0, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 2), align 4
- %3 = load volatile i32, i32* @b, align 4
- store i32 3, i32* @c, align 4
- %4 = load i32, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 1), align 4
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %4)
- ret void
-}
-
-declare i32 @printf(i8*, ...)
diff --git a/test/CodeGen/X86/pr18344.ll b/test/CodeGen/X86/pr18344.ll
new file mode 100644
index 000000000000..15bf91031ee8
--- /dev/null
+++ b/test/CodeGen/X86/pr18344.ll
@@ -0,0 +1,89 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64
+
+%v4_varying_complex = type { <4 x float>, <4 x float> }
+
+define void @FFT(%v4_varying_complex* noalias nocapture %destination, float* noalias %re, <4 x i32>* noalias nocapture %ptr_cast_for_load) nounwind {
+; X86-LABEL: FFT:
+; X86: # BB#0: # %begin
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movdqu (%edx), %xmm0
+; X86-NEXT: pslld $4, %xmm0
+; X86-NEXT: movd %xmm0, %edx
+; X86-NEXT: pextrd $1, %xmm0, %esi
+; X86-NEXT: pextrd $2, %xmm0, %edi
+; X86-NEXT: pextrd $3, %xmm0, %ebx
+; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; X86-NEXT: movss %xmm0, 128(%eax)
+; X86-NEXT: movss %xmm1, 164(%eax)
+; X86-NEXT: movss %xmm2, 200(%eax)
+; X86-NEXT: movss %xmm3, 236(%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: retl
+;
+; X64-LABEL: FFT:
+; X64: # BB#0: # %begin
+; X64-NEXT: movdqu (%rdx), %xmm0
+; X64-NEXT: pslld $4, %xmm0
+; X64-NEXT: movd %xmm0, %rax
+; X64-NEXT: movslq %eax, %r8
+; X64-NEXT: sarq $32, %rax
+; X64-NEXT: pextrq $1, %xmm0, %rdx
+; X64-NEXT: movslq %edx, %rcx
+; X64-NEXT: sarq $32, %rdx
+; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; X64-NEXT: movss %xmm0, 128(%rdi)
+; X64-NEXT: movss %xmm1, 164(%rdi)
+; X64-NEXT: movss %xmm2, 200(%rdi)
+; X64-NEXT: movss %xmm3, 236(%rdi)
+; X64-NEXT: retq
+begin:
+ %ptr_masked_load79 = load <4 x i32>, <4 x i32>* %ptr_cast_for_load, align 4
+ %mul__bitReversedProgramIndex_load = shl <4 x i32> %ptr_masked_load79, <i32 4, i32 4, i32 4, i32 4>
+
+ %offset32_1 = extractelement <4 x i32> %mul__bitReversedProgramIndex_load, i32 0
+ %ptroffset_1 = sext i32 %offset32_1 to i64
+ %offset32_2 = extractelement <4 x i32> %mul__bitReversedProgramIndex_load, i32 1
+ %ptroffset_2 = sext i32 %offset32_2 to i64
+ %offset32_3 = extractelement <4 x i32> %mul__bitReversedProgramIndex_load, i32 2
+ %ptroffset_3 = sext i32 %offset32_3 to i64
+ %offset32_4 = extractelement <4 x i32> %mul__bitReversedProgramIndex_load, i32 3
+ %ptroffset_4 = sext i32 %offset32_4 to i64
+
+ %ptrcast_1 = getelementptr float, float* %re, i64 %ptroffset_1
+ %val_1 = load float, float* %ptrcast_1, align 4
+ %ptrcast_2 = getelementptr float, float* %re, i64 %ptroffset_2
+ %val_2 = load float, float* %ptrcast_2, align 4
+ %ptrcast_3 = getelementptr float, float* %re, i64 %ptroffset_3
+ %val_3 = load float, float* %ptrcast_3, align 4
+ %ptrcast_4 = getelementptr float, float* %re, i64 %ptroffset_4
+ %val_4 = load float, float* %ptrcast_4, align 4
+
+ %destination_load_ptr2int_2void = bitcast %v4_varying_complex* %destination to i8*
+ %ptrcast1_1 = getelementptr inbounds %v4_varying_complex, %v4_varying_complex* %destination, i64 4, i32 0, i64 0
+ store float %val_1, float* %ptrcast1_1, align 4
+ %finalptr_2 = getelementptr i8, i8* %destination_load_ptr2int_2void, i64 164
+ %ptrcast1_2 = bitcast i8* %finalptr_2 to float*
+ store float %val_2, float* %ptrcast1_2, align 4
+ %finalptr_3 = getelementptr i8, i8* %destination_load_ptr2int_2void, i64 200
+ %ptrcast1_3 = bitcast i8* %finalptr_3 to float*
+ store float %val_3, float* %ptrcast1_3, align 4
+ %finalptr_4 = getelementptr i8, i8* %destination_load_ptr2int_2void, i64 236
+ %ptrcast1_4 = bitcast i8* %finalptr_4 to float*
+ store float %val_4, float* %ptrcast1_4, align 4
+ ret void
+}
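In the X64 checks above, each 64-bit transfer out of %xmm0 (movd/pextrq) carries two packed i32 offsets, which are then split with movslq (sign-extend the low half) and sarq $32 (the high half, already sign-extended in place). The scalar step being vectorized is roughly the following (illustrative sketch; the function name is invented):

define i64 @lane_byte_offset(<4 x i32> %idx) nounwind {
  %scaled = shl <4 x i32> %idx, <i32 4, i32 4, i32 4, i32 4>  ; byte offset = index * 16
  %lane   = extractelement <4 x i32> %scaled, i32 1
  %off    = sext i32 %lane to i64
  ret i64 %off
}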
diff --git a/test/CodeGen/X86/pr22338.ll b/test/CodeGen/X86/pr22338.ll
new file mode 100644
index 000000000000..e0645d1ef551
--- /dev/null
+++ b/test/CodeGen/X86/pr22338.ll
@@ -0,0 +1,57 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=X64
+
+define i32 @fn() {
+; X86-LABEL: fn:
+; X86: # BB#0: # %entry
+; X86-NEXT: cmpl $1, %eax
+; X86-NEXT: sete %cl
+; X86-NEXT: movl $-1, %eax
+; X86-NEXT: jne .LBB0_2
+; X86-NEXT: # BB#1: # %entry
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: .LBB0_2: # %entry
+; X86-NEXT: addb %cl, %cl
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: .p2align 4, 0x90
+; X86-NEXT: .LBB0_3: # %bb1
+; X86-NEXT: # =>This Inner Loop Header: Depth=1
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB0_3
+; X86-NEXT: # BB#4: # %bb2
+; X86-NEXT: retl
+;
+; X64-LABEL: fn:
+; X64: # BB#0: # %entry
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: cmpl $1, %eax
+; X64-NEXT: sete %cl
+; X64-NEXT: movl $-1, %eax
+; X64-NEXT: cmovel %edx, %eax
+; X64-NEXT: addb %cl, %cl
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: .p2align 4, 0x90
+; X64-NEXT: .LBB0_1: # %bb1
+; X64-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB0_1
+; X64-NEXT: # BB#2: # %bb2
+; X64-NEXT: retq
+entry:
+ %cmp1 = icmp ne i32 undef, 1
+ %cmp2 = icmp eq i32 undef, 1
+ %sel1 = select i1 %cmp1, i32 0, i32 2
+ %sel2 = select i1 %cmp2, i32 2, i32 0
+ %sext = sext i1 %cmp1 to i32
+ %shl1 = shl i32 %sext, %sel1
+ %shl2 = shl i32 %sext, %sel2
+ %tobool = icmp eq i32 %shl1, 0
+ br label %bb1
+
+bb1: ; preds = %bb1, %entry
+ br i1 %tobool, label %bb1, label %bb2
+
+bb2: ; preds = %bb1
+ ret i32 %shl2
+}
diff --git a/test/CodeGen/X86/pr26350.ll b/test/CodeGen/X86/pr26350.ll
index 6e87cb3e8b7a..5ba5862413b5 100644
--- a/test/CodeGen/X86/pr26350.ll
+++ b/test/CodeGen/X86/pr26350.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -disable-constant-hoisting < %s | FileCheck %s
target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
target triple = "i386-unknown-linux-gnu"
@@ -5,6 +6,18 @@ target triple = "i386-unknown-linux-gnu"
@d = global i32 8, align 4
define i32 @main() {
+; CHECK-LABEL: main:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movl d, %eax
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: shrl $31, %ecx
+; CHECK-NEXT: addl %eax, %eax
+; CHECK-NEXT: andl $16, %eax
+; CHECK-NEXT: cmpl $-1, %eax
+; CHECK-NEXT: sbbl $0, %ecx
+; CHECK-NEXT: setb %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: retl
entry:
%load = load i32, i32* @d, align 4
%conv1 = zext i32 %load to i64
@@ -14,8 +27,3 @@ entry:
%zext = zext i1 %cmp to i32
ret i32 %zext
}
-; CHECK: main:
-; CHECK: movl d, %[[load:.*]]
-; CHECK: movl %[[load]], %[[copy:.*]]
-; CHECK: shrl $31, %[[copy]]
-; CHECK: addl %[[load]], %[[load]]
diff --git a/test/CodeGen/X86/pr2656.ll b/test/CodeGen/X86/pr2656.ll
index 3005c581866f..c54ae3d35029 100644
--- a/test/CodeGen/X86/pr2656.ll
+++ b/test/CodeGen/X86/pr2656.ll
@@ -1,9 +1,11 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
; PR2656
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i686-apple-darwin9.4.0"
- %struct.anon = type <{ float, float }>
+
+%struct.anon = type <{ float, float }>
@.str = internal constant [17 x i8] c"pt: %.0f, %.0f\0A\00\00" ; <[17 x i8]*> [#uses=1]
; We cannot fold either stack load into an 'xor' instruction because that
@@ -13,12 +15,21 @@ target triple = "i686-apple-darwin9.4.0"
define void @foo(%struct.anon* byval %p) nounwind {
; CHECK-LABEL: foo:
-; CHECK: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: subl $28, %esp
+; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: movaps {{.*#+}} xmm2 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
; CHECK-NEXT: xorps %xmm2, %xmm0
; CHECK-NEXT: cvtss2sd %xmm0, %xmm0
; CHECK-NEXT: xorps %xmm2, %xmm1
+; CHECK-NEXT: cvtss2sd %xmm1, %xmm1
+; CHECK-NEXT: movsd %xmm1, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl $_.str, (%esp)
+; CHECK-NEXT: calll _printf
+; CHECK-NEXT: addl $28, %esp
+; CHECK-NEXT: retl
entry:
%tmp = getelementptr %struct.anon, %struct.anon* %p, i32 0, i32 0 ; <float*> [#uses=1]
%tmp1 = load float, float* %tmp ; <float> [#uses=1]
@@ -40,13 +51,19 @@ declare i32 @printf(...)
define double @PR22371(double %x) {
; CHECK-LABEL: PR22371:
-; CHECK: movsd 16(%esp), %xmm0
-; CHECK-NEXT: andps LCPI1_0, %xmm0
-; CHECK-NEXT: movlps %xmm0, (%esp)
+; CHECK: ## BB#0:
+; CHECK-NEXT: subl $12, %esp
+; CHECK-NEXT: Lcfi0:
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: andps LCPI1_0, %xmm0
+; CHECK-NEXT: movlps %xmm0, (%esp)
+; CHECK-NEXT: fldl (%esp)
+; CHECK-NEXT: addl $12, %esp
+; CHECK-NEXT: retl
%call = tail call double @fabs(double %x) #0
ret double %call
}
declare double @fabs(double) #0
attributes #0 = { readnone }
-
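The xorps in this test is the usual SSE negation idiom: flip the sign bit with a constant mask. The truncated comment at the top of this file makes the point that the fold is blocked: xorps reads a full 16-byte memory operand, so folding would widen what must stay a 4-byte scalar load. The idiom in isolation (illustrative, not part of the patch; the function name is invented):

define float @fneg_sketch(float %x) nounwind {
  %n = fsub float -0.000000e+00, %x  ; lowers to xorps with the 0x80000000 sign mask
  ret float %n
}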
diff --git a/test/CodeGen/X86/pr27591.ll b/test/CodeGen/X86/pr27591.ll
index 3331a9354fcf..3ff6c096d097 100644
--- a/test/CodeGen/X86/pr27591.ll
+++ b/test/CodeGen/X86/pr27591.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -o - -O0 < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
@@ -8,11 +9,12 @@ define void @test1(i32 %x) #0 {
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: setne %al
-; CHECK-NEXT: # implicit-def: %EDI
+; CHECK-NEXT: # implicit-def: %EDI
; CHECK-NEXT: movb %al, %dil
; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k0
-; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: kmovd %edi, %k0
+; CHECK-NEXT: kmovd %k0, %edi
+; CHECK-NEXT: movb %dil, %al
; CHECK-NEXT: andb $1, %al
; CHECK-NEXT: movzbl %al, %edi
; CHECK-NEXT: callq callee1
@@ -30,11 +32,11 @@ define void @test2(i32 %x) #0 {
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: setne %al
-; CHECK-NEXT: # implicit-def: %EDI
+; CHECK-NEXT: # implicit-def: %EDI
; CHECK-NEXT: movb %al, %dil
; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k0
-; CHECK-NEXT: kmovw %k0, %edi
+; CHECK-NEXT: kmovd %edi, %k0
+; CHECK-NEXT: kmovd %k0, %edi
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: movb %dil, %al
; CHECK-NEXT: xorl %edi, %edi
diff --git a/test/CodeGen/X86/pr28173.ll b/test/CodeGen/X86/pr28173.ll
index db7d3335215d..d9622b99bd98 100644
--- a/test/CodeGen/X86/pr28173.ll
+++ b/test/CodeGen/X86/pr28173.ll
@@ -5,9 +5,6 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-; Note that the kmovs should really *not* appear in the output, this is an
-; artifact of the current poor lowering. This is tracked by PR28175.
-
define i64 @foo64(i1 zeroext %i) #0 {
; CHECK-LABEL: foo64:
; CHECK: # BB#0:
@@ -43,25 +40,13 @@ end:
ret i16 %v
}
-; This code is still not optimal
define i16 @foo16_1(i1 zeroext %i, i32 %j) #0 {
-; KNL-LABEL: foo16_1:
-; KNL: # BB#0:
-; KNL-NEXT: kmovw %edi, %k0
-; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: orl $2, %eax
-; KNL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; KNL-NEXT: retq
-;
-; SKX-LABEL: foo16_1:
-; SKX: # BB#0:
-; SKX-NEXT: kmovd %edi, %k0
-; SKX-NEXT: kmovw %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: orl $2, %eax
-; SKX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; SKX-NEXT: retq
+; CHECK-LABEL: foo16_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: orl $2, %edi
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: retq
br label %bb
bb:
diff --git a/test/CodeGen/X86/pr29112.ll b/test/CodeGen/X86/pr29112.ll
index 8bf704835ae2..8c970b3d4771 100644
--- a/test/CodeGen/X86/pr29112.ll
+++ b/test/CodeGen/X86/pr29112.ll
@@ -24,11 +24,11 @@ define <4 x float> @bar(<4 x float>* %a1p, <4 x float>* %a2p, <4 x float> %a3, <
; CHECK-NEXT: vextractf32x4 $2, %zmm3, %xmm4
; CHECK-NEXT: vblendps {{.*#+}} xmm4 = xmm0[0,1,2],xmm4[3]
; CHECK-NEXT: vpermilps {{.*#+}} xmm5 = xmm2[3,1,2,3]
-; CHECK-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[2,3]
+; CHECK-NEXT: vunpcklps {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
; CHECK-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm2[1],xmm5[3]
; CHECK-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1,2],xmm3[1]
; CHECK-NEXT: vmovshdup {{.*#+}} xmm7 = xmm8[1,1,3,3]
-; CHECK-NEXT: vinsertps {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[2,3]
+; CHECK-NEXT: vunpcklps {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
; CHECK-NEXT: vinsertps {{.*#+}} xmm10 = xmm7[0,1],xmm2[1],xmm7[3]
; CHECK-NEXT: vblendps {{.*#+}} xmm7 = xmm10[0,1,2],xmm3[3]
; CHECK-NEXT: vblendps {{.*#+}} xmm11 = xmm0[0,1,2],xmm3[3]
@@ -60,6 +60,7 @@ define <4 x float> @bar(<4 x float>* %a1p, <4 x float>* %a2p, <4 x float> %a3, <
; CHECK-NEXT: vmovaps %xmm8, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovaps %xmm9, (%rsp)
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq foo
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vaddps {{[0-9]+}}(%rsp), %xmm1, %xmm1 # 16-byte Folded Reload
diff --git a/test/CodeGen/X86/pr29170.ll b/test/CodeGen/X86/pr29170.ll
index d8e27557ab93..ecb4c9785365 100644
--- a/test/CodeGen/X86/pr29170.ll
+++ b/test/CodeGen/X86/pr29170.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s | FileCheck %s
target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
@@ -5,10 +6,26 @@ target triple = "i386-unknown-linux-gnu"
@b = global i16 0, align 4
-; CHECK-LABEL: @main
-; CHECK: cmpl
-; CHECK: sbbl
define i32 @main() {
+; CHECK-LABEL: main:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: testb %al, %al
+; CHECK-NEXT: jne .LBB0_3
+; CHECK-NEXT: # BB#1: # %go
+; CHECK-NEXT: movl $-1, %ecx
+; CHECK-NEXT: movsbl b, %edx
+; CHECK-NEXT: notl %ecx
+; CHECK-NEXT: movzwl %dx, %edx
+; CHECK-NEXT: cmpl $-1, %edx
+; CHECK-NEXT: sbbl %ecx, %eax
+; CHECK-NEXT: jge .LBB0_3
+; CHECK-NEXT: # BB#2: # %if.then
+; CHECK-NEXT: movl $42, %eax
+; CHECK-NEXT: retl
+; CHECK-NEXT: .LBB0_3: # %if.else
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retl
entry:
%true = icmp eq i32 0, 0
%const = bitcast i64 -4294967296 to i64
diff --git a/test/CodeGen/X86/pr30284.ll b/test/CodeGen/X86/pr30284.ll
index cb2de00d436a..7ab1b729ea04 100644
--- a/test/CodeGen/X86/pr30284.ll
+++ b/test/CodeGen/X86/pr30284.ll
@@ -7,11 +7,11 @@ define void @f_f___un_3C_unf_3E_un_3C_unf_3E_() {
; CHECK-NEXT: vmovapd 0, %zmm0
; CHECK-NEXT: vmovapd 64, %zmm1
; CHECK-NEXT: vmovapd {{.*#+}} zmm2 = [0,16,0,16,0,16,0,16,0,16,0,16,0,16,0,16]
-; CHECK-NEXT: kshiftrw $8, %k0, %k1
-; CHECK-NEXT: vorpd %zmm2, %zmm1, %zmm1 {%k1}
; CHECK-NEXT: vorpd %zmm2, %zmm0, %zmm0 {%k1}
-; CHECK-NEXT: vmovapd %zmm0, 0
+; CHECK-NEXT: vorpd %zmm2, %zmm1, %zmm1 {%k1}
; CHECK-NEXT: vmovapd %zmm1, 64
+; CHECK-NEXT: vmovapd %zmm0, 0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retl
%a_load22 = load <16 x i64>, <16 x i64>* null, align 1
%bitop = or <16 x i64> %a_load22, <i64 68719476736, i64 68719476736, i64 68719476736, i64 68719476736, i64 68719476736, i64 68719476736, i64 68719476736, i64 68719476736, i64 68719476736, i64 68719476736, i64 68719476736, i64 68719476736, i64 68719476736, i64 68719476736, i64 68719476736, i64 68719476736>
diff --git a/test/CodeGen/X86/pr30430.ll b/test/CodeGen/X86/pr30430.ll
index 6aa4c91c4a80..14d81f14fc32 100644
--- a/test/CodeGen/X86/pr30430.ll
+++ b/test/CodeGen/X86/pr30430.ll
@@ -30,14 +30,6 @@ define <16 x float> @makefloat(float %f1, float %f2, float %f3, float %f4, float
; CHECK-NEXT: vmovss %xmm5, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovss %xmm6, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovss %xmm7, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovss %xmm15, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovss %xmm14, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovss %xmm13, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovss %xmm12, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovss %xmm11, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovss %xmm10, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovss %xmm9, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovss %xmm8, (%rsp)
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -46,14 +38,14 @@ define <16 x float> @makefloat(float %f1, float %f2, float %f3, float %f4, float
; CHECK-NEXT: vmovss {{.*#+}} xmm5 = mem[0],zero,zero,zero
; CHECK-NEXT: vmovss {{.*#+}} xmm6 = mem[0],zero,zero,zero
; CHECK-NEXT: vmovss {{.*#+}} xmm7 = mem[0],zero,zero,zero
-; CHECK-NEXT: vmovss {{.*#+}} xmm8 = mem[0],zero,zero,zero
-; CHECK-NEXT: vmovss {{.*#+}} xmm9 = mem[0],zero,zero,zero
-; CHECK-NEXT: vmovss {{.*#+}} xmm10 = mem[0],zero,zero,zero
-; CHECK-NEXT: vmovss {{.*#+}} xmm11 = mem[0],zero,zero,zero
-; CHECK-NEXT: vmovss {{.*#+}} xmm12 = mem[0],zero,zero,zero
-; CHECK-NEXT: vmovss {{.*#+}} xmm13 = mem[0],zero,zero,zero
-; CHECK-NEXT: vmovss {{.*#+}} xmm14 = mem[0],zero,zero,zero
-; CHECK-NEXT: vmovss {{.*#+}} xmm15 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm16 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm17 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm18 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm19 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm20 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm21 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm22 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm23 = mem[0],zero,zero,zero
; CHECK-NEXT: vmovss %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovss %xmm1, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovss %xmm2, {{[0-9]+}}(%rsp)
@@ -62,14 +54,14 @@ define <16 x float> @makefloat(float %f1, float %f2, float %f3, float %f4, float
; CHECK-NEXT: vmovss %xmm5, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovss %xmm6, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovss %xmm7, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovss %xmm8, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovss %xmm9, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovss %xmm10, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovss %xmm11, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovss %xmm12, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovss %xmm13, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovss %xmm14, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovss %xmm15, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: vmovss %xmm16, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: vmovss %xmm17, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: vmovss %xmm18, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: vmovss %xmm19, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: vmovss %xmm20, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: vmovss %xmm21, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: vmovss %xmm22, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: vmovss %xmm23, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
@@ -104,11 +96,19 @@ define <16 x float> @makefloat(float %f1, float %f2, float %f3, float %f4, float
; CHECK-NEXT: # implicit-def: %YMM3
; CHECK-NEXT: vmovaps %xmm1, %xmm3
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm3
-; CHECK-NEXT: # implicit-def: %ZMM16
-; CHECK-NEXT: vmovaps %zmm3, %zmm16
-; CHECK-NEXT: vinsertf64x4 $1, %ymm2, %zmm16, %zmm16
-; CHECK-NEXT: vmovaps %zmm16, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: # implicit-def: %ZMM24
+; CHECK-NEXT: vmovaps %zmm3, %zmm24
+; CHECK-NEXT: vinsertf64x4 $1, %ymm2, %zmm24, %zmm24
+; CHECK-NEXT: vmovaps %zmm24, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm0
+; CHECK-NEXT: vmovss %xmm15, {{[0-9]+}}(%rsp) # 4-byte Spill
+; CHECK-NEXT: vmovss %xmm8, {{[0-9]+}}(%rsp) # 4-byte Spill
+; CHECK-NEXT: vmovss %xmm9, {{[0-9]+}}(%rsp) # 4-byte Spill
+; CHECK-NEXT: vmovss %xmm10, {{[0-9]+}}(%rsp) # 4-byte Spill
+; CHECK-NEXT: vmovss %xmm11, {{[0-9]+}}(%rsp) # 4-byte Spill
+; CHECK-NEXT: vmovss %xmm12, {{[0-9]+}}(%rsp) # 4-byte Spill
+; CHECK-NEXT: vmovss %xmm13, {{[0-9]+}}(%rsp) # 4-byte Spill
+; CHECK-NEXT: vmovss %xmm14, (%rsp) # 4-byte Spill
; CHECK-NEXT: movq %rbp, %rsp
; CHECK-NEXT: popq %rbp
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/pr30562.ll b/test/CodeGen/X86/pr30562.ll
new file mode 100644
index 000000000000..dda736a1a183
--- /dev/null
+++ b/test/CodeGen/X86/pr30562.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+
+define i32 @foo(i64* nocapture %perm, i32 %n) {
+entry:
+ br label %body
+
+body:
+; CHECK-LABEL: foo:
+; CHECK: pslldq $8, %xmm0
+ %vec.ind = phi <2 x i64> [ <i64 0, i64 1>, %entry ], [ <i64 2, i64 3>, %body ]
+ %l13 = extractelement <2 x i64> %vec.ind, i32 %n
+ %l14 = getelementptr inbounds i64, i64* %perm, i64 %l13
+ %l15 = bitcast i64* %l14 to <2 x i64>*
+ store <2 x i64> %vec.ind, <2 x i64>* %l15, align 8
+ %niter.ncmp.3 = icmp eq i64 %l13, 0
+ br i1 %niter.ncmp.3, label %exit, label %body
+
+exit:
+ ret i32 %n
+
+}
+
diff --git a/test/CodeGen/X86/pr30693.ll b/test/CodeGen/X86/pr30693.ll
deleted file mode 100644
index 834365911ed5..000000000000
--- a/test/CodeGen/X86/pr30693.ll
+++ /dev/null
@@ -1,147 +0,0 @@
-; PR30693
-; RUN: llc < %s | FileCheck %s
-
-; CHECK: .p2align 2
-; CHECK-NEXT: .LCPI0_0:
-; CHECK-NOT: vmovaps .LCPI0_0(%rip),
-; CHECK: .cfi_endproc
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-@var_35 = external local_unnamed_addr global i32, align 4
-@var_14 = external local_unnamed_addr global i16, align 2
-
-; Function Attrs: uwtable
-define void @_Z3foov() local_unnamed_addr #0 {
-entry:
- %0 = load i32, i32* @var_35, align 4
- %1 = load i16, i16* @var_14, align 2
- %conv34 = zext i16 %1 to i64
- %conv37 = ashr exact i64 undef, 32
- %sub316 = add i16 undef, -7198
- %cmp339981 = icmp sgt i32 undef, 0
- %cmp401989 = icmp sgt i32 undef, 0
- %cmp443994 = icmp sgt i32 undef, 0
- %lcmp.mod = icmp eq i64 undef, 0
- %broadcast.splat1461 = shufflevector <32 x i16> undef, <32 x i16> undef, <32 x i32> zeroinitializer
- %broadcast.splat1357 = shufflevector <32 x i16> undef, <32 x i16> undef, <32 x i32> zeroinitializer
- %broadcast.splat1435 = shufflevector <32 x i16> undef, <32 x i16> undef, <32 x i32> zeroinitializer
- %broadcast.splat1409 = shufflevector <32 x i16> undef, <32 x i16> undef, <32 x i32> zeroinitializer
- br label %for.cond11.preheader
-
-for.cond11.preheader: ; preds = %for.cond.cleanup477.loopexit, %entry
- %div = sdiv i32 0, 0
- %mul31 = mul nsw i32 %div, %0
- %conv32 = sext i32 %mul31 to i64
- %div40 = sdiv i64 0, 0
- %div41 = sdiv i32 0, 0
- %conv42 = sext i32 %div41 to i64
- %mul43 = mul nsw i64 %conv32, %conv34
- %mul44 = mul i64 %mul43, %div40
- %mul45 = mul i64 %mul44, %conv37
- %mul46 = mul i64 %mul45, %conv42
- %add48 = add nsw i64 %mul46, 36611
- %conv49 = trunc i64 %add48 to i16
- br label %vector.ph1520
-
-vector.ph1520: ; preds = %for.cond11.preheader
- %broadcast.splatinsert1531 = insertelement <32 x i16> undef, i16 %conv49, i32 0
- %broadcast.splat1532 = shufflevector <32 x i16> %broadcast.splatinsert1531, <32 x i16> undef, <32 x i32> zeroinitializer
- br i1 %lcmp.mod, label %vector.body1512.prol.loopexit, label %vector.body1512.prol.preheader
-
-vector.body1512.prol.preheader: ; preds = %vector.ph1520
- store <32 x i16> %broadcast.splat1532, <32 x i16>* undef, align 8, !tbaa !1
- unreachable
-
-vector.body1512.prol.loopexit: ; preds = %vector.ph1520
- %add318 = add i16 %sub316, 0
- %2 = insertelement <16 x i16> undef, i16 %add318, i32 7
- %3 = insertelement <16 x i16> %2, i16 %add318, i32 8
- %4 = insertelement <16 x i16> %3, i16 %add318, i32 9
- %5 = insertelement <16 x i16> %4, i16 %add318, i32 10
- %6 = insertelement <16 x i16> %5, i16 %add318, i32 11
- %7 = insertelement <16 x i16> %6, i16 %add318, i32 12
- %8 = insertelement <16 x i16> %7, i16 %add318, i32 13
- %9 = insertelement <16 x i16> %8, i16 %add318, i32 14
- %10 = insertelement <16 x i16> undef, i16 %add318, i32 7
- %11 = insertelement <16 x i16> %10, i16 %add318, i32 8
- %12 = insertelement <16 x i16> %11, i16 %add318, i32 9
- %13 = insertelement <16 x i16> %12, i16 %add318, i32 10
- %14 = insertelement <16 x i16> %13, i16 %add318, i32 11
- %15 = insertelement <16 x i16> %14, i16 %add318, i32 12
- %16 = insertelement <16 x i16> %15, i16 %add318, i32 13
- %17 = insertelement <16 x i16> %16, i16 %add318, i32 14
- %18 = insertelement <16 x i16> %17, i16 %add318, i32 15
- %19 = insertelement <8 x i16> undef, i16 %add318, i32 7
- br label %for.cond74.loopexit.us
-
-for.cond337.preheader.lr.ph: ; preds = %for.cond130.preheader.loopexit
- br i1 %cmp339981, label %for.cond337.preheader.us.preheader, label %for.cond.cleanup335
-
-for.cond337.preheader.us.preheader: ; preds = %for.cond337.preheader.lr.ph
- store <32 x i16> %broadcast.splat1461, <32 x i16>* undef, align 4, !tbaa !1
- unreachable
-
-for.cond74.loopexit.us: ; preds = %for.cond74.loopexit.us, %vector.body1512.prol.loopexit
- store <8 x i16> zeroinitializer, <8 x i16>* undef, align 2, !tbaa !1
- %cmp76.us = icmp slt i64 undef, undef
- br i1 %cmp76.us, label %for.cond74.loopexit.us, label %for.cond130.preheader.loopexit
-
-for.cond130.preheader.loopexit: ; preds = %for.cond74.loopexit.us
- store <16 x i16> zeroinitializer, <16 x i16>* undef, align 2, !tbaa !1
- store <16 x i16> %18, <16 x i16>* undef, align 2, !tbaa !1
- store <8 x i16> %19, <8 x i16>* undef, align 2, !tbaa !1
- br label %for.cond337.preheader.lr.ph
-
-for.cond.cleanup335: ; preds = %for.cond337.preheader.lr.ph
- br label %for.cond380.preheader
-
-for.cond380.preheader: ; preds = %for.cond.cleanup335
- br label %for.cond385.preheader
-
-for.cond.cleanup378.loopexit: ; preds = %for.cond.cleanup388
- br label %for.cond481.preheader
-
-for.cond385.preheader: ; preds = %for.cond380.preheader
- br i1 %cmp443994, label %for.cond392.preheader.us.preheader, label %for.cond392.preheader.preheader
-
-for.cond392.preheader.preheader: ; preds = %for.cond385.preheader
- store <32 x i16> %broadcast.splat1435, <32 x i16>* undef, align 4, !tbaa !1
- store <32 x i16> %broadcast.splat1409, <32 x i16>* undef, align 4, !tbaa !1
- unreachable
-
-for.cond392.preheader.us.preheader: ; preds = %for.cond385.preheader
- br label %for.cond399.preheader.lr.ph.us.1
-
-for.cond.cleanup388: ; preds = %for.cond399.preheader.lr.ph.us.1
- br label %for.cond.cleanup378.loopexit
-
-for.cond481.preheader: ; preds = %for.cond.cleanup486, %for.cond.cleanup378.loopexit
- br label %for.cond.cleanup486
-
-for.cond.cleanup477.loopexit: ; preds = %for.cond.cleanup486
- store <8 x i32> <i32 1221902566, i32 1221902566, i32 1221902566, i32 1221902566, i32 1221902566, i32 1221902566, i32 1221902566, i32 1221902566>, <8 x i32>* undef, align 4, !tbaa !5
- br label %for.cond11.preheader
-
-for.cond.cleanup486: ; preds = %for.cond481.preheader
- br i1 undef, label %for.cond481.preheader, label %for.cond.cleanup477.loopexit
-
-for.cond399.preheader.lr.ph.us.1: ; preds = %for.cond392.preheader.us.preheader
- br i1 %cmp401989, label %for.cond399.preheader.us.us.1.preheader, label %for.cond.cleanup388
-
-for.cond399.preheader.us.us.1.preheader: ; preds = %for.cond399.preheader.lr.ph.us.1
- store <32 x i16> %broadcast.splat1357, <32 x i16>* undef, align 4, !tbaa !1
- unreachable
-}
-
-attributes #0 = { uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="knl" "target-features"="+adx,+aes,+avx,+avx2,+avx512cd,+avx512er,+avx512f,+avx512pf,+bmi,+bmi2,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+pclmul,+popcnt,+prefetchwt1,+rdrnd,+rdseed,+rtm,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
-!llvm.ident = !{!0}
-
-!0 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git ef66d4d58b9a2c6b3d31bbaf3ed2a70a9754a137) (http://llvm.org/git/llvm.git 5e661621191d6133a12effa103bfb2cbbdbb35ad)"}
-!1 = !{!2, !2, i64 0}
-!2 = !{!"short", !3, i64 0}
-!3 = !{!"omnipotent char", !4, i64 0}
-!4 = !{!"Simple C++ TBAA"}
-!5 = !{!6, !6, i64 0}
-!6 = !{!"int", !3, i64 0}
diff --git a/test/CodeGen/X86/pr31773.ll b/test/CodeGen/X86/pr31773.ll
new file mode 100644
index 000000000000..8722df3f4b57
--- /dev/null
+++ b/test/CodeGen/X86/pr31773.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
+
+; The point of this test is to ensure that vpackus* is not used for the umin+trunc combination, since the vpackus* input is treated as a signed number.
+define <16 x i8> @usat_trunc_wb_256(<16 x i16> %i) {
+; CHECK-LABEL: usat_trunc_wb_256:
+; CHECK-NOT: vpackuswb %xmm1, %xmm0, %xmm0
+ %x3 = icmp ult <16 x i16> %i, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ %x5 = select <16 x i1> %x3, <16 x i16> %i, <16 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ %x6 = trunc <16 x i16> %x5 to <16 x i8>
+ ret <16 x i8> %x6
+}
+
+define <8 x i16> @usat_trunc_dw_256(<8 x i32> %i) {
+; CHECK-LABEL: usat_trunc_dw_256:
+; CHECK-NOT: vpackusdw %xmm1, %xmm0, %xmm0
+ %x3 = icmp ult <8 x i32> %i, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+ %x5 = select <8 x i1> %x3, <8 x i32> %i, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+ %x6 = trunc <8 x i32> %x5 to <8 x i16>
+ ret <8 x i16> %x6
+}
diff --git a/test/CodeGen/X86/pr32108.ll b/test/CodeGen/X86/pr32108.ll
new file mode 100644
index 000000000000..f14b04802a04
--- /dev/null
+++ b/test/CodeGen/X86/pr32108.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
+
+define void @pr32108() {
+; CHECK-LABEL: pr32108:
+; CHECK: # BB#0: # %CF257
+; CHECK-NEXT: movb $0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB0_1: # %CF244
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: jmp .LBB0_1
+BB:
+ %Cmp45 = icmp slt <4 x i32> undef, undef
+ br label %CF243
+
+CF243: ; preds = %CF243, %BB
+ br i1 undef, label %CF243, label %CF257
+
+CF257: ; preds = %CF243
+ %Shuff144 = shufflevector <4 x i1> undef, <4 x i1> %Cmp45, <4 x i32> <i32 undef, i32 undef, i32 5, i32 undef>
+ br label %CF244
+
+CF244: ; preds = %CF244, %CF257
+ %Shuff182 = shufflevector <4 x i1> %Shuff144, <4 x i1> zeroinitializer, <4 x i32> <i32 3, i32 5, i32 7, i32 undef>
+ br label %CF244
+}
diff --git a/test/CodeGen/X86/pr32241.ll b/test/CodeGen/X86/pr32241.ll
new file mode 100644
index 000000000000..d8ce230057ea
--- /dev/null
+++ b/test/CodeGen/X86/pr32241.ll
@@ -0,0 +1,86 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -fast-isel -mtriple=i686-unknown-unknown -O0 -mcpu=skx | FileCheck %s
+
+define i32 @_Z3foov() {
+; CHECK-LABEL: _Z3foov:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: subl $20, %esp
+; CHECK-NEXT: .Lcfi0:
+; CHECK-NEXT: .cfi_def_cfa_offset 24
+; CHECK-NEXT: movw $10959, {{[0-9]+}}(%esp) # imm = 0x2ACF
+; CHECK-NEXT: movw $-15498, {{[0-9]+}}(%esp) # imm = 0xC376
+; CHECK-NEXT: movw $19417, {{[0-9]+}}(%esp) # imm = 0x4BD9
+; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movw {{[0-9]+}}(%esp), %cx
+; CHECK-NEXT: kxnorw %k0, %k0, %k0
+; CHECK-NEXT: kshiftrw $15, %k0, %k0
+; CHECK-NEXT: testw %cx, %cx
+; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%esp) # 2-byte Spill
+; CHECK-NEXT: jne .LBB0_2
+; CHECK-NEXT: jmp .LBB0_1
+; CHECK-NEXT: .LBB0_1: # %lor.rhs
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: kmovd %eax, %k0
+; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%esp) # 2-byte Spill
+; CHECK-NEXT: jmp .LBB0_2
+; CHECK-NEXT: .LBB0_2: # %lor.end
+; CHECK-NEXT: kmovw {{[0-9]+}}(%esp), %k0 # 2-byte Reload
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: kshiftrw $15, %k1, %k1
+; CHECK-NEXT: movb $1, %al
+; CHECK-NEXT: testb %al, %al
+; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%esp) # 2-byte Spill
+; CHECK-NEXT: kmovw %k1, {{[0-9]+}}(%esp) # 2-byte Spill
+; CHECK-NEXT: jne .LBB0_4
+; CHECK-NEXT: jmp .LBB0_3
+; CHECK-NEXT: .LBB0_3: # %lor.rhs4
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: kmovd %eax, %k0
+; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%esp) # 2-byte Spill
+; CHECK-NEXT: jmp .LBB0_4
+; CHECK-NEXT: .LBB0_4: # %lor.end5
+; CHECK-NEXT: kmovw {{[0-9]+}}(%esp), %k0 # 2-byte Reload
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: movw %ax, %cx
+; CHECK-NEXT: movw %cx, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: addl $20, %esp
+; CHECK-NEXT: retl
+entry:
+ %aa = alloca i16, align 2
+ %bb = alloca i16, align 2
+ %cc = alloca i16, align 2
+ store i16 10959, i16* %aa, align 2
+ store i16 -15498, i16* %bb, align 2
+ store i16 19417, i16* %cc, align 2
+ %0 = load i16, i16* %aa, align 2
+ %conv = zext i16 %0 to i32
+ %1 = load i16, i16* %cc, align 2
+ %tobool = icmp ne i16 %1, 0
+ br i1 %tobool, label %lor.end, label %lor.rhs
+
+lor.rhs: ; preds = %entry
+ br label %lor.end
+
+lor.end: ; preds = %lor.rhs, %entry
+ %2 = phi i1 [ true, %entry ], [ false, %lor.rhs ]
+ %conv1 = zext i1 %2 to i32
+ %cmp = icmp slt i32 %conv, %conv1
+ %conv2 = zext i1 %cmp to i32
+ %neg = xor i32 %conv2, -1
+ %tobool3 = icmp ne i32 %neg, 0
+ br i1 %tobool3, label %lor.end5, label %lor.rhs4
+
+lor.rhs4: ; preds = %lor.end
+ br label %lor.end5
+
+lor.end5: ; preds = %lor.rhs4, %lor.end
+ %3 = phi i1 [ true, %lor.end ], [ false, %lor.rhs4 ]
+ %conv6 = zext i1 %3 to i16
+ store i16 %conv6, i16* %bb, align 2
+ %4 = load i16, i16* %bb, align 2
+ %conv7 = zext i16 %4 to i32
+ ret i32 %conv7
+}
diff --git a/test/CodeGen/X86/pr32256.ll b/test/CodeGen/X86/pr32256.ll
new file mode 100644
index 000000000000..cb26c13e53eb
--- /dev/null
+++ b/test/CodeGen/X86/pr32256.ll
@@ -0,0 +1,59 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -fast-isel -mtriple=i686-unknown-unknown -O0 -mcpu=skx | FileCheck %s
+
+@c = external global i8, align 1
+
+; Function Attrs: noinline nounwind
+define void @_Z1av() {
+; CHECK-LABEL: _Z1av:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: subl $6, %esp
+; CHECK-NEXT: .Lcfi0:
+; CHECK-NEXT: .cfi_def_cfa_offset 10
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: kmovd %eax, %k0
+; CHECK-NEXT: movb c, %cl
+; CHECK-NEXT: # implicit-def: %EAX
+; CHECK-NEXT: movb %cl, %al
+; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: kmovd %eax, %k1
+; CHECK-NEXT: kmovq %k1, %k2
+; CHECK-NEXT: kxnorw %k0, %k0, %k3
+; CHECK-NEXT: kshiftrw $15, %k3, %k3
+; CHECK-NEXT: kxorw %k3, %k1, %k1
+; CHECK-NEXT: kmovd %k1, %eax
+; CHECK-NEXT: movb %al, %cl
+; CHECK-NEXT: testb $1, %cl
+; CHECK-NEXT: kmovw %k2, {{[0-9]+}}(%esp) # 2-byte Spill
+; CHECK-NEXT: kmovw %k0, (%esp) # 2-byte Spill
+; CHECK-NEXT: jne .LBB0_1
+; CHECK-NEXT: jmp .LBB0_2
+; CHECK-NEXT: .LBB0_1: # %land.rhs
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: kmovd %eax, %k0
+; CHECK-NEXT: kmovw %k0, (%esp) # 2-byte Spill
+; CHECK-NEXT: jmp .LBB0_2
+; CHECK-NEXT: .LBB0_2: # %land.end
+; CHECK-NEXT: kmovw (%esp), %k0 # 2-byte Reload
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: movb %al, %cl
+; CHECK-NEXT: andb $1, %cl
+; CHECK-NEXT: movb %cl, {{[0-9]+}}(%esp)
+; CHECK-NEXT: addl $6, %esp
+; CHECK-NEXT: retl
+entry:
+ %b = alloca i8, align 1
+ %0 = load i8, i8* @c, align 1
+ %tobool = trunc i8 %0 to i1
+ %lnot = xor i1 %tobool, true
+ br i1 %lnot, label %land.rhs, label %land.end
+
+land.rhs: ; preds = %entry
+ br label %land.end
+
+land.end: ; preds = %land.rhs, %entry
+ %1 = phi i1 [ false, %entry ], [ false, %land.rhs ]
+ %conv = zext i1 %1 to i8
+ store i8 %conv, i8* %b, align 1
+ ret void
+}
diff --git a/test/CodeGen/X86/pr32278.ll b/test/CodeGen/X86/pr32278.ll
new file mode 100644
index 000000000000..1b130c838bae
--- /dev/null
+++ b/test/CodeGen/X86/pr32278.ll
@@ -0,0 +1,11 @@
+; PR32278
+
+; RUN: llc -mtriple=x86_64-unknown < %s
+
+define i8 @foo_v4i1_0_0_1_1_2_2_3_3(i8 %in) {
+ %trunc = trunc i8 %in to i4
+ %mask = bitcast i4 %trunc to <4 x i1>
+ %s = shufflevector <4 x i1> %mask, <4 x i1> undef, <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
+ %b = bitcast <8 x i1> %s to i8
+ ret i8 %b
+}
diff --git a/test/CodeGen/X86/pr32284.ll b/test/CodeGen/X86/pr32284.ll
new file mode 100644
index 000000000000..e05fc926b080
--- /dev/null
+++ b/test/CodeGen/X86/pr32284.ll
@@ -0,0 +1,117 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mcpu=skx | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=i686-unknown -mcpu=skx -O0 | FileCheck %s --check-prefix=X86-O0
+; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=skx | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=skx -O0 | FileCheck %s --check-prefix=X64-O0
+
+@c = external constant i8, align 1
+
+define void @foo() {
+; X86-LABEL: foo:
+; X86: # BB#0: # %entry
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: .Lcfi0:
+; X86-NEXT: .cfi_def_cfa_offset 12
+; X86-NEXT: movzbl c, %eax
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %cl
+; X86-NEXT: testb %al, %al
+; X86-NEXT: setne {{[0-9]+}}(%esp)
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: cmpl %eax, %ecx
+; X86-NEXT: setle %dl
+; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: retl
+;
+; X86-O0-LABEL: foo:
+; X86-O0: # BB#0: # %entry
+; X86-O0-NEXT: subl $12, %esp
+; X86-O0-NEXT: .Lcfi0:
+; X86-O0-NEXT: .cfi_def_cfa_offset 16
+; X86-O0-NEXT: movzbl c, %eax
+; X86-O0-NEXT: testl %eax, %eax
+; X86-O0-NEXT: setne %cl
+; X86-O0-NEXT: movl %eax, %edx
+; X86-O0-NEXT: movb %dl, %ch
+; X86-O0-NEXT: testb %ch, %ch
+; X86-O0-NEXT: setne {{[0-9]+}}(%esp)
+; X86-O0-NEXT: movzbl %cl, %edx
+; X86-O0-NEXT: subl %eax, %edx
+; X86-O0-NEXT: setle %cl
+; X86-O0-NEXT: # implicit-def: %EAX
+; X86-O0-NEXT: movb %cl, %al
+; X86-O0-NEXT: andl $1, %eax
+; X86-O0-NEXT: kmovd %eax, %k0
+; X86-O0-NEXT: kmovd %k0, %eax
+; X86-O0-NEXT: movb %al, %cl
+; X86-O0-NEXT: andb $1, %cl
+; X86-O0-NEXT: movzbl %cl, %eax
+; X86-O0-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-O0-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X86-O0-NEXT: addl $12, %esp
+; X86-O0-NEXT: retl
+;
+; X64-LABEL: foo:
+; X64: # BB#0: # %entry
+; X64-NEXT: movzbl {{.*}}(%rip), %eax
+; X64-NEXT: testb %al, %al
+; X64-NEXT: setne -{{[0-9]+}}(%rsp)
+; X64-NEXT: xorl %ecx, %ecx
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setne %cl
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: cmpl %eax, %ecx
+; X64-NEXT: setle %dl
+; X64-NEXT: movl %edx, -{{[0-9]+}}(%rsp)
+; X64-NEXT: retq
+;
+; X64-O0-LABEL: foo:
+; X64-O0: # BB#0: # %entry
+; X64-O0-NEXT: movzbl {{.*}}(%rip), %eax
+; X64-O0-NEXT: movl %eax, %ecx
+; X64-O0-NEXT: movb %cl, %dl
+; X64-O0-NEXT: movl %ecx, %eax
+; X64-O0-NEXT: testq %rcx, %rcx
+; X64-O0-NEXT: setne %sil
+; X64-O0-NEXT: testb %dl, %dl
+; X64-O0-NEXT: setne -{{[0-9]+}}(%rsp)
+; X64-O0-NEXT: movzbl %sil, %edi
+; X64-O0-NEXT: subl %eax, %edi
+; X64-O0-NEXT: setle %dl
+; X64-O0-NEXT: # implicit-def: %EAX
+; X64-O0-NEXT: movb %dl, %al
+; X64-O0-NEXT: andl $1, %eax
+; X64-O0-NEXT: kmovd %eax, %k0
+; X64-O0-NEXT: kmovd %k0, %eax
+; X64-O0-NEXT: movb %al, %dl
+; X64-O0-NEXT: andb $1, %dl
+; X64-O0-NEXT: movzbl %dl, %eax
+; X64-O0-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
+; X64-O0-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # 4-byte Spill
+; X64-O0-NEXT: retq
+entry:
+ %a = alloca i8, align 1
+ %b = alloca i32, align 4
+ %0 = load i8, i8* @c, align 1
+ %conv = zext i8 %0 to i32
+ %sub = sub nsw i32 0, %conv
+ %conv1 = sext i32 %sub to i64
+ %sub2 = sub nsw i64 0, %conv1
+ %conv3 = trunc i64 %sub2 to i8
+ %tobool = icmp ne i8 %conv3, 0
+ %frombool = zext i1 %tobool to i8
+ store i8 %frombool, i8* %a, align 1
+ %1 = load i8, i8* @c, align 1
+ %tobool4 = icmp ne i8 %1, 0
+ %lnot = xor i1 %tobool4, true
+ %lnot5 = xor i1 %lnot, true
+ %conv6 = zext i1 %lnot5 to i32
+ %2 = load i8, i8* @c, align 1
+ %conv7 = zext i8 %2 to i32
+ %cmp = icmp sle i32 %conv6, %conv7
+ %conv8 = zext i1 %cmp to i32
+ store i32 %conv8, i32* %b, align 4
+ ret void
+}
diff --git a/test/CodeGen/X86/pr32329.ll b/test/CodeGen/X86/pr32329.ll
new file mode 100644
index 000000000000..f2b79b67877f
--- /dev/null
+++ b/test/CodeGen/X86/pr32329.ll
@@ -0,0 +1,126 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mcpu=skx | FileCheck %s -check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=skx | FileCheck %s -check-prefix=X64
+
+%struct.AA = type { i24, [4 x i8] }
+
+@obj = external local_unnamed_addr global %struct.AA, align 8
+@var_27 = external local_unnamed_addr constant i8, align 1
+@var_2 = external local_unnamed_addr constant i16, align 2
+@var_24 = external local_unnamed_addr constant i64, align 8
+@var_310 = external local_unnamed_addr global i64, align 8
+@var_50 = external local_unnamed_addr global i64, align 8
+@var_205 = external local_unnamed_addr global i8, align 1
+@var_218 = external local_unnamed_addr global i8, align 1
+
+define void @foo() local_unnamed_addr {
+; X86-LABEL: foo:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .Lcfi0:
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: pushl %ebx
+; X86-NEXT: .Lcfi1:
+; X86-NEXT: .cfi_def_cfa_offset 12
+; X86-NEXT: pushl %edi
+; X86-NEXT: .Lcfi2:
+; X86-NEXT: .cfi_def_cfa_offset 16
+; X86-NEXT: pushl %esi
+; X86-NEXT: .Lcfi3:
+; X86-NEXT: .cfi_def_cfa_offset 20
+; X86-NEXT: .Lcfi4:
+; X86-NEXT: .cfi_offset %esi, -20
+; X86-NEXT: .Lcfi5:
+; X86-NEXT: .cfi_offset %edi, -16
+; X86-NEXT: .Lcfi6:
+; X86-NEXT: .cfi_offset %ebx, -12
+; X86-NEXT: .Lcfi7:
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl obj, %edx
+; X86-NEXT: movsbl var_27, %eax
+; X86-NEXT: movzwl var_2, %esi
+; X86-NEXT: movl var_310, %ecx
+; X86-NEXT: imull %eax, %ecx
+; X86-NEXT: addl var_24, %ecx
+; X86-NEXT: andl $4194303, %edx # imm = 0x3FFFFF
+; X86-NEXT: leal (%edx,%edx), %ebx
+; X86-NEXT: subl %eax, %ebx
+; X86-NEXT: movl %ebx, %edi
+; X86-NEXT: subl %esi, %edi
+; X86-NEXT: imull %edi, %ecx
+; X86-NEXT: addl $-1437483407, %ecx # imm = 0xAA51BE71
+; X86-NEXT: movl $9, %esi
+; X86-NEXT: xorl %ebp, %ebp
+; X86-NEXT: shldl %cl, %esi, %ebp
+; X86-NEXT: shll %cl, %esi
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: cmovnel %esi, %ebp
+; X86-NEXT: movl $0, %ecx
+; X86-NEXT: cmovnel %ecx, %esi
+; X86-NEXT: cmpl %edx, %edi
+; X86-NEXT: movl %ebp, var_50+4
+; X86-NEXT: movl %esi, var_50
+; X86-NEXT: setge var_205
+; X86-NEXT: imull %eax, %ebx
+; X86-NEXT: movb %bl, var_218
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: foo:
+; X64: # BB#0: # %entry
+; X64-NEXT: movl {{.*}}(%rip), %eax
+; X64-NEXT: movsbl {{.*}}(%rip), %r9d
+; X64-NEXT: movzwl {{.*}}(%rip), %r8d
+; X64-NEXT: movl {{.*}}(%rip), %esi
+; X64-NEXT: imull %r9d, %esi
+; X64-NEXT: addl {{.*}}(%rip), %esi
+; X64-NEXT: andl $4194303, %eax # imm = 0x3FFFFF
+; X64-NEXT: leal (%rax,%rax), %edi
+; X64-NEXT: subl %r9d, %edi
+; X64-NEXT: movl %edi, %edx
+; X64-NEXT: subl %r8d, %edx
+; X64-NEXT: imull %edx, %esi
+; X64-NEXT: addl $-1437483407, %esi # imm = 0xAA51BE71
+; X64-NEXT: movl $9, %ecx
+; X64-NEXT: shlxq %rsi, %rcx, %rcx
+; X64-NEXT: movq %rcx, {{.*}}(%rip)
+; X64-NEXT: cmpl %eax, %edx
+; X64-NEXT: setge {{.*}}(%rip)
+; X64-NEXT: imull %r9d, %edi
+; X64-NEXT: movb %dil, {{.*}}(%rip)
+; X64-NEXT: retq
+ entry:
+ %bf.load = load i32, i32* bitcast (%struct.AA* @obj to i32*), align 8
+ %bf.clear = shl i32 %bf.load, 1
+ %add = and i32 %bf.clear, 8388606
+ %0 = load i8, i8* @var_27, align 1
+ %conv5 = sext i8 %0 to i32
+ %sub = sub nsw i32 %add, %conv5
+ %1 = load i16, i16* @var_2, align 2
+ %conv6 = zext i16 %1 to i32
+ %sub7 = sub nsw i32 %sub, %conv6
+ %conv8 = sext i32 %sub7 to i64
+ %2 = load i64, i64* @var_24, align 8
+ %3 = load i64, i64* @var_310, align 8
+ %conv9 = sext i8 %0 to i64
+ %mul = mul i64 %3, %conv9
+ %add10 = add i64 %mul, %2
+ %mul11 = mul i64 %add10, %conv8
+ %sub12 = add i64 %mul11, 8662905354777116273
+ %shl = shl i64 9, %sub12
+ store i64 %shl, i64* @var_50, align 8
+ %bf.clear14 = and i32 %bf.load, 4194303
+ %add21 = shl nuw nsw i32 %bf.clear14, 1
+ %sub23 = sub nsw i32 %add21, %conv5
+ %sub25 = sub nsw i32 %sub23, %conv6
+ %cmp = icmp sge i32 %sub25, %bf.clear14
+ %conv30 = zext i1 %cmp to i8
+ store i8 %conv30, i8* @var_205, align 1
+ %mul43 = mul nsw i32 %sub, %conv5
+ %conv44 = trunc i32 %mul43 to i8
+ store i8 %conv44, i8* @var_218, align 1
+ ret void
+}
diff --git a/test/CodeGen/X86/pr32340.ll b/test/CodeGen/X86/pr32340.ll
new file mode 100644
index 000000000000..cd9b5af1dc56
--- /dev/null
+++ b/test/CodeGen/X86/pr32340.ll
@@ -0,0 +1,77 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mtriple=x86_64-unknown-linux-gnu -o - %s | FileCheck %s -check-prefix=X64
+
+@var_825 = external global i16, align 2
+@var_32 = external global i16, align 2
+@var_901 = external global i16, align 2
+@var_826 = external global i64, align 8
+@var_57 = external global i64, align 8
+@var_900 = external global i16, align 2
+@var_28 = external constant i64, align 8
+@var_827 = external global i16, align 2
+
+define void @foo() {
+; X64-LABEL: foo:
+; X64: # BB#0: # %entry
+; X64-NEXT: movw $0, {{.*}}(%rip)
+; X64-NEXT: movzwl {{.*}}(%rip), %eax
+; X64-NEXT: movw %ax, %cx
+; X64-NEXT: movw {{.*}}(%rip), %dx
+; X64-NEXT: xorw %dx, %cx
+; X64-NEXT: # implicit-def: %ESI
+; X64-NEXT: movw %cx, %si
+; X64-NEXT: movl %eax, %edi
+; X64-NEXT: xorl %esi, %edi
+; X64-NEXT: movw %di, %cx
+; X64-NEXT: movzwl %cx, %esi
+; X64-NEXT: movl %esi, %edi
+; X64-NEXT: addl %eax, %edi
+; X64-NEXT: movl %edi, %r8d
+; X64-NEXT: movq %r8, {{.*}}(%rip)
+; X64-NEXT: xorl $-772157262, %esi # imm = 0xD1F9D0B2
+; X64-NEXT: movl {{.*}}(%rip), %eax
+; X64-NEXT: movl %esi, %edi
+; X64-NEXT: orl %eax, %edi
+; X64-NEXT: orl %edi, %esi
+; X64-NEXT: movw %si, %cx
+; X64-NEXT: movw %cx, {{.*}}(%rip)
+; X64-NEXT: movq {{.*}}(%rip), %r8
+; X64-NEXT: testq %r8, %r8
+; X64-NEXT: setne %r9b
+; X64-NEXT: movzbl %r9b, %eax
+; X64-NEXT: movw %ax, %cx
+; X64-NEXT: movw %cx, var_827
+; X64-NEXT: retq
+entry:
+ store i16 0, i16* @var_825, align 2
+ %v0 = load i16, i16* @var_32, align 2
+ %conv = zext i16 %v0 to i32
+ %v2 = load i16, i16* @var_901, align 2
+ %conv2 = zext i16 %v2 to i32
+ %xor = xor i32 %conv, %conv2
+ %xor3 = xor i32 %conv, %xor
+ %add = add nsw i32 %xor3, %conv
+ %conv5 = sext i32 %add to i64
+ store i64 %conv5, i64* @var_826, align 8
+ %v4 = load i16, i16* @var_32, align 2
+ %conv6 = zext i16 %v4 to i64
+ %v6 = load i16, i16* @var_901, align 2
+ %conv8 = zext i16 %v6 to i32
+ %xor9 = xor i32 51981, %conv8
+ %conv10 = sext i32 %xor9 to i64
+ %xor11 = xor i64 -1142377792914660288, %conv10
+ %xor12 = xor i64 %conv6, %xor11
+ %neg = xor i64 %xor12, -1
+ %xor13 = xor i64 %conv6, %neg
+ %v9 = load i16, i16* @var_901, align 2
+ %v10 = load i64, i64* @var_57, align 8
+ %or = or i64 %xor13, %v10
+ %or23 = or i64 %xor13, %or
+ %conv24 = trunc i64 %or23 to i16
+ store i16 %conv24, i16* @var_900, align 2
+ %v11 = load i64, i64* @var_28, align 8
+ %cmp = icmp ne i64 0, %v11
+ %conv25 = zext i1 %cmp to i16
+ store i16 %conv25, i16* @var_827, align 2
+ ret void
+}
diff --git a/test/CodeGen/X86/pr32345.ll b/test/CodeGen/X86/pr32345.ll
new file mode 100644
index 000000000000..e9182698dd90
--- /dev/null
+++ b/test/CodeGen/X86/pr32345.ll
@@ -0,0 +1,169 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mtriple=x86_64-unknown-linux-gnu -o - %s | FileCheck %s -check-prefix=X640
+; RUN: llc -O0 -mtriple=i686-unknown -o - %s | FileCheck %s -check-prefix=6860
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -o - %s | FileCheck %s -check-prefix=X64
+; RUN: llc -mtriple=i686-unknown -o - %s | FileCheck %s -check-prefix=686
+
+@var_22 = external global i16, align 2
+@var_27 = external global i16, align 2
+
+define void @foo() {
+; X640-LABEL: foo:
+; X640: # BB#0: # %bb
+; X640-NEXT: # implicit-def: %RAX
+; X640-NEXT: movzwl var_22, %ecx
+; X640-NEXT: movzwl var_27, %edx
+; X640-NEXT: xorl %edx, %ecx
+; X640-NEXT: movzwl var_27, %edx
+; X640-NEXT: xorl %edx, %ecx
+; X640-NEXT: movslq %ecx, %rsi
+; X640-NEXT: movq %rsi, -{{[0-9]+}}(%rsp)
+; X640-NEXT: movzwl var_22, %ecx
+; X640-NEXT: movzwl var_27, %edx
+; X640-NEXT: xorl %edx, %ecx
+; X640-NEXT: movzwl var_27, %edx
+; X640-NEXT: xorl %edx, %ecx
+; X640-NEXT: movslq %ecx, %rsi
+; X640-NEXT: movzwl var_27, %ecx
+; X640-NEXT: subl $16610, %ecx # imm = 0x40E2
+; X640-NEXT: movl %ecx, %ecx
+; X640-NEXT: # kill: %RCX<def> %ECX<kill>
+; X640-NEXT: # kill: %CL<def> %RCX<kill>
+; X640-NEXT: sarq %cl, %rsi
+; X640-NEXT: movb %sil, %cl
+; X640-NEXT: movb %cl, (%rax)
+; X640-NEXT: retq
+;
+; 6860-LABEL: foo:
+; 6860: # BB#0: # %bb
+; 6860-NEXT: pushl %ebp
+; 6860-NEXT: .Lcfi0:
+; 6860-NEXT: .cfi_def_cfa_offset 8
+; 6860-NEXT: .Lcfi1:
+; 6860-NEXT: .cfi_offset %ebp, -8
+; 6860-NEXT: movl %esp, %ebp
+; 6860-NEXT: .Lcfi2:
+; 6860-NEXT: .cfi_def_cfa_register %ebp
+; 6860-NEXT: pushl %ebx
+; 6860-NEXT: pushl %edi
+; 6860-NEXT: pushl %esi
+; 6860-NEXT: andl $-8, %esp
+; 6860-NEXT: subl $32, %esp
+; 6860-NEXT: .Lcfi3:
+; 6860-NEXT: .cfi_offset %esi, -20
+; 6860-NEXT: .Lcfi4:
+; 6860-NEXT: .cfi_offset %edi, -16
+; 6860-NEXT: .Lcfi5:
+; 6860-NEXT: .cfi_offset %ebx, -12
+; 6860-NEXT: # implicit-def: %EAX
+; 6860-NEXT: movw var_22, %cx
+; 6860-NEXT: movzwl var_27, %edx
+; 6860-NEXT: movw %dx, %si
+; 6860-NEXT: xorw %si, %cx
+; 6860-NEXT: # implicit-def: %EDI
+; 6860-NEXT: movw %cx, %di
+; 6860-NEXT: xorl %edx, %edi
+; 6860-NEXT: movw %di, %cx
+; 6860-NEXT: movzwl %cx, %edi
+; 6860-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; 6860-NEXT: movl $0, {{[0-9]+}}(%esp)
+; 6860-NEXT: addl $-16610, %edx # imm = 0xBF1E
+; 6860-NEXT: movb %dl, %bl
+; 6860-NEXT: xorl %edx, %edx
+; 6860-NEXT: movb %bl, %cl
+; 6860-NEXT: shrdl %cl, %edx, %edi
+; 6860-NEXT: testb $32, %bl
+; 6860-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; 6860-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; 6860-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; 6860-NEXT: jne .LBB0_2
+; 6860-NEXT: # BB#1: # %bb
+; 6860-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; 6860-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; 6860-NEXT: .LBB0_2: # %bb
+; 6860-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; 6860-NEXT: movb %al, %cl
+; 6860-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; 6860-NEXT: movb %cl, (%eax)
+; 6860-NEXT: leal -12(%ebp), %esp
+; 6860-NEXT: popl %esi
+; 6860-NEXT: popl %edi
+; 6860-NEXT: popl %ebx
+; 6860-NEXT: popl %ebp
+; 6860-NEXT: retl
+;
+; X64-LABEL: foo:
+; X64: # BB#0: # %bb
+; X64-NEXT: movzwl {{.*}}(%rip), %ecx
+; X64-NEXT: movw {{.*}}(%rip), %ax
+; X64-NEXT: xorw %cx, %ax
+; X64-NEXT: xorl %ecx, %eax
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; X64-NEXT: addl $-16610, %ecx # imm = 0xBF1E
+; X64-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; X64-NEXT: shrq %cl, %rax
+; X64-NEXT: movb %al, (%rax)
+; X64-NEXT: retq
+;
+; 686-LABEL: foo:
+; 686: # BB#0: # %bb
+; 686-NEXT: pushl %ebp
+; 686-NEXT: .Lcfi0:
+; 686-NEXT: .cfi_def_cfa_offset 8
+; 686-NEXT: .Lcfi1:
+; 686-NEXT: .cfi_offset %ebp, -8
+; 686-NEXT: movl %esp, %ebp
+; 686-NEXT: .Lcfi2:
+; 686-NEXT: .cfi_def_cfa_register %ebp
+; 686-NEXT: andl $-8, %esp
+; 686-NEXT: subl $8, %esp
+; 686-NEXT: movzwl var_27, %ecx
+; 686-NEXT: movw var_22, %ax
+; 686-NEXT: xorw %cx, %ax
+; 686-NEXT: xorl %ecx, %eax
+; 686-NEXT: movzwl %ax, %eax
+; 686-NEXT: movl %eax, (%esp)
+; 686-NEXT: movl $0, {{[0-9]+}}(%esp)
+; 686-NEXT: addl $-16610, %ecx # imm = 0xBF1E
+; 686-NEXT: xorl %edx, %edx
+; 686-NEXT: shrdl %cl, %edx, %eax
+; 686-NEXT: testb $32, %cl
+; 686-NEXT: jne .LBB0_2
+; 686-NEXT: # BB#1: # %bb
+; 686-NEXT: movl %eax, %edx
+; 686-NEXT: .LBB0_2: # %bb
+; 686-NEXT: movb %dl, (%eax)
+; 686-NEXT: movl %ebp, %esp
+; 686-NEXT: popl %ebp
+; 686-NEXT: retl
+bb:
+ %tmp = alloca i64, align 8
+ %tmp1 = load i16, i16* @var_22, align 2
+ %tmp2 = zext i16 %tmp1 to i32
+ %tmp3 = load i16, i16* @var_27, align 2
+ %tmp4 = zext i16 %tmp3 to i32
+ %tmp5 = xor i32 %tmp2, %tmp4
+ %tmp6 = load i16, i16* @var_27, align 2
+ %tmp7 = zext i16 %tmp6 to i32
+ %tmp8 = xor i32 %tmp5, %tmp7
+ %tmp9 = sext i32 %tmp8 to i64
+ store i64 %tmp9, i64* %tmp, align 8
+ %tmp10 = load i16, i16* @var_22, align 2
+ %tmp11 = zext i16 %tmp10 to i32
+ %tmp12 = load i16, i16* @var_27, align 2
+ %tmp13 = zext i16 %tmp12 to i32
+ %tmp14 = xor i32 %tmp11, %tmp13
+ %tmp15 = load i16, i16* @var_27, align 2
+ %tmp16 = zext i16 %tmp15 to i32
+ %tmp17 = xor i32 %tmp14, %tmp16
+ %tmp18 = sext i32 %tmp17 to i64
+ %tmp19 = load i16, i16* @var_27, align 2
+ %tmp20 = zext i16 %tmp19 to i32
+ %tmp21 = sub nsw i32 %tmp20, 16610
+ %tmp22 = zext i32 %tmp21 to i64
+ %tmp23 = ashr i64 %tmp18, %tmp22
+ %tmp24 = trunc i64 %tmp23 to i8
+ store i8 %tmp24, i8* undef, align 1
+ ret void
+}
diff --git a/test/CodeGen/X86/pr32420.ll b/test/CodeGen/X86/pr32420.ll
new file mode 100644
index 000000000000..bf3a4720c080
--- /dev/null
+++ b/test/CodeGen/X86/pr32420.ll
@@ -0,0 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.12.0"
+
+@a = common local_unnamed_addr global i16 0, align 4
+@b = common local_unnamed_addr global i16 0, align 4
+
+define i32 @PR32420() {
+; CHECK-LABEL: PR32420:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movq _a@{{.*}}(%rip), %rax
+; CHECK-NEXT: movzwl (%rax), %eax
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: shll $12, %ecx
+; CHECK-NEXT: sarw $12, %cx
+; CHECK-NEXT: movq _b@{{.*}}(%rip), %rdx
+; CHECK-NEXT: movw %cx, %si
+; CHECK-NEXT: orw (%rdx), %si
+; CHECK-NEXT: andl %ecx, %esi
+; CHECK-NEXT: movw %si, (%rdx)
+; CHECK-NEXT: retq
+ %load2 = load i16, i16* @a, align 4
+ %shl3 = shl i16 %load2, 12
+ %ashr4 = ashr i16 %shl3, 12
+ %t2 = load volatile i16, i16* @b, align 4
+ %conv8 = or i16 %t2, %ashr4
+ %load9 = load i16, i16* @a, align 4
+ %shl10 = shl i16 %load9, 12
+ %ashr11 = ashr i16 %shl10, 12
+ %and = and i16 %conv8, %ashr11
+ store i16 %and, i16* @b, align 4
+ %cast1629 = zext i16 %load2 to i32
+ ret i32 %cast1629
+}
diff --git a/test/CodeGen/X86/pr32451.ll b/test/CodeGen/X86/pr32451.ll
new file mode 100644
index 000000000000..d980b7ff284c
--- /dev/null
+++ b/test/CodeGen/X86/pr32451.ll
@@ -0,0 +1,69 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -fast-isel -mtriple=i686-unknown-unknown -O0 -mcpu=knl | FileCheck %s
+
+; ModuleID = 'convert'
+source_filename = "convert"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i8** @japi1_convert_690(i8**, i8***, i32) {
+; CHECK-LABEL: japi1_convert_690:
+; CHECK: # BB#0: # %top
+; CHECK-NEXT: pushl %ebx
+; CHECK-NEXT: .Lcfi0:
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: subl $16, %esp
+; CHECK-NEXT: .Lcfi1:
+; CHECK-NEXT: .cfi_def_cfa_offset 24
+; CHECK-NEXT: .Lcfi2:
+; CHECK-NEXT: .cfi_offset %ebx, -8
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; CHECK-NEXT: calll julia.gc_root_decl
+; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; CHECK-NEXT: calll jl_get_ptls_states
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; CHECK-NEXT: movl 4(%ecx), %edx
+; CHECK-NEXT: movb (%edx), %bl
+; CHECK-NEXT: # implicit-def: %EDX
+; CHECK-NEXT: movb %bl, %dl
+; CHECK-NEXT: andl $1, %edx
+; CHECK-NEXT: kmovw %edx, %k0
+; CHECK-NEXT: kmovw %k0, %edx
+; CHECK-NEXT: movb %dl, %bl
+; CHECK-NEXT: andb $1, %bl
+; CHECK-NEXT: movzbl %bl, %edx
+; CHECK-NEXT: movl %edx, (%esp)
+; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; CHECK-NEXT: calll jl_box_int32
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; CHECK-NEXT: movl %eax, (%ecx)
+; CHECK-NEXT: addl $16, %esp
+; CHECK-NEXT: popl %ebx
+; CHECK-NEXT: retl
+top:
+ %3 = alloca i8***
+ store volatile i8*** %1, i8**** %3
+ %4 = call i8*** @julia.gc_root_decl()
+ %5 = call i8**** @jl_get_ptls_states()
+ %6 = bitcast i8**** %5 to i8***
+ %7 = getelementptr i8**, i8*** %6, i64 3
+ %8 = bitcast i8*** %7 to i64**
+ %9 = load i64*, i64** %8
+ %10 = getelementptr i8**, i8*** %1, i64 1
+ %11 = load i8**, i8*** %10
+ %12 = bitcast i8** %11 to i8*
+ %13 = load i8, i8* %12
+ %14 = trunc i8 %13 to i1
+ %15 = zext i1 %14 to i8
+ %16 = zext i8 %15 to i32
+ %17 = call i8** @jl_box_int32(i32 signext %16)
+ store i8** %17, i8*** %4
+ ret i8** %17
+}
+
+declare i8**** @jl_get_ptls_states()
+
+declare i8** @jl_box_int32(i32)
+
+declare i8*** @julia.gc_root_decl()
diff --git a/test/CodeGen/X86/pr32484.ll b/test/CodeGen/X86/pr32484.ll
new file mode 100644
index 000000000000..74857f8d0066
--- /dev/null
+++ b/test/CodeGen/X86/pr32484.ll
@@ -0,0 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+
+define void @foo() {
+; CHECK-LABEL: foo:
+; CHECK: # BB#0:
+; CHECK-NEXT: # implicit-def: %RAX
+; CHECK-NEXT: jmpq *%rax
+; CHECK-NEXT: .LBB0_1:
+; CHECK-NEXT: # implicit-def: %RAX
+; CHECK-NEXT: xorps %xmm0, %xmm0
+; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
+; CHECK-NEXT: movdqu %xmm1, (%rax)
+; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; CHECK-NEXT: .LBB0_2:
+; CHECK-NEXT: retq
+ indirectbr i8* undef, [label %9, label %1]
+
+; <label>:1: ; preds = %0
+ %2 = shufflevector <16 x i8> zeroinitializer, <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+ %3 = shufflevector <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8> zeroinitializer, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+ %4 = or <16 x i8> %3, %2
+ %5 = shufflevector <16 x i8> %4, <16 x i8> undef, <16 x i32> <i32 8, i32 5, i32 1, i32 13, i32 15, i32 10, i32 14, i32 0, i32 3, i32 2, i32 7, i32 4, i32 6, i32 9, i32 11, i32 12>
+ %6 = bitcast <16 x i8> %5 to <2 x i64>
+ %7 = xor <2 x i64> %6, zeroinitializer
+ %8 = xor <2 x i64> %7, <i64 -1, i64 -1>
+ store <2 x i64> %8, <2 x i64>* undef, align 1
+ unreachable
+
+; <label>:9: ; preds = %0
+ ret void
+}
diff --git a/test/CodeGen/X86/pr32588.ll b/test/CodeGen/X86/pr32588.ll
new file mode 100644
index 000000000000..eee1d651c3e8
--- /dev/null
+++ b/test/CodeGen/X86/pr32588.ll
@@ -0,0 +1,27 @@
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux | FileCheck %s
+
+@c = external local_unnamed_addr global i32, align 4
+@b = external local_unnamed_addr global i32, align 4
+@d = external local_unnamed_addr global i32, align 4
+
+; CHECK: cmpl $1, c(%rip)
+; CHECK-NEXT: sbbl %eax, %eax
+; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: movl %eax, d(%rip)
+; CHECK-NEXT: retq
+
+define void @fn1() {
+entry:
+ %0 = load i32, i32* @c, align 4
+ %tobool1 = icmp eq i32 %0, 0
+ %xor = zext i1 %tobool1 to i32
+ %1 = load i32, i32* @b, align 4
+ %tobool2 = icmp ne i32 %1, 0
+ %tobool4 = icmp ne i32 undef, 0
+ %2 = and i1 %tobool4, %tobool2
+ %sub = sext i1 %2 to i32
+ %div = sdiv i32 %sub, 2
+ %add = add nsw i32 %div, %xor
+ store i32 %add, i32* @d, align 4
+ ret void
+}
diff --git a/test/CodeGen/X86/pre-coalesce-2.ll b/test/CodeGen/X86/pre-coalesce-2.ll
new file mode 100644
index 000000000000..90fcd1875d49
--- /dev/null
+++ b/test/CodeGen/X86/pre-coalesce-2.ll
@@ -0,0 +1,281 @@
+; RUN: llc -regalloc=greedy -verify-coalescing -mtriple=x86_64-unknown-linux-gnu < %s
+; Check that the live range is updated properly after register coalescing.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+@.str = internal unnamed_addr constant { [17 x i8], [47 x i8] } { [17 x i8] c"0123456789ABCDEF\00", [47 x i8] zeroinitializer }, align 32
+@b = common local_unnamed_addr global i32 0, align 4
+@a = common local_unnamed_addr global i32* null, align 8
+@__sancov_gen_cov = private global [9 x i32] zeroinitializer
+
+; Function Attrs: nounwind sanitize_address
+define void @fn2(i8* %p1) local_unnamed_addr #0 {
+entry:
+ %0 = load atomic i32, i32* inttoptr (i64 add (i64 ptrtoint ([9 x i32]* @__sancov_gen_cov to i64), i64 4) to i32*) monotonic, align 4
+ %1 = icmp sge i32 0, %0
+ br i1 %1, label %2, label %3
+
+; <label>:2: ; preds = %entry
+ call void @__sanitizer_cov(i32* inttoptr (i64 add (i64 ptrtoint ([9 x i32]* @__sancov_gen_cov to i64), i64 4) to i32*))
+ call void asm sideeffect "", ""()
+ br label %3
+
+; <label>:3: ; preds = %entry, %2
+ br label %while.cond.outer
+
+while.cond.outer: ; preds = %75, %3
+ %e.0.ph = phi i8* [ %e.058, %75 ], [ undef, %3 ]
+ %c.0.ph = phi i32* [ %c.059, %75 ], [ undef, %3 ]
+ %p1.addr.0.ph = phi i8* [ %incdec.ptr60, %75 ], [ %p1, %3 ]
+ %4 = ptrtoint i8* %p1.addr.0.ph to i64
+ %5 = lshr i64 %4, 3
+ %6 = add i64 %5, 2147450880
+ %7 = inttoptr i64 %6 to i8*
+ %8 = load i8, i8* %7
+ %9 = icmp ne i8 %8, 0
+ br i1 %9, label %10, label %15
+
+; <label>:10: ; preds = %while.cond.outer
+ %11 = and i64 %4, 7
+ %12 = trunc i64 %11 to i8
+ %13 = icmp sge i8 %12, %8
+ br i1 %13, label %14, label %15
+
+; <label>:14: ; preds = %10
+ call void @__asan_report_load1(i64 %4)
+ call void asm sideeffect "", ""()
+ unreachable
+
+; <label>:15: ; preds = %10, %while.cond.outer
+ %16 = load i8, i8* %p1.addr.0.ph, align 1
+ call void @__sanitizer_cov_trace_cmp1(i8 %16, i8 0)
+ %cmp57 = icmp eq i8 %16, 0
+ br i1 %cmp57, label %while.cond.outer.enoent.loopexit96_crit_edge, label %while.body.preheader
+
+while.cond.outer.enoent.loopexit96_crit_edge: ; preds = %15
+ %17 = load atomic i32, i32* inttoptr (i64 add (i64 ptrtoint ([9 x i32]* @__sancov_gen_cov to i64), i64 8) to i32*) monotonic, align 4
+ %18 = icmp sge i32 0, %17
+ br i1 %18, label %19, label %20
+
+; <label>:19: ; preds = %while.cond.outer.enoent.loopexit96_crit_edge
+ call void @__sanitizer_cov(i32* inttoptr (i64 add (i64 ptrtoint ([9 x i32]* @__sancov_gen_cov to i64), i64 8) to i32*))
+ call void asm sideeffect "", ""()
+ br label %20
+
+; <label>:20: ; preds = %while.cond.outer.enoent.loopexit96_crit_edge, %19
+ br label %enoent.loopexit96
+
+while.body.preheader: ; preds = %15
+ br label %while.body
+
+while.body: ; preds = %56, %while.body.preheader
+ %21 = phi i8 [ %52, %56 ], [ %16, %while.body.preheader ]
+ %p1.addr.0.ph.pn = phi i8* [ %incdec.ptr60, %56 ], [ %p1.addr.0.ph, %while.body.preheader ]
+ %c.059 = phi i32* [ %incdec.ptr18, %56 ], [ %c.0.ph, %while.body.preheader ]
+ %e.058 = phi i8* [ %incdec.ptr60, %56 ], [ %e.0.ph, %while.body.preheader ]
+ %incdec.ptr60 = getelementptr inbounds i8, i8* %p1.addr.0.ph.pn, i64 1
+ %conv = sext i8 %21 to i32
+ %call = tail call i32 (i8*, i32, ...) bitcast (i32 (...)* @fn3 to i32 (i8*, i32, ...)*)(i8* getelementptr inbounds ({ [17 x i8], [47 x i8] }, { [17 x i8], [47 x i8] }* @.str, i32 0, i32 0, i64 0), i32 %conv) #2
+ call void @__sanitizer_cov_trace_cmp4(i32 %call, i32 0)
+ %tobool = icmp eq i32 %call, 0
+ br i1 %tobool, label %if.end5, label %cleanup
+
+if.end5: ; preds = %while.body
+ call void @__sanitizer_cov_trace_cmp1(i8 %21, i8 58)
+ %cmp6 = icmp eq i8 %21, 58
+ br i1 %cmp6, label %if.end14, label %cleanup.thread40
+
+if.end14: ; preds = %if.end5
+ %22 = load i8, i8* inttoptr (i64 add (i64 lshr (i64 ptrtoint (i32** @a to i64), i64 3), i64 2147450880) to i8*)
+ %23 = icmp ne i8 %22, 0
+ br i1 %23, label %24, label %25
+
+; <label>:24: ; preds = %if.end14
+ call void @__asan_report_load8(i64 ptrtoint (i32** @a to i64))
+ call void asm sideeffect "", ""()
+ unreachable
+
+; <label>:25: ; preds = %if.end14
+ %26 = load i32*, i32** @a, align 8
+ %tobool15 = icmp eq i32* %26, null
+ br i1 %tobool15, label %cleanup.thread39, label %cleanup23.loopexit
+
+cleanup.thread39: ; preds = %25
+ %incdec.ptr18 = getelementptr inbounds i32, i32* %c.059, i64 1
+ %27 = ptrtoint i32* %c.059 to i64
+ %28 = lshr i64 %27, 3
+ %29 = add i64 %28, 2147450880
+ %30 = inttoptr i64 %29 to i8*
+ %31 = load i8, i8* %30
+ %32 = icmp ne i8 %31, 0
+ br i1 %32, label %33, label %39
+
+; <label>:33: ; preds = %cleanup.thread39
+ %34 = and i64 %27, 7
+ %35 = add i64 %34, 3
+ %36 = trunc i64 %35 to i8
+ %37 = icmp sge i8 %36, %31
+ br i1 %37, label %38, label %39
+
+; <label>:38: ; preds = %33
+ call void @__asan_report_store4(i64 %27)
+ call void asm sideeffect "", ""()
+ unreachable
+
+; <label>:39: ; preds = %33, %cleanup.thread39
+ store i32 0, i32* %c.059, align 4
+ %40 = ptrtoint i8* %incdec.ptr60 to i64
+ %41 = lshr i64 %40, 3
+ %42 = add i64 %41, 2147450880
+ %43 = inttoptr i64 %42 to i8*
+ %44 = load i8, i8* %43
+ %45 = icmp ne i8 %44, 0
+ br i1 %45, label %46, label %51
+
+; <label>:46: ; preds = %39
+ %47 = and i64 %40, 7
+ %48 = trunc i64 %47 to i8
+ %49 = icmp sge i8 %48, %44
+ br i1 %49, label %50, label %51
+
+; <label>:50: ; preds = %46
+ call void @__asan_report_load1(i64 %40)
+ call void asm sideeffect "", ""()
+ unreachable
+
+; <label>:51: ; preds = %46, %39
+ %52 = load i8, i8* %incdec.ptr60, align 1
+ call void @__sanitizer_cov_trace_cmp1(i8 %52, i8 0)
+ %cmp = icmp eq i8 %52, 0
+ br i1 %cmp, label %enoent.loopexit, label %cleanup.thread39.while.body_crit_edge
+
+cleanup.thread39.while.body_crit_edge: ; preds = %51
+ %53 = load atomic i32, i32* inttoptr (i64 add (i64 ptrtoint ([9 x i32]* @__sancov_gen_cov to i64), i64 12) to i32*) monotonic, align 4
+ %54 = icmp sge i32 0, %53
+ br i1 %54, label %55, label %56
+
+; <label>:55: ; preds = %cleanup.thread39.while.body_crit_edge
+ call void @__sanitizer_cov(i32* inttoptr (i64 add (i64 ptrtoint ([9 x i32]* @__sancov_gen_cov to i64), i64 12) to i32*))
+ call void asm sideeffect "", ""()
+ br label %56
+
+; <label>:56: ; preds = %cleanup.thread39.while.body_crit_edge, %55
+ br label %while.body
+
+cleanup.thread40: ; preds = %if.end5
+ %57 = load atomic i32, i32* inttoptr (i64 add (i64 ptrtoint ([9 x i32]* @__sancov_gen_cov to i64), i64 16) to i32*) monotonic, align 4
+ %58 = icmp sge i32 0, %57
+ br i1 %58, label %59, label %60
+
+; <label>:59: ; preds = %cleanup.thread40
+ call void @__sanitizer_cov(i32* inttoptr (i64 add (i64 ptrtoint ([9 x i32]* @__sancov_gen_cov to i64), i64 16) to i32*))
+ call void asm sideeffect "", ""()
+ br label %60
+
+; <label>:60: ; preds = %cleanup.thread40, %59
+ %call20 = tail call i32 (i8*, ...) bitcast (i32 (...)* @fn4 to i32 (i8*, ...)*)(i8* %e.058) #2
+ br label %enoent
+
+cleanup: ; preds = %while.body
+ %61 = load i8, i8* inttoptr (i64 add (i64 lshr (i64 ptrtoint (i32* @b to i64), i64 3), i64 2147450880) to i8*)
+ %62 = icmp ne i8 %61, 0
+ br i1 %62, label %63, label %66
+
+; <label>:63: ; preds = %cleanup
+ %64 = icmp sge i8 trunc (i64 add (i64 and (i64 ptrtoint (i32* @b to i64), i64 7), i64 3) to i8), %61
+ br i1 %64, label %65, label %66
+
+; <label>:65: ; preds = %63
+ call void @__asan_report_load4(i64 ptrtoint (i32* @b to i64))
+ call void asm sideeffect "", ""()
+ unreachable
+
+; <label>:66: ; preds = %63, %cleanup
+ %67 = load i32, i32* @b, align 4
+ call void @__sanitizer_cov_trace_cmp4(i32 %67, i32 0)
+ %tobool3 = icmp eq i32 %67, 0
+ br i1 %tobool3, label %cleanup.while.cond.outer_crit_edge, label %cleanup.enoent.loopexit96_crit_edge
+
+cleanup.enoent.loopexit96_crit_edge: ; preds = %66
+ %68 = load atomic i32, i32* inttoptr (i64 add (i64 ptrtoint ([9 x i32]* @__sancov_gen_cov to i64), i64 20) to i32*) monotonic, align 4
+ %69 = icmp sge i32 0, %68
+ br i1 %69, label %70, label %71
+
+; <label>:70: ; preds = %cleanup.enoent.loopexit96_crit_edge
+ call void @__sanitizer_cov(i32* inttoptr (i64 add (i64 ptrtoint ([9 x i32]* @__sancov_gen_cov to i64), i64 20) to i32*))
+ call void asm sideeffect "", ""()
+ br label %71
+
+; <label>:71: ; preds = %cleanup.enoent.loopexit96_crit_edge, %70
+ br label %enoent.loopexit96
+
+cleanup.while.cond.outer_crit_edge: ; preds = %66
+ %72 = load atomic i32, i32* inttoptr (i64 add (i64 ptrtoint ([9 x i32]* @__sancov_gen_cov to i64), i64 24) to i32*) monotonic, align 4
+ %73 = icmp sge i32 0, %72
+ br i1 %73, label %74, label %75
+
+; <label>:74: ; preds = %cleanup.while.cond.outer_crit_edge
+ call void @__sanitizer_cov(i32* inttoptr (i64 add (i64 ptrtoint ([9 x i32]* @__sancov_gen_cov to i64), i64 24) to i32*))
+ call void asm sideeffect "", ""()
+ br label %75
+
+; <label>:75: ; preds = %cleanup.while.cond.outer_crit_edge, %74
+ br label %while.cond.outer
+
+enoent.loopexit: ; preds = %51
+ %76 = load atomic i32, i32* inttoptr (i64 add (i64 ptrtoint ([9 x i32]* @__sancov_gen_cov to i64), i64 28) to i32*) monotonic, align 4
+ %77 = icmp sge i32 0, %76
+ br i1 %77, label %78, label %79
+
+; <label>:78: ; preds = %enoent.loopexit
+ call void @__sanitizer_cov(i32* inttoptr (i64 add (i64 ptrtoint ([9 x i32]* @__sancov_gen_cov to i64), i64 28) to i32*))
+ call void asm sideeffect "", ""()
+ br label %79
+
+; <label>:79: ; preds = %enoent.loopexit, %78
+ br label %enoent
+
+enoent.loopexit96: ; preds = %71, %20
+ br label %enoent
+
+enoent: ; preds = %enoent.loopexit96, %79, %60
+ %call22 = tail call i32* (...) @fn1() #2
+ br label %cleanup23
+
+cleanup23.loopexit: ; preds = %25
+ %80 = load atomic i32, i32* inttoptr (i64 add (i64 ptrtoint ([9 x i32]* @__sancov_gen_cov to i64), i64 32) to i32*) monotonic, align 4
+ %81 = icmp sge i32 0, %80
+ br i1 %81, label %82, label %83
+
+; <label>:82: ; preds = %cleanup23.loopexit
+ call void @__sanitizer_cov(i32* inttoptr (i64 add (i64 ptrtoint ([9 x i32]* @__sancov_gen_cov to i64), i64 32) to i32*))
+ call void asm sideeffect "", ""()
+ br label %83
+
+; <label>:83: ; preds = %cleanup23.loopexit, %82
+ br label %cleanup23
+
+cleanup23: ; preds = %83, %enoent
+ ret void
+}
+
+declare i32 @fn3(...) local_unnamed_addr #1
+
+declare i32 @fn4(...) local_unnamed_addr #1
+
+declare i32* @fn1(...) local_unnamed_addr #1
+
+declare void @__sanitizer_cov(i32*)
+
+declare void @__sanitizer_cov_trace_cmp1(i8, i8)
+
+declare void @__sanitizer_cov_trace_cmp4(i32, i32)
+
+declare void @__asan_report_load1(i64)
+
+declare void @__asan_report_load4(i64)
+
+declare void @__asan_report_load8(i64)
+
+declare void @__asan_report_store4(i64)
+
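The lshr-by-3 / add-2147450880 sequences repeated throughout the function above are AddressSanitizer's inline shadow-memory checks (2147450880 = 0x7fff8000, the default x86-64 shadow offset). A minimal C sketch of the check, using hypothetical helper names and assuming the standard 8-bytes-to-1 shadow mapping; real instrumentation emits this inline rather than calling functions:

#include <stdint.h>

/* Hypothetical helper: x86-64 mapping shadow = (addr >> 3) + 0x7fff8000 */
static inline int8_t *asan_shadow(uintptr_t addr) {
    return (int8_t *)((addr >> 3) + 2147450880u);
}

/* Sketch of the guard before an n-byte access (n <= 8). A zero shadow
   byte means the whole 8-byte granule is addressable; a nonzero value
   is the count of addressable leading bytes, so the access is bad when
   its last byte lands at or past that count. This is the icmp sge seen
   before each __asan_report_* call above. */
static inline int asan_is_bad(uintptr_t addr, int n) {
    int8_t s = *asan_shadow(addr);
    if (s == 0)
        return 0;
    return (int8_t)((addr & 7) + n - 1) >= s;
}

On failure the instrumented code calls the matching __asan_report_loadN/storeN, which does not return; that is why each report block above ends in unreachable.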
diff --git a/test/CodeGen/X86/pre-coalesce.ll b/test/CodeGen/X86/pre-coalesce.ll
new file mode 100644
index 000000000000..9cd6365453c9
--- /dev/null
+++ b/test/CodeGen/X86/pre-coalesce.ll
@@ -0,0 +1,51 @@
+; RUN: llc -regalloc=greedy -mtriple=x86_64-unknown-linux-gnu < %s -o - | FileCheck %s
+;
+; The test checks that no redundant mov like the following is generated in the %while.body loop.
+; .LBB0_2:
+; movsbl %cl, %ecx
+; movl %edx, %eax ==> This movl can be promoted out of the loop.
+; shll $5, %eax
+; ...
+; movl %eax, %edx
+; jne .LBB0_2
+;
+; CHECK-LABEL: foo:
+; CHECK: [[L0:.LBB0_[0-9]+]]: # %while.body
+; CHECK: movl %[[REGA:.*]], %[[REGB:.*]]
+; CHECK-NOT: movl %[[REGB]], %[[REGA]]
+; CHECK: jne [[L0]]
+;
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+@b = common local_unnamed_addr global i8* null, align 8
+@a = common local_unnamed_addr global i32 0, align 4
+
+define i32 @foo() local_unnamed_addr {
+entry:
+ %t0 = load i8*, i8** @b, align 8
+ %t1 = load i8, i8* %t0, align 1
+ %cmp4 = icmp eq i8 %t1, 0
+ %t2 = load i32, i32* @a, align 4
+ br i1 %cmp4, label %while.end, label %while.body.preheader
+
+while.body.preheader: ; preds = %entry
+ br label %while.body
+
+while.body: ; preds = %while.body.preheader, %while.body
+ %t3 = phi i32 [ %add3, %while.body ], [ %t2, %while.body.preheader ]
+ %t4 = phi i8 [ %t5, %while.body ], [ %t1, %while.body.preheader ]
+ %conv = sext i8 %t4 to i32
+ %add = mul i32 %t3, 33
+ %add3 = add nsw i32 %add, %conv
+ store i32 %add3, i32* @a, align 4
+ %t5 = load i8, i8* %t0, align 1
+ %cmp = icmp eq i8 %t5, 0
+ br i1 %cmp, label %while.end.loopexit, label %while.body
+
+while.end.loopexit: ; preds = %while.body
+ br label %while.end
+
+while.end: ; preds = %while.end.loopexit, %entry
+ %.lcssa = phi i32 [ %t2, %entry ], [ %add3, %while.end.loopexit ]
+ ret i32 %.lcssa
+}
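For orientation, @foo above encodes a byte-at-a-time hash recurrence. A hand-written C sketch of a loop of roughly this shape (not part of the test; note that, as in the IR, it reloads the same byte every iteration):

char *b;
int a;

int foo(void) {
    char c = *b;
    while (c != 0) {
        a = a * 33 + c;   /* the mul-by-33 plus sext(%t4) recurrence in the IR */
        c = *b;           /* reloads *b, exactly as %t5 does above */
    }
    return a;
}

The CHECK/CHECK-NOT pair then asserts that only one direction of the copy between the two loop-carried registers survives in the loop body.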
diff --git a/test/CodeGen/X86/pre-coalesce.mir b/test/CodeGen/X86/pre-coalesce.mir
new file mode 100644
index 000000000000..11805fe090b4
--- /dev/null
+++ b/test/CodeGen/X86/pre-coalesce.mir
@@ -0,0 +1,122 @@
+# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass simple-register-coalescing -o - %s | FileCheck %s
+# Check that no partially redundant copy is left in the loop after register coalescing.
+--- |
+ ; ModuleID = '<stdin>'
+ source_filename = "<stdin>"
+ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+ target triple = "x86_64-unknown-linux-gnu"
+
+ @b = common local_unnamed_addr global i8* null, align 8
+ @a = common local_unnamed_addr global i32 0, align 4
+
+ define i32 @foo() local_unnamed_addr {
+ entry:
+ %t0 = load i8*, i8** @b, align 8
+ %t1 = load i8, i8* %t0, align 1
+ %cmp4 = icmp eq i8 %t1, 0
+ %t2 = load i32, i32* @a, align 4
+ br i1 %cmp4, label %while.end, label %while.body.preheader
+
+ while.body.preheader: ; preds = %entry
+ br label %while.body
+
+ while.body: ; preds = %while.body, %while.body.preheader
+ %t3 = phi i32 [ %add3, %while.body ], [ %t2, %while.body.preheader ]
+ %t4 = phi i8 [ %t5, %while.body ], [ %t1, %while.body.preheader ]
+ %conv = sext i8 %t4 to i32
+ %add = mul i32 %t3, 33
+ %add3 = add nsw i32 %add, %conv
+ store i32 %add3, i32* @a, align 4
+ %t5 = load i8, i8* %t0, align 1
+ %cmp = icmp eq i8 %t5, 0
+ br i1 %cmp, label %while.end, label %while.body
+
+ while.end: ; preds = %while.body, %entry
+ %.lcssa = phi i32 [ %t2, %entry ], [ %add3, %while.body ]
+ ret i32 %.lcssa
+ }
+
+...
+---
+# Check that the copies A = B and B = A do not both exist in the loop at the same time.
+# CHECK: name: foo
+# CHECK: [[L1:bb.3.while.body]]:
+# CHECK: %[[REGA:.*]] = COPY %[[REGB:.*]]
+# CHECK-NOT: %[[REGB]] = COPY %[[REGA]]
+# CHECK: JNE_1 %[[L1]]
+
+name: foo
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr64 }
+ - { id: 1, class: gr8 }
+ - { id: 2, class: gr32 }
+ - { id: 3, class: gr32 }
+ - { id: 4, class: gr8 }
+ - { id: 5, class: gr32 }
+ - { id: 6, class: gr8 }
+ - { id: 7, class: gr32 }
+ - { id: 8, class: gr32 }
+ - { id: 9, class: gr32 }
+ - { id: 10, class: gr32 }
+ - { id: 11, class: gr32 }
+ - { id: 12, class: gr8 }
+ - { id: 13, class: gr32 }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ successors: %bb.4(0x30000000), %bb.1.while.body.preheader(0x50000000)
+
+ %0 = MOV64rm %rip, 1, _, @b, _ :: (dereferenceable load 8 from @b)
+ %12 = MOV8rm %0, 1, _, 0, _ :: (load 1 from %ir.t0)
+ TEST8rr %12, %12, implicit-def %eflags
+ %11 = MOV32rm %rip, 1, _, @a, _ :: (dereferenceable load 4 from @a)
+ JNE_1 %bb.1.while.body.preheader, implicit killed %eflags
+
+ bb.4:
+ successors: %bb.3.while.end(0x80000000)
+
+ %10 = COPY %11
+ JMP_1 %bb.3.while.end
+
+ bb.1.while.body.preheader:
+ successors: %bb.2.while.body(0x80000000)
+
+ bb.2.while.body:
+ successors: %bb.3.while.end(0x04000000), %bb.2.while.body(0x7c000000)
+
+ %8 = MOVSX32rr8 %12
+ %10 = COPY %11
+ %10 = SHL32ri %10, 5, implicit-def dead %eflags
+ %10 = ADD32rr %10, %11, implicit-def dead %eflags
+ %10 = ADD32rr %10, %8, implicit-def dead %eflags
+ MOV32mr %rip, 1, _, @a, _, %10 :: (store 4 into @a)
+ %12 = MOV8rm %0, 1, _, 0, _ :: (load 1 from %ir.t0)
+ TEST8rr %12, %12, implicit-def %eflags
+ %11 = COPY %10
+ JNE_1 %bb.2.while.body, implicit killed %eflags
+ JMP_1 %bb.3.while.end
+
+ bb.3.while.end:
+ %eax = COPY %10
+ RET 0, killed %eax
+
+...
diff --git a/test/CodeGen/X86/prefixdata.ll b/test/CodeGen/X86/prefixdata.ll
index 9bb54a2a3977..b62f48ddce27 100644
--- a/test/CodeGen/X86/prefixdata.ll
+++ b/test/CodeGen/X86/prefixdata.ll
@@ -1,18 +1,29 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck --check-prefix=MACHO %s
+; RUN: llc < %s -mtriple=x86_64-pc-linux | FileCheck --check-prefix=ELF %s
@i = linkonce_odr global i32 1
-; CHECK: .type f,@function
-; CHECK-NEXT: .long 1
-; CHECK-NEXT: # 0x1
-; CHECK-NEXT: f:
+; MACHO: ltmp0:
+; MACHO-NEXT: .long 1
+; MACHO-NEXT: .alt_entry _f
+; MACHO-NEXT: _f:
+; ELF: .type f,@function
+; ELF-NEXT: .long 1
+; ELF-NEXT: # 0x1
+; ELF-NEXT: f:
define void @f() prefix i32 1 {
ret void
}
-; CHECK: .type g,@function
-; CHECK-NEXT: .quad i
-; CHECK-NEXT: g:
+; MACHO: ltmp1:
+; MACHO-NEXT: .quad _i
+; MACHO-NEXT: .alt_entry _g
+; MACHO-NEXT: _g:
+; ELF: .type g,@function
+; ELF-NEXT: .quad i
+; ELF-NEXT: g:
define void @g() prefix i32* @i {
ret void
}
+
+; MACHO: .subsections_via_symbols
diff --git a/test/CodeGen/X86/promote-vec3.ll b/test/CodeGen/X86/promote-vec3.ll
index 7a496714622a..42aeeb14739d 100644
--- a/test/CodeGen/X86/promote-vec3.ll
+++ b/test/CodeGen/X86/promote-vec3.ll
@@ -9,17 +9,16 @@ define <3 x i16> @zext_i8(<3 x i8>) {
; SSE3-LABEL: zext_i8:
; SSE3: # BB#0:
; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; SSE3-NEXT: pxor %xmm0, %xmm0
-; SSE3-NEXT: pxor %xmm1, %xmm1
-; SSE3-NEXT: pinsrw $0, %eax, %xmm1
+; SSE3-NEXT: movd %eax, %xmm0
; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; SSE3-NEXT: pinsrw $1, %eax, %xmm1
+; SSE3-NEXT: pinsrw $1, %eax, %xmm0
; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; SSE3-NEXT: pinsrw $2, %eax, %xmm1
-; SSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE3-NEXT: movd %xmm1, %eax
-; SSE3-NEXT: pextrw $2, %xmm1, %edx
-; SSE3-NEXT: pextrw $4, %xmm1, %ecx
+; SSE3-NEXT: pinsrw $2, %eax, %xmm0
+; SSE3-NEXT: pxor %xmm1, %xmm1
+; SSE3-NEXT: pextrw $1, %xmm0, %edx
+; SSE3-NEXT: pextrw $2, %xmm0, %ecx
+; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE3-NEXT: movd %xmm0, %eax
; SSE3-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; SSE3-NEXT: # kill: %DX<def> %DX<kill> %EDX<kill>
; SSE3-NEXT: # kill: %CX<def> %CX<kill> %ECX<kill>
@@ -74,7 +73,7 @@ define <3 x i16> @sext_i8(<3 x i8>) {
; SSE3-LABEL: sext_i8:
; SSE3: # BB#0:
; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; SSE3-NEXT: pinsrw $0, %eax, %xmm0
+; SSE3-NEXT: movd %eax, %xmm0
; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; SSE3-NEXT: pinsrw $1, %eax, %xmm0
; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %eax
@@ -93,7 +92,7 @@ define <3 x i16> @sext_i8(<3 x i8>) {
;
; SSE41-LABEL: sext_i8:
; SSE41: # BB#0:
-; SSE41-NEXT: pinsrb $0, {{[0-9]+}}(%esp), %xmm0
+; SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT: pinsrb $4, {{[0-9]+}}(%esp), %xmm0
; SSE41-NEXT: pinsrb $8, {{[0-9]+}}(%esp), %xmm0
; SSE41-NEXT: pslld $24, %xmm0
@@ -108,7 +107,7 @@ define <3 x i16> @sext_i8(<3 x i8>) {
;
; AVX-32-LABEL: sext_i8:
; AVX-32: # BB#0:
-; AVX-32-NEXT: vpinsrb $0, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT: vpslld $24, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/psubus.ll b/test/CodeGen/X86/psubus.ll
index 9a11eff5331d..35f96eda35e1 100644
--- a/test/CodeGen/X86/psubus.ll
+++ b/test/CodeGen/X86/psubus.ll
@@ -213,7 +213,7 @@ define void @test7(i16* nocapture %head) nounwind {
;
; AVX1-LABEL: test7:
; AVX1: ## BB#0: ## %vector.ph
-; AVX1-NEXT: vmovups (%rdi), %ymm0
+; AVX1-NEXT: vmovdqu (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtw %xmm1, %xmm2, %xmm1
@@ -257,13 +257,13 @@ define void @test8(i16* nocapture %head) nounwind {
;
; AVX1-LABEL: test8:
; AVX1: ## BB#0: ## %vector.ph
-; AVX1-NEXT: vmovups (%rdi), %ymm0
+; AVX1-NEXT: vmovdqu (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovaps {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; AVX1-NEXT: vxorps %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [65534,65534,65534,65534,65534,65534,65534,65534]
; AVX1-NEXT: vpcmpgtw %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vxorps %xmm2, %xmm0, %xmm2
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpcmpgtw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [32769,32769,32769,32769,32769,32769,32769,32769]
@@ -310,7 +310,7 @@ define void @test9(i16* nocapture %head, i16 zeroext %w) nounwind {
;
; AVX1-LABEL: test9:
; AVX1: ## BB#0: ## %vector.ph
-; AVX1-NEXT: vmovups (%rdi), %ymm0
+; AVX1-NEXT: vmovdqu (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovd %esi, %xmm2
; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,0,0,0,4,5,6,7]
@@ -364,7 +364,7 @@ define void @test10(i8* nocapture %head) nounwind {
;
; AVX1-LABEL: test10:
; AVX1: ## BB#0: ## %vector.ph
-; AVX1-NEXT: vmovups (%rdi), %ymm0
+; AVX1-NEXT: vmovdqu (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
@@ -409,13 +409,13 @@ define void @test11(i8* nocapture %head) nounwind {
;
; AVX1-LABEL: test11:
; AVX1: ## BB#0: ## %vector.ph
-; AVX1-NEXT: vmovups (%rdi), %ymm0
+; AVX1-NEXT: vmovdqu (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovaps {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX1-NEXT: vxorps %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [254,254,254,254,254,254,254,254,254,254,254,254,254,254,254,254]
; AVX1-NEXT: vpcmpgtb %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vxorps %xmm2, %xmm0, %xmm2
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpcmpgtb %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [129,129,129,129,129,129,129,129,129,129,129,129,129,129,129,129]
@@ -475,7 +475,7 @@ define void @test12(i8* nocapture %head, i8 zeroext %w) nounwind {
;
; AVX1-LABEL: test12:
; AVX1: ## BB#0: ## %vector.ph
-; AVX1-NEXT: vmovups (%rdi), %ymm0
+; AVX1-NEXT: vmovdqu (%rdi), %ymm0
; AVX1-NEXT: vmovd %esi, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -542,8 +542,6 @@ define void @test13(i16* nocapture %head, i32* nocapture %w) nounwind {
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
-; SSE2-NEXT: psllw $15, %xmm4
-; SSE2-NEXT: psraw $15, %xmm4
; SSE2-NEXT: psubd %xmm2, %xmm1
; SSE2-NEXT: pslld $16, %xmm0
; SSE2-NEXT: psrad $16, %xmm0
@@ -577,8 +575,6 @@ define void @test13(i16* nocapture %head, i32* nocapture %w) nounwind {
; SSSE3-NEXT: pcmpgtd %xmm4, %xmm6
; SSSE3-NEXT: pshufb %xmm5, %xmm6
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm3[0]
-; SSSE3-NEXT: psllw $15, %xmm6
-; SSSE3-NEXT: psraw $15, %xmm6
; SSSE3-NEXT: psubd %xmm2, %xmm1
; SSSE3-NEXT: pshufb %xmm5, %xmm0
; SSSE3-NEXT: pshufb %xmm5, %xmm1
@@ -589,7 +585,7 @@ define void @test13(i16* nocapture %head, i32* nocapture %w) nounwind {
;
; AVX1-LABEL: test13:
; AVX1: ## BB#0: ## %vector.ph
-; AVX1-NEXT: vmovups (%rsi), %ymm0
+; AVX1-NEXT: vmovdqu (%rsi), %ymm0
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
@@ -598,7 +594,7 @@ define void @test13(i16* nocapture %head, i32* nocapture %w) nounwind {
; AVX1-NEXT: vpxor %xmm3, %xmm5, %xmm6
; AVX1-NEXT: vpcmpgtd %xmm4, %xmm6, %xmm4
; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm6
-; AVX1-NEXT: vxorps %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm6, %xmm3, %xmm3
; AVX1-NEXT: vpacksswb %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm0
@@ -623,7 +619,7 @@ define void @test13(i16* nocapture %head, i32* nocapture %w) nounwind {
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT: vpacksswb %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsubd %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpandn %xmm0, %xmm2, %xmm0
; AVX2-NEXT: vmovdqu %xmm0, (%rdi)
@@ -649,150 +645,123 @@ define void @test14(i8* nocapture %head, i32* nocapture %w) nounwind {
; SSE2-LABEL: test14:
; SSE2: ## BB#0: ## %vector.ph
; SSE2-NEXT: movdqu (%rdi), %xmm0
-; SSE2-NEXT: movdqu (%rsi), %xmm10
-; SSE2-NEXT: movdqu 16(%rsi), %xmm4
-; SSE2-NEXT: movdqu 32(%rsi), %xmm8
-; SSE2-NEXT: movdqu 48(%rsi), %xmm9
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE2-NEXT: movdqa %xmm3, %xmm11
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm1[0],xmm11[1],xmm1[1],xmm11[2],xmm1[2],xmm11[3],xmm1[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: movdqu (%rsi), %xmm8
+; SSE2-NEXT: movdqu 16(%rsi), %xmm9
+; SSE2-NEXT: movdqu 32(%rsi), %xmm10
+; SSE2-NEXT: movdqu 48(%rsi), %xmm7
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm0, %xmm7
-; SSE2-NEXT: psubd %xmm4, %xmm0
-; SSE2-NEXT: pxor %xmm6, %xmm4
-; SSE2-NEXT: pxor %xmm6, %xmm7
-; SSE2-NEXT: pcmpgtd %xmm7, %xmm4
-; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
-; SSE2-NEXT: movdqa %xmm5, %xmm7
-; SSE2-NEXT: psubd %xmm10, %xmm5
-; SSE2-NEXT: pxor %xmm6, %xmm10
-; SSE2-NEXT: pxor %xmm6, %xmm7
-; SSE2-NEXT: pcmpgtd %xmm7, %xmm10
-; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm10[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm4[0]
-; SSE2-NEXT: psllw $15, %xmm7
-; SSE2-NEXT: psraw $15, %xmm7
-; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255]
-; SSE2-NEXT: pand %xmm10, %xmm7
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: psubd %xmm9, %xmm3
-; SSE2-NEXT: pxor %xmm6, %xmm9
-; SSE2-NEXT: pxor %xmm6, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm9
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm9[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT: psubd %xmm7, %xmm0
+; SSE2-NEXT: pxor %xmm3, %xmm7
+; SSE2-NEXT: pxor %xmm3, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255]
+; SSE2-NEXT: pand %xmm5, %xmm7
+; SSE2-NEXT: movdqa %xmm6, %xmm4
+; SSE2-NEXT: psubd %xmm10, %xmm6
+; SSE2-NEXT: pxor %xmm3, %xmm10
+; SSE2-NEXT: pxor %xmm3, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm10
+; SSE2-NEXT: pand %xmm5, %xmm10
+; SSE2-NEXT: packuswb %xmm7, %xmm10
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: psubd %xmm9, %xmm1
+; SSE2-NEXT: pxor %xmm3, %xmm9
+; SSE2-NEXT: pxor %xmm3, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm9
+; SSE2-NEXT: pand %xmm5, %xmm9
; SSE2-NEXT: movdqa %xmm8, %xmm4
-; SSE2-NEXT: pxor %xmm6, %xmm4
-; SSE2-NEXT: pxor %xmm11, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm6, %xmm4
-; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm2[0]
-; SSE2-NEXT: psllw $15, %xmm4
-; SSE2-NEXT: psraw $15, %xmm4
-; SSE2-NEXT: pand %xmm10, %xmm4
-; SSE2-NEXT: packuswb %xmm4, %xmm7
-; SSE2-NEXT: psllw $7, %xmm7
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm7
-; SSE2-NEXT: pcmpgtb %xmm7, %xmm1
-; SSE2-NEXT: psubd %xmm8, %xmm11
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm5
-; SSE2-NEXT: packuswb %xmm0, %xmm5
-; SSE2-NEXT: pand %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm2, %xmm11
-; SSE2-NEXT: packuswb %xmm3, %xmm11
-; SSE2-NEXT: packuswb %xmm11, %xmm5
-; SSE2-NEXT: pandn %xmm5, %xmm1
-; SSE2-NEXT: movdqu %xmm1, (%rdi)
+; SSE2-NEXT: pxor %xmm3, %xmm4
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: packuswb %xmm9, %xmm4
+; SSE2-NEXT: packuswb %xmm10, %xmm4
+; SSE2-NEXT: psubd %xmm8, %xmm2
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: pand %xmm5, %xmm6
+; SSE2-NEXT: packuswb %xmm0, %xmm6
+; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: packuswb %xmm1, %xmm2
+; SSE2-NEXT: packuswb %xmm6, %xmm2
+; SSE2-NEXT: pandn %xmm2, %xmm4
+; SSE2-NEXT: movdqu %xmm4, (%rdi)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test14:
; SSSE3: ## BB#0: ## %vector.ph
-; SSSE3-NEXT: movdqu (%rdi), %xmm7
-; SSSE3-NEXT: movdqu (%rsi), %xmm10
-; SSSE3-NEXT: movdqu 16(%rsi), %xmm4
-; SSSE3-NEXT: movdqu 32(%rsi), %xmm8
-; SSSE3-NEXT: movdqu 48(%rsi), %xmm9
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm7[2,3,0,1]
-; SSSE3-NEXT: pxor %xmm1, %xmm1
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSSE3-NEXT: movdqa %xmm2, %xmm3
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3],xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7]
-; SSSE3-NEXT: movdqa %xmm7, %xmm0
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
-; SSSE3-NEXT: movdqa %xmm7, %xmm5
-; SSSE3-NEXT: psubd %xmm4, %xmm7
-; SSSE3-NEXT: pxor %xmm6, %xmm4
-; SSSE3-NEXT: pxor %xmm6, %xmm5
-; SSSE3-NEXT: pcmpgtd %xmm5, %xmm4
-; SSSE3-NEXT: movdqa {{.*#+}} xmm11 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSSE3-NEXT: pshufb %xmm11, %xmm4
+; SSSE3-NEXT: movdqu (%rdi), %xmm0
+; SSSE3-NEXT: movdqu (%rsi), %xmm8
+; SSSE3-NEXT: movdqu 16(%rsi), %xmm9
+; SSSE3-NEXT: movdqu 32(%rsi), %xmm10
+; SSSE3-NEXT: movdqu 48(%rsi), %xmm7
+; SSSE3-NEXT: pxor %xmm3, %xmm3
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSSE3-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
+; SSSE3-NEXT: movdqa %xmm0, %xmm6
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSSE3-NEXT: movdqa %xmm0, %xmm5
-; SSSE3-NEXT: psubd %xmm10, %xmm0
-; SSSE3-NEXT: pxor %xmm6, %xmm10
-; SSSE3-NEXT: pxor %xmm6, %xmm5
-; SSSE3-NEXT: pcmpgtd %xmm5, %xmm10
-; SSSE3-NEXT: pshufb %xmm11, %xmm10
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm10 = xmm10[0],xmm4[0]
-; SSSE3-NEXT: psllw $15, %xmm10
-; SSSE3-NEXT: psraw $15, %xmm10
-; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; SSSE3-NEXT: pshufb %xmm4, %xmm10
-; SSSE3-NEXT: movdqa %xmm2, %xmm5
-; SSSE3-NEXT: psubd %xmm9, %xmm2
-; SSSE3-NEXT: pxor %xmm6, %xmm9
-; SSSE3-NEXT: pxor %xmm6, %xmm5
-; SSSE3-NEXT: pcmpgtd %xmm5, %xmm9
-; SSSE3-NEXT: pshufb %xmm11, %xmm9
+; SSSE3-NEXT: psubd %xmm7, %xmm0
+; SSSE3-NEXT: pxor %xmm3, %xmm7
+; SSSE3-NEXT: pxor %xmm3, %xmm5
+; SSSE3-NEXT: pcmpgtd %xmm5, %xmm7
+; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
+; SSSE3-NEXT: pshufb %xmm5, %xmm7
+; SSSE3-NEXT: movdqa %xmm6, %xmm4
+; SSSE3-NEXT: psubd %xmm10, %xmm6
+; SSSE3-NEXT: pxor %xmm3, %xmm10
+; SSSE3-NEXT: pxor %xmm3, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm4, %xmm10
+; SSSE3-NEXT: pshufb %xmm5, %xmm10
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm7[0],xmm10[1],xmm7[1]
+; SSSE3-NEXT: movdqa %xmm1, %xmm4
+; SSSE3-NEXT: psubd %xmm9, %xmm1
+; SSSE3-NEXT: pxor %xmm3, %xmm9
+; SSSE3-NEXT: pxor %xmm3, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm4, %xmm9
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; SSSE3-NEXT: pshufb %xmm4, %xmm9
; SSSE3-NEXT: movdqa %xmm8, %xmm5
-; SSSE3-NEXT: pxor %xmm6, %xmm5
-; SSSE3-NEXT: pxor %xmm3, %xmm6
-; SSSE3-NEXT: pcmpgtd %xmm6, %xmm5
-; SSSE3-NEXT: pshufb %xmm11, %xmm5
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm9[0]
-; SSSE3-NEXT: psllw $15, %xmm5
-; SSSE3-NEXT: psraw $15, %xmm5
+; SSSE3-NEXT: pxor %xmm3, %xmm5
+; SSSE3-NEXT: pxor %xmm2, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm5
; SSSE3-NEXT: pshufb %xmm4, %xmm5
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm10 = xmm10[0],xmm5[0]
-; SSSE3-NEXT: psllw $7, %xmm10
-; SSSE3-NEXT: pand {{.*}}(%rip), %xmm10
-; SSSE3-NEXT: pcmpgtb %xmm10, %xmm1
-; SSSE3-NEXT: psubd %xmm8, %xmm3
-; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSSE3-NEXT: pand %xmm4, %xmm7
-; SSSE3-NEXT: pand %xmm4, %xmm0
-; SSSE3-NEXT: packuswb %xmm7, %xmm0
-; SSSE3-NEXT: pand %xmm4, %xmm2
-; SSSE3-NEXT: pand %xmm4, %xmm3
-; SSSE3-NEXT: packuswb %xmm2, %xmm3
-; SSSE3-NEXT: packuswb %xmm3, %xmm0
-; SSSE3-NEXT: pandn %xmm0, %xmm1
-; SSSE3-NEXT: movdqu %xmm1, (%rdi)
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1]
+; SSSE3-NEXT: movsd {{.*#+}} xmm10 = xmm5[0],xmm10[1]
+; SSSE3-NEXT: psubd %xmm8, %xmm2
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSSE3-NEXT: pand %xmm3, %xmm0
+; SSSE3-NEXT: pand %xmm3, %xmm6
+; SSSE3-NEXT: packuswb %xmm0, %xmm6
+; SSSE3-NEXT: pand %xmm3, %xmm1
+; SSSE3-NEXT: pand %xmm3, %xmm2
+; SSSE3-NEXT: packuswb %xmm1, %xmm2
+; SSSE3-NEXT: packuswb %xmm6, %xmm2
+; SSSE3-NEXT: andnpd %xmm2, %xmm10
+; SSSE3-NEXT: movupd %xmm10, (%rdi)
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test14:
; AVX1: ## BB#0: ## %vector.ph
-; AVX1-NEXT: vmovups (%rsi), %ymm0
-; AVX1-NEXT: vmovups 32(%rsi), %ymm1
+; AVX1-NEXT: vmovdqu (%rsi), %ymm0
+; AVX1-NEXT: vmovdqu 32(%rsi), %ymm1
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -803,25 +772,20 @@ define void @test14(i8* nocapture %head, i32* nocapture %w) nounwind {
; AVX1-NEXT: vpxor %xmm6, %xmm2, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm6, %xmm10, %xmm7
-; AVX1-NEXT: vxorps %xmm6, %xmm1, %xmm4
+; AVX1-NEXT: vpxor %xmm6, %xmm1, %xmm4
; AVX1-NEXT: vpcmpgtd %xmm7, %xmm4, %xmm4
+; AVX1-NEXT: vpacksswb %xmm3, %xmm4, %xmm11
+; AVX1-NEXT: vpxor %xmm6, %xmm9, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7
+; AVX1-NEXT: vpxor %xmm6, %xmm7, %xmm3
+; AVX1-NEXT: vpcmpgtd %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm6, %xmm8, %xmm4
+; AVX1-NEXT: vpxor %xmm6, %xmm0, %xmm6
+; AVX1-NEXT: vpcmpgtd %xmm4, %xmm6, %xmm4
; AVX1-NEXT: vpacksswb %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm11 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm11, %xmm3, %xmm12
-; AVX1-NEXT: vpxor %xmm6, %xmm9, %xmm7
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vxorps %xmm6, %xmm4, %xmm3
-; AVX1-NEXT: vpcmpgtd %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpxor %xmm6, %xmm8, %xmm7
-; AVX1-NEXT: vxorps %xmm6, %xmm0, %xmm6
-; AVX1-NEXT: vpcmpgtd %xmm7, %xmm6, %xmm6
-; AVX1-NEXT: vpacksswb %xmm3, %xmm6, %xmm3
-; AVX1-NEXT: vpshufb %xmm11, %xmm3, %xmm3
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm12[0]
-; AVX1-NEXT: vpsllw $7, %xmm3, %xmm3
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT: vpacksswb %xmm11, %xmm3, %xmm3
; AVX1-NEXT: vpsubd %xmm0, %xmm8, %xmm0
-; AVX1-NEXT: vpsubd %xmm4, %xmm9, %xmm4
+; AVX1-NEXT: vpsubd %xmm7, %xmm9, %xmm4
; AVX1-NEXT: vpsubd %xmm1, %xmm10, %xmm1
; AVX1-NEXT: vpsubd %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
@@ -850,26 +814,22 @@ define void @test14(i8* nocapture %head, i32* nocapture %w) nounwind {
; AVX2-NEXT: vpcmpgtd %ymm5, %ymm6, %ymm5
; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm6
; AVX2-NEXT: vpacksswb %xmm6, %xmm5, %xmm5
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm6, %xmm5, %xmm5
-; AVX2-NEXT: vpxor %ymm4, %ymm2, %ymm7
+; AVX2-NEXT: vpxor %ymm4, %ymm2, %ymm6
; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm4
-; AVX2-NEXT: vpcmpgtd %ymm7, %ymm4, %ymm4
-; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm7
-; AVX2-NEXT: vpacksswb %xmm7, %xmm4, %xmm4
-; AVX2-NEXT: vpshufb %xmm6, %xmm4, %xmm4
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
-; AVX2-NEXT: vpsllw $7, %xmm4, %xmm4
-; AVX2-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
+; AVX2-NEXT: vpcmpgtd %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm6
+; AVX2-NEXT: vpacksswb %xmm6, %xmm4, %xmm4
+; AVX2-NEXT: vpacksswb %xmm5, %xmm4, %xmm4
; AVX2-NEXT: vpsubd %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpsubd %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm6, %xmm1, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm6, %xmm0, %xmm0
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpblendvb %xmm4, %xmm1, %xmm0, %xmm0
@@ -919,8 +879,6 @@ define void @test15(i16* nocapture %head, i32* nocapture %w) nounwind {
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
-; SSE2-NEXT: psllw $15, %xmm4
-; SSE2-NEXT: psraw $15, %xmm4
; SSE2-NEXT: psubd %xmm2, %xmm1
; SSE2-NEXT: pslld $16, %xmm0
; SSE2-NEXT: psrad $16, %xmm0
@@ -954,8 +912,6 @@ define void @test15(i16* nocapture %head, i32* nocapture %w) nounwind {
; SSSE3-NEXT: pcmpgtd %xmm6, %xmm3
; SSSE3-NEXT: pshufb %xmm4, %xmm3
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
-; SSSE3-NEXT: psllw $15, %xmm3
-; SSSE3-NEXT: psraw $15, %xmm3
; SSSE3-NEXT: psubd %xmm2, %xmm1
; SSSE3-NEXT: pshufb %xmm4, %xmm0
; SSSE3-NEXT: pshufb %xmm4, %xmm1
@@ -966,7 +922,7 @@ define void @test15(i16* nocapture %head, i32* nocapture %w) nounwind {
;
; AVX1-LABEL: test15:
; AVX1: ## BB#0: ## %vector.ph
-; AVX1-NEXT: vmovups (%rsi), %ymm0
+; AVX1-NEXT: vmovdqu (%rsi), %ymm0
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
@@ -975,7 +931,7 @@ define void @test15(i16* nocapture %head, i32* nocapture %w) nounwind {
; AVX1-NEXT: vpxor %xmm3, %xmm5, %xmm6
; AVX1-NEXT: vpcmpgtd %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm6
-; AVX1-NEXT: vxorps %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpacksswb %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm0
@@ -1000,7 +956,7 @@ define void @test15(i16* nocapture %head, i32* nocapture %w) nounwind {
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT: vpacksswb %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsubd %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpand %xmm0, %xmm2, %xmm0
; AVX2-NEXT: vmovdqu %xmm0, (%rdi)
@@ -1049,8 +1005,6 @@ define void @test16(i16* nocapture %head, i32* nocapture %w) nounwind {
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
-; SSE2-NEXT: psllw $15, %xmm4
-; SSE2-NEXT: psraw $15, %xmm4
; SSE2-NEXT: psubd %xmm2, %xmm1
; SSE2-NEXT: pslld $16, %xmm0
; SSE2-NEXT: psrad $16, %xmm0
@@ -1084,8 +1038,6 @@ define void @test16(i16* nocapture %head, i32* nocapture %w) nounwind {
; SSSE3-NEXT: pcmpgtd %xmm6, %xmm3
; SSSE3-NEXT: pshufb %xmm4, %xmm3
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
-; SSSE3-NEXT: psllw $15, %xmm3
-; SSSE3-NEXT: psraw $15, %xmm3
; SSSE3-NEXT: psubd %xmm2, %xmm1
; SSSE3-NEXT: pshufb %xmm4, %xmm0
; SSSE3-NEXT: pshufb %xmm4, %xmm1
@@ -1096,7 +1048,7 @@ define void @test16(i16* nocapture %head, i32* nocapture %w) nounwind {
;
; AVX1-LABEL: test16:
; AVX1: ## BB#0: ## %vector.ph
-; AVX1-NEXT: vmovups (%rsi), %ymm0
+; AVX1-NEXT: vmovdqu (%rsi), %ymm0
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
@@ -1105,7 +1057,7 @@ define void @test16(i16* nocapture %head, i32* nocapture %w) nounwind {
; AVX1-NEXT: vpxor %xmm3, %xmm5, %xmm6
; AVX1-NEXT: vpcmpgtd %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm6
-; AVX1-NEXT: vxorps %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpacksswb %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm0
@@ -1130,7 +1082,7 @@ define void @test16(i16* nocapture %head, i32* nocapture %w) nounwind {
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT: vpacksswb %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsubd %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpand %xmm0, %xmm2, %xmm0
; AVX2-NEXT: vmovdqu %xmm0, (%rdi)
diff --git a/test/CodeGen/X86/recip-fastmath.ll b/test/CodeGen/X86/recip-fastmath.ll
index 0a99254cd625..16e261bf3c5e 100644
--- a/test/CodeGen/X86/recip-fastmath.ll
+++ b/test/CodeGen/X86/recip-fastmath.ll
@@ -1,6 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE --check-prefix=SSE-RECIP
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX-RECIP
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=FMA-RECIP
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2 -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=BTVER2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=SANDY
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=haswell -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=haswell -mattr=-fma | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=HASWELL-NO-FMA
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX512 --check-prefix=KNL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX512 --check-prefix=SKX
; If the target's divss/divps instructions are substantially
; slower than rcpss/rcpps with a Newton-Raphson refinement,
@@ -18,11 +25,47 @@ define float @f32_no_estimate(float %x) #0 {
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: f32_no_estimate:
-; AVX: # BB#0:
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX-RECIP-LABEL: f32_no_estimate:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-RECIP-NEXT: vdivss %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: f32_no_estimate:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; FMA-RECIP-NEXT: vdivss %xmm0, %xmm1, %xmm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: f32_no_estimate:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [5:1.00]
+; BTVER2-NEXT: vdivss %xmm0, %xmm1, %xmm0 # sched: [19:19.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: f32_no_estimate:
+; SANDY: # BB#0:
+; SANDY-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [4:0.50]
+; SANDY-NEXT: vdivss %xmm0, %xmm1, %xmm0 # sched: [12:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: f32_no_estimate:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [4:0.50]
+; HASWELL-NEXT: vdivss %xmm0, %xmm1, %xmm0 # sched: [12:1.00]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: f32_no_estimate:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; HASWELL-NO-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
+; HASWELL-NO-FMA-NEXT: retq
+;
+; AVX512-LABEL: f32_no_estimate:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [4:0.50]
+; AVX512-NEXT: vdivss %xmm0, %xmm1, %xmm0 # sched: [12:1.00]
+; AVX512-NEXT: retq # sched: [1:1.00]
%div = fdiv fast float 1.0, %x
ret float %div
}
@@ -39,15 +82,66 @@ define float @f32_one_step(float %x) #1 {
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: f32_one_step:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX-NEXT: vsubss %xmm0, %xmm2, %xmm0
-; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX-RECIP-LABEL: f32_one_step:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
+; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm0
+; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-RECIP-NEXT: vsubss %xmm0, %xmm2, %xmm0
+; AVX-RECIP-NEXT: vmulss %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: f32_one_step:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
+; FMA-RECIP-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0
+; FMA-RECIP-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: f32_one_step:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [5:1.00]
+; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [2:1.00]
+; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vsubss %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vaddss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: f32_one_step:
+; SANDY: # BB#0:
+; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
+; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [4:0.50]
+; SANDY-NEXT: vsubss %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vaddss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: f32_one_step:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
+; HASWELL-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0
+; HASWELL-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: f32_one_step:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm1
+; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm0
+; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; HASWELL-NO-FMA-NEXT: vsubss %xmm0, %xmm2, %xmm0
+; HASWELL-NO-FMA-NEXT: vmulss %xmm0, %xmm1, %xmm0
+; HASWELL-NO-FMA-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; HASWELL-NO-FMA-NEXT: retq
+;
+; AVX512-LABEL: f32_one_step:
+; AVX512: # BB#0:
+; AVX512-NEXT: vrcp14ss %xmm0, %xmm0, %xmm1
+; AVX512-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0
+; AVX512-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0
+; AVX512-NEXT: retq # sched: [1:1.00]
%div = fdiv fast float 1.0, %x
ret float %div
}
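Each of the one-step sequences checked above computes a single Newton-Raphson refinement of the hardware reciprocal estimate: r1 = r0 + r0*(1 - x*r0), equivalently r0*(2 - x*r0). A minimal C sketch, with approx_rcp as a hypothetical stand-in for the ~12-bit rcpss/rcp14ss estimate:

extern float approx_rcp(float x);   /* hypothetical: hardware estimate of 1/x */

float recip_one_step(float x) {
    float r0 = approx_rcp(x);
    float e  = 1.0f - x * r0;   /* vfnmadd213ss: -(x*r0) + 1.0 */
    return r0 + r0 * e;         /* vfmadd132ss:  r0*e + r0 */
}

The f32_two_step body that follows simply applies the same refinement twice, feeding the first result back in as the new estimate; each step roughly doubles the number of correct bits.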
@@ -70,19 +164,94 @@ define float @f32_two_step(float %x) #2 {
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: f32_two_step:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX-NEXT: vsubss %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vmulss %xmm2, %xmm1, %xmm2
-; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vsubss %xmm0, %xmm3, %xmm0
-; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX-RECIP-LABEL: f32_two_step:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
+; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm2
+; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; AVX-RECIP-NEXT: vsubss %xmm2, %xmm3, %xmm2
+; AVX-RECIP-NEXT: vmulss %xmm2, %xmm1, %xmm2
+; AVX-RECIP-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm0
+; AVX-RECIP-NEXT: vsubss %xmm0, %xmm3, %xmm0
+; AVX-RECIP-NEXT: vmulss %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: f32_two_step:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
+; FMA-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; FMA-RECIP-NEXT: vmovaps %xmm1, %xmm3
+; FMA-RECIP-NEXT: vfnmadd213ss %xmm2, %xmm0, %xmm3
+; FMA-RECIP-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm3
+; FMA-RECIP-NEXT: vfnmadd213ss %xmm2, %xmm3, %xmm0
+; FMA-RECIP-NEXT: vfmadd132ss %xmm3, %xmm3, %xmm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: f32_two_step:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero sched: [5:1.00]
+; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [2:1.00]
+; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm2 # sched: [2:1.00]
+; BTVER2-NEXT: vsubss %xmm2, %xmm3, %xmm2 # sched: [3:1.00]
+; BTVER2-NEXT: vmulss %xmm2, %xmm1, %xmm2 # sched: [2:1.00]
+; BTVER2-NEXT: vaddss %xmm2, %xmm1, %xmm1 # sched: [3:1.00]
+; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vsubss %xmm0, %xmm3, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vaddss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: f32_two_step:
+; SANDY: # BB#0:
+; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
+; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm2 # sched: [5:1.00]
+; SANDY-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero sched: [4:0.50]
+; SANDY-NEXT: vsubss %xmm2, %xmm3, %xmm2 # sched: [3:1.00]
+; SANDY-NEXT: vmulss %xmm2, %xmm1, %xmm2 # sched: [5:1.00]
+; SANDY-NEXT: vaddss %xmm2, %xmm1, %xmm1 # sched: [3:1.00]
+; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vsubss %xmm0, %xmm3, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vaddss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: f32_two_step:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
+; HASWELL-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [4:0.50]
+; HASWELL-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
+; HASWELL-NEXT: vfnmadd213ss %xmm2, %xmm0, %xmm3
+; HASWELL-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm3
+; HASWELL-NEXT: vfnmadd213ss %xmm2, %xmm3, %xmm0
+; HASWELL-NEXT: vfmadd132ss %xmm3, %xmm3, %xmm0
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: f32_two_step:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm1
+; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm2
+; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; HASWELL-NO-FMA-NEXT: vsubss %xmm2, %xmm3, %xmm2
+; HASWELL-NO-FMA-NEXT: vmulss %xmm2, %xmm1, %xmm2
+; HASWELL-NO-FMA-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm0
+; HASWELL-NO-FMA-NEXT: vsubss %xmm0, %xmm3, %xmm0
+; HASWELL-NO-FMA-NEXT: vmulss %xmm0, %xmm1, %xmm0
+; HASWELL-NO-FMA-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; HASWELL-NO-FMA-NEXT: retq
+;
+; AVX512-LABEL: f32_two_step:
+; AVX512: # BB#0:
+; AVX512-NEXT: vrcp14ss %xmm0, %xmm0, %xmm1
+; AVX512-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [4:0.50]
+; AVX512-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
+; AVX512-NEXT: vfnmadd213ss %xmm2, %xmm0, %xmm3
+; AVX512-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm3
+; AVX512-NEXT: vfnmadd213ss %xmm2, %xmm3, %xmm0
+; AVX512-NEXT: vfmadd132ss %xmm3, %xmm3, %xmm0
+; AVX512-NEXT: retq # sched: [1:1.00]
%div = fdiv fast float 1.0, %x
ret float %div
}
@@ -95,11 +264,47 @@ define <4 x float> @v4f32_no_estimate(<4 x float> %x) #0 {
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: v4f32_no_estimate:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT: vdivps %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX-RECIP-LABEL: v4f32_no_estimate:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-RECIP-NEXT: vdivps %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: v4f32_no_estimate:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-RECIP-NEXT: vdivps %xmm0, %xmm1, %xmm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: v4f32_no_estimate:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
+; BTVER2-NEXT: vdivps %xmm0, %xmm1, %xmm0 # sched: [19:19.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: v4f32_no_estimate:
+; SANDY: # BB#0:
+; SANDY-NEXT: vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vdivps %xmm0, %xmm1, %xmm0 # sched: [12:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: v4f32_no_estimate:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %xmm1 # sched: [4:0.50]
+; HASWELL-NEXT: vdivps %xmm0, %xmm1, %xmm0 # sched: [12:1.00]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: v4f32_no_estimate:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
+; HASWELL-NO-FMA-NEXT: vdivps %xmm0, %xmm1, %xmm0
+; HASWELL-NO-FMA-NEXT: retq
+;
+; AVX512-LABEL: v4f32_no_estimate:
+; AVX512: # BB#0:
+; AVX512-NEXT: vbroadcastss {{.*}}(%rip), %xmm1 # sched: [4:0.50]
+; AVX512-NEXT: vdivps %xmm0, %xmm1, %xmm0 # sched: [12:1.00]
+; AVX512-NEXT: retq # sched: [1:1.00]
%div = fdiv fast <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
ret <4 x float> %div
}
@@ -116,15 +321,75 @@ define <4 x float> @v4f32_one_step(<4 x float> %x) #1 {
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: v4f32_one_step:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpps %xmm0, %xmm1
-; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT: vsubps %xmm0, %xmm2, %xmm0
-; AVX-NEXT: vmulps %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX-RECIP-LABEL: v4f32_one_step:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vrcpps %xmm0, %xmm1
+; AVX-RECIP-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; AVX-RECIP-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-RECIP-NEXT: vsubps %xmm0, %xmm2, %xmm0
+; AVX-RECIP-NEXT: vmulps %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: v4f32_one_step:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vrcpps %xmm0, %xmm1
+; FMA-RECIP-NEXT: vfnmadd213ps {{.*}}(%rip), %xmm1, %xmm0
+; FMA-RECIP-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: v4f32_one_step:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
+; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00]
+; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vsubps %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: v4f32_one_step:
+; SANDY: # BB#0:
+; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vsubps %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: v4f32_one_step:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; HASWELL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0
+; HASWELL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: v4f32_one_step:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vrcpps %xmm0, %xmm1
+; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
+; HASWELL-NO-FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0
+; HASWELL-NO-FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
+; HASWELL-NO-FMA-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; HASWELL-NO-FMA-NEXT: retq
+;
+; KNL-LABEL: v4f32_one_step:
+; KNL: # BB#0:
+; KNL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; KNL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; KNL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0
+; KNL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
+; KNL-NEXT: retq # sched: [1:1.00]
+;
+; SKX-LABEL: v4f32_one_step:
+; SKX: # BB#0:
+; SKX-NEXT: vrcp14ps %xmm0, %xmm1
+; SKX-NEXT: vfnmadd213ps {{.*}}(%rip){1to4}, %xmm1, %xmm0
+; SKX-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
+; SKX-NEXT: retq # sched: [1:1.00]
%div = fdiv fast <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
ret <4 x float> %div
}
@@ -147,19 +412,105 @@ define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: v4f32_two_step:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpps %xmm0, %xmm1
-; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT: vsubps %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vmulps %xmm2, %xmm1, %xmm2
-; AVX-NEXT: vaddps %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vsubps %xmm0, %xmm3, %xmm0
-; AVX-NEXT: vmulps %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX-RECIP-LABEL: v4f32_two_step:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vrcpps %xmm0, %xmm1
+; AVX-RECIP-NEXT: vmulps %xmm1, %xmm0, %xmm2
+; AVX-RECIP-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-RECIP-NEXT: vsubps %xmm2, %xmm3, %xmm2
+; AVX-RECIP-NEXT: vmulps %xmm2, %xmm1, %xmm2
+; AVX-RECIP-NEXT: vaddps %xmm2, %xmm1, %xmm1
+; AVX-RECIP-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; AVX-RECIP-NEXT: vsubps %xmm0, %xmm3, %xmm0
+; AVX-RECIP-NEXT: vmulps %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: v4f32_two_step:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vrcpps %xmm0, %xmm1
+; FMA-RECIP-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-RECIP-NEXT: vmovaps %xmm1, %xmm3
+; FMA-RECIP-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm3
+; FMA-RECIP-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm3
+; FMA-RECIP-NEXT: vfnmadd213ps %xmm2, %xmm3, %xmm0
+; FMA-RECIP-NEXT: vfmadd132ps %xmm3, %xmm3, %xmm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: v4f32_two_step:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
+; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00]
+; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm2 # sched: [2:1.00]
+; BTVER2-NEXT: vsubps %xmm2, %xmm3, %xmm2 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %xmm2, %xmm1, %xmm2 # sched: [2:1.00]
+; BTVER2-NEXT: vaddps %xmm2, %xmm1, %xmm1 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vsubps %xmm0, %xmm3, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: v4f32_two_step:
+; SANDY: # BB#0:
+; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm2 # sched: [5:1.00]
+; SANDY-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vsubps %xmm2, %xmm3, %xmm2 # sched: [3:1.00]
+; SANDY-NEXT: vmulps %xmm2, %xmm1, %xmm2 # sched: [5:1.00]
+; SANDY-NEXT: vaddps %xmm2, %xmm1, %xmm1 # sched: [3:1.00]
+; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vsubps %xmm0, %xmm3, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: v4f32_two_step:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; HASWELL-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
+; HASWELL-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm3
+; HASWELL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm3
+; HASWELL-NEXT: vfnmadd213ps %xmm2, %xmm3, %xmm0
+; HASWELL-NEXT: vfmadd132ps %xmm3, %xmm3, %xmm0
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: v4f32_two_step:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vrcpps %xmm0, %xmm1
+; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm2
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %xmm3
+; HASWELL-NO-FMA-NEXT: vsubps %xmm2, %xmm3, %xmm2
+; HASWELL-NO-FMA-NEXT: vmulps %xmm2, %xmm1, %xmm2
+; HASWELL-NO-FMA-NEXT: vaddps %xmm2, %xmm1, %xmm1
+; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; HASWELL-NO-FMA-NEXT: vsubps %xmm0, %xmm3, %xmm0
+; HASWELL-NO-FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
+; HASWELL-NO-FMA-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; HASWELL-NO-FMA-NEXT: retq
+;
+; KNL-LABEL: v4f32_two_step:
+; KNL: # BB#0:
+; KNL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; KNL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; KNL-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
+; KNL-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm3
+; KNL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm3
+; KNL-NEXT: vfnmadd213ps %xmm2, %xmm3, %xmm0
+; KNL-NEXT: vfmadd132ps %xmm3, %xmm3, %xmm0
+; KNL-NEXT: retq # sched: [1:1.00]
+;
+; SKX-LABEL: v4f32_two_step:
+; SKX: # BB#0:
+; SKX-NEXT: vrcp14ps %xmm0, %xmm1
+; SKX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; SKX-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
+; SKX-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm3
+; SKX-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm3
+; SKX-NEXT: vfnmadd213ps %xmm2, %xmm3, %xmm0
+; SKX-NEXT: vfmadd132ps %xmm3, %xmm3, %xmm0
+; SKX-NEXT: retq # sched: [1:1.00]
%div = fdiv fast <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
ret <4 x float> %div
}
@@ -175,11 +526,47 @@ define <8 x float> @v8f32_no_estimate(<8 x float> %x) #0 {
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: retq
;
-; AVX-LABEL: v8f32_no_estimate:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT: vdivps %ymm0, %ymm1, %ymm0
-; AVX-NEXT: retq
+; AVX-RECIP-LABEL: v8f32_no_estimate:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-RECIP-NEXT: vdivps %ymm0, %ymm1, %ymm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: v8f32_no_estimate:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-RECIP-NEXT: vdivps %ymm0, %ymm1, %ymm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: v8f32_no_estimate:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
+; BTVER2-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [19:19.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: v8f32_no_estimate:
+; SANDY: # BB#0:
+; SANDY-NEXT: vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [12:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: v8f32_no_estimate:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %ymm1 # sched: [5:1.00]
+; HASWELL-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [19:2.00]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: v8f32_no_estimate:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %ymm1
+; HASWELL-NO-FMA-NEXT: vdivps %ymm0, %ymm1, %ymm0
+; HASWELL-NO-FMA-NEXT: retq
+;
+; AVX512-LABEL: v8f32_no_estimate:
+; AVX512: # BB#0:
+; AVX512-NEXT: vbroadcastss {{.*}}(%rip), %ymm1 # sched: [5:1.00]
+; AVX512-NEXT: vdivps %ymm0, %ymm1, %ymm0 # sched: [19:2.00]
+; AVX512-NEXT: retq # sched: [1:1.00]
%div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
ret <8 x float> %div
}
@@ -203,15 +590,75 @@ define <8 x float> @v8f32_one_step(<8 x float> %x) #1 {
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: retq
;
-; AVX-LABEL: v8f32_one_step:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpps %ymm0, %ymm1
-; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT: vsubps %ymm0, %ymm2, %ymm0
-; AVX-NEXT: vmulps %ymm0, %ymm1, %ymm0
-; AVX-NEXT: vaddps %ymm0, %ymm1, %ymm0
-; AVX-NEXT: retq
+; AVX-RECIP-LABEL: v8f32_one_step:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm1
+; AVX-RECIP-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-RECIP-NEXT: vsubps %ymm0, %ymm2, %ymm0
+; AVX-RECIP-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; AVX-RECIP-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: v8f32_one_step:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm1
+; FMA-RECIP-NEXT: vfnmadd213ps {{.*}}(%rip), %ymm1, %ymm0
+; FMA-RECIP-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: v8f32_one_step:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
+; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
+; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: v8f32_one_step:
+; SANDY: # BB#0:
+; SANDY-NEXT: vrcpps %ymm0, %ymm1 # sched: [5:1.00]
+; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
+; SANDY-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
+; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: v8f32_one_step:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
+; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; HASWELL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
+; HASWELL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: v8f32_one_step:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm1
+; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %ymm2
+; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm2, %ymm0
+; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; HASWELL-NO-FMA-NEXT: retq
+;
+; KNL-LABEL: v8f32_one_step:
+; KNL: # BB#0:
+; KNL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
+; KNL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; KNL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
+; KNL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
+; KNL-NEXT: retq # sched: [1:1.00]
+;
+; SKX-LABEL: v8f32_one_step:
+; SKX: # BB#0:
+; SKX-NEXT: vrcp14ps %ymm0, %ymm1
+; SKX-NEXT: vfnmadd213ps {{.*}}(%rip){1to8}, %ymm1, %ymm0
+; SKX-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
+; SKX-NEXT: retq # sched: [1:1.00]
%div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
ret <8 x float> %div
}
@@ -247,19 +694,105 @@ define <8 x float> @v8f32_two_step(<8 x float> %x) #2 {
; SSE-NEXT: movaps %xmm3, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: v8f32_two_step:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpps %ymm0, %ymm1
-; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm2
-; AVX-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT: vsubps %ymm2, %ymm3, %ymm2
-; AVX-NEXT: vmulps %ymm2, %ymm1, %ymm2
-; AVX-NEXT: vaddps %ymm2, %ymm1, %ymm1
-; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vsubps %ymm0, %ymm3, %ymm0
-; AVX-NEXT: vmulps %ymm0, %ymm1, %ymm0
-; AVX-NEXT: vaddps %ymm0, %ymm1, %ymm0
-; AVX-NEXT: retq
+; AVX-RECIP-LABEL: v8f32_two_step:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm1
+; AVX-RECIP-NEXT: vmulps %ymm1, %ymm0, %ymm2
+; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-RECIP-NEXT: vsubps %ymm2, %ymm3, %ymm2
+; AVX-RECIP-NEXT: vmulps %ymm2, %ymm1, %ymm2
+; AVX-RECIP-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; AVX-RECIP-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX-RECIP-NEXT: vsubps %ymm0, %ymm3, %ymm0
+; AVX-RECIP-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; AVX-RECIP-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: v8f32_two_step:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm1
+; FMA-RECIP-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-RECIP-NEXT: vmovaps %ymm1, %ymm3
+; FMA-RECIP-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm3
+; FMA-RECIP-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm3
+; FMA-RECIP-NEXT: vfnmadd213ps %ymm2, %ymm3, %ymm0
+; FMA-RECIP-NEXT: vfmadd132ps %ymm3, %ymm3, %ymm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: v8f32_two_step:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
+; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [2:1.00]
+; BTVER2-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [2:1.00]
+; BTVER2-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
+; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: v8f32_two_step:
+; SANDY: # BB#0:
+; SANDY-NEXT: vrcpps %ymm0, %ymm1 # sched: [5:1.00]
+; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [5:1.00]
+; SANDY-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:1.00]
+; SANDY-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [5:1.00]
+; SANDY-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:1.00]
+; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
+; SANDY-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
+; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: v8f32_two_step:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
+; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; HASWELL-NEXT: vmovaps %ymm1, %ymm3 # sched: [1:1.00]
+; HASWELL-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm3
+; HASWELL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm3
+; HASWELL-NEXT: vfnmadd213ps %ymm2, %ymm3, %ymm0
+; HASWELL-NEXT: vfmadd132ps %ymm3, %ymm3, %ymm0
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: v8f32_two_step:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm1
+; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm2
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %ymm3
+; HASWELL-NO-FMA-NEXT: vsubps %ymm2, %ymm3, %ymm2
+; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm1, %ymm2
+; HASWELL-NO-FMA-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm3, %ymm0
+; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; HASWELL-NO-FMA-NEXT: retq
+;
+; KNL-LABEL: v8f32_two_step:
+; KNL: # BB#0:
+; KNL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
+; KNL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; KNL-NEXT: vmovaps %ymm1, %ymm3 # sched: [1:1.00]
+; KNL-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm3
+; KNL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm3
+; KNL-NEXT: vfnmadd213ps %ymm2, %ymm3, %ymm0
+; KNL-NEXT: vfmadd132ps %ymm3, %ymm3, %ymm0
+; KNL-NEXT: retq # sched: [1:1.00]
+;
+; SKX-LABEL: v8f32_two_step:
+; SKX: # BB#0:
+; SKX-NEXT: vrcp14ps %ymm0, %ymm1
+; SKX-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; SKX-NEXT: vmovaps %ymm1, %ymm3 # sched: [1:1.00]
+; SKX-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm3
+; SKX-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm3
+; SKX-NEXT: vfnmadd213ps %ymm2, %ymm3, %ymm0
+; SKX-NEXT: vfmadd132ps %ymm3, %ymm3, %ymm0
+; SKX-NEXT: retq # sched: [1:1.00]
%div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
ret <8 x float> %div
}
diff --git a/test/CodeGen/X86/recip-fastmath2.ll b/test/CodeGen/X86/recip-fastmath2.ll
index 0788b036cc52..440a6f0bef13 100644
--- a/test/CodeGen/X86/recip-fastmath2.ll
+++ b/test/CodeGen/X86/recip-fastmath2.ll
@@ -1,6 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=SSE --check-prefix=SSE-RECIP
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX-RECIP
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=FMA-RECIP
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2 -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=BTVER2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=SANDY
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=haswell -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=haswell -print-schedule -mattr=-fma | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=HASWELL-NO-FMA
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX512 --check-prefix=KNL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX512 --check-prefix=SKX
; These tests provide extra coverage for reciprocal codegen, as discussed on D26855.
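+;
+; A note on reading the checks below (a reading aid, assuming the usual
+; -print-schedule output format): each annotated instruction carries
+; "sched: [latency:reciprocal-throughput]" for the CPU selected by -mcpu.
+; The one-step tests refine the hardware estimate e0 = rcp(x) with a
+; single Newton-Raphson iteration, e1 = e0 + e0*(1 - x*e0), which is the
+; rcp/mul/sub/mul/add pattern (folded into FMAs where available); the
+; two-step tests apply that iteration twice.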
@@ -11,11 +18,47 @@ define float @f32_no_step_2(float %x) #3 {
; SSE-NEXT: mulss {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: f32_no_step_2:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpss %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX-RECIP-LABEL: f32_no_step_2:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm0
+; AVX-RECIP-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: f32_no_step_2:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm0
+; FMA-RECIP-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: f32_no_step_2:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [7:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: f32_no_step_2:
+; SANDY: # BB#0:
+; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: f32_no_step_2:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
+; HASWELL-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: f32_no_step_2:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
+; HASWELL-NO-FMA-NEXT: retq # sched: [1:1.00]
+;
+; AVX512-LABEL: f32_no_step_2:
+; AVX512: # BB#0:
+; AVX512-NEXT: vrcp14ss %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
+; AVX512-NEXT: retq # sched: [1:1.00]
%div = fdiv fast float 1234.0, %x
ret float %div
}
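+
+; The #N attribute groups on these functions (defined at the end of the
+; file, outside this hunk) select the reciprocal lowering, presumably via
+; the "reciprocal-estimates" function attribute: no estimate, one
+; refinement step, or two refinement steps, matching the function names.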
@@ -33,20 +76,170 @@ define float @f32_one_step_2(float %x) #1 {
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: f32_one_step_2:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX-NEXT: vsubss %xmm0, %xmm2, %xmm0
-; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX-RECIP-LABEL: f32_one_step_2:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
+; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm0
+; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-RECIP-NEXT: vsubss %xmm0, %xmm2, %xmm0
+; AVX-RECIP-NEXT: vmulss %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: f32_one_step_2:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
+; FMA-RECIP-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0
+; FMA-RECIP-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0
+; FMA-RECIP-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: f32_one_step_2:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [5:1.00]
+; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [2:1.00]
+; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vsubss %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vaddss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [7:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: f32_one_step_2:
+; SANDY: # BB#0:
+; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
+; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [4:0.50]
+; SANDY-NEXT: vsubss %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vaddss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: f32_one_step_2:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
+; HASWELL-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0
+; HASWELL-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0
+; HASWELL-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: f32_one_step_2:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:0.50]
+; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [4:0.50]
+; HASWELL-NO-FMA-NEXT: vsubss %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
+; HASWELL-NO-FMA-NEXT: vaddss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
+; HASWELL-NO-FMA-NEXT: retq # sched: [1:1.00]
+;
+; AVX512-LABEL: f32_one_step_2:
+; AVX512: # BB#0:
+; AVX512-NEXT: vrcp14ss %xmm0, %xmm0, %xmm1
+; AVX512-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0
+; AVX512-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0
+; AVX512-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
+; AVX512-NEXT: retq # sched: [1:1.00]
%div = fdiv fast float 3456.0, %x
ret float %div
}
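+
+; The *_2_divs tests below divide twice by the same value (%div2 = (c/x)/x),
+; so a single refined reciprocal estimate r ~= 1/x should be reused: the
+; checks expect one rcp/refinement sequence followed by two multiplies
+; (c*r, then the result times r again) rather than a second estimate.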
+define float @f32_one_step_2_divs(float %x) #1 {
+; SSE-LABEL: f32_one_step_2_divs:
+; SSE: # BB#0:
+; SSE-NEXT: rcpss %xmm0, %xmm1
+; SSE-NEXT: mulss %xmm1, %xmm0
+; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE-NEXT: subss %xmm0, %xmm2
+; SSE-NEXT: mulss %xmm1, %xmm2
+; SSE-NEXT: addss %xmm1, %xmm2
+; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: mulss %xmm2, %xmm0
+; SSE-NEXT: mulss %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-RECIP-LABEL: f32_one_step_2_divs:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
+; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm0
+; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-RECIP-NEXT: vsubss %xmm0, %xmm2, %xmm0
+; AVX-RECIP-NEXT: vmulss %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1
+; AVX-RECIP-NEXT: vmulss %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: f32_one_step_2_divs:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
+; FMA-RECIP-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0
+; FMA-RECIP-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0
+; FMA-RECIP-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1
+; FMA-RECIP-NEXT: vmulss %xmm0, %xmm1, %xmm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: f32_one_step_2_divs:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [5:1.00]
+; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [2:1.00]
+; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vsubss %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vaddss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1 # sched: [7:1.00]
+; BTVER2-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: f32_one_step_2_divs:
+; SANDY: # BB#0:
+; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
+; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [4:0.50]
+; SANDY-NEXT: vsubss %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vaddss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1 # sched: [9:1.00]
+; SANDY-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: f32_one_step_2_divs:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
+; HASWELL-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0
+; HASWELL-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0
+; HASWELL-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1 # sched: [9:0.50]
+; HASWELL-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: f32_one_step_2_divs:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:0.50]
+; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [4:0.50]
+; HASWELL-NO-FMA-NEXT: vsubss %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
+; HASWELL-NO-FMA-NEXT: vaddss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1 # sched: [9:0.50]
+; HASWELL-NO-FMA-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
+; HASWELL-NO-FMA-NEXT: retq # sched: [1:1.00]
+;
+; AVX512-LABEL: f32_one_step_2_divs:
+; AVX512: # BB#0:
+; AVX512-NEXT: vrcp14ss %xmm0, %xmm0, %xmm1
+; AVX512-NEXT: vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0
+; AVX512-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm0
+; AVX512-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1 # sched: [9:0.50]
+; AVX512-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
+; AVX512-NEXT: retq # sched: [1:1.00]
+ %div = fdiv fast float 3456.0, %x
+ %div2 = fdiv fast float %div, %x
+ ret float %div2
+}
+
define float @f32_two_step_2(float %x) #2 {
; SSE-LABEL: f32_two_step_2:
; SSE: # BB#0:
@@ -66,20 +259,101 @@ define float @f32_two_step_2(float %x) #2 {
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: f32_two_step_2:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX-NEXT: vsubss %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vmulss %xmm2, %xmm1, %xmm2
-; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vsubss %xmm0, %xmm3, %xmm0
-; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX-RECIP-LABEL: f32_two_step_2:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
+; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm2
+; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; AVX-RECIP-NEXT: vsubss %xmm2, %xmm3, %xmm2
+; AVX-RECIP-NEXT: vmulss %xmm2, %xmm1, %xmm2
+; AVX-RECIP-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm0
+; AVX-RECIP-NEXT: vsubss %xmm0, %xmm3, %xmm0
+; AVX-RECIP-NEXT: vmulss %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: f32_two_step_2:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
+; FMA-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; FMA-RECIP-NEXT: vmovaps %xmm1, %xmm3
+; FMA-RECIP-NEXT: vfnmadd213ss %xmm2, %xmm0, %xmm3
+; FMA-RECIP-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm3
+; FMA-RECIP-NEXT: vfnmadd213ss %xmm2, %xmm3, %xmm0
+; FMA-RECIP-NEXT: vfmadd132ss %xmm3, %xmm3, %xmm0
+; FMA-RECIP-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: f32_two_step_2:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero sched: [5:1.00]
+; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [2:1.00]
+; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm2 # sched: [2:1.00]
+; BTVER2-NEXT: vsubss %xmm2, %xmm3, %xmm2 # sched: [3:1.00]
+; BTVER2-NEXT: vmulss %xmm2, %xmm1, %xmm2 # sched: [2:1.00]
+; BTVER2-NEXT: vaddss %xmm2, %xmm1, %xmm1 # sched: [3:1.00]
+; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vsubss %xmm0, %xmm3, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vaddss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [7:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: f32_two_step_2:
+; SANDY: # BB#0:
+; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
+; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm2 # sched: [5:1.00]
+; SANDY-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero sched: [4:0.50]
+; SANDY-NEXT: vsubss %xmm2, %xmm3, %xmm2 # sched: [3:1.00]
+; SANDY-NEXT: vmulss %xmm2, %xmm1, %xmm2 # sched: [5:1.00]
+; SANDY-NEXT: vaddss %xmm2, %xmm1, %xmm1 # sched: [3:1.00]
+; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vsubss %xmm0, %xmm3, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vaddss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: f32_two_step_2:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
+; HASWELL-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [4:0.50]
+; HASWELL-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
+; HASWELL-NEXT: vfnmadd213ss %xmm2, %xmm0, %xmm3
+; HASWELL-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm3
+; HASWELL-NEXT: vfnmadd213ss %xmm2, %xmm3, %xmm0
+; HASWELL-NEXT: vfmadd132ss %xmm3, %xmm3, %xmm0
+; HASWELL-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: f32_two_step_2:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm2 # sched: [5:0.50]
+; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero sched: [4:0.50]
+; HASWELL-NO-FMA-NEXT: vsubss %xmm2, %xmm3, %xmm2 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulss %xmm2, %xmm1, %xmm2 # sched: [5:0.50]
+; HASWELL-NO-FMA-NEXT: vaddss %xmm2, %xmm1, %xmm1 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:0.50]
+; HASWELL-NO-FMA-NEXT: vsubss %xmm0, %xmm3, %xmm0 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulss %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
+; HASWELL-NO-FMA-NEXT: vaddss %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
+; HASWELL-NO-FMA-NEXT: retq # sched: [1:1.00]
+;
+; AVX512-LABEL: f32_two_step_2:
+; AVX512: # BB#0:
+; AVX512-NEXT: vrcp14ss %xmm0, %xmm0, %xmm1
+; AVX512-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [4:0.50]
+; AVX512-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
+; AVX512-NEXT: vfnmadd213ss %xmm2, %xmm0, %xmm3
+; AVX512-NEXT: vfmadd132ss %xmm1, %xmm1, %xmm3
+; AVX512-NEXT: vfnmadd213ss %xmm2, %xmm3, %xmm0
+; AVX512-NEXT: vfmadd132ss %xmm3, %xmm3, %xmm0
+; AVX512-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
+; AVX512-NEXT: retq # sched: [1:1.00]
%div = fdiv fast float 6789.0, %x
ret float %div
}
@@ -97,20 +371,191 @@ define <4 x float> @v4f32_one_step2(<4 x float> %x) #1 {
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: v4f32_one_step2:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpps %xmm0, %xmm1
-; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT: vsubps %xmm0, %xmm2, %xmm0
-; AVX-NEXT: vmulps %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX-RECIP-LABEL: v4f32_one_step2:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vrcpps %xmm0, %xmm1
+; AVX-RECIP-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; AVX-RECIP-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-RECIP-NEXT: vsubps %xmm0, %xmm2, %xmm0
+; AVX-RECIP-NEXT: vmulps %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: v4f32_one_step2:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vrcpps %xmm0, %xmm1
+; FMA-RECIP-NEXT: vfnmadd213ps {{.*}}(%rip), %xmm1, %xmm0
+; FMA-RECIP-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
+; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: v4f32_one_step2:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
+; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00]
+; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vsubps %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [7:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: v4f32_one_step2:
+; SANDY: # BB#0:
+; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vsubps %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: v4f32_one_step2:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; HASWELL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0
+; HASWELL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
+; HASWELL-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: v4f32_one_step2:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:0.50]
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; HASWELL-NO-FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
+; HASWELL-NO-FMA-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
+; HASWELL-NO-FMA-NEXT: retq # sched: [1:1.00]
+;
+; KNL-LABEL: v4f32_one_step2:
+; KNL: # BB#0:
+; KNL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; KNL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; KNL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0
+; KNL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
+; KNL-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
+; KNL-NEXT: retq # sched: [1:1.00]
+;
+; SKX-LABEL: v4f32_one_step2:
+; SKX: # BB#0:
+; SKX-NEXT: vrcp14ps %xmm0, %xmm1
+; SKX-NEXT: vfnmadd213ps {{.*}}(%rip){1to4}, %xmm1, %xmm0
+; SKX-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
+; SKX-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
+; SKX-NEXT: retq # sched: [1:1.00]
%div = fdiv fast <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %x
ret <4 x float> %div
}
+define <4 x float> @v4f32_one_step_2_divs(<4 x float> %x) #1 {
+; SSE-LABEL: v4f32_one_step_2_divs:
+; SSE: # BB#0:
+; SSE-NEXT: rcpps %xmm0, %xmm1
+; SSE-NEXT: mulps %xmm1, %xmm0
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; SSE-NEXT: subps %xmm0, %xmm2
+; SSE-NEXT: mulps %xmm1, %xmm2
+; SSE-NEXT: addps %xmm1, %xmm2
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00]
+; SSE-NEXT: mulps %xmm2, %xmm0
+; SSE-NEXT: mulps %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-RECIP-LABEL: v4f32_one_step_2_divs:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vrcpps %xmm0, %xmm1
+; AVX-RECIP-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; AVX-RECIP-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-RECIP-NEXT: vsubps %xmm0, %xmm2, %xmm0
+; AVX-RECIP-NEXT: vmulps %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1
+; AVX-RECIP-NEXT: vmulps %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: v4f32_one_step_2_divs:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vrcpps %xmm0, %xmm1
+; FMA-RECIP-NEXT: vfnmadd213ps {{.*}}(%rip), %xmm1, %xmm0
+; FMA-RECIP-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
+; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1
+; FMA-RECIP-NEXT: vmulps %xmm0, %xmm1, %xmm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: v4f32_one_step_2_divs:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
+; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00]
+; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vsubps %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1 # sched: [7:1.00]
+; BTVER2-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: v4f32_one_step_2_divs:
+; SANDY: # BB#0:
+; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vsubps %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1 # sched: [9:1.00]
+; SANDY-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: v4f32_one_step_2_divs:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; HASWELL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0
+; HASWELL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
+; HASWELL-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1 # sched: [9:0.50]
+; HASWELL-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: v4f32_one_step_2_divs:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:0.50]
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; HASWELL-NO-FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
+; HASWELL-NO-FMA-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1 # sched: [9:0.50]
+; HASWELL-NO-FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
+; HASWELL-NO-FMA-NEXT: retq # sched: [1:1.00]
+;
+; KNL-LABEL: v4f32_one_step_2_divs:
+; KNL: # BB#0:
+; KNL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; KNL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; KNL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0
+; KNL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
+; KNL-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1 # sched: [9:0.50]
+; KNL-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
+; KNL-NEXT: retq # sched: [1:1.00]
+;
+; SKX-LABEL: v4f32_one_step_2_divs:
+; SKX: # BB#0:
+; SKX-NEXT: vrcp14ps %xmm0, %xmm1
+; SKX-NEXT: vfnmadd213ps {{.*}}(%rip){1to4}, %xmm1, %xmm0
+; SKX-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm0
+; SKX-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1 # sched: [9:0.50]
+; SKX-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
+; SKX-NEXT: retq # sched: [1:1.00]
+ %div = fdiv fast <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %x
+ %div2 = fdiv fast <4 x float> %div, %x
+ ret <4 x float> %div2
+}
+
define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
; SSE-LABEL: v4f32_two_step2:
; SSE: # BB#0:
@@ -130,20 +575,113 @@ define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: v4f32_two_step2:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpps %xmm0, %xmm1
-; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT: vsubps %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vmulps %xmm2, %xmm1, %xmm2
-; AVX-NEXT: vaddps %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vsubps %xmm0, %xmm3, %xmm0
-; AVX-NEXT: vmulps %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX-RECIP-LABEL: v4f32_two_step2:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vrcpps %xmm0, %xmm1
+; AVX-RECIP-NEXT: vmulps %xmm1, %xmm0, %xmm2
+; AVX-RECIP-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-RECIP-NEXT: vsubps %xmm2, %xmm3, %xmm2
+; AVX-RECIP-NEXT: vmulps %xmm2, %xmm1, %xmm2
+; AVX-RECIP-NEXT: vaddps %xmm2, %xmm1, %xmm1
+; AVX-RECIP-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; AVX-RECIP-NEXT: vsubps %xmm0, %xmm3, %xmm0
+; AVX-RECIP-NEXT: vmulps %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: v4f32_two_step2:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vrcpps %xmm0, %xmm1
+; FMA-RECIP-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-RECIP-NEXT: vmovaps %xmm1, %xmm3
+; FMA-RECIP-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm3
+; FMA-RECIP-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm3
+; FMA-RECIP-NEXT: vfnmadd213ps %xmm2, %xmm3, %xmm0
+; FMA-RECIP-NEXT: vfmadd132ps %xmm3, %xmm3, %xmm0
+; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: v4f32_two_step2:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
+; BTVER2-NEXT: vrcpps %xmm0, %xmm1 # sched: [2:1.00]
+; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm2 # sched: [2:1.00]
+; BTVER2-NEXT: vsubps %xmm2, %xmm3, %xmm2 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %xmm2, %xmm1, %xmm2 # sched: [2:1.00]
+; BTVER2-NEXT: vaddps %xmm2, %xmm1, %xmm1 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vsubps %xmm0, %xmm3, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [2:1.00]
+; BTVER2-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [7:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: v4f32_two_step2:
+; SANDY: # BB#0:
+; SANDY-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm2 # sched: [5:1.00]
+; SANDY-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vsubps %xmm2, %xmm3, %xmm2 # sched: [3:1.00]
+; SANDY-NEXT: vmulps %xmm2, %xmm1, %xmm2 # sched: [5:1.00]
+; SANDY-NEXT: vaddps %xmm2, %xmm1, %xmm1 # sched: [3:1.00]
+; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vsubps %xmm0, %xmm3, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:1.00]
+; SANDY-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: v4f32_two_step2:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; HASWELL-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
+; HASWELL-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm3
+; HASWELL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm3
+; HASWELL-NEXT: vfnmadd213ps %xmm2, %xmm3, %xmm0
+; HASWELL-NEXT: vfmadd132ps %xmm3, %xmm3, %xmm0
+; HASWELL-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: v4f32_two_step2:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm2 # sched: [5:0.50]
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %xmm3 # sched: [4:0.50]
+; HASWELL-NO-FMA-NEXT: vsubps %xmm2, %xmm3, %xmm2 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps %xmm2, %xmm1, %xmm2 # sched: [5:0.50]
+; HASWELL-NO-FMA-NEXT: vaddps %xmm2, %xmm1, %xmm1 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:0.50]
+; HASWELL-NO-FMA-NEXT: vsubps %xmm0, %xmm3, %xmm0 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
+; HASWELL-NO-FMA-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
+; HASWELL-NO-FMA-NEXT: retq # sched: [1:1.00]
+;
+; KNL-LABEL: v4f32_two_step2:
+; KNL: # BB#0:
+; KNL-NEXT: vrcpps %xmm0, %xmm1 # sched: [5:1.00]
+; KNL-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; KNL-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
+; KNL-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm3
+; KNL-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm3
+; KNL-NEXT: vfnmadd213ps %xmm2, %xmm3, %xmm0
+; KNL-NEXT: vfmadd132ps %xmm3, %xmm3, %xmm0
+; KNL-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
+; KNL-NEXT: retq # sched: [1:1.00]
+;
+; SKX-LABEL: v4f32_two_step2:
+; SKX: # BB#0:
+; SKX-NEXT: vrcp14ps %xmm0, %xmm1
+; SKX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 # sched: [4:0.50]
+; SKX-NEXT: vmovaps %xmm1, %xmm3 # sched: [1:1.00]
+; SKX-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm3
+; SKX-NEXT: vfmadd132ps %xmm1, %xmm1, %xmm3
+; SKX-NEXT: vfnmadd213ps %xmm2, %xmm3, %xmm0
+; SKX-NEXT: vfmadd132ps %xmm3, %xmm3, %xmm0
+; SKX-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
+; SKX-NEXT: retq # sched: [1:1.00]
%div = fdiv fast <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %x
ret <4 x float> %div
}
@@ -169,20 +707,200 @@ define <8 x float> @v8f32_one_step2(<8 x float> %x) #1 {
; SSE-NEXT: movaps %xmm3, %xmm1
; SSE-NEXT: retq
;
-; AVX-LABEL: v8f32_one_step2:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpps %ymm0, %ymm1
-; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT: vsubps %ymm0, %ymm2, %ymm0
-; AVX-NEXT: vmulps %ymm0, %ymm1, %ymm0
-; AVX-NEXT: vaddps %ymm0, %ymm1, %ymm0
-; AVX-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
-; AVX-NEXT: retq
+; AVX-RECIP-LABEL: v8f32_one_step2:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm1
+; AVX-RECIP-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-RECIP-NEXT: vsubps %ymm0, %ymm2, %ymm0
+; AVX-RECIP-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; AVX-RECIP-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: v8f32_one_step2:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm1
+; FMA-RECIP-NEXT: vfnmadd213ps {{.*}}(%rip), %ymm1, %ymm0
+; FMA-RECIP-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
+; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: v8f32_one_step2:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
+; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
+; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: v8f32_one_step2:
+; SANDY: # BB#0:
+; SANDY-NEXT: vrcpps %ymm0, %ymm1 # sched: [5:1.00]
+; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
+; SANDY-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
+; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: v8f32_one_step2:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
+; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; HASWELL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
+; HASWELL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
+; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: v8f32_one_step2:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
+; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
+; HASWELL-NO-FMA-NEXT: retq # sched: [1:1.00]
+;
+; KNL-LABEL: v8f32_one_step2:
+; KNL: # BB#0:
+; KNL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
+; KNL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; KNL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
+; KNL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
+; KNL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
+; KNL-NEXT: retq # sched: [1:1.00]
+;
+; SKX-LABEL: v8f32_one_step2:
+; SKX: # BB#0:
+; SKX-NEXT: vrcp14ps %ymm0, %ymm1
+; SKX-NEXT: vfnmadd213ps {{.*}}(%rip){1to8}, %ymm1, %ymm0
+; SKX-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
+; SKX-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
+; SKX-NEXT: retq # sched: [1:1.00]
%div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
ret <8 x float> %div
}
+define <8 x float> @v8f32_one_step_2_divs(<8 x float> %x) #1 {
+; SSE-LABEL: v8f32_one_step_2_divs:
+; SSE: # BB#0:
+; SSE-NEXT: rcpps %xmm0, %xmm2
+; SSE-NEXT: mulps %xmm2, %xmm0
+; SSE-NEXT: movaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; SSE-NEXT: movaps %xmm3, %xmm4
+; SSE-NEXT: subps %xmm0, %xmm4
+; SSE-NEXT: mulps %xmm2, %xmm4
+; SSE-NEXT: addps %xmm2, %xmm4
+; SSE-NEXT: rcpps %xmm1, %xmm0
+; SSE-NEXT: mulps %xmm0, %xmm1
+; SSE-NEXT: subps %xmm1, %xmm3
+; SSE-NEXT: mulps %xmm0, %xmm3
+; SSE-NEXT: addps %xmm0, %xmm3
+; SSE-NEXT: movaps {{.*#+}} xmm1 = [5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00]
+; SSE-NEXT: mulps %xmm3, %xmm1
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00]
+; SSE-NEXT: mulps %xmm4, %xmm0
+; SSE-NEXT: mulps %xmm4, %xmm0
+; SSE-NEXT: mulps %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-RECIP-LABEL: v8f32_one_step_2_divs:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm1
+; AVX-RECIP-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-RECIP-NEXT: vsubps %ymm0, %ymm2, %ymm0
+; AVX-RECIP-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; AVX-RECIP-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1
+; AVX-RECIP-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: v8f32_one_step_2_divs:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm1
+; FMA-RECIP-NEXT: vfnmadd213ps {{.*}}(%rip), %ymm1, %ymm0
+; FMA-RECIP-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
+; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1
+; FMA-RECIP-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: v8f32_one_step_2_divs:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
+; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
+; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [7:1.00]
+; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: v8f32_one_step_2_divs:
+; SANDY: # BB#0:
+; SANDY-NEXT: vrcpps %ymm0, %ymm1 # sched: [5:1.00]
+; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
+; SANDY-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
+; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [9:1.00]
+; SANDY-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: v8f32_one_step_2_divs:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
+; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; HASWELL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
+; HASWELL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
+; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [9:1.00]
+; HASWELL-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: v8f32_one_step_2_divs:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
+; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm2, %ymm0 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [9:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: retq # sched: [1:1.00]
+;
+; KNL-LABEL: v8f32_one_step_2_divs:
+; KNL: # BB#0:
+; KNL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
+; KNL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; KNL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
+; KNL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
+; KNL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [9:1.00]
+; KNL-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
+; KNL-NEXT: retq # sched: [1:1.00]
+;
+; SKX-LABEL: v8f32_one_step_2_divs:
+; SKX: # BB#0:
+; SKX-NEXT: vrcp14ps %ymm0, %ymm1
+; SKX-NEXT: vfnmadd213ps {{.*}}(%rip){1to8}, %ymm1, %ymm0
+; SKX-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm0
+; SKX-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [9:1.00]
+; SKX-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
+; SKX-NEXT: retq # sched: [1:1.00]
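+; Both fast divides below reuse one refined estimate r ~= 1/x: each check
+; block ends with a multiply of r by the <1..8> constant vector (%div) and
+; then by r again (%div2), rather than a second estimate/refine sequence.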
+ %div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
+ %div2 = fdiv fast <8 x float> %div, %x
+ ret <8 x float> %div2
+}
+
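+; v8f32_two_step2 asks for two rounds of refinement (per its name and,
+; presumably, attribute #2). A sketch of the math each round encodes: given
+; an estimate r ~= 1/x, compute e = 1.0 - x*r and r' = r + r*e = r*(2 - x*r);
+; the vsubps/vmulps/vaddps (or fused vfnmadd/vfmadd) sequences below do this
+; twice before the final multiply by the constant numerator vector.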
define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
; SSE-LABEL: v8f32_two_step2:
; SSE: # BB#0:
@@ -216,20 +934,113 @@ define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
; SSE-NEXT: movaps %xmm3, %xmm1
; SSE-NEXT: retq
;
-; AVX-LABEL: v8f32_two_step2:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpps %ymm0, %ymm1
-; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm2
-; AVX-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT: vsubps %ymm2, %ymm3, %ymm2
-; AVX-NEXT: vmulps %ymm2, %ymm1, %ymm2
-; AVX-NEXT: vaddps %ymm2, %ymm1, %ymm1
-; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vsubps %ymm0, %ymm3, %ymm0
-; AVX-NEXT: vmulps %ymm0, %ymm1, %ymm0
-; AVX-NEXT: vaddps %ymm0, %ymm1, %ymm0
-; AVX-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
-; AVX-NEXT: retq
+; AVX-RECIP-LABEL: v8f32_two_step2:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm1
+; AVX-RECIP-NEXT: vmulps %ymm1, %ymm0, %ymm2
+; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX-RECIP-NEXT: vsubps %ymm2, %ymm3, %ymm2
+; AVX-RECIP-NEXT: vmulps %ymm2, %ymm1, %ymm2
+; AVX-RECIP-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; AVX-RECIP-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX-RECIP-NEXT: vsubps %ymm0, %ymm3, %ymm0
+; AVX-RECIP-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; AVX-RECIP-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: v8f32_two_step2:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm1
+; FMA-RECIP-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-RECIP-NEXT: vmovaps %ymm1, %ymm3
+; FMA-RECIP-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm3
+; FMA-RECIP-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm3
+; FMA-RECIP-NEXT: vfnmadd213ps %ymm2, %ymm3, %ymm0
+; FMA-RECIP-NEXT: vfmadd132ps %ymm3, %ymm3, %ymm0
+; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: v8f32_two_step2:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00]
+; BTVER2-NEXT: vrcpps %ymm0, %ymm1 # sched: [2:1.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [2:1.00]
+; BTVER2-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [2:1.00]
+; BTVER2-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
+; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [2:1.00]
+; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: v8f32_two_step2:
+; SANDY: # BB#0:
+; SANDY-NEXT: vrcpps %ymm0, %ymm1 # sched: [5:1.00]
+; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [5:1.00]
+; SANDY-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [4:0.50]
+; SANDY-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:1.00]
+; SANDY-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [5:1.00]
+; SANDY-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:1.00]
+; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
+; SANDY-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
+; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: v8f32_two_step2:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
+; HASWELL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; HASWELL-NEXT: vmovaps %ymm1, %ymm3 # sched: [1:1.00]
+; HASWELL-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm3
+; HASWELL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm3
+; HASWELL-NEXT: vfnmadd213ps %ymm2, %ymm3, %ymm0
+; HASWELL-NEXT: vfmadd132ps %ymm3, %ymm3, %ymm0
+; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: v8f32_two_step2:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
+; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm2 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*}}(%rip), %ymm3 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vsubps %ymm2, %ymm3, %ymm2 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm1, %ymm2 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vaddps %ymm2, %ymm1, %ymm1 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm1, %ymm0 # sched: [5:1.00]
+; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
+; HASWELL-NO-FMA-NEXT: retq # sched: [1:1.00]
+;
+; KNL-LABEL: v8f32_two_step2:
+; KNL: # BB#0:
+; KNL-NEXT: vrcpps %ymm0, %ymm1 # sched: [7:2.00]
+; KNL-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; KNL-NEXT: vmovaps %ymm1, %ymm3 # sched: [1:1.00]
+; KNL-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm3
+; KNL-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm3
+; KNL-NEXT: vfnmadd213ps %ymm2, %ymm3, %ymm0
+; KNL-NEXT: vfmadd132ps %ymm3, %ymm3, %ymm0
+; KNL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
+; KNL-NEXT: retq # sched: [1:1.00]
+;
+; SKX-LABEL: v8f32_two_step2:
+; SKX: # BB#0:
+; SKX-NEXT: vrcp14ps %ymm0, %ymm1
+; SKX-NEXT: vbroadcastss {{.*}}(%rip), %ymm2 # sched: [5:1.00]
+; SKX-NEXT: vmovaps %ymm1, %ymm3 # sched: [1:1.00]
+; SKX-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm3
+; SKX-NEXT: vfmadd132ps %ymm1, %ymm1, %ymm3
+; SKX-NEXT: vfnmadd213ps %ymm2, %ymm3, %ymm0
+; SKX-NEXT: vfmadd132ps %ymm3, %ymm3, %ymm0
+; SKX-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
+; SKX-NEXT: retq # sched: [1:1.00]
%div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
ret <8 x float> %div
}
@@ -241,10 +1052,45 @@ define <8 x float> @v8f32_no_step(<8 x float> %x) #3 {
; SSE-NEXT: rcpps %xmm1, %xmm1
; SSE-NEXT: retq
;
-; AVX-LABEL: v8f32_no_step:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpps %ymm0, %ymm0
-; AVX-NEXT: retq
+; AVX-RECIP-LABEL: v8f32_no_step:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: v8f32_no_step:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: v8f32_no_step:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: v8f32_no_step:
+; SANDY: # BB#0:
+; SANDY-NEXT: vrcpps %ymm0, %ymm0 # sched: [5:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: v8f32_no_step:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vrcpps %ymm0, %ymm0 # sched: [7:2.00]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: v8f32_no_step:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm0 # sched: [7:2.00]
+; HASWELL-NO-FMA-NEXT: retq # sched: [1:1.00]
+;
+; KNL-LABEL: v8f32_no_step:
+; KNL: # BB#0:
+; KNL-NEXT: vrcpps %ymm0, %ymm0 # sched: [7:2.00]
+; KNL-NEXT: retq # sched: [1:1.00]
+;
+; SKX-LABEL: v8f32_no_step:
+; SKX: # BB#0:
+; SKX-NEXT: vrcp14ps %ymm0, %ymm0
+; SKX-NEXT: retq # sched: [1:1.00]
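+; With a splat-1.0 numerator and no refinement requested (per the name and,
+; presumably, attribute #3), the fast fdiv collapses to a bare
+; rcpps/vrcpps/vrcp14ps with no Newton-Raphson fixup.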
%div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
ret <8 x float> %div
}
@@ -258,11 +1104,53 @@ define <8 x float> @v8f32_no_step2(<8 x float> %x) #3 {
; SSE-NEXT: mulps {{.*}}(%rip), %xmm1
; SSE-NEXT: retq
;
-; AVX-LABEL: v8f32_no_step2:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpps %ymm0, %ymm0
-; AVX-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
-; AVX-NEXT: retq
+; AVX-RECIP-LABEL: v8f32_no_step2:
+; AVX-RECIP: # BB#0:
+; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm0
+; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-RECIP-NEXT: retq
+;
+; FMA-RECIP-LABEL: v8f32_no_step2:
+; FMA-RECIP: # BB#0:
+; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm0
+; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
+; FMA-RECIP-NEXT: retq
+;
+; BTVER2-LABEL: v8f32_no_step2:
+; BTVER2: # BB#0:
+; BTVER2-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:1.00]
+; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:1.00]
+; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; SANDY-LABEL: v8f32_no_step2:
+; SANDY: # BB#0:
+; SANDY-NEXT: vrcpps %ymm0, %ymm0 # sched: [5:1.00]
+; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
+; SANDY-NEXT: retq # sched: [5:1.00]
+;
+; HASWELL-LABEL: v8f32_no_step2:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vrcpps %ymm0, %ymm0 # sched: [7:2.00]
+; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-NO-FMA-LABEL: v8f32_no_step2:
+; HASWELL-NO-FMA: # BB#0:
+; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm0 # sched: [7:2.00]
+; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
+; HASWELL-NO-FMA-NEXT: retq # sched: [1:1.00]
+;
+; KNL-LABEL: v8f32_no_step2:
+; KNL: # BB#0:
+; KNL-NEXT: vrcpps %ymm0, %ymm0 # sched: [7:2.00]
+; KNL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
+; KNL-NEXT: retq # sched: [1:1.00]
+;
+; SKX-LABEL: v8f32_no_step2:
+; SKX: # BB#0:
+; SKX-NEXT: vrcp14ps %ymm0, %ymm0
+; SKX-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [9:1.00]
+; SKX-NEXT: retq # sched: [1:1.00]
%div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
ret <8 x float> %div
}
diff --git a/test/CodeGen/X86/recip-pic.ll b/test/CodeGen/X86/recip-pic.ll
new file mode 100644
index 000000000000..7a0d03d6072e
--- /dev/null
+++ b/test/CodeGen/X86/recip-pic.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -enable-unsafe-fp-math -mcpu=slm -relocation-model=pic | FileCheck %s --check-prefix=CHECK
+
+define fastcc float @foo(float %x) unnamed_addr #0 {
+; CHECK-LABEL: foo:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: calll .L0$pb
+; CHECK-NEXT: .Lcfi0:
+; CHECK-NEXT: .cfi_adjust_cfa_offset 4
+; CHECK-NEXT: .L0$pb:
+; CHECK-NEXT: popl %eax
+; CHECK-NEXT: .Lcfi1:
+; CHECK-NEXT: .cfi_adjust_cfa_offset -4
+; CHECK-NEXT: .Ltmp0:
+; CHECK-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L0$pb), %eax
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: divss %xmm0, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: movss %xmm1, (%eax)
+; CHECK-NEXT: retl
+entry:
+ %div = fdiv fast float 3.0, %x
+ store float %div, float* undef, align 4
+ ret float %div
+}
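+; What the checks above pin down is PIC-mode constant materialization: the
+; GOT base is built with the call/pop idiom plus _GLOBAL_OFFSET_TABLE_, and
+; the 3.0 constant is then loaded relative to it; note the division itself
+; stays a plain divss here.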
+
+
diff --git a/test/CodeGen/X86/reduce-trunc-shl.ll b/test/CodeGen/X86/reduce-trunc-shl.ll
index 275327b1486e..0638e9e3f6cd 100644
--- a/test/CodeGen/X86/reduce-trunc-shl.ll
+++ b/test/CodeGen/X86/reduce-trunc-shl.ll
@@ -41,7 +41,7 @@ define <8 x i16> @trunc_shl_v8i16_v8i32(<8 x i32> %a) {
; AVX2-LABEL: trunc_shl_v8i16_v8i32:
; AVX2: # BB#0:
; AVX2-NEXT: vpslld $17, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
diff --git a/test/CodeGen/X86/regparm.ll b/test/CodeGen/X86/regparm.ll
new file mode 100644
index 000000000000..9484e5a9490b
--- /dev/null
+++ b/test/CodeGen/X86/regparm.ll
@@ -0,0 +1,48 @@
+; RUN: llc %s -mtriple=i386-pc-linux -o - | FileCheck -check-prefix=CHECK %s
+; RUN: llc %s -mtriple=i386-pc-win32 -o - | FileCheck -check-prefix=WIN %s
+; RUN: llc %s -mtriple=i386-pc-linux -fast-isel -o - | FileCheck -check-prefix=FAST %s
+; RUN: llc %s -mtriple=i386-pc-win32 -fast-isel -o - | FileCheck -check-prefix=FASTWIN %s
+
+
+
+target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
+target triple = "i386-unknown-linux-gnu"
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i32, i1) #1
+
+define void @use_memset(i8* inreg nocapture %dest, i8 inreg %c, i32 inreg %n) local_unnamed_addr #0 {
+entry:
+;CHECK-LABEL: @use_memset
+;CHECK-NOT: push
+;CHECK: jmp memset
+;CHECK-NOT: retl
+;WIN-LABEL: @use_memset
+;WIN-NOT: push
+;WIN: jmp _memset
+;WIN-NOT: retl
+;FAST-LABEL: @use_memset
+;FAST: subl $12, %esp
+;FAST-NEXT: movzbl %dl, %edx
+;FAST-NEXT: calll memset
+;FAST-NEXT: addl $12, %esp
+;FASTWIN-LABEL: @use_memset
+;FASTWIN: movzbl %dl, %edx
+;FASTWIN-NEXT: calll _memset
+;FASTWIN-NEXT: retl
+ tail call void @llvm.memset.p0i8.i32(i8* %dest, i8 %c, i32 %n, i32 1, i1 false)
+ ret void
+}
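+; With inreg parameters and the NumRegisterParameters=3 module flag below,
+; %dest/%c/%n should arrive in %eax/%edx/%ecx, so the memset lowering can
+; jump straight to the libcall with no argument pushes; fast-isel instead
+; emits a plain call after zero-extending the i8 value in %edx.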
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i32, i1) #1
+
+
+attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="pentium4" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { argmemonly nounwind }
+
+!llvm.module.flags = !{!0}
+!llvm.ident = !{!1}
+
+!0 = !{i32 1, !"NumRegisterParameters", i32 3}
+!1 = !{!"clang version 4.0.0 (trunk 288025) (llvm/trunk 288033)"}
diff --git a/test/CodeGen/X86/rot32.ll b/test/CodeGen/X86/rot32.ll
index 5738f70fa47e..79ecbe0514d0 100644
--- a/test/CodeGen/X86/rot32.ll
+++ b/test/CodeGen/X86/rot32.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -march=x86 -mcpu=corei7 | FileCheck %s
+; RUN: llc < %s -march=x86 -mcpu=corei7-avx | FileCheck %s --check-prefix=SHLD
; RUN: llc < %s -march=x86 -mcpu=core-avx2 | FileCheck %s --check-prefix=BMI2
define i32 @foo(i32 %x, i32 %y, i32 %z) nounwind readnone {
@@ -49,6 +50,8 @@ define i32 @xfoo(i32 %x, i32 %y, i32 %z) nounwind readnone {
entry:
; CHECK-LABEL: xfoo:
; CHECK: roll $7
+; SHLD-LABEL: xfoo:
+; SHLD: shldl $7
; BMI2-LABEL: xfoo:
; BMI2: rorxl $25
%0 = lshr i32 %x, 25
@@ -59,8 +62,12 @@ entry:
define i32 @xfoop(i32* %p) nounwind readnone {
entry:
+; CHECK-LABEL: xfoop:
+; CHECK: roll $7
+; SHLD-LABEL: xfoop:
+; SHLD: shldl $7
; BMI2-LABEL: xfoop:
-; BMI2: rorxl $25, ({{.+}}), %{{.+}}
+; BMI2: rorxl $25
%x = load i32, i32* %p
%a = lshr i32 %x, 25
%b = shl i32 %x, 7
@@ -82,6 +89,8 @@ define i32 @xun(i32 %x, i32 %y, i32 %z) nounwind readnone {
entry:
; CHECK-LABEL: xun:
; CHECK: roll $25
+; SHLD-LABEL: xun:
+; SHLD: shldl $25
; BMI2-LABEL: xun:
; BMI2: rorxl $7
%0 = lshr i32 %x, 7
@@ -92,8 +101,12 @@ entry:
define i32 @xunp(i32* %p) nounwind readnone {
entry:
+; CHECK-LABEL: xunp:
+; CHECK: roll $25
+; SHLD-LABEL: xunp:
+; SHLD: shldl $25
; BMI2-LABEL: xunp:
-; BMI2: rorxl $7, ({{.+}}), %{{.+}}
+; BMI2: rorxl $7
%x = load i32, i32* %p
%a = lshr i32 %x, 7
%b = shl i32 %x, 25
@@ -104,7 +117,7 @@ entry:
define i32 @xbu(i32 %x, i32 %y, i32 %z) nounwind readnone {
entry:
; CHECK-LABEL: xbu:
-; CHECK: shldl
+; CHECK: shldl $25
%0 = lshr i32 %y, 7
%1 = shl i32 %x, 25
%2 = or i32 %0, %1
diff --git a/test/CodeGen/X86/rot64.ll b/test/CodeGen/X86/rot64.ll
index f77bde050c78..976acbb01675 100644
--- a/test/CodeGen/X86/rot64.ll
+++ b/test/CodeGen/X86/rot64.ll
@@ -1,12 +1,11 @@
-; RUN: llc < %s -march=x86-64 -mcpu=corei7 > %t
-; RUN: grep rol %t | count 5
-; RUN: grep ror %t | count 1
-; RUN: grep shld %t | count 2
-; RUN: grep shrd %t | count 2
+; RUN: llc < %s -march=x86-64 -mcpu=corei7 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s --check-prefix=SHLD
; RUN: llc < %s -march=x86-64 -mcpu=core-avx2 | FileCheck %s --check-prefix=BMI2
define i64 @foo(i64 %x, i64 %y, i64 %z) nounwind readnone {
entry:
+; CHECK-LABEL: foo:
+; CHECK: rolq %cl
%0 = shl i64 %x, %z
%1 = sub i64 64, %z
%2 = lshr i64 %x, %1
@@ -16,6 +15,8 @@ entry:
define i64 @bar(i64 %x, i64 %y, i64 %z) nounwind readnone {
entry:
+; CHECK-LABEL: bar:
+; CHECK: shldq %cl
%0 = shl i64 %y, %z
%1 = sub i64 64, %z
%2 = lshr i64 %x, %1
@@ -25,6 +26,8 @@ entry:
define i64 @un(i64 %x, i64 %y, i64 %z) nounwind readnone {
entry:
+; CHECK-LABEL: un:
+; CHECK: rorq %cl
%0 = lshr i64 %x, %z
%1 = sub i64 64, %z
%2 = shl i64 %x, %1
@@ -34,6 +37,8 @@ entry:
define i64 @bu(i64 %x, i64 %y, i64 %z) nounwind readnone {
entry:
+; CHECK-LABEL: bu:
+; CHECK: shrdq %cl
%0 = lshr i64 %y, %z
%1 = sub i64 64, %z
%2 = shl i64 %x, %1
@@ -43,6 +48,10 @@ entry:
define i64 @xfoo(i64 %x, i64 %y, i64 %z) nounwind readnone {
entry:
+; CHECK-LABEL: xfoo:
+; CHECK: rolq $7
+; SHLD-LABEL: xfoo:
+; SHLD: shldq $7
; BMI2-LABEL: xfoo:
; BMI2: rorxq $57
%0 = lshr i64 %x, 57
@@ -53,8 +62,12 @@ entry:
define i64 @xfoop(i64* %p) nounwind readnone {
entry:
+; CHECK-LABEL: xfoop:
+; CHECK: rolq $7
+; SHLD-LABEL: xfoop:
+; SHLD: shldq $7
; BMI2-LABEL: xfoop:
-; BMI2: rorxq $57, ({{.+}}), %{{.+}}
+; BMI2: rorxq $57
%x = load i64, i64* %p
%a = lshr i64 %x, 57
%b = shl i64 %x, 7
@@ -64,6 +77,8 @@ entry:
define i64 @xbar(i64 %x, i64 %y, i64 %z) nounwind readnone {
entry:
+; CHECK-LABEL: xbar:
+; CHECK: shrdq $57
%0 = shl i64 %y, 7
%1 = lshr i64 %x, 57
%2 = or i64 %0, %1
@@ -72,6 +87,10 @@ entry:
define i64 @xun(i64 %x, i64 %y, i64 %z) nounwind readnone {
entry:
+; CHECK-LABEL: xun:
+; CHECK: rolq $57
+; SHLD-LABEL: xun:
+; SHLD: shldq $57
; BMI2-LABEL: xun:
; BMI2: rorxq $7
%0 = lshr i64 %x, 7
@@ -82,8 +101,12 @@ entry:
define i64 @xunp(i64* %p) nounwind readnone {
entry:
+; CHECK-LABEL: xunp:
+; CHECK: rolq $57
+; SHLD-LABEL: xunp:
+; SHLD: shldq $57
; BMI2-LABEL: xunp:
-; BMI2: rorxq $7, ({{.+}}), %{{.+}}
+; BMI2: rorxq $7
%x = load i64, i64* %p
%a = lshr i64 %x, 7
%b = shl i64 %x, 57
@@ -93,6 +116,8 @@ entry:
define i64 @xbu(i64 %x, i64 %y, i64 %z) nounwind readnone {
entry:
+; CHECK-LABEL: xbu:
+; CHECK: shldq $57
%0 = lshr i64 %y, 7
%1 = shl i64 %x, 57
%2 = or i64 %0, %1
diff --git a/test/CodeGen/X86/rotate.ll b/test/CodeGen/X86/rotate.ll
index 657b312b06c9..5d5150ad62d6 100644
--- a/test/CodeGen/X86/rotate.ll
+++ b/test/CodeGen/X86/rotate.ll
@@ -541,3 +541,86 @@ define i8 @rotr1_8(i8 %A) nounwind {
%D = or i8 %B, %C
ret i8 %D
}
+
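+; Rotate-right-by-one through memory should fold into a single ror with a
+; memory operand where that form exists; the i64 case on 32-bit x86 has no
+; such form and is expanded with a shldl pair instead.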
+define void @rotr1_64_mem(i64* %Aptr) nounwind {
+; 32-LABEL: rotr1_64_mem:
+; 32: # BB#0:
+; 32-NEXT: pushl %esi
+; 32-NEXT: movl 8(%esp), %eax
+; 32-NEXT: movl (%eax), %ecx
+; 32-NEXT: movl 4(%eax), %edx
+; 32-NEXT: movl %edx, %esi
+; 32-NEXT: shldl $31, %ecx, %esi
+; 32-NEXT: shldl $31, %edx, %ecx
+; 32-NEXT: movl %ecx, 4(%eax)
+; 32-NEXT: movl %esi, (%eax)
+; 32-NEXT: popl %esi
+
+; 64-LABEL: rotr1_64_mem:
+; 64: # BB#0:
+; 64-NEXT: rorq (%rdi)
+; 64-NEXT: retq
+ %A = load i64, i64 *%Aptr
+ %B = shl i64 %A, 63
+ %C = lshr i64 %A, 1
+ %D = or i64 %B, %C
+ store i64 %D, i64* %Aptr
+ ret void
+}
+
+define void @rotr1_32_mem(i32* %Aptr) nounwind {
+; 32-LABEL: rotr1_32_mem:
+; 32: # BB#0:
+; 32-NEXT: movl 4(%esp), %eax
+; 32-NEXT: rorl (%eax)
+; 32-NEXT: retl
+;
+; 64-LABEL: rotr1_32_mem:
+; 64: # BB#0:
+; 64-NEXT: rorl (%rdi)
+; 64-NEXT: retq
+ %A = load i32, i32 *%Aptr
+ %B = shl i32 %A, 31
+ %C = lshr i32 %A, 1
+ %D = or i32 %B, %C
+ store i32 %D, i32* %Aptr
+ ret void
+}
+
+define void @rotr1_16_mem(i16* %Aptr) nounwind {
+; 32-LABEL: rotr1_16_mem:
+; 32: # BB#0:
+; 32-NEXT: movl 4(%esp), %eax
+; 32-NEXT: rorw (%eax)
+; 32-NEXT: retl
+;
+; 64-LABEL: rotr1_16_mem:
+; 64: # BB#0:
+; 64-NEXT: rorw (%rdi)
+; 64-NEXT: retq
+ %A = load i16, i16 *%Aptr
+ %B = shl i16 %A, 15
+ %C = lshr i16 %A, 1
+ %D = or i16 %B, %C
+ store i16 %D, i16* %Aptr
+ ret void
+}
+
+define void @rotr1_8_mem(i8* %Aptr) nounwind {
+; 32-LABEL: rotr1_8_mem:
+; 32: # BB#0:
+; 32-NEXT: movl 4(%esp), %eax
+; 32-NEXT: rorb (%eax)
+; 32-NEXT: retl
+;
+; 64-LABEL: rotr1_8_mem:
+; 64: # BB#0:
+; 64-NEXT: rorb (%rdi)
+; 64-NEXT: retq
+ %A = load i8, i8 *%Aptr
+ %B = shl i8 %A, 7
+ %C = lshr i8 %A, 1
+ %D = or i8 %B, %C
+ store i8 %D, i8* %Aptr
+ ret void
+}
diff --git a/test/CodeGen/X86/rtm.ll b/test/CodeGen/X86/rtm.ll
index fb06cac45fff..7215c482ffa2 100644
--- a/test/CodeGen/X86/rtm.ll
+++ b/test/CodeGen/X86/rtm.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -mattr=+rtm -mtriple=x86_64-unknown-unknown | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+rtm | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+rtm | FileCheck %s --check-prefix=X64
declare i32 @llvm.x86.xbegin() nounwind
declare void @llvm.x86.xend() nounwind
@@ -6,39 +8,78 @@ declare void @llvm.x86.xabort(i8) nounwind
declare void @f1()
define i32 @test_xbegin() nounwind uwtable {
+; X86-LABEL: test_xbegin:
+; X86: # BB#0: # %entry
+; X86-NEXT: xbegin .LBB0_2
+; X86-NEXT: # BB#1: # %entry
+; X86-NEXT: movl $-1, %eax
+; X86-NEXT: .LBB0_2: # %entry
+; X86-NEXT: retl
+;
+; X64-LABEL: test_xbegin:
+; X64: # BB#0: # %entry
+; X64-NEXT: xbegin .LBB0_2
+; X64-NEXT: # BB#1: # %entry
+; X64-NEXT: movl $-1, %eax
+; X64-NEXT: .LBB0_2: # %entry
+; X64-NEXT: retq
entry:
%0 = tail call i32 @llvm.x86.xbegin() nounwind
ret i32 %0
-; CHECK: test_xbegin
-; CHECK: xbegin [[LABEL:.*BB.*]]
-; CHECK: [[LABEL]]:
}
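+; On an abort the hardware branches to the xbegin target with the abort
+; status already in %eax; the fallthrough path materializes -1
+; (XBEGIN_STARTED) instead, which is the two-block pattern checked above.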
define void @test_xend() nounwind uwtable {
+; X86-LABEL: test_xend:
+; X86: # BB#0: # %entry
+; X86-NEXT: xend
+; X86-NEXT: retl
+;
+; X64-LABEL: test_xend:
+; X64: # BB#0: # %entry
+; X64-NEXT: xend
+; X64-NEXT: retq
entry:
tail call void @llvm.x86.xend() nounwind
ret void
-; CHECK: test_xend
-; CHECK: xend
}
define void @test_xabort() nounwind uwtable {
+; X86-LABEL: test_xabort:
+; X86: # BB#0: # %entry
+; X86-NEXT: xabort $2
+; X86-NEXT: retl
+;
+; X64-LABEL: test_xabort:
+; X64: # BB#0: # %entry
+; X64-NEXT: xabort $2
+; X64-NEXT: retq
entry:
tail call void @llvm.x86.xabort(i8 2)
ret void
-; CHECK: test_xabort
-; CHECK: xabort $2
}
define void @f2(i32 %x) nounwind uwtable {
+; X86-LABEL: f2:
+; X86: # BB#0: # %entry
+; X86-NEXT: xabort $1
+; X86-NEXT: calll f1
+; X86-NEXT: retl
+;
+; X64-LABEL: f2:
+; X64: # BB#0: # %entry
+; X64-NEXT: pushq %rax
+; X64-NEXT: .Lcfi0:
+; X64-NEXT: .cfi_def_cfa_offset 16
+; X64-NEXT: movl %edi, {{[0-9]+}}(%rsp)
+; X64-NEXT: xabort $1
+; X64-NEXT: callq f1
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
entry:
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
call void @llvm.x86.xabort(i8 1)
call void @f1()
ret void
-; CHECK-LABEL: f2
-; CHECK: xabort $1
-; CHECK: callq f1
}
- \ No newline at end of file
+
diff --git a/test/CodeGen/X86/sad.ll b/test/CodeGen/X86/sad.ll
index 07c07485c88e..b8a8b8afd14f 100644
--- a/test/CodeGen/X86/sad.ll
+++ b/test/CodeGen/X86/sad.ll
@@ -81,6 +81,7 @@ define i32 @sad_16i8() nounwind {
; AVX512F-NEXT: vpshufd {{.*#+}} zmm1 = zmm0[1,1,2,3,5,5,6,7,9,9,10,11,13,13,14,15]
; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vmovd %xmm0, %eax
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: sad_16i8:
@@ -106,6 +107,7 @@ define i32 @sad_16i8() nounwind {
; AVX512BW-NEXT: vpshufd {{.*#+}} zmm1 = zmm0[1,1,2,3,5,5,6,7,9,9,10,11,13,13,14,15]
; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vmovd %xmm0, %eax
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
entry:
br label %vector.body
@@ -147,129 +149,123 @@ middle.block:
define i32 @sad_32i8() nounwind {
; SSE2-LABEL: sad_32i8:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: pxor %xmm12, %xmm12
+; SSE2-NEXT: pxor %xmm11, %xmm11
; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm13, %xmm13
+; SSE2-NEXT: pxor %xmm12, %xmm12
; SSE2-NEXT: pxor %xmm15, %xmm15
-; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pxor %xmm13, %xmm13
; SSE2-NEXT: pxor %xmm14, %xmm14
; SSE2-NEXT: .p2align 4, 0x90
; SSE2-NEXT: .LBB1_1: # %vector.body
; SSE2-NEXT: # =>This Inner Loop Header: Depth=1
-; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa a+1040(%rax), %xmm1
+; SSE2-NEXT: movdqa a+1040(%rax), %xmm6
; SSE2-NEXT: movdqa a+1024(%rax), %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm12[0],xmm8[1],xmm12[1],xmm8[2],xmm12[2],xmm8[3],xmm12[3],xmm8[4],xmm12[4],xmm8[5],xmm12[5],xmm8[6],xmm12[6],xmm8[7],xmm12[7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm1[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm12[0],xmm7[1],xmm12[1],xmm7[2],xmm12[2],xmm7[3],xmm12[3],xmm7[4],xmm12[4],xmm7[5],xmm12[5],xmm7[6],xmm12[6],xmm7[7],xmm12[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm12[0],xmm3[1],xmm12[1],xmm3[2],xmm12[2],xmm3[3],xmm12[3],xmm3[4],xmm12[4],xmm3[5],xmm12[5],xmm3[6],xmm12[6],xmm3[7],xmm12[7]
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm12[4],xmm3[5],xmm12[5],xmm3[6],xmm12[6],xmm3[7],xmm12[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm12[0],xmm1[1],xmm12[1],xmm1[2],xmm12[2],xmm1[3],xmm12[3],xmm1[4],xmm12[4],xmm1[5],xmm12[5],xmm1[6],xmm12[6],xmm1[7],xmm12[7]
+; SSE2-NEXT: movdqa %xmm3, %xmm8
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm11[0],xmm8[1],xmm11[1],xmm8[2],xmm11[2],xmm8[3],xmm11[3],xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm11[8],xmm3[9],xmm11[9],xmm3[10],xmm11[10],xmm3[11],xmm11[11],xmm3[12],xmm11[12],xmm3[13],xmm11[13],xmm3[14],xmm11[14],xmm3[15],xmm11[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm11[4],xmm3[5],xmm11[5],xmm3[6],xmm11[6],xmm3[7],xmm11[7]
+; SSE2-NEXT: movdqa %xmm6, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3],xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm12[4],xmm1[5],xmm12[5],xmm1[6],xmm12[6],xmm1[7],xmm12[7]
-; SSE2-NEXT: movdqa b+1040(%rax), %xmm2
-; SSE2-NEXT: movdqa b+1024(%rax), %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm2[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
-; SSE2-NEXT: movdqa %xmm2, %xmm10
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm11[8],xmm6[9],xmm11[9],xmm6[10],xmm11[10],xmm6[11],xmm11[11],xmm6[12],xmm11[12],xmm6[13],xmm11[13],xmm6[14],xmm11[14],xmm6[15],xmm11[15]
+; SSE2-NEXT: movdqa %xmm6, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
+; SSE2-NEXT: movdqa b+1040(%rax), %xmm9
+; SSE2-NEXT: movdqa %xmm9, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm11[8],xmm9[9],xmm11[9],xmm9[10],xmm11[10],xmm9[11],xmm11[11],xmm9[12],xmm11[12],xmm9[13],xmm11[13],xmm9[14],xmm11[14],xmm9[15],xmm11[15]
+; SSE2-NEXT: movdqa %xmm9, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm11[4],xmm9[5],xmm11[5],xmm9[6],xmm11[6],xmm9[7],xmm11[7]
+; SSE2-NEXT: psubd %xmm9, %xmm6
+; SSE2-NEXT: movdqa b+1024(%rax), %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
+; SSE2-NEXT: psubd %xmm10, %xmm7
+; SSE2-NEXT: movdqa %xmm2, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
; SSE2-NEXT: psubd %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm5[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3]
-; SSE2-NEXT: psubd %xmm10, %xmm0
-; SSE2-NEXT: movdqa %xmm5, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
-; SSE2-NEXT: psubd %xmm5, %xmm3
-; SSE2-NEXT: movdqa %xmm7, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm12[4],xmm7[5],xmm12[5],xmm7[6],xmm12[6],xmm7[7],xmm12[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm12[0],xmm9[1],xmm12[1],xmm9[2],xmm12[2],xmm9[3],xmm12[3],xmm9[4],xmm12[4],xmm9[5],xmm12[5],xmm9[6],xmm12[6],xmm9[7],xmm12[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3]
-; SSE2-NEXT: psubd %xmm2, %xmm6
-; SSE2-NEXT: movdqa %xmm4, %xmm10
-; SSE2-NEXT: movdqa %xmm9, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm12[4],xmm9[5],xmm12[5],xmm9[6],xmm12[6],xmm9[7],xmm12[7]
-; SSE2-NEXT: psubd %xmm9, %xmm7
-; SSE2-NEXT: movdqa %xmm8, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm12[4],xmm8[5],xmm12[5],xmm8[6],xmm12[6],xmm8[7],xmm12[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3],xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3]
-; SSE2-NEXT: psubd %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm11, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
-; SSE2-NEXT: psubd %xmm11, %xmm8
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3]
-; SSE2-NEXT: psubd %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm4, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm11[8],xmm4[9],xmm11[9],xmm4[10],xmm11[10],xmm4[11],xmm11[11],xmm4[12],xmm11[12],xmm4[13],xmm11[13],xmm4[14],xmm11[14],xmm4[15],xmm11[15]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3]
+; SSE2-NEXT: psubd %xmm9, %xmm0
+; SSE2-NEXT: movdqa %xmm4, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm11[4],xmm4[5],xmm11[5],xmm4[6],xmm11[6],xmm4[7],xmm11[7]
+; SSE2-NEXT: psubd %xmm4, %xmm3
+; SSE2-NEXT: movdqa %xmm8, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3]
+; SSE2-NEXT: psubd %xmm9, %xmm5
; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: psrad $31, %xmm4
-; SSE2-NEXT: paddd %xmm4, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: movdqa %xmm8, %xmm4
-; SSE2-NEXT: psrad $31, %xmm4
-; SSE2-NEXT: paddd %xmm4, %xmm8
-; SSE2-NEXT: pxor %xmm4, %xmm8
-; SSE2-NEXT: movdqa %xmm5, %xmm4
-; SSE2-NEXT: psrad $31, %xmm4
-; SSE2-NEXT: paddd %xmm4, %xmm5
-; SSE2-NEXT: pxor %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm7, %xmm4
-; SSE2-NEXT: psrad $31, %xmm4
-; SSE2-NEXT: paddd %xmm4, %xmm7
-; SSE2-NEXT: pxor %xmm4, %xmm7
-; SSE2-NEXT: movdqa %xmm6, %xmm4
-; SSE2-NEXT: psrad $31, %xmm4
-; SSE2-NEXT: paddd %xmm4, %xmm6
-; SSE2-NEXT: pxor %xmm4, %xmm6
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: psrad $31, %xmm4
-; SSE2-NEXT: paddd %xmm4, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: psrad $31, %xmm4
-; SSE2-NEXT: paddd %xmm4, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: psrad $31, %xmm4
-; SSE2-NEXT: paddd %xmm4, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm10, %xmm4
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
+; SSE2-NEXT: psubd %xmm2, %xmm8
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1],xmm4[2],xmm11[2],xmm4[3],xmm11[3]
+; SSE2-NEXT: psubd %xmm4, %xmm10
+; SSE2-NEXT: movdqa %xmm10, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: paddd %xmm2, %xmm10
+; SSE2-NEXT: pxor %xmm2, %xmm10
+; SSE2-NEXT: movdqa %xmm8, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: paddd %xmm2, %xmm8
+; SSE2-NEXT: pxor %xmm2, %xmm8
+; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: paddd %xmm2, %xmm5
+; SSE2-NEXT: pxor %xmm2, %xmm5
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: paddd %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: paddd %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm7, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: paddd %xmm2, %xmm7
+; SSE2-NEXT: pxor %xmm2, %xmm7
+; SSE2-NEXT: movdqa %xmm6, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: paddd %xmm2, %xmm6
+; SSE2-NEXT: pxor %xmm2, %xmm6
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm6, %xmm14
+; SSE2-NEXT: paddd %xmm7, %xmm13
; SSE2-NEXT: paddd %xmm1, %xmm15
; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm0, %xmm13
+; SSE2-NEXT: paddd %xmm0, %xmm12
; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm3, %xmm4
-; SSE2-NEXT: paddd %xmm6, %xmm0
-; SSE2-NEXT: paddd %xmm7, %xmm14
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm5, %xmm3
-; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm8, %xmm1
+; SSE2-NEXT: paddd %xmm3, %xmm1
; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm5, %xmm2
+; SSE2-NEXT: paddd %xmm8, %xmm3
+; SSE2-NEXT: paddd %xmm10, %xmm0
; SSE2-NEXT: addq $4, %rax
; SSE2-NEXT: jne .LBB1_1
; SSE2-NEXT: # BB#2: # %middle.block
-; SSE2-NEXT: paddd %xmm15, %xmm4
+; SSE2-NEXT: paddd %xmm15, %xmm3
; SSE2-NEXT: paddd %xmm14, %xmm1
-; SSE2-NEXT: paddd %xmm13, %xmm0
-; SSE2-NEXT: paddd %xmm5, %xmm2
-; SSE2-NEXT: paddd %xmm4, %xmm1
+; SSE2-NEXT: paddd %xmm12, %xmm0
+; SSE2-NEXT: paddd %xmm13, %xmm2
+; SSE2-NEXT: paddd %xmm3, %xmm1
; SSE2-NEXT: paddd %xmm2, %xmm1
; SSE2-NEXT: paddd %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
@@ -330,6 +326,7 @@ define i32 @sad_32i8() nounwind {
; AVX512F-NEXT: vpshufd {{.*#+}} zmm1 = zmm0[1,1,2,3,5,5,6,7,9,9,10,11,13,13,14,15]
; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vmovd %xmm0, %eax
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: sad_32i8:
@@ -357,6 +354,7 @@ define i32 @sad_32i8() nounwind {
; AVX512BW-NEXT: vpshufd {{.*#+}} zmm1 = zmm0[1,1,2,3,5,5,6,7,9,9,10,11,13,13,14,15]
; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vmovd %xmm0, %eax
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
entry:
br label %vector.body
@@ -400,291 +398,288 @@ middle.block:
define i32 @sad_avx64i8() nounwind {
; SSE2-LABEL: sad_avx64i8:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: subq $216, %rsp
-; SSE2-NEXT: pxor %xmm6, %xmm6
+; SSE2-NEXT: subq $184, %rsp
+; SSE2-NEXT: pxor %xmm15, %xmm15
; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
+; SSE2-NEXT: pxor %xmm12, %xmm12
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: pxor %xmm14, %xmm14
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm13, %xmm13
+; SSE2-NEXT: pxor %xmm6, %xmm6
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: pxor %xmm10, %xmm10
-; SSE2-NEXT: pxor %xmm12, %xmm12
+; SSE2-NEXT: pxor %xmm11, %xmm11
; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pxor %xmm7, %xmm7
+; SSE2-NEXT: pxor %xmm13, %xmm13
; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm15, %xmm15
-; SSE2-NEXT: pxor %xmm11, %xmm11
-; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: .p2align 4, 0x90
; SSE2-NEXT: .LBB2_1: # %vector.body
; SSE2-NEXT: # =>This Inner Loop Header: Depth=1
-; SSE2-NEXT: movdqa %xmm5, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm15, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm11, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm4, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm13, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm10, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm12, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa a+1040(%rax), %xmm13
-; SSE2-NEXT: movdqa a+1024(%rax), %xmm12
-; SSE2-NEXT: movdqa a+1056(%rax), %xmm10
-; SSE2-NEXT: movdqa a+1072(%rax), %xmm8
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm10[2,3,0,1]
+; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm13, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm11, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm6[0],xmm10[1],xmm6[1],xmm10[2],xmm6[2],xmm10[3],xmm6[3],xmm10[4],xmm6[4],xmm10[5],xmm6[5],xmm10[6],xmm6[6],xmm10[7],xmm6[7]
-; SSE2-NEXT: movdqa %xmm10, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm12[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm13[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
-; SSE2-NEXT: movdqa %xmm4, %xmm1
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm6[0],xmm10[1],xmm6[1],xmm10[2],xmm6[2],xmm10[3],xmm6[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm6[0],xmm12[1],xmm6[1],xmm12[2],xmm6[2],xmm12[3],xmm6[3],xmm12[4],xmm6[4],xmm12[5],xmm6[5],xmm12[6],xmm6[6],xmm12[7],xmm6[7]
+; SSE2-NEXT: movdqa %xmm14, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm4, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm6, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm8, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm12, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa a+1040(%rax), %xmm6
+; SSE2-NEXT: movdqa a+1024(%rax), %xmm4
+; SSE2-NEXT: movdqa a+1056(%rax), %xmm11
+; SSE2-NEXT: movdqa a+1072(%rax), %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm5
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
+; SSE2-NEXT: movdqa %xmm11, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm15[8],xmm1[9],xmm15[9],xmm1[10],xmm15[10],xmm1[11],xmm15[11],xmm1[12],xmm15[12],xmm1[13],xmm15[13],xmm1[14],xmm15[14],xmm1[15],xmm15[15]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm15[0],xmm11[1],xmm15[1],xmm11[2],xmm15[2],xmm11[3],xmm15[3],xmm11[4],xmm15[4],xmm11[5],xmm15[5],xmm11[6],xmm15[6],xmm11[7],xmm15[7]
+; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm15[0],xmm11[1],xmm15[1],xmm11[2],xmm15[2],xmm11[3],xmm15[3]
+; SSE2-NEXT: movdqa %xmm4, %xmm12
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm15[0],xmm12[1],xmm15[1],xmm12[2],xmm15[2],xmm12[3],xmm15[3],xmm12[4],xmm15[4],xmm12[5],xmm15[5],xmm12[6],xmm15[6],xmm12[7],xmm15[7]
; SSE2-NEXT: movdqa %xmm12, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
-; SSE2-NEXT: movdqa %xmm0, %xmm15
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm6[4],xmm12[5],xmm6[5],xmm12[6],xmm6[6],xmm12[7],xmm6[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm6[0],xmm13[1],xmm6[1],xmm13[2],xmm6[2],xmm13[3],xmm6[3],xmm13[4],xmm6[4],xmm13[5],xmm6[5],xmm13[6],xmm6[6],xmm13[7],xmm6[7]
-; SSE2-NEXT: movdqa %xmm13, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm6[4],xmm13[5],xmm6[5],xmm13[6],xmm6[6],xmm13[7],xmm6[7]
-; SSE2-NEXT: movdqa b+1040(%rax), %xmm7
-; SSE2-NEXT: movdqa b+1024(%rax), %xmm11
-; SSE2-NEXT: movdqa b+1056(%rax), %xmm9
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; SSE2-NEXT: movdqa %xmm7, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; SSE2-NEXT: psubd %xmm7, %xmm13
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm11[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm6[0],xmm11[1],xmm6[1],xmm11[2],xmm6[2],xmm11[3],xmm6[3],xmm11[4],xmm6[4],xmm11[5],xmm6[5],xmm11[6],xmm6[6],xmm11[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3]
-; SSE2-NEXT: psubd %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm11, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm6[4],xmm11[5],xmm6[5],xmm11[6],xmm6[6],xmm11[7],xmm6[7]
-; SSE2-NEXT: psubd %xmm11, %xmm12
-; SSE2-NEXT: pshufd {{.*#+}} xmm14 = xmm9[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm6[0],xmm9[1],xmm6[1],xmm9[2],xmm6[2],xmm9[3],xmm6[3],xmm9[4],xmm6[4],xmm9[5],xmm6[5],xmm9[6],xmm6[6],xmm9[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3]
-; SSE2-NEXT: psubd %xmm2, %xmm15
-; SSE2-NEXT: movdqa %xmm15, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm9, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm6[0],xmm9[1],xmm6[1],xmm9[2],xmm6[2],xmm9[3],xmm6[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm15[4],xmm12[5],xmm15[5],xmm12[6],xmm15[6],xmm12[7],xmm15[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm15[8],xmm4[9],xmm15[9],xmm4[10],xmm15[10],xmm4[11],xmm15[11],xmm4[12],xmm15[12],xmm4[13],xmm15[13],xmm4[14],xmm15[14],xmm4[15],xmm15[15]
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm15[4],xmm4[5],xmm15[5],xmm4[6],xmm15[6],xmm4[7],xmm15[7]
+; SSE2-NEXT: movdqa %xmm6, %xmm14
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3],xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
+; SSE2-NEXT: movdqa %xmm14, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm15[0],xmm7[1],xmm15[1],xmm7[2],xmm15[2],xmm7[3],xmm15[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm15[8],xmm6[9],xmm15[9],xmm6[10],xmm15[10],xmm6[11],xmm15[11],xmm6[12],xmm15[12],xmm6[13],xmm15[13],xmm6[14],xmm15[14],xmm6[15],xmm15[15]
+; SSE2-NEXT: movdqa %xmm6, %xmm8
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm15[0],xmm8[1],xmm15[1],xmm8[2],xmm15[2],xmm8[3],xmm15[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7]
+; SSE2-NEXT: movdqa b+1040(%rax), %xmm9
+; SSE2-NEXT: movdqa %xmm9, %xmm13
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm15[8],xmm9[9],xmm15[9],xmm9[10],xmm15[10],xmm9[11],xmm15[11],xmm9[12],xmm15[12],xmm9[13],xmm15[13],xmm9[14],xmm15[14],xmm9[15],xmm15[15]
+; SSE2-NEXT: movdqa %xmm9, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7]
+; SSE2-NEXT: psubd %xmm9, %xmm6
+; SSE2-NEXT: movdqa b+1024(%rax), %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm15[0],xmm13[1],xmm15[1],xmm13[2],xmm15[2],xmm13[3],xmm15[3],xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3]
+; SSE2-NEXT: psubd %xmm10, %xmm8
+; SSE2-NEXT: movdqa %xmm13, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
+; SSE2-NEXT: psubd %xmm13, %xmm14
+; SSE2-NEXT: movdqa %xmm2, %xmm10
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm15[8],xmm2[9],xmm15[9],xmm2[10],xmm15[10],xmm2[11],xmm15[11],xmm2[12],xmm15[12],xmm2[13],xmm15[13],xmm2[14],xmm15[14],xmm2[15],xmm15[15]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3]
+; SSE2-NEXT: psubd %xmm9, %xmm7
+; SSE2-NEXT: movdqa %xmm2, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
+; SSE2-NEXT: psubd %xmm2, %xmm4
+; SSE2-NEXT: movdqa b+1056(%rax), %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3],xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3]
+; SSE2-NEXT: psubd %xmm9, %xmm0
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm10, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
+; SSE2-NEXT: psubd %xmm10, %xmm12
+; SSE2-NEXT: movdqa %xmm2, %xmm10
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3],xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3]
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: psubd %xmm9, %xmm0
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm2, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3]
+; SSE2-NEXT: psubd %xmm2, %xmm11
+; SSE2-NEXT: movdqa %xmm1, %xmm13
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3]
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm15[4],xmm0[5],xmm15[5],xmm0[6],xmm15[6],xmm0[7],xmm15[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm15[8],xmm10[9],xmm15[9],xmm10[10],xmm15[10],xmm10[11],xmm15[11],xmm10[12],xmm15[12],xmm10[13],xmm15[13],xmm10[14],xmm15[14],xmm10[15],xmm15[15]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7]
+; SSE2-NEXT: psubd %xmm9, %xmm0
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm10, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3]
+; SSE2-NEXT: psubd %xmm10, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm10
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
+; SSE2-NEXT: psubd %xmm2, %xmm13
+; SSE2-NEXT: movdqa b+1072(%rax), %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3],xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
+; SSE2-NEXT: movdqa %xmm2, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3]
+; SSE2-NEXT: psubd %xmm2, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7]
; SSE2-NEXT: psubd %xmm9, %xmm10
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm15[8],xmm5[9],xmm15[9],xmm5[10],xmm15[10],xmm5[11],xmm15[11],xmm5[12],xmm15[12],xmm5[13],xmm15[13],xmm5[14],xmm15[14],xmm5[15],xmm15[15]
+; SSE2-NEXT: movdqa %xmm5, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm15[0],xmm5[1],xmm15[1],xmm5[2],xmm15[2],xmm5[3],xmm15[3]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm15[8],xmm0[9],xmm15[9],xmm0[10],xmm15[10],xmm0[11],xmm15[11],xmm0[12],xmm15[12],xmm0[13],xmm15[13],xmm0[14],xmm15[14],xmm0[15],xmm15[15]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
+; SSE2-NEXT: psubd %xmm0, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
+; SSE2-NEXT: psubd %xmm2, %xmm9
+; SSE2-NEXT: movdqa %xmm9, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm9
+; SSE2-NEXT: pxor %xmm0, %xmm9
; SSE2-NEXT: movdqa %xmm5, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
-; SSE2-NEXT: psubd %xmm5, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm15
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
-; SSE2-NEXT: psubd %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm5
+; SSE2-NEXT: pxor %xmm0, %xmm5
+; SSE2-NEXT: movdqa %xmm10, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm10
+; SSE2-NEXT: pxor %xmm0, %xmm10
+; SSE2-NEXT: movdqa %xmm3, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm3
+; SSE2-NEXT: pxor %xmm0, %xmm3
+; SSE2-NEXT: movdqa %xmm13, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm13
+; SSE2-NEXT: pxor %xmm0, %xmm13
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm11, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm11
+; SSE2-NEXT: pxor %xmm0, %xmm11
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm12, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm12
+; SSE2-NEXT: pxor %xmm0, %xmm12
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm4
+; SSE2-NEXT: pxor %xmm0, %xmm4
; SSE2-NEXT: movdqa %xmm7, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; SSE2-NEXT: psubd %xmm7, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm8[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3],xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7]
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm6[0],xmm15[1],xmm6[1],xmm15[2],xmm6[2],xmm15[3],xmm6[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
-; SSE2-NEXT: psubd %xmm0, %xmm15
-; SSE2-NEXT: movdqa %xmm1, %xmm11
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm6[0],xmm14[1],xmm6[1],xmm14[2],xmm6[2],xmm14[3],xmm6[3],xmm14[4],xmm6[4],xmm14[5],xmm6[5],xmm14[6],xmm6[6],xmm14[7],xmm6[7]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
-; SSE2-NEXT: psubd %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm7
+; SSE2-NEXT: pxor %xmm0, %xmm7
; SSE2-NEXT: movdqa %xmm14, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm6[0],xmm14[1],xmm6[1],xmm14[2],xmm6[2],xmm14[3],xmm6[3]
-; SSE2-NEXT: psubd %xmm14, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm14
-; SSE2-NEXT: movdqa %xmm8, %xmm9
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm6[4],xmm11[5],xmm6[5],xmm11[6],xmm6[6],xmm11[7],xmm6[7]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; SSE2-NEXT: psubd %xmm0, %xmm11
-; SSE2-NEXT: movdqa b+1072(%rax), %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
-; SSE2-NEXT: psubd %xmm0, %xmm8
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm6[4],xmm9[5],xmm6[5],xmm9[6],xmm6[6],xmm9[7],xmm6[7]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
-; SSE2-NEXT: psubd %xmm5, %xmm9
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; SSE2-NEXT: movdqa %xmm7, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3]
-; SSE2-NEXT: psubd %xmm2, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
-; SSE2-NEXT: psubd %xmm5, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm7, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm7
-; SSE2-NEXT: pxor %xmm2, %xmm7
-; SSE2-NEXT: movdqa %xmm9, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm9
-; SSE2-NEXT: pxor %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm8, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm8
-; SSE2-NEXT: pxor %xmm2, %xmm8
-; SSE2-NEXT: movdqa %xmm11, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm11
-; SSE2-NEXT: pxor %xmm2, %xmm11
-; SSE2-NEXT: movdqa %xmm14, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm14
-; SSE2-NEXT: pxor %xmm2, %xmm14
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm15, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm15
-; SSE2-NEXT: pxor %xmm2, %xmm15
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm4, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm4
-; SSE2-NEXT: pxor %xmm2, %xmm4
-; SSE2-NEXT: movdqa %xmm10, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm10
-; SSE2-NEXT: pxor %xmm2, %xmm10
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm14
+; SSE2-NEXT: pxor %xmm0, %xmm14
+; SSE2-NEXT: movdqa %xmm8, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm8
+; SSE2-NEXT: pxor %xmm0, %xmm8
+; SSE2-NEXT: movdqa %xmm6, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm6
+; SSE2-NEXT: pxor %xmm0, %xmm6
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm6, %xmm2
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm8, %xmm6
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 # 16-byte Reload
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm14, %xmm2
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm7, %xmm2
+; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm14 # 16-byte Reload
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Folded Reload
+; SSE2-NEXT: paddd %xmm12, %xmm8
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
+; SSE2-NEXT: movdqa %xmm0, %xmm12
+; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm11, %xmm0
+; SSE2-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa (%rsp), %xmm11 # 16-byte Reload
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
+; SSE2-NEXT: paddd %xmm1, %xmm2
+; SSE2-NEXT: paddd %xmm13, %xmm7
; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
+; SSE2-NEXT: paddd %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm12, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm12
-; SSE2-NEXT: pxor %xmm2, %xmm12
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm13 # 16-byte Reload
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm13, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm13
-; SSE2-NEXT: pxor %xmm2, %xmm13
+; SSE2-NEXT: paddd %xmm10, %xmm1
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm5, %xmm3
; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm13, %xmm5
+; SSE2-NEXT: paddd %xmm9, %xmm5
; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm13 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm1, %xmm13
; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm12, %xmm5
-; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm10, %xmm1
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10 # 16-byte Reload
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm12 # 16-byte Reload
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm10 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm3, %xmm2
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm15, %xmm3
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm15 # 16-byte Reload
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm14, %xmm15
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm11, %xmm4
-; SSE2-NEXT: movdqa %xmm4, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11 # 16-byte Reload
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm8, %xmm4
-; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm8 # 16-byte Reload
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm9, %xmm4
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm7, %xmm5
-; SSE2-NEXT: movdqa (%rsp), %xmm7 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm0, %xmm7
-; SSE2-NEXT: movdqa %xmm7, (%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: addq $4, %rax
; SSE2-NEXT: jne .LBB2_1
; SSE2-NEXT: # BB#2: # %middle.block
-; SSE2-NEXT: paddd %xmm15, %xmm3
-; SSE2-NEXT: paddd %xmm5, %xmm10
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm6 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm8, %xmm13
+; SSE2-NEXT: paddd %xmm2, %xmm4
+; SSE2-NEXT: paddd %xmm3, %xmm6
+; SSE2-NEXT: movdqa %xmm12, %xmm2
; SSE2-NEXT: paddd %xmm11, %xmm2
-; SSE2-NEXT: paddd %xmm0, %xmm12
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm1, %xmm5
+; SSE2-NEXT: paddd %xmm13, %xmm14
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm7, %xmm3
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm5, %xmm7
+; SSE2-NEXT: paddd %xmm0, %xmm8
; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm4, %xmm0
-; SSE2-NEXT: paddd %xmm2, %xmm12
-; SSE2-NEXT: paddd %xmm3, %xmm10
-; SSE2-NEXT: paddd %xmm13, %xmm10
-; SSE2-NEXT: paddd %xmm0, %xmm12
-; SSE2-NEXT: paddd %xmm5, %xmm12
-; SSE2-NEXT: paddd %xmm10, %xmm12
-; SSE2-NEXT: paddd %xmm6, %xmm12
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,3,0,1]
-; SSE2-NEXT: paddd %xmm12, %xmm0
+; SSE2-NEXT: paddd %xmm1, %xmm0
+; SSE2-NEXT: paddd %xmm3, %xmm7
+; SSE2-NEXT: paddd %xmm4, %xmm6
+; SSE2-NEXT: paddd %xmm14, %xmm6
+; SSE2-NEXT: paddd %xmm0, %xmm7
+; SSE2-NEXT: paddd %xmm8, %xmm7
+; SSE2-NEXT: paddd %xmm6, %xmm7
+; SSE2-NEXT: paddd %xmm2, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,0,1]
+; SSE2-NEXT: paddd %xmm7, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT: paddd %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: addq $216, %rsp
+; SSE2-NEXT: addq $184, %rsp
; SSE2-NEXT: retq
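+;
+; Note: the repeated movdqa / psrad $31 / paddd / pxor quads above are the
+; branch-free absolute-value idiom: with t = x >>arith 31 (all-ones in
+; negative lanes), (x + t) ^ t == |x|. This SSE2 lowering widens each byte
+; to i32 with the punpck*bw/punpck*wd-against-zero steps, materializes
+; |a-b| this way, and the trailing pshufd/paddd pairs fold the vector
+; accumulator down to the scalar sum returned in %eax.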
;
; AVX2-LABEL: sad_avx64i8:
@@ -808,6 +803,7 @@ define i32 @sad_avx64i8() nounwind {
; AVX512F-NEXT: vpshufd {{.*#+}} zmm1 = zmm0[1,1,2,3,5,5,6,7,9,9,10,11,13,13,14,15]
; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vmovd %xmm0, %eax
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
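+;
+; Note: the vzeroupper newly emitted before these returns clears the dirty
+; upper YMM/ZMM state left behind by the 512-bit ops, so callers do not pay
+; the AVX-to-SSE transition penalty.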
;
; AVX512BW-LABEL: sad_avx64i8:
@@ -836,6 +832,7 @@ define i32 @sad_avx64i8() nounwind {
; AVX512BW-NEXT: vpshufd {{.*#+}} zmm1 = zmm0[1,1,2,3,5,5,6,7,9,9,10,11,13,13,14,15]
; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vmovd %xmm0, %eax
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
entry:
br label %vector.body
@@ -1156,100 +1153,100 @@ define i32 @sad_nonloop_16i8(<16 x i8>* nocapture readonly %p, i64, <16 x i8>* n
define i32 @sad_nonloop_32i8(<32 x i8>* nocapture readonly %p, i64, <32 x i8>* nocapture readonly %q) local_unnamed_addr #0 {
; SSE2-LABEL: sad_nonloop_32i8:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqu (%rdi), %xmm12
-; SSE2-NEXT: movdqu 16(%rdi), %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm2[2,3,0,1]
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm5[0],xmm13[1],xmm5[1],xmm13[2],xmm5[2],xmm13[3],xmm5[3],xmm13[4],xmm5[4],xmm13[5],xmm5[5],xmm13[6],xmm5[6],xmm13[7],xmm5[7]
-; SSE2-NEXT: movdqa %xmm13, %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm5[4],xmm9[5],xmm5[5],xmm9[6],xmm5[6],xmm9[7],xmm5[7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm12[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; SSE2-NEXT: movdqa %xmm2, %xmm10
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm5[4],xmm10[5],xmm5[5],xmm10[6],xmm5[6],xmm10[7],xmm5[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm5[0],xmm12[1],xmm5[1],xmm12[2],xmm5[2],xmm12[3],xmm5[3],xmm12[4],xmm5[4],xmm12[5],xmm5[5],xmm12[6],xmm5[6],xmm12[7],xmm5[7]
-; SSE2-NEXT: movdqa %xmm12, %xmm11
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm5[4],xmm11[5],xmm5[5],xmm11[6],xmm5[6],xmm11[7],xmm5[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm5[0],xmm13[1],xmm5[1],xmm13[2],xmm5[2],xmm13[3],xmm5[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm5[0],xmm12[1],xmm5[1],xmm12[2],xmm5[2],xmm12[3],xmm5[3]
-; SSE2-NEXT: movdqu (%rdx), %xmm7
-; SSE2-NEXT: movdqu 16(%rdx), %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm0[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; SSE2-NEXT: movdqa %xmm6, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm7[2,3,0,1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; SSE2-NEXT: movdqa %xmm4, %xmm14
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm5[4],xmm14[5],xmm5[5],xmm14[6],xmm5[6],xmm14[7],xmm5[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm15
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm5[4],xmm15[5],xmm5[5],xmm15[6],xmm5[6],xmm15[7],xmm5[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
-; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm5[4],xmm8[5],xmm5[5],xmm8[6],xmm5[6],xmm8[7],xmm5[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
-; SSE2-NEXT: psubd %xmm7, %xmm12
-; SSE2-NEXT: psubd %xmm0, %xmm2
-; SSE2-NEXT: psubd %xmm4, %xmm1
-; SSE2-NEXT: psubd %xmm6, %xmm13
-; SSE2-NEXT: psubd %xmm8, %xmm11
-; SSE2-NEXT: psubd %xmm15, %xmm10
-; SSE2-NEXT: psubd %xmm14, %xmm3
+; SSE2-NEXT: movdqu (%rdi), %xmm0
+; SSE2-NEXT: movdqu 16(%rdi), %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: movdqa %xmm3, %xmm12
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3],xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm12, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm13
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm4[0],xmm13[1],xmm4[1],xmm13[2],xmm4[2],xmm13[3],xmm4[3],xmm13[4],xmm4[4],xmm13[5],xmm4[5],xmm13[6],xmm4[6],xmm13[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm13, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm11
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm4[0],xmm13[1],xmm4[1],xmm13[2],xmm4[2],xmm13[3],xmm4[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: movdqu (%rdx), %xmm5
+; SSE2-NEXT: movdqu 16(%rdx), %xmm7
+; SSE2-NEXT: movdqa %xmm7, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm2, %xmm14
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm4[4],xmm14[5],xmm4[5],xmm14[6],xmm4[6],xmm14[7],xmm4[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm7, %xmm15
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm4[4],xmm15[5],xmm4[5],xmm15[6],xmm4[6],xmm15[7],xmm4[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm5, %xmm8
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; SSE2-NEXT: psubd %xmm5, %xmm0
+; SSE2-NEXT: psubd %xmm7, %xmm3
+; SSE2-NEXT: psubd %xmm2, %xmm13
+; SSE2-NEXT: psubd %xmm1, %xmm12
+; SSE2-NEXT: psubd %xmm8, %xmm6
+; SSE2-NEXT: psubd %xmm15, %xmm11
+; SSE2-NEXT: psubd %xmm14, %xmm10
; SSE2-NEXT: psubd -{{[0-9]+}}(%rsp), %xmm9 # 16-byte Folded Reload
-; SSE2-NEXT: movdqa %xmm9, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm9
-; SSE2-NEXT: pxor %xmm0, %xmm9
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm0, %xmm3
-; SSE2-NEXT: movdqa %xmm10, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm10
-; SSE2-NEXT: pxor %xmm0, %xmm10
-; SSE2-NEXT: movdqa %xmm11, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm11
-; SSE2-NEXT: pxor %xmm0, %xmm11
-; SSE2-NEXT: movdqa %xmm13, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm13
-; SSE2-NEXT: pxor %xmm0, %xmm13
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm2
-; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm12, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm12
-; SSE2-NEXT: pxor %xmm0, %xmm12
-; SSE2-NEXT: paddd %xmm13, %xmm1
-; SSE2-NEXT: paddd %xmm9, %xmm3
-; SSE2-NEXT: paddd %xmm10, %xmm3
-; SSE2-NEXT: paddd %xmm11, %xmm3
-; SSE2-NEXT: paddd %xmm2, %xmm1
-; SSE2-NEXT: paddd %xmm3, %xmm1
-; SSE2-NEXT: paddd %xmm12, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT: movdqa %xmm9, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm9
+; SSE2-NEXT: pxor %xmm1, %xmm9
+; SSE2-NEXT: movdqa %xmm10, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm10
+; SSE2-NEXT: pxor %xmm1, %xmm10
+; SSE2-NEXT: movdqa %xmm11, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm11
+; SSE2-NEXT: pxor %xmm1, %xmm11
+; SSE2-NEXT: movdqa %xmm6, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm6
+; SSE2-NEXT: pxor %xmm1, %xmm6
+; SSE2-NEXT: movdqa %xmm12, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm12
+; SSE2-NEXT: pxor %xmm1, %xmm12
+; SSE2-NEXT: movdqa %xmm13, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm13
+; SSE2-NEXT: pxor %xmm1, %xmm13
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm3
+; SSE2-NEXT: pxor %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: paddd %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: paddd %xmm3, %xmm0
+; SSE2-NEXT: paddd %xmm11, %xmm6
+; SSE2-NEXT: paddd %xmm9, %xmm6
+; SSE2-NEXT: paddd %xmm10, %xmm6
+; SSE2-NEXT: paddd %xmm12, %xmm0
+; SSE2-NEXT: paddd %xmm6, %xmm0
+; SSE2-NEXT: paddd %xmm13, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT: paddd %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: retq
;
; AVX2-LABEL: sad_nonloop_32i8:
@@ -1273,6 +1270,7 @@ define i32 @sad_nonloop_32i8(<32 x i8>* nocapture readonly %p, i64, <32 x i8>* n
; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512F-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vmovd %xmm0, %eax
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: sad_nonloop_32i8:
@@ -1284,6 +1282,7 @@ define i32 @sad_nonloop_32i8(<32 x i8>* nocapture readonly %p, i64, <32 x i8>* n
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BW-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vmovd %xmm0, %eax
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%v1 = load <32 x i8>, <32 x i8>* %p, align 1
%z1 = zext <32 x i8> %v1 to <32 x i32>
diff --git a/test/CodeGen/X86/sad_variations.ll b/test/CodeGen/X86/sad_variations.ll
new file mode 100644
index 000000000000..1d826cf41a4d
--- /dev/null
+++ b/test/CodeGen/X86/sad_variations.ll
@@ -0,0 +1,347 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512F
+
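+; Each function below computes an 8-element sum of absolute differences in
+; straight-line IR: both <8 x i8> inputs are widened, subtracted, the
+; difference is made non-negative via one of several equivalent
+; icmp/negate/select formulations, and the result is summed with a
+; shufflevector reduction tree. Every CHECK block expects the whole pattern
+; to collapse to a single (v)psadbw, so the sge/sgt/sle/slt variants verify
+; that the backend's SAD matching accepts each abs idiom.
+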
+define i32 @sad8_32bit_icmp_sge(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i32 %stride) local_unnamed_addr #0 {
+; SSE2-LABEL: sad8_32bit_icmp_sge:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSE2-NEXT: psadbw %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: sad8_32bit_icmp_sge:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: sad8_32bit_icmp_sge:
+; AVX512F: # BB#0: # %entry
+; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT: vmovd %xmm0, %eax
+; AVX512F-NEXT: retq
+
+entry:
+ %idx.ext = zext i32 %stride to i64
+ br label %for.body
+
+for.body: ; preds = %entry
+ %0 = bitcast i8* %cur to <8 x i8>*
+ %1 = load <8 x i8>, <8 x i8>* %0, align 1
+ %2 = zext <8 x i8> %1 to <8 x i32>
+ %3 = bitcast i8* %ref to <8 x i8>*
+ %4 = load <8 x i8>, <8 x i8>* %3, align 1
+ %5 = zext <8 x i8> %4 to <8 x i32>
+ %6 = sub nsw <8 x i32> %2, %5
+ %7 = icmp sgt <8 x i32> %6, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+ %8 = sub nsw <8 x i32> zeroinitializer, %6
+ %9 = select <8 x i1> %7, <8 x i32> %6, <8 x i32> %8
+ %rdx.shuf = shufflevector <8 x i32> %9, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx = add <8 x i32> %9, %rdx.shuf
+ %rdx.shuf229 = shufflevector <8 x i32> %bin.rdx, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx230 = add <8 x i32> %bin.rdx, %rdx.shuf229
+ %rdx.shuf231 = shufflevector <8 x i32> %bin.rdx230, <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx232 = add <8 x i32> %bin.rdx230, %rdx.shuf231
+ %10 = extractelement <8 x i32> %bin.rdx232, i32 0
+ ret i32 %10
+}
+
+define i32 @sad8_32bit_icmp_sgt(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i32 %stride) local_unnamed_addr #1 {
+; SSE2-LABEL: sad8_32bit_icmp_sgt:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSE2-NEXT: psadbw %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: sad8_32bit_icmp_sgt:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: sad8_32bit_icmp_sgt:
+; AVX512F: # BB#0: # %entry
+; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT: vmovd %xmm0, %eax
+; AVX512F-NEXT: retq
+entry:
+ %idx.ext = zext i32 %stride to i64
+ br label %for.body
+
+for.body: ; preds = %entry
+ %0 = bitcast i8* %cur to <8 x i8>*
+ %1 = load <8 x i8>, <8 x i8>* %0, align 1
+ %2 = zext <8 x i8> %1 to <8 x i32>
+ %3 = bitcast i8* %ref to <8 x i8>*
+ %4 = load <8 x i8>, <8 x i8>* %3, align 1
+ %5 = zext <8 x i8> %4 to <8 x i32>
+ %6 = sub nsw <8 x i32> %2, %5
+ %7 = icmp sgt <8 x i32> %6, zeroinitializer
+ %8 = sub nsw <8 x i32> zeroinitializer, %6
+ %9 = select <8 x i1> %7, <8 x i32> %6, <8 x i32> %8
+ %rdx.shuf = shufflevector <8 x i32> %9, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx = add <8 x i32> %9, %rdx.shuf
+ %rdx.shuf229 = shufflevector <8 x i32> %bin.rdx, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx230 = add <8 x i32> %bin.rdx, %rdx.shuf229
+ %rdx.shuf231 = shufflevector <8 x i32> %bin.rdx230, <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx232 = add <8 x i32> %bin.rdx230, %rdx.shuf231
+ %10 = extractelement <8 x i32> %bin.rdx232, i32 0
+ ret i32 %10
+}
+
+define i32 @sad8_32bit_icmp_sle(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i32 %stride) local_unnamed_addr #2 {
+; SSE2-LABEL: sad8_32bit_icmp_sle:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSE2-NEXT: psadbw %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: sad8_32bit_icmp_sle:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: sad8_32bit_icmp_sle:
+; AVX512F: # BB#0: # %entry
+; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT: vmovd %xmm0, %eax
+; AVX512F-NEXT: retq
+entry:
+ %idx.ext = zext i32 %stride to i64
+ br label %for.body
+
+for.body: ; preds = %entry
+ %0 = bitcast i8* %cur to <8 x i8>*
+ %1 = load <8 x i8>, <8 x i8>* %0, align 1
+ %2 = zext <8 x i8> %1 to <8 x i32>
+ %3 = bitcast i8* %ref to <8 x i8>*
+ %4 = load <8 x i8>, <8 x i8>* %3, align 1
+ %5 = zext <8 x i8> %4 to <8 x i32>
+ %6 = sub nsw <8 x i32> %2, %5
+ %7 = icmp slt <8 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %8 = sub nsw <8 x i32> zeroinitializer, %6
+ %9 = select <8 x i1> %7, <8 x i32> %8, <8 x i32> %6
+ %rdx.shuf = shufflevector <8 x i32> %9, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx = add <8 x i32> %9, %rdx.shuf
+ %rdx.shuf229 = shufflevector <8 x i32> %bin.rdx, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx230 = add <8 x i32> %bin.rdx, %rdx.shuf229
+ %rdx.shuf231 = shufflevector <8 x i32> %bin.rdx230, <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx232 = add <8 x i32> %bin.rdx230, %rdx.shuf231
+ %10 = extractelement <8 x i32> %bin.rdx232, i32 0
+ ret i32 %10
+}
+
+define i32 @sad8_32bit_icmp_slt(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i32 %stride) local_unnamed_addr #3 {
+; SSE2-LABEL: sad8_32bit_icmp_slt:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSE2-NEXT: psadbw %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: sad8_32bit_icmp_slt:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: sad8_32bit_icmp_slt:
+; AVX512F: # BB#0: # %entry
+; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT: vmovd %xmm0, %eax
+; AVX512F-NEXT: retq
+entry:
+ %idx.ext = zext i32 %stride to i64
+ br label %for.body
+
+for.body: ; preds = %entry
+ %0 = bitcast i8* %cur to <8 x i8>*
+ %1 = load <8 x i8>, <8 x i8>* %0, align 1
+ %2 = zext <8 x i8> %1 to <8 x i32>
+ %3 = bitcast i8* %ref to <8 x i8>*
+ %4 = load <8 x i8>, <8 x i8>* %3, align 1
+ %5 = zext <8 x i8> %4 to <8 x i32>
+ %6 = sub nsw <8 x i32> %2, %5
+ %7 = icmp slt <8 x i32> %6, zeroinitializer
+ %8 = sub nsw <8 x i32> zeroinitializer, %6
+ %9 = select <8 x i1> %7, <8 x i32> %8, <8 x i32> %6
+ %rdx.shuf = shufflevector <8 x i32> %9, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx = add <8 x i32> %9, %rdx.shuf
+ %rdx.shuf229 = shufflevector <8 x i32> %bin.rdx, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx230 = add <8 x i32> %bin.rdx, %rdx.shuf229
+ %rdx.shuf231 = shufflevector <8 x i32> %bin.rdx230, <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx232 = add <8 x i32> %bin.rdx230, %rdx.shuf231
+ %10 = extractelement <8 x i32> %bin.rdx232, i32 0
+ ret i32 %10
+}
+
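+; The i64-returning variants below check that SAD matching still fires when
+; the absolute difference is sign- or zero-extended to i64 before the
+; reduction (or, in sad8_early_64bit_icmp_zext_slt, when the whole
+; computation is carried out in i64 lanes), with the final sum moved from
+; the low qword of the psadbw result into %rax.
+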
+define i64 @sad8_64bit_icmp_sext_slt(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i64 %stride) local_unnamed_addr #4 {
+; SSE2-LABEL: sad8_64bit_icmp_sext_slt:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSE2-NEXT: psadbw %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %rax
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: sad8_64bit_icmp_sext_slt:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: sad8_64bit_icmp_sext_slt:
+; AVX512F: # BB#0: # %entry
+; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT: vmovq %xmm0, %rax
+; AVX512F-NEXT: retq
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry
+ %0 = bitcast i8* %cur to <8 x i8>*
+ %1 = load <8 x i8>, <8 x i8>* %0, align 1
+ %2 = zext <8 x i8> %1 to <8 x i32>
+ %3 = bitcast i8* %ref to <8 x i8>*
+ %4 = load <8 x i8>, <8 x i8>* %3, align 1
+ %5 = zext <8 x i8> %4 to <8 x i32>
+ %6 = sub nsw <8 x i32> %2, %5
+ %7 = icmp slt <8 x i32> %6, zeroinitializer
+ %8 = sub nsw <8 x i32> zeroinitializer, %6
+ %9 = select <8 x i1> %7, <8 x i32> %8, <8 x i32> %6
+ %10 = sext <8 x i32> %9 to <8 x i64>
+ %rdx.shuf = shufflevector <8 x i64> %10, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx = add <8 x i64> %rdx.shuf, %10
+ %rdx.shuf236 = shufflevector <8 x i64> %bin.rdx, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx237 = add <8 x i64> %bin.rdx, %rdx.shuf236
+ %rdx.shuf238 = shufflevector <8 x i64> %bin.rdx237, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx239 = add <8 x i64> %bin.rdx237, %rdx.shuf238
+ %11 = extractelement <8 x i64> %bin.rdx239, i32 0
+ ret i64 %11
+}
+
+define i64 @sad8_64bit_icmp_zext_slt(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i64 %stride) local_unnamed_addr #4 {
+; SSE2-LABEL: sad8_64bit_icmp_zext_slt:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSE2-NEXT: psadbw %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %rax
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: sad8_64bit_icmp_zext_slt:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: sad8_64bit_icmp_zext_slt:
+; AVX512F: # BB#0: # %entry
+; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT: vmovq %xmm0, %rax
+; AVX512F-NEXT: retq
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry
+ %0 = bitcast i8* %cur to <8 x i8>*
+ %1 = load <8 x i8>, <8 x i8>* %0, align 1
+ %2 = zext <8 x i8> %1 to <8 x i32>
+ %3 = bitcast i8* %ref to <8 x i8>*
+ %4 = load <8 x i8>, <8 x i8>* %3, align 1
+ %5 = zext <8 x i8> %4 to <8 x i32>
+ %6 = sub nsw <8 x i32> %2, %5
+ %7 = icmp slt <8 x i32> %6, zeroinitializer
+ %8 = sub nsw <8 x i32> zeroinitializer, %6
+ %9 = select <8 x i1> %7, <8 x i32> %8, <8 x i32> %6
+ %10 = zext <8 x i32> %9 to <8 x i64>
+ %rdx.shuf = shufflevector <8 x i64> %10, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx = add <8 x i64> %rdx.shuf, %10
+ %rdx.shuf236 = shufflevector <8 x i64> %bin.rdx, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx237 = add <8 x i64> %bin.rdx, %rdx.shuf236
+ %rdx.shuf238 = shufflevector <8 x i64> %bin.rdx237, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx239 = add <8 x i64> %bin.rdx237, %rdx.shuf238
+ %11 = extractelement <8 x i64> %bin.rdx239, i32 0
+ ret i64 %11
+}
+
+define i64 @sad8_early_64bit_icmp_zext_slt(i8* nocapture readonly %cur, i8* nocapture readonly %ref, i64 %stride) local_unnamed_addr #4 {
+; SSE2-LABEL: sad8_early_64bit_icmp_zext_slt:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSE2-NEXT: psadbw %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %rax
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: sad8_early_64bit_icmp_zext_slt:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: sad8_early_64bit_icmp_zext_slt:
+; AVX512F: # BB#0: # %entry
+; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT: vmovq %xmm0, %rax
+; AVX512F-NEXT: retq
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry
+ %0 = bitcast i8* %cur to <8 x i8>*
+ %1 = load <8 x i8>, <8 x i8>* %0, align 1
+ %2 = zext <8 x i8> %1 to <8 x i64>
+ %3 = bitcast i8* %ref to <8 x i8>*
+ %4 = load <8 x i8>, <8 x i8>* %3, align 1
+ %5 = zext <8 x i8> %4 to <8 x i64>
+ %6 = sub nsw <8 x i64> %2, %5
+ %7 = icmp slt <8 x i64> %6, zeroinitializer
+ %8 = sub nsw <8 x i64> zeroinitializer, %6
+ %9 = select <8 x i1> %7, <8 x i64> %8, <8 x i64> %6
+ %rdx.shuf = shufflevector <8 x i64> %9, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx = add <8 x i64> %rdx.shuf, %9
+ %rdx.shuf236 = shufflevector <8 x i64> %bin.rdx, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx237 = add <8 x i64> %bin.rdx, %rdx.shuf236
+ %rdx.shuf238 = shufflevector <8 x i64> %bin.rdx237, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %bin.rdx239 = add <8 x i64> %bin.rdx237, %rdx.shuf238
+ %10 = extractelement <8 x i64> %bin.rdx239, i32 0
+ ret i64 %10
+}
diff --git a/test/CodeGen/X86/safestack.ll b/test/CodeGen/X86/safestack.ll
index 1ff9a050aefb..bd8f57f5e3c9 100644
--- a/test/CodeGen/X86/safestack.ll
+++ b/test/CodeGen/X86/safestack.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=x86_64-linux < %s -o - | FileCheck --check-prefix=LINUX-X64 %s
; RUN: llc -mtriple=i386-linux-android < %s -o - | FileCheck --check-prefix=ANDROID-I386 %s
; RUN: llc -mtriple=x86_64-linux-android < %s -o - | FileCheck --check-prefix=ANDROID-X64 %s
+; RUN: llc -mtriple=x86_64-fuchsia < %s -o - | FileCheck --check-prefix=FUCHSIA-X64 %s
define void @_Z1fv() safestack {
entry:
@@ -30,3 +31,7 @@ declare void @_Z7CapturePi(i32*)
; ANDROID-X64: movq %fs:72, %[[A:.*]]
; ANDROID-X64: leaq -16(%[[A]]), %[[B:.*]]
; ANDROID-X64: movq %[[B]], %fs:72
+
+; FUCHSIA-X64: movq %fs:24, %[[A:.*]]
+; FUCHSIA-X64: leaq -16(%[[A]]), %[[B:.*]]
+; FUCHSIA-X64: movq %[[B]], %fs:24
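+
+; Note: the FUCHSIA-X64 lines above encode the same scheme as Android, with
+; the unsafe stack pointer cached in a fixed per-thread slot off %fs; only
+; the ABI offset differs (%fs:24 on Fuchsia vs. %fs:72 on Android x86-64).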
diff --git a/test/CodeGen/X86/safestack_ssp.ll b/test/CodeGen/X86/safestack_ssp.ll
index 5a1a465158cf..a0415cc98feb 100644
--- a/test/CodeGen/X86/safestack_ssp.ll
+++ b/test/CodeGen/X86/safestack_ssp.ll
@@ -1,6 +1,7 @@
; Test codegen pipeline for SafeStack + StackProtector combination.
; RUN: llc -mtriple=i386-linux < %s -o - | FileCheck --check-prefix=LINUX-I386 %s
; RUN: llc -mtriple=x86_64-linux < %s -o - | FileCheck --check-prefix=LINUX-X64 %s
+; RUN: llc -mtriple=x86_64-fuchsia < %s -o - | FileCheck --check-prefix=FUCHSIA-X64 %s
define void @_Z1fv() safestack sspreq {
entry:
@@ -25,3 +26,9 @@ declare void @_Z7CapturePi(i32*)
; LINUX-I386-DAG: leal -16(%[[B]]), %[[C:.*]]
; LINUX-I386-DAG: movl %[[C]], %gs:(%[[A]])
; LINUX-I386-DAG: movl %[[COOKIE]], -4(%[[B]])
+
+; FUCHSIA-X64-DAG: movq %fs:24, %[[B:.*]]
+; FUCHSIA-X64-DAG: movq %fs:16, %[[COOKIE:.*]]
+; FUCHSIA-X64-DAG: leaq -16(%[[B]]), %[[C:.*]]
+; FUCHSIA-X64-DAG: movq %[[C]], %fs:24
+; FUCHSIA-X64-DAG: movq %[[COOKIE]], -8(%[[B]])
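+
+; Note: with SSP combined in, the Fuchsia checks additionally load the stack
+; cookie from %fs:16 and store it at the top of the newly carved unsafe
+; frame (-8(%[[B]])), mirroring the LINUX-I386 pattern above at different
+; offsets.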
diff --git a/test/CodeGen/X86/scalar-int-to-fp.ll b/test/CodeGen/X86/scalar-int-to-fp.ll
index 47774e2289f6..2b19d02ba8b5 100644
--- a/test/CodeGen/X86/scalar-int-to-fp.ll
+++ b/test/CodeGen/X86/scalar-int-to-fp.ll
@@ -1,175 +1,736 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK32 --check-prefix=AVX512_32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK64 --check-prefix=AVX512_64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK32 --check-prefix=SSE2_32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK64 --check-prefix=SSE2_64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=-sse | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK32 --check-prefix=X87
+
; Verify that scalar integer conversions to FP compile successfully
; (at one time long double failed with avx512f), and that reasonable
; instruction sequences are selected based on subtarget features.
-; Due to the plethora of reasonable sequences we just check for
-; one key instruction, usually a cvt or fild, allowing the test
-; to be relatively easily updated when sequences are improved.
-;
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512_32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512_64
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE2_32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE2_64
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=-sse | FileCheck %s --check-prefix=CHECK --check-prefix=X87
-; CHECK-LABEL: u32_to_f
-; AVX512_32: vcvtusi2ssl
-; AVX512_64: vcvtusi2ssl
-; SSE2_32: cvtsd2ss
-; SSE2_64: cvtsi2ssq
-; X87: fildll
define float @u32_to_f(i32 %a) nounwind {
+; AVX512_32-LABEL: u32_to_f:
+; AVX512_32: # BB#0:
+; AVX512_32-NEXT: pushl %eax
+; AVX512_32-NEXT: vcvtusi2ssl {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512_32-NEXT: vmovss %xmm0, (%esp)
+; AVX512_32-NEXT: flds (%esp)
+; AVX512_32-NEXT: popl %eax
+; AVX512_32-NEXT: retl
+;
+; AVX512_64-LABEL: u32_to_f:
+; AVX512_64: # BB#0:
+; AVX512_64-NEXT: vcvtusi2ssl %edi, %xmm0, %xmm0
+; AVX512_64-NEXT: retq
+;
+; SSE2_32-LABEL: u32_to_f:
+; SSE2_32: # BB#0:
+; SSE2_32-NEXT: pushl %eax
+; SSE2_32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2_32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2_32-NEXT: orpd %xmm0, %xmm1
+; SSE2_32-NEXT: subsd %xmm0, %xmm1
+; SSE2_32-NEXT: xorps %xmm0, %xmm0
+; SSE2_32-NEXT: cvtsd2ss %xmm1, %xmm0
+; SSE2_32-NEXT: movss %xmm0, (%esp)
+; SSE2_32-NEXT: flds (%esp)
+; SSE2_32-NEXT: popl %eax
+; SSE2_32-NEXT: retl
+;
+; SSE2_64-LABEL: u32_to_f:
+; SSE2_64: # BB#0:
+; SSE2_64-NEXT: movl %edi, %eax
+; SSE2_64-NEXT: cvtsi2ssq %rax, %xmm0
+; SSE2_64-NEXT: retq
+;
+; X87-LABEL: u32_to_f:
+; X87: # BB#0:
+; X87-NEXT: pushl %ebp
+; X87-NEXT: movl %esp, %ebp
+; X87-NEXT: andl $-8, %esp
+; X87-NEXT: subl $8, %esp
+; X87-NEXT: movl 8(%ebp), %eax
+; X87-NEXT: movl %eax, (%esp)
+; X87-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X87-NEXT: fildll (%esp)
+; X87-NEXT: movl %ebp, %esp
+; X87-NEXT: popl %ebp
+; X87-NEXT: retl
%r = uitofp i32 %a to float
ret float %r
}
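+; The SSE2_32 sequence above (repeated for u32_to_d and u32_to_x below) is
+; the classic 2^52 trick for uitofp of an i32: the integer is loaded as raw
+; bits into the low dword of an xmm register (movss), OR'd into a memory
+; constant that is presumably the bit pattern of 2^52 (orpd), and 2^52 is
+; subtracted (subsd), leaving the exact double value of the unsigned input;
+; here cvtsd2ss then rounds it to float. The constants themselves are not
+; visible in the CHECK lines, but the shape matches the standard bias trick.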
-; CHECK-LABEL: s32_to_f
-; AVX512_32: vcvtsi2ssl
-; AVX512_64: vcvtsi2ssl
-; SSE2_32: cvtsi2ssl
-; SSE2_64: cvtsi2ssl
-; X87: fildl
define float @s32_to_f(i32 %a) nounwind {
+; AVX512_32-LABEL: s32_to_f:
+; AVX512_32: # BB#0:
+; AVX512_32-NEXT: pushl %eax
+; AVX512_32-NEXT: vcvtsi2ssl {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512_32-NEXT: vmovss %xmm0, (%esp)
+; AVX512_32-NEXT: flds (%esp)
+; AVX512_32-NEXT: popl %eax
+; AVX512_32-NEXT: retl
+;
+; AVX512_64-LABEL: s32_to_f:
+; AVX512_64: # BB#0:
+; AVX512_64-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0
+; AVX512_64-NEXT: retq
+;
+; SSE2_32-LABEL: s32_to_f:
+; SSE2_32: # BB#0:
+; SSE2_32-NEXT: pushl %eax
+; SSE2_32-NEXT: cvtsi2ssl {{[0-9]+}}(%esp), %xmm0
+; SSE2_32-NEXT: movss %xmm0, (%esp)
+; SSE2_32-NEXT: flds (%esp)
+; SSE2_32-NEXT: popl %eax
+; SSE2_32-NEXT: retl
+;
+; SSE2_64-LABEL: s32_to_f:
+; SSE2_64: # BB#0:
+; SSE2_64-NEXT: cvtsi2ssl %edi, %xmm0
+; SSE2_64-NEXT: retq
+;
+; X87-LABEL: s32_to_f:
+; X87: # BB#0:
+; X87-NEXT: pushl %eax
+; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: movl %eax, (%esp)
+; X87-NEXT: fildl (%esp)
+; X87-NEXT: popl %eax
+; X87-NEXT: retl
%r = sitofp i32 %a to float
ret float %r
}
-; CHECK-LABEL: u32_to_d
-; AVX512_32: vcvtusi2sdl
-; AVX512_64: vcvtusi2sdl
-; SSE2_32: subsd
-; SSE2_64: cvtsi2sdq
-; X87: fildll
define double @u32_to_d(i32 %a) nounwind {
+; AVX512_32-LABEL: u32_to_d:
+; AVX512_32: # BB#0:
+; AVX512_32-NEXT: pushl %ebp
+; AVX512_32-NEXT: movl %esp, %ebp
+; AVX512_32-NEXT: andl $-8, %esp
+; AVX512_32-NEXT: subl $8, %esp
+; AVX512_32-NEXT: vcvtusi2sdl 8(%ebp), %xmm0, %xmm0
+; AVX512_32-NEXT: vmovsd %xmm0, (%esp)
+; AVX512_32-NEXT: fldl (%esp)
+; AVX512_32-NEXT: movl %ebp, %esp
+; AVX512_32-NEXT: popl %ebp
+; AVX512_32-NEXT: retl
+;
+; AVX512_64-LABEL: u32_to_d:
+; AVX512_64: # BB#0:
+; AVX512_64-NEXT: vcvtusi2sdl %edi, %xmm0, %xmm0
+; AVX512_64-NEXT: retq
+;
+; SSE2_32-LABEL: u32_to_d:
+; SSE2_32: # BB#0:
+; SSE2_32-NEXT: pushl %ebp
+; SSE2_32-NEXT: movl %esp, %ebp
+; SSE2_32-NEXT: andl $-8, %esp
+; SSE2_32-NEXT: subl $8, %esp
+; SSE2_32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2_32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2_32-NEXT: orpd %xmm0, %xmm1
+; SSE2_32-NEXT: subsd %xmm0, %xmm1
+; SSE2_32-NEXT: movsd %xmm1, (%esp)
+; SSE2_32-NEXT: fldl (%esp)
+; SSE2_32-NEXT: movl %ebp, %esp
+; SSE2_32-NEXT: popl %ebp
+; SSE2_32-NEXT: retl
+;
+; SSE2_64-LABEL: u32_to_d:
+; SSE2_64: # BB#0:
+; SSE2_64-NEXT: movl %edi, %eax
+; SSE2_64-NEXT: cvtsi2sdq %rax, %xmm0
+; SSE2_64-NEXT: retq
+;
+; X87-LABEL: u32_to_d:
+; X87: # BB#0:
+; X87-NEXT: pushl %ebp
+; X87-NEXT: movl %esp, %ebp
+; X87-NEXT: andl $-8, %esp
+; X87-NEXT: subl $8, %esp
+; X87-NEXT: movl 8(%ebp), %eax
+; X87-NEXT: movl %eax, (%esp)
+; X87-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X87-NEXT: fildll (%esp)
+; X87-NEXT: movl %ebp, %esp
+; X87-NEXT: popl %ebp
+; X87-NEXT: retl
%r = uitofp i32 %a to double
ret double %r
}
-; CHECK-LABEL: s32_to_d
-; AVX512_32: vcvtsi2sdl
-; AVX512_64: vcvtsi2sdl
-; SSE2_32: cvtsi2sdl
-; SSE2_64: cvtsi2sdl
-; X87: fildl
define double @s32_to_d(i32 %a) nounwind {
+; AVX512_32-LABEL: s32_to_d:
+; AVX512_32: # BB#0:
+; AVX512_32-NEXT: pushl %ebp
+; AVX512_32-NEXT: movl %esp, %ebp
+; AVX512_32-NEXT: andl $-8, %esp
+; AVX512_32-NEXT: subl $8, %esp
+; AVX512_32-NEXT: vcvtsi2sdl 8(%ebp), %xmm0, %xmm0
+; AVX512_32-NEXT: vmovsd %xmm0, (%esp)
+; AVX512_32-NEXT: fldl (%esp)
+; AVX512_32-NEXT: movl %ebp, %esp
+; AVX512_32-NEXT: popl %ebp
+; AVX512_32-NEXT: retl
+;
+; AVX512_64-LABEL: s32_to_d:
+; AVX512_64: # BB#0:
+; AVX512_64-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0
+; AVX512_64-NEXT: retq
+;
+; SSE2_32-LABEL: s32_to_d:
+; SSE2_32: # BB#0:
+; SSE2_32-NEXT: pushl %ebp
+; SSE2_32-NEXT: movl %esp, %ebp
+; SSE2_32-NEXT: andl $-8, %esp
+; SSE2_32-NEXT: subl $8, %esp
+; SSE2_32-NEXT: cvtsi2sdl 8(%ebp), %xmm0
+; SSE2_32-NEXT: movsd %xmm0, (%esp)
+; SSE2_32-NEXT: fldl (%esp)
+; SSE2_32-NEXT: movl %ebp, %esp
+; SSE2_32-NEXT: popl %ebp
+; SSE2_32-NEXT: retl
+;
+; SSE2_64-LABEL: s32_to_d:
+; SSE2_64: # BB#0:
+; SSE2_64-NEXT: cvtsi2sdl %edi, %xmm0
+; SSE2_64-NEXT: retq
+;
+; X87-LABEL: s32_to_d:
+; X87: # BB#0:
+; X87-NEXT: pushl %eax
+; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: movl %eax, (%esp)
+; X87-NEXT: fildl (%esp)
+; X87-NEXT: popl %eax
+; X87-NEXT: retl
%r = sitofp i32 %a to double
ret double %r
}
-; CHECK-LABEL: u32_to_x
-; AVX512_32: vsubsd
-; AVX512_64: vsubsd
-; SSE2_32: subsd
-; SSE2_64: fildll
-; X87: fildll
define x86_fp80 @u32_to_x(i32 %a) nounwind {
+; AVX512_32-LABEL: u32_to_x:
+; AVX512_32: # BB#0:
+; AVX512_32-NEXT: pushl %ebp
+; AVX512_32-NEXT: movl %esp, %ebp
+; AVX512_32-NEXT: andl $-8, %esp
+; AVX512_32-NEXT: subl $8, %esp
+; AVX512_32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512_32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512_32-NEXT: vorpd %xmm0, %xmm1, %xmm1
+; AVX512_32-NEXT: vsubsd %xmm0, %xmm1, %xmm0
+; AVX512_32-NEXT: vmovsd %xmm0, (%esp)
+; AVX512_32-NEXT: fldl (%esp)
+; AVX512_32-NEXT: movl %ebp, %esp
+; AVX512_32-NEXT: popl %ebp
+; AVX512_32-NEXT: retl
+;
+; AVX512_64-LABEL: u32_to_x:
+; AVX512_64: # BB#0:
+; AVX512_64-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512_64-NEXT: vmovd %edi, %xmm1
+; AVX512_64-NEXT: vpor %xmm0, %xmm1, %xmm1
+; AVX512_64-NEXT: vsubsd %xmm0, %xmm1, %xmm0
+; AVX512_64-NEXT: vmovsd %xmm0, -{{[0-9]+}}(%rsp)
+; AVX512_64-NEXT: fldl -{{[0-9]+}}(%rsp)
+; AVX512_64-NEXT: retq
+;
+; SSE2_32-LABEL: u32_to_x:
+; SSE2_32: # BB#0:
+; SSE2_32-NEXT: pushl %ebp
+; SSE2_32-NEXT: movl %esp, %ebp
+; SSE2_32-NEXT: andl $-8, %esp
+; SSE2_32-NEXT: subl $8, %esp
+; SSE2_32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2_32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2_32-NEXT: orpd %xmm0, %xmm1
+; SSE2_32-NEXT: subsd %xmm0, %xmm1
+; SSE2_32-NEXT: movsd %xmm1, (%esp)
+; SSE2_32-NEXT: fldl (%esp)
+; SSE2_32-NEXT: movl %ebp, %esp
+; SSE2_32-NEXT: popl %ebp
+; SSE2_32-NEXT: retl
+;
+; SSE2_64-LABEL: u32_to_x:
+; SSE2_64: # BB#0:
+; SSE2_64-NEXT: movl %edi, %eax
+; SSE2_64-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; SSE2_64-NEXT: fildll -{{[0-9]+}}(%rsp)
+; SSE2_64-NEXT: retq
+;
+; X87-LABEL: u32_to_x:
+; X87: # BB#0:
+; X87-NEXT: pushl %ebp
+; X87-NEXT: movl %esp, %ebp
+; X87-NEXT: andl $-8, %esp
+; X87-NEXT: subl $8, %esp
+; X87-NEXT: movl 8(%ebp), %eax
+; X87-NEXT: movl %eax, (%esp)
+; X87-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X87-NEXT: fildll (%esp)
+; X87-NEXT: movl %ebp, %esp
+; X87-NEXT: popl %ebp
+; X87-NEXT: retl
%r = uitofp i32 %a to x86_fp80
ret x86_fp80 %r
}
-; CHECK-LABEL: s32_to_x
-; CHECK: fildl
define x86_fp80 @s32_to_x(i32 %a) nounwind {
+; CHECK32-LABEL: s32_to_x:
+; CHECK32: # BB#0:
+; CHECK32-NEXT: pushl %eax
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT: movl %eax, (%esp)
+; CHECK32-NEXT: fildl (%esp)
+; CHECK32-NEXT: popl %eax
+; CHECK32-NEXT: retl
+;
+; CHECK64-LABEL: s32_to_x:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movl %edi, -{{[0-9]+}}(%rsp)
+; CHECK64-NEXT: fildl -{{[0-9]+}}(%rsp)
+; CHECK64-NEXT: retq
%r = sitofp i32 %a to x86_fp80
ret x86_fp80 %r
}
-; CHECK-LABEL: u64_to_f
-; AVX512_32: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX512_32: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; AVX512_32: fildll
-
-; AVX512_64: vcvtusi2ssq
-
-; SSE2_32: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE2_32: movlps %xmm0, {{[0-9]+}}(%esp)
-; SSE2_32: fildll
-
-; SSE2_64: cvtsi2ssq
-; X87: fildll
define float @u64_to_f(i64 %a) nounwind {
+; AVX512_32-LABEL: u64_to_f:
+; AVX512_32: # BB#0:
+; AVX512_32-NEXT: pushl %ebp
+; AVX512_32-NEXT: movl %esp, %ebp
+; AVX512_32-NEXT: andl $-8, %esp
+; AVX512_32-NEXT: subl $16, %esp
+; AVX512_32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512_32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
+; AVX512_32-NEXT: xorl %eax, %eax
+; AVX512_32-NEXT: cmpl $0, 12(%ebp)
+; AVX512_32-NEXT: setns %al
+; AVX512_32-NEXT: fildll {{[0-9]+}}(%esp)
+; AVX512_32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX512_32-NEXT: fstps {{[0-9]+}}(%esp)
+; AVX512_32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512_32-NEXT: vmovss %xmm0, (%esp)
+; AVX512_32-NEXT: flds (%esp)
+; AVX512_32-NEXT: movl %ebp, %esp
+; AVX512_32-NEXT: popl %ebp
+; AVX512_32-NEXT: retl
+;
+; AVX512_64-LABEL: u64_to_f:
+; AVX512_64: # BB#0:
+; AVX512_64-NEXT: vcvtusi2ssq %rdi, %xmm0, %xmm0
+; AVX512_64-NEXT: retq
+;
+; SSE2_32-LABEL: u64_to_f:
+; SSE2_32: # BB#0:
+; SSE2_32-NEXT: pushl %ebp
+; SSE2_32-NEXT: movl %esp, %ebp
+; SSE2_32-NEXT: andl $-8, %esp
+; SSE2_32-NEXT: subl $16, %esp
+; SSE2_32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2_32-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
+; SSE2_32-NEXT: xorl %eax, %eax
+; SSE2_32-NEXT: cmpl $0, 12(%ebp)
+; SSE2_32-NEXT: setns %al
+; SSE2_32-NEXT: fildll {{[0-9]+}}(%esp)
+; SSE2_32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; SSE2_32-NEXT: fstps {{[0-9]+}}(%esp)
+; SSE2_32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2_32-NEXT: movss %xmm0, (%esp)
+; SSE2_32-NEXT: flds (%esp)
+; SSE2_32-NEXT: movl %ebp, %esp
+; SSE2_32-NEXT: popl %ebp
+; SSE2_32-NEXT: retl
+;
+; SSE2_64-LABEL: u64_to_f:
+; SSE2_64: # BB#0:
+; SSE2_64-NEXT: testq %rdi, %rdi
+; SSE2_64-NEXT: js .LBB6_1
+; SSE2_64-NEXT: # BB#2:
+; SSE2_64-NEXT: cvtsi2ssq %rdi, %xmm0
+; SSE2_64-NEXT: retq
+; SSE2_64-NEXT: .LBB6_1:
+; SSE2_64-NEXT: movq %rdi, %rax
+; SSE2_64-NEXT: shrq %rax
+; SSE2_64-NEXT: andl $1, %edi
+; SSE2_64-NEXT: orq %rax, %rdi
+; SSE2_64-NEXT: cvtsi2ssq %rdi, %xmm0
+; SSE2_64-NEXT: addss %xmm0, %xmm0
+; SSE2_64-NEXT: retq
+;
+; X87-LABEL: u64_to_f:
+; X87: # BB#0:
+; X87-NEXT: pushl %ebp
+; X87-NEXT: movl %esp, %ebp
+; X87-NEXT: andl $-8, %esp
+; X87-NEXT: subl $16, %esp
+; X87-NEXT: movl 8(%ebp), %eax
+; X87-NEXT: movl 12(%ebp), %ecx
+; X87-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X87-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X87-NEXT: xorl %eax, %eax
+; X87-NEXT: testl %ecx, %ecx
+; X87-NEXT: setns %al
+; X87-NEXT: fildll {{[0-9]+}}(%esp)
+; X87-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; X87-NEXT: fstps {{[0-9]+}}(%esp)
+; X87-NEXT: flds {{[0-9]+}}(%esp)
+; X87-NEXT: movl %ebp, %esp
+; X87-NEXT: popl %ebp
+; X87-NEXT: retl
%r = uitofp i64 %a to float
ret float %r
}
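+; Two standard u64->float lowerings are visible above: on 64-bit SSE2 a
+; negative input is halved with its low bit OR'd back in (round-to-odd),
+; converted as signed, and doubled with addss, which keeps the result
+; correctly rounded; the 32-bit paths instead do a signed fildll and then
+; fadds from a two-entry constant table indexed by setns, presumably adding
+; 2^64 back when the sign bit was set.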
-; CHECK-LABEL: s64_to_f
-; AVX512_32: fildll
-; AVX512_64: vcvtsi2ssq
-; SSE2_32: fildll
-; SSE2_64: cvtsi2ssq
-; X87: fildll
define float @s64_to_f(i64 %a) nounwind {
+; AVX512_32-LABEL: s64_to_f:
+; AVX512_32: # BB#0:
+; AVX512_32-NEXT: pushl %eax
+; AVX512_32-NEXT: fildll {{[0-9]+}}(%esp)
+; AVX512_32-NEXT: fstps (%esp)
+; AVX512_32-NEXT: flds (%esp)
+; AVX512_32-NEXT: popl %eax
+; AVX512_32-NEXT: retl
+;
+; AVX512_64-LABEL: s64_to_f:
+; AVX512_64: # BB#0:
+; AVX512_64-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0
+; AVX512_64-NEXT: retq
+;
+; SSE2_32-LABEL: s64_to_f:
+; SSE2_32: # BB#0:
+; SSE2_32-NEXT: pushl %eax
+; SSE2_32-NEXT: fildll {{[0-9]+}}(%esp)
+; SSE2_32-NEXT: fstps (%esp)
+; SSE2_32-NEXT: flds (%esp)
+; SSE2_32-NEXT: popl %eax
+; SSE2_32-NEXT: retl
+;
+; SSE2_64-LABEL: s64_to_f:
+; SSE2_64: # BB#0:
+; SSE2_64-NEXT: cvtsi2ssq %rdi, %xmm0
+; SSE2_64-NEXT: retq
+;
+; X87-LABEL: s64_to_f:
+; X87: # BB#0:
+; X87-NEXT: fildll {{[0-9]+}}(%esp)
+; X87-NEXT: retl
%r = sitofp i64 %a to float
ret float %r
}
-; CHECK-LABEL: s64_to_f_2
-; SSE2_32: movd %ecx, %xmm0
-; SSE2_32: movd %eax, %xmm1
-; SSE2_32: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2_32: movq %xmm1, {{[0-9]+}}(%esp)
-; SSE2_32: fildll {{[0-9]+}}(%esp)
-
-; AVX512_32: vmovd %eax, %xmm0
-; AVX512_32: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX512_32: vmovq %xmm0, {{[0-9]+}}(%esp)
-; AVX512_32: fildll {{[0-9]+}}(%esp)
-
define float @s64_to_f_2(i64 %a) nounwind {
+; AVX512_32-LABEL: s64_to_f_2:
+; AVX512_32: # BB#0:
+; AVX512_32-NEXT: pushl %ebp
+; AVX512_32-NEXT: movl %esp, %ebp
+; AVX512_32-NEXT: andl $-8, %esp
+; AVX512_32-NEXT: subl $16, %esp
+; AVX512_32-NEXT: movl 8(%ebp), %eax
+; AVX512_32-NEXT: movl 12(%ebp), %ecx
+; AVX512_32-NEXT: addl $5, %eax
+; AVX512_32-NEXT: adcl $0, %ecx
+; AVX512_32-NEXT: vmovd %eax, %xmm0
+; AVX512_32-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
+; AVX512_32-NEXT: vmovq %xmm0, {{[0-9]+}}(%esp)
+; AVX512_32-NEXT: fildll {{[0-9]+}}(%esp)
+; AVX512_32-NEXT: fstps {{[0-9]+}}(%esp)
+; AVX512_32-NEXT: flds {{[0-9]+}}(%esp)
+; AVX512_32-NEXT: movl %ebp, %esp
+; AVX512_32-NEXT: popl %ebp
+; AVX512_32-NEXT: retl
+;
+; AVX512_64-LABEL: s64_to_f_2:
+; AVX512_64: # BB#0:
+; AVX512_64-NEXT: addq $5, %rdi
+; AVX512_64-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0
+; AVX512_64-NEXT: retq
+;
+; SSE2_32-LABEL: s64_to_f_2:
+; SSE2_32: # BB#0:
+; SSE2_32-NEXT: pushl %ebp
+; SSE2_32-NEXT: movl %esp, %ebp
+; SSE2_32-NEXT: andl $-8, %esp
+; SSE2_32-NEXT: subl $16, %esp
+; SSE2_32-NEXT: movl 8(%ebp), %eax
+; SSE2_32-NEXT: movl 12(%ebp), %ecx
+; SSE2_32-NEXT: addl $5, %eax
+; SSE2_32-NEXT: adcl $0, %ecx
+; SSE2_32-NEXT: movd %ecx, %xmm0
+; SSE2_32-NEXT: movd %eax, %xmm1
+; SSE2_32-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2_32-NEXT: movq %xmm1, {{[0-9]+}}(%esp)
+; SSE2_32-NEXT: fildll {{[0-9]+}}(%esp)
+; SSE2_32-NEXT: fstps {{[0-9]+}}(%esp)
+; SSE2_32-NEXT: flds {{[0-9]+}}(%esp)
+; SSE2_32-NEXT: movl %ebp, %esp
+; SSE2_32-NEXT: popl %ebp
+; SSE2_32-NEXT: retl
+;
+; SSE2_64-LABEL: s64_to_f_2:
+; SSE2_64: # BB#0:
+; SSE2_64-NEXT: addq $5, %rdi
+; SSE2_64-NEXT: cvtsi2ssq %rdi, %xmm0
+; SSE2_64-NEXT: retq
+;
+; X87-LABEL: s64_to_f_2:
+; X87: # BB#0:
+; X87-NEXT: pushl %ebp
+; X87-NEXT: movl %esp, %ebp
+; X87-NEXT: andl $-8, %esp
+; X87-NEXT: subl $8, %esp
+; X87-NEXT: movl 8(%ebp), %eax
+; X87-NEXT: movl 12(%ebp), %ecx
+; X87-NEXT: addl $5, %eax
+; X87-NEXT: adcl $0, %ecx
+; X87-NEXT: movl %eax, (%esp)
+; X87-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X87-NEXT: fildll (%esp)
+; X87-NEXT: movl %ebp, %esp
+; X87-NEXT: popl %ebp
+; X87-NEXT: retl
%a1 = add i64 %a, 5
%r = sitofp i64 %a1 to float
ret float %r
}
-; CHECK-LABEL: u64_to_d
-; AVX512_32: vpunpckldq
-; AVX512_64: vcvtusi2sdq
-; SSE2_32: punpckldq
-; SSE2_64: punpckldq
-; X87: fildll
define double @u64_to_d(i64 %a) nounwind {
+; AVX512_32-LABEL: u64_to_d:
+; AVX512_32: # BB#0:
+; AVX512_32-NEXT: pushl %ebp
+; AVX512_32-NEXT: movl %esp, %ebp
+; AVX512_32-NEXT: andl $-8, %esp
+; AVX512_32-NEXT: subl $8, %esp
+; AVX512_32-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512_32-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512_32-NEXT: vsubpd {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX512_32-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; AVX512_32-NEXT: vmovlpd %xmm0, (%esp)
+; AVX512_32-NEXT: fldl (%esp)
+; AVX512_32-NEXT: movl %ebp, %esp
+; AVX512_32-NEXT: popl %ebp
+; AVX512_32-NEXT: retl
+;
+; AVX512_64-LABEL: u64_to_d:
+; AVX512_64: # BB#0:
+; AVX512_64-NEXT: vcvtusi2sdq %rdi, %xmm0, %xmm0
+; AVX512_64-NEXT: retq
+;
+; SSE2_32-LABEL: u64_to_d:
+; SSE2_32: # BB#0:
+; SSE2_32-NEXT: pushl %ebp
+; SSE2_32-NEXT: movl %esp, %ebp
+; SSE2_32-NEXT: andl $-8, %esp
+; SSE2_32-NEXT: subl $8, %esp
+; SSE2_32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2_32-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; SSE2_32-NEXT: subpd {{\.LCPI.*}}, %xmm0
+; SSE2_32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2_32-NEXT: addpd %xmm0, %xmm1
+; SSE2_32-NEXT: movlpd %xmm1, (%esp)
+; SSE2_32-NEXT: fldl (%esp)
+; SSE2_32-NEXT: movl %ebp, %esp
+; SSE2_32-NEXT: popl %ebp
+; SSE2_32-NEXT: retl
+;
+; SSE2_64-LABEL: u64_to_d:
+; SSE2_64: # BB#0:
+; SSE2_64-NEXT: movd %rdi, %xmm1
+; SSE2_64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
+; SSE2_64-NEXT: subpd {{.*}}(%rip), %xmm1
+; SSE2_64-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2_64-NEXT: addpd %xmm1, %xmm0
+; SSE2_64-NEXT: retq
+;
+; X87-LABEL: u64_to_d:
+; X87: # BB#0:
+; X87-NEXT: pushl %ebp
+; X87-NEXT: movl %esp, %ebp
+; X87-NEXT: andl $-8, %esp
+; X87-NEXT: subl $16, %esp
+; X87-NEXT: movl 8(%ebp), %eax
+; X87-NEXT: movl 12(%ebp), %ecx
+; X87-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X87-NEXT: movl %eax, (%esp)
+; X87-NEXT: xorl %eax, %eax
+; X87-NEXT: testl %ecx, %ecx
+; X87-NEXT: setns %al
+; X87-NEXT: fildll (%esp)
+; X87-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; X87-NEXT: fstpl {{[0-9]+}}(%esp)
+; X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X87-NEXT: movl %ebp, %esp
+; X87-NEXT: popl %ebp
+; X87-NEXT: retl
%r = uitofp i64 %a to double
ret double %r
}
-; CHECK-LABEL: s64_to_d
-; AVX512_32: fildll
-; AVX512_64: vcvtsi2sdq
-; SSE2_32: fildll
-; SSE2_64: cvtsi2sdq
-; X87: fildll
define double @s64_to_d(i64 %a) nounwind {
+; AVX512_32-LABEL: s64_to_d:
+; AVX512_32: # BB#0:
+; AVX512_32-NEXT: pushl %ebp
+; AVX512_32-NEXT: movl %esp, %ebp
+; AVX512_32-NEXT: andl $-8, %esp
+; AVX512_32-NEXT: subl $8, %esp
+; AVX512_32-NEXT: fildll 8(%ebp)
+; AVX512_32-NEXT: fstpl (%esp)
+; AVX512_32-NEXT: fldl (%esp)
+; AVX512_32-NEXT: movl %ebp, %esp
+; AVX512_32-NEXT: popl %ebp
+; AVX512_32-NEXT: retl
+;
+; AVX512_64-LABEL: s64_to_d:
+; AVX512_64: # BB#0:
+; AVX512_64-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0
+; AVX512_64-NEXT: retq
+;
+; SSE2_32-LABEL: s64_to_d:
+; SSE2_32: # BB#0:
+; SSE2_32-NEXT: pushl %ebp
+; SSE2_32-NEXT: movl %esp, %ebp
+; SSE2_32-NEXT: andl $-8, %esp
+; SSE2_32-NEXT: subl $8, %esp
+; SSE2_32-NEXT: fildll 8(%ebp)
+; SSE2_32-NEXT: fstpl (%esp)
+; SSE2_32-NEXT: fldl (%esp)
+; SSE2_32-NEXT: movl %ebp, %esp
+; SSE2_32-NEXT: popl %ebp
+; SSE2_32-NEXT: retl
+;
+; SSE2_64-LABEL: s64_to_d:
+; SSE2_64: # BB#0:
+; SSE2_64-NEXT: cvtsi2sdq %rdi, %xmm0
+; SSE2_64-NEXT: retq
+;
+; X87-LABEL: s64_to_d:
+; X87: # BB#0:
+; X87-NEXT: fildll {{[0-9]+}}(%esp)
+; X87-NEXT: retl
%r = sitofp i64 %a to double
ret double %r
}
-; CHECK-LABEL: s64_to_d_2
-; SSE2_32: movd %ecx, %xmm0
-; SSE2_32: movd %eax, %xmm1
-; SSE2_32: punpckldq %xmm0, %xmm1
-; SSE2_32: movq %xmm1, {{[0-9]+}}(%esp)
-; SSE2_32: fildll
-
-; AVX512_32: vmovd %eax, %xmm0
-; AVX512_32: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX512_32: vmovq %xmm0, {{[0-9]+}}(%esp)
-; AVX512_32: fildll
-
define double @s64_to_d_2(i64 %a) nounwind {
+; AVX512_32-LABEL: s64_to_d_2:
+; AVX512_32: # BB#0:
+; AVX512_32-NEXT: pushl %ebp
+; AVX512_32-NEXT: movl %esp, %ebp
+; AVX512_32-NEXT: andl $-8, %esp
+; AVX512_32-NEXT: subl $16, %esp
+; AVX512_32-NEXT: movl 8(%ebp), %eax
+; AVX512_32-NEXT: movl 12(%ebp), %ecx
+; AVX512_32-NEXT: addl $5, %eax
+; AVX512_32-NEXT: adcl $0, %ecx
+; AVX512_32-NEXT: vmovd %eax, %xmm0
+; AVX512_32-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
+; AVX512_32-NEXT: vmovq %xmm0, {{[0-9]+}}(%esp)
+; AVX512_32-NEXT: fildll {{[0-9]+}}(%esp)
+; AVX512_32-NEXT: fstpl (%esp)
+; AVX512_32-NEXT: fldl (%esp)
+; AVX512_32-NEXT: movl %ebp, %esp
+; AVX512_32-NEXT: popl %ebp
+; AVX512_32-NEXT: retl
+;
+; AVX512_64-LABEL: s64_to_d_2:
+; AVX512_64: # BB#0:
+; AVX512_64-NEXT: addq $5, %rdi
+; AVX512_64-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0
+; AVX512_64-NEXT: retq
+;
+; SSE2_32-LABEL: s64_to_d_2:
+; SSE2_32: # BB#0:
+; SSE2_32-NEXT: pushl %ebp
+; SSE2_32-NEXT: movl %esp, %ebp
+; SSE2_32-NEXT: andl $-8, %esp
+; SSE2_32-NEXT: subl $16, %esp
+; SSE2_32-NEXT: movl 8(%ebp), %eax
+; SSE2_32-NEXT: movl 12(%ebp), %ecx
+; SSE2_32-NEXT: addl $5, %eax
+; SSE2_32-NEXT: adcl $0, %ecx
+; SSE2_32-NEXT: movd %ecx, %xmm0
+; SSE2_32-NEXT: movd %eax, %xmm1
+; SSE2_32-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2_32-NEXT: movq %xmm1, {{[0-9]+}}(%esp)
+; SSE2_32-NEXT: fildll {{[0-9]+}}(%esp)
+; SSE2_32-NEXT: fstpl (%esp)
+; SSE2_32-NEXT: fldl (%esp)
+; SSE2_32-NEXT: movl %ebp, %esp
+; SSE2_32-NEXT: popl %ebp
+; SSE2_32-NEXT: retl
+;
+; SSE2_64-LABEL: s64_to_d_2:
+; SSE2_64: # BB#0:
+; SSE2_64-NEXT: addq $5, %rdi
+; SSE2_64-NEXT: cvtsi2sdq %rdi, %xmm0
+; SSE2_64-NEXT: retq
+;
+; X87-LABEL: s64_to_d_2:
+; X87: # BB#0:
+; X87-NEXT: pushl %ebp
+; X87-NEXT: movl %esp, %ebp
+; X87-NEXT: andl $-8, %esp
+; X87-NEXT: subl $8, %esp
+; X87-NEXT: movl 8(%ebp), %eax
+; X87-NEXT: movl 12(%ebp), %ecx
+; X87-NEXT: addl $5, %eax
+; X87-NEXT: adcl $0, %ecx
+; X87-NEXT: movl %eax, (%esp)
+; X87-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X87-NEXT: fildll (%esp)
+; X87-NEXT: movl %ebp, %esp
+; X87-NEXT: popl %ebp
+; X87-NEXT: retl
%b = add i64 %a, 5
%f = sitofp i64 %b to double
ret double %f
}
-; CHECK-LABEL: u64_to_x
-; CHECK: fildll
define x86_fp80 @u64_to_x(i64 %a) nounwind {
+; CHECK32-LABEL: u64_to_x:
+; CHECK32: # BB#0:
+; CHECK32-NEXT: pushl %ebp
+; CHECK32-NEXT: movl %esp, %ebp
+; CHECK32-NEXT: andl $-8, %esp
+; CHECK32-NEXT: subl $8, %esp
+; CHECK32-NEXT: movl 8(%ebp), %eax
+; CHECK32-NEXT: movl 12(%ebp), %ecx
+; CHECK32-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; CHECK32-NEXT: movl %eax, (%esp)
+; CHECK32-NEXT: xorl %eax, %eax
+; CHECK32-NEXT: testl %ecx, %ecx
+; CHECK32-NEXT: setns %al
+; CHECK32-NEXT: fildll (%esp)
+; CHECK32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; CHECK32-NEXT: movl %ebp, %esp
+; CHECK32-NEXT: popl %ebp
+; CHECK32-NEXT: retl
+;
+; CHECK64-LABEL: u64_to_x:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; CHECK64-NEXT: xorl %eax, %eax
+; CHECK64-NEXT: testq %rdi, %rdi
+; CHECK64-NEXT: setns %al
+; CHECK64-NEXT: fildll -{{[0-9]+}}(%rsp)
+; CHECK64-NEXT: fadds {{\.LCPI.*}}(,%rax,4)
+; CHECK64-NEXT: retq
%r = uitofp i64 %a to x86_fp80
ret x86_fp80 %r
}
-; CHECK-LABEL: s64_to_x
-; CHECK: fildll
define x86_fp80 @s64_to_x(i64 %a) nounwind {
+; CHECK32-LABEL: s64_to_x:
+; CHECK32: # BB#0:
+; CHECK32-NEXT: fildll {{[0-9]+}}(%esp)
+; CHECK32-NEXT: retl
+;
+; CHECK64-LABEL: s64_to_x:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; CHECK64-NEXT: fildll -{{[0-9]+}}(%rsp)
+; CHECK64-NEXT: retq
%r = sitofp i64 %a to x86_fp80
ret x86_fp80 %r
}
diff --git a/test/CodeGen/X86/select.ll b/test/CodeGen/X86/select.ll
index a0d814ac4bee..ce42d0d643e8 100644
--- a/test/CodeGen/X86/select.ll
+++ b/test/CodeGen/X86/select.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=generic | FileCheck %s --check-prefix=CHECK --check-prefix=GENERIC
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=atom | FileCheck %s --check-prefix=CHECK --check-prefix=ATOM
+; RUN: llc < %s -mtriple=i386-intel-elfiamcu | FileCheck %s --check-prefix=MCU
; PR5757
%0 = type { i64, i32 }
@@ -14,6 +15,20 @@ define i32 @test1(%0* %p, %0* %q, i1 %r) nounwind {
; CHECK-NEXT: cmovneq %rdi, %rsi
; CHECK-NEXT: movl (%rsi), %eax
; CHECK-NEXT: retq
+;
+; MCU-LABEL: test1:
+; MCU: # BB#0:
+; MCU-NEXT: testb $1, %cl
+; MCU-NEXT: jne .LBB0_1
+; MCU-NEXT: # BB#2:
+; MCU-NEXT: addl $8, %edx
+; MCU-NEXT: movl %edx, %eax
+; MCU-NEXT: movl (%eax), %eax
+; MCU-NEXT: retl
+; MCU-NEXT: .LBB0_1:
+; MCU-NEXT: addl $8, %eax
+; MCU-NEXT: movl (%eax), %eax
+; MCU-NEXT: retl
%t0 = load %0, %0* %p
%t1 = load %0, %0* %q
%t4 = select i1 %r, %0 %t0, %0 %t1
@@ -40,6 +55,26 @@ define i32 @test2() nounwind {
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: retq
; CHECK-NEXT: LBB1_1: ## %bb90
+;
+; MCU-LABEL: test2:
+; MCU: # BB#0: # %entry
+; MCU-NEXT: calll return_false
+; MCU-NEXT: testb $1, %al
+; MCU-NEXT: jne .LBB1_1
+; MCU-NEXT: # BB#2: # %entry
+; MCU-NEXT: movw $-480, %ax # imm = 0xFE20
+; MCU-NEXT: jmp .LBB1_3
+; MCU-NEXT: .LBB1_1:
+; MCU-NEXT: xorl %eax, %eax
+; MCU-NEXT: .LBB1_3: # %entry
+; MCU-NEXT: cwtl
+; MCU-NEXT: shll $3, %eax
+; MCU-NEXT: cmpl $32768, %eax # imm = 0x8000
+; MCU-NEXT: jge .LBB1_4
+; MCU-NEXT: # BB#5: # %bb91
+; MCU-NEXT: xorl %eax, %eax
+; MCU-NEXT: retl
+; MCU-NEXT: .LBB1_4: # %bb90
entry:
%tmp73 = tail call i1 @return_false()
%g.0 = select i1 %tmp73, i16 0, i16 -480
@@ -65,6 +100,14 @@ define float @test3(i32 %x) nounwind readnone {
; CHECK-NEXT: leaq {{.*}}(%rip), %rcx
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: retq
+;
+; MCU-LABEL: test3:
+; MCU: # BB#0: # %entry
+; MCU-NEXT: xorl %ecx, %ecx
+; MCU-NEXT: testl %eax, %eax
+; MCU-NEXT: sete %cl
+; MCU-NEXT: flds {{\.LCPI.*}}(,%ecx,4)
+; MCU-NEXT: retl
entry:
%0 = icmp eq i32 %x, 0
%iftmp.0.0 = select i1 %0, float 4.200000e+01, float 2.300000e+01
@@ -80,6 +123,20 @@ define signext i8 @test4(i8* nocapture %P, double %F) nounwind readonly {
; CHECK-NEXT: seta %al
; CHECK-NEXT: movsbl (%rdi,%rax,4), %eax
; CHECK-NEXT: retq
+;
+; MCU-LABEL: test4:
+; MCU: # BB#0: # %entry
+; MCU-NEXT: movl %eax, %ecx
+; MCU-NEXT: fldl {{[0-9]+}}(%esp)
+; MCU-NEXT: flds {{\.LCPI.*}}
+; MCU-NEXT: fucompp
+; MCU-NEXT: fnstsw %ax
+; MCU-NEXT: xorl %edx, %edx
+; MCU-NEXT: # kill: %AH<def> %AH<kill> %AX<kill>
+; MCU-NEXT: sahf
+; MCU-NEXT: seta %dl
+; MCU-NEXT: movb (%ecx,%edx,4), %al
+; MCU-NEXT: retl
entry:
%0 = fcmp olt double %F, 4.200000e+01
%iftmp.0.0 = select i1 %0, i32 4, i32 0
@@ -100,6 +157,25 @@ define void @test5(i1 %c, <2 x i16> %a, <2 x i16> %b, <2 x i16>* %p) nounwind {
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; CHECK-NEXT: movd %xmm0, (%rsi)
; CHECK-NEXT: retq
+;
+; MCU-LABEL: test5:
+; MCU: # BB#0:
+; MCU-NEXT: pushl %esi
+; MCU-NEXT: andb $1, %al
+; MCU-NEXT: jne .LBB4_2
+; MCU-NEXT: # BB#1:
+; MCU-NEXT: movw {{[0-9]+}}(%esp), %dx
+; MCU-NEXT: .LBB4_2:
+; MCU-NEXT: movl {{[0-9]+}}(%esp), %esi
+; MCU-NEXT: testb %al, %al
+; MCU-NEXT: jne .LBB4_4
+; MCU-NEXT: # BB#3:
+; MCU-NEXT: movw {{[0-9]+}}(%esp), %cx
+; MCU-NEXT: .LBB4_4:
+; MCU-NEXT: movw %dx, (%esi)
+; MCU-NEXT: movw %cx, 2(%esi)
+; MCU-NEXT: popl %esi
+; MCU-NEXT: retl
%x = select i1 %c, <2 x i16> %a, <2 x i16> %b
store <2 x i16> %x, <2 x i16>* %p
ret void
@@ -120,6 +196,57 @@ define void @test6(i32 %C, <4 x float>* %A, <4 x float>* %B) nounwind {
; CHECK-NEXT: mulps %xmm0, %xmm0
; CHECK-NEXT: movaps %xmm0, (%rsi)
; CHECK-NEXT: retq
+;
+; MCU-LABEL: test6:
+; MCU: # BB#0:
+; MCU-NEXT: pushl %eax
+; MCU-NEXT: flds 12(%edx)
+; MCU-NEXT: fstps (%esp) # 4-byte Folded Spill
+; MCU-NEXT: flds 8(%edx)
+; MCU-NEXT: flds 4(%edx)
+; MCU-NEXT: flds (%ecx)
+; MCU-NEXT: flds 4(%ecx)
+; MCU-NEXT: flds 8(%ecx)
+; MCU-NEXT: flds 12(%ecx)
+; MCU-NEXT: fmul %st(0), %st(0)
+; MCU-NEXT: fxch %st(1)
+; MCU-NEXT: fmul %st(0), %st(0)
+; MCU-NEXT: fxch %st(2)
+; MCU-NEXT: fmul %st(0), %st(0)
+; MCU-NEXT: fxch %st(3)
+; MCU-NEXT: fmul %st(0), %st(0)
+; MCU-NEXT: testl %eax, %eax
+; MCU-NEXT: flds (%edx)
+; MCU-NEXT: je .LBB5_2
+; MCU-NEXT: # BB#1:
+; MCU-NEXT: fstp %st(1)
+; MCU-NEXT: fstp %st(3)
+; MCU-NEXT: fstp %st(1)
+; MCU-NEXT: fstp %st(0)
+; MCU-NEXT: flds (%esp) # 4-byte Folded Reload
+; MCU-NEXT: fldz
+; MCU-NEXT: fldz
+; MCU-NEXT: fldz
+; MCU-NEXT: fxch %st(1)
+; MCU-NEXT: fxch %st(6)
+; MCU-NEXT: fxch %st(1)
+; MCU-NEXT: fxch %st(5)
+; MCU-NEXT: fxch %st(4)
+; MCU-NEXT: fxch %st(1)
+; MCU-NEXT: fxch %st(3)
+; MCU-NEXT: fxch %st(2)
+; MCU-NEXT: .LBB5_2:
+; MCU-NEXT: fstp %st(0)
+; MCU-NEXT: fstp %st(5)
+; MCU-NEXT: fstp %st(3)
+; MCU-NEXT: fxch %st(2)
+; MCU-NEXT: fstps 12(%edx)
+; MCU-NEXT: fxch %st(1)
+; MCU-NEXT: fstps 8(%edx)
+; MCU-NEXT: fstps 4(%edx)
+; MCU-NEXT: fstps (%edx)
+; MCU-NEXT: popl %eax
+; MCU-NEXT: retl
%tmp = load <4 x float>, <4 x float>* %A
%tmp3 = load <4 x float>, <4 x float>* %B
%tmp9 = fmul <4 x float> %tmp3, %tmp3
@@ -140,6 +267,15 @@ define x86_fp80 @test7(i32 %tmp8) nounwind {
; CHECK-NEXT: leaq {{.*}}(%rip), %rcx
; CHECK-NEXT: fldt (%rax,%rcx)
; CHECK-NEXT: retq
+;
+; MCU-LABEL: test7:
+; MCU: # BB#0:
+; MCU-NEXT: xorl %ecx, %ecx
+; MCU-NEXT: testl %eax, %eax
+; MCU-NEXT: setns %cl
+; MCU-NEXT: shll $4, %ecx
+; MCU-NEXT: fldt {{\.LCPI.*}}(%ecx)
+; MCU-NEXT: retl
%tmp9 = icmp sgt i32 %tmp8, -1
%retval = select i1 %tmp9, x86_fp80 0xK4005B400000000000000, x86_fp80 0xK40078700000000000000
ret x86_fp80 %retval
@@ -218,6 +354,80 @@ define void @test8(i1 %c, <6 x i32>* %dst.addr, <6 x i32> %src1,<6 x i32> %src2)
; ATOM-NEXT: movq %xmm0, 16(%rsi)
; ATOM-NEXT: movdqa %xmm1, (%rsi)
; ATOM-NEXT: retq
+;
+; MCU-LABEL: test8:
+; MCU: # BB#0:
+; MCU-NEXT: pushl %ebp
+; MCU-NEXT: pushl %ebx
+; MCU-NEXT: pushl %edi
+; MCU-NEXT: pushl %esi
+; MCU-NEXT: andb $1, %al
+; MCU-NEXT: jne .LBB7_1
+; MCU-NEXT: # BB#2:
+; MCU-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; MCU-NEXT: movl (%ecx), %ecx
+; MCU-NEXT: je .LBB7_5
+; MCU-NEXT: .LBB7_4:
+; MCU-NEXT: leal {{[0-9]+}}(%esp), %esi
+; MCU-NEXT: movl (%esi), %esi
+; MCU-NEXT: je .LBB7_8
+; MCU-NEXT: .LBB7_7:
+; MCU-NEXT: leal {{[0-9]+}}(%esp), %edi
+; MCU-NEXT: movl (%edi), %edi
+; MCU-NEXT: je .LBB7_11
+; MCU-NEXT: .LBB7_10:
+; MCU-NEXT: leal {{[0-9]+}}(%esp), %ebx
+; MCU-NEXT: movl (%ebx), %ebx
+; MCU-NEXT: je .LBB7_14
+; MCU-NEXT: .LBB7_13:
+; MCU-NEXT: leal {{[0-9]+}}(%esp), %ebp
+; MCU-NEXT: jmp .LBB7_15
+; MCU-NEXT: .LBB7_1:
+; MCU-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; MCU-NEXT: movl (%ecx), %ecx
+; MCU-NEXT: jne .LBB7_4
+; MCU-NEXT: .LBB7_5:
+; MCU-NEXT: leal {{[0-9]+}}(%esp), %esi
+; MCU-NEXT: movl (%esi), %esi
+; MCU-NEXT: jne .LBB7_7
+; MCU-NEXT: .LBB7_8:
+; MCU-NEXT: leal {{[0-9]+}}(%esp), %edi
+; MCU-NEXT: movl (%edi), %edi
+; MCU-NEXT: jne .LBB7_10
+; MCU-NEXT: .LBB7_11:
+; MCU-NEXT: leal {{[0-9]+}}(%esp), %ebx
+; MCU-NEXT: movl (%ebx), %ebx
+; MCU-NEXT: jne .LBB7_13
+; MCU-NEXT: .LBB7_14:
+; MCU-NEXT: leal {{[0-9]+}}(%esp), %ebp
+; MCU-NEXT: .LBB7_15:
+; MCU-NEXT: movl (%ebp), %ebp
+; MCU-NEXT: testb %al, %al
+; MCU-NEXT: jne .LBB7_16
+; MCU-NEXT: # BB#17:
+; MCU-NEXT: leal {{[0-9]+}}(%esp), %eax
+; MCU-NEXT: jmp .LBB7_18
+; MCU-NEXT: .LBB7_16:
+; MCU-NEXT: leal {{[0-9]+}}(%esp), %eax
+; MCU-NEXT: .LBB7_18:
+; MCU-NEXT: movl (%eax), %eax
+; MCU-NEXT: decl %eax
+; MCU-NEXT: decl %ebp
+; MCU-NEXT: decl %ebx
+; MCU-NEXT: decl %edi
+; MCU-NEXT: decl %esi
+; MCU-NEXT: decl %ecx
+; MCU-NEXT: movl %ecx, 20(%edx)
+; MCU-NEXT: movl %esi, 16(%edx)
+; MCU-NEXT: movl %edi, 12(%edx)
+; MCU-NEXT: movl %ebx, 8(%edx)
+; MCU-NEXT: movl %ebp, 4(%edx)
+; MCU-NEXT: movl %eax, (%edx)
+; MCU-NEXT: popl %esi
+; MCU-NEXT: popl %edi
+; MCU-NEXT: popl %ebx
+; MCU-NEXT: popl %ebp
+; MCU-NEXT: retl
%x = select i1 %c, <6 x i32> %src1, <6 x i32> %src2
%val = sub <6 x i32> %x, < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
store <6 x i32> %val, <6 x i32>* %dst.addr
@@ -243,6 +453,19 @@ define i64 @test9(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
+;
+; MCU-LABEL: test9:
+; MCU: # BB#0:
+; MCU-NEXT: orl %edx, %eax
+; MCU-NEXT: jne .LBB8_1
+; MCU-NEXT: # BB#2:
+; MCU-NEXT: movl $-1, %eax
+; MCU-NEXT: movl $-1, %edx
+; MCU-NEXT: retl
+; MCU-NEXT: .LBB8_1:
+; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
+; MCU-NEXT: movl {{[0-9]+}}(%esp), %edx
+; MCU-NEXT: retl
%cmp = icmp ne i64 %x, 0
%cond = select i1 %cmp, i64 %y, i64 -1
ret i64 %cond
@@ -265,6 +488,18 @@ define i64 @test9a(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
+;
+; MCU-LABEL: test9a:
+; MCU: # BB#0:
+; MCU-NEXT: orl %edx, %eax
+; MCU-NEXT: movl $-1, %eax
+; MCU-NEXT: movl $-1, %edx
+; MCU-NEXT: je .LBB9_2
+; MCU-NEXT: # BB#1:
+; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
+; MCU-NEXT: movl {{[0-9]+}}(%esp), %edx
+; MCU-NEXT: .LBB9_2:
+; MCU-NEXT: retl
%cmp = icmp eq i64 %x, 0
%cond = select i1 %cmp, i64 -1, i64 %y
ret i64 %cond
@@ -286,6 +521,19 @@ define i64 @test9b(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
+;
+; MCU-LABEL: test9b:
+; MCU: # BB#0:
+; MCU-NEXT: orl %edx, %eax
+; MCU-NEXT: movl $-1, %edx
+; MCU-NEXT: je .LBB10_2
+; MCU-NEXT: # BB#1:
+; MCU-NEXT: xorl %edx, %edx
+; MCU-NEXT: .LBB10_2:
+; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
+; MCU-NEXT: orl %edx, %eax
+; MCU-NEXT: orl {{[0-9]+}}(%esp), %edx
+; MCU-NEXT: retl
%cmp = icmp eq i64 %x, 0
%A = sext i1 %cmp to i64
%cond = or i64 %y, %A
@@ -309,6 +557,18 @@ define i64 @test10(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
+;
+; MCU-LABEL: test10:
+; MCU: # BB#0:
+; MCU-NEXT: orl %edx, %eax
+; MCU-NEXT: movl $-1, %eax
+; MCU-NEXT: movl $-1, %edx
+; MCU-NEXT: je .LBB11_2
+; MCU-NEXT: # BB#1:
+; MCU-NEXT: xorl %edx, %edx
+; MCU-NEXT: movl $1, %eax
+; MCU-NEXT: .LBB11_2:
+; MCU-NEXT: retl
%cmp = icmp eq i64 %x, 0
%cond = select i1 %cmp, i64 -1, i64 1
ret i64 %cond
@@ -322,6 +582,19 @@ define i64 @test11(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; CHECK-NEXT: notq %rax
; CHECK-NEXT: orq %rsi, %rax
; CHECK-NEXT: retq
+;
+; MCU-LABEL: test11:
+; MCU: # BB#0:
+; MCU-NEXT: orl %edx, %eax
+; MCU-NEXT: je .LBB12_1
+; MCU-NEXT: # BB#2:
+; MCU-NEXT: movl $-1, %eax
+; MCU-NEXT: movl $-1, %edx
+; MCU-NEXT: retl
+; MCU-NEXT: .LBB12_1:
+; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
+; MCU-NEXT: movl {{[0-9]+}}(%esp), %edx
+; MCU-NEXT: retl
%cmp = icmp eq i64 %x, 0
%cond = select i1 %cmp, i64 %y, i64 -1
ret i64 %cond
@@ -335,6 +608,18 @@ define i64 @test11a(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; CHECK-NEXT: notq %rax
; CHECK-NEXT: orq %rsi, %rax
; CHECK-NEXT: retq
+;
+; MCU-LABEL: test11a:
+; MCU: # BB#0:
+; MCU-NEXT: orl %edx, %eax
+; MCU-NEXT: movl $-1, %eax
+; MCU-NEXT: movl $-1, %edx
+; MCU-NEXT: jne .LBB13_2
+; MCU-NEXT: # BB#1:
+; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
+; MCU-NEXT: movl {{[0-9]+}}(%esp), %edx
+; MCU-NEXT: .LBB13_2:
+; MCU-NEXT: retl
%cmp = icmp ne i64 %x, 0
%cond = select i1 %cmp, i64 -1, i64 %y
ret i64 %cond
@@ -361,6 +646,39 @@ define noalias i8* @test12(i64 %count) nounwind ssp noredzone {
; ATOM-NEXT: movq $-1, %rdi
; ATOM-NEXT: cmovnoq %rax, %rdi
; ATOM-NEXT: jmp __Znam ## TAILCALL
+;
+; MCU-LABEL: test12:
+; MCU: # BB#0: # %entry
+; MCU-NEXT: pushl %ebp
+; MCU-NEXT: pushl %ebx
+; MCU-NEXT: pushl %edi
+; MCU-NEXT: pushl %esi
+; MCU-NEXT: movl %edx, %ebx
+; MCU-NEXT: movl %eax, %ebp
+; MCU-NEXT: movl $4, %ecx
+; MCU-NEXT: mull %ecx
+; MCU-NEXT: movl %eax, %esi
+; MCU-NEXT: leal (%edx,%ebx,4), %edi
+; MCU-NEXT: movl %edi, %edx
+; MCU-NEXT: pushl $0
+; MCU-NEXT: pushl $4
+; MCU-NEXT: calll __udivdi3
+; MCU-NEXT: addl $8, %esp
+; MCU-NEXT: xorl %ebx, %edx
+; MCU-NEXT: xorl %ebp, %eax
+; MCU-NEXT: orl %edx, %eax
+; MCU-NEXT: movl $-1, %eax
+; MCU-NEXT: movl $-1, %edx
+; MCU-NEXT: jne .LBB14_2
+; MCU-NEXT: # BB#1: # %entry
+; MCU-NEXT: movl %esi, %eax
+; MCU-NEXT: movl %edi, %edx
+; MCU-NEXT: .LBB14_2: # %entry
+; MCU-NEXT: popl %esi
+; MCU-NEXT: popl %edi
+; MCU-NEXT: popl %ebx
+; MCU-NEXT: popl %ebp
+; MCU-NEXT: jmp _Znam # TAILCALL
entry:
%A = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %count, i64 4)
%B = extractvalue { i64, i1 } %A, 1
@@ -388,6 +706,12 @@ define i32 @test13(i32 %a, i32 %b) nounwind {
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
+;
+; MCU-LABEL: test13:
+; MCU: # BB#0:
+; MCU-NEXT: cmpl %edx, %eax
+; MCU-NEXT: sbbl %eax, %eax
+; MCU-NEXT: retl
%c = icmp ult i32 %a, %b
%d = sext i1 %c to i32
ret i32 %d
@@ -409,6 +733,13 @@ define i32 @test14(i32 %a, i32 %b) nounwind {
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
+;
+; MCU-LABEL: test14:
+; MCU: # BB#0:
+; MCU-NEXT: cmpl %edx, %eax
+; MCU-NEXT: sbbl %eax, %eax
+; MCU-NEXT: notl %eax
+; MCU-NEXT: retl
%c = icmp uge i32 %a, %b
%d = sext i1 %c to i32
ret i32 %d
@@ -431,6 +762,12 @@ define i32 @test15(i32 %x) nounwind {
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
+;
+; MCU-LABEL: test15:
+; MCU: # BB#0: # %entry
+; MCU-NEXT: negl %eax
+; MCU-NEXT: sbbl %eax, %eax
+; MCU-NEXT: retl
entry:
%cmp = icmp ne i32 %x, 0
%sub = sext i1 %cmp to i32
@@ -453,6 +790,17 @@ define i64 @test16(i64 %x) nounwind uwtable readnone ssp {
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
+;
+; MCU-LABEL: test16:
+; MCU: # BB#0: # %entry
+; MCU-NEXT: orl %edx, %eax
+; MCU-NEXT: movl $-1, %eax
+; MCU-NEXT: jne .LBB18_2
+; MCU-NEXT: # BB#1: # %entry
+; MCU-NEXT: xorl %eax, %eax
+; MCU-NEXT: .LBB18_2: # %entry
+; MCU-NEXT: movl %eax, %edx
+; MCU-NEXT: retl
entry:
%cmp = icmp ne i64 %x, 0
%conv1 = sext i1 %cmp to i64
@@ -475,6 +823,12 @@ define i16 @test17(i16 %x) nounwind {
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
+;
+; MCU-LABEL: test17:
+; MCU: # BB#0: # %entry
+; MCU-NEXT: negw %ax
+; MCU-NEXT: sbbw %ax, %ax
+; MCU-NEXT: retl
entry:
%cmp = icmp ne i16 %x, 0
%sub = sext i1 %cmp to i16
@@ -497,6 +851,16 @@ define i8 @test18(i32 %x, i8 zeroext %a, i8 zeroext %b) nounwind {
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
+;
+; MCU-LABEL: test18:
+; MCU: # BB#0:
+; MCU-NEXT: cmpl $15, %eax
+; MCU-NEXT: jl .LBB20_2
+; MCU-NEXT: # BB#1:
+; MCU-NEXT: movl %ecx, %edx
+; MCU-NEXT: .LBB20_2:
+; MCU-NEXT: movl %edx, %eax
+; MCU-NEXT: retl
%cmp = icmp slt i32 %x, 15
%sel = select i1 %cmp, i8 %a, i8 %b
ret i8 %sel
@@ -510,6 +874,13 @@ define i32 @trunc_select_miscompile(i32 %a, i1 zeroext %cc) {
; CHECK-NEXT: shll %cl, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
+;
+; MCU-LABEL: trunc_select_miscompile:
+; MCU: # BB#0:
+; MCU-NEXT: orb $2, %dl
+; MCU-NEXT: movl %edx, %ecx
+; MCU-NEXT: shll %cl, %eax
+; MCU-NEXT: retl
%tmp1 = select i1 %cc, i32 3, i32 2
%tmp2 = shl i32 %a, %tmp1
ret i32 %tmp2
@@ -544,6 +915,23 @@ define void @clamp_i8(i32 %src, i8* %dst) {
; ATOM-NEXT: LBB22_2:
; ATOM-NEXT: movb %cl, (%rsi)
; ATOM-NEXT: retq
+;
+; MCU-LABEL: clamp_i8:
+; MCU: # BB#0:
+; MCU-NEXT: cmpl $127, %eax
+; MCU-NEXT: movl $127, %ecx
+; MCU-NEXT: jg .LBB22_2
+; MCU-NEXT: # BB#1:
+; MCU-NEXT: movl %eax, %ecx
+; MCU-NEXT: .LBB22_2:
+; MCU-NEXT: cmpl $-128, %ecx
+; MCU-NEXT: movb $-128, %al
+; MCU-NEXT: jl .LBB22_4
+; MCU-NEXT: # BB#3:
+; MCU-NEXT: movl %ecx, %eax
+; MCU-NEXT: .LBB22_4:
+; MCU-NEXT: movb %al, (%edx)
+; MCU-NEXT: retl
%cmp = icmp sgt i32 %src, 127
%sel1 = select i1 %cmp, i32 127, i32 %src
%cmp1 = icmp slt i32 %sel1, -128
@@ -576,6 +964,23 @@ define void @clamp(i32 %src, i16* %dst) {
; ATOM-NEXT: cmovgew %ax, %cx
; ATOM-NEXT: movw %cx, (%rsi)
; ATOM-NEXT: retq
+;
+; MCU-LABEL: clamp:
+; MCU: # BB#0:
+; MCU-NEXT: cmpl $32767, %eax # imm = 0x7FFF
+; MCU-NEXT: movl $32767, %ecx # imm = 0x7FFF
+; MCU-NEXT: jg .LBB23_2
+; MCU-NEXT: # BB#1:
+; MCU-NEXT: movl %eax, %ecx
+; MCU-NEXT: .LBB23_2:
+; MCU-NEXT: cmpl $-32768, %ecx # imm = 0x8000
+; MCU-NEXT: movw $-32768, %ax # imm = 0x8000
+; MCU-NEXT: jl .LBB23_4
+; MCU-NEXT: # BB#3:
+; MCU-NEXT: movl %ecx, %eax
+; MCU-NEXT: .LBB23_4:
+; MCU-NEXT: movw %ax, (%edx)
+; MCU-NEXT: retl
%cmp = icmp sgt i32 %src, 32767
%sel1 = select i1 %cmp, i32 32767, i32 %src
%cmp1 = icmp slt i32 %sel1, -32768
@@ -612,6 +1017,33 @@ define void @test19() {
; CHECK-NEXT: jp LBB24_3
; CHECK-NEXT: ## BB#4: ## %CF244
; CHECK-NEXT: retq
+;
+; MCU-LABEL: test19:
+; MCU: # BB#0: # %BB
+; MCU-NEXT: movl $-1, %ecx
+; MCU-NEXT: movb $1, %al
+; MCU-NEXT: .p2align 4, 0x90
+; MCU-NEXT: .LBB24_1: # %CF
+; MCU-NEXT: # =>This Inner Loop Header: Depth=1
+; MCU-NEXT: testb %al, %al
+; MCU-NEXT: jne .LBB24_1
+; MCU-NEXT: # BB#2: # %CF250
+; MCU-NEXT: # in Loop: Header=BB24_1 Depth=1
+; MCU-NEXT: jne .LBB24_1
+; MCU-NEXT: # BB#3: # %CF242.preheader
+; MCU-NEXT: fldz
+; MCU-NEXT: .p2align 4, 0x90
+; MCU-NEXT: .LBB24_4: # %CF242
+; MCU-NEXT: # =>This Inner Loop Header: Depth=1
+; MCU-NEXT: cmpl %eax, %ecx
+; MCU-NEXT: fucom %st(0)
+; MCU-NEXT: fnstsw %ax
+; MCU-NEXT: # kill: %AH<def> %AH<kill> %AX<kill>
+; MCU-NEXT: sahf
+; MCU-NEXT: jp .LBB24_4
+; MCU-NEXT: # BB#5: # %CF244
+; MCU-NEXT: fstp %st(0)
+; MCU-NEXT: retl
BB:
br label %CF
@@ -635,3 +1067,101 @@ CF244:
%B122 = fadd float %Sl59, undef
ret void
}
+
+define i16 @select_xor_1(i16 %A, i8 %cond) {
+; CHECK-LABEL: select_xor_1:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: xorl $43, %eax
+; CHECK-NEXT: testb $1, %sil
+; CHECK-NEXT: cmovnew %ax, %di
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: retq
+;
+; MCU-LABEL: select_xor_1:
+; MCU: # BB#0: # %entry
+; MCU-NEXT: andl $1, %edx
+; MCU-NEXT: negl %edx
+; MCU-NEXT: andl $43, %edx
+; MCU-NEXT: xorl %edx, %eax
+; MCU-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; MCU-NEXT: retl
+entry:
+ %and = and i8 %cond, 1
+ %cmp10 = icmp eq i8 %and, 0
+ %0 = xor i16 %A, 43
+ %1 = select i1 %cmp10, i16 %A, i16 %0
+ ret i16 %1
+}
+
+define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) {
+; CHECK-LABEL: select_xor_2:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: xorl %edi, %esi
+; CHECK-NEXT: testb $1, %dl
+; CHECK-NEXT: cmovel %edi, %esi
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
+;
+; MCU-LABEL: select_xor_2:
+; MCU: # BB#0: # %entry
+; MCU-NEXT: andl $1, %ecx
+; MCU-NEXT: negl %ecx
+; MCU-NEXT: andl %edx, %ecx
+; MCU-NEXT: xorl %ecx, %eax
+; MCU-NEXT: retl
+entry:
+ %and = and i8 %cond, 1
+ %cmp10 = icmp eq i8 %and, 0
+ %0 = xor i32 %B, %A
+ %1 = select i1 %cmp10, i32 %A, i32 %0
+ ret i32 %1
+}
+
+define i32 @select_or(i32 %A, i32 %B, i8 %cond) {
+; CHECK-LABEL: select_or:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: orl %edi, %esi
+; CHECK-NEXT: testb $1, %dl
+; CHECK-NEXT: cmovel %edi, %esi
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
+;
+; MCU-LABEL: select_or:
+; MCU: # BB#0: # %entry
+; MCU-NEXT: andl $1, %ecx
+; MCU-NEXT: negl %ecx
+; MCU-NEXT: andl %edx, %ecx
+; MCU-NEXT: orl %ecx, %eax
+; MCU-NEXT: retl
+entry:
+ %and = and i8 %cond, 1
+ %cmp10 = icmp eq i8 %and, 0
+ %0 = or i32 %B, %A
+ %1 = select i1 %cmp10, i32 %A, i32 %0
+ ret i32 %1
+}
+
+define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) {
+; CHECK-LABEL: select_or_1:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: orl %edi, %esi
+; CHECK-NEXT: testb $1, %dl
+; CHECK-NEXT: cmovel %edi, %esi
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: retq
+;
+; MCU-LABEL: select_or_1:
+; MCU: # BB#0: # %entry
+; MCU-NEXT: andl $1, %ecx
+; MCU-NEXT: negl %ecx
+; MCU-NEXT: andl %edx, %ecx
+; MCU-NEXT: orl %ecx, %eax
+; MCU-NEXT: retl
+entry:
+ %and = and i32 %cond, 1
+ %cmp10 = icmp eq i32 %and, 0
+ %0 = or i32 %B, %A
+ %1 = select i1 %cmp10, i32 %A, i32 %0
+ ret i32 %1
+}
diff --git a/test/CodeGen/X86/select_const.ll b/test/CodeGen/X86/select_const.ll
index 8c54685644c7..a97e7c299e73 100644
--- a/test/CodeGen/X86/select_const.ll
+++ b/test/CodeGen/X86/select_const.ll
@@ -1,6 +1,11 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+; Select of constants: control flow / conditional moves can always be replaced by logic+math (though it may not always be profitable).
+; Test the zeroext/signext variants of each pattern to see if that makes a difference.
+
+; select Cond, 0, 1 --> zext (!Cond)
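+; A sketch of the equivalent IR (names are illustrative):
+;   %not = xor i1 %cond, true
+;   %r   = zext i1 %not to i32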
+
define i32 @select_0_or_1(i1 %cond) {
; CHECK-LABEL: select_0_or_1:
; CHECK: # BB#0:
@@ -8,7 +13,6 @@ define i32 @select_0_or_1(i1 %cond) {
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: retq
-;
%sel = select i1 %cond, i32 0, i32 1
ret i32 %sel
}
@@ -19,18 +23,29 @@ define i32 @select_0_or_1_zeroext(i1 zeroext %cond) {
; CHECK-NEXT: xorb $1, %dil
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: retq
-;
%sel = select i1 %cond, i32 0, i32 1
ret i32 %sel
}
+define i32 @select_0_or_1_signext(i1 signext %cond) {
+; CHECK-LABEL: select_0_or_1_signext:
+; CHECK: # BB#0:
+; CHECK-NEXT: notb %dil
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 0, i32 1
+ ret i32 %sel
+}
+
+; select Cond, 1, 0 --> zext (Cond)
+
define i32 @select_1_or_0(i1 %cond) {
; CHECK-LABEL: select_1_or_0:
; CHECK: # BB#0:
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
-;
%sel = select i1 %cond, i32 1, i32 0
ret i32 %sel
}
@@ -40,11 +55,22 @@ define i32 @select_1_or_0_zeroext(i1 zeroext %cond) {
; CHECK: # BB#0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: retq
-;
%sel = select i1 %cond, i32 1, i32 0
ret i32 %sel
}
+define i32 @select_1_or_0_signext(i1 signext %cond) {
+; CHECK-LABEL: select_1_or_0_signext:
+; CHECK: # BB#0:
+; CHECK-NEXT: andb $1, %dil
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 1, i32 0
+ ret i32 %sel
+}
+
+; select Cond, 0, -1 --> sext (!Cond)
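+; A sketch of the equivalent IR (names are illustrative):
+;   %not = xor i1 %cond, true
+;   %r   = sext i1 %not to i32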
+
define i32 @select_0_or_neg1(i1 %cond) {
; CHECK-LABEL: select_0_or_neg1:
; CHECK: # BB#0:
@@ -52,7 +78,6 @@ define i32 @select_0_or_neg1(i1 %cond) {
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: leal -1(%rdi), %eax
; CHECK-NEXT: retq
-;
%sel = select i1 %cond, i32 0, i32 -1
ret i32 %sel
}
@@ -63,20 +88,30 @@ define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) {
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: decl %eax
; CHECK-NEXT: retq
-;
%sel = select i1 %cond, i32 0, i32 -1
ret i32 %sel
}
+define i32 @select_0_or_neg1_signext(i1 signext %cond) {
+; CHECK-LABEL: select_0_or_neg1_signext:
+; CHECK: # BB#0:
+; CHECK-NEXT: andb $1, %dil
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: decl %eax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 0, i32 -1
+ ret i32 %sel
+}
+
+; select Cond, -1, 0 --> sext (Cond)
+
define i32 @select_neg1_or_0(i1 %cond) {
; CHECK-LABEL: select_neg1_or_0:
; CHECK: # BB#0:
-; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: movl $-1, %eax
-; CHECK-NEXT: cmovel %ecx, %eax
+; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: negl %edi
+; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
-;
%sel = select i1 %cond, i32 -1, i32 0
ret i32 %sel
}
@@ -84,16 +119,133 @@ define i32 @select_neg1_or_0(i1 %cond) {
define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_neg1_or_0_zeroext:
; CHECK: # BB#0:
-; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: testb %dil, %dil
-; CHECK-NEXT: movl $-1, %eax
-; CHECK-NEXT: cmovel %ecx, %eax
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: negl %eax
; CHECK-NEXT: retq
-;
%sel = select i1 %cond, i32 -1, i32 0
ret i32 %sel
}
+define i32 @select_neg1_or_0_signext(i1 signext %cond) {
+; CHECK-LABEL: select_neg1_or_0_signext:
+; CHECK: # BB#0:
+; CHECK-NEXT: movsbl %dil, %eax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 -1, i32 0
+ ret i32 %sel
+}
+
+; select Cond, C+1, C --> add (zext Cond), C
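+; A sketch with C = 41 (names are illustrative):
+;   %z = zext i1 %cond to i32
+;   %r = add i32 %z, 41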
+
+define i32 @select_Cplus1_C(i1 %cond) {
+; CHECK-LABEL: select_Cplus1_C:
+; CHECK: # BB#0:
+; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: leal 41(%rdi), %eax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 42, i32 41
+ ret i32 %sel
+}
+
+define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) {
+; CHECK-LABEL: select_Cplus1_C_zeroext:
+; CHECK: # BB#0:
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: addl $41, %eax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 42, i32 41
+ ret i32 %sel
+}
+
+define i32 @select_Cplus1_C_signext(i1 signext %cond) {
+; CHECK-LABEL: select_Cplus1_C_signext:
+; CHECK: # BB#0:
+; CHECK-NEXT: andb $1, %dil
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: addl $41, %eax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 42, i32 41
+ ret i32 %sel
+}
+
+; select Cond, C, C+1 --> add (sext Cond), C+1
+
+define i32 @select_C_Cplus1(i1 %cond) {
+; CHECK-LABEL: select_C_Cplus1:
+; CHECK: # BB#0:
+; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: movl $42, %eax
+; CHECK-NEXT: subl %edi, %eax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 41, i32 42
+ ret i32 %sel
+}
+
+define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) {
+; CHECK-LABEL: select_C_Cplus1_zeroext:
+; CHECK: # BB#0:
+; CHECK-NEXT: movzbl %dil, %ecx
+; CHECK-NEXT: movl $42, %eax
+; CHECK-NEXT: subl %ecx, %eax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 41, i32 42
+ ret i32 %sel
+}
+
+define i32 @select_C_Cplus1_signext(i1 signext %cond) {
+; CHECK-LABEL: select_C_Cplus1_signext:
+; CHECK: # BB#0:
+; CHECK-NEXT: andb $1, %dil
+; CHECK-NEXT: movzbl %dil, %ecx
+; CHECK-NEXT: movl $42, %eax
+; CHECK-NEXT: subl %ecx, %eax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 41, i32 42
+ ret i32 %sel
+}
+
+; In general, select of 2 constants could be:
+; select Cond, C1, C2 --> add (mul (zext Cond), C1-C2), C2 --> add (and (sext Cond), C1-C2), C2
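+; A sketch for C1 = 421, C2 = 42 (names are illustrative):
+;   %z = zext i1 %cond to i32
+;   %m = mul i32 %z, 379          ; C1 - C2
+;   %r = add i32 %m, 42           ; + C2
+; (The backend currently keeps the cmov form for these, as the checks below show.)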
+
+define i32 @select_C1_C2(i1 %cond) {
+; CHECK-LABEL: select_C1_C2:
+; CHECK: # BB#0:
+; CHECK-NEXT: testb $1, %dil
+; CHECK-NEXT: movl $421, %ecx # imm = 0x1A5
+; CHECK-NEXT: movl $42, %eax
+; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 421, i32 42
+ ret i32 %sel
+}
+
+define i32 @select_C1_C2_zeroext(i1 zeroext %cond) {
+; CHECK-LABEL: select_C1_C2_zeroext:
+; CHECK: # BB#0:
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: movl $421, %ecx # imm = 0x1A5
+; CHECK-NEXT: movl $42, %eax
+; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 421, i32 42
+ ret i32 %sel
+}
+
+define i32 @select_C1_C2_signext(i1 signext %cond) {
+; CHECK-LABEL: select_C1_C2_signext:
+; CHECK: # BB#0:
+; CHECK-NEXT: testb $1, %dil
+; CHECK-NEXT: movl $421, %ecx # imm = 0x1A5
+; CHECK-NEXT: movl $42, %eax
+; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i32 421, i32 42
+ ret i32 %sel
+}
+
+; select (x == 2), 2, (x + 1) --> select (x == 2), x, (x + 1)
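+; When the compare holds, x is known to equal the constant 2, so the constant
+; arm can be replaced by x itself; that is what allows the cmov of x below.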
+
define i64 @select_2_or_inc(i64 %x) {
; CHECK-LABEL: select_2_or_inc:
; CHECK: # BB#0:
@@ -101,10 +253,66 @@ define i64 @select_2_or_inc(i64 %x) {
; CHECK-NEXT: cmpq $2, %rdi
; CHECK-NEXT: cmoveq %rdi, %rax
; CHECK-NEXT: retq
-;
%cmp = icmp eq i64 %x, 2
%add = add i64 %x, 1
%retval.0 = select i1 %cmp, i64 2, i64 %add
ret i64 %retval.0
}
+define <4 x i32> @sel_constants_add_constant_vec(i1 %cond) {
+; CHECK-LABEL: sel_constants_add_constant_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: testb $1, %dil
+; CHECK-NEXT: jne .LBB22_1
+; CHECK-NEXT: # BB#2:
+; CHECK-NEXT: movaps {{.*#+}} xmm0 = [12,13,14,15]
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB22_1:
+; CHECK-NEXT: movaps {{.*#+}} xmm0 = [4294967293,14,4,4]
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, <4 x i32> <i32 -4, i32 12, i32 1, i32 0>, <4 x i32> <i32 11, i32 11, i32 11, i32 11>
+ %bo = add <4 x i32> %sel, <i32 1, i32 2, i32 3, i32 4>
+ ret <4 x i32> %bo
+}
+
+define <2 x double> @sel_constants_fmul_constant_vec(i1 %cond) {
+; CHECK-LABEL: sel_constants_fmul_constant_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: testb $1, %dil
+; CHECK-NEXT: jne .LBB23_1
+; CHECK-NEXT: # BB#2:
+; CHECK-NEXT: movaps {{.*#+}} xmm0 = [1.188300e+02,3.454000e+01]
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB23_1:
+; CHECK-NEXT: movaps {{.*#+}} xmm0 = [-2.040000e+01,3.768000e+01]
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, <2 x double> <double -4.0, double 12.0>, <2 x double> <double 23.3, double 11.0>
+ %bo = fmul <2 x double> %sel, <double 5.1, double 3.14>
+ ret <2 x double> %bo
+}
+
+; 4294967297 = 0x100000001.
+; This becomes an opaque constant via ConstantHoisting, so we don't fold it into the select.
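+; (ConstantHoisting marks expensive immediates, such as this 64-bit value that
+; requires movabsq, as opaque so they are materialized once and not duplicated.)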
+
+define i64 @opaque_constant(i1 %cond, i64 %x) {
+; CHECK-LABEL: opaque_constant:
+; CHECK: # BB#0:
+; CHECK-NEXT: testb $1, %dil
+; CHECK-NEXT: movl $23, %ecx
+; CHECK-NEXT: movq $-4, %rax
+; CHECK-NEXT: cmoveq %rcx, %rax
+; CHECK-NEXT: movabsq $4294967297, %rcx # imm = 0x100000001
+; CHECK-NEXT: andq %rcx, %rax
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: cmpq %rcx, %rsi
+; CHECK-NEXT: sete %dl
+; CHECK-NEXT: subq %rdx, %rax
+; CHECK-NEXT: retq
+ %sel = select i1 %cond, i64 -4, i64 23
+ %bo = and i64 %sel, 4294967297
+ %cmp = icmp eq i64 %x, 4294967297
+ %sext = sext i1 %cmp to i64
+ %add = add i64 %bo, %sext
+ ret i64 %add
+}
+
diff --git a/test/CodeGen/X86/select_meta.ll b/test/CodeGen/X86/select_meta.ll
index 0b2b344114d9..2c73f767e375 100644
--- a/test/CodeGen/X86/select_meta.ll
+++ b/test/CodeGen/X86/select_meta.ll
@@ -13,4 +13,4 @@ define i32 @foo(i32, i32, i32) {
!0 = !{!"clang version 4.0.0 (trunk 279683)"}
!1 = !{!"branch_weights", i32 1000, i32 1 }
-; CHECK ![[WT]] = !{!"branch_weights", i32 1000, i32 1 }
+; CHECK: ![[WT]] = !{!"branch_weights", i32 1000, i32 1}
diff --git a/test/CodeGen/X86/selectiondag-order.ll b/test/CodeGen/X86/selectiondag-order.ll
new file mode 100644
index 000000000000..163e2cb90b2f
--- /dev/null
+++ b/test/CodeGen/X86/selectiondag-order.ll
@@ -0,0 +1,97 @@
+; Check that debug intrinsics do not affect code generation.
+
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck --check-prefix=X86-CHECK %s
+
+define i64 @simulate(<2 x i32> %a) {
+entry:
+ %rand = tail call i64 @lrand48()
+ br label %body
+
+body: ; preds = %body, %entry
+ %0 = phi <2 x i32> [ %add, %body ], [ zeroinitializer, %entry ]
+ %add = add <2 x i32> %0, %a
+ %rand1 = tail call i64 @lrand48() #3
+ %cmp = icmp eq i64 %rand1, 0
+ br i1 %cmp, label %end, label %body
+
+end: ; preds = %body
+ %c = bitcast <2 x i32> %add to i64
+ %res = add i64 %rand, %c
+ ret i64 %res
+}
+
+; X86-CHECK: simulate:
+; X86-CHECK: movdqa %xmm0, 16(%rsp)
+; X86-CHECK: pxor %xmm0, %xmm0
+; X86-CHECK: movdqa %xmm0, (%rsp)
+; X86-CHECK: callq lrand48
+; X86-CHECK: movq %rax, %rbx
+
+define i64 @simulateWithDebugIntrinsic(<2 x i32> %a) local_unnamed_addr {
+entry:
+ %rand = tail call i64 @lrand48() #3
+ tail call void @llvm.dbg.value(metadata i64 %rand, i64 0, metadata !6, metadata !7), !dbg !8
+ br label %body
+
+body: ; preds = %body, %entry
+ %0 = phi <2 x i32> [ %add, %body ], [ zeroinitializer, %entry ]
+ %add = add <2 x i32> %0, %a
+ %rand1 = tail call i64 @lrand48() #3
+ %cmp = icmp eq i64 %rand1, 0
+ br i1 %cmp, label %end, label %body
+
+end: ; preds = %body
+ %c = bitcast <2 x i32> %add to i64
+ %res = add i64 %rand, %c
+ ret i64 %res
+}
+
+; X86-CHECK: simulateWithDebugIntrinsic:
+; X86-CHECK: movdqa %xmm0, 16(%rsp)
+; X86-CHECK: pxor %xmm0, %xmm0
+; X86-CHECK: movdqa %xmm0, (%rsp)
+; X86-CHECK: callq lrand48
+; X86-CHECK: movq %rax, %rbx
+
+define i64 @simulateWithDbgDeclare(<2 x i32> %a) local_unnamed_addr {
+entry:
+ %rand = tail call i64 @lrand48() #3
+ tail call void @llvm.dbg.declare(metadata i64 %rand, metadata !6, metadata !7), !dbg !8
+ br label %body
+
+body: ; preds = %body, %entry
+ %0 = phi <2 x i32> [ %add, %body ], [ zeroinitializer, %entry ]
+ %add = add <2 x i32> %0, %a
+ %rand1 = tail call i64 @lrand48() #3
+ %cmp = icmp eq i64 %rand1, 0
+ br i1 %cmp, label %end, label %body
+
+end: ; preds = %body
+ %c = bitcast <2 x i32> %add to i64
+ %res = add i64 %rand, %c
+ ret i64 %res
+}
+
+; X86-CHECK: simulateWithDbgDeclare:
+; X86-CHECK: movdqa %xmm0, 16(%rsp)
+; X86-CHECK: pxor %xmm0, %xmm0
+; X86-CHECK: movdqa %xmm0, (%rsp)
+; X86-CHECK: callq lrand48
+; X86-CHECK: movq %rax, %rbx
+
+declare i64 @lrand48()
+
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata)
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
+
+!llvm.dbg.cu = !{!1}
+!llvm.module.flags = !{!3, !4}
+
+!1 = distinct !DICompileUnit(language: DW_LANG_C99, file: !2, runtimeVersion: 0, emissionKind: FullDebug)
+!2 = !DIFile(filename: "test.ll", directory: ".")
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "simulateWithDebugIntrinsic", scope: !2, file: !2, line: 64, isLocal: false, isDefinition: true, scopeLine: 65, unit: !1)
+!6 = !DILocalVariable(name: "randv", scope: !5, file: !2, line: 69)
+!7 = !DIExpression()
+!8 = !DILocation(line: 132, column: 2, scope: !5)
diff --git a/test/CodeGen/X86/setcc-logic.ll b/test/CodeGen/X86/setcc-logic.ll
new file mode 100644
index 000000000000..4d1e5ba16540
--- /dev/null
+++ b/test/CodeGen/X86/setcc-logic.ll
@@ -0,0 +1,482 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+
+define zeroext i1 @all_bits_clear(i32 %P, i32 %Q) nounwind {
+; CHECK-LABEL: all_bits_clear:
+; CHECK: # BB#0:
+; CHECK-NEXT: orl %esi, %edi
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: retq
+ %a = icmp eq i32 %P, 0
+ %b = icmp eq i32 %Q, 0
+ %c = and i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @all_sign_bits_clear(i32 %P, i32 %Q) nounwind {
+; CHECK-LABEL: all_sign_bits_clear:
+; CHECK: # BB#0:
+; CHECK-NEXT: orl %esi, %edi
+; CHECK-NEXT: setns %al
+; CHECK-NEXT: retq
+ %a = icmp sgt i32 %P, -1
+ %b = icmp sgt i32 %Q, -1
+ %c = and i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @all_bits_set(i32 %P, i32 %Q) nounwind {
+; CHECK-LABEL: all_bits_set:
+; CHECK: # BB#0:
+; CHECK-NEXT: andl %esi, %edi
+; CHECK-NEXT: cmpl $-1, %edi
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: retq
+ %a = icmp eq i32 %P, -1
+ %b = icmp eq i32 %Q, -1
+ %c = and i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @all_sign_bits_set(i32 %P, i32 %Q) nounwind {
+; CHECK-LABEL: all_sign_bits_set:
+; CHECK: # BB#0:
+; CHECK-NEXT: andl %esi, %edi
+; CHECK-NEXT: shrl $31, %edi
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: retq
+ %a = icmp slt i32 %P, 0
+ %b = icmp slt i32 %Q, 0
+ %c = and i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @any_bits_set(i32 %P, i32 %Q) nounwind {
+; CHECK-LABEL: any_bits_set:
+; CHECK: # BB#0:
+; CHECK-NEXT: orl %esi, %edi
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: retq
+ %a = icmp ne i32 %P, 0
+ %b = icmp ne i32 %Q, 0
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @any_sign_bits_set(i32 %P, i32 %Q) nounwind {
+; CHECK-LABEL: any_sign_bits_set:
+; CHECK: # BB#0:
+; CHECK-NEXT: orl %esi, %edi
+; CHECK-NEXT: shrl $31, %edi
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: retq
+ %a = icmp slt i32 %P, 0
+ %b = icmp slt i32 %Q, 0
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @any_bits_clear(i32 %P, i32 %Q) nounwind {
+; CHECK-LABEL: any_bits_clear:
+; CHECK: # BB#0:
+; CHECK-NEXT: andl %esi, %edi
+; CHECK-NEXT: cmpl $-1, %edi
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: retq
+ %a = icmp ne i32 %P, -1
+ %b = icmp ne i32 %Q, -1
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @any_sign_bits_clear(i32 %P, i32 %Q) nounwind {
+; CHECK-LABEL: any_sign_bits_clear:
+; CHECK: # BB#0:
+; CHECK-NEXT: testl %esi, %edi
+; CHECK-NEXT: setns %al
+; CHECK-NEXT: retq
+ %a = icmp sgt i32 %P, -1
+ %b = icmp sgt i32 %Q, -1
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; PR3351 - (P == 0) & (Q == 0) -> (P|Q) == 0
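+; Both pointers are null exactly when their bitwise OR is zero, so the two
+; compares fold into a single test of (P | Q).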
+define i32 @all_bits_clear_branch(i32* %P, i32* %Q) nounwind {
+; CHECK-LABEL: all_bits_clear_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: orq %rsi, %rdi
+; CHECK-NEXT: jne .LBB8_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: movl $4, %eax
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB8_2: # %return
+; CHECK-NEXT: movl $192, %eax
+; CHECK-NEXT: retq
+entry:
+ %a = icmp eq i32* %P, null
+ %b = icmp eq i32* %Q, null
+ %c = and i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define i32 @all_sign_bits_clear_branch(i32 %P, i32 %Q) nounwind {
+; CHECK-LABEL: all_sign_bits_clear_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: js .LBB9_3
+; CHECK-NEXT: # BB#1: # %entry
+; CHECK-NEXT: testl %esi, %esi
+; CHECK-NEXT: js .LBB9_3
+; CHECK-NEXT: # BB#2: # %bb1
+; CHECK-NEXT: movl $4, %eax
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB9_3: # %return
+; CHECK-NEXT: movl $192, %eax
+; CHECK-NEXT: retq
+entry:
+ %a = icmp sgt i32 %P, -1
+ %b = icmp sgt i32 %Q, -1
+ %c = and i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define i32 @all_bits_set_branch(i32 %P, i32 %Q) nounwind {
+; CHECK-LABEL: all_bits_set_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cmpl $-1, %edi
+; CHECK-NEXT: jne .LBB10_3
+; CHECK-NEXT: # BB#1: # %entry
+; CHECK-NEXT: cmpl $-1, %esi
+; CHECK-NEXT: jne .LBB10_3
+; CHECK-NEXT: # BB#2: # %bb1
+; CHECK-NEXT: movl $4, %eax
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB10_3: # %return
+; CHECK-NEXT: movl $192, %eax
+; CHECK-NEXT: retq
+entry:
+ %a = icmp eq i32 %P, -1
+ %b = icmp eq i32 %Q, -1
+ %c = and i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define i32 @all_sign_bits_set_branch(i32 %P, i32 %Q) nounwind {
+; CHECK-LABEL: all_sign_bits_set_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: jns .LBB11_3
+; CHECK-NEXT: # BB#1: # %entry
+; CHECK-NEXT: testl %esi, %esi
+; CHECK-NEXT: jns .LBB11_3
+; CHECK-NEXT: # BB#2: # %bb1
+; CHECK-NEXT: movl $4, %eax
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB11_3: # %return
+; CHECK-NEXT: movl $192, %eax
+; CHECK-NEXT: retq
+entry:
+ %a = icmp slt i32 %P, 0
+ %b = icmp slt i32 %Q, 0
+ %c = and i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+; PR3351 - (P != 0) | (Q != 0) -> (P|Q) != 0
+define i32 @any_bits_set_branch(i32* %P, i32* %Q) nounwind {
+; CHECK-LABEL: any_bits_set_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: orq %rsi, %rdi
+; CHECK-NEXT: je .LBB12_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: movl $4, %eax
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB12_2: # %return
+; CHECK-NEXT: movl $192, %eax
+; CHECK-NEXT: retq
+entry:
+ %a = icmp ne i32* %P, null
+ %b = icmp ne i32* %Q, null
+ %c = or i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define i32 @any_sign_bits_set_branch(i32 %P, i32 %Q) nounwind {
+; CHECK-LABEL: any_sign_bits_set_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: js .LBB13_2
+; CHECK-NEXT: # BB#1: # %entry
+; CHECK-NEXT: testl %esi, %esi
+; CHECK-NEXT: js .LBB13_2
+; CHECK-NEXT: # BB#3: # %return
+; CHECK-NEXT: movl $192, %eax
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB13_2: # %bb1
+; CHECK-NEXT: movl $4, %eax
+; CHECK-NEXT: retq
+entry:
+ %a = icmp slt i32 %P, 0
+ %b = icmp slt i32 %Q, 0
+ %c = or i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define i32 @any_bits_clear_branch(i32 %P, i32 %Q) nounwind {
+; CHECK-LABEL: any_bits_clear_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: cmpl $-1, %edi
+; CHECK-NEXT: jne .LBB14_2
+; CHECK-NEXT: # BB#1: # %entry
+; CHECK-NEXT: cmpl $-1, %esi
+; CHECK-NEXT: jne .LBB14_2
+; CHECK-NEXT: # BB#3: # %return
+; CHECK-NEXT: movl $192, %eax
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB14_2: # %bb1
+; CHECK-NEXT: movl $4, %eax
+; CHECK-NEXT: retq
+entry:
+ %a = icmp ne i32 %P, -1
+ %b = icmp ne i32 %Q, -1
+ %c = or i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define i32 @any_sign_bits_clear_branch(i32 %P, i32 %Q) nounwind {
+; CHECK-LABEL: any_sign_bits_clear_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: jns .LBB15_2
+; CHECK-NEXT: # BB#1: # %entry
+; CHECK-NEXT: testl %esi, %esi
+; CHECK-NEXT: jns .LBB15_2
+; CHECK-NEXT: # BB#3: # %return
+; CHECK-NEXT: movl $192, %eax
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB15_2: # %bb1
+; CHECK-NEXT: movl $4, %eax
+; CHECK-NEXT: retq
+entry:
+ %a = icmp sgt i32 %P, -1
+ %b = icmp sgt i32 %Q, -1
+ %c = or i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define <4 x i1> @all_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
+; CHECK-LABEL: all_bits_clear_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: por %xmm1, %xmm0
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pcmpeqd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %a = icmp eq <4 x i32> %P, zeroinitializer
+ %b = icmp eq <4 x i32> %Q, zeroinitializer
+ %c = and <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @all_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
+; CHECK-LABEL: all_sign_bits_clear_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: por %xmm1, %xmm0
+; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
+; CHECK-NEXT: pcmpgtd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %a = icmp sgt <4 x i32> %P, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %b = icmp sgt <4 x i32> %Q, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = and <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @all_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
+; CHECK-LABEL: all_bits_set_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: pand %xmm1, %xmm0
+; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
+; CHECK-NEXT: pcmpeqd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %a = icmp eq <4 x i32> %P, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %b = icmp eq <4 x i32> %Q, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = and <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @all_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
+; CHECK-LABEL: all_sign_bits_set_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: pand %xmm1, %xmm0
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pcmpgtd %xmm0, %xmm1
+; CHECK-NEXT: movdqa %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %a = icmp slt <4 x i32> %P, zeroinitializer
+ %b = icmp slt <4 x i32> %Q, zeroinitializer
+ %c = and <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @any_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
+; CHECK-LABEL: any_bits_set_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: por %xmm1, %xmm0
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pcmpeqd %xmm1, %xmm0
+; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
+; CHECK-NEXT: pxor %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %a = icmp ne <4 x i32> %P, zeroinitializer
+ %b = icmp ne <4 x i32> %Q, zeroinitializer
+ %c = or <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @any_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
+; CHECK-LABEL: any_sign_bits_set_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: por %xmm1, %xmm0
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pcmpgtd %xmm0, %xmm1
+; CHECK-NEXT: movdqa %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %a = icmp slt <4 x i32> %P, zeroinitializer
+ %b = icmp slt <4 x i32> %Q, zeroinitializer
+ %c = or <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @any_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
+; CHECK-LABEL: any_bits_clear_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: pand %xmm1, %xmm0
+; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
+; CHECK-NEXT: pcmpeqd %xmm1, %xmm0
+; CHECK-NEXT: pxor %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %a = icmp ne <4 x i32> %P, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %b = icmp ne <4 x i32> %Q, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = or <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @any_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) nounwind {
+; CHECK-LABEL: any_sign_bits_clear_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: pand %xmm1, %xmm0
+; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
+; CHECK-NEXT: pcmpgtd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %a = icmp sgt <4 x i32> %P, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %b = icmp sgt <4 x i32> %Q, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = or <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define zeroext i1 @ne_neg1_and_ne_zero(i64 %x) nounwind {
+; CHECK-LABEL: ne_neg1_and_ne_zero:
+; CHECK: # BB#0:
+; CHECK-NEXT: incq %rdi
+; CHECK-NEXT: cmpq $1, %rdi
+; CHECK-NEXT: seta %al
+; CHECK-NEXT: retq
+ %cmp1 = icmp ne i64 %x, -1
+ %cmp2 = icmp ne i64 %x, 0
+ %and = and i1 %cmp1, %cmp2
+ ret i1 %and
+}
+
+; PR32401 - https://bugs.llvm.org/show_bug.cgi?id=32401
+
+define zeroext i1 @and_eq(i8 %a, i8 %b, i8 %c, i8 %d) nounwind {
+; CHECK-LABEL: and_eq:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %esi, %edi
+; CHECK-NEXT: xorl %ecx, %edx
+; CHECK-NEXT: orb %dl, %dil
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: retq
+ %cmp1 = icmp eq i8 %a, %b
+ %cmp2 = icmp eq i8 %c, %d
+ %and = and i1 %cmp1, %cmp2
+ ret i1 %and
+}
+
+define zeroext i1 @or_ne(i8 %a, i8 %b, i8 %c, i8 %d) nounwind {
+; CHECK-LABEL: or_ne:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %esi, %edi
+; CHECK-NEXT: xorl %ecx, %edx
+; CHECK-NEXT: orb %dl, %dil
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: retq
+ %cmp1 = icmp ne i8 %a, %b
+ %cmp2 = icmp ne i8 %c, %d
+ %or = or i1 %cmp1, %cmp2
+ ret i1 %or
+}
+
+; This should not be transformed because vector compares + bitwise logic are faster.
+
+define <4 x i1> @and_eq_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) nounwind {
+; CHECK-LABEL: and_eq_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: pcmpeqd %xmm1, %xmm0
+; CHECK-NEXT: pcmpeqd %xmm3, %xmm2
+; CHECK-NEXT: pand %xmm2, %xmm0
+; CHECK-NEXT: retq
+ %cmp1 = icmp eq <4 x i32> %a, %b
+ %cmp2 = icmp eq <4 x i32> %c, %d
+ %and = and <4 x i1> %cmp1, %cmp2
+ ret <4 x i1> %and
+}
+
diff --git a/test/CodeGen/X86/setcc-lowering.ll b/test/CodeGen/X86/setcc-lowering.ll
index a5ff27758024..391f1cc9fb43 100644
--- a/test/CodeGen/X86/setcc-lowering.ll
+++ b/test/CodeGen/X86/setcc-lowering.ll
@@ -20,6 +20,19 @@ define <8 x i16> @pr25080(<8 x i32> %a) {
; AVX-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
+;
+; KNL-32-LABEL: pr25080:
+; KNL-32: # BB#0: # %entry
+; KNL-32-NEXT: vpbroadcastd {{\.LCPI.*}}, %ymm1
+; KNL-32-NEXT: vpand %ymm1, %ymm0, %ymm0
+; KNL-32-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; KNL-32-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; KNL-32-NEXT: movb $15, %al
+; KNL-32-NEXT: kmovw %eax, %k1
+; KNL-32-NEXT: korw %k1, %k0, %k1
+; KNL-32-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; KNL-32-NEXT: vpmovqw %zmm0, %xmm0
+; KNL-32-NEXT: retl
entry:
%0 = trunc <8 x i32> %a to <8 x i23>
%1 = icmp eq <8 x i23> %0, zeroinitializer
@@ -29,6 +42,18 @@ entry:
}
define void @pr26232(i64 %a) {
+; AVX-LABEL: pr26232:
+; AVX: # BB#0: # %for_loop599.preheader
+; AVX-NEXT: .p2align 4, 0x90
+; AVX-NEXT: .LBB1_1: # %for_loop599
+; AVX-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX-NEXT: cmpq $65536, %rdi # imm = 0x10000
+; AVX-NEXT: setl -{{[0-9]+}}(%rsp)
+; AVX-NEXT: cmpw $0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: jne .LBB1_1
+; AVX-NEXT: # BB#2: # %for_exit600
+; AVX-NEXT: retq
+;
; KNL-32-LABEL: pr26232:
; KNL-32: # BB#0: # %for_loop599.preheader
; KNL-32-NEXT: pushl %esi
diff --git a/test/CodeGen/X86/setcc-sentinals.ll b/test/CodeGen/X86/setcc-sentinals.ll
deleted file mode 100644
index d36e678c6048..000000000000
--- a/test/CodeGen/X86/setcc-sentinals.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -mcpu=generic -march=x86-64 -asm-verbose=false | FileCheck %s
-
-define zeroext i1 @test0(i64 %x) nounwind {
-; CHECK-LABEL: test0:
-; CHECK-NEXT: incq %[[X:rdi|rcx]]
-; CHECK-NEXT: cmpq $1, %[[X]]
-; CHECK-NEXT: seta %al
-; CHECK-NEXT: ret
- %cmp1 = icmp ne i64 %x, -1
- %not.cmp = icmp ne i64 %x, 0
- %.cmp1 = and i1 %cmp1, %not.cmp
- ret i1 %.cmp1
-}
diff --git a/test/CodeGen/X86/setcc-wide-types.ll b/test/CodeGen/X86/setcc-wide-types.ll
new file mode 100644
index 000000000000..b4ec03598aa4
--- /dev/null
+++ b/test/CodeGen/X86/setcc-wide-types.ll
@@ -0,0 +1,140 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=AVX2
+
+; Equality checks of 128/256-bit values can use PMOVMSK or PTEST to avoid scalarization.
+
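+; With SSE2, pcmpeqb sets each byte to 0xFF where the inputs match, and
+; pmovmskb gathers the 16 byte sign bits into a GPR, so the vectors are equal
+; iff the mask is 0xFFFF -- hence the single cmpl in the checks below.
+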
+define i32 @ne_i128(<2 x i64> %x, <2 x i64> %y) {
+; SSE2-LABEL: ne_i128:
+; SSE2: # BB#0:
+; SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; SSE2-NEXT: pmovmskb %xmm0, %ecx
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: cmpl $65535, %ecx # imm = 0xFFFF
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: ne_i128:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %ecx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: cmpl $65535, %ecx # imm = 0xFFFF
+; AVX2-NEXT: setne %al
+; AVX2-NEXT: retq
+ %bcx = bitcast <2 x i64> %x to i128
+ %bcy = bitcast <2 x i64> %y to i128
+ %cmp = icmp ne i128 %bcx, %bcy
+ %zext = zext i1 %cmp to i32
+ ret i32 %zext
+}
+
+define i32 @eq_i128(<2 x i64> %x, <2 x i64> %y) {
+; SSE2-LABEL: eq_i128:
+; SSE2: # BB#0:
+; SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; SSE2-NEXT: pmovmskb %xmm0, %ecx
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: cmpl $65535, %ecx # imm = 0xFFFF
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: eq_i128:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %ecx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: cmpl $65535, %ecx # imm = 0xFFFF
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: retq
+ %bcx = bitcast <2 x i64> %x to i128
+ %bcy = bitcast <2 x i64> %y to i128
+ %cmp = icmp eq i128 %bcx, %bcy
+ %zext = zext i1 %cmp to i32
+ ret i32 %zext
+}
+
+define i32 @ne_i256(<4 x i64> %x, <4 x i64> %y) {
+; SSE2-LABEL: ne_i256:
+; SSE2: # BB#0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm4, %r8
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; SSE2-NEXT: movd %xmm4, %r9
+; SSE2-NEXT: movd %xmm0, %r10
+; SSE2-NEXT: movd %xmm1, %rsi
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %rdi
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %rax
+; SSE2-NEXT: movd %xmm2, %rcx
+; SSE2-NEXT: movd %xmm3, %rdx
+; SSE2-NEXT: xorq %rsi, %rdx
+; SSE2-NEXT: xorq %r10, %rcx
+; SSE2-NEXT: orq %rdx, %rcx
+; SSE2-NEXT: xorq %r9, %rax
+; SSE2-NEXT: xorq %r8, %rdi
+; SSE2-NEXT: orq %rax, %rdi
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: orq %rcx, %rdi
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: ne_i256:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpmovmskb %ymm0, %ecx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: cmpl $-1, %ecx
+; AVX2-NEXT: setne %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %bcx = bitcast <4 x i64> %x to i256
+ %bcy = bitcast <4 x i64> %y to i256
+ %cmp = icmp ne i256 %bcx, %bcy
+ %zext = zext i1 %cmp to i32
+ ret i32 %zext
+}
+
+define i32 @eq_i256(<4 x i64> %x, <4 x i64> %y) {
+; SSE2-LABEL: eq_i256:
+; SSE2: # BB#0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm4, %r8
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; SSE2-NEXT: movd %xmm4, %r9
+; SSE2-NEXT: movd %xmm0, %r10
+; SSE2-NEXT: movd %xmm1, %rsi
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %rdi
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %rax
+; SSE2-NEXT: movd %xmm2, %rcx
+; SSE2-NEXT: movd %xmm3, %rdx
+; SSE2-NEXT: xorq %rsi, %rdx
+; SSE2-NEXT: xorq %r10, %rcx
+; SSE2-NEXT: orq %rdx, %rcx
+; SSE2-NEXT: xorq %r9, %rax
+; SSE2-NEXT: xorq %r8, %rdi
+; SSE2-NEXT: orq %rax, %rdi
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: orq %rcx, %rdi
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: eq_i256:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpmovmskb %ymm0, %ecx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: cmpl $-1, %ecx
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %bcx = bitcast <4 x i64> %x to i256
+ %bcy = bitcast <4 x i64> %y to i256
+ %cmp = icmp eq i256 %bcx, %bcy
+ %zext = zext i1 %cmp to i32
+ ret i32 %zext
+}
+
diff --git a/test/CodeGen/X86/setcc.ll b/test/CodeGen/X86/setcc.ll
index 268460f999b8..fab4f4137251 100644
--- a/test/CodeGen/X86/setcc.ll
+++ b/test/CodeGen/X86/setcc.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
; rdar://7329206
@@ -13,7 +13,6 @@ define zeroext i16 @t1(i16 zeroext %x) nounwind readnone ssp {
; CHECK-NEXT: seta %al
; CHECK-NEXT: shll $5, %eax
; CHECK-NEXT: retq
-;
%t0 = icmp ugt i16 %x, 26
%if = select i1 %t0, i16 32, i16 0
ret i16 %if
@@ -22,11 +21,11 @@ define zeroext i16 @t1(i16 zeroext %x) nounwind readnone ssp {
define zeroext i16 @t2(i16 zeroext %x) nounwind readnone ssp {
; CHECK-LABEL: t2:
; CHECK: ## BB#0:
+; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpl $26, %edi
-; CHECK-NEXT: sbbl %eax, %eax
-; CHECK-NEXT: andl $32, %eax
+; CHECK-NEXT: setb %al
+; CHECK-NEXT: shll $5, %eax
; CHECK-NEXT: retq
-;
%t0 = icmp ult i16 %x, 26
%if = select i1 %t0, i16 32, i16 0
ret i16 %if
@@ -35,11 +34,11 @@ define zeroext i16 @t2(i16 zeroext %x) nounwind readnone ssp {
define i64 @t3(i64 %x) nounwind readnone ssp {
; CHECK-LABEL: t3:
; CHECK: ## BB#0:
+; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpq $18, %rdi
-; CHECK-NEXT: sbbq %rax, %rax
-; CHECK-NEXT: andl $64, %eax
+; CHECK-NEXT: setb %al
+; CHECK-NEXT: shlq $6, %rax
; CHECK-NEXT: retq
-;
%t0 = icmp ult i64 %x, 18
%if = select i1 %t0, i64 64, i64 0
ret i64 %if
@@ -52,11 +51,10 @@ define i32 @t4(i32 %a) {
; CHECK: ## BB#0:
; CHECK-NEXT: movq _v4@{{.*}}(%rip), %rax
; CHECK-NEXT: cmpl $1, (%rax)
-; CHECK-NEXT: sbbl %eax, %eax
-; CHECK-NEXT: andl $32768, %eax ## imm = 0x8000
-; CHECK-NEXT: leal 65536(%rax,%rax), %eax
+; CHECK-NEXT: movw $1, %ax
+; CHECK-NEXT: adcw $0, %ax
+; CHECK-NEXT: shll $16, %eax
; CHECK-NEXT: retq
-;
%t0 = load i32, i32* @v4, align 4
%not.tobool = icmp eq i32 %t0, 0
%conv.i = sext i1 %not.tobool to i16
@@ -73,7 +71,6 @@ define i8 @t5(i32 %a) #0 {
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: setns %al
; CHECK-NEXT: retq
-;
%.lobit = lshr i32 %a, 31
%trunc = trunc i32 %.lobit to i8
%.not = xor i8 %trunc, 1
@@ -86,7 +83,6 @@ define zeroext i1 @t6(i32 %a) #0 {
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: setns %al
; CHECK-NEXT: retq
-;
%.lobit = lshr i32 %a, 31
%trunc = trunc i32 %.lobit to i1
%.not = xor i1 %trunc, 1
diff --git a/test/CodeGen/X86/sext-i1.ll b/test/CodeGen/X86/sext-i1.ll
index 9b86cd0c9a2a..8c92434db21a 100644
--- a/test/CodeGen/X86/sext-i1.ll
+++ b/test/CodeGen/X86/sext-i1.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-unknown-unknown -disable-cgp-branch-opts | FileCheck %s --check-prefix=CHECK --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -disable-cgp-branch-opts | FileCheck %s --check-prefix=CHECK --check-prefix=X64
@@ -6,24 +6,34 @@
; PR6146
define i32 @t1(i32 %x) nounwind readnone ssp {
-; CHECK-LABEL: t1:
-; CHECK: # BB#0:
-; CHECK-NEXT: cmpl $1
-; CHECK-NEXT: sbbl %eax, %eax
-; CHECK-NEXT: ret
+; X32-LABEL: t1:
+; X32: # BB#0:
+; X32-NEXT: cmpl $1, {{[0-9]+}}(%esp)
+; X32-NEXT: sbbl %eax, %eax
+; X32-NEXT: retl
;
+; X64-LABEL: t1:
+; X64: # BB#0:
+; X64-NEXT: cmpl $1, %edi
+; X64-NEXT: sbbl %eax, %eax
+; X64-NEXT: retq
%t0 = icmp eq i32 %x, 0
%if = select i1 %t0, i32 -1, i32 0
ret i32 %if
}
define i32 @t2(i32 %x) nounwind readnone ssp {
-; CHECK-LABEL: t2:
-; CHECK: # BB#0:
-; CHECK-NEXT: cmpl $1
-; CHECK-NEXT: sbbl %eax, %eax
-; CHECK-NEXT: ret
+; X32-LABEL: t2:
+; X32: # BB#0:
+; X32-NEXT: cmpl $1, {{[0-9]+}}(%esp)
+; X32-NEXT: sbbl %eax, %eax
+; X32-NEXT: retl
;
+; X64-LABEL: t2:
+; X64: # BB#0:
+; X64-NEXT: cmpl $1, %edi
+; X64-NEXT: sbbl %eax, %eax
+; X64-NEXT: retq
%t0 = icmp eq i32 %x, 0
%if = sext i1 %t0 to i32
ret i32 %if
@@ -46,7 +56,6 @@ define i32 @t3() nounwind readonly {
; X64-NEXT: cmpq %rax, %rax
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: retq
-;
entry:
%not.tobool = icmp eq i32 undef, 0
%cond = sext i1 %not.tobool to i32
@@ -80,7 +89,6 @@ define i32 @t4(i64 %x) nounwind readnone ssp {
; X64-NEXT: cmpq $1, %rdi
; X64-NEXT: sbbl %eax, %eax
; X64-NEXT: retq
-;
%t0 = icmp eq i64 %x, 0
%t1 = sext i1 %t0 to i32
ret i32 %t1
@@ -99,9 +107,70 @@ define i64 @t5(i32 %x) nounwind readnone ssp {
; X64-NEXT: cmpl $1, %edi
; X64-NEXT: sbbq %rax, %rax
; X64-NEXT: retq
-;
%t0 = icmp eq i32 %x, 0
%t1 = sext i1 %t0 to i64
ret i64 %t1
}
+; sext (xor Bool, -1) --> sub (zext Bool), 1
+
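+; Sanity check of the identity: for %cond = 0 the xor yields 1 and the sext
+; yields -1, matching zext(0) - 1; for %cond = 1 both sides yield 0.
+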
+define i32 @select_0_or_1s(i1 %cond) {
+; X32-LABEL: select_0_or_1s:
+; X32: # BB#0:
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: andl $1, %eax
+; X32-NEXT: decl %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: select_0_or_1s:
+; X64: # BB#0:
+; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: andl $1, %edi
+; X64-NEXT: leal -1(%rdi), %eax
+; X64-NEXT: retq
+ %not = xor i1 %cond, 1
+ %sext = sext i1 %not to i32
+ ret i32 %sext
+}
+
+; sext (xor Bool, -1) --> sub (zext Bool), 1
+
+define i32 @select_0_or_1s_zeroext(i1 zeroext %cond) {
+; X32-LABEL: select_0_or_1s_zeroext:
+; X32: # BB#0:
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: decl %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: select_0_or_1s_zeroext:
+; X64: # BB#0:
+; X64-NEXT: movzbl %dil, %eax
+; X64-NEXT: decl %eax
+; X64-NEXT: retq
+ %not = xor i1 %cond, 1
+ %sext = sext i1 %not to i32
+ ret i32 %sext
+}
+
+; sext (xor Bool, -1) --> sub (zext Bool), 1
+
+define i32 @select_0_or_1s_signext(i1 signext %cond) {
+; X32-LABEL: select_0_or_1s_signext:
+; X32: # BB#0:
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: andb $1, %al
+; X32-NEXT: movzbl %al, %eax
+; X32-NEXT: decl %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: select_0_or_1s_signext:
+; X64: # BB#0:
+; X64-NEXT: andb $1, %dil
+; X64-NEXT: movzbl %dil, %eax
+; X64-NEXT: decl %eax
+; X64-NEXT: retq
+ %not = xor i1 %cond, 1
+ %sext = sext i1 %not to i32
+ ret i32 %sext
+}
+
diff --git a/test/CodeGen/X86/sfence.ll b/test/CodeGen/X86/sfence.ll
deleted file mode 100644
index 0c28407b31e9..000000000000
--- a/test/CodeGen/X86/sfence.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep sfence
-
-declare void @llvm.x86.sse.sfence() nounwind
-
-define void @test() {
- call void @llvm.x86.sse.sfence()
- ret void
-}
diff --git a/test/CodeGen/X86/sha.ll b/test/CodeGen/X86/sha.ll
index fe42637bc538..eb1966470491 100644
--- a/test/CodeGen/X86/sha.ll
+++ b/test/CodeGen/X86/sha.ll
@@ -86,7 +86,7 @@ entry:
; CHECK: test_sha256rnds2rr
; CHECK: movaps %xmm0, [[XMM_TMP1:%xmm[1-9][0-9]?]]
; CHECK: movaps %xmm2, %xmm0
- ; CHECK: sha256rnds2 %xmm1, [[XMM_TMP1]]
+ ; CHECK: sha256rnds2 %xmm0, %xmm1, [[XMM_TMP1]]
}
define <4 x i32> @test_sha256rnds2rm(<4 x i32> %a, <4 x i32>* %b, <4 x i32> %c) nounwind uwtable {
@@ -97,7 +97,7 @@ entry:
; CHECK: test_sha256rnds2rm
; CHECK: movaps %xmm0, [[XMM_TMP2:%xmm[1-9][0-9]?]]
; CHECK: movaps %xmm1, %xmm0
- ; CHECK: sha256rnds2 (%rdi), [[XMM_TMP2]]
+ ; CHECK: sha256rnds2 %xmm0, (%rdi), [[XMM_TMP2]]
}
declare <4 x i32> @llvm.x86.sha256msg1(<4 x i32>, <4 x i32>) nounwind readnone
@@ -136,4 +136,4 @@ entry:
ret <4 x i32> %1
; CHECK: test_sha256msg2rm
; CHECK: sha256msg2 (%rdi), %xmm0
-} \ No newline at end of file
+}
diff --git a/test/CodeGen/X86/shrink-compare.ll b/test/CodeGen/X86/shrink-compare.ll
index 11facc4f009d..41f5d2d5be23 100644
--- a/test/CodeGen/X86/shrink-compare.ll
+++ b/test/CodeGen/X86/shrink-compare.ll
@@ -93,8 +93,7 @@ if.end:
; CHECK-LABEL: test2_1:
; CHECK: movzbl
; CHECK: cmpl $256
-; CHECK: jne .LBB
-; CHECK: jmp bar
+; CHECK: je bar
define void @test2_1(i32 %X) nounwind minsize {
entry:
%and = and i32 %X, 255
@@ -224,8 +223,7 @@ if.end:
; CHECK-LABEL: test_sext_i8_icmp_255:
; CHECK: movb $1,
; CHECK: testb
-; CHECK: jne .LBB
-; CHECK: jmp bar
+; CHECK: je bar
define void @test_sext_i8_icmp_255(i8 %x) nounwind minsize {
entry:
%sext = sext i8 %x to i32
diff --git a/test/CodeGen/X86/shrink_vmul.ll b/test/CodeGen/X86/shrink_vmul.ll
index d7e99afb2f50..930af226b953 100644
--- a/test/CodeGen/X86/shrink_vmul.ll
+++ b/test/CodeGen/X86/shrink_vmul.ll
@@ -449,7 +449,7 @@ define void @mul_2xi16_sext_zext(i8* nocapture readonly %a, i8* nocapture readon
; CHECK: # BB#0: # %entry
; CHECK-NEXT: movq {{.*}}(%rip), %rax
; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
; CHECK-NEXT: psrad $16, %xmm0
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; CHECK-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -835,7 +835,7 @@ define void @mul_2xi16_varconst4(i8* nocapture readonly %a, i64 %index) {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: movq {{.*}}(%rip), %rax
; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
; CHECK-NEXT: psrad $16, %xmm0
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; CHECK-NEXT: movl $32768, %ecx # imm = 0x8000
diff --git a/test/CodeGen/X86/shuffle-combine-crash-2.ll b/test/CodeGen/X86/shuffle-combine-crash-2.ll
new file mode 100644
index 000000000000..ea37d5b48531
--- /dev/null
+++ b/test/CodeGen/X86/shuffle-combine-crash-2.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
+
+define <4 x i64> @fold_movsd_zero() {
+; X86-LABEL: fold_movsd_zero:
+; X86: # BB#0:
+; X86-NEXT: xorps %xmm0, %xmm0
+; X86-NEXT: xorps %xmm1, %xmm1
+; X86-NEXT: retl
+;
+; X64-LABEL: fold_movsd_zero:
+; X64: # BB#0:
+; X64-NEXT: xorps %xmm0, %xmm0
+; X64-NEXT: xorps %xmm1, %xmm1
+; X64-NEXT: retq
+ %insert = insertelement <4 x i64> zeroinitializer, i64 0, i32 0
+ %shuffle = shufflevector <4 x i64> %insert, <4 x i64> zeroinitializer, <4 x i32> <i32 3, i32 5, i32 7, i32 1>
+ ret <4 x i64> %shuffle
+}
diff --git a/test/CodeGen/X86/shuffle-of-splat-multiuses.ll b/test/CodeGen/X86/shuffle-of-splat-multiuses.ll
new file mode 100644
index 000000000000..d46082f20a45
--- /dev/null
+++ b/test/CodeGen/X86/shuffle-of-splat-multiuses.ll
@@ -0,0 +1,100 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
+; PR32449
+
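+; Every lane of a splat is identical, so a shuffle of a splat is itself a
+; splat of that lane (undef lanes aside). When the splat has another use, as
+; in the stores below, it must still be materialized.
+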
+define <2 x double> @foo2(<2 x double> %v, <2 x double> *%p) nounwind {
+; AVX2-LABEL: foo2:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,1]
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm1[1,0]
+; AVX2-NEXT: vmovapd %xmm1, (%rdi)
+; AVX2-NEXT: retq
+ %res = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+ %res1 = shufflevector <2 x double> %res, <2 x double> undef, <2 x i32> <i32 1, i32 undef>
+ store <2 x double> %res, <2 x double>* %p
+ ret <2 x double> %res1
+}
+
+define <4 x double> @foo4(<4 x double> %v, <4 x double> *%p) nounwind {
+; AVX2-LABEL: foo4:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[2,2,2,2]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm1[2,0,2,3]
+; AVX2-NEXT: vmovapd %ymm1, (%rdi)
+; AVX2-NEXT: retq
+ %res = shufflevector <4 x double> %v, <4 x double> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
+ %res1 = shufflevector <4 x double> %res, <4 x double> undef, <4 x i32> <i32 2, i32 0, i32 undef, i32 undef>
+ store <4 x double> %res, <4 x double>* %p
+ ret <4 x double> %res1
+}
+
+define <8 x float> @foo8(<8 x float> %v, <8 x float> *%p) nounwind {
+; AVX2-LABEL: foo8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[2,2,2,2]
+; AVX2-NEXT: vmovaps {{.*#+}} ymm0 = <2,0,u,u,5,1,3,7>
+; AVX2-NEXT: vpermps %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovapd %ymm1, (%rdi)
+; AVX2-NEXT: retq
+ %res = shufflevector <8 x float> %v, <8 x float> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+ %res1 = shufflevector <8 x float> %res, <8 x float> undef, <8 x i32> <i32 2, i32 0, i32 undef, i32 undef, i32 5, i32 1, i32 3, i32 7>
+ store <8 x float> %res, <8 x float>* %p
+ ret <8 x float> %res1
+}
+
+define <4 x i32> @undef_splatmask(<4 x i32> %v) nounwind {
+; AVX2-LABEL: undef_splatmask:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,2,3]
+; AVX2-NEXT: retq
+ %res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 2, i32 undef>
+ %res1 = shufflevector <4 x i32> %res, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
+ ret <4 x i32> %res1
+}
+
+define <4 x i32> @undef_splatmask2(<4 x i32> %v) nounwind {
+; AVX2-LABEL: undef_splatmask2:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; AVX2-NEXT: retq
+ %res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 2, i32 undef>
+ %res1 = shufflevector <4 x i32> %res, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
+ ret <4 x i32> %res1
+}
+
+define <4 x i32> @undef_splatmask3(<4 x i32> %v) nounwind {
+; AVX2-LABEL: undef_splatmask3:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,2,3]
+; AVX2-NEXT: retq
+ %res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 2, i32 undef>
+ %res1 = shufflevector <4 x i32> %res, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 3>
+ ret <4 x i32> %res1
+}
+
+define <4 x i32> @undef_splatmask4(<4 x i32> %v, <4 x i32>* %p) nounwind {
+; AVX2-LABEL: undef_splatmask4:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa %xmm1, (%rdi)
+; AVX2-NEXT: retq
+ %res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 2, i32 undef>
+ %res1 = shufflevector <4 x i32> %res, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
+ store <4 x i32> %res, <4 x i32>* %p
+ ret <4 x i32> %res1
+}
+
+define <4 x i32> @undef_splatmask5(<4 x i32> %v, <4 x i32>* %p) nounwind {
+; AVX2-LABEL: undef_splatmask5:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpbroadcastq %xmm0, %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa %xmm1, (%rdi)
+; AVX2-NEXT: retq
+ %res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 0, i32 undef>
+ %res1 = shufflevector <4 x i32> %res, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 3>
+ store <4 x i32> %res, <4 x i32>* %p
+ ret <4 x i32> %res1
+}
diff --git a/test/CodeGen/X86/shuffle-vs-trunc-256.ll b/test/CodeGen/X86/shuffle-vs-trunc-256.ll
index 893f96e6fb22..b4ea9e2dc919 100644
--- a/test/CodeGen/X86/shuffle-vs-trunc-256.ll
+++ b/test/CodeGen/X86/shuffle-vs-trunc-256.ll
@@ -31,6 +31,7 @@ define void @shuffle_v32i8_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_to_v16i8:
@@ -42,6 +43,7 @@ define void @shuffle_v32i8_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512VL-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512VL-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i8_to_v16i8:
@@ -53,6 +55,7 @@ define void @shuffle_v32i8_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i8_to_v16i8:
@@ -64,6 +67,7 @@ define void @shuffle_v32i8_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512BWVL-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512BWVL-NEXT: vmovdqu %xmm0, (%rsi)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <32 x i8>, <32 x i8>* %L
%strided.vec = shufflevector <32 x i8> %vec, <32 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
@@ -89,6 +93,7 @@ define void @trunc_v16i16_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512F-NEXT: vpmovsxwd (%rdi), %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v16i16_to_v16i8:
@@ -96,6 +101,7 @@ define void @trunc_v16i16_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512VL-NEXT: vpmovsxwd (%rdi), %zmm0
; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512VL-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v16i16_to_v16i8:
@@ -103,12 +109,14 @@ define void @trunc_v16i16_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v16i16_to_v16i8:
; AVX512BWVL: # BB#0:
; AVX512BWVL-NEXT: vmovdqu (%rdi), %ymm0
; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rsi)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <32 x i8>, <32 x i8>* %L
%bc = bitcast <32 x i8> %vec to <16 x i16>
@@ -139,6 +147,7 @@ define void @shuffle_v16i16_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_to_v8i16:
@@ -153,6 +162,7 @@ define void @shuffle_v16i16_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512VL-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i16_to_v8i16:
@@ -164,6 +174,7 @@ define void @shuffle_v16i16_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i16_to_v8i16:
@@ -178,6 +189,7 @@ define void @shuffle_v16i16_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512BWVL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512BWVL-NEXT: vmovdqu %xmm0, (%rsi)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <16 x i16>, <16 x i16>* %L
%strided.vec = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -189,7 +201,7 @@ define void @trunc_v8i32_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX-LABEL: trunc_v8i32_to_v8i16:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa (%rdi), %ymm0
-; AVX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX-NEXT: vmovdqa %xmm0, (%rsi)
; AVX-NEXT: vzeroupper
@@ -200,12 +212,14 @@ define void @trunc_v8i32_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v8i32_to_v8i16:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vpmovdw %ymm0, (%rsi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v8i32_to_v8i16:
@@ -213,12 +227,14 @@ define void @trunc_v8i32_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v8i32_to_v8i16:
; AVX512BWVL: # BB#0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vpmovdw %ymm0, (%rsi)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <16 x i16>, <16 x i16>* %L
%bc = bitcast <16 x i16> %vec to <8 x i32>
@@ -243,6 +259,7 @@ define void @shuffle_v8i32_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX512F-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i32_to_v4i32:
@@ -251,6 +268,7 @@ define void @shuffle_v8i32_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX512VL-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v8i32_to_v4i32:
@@ -259,6 +277,7 @@ define void @shuffle_v8i32_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
; AVX512BW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX512BW-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v8i32_to_v4i32:
@@ -267,6 +286,7 @@ define void @shuffle_v8i32_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX512BWVL-NEXT: vmovaps %xmm0, (%rsi)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <8 x i32>, <8 x i32>* %L
%strided.vec = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -288,12 +308,14 @@ define void @trunc_v4i64_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v4i64_to_v4i32:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vpmovqd %ymm0, (%rsi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v4i64_to_v4i32:
@@ -301,12 +323,14 @@ define void @trunc_v4i64_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v4i64_to_v4i32:
; AVX512BWVL: # BB#0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vpmovqd %ymm0, (%rsi)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <8 x i32>, <8 x i32>* %L
%bc = bitcast <8 x i32> %vec to <4 x i64>
@@ -337,6 +361,7 @@ define void @shuffle_v32i8_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX512F-NEXT: vmovq %xmm0, (%rsi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_to_v8i8:
@@ -348,6 +373,7 @@ define void @shuffle_v32i8_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512VL-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX512VL-NEXT: vmovq %xmm0, (%rsi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i8_to_v8i8:
@@ -359,6 +385,7 @@ define void @shuffle_v32i8_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i8_to_v8i8:
@@ -373,6 +400,7 @@ define void @shuffle_v32i8_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512BWVL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512BWVL-NEXT: vpmovwb %xmm0, (%rsi)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <32 x i8>, <32 x i8>* %L
%strided.vec = shufflevector <32 x i8> %vec, <32 x i8> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
@@ -384,7 +412,7 @@ define void @trunc_v8i32_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX-LABEL: trunc_v8i32_to_v8i8:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa (%rdi), %ymm0
-; AVX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovq %xmm0, (%rsi)
@@ -397,12 +425,14 @@ define void @trunc_v8i32_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovq %xmm0, (%rsi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v8i32_to_v8i8:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vpmovdb %ymm0, (%rsi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v8i32_to_v8i8:
@@ -411,12 +441,14 @@ define void @trunc_v8i32_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v8i32_to_v8i8:
; AVX512BWVL: # BB#0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vpmovdb %ymm0, (%rsi)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <32 x i8>, <32 x i8>* %L
%bc = bitcast <32 x i8> %vec to <8 x i32>
@@ -449,6 +481,7 @@ define void @shuffle_v16i16_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX512F-NEXT: vmovq %xmm0, (%rsi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i16_to_v4i16:
@@ -457,6 +490,7 @@ define void @shuffle_v16i16_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX512VL-NEXT: vpmovdw %xmm0, (%rsi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i16_to_v4i16:
@@ -469,6 +503,7 @@ define void @shuffle_v16i16_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512BW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX512BW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i16_to_v4i16:
@@ -477,6 +512,7 @@ define void @shuffle_v16i16_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX512BWVL-NEXT: vpmovdw %xmm0, (%rsi)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <16 x i16>, <16 x i16>* %L
%strided.vec = shufflevector <16 x i16> %vec, <16 x i16> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
@@ -500,12 +536,14 @@ define void @trunc_v4i64_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512F-NEXT: vmovq %xmm0, (%rsi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v4i64_to_v4i16:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vpmovqw %ymm0, (%rsi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v4i64_to_v4i16:
@@ -514,12 +552,14 @@ define void @trunc_v4i64_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v4i64_to_v4i16:
; AVX512BWVL: # BB#0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vpmovqw %ymm0, (%rsi)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <16 x i16>, <16 x i16>* %L
%bc = bitcast <16 x i16> %vec to <4 x i64>
@@ -550,6 +590,7 @@ define void @shuffle_v32i8_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512F-NEXT: vmovd %xmm0, (%rsi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_to_v4i8:
@@ -558,6 +599,7 @@ define void @shuffle_v32i8_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX512VL-NEXT: vpmovdb %xmm0, (%rsi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i8_to_v4i8:
@@ -569,6 +611,7 @@ define void @shuffle_v32i8_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8:
@@ -577,6 +620,7 @@ define void @shuffle_v32i8_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX512BWVL-NEXT: vpmovdb %xmm0, (%rsi)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <32 x i8>, <32 x i8>* %L
%strided.vec = shufflevector <32 x i8> %vec, <32 x i8> undef, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
@@ -600,12 +644,14 @@ define void @trunc_v4i64_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovd %xmm0, (%rsi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v4i64_to_v4i8:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vpmovqb %ymm0, (%rsi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v4i64_to_v4i8:
@@ -614,12 +660,14 @@ define void @trunc_v4i64_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v4i64_to_v4i8:
; AVX512BWVL: # BB#0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BWVL-NEXT: vpmovqb %ymm0, (%rsi)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <32 x i8>, <32 x i8>* %L
%bc = bitcast <32 x i8> %vec to <4 x i64>
diff --git a/test/CodeGen/X86/shuffle-vs-trunc-512.ll b/test/CodeGen/X86/shuffle-vs-trunc-512.ll
index 923290411ae3..d053c63dcdb3 100644
--- a/test/CodeGen/X86/shuffle-vs-trunc-512.ll
+++ b/test/CodeGen/X86/shuffle-vs-trunc-512.ll
@@ -18,6 +18,7 @@ define void @shuffle_v64i8_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512F-NEXT: vmovdqa %ymm0, (%rsi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v32i8:
@@ -29,6 +30,7 @@ define void @shuffle_v64i8_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512VL-NEXT: vmovdqa %ymm0, (%rsi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v32i8:
@@ -40,6 +42,7 @@ define void @shuffle_v64i8_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512BW-NEXT: vmovdqa %ymm0, (%rsi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v32i8:
@@ -51,6 +54,7 @@ define void @shuffle_v64i8_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512BWVL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX512BWVL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512BWVL-NEXT: vmovdqu %ymm0, (%rsi)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <64 x i8>, <64 x i8>* %L
%strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
@@ -67,6 +71,7 @@ define void @trunc_v32i16_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rsi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v32i16_to_v32i8:
@@ -77,18 +82,21 @@ define void @trunc_v32i16_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512VL-NEXT: vpmovdb %zmm1, %xmm1
; AVX512VL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: vmovdqa %ymm0, (%rsi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v32i16_to_v32i8:
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vmovdqu16 (%rdi), %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, (%rsi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v32i16_to_v32i8:
; AVX512BWVL: # BB#0:
; AVX512BWVL-NEXT: vmovdqu16 (%rdi), %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, (%rsi)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <64 x i8>, <64 x i8>* %L
%bc = bitcast <64 x i8> %vec to <32 x i16>
@@ -100,37 +108,40 @@ define void @trunc_v32i16_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
define void @shuffle_v32i16_to_v16i16(<32 x i16>* %L, <16 x i16>* %S) nounwind {
; AVX512F-LABEL: shuffle_v32i16_to_v16i16:
; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512F-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,12,13,16,17,20,21,20,21,22,23,16,17,20,21,24,25,28,29]
-; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX512F-NEXT: vpshuflw {{.*#+}} ymm0 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX512F-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
+; AVX512F-NEXT: vpshuflw {{.*#+}} ymm1 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX512F-NEXT: vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
+; AVX512F-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,2],ymm0[0,2],ymm1[4,6],ymm0[4,6]
; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512F-NEXT: vmovdqa %ymm0, (%rsi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i16_to_v16i16:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpshuflw {{.*#+}} ymm0 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX512VL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
-; AVX512VL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
; AVX512VL-NEXT: vpshuflw {{.*#+}} ymm1 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX512VL-NEXT: vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
-; AVX512VL-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
-; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX512VL-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,2],ymm0[0,2],ymm1[4,6],ymm0[4,6]
; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512VL-NEXT: vmovdqa %ymm0, (%rsi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i16_to_v16i16:
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vmovdqu16 (%rdi), %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,12,13,16,17,20,21,20,21,22,23,16,17,20,21,24,25,28,29]
-; AVX512BW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm1 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
+; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
+; AVX512BW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512BW-NEXT: vmovdqa %ymm0, (%rsi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i16_to_v16i16:
@@ -141,6 +152,7 @@ define void @shuffle_v32i16_to_v16i16(<32 x i16>* %L, <16 x i16>* %S) nounwind {
; AVX512BWVL-NEXT: vpermi2w %ymm1, %ymm0, %ymm2
; AVX512BWVL-NEXT: vpermq {{.*#+}} ymm0 = ymm2[0,2,1,3]
; AVX512BWVL-NEXT: vmovdqu %ymm0, (%rsi)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %L
%strided.vec = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
@@ -153,6 +165,7 @@ define void @trunc_v16i32_to_v16i16(<32 x i16>* %L, <16 x i16>* %S) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vmovdqa32 (%rdi), %zmm0
; AVX512-NEXT: vpmovdw %zmm0, (%rsi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %L
%bc = bitcast <32 x i16> %vec to <16 x i32>
@@ -169,6 +182,7 @@ define void @shuffle_v16i32_to_v8i32(<16 x i32>* %L, <8 x i32>* %S) nounwind {
; AVX512-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512-NEXT: vmovdqa %ymm0, (%rsi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%vec = load <16 x i32>, <16 x i32>* %L
%strided.vec = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -181,6 +195,7 @@ define void @trunc_v8i64_to_v8i32(<16 x i32>* %L, <8 x i32>* %S) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: vpmovqd %zmm0, (%rsi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%vec = load <16 x i32>, <16 x i32>* %L
%bc = bitcast <16 x i32> %vec to <8 x i64>
@@ -206,6 +221,7 @@ define void @shuffle_v64i8_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v16i8:
@@ -224,6 +240,7 @@ define void @shuffle_v64i8_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512VL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX512VL-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v16i8:
@@ -265,6 +282,7 @@ define void @shuffle_v64i8_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512BW-NEXT: vpextrb $12, %xmm0, %eax
; AVX512BW-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v16i8:
@@ -306,6 +324,7 @@ define void @shuffle_v64i8_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512BWVL-NEXT: vpextrb $12, %xmm0, %eax
; AVX512BWVL-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
; AVX512BWVL-NEXT: vmovdqu %xmm0, (%rsi)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <64 x i8>, <64 x i8>* %L
%strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
@@ -318,6 +337,7 @@ define void @trunc_v16i32_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vmovdqa32 (%rdi), %zmm0
; AVX512-NEXT: vpmovdb %zmm0, (%rsi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%vec = load <64 x i8>, <64 x i8>* %L
%bc = bitcast <64 x i8> %vec to <16 x i32>
@@ -345,6 +365,7 @@ define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i16_to_v8i16:
@@ -365,6 +386,7 @@ define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512VL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX512VL-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v32i16_to_v8i16:
@@ -388,6 +410,7 @@ define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512BW-NEXT: vpextrw $4, %xmm0, %eax
; AVX512BW-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v32i16_to_v8i16:
@@ -411,6 +434,7 @@ define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512BWVL-NEXT: vpextrw $4, %xmm0, %eax
; AVX512BWVL-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
; AVX512BWVL-NEXT: vmovdqu %xmm0, (%rsi)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %L
%strided.vec = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
@@ -423,6 +447,7 @@ define void @trunc_v8i64_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: vpmovqw %zmm0, (%rsi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %L
%bc = bitcast <32 x i16> %vec to <8 x i64>
@@ -448,6 +473,7 @@ define void @shuffle_v64i8_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512F-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX512F-NEXT: vmovq %xmm0, (%rsi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v8i8:
@@ -466,6 +492,7 @@ define void @shuffle_v64i8_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512VL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX512VL-NEXT: vmovq %xmm0, (%rsi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v8i8:
@@ -482,7 +509,7 @@ define void @shuffle_v64i8_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BW-NEXT: vpextrb $0, %xmm1, %ecx
; AVX512BW-NEXT: vpextrb $8, %xmm0, %edx
; AVX512BW-NEXT: vpextrb $0, %xmm0, %edi
-; AVX512BW-NEXT: vpinsrb $0, %edi, %xmm0, %xmm0
+; AVX512BW-NEXT: vmovd %edi, %xmm0
; AVX512BW-NEXT: vpinsrb $1, %edx, %xmm0, %xmm0
; AVX512BW-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
; AVX512BW-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
@@ -491,14 +518,15 @@ define void @shuffle_v64i8_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BW-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0
; AVX512BW-NEXT: vpinsrb $7, %r8d, %xmm0, %xmm0
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8:
; AVX512BWVL: # BB#0:
; AVX512BWVL-NEXT: vmovdqu8 (%rdi), %zmm0
-; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrb $0, %eax, %xmm0, %xmm1
; AVX512BWVL-NEXT: vpextrb $8, %xmm0, %eax
+; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX512BWVL-NEXT: vmovd %ecx, %xmm1
; AVX512BWVL-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
; AVX512BWVL-NEXT: vextracti32x4 $1, %zmm0, %xmm2
; AVX512BWVL-NEXT: vpextrb $0, %xmm2, %eax
@@ -516,6 +544,7 @@ define void @shuffle_v64i8_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512BWVL-NEXT: vpextrb $8, %xmm0, %eax
; AVX512BWVL-NEXT: vpinsrb $14, %eax, %xmm1, %xmm0
; AVX512BWVL-NEXT: vpmovwb %xmm0, (%rsi)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%vec = load <64 x i8>, <64 x i8>* %L
%strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <8 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 48, i32 56>
@@ -528,6 +557,7 @@ define void @trunc_v8i64_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: vpmovqb %zmm0, (%rsi)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%vec = load <64 x i8>, <64 x i8>* %L
%bc = bitcast <64 x i8> %vec to <8 x i64>
diff --git a/test/CodeGen/X86/split-extend-vector-inreg.ll b/test/CodeGen/X86/split-extend-vector-inreg.ll
new file mode 100644
index 000000000000..692cbdb00be6
--- /dev/null
+++ b/test/CodeGen/X86/split-extend-vector-inreg.ll
@@ -0,0 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64
+
+define <4 x i64> @autogen_SD88863() {
+; X32-LABEL: autogen_SD88863:
+; X32: # BB#0: # %BB
+; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; X32-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3]
+; X32-NEXT: movb $1, %al
+; X32-NEXT: .p2align 4, 0x90
+; X32-NEXT: .LBB0_1: # %CF
+; X32-NEXT: # =>This Inner Loop Header: Depth=1
+; X32-NEXT: testb %al, %al
+; X32-NEXT: jne .LBB0_1
+; X32-NEXT: # BB#2: # %CF240
+; X32-NEXT: retl
+;
+; X64-LABEL: autogen_SD88863:
+; X64: # BB#0: # %BB
+; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3]
+; X64-NEXT: movb $1, %al
+; X64-NEXT: .p2align 4, 0x90
+; X64-NEXT: .LBB0_1: # %CF
+; X64-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-NEXT: testb %al, %al
+; X64-NEXT: jne .LBB0_1
+; X64-NEXT: # BB#2: # %CF240
+; X64-NEXT: retq
+BB:
+ %I26 = insertelement <4 x i64> undef, i64 undef, i32 2
+ br label %CF
+
+CF:
+ %E66 = extractelement <4 x i64> %I26, i32 1
+ %I68 = insertelement <4 x i64> zeroinitializer, i64 %E66, i32 2
+ %Cmp72 = icmp eq i32 0, 0
+ br i1 %Cmp72, label %CF, label %CF240
+
+CF240:
+ ret <4 x i64> %I68
+}
diff --git a/test/CodeGen/X86/split-store.ll b/test/CodeGen/X86/split-store.ll
index c2e67fb25273..6e320efb2b26 100644
--- a/test/CodeGen/X86/split-store.ll
+++ b/test/CodeGen/X86/split-store.ll
@@ -1,8 +1,8 @@
; RUN: llc -mtriple=x86_64-unknown-unknown -force-split-store < %s | FileCheck %s
; CHECK-LABEL: int32_float_pair
-; CHECK: movl %edi, (%rsi)
-; CHECK: movss %xmm0, 4(%rsi)
+; CHECK-DAG: movl %edi, (%rsi)
+; CHECK-DAG: movss %xmm0, 4(%rsi)
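+; The two component stores may be emitted in either order, hence CHECK-DAG.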
define void @int32_float_pair(i32 %tmp1, float %tmp2, i64* %ref.tmp) {
entry:
%t0 = bitcast float %tmp2 to i32
@@ -15,8 +15,8 @@ entry:
}
; CHECK-LABEL: float_int32_pair
-; CHECK: movss %xmm0, (%rsi)
-; CHECK: movl %edi, 4(%rsi)
+; CHECK-DAG: movss %xmm0, (%rsi)
+; CHECK-DAG: movl %edi, 4(%rsi)
define void @float_int32_pair(float %tmp1, i32 %tmp2, i64* %ref.tmp) {
entry:
%t0 = bitcast float %tmp1 to i32
@@ -29,9 +29,9 @@ entry:
}
; CHECK-LABEL: int16_float_pair
-; CHECK: movzwl %di, %eax
-; CHECK: movl %eax, (%rsi)
-; CHECK: movss %xmm0, 4(%rsi)
+; CHECK-DAG: movzwl %di, %eax
+; CHECK-DAG: movl %eax, (%rsi)
+; CHECK-DAG: movss %xmm0, 4(%rsi)
define void @int16_float_pair(i16 signext %tmp1, float %tmp2, i64* %ref.tmp) {
entry:
%t0 = bitcast float %tmp2 to i32
@@ -44,9 +44,9 @@ entry:
}
; CHECK-LABEL: int8_float_pair
-; CHECK: movzbl %dil, %eax
-; CHECK: movl %eax, (%rsi)
-; CHECK: movss %xmm0, 4(%rsi)
+; CHECK-DAG: movzbl %dil, %eax
+; CHECK-DAG: movl %eax, (%rsi)
+; CHECK-DAG: movss %xmm0, 4(%rsi)
define void @int8_float_pair(i8 signext %tmp1, float %tmp2, i64* %ref.tmp) {
entry:
%t0 = bitcast float %tmp2 to i32
@@ -146,10 +146,9 @@ entry:
; CHECK: movw %di, (%rdx)
; CHECK: shrl $16, %edi
; CHECK: movb %dil, 2(%rdx)
-; CHECK: movl %esi, %eax
-; CHECK: shrl $16, %eax
-; CHECK: movb %al, 6(%rdx)
-; CHECK: movw %si, 4(%rdx)
+; CHECK: movw %si, 4(%rdx)
+; CHECK: shrl $16, %esi
+; CHECK: movb %sil, 6(%rdx)
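+; Storing the low 16 bits first lets the shift clobber %esi in place instead
+; of copying it through %eax.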
define void @int24_int24_pair(i24 signext %tmp1, i24 signext %tmp2, i48* %ref.tmp) {
entry:
%t1 = zext i24 %tmp2 to i48
diff --git a/test/CodeGen/X86/sse-align-10.ll b/test/CodeGen/X86/sse-align-10.ll
index 81bf55354cd2..1e688a56ad44 100644
--- a/test/CodeGen/X86/sse-align-10.ll
+++ b/test/CodeGen/X86/sse-align-10.ll
@@ -1,6 +1,9 @@
-; RUN: llc < %s -march=x86-64 | grep movups | count 1
+; RUN: llc < %s -march=x86-64 | FileCheck %s
define <2 x i64> @bar(<2 x i64>* %p) nounwind {
+; CHECK-LABEL: bar:
+; CHECK: movups
+; CHECK-NOT: movups
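+; The align-8 load of <2 x i64> should produce exactly one unaligned load,
+; mirroring the old "grep movups | count 1" check.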
%t = load <2 x i64>, <2 x i64>* %p, align 8
ret <2 x i64> %t
}
diff --git a/test/CodeGen/X86/sse-fsignum.ll b/test/CodeGen/X86/sse-fsignum.ll
index 32594a27698d..8b27941571e8 100644
--- a/test/CodeGen/X86/sse-fsignum.ll
+++ b/test/CodeGen/X86/sse-fsignum.ll
@@ -102,6 +102,7 @@ define void @signum32b(<8 x float>*) {
; AVX512F-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512F-NEXT: vsubps %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: vmovaps %ymm0, (%rdi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
entry:
%1 = load <8 x float>, <8 x float>* %0
@@ -161,6 +162,7 @@ define void @signum64b(<4 x double>*) {
; AVX512F-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX512F-NEXT: vsubpd %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: vmovapd %ymm0, (%rdi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
entry:
%1 = load <4 x double>, <4 x double>* %0
@@ -178,43 +180,18 @@ entry:
;
define void @signum32c(<8 x float>*) {
-; AVX1-LABEL: signum32c:
-; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
-; AVX1-NEXT: vxorps %ymm1, %ymm1, %ymm1
-; AVX1-NEXT: vcmpltps %ymm1, %ymm0, %ymm2
-; AVX1-NEXT: vcvtdq2ps %ymm2, %ymm2
-; AVX1-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
-; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT: vsubps %ymm0, %ymm2, %ymm0
-; AVX1-NEXT: vmovaps %ymm0, (%rdi)
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: signum32c:
-; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vmovaps (%rdi), %ymm0
-; AVX2-NEXT: vxorps %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vcmpltps %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vcvtdq2ps %ymm2, %ymm2
-; AVX2-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT: vsubps %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vmovaps %ymm0, (%rdi)
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: signum32c:
-; AVX512F: # BB#0: # %entry
-; AVX512F-NEXT: vmovaps (%rdi), %ymm0
-; AVX512F-NEXT: vxorps %ymm1, %ymm1, %ymm1
-; AVX512F-NEXT: vcmpltps %ymm1, %ymm0, %ymm2
-; AVX512F-NEXT: vcvtdq2ps %ymm2, %ymm2
-; AVX512F-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
-; AVX512F-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX512F-NEXT: vsubps %ymm0, %ymm2, %ymm0
-; AVX512F-NEXT: vmovaps %ymm0, (%rdi)
-; AVX512F-NEXT: retq
+; AVX-LABEL: signum32c:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vmovaps (%rdi), %ymm0
+; AVX-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; AVX-NEXT: vcmpltps %ymm1, %ymm0, %ymm2
+; AVX-NEXT: vcvtdq2ps %ymm2, %ymm2
+; AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
+; AVX-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX-NEXT: vsubps %ymm0, %ymm2, %ymm0
+; AVX-NEXT: vmovaps %ymm0, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
entry:
%1 = load <8 x float>, <8 x float>* %0
%2 = tail call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> %1, <8 x float> zeroinitializer, i8 1)
@@ -270,6 +247,7 @@ define void @signum64c(<4 x double>*) {
; AVX512F-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX512F-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX512F-NEXT: vmovaps %ymm0, (%rdi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
entry:
%x = load <4 x double>, <4 x double>* %0
diff --git a/test/CodeGen/X86/sse-intrinsics-fast-isel.ll b/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
index 18434546262c..0b03dffe99b5 100644
--- a/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
@@ -1653,12 +1653,8 @@ define <4 x float> @test_mm_set1_ps(float %a0) nounwind {
define void @test_mm_setcsr(i32 %a0) nounwind {
; X32-LABEL: test_mm_setcsr:
; X32: # BB#0:
-; X32-NEXT: pushl %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %esp, %ecx
-; X32-NEXT: movl %eax, (%esp)
-; X32-NEXT: ldmxcsr (%ecx)
-; X32-NEXT: popl %eax
+; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X32-NEXT: ldmxcsr (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_setcsr:
diff --git a/test/CodeGen/X86/sse-intrinsics-x86.ll b/test/CodeGen/X86/sse-intrinsics-x86.ll
index f1c4c7463054..679b1e8b057f 100644
--- a/test/CodeGen/X86/sse-intrinsics-x86.ll
+++ b/test/CodeGen/X86/sse-intrinsics-x86.ll
@@ -322,10 +322,15 @@ define <4 x float> @test_x86_sse_max_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-NEXT: maxss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x5f,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
-; VCHECK-LABEL: test_x86_sse_max_ss:
-; VCHECK: ## BB#0:
-; VCHECK-NEXT: vmaxss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5f,0xc1]
-; VCHECK-NEXT: retl ## encoding: [0xc3]
+; AVX2-LABEL: test_x86_sse_max_ss:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vmaxss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5f,0xc1]
+; AVX2-NEXT: retl ## encoding: [0xc3]
+;
+; SKX-LABEL: test_x86_sse_max_ss:
+; SKX: ## BB#0:
+; SKX-NEXT: vmaxss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x5f,0xc1]
+; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
@@ -359,10 +364,15 @@ define <4 x float> @test_x86_sse_min_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-NEXT: minss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x5d,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
-; VCHECK-LABEL: test_x86_sse_min_ss:
-; VCHECK: ## BB#0:
-; VCHECK-NEXT: vminss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5d,0xc1]
-; VCHECK-NEXT: retl ## encoding: [0xc3]
+; AVX2-LABEL: test_x86_sse_min_ss:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vminss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5d,0xc1]
+; AVX2-NEXT: retl ## encoding: [0xc3]
+;
+; SKX-LABEL: test_x86_sse_min_ss:
+; SKX: ## BB#0:
+; SKX-NEXT: vminss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x5d,0xc1]
+; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
@@ -682,3 +692,19 @@ define i32 @test_x86_sse_ucomineq_ss(<4 x float> %a0, <4 x float> %a1) {
ret i32 %res
}
declare i32 @llvm.x86.sse.ucomineq.ss(<4 x float>, <4 x float>) nounwind readnone
+
+
+define void @sfence() nounwind {
+; SSE-LABEL: sfence:
+; SSE: ## BB#0:
+; SSE-NEXT: sfence ## encoding: [0x0f,0xae,0xf8]
+; SSE-NEXT: retl ## encoding: [0xc3]
+;
+; VCHECK-LABEL: sfence:
+; VCHECK: ## BB#0:
+; VCHECK-NEXT: sfence ## encoding: [0x0f,0xae,0xf8]
+; VCHECK-NEXT: retl ## encoding: [0xc3]
+ tail call void @llvm.x86.sse.sfence()
+ ret void
+}
+declare void @llvm.x86.sse.sfence() nounwind
diff --git a/test/CodeGen/X86/sse-intrinsics-x86_64.ll b/test/CodeGen/X86/sse-intrinsics-x86_64.ll
new file mode 100644
index 000000000000..61d0cae9acf1
--- /dev/null
+++ b/test/CodeGen/X86/sse-intrinsics-x86_64.ll
@@ -0,0 +1,78 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse -show-mc-encoding | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=SKX
+
+define i64 @test_x86_sse_cvtss2si64(<4 x float> %a0) {
+; CHECK-LABEL: test_x86_sse_cvtss2si64:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vcvtss2si %xmm0, %rax
+; CHECK-NEXT: retq
+; SSE-LABEL: test_x86_sse_cvtss2si64:
+; SSE: ## BB#0:
+; SSE-NEXT: cvtss2si %xmm0, %rax ## encoding: [0xf3,0x48,0x0f,0x2d,0xc0]
+; SSE-NEXT: retq ## encoding: [0xc3]
+;
+; AVX2-LABEL: test_x86_sse_cvtss2si64:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vcvtss2si %xmm0, %rax ## encoding: [0xc4,0xe1,0xfa,0x2d,0xc0]
+; AVX2-NEXT: retq ## encoding: [0xc3]
+;
+; SKX-LABEL: test_x86_sse_cvtss2si64:
+; SKX: ## BB#0:
+; SKX-NEXT: vcvtss2si %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfa,0x2d,0xc0]
+; SKX-NEXT: retq ## encoding: [0xc3]
+ %res = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0) ; <i64> [#uses=1]
+ ret i64 %res
+}
+declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
+
+
+define <4 x float> @test_x86_sse_cvtsi642ss(<4 x float> %a0, i64 %a1) {
+; CHECK-LABEL: test_x86_sse_cvtsi642ss:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0
+; CHECK-NEXT: retq
+; SSE-LABEL: test_x86_sse_cvtsi642ss:
+; SSE: ## BB#0:
+; SSE-NEXT: cvtsi2ssq %rdi, %xmm0 ## encoding: [0xf3,0x48,0x0f,0x2a,0xc7]
+; SSE-NEXT: retq ## encoding: [0xc3]
+;
+; AVX2-LABEL: test_x86_sse_cvtsi642ss:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 ## encoding: [0xc4,0xe1,0xfa,0x2a,0xc7]
+; AVX2-NEXT: retq ## encoding: [0xc3]
+;
+; SKX-LABEL: test_x86_sse_cvtsi642ss:
+; SKX: ## BB#0:
+; SKX-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfa,0x2a,0xc7]
+; SKX-NEXT: retq ## encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> %a0, i64 %a1) ; <<4 x float>> [#uses=1]
+ ret <4 x float> %res
+}
+declare <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float>, i64) nounwind readnone
+
+
+define i64 @test_x86_sse_cvttss2si64(<4 x float> %a0) {
+; CHECK-LABEL: test_x86_sse_cvttss2si64:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vcvttss2si %xmm0, %rax
+; CHECK-NEXT: retq
+; SSE-LABEL: test_x86_sse_cvttss2si64:
+; SSE: ## BB#0:
+; SSE-NEXT: cvttss2si %xmm0, %rax ## encoding: [0xf3,0x48,0x0f,0x2c,0xc0]
+; SSE-NEXT: retq ## encoding: [0xc3]
+;
+; AVX2-LABEL: test_x86_sse_cvttss2si64:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vcvttss2si %xmm0, %rax ## encoding: [0xc4,0xe1,0xfa,0x2c,0xc0]
+; AVX2-NEXT: retq ## encoding: [0xc3]
+;
+; SKX-LABEL: test_x86_sse_cvttss2si64:
+; SKX: ## BB#0:
+; SKX-NEXT: vcvttss2si %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfa,0x2c,0xc0]
+; SKX-NEXT: retq ## encoding: [0xc3]
+ %res = call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %a0) ; <i64> [#uses=1]
+ ret i64 %res
+}
+declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone
diff --git a/test/CodeGen/X86/sse-minmax.ll b/test/CodeGen/X86/sse-minmax.ll
index c18ddab4e29c..2944001ed7e9 100644
--- a/test/CodeGen/X86/sse-minmax.ll
+++ b/test/CodeGen/X86/sse-minmax.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 | FileCheck %s --check-prefix=ALL --check-prefix=STRICT
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s --check-prefix=ALL --check-prefix=RELAX --check-prefix=UNSAFE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 -enable-no-nans-fp-math | FileCheck %s --check-prefix=ALL --check-prefix=RELAX --check-prefix=FINITE
@@ -18,7 +18,6 @@ define double @ogt(double %x, double %y) {
; ALL: # BB#0:
; ALL-NEXT: maxsd %xmm1, %xmm0
; ALL-NEXT: retq
-;
%c = fcmp ogt double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
@@ -29,7 +28,6 @@ define double @olt(double %x, double %y) {
; ALL: # BB#0:
; ALL-NEXT: minsd %xmm1, %xmm0
; ALL-NEXT: retq
-;
%c = fcmp olt double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
@@ -52,7 +50,6 @@ define double @ogt_inverse(double %x, double %y) {
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp ogt double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
@@ -75,7 +72,6 @@ define double @olt_inverse(double %x, double %y) {
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp olt double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
@@ -84,18 +80,17 @@ define double @olt_inverse(double %x, double %y) {
define double @oge(double %x, double %y) {
; STRICT-LABEL: oge:
; STRICT: # BB#0:
-; STRICT-NEXT: movaps %xmm1, %xmm2
+; STRICT-NEXT: movapd %xmm1, %xmm2
; STRICT-NEXT: cmplesd %xmm0, %xmm2
-; STRICT-NEXT: andps %xmm2, %xmm0
-; STRICT-NEXT: andnps %xmm1, %xmm2
-; STRICT-NEXT: orps %xmm2, %xmm0
+; STRICT-NEXT: andpd %xmm2, %xmm0
+; STRICT-NEXT: andnpd %xmm1, %xmm2
+; STRICT-NEXT: orpd %xmm2, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: oge:
; RELAX: # BB#0:
; RELAX-NEXT: maxsd %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%c = fcmp oge double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
@@ -104,19 +99,18 @@ define double @oge(double %x, double %y) {
define double @ole(double %x, double %y) {
; STRICT-LABEL: ole:
; STRICT: # BB#0:
-; STRICT-NEXT: movaps %xmm0, %xmm2
+; STRICT-NEXT: movapd %xmm0, %xmm2
; STRICT-NEXT: cmplesd %xmm1, %xmm2
-; STRICT-NEXT: andps %xmm2, %xmm0
-; STRICT-NEXT: andnps %xmm1, %xmm2
-; STRICT-NEXT: orps %xmm0, %xmm2
-; STRICT-NEXT: movaps %xmm2, %xmm0
+; STRICT-NEXT: andpd %xmm2, %xmm0
+; STRICT-NEXT: andnpd %xmm1, %xmm2
+; STRICT-NEXT: orpd %xmm0, %xmm2
+; STRICT-NEXT: movapd %xmm2, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: ole:
; RELAX: # BB#0:
; RELAX-NEXT: minsd %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%c = fcmp ole double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
@@ -125,12 +119,12 @@ define double @ole(double %x, double %y) {
define double @oge_inverse(double %x, double %y) {
; STRICT-LABEL: oge_inverse:
; STRICT: # BB#0:
-; STRICT-NEXT: movaps %xmm1, %xmm2
+; STRICT-NEXT: movapd %xmm1, %xmm2
; STRICT-NEXT: cmplesd %xmm0, %xmm2
-; STRICT-NEXT: andps %xmm2, %xmm1
-; STRICT-NEXT: andnps %xmm0, %xmm2
-; STRICT-NEXT: orps %xmm1, %xmm2
-; STRICT-NEXT: movaps %xmm2, %xmm0
+; STRICT-NEXT: andpd %xmm2, %xmm1
+; STRICT-NEXT: andnpd %xmm0, %xmm2
+; STRICT-NEXT: orpd %xmm1, %xmm2
+; STRICT-NEXT: movapd %xmm2, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: oge_inverse:
@@ -143,7 +137,6 @@ define double @oge_inverse(double %x, double %y) {
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp oge double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
@@ -152,12 +145,12 @@ define double @oge_inverse(double %x, double %y) {
define double @ole_inverse(double %x, double %y) {
; STRICT-LABEL: ole_inverse:
; STRICT: # BB#0:
-; STRICT-NEXT: movaps %xmm0, %xmm2
+; STRICT-NEXT: movapd %xmm0, %xmm2
; STRICT-NEXT: cmplesd %xmm1, %xmm2
-; STRICT-NEXT: andps %xmm2, %xmm1
-; STRICT-NEXT: andnps %xmm0, %xmm2
-; STRICT-NEXT: orps %xmm1, %xmm2
-; STRICT-NEXT: movaps %xmm2, %xmm0
+; STRICT-NEXT: andpd %xmm2, %xmm1
+; STRICT-NEXT: andnpd %xmm0, %xmm2
+; STRICT-NEXT: orpd %xmm1, %xmm2
+; STRICT-NEXT: movapd %xmm2, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ole_inverse:
@@ -170,7 +163,6 @@ define double @ole_inverse(double %x, double %y) {
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp ole double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
@@ -182,7 +174,6 @@ define double @ogt_x(double %x) {
; ALL-NEXT: xorpd %xmm1, %xmm1
; ALL-NEXT: maxsd %xmm1, %xmm0
; ALL-NEXT: retq
-;
%c = fcmp ogt double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
@@ -194,7 +185,6 @@ define double @olt_x(double %x) {
; ALL-NEXT: xorpd %xmm1, %xmm1
; ALL-NEXT: minsd %xmm1, %xmm0
; ALL-NEXT: retq
-;
%c = fcmp olt double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
@@ -220,7 +210,6 @@ define double @ogt_inverse_x(double %x) {
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp ogt double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
@@ -246,7 +235,6 @@ define double @olt_inverse_x(double %x) {
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp olt double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
@@ -255,9 +243,9 @@ define double @olt_inverse_x(double %x) {
define double @oge_x(double %x) {
; STRICT-LABEL: oge_x:
; STRICT: # BB#0:
-; STRICT-NEXT: xorps %xmm1, %xmm1
+; STRICT-NEXT: xorpd %xmm1, %xmm1
; STRICT-NEXT: cmplesd %xmm0, %xmm1
-; STRICT-NEXT: andps %xmm1, %xmm0
+; STRICT-NEXT: andpd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: oge_x:
@@ -265,7 +253,6 @@ define double @oge_x(double %x) {
; RELAX-NEXT: xorpd %xmm1, %xmm1
; RELAX-NEXT: maxsd %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%c = fcmp oge double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
@@ -274,11 +261,11 @@ define double @oge_x(double %x) {
define double @ole_x(double %x) {
; STRICT-LABEL: ole_x:
; STRICT: # BB#0:
-; STRICT-NEXT: xorps %xmm2, %xmm2
-; STRICT-NEXT: movaps %xmm0, %xmm1
+; STRICT-NEXT: xorpd %xmm2, %xmm2
+; STRICT-NEXT: movapd %xmm0, %xmm1
; STRICT-NEXT: cmplesd %xmm2, %xmm1
-; STRICT-NEXT: andps %xmm0, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
+; STRICT-NEXT: andpd %xmm0, %xmm1
+; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: ole_x:
@@ -286,7 +273,6 @@ define double @ole_x(double %x) {
; RELAX-NEXT: xorpd %xmm1, %xmm1
; RELAX-NEXT: minsd %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%c = fcmp ole double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
@@ -295,10 +281,10 @@ define double @ole_x(double %x) {
define double @oge_inverse_x(double %x) {
; STRICT-LABEL: oge_inverse_x:
; STRICT: # BB#0:
-; STRICT-NEXT: xorps %xmm1, %xmm1
+; STRICT-NEXT: xorpd %xmm1, %xmm1
; STRICT-NEXT: cmplesd %xmm0, %xmm1
-; STRICT-NEXT: andnps %xmm0, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
+; STRICT-NEXT: andnpd %xmm0, %xmm1
+; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: oge_inverse_x:
@@ -313,7 +299,6 @@ define double @oge_inverse_x(double %x) {
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp oge double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
@@ -322,11 +307,11 @@ define double @oge_inverse_x(double %x) {
define double @ole_inverse_x(double %x) {
; STRICT-LABEL: ole_inverse_x:
; STRICT: # BB#0:
-; STRICT-NEXT: xorps %xmm2, %xmm2
-; STRICT-NEXT: movaps %xmm0, %xmm1
+; STRICT-NEXT: xorpd %xmm2, %xmm2
+; STRICT-NEXT: movapd %xmm0, %xmm1
; STRICT-NEXT: cmplesd %xmm2, %xmm1
-; STRICT-NEXT: andnps %xmm0, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
+; STRICT-NEXT: andnpd %xmm0, %xmm1
+; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ole_inverse_x:
@@ -341,7 +326,6 @@ define double @ole_inverse_x(double %x) {
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp ole double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
@@ -350,19 +334,18 @@ define double @ole_inverse_x(double %x) {
define double @ugt(double %x, double %y) {
; STRICT-LABEL: ugt:
; STRICT: # BB#0:
-; STRICT-NEXT: movaps %xmm0, %xmm2
+; STRICT-NEXT: movapd %xmm0, %xmm2
; STRICT-NEXT: cmpnlesd %xmm1, %xmm2
-; STRICT-NEXT: andps %xmm2, %xmm0
-; STRICT-NEXT: andnps %xmm1, %xmm2
-; STRICT-NEXT: orps %xmm0, %xmm2
-; STRICT-NEXT: movaps %xmm2, %xmm0
+; STRICT-NEXT: andpd %xmm2, %xmm0
+; STRICT-NEXT: andnpd %xmm1, %xmm2
+; STRICT-NEXT: orpd %xmm0, %xmm2
+; STRICT-NEXT: movapd %xmm2, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: ugt:
; RELAX: # BB#0:
; RELAX-NEXT: maxsd %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%c = fcmp ugt double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
@@ -371,18 +354,17 @@ define double @ugt(double %x, double %y) {
define double @ult(double %x, double %y) {
; STRICT-LABEL: ult:
; STRICT: # BB#0:
-; STRICT-NEXT: movaps %xmm1, %xmm2
+; STRICT-NEXT: movapd %xmm1, %xmm2
; STRICT-NEXT: cmpnlesd %xmm0, %xmm2
-; STRICT-NEXT: andps %xmm2, %xmm0
-; STRICT-NEXT: andnps %xmm1, %xmm2
-; STRICT-NEXT: orps %xmm2, %xmm0
+; STRICT-NEXT: andpd %xmm2, %xmm0
+; STRICT-NEXT: andnpd %xmm1, %xmm2
+; STRICT-NEXT: orpd %xmm2, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: ult:
; RELAX: # BB#0:
; RELAX-NEXT: minsd %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%c = fcmp ult double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
@@ -391,12 +373,12 @@ define double @ult(double %x, double %y) {
define double @ugt_inverse(double %x, double %y) {
; STRICT-LABEL: ugt_inverse:
; STRICT: # BB#0:
-; STRICT-NEXT: movaps %xmm0, %xmm2
+; STRICT-NEXT: movapd %xmm0, %xmm2
; STRICT-NEXT: cmpnlesd %xmm1, %xmm2
-; STRICT-NEXT: andps %xmm2, %xmm1
-; STRICT-NEXT: andnps %xmm0, %xmm2
-; STRICT-NEXT: orps %xmm1, %xmm2
-; STRICT-NEXT: movaps %xmm2, %xmm0
+; STRICT-NEXT: andpd %xmm2, %xmm1
+; STRICT-NEXT: andnpd %xmm0, %xmm2
+; STRICT-NEXT: orpd %xmm1, %xmm2
+; STRICT-NEXT: movapd %xmm2, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ugt_inverse:
@@ -409,7 +391,6 @@ define double @ugt_inverse(double %x, double %y) {
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp ugt double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
@@ -418,12 +399,12 @@ define double @ugt_inverse(double %x, double %y) {
define double @ult_inverse(double %x, double %y) {
; STRICT-LABEL: ult_inverse:
; STRICT: # BB#0:
-; STRICT-NEXT: movaps %xmm1, %xmm2
+; STRICT-NEXT: movapd %xmm1, %xmm2
; STRICT-NEXT: cmpnlesd %xmm0, %xmm2
-; STRICT-NEXT: andps %xmm2, %xmm1
-; STRICT-NEXT: andnps %xmm0, %xmm2
-; STRICT-NEXT: orps %xmm1, %xmm2
-; STRICT-NEXT: movaps %xmm2, %xmm0
+; STRICT-NEXT: andpd %xmm2, %xmm1
+; STRICT-NEXT: andnpd %xmm0, %xmm2
+; STRICT-NEXT: orpd %xmm1, %xmm2
+; STRICT-NEXT: movapd %xmm2, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ult_inverse:
@@ -436,7 +417,6 @@ define double @ult_inverse(double %x, double %y) {
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp ult double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
@@ -453,7 +433,6 @@ define double @uge(double %x, double %y) {
; RELAX: # BB#0:
; RELAX-NEXT: maxsd %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%c = fcmp uge double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
@@ -470,7 +449,6 @@ define double @ule(double %x, double %y) {
; RELAX: # BB#0:
; RELAX-NEXT: minsd %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%c = fcmp ule double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
@@ -492,7 +470,6 @@ define double @uge_inverse(double %x, double %y) {
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp uge double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
@@ -514,7 +491,6 @@ define double @ule_inverse(double %x, double %y) {
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp ule double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
@@ -523,11 +499,11 @@ define double @ule_inverse(double %x, double %y) {
define double @ugt_x(double %x) {
; STRICT-LABEL: ugt_x:
; STRICT: # BB#0:
-; STRICT-NEXT: xorps %xmm2, %xmm2
-; STRICT-NEXT: movaps %xmm0, %xmm1
+; STRICT-NEXT: xorpd %xmm2, %xmm2
+; STRICT-NEXT: movapd %xmm0, %xmm1
; STRICT-NEXT: cmpnlesd %xmm2, %xmm1
-; STRICT-NEXT: andps %xmm0, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
+; STRICT-NEXT: andpd %xmm0, %xmm1
+; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: ugt_x:
@@ -535,7 +511,6 @@ define double @ugt_x(double %x) {
; RELAX-NEXT: xorpd %xmm1, %xmm1
; RELAX-NEXT: maxsd %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%c = fcmp ugt double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
@@ -544,9 +519,9 @@ define double @ugt_x(double %x) {
define double @ult_x(double %x) {
; STRICT-LABEL: ult_x:
; STRICT: # BB#0:
-; STRICT-NEXT: xorps %xmm1, %xmm1
+; STRICT-NEXT: xorpd %xmm1, %xmm1
; STRICT-NEXT: cmpnlesd %xmm0, %xmm1
-; STRICT-NEXT: andps %xmm1, %xmm0
+; STRICT-NEXT: andpd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: ult_x:
@@ -554,7 +529,6 @@ define double @ult_x(double %x) {
; RELAX-NEXT: xorpd %xmm1, %xmm1
; RELAX-NEXT: minsd %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%c = fcmp ult double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
@@ -563,11 +537,11 @@ define double @ult_x(double %x) {
define double @ugt_inverse_x(double %x) {
; STRICT-LABEL: ugt_inverse_x:
; STRICT: # BB#0:
-; STRICT-NEXT: xorps %xmm2, %xmm2
-; STRICT-NEXT: movaps %xmm0, %xmm1
+; STRICT-NEXT: xorpd %xmm2, %xmm2
+; STRICT-NEXT: movapd %xmm0, %xmm1
; STRICT-NEXT: cmpnlesd %xmm2, %xmm1
-; STRICT-NEXT: andnps %xmm0, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
+; STRICT-NEXT: andnpd %xmm0, %xmm1
+; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ugt_inverse_x:
@@ -582,7 +556,6 @@ define double @ugt_inverse_x(double %x) {
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp ugt double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
@@ -591,10 +564,10 @@ define double @ugt_inverse_x(double %x) {
define double @ult_inverse_x(double %x) {
; STRICT-LABEL: ult_inverse_x:
; STRICT: # BB#0:
-; STRICT-NEXT: xorps %xmm1, %xmm1
+; STRICT-NEXT: xorpd %xmm1, %xmm1
; STRICT-NEXT: cmpnlesd %xmm0, %xmm1
-; STRICT-NEXT: andnps %xmm0, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
+; STRICT-NEXT: andnpd %xmm0, %xmm1
+; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ult_inverse_x:
@@ -609,7 +582,6 @@ define double @ult_inverse_x(double %x) {
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp ult double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
@@ -628,7 +600,6 @@ define double @uge_x(double %x) {
; RELAX-NEXT: xorpd %xmm1, %xmm1
; RELAX-NEXT: maxsd %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%c = fcmp uge double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
@@ -647,7 +618,6 @@ define double @ule_x(double %x) {
; RELAX-NEXT: xorpd %xmm1, %xmm1
; RELAX-NEXT: minsd %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%c = fcmp ule double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
@@ -672,7 +642,6 @@ define double @uge_inverse_x(double %x) {
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp uge double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
@@ -697,7 +666,6 @@ define double @ule_inverse_x(double %x) {
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp ule double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
@@ -708,7 +676,6 @@ define double @ogt_y(double %x) {
; ALL: # BB#0:
; ALL-NEXT: maxsd {{.*}}(%rip), %xmm0
; ALL-NEXT: retq
-;
%c = fcmp ogt double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
@@ -719,7 +686,6 @@ define double @olt_y(double %x) {
; ALL: # BB#0:
; ALL-NEXT: minsd {{.*}}(%rip), %xmm0
; ALL-NEXT: retq
-;
%c = fcmp olt double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
@@ -744,7 +710,6 @@ define double @ogt_inverse_y(double %x) {
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp ogt double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
@@ -769,7 +734,6 @@ define double @olt_inverse_y(double %x) {
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp olt double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
@@ -779,18 +743,17 @@ define double @oge_y(double %x) {
; STRICT-LABEL: oge_y:
; STRICT: # BB#0:
; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; STRICT-NEXT: movaps %xmm1, %xmm2
+; STRICT-NEXT: movapd %xmm1, %xmm2
; STRICT-NEXT: cmplesd %xmm0, %xmm2
-; STRICT-NEXT: andps %xmm2, %xmm0
-; STRICT-NEXT: andnps %xmm1, %xmm2
-; STRICT-NEXT: orps %xmm2, %xmm0
+; STRICT-NEXT: andpd %xmm2, %xmm0
+; STRICT-NEXT: andnpd %xmm1, %xmm2
+; STRICT-NEXT: orpd %xmm2, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: oge_y:
; RELAX: # BB#0:
; RELAX-NEXT: maxsd {{.*}}(%rip), %xmm0
; RELAX-NEXT: retq
-;
%c = fcmp oge double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
@@ -800,19 +763,18 @@ define double @ole_y(double %x) {
; STRICT-LABEL: ole_y:
; STRICT: # BB#0:
; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; STRICT-NEXT: movaps %xmm0, %xmm1
+; STRICT-NEXT: movapd %xmm0, %xmm1
; STRICT-NEXT: cmplesd %xmm2, %xmm1
-; STRICT-NEXT: andps %xmm1, %xmm0
-; STRICT-NEXT: andnps %xmm2, %xmm1
-; STRICT-NEXT: orps %xmm0, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
+; STRICT-NEXT: andpd %xmm1, %xmm0
+; STRICT-NEXT: andnpd %xmm2, %xmm1
+; STRICT-NEXT: orpd %xmm0, %xmm1
+; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: ole_y:
; RELAX: # BB#0:
; RELAX-NEXT: minsd {{.*}}(%rip), %xmm0
; RELAX-NEXT: retq
-;
%c = fcmp ole double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
@@ -822,12 +784,12 @@ define double @oge_inverse_y(double %x) {
; STRICT-LABEL: oge_inverse_y:
; STRICT: # BB#0:
; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; STRICT-NEXT: movaps %xmm2, %xmm1
+; STRICT-NEXT: movapd %xmm2, %xmm1
; STRICT-NEXT: cmplesd %xmm0, %xmm1
-; STRICT-NEXT: andps %xmm1, %xmm2
-; STRICT-NEXT: andnps %xmm0, %xmm1
-; STRICT-NEXT: orps %xmm2, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
+; STRICT-NEXT: andpd %xmm1, %xmm2
+; STRICT-NEXT: andnpd %xmm0, %xmm1
+; STRICT-NEXT: orpd %xmm2, %xmm1
+; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: oge_inverse_y:
@@ -841,7 +803,6 @@ define double @oge_inverse_y(double %x) {
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp oge double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
@@ -851,12 +812,12 @@ define double @ole_inverse_y(double %x) {
; STRICT-LABEL: ole_inverse_y:
; STRICT: # BB#0:
; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; STRICT-NEXT: movaps %xmm0, %xmm1
+; STRICT-NEXT: movapd %xmm0, %xmm1
; STRICT-NEXT: cmplesd %xmm2, %xmm1
-; STRICT-NEXT: andps %xmm1, %xmm2
-; STRICT-NEXT: andnps %xmm0, %xmm1
-; STRICT-NEXT: orps %xmm2, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
+; STRICT-NEXT: andpd %xmm1, %xmm2
+; STRICT-NEXT: andnpd %xmm0, %xmm1
+; STRICT-NEXT: orpd %xmm2, %xmm1
+; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ole_inverse_y:
@@ -870,7 +831,6 @@ define double @ole_inverse_y(double %x) {
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp ole double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
@@ -880,19 +840,18 @@ define double @ugt_y(double %x) {
; STRICT-LABEL: ugt_y:
; STRICT: # BB#0:
; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; STRICT-NEXT: movaps %xmm0, %xmm1
+; STRICT-NEXT: movapd %xmm0, %xmm1
; STRICT-NEXT: cmpnlesd %xmm2, %xmm1
-; STRICT-NEXT: andps %xmm1, %xmm0
-; STRICT-NEXT: andnps %xmm2, %xmm1
-; STRICT-NEXT: orps %xmm0, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
+; STRICT-NEXT: andpd %xmm1, %xmm0
+; STRICT-NEXT: andnpd %xmm2, %xmm1
+; STRICT-NEXT: orpd %xmm0, %xmm1
+; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: ugt_y:
; RELAX: # BB#0:
; RELAX-NEXT: maxsd {{.*}}(%rip), %xmm0
; RELAX-NEXT: retq
-;
%c = fcmp ugt double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
@@ -902,18 +861,17 @@ define double @ult_y(double %x) {
; STRICT-LABEL: ult_y:
; STRICT: # BB#0:
; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; STRICT-NEXT: movaps %xmm1, %xmm2
+; STRICT-NEXT: movapd %xmm1, %xmm2
; STRICT-NEXT: cmpnlesd %xmm0, %xmm2
-; STRICT-NEXT: andps %xmm2, %xmm0
-; STRICT-NEXT: andnps %xmm1, %xmm2
-; STRICT-NEXT: orps %xmm2, %xmm0
+; STRICT-NEXT: andpd %xmm2, %xmm0
+; STRICT-NEXT: andnpd %xmm1, %xmm2
+; STRICT-NEXT: orpd %xmm2, %xmm0
; STRICT-NEXT: retq
;
; RELAX-LABEL: ult_y:
; RELAX: # BB#0:
; RELAX-NEXT: minsd {{.*}}(%rip), %xmm0
; RELAX-NEXT: retq
-;
%c = fcmp ult double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
@@ -923,12 +881,12 @@ define double @ugt_inverse_y(double %x) {
; STRICT-LABEL: ugt_inverse_y:
; STRICT: # BB#0:
; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; STRICT-NEXT: movaps %xmm0, %xmm1
+; STRICT-NEXT: movapd %xmm0, %xmm1
; STRICT-NEXT: cmpnlesd %xmm2, %xmm1
-; STRICT-NEXT: andps %xmm1, %xmm2
-; STRICT-NEXT: andnps %xmm0, %xmm1
-; STRICT-NEXT: orps %xmm2, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
+; STRICT-NEXT: andpd %xmm1, %xmm2
+; STRICT-NEXT: andnpd %xmm0, %xmm1
+; STRICT-NEXT: orpd %xmm2, %xmm1
+; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ugt_inverse_y:
@@ -942,7 +900,6 @@ define double @ugt_inverse_y(double %x) {
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp ugt double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
@@ -952,12 +909,12 @@ define double @ult_inverse_y(double %x) {
; STRICT-LABEL: ult_inverse_y:
; STRICT: # BB#0:
; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; STRICT-NEXT: movaps %xmm2, %xmm1
+; STRICT-NEXT: movapd %xmm2, %xmm1
; STRICT-NEXT: cmpnlesd %xmm0, %xmm1
-; STRICT-NEXT: andps %xmm1, %xmm2
-; STRICT-NEXT: andnps %xmm0, %xmm1
-; STRICT-NEXT: orps %xmm2, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
+; STRICT-NEXT: andpd %xmm1, %xmm2
+; STRICT-NEXT: andnpd %xmm0, %xmm1
+; STRICT-NEXT: orpd %xmm2, %xmm1
+; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
; UNSAFE-LABEL: ult_inverse_y:
@@ -971,7 +928,6 @@ define double @ult_inverse_y(double %x) {
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp ult double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
@@ -989,7 +945,6 @@ define double @uge_y(double %x) {
; RELAX: # BB#0:
; RELAX-NEXT: maxsd {{.*}}(%rip), %xmm0
; RELAX-NEXT: retq
-;
%c = fcmp uge double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
@@ -1007,7 +962,6 @@ define double @ule_y(double %x) {
; RELAX: # BB#0:
; RELAX-NEXT: minsd {{.*}}(%rip), %xmm0
; RELAX-NEXT: retq
-;
%c = fcmp ule double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
@@ -1030,7 +984,6 @@ define double @uge_inverse_y(double %x) {
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp uge double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
@@ -1053,7 +1006,6 @@ define double @ule_inverse_y(double %x) {
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%c = fcmp ule double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
@@ -1080,7 +1032,6 @@ define double @clampTo3k_a(double %x) {
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%t0 = fcmp ogt double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
@@ -1103,7 +1054,6 @@ define double @clampTo3k_b(double %x) {
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%t0 = fcmp uge double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
@@ -1128,7 +1078,6 @@ define double @clampTo3k_c(double %x) {
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%t0 = fcmp olt double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
@@ -1151,7 +1100,6 @@ define double @clampTo3k_d(double %x) {
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%t0 = fcmp ule double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
@@ -1176,7 +1124,6 @@ define double @clampTo3k_e(double %x) {
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%t0 = fcmp olt double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
@@ -1199,7 +1146,6 @@ define double @clampTo3k_f(double %x) {
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%t0 = fcmp ule double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
@@ -1224,7 +1170,6 @@ define double @clampTo3k_g(double %x) {
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%t0 = fcmp ogt double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
@@ -1247,7 +1192,6 @@ define double @clampTo3k_h(double %x) {
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
-;
%t0 = fcmp uge double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
@@ -1259,7 +1203,7 @@ define <2 x double> @test_maxpd(<2 x double> %x, <2 x double> %y) {
; STRICT-NEXT: movapd %xmm0, %xmm2
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: cmplepd %xmm2, %xmm0
-; STRICT-NEXT: blendvpd %xmm2, %xmm1
+; STRICT-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
@@ -1267,7 +1211,6 @@ define <2 x double> @test_maxpd(<2 x double> %x, <2 x double> %y) {
; RELAX: # BB#0:
; RELAX-NEXT: maxpd %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%max_is_x = fcmp oge <2 x double> %x, %y
%max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
ret <2 x double> %max
@@ -1278,7 +1221,7 @@ define <2 x double> @test_minpd(<2 x double> %x, <2 x double> %y) {
; STRICT: # BB#0:
; STRICT-NEXT: movapd %xmm0, %xmm2
; STRICT-NEXT: cmplepd %xmm1, %xmm0
-; STRICT-NEXT: blendvpd %xmm2, %xmm1
+; STRICT-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
;
@@ -1286,7 +1229,6 @@ define <2 x double> @test_minpd(<2 x double> %x, <2 x double> %y) {
; RELAX: # BB#0:
; RELAX-NEXT: minpd %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%min_is_x = fcmp ole <2 x double> %x, %y
%min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
ret <2 x double> %min
@@ -1298,7 +1240,7 @@ define <4 x float> @test_maxps(<4 x float> %x, <4 x float> %y) {
; STRICT-NEXT: movaps %xmm0, %xmm2
; STRICT-NEXT: movaps %xmm1, %xmm0
; STRICT-NEXT: cmpleps %xmm2, %xmm0
-; STRICT-NEXT: blendvps %xmm2, %xmm1
+; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
; STRICT-NEXT: movaps %xmm1, %xmm0
; STRICT-NEXT: retq
;
@@ -1306,7 +1248,6 @@ define <4 x float> @test_maxps(<4 x float> %x, <4 x float> %y) {
; RELAX: # BB#0:
; RELAX-NEXT: maxps %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%max_is_x = fcmp oge <4 x float> %x, %y
%max = select <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y
ret <4 x float> %max
@@ -1317,7 +1258,7 @@ define <4 x float> @test_minps(<4 x float> %x, <4 x float> %y) {
; STRICT: # BB#0:
; STRICT-NEXT: movaps %xmm0, %xmm2
; STRICT-NEXT: cmpleps %xmm1, %xmm0
-; STRICT-NEXT: blendvps %xmm2, %xmm1
+; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
; STRICT-NEXT: movaps %xmm1, %xmm0
; STRICT-NEXT: retq
;
@@ -1325,7 +1266,6 @@ define <4 x float> @test_minps(<4 x float> %x, <4 x float> %y) {
; RELAX: # BB#0:
; RELAX-NEXT: minps %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%min_is_x = fcmp ole <4 x float> %x, %y
%min = select <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y
ret <4 x float> %min
@@ -1337,9 +1277,7 @@ define <2 x float> @test_maxps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
; STRICT-NEXT: movaps %xmm0, %xmm2
; STRICT-NEXT: movaps %xmm1, %xmm0
; STRICT-NEXT: cmpleps %xmm2, %xmm0
-; STRICT-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],zero,xmm0[1]
-; STRICT-NEXT: pslld $31, %xmm0
-; STRICT-NEXT: blendvps %xmm2, %xmm1
+; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
; STRICT-NEXT: movaps %xmm1, %xmm0
; STRICT-NEXT: retq
;
@@ -1347,7 +1285,6 @@ define <2 x float> @test_maxps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
; RELAX: # BB#0:
; RELAX-NEXT: maxps %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%max_is_x = fcmp oge <2 x float> %x, %y
%max = select <2 x i1> %max_is_x, <2 x float> %x, <2 x float> %y
ret <2 x float> %max
@@ -1358,9 +1295,7 @@ define <2 x float> @test_minps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
; STRICT: # BB#0:
; STRICT-NEXT: movaps %xmm0, %xmm2
; STRICT-NEXT: cmpleps %xmm1, %xmm0
-; STRICT-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],zero,xmm0[1]
-; STRICT-NEXT: pslld $31, %xmm0
-; STRICT-NEXT: blendvps %xmm2, %xmm1
+; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
; STRICT-NEXT: movaps %xmm1, %xmm0
; STRICT-NEXT: retq
;
@@ -1368,7 +1303,6 @@ define <2 x float> @test_minps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
; RELAX: # BB#0:
; RELAX-NEXT: minps %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%min_is_x = fcmp ole <2 x float> %x, %y
%min = select <2 x i1> %min_is_x, <2 x float> %x, <2 x float> %y
ret <2 x float> %min
@@ -1380,7 +1314,7 @@ define <3 x float> @test_maxps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
; STRICT-NEXT: movaps %xmm0, %xmm2
; STRICT-NEXT: movaps %xmm1, %xmm0
; STRICT-NEXT: cmpleps %xmm2, %xmm0
-; STRICT-NEXT: blendvps %xmm2, %xmm1
+; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
; STRICT-NEXT: movaps %xmm1, %xmm0
; STRICT-NEXT: retq
;
@@ -1388,7 +1322,6 @@ define <3 x float> @test_maxps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
; RELAX: # BB#0:
; RELAX-NEXT: maxps %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%max_is_x = fcmp oge <3 x float> %x, %y
%max = select <3 x i1> %max_is_x, <3 x float> %x, <3 x float> %y
ret <3 x float> %max
@@ -1399,7 +1332,7 @@ define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
; STRICT: # BB#0:
; STRICT-NEXT: movaps %xmm0, %xmm2
; STRICT-NEXT: cmpleps %xmm1, %xmm0
-; STRICT-NEXT: blendvps %xmm2, %xmm1
+; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
; STRICT-NEXT: movaps %xmm1, %xmm0
; STRICT-NEXT: retq
;
@@ -1407,7 +1340,6 @@ define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
; RELAX: # BB#0:
; RELAX-NEXT: minps %xmm1, %xmm0
; RELAX-NEXT: retq
-;
%min_is_x = fcmp ole <3 x float> %x, %y
%min = select <3 x i1> %min_is_x, <3 x float> %x, <3 x float> %y
ret <3 x float> %min
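The blendvps/blendvpd updates throughout this file track the assembler now printing SSE4.1's implicit %xmm0 mask register as an explicit first operand; the hunks that also drop insertps/pslld lines reflect the compare mask being usable directly, without re-sign-extending it before the blend. A short sketch of the lowered max pattern, assuming %xmm0 and %xmm1 hold y and %xmm2 holds x (illustrative assembly, not taken verbatim from the commit):

  cmplepd %xmm2, %xmm0           # %xmm0 = per-element mask of (y <= x)
  blendvpd %xmm0, %xmm2, %xmm1   # %xmm1 = mask ? x (%xmm2) : y (%xmm1)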
diff --git a/test/CodeGen/X86/sse-regcall.ll b/test/CodeGen/X86/sse-regcall.ll
index b44e544d83c1..862b9cc92f6c 100644
--- a/test/CodeGen/X86/sse-regcall.ll
+++ b/test/CodeGen/X86/sse-regcall.ll
@@ -37,48 +37,42 @@ define x86_regcallcc i1 @test_CallargReti1(i1 %a) {
}
; WIN64-LABEL: testf32_inp
-; WIN64: movaps {{%xmm(1[2-5])}}, {{.*(%rsp).*}} {{#+}} 16-byte Spill
-; WIN64: movaps {{%xmm(1[2-5])}}, {{.*(%rsp).*}} {{#+}} 16-byte Spill
-; WIN64: movaps {{%xmm(1[2-5])}}, {{.*(%rsp).*}} {{#+}} 16-byte Spill
-; WIN64: movaps {{%xmm(1[2-5])}}, {{.*(%rsp).*}} {{#+}} 16-byte Spill
+; WIN64: movaps {{%xmm(1[2-5])}}, {{.*(%r(b|s)p).*}} {{#+}} 16-byte Spill
+; WIN64: movaps {{%xmm(1[2-5])}}, {{.*(%r(b|s)p).*}} {{#+}} 16-byte Spill
+; WIN64: movaps {{%xmm(1[2-5])}}, {{.*(%r(b|s)p).*}} {{#+}} 16-byte Spill
+; WIN64: movaps {{%xmm(1[2-5])}}, {{.*(%r(b|s)p).*}} {{#+}} 16-byte Spill
; WIN64: {{.*}} {{%xmm([0-9]|1[0-1])}}, {{%xmm(1[2-5])}}
; WIN64: {{.*}} {{%xmm([0-9]|1[0-1])}}, {{%xmm(1[2-5])}}
; WIN64: {{.*}} {{%xmm([0-9]|1[0-1])}}, {{%xmm(1[2-5])}}
; WIN64: {{.*}} {{%xmm([0-9]|1[0-1])}}, {{%xmm(1[2-5])}}
-; WIN64: movaps {{.*(%rsp).*}}, {{%xmm(1[2-5])}} {{#+}} 16-byte Reload
-; WIN64: movaps {{.*(%rsp).*}}, {{%xmm(1[2-5])}} {{#+}} 16-byte Reload
-; WIN64: movaps {{.*(%rsp).*}}, {{%xmm(1[2-5])}} {{#+}} 16-byte Reload
-; WIN64: movaps {{.*(%rsp).*}}, {{%xmm(1[2-5])}} {{#+}} 16-byte Reload
+; WIN64: movaps {{.*(%r(b|s)p).*}}, {{%xmm(1[2-5])}} {{#+}} 16-byte Reload
+; WIN64: movaps {{.*(%r(b|s)p).*}}, {{%xmm(1[2-5])}} {{#+}} 16-byte Reload
+; WIN64: movaps {{.*(%r(b|s)p).*}}, {{%xmm(1[2-5])}} {{#+}} 16-byte Reload
+; WIN64: movaps {{.*(%r(b|s)p).*}}, {{%xmm(1[2-5])}} {{#+}} 16-byte Reload
; WIN64: retq
; WIN32-LABEL: testf32_inp
-; WIN32: movaps {{%xmm([4-7])}}, {{.*(%ebp).*}} {{#+}} 16-byte Spill
-; WIN32: movaps {{%xmm([4-7])}}, {{.*(%ebp).*}} {{#+}} 16-byte Spill
-; WIN32: movaps {{%xmm([4-7])}}, {{.*(%ebp).*}} {{#+}} 16-byte Spill
-; WIN32: movaps {{%xmm([4-7])}}, {{.*(%ebp).*}} {{#+}} 16-byte Spill
+; WIN32: movaps {{%xmm([0-7])}}, {{.*(%e(b|s)p).*}} {{#+}} 16-byte Spill
; WIN32: {{.*}} {{%xmm[0-7]}}, {{%xmm[4-7]}}
; WIN32: {{.*}} {{%xmm[0-7]}}, {{%xmm[4-7]}}
; WIN32: {{.*}} {{%xmm[0-7]}}, {{%xmm[4-7]}}
; WIN32: {{.*}} {{%xmm[0-7]}}, {{%xmm[4-7]}}
-; WIN32: movaps {{.*(%ebp).*}}, {{%xmm([4-7])}} {{#+}} 16-byte Reload
-; WIN32: movaps {{.*(%ebp).*}}, {{%xmm([4-7])}} {{#+}} 16-byte Reload
-; WIN32: movaps {{.*(%ebp).*}}, {{%xmm([4-7])}} {{#+}} 16-byte Reload
-; WIN32: movaps {{.*(%ebp).*}}, {{%xmm([4-7])}} {{#+}} 16-byte Reload
+; WIN32: movaps {{.*(%e(b|s)p).*}}, {{%xmm([0-7])}} {{#+}} 16-byte Reload
; WIN32: retl
; LINUXOSX-LABEL: testf32_inp
-; LINUXOSX: movaps {{%xmm(1[2-5])}}, {{.*(%rsp).*}} {{#+}} 16-byte Spill
-; LINUXOSX: movaps {{%xmm(1[2-5])}}, {{.*(%rsp).*}} {{#+}} 16-byte Spill
-; LINUXOSX: movaps {{%xmm(1[2-5])}}, {{.*(%rsp).*}} {{#+}} 16-byte Spill
-; LINUXOSX: movaps {{%xmm(1[2-5])}}, {{.*(%rsp).*}} {{#+}} 16-byte Spill
+; LINUXOSX: movaps {{%xmm(1[2-5])}}, {{.*(%r(b|s)p).*}} {{#+}} 16-byte Spill
+; LINUXOSX: movaps {{%xmm(1[2-5])}}, {{.*(%r(b|s)p).*}} {{#+}} 16-byte Spill
+; LINUXOSX: movaps {{%xmm(1[2-5])}}, {{.*(%r(b|s)p).*}} {{#+}} 16-byte Spill
+; LINUXOSX: movaps {{%xmm(1[2-5])}}, {{.*(%r(b|s)p).*}} {{#+}} 16-byte Spill
; LINUXOSX: {{.*}} {{%xmm([0-9]|1[0-1])}}, {{%xmm(1[2-5])}}
; LINUXOSX: {{.*}} {{%xmm([0-9]|1[0-1])}}, {{%xmm(1[2-5])}}
; LINUXOSX: {{.*}} {{%xmm([0-9]|1[0-1])}}, {{%xmm(1[2-5])}}
; LINUXOSX: {{.*}} {{%xmm([0-9]|1[0-1])}}, {{%xmm(1[2-5])}}
-; LINUXOSX: movaps {{.*(%rsp).*}}, {{%xmm(1[2-5])}} {{#+}} 16-byte Reload
-; LINUXOSX: movaps {{.*(%rsp).*}}, {{%xmm(1[2-5])}} {{#+}} 16-byte Reload
-; LINUXOSX: movaps {{.*(%rsp).*}}, {{%xmm(1[2-5])}} {{#+}} 16-byte Reload
-; LINUXOSX: movaps {{.*(%rsp).*}}, {{%xmm(1[2-5])}} {{#+}} 16-byte Reload
+; LINUXOSX: movaps {{.*(%r(b|s)p).*}}, {{%xmm(1[2-5])}} {{#+}} 16-byte Reload
+; LINUXOSX: movaps {{.*(%r(b|s)p).*}}, {{%xmm(1[2-5])}} {{#+}} 16-byte Reload
+; LINUXOSX: movaps {{.*(%r(b|s)p).*}}, {{%xmm(1[2-5])}} {{#+}} 16-byte Reload
+; LINUXOSX: movaps {{.*(%r(b|s)p).*}}, {{%xmm(1[2-5])}} {{#+}} 16-byte Reload
; LINUXOSX: retq
;test calling conventions - input parameters, callee saved XMMs
@@ -93,10 +87,6 @@ define x86_regcallcc <16 x float> @testf32_inp(<16 x float> %a, <16 x float> %b,
; WIN32-LABEL: testi32_inp
; WIN32: pushl {{%e(si|di|bx|bp)}}
; WIN32: pushl {{%e(si|di|bx|bp)}}
-; WIN32: pushl {{%e(si|di|bx|bp)}}
-; WIN32: pushl {{%e(si|di|bx|bp)}}
-; WIN32: popl {{%e(si|di|bx|bp)}}
-; WIN32: popl {{%e(si|di|bx|bp)}}
; WIN32: popl {{%e(si|di|bx|bp)}}
; WIN32: popl {{%e(si|di|bx|bp)}}
; WIN32: retl
@@ -105,10 +95,6 @@ define x86_regcallcc <16 x float> @testf32_inp(<16 x float> %a, <16 x float> %b,
; WIN64: pushq {{%r(bp|bx|1[0-5])}}
; WIN64: pushq {{%r(bp|bx|1[0-5])}}
; WIN64: pushq {{%r(bp|bx|1[0-5])}}
-; WIN64: pushq {{%r(bp|bx|1[0-5])}}
-; WIN64: pushq {{%r(bp|bx|1[0-5])}}
-; WIN64: popq {{%r(bp|bx|1[0-5])}}
-; WIN64: popq {{%r(bp|bx|1[0-5])}}
; WIN64: popq {{%r(bp|bx|1[0-5])}}
; WIN64: popq {{%r(bp|bx|1[0-5])}}
; WIN64: popq {{%r(bp|bx|1[0-5])}}
@@ -117,10 +103,6 @@ define x86_regcallcc <16 x float> @testf32_inp(<16 x float> %a, <16 x float> %b,
; LINUXOSX-LABEL: testi32_inp
; LINUXOSX: pushq {{%r(bp|bx|1[2-5])}}
; LINUXOSX: pushq {{%r(bp|bx|1[2-5])}}
-; LINUXOSX: pushq {{%r(bp|bx|1[2-5])}}
-; LINUXOSX: pushq {{%r(bp|bx|1[2-5])}}
-; LINUXOSX: popq {{%r(bp|bx|1[2-5])}}
-; LINUXOSX: popq {{%r(bp|bx|1[2-5])}}
; LINUXOSX: popq {{%r(bp|bx|1[2-5])}}
; LINUXOSX: popq {{%r(bp|bx|1[2-5])}}
; LINUXOSX: retq
diff --git a/test/CodeGen/X86/sse1.ll b/test/CodeGen/X86/sse1.ll
index 9488d6d26056..dfc1aefd31a6 100644
--- a/test/CodeGen/X86/sse1.ll
+++ b/test/CodeGen/X86/sse1.ll
@@ -60,7 +60,13 @@ define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) {
; X32-NEXT: xorps %xmm1, %xmm1
; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X32-NEXT: jne .LBB1_5
-; X32-NEXT: jmp .LBB1_4
+; X32-NEXT: .LBB1_4:
+; X32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
+; X32-NEXT: jne .LBB1_8
+; X32-NEXT: .LBB1_7:
+; X32-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; X32-NEXT: jmp .LBB1_9
; X32-NEXT: .LBB1_1:
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
@@ -68,17 +74,9 @@ define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) {
; X32-NEXT: .LBB1_5: # %entry
; X32-NEXT: xorps %xmm2, %xmm2
; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
-; X32-NEXT: jne .LBB1_8
-; X32-NEXT: jmp .LBB1_7
-; X32-NEXT: .LBB1_4:
-; X32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X32-NEXT: je .LBB1_7
; X32-NEXT: .LBB1_8: # %entry
; X32-NEXT: xorps %xmm3, %xmm3
-; X32-NEXT: jmp .LBB1_9
-; X32-NEXT: .LBB1_7:
-; X32-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; X32-NEXT: .LBB1_9: # %entry
; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X32-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
@@ -99,7 +97,13 @@ define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) {
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: testl %edx, %edx
; X64-NEXT: jne .LBB1_5
-; X64-NEXT: jmp .LBB1_4
+; X64-NEXT: .LBB1_4:
+; X64-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X64-NEXT: testl %r8d, %r8d
+; X64-NEXT: jne .LBB1_8
+; X64-NEXT: .LBB1_7:
+; X64-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; X64-NEXT: jmp .LBB1_9
; X64-NEXT: .LBB1_1:
; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: testl %edx, %edx
@@ -107,17 +111,9 @@ define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) {
; X64-NEXT: .LBB1_5: # %entry
; X64-NEXT: xorps %xmm2, %xmm2
; X64-NEXT: testl %r8d, %r8d
-; X64-NEXT: jne .LBB1_8
-; X64-NEXT: jmp .LBB1_7
-; X64-NEXT: .LBB1_4:
-; X64-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X64-NEXT: testl %r8d, %r8d
; X64-NEXT: je .LBB1_7
; X64-NEXT: .LBB1_8: # %entry
; X64-NEXT: xorps %xmm3, %xmm3
-; X64-NEXT: jmp .LBB1_9
-; X64-NEXT: .LBB1_7:
-; X64-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; X64-NEXT: .LBB1_9: # %entry
; X64-NEXT: testl %esi, %esi
; X64-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
@@ -215,7 +211,7 @@ define <4 x i32> @PR30512(<4 x i32> %x, <4 x i32> %y) nounwind {
ret <4 x i32> %zext
}
-; Fragile test warning - we need to induce the generation of a vselect 
+; Fragile test warning - we need to induce the generation of a vselect
; post-legalization to cause the crash seen in:
; https://llvm.org/bugs/show_bug.cgi?id=31672
; Is there a way to do that without an unsafe/fast sqrt intrinsic call?
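For context on the comment above: under unsafe/fast math the X86 backend can expand a vector sqrt call into an rsqrt estimate refined by a Newton-Raphson step, and that expansion introduces a select (forcing zero inputs to yield zero) only after type legalization, which is what reproduces the crash in PR31672. A minimal sketch of such an inducing function; the attribute spellings are assumptions, and this is not the committed test:

define <2 x float> @induce_vselect(<2 x float> %x) #0 {
  %r = call fast <2 x float> @llvm.sqrt.v2f32(<2 x float> %x)
  ret <2 x float> %r
}
declare <2 x float> @llvm.sqrt.v2f32(<2 x float>)
attributes #0 = { nounwind "unsafe-fp-math"="true" "reciprocal-estimates"="sqrt,vec-sqrt" }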
diff --git a/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll b/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
index 972a33f13cd0..3071155172e3 100644
--- a/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
@@ -2934,13 +2934,13 @@ define <2 x double> @test_mm_sqrt_sd(<2 x double> %a0, <2 x double> %a1) nounwin
; X32-LABEL: test_mm_sqrt_sd:
; X32: # BB#0:
; X32-NEXT: sqrtsd %xmm0, %xmm1
-; X32-NEXT: movaps %xmm1, %xmm0
+; X32-NEXT: movapd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_sqrt_sd:
; X64: # BB#0:
; X64-NEXT: sqrtsd %xmm0, %xmm1
-; X64-NEXT: movaps %xmm1, %xmm0
+; X64-NEXT: movapd %xmm1, %xmm0
; X64-NEXT: retq
%call = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %a0)
%ext0 = extractelement <2 x double> %call, i32 0
diff --git a/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll b/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
index 2fc0d94a1d8d..13911eeea6c4 100644
--- a/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
+++ b/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
@@ -98,8 +98,8 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_sse2_storeu_pd:
; CHECK: ## BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: addpd %xmm0, %xmm1
; CHECK-NEXT: movupd %xmm1, (%eax)
; CHECK-NEXT: retl
@@ -147,7 +147,6 @@ define <16 x i8> @max_epu8(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK: ## BB#0:
; CHECK-NEXT: pmaxub %xmm1, %xmm0
; CHECK-NEXT: retl
-;
%res = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %a0, <16 x i8> %a1)
ret <16 x i8> %res
}
@@ -158,7 +157,6 @@ define <16 x i8> @min_epu8(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK: ## BB#0:
; CHECK-NEXT: pminub %xmm1, %xmm0
; CHECK-NEXT: retl
-;
%res = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %a0, <16 x i8> %a1)
ret <16 x i8> %res
}
@@ -169,7 +167,6 @@ define <8 x i16> @max_epi16(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK: ## BB#0:
; CHECK-NEXT: pmaxsw %xmm1, %xmm0
; CHECK-NEXT: retl
-;
%res = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %a0, <8 x i16> %a1)
ret <8 x i16> %res
}
@@ -180,7 +177,6 @@ define <8 x i16> @min_epi16(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK: ## BB#0:
; CHECK-NEXT: pminsw %xmm1, %xmm0
; CHECK-NEXT: retl
-;
%res = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %a0, <8 x i16> %a1)
ret <8 x i16> %res
}
diff --git a/test/CodeGen/X86/sse2-intrinsics-x86.ll b/test/CodeGen/X86/sse2-intrinsics-x86.ll
index a93ffc6655b7..b0a8744f5d80 100644
--- a/test/CodeGen/X86/sse2-intrinsics-x86.ll
+++ b/test/CodeGen/X86/sse2-intrinsics-x86.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=-avx,+sse2 -show-mc-encoding | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=AVX2
-; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=SKX
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=-avx,+sse2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=VCHECK --check-prefix=AVX2
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=VCHECK --check-prefix=SKX
define <2 x double> @test_x86_sse2_cmp_pd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_cmp_pd:
@@ -587,10 +587,15 @@ define <2 x double> @test_x86_sse2_max_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-NEXT: maxsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5f,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
-; VCHECK-LABEL: test_x86_sse2_max_sd:
-; VCHECK: ## BB#0:
-; VCHECK-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5f,0xc1]
-; VCHECK-NEXT: retl ## encoding: [0xc3]
+; AVX2-LABEL: test_x86_sse2_max_sd:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5f,0xc1]
+; AVX2-NEXT: retl ## encoding: [0xc3]
+;
+; SKX-LABEL: test_x86_sse2_max_sd:
+; SKX: ## BB#0:
+; SKX-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5f,0xc1]
+; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
@@ -624,10 +629,15 @@ define <2 x double> @test_x86_sse2_min_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-NEXT: minsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5d,0xc1]
; SSE-NEXT: retl ## encoding: [0xc3]
;
-; VCHECK-LABEL: test_x86_sse2_min_sd:
-; VCHECK: ## BB#0:
-; VCHECK-NEXT: vminsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5d,0xc1]
-; VCHECK-NEXT: retl ## encoding: [0xc3]
+; AVX2-LABEL: test_x86_sse2_min_sd:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vminsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5d,0xc1]
+; AVX2-NEXT: retl ## encoding: [0xc3]
+;
+; SKX-LABEL: test_x86_sse2_min_sd:
+; SKX: ## BB#0:
+; SKX-NEXT: vminsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5d,0xc1]
+; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
@@ -1502,21 +1512,21 @@ define <2 x double> @test_x86_sse2_sqrt_sd_vec_load(<2 x double>* %a0) {
; SSE-LABEL: test_x86_sse2_sqrt_sd_vec_load:
; SSE: ## BB#0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; SSE-NEXT: movaps (%eax), %xmm0 ## encoding: [0x0f,0x28,0x00]
+; SSE-NEXT: movapd (%eax), %xmm0 ## encoding: [0x66,0x0f,0x28,0x00]
; SSE-NEXT: sqrtsd %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0x51,0xc0]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_sqrt_sd_vec_load:
; AVX2: ## BB#0:
; AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX2-NEXT: vmovaps (%eax), %xmm0 ## encoding: [0xc5,0xf8,0x28,0x00]
+; AVX2-NEXT: vmovapd (%eax), %xmm0 ## encoding: [0xc5,0xf9,0x28,0x00]
; AVX2-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
; AVX2-NEXT: retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_sqrt_sd_vec_load:
; SKX: ## BB#0:
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; SKX-NEXT: vmovaps (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x00]
+; SKX-NEXT: vmovapd (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0x00]
; SKX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
; SKX-NEXT: retl ## encoding: [0xc3]
%a1 = load <2 x double>, <2 x double>* %a0, align 16
@@ -1699,16 +1709,42 @@ define i32 @test_x86_sse2_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) {
declare i32 @llvm.x86.sse2.ucomineq.sd(<2 x double>, <2 x double>) nounwind readnone
define void @test_x86_sse2_pause() {
-; SSE-LABEL: test_x86_sse2_pause:
-; SSE: ## BB#0:
-; SSE-NEXT: pause ## encoding: [0xf3,0x90]
-; SSE-NEXT: retl ## encoding: [0xc3]
-;
-; VCHECK-LABEL: test_x86_sse2_pause:
-; VCHECK: ## BB#0:
-; VCHECK-NEXT: pause ## encoding: [0xf3,0x90]
-; VCHECK-NEXT: retl ## encoding: [0xc3]
+; CHECK-LABEL: test_x86_sse2_pause:
+; CHECK: ## BB#0:
+; CHECK-NEXT: pause ## encoding: [0xf3,0x90]
+; CHECK-NEXT: retl ## encoding: [0xc3]
tail call void @llvm.x86.sse2.pause()
ret void
}
declare void @llvm.x86.sse2.pause() nounwind
+
+define void @lfence() nounwind {
+; CHECK-LABEL: lfence:
+; CHECK: ## BB#0:
+; CHECK-NEXT: lfence ## encoding: [0x0f,0xae,0xe8]
+; CHECK-NEXT: retl ## encoding: [0xc3]
+ tail call void @llvm.x86.sse2.lfence()
+ ret void
+}
+declare void @llvm.x86.sse2.lfence() nounwind
+
+define void @mfence() nounwind {
+; CHECK-LABEL: mfence:
+; CHECK: ## BB#0:
+; CHECK-NEXT: mfence ## encoding: [0x0f,0xae,0xf0]
+; CHECK-NEXT: retl ## encoding: [0xc3]
+ tail call void @llvm.x86.sse2.mfence()
+ ret void
+}
+declare void @llvm.x86.sse2.mfence() nounwind
+
+define void @clflush(i8* %p) nounwind {
+; CHECK-LABEL: clflush:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; CHECK-NEXT: clflush (%eax) ## encoding: [0x0f,0xae,0x38]
+; CHECK-NEXT: retl ## encoding: [0xc3]
+ tail call void @llvm.x86.sse2.clflush(i8* %p)
+ ret void
+}
+declare void @llvm.x86.sse2.clflush(i8*) nounwind
diff --git a/test/CodeGen/X86/sse2-intrinsics-x86_64.ll b/test/CodeGen/X86/sse2-intrinsics-x86_64.ll
new file mode 100644
index 000000000000..cd5e11e12795
--- /dev/null
+++ b/test/CodeGen/X86/sse2-intrinsics-x86_64.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=-avx,+sse2 -show-mc-encoding | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=AVX2
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=SKX
+
+define i64 @test_x86_sse2_cvtsd2si64(<2 x double> %a0) {
+; SSE-LABEL: test_x86_sse2_cvtsd2si64:
+; SSE: ## BB#0:
+; SSE-NEXT: cvtsd2si %xmm0, %rax ## encoding: [0xf2,0x48,0x0f,0x2d,0xc0]
+; SSE-NEXT: retq ## encoding: [0xc3]
+;
+; AVX2-LABEL: test_x86_sse2_cvtsd2si64:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vcvtsd2si %xmm0, %rax ## encoding: [0xc4,0xe1,0xfb,0x2d,0xc0]
+; AVX2-NEXT: retq ## encoding: [0xc3]
+;
+; SKX-LABEL: test_x86_sse2_cvtsd2si64:
+; SKX: ## BB#0:
+; SKX-NEXT: vcvtsd2si %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfb,0x2d,0xc0]
+; SKX-NEXT: retq ## encoding: [0xc3]
+ %res = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %a0) ; <i64> [#uses=1]
+ ret i64 %res
+}
+declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone
+
+
+define <2 x double> @test_x86_sse2_cvtsi642sd(<2 x double> %a0, i64 %a1) {
+; SSE-LABEL: test_x86_sse2_cvtsi642sd:
+; SSE: ## BB#0:
+; SSE-NEXT: cvtsi2sdq %rdi, %xmm0 ## encoding: [0xf2,0x48,0x0f,0x2a,0xc7]
+; SSE-NEXT: retq ## encoding: [0xc3]
+;
+; AVX2-LABEL: test_x86_sse2_cvtsi642sd:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 ## encoding: [0xc4,0xe1,0xfb,0x2a,0xc7]
+; AVX2-NEXT: retq ## encoding: [0xc3]
+;
+; SKX-LABEL: test_x86_sse2_cvtsi642sd:
+; SKX: ## BB#0:
+; SKX-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfb,0x2a,0xc7]
+; SKX-NEXT: retq ## encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> %a0, i64 %a1) ; <<2 x double>> [#uses=1]
+ ret <2 x double> %res
+}
+declare <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double>, i64) nounwind readnone
+
+
+define i64 @test_x86_sse2_cvttsd2si64(<2 x double> %a0) {
+; SSE-LABEL: test_x86_sse2_cvttsd2si64:
+; SSE: ## BB#0:
+; SSE-NEXT: cvttsd2si %xmm0, %rax ## encoding: [0xf2,0x48,0x0f,0x2c,0xc0]
+; SSE-NEXT: retq ## encoding: [0xc3]
+;
+; AVX2-LABEL: test_x86_sse2_cvttsd2si64:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vcvttsd2si %xmm0, %rax ## encoding: [0xc4,0xe1,0xfb,0x2c,0xc0]
+; AVX2-NEXT: retq ## encoding: [0xc3]
+;
+; SKX-LABEL: test_x86_sse2_cvttsd2si64:
+; SKX: ## BB#0:
+; SKX-NEXT: vcvttsd2si %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfb,0x2c,0xc0]
+; SKX-NEXT: retq ## encoding: [0xc3]
+ %res = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %a0) ; <i64> [#uses=1]
+ ret i64 %res
+}
+declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>) nounwind readnone
diff --git a/test/CodeGen/X86/sse3-avx-addsub.ll b/test/CodeGen/X86/sse3-avx-addsub.ll
index 17586a811f40..0e0cf4852568 100644
--- a/test/CodeGen/X86/sse3-avx-addsub.ll
+++ b/test/CodeGen/X86/sse3-avx-addsub.ll
@@ -119,10 +119,11 @@ define <16 x float> @test5(<16 x float> %A, <16 x float> %B) {
;
; AVX512-LABEL: test5:
; AVX512: # BB#0:
-; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm2
-; AVX512-NEXT: vsubps %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vshufps {{.*#+}} zmm0 = zmm0[0,2],zmm2[1,3],zmm0[4,6],zmm2[5,7],zmm0[8,10],zmm2[9,11],zmm0[12,14],zmm2[13,15]
-; AVX512-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[0,2,1,3,4,6,5,7,8,10,9,11,12,14,13,15]
+; AVX512-NEXT: vsubps %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; AVX512-NEXT: kmovw %eax, %k1
+; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512-NEXT: vmovaps %zmm2, %zmm0
; AVX512-NEXT: retq
%add = fadd <16 x float> %A, %B
%sub = fsub <16 x float> %A, %B
diff --git a/test/CodeGen/X86/sse3-intrinsics-x86.ll b/test/CodeGen/X86/sse3-intrinsics-x86.ll
index 362525f24d2a..fd7f59a01579 100644
--- a/test/CodeGen/X86/sse3-intrinsics-x86.ll
+++ b/test/CodeGen/X86/sse3-intrinsics-x86.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=-avx,+sse3 -show-mc-encoding | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=AVX2
-; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=SKX
+; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=-avx,+sse3 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
+; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=VCHECK --check-prefix=AVX2
+; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=VCHECK --check-prefix=SKX
define <2 x double> @test_x86_sse3_addsub_pd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse3_addsub_pd:
@@ -115,3 +115,31 @@ define <16 x i8> @test_x86_sse3_ldu_dq(i8* %a0) {
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8*) nounwind readonly
+
+; Make sure instructions that have no AVX equivalents but are associated with SSEX feature flags still work.
+
+define void @monitor(i8* %P, i32 %E, i32 %H) nounwind {
+; CHECK-LABEL: monitor:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx ## encoding: [0x8b,0x54,0x24,0x0c]
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; CHECK-NEXT: leal (%eax), %eax ## encoding: [0x8d,0x00]
+; CHECK-NEXT: monitor ## encoding: [0x0f,0x01,0xc8]
+; CHECK-NEXT: retl ## encoding: [0xc3]
+ tail call void @llvm.x86.sse3.monitor(i8* %P, i32 %E, i32 %H)
+ ret void
+}
+declare void @llvm.x86.sse3.monitor(i8*, i32, i32) nounwind
+
+define void @mwait(i32 %E, i32 %H) nounwind {
+; CHECK-LABEL: mwait:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
+; CHECK-NEXT: mwait ## encoding: [0x0f,0x01,0xc9]
+; CHECK-NEXT: retl ## encoding: [0xc3]
+ tail call void @llvm.x86.sse3.mwait(i32 %E, i32 %H)
+ ret void
+}
+declare void @llvm.x86.sse3.mwait(i32, i32) nounwind
diff --git a/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll b/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll
index 16868d854df7..f106f7ec5cc1 100644
--- a/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll
@@ -54,7 +54,7 @@ define <2 x i64> @test_mm_blendv_epi8(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a
; X32: # BB#0:
; X32-NEXT: movdqa %xmm0, %xmm3
; X32-NEXT: movaps %xmm2, %xmm0
-; X32-NEXT: pblendvb %xmm1, %xmm3
+; X32-NEXT: pblendvb %xmm0, %xmm1, %xmm3
; X32-NEXT: movdqa %xmm3, %xmm0
; X32-NEXT: retl
;
@@ -62,7 +62,7 @@ define <2 x i64> @test_mm_blendv_epi8(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a
; X64: # BB#0:
; X64-NEXT: movdqa %xmm0, %xmm3
; X64-NEXT: movaps %xmm2, %xmm0
-; X64-NEXT: pblendvb %xmm1, %xmm3
+; X64-NEXT: pblendvb %xmm0, %xmm1, %xmm3
; X64-NEXT: movdqa %xmm3, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -79,7 +79,7 @@ define <2 x double> @test_mm_blendv_pd(<2 x double> %a0, <2 x double> %a1, <2 x
; X32: # BB#0:
; X32-NEXT: movapd %xmm0, %xmm3
; X32-NEXT: movaps %xmm2, %xmm0
-; X32-NEXT: blendvpd %xmm1, %xmm3
+; X32-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; X32-NEXT: movapd %xmm3, %xmm0
; X32-NEXT: retl
;
@@ -87,7 +87,7 @@ define <2 x double> @test_mm_blendv_pd(<2 x double> %a0, <2 x double> %a1, <2 x
; X64: # BB#0:
; X64-NEXT: movapd %xmm0, %xmm3
; X64-NEXT: movaps %xmm2, %xmm0
-; X64-NEXT: blendvpd %xmm1, %xmm3
+; X64-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; X64-NEXT: movapd %xmm3, %xmm0
; X64-NEXT: retq
%res = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
@@ -100,7 +100,7 @@ define <4 x float> @test_mm_blendv_ps(<4 x float> %a0, <4 x float> %a1, <4 x flo
; X32: # BB#0:
; X32-NEXT: movaps %xmm0, %xmm3
; X32-NEXT: movaps %xmm2, %xmm0
-; X32-NEXT: blendvps %xmm1, %xmm3
+; X32-NEXT: blendvps %xmm0, %xmm1, %xmm3
; X32-NEXT: movaps %xmm3, %xmm0
; X32-NEXT: retl
;
@@ -108,7 +108,7 @@ define <4 x float> @test_mm_blendv_ps(<4 x float> %a0, <4 x float> %a1, <4 x flo
; X64: # BB#0:
; X64-NEXT: movaps %xmm0, %xmm3
; X64-NEXT: movaps %xmm2, %xmm0
-; X64-NEXT: blendvps %xmm1, %xmm3
+; X64-NEXT: blendvps %xmm0, %xmm1, %xmm3
; X64-NEXT: movaps %xmm3, %xmm0
; X64-NEXT: retq
%res = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
@@ -898,17 +898,17 @@ define i32 @test_mm_test_all_ones(<2 x i64> %a0) {
; X32-LABEL: test_mm_test_all_ones:
; X32: # BB#0:
; X32-NEXT: pcmpeqd %xmm1, %xmm1
+; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ptest %xmm1, %xmm0
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
+; X32-NEXT: setb %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_test_all_ones:
; X64: # BB#0:
; X64-NEXT: pcmpeqd %xmm1, %xmm1
+; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ptest %xmm1, %xmm0
-; X64-NEXT: sbbl %eax, %eax
-; X64-NEXT: andl $1, %eax
+; X64-NEXT: setb %al
; X64-NEXT: retq
%res = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> <i64 -1, i64 -1>)
ret i32 %res
@@ -956,16 +956,16 @@ declare i32 @llvm.x86.sse41.ptestnzc(<2 x i64>, <2 x i64>) nounwind readnone
define i32 @test_mm_testc_si128(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_testc_si128:
; X32: # BB#0:
+; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ptest %xmm1, %xmm0
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
+; X32-NEXT: setb %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_testc_si128:
; X64: # BB#0:
+; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ptest %xmm1, %xmm0
-; X64-NEXT: sbbl %eax, %eax
-; X64-NEXT: andl $1, %eax
+; X64-NEXT: setb %al
; X64-NEXT: retq
%res = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %a1)
ret i32 %res
diff --git a/test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll b/test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll
index 4f6aa798faf0..26af37e30295 100644
--- a/test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll
+++ b/test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll
@@ -59,6 +59,19 @@ define <4 x float> @test_x86_sse41_insertps(<4 x float> %a0, <4 x float> %a1) {
declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32) nounwind readnone
+define <2 x i64> @test_x86_sse41_movntdqa(<2 x i64>* %a0) {
+; CHECK-LABEL: test_x86_sse41_movntdqa:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movntdqa (%eax), %xmm0
+; CHECK-NEXT: retl
+ %arg0 = bitcast <2 x i64>* %a0 to i8*
+ %res = call <2 x i64> @llvm.x86.sse41.movntdqa(i8* %arg0)
+ ret <2 x i64> %res
+}
+declare <2 x i64> @llvm.x86.sse41.movntdqa(i8*) nounwind readnone
+
+
define <8 x i16> @test_x86_sse41_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_x86_sse41_mpsadbw:
; CHECK: ## BB#0:
diff --git a/test/CodeGen/X86/sse41-intrinsics-x86.ll b/test/CodeGen/X86/sse41-intrinsics-x86.ll
index 321b4e8108b7..3abfcf4d542e 100644
--- a/test/CodeGen/X86/sse41-intrinsics-x86.ll
+++ b/test/CodeGen/X86/sse41-intrinsics-x86.ll
@@ -8,7 +8,7 @@ define <2 x double> @test_x86_sse41_blendvpd(<2 x double> %a0, <2 x double> %a1,
; SSE41: ## BB#0:
; SSE41-NEXT: movapd %xmm0, %xmm3 ## encoding: [0x66,0x0f,0x28,0xd8]
; SSE41-NEXT: movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
-; SSE41-NEXT: blendvpd %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x15,0xd9]
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x15,0xd9]
; SSE41-NEXT: movapd %xmm3, %xmm0 ## encoding: [0x66,0x0f,0x28,0xc3]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
@@ -27,7 +27,7 @@ define <4 x float> @test_x86_sse41_blendvps(<4 x float> %a0, <4 x float> %a1, <4
; SSE41: ## BB#0:
; SSE41-NEXT: movaps %xmm0, %xmm3 ## encoding: [0x0f,0x28,0xd8]
; SSE41-NEXT: movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
-; SSE41-NEXT: blendvps %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x14,0xd9]
+; SSE41-NEXT: blendvps %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x14,0xd9]
; SSE41-NEXT: movaps %xmm3, %xmm0 ## encoding: [0x0f,0x28,0xc3]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
@@ -88,7 +88,7 @@ define <4 x float> @test_x86_sse41_insertps(<4 x float> %a0, <4 x float> %a1) {
;
; SKX-LABEL: test_x86_sse41_insertps:
; SKX: ## BB#0:
-; SKX-NEXT: vinsertps $17, %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0x7d,0x08,0x21,0xc1,0x11]
+; SKX-NEXT: vinsertps $17, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
; SKX-NEXT: ## xmm0 = zero,xmm1[0],xmm0[2,3]
; SKX-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 17) ; <<4 x float>> [#uses=1]
@@ -140,7 +140,7 @@ define <16 x i8> @test_x86_sse41_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8
; SSE41: ## BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm3 ## encoding: [0x66,0x0f,0x6f,0xd8]
; SSE41-NEXT: movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
-; SSE41-NEXT: pblendvb %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x10,0xd9]
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x10,0xd9]
; SSE41-NEXT: movdqa %xmm3, %xmm0 ## encoding: [0x66,0x0f,0x6f,0xc3]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
@@ -362,16 +362,16 @@ declare <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32>, <4 x i32>) nounwind readnone
define i32 @test_x86_sse41_ptestc(<2 x i64> %a0, <2 x i64> %a1) {
; SSE41-LABEL: test_x86_sse41_ptestc:
; SSE41: ## BB#0:
+; SSE41-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE41-NEXT: ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
-; SSE41-NEXT: sbbl %eax, %eax ## encoding: [0x19,0xc0]
-; SSE41-NEXT: andl $1, %eax ## encoding: [0x83,0xe0,0x01]
+; SSE41-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
; SSE41-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse41_ptestc:
; VCHECK: ## BB#0:
+; VCHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; VCHECK-NEXT: vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
-; VCHECK-NEXT: sbbl %eax, %eax ## encoding: [0x19,0xc0]
-; VCHECK-NEXT: andl $1, %eax ## encoding: [0x83,0xe0,0x01]
+; VCHECK-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %a1) ; <i32> [#uses=1]
ret i32 %res
diff --git a/test/CodeGen/X86/sse41.ll b/test/CodeGen/X86/sse41.ll
index 4a009023a7bd..503b9416c8d3 100644
--- a/test/CodeGen/X86/sse41.ll
+++ b/test/CodeGen/X86/sse41.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=sse4.1 -mcpu=penryn | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=sse4.1 -mcpu=penryn | FileCheck %s --check-prefix=X64
@@ -227,16 +228,16 @@ define i32 @ptestz_1(<2 x i64> %t1, <2 x i64> %t2) nounwind {
define i32 @ptestz_2(<2 x i64> %t1, <2 x i64> %t2) nounwind {
; X32-LABEL: ptestz_2:
; X32: ## BB#0:
+; X32-NEXT: xorl %eax, %eax
; X32-NEXT: ptest %xmm1, %xmm0
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
+; X32-NEXT: setb %al
; X32-NEXT: retl
;
; X64-LABEL: ptestz_2:
; X64: ## BB#0:
+; X64-NEXT: xorl %eax, %eax
; X64-NEXT: ptest %xmm1, %xmm0
-; X64-NEXT: sbbl %eax, %eax
-; X64-NEXT: andl $1, %eax
+; X64-NEXT: setb %al
; X64-NEXT: retq
%tmp1 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %t1, <2 x i64> %t2) nounwind readnone
ret i32 %tmp1
@@ -544,13 +545,15 @@ define <4 x float> @shuf_X00X(<4 x float> %x, <4 x float> %a) {
define <4 x float> @shuf_X0YC(<4 x float> %x, <4 x float> %a) {
; X32-LABEL: shuf_X0YC:
; X32: ## BB#0:
-; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; X32-NEXT: xorps %xmm2, %xmm2
+; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[2]
; X32-NEXT: retl
;
; X64-LABEL: shuf_X0YC:
; X64: ## BB#0:
-; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; X64-NEXT: xorps %xmm2, %xmm2
+; X64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[2]
; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
@@ -771,7 +774,7 @@ define <8 x i16> @blendvb_fallback(<8 x i1> %mask, <8 x i16> %x, <8 x i16> %y) {
; X32: ## BB#0:
; X32-NEXT: psllw $15, %xmm0
; X32-NEXT: psraw $15, %xmm0
-; X32-NEXT: pblendvb %xmm1, %xmm2
+; X32-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; X32-NEXT: movdqa %xmm2, %xmm0
; X32-NEXT: retl
;
@@ -779,7 +782,7 @@ define <8 x i16> @blendvb_fallback(<8 x i1> %mask, <8 x i16> %x, <8 x i16> %y) {
; X64: ## BB#0:
; X64-NEXT: psllw $15, %xmm0
; X64-NEXT: psraw $15, %xmm0
-; X64-NEXT: pblendvb %xmm1, %xmm2
+; X64-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; X64-NEXT: movdqa %xmm2, %xmm0
; X64-NEXT: retq
%ret = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %y
@@ -791,12 +794,12 @@ define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocap
; X32-LABEL: insertps_from_vector_load:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: insertps $48, (%{{...}}), {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_vector_load:
; X64: ## BB#0:
-; X64-NEXT: insertps $48, (%{{...}}), {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X64-NEXT: retq
%1 = load <4 x float>, <4 x float>* %pb, align 16
%2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
@@ -809,12 +812,12 @@ define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, <4 x float>
; X32-LABEL: insertps_from_vector_load_offset:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: insertps $32, 4(%{{...}}), {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_vector_load_offset:
; X64: ## BB#0:
-; X64-NEXT: insertps $32, 4(%{{...}}), {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; X64-NEXT: retq
%1 = load <4 x float>, <4 x float>* %pb, align 16
%2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
@@ -828,13 +831,13 @@ define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x floa
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: shll $4, %ecx
-; X32-NEXT: insertps $0, 12(%{{...}},%{{...}}), {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
+; X32-NEXT: insertps {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_vector_load_offset_2:
; X64: ## BB#0:
; X64-NEXT: shlq $4, %rsi
-; X64-NEXT: insertps $0, 12(%{{...}},%{{...}}), {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
+; X64-NEXT: insertps {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
; X64-NEXT: retq
%1 = getelementptr inbounds <4 x float>, <4 x float>* %pb, i64 %index
%2 = load <4 x float>, <4 x float>* %1, align 16
@@ -990,15 +993,14 @@ define void @insertps_pr20411(<4 x i32> %shuffle109, <4 x i32> %shuffle116, i32*
define <4 x float> @insertps_4(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_4:
-; X32: ## BB#0: ## %entry
+; X32: ## BB#0:
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
; X32-NEXT: retl
;
; X64-LABEL: insertps_4:
-; X64: ## BB#0: ## %entry
+; X64: ## BB#0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
; X64-NEXT: retq
-entry:
%vecext = extractelement <4 x float> %A, i32 0
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
%vecinit1 = insertelement <4 x float> %vecinit, float 0.000000e+00, i32 1
@@ -1010,15 +1012,14 @@ entry:
define <4 x float> @insertps_5(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_5:
-; X32: ## BB#0: ## %entry
+; X32: ## BB#0:
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero
; X32-NEXT: retl
;
; X64-LABEL: insertps_5:
-; X64: ## BB#0: ## %entry
+; X64: ## BB#0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero
; X64-NEXT: retq
-entry:
%vecext = extractelement <4 x float> %A, i32 0
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
%vecext1 = extractelement <4 x float> %B, i32 1
@@ -1030,15 +1031,14 @@ entry:
define <4 x float> @insertps_6(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_6:
-; X32: ## BB#0: ## %entry
+; X32: ## BB#0:
; X32-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[1],xmm1[2],zero
; X32-NEXT: retl
;
; X64-LABEL: insertps_6:
-; X64: ## BB#0: ## %entry
+; X64: ## BB#0:
; X64-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[1],xmm1[2],zero
; X64-NEXT: retq
-entry:
%vecext = extractelement <4 x float> %A, i32 1
%vecinit = insertelement <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, float %vecext, i32 1
%vecext1 = extractelement <4 x float> %B, i32 2
@@ -1049,15 +1049,14 @@ entry:
define <4 x float> @insertps_7(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_7:
-; X32: ## BB#0: ## %entry
+; X32: ## BB#0:
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[1],zero
; X32-NEXT: retl
;
; X64-LABEL: insertps_7:
-; X64: ## BB#0: ## %entry
+; X64: ## BB#0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[1],zero
; X64-NEXT: retq
-entry:
%vecext = extractelement <4 x float> %A, i32 0
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
%vecinit1 = insertelement <4 x float> %vecinit, float 0.000000e+00, i32 1
@@ -1069,15 +1068,14 @@ entry:
define <4 x float> @insertps_8(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_8:
-; X32: ## BB#0: ## %entry
+; X32: ## BB#0:
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
; X32-NEXT: retl
;
; X64-LABEL: insertps_8:
-; X64: ## BB#0: ## %entry
+; X64: ## BB#0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
; X64-NEXT: retq
-entry:
%vecext = extractelement <4 x float> %A, i32 0
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
%vecext1 = extractelement <4 x float> %B, i32 0
@@ -1089,17 +1087,16 @@ entry:
define <4 x float> @insertps_9(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_9:
-; X32: ## BB#0: ## %entry
+; X32: ## BB#0:
; X32-NEXT: insertps {{.*#+}} xmm1 = zero,xmm0[0],xmm1[2],zero
; X32-NEXT: movaps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: insertps_9:
-; X64: ## BB#0: ## %entry
+; X64: ## BB#0:
; X64-NEXT: insertps {{.*#+}} xmm1 = zero,xmm0[0],xmm1[2],zero
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
-entry:
%vecext = extractelement <4 x float> %A, i32 0
%vecinit = insertelement <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, float %vecext, i32 1
%vecext1 = extractelement <4 x float> %B, i32 2
@@ -1108,7 +1105,7 @@ entry:
ret <4 x float> %vecinit3
}
-define <4 x float> @insertps_10(<4 x float> %A)
+define <4 x float> @insertps_10(<4 x float> %A) {
; X32-LABEL: insertps_10:
; X32: ## BB#0:
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero
@@ -1118,7 +1115,6 @@ define <4 x float> @insertps_10(<4 x float> %A)
; X64: ## BB#0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero
; X64-NEXT: retq
-{
%vecext = extractelement <4 x float> %A, i32 0
%vecbuild1 = insertelement <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, float %vecext, i32 0
%vecbuild2 = insertelement <4 x float> %vecbuild1, float %vecext, i32 2
@@ -1127,17 +1123,16 @@ define <4 x float> @insertps_10(<4 x float> %A)
define <4 x float> @build_vector_to_shuffle_1(<4 x float> %A) {
; X32-LABEL: build_vector_to_shuffle_1:
-; X32: ## BB#0: ## %entry
+; X32: ## BB#0:
; X32-NEXT: xorps %xmm1, %xmm1
; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; X32-NEXT: retl
;
; X64-LABEL: build_vector_to_shuffle_1:
-; X64: ## BB#0: ## %entry
+; X64: ## BB#0:
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; X64-NEXT: retq
-entry:
%vecext = extractelement <4 x float> %A, i32 1
%vecinit = insertelement <4 x float> zeroinitializer, float %vecext, i32 1
%vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 2
@@ -1147,17 +1142,16 @@ entry:
define <4 x float> @build_vector_to_shuffle_2(<4 x float> %A) {
; X32-LABEL: build_vector_to_shuffle_2:
-; X32: ## BB#0: ## %entry
+; X32: ## BB#0:
; X32-NEXT: xorps %xmm1, %xmm1
; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; X32-NEXT: retl
;
; X64-LABEL: build_vector_to_shuffle_2:
-; X64: ## BB#0: ## %entry
+; X64: ## BB#0:
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; X64-NEXT: retq
-entry:
%vecext = extractelement <4 x float> %A, i32 1
%vecinit = insertelement <4 x float> zeroinitializer, float %vecext, i32 1
%vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 2
diff --git a/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll b/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll
index 53b94e7f0d39..383ab21bd404 100644
--- a/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll
@@ -33,23 +33,27 @@ define i32 @test_mm_cmpestra(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nou
}
declare i32 @llvm.x86.sse42.pcmpestria128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
-define i32 @test_mm_cmpestrc(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) {
+define i32 @test_mm_cmpestrc(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nounwind {
; X32-LABEL: test_mm_cmpestrc:
; X32: # BB#0:
+; X32-NEXT: pushl %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: xorl %ebx, %ebx
; X32-NEXT: pcmpestri $7, %xmm1, %xmm0
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
+; X32-NEXT: setb %bl
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: popl %ebx
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpestrc:
; X64: # BB#0:
+; X64-NEXT: xorl %r8d, %r8d
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl %esi, %edx
; X64-NEXT: pcmpestri $7, %xmm1, %xmm0
-; X64-NEXT: sbbl %eax, %eax
-; X64-NEXT: andl $1, %eax
+; X64-NEXT: setb %r8b
+; X64-NEXT: movl %r8d, %eax
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
%arg2 = bitcast <2 x i64> %a2 to <16 x i8>
@@ -229,16 +233,16 @@ declare i32 @llvm.x86.sse42.pcmpistria128(<16 x i8>, <16 x i8>, i8) nounwind rea
define i32 @test_mm_cmpistrc(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_cmpistrc:
; X32: # BB#0:
+; X32-NEXT: xorl %eax, %eax
; X32-NEXT: pcmpistri $7, %xmm1, %xmm0
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
+; X32-NEXT: setb %al
; X32-NEXT: retl
;
; X64-LABEL: test_mm_cmpistrc:
; X64: # BB#0:
+; X64-NEXT: xorl %eax, %eax
; X64-NEXT: pcmpistri $7, %xmm1, %xmm0
-; X64-NEXT: sbbl %eax, %eax
-; X64-NEXT: andl $1, %eax
+; X64-NEXT: setb %al
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
%arg1 = bitcast <2 x i64> %a1 to <16 x i8>
diff --git a/test/CodeGen/X86/sse42-intrinsics-x86.ll b/test/CodeGen/X86/sse42-intrinsics-x86.ll
index d5d34926fed8..d9e103c48111 100644
--- a/test/CodeGen/X86/sse42-intrinsics-x86.ll
+++ b/test/CodeGen/X86/sse42-intrinsics-x86.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=-avx,+sse4.2 -show-mc-encoding | FileCheck %s --check-prefix=SSE42
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=AVX2
-; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=SKX
+; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=-avx,+sse4.2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=SSE42
+; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=VCHECK --check-prefix=AVX2
+; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=VCHECK --check-prefix=SKX
define i32 @test_x86_sse42_pcmpestri128(<16 x i8> %a0, <16 x i8> %a2) {
; SSE42-LABEL: test_x86_sse42_pcmpestri128:
@@ -95,23 +95,29 @@ define i32 @test_x86_sse42_pcmpestria128(<16 x i8> %a0, <16 x i8> %a2) nounwind
declare i32 @llvm.x86.sse42.pcmpestria128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
-define i32 @test_x86_sse42_pcmpestric128(<16 x i8> %a0, <16 x i8> %a2) {
+define i32 @test_x86_sse42_pcmpestric128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
; SSE42-LABEL: test_x86_sse42_pcmpestric128:
; SSE42: ## BB#0:
+; SSE42-NEXT: pushl %ebx ## encoding: [0x53]
; SSE42-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; SSE42-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; SSE42-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
; SSE42-NEXT: pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
-; SSE42-NEXT: sbbl %eax, %eax ## encoding: [0x19,0xc0]
-; SSE42-NEXT: andl $1, %eax ## encoding: [0x83,0xe0,0x01]
+; SSE42-NEXT: setb %bl ## encoding: [0x0f,0x92,0xc3]
+; SSE42-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
+; SSE42-NEXT: popl %ebx ## encoding: [0x5b]
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse42_pcmpestric128:
; VCHECK: ## BB#0:
+; VCHECK-NEXT: pushl %ebx ## encoding: [0x53]
; VCHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; VCHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
+; VCHECK-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
; VCHECK-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
-; VCHECK-NEXT: sbbl %eax, %eax ## encoding: [0x19,0xc0]
-; VCHECK-NEXT: andl $1, %eax ## encoding: [0x83,0xe0,0x01]
+; VCHECK-NEXT: setb %bl ## encoding: [0x0f,0x92,0xc3]
+; VCHECK-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
+; VCHECK-NEXT: popl %ebx ## encoding: [0x5b]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse42.pcmpestric128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
ret i32 %res
@@ -326,16 +332,16 @@ declare i32 @llvm.x86.sse42.pcmpistria128(<16 x i8>, <16 x i8>, i8) nounwind rea
define i32 @test_x86_sse42_pcmpistric128(<16 x i8> %a0, <16 x i8> %a1) {
; SSE42-LABEL: test_x86_sse42_pcmpistric128:
; SSE42: ## BB#0:
+; SSE42-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE42-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
-; SSE42-NEXT: sbbl %eax, %eax ## encoding: [0x19,0xc0]
-; SSE42-NEXT: andl $1, %eax ## encoding: [0x83,0xe0,0x01]
+; SSE42-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
; SSE42-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: test_x86_sse42_pcmpistric128:
; VCHECK: ## BB#0:
+; VCHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; VCHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
-; VCHECK-NEXT: sbbl %eax, %eax ## encoding: [0x19,0xc0]
-; VCHECK-NEXT: andl $1, %eax ## encoding: [0x83,0xe0,0x01]
+; VCHECK-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
; VCHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse42.pcmpistric128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
ret i32 %res
@@ -435,3 +441,36 @@ define <16 x i8> @test_x86_sse42_pcmpistrm128_load(<16 x i8> %a0, <16 x i8>* %a1
%res = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %1, i8 7) ; <<16 x i8>> [#uses=1]
ret <16 x i8> %res
}
+
+define i32 @crc32_32_8(i32 %a, i8 %b) nounwind {
+; CHECK-LABEL: crc32_32_8:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; CHECK-NEXT: crc32b {{[0-9]+}}(%esp), %eax ## encoding: [0xf2,0x0f,0x38,0xf0,0x44,0x24,0x08]
+; CHECK-NEXT: retl ## encoding: [0xc3]
+ %tmp = call i32 @llvm.x86.sse42.crc32.32.8(i32 %a, i8 %b)
+ ret i32 %tmp
+}
+declare i32 @llvm.x86.sse42.crc32.32.8(i32, i8) nounwind
+
+define i32 @crc32_32_16(i32 %a, i16 %b) nounwind {
+; CHECK-LABEL: crc32_32_16:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; CHECK-NEXT: crc32w {{[0-9]+}}(%esp), %eax ## encoding: [0x66,0xf2,0x0f,0x38,0xf1,0x44,0x24,0x08]
+; CHECK-NEXT: retl ## encoding: [0xc3]
+ %tmp = call i32 @llvm.x86.sse42.crc32.32.16(i32 %a, i16 %b)
+ ret i32 %tmp
+}
+declare i32 @llvm.x86.sse42.crc32.32.16(i32, i16) nounwind
+
+define i32 @crc32_32_32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: crc32_32_32:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; CHECK-NEXT: crc32l {{[0-9]+}}(%esp), %eax ## encoding: [0xf2,0x0f,0x38,0xf1,0x44,0x24,0x08]
+; CHECK-NEXT: retl ## encoding: [0xc3]
+ %tmp = call i32 @llvm.x86.sse42.crc32.32.32(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+declare i32 @llvm.x86.sse42.crc32.32.32(i32, i32) nounwind
diff --git a/test/CodeGen/X86/sse42-intrinsics-x86_64.ll b/test/CodeGen/X86/sse42-intrinsics-x86_64.ll
new file mode 100644
index 000000000000..e90aa455cfd8
--- /dev/null
+++ b/test/CodeGen/X86/sse42-intrinsics-x86_64.ll
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse4.2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=SSE42
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=VCHECK --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=VCHECK --check-prefix=SKX
+
+declare i64 @llvm.x86.sse42.crc32.64.8(i64, i8) nounwind
+declare i64 @llvm.x86.sse42.crc32.64.64(i64, i64) nounwind
+
+define i64 @crc32_64_8(i64 %a, i8 %b) nounwind {
+; CHECK-LABEL: crc32_64_8:
+; CHECK: ## BB#0:
+; CHECK-NEXT: crc32b %sil, %edi ## encoding: [0xf2,0x40,0x0f,0x38,0xf0,0xfe]
+; CHECK-NEXT: movq %rdi, %rax ## encoding: [0x48,0x89,0xf8]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %tmp = call i64 @llvm.x86.sse42.crc32.64.8(i64 %a, i8 %b)
+ ret i64 %tmp
+}
+
+define i64 @crc32_64_64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: crc32_64_64:
+; CHECK: ## BB#0:
+; CHECK-NEXT: crc32q %rsi, %rdi ## encoding: [0xf2,0x48,0x0f,0x38,0xf1,0xfe]
+; CHECK-NEXT: movq %rdi, %rax ## encoding: [0x48,0x89,0xf8]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+ %tmp = call i64 @llvm.x86.sse42.crc32.64.64(i64 %a, i64 %b)
+ ret i64 %tmp
+}
+
diff --git a/test/CodeGen/X86/sse42.ll b/test/CodeGen/X86/sse42.ll
deleted file mode 100644
index 2d05f9884c42..000000000000
--- a/test/CodeGen/X86/sse42.ll
+++ /dev/null
@@ -1,58 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=sse4.2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=sse4.2 | FileCheck %s --check-prefix=X64
-
-declare i32 @llvm.x86.sse42.crc32.32.8(i32, i8) nounwind
-declare i32 @llvm.x86.sse42.crc32.32.16(i32, i16) nounwind
-declare i32 @llvm.x86.sse42.crc32.32.32(i32, i32) nounwind
-
-define i32 @crc32_32_8(i32 %a, i8 %b) nounwind {
-; X32-LABEL: crc32_32_8:
-; X32: ## BB#0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: crc32b {{[0-9]+}}(%esp), %eax
-; X32-NEXT: retl
-;
-; X64-LABEL: crc32_32_8:
-; X64: ## BB#0:
-; X64-NEXT: crc32b %sil, %edi
-; X64-NEXT: movl %edi, %eax
-; X64-NEXT: retq
- %tmp = call i32 @llvm.x86.sse42.crc32.32.8(i32 %a, i8 %b)
- ret i32 %tmp
-}
-
-
-define i32 @crc32_32_16(i32 %a, i16 %b) nounwind {
-; X32-LABEL: crc32_32_16:
-; X32: ## BB#0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: crc32w {{[0-9]+}}(%esp), %eax
-; X32-NEXT: retl
-;
-; X64-LABEL: crc32_32_16:
-; X64: ## BB#0:
-; X64-NEXT: crc32w %si, %edi
-; X64-NEXT: movl %edi, %eax
-; X64-NEXT: retq
- %tmp = call i32 @llvm.x86.sse42.crc32.32.16(i32 %a, i16 %b)
- ret i32 %tmp
-}
-
-
-define i32 @crc32_32_32(i32 %a, i32 %b) nounwind {
-; X32-LABEL: crc32_32_32:
-; X32: ## BB#0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: crc32l {{[0-9]+}}(%esp), %eax
-; X32-NEXT: retl
-;
-; X64-LABEL: crc32_32_32:
-; X64: ## BB#0:
-; X64-NEXT: crc32l %esi, %edi
-; X64-NEXT: movl %edi, %eax
-; X64-NEXT: retq
- %tmp = call i32 @llvm.x86.sse42.crc32.32.32(i32 %a, i32 %b)
- ret i32 %tmp
-}
-
diff --git a/test/CodeGen/X86/sse42_64.ll b/test/CodeGen/X86/sse42_64.ll
deleted file mode 100644
index b39e76c78eb7..000000000000
--- a/test/CodeGen/X86/sse42_64.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=sse4.2 | FileCheck %s -check-prefix=X64
-
-declare i64 @llvm.x86.sse42.crc32.64.8(i64, i8) nounwind
-declare i64 @llvm.x86.sse42.crc32.64.64(i64, i64) nounwind
-
-define i64 @crc32_64_8(i64 %a, i8 %b) nounwind {
- %tmp = call i64 @llvm.x86.sse42.crc32.64.8(i64 %a, i8 %b)
- ret i64 %tmp
-
-; X64: _crc32_64_8:
-; X64: crc32b %sil,
-}
-
-define i64 @crc32_64_64(i64 %a, i64 %b) nounwind {
- %tmp = call i64 @llvm.x86.sse42.crc32.64.64(i64 %a, i64 %b)
- ret i64 %tmp
-
-; X64: _crc32_64_64:
-; X64: crc32q %rsi,
-}
-
diff --git a/test/CodeGen/X86/ssse3-intrinsics-x86.ll b/test/CodeGen/X86/ssse3-intrinsics-x86.ll
index d2785b4c89bb..4f49385fec7f 100644
--- a/test/CodeGen/X86/ssse3-intrinsics-x86.ll
+++ b/test/CodeGen/X86/ssse3-intrinsics-x86.ll
@@ -183,6 +183,35 @@ define <8 x i16> @test_x86_ssse3_pmadd_ub_sw_128(<16 x i8> %a0, <16 x i8> %a1) {
declare <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8>, <16 x i8>) nounwind readnone
+; Make sure we don't commute this operation.
+define <8 x i16> @test_x86_ssse3_pmadd_ub_sw_128_load_op0(<16 x i8>* %ptr, <16 x i8> %a1) {
+; SSE-LABEL: test_x86_ssse3_pmadd_ub_sw_128_load_op0:
+; SSE: ## BB#0:
+; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; SSE-NEXT: movdqa (%eax), %xmm1 ## encoding: [0x66,0x0f,0x6f,0x08]
+; SSE-NEXT: pmaddubsw %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x38,0x04,0xc8]
+; SSE-NEXT: movdqa %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x6f,0xc1]
+; SSE-NEXT: retl ## encoding: [0xc3]
+;
+; AVX2-LABEL: test_x86_ssse3_pmadd_ub_sw_128_load_op0:
+; AVX2: ## BB#0:
+; AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; AVX2-NEXT: vmovdqa (%eax), %xmm1 ## encoding: [0xc5,0xf9,0x6f,0x08]
+; AVX2-NEXT: vpmaddubsw %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0x04,0xc0]
+; AVX2-NEXT: retl ## encoding: [0xc3]
+;
+; SKX-LABEL: test_x86_ssse3_pmadd_ub_sw_128_load_op0:
+; SKX: ## BB#0:
+; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; SKX-NEXT: vmovdqu (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x6f,0x08]
+; SKX-NEXT: vpmaddubsw %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x04,0xc0]
+; SKX-NEXT: retl ## encoding: [0xc3]
+ %a0 = load <16 x i8>, <16 x i8>* %ptr
+ %res = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1) ; <<8 x i16>> [#uses=1]
+ ret <8 x i16> %res
+}
+
+
define <8 x i16> @test_x86_ssse3_pmul_hr_sw_128(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_ssse3_pmul_hr_sw_128:
; SSE: ## BB#0:
diff --git a/test/CodeGen/X86/stack-align.ll b/test/CodeGen/X86/stack-align.ll
index 04bae023984f..192306462d1d 100644
--- a/test/CodeGen/X86/stack-align.ll
+++ b/test/CodeGen/X86/stack-align.ll
@@ -69,10 +69,10 @@ entry:
define x86_stdcallcc void @test5(%struct.sixteen* byval nocapture readonly align 4 %s) #0 {
%d.sroa.0 = alloca [16 x i8], align 1
%1 = getelementptr inbounds [16 x i8], [16 x i8]* %d.sroa.0, i32 0, i32 0
- call void @llvm.lifetime.start(i64 16, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 16, i8* %1)
%2 = getelementptr inbounds %struct.sixteen, %struct.sixteen* %s, i32 0, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %2, i32 16, i32 1, i1 true)
- call void @llvm.lifetime.end(i64 16, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 16, i8* %1)
ret void
; CHECK-LABEL: test5:
; CHECK: and
@@ -82,10 +82,10 @@ define x86_stdcallcc void @test5(%struct.sixteen* byval nocapture readonly align
; CHECK-NEXT: movsd
}
-declare void @llvm.lifetime.start(i64, i8* nocapture) argmemonly nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) argmemonly nounwind
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) argmemonly nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) argmemonly nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) argmemonly nounwind
attributes #0 = { nounwind alignstack=16 "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" }
diff --git a/test/CodeGen/X86/stack-folding-adx-x86_64.ll b/test/CodeGen/X86/stack-folding-adx-x86_64.ll
index 5f109f09aa19..e992e463dc4b 100644
--- a/test/CodeGen/X86/stack-folding-adx-x86_64.ll
+++ b/test/CodeGen/X86/stack-folding-adx-x86_64.ll
@@ -43,3 +43,21 @@ define i8 @stack_fold_addcarryx_u64(i8 %a0, i64 %a1, i64 %a2, i8* %a3) {
ret i8 %2;
}
declare i8 @llvm.x86.addcarryx.u64(i8, i64, i64, i8*)
+
+define i8 @stack_fold_subborrow_u32(i8 %a0, i32 %a1, i32 %a2, i8* %a3) {
+ ;CHECK-LABEL: stack_fold_subborrow_u32
+ ;CHECK: sbbl {{-?[0-9]*}}(%rsp), %ecx {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = tail call i8 @llvm.x86.subborrow.u32(i8 %a0, i32 %a1, i32 %a2, i8* %a3)
+ ret i8 %2;
+}
+declare i8 @llvm.x86.subborrow.u32(i8, i32, i32, i8*)
+
+define i8 @stack_fold_subborrow_u64(i8 %a0, i64 %a1, i64 %a2, i8* %a3) {
+ ;CHECK-LABEL: stack_fold_subborrow_u64
+ ;CHECK: sbbq {{-?[0-9]*}}(%rsp), %rcx {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = tail call i8 @llvm.x86.subborrow.u64(i8 %a0, i64 %a1, i64 %a2, i8* %a3)
+ ret i8 %2;
+}
+declare i8 @llvm.x86.subborrow.u64(i8, i64, i64, i8*)
diff --git a/test/CodeGen/X86/stack-folding-bmi.ll b/test/CodeGen/X86/stack-folding-bmi.ll
new file mode 100644
index 000000000000..cabc88432be4
--- /dev/null
+++ b/test/CodeGen/X86/stack-folding-bmi.ll
@@ -0,0 +1,121 @@
+; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+bmi < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+; Stack reload folding tests.
+;
+; By including a nop call with side effects we can force a partial register spill of the
+; relevant registers and check that the reload is correctly folded into the instruction.
+
+define i32 @stack_fold_andn_u32(i32 %a0, i32 %a1) {
+ ;CHECK-LABEL: stack_fold_andn_u32
+ ;CHECK: andnl {{-?[0-9]*}}(%rsp), %eax, %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = xor i32 %a0, -1
+ %3 = and i32 %a1, %2
+ ret i32 %3
+}
+
+define i64 @stack_fold_andn_u64(i64 %a0, i64 %a1) {
+ ;CHECK-LABEL: stack_fold_andn_u64
+ ;CHECK: andnq {{-?[0-9]*}}(%rsp), %rax, %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = xor i64 %a0, -1
+ %3 = and i64 %a1, %2
+ ret i64 %3
+}
+
+define i32 @stack_fold_bextr_u32(i32 %a0, i32 %a1) {
+ ;CHECK-LABEL: stack_fold_bextr_u32
+ ;CHECK: # BB#0:
+ ;CHECK: bextrl %eax, {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = tail call i32 @llvm.x86.bmi.bextr.32(i32 %a0, i32 %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.bmi.bextr.32(i32, i32)
+
+define i64 @stack_fold_bextr_u64(i64 %a0, i64 %a1) {
+ ;CHECK-LABEL: stack_fold_bextr_u64
+ ;CHECK: # BB#0:
+ ;CHECK: bextrq %rax, {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = tail call i64 @llvm.x86.bmi.bextr.64(i64 %a0, i64 %a1)
+ ret i64 %2
+}
+declare i64 @llvm.x86.bmi.bextr.64(i64, i64)
+
+define i32 @stack_fold_blsi_u32(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_blsi_u32
+ ;CHECK: blsil {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sub i32 0, %a0
+ %3 = and i32 %2, %a0
+ ret i32 %3
+}
+
+define i64 @stack_fold_blsi_u64(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_blsi_u64
+ ;CHECK: blsiq {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sub i64 0, %a0
+ %3 = and i64 %2, %a0
+ ret i64 %3
+}
+
+define i32 @stack_fold_blsmsk_u32(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_blsmsk_u32
+ ;CHECK: blsmskl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sub i32 %a0, 1
+ %3 = xor i32 %2, %a0
+ ret i32 %3
+}
+
+define i64 @stack_fold_blsmsk_u64(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_blsmsk_u64
+ ;CHECK: blsmskq {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sub i64 %a0, 1
+ %3 = xor i64 %2, %a0
+ ret i64 %3
+}
+
+define i32 @stack_fold_blsr_u32(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_blsr_u32
+ ;CHECK: blsrl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sub i32 %a0, 1
+ %3 = and i32 %2, %a0
+ ret i32 %3
+}
+
+define i64 @stack_fold_blsr_u64(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_blsr_u64
+ ;CHECK: blsrq {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sub i64 %a0, 1
+ %3 = and i64 %2, %a0
+ ret i64 %3
+}
+
+; TODO stack_fold_tzcnt_u16
+
+define i32 @stack_fold_tzcnt_u32(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_tzcnt_u32
+ ;CHECK: tzcntl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = tail call i32 @llvm.cttz.i32(i32 %a0, i1 0)
+ ret i32 %2
+}
+declare i32 @llvm.cttz.i32(i32, i1)
+
+define i64 @stack_fold_tzcnt_u64(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_tzcnt_u64
+ ;CHECK: tzcntq {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = tail call i64 @llvm.cttz.i64(i64 %a0, i1 0)
+ ret i64 %2
+}
+declare i64 @llvm.cttz.i64(i64, i1)
diff --git a/test/CodeGen/X86/stack-folding-bmi2.ll b/test/CodeGen/X86/stack-folding-bmi2.ll
new file mode 100644
index 000000000000..b70f7c668d01
--- /dev/null
+++ b/test/CodeGen/X86/stack-folding-bmi2.ll
@@ -0,0 +1,77 @@
+; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+bmi,+bmi2 < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+; Stack reload folding tests.
+;
+; By including a nop call with side effects we can force a partial register spill of the
+; relevant registers and check that the reload is correctly folded into the instruction.
+
+define i32 @stack_fold_bzhi_u32(i32 %a0, i32 %a1) {
+ ;CHECK-LABEL: stack_fold_bzhi_u32
+ ;CHECK: bzhil %eax, {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i32 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %a0, i32 %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.bmi.bzhi.32(i32, i32)
+
+define i64 @stack_fold_bzhi_u64(i64 %a0, i64 %a1) {
+ ;CHECK-LABEL: stack_fold_bzhi_u64
+ ;CHECK: bzhiq %rax, {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %a0, i64 %a1)
+ ret i64 %2
+}
+declare i64 @llvm.x86.bmi.bzhi.64(i64, i64)
+
+define i64 @stack_fold_mulx_u64(i64 %a0, i64 %a1, i64 *%a2) {
+ ;CHECK-LABEL: stack_fold_mulx_u64
+ ;CHECK: mulxq {{-?[0-9]*}}(%rsp), %rax, %rcx {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = zext i64 %a0 to i128
+ %3 = zext i64 %a1 to i128
+ %4 = mul i128 %2, %3
+ %5 = lshr i128 %4, 64
+ %6 = trunc i128 %4 to i64
+ %7 = trunc i128 %5 to i64
+ store i64 %7, i64 *%a2
+ ret i64 %6
+}
+
+define i32 @stack_fold_pdep_u32(i32 %a0, i32 %a1) {
+ ;CHECK-LABEL: stack_fold_pdep_u32
+ ;CHECK: pdepl {{-?[0-9]*}}(%rsp), %eax, %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i32 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %a0, i32 %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.bmi.pdep.32(i32, i32)
+
+define i64 @stack_fold_pdep_u64(i64 %a0, i64 %a1) {
+ ;CHECK-LABEL: stack_fold_pdep_u64
+ ;CHECK: pdepq {{-?[0-9]*}}(%rsp), %rax, %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = tail call i64 @llvm.x86.bmi.pdep.64(i64 %a0, i64 %a1)
+ ret i64 %2
+}
+declare i64 @llvm.x86.bmi.pdep.64(i64, i64)
+
+define i32 @stack_fold_pext_u32(i32 %a0, i32 %a1) {
+ ;CHECK-LABEL: stack_fold_pext_u32
+ ;CHECK: pextl {{-?[0-9]*}}(%rsp), %eax, %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i32 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = tail call i32 @llvm.x86.bmi.pext.32(i32 %a0, i32 %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.bmi.pext.32(i32, i32)
+
+define i64 @stack_fold_pext_u64(i64 %a0, i64 %a1) {
+ ;CHECK-LABEL: stack_fold_pext_u64
+ ;CHECK: pextq {{-?[0-9]*}}(%rsp), %rax, %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = tail call i64 @llvm.x86.bmi.pext.64(i64 %a0, i64 %a1)
+ ret i64 %2
+}
+declare i64 @llvm.x86.bmi.pext.64(i64, i64)
diff --git a/test/CodeGen/X86/stack-folding-fp-avx1.ll b/test/CodeGen/X86/stack-folding-fp-avx1.ll
index 5e939cc034d4..72542f499087 100644
--- a/test/CodeGen/X86/stack-folding-fp-avx1.ll
+++ b/test/CodeGen/X86/stack-folding-fp-avx1.ll
@@ -575,17 +575,6 @@ define i64 @stack_fold_cvtsd2si64_int(<2 x double> %a0) {
}
declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone
-; TODO stack_fold_cvtsd2ss
-
-define <4 x float> @stack_fold_cvtsd2ss_int(<2 x double> %a0) {
- ;CHECK-LABEL: stack_fold_cvtsd2ss_int
- ;CHECK: vcvtsd2ss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> <float 0x0, float 0x0, float 0x0, float 0x0>, <2 x double> %a0)
- ret <4 x float> %2
-}
-declare <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float>, <2 x double>) nounwind readnone
-
define double @stack_fold_cvtsi2sd(i32 %a0) {
;CHECK-LABEL: stack_fold_cvtsi2sd
;CHECK: vcvtsi2sdl {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
@@ -654,17 +643,6 @@ define <4 x float> @stack_fold_cvtsi642ss_int(i64 %a0) {
}
declare <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float>, i64) nounwind readnone
-; TODO stack_fold_cvtss2sd
-
-define <2 x double> @stack_fold_cvtss2sd_int(<4 x float> %a0) {
- ;CHECK-LABEL: stack_fold_cvtss2sd_int
- ;CHECK: vcvtss2sd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double> <double 0x0, double 0x0>, <4 x float> %a0)
- ret <2 x double> %2
-}
-declare <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double>, <4 x float>) nounwind readnone
-
; TODO stack_fold_cvtss2si
define i32 @stack_fold_cvtss2si_int(<4 x float> %a0) {
diff --git a/test/CodeGen/X86/stack-folding-fp-avx512vl.ll b/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
index c6ae85dda43a..292829a01cb3 100644
--- a/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
+++ b/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
@@ -402,6 +402,45 @@ define <8 x float> @stack_fold_orps_ymm(<8 x float> %a0, <8 x float> %a1) {
ret <8 x float> %6
}
+define <4 x float> @stack_fold_shufps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_shufps
+ ;CHECK: vshufps $200, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 2, i32 4, i32 7>
+ ret <4 x float> %2
+}
+
+define <4 x float> @stack_fold_shufps_mask(<4 x float>* %passthru, <4 x float> %a0, <4 x float> %a1, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_shufps_mask
+ ;CHECK: vshufps $200, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 2, i32 4, i32 7>
+ %3 = bitcast i8 %mask to <8 x i1>
+ %4 = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %5 = load <4 x float>, <4 x float>* %passthru
+ %6 = select <4 x i1> %4, <4 x float> %2, <4 x float> %5
+ ret <4 x float> %6
+}
+
+define <4 x float> @stack_fold_shufps_maskz(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_shufps_maskz
+ ;CHECK: vshufps $200, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 2, i32 4, i32 7>
+ %3 = bitcast i8 %mask to <8 x i1>
+ %4 = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %5 = select <4 x i1> %4, <4 x float> %2, <4 x float> zeroinitializer
+ ret <4 x float> %5
+}
+
+define <8 x float> @stack_fold_shufps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_shufps_ymm
+ ;CHECK: vshufps $148, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 1, i32 9, i32 10, i32 4, i32 5, i32 13, i32 14>
+ ret <8 x float> %2
+}
+
define <2 x double> @stack_fold_subpd(<2 x double> %a0, <2 x double> %a1) {
;CHECK-LABEL: stack_fold_subpd
;CHECK: vsubpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
diff --git a/test/CodeGen/X86/stack-folding-fp-sse42.ll b/test/CodeGen/X86/stack-folding-fp-sse42.ll
index c57782721a66..daa903bc8660 100644
--- a/test/CodeGen/X86/stack-folding-fp-sse42.ll
+++ b/test/CodeGen/X86/stack-folding-fp-sse42.ll
@@ -148,7 +148,7 @@ define <4 x float> @stack_fold_blendps(<4 x float> %a0, <4 x float> %a1) {
define <2 x double> @stack_fold_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %c) {
;CHECK-LABEL: stack_fold_blendvpd
- ;CHECK: blendvpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ ;CHECK: blendvpd %xmm0, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
%2 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a1, <2 x double> %c, <2 x double> %a0)
ret <2 x double> %2
@@ -157,7 +157,7 @@ declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x d
define <4 x float> @stack_fold_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %c) {
;CHECK-LABEL: stack_fold_blendvps
- ;CHECK: blendvps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ ;CHECK: blendvps %xmm0, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
%2 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a1, <4 x float> %c, <4 x float> %a0)
ret <4 x float> %2
diff --git a/test/CodeGen/X86/stack-folding-int-avx512.ll b/test/CodeGen/X86/stack-folding-int-avx512.ll
index 20572b373082..04a7d1159014 100644
--- a/test/CodeGen/X86/stack-folding-int-avx512.ll
+++ b/test/CodeGen/X86/stack-folding-int-avx512.ll
@@ -8,6 +8,329 @@ target triple = "x86_64-unknown-unknown"
; By including a nop call with sideeffects we can force a partial register spill of the
; relevant registers and check that the reload is correctly folded into the instruction.
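+; The asm clobbers most of the vector registers, so any value live across it
+; must be spilled and reloaded; the CHECK lines verify that the reload is
+; folded into the tested instruction as a memory operand rather than emitted
+; as a separate load.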
+define <16 x i32> @stack_fold_valignd(<16 x i32> %a, <16 x i32> %b) {
+ ;CHECK-LABEL: stack_fold_valignd
+ ;CHECK: valignd $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
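+ ; elements 1..16 of the %a:%b concatenation form a one-element alignment shift, matching valignd $1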
+ %2 = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32><i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>
+ ret <16 x i32> %2
+}
+
+define <16 x i32> @stack_fold_valignd_mask(<16 x i32> %a, <16 x i32> %b, <16 x i32>* %passthru, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_valignd_mask
+ ;CHECK: valignd $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32><i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>
+ %3 = bitcast i16 %mask to <16 x i1>
+ %4 = load <16 x i32>, <16 x i32>* %passthru
+ %5 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> %4
+ ret <16 x i32> %5
+}
+
+define <16 x i32> @stack_fold_valignd_maskz(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_valignd_maskz
+ ;CHECK: valignd $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32><i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>
+ %3 = bitcast i16 %mask to <16 x i1>
+ %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> zeroinitializer
+ ret <16 x i32> %4
+}
+
+define <8 x i64> @stack_fold_valignq(<8 x i64> %a, <8 x i64> %b) {
+ ;CHECK-LABEL: stack_fold_valignq
+ ;CHECK: valignq $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
+ ret <8 x i64> %2
+}
+
+define <8 x i64> @stack_fold_valignq_mask(<8 x i64> %a, <8 x i64> %b, <8 x i64>* %passthru, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_valignq_mask
+ ;CHECK: valignq $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
+ %3 = bitcast i8 %mask to <8 x i1>
+ %4 = load <8 x i64>, <8 x i64>* %passthru
+ %5 = select <8 x i1> %3, <8 x i64> %2, <8 x i64> %4
+ ret <8 x i64> %5
+}
+
+define <8 x i64> @stack_fold_valignq_maskz(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_valignq_maskz
+ ;CHECK: valignq $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
+ %3 = bitcast i8 %mask to <8 x i1>
+ %4 = select <8 x i1> %3, <8 x i64> %2, <8 x i64> zeroinitializer
+ ret <8 x i64> %4
+}
+
+define <64 x i8> @stack_fold_pavgb(<64 x i8> %a0, <64 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pavgb
+ ;CHECK: vpavgb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <64 x i8> @llvm.x86.avx512.mask.pavg.b.512(<64 x i8> %a0, <64 x i8> %a1, <64 x i8> undef, i64 -1)
+ ret <64 x i8> %2
+}
+declare <64 x i8> @llvm.x86.avx512.mask.pavg.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64) nounwind readnone
+
+define <64 x i8> @stack_fold_pavgb_mask(<64 x i8>* %passthru, <64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
+ ;CHECK-LABEL: stack_fold_pavgb_mask
+ ;CHECK: vpavgb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = load <64 x i8>, <64 x i8>* %passthru
+ %3 = call <64 x i8> @llvm.x86.avx512.mask.pavg.b.512(<64 x i8> %a0, <64 x i8> %a1, <64 x i8> %2, i64 %mask)
+ ret <64 x i8> %3
+}
+
+define <64 x i8> @stack_fold_pavgb_maskz(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
+ ;CHECK-LABEL: stack_fold_pavgb_maskz
+ ;CHECK: vpavgb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <64 x i8> @llvm.x86.avx512.mask.pavg.b.512(<64 x i8> %a0, <64 x i8> %a1, <64 x i8> zeroinitializer, i64 %mask)
+ ret <64 x i8> %2
+}
+
+define <32 x i16> @stack_fold_pavgw(<32 x i16> %a0, <32 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pavgw
+ ;CHECK: vpavgw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i16> @llvm.x86.avx512.mask.pavg.w.512(<32 x i16> %a0, <32 x i16> %a1, <32 x i16> undef, i32 -1)
+ ret <32 x i16> %2
+}
+declare <32 x i16> @llvm.x86.avx512.mask.pavg.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32) nounwind readnone
+
+define <32 x i16> @stack_fold_pavgw_mask(<32 x i16>* %passthru, <32 x i16> %a0, <32 x i16> %a1, i32 %mask) {
+ ;CHECK-LABEL: stack_fold_pavgw_mask
+ ;CHECK: vpavgw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = load <32 x i16>, <32 x i16>* %passthru
+ %3 = call <32 x i16> @llvm.x86.avx512.mask.pavg.w.512(<32 x i16> %a0, <32 x i16> %a1, <32 x i16> %2, i32 %mask)
+ ret <32 x i16> %3
+}
+
+define <32 x i16> @stack_fold_pavgw_maskz(<32 x i16> %a0, <32 x i16> %a1, i32 %mask) {
+ ;CHECK-LABEL: stack_fold_pavgw_maskz
+ ;CHECK: vpavgw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i16> @llvm.x86.avx512.mask.pavg.w.512(<32 x i16> %a0, <32 x i16> %a1, <32 x i16> zeroinitializer, i32 %mask)
+ ret <32 x i16> %2
+}
+
+define <4 x i32> @stack_fold_extracti32x4(<16 x i32> %a0, <16 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_extracti32x4
+ ;CHECK: vextracti32x4 $3, {{%zmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
+ ; add forces execution domain
+ %1 = add <16 x i32> %a0, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %2 = shufflevector <16 x i32> %1, <16 x i32> %a1, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+ %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ ret <4 x i32> %2
+}
+
+define <2 x i64> @stack_fold_extracti64x2(<8 x i64> %a0, <8 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_extracti64x2
+ ;CHECK: vextracti64x2 $3, {{%zmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
+ ; add forces execution domain
+ %1 = add <8 x i64> %a0, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %2 = shufflevector <8 x i64> %1, <8 x i64> %a1, <2 x i32> <i32 6, i32 7>
+ %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ ret <2 x i64> %2
+}
+
+define <8 x i32> @stack_fold_extracti32x8(<16 x i32> %a0, <16 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_extracti32x8
+ ;CHECK: vextracti32x8 $1, {{%zmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 32-byte Folded Spill
+ ; add forces execution domain
+ %1 = add <16 x i32> %a0, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %2 = shufflevector <16 x i32> %1, <16 x i32> %a1, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ ret <8 x i32> %2
+}
+
+define <4 x i64> @stack_fold_extracti64x4(<8 x i64> %a0, <8 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_extracti64x4
+ ;CHECK: vextracti64x4 $1, {{%zmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 32-byte Folded Spill
+ ; add forces execution domain
+ %1 = add <8 x i64> %a0, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %2 = shufflevector <8 x i64> %1, <8 x i64> %a1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ ret <4 x i64> %2
+}
+
+define <16 x i32> @stack_fold_inserti32x8(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_inserti32x8
+ ;CHECK: vinserti32x8 $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ; add forces execution domain
+ %3 = add <16 x i32> %2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ ret <16 x i32> %3
+}
+
+define <8 x i64> @stack_fold_inserti64x4(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_inserti64x4
+ ;CHECK: vinserti64x4 $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <4 x i64> %a0, <4 x i64> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ; add forces execution domain
+ %3 = add <8 x i64> %2, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ ret <8 x i64> %3
+}
+
+define <64 x i8> @stack_fold_pabsb(<64 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsb
+ ;CHECK: vpabsb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <64 x i8> @llvm.x86.avx512.mask.pabs.b.512(<64 x i8> %a0, <64 x i8> undef, i64 -1)
+ ret <64 x i8> %2
+}
+declare <64 x i8> @llvm.x86.avx512.mask.pabs.b.512(<64 x i8>, <64 x i8>, i64) nounwind readnone
+
+define <64 x i8> @stack_fold_pabsb_mask(<64 x i8> %passthru, <64 x i8> %a0, i64 %mask) {
+ ;CHECK-LABEL: stack_fold_pabsb_mask
+ ;CHECK: vpabsb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <64 x i8> @llvm.x86.avx512.mask.pabs.b.512(<64 x i8> %a0, <64 x i8> %passthru, i64 %mask)
+ ret <64 x i8> %2
+}
+
+define <64 x i8> @stack_fold_pabsb_maskz(<64 x i8> %a0, i64 %mask) {
+ ;CHECK-LABEL: stack_fold_pabsb_maskz
+ ;CHECK: vpabsb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <64 x i8> @llvm.x86.avx512.mask.pabs.b.512(<64 x i8> %a0, <64 x i8> zeroinitializer, i64 %mask)
+ ret <64 x i8> %2
+}
+
+define <16 x i32> @stack_fold_pabsd(<16 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsd
+ ;CHECK: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %a0, <16 x i32> undef, i16 -1)
+ ret <16 x i32> %2
+}
+declare <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32>, <16 x i32>, i16) nounwind readnone
+
+define <16 x i32> @stack_fold_pabsd_mask(<16 x i32> %passthru, <16 x i32> %a0, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_pabsd_mask
+ ;CHECK: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask)
+ ret <16 x i32> %2
+}
+
+define <16 x i32> @stack_fold_pabsd_maskz(<16 x i32> %a0, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_pabsd_maskz
+ ;CHECK: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %a0, <16 x i32> zeroinitializer, i16 %mask)
+ ret <16 x i32> %2
+}
+
+define <8 x i64> @stack_fold_pabsq(<8 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsq
+ ;CHECK: vpabsq {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i64> @llvm.x86.avx512.mask.pabs.q.512(<8 x i64> %a0, <8 x i64> undef, i8 -1)
+ ret <8 x i64> %2
+}
+declare <8 x i64> @llvm.x86.avx512.mask.pabs.q.512(<8 x i64>, <8 x i64>, i8) nounwind readnone
+
+define <8 x i64> @stack_fold_pabsq_mask(<8 x i64> %passthru, <8 x i64> %a0, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_pabsq_mask
+ ;CHECK: vpabsq {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i64> @llvm.x86.avx512.mask.pabs.q.512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask)
+ ret <8 x i64> %2
+}
+
+define <8 x i64> @stack_fold_pabsq_maskz(<8 x i64> %a0, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_pabsq_maskz
+ ;CHECK: vpabsq {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i64> @llvm.x86.avx512.mask.pabs.q.512(<8 x i64> %a0, <8 x i64> zeroinitializer, i8 %mask)
+ ret <8 x i64> %2
+}
+
+define <32 x i16> @stack_fold_pabsw(<32 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsw
+ ;CHECK: vpabsw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i16> @llvm.x86.avx512.mask.pabs.w.512(<32 x i16> %a0, <32 x i16> undef, i32 -1)
+ ret <32 x i16> %2
+}
+declare <32 x i16> @llvm.x86.avx512.mask.pabs.w.512(<32 x i16>, <32 x i16>, i32) nounwind readnone
+
+define <32 x i16> @stack_fold_pabsw_mask(<32 x i16> %passthru, <32 x i16> %a0, i32 %mask) {
+ ;CHECK-LABEL: stack_fold_pabsw_mask
+ ;CHECK: vpabsw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i16> @llvm.x86.avx512.mask.pabs.w.512(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask)
+ ret <32 x i16> %2
+}
+
+define <32 x i16> @stack_fold_pabsw_maskz(<32 x i16> %a0, i32 %mask) {
+ ;CHECK-LABEL: stack_fold_pabsw_maskz
+ ;CHECK: vpabsw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i16> @llvm.x86.avx512.mask.pabs.w.512(<32 x i16> %a0, <32 x i16> zeroinitializer, i32 %mask)
+ ret <32 x i16> %2
+}
+
+define <32 x i16> @stack_fold_packssdw(<16 x i32> %a0, <16 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_packssdw
+ ;CHECK: vpackssdw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32> %a0, <16 x i32> %a1, <32 x i16> undef, i32 -1)
+ ret <32 x i16> %2
+}
+declare <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32>, <16 x i32>, <32 x i16>, i32) nounwind readnone
+
+define <64 x i8> @stack_fold_packsswb(<32 x i16> %a0, <32 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_packsswb
+ ;CHECK: vpacksswb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16> %a0, <32 x i16> %a1, <64 x i8> undef, i64 -1)
+ ret <64 x i8> %2
+}
+declare <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16>, <32 x i16>, <64 x i8>, i64) nounwind readnone
+
+define <32 x i16> @stack_fold_packusdw(<16 x i32> %a0, <16 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_packusdw
+ ;CHECK: vpackusdw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a0, <16 x i32> %a1, <32 x i16> undef, i32 -1)
+ ret <32 x i16> %2
+}
+declare <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32>, <16 x i32>, <32 x i16>, i32) nounwind readnone
+
+define <32 x i16> @stack_fold_packusdw_mask(<32 x i16>* %passthru, <16 x i32> %a0, <16 x i32> %a1, i32 %mask) {
+ ;CHECK-LABEL: stack_fold_packusdw_mask
+ ;CHECK: vpackusdw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = load <32 x i16>, <32 x i16>* %passthru
+ %3 = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a0, <16 x i32> %a1, <32 x i16> %2, i32 %mask)
+ ret <32 x i16> %3
+}
+
+define <32 x i16> @stack_fold_packusdw_maskz(<16 x i32> %a0, <16 x i32> %a1, i32 %mask) {
+ ;CHECK-LABEL: stack_fold_packusdw_maskz
+ ;CHECK: vpackusdw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32> %a0, <16 x i32> %a1, <32 x i16> zeroinitializer, i32 %mask)
+ ret <32 x i16> %2
+}
+
+define <64 x i8> @stack_fold_packuswb(<32 x i16> %a0, <32 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_packuswb
+ ;CHECK: vpackuswb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16> %a0, <32 x i16> %a1, <64 x i8> undef, i64 -1)
+ ret <64 x i8> %2
+}
+declare <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16>, <32 x i16>, <64 x i8>, i64) nounwind readnone
+
define <64 x i8> @stack_fold_paddb(<64 x i8> %a0, <64 x i8> %a1) {
;CHECK-LABEL: stack_fold_paddb
;CHECK: vpaddb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
@@ -98,6 +421,35 @@ define <32 x i16> @stack_fold_paddw(<32 x i16> %a0, <32 x i16> %a1) {
ret <32 x i16> %2
}
+define <64 x i8> @stack_fold_palignr(<64 x i8> %a0, <64 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_palignr
+ ;CHECK: vpalignr $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
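+ ; within each 128-bit lane: bytes 1..15 of %a1 followed by byte 0 of the corresponding %a0 lane, i.e. palignr $1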
+ %2 = shufflevector <64 x i8> %a1, <64 x i8> %a0, <64 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 80, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 112>
+ ret <64 x i8> %2
+}
+
+define <64 x i8> @stack_fold_palignr_mask(<64 x i8> %a0, <64 x i8> %a1, <64 x i8>* %passthru, i64 %mask) {
+ ;CHECK-LABEL: stack_fold_palignr_mask
+ ;CHECK: vpalignr $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <64 x i8> %a1, <64 x i8> %a0, <64 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 80, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 112>
+ %3 = bitcast i64 %mask to <64 x i1>
+ %4 = load <64 x i8>, <64 x i8>* %passthru
+ %5 = select <64 x i1> %3, <64 x i8> %2, <64 x i8> %4
+ ret <64 x i8> %5
+}
+
+define <64 x i8> @stack_fold_palignr_maskz(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
+ ;CHECK-LABEL: stack_fold_palignr_maskz
+ ;CHECK: vpalignr $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <64 x i8> %a1, <64 x i8> %a0, <64 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 80, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 112>
+ %3 = bitcast i64 %mask to <64 x i1>
+ %4 = select <64 x i1> %3, <64 x i8> %2, <64 x i8> zeroinitializer
+ ret <64 x i8> %4
+}
+
define i64 @stack_fold_pcmpeqb(<64 x i8> %a0, <64 x i8> %a1) {
;CHECK-LABEL: stack_fold_pcmpeqb
;CHECK: vpcmpeqb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 64-byte Folded Reload
@@ -134,92 +486,321 @@ define i32 @stack_fold_pcmpeqw(<32 x i16> %a0, <32 x i16> %a1) {
ret i32 %3
}
-define <64 x i8> @stack_fold_psubb(<64 x i8> %a0, <64 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_psubb
- ;CHECK: vpsubb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+define <64 x i8> @stack_fold_permbvar(<64 x i8> %a0, <64 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_permbvar
+ ;CHECK: vpermb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = sub <64 x i8> %a0, %a1
+ %2 = call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a1, <64 x i8> %a0, <64 x i8> undef, i64 -1)
ret <64 x i8> %2
}
+declare <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8>, <64 x i8>, <64 x i8>, i64) nounwind readonly
-define <16 x i32> @stack_fold_psubd(<16 x i32> %a0, <16 x i32> %a1) {
- ;CHECK-LABEL: stack_fold_psubd
- ;CHECK: vpsubd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+define <64 x i8> @stack_fold_permbvar_mask(<64 x i8>* %passthru, <64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
+ ;CHECK-LABEL: stack_fold_permbvar_mask
+ ;CHECK: vpermb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = sub <16 x i32> %a0, %a1
+ %2 = call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a1, <64 x i8> %a0, <64 x i8> undef, i64 -1)
+ %3 = bitcast i64 %mask to <64 x i1>
+ ; load needed to keep the operation from being scheduled above the asm block
+ %4 = load <64 x i8>, <64 x i8>* %passthru
+ %5 = select <64 x i1> %3, <64 x i8> %2, <64 x i8> %4
+ ret <64 x i8> %5
+}
+
+define <64 x i8> @stack_fold_permbvar_maskz(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
+ ;CHECK-LABEL: stack_fold_permbvar_maskz
+ ;CHECK: vpermb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a1, <64 x i8> %a0, <64 x i8> undef, i64 -1)
+ %3 = bitcast i64 %mask to <64 x i1>
+ %4 = select <64 x i1> %3, <64 x i8> %2, <64 x i8> zeroinitializer
+ ret <64 x i8> %4
+}
+
+define <16 x i32> @stack_fold_permd(<16 x i32> %a0, <16 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_permd
+ ;CHECK: vpermd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a1, <16 x i32> %a0, <16 x i32> undef, i16 -1)
ret <16 x i32> %2
}
+declare <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32>, <16 x i32>, <16 x i32>, i16) nounwind readonly
-define <8 x i64> @stack_fold_psubq(<8 x i64> %a0, <8 x i64> %a1) {
- ;CHECK-LABEL: stack_fold_psubq
- ;CHECK: vpsubq {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+define <64 x i8> @stack_fold_vpermi2b(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermi2b
+ ;CHECK: vpermi2b {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
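+ ; folding the reload commutes the vpermt2var form to vpermi2b, which is what the CHECK expects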
+ %res = call <64 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 -1)
+ ret <64 x i8> %res
+}
+declare <64 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
+
+define <16 x i32> @stack_fold_vpermi2d(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermi2d
+ ;CHECK: vpermi2d {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <16 x i32> @llvm.x86.avx512.mask.vpermt2var.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1)
+ ret <16 x i32> %res
+}
+declare <16 x i32> @llvm.x86.avx512.mask.vpermt2var.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
+
+define <8 x i64> @stack_fold_vpermi2q(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermi2q
+ ;CHECK: vpermi2q {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <8 x i64> @llvm.x86.avx512.mask.vpermt2var.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
+ ret <8 x i64> %res
+}
+declare <8 x i64> @llvm.x86.avx512.mask.vpermt2var.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+
+define <32 x i16> @stack_fold_vpermi2w(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermi2w
+ ;CHECK: vpermi2w {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <32 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 -1)
+ ret <32 x i16> %res
+}
+declare <32 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
+
+define <8 x i64> @stack_fold_permq(<8 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_permq
+ ;CHECK: vpermq $235, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
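+ ; the per-256-bit-half pattern 3,2,2,3 encodes as imm8 235 (0xEB)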
+ %2 = shufflevector <8 x i64> %a0, <8 x i64> undef, <8 x i32> <i32 3, i32 2, i32 2, i32 3, i32 7, i32 6, i32 6, i32 7>
+ ; add forces execution domain
+ %3 = add <8 x i64> %2, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ ret <8 x i64> %3
+}
+
+define <8 x i64> @stack_fold_permq_mask(<8 x i64>* %passthru, <8 x i64> %a0, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_permq_mask
+ ;CHECK: vpermq $235, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <8 x i64> %a0, <8 x i64> undef, <8 x i32> <i32 3, i32 2, i32 2, i32 3, i32 7, i32 6, i32 6, i32 7>
+ %3 = bitcast i8 %mask to <8 x i1>
+ ; load needed to keep the operation from being scheduled above the asm block
+ %4 = load <8 x i64>, <8 x i64>* %passthru
+ %5 = select <8 x i1> %3, <8 x i64> %2, <8 x i64> %4
+ ; add forces execution domain
+ %6 = add <8 x i64> %5, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ ret <8 x i64> %6
+}
+
+define <8 x i64> @stack_fold_permq_maskz(<8 x i64>* %passthru, <8 x i64> %a0, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_permq_maskz
+ ;CHECK: vpermq $235, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <8 x i64> %a0, <8 x i64> undef, <8 x i32> <i32 3, i32 2, i32 2, i32 3, i32 7, i32 6, i32 6, i32 7>
+ %3 = bitcast i8 %mask to <8 x i1>
+ %4 = select <8 x i1> %3, <8 x i64> %2, <8 x i64> zeroinitializer
+ ret <8 x i64> %4
+}
+
+define <8 x i64> @stack_fold_permqvar(<8 x i64> %a0, <8 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_permqvar
+ ;CHECK: vpermq {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = sub <8 x i64> %a0, %a1
- ret <8 x i64> %2
+ %2 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a1, <8 x i64> %a0, <8 x i64> undef, i8 -1)
+ ; add forces execution domain
+ %3 = add <8 x i64> %2, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ ret <8 x i64> %3
}
+declare <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64>, <8 x i64>, <8 x i64>, i8) nounwind readonly
-define <64 x i8> @stack_fold_psubsb(<64 x i8> %a0, <64 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_psubsb
- ;CHECK: vpsubsb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+define <8 x i64> @stack_fold_permqvar_mask(<8 x i64>* %passthru, <8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_permqvar_mask
+ ;CHECK: vpermq {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <64 x i8> @llvm.x86.avx512.mask.psubs.b.512(<64 x i8> %a0, <64 x i8> %a1, <64 x i8> undef, i64 -1)
- ret <64 x i8> %2
+ %2 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a1, <8 x i64> %a0, <8 x i64> undef, i8 -1)
+ %3 = bitcast i8 %mask to <8 x i1>
+ ; load needed to keep the operation from being scheduled above the asm block
+ %4 = load <8 x i64>, <8 x i64>* %passthru
+ %5 = select <8 x i1> %3, <8 x i64> %2, <8 x i64> %4
+ ; add forces execution domain
+ %6 = add <8 x i64> %5, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ ret <8 x i64> %6
}
-declare <64 x i8> @llvm.x86.avx512.mask.psubs.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64) nounwind readnone
-define <32 x i16> @stack_fold_psubsw(<32 x i16> %a0, <32 x i16> %a1) {
- ;CHECK-LABEL: stack_fold_psubsw
- ;CHECK: vpsubsw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+define <64 x i8> @stack_fold_vpermt2b(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermt2b
+ ;CHECK: vpermt2b {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <64 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 -1)
+ ret <64 x i8> %res
+}
+declare <64 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
+
+define <16 x i32> @stack_fold_vpermt2d(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermt2d
+ ;CHECK: vpermt2d {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <16 x i32> @llvm.x86.avx512.mask.vpermi2var.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1)
+ ret <16 x i32> %res
+}
+declare <16 x i32> @llvm.x86.avx512.mask.vpermi2var.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
+
+define <8 x i64> @stack_fold_vpermt2q(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermt2q
+ ;CHECK: vpermt2q {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <8 x i64> @llvm.x86.avx512.mask.vpermi2var.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
+ ret <8 x i64> %res
+}
+declare <8 x i64> @llvm.x86.avx512.mask.vpermi2var.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+
+define <32 x i16> @stack_fold_vpermt2w(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermt2w
+ ;CHECK: vpermt2w {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <32 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 -1)
+ ret <32 x i16> %res
+}
+declare <32 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
+
+define <32 x i16> @stack_fold_permwvar(<32 x i16> %a0, <32 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_permwvar
+ ;CHECK: vpermw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <32 x i16> @llvm.x86.avx512.mask.psubs.w.512(<32 x i16> %a0, <32 x i16> %a1, <32 x i16> undef, i32 -1)
+ %2 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a1, <32 x i16> %a0, <32 x i16> undef, i32 -1)
ret <32 x i16> %2
}
-declare <32 x i16> @llvm.x86.avx512.mask.psubs.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
+declare <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16>, <32 x i16>, <32 x i16>, i32) nounwind readonly
-define <64 x i8> @stack_fold_psubusb(<64 x i8> %a0, <64 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_psubusb
- ;CHECK: vpsubusb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+define <32 x i16> @stack_fold_permwvar_mask(<32 x i16>* %passthru, <32 x i16> %a0, <32 x i16> %a1, i32 %mask) {
+ ;CHECK-LABEL: stack_fold_permwvar_mask
+ ;CHECK: vpermw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <64 x i8> @llvm.x86.avx512.mask.psubus.b.512(<64 x i8> %a0, <64 x i8> %a1, <64 x i8> undef, i64 -1)
- ret <64 x i8> %2
+ %2 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a1, <32 x i16> %a0, <32 x i16> undef, i32 -1)
+ %3 = bitcast i32 %mask to <32 x i1>
+ ; load needed to keep the operation from being scheduled above the asm block
+ %4 = load <32 x i16>, <32 x i16>* %passthru
+ %5 = select <32 x i1> %3, <32 x i16> %2, <32 x i16> %4
+ ret <32 x i16> %5
}
-declare <64 x i8> @llvm.x86.avx512.mask.psubus.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64) nounwind readnone
-define <32 x i16> @stack_fold_psubusw(<32 x i16> %a0, <32 x i16> %a1) {
- ;CHECK-LABEL: stack_fold_psubusw
- ;CHECK: vpsubusw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+define <32 x i16> @stack_fold_permwvar_maskz(<32 x i16> %a0, <32 x i16> %a1, i32 %mask) {
+ ;CHECK-LABEL: stack_fold_permwvar_maskz
+ ;CHECK: vpermw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <32 x i16> @llvm.x86.avx512.mask.psubus.w.512(<32 x i16> %a0, <32 x i16> %a1, <32 x i16> undef, i32 -1)
- ret <32 x i16> %2
+ %2 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a1, <32 x i16> %a0, <32 x i16> undef, i32 -1)
+ %3 = bitcast i32 %mask to <32 x i1>
+ %4 = select <32 x i1> %3, <32 x i16> %2, <32 x i16> zeroinitializer
+ ret <32 x i16> %4
}
-declare <32 x i16> @llvm.x86.avx512.mask.psubus.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
-define <32 x i16> @stack_fold_psubw(<32 x i16> %a0, <32 x i16> %a1) {
- ;CHECK-LABEL: stack_fold_psubw
- ;CHECK: vpsubw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+define i32 @stack_fold_pextrd(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pextrd
+ ;CHECK: vpextrd $1, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 4-byte Folded Spill
+ ;CHECK: movl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Reload
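+ ; the extracted element is spilled directly to the stack and reloaded into %eax across the asm block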
+ ; add forces execution domain
+ %1 = add <4 x i32> %a0, <i32 1, i32 2, i32 3, i32 4>
+ %2 = extractelement <4 x i32> %1, i32 1
+ %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ ret i32 %2
+}
+
+define i64 @stack_fold_pextrq(<2 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_pextrq
+ ;CHECK: vpextrq $1, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 8-byte Folded Spill
+ ;CHECK: movq {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Reload
+ %1 = extractelement <2 x i64> %a0, i32 1
+ %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ ret i64 %1
+}
+
+define <16 x i8> @stack_fold_pinsrb(<16 x i8> %a0, i8 %a1) {
+ ;CHECK-LABEL: stack_fold_pinsrb
+ ;CHECK: vpinsrb $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = insertelement <16 x i8> %a0, i8 %a1, i32 1
+ ret <16 x i8> %2
+}
+
+define <4 x i32> @stack_fold_pinsrd(<4 x i32> %a0, i32 %a1) {
+ ;CHECK-LABEL: stack_fold_pinsrd
+ ;CHECK: vpinsrd $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = insertelement <4 x i32> %a0, i32 %a1, i32 1
+ ret <4 x i32> %2
+}
+
+define <2 x i64> @stack_fold_pinsrq(<2 x i64> %a0, i64 %a1) {
+ ;CHECK-LABEL: stack_fold_pinsrq
+ ;CHECK: vpinsrq $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = insertelement <2 x i64> %a0, i64 %a1, i32 1
+ ret <2 x i64> %2
+}
+
+define <8 x i16> @stack_fold_pinsrw(<8 x i16> %a0, i16 %a1) {
+ ;CHECK-LABEL: stack_fold_pinsrw
+ ;CHECK: vpinsrw $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = insertelement <8 x i16> %a0, i16 %a1, i32 1
+ ret <8 x i16> %2
+}
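+; Note: in the pinsr tests the scalar argument %a1 arrives in a GPR that the
+; asm block clobbers, so it should be spilled around the block; the
+; insertelement after the asm can then fold the reload, taking the stack slot
+; directly as the vpinsr* memory operand.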
+
+define <32 x i16> @stack_fold_pmaddubsw_zmm(<64 x i8> %a0, <64 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaddubsw_zmm
+ ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = sub <32 x i16> %a0, %a1
+ %2 = call <32 x i16> @llvm.x86.avx512.mask.pmaddubs.w.512(<64 x i8> %a0, <64 x i8> %a1, <32 x i16> undef, i32 -1)
ret <32 x i16> %2
}
+declare <32 x i16> @llvm.x86.avx512.mask.pmaddubs.w.512(<64 x i8>, <64 x i8>, <32 x i16>, i32) nounwind readnone
-define <16 x i32> @stack_fold_ternlogd(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
- ;CHECK-LABEL: stack_fold_ternlogd
- ;CHECK: vpternlogd $33, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
- ret <16 x i32> %res
+define <32 x i16> @stack_fold_pmaddubsw_zmm_mask(<32 x i16>* %passthru, <64 x i8> %a0, <64 x i8> %a1, i32 %mask) {
+ ;CHECK-LABEL: stack_fold_pmaddubsw_zmm_mask
+ ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i16> @llvm.x86.avx512.mask.pmaddubs.w.512(<64 x i8> %a0, <64 x i8> %a1, <32 x i16> undef, i32 -1)
+ %3 = bitcast i32 %mask to <32 x i1>
+ ; load needed to keep the operation from being scheduled above the asm block

+ %4 = load <32 x i16>, <32 x i16>* %passthru
+ %5 = select <32 x i1> %3, <32 x i16> %2, <32 x i16> %4
+ ret <32 x i16> %5
}
-declare <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i32, i16)
-define <8 x i64> @stack_fold_ternlogq(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) {
- ;CHECK-LABEL: stack_fold_ternlogq
- ;CHECK: vpternlogq $33, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <8 x i64> @llvm.x86.avx512.mask.pternlog.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i32 33, i8 -1)
- ret <8 x i64> %res
+define <32 x i16> @stack_fold_pmaddubsw_zmm_maskz(<64 x i8> %a0, <64 x i8> %a1, i32 %mask) {
+ ;CHECK-LABEL: stack_fold_pmaddubsw_zmm_maskz
+ ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i16> @llvm.x86.avx512.mask.pmaddubs.w.512(<64 x i8> %a0, <64 x i8> %a1, <32 x i16> undef, i32 -1)
+ %3 = bitcast i32 %mask to <32 x i1>
+ %4 = select <32 x i1> %3, <32 x i16> %2, <32 x i16> zeroinitializer
+ ret <32 x i16> %4
}
-declare <8 x i64> @llvm.x86.avx512.mask.pternlog.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i32, i8)
+define <16 x i32> @stack_fold_pmaddwd_zmm(<32 x i16> %a0, <32 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaddwd_zmm
+ ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i32> @llvm.x86.avx512.mask.pmaddw.d.512(<32 x i16> %a0, <32 x i16> %a1, <16 x i32> undef, i16 -1)
+ ret <16 x i32> %2
+}
+declare <16 x i32> @llvm.x86.avx512.mask.pmaddw.d.512(<32 x i16>, <32 x i16>, <16 x i32>, i16) nounwind readnone
+
+define <16 x i32> @stack_fold_pmaddwd_zmm_mask(<16 x i32>* %passthru, <32 x i16> %a0, <32 x i16> %a1, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_pmaddwd_zmm_mask
+ ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i32> @llvm.x86.avx512.mask.pmaddw.d.512(<32 x i16> %a0, <32 x i16> %a1, <16 x i32> undef, i16 -1)
+ %3 = bitcast i16 %mask to <16 x i1>
+ ; load needed to keep the operation from being scheduled above the asm block
+ %4 = load <16 x i32>, <16 x i32>* %passthru
+ %5 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> %4
+ ret <16 x i32> %5
+}
+
+define <16 x i32> @stack_fold_pmaddwd_zmm_maskz(<16 x i32>* %passthru, <32 x i16> %a0, <32 x i16> %a1, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_pmaddwd_zmm_maskz
+ ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i32> @llvm.x86.avx512.mask.pmaddw.d.512(<32 x i16> %a0, <32 x i16> %a1, <16 x i32> undef, i16 -1)
+ %3 = bitcast i16 %mask to <16 x i1>
+ %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> zeroinitializer
+ ret <16 x i32> %4
+}
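+; Note: the _mask/_maskz variants follow the usual idiom here: the intrinsic
+; is called with an all-ones mask and the real mask is applied as a bitcast
+; to <N x i1> plus a select, which isel is expected to fold into the
+; {%k}/{%k}{z} forms the CHECK lines match.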
define <16 x i8> @stack_fold_vpmovdb(<16 x i32> %a0) {
;CHECK-LABEL: stack_fold_vpmovdb
@@ -239,6 +820,16 @@ define <16 x i16> @stack_fold_vpmovdw(<16 x i32> %a0) {
}
declare <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32>, <16 x i16>, i16)
+define <2 x i64> @stack_fold_movq_load(<2 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_movq_load
+ ;CHECK: vmovq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
+ ; add forces execution domain
+ %3 = add <2 x i64> %2, <i64 1, i64 1>
+ ret <2 x i64> %3
+}
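+; Note: the shufflevector above takes element 0 of %a0 and element 0 of the
+; zero vector, i.e. it keeps the low qword and zeroes the high one (movq's
+; zero-extending load semantics); the trailing add pins the integer domain
+; so the folded reload is vmovq rather than a floating-point move.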
+
define <8 x i32> @stack_fold_vpmovqd(<8 x i64> %a0) {
;CHECK-LABEL: stack_fold_vpmovqd
;CHECK: vpmovqd %zmm0, {{-?[0-9]*}}(%rsp) # 32-byte Folded Spill
@@ -311,193 +902,6 @@ define <32 x i8> @stack_fold_vpmovswb(<32 x i16> %a0) {
}
declare <32 x i8> @llvm.x86.avx512.mask.pmovs.wb.512(<32 x i16>, <32 x i8>, i32)
-define <16 x i8> @stack_fold_vpmovusdb(<16 x i32> %a0) {
- ;CHECK-LABEL: stack_fold_vpmovusdb
- ;CHECK: vpmovusdb %zmm0, {{-?[0-9]*}}(%rsp) # 16-byte Folded Spill
- %1 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.db.512(<16 x i32> %a0, <16 x i8> undef, i16 -1)
- %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- ret <16 x i8> %1
-}
-declare <16 x i8> @llvm.x86.avx512.mask.pmovus.db.512(<16 x i32>, <16 x i8>, i16)
-
-define <16 x i16> @stack_fold_vpmovusdw(<16 x i32> %a0) {
- ;CHECK-LABEL: stack_fold_vpmovusdw
- ;CHECK: vpmovusdw %zmm0, {{-?[0-9]*}}(%rsp) # 32-byte Folded Spill
- %1 = call <16 x i16> @llvm.x86.avx512.mask.pmovus.dw.512(<16 x i32> %a0, <16 x i16> undef, i16 -1)
- %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- ret <16 x i16> %1
-}
-declare <16 x i16> @llvm.x86.avx512.mask.pmovus.dw.512(<16 x i32>, <16 x i16>, i16)
-
-define <8 x i32> @stack_fold_vpmovusqd(<8 x i64> %a0) {
- ;CHECK-LABEL: stack_fold_vpmovusqd
- ;CHECK: vpmovusqd %zmm0, {{-?[0-9]*}}(%rsp) # 32-byte Folded Spill
- %1 = call <8 x i32> @llvm.x86.avx512.mask.pmovus.qd.512(<8 x i64> %a0, <8 x i32> undef, i8 -1)
- %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- ret <8 x i32> %1
-}
-declare <8 x i32> @llvm.x86.avx512.mask.pmovus.qd.512(<8 x i64>, <8 x i32>, i8)
-
-define <8 x i16> @stack_fold_vpmovusqw(<8 x i64> %a0) {
- ;CHECK-LABEL: stack_fold_vpmovusqw
- ;CHECK: vpmovusqw %zmm0, {{-?[0-9]*}}(%rsp) # 16-byte Folded Spill
- %1 = call <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.512(<8 x i64> %a0, <8 x i16> undef, i8 -1)
- %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- ret <8 x i16> %1
-}
-declare <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.512(<8 x i64>, <8 x i16>, i8)
-
-define <32 x i8> @stack_fold_vpmovuswb(<32 x i16> %a0) {
- ;CHECK-LABEL: stack_fold_vpmovuswb
- ;CHECK: vpmovuswb %zmm0, {{-?[0-9]*}}(%rsp) # 32-byte Folded Spill
- %1 = call <32 x i8> @llvm.x86.avx512.mask.pmovus.wb.512(<32 x i16> %a0, <32 x i8> undef, i32 -1)
- %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- ret <32 x i8> %1
-}
-declare <32 x i8> @llvm.x86.avx512.mask.pmovus.wb.512(<32 x i16>, <32 x i8>, i32)
-
-define <4 x i32> @stack_fold_extracti32x4(<16 x i32> %a0, <16 x i32> %a1) {
- ;CHECK-LABEL: stack_fold_extracti32x4
- ;CHECK: vextracti32x4 $3, {{%zmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
- ; add forces execution domain
- %1 = add <16 x i32> %a0, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
- %2 = shufflevector <16 x i32> %1, <16 x i32> %a1, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
- %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- ret <4 x i32> %2
-}
-
-define <2 x i64> @stack_fold_extracti64x2(<8 x i64> %a0, <8 x i64> %a1) {
- ;CHECK-LABEL: stack_fold_extracti64x2
- ;CHECK: vextracti64x2 $3, {{%zmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
- ; add forces execution domain
- %1 = add <8 x i64> %a0, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
- %2 = shufflevector <8 x i64> %1, <8 x i64> %a1, <2 x i32> <i32 6, i32 7>
- %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- ret <2 x i64> %2
-}
-
-define <8 x i32> @stack_fold_extracti32x8(<16 x i32> %a0, <16 x i32> %a1) {
- ;CHECK-LABEL: stack_fold_extracti32x8
- ;CHECK: vextracti32x8 $1, {{%zmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 32-byte Folded Spill
- ; add forces execution domain
- %1 = add <16 x i32> %a0, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
- %2 = shufflevector <16 x i32> %1, <16 x i32> %a1, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- ret <8 x i32> %2
-}
-
-define <4 x i64> @stack_fold_extracti64x4(<8 x i64> %a0, <8 x i64> %a1) {
- ;CHECK-LABEL: stack_fold_extracti64x4
- ;CHECK: vextracti64x4 $1, {{%zmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 32-byte Folded Spill
- ; add forces execution domain
- %1 = add <8 x i64> %a0, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
- %2 = shufflevector <8 x i64> %1, <8 x i64> %a1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- ret <4 x i64> %2
-}
-
-define <16 x i32> @stack_fold_inserti32x8(<8 x i32> %a0, <8 x i32> %a1) {
- ;CHECK-LABEL: stack_fold_inserti32x8
- ;CHECK: vinserti32x8 $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- ; add forces execution domain
- %3 = add <16 x i32> %2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
- ret <16 x i32> %3
-}
-
-define <8 x i64> @stack_fold_inserti64x4(<4 x i64> %a0, <4 x i64> %a1) {
- ;CHECK-LABEL: stack_fold_inserti64x4
- ;CHECK: vinserti64x4 $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <4 x i64> %a0, <4 x i64> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ; add forces execution domain
- %3 = add <8 x i64> %2, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
- ret <8 x i64> %3
-}
-
-define <2 x i64> @stack_fold_movq_load(<2 x i64> %a0) {
- ;CHECK-LABEL: stack_fold_movq_load
- ;CHECK: movq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
- ; add forces execution domain
- %3 = add <2 x i64> %2, <i64 1, i64 1>
- ret <2 x i64> %3
-}
-
-define <16 x i32> @stack_fold_vpermt2d(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
- ;CHECK-LABEL: stack_fold_vpermt2d
- ;CHECK: vpermt2d {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <16 x i32> @llvm.x86.avx512.mask.vpermi2var.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1)
- ret <16 x i32> %res
-}
-declare <16 x i32> @llvm.x86.avx512.mask.vpermi2var.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
-
-define <16 x i32> @stack_fold_vpermi2d(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
- ;CHECK-LABEL: stack_fold_vpermi2d
- ;CHECK: vpermi2d {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <16 x i32> @llvm.x86.avx512.mask.vpermt2var.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1)
- ret <16 x i32> %res
-}
-declare <16 x i32> @llvm.x86.avx512.mask.vpermt2var.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
-
-define <8 x i64> @stack_fold_vpermt2q(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) {
- ;CHECK-LABEL: stack_fold_vpermt2q
- ;CHECK: vpermt2q {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <8 x i64> @llvm.x86.avx512.mask.vpermi2var.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
- ret <8 x i64> %res
-}
-declare <8 x i64> @llvm.x86.avx512.mask.vpermi2var.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
-
-define <8 x i64> @stack_fold_vpermi2q(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) {
- ;CHECK-LABEL: stack_fold_vpermi2q
- ;CHECK: vpermi2q {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <8 x i64> @llvm.x86.avx512.mask.vpermt2var.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
- ret <8 x i64> %res
-}
-declare <8 x i64> @llvm.x86.avx512.mask.vpermt2var.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
-
-define <32 x i16> @stack_fold_vpermt2w(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) {
- ;CHECK-LABEL: stack_fold_vpermt2w
- ;CHECK: vpermt2w {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <32 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 -1)
- ret <32 x i16> %res
-}
-declare <32 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
-
-define <32 x i16> @stack_fold_vpermi2w(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) {
- ;CHECK-LABEL: stack_fold_vpermi2w
- ;CHECK: vpermi2w {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <32 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 -1)
- ret <32 x i16> %res
-}
-declare <32 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
-
-define <64 x i8> @stack_fold_vpermt2b(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) {
- ;CHECK-LABEL: stack_fold_vpermt2b
- ;CHECK: vpermt2b {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <64 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 -1)
- ret <64 x i8> %res
-}
-declare <64 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
-
-define <64 x i8> @stack_fold_vpermi2b(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) {
- ;CHECK-LABEL: stack_fold_vpermi2b
- ;CHECK: vpermi2b {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <64 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 -1)
- ret <64 x i8> %res
-}
-declare <64 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
-
define <16 x i32> @stack_fold_pmovsxbd_zmm(<16 x i8> %a0) {
;CHECK-LABEL: stack_fold_pmovsxbd_zmm
;CHECK: vpmovsxbd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
@@ -567,6 +971,51 @@ define <8 x i64> @stack_fold_pmovsxwq_maskz_zmm(<8 x i16> %a0, i8 %mask) {
ret <8 x i64> %4
}
+define <16 x i8> @stack_fold_vpmovusdb(<16 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_vpmovusdb
+ ;CHECK: vpmovusdb %zmm0, {{-?[0-9]*}}(%rsp) # 16-byte Folded Spill
+ %1 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.db.512(<16 x i32> %a0, <16 x i8> undef, i16 -1)
+ %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ ret <16 x i8> %1
+}
+declare <16 x i8> @llvm.x86.avx512.mask.pmovus.db.512(<16 x i32>, <16 x i8>, i16)
+
+define <16 x i16> @stack_fold_vpmovusdw(<16 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_vpmovusdw
+ ;CHECK: vpmovusdw %zmm0, {{-?[0-9]*}}(%rsp) # 32-byte Folded Spill
+ %1 = call <16 x i16> @llvm.x86.avx512.mask.pmovus.dw.512(<16 x i32> %a0, <16 x i16> undef, i16 -1)
+ %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ ret <16 x i16> %1
+}
+declare <16 x i16> @llvm.x86.avx512.mask.pmovus.dw.512(<16 x i32>, <16 x i16>, i16)
+
+define <8 x i32> @stack_fold_vpmovusqd(<8 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_vpmovusqd
+ ;CHECK: vpmovusqd %zmm0, {{-?[0-9]*}}(%rsp) # 32-byte Folded Spill
+ %1 = call <8 x i32> @llvm.x86.avx512.mask.pmovus.qd.512(<8 x i64> %a0, <8 x i32> undef, i8 -1)
+ %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ ret <8 x i32> %1
+}
+declare <8 x i32> @llvm.x86.avx512.mask.pmovus.qd.512(<8 x i64>, <8 x i32>, i8)
+
+define <8 x i16> @stack_fold_vpmovusqw(<8 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_vpmovusqw
+ ;CHECK: vpmovusqw %zmm0, {{-?[0-9]*}}(%rsp) # 16-byte Folded Spill
+ %1 = call <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.512(<8 x i64> %a0, <8 x i16> undef, i8 -1)
+ %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ ret <8 x i16> %1
+}
+declare <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.512(<8 x i64>, <8 x i16>, i8)
+
+define <32 x i8> @stack_fold_vpmovuswb(<32 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_vpmovuswb
+ ;CHECK: vpmovuswb %zmm0, {{-?[0-9]*}}(%rsp) # 32-byte Folded Spill
+ %1 = call <32 x i8> @llvm.x86.avx512.mask.pmovus.wb.512(<32 x i16> %a0, <32 x i8> undef, i32 -1)
+ %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ ret <32 x i8> %1
+}
+declare <32 x i8> @llvm.x86.avx512.mask.pmovus.wb.512(<32 x i16>, <32 x i8>, i32)
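+; Note: the vpmovus* tests check that the saturating truncations are usable
+; as spill instructions in their own right: rather than a full-width spill
+; followed by a separate truncate, the narrowed result is expected to be
+; stored straight to the stack slot, which is why the CHECK lines match the
+; vpmovus* store itself as the "Folded Spill".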
+
define <16 x i32> @stack_fold_pmovzxbd_zmm(<16 x i8> %a0) {
;CHECK-LABEL: stack_fold_pmovzxbd_zmm
;CHECK: vpmovzxbd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
@@ -636,35 +1085,14 @@ define <8 x i64> @stack_fold_pmovzxwq_maskz_zmm(<8 x i16> %a0, i8 %mask) {
ret <8 x i64> %4
}
-define <64 x i8> @stack_fold_punpckhbw_zmm(<64 x i8> %a0, <64 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_punpckhbw_zmm
- ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <64 x i8> %a0, <64 x i8> %a1, <64 x i32> <i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
- ret <64 x i8> %2
-}
-
-define <64 x i8> @stack_fold_punpckhbw_mask_zmm(<64 x i8>* %passthru, <64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
- ;CHECK-LABEL: stack_fold_punpckhbw_mask_zmm
- ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <64 x i8> %a0, <64 x i8> %a1, <64 x i32> <i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
- %3 = bitcast i64 %mask to <64 x i1>
- ; load needed to keep the operation from being scheduled about the asm block
- %4 = load <64 x i8>, <64 x i8>* %passthru
- %5 = select <64 x i1> %3, <64 x i8> %2, <64 x i8> %4
- ret <64 x i8> %5
-}
-
-define <64 x i8> @stack_fold_punpckhbw_maskz_zmm(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
- ;CHECK-LABEL: stack_fold_punpckhbw_maskz_zmm
- ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <64 x i8> %a0, <64 x i8> %a1, <64 x i32> <i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
- %3 = bitcast i64 %mask to <64 x i1>
- %4 = select <64 x i1> %3, <64 x i8> %2, <64 x i8> zeroinitializer
- ret <64 x i8> %4
+define <8 x i64> @stack_fold_psadbw(<64 x i8> %a0, <64 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psadbw
+ ;CHECK: vpsadbw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i64> @llvm.x86.avx512.psad.bw.512(<64 x i8> %a0, <64 x i8> %a1)
+ ret <8 x i64> %2
}
+declare <8 x i64> @llvm.x86.avx512.psad.bw.512(<64 x i8>, <64 x i8>) nounwind readnone
define <64 x i8> @stack_fold_pshufb_zmm(<64 x i8> %a0, <64 x i8> %a1) {
;CHECK-LABEL: stack_fold_pshufb_zmm
@@ -776,280 +1204,440 @@ define <32 x i16> @stack_fold_pshuflw_zmm_maskz(<32 x i16> %a0, i32 %mask) {
ret <32 x i16> %4
}
-define <32 x i16> @stack_fold_pmaddubsw_zmm(<64 x i8> %a0, <64 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_pmaddubsw_zmm
- ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+define <16 x i32> @stack_fold_pslld(<16 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pslld
+ ;CHECK: vpslld {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <32 x i16> @llvm.x86.avx512.mask.pmaddubs.w.512(<64 x i8> %a0, <64 x i8> %a1, <32 x i16> undef, i32 -1)
- ret <32 x i16> %2
+ %2 = call <16 x i32> @llvm.x86.avx512.psll.d.512(<16 x i32> %a0, <4 x i32> %a1)
+ ret <16 x i32> %2
}
-declare <32 x i16> @llvm.x86.avx512.mask.pmaddubs.w.512(<64 x i8>, <64 x i8>, <32 x i16>, i32) nounwind readnone
+declare <16 x i32> @llvm.x86.avx512.psll.d.512(<16 x i32>, <4 x i32>) nounwind readnone
-define <32 x i16> @stack_fold_pmaddubsw_zmm_mask(<32 x i16>* %passthru, <64 x i8> %a0, <64 x i8> %a1, i32 %mask) {
- ;CHECK-LABEL: stack_fold_pmaddubsw_zmm_mask
- ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+define <16 x i32> @stack_fold_pslld_mask(<16 x i32>* %passthru, <16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_pslld_mask
+ ;CHECK: vpslld {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <32 x i16> @llvm.x86.avx512.mask.pmaddubs.w.512(<64 x i8> %a0, <64 x i8> %a1, <32 x i16> undef, i32 -1)
- %3 = bitcast i32 %mask to <32 x i1>
- ; load needed to keep the operation from being scheduled about the asm block
- %4 = load <32 x i16>, <32 x i16>* %passthru
- %5 = select <32 x i1> %3, <32 x i16> %2, <32 x i16> %4
- ret <32 x i16> %5
+ %2 = call <16 x i32> @llvm.x86.avx512.psll.d.512(<16 x i32> %a0, <4 x i32> %a1)
+ %3 = bitcast i16 %mask to <16 x i1>
+ %4 = load <16 x i32>, <16 x i32>* %passthru
+ %5 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> %4
+ ret <16 x i32> %5
}
-define <32 x i16> @stack_fold_pmaddubsw_zmm_maskz(<64 x i8> %a0, <64 x i8> %a1, i32 %mask) {
- ;CHECK-LABEL: stack_fold_pmaddubsw_zmm_maskz
- ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+define <16 x i32> @stack_fold_pslld_maskz(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_pslld_maskz
+ ;CHECK: vpslld {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <32 x i16> @llvm.x86.avx512.mask.pmaddubs.w.512(<64 x i8> %a0, <64 x i8> %a1, <32 x i16> undef, i32 -1)
- %3 = bitcast i32 %mask to <32 x i1>
- %4 = select <32 x i1> %3, <32 x i16> %2, <32 x i16> zeroinitializer
- ret <32 x i16> %4
+ %2 = call <16 x i32> @llvm.x86.avx512.psll.d.512(<16 x i32> %a0, <4 x i32> %a1)
+ %3 = bitcast i16 %mask to <16 x i1>
+ %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> zeroinitializer
+ ret <16 x i32> %4
}
-define <16 x i32> @stack_fold_pmaddwd_zmm(<32 x i16> %a0, <32 x i16> %a1) {
- ;CHECK-LABEL: stack_fold_pmaddwd_zmm
- ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+define <16 x i32> @stack_fold_pslldi(<16 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pslldi
+ ;CHECK: vpslld $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %a0, i32 1)
+ ret <16 x i32> %2
+}
+declare <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32>, i32) nounwind readnone
+
+define <16 x i32> @stack_fold_pslldi_mask(<16 x i32>* %passthru, <16 x i32> %a0, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_pslldi_mask
+ ;CHECK: vpslld $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %a0, i32 1)
+ %3 = bitcast i16 %mask to <16 x i1>
+ %4 = load <16 x i32>, <16 x i32>* %passthru
+ %5 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> %4
+ ret <16 x i32> %5
+}
+
+define <16 x i32> @stack_fold_pslldi_maskz(<16 x i32> %a0, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_pslldi_maskz
+ ;CHECK: vpslld $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %a0, i32 1)
+ %3 = bitcast i16 %mask to <16 x i1>
+ %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> zeroinitializer
+ ret <16 x i32> %4
+}
+
+define <64 x i8> @stack_fold_pslldq(<64 x i8> %a, <64 x i8> %b) {
+ ;CHECK-LABEL: stack_fold_pslldq
+ ;CHECK: vpslldq $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <64 x i8> %a, <64 x i8> zeroinitializer, <64 x i32> <i32 79, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 95, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 111, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 127, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62>
+ ret <64 x i8> %2
+}
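+; Note: in the shuffle above, indices 64-127 select from the zeroinitializer
+; operand, so 79/95/111/127 inject a zero byte at the bottom of each 128-bit
+; lane while the remaining indices move that lane's bytes up by one position,
+; matching vpslldq $1's per-lane byte shift.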
+
+define <8 x i64> @stack_fold_psllq(<8 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psllq
+ ;CHECK: vpsllq {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <16 x i32> @llvm.x86.avx512.mask.pmaddw.d.512(<32 x i16> %a0, <32 x i16> %a1, <16 x i32> undef, i16 -1)
+ %2 = call <8 x i64> @llvm.x86.avx512.psll.q.512(<8 x i64> %a0, <2 x i64> %a1)
+ ret <8 x i64> %2
+}
+declare <8 x i64> @llvm.x86.avx512.psll.q.512(<8 x i64>, <2 x i64>) nounwind readnone
+
+define <8 x i64> @stack_fold_psllqi(<8 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_psllqi
+ ;CHECK: vpsllq $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> %a0, i32 1)
+ ret <8 x i64> %2
+}
+declare <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64>, i32) nounwind readnone
+
+define <16 x i32> @stack_fold_psllvd(<16 x i32> %a0, <16 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psllvd
+ ;CHECK: vpsllvd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> %a0, <16 x i32> %a1)
ret <16 x i32> %2
}
-declare <16 x i32> @llvm.x86.avx512.mask.pmaddw.d.512(<32 x i16>, <32 x i16>, <16 x i32>, i16) nounwind readnone
+declare <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32>, <16 x i32>) nounwind readnone
-define <16 x i32> @stack_fold_pmaddwd_zmm_mask(<16 x i32>* %passthru, <32 x i16> %a0, <32 x i16> %a1, i16 %mask) {
- ;CHECK-LABEL: stack_fold_pmaddwd_zmm_mask
- ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+define <16 x i32> @stack_fold_psllvd_mask(<16 x i32>* %passthru, <16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_psllvd_mask
+ ;CHECK: vpsllvd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <16 x i32> @llvm.x86.avx512.mask.pmaddw.d.512(<32 x i16> %a0, <32 x i16> %a1, <16 x i32> undef, i16 -1)
+ %2 = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> %a0, <16 x i32> %a1)
%3 = bitcast i16 %mask to <16 x i1>
- ; load needed to keep the operation from being scheduled about the asm block
%4 = load <16 x i32>, <16 x i32>* %passthru
%5 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> %4
ret <16 x i32> %5
}
-define <16 x i32> @stack_fold_pmaddwd_zmm_maskz(<16 x i32>* %passthru, <32 x i16> %a0, <32 x i16> %a1, i16 %mask) {
- ;CHECK-LABEL: stack_fold_pmaddwd_zmm_maskz
- ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+define <16 x i32> @stack_fold_psllvd_maskz(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_psllvd_maskz
+ ;CHECK: vpsllvd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <16 x i32> @llvm.x86.avx512.mask.pmaddw.d.512(<32 x i16> %a0, <32 x i16> %a1, <16 x i32> undef, i16 -1)
+ %2 = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> %a0, <16 x i32> %a1)
%3 = bitcast i16 %mask to <16 x i1>
%4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> zeroinitializer
ret <16 x i32> %4
}
-define <16 x i32> @stack_fold_permd(<16 x i32> %a0, <16 x i32> %a1) {
- ;CHECK-LABEL: stack_fold_permd
- ;CHECK: vpermd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+define <8 x i64> @stack_fold_psllvq(<8 x i64> %a0, <8 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psllvq
+ ;CHECK: vpsllvq {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> %a0, <8 x i64> %a1)
+ ret <8 x i64> %2
+}
+declare <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64>, <8 x i64>) nounwind readnone
+
+define <32 x i16> @stack_fold_psllvw(<32 x i16> %a0, <32 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psllvw
+ ;CHECK: vpsllvw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a1, <16 x i32> %a0, <16 x i32> undef, i16 -1)
- ret <16 x i32> %2
+ %2 = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> %a0, <32 x i16> %a1)
+ ret <32 x i16> %2
}
-declare <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32>, <16 x i32>, <16 x i32>, i16) nounwind readonly
+declare <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16>, <32 x i16>) nounwind readnone
-define <8 x i64> @stack_fold_permq(<8 x i64> %a0) {
- ;CHECK-LABEL: stack_fold_permq
- ;CHECK: vpermq $235, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <8 x i64> %a0, <8 x i64> undef, <8 x i32> <i32 3, i32 2, i32 2, i32 3, i32 7, i32 6, i32 6, i32 7>
- ; add forces execution domain
- %3 = add <8 x i64> %2, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
- ret <8 x i64> %3
+define <32 x i16> @stack_fold_psllw(<32 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psllw
+ ;CHECK: vpsllw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i16> @llvm.x86.avx512.psll.w.512(<32 x i16> %a0, <8 x i16> %a1)
+ ret <32 x i16> %2
}
+declare <32 x i16> @llvm.x86.avx512.psll.w.512(<32 x i16>, <8 x i16>) nounwind readnone
-define <8 x i64> @stack_fold_permq_mask(<8 x i64>* %passthru, <8 x i64> %a0, i8 %mask) {
- ;CHECK-LABEL: stack_fold_permq_mask
- ;CHECK: vpermq $235, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+define <32 x i16> @stack_fold_psllwi(<32 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_psllwi
+ ;CHECK: vpsllw $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <8 x i64> %a0, <8 x i64> undef, <8 x i32> <i32 3, i32 2, i32 2, i32 3, i32 7, i32 6, i32 6, i32 7>
- %3 = bitcast i8 %mask to <8 x i1>
- ; load needed to keep the operation from being scheduled above the asm block
- %4 = load <8 x i64>, <8 x i64>* %passthru
- %5 = select <8 x i1> %3, <8 x i64> %2, <8 x i64> %4
- ; add forces execution domain
- %6 = add <8 x i64> %5, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
- ret <8 x i64> %6
+ %2 = call <32 x i16> @llvm.x86.avx512.pslli.w.512(<32 x i16> %a0, i32 1)
+ ret <32 x i16> %2
}
+declare <32 x i16> @llvm.x86.avx512.pslli.w.512(<32 x i16>, i32) nounwind readnone
-define <8 x i64> @stack_fold_permq_maskz(<8 x i64>* %passthru, <8 x i64> %a0, i8 %mask) {
- ;CHECK-LABEL: stack_fold_permq_maskz
- ;CHECK: vpermq $235, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+define <16 x i32> @stack_fold_psrad(<16 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psrad
+ ;CHECK: vpsrad {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i32> @llvm.x86.avx512.psra.d.512(<16 x i32> %a0, <4 x i32> %a1)
+ ret <16 x i32> %2
+}
+declare <16 x i32> @llvm.x86.avx512.psra.d.512(<16 x i32>, <4 x i32>) nounwind readnone
+
+define <16 x i32> @stack_fold_psradi(<16 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_psradi
+ ;CHECK: vpsrad $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <8 x i64> %a0, <8 x i64> undef, <8 x i32> <i32 3, i32 2, i32 2, i32 3, i32 7, i32 6, i32 6, i32 7>
- %3 = bitcast i8 %mask to <8 x i1>
- %4 = select <8 x i1> %3, <8 x i64> %2, <8 x i64> zeroinitializer
- ret <8 x i64> %4
+ %2 = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> %a0, i32 1)
+ ret <16 x i32> %2
}
+declare <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32>, i32) nounwind readnone
-define <8 x i64> @stack_fold_permqvar(<8 x i64> %a0, <8 x i64> %a1) {
- ;CHECK-LABEL: stack_fold_permqvar
- ;CHECK: vpermq {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+define <8 x i64> @stack_fold_psraq(<8 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psraq
+ ;CHECK: vpsraq {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a1, <8 x i64> %a0, <8 x i64> undef, i8 -1)
- ; add forces execution domain
- %3 = add <8 x i64> %2, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
- ret <8 x i64> %3
+ %2 = call <8 x i64> @llvm.x86.avx512.psra.q.512(<8 x i64> %a0, <2 x i64> %a1)
+ ret <8 x i64> %2
}
-declare <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64>, <8 x i64>, <8 x i64>, i8) nounwind readonly
+declare <8 x i64> @llvm.x86.avx512.psra.q.512(<8 x i64>, <2 x i64>) nounwind readnone
-define <8 x i64> @stack_fold_permqvar_mask(<8 x i64>* %passthru, <8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
- ;CHECK-LABEL: stack_fold_permqvar_mask
- ;CHECK: vpermq {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+define <8 x i64> @stack_fold_psraqi(<8 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_psraqi
+ ;CHECK: vpsraq $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> %a0, i32 1)
+ ret <8 x i64> %2
+}
+declare <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64>, i32) nounwind readnone
+
+define <16 x i32> @stack_fold_psravd(<16 x i32> %a0, <16 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psravd
+ ;CHECK: vpsravd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a1, <8 x i64> %a0, <8 x i64> undef, i8 -1)
- %3 = bitcast i8 %mask to <8 x i1>
- ; load needed to keep the operation from being scheduled above the asm block
- %4 = load <8 x i64>, <8 x i64>* %passthru
- %5 = select <8 x i1> %3, <8 x i64> %2, <8 x i64> %4
- ; add forces execution domain
- %6 = add <8 x i64> %5, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
- ret <8 x i64> %6
+ %2 = call <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32> %a0, <16 x i32> %a1)
+ ret <16 x i32> %2
}
+declare <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32>, <16 x i32>) nounwind readnone
-define <32 x i16> @stack_fold_permwvar(<32 x i16> %a0, <32 x i16> %a1) {
- ;CHECK-LABEL: stack_fold_permwvar
- ;CHECK: vpermw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+define <8 x i64> @stack_fold_psravq(<8 x i64> %a0, <8 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psravq
+ ;CHECK: vpsravq {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <8 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64> %a0, <8 x i64> %a1)
+ ret <8 x i64> %2
+}
+declare <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64>, <8 x i64>) nounwind readnone
+
+define <32 x i16> @stack_fold_psravw(<32 x i16> %a0, <32 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psravw
+ ;CHECK: vpsravw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a1, <32 x i16> %a0, <32 x i16> undef, i32 -1)
+ %2 = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> %a0, <32 x i16> %a1)
ret <32 x i16> %2
}
-declare <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16>, <32 x i16>, <32 x i16>, i32) nounwind readonly
+declare <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16>, <32 x i16>) nounwind readnone
-define <32 x i16> @stack_fold_permwvar_mask(<32 x i16>* %passthru, <32 x i16> %a0, <32 x i16> %a1, i32 %mask) {
- ;CHECK-LABEL: stack_fold_permwvar_mask
- ;CHECK: vpermw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+define <32 x i16> @stack_fold_psraw(<32 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psraw
+ ;CHECK: vpsraw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a1, <32 x i16> %a0, <32 x i16> undef, i32 -1)
- %3 = bitcast i32 %mask to <32 x i1>
- ; load needed to keep the operation from being scheduled above the asm block
- %4 = load <32 x i16>, <32 x i16>* %passthru
- %5 = select <32 x i1> %3, <32 x i16> %2, <32 x i16> %4
- ret <32 x i16> %5
+ %2 = call <32 x i16> @llvm.x86.avx512.psra.w.512(<32 x i16> %a0, <8 x i16> %a1)
+ ret <32 x i16> %2
}
+declare <32 x i16> @llvm.x86.avx512.psra.w.512(<32 x i16>, <8 x i16>) nounwind readnone
-define <32 x i16> @stack_fold_permwvar_maskz(<32 x i16> %a0, <32 x i16> %a1, i32 %mask) {
- ;CHECK-LABEL: stack_fold_permwvar_maskz
- ;CHECK: vpermw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a1, <32 x i16> %a0, <32 x i16> undef, i32 -1)
- %3 = bitcast i32 %mask to <32 x i1>
- %4 = select <32 x i1> %3, <32 x i16> %2, <32 x i16> zeroinitializer
- ret <32 x i16> %4
+define <32 x i16> @stack_fold_psrawi(<32 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_psrawi
+ ;CHECK: vpsraw $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i16> @llvm.x86.avx512.psrai.w.512(<32 x i16> %a0, i32 1)
+ ret <32 x i16> %2
}
+declare <32 x i16> @llvm.x86.avx512.psrai.w.512(<32 x i16>, i32) nounwind readnone
-define <64 x i8> @stack_fold_permbvar(<64 x i8> %a0, <64 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_permbvar
- ;CHECK: vpermb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+define <16 x i32> @stack_fold_psrld(<16 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psrld
+ ;CHECK: vpsrld {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a1, <64 x i8> %a0, <64 x i8> undef, i64 -1)
+ %2 = call <16 x i32> @llvm.x86.avx512.psrl.d.512(<16 x i32> %a0, <4 x i32> %a1)
+ ret <16 x i32> %2
+}
+declare <16 x i32> @llvm.x86.avx512.psrl.d.512(<16 x i32>, <4 x i32>) nounwind readnone
+
+define <16 x i32> @stack_fold_psrldi(<16 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_psrldi
+ ;CHECK: vpsrld $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> %a0, i32 1)
+ ret <16 x i32> %2
+}
+declare <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32>, i32) nounwind readnone
+
+define <64 x i8> @stack_fold_psrldq(<64 x i8> %a, <64 x i8> %b) {
+ ;CHECK-LABEL: stack_fold_psrldq
+ ;CHECK: vpsrldq $2, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <64 x i8> %a, <64 x i8> zeroinitializer, <64 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 64, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 64, i32 64, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 64, i32 64, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 64>
ret <64 x i8> %2
}
-declare <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8>, <64 x i8>, <64 x i8>, i64) nounwind readonly
-define <64 x i8> @stack_fold_permbvar_mask(<64 x i8>* %passthru, <64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
- ;CHECK-LABEL: stack_fold_permbvar_mask
- ;CHECK: vpermb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+define <8 x i64> @stack_fold_psrlq(<8 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlq
+ ;CHECK: vpsrlq {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a1, <64 x i8> %a0, <64 x i8> undef, i64 -1)
- %3 = bitcast i64 %mask to <64 x i1>
- ; load needed to keep the operation from being scheduled above the asm block
- %4 = load <64 x i8>, <64 x i8>* %passthru
- %5 = select <64 x i1> %3, <64 x i8> %2, <64 x i8> %4
- ret <64 x i8> %5
+ %2 = call <8 x i64> @llvm.x86.avx512.psrl.q.512(<8 x i64> %a0, <2 x i64> %a1)
+ ret <8 x i64> %2
}
+declare <8 x i64> @llvm.x86.avx512.psrl.q.512(<8 x i64>, <2 x i64>) nounwind readnone
-define <64 x i8> @stack_fold_permbvar_maskz(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
- ;CHECK-LABEL: stack_fold_permbvar_maskz
- ;CHECK: vpermb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+define <8 x i64> @stack_fold_psrlqi(<8 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_psrlqi
+ ;CHECK: vpsrlq $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> %a0, i32 1)
+ ret <8 x i64> %2
+}
+declare <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64>, i32) nounwind readnone
+
+define <16 x i32> @stack_fold_psrlvd(<16 x i32> %a0, <16 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlvd
+ ;CHECK: vpsrlvd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a1, <64 x i8> %a0, <64 x i8> undef, i64 -1)
- %3 = bitcast i64 %mask to <64 x i1>
- %4 = select <64 x i1> %3, <64 x i8> %2, <64 x i8> zeroinitializer
- ret <64 x i8> %4
+ %2 = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> %a0, <16 x i32> %a1)
+ ret <16 x i32> %2
}
+declare <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32>, <16 x i32>) nounwind readnone
-define <8 x i64> @stack_fold_valignq(<8 x i64> %a, <8 x i64> %b) {
- ;CHECK-LABEL: stack_fold_valignq
- ;CHECK: valignq $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
+define <8 x i64> @stack_fold_psrlvq(<8 x i64> %a0, <8 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlvq
+ ;CHECK: vpsrlvq {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <8 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> %a0, <8 x i64> %a1)
ret <8 x i64> %2
}
+declare <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64>, <8 x i64>) nounwind readnone
-define <8 x i64> @stack_fold_valignq_mask(<8 x i64> %a, <8 x i64> %b, <8 x i64>* %passthru, i8 %mask) {
- ;CHECK-LABEL: stack_fold_valignq_mask
- ;CHECK: valignq $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
- %3 = bitcast i8 %mask to <8 x i1>
- %4 = load <8 x i64>, <8 x i64>* %passthru
- %5 = select <8 x i1> %3, <8 x i64> %2, <8 x i64> %4
- ret <8 x i64> %5
+define <32 x i16> @stack_fold_psrlvw(<32 x i16> %a0, <32 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlvw
+ ;CHECK: vpsrlvw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> %a0, <32 x i16> %a1)
+ ret <32 x i16> %2
}
+declare <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16>, <32 x i16>) nounwind readnone
-define <8 x i64> @stack_fold_valignq_maskz(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
- ;CHECK-LABEL: stack_fold_valignq_maskz
- ;CHECK: valignq $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
- %3 = bitcast i8 %mask to <8 x i1>
- %4 = select <8 x i1> %3, <8 x i64> %2, <8 x i64> zeroinitializer
- ret <8 x i64> %4
+define <32 x i16> @stack_fold_psrlw(<32 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlw
+ ;CHECK: vpsrlw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i16> @llvm.x86.avx512.psrl.w.512(<32 x i16> %a0, <8 x i16> %a1)
+ ret <32 x i16> %2
}
+declare <32 x i16> @llvm.x86.avx512.psrl.w.512(<32 x i16>, <8 x i16>) nounwind readnone
-define <16 x i32> @stack_fold_valignd(<16 x i32> %a, <16 x i32> %b) {
- ;CHECK-LABEL: stack_fold_valignd
- ;CHECK: valignd $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+define <32 x i16> @stack_fold_psrlwi(<32 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_psrlwi
+ ;CHECK: vpsrlw $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32><i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>
+ %2 = call <32 x i16> @llvm.x86.avx512.psrli.w.512(<32 x i16> %a0, i32 1)
+ ret <32 x i16> %2
+}
+declare <32 x i16> @llvm.x86.avx512.psrli.w.512(<32 x i16>, i32) nounwind readnone
+
+define <64 x i8> @stack_fold_psubb(<64 x i8> %a0, <64 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psubb
+ ;CHECK: vpsubb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = sub <64 x i8> %a0, %a1
+ ret <64 x i8> %2
+}
+
+define <16 x i32> @stack_fold_psubd(<16 x i32> %a0, <16 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psubd
+ ;CHECK: vpsubd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = sub <16 x i32> %a0, %a1
ret <16 x i32> %2
}
-define <16 x i32> @stack_fold_valignd_mask(<16 x i32> %a, <16 x i32> %b, <16 x i32>* %passthru, i16 %mask) {
- ;CHECK-LABEL: stack_fold_valignd_mask
- ;CHECK: valignd $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32><i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>
- %3 = bitcast i16 %mask to <16 x i1>
- %4 = load <16 x i32>, <16 x i32>* %passthru
- %5 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> %4
- ret <16 x i32> %5
+define <8 x i64> @stack_fold_psubq(<8 x i64> %a0, <8 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psubq
+ ;CHECK: vpsubq {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = sub <8 x i64> %a0, %a1
+ ret <8 x i64> %2
}
-define <16 x i32> @stack_fold_valignd_maskz(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
- ;CHECK-LABEL: stack_fold_valignd_maskz
- ;CHECK: valignd $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32><i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>
- %3 = bitcast i16 %mask to <16 x i1>
- %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> zeroinitializer
- ret <16 x i32> %4
+define <64 x i8> @stack_fold_psubsb(<64 x i8> %a0, <64 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psubsb
+ ;CHECK: vpsubsb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <64 x i8> @llvm.x86.avx512.mask.psubs.b.512(<64 x i8> %a0, <64 x i8> %a1, <64 x i8> undef, i64 -1)
+ ret <64 x i8> %2
}
+declare <64 x i8> @llvm.x86.avx512.mask.psubs.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64) nounwind readnone
-define <64 x i8> @stack_fold_palignr(<64 x i8> %a0, <64 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_palignr
- ;CHECK: vpalignr $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <64 x i8> %a1, <64 x i8> %a0, <64 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 80, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 112>
+define <32 x i16> @stack_fold_psubsw(<32 x i16> %a0, <32 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psubsw
+ ;CHECK: vpsubsw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i16> @llvm.x86.avx512.mask.psubs.w.512(<32 x i16> %a0, <32 x i16> %a1, <32 x i16> undef, i32 -1)
+ ret <32 x i16> %2
+}
+declare <32 x i16> @llvm.x86.avx512.mask.psubs.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
+
+define <64 x i8> @stack_fold_psubusb(<64 x i8> %a0, <64 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psubusb
+ ;CHECK: vpsubusb {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <64 x i8> @llvm.x86.avx512.mask.psubus.b.512(<64 x i8> %a0, <64 x i8> %a1, <64 x i8> undef, i64 -1)
ret <64 x i8> %2
}
+declare <64 x i8> @llvm.x86.avx512.mask.psubus.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64) nounwind readnone
-define <64 x i8> @stack_fold_palignr_mask(<64 x i8> %a0, <64 x i8> %a1, <64 x i8>* %passthru, i64 %mask) {
- ;CHECK-LABEL: stack_fold_palignr_mask
- ;CHECK: vpalignr $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <64 x i8> %a1, <64 x i8> %a0, <64 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 80, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 112>
+define <32 x i16> @stack_fold_psubusw(<32 x i16> %a0, <32 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psubusw
+ ;CHECK: vpsubusw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i16> @llvm.x86.avx512.mask.psubus.w.512(<32 x i16> %a0, <32 x i16> %a1, <32 x i16> undef, i32 -1)
+ ret <32 x i16> %2
+}
+declare <32 x i16> @llvm.x86.avx512.mask.psubus.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
+
+define <32 x i16> @stack_fold_psubw(<32 x i16> %a0, <32 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psubw
+ ;CHECK: vpsubw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = sub <32 x i16> %a0, %a1
+ ret <32 x i16> %2
+}
+
+define <16 x i32> @stack_fold_ternlogd(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
+ ;CHECK-LABEL: stack_fold_ternlogd
+ ;CHECK: vpternlogd $33, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
+ ret <16 x i32> %res
+}
+declare <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i32, i16)
+
+define <8 x i64> @stack_fold_ternlogq(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) {
+ ;CHECK-LABEL: stack_fold_ternlogq
+ ;CHECK: vpternlogq $33, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <8 x i64> @llvm.x86.avx512.mask.pternlog.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i32 33, i8 -1)
+ ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.pternlog.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i32, i8)
+
+define <64 x i8> @stack_fold_punpckhbw_zmm(<64 x i8> %a0, <64 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_punpckhbw_zmm
+ ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <64 x i8> %a0, <64 x i8> %a1, <64 x i32> <i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
+ ret <64 x i8> %2
+}
+
+define <64 x i8> @stack_fold_punpckhbw_mask_zmm(<64 x i8>* %passthru, <64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
+ ;CHECK-LABEL: stack_fold_punpckhbw_mask_zmm
+ ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <64 x i8> %a0, <64 x i8> %a1, <64 x i32> <i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
%3 = bitcast i64 %mask to <64 x i1>
+ ; load needed to keep the operation from being scheduled above the asm block
%4 = load <64 x i8>, <64 x i8>* %passthru
%5 = select <64 x i1> %3, <64 x i8> %2, <64 x i8> %4
ret <64 x i8> %5
}
-define <64 x i8> @stack_fold_palignr_maskz(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
- ;CHECK-LABEL: stack_fold_palignr_maskz
- ;CHECK: vpalignr $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <64 x i8> %a1, <64 x i8> %a0, <64 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 80, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 112>
+define <64 x i8> @stack_fold_punpckhbw_maskz_zmm(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
+ ;CHECK-LABEL: stack_fold_punpckhbw_maskz_zmm
+ ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <64 x i8> %a0, <64 x i8> %a1, <64 x i32> <i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
%3 = bitcast i64 %mask to <64 x i1>
%4 = select <64 x i1> %3, <64 x i8> %2, <64 x i8> zeroinitializer
ret <64 x i8> %4
diff --git a/test/CodeGen/X86/stack-folding-int-avx512vl.ll b/test/CodeGen/X86/stack-folding-int-avx512vl.ll
index 77afc49b2576..7ce798f778a3 100644
--- a/test/CodeGen/X86/stack-folding-int-avx512vl.ll
+++ b/test/CodeGen/X86/stack-folding-int-avx512vl.ll
@@ -8,6 +8,263 @@ target triple = "x86_64-unknown-unknown"
; By including a nop call with sideeffects we can force a partial register spill of the
; relevant registers and check that the reload is correctly folded into the instruction.
+define <8 x i32> @stack_fold_valignd_ymm(<8 x i32> %a, <8 x i32> %b) {
+ ;CHECK-LABEL: stack_fold_valignd_ymm
+ ;CHECK: valignd $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32><i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
+ ret <8 x i32> %2
+}
+
+define <8 x i32> @stack_fold_valignd_ymm_mask(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %passthru, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_valignd_ymm_mask
+ ;CHECK: valignd $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32><i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
+ %3 = bitcast i8 %mask to <8 x i1>
+ %4 = load <8 x i32>, <8 x i32>* %passthru
+ %5 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> %4
+ ret <8 x i32> %5
+}
+
+define <8 x i32> @stack_fold_valignd_ymm_maskz(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_valignd_ymm_maskz
+ ;CHECK: valignd $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32><i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
+ %3 = bitcast i8 %mask to <8 x i1>
+ %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
+ ret <8 x i32> %4
+}
+
+define <4 x i64> @stack_fold_valignq_ymm(<4 x i64> %a, <4 x i64> %b) {
+ ;CHECK-LABEL: stack_fold_valignq_ymm
+ ;CHECK: valignq $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+ ret <4 x i64> %2
+}
+
+define <16 x i8> @stack_fold_pavgb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pavgb
+ ;CHECK: vpavgb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <32 x i8> @stack_fold_pavgb_ymm(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pavgb_ymm
+ ;CHECK: vpavgb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8> %a0, <32 x i8> %a1)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_pavgw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pavgw
+ ;CHECK: vpavgw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i16> @stack_fold_pavgw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pavgw_ymm
+ ;CHECK: vpavgw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <4 x i32> @stack_fold_extracti32x4(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_extracti32x4
+ ;CHECK: vextracti128 $1, {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
+ ; add forces execution domain
+ %1 = add <8 x i32> %a0, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> %a1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ ret <4 x i32> %2
+}
+
+define <2 x i64> @stack_fold_extracti64x2(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_extracti64x2
+ ;CHECK: vextracti128 $1, {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
+ ; add forces execution domain
+ %1 = add <4 x i64> %a0, <i64 1, i64 1, i64 1, i64 1>
+ %2 = shufflevector <4 x i64> %1, <4 x i64> %a1, <2 x i32> <i32 2, i32 3>
+ %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ ret <2 x i64> %2
+}
+
+define <8 x i32> @stack_fold_inserti32x4(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_inserti32x4
+ ;CHECK: vinserti128 $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ; add forces execution domain
+ %3 = add <8 x i32> %2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ ret <8 x i32> %3
+}
+
+define <4 x i64> @stack_fold_inserti64x2(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_inserti64x2
+ ;CHECK: vinserti128 $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ; add forces execution domain
+ %3 = add <4 x i64> %2, <i64 1, i64 1, i64 1, i64 1>
+ ret <4 x i64> %3
+}
+
+define <16 x i8> @stack_fold_pabsb(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsb
+ ;CHECK: vpabsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %a0)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8>) nounwind readnone
+
+define <32 x i8> @stack_fold_pabsb_ymm(<32 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsb_ymm
+ ;CHECK: vpabsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %a0)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_pabsd(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsd
+ ;CHECK: vpabsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32>) nounwind readnone
+
+define <8 x i32> @stack_fold_pabsd_ymm(<8 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsd_ymm
+ ;CHECK: vpabsd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %a0)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32>) nounwind readnone
+
+define <2 x i64> @stack_fold_pabsq(<2 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsq
+ ;CHECK: vpabsq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.avx512.mask.pabs.q.128(<2 x i64> %a0, <2 x i64> undef, i8 -1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.avx512.mask.pabs.q.128(<2 x i64>, <2 x i64>, i8) nounwind readnone
+
+define <4 x i64> @stack_fold_pabsq_ymm(<4 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsq_ymm
+ ;CHECK: vpabsq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx512.mask.pabs.q.256(<4 x i64> %a0, <4 x i64> undef, i8 -1)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx512.mask.pabs.q.256(<4 x i64>, <4 x i64>, i8) nounwind readnone
+
+define <8 x i16> @stack_fold_pabsw(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsw
+ ;CHECK: vpabsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a0)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16>) nounwind readnone
+
+define <16 x i16> @stack_fold_pabsw_ymm(<16 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsw_ymm
+ ;CHECK: vpabsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %a0)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_packssdw(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_packssdw
+ ;CHECK: vpackssdw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a0, <4 x i32> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <16 x i16> @stack_fold_packssdw_ymm(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_packssdw_ymm
+ ;CHECK: vpackssdw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a0, <8 x i32> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <16 x i8> @stack_fold_packsswb(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_packsswb
+ ;CHECK: vpacksswb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <32 x i8> @stack_fold_packsswb_ymm(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_packsswb_ymm
+ ;CHECK: vpacksswb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a0, <16 x i16> %a1)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_packusdw(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_packusdw
+ ;CHECK: vpackusdw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <16 x i16> @stack_fold_packusdw_ymm(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_packusdw_ymm
+ ;CHECK: vpackusdw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a0, <8 x i32> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <16 x i8> @stack_fold_packuswb(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_packuswb
+ ;CHECK: vpackuswb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <32 x i8> @stack_fold_packuswb_ymm(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_packuswb_ymm
+ ;CHECK: vpackuswb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a0, <16 x i16> %a1)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>) nounwind readnone
+
define <16 x i8> @stack_fold_paddb(<16 x i8> %a0, <16 x i8> %a1) {
;CHECK-LABEL: stack_fold_paddb
;CHECK: vpaddb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
@@ -188,6 +445,35 @@ define <16 x i16> @stack_fold_paddw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
ret <16 x i16> %2
}
+define <32 x i8> @stack_fold_palignr(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_palignr
+ ;CHECK: vpalignr $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <32 x i8> %a1, <32 x i8> %a0, <32 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48>
+ ret <32 x i8> %2
+}
+
+define <32 x i8> @stack_fold_palignr_mask(<32 x i8> %a0, <32 x i8> %a1, <32 x i8>* %passthru, i32 %mask) {
+ ;CHECK-LABEL: stack_fold_palignr_mask
+ ;CHECK: vpalignr $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <32 x i8> %a1, <32 x i8> %a0, <32 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48>
+ %3 = bitcast i32 %mask to <32 x i1>
+ %4 = load <32 x i8>, <32 x i8>* %passthru
+ %5 = select <32 x i1> %3, <32 x i8> %2, <32 x i8> %4
+ ret <32 x i8> %5
+}
+
+define <32 x i8> @stack_fold_palignr_maskz(<32 x i8> %a0, <32 x i8> %a1, i32 %mask) {
+ ;CHECK-LABEL: stack_fold_palignr_maskz
+ ;CHECK: vpalignr $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <32 x i8> %a1, <32 x i8> %a0, <32 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48>
+ %3 = bitcast i32 %mask to <32 x i1>
+ %4 = select <32 x i1> %3, <32 x i8> %2, <32 x i8> zeroinitializer
+ ret <32 x i8> %4
+}
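+; In the two masked variants above, the bitcast of i32 to <32 x i1> plus a
+; select is the generic IR idiom for AVX-512 masking; the backend is expected
+; to fold it into the {%k} (merge) and {%k} {z} (zeroing) forms of the folded
+; vpalignr rather than emitting a separate blend.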
+
define i16 @stack_fold_pcmpeqb(<16 x i8> %a0, <16 x i8> %a1) {
;CHECK-LABEL: stack_fold_pcmpeqb
;CHECK: vpcmpeqb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 16-byte Folded Reload
@@ -226,141 +512,649 @@ define i8 @stack_fold_pcmpeqw(<8 x i16> %a0, <8 x i16> %a1) {
ret i8 %3
}
-define <16 x i8> @stack_fold_psubb(<16 x i8> %a0, <16 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_psubb
- ;CHECK: vpsubb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+define <32 x i8> @stack_fold_permbvar(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_permbvar
+ ;CHECK: vpermb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8> %a1, <32 x i8> %a0, <32 x i8> undef, i32 -1)
+ ; add forces execution domain
+ %3 = add <32 x i8> %2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ret <32 x i8> %3
+}
+declare <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8>, <32 x i8>, <32 x i8>, i32) nounwind readonly
+
+define <8 x i32> @stack_fold_permd(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_permd
+ ;CHECK: vpermd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = sub <16 x i8> %a0, %a1
+ %2 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a1, <8 x i32> %a0)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>) nounwind readonly
+
+define <16 x i8> @stack_fold_vpermi2b(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermi2b
+ ;CHECK: vpermi2b {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <16 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 -1)
+ ret <16 x i8> %res
+}
+declare <16 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.128(<16 x i8>, <16 x i8>, <16 x i8>, i16)
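+; Note the deliberate crossover: the vpermi2b tests call the *vpermt2var*
+; intrinsics, and the vpermt2b tests further below call *vpermi2var*.
+; Presumably this is because folding the memory operand changes which
+; register is reused as the destination, and therefore whether the vpermi2
+; or vpermt2 encoding can absorb the load.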
+
+define <32 x i8> @stack_fold_vpermi2b_ymm(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermi2b_ymm
+ ;CHECK: vpermi2b {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
+ %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <32 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 -1)
+ ret <32 x i8> %res
+}
+declare <32 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.256(<32 x i8>, <32 x i8>, <32 x i8>, i32)
+
+define <4 x i32> @stack_fold_vpermi2d(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermi2d
+ ;CHECK: vpermi2d {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <4 x i32> @llvm.x86.avx512.mask.vpermt2var.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 -1)
+ ret <4 x i32> %res
+}
+declare <4 x i32> @llvm.x86.avx512.mask.vpermt2var.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
+
+define <8 x i32> @stack_fold_vpermi2d_ymm(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermi2d_ymm
+ ;CHECK: vpermi2d {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
+ %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <8 x i32> @llvm.x86.avx512.mask.vpermt2var.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 -1)
+ ret <8 x i32> %res
+}
+declare <8 x i32> @llvm.x86.avx512.mask.vpermt2var.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
+
+define <2 x i64> @stack_fold_vpermi2q(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermi2q
+ ;CHECK: vpermi2q {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <2 x i64> @llvm.x86.avx512.mask.vpermt2var.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1)
+ ret <2 x i64> %res
+}
+declare <2 x i64> @llvm.x86.avx512.mask.vpermt2var.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+
+define <4 x i64> @stack_fold_vpermi2q_ymm(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermi2q_ymm
+ ;CHECK: vpermi2q {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
+ %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <4 x i64> @llvm.x86.avx512.mask.vpermt2var.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 -1)
+ ret <4 x i64> %res
+}
+declare <4 x i64> @llvm.x86.avx512.mask.vpermt2var.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+
+define <8 x i16> @stack_fold_vpermi2w(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermi2w
+ ;CHECK: vpermi2w {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <8 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1)
+ ret <8 x i16> %res
+}
+declare <8 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
+
+define <16 x i16> @stack_fold_vpermi2w_ymm(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermi2w_ymm
+ ;CHECK: vpermi2w {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
+ %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <16 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 -1)
+ ret <16 x i16> %res
+}
+declare <16 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
+
+define <4 x i64> @stack_fold_permq(<4 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_permq
+ ;CHECK: vpermq $235, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 2, i32 3>
+ ; add forces execution domain
+ %3 = add <4 x i64> %2, <i64 1, i64 1, i64 1, i64 1>
+ ret <4 x i64> %3
+}
+
+define <4 x i64> @stack_fold_permqvar(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_permqvar
+ ;CHECK: vpermq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a1, <4 x i64> %a0, <4 x i64> undef, i8 -1)
+ ; add forces execution domain
+ %3 = add <4 x i64> %2, <i64 1, i64 1, i64 1, i64 1>
+ ret <4 x i64> %3
+}
+declare <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64>, <4 x i64>, <4 x i64>, i8) nounwind readonly
+
+define <16 x i8> @stack_fold_vpermt2b(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermt2b
+ ;CHECK: vpermt2b {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 -1)
+ ret <16 x i8> %res
+}
+declare <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8>, <16 x i8>, <16 x i8>, i16)
+
+define <32 x i8> @stack_fold_vpermt2b_ymm(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermt2b_ymm
+ ;CHECK: vpermt2b {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
+ %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <32 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 -1)
+ ret <32 x i8> %res
+}
+declare <32 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.256(<32 x i8>, <32 x i8>, <32 x i8>, i32)
+
+define <4 x i32> @stack_fold_vpermt2d(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermt2d
+ ;CHECK: vpermt2d {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <4 x i32> @llvm.x86.avx512.mask.vpermi2var.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 -1)
+ ret <4 x i32> %res
+}
+declare <4 x i32> @llvm.x86.avx512.mask.vpermi2var.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
+
+define <8 x i32> @stack_fold_vpermt2d_ymm(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermt2d_ymm
+ ;CHECK: vpermt2d {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
+ %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <8 x i32> @llvm.x86.avx512.mask.vpermi2var.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 -1)
+ ret <8 x i32> %res
+}
+declare <8 x i32> @llvm.x86.avx512.mask.vpermi2var.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
+
+define <2 x i64> @stack_fold_vpermt2q(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermt2q
+ ;CHECK: vpermt2q {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <2 x i64> @llvm.x86.avx512.mask.vpermi2var.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1)
+ ret <2 x i64> %res
+}
+declare <2 x i64> @llvm.x86.avx512.mask.vpermi2var.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+
+define <4 x i64> @stack_fold_vpermt2q_ymm(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermt2q_ymm
+ ;CHECK: vpermt2q {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
+ %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <4 x i64> @llvm.x86.avx512.mask.vpermi2var.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 -1)
+ ret <4 x i64> %res
+}
+declare <4 x i64> @llvm.x86.avx512.mask.vpermi2var.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+
+define <8 x i16> @stack_fold_vpermt2w(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermt2w
+ ;CHECK: vpermt2w {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <8 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1)
+ ret <8 x i16> %res
+}
+declare <8 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
+
+define <16 x i16> @stack_fold_vpermt2w_ymm(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2) {
+ ;CHECK-LABEL: stack_fold_vpermt2w_ymm
+ ;CHECK: vpermt2w {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
+ %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %res = call <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 -1)
+ ret <16 x i16> %res
+}
+declare <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
+
+define <16 x i16> @stack_fold_permwvar(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_permwvar
+ ;CHECK: vpermw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %a1, <16 x i16> %a0, <16 x i16> undef, i16 -1)
+ ; add forces execution domain
+ %3 = add <16 x i16> %2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ret <16 x i16> %3
+}
+declare <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16>, <16 x i16>, <16 x i16>, i16) nounwind readonly
+
+define <8 x i16> @stack_fold_pmaddubsw(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaddubsw
+ ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8>, <16 x i8>) nounwind readnone
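+; pmaddubsw multiplies unsigned bytes from the first operand by signed bytes
+; from the second and horizontally adds adjacent products into signed i16
+; lanes with saturation; only the folded-reload form is checked here.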
+
+define <8 x i16> @stack_fold_pmaddubsw_mask(<8 x i16>* %passthru, <16 x i8> %a0, <16 x i8> %a1, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_pmaddubsw_mask
+ ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1)
+ %3 = bitcast i8 %mask to <8 x i1>
+ ; load needed to keep the operation from being scheduled above the asm block
+ %4 = load <8 x i16>, <8 x i16>* %passthru
+ %5 = select <8 x i1> %3, <8 x i16> %2, <8 x i16> %4
+ ret <8 x i16> %5
+}
+
+define <8 x i16> @stack_fold_pmaddubsw_maskz(<16 x i8> %a0, <16 x i8> %a1, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_pmaddubsw_maskz
+ ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1)
+ %3 = bitcast i8 %mask to <8 x i1>
+ %4 = select <8 x i1> %3, <8 x i16> %2, <8 x i16> zeroinitializer
+ ret <8 x i16> %4
+}
+
+define <16 x i16> @stack_fold_pmaddubsw_ymm(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaddubsw_ymm
+ ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <16 x i16> @stack_fold_pmaddubsw_ymm_mask(<16 x i16>* %passthru, <32 x i8> %a0, <32 x i8> %a1, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_pmaddubsw_ymm_mask
+ ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1)
+ %3 = bitcast i16 %mask to <16 x i1>
+ ; load needed to keep the operation from being scheduled above the asm block
+ %4 = load <16 x i16>, <16 x i16>* %passthru
+ %5 = select <16 x i1> %3, <16 x i16> %2, <16 x i16> %4
+ ret <16 x i16> %5
+}
+
+define <16 x i16> @stack_fold_pmaddubsw_ymm_maskz(<32 x i8> %a0, <32 x i8> %a1, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_pmaddubsw_ymm_maskz
+ ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1)
+ %3 = bitcast i16 %mask to <16 x i1>
+ %4 = select <16 x i1> %3, <16 x i16> %2, <16 x i16> zeroinitializer
+ ret <16 x i16> %4
+}
+
+define <4 x i32> @stack_fold_pmaddwd(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaddwd
+ ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnone
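+; pmaddwd multiplies corresponding signed i16 lanes and adds each adjacent
+; pair of i32 products, halving the element count from 8 x i16 to 4 x i32.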
+
+define <4 x i32> @stack_fold_pmaddwd_mask(<4 x i32>* %passthru, <8 x i16> %a0, <8 x i16> %a1, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_pmaddwd_mask
+ ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1)
+ %3 = bitcast i8 %mask to <8 x i1>
+ %4 = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ; load needed to keep the operation from being scheduled above the asm block
+ %5 = load <4 x i32>, <4 x i32>* %passthru
+ %6 = select <4 x i1> %4, <4 x i32> %2, <4 x i32> %5
+ ret <4 x i32> %6
+}
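+; Because the result has only four lanes, the <8 x i1> mask obtained from the
+; i8 bitcast is narrowed to its low four bits with a shufflevector before the
+; select, in both the merge- and zero-masked variants.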
+
+define <4 x i32> @stack_fold_pmaddwd_maskz(<8 x i16> %a0, <8 x i16> %a1, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_pmaddwd_maskz
+ ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1)
+ %3 = bitcast i8 %mask to <8 x i1>
+ %4 = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %5 = select <4 x i1> %4, <4 x i32> %2, <4 x i32> zeroinitializer
+ ret <4 x i32> %5
+}
+
+define <8 x i32> @stack_fold_pmaddwd_ymm(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaddwd_ymm
+ ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a0, <16 x i16> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <8 x i32> @stack_fold_pmaddwd_ymm_mask(<8 x i32>* %passthru, <16 x i16> %a0, <16 x i16> %a1, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_pmaddwd_ymm_mask
+ ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a0, <16 x i16> %a1)
+ %3 = bitcast i8 %mask to <8 x i1>
+ ; load needed to keep the operation from being scheduled above the asm block
+ %4 = load <8 x i32>, <8 x i32>* %passthru
+ %5 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> %4
+ ret <8 x i32> %5
+}
+
+define <8 x i32> @stack_fold_pmaddwd_ymm_maskz(<16 x i16> %a0, <16 x i16> %a1, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_pmaddwd_ymm_maskz
+ ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a0, <16 x i16> %a1)
+ %3 = bitcast i8 %mask to <8 x i1>
+ %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
+ ret <8 x i32> %4
+}
+
+define <16 x i8> @stack_fold_pmaxsb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxsb
+ ;CHECK: vpmaxsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %a0, <16 x i8> %a1)
ret <16 x i8> %2
}
+declare <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8>, <16 x i8>) nounwind readnone
-define <32 x i8> @stack_fold_psubb_ymm(<32 x i8> %a0, <32 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_psubb_ymm
- ;CHECK: vpsubb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+define <32 x i8> @stack_fold_pmaxsb_ymm(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxsb_ymm
+ ;CHECK: vpmaxsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = sub <32 x i8> %a0, %a1
+ %2 = call <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8> %a0, <32 x i8> %a1)
ret <32 x i8> %2
}
+declare <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8>, <32 x i8>) nounwind readnone
-define <4 x i32> @stack_fold_psubd(<4 x i32> %a0, <4 x i32> %a1) {
- ;CHECK-LABEL: stack_fold_psubd
- ;CHECK: vpsubd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+define <4 x i32> @stack_fold_pmaxsd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxsd
+ ;CHECK: vpmaxsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = sub <4 x i32> %a0, %a1
+ %2 = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a0, <4 x i32> %a1)
ret <4 x i32> %2
}
+declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone
-define <8 x i32> @stack_fold_psubd_ymm(<8 x i32> %a0, <8 x i32> %a1) {
- ;CHECK-LABEL: stack_fold_psubd_ymm
- ;CHECK: vpsubd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+define <8 x i32> @stack_fold_pmaxsd_ymm(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxsd_ymm
+ ;CHECK: vpmaxsd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = sub <8 x i32> %a0, %a1
+ %2 = call <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32> %a0, <8 x i32> %a1)
ret <8 x i32> %2
}
+declare <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32>, <8 x i32>) nounwind readnone
-define <2 x i64> @stack_fold_psubq(<2 x i64> %a0, <2 x i64> %a1) {
- ;CHECK-LABEL: stack_fold_psubq
- ;CHECK: vpsubq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+define <2 x i64> @stack_fold_pmaxsq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxsq
+ ;CHECK: vpmaxsq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = sub <2 x i64> %a0, %a1
+ %2 = call <2 x i64> @llvm.x86.avx512.mask.pmaxs.q.128(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> undef, i8 -1)
ret <2 x i64> %2
}
+declare <2 x i64> @llvm.x86.avx512.mask.pmaxs.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8) nounwind readnone
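+; There is no SSE/AVX2 intrinsic for 64-bit element min/max, so these q-sized
+; tests use the AVX-512VL masked intrinsics with an all-ones mask (i8 -1) and
+; an undef passthru to model the unmasked operation.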
-define <4 x i64> @stack_fold_psubq_ymm(<4 x i64> %a0, <4 x i64> %a1) {
- ;CHECK-LABEL: stack_fold_psubq_ymm
- ;CHECK: vpsubq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+define <4 x i64> @stack_fold_pmaxsq_ymm(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxsq_ymm
+ ;CHECK: vpmaxsq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = sub <4 x i64> %a0, %a1
+ %2 = call <4 x i64> @llvm.x86.avx512.mask.pmaxs.q.256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> undef, i8 -1)
ret <4 x i64> %2
}
+declare <4 x i64> @llvm.x86.avx512.mask.pmaxs.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8) nounwind readnone
-define <16 x i8> @stack_fold_psubsb(<16 x i8> %a0, <16 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_psubsb
- ;CHECK: vpsubsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+define <8 x i16> @stack_fold_pmaxsw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxsw
+ ;CHECK: vpmaxsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a0, <16 x i8> %a1)
+ %2 = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i16> @stack_fold_pmaxsw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxsw_ymm
+ ;CHECK: vpmaxsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_pmaxub(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxub
+ ;CHECK: vpmaxub {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %a0, <16 x i8> %a1)
ret <16 x i8> %2
}
-declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8>, <16 x i8>) nounwind readnone
-define <32 x i8> @stack_fold_psubsb_ymm(<32 x i8> %a0, <32 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_psubsb_ymm
- ;CHECK: vpsubsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+define <32 x i8> @stack_fold_pmaxub_ymm(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxub_ymm
+ ;CHECK: vpmaxub {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a0, <32 x i8> %a1)
+ %2 = call <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8> %a0, <32 x i8> %a1)
ret <32 x i8> %2
}
-declare <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8>, <32 x i8>) nounwind readnone
+declare <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8>, <32 x i8>) nounwind readnone
-define <8 x i16> @stack_fold_psubsw(<8 x i16> %a0, <8 x i16> %a1) {
- ;CHECK-LABEL: stack_fold_psubsw
- ;CHECK: vpsubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+define <4 x i32> @stack_fold_pmaxud(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxud
+ ;CHECK: vpmaxud {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a0, <8 x i16> %a1)
+ %2 = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i32> @stack_fold_pmaxud_ymm(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxud_ymm
+ ;CHECK: vpmaxud {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32> %a0, <8 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <2 x i64> @stack_fold_pmaxuq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxuq
+ ;CHECK: vpmaxuq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.avx512.mask.pmaxu.q.128(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> undef, i8 -1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.avx512.mask.pmaxu.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8) nounwind readnone
+
+define <2 x i64> @stack_fold_pmaxuq_mask(<2 x i64>* %passthru, <2 x i64> %a0, <2 x i64> %a1, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_pmaxuq_mask
+ ;CHECK: vpmaxuq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = load <2 x i64>, <2 x i64>* %passthru
+ %3 = call <2 x i64> @llvm.x86.avx512.mask.pmaxu.q.128(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %2, i8 %mask)
+ ret <2 x i64> %3
+}
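+; Unlike the select-based masked tests above, these pass the mask and the
+; passthru straight into the masked intrinsic, so merge masking is part of
+; the call itself rather than pattern-matched from a select.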
+
+define <2 x i64> @stack_fold_pmaxuq_maskz(<2 x i64> %a0, <2 x i64> %a1, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_pmaxuq_maskz
+ ;CHECK: vpmaxuq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.avx512.mask.pmaxu.q.128(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> zeroinitializer, i8 %mask)
+ ret <2 x i64> %2
+}
+
+define <4 x i64> @stack_fold_pmaxuq_ymm(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxuq_ymm
+ ;CHECK: vpmaxuq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx512.mask.pmaxu.q.256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> undef, i8 -1)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx512.mask.pmaxu.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8) nounwind readnone
+
+define <4 x i64> @stack_fold_pmaxuq_ymm_mask(<4 x i64>* %passthru, <4 x i64> %a0, <4 x i64> %a1, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_pmaxuq_ymm_mask
+ ;CHECK: vpmaxuq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = load <4 x i64>, <4 x i64>* %passthru
+ %3 = call <4 x i64> @llvm.x86.avx512.mask.pmaxu.q.256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %2, i8 %mask)
+ ret <4 x i64> %3
+}
+
+define <4 x i64> @stack_fold_pmaxuq_ymm_maskz(<4 x i64> %a0, <4 x i64> %a1, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_pmaxuq_ymm_maskz
+ ;CHECK: vpmaxuq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx512.mask.pmaxu.q.256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> zeroinitializer, i8 %mask)
+ ret <4 x i64> %2
+}
+
+define <8 x i16> @stack_fold_pmaxuw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxuw
+ ;CHECK: vpmaxuw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %a0, <8 x i16> %a1)
ret <8 x i16> %2
}
-declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16>, <8 x i16>) nounwind readnone
-define <16 x i16> @stack_fold_psubsw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
- ;CHECK-LABEL: stack_fold_psubsw_ymm
- ;CHECK: vpsubsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+define <16 x i16> @stack_fold_pmaxuw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxuw_ymm
+ ;CHECK: vpmaxuw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a0, <16 x i16> %a1)
+ %2 = call <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16> %a0, <16 x i16> %a1)
ret <16 x i16> %2
}
-declare <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16>, <16 x i16>) nounwind readnone
+declare <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16>, <16 x i16>) nounwind readnone
-define <16 x i8> @stack_fold_psubusb(<16 x i8> %a0, <16 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_psubusb
- ;CHECK: vpsubusb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+define <16 x i8> @stack_fold_pminsb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pminsb
+ ;CHECK: vpminsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %a0, <16 x i8> %a1)
+ %2 = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %a0, <16 x i8> %a1)
ret <16 x i8> %2
}
-declare <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8>, <16 x i8>) nounwind readnone
-define <32 x i8> @stack_fold_psubusb_ymm(<32 x i8> %a0, <32 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_psubusb_ymm
- ;CHECK: vpsubusb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+define <32 x i8> @stack_fold_pminsb_ymm(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pminsb_ymm
+ ;CHECK: vpminsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8> %a0, <32 x i8> %a1)
+ %2 = call <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8> %a0, <32 x i8> %a1)
ret <32 x i8> %2
}
-declare <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8>, <32 x i8>) nounwind readnone
+declare <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8>, <32 x i8>) nounwind readnone
-define <8 x i16> @stack_fold_psubusw(<8 x i16> %a0, <8 x i16> %a1) {
- ;CHECK-LABEL: stack_fold_psubusw
- ;CHECK: vpsubusw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+define <4 x i32> @stack_fold_pminsd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pminsd
+ ;CHECK: vpminsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %a0, <8 x i16> %a1)
+ %2 = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i32> @stack_fold_pminsd_ymm(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pminsd_ymm
+ ;CHECK: vpminsd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32> %a0, <8 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <2 x i64> @stack_fold_pminsq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_pminsq
+ ;CHECK: vpminsq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.avx512.mask.pmins.q.128(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> undef, i8 -1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.avx512.mask.pmins.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8) nounwind readnone
+
+define <4 x i64> @stack_fold_pminsq_ymm(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_pminsq_ymm
+ ;CHECK: vpminsq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx512.mask.pmins.q.256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> undef, i8 -1)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx512.mask.pmins.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8) nounwind readnone
+
+define <8 x i16> @stack_fold_pminsw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pminsw
+ ;CHECK: vpminsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %a0, <8 x i16> %a1)
ret <8 x i16> %2
}
-declare <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16>, <8 x i16>) nounwind readnone
-define <16 x i16> @stack_fold_psubusw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
- ;CHECK-LABEL: stack_fold_psubusw_ymm
- ;CHECK: vpsubusw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+define <16 x i16> @stack_fold_pminsw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pminsw_ymm
+ ;CHECK: vpminsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16> %a0, <16 x i16> %a1)
+ %2 = call <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16> %a0, <16 x i16> %a1)
ret <16 x i16> %2
}
-declare <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16>, <16 x i16>) nounwind readnone
+declare <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16>, <16 x i16>) nounwind readnone
-define <8 x i16> @stack_fold_psubw(<8 x i16> %a0, <8 x i16> %a1) {
- ;CHECK-LABEL: stack_fold_psubw
- ;CHECK: vpsubw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+define <16 x i8> @stack_fold_pminub(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pminub
+ ;CHECK: vpminub {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = sub <8 x i16> %a0, %a1
+ %2 = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <32 x i8> @stack_fold_pminub_ymm(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pminub_ymm
+ ;CHECK: vpminub {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8> %a0, <32 x i8> %a1)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_pminud(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pminud
+ ;CHECK: vpminud {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i32> @stack_fold_pminud_ymm(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pminud_ymm
+ ;CHECK: vpminud {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32> %a0, <8 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <2 x i64> @stack_fold_pminuq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_pminuq
+ ;CHECK: vpminuq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.avx512.mask.pminu.q.128(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> undef, i8 -1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.avx512.mask.pminu.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8) nounwind readnone
+
+define <4 x i64> @stack_fold_pminuq_ymm(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_pminuq_ymm
+ ;CHECK: vpminuq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx512.mask.pminu.q.256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> undef, i8 -1)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx512.mask.pminu.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8) nounwind readnone
+
+define <8 x i16> @stack_fold_pminuw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pminuw
+ ;CHECK: vpminuw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %a0, <8 x i16> %a1)
ret <8 x i16> %2
}
+declare <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16>, <8 x i16>) nounwind readnone
-define <16 x i16> @stack_fold_psubw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
- ;CHECK-LABEL: stack_fold_psubw_ymm
- ;CHECK: vpsubw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+define <16 x i16> @stack_fold_pminuw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pminuw_ymm
+ ;CHECK: vpminuw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = sub <16 x i16> %a0, %a1
+ %2 = call <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16> %a0, <16 x i16> %a1)
ret <16 x i16> %2
}
+declare <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16>, <16 x i16>) nounwind readnone
define <8 x i16> @stack_fold_vpmovdw(<8 x i32> %a0) {
;CHECK-LABEL: stack_fold_vpmovdw
@@ -416,217 +1210,6 @@ define <16 x i8> @stack_fold_vpmovswb(<16 x i16> %a0) {
}
declare <16 x i8> @llvm.x86.avx512.mask.pmovs.wb.256(<16 x i16>, <16 x i8>, i16)
-define <8 x i16> @stack_fold_vpmovusdw(<8 x i32> %a0) {
- ;CHECK-LABEL: stack_fold_vpmovusdw
- ;CHECK: vpmovusdw %ymm0, {{-?[0-9]*}}(%rsp) # 16-byte Folded Spill
- %1 = call <8 x i16> @llvm.x86.avx512.mask.pmovus.dw.256(<8 x i32> %a0, <8 x i16> undef, i8 -1)
- %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- ret <8 x i16> %1
-}
-declare <8 x i16> @llvm.x86.avx512.mask.pmovus.dw.256(<8 x i32>, <8 x i16>, i8)
-
-define <4 x i32> @stack_fold_vpmovusqd(<4 x i64> %a0) {
- ;CHECK-LABEL: stack_fold_vpmovusqd
- ;CHECK: vpmovusqd %ymm0, {{-?[0-9]*}}(%rsp) # 16-byte Folded Spill
- %1 = call <4 x i32> @llvm.x86.avx512.mask.pmovus.qd.256(<4 x i64> %a0, <4 x i32> undef, i8 -1)
- %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- ret <4 x i32> %1
-}
-declare <4 x i32> @llvm.x86.avx512.mask.pmovus.qd.256(<4 x i64>, <4 x i32>, i8)
-
-define <16 x i8> @stack_fold_vpmovuswb(<16 x i16> %a0) {
- ;CHECK-LABEL: stack_fold_vpmovuswb
- ;CHECK: vpmovuswb %ymm0, {{-?[0-9]*}}(%rsp) # 16-byte Folded Spill
- %1 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.wb.256(<16 x i16> %a0, <16 x i8> undef, i16 -1)
- %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- ret <16 x i8> %1
-}
-declare <16 x i8> @llvm.x86.avx512.mask.pmovus.wb.256(<16 x i16>, <16 x i8>, i16)
-
-define <4 x i32> @stack_fold_extracti32x4(<8 x i32> %a0, <8 x i32> %a1) {
- ;CHECK-LABEL: stack_fold_extracti32x4
- ;CHECK: vextracti128 $1, {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
- ; add forces execution domain
- %1 = add <8 x i32> %a0, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
- %2 = shufflevector <8 x i32> %1, <8 x i32> %a1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- ret <4 x i32> %2
-}
-
-define <2 x i64> @stack_fold_extracti64x2(<4 x i64> %a0, <4 x i64> %a1) {
- ;CHECK-LABEL: stack_fold_extracti64x2
- ;CHECK: vextracti128 $1, {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
- ; add forces execution domain
- %1 = add <4 x i64> %a0, <i64 1, i64 1, i64 1, i64 1>
- %2 = shufflevector <4 x i64> %1, <4 x i64> %a1, <2 x i32> <i32 2, i32 3>
- %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- ret <2 x i64> %2
-}
-
-define <8 x i32> @stack_fold_inserti32x4(<4 x i32> %a0, <4 x i32> %a1) {
- ;CHECK-LABEL: stack_fold_inserti32x4
- ;CHECK: vinserti128 $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ; add forces execution domain
- %3 = add <8 x i32> %2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
- ret <8 x i32> %3
-}
-
-define <4 x i64> @stack_fold_inserti64x2(<2 x i64> %a0, <2 x i64> %a1) {
- ;CHECK-LABEL: stack_fold_inserti64x2
- ;CHECK: vinserti128 $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- ; add forces execution domain
- %3 = add <4 x i64> %2, <i64 1, i64 1, i64 1, i64 1>
- ret <4 x i64> %3
-}
-
-define <4 x i32> @stack_fold_vpermt2d(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) {
- ;CHECK-LABEL: stack_fold_vpermt2d
- ;CHECK: vpermt2d {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <4 x i32> @llvm.x86.avx512.mask.vpermi2var.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 -1)
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.avx512.mask.vpermi2var.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
-
-define <4 x i32> @stack_fold_vpermi2d(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) {
- ;CHECK-LABEL: stack_fold_vpermi2d
- ;CHECK: vpermi2d {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <4 x i32> @llvm.x86.avx512.mask.vpermt2var.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 -1)
- ret <4 x i32> %res
-}
-declare <4 x i32> @llvm.x86.avx512.mask.vpermt2var.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
-
-define <2 x i64> @stack_fold_vpermt2q(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
- ;CHECK-LABEL: stack_fold_vpermt2q
- ;CHECK: vpermt2q {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <2 x i64> @llvm.x86.avx512.mask.vpermi2var.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1)
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.avx512.mask.vpermi2var.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
-
-define <2 x i64> @stack_fold_vpermi2q(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
- ;CHECK-LABEL: stack_fold_vpermi2q
- ;CHECK: vpermi2q {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <2 x i64> @llvm.x86.avx512.mask.vpermt2var.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1)
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.avx512.mask.vpermt2var.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
-
-define <8 x i16> @stack_fold_vpermt2w(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2) {
- ;CHECK-LABEL: stack_fold_vpermt2w
- ;CHECK: vpermt2w {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <8 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1)
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
-
-define <8 x i16> @stack_fold_vpermi2w(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2) {
- ;CHECK-LABEL: stack_fold_vpermi2w
- ;CHECK: vpermi2w {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <8 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1)
- ret <8 x i16> %res
-}
-declare <8 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
-
-define <16 x i8> @stack_fold_vpermt2b(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2) {
- ;CHECK-LABEL: stack_fold_vpermt2b
- ;CHECK: vpermt2b {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 -1)
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8>, <16 x i8>, <16 x i8>, i16)
-
-define <16 x i8> @stack_fold_vpermi2b(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2) {
- ;CHECK-LABEL: stack_fold_vpermi2b
- ;CHECK: vpermi2b {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <16 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 -1)
- ret <16 x i8> %res
-}
-declare <16 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.128(<16 x i8>, <16 x i8>, <16 x i8>, i16)
-
-define <8 x i32> @stack_fold_vpermt2d_ymm(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) {
- ;CHECK-LABEL: stack_fold_vpermt2d_ymm
- ;CHECK: vpermt2d {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
- %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <8 x i32> @llvm.x86.avx512.mask.vpermi2var.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 -1)
- ret <8 x i32> %res
-}
-declare <8 x i32> @llvm.x86.avx512.mask.vpermi2var.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
-
-define <8 x i32> @stack_fold_vpermi2d_ymm(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) {
- ;CHECK-LABEL: stack_fold_vpermi2d_ymm
- ;CHECK: vpermi2d {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
- %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <8 x i32> @llvm.x86.avx512.mask.vpermt2var.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 -1)
- ret <8 x i32> %res
-}
-declare <8 x i32> @llvm.x86.avx512.mask.vpermt2var.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
-
-define <4 x i64> @stack_fold_vpermt2q_ymm(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
- ;CHECK-LABEL: stack_fold_vpermt2q_ymm
- ;CHECK: vpermt2q {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
- %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <4 x i64> @llvm.x86.avx512.mask.vpermi2var.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 -1)
- ret <4 x i64> %res
-}
-declare <4 x i64> @llvm.x86.avx512.mask.vpermi2var.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
-
-define <4 x i64> @stack_fold_vpermi2q_ymm(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
- ;CHECK-LABEL: stack_fold_vpermi2q_ymm
- ;CHECK: vpermi2q {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
- %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <4 x i64> @llvm.x86.avx512.mask.vpermt2var.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 -1)
- ret <4 x i64> %res
-}
-declare <4 x i64> @llvm.x86.avx512.mask.vpermt2var.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
-
-define <16 x i16> @stack_fold_vpermt2w_ymm(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2) {
- ;CHECK-LABEL: stack_fold_vpermt2w_ymm
- ;CHECK: vpermt2w {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
- %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 -1)
- ret <16 x i16> %res
-}
-declare <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
-
-define <16 x i16> @stack_fold_vpermi2w_ymm(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2) {
- ;CHECK-LABEL: stack_fold_vpermi2w_ymm
- ;CHECK: vpermi2w {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
- %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <16 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 -1)
- ret <16 x i16> %res
-}
-declare <16 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
-
-define <32 x i8> @stack_fold_vpermt2b_ymm(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2) {
- ;CHECK-LABEL: stack_fold_vpermt2b_ymm
- ;CHECK: vpermt2b {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
- %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <32 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 -1)
- ret <32 x i8> %res
-}
-declare <32 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.256(<32 x i8>, <32 x i8>, <32 x i8>, i32)
-
-define <32 x i8> @stack_fold_vpermi2b_ymm(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2) {
- ;CHECK-LABEL: stack_fold_vpermi2b_ymm
- ;CHECK: vpermi2b {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
- %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %res = call <32 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 -1)
- ret <32 x i8> %res
-}
-declare <32 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.256(<32 x i8>, <32 x i8>, <32 x i8>, i32)
-
define <4 x i32> @stack_fold_pmovsxbd(<16 x i8> %a0) {
;CHECK-LABEL: stack_fold_pmovsxbd
;CHECK: vpmovsxbd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
@@ -636,6 +1219,15 @@ define <4 x i32> @stack_fold_pmovsxbd(<16 x i8> %a0) {
ret <4 x i32> %3
}
+define <8 x i32> @stack_fold_pmovsxbd_ymm(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxbd_ymm
+ ;CHECK: vpmovsxbd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %3 = sext <8 x i8> %2 to <8 x i32>
+ ret <8 x i32> %3
+}
+
define <2 x i64> @stack_fold_pmovsxbq(<16 x i8> %a0) {
;CHECK-LABEL: stack_fold_pmovsxbq
;CHECK: vpmovsxbq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
@@ -645,6 +1237,15 @@ define <2 x i64> @stack_fold_pmovsxbq(<16 x i8> %a0) {
ret <2 x i64> %3
}
+define <4 x i64> @stack_fold_pmovsxbq_ymm(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxbq_ymm
+ ;CHECK: vpmovsxbq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = sext <4 x i8> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
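+; The pmovsx tests shuffle out the low source elements where the destination
+; has fewer lanes than a full register, then sign-extend them; selection
+; should merge the spilled operand into a single folded vpmovsx load.
+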
define <8 x i16> @stack_fold_pmovsxbw(<16 x i8> %a0) {
;CHECK-LABEL: stack_fold_pmovsxbw
;CHECK: vpmovsxbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
@@ -654,6 +1255,14 @@ define <8 x i16> @stack_fold_pmovsxbw(<16 x i8> %a0) {
ret <8 x i16> %3
}
+define <16 x i16> @stack_fold_pmovsxbw_ymm(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxbw_ymm
+ ;CHECK: vpmovsxbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = sext <16 x i8> %a0 to <16 x i16>
+ ret <16 x i16> %2
+}
+
define <2 x i64> @stack_fold_pmovsxdq(<4 x i32> %a0) {
;CHECK-LABEL: stack_fold_pmovsxdq
;CHECK: vpmovsxdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
@@ -663,6 +1272,14 @@ define <2 x i64> @stack_fold_pmovsxdq(<4 x i32> %a0) {
ret <2 x i64> %3
}
+define <4 x i64> @stack_fold_pmovsxdq_ymm(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxdq_ymm
+ ;CHECK: vpmovsxdq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = sext <4 x i32> %a0 to <4 x i64>
+ ret <4 x i64> %2
+}
+
define <4 x i32> @stack_fold_pmovsxwd(<8 x i16> %a0) {
;CHECK-LABEL: stack_fold_pmovsxwd
;CHECK: vpmovsxwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
@@ -672,6 +1289,14 @@ define <4 x i32> @stack_fold_pmovsxwd(<8 x i16> %a0) {
ret <4 x i32> %3
}
+define <8 x i32> @stack_fold_pmovsxwd_ymm(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxwd_ymm
+ ;CHECK: vpmovsxwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = sext <8 x i16> %a0 to <8 x i32>
+ ret <8 x i32> %2
+}
+
define <2 x i64> @stack_fold_pmovsxwq(<8 x i16> %a0) {
;CHECK-LABEL: stack_fold_pmovsxwq
;CHECK: vpmovsxwq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
@@ -681,109 +1306,49 @@ define <2 x i64> @stack_fold_pmovsxwq(<8 x i16> %a0) {
ret <2 x i64> %3
}
-define <4 x i32> @stack_fold_pmovzxbd(<16 x i8> %a0) {
- ;CHECK-LABEL: stack_fold_pmovzxbd
- ;CHECK: vpmovzxbd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 17, i32 18, i32 1, i32 19, i32 20, i32 21, i32 2, i32 22, i32 23, i32 24, i32 3, i32 25, i32 26, i32 27>
- %3 = bitcast <16 x i8> %2 to <4 x i32>
- ret <4 x i32> %3
-}
-
-define <2 x i64> @stack_fold_pmovzxbq(<16 x i8> %a0) {
- ;CHECK-LABEL: stack_fold_pmovzxbq
- ;CHECK: vpmovzxbq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 1, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
- %3 = bitcast <16 x i8> %2 to <2 x i64>
- ret <2 x i64> %3
-}
-
-define <8 x i16> @stack_fold_pmovzxbw(<16 x i8> %a0) {
- ;CHECK-LABEL: stack_fold_pmovzxbw
- ;CHECK: vpmovzxbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
- %3 = bitcast <16 x i8> %2 to <8 x i16>
- ret <8 x i16> %3
-}
-
-define <2 x i64> @stack_fold_pmovzxdq(<4 x i32> %a0) {
- ;CHECK-LABEL: stack_fold_pmovzxdq
- ;CHECK: vpmovzxdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
- %3 = bitcast <4 x i32> %2 to <2 x i64>
- ret <2 x i64> %3
-}
-
-define <4 x i32> @stack_fold_pmovzxwd(<8 x i16> %a0) {
- ;CHECK-LABEL: stack_fold_pmovzxwd
- ;CHECK: vpmovzxwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
- %3 = bitcast <8 x i16> %2 to <4 x i32>
- ret <4 x i32> %3
-}
-
-define <2 x i64> @stack_fold_pmovzxwq(<8 x i16> %a0) {
- ;CHECK-LABEL: stack_fold_pmovzxwq
- ;CHECK: vpmovzxwq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 8, i32 9, i32 10, i32 1, i32 11, i32 12, i32 13>
- %3 = bitcast <8 x i16> %2 to <2 x i64>
- ret <2 x i64> %3
-}
-
-define <8 x i32> @stack_fold_pmovsxbd_ymm(<16 x i8> %a0) {
- ;CHECK-LABEL: stack_fold_pmovsxbd_ymm
- ;CHECK: vpmovsxbd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %3 = sext <8 x i8> %2 to <8 x i32>
- ret <8 x i32> %3
-}
-
-define <4 x i64> @stack_fold_pmovsxbq_ymm(<16 x i8> %a0) {
- ;CHECK-LABEL: stack_fold_pmovsxbq_ymm
- ;CHECK: pmovsxbq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+define <4 x i64> @stack_fold_pmovsxwq_ymm(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxwq_ymm
+ ;CHECK: vpmovsxwq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %3 = sext <4 x i8> %2 to <4 x i64>
+ %2 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = sext <4 x i16> %2 to <4 x i64>
ret <4 x i64> %3
}
-define <16 x i16> @stack_fold_pmovsxbw_ymm(<16 x i8> %a0) {
- ;CHECK-LABEL: stack_fold_pmovsxbw_ymm
- ;CHECK: vpmovsxbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = sext <16 x i8> %a0 to <16 x i16>
- ret <16 x i16> %2
+define <8 x i16> @stack_fold_vpmovusdw(<8 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_vpmovusdw
+ ;CHECK: vpmovusdw %ymm0, {{-?[0-9]*}}(%rsp) # 16-byte Folded Spill
+ %1 = call <8 x i16> @llvm.x86.avx512.mask.pmovus.dw.256(<8 x i32> %a0, <8 x i16> undef, i8 -1)
+ %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ ret <8 x i16> %1
}
+declare <8 x i16> @llvm.x86.avx512.mask.pmovus.dw.256(<8 x i32>, <8 x i16>, i8)
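+; The vpmovus tests fold in the store direction: the unsigned-saturating
+; truncate writes straight to the stack slot, so the CHECK expects a Folded
+; Spill rather than a Folded Reload.
+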
-define <4 x i64> @stack_fold_pmovsxdq_ymm(<4 x i32> %a0) {
- ;CHECK-LABEL: stack_fold_pmovsxdq_ymm
- ;CHECK: vpmovsxdq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = sext <4 x i32> %a0 to <4 x i64>
- ret <4 x i64> %2
+define <4 x i32> @stack_fold_vpmovusqd(<4 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_vpmovusqd
+ ;CHECK: vpmovusqd %ymm0, {{-?[0-9]*}}(%rsp) # 16-byte Folded Spill
+ %1 = call <4 x i32> @llvm.x86.avx512.mask.pmovus.qd.256(<4 x i64> %a0, <4 x i32> undef, i8 -1)
+ %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ ret <4 x i32> %1
}
+declare <4 x i32> @llvm.x86.avx512.mask.pmovus.qd.256(<4 x i64>, <4 x i32>, i8)
-define <8 x i32> @stack_fold_pmovsxwd_ymm(<8 x i16> %a0) {
- ;CHECK-LABEL: stack_fold_pmovsxwd_ymm
- ;CHECK: vpmovsxwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = sext <8 x i16> %a0 to <8 x i32>
- ret <8 x i32> %2
+define <16 x i8> @stack_fold_vpmovuswb(<16 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_vpmovuswb
+ ;CHECK: vpmovuswb %ymm0, {{-?[0-9]*}}(%rsp) # 16-byte Folded Spill
+ %1 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.wb.256(<16 x i16> %a0, <16 x i8> undef, i16 -1)
+ %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ ret <16 x i8> %1
}
+declare <16 x i8> @llvm.x86.avx512.mask.pmovus.wb.256(<16 x i16>, <16 x i8>, i16)
-define <4 x i64> @stack_fold_pmovsxwq_ymm(<8 x i16> %a0) {
- ;CHECK-LABEL: stack_fold_pmovsxwq_ymm
- ;CHECK: vpmovsxwq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+define <4 x i32> @stack_fold_pmovzxbd(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxbd
+ ;CHECK: vpmovzxbd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %3 = sext <4 x i16> %2 to <4 x i64>
- ret <4 x i64> %3
+ %2 = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 17, i32 18, i32 1, i32 19, i32 20, i32 21, i32 2, i32 22, i32 23, i32 24, i32 3, i32 25, i32 26, i32 27>
+ %3 = bitcast <16 x i8> %2 to <4 x i32>
+ ret <4 x i32> %3
}
define <8 x i32> @stack_fold_pmovzxbd_ymm(<16 x i8> %a0) {
@@ -795,6 +1360,15 @@ define <8 x i32> @stack_fold_pmovzxbd_ymm(<16 x i8> %a0) {
ret <8 x i32> %3
}
+define <2 x i64> @stack_fold_pmovzxbq(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxbq
+ ;CHECK: vpmovzxbq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 1, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+ %3 = bitcast <16 x i8> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
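+; The pmovzx tests express zero extension as a shuffle that interleaves the
+; source with zeroinitializer, bitcast to the wider element type; this should
+; still select to a single folded vpmovzx load.
+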
define <4 x i64> @stack_fold_pmovzxbq_ymm(<16 x i8> %a0) {
;CHECK-LABEL: stack_fold_pmovzxbq_ymm
;CHECK: vpmovzxbq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
@@ -804,6 +1378,15 @@ define <4 x i64> @stack_fold_pmovzxbq_ymm(<16 x i8> %a0) {
ret <4 x i64> %3
}
+define <8 x i16> @stack_fold_pmovzxbw(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxbw
+ ;CHECK: vpmovzxbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ %3 = bitcast <16 x i8> %2 to <8 x i16>
+ ret <8 x i16> %3
+}
+
define <16 x i16> @stack_fold_pmovzxbw_ymm(<16 x i8> %a0) {
;CHECK-LABEL: stack_fold_pmovzxbw_ymm
;CHECK: vpmovzxbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
@@ -812,6 +1395,15 @@ define <16 x i16> @stack_fold_pmovzxbw_ymm(<16 x i8> %a0) {
ret <16 x i16> %2
}
+define <2 x i64> @stack_fold_pmovzxdq(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxdq
+ ;CHECK: vpmovzxdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ %3 = bitcast <4 x i32> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
define <4 x i64> @stack_fold_pmovzxdq_ymm(<4 x i32> %a0) {
;CHECK-LABEL: stack_fold_pmovzxdq_ymm
;CHECK: vpmovzxdq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
@@ -820,6 +1412,15 @@ define <4 x i64> @stack_fold_pmovzxdq_ymm(<4 x i32> %a0) {
ret <4 x i64> %2
}
+define <4 x i32> @stack_fold_pmovzxwd(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxwd
+ ;CHECK: vpmovzxwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ %3 = bitcast <8 x i16> %2 to <4 x i32>
+ ret <4 x i32> %3
+}
+
define <8 x i32> @stack_fold_pmovzxwd_ymm(<8 x i16> %a0) {
;CHECK-LABEL: stack_fold_pmovzxwd_ymm
;CHECK: vpmovzxwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
@@ -828,6 +1429,15 @@ define <8 x i32> @stack_fold_pmovzxwd_ymm(<8 x i16> %a0) {
ret <8 x i32> %2
}
+define <2 x i64> @stack_fold_pmovzxwq(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxwq
+ ;CHECK: vpmovzxwq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 8, i32 9, i32 10, i32 1, i32 11, i32 12, i32 13>
+ %3 = bitcast <8 x i16> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
define <4 x i64> @stack_fold_pmovzxwq_ymm(<8 x i16> %a0) {
;CHECK-LABEL: stack_fold_pmovzxwq_ymm
;CHECK: vpmovzxwq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
@@ -861,65 +1471,82 @@ define <4 x i64> @stack_fold_pmovzxwq_mask_ymm(<4 x i64> %passthru, <8 x i16> %a
ret <4 x i64> %6
}
-define <16 x i8> @stack_fold_punpckhbw(<16 x i8> %a0, <16 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_punpckhbw
- ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+define <2 x i64> @stack_fold_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmuldq
+ ;CHECK: vpmuldq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
- ret <16 x i8> %2
+ %2 = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %a0, <4 x i32> %a1)
+ ret <2 x i64> %2
}
+declare <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32>, <4 x i32>) nounwind readnone
-define <16 x i8> @stack_fold_punpckhbw_mask(<16 x i8>* %passthru, <16 x i8> %a0, <16 x i8> %a1, i16 %mask) {
- ;CHECK-LABEL: stack_fold_punpckhbw_mask
- ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
+define <4 x i64> @stack_fold_pmuldq_ymm(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmuldq_ymm
+ ;CHECK: vpmuldq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
- %3 = bitcast i16 %mask to <16 x i1>
- ; load needed to keep the operation from being scheduled about the asm block
- %4 = load <16 x i8>, <16 x i8>* %passthru
- %5 = select <16 x i1> %3, <16 x i8> %2, <16 x i8> %4
- ret <16 x i8> %5
+ %2 = call <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32> %a0, <8 x i32> %a1)
+ ret <4 x i64> %2
}
+declare <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32>, <8 x i32>) nounwind readnone
-define <16 x i8> @stack_fold_punpckhbw_maskz(<16 x i8> %passthru, <16 x i8> %a0, <16 x i8> %a1, i16 %mask) {
- ;CHECK-LABEL: stack_fold_punpckhbw_maskz
- ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload
+define <2 x i64> @stack_fold_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmuludq
+ ;CHECK: vpmuludq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
- %3 = bitcast i16 %mask to <16 x i1>
- %4 = select <16 x i1> %3, <16 x i8> %2, <16 x i8> zeroinitializer
- ret <16 x i8> %4
+ %2 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %a0, <4 x i32> %a1)
+ ret <2 x i64> %2
}
+declare <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32>, <4 x i32>) nounwind readnone
-define <32 x i8> @stack_fold_punpckhbw_ymm(<32 x i8> %a0, <32 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_punpckhbw_ymm
- ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+define <4 x i64> @stack_fold_pmuludq_ymm(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmuludq_ymm
+ ;CHECK: vpmuludq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <32 x i8> %a0, <32 x i8> %a1, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
- ret <32 x i8> %2
+ %2 = call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> %a0, <8 x i32> %a1)
+ ret <4 x i64> %2
}
+declare <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32>, <8 x i32>) nounwind readnone
-define <32 x i8> @stack_fold_punpckhbw_mask_ymm(<32 x i8>* %passthru, <32 x i8> %a0, <32 x i8> %a1, i32 %mask) {
- ;CHECK-LABEL: stack_fold_punpckhbw_mask_ymm
- ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <32 x i8> %a0, <32 x i8> %a1, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
- %3 = bitcast i32 %mask to <32 x i1>
- ; load needed to keep the operation from being scheduled about the asm block
- %4 = load <32 x i8>, <32 x i8>* %passthru
- %5 = select <32 x i1> %3, <32 x i8> %2, <32 x i8> %4
- ret <32 x i8> %5
+define <4 x i64> @stack_fold_pmuludq_ymm_mask(<4 x i64>* %passthru, <8 x i32> %a0, <8 x i32> %a1, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_pmuludq_ymm_mask
+ ;CHECK: vpmuludq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> %a0, <8 x i32> %a1)
+ %3 = bitcast i8 %mask to <8 x i1>
+ %4 = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %5 = load <4 x i64>, <4 x i64>* %passthru
+ %6 = select <4 x i1> %4, <4 x i64> %2, <4 x i64> %5
+ ret <4 x i64> %6
}
-define <32 x i8> @stack_fold_punpckhbw_maskz_ymm(<32 x i8> %a0, <32 x i8> %a1, i32 %mask) {
- ;CHECK-LABEL: stack_fold_punpckhbw_maskz_ymm
- ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <32 x i8> %a0, <32 x i8> %a1, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
- %3 = bitcast i32 %mask to <32 x i1>
- %4 = select <32 x i1> %3, <32 x i8> %2, <32 x i8> zeroinitializer
- ret <32 x i8> %4
+define <4 x i64> @stack_fold_pmuludq_ymm_maskz(<8 x i32> %a0, <8 x i32> %a1, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_pmuludq_ymm_maskz
+ ;CHECK: vpmuludq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> %a0, <8 x i32> %a1)
+ %3 = bitcast i8 %mask to <8 x i1>
+ %4 = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %5 = select <4 x i1> %4, <4 x i64> %2, <4 x i64> zeroinitializer
+ ret <4 x i64> %5
+}
+
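+; For the masked and zero-masked variants the i8 mask is bitcast to <8 x i1>
+; and shuffled down to the four lanes of the <4 x i64> result; the selects
+; model the {%k} and {%k}{z} forms of the folded instruction.
+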
+define <2 x i64> @stack_fold_psadbw(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psadbw
+ ;CHECK: vpsadbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %a0, <16 x i8> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i64> @stack_fold_psadbw_ymm(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psadbw_ymm
+ ;CHECK: vpsadbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8> %a0, <32 x i8> %a1)
+ ret <4 x i64> %2
}
+declare <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8>, <32 x i8>) nounwind readnone
define <16 x i8> @stack_fold_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
;CHECK-LABEL: stack_fold_pshufb
@@ -1003,6 +1630,34 @@ define <4 x i32> @stack_fold_pshufd_maskz(<4 x i32> %a0, i8 %mask) {
ret <4 x i32> %5
}
+define <8 x i32> @stack_fold_pshufd_ymm(<8 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pshufd_ymm
+ ;CHECK: vpshufd $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+ ret <8 x i32> %2
+}
+
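+; The <3,2,1,0,7,6,5,4> mask reverses each 128-bit lane, which encodes as the
+; vpshufd immediate $27 (0b00011011) matched above.
+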
+define <8 x i32> @stack_fold_pshufd_ymm_mask(<8 x i32> %passthru, <8 x i32> %a0, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_pshufd_ymm_mask
+ ;CHECK: vpshufd $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+ %3 = bitcast i8 %mask to <8 x i1>
+ %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> %passthru
+ ret <8 x i32> %4
+}
+
+define <8 x i32> @stack_fold_pshufd_ymm_maskz(<8 x i32> %a0, i8 %mask) {
+ ;CHECK-LABEL: stack_fold_pshufd_ymm_maskz
+ ;CHECK: vpshufd $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+ %3 = bitcast i8 %mask to <8 x i1>
+ %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
+ ret <8 x i32> %4
+}
+
define <8 x i16> @stack_fold_pshufhw(<8 x i16> %a0) {
;CHECK-LABEL: stack_fold_pshufhw
;CHECK: vpshufhw $11, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
@@ -1031,6 +1686,34 @@ define <8 x i16> @stack_fold_pshufhw_maskz(<8 x i16> %a0, i8 %mask) {
ret <8 x i16> %4
}
+define <16 x i16> @stack_fold_pshufhw_ymm(<16 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pshufhw_ymm
+ ;CHECK: vpshufhw $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 5, i32 4, i32 8, i32 9, i32 10, i32 11, i32 15, i32 14, i32 13, i32 12>
+ ret <16 x i16> %2
+}
+
+define <16 x i16> @stack_fold_pshufhw_ymm_mask(<16 x i16> %passthru, <16 x i16> %a0, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_pshufhw_ymm_mask
+ ;CHECK: vpshufhw $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 5, i32 4, i32 8, i32 9, i32 10, i32 11, i32 15, i32 14, i32 13, i32 12>
+ %3 = bitcast i16 %mask to <16 x i1>
+ %4 = select <16 x i1> %3, <16 x i16> %2, <16 x i16> %passthru
+ ret <16 x i16> %4
+}
+
+define <16 x i16> @stack_fold_pshufhw_ymm_maskz(<16 x i16> %a0, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_pshufhw_ymm_maskz
+ ;CHECK: vpshufhw $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 5, i32 4, i32 8, i32 9, i32 10, i32 11, i32 15, i32 14, i32 13, i32 12>
+ %3 = bitcast i16 %mask to <16 x i1>
+ %4 = select <16 x i1> %3, <16 x i16> %2, <16 x i16> zeroinitializer
+ ret <16 x i16> %4
+}
+
define <8 x i16> @stack_fold_pshuflw(<8 x i16> %a0) {
;CHECK-LABEL: stack_fold_pshuflw
;CHECK: vpshuflw $27, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
@@ -1059,345 +1742,581 @@ define <8 x i16> @stack_fold_pshuflw_maskz(<8 x i16> %a0, i8 %mask) {
ret <8 x i16> %4
}
-define <8 x i32> @stack_fold_pshufd_ymm(<8 x i32> %a0) {
- ;CHECK-LABEL: stack_fold_pshufd_ymm
- ;CHECK: vpshufd $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
- ret <8 x i32> %2
-}
-
-define <8 x i32> @stack_fold_pshufd_ymm_mask(<8 x i32> %passthru, <8 x i32> %a0, i8 %mask) {
- ;CHECK-LABEL: stack_fold_pshufd_ymm_mask
- ;CHECK: vpshufd $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
- %3 = bitcast i8 %mask to <8 x i1>
- %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> %passthru
- ret <8 x i32> %4
-}
-
-define <8 x i32> @stack_fold_pshufd_ymm_maskz(<8 x i32> %a0, i8 %mask) {
- ;CHECK-LABEL: stack_fold_pshufd_ymm_maskz
- ;CHECK: vpshufd $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
- %3 = bitcast i8 %mask to <8 x i1>
- %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
- ret <8 x i32> %4
-}
-
-define <16 x i16> @stack_fold_vpshufhw_ymm(<16 x i16> %a0) {
- ;CHECK-LABEL: stack_fold_vpshufhw_ymm
- ;CHECK: vpshufhw $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+define <16 x i16> @stack_fold_pshuflw_ymm(<16 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pshuflw_ymm
+ ;CHECK: vpshuflw $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 5, i32 4, i32 8, i32 9, i32 10, i32 11, i32 15, i32 14, i32 13, i32 12>
+ %2 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7, i32 11, i32 10, i32 9, i32 8, i32 12, i32 13, i32 14, i32 15>
ret <16 x i16> %2
}
-define <16 x i16> @stack_fold_vpshufhw_ymm_mask(<16 x i16> %passthru, <16 x i16> %a0, i16 %mask) {
- ;CHECK-LABEL: stack_fold_vpshufhw_ymm_mask
- ;CHECK: vpshufhw $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
+define <16 x i16> @stack_fold_pshuflw_ymm_mask(<16 x i16> %passthru, <16 x i16> %a0, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_pshuflw_ymm_mask
+ ;CHECK: vpshuflw $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 5, i32 4, i32 8, i32 9, i32 10, i32 11, i32 15, i32 14, i32 13, i32 12>
+ %2 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7, i32 11, i32 10, i32 9, i32 8, i32 12, i32 13, i32 14, i32 15>
%3 = bitcast i16 %mask to <16 x i1>
%4 = select <16 x i1> %3, <16 x i16> %2, <16 x i16> %passthru
ret <16 x i16> %4
}
-define <16 x i16> @stack_fold_vpshufhw_ymm_maskz(<16 x i16> %a0, i16 %mask) {
- ;CHECK-LABEL: stack_fold_vpshufhw_ymm_maskz
- ;CHECK: vpshufhw $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
+define <16 x i16> @stack_fold_pshuflw_ymm_maskz(<16 x i16> %a0, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_pshuflw_ymm_maskz
+ ;CHECK: vpshuflw $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 5, i32 4, i32 8, i32 9, i32 10, i32 11, i32 15, i32 14, i32 13, i32 12>
+ %2 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7, i32 11, i32 10, i32 9, i32 8, i32 12, i32 13, i32 14, i32 15>
%3 = bitcast i16 %mask to <16 x i1>
%4 = select <16 x i1> %3, <16 x i16> %2, <16 x i16> zeroinitializer
ret <16 x i16> %4
}
-define <16 x i16> @stack_fold_vpshuflw_ymm(<16 x i16> %a0) {
- ;CHECK-LABEL: stack_fold_vpshuflw_ymm
- ;CHECK: vpshuflw $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7, i32 11, i32 10, i32 9, i32 8, i32 12, i32 13, i32 14, i32 15>
- ret <16 x i16> %2
+define <4 x i32> @stack_fold_pslld(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pslld
+ ;CHECK: vpslld {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
}
+declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone
-define <16 x i16> @stack_fold_vpshuflw_ymm_mask(<16 x i16> %passthru, <16 x i16> %a0, i16 %mask) {
- ;CHECK-LABEL: stack_fold_vpshuflw_ymm_mask
- ;CHECK: vpshuflw $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
+define <8 x i32> @stack_fold_pslld_ymm(<8 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pslld_ymm
+ ;CHECK: vpslld {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %a0, <4 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32>, <4 x i32>) nounwind readnone
+
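+; The pslldq/psrldq byte shifts are written as shufflevectors against
+; zeroinitializer rather than intrinsics: for vpslldq $12, indices 16-19 pull
+; bytes 0-3 of %a into byte positions 12-15 and index 0 fills the low twelve
+; bytes with zeros.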
+define <16 x i8> @stack_fold_pslldq(<16 x i8> %a) {
+ ;CHECK-LABEL: stack_fold_pslldq
+ ;CHECK: vpslldq $12, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7, i32 11, i32 10, i32 9, i32 8, i32 12, i32 13, i32 14, i32 15>
- %3 = bitcast i16 %mask to <16 x i1>
- %4 = select <16 x i1> %3, <16 x i16> %2, <16 x i16> %passthru
- ret <16 x i16> %4
+ %2 = shufflevector <16 x i8> zeroinitializer, <16 x i8> %a, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 17, i32 18, i32 19>
+ ret <16 x i8> %2
}
-define <16 x i16> @stack_fold_vpshuflw_ymm_maskz(<16 x i16> %a0, i16 %mask) {
- ;CHECK-LABEL: stack_fold_vpshuflw_ymm_maskz
- ;CHECK: vpshuflw $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
+define <32 x i8> @stack_fold_pslldq_ymm(<32 x i8> %a) {
+ ;CHECK-LABEL: stack_fold_pslldq_ymm
+ ;CHECK: vpslldq $15, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7, i32 11, i32 10, i32 9, i32 8, i32 12, i32 13, i32 14, i32 15>
- %3 = bitcast i16 %mask to <16 x i1>
- %4 = select <16 x i1> %3, <16 x i16> %2, <16 x i16> zeroinitializer
- ret <16 x i16> %4
+ %2 = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 32, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 48>
+ ret <32 x i8> %2
}
-define <8 x i16> @stack_fold_pmaddubsw(<16 x i8> %a0, <16 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_pmaddubsw
- ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+define <2 x i64> @stack_fold_psllq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psllq
+ ;CHECK: vpsllq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1)
- ret <8 x i16> %2
+ %2 = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
}
-declare <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8>, <16 x i8>) nounwind readnone
+declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone
-define <8 x i16> @stack_fold_pmaddubsw_mask(<8 x i16>* %passthru, <16 x i8> %a0, <16 x i8> %a1, i8 %mask) {
- ;CHECK-LABEL: stack_fold_pmaddubsw_mask
- ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
+define <4 x i64> @stack_fold_psllq_ymm(<4 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psllq_ymm
+ ;CHECK: vpsllq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1)
- %3 = bitcast i8 %mask to <8 x i1>
- ; load needed to keep the operation from being scheduled about the asm block
- %4 = load <8 x i16>, <8 x i16>* %passthru
- %5 = select <8 x i1> %3, <8 x i16> %2, <8 x i16> %4
- ret <8 x i16> %5
+ %2 = call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %a0, <2 x i64> %a1)
+ ret <4 x i64> %2
}
+declare <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64>, <2 x i64>) nounwind readnone
-define <8 x i16> @stack_fold_pmaddubsw_maskz(<16 x i8> %a0, <16 x i8> %a1, i8 %mask) {
- ;CHECK-LABEL: stack_fold_pmaddubsw_maskz
- ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
+define <4 x i32> @stack_fold_psllvd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psllvd
+ ;CHECK: vpsllvd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1)
- %3 = bitcast i8 %mask to <8 x i1>
- %4 = select <8 x i1> %3, <8 x i16> %2, <8 x i16> zeroinitializer
- ret <8 x i16> %4
+ %2 = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
}
+declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>) nounwind readnone
-define <16 x i16> @stack_fold_pmaddubsw_ymm(<32 x i8> %a0, <32 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_pmaddubsw_ymm
- ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+define <8 x i32> @stack_fold_psllvd_ymm(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psllvd_ymm
+ ;CHECK: vpsllvd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1)
+ %2 = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %a0, <8 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <2 x i64> @stack_fold_psllvq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psllvq
+ ;CHECK: vpsllvq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <4 x i64> @stack_fold_psllvq_ymm(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psllvq_ymm
+ ;CHECK: vpsllvq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %a0, <4 x i64> %a1)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64>, <4 x i64>) nounwind readnone
+
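+; The variable per-element word shifts (vpsllvw, vpsravw, vpsrlvw) are
+; AVX-512BW instructions (with VL for the 128- and 256-bit forms), hence the
+; llvm.x86.avx512.psllv.w.* intrinsics, whereas the dword and qword variable
+; shifts reuse the AVX2 llvm.x86.avx2.psllv.* intrinsics above.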
+define <8 x i16> @stack_fold_psllvw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psllvw
+ ;CHECK: vpsllvw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i16> @stack_fold_psllvw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psllvw_ymm
+ ;CHECK: vpsllvw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx512.psllv.w.256(<16 x i16> %a0, <16 x i16> %a1)
ret <16 x i16> %2
}
-declare <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8>, <32 x i8>) nounwind readnone
+declare <16 x i16> @llvm.x86.avx512.psllv.w.256(<16 x i16>, <16 x i16>) nounwind readnone
-define <16 x i16> @stack_fold_pmaddubsw_ymm_mask(<16 x i16>* %passthru, <32 x i8> %a0, <32 x i8> %a1, i16 %mask) {
- ;CHECK-LABEL: stack_fold_pmaddubsw_ymm_mask
- ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
+define <8 x i16> @stack_fold_psllw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psllw
+ ;CHECK: vpsllw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1)
- %3 = bitcast i16 %mask to <16 x i1>
- ; load needed to keep the operation from being scheduled about the asm block
- %4 = load <16 x i16>, <16 x i16>* %passthru
- %5 = select <16 x i1> %3, <16 x i16> %2, <16 x i16> %4
- ret <16 x i16> %5
+ %2 = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
}
+declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) nounwind readnone
-define <16 x i16> @stack_fold_pmaddubsw_ymm_maskz(<32 x i8> %a0, <32 x i8> %a1, i16 %mask) {
- ;CHECK-LABEL: stack_fold_pmaddubsw_ymm_maskz
- ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
+define <16 x i16> @stack_fold_psllw_ymm(<16 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psllw_ymm
+ ;CHECK: vpsllw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1)
- %3 = bitcast i16 %mask to <16 x i1>
- %4 = select <16 x i1> %3, <16 x i16> %2, <16 x i16> zeroinitializer
- ret <16 x i16> %4
+ %2 = call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> %a0, <8 x i16> %a1)
+ ret <16 x i16> %2
}
+declare <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16>, <8 x i16>) nounwind readnone
-define <4 x i32> @stack_fold_pmaddwd(<8 x i16> %a0, <8 x i16> %a1) {
- ;CHECK-LABEL: stack_fold_pmaddwd
- ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+define <4 x i32> @stack_fold_psrad(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psrad
+ ;CHECK: vpsrad {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1)
+ %2 = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %a0, <4 x i32> %a1)
ret <4 x i32> %2
}
-declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32>, <4 x i32>) nounwind readnone
-define <4 x i32> @stack_fold_pmaddwd_mask(<4 x i32>* %passthru, <8 x i16> %a0, <8 x i16> %a1, i8 %mask) {
- ;CHECK-LABEL: stack_fold_pmaddwd_mask
- ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
+define <8 x i32> @stack_fold_psrad_ymm(<8 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psrad_ymm
+ ;CHECK: vpsrad {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1)
- %3 = bitcast i8 %mask to <8 x i1>
- %4 = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- ; load needed to keep the operation from being scheduled about the asm block
- %5 = load <4 x i32>, <4 x i32>* %passthru
- %6 = select <4 x i1> %4, <4 x i32> %2, <4 x i32> %5
- ret <4 x i32> %6
+ %2 = call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %a0, <4 x i32> %a1)
+ ret <8 x i32> %2
}
+declare <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32>, <4 x i32>) nounwind readnone
-define <4 x i32> @stack_fold_pmaddwd_maskz(<8 x i16> %a0, <8 x i16> %a1, i8 %mask) {
- ;CHECK-LABEL: stack_fold_pmaddwd_maskz
- ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload
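+; vpsraq (arithmetic right shift of 64-bit elements) has no SSE/AVX2
+; counterpart; it is new in AVX-512, so even the 128- and 256-bit forms use
+; llvm.x86.avx512.psra.q.* intrinsics.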
+define <2 x i64> @stack_fold_psraq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psraq
+ ;CHECK: vpsraq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1)
- %3 = bitcast i8 %mask to <8 x i1>
- %4 = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %5 = select <4 x i1> %4, <4 x i32> %2, <4 x i32> zeroinitializer
- ret <4 x i32> %5
+ %2 = call <2 x i64> @llvm.x86.avx512.psra.q.128(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
}
+declare <2 x i64> @llvm.x86.avx512.psra.q.128(<2 x i64>, <2 x i64>) nounwind readnone
-define <8 x i32> @stack_fold_pmaddwd_ymm(<16 x i16> %a0, <16 x i16> %a1) {
- ;CHECK-LABEL: stack_fold_pmaddwd_ymm
- ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+define <4 x i64> @stack_fold_psraq_ymm(<4 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psraq_ymm
+ ;CHECK: vpsraq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx512.psra.q.256(<4 x i64> %a0, <2 x i64> %a1)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx512.psra.q.256(<4 x i64>, <2 x i64>) nounwind readnone
+
+define <4 x i32> @stack_fold_psravd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psravd
+ ;CHECK: vpsravd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a0, <16 x i16> %a1)
+ %2 = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i32> @stack_fold_psravd_ymm(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psravd_ymm
+ ;CHECK: vpsravd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %a0, <8 x i32> %a1)
ret <8 x i32> %2
}
-declare <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16>, <16 x i16>) nounwind readnone
+declare <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32>, <8 x i32>) nounwind readnone
-define <8 x i32> @stack_fold_pmaddwd_ymm_mask(<8 x i32>* %passthru, <16 x i16> %a0, <16 x i16> %a1, i8 %mask) {
- ;CHECK-LABEL: stack_fold_pmaddwd_ymm_mask
- ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
+define <2 x i64> @stack_fold_psravq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psravq
+ ;CHECK: vpsravq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a0, <16 x i16> %a1)
- %3 = bitcast i8 %mask to <8 x i1>
- ; load needed to keep the operation from being scheduled about the asm block
- %4 = load <8 x i32>, <8 x i32>* %passthru
- %5 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> %4
- ret <8 x i32> %5
+ %2 = call <2 x i64> @llvm.x86.avx512.psrav.q.128(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
}
+declare <2 x i64> @llvm.x86.avx512.psrav.q.128(<2 x i64>, <2 x i64>) nounwind readnone
-define <8 x i32> @stack_fold_pmaddwd_ymm_maskz(<16 x i16> %a0, <16 x i16> %a1, i8 %mask) {
- ;CHECK-LABEL: stack_fold_pmaddwd_ymm_maskz
- ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
+define <4 x i64> @stack_fold_psravq_ymm(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psravq_ymm
+ ;CHECK: vpsravq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a0, <16 x i16> %a1)
- %3 = bitcast i8 %mask to <8 x i1>
- %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
- ret <8 x i32> %4
+ %2 = call <4 x i64> @llvm.x86.avx512.psrav.q.256(<4 x i64> %a0, <4 x i64> %a1)
+ ret <4 x i64> %2
}
+declare <4 x i64> @llvm.x86.avx512.psrav.q.256(<4 x i64>, <4 x i64>) nounwind readnone
-define <8 x i32> @stack_fold_permd(<8 x i32> %a0, <8 x i32> %a1) {
- ;CHECK-LABEL: stack_fold_permd
- ;CHECK: vpermd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+define <8 x i16> @stack_fold_psravw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psravw
+ ;CHECK: vpsravw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a1, <8 x i32> %a0)
- ret <8 x i32> %2
+ %2 = call <8 x i16> @llvm.x86.avx512.psrav.w.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
}
-declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>) nounwind readonly
+declare <8 x i16> @llvm.x86.avx512.psrav.w.128(<8 x i16>, <8 x i16>) nounwind readnone
-define <4 x i64> @stack_fold_permq(<4 x i64> %a0) {
- ;CHECK-LABEL: stack_fold_permq
- ;CHECK: vpermq $235, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 2, i32 3>
- ; add forces execution domain
- %3 = add <4 x i64> %2, <i64 1, i64 1, i64 1, i64 1>
- ret <4 x i64> %3
+define <16 x i16> @stack_fold_psravw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psravw_ymm
+ ;CHECK: vpsravw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx512.psrav.w.256(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
}
+declare <16 x i16> @llvm.x86.avx512.psrav.w.256(<16 x i16>, <16 x i16>) nounwind readnone
-define <4 x i64> @stack_fold_permqvar(<4 x i64> %a0, <4 x i64> %a1) {
- ;CHECK-LABEL: stack_fold_permqvar
- ;CHECK: vpermq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a1, <4 x i64> %a0, <4 x i64> undef, i8 -1)
- ; add forces execution domain
- %3 = add <4 x i64> %2, <i64 1, i64 1, i64 1, i64 1>
- ret <4 x i64> %3
+define <8 x i16> @stack_fold_psraw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psraw
+ ;CHECK: vpsraw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
}
-declare <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64>, <4 x i64>, <4 x i64>, i8) nounwind readonly
+declare <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16>, <8 x i16>) nounwind readnone
-define <16 x i16> @stack_fold_permwvar(<16 x i16> %a0, <16 x i16> %a1) {
- ;CHECK-LABEL: stack_fold_permwvar
- ;CHECK: vpermw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+define <16 x i16> @stack_fold_psraw_ymm(<16 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psraw_ymm
+ ;CHECK: vpsraw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> %a0, <8 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16>, <8 x i16>) nounwind readnone
+
+define <4 x i32> @stack_fold_psrld(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psrld
+ ;CHECK: vpsrld {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i32> @stack_fold_psrld_ymm(<8 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psrld_ymm
+ ;CHECK: vpsrld {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %a1, <16 x i16> %a0, <16 x i16> undef, i16 -1)
- ; add forces execution domain
- %3 = add <16 x i16> %2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
- ret <16 x i16> %3
+ %2 = call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %a0, <4 x i32> %a1)
+ ret <8 x i32> %2
}
-declare <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16>, <16 x i16>, <16 x i16>, i16) nounwind readonly
+declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) nounwind readnone
-define <32 x i8> @stack_fold_permbvar(<32 x i8> %a0, <32 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_permbvar
- ;CHECK: vpermb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
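+; Like vpslldq above, vpsrldq is modeled as a shufflevector against
+; zeroinitializer: for $12, indices 28-31 move bytes 12-15 of %a into
+; positions 0-3 and the remaining bytes come from the zero vector.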
+define <16 x i8> @stack_fold_psrldq(<16 x i8> %a) {
+ ;CHECK-LABEL: stack_fold_psrldq
+ ;CHECK: vpsrldq $12, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = call <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8> %a1, <32 x i8> %a0, <32 x i8> undef, i32 -1)
- ; add forces execution domain
- %3 = add <32 x i8> %2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
- ret <32 x i8> %3
+ %2 = shufflevector <16 x i8> zeroinitializer, <16 x i8> %a, <16 x i32> <i32 28, i32 29, i32 30, i32 31, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <16 x i8> %2
}
-declare <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8>, <32 x i8>, <32 x i8>, i32) nounwind readonly
-define <2 x i64> @stack_fold_valignq(<2 x i64> %a, <2 x i64> %b) {
- ;CHECK-LABEL: stack_fold_valignq
- ;CHECK: vpalignr $8, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+define <32 x i8> @stack_fold_psrldq_ymm(<32 x i8> %a) {
+ ;CHECK-LABEL: stack_fold_psrldq_ymm
+ ;CHECK: vpsrldq $15, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 2>
+ %2 = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a, <32 x i32> <i32 47, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 63, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <32 x i8> %2
+}
+
+define <2 x i64> @stack_fold_psrlq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlq
+ ;CHECK: vpsrlq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1)
ret <2 x i64> %2
}
+declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone
-define <4 x i32> @stack_fold_valignd(<4 x i32> %a, <4 x i32> %b) {
- ;CHECK-LABEL: stack_fold_valignd
- ;CHECK: vpalignr $4, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32><i32 1, i32 2, i32 3, i32 4>
+define <4 x i64> @stack_fold_psrlq_ymm(<4 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlq_ymm
+ ;CHECK: vpsrlq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %a0, <2 x i64> %a1)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64>, <2 x i64>) nounwind readnone
+
+define <4 x i32> @stack_fold_psrlvd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlvd
+ ;CHECK: vpsrlvd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %a0, <4 x i32> %a1)
ret <4 x i32> %2
}
+declare <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32>, <4 x i32>) nounwind readnone
-define <4 x i64> @stack_fold_valignq_ymm(<4 x i64> %a, <4 x i64> %b) {
- ;CHECK-LABEL: stack_fold_valignq_ymm
- ;CHECK: valignq $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+define <8 x i32> @stack_fold_psrlvd_ymm(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlvd_ymm
+ ;CHECK: vpsrlvd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %a0, <8 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <2 x i64> @stack_fold_psrlvq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlvq
+ ;CHECK: vpsrlvq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <4 x i64> @stack_fold_psrlvq_ymm(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlvq_ymm
+ ;CHECK: vpsrlvq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %a0, <4 x i64> %a1)
ret <4 x i64> %2
}
+declare <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64>, <4 x i64>) nounwind readnone
-define <8 x i32> @stack_fold_valignd_ymm(<8 x i32> %a, <8 x i32> %b) {
- ;CHECK-LABEL: stack_fold_valignd_ymm
- ;CHECK: valignd $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32><i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
+define <8 x i16> @stack_fold_psrlvw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlvw
+ ;CHECK: vpsrlvw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.avx512.psrlv.w.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.avx512.psrlv.w.128(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i16> @stack_fold_psrlvw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlvw_ymm
+ ;CHECK: vpsrlvw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx512.psrlv.w.256(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx512.psrlv.w.256(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_psrlw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlw
+ ;CHECK: vpsrlw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i16> @stack_fold_psrlw_ymm(<16 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlw_ymm
+ ;CHECK: vpsrlw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %a0, <8 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) nounwind readnone
+
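+; Plain wrapping subtraction maps directly onto the IR sub instruction; only
+; the saturating forms (psubs*, psubus*) below require target intrinsics.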
+define <16 x i8> @stack_fold_psubb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psubb
+ ;CHECK: vpsubb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = sub <16 x i8> %a0, %a1
+ ret <16 x i8> %2
+}
+
+define <32 x i8> @stack_fold_psubb_ymm(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psubb_ymm
+ ;CHECK: vpsubb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = sub <32 x i8> %a0, %a1
+ ret <32 x i8> %2
+}
+
+define <4 x i32> @stack_fold_psubd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psubd
+ ;CHECK: vpsubd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = sub <4 x i32> %a0, %a1
+ ret <4 x i32> %2
+}
+
+define <8 x i32> @stack_fold_psubd_ymm(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psubd_ymm
+ ;CHECK: vpsubd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = sub <8 x i32> %a0, %a1
ret <8 x i32> %2
}
-define <8 x i32> @stack_fold_valignd_ymm_mask(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %passthru, i8 %mask) {
- ;CHECK-LABEL: stack_fold_valignd_ymm_mask
- ;CHECK: valignd $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32><i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
- %3 = bitcast i8 %mask to <8 x i1>
- %4 = load <8 x i32>, <8 x i32>* %passthru
- %5 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> %4
- ret <8 x i32> %5
+define <2 x i64> @stack_fold_psubq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psubq
+ ;CHECK: vpsubq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = sub <2 x i64> %a0, %a1
+ ret <2 x i64> %2
}
-define <8 x i32> @stack_fold_valignd_ymm_maskz(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
- ;CHECK-LABEL: stack_fold_valignd_ymm_maskz
- ;CHECK: valignd $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32><i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
- %3 = bitcast i8 %mask to <8 x i1>
- %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
- ret <8 x i32> %4
+define <4 x i64> @stack_fold_psubq_ymm(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psubq_ymm
+ ;CHECK: vpsubq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = sub <4 x i64> %a0, %a1
+ ret <4 x i64> %2
}
-define <32 x i8> @stack_fold_palignr(<32 x i8> %a0, <32 x i8> %a1) {
- ;CHECK-LABEL: stack_fold_palignr
- ;CHECK: vpalignr $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <32 x i8> %a1, <32 x i8> %a0, <32 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48>
+define <16 x i8> @stack_fold_psubsb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psubsb
+ ;CHECK: vpsubsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <32 x i8> @stack_fold_psubsb_ymm(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psubsb_ymm
+ ;CHECK: vpsubsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a0, <32 x i8> %a1)
ret <32 x i8> %2
}
+declare <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8>, <32 x i8>) nounwind readnone
-define <32 x i8> @stack_fold_palignr_mask(<32 x i8> %a0, <32 x i8> %a1, <32 x i8>* %passthru, i32 %mask) {
- ;CHECK-LABEL: stack_fold_palignr_mask
- ;CHECK: vpalignr $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <32 x i8> %a1, <32 x i8> %a0, <32 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48>
+define <8 x i16> @stack_fold_psubsw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psubsw
+ ;CHECK: vpsubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i16> @stack_fold_psubsw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psubsw_ymm
+ ;CHECK: vpsubsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_psubusb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psubusb
+ ;CHECK: vpsubusb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <32 x i8> @stack_fold_psubusb_ymm(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psubusb_ymm
+ ;CHECK: vpsubusb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8> %a0, <32 x i8> %a1)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_psubusw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psubusw
+ ;CHECK: vpsubusw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i16> @stack_fold_psubusw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psubusw_ymm
+ ;CHECK: vpsubusw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_psubw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psubw
+ ;CHECK: vpsubw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = sub <8 x i16> %a0, %a1
+ ret <8 x i16> %2
+}
+
+define <16 x i16> @stack_fold_psubw_ymm(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psubw_ymm
+ ;CHECK: vpsubw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = sub <16 x i16> %a0, %a1
+ ret <16 x i16> %2
+}
+
+define <16 x i8> @stack_fold_punpckhbw(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_punpckhbw
+ ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @stack_fold_punpckhbw_mask(<16 x i8>* %passthru, <16 x i8> %a0, <16 x i8> %a1, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_punpckhbw_mask
+ ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ %3 = bitcast i16 %mask to <16 x i1>
+  ; load needed to keep the operation from being scheduled above the asm block
+ %4 = load <16 x i8>, <16 x i8>* %passthru
+ %5 = select <16 x i1> %3, <16 x i8> %2, <16 x i8> %4
+ ret <16 x i8> %5
+}
+
+define <16 x i8> @stack_fold_punpckhbw_maskz(<16 x i8> %passthru, <16 x i8> %a0, <16 x i8> %a1, i16 %mask) {
+ ;CHECK-LABEL: stack_fold_punpckhbw_maskz
+ ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ %3 = bitcast i16 %mask to <16 x i1>
+ %4 = select <16 x i1> %3, <16 x i8> %2, <16 x i8> zeroinitializer
+ ret <16 x i8> %4
+}
+
+define <32 x i8> @stack_fold_punpckhbw_ymm(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_punpckhbw_ymm
+ ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <32 x i8> %a0, <32 x i8> %a1, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
+ ret <32 x i8> %2
+}
+
+define <32 x i8> @stack_fold_punpckhbw_mask_ymm(<32 x i8>* %passthru, <32 x i8> %a0, <32 x i8> %a1, i32 %mask) {
+ ;CHECK-LABEL: stack_fold_punpckhbw_mask_ymm
+ ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <32 x i8> %a0, <32 x i8> %a1, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
%3 = bitcast i32 %mask to <32 x i1>
+  ; load needed to keep the operation from being scheduled above the asm block
%4 = load <32 x i8>, <32 x i8>* %passthru
%5 = select <32 x i1> %3, <32 x i8> %2, <32 x i8> %4
ret <32 x i8> %5
}
-define <32 x i8> @stack_fold_palignr_maskz(<32 x i8> %a0, <32 x i8> %a1, i32 %mask) {
- ;CHECK-LABEL: stack_fold_palignr_maskz
- ;CHECK: vpalignr $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
- %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
- %2 = shufflevector <32 x i8> %a1, <32 x i8> %a0, <32 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48>
+define <32 x i8> @stack_fold_punpckhbw_maskz_ymm(<32 x i8> %a0, <32 x i8> %a1, i32 %mask) {
+ ;CHECK-LABEL: stack_fold_punpckhbw_maskz_ymm
+ ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = shufflevector <32 x i8> %a0, <32 x i8> %a1, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
%3 = bitcast i32 %mask to <32 x i1>
%4 = select <32 x i1> %3, <32 x i8> %2, <32 x i8> zeroinitializer
ret <32 x i8> %4
diff --git a/test/CodeGen/X86/stack-folding-int-sse42.ll b/test/CodeGen/X86/stack-folding-int-sse42.ll
index a839a315e766..5c6f697610a0 100644
--- a/test/CodeGen/X86/stack-folding-int-sse42.ll
+++ b/test/CodeGen/X86/stack-folding-int-sse42.ll
@@ -318,7 +318,7 @@ declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @stack_fold_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %c) {
;CHECK-LABEL: stack_fold_pblendvb
- ;CHECK: pblendvb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ ;CHECK: pblendvb %xmm0, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
%2 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a1, <16 x i8> %c, <16 x i8> %a0)
ret <16 x i8> %2
diff --git a/test/CodeGen/X86/stack-folding-sha.ll b/test/CodeGen/X86/stack-folding-sha.ll
new file mode 100644
index 000000000000..768c8a0f5e77
--- /dev/null
+++ b/test/CodeGen/X86/stack-folding-sha.ll
@@ -0,0 +1,72 @@
+; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+sha < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+; Stack reload folding tests.
+;
+; By including a nop call with sideeffects, we can force a partial register spill of the
+; relevant registers and check that the reload is correctly folded into the instruction.
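+;
+; As a sketch of the mechanism: the inline asm below clobbers xmm1-xmm15 (plus
+; flags), so only xmm0 survives it; the register allocator has to spill the
+; second argument to the stack, and the CHECK lines verify that the reload is
+; folded into the SHA instruction as a 16-byte memory operand.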
+
+define <4 x i32> @stack_fold_sha1msg1(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_sha1msg1
+ ;CHECK: sha1msg1 {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = tail call <4 x i32> @llvm.x86.sha1msg1(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sha1msg1(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <4 x i32> @stack_fold_sha1msg2(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_sha1msg2
+ ;CHECK: sha1msg2 {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = tail call <4 x i32> @llvm.x86.sha1msg2(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sha1msg2(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <4 x i32> @stack_fold_sha1nexte(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_sha1nexte
+ ;CHECK: sha1nexte {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = tail call <4 x i32> @llvm.x86.sha1nexte(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sha1nexte(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <4 x i32> @stack_fold_sha1rnds4(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_sha1rnds4
+ ;CHECK: sha1rnds4 $3, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = tail call <4 x i32> @llvm.x86.sha1rnds4(<4 x i32> %a0, <4 x i32> %a1, i8 3)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sha1rnds4(<4 x i32>, <4 x i32>, i8) nounwind readnone
+
+define <4 x i32> @stack_fold_sha256msg1(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_sha256msg1
+ ;CHECK: sha256msg1 {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = tail call <4 x i32> @llvm.x86.sha256msg1(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sha256msg1(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <4 x i32> @stack_fold_sha256msg2(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_sha256msg2
+ ;CHECK: sha256msg2 {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = tail call <4 x i32> @llvm.x86.sha256msg2(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sha256msg2(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <4 x i32> @stack_fold_sha256rnds2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
+ ;CHECK-LABEL: stack_fold_sha256rnds2
+ ;CHECK: sha256rnds2 {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = tail call <4 x i32> @llvm.x86.sha256rnds2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sha256rnds2(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
diff --git a/test/CodeGen/X86/stack-folding-tbm.ll b/test/CodeGen/X86/stack-folding-tbm.ll
new file mode 100644
index 000000000000..fe3c828a69b0
--- /dev/null
+++ b/test/CodeGen/X86/stack-folding-tbm.ll
@@ -0,0 +1,201 @@
+; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+bmi,+tbm < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+; Stack reload folding tests.
+;
+; By including a nop call with sideeffects, we can force a partial register spill of the
+; relevant registers and check that the reload is correctly folded into the instruction.
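+;
+; In this file the inline asm clobbers all general purpose registers other
+; than the stack pointer, so the i32/i64 argument has to be spilled across it;
+; the CHECK lines then match the 4- or 8-byte folded reload in the TBM
+; instruction's memory operand.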
+
+define i32 @stack_fold_bextri_u32(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_bextri_u32
+ ;CHECK: # BB#0:
+ ;CHECK: bextr $2814, {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = tail call i32 @llvm.x86.tbm.bextri.u32(i32 %a0, i32 2814)
+ ret i32 %2
+}
+declare i32 @llvm.x86.tbm.bextri.u32(i32, i32)
+
+define i64 @stack_fold_bextri_u64(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_bextri_u64
+ ;CHECK: # BB#0:
+ ;CHECK: bextr $2814, {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = tail call i64 @llvm.x86.tbm.bextri.u64(i64 %a0, i64 2814)
+ ret i64 %2
+}
+declare i64 @llvm.x86.tbm.bextri.u64(i64, i64)
+
+define i32 @stack_fold_blcfill_u32(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_blcfill_u32
+ ;CHECK: blcfill {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = add i32 %a0, 1
+ %3 = and i32 %a0, %2
+ ret i32 %3
+}
+
+define i64 @stack_fold_blcfill_u64(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_blcfill_u64
+ ;CHECK: blcfill {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = add i64 %a0, 1
+ %3 = and i64 %a0, %2
+ ret i64 %3
+}
+
+define i32 @stack_fold_blci_u32(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_blci_u32
+ ;CHECK: blci {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = add i32 %a0, 1
+ %3 = xor i32 %2, -1
+ %4 = or i32 %a0, %3
+ ret i32 %4
+}
+
+define i64 @stack_fold_blci_u64(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_blci_u64
+ ;CHECK: blci {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = add i64 %a0, 1
+ %3 = xor i64 %2, -1
+ %4 = or i64 %a0, %3
+ ret i64 %4
+}
+
+define i32 @stack_fold_blcic_u32(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_blcic_u32
+ ;CHECK: blcic {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = add i32 %a0, 1
+ %3 = xor i32 %a0, -1
+ %4 = and i32 %2, %3
+ ret i32 %4
+}
+
+define i64 @stack_fold_blcic_u64(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_blcic_u64
+ ;CHECK: blcic {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = add i64 %a0, 1
+ %3 = xor i64 %a0, -1
+ %4 = and i64 %2, %3
+ ret i64 %4
+}
+
+define i32 @stack_fold_blcmsk_u32(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_blcmsk_u32
+ ;CHECK: blcmsk {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = add i32 %a0, 1
+ %3 = xor i32 %a0, %2
+ ret i32 %3
+}
+
+define i64 @stack_fold_blcmsk_u64(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_blcmsk_u64
+ ;CHECK: blcmsk {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = add i64 %a0, 1
+ %3 = xor i64 %a0, %2
+ ret i64 %3
+}
+
+define i32 @stack_fold_blcs_u32(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_blcs_u32
+ ;CHECK: blcs {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = add i32 %a0, 1
+ %3 = or i32 %a0, %2
+ ret i32 %3
+}
+
+define i64 @stack_fold_blcs_u64(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_blcs_u64
+ ;CHECK: blcs {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = add i64 %a0, 1
+ %3 = or i64 %a0, %2
+ ret i64 %3
+}
+
+define i32 @stack_fold_blsfill_u32(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_blsfill_u32
+ ;CHECK: blsfill {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sub i32 %a0, 1
+ %3 = or i32 %a0, %2
+ ret i32 %3
+}
+
+define i64 @stack_fold_blsfill_u64(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_blsfill_u64
+ ;CHECK: blsfill {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sub i64 %a0, 1
+ %3 = or i64 %a0, %2
+ ret i64 %3
+}
+
+define i32 @stack_fold_blsic_u32(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_blsic_u32
+ ;CHECK: blsic {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sub i32 %a0, 1
+ %3 = xor i32 %a0, -1
+ %4 = or i32 %2, %3
+ ret i32 %4
+}
+
+define i64 @stack_fold_blsic_u64(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_blsic_u64
+ ;CHECK: blsic {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sub i64 %a0, 1
+ %3 = xor i64 %a0, -1
+ %4 = or i64 %2, %3
+ ret i64 %4
+}
+
+define i32 @stack_fold_t1mskc_u32(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_t1mskc_u32
+ ;CHECK: t1mskc {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = add i32 %a0, 1
+ %3 = xor i32 %a0, -1
+ %4 = or i32 %2, %3
+ ret i32 %4
+}
+
+define i64 @stack_fold_t1mskc_u64(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_t1mskc_u64
+ ;CHECK: t1mskc {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = add i64 %a0, 1
+ %3 = xor i64 %a0, -1
+ %4 = or i64 %2, %3
+ ret i64 %4
+}
+
+define i32 @stack_fold_tzmsk_u32(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_tzmsk_u32
+ ;CHECK: tzmsk {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sub i32 %a0, 1
+ %3 = xor i32 %a0, -1
+ %4 = and i32 %2, %3
+ ret i32 %4
+}
+
+define i64 @stack_fold_tzmsk_u64(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_tzmsk_u64
+ ;CHECK: tzmsk {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sub i64 %a0, 1
+ %3 = xor i64 %a0, -1
+ %4 = and i64 %2, %3
+ ret i64 %4
+}
diff --git a/test/CodeGen/X86/stack-protector-remarks.ll b/test/CodeGen/X86/stack-protector-remarks.ll
new file mode 100644
index 000000000000..3792bef2742b
--- /dev/null
+++ b/test/CodeGen/X86/stack-protector-remarks.ll
@@ -0,0 +1,103 @@
+; RUN: llc %s -mtriple=x86_64-unknown-unknown -pass-remarks=stack-protector -o /dev/null 2>&1 | FileCheck %s
+; CHECK-NOT: nossp
+; CHECK: function attribute_ssp
+; CHECK-SAME: a function attribute or command-line switch
+; CHECK-NOT: alloca_fixed_small_nossp
+; CHECK: function alloca_fixed_small_ssp
+; CHECK-SAME: a call to alloca or use of a variable length array
+; CHECK: function alloca_fixed_large_ssp
+; CHECK-SAME: a call to alloca or use of a variable length array
+; CHECK: function alloca_variable_ssp
+; CHECK-SAME: a call to alloca or use of a variable length array
+; CHECK: function buffer_ssp
+; CHECK-SAME: a stack allocated buffer or struct containing a buffer
+; CHECK: function struct_ssp
+; CHECK-SAME: a stack allocated buffer or struct containing a buffer
+; CHECK: function address_ssp
+; CHECK-SAME: the address of a local variable being taken
+; CHECK: function multiple_ssp
+; CHECK-SAME: a function attribute or command-line switch
+; CHECK: function multiple_ssp
+; CHECK-SAME: a stack allocated buffer or struct containing a buffer
+; CHECK: function multiple_ssp
+; CHECK-SAME: a stack allocated buffer or struct containing a buffer
+; CHECK: function multiple_ssp
+; CHECK-SAME: the address of a local variable being taken
+; CHECK: function multiple_ssp
+; CHECK-SAME: a call to alloca or use of a variable length array
+
+; Check that no remark is emitted when the switch is not specified.
+; RUN: llc %s -mtriple=x86_64-unknown-unknown -o /dev/null 2>&1 | FileCheck %s -check-prefix=NOREMARK -allow-empty
+; NOREMARK-NOT: ssp
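+; (-allow-empty is needed so FileCheck accepts the empty output expected here.)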
+
+; RUN: llc %s -mtriple=x86_64-unknown-unknown -o /dev/null -pass-remarks-output=%t.yaml
+; RUN: cat %t.yaml | FileCheck %s -check-prefix=YAML
+; YAML: --- !Passed
+; YAML-NEXT: Pass: stack-protector
+; YAML-NEXT: Name: StackProtectorRequested
+; YAML-NEXT: Function: attribute_ssp
+; YAML-NEXT: Args:
+; YAML-NEXT: - String: 'Stack protection applied to function '
+; YAML-NEXT: - Function: attribute_ssp
+; YAML-NEXT: - String: ' due to a function attribute or command-line switch'
+; YAML-NEXT: ...
+
+define void @nossp() ssp {
+ ret void
+}
+
+define void @attribute_ssp() sspreq {
+ ret void
+}
+
+define void @alloca_fixed_small_nossp() ssp {
+ %1 = alloca i8, i64 2, align 16
+ ret void
+}
+
+define void @alloca_fixed_small_ssp() sspstrong {
+ %1 = alloca i8, i64 2, align 16
+ ret void
+}
+
+define void @alloca_fixed_large_ssp() ssp {
+ %1 = alloca i8, i64 64, align 16
+ ret void
+}
+
+define void @alloca_variable_ssp(i64 %x) ssp {
+ %1 = alloca i8, i64 %x, align 16
+ ret void
+}
+
+define void @buffer_ssp() sspstrong {
+ %x = alloca [64 x i32], align 16
+ ret void
+}
+
+%struct.X = type { [64 x i32] }
+define void @struct_ssp() sspstrong {
+ %x = alloca %struct.X, align 4
+ ret void
+}
+
+define void @address_ssp() sspstrong {
+entry:
+ %x = alloca i32, align 4
+ %y = alloca i32*, align 8
+ store i32 32, i32* %x, align 4
+ store i32* %x, i32** %y, align 8
+ ret void
+}
+
+define void @multiple_ssp() sspreq {
+entry:
+ %x = alloca %struct.X, align 4
+ %y = alloca [64 x i32], align 16
+ %a = alloca i32, align 4
+ %b = alloca i32*, align 8
+ %0 = alloca i8, i64 2, align 16
+ store i32 32, i32* %a, align 4
+ store i32* %a, i32** %b, align 8
+ ret void
+}
diff --git a/test/CodeGen/X86/stack-protector-target.ll b/test/CodeGen/X86/stack-protector-target.ll
index 66e45055b2b5..fc5a18d79d4b 100644
--- a/test/CodeGen/X86/stack-protector-target.ll
+++ b/test/CodeGen/X86/stack-protector-target.ll
@@ -1,10 +1,17 @@
; Test target-specific stack cookie location.
-; RUN: llc -mtriple=i386-linux < %s -o - | FileCheck --check-prefix=LINUX-I386 %s
-; RUN: llc -mtriple=x86_64-linux < %s -o - | FileCheck --check-prefix=LINUX-X64 %s
-; RUN: llc -mtriple=i386-linux-android < %s -o - | FileCheck --check-prefix=LINUX-I386 %s
-; RUN: llc -mtriple=x86_64-linux-android < %s -o - | FileCheck --check-prefix=LINUX-X64 %s
-; RUN: llc -mtriple=i386-kfreebsd < %s -o - | FileCheck --check-prefix=LINUX-I386 %s
-; RUN: llc -mtriple=x86_64-kfreebsd < %s -o - | FileCheck --check-prefix=LINUX-X64 %s
+; RUN: llc -mtriple=i386-linux < %s -o - | FileCheck --check-prefix=I386-TLS %s
+; RUN: llc -mtriple=x86_64-linux < %s -o - | FileCheck --check-prefix=X64-TLS %s
+
+; RUN: llc -mtriple=i386-linux-android < %s -o - | FileCheck --check-prefix=I386 %s
+; RUN: llc -mtriple=i386-linux-android16 < %s -o - | FileCheck --check-prefix=I386 %s
+; RUN: llc -mtriple=i386-linux-android17 < %s -o - | FileCheck --check-prefix=I386-TLS %s
+; RUN: llc -mtriple=i386-linux-android24 < %s -o - | FileCheck --check-prefix=I386-TLS %s
+; RUN: llc -mtriple=x86_64-linux-android < %s -o - | FileCheck --check-prefix=X64-TLS %s
+; RUN: llc -mtriple=x86_64-linux-android17 < %s -o - | FileCheck --check-prefix=X64-TLS %s
+; RUN: llc -mtriple=x86_64-linux-android24 < %s -o - | FileCheck --check-prefix=X64-TLS %s
+
+; RUN: llc -mtriple=i386-kfreebsd < %s -o - | FileCheck --check-prefix=I386-TLS %s
+; RUN: llc -mtriple=x86_64-kfreebsd < %s -o - | FileCheck --check-prefix=X64-TLS %s
define void @_Z1fv() sspreq {
entry:
@@ -16,12 +23,17 @@ entry:
declare void @_Z7CapturePi(i32*)
-; LINUX-X64: movq %fs:40, %[[B:.*]]
-; LINUX-X64: movq %[[B]], 16(%rsp)
-; LINUX-X64: movq %fs:40, %[[C:.*]]
-; LINUX-X64: cmpq 16(%rsp), %[[C]]
+; X64-TLS: movq %fs:40, %[[B:.*]]
+; X64-TLS: movq %[[B]], 16(%rsp)
+; X64-TLS: movq %fs:40, %[[C:.*]]
+; X64-TLS: cmpq 16(%rsp), %[[C]]
+
+; I386: movl __stack_chk_guard, %[[B:.*]]
+; I386: movl %[[B]], 8(%esp)
+; I386: movl __stack_chk_guard, %[[C:.*]]
+; I386: cmpl 8(%esp), %[[C]]
-; LINUX-I386: movl %gs:20, %[[B:.*]]
-; LINUX-I386: movl %[[B]], 8(%esp)
-; LINUX-I386: movl %gs:20, %[[C:.*]]
-; LINUX-I386: cmpl 8(%esp), %[[C]]
+; I386-TLS: movl %gs:20, %[[B:.*]]
+; I386-TLS: movl %[[B]], 8(%esp)
+; I386-TLS: movl %gs:20, %[[C:.*]]
+; I386-TLS: cmpl 8(%esp), %[[C]]
diff --git a/test/CodeGen/X86/stack-protector-weight.ll b/test/CodeGen/X86/stack-protector-weight.ll
index 58c6c713941d..d5a65ffb890b 100644
--- a/test/CodeGen/X86/stack-protector-weight.ll
+++ b/test/CodeGen/X86/stack-protector-weight.ll
@@ -31,20 +31,20 @@ define i32 @test_branch_weights(i32 %n) #0 {
entry:
%a = alloca [128 x i32], align 16
%0 = bitcast [128 x i32]* %a to i8*
- call void @llvm.lifetime.start(i64 512, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 512, i8* %0)
%arraydecay = getelementptr inbounds [128 x i32], [128 x i32]* %a, i64 0, i64 0
call void @foo2(i32* %arraydecay)
%idxprom = sext i32 %n to i64
%arrayidx = getelementptr inbounds [128 x i32], [128 x i32]* %a, i64 0, i64 %idxprom
%1 = load i32, i32* %arrayidx, align 4
- call void @llvm.lifetime.end(i64 512, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 512, i8* %0)
ret i32 %1
}
-declare void @llvm.lifetime.start(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare void @foo2(i32*)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
attributes #0 = { sspstrong "stack-protector-buffer-size"="8" }
diff --git a/test/CodeGen/X86/stack_guard_remat.ll b/test/CodeGen/X86/stack_guard_remat.ll
index d38c68a8a5bb..cc3cd6b0801a 100644
--- a/test/CodeGen/X86/stack_guard_remat.ll
+++ b/test/CodeGen/X86/stack_guard_remat.ll
@@ -9,20 +9,20 @@ define i32 @test_stack_guard_remat() #0 {
entry:
%a1 = alloca [256 x i32], align 16
%0 = bitcast [256 x i32]* %a1 to i8*
- call void @llvm.lifetime.start(i64 1024, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 1024, i8* %0)
%arraydecay = getelementptr inbounds [256 x i32], [256 x i32]* %a1, i64 0, i64 0
call void @foo3(i32* %arraydecay)
call void asm sideeffect "foo2", "~{r12},~{r13},~{r14},~{r15},~{ebx},~{esi},~{edi},~{dirflag},~{fpsr},~{flags}"()
- call void @llvm.lifetime.end(i64 1024, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 1024, i8* %0)
ret i32 0
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare void @foo3(i32*)
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/X86/stores-merging.ll b/test/CodeGen/X86/stores-merging.ll
index 9e479bd71b98..dbfb06881d82 100644
--- a/test/CodeGen/X86/stores-merging.ll
+++ b/test/CodeGen/X86/stores-merging.ll
@@ -13,9 +13,9 @@ target triple = "x86_64-unknown-linux-gnu"
;; the same result in memory in the end.
; CHECK-LABEL: redundant_stores_merging:
-; CHECK: movl $123, e+8(%rip)
-; CHECK: movabsq $1958505086977, %rax
+; CHECK: movabsq $528280977409, %rax
; CHECK: movq %rax, e+4(%rip)
+; CHECK: movl $456, e+8(%rip)
define void @redundant_stores_merging() {
entry:
store i32 1, i32* getelementptr inbounds (%structTy, %structTy* @e, i64 0, i32 1), align 4
@@ -26,9 +26,9 @@ entry:
;; This variant tests PR25154.
; CHECK-LABEL: redundant_stores_merging_reverse:
-; CHECK: movl $123, e+8(%rip)
-; CHECK: movabsq $1958505086977, %rax
+; CHECK: movabsq $528280977409, %rax
; CHECK: movq %rax, e+4(%rip)
+; CHECK: movl $456, e+8(%rip)
define void @redundant_stores_merging_reverse() {
entry:
store i32 123, i32* getelementptr inbounds (%structTy, %structTy* @e, i64 0, i32 2), align 4
@@ -45,9 +45,8 @@ entry:
;; a movl, after the store to 3).
;; CHECK-LABEL: overlapping_stores_merging:
-;; CHECK: movw $0, b+2(%rip)
+;; CHECK: movl $1, b(%rip)
;; CHECK: movw $2, b+3(%rip)
-;; CHECK: movw $1, b(%rip)
define void @overlapping_stores_merging() {
entry:
store i16 0, i16* bitcast (i8* getelementptr inbounds ([8 x i8], [8 x i8]* @b, i64 0, i64 2) to i16*), align 2
diff --git a/test/CodeGen/X86/subvector-broadcast.ll b/test/CodeGen/X86/subvector-broadcast.ll
index 7aa3f393bbed..94d3b22a4c80 100644
--- a/test/CodeGen/X86/subvector-broadcast.ll
+++ b/test/CodeGen/X86/subvector-broadcast.ll
@@ -24,13 +24,13 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
; X32-AVX512F-LABEL: test_broadcast_2f64_4f64:
; X32-AVX512F: ## BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X32-AVX512F-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_2f64_4f64:
; X32-AVX512BW: ## BB#0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512BW-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X32-AVX512BW-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_2f64_4f64:
@@ -46,12 +46,12 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
;
; X64-AVX512F-LABEL: test_broadcast_2f64_4f64:
; X64-AVX512F: ## BB#0:
-; X64-AVX512F-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512F-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_2f64_4f64:
; X64-AVX512BW: ## BB#0:
-; X64-AVX512BW-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512BW-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_2f64_4f64:
@@ -153,13 +153,13 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
; X32-AVX512F-LABEL: test_broadcast_2i64_4i64:
; X32-AVX512F: ## BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X32-AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_2i64_4i64:
; X32-AVX512BW: ## BB#0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X32-AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_2i64_4i64:
@@ -175,12 +175,12 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
;
; X64-AVX512F-LABEL: test_broadcast_2i64_4i64:
; X64-AVX512F: ## BB#0:
-; X64-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_2i64_4i64:
; X64-AVX512BW: ## BB#0:
-; X64-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_2i64_4i64:
@@ -286,27 +286,16 @@ define <8 x i64> @test_broadcast_4i64_8i64(<4 x i64> *%p) nounwind {
}
define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
-; X32-AVX-LABEL: test_broadcast_4f32_8f32:
-; X32-AVX: ## BB#0:
-; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X32-AVX-NEXT: retl
-;
-; X32-AVX512-LABEL: test_broadcast_4f32_8f32:
-; X32-AVX512: ## BB#0:
-; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
-; X32-AVX512-NEXT: retl
-;
-; X64-AVX-LABEL: test_broadcast_4f32_8f32:
-; X64-AVX: ## BB#0:
-; X64-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X64-AVX-NEXT: retq
+; X32-LABEL: test_broadcast_4f32_8f32:
+; X32: ## BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X32-NEXT: retl
;
-; X64-AVX512-LABEL: test_broadcast_4f32_8f32:
-; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
-; X64-AVX512-NEXT: retq
+; X64-LABEL: test_broadcast_4f32_8f32:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X64-NEXT: retq
%1 = load <4 x float>, <4 x float> *%p
%2 = shufflevector <4 x float> %1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
ret <8 x float> %2
@@ -402,7 +391,7 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
; X32-AVX512-LABEL: test_broadcast_4i32_8i32:
; X32-AVX512: ## BB#0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X32-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_4i32_8i32:
@@ -412,7 +401,7 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
;
; X64-AVX512-LABEL: test_broadcast_4i32_8i32:
; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512-NEXT: retq
%1 = load <4 x i32>, <4 x i32> *%p
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -522,7 +511,7 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
; X32-AVX512-LABEL: test_broadcast_8i16_16i16:
; X32-AVX512: ## BB#0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X32-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_8i16_16i16:
@@ -532,7 +521,7 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
;
; X64-AVX512-LABEL: test_broadcast_8i16_16i16:
; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512-NEXT: retq
%1 = load <8 x i16>, <8 x i16> *%p
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -557,7 +546,7 @@ define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
; X32-AVX512F-LABEL: test_broadcast_8i16_32i16:
; X32-AVX512F: ## BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X32-AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512F-NEXT: vmovdqa %ymm0, %ymm1
; X32-AVX512F-NEXT: retl
;
@@ -570,7 +559,7 @@ define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
; X32-AVX512DQ-LABEL: test_broadcast_8i16_32i16:
; X32-AVX512DQ: ## BB#0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X32-AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
; X32-AVX512DQ-NEXT: retl
;
@@ -588,7 +577,7 @@ define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
;
; X64-AVX512F-LABEL: test_broadcast_8i16_32i16:
; X64-AVX512F: ## BB#0:
-; X64-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512F-NEXT: vmovdqa %ymm0, %ymm1
; X64-AVX512F-NEXT: retq
;
@@ -599,7 +588,7 @@ define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
;
; X64-AVX512DQ-LABEL: test_broadcast_8i16_32i16:
; X64-AVX512DQ: ## BB#0:
-; X64-AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
; X64-AVX512DQ-NEXT: retq
%1 = load <8 x i16>, <8 x i16> *%p
@@ -672,7 +661,7 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
; X32-AVX512-LABEL: test_broadcast_16i8_32i8:
; X32-AVX512: ## BB#0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X32-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_16i8_32i8:
@@ -682,7 +671,7 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
;
; X64-AVX512-LABEL: test_broadcast_16i8_32i8:
; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512-NEXT: retq
%1 = load <16 x i8>, <16 x i8> *%p
%2 = shufflevector <16 x i8> %1, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -707,7 +696,7 @@ define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind {
; X32-AVX512F-LABEL: test_broadcast_16i8_64i8:
; X32-AVX512F: ## BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X32-AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512F-NEXT: vmovdqa %ymm0, %ymm1
; X32-AVX512F-NEXT: retl
;
@@ -720,7 +709,7 @@ define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind {
; X32-AVX512DQ-LABEL: test_broadcast_16i8_64i8:
; X32-AVX512DQ: ## BB#0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X32-AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
; X32-AVX512DQ-NEXT: retl
;
@@ -738,7 +727,7 @@ define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind {
;
; X64-AVX512F-LABEL: test_broadcast_16i8_64i8:
; X64-AVX512F: ## BB#0:
-; X64-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512F-NEXT: vmovdqa %ymm0, %ymm1
; X64-AVX512F-NEXT: retq
;
@@ -749,7 +738,7 @@ define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind {
;
; X64-AVX512DQ-LABEL: test_broadcast_16i8_64i8:
; X64-AVX512DQ: ## BB#0:
-; X64-AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; X64-AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
; X64-AVX512DQ-NEXT: retq
%1 = load <16 x i8>, <16 x i8> *%p
@@ -1225,8 +1214,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X32-AVX512F-NEXT: vmovdqa (%ecx), %xmm0
; X32-AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-AVX512F-NEXT: vmovdqa %xmm1, (%eax)
-; X32-AVX512F-NEXT: vinserti32x4 $1, %xmm0, %zmm0, %zmm0
-; X32-AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512F-NEXT: vshufi32x4 {{.*#+}} zmm0 = zmm0[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_4i32_16i32_chain:
@@ -1236,8 +1224,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X32-AVX512BW-NEXT: vmovdqa (%ecx), %xmm0
; X32-AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-AVX512BW-NEXT: vmovdqa %xmm1, (%eax)
-; X32-AVX512BW-NEXT: vinserti32x4 $1, %xmm0, %zmm0, %zmm0
-; X32-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512BW-NEXT: vshufi32x4 {{.*#+}} zmm0 = zmm0[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_4i32_16i32_chain:
@@ -1247,8 +1234,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X32-AVX512DQ-NEXT: vmovdqa (%ecx), %xmm0
; X32-AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-AVX512DQ-NEXT: vmovaps %xmm1, (%eax)
-; X32-AVX512DQ-NEXT: vinserti32x4 $1, %xmm0, %zmm0, %zmm0
-; X32-AVX512DQ-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512DQ-NEXT: vshufi32x4 {{.*#+}} zmm0 = zmm0[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_4i32_16i32_chain:
@@ -1265,8 +1251,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X64-AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; X64-AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-AVX512F-NEXT: vmovdqa %xmm1, (%rsi)
-; X64-AVX512F-NEXT: vinserti32x4 $1, %xmm0, %zmm0, %zmm0
-; X64-AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512F-NEXT: vshufi32x4 {{.*#+}} zmm0 = zmm0[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_4i32_16i32_chain:
@@ -1274,8 +1259,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X64-AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; X64-AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-AVX512BW-NEXT: vmovdqa %xmm1, (%rsi)
-; X64-AVX512BW-NEXT: vinserti32x4 $1, %xmm0, %zmm0, %zmm0
-; X64-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512BW-NEXT: vshufi32x4 {{.*#+}} zmm0 = zmm0[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_4i32_16i32_chain:
@@ -1283,8 +1267,7 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
; X64-AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0
; X64-AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX512DQ-NEXT: vmovaps %xmm1, (%rsi)
-; X64-AVX512DQ-NEXT: vinserti32x4 $1, %xmm0, %zmm0, %zmm0
-; X64-AVX512DQ-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512DQ-NEXT: vshufi32x4 {{.*#+}} zmm0 = zmm0[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X64-AVX512DQ-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %p0
store <4 x float> zeroinitializer, <4 x float>* %p1
@@ -1304,7 +1287,7 @@ define void @fallback_broadcast_v4i64_to_v8i64(<4 x i64> %a, <8 x i64> %b) {
; X32-AVX1-LABEL: fallback_broadcast_v4i64_to_v8i64:
; X32-AVX1: ## BB#0: ## %entry
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; X32-AVX1-NEXT: vmovaps {{.*#+}} ymm4 = [1,0,2,0,3,0,4,0]
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} ymm4 = [1,0,2,0,3,0,4,0]
; X32-AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
; X32-AVX1-NEXT: vpaddq %xmm5, %xmm3, %xmm3
; X32-AVX1-NEXT: vpaddq %xmm4, %xmm0, %xmm0
@@ -1347,6 +1330,7 @@ define void @fallback_broadcast_v4i64_to_v8i64(<4 x i64> %a, <8 x i64> %b) {
; X32-AVX512-NEXT: vpandq %zmm2, %zmm1, %zmm1
; X32-AVX512-NEXT: vmovdqu %ymm0, _ga4
; X32-AVX512-NEXT: vmovdqu64 %zmm1, _gb4
+; X32-AVX512-NEXT: vzeroupper
; X32-AVX512-NEXT: retl
;
; X64-AVX1-LABEL: fallback_broadcast_v4i64_to_v8i64:
@@ -1397,6 +1381,7 @@ define void @fallback_broadcast_v4i64_to_v8i64(<4 x i64> %a, <8 x i64> %b) {
; X64-AVX512-NEXT: vpandq %zmm2, %zmm1, %zmm1
; X64-AVX512-NEXT: vmovdqu %ymm0, {{.*}}(%rip)
; X64-AVX512-NEXT: vmovdqu64 %zmm1, {{.*}}(%rip)
+; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
entry:
%0 = add <4 x i64> %a, <i64 1, i64 2, i64 3, i64 4>
@@ -1435,6 +1420,7 @@ define void @fallback_broadcast_v4f64_to_v8f64(<4 x double> %a, <8 x double> %b)
; X32-AVX512-NEXT: vdivpd %zmm2, %zmm1, %zmm1
; X32-AVX512-NEXT: vmovupd %ymm0, _ga2
; X32-AVX512-NEXT: vmovupd %zmm1, _gb2
+; X32-AVX512-NEXT: vzeroupper
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: fallback_broadcast_v4f64_to_v8f64:
@@ -1460,6 +1446,7 @@ define void @fallback_broadcast_v4f64_to_v8f64(<4 x double> %a, <8 x double> %b)
; X64-AVX512-NEXT: vdivpd %zmm2, %zmm1, %zmm1
; X64-AVX512-NEXT: vmovupd %ymm0, {{.*}}(%rip)
; X64-AVX512-NEXT: vmovupd %zmm1, {{.*}}(%rip)
+; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
entry:
%0 = fadd <4 x double> %a, <double 1.0, double 2.0, double 3.0, double 4.0>
diff --git a/test/CodeGen/X86/swifterror.ll b/test/CodeGen/X86/swifterror.ll
index 86e0221c2015..5704d1919988 100644
--- a/test/CodeGen/X86/swifterror.ll
+++ b/test/CodeGen/X86/swifterror.ll
@@ -685,3 +685,30 @@ entry:
tail call void @acallee(i8* null)
ret void
}
+
+; Make sure we don't crash on this function at -O0.
+; We used to crash because we would insert an IMPLICIT_DEF for the swifterror
+; at the beginning of the machine basic block but did not inform FastISel of
+; the inserted instruction. When computing the InsertPoint in the entry block,
+; FastISel would choose an insertion point before the IMPLICIT_DEF, causing a
+; crash later on.
+declare hidden swiftcc i8* @testFunA()
+
+%TSb = type <{ i1 }>
+
+define swiftcc void @dontCrash() {
+entry:
+ %swifterror = alloca swifterror %swift_error*, align 8
+ store %swift_error* null, %swift_error** %swifterror, align 8
+ %a = call i8* @testFunA()
+ %b = bitcast i8* %a to %TSb*
+ %._value = getelementptr inbounds %TSb, %TSb* %b, i32 0, i32 0
+ %c = load i1, i1* %._value, align 1
+ br i1 %c, label %trueBB, label %falseBB
+
+trueBB:
+ ret void
+
+falseBB:
+ ret void
+}
diff --git a/test/CodeGen/X86/tail-call-conditional.mir b/test/CodeGen/X86/tail-call-conditional.mir
new file mode 100644
index 000000000000..e006138ba848
--- /dev/null
+++ b/test/CodeGen/X86/tail-call-conditional.mir
@@ -0,0 +1,85 @@
+# RUN: llc -mtriple x86_64-- -verify-machineinstrs -run-pass branch-folder -o - %s | FileCheck %s
+
+# Check the TCRETURNdi64cc optimization.
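+#
+# Roughly: when a conditional branch targets a block whose only contents set up
+# and perform a tail call (TCRETURNdi64), branch folding can fold the branch
+# into a conditional tail call (TCRETURNdi64cc). The CHECK lines below expect
+# bb.1 to end in a TCRETURNdi64cc to @f1 guarded by the CMP64ri8 flags, while
+# the other path keeps a plain TCRETURNdi64 to @f2.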
+
+--- |
+ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+ define i64 @test(i64 %arg, i8* %arg1) optsize {
+ %tmp = icmp ult i64 %arg, 100
+ br i1 %tmp, label %1, label %4
+
+ %tmp3 = icmp ult i64 %arg, 10
+ br i1 %tmp3, label %2, label %3
+
+ %tmp5 = tail call i64 @f1(i8* %arg1, i64 %arg)
+ ret i64 %tmp5
+
+ %tmp7 = tail call i64 @f2(i8* %arg1, i64 %arg)
+ ret i64 %tmp7
+
+ ret i64 123
+ }
+
+ declare i64 @f1(i8*, i64)
+ declare i64 @f2(i8*, i64)
+
+...
+---
+name: test
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rsi' }
+body: |
+ bb.0:
+ successors: %bb.1, %bb.4
+ liveins: %rdi, %rsi
+
+ %rax = COPY %rdi
+ CMP64ri8 %rax, 99, implicit-def %eflags
+ JA_1 %bb.4, implicit %eflags
+ JMP_1 %bb.1
+
+ ; CHECK: bb.1:
+ ; CHECK-NEXT: successors: %bb.2({{[^)]+}}){{$}}
+ ; CHECK-NEXT: liveins: %rax, %rsi
+ ; CHECK-NEXT: {{^ $}}
+ ; CHECK-NEXT: %rdi = COPY %rsi
+ ; CHECK-NEXT: %rsi = COPY %rax
+ ; CHECK-NEXT: CMP64ri8 %rax, 9, implicit-def %eflags
+ ; CHECK-NEXT: TCRETURNdi64cc @f1, 0, 3, csr_64, implicit %rsp, implicit %eflags, implicit %rsp, implicit %rdi, implicit %rsi, implicit %rax, implicit-def %rax, implicit %sil, implicit-def %sil, implicit %si, implicit-def %si, implicit %esi, implicit-def %esi, implicit %rsi, implicit-def %rsi, implicit %dil, implicit-def %dil, implicit %di, implicit-def %di, implicit %edi, implicit-def %edi, implicit %rdi, implicit-def %rdi, implicit %ah, implicit-def %ah, implicit %al, implicit-def %al, implicit %ax, implicit-def %ax, implicit %eax, implicit-def %eax
+
+ bb.1:
+ successors: %bb.2, %bb.3
+ liveins: %rax, %rsi
+
+ CMP64ri8 %rax, 9, implicit-def %eflags
+ JA_1 %bb.3, implicit %eflags
+ JMP_1 %bb.2
+
+ bb.2:
+ liveins: %rax, %rsi
+
+ %rdi = COPY %rsi
+ %rsi = COPY %rax
+
+ TCRETURNdi64 @f1, 0, csr_64, implicit %rsp, implicit %rdi, implicit %rsi
+
+ ; CHECK: bb.2:
+ ; CHECK-NEXT: liveins: %rax, %rdi, %rsi
+ ; CHECK-NEXT: {{^ $}}
+ ; CHECK-NEXT: TCRETURNdi64 @f2, 0, csr_64, implicit %rsp, implicit %rdi, implicit %rsi
+
+ bb.3:
+ liveins: %rax, %rsi
+
+ %rdi = COPY %rsi
+ %rsi = COPY %rax
+ TCRETURNdi64 @f2, 0, csr_64, implicit %rsp, implicit %rdi, implicit %rsi
+
+ bb.4:
+ dead %eax = MOV32ri64 123, implicit-def %rax
+ RET 0, %rax
+
+...
diff --git a/test/CodeGen/X86/tail-dup-debugloc.ll b/test/CodeGen/X86/tail-dup-debugloc.ll
new file mode 100644
index 000000000000..c5ca6fc5750c
--- /dev/null
+++ b/test/CodeGen/X86/tail-dup-debugloc.ll
@@ -0,0 +1,56 @@
+; RUN: llc -stop-after=tailduplication -march=x86-64 < %s | FileCheck %s
+;
+; Check that the DebugLoc attached to the branch instruction of
+; 'while.cond1.preheader.lr.ph' survives the tail duplication pass.
+;
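+; Concretely, both branch instructions of the duplicated block (the JE_1 and
+; JMP_1 matched below) should carry the same debug-location, which corresponds
+; to !11 at the bottom of this file (line 9, column 5).
+;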
+; CHECK: [[DLOC:![0-9]+]] = !DILocation(line: 9, column: 5, scope: !{{[0-9]+}})
+; CHECK: [[VREG:%[^ ]+]] = COPY %rdi
+; CHECK: TEST64rr [[VREG]], [[VREG]]
+; CHECK-NEXT: JE_1 {{.+}}, debug-location [[DLOC]]
+; CHECK-NEXT: JMP_1 {{.+}}, debug-location [[DLOC]]
+
+target triple = "x86_64-unknown-linux-gnu"
+
+%struct.Node = type { %struct.Node* }
+
+define i32 @foo(%struct.Node* readonly %node, %struct.Node* readnone %root) !dbg !6 {
+entry:
+ %cmp = icmp eq %struct.Node* %node, %root, !dbg !8
+ br i1 %cmp, label %while.end4, label %while.cond1.preheader.lr.ph, !dbg !10
+
+while.cond1.preheader.lr.ph: ; preds = %entry
+ %tobool = icmp eq %struct.Node* %node, null
+ br i1 %tobool, label %while.cond1.preheader.us.preheader, label %while.body2.preheader, !dbg !11
+
+while.body2.preheader: ; preds = %while.cond1.preheader.lr.ph
+ br label %while.body2, !dbg !11
+
+while.cond1.preheader.us.preheader: ; preds = %while.cond1.preheader.lr.ph
+ br label %while.cond1.preheader.us, !dbg !10
+
+while.cond1.preheader.us: ; preds = %while.cond1.preheader.us.preheader, %while.cond1.preheader.us
+ br label %while.cond1.preheader.us, !dbg !10
+
+while.body2: ; preds = %while.body2.preheader, %while.body2
+ br label %while.body2, !dbg !11
+
+while.end4: ; preds = %entry
+ ret i32 0, !dbg !12
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, isOptimized: true, emissionKind: LineTablesOnly)
+!1 = !DIFile(filename: "foo.c", directory: "b/")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{}
+!6 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 5, type: !7, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !2)
+!7 = !DISubroutineType(types: !2)
+!8 = !DILocation(line: 7, column: 15, scope: !9)
+!9 = !DILexicalBlockFile(scope: !6, file: !1, discriminator: 2)
+!10 = !DILocation(line: 7, column: 3, scope: !9)
+!11 = !DILocation(line: 9, column: 5, scope: !9)
+!12 = !DILocation(line: 14, column: 3, scope: !6)
diff --git a/test/CodeGen/X86/tail-dup-no-other-successor.ll b/test/CodeGen/X86/tail-dup-no-other-successor.ll
new file mode 100644
index 000000000000..6fa6f94e6530
--- /dev/null
+++ b/test/CodeGen/X86/tail-dup-no-other-successor.ll
@@ -0,0 +1,53 @@
+; RUN: llc -O3 -o - %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare void @effect(i32);
+
+; After the loop gets laid out, loop.end is the only successor left to place,
+; but it can't be laid out next because of the CFG dependency from
+; top.fakephi. The cost calculation shows that it isn't profitable to
+; tail-duplicate in this case, because of the effects on fallthrough from
+; %loop.end.
+; CHECK-LABEL: {{^}}no_successor_still_no_taildup:
+; CHECK: %entry
+; CHECK: %loop.top
+; CHECK: %loop.latch
+; CHECK: %top.fakephi
+; CHECK: %loop.end
+; CHECK: %false
+; CHECK: %ret
+define void @no_successor_still_no_taildup (i32 %count, i32 %key) {
+entry:
+ br label %loop.top
+
+loop.top:
+ %i.loop.top = phi i32 [ %count, %entry ], [ %i.latch, %loop.latch ]
+ %cmp.top = icmp eq i32 %i.loop.top, %key
+ call void @effect(i32 0)
+ br i1 %cmp.top, label %top.fakephi, label %loop.latch, !prof !1
+
+loop.latch:
+ %i.latch = sub i32 %i.loop.top, 1
+ %cmp.latch = icmp eq i32 %i.latch, 0
+ call void @effect(i32 1)
+ br i1 %cmp.latch, label %loop.top, label %loop.end, !prof !2
+
+top.fakephi:
+ call void @effect(i32 2)
+ br label %loop.end
+
+loop.end:
+ %cmp.end = icmp eq i32 %count, 0
+ br i1 %cmp.end, label %ret, label %false, !prof !3
+
+false:
+ call void @effect(i32 4)
+ br label %ret
+
+ret:
+ ret void
+}
+
+!1 = !{!"branch_weights", i32 1, i32 1}
+!2 = !{!"branch_weights", i32 5, i32 1}
+!3 = !{!"branch_weights", i32 1, i32 2}
diff --git a/test/CodeGen/X86/tail-dup-repeat.ll b/test/CodeGen/X86/tail-dup-repeat.ll
index 21b48e16efb9..7d9c0908e571 100644
--- a/test/CodeGen/X86/tail-dup-repeat.ll
+++ b/test/CodeGen/X86/tail-dup-repeat.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O2 -tail-dup-placement-threshold=4 -o - %s | FileCheck %s
+; RUN: llc -O3 -tail-dup-placement-threshold=4 -o - %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/CodeGen/X86/tail-merge-debugloc.ll b/test/CodeGen/X86/tail-merge-debugloc.ll
new file mode 100644
index 000000000000..197b0b803257
--- /dev/null
+++ b/test/CodeGen/X86/tail-merge-debugloc.ll
@@ -0,0 +1,42 @@
+; RUN: llc -stop-after=branch-folder < %s | FileCheck %s
+;
+; bb2 and bb3 in the IR below will be tail-merged into a single basic block.
+; Since the br instructions in bb2 and bb3 have the same debug location, make
+; sure that the branch instruction in the merged basic block still carries
+; that debug location.
+;
+; CHECK: [[DLOC:![0-9]+]] = !DILocation(line: 2, column: 2, scope: !{{[0-9]+}})
+; CHECK: TEST64rr{{.*}}%rsi, %rsi, implicit-def %eflags
+; CHECK-NEXT: JNE_1{{.*}}, debug-location [[DLOC]]
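+;
+; (The rule being exercised, roughly: when branch folding merges identical
+; tails, a merged instruction keeps its debug location only when all merged
+; copies agree on it; here both br instructions carry !6, so the JNE_1 keeps
+; it.)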
+
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @foo(i1 %b, i8* %p) {
+bb1:
+ br i1 %b, label %bb2, label %bb3
+
+bb2:
+ %a1 = icmp eq i8* %p, null
+ br i1 %a1, label %bb4, label %bb5, !dbg !6
+
+bb3:
+ %a2 = icmp eq i8* %p, null
+ br i1 %a2, label %bb4, label %bb5, !dbg !6
+
+bb4:
+ ret i32 1
+
+bb5:
+ ret i32 0
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1)
+!1 = !DIFile(filename: "foo.c", directory: "b/")
+!2 = !{i32 2, !"Dwarf Version", i32 4}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 3, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !0)
+!5 = distinct !DILexicalBlock(scope: !4, file: !1, line: 1, column: 1)
+!6 = !DILocation(line: 2, column: 2, scope: !5)
diff --git a/test/CodeGen/X86/tail-merge-identical.ll b/test/CodeGen/X86/tail-merge-identical.ll
new file mode 100644
index 000000000000..024ad582d03f
--- /dev/null
+++ b/test/CodeGen/X86/tail-merge-identical.ll
@@ -0,0 +1,41 @@
+; RUN: llc -o - -verify-machineinstrs %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@data = external global [3 x i32], align 4
+@store = external global i32, align 4
+
+; %else1 and %then2 end up lowering to identical blocks. These blocks should be
+; merged during tail-merging.
+; CHECK-LABEL: merge_identical_blocks
+; CHECK: movl $data+4
+; CHECK-NOT: movl $data+4
+; CHECK: retq
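+;
+; Both %else1 and %then2 below compute the same address (@data plus 4 bytes),
+; so each lowers to the same movl $data+4 instruction; after tail merging
+; only one copy should remain, which is what the CHECK/CHECK-NOT pair above
+; verifies.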
+define void @merge_identical_blocks(i1 %a, i1 %b) {
+entry:
+ br label %if1
+
+if1: ; preds = %entry
+ br i1 %a, label %else1, label %if2
+
+else1: ; preds = %if1
+ %ptr.else1 = getelementptr inbounds [3 x i32], [3 x i32]* @data, i64 0, i32 1
+ br label %phi_join
+
+if2: ; preds = %if1
+ br i1 %b, label %then2, label %else2
+
+then2: ; preds = %if2
+ %ptr.then2 = getelementptr inbounds [3 x i32], [3 x i32]* @data, i64 0, i32 1
+ br label %phi_join
+
+else2: ; preds = %if2
+ %ptr.else2 = getelementptr inbounds [3 x i32], [3 x i32]* @data, i64 0, i32 2
+ br label %phi_join
+
+phi_join: ; preds = %else1, %then2, %else2
+ %val.ptr = phi i32* [ %ptr.else1, %else1 ], [ %ptr.then2, %then2 ], [ %ptr.else2, %else2 ]
+ %val = load i32, i32* %val.ptr, align 4
+ store i32 %val, i32* @store, align 4
+ ret void
+}
diff --git a/test/CodeGen/X86/tail-merge-unreachable.ll b/test/CodeGen/X86/tail-merge-unreachable.ll
index 7b2c0f727215..ce5613f52309 100644
--- a/test/CodeGen/X86/tail-merge-unreachable.ll
+++ b/test/CodeGen/X86/tail-merge-unreachable.ll
@@ -29,6 +29,6 @@ end:
; CHECK: [[JUMP_TABLE_BLOCK]]:
; CHECK: btl
; CHECK: jae [[UNREACHABLE_BLOCK:[.][A-Za-z0-9_]+]]
-; CHECK [[UNREACHABLE_BLOCK]]:
+; CHECK: [[UNREACHABLE_BLOCK]]:
; CHECK: .Lfunc_end0
}
diff --git a/test/CodeGen/X86/tail-opts.ll b/test/CodeGen/X86/tail-opts.ll
index 12c90c1a5fa9..96ff33ff5f7d 100644
--- a/test/CodeGen/X86/tail-opts.ll
+++ b/test/CodeGen/X86/tail-opts.ll
@@ -113,16 +113,15 @@ altret:
; CHECK-NEXT: jbe .LBB2_3
; CHECK-NEXT: ucomiss %xmm{{[0-2]}}, %xmm{{[0-2]}}
; CHECK-NEXT: ja .LBB2_4
-; CHECK-NEXT: jmp .LBB2_2
+; CHECK-NEXT: .LBB2_2:
+; CHECK-NEXT: movb $1, %al
+; CHECK-NEXT: ret
; CHECK-NEXT: .LBB2_3:
; CHECK-NEXT: ucomiss %xmm{{[0-2]}}, %xmm{{[0-2]}}
; CHECK-NEXT: jbe .LBB2_2
; CHECK-NEXT: .LBB2_4:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB2_2:
-; CHECK-NEXT: movb $1, %al
-; CHECK-NEXT: ret
define i1 @dont_merge_oddly(float* %result) nounwind {
entry:
@@ -299,33 +298,35 @@ declare void @func()
; one - One instruction may be tail-duplicated even with optsize.
; CHECK-LABEL: one:
-; CHECK: movl $0, XYZ(%rip)
-; CHECK: movl $0, XYZ(%rip)
+; CHECK: j{{.*}} tail_call_me
+; CHECK: j{{.*}} tail_call_me
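+; (Roughly: with optsize the tail-duplication limit drops to a single
+; instruction, so the lone tail call in bb7/bb12 can still be duplicated into
+; its predecessors, replacing the movl stores the old checks looked for.)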
@XYZ = external global i32
-define void @one() nounwind optsize {
+declare void @tail_call_me()
+
+define void @one(i32 %v) nounwind optsize {
entry:
- %0 = icmp eq i32 undef, 0
+ %0 = icmp eq i32 %v, 0
br i1 %0, label %bbx, label %bby
bby:
- switch i32 undef, label %bb7 [
+ switch i32 %v, label %bb7 [
i32 16, label %return
]
bb7:
- store volatile i32 0, i32* @XYZ
- unreachable
+ tail call void @tail_call_me()
+ ret void
bbx:
- switch i32 undef, label %bb12 [
+ switch i32 %v, label %bb12 [
i32 128, label %return
]
bb12:
- store volatile i32 0, i32* @XYZ
- unreachable
+ tail call void @tail_call_me()
+ ret void
return:
ret void
@@ -414,9 +415,9 @@ return:
; CHECK-LABEL: two_nosize:
; CHECK: movl $0, XYZ(%rip)
-; CHECK: movl $1, XYZ(%rip)
+; CHECK: jmp tail_call_me
; CHECK: movl $0, XYZ(%rip)
-; CHECK: movl $1, XYZ(%rip)
+; CHECK: jmp tail_call_me
define void @two_nosize() nounwind {
entry:
@@ -430,8 +431,8 @@ bby:
bb7:
store volatile i32 0, i32* @XYZ
- store volatile i32 1, i32* @XYZ
- unreachable
+ tail call void @tail_call_me()
+ ret void
bbx:
switch i32 undef, label %bb12 [
@@ -440,8 +441,8 @@ bbx:
bb12:
store volatile i32 0, i32* @XYZ
- store volatile i32 1, i32* @XYZ
- unreachable
+ tail call void @tail_call_me()
+ ret void
return:
ret void
@@ -469,3 +470,88 @@ bb.nph: ; preds = %entry
for.end: ; preds = %entry
ret i64 %varx.0
}
+
+; We should tail merge small blocks that don't end in a tail call or return
+; instruction. Those blocks are typically unreachable and will be placed
+; out-of-line after the main return, so we should try to eliminate as many of
+; them as possible.
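+;
+; A C-level picture of the pattern (illustrative only; @qux stands in for an
+; arbitrary condition):
+;
+;   if (!qux()) abort();   // each abort() call site lowers to an identical
+;   if (!qux()) abort();   // "callq abort" block; branch folding should keep
+;   if (!qux()) abort();   // just one out-of-line copy after the ret.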
+
+; CHECK-LABEL: merge_aborts:
+; CHECK-NOT: callq abort
+; CHECK: ret
+; CHECK: callq abort
+; CHECK-NOT: callq abort
+; CHECK: .Lfunc_end{{.*}}:
+
+declare void @abort()
+define void @merge_aborts() {
+entry:
+ %c1 = call i1 @qux()
+ br i1 %c1, label %cont1, label %abort1
+abort1:
+ call void @abort()
+ unreachable
+cont1:
+ %c2 = call i1 @qux()
+ br i1 %c2, label %cont2, label %abort2
+abort2:
+ call void @abort()
+ unreachable
+cont2:
+ %c3 = call i1 @qux()
+ br i1 %c3, label %cont3, label %abort3
+abort3:
+ call void @abort()
+ unreachable
+cont3:
+ %c4 = call i1 @qux()
+ br i1 %c4, label %cont4, label %abort4
+abort4:
+ call void @abort()
+ unreachable
+cont4:
+ ret void
+}
+
+; Use alternating abort functions so that the blocks we wish to merge are not
+; layout successors during branch folding.
+
+; CHECK-LABEL: merge_alternating_aborts:
+; CHECK-NOT: callq abort
+; CHECK: ret
+; CHECK: callq abort
+; CHECK: callq alt_abort
+; CHECK-NOT: callq abort
+; CHECK-NOT: callq alt_abort
+; CHECK: .Lfunc_end{{.*}}:
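+;
+; (Because abort and alt_abort alternate, the identical blocks are never
+; adjacent in layout, so any merging here cannot come from simple fallthrough
+; sharing between layout successors.)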
+
+declare void @alt_abort()
+
+define void @merge_alternating_aborts() {
+entry:
+ %c1 = call i1 @qux()
+ br i1 %c1, label %cont1, label %abort1
+abort1:
+ call void @abort()
+ unreachable
+cont1:
+ %c2 = call i1 @qux()
+ br i1 %c2, label %cont2, label %abort2
+abort2:
+ call void @alt_abort()
+ unreachable
+cont2:
+ %c3 = call i1 @qux()
+ br i1 %c3, label %cont3, label %abort3
+abort3:
+ call void @abort()
+ unreachable
+cont3:
+ %c4 = call i1 @qux()
+ br i1 %c4, label %cont4, label %abort4
+abort4:
+ call void @alt_abort()
+ unreachable
+cont4:
+ ret void
+}
diff --git a/test/CodeGen/X86/twoaddr-coalesce-3.ll b/test/CodeGen/X86/twoaddr-coalesce-3.ll
index 33c9d46f13c3..f5a7326c970c 100644
--- a/test/CodeGen/X86/twoaddr-coalesce-3.ll
+++ b/test/CodeGen/X86/twoaddr-coalesce-3.ll
@@ -19,7 +19,7 @@ for.body.lr.ph: ; preds = %entry
; Check that only one mov will be generated in the kernel loop.
; CHECK-LABEL: foo:
-; CHECK: [[LOOP1:^[a-zA-Z0-9_.]+]]: {{#.*}} %for.body
+; CHECK: [[LOOP1:^[a-zA-Z0-9_.]+]]: {{#.*}} %for.body{{$}}
; CHECK-NOT: mov
; CHECK: movl {{.*}}, [[REG1:%[a-z0-9]+]]
; CHECK-NOT: mov
@@ -56,7 +56,7 @@ for.body.lr.ph: ; preds = %entry
; Check that only two movs will be generated in the kernel loop.
; CHECK-LABEL: goo:
-; CHECK: [[LOOP2:^[a-zA-Z0-9_.]+]]: {{#.*}} %for.body
+; CHECK: [[LOOP2:^[a-zA-Z0-9_.]+]]: {{#.*}} %for.body{{$}}
; CHECK-NOT: mov
; CHECK: movl {{.*}}, [[REG2:%[a-z0-9]+]]
; CHECK-NOT: mov
diff --git a/test/CodeGen/X86/unaligned-32-byte-memops.ll b/test/CodeGen/X86/unaligned-32-byte-memops.ll
index b9deb058cb3f..391f7a38a379 100644
--- a/test/CodeGen/X86/unaligned-32-byte-memops.ll
+++ b/test/CodeGen/X86/unaligned-32-byte-memops.ll
@@ -254,7 +254,7 @@ define <32 x i8> @combine_16_byte_loads_i8(<16 x i8>* %ptr, <32 x i8> %x) {
define <4 x double> @combine_16_byte_loads_double(<2 x double>* %ptr, <4 x double> %x) {
; AVXSLOW-LABEL: combine_16_byte_loads_double:
; AVXSLOW: # BB#0:
-; AVXSLOW-NEXT: vmovupd 144(%rdi), %xmm1
+; AVXSLOW-NEXT: vmovups 144(%rdi), %xmm1
; AVXSLOW-NEXT: vinsertf128 $1, 160(%rdi), %ymm1, %ymm1
; AVXSLOW-NEXT: vaddpd %ymm0, %ymm1, %ymm0
; AVXSLOW-NEXT: retq
diff --git a/test/CodeGen/X86/unreachableblockelim.ll b/test/CodeGen/X86/unreachableblockelim.ll
index 49a075c32811..adaedb5e8d78 100644
--- a/test/CodeGen/X86/unreachableblockelim.ll
+++ b/test/CodeGen/X86/unreachableblockelim.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"
declare void @abort()
; CHECK-LABEL: @foo(
-; CHECK-NOT return:
+; CHECK-NOT: return:
define void @foo(i32* %p) {
entry:
%p.addr = alloca i32*, align 8
diff --git a/test/CodeGen/X86/unused_stackslots.ll b/test/CodeGen/X86/unused_stackslots.ll
index 0bb904130f1c..82fd3db1ccb9 100644
--- a/test/CodeGen/X86/unused_stackslots.ll
+++ b/test/CodeGen/X86/unused_stackslots.ll
@@ -24,7 +24,7 @@ define i32 @fn() #0 {
entry:
%n = alloca [8 x [8 x i32]], align 16
%tmp = bitcast [8 x [8 x i32]]* %n to i8*
- call void @llvm.lifetime.start(i64 256, i8* %tmp) #3
+ call void @llvm.lifetime.start.p0i8(i64 256, i8* %tmp) #3
%tmp1 = bitcast [8 x [8 x i32]]* %n to i8*
%arraydecay.1 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %n, i64 0, i64 1, i64 0
%tmp2 = bitcast i32* %arraydecay.1 to i8*
@@ -222,12 +222,12 @@ for.inc73: ; preds = %for.body61.preheade
for.end75: ; preds = %for.inc73
%m.4.lcssa = phi i32 [ %m.4, %for.inc73 ]
- call void @llvm.lifetime.end(i64 256, i8* %tmp) #3
+ call void @llvm.lifetime.end.p0i8(i64 256, i8* %tmp) #3
ret i32 %m.4.lcssa
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare void @LumaPrediction4x4(i32, i32, i32, i32, i32, i16 signext, i16 signext) #2
@@ -237,7 +237,7 @@ declare i32 @distortion4x4(i32*) #2
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
attributes #0 = { nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind }
diff --git a/test/CodeGen/X86/unwindraise.ll b/test/CodeGen/X86/unwindraise.ll
index fb8319b63c2c..db39f4ed4559 100644
--- a/test/CodeGen/X86/unwindraise.ll
+++ b/test/CodeGen/X86/unwindraise.ll
@@ -123,7 +123,7 @@ while.end: ; preds = %if.then4
store i64 %16, i64* %private_2, align 8
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %3, i64 240, i32 8, i1 false)
%17 = bitcast %struct._Unwind_FrameState* %fs.i to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %17)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %17)
%personality.i = getelementptr inbounds %struct._Unwind_FrameState, %struct._Unwind_FrameState* %fs.i, i64 0, i32 6
%retaddr_column.i22 = getelementptr inbounds %struct._Unwind_FrameState, %struct._Unwind_FrameState* %fs.i, i64 0, i32 9
br label %while.body.i
@@ -211,7 +211,7 @@ uw_update_context.exit44: ; preds = %if.then10.i.i.i40,
br label %while.body.i
do.body19: ; preds = %if.then3.i
- call void @llvm.lifetime.end(i64 -1, i8* %17)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %17)
%call20 = call fastcc i64 @uw_install_context_1(%struct._Unwind_Context* %this_context, %struct._Unwind_Context* %cur_context)
%32 = load i8*, i8** %ra.i, align 8
call void @llvm.eh.return.i64(i64 %call20, i8* %32)
@@ -242,6 +242,6 @@ declare void @llvm.eh.return.i64(i64, i8*) nounwind
declare fastcc void @uw_update_context_1(%struct._Unwind_Context*, %struct._Unwind_FrameState* nocapture) uwtable
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
diff --git a/test/CodeGen/X86/update-terminator-debugloc.ll b/test/CodeGen/X86/update-terminator-debugloc.ll
new file mode 100644
index 000000000000..359c348b42cb
--- /dev/null
+++ b/test/CodeGen/X86/update-terminator-debugloc.ll
@@ -0,0 +1,91 @@
+; RUN: llc -stop-after=machine-sink -march=x86-64 < %s | FileCheck %s
+;
+; test code:
+; 1 extern int bar(int x);
+; 2
+; 3 int foo(int *begin, int *end) {
+; 4 int *i;
+; 5 int ret = 0;
+; 6 for (
+; 7 i = begin ;
+; 8 i != end ;
+; 9 i++)
+; 10 {
+; 11 ret += bar(*i);
+; 12 }
+; 13 return ret;
+; 14 }
+;
+; In the LLVM IR below (compiled from the test code above), the loop-control
+; branches in the entry and for.body blocks have a debug location of line 6.
+; Make sure that these debug locations are propagated correctly to the
+; lowered instructions.
+;
+; CHECK: [[DLOC:![0-9]+]] = !DILocation(line: 6
+; CHECK-DAG: [[VREG1:%[^ ]+]] = COPY %rsi
+; CHECK-DAG: [[VREG2:%[^ ]+]] = COPY %rdi
+; CHECK: SUB64rr [[VREG2]], [[VREG1]]
+; CHECK-NEXT: JNE_1 {{.*}}, debug-location [[DLOC]]{{$}}
+; CHECK: [[VREG3:%[^ ]+]] = PHI [[VREG2]]
+; CHECK: [[VREG4:%[^ ]+]] = ADD64ri8 [[VREG3]], 4
+; CHECK: SUB64rr [[VREG1]], [[VREG4]]
+; CHECK-NEXT: JNE_1 {{.*}}, debug-location [[DLOC]]{{$}}
+; CHECK-NEXT: JMP_1 {{.*}}, debug-location [[DLOC]]{{$}}
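+;
+; (The SUB64rr/JNE_1 pairs above are the lowered forms of the two line-6
+; loop-control branches; the checks assert that the rewritten terminators
+; carry the br's !DILocation, !12 below, instead of dropping it.)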
+
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @foo(i32* readonly %begin, i32* readnone %end) !dbg !4 {
+entry:
+ %cmp6 = icmp eq i32* %begin, %end, !dbg !9
+ br i1 %cmp6, label %for.end, label %for.body.preheader, !dbg !12
+
+for.body.preheader: ; preds = %entry
+ br label %for.body, !dbg !13
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %ret.08 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+ %i.07 = phi i32* [ %incdec.ptr, %for.body ], [ %begin, %for.body.preheader ]
+ %0 = load i32, i32* %i.07, align 4, !dbg !13, !tbaa !15
+ %call = tail call i32 @bar(i32 %0), !dbg !19
+ %add = add nsw i32 %call, %ret.08, !dbg !20
+ %incdec.ptr = getelementptr inbounds i32, i32* %i.07, i64 1, !dbg !21
+ %cmp = icmp eq i32* %incdec.ptr, %end, !dbg !9
+ br i1 %cmp, label %for.end.loopexit, label %for.body, !dbg !12, !llvm.loop !22
+
+for.end.loopexit: ; preds = %for.body
+ br label %for.end, !dbg !24
+
+for.end: ; preds = %for.end.loopexit, %entry
+ %ret.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.end.loopexit ]
+ ret i32 %ret.0.lcssa, !dbg !24
+}
+
+declare i32 @bar(i32)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1)
+!1 = !DIFile(filename: "foo.c", directory: "b/")
+!2 = !{i32 2, !"Dwarf Version", i32 4}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 3, type: !5, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !0)
+!5 = !DISubroutineType(types: !6)
+!6 = !{!7, !8, !8}
+!7 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!8 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 64)
+!9 = !DILocation(line: 8, column: 9, scope: !10)
+!10 = distinct !DILexicalBlock(scope: !11, file: !1, line: 6, column: 3)
+!11 = distinct !DILexicalBlock(scope: !4, file: !1, line: 6, column: 3)
+!12 = !DILocation(line: 6, column: 3, scope: !11)
+!13 = !DILocation(line: 11, column: 18, scope: !14)
+!14 = distinct !DILexicalBlock(scope: !10, file: !1, line: 10, column: 3)
+!15 = !{!16, !16, i64 0}
+!16 = !{!"int", !17, i64 0}
+!17 = !{!"omnipotent char", !18, i64 0}
+!18 = !{!"Simple C/C++ TBAA"}
+!19 = !DILocation(line: 11, column: 14, scope: !14)
+!20 = !DILocation(line: 11, column: 11, scope: !14)
+!21 = !DILocation(line: 9, column: 8, scope: !10)
+!22 = distinct !{!22, !12, !23}
+!23 = !DILocation(line: 12, column: 3, scope: !11)
+!24 = !DILocation(line: 13, column: 3, scope: !4)
diff --git a/test/CodeGen/X86/vec_cast2.ll b/test/CodeGen/X86/vec_cast2.ll
index dda50b7b94b7..c03b330b88e0 100644
--- a/test/CodeGen/X86/vec_cast2.ll
+++ b/test/CodeGen/X86/vec_cast2.ll
@@ -48,10 +48,10 @@ define <8 x float> @foo2_8(<8 x i8> %src) {
; CHECK-LABEL: foo2_8:
; CHECK: ## BB#0:
; CHECK-NEXT: vpand LCPI2_0, %xmm0, %xmm0
-; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; CHECK-NEXT: vcvtdq2ps %ymm0, %ymm0
; CHECK-NEXT: retl
;
@@ -97,10 +97,10 @@ define <8 x i8> @foo3_8(<8 x float> %src) {
;
; CHECK-WIDE-LABEL: foo3_8:
; CHECK-WIDE: ## BB#0:
-; CHECK-WIDE-NEXT: vcvttss2si %xmm0, %eax
-; CHECK-WIDE-NEXT: vpinsrb $0, %eax, %xmm0, %xmm1
-; CHECK-WIDE-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-WIDE-NEXT: vcvttss2si %xmm2, %eax
+; CHECK-WIDE-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; CHECK-WIDE-NEXT: vcvttss2si %xmm1, %eax
+; CHECK-WIDE-NEXT: vcvttss2si %xmm0, %ecx
+; CHECK-WIDE-NEXT: vmovd %ecx, %xmm1
; CHECK-WIDE-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
; CHECK-WIDE-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; CHECK-WIDE-NEXT: vcvttss2si %xmm2, %eax
@@ -134,10 +134,10 @@ define <4 x i8> @foo3_4(<4 x float> %src) {
;
; CHECK-WIDE-LABEL: foo3_4:
; CHECK-WIDE: ## BB#0:
-; CHECK-WIDE-NEXT: vcvttss2si %xmm0, %eax
-; CHECK-WIDE-NEXT: vpinsrb $0, %eax, %xmm0, %xmm1
-; CHECK-WIDE-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-WIDE-NEXT: vcvttss2si %xmm2, %eax
+; CHECK-WIDE-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; CHECK-WIDE-NEXT: vcvttss2si %xmm1, %eax
+; CHECK-WIDE-NEXT: vcvttss2si %xmm0, %ecx
+; CHECK-WIDE-NEXT: vmovd %ecx, %xmm1
; CHECK-WIDE-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
; CHECK-WIDE-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; CHECK-WIDE-NEXT: vcvttss2si %xmm2, %eax
diff --git a/test/CodeGen/X86/vec_extract-mmx.ll b/test/CodeGen/X86/vec_extract-mmx.ll
index ed957728aeff..a137d052d296 100644
--- a/test/CodeGen/X86/vec_extract-mmx.ll
+++ b/test/CodeGen/X86/vec_extract-mmx.ll
@@ -8,17 +8,14 @@ define i32 @test0(<1 x i64>* %v4) nounwind {
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-8, %esp
-; X32-NEXT: subl $24, %esp
+; X32-NEXT: subl $8, %esp
; X32-NEXT: movl 8(%ebp), %eax
; X32-NEXT: movl (%eax), %ecx
; X32-NEXT: movl 4(%eax), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X32-NEXT: movl %ecx, (%esp)
; X32-NEXT: pshufw $238, (%esp), %mm0 # mm0 = mem[2,3,2,3]
-; X32-NEXT: movq %mm0, {{[0-9]+}}(%esp)
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; X32-NEXT: movd %xmm0, %eax
+; X32-NEXT: movd %mm0, %eax
; X32-NEXT: addl $32, %eax
; X32-NEXT: movl %ebp, %esp
; X32-NEXT: popl %ebp
@@ -47,20 +44,11 @@ entry:
define i32 @test1(i32* nocapture readonly %ptr) nounwind {
; X32-LABEL: test1:
; X32: # BB#0: # %entry
-; X32-NEXT: pushl %ebp
-; X32-NEXT: movl %esp, %ebp
-; X32-NEXT: andl $-8, %esp
-; X32-NEXT: subl $16, %esp
-; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd (%eax), %mm0
; X32-NEXT: pshufw $232, %mm0, %mm0 # mm0 = mm0[0,2,2,3]
-; X32-NEXT: movq %mm0, (%esp)
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; X32-NEXT: movd %xmm0, %eax
+; X32-NEXT: movd %mm0, %eax
; X32-NEXT: emms
-; X32-NEXT: movl %ebp, %esp
-; X32-NEXT: popl %ebp
; X32-NEXT: retl
;
; X64-LABEL: test1:
@@ -91,19 +79,10 @@ entry:
define i32 @test2(i32* nocapture readonly %ptr) nounwind {
; X32-LABEL: test2:
; X32: # BB#0: # %entry
-; X32-NEXT: pushl %ebp
-; X32-NEXT: movl %esp, %ebp
-; X32-NEXT: andl $-8, %esp
-; X32-NEXT: subl $16, %esp
-; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pshufw $232, (%eax), %mm0 # mm0 = mem[0,2,2,3]
-; X32-NEXT: movq %mm0, (%esp)
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; X32-NEXT: movd %xmm0, %eax
+; X32-NEXT: movd %mm0, %eax
; X32-NEXT: emms
-; X32-NEXT: movl %ebp, %esp
-; X32-NEXT: popl %ebp
; X32-NEXT: retl
;
; X64-LABEL: test2:
@@ -150,7 +129,7 @@ define i32 @test4(x86_mmx %a) nounwind {
; X32-NEXT: subl $8, %esp
; X32-NEXT: movq %mm0, (%esp)
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3,0,1]
+; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,0,1]
; X32-NEXT: movd %xmm0, %eax
; X32-NEXT: movl %ebp, %esp
; X32-NEXT: popl %ebp
diff --git a/test/CodeGen/X86/vec_fp_to_int.ll b/test/CodeGen/X86/vec_fp_to_int.ll
index 2ad20a89cf26..a345f78e18c1 100644
--- a/test/CodeGen/X86/vec_fp_to_int.ll
+++ b/test/CodeGen/X86/vec_fp_to_int.ll
@@ -63,6 +63,7 @@ define <2 x i64> @fptosi_2f64_to_2i64(<2 x double> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_2f64_to_2i64:
@@ -112,18 +113,12 @@ define <4 x i32> @fptosi_4f64_to_2i32(<2 x double> %a) {
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
-; VEX-LABEL: fptosi_4f64_to_2i32:
-; VEX: # BB#0:
-; VEX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; VEX-NEXT: vcvttpd2dq %ymm0, %xmm0
-; VEX-NEXT: vzeroupper
-; VEX-NEXT: retq
-;
-; AVX512-LABEL: fptosi_4f64_to_2i32:
-; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; AVX512-NEXT: vcvttpd2dq %ymm0, %xmm0
-; AVX512-NEXT: retq
+; AVX-LABEL: fptosi_4f64_to_2i32:
+; AVX: # BB#0:
+; AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX-NEXT: vcvttpd2dq %ymm0, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
%ext = shufflevector <2 x double> %a, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%cvt = fptosi <4 x double> %ext to <4 x i32>
ret <4 x i32> %cvt
@@ -243,16 +238,11 @@ define <4 x i32> @fptosi_4f64_to_4i32(<4 x double> %a) {
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
-; VEX-LABEL: fptosi_4f64_to_4i32:
-; VEX: # BB#0:
-; VEX-NEXT: vcvttpd2dq %ymm0, %xmm0
-; VEX-NEXT: vzeroupper
-; VEX-NEXT: retq
-;
-; AVX512-LABEL: fptosi_4f64_to_4i32:
-; AVX512: # BB#0:
-; AVX512-NEXT: vcvttpd2dq %ymm0, %xmm0
-; AVX512-NEXT: retq
+; AVX-LABEL: fptosi_4f64_to_4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vcvttpd2dq %ymm0, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
%cvt = fptosi <4 x double> %a to <4 x i32>
ret <4 x i32> %cvt
}
@@ -334,6 +324,7 @@ define <2 x i64> @fptoui_2f64_to_2i64(<2 x double> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvttpd2uqq %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_2f64_to_2i64:
@@ -400,6 +391,7 @@ define <4 x i32> @fptoui_2f64_to_4i32(<2 x double> %a) {
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_2f64_to_4i32:
@@ -412,6 +404,7 @@ define <4 x i32> @fptoui_2f64_to_4i32(<2 x double> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_2f64_to_4i32:
@@ -477,6 +470,7 @@ define <4 x i32> @fptoui_2f64_to_2i32(<2 x double> %a) {
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_2f64_to_2i32:
@@ -489,6 +483,7 @@ define <4 x i32> @fptoui_2f64_to_2i32(<2 x double> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_2f64_to_2i32:
@@ -550,12 +545,14 @@ define <4 x i32> @fptoui_4f64_to_2i32(<2 x double> %a) {
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_4f64_to_2i32:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
; AVX512VL-NEXT: vcvttpd2udq %ymm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_4f64_to_2i32:
@@ -563,12 +560,14 @@ define <4 x i32> @fptoui_4f64_to_2i32(<2 x double> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f64_to_2i32:
; AVX512VLDQ: # BB#0:
; AVX512VLDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
; AVX512VLDQ-NEXT: vcvttpd2udq %ymm0, %xmm0
+; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
%ext = shufflevector <2 x double> %a, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%cvt = fptoui <4 x double> %ext to <4 x i32>
@@ -816,11 +815,13 @@ define <4 x i32> @fptoui_4f64_to_4i32(<4 x double> %a) {
; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_4f64_to_4i32:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vcvttpd2udq %ymm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_4f64_to_4i32:
@@ -828,11 +829,13 @@ define <4 x i32> @fptoui_4f64_to_4i32(<4 x double> %a) {
; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f64_to_4i32:
; AVX512VLDQ: # BB#0:
; AVX512VLDQ-NEXT: vcvttpd2udq %ymm0, %xmm0
+; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <4 x double> %a to <4 x i32>
ret <4 x i32> %cvt
@@ -980,12 +983,14 @@ define <2 x i64> @fptosi_4f32_to_2i64(<4 x float> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_4f32_to_2i64:
; AVX512VLDQ: # BB#0:
; AVX512VLDQ-NEXT: vcvttps2qq %xmm0, %ymm0
; AVX512VLDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
%cvt = fptosi <4 x float> %a to <4 x i64>
%shuf = shufflevector <4 x i64> %cvt, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
@@ -1281,6 +1286,7 @@ define <2 x i32> @fptoui_2f32_to_2i32(<2 x float> %a) {
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_2f32_to_2i32:
@@ -1294,6 +1300,7 @@ define <2 x i32> @fptoui_2f32_to_2i32(<2 x float> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_2f32_to_2i32:
@@ -1347,6 +1354,7 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4 x float> %a) {
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_4f32_to_4i32:
@@ -1359,6 +1367,7 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4 x float> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f32_to_4i32:
@@ -1529,12 +1538,14 @@ define <2 x i64> @fptoui_4f32_to_2i64(<4 x float> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f32_to_2i64:
; AVX512VLDQ: # BB#0:
; AVX512VLDQ-NEXT: vcvttps2uqq %xmm0, %ymm0
; AVX512VLDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <4 x float> %a to <4 x i64>
%shuf = shufflevector <4 x i64> %cvt, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
@@ -2291,6 +2302,7 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2 x half> %a) nounwind {
; AVX512F-NEXT: vmovq %rax, %xmm0
; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptosi_2f16_to_4i32:
@@ -2321,6 +2333,7 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2 x half> %a) nounwind {
; AVX512DQ-NEXT: vmovq %rax, %xmm0
; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512DQ-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_2f16_to_4i32:
diff --git a/test/CodeGen/X86/vec_fpext.ll b/test/CodeGen/X86/vec_fpext.ll
index 9d9434cb5223..609ed0882092 100644
--- a/test/CodeGen/X86/vec_fpext.ll
+++ b/test/CodeGen/X86/vec_fpext.ll
@@ -82,6 +82,7 @@ define void @fpext_frommem4(<4 x float>* %in, <4 x double>* %out) {
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX512VL-NEXT: vcvtps2pd (%ecx), %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5a,0x01]
; X32-AVX512VL-NEXT: vmovups %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x00]
+; X32-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: fpext_frommem4:
@@ -103,6 +104,7 @@ define void @fpext_frommem4(<4 x float>* %in, <4 x double>* %out) {
; X64-AVX512VL: # BB#0: # %entry
; X64-AVX512VL-NEXT: vcvtps2pd (%rdi), %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5a,0x07]
; X64-AVX512VL-NEXT: vmovups %ymm0, (%rsi) # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x06]
+; X64-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
entry:
%0 = load <4 x float>, <4 x float>* %in
@@ -143,6 +145,7 @@ define void @fpext_frommem8(<8 x float>* %in, <8 x double>* %out) {
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX512VL-NEXT: vcvtps2pd (%ecx), %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x5a,0x01]
; X32-AVX512VL-NEXT: vmovups %zmm0, (%eax) # encoding: [0x62,0xf1,0x7c,0x48,0x11,0x00]
+; X32-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: fpext_frommem8:
@@ -170,6 +173,7 @@ define void @fpext_frommem8(<8 x float>* %in, <8 x double>* %out) {
; X64-AVX512VL: # BB#0: # %entry
; X64-AVX512VL-NEXT: vcvtps2pd (%rdi), %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x5a,0x07]
; X64-AVX512VL-NEXT: vmovups %zmm0, (%rsi) # encoding: [0x62,0xf1,0x7c,0x48,0x11,0x06]
+; X64-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
entry:
%0 = load <8 x float>, <8 x float>* %in
diff --git a/test/CodeGen/X86/vec_fptrunc.ll b/test/CodeGen/X86/vec_fptrunc.ll
index 841ac8a44dab..e6a0d52c5ae8 100644
--- a/test/CodeGen/X86/vec_fptrunc.ll
+++ b/test/CodeGen/X86/vec_fptrunc.ll
@@ -102,7 +102,7 @@ define void @fptrunc_frommem8(<8 x double>* %in, <8 x float>* %out) {
; X32-AVX-NEXT: vcvtpd2psy (%ecx), %xmm0
; X32-AVX-NEXT: vcvtpd2psy 32(%ecx), %xmm1
; X32-AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vmovupd %ymm0, (%eax)
+; X32-AVX-NEXT: vmovups %ymm0, (%eax)
; X32-AVX-NEXT: vzeroupper
; X32-AVX-NEXT: retl
;
@@ -123,7 +123,7 @@ define void @fptrunc_frommem8(<8 x double>* %in, <8 x float>* %out) {
; X64-AVX-NEXT: vcvtpd2psy (%rdi), %xmm0
; X64-AVX-NEXT: vcvtpd2psy 32(%rdi), %xmm1
; X64-AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X64-AVX-NEXT: vmovupd %ymm0, (%rsi)
+; X64-AVX-NEXT: vmovups %ymm0, (%rsi)
; X64-AVX-NEXT: vzeroupper
; X64-AVX-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/vec_int_to_fp.ll b/test/CodeGen/X86/vec_int_to_fp.ll
index 923af1216d05..649b45712f57 100644
--- a/test/CodeGen/X86/vec_int_to_fp.ll
+++ b/test/CodeGen/X86/vec_int_to_fp.ll
@@ -61,6 +61,7 @@ define <2 x double> @sitofp_2i64_to_2f64(<2 x i64> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_2i64_to_2f64:
@@ -92,18 +93,12 @@ define <2 x double> @sitofp_4i32_to_2f64(<4 x i32> %a) {
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
-; VEX-LABEL: sitofp_4i32_to_2f64:
-; VEX: # BB#0:
-; VEX-NEXT: vcvtdq2pd %xmm0, %ymm0
-; VEX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
-; VEX-NEXT: vzeroupper
-; VEX-NEXT: retq
-;
-; AVX512-LABEL: sitofp_4i32_to_2f64:
-; AVX512: # BB#0:
-; AVX512-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
-; AVX512-NEXT: retq
+; AVX-LABEL: sitofp_4i32_to_2f64:
+; AVX: # BB#0:
+; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
+; AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
%cvt = sitofp <4 x i32> %a to <4 x double>
%shuf = shufflevector <4 x double> %cvt, <4 x double> undef, <2 x i32> <i32 0, i32 1>
ret <2 x double> %shuf
@@ -156,6 +151,7 @@ define <2 x double> @sitofp_8i16_to_2f64(<8 x i16> %a) {
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = sitofp <8 x i16> %a to <8 x double>
%shuf = shufflevector <8 x double> %cvt, <8 x double> undef, <2 x i32> <i32 0, i32 1>
@@ -211,6 +207,7 @@ define <2 x double> @sitofp_16i8_to_2f64(<16 x i8> %a) {
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = sitofp <16 x i8> %a to <16 x double>
%shuf = shufflevector <16 x double> %cvt, <16 x double> undef, <2 x i32> <i32 0, i32 1>
@@ -498,6 +495,7 @@ define <2 x double> @uitofp_2i64_to_2f64(<2 x i64> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_2i64_to_2f64:
@@ -536,6 +534,7 @@ define <2 x double> @uitofp_2i32_to_2f64(<4 x i32> %a) {
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_2i32_to_2f64:
@@ -548,6 +547,7 @@ define <2 x double> @uitofp_2i32_to_2f64(<4 x i32> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_2i32_to_2f64:
@@ -603,12 +603,14 @@ define <2 x double> @uitofp_4i32_to_2f64(<4 x i32> %a) {
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i32_to_2f64:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vcvtudq2pd %xmm0, %ymm0
; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_4i32_to_2f64:
@@ -616,12 +618,14 @@ define <2 x double> @uitofp_4i32_to_2f64(<4 x i32> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i32_to_2f64:
; AVX512VLDQ: # BB#0:
; AVX512VLDQ-NEXT: vcvtudq2pd %xmm0, %ymm0
; AVX512VLDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <4 x i32> %a to <4 x double>
%shuf = shufflevector <4 x double> %cvt, <4 x double> undef, <2 x i32> <i32 0, i32 1>
@@ -675,6 +679,7 @@ define <2 x double> @uitofp_8i16_to_2f64(<8 x i16> %a) {
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = uitofp <8 x i16> %a to <8 x double>
%shuf = shufflevector <8 x double> %cvt, <8 x double> undef, <2 x i32> <i32 0, i32 1>
@@ -730,6 +735,7 @@ define <2 x double> @uitofp_16i8_to_2f64(<16 x i8> %a) {
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = uitofp <16 x i8> %a to <16 x double>
%shuf = shufflevector <16 x double> %cvt, <16 x double> undef, <2 x i32> <i32 0, i32 1>
@@ -1089,6 +1095,7 @@ define <4 x float> @sitofp_2i64_to_4f32(<2 x i64> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_2i64_to_4f32:
@@ -1147,6 +1154,7 @@ define <4 x float> @sitofp_2i64_to_4f32_zero(<2 x i64> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_2i64_to_4f32_zero:
@@ -1212,12 +1220,14 @@ define <4 x float> @sitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_4i64_to_4f32_undef:
; AVX512VLDQ: # BB#0:
; AVX512VLDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
; AVX512VLDQ-NEXT: vcvtqq2ps %ymm0, %xmm0
+; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
%ext = shufflevector <2 x i64> %a, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%cvt = sitofp <4 x i64> %ext to <4 x float>
@@ -1288,6 +1298,7 @@ define <4 x float> @sitofp_8i16_to_4f32(<8 x i16> %a) {
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = sitofp <8 x i16> %a to <8 x float>
%shuf = shufflevector <8 x float> %cvt, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1346,6 +1357,7 @@ define <4 x float> @sitofp_16i8_to_4f32(<16 x i8> %a) {
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = sitofp <16 x i8> %a to <16 x float>
%shuf = shufflevector <16 x float> %cvt, <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1421,6 +1433,7 @@ define <4 x float> @sitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_4i64_to_4f32:
@@ -1437,6 +1450,7 @@ define <4 x float> @sitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
; AVX512VL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_4i64_to_4f32:
@@ -1444,11 +1458,13 @@ define <4 x float> @sitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_4i64_to_4f32:
; AVX512VLDQ: # BB#0:
; AVX512VLDQ-NEXT: vcvtqq2ps %ymm0, %xmm0
+; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
%cvt = sitofp <4 x i64> %a to <4 x float>
ret <4 x float> %cvt
@@ -1697,6 +1713,7 @@ define <4 x float> @uitofp_2i64_to_4f32(<2 x i64> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_2i64_to_4f32:
@@ -1805,6 +1822,7 @@ define <4 x float> @uitofp_2i64_to_2f32(<2 x i64> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_2i64_to_2f32:
@@ -1932,12 +1950,14 @@ define <4 x float> @uitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i64_to_4f32_undef:
; AVX512VLDQ: # BB#0:
; AVX512VLDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
; AVX512VLDQ-NEXT: vcvtuqq2ps %ymm0, %xmm0
+; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
%ext = shufflevector <2 x i64> %a, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%cvt = uitofp <4 x i64> %ext to <4 x float>
@@ -1982,6 +2002,7 @@ define <4 x float> @uitofp_4i32_to_4f32(<4 x i32> %a) {
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i32_to_4f32:
@@ -1994,6 +2015,7 @@ define <4 x float> @uitofp_4i32_to_4f32(<4 x i32> %a) {
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i32_to_4f32:
@@ -2032,10 +2054,10 @@ define <4 x float> @uitofp_8i16_to_4f32(<8 x i16> %a) {
;
; AVX1-LABEL: uitofp_8i16_to_4f32:
; AVX1: # BB#0:
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX1-NEXT: vzeroupper
@@ -2054,6 +2076,7 @@ define <4 x float> @uitofp_8i16_to_4f32(<8 x i16> %a) {
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = uitofp <8 x i16> %a to <8 x float>
%shuf = shufflevector <8 x float> %cvt, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -2112,6 +2135,7 @@ define <4 x float> @uitofp_16i8_to_4f32(<16 x i8> %a) {
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = uitofp <16 x i8> %a to <16 x float>
%shuf = shufflevector <16 x float> %cvt, <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -2335,6 +2359,7 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtusi2ssq %rax, %xmm3, %xmm0
; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i64_to_4f32:
@@ -2351,6 +2376,7 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtusi2ssq %rax, %xmm3, %xmm0
; AVX512VL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_4i64_to_4f32:
@@ -2358,11 +2384,13 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i64_to_4f32:
; AVX512VLDQ: # BB#0:
; AVX512VLDQ-NEXT: vcvtuqq2ps %ymm0, %xmm0
+; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <4 x i64> %a to <4 x float>
ret <4 x float> %cvt
@@ -2456,10 +2484,10 @@ define <8 x float> @uitofp_8i16_to_8f32(<8 x i16> %a) {
;
; AVX1-LABEL: uitofp_8i16_to_8f32:
; AVX1: # BB#0:
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -2607,6 +2635,7 @@ define <2 x double> @sitofp_load_2i64_to_2f64(<2 x i64> *%a) {
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_load_2i64_to_2f64:
@@ -2661,7 +2690,7 @@ define <2 x double> @sitofp_load_2i16_to_2f64(<2 x i16> *%a) {
; SSE-LABEL: sitofp_load_2i16_to_2f64:
; SSE: # BB#0:
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
@@ -2723,7 +2752,7 @@ define <4 x double> @sitofp_load_4i64_to_4f64(<4 x i64> *%a) {
;
; AVX1-LABEL: sitofp_load_4i64_to_4f64:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
+; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpextrq $1, %xmm1, %rax
; AVX1-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm2
@@ -2926,6 +2955,7 @@ define <2 x double> @uitofp_load_2i64_to_2f64(<2 x i64> *%a) {
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_2i64_to_2f64:
@@ -2967,6 +2997,7 @@ define <2 x double> @uitofp_load_2i32_to_2f64(<2 x i32> *%a) {
; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_2i32_to_2f64:
@@ -2981,6 +3012,7 @@ define <2 x double> @uitofp_load_2i32_to_2f64(<2 x i32> *%a) {
; AVX512DQ-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_2i32_to_2f64:
@@ -3021,8 +3053,8 @@ define <2 x double> @uitofp_load_2i16_to_2f64(<2 x i16> *%a) {
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512VL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3,4,5,6,7]
+; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512VL-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
@@ -3037,8 +3069,8 @@ define <2 x double> @uitofp_load_2i16_to_2f64(<2 x i16> *%a) {
; AVX512VLDQ: # BB#0:
; AVX512VLDQ-NEXT: vpmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; AVX512VLDQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512VLDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512VLDQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3,4,5,6,7]
+; AVX512VLDQ-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; AVX512VLDQ-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512VLDQ-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%ld = load <2 x i16>, <2 x i16> *%a
@@ -3076,7 +3108,8 @@ define <2 x double> @uitofp_load_2i8_to_2f64(<2 x i8> *%a) {
; AVX512VL-LABEL: uitofp_load_2i8_to_2f64:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
-; AVX512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[u],zero,zero,zero,xmm0[u],zero,zero,zero
+; AVX512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512VL-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX512VL-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
@@ -3091,7 +3124,8 @@ define <2 x double> @uitofp_load_2i8_to_2f64(<2 x i8> *%a) {
; AVX512VLDQ-LABEL: uitofp_load_2i8_to_2f64:
; AVX512VLDQ: # BB#0:
; AVX512VLDQ-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
-; AVX512VLDQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[u],zero,zero,zero,xmm0[u],zero,zero,zero
+; AVX512VLDQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512VLDQ-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX512VLDQ-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%ld = load <2 x i8>, <2 x i8> *%a
@@ -3130,7 +3164,7 @@ define <4 x double> @uitofp_load_4i64_to_4f64(<4 x i64> *%a) {
;
; AVX1-LABEL: uitofp_load_4i64_to_4f64:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
+; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0]
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
@@ -3416,6 +3450,7 @@ define <4 x float> @sitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_load_4i64_to_4f32:
@@ -3433,6 +3468,7 @@ define <4 x float> @sitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
; AVX512VL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_load_4i64_to_4f32:
@@ -3440,6 +3476,7 @@ define <4 x float> @sitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_load_4i64_to_4f32:
@@ -4003,6 +4040,7 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtusi2ssq %rax, %xmm3, %xmm0
; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_4i64_to_4f32:
@@ -4020,6 +4058,7 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtusi2ssq %rax, %xmm3, %xmm0
; AVX512VL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_load_4i64_to_4f32:
@@ -4027,6 +4066,7 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_4i64_to_4f32:
@@ -4079,6 +4119,7 @@ define <4 x float> @uitofp_load_4i32_to_4f32(<4 x i32> *%a) {
; AVX512F-NEXT: vmovaps (%rdi), %xmm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_4i32_to_4f32:
@@ -4091,6 +4132,7 @@ define <4 x float> @uitofp_load_4i32_to_4f32(<4 x i32> *%a) {
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_4i32_to_4f32:
@@ -4810,6 +4852,7 @@ define void @aggregate_sitofp_8i16_to_8f32(%Arguments* nocapture readonly %a0) {
; AVX512-NEXT: vpmovsxwd 8(%rdi), %ymm0
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: vmovaps %ymm0, (%rax)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = load %Arguments, %Arguments* %a0, align 1
%2 = extractvalue %Arguments %1, 1
diff --git a/test/CodeGen/X86/vec_logical.ll b/test/CodeGen/X86/vec_logical.ll
index b632616cde88..92ec76009f6a 100644
--- a/test/CodeGen/X86/vec_logical.ll
+++ b/test/CodeGen/X86/vec_logical.ll
@@ -5,13 +5,13 @@
define void @t(<4 x float> %A) {
; SSE-LABEL: t:
; SSE: # BB#0:
-; SSE-NEXT: xorps .LCPI0_0, %xmm0
+; SSE-NEXT: xorps {{\.LCPI.*}}, %xmm0
; SSE-NEXT: movaps %xmm0, 0
; SSE-NEXT: retl
;
; AVX-LABEL: t:
; AVX: # BB#0:
-; AVX-NEXT: vxorps .LCPI0_0, %xmm0, %xmm0
+; AVX-NEXT: vxorps {{\.LCPI.*}}, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, 0
; AVX-NEXT: retl
%tmp1277 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %A
@@ -85,3 +85,22 @@ entry:
store <4 x float> %tmp30, <4 x float>* %d
ret void
}
+
+define <2 x i64> @andn_double_xor(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) {
+; SSE-LABEL: andn_double_xor:
+; SSE: # BB#0:
+; SSE-NEXT: xorps %xmm2, %xmm1
+; SSE-NEXT: andnps %xmm1, %xmm0
+; SSE-NEXT: retl
+;
+; AVX-LABEL: andn_double_xor:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vandnps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retl
+ %1 = xor <2 x i64> %a, <i64 -1, i64 -1>
+ %2 = xor <2 x i64> %b, %c
+ %3 = and <2 x i64> %1, %2
+ ret <2 x i64> %3
+}
+
diff --git a/test/CodeGen/X86/vec_minmax_match.ll b/test/CodeGen/X86/vec_minmax_match.ll
index af4410a898e3..98f77912779f 100644
--- a/test/CodeGen/X86/vec_minmax_match.ll
+++ b/test/CodeGen/X86/vec_minmax_match.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s

; These are actually tests of ValueTracking, and so may have test coverage in InstCombine or other
@@ -11,7 +11,6 @@ define <4 x i32> @smin_vec1(<4 x i32> %x) {
; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
-;
%not_x = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
%cmp = icmp sgt <4 x i32> %x, zeroinitializer
%sel = select <4 x i1> %cmp, <4 x i32> %not_x, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -25,7 +24,6 @@ define <4 x i32> @smin_vec2(<4 x i32> %x) {
; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
-;
%not_x = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
%cmp = icmp slt <4 x i32> %x, zeroinitializer
%sel = select <4 x i1> %cmp, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %not_x
@@ -41,7 +39,6 @@ define <4 x i32> @smin_vec3(<4 x i32> %x, <4 x i32> %y) {
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
-;
%sub = sub nsw <4 x i32> %x, %y
%cmp = icmp sgt <4 x i32> %x, %y
%sel = select <4 x i1> %cmp, <4 x i32> zeroinitializer, <4 x i32> %sub
@@ -57,7 +54,6 @@ define <4 x i32> @smin_vec4(<4 x i32> %x, <4 x i32> %y) {
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
-;
%sub = sub nsw <4 x i32> %x, %y
%cmp = icmp slt <4 x i32> %x, %y
%sel = select <4 x i1> %cmp, <4 x i32> %sub, <4 x i32> zeroinitializer
@@ -71,7 +67,6 @@ define <4 x i32> @smax_vec1(<4 x i32> %x) {
; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
-;
%not_x = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
%cmp = icmp slt <4 x i32> %x, zeroinitializer
%sel = select <4 x i1> %cmp, <4 x i32> %not_x, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -85,7 +80,6 @@ define <4 x i32> @smax_vec2(<4 x i32> %x) {
; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
-;
%not_x = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
%cmp = icmp sgt <4 x i32> %x, zeroinitializer
%sel = select <4 x i1> %cmp, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %not_x
@@ -101,7 +95,6 @@ define <4 x i32> @smax_vec3(<4 x i32> %x, <4 x i32> %y) {
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
-;
%sub = sub nsw <4 x i32> %x, %y
%cmp = icmp slt <4 x i32> %x, %y
%sel = select <4 x i1> %cmp, <4 x i32> zeroinitializer, <4 x i32> %sub
@@ -117,7 +110,6 @@ define <4 x i32> @smax_vec4(<4 x i32> %x, <4 x i32> %y) {
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
-;
%sub = sub nsw <4 x i32> %x, %y
%cmp = icmp sgt <4 x i32> %x, %y
%sel = select <4 x i1> %cmp, <4 x i32> %sub, <4 x i32> zeroinitializer
@@ -129,7 +121,6 @@ define <4 x i32> @umax_vec1(<4 x i32> %x) {
; CHECK: # BB#0:
; CHECK-NEXT: vpmaxud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
-;
%cmp = icmp slt <4 x i32> %x, zeroinitializer
%sel = select <4 x i1> %cmp, <4 x i32> %x, <4 x i32> <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
ret <4 x i32> %sel
@@ -140,7 +131,6 @@ define <4 x i32> @umax_vec2(<4 x i32> %x) {
; CHECK: # BB#0:
; CHECK-NEXT: vpmaxud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
-;
%cmp = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
%sel = select <4 x i1> %cmp, <4 x i32> <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147483648>, <4 x i32> %x
ret <4 x i32> %sel
@@ -151,7 +141,6 @@ define <4 x i32> @umin_vec1(<4 x i32> %x) {
; CHECK: # BB#0:
; CHECK-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
-;
%cmp = icmp slt <4 x i32> %x, zeroinitializer
%sel = select <4 x i1> %cmp, <4 x i32> <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>, <4 x i32> %x
ret <4 x i32> %sel
@@ -162,9 +151,71 @@ define <4 x i32> @umin_vec2(<4 x i32> %x) {
; CHECK: # BB#0:
; CHECK-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
-;
%cmp = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
%sel = select <4 x i1> %cmp, <4 x i32> %x, <4 x i32> <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147483648>
ret <4 x i32> %sel
}

+; The next 4 tests are value clamping with constants:
+; https://llvm.org/bugs/show_bug.cgi?id=31693
+
+; (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
+
+define <4 x i32> @clamp_signed1(<4 x i32> %x) {
+; CHECK-LABEL: clamp_signed1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT: vpmaxsd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT: retq
+ %cmp2 = icmp slt <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
+ %min = select <4 x i1> %cmp2, <4 x i32> %x, <4 x i32><i32 255, i32 255, i32 255, i32 255>
+ %cmp1 = icmp slt <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
+ %r = select <4 x i1> %cmp1, <4 x i32><i32 15, i32 15, i32 15, i32 15>, <4 x i32> %min
+ ret <4 x i32> %r
+}
+
+; (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
+
+define <4 x i32> @clamp_signed2(<4 x i32> %x) {
+; CHECK-LABEL: clamp_signed2:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpmaxsd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT: retq
+ %cmp2 = icmp sgt <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
+ %max = select <4 x i1> %cmp2, <4 x i32> %x, <4 x i32><i32 15, i32 15, i32 15, i32 15>
+ %cmp1 = icmp sgt <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
+ %r = select <4 x i1> %cmp1, <4 x i32><i32 255, i32 255, i32 255, i32 255>, <4 x i32> %max
+ ret <4 x i32> %r
+}
+
+; (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
+
+define <4 x i32> @clamp_unsigned1(<4 x i32> %x) {
+; CHECK-LABEL: clamp_unsigned1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT: vpmaxud {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT: retq
+ %cmp2 = icmp ult <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
+ %min = select <4 x i1> %cmp2, <4 x i32> %x, <4 x i32><i32 255, i32 255, i32 255, i32 255>
+ %cmp1 = icmp ult <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
+ %r = select <4 x i1> %cmp1, <4 x i32><i32 15, i32 15, i32 15, i32 15>, <4 x i32> %min
+ ret <4 x i32> %r
+}
+
+; (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
+
+define <4 x i32> @clamp_unsigned2(<4 x i32> %x) {
+; CHECK-LABEL: clamp_unsigned2:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpmaxud {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT: retq
+ %cmp2 = icmp ugt <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
+ %max = select <4 x i1> %cmp2, <4 x i32> %x, <4 x i32><i32 15, i32 15, i32 15, i32 15>
+ %cmp1 = icmp ugt <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
+ %r = select <4 x i1> %cmp1, <4 x i32><i32 255, i32 255, i32 255, i32 255>, <4 x i32> %max
+ ret <4 x i32> %r
+}
+
diff --git a/test/CodeGen/X86/vec_minmax_sint.ll b/test/CodeGen/X86/vec_minmax_sint.ll
index 419eb2bed743..5999116deb9c 100644
--- a/test/CodeGen/X86/vec_minmax_sint.ll
+++ b/test/CodeGen/X86/vec_minmax_sint.ll
@@ -46,7 +46,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE41-NEXT: pand %xmm5, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
; SSE41-NEXT: por %xmm3, %xmm0
-; SSE41-NEXT: blendvpd %xmm2, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -54,7 +54,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE42: # BB#0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
-; SSE42-NEXT: blendvpd %xmm2, %xmm1
+; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
@@ -130,9 +130,9 @@ define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE41-NEXT: pand %xmm7, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
; SSE41-NEXT: por %xmm4, %xmm0
-; SSE41-NEXT: blendvpd %xmm8, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm2
; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: blendvpd %xmm1, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: movapd %xmm3, %xmm1
; SSE41-NEXT: retq
@@ -143,9 +143,9 @@ define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE42-NEXT: movdqa %xmm1, %xmm5
; SSE42-NEXT: pcmpgtq %xmm3, %xmm5
; SSE42-NEXT: pcmpgtq %xmm2, %xmm0
-; SSE42-NEXT: blendvpd %xmm4, %xmm2
+; SSE42-NEXT: blendvpd %xmm0, %xmm4, %xmm2
; SSE42-NEXT: movdqa %xmm5, %xmm0
-; SSE42-NEXT: blendvpd %xmm1, %xmm3
+; SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE42-NEXT: movapd %xmm2, %xmm0
; SSE42-NEXT: movapd %xmm3, %xmm1
; SSE42-NEXT: retq
@@ -429,7 +429,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE41-NEXT: por %xmm0, %xmm3
; SSE41-NEXT: pcmpeqd %xmm0, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: blendvpd %xmm2, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -440,7 +440,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE42-NEXT: pcmpgtq %xmm2, %xmm3
; SSE42-NEXT: pcmpeqd %xmm0, %xmm0
; SSE42-NEXT: pxor %xmm3, %xmm0
-; SSE42-NEXT: blendvpd %xmm2, %xmm1
+; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
@@ -527,9 +527,9 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
; SSE41-NEXT: por %xmm6, %xmm0
; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: blendvpd %xmm8, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm2
; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: blendvpd %xmm1, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: movapd %xmm3, %xmm1
; SSE41-NEXT: retq
@@ -544,9 +544,9 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE42-NEXT: movdqa %xmm2, %xmm6
; SSE42-NEXT: pcmpgtq %xmm4, %xmm6
; SSE42-NEXT: pxor %xmm6, %xmm0
-; SSE42-NEXT: blendvpd %xmm4, %xmm2
+; SSE42-NEXT: blendvpd %xmm0, %xmm4, %xmm2
; SSE42-NEXT: movdqa %xmm5, %xmm0
-; SSE42-NEXT: blendvpd %xmm1, %xmm3
+; SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE42-NEXT: movapd %xmm2, %xmm0
; SSE42-NEXT: movapd %xmm3, %xmm1
; SSE42-NEXT: retq
@@ -844,7 +844,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE41-NEXT: pand %xmm5, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
; SSE41-NEXT: por %xmm3, %xmm0
-; SSE41-NEXT: blendvpd %xmm2, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -853,7 +853,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: movdqa %xmm1, %xmm0
; SSE42-NEXT: pcmpgtq %xmm2, %xmm0
-; SSE42-NEXT: blendvpd %xmm2, %xmm1
+; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
@@ -929,9 +929,9 @@ define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE41-NEXT: pand %xmm7, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
; SSE41-NEXT: por %xmm4, %xmm0
-; SSE41-NEXT: blendvpd %xmm8, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm2
; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: blendvpd %xmm1, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: movapd %xmm3, %xmm1
; SSE41-NEXT: retq
@@ -943,9 +943,9 @@ define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE42-NEXT: pcmpgtq %xmm1, %xmm5
; SSE42-NEXT: movdqa %xmm2, %xmm0
; SSE42-NEXT: pcmpgtq %xmm4, %xmm0
-; SSE42-NEXT: blendvpd %xmm4, %xmm2
+; SSE42-NEXT: blendvpd %xmm0, %xmm4, %xmm2
; SSE42-NEXT: movdqa %xmm5, %xmm0
-; SSE42-NEXT: blendvpd %xmm1, %xmm3
+; SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE42-NEXT: movapd %xmm2, %xmm0
; SSE42-NEXT: movapd %xmm3, %xmm1
; SSE42-NEXT: retq
@@ -1223,7 +1223,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE41-NEXT: por %xmm0, %xmm3
; SSE41-NEXT: pcmpeqd %xmm0, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: blendvpd %xmm2, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -1233,7 +1233,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; SSE42-NEXT: pcmpeqd %xmm3, %xmm3
; SSE42-NEXT: pxor %xmm3, %xmm0
-; SSE42-NEXT: blendvpd %xmm2, %xmm1
+; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
@@ -1320,9 +1320,9 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
; SSE41-NEXT: por %xmm6, %xmm0
; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: blendvpd %xmm8, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm2
; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: blendvpd %xmm1, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: movapd %xmm3, %xmm1
; SSE41-NEXT: retq
@@ -1336,9 +1336,9 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE42-NEXT: pxor %xmm6, %xmm5
; SSE42-NEXT: pcmpgtq %xmm2, %xmm0
; SSE42-NEXT: pxor %xmm6, %xmm0
-; SSE42-NEXT: blendvpd %xmm4, %xmm2
+; SSE42-NEXT: blendvpd %xmm0, %xmm4, %xmm2
; SSE42-NEXT: movdqa %xmm5, %xmm0
-; SSE42-NEXT: blendvpd %xmm1, %xmm3
+; SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE42-NEXT: movapd %xmm2, %xmm0
; SSE42-NEXT: movapd %xmm3, %xmm1
; SSE42-NEXT: retq
diff --git a/test/CodeGen/X86/vec_minmax_uint.ll b/test/CodeGen/X86/vec_minmax_uint.ll
index 6e48423c1520..ec5f83ea396c 100644
--- a/test/CodeGen/X86/vec_minmax_uint.ll
+++ b/test/CodeGen/X86/vec_minmax_uint.ll
@@ -46,7 +46,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE41-NEXT: pand %xmm5, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
; SSE41-NEXT: por %xmm3, %xmm0
-; SSE41-NEXT: blendvpd %xmm2, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -58,7 +58,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE42-NEXT: pxor %xmm0, %xmm3
; SSE42-NEXT: pxor %xmm2, %xmm0
; SSE42-NEXT: pcmpgtq %xmm3, %xmm0
-; SSE42-NEXT: blendvpd %xmm2, %xmm1
+; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
@@ -137,9 +137,9 @@ define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE41-NEXT: pand %xmm7, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
; SSE41-NEXT: por %xmm4, %xmm0
-; SSE41-NEXT: blendvpd %xmm8, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm2
; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: blendvpd %xmm1, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: movapd %xmm3, %xmm1
; SSE41-NEXT: retq
@@ -157,9 +157,9 @@ define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE42-NEXT: pxor %xmm0, %xmm6
; SSE42-NEXT: pxor %xmm4, %xmm0
; SSE42-NEXT: pcmpgtq %xmm6, %xmm0
-; SSE42-NEXT: blendvpd %xmm4, %xmm2
+; SSE42-NEXT: blendvpd %xmm0, %xmm4, %xmm2
; SSE42-NEXT: movdqa %xmm5, %xmm0
-; SSE42-NEXT: blendvpd %xmm1, %xmm3
+; SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE42-NEXT: movapd %xmm2, %xmm0
; SSE42-NEXT: movapd %xmm3, %xmm1
; SSE42-NEXT: retq
@@ -167,13 +167,13 @@ define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: max_gt_v4i64:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vxorps %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vxorps %xmm3, %xmm1, %xmm4
-; AVX1-NEXT: vxorps %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm4
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
@@ -468,7 +468,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE41-NEXT: por %xmm0, %xmm3
; SSE41-NEXT: pcmpeqd %xmm0, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: blendvpd %xmm2, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -481,7 +481,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE42-NEXT: pcmpgtq %xmm0, %xmm3
; SSE42-NEXT: pcmpeqd %xmm0, %xmm0
; SSE42-NEXT: pxor %xmm3, %xmm0
-; SSE42-NEXT: blendvpd %xmm2, %xmm1
+; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
@@ -571,9 +571,9 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
; SSE41-NEXT: por %xmm6, %xmm0
; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: blendvpd %xmm8, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm2
; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: blendvpd %xmm1, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: movapd %xmm3, %xmm1
; SSE41-NEXT: retq
@@ -594,9 +594,9 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE42-NEXT: pxor %xmm2, %xmm0
; SSE42-NEXT: pcmpgtq %xmm7, %xmm0
; SSE42-NEXT: pxor %xmm6, %xmm0
-; SSE42-NEXT: blendvpd %xmm4, %xmm2
+; SSE42-NEXT: blendvpd %xmm0, %xmm4, %xmm2
; SSE42-NEXT: movdqa %xmm5, %xmm0
-; SSE42-NEXT: blendvpd %xmm1, %xmm3
+; SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE42-NEXT: movapd %xmm2, %xmm0
; SSE42-NEXT: movapd %xmm3, %xmm1
; SSE42-NEXT: retq
@@ -604,15 +604,15 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: max_ge_v4i64:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vxorps %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vxorps %xmm3, %xmm0, %xmm5
-; AVX1-NEXT: vxorps %xmm3, %xmm1, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm5
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
@@ -910,7 +910,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE41-NEXT: pand %xmm5, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
; SSE41-NEXT: por %xmm3, %xmm0
-; SSE41-NEXT: blendvpd %xmm2, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -922,7 +922,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE42-NEXT: pxor %xmm0, %xmm3
; SSE42-NEXT: pxor %xmm1, %xmm0
; SSE42-NEXT: pcmpgtq %xmm3, %xmm0
-; SSE42-NEXT: blendvpd %xmm2, %xmm1
+; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
@@ -1001,9 +1001,9 @@ define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE41-NEXT: pand %xmm7, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
; SSE41-NEXT: por %xmm4, %xmm0
-; SSE41-NEXT: blendvpd %xmm8, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm2
; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: blendvpd %xmm1, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: movapd %xmm3, %xmm1
; SSE41-NEXT: retq
@@ -1021,9 +1021,9 @@ define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE42-NEXT: pxor %xmm0, %xmm6
; SSE42-NEXT: pxor %xmm2, %xmm0
; SSE42-NEXT: pcmpgtq %xmm6, %xmm0
-; SSE42-NEXT: blendvpd %xmm4, %xmm2
+; SSE42-NEXT: blendvpd %xmm0, %xmm4, %xmm2
; SSE42-NEXT: movdqa %xmm5, %xmm0
-; SSE42-NEXT: blendvpd %xmm1, %xmm3
+; SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE42-NEXT: movapd %xmm2, %xmm0
; SSE42-NEXT: movapd %xmm3, %xmm1
; SSE42-NEXT: retq
@@ -1031,13 +1031,13 @@ define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: min_lt_v4i64:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vxorps %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vxorps %xmm3, %xmm0, %xmm4
-; AVX1-NEXT: vxorps %xmm3, %xmm1, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm4
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
@@ -1330,7 +1330,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE41-NEXT: por %xmm0, %xmm3
; SSE41-NEXT: pcmpeqd %xmm0, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: blendvpd %xmm2, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -1344,7 +1344,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE42-NEXT: pcmpgtq %xmm0, %xmm3
; SSE42-NEXT: pcmpeqd %xmm0, %xmm0
; SSE42-NEXT: pxor %xmm3, %xmm0
-; SSE42-NEXT: blendvpd %xmm2, %xmm1
+; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
@@ -1434,9 +1434,9 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
; SSE41-NEXT: por %xmm6, %xmm0
; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: blendvpd %xmm8, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm2
; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: blendvpd %xmm1, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: movapd %xmm3, %xmm1
; SSE41-NEXT: retq
@@ -1457,9 +1457,9 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE42-NEXT: pxor %xmm4, %xmm0
; SSE42-NEXT: pcmpgtq %xmm7, %xmm0
; SSE42-NEXT: pxor %xmm6, %xmm0
-; SSE42-NEXT: blendvpd %xmm4, %xmm2
+; SSE42-NEXT: blendvpd %xmm0, %xmm4, %xmm2
; SSE42-NEXT: movdqa %xmm5, %xmm0
-; SSE42-NEXT: blendvpd %xmm1, %xmm3
+; SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE42-NEXT: movapd %xmm2, %xmm0
; SSE42-NEXT: movapd %xmm3, %xmm1
; SSE42-NEXT: retq
@@ -1467,15 +1467,15 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: min_le_v4i64:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vxorps %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vxorps %xmm3, %xmm1, %xmm5
-; AVX1-NEXT: vxorps %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm5
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
diff --git a/test/CodeGen/X86/vec_sdiv_to_shift.ll b/test/CodeGen/X86/vec_sdiv_to_shift.ll
index f7151af528b5..f0c9069d8c79 100644
--- a/test/CodeGen/X86/vec_sdiv_to_shift.ll
+++ b/test/CodeGen/X86/vec_sdiv_to_shift.ll
@@ -49,56 +49,6 @@ entry:
ret <8 x i16> %0
}

-define <4 x i32> @sdiv_zero(<4 x i32> %var) {
-; SSE-LABEL: sdiv_zero:
-; SSE: # BB#0: # %entry
-; SSE-NEXT: pextrd $1, %xmm0, %eax
-; SSE-NEXT: xorl %esi, %esi
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %esi
-; SSE-NEXT: movl %eax, %ecx
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %esi
-; SSE-NEXT: movd %eax, %xmm1
-; SSE-NEXT: pinsrd $1, %ecx, %xmm1
-; SSE-NEXT: pextrd $2, %xmm0, %eax
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %esi
-; SSE-NEXT: pinsrd $2, %eax, %xmm1
-; SSE-NEXT: pextrd $3, %xmm0, %eax
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %esi
-; SSE-NEXT: pinsrd $3, %eax, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: sdiv_zero:
-; AVX: # BB#0: # %entry
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: xorl %esi, %esi
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %esi
-; AVX-NEXT: movl %eax, %ecx
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %esi
-; AVX-NEXT: vmovd %eax, %xmm1
-; AVX-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
-; AVX-NEXT: vpextrd $2, %xmm0, %eax
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %esi
-; AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrd $3, %xmm0, %eax
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %esi
-; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
-; AVX-NEXT: retq
-entry:
- %0 = sdiv <4 x i32> %var, <i32 0, i32 0, i32 0, i32 0>
- ret <4 x i32> %0
-}
-
define <4 x i32> @sdiv_vec4x32(<4 x i32> %var) {
; SSE-LABEL: sdiv_vec4x32:
; SSE: # BB#0: # %entry
@@ -234,52 +184,15 @@ entry:
ret <16 x i16> %a0
}

+; Div-by-0 in any lane is UB.
+
define <4 x i32> @sdiv_non_splat(<4 x i32> %x) {
; SSE-LABEL: sdiv_non_splat:
; SSE: # BB#0:
-; SSE-NEXT: pextrd $1, %xmm0, %eax
-; SSE-NEXT: xorl %ecx, %ecx
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %ecx
-; SSE-NEXT: movd %xmm0, %edx
-; SSE-NEXT: movl %edx, %esi
-; SSE-NEXT: shrl $31, %esi
-; SSE-NEXT: addl %edx, %esi
-; SSE-NEXT: sarl %esi
-; SSE-NEXT: movd %esi, %xmm1
-; SSE-NEXT: pinsrd $1, %eax, %xmm1
-; SSE-NEXT: pextrd $2, %xmm0, %eax
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %ecx
-; SSE-NEXT: pinsrd $2, %eax, %xmm1
-; SSE-NEXT: pextrd $3, %xmm0, %eax
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %ecx
-; SSE-NEXT: pinsrd $3, %eax, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sdiv_non_splat:
; AVX: # BB#0:
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: xorl %ecx, %ecx
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %ecx
-; AVX-NEXT: vmovd %xmm0, %edx
-; AVX-NEXT: movl %edx, %esi
-; AVX-NEXT: shrl $31, %esi
-; AVX-NEXT: addl %edx, %esi
-; AVX-NEXT: sarl %esi
-; AVX-NEXT: vmovd %esi, %xmm1
-; AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrd $2, %xmm0, %eax
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %ecx
-; AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrd $3, %xmm0, %eax
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %ecx
-; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
; AVX-NEXT: retq
%y = sdiv <4 x i32> %x, <i32 2, i32 0, i32 0, i32 0>
ret <4 x i32> %y
diff --git a/test/CodeGen/X86/vec_shift4.ll b/test/CodeGen/X86/vec_shift4.ll
index 66229361990f..bef2438aecd1 100644
--- a/test/CodeGen/X86/vec_shift4.ll
+++ b/test/CodeGen/X86/vec_shift4.ll
@@ -39,18 +39,18 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
; X32-NEXT: psllw $4, %xmm3
; X32-NEXT: pand {{\.LCPI.*}}, %xmm3
; X32-NEXT: movdqa %xmm1, %xmm0
-; X32-NEXT: pblendvb %xmm3, %xmm2
+; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; X32-NEXT: movdqa %xmm2, %xmm3
; X32-NEXT: psllw $2, %xmm3
; X32-NEXT: pand {{\.LCPI.*}}, %xmm3
; X32-NEXT: paddb %xmm1, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm0
-; X32-NEXT: pblendvb %xmm3, %xmm2
+; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; X32-NEXT: movdqa %xmm2, %xmm3
; X32-NEXT: paddb %xmm3, %xmm3
; X32-NEXT: paddb %xmm1, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm0
-; X32-NEXT: pblendvb %xmm3, %xmm2
+; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; X32-NEXT: movdqa %xmm2, %xmm0
; X32-NEXT: retl
;
@@ -62,18 +62,18 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
; X64-NEXT: psllw $4, %xmm3
; X64-NEXT: pand {{.*}}(%rip), %xmm3
; X64-NEXT: movdqa %xmm1, %xmm0
-; X64-NEXT: pblendvb %xmm3, %xmm2
+; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; X64-NEXT: movdqa %xmm2, %xmm3
; X64-NEXT: psllw $2, %xmm3
; X64-NEXT: pand {{.*}}(%rip), %xmm3
; X64-NEXT: paddb %xmm1, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm0
-; X64-NEXT: pblendvb %xmm3, %xmm2
+; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; X64-NEXT: movdqa %xmm2, %xmm3
; X64-NEXT: paddb %xmm3, %xmm3
; X64-NEXT: paddb %xmm1, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm0
-; X64-NEXT: pblendvb %xmm3, %xmm2
+; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; X64-NEXT: movdqa %xmm2, %xmm0
; X64-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/vec_shift5.ll b/test/CodeGen/X86/vec_shift5.ll
index cba2b5d05041..c0226d0a4c09 100644
--- a/test/CodeGen/X86/vec_shift5.ll
+++ b/test/CodeGen/X86/vec_shift5.ll
@@ -93,8 +93,7 @@ define <4 x i32> @test6() {
define <2 x i64> @test7() {
; X32-LABEL: test7:
; X32: # BB#0:
-; X32-NEXT: movdqa {{.*#+}} xmm0 = [1,0,2,0]
-; X32-NEXT: psllq $3, %xmm0
+; X32-NEXT: movaps {{.*#+}} xmm0 = [8,0,16,0]
; X32-NEXT: retl
;
; X64-LABEL: test7:
@@ -108,8 +107,7 @@ define <2 x i64> @test7() {
define <2 x i64> @test8() {
; X32-LABEL: test8:
; X32: # BB#0:
-; X32-NEXT: movdqa {{.*#+}} xmm0 = [8,0,16,0]
-; X32-NEXT: psrlq $3, %xmm0
+; X32-NEXT: movaps {{.*#+}} xmm0 = [1,0,2,0]
; X32-NEXT: retl
;
; X64-LABEL: test8:
@@ -151,8 +149,7 @@ define <4 x i32> @test10() {
define <2 x i64> @test11() {
; X32-LABEL: test11:
; X32: # BB#0:
-; X32-NEXT: movdqa {{.*#+}} xmm0 = <u,u,31,0>
-; X32-NEXT: psrlq $3, %xmm0
+; X32-NEXT: movaps {{.*#+}} xmm0 = <u,u,3,0>
; X32-NEXT: retl
;
; X64-LABEL: test11:
@@ -222,8 +219,7 @@ define <4 x i32> @test15() {
define <2 x i64> @test16() {
; X32-LABEL: test16:
; X32: # BB#0:
-; X32-NEXT: movdqa {{.*#+}} xmm0 = <u,u,31,0>
-; X32-NEXT: psllq $3, %xmm0
+; X32-NEXT: movaps {{.*#+}} xmm0 = <u,u,248,0>
; X32-NEXT: retl
;
; X64-LABEL: test16:
diff --git a/test/CodeGen/X86/vec_shift7.ll b/test/CodeGen/X86/vec_shift7.ll
index 80d72a4a986f..64c64c392544 100644
--- a/test/CodeGen/X86/vec_shift7.ll
+++ b/test/CodeGen/X86/vec_shift7.ll
@@ -10,17 +10,14 @@ define i64 @test1(<2 x i64> %a) {
; X32-NEXT: movdqa %xmm0, %xmm1
; X32-NEXT: psllq $2, %xmm1
; X32-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
-; X32-NEXT: movd %xmm1, %eax
-; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; X32-NEXT: movd %xmm0, %edx
+; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; X32-NEXT: movd %xmm1, %edx
+; X32-NEXT: movd %xmm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: test1:
; X64: # BB#0: # %entry
-; X64-NEXT: movdqa %xmm0, %xmm1
-; X64-NEXT: psllq $2, %xmm1
-; X64-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
-; X64-NEXT: movd %xmm1, %rax
+; X64-NEXT: movd %xmm0, %rax
; X64-NEXT: retq
entry:
%c = shl <2 x i64> %a, <i64 0, i64 2>
diff --git a/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll b/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
index a1b46b9324d3..7df3c3070422 100644
--- a/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
+++ b/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
@@ -77,6 +77,7 @@ define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
; AVX512F-NEXT: # kill
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512F-NEXT: # kill
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_uitofp_v4i32_to_v4f32:
@@ -102,9 +103,6 @@ define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
; AVX2: [[FPMASKCSTADDR_v8:.LCPI[0-9_]+]]:
; AVX2-NEXT: .long 1199570944 # float 65536

-; AVX2: [[MASKCSTADDR_v8:.LCPI[0-9_]+]]:
-; AVX2-NEXT: .long 65535 # 0xffff
-
define <8 x float> @test_uitofp_v8i32_to_v8f32(<8 x i32> %arg) {
; SSE2-LABEL: test_uitofp_v8i32_to_v8f32:
; SSE2: # BB#0:
@@ -165,8 +163,8 @@ define <8 x float> @test_uitofp_v8i32_to_v8f32(<8 x i32> %arg) {
; AVX2-NEXT: vcvtdq2ps %ymm1, %ymm1
; AVX2-NEXT: vbroadcastss [[FPMASKCSTADDR_v8]](%rip), %ymm2
; AVX2-NEXT: vmulps %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpbroadcastd [[MASKCSTADDR_v8]](%rip), %ymm2
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vxorps %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: vaddps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
diff --git a/test/CodeGen/X86/vec_unsafe-fp-math.ll b/test/CodeGen/X86/vec_unsafe-fp-math.ll
index 827d4184d111..1c352782fca4 100644
--- a/test/CodeGen/X86/vec_unsafe-fp-math.ll
+++ b/test/CodeGen/X86/vec_unsafe-fp-math.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -enable-unsafe-fp-math -mtriple=x86_64-unknown-unknown -mcpu=corei7 | FileCheck %s
+; RUN: llc < %s -enable-unsafe-fp-math -enable-no-signed-zeros-fp-math -mtriple=x86_64-unknown-unknown -mcpu=corei7 | FileCheck %s

; Make sure that vectors get the same benefits as scalars when using unsafe-fp-math.
diff --git a/test/CodeGen/X86/vec_zero_cse.ll b/test/CodeGen/X86/vec_zero_cse.ll
index 8ed8083a284f..75e85348ba8d 100644
--- a/test/CodeGen/X86/vec_zero_cse.ll
+++ b/test/CodeGen/X86/vec_zero_cse.ll
@@ -1,4 +1,7 @@
-; RUN: llc < %s -relocation-model=static -mtriple=i686-unknown -mattr=+mmx,+sse3 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -relocation-model=static -mtriple=i686-unknown -mattr=+mmx,+sse3 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -relocation-model=static -mtriple=x86_64-unknown -mattr=+mmx,+sse3 | FileCheck %s --check-prefix=X64
+
; 64-bit stores here do not use MMX.

@M1 = external global <1 x i64>
@@ -8,35 +11,78 @@
@S2 = external global <4 x i32>

define void @test1() {
-;CHECK-LABEL: @test1
-;CHECK: xorps
+; X32-LABEL: test1:
+; X32: # BB#0:
+; X32-NEXT: movl $0, M1+4
+; X32-NEXT: movl $0, M1
+; X32-NEXT: xorps %xmm0, %xmm0
+; X32-NEXT: movlps %xmm0, M2
+; X32-NEXT: retl
+;
+; X64-LABEL: test1:
+; X64: # BB#0:
+; X64-NEXT: movq $0, {{.*}}(%rip)
+; X64-NEXT: movq $0, {{.*}}(%rip)
+; X64-NEXT: retq
store <1 x i64> zeroinitializer, <1 x i64>* @M1
store <2 x i32> zeroinitializer, <2 x i32>* @M2
ret void
}

define void @test2() {
-;CHECK-LABEL: @test2
-;CHECK: pcmpeqd
+; X32-LABEL: test2:
+; X32: # BB#0:
+; X32-NEXT: movl $-1, M1+4
+; X32-NEXT: movl $-1, M1
+; X32-NEXT: pcmpeqd %xmm0, %xmm0
+; X32-NEXT: movq %xmm0, M2
+; X32-NEXT: retl
+;
+; X64-LABEL: test2:
+; X64: # BB#0:
+; X64-NEXT: movq $-1, {{.*}}(%rip)
+; X64-NEXT: movq {{.*}}(%rip), %rax
+; X64-NEXT: movq %rax, {{.*}}(%rip)
+; X64-NEXT: retq
store <1 x i64> < i64 -1 >, <1 x i64>* @M1
store <2 x i32> < i32 -1, i32 -1 >, <2 x i32>* @M2
ret void
}

define void @test3() {
-;CHECK-LABEL: @test3
-;CHECK: xorps
+; X32-LABEL: test3:
+; X32: # BB#0:
+; X32-NEXT: xorps %xmm0, %xmm0
+; X32-NEXT: movaps %xmm0, S1
+; X32-NEXT: movaps %xmm0, S2
+; X32-NEXT: retl
+;
+; X64-LABEL: test3:
+; X64: # BB#0:
+; X64-NEXT: xorps %xmm0, %xmm0
+; X64-NEXT: movaps %xmm0, {{.*}}(%rip)
+; X64-NEXT: movaps %xmm0, {{.*}}(%rip)
+; X64-NEXT: retq
store <2 x i64> zeroinitializer, <2 x i64>* @S1
store <4 x i32> zeroinitializer, <4 x i32>* @S2
ret void
}

define void @test4() {
-;CHECK-LABEL: @test4
-;CHECK: pcmpeqd
+; X32-LABEL: test4:
+; X32: # BB#0:
+; X32-NEXT: pcmpeqd %xmm0, %xmm0
+; X32-NEXT: movdqa %xmm0, S1
+; X32-NEXT: movdqa %xmm0, S2
+; X32-NEXT: retl
+;
+; X64-LABEL: test4:
+; X64: # BB#0:
+; X64-NEXT: pcmpeqd %xmm0, %xmm0
+; X64-NEXT: movdqa %xmm0, {{.*}}(%rip)
+; X64-NEXT: movdqa %xmm0, {{.*}}(%rip)
+; X64-NEXT: retq
store <2 x i64> < i64 -1, i64 -1>, <2 x i64>* @S1
store <4 x i32> < i32 -1, i32 -1, i32 -1, i32 -1 >, <4 x i32>* @S2
ret void
}
-
-
diff --git a/test/CodeGen/X86/vector-bitreverse.ll b/test/CodeGen/X86/vector-bitreverse.ll
index f9746bcfcdee..226c0adbaf3c 100644
--- a/test/CodeGen/X86/vector-bitreverse.ll
+++ b/test/CodeGen/X86/vector-bitreverse.ll
@@ -613,8 +613,8 @@ define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: test_bitreverse_v32i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
@@ -622,7 +622,7 @@ define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX1-NEXT: vpshufb %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -1361,8 +1361,8 @@ define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; AVX1-LABEL: test_bitreverse_v64i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm2
@@ -1370,7 +1370,7 @@ define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm6, %xmm2
; AVX1-NEXT: vpor %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm4
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm4
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
@@ -1378,13 +1378,13 @@ define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; AVX1-NEXT: vpor %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm4
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm6, %xmm2
; AVX1-NEXT: vpor %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm4
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm4
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
@@ -2447,6 +2447,93 @@ define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind {
ret <8 x i64> %b
}

+;
+; Constant Folding
+;
+
+define i32 @fold_bitreverse_i32() nounwind {
+; ALL-LABEL: fold_bitreverse_i32:
+; ALL: # BB#0:
+; ALL-NEXT: movl $16711935, %eax # imm = 0xFF00FF
+; ALL-NEXT: retq
+ %b = call i32 @llvm.bitreverse.i32(i32 4278255360)
+ ret i32 %b
+}
+
+define <16 x i8> @fold_bitreverse_v16i8() nounwind {
+; SSE-LABEL: fold_bitreverse_v16i8:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,255,64,191,32,223,96,159,16,239,80,175,48,207,112,143]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: fold_bitreverse_v16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,64,191,32,223,96,159,16,239,80,175,48,207,112,143]
+; AVX-NEXT: retq
+;
+; XOP-LABEL: fold_bitreverse_v16i8:
+; XOP: # BB#0:
+; XOP-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,64,191,32,223,96,159,16,239,80,175,48,207,112,143]
+; XOP-NEXT: retq
+ %b = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> <i8 0, i8 -1, i8 2, i8 -3, i8 4, i8 -5, i8 6, i8 -7, i8 8, i8 -9, i8 10, i8 -11, i8 12, i8 -13, i8 14, i8 -15>)
+ ret <16 x i8> %b
+}
+
+define <16 x i16> @fold_bitreverse_v16i16() nounwind {
+; SSE-LABEL: fold_bitreverse_v16i16:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,65535,16384,49151,8192,57343,24576,40959]
+; SSE-NEXT: movaps {{.*#+}} xmm1 = [4096,61439,20480,45055,12288,53247,28672,36863]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: fold_bitreverse_v16i16:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,65535,16384,49151,8192,57343,24576,40959,4096,61439,20480,45055,12288,53247,28672,36863]
+; AVX-NEXT: retq
+;
+; XOP-LABEL: fold_bitreverse_v16i16:
+; XOP: # BB#0:
+; XOP-NEXT: vmovaps {{.*#+}} ymm0 = [0,65535,16384,49151,8192,57343,24576,40959,4096,61439,20480,45055,12288,53247,28672,36863]
+; XOP-NEXT: retq
+ %b = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> <i16 0, i16 -1, i16 2, i16 -3, i16 4, i16 -5, i16 6, i16 -7, i16 8, i16 -9, i16 10, i16 -11, i16 12, i16 -13, i16 14, i16 -15>)
+ ret <16 x i16> %b
+}
+
+define <16 x i32> @fold_bitreverse_v16i32() nounwind {
+; SSE-LABEL: fold_bitreverse_v16i32:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,4294967295,1073741824,3221225471]
+; SSE-NEXT: movaps {{.*#+}} xmm1 = [536870912,3758096383,1610612736,2684354559]
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [268435456,4026531839,1342177280,2952790015]
+; SSE-NEXT: movaps {{.*#+}} xmm3 = [805306368,3489660927,1879048192,2415919103]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: fold_bitreverse_v16i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559]
+; AVX1-NEXT: vmovaps {{.*#+}} ymm1 = [268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: fold_bitreverse_v16i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovaps {{.*#+}} ymm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559]
+; AVX2-NEXT: vmovaps {{.*#+}} ymm1 = [268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: fold_bitreverse_v16i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovaps {{.*#+}} zmm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559,268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
+; AVX512-NEXT: retq
+;
+; XOP-LABEL: fold_bitreverse_v16i32:
+; XOP: # BB#0:
+; XOP-NEXT: vmovaps {{.*#+}} ymm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559]
+; XOP-NEXT: vmovaps {{.*#+}} ymm1 = [268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
+; XOP-NEXT: retq
+ %b = call <16 x i32> @llvm.bitreverse.v16i32(<16 x i32> <i32 0, i32 -1, i32 2, i32 -3, i32 4, i32 -5, i32 6, i32 -7, i32 8, i32 -9, i32 10, i32 -11, i32 12, i32 -13, i32 14, i32 -15>)
+ ret <16 x i32> %b
+}
+
declare i8 @llvm.bitreverse.i8(i8) readnone
declare i16 @llvm.bitreverse.i16(i16) readnone
declare i32 @llvm.bitreverse.i32(i32) readnone
diff --git a/test/CodeGen/X86/vector-blend.ll b/test/CodeGen/X86/vector-blend.ll
index d3bac61959dc..a05a981daa1f 100644
--- a/test/CodeGen/X86/vector-blend.ll
+++ b/test/CodeGen/X86/vector-blend.ll
@@ -274,7 +274,7 @@ define <16 x i8> @vsel_i8(<16 x i8> %v1, <16 x i8> %v2) {
; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -500,7 +500,7 @@ define <2 x double> @testa(<2 x double> %x, <2 x double> %y) {
; SSE41-NEXT: movapd %xmm0, %xmm2
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: cmplepd %xmm2, %xmm0
-; SSE41-NEXT: blendvpd %xmm2, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -539,7 +539,7 @@ define <2 x double> @testb(<2 x double> %x, <2 x double> %y) {
; SSE41-NEXT: movapd %xmm0, %xmm2
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: cmpnlepd %xmm2, %xmm0
-; SSE41-NEXT: blendvpd %xmm2, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -653,8 +653,8 @@ define <32 x i8> @constant_pblendvb_avx2(<32 x i8> %xyzw, <32 x i8> %abcd) {
; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0]
-; SSE41-NEXT: pblendvb %xmm4, %xmm2
-; SSE41-NEXT: pblendvb %xmm1, %xmm3
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: movdqa %xmm3, %xmm1
; SSE41-NEXT: retq
@@ -822,7 +822,7 @@ define <4 x i32> @blend_logic_v4i32(<4 x i32> %b, <4 x i32> %a, <4 x i32> %c) {
; SSE41-LABEL: blend_logic_v4i32:
; SSE41: # BB#0: # %entry
; SSE41-NEXT: psrad $31, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
@@ -870,9 +870,9 @@ define <8 x i32> @blend_logic_v8i32(<8 x i32> %b, <8 x i32> %a, <8 x i32> %c) {
; SSE41: # BB#0: # %entry
; SSE41-NEXT: psrad $31, %xmm1
; SSE41-NEXT: psrad $31, %xmm0
-; SSE41-NEXT: pblendvb %xmm2, %xmm4
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm3, %xmm5
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm5
; SSE41-NEXT: movdqa %xmm4, %xmm0
; SSE41-NEXT: movdqa %xmm5, %xmm1
; SSE41-NEXT: retq
@@ -1028,7 +1028,7 @@ define <4 x i32> @blend_neg_logic_v4i32_2(<4 x i32> %v, <4 x i32> %c) {
; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: psubd %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: blendvps %xmm2, %xmm3
+; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm3
; SSE41-NEXT: movaps %xmm3, %xmm0
; SSE41-NEXT: retq
;
diff --git a/test/CodeGen/X86/vector-compare-all_of.ll b/test/CodeGen/X86/vector-compare-all_of.ll
new file mode 100644
index 000000000000..316df2780d16
--- /dev/null
+++ b/test/CodeGen/X86/vector-compare-all_of.ll
@@ -0,0 +1,946 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=AVX512
+
+define i64 @test_v2f64_sext(<2 x double> %a0, <2 x double> %a1) {
+; SSE-LABEL: test_v2f64_sext:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltpd %xmm0, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: movd %xmm0, %rax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v2f64_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vandpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v2f64_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vcmpltpd %xmm0, %xmm1, %k1
+; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %rax
+; AVX512-NEXT: retq
+ %c = fcmp ogt <2 x double> %a0, %a1
+ %s = sext <2 x i1> %c to <2 x i64>
+ %1 = shufflevector <2 x i64> %s, <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+ %2 = and <2 x i64> %s, %1
+ %3 = extractelement <2 x i64> %2, i32 0
+ ret i64 %3
+}
+
+define i64 @test_v4f64_sext(<4 x double> %a0, <4 x double> %a1) {
+; SSE-LABEL: test_v4f64_sext:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltpd %xmm1, %xmm3
+; SSE-NEXT: cmpltpd %xmm0, %xmm2
+; SSE-NEXT: andpd %xmm3, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: movd %xmm0, %rax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v4f64_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
+; AVX-NEXT: vmovmskpd %ymm0, %eax
+; AVX-NEXT: xorl %ecx, %ecx
+; AVX-NEXT: cmpl $15, %eax
+; AVX-NEXT: movq $-1, %rax
+; AVX-NEXT: cmovneq %rcx, %rax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v4f64_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k1
+; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vmovq %xmm0, %rax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = fcmp ogt <4 x double> %a0, %a1
+ %s = sext <4 x i1> %c to <4 x i64>
+ %1 = shufflevector <4 x i64> %s, <4 x i64> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %2 = and <4 x i64> %s, %1
+ %3 = shufflevector <4 x i64> %2, <4 x i64> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %4 = and <4 x i64> %2, %3
+ %5 = extractelement <4 x i64> %4, i64 0
+ ret i64 %5
+}
+
+define i64 @test_v4f64_legal_sext(<4 x double> %a0, <4 x double> %a1) {
+; SSE-LABEL: test_v4f64_legal_sext:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltpd %xmm1, %xmm3
+; SSE-NEXT: cmpltpd %xmm0, %xmm2
+; SSE-NEXT: packsswb %xmm3, %xmm2
+; SSE-NEXT: movmskps %xmm2, %eax
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: cmpl $15, %eax
+; SSE-NEXT: movl $-1, %eax
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: cltq
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v4f64_legal_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovmskps %xmm0, %eax
+; AVX-NEXT: xorl %ecx, %ecx
+; AVX-NEXT: cmpl $15, %eax
+; AVX-NEXT: movl $-1, %eax
+; AVX-NEXT: cmovnel %ecx, %eax
+; AVX-NEXT: cltq
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v4f64_legal_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k1
+; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: cltq
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = fcmp ogt <4 x double> %a0, %a1
+ %s = sext <4 x i1> %c to <4 x i32>
+ %1 = shufflevector <4 x i32> %s, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %2 = and <4 x i32> %s, %1
+ %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %4 = and <4 x i32> %2, %3
+ %5 = extractelement <4 x i32> %4, i64 0
+ %6 = sext i32 %5 to i64
+ ret i64 %6
+}
+
+define i32 @test_v4f32_sext(<4 x float> %a0, <4 x float> %a1) {
+; SSE-LABEL: test_v4f32_sext:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltps %xmm0, %xmm1
+; SSE-NEXT: movmskps %xmm1, %eax
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: cmpl $15, %eax
+; SSE-NEXT: movl $-1, %eax
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v4f32_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmovmskps %xmm0, %eax
+; AVX-NEXT: xorl %ecx, %ecx
+; AVX-NEXT: cmpl $15, %eax
+; AVX-NEXT: movl $-1, %eax
+; AVX-NEXT: cmovnel %ecx, %eax
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v4f32_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vcmpltps %xmm0, %xmm1, %k1
+; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: retq
+ %c = fcmp ogt <4 x float> %a0, %a1
+ %s = sext <4 x i1> %c to <4 x i32>
+ %1 = shufflevector <4 x i32> %s, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %2 = and <4 x i32> %s, %1
+ %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %4 = and <4 x i32> %2, %3
+ %5 = extractelement <4 x i32> %4, i32 0
+ ret i32 %5
+}
+
+define i32 @test_v8f32_sext(<8 x float> %a0, <8 x float> %a1) {
+; SSE-LABEL: test_v8f32_sext:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltps %xmm1, %xmm3
+; SSE-NEXT: cmpltps %xmm0, %xmm2
+; SSE-NEXT: andps %xmm3, %xmm2
+; SSE-NEXT: movmskps %xmm2, %eax
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: cmpl $15, %eax
+; SSE-NEXT: movl $-1, %eax
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v8f32_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
+; AVX-NEXT: vmovmskps %ymm0, %eax
+; AVX-NEXT: xorl %ecx, %ecx
+; AVX-NEXT: cmpl $255, %eax
+; AVX-NEXT: movl $-1, %eax
+; AVX-NEXT: cmovnel %ecx, %eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v8f32_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k1
+; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = fcmp ogt <8 x float> %a0, %a1
+ %s = sext <8 x i1> %c to <8 x i32>
+ %1 = shufflevector <8 x i32> %s, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = and <8 x i32> %s, %1
+ %3 = shufflevector <8 x i32> %2, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = and <8 x i32> %2, %3
+ %5 = shufflevector <8 x i32> %4, <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = and <8 x i32> %4, %5
+ %7 = extractelement <8 x i32> %6, i32 0
+ ret i32 %7
+}
+
+define i32 @test_v8f32_legal_sext(<8 x float> %a0, <8 x float> %a1) {
+; SSE-LABEL: test_v8f32_legal_sext:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltps %xmm1, %xmm3
+; SSE-NEXT: cmpltps %xmm0, %xmm2
+; SSE-NEXT: packsswb %xmm3, %xmm2
+; SSE-NEXT: pmovmskb %xmm2, %eax
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; SSE-NEXT: movl $-1, %eax
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v8f32_legal_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpmovmskb %xmm0, %eax
+; AVX-NEXT: xorl %ecx, %ecx
+; AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; AVX-NEXT: movl $-1, %eax
+; AVX-NEXT: cmovnel %ecx, %eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v8f32_legal_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k0
+; AVX512-NEXT: vpmovm2w %k0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: cwtl
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = fcmp ogt <8 x float> %a0, %a1
+ %s = sext <8 x i1> %c to <8 x i16>
+ %1 = shufflevector <8 x i16> %s, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = and <8 x i16> %s, %1
+ %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = and <8 x i16> %2, %3
+ %5 = shufflevector <8 x i16> %4, <8 x i16> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = and <8 x i16> %4, %5
+ %7 = extractelement <8 x i16> %6, i32 0
+ %8 = sext i16 %7 to i32
+ ret i32 %8
+}
+
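+; Integer variants of the same AND-reduction: pcmpgt* already yields the
+; per-lane all-ones/zero mask, so no fcmp lowering is involved; the
+; reduction and movmsk/cmov tail are unchanged.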
+define i64 @test_v2i64_sext(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE-LABEL: test_v2i64_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtq %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE-NEXT: pand %xmm0, %xmm1
+; SSE-NEXT: movd %xmm1, %rax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v2i64_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v2i64_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
+; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %rax
+; AVX512-NEXT: retq
+ %c = icmp sgt <2 x i64> %a0, %a1
+ %s = sext <2 x i1> %c to <2 x i64>
+ %1 = shufflevector <2 x i64> %s, <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+ %2 = and <2 x i64> %s, %1
+ %3 = extractelement <2 x i64> %2, i32 0
+ ret i64 %3
+}
+
+define i64 @test_v4i64_sext(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE-LABEL: test_v4i64_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtq %xmm3, %xmm1
+; SSE-NEXT: pcmpgtq %xmm2, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE-NEXT: pand %xmm0, %xmm1
+; SSE-NEXT: movd %xmm1, %rax
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_v4i64_sext:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovmskpd %ymm0, %eax
+; AVX1-NEXT: xorl %ecx, %ecx
+; AVX1-NEXT: cmpl $15, %eax
+; AVX1-NEXT: movq $-1, %rax
+; AVX1-NEXT: cmovneq %rcx, %rax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_v4i64_sext:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovmskpd %ymm0, %eax
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: cmpl $15, %eax
+; AVX2-NEXT: movq $-1, %rax
+; AVX2-NEXT: cmovneq %rcx, %rax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v4i64_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
+; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vmovq %xmm0, %rax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = icmp sgt <4 x i64> %a0, %a1
+ %s = sext <4 x i1> %c to <4 x i64>
+ %1 = shufflevector <4 x i64> %s, <4 x i64> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %2 = and <4 x i64> %s, %1
+ %3 = shufflevector <4 x i64> %2, <4 x i64> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %4 = and <4 x i64> %2, %3
+ %5 = extractelement <4 x i64> %4, i64 0
+ ret i64 %5
+}
+
+define i64 @test_v4i64_legal_sext(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE-LABEL: test_v4i64_legal_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtq %xmm3, %xmm1
+; SSE-NEXT: pcmpgtq %xmm2, %xmm0
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: movmskps %xmm0, %eax
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: cmpl $15, %eax
+; SSE-NEXT: movl $-1, %eax
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: cltq
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_v4i64_legal_sext:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vmovmskps %xmm0, %eax
+; AVX1-NEXT: xorl %ecx, %ecx
+; AVX1-NEXT: cmpl $15, %eax
+; AVX1-NEXT: movl $-1, %eax
+; AVX1-NEXT: cmovnel %ecx, %eax
+; AVX1-NEXT: cltq
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_v4i64_legal_sext:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovmskps %xmm0, %eax
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: cmpl $15, %eax
+; AVX2-NEXT: movl $-1, %eax
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: cltq
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v4i64_legal_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
+; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: cltq
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = icmp sgt <4 x i64> %a0, %a1
+ %s = sext <4 x i1> %c to <4 x i32>
+ %1 = shufflevector <4 x i32> %s, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %2 = and <4 x i32> %s, %1
+ %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %4 = and <4 x i32> %2, %3
+ %5 = extractelement <4 x i32> %4, i64 0
+ %6 = sext i32 %5 to i64
+ ret i64 %6
+}
+
+define i32 @test_v4i32_sext(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: test_v4i32_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE-NEXT: movmskps %xmm0, %eax
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: cmpl $15, %eax
+; SSE-NEXT: movl $-1, %eax
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v4i32_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovmskps %xmm0, %eax
+; AVX-NEXT: xorl %ecx, %ecx
+; AVX-NEXT: cmpl $15, %eax
+; AVX-NEXT: movl $-1, %eax
+; AVX-NEXT: cmovnel %ecx, %eax
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v4i32_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
+; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: retq
+ %c = icmp sgt <4 x i32> %a0, %a1
+ %s = sext <4 x i1> %c to <4 x i32>
+ %1 = shufflevector <4 x i32> %s, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %2 = and <4 x i32> %s, %1
+ %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %4 = and <4 x i32> %2, %3
+ %5 = extractelement <4 x i32> %4, i32 0
+ ret i32 %5
+}
+
+define i32 @test_v8i32_sext(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE-LABEL: test_v8i32_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtd %xmm3, %xmm1
+; SSE-NEXT: pcmpgtd %xmm2, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: movmskps %xmm0, %eax
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: cmpl $15, %eax
+; SSE-NEXT: movl $-1, %eax
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_v8i32_sext:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovmskps %ymm0, %eax
+; AVX1-NEXT: xorl %ecx, %ecx
+; AVX1-NEXT: cmpl $255, %eax
+; AVX1-NEXT: movl $-1, %eax
+; AVX1-NEXT: cmovnel %ecx, %eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_v8i32_sext:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovmskps %ymm0, %eax
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: cmpl $255, %eax
+; AVX2-NEXT: movl $-1, %eax
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v8i32_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
+; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = icmp sgt <8 x i32> %a0, %a1
+ %s = sext <8 x i1> %c to <8 x i32>
+ %1 = shufflevector <8 x i32> %s, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = and <8 x i32> %s, %1
+ %3 = shufflevector <8 x i32> %2, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = and <8 x i32> %2, %3
+ %5 = shufflevector <8 x i32> %4, <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = and <8 x i32> %4, %5
+ %7 = extractelement <8 x i32> %6, i32 0
+ ret i32 %7
+}
+
+define i32 @test_v8i32_legal_sext(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE-LABEL: test_v8i32_legal_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtd %xmm3, %xmm1
+; SSE-NEXT: pcmpgtd %xmm2, %xmm0
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; SSE-NEXT: movl $-1, %eax
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_v8i32_legal_sext:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: xorl %ecx, %ecx
+; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; AVX1-NEXT: movl $-1, %eax
+; AVX1-NEXT: cmovnel %ecx, %eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_v8i32_legal_sext:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; AVX2-NEXT: movl $-1, %eax
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v8i32_legal_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
+; AVX512-NEXT: vpmovm2w %k0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: cwtl
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = icmp sgt <8 x i32> %a0, %a1
+ %s = sext <8 x i1> %c to <8 x i16>
+ %1 = shufflevector <8 x i16> %s, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = and <8 x i16> %s, %1
+ %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = and <8 x i16> %2, %3
+ %5 = shufflevector <8 x i16> %4, <8 x i16> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = and <8 x i16> %4, %5
+ %7 = extractelement <8 x i16> %6, i32 0
+ %8 = sext i16 %7 to i32
+ ret i32 %8
+}
+
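+; i16 lanes have no dedicated movmsk, so the lowering uses pmovmskb (one bit
+; per byte) and tests the full 16-bit mask against 0xFFFF.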
+define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: test_v8i16_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtw %xmm1, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; SSE-NEXT: movl $-1, %eax
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v8i16_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpmovmskb %xmm0, %eax
+; AVX-NEXT: xorl %ecx, %ecx
+; AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; AVX-NEXT: movl $-1, %eax
+; AVX-NEXT: cmovnel %ecx, %eax
+; AVX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v8i16_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
+; AVX512-NEXT: vpmovm2w %k0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT: retq
+ %c = icmp sgt <8 x i16> %a0, %a1
+ %s = sext <8 x i1> %c to <8 x i16>
+ %1 = shufflevector <8 x i16> %s, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = and <8 x i16> %s, %1
+ %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = and <8 x i16> %2, %3
+ %5 = shufflevector <8 x i16> %4, <8 x i16> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = and <8 x i16> %4, %5
+ %7 = extractelement <8 x i16> %6, i32 0
+ ret i16 %7
+}
+
+define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE-LABEL: test_v16i16_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtw %xmm3, %xmm1
+; SSE-NEXT: pcmpgtw %xmm2, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; SSE-NEXT: movl $-1, %eax
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_v16i16_sext:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_v16i16_sext:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpmovmskb %ymm0, %ecx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: cmpl $-1, %ecx
+; AVX2-NEXT: cmovel %ecx, %eax
+; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v16i16_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
+; AVX512-NEXT: vpmovm2w %k0, %ymm0
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = icmp sgt <16 x i16> %a0, %a1
+ %s = sext <16 x i1> %c to <16 x i16>
+ %1 = shufflevector <16 x i16> %s, <16 x i16> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = and <16 x i16> %s, %1
+ %3 = shufflevector <16 x i16> %2, <16 x i16> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = and <16 x i16> %2, %3
+ %5 = shufflevector <16 x i16> %4, <16 x i16> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = and <16 x i16> %4, %5
+ %7 = shufflevector <16 x i16> %6, <16 x i16> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %8 = and <16 x i16> %6, %7
+ %9 = extractelement <16 x i16> %8, i32 0
+ ret i16 %9
+}
+
+define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE-LABEL: test_v16i16_legal_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtw %xmm3, %xmm1
+; SSE-NEXT: pcmpgtw %xmm2, %xmm0
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; SSE-NEXT: movl $-1, %eax
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_v16i16_legal_sext:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: xorl %ecx, %ecx
+; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; AVX1-NEXT: movl $-1, %eax
+; AVX1-NEXT: cmovnel %ecx, %eax
+; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_v16i16_legal_sext:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; AVX2-NEXT: movl $-1, %eax
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v16i16_legal_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
+; AVX512-NEXT: vpmovm2b %k0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpextrb $0, %xmm0, %eax
+; AVX512-NEXT: movsbl %al, %eax
+; AVX512-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = icmp sgt <16 x i16> %a0, %a1
+ %s = sext <16 x i1> %c to <16 x i8>
+ %1 = shufflevector <16 x i8> %s, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = and <16 x i8> %s, %1
+ %3 = shufflevector <16 x i8> %2, <16 x i8> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = and <16 x i8> %2, %3
+ %5 = shufflevector <16 x i8> %4, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = and <16 x i8> %4, %5
+ %7 = shufflevector <16 x i8> %6, <16 x i8> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %8 = and <16 x i8> %6, %7
+ %9 = extractelement <16 x i8> %8, i32 0
+ %10 = sext i8 %9 to i16
+ ret i16 %10
+}
+
+define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE-LABEL: test_v16i8_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtb %xmm1, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; SSE-NEXT: movl $-1, %eax
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v16i8_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpmovmskb %xmm0, %eax
+; AVX-NEXT: xorl %ecx, %ecx
+; AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; AVX-NEXT: movl $-1, %eax
+; AVX-NEXT: cmovnel %ecx, %eax
+; AVX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v16i8_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
+; AVX512-NEXT: vpmovm2b %k0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpextrb $0, %xmm0, %eax
+; AVX512-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: retq
+ %c = icmp sgt <16 x i8> %a0, %a1
+ %s = sext <16 x i1> %c to <16 x i8>
+ %1 = shufflevector <16 x i8> %s, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = and <16 x i8> %s, %1
+ %3 = shufflevector <16 x i8> %2, <16 x i8> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = and <16 x i8> %2, %3
+ %5 = shufflevector <16 x i8> %4, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = and <16 x i8> %4, %5
+ %7 = shufflevector <16 x i8> %6, <16 x i8> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %8 = and <16 x i8> %6, %7
+ %9 = extractelement <16 x i8> %8, i32 0
+ ret i8 %9
+}
+
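+; For 32 byte lanes, AVX2 tests the full 32-bit vpmovmskb result against -1
+; (all bits set) rather than an immediate lane mask.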
+define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
+; SSE-LABEL: test_v32i8_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtb %xmm3, %xmm1
+; SSE-NEXT: pcmpgtb %xmm2, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; SSE-NEXT: movl $-1, %eax
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_v32i8_sext:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_v32i8_sext:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpmovmskb %ymm0, %ecx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: cmpl $-1, %ecx
+; AVX2-NEXT: cmovel %ecx, %eax
+; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v32i8_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtb %ymm1, %ymm0, %k0
+; AVX512-NEXT: vpmovm2b %k0, %ymm0
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpextrb $0, %xmm0, %eax
+; AVX512-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = icmp sgt <32 x i8> %a0, %a1
+ %s = sext <32 x i1> %c to <32 x i8>
+ %1 = shufflevector <32 x i8> %s, <32 x i8> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = and <32 x i8> %s, %1
+ %3 = shufflevector <32 x i8> %2, <32 x i8> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = and <32 x i8> %2, %3
+ %5 = shufflevector <32 x i8> %4, <32 x i8> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = and <32 x i8> %4, %5
+ %7 = shufflevector <32 x i8> %6, <32 x i8> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %8 = and <32 x i8> %6, %7
+ %9 = shufflevector <32 x i8> %8, <32 x i8> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %10 = and <32 x i8> %8, %9
+ %11 = extractelement <32 x i8> %10, i32 0
+ ret i8 %11
+}
diff --git a/test/CodeGen/X86/vector-compare-any_of.ll b/test/CodeGen/X86/vector-compare-any_of.ll
new file mode 100644
index 000000000000..1d3db6495708
--- /dev/null
+++ b/test/CodeGen/X86/vector-compare-any_of.ll
@@ -0,0 +1,882 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=AVX512
+
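+; These any_of tests mirror the all_of file but reduce with OR: the result is
+; all-ones if any lane compared true. x86 folds this to a sign-bit mask plus
+; the neg/sbb idiom: negl sets CF exactly when the mask is nonzero, and
+; sbbl %eax, %eax then computes -CF, i.e. -1 or 0, without a branch.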
+define i64 @test_v2f64_sext(<2 x double> %a0, <2 x double> %a1) {
+; SSE-LABEL: test_v2f64_sext:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltpd %xmm0, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: movd %xmm0, %rax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v2f64_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v2f64_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vcmpltpd %xmm0, %xmm1, %k1
+; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %rax
+; AVX512-NEXT: retq
+ %c = fcmp ogt <2 x double> %a0, %a1
+ %s = sext <2 x i1> %c to <2 x i64>
+ %1 = shufflevector <2 x i64> %s, <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+ %2 = or <2 x i64> %s, %1
+ %3 = extractelement <2 x i64> %2, i32 0
+ ret i64 %3
+}
+
+define i64 @test_v4f64_sext(<4 x double> %a0, <4 x double> %a1) {
+; SSE-LABEL: test_v4f64_sext:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltpd %xmm1, %xmm3
+; SSE-NEXT: cmpltpd %xmm0, %xmm2
+; SSE-NEXT: orpd %xmm3, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
+; SSE-NEXT: por %xmm2, %xmm0
+; SSE-NEXT: movd %xmm0, %rax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v4f64_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
+; AVX-NEXT: vmovmskpd %ymm0, %eax
+; AVX-NEXT: negl %eax
+; AVX-NEXT: sbbq %rax, %rax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v4f64_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k1
+; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vmovq %xmm0, %rax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = fcmp ogt <4 x double> %a0, %a1
+ %s = sext <4 x i1> %c to <4 x i64>
+ %1 = shufflevector <4 x i64> %s, <4 x i64> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %2 = or <4 x i64> %s, %1
+ %3 = shufflevector <4 x i64> %2, <4 x i64> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %4 = or <4 x i64> %2, %3
+ %5 = extractelement <4 x i64> %4, i64 0
+ ret i64 %5
+}
+
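+; As in the all_of tests, the "legal" variants reduce in a narrower element
+; type and sign-extend the extracted scalar (cltq/cwtl/movsbl) at the end.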
+define i64 @test_v4f64_legal_sext(<4 x double> %a0, <4 x double> %a1) {
+; SSE-LABEL: test_v4f64_legal_sext:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltpd %xmm1, %xmm3
+; SSE-NEXT: cmpltpd %xmm0, %xmm2
+; SSE-NEXT: packsswb %xmm3, %xmm2
+; SSE-NEXT: movmskps %xmm2, %eax
+; SSE-NEXT: negl %eax
+; SSE-NEXT: sbbl %eax, %eax
+; SSE-NEXT: cltq
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v4f64_legal_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovmskps %xmm0, %eax
+; AVX-NEXT: negl %eax
+; AVX-NEXT: sbbl %eax, %eax
+; AVX-NEXT: cltq
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v4f64_legal_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k1
+; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: cltq
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = fcmp ogt <4 x double> %a0, %a1
+ %s = sext <4 x i1> %c to <4 x i32>
+ %1 = shufflevector <4 x i32> %s, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %2 = or <4 x i32> %s, %1
+ %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %4 = or <4 x i32> %2, %3
+ %5 = extractelement <4 x i32> %4, i64 0
+ %6 = sext i32 %5 to i64
+ ret i64 %6
+}
+
+define i32 @test_v4f32_sext(<4 x float> %a0, <4 x float> %a1) {
+; SSE-LABEL: test_v4f32_sext:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltps %xmm0, %xmm1
+; SSE-NEXT: movmskps %xmm1, %eax
+; SSE-NEXT: negl %eax
+; SSE-NEXT: sbbl %eax, %eax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v4f32_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmovmskps %xmm0, %eax
+; AVX-NEXT: negl %eax
+; AVX-NEXT: sbbl %eax, %eax
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v4f32_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vcmpltps %xmm0, %xmm1, %k1
+; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: retq
+ %c = fcmp ogt <4 x float> %a0, %a1
+ %s = sext <4 x i1> %c to <4 x i32>
+ %1 = shufflevector <4 x i32> %s, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %2 = or <4 x i32> %s, %1
+ %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %4 = or <4 x i32> %2, %3
+ %5 = extractelement <4 x i32> %4, i32 0
+ ret i32 %5
+}
+
+define i32 @test_v8f32_sext(<8 x float> %a0, <8 x float> %a1) {
+; SSE-LABEL: test_v8f32_sext:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltps %xmm1, %xmm3
+; SSE-NEXT: cmpltps %xmm0, %xmm2
+; SSE-NEXT: orps %xmm3, %xmm2
+; SSE-NEXT: movmskps %xmm2, %eax
+; SSE-NEXT: negl %eax
+; SSE-NEXT: sbbl %eax, %eax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v8f32_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
+; AVX-NEXT: vmovmskps %ymm0, %eax
+; AVX-NEXT: negl %eax
+; AVX-NEXT: sbbl %eax, %eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v8f32_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k1
+; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = fcmp ogt <8 x float> %a0, %a1
+ %s = sext <8 x i1> %c to <8 x i32>
+ %1 = shufflevector <8 x i32> %s, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = or <8 x i32> %s, %1
+ %3 = shufflevector <8 x i32> %2, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = or <8 x i32> %2, %3
+ %5 = shufflevector <8 x i32> %4, <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = or <8 x i32> %4, %5
+ %7 = extractelement <8 x i32> %6, i32 0
+ ret i32 %7
+}
+
+define i32 @test_v8f32_legal_sext(<8 x float> %a0, <8 x float> %a1) {
+; SSE-LABEL: test_v8f32_legal_sext:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltps %xmm1, %xmm3
+; SSE-NEXT: cmpltps %xmm0, %xmm2
+; SSE-NEXT: packsswb %xmm3, %xmm2
+; SSE-NEXT: pmovmskb %xmm2, %eax
+; SSE-NEXT: negl %eax
+; SSE-NEXT: sbbl %eax, %eax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v8f32_legal_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpmovmskb %xmm0, %eax
+; AVX-NEXT: negl %eax
+; AVX-NEXT: sbbl %eax, %eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v8f32_legal_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k0
+; AVX512-NEXT: vpmovm2w %k0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: cwtl
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = fcmp ogt <8 x float> %a0, %a1
+ %s = sext <8 x i1> %c to <8 x i16>
+ %1 = shufflevector <8 x i16> %s, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = or <8 x i16> %s, %1
+ %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = or <8 x i16> %2, %3
+ %5 = shufflevector <8 x i16> %4, <8 x i16> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = or <8 x i16> %4, %5
+ %7 = extractelement <8 x i16> %6, i32 0
+ %8 = sext i16 %7 to i32
+ ret i32 %8
+}
+
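+; Integer any_of variants: pcmpgt* feeds the same OR-reduction and the same
+; movmsk + neg/sbb lowering.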
+define i64 @test_v2i64_sext(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE-LABEL: test_v2i64_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtq %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: movd %xmm1, %rax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v2i64_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v2i64_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
+; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %rax
+; AVX512-NEXT: retq
+ %c = icmp sgt <2 x i64> %a0, %a1
+ %s = sext <2 x i1> %c to <2 x i64>
+ %1 = shufflevector <2 x i64> %s, <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
+ %2 = or <2 x i64> %s, %1
+ %3 = extractelement <2 x i64> %2, i32 0
+ ret i64 %3
+}
+
+define i64 @test_v4i64_sext(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE-LABEL: test_v4i64_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtq %xmm3, %xmm1
+; SSE-NEXT: pcmpgtq %xmm2, %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: movd %xmm1, %rax
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_v4i64_sext:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovmskpd %ymm0, %eax
+; AVX1-NEXT: negl %eax
+; AVX1-NEXT: sbbq %rax, %rax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_v4i64_sext:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovmskpd %ymm0, %eax
+; AVX2-NEXT: negl %eax
+; AVX2-NEXT: sbbq %rax, %rax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v4i64_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
+; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vmovq %xmm0, %rax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = icmp sgt <4 x i64> %a0, %a1
+ %s = sext <4 x i1> %c to <4 x i64>
+ %1 = shufflevector <4 x i64> %s, <4 x i64> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %2 = or <4 x i64> %s, %1
+ %3 = shufflevector <4 x i64> %2, <4 x i64> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %4 = or <4 x i64> %2, %3
+ %5 = extractelement <4 x i64> %4, i64 0
+ ret i64 %5
+}
+
+define i64 @test_v4i64_legal_sext(<4 x i64> %a0, <4 x i64> %a1) {
+; SSE-LABEL: test_v4i64_legal_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtq %xmm3, %xmm1
+; SSE-NEXT: pcmpgtq %xmm2, %xmm0
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: movmskps %xmm0, %eax
+; SSE-NEXT: negl %eax
+; SSE-NEXT: sbbl %eax, %eax
+; SSE-NEXT: cltq
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_v4i64_legal_sext:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vmovmskps %xmm0, %eax
+; AVX1-NEXT: negl %eax
+; AVX1-NEXT: sbbl %eax, %eax
+; AVX1-NEXT: cltq
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_v4i64_legal_sext:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovmskps %xmm0, %eax
+; AVX2-NEXT: negl %eax
+; AVX2-NEXT: sbbl %eax, %eax
+; AVX2-NEXT: cltq
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v4i64_legal_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
+; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: cltq
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = icmp sgt <4 x i64> %a0, %a1
+ %s = sext <4 x i1> %c to <4 x i32>
+ %1 = shufflevector <4 x i32> %s, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %2 = or <4 x i32> %s, %1
+ %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %4 = or <4 x i32> %2, %3
+ %5 = extractelement <4 x i32> %4, i64 0
+ %6 = sext i32 %5 to i64
+ ret i64 %6
+}
+
+define i32 @test_v4i32_sext(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: test_v4i32_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE-NEXT: movmskps %xmm0, %eax
+; SSE-NEXT: negl %eax
+; SSE-NEXT: sbbl %eax, %eax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v4i32_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovmskps %xmm0, %eax
+; AVX-NEXT: negl %eax
+; AVX-NEXT: sbbl %eax, %eax
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v4i32_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
+; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: retq
+ %c = icmp sgt <4 x i32> %a0, %a1
+ %s = sext <4 x i1> %c to <4 x i32>
+ %1 = shufflevector <4 x i32> %s, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+ %2 = or <4 x i32> %s, %1
+ %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %4 = or <4 x i32> %2, %3
+ %5 = extractelement <4 x i32> %4, i32 0
+ ret i32 %5
+}
+
+define i32 @test_v8i32_sext(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE-LABEL: test_v8i32_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtd %xmm3, %xmm1
+; SSE-NEXT: pcmpgtd %xmm2, %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: movmskps %xmm0, %eax
+; SSE-NEXT: negl %eax
+; SSE-NEXT: sbbl %eax, %eax
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_v8i32_sext:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovmskps %ymm0, %eax
+; AVX1-NEXT: negl %eax
+; AVX1-NEXT: sbbl %eax, %eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_v8i32_sext:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovmskps %ymm0, %eax
+; AVX2-NEXT: negl %eax
+; AVX2-NEXT: sbbl %eax, %eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v8i32_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
+; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = icmp sgt <8 x i32> %a0, %a1
+ %s = sext <8 x i1> %c to <8 x i32>
+ %1 = shufflevector <8 x i32> %s, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = or <8 x i32> %s, %1
+ %3 = shufflevector <8 x i32> %2, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = or <8 x i32> %2, %3
+ %5 = shufflevector <8 x i32> %4, <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = or <8 x i32> %4, %5
+ %7 = extractelement <8 x i32> %6, i32 0
+ ret i32 %7
+}
+
+define i32 @test_v8i32_legal_sext(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE-LABEL: test_v8i32_legal_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtd %xmm3, %xmm1
+; SSE-NEXT: pcmpgtd %xmm2, %xmm0
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: negl %eax
+; SSE-NEXT: sbbl %eax, %eax
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_v8i32_legal_sext:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: negl %eax
+; AVX1-NEXT: sbbl %eax, %eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_v8i32_legal_sext:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: negl %eax
+; AVX2-NEXT: sbbl %eax, %eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v8i32_legal_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
+; AVX512-NEXT: vpmovm2w %k0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: cwtl
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = icmp sgt <8 x i32> %a0, %a1
+ %s = sext <8 x i1> %c to <8 x i16>
+ %1 = shufflevector <8 x i16> %s, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = or <8 x i16> %s, %1
+ %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = or <8 x i16> %2, %3
+ %5 = shufflevector <8 x i16> %4, <8 x i16> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = or <8 x i16> %4, %5
+ %7 = extractelement <8 x i16> %6, i32 0
+ %8 = sext i16 %7 to i32
+ ret i32 %8
+}
+
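+; i16-lane any_of again relies on pmovmskb: each true lane sets two byte
+; bits, so a nonzero byte mask is equivalent to a nonzero lane mask and the
+; neg/sbb idiom applies unchanged.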
+define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: test_v8i16_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtw %xmm1, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: negl %eax
+; SSE-NEXT: sbbl %eax, %eax
+; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v8i16_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpmovmskb %xmm0, %eax
+; AVX-NEXT: negl %eax
+; AVX-NEXT: sbbl %eax, %eax
+; AVX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v8i16_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
+; AVX512-NEXT: vpmovm2w %k0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT: retq
+ %c = icmp sgt <8 x i16> %a0, %a1
+ %s = sext <8 x i1> %c to <8 x i16>
+ %1 = shufflevector <8 x i16> %s, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = or <8 x i16> %s, %1
+ %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = or <8 x i16> %2, %3
+ %5 = shufflevector <8 x i16> %4, <8 x i16> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = or <8 x i16> %4, %5
+ %7 = extractelement <8 x i16> %6, i32 0
+ ret i16 %7
+}
+
+define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE-LABEL: test_v16i16_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtw %xmm3, %xmm1
+; SSE-NEXT: pcmpgtw %xmm2, %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: negl %eax
+; SSE-NEXT: sbbl %eax, %eax
+; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_v16i16_sext:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_v16i16_sext:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
+; AVX2-NEXT: negl %eax
+; AVX2-NEXT: sbbl %eax, %eax
+; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v16i16_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
+; AVX512-NEXT: vpmovm2w %k0, %ymm0
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = icmp sgt <16 x i16> %a0, %a1
+ %s = sext <16 x i1> %c to <16 x i16>
+ %1 = shufflevector <16 x i16> %s, <16 x i16> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = or <16 x i16> %s, %1
+ %3 = shufflevector <16 x i16> %2, <16 x i16> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = or <16 x i16> %2, %3
+ %5 = shufflevector <16 x i16> %4, <16 x i16> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = or <16 x i16> %4, %5
+ %7 = shufflevector <16 x i16> %6, <16 x i16> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %8 = or <16 x i16> %6, %7
+ %9 = extractelement <16 x i16> %8, i32 0
+ ret i16 %9
+}
+
+define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
+; SSE-LABEL: test_v16i16_legal_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtw %xmm3, %xmm1
+; SSE-NEXT: pcmpgtw %xmm2, %xmm0
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: negl %eax
+; SSE-NEXT: sbbl %eax, %eax
+; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_v16i16_legal_sext:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: negl %eax
+; AVX1-NEXT: sbbl %eax, %eax
+; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_v16i16_legal_sext:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: negl %eax
+; AVX2-NEXT: sbbl %eax, %eax
+; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v16i16_legal_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
+; AVX512-NEXT: vpmovm2b %k0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpextrb $0, %xmm0, %eax
+; AVX512-NEXT: movsbl %al, %eax
+; AVX512-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = icmp sgt <16 x i16> %a0, %a1
+ %s = sext <16 x i1> %c to <16 x i8>
+ %1 = shufflevector <16 x i8> %s, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = or <16 x i8> %s, %1
+ %3 = shufflevector <16 x i8> %2, <16 x i8> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = or <16 x i8> %2, %3
+ %5 = shufflevector <16 x i8> %4, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = or <16 x i8> %4, %5
+ %7 = shufflevector <16 x i8> %6, <16 x i8> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %8 = or <16 x i8> %6, %7
+ %9 = extractelement <16 x i8> %8, i32 0
+ %10 = sext i8 %9 to i16
+ ret i16 %10
+}
+
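+; <16 x i8> is already a legal mask type, so the SSE/AVX checks below lower
+; the entire shuffle+or ladder to a single pmovmskb + neg/sbb any-of test.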
+define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE-LABEL: test_v16i8_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtb %xmm1, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: negl %eax
+; SSE-NEXT: sbbl %eax, %eax
+; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v16i8_sext:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpmovmskb %xmm0, %eax
+; AVX-NEXT: negl %eax
+; AVX-NEXT: sbbl %eax, %eax
+; AVX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v16i8_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
+; AVX512-NEXT: vpmovm2b %k0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpextrb $0, %xmm0, %eax
+; AVX512-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: retq
+ %c = icmp sgt <16 x i8> %a0, %a1
+ %s = sext <16 x i1> %c to <16 x i8>
+ %1 = shufflevector <16 x i8> %s, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = or <16 x i8> %s, %1
+ %3 = shufflevector <16 x i8> %2, <16 x i8> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = or <16 x i8> %2, %3
+ %5 = shufflevector <16 x i8> %4, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = or <16 x i8> %4, %5
+ %7 = shufflevector <16 x i8> %6, <16 x i8> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %8 = or <16 x i8> %6, %7
+ %9 = extractelement <16 x i8> %8, i32 0
+ ret i8 %9
+}
+
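+; For <32 x i8> the SSE path first folds the two 128-bit compare halves
+; together with por and then reuses the pmovmskb + neg/sbb any-of idiom,
+; while AVX2 tests the full 256-bit mask with one vpmovmskb.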
+define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
+; SSE-LABEL: test_v32i8_sext:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtb %xmm3, %xmm1
+; SSE-NEXT: pcmpgtb %xmm2, %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: negl %eax
+; SSE-NEXT: sbbl %eax, %eax
+; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_v32i8_sext:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_v32i8_sext:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
+; AVX2-NEXT: negl %eax
+; AVX2-NEXT: sbbl %eax, %eax
+; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_v32i8_sext:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpcmpgtb %ymm1, %ymm0, %k0
+; AVX512-NEXT: vpmovm2b %k0, %ymm0
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpextrb $0, %xmm0, %eax
+; AVX512-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %c = icmp sgt <32 x i8> %a0, %a1
+ %s = sext <32 x i1> %c to <32 x i8>
+ %1 = shufflevector <32 x i8> %s, <32 x i8> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = or <32 x i8> %s, %1
+ %3 = shufflevector <32 x i8> %2, <32 x i8> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = or <32 x i8> %2, %3
+ %5 = shufflevector <32 x i8> %4, <32 x i8> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = or <32 x i8> %4, %5
+ %7 = shufflevector <32 x i8> %6, <32 x i8> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %8 = or <32 x i8> %6, %7
+ %9 = shufflevector <32 x i8> %8, <32 x i8> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %10 = or <32 x i8> %8, %9
+ %11 = extractelement <32 x i8> %10, i32 0
+ ret i8 %11
+}
diff --git a/test/CodeGen/X86/vector-compare-results.ll b/test/CodeGen/X86/vector-compare-results.ll
index c34f333ef785..4fa9596192a6 100644
--- a/test/CodeGen/X86/vector-compare-results.ll
+++ b/test/CodeGen/X86/vector-compare-results.ll
@@ -146,6 +146,7 @@ define <4 x i1> @test_cmp_v4f64(<4 x double> %a0, <4 x double> %a1) nounwind {
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = fcmp ogt <4 x double> %a0, %a1
ret <4 x i1> %1
@@ -181,6 +182,7 @@ define <8 x i1> @test_cmp_v8f32(<8 x float> %a0, <8 x float> %a1) nounwind {
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = fcmp ogt <8 x float> %a0, %a1
ret <8 x i1> %1
@@ -243,6 +245,7 @@ define <4 x i1> @test_cmp_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sgt <4 x i64> %a0, %a1
ret <4 x i1> %1
@@ -279,6 +282,7 @@ define <8 x i1> @test_cmp_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sgt <8 x i32> %a0, %a1
ret <8 x i1> %1
@@ -315,6 +319,7 @@ define <16 x i1> @test_cmp_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16i16:
@@ -322,6 +327,7 @@ define <16 x i1> @test_cmp_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; AVX512DQ-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16i16:
@@ -329,6 +335,7 @@ define <16 x i1> @test_cmp_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i16> %a0, %a1
ret <16 x i1> %1
@@ -343,98 +350,98 @@ define <32 x i1> @test_cmp_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 2(%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, (%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movq %rdi, %rax
@@ -610,6 +617,7 @@ define <8 x i1> @test_cmp_v8f64(<8 x double> %a0, <8 x double> %a1) nounwind {
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v8f64:
@@ -617,13 +625,15 @@ define <8 x i1> @test_cmp_v8f64(<8 x double> %a0, <8 x double> %a1) nounwind {
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v8f64:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k1
-; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k0
+; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = fcmp ogt <8 x double> %a0, %a1
ret <8 x i1> %1
@@ -670,6 +680,7 @@ define <16 x i1> @test_cmp_v16f32(<16 x float> %a0, <16 x float> %a1) nounwind {
; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16f32:
@@ -677,13 +688,15 @@ define <16 x i1> @test_cmp_v16f32(<16 x float> %a0, <16 x float> %a1) nounwind {
; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16f32:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k0
+; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = fcmp ogt <16 x float> %a0, %a1
ret <16 x i1> %1
@@ -781,6 +794,7 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v8i64:
@@ -788,13 +802,15 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX512DQ-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v8i64:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
-; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
+; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <8 x i64> %a0, %a1
ret <8 x i1> %1
@@ -844,6 +860,7 @@ define <16 x i1> @test_cmp_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16i32:
@@ -851,13 +868,15 @@ define <16 x i1> @test_cmp_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; AVX512DQ-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16i32:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i32> %a0, %a1
ret <16 x i1> %1
@@ -881,98 +900,98 @@ define <32 x i1> @test_cmp_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 2(%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, (%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movq %rdi, %rax
@@ -980,108 +999,101 @@ define <32 x i1> @test_cmp_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
;
; SSE42-LABEL: test_cmp_v32i16:
; SSE42: # BB#0:
-; SSE42-NEXT: pcmpgtw %xmm5, %xmm1
-; SSE42-NEXT: movdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; SSE42-NEXT: pshufb %xmm5, %xmm1
; SSE42-NEXT: pcmpgtw %xmm4, %xmm0
-; SSE42-NEXT: pshufb %xmm5, %xmm0
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE42-NEXT: pcmpgtw %xmm7, %xmm3
-; SSE42-NEXT: pshufb %xmm5, %xmm3
+; SSE42-NEXT: pcmpgtw %xmm5, %xmm1
; SSE42-NEXT: pcmpgtw %xmm6, %xmm2
-; SSE42-NEXT: pshufb %xmm5, %xmm2
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; SSE42-NEXT: pextrb $15, %xmm2, %eax
+; SSE42-NEXT: pcmpgtw %xmm7, %xmm3
+; SSE42-NEXT: pextrb $14, %xmm3, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $14, %xmm2, %eax
+; SSE42-NEXT: pextrb $12, %xmm3, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $13, %xmm2, %eax
+; SSE42-NEXT: pextrb $10, %xmm3, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $12, %xmm2, %eax
+; SSE42-NEXT: pextrb $8, %xmm3, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $11, %xmm2, %eax
+; SSE42-NEXT: pextrb $6, %xmm3, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $10, %xmm2, %eax
+; SSE42-NEXT: pextrb $4, %xmm3, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $9, %xmm2, %eax
+; SSE42-NEXT: pextrb $2, %xmm3, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $8, %xmm2, %eax
+; SSE42-NEXT: pextrb $0, %xmm3, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $7, %xmm2, %eax
+; SSE42-NEXT: pextrb $14, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $6, %xmm2, %eax
+; SSE42-NEXT: pextrb $12, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $5, %xmm2, %eax
+; SSE42-NEXT: pextrb $10, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $4, %xmm2, %eax
+; SSE42-NEXT: pextrb $8, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $3, %xmm2, %eax
+; SSE42-NEXT: pextrb $6, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $2, %xmm2, %eax
+; SSE42-NEXT: pextrb $4, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $1, %xmm2, %eax
+; SSE42-NEXT: pextrb $2, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
; SSE42-NEXT: pextrb $0, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $15, %xmm0, %eax
+; SSE42-NEXT: pextrb $14, %xmm1, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $14, %xmm0, %eax
+; SSE42-NEXT: pextrb $12, %xmm1, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $13, %xmm0, %eax
+; SSE42-NEXT: pextrb $10, %xmm1, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $12, %xmm0, %eax
+; SSE42-NEXT: pextrb $8, %xmm1, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $11, %xmm0, %eax
+; SSE42-NEXT: pextrb $6, %xmm1, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $10, %xmm0, %eax
+; SSE42-NEXT: pextrb $4, %xmm1, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $9, %xmm0, %eax
+; SSE42-NEXT: pextrb $2, %xmm1, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $8, %xmm0, %eax
+; SSE42-NEXT: pextrb $0, %xmm1, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $7, %xmm0, %eax
+; SSE42-NEXT: pextrb $14, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $6, %xmm0, %eax
+; SSE42-NEXT: pextrb $12, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $5, %xmm0, %eax
+; SSE42-NEXT: pextrb $10, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $4, %xmm0, %eax
+; SSE42-NEXT: pextrb $8, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $3, %xmm0, %eax
+; SSE42-NEXT: pextrb $6, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $2, %xmm0, %eax
+; SSE42-NEXT: pextrb $4, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $1, %xmm0, %eax
+; SSE42-NEXT: pextrb $2, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
; SSE42-NEXT: pextrb $0, %xmm0, %eax
@@ -1137,10 +1149,9 @@ define <32 x i1> @test_cmp_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
;
; AVX512BW-LABEL: test_cmp_v32i16:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
+; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i16> %a0, %a1
ret <32 x i1> %1
@@ -1157,196 +1168,196 @@ define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 6(%rdi)
-; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 6(%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 4(%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 2(%rdi)
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, (%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movq %rdi, %rax
@@ -1969,6 +1980,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
; AVX512F-NEXT: vextracti128 $1, %ymm4, %xmm3
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX512F-NEXT: vmovdqa %xmm4, %xmm2
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v64i8:
@@ -1979,6 +1991,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
; AVX512DQ-NEXT: vextracti128 $1, %ymm4, %xmm3
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX512DQ-NEXT: vmovdqa %xmm4, %xmm2
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v64i8:
@@ -2173,6 +2186,7 @@ define <16 x i1> @test_cmp_v16f64(<16 x double> %a0, <16 x double> %a1) nounwind
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16f64:
@@ -2288,6 +2302,7 @@ define <16 x i1> @test_cmp_v16f64(<16 x double> %a0, <16 x double> %a1) nounwind
; AVX512DQ-NEXT: vpmovqd %zmm1, %ymm1
; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16f64:
@@ -2403,6 +2418,7 @@ define <16 x i1> @test_cmp_v16f64(<16 x double> %a0, <16 x double> %a1) nounwind
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = fcmp ogt <16 x double> %a0, %a1
ret <16 x i1> %1
@@ -2474,98 +2490,98 @@ define <32 x i1> @test_cmp_v32f32(<32 x float> %a0, <32 x float> %a1) nounwind {
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 2(%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, (%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movq %rdi, %rax
@@ -2573,141 +2589,134 @@ define <32 x i1> @test_cmp_v32f32(<32 x float> %a0, <32 x float> %a1) nounwind {
;
; SSE42-LABEL: test_cmp_v32f32:
; SSE42: # BB#0:
-; SSE42-NEXT: movaps {{[0-9]+}}(%rsp), %xmm9
; SSE42-NEXT: movaps {{[0-9]+}}(%rsp), %xmm11
-; SSE42-NEXT: movaps {{[0-9]+}}(%rsp), %xmm10
; SSE42-NEXT: movaps {{[0-9]+}}(%rsp), %xmm12
-; SSE42-NEXT: movaps {{[0-9]+}}(%rsp), %xmm8
-; SSE42-NEXT: movaps {{[0-9]+}}(%rsp), %xmm14
+; SSE42-NEXT: movaps {{[0-9]+}}(%rsp), %xmm10
; SSE42-NEXT: movaps {{[0-9]+}}(%rsp), %xmm13
+; SSE42-NEXT: movaps {{[0-9]+}}(%rsp), %xmm9
+; SSE42-NEXT: movaps {{[0-9]+}}(%rsp), %xmm14
+; SSE42-NEXT: movaps {{[0-9]+}}(%rsp), %xmm8
; SSE42-NEXT: movaps {{[0-9]+}}(%rsp), %xmm15
-; SSE42-NEXT: cmpltps %xmm3, %xmm15
-; SSE42-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSE42-NEXT: pshufb %xmm3, %xmm15
-; SSE42-NEXT: cmpltps %xmm2, %xmm13
-; SSE42-NEXT: pshufb %xmm3, %xmm13
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm13 = xmm13[0],xmm15[0]
-; SSE42-NEXT: psllw $15, %xmm13
-; SSE42-NEXT: psraw $15, %xmm13
-; SSE42-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; SSE42-NEXT: pshufb %xmm2, %xmm13
-; SSE42-NEXT: cmpltps %xmm1, %xmm14
-; SSE42-NEXT: pshufb %xmm3, %xmm14
+; SSE42-NEXT: cmpltps %xmm1, %xmm15
+; SSE42-NEXT: movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE42-NEXT: pshufb %xmm1, %xmm15
; SSE42-NEXT: cmpltps %xmm0, %xmm8
-; SSE42-NEXT: pshufb %xmm3, %xmm8
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm14[0]
+; SSE42-NEXT: pshufb %xmm1, %xmm8
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm15[0]
; SSE42-NEXT: psllw $15, %xmm8
; SSE42-NEXT: psraw $15, %xmm8
-; SSE42-NEXT: pshufb %xmm2, %xmm8
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm13[0]
-; SSE42-NEXT: cmpltps %xmm7, %xmm12
-; SSE42-NEXT: pshufb %xmm3, %xmm12
-; SSE42-NEXT: cmpltps %xmm6, %xmm10
-; SSE42-NEXT: pshufb %xmm3, %xmm10
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm10 = xmm10[0],xmm12[0]
-; SSE42-NEXT: psllw $15, %xmm10
-; SSE42-NEXT: psraw $15, %xmm10
-; SSE42-NEXT: pshufb %xmm2, %xmm10
-; SSE42-NEXT: cmpltps %xmm5, %xmm11
-; SSE42-NEXT: pshufb %xmm3, %xmm11
-; SSE42-NEXT: cmpltps %xmm4, %xmm9
-; SSE42-NEXT: pshufb %xmm3, %xmm9
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0]
+; SSE42-NEXT: cmpltps %xmm3, %xmm14
+; SSE42-NEXT: pshufb %xmm1, %xmm14
+; SSE42-NEXT: cmpltps %xmm2, %xmm9
+; SSE42-NEXT: pshufb %xmm1, %xmm9
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm14[0]
; SSE42-NEXT: psllw $15, %xmm9
; SSE42-NEXT: psraw $15, %xmm9
-; SSE42-NEXT: pshufb %xmm2, %xmm9
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm10[0]
-; SSE42-NEXT: pextrb $15, %xmm9, %eax
+; SSE42-NEXT: cmpltps %xmm5, %xmm13
+; SSE42-NEXT: pshufb %xmm1, %xmm13
+; SSE42-NEXT: cmpltps %xmm4, %xmm10
+; SSE42-NEXT: pshufb %xmm1, %xmm10
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm10 = xmm10[0],xmm13[0]
+; SSE42-NEXT: psllw $15, %xmm10
+; SSE42-NEXT: psraw $15, %xmm10
+; SSE42-NEXT: cmpltps %xmm7, %xmm12
+; SSE42-NEXT: pshufb %xmm1, %xmm12
+; SSE42-NEXT: cmpltps %xmm6, %xmm11
+; SSE42-NEXT: pshufb %xmm1, %xmm11
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm11 = xmm11[0],xmm12[0]
+; SSE42-NEXT: psllw $15, %xmm11
+; SSE42-NEXT: psraw $15, %xmm11
+; SSE42-NEXT: pextrb $14, %xmm11, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $14, %xmm9, %eax
+; SSE42-NEXT: pextrb $12, %xmm11, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $13, %xmm9, %eax
+; SSE42-NEXT: pextrb $10, %xmm11, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $12, %xmm9, %eax
+; SSE42-NEXT: pextrb $8, %xmm11, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $11, %xmm9, %eax
+; SSE42-NEXT: pextrb $6, %xmm11, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $10, %xmm9, %eax
+; SSE42-NEXT: pextrb $4, %xmm11, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $9, %xmm9, %eax
+; SSE42-NEXT: pextrb $2, %xmm11, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $8, %xmm9, %eax
+; SSE42-NEXT: pextrb $0, %xmm11, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $7, %xmm9, %eax
+; SSE42-NEXT: pextrb $14, %xmm10, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $6, %xmm9, %eax
+; SSE42-NEXT: pextrb $12, %xmm10, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $5, %xmm9, %eax
+; SSE42-NEXT: pextrb $10, %xmm10, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $4, %xmm9, %eax
+; SSE42-NEXT: pextrb $8, %xmm10, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $3, %xmm9, %eax
+; SSE42-NEXT: pextrb $6, %xmm10, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $2, %xmm9, %eax
+; SSE42-NEXT: pextrb $4, %xmm10, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $1, %xmm9, %eax
+; SSE42-NEXT: pextrb $2, %xmm10, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $0, %xmm9, %eax
+; SSE42-NEXT: pextrb $0, %xmm10, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $15, %xmm8, %eax
+; SSE42-NEXT: pextrb $14, %xmm9, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $14, %xmm8, %eax
+; SSE42-NEXT: pextrb $12, %xmm9, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $13, %xmm8, %eax
+; SSE42-NEXT: pextrb $10, %xmm9, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $12, %xmm8, %eax
+; SSE42-NEXT: pextrb $8, %xmm9, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $11, %xmm8, %eax
+; SSE42-NEXT: pextrb $6, %xmm9, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $10, %xmm8, %eax
+; SSE42-NEXT: pextrb $4, %xmm9, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $9, %xmm8, %eax
+; SSE42-NEXT: pextrb $2, %xmm9, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $8, %xmm8, %eax
+; SSE42-NEXT: pextrb $0, %xmm9, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $7, %xmm8, %eax
+; SSE42-NEXT: pextrb $14, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $6, %xmm8, %eax
+; SSE42-NEXT: pextrb $12, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $5, %xmm8, %eax
+; SSE42-NEXT: pextrb $10, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $4, %xmm8, %eax
+; SSE42-NEXT: pextrb $8, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $3, %xmm8, %eax
+; SSE42-NEXT: pextrb $6, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $2, %xmm8, %eax
+; SSE42-NEXT: pextrb $4, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $1, %xmm8, %eax
+; SSE42-NEXT: pextrb $2, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
; SSE42-NEXT: pextrb $0, %xmm8, %eax
@@ -3652,6 +3661,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16i64:
@@ -3783,6 +3793,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; AVX512DQ-NEXT: vpmovqd %zmm1, %ymm1
; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16i64:
@@ -3914,6 +3925,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i64> %a0, %a1
ret <16 x i1> %1
@@ -3977,98 +3989,98 @@ define <32 x i1> @test_cmp_v32i32(<32 x i32> %a0, <32 x i32> %a1) nounwind {
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 2(%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, (%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movq %rdi, %rax
@@ -4076,33 +4088,21 @@ define <32 x i1> @test_cmp_v32i32(<32 x i32> %a0, <32 x i32> %a1) nounwind {
;
; SSE42-LABEL: test_cmp_v32i32:
; SSE42: # BB#0:
-; SSE42-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm3
-; SSE42-NEXT: movdqa {{.*#+}} xmm8 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSE42-NEXT: pshufb %xmm8, %xmm3
-; SSE42-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm2
-; SSE42-NEXT: pshufb %xmm8, %xmm2
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; SSE42-NEXT: psllw $15, %xmm2
-; SSE42-NEXT: psraw $15, %xmm2
-; SSE42-NEXT: movdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; SSE42-NEXT: pshufb %xmm3, %xmm2
; SSE42-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm1
+; SSE42-NEXT: movdqa {{.*#+}} xmm8 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSE42-NEXT: pshufb %xmm8, %xmm1
; SSE42-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm0
; SSE42-NEXT: pshufb %xmm8, %xmm0
; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE42-NEXT: psllw $15, %xmm0
; SSE42-NEXT: psraw $15, %xmm0
-; SSE42-NEXT: pshufb %xmm3, %xmm0
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE42-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm7
-; SSE42-NEXT: pshufb %xmm8, %xmm7
-; SSE42-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm6
-; SSE42-NEXT: pshufb %xmm8, %xmm6
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm7[0]
-; SSE42-NEXT: psllw $15, %xmm6
-; SSE42-NEXT: psraw $15, %xmm6
-; SSE42-NEXT: pshufb %xmm3, %xmm6
+; SSE42-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm3
+; SSE42-NEXT: pshufb %xmm8, %xmm3
+; SSE42-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm2
+; SSE42-NEXT: pshufb %xmm8, %xmm2
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE42-NEXT: psllw $15, %xmm2
+; SSE42-NEXT: psraw $15, %xmm2
; SSE42-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm5
; SSE42-NEXT: pshufb %xmm8, %xmm5
; SSE42-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm4
@@ -4110,99 +4110,104 @@ define <32 x i1> @test_cmp_v32i32(<32 x i32> %a0, <32 x i32> %a1) nounwind {
; SSE42-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
; SSE42-NEXT: psllw $15, %xmm4
; SSE42-NEXT: psraw $15, %xmm4
-; SSE42-NEXT: pshufb %xmm3, %xmm4
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
-; SSE42-NEXT: pextrb $15, %xmm4, %eax
+; SSE42-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm7
+; SSE42-NEXT: pshufb %xmm8, %xmm7
+; SSE42-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm6
+; SSE42-NEXT: pshufb %xmm8, %xmm6
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm7[0]
+; SSE42-NEXT: psllw $15, %xmm6
+; SSE42-NEXT: psraw $15, %xmm6
+; SSE42-NEXT: pextrb $14, %xmm6, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $14, %xmm4, %eax
+; SSE42-NEXT: pextrb $12, %xmm6, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $13, %xmm4, %eax
+; SSE42-NEXT: pextrb $10, %xmm6, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $12, %xmm4, %eax
+; SSE42-NEXT: pextrb $8, %xmm6, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $11, %xmm4, %eax
+; SSE42-NEXT: pextrb $6, %xmm6, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $10, %xmm4, %eax
+; SSE42-NEXT: pextrb $4, %xmm6, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $9, %xmm4, %eax
+; SSE42-NEXT: pextrb $2, %xmm6, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $8, %xmm4, %eax
+; SSE42-NEXT: pextrb $0, %xmm6, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $7, %xmm4, %eax
+; SSE42-NEXT: pextrb $14, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $6, %xmm4, %eax
+; SSE42-NEXT: pextrb $12, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $5, %xmm4, %eax
+; SSE42-NEXT: pextrb $10, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $4, %xmm4, %eax
+; SSE42-NEXT: pextrb $8, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $3, %xmm4, %eax
+; SSE42-NEXT: pextrb $6, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $2, %xmm4, %eax
+; SSE42-NEXT: pextrb $4, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $1, %xmm4, %eax
+; SSE42-NEXT: pextrb $2, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
; SSE42-NEXT: pextrb $0, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $15, %xmm0, %eax
+; SSE42-NEXT: pextrb $14, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $14, %xmm0, %eax
+; SSE42-NEXT: pextrb $12, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $13, %xmm0, %eax
+; SSE42-NEXT: pextrb $10, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $12, %xmm0, %eax
+; SSE42-NEXT: pextrb $8, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $11, %xmm0, %eax
+; SSE42-NEXT: pextrb $6, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $10, %xmm0, %eax
+; SSE42-NEXT: pextrb $4, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $9, %xmm0, %eax
+; SSE42-NEXT: pextrb $2, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $8, %xmm0, %eax
+; SSE42-NEXT: pextrb $0, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $7, %xmm0, %eax
+; SSE42-NEXT: pextrb $14, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $6, %xmm0, %eax
+; SSE42-NEXT: pextrb $12, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $5, %xmm0, %eax
+; SSE42-NEXT: pextrb $10, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $4, %xmm0, %eax
+; SSE42-NEXT: pextrb $8, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $3, %xmm0, %eax
+; SSE42-NEXT: pextrb $6, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $2, %xmm0, %eax
+; SSE42-NEXT: pextrb $4, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $1, %xmm0, %eax
+; SSE42-NEXT: pextrb $2, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
; SSE42-NEXT: pextrb $0, %xmm0, %eax
@@ -4938,196 +4943,196 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 6(%rdi)
-; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
-; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 6(%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 4(%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 2(%rdi)
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, (%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movq %rdi, %rax
@@ -5135,214 +5140,201 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
;
; SSE42-LABEL: test_cmp_v64i16:
; SSE42: # BB#0:
-; SSE42-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm1
-; SSE42-NEXT: movdqa {{.*#+}} xmm8 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; SSE42-NEXT: pshufb %xmm8, %xmm1
; SSE42-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm0
-; SSE42-NEXT: pshufb %xmm8, %xmm0
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE42-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm3
-; SSE42-NEXT: pshufb %xmm8, %xmm3
+; SSE42-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm1
; SSE42-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm2
-; SSE42-NEXT: pshufb %xmm8, %xmm2
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; SSE42-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm5
-; SSE42-NEXT: pshufb %xmm8, %xmm5
+; SSE42-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm3
; SSE42-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm4
-; SSE42-NEXT: pshufb %xmm8, %xmm4
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
-; SSE42-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm7
-; SSE42-NEXT: pshufb %xmm8, %xmm7
+; SSE42-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm5
; SSE42-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm6
-; SSE42-NEXT: pshufb %xmm8, %xmm6
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm7[0]
-; SSE42-NEXT: pextrb $15, %xmm6, %eax
+; SSE42-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm7
+; SSE42-NEXT: pextrb $14, %xmm7, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 6(%rdi)
-; SSE42-NEXT: pextrb $14, %xmm6, %eax
+; SSE42-NEXT: pextrb $12, %xmm7, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 6(%rdi)
-; SSE42-NEXT: pextrb $13, %xmm6, %eax
+; SSE42-NEXT: pextrb $10, %xmm7, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 6(%rdi)
-; SSE42-NEXT: pextrb $12, %xmm6, %eax
+; SSE42-NEXT: pextrb $8, %xmm7, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 6(%rdi)
-; SSE42-NEXT: pextrb $11, %xmm6, %eax
+; SSE42-NEXT: pextrb $6, %xmm7, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 6(%rdi)
-; SSE42-NEXT: pextrb $10, %xmm6, %eax
+; SSE42-NEXT: pextrb $4, %xmm7, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 6(%rdi)
-; SSE42-NEXT: pextrb $9, %xmm6, %eax
+; SSE42-NEXT: pextrb $2, %xmm7, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 6(%rdi)
-; SSE42-NEXT: pextrb $8, %xmm6, %eax
+; SSE42-NEXT: pextrb $0, %xmm7, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 6(%rdi)
-; SSE42-NEXT: pextrb $7, %xmm6, %eax
+; SSE42-NEXT: pextrb $14, %xmm6, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 6(%rdi)
-; SSE42-NEXT: pextrb $6, %xmm6, %eax
+; SSE42-NEXT: pextrb $12, %xmm6, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 6(%rdi)
-; SSE42-NEXT: pextrb $5, %xmm6, %eax
+; SSE42-NEXT: pextrb $10, %xmm6, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 6(%rdi)
-; SSE42-NEXT: pextrb $4, %xmm6, %eax
+; SSE42-NEXT: pextrb $8, %xmm6, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 6(%rdi)
-; SSE42-NEXT: pextrb $3, %xmm6, %eax
+; SSE42-NEXT: pextrb $6, %xmm6, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 6(%rdi)
-; SSE42-NEXT: pextrb $2, %xmm6, %eax
+; SSE42-NEXT: pextrb $4, %xmm6, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 6(%rdi)
-; SSE42-NEXT: pextrb $1, %xmm6, %eax
+; SSE42-NEXT: pextrb $2, %xmm6, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 6(%rdi)
; SSE42-NEXT: pextrb $0, %xmm6, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 6(%rdi)
-; SSE42-NEXT: pextrb $15, %xmm4, %eax
+; SSE42-NEXT: pextrb $14, %xmm5, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 4(%rdi)
-; SSE42-NEXT: pextrb $14, %xmm4, %eax
+; SSE42-NEXT: pextrb $12, %xmm5, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 4(%rdi)
-; SSE42-NEXT: pextrb $13, %xmm4, %eax
+; SSE42-NEXT: pextrb $10, %xmm5, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 4(%rdi)
-; SSE42-NEXT: pextrb $12, %xmm4, %eax
+; SSE42-NEXT: pextrb $8, %xmm5, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 4(%rdi)
-; SSE42-NEXT: pextrb $11, %xmm4, %eax
+; SSE42-NEXT: pextrb $6, %xmm5, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 4(%rdi)
-; SSE42-NEXT: pextrb $10, %xmm4, %eax
+; SSE42-NEXT: pextrb $4, %xmm5, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 4(%rdi)
-; SSE42-NEXT: pextrb $9, %xmm4, %eax
+; SSE42-NEXT: pextrb $2, %xmm5, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 4(%rdi)
-; SSE42-NEXT: pextrb $8, %xmm4, %eax
+; SSE42-NEXT: pextrb $0, %xmm5, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 4(%rdi)
-; SSE42-NEXT: pextrb $7, %xmm4, %eax
+; SSE42-NEXT: pextrb $14, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 4(%rdi)
-; SSE42-NEXT: pextrb $6, %xmm4, %eax
+; SSE42-NEXT: pextrb $12, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 4(%rdi)
-; SSE42-NEXT: pextrb $5, %xmm4, %eax
+; SSE42-NEXT: pextrb $10, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 4(%rdi)
-; SSE42-NEXT: pextrb $4, %xmm4, %eax
+; SSE42-NEXT: pextrb $8, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 4(%rdi)
-; SSE42-NEXT: pextrb $3, %xmm4, %eax
+; SSE42-NEXT: pextrb $6, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 4(%rdi)
-; SSE42-NEXT: pextrb $2, %xmm4, %eax
+; SSE42-NEXT: pextrb $4, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 4(%rdi)
-; SSE42-NEXT: pextrb $1, %xmm4, %eax
+; SSE42-NEXT: pextrb $2, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 4(%rdi)
; SSE42-NEXT: pextrb $0, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 4(%rdi)
-; SSE42-NEXT: pextrb $15, %xmm2, %eax
+; SSE42-NEXT: pextrb $14, %xmm3, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $14, %xmm2, %eax
+; SSE42-NEXT: pextrb $12, %xmm3, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $13, %xmm2, %eax
+; SSE42-NEXT: pextrb $10, %xmm3, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $12, %xmm2, %eax
+; SSE42-NEXT: pextrb $8, %xmm3, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $11, %xmm2, %eax
+; SSE42-NEXT: pextrb $6, %xmm3, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $10, %xmm2, %eax
+; SSE42-NEXT: pextrb $4, %xmm3, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $9, %xmm2, %eax
+; SSE42-NEXT: pextrb $2, %xmm3, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $8, %xmm2, %eax
+; SSE42-NEXT: pextrb $0, %xmm3, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $7, %xmm2, %eax
+; SSE42-NEXT: pextrb $14, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $6, %xmm2, %eax
+; SSE42-NEXT: pextrb $12, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $5, %xmm2, %eax
+; SSE42-NEXT: pextrb $10, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $4, %xmm2, %eax
+; SSE42-NEXT: pextrb $8, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $3, %xmm2, %eax
+; SSE42-NEXT: pextrb $6, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $2, %xmm2, %eax
+; SSE42-NEXT: pextrb $4, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $1, %xmm2, %eax
+; SSE42-NEXT: pextrb $2, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
; SSE42-NEXT: pextrb $0, %xmm2, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $15, %xmm0, %eax
+; SSE42-NEXT: pextrb $14, %xmm1, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $14, %xmm0, %eax
+; SSE42-NEXT: pextrb $12, %xmm1, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $13, %xmm0, %eax
+; SSE42-NEXT: pextrb $10, %xmm1, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $12, %xmm0, %eax
+; SSE42-NEXT: pextrb $8, %xmm1, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $11, %xmm0, %eax
+; SSE42-NEXT: pextrb $6, %xmm1, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $10, %xmm0, %eax
+; SSE42-NEXT: pextrb $4, %xmm1, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $9, %xmm0, %eax
+; SSE42-NEXT: pextrb $2, %xmm1, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $8, %xmm0, %eax
+; SSE42-NEXT: pextrb $0, %xmm1, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $7, %xmm0, %eax
+; SSE42-NEXT: pextrb $14, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $6, %xmm0, %eax
+; SSE42-NEXT: pextrb $12, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $5, %xmm0, %eax
+; SSE42-NEXT: pextrb $10, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $4, %xmm0, %eax
+; SSE42-NEXT: pextrb $8, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $3, %xmm0, %eax
+; SSE42-NEXT: pextrb $6, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $2, %xmm0, %eax
+; SSE42-NEXT: pextrb $4, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $1, %xmm0, %eax
+; SSE42-NEXT: pextrb $2, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
; SSE42-NEXT: pextrb $0, %xmm0, %eax
@@ -6063,6 +6055,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX512F-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v64i16:
@@ -6349,6 +6342,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX512DQ-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v64i16:
@@ -6780,392 +6774,392 @@ define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 14(%rdi)
-; SSE2-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 12(%rdi)
-; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 10(%rdi)
-; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 8(%rdi)
-; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movb %al, 14(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
-; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movb %al, 14(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movb %al, 14(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movb %al, 14(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 14(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 14(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 12(%rdi)
+; SSE2-NEXT: movb %al, 14(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 10(%rdi)
+; SSE2-NEXT: movb %al, 14(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 8(%rdi)
+; SSE2-NEXT: movb %al, 14(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 14(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 14(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 14(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 14(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 14(%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 14(%rdi)
+; SSE2-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 12(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 10(%rdi)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 8(%rdi)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 12(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 12(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 12(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 12(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 14(%rdi)
+; SSE2-NEXT: movb %al, 12(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 12(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 10(%rdi)
+; SSE2-NEXT: movb %al, 12(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 8(%rdi)
+; SSE2-NEXT: movb %al, 12(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 12(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 12(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 12(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 12(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 14(%rdi)
+; SSE2-NEXT: movb %al, 12(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 12(%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 12(%rdi)
+; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 10(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 8(%rdi)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 10(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 10(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 10(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 14(%rdi)
+; SSE2-NEXT: movb %al, 10(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 12(%rdi)
+; SSE2-NEXT: movb %al, 10(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 10(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 8(%rdi)
+; SSE2-NEXT: movb %al, 10(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 10(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 10(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 10(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 10(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 14(%rdi)
+; SSE2-NEXT: movb %al, 10(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 12(%rdi)
+; SSE2-NEXT: movb %al, 10(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 10(%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 10(%rdi)
+; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 8(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 8(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 8(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 14(%rdi)
+; SSE2-NEXT: movb %al, 8(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 12(%rdi)
+; SSE2-NEXT: movb %al, 8(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 10(%rdi)
+; SSE2-NEXT: movb %al, 8(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 8(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 8(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 8(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 8(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 8(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 14(%rdi)
+; SSE2-NEXT: movb %al, 8(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 12(%rdi)
+; SSE2-NEXT: movb %al, 8(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 10(%rdi)
+; SSE2-NEXT: movb %al, 8(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 8(%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 8(%rdi)
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 14(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 12(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 10(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 8(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 14(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 12(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 6(%rdi)
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 10(%rdi)
+; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 8(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 14(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 12(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 10(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 8(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 14(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 4(%rdi)
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 12(%rdi)
+; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 10(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 8(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 14(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 12(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 10(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 8(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 2(%rdi)
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 14(%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 12(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 10(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 8(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 14(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 12(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 10(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 8(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, (%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movq %rdi, %rax
@@ -8416,6 +8410,7 @@ define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, (%rdi)
; AVX512F-NEXT: movq %rdi, %rax
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v128i8:
@@ -8461,6 +8456,7 @@ define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512DQ-NEXT: kmovw %k0, (%rdi)
; AVX512DQ-NEXT: movq %rdi, %rax
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v128i8:
@@ -8584,98 +8580,98 @@ define <32 x i1> @test_cmp_v32f64(<32 x double> %a0, <32 x double> %a1) nounwind
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 2(%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, (%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movq %rdi, %rax
@@ -8690,185 +8686,178 @@ define <32 x i1> @test_cmp_v32f64(<32 x double> %a0, <32 x double> %a1) nounwind
; SSE42-NEXT: pushq %r12
; SSE42-NEXT: pushq %rbx
; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm8
-; SSE42-NEXT: cmpltpd %xmm7, %xmm8
-; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm7
-; SSE42-NEXT: cmpltpd %xmm6, %xmm7
-; SSE42-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm8[0,2]
-; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm6
-; SSE42-NEXT: cmpltpd %xmm5, %xmm6
-; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm5
-; SSE42-NEXT: cmpltpd %xmm4, %xmm5
-; SSE42-NEXT: pslld $31, %xmm7
-; SSE42-NEXT: psrad $31, %xmm7
-; SSE42-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm6[0,2]
-; SSE42-NEXT: movdqa {{.*#+}} xmm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSE42-NEXT: pshufb %xmm4, %xmm7
-; SSE42-NEXT: pslld $31, %xmm5
-; SSE42-NEXT: psrad $31, %xmm5
-; SSE42-NEXT: pshufb %xmm4, %xmm5
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm7[0]
-; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm6
-; SSE42-NEXT: cmpltpd %xmm3, %xmm6
+; SSE42-NEXT: cmpltpd %xmm3, %xmm8
; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm3
; SSE42-NEXT: cmpltpd %xmm2, %xmm3
+; SSE42-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm8[0,2]
; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm2
-; SSE42-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm6[0,2]
-; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm6
-; SSE42-NEXT: cmpltpd %xmm1, %xmm6
+; SSE42-NEXT: cmpltpd %xmm1, %xmm2
; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm1
; SSE42-NEXT: cmpltpd %xmm0, %xmm1
-; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm0
-; SSE42-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm6[0,2]
-; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm6
-; SSE42-NEXT: psllw $15, %xmm5
-; SSE42-NEXT: psraw $15, %xmm5
; SSE42-NEXT: pslld $31, %xmm3
; SSE42-NEXT: psrad $31, %xmm3
-; SSE42-NEXT: pshufb %xmm4, %xmm3
+; SSE42-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSE42-NEXT: movdqa {{.*#+}} xmm8 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE42-NEXT: pshufb %xmm8, %xmm3
; SSE42-NEXT: pslld $31, %xmm1
; SSE42-NEXT: psrad $31, %xmm1
-; SSE42-NEXT: pshufb %xmm4, %xmm1
+; SSE42-NEXT: pshufb %xmm8, %xmm1
; SSE42-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; SSE42-NEXT: movdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; SSE42-NEXT: pshufb %xmm3, %xmm5
-; SSE42-NEXT: psllw $15, %xmm1
-; SSE42-NEXT: psraw $15, %xmm1
-; SSE42-NEXT: pshufb %xmm3, %xmm1
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm0
+; SSE42-NEXT: cmpltpd %xmm7, %xmm0
+; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm7
+; SSE42-NEXT: cmpltpd %xmm6, %xmm7
+; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm3
+; SSE42-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm0[0,2]
+; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm6
+; SSE42-NEXT: cmpltpd %xmm5, %xmm6
+; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm0
+; SSE42-NEXT: cmpltpd %xmm4, %xmm0
+; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm2
+; SSE42-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm6[0,2]
+; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm4
+; SSE42-NEXT: pslld $31, %xmm7
+; SSE42-NEXT: psrad $31, %xmm7
+; SSE42-NEXT: pshufb %xmm8, %xmm7
+; SSE42-NEXT: pslld $31, %xmm0
+; SSE42-NEXT: psrad $31, %xmm0
+; SSE42-NEXT: pshufb %xmm8, %xmm0
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm7[0]
; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm5
; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm5
-; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm6
-; SSE42-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm5[0,2]
+; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm4
+; SSE42-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm5[0,2]
; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm5
; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm5
-; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm0
-; SSE42-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
+; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm2
+; SSE42-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[0,2]
; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm5
-; SSE42-NEXT: pslld $31, %xmm6
-; SSE42-NEXT: psrad $31, %xmm6
-; SSE42-NEXT: pshufb %xmm4, %xmm6
-; SSE42-NEXT: pslld $31, %xmm0
-; SSE42-NEXT: psrad $31, %xmm0
-; SSE42-NEXT: pshufb %xmm4, %xmm0
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0]
-; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm6
-; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm6
+; SSE42-NEXT: pslld $31, %xmm4
+; SSE42-NEXT: psrad $31, %xmm4
+; SSE42-NEXT: pshufb %xmm8, %xmm4
+; SSE42-NEXT: pslld $31, %xmm2
+; SSE42-NEXT: psrad $31, %xmm2
+; SSE42-NEXT: pshufb %xmm8, %xmm2
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm4
+; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm4
; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm5
-; SSE42-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm6[0,2]
-; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm6
-; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm6
-; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm2
-; SSE42-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm6[0,2]
+; SSE42-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm4[0,2]
+; SSE42-NEXT: movapd {{[0-9]+}}(%rsp), %xmm4
+; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm4
+; SSE42-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm3
+; SSE42-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[0,2]
; SSE42-NEXT: pslld $31, %xmm5
; SSE42-NEXT: psrad $31, %xmm5
-; SSE42-NEXT: pshufb %xmm4, %xmm5
-; SSE42-NEXT: pslld $31, %xmm2
-; SSE42-NEXT: psrad $31, %xmm2
-; SSE42-NEXT: pshufb %xmm4, %xmm2
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
-; SSE42-NEXT: psllw $15, %xmm0
-; SSE42-NEXT: psraw $15, %xmm0
-; SSE42-NEXT: pshufb %xmm3, %xmm0
+; SSE42-NEXT: pshufb %xmm8, %xmm5
+; SSE42-NEXT: pslld $31, %xmm3
+; SSE42-NEXT: psrad $31, %xmm3
+; SSE42-NEXT: pshufb %xmm8, %xmm3
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
+; SSE42-NEXT: psllw $15, %xmm3
+; SSE42-NEXT: psraw $15, %xmm3
+; SSE42-NEXT: pextrb $14, %xmm3, %ecx
+; SSE42-NEXT: pextrb $12, %xmm3, %edx
+; SSE42-NEXT: pextrb $10, %xmm3, %r8d
+; SSE42-NEXT: pextrb $8, %xmm3, %r10d
+; SSE42-NEXT: pextrb $6, %xmm3, %r14d
+; SSE42-NEXT: pextrb $4, %xmm3, %r12d
+; SSE42-NEXT: pextrb $2, %xmm3, %ebx
+; SSE42-NEXT: pextrb $0, %xmm3, %eax
; SSE42-NEXT: psllw $15, %xmm2
; SSE42-NEXT: psraw $15, %xmm2
-; SSE42-NEXT: pshufb %xmm3, %xmm2
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
-; SSE42-NEXT: pextrb $15, %xmm2, %eax
-; SSE42-NEXT: andb $1, %al
-; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $14, %xmm2, %eax
-; SSE42-NEXT: andb $1, %al
-; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $13, %xmm2, %r8d
-; SSE42-NEXT: pextrb $12, %xmm2, %r9d
-; SSE42-NEXT: pextrb $11, %xmm2, %r10d
-; SSE42-NEXT: pextrb $10, %xmm2, %r11d
-; SSE42-NEXT: pextrb $9, %xmm2, %r14d
-; SSE42-NEXT: pextrb $8, %xmm2, %r15d
-; SSE42-NEXT: pextrb $7, %xmm2, %r12d
-; SSE42-NEXT: pextrb $6, %xmm2, %r13d
-; SSE42-NEXT: pextrb $5, %xmm2, %ebx
-; SSE42-NEXT: pextrb $4, %xmm2, %ebp
-; SSE42-NEXT: pextrb $3, %xmm2, %eax
-; SSE42-NEXT: pextrb $2, %xmm2, %ecx
-; SSE42-NEXT: pextrb $1, %xmm2, %edx
-; SSE42-NEXT: pextrb $0, %xmm2, %esi
+; SSE42-NEXT: andb $1, %cl
+; SSE42-NEXT: movb %cl, 2(%rdi)
+; SSE42-NEXT: andb $1, %dl
+; SSE42-NEXT: movb %dl, 2(%rdi)
+; SSE42-NEXT: pextrb $14, %xmm2, %edx
+; SSE42-NEXT: pextrb $12, %xmm2, %esi
+; SSE42-NEXT: pextrb $10, %xmm2, %r9d
+; SSE42-NEXT: pextrb $8, %xmm2, %r11d
+; SSE42-NEXT: pextrb $6, %xmm2, %r15d
+; SSE42-NEXT: pextrb $4, %xmm2, %r13d
+; SSE42-NEXT: pextrb $2, %xmm2, %ebp
+; SSE42-NEXT: pextrb $0, %xmm2, %ecx
+; SSE42-NEXT: psllw $15, %xmm0
+; SSE42-NEXT: psraw $15, %xmm0
; SSE42-NEXT: andb $1, %r8b
; SSE42-NEXT: movb %r8b, 2(%rdi)
-; SSE42-NEXT: andb $1, %r9b
-; SSE42-NEXT: movb %r9b, 2(%rdi)
; SSE42-NEXT: andb $1, %r10b
; SSE42-NEXT: movb %r10b, 2(%rdi)
-; SSE42-NEXT: andb $1, %r11b
-; SSE42-NEXT: movb %r11b, 2(%rdi)
; SSE42-NEXT: andb $1, %r14b
; SSE42-NEXT: movb %r14b, 2(%rdi)
-; SSE42-NEXT: andb $1, %r15b
-; SSE42-NEXT: movb %r15b, 2(%rdi)
; SSE42-NEXT: andb $1, %r12b
; SSE42-NEXT: movb %r12b, 2(%rdi)
-; SSE42-NEXT: andb $1, %r13b
-; SSE42-NEXT: movb %r13b, 2(%rdi)
; SSE42-NEXT: andb $1, %bl
; SSE42-NEXT: movb %bl, 2(%rdi)
-; SSE42-NEXT: andb $1, %bpl
-; SSE42-NEXT: movb %bpl, 2(%rdi)
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: andb $1, %cl
-; SSE42-NEXT: movb %cl, 2(%rdi)
; SSE42-NEXT: andb $1, %dl
; SSE42-NEXT: movb %dl, 2(%rdi)
; SSE42-NEXT: andb $1, %sil
; SSE42-NEXT: movb %sil, 2(%rdi)
-; SSE42-NEXT: pextrb $15, %xmm1, %eax
-; SSE42-NEXT: andb $1, %al
-; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $14, %xmm1, %eax
-; SSE42-NEXT: andb $1, %al
-; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $13, %xmm1, %r8d
-; SSE42-NEXT: pextrb $12, %xmm1, %r9d
-; SSE42-NEXT: pextrb $11, %xmm1, %r10d
-; SSE42-NEXT: pextrb $10, %xmm1, %r11d
-; SSE42-NEXT: pextrb $9, %xmm1, %r14d
-; SSE42-NEXT: pextrb $8, %xmm1, %r15d
-; SSE42-NEXT: pextrb $7, %xmm1, %r12d
-; SSE42-NEXT: pextrb $6, %xmm1, %r13d
-; SSE42-NEXT: pextrb $5, %xmm1, %ebx
-; SSE42-NEXT: pextrb $4, %xmm1, %ebp
-; SSE42-NEXT: pextrb $3, %xmm1, %eax
-; SSE42-NEXT: pextrb $2, %xmm1, %ecx
-; SSE42-NEXT: pextrb $1, %xmm1, %edx
-; SSE42-NEXT: pextrb $0, %xmm1, %esi
+; SSE42-NEXT: pextrb $14, %xmm0, %esi
+; SSE42-NEXT: pextrb $12, %xmm0, %edx
+; SSE42-NEXT: pextrb $10, %xmm0, %r8d
+; SSE42-NEXT: pextrb $8, %xmm0, %r10d
+; SSE42-NEXT: pextrb $6, %xmm0, %r14d
+; SSE42-NEXT: pextrb $4, %xmm0, %r12d
+; SSE42-NEXT: pextrb $2, %xmm0, %ebx
+; SSE42-NEXT: pextrb $0, %xmm0, %eax
+; SSE42-NEXT: psllw $15, %xmm1
+; SSE42-NEXT: psraw $15, %xmm1
+; SSE42-NEXT: andb $1, %r9b
+; SSE42-NEXT: movb %r9b, 2(%rdi)
+; SSE42-NEXT: andb $1, %r11b
+; SSE42-NEXT: movb %r11b, 2(%rdi)
+; SSE42-NEXT: andb $1, %r15b
+; SSE42-NEXT: movb %r15b, 2(%rdi)
+; SSE42-NEXT: andb $1, %r13b
+; SSE42-NEXT: movb %r13b, 2(%rdi)
+; SSE42-NEXT: andb $1, %bpl
+; SSE42-NEXT: movb %bpl, 2(%rdi)
+; SSE42-NEXT: andb $1, %cl
+; SSE42-NEXT: movb %cl, 2(%rdi)
+; SSE42-NEXT: andb $1, %sil
+; SSE42-NEXT: movb %sil, (%rdi)
+; SSE42-NEXT: andb $1, %dl
+; SSE42-NEXT: movb %dl, (%rdi)
+; SSE42-NEXT: pextrb $14, %xmm1, %r9d
+; SSE42-NEXT: pextrb $12, %xmm1, %r11d
+; SSE42-NEXT: pextrb $10, %xmm1, %r15d
+; SSE42-NEXT: pextrb $8, %xmm1, %r13d
+; SSE42-NEXT: pextrb $6, %xmm1, %ecx
+; SSE42-NEXT: pextrb $4, %xmm1, %edx
+; SSE42-NEXT: pextrb $2, %xmm1, %esi
+; SSE42-NEXT: pextrb $0, %xmm1, %ebp
; SSE42-NEXT: andb $1, %r8b
; SSE42-NEXT: movb %r8b, (%rdi)
-; SSE42-NEXT: andb $1, %r9b
-; SSE42-NEXT: movb %r9b, (%rdi)
; SSE42-NEXT: andb $1, %r10b
; SSE42-NEXT: movb %r10b, (%rdi)
-; SSE42-NEXT: andb $1, %r11b
-; SSE42-NEXT: movb %r11b, (%rdi)
; SSE42-NEXT: andb $1, %r14b
; SSE42-NEXT: movb %r14b, (%rdi)
-; SSE42-NEXT: andb $1, %r15b
-; SSE42-NEXT: movb %r15b, (%rdi)
; SSE42-NEXT: andb $1, %r12b
; SSE42-NEXT: movb %r12b, (%rdi)
-; SSE42-NEXT: andb $1, %r13b
-; SSE42-NEXT: movb %r13b, (%rdi)
; SSE42-NEXT: andb $1, %bl
; SSE42-NEXT: movb %bl, (%rdi)
-; SSE42-NEXT: andb $1, %bpl
-; SSE42-NEXT: movb %bpl, (%rdi)
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: andb $1, %r9b
+; SSE42-NEXT: movb %r9b, (%rdi)
+; SSE42-NEXT: andb $1, %r11b
+; SSE42-NEXT: movb %r11b, (%rdi)
+; SSE42-NEXT: andb $1, %r15b
+; SSE42-NEXT: movb %r15b, (%rdi)
+; SSE42-NEXT: andb $1, %r13b
+; SSE42-NEXT: movb %r13b, (%rdi)
; SSE42-NEXT: andb $1, %cl
; SSE42-NEXT: movb %cl, (%rdi)
; SSE42-NEXT: andb $1, %dl
; SSE42-NEXT: movb %dl, (%rdi)
; SSE42-NEXT: andb $1, %sil
; SSE42-NEXT: movb %sil, (%rdi)
+; SSE42-NEXT: andb $1, %bpl
+; SSE42-NEXT: movb %bpl, (%rdi)
; SSE42-NEXT: movq %rdi, %rax
; SSE42-NEXT: popq %rbx
; SSE42-NEXT: popq %r12
@@ -9907,98 +9896,98 @@ define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
-; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
-; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 2(%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, (%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movq %rdi, %rax
@@ -10006,173 +9995,166 @@ define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
;
; SSE42-LABEL: test_cmp_v32i64:
; SSE42: # BB#0:
-; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
+; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm12
-; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
+; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm14
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm13
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm15
-; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm7
-; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm6
-; SSE42-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2]
-; SSE42-NEXT: pslld $31, %xmm6
-; SSE42-NEXT: psrad $31, %xmm6
-; SSE42-NEXT: movdqa {{.*#+}} xmm7 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSE42-NEXT: pshufb %xmm7, %xmm6
-; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm5
-; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm4
-; SSE42-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm5[0,2]
-; SSE42-NEXT: pslld $31, %xmm4
-; SSE42-NEXT: psrad $31, %xmm4
-; SSE42-NEXT: pshufb %xmm7, %xmm4
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
-; SSE42-NEXT: psllw $15, %xmm4
-; SSE42-NEXT: psraw $15, %xmm4
-; SSE42-NEXT: movdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; SSE42-NEXT: pshufb %xmm5, %xmm4
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm3
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm2
; SSE42-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
; SSE42-NEXT: pslld $31, %xmm2
; SSE42-NEXT: psrad $31, %xmm2
-; SSE42-NEXT: pshufb %xmm7, %xmm2
+; SSE42-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE42-NEXT: pshufb %xmm3, %xmm2
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm1
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm0
; SSE42-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE42-NEXT: pslld $31, %xmm0
; SSE42-NEXT: psrad $31, %xmm0
-; SSE42-NEXT: pshufb %xmm7, %xmm0
+; SSE42-NEXT: pshufb %xmm3, %xmm0
; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE42-NEXT: psllw $15, %xmm0
; SSE42-NEXT: psraw $15, %xmm0
-; SSE42-NEXT: pshufb %xmm5, %xmm0
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm7
+; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm6
+; SSE42-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2]
+; SSE42-NEXT: pslld $31, %xmm6
+; SSE42-NEXT: psrad $31, %xmm6
+; SSE42-NEXT: pshufb %xmm3, %xmm6
+; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm5
+; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm4
+; SSE42-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm5[0,2]
+; SSE42-NEXT: pslld $31, %xmm4
+; SSE42-NEXT: psrad $31, %xmm4
+; SSE42-NEXT: pshufb %xmm3, %xmm4
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
+; SSE42-NEXT: psllw $15, %xmm4
+; SSE42-NEXT: psraw $15, %xmm4
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm15
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm13
; SSE42-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,2],xmm15[0,2]
; SSE42-NEXT: pslld $31, %xmm13
; SSE42-NEXT: psrad $31, %xmm13
-; SSE42-NEXT: pshufb %xmm7, %xmm13
+; SSE42-NEXT: pshufb %xmm3, %xmm13
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm14
-; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm9
-; SSE42-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2],xmm14[0,2]
-; SSE42-NEXT: pslld $31, %xmm9
-; SSE42-NEXT: psrad $31, %xmm9
-; SSE42-NEXT: pshufb %xmm7, %xmm9
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm13[0]
-; SSE42-NEXT: psllw $15, %xmm9
-; SSE42-NEXT: psraw $15, %xmm9
-; SSE42-NEXT: pshufb %xmm5, %xmm9
+; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm8
+; SSE42-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm14[0,2]
+; SSE42-NEXT: pslld $31, %xmm8
+; SSE42-NEXT: psrad $31, %xmm8
+; SSE42-NEXT: pshufb %xmm3, %xmm8
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm13[0]
+; SSE42-NEXT: psllw $15, %xmm8
+; SSE42-NEXT: psraw $15, %xmm8
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm12
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm10
; SSE42-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,2],xmm12[0,2]
; SSE42-NEXT: pslld $31, %xmm10
; SSE42-NEXT: psrad $31, %xmm10
-; SSE42-NEXT: pshufb %xmm7, %xmm10
+; SSE42-NEXT: pshufb %xmm3, %xmm10
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm11
-; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm8
-; SSE42-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm11[0,2]
-; SSE42-NEXT: pslld $31, %xmm8
-; SSE42-NEXT: psrad $31, %xmm8
-; SSE42-NEXT: pshufb %xmm7, %xmm8
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm10[0]
-; SSE42-NEXT: psllw $15, %xmm8
-; SSE42-NEXT: psraw $15, %xmm8
-; SSE42-NEXT: pshufb %xmm5, %xmm8
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0]
-; SSE42-NEXT: pextrb $15, %xmm8, %eax
+; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm9
+; SSE42-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2],xmm11[0,2]
+; SSE42-NEXT: pslld $31, %xmm9
+; SSE42-NEXT: psrad $31, %xmm9
+; SSE42-NEXT: pshufb %xmm3, %xmm9
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm10[0]
+; SSE42-NEXT: psllw $15, %xmm9
+; SSE42-NEXT: psraw $15, %xmm9
+; SSE42-NEXT: pextrb $14, %xmm9, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $14, %xmm8, %eax
+; SSE42-NEXT: pextrb $12, %xmm9, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $13, %xmm8, %eax
+; SSE42-NEXT: pextrb $10, %xmm9, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $12, %xmm8, %eax
+; SSE42-NEXT: pextrb $8, %xmm9, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $11, %xmm8, %eax
+; SSE42-NEXT: pextrb $6, %xmm9, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $10, %xmm8, %eax
+; SSE42-NEXT: pextrb $4, %xmm9, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $9, %xmm8, %eax
+; SSE42-NEXT: pextrb $2, %xmm9, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $8, %xmm8, %eax
+; SSE42-NEXT: pextrb $0, %xmm9, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $7, %xmm8, %eax
+; SSE42-NEXT: pextrb $14, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $6, %xmm8, %eax
+; SSE42-NEXT: pextrb $12, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $5, %xmm8, %eax
+; SSE42-NEXT: pextrb $10, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $4, %xmm8, %eax
+; SSE42-NEXT: pextrb $8, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $3, %xmm8, %eax
+; SSE42-NEXT: pextrb $6, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $2, %xmm8, %eax
+; SSE42-NEXT: pextrb $4, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $1, %xmm8, %eax
+; SSE42-NEXT: pextrb $2, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
; SSE42-NEXT: pextrb $0, %xmm8, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, 2(%rdi)
-; SSE42-NEXT: pextrb $15, %xmm0, %eax
+; SSE42-NEXT: pextrb $14, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $14, %xmm0, %eax
+; SSE42-NEXT: pextrb $12, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $13, %xmm0, %eax
+; SSE42-NEXT: pextrb $10, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $12, %xmm0, %eax
+; SSE42-NEXT: pextrb $8, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $11, %xmm0, %eax
+; SSE42-NEXT: pextrb $6, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $10, %xmm0, %eax
+; SSE42-NEXT: pextrb $4, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $9, %xmm0, %eax
+; SSE42-NEXT: pextrb $2, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $8, %xmm0, %eax
+; SSE42-NEXT: pextrb $0, %xmm4, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $7, %xmm0, %eax
+; SSE42-NEXT: pextrb $14, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $6, %xmm0, %eax
+; SSE42-NEXT: pextrb $12, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $5, %xmm0, %eax
+; SSE42-NEXT: pextrb $10, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $4, %xmm0, %eax
+; SSE42-NEXT: pextrb $8, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $3, %xmm0, %eax
+; SSE42-NEXT: pextrb $6, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $2, %xmm0, %eax
+; SSE42-NEXT: pextrb $4, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
-; SSE42-NEXT: pextrb $1, %xmm0, %eax
+; SSE42-NEXT: pextrb $2, %xmm0, %eax
; SSE42-NEXT: andb $1, %al
; SSE42-NEXT: movb %al, (%rdi)
; SSE42-NEXT: pextrb $0, %xmm0, %eax
@@ -10187,24 +10169,24 @@ define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
; AVX1-NEXT: subq $32, %rsp
-; AVX1-NEXT: vmovaps 240(%rbp), %ymm8
+; AVX1-NEXT: vmovdqa 240(%rbp), %ymm8
; AVX1-NEXT: vextractf128 $1, %ymm8, %xmm9
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm10
; AVX1-NEXT: vpcmpgtq %xmm9, %xmm10, %xmm9
-; AVX1-NEXT: vmovaps 208(%rbp), %ymm10
+; AVX1-NEXT: vmovdqa 208(%rbp), %ymm10
; AVX1-NEXT: vpcmpgtq %xmm8, %xmm7, %xmm7
; AVX1-NEXT: vpacksswb %xmm9, %xmm7, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm10, %xmm9
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm9, %xmm7, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm10, %xmm6, %xmm6
-; AVX1-NEXT: vmovaps 176(%rbp), %ymm9
+; AVX1-NEXT: vmovdqa 176(%rbp), %ymm9
; AVX1-NEXT: vpacksswb %xmm7, %xmm6, %xmm6
; AVX1-NEXT: vpacksswb %xmm8, %xmm6, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm9, %xmm7
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm6, %xmm6
-; AVX1-NEXT: vmovaps 144(%rbp), %ymm10
+; AVX1-NEXT: vmovdqa 144(%rbp), %ymm10
; AVX1-NEXT: vpcmpgtq %xmm9, %xmm5, %xmm5
; AVX1-NEXT: vpacksswb %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vextractf128 $1, %ymm10, %xmm6
@@ -10212,26 +10194,26 @@ define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm7, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm10, %xmm4, %xmm4
; AVX1-NEXT: vpacksswb %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vmovaps 112(%rbp), %ymm6
+; AVX1-NEXT: vmovdqa 112(%rbp), %ymm6
; AVX1-NEXT: vpacksswb %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpacksswb %xmm8, %xmm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm5
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT: vmovaps 80(%rbp), %ymm7
+; AVX1-NEXT: vmovdqa 80(%rbp), %ymm7
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm3, %xmm3
; AVX1-NEXT: vpacksswb %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm5
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm2, %xmm2
-; AVX1-NEXT: vmovaps 48(%rbp), %ymm6
+; AVX1-NEXT: vmovdqa 48(%rbp), %ymm6
; AVX1-NEXT: vpacksswb %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpacksswb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vmovaps 16(%rbp), %ymm5
+; AVX1-NEXT: vmovdqa 16(%rbp), %ymm5
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm1, %xmm1
; AVX1-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm3
diff --git a/test/CodeGen/X86/vector-extend-inreg.ll b/test/CodeGen/X86/vector-extend-inreg.ll
new file mode 100644
index 000000000000..a8db0d4cd9d8
--- /dev/null
+++ b/test/CodeGen/X86/vector-extend-inreg.ll
@@ -0,0 +1,120 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X64-SSE
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=X64-AVX
+
+define i64 @extract_any_extend_vector_inreg_v16i64(<16 x i64> %a0, i32 %a1) nounwind {
+; X32-SSE-LABEL: extract_any_extend_vector_inreg_v16i64:
+; X32-SSE: # BB#0:
+; X32-SSE-NEXT: pushl %ebp
+; X32-SSE-NEXT: movl %esp, %ebp
+; X32-SSE-NEXT: andl $-128, %esp
+; X32-SSE-NEXT: subl $384, %esp # imm = 0x180
+; X32-SSE-NEXT: movl 88(%ebp), %ecx
+; X32-SSE-NEXT: movdqa 72(%ebp), %xmm0
+; X32-SSE-NEXT: xorps %xmm1, %xmm1
+; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT: movdqa %xmm0, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT: movaps %xmm1, (%esp)
+; X32-SSE-NEXT: movdqa %xmm0, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT: leal (%ecx,%ecx), %eax
+; X32-SSE-NEXT: andl $31, %eax
+; X32-SSE-NEXT: movl 128(%esp,%eax,4), %eax
+; X32-SSE-NEXT: leal 1(%ecx,%ecx), %ecx
+; X32-SSE-NEXT: andl $31, %ecx
+; X32-SSE-NEXT: movl (%esp,%ecx,4), %edx
+; X32-SSE-NEXT: movl %ebp, %esp
+; X32-SSE-NEXT: popl %ebp
+; X32-SSE-NEXT: retl
+;
+; X64-SSE-LABEL: extract_any_extend_vector_inreg_v16i64:
+; X64-SSE: # BB#0:
+; X64-SSE-NEXT: pushq %rbp
+; X64-SSE-NEXT: movq %rsp, %rbp
+; X64-SSE-NEXT: andq $-128, %rsp
+; X64-SSE-NEXT: subq $256, %rsp # imm = 0x100
+; X64-SSE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SSE-NEXT: psrldq {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
+; X64-SSE-NEXT: xorps %xmm0, %xmm0
+; X64-SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; X64-SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; X64-SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; X64-SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; X64-SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; X64-SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; X64-SSE-NEXT: movaps %xmm0, (%rsp)
+; X64-SSE-NEXT: movdqa %xmm7, {{[0-9]+}}(%rsp)
+; X64-SSE-NEXT: andl $15, %edi
+; X64-SSE-NEXT: movq (%rsp,%rdi,8), %rax
+; X64-SSE-NEXT: movq %rbp, %rsp
+; X64-SSE-NEXT: popq %rbp
+; X64-SSE-NEXT: retq
+;
+; X32-AVX-LABEL: extract_any_extend_vector_inreg_v16i64:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: pushl %ebp
+; X32-AVX-NEXT: movl %esp, %ebp
+; X32-AVX-NEXT: andl $-128, %esp
+; X32-AVX-NEXT: subl $384, %esp # imm = 0x180
+; X32-AVX-NEXT: movl 40(%ebp), %ecx
+; X32-AVX-NEXT: vbroadcastsd 32(%ebp), %ymm0
+; X32-AVX-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; X32-AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; X32-AVX-NEXT: vmovapd %ymm1, {{[0-9]+}}(%esp)
+; X32-AVX-NEXT: vmovapd %ymm1, {{[0-9]+}}(%esp)
+; X32-AVX-NEXT: vmovapd %ymm1, {{[0-9]+}}(%esp)
+; X32-AVX-NEXT: vmovapd %ymm0, {{[0-9]+}}(%esp)
+; X32-AVX-NEXT: vmovapd %ymm1, {{[0-9]+}}(%esp)
+; X32-AVX-NEXT: vmovapd %ymm1, {{[0-9]+}}(%esp)
+; X32-AVX-NEXT: vmovapd %ymm1, (%esp)
+; X32-AVX-NEXT: vmovapd %ymm0, {{[0-9]+}}(%esp)
+; X32-AVX-NEXT: leal (%ecx,%ecx), %eax
+; X32-AVX-NEXT: andl $31, %eax
+; X32-AVX-NEXT: movl 128(%esp,%eax,4), %eax
+; X32-AVX-NEXT: leal 1(%ecx,%ecx), %ecx
+; X32-AVX-NEXT: andl $31, %ecx
+; X32-AVX-NEXT: movl (%esp,%ecx,4), %edx
+; X32-AVX-NEXT: movl %ebp, %esp
+; X32-AVX-NEXT: popl %ebp
+; X32-AVX-NEXT: vzeroupper
+; X32-AVX-NEXT: retl
+;
+; X64-AVX-LABEL: extract_any_extend_vector_inreg_v16i64:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: pushq %rbp
+; X64-AVX-NEXT: movq %rsp, %rbp
+; X64-AVX-NEXT: andq $-128, %rsp
+; X64-AVX-NEXT: subq $256, %rsp # imm = 0x100
+; X64-AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm3[3,1,2,3]
+; X64-AVX-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; X64-AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; X64-AVX-NEXT: vmovapd %ymm1, {{[0-9]+}}(%rsp)
+; X64-AVX-NEXT: vmovapd %ymm1, {{[0-9]+}}(%rsp)
+; X64-AVX-NEXT: vmovapd %ymm1, (%rsp)
+; X64-AVX-NEXT: vmovapd %ymm0, {{[0-9]+}}(%rsp)
+; X64-AVX-NEXT: andl $15, %edi
+; X64-AVX-NEXT: movq (%rsp,%rdi,8), %rax
+; X64-AVX-NEXT: movq %rbp, %rsp
+; X64-AVX-NEXT: popq %rbp
+; X64-AVX-NEXT: vzeroupper
+; X64-AVX-NEXT: retq
+ %1 = extractelement <16 x i64> %a0, i32 15
+ %2 = insertelement <16 x i64> zeroinitializer, i64 %1, i32 4
+ %3 = extractelement <16 x i64> %2, i32 %a1
+ ret i64 %3
+}
diff --git a/test/CodeGen/X86/vector-half-conversions.ll b/test/CodeGen/X86/vector-half-conversions.ll
index 5bf6fbeb6235..a2a7363d7894 100644
--- a/test/CodeGen/X86/vector-half-conversions.ll
+++ b/test/CodeGen/X86/vector-half-conversions.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+f16c | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+f16c | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,-f16c | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+f16c -verify-machineinstrs | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+f16c -verify-machineinstrs | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,-f16c -verify-machineinstrs | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl -verify-machineinstrs | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512VL
;
; Half to Float
@@ -29,6 +29,7 @@ define float @cvt_i16_to_f32(i16 %a0) nounwind {
; AVX512F-NEXT: vmovd %eax, %xmm0
; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_i16_to_f32:
@@ -122,6 +123,7 @@ define <4 x float> @cvt_4i16_to_4f32(<4 x i16> %a0) nounwind {
; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_4i16_to_4f32:
@@ -232,6 +234,7 @@ define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) nounwind {
; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_8i16_to_4f32:
@@ -880,6 +883,7 @@ define float @load_cvt_i16_to_f32(i16* %a0) nounwind {
; AVX512F-NEXT: vmovd %eax, %xmm0
; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: load_cvt_i16_to_f32:
@@ -950,6 +954,7 @@ define <4 x float> @load_cvt_4i16_to_4f32(<4 x i16>* %a0) nounwind {
; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: load_cvt_4i16_to_4f32:
@@ -1053,6 +1058,7 @@ define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) nounwind {
; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: load_cvt_8i16_to_4f32:
@@ -1534,6 +1540,7 @@ define double @cvt_i16_to_f64(i16 %a0) nounwind {
; AVX512F-NEXT: vmovd %eax, %xmm0
; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_i16_to_f64:
@@ -1598,6 +1605,7 @@ define <2 x double> @cvt_2i16_to_2f64(<2 x i16> %a0) nounwind {
; AVX512F-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_2i16_to_2f64:
@@ -1789,6 +1797,7 @@ define <2 x double> @cvt_8i16_to_2f64(<8 x i16> %a0) nounwind {
; AVX512F-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_8i16_to_2f64:
@@ -1941,25 +1950,25 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
; AVX1-LABEL: cvt_8i16_to_8f64:
; AVX1: # BB#0:
; AVX1-NEXT: vmovq %xmm0, %rdx
-; AVX1-NEXT: movq %rdx, %r9
+; AVX1-NEXT: movq %rdx, %r8
; AVX1-NEXT: movl %edx, %r10d
-; AVX1-NEXT: movswl %dx, %r8d
+; AVX1-NEXT: movswl %dx, %r9d
; AVX1-NEXT: shrq $48, %rdx
-; AVX1-NEXT: shrq $32, %r9
+; AVX1-NEXT: shrq $32, %r8
; AVX1-NEXT: shrl $16, %r10d
; AVX1-NEXT: vpextrq $1, %xmm0, %rdi
-; AVX1-NEXT: movq %rdi, %rsi
-; AVX1-NEXT: movl %edi, %eax
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: movl %edi, %esi
; AVX1-NEXT: movswl %di, %ecx
; AVX1-NEXT: shrq $48, %rdi
-; AVX1-NEXT: shrq $32, %rsi
-; AVX1-NEXT: shrl $16, %eax
-; AVX1-NEXT: cwtl
-; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: shrq $32, %rax
+; AVX1-NEXT: shrl $16, %esi
+; AVX1-NEXT: movswl %si, %esi
+; AVX1-NEXT: vmovd %esi, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm1
; AVX1-NEXT: vmovd %ecx, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm2
-; AVX1-NEXT: movswl %si, %eax
+; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm3
; AVX1-NEXT: movswl %di, %eax
@@ -1968,9 +1977,9 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
; AVX1-NEXT: movswl %r10w, %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX1-NEXT: vmovd %r8d, %xmm5
+; AVX1-NEXT: vmovd %r9d, %xmm5
; AVX1-NEXT: vcvtph2ps %xmm5, %xmm5
-; AVX1-NEXT: movswl %r9w, %eax
+; AVX1-NEXT: movswl %r8w, %eax
; AVX1-NEXT: vmovd %eax, %xmm6
; AVX1-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX1-NEXT: movswl %dx, %eax
@@ -1995,25 +2004,25 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
; AVX2-LABEL: cvt_8i16_to_8f64:
; AVX2: # BB#0:
; AVX2-NEXT: vmovq %xmm0, %rdx
-; AVX2-NEXT: movq %rdx, %r9
+; AVX2-NEXT: movq %rdx, %r8
; AVX2-NEXT: movl %edx, %r10d
-; AVX2-NEXT: movswl %dx, %r8d
+; AVX2-NEXT: movswl %dx, %r9d
; AVX2-NEXT: shrq $48, %rdx
-; AVX2-NEXT: shrq $32, %r9
+; AVX2-NEXT: shrq $32, %r8
; AVX2-NEXT: shrl $16, %r10d
; AVX2-NEXT: vpextrq $1, %xmm0, %rdi
-; AVX2-NEXT: movq %rdi, %rsi
-; AVX2-NEXT: movl %edi, %eax
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: movl %edi, %esi
; AVX2-NEXT: movswl %di, %ecx
; AVX2-NEXT: shrq $48, %rdi
-; AVX2-NEXT: shrq $32, %rsi
-; AVX2-NEXT: shrl $16, %eax
-; AVX2-NEXT: cwtl
-; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: shrq $32, %rax
+; AVX2-NEXT: shrl $16, %esi
+; AVX2-NEXT: movswl %si, %esi
+; AVX2-NEXT: vmovd %esi, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm1
; AVX2-NEXT: vmovd %ecx, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm2
-; AVX2-NEXT: movswl %si, %eax
+; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm3
; AVX2-NEXT: movswl %di, %eax
@@ -2022,9 +2031,9 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
; AVX2-NEXT: movswl %r10w, %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX2-NEXT: vmovd %r8d, %xmm5
+; AVX2-NEXT: vmovd %r9d, %xmm5
; AVX2-NEXT: vcvtph2ps %xmm5, %xmm5
-; AVX2-NEXT: movswl %r9w, %eax
+; AVX2-NEXT: movswl %r8w, %eax
; AVX2-NEXT: vmovd %eax, %xmm6
; AVX2-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX2-NEXT: movswl %dx, %eax
@@ -2187,6 +2196,7 @@ define double @load_cvt_i16_to_f64(i16* %a0) nounwind {
; AVX512F-NEXT: vmovd %eax, %xmm0
; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: load_cvt_i16_to_f64:
@@ -2240,6 +2250,7 @@ define <2 x double> @load_cvt_2i16_to_2f64(<2 x i16>* %a0) nounwind {
; AVX512F-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: load_cvt_2i16_to_2f64:
@@ -2684,6 +2695,7 @@ define i16 @cvt_f32_to_i16(float %a0) nounwind {
; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0
; AVX512F-NEXT: vmovd %xmm0, %eax
; AVX512F-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_f32_to_i16:
@@ -2769,6 +2781,7 @@ define <4 x i16> @cvt_4f32_to_4i16(<4 x float> %a0) nounwind {
; AVX512F-NEXT: shlq $32, %rdx
; AVX512F-NEXT: orq %rcx, %rdx
; AVX512F-NEXT: vmovq %rdx, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_4f32_to_4i16:
@@ -2822,7 +2835,7 @@ define <8 x i16> @cvt_4f32_to_8i16_undef(<4 x float> %a0) nounwind {
; AVX1-NEXT: shlq $32, %rdx
; AVX1-NEXT: orq %rcx, %rdx
; AVX1-NEXT: vmovq %rdx, %xmm0
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4f32_to_8i16_undef:
@@ -2847,7 +2860,7 @@ define <8 x i16> @cvt_4f32_to_8i16_undef(<4 x float> %a0) nounwind {
; AVX2-NEXT: shlq $32, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovq %rdx, %xmm0
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_4f32_to_8i16_undef:
@@ -2873,7 +2886,8 @@ define <8 x i16> @cvt_4f32_to_8i16_undef(<4 x float> %a0) nounwind {
; AVX512F-NEXT: shlq $32, %rdx
; AVX512F-NEXT: orq %rcx, %rdx
; AVX512F-NEXT: vmovq %rdx, %xmm0
-; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_4f32_to_8i16_undef:
@@ -2899,7 +2913,6 @@ define <8 x i16> @cvt_4f32_to_8i16_undef(<4 x float> %a0) nounwind {
; AVX512VL-NEXT: orq %rcx, %rdx
; AVX512VL-NEXT: vmovq %rdx, %xmm0
; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512VL-NEXT: retq
%1 = fptrunc <4 x float> %a0 to <4 x half>
@@ -2931,7 +2944,7 @@ define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) nounwind {
; AVX1-NEXT: shlq $32, %rdx
; AVX1-NEXT: orq %rcx, %rdx
; AVX1-NEXT: vmovq %rdx, %xmm0
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4f32_to_8i16_zero:
@@ -2956,7 +2969,7 @@ define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) nounwind {
; AVX2-NEXT: shlq $32, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovq %rdx, %xmm0
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_4f32_to_8i16_zero:
@@ -2982,7 +2995,8 @@ define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) nounwind {
; AVX512F-NEXT: shlq $32, %rdx
; AVX512F-NEXT: orq %rcx, %rdx
; AVX512F-NEXT: vmovq %rdx, %xmm0
-; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_4f32_to_8i16_zero:
@@ -3008,7 +3022,6 @@ define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) nounwind {
; AVX512VL-NEXT: orq %rcx, %rdx
; AVX512VL-NEXT: vmovq %rdx, %xmm0
; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
@@ -3159,6 +3172,7 @@ define <8 x i16> @cvt_8f32_to_8i16(<8 x float> %a0) nounwind {
; AVX512F-NEXT: vmovq %rsi, %xmm0
; AVX512F-NEXT: vmovq %rax, %xmm1
; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_8f32_to_8i16:
@@ -3205,6 +3219,7 @@ define <8 x i16> @cvt_8f32_to_8i16(<8 x float> %a0) nounwind {
; AVX512VL-NEXT: vmovq %rsi, %xmm0
; AVX512VL-NEXT: vmovq %rax, %xmm1
; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
%1 = fptrunc <8 x float> %a0 to <8 x half>
%2 = bitcast <8 x half> %1 to <8 x i16>
@@ -3511,6 +3526,7 @@ define void @store_cvt_f32_to_i16(float %a0, i16* %a1) nounwind {
; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0
; AVX512F-NEXT: vmovd %xmm0, %eax
; AVX512F-NEXT: movw %ax, (%rdi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: store_cvt_f32_to_i16:
@@ -3582,6 +3598,7 @@ define void @store_cvt_4f32_to_4i16(<4 x float> %a0, <4 x i16>* %a1) nounwind {
; AVX512F-NEXT: movw %dx, 6(%rdi)
; AVX512F-NEXT: movw %cx, 4(%rdi)
; AVX512F-NEXT: movw %ax, 2(%rdi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: store_cvt_4f32_to_4i16:
@@ -3631,7 +3648,7 @@ define void @store_cvt_4f32_to_8i16_undef(<4 x float> %a0, <8 x i16>* %a1) nounw
; AVX1-NEXT: shlq $32, %rdx
; AVX1-NEXT: orq %rcx, %rdx
; AVX1-NEXT: vmovq %rdx, %xmm0
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: retq
;
@@ -3657,7 +3674,7 @@ define void @store_cvt_4f32_to_8i16_undef(<4 x float> %a0, <8 x i16>* %a1) nounw
; AVX2-NEXT: shlq $32, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovq %rdx, %xmm0
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX2-NEXT: vmovdqa %xmm0, (%rdi)
; AVX2-NEXT: retq
;
@@ -3684,8 +3701,9 @@ define void @store_cvt_4f32_to_8i16_undef(<4 x float> %a0, <8 x i16>* %a1) nounw
; AVX512F-NEXT: shlq $32, %rdx
; AVX512F-NEXT: orq %rcx, %rdx
; AVX512F-NEXT: vmovq %rdx, %xmm0
-; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX512F-NEXT: vmovdqa %xmm0, (%rdi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: store_cvt_4f32_to_8i16_undef:
@@ -3711,7 +3729,6 @@ define void @store_cvt_4f32_to_8i16_undef(<4 x float> %a0, <8 x i16>* %a1) nounw
; AVX512VL-NEXT: orq %rcx, %rdx
; AVX512VL-NEXT: vmovq %rdx, %xmm0
; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512VL-NEXT: vmovdqa %xmm0, (%rdi)
; AVX512VL-NEXT: retq
@@ -3745,7 +3762,7 @@ define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, <8 x i16>* %a1) nounwi
; AVX1-NEXT: shlq $32, %rdx
; AVX1-NEXT: orq %rcx, %rdx
; AVX1-NEXT: vmovq %rdx, %xmm0
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: retq
;
@@ -3771,7 +3788,7 @@ define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, <8 x i16>* %a1) nounwi
; AVX2-NEXT: shlq $32, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovq %rdx, %xmm0
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vmovdqa %xmm0, (%rdi)
; AVX2-NEXT: retq
;
@@ -3798,8 +3815,9 @@ define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, <8 x i16>* %a1) nounwi
; AVX512F-NEXT: shlq $32, %rdx
; AVX512F-NEXT: orq %rcx, %rdx
; AVX512F-NEXT: vmovq %rdx, %xmm0
-; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vmovdqa %xmm0, (%rdi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: store_cvt_4f32_to_8i16_zero:
@@ -3825,7 +3843,6 @@ define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, <8 x i16>* %a1) nounwi
; AVX512VL-NEXT: orq %rcx, %rdx
; AVX512VL-NEXT: vmovq %rdx, %xmm0
; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
@@ -3945,6 +3962,7 @@ define void @store_cvt_8f32_to_8i16(<8 x float> %a0, <8 x i16>* %a1) nounwind {
; AVX512F-NEXT: movw %r10w, 6(%rdi)
; AVX512F-NEXT: movw %r9w, 4(%rdi)
; AVX512F-NEXT: movw %r8w, 2(%rdi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: store_cvt_8f32_to_8i16:
@@ -3980,6 +3998,7 @@ define void @store_cvt_8f32_to_8i16(<8 x float> %a0, <8 x i16>* %a1) nounwind {
; AVX512VL-NEXT: movw %r10w, 6(%rdi)
; AVX512VL-NEXT: movw %r9w, 4(%rdi)
; AVX512VL-NEXT: movw %r8w, 2(%rdi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
%1 = fptrunc <8 x float> %a0 to <8 x half>
%2 = bitcast <8 x half> %1 to <8 x i16>
@@ -4187,6 +4206,7 @@ define void @store_cvt_16f32_to_16i16(<16 x float> %a0, <16 x i16>* %a1) nounwin
; AVX512F-NEXT: movw %ax, 4(%rdi)
; AVX512F-NEXT: vmovd %xmm4, %eax
; AVX512F-NEXT: movw %ax, 2(%rdi)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: store_cvt_16f32_to_16i16:
@@ -4254,6 +4274,7 @@ define void @store_cvt_16f32_to_16i16(<16 x float> %a0, <16 x i16>* %a1) nounwin
; AVX512VL-NEXT: movw %ax, 4(%rdi)
; AVX512VL-NEXT: vmovd %xmm4, %eax
; AVX512VL-NEXT: movw %ax, 2(%rdi)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
%1 = fptrunc <16 x float> %a0 to <16 x half>
%2 = bitcast <16 x half> %1 to <16 x i16>
@@ -4315,7 +4336,7 @@ define <4 x i16> @cvt_4f64_to_4i16(<4 x double> %a0) nounwind {
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
; AVX1-NEXT: orl %ebx, %r14d
-; AVX1-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
+; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -4379,11 +4400,13 @@ define <4 x i16> @cvt_4f64_to_4i16(<4 x double> %a0) nounwind {
; AVX512F-NEXT: subq $40, %rsp
; AVX512F-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movw %ax, %bx
; AVX512F-NEXT: shll $16, %ebx
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %r14d
; AVX512F-NEXT: orl %ebx, %r14d
@@ -4391,6 +4414,7 @@ define <4 x i16> @cvt_4f64_to_4i16(<4 x double> %a0) nounwind {
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movw %ax, %bx
; AVX512F-NEXT: shll $16, %ebx
@@ -4413,11 +4437,13 @@ define <4 x i16> @cvt_4f64_to_4i16(<4 x double> %a0) nounwind {
; AVX512VL-NEXT: subq $40, %rsp
; AVX512VL-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movw %ax, %bx
; AVX512VL-NEXT: shll $16, %ebx
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %r14d
; AVX512VL-NEXT: orl %ebx, %r14d
@@ -4425,6 +4451,7 @@ define <4 x i16> @cvt_4f64_to_4i16(<4 x double> %a0) nounwind {
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512VL-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movw %ax, %bx
; AVX512VL-NEXT: shll $16, %ebx
@@ -4462,7 +4489,7 @@ define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
; AVX1-NEXT: orl %ebx, %r14d
-; AVX1-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
+; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -4477,7 +4504,7 @@ define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
; AVX1-NEXT: shlq $32, %rax
; AVX1-NEXT: orq %r14, %rax
; AVX1-NEXT: vmovq %rax, %xmm0
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX1-NEXT: addq $40, %rsp
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: popq %r14
@@ -4515,7 +4542,7 @@ define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
; AVX2-NEXT: shlq $32, %rax
; AVX2-NEXT: orq %r14, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX2-NEXT: addq $40, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r14
@@ -4528,11 +4555,13 @@ define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
; AVX512F-NEXT: subq $40, %rsp
; AVX512F-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movw %ax, %bx
; AVX512F-NEXT: shll $16, %ebx
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %r14d
; AVX512F-NEXT: orl %ebx, %r14d
@@ -4540,6 +4569,7 @@ define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movw %ax, %bx
; AVX512F-NEXT: shll $16, %ebx
@@ -4550,7 +4580,7 @@ define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
; AVX512F-NEXT: shlq $32, %rax
; AVX512F-NEXT: orq %r14, %rax
; AVX512F-NEXT: vmovq %rax, %xmm0
-; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX512F-NEXT: addq $40, %rsp
; AVX512F-NEXT: popq %rbx
; AVX512F-NEXT: popq %r14
@@ -4563,11 +4593,13 @@ define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
; AVX512VL-NEXT: subq $40, %rsp
; AVX512VL-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movw %ax, %bx
; AVX512VL-NEXT: shll $16, %ebx
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %r14d
; AVX512VL-NEXT: orl %ebx, %r14d
@@ -4575,6 +4607,7 @@ define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512VL-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movw %ax, %bx
; AVX512VL-NEXT: shll $16, %ebx
@@ -4586,7 +4619,6 @@ define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
; AVX512VL-NEXT: orq %r14, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm0
; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512VL-NEXT: addq $40, %rsp
; AVX512VL-NEXT: popq %rbx
@@ -4616,7 +4648,7 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
; AVX1-NEXT: orl %ebx, %r14d
-; AVX1-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
+; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -4631,7 +4663,7 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX1-NEXT: shlq $32, %rax
; AVX1-NEXT: orq %r14, %rax
; AVX1-NEXT: vmovq %rax, %xmm0
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: addq $40, %rsp
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: popq %r14
@@ -4669,7 +4701,7 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX2-NEXT: shlq $32, %rax
; AVX2-NEXT: orq %r14, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: addq $40, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r14
@@ -4682,11 +4714,13 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX512F-NEXT: subq $40, %rsp
; AVX512F-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movw %ax, %bx
; AVX512F-NEXT: shll $16, %ebx
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %r14d
; AVX512F-NEXT: orl %ebx, %r14d
@@ -4694,6 +4728,7 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movw %ax, %bx
; AVX512F-NEXT: shll $16, %ebx
@@ -4704,7 +4739,7 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX512F-NEXT: shlq $32, %rax
; AVX512F-NEXT: orq %r14, %rax
; AVX512F-NEXT: vmovq %rax, %xmm0
-; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: addq $40, %rsp
; AVX512F-NEXT: popq %rbx
; AVX512F-NEXT: popq %r14
@@ -4717,11 +4752,13 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX512VL-NEXT: subq $40, %rsp
; AVX512VL-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movw %ax, %bx
; AVX512VL-NEXT: shll $16, %ebx
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %r14d
; AVX512VL-NEXT: orl %ebx, %r14d
@@ -4729,6 +4766,7 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512VL-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movw %ax, %bx
; AVX512VL-NEXT: shll $16, %ebx
@@ -4740,7 +4778,6 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX512VL-NEXT: orq %r14, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm0
; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
@@ -4774,7 +4811,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r15d
; AVX1-NEXT: orl %ebx, %r15d
-; AVX1-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
+; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -4799,7 +4836,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r15d
; AVX1-NEXT: orl %ebx, %r15d
-; AVX1-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
+; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -4897,11 +4934,13 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX512F-NEXT: subq $96, %rsp
; AVX512F-NEXT: vmovupd %zmm0, (%rsp) # 64-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movw %ax, %bx
; AVX512F-NEXT: shll $16, %ebx
; AVX512F-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %r15d
; AVX512F-NEXT: orl %ebx, %r15d
@@ -4909,6 +4948,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movw %ax, %bx
; AVX512F-NEXT: shll $16, %ebx
@@ -4922,11 +4962,13 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX512F-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; AVX512F-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movw %ax, %bx
; AVX512F-NEXT: shll $16, %ebx
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %r15d
; AVX512F-NEXT: orl %ebx, %r15d
@@ -4934,6 +4976,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movw %ax, %bx
; AVX512F-NEXT: shll $16, %ebx
@@ -4960,11 +5003,13 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX512VL-NEXT: subq $96, %rsp
; AVX512VL-NEXT: vmovupd %zmm0, (%rsp) # 64-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movw %ax, %bx
; AVX512VL-NEXT: shll $16, %ebx
; AVX512VL-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload
; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %r15d
; AVX512VL-NEXT: orl %ebx, %r15d
@@ -4972,6 +5017,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512VL-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movw %ax, %bx
; AVX512VL-NEXT: shll $16, %ebx
@@ -4985,11 +5031,13 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX512VL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; AVX512VL-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movw %ax, %bx
; AVX512VL-NEXT: shll $16, %ebx
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %r15d
; AVX512VL-NEXT: orl %ebx, %r15d
@@ -4997,6 +5045,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512VL-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movw %ax, %bx
; AVX512VL-NEXT: shll $16, %ebx
@@ -5077,7 +5126,7 @@ define void @store_cvt_4f64_to_4i16(<4 x double> %a0, <4 x i16>* %a1) nounwind {
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r14d
-; AVX1-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
+; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -5150,16 +5199,19 @@ define void @store_cvt_4f64_to_4i16(<4 x double> %a0, <4 x i16>* %a1) nounwind {
; AVX512F-NEXT: movq %rdi, %rbx
; AVX512F-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movl %eax, %r14d
; AVX512F-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movl %eax, %r15d
; AVX512F-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movl %eax, %ebp
; AVX512F-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
@@ -5185,16 +5237,19 @@ define void @store_cvt_4f64_to_4i16(<4 x double> %a0, <4 x i16>* %a1) nounwind {
; AVX512VL-NEXT: movq %rdi, %rbx
; AVX512VL-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movl %eax, %r14d
; AVX512VL-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512VL-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movl %eax, %r15d
; AVX512VL-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movl %eax, %ebp
; AVX512VL-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
@@ -5235,7 +5290,7 @@ define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) noun
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %ebx
; AVX1-NEXT: orl %ebp, %ebx
-; AVX1-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
+; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -5250,7 +5305,7 @@ define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) noun
; AVX1-NEXT: shlq $32, %rax
; AVX1-NEXT: orq %rbx, %rax
; AVX1-NEXT: vmovq %rax, %xmm0
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX1-NEXT: vmovdqa %xmm0, (%r14)
; AVX1-NEXT: addq $32, %rsp
; AVX1-NEXT: popq %rbx
@@ -5292,7 +5347,7 @@ define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) noun
; AVX2-NEXT: shlq $32, %rax
; AVX2-NEXT: orq %rbx, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX2-NEXT: vmovdqa %xmm0, (%r14)
; AVX2-NEXT: addq $32, %rsp
; AVX2-NEXT: popq %rbx
@@ -5309,11 +5364,13 @@ define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) noun
; AVX512F-NEXT: movq %rdi, %r14
; AVX512F-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movw %ax, %bp
; AVX512F-NEXT: shll $16, %ebp
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %ebx
; AVX512F-NEXT: orl %ebp, %ebx
@@ -5321,6 +5378,7 @@ define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) noun
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movw %ax, %bp
; AVX512F-NEXT: shll $16, %ebp
@@ -5331,7 +5389,7 @@ define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) noun
; AVX512F-NEXT: shlq $32, %rax
; AVX512F-NEXT: orq %rbx, %rax
; AVX512F-NEXT: vmovq %rax, %xmm0
-; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX512F-NEXT: vmovdqa %xmm0, (%r14)
; AVX512F-NEXT: addq $32, %rsp
; AVX512F-NEXT: popq %rbx
@@ -5348,11 +5406,13 @@ define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) noun
; AVX512VL-NEXT: movq %rdi, %r14
; AVX512VL-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movw %ax, %bp
; AVX512VL-NEXT: shll $16, %ebp
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %ebx
; AVX512VL-NEXT: orl %ebp, %ebx
@@ -5360,6 +5420,7 @@ define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) noun
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512VL-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movw %ax, %bp
; AVX512VL-NEXT: shll $16, %ebp
@@ -5371,7 +5432,6 @@ define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) noun
; AVX512VL-NEXT: orq %rbx, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm0
; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512VL-NEXT: vmovdqa %xmm0, (%r14)
; AVX512VL-NEXT: addq $32, %rsp
@@ -5406,7 +5466,7 @@ define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounw
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %ebx
; AVX1-NEXT: orl %ebp, %ebx
-; AVX1-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
+; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -5421,7 +5481,7 @@ define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounw
; AVX1-NEXT: shlq $32, %rax
; AVX1-NEXT: orq %rbx, %rax
; AVX1-NEXT: vmovq %rax, %xmm0
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vmovdqa %xmm0, (%r14)
; AVX1-NEXT: addq $32, %rsp
; AVX1-NEXT: popq %rbx
@@ -5463,7 +5523,7 @@ define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounw
; AVX2-NEXT: shlq $32, %rax
; AVX2-NEXT: orq %rbx, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vmovdqa %xmm0, (%r14)
; AVX2-NEXT: addq $32, %rsp
; AVX2-NEXT: popq %rbx
@@ -5480,11 +5540,13 @@ define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounw
; AVX512F-NEXT: movq %rdi, %r14
; AVX512F-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movw %ax, %bp
; AVX512F-NEXT: shll $16, %ebp
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %ebx
; AVX512F-NEXT: orl %ebp, %ebx
@@ -5492,6 +5554,7 @@ define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounw
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movw %ax, %bp
; AVX512F-NEXT: shll $16, %ebp
@@ -5502,7 +5565,7 @@ define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounw
; AVX512F-NEXT: shlq $32, %rax
; AVX512F-NEXT: orq %rbx, %rax
; AVX512F-NEXT: vmovq %rax, %xmm0
-; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vmovdqa %xmm0, (%r14)
; AVX512F-NEXT: addq $32, %rsp
; AVX512F-NEXT: popq %rbx
@@ -5519,11 +5582,13 @@ define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounw
; AVX512VL-NEXT: movq %rdi, %r14
; AVX512VL-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movw %ax, %bp
; AVX512VL-NEXT: shll $16, %ebp
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %ebx
; AVX512VL-NEXT: orl %ebp, %ebx
@@ -5531,6 +5596,7 @@ define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounw
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512VL-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movw %ax, %bp
; AVX512VL-NEXT: shll $16, %ebp
@@ -5542,7 +5608,6 @@ define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounw
; AVX512VL-NEXT: orq %rbx, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm0
; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
@@ -5576,7 +5641,7 @@ define void @store_cvt_8f64_to_8i16(<8 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill
-; AVX1-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
+; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -5587,7 +5652,7 @@ define void @store_cvt_8f64_to_8i16(<8 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX1-NEXT: # xmm0 = mem[1,0]
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r12d
-; AVX1-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
+; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -5708,28 +5773,33 @@ define void @store_cvt_8f64_to_8i16(<8 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX512F-NEXT: movq %rdi, %rbx
; AVX512F-NEXT: vmovupd %zmm0, {{[0-9]+}}(%rsp) # 64-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill
; AVX512F-NEXT: vmovupd {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill
; AVX512F-NEXT: vmovupd {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload
; AVX512F-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; AVX512F-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movl %eax, %r12d
; AVX512F-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movl %eax, %r13d
; AVX512F-NEXT: vmovups {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movl %eax, %ebp
; AVX512F-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
@@ -5737,6 +5807,7 @@ define void @store_cvt_8f64_to_8i16(<8 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX512F-NEXT: movl %eax, %r14d
; AVX512F-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movl %eax, %r15d
; AVX512F-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
@@ -5772,28 +5843,33 @@ define void @store_cvt_8f64_to_8i16(<8 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX512VL-NEXT: movq %rdi, %rbx
; AVX512VL-NEXT: vmovupd %zmm0, {{[0-9]+}}(%rsp) # 64-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill
; AVX512VL-NEXT: vmovups {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512VL-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill
; AVX512VL-NEXT: vmovupd {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload
; AVX512VL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; AVX512VL-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movl %eax, %r12d
; AVX512VL-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512VL-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movl %eax, %r13d
; AVX512VL-NEXT: vmovups {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload
; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movl %eax, %ebp
; AVX512VL-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
@@ -5801,6 +5877,7 @@ define void @store_cvt_8f64_to_8i16(<8 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX512VL-NEXT: movl %eax, %r14d
; AVX512VL-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movl %eax, %r15d
; AVX512VL-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
diff --git a/test/CodeGen/X86/vector-idiv-sdiv-128.ll b/test/CodeGen/X86/vector-idiv-sdiv-128.ll
index dfbfe06678fa..895bf5c0f02d 100644
--- a/test/CodeGen/X86/vector-idiv-sdiv-128.ll
+++ b/test/CodeGen/X86/vector-idiv-sdiv-128.ll
@@ -175,19 +175,18 @@ define <8 x i16> @test_div7_8i16(<8 x i16> %a) nounwind {
define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_div7_16i8:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psraw $8, %xmm3
-; SSE2-NEXT: pmullw %xmm2, %xmm3
-; SSE2-NEXT: psrlw $8, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
+; SSE2-NEXT: pmullw %xmm3, %xmm2
+; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm1
-; SSE2-NEXT: pmullw %xmm2, %xmm1
+; SSE2-NEXT: pmullw %xmm3, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: packuswb %xmm3, %xmm1
+; SSE2-NEXT: packuswb %xmm2, %xmm1
; SSE2-NEXT: paddb %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: psrlw $2, %xmm0
@@ -204,7 +203,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; SSE41-LABEL: test_div7_16i8:
; SSE41: # BB#0:
; SSE41-NEXT: pmovsxbw %xmm0, %xmm1
-; SSE41-NEXT: pmovsxbw {{.*}}(%rip), %xmm2
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [65427,65427,65427,65427,65427,65427,65427,65427]
; SSE41-NEXT: pmullw %xmm2, %xmm1
; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
@@ -228,7 +227,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; AVX1-LABEL: test_div7_16i8:
; AVX1: # BB#0:
; AVX1-NEXT: vpmovsxbw %xmm0, %xmm1
-; AVX1-NEXT: vpmovsxbw {{.*}}(%rip), %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [65427,65427,65427,65427,65427,65427,65427,65427]
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
@@ -250,8 +249,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; AVX2-LABEL: test_div7_16i8:
; AVX2: # BB#0:
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm1
-; AVX2-NEXT: vpmovsxbw {{.*}}(%rip), %ymm2
-; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
@@ -483,19 +481,18 @@ define <8 x i16> @test_rem7_8i16(<8 x i16> %a) nounwind {
define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_rem7_16i8:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psraw $8, %xmm3
-; SSE2-NEXT: pmullw %xmm2, %xmm3
-; SSE2-NEXT: psrlw $8, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
+; SSE2-NEXT: pmullw %xmm3, %xmm2
+; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm1
-; SSE2-NEXT: pmullw %xmm2, %xmm1
+; SSE2-NEXT: pmullw %xmm3, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: packuswb %xmm3, %xmm1
+; SSE2-NEXT: packuswb %xmm2, %xmm1
; SSE2-NEXT: paddb %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psrlw $2, %xmm2
@@ -509,8 +506,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; SSE2-NEXT: psraw $8, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm4, %xmm2
@@ -525,7 +521,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE41-LABEL: test_rem7_16i8:
; SSE41: # BB#0:
; SSE41-NEXT: pmovsxbw %xmm0, %xmm1
-; SSE41-NEXT: pmovsxbw {{.*}}(%rip), %xmm2
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [65427,65427,65427,65427,65427,65427,65427,65427]
; SSE41-NEXT: pmullw %xmm2, %xmm1
; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
@@ -559,7 +555,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; AVX1-LABEL: test_rem7_16i8:
; AVX1: # BB#0:
; AVX1-NEXT: vpmovsxbw %xmm0, %xmm1
-; AVX1-NEXT: vpmovsxbw {{.*}}(%rip), %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [65427,65427,65427,65427,65427,65427,65427,65427]
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
@@ -592,8 +588,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; AVX2-LABEL: test_rem7_16i8:
; AVX2: # BB#0:
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm1
-; AVX2-NEXT: vpmovsxbw {{.*}}(%rip), %ymm2
-; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
diff --git a/test/CodeGen/X86/vector-idiv-sdiv-256.ll b/test/CodeGen/X86/vector-idiv-sdiv-256.ll
index a1727ea4f705..e7bfe3778212 100644
--- a/test/CodeGen/X86/vector-idiv-sdiv-256.ll
+++ b/test/CodeGen/X86/vector-idiv-sdiv-256.ll
@@ -87,7 +87,7 @@ define <4 x i64> @test_div7_4i64(<4 x i64> %a) nounwind {
define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: test_div7_8i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
+; AVX1-NEXT: vmovdqa {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
@@ -163,7 +163,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpmovsxbw %xmm1, %xmm2
-; AVX1-NEXT: vpmovsxbw {{.*}}(%rip), %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
@@ -348,7 +348,7 @@ define <4 x i64> @test_rem7_4i64(<4 x i64> %a) nounwind {
define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: test_rem7_8i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
+; AVX1-NEXT: vmovdqa {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
@@ -439,7 +439,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpmovsxbw %xmm2, %xmm3
-; AVX1-NEXT: vpmovsxbw {{.*}}(%rip), %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [65427,65427,65427,65427,65427,65427,65427,65427]
; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
diff --git a/test/CodeGen/X86/vector-idiv-udiv-128.ll b/test/CodeGen/X86/vector-idiv-udiv-128.ll
index c4ecaa4a1bcf..1b35e2fdddae 100644
--- a/test/CodeGen/X86/vector-idiv-udiv-128.ll
+++ b/test/CodeGen/X86/vector-idiv-udiv-128.ll
@@ -172,22 +172,21 @@ define <8 x i16> @test_div7_8i16(<8 x i16> %a) nounwind {
define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_div7_16i8:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
-; SSE2-NEXT: pmullw %xmm2, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: pmullw %xmm2, %xmm3
-; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: packuswb %xmm1, %xmm3
-; SSE2-NEXT: psubb %xmm3, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
+; SSE2-NEXT: pmullw %xmm3, %xmm2
+; SSE2-NEXT: psrlw $8, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; SSE2-NEXT: pmullw %xmm3, %xmm4
+; SSE2-NEXT: psrlw $8, %xmm4
+; SSE2-NEXT: packuswb %xmm2, %xmm4
+; SSE2-NEXT: psubb %xmm4, %xmm0
; SSE2-NEXT: psrlw $1, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: paddb %xmm3, %xmm0
+; SSE2-NEXT: paddb %xmm4, %xmm0
; SSE2-NEXT: psrlw $2, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
@@ -195,7 +194,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; SSE41-LABEL: test_div7_16i8:
; SSE41: # BB#0:
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [37,37,37,37,37,37,37,37]
; SSE41-NEXT: pmullw %xmm2, %xmm1
; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
@@ -214,7 +213,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; AVX1-LABEL: test_div7_16i8:
; AVX1: # BB#0:
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [37,37,37,37,37,37,37,37]
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
@@ -233,8 +232,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; AVX2-LABEL: test_div7_16i8:
; AVX2: # BB#0:
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
@@ -464,30 +462,28 @@ define <8 x i16> @test_rem7_8i16(<8 x i16> %a) nounwind {
define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_rem7_16i8:
; SSE2: # BB#0:
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
+; SSE2-NEXT: pmullw %xmm3, %xmm2
+; SSE2-NEXT: psrlw $8, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; SSE2-NEXT: pmullw %xmm3, %xmm4
+; SSE2-NEXT: psrlw $8, %xmm4
+; SSE2-NEXT: packuswb %xmm2, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
-; SSE2-NEXT: pmullw %xmm2, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: pmullw %xmm2, %xmm3
-; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: packuswb %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psubb %xmm3, %xmm1
+; SSE2-NEXT: psubb %xmm4, %xmm1
; SSE2-NEXT: psrlw $1, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE2-NEXT: paddb %xmm3, %xmm1
+; SSE2-NEXT: paddb %xmm4, %xmm1
; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; SSE2-NEXT: psraw $8, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm4, %xmm2
@@ -502,7 +498,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE41-LABEL: test_rem7_16i8:
; SSE41: # BB#0:
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [37,37,37,37,37,37,37,37]
; SSE41-NEXT: pmullw %xmm2, %xmm1
; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
@@ -533,7 +529,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; AVX1-LABEL: test_rem7_16i8:
; AVX1: # BB#0:
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [37,37,37,37,37,37,37,37]
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
@@ -563,8 +559,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; AVX2-LABEL: test_rem7_16i8:
; AVX2: # BB#0:
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
diff --git a/test/CodeGen/X86/vector-idiv-udiv-256.ll b/test/CodeGen/X86/vector-idiv-udiv-256.ll
index c11ee22d647b..4adc2e2fb6c9 100644
--- a/test/CodeGen/X86/vector-idiv-udiv-256.ll
+++ b/test/CodeGen/X86/vector-idiv-udiv-256.ll
@@ -174,7 +174,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [37,37,37,37,37,37,37,37]
; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
@@ -359,7 +359,7 @@ define <4 x i64> @test_rem7_4i64(<4 x i64> %a) nounwind {
define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: test_rem7_8i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps {{.*#+}} ymm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
+; AVX1-NEXT: vmovdqa {{.*#+}} ymm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
@@ -453,7 +453,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [37,37,37,37,37,37,37,37]
; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
diff --git a/test/CodeGen/X86/vector-interleave.ll b/test/CodeGen/X86/vector-interleave.ll
index 4f9dbb03fb15..1265ea108977 100644
--- a/test/CodeGen/X86/vector-interleave.ll
+++ b/test/CodeGen/X86/vector-interleave.ll
@@ -93,44 +93,32 @@ define <64 x i16> @interleave8x8(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x
; AVX2: # BB#0:
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm9 = <u,4,u,5,u,6,u,7>
-; AVX2-NEXT: vpermd %ymm1, %ymm9, %ymm3
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4],ymm3[5],ymm0[6],ymm3[7]
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = <u,0,u,1,u,2,u,3>
-; AVX2-NEXT: vpermd %ymm1, %ymm0, %ymm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm8 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero
-; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0],ymm1[1],ymm8[2],ymm1[3],ymm8[4],ymm1[5],ymm8[6],ymm1[7]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX2-NEXT: vpunpckhdq {{.*#+}} xmm9 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; AVX2-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm8[2],xmm1[2],xmm8[3],xmm1[3]
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm8[0],xmm1[0],xmm8[1],xmm1[1]
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
-; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm6, %ymm5
-; AVX2-NEXT: vpermd %ymm5, %ymm9, %ymm6
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2],ymm6[3],ymm4[4],ymm6[5],ymm4[6],ymm6[7]
-; AVX2-NEXT: vpermd %ymm5, %ymm0, %ymm0
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm0[1],ymm2[2],ymm0[3],ymm2[4],ymm0[5],ymm2[6],ymm0[7]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm5, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm1, %ymm1
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm4
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm3, %ymm3
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
+; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; AVX2-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm4[2],xmm6[2],xmm4[3],xmm6[3]
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
+; AVX2-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
+; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
+; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm1
+; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm9[4],xmm7[4],xmm9[5],xmm7[5],xmm9[6],xmm7[6],xmm9[7],xmm7[7]
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3]
+; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3
; AVX2-NEXT: retq
%ab = shufflevector <8 x i16> %a, <8 x i16> %b, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
%cd = shufflevector <8 x i16> %c, <8 x i16> %d, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
diff --git a/test/CodeGen/X86/vector-lzcnt-128.ll b/test/CodeGen/X86/vector-lzcnt-128.ll
index 6445a363787c..9e11edcc29dc 100644
--- a/test/CodeGen/X86/vector-lzcnt-128.ll
+++ b/test/CodeGen/X86/vector-lzcnt-128.ll
@@ -1596,35 +1596,8 @@ define <2 x i64> @foldv2i64() nounwind {
;
; X32-SSE-LABEL: foldv2i64:
; X32-SSE: # BB#0:
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [256,0,4294967295,4294967295]
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pshufb %xmm0, %xmm4
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE-NEXT: psrlw $4, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: pxor %xmm2, %xmm2
-; X32-SSE-NEXT: pshufb %xmm0, %xmm3
-; X32-SSE-NEXT: pcmpeqb %xmm2, %xmm0
-; X32-SSE-NEXT: pand %xmm4, %xmm0
-; X32-SSE-NEXT: paddb %xmm3, %xmm0
-; X32-SSE-NEXT: movdqa %xmm1, %xmm3
-; X32-SSE-NEXT: pcmpeqb %xmm2, %xmm3
-; X32-SSE-NEXT: psrlw $8, %xmm3
-; X32-SSE-NEXT: pand %xmm0, %xmm3
-; X32-SSE-NEXT: psrlw $8, %xmm0
-; X32-SSE-NEXT: paddw %xmm3, %xmm0
-; X32-SSE-NEXT: pcmpeqw %xmm2, %xmm1
-; X32-SSE-NEXT: psrld $16, %xmm1
-; X32-SSE-NEXT: pand %xmm0, %xmm1
-; X32-SSE-NEXT: psrld $16, %xmm0
-; X32-SSE-NEXT: paddd %xmm1, %xmm0
-; X32-SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7]
-; X32-SSE-NEXT: psrlq $32, %xmm0
-; X32-SSE-NEXT: paddq %xmm2, %xmm0
+; X32-SSE-NEXT: movl $55, %eax
+; X32-SSE-NEXT: movd %eax, %xmm0
; X32-SSE-NEXT: retl
%out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 0)
ret <2 x i64> %out
@@ -1651,35 +1624,8 @@ define <2 x i64> @foldv2i64u() nounwind {
;
; X32-SSE-LABEL: foldv2i64u:
; X32-SSE: # BB#0:
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [256,0,4294967295,4294967295]
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pshufb %xmm0, %xmm4
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE-NEXT: psrlw $4, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: pxor %xmm2, %xmm2
-; X32-SSE-NEXT: pshufb %xmm0, %xmm3
-; X32-SSE-NEXT: pcmpeqb %xmm2, %xmm0
-; X32-SSE-NEXT: pand %xmm4, %xmm0
-; X32-SSE-NEXT: paddb %xmm3, %xmm0
-; X32-SSE-NEXT: movdqa %xmm1, %xmm3
-; X32-SSE-NEXT: pcmpeqb %xmm2, %xmm3
-; X32-SSE-NEXT: psrlw $8, %xmm3
-; X32-SSE-NEXT: pand %xmm0, %xmm3
-; X32-SSE-NEXT: psrlw $8, %xmm0
-; X32-SSE-NEXT: paddw %xmm3, %xmm0
-; X32-SSE-NEXT: pcmpeqw %xmm2, %xmm1
-; X32-SSE-NEXT: psrld $16, %xmm1
-; X32-SSE-NEXT: pand %xmm0, %xmm1
-; X32-SSE-NEXT: psrld $16, %xmm0
-; X32-SSE-NEXT: paddd %xmm1, %xmm0
-; X32-SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7]
-; X32-SSE-NEXT: psrlq $32, %xmm0
-; X32-SSE-NEXT: paddq %xmm2, %xmm0
+; X32-SSE-NEXT: movl $55, %eax
+; X32-SSE-NEXT: movd %eax, %xmm0
; X32-SSE-NEXT: retl
%out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 -1)
ret <2 x i64> %out
diff --git a/test/CodeGen/X86/vector-lzcnt-256.ll b/test/CodeGen/X86/vector-lzcnt-256.ll
index c68395493023..53cb4d8e445b 100644
--- a/test/CodeGen/X86/vector-lzcnt-256.ll
+++ b/test/CodeGen/X86/vector-lzcnt-256.ll
@@ -11,8 +11,8 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; AVX1-NEXT: vpshufb %xmm1, %xmm4, %xmm5
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm1
@@ -37,7 +37,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-NEXT: vpand %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm5, %xmm5
; AVX1-NEXT: vpaddq %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm5
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm5
; AVX1-NEXT: vpshufb %xmm5, %xmm4, %xmm5
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm6
; AVX1-NEXT: vpand %xmm3, %xmm6, %xmm3
@@ -143,8 +143,8 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64u:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; AVX1-NEXT: vpshufb %xmm1, %xmm4, %xmm5
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm1
@@ -169,7 +169,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX1-NEXT: vpand %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm5, %xmm5
; AVX1-NEXT: vpaddq %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm5
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm5
; AVX1-NEXT: vpshufb %xmm5, %xmm4, %xmm5
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm6
; AVX1-NEXT: vpand %xmm3, %xmm6, %xmm3
@@ -275,8 +275,8 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX1-LABEL: testv8i32:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm5
@@ -296,7 +296,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX1-NEXT: vpand %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpsrld $16, %xmm3, %xmm3
; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm5
; AVX1-NEXT: vpand %xmm2, %xmm5, %xmm2
@@ -387,8 +387,8 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX1-LABEL: testv8i32u:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm5
@@ -408,7 +408,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX1-NEXT: vpand %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpsrld $16, %xmm3, %xmm3
; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm5
; AVX1-NEXT: vpand %xmm2, %xmm5, %xmm2
@@ -499,8 +499,8 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX1-LABEL: testv16i16:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm5
@@ -515,7 +515,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX1-NEXT: vpand %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpaddw %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm5
; AVX1-NEXT: vpand %xmm2, %xmm5, %xmm2
@@ -586,8 +586,8 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX1-LABEL: testv16i16u:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm5
@@ -602,7 +602,7 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX1-NEXT: vpand %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpaddw %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm5
; AVX1-NEXT: vpand %xmm2, %xmm5, %xmm2
@@ -673,8 +673,8 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX1-LABEL: testv32i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
@@ -684,7 +684,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
; AVX1-NEXT: vpshufb %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpaddb %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -747,8 +747,8 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX1-LABEL: testv32i8u:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
@@ -758,7 +758,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
; AVX1-NEXT: vpshufb %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpaddb %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -830,31 +830,7 @@ define <4 x i64> @foldv4i64() nounwind {
;
; X32-AVX-LABEL: foldv4i64:
; X32-AVX: # BB#0:
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [256,0,4294967295,4294967295,0,0,255,0]
-; X32-AVX-NEXT: vpand %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm1, %ymm4
-; X32-AVX-NEXT: vpand %ymm0, %ymm4, %ymm0
-; X32-AVX-NEXT: vpxor %ymm4, %ymm4, %ymm4
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm5
-; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
-; X32-AVX-NEXT: vpaddb %ymm0, %ymm2, %ymm0
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm2
-; X32-AVX-NEXT: vpsrlw $8, %ymm2, %ymm2
-; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm2
-; X32-AVX-NEXT: vpsrlw $8, %ymm0, %ymm0
-; X32-AVX-NEXT: vpaddw %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpcmpeqw %ymm4, %ymm1, %ymm1
-; X32-AVX-NEXT: vpsrld $16, %ymm1, %ymm1
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm1
-; X32-AVX-NEXT: vpsrld $16, %ymm0, %ymm0
-; X32-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm4[1,2,3],ymm0[4],ymm4[5],ymm0[6],ymm4[7]
-; X32-AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
-; X32-AVX-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,0,0,64,0,56,0]
; X32-AVX-NEXT: retl
%out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
ret <4 x i64> %out
@@ -873,31 +849,7 @@ define <4 x i64> @foldv4i64u() nounwind {
;
; X32-AVX-LABEL: foldv4i64u:
; X32-AVX: # BB#0:
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [256,0,4294967295,4294967295,0,0,255,0]
-; X32-AVX-NEXT: vpand %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm1, %ymm4
-; X32-AVX-NEXT: vpand %ymm0, %ymm4, %ymm0
-; X32-AVX-NEXT: vpxor %ymm4, %ymm4, %ymm4
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm5
-; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
-; X32-AVX-NEXT: vpaddb %ymm0, %ymm2, %ymm0
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm2
-; X32-AVX-NEXT: vpsrlw $8, %ymm2, %ymm2
-; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm2
-; X32-AVX-NEXT: vpsrlw $8, %ymm0, %ymm0
-; X32-AVX-NEXT: vpaddw %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpcmpeqw %ymm4, %ymm1, %ymm1
-; X32-AVX-NEXT: vpsrld $16, %ymm1, %ymm1
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm1
-; X32-AVX-NEXT: vpsrld $16, %ymm0, %ymm0
-; X32-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm4[1,2,3],ymm0[4],ymm4[5],ymm0[6],ymm4[7]
-; X32-AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
-; X32-AVX-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,0,0,64,0,56,0]
; X32-AVX-NEXT: retl
%out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
ret <4 x i64> %out
diff --git a/test/CodeGen/X86/vector-popcnt-256.ll b/test/CodeGen/X86/vector-popcnt-256.ll
index 8bbfea934422..7a675619d720 100644
--- a/test/CodeGen/X86/vector-popcnt-256.ll
+++ b/test/CodeGen/X86/vector-popcnt-256.ll
@@ -6,8 +6,8 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
@@ -16,7 +16,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpsadbw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm5
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm5
; AVX1-NEXT: vpshufb %xmm5, %xmm4, %xmm5
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -47,8 +47,8 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX1-LABEL: testv8i32:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
@@ -61,7 +61,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpsadbw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm5
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm5
; AVX1-NEXT: vpshufb %xmm5, %xmm4, %xmm5
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -145,15 +145,15 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX1-LABEL: testv32i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/vector-rotate-128.ll b/test/CodeGen/X86/vector-rotate-128.ll
index fbb67ebbf60c..5eb1a55881e5 100644
--- a/test/CodeGen/X86/vector-rotate-128.ll
+++ b/test/CodeGen/X86/vector-rotate-128.ll
@@ -3,9 +3,11 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
-;
+
; Just one 32-bit run to make sure we do reasonable things for i64 rotates.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE2
@@ -75,6 +77,15 @@ define <2 x i64> @var_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
+; AVX512-LABEL: var_rotate_v2i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [64,64]
+; AVX512-NEXT: vpsubq %xmm1, %xmm2, %xmm2
+; AVX512-NEXT: vpsllvq %xmm1, %xmm0, %xmm1
+; AVX512-NEXT: vpsrlvq %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
+;
; XOP-LABEL: var_rotate_v2i64:
; XOP: # BB#0:
; XOP-NEXT: vprotq %xmm1, %xmm0, %xmm0
@@ -203,6 +214,15 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
+; AVX512-LABEL: var_rotate_v4i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX512-NEXT: vpsubd %xmm1, %xmm2, %xmm2
+; AVX512-NEXT: vpsllvd %xmm1, %xmm0, %xmm1
+; AVX512-NEXT: vpsrlvd %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
+;
; XOP-LABEL: var_rotate_v4i32:
; XOP: # BB#0:
; XOP-NEXT: vprotd %xmm1, %xmm0, %xmm0
@@ -336,21 +356,21 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE41-NEXT: psllw $8, %xmm6
; SSE41-NEXT: movdqa %xmm3, %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm6, %xmm5
+; SSE41-NEXT: pblendvb %xmm0, %xmm6, %xmm5
; SSE41-NEXT: movdqa %xmm5, %xmm1
; SSE41-NEXT: psllw $4, %xmm1
; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm5
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm5
; SSE41-NEXT: movdqa %xmm5, %xmm1
; SSE41-NEXT: psllw $2, %xmm1
; SSE41-NEXT: paddw %xmm4, %xmm4
; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm5
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm5
; SSE41-NEXT: movdqa %xmm5, %xmm1
; SSE41-NEXT: psllw $1, %xmm1
; SSE41-NEXT: paddw %xmm4, %xmm4
; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm5
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm5
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: psllw $12, %xmm0
; SSE41-NEXT: psllw $4, %xmm2
@@ -360,21 +380,21 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: psrlw $8, %xmm4
; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: pblendvb %xmm4, %xmm3
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm2
; SSE41-NEXT: psrlw $4, %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm2, %xmm3
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm2
; SSE41-NEXT: psrlw $2, %xmm2
; SSE41-NEXT: paddw %xmm1, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm2, %xmm3
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm2
; SSE41-NEXT: psrlw $1, %xmm2
; SSE41-NEXT: paddw %xmm1, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm2, %xmm3
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm3
; SSE41-NEXT: por %xmm5, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: retq
@@ -421,7 +441,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm1
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
@@ -432,6 +452,27 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
+; AVX512BW-LABEL: var_rotate_v8i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT: vpsubw %xmm1, %xmm2, %xmm2
+; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: var_rotate_v8i16:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqu {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT: vpsubw %xmm1, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsllvw %xmm1, %xmm0, %xmm1
+; AVX512VL-NEXT: vpsrlvw %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT: retq
+;
; XOP-LABEL: var_rotate_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vprotw %xmm1, %xmm0, %xmm0
@@ -585,18 +626,18 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm5, %xmm4
+; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; SSE41-NEXT: movdqa %xmm4, %xmm5
; SSE41-NEXT: psllw $2, %xmm5
; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm5, %xmm4
+; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; SSE41-NEXT: movdqa %xmm4, %xmm5
; SSE41-NEXT: paddb %xmm5, %xmm5
; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm5, %xmm4
+; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; SSE41-NEXT: psllw $5, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm3, %xmm3
@@ -604,18 +645,18 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: psrlw $4, %xmm5
; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: pblendvb %xmm5, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psrlw $2, %xmm2
; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psrlw $1, %xmm2
; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: por %xmm4, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
@@ -650,6 +691,36 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
+; AVX512BW-LABEL: var_rotate_v16i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT: vpsubb %xmm1, %xmm2, %xmm2
+; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512BW-NEXT: vpsllvd %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512BW-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: var_rotate_v16i8:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqu {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT: vpsubb %xmm1, %xmm2, %xmm2
+; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT: vpsllvd %zmm1, %zmm0, %zmm1
+; AVX512VL-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512VL-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
; XOP-LABEL: var_rotate_v16i8:
; XOP: # BB#0:
; XOP-NEXT: vprotb %xmm1, %xmm0, %xmm0
@@ -773,6 +844,13 @@ define <2 x i64> @constant_rotate_v2i64(<2 x i64> %a) nounwind {
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
+; AVX512-LABEL: constant_rotate_v2i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm1
+; AVX512-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
+;
; XOPAVX1-LABEL: constant_rotate_v2i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshlq {{.*}}(%rip), %xmm0, %xmm1
@@ -873,6 +951,13 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
+; AVX512-LABEL: constant_rotate_v4i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm1
+; AVX512-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
+;
; XOPAVX1-LABEL: constant_rotate_v4i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm1
@@ -989,12 +1074,30 @@ define <8 x i16> @constant_rotate_v8i16(<8 x i16> %a) nounwind {
; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm1
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
+; AVX512BW-LABEL: constant_rotate_v8i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
+; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [16,15,14,13,12,11,10,9]
+; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: constant_rotate_v8i16:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpsllvw {{.*}}(%rip), %xmm0, %xmm1
+; AVX512VL-NEXT: vpsrlvw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT: retq
+;
; XOP-LABEL: constant_rotate_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm1
@@ -1044,8 +1147,7 @@ define <8 x i16> @constant_rotate_v8i16(<8 x i16> %a) nounwind {
define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: constant_rotate_v16i8:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
-; SSE2-NEXT: psllw $5, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [8192,24640,41088,57536,57600,41152,24704,8256]
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pcmpgtb %xmm3, %xmm1
@@ -1071,8 +1173,7 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: pandn %xmm1, %xmm3
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [8,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7]
-; SSE2-NEXT: psllw $5, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [57600,41152,24704,8256,8192,24640,41088,57536]
; SSE2-NEXT: pxor %xmm5, %xmm5
; SSE2-NEXT: pcmpgtb %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm5, %xmm6
@@ -1105,72 +1206,79 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; SSE41-LABEL: constant_rotate_v16i8:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
-; SSE41-NEXT: psllw $5, %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: psllw $4, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
+; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,57600,41152,24704,8256]
; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: pblendvb %xmm3, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: psllw $2, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: paddb %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm3, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: paddb %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm3, %xmm2
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7]
-; SSE41-NEXT: psllw $5, %xmm0
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: psrlw $4, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
-; SSE41-NEXT: pblendvb %xmm3, %xmm1
+; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [57600,41152,24704,8256,8192,24640,41088,57536]
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: psrlw $2, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: paddb %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm3, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: psrlw $1, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: paddb %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm3, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
; SSE41-NEXT: por %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: constant_rotate_v16i8:
; AVX: # BB#0:
-; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
-; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX-NEXT: vpsllw $4, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm2
-; AVX-NEXT: vpsllw $2, %xmm2, %xmm3
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vpaddb %xmm2, %xmm2, %xmm3
-; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm2, %xmm1
-; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [8,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7]
-; AVX-NEXT: vpsllw $5, %xmm2, %xmm2
-; AVX-NEXT: vpsrlw $4, %xmm0, %xmm3
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $2, %xmm0, %xmm3
+; AVX-NEXT: vpsllw $4, %xmm0, %xmm1
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,57600,41152,24704,8256]
+; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vpsllw $2, %xmm1, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpaddb %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $1, %xmm0, %xmm3
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX-NEXT: vpblendvb %xmm2, %xmm3, %xmm1, %xmm1
+; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm3
; AVX-NEXT: vpaddb %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vpblendvb %xmm2, %xmm3, %xmm1, %xmm1
+; AVX-NEXT: vpsrlw $4, %xmm0, %xmm2
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [57600,41152,24704,8256,8192,24640,41088,57536]
+; AVX-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $2, %xmm0, %xmm2
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX-NEXT: vpaddb %xmm3, %xmm3, %xmm3
+; AVX-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $1, %xmm0, %xmm2
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX-NEXT: vpaddb %xmm3, %xmm3, %xmm3
+; AVX-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
+; AVX512-LABEL: constant_rotate_v16i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT: vpsllvd {{.*}}(%rip), %zmm0, %zmm1
+; AVX512-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+;
; XOP-LABEL: constant_rotate_v16i8:
; XOP: # BB#0:
; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm1
@@ -1182,8 +1290,7 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
;
; X32-SSE-LABEL: constant_rotate_v16i8:
; X32-SSE: # BB#0:
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
-; X32-SSE-NEXT: psllw $5, %xmm3
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [8192,24640,41088,57536,57600,41152,24704,8256]
; X32-SSE-NEXT: pxor %xmm2, %xmm2
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: pcmpgtb %xmm3, %xmm1
@@ -1209,8 +1316,7 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; X32-SSE-NEXT: pandn %xmm1, %xmm3
; X32-SSE-NEXT: paddb %xmm1, %xmm1
; X32-SSE-NEXT: pand %xmm4, %xmm1
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = [8,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7]
-; X32-SSE-NEXT: psllw $5, %xmm4
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = [57600,41152,24704,8256,8192,24640,41088,57536]
; X32-SSE-NEXT: pxor %xmm5, %xmm5
; X32-SSE-NEXT: pcmpgtb %xmm4, %xmm5
; X32-SSE-NEXT: movdqa %xmm5, %xmm6
@@ -1265,6 +1371,13 @@ define <2 x i64> @splatconstant_rotate_v2i64(<2 x i64> %a) nounwind {
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
+; AVX512-LABEL: splatconstant_rotate_v2i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsllq $14, %xmm0, %xmm1
+; AVX512-NEXT: vpsrlq $50, %xmm0, %xmm0
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
+;
; XOP-LABEL: splatconstant_rotate_v2i64:
; XOP: # BB#0:
; XOP-NEXT: vprotq $14, %xmm0, %xmm0
@@ -1299,6 +1412,13 @@ define <4 x i32> @splatconstant_rotate_v4i32(<4 x i32> %a) nounwind {
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
+; AVX512-LABEL: splatconstant_rotate_v4i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpslld $4, %xmm0, %xmm1
+; AVX512-NEXT: vpsrld $28, %xmm0, %xmm0
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
+;
; XOP-LABEL: splatconstant_rotate_v4i32:
; XOP: # BB#0:
; XOP-NEXT: vprotd $4, %xmm0, %xmm0
@@ -1333,6 +1453,13 @@ define <8 x i16> @splatconstant_rotate_v8i16(<8 x i16> %a) nounwind {
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
+; AVX512-LABEL: splatconstant_rotate_v8i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsllw $7, %xmm0, %xmm1
+; AVX512-NEXT: vpsrlw $9, %xmm0, %xmm0
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
+;
; XOP-LABEL: splatconstant_rotate_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vprotw $7, %xmm0, %xmm0
@@ -1371,6 +1498,15 @@ define <16 x i8> @splatconstant_rotate_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
+; AVX512-LABEL: splatconstant_rotate_v16i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsllw $4, %xmm0, %xmm1
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
+;
; XOP-LABEL: splatconstant_rotate_v16i8:
; XOP: # BB#0:
; XOP-NEXT: vprotb $4, %xmm0, %xmm0
@@ -1416,6 +1552,15 @@ define <2 x i64> @splatconstant_rotate_mask_v2i64(<2 x i64> %a) nounwind {
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
+; AVX512-LABEL: splatconstant_rotate_mask_v2i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsllq $15, %xmm0, %xmm1
+; AVX512-NEXT: vpsrlq $49, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
+;
; XOP-LABEL: splatconstant_rotate_mask_v2i64:
; XOP: # BB#0:
; XOP-NEXT: vprotq $15, %xmm0, %xmm0
@@ -1461,6 +1606,15 @@ define <4 x i32> @splatconstant_rotate_mask_v4i32(<4 x i32> %a) nounwind {
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
+; AVX512-LABEL: splatconstant_rotate_mask_v4i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpslld $4, %xmm0, %xmm1
+; AVX512-NEXT: vpsrld $28, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
+;
; XOP-LABEL: splatconstant_rotate_mask_v4i32:
; XOP: # BB#0:
; XOP-NEXT: vprotd $4, %xmm0, %xmm0
@@ -1506,6 +1660,15 @@ define <8 x i16> @splatconstant_rotate_mask_v8i16(<8 x i16> %a) nounwind {
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
+; AVX512-LABEL: splatconstant_rotate_mask_v8i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsllw $5, %xmm0, %xmm1
+; AVX512-NEXT: vpsrlw $11, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
+;
; XOP-LABEL: splatconstant_rotate_mask_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vprotw $5, %xmm0, %xmm0
@@ -1555,6 +1718,17 @@ define <16 x i8> @splatconstant_rotate_mask_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
+; AVX512-LABEL: splatconstant_rotate_mask_v16i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsllw $4, %xmm0, %xmm1
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
+;
; XOP-LABEL: splatconstant_rotate_mask_v16i8:
; XOP: # BB#0:
; XOP-NEXT: vprotb $4, %xmm0, %xmm0
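
The hunks above add AVX512 RUN lines and autogenerated CHECK blocks to the
128-bit vector rotate tests. Only the assertions are visible in the patch, but
the AVX512 assembly (vpsubq from a splat of 64, then vpsllvq/vpsrlvq/vpor)
matches the standard shl/lshr/or rotate idiom. A minimal sketch of the IR a
function like @var_rotate_v2i64 plausibly contains follows; the body is an
assumption reconstructed from the CHECK lines, not copied from the test file:

define <2 x i64> @var_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
  ; complementary right-shift amount: 64 - %b
  %b64 = sub <2 x i64> <i64 64, i64 64>, %b
  %shl = shl <2 x i64> %a, %b
  %lshr = lshr <2 x i64> %a, %b64
  %rot = or <2 x i64> %shl, %lshr
  ret <2 x i64> %rot
}
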
diff --git a/test/CodeGen/X86/vector-rotate-256.ll b/test/CodeGen/X86/vector-rotate-256.ll
index af1755e14314..3306cd400c1d 100644
--- a/test/CodeGen/X86/vector-rotate-256.ll
+++ b/test/CodeGen/X86/vector-rotate-256.ll
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
@@ -46,6 +48,15 @@ define <4 x i64> @var_rotate_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512-LABEL: var_rotate_v4i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
+; AVX512-NEXT: vpsubq %ymm1, %ymm2, %ymm2
+; AVX512-NEXT: vpsllvq %ymm1, %ymm0, %ymm1
+; AVX512-NEXT: vpsrlvq %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+;
; XOPAVX1-LABEL: var_rotate_v4i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
@@ -124,6 +135,15 @@ define <8 x i32> @var_rotate_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512-LABEL: var_rotate_v8i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2
+; AVX512-NEXT: vpsubd %ymm1, %ymm2, %ymm2
+; AVX512-NEXT: vpsllvd %ymm1, %ymm0, %ymm1
+; AVX512-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+;
; XOPAVX1-LABEL: var_rotate_v8i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
@@ -241,6 +261,26 @@ define <16 x i16> @var_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512BW-LABEL: var_rotate_v16i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT: vpsubw %ymm1, %ymm2, %ymm2
+; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: var_rotate_v16i16:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqu {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT: vpsubw %ymm1, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsllvw %ymm1, %ymm0, %ymm1
+; AVX512VL-NEXT: vpsrlvw %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: retq
+;
; XOPAVX1-LABEL: var_rotate_v16i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
@@ -359,6 +399,34 @@ define <32 x i8> @var_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512BW-LABEL: var_rotate_v32i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT: vpsubb %ymm1, %ymm2, %ymm2
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
+; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: var_rotate_v32i8:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqu {{.*#+}} ymm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VL-NEXT: vpsubb %ymm1, %ymm2, %ymm2
+; AVX512VL-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VL-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VL-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
+; AVX512VL-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512VL-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
+; AVX512VL-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512VL-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512VL-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: retq
+;
; XOPAVX1-LABEL: var_rotate_v32i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
@@ -415,6 +483,13 @@ define <4 x i64> @constant_rotate_v4i64(<4 x i64> %a) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512-LABEL: constant_rotate_v4i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm1
+; AVX512-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+;
; XOPAVX1-LABEL: constant_rotate_v4i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshlq {{.*}}(%rip), %xmm0, %xmm1
@@ -474,6 +549,13 @@ define <8 x i32> @constant_rotate_v8i32(<8 x i32> %a) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512-LABEL: constant_rotate_v8i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm1
+; AVX512-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+;
; XOPAVX1-LABEL: constant_rotate_v8i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm1
@@ -542,6 +624,23 @@ define <16 x i16> @constant_rotate_v16i16(<16 x i16> %a) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512BW-LABEL: constant_rotate_v16i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]
+; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: constant_rotate_v16i16:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpsllvw {{.*}}(%rip), %ymm0, %ymm1
+; AVX512VL-NEXT: vpsrlvw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: retq
+;
; XOPAVX1-LABEL: constant_rotate_v16i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm1
@@ -582,8 +681,7 @@ define <32 x i8> @constant_rotate_v32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpand %xmm8, %xmm2, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
-; AVX1-NEXT: vpsllw $5, %xmm4, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [8192,24640,41088,57536,57600,41152,24704,8256]
; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm1, %xmm2
; AVX1-NEXT: vpsllw $2, %xmm2, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
@@ -605,8 +703,7 @@ define <32 x i8> @constant_rotate_v32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [8,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7]
-; AVX1-NEXT: vpsllw $5, %xmm5, %xmm5
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [57600,41152,24704,8256,8192,24640,41088,57536]
; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
@@ -633,34 +730,42 @@ define <32 x i8> @constant_rotate_v32i8(<32 x i8> %a) nounwind {
;
; AVX2-LABEL: constant_rotate_v32i8:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
-; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX2-NEXT: vpsllw $4, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm2
-; AVX2-NEXT: vpsllw $2, %ymm2, %ymm3
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm3
-; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm2, %ymm1
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [8,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7]
-; AVX2-NEXT: vpsllw $5, %ymm2, %ymm2
-; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm3
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX2-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $2, %ymm0, %ymm3
+; AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
+; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vpsllw $2, %ymm1, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
-; AVX2-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm3
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm3
; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
-; AVX2-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536]
+; AVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $2, %ymm0, %ymm2
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT: vpaddb %ymm3, %ymm3, %ymm3
+; AVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm2
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT: vpaddb %ymm3, %ymm3, %ymm3
+; AVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512-LABEL: constant_rotate_v32i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm1
+; AVX512-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+;
; XOPAVX1-LABEL: constant_rotate_v32i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
@@ -720,6 +825,13 @@ define <4 x i64> @splatconstant_rotate_v4i64(<4 x i64> %a) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512-LABEL: splatconstant_rotate_v4i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsllq $14, %ymm0, %ymm1
+; AVX512-NEXT: vpsrlq $50, %ymm0, %ymm0
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+;
; XOPAVX1-LABEL: splatconstant_rotate_v4i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vprotq $14, %xmm0, %xmm1
@@ -761,6 +873,13 @@ define <8 x i32> @splatconstant_rotate_v8i32(<8 x i32> %a) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512-LABEL: splatconstant_rotate_v8i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpslld $4, %ymm0, %ymm1
+; AVX512-NEXT: vpsrld $28, %ymm0, %ymm0
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+;
; XOPAVX1-LABEL: splatconstant_rotate_v8i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vprotd $4, %xmm0, %xmm1
@@ -802,6 +921,13 @@ define <16 x i16> @splatconstant_rotate_v16i16(<16 x i16> %a) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512-LABEL: splatconstant_rotate_v16i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsllw $7, %ymm0, %ymm1
+; AVX512-NEXT: vpsrlw $9, %ymm0, %ymm0
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+;
; XOPAVX1-LABEL: splatconstant_rotate_v16i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vprotw $7, %xmm0, %xmm1
@@ -851,6 +977,15 @@ define <32 x i8> @splatconstant_rotate_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512-LABEL: splatconstant_rotate_v32i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsllw $4, %ymm0, %ymm1
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+;
; XOPAVX1-LABEL: splatconstant_rotate_v32i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vprotb $4, %xmm0, %xmm1
@@ -900,6 +1035,15 @@ define <4 x i64> @splatconstant_rotate_mask_v4i64(<4 x i64> %a) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512-LABEL: splatconstant_rotate_mask_v4i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsllq $15, %ymm0, %ymm1
+; AVX512-NEXT: vpsrlq $49, %ymm0, %ymm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+;
; XOPAVX1-LABEL: splatconstant_rotate_mask_v4i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vprotq $15, %xmm0, %xmm1
@@ -949,6 +1093,15 @@ define <8 x i32> @splatconstant_rotate_mask_v8i32(<8 x i32> %a) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512-LABEL: splatconstant_rotate_mask_v8i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpslld $4, %ymm0, %ymm1
+; AVX512-NEXT: vpsrld $28, %ymm0, %ymm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+;
; XOPAVX1-LABEL: splatconstant_rotate_mask_v8i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vprotd $4, %xmm0, %xmm1
@@ -998,6 +1151,15 @@ define <16 x i16> @splatconstant_rotate_mask_v16i16(<16 x i16> %a) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512-LABEL: splatconstant_rotate_mask_v16i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsllw $5, %ymm0, %ymm1
+; AVX512-NEXT: vpsrlw $11, %ymm0, %ymm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+;
; XOPAVX1-LABEL: splatconstant_rotate_mask_v16i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vprotw $5, %xmm0, %xmm1
@@ -1055,6 +1217,17 @@ define <32 x i8> @splatconstant_rotate_mask_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
+; AVX512-LABEL: splatconstant_rotate_mask_v32i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpsllw $4, %ymm0, %ymm1
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+;
; XOPAVX1-LABEL: splatconstant_rotate_mask_v32i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vprotb $4, %xmm0, %xmm1
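
A recurring change in the constant-rotate hunks of both rotate files: the pair
"movdqa of per-byte shift amounts; psllw $5" is replaced by a single movdqa of
a pre-shifted constant, e.g. [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1] becomes
[8192,24640,41088,57536,57600,41152,24704,8256]. pblendvb keys on the sign bit
of each mask byte, and psllw $5 moves bit 2 of each amount toward that sign
bit; since psllw shifts 16-bit lanes, folding it at build time combines each
little-endian byte pair into one word. Worked out for three of the word values
above:

0x0100 << 5 = 0x2000 = 8192
0x0302 << 5 = 0x6040 = 24640
0x0708 << 5 = 0xE100 = 57600
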
diff --git a/test/CodeGen/X86/vector-sext.ll b/test/CodeGen/X86/vector-sext.ll
index 774d615ae896..e9f1d1d8522b 100644
--- a/test/CodeGen/X86/vector-sext.ll
+++ b/test/CodeGen/X86/vector-sext.ll
@@ -1246,6 +1246,7 @@ define <2 x i64> @load_sext_2i1_to_2i64(<2 x i1> *%ptr) {
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_sext_2i1_to_2i64:
@@ -1254,6 +1255,7 @@ define <2 x i64> @load_sext_2i1_to_2i64(<2 x i1> *%ptr) {
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_2i1_to_2i64:
@@ -1436,6 +1438,7 @@ define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_sext_4i1_to_4i32:
@@ -1445,6 +1448,7 @@ define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_4i1_to_4i32:
@@ -1941,14 +1945,16 @@ define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_sext_8i1_to_8i16:
; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: movzbl (%rdi), %eax
-; AVX512BW-NEXT: kmovd %eax, %k1
-; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512BW-NEXT: kmovd %eax, %k0
+; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_8i1_to_8i16:
@@ -2847,12 +2853,21 @@ define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: load_sext_16i1_to_16i8:
-; AVX512: # BB#0: # %entry
-; AVX512-NEXT: kmovw (%rdi), %k1
-; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: load_sext_16i1_to_16i8:
+; AVX512F: # BB#0: # %entry
+; AVX512F-NEXT: kmovw (%rdi), %k1
+; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: load_sext_16i1_to_16i8:
+; AVX512BW: # BB#0: # %entry
+; AVX512BW-NEXT: kmovw (%rdi), %k0
+; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_16i1_to_16i8:
; X32-SSE41: # BB#0: # %entry
@@ -3384,12 +3399,19 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
-; AVX512-LABEL: load_sext_16i1_to_16i16:
-; AVX512: # BB#0: # %entry
-; AVX512-NEXT: kmovw (%rdi), %k1
-; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: load_sext_16i1_to_16i16:
+; AVX512F: # BB#0: # %entry
+; AVX512F-NEXT: kmovw (%rdi), %k1
+; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: load_sext_16i1_to_16i16:
+; AVX512BW: # BB#0: # %entry
+; AVX512BW-NEXT: kmovw (%rdi), %k0
+; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_16i1_to_16i16:
; X32-SSE41: # BB#0: # %entry
@@ -4228,16 +4250,23 @@ define <32 x i8> @load_sext_32i1_to_32i8(<32 x i1> *%ptr) nounwind readnone {
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
-; AVX512-LABEL: load_sext_32i1_to_32i8:
-; AVX512: # BB#0: # %entry
-; AVX512-NEXT: kmovw (%rdi), %k1
-; AVX512-NEXT: kmovw 2(%rdi), %k2
-; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
-; AVX512-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: load_sext_32i1_to_32i8:
+; AVX512F: # BB#0: # %entry
+; AVX512F-NEXT: kmovw (%rdi), %k1
+; AVX512F-NEXT: kmovw 2(%rdi), %k2
+; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: load_sext_32i1_to_32i8:
+; AVX512BW: # BB#0: # %entry
+; AVX512BW-NEXT: kmovd (%rdi), %k0
+; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_32i1_to_32i8:
; X32-SSE41: # BB#0: # %entry
@@ -4435,7 +4464,7 @@ define <2 x i64> @load_sext_2i16_to_2i64(<2 x i16> *%ptr) {
; SSE2-LABEL: load_sext_2i16_to_2i64:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: psrad $16, %xmm0
@@ -4445,7 +4474,7 @@ define <2 x i64> @load_sext_2i16_to_2i64(<2 x i16> *%ptr) {
; SSSE3-LABEL: load_sext_2i16_to_2i64:
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: psrad $31, %xmm1
; SSSE3-NEXT: psrad $16, %xmm0
@@ -4968,10 +4997,9 @@ define <32 x i8> @sext_32xi1_to_32xi8(<32 x i16> %c1, <32 x i16> %c2)nounwind {
;
; AVX512BW-LABEL: sext_32xi1_to_32xi8:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
+; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: sext_32xi1_to_32xi8:
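
The vector-sext.ll hunks above split the previously shared AVX512 blocks into
AVX512F and AVX512BW variants: with AVX512BW, sign-extension of an i1 vector
is now lowered as a kmov into a mask register followed by vpmovm2b/vpmovm2w,
instead of vpternlog materializing all-ones into a zmm plus a truncating
vpmovdb/vpmovdw, and vzeroupper is emitted where a zmm register was touched.
The function signature below is taken verbatim from the hunk header; the body
is an assumed reconstruction of the obvious load-plus-sext pattern, not copied
from the test file:

define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
entry:
  %m = load <16 x i1>, <16 x i1>* %ptr
  %ext = sext <16 x i1> %m to <16 x i8>
  ret <16 x i8> %ext
}
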
diff --git a/test/CodeGen/X86/vector-shift-ashr-128.ll b/test/CodeGen/X86/vector-shift-ashr-128.ll
index 9f0d4a7d7264..a5e2cb66eba8 100644
--- a/test/CodeGen/X86/vector-shift-ashr-128.ll
+++ b/test/CodeGen/X86/vector-shift-ashr-128.ll
@@ -83,11 +83,10 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
;
; AVX512-LABEL: var_shift_v2i64:
; AVX512: # BB#0:
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX512-NEXT: vpsrlvq %xmm1, %xmm2, %xmm3
-; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsubq %xmm3, %xmm0, %xmm0
+; AVX512-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
+; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v2i64:
@@ -279,21 +278,21 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psraw $8, %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm4, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: psraw $4, %xmm1
; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: psraw $2, %xmm1
; SSE41-NEXT: paddw %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: psraw $1, %xmm1
; SSE41-NEXT: paddw %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
@@ -320,7 +319,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -468,29 +467,29 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: psraw $4, %xmm4
-; SSE41-NEXT: pblendvb %xmm4, %xmm3
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: psraw $2, %xmm4
; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm4, %xmm3
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: psraw $1, %xmm4
; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm4, %xmm3
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
; SSE41-NEXT: psrlw $8, %xmm3
; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psraw $4, %xmm2
-; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psraw $2, %xmm2
; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psraw $1, %xmm2
; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: packuswb %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
@@ -649,11 +648,9 @@ define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
;
; AVX512-LABEL: splatvar_shift_v2i64:
; AVX512: # BB#0:
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX512-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
-; AVX512-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpsubq %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512-NEXT: vpsraq %xmm1, %zmm0, %zmm0
+; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v2i64:
@@ -844,29 +841,29 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: psraw $4, %xmm4
-; SSE41-NEXT: pblendvb %xmm4, %xmm3
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: psraw $2, %xmm4
; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm4, %xmm3
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: psraw $1, %xmm4
; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm4, %xmm3
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
; SSE41-NEXT: psrlw $8, %xmm3
; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psraw $4, %xmm2
-; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psraw $2, %xmm2
; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psraw $1, %xmm2
; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: packuswb %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
@@ -1085,10 +1082,10 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
;
; AVX512-LABEL: constant_shift_v2i64:
; AVX512: # BB#0:
-; AVX512-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [4611686018427387904,72057594037927936]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [1,7]
+; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v2i64:
@@ -1246,7 +1243,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -1311,8 +1308,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: constant_shift_v16i8:
; SSE2: # BB#0:
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; SSE2-NEXT: psllw $5, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [8192,24640,41088,57536,49376,32928,16480,32]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm5, %xmm5
@@ -1370,35 +1366,34 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE41-LABEL: constant_shift_v16i8:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; SSE41-NEXT: psllw $5, %xmm3
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [8192,24640,41088,57536,49376,32928,16480,32]
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psraw $4, %xmm4
-; SSE41-NEXT: pblendvb %xmm4, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psraw $2, %xmm4
; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm4, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psraw $1, %xmm4
; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm4, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
; SSE41-NEXT: psrlw $8, %xmm2
; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: psraw $4, %xmm3
-; SSE41-NEXT: pblendvb %xmm3, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: psraw $2, %xmm3
; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm3, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: psraw $1, %xmm3
; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm3, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: packuswb %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
@@ -1406,8 +1401,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
;
; AVX-LABEL: constant_shift_v16i8:
; AVX: # BB#0:
-; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [8192,24640,41088,57536,49376,32928,16480,32]
; AVX-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX-NEXT: vpsraw $4, %xmm3, %xmm4
@@ -1457,8 +1451,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; X32-SSE-LABEL: constant_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; X32-SSE-NEXT: psllw $5, %xmm3
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [8192,24640,41088,57536,49376,32928,16480,32]
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
; X32-SSE-NEXT: pxor %xmm2, %xmm2
; X32-SSE-NEXT: pxor %xmm5, %xmm5
@@ -1562,9 +1555,9 @@ define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
;
; AVX512-LABEL: splatconstant_shift_v2i64:
; AVX512: # BB#0:
-; AVX512-NEXT: vpsrad $7, %xmm0, %xmm1
-; AVX512-NEXT: vpsrlq $7, %xmm0, %xmm0
-; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512-NEXT: vpsraq $7, %zmm0, %zmm0
+; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v2i64:
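
The ashr-128 hunks above combine two orthogonal changes: SSE4.1 pblendvb is now printed with its implicit %xmm0 mask operand spelled out, and the plain-AVX512 paths switch to the native 64-bit arithmetic shifts (vpsraq/vpsravq, run on zmm with "# kill" liveness annotations around the implicit widening) instead of expanding the shift as a logical shift plus a sign fix-up. A minimal C sketch of the identity the removed expansion relied on (function name and test values are illustrative, not from the tests):

#include <assert.h>
#include <stdint.h>

/* Old AVX512 expansion of a 64-bit arithmetic shift right:
   ashr(x, n) == (lshr(x, n) ^ m) - m, where m = 0x8000000000000000 >> n
   is the shifted-down sign bit (the constant the removed vmovdqa/vpsrlq
   pair materialized per lane). */
static int64_t ashr_via_lshr(int64_t x, unsigned n) {
    uint64_t m = 0x8000000000000000ULL >> n;
    return (int64_t)((((uint64_t)x >> n) ^ m) - m);
}

int main(void) {
    assert(ashr_via_lshr(-128, 7) == -1);  /* sign bits propagate */
    assert(ashr_via_lshr(1024, 7) == 8);   /* positive values unaffected */
    return 0;
}

The removed pool constants are exactly m for the shift amounts involved, e.g. 4611686018427387904 and 72057594037927936 are 0x8000000000000000 shifted right by 1 and 7 for the [1,7] constant-shift case.
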
diff --git a/test/CodeGen/X86/vector-shift-ashr-256.ll b/test/CodeGen/X86/vector-shift-ashr-256.ll
index aee2857157b6..af3ddcf8048e 100644
--- a/test/CodeGen/X86/vector-shift-ashr-256.ll
+++ b/test/CodeGen/X86/vector-shift-ashr-256.ll
@@ -71,11 +71,10 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
;
; AVX512-LABEL: var_shift_v4i64:
; AVX512: # BB#0:
-; AVX512-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
-; AVX512-NEXT: vpsrlvq %ymm1, %ymm2, %ymm3
-; AVX512-NEXT: vpxor %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsubq %ymm3, %ymm0, %ymm0
+; AVX512-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v4i64:
@@ -491,11 +490,9 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
;
; AVX512-LABEL: splatvar_shift_v4i64:
; AVX512: # BB#0:
-; AVX512-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
-; AVX512-NEXT: vpsrlq %xmm1, %ymm2, %ymm2
-; AVX512-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
-; AVX512-NEXT: vpxor %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpsubq %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512-NEXT: vpsraq %xmm1, %zmm0, %zmm0
+; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v4i64:
@@ -836,10 +833,10 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
;
; AVX512-LABEL: constant_shift_v4i64:
; AVX512: # BB#0:
-; AVX512-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [4611686018427387904,72057594037927936,4294967296,2]
-; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [1,7,31,62]
+; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v4i64:
@@ -990,8 +987,7 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: constant_shift_v32i8:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [8192,24640,41088,57536,49376,32928,16480,32]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
@@ -1038,8 +1034,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
;
; AVX2-LABEL: constant_shift_v32i8:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX2-NEXT: vpsraw $4, %ymm3, %ymm4
@@ -1087,8 +1082,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
;
; AVX512DQ-LABEL: constant_shift_v32i8:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512DQ-NEXT: vpsraw $4, %ymm3, %ymm4
@@ -1123,8 +1117,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
;
; AVX512DQVL-LABEL: constant_shift_v32i8:
; AVX512DQVL: # BB#0:
-; AVX512DQVL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX512DQVL-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512DQVL-NEXT: vmovdqa {{.*#+}} ymm1 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512DQVL-NEXT: vpsraw $4, %ymm3, %ymm4
@@ -1204,9 +1197,9 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
;
; AVX512-LABEL: splatconstant_shift_v4i64:
; AVX512: # BB#0:
-; AVX512-NEXT: vpsrad $7, %ymm0, %ymm1
-; AVX512-NEXT: vpsrlq $7, %ymm0, %ymm0
-; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512-NEXT: vpsraq $7, %zmm0, %zmm0
+; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v4i64:
diff --git a/test/CodeGen/X86/vector-shift-ashr-512.ll b/test/CodeGen/X86/vector-shift-ashr-512.ll
index 6cc98b5f3eeb..4d4b7f4e8223 100644
--- a/test/CodeGen/X86/vector-shift-ashr-512.ll
+++ b/test/CodeGen/X86/vector-shift-ashr-512.ll
@@ -309,8 +309,7 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v64i8:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5
@@ -357,7 +356,7 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpsraw $4, %zmm1, %zmm2
-; AVX512BW-NEXT: vpsllw $5, {{.*}}(%rip), %zmm3
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm0[8],zmm3[8],zmm0[9],zmm3[9],zmm0[10],zmm3[10],zmm0[11],zmm3[11],zmm0[12],zmm3[12],zmm0[13],zmm3[13],zmm0[14],zmm3[14],zmm0[15],zmm3[15],zmm0[24],zmm3[24],zmm0[25],zmm3[25],zmm0[26],zmm3[26],zmm0[27],zmm3[27],zmm0[28],zmm3[28],zmm0[29],zmm3[29],zmm0[30],zmm3[30],zmm0[31],zmm3[31],zmm0[40],zmm3[40],zmm0[41],zmm3[41],zmm0[42],zmm3[42],zmm0[43],zmm3[43],zmm0[44],zmm3[44],zmm0[45],zmm3[45],zmm0[46],zmm3[46],zmm0[47],zmm3[47],zmm0[56],zmm3[56],zmm0[57],zmm3[57],zmm0[58],zmm3[58],zmm0[59],zmm3[59],zmm0[60],zmm3[60],zmm0[61],zmm3[61],zmm0[62],zmm3[62],zmm0[63],zmm3[63]
; AVX512BW-NEXT: vpmovb2m %zmm4, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm1 {%k1}
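
A fold repeated throughout these shift tests: the per-byte shift-amount constant [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0] is no longer shifted left by 5 at run time with psllw; the shifted value is emitted directly from the constant pool. psllw operates on 16-bit lanes, so the printed words are adjacent (lo, hi) byte pairs shifted within each word. A small C sketch reproducing the folded constant:

#include <stdio.h>
#include <stdint.h>

int main(void) {
    /* byte shift amounts from the old constant */
    const uint8_t amts[16] = {0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0};
    /* psllw $5 shifts 16-bit lanes, so each adjacent (lo, hi) byte pair
       forms one word before the shift */
    for (int i = 0; i < 16; i += 2) {
        unsigned word = (unsigned)(amts[i] | (amts[i + 1] << 8));
        printf("%u%s", (word << 5) & 0xFFFF, i < 14 ? "," : "\n");
    }
    /* prints: 8192,24640,41088,57536,49376,32928,16480,32 */
    return 0;
}
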
diff --git a/test/CodeGen/X86/vector-shift-lshr-128.ll b/test/CodeGen/X86/vector-shift-lshr-128.ll
index 9b8c0def4558..9b44ad1dac30 100644
--- a/test/CodeGen/X86/vector-shift-lshr-128.ll
+++ b/test/CodeGen/X86/vector-shift-lshr-128.ll
@@ -249,21 +249,21 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psrlw $8, %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm4, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: psrlw $4, %xmm1
; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: psrlw $2, %xmm1
; SSE41-NEXT: paddw %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: psrlw $1, %xmm1
; SSE41-NEXT: paddw %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
@@ -290,7 +290,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -411,19 +411,19 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: psrlw $4, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm3, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: psrlw $2, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: paddb %xmm1, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm3, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: psrlw $1, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: paddb %xmm1, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm3, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
@@ -684,18 +684,18 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: psrlw $4, %xmm4
; SSE41-NEXT: pand {{.*}}(%rip), %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm4, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: psrlw $2, %xmm1
; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: psrlw $1, %xmm1
; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
@@ -1005,7 +1005,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -1069,8 +1069,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: constant_shift_v16i8:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; SSE2-NEXT: psllw $5, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,49376,32928,16480,32]
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: pcmpgtb %xmm2, %xmm3
@@ -1102,40 +1101,38 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE41-LABEL: constant_shift_v16i8:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; SSE41-NEXT: psllw $5, %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psrlw $4, %xmm2
; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
-; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32]
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psrlw $2, %xmm2
; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
; SSE41-NEXT: paddb %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psrlw $1, %xmm2
; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
; SSE41-NEXT: paddb %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: constant_shift_v16i8:
; AVX: # BB#0:
-; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX-NEXT: vpsrlw $4, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $2, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $1, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $4, %xmm0, %xmm1
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,49376,32928,16480,32]
+; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $2, %xmm0, %xmm1
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vpaddb %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $1, %xmm0, %xmm1
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vpaddb %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: constant_shift_v16i8:
@@ -1161,8 +1158,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
;
; X32-SSE-LABEL: constant_shift_v16i8:
; X32-SSE: # BB#0:
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; X32-SSE-NEXT: psllw $5, %xmm2
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,49376,32928,16480,32]
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: pxor %xmm3, %xmm3
; X32-SSE-NEXT: pcmpgtb %xmm2, %xmm3
diff --git a/test/CodeGen/X86/vector-shift-lshr-256.ll b/test/CodeGen/X86/vector-shift-lshr-256.ll
index 58bb8f3e6ec0..60575250d713 100644
--- a/test/CodeGen/X86/vector-shift-lshr-256.ll
+++ b/test/CodeGen/X86/vector-shift-lshr-256.ll
@@ -825,8 +825,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm8, %xmm2, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX1-NEXT: vpsllw $5, %xmm4, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [8192,24640,41088,57536,49376,32928,16480,32]
; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
@@ -852,19 +851,18 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
;
; AVX2-LABEL: constant_shift_v32i8:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm1
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
+; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $2, %ymm0, %ymm1
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm1
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v32i8:
@@ -889,19 +887,18 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
;
; AVX512DQ-LABEL: constant_shift_v32i8:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsrlw $1, %ymm0, %ymm2
-; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm1
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $2, %ymm0, %ymm1
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $1, %ymm0, %ymm1
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v32i8:
@@ -913,19 +910,18 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
;
; AVX512DQVL-LABEL: constant_shift_v32i8:
; AVX512DQVL: # BB#0:
-; AVX512DQVL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX512DQVL-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512DQVL-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQVL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpsrlw $1, %ymm0, %ymm2
-; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQVL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQVL-NEXT: vpsrlw $4, %ymm0, %ymm1
+; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512DQVL-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
+; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512DQVL-NEXT: vpsrlw $2, %ymm0, %ymm1
+; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512DQVL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512DQVL-NEXT: vpsrlw $1, %ymm0, %ymm1
+; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512DQVL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: constant_shift_v32i8:
diff --git a/test/CodeGen/X86/vector-shift-lshr-512.ll b/test/CodeGen/X86/vector-shift-lshr-512.ll
index 905445f30162..c269f8159517 100644
--- a/test/CodeGen/X86/vector-shift-lshr-512.ll
+++ b/test/CodeGen/X86/vector-shift-lshr-512.ll
@@ -244,8 +244,7 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpand %ymm3, %ymm2, %ymm2
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX512DQ-NEXT: vpsllw $5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw $2, %ymm0, %ymm2
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
@@ -270,7 +269,7 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
;
; AVX512BW-LABEL: constant_shift_v64i8:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vpsllw $5, {{.*}}(%rip), %zmm1
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm1 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm2
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm2, %zmm2
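
The AVX2 word-shift paths also change their vpshufb masks: instead of zeroing the high eight bytes of each 128-bit lane, they leave don't-care in-lane duplicates there, since the following vpermq {0,2,2,3} keeps only each lane's low quadwords anyway. A rough C model of per-lane vpshufb, showing why the old zeroing entries (index bit 7 set) were unnecessary here (names are illustrative):

#include <stdint.h>

/* Per-lane model of AVX2 vpshufb: two independent 16-byte lanes; an index
   with bit 7 set writes zero, otherwise the low 4 bits pick a byte within
   the same lane. */
void vpshufb_model(const uint8_t src[32], const uint8_t idx[32], uint8_t dst[32]) {
    for (int lane = 0; lane < 32; lane += 16)
        for (int i = 0; i < 16; ++i) {
            uint8_t k = idx[lane + i];
            dst[lane + i] = (k & 0x80) ? 0 : src[lane + (k & 0x0F)];
        }
}
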
diff --git a/test/CodeGen/X86/vector-shift-shl-128.ll b/test/CodeGen/X86/vector-shift-shl-128.ll
index 32334420f8b2..568bf6e974f7 100644
--- a/test/CodeGen/X86/vector-shift-shl-128.ll
+++ b/test/CodeGen/X86/vector-shift-shl-128.ll
@@ -206,21 +206,21 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psllw $8, %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm4, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: psllw $4, %xmm1
; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: psllw $2, %xmm1
; SSE41-NEXT: paddw %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: psllw $1, %xmm1
; SSE41-NEXT: paddw %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
@@ -247,7 +247,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -365,18 +365,18 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: psllw $4, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm3, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: psllw $2, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: paddb %xmm1, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm3, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: paddb %xmm1, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm3, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
@@ -632,17 +632,17 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: psllw $4, %xmm4
; SSE41-NEXT: pand {{.*}}(%rip), %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm4, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: psllw $2, %xmm1
; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: paddb %xmm1, %xmm1
; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
@@ -926,8 +926,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: constant_shift_v16i8:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; SSE2-NEXT: psllw $5, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,49376,32928,16480,32]
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: pcmpgtb %xmm2, %xmm3
@@ -958,38 +957,36 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE41-LABEL: constant_shift_v16i8:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; SSE41-NEXT: psllw $5, %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psllw $4, %xmm2
; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
-; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32]
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psllw $2, %xmm2
; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
; SSE41-NEXT: paddb %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: paddb %xmm2, %xmm2
; SSE41-NEXT: paddb %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: constant_shift_v16i8:
; AVX: # BB#0:
-; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX-NEXT: vpsllw $4, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpsllw $2, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm2
-; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpsllw $4, %xmm0, %xmm1
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,49376,32928,16480,32]
+; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsllw $2, %xmm0, %xmm1
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vpaddb %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm1
+; AVX-NEXT: vpaddb %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: constant_shift_v16i8:
@@ -1013,8 +1010,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
;
; X32-SSE-LABEL: constant_shift_v16i8:
; X32-SSE: # BB#0:
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; X32-SSE-NEXT: psllw $5, %xmm2
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,49376,32928,16480,32]
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: pxor %xmm3, %xmm3
; X32-SSE-NEXT: pcmpgtb %xmm2, %xmm3
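
The SSE41/AVX byte-shift lowering these hunks keep touching is a three-step blend ladder: candidates shifted by 4, 2, and 1 are conditionally merged in with pblendvb, whose per-byte select bit is the mask byte's sign bit, and the mask is doubled between steps (paddb on itself) so the next bit of the shift amount reaches the sign position. A scalar C sketch of one byte's worth of the ladder (the starting mask is the amount shifted left by 5, i.e. the folded constant shown earlier):

#include <stdint.h>

/* One byte of the SSE41 shift ladder for amt in 0..7. The vector code runs
   this for all 16 bytes at once, using pblendvb to apply each step only
   where the mask byte's sign bit is set. */
uint8_t shl_ladder(uint8_t x, uint8_t amt) {
    uint8_t mask = (uint8_t)(amt << 5);      /* bit 2 of amt lands in bit 7 */
    if (mask & 0x80) x = (uint8_t)(x << 4);  /* first pblendvb step */
    mask = (uint8_t)(mask << 1);             /* paddb %mask, %mask */
    if (mask & 0x80) x = (uint8_t)(x << 2);  /* second step */
    mask = (uint8_t)(mask << 1);
    if (mask & 0x80) x = (uint8_t)(x + x);   /* shift by 1 done as paddb */
    return x;
}
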
diff --git a/test/CodeGen/X86/vector-shift-shl-256.ll b/test/CodeGen/X86/vector-shift-shl-256.ll
index 104fa089c744..7f534050b6a7 100644
--- a/test/CodeGen/X86/vector-shift-shl-256.ll
+++ b/test/CodeGen/X86/vector-shift-shl-256.ll
@@ -730,8 +730,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX1-NEXT: vpsllw $5, %xmm4, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [8192,24640,41088,57536,49376,32928,16480,32]
; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsllw $2, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
@@ -754,18 +753,17 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
;
; AVX2-LABEL: constant_shift_v32i8:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX2-NEXT: vpsllw $4, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsllw $2, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2
-; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
+; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsllw $2, %ymm0, %ymm1
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm1
+; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v32i8:
@@ -788,18 +786,17 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
;
; AVX512DQ-LABEL: constant_shift_v32i8:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm2
-; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsllw $2, %ymm0, %ymm2
-; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpaddb %ymm0, %ymm0, %ymm2
-; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm1
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $2, %ymm0, %ymm1
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpaddb %ymm0, %ymm0, %ymm1
+; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v32i8:
@@ -811,18 +808,17 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
;
; AVX512DQVL-LABEL: constant_shift_v32i8:
; AVX512DQVL: # BB#0:
-; AVX512DQVL-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX512DQVL-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512DQVL-NEXT: vpsllw $4, %ymm0, %ymm2
-; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpsllw $2, %ymm0, %ymm2
-; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQVL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpaddb %ymm0, %ymm0, %ymm2
-; AVX512DQVL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQVL-NEXT: vpsllw $4, %ymm0, %ymm1
+; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512DQVL-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
+; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512DQVL-NEXT: vpsllw $2, %ymm0, %ymm1
+; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512DQVL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512DQVL-NEXT: vpaddb %ymm0, %ymm0, %ymm1
+; AVX512DQVL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: constant_shift_v32i8:
diff --git a/test/CodeGen/X86/vector-shift-shl-512.ll b/test/CodeGen/X86/vector-shift-shl-512.ll
index 180d6f3a3b03..39f8fe2f05dc 100644
--- a/test/CodeGen/X86/vector-shift-shl-512.ll
+++ b/test/CodeGen/X86/vector-shift-shl-512.ll
@@ -230,8 +230,7 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm2
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512DQ-NEXT: vpand %ymm3, %ymm2, %ymm2
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX512DQ-NEXT: vpsllw $5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsllw $2, %ymm0, %ymm2
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
@@ -253,7 +252,7 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
;
; AVX512BW-LABEL: constant_shift_v64i8:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vpsllw $5, {{.*}}(%rip), %zmm1
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm1 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm2
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm2, %zmm2
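
On AVX512BW the same sign-bit mask drives a k register instead: the now constant-folded pattern is loaded as a 64-byte zmm constant and vpmovb2m packs each byte's top bit into k1, which then predicates the masked vmovdqu8 merge. A rough C model of the vpmovb2m step (illustrative, not the test's code):

#include <stdint.h>

/* Model of vpmovb2m: bit i of the k register is the sign bit of byte i. */
uint64_t vpmovb2m_model(const uint8_t v[64]) {
    uint64_t k = 0;
    for (int i = 0; i < 64; ++i)
        k |= (uint64_t)(v[i] >> 7) << i;
    return k;
}
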
diff --git a/test/CodeGen/X86/vector-shuffle-128-v16.ll b/test/CodeGen/X86/vector-shuffle-128-v16.ll
index 2aab77433dfb..9f4501c1f225 100644
--- a/test/CodeGen/X86/vector-shuffle-128-v16.ll
+++ b/test/CodeGen/X86/vector-shuffle-128-v16.ll
@@ -238,14 +238,12 @@ define <16 x i8> @shuffle_v16i8_08_24_09_25_10_26_11_27_12_28_13_29_14_30_15_31(
define <16 x i8> @shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07:
; SSE2: # BB#0:
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,0,0,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07:
@@ -411,7 +409,7 @@ define <16 x i8> @shuffle_v16i8_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31(
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -423,8 +421,9 @@ define <16 x i8> @shuffle_v16i8_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31(
;
; AVX512VL-LABEL: shuffle_v16i8_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqu {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; AVX512VL-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1}
; AVX512VL-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
ret <16 x i8> %shuffle
@@ -450,7 +449,7 @@ define <16 x i8> @shuffle_v16i8_00_01_02_19_04_05_06_23_08_09_10_27_12_13_14_31(
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
-; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -462,8 +461,9 @@ define <16 x i8> @shuffle_v16i8_00_01_02_19_04_05_06_23_08_09_10_27_12_13_14_31(
;
; AVX512VL-LABEL: shuffle_v16i8_00_01_02_19_04_05_06_23_08_09_10_27_12_13_14_31:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqu {{.*#+}} xmm2 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
-; AVX512VL-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT: movw $-30584, %ax # imm = 0x8888
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1}
; AVX512VL-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 19, i32 4, i32 5, i32 6, i32 23, i32 8, i32 9, i32 10, i32 27, i32 12, i32 13, i32 14, i32 31>
ret <16 x i8> %shuffle
@@ -508,7 +508,7 @@ define <16 x i8> @shuffle_v16i8_00_01_02_03_20_05_06_23_08_09_10_11_28_13_14_31(
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,255,255,255,0,255,255,0,255,255,255,255,0,255,255,0]
-; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -520,8 +520,9 @@ define <16 x i8> @shuffle_v16i8_00_01_02_03_20_05_06_23_08_09_10_11_28_13_14_31(
;
; AVX512VL-LABEL: shuffle_v16i8_00_01_02_03_20_05_06_23_08_09_10_11_28_13_14_31:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqu {{.*#+}} xmm2 = [255,255,255,255,0,255,255,0,255,255,255,255,0,255,255,0]
-; AVX512VL-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT: movw $-28528, %ax # imm = 0x9090
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1}
; AVX512VL-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 20, i32 5, i32 6, i32 23, i32 8, i32 9, i32 10, i32 11, i32 28, i32 13, i32 14, i32 31>
ret <16 x i8> %shuffle
@@ -548,7 +549,7 @@ define <16 x i8> @shuffle_v16i8_16_17_18_19_04_05_06_07_24_25_10_11_28_13_30_15(
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,255,255,255,0,0,0,0,255,255,0,0,255,0,255,0]
-; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
@@ -560,8 +561,9 @@ define <16 x i8> @shuffle_v16i8_16_17_18_19_04_05_06_07_24_25_10_11_28_13_30_15(
;
; AVX512VL-LABEL: shuffle_v16i8_16_17_18_19_04_05_06_07_24_25_10_11_28_13_30_15:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqu {{.*#+}} xmm2 = [255,255,255,255,0,0,0,0,255,255,0,0,255,0,255,0]
-; AVX512VL-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: movw $-21264, %ax # imm = 0xACF0
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vpblendmb %xmm0, %xmm1, %xmm0 {%k1}
; AVX512VL-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 10, i32 11, i32 28, i32 13, i32 30, i32 15>
ret <16 x i8> %shuffle
@@ -1714,17 +1716,17 @@ define <16 x i8> @PR31364(i8* nocapture readonly %a, i8* nocapture readonly %b)
; SSE2-NEXT: movzbl (%rsi), %ecx
; SSE2-NEXT: shll $8, %ecx
; SSE2-NEXT: orl %eax, %ecx
-; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: movzwl %cx, %eax
+; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pinsrw $0, %ecx, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,0,3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,5,4,4,4]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,0,1]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,1,1,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,4,4,4]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,7]
-; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: PR31364:
@@ -1733,8 +1735,8 @@ define <16 x i8> @PR31364(i8* nocapture readonly %a, i8* nocapture readonly %b)
; SSSE3-NEXT: movzbl (%rsi), %ecx
; SSSE3-NEXT: shll $8, %ecx
; SSSE3-NEXT: orl %eax, %ecx
-; SSSE3-NEXT: pxor %xmm0, %xmm0
-; SSSE3-NEXT: pinsrw $0, %ecx, %xmm0
+; SSSE3-NEXT: movzwl %cx, %eax
+; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,1,1,1,1,1,1],zero,xmm0[1,1,1,1,1,0,0,0]
; SSSE3-NEXT: retq
;
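
With AVX512VL, constant byte/word blends stop going through vpblendvb with a vector constant; the select pattern is encoded as an immediate moved into a k register (movw/kmovd) and applied with a masked vmovdqu8/vmovdqu16 or vpblendmb. Bit i of the immediate picks element i from the merged source, so the immediates fall straight out of the shuffle indices; a C sketch deriving the 0xAAAA (-21846) case above (illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void) {
    /* shuffle_v16i8_00_17_02_19_...: indices >= 16 select from %b, and
       those positions become set bits in the k-mask immediate */
    const int idx[16] = {0,17,2,19,4,21,6,23,8,25,10,27,12,29,14,31};
    unsigned k = 0;
    for (int i = 0; i < 16; ++i)
        if (idx[i] >= 16)
            k |= 1u << i;
    printf("%#x (%d)\n", k, (int)(int16_t)k);  /* 0xaaaa (-21846) */
    return 0;
}

The 0x8888 and 0x9090 immediates above fall out the same way from their index lists.
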
diff --git a/test/CodeGen/X86/vector-shuffle-128-v2.ll b/test/CodeGen/X86/vector-shuffle-128-v2.ll
index df854ef5dbfc..d0ead653b203 100644
--- a/test/CodeGen/X86/vector-shuffle-128-v2.ll
+++ b/test/CodeGen/X86/vector-shuffle-128-v2.ll
@@ -910,25 +910,25 @@ define <2 x double> @shuffle_v2f64_bitcast_1z(<2 x double> %a) {
; SSE-LABEL: shuffle_v2f64_bitcast_1z:
; SSE: # BB#0:
; SSE-NEXT: xorpd %xmm1, %xmm1
-; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
+; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE-NEXT: retq
;
; AVX1-LABEL: shuffle_v2f64_bitcast_1z:
; AVX1: # BB#0:
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
+; AVX1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2f64_bitcast_1z:
; AVX2: # BB#0:
; AVX2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
+; AVX2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2f64_bitcast_1z:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512VL-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
+; AVX512VL-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX512VL-NEXT: retq
%shuffle64 = shufflevector <2 x double> %a, <2 x double> zeroinitializer, <2 x i32> <i32 2, i32 1>
%bitcast32 = bitcast <2 x double> %shuffle64 to <4 x float>
diff --git a/test/CodeGen/X86/vector-shuffle-256-v16.ll b/test/CodeGen/X86/vector-shuffle-256-v16.ll
index 3c7fd8b51a02..fad5586dd77c 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v16.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v16.ll
@@ -717,8 +717,9 @@ define <16 x i16> @shuffle_v16i16_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_3
;
; AVX512VL-LABEL: shuffle_v16i16_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_31:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqu {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0]
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: movw $-32768, %ax # imm = 0x8000
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vmovdqu16 %ymm1, %ymm0 {%k1}
; AVX512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 31>
ret <16 x i16> %shuffle
@@ -741,8 +742,9 @@ define <16 x i16> @shuffle_v16i16_16_01_02_03_04_05_06_07_08_09_10_11_12_13_14_1
;
; AVX512VL-LABEL: shuffle_v16i16_16_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqu {{.*#+}} ymm2 = [0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: movw $1, %ax
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vmovdqu16 %ymm1, %ymm0 {%k1}
; AVX512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 16, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
ret <16 x i16> %shuffle
@@ -765,8 +767,9 @@ define <16 x i16> @shuffle_v16i16_00_17_02_19_04_21_06_23_24_09_26_11_28_13_30_1
;
; AVX512VL-LABEL: shuffle_v16i16_00_17_02_19_04_21_06_23_24_09_26_11_28_13_30_15:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqu {{.*#+}} ymm2 = [255,255,0,0,255,255,0,0,255,255,0,0,255,255,0,0,0,0,255,255,0,0,255,255,0,0,255,255,0,0,255,255]
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: movw $21930, %ax # imm = 0x55AA
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vmovdqu16 %ymm1, %ymm0 {%k1}
; AVX512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 24, i32 9, i32 26, i32 11, i32 28, i32 13, i32 30, i32 15>
ret <16 x i16> %shuffle
@@ -789,8 +792,9 @@ define <16 x i16> @shuffle_v16i16_16_01_18_03_20_05_22_07_08_25_10_27_12_29_14_3
;
; AVX512VL-LABEL: shuffle_v16i16_16_01_18_03_20_05_22_07_08_25_10_27_12_29_14_31:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqu {{.*#+}} ymm2 = [0,0,255,255,0,0,255,255,0,0,255,255,0,0,255,255,255,255,0,0,255,255,0,0,255,255,0,0,255,255,0,0]
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: movw $-21931, %ax # imm = 0xAA55
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vmovdqu16 %ymm1, %ymm0 {%k1}
; AVX512VL-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 16, i32 1, i32 18, i32 3, i32 20, i32 5, i32 22, i32 7, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
ret <16 x i16> %shuffle
@@ -1660,6 +1664,40 @@ define <16 x i16> @shuffle_v16i16_16_zz_17_zz_18_zz_19_zz_20_zz_21_zz_22_zz_22_z
ret <16 x i16> %shuffle
}

+define <16 x i16> @shuffle_v16i16_28_zz_zz_zz_29_zz_zz_zz_30_zz_zz_zz_31_zz_zz_zz(<16 x i16> %a) {
+; AVX1-LABEL: shuffle_v16i16_28_zz_zz_zz_29_zz_zz_zz_30_zz_zz_zz_31_zz_zz_zz:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_28_zz_zz_zz_29_zz_zz_zz_30_zz_zz_zz_31_zz_zz_zz:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: shuffle_v16i16_28_zz_zz_zz_29_zz_zz_zz_30_zz_zz_zz_31_zz_zz_zz:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqu {{.*#+}} ymm2 = [28,1,2,3,29,5,6,7,30,9,10,11,31,13,14,15]
+; AVX512VL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT: vpermt2w %ymm0, %ymm2, %ymm1
+; AVX512VL-NEXT: vmovdqa %ymm1, %ymm0
+; AVX512VL-NEXT: retq
+ %shuffle = shufflevector <16 x i16> zeroinitializer, <16 x i16> %a, <16 x i32> <i32 28, i32 0, i32 0, i32 0, i32 29, i32 0, i32 0, i32 0, i32 30, i32 0, i32 0, i32 0, i32 31, i32 0, i32 0, i32 0>
+ ret <16 x i16> %shuffle
+}
+
define <16 x i16> @shuffle_v16i16_23_00_01_02_03_04_05_06_31_08_09_10_11_12_13_14(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_23_00_01_02_03_04_05_06_31_08_09_10_11_12_13_14:
; AVX1: # BB#0:
@@ -2649,8 +2687,7 @@ define <16 x i16> @shuffle_v16i16_01_zz_02_zz_04_uu_06_07_08_09_10_11_12_13_14_1
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[1,1,2,3,4,5,6,7]
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4,5,6,7]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v16i16_01_zz_02_zz_04_uu_06_07_08_09_10_11_12_13_14_15:
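
The recurring change in the v16i16 hunks above: a two-source blend with a constant lane selection no longer loads a 256-bit byte mask from the constant pool for vpblendvb. Instead the lane mask is built as an immediate in a GPR, copied into a k-register with kmovd, and applied by a merge-masked move. Bit i of the immediate selects lane i from the merging source, so for shuffle_v16i16_00_17_02_19_04_21_06_23_24_09_26_11_28_13_30_15 the lanes drawn from %b (1, 3, 5, 7 and 8, 10, 12, 14) give exactly 0x55AA. A minimal sketch that should hit the same lowering; the function name and target flags are illustrative, not taken from the patch:

; Assumed invocation: llc -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw
define <16 x i16> @blend_sketch(<16 x i16> %a, <16 x i16> %b) {
  ; Constant blend: k-mask bits 1,3,5,7,8,10,12,14 (0x55AA) pull lanes from %b;
  ; the expected codegen is movw/kmovd plus vmovdqu16 %ymm1, %ymm0 {%k1}.
  %r = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 24, i32 9, i32 26, i32 11, i32 28, i32 13, i32 30, i32 15>
  ret <16 x i16> %r
}
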
diff --git a/test/CodeGen/X86/vector-shuffle-256-v32.ll b/test/CodeGen/X86/vector-shuffle-256-v32.ll
index 301e8079a5dc..f4c4403ed83f 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v32.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v32.ll
@@ -320,8 +320,10 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_
; AVX512VL-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512VL-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpbroadcastb %xmm0, %xmm0
-; AVX512VL-NEXT: vmovdqu {{.*#+}} ymm2 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: movl $32767, %eax # imm = 0x7FFF
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vmovdqu8 %ymm0, %ymm1 {%k1}
+; AVX512VL-NEXT: vmovdqa %ymm1, %ymm0
; AVX512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -349,9 +351,10 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_17_00_
; AVX512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_17_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX512VL-NEXT: vmovdqu {{.*#+}} ymm2 = <0,255,u,u,u,u,u,u,u,u,u,u,u,u,u,u,255,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
-; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT: movl $1, %eax
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vmovdqu8 %ymm0, %ymm1 {%k1}
+; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 17, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -379,9 +382,10 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_18_00_00_
; AVX512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_18_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX512VL-NEXT: vmovdqu {{.*#+}} ymm2 = <0,0,255,255,u,u,u,u,u,u,u,u,u,u,u,u,255,255,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
-; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT: movw $1, %ax
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
+; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 18, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -409,9 +413,10 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_19_00_00_00_
; AVX512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_19_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX512VL-NEXT: vmovdqu {{.*#+}} ymm2 = <0,0,255,255,u,u,u,u,u,u,u,u,u,u,u,u,255,255,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
-; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-NEXT: movw $1, %ax
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
+; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 19, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -1031,8 +1036,9 @@ define <32 x i8> @shuffle_v32i8_00_33_02_35_04_37_06_39_08_41_10_43_12_45_14_47_
;
; AVX512VL-LABEL: shuffle_v32i8_00_33_02_35_04_37_06_39_08_41_10_43_12_45_14_47_16_49_18_51_20_53_22_55_24_57_26_59_28_61_30_63:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqu {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1}
; AVX512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 33, i32 2, i32 35, i32 4, i32 37, i32 6, i32 39, i32 8, i32 41, i32 10, i32 43, i32 12, i32 45, i32 14, i32 47, i32 16, i32 49, i32 18, i32 51, i32 20, i32 53, i32 22, i32 55, i32 24, i32 57, i32 26, i32 59, i32 28, i32 61, i32 30, i32 63>
ret <32 x i8> %shuffle
@@ -1055,8 +1061,9 @@ define <32 x i8> @shuffle_v32i8_32_01_34_03_36_05_38_07_40_09_42_11_44_13_46_15_
;
; AVX512VL-LABEL: shuffle_v32i8_32_01_34_03_36_05_38_07_40_09_42_11_44_13_46_15_48_17_50_19_52_21_54_23_56_25_58_27_60_29_62_31:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqu {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vpblendmb %ymm0, %ymm1, %ymm0 {%k1}
; AVX512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 32, i32 1, i32 34, i32 3, i32 36, i32 5, i32 38, i32 7, i32 40, i32 9, i32 42, i32 11, i32 44, i32 13, i32 46, i32 15, i32 48, i32 17, i32 50, i32 19, i32 52, i32 21, i32 54, i32 23, i32 56, i32 25, i32 58, i32 27, i32 60, i32 29, i32 62, i32 31>
ret <32 x i8> %shuffle
@@ -1070,7 +1077,9 @@ define <32 x i8> @shuffle_v32i8_zz_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_
;
; AVX512VL-LABEL: shuffle_v32i8_zz_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_zz_17_zz_19_zz_21_zz_23_zz_25_zz_27_zz_29_zz_31:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vmovdqu8 %ymm0, %ymm0 {%k1} {z}
; AVX512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 32, i32 1, i32 34, i32 3, i32 36, i32 5, i32 38, i32 7, i32 40, i32 9, i32 42, i32 11, i32 44, i32 13, i32 46, i32 15, i32 48, i32 17, i32 50, i32 19, i32 52, i32 21, i32 54, i32 23, i32 56, i32 25, i32 58, i32 27, i32 60, i32 29, i32 62, i32 31>
ret <32 x i8> %shuffle
@@ -1080,8 +1089,7 @@ define <32 x i8> @shuffle_v32i8_01_zz_02_zz_04_uu_06_07_08_09_10_11_12_13_14_15_
; AVX1-LABEL: shuffle_v32i8_01_zz_02_zz_04_uu_06_07_08_09_10_11_12_13_14_15_u6_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31:
; AVX1: # BB#0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[1],zero,xmm0[2],zero,xmm0[4,u,6,7,8,9,10,11,12,13,14,15]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_01_zz_02_zz_04_uu_06_07_08_09_10_11_12_13_14_15_u6_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31:
@@ -1136,12 +1144,12 @@ define <32 x i8> @shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_
;
; AVX512VL-LABEL: shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_16_48_16_48_16_48_16_48_16_48_16_48_16_48_16_48:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vpxor %ymm2, %ymm2, %ymm2
-; AVX512VL-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,0,0,0,4,5,6,7,8,8,8,8,12,13,14,15]
; AVX512VL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
-; AVX512VL-NEXT: vmovdqu {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: vpxor %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vpshufb %ymm2, %ymm1, %ymm0 {%k1}
; AVX512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 16, i32 48, i32 16, i32 48, i32 16, i32 48, i32 16, i32 48, i32 16, i32 48, i32 16, i32 48, i32 16, i32 48, i32 16, i32 48>
ret <32 x i8> %shuffle
@@ -1385,9 +1393,9 @@ define <32 x i8> @shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_
; AVX512VL-LABEL: shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_24_56_25_57_26_58_27_59_28_60_29_61_30_62_31_63:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,u,1,u,2,u,3,u,4,u,5,u,6,u,7,u,24,u,25,u,26,u,27,u,28,u,29,u,30,u,31,u]
-; AVX512VL-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,0,u,1,u,2,u,3,u,4,u,5,u,6,u,7,u,24,u,25,u,26,u,27,u,28,u,29,u,30,u,31]
-; AVX512VL-NEXT: vmovdqu {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 {%k1} = ymm1[u,0,u,1,u,2,u,3,u,4,u,5,u,6,u,7,u,24,u,25,u,26,u,27,u,28,u,29,u,30,u,31]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
ret <32 x i8> %shuffle
@@ -1414,9 +1422,9 @@ define <32 x i8> @shuffle_v32i8_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47_
; AVX512VL-LABEL: shuffle_v32i8_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47_16_48_17_49_18_50_19_51_20_52_21_53_22_54_23_55:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,u,9,u,10,u,11,u,12,u,13,u,14,u,15,u,16,u,17,u,18,u,19,u,20,u,21,u,22,u,23,u]
-; AVX512VL-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,8,u,9,u,10,u,11,u,12,u,13,u,14,u,15,u,16,u,17,u,18,u,19,u,20,u,21,u,22,u,23]
-; AVX512VL-NEXT: vmovdqu {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 {%k1} = ymm1[u,8,u,9,u,10,u,11,u,12,u,13,u,14,u,15,u,16,u,17,u,18,u,19,u,20,u,21,u,22,u,23]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55>
ret <32 x i8> %shuffle
@@ -1669,16 +1677,18 @@ define <32 x i8> @shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_
; AVX512VL-LABEL: shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_20_19_52_19_49_54_37_32_48_42_59_07_36_34_36_39:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm1[2,3,0,1]
-; AVX512VL-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,12,u,u,u,u,u,u,u,0,3,u,u,u,u,u,u,21,16,u,26,u,u,20,18,20,23]
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[10,13,u,u,3,3,u,8,u,u,u,12,1,u,u,u,u,u,20,u,17,22,u,u,16,u,27,u,u,u,u,u]
-; AVX512VL-NEXT: vmovdqu {{.*#+}} ymm3 = <255,255,u,u,255,255,0,255,u,u,u,255,255,u,0,0,u,u,255,u,255,255,0,0,255,0,255,u,0,0,0,0>
-; AVX512VL-NEXT: vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT: movl $-222248896, %eax # imm = 0xF2C0C040
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vpshufb {{.*#+}} ymm1 {%k1} = ymm2[u,u,u,u,u,u,12,u,u,u,u,u,u,u,0,3,u,u,u,u,u,u,21,16,u,26,u,u,20,18,20,23]
; AVX512VL-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3,0,1]
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,1,6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,23,u,u,u,u]
; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,12,13,u,u,u,u,u,u,u,u,u,12,u,u,20,19,u,19,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4,5],ymm2[6],ymm0[7]
-; AVX512VL-NEXT: vmovdqu {{.*#+}} ymm2 = [255,255,0,0,255,255,255,255,0,0,0,255,255,0,255,255,0,0,255,0,255,255,255,255,255,255,255,0,255,255,255,255]
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: movl $134948620, %eax # imm = 0x80B270C
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vmovdqu8 %ymm0, %ymm1 {%k1}
+; AVX512VL-NEXT: vmovdqa %ymm1, %ymm0
; AVX512VL-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 42, i32 45, i32 12, i32 13, i32 35, i32 35, i32 60, i32 40, i32 17, i32 22, i32 29, i32 44, i32 33, i32 12, i32 48, i32 51, i32 20, i32 19, i32 52, i32 19, i32 49, i32 54, i32 37, i32 32, i32 48, i32 42, i32 59, i32 7, i32 36, i32 34, i32 36, i32 39>
ret <32 x i8> %shuffle
@@ -1931,7 +1941,6 @@ define <32 x i8> @shuffle_v32i8_32_zz_zz_zz_zz_zz_zz_zz_33_zz_zz_zz_zz_zz_zz_zz_
; AVX2OR512VL: # BB#0:
; AVX2OR512VL-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; AVX2OR512VL-NEXT: retq
-
%shuffle = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a, <32 x i32> <i32 32, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 33, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 34, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 35, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
}
@@ -1956,9 +1965,10 @@ define <32 x i8> @shuffle_v32i8_32_zz_zz_zz_33_zz_zz_zz_34_zz_zz_zz_35_zz_zz_zz_
define <32 x i8> @shuffle_v32i8_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_40_zz_41_zz_42_zz_43_zz_44_zz_45_zz_46_zz_47_zz(<32 x i8> %a) {
; AVX1-LABEL: shuffle_v32i8_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_40_zz_41_zz_42_zz_43_zz_44_zz_45_zz_46_zz_47_zz:
; AVX1: # BB#0:
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v32i8_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_40_zz_41_zz_42_zz_43_zz_44_zz_45_zz_46_zz_47_zz:
@@ -1969,6 +1979,44 @@ define <32 x i8> @shuffle_v32i8_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_
ret <32 x i8> %shuffle
}
+define <32 x i8> @shuffle_v32i8_56_zz_zz_zz_57_zz_zz_zz_58_zz_zz_zz_59_zz_zz_zz_60_zz_zz_zz_61_zz_zz_zz_62_zz_zz_zz_63_zz_zz_zz(<32 x i8> %a) {
+; AVX1-LABEL: shuffle_v32i8_56_zz_zz_zz_57_zz_zz_zz_58_zz_zz_zz_59_zz_zz_zz_60_zz_zz_zz_61_zz_zz_zz_62_zz_zz_zz_63_zz_zz_zz:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_56_zz_zz_zz_57_zz_zz_zz_58_zz_zz_zz_59_zz_zz_zz_60_zz_zz_zz_61_zz_zz_zz_62_zz_zz_zz_63_zz_zz_zz:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: shuffle_v32i8_56_zz_zz_zz_57_zz_zz_zz_58_zz_zz_zz_59_zz_zz_zz_60_zz_zz_zz_61_zz_zz_zz_62_zz_zz_zz_63_zz_zz_zz:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512VL-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512VL-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX512VL-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512VL-NEXT: movl $286331153, %eax # imm = 0x11111111
+; AVX512VL-NEXT: kmovd %eax, %k1
+; AVX512VL-NEXT: vmovdqu8 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: retq
+ %shuffle = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a, <32 x i32> <i32 56, i32 1, i32 2, i32 3, i32 57, i32 5, i32 6, i32 7, i32 58, i32 9, i32 10, i32 11, i32 59, i32 13, i32 14, i32 15, i32 60, i32 17, i32 18, i32 19, i32 61, i32 21, i32 22, i32 23, i32 62, i32 25, i32 26, i32 27, i32 63, i32 29, i32 30, i32 31>
+ ret <32 x i8> %shuffle
+}
+
define <32 x i8> @shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
; AVX1: # BB#0:
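
The same mechanism handles blends against zero: with {z} zero-masking, lanes whose mask bit is clear become zero, so the vpand against a constant-pool byte mask becomes a single masked move of a register onto itself. A sketch of the zeroing form, with an assumed function name:

define <32 x i8> @zero_even_bytes(<32 x i8> %a) {
  ; k-mask 0xAAAAAAAA keeps the odd bytes of %a; {z} masking zeroes the even
  ; bytes, so no 32-byte AND mask needs to be loaded.
  %r = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 32, i32 1, i32 34, i32 3, i32 36, i32 5, i32 38, i32 7, i32 40, i32 9, i32 42, i32 11, i32 44, i32 13, i32 46, i32 15, i32 48, i32 17, i32 50, i32 19, i32 52, i32 21, i32 54, i32 23, i32 56, i32 25, i32 58, i32 27, i32 60, i32 29, i32 62, i32 31>
  ret <32 x i8> %r
}
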
diff --git a/test/CodeGen/X86/vector-shuffle-256-v4.ll b/test/CodeGen/X86/vector-shuffle-256-v4.ll
index 7f978138719e..ad343e64e1e5 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -129,6 +129,48 @@ define <4 x double> @shuffle_v4f64_2200(<4 x double> %a, <4 x double> %b) {
ret <4 x double> %shuffle
}
+define <4 x double> @shuffle_v4f64_2222(<4 x double> %a, <4 x double> %b) {
+; AVX1-LABEL: shuffle_v4f64_2222:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4f64_2222:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: shuffle_v4f64_2222:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
+; AVX512VL-NEXT: retq
+ %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
+ ret <4 x double> %shuffle
+}
+
+define <4 x double> @shuffle_v4f64_2222_bc(<4 x i64> %a, <4 x i64> %b) {
+; AVX1-LABEL: shuffle_v4f64_2222_bc:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4f64_2222_bc:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: shuffle_v4f64_2222_bc:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,2]
+; AVX512VL-NEXT: retq
+ %tmp0 = bitcast <4 x i64> %a to <4 x double>
+ %tmp1 = bitcast <4 x i64> %b to <4 x double>
+ %shuffle = shufflevector <4 x double> %tmp0, <4 x double> %tmp1, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
+ ret <4 x double> %shuffle
+}
+
define <4 x double> @shuffle_v4f64_3330(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_3330:
; AVX1: # BB#0:
@@ -477,6 +519,59 @@ define <4 x double> @shuffle_v4f64_3333(<4 x double> %a, <4 x double> %b) {
ret <4 x double> %shuffle
}
+define <4 x double> @shuffle_v4f64_0z3z(<4 x double> %a, <4 x double> %b) {
+; AVX1-LABEL: shuffle_v4f64_0z3z:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,3,2]
+; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4f64_0z3z:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,3,2]
+; AVX2-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: shuffle_v4f64_0z3z:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,3,2]
+; AVX512VL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
+; AVX512VL-NEXT: retq
+ %shuffle = shufflevector <4 x double> %a, <4 x double> <double 0.000000e+00, double undef, double undef, double undef>, <4 x i32> <i32 0, i32 4, i32 3, i32 4>
+ ret <4 x double> %shuffle
+}
+
+define <4 x double> @shuffle_v4f64_1z2z(<4 x double> %a, <4 x double> %b) {
+; AVX1-LABEL: shuffle_v4f64_1z2z:
+; AVX1: # BB#0:
+; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4f64_1z2z:
+; AVX2: # BB#0:
+; AVX2-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,2,0]
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: shuffle_v4f64_1z2z:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
+; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,2,0]
+; AVX512VL-NEXT: retq
+ %1 = shufflevector <4 x double> %a, <4 x double> <double 0.000000e+00, double undef, double undef, double undef>, <4 x i32> <i32 1, i32 4, i32 2, i32 4>
+ ret <4 x double> %1
+}
+
define <4 x i64> @shuffle_v4i64_0000(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_0000:
; AVX1: # BB#0:
@@ -1131,6 +1226,30 @@ define <4 x i64> @shuffle_v4i64_3333(<4 x i64> %a, <4 x i64> %b) {
ret <4 x i64> %shuffle
}
+define <4 x i64> @shuffle_v4i64_1z3z(<4 x i64> %a, <4 x i64> %b) {
+; AVX1-LABEL: shuffle_v4i64_1z3z:
+; AVX1: # BB#0:
+; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0]
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i64_1z3z:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: shuffle_v4i64_1z3z:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT: retq
+ %shuffle = shufflevector <4 x i64> %a, <4 x i64> <i64 0, i64 undef, i64 undef, i64 undef>, <4 x i32> <i32 1, i32 4, i32 3, i32 4>
+ ret <4 x i64> %shuffle
+}
+
define <4 x i64> @stress_test1(<4 x i64> %a, <4 x i64> %b) {
; ALL-LABEL: stress_test1:
; ALL: retq
@@ -1180,7 +1299,7 @@ define <4 x double> @insert_reg_and_zero_v4f64(double %a) {
;
; AVX512VL-LABEL: insert_reg_and_zero_v4f64:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512VL-NEXT: retq
%v = insertelement <4 x double> undef, double %a, i32 0
@@ -1285,7 +1404,7 @@ define <4 x i64> @splat128_mem_v4i64_from_v2i64(<2 x i64>* %ptr) {
;
; AVX512VL-LABEL: splat128_mem_v4i64_from_v2i64:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; AVX512VL-NEXT: retq
%v = load <2 x i64>, <2 x i64>* %ptr
%shuffle = shufflevector <2 x i64> %v, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1293,20 +1412,10 @@ define <4 x i64> @splat128_mem_v4i64_from_v2i64(<2 x i64>* %ptr) {
}
define <4 x double> @splat128_mem_v4f64_from_v2f64(<2 x double>* %ptr) {
-; AVX1-LABEL: splat128_mem_v4f64_from_v2f64:
-; AVX1: # BB#0:
-; AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: splat128_mem_v4f64_from_v2f64:
-; AVX2: # BB#0:
-; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; AVX2-NEXT: retq
-;
-; AVX512VL-LABEL: splat128_mem_v4f64_from_v2f64:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512VL-NEXT: retq
+; ALL-LABEL: splat128_mem_v4f64_from_v2f64:
+; ALL: # BB#0:
+; ALL-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; ALL-NEXT: retq
%v = load <2 x double>, <2 x double>* %ptr
%shuffle = shufflevector <2 x double> %v, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
ret <4 x double> %shuffle
@@ -1455,3 +1564,56 @@ define <4 x i64> @shuffle_v4i64_1230(<4 x i64> %a) {
%shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
ret <4 x i64> %shuffle
}
+
+define <4 x i64> @shuffle_v4i64_z0z3(<4 x i64> %a, <4 x i64> %b) {
+; AVX1-LABEL: shuffle_v4i64_z0z3:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,2,3]
+; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i64_z0z3:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,3]
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: shuffle_v4i64_z0z3:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,3]
+; AVX512VL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX512VL-NEXT: retq
+ %1 = shufflevector <4 x i64> %a, <4 x i64> <i64 0, i64 undef, i64 undef, i64 undef>, <4 x i32> <i32 4, i32 0, i32 4, i32 3>
+ ret <4 x i64> %1
+}
+
+define <4 x i64> @shuffle_v4i64_1z2z(<4 x i64> %a, <4 x i64> %b) {
+; AVX1-LABEL: shuffle_v4i64_1z2z:
+; AVX1: # BB#0:
+; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i64_1z2z:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,0,2,0]
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: shuffle_v4i64_1z2z:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
+; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,0,2,0]
+; AVX512VL-NEXT: retq
+ %1 = shufflevector <4 x i64> %a, <4 x i64> <i64 0, i64 undef, i64 undef, i64 undef>, <4 x i32> <i32 1, i32 4, i32 2, i32 4>
+ ret <4 x i64> %1
+}
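
For v4i64, pairing an element with zero inside each 128-bit lane needs no blend at all: <1,zero,3,zero> moves the high qword of each lane down and shifts in zeros, which is a per-lane byte shift, hence the single vpsrldq $8 in the AVX2 and AVX512VL checks above. A sketch of the equivalence; zeroinitializer stands in for the partially-undef constant the test uses:

define <4 x i64> @lane_shift_equiv(<4 x i64> %a) {
  ; Each 128-bit lane becomes [hi, 0], i.e. the lane shifted right by 8 bytes,
  ; so one vpsrldq should cover the whole shuffle.
  %r = shufflevector <4 x i64> %a, <4 x i64> zeroinitializer, <4 x i32> <i32 1, i32 4, i32 3, i32 6>
  ret <4 x i64> %r
}
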
diff --git a/test/CodeGen/X86/vector-shuffle-256-v8.ll b/test/CodeGen/X86/vector-shuffle-256-v8.ll
index cba15827d32c..8d49321a6af8 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v8.ll
@@ -266,21 +266,12 @@ define <8 x float> @shuffle_v8f32_2a3b6e7f(<8 x float> %a, <8 x float> %b) {
}
define <8 x float> @shuffle_v8f32_08192a3b(<8 x float> %a, <8 x float> %b) {
-; AVX1-LABEL: shuffle_v8f32_08192a3b:
-; AVX1: # BB#0:
-; AVX1-NEXT: vunpckhps {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: shuffle_v8f32_08192a3b:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <u,0,u,1,u,2,u,3>
-; AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <0,u,1,u,2,u,3,u>
-; AVX2-NEXT: vpermps %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
-; AVX2-NEXT: retq
+; AVX1OR2-LABEL: shuffle_v8f32_08192a3b:
+; AVX1OR2: # BB#0:
+; AVX1OR2-NEXT: vunpckhps {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX1OR2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8f32_08192a3b:
; AVX512VL: # BB#0:
@@ -1221,10 +1212,9 @@ define <8 x i32> @shuffle_v8i32_08192a3b(<8 x i32> %a, <8 x i32> %b) {
;
; AVX2-LABEL: shuffle_v8i32_08192a3b:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <u,0,u,1,u,2,u,3>
-; AVX2-NEXT: vpermd %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX2-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i32_08192a3b:
@@ -2048,6 +2038,24 @@ define <8 x i32> @shuffle_v8i32_44444444(<8 x i32> %a, <8 x i32> %b) {
ret <8 x i32> %shuffle
}
+define <8 x i32> @shuffle_v8i32_44444444_bc(<8 x float> %a, <8 x float> %b) {
+; AVX1-LABEL: shuffle_v8i32_44444444_bc:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX1-NEXT: retq
+;
+; AVX2OR512VL-LABEL: shuffle_v8i32_44444444_bc:
+; AVX2OR512VL: # BB#0:
+; AVX2OR512VL-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX2OR512VL-NEXT: vbroadcastss %xmm0, %ymm0
+; AVX2OR512VL-NEXT: retq
+ %tmp0 = bitcast <8 x float> %a to <8 x i32>
+ %tmp1 = bitcast <8 x float> %b to <8 x i32>
+ %shuffle = shufflevector <8 x i32> %tmp0, <8 x i32> %tmp1, <8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i32> %shuffle
+}
+
define <8 x i32> @shuffle_v8i32_5555uuuu(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_5555uuuu:
; AVX1: # BB#0:
@@ -2064,6 +2072,21 @@ define <8 x i32> @shuffle_v8i32_5555uuuu(<8 x i32> %a, <8 x i32> %b) {
ret <8 x i32> %shuffle
}
+; PR32453
+define <8 x i32> @shuffle_v8i32_uuuuuu7u(<8 x i32> %a, <8 x i32> %b) nounwind {
+; AVX1-LABEL: shuffle_v8i32_uuuuuu7u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
+; AVX1-NEXT: retq
+;
+; AVX2OR512VL-LABEL: shuffle_v8i32_uuuuuu7u:
+; AVX2OR512VL: # BB#0:
+; AVX2OR512VL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,1,3,3,4,5,7,7]
+; AVX2OR512VL-NEXT: retq
+ %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 7, i32 undef>
+ ret <8 x i32> %shuffle
+}
+
define <8 x float> @splat_mem_v8f32_2(float* %p) {
; ALL-LABEL: splat_mem_v8f32_2:
; ALL: # BB#0:
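
The shuffle_v8f32_08192a3b rewrite shows the interleave special case: when the result reads only the low four lanes of each source, two 128-bit unpacks plus one vinsertf128 beat two cross-lane vpermps and a blend. Roughly this pattern, sketched with an assumed name:

define <8 x float> @interleave_low(<8 x float> %a, <8 x float> %b) {
  ; <0,8,1,9,2,10,3,11> touches only the xmm halves of each source, so
  ; unpcklps/unpckhps on the halves plus one 128-bit insert suffice.
  %r = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
  ret <8 x float> %r
}
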
diff --git a/test/CodeGen/X86/vector-shuffle-512-v16.ll b/test/CodeGen/X86/vector-shuffle-512-v16.ll
index b951bf1c97ed..fa3471c2fe40 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v16.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v16.ll
@@ -23,6 +23,18 @@ define <16 x float> @shuffle_v16f32_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08
ret <16 x float> %shuffle
}
+define <16 x float> @shuffle_v16f32_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_bc(<16 x i32> %a, <16 x i32> %b) {
+; ALL-LABEL: shuffle_v16f32_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_08_bc:
+; ALL: # BB#0:
+; ALL-NEXT: vextracti32x4 $2, %zmm0, %xmm0
+; ALL-NEXT: vpbroadcastd %xmm0, %zmm0
+; ALL-NEXT: retq
+ %tmp0 = bitcast <16 x i32> %a to <16 x float>
+ %tmp1 = bitcast <16 x i32> %b to <16 x float>
+ %shuffle = shufflevector <16 x float> %tmp0, <16 x float> %tmp1, <16 x i32><i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
+ ret <16 x float> %shuffle
+}
+
define <16 x float> @shuffle_v16f32_00_10_01_11_04_14_05_15_08_18_09_19_0c_1c_0d_1d(<16 x float> %a, <16 x float> %b) {
; ALL-LABEL: shuffle_v16f32_00_10_01_11_04_14_05_15_08_18_09_19_0c_1c_0d_1d:
; ALL: # BB#0:
@@ -250,11 +262,19 @@ define <16 x i32> @shuffle_v16i32_load_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19
}
define <16 x i32> @shuffle_v16i32_0_1_2_19_u_u_u_u_u_u_u_u_u_u_u_u(<16 x i32> %a, <16 x i32> %b) {
-; ALL-LABEL: shuffle_v16i32_0_1_2_19_u_u_u_u_u_u_u_u_u_u_u_u:
-; ALL: # BB#0:
-; ALL-NEXT: vmovdqa32 {{.*#+}} zmm2 = <0,1,2,19,u,u,u,u,u,u,u,u,u,u,u,u>
-; ALL-NEXT: vpermt2d %zmm1, %zmm2, %zmm0
-; ALL-NEXT: retq
+; AVX512F-LABEL: shuffle_v16i32_0_1_2_19_u_u_u_u_u_u_u_u_u_u_u_u:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: movw $8, %ax
+; AVX512F-NEXT: kmovw %eax, %k1
+; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: shuffle_v16i32_0_1_2_19_u_u_u_u_u_u_u_u_u_u_u_u:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: movw $8, %ax
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT: retq
%c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 19, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
ret <16 x i32> %c
}
@@ -384,13 +404,30 @@ define <16 x i32> @shuffle_v8i32_17_16_01_00_21_20_05_04_25_24_09_08_29_28_13_12
ret <16 x i32> %shuffle
}
-define <16 x i32> @mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_01(<16 x i32> %a, <16 x i32> %passthru, i16 %mask) {
-; ALL-LABEL: mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_01:
+define <16 x float> @shuffle_v8f32_v16f32_04_04_04_04_04_04_04_04_04_04_04_04_04_04_04_04(<8 x float> %a) {
+; ALL-LABEL: shuffle_v8f32_v16f32_04_04_04_04_04_04_04_04_04_04_04_04_04_04_04_04:
; ALL: # BB#0:
-; ALL-NEXT: kmovw %edi, %k1
-; ALL-NEXT: valignd {{.*#+}} zmm1 {%k1} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
-; ALL-NEXT: vmovdqa64 %zmm1, %zmm0
+; ALL-NEXT: vextractf128 $1, %ymm0, %xmm0
+; ALL-NEXT: vbroadcastss %xmm0, %zmm0
; ALL-NEXT: retq
+ %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
+ ret <16 x float> %shuffle
+}
+
+define <16 x i32> @mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_01(<16 x i32> %a, <16 x i32> %passthru, i16 %mask) {
+; AVX512F-LABEL: mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_01:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: kmovw %edi, %k1
+; AVX512F-NEXT: valignd {{.*#+}} zmm1 {%k1} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
+; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_01:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: valignd {{.*#+}} zmm1 {%k1} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
+; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <16 x i32><i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1>
%mask.cast = bitcast i16 %mask to <16 x i1>
%res = select <16 x i1> %mask.cast, <16 x i32> %shuffle, <16 x i32> %passthru
@@ -398,12 +435,19 @@ define <16 x i32> @mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15
}
define <16 x i32> @mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16_17(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passthru, i16 %mask) {
-; ALL-LABEL: mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16_17:
-; ALL: # BB#0:
-; ALL-NEXT: kmovw %edi, %k1
-; ALL-NEXT: valignd {{.*#+}} zmm2 {%k1} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm1[0,1]
-; ALL-NEXT: vmovdqa64 %zmm2, %zmm0
-; ALL-NEXT: retq
+; AVX512F-LABEL: mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16_17:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: kmovw %edi, %k1
+; AVX512F-NEXT: valignd {{.*#+}} zmm2 {%k1} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm1[0,1]
+; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16_17:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: valignd {{.*#+}} zmm2 {%k1} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm1[0,1]
+; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512BW-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32><i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
%mask.cast = bitcast i16 %mask to <16 x i1>
%res = select <16 x i1> %mask.cast, <16 x i32> %shuffle, <16 x i32> %passthru
@@ -411,11 +455,17 @@ define <16 x i32> @mask_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15
}
define <16 x i32> @maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_01(<16 x i32> %a, i16 %mask) {
-; ALL-LABEL: maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_01:
-; ALL: # BB#0:
-; ALL-NEXT: kmovw %edi, %k1
-; ALL-NEXT: valignd {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
-; ALL-NEXT: retq
+; AVX512F-LABEL: maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_01:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: kmovw %edi, %k1
+; AVX512F-NEXT: valignd {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_01:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: valignd {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
+; AVX512BW-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <16 x i32><i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1>
%mask.cast = bitcast i16 %mask to <16 x i1>
%res = select <16 x i1> %mask.cast, <16 x i32> %shuffle, <16 x i32> zeroinitializer
@@ -423,11 +473,17 @@ define <16 x i32> @maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_1
}
define <16 x i32> @maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16_17(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
-; ALL-LABEL: maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16_17:
-; ALL: # BB#0:
-; ALL-NEXT: kmovw %edi, %k1
-; ALL-NEXT: valignd {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm1[0,1]
-; ALL-NEXT: retq
+; AVX512F-LABEL: maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16_17:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: kmovw %edi, %k1
+; AVX512F-NEXT: valignd {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm1[0,1]
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: maskz_shuffle_v16i32_02_03_04_05_06_07_08_09_10_11_12_13_14_15_16_17:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: valignd {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm1[0,1]
+; AVX512BW-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32><i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
%mask.cast = bitcast i16 %mask to <16 x i1>
%res = select <16 x i1> %mask.cast, <16 x i32> %shuffle, <16 x i32> zeroinitializer
@@ -497,12 +553,19 @@ define <16 x i32> @test_vshufi32x4_512_mask(<16 x i32> %x, <16 x i32> %x1, <16 x
}
define <16 x float> @mask_shuffle_v16f32_00_01_02_03_04_05_06_07_16_17_18_19_20_21_22_23(<16 x float> %a, <16 x float> %b, <16 x float> %passthru, i16 %mask) {
-; ALL-LABEL: mask_shuffle_v16f32_00_01_02_03_04_05_06_07_16_17_18_19_20_21_22_23:
-; ALL: # BB#0:
-; ALL-NEXT: kmovw %edi, %k1
-; ALL-NEXT: vinsertf32x8 $1, %ymm1, %zmm0, %zmm2 {%k1}
-; ALL-NEXT: vmovaps %zmm2, %zmm0
-; ALL-NEXT: retq
+; AVX512F-LABEL: mask_shuffle_v16f32_00_01_02_03_04_05_06_07_16_17_18_19_20_21_22_23:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: kmovw %edi, %k1
+; AVX512F-NEXT: vinsertf32x8 $1, %ymm1, %zmm0, %zmm2 {%k1}
+; AVX512F-NEXT: vmovaps %zmm2, %zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: mask_shuffle_v16f32_00_01_02_03_04_05_06_07_16_17_18_19_20_21_22_23:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: vinsertf32x8 $1, %ymm1, %zmm0, %zmm2 {%k1}
+; AVX512BW-NEXT: vmovaps %zmm2, %zmm0
+; AVX512BW-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
%mask.cast = bitcast i16 %mask to <16 x i1>
%res = select <16 x i1> %mask.cast, <16 x float> %shuffle, <16 x float> %passthru
@@ -510,12 +573,19 @@ define <16 x float> @mask_shuffle_v16f32_00_01_02_03_04_05_06_07_16_17_18_19_20_
}
define <16 x float> @mask_shuffle_v16f32_00_01_02_03_16_17_18_19_08_09_10_11_12_13_14_15(<16 x float> %a, <16 x float> %b, <16 x float> %passthru, i16 %mask) {
-; ALL-LABEL: mask_shuffle_v16f32_00_01_02_03_16_17_18_19_08_09_10_11_12_13_14_15:
-; ALL: # BB#0:
-; ALL-NEXT: kmovw %edi, %k1
-; ALL-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
-; ALL-NEXT: vmovaps %zmm2, %zmm0
-; ALL-NEXT: retq
+; AVX512F-LABEL: mask_shuffle_v16f32_00_01_02_03_16_17_18_19_08_09_10_11_12_13_14_15:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: kmovw %edi, %k1
+; AVX512F-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
+; AVX512F-NEXT: vmovaps %zmm2, %zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: mask_shuffle_v16f32_00_01_02_03_16_17_18_19_08_09_10_11_12_13_14_15:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
+; AVX512BW-NEXT: vmovaps %zmm2, %zmm0
+; AVX512BW-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%mask.cast = bitcast i16 %mask to <16 x i1>
%res = select <16 x i1> %mask.cast, <16 x float> %shuffle, <16 x float> %passthru
@@ -523,12 +593,19 @@ define <16 x float> @mask_shuffle_v16f32_00_01_02_03_16_17_18_19_08_09_10_11_12_
}
define <16 x i32> @mask_shuffle_v16i32_00_01_02_03_04_05_06_07_16_17_18_19_20_21_22_23(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passthru, i16 %mask) {
-; ALL-LABEL: mask_shuffle_v16i32_00_01_02_03_04_05_06_07_16_17_18_19_20_21_22_23:
-; ALL: # BB#0:
-; ALL-NEXT: kmovw %edi, %k1
-; ALL-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm2 {%k1}
-; ALL-NEXT: vmovdqa64 %zmm2, %zmm0
-; ALL-NEXT: retq
+; AVX512F-LABEL: mask_shuffle_v16i32_00_01_02_03_04_05_06_07_16_17_18_19_20_21_22_23:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: kmovw %edi, %k1
+; AVX512F-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm2 {%k1}
+; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: mask_shuffle_v16i32_00_01_02_03_04_05_06_07_16_17_18_19_20_21_22_23:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm2 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512BW-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
%mask.cast = bitcast i16 %mask to <16 x i1>
%res = select <16 x i1> %mask.cast, <16 x i32> %shuffle, <16 x i32> %passthru
@@ -536,14 +613,43 @@ define <16 x i32> @mask_shuffle_v16i32_00_01_02_03_04_05_06_07_16_17_18_19_20_21
}
define <16 x i32> @mask_shuffle_v16i32_00_01_02_03_16_17_18_19_08_09_10_11_12_13_14_15(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passthru, i16 %mask) {
-; ALL-LABEL: mask_shuffle_v16i32_00_01_02_03_16_17_18_19_08_09_10_11_12_13_14_15:
-; ALL: # BB#0:
-; ALL-NEXT: kmovw %edi, %k1
-; ALL-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
-; ALL-NEXT: vmovdqa64 %zmm2, %zmm0
-; ALL-NEXT: retq
+; AVX512F-LABEL: mask_shuffle_v16i32_00_01_02_03_16_17_18_19_08_09_10_11_12_13_14_15:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: kmovw %edi, %k1
+; AVX512F-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
+; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: mask_shuffle_v16i32_00_01_02_03_16_17_18_19_08_09_10_11_12_13_14_15:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512BW-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%mask.cast = bitcast i16 %mask to <16 x i1>
%res = select <16 x i1> %mask.cast, <16 x i32> %shuffle, <16 x i32> %passthru
ret <16 x i32> %res
}
+
+define <16 x i32> @mask_shuffle_v4i32_v16i32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03(<4 x i32> %a) {
+; ALL-LABEL: mask_shuffle_v4i32_v16i32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03:
+; ALL: # BB#0:
+; ALL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; ALL-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; ALL-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm0
+; ALL-NEXT: retq
+ %res = shufflevector <4 x i32> %a, <4 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ ret <16 x i32> %res
+}
+
+define <16 x float> @mask_shuffle_v4f32_v16f32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03(<4 x float> %a) {
+; ALL-LABEL: mask_shuffle_v4f32_v16f32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03:
+; ALL: # BB#0:
+; ALL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; ALL-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
+; ALL-NEXT: retq
+ %res = shufflevector <4 x float> %a, <4 x float> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ ret <16 x float> %res
+}
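
The v16i32 masked tests are now split by prefix because only the GPR-to-mask copy differs: plain AVX512F uses kmovw, while AVX512BW targets prefer kmovd; the valignd and vinsert bodies are identical. The pattern under test, restated as a standalone sketch:

define <16 x i32> @masked_align(<16 x i32> %a, <16 x i32> %pass, i16 %m) {
  ; A rotate-by-two shuffle selected against a passthru vector folds into one
  ; merge-masked valignd; %m travels GPR -> k1 via kmovw (F) or kmovd (BW).
  %s = shufflevector <16 x i32> %a, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1>
  %mc = bitcast i16 %m to <16 x i1>
  %r = select <16 x i1> %mc, <16 x i32> %s, <16 x i32> %pass
  ret <16 x i32> %r
}
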
diff --git a/test/CodeGen/X86/vector-shuffle-512-v32.ll b/test/CodeGen/X86/vector-shuffle-512-v32.ll
index 74fe7c1bc665..26cd7301fe60 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v32.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v32.ll
@@ -110,10 +110,9 @@ define <32 x i16> @shuffle_v32i16_1_1_0_0_5_5_4_4_9_9_11_11_13_13_12_12_17_17_19
define <32 x i16> @shuffle_v32i16_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(<32 x i16> %a) {
; ALL-LABEL: shuffle_v32i16_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz:
; ALL: # BB#0:
-; ALL-NEXT: vmovdqu16 {{.*#+}} zmm2 = [32,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
-; ALL-NEXT: vpxord %zmm1, %zmm1, %zmm1
-; ALL-NEXT: vpermt2w %zmm0, %zmm2, %zmm1
-; ALL-NEXT: vmovdqa64 %zmm1, %zmm0
+; ALL-NEXT: movl $1, %eax
+; ALL-NEXT: kmovd %eax, %k1
+; ALL-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
; ALL-NEXT: retq
%shuffle = shufflevector <32 x i16> %a, <32 x i16> zeroinitializer, <32 x i32> <i32 0, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
ret <32 x i16> %shuffle
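
Keeping a single element and zeroing the rest collapses the same way: with k = 1, a {z}-masked vmovdqu16 preserves lane 0 and zeroes lanes 1 through 31, so the vpermt2w and its 64-byte index constant disappear. Sketch with an assumed name:

define <32 x i16> @keep_lane0(<32 x i16> %a) {
  ; Only lane 0 survives; {z} masking with k-mask 1 zeroes everything else.
  %r = shufflevector <32 x i16> %a, <32 x i16> zeroinitializer, <32 x i32> <i32 0, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
  ret <32 x i16> %r
}
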
diff --git a/test/CodeGen/X86/vector-shuffle-512-v8.ll b/test/CodeGen/X86/vector-shuffle-512-v8.ll
index 365ff3bf63d5..30c8d1b2373e 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v8.ll
@@ -48,6 +48,24 @@ define <8 x double> @shuffle_v8f64_44444444(<8 x double> %a, <8 x double> %b) {
ret <8 x double> %shuffle
}
+define <8 x double> @shuffle_v8f64_44444444_bc(<8 x i64> %a, <8 x i64> %b) {
+; AVX512F-LABEL: shuffle_v8f64_44444444_bc:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vextracti32x4 $2, %zmm0, %xmm0
+; AVX512F-NEXT: vpbroadcastq %xmm0, %zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: shuffle_v8f64_44444444_bc:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: vextracti32x4 $2, %zmm0, %xmm0
+; AVX512F-32-NEXT: vpbroadcastq %xmm0, %zmm0
+; AVX512F-32-NEXT: retl
+ %tmp0 = bitcast <8 x i64> %a to <8 x double>
+ %tmp1 = bitcast <8 x i64> %b to <8 x double>
+ %shuffle = shufflevector <8 x double> %tmp0, <8 x double> %tmp1, <8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x double> %shuffle
+}
+
define <8 x double> @shuffle_v8f64_00000010(<8 x double> %a, <8 x double> %b) {
; AVX512F-LABEL: shuffle_v8f64_00000010:
; AVX512F: # BB#0:
@@ -958,6 +976,24 @@ define <8 x double> @shuffle_v8f64_f511235a(<8 x double> %a, <8 x double> %b) {
ret <8 x double> %shuffle
}
+define <8 x double> @shuffle_v8f64_1z2z5z6z(<8 x double> %a, <8 x double> %b) {
+; AVX512F-LABEL: shuffle_v8f64_1z2z5z6z:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512F-NEXT: vmovapd {{.*#+}} zmm2 = [1,8,2,8,5,8,6,8]
+; AVX512F-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: shuffle_v8f64_1z2z5z6z:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512F-32-NEXT: vmovapd {{.*#+}} zmm2 = [1,0,8,0,2,0,8,0,5,0,8,0,6,0,8,0]
+; AVX512F-32-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
+; AVX512F-32-NEXT: retl
+ %shuffle = shufflevector <8 x double> %a, <8 x double> <double 0.000000e+00, double undef, double undef, double undef, double undef, double undef, double undef, double undef>, <8 x i32> <i32 1, i32 8, i32 2, i32 8, i32 5, i32 8, i32 6, i32 8>
+ ret <8 x double> %shuffle
+}
+
define <8 x i64> @shuffle_v8i64_00000000(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00000000:
@@ -1179,16 +1215,16 @@ define <8 x i64> @shuffle_v8i64_81a3c5e7(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_81a3c5e7:
; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,9,2,11,4,13,6,15]
-; AVX512F-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
-; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512F-NEXT: movb $-86, %al
+; AVX512F-NEXT: kmovw %eax, %k1
+; AVX512F-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v8i64_81a3c5e7:
; AVX512F-32: # BB#0:
-; AVX512F-32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,0,9,0,2,0,11,0,4,0,13,0,6,0,15,0]
-; AVX512F-32-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
-; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512F-32-NEXT: movb $-86, %al
+; AVX512F-32-NEXT: kmovw %eax, %k1
+; AVX512F-32-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; AVX512F-32-NEXT: retl
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
ret <8 x i64> %shuffle
@@ -2571,3 +2607,55 @@ define <8 x i64> @shuffle_v8i64_01234589(<8 x i64> %a, <8 x i64> %b) {
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
ret <8 x i64> %shuffle
}
+
+define <8 x double> @shuffle_v4f64_v8f64_22222222(<4 x double> %a) {
+; AVX512F-LABEL: shuffle_v4f64_v8f64_22222222:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX512F-NEXT: vbroadcastsd %xmm0, %zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: shuffle_v4f64_v8f64_22222222:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX512F-32-NEXT: vbroadcastsd %xmm0, %zmm0
+; AVX512F-32-NEXT: retl
+ %shuffle = shufflevector <4 x double> %a, <4 x double> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+ ret <8 x double> %shuffle
+}
+
+define <8 x i64> @shuffle_v2i64_v8i64_01010101(<2 x i64> %a) {
+; AVX512F-LABEL: shuffle_v2i64_v8i64_01010101:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: shuffle_v2i64_v8i64_01010101:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512F-32-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-32-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512F-32-NEXT: retl
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ ret <8 x i64> %shuffle
+}
+
+define <8 x double> @shuffle_v2f64_v8f64_01010101(<2 x double> %a) {
+; AVX512F-LABEL: shuffle_v2f64_v8f64_01010101:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: shuffle_v2f64_v8f64_01010101:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512F-32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-32-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512F-32-NEXT: retl
+ %shuffle = shufflevector <2 x double> %a, <2 x double> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ ret <8 x double> %shuffle
+}
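
For v8i64, an alternating two-source pattern such as <8,1,10,3,12,5,14,7> is now recognized as a plain blend: k-mask 0xAA (movb $-86) selects %a in the odd lanes and %b in the even ones via vpblendmq, avoiding vpermi2q and its index-vector load. Sketched with an assumed name:

define <8 x i64> @alt_blend(<8 x i64> %a, <8 x i64> %b) {
  ; Odd lanes from %a, even lanes from %b: a blend under immediate mask 0xAA,
  ; with no shuffle table required.
  %r = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
  ret <8 x i64> %r
}
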
diff --git a/test/CodeGen/X86/vector-shuffle-avx512.ll b/test/CodeGen/X86/vector-shuffle-avx512.ll
index defc3e918b24..5aab21749d14 100644
--- a/test/CodeGen/X86/vector-shuffle-avx512.ll
+++ b/test/CodeGen/X86/vector-shuffle-avx512.ll
@@ -1,283 +1,499 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu -mcpu=skx | FileCheck %s --check-prefix=SKX
-; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu -mcpu=knl | FileCheck %s --check-prefix=KNL
+; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu -mcpu=skx | FileCheck %s --check-prefix=SKX64
+; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu -mcpu=knl | FileCheck %s --check-prefix=KNL64
+; RUN: llc < %s -mtriple=i386-pc-linux-gnu -mcpu=skx | FileCheck %s --check-prefix=SKX32
+; RUN: llc < %s -mtriple=i386-pc-linux-gnu -mcpu=knl | FileCheck %s --check-prefix=KNL32
; Expand 128 -> 256, covering <4 x float> and <2 x double>
define <8 x float> @expand(<4 x float> %a) {
-; SKX-LABEL: expand:
-; SKX: # BB#0:
-; SKX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; SKX-NEXT: movb $5, %al
-; SKX-NEXT: kmovb %eax, %k1
-; SKX-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expand:
-; KNL: # BB#0:
-; KNL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; KNL-NEXT: vxorps %ymm1, %ymm1, %ymm1
-; KNL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3,4,5,6,7]
-; KNL-NEXT: retq
+; SKX64-LABEL: expand:
+; SKX64: # BB#0:
+; SKX64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX64-NEXT: movb $5, %al
+; SKX64-NEXT: kmovd %eax, %k1
+; SKX64-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: expand:
+; KNL64: # BB#0:
+; KNL64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; KNL64-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; KNL64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3,4,5,6,7]
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: expand:
+; SKX32: # BB#0:
+; SKX32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX32-NEXT: movb $5, %al
+; SKX32-NEXT: kmovd %eax, %k1
+; SKX32-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: expand:
+; KNL32: # BB#0:
+; KNL32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; KNL32-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; KNL32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3,4,5,6,7]
+; KNL32-NEXT: retl
%res = shufflevector <4 x float> %a, <4 x float> zeroinitializer, <8 x i32> <i32 0, i32 5, i32 1, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <8 x float> %res
}
define <8 x float> @expand1(<4 x float> %a ) {
-; SKX-LABEL: expand1:
-; SKX: # BB#0:
-; SKX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; SKX-NEXT: movb $-86, %al
-; SKX-NEXT: kmovb %eax, %k1
-; SKX-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expand1:
-; KNL: # BB#0:
-; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; KNL-NEXT: vmovaps {{.*#+}} ymm1 = <u,0,u,1,u,2,u,3>
-; KNL-NEXT: vpermps %ymm0, %ymm1, %ymm0
-; KNL-NEXT: vxorps %ymm1, %ymm1, %ymm1
-; KNL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
-; KNL-NEXT: retq
+; SKX64-LABEL: expand1:
+; SKX64: # BB#0:
+; SKX64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX64-NEXT: movb $-86, %al
+; SKX64-NEXT: kmovd %eax, %k1
+; SKX64-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: expand1:
+; KNL64: # BB#0:
+; KNL64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL64-NEXT: vmovaps {{.*#+}} ymm1 = <u,0,u,1,u,2,u,3>
+; KNL64-NEXT: vpermps %ymm0, %ymm1, %ymm0
+; KNL64-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; KNL64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: expand1:
+; SKX32: # BB#0:
+; SKX32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX32-NEXT: movb $-86, %al
+; SKX32-NEXT: kmovd %eax, %k1
+; SKX32-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: expand1:
+; KNL32: # BB#0:
+; KNL32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL32-NEXT: vmovaps {{.*#+}} ymm1 = <u,0,u,1,u,2,u,3>
+; KNL32-NEXT: vpermps %ymm0, %ymm1, %ymm0
+; KNL32-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; KNL32-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
+; KNL32-NEXT: retl
%res = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
ret <8 x float> %res
}
; Expand 128 -> 256 test: <2 x double> -> <4 x double>.
define <4 x double> @expand2(<2 x double> %a) {
-; SKX-LABEL: expand2:
-; SKX: # BB#0:
-; SKX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; SKX-NEXT: movb $9, %al
-; SKX-NEXT: kmovb %eax, %k1
-; SKX-NEXT: vexpandpd %ymm0, %ymm0 {%k1} {z}
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expand2:
-; KNL: # BB#0:
-; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; KNL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
-; KNL-NEXT: vxorpd %ymm1, %ymm1, %ymm1
-; KNL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
-; KNL-NEXT: retq
+; SKX64-LABEL: expand2:
+; SKX64: # BB#0:
+; SKX64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX64-NEXT: movb $9, %al
+; SKX64-NEXT: kmovd %eax, %k1
+; SKX64-NEXT: vexpandpd %ymm0, %ymm0 {%k1} {z}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: expand2:
+; KNL64: # BB#0:
+; KNL64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
+; KNL64-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; KNL64-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: expand2:
+; SKX32: # BB#0:
+; SKX32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX32-NEXT: movb $9, %al
+; SKX32-NEXT: kmovd %eax, %k1
+; SKX32-NEXT: vexpandpd %ymm0, %ymm0 {%k1} {z}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: expand2:
+; KNL32: # BB#0:
+; KNL32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
+; KNL32-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; KNL32-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
+; KNL32-NEXT: retl
%res = shufflevector <2 x double> %a, <2 x double> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 2, i32 1>
ret <4 x double> %res
}
; Expand 128 -> 256, including the <4 x i32> -> <8 x i32> case.
define <8 x i32> @expand3(<4 x i32> %a ) {
-; SKX-LABEL: expand3:
-; SKX: # BB#0:
-; SKX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; SKX-NEXT: movb $-127, %al
-; SKX-NEXT: kmovb %eax, %k1
-; SKX-NEXT: vpexpandd %ymm0, %ymm0 {%k1} {z}
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expand3:
-; KNL: # BB#0:
-; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; KNL-NEXT: vpbroadcastq %xmm0, %ymm0
-; KNL-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6],ymm0[7]
-; KNL-NEXT: retq
+; SKX64-LABEL: expand3:
+; SKX64: # BB#0:
+; SKX64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX64-NEXT: movb $-127, %al
+; SKX64-NEXT: kmovd %eax, %k1
+; SKX64-NEXT: vpexpandd %ymm0, %ymm0 {%k1} {z}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: expand3:
+; KNL64: # BB#0:
+; KNL64-NEXT: vpbroadcastq %xmm0, %ymm0
+; KNL64-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; KNL64-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6],ymm0[7]
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: expand3:
+; SKX32: # BB#0:
+; SKX32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX32-NEXT: movb $-127, %al
+; SKX32-NEXT: kmovd %eax, %k1
+; SKX32-NEXT: vpexpandd %ymm0, %ymm0 {%k1} {z}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: expand3:
+; KNL32: # BB#0:
+; KNL32-NEXT: vpbroadcastq %xmm0, %ymm0
+; KNL32-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; KNL32-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6],ymm0[7]
+; KNL32-NEXT: retl
%res = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <8 x i32> <i32 4, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0,i32 5>
ret <8 x i32> %res
}
; Expand 128 -> 256, including the <2 x i64> -> <4 x i64> case.
define <4 x i64> @expand4(<2 x i64> %a ) {
-; SKX-LABEL: expand4:
-; SKX: # BB#0:
-; SKX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; SKX-NEXT: movb $9, %al
-; SKX-NEXT: kmovb %eax, %k1
-; SKX-NEXT: vpexpandq %ymm0, %ymm0 {%k1} {z}
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expand4:
-; KNL: # BB#0:
-; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; KNL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
-; KNL-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
-; KNL-NEXT: retq
+; SKX64-LABEL: expand4:
+; SKX64: # BB#0:
+; SKX64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX64-NEXT: movb $9, %al
+; SKX64-NEXT: kmovd %eax, %k1
+; SKX64-NEXT: vpexpandq %ymm0, %ymm0 {%k1} {z}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: expand4:
+; KNL64: # BB#0:
+; KNL64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
+; KNL64-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; KNL64-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: expand4:
+; SKX32: # BB#0:
+; SKX32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX32-NEXT: movb $9, %al
+; SKX32-NEXT: kmovd %eax, %k1
+; SKX32-NEXT: vpexpandq %ymm0, %ymm0 {%k1} {z}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: expand4:
+; KNL32: # BB#0:
+; KNL32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
+; KNL32-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; KNL32-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
+; KNL32-NEXT: retl
%res = shufflevector <2 x i64> zeroinitializer, <2 x i64> %a, <4 x i32> <i32 2, i32 0, i32 0, i32 3>
ret <4 x i64> %res
}
; Negative test for 128 -> 256.
define <8 x float> @expand5(<4 x float> %a ) {
-; SKX-LABEL: expand5:
-; SKX: # BB#0:
-; SKX-NEXT: vbroadcastss %xmm0, %ymm0
-; SKX-NEXT: vxorps %ymm1, %ymm1, %ymm1
-; SKX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expand5:
-; KNL: # BB#0:
-; KNL-NEXT: vbroadcastss %xmm0, %ymm0
-; KNL-NEXT: vxorps %ymm1, %ymm1, %ymm1
-; KNL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
-; KNL-NEXT: retq
+; SKX64-LABEL: expand5:
+; SKX64: # BB#0:
+; SKX64-NEXT: vbroadcastss %xmm0, %ymm0
+; SKX64-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; SKX64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: expand5:
+; KNL64: # BB#0:
+; KNL64-NEXT: vbroadcastss %xmm0, %ymm0
+; KNL64-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; KNL64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: expand5:
+; SKX32: # BB#0:
+; SKX32-NEXT: vbroadcastss %xmm0, %ymm0
+; SKX32-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; SKX32-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: expand5:
+; KNL32: # BB#0:
+; KNL32-NEXT: vbroadcastss %xmm0, %ymm0
+; KNL32-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; KNL32-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
+; KNL32-NEXT: retl
%res = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <8 x i32> <i32 0, i32 4, i32 1, i32 4, i32 2, i32 4, i32 3, i32 4>
ret <8 x float> %res
}
; Expand 256 -> 512, including <8 x float> -> <16 x float>.
define <8 x float> @expand6(<4 x float> %a ) {
-; SKX-LABEL: expand6:
-; SKX: # BB#0:
-; SKX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; SKX-NEXT: vinsertf{{.*}}$1, %xmm0, %ymm1, %ymm0
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expand6:
-; KNL: # BB#0:
-; KNL-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; KNL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; KNL-NEXT: retq
+; SKX64-LABEL: expand6:
+; SKX64: # BB#0:
+; SKX64-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; SKX64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: expand6:
+; KNL64: # BB#0:
+; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; KNL64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: expand6:
+; SKX32: # BB#0:
+; SKX32-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; SKX32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: expand6:
+; KNL32: # BB#0:
+; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; KNL32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; KNL32-NEXT: retl
%res = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret <8 x float> %res
}
define <16 x float> @expand7(<8 x float> %a) {
-; SKX-LABEL: expand7:
-; SKX: # BB#0:
-; SKX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; SKX-NEXT: movw $1285, %ax # imm = 0x505
-; SKX-NEXT: kmovw %eax, %k1
-; SKX-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expand7:
-; KNL: # BB#0:
-; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; KNL-NEXT: movw $1285, %ax # imm = 0x505
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: retq
+; SKX64-LABEL: expand7:
+; SKX64: # BB#0:
+; SKX64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX64-NEXT: movw $1285, %ax # imm = 0x505
+; SKX64-NEXT: kmovd %eax, %k1
+; SKX64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: expand7:
+; KNL64: # BB#0:
+; KNL64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL64-NEXT: movw $1285, %ax # imm = 0x505
+; KNL64-NEXT: kmovw %eax, %k1
+; KNL64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: expand7:
+; SKX32: # BB#0:
+; SKX32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX32-NEXT: movw $1285, %ax # imm = 0x505
+; SKX32-NEXT: kmovd %eax, %k1
+; SKX32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: expand7:
+; KNL32: # BB#0:
+; KNL32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL32-NEXT: movw $1285, %ax # imm = 0x505
+; KNL32-NEXT: kmovw %eax, %k1
+; KNL32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
+; KNL32-NEXT: retl
%res = shufflevector <8 x float> %a, <8 x float> zeroinitializer, <16 x i32> <i32 0, i32 8, i32 1, i32 8, i32 8, i32 8, i32 8, i32 8, i32 2, i32 8, i32 3, i32 8, i32 8, i32 8, i32 8, i32 8>
ret <16 x float> %res
}
define <16 x float> @expand8(<8 x float> %a ) {
-; SKX-LABEL: expand8:
-; SKX: # BB#0:
-; SKX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; SKX-NEXT: movw $-21846, %ax # imm = 0xAAAA
-; SKX-NEXT: kmovw %eax, %k1
-; SKX-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expand8:
-; KNL: # BB#0:
-; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; KNL-NEXT: movw $-21846, %ax # imm = 0xAAAA
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: retq
+; SKX64-LABEL: expand8:
+; SKX64: # BB#0:
+; SKX64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX64-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; SKX64-NEXT: kmovd %eax, %k1
+; SKX64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: expand8:
+; KNL64: # BB#0:
+; KNL64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL64-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; KNL64-NEXT: kmovw %eax, %k1
+; KNL64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: expand8:
+; SKX32: # BB#0:
+; SKX32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX32-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; SKX32-NEXT: kmovd %eax, %k1
+; SKX32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: expand8:
+; KNL32: # BB#0:
+; KNL32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL32-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; KNL32-NEXT: kmovw %eax, %k1
+; KNL32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
+; KNL32-NEXT: retl
%res = shufflevector <8 x float> zeroinitializer, <8 x float> %a, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
ret <16 x float> %res
}
; Expand 256 -> 512, including <4 x double> -> <8 x double>.
define <8 x double> @expand9(<4 x double> %a) {
-; SKX-LABEL: expand9:
-; SKX: # BB#0:
-; SKX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; SKX-NEXT: movb $-127, %al
-; SKX-NEXT: kmovb %eax, %k1
-; SKX-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expand9:
-; KNL: # BB#0:
-; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; KNL-NEXT: movb $-127, %al
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: retq
+; SKX64-LABEL: expand9:
+; SKX64: # BB#0:
+; SKX64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX64-NEXT: movb $-127, %al
+; SKX64-NEXT: kmovd %eax, %k1
+; SKX64-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: expand9:
+; KNL64: # BB#0:
+; KNL64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL64-NEXT: movb $-127, %al
+; KNL64-NEXT: kmovw %eax, %k1
+; KNL64-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: expand9:
+; SKX32: # BB#0:
+; SKX32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX32-NEXT: movb $-127, %al
+; SKX32-NEXT: kmovd %eax, %k1
+; SKX32-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: expand9:
+; KNL32: # BB#0:
+; KNL32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL32-NEXT: movb $-127, %al
+; KNL32-NEXT: kmovw %eax, %k1
+; KNL32-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
+; KNL32-NEXT: retl
%res = shufflevector <4 x double> %a, <4 x double> zeroinitializer, <8 x i32> <i32 0, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 1>
ret <8 x double> %res
}
define <16 x i32> @expand10(<8 x i32> %a ) {
-; SKX-LABEL: expand10:
-; SKX: # BB#0:
-; SKX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; SKX-NEXT: movw $-21846, %ax # imm = 0xAAAA
-; SKX-NEXT: kmovw %eax, %k1
-; SKX-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expand10:
-; KNL: # BB#0:
-; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; KNL-NEXT: movw $-21846, %ax # imm = 0xAAAA
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: retq
+; SKX64-LABEL: expand10:
+; SKX64: # BB#0:
+; SKX64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX64-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; SKX64-NEXT: kmovd %eax, %k1
+; SKX64-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: expand10:
+; KNL64: # BB#0:
+; KNL64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL64-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; KNL64-NEXT: kmovw %eax, %k1
+; KNL64-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: expand10:
+; SKX32: # BB#0:
+; SKX32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX32-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; SKX32-NEXT: kmovd %eax, %k1
+; SKX32-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: expand10:
+; KNL32: # BB#0:
+; KNL32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL32-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; KNL32-NEXT: kmovw %eax, %k1
+; KNL32-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
+; KNL32-NEXT: retl
%res = shufflevector <8 x i32> zeroinitializer, <8 x i32> %a, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
ret <16 x i32> %res
}
define <8 x i64> @expand11(<4 x i64> %a) {
-; SKX-LABEL: expand11:
-; SKX: # BB#0:
-; SKX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; SKX-NEXT: movb $-127, %al
-; SKX-NEXT: kmovb %eax, %k1
-; SKX-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expand11:
-; KNL: # BB#0:
-; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; KNL-NEXT: movb $-127, %al
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: retq
+; SKX64-LABEL: expand11:
+; SKX64: # BB#0:
+; SKX64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX64-NEXT: movb $-127, %al
+; SKX64-NEXT: kmovd %eax, %k1
+; SKX64-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: expand11:
+; KNL64: # BB#0:
+; KNL64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL64-NEXT: movb $-127, %al
+; KNL64-NEXT: kmovw %eax, %k1
+; KNL64-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: expand11:
+; SKX32: # BB#0:
+; SKX32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX32-NEXT: movb $-127, %al
+; SKX32-NEXT: kmovd %eax, %k1
+; SKX32-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: expand11:
+; KNL32: # BB#0:
+; KNL32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL32-NEXT: movb $-127, %al
+; KNL32-NEXT: kmovw %eax, %k1
+; KNL32-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
+; KNL32-NEXT: retl
%res = shufflevector <4 x i64> %a, <4 x i64> zeroinitializer, <8 x i32> <i32 0, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 1>
ret <8 x i64> %res
}
; Negative test for 256 -> 512.
define <16 x float> @expand12(<8 x float> %a) {
-; SKX-LABEL: expand12:
-; SKX: # BB#0:
-; SKX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; SKX-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
-; SKX-NEXT: vxorps %zmm1, %zmm1, %zmm1
-; SKX-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
-; SKX-NEXT: vmovaps %zmm1, %zmm0
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expand12:
-; KNL: # BB#0:
-; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; KNL-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
-; KNL-NEXT: vpxord %zmm1, %zmm1, %zmm1
-; KNL-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
-; KNL-NEXT: vmovaps %zmm1, %zmm0
-; KNL-NEXT: retq
+; SKX64-LABEL: expand12:
+; SKX64: # BB#0:
+; SKX64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX64-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
+; SKX64-NEXT: vxorps %zmm1, %zmm1, %zmm1
+; SKX64-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
+; SKX64-NEXT: vmovaps %zmm1, %zmm0
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: expand12:
+; KNL64: # BB#0:
+; KNL64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL64-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
+; KNL64-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; KNL64-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
+; KNL64-NEXT: vmovaps %zmm1, %zmm0
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: expand12:
+; SKX32: # BB#0:
+; SKX32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX32-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
+; SKX32-NEXT: vxorps %zmm1, %zmm1, %zmm1
+; SKX32-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
+; SKX32-NEXT: vmovaps %zmm1, %zmm0
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: expand12:
+; KNL32: # BB#0:
+; KNL32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL32-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
+; KNL32-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; KNL32-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
+; KNL32-NEXT: vmovaps %zmm1, %zmm0
+; KNL32-NEXT: retl
%res = shufflevector <8 x float> zeroinitializer, <8 x float> %a, <16 x i32> <i32 0, i32 8, i32 1, i32 8, i32 2, i32 8, i32 3, i32 8,i32 0, i32 8, i32 1, i32 8, i32 2, i32 8, i32 3, i32 8>
ret <16 x float> %res
}
define <16 x float> @expand13(<8 x float> %a ) {
-; SKX-LABEL: expand13:
-; SKX: # BB#0:
-; SKX-NEXT: vxorps %ymm1, %ymm1, %ymm1
-; SKX-NEXT: vinsertf32x8 $1, %ymm0, %zmm1, %zmm0
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expand13:
-; KNL: # BB#0:
-; KNL-NEXT: vxorpd %ymm1, %ymm1, %ymm1
-; KNL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
-; KNL-NEXT: retq
+; SKX64-LABEL: expand13:
+; SKX64: # BB#0:
+; SKX64-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; SKX64-NEXT: vinsertf32x8 $1, %ymm0, %zmm1, %zmm0
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: expand13:
+; KNL64: # BB#0:
+; KNL64-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; KNL64-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: expand13:
+; SKX32: # BB#0:
+; SKX32-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; SKX32-NEXT: vinsertf32x8 $1, %ymm0, %zmm1, %zmm0
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: expand13:
+; KNL32: # BB#0:
+; KNL32-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; KNL32-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; KNL32-NEXT: retl
%res = shufflevector <8 x float> zeroinitializer, <8 x float> %a, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
ret <16 x float> %res
}
@@ -285,23 +501,41 @@ define <16 x float> @expand13(<8 x float> %a ) {
; The function checks the case where the vector holds mixed values and the shuffle mask points at the zero elements of this vector.
define <8 x float> @expand14(<4 x float> %a) {
-; SKX-LABEL: expand14:
-; SKX: # BB#0:
-; SKX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; SKX-NEXT: movb $20, %al
-; SKX-NEXT: kmovb %eax, %k1
-; SKX-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expand14:
-; KNL: # BB#0:
-; KNL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; KNL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,3]
-; KNL-NEXT: vmovaps {{.*#+}} ymm1 = <0,2,4,0,u,u,u,u>
-; KNL-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,3,0,0]
-; KNL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,1]
-; KNL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6,7]
-; KNL-NEXT: retq
+; SKX64-LABEL: expand14:
+; SKX64: # BB#0:
+; SKX64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX64-NEXT: movb $20, %al
+; SKX64-NEXT: kmovd %eax, %k1
+; SKX64-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: expand14:
+; KNL64: # BB#0:
+; KNL64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; KNL64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,3]
+; KNL64-NEXT: vmovaps {{.*#+}} ymm1 = <0,2,4,0,u,u,u,u>
+; KNL64-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,3,0,0]
+; KNL64-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,1]
+; KNL64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6,7]
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: expand14:
+; SKX32: # BB#0:
+; SKX32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX32-NEXT: movb $20, %al
+; SKX32-NEXT: kmovd %eax, %k1
+; SKX32-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: expand14:
+; KNL32: # BB#0:
+; KNL32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; KNL32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,3]
+; KNL32-NEXT: vmovaps {{.*#+}} ymm1 = <0,2,4,0,u,u,u,u>
+; KNL32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,3,0,0]
+; KNL32-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,1]
+; KNL32-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6,7]
+; KNL32-NEXT: retl
%addV = fadd <4 x float> <float 0.0,float 1.0,float 2.0,float 0.0> , <float 0.0,float 1.0,float 2.0,float 0.0>
%res = shufflevector <4 x float> %addV, <4 x float> %a, <8 x i32> <i32 3, i32 3, i32 4, i32 0, i32 5, i32 0, i32 0, i32 0>
ret <8 x float> %res
@@ -309,25 +543,332 @@ define <8 x float> @expand14(<4 x float> %a) {
; Negative test.
define <8 x float> @expand15(<4 x float> %a) {
-; SKX-LABEL: expand15:
-; SKX: # BB#0:
-; SKX-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,1,1,3]
-; SKX-NEXT: vmovaps {{.*#+}} ymm0 = <0,2,4,0,u,u,u,u>
-; SKX-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[0,1,0,0]
-; SKX-NEXT: vmovaps {{.*#+}} ymm0 = [0,1,8,3,10,3,2,3]
-; SKX-NEXT: vpermi2ps %ymm1, %ymm2, %ymm0
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expand15:
-; KNL: # BB#0:
-; KNL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; KNL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,3]
-; KNL-NEXT: vmovaps {{.*#+}} ymm1 = <0,2,4,0,u,u,u,u>
-; KNL-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,1,0,0]
-; KNL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,1]
-; KNL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6,7]
-; KNL-NEXT: retq
+; SKX64-LABEL: expand15:
+; SKX64: # BB#0:
+; SKX64-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,1,1,3]
+; SKX64-NEXT: vmovaps {{.*#+}} ymm0 = <0,2,4,0,u,u,u,u>
+; SKX64-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[0,1,0,0]
+; SKX64-NEXT: vmovaps {{.*#+}} ymm0 = [0,1,8,3,10,3,2,3]
+; SKX64-NEXT: vpermi2ps %ymm1, %ymm2, %ymm0
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: expand15:
+; KNL64: # BB#0:
+; KNL64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; KNL64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,3]
+; KNL64-NEXT: vmovaps {{.*#+}} ymm1 = <0,2,4,0,u,u,u,u>
+; KNL64-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,1,0,0]
+; KNL64-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,1]
+; KNL64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6,7]
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: expand15:
+; SKX32: # BB#0:
+; SKX32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,1,1,3]
+; SKX32-NEXT: vmovaps {{.*#+}} ymm0 = <0,2,4,0,u,u,u,u>
+; SKX32-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[0,1,0,0]
+; SKX32-NEXT: vmovaps {{.*#+}} ymm0 = [0,1,8,3,10,3,2,3]
+; SKX32-NEXT: vpermi2ps %ymm1, %ymm2, %ymm0
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: expand15:
+; KNL32: # BB#0:
+; KNL32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; KNL32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,3]
+; KNL32-NEXT: vmovaps {{.*#+}} ymm1 = <0,2,4,0,u,u,u,u>
+; KNL32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,1,0,0]
+; KNL32-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,1]
+; KNL32-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6,7]
+; KNL32-NEXT: retl
%addV = fadd <4 x float> <float 0.0,float 1.0,float 2.0,float 0.0> , <float 0.0,float 1.0,float 2.0,float 0.0>
%res = shufflevector <4 x float> %addV, <4 x float> %a, <8 x i32> <i32 0, i32 1, i32 4, i32 0, i32 5, i32 0, i32 0, i32 0>
ret <8 x float> %res
}
+
+
+; Shuffle-to-blend tests.
+
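+; The shuffle masks below pick lanes alternately from %A and %W, i.e. a blend.
+; The k-register immediates (the 0xAA... patterns) have a bit set for every
+; lane that should be taken from %A.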
+define <64 x i8> @test_mm512_mask_blend_epi8(<64 x i8> %A, <64 x i8> %W){
+; SKX64-LABEL: test_mm512_mask_blend_epi8:
+; SKX64: # BB#0: # %entry
+; SKX64-NEXT: movabsq $-6148914691236517206, %rax # imm = 0xAAAAAAAAAAAAAAAA
+; SKX64-NEXT: kmovq %rax, %k1
+; SKX64-NEXT: vpblendmb %zmm0, %zmm1, %zmm0 {%k1}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: test_mm512_mask_blend_epi8:
+; KNL64: # BB#0: # %entry
+; KNL64-NEXT: vpbroadcastw {{.*}}(%rip), %ymm4
+; KNL64-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; KNL64-NEXT: vpblendvb %ymm4, %ymm3, %ymm1, %ymm1
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: test_mm512_mask_blend_epi8:
+; SKX32: # BB#0: # %entry
+; SKX32-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
+; SKX32-NEXT: kmovd %eax, %k0
+; SKX32-NEXT: kunpckdq %k0, %k0, %k1
+; SKX32-NEXT: vpblendmb %zmm0, %zmm1, %zmm0 {%k1}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: test_mm512_mask_blend_epi8:
+; KNL32: # BB#0: # %entry
+; KNL32-NEXT: pushl %ebp
+; KNL32-NEXT: .Lcfi0:
+; KNL32-NEXT: .cfi_def_cfa_offset 8
+; KNL32-NEXT: .Lcfi1:
+; KNL32-NEXT: .cfi_offset %ebp, -8
+; KNL32-NEXT: movl %esp, %ebp
+; KNL32-NEXT: .Lcfi2:
+; KNL32-NEXT: .cfi_def_cfa_register %ebp
+; KNL32-NEXT: andl $-32, %esp
+; KNL32-NEXT: subl $32, %esp
+; KNL32-NEXT: vpbroadcastw {{\.LCPI.*}}, %ymm3
+; KNL32-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; KNL32-NEXT: vpblendvb %ymm3, 8(%ebp), %ymm1, %ymm1
+; KNL32-NEXT: movl %ebp, %esp
+; KNL32-NEXT: popl %ebp
+; KNL32-NEXT: retl
+entry:
+ %0 = shufflevector <64 x i8> %A, <64 x i8> %W, <64 x i32> <i32 64, i32 1, i32 66, i32 3, i32 68, i32 5, i32 70, i32 7, i32 72, i32 9, i32 74, i32 11, i32 76, i32 13, i32 78, i32 15, i32 80, i32 17, i32 82, i32 19, i32 84, i32 21, i32 86, i32 23, i32 88, i32 25, i32 90, i32 27, i32 92, i32 29, i32 94, i32 31, i32 96, i32 33, i32 98, i32 35, i32 100, i32 37, i32 102, i32 39, i32 104, i32 41, i32 106, i32 43, i32 108, i32 45, i32 110, i32 47, i32 112, i32 49, i32 114, i32 51, i32 116, i32 53, i32 118, i32 55, i32 120, i32 57, i32 122, i32 59, i32 124, i32 61, i32 126, i32 63>
+ ret <64 x i8> %0
+}
+
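+; KNL lacks AVX512BW, so this i16 blend (like the i8 one above) is lowered as
+; two 256-bit AVX2 blends, with the upper half of %W read from the stack on
+; 32-bit targets (the 8(%ebp) / mem operands in the KNL32 checks).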
+define <32 x i16> @test_mm512_mask_blend_epi16(<32 x i16> %A, <32 x i16> %W){
+; SKX64-LABEL: test_mm512_mask_blend_epi16:
+; SKX64: # BB#0: # %entry
+; SKX64-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
+; SKX64-NEXT: kmovd %eax, %k1
+; SKX64-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: test_mm512_mask_blend_epi16:
+; KNL64: # BB#0: # %entry
+; KNL64-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3],ymm2[4],ymm0[5],ymm2[6],ymm0[7],ymm2[8],ymm0[9],ymm2[10],ymm0[11],ymm2[12],ymm0[13],ymm2[14],ymm0[15]
+; KNL64-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7],ymm3[8],ymm1[9],ymm3[10],ymm1[11],ymm3[12],ymm1[13],ymm3[14],ymm1[15]
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: test_mm512_mask_blend_epi16:
+; SKX32: # BB#0: # %entry
+; SKX32-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
+; SKX32-NEXT: kmovd %eax, %k1
+; SKX32-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: test_mm512_mask_blend_epi16:
+; KNL32: # BB#0: # %entry
+; KNL32-NEXT: pushl %ebp
+; KNL32-NEXT: .Lcfi3:
+; KNL32-NEXT: .cfi_def_cfa_offset 8
+; KNL32-NEXT: .Lcfi4:
+; KNL32-NEXT: .cfi_offset %ebp, -8
+; KNL32-NEXT: movl %esp, %ebp
+; KNL32-NEXT: .Lcfi5:
+; KNL32-NEXT: .cfi_def_cfa_register %ebp
+; KNL32-NEXT: andl $-32, %esp
+; KNL32-NEXT: subl $32, %esp
+; KNL32-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3],ymm2[4],ymm0[5],ymm2[6],ymm0[7],ymm2[8],ymm0[9],ymm2[10],ymm0[11],ymm2[12],ymm0[13],ymm2[14],ymm0[15]
+; KNL32-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm1[1],mem[2],ymm1[3],mem[4],ymm1[5],mem[6],ymm1[7],mem[8],ymm1[9],mem[10],ymm1[11],mem[12],ymm1[13],mem[14],ymm1[15]
+; KNL32-NEXT: movl %ebp, %esp
+; KNL32-NEXT: popl %ebp
+; KNL32-NEXT: retl
+entry:
+ %0 = shufflevector <32 x i16> %A, <32 x i16> %W, <32 x i32> <i32 32, i32 1, i32 34, i32 3, i32 36, i32 5, i32 38, i32 7, i32 40, i32 9, i32 42, i32 11, i32 44, i32 13, i32 46, i32 15, i32 48, i32 17, i32 50, i32 19, i32 52, i32 21, i32 54, i32 23, i32 56, i32 25, i32 58, i32 27, i32 60, i32 29, i32 62, i32 31>
+ ret <32 x i16> %0
+}
+
+define <16 x i32> @test_mm512_mask_blend_epi32(<16 x i32> %A, <16 x i32> %W){
+; SKX64-LABEL: test_mm512_mask_blend_epi32:
+; SKX64: # BB#0: # %entry
+; SKX64-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; SKX64-NEXT: kmovd %eax, %k1
+; SKX64-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: test_mm512_mask_blend_epi32:
+; KNL64: # BB#0: # %entry
+; KNL64-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; KNL64-NEXT: kmovw %eax, %k1
+; KNL64-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: test_mm512_mask_blend_epi32:
+; SKX32: # BB#0: # %entry
+; SKX32-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; SKX32-NEXT: kmovd %eax, %k1
+; SKX32-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: test_mm512_mask_blend_epi32:
+; KNL32: # BB#0: # %entry
+; KNL32-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; KNL32-NEXT: kmovw %eax, %k1
+; KNL32-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; KNL32-NEXT: retl
+entry:
+ %0 = shufflevector <16 x i32> %A, <16 x i32> %W, <16 x i32> <i32 16, i32 1, i32 18, i32 3, i32 20, i32 5, i32 22, i32 7, i32 24, i32 9, i32 26, i32 11, i32 28, i32 13, i32 30, i32 15>
+ ret <16 x i32> %0
+}
+
+define <8 x i64> @test_mm512_mask_blend_epi64(<8 x i64> %A, <8 x i64> %W){
+; SKX64-LABEL: test_mm512_mask_blend_epi64:
+; SKX64: # BB#0: # %entry
+; SKX64-NEXT: movb $-86, %al
+; SKX64-NEXT: kmovd %eax, %k1
+; SKX64-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: test_mm512_mask_blend_epi64:
+; KNL64: # BB#0: # %entry
+; KNL64-NEXT: movb $-86, %al
+; KNL64-NEXT: kmovw %eax, %k1
+; KNL64-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: test_mm512_mask_blend_epi64:
+; SKX32: # BB#0: # %entry
+; SKX32-NEXT: movb $-86, %al
+; SKX32-NEXT: kmovd %eax, %k1
+; SKX32-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: test_mm512_mask_blend_epi64:
+; KNL32: # BB#0: # %entry
+; KNL32-NEXT: movb $-86, %al
+; KNL32-NEXT: kmovw %eax, %k1
+; KNL32-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; KNL32-NEXT: retl
+entry:
+ %0 = shufflevector <8 x i64> %A, <8 x i64> %W, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
+ ret <8 x i64> %0
+}
+
+define <16 x float> @test_mm512_mask_blend_ps(<16 x float> %A, <16 x float> %W){
+; SKX64-LABEL: test_mm512_mask_blend_ps:
+; SKX64: # BB#0: # %entry
+; SKX64-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; SKX64-NEXT: kmovd %eax, %k1
+; SKX64-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: test_mm512_mask_blend_ps:
+; KNL64: # BB#0: # %entry
+; KNL64-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; KNL64-NEXT: kmovw %eax, %k1
+; KNL64-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: test_mm512_mask_blend_ps:
+; SKX32: # BB#0: # %entry
+; SKX32-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; SKX32-NEXT: kmovd %eax, %k1
+; SKX32-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: test_mm512_mask_blend_ps:
+; KNL32: # BB#0: # %entry
+; KNL32-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; KNL32-NEXT: kmovw %eax, %k1
+; KNL32-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
+; KNL32-NEXT: retl
+entry:
+ %0 = shufflevector <16 x float> %A, <16 x float> %W, <16 x i32> <i32 16, i32 1, i32 18, i32 3, i32 20, i32 5, i32 22, i32 7, i32 24, i32 9, i32 26, i32 11, i32 28, i32 13, i32 30, i32 15>
+ ret <16 x float> %0
+}
+
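+; Mask arithmetic for the pd case: the shuffle takes %A at positions 3, 5 and
+; 7, so the k-mask is 0b10101000 = 0xA8, printed as the signed byte -88 below.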
+define <8 x double> @test_mm512_mask_blend_pd(<8 x double> %A, <8 x double> %W){
+; SKX64-LABEL: test_mm512_mask_blend_pd:
+; SKX64: # BB#0: # %entry
+; SKX64-NEXT: movb $-88, %al
+; SKX64-NEXT: kmovd %eax, %k1
+; SKX64-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: test_mm512_mask_blend_pd:
+; KNL64: # BB#0: # %entry
+; KNL64-NEXT: movb $-88, %al
+; KNL64-NEXT: kmovw %eax, %k1
+; KNL64-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: test_mm512_mask_blend_pd:
+; SKX32: # BB#0: # %entry
+; SKX32-NEXT: movb $-88, %al
+; SKX32-NEXT: kmovd %eax, %k1
+; SKX32-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: test_mm512_mask_blend_pd:
+; KNL32: # BB#0: # %entry
+; KNL32-NEXT: movb $-88, %al
+; KNL32-NEXT: kmovw %eax, %k1
+; KNL32-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
+; KNL32-NEXT: retl
+entry:
+ %0 = shufflevector <8 x double> %A, <8 x double> %W, <8 x i32> <i32 8, i32 9, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
+ ret <8 x double> %0
+}
+
+
+define <32 x i8> @test_mm256_mask_blend_epi8(<32 x i8> %A, <32 x i8> %W){
+; SKX64-LABEL: test_mm256_mask_blend_epi8:
+; SKX64: # BB#0: # %entry
+; SKX64-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
+; SKX64-NEXT: kmovd %eax, %k1
+; SKX64-NEXT: vpblendmb %ymm0, %ymm1, %ymm0 {%k1}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: test_mm256_mask_blend_epi8:
+; KNL64: # BB#0: # %entry
+; KNL64-NEXT: vmovdqa {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; KNL64-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: test_mm256_mask_blend_epi8:
+; SKX32: # BB#0: # %entry
+; SKX32-NEXT: movl $-1431655766, %eax # imm = 0xAAAAAAAA
+; SKX32-NEXT: kmovd %eax, %k1
+; SKX32-NEXT: vpblendmb %ymm0, %ymm1, %ymm0 {%k1}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: test_mm256_mask_blend_epi8:
+; KNL32: # BB#0: # %entry
+; KNL32-NEXT: vmovdqa {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; KNL32-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; KNL32-NEXT: retl
+entry:
+ %0 = shufflevector <32 x i8> %A, <32 x i8> %W, <32 x i32> <i32 32, i32 1, i32 34, i32 3, i32 36, i32 5, i32 38, i32 7, i32 40, i32 9, i32 42, i32 11, i32 44, i32 13, i32 46, i32 15, i32 48, i32 17, i32 50, i32 19, i32 52, i32 21, i32 54, i32 23, i32 56, i32 25, i32 58, i32 27, i32 60, i32 29, i32 62, i32 31>
+ ret <32 x i8> %0
+}
+
+define <16 x i8> @test_mm_mask_blend_epi8(<16 x i8> %A, <16 x i8> %W){
+; SKX64-LABEL: test_mm_mask_blend_epi8:
+; SKX64: # BB#0: # %entry
+; SKX64-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; SKX64-NEXT: kmovd %eax, %k1
+; SKX64-NEXT: vpblendmb %xmm0, %xmm1, %xmm0 {%k1}
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: test_mm_mask_blend_epi8:
+; KNL64: # BB#0: # %entry
+; KNL64-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; KNL64-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: test_mm_mask_blend_epi8:
+; SKX32: # BB#0: # %entry
+; SKX32-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; SKX32-NEXT: kmovd %eax, %k1
+; SKX32-NEXT: vpblendmb %xmm0, %xmm1, %xmm0 {%k1}
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: test_mm_mask_blend_epi8:
+; KNL32: # BB#0: # %entry
+; KNL32-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; KNL32-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; KNL32-NEXT: retl
+entry:
+ %0 = shufflevector <16 x i8> %A, <16 x i8> %W, <16 x i32> <i32 16, i32 1, i32 18, i32 3, i32 20, i32 5, i32 22, i32 7, i32 24, i32 9, i32 26, i32 11, i32 28, i32 13, i32 30, i32 15>
+ ret <16 x i8> %0
+}
+
diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
index d34bbb601031..1385929ab8cd 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX2
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX512
declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>)
declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>)
@@ -72,12 +74,14 @@ define <32 x i8> @combine_pshufb_vpermps(<8 x float> %a) {
define <32 x i8> @combine_and_pshufb(<32 x i8> %a0) {
; X32-LABEL: combine_and_pshufb:
; X32: # BB#0:
-; X32-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; X32-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
; X32-NEXT: retl
;
; X64-LABEL: combine_and_pshufb:
; X64: # BB#0:
-; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; X64-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
; X64-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 32, i32 32, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
%2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> <i8 0, i8 1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 8, i8 9, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 8, i8 9, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
@@ -87,12 +91,14 @@ define <32 x i8> @combine_and_pshufb(<32 x i8> %a0) {
define <32 x i8> @combine_pshufb_and(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_and:
; X32: # BB#0:
-; X32-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; X32-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_and:
; X64: # BB#0:
-; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; X64-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
; X64-NEXT: retq
%1 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 0, i8 1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 8, i8 9, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 8, i8 9, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
%2 = shufflevector <32 x i8> %1, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 32, i32 32, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -480,14 +486,12 @@ define <8 x float> @combine_permps_as_permpd(<8 x float> %a) {
define <4 x i64> @combine_pshufb_as_zext(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_zext:
; X32: # BB#0:
-; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,0,0,1]
-; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9],zero,zero,zero,zero,zero,zero,ymm0[10,11],zero,zero,zero,zero,zero,zero,ymm0[20,21],zero,zero,zero,zero,zero,zero,ymm0[22,23],zero,zero,zero,zero,zero,zero
+; X32-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_zext:
; X64: # BB#0:
-; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,0,0,1]
-; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9],zero,zero,zero,zero,zero,zero,ymm0[10,11],zero,zero,zero,zero,zero,zero,ymm0[20,21],zero,zero,zero,zero,zero,zero,ymm0[22,23],zero,zero,zero,zero,zero,zero
+; X64-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; X64-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> <i8 8, i8 9, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 10, i8 11, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 4, i8 5, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 6, i8 7, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
@@ -499,7 +503,7 @@ define <4 x i64> @combine_pshufb_as_zext128(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_zext128:
; X32: # BB#0:
; X32-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
-; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
+; X32-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[0,1,0,1]
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[15,14],zero,zero,zero,zero,zero,zero,ymm0[13,12],zero,zero,zero,zero,zero,zero,ymm0[31,30],zero,zero,zero,zero,zero,zero,ymm0[29,28],zero,zero,zero,zero,zero,zero
; X32-NEXT: retl
;
@@ -516,17 +520,29 @@ define <4 x i64> @combine_pshufb_as_zext128(<32 x i8> %a0) {
}
define <4 x double> @combine_pshufb_as_vzmovl_64(<4 x double> %a0) {
-; X32-LABEL: combine_pshufb_as_vzmovl_64:
-; X32: # BB#0:
-; X32-NEXT: vxorpd %ymm1, %ymm1, %ymm1
-; X32-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
-; X32-NEXT: retl
-;
-; X64-LABEL: combine_pshufb_as_vzmovl_64:
-; X64: # BB#0:
-; X64-NEXT: vxorpd %ymm1, %ymm1, %ymm1
-; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
-; X64-NEXT: retq
+; X32-AVX2-LABEL: combine_pshufb_as_vzmovl_64:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; X32-AVX2-NEXT: retl
+;
+; X32-AVX512-LABEL: combine_pshufb_as_vzmovl_64:
+; X32-AVX512: # BB#0:
+; X32-AVX512-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; X32-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; X32-AVX512-NEXT: retl
+;
+; X64-AVX2-LABEL: combine_pshufb_as_vzmovl_64:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; X64-AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512-LABEL: combine_pshufb_as_vzmovl_64:
+; X64-AVX512: # BB#0:
+; X64-AVX512-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; X64-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; X64-AVX512-NEXT: retq
%1 = bitcast <4 x double> %a0 to <32 x i8>
%2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
%3 = bitcast <32 x i8> %2 to <4 x double>
@@ -534,17 +550,29 @@ define <4 x double> @combine_pshufb_as_vzmovl_64(<4 x double> %a0) {
}
define <8 x float> @combine_pshufb_as_vzmovl_32(<8 x float> %a0) {
-; X32-LABEL: combine_pshufb_as_vzmovl_32:
-; X32: # BB#0:
-; X32-NEXT: vxorps %ymm1, %ymm1, %ymm1
-; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
-; X32-NEXT: retl
-;
-; X64-LABEL: combine_pshufb_as_vzmovl_32:
-; X64: # BB#0:
-; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1
-; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
-; X64-NEXT: retq
+; X32-AVX2-LABEL: combine_pshufb_as_vzmovl_32:
+; X32-AVX2: # BB#0:
+; X32-AVX2-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X32-AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; X32-AVX2-NEXT: retl
+;
+; X32-AVX512-LABEL: combine_pshufb_as_vzmovl_32:
+; X32-AVX512: # BB#0:
+; X32-AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X32-AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; X32-AVX512-NEXT: retl
+;
+; X64-AVX2-LABEL: combine_pshufb_as_vzmovl_32:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; X64-AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512-LABEL: combine_pshufb_as_vzmovl_32:
+; X64-AVX512: # BB#0:
+; X64-AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X64-AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; X64-AVX512-NEXT: retq
%1 = bitcast <8 x float> %a0 to <32 x i8>
%2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
%3 = bitcast <32 x i8> %2 to <8 x float>
@@ -664,6 +692,51 @@ define <32 x i8> @combine_pshufb_not_as_pshufw(<32 x i8> %a0) {
ret <32 x i8> %res1
}
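+; The duplicating shuffle below reads only the lanes the pshufb mask left
+; undef, so the whole sequence is expected to fold away to a plain return.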
+define <32 x i8> @combine_pshufb_as_unpacklo_undef(<32 x i8> %a0) {
+; X32-LABEL: combine_pshufb_as_unpacklo_undef:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_pshufb_as_unpacklo_undef:
+; X64: # BB#0:
+; X64-NEXT: retq
+ %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 undef, i8 0, i8 undef, i8 1, i8 undef, i8 2, i8 undef, i8 3, i8 undef, i8 4, i8 undef, i8 5, i8 undef, i8 6, i8 undef, i8 7, i8 undef, i8 16, i8 undef, i8 17, i8 undef, i8 18, i8 undef, i8 19, i8 undef, i8 20, i8 undef, i8 21, i8 undef, i8 22, i8 undef, i8 23>)
+ %2 = shufflevector <32 x i8> %1, <32 x i8> undef, <32 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14, i32 16, i32 16, i32 18, i32 18, i32 20, i32 20, i32 22, i32 22, i32 24, i32 24, i32 26, i32 26, i32 28, i32 28, i32 30, i32 30>
+ ret <32 x i8> %2
+}
+
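+; A pshufb mask that zeroes every other element is equivalent to an unpack
+; against a zero vector, which is the lowering the next two tests expect.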
+define <32 x i8> @combine_pshufb_as_unpacklo_zero(<32 x i8> %a0) {
+; X32-LABEL: combine_pshufb_as_unpacklo_zero:
+; X32: # BB#0:
+; X32-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; X32-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_pshufb_as_unpacklo_zero:
+; X64: # BB#0:
+; X64-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; X64-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
+; X64-NEXT: retq
+ %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 0, i8 1, i8 -1, i8 -1, i8 2, i8 3, i8 -1, i8 -1, i8 4, i8 5, i8 -1, i8 -1, i8 6, i8 7, i8 -1, i8 -1, i8 16, i8 17, i8 -1, i8 -1, i8 18, i8 19, i8 -1, i8 -1, i8 20, i8 21, i8 -1, i8 -1, i8 22, i8 23, i8 -1, i8 -1>)
+ ret <32 x i8> %1
+}
+
+define <32 x i8> @combine_pshufb_as_unpackhi_zero(<32 x i8> %a0) {
+; X32-LABEL: combine_pshufb_as_unpackhi_zero:
+; X32: # BB#0:
+; X32-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; X32-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_pshufb_as_unpackhi_zero:
+; X64: # BB#0:
+; X64-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; X64-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
+; X64-NEXT: retq
+ %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 -1, i8 8, i8 -1, i8 9, i8 -1, i8 10, i8 -1, i8 11, i8 -1, i8 12, i8 -1, i8 13, i8 -1, i8 14, i8 -1, i8 15, i8 -1, i8 24, i8 -1, i8 25, i8 -1, i8 26, i8 -1, i8 27, i8 -1, i8 28, i8 -1, i8 29, i8 -1, i8 30, i8 -1, i8 31>)
+ ret <32 x i8> %1
+}
+
define <32 x i8> @combine_psrlw_pshufb(<16 x i16> %a0) {
; X32-LABEL: combine_psrlw_pshufb:
; X32: # BB#0:
@@ -712,6 +785,59 @@ define <32 x i8> @combine_psrlq_pshufb(<4 x i64> %a0) {
ret <32 x i8> %3
}
+define <32 x i8> @combine_unpack_unpack_pshufb(<32 x i8> %a0) {
+; X32-LABEL: combine_unpack_unpack_pshufb:
+; X32: # BB#0:
+; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,4,8,1,1,5,9,2,2,6,10,3,3,7,11,16,16,20,24,17,17,21,25,18,18,22,26,19,19,23,27]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_unpack_unpack_pshufb:
+; X64: # BB#0:
+; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,4,8,1,1,5,9,2,2,6,10,3,3,7,11,16,16,20,24,17,17,21,25,18,18,22,26,19,19,23,27]
+; X64-NEXT: retq
+ %1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 16, i32 17, i32 18, i32 19, i32 16, i32 17, i32 18, i32 19, i32 16, i32 17, i32 18, i32 19>
+ %2 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 20, i32 21, i32 22, i32 23, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %3 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 24, i32 25, i32 26, i32 27, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = shufflevector <32 x i8> %1, <32 x i8> %2, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %5 = shufflevector <32 x i8> %1, <32 x i8> %3, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = shufflevector <32 x i8> %4, <32 x i8> %5, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55>
+ ret <32 x i8> %6
+}
+
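+; On i386 the i64 argument is passed on the stack, so the insert + shuffle
+; pair folds directly into a vpbroadcastq load; x86-64 first moves %rdi into
+; an xmm register.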
+define <16 x i8> @combine_pshufb_insertion_as_broadcast_v2i64(i64 %a0) {
+; X32-LABEL: combine_pshufb_insertion_as_broadcast_v2i64:
+; X32: # BB#0:
+; X32-NEXT: vpbroadcastq {{[0-9]+}}(%esp), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_pshufb_insertion_as_broadcast_v2i64:
+; X64: # BB#0:
+; X64-NEXT: vmovq %rdi, %xmm0
+; X64-NEXT: vpbroadcastq %xmm0, %xmm0
+; X64-NEXT: retq
+ %1 = insertelement <2 x i64> undef, i64 %a0, i32 0
+ %2 = bitcast <2 x i64> %1 to <16 x i8>
+ %3 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
+ ret <16 x i8> %3
+}
+
+define <8 x i32> @combine_permd_insertion_as_broadcast_v4i64(i64 %a0) {
+; X32-LABEL: combine_permd_insertion_as_broadcast_v4i64:
+; X32: # BB#0:
+; X32-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_permd_insertion_as_broadcast_v4i64:
+; X64: # BB#0:
+; X64-NEXT: vmovq %rdi, %xmm0
+; X64-NEXT: vpbroadcastq %xmm0, %ymm0
+; X64-NEXT: retq
+ %1 = insertelement <4 x i64> undef, i64 %a0, i32 0
+ %2 = bitcast <4 x i64> %1 to <8 x i32>
+ %3 = tail call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %2, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>)
+ ret <8 x i32> %3
+}
+
define <8 x i32> @constant_fold_permd() {
; X32-LABEL: constant_fold_permd:
; X32: # BB#0:
diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll b/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
index 687098f9abf3..b68f609fc65d 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
@@ -51,7 +51,7 @@ define <8 x double> @combine_permvar_8f64_identity_mask(<8 x double> %x0, <8 x d
;
; X64-LABEL: combine_permvar_8f64_identity_mask:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovapd {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
; X64-NEXT: vpermpd %zmm0, %zmm2, %zmm1 {%k1}
; X64-NEXT: vmovapd {{.*#+}} zmm0 = [7,14,5,12,3,10,1,8]
@@ -66,10 +66,6 @@ define <8 x double> @combine_permvar_8f64_identity_mask(<8 x double> %x0, <8 x d
define <8 x i64> @combine_permvar_8i64_identity(<8 x i64> %x0, <8 x i64> %x1) {
; X32-LABEL: combine_permvar_8i64_identity:
; X32: # BB#0:
-; X32-NEXT: vmovdqa64 {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
-; X32-NEXT: vpermq %zmm0, %zmm1, %zmm0
-; X32-NEXT: vmovdqa64 {{.*#+}} zmm1 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
-; X32-NEXT: vpermq %zmm0, %zmm1, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permvar_8i64_identity:
@@ -93,7 +89,7 @@ define <8 x i64> @combine_permvar_8i64_identity_mask(<8 x i64> %x0, <8 x i64> %x
;
; X64-LABEL: combine_permvar_8i64_identity_mask:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovdqa64 {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
; X64-NEXT: vpermq %zmm0, %zmm2, %zmm1 {%k1}
; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [7,14,5,12,3,10,1,8]
@@ -130,7 +126,7 @@ define <8 x double> @combine_vpermt2var_8f64_identity_mask(<8 x double> %x0, <8
;
; X64-LABEL: combine_vpermt2var_8f64_identity_mask:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovapd {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
; X64-NEXT: vpermi2pd %zmm1, %zmm0, %zmm2 {%k1} {z}
; X64-NEXT: vmovapd {{.*#+}} zmm0 = [7,14,5,12,3,10,1,8]
@@ -179,7 +175,7 @@ define <8 x double> @combine_vpermt2var_8f64_movddup_mask(<8 x double> %x0, <8 x
;
; X64-LABEL: combine_vpermt2var_8f64_movddup_mask:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
; X64-NEXT: retq
%res0 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 0, i64 0, i64 2, i64 2, i64 4, i64 4, i64 6, i64 6>, <8 x double> %x0, <8 x double> %x1, i8 %m)
@@ -189,10 +185,6 @@ define <8 x double> @combine_vpermt2var_8f64_movddup_mask(<8 x double> %x0, <8 x
define <8 x i64> @combine_vpermt2var_8i64_identity(<8 x i64> %x0, <8 x i64> %x1) {
; X32-LABEL: combine_vpermt2var_8i64_identity:
; X32: # BB#0:
-; X32-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,u,6,0,5,0,4,0,3,0,2,0,1,0,0,0>
-; X32-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
-; X32-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,14,0,5,0,12,0,3,0,10,0,1,0,8,0>
-; X32-NEXT: vpermi2q %zmm2, %zmm2, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_8i64_identity:
@@ -215,7 +207,7 @@ define <8 x i64> @combine_vpermt2var_8i64_identity_mask(<8 x i64> %x0, <8 x i64>
;
; X64-LABEL: combine_vpermt2var_8i64_identity_mask:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovdqa64 {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
; X64-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 {%k1} {z}
; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [7,14,5,12,3,10,1,8]
@@ -250,7 +242,7 @@ define <16 x float> @combine_vpermt2var_16f32_identity_mask(<16 x float> %x0, <1
;
; X64-LABEL: combine_vpermt2var_16f32_identity_mask:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovaps {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X64-NEXT: vpermi2ps %zmm1, %zmm0, %zmm2 {%k1} {z}
; X64-NEXT: vmovaps {{.*#+}} zmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
@@ -307,7 +299,7 @@ define <16 x float> @combine_vpermt2var_16f32_vmovddup_mask(<16 x float> %x0, <1
;
; X64-LABEL: combine_vpermt2var_16f32_vmovddup_mask:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
; X64-NEXT: vpermt2ps %zmm1, %zmm2, %zmm0 {%k1} {z}
; X64-NEXT: retq
@@ -327,7 +319,7 @@ define <16 x float> @combine_vpermt2var_16f32_vmovddup_mask_load(<16 x float> *%
;
; X64-LABEL: combine_vpermt2var_16f32_vmovddup_mask_load:
; X64: # BB#0:
-; X64-NEXT: kmovw %esi, %k1
+; X64-NEXT: kmovd %esi, %k1
; X64-NEXT: vmovaps (%rdi), %zmm2
; X64-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
; X64-NEXT: vpermi2ps %zmm0, %zmm2, %zmm1 {%k1} {z}
@@ -375,7 +367,7 @@ define <16 x float> @combine_vpermt2var_16f32_vmovshdup_mask(<16 x float> %x0, <
;
; X64-LABEL: combine_vpermt2var_16f32_vmovshdup_mask:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X64-NEXT: retq
%res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>, <16 x float> %x0, <16 x float> %x1, i16 %m)
@@ -419,7 +411,7 @@ define <16 x float> @combine_vpermt2var_16f32_vmovsldup_mask(<16 x float> %x0, <
;
; X64-LABEL: combine_vpermt2var_16f32_vmovsldup_mask:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X64-NEXT: retq
%res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>, <16 x float> %x0, <16 x float> %x1, i16 %m)
@@ -435,7 +427,7 @@ define <16 x float> @combine_vpermt2var_16f32_vmovsldup_mask_load(<16 x float> *
;
; X64-LABEL: combine_vpermt2var_16f32_vmovsldup_mask_load:
; X64: # BB#0:
-; X64-NEXT: kmovw %esi, %k1
+; X64-NEXT: kmovd %esi, %k1
; X64-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X64-NEXT: retq
%x0 = load <16 x float>, <16 x float> *%p0
@@ -480,7 +472,7 @@ define <16 x float> @combine_vpermt2var_16f32_vpermilps_mask(<16 x float> %x0, <
;
; X64-LABEL: combine_vpermt2var_16f32_vpermilps_mask:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; X64-NEXT: retq
%res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>, <16 x float> %x0, <16 x float> %x1, i16 %m)
@@ -496,7 +488,7 @@ define <16 x float> @combine_vpermt2var_16f32_vpermilps_mask_load(<16 x float> *
;
; X64-LABEL: combine_vpermt2var_16f32_vpermilps_mask_load:
; X64: # BB#0:
-; X64-NEXT: kmovw %esi, %k1
+; X64-NEXT: kmovd %esi, %k1
; X64-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = mem[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; X64-NEXT: retq
%x0 = load <16 x float>, <16 x float> *%p0
@@ -528,7 +520,7 @@ define <16 x i32> @combine_vpermt2var_16i32_identity_mask(<16 x i32> %x0, <16 x
;
; X64-LABEL: combine_vpermt2var_16i32_identity_mask:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovdqa32 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X64-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 {%k1} {z}
; X64-NEXT: vmovdqa32 {{.*#+}} zmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
@@ -648,8 +640,7 @@ define <16 x i32> @combine_permvar_as_vpbroadcastd512(<16 x i32> %x0) {
define <8 x i64> @combine_permvar_as_vpbroadcastq512(<8 x i64> %x0) {
; X32-LABEL: combine_permvar_as_vpbroadcastq512:
; X32: # BB#0:
-; X32-NEXT: vpxord %zmm1, %zmm1, %zmm1
-; X32-NEXT: vpermq %zmm0, %zmm1, %zmm0
+; X32-NEXT: vbroadcastsd %xmm0, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permvar_as_vpbroadcastq512:
@@ -663,8 +654,7 @@ define <8 x i64> @combine_permvar_as_vpbroadcastq512(<8 x i64> %x0) {
define <8 x i64> @combine_permvar_8i64_as_permq(<8 x i64> %x0, <8 x i64> %x1) {
; X32-LABEL: combine_permvar_8i64_as_permq:
; X32: # BB#0:
-; X32-NEXT: vmovdqa64 {{.*#+}} zmm1 = <3,0,2,0,1,0,u,u,u,u,6,0,5,0,4,0>
-; X32-NEXT: vpermq %zmm0, %zmm1, %zmm0
+; X32-NEXT: vpermq {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
; X32-NEXT: retl
;
; X64-LABEL: combine_permvar_8i64_as_permq:
@@ -679,14 +669,13 @@ define <8 x i64> @combine_permvar_8i64_as_permq_mask(<8 x i64> %x0, <8 x i64> %x
; X32: # BB#0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: kmovd %eax, %k1
-; X32-NEXT: vmovdqa64 {{.*#+}} zmm2 = <3,0,2,0,1,0,u,u,u,u,6,0,5,0,4,0>
-; X32-NEXT: vpermq %zmm0, %zmm2, %zmm1 {%k1}
+; X32-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,2,1,0,7,6,5,4]
; X32-NEXT: vmovdqa64 %zmm1, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permvar_8i64_as_permq_mask:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,2,1,0,7,6,5,4]
; X64-NEXT: vmovdqa64 %zmm1, %zmm0
; X64-NEXT: retq
@@ -718,7 +707,7 @@ define <8 x double> @combine_permvar_8f64_as_permpd_mask(<8 x double> %x0, <8 x
;
; X64-LABEL: combine_permvar_8f64_as_permpd_mask:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,2,1,0,7,6,5,4]
; X64-NEXT: vmovapd %zmm1, %zmm0
; X64-NEXT: retq
@@ -872,10 +861,6 @@ define <8 x double> @combine_vpermi2var_8f64_as_shufpd(<8 x double> %x0, <8 x do
define <8 x i64> @combine_vpermi2var_8i64_identity(<8 x i64> %x0, <8 x i64> %x1) {
; X32-LABEL: combine_vpermi2var_8i64_identity:
; X32: # BB#0:
-; X32-NEXT: vmovdqa64 {{.*#+}} zmm2 = <u,u,6,0,5,0,4,0,3,0,2,0,1,0,0,0>
-; X32-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
-; X32-NEXT: vmovdqa64 {{.*#+}} zmm0 = <u,u,14,0,5,0,12,0,3,0,10,0,1,0,8,0>
-; X32-NEXT: vpermi2q %zmm2, %zmm2, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermi2var_8i64_identity:
@@ -973,10 +958,8 @@ define <8 x double> @combine_vpermi2var_8f64_as_vpermpd(<8 x double> %x0, <8 x d
define <8 x i64> @combine_vpermt2var_8i64_as_vpermq(<8 x i64> %x0, <8 x i64> %x1) {
; X32-LABEL: combine_vpermt2var_8i64_as_vpermq:
; X32: # BB#0:
-; X32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [3,0,2,0,1,0,0,0,7,0,6,0,5,0,4,0]
-; X32-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
-; X32-NEXT: vmovdqa64 {{.*#+}} zmm0 = [12,0,5,0,14,0,7,0,8,0,1,0,10,0,3,0]
-; X32-NEXT: vpermi2q %zmm2, %zmm2, %zmm0
+; X32-NEXT: vmovdqa64 {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
+; X32-NEXT: vpermq %zmm0, %zmm1, %zmm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpermt2var_8i64_as_vpermq:
@@ -1133,3 +1116,18 @@ define <16 x float> @combine_vpermi2var_vpermvar_16f32_as_vperm2_zero(<16 x floa
ret <16 x float> %res1
}
+define <8 x i64> @combine_vpermvar_insertion_as_broadcast_v8i64(i64 %a0) {
+; X32-LABEL: combine_vpermvar_insertion_as_broadcast_v8i64:
+; X32: # BB#0:
+; X32-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %zmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermvar_insertion_as_broadcast_v8i64:
+; X64: # BB#0:
+; X64-NEXT: vmovq %rdi, %xmm0
+; X64-NEXT: vpbroadcastq %xmm0, %zmm0
+; X64-NEXT: retq
+ %1 = insertelement <8 x i64> undef, i64 %a0, i32 0
+ %2 = tail call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %1, <8 x i64> zeroinitializer, <8 x i64> undef, i8 -1)
+ ret <8 x i64> %2
+}
diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll b/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll
index ab10ba32e605..954dbe5edc63 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll
@@ -30,7 +30,7 @@ define <16 x i16> @combine_vpermt2var_16i16_identity_mask(<16 x i16> %x0, <16 x
;
; X64-LABEL: combine_vpermt2var_16i16_identity_mask:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovdqu {{.*#+}} ymm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X64-NEXT: vpermi2w %ymm1, %ymm0, %ymm2 {%k1} {z}
; X64-NEXT: vmovdqu {{.*#+}} ymm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll b/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll
index 2f58fa830d5d..ad6b5ee05494 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll
@@ -45,7 +45,7 @@ define <16 x i8> @combine_vpermt2var_16i8_identity_mask(<16 x i8> %x0, <16 x i8>
;
; X64-LABEL: combine_vpermt2var_16i8_identity_mask:
; X64: # BB#0:
-; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: kmovd %edi, %k1
; X64-NEXT: vmovdqu {{.*#+}} xmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
; X64-NEXT: vpermi2b %xmm1, %xmm0, %xmm2 {%k1} {z}
; X64-NEXT: vmovdqu {{.*#+}} xmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
diff --git a/test/CodeGen/X86/vector-shuffle-combining-sse41.ll b/test/CodeGen/X86/vector-shuffle-combining-sse41.ll
new file mode 100644
index 000000000000..29e2124a168c
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-combining-sse41.ll
@@ -0,0 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512F
+;
+; Combine tests involving SSE41 target shuffles (BLEND,INSERTPS,MOVZX)
+
+declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @combine_vpshufb_as_movzx(<16 x i8> %a0) {
+; SSE-LABEL: combine_vpshufb_as_movzx:
+; SSE: # BB#0:
+; SSE-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vpshufb_as_movzx:
+; AVX: # BB#0:
+; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX-NEXT: retq
+ %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 undef, i8 undef, i8 undef, i8 undef, i8 -1, i8 -1, i8 -1, i8 -1>)
+ ret <16 x i8> %res0
+}
diff --git a/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll b/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
index 7e29a48d5cd5..546b73126039 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
@@ -473,6 +473,58 @@ define <16 x i8> @combine_pshufb_as_unary_unpckhwd(<16 x i8> %a0) {
ret <16 x i8> %1
}
+define <8 x i16> @combine_pshufb_as_unpacklo_undef(<16 x i8> %a0) {
+; ALL-LABEL: combine_pshufb_as_unpacklo_undef:
+; ALL: # BB#0:
+; ALL-NEXT: retq
+ %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 undef, i8 undef, i8 0, i8 1, i8 undef, i8 undef, i8 2, i8 3, i8 undef, i8 undef, i8 4, i8 5, i8 undef, i8 undef, i8 6, i8 7>)
+ %2 = bitcast <16 x i8> %1 to <8 x i16>
+ %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
+ ret <8 x i16> %3
+}
+
+define <16 x i8> @combine_pshufb_as_unpackhi_undef(<16 x i8> %a0) {
+; ALL-LABEL: combine_pshufb_as_unpackhi_undef:
+; ALL: # BB#0:
+; ALL-NEXT: retq
+ %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 8, i8 undef, i8 9, i8 undef, i8 10, i8 undef, i8 11, i8 undef, i8 12, i8 undef, i8 13, i8 undef, i8 14, i8 undef, i8 15, i8 undef>)
+ %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @combine_pshufb_as_unpacklo_zero(<16 x i8> %a0) {
+; SSE-LABEL: combine_pshufb_as_unpacklo_zero:
+; SSE: # BB#0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pshufb_as_unpacklo_zero:
+; AVX: # BB#0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX-NEXT: retq
+ %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 4, i8 5, i8 6, i8 7>)
+ ret <16 x i8> %1
+}
+
+define <16 x i8> @combine_pshufb_as_unpackhi_zero(<16 x i8> %a0) {
+; SSE-LABEL: combine_pshufb_as_unpackhi_zero:
+; SSE: # BB#0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pshufb_as_unpackhi_zero:
+; AVX: # BB#0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX-NEXT: retq
+ %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 8, i8 -1, i8 9, i8 -1, i8 10, i8 -1, i8 11, i8 -1, i8 12, i8 -1, i8 13, i8 -1, i8 14, i8 -1, i8 15, i8 -1>)
+ ret <16 x i8> %1
+}
+
define <16 x i8> @combine_psrlw_pshufb(<8 x i16> %a0) {
; SSE-LABEL: combine_psrlw_pshufb:
; SSE: # BB#0:
@@ -552,6 +604,27 @@ define <16 x i8> @combine_unpckl_arg1_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
ret <16 x i8> %2
}
+define <8 x i16> @shuffle_combine_unpack_insert(<8 x i16> %a0) {
+; SSE-LABEL: shuffle_combine_unpack_insert:
+; SSE: # BB#0:
+; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,8,9,8,9,8,9,10,11,10,11]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_combine_unpack_insert:
+; AVX: # BB#0:
+; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,8,9,8,9,8,9,10,11,10,11]
+; AVX-NEXT: retq
+ %1 = extractelement <8 x i16> %a0, i32 2
+ %2 = extractelement <8 x i16> %a0, i32 4
+ %3 = insertelement <8 x i16> %a0, i16 %1, i32 4
+ %4 = insertelement <8 x i16> %a0, i16 %2, i32 2
+ %5 = shufflevector <8 x i16> %3, <8 x i16> %4, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ %6 = shufflevector <8 x i16> %5, <8 x i16> %3, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
+ %7 = shufflevector <8 x i16> %5, <8 x i16> %a0, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
+ %8 = shufflevector <8 x i16> %6, <8 x i16> %7, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i16> %8
+}
+
define <16 x i8> @constant_fold_pshufb() {
; SSE-LABEL: constant_fold_pshufb:
; SSE: # BB#0:
@@ -565,3 +638,38 @@ define <16 x i8> @constant_fold_pshufb() {
%1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> <i8 1, i8 -1, i8 -1, i8 -1, i8 undef, i8 undef, i8 -1, i8 -1, i8 15, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 7, i8 6>)
ret <16 x i8> %1
}
+
+; FIXME - unnecessary pshufb/broadcast being used - pshufb mask only needs lowest byte.
+define <16 x i8> @constant_fold_pshufb_2() {
+; SSE-LABEL: constant_fold_pshufb_2:
+; SSE: # BB#0:
+; SSE-NEXT: movl $2, %eax
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pshufb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: constant_fold_pshufb_2:
+; AVX1: # BB#0:
+; AVX1-NEXT: movl $2, %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: constant_fold_pshufb_2:
+; AVX2: # BB#0:
+; AVX2-NEXT: movl $2, %eax
+; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vpbroadcastb %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: constant_fold_pshufb_2:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: movl $2, %eax
+; AVX512F-NEXT: vmovd %eax, %xmm0
+; AVX512F-NEXT: vpbroadcastb %xmm0, %xmm0
+; AVX512F-NEXT: retq
+ %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> <i8 2, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
+ ret <16 x i8> %1
+}
diff --git a/test/CodeGen/X86/vector-shuffle-combining-xop.ll b/test/CodeGen/X86/vector-shuffle-combining-xop.ll
index b79df1facfa1..a9dff9164316 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-xop.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-xop.ll
@@ -318,8 +318,9 @@ define <4 x i32> @combine_vpperm_10zz32BA(<4 x i32> %a0, <4 x i32> %a1) {
ret <4 x i32> %res3
}
-define void @buildvector_v4f23_0404(float %a, float %b, <4 x float>* %ptr) {
-; X32-LABEL: buildvector_v4f23_0404:
+; FIXME: Duplicated load in i686
+define void @buildvector_v4f32_0404(float %a, float %b, <4 x float>* %ptr) {
+; X32-LABEL: buildvector_v4f32_0404:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -328,7 +329,7 @@ define void @buildvector_v4f23_0404(float %a, float %b, <4 x float>* %ptr) {
; X32-NEXT: vmovaps %xmm0, (%eax)
; X32-NEXT: retl
;
-; X64-LABEL: buildvector_v4f23_0404:
+; X64-LABEL: buildvector_v4f32_0404:
; X64: # BB#0:
; X64-NEXT: vpermil2ps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[0],xmm1[0]
; X64-NEXT: vmovaps %xmm0, (%rdi)
@@ -341,6 +342,30 @@ define void @buildvector_v4f23_0404(float %a, float %b, <4 x float>* %ptr) {
ret void
}
+define void @buildvector_v4f32_07z6(float %a, <4 x float> %b, <4 x float>* %ptr) {
+; X32-LABEL: buildvector_v4f32_07z6:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT: vpermil2ps {{.*#+}} xmm0 = xmm1[0],xmm0[3],zero,xmm0[2]
+; X32-NEXT: vmovaps %xmm0, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: buildvector_v4f32_07z6:
+; X64: # BB#0:
+; X64-NEXT: vpermil2ps {{.*#+}} xmm0 = xmm0[0],xmm1[3],zero,xmm1[2]
+; X64-NEXT: vmovaps %xmm0, (%rdi)
+; X64-NEXT: retq
+ %b2 = extractelement <4 x float> %b, i32 2
+ %b3 = extractelement <4 x float> %b, i32 3
+ %v0 = insertelement <4 x float> undef, float %a, i32 0
+ %v1 = insertelement <4 x float> %v0, float %b3, i32 1
+ %v2 = insertelement <4 x float> %v1, float 0.0, i32 2
+ %v3 = insertelement <4 x float> %v2, float %b2, i32 3
+ store <4 x float> %v3, <4 x float>* %ptr
+ ret void
+}
+
define <2 x double> @constant_fold_vpermil2pd() {
; X32-LABEL: constant_fold_vpermil2pd:
; X32: # BB#0:
@@ -416,16 +441,14 @@ define <4 x float> @PR31296(i8* %in) {
; X32: # BB#0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT: vmovaps {{.*#+}} xmm1 = <0,1,u,u>
-; X32-NEXT: vpermil2ps {{.*#+}} xmm0 = xmm0[0],xmm1[0,0,1]
+; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],zero,zero,mem[0]
; X32-NEXT: retl
;
; X64-LABEL: PR31296:
; X64: # BB#0: # %entry
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: vmovq %rax, %xmm0
-; X64-NEXT: vmovaps {{.*#+}} xmm1 = <0,1,u,u>
-; X64-NEXT: vpermil2ps {{.*#+}} xmm0 = xmm0[0],xmm1[0,0,1]
+; X64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],zero,zero,mem[0]
; X64-NEXT: retq
entry:
%0 = getelementptr i8, i8* %in, i32 0
diff --git a/test/CodeGen/X86/vector-shuffle-masked.ll b/test/CodeGen/X86/vector-shuffle-masked.ll
index 37fd022999e4..cedec449f6f4 100644
--- a/test/CodeGen/X86/vector-shuffle-masked.ll
+++ b/test/CodeGen/X86/vector-shuffle-masked.ll
@@ -4,7 +4,7 @@
define <4 x i32> @mask_shuffle_v4i32_1234(<4 x i32> %a, <4 x i32> %b, <4 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_shuffle_v4i32_1234:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} xmm2 {%k1} = xmm0[1,2,3],xmm1[0]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0
; CHECK-NEXT: retq
@@ -18,7 +18,7 @@ define <4 x i32> @mask_shuffle_v4i32_1234(<4 x i32> %a, <4 x i32> %b, <4 x i32>
define <4 x i32> @maskz_shuffle_v4i32_1234(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-LABEL: maskz_shuffle_v4i32_1234:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} xmm0 {%k1} {z} = xmm0[1,2,3],xmm1[0]
; CHECK-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
@@ -31,7 +31,7 @@ define <4 x i32> @maskz_shuffle_v4i32_1234(<4 x i32> %a, <4 x i32> %b, i8 %mask)
define <4 x i32> @mask_shuffle_v4i32_2345(<4 x i32> %a, <4 x i32> %b, <4 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_shuffle_v4i32_2345:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} xmm2 {%k1} = xmm0[2,3],xmm1[0,1]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0
; CHECK-NEXT: retq
@@ -45,7 +45,7 @@ define <4 x i32> @mask_shuffle_v4i32_2345(<4 x i32> %a, <4 x i32> %b, <4 x i32>
define <4 x i32> @maskz_shuffle_v4i32_2345(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-LABEL: maskz_shuffle_v4i32_2345:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} xmm0 {%k1} {z} = xmm0[2,3],xmm1[0,1]
; CHECK-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
@@ -58,7 +58,7 @@ define <4 x i32> @maskz_shuffle_v4i32_2345(<4 x i32> %a, <4 x i32> %b, i8 %mask)
define <2 x i64> @mask_shuffle_v2i64_12(<2 x i64> %a, <2 x i64> %b, <2 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_shuffle_v2i64_12:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignq {{.*#+}} xmm2 {%k1} = xmm0[1],xmm1[0]
; CHECK-NEXT: vmovdqa %xmm2, %xmm0
; CHECK-NEXT: retq
@@ -72,7 +72,7 @@ define <2 x i64> @mask_shuffle_v2i64_12(<2 x i64> %a, <2 x i64> %b, <2 x i64> %p
define <2 x i64> @maskz_shuffle_v2i64_12(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
; CHECK-LABEL: maskz_shuffle_v2i64_12:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignq {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[0]
; CHECK-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 2>
@@ -85,7 +85,7 @@ define <2 x i64> @maskz_shuffle_v2i64_12(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
define <4 x i64> @mask_shuffle_v4i64_1234(<4 x i64> %a, <4 x i64> %b, <4 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_shuffle_v4i64_1234:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignq {{.*#+}} ymm2 {%k1} = ymm0[1,2,3],ymm1[0]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0
; CHECK-NEXT: retq
@@ -99,7 +99,7 @@ define <4 x i64> @mask_shuffle_v4i64_1234(<4 x i64> %a, <4 x i64> %b, <4 x i64>
define <4 x i64> @maskz_shuffle_v4i64_1234(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
; CHECK-LABEL: maskz_shuffle_v4i64_1234:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignq {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,3],ymm1[0]
; CHECK-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
@@ -112,7 +112,7 @@ define <4 x i64> @maskz_shuffle_v4i64_1234(<4 x i64> %a, <4 x i64> %b, i8 %mask)
define <4 x i64> @mask_shuffle_v4i64_1230(<4 x i64> %a, <4 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_shuffle_v4i64_1230:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[1,2,3,0]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -126,7 +126,7 @@ define <4 x i64> @mask_shuffle_v4i64_1230(<4 x i64> %a, <4 x i64> %passthru, i8
define <4 x i64> @maskz_shuffle_v4i64_1230(<4 x i64> %a, i8 %mask) {
; CHECK-LABEL: maskz_shuffle_v4i64_1230:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,3,0]
; CHECK-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
@@ -139,7 +139,7 @@ define <4 x i64> @maskz_shuffle_v4i64_1230(<4 x i64> %a, i8 %mask) {
define <8 x i32> @mask_shuffle_v8i32_12345678(<8 x i32> %a, <8 x i32> %b, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_shuffle_v8i32_12345678:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} ymm2 {%k1} = ymm0[1,2,3,4,5,6,7],ymm1[0]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0
; CHECK-NEXT: retq
@@ -152,7 +152,7 @@ define <8 x i32> @mask_shuffle_v8i32_12345678(<8 x i32> %a, <8 x i32> %b, <8 x i
define <8 x i32> @maskz_shuffle_v8i32_12345678(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
; CHECK-LABEL: maskz_shuffle_v8i32_12345678:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,3,4,5,6,7],ymm1[0]
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
@@ -164,7 +164,7 @@ define <8 x i32> @maskz_shuffle_v8i32_12345678(<8 x i32> %a, <8 x i32> %b, i8 %m
define <8 x i32> @mask_shuffle_v8i32_23456789(<8 x i32> %a, <8 x i32> %b, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_shuffle_v8i32_23456789:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} ymm2 {%k1} = ymm0[2,3,4,5,6,7],ymm1[0,1]
; CHECK-NEXT: vmovdqa %ymm2, %ymm0
; CHECK-NEXT: retq
@@ -177,7 +177,7 @@ define <8 x i32> @mask_shuffle_v8i32_23456789(<8 x i32> %a, <8 x i32> %b, <8 x i
define <8 x i32> @maskz_shuffle_v8i32_23456789(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
; CHECK-LABEL: maskz_shuffle_v8i32_23456789:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3,4,5,6,7],ymm1[0,1]
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
@@ -189,7 +189,7 @@ define <8 x i32> @maskz_shuffle_v8i32_23456789(<8 x i32> %a, <8 x i32> %b, i8 %m
define <8 x i32> @mask_shuffle_v8i32_12345670(<8 x i32> %a, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_shuffle_v8i32_12345670:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} ymm1 {%k1} = ymm0[1,2,3,4,5,6,7,0]
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -202,7 +202,7 @@ define <8 x i32> @mask_shuffle_v8i32_12345670(<8 x i32> %a, <8 x i32> %passthru,
define <8 x i32> @maskz_shuffle_v8i32_12345670(<8 x i32> %a, i8 %mask) {
; CHECK-LABEL: maskz_shuffle_v8i32_12345670:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,3,4,5,6,7,0]
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0>
@@ -215,9 +215,8 @@ define <8 x i32> @mask_shuffle_v8i32_23456701(<8 x i32> %a, <8 x i32> %passthru,
; CHECK-LABEL: mask_shuffle_v8i32_23456701:
; CHECK: # BB#0:
; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,2,3,0]
-; CHECK-NEXT: kmovb %edi, %k1
-; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1>
%mask.cast = bitcast i8 %mask to <8 x i1>
@@ -229,7 +228,7 @@ define <8 x i32> @maskz_shuffle_v8i32_23456701(<8 x i32> %a, i8 %mask) {
; CHECK-LABEL: maskz_shuffle_v8i32_23456701:
; CHECK: # BB#0:
; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,2,3,0]
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1>
@@ -241,9 +240,10 @@ define <8 x i32> @maskz_shuffle_v8i32_23456701(<8 x i32> %a, i8 %mask) {
define <4 x i32> @mask_extract_v16i32_v4i32_0(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16i32_v4i32_0:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextracti32x4 $0, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%mask.cast = bitcast i8 %mask to <8 x i1>
@@ -255,9 +255,10 @@ define <4 x i32> @mask_extract_v16i32_v4i32_0(<16 x i32> %a, <4 x i32> %passthru
define <4 x i32> @mask_extract_v16i32_v4i32_1(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16i32_v4i32_1:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextracti32x4 $1, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%mask.cast = bitcast i8 %mask to <8 x i1>
@@ -269,9 +270,10 @@ define <4 x i32> @mask_extract_v16i32_v4i32_1(<16 x i32> %a, <4 x i32> %passthru
define <4 x i32> @mask_extract_v16i32_v4i32_2(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16i32_v4i32_2:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextracti32x4 $2, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
%mask.cast = bitcast i8 %mask to <8 x i1>
@@ -283,9 +285,10 @@ define <4 x i32> @mask_extract_v16i32_v4i32_2(<16 x i32> %a, <4 x i32> %passthru
define <4 x i32> @mask_extract_v16i32_v4i32_3(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16i32_v4i32_3:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextracti32x4 $3, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
%mask.cast = bitcast i8 %mask to <8 x i1>
@@ -297,9 +300,10 @@ define <4 x i32> @mask_extract_v16i32_v4i32_3(<16 x i32> %a, <4 x i32> %passthru
define <4 x float> @mask_extract_v16f32_v4f32_0(<16 x float> %a, <4 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16f32_v4f32_0:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf32x4 $0, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%mask.cast = bitcast i8 %mask to <8 x i1>
@@ -311,9 +315,10 @@ define <4 x float> @mask_extract_v16f32_v4f32_0(<16 x float> %a, <4 x float> %pa
define <4 x float> @mask_extract_v16f32_v4f32_1(<16 x float> %a, <4 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16f32_v4f32_1:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf32x4 $1, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%mask.cast = bitcast i8 %mask to <8 x i1>
@@ -325,9 +330,10 @@ define <4 x float> @mask_extract_v16f32_v4f32_1(<16 x float> %a, <4 x float> %pa
define <4 x float> @mask_extract_v16f32_v4f32_2(<16 x float> %a, <4 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16f32_v4f32_2:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> undef, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
%mask.cast = bitcast i8 %mask to <8 x i1>
@@ -339,9 +345,10 @@ define <4 x float> @mask_extract_v16f32_v4f32_2(<16 x float> %a, <4 x float> %pa
define <4 x float> @mask_extract_v16f32_v4f32_3(<16 x float> %a, <4 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16f32_v4f32_3:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf32x4 $3, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> undef, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
%mask.cast = bitcast i8 %mask to <8 x i1>
@@ -353,7 +360,7 @@ define <4 x float> @mask_extract_v16f32_v4f32_3(<16 x float> %a, <4 x float> %pa
define <8 x i32> @mask_extract_v16i32_v8i32_0(<16 x i32> %a, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16i32_v8i32_0:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextracti32x8 $0, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -366,7 +373,7 @@ define <8 x i32> @mask_extract_v16i32_v8i32_0(<16 x i32> %a, <8 x i32> %passthru
define <8 x i32> @mask_extract_v16i32_v8i32_1(<16 x i32> %a, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16i32_v8i32_1:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextracti32x8 $1, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -379,7 +386,7 @@ define <8 x i32> @mask_extract_v16i32_v8i32_1(<16 x i32> %a, <8 x i32> %passthru
define <8 x float> @mask_extract_v16f32_v8f32_0(<16 x float> %a, <8 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16f32_v8f32_0:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf32x8 $0, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -392,7 +399,7 @@ define <8 x float> @mask_extract_v16f32_v8f32_0(<16 x float> %a, <8 x float> %pa
define <8 x float> @mask_extract_v16f32_v8f32_1(<16 x float> %a, <8 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v16f32_v8f32_1:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -405,9 +412,10 @@ define <8 x float> @mask_extract_v16f32_v8f32_1(<16 x float> %a, <8 x float> %pa
define <2 x i64> @mask_extract_v8i64_v2i64_0(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i64_v2i64_0:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextracti64x2 $0, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <2 x i32> <i32 0, i32 1>
%mask.cast = bitcast i8 %mask to <8 x i1>
@@ -419,9 +427,10 @@ define <2 x i64> @mask_extract_v8i64_v2i64_0(<8 x i64> %a, <2 x i64> %passthru,
define <2 x i64> @mask_extract_v8i64_v2i64_1(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i64_v2i64_1:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextracti64x2 $1, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <2 x i32> <i32 2, i32 3>
%mask.cast = bitcast i8 %mask to <8 x i1>
@@ -433,9 +442,10 @@ define <2 x i64> @mask_extract_v8i64_v2i64_1(<8 x i64> %a, <2 x i64> %passthru,
define <2 x i64> @mask_extract_v8i64_v2i64_2(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i64_v2i64_2:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextracti64x2 $2, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <2 x i32> <i32 4, i32 5>
%mask.cast = bitcast i8 %mask to <8 x i1>
@@ -447,9 +457,10 @@ define <2 x i64> @mask_extract_v8i64_v2i64_2(<8 x i64> %a, <2 x i64> %passthru,
define <2 x i64> @mask_extract_v8i64_v2i64_3(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i64_v2i64_3:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextracti64x2 $3, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <2 x i32> <i32 6, i32 7>
%mask.cast = bitcast i8 %mask to <8 x i1>
@@ -461,9 +472,10 @@ define <2 x i64> @mask_extract_v8i64_v2i64_3(<8 x i64> %a, <2 x i64> %passthru,
define <2 x double> @mask_extract_v8f64_v2f64_0(<8 x double> %a, <2 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f64_v2f64_0:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf64x2 $0, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> undef, <2 x i32> <i32 0, i32 1>
%mask.cast = bitcast i8 %mask to <8 x i1>
@@ -475,9 +487,10 @@ define <2 x double> @mask_extract_v8f64_v2f64_0(<8 x double> %a, <2 x double> %p
define <2 x double> @mask_extract_v8f64_v2f64_1(<8 x double> %a, <2 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f64_v2f64_1:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf64x2 $1, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> undef, <2 x i32> <i32 2, i32 3>
%mask.cast = bitcast i8 %mask to <8 x i1>
@@ -489,9 +502,10 @@ define <2 x double> @mask_extract_v8f64_v2f64_1(<8 x double> %a, <2 x double> %p
define <2 x double> @mask_extract_v8f64_v2f64_2(<8 x double> %a, <2 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f64_v2f64_2:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf64x2 $2, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> undef, <2 x i32> <i32 4, i32 5>
%mask.cast = bitcast i8 %mask to <8 x i1>
@@ -503,9 +517,10 @@ define <2 x double> @mask_extract_v8f64_v2f64_2(<8 x double> %a, <2 x double> %p
define <2 x double> @mask_extract_v8f64_v2f64_3(<8 x double> %a, <2 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f64_v2f64_3:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf64x2 $3, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> undef, <2 x i32> <i32 6, i32 7>
%mask.cast = bitcast i8 %mask to <8 x i1>
@@ -517,7 +532,7 @@ define <2 x double> @mask_extract_v8f64_v2f64_3(<8 x double> %a, <2 x double> %p
define <4 x i64> @mask_extract_v8i64_v4i64_0(<8 x i64> %a, <4 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i64_v4i64_0:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextracti64x4 $0, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -531,7 +546,7 @@ define <4 x i64> @mask_extract_v8i64_v4i64_0(<8 x i64> %a, <4 x i64> %passthru,
define <4 x i64> @mask_extract_v8i64_v4i64_1(<8 x i64> %a, <4 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i64_v4i64_1:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -545,7 +560,7 @@ define <4 x i64> @mask_extract_v8i64_v4i64_1(<8 x i64> %a, <4 x i64> %passthru,
define <4 x double> @mask_extract_v8f64_v4f64_0(<8 x double> %a, <4 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f64_v4f64_0:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf64x4 $0, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovapd %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -559,7 +574,7 @@ define <4 x double> @mask_extract_v8f64_v4f64_0(<8 x double> %a, <4 x double> %p
define <4 x double> @mask_extract_v8f64_v4f64_1(<8 x double> %a, <4 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f64_v4f64_1:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovapd %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -573,7 +588,7 @@ define <4 x double> @mask_extract_v8f64_v4f64_1(<8 x double> %a, <4 x double> %p
define <8 x i32> @mask_extract_v8i64_v8i32_1(<8 x i64> %a, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8i64_v8i32_1:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextracti32x8 $1, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -587,7 +602,7 @@ define <8 x i32> @mask_extract_v8i64_v8i32_1(<8 x i64> %a, <8 x i32> %passthru,
define <8 x float> @mask_extract_v8f64_v8f32_1(<8 x double> %a, <8 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_extract_v8f64_v8f32_1:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -601,9 +616,10 @@ define <8 x float> @mask_extract_v8f64_v8f32_1(<8 x double> %a, <8 x float> %pas
define <4 x i32> @mask_cast_extract_v8i64_v4i32_1(<8 x i64> %a, <4 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v8i64_v4i32_1:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextracti32x4 $1, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <2 x i32> <i32 2, i32 3>
%shuffle.cast = bitcast <2 x i64> %shuffle to <4 x i32>
@@ -616,9 +632,10 @@ define <4 x i32> @mask_cast_extract_v8i64_v4i32_1(<8 x i64> %a, <4 x i32> %passt
define <4 x float> @mask_cast_extract_v8f64_v4f32_1(<8 x double> %a, <4 x float> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v8f64_v4f32_1:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf32x4 $1, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> undef, <2 x i32> <i32 2, i32 3>
%shuffle.cast = bitcast <2 x double> %shuffle to <4 x float>
@@ -631,7 +648,7 @@ define <4 x float> @mask_cast_extract_v8f64_v4f32_1(<8 x double> %a, <4 x float>
define <4 x i64> @mask_cast_extract_v16i32_v4i64_1(<16 x i32> %a, <4 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16i32_v4i64_1:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -646,7 +663,7 @@ define <4 x i64> @mask_cast_extract_v16i32_v4i64_1(<16 x i32> %a, <4 x i64> %pas
define <4 x double> @mask_cast_extract_v16f32_v4f64_1(<16 x float> %a, <4 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16f32_v4f64_1:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vmovapd %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -661,9 +678,10 @@ define <4 x double> @mask_cast_extract_v16f32_v4f64_1(<16 x float> %a, <4 x doub
define <2 x i64> @mask_cast_extract_v16i32_v2i64_1(<16 x i32> %a, <2 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16i32_v2i64_1:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextracti64x2 $1, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%shuffle.cast = bitcast <4 x i32> %shuffle to <2 x i64>
@@ -676,9 +694,10 @@ define <2 x i64> @mask_cast_extract_v16i32_v2i64_1(<16 x i32> %a, <2 x i64> %pas
define <2 x double> @mask_cast_extract_v16f32_v2f64_1(<16 x float> %a, <2 x double> %passthru, i8 %mask) {
; CHECK-LABEL: mask_cast_extract_v16f32_v2f64_1:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf64x2 $1, %zmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%shuffle.cast = bitcast <4 x float> %shuffle to <2 x double>
@@ -691,7 +710,7 @@ define <2 x double> @mask_cast_extract_v16f32_v2f64_1(<16 x float> %a, <2 x doub
define <2 x double> @broadcast_v4f32_0101_from_v2f32_mask(double* %x, <2 x double> %passthru, i8 %mask) {
; CHECK-LABEL: broadcast_v4f32_0101_from_v2f32_mask:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = mem[0,0]
; CHECK-NEXT: retq
%q = load double, double* %x, align 1
@@ -706,7 +725,7 @@ define <2 x double> @broadcast_v4f32_0101_from_v2f32_mask(double* %x, <2 x doubl
define <2 x double> @broadcast_v4f32_0101_from_v2f32_maskz(double* %x, i8 %mask) {
; CHECK-LABEL: broadcast_v4f32_0101_from_v2f32_maskz:
; CHECK: # BB#0:
-; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = mem[0,0]
; CHECK-NEXT: retq
%q = load double, double* %x, align 1
@@ -717,3 +736,173 @@ define <2 x double> @broadcast_v4f32_0101_from_v2f32_maskz(double* %x, i8 %mask)
%res = select <2 x i1> %mask.extract, <2 x double> %vecinit2.i, <2 x double> zeroinitializer
ret <2 x double> %res
}
+
+define <8 x float> @test_broadcast_2f64_8f32(<2 x double> *%p, i8 %mask) nounwind {
+; CHECK-LABEL: test_broadcast_2f64_8f32:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
+; CHECK-NEXT: retq
+ %1 = load <2 x double>, <2 x double> *%p
+ %2 = shufflevector <2 x double> %1, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+ %3 = bitcast <4 x double> %2 to <8 x float>
+ %mask.cast = bitcast i8 %mask to <8 x i1>
+ %res = select <8 x i1> %mask.cast, <8 x float> %3, <8 x float> zeroinitializer
+ ret <8 x float> %res
+}
+
+define <8 x i32> @test_broadcast_2i64_8i32(<2 x i64> *%p, i8 %mask) nounwind {
+; CHECK-LABEL: test_broadcast_2i64_8i32:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
+; CHECK-NEXT: retq
+ %1 = load <2 x i64>, <2 x i64> *%p
+ %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+ %3 = bitcast <4 x i64> %2 to <8 x i32>
+ %mask.cast = bitcast i8 %mask to <8 x i1>
+ %res = select <8 x i1> %mask.cast, <8 x i32> %3, <8 x i32> zeroinitializer
+ ret <8 x i32> %res
+}
+
+define <16 x float> @test_broadcast_2f64_16f32(<2 x double> *%p, i16 %mask) nounwind {
+; CHECK-LABEL: test_broadcast_2f64_16f32:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; CHECK-NEXT: retq
+ %1 = load <2 x double>, <2 x double> *%p
+ %2 = shufflevector <2 x double> %1, <2 x double> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ %3 = bitcast <8 x double> %2 to <16 x float>
+ %mask.cast = bitcast i16 %mask to <16 x i1>
+ %res = select <16 x i1> %mask.cast, <16 x float> %3, <16 x float> zeroinitializer
+ ret <16 x float> %res
+}
+
+define <16 x i32> @test_broadcast_2i64_16i32(<2 x i64> *%p, i16 %mask) nounwind {
+; CHECK-LABEL: test_broadcast_2i64_16i32:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; CHECK-NEXT: retq
+ %1 = load <2 x i64>, <2 x i64> *%p
+ %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ %3 = bitcast <8 x i64> %2 to <16 x i32>
+ %mask.cast = bitcast i16 %mask to <16 x i1>
+ %res = select <16 x i1> %mask.cast, <16 x i32> %3, <16 x i32> zeroinitializer
+ ret <16 x i32> %res
+}
+
+define <16 x float> @test_broadcast_4f64_16f32(<4 x double> *%p, i16 %mask) nounwind {
+; CHECK-LABEL: test_broadcast_4f64_16f32:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; CHECK-NEXT: retq
+ %1 = load <4 x double>, <4 x double> *%p
+ %2 = shufflevector <4 x double> %1, <4 x double> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ %3 = bitcast <8 x double> %2 to <16 x float>
+ %mask.cast = bitcast i16 %mask to <16 x i1>
+ %res = select <16 x i1> %mask.cast, <16 x float> %3, <16 x float> zeroinitializer
+ ret <16 x float> %res
+}
+
+define <16 x i32> @test_broadcast_4i64_16i32(<4 x i64> *%p, i16 %mask) nounwind {
+; CHECK-LABEL: test_broadcast_4i64_16i32:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; CHECK-NEXT: retq
+ %1 = load <4 x i64>, <4 x i64> *%p
+ %2 = shufflevector <4 x i64> %1, <4 x i64> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ %3 = bitcast <8 x i64> %2 to <16 x i32>
+ %mask.cast = bitcast i16 %mask to <16 x i1>
+ %res = select <16 x i1> %mask.cast, <16 x i32> %3, <16 x i32> zeroinitializer
+ ret <16 x i32> %res
+}
+
+define <4 x double> @test_broadcast_4f32_4f64(<4 x float> *%p, i8 %mask) nounwind {
+; CHECK-LABEL: test_broadcast_4f32_4f64:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1]
+; CHECK-NEXT: retq
+ %1 = load <4 x float>, <4 x float> *%p
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ %3 = bitcast <8 x float> %2 to <4 x double>
+ %mask.cast = bitcast i8 %mask to <8 x i1>
+ %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %res = select <4 x i1> %mask.extract, <4 x double> %3, <4 x double> zeroinitializer
+ ret <4 x double> %res
+}
+
+define <4 x i64> @test_broadcast_4i32_4i64(<4 x i32> *%p, i8 %mask) nounwind {
+; CHECK-LABEL: test_broadcast_4i32_4i64:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1]
+; CHECK-NEXT: retq
+ %1 = load <4 x i32>, <4 x i32> *%p
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ %3 = bitcast <8 x i32> %2 to <4 x i64>
+ %mask.cast = bitcast i8 %mask to <8 x i1>
+ %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %res = select <4 x i1> %mask.extract, <4 x i64> %3, <4 x i64> zeroinitializer
+ ret <4 x i64> %res
+}
+
+define <8 x double> @test_broadcast_4f32_8f64(<4 x float> *%p, i8 %mask) nounwind {
+; CHECK-LABEL: test_broadcast_4f32_8f64:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1]
+; CHECK-NEXT: retq
+ %1 = load <4 x float>, <4 x float> *%p
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ %3 = bitcast <16 x float> %2 to <8 x double>
+ %mask.cast = bitcast i8 %mask to <8 x i1>
+ %res = select <8 x i1> %mask.cast, <8 x double> %3, <8 x double> zeroinitializer
+ ret <8 x double> %res
+}
+
+define <8 x i64> @test_broadcast_4i32_8i64(<4 x i32> *%p, i8 %mask) nounwind {
+; CHECK-LABEL: test_broadcast_4i32_8i64:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1]
+; CHECK-NEXT: retq
+ %1 = load <4 x i32>, <4 x i32> *%p
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ %3 = bitcast <16 x i32> %2 to <8 x i64>
+ %mask.cast = bitcast i8 %mask to <8 x i1>
+ %res = select <8 x i1> %mask.cast, <8 x i64> %3, <8 x i64> zeroinitializer
+ ret <8 x i64> %res
+}
+
+define <8 x double> @test_broadcast_8f32_8f64(<8 x float> *%p, i8 %mask) nounwind {
+; CHECK-LABEL: test_broadcast_8f32_8f64:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
+; CHECK-NEXT: retq
+ %1 = load <8 x float>, <8 x float> *%p
+ %2 = shufflevector <8 x float> %1, <8 x float> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %3 = bitcast <16 x float> %2 to <8 x double>
+ %mask.cast = bitcast i8 %mask to <8 x i1>
+ %res = select <8 x i1> %mask.cast, <8 x double> %3, <8 x double> zeroinitializer
+ ret <8 x double> %res
+}
+
+define <8 x i64> @test_broadcast_8i32_8i64(<8 x i32> *%p, i8 %mask) nounwind {
+; CHECK-LABEL: test_broadcast_8i32_8i64:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3]
+; CHECK-NEXT: retq
+ %1 = load <8 x i32>, <8 x i32> *%p
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %3 = bitcast <16 x i32> %2 to <8 x i64>
+ %mask.cast = bitcast i8 %mask to <8 x i1>
+ %res = select <8 x i1> %mask.cast, <8 x i64> %3, <8 x i64> zeroinitializer
+ ret <8 x i64> %res
+}
diff --git a/test/CodeGen/X86/vector-shuffle-v1.ll b/test/CodeGen/X86/vector-shuffle-v1.ll
index 4312b67546d2..4ec6b86247d5 100644
--- a/test/CodeGen/X86/vector-shuffle-v1.ll
+++ b/test/CodeGen/X86/vector-shuffle-v1.ll
@@ -35,11 +35,11 @@ define <2 x i1> @shuf2i1_1_2(<2 x i1> %a) {
; VL_BW_DQ: # BB#0:
; VL_BW_DQ-NEXT: vpsllq $63, %xmm0, %xmm0
; VL_BW_DQ-NEXT: vptestmq %xmm0, %xmm0, %k0
-; VL_BW_DQ-NEXT: vpmovm2q %k0, %xmm0
; VL_BW_DQ-NEXT: movb $1, %al
-; VL_BW_DQ-NEXT: kmovb %eax, %k0
+; VL_BW_DQ-NEXT: kmovd %eax, %k1
+; VL_BW_DQ-NEXT: vpmovm2q %k1, %xmm0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %xmm1
-; VL_BW_DQ-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; VL_BW_DQ-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; VL_BW_DQ-NEXT: vpmovq2m %xmm0, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %xmm0
; VL_BW_DQ-NEXT: retq
@@ -78,6 +78,7 @@ define <8 x i1> @shuf8i1_3_6_1_0_3_7_7_0(<8 x i64> %a, <8 x i64> %b, <8 x i64> %
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf8i1_3_6_1_0_3_7_7_0:
@@ -88,6 +89,7 @@ define <8 x i1> @shuf8i1_3_6_1_0_3_7_7_0(<8 x i64> %a, <8 x i64> %b, <8 x i64> %
; VL_BW_DQ-NEXT: vpermq %zmm0, %zmm1, %zmm0
; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0
; VL_BW_DQ-NEXT: vpmovm2w %k0, %xmm0
+; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%a2 = icmp eq <8 x i64> %a, %a1
%b2 = icmp eq <8 x i64> %b, %b1
@@ -108,6 +110,7 @@ define <16 x i1> @shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0(<16 x i32> %a, <1
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
@@ -120,6 +123,7 @@ define <16 x i1> @shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0(<16 x i32> %a, <1
; VL_BW_DQ-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
; VL_BW_DQ-NEXT: vpmovd2m %zmm2, %k0
; VL_BW_DQ-NEXT: vpmovm2b %k0, %xmm0
+; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%a2 = icmp eq <16 x i32> %a, %a1
%b2 = icmp eq <16 x i32> %b, %b1
@@ -162,16 +166,18 @@ define <8 x i1> @shuf8i1_u_2_u_u_2_u_2_u(i8 %a) {
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf8i1_u_2_u_u_2_u_2_u:
; VL_BW_DQ: # BB#0:
-; VL_BW_DQ-NEXT: kmovb %edi, %k0
+; VL_BW_DQ-NEXT: kmovd %edi, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
; VL_BW_DQ-NEXT: vextracti64x2 $1, %zmm0, %xmm0
; VL_BW_DQ-NEXT: vpbroadcastq %xmm0, %zmm0
; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0
; VL_BW_DQ-NEXT: vpmovm2w %k0, %xmm0
+; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
%c = shufflevector < 8 x i1> %b, <8 x i1>undef, <8 x i32> <i32 undef, i32 2, i32 undef, i32 undef, i32 2, i32 undef, i32 2, i32 undef>
@@ -189,17 +195,21 @@ define i8 @shuf8i1_10_2_9_u_3_u_2_u(i8 %a) {
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf8i1_10_2_9_u_3_u_2_u:
; VL_BW_DQ: # BB#0:
-; VL_BW_DQ-NEXT: kmovb %edi, %k0
+; VL_BW_DQ-NEXT: kmovd %edi, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
; VL_BW_DQ-NEXT: vpxord %zmm1, %zmm1, %zmm1
; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = <8,2,10,u,3,u,2,u>
; VL_BW_DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
-; VL_BW_DQ-NEXT: kmovb %k0, %eax
+; VL_BW_DQ-NEXT: kmovd %k0, %eax
+; VL_BW_DQ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
%c = shufflevector < 8 x i1> %b, <8 x i1> zeroinitializer, <8 x i32> <i32 10, i32 2, i32 9, i32 undef, i32 3, i32 undef, i32 2, i32 undef>
@@ -216,15 +226,19 @@ define i8 @shuf8i1_0_1_4_5_u_u_u_u(i8 %a) {
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf8i1_0_1_4_5_u_u_u_u:
; VL_BW_DQ: # BB#0:
-; VL_BW_DQ-NEXT: kmovb %edi, %k0
+; VL_BW_DQ-NEXT: kmovd %edi, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
; VL_BW_DQ-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5,0,1,0,1]
; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0
-; VL_BW_DQ-NEXT: kmovb %k0, %eax
+; VL_BW_DQ-NEXT: kmovd %k0, %eax
+; VL_BW_DQ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
%c = shufflevector < 8 x i1> %b, <8 x i1> undef, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -243,17 +257,21 @@ define i8 @shuf8i1_9_6_1_0_3_7_7_0(i8 %a) {
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf8i1_9_6_1_0_3_7_7_0:
; VL_BW_DQ: # BB#0:
-; VL_BW_DQ-NEXT: kmovb %edi, %k0
+; VL_BW_DQ-NEXT: kmovd %edi, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
; VL_BW_DQ-NEXT: vpxord %zmm1, %zmm1, %zmm1
; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [8,6,1,0,3,7,7,0]
; VL_BW_DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
-; VL_BW_DQ-NEXT: kmovb %k0, %eax
+; VL_BW_DQ-NEXT: kmovd %k0, %eax
+; VL_BW_DQ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
%c = shufflevector <8 x i1> %b, <8 x i1> zeroinitializer, <8 x i32> <i32 9, i32 6, i32 1, i32 0, i32 3, i32 7, i32 7, i32 0>
@@ -272,17 +290,21 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0(i8 %a) {
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf8i1_9_6_1_10_3_7_7_0:
; VL_BW_DQ: # BB#0:
-; VL_BW_DQ-NEXT: kmovb %edi, %k0
+; VL_BW_DQ-NEXT: kmovd %edi, %k0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm0
; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [9,1,2,10,4,5,6,7]
; VL_BW_DQ-NEXT: vpxord %zmm2, %zmm2, %zmm2
; VL_BW_DQ-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
-; VL_BW_DQ-NEXT: kmovb %k0, %eax
+; VL_BW_DQ-NEXT: kmovd %k0, %eax
+; VL_BW_DQ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
%c = shufflevector <8 x i1> zeroinitializer, <8 x i1> %b, <8 x i32> <i32 9, i32 6, i32 1, i32 10, i32 3, i32 7, i32 7, i32 0>
@@ -303,19 +325,23 @@ define i8 @shuf8i1__9_6_1_10_3_7_7_1(i8 %a) {
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf8i1__9_6_1_10_3_7_7_1:
; VL_BW_DQ: # BB#0:
-; VL_BW_DQ-NEXT: kmovb %edi, %k0
+; VL_BW_DQ-NEXT: kmovd %edi, %k0
; VL_BW_DQ-NEXT: movb $51, %al
-; VL_BW_DQ-NEXT: kmovb %eax, %k1
+; VL_BW_DQ-NEXT: kmovd %eax, %k1
; VL_BW_DQ-NEXT: vpmovm2q %k1, %zmm0
; VL_BW_DQ-NEXT: vpmovm2q %k0, %zmm1
; VL_BW_DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [9,6,1,0,3,7,7,1]
; VL_BW_DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
-; VL_BW_DQ-NEXT: kmovb %k0, %eax
+; VL_BW_DQ-NEXT: kmovd %k0, %eax
+; VL_BW_DQ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
%c = shufflevector <8 x i1> <i1 1, i1 1, i1 0, i1 0, i1 1, i1 1, i1 0, i1 0>, <8 x i1> %b, <8 x i32> <i32 9, i32 6, i32 1, i32 0, i32 3, i32 7, i32 7, i32 1>
@@ -336,6 +362,8 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0_all_ones(<8 x i1> %a) {
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf8i1_9_6_1_10_3_7_7_0_all_ones:
@@ -347,7 +375,9 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0_all_ones(<8 x i1> %a) {
; VL_BW_DQ-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
; VL_BW_DQ-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
-; VL_BW_DQ-NEXT: kmovb %k0, %eax
+; VL_BW_DQ-NEXT: kmovd %k0, %eax
+; VL_BW_DQ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%c = shufflevector <8 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, <8 x i1> %a, <8 x i32> <i32 9, i32 6, i32 1, i32 0, i32 3, i32 7, i32 7, i32 0>
%c1 = bitcast <8 x i1>%c to i8
@@ -364,15 +394,19 @@ define i16 @shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0(i16 %a) {
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0:
; VL_BW_DQ: # BB#0:
-; VL_BW_DQ-NEXT: kmovw %edi, %k0
+; VL_BW_DQ-NEXT: kmovd %edi, %k0
; VL_BW_DQ-NEXT: vpmovm2d %k0, %zmm0
; VL_BW_DQ-NEXT: vpbroadcastd %xmm0, %zmm0
; VL_BW_DQ-NEXT: vpmovd2m %zmm0, %k0
-; VL_BW_DQ-NEXT: kmovw %k0, %eax
+; VL_BW_DQ-NEXT: kmovd %k0, %eax
+; VL_BW_DQ-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i16 %a to <16 x i1>
%c = shufflevector < 16 x i1> %b, <16 x i1> undef, <16 x i32> zeroinitializer
@@ -413,6 +447,7 @@ define i64 @shuf64i1_zero(i64 %a) {
; AVX512F-NEXT: orq %rcx, %rax
; AVX512F-NEXT: movq %rbp, %rsp
; AVX512F-NEXT: popq %rbp
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; VL_BW_DQ-LABEL: shuf64i1_zero:
@@ -422,6 +457,7 @@ define i64 @shuf64i1_zero(i64 %a) {
; VL_BW_DQ-NEXT: vpbroadcastb %xmm0, %zmm0
; VL_BW_DQ-NEXT: vpmovb2m %zmm0, %k0
; VL_BW_DQ-NEXT: kmovq %k0, %rax
+; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i64 %a to <64 x i1>
%c = shufflevector < 64 x i1> %b, <64 x i1> undef, <64 x i32> zeroinitializer
diff --git a/test/CodeGen/X86/vector-shuffle-variable-128.ll b/test/CodeGen/X86/vector-shuffle-variable-128.ll
index 70b7fb16fc25..87fd4a7bf6b9 100644
--- a/test/CodeGen/X86/vector-shuffle-variable-128.ll
+++ b/test/CodeGen/X86/vector-shuffle-variable-128.ll
@@ -42,8 +42,8 @@ define <2 x i64> @var_shuffle_v2i64_v2i64_xx_i64(<2 x i64> %x, i32 %i0, i32 %i1)
; SSE-NEXT: andl $1, %edi
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $1, %esi
-; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
@@ -56,7 +56,7 @@ define <2 x i64> @var_shuffle_v2i64_v2i64_xx_i64(<2 x i64> %x, i32 %i0, i32 %i1)
; AVX-NEXT: andl $1, %esi
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: retq
%x0 = extractelement <2 x i64> %x, i32 %i0
%x1 = extractelement <2 x i64> %x, i32 %i1
@@ -79,10 +79,10 @@ define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i3
; SSE2-NEXT: andl $3, %ecx
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
@@ -99,10 +99,10 @@ define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i3
; SSSE3-NEXT: andl $3, %ecx
; SSSE3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSSE3-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
@@ -164,10 +164,10 @@ define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i
; SSE2-NEXT: andl $3, %ecx
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
@@ -184,10 +184,10 @@ define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i
; SSSE3-NEXT: andl $3, %ecx
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
@@ -255,29 +255,29 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1
; SSE2-NEXT: andl $7, %r10d
; SSE2-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $7, %eax
-; SSE2-NEXT: movzwl -24(%rsp,%r10,2), %r10d
; SSE2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; SSE2-NEXT: movzwl -24(%rsp,%rdi,2), %edi
-; SSE2-NEXT: movzwl -24(%rsp,%rsi,2), %esi
-; SSE2-NEXT: movd %r10d, %xmm0
-; SSE2-NEXT: movzwl -24(%rsp,%rdx,2), %edx
-; SSE2-NEXT: movd %edx, %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
-; SSE2-NEXT: movd %edi, %xmm0
-; SSE2-NEXT: movzwl -24(%rsp,%r8,2), %edx
-; SSE2-NEXT: movd %edx, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: movzwl -24(%rsp,%rcx,2), %eax
; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: movd %ecx, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-NEXT: movd %esi, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: movzwl -24(%rsp,%r9,2), %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: movzwl -24(%rsp,%rsi,2), %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-NEXT: movzwl -24(%rsp,%r10,2), %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: movzwl -24(%rsp,%rdx,2), %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: movzwl -24(%rsp,%r8,2), %eax
; SSE2-NEXT: movd %eax, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: movzwl -24(%rsp,%rdi,2), %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
@@ -299,29 +299,29 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1
; SSSE3-NEXT: andl $7, %r10d
; SSSE3-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $7, %eax
-; SSSE3-NEXT: movzwl -24(%rsp,%r10,2), %r10d
; SSSE3-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; SSSE3-NEXT: movzwl -24(%rsp,%rdi,2), %edi
-; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %esi
-; SSSE3-NEXT: movd %r10d, %xmm0
-; SSSE3-NEXT: movzwl -24(%rsp,%rdx,2), %edx
-; SSSE3-NEXT: movd %edx, %xmm1
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
-; SSSE3-NEXT: movd %edi, %xmm0
-; SSSE3-NEXT: movzwl -24(%rsp,%r8,2), %edx
-; SSSE3-NEXT: movd %edx, %xmm2
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: movzwl -24(%rsp,%rcx,2), %eax
; SSSE3-NEXT: movd %eax, %xmm1
-; SSSE3-NEXT: movd %ecx, %xmm2
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSSE3-NEXT: movd %esi, %xmm1
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT: movzwl -24(%rsp,%r9,2), %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSSE3-NEXT: movzwl -24(%rsp,%r10,2), %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: movzwl -24(%rsp,%rdx,2), %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSSE3-NEXT: movzwl -24(%rsp,%r8,2), %eax
; SSSE3-NEXT: movd %eax, %xmm3
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSSE3-NEXT: movzwl -24(%rsp,%rdi,2), %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
@@ -343,8 +343,6 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1
; SSE41-NEXT: andl $7, %r10d
; SSE41-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
; SSE41-NEXT: andl $7, %eax
-; SSE41-NEXT: movzwl -24(%rsp,%r10,2), %r10d
-; SSE41-NEXT: movzwl -24(%rsp,%rax,2), %eax
; SSE41-NEXT: movzwl -24(%rsp,%rdi,2), %edi
; SSE41-NEXT: movd %edi, %xmm0
; SSE41-NEXT: pinsrw $1, -24(%rsp,%rsi,2), %xmm0
@@ -352,8 +350,8 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1
; SSE41-NEXT: pinsrw $3, -24(%rsp,%rcx,2), %xmm0
; SSE41-NEXT: pinsrw $4, -24(%rsp,%r8,2), %xmm0
; SSE41-NEXT: pinsrw $5, -24(%rsp,%r9,2), %xmm0
-; SSE41-NEXT: pinsrw $6, %r10d, %xmm0
-; SSE41-NEXT: pinsrw $7, %eax, %xmm0
+; SSE41-NEXT: pinsrw $6, -24(%rsp,%r10,2), %xmm0
+; SSE41-NEXT: pinsrw $7, -24(%rsp,%rax,2), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
@@ -375,8 +373,6 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1
; AVX-NEXT: andl $7, %r10d
; AVX-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
; AVX-NEXT: andl $7, %eax
-; AVX-NEXT: movzwl -24(%rsp,%r10,2), %r10d
-; AVX-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX-NEXT: movzwl -24(%rsp,%rdi,2), %edi
; AVX-NEXT: vmovd %edi, %xmm0
; AVX-NEXT: vpinsrw $1, -24(%rsp,%rsi,2), %xmm0, %xmm0
@@ -384,8 +380,8 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1
; AVX-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $4, -24(%rsp,%r8,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $5, -24(%rsp,%r9,2), %xmm0, %xmm0
-; AVX-NEXT: vpinsrw $6, %r10d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX-NEXT: vpinsrw $6, -24(%rsp,%r10,2), %xmm0, %xmm0
+; AVX-NEXT: vpinsrw $7, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <8 x i16> %x, i16 %i0
%x1 = extractelement <8 x i16> %x, i16 %i1
@@ -416,80 +412,80 @@ define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %
; SSE2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; SSE2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %r10d
-; SSE2-NEXT: andl $15, %r10d
-; SSE2-NEXT: leaq -{{[0-9]+}}(%rsp), %r11
-; SSE2-NEXT: movzbl (%r10,%r11), %eax
-; SSE2-NEXT: movd %eax, %xmm15
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%r11), %eax
+; SSE2-NEXT: leaq -{{[0-9]+}}(%rsp), %r10
+; SSE2-NEXT: movzbl (%rax,%r10), %eax
; SSE2-NEXT: movd %eax, %xmm8
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%r11), %eax
+; SSE2-NEXT: movzbl (%rax,%r10), %eax
+; SSE2-NEXT: movd %eax, %xmm15
+; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: andl $15, %eax
+; SSE2-NEXT: movzbl (%rax,%r10), %eax
; SSE2-NEXT: movd %eax, %xmm9
-; SSE2-NEXT: andl $15, %edx
-; SSE2-NEXT: movzbl (%rdx,%r11), %eax
+; SSE2-NEXT: andl $15, %ecx
+; SSE2-NEXT: movzbl (%rcx,%r10), %eax
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%r11), %eax
+; SSE2-NEXT: movzbl (%rax,%r10), %eax
; SSE2-NEXT: movd %eax, %xmm10
-; SSE2-NEXT: andl $15, %edi
-; SSE2-NEXT: movzbl (%rdi,%r11), %eax
-; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: andl $15, %r9d
+; SSE2-NEXT: movzbl (%r9,%r10), %eax
+; SSE2-NEXT: movd %eax, %xmm7
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%r11), %eax
+; SSE2-NEXT: movzbl (%rax,%r10), %eax
; SSE2-NEXT: movd %eax, %xmm11
-; SSE2-NEXT: andl $15, %r8d
-; SSE2-NEXT: movzbl (%r8,%r11), %eax
-; SSE2-NEXT: movd %eax, %xmm7
+; SSE2-NEXT: andl $15, %esi
+; SSE2-NEXT: movzbl (%rsi,%r10), %eax
+; SSE2-NEXT: movd %eax, %xmm6
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%r11), %eax
-; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: movzbl (%rax,%r10), %eax
+; SSE2-NEXT: movd %eax, %xmm12
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%r11), %eax
-; SSE2-NEXT: movd %eax, %xmm12
+; SSE2-NEXT: movzbl (%rax,%r10), %eax
+; SSE2-NEXT: movd %eax, %xmm5
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%r11), %eax
+; SSE2-NEXT: movzbl (%rax,%r10), %eax
; SSE2-NEXT: movd %eax, %xmm13
-; SSE2-NEXT: andl $15, %ecx
-; SSE2-NEXT: movzbl (%rcx,%r11), %eax
-; SSE2-NEXT: movd %eax, %xmm6
+; SSE2-NEXT: andl $15, %edx
+; SSE2-NEXT: movzbl (%rdx,%r10), %eax
+; SSE2-NEXT: movd %eax, %xmm4
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%r11), %eax
+; SSE2-NEXT: movzbl (%rax,%r10), %eax
; SSE2-NEXT: movd %eax, %xmm14
-; SSE2-NEXT: andl $15, %esi
-; SSE2-NEXT: movzbl (%rsi,%r11), %eax
-; SSE2-NEXT: movd %eax, %xmm5
+; SSE2-NEXT: andl $15, %r8d
+; SSE2-NEXT: movzbl (%r8,%r10), %eax
+; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%r11), %eax
-; SSE2-NEXT: movd %eax, %xmm4
-; SSE2-NEXT: andl $15, %r9d
-; SSE2-NEXT: movzbl (%r9,%r11), %eax
-; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: movzbl (%rax,%r10), %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: andl $15, %edi
+; SSE2-NEXT: movzbl (%rdi,%r10), %eax
+; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3],xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
@@ -501,89 +497,84 @@ define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %
; SSSE3-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %r10d
-; SSSE3-NEXT: andl $15, %r10d
-; SSSE3-NEXT: leaq -{{[0-9]+}}(%rsp), %r11
-; SSSE3-NEXT: movzbl (%r10,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm15
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%r11), %eax
+; SSSE3-NEXT: leaq -{{[0-9]+}}(%rsp), %r10
+; SSSE3-NEXT: movzbl (%rax,%r10), %eax
; SSSE3-NEXT: movd %eax, %xmm8
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%r11), %eax
+; SSSE3-NEXT: movzbl (%rax,%r10), %eax
+; SSSE3-NEXT: movd %eax, %xmm15
+; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
+; SSSE3-NEXT: andl $15, %eax
+; SSSE3-NEXT: movzbl (%rax,%r10), %eax
; SSSE3-NEXT: movd %eax, %xmm9
-; SSSE3-NEXT: andl $15, %edx
-; SSSE3-NEXT: movzbl (%rdx,%r11), %eax
+; SSSE3-NEXT: andl $15, %ecx
+; SSSE3-NEXT: movzbl (%rcx,%r10), %eax
; SSSE3-NEXT: movd %eax, %xmm3
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%r11), %eax
+; SSSE3-NEXT: movzbl (%rax,%r10), %eax
; SSSE3-NEXT: movd %eax, %xmm10
-; SSSE3-NEXT: andl $15, %edi
-; SSSE3-NEXT: movzbl (%rdi,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: andl $15, %r9d
+; SSSE3-NEXT: movzbl (%r9,%r10), %eax
+; SSSE3-NEXT: movd %eax, %xmm7
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%r11), %eax
+; SSSE3-NEXT: movzbl (%rax,%r10), %eax
; SSSE3-NEXT: movd %eax, %xmm11
-; SSSE3-NEXT: andl $15, %r8d
-; SSSE3-NEXT: movzbl (%r8,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm7
+; SSSE3-NEXT: andl $15, %esi
+; SSSE3-NEXT: movzbl (%rsi,%r10), %eax
+; SSSE3-NEXT: movd %eax, %xmm6
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: movzbl (%rax,%r10), %eax
+; SSSE3-NEXT: movd %eax, %xmm12
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm12
+; SSSE3-NEXT: movzbl (%rax,%r10), %eax
+; SSSE3-NEXT: movd %eax, %xmm5
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%r11), %eax
+; SSSE3-NEXT: movzbl (%rax,%r10), %eax
; SSSE3-NEXT: movd %eax, %xmm13
-; SSSE3-NEXT: andl $15, %ecx
-; SSSE3-NEXT: movzbl (%rcx,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm6
+; SSSE3-NEXT: andl $15, %edx
+; SSSE3-NEXT: movzbl (%rdx,%r10), %eax
+; SSSE3-NEXT: movd %eax, %xmm4
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%r11), %eax
+; SSSE3-NEXT: movzbl (%rax,%r10), %eax
; SSSE3-NEXT: movd %eax, %xmm14
-; SSSE3-NEXT: andl $15, %esi
-; SSSE3-NEXT: movzbl (%rsi,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm5
+; SSSE3-NEXT: andl $15, %r8d
+; SSSE3-NEXT: movzbl (%r8,%r10), %eax
+; SSSE3-NEXT: movd %eax, %xmm1
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm4
-; SSSE3-NEXT: andl $15, %r9d
-; SSSE3-NEXT: movzbl (%r9,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: movzbl (%rax,%r10), %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: andl $15, %edi
+; SSSE3-NEXT: movzbl (%rdi,%r10), %eax
+; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3],xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE41: # BB#0:
-; SSE41-NEXT: pushq %rbp
-; SSE41-NEXT: pushq %r15
-; SSE41-NEXT: pushq %r14
-; SSE41-NEXT: pushq %r12
-; SSE41-NEXT: pushq %rbx
; SSE41-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
; SSE41-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
; SSE41-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
@@ -591,74 +582,54 @@ define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %
; SSE41-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; SSE41-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; SSE41-NEXT: andl $15, %edi
-; SSE41-NEXT: andl $15, %esi
-; SSE41-NEXT: andl $15, %edx
-; SSE41-NEXT: andl $15, %ecx
-; SSE41-NEXT: andl $15, %r8d
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE41-NEXT: andl $15, %r9d
-; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %r10d
-; SSE41-NEXT: andl $15, %r10d
-; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %r11d
-; SSE41-NEXT: andl $15, %r11d
-; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %r14d
-; SSE41-NEXT: andl $15, %r14d
-; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %r15d
-; SSE41-NEXT: andl $15, %r15d
; SSE41-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; SSE41-NEXT: movzbl (%rdi,%rax), %edi
; SSE41-NEXT: movd %edi, %xmm0
-; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %r12d
-; SSE41-NEXT: andl $15, %r12d
-; SSE41-NEXT: pinsrb $1, (%rsi,%rax), %xmm0
-; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %esi
; SSE41-NEXT: andl $15, %esi
-; SSE41-NEXT: pinsrb $2, (%rdx,%rax), %xmm0
-; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %edx
+; SSE41-NEXT: pinsrb $1, (%rsi,%rax), %xmm0
; SSE41-NEXT: andl $15, %edx
-; SSE41-NEXT: pinsrb $3, (%rcx,%rax), %xmm0
-; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; SSE41-NEXT: pinsrb $2, (%rdx,%rax), %xmm0
; SSE41-NEXT: andl $15, %ecx
+; SSE41-NEXT: pinsrb $3, (%rcx,%rax), %xmm0
+; SSE41-NEXT: andl $15, %r8d
; SSE41-NEXT: pinsrb $4, (%r8,%rax), %xmm0
-; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %ebx
-; SSE41-NEXT: andl $15, %ebx
+; SSE41-NEXT: andl $15, %r9d
; SSE41-NEXT: pinsrb $5, (%r9,%rax), %xmm0
-; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %edi
-; SSE41-NEXT: andl $15, %edi
-; SSE41-NEXT: movzbl (%r10,%rax), %r8d
-; SSE41-NEXT: movzbl (%r11,%rax), %r9d
-; SSE41-NEXT: movzbl (%r14,%rax), %r10d
-; SSE41-NEXT: movzbl (%r15,%rax), %r11d
-; SSE41-NEXT: movzbl (%r12,%rax), %ebp
-; SSE41-NEXT: movzbl (%rsi,%rax), %esi
-; SSE41-NEXT: movzbl (%rdx,%rax), %edx
-; SSE41-NEXT: movzbl (%rcx,%rax), %ecx
-; SSE41-NEXT: movzbl (%rbx,%rax), %ebx
-; SSE41-NEXT: movzbl (%rdi,%rax), %eax
-; SSE41-NEXT: pinsrb $6, %r8d, %xmm0
-; SSE41-NEXT: pinsrb $7, %r9d, %xmm0
-; SSE41-NEXT: pinsrb $8, %r10d, %xmm0
-; SSE41-NEXT: pinsrb $9, %r11d, %xmm0
-; SSE41-NEXT: pinsrb $10, %ebp, %xmm0
-; SSE41-NEXT: pinsrb $11, %esi, %xmm0
-; SSE41-NEXT: pinsrb $12, %edx, %xmm0
-; SSE41-NEXT: pinsrb $13, %ecx, %xmm0
-; SSE41-NEXT: pinsrb $14, %ebx, %xmm0
-; SSE41-NEXT: pinsrb $15, %eax, %xmm0
-; SSE41-NEXT: popq %rbx
-; SSE41-NEXT: popq %r12
-; SSE41-NEXT: popq %r14
-; SSE41-NEXT: popq %r15
-; SSE41-NEXT: popq %rbp
+; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; SSE41-NEXT: andl $15, %ecx
+; SSE41-NEXT: pinsrb $6, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; SSE41-NEXT: andl $15, %ecx
+; SSE41-NEXT: pinsrb $7, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; SSE41-NEXT: andl $15, %ecx
+; SSE41-NEXT: pinsrb $8, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; SSE41-NEXT: andl $15, %ecx
+; SSE41-NEXT: pinsrb $9, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; SSE41-NEXT: andl $15, %ecx
+; SSE41-NEXT: pinsrb $10, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; SSE41-NEXT: andl $15, %ecx
+; SSE41-NEXT: pinsrb $11, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; SSE41-NEXT: andl $15, %ecx
+; SSE41-NEXT: pinsrb $12, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; SSE41-NEXT: andl $15, %ecx
+; SSE41-NEXT: pinsrb $13, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; SSE41-NEXT: andl $15, %ecx
+; SSE41-NEXT: pinsrb $14, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; SSE41-NEXT: andl $15, %ecx
+; SSE41-NEXT: pinsrb $15, (%rcx,%rax), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; AVX: # BB#0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: pushq %r15
-; AVX-NEXT: pushq %r14
-; AVX-NEXT: pushq %r12
-; AVX-NEXT: pushq %rbx
; AVX-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
; AVX-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
; AVX-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
@@ -666,65 +637,50 @@ define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %
; AVX-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; AVX-NEXT: andl $15, %edi
-; AVX-NEXT: andl $15, %esi
-; AVX-NEXT: andl $15, %edx
-; AVX-NEXT: andl $15, %ecx
-; AVX-NEXT: andl $15, %r8d
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: andl $15, %r9d
-; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %r10d
-; AVX-NEXT: andl $15, %r10d
-; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %r11d
-; AVX-NEXT: andl $15, %r11d
-; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %r14d
-; AVX-NEXT: andl $15, %r14d
-; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %r15d
-; AVX-NEXT: andl $15, %r15d
; AVX-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; AVX-NEXT: movzbl (%rdi,%rax), %edi
; AVX-NEXT: vmovd %edi, %xmm0
-; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %r12d
-; AVX-NEXT: andl $15, %r12d
-; AVX-NEXT: vpinsrb $1, (%rsi,%rax), %xmm0, %xmm0
-; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %esi
; AVX-NEXT: andl $15, %esi
-; AVX-NEXT: vpinsrb $2, (%rdx,%rax), %xmm0, %xmm0
-; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %edx
+; AVX-NEXT: vpinsrb $1, (%rsi,%rax), %xmm0, %xmm0
; AVX-NEXT: andl $15, %edx
-; AVX-NEXT: vpinsrb $3, (%rcx,%rax), %xmm0, %xmm0
-; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; AVX-NEXT: vpinsrb $2, (%rdx,%rax), %xmm0, %xmm0
; AVX-NEXT: andl $15, %ecx
+; AVX-NEXT: vpinsrb $3, (%rcx,%rax), %xmm0, %xmm0
+; AVX-NEXT: andl $15, %r8d
; AVX-NEXT: vpinsrb $4, (%r8,%rax), %xmm0, %xmm0
-; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %ebx
-; AVX-NEXT: andl $15, %ebx
+; AVX-NEXT: andl $15, %r9d
; AVX-NEXT: vpinsrb $5, (%r9,%rax), %xmm0, %xmm0
-; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %edi
-; AVX-NEXT: andl $15, %edi
-; AVX-NEXT: movzbl (%r10,%rax), %r8d
-; AVX-NEXT: movzbl (%r11,%rax), %r9d
-; AVX-NEXT: movzbl (%r14,%rax), %r10d
-; AVX-NEXT: movzbl (%r15,%rax), %r11d
-; AVX-NEXT: movzbl (%r12,%rax), %ebp
-; AVX-NEXT: movzbl (%rsi,%rax), %esi
-; AVX-NEXT: movzbl (%rdx,%rax), %edx
-; AVX-NEXT: movzbl (%rcx,%rax), %ecx
-; AVX-NEXT: movzbl (%rbx,%rax), %ebx
-; AVX-NEXT: movzbl (%rdi,%rax), %eax
-; AVX-NEXT: vpinsrb $6, %r8d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $7, %r9d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $10, %ebp, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $11, %esi, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $14, %ebx, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX-NEXT: popq %rbx
-; AVX-NEXT: popq %r12
-; AVX-NEXT: popq %r14
-; AVX-NEXT: popq %r15
-; AVX-NEXT: popq %rbp
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; AVX-NEXT: andl $15, %ecx
+; AVX-NEXT: vpinsrb $6, (%rcx,%rax), %xmm0, %xmm0
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; AVX-NEXT: andl $15, %ecx
+; AVX-NEXT: vpinsrb $7, (%rcx,%rax), %xmm0, %xmm0
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; AVX-NEXT: andl $15, %ecx
+; AVX-NEXT: vpinsrb $8, (%rcx,%rax), %xmm0, %xmm0
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; AVX-NEXT: andl $15, %ecx
+; AVX-NEXT: vpinsrb $9, (%rcx,%rax), %xmm0, %xmm0
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; AVX-NEXT: andl $15, %ecx
+; AVX-NEXT: vpinsrb $10, (%rcx,%rax), %xmm0, %xmm0
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; AVX-NEXT: andl $15, %ecx
+; AVX-NEXT: vpinsrb $11, (%rcx,%rax), %xmm0, %xmm0
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; AVX-NEXT: andl $15, %ecx
+; AVX-NEXT: vpinsrb $12, (%rcx,%rax), %xmm0, %xmm0
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; AVX-NEXT: andl $15, %ecx
+; AVX-NEXT: vpinsrb $13, (%rcx,%rax), %xmm0, %xmm0
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; AVX-NEXT: andl $15, %ecx
+; AVX-NEXT: vpinsrb $14, (%rcx,%rax), %xmm0, %xmm0
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; AVX-NEXT: andl $15, %ecx
+; AVX-NEXT: vpinsrb $15, (%rcx,%rax), %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <16 x i8> %x, i8 %i0
%x1 = extractelement <16 x i8> %x, i8 %i1
@@ -779,11 +735,11 @@ define <4 x i32> @mem_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32* %i) nounwi
; SSE2-NEXT: andl $3, %esi
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
@@ -799,11 +755,11 @@ define <4 x i32> @mem_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32* %i) nounwi
; SSSE3-NEXT: andl $3, %esi
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
@@ -862,341 +818,281 @@ define <16 x i8> @mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8*
; SSE2-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE2: # BB#0:
; SSE2-NEXT: movzbl (%rdi), %eax
-; SSE2-NEXT: andl $15, %eax
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movzbl 15(%rdi), %edx
+; SSE2-NEXT: andl $15, %edx
; SSE2-NEXT: leaq -{{[0-9]+}}(%rsp), %rcx
-; SSE2-NEXT: movzbl (%rax,%rcx), %eax
-; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: movzbl 8(%rdi), %eax
-; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%rcx), %eax
-; SSE2-NEXT: movd %eax, %xmm8
-; SSE2-NEXT: movzbl 12(%rdi), %eax
-; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%rcx), %eax
-; SSE2-NEXT: movd %eax, %xmm9
-; SSE2-NEXT: movzbl 4(%rdi), %eax
-; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%rcx), %eax
-; SSE2-NEXT: movd %eax, %xmm3
-; SSE2-NEXT: movzbl 14(%rdi), %eax
-; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%rcx), %eax
-; SSE2-NEXT: movd %eax, %xmm10
-; SSE2-NEXT: movzbl 6(%rdi), %eax
-; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%rcx), %eax
-; SSE2-NEXT: movd %eax, %xmm5
-; SSE2-NEXT: movzbl 10(%rdi), %eax
-; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%rcx), %eax
-; SSE2-NEXT: movd %eax, %xmm11
-; SSE2-NEXT: movzbl 2(%rdi), %eax
-; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%rcx), %eax
-; SSE2-NEXT: movd %eax, %xmm7
-; SSE2-NEXT: movzbl 15(%rdi), %eax
-; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%rcx), %eax
-; SSE2-NEXT: movd %eax, %xmm12
-; SSE2-NEXT: movzbl 7(%rdi), %eax
-; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%rcx), %eax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: movzbl 11(%rdi), %eax
-; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%rcx), %eax
-; SSE2-NEXT: movd %eax, %xmm13
-; SSE2-NEXT: movzbl 3(%rdi), %eax
-; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%rcx), %eax
-; SSE2-NEXT: movd %eax, %xmm6
-; SSE2-NEXT: movzbl 13(%rdi), %eax
-; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%rcx), %eax
-; SSE2-NEXT: movd %eax, %xmm14
-; SSE2-NEXT: movzbl 5(%rdi), %eax
-; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%rcx), %eax
-; SSE2-NEXT: movd %eax, %xmm4
-; SSE2-NEXT: movzbl 9(%rdi), %eax
-; SSE2-NEXT: andl $15, %eax
-; SSE2-NEXT: movzbl (%rax,%rcx), %eax
-; SSE2-NEXT: movd %eax, %xmm15
-; SSE2-NEXT: movzbl 1(%rdi), %eax
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm8
+; SSE2-NEXT: movzbl 7(%rdi), %edx
+; SSE2-NEXT: andl $15, %edx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm15
+; SSE2-NEXT: movzbl 11(%rdi), %edx
+; SSE2-NEXT: andl $15, %edx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm9
+; SSE2-NEXT: movzbl 3(%rdi), %edx
+; SSE2-NEXT: andl $15, %edx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm3
+; SSE2-NEXT: movzbl 13(%rdi), %edx
+; SSE2-NEXT: andl $15, %edx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm10
+; SSE2-NEXT: movzbl 5(%rdi), %edx
+; SSE2-NEXT: andl $15, %edx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm7
+; SSE2-NEXT: movzbl 9(%rdi), %edx
+; SSE2-NEXT: andl $15, %edx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm11
+; SSE2-NEXT: movzbl 1(%rdi), %edx
+; SSE2-NEXT: andl $15, %edx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm6
+; SSE2-NEXT: movzbl 14(%rdi), %edx
+; SSE2-NEXT: andl $15, %edx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm12
+; SSE2-NEXT: movzbl 6(%rdi), %edx
+; SSE2-NEXT: andl $15, %edx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm5
+; SSE2-NEXT: movzbl 10(%rdi), %edx
+; SSE2-NEXT: andl $15, %edx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm13
+; SSE2-NEXT: movzbl 2(%rdi), %edx
+; SSE2-NEXT: andl $15, %edx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm4
+; SSE2-NEXT: movzbl 12(%rdi), %edx
+; SSE2-NEXT: andl $15, %edx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm14
+; SSE2-NEXT: movzbl 4(%rdi), %edx
+; SSE2-NEXT: andl $15, %edx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm1
+; SSE2-NEXT: movzbl 8(%rdi), %edx
+; SSE2-NEXT: andl $15, %edx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm2
; SSE2-NEXT: andl $15, %eax
; SSE2-NEXT: movzbl (%rax,%rcx), %eax
-; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3],xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3],xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSSE3: # BB#0:
; SSSE3-NEXT: movzbl (%rdi), %eax
-; SSSE3-NEXT: andl $15, %eax
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movzbl 15(%rdi), %edx
+; SSSE3-NEXT: andl $15, %edx
; SSSE3-NEXT: leaq -{{[0-9]+}}(%rsp), %rcx
-; SSSE3-NEXT: movzbl (%rax,%rcx), %eax
-; SSSE3-NEXT: movd %eax, %xmm0
-; SSSE3-NEXT: movzbl 8(%rdi), %eax
-; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%rcx), %eax
-; SSSE3-NEXT: movd %eax, %xmm8
-; SSSE3-NEXT: movzbl 12(%rdi), %eax
-; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%rcx), %eax
-; SSSE3-NEXT: movd %eax, %xmm9
-; SSSE3-NEXT: movzbl 4(%rdi), %eax
-; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%rcx), %eax
-; SSSE3-NEXT: movd %eax, %xmm3
-; SSSE3-NEXT: movzbl 14(%rdi), %eax
-; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%rcx), %eax
-; SSSE3-NEXT: movd %eax, %xmm10
-; SSSE3-NEXT: movzbl 6(%rdi), %eax
-; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%rcx), %eax
-; SSSE3-NEXT: movd %eax, %xmm5
-; SSSE3-NEXT: movzbl 10(%rdi), %eax
-; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%rcx), %eax
-; SSSE3-NEXT: movd %eax, %xmm11
-; SSSE3-NEXT: movzbl 2(%rdi), %eax
-; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%rcx), %eax
-; SSSE3-NEXT: movd %eax, %xmm7
-; SSSE3-NEXT: movzbl 15(%rdi), %eax
-; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%rcx), %eax
-; SSSE3-NEXT: movd %eax, %xmm12
-; SSSE3-NEXT: movzbl 7(%rdi), %eax
-; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%rcx), %eax
-; SSSE3-NEXT: movd %eax, %xmm2
-; SSSE3-NEXT: movzbl 11(%rdi), %eax
-; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%rcx), %eax
-; SSSE3-NEXT: movd %eax, %xmm13
-; SSSE3-NEXT: movzbl 3(%rdi), %eax
-; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%rcx), %eax
-; SSSE3-NEXT: movd %eax, %xmm6
-; SSSE3-NEXT: movzbl 13(%rdi), %eax
-; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%rcx), %eax
-; SSSE3-NEXT: movd %eax, %xmm14
-; SSSE3-NEXT: movzbl 5(%rdi), %eax
-; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%rcx), %eax
-; SSSE3-NEXT: movd %eax, %xmm4
-; SSSE3-NEXT: movzbl 9(%rdi), %eax
-; SSSE3-NEXT: andl $15, %eax
-; SSSE3-NEXT: movzbl (%rax,%rcx), %eax
-; SSSE3-NEXT: movd %eax, %xmm15
-; SSSE3-NEXT: movzbl 1(%rdi), %eax
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm8
+; SSSE3-NEXT: movzbl 7(%rdi), %edx
+; SSSE3-NEXT: andl $15, %edx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm15
+; SSSE3-NEXT: movzbl 11(%rdi), %edx
+; SSSE3-NEXT: andl $15, %edx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm9
+; SSSE3-NEXT: movzbl 3(%rdi), %edx
+; SSSE3-NEXT: andl $15, %edx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm3
+; SSSE3-NEXT: movzbl 13(%rdi), %edx
+; SSSE3-NEXT: andl $15, %edx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm10
+; SSSE3-NEXT: movzbl 5(%rdi), %edx
+; SSSE3-NEXT: andl $15, %edx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm7
+; SSSE3-NEXT: movzbl 9(%rdi), %edx
+; SSSE3-NEXT: andl $15, %edx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm11
+; SSSE3-NEXT: movzbl 1(%rdi), %edx
+; SSSE3-NEXT: andl $15, %edx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm6
+; SSSE3-NEXT: movzbl 14(%rdi), %edx
+; SSSE3-NEXT: andl $15, %edx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm12
+; SSSE3-NEXT: movzbl 6(%rdi), %edx
+; SSSE3-NEXT: andl $15, %edx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm5
+; SSSE3-NEXT: movzbl 10(%rdi), %edx
+; SSSE3-NEXT: andl $15, %edx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm13
+; SSSE3-NEXT: movzbl 2(%rdi), %edx
+; SSSE3-NEXT: andl $15, %edx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm4
+; SSSE3-NEXT: movzbl 12(%rdi), %edx
+; SSSE3-NEXT: andl $15, %edx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm14
+; SSSE3-NEXT: movzbl 4(%rdi), %edx
+; SSSE3-NEXT: andl $15, %edx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm1
+; SSSE3-NEXT: movzbl 8(%rdi), %edx
+; SSSE3-NEXT: andl $15, %edx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm2
; SSSE3-NEXT: andl $15, %eax
; SSSE3-NEXT: movzbl (%rax,%rcx), %eax
-; SSSE3-NEXT: movd %eax, %xmm1
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3],xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3],xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE41: # BB#0:
-; SSE41-NEXT: pushq %rbp
-; SSE41-NEXT: pushq %r15
-; SSE41-NEXT: pushq %r14
-; SSE41-NEXT: pushq %r13
-; SSE41-NEXT: pushq %r12
-; SSE41-NEXT: pushq %rbx
-; SSE41-NEXT: movzbl (%rdi), %r11d
-; SSE41-NEXT: andl $15, %r11d
+; SSE41-NEXT: movzbl (%rdi), %eax
+; SSE41-NEXT: andl $15, %eax
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE41-NEXT: movzbl 1(%rdi), %r9d
-; SSE41-NEXT: andl $15, %r9d
+; SSE41-NEXT: leaq -{{[0-9]+}}(%rsp), %rcx
+; SSE41-NEXT: movzbl (%rax,%rcx), %eax
+; SSE41-NEXT: movd %eax, %xmm0
+; SSE41-NEXT: movzbl 1(%rdi), %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: pinsrb $1, (%rax,%rcx), %xmm0
; SSE41-NEXT: movzbl 2(%rdi), %eax
; SSE41-NEXT: andl $15, %eax
-; SSE41-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; SSE41-NEXT: pinsrb $2, (%rax,%rcx), %xmm0
; SSE41-NEXT: movzbl 3(%rdi), %eax
; SSE41-NEXT: andl $15, %eax
-; SSE41-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; SSE41-NEXT: movzbl 4(%rdi), %r14d
-; SSE41-NEXT: andl $15, %r14d
-; SSE41-NEXT: movzbl 5(%rdi), %r15d
-; SSE41-NEXT: andl $15, %r15d
-; SSE41-NEXT: movzbl 6(%rdi), %r12d
-; SSE41-NEXT: andl $15, %r12d
-; SSE41-NEXT: movzbl 7(%rdi), %r13d
-; SSE41-NEXT: andl $15, %r13d
-; SSE41-NEXT: movzbl 8(%rdi), %r8d
-; SSE41-NEXT: andl $15, %r8d
+; SSE41-NEXT: pinsrb $3, (%rax,%rcx), %xmm0
+; SSE41-NEXT: movzbl 4(%rdi), %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: pinsrb $4, (%rax,%rcx), %xmm0
+; SSE41-NEXT: movzbl 5(%rdi), %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: pinsrb $5, (%rax,%rcx), %xmm0
+; SSE41-NEXT: movzbl 6(%rdi), %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: pinsrb $6, (%rax,%rcx), %xmm0
+; SSE41-NEXT: movzbl 7(%rdi), %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: pinsrb $7, (%rax,%rcx), %xmm0
+; SSE41-NEXT: movzbl 8(%rdi), %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: pinsrb $8, (%rax,%rcx), %xmm0
; SSE41-NEXT: movzbl 9(%rdi), %eax
; SSE41-NEXT: andl $15, %eax
-; SSE41-NEXT: movzbl 10(%rdi), %ecx
-; SSE41-NEXT: andl $15, %ecx
-; SSE41-NEXT: movzbl 11(%rdi), %edx
-; SSE41-NEXT: andl $15, %edx
-; SSE41-NEXT: movzbl 12(%rdi), %esi
-; SSE41-NEXT: andl $15, %esi
-; SSE41-NEXT: leaq -{{[0-9]+}}(%rsp), %rbp
-; SSE41-NEXT: movzbl (%r11,%rbp), %ebx
-; SSE41-NEXT: movd %ebx, %xmm0
-; SSE41-NEXT: movzbl 13(%rdi), %r11d
-; SSE41-NEXT: andl $15, %r11d
-; SSE41-NEXT: pinsrb $1, (%r9,%rbp), %xmm0
-; SSE41-NEXT: movzbl 14(%rdi), %ebx
-; SSE41-NEXT: andl $15, %ebx
-; SSE41-NEXT: movzbl 15(%rdi), %edi
-; SSE41-NEXT: andl $15, %edi
-; SSE41-NEXT: movzbl (%rdi,%rbp), %r10d
-; SSE41-NEXT: movzbl (%rbx,%rbp), %r9d
-; SSE41-NEXT: movzbl (%r11,%rbp), %r11d
-; SSE41-NEXT: movzbl (%rsi,%rbp), %esi
-; SSE41-NEXT: movzbl (%rdx,%rbp), %edx
-; SSE41-NEXT: movzbl (%rcx,%rbp), %ecx
-; SSE41-NEXT: movzbl (%rax,%rbp), %eax
-; SSE41-NEXT: movzbl (%r8,%rbp), %r8d
-; SSE41-NEXT: movzbl (%r13,%rbp), %r13d
-; SSE41-NEXT: movzbl (%r12,%rbp), %r12d
-; SSE41-NEXT: movzbl (%r15,%rbp), %r15d
-; SSE41-NEXT: movzbl (%r14,%rbp), %r14d
-; SSE41-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload
-; SSE41-NEXT: movzbl (%rdi,%rbp), %edi
-; SSE41-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload
-; SSE41-NEXT: movzbl (%rbx,%rbp), %ebp
-; SSE41-NEXT: pinsrb $2, %ebp, %xmm0
-; SSE41-NEXT: pinsrb $3, %edi, %xmm0
-; SSE41-NEXT: pinsrb $4, %r14d, %xmm0
-; SSE41-NEXT: pinsrb $5, %r15d, %xmm0
-; SSE41-NEXT: pinsrb $6, %r12d, %xmm0
-; SSE41-NEXT: pinsrb $7, %r13d, %xmm0
-; SSE41-NEXT: pinsrb $8, %r8d, %xmm0
-; SSE41-NEXT: pinsrb $9, %eax, %xmm0
-; SSE41-NEXT: pinsrb $10, %ecx, %xmm0
-; SSE41-NEXT: pinsrb $11, %edx, %xmm0
-; SSE41-NEXT: pinsrb $12, %esi, %xmm0
-; SSE41-NEXT: pinsrb $13, %r11d, %xmm0
-; SSE41-NEXT: pinsrb $14, %r9d, %xmm0
-; SSE41-NEXT: pinsrb $15, %r10d, %xmm0
-; SSE41-NEXT: popq %rbx
-; SSE41-NEXT: popq %r12
-; SSE41-NEXT: popq %r13
-; SSE41-NEXT: popq %r14
-; SSE41-NEXT: popq %r15
-; SSE41-NEXT: popq %rbp
+; SSE41-NEXT: pinsrb $9, (%rax,%rcx), %xmm0
+; SSE41-NEXT: movzbl 10(%rdi), %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: pinsrb $10, (%rax,%rcx), %xmm0
+; SSE41-NEXT: movzbl 11(%rdi), %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: pinsrb $11, (%rax,%rcx), %xmm0
+; SSE41-NEXT: movzbl 12(%rdi), %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: pinsrb $12, (%rax,%rcx), %xmm0
+; SSE41-NEXT: movzbl 13(%rdi), %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: pinsrb $13, (%rax,%rcx), %xmm0
+; SSE41-NEXT: movzbl 14(%rdi), %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: pinsrb $14, (%rax,%rcx), %xmm0
+; SSE41-NEXT: movzbl 15(%rdi), %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: pinsrb $15, (%rax,%rcx), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; AVX: # BB#0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: pushq %r15
-; AVX-NEXT: pushq %r14
-; AVX-NEXT: pushq %r13
-; AVX-NEXT: pushq %r12
-; AVX-NEXT: pushq %rbx
-; AVX-NEXT: movzbl (%rdi), %r11d
-; AVX-NEXT: andl $15, %r11d
+; AVX-NEXT: movzbl (%rdi), %eax
+; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: movzbl 1(%rdi), %r9d
-; AVX-NEXT: andl $15, %r9d
+; AVX-NEXT: leaq -{{[0-9]+}}(%rsp), %rcx
+; AVX-NEXT: movzbl (%rax,%rcx), %eax
+; AVX-NEXT: vmovd %eax, %xmm0
+; AVX-NEXT: movzbl 1(%rdi), %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $1, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: movzbl 2(%rdi), %eax
; AVX-NEXT: andl $15, %eax
-; AVX-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; AVX-NEXT: vpinsrb $2, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: movzbl 3(%rdi), %eax
; AVX-NEXT: andl $15, %eax
-; AVX-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX-NEXT: movzbl 4(%rdi), %r14d
-; AVX-NEXT: andl $15, %r14d
-; AVX-NEXT: movzbl 5(%rdi), %r15d
-; AVX-NEXT: andl $15, %r15d
-; AVX-NEXT: movzbl 6(%rdi), %r12d
-; AVX-NEXT: andl $15, %r12d
-; AVX-NEXT: movzbl 7(%rdi), %r13d
-; AVX-NEXT: andl $15, %r13d
-; AVX-NEXT: movzbl 8(%rdi), %r8d
-; AVX-NEXT: andl $15, %r8d
+; AVX-NEXT: vpinsrb $3, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movzbl 4(%rdi), %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $4, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movzbl 5(%rdi), %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $5, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movzbl 6(%rdi), %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $6, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movzbl 7(%rdi), %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $7, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movzbl 8(%rdi), %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $8, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: movzbl 9(%rdi), %eax
; AVX-NEXT: andl $15, %eax
-; AVX-NEXT: movzbl 10(%rdi), %ecx
-; AVX-NEXT: andl $15, %ecx
-; AVX-NEXT: movzbl 11(%rdi), %edx
-; AVX-NEXT: andl $15, %edx
-; AVX-NEXT: movzbl 12(%rdi), %esi
-; AVX-NEXT: andl $15, %esi
-; AVX-NEXT: leaq -{{[0-9]+}}(%rsp), %rbp
-; AVX-NEXT: movzbl (%r11,%rbp), %ebx
-; AVX-NEXT: vmovd %ebx, %xmm0
-; AVX-NEXT: movzbl 13(%rdi), %r11d
-; AVX-NEXT: andl $15, %r11d
-; AVX-NEXT: vpinsrb $1, (%r9,%rbp), %xmm0, %xmm0
-; AVX-NEXT: movzbl 14(%rdi), %ebx
-; AVX-NEXT: andl $15, %ebx
-; AVX-NEXT: movzbl 15(%rdi), %edi
-; AVX-NEXT: andl $15, %edi
-; AVX-NEXT: movzbl (%rdi,%rbp), %r10d
-; AVX-NEXT: movzbl (%rbx,%rbp), %r9d
-; AVX-NEXT: movzbl (%r11,%rbp), %r11d
-; AVX-NEXT: movzbl (%rsi,%rbp), %esi
-; AVX-NEXT: movzbl (%rdx,%rbp), %edx
-; AVX-NEXT: movzbl (%rcx,%rbp), %ecx
-; AVX-NEXT: movzbl (%rax,%rbp), %eax
-; AVX-NEXT: movzbl (%r8,%rbp), %r8d
-; AVX-NEXT: movzbl (%r13,%rbp), %r13d
-; AVX-NEXT: movzbl (%r12,%rbp), %r12d
-; AVX-NEXT: movzbl (%r15,%rbp), %r15d
-; AVX-NEXT: movzbl (%r14,%rbp), %r14d
-; AVX-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload
-; AVX-NEXT: movzbl (%rdi,%rbp), %edi
-; AVX-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload
-; AVX-NEXT: movzbl (%rbx,%rbp), %ebp
-; AVX-NEXT: vpinsrb $2, %ebp, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $3, %edi, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $4, %r14d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $5, %r15d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $6, %r12d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $7, %r13d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $8, %r8d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $11, %edx, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $12, %esi, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $13, %r11d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $14, %r9d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $15, %r10d, %xmm0, %xmm0
-; AVX-NEXT: popq %rbx
-; AVX-NEXT: popq %r12
-; AVX-NEXT: popq %r13
-; AVX-NEXT: popq %r14
-; AVX-NEXT: popq %r15
-; AVX-NEXT: popq %rbp
+; AVX-NEXT: vpinsrb $9, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movzbl 10(%rdi), %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $10, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movzbl 11(%rdi), %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $11, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movzbl 12(%rdi), %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $12, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movzbl 13(%rdi), %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $13, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movzbl 14(%rdi), %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $14, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movzbl 15(%rdi), %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $15, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: retq
%p0 = getelementptr inbounds i8, i8* %i, i64 0
%p1 = getelementptr inbounds i8, i8* %i, i64 1
@@ -1329,29 +1225,28 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %
; SSE2-NEXT: andl $7, %ecx
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: andl $7, %r8d
-; SSE2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: andl $7, %r9d
-; SSE2-NEXT: movzwl -24(%rsp,%rsi,2), %eax
-; SSE2-NEXT: xorl %esi, %esi
-; SSE2-NEXT: movd %esi, %xmm0
-; SSE2-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
-; SSE2-NEXT: movd %ecx, %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: movzwl -24(%rsp,%rcx,2), %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: movzwl -24(%rsp,%r9,2), %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: movzwl -24(%rsp,%rsi,2), %eax
; SSE2-NEXT: movd %eax, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE2-NEXT: movzwl -40(%rsp,%rdx,2), %eax
+; SSE2-NEXT: movd %eax, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-NEXT: movzwl -40(%rsp,%r8,2), %eax
+; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: movzwl -40(%rsp,%rdi,2), %eax
-; SSE2-NEXT: movzwl -40(%rsp,%rdx,2), %ecx
-; SSE2-NEXT: movd %ecx, %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: movzwl -40(%rsp,%r8,2), %eax
-; SSE2-NEXT: movd %eax, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
@@ -1368,29 +1263,28 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %
; SSSE3-NEXT: andl $7, %ecx
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: andl $7, %r8d
-; SSSE3-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: andl $7, %r9d
-; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %eax
-; SSSE3-NEXT: xorl %esi, %esi
-; SSSE3-NEXT: movd %esi, %xmm0
-; SSSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
-; SSSE3-NEXT: movd %ecx, %xmm1
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: movzwl -24(%rsp,%rcx,2), %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: movzwl -24(%rsp,%r9,2), %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %eax
; SSSE3-NEXT: movd %eax, %xmm3
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSSE3-NEXT: movzwl -40(%rsp,%rdx,2), %eax
+; SSSE3-NEXT: movd %eax, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSSE3-NEXT: movzwl -40(%rsp,%r8,2), %eax
+; SSSE3-NEXT: movd %eax, %xmm1
; SSSE3-NEXT: movzwl -40(%rsp,%rdi,2), %eax
-; SSSE3-NEXT: movzwl -40(%rsp,%rdx,2), %ecx
-; SSSE3-NEXT: movd %ecx, %xmm1
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT: movd %eax, %xmm0
-; SSSE3-NEXT: movzwl -40(%rsp,%r8,2), %eax
-; SSSE3-NEXT: movd %eax, %xmm3
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
diff --git a/test/CodeGen/X86/vector-shuffle-variable-256.ll b/test/CodeGen/X86/vector-shuffle-variable-256.ll
index 42b3c11d3d6b..b076bc993ef8 100644
--- a/test/CodeGen/X86/vector-shuffle-variable-256.ll
+++ b/test/CodeGen/X86/vector-shuffle-variable-256.ll
@@ -1,4 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
@@ -13,16 +14,16 @@ define <4 x double> @var_shuffle_v4f64_v4f64_xxxx_i64(<4 x double> %x, i64 %i0,
; ALL-NEXT: movq %rsp, %rbp
; ALL-NEXT: andq $-32, %rsp
; ALL-NEXT: subq $64, %rsp
-; ALL-NEXT: andl $3, %ecx
-; ALL-NEXT: andl $3, %edx
; ALL-NEXT: andl $3, %esi
; ALL-NEXT: andl $3, %edi
+; ALL-NEXT: andl $3, %ecx
+; ALL-NEXT: andl $3, %edx
; ALL-NEXT: vmovaps %ymm0, (%rsp)
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; ALL-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
-; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; ALL-NEXT: movq %rbp, %rsp
; ALL-NEXT: popq %rbp
; ALL-NEXT: retq
@@ -68,16 +69,16 @@ define <4 x double> @var_shuffle_v4f64_v4f64_uxx0_i64(<4 x double> %x, i64 %i0,
define <4 x double> @var_shuffle_v4f64_v2f64_xxxx_i64(<2 x double> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
; ALL-LABEL: var_shuffle_v4f64_v2f64_xxxx_i64:
; ALL: # BB#0:
-; ALL-NEXT: andl $1, %ecx
-; ALL-NEXT: andl $1, %edx
; ALL-NEXT: andl $1, %esi
; ALL-NEXT: andl $1, %edi
+; ALL-NEXT: andl $1, %ecx
+; ALL-NEXT: andl $1, %edx
; ALL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; ALL-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
-; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; ALL-NEXT: retq
%x0 = extractelement <2 x double> %x, i64 %i0
%x1 = extractelement <2 x double> %x, i64 %i1
@@ -97,18 +98,18 @@ define <4 x i64> @var_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64 %i0, i64 %i
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
; AVX1-NEXT: subq $64, %rsp
-; AVX1-NEXT: andl $3, %ecx
-; AVX1-NEXT: andl $3, %edx
-; AVX1-NEXT: andl $3, %esi
; AVX1-NEXT: andl $3, %edi
+; AVX1-NEXT: andl $3, %esi
+; AVX1-NEXT: andl $3, %edx
+; AVX1-NEXT: andl $3, %ecx
; AVX1-NEXT: vmovaps %ymm0, (%rsp)
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: movq %rbp, %rsp
; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
@@ -119,18 +120,18 @@ define <4 x i64> @var_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64 %i0, i64 %i
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $64, %rsp
-; AVX2-NEXT: andl $3, %ecx
-; AVX2-NEXT: andl $3, %edx
-; AVX2-NEXT: andl $3, %esi
; AVX2-NEXT: andl $3, %edi
+; AVX2-NEXT: andl $3, %esi
+; AVX2-NEXT: andl $3, %edx
+; AVX2-NEXT: andl $3, %ecx
; AVX2-NEXT: vmovaps %ymm0, (%rsp)
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: movq %rbp, %rsp
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
@@ -152,12 +153,12 @@ define <4 x i64> @var_shuffle_v4i64_v4i64_xx00_i64(<4 x i64> %x, i64 %i0, i64 %i
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
; AVX1-NEXT: subq $64, %rsp
-; AVX1-NEXT: andl $3, %esi
; AVX1-NEXT: andl $3, %edi
+; AVX1-NEXT: andl $3, %esi
; AVX1-NEXT: vmovaps %ymm0, (%rsp)
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: movq %rbp, %rsp
@@ -170,12 +171,12 @@ define <4 x i64> @var_shuffle_v4i64_v4i64_xx00_i64(<4 x i64> %x, i64 %i0, i64 %i
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $64, %rsp
-; AVX2-NEXT: andl $3, %esi
; AVX2-NEXT: andl $3, %edi
+; AVX2-NEXT: andl $3, %esi
; AVX2-NEXT: vmovaps %ymm0, (%rsp)
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: movq %rbp, %rsp
@@ -195,34 +196,34 @@ define <4 x i64> @var_shuffle_v4i64_v4i64_xx00_i64(<4 x i64> %x, i64 %i0, i64 %i
define <4 x i64> @var_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
; AVX1-LABEL: var_shuffle_v4i64_v2i64_xxxx_i64:
; AVX1: # BB#0:
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: andl $1, %esi
; AVX1-NEXT: andl $1, %edi
+; AVX1-NEXT: andl $1, %esi
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: andl $1, %ecx
; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v4i64_v2i64_xxxx_i64:
; AVX2: # BB#0:
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: andl $1, %esi
; AVX2-NEXT: andl $1, %edi
+; AVX2-NEXT: andl $1, %esi
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: andl $1, %ecx
; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
%x0 = extractelement <2 x i64> %x, i64 %i0
%x1 = extractelement <2 x i64> %x, i64 %i1
@@ -236,70 +237,41 @@ define <4 x i64> @var_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64 %i0, i64 %i
}
define <8 x float> @var_shuffle_v8f32_v8f32_xxxxxxxx_i32(<8 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7) nounwind {
-; AVX1-LABEL: var_shuffle_v8f32_v8f32_xxxxxxxx_i32:
-; AVX1: # BB#0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: movq %rsp, %rbp
-; AVX1-NEXT: andq $-32, %rsp
-; AVX1-NEXT: subq $64, %rsp
-; AVX1-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; AVX1-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; AVX1-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; AVX1-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; AVX1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
-; AVX1-NEXT: andl $7, %edi
-; AVX1-NEXT: andl $7, %esi
-; AVX1-NEXT: andl $7, %edx
-; AVX1-NEXT: andl $7, %ecx
-; AVX1-NEXT: andl $7, %r8d
-; AVX1-NEXT: vmovaps %ymm0, (%rsp)
-; AVX1-NEXT: andl $7, %r9d
-; AVX1-NEXT: movl 16(%rbp), %r10d
-; AVX1-NEXT: andl $7, %r10d
-; AVX1-NEXT: movl 24(%rbp), %eax
-; AVX1-NEXT: andl $7, %eax
-; AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX1-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0]
-; AVX1-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],mem[0],xmm3[2,3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[0,1],xmm0[0],xmm3[3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT: movq %rbp, %rsp
-; AVX1-NEXT: popq %rbp
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: var_shuffle_v8f32_v8f32_xxxxxxxx_i32:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovd %edi, %xmm1
-; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm1
-; AVX2-NEXT: vmovd %esi, %xmm2
-; AVX2-NEXT: vpermps %ymm0, %ymm2, %ymm2
-; AVX2-NEXT: vmovd %edx, %xmm3
-; AVX2-NEXT: vpermps %ymm0, %ymm3, %ymm3
-; AVX2-NEXT: vmovd %ecx, %xmm4
-; AVX2-NEXT: vpermps %ymm0, %ymm4, %ymm4
-; AVX2-NEXT: vmovd %r8d, %xmm5
-; AVX2-NEXT: vpermps %ymm0, %ymm5, %ymm5
-; AVX2-NEXT: vmovd %r9d, %xmm6
-; AVX2-NEXT: vpermps %ymm0, %ymm6, %ymm6
-; AVX2-NEXT: vmovss {{.*#+}} xmm7 = mem[0],zero,zero,zero
-; AVX2-NEXT: vpermps %ymm0, %ymm7, %ymm7
-; AVX2-NEXT: vmovss {{.*#+}} xmm8 = mem[0],zero,zero,zero
-; AVX2-NEXT: vpermps %ymm0, %ymm8, %ymm0
-; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[2,3]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm7[0],xmm5[3]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm5[0,1,2],xmm0[0]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
-; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: retq
+; ALL-LABEL: var_shuffle_v8f32_v8f32_xxxxxxxx_i32:
+; ALL: # BB#0:
+; ALL-NEXT: pushq %rbp
+; ALL-NEXT: movq %rsp, %rbp
+; ALL-NEXT: andq $-32, %rsp
+; ALL-NEXT: subq $64, %rsp
+; ALL-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
+; ALL-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
+; ALL-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; ALL-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
+; ALL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; ALL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ALL-NEXT: andl $7, %edi
+; ALL-NEXT: andl $7, %esi
+; ALL-NEXT: andl $7, %edx
+; ALL-NEXT: andl $7, %ecx
+; ALL-NEXT: andl $7, %r8d
+; ALL-NEXT: vmovaps %ymm0, (%rsp)
+; ALL-NEXT: andl $7, %r9d
+; ALL-NEXT: movl 16(%rbp), %r10d
+; ALL-NEXT: andl $7, %r10d
+; ALL-NEXT: movl 24(%rbp), %eax
+; ALL-NEXT: andl $7, %eax
+; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
+; ALL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[2,3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],mem[0]
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT: movq %rbp, %rsp
+; ALL-NEXT: popq %rbp
+; ALL-NEXT: retq
%x0 = extractelement <8 x float> %x, i32 %i0
%x1 = extractelement <8 x float> %x, i32 %i1
%x2 = extractelement <8 x float> %x, i32 %i2
@@ -340,16 +312,14 @@ define <8 x float> @var_shuffle_v8f32_v4f32_xxxxxxxx_i32(<4 x float> %x, i32 %i0
; ALL-NEXT: movl {{[0-9]+}}(%rsp), %eax
; ALL-NEXT: andl $3, %eax
; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; ALL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; ALL-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3]
-; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3]
-; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0]
-; ALL-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; ALL-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],mem[0],xmm3[2,3]
-; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[0,1],xmm0[0],xmm3[3]
-; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
-; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[2,3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],mem[0]
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; ALL-NEXT: retq
%x0 = extractelement <4 x float> %x, i32 %i0
%x1 = extractelement <4 x float> %x, i32 %i1
@@ -390,32 +360,25 @@ define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x,
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: movl 40(%rbp), %eax
; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movl 48(%rbp), %eax
; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movl 56(%rbp), %eax
; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movl 64(%rbp), %eax
; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movl 72(%rbp), %eax
; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movl 80(%rbp), %eax
; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movl 88(%rbp), %eax
; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: andl $15, %edi
; AVX1-NEXT: movzwl (%rsp,%rdi,2), %eax
; AVX1-NEXT: vmovd %eax, %xmm1
@@ -431,12 +394,10 @@ define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x,
; AVX1-NEXT: vpinsrw $5, (%rsp,%r9,2), %xmm1, %xmm1
; AVX1-NEXT: movl 16(%rbp), %eax
; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm1, %xmm1
; AVX1-NEXT: movl 24(%rbp), %eax
; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: movq %rbp, %rsp
; AVX1-NEXT: popq %rbp
@@ -461,32 +422,25 @@ define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x,
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: movl 40(%rbp), %eax
; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movl 48(%rbp), %eax
; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movl 56(%rbp), %eax
; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movl 64(%rbp), %eax
; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movl 72(%rbp), %eax
; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movl 80(%rbp), %eax
; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movl 88(%rbp), %eax
; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: andl $15, %edi
; AVX2-NEXT: movzwl (%rsp,%rdi,2), %eax
; AVX2-NEXT: vmovd %eax, %xmm1
@@ -502,12 +456,10 @@ define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x,
; AVX2-NEXT: vpinsrw $5, (%rsp,%r9,2), %xmm1, %xmm1
; AVX2-NEXT: movl 16(%rbp), %eax
; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm1, %xmm1
; AVX2-NEXT: movl 24(%rbp), %eax
; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: movq %rbp, %rsp
; AVX2-NEXT: popq %rbp
@@ -563,32 +515,25 @@ define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
; AVX1-NEXT: andl $7, %eax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $1, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
; AVX1-NEXT: andl $7, %eax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $2, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
; AVX1-NEXT: andl $7, %eax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $3, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
; AVX1-NEXT: andl $7, %eax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $4, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
; AVX1-NEXT: andl $7, %eax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
; AVX1-NEXT: andl $7, %eax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $6, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
; AVX1-NEXT: andl $7, %eax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $7, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: andl $7, %edi
; AVX1-NEXT: movzwl -24(%rsp,%rdi,2), %eax
; AVX1-NEXT: vmovd %eax, %xmm1
@@ -604,12 +549,10 @@ define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i
; AVX1-NEXT: vpinsrw $5, -24(%rsp,%r9,2), %xmm1, %xmm1
; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
; AVX1-NEXT: andl $7, %eax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpinsrw $6, -24(%rsp,%rax,2), %xmm1, %xmm1
; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
; AVX1-NEXT: andl $7, %eax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpinsrw $7, -24(%rsp,%rax,2), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
@@ -628,32 +571,25 @@ define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
; AVX2-NEXT: andl $7, %eax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $1, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
; AVX2-NEXT: andl $7, %eax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $2, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
; AVX2-NEXT: andl $7, %eax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $3, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
; AVX2-NEXT: andl $7, %eax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $4, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
; AVX2-NEXT: andl $7, %eax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
; AVX2-NEXT: andl $7, %eax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $6, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
; AVX2-NEXT: andl $7, %eax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $7, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: andl $7, %edi
; AVX2-NEXT: movzwl -24(%rsp,%rdi,2), %eax
; AVX2-NEXT: vmovd %eax, %xmm1
@@ -669,12 +605,10 @@ define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i
; AVX2-NEXT: vpinsrw $5, -24(%rsp,%r9,2), %xmm1, %xmm1
; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
; AVX2-NEXT: andl $7, %eax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrw $6, -24(%rsp,%rax,2), %xmm1, %xmm1
; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
; AVX2-NEXT: andl $7, %eax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrw $7, -24(%rsp,%rax,2), %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
%x0 = extractelement <8 x i16> %x, i32 %i0
@@ -734,11 +668,11 @@ define <4 x i64> @mem_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64* %i) nounwi
; AVX1-NEXT: vmovaps %ymm0, (%rsp)
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: movq %rbp, %rsp
; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
@@ -760,11 +694,11 @@ define <4 x i64> @mem_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64* %i) nounwi
; AVX2-NEXT: vmovaps %ymm0, (%rsp)
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: movq %rbp, %rsp
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
@@ -801,11 +735,11 @@ define <4 x i64> @mem_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64* %i) nounwi
; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: mem_shuffle_v4i64_v2i64_xxxx_i64:
@@ -821,11 +755,11 @@ define <4 x i64> @mem_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64* %i) nounwi
; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
%p0 = getelementptr inbounds i64, i64* %i, i32 0
%p1 = getelementptr inbounds i64, i64* %i, i32 1
diff --git a/test/CodeGen/X86/vector-sqrt.ll b/test/CodeGen/X86/vector-sqrt.ll
index 03efa54a0639..c5ac4466b5fa 100644
--- a/test/CodeGen/X86/vector-sqrt.ll
+++ b/test/CodeGen/X86/vector-sqrt.ll
@@ -1,13 +1,14 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
; Function Attrs: nounwind readonly uwtable
define <2 x double> @sqrtd2(double* nocapture readonly %v) local_unnamed_addr #0 {
; CHECK-LABEL: sqrtd2:
-; CHECK: vsqrtsd (%rdi), %xmm0, %xmm0
-; CHECK-NEXT: vsqrtsd 8(%rdi), %xmm1, %xmm1
-; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; CHECK-NEXT: retq
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: vsqrtsd (%rdi), %xmm0, %xmm0
+; CHECK-NEXT: vsqrtsd 8(%rdi), %xmm1, %xmm1
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retq
entry:
%0 = load double, double* %v, align 8
%call = tail call double @sqrt(double %0) #2
@@ -25,14 +26,15 @@ declare double @sqrt(double) local_unnamed_addr #1
; Function Attrs: nounwind readonly uwtable
define <4 x float> @sqrtf4(float* nocapture readonly %v) local_unnamed_addr #0 {
; CHECK-LABEL: sqrtf4:
-; CHECK: vsqrtss (%rdi), %xmm0, %xmm0
-; CHECK-NEXT: vsqrtss 4(%rdi), %xmm1, %xmm1
-; CHECK-NEXT: vsqrtss 8(%rdi), %xmm2, %xmm2
-; CHECK-NEXT: vsqrtss 12(%rdi), %xmm3, %xmm3
-; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
-; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
-; CHECK-NEXT: retq
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: vsqrtss (%rdi), %xmm0, %xmm0
+; CHECK-NEXT: vsqrtss 4(%rdi), %xmm1, %xmm1
+; CHECK-NEXT: vsqrtss 8(%rdi), %xmm2, %xmm2
+; CHECK-NEXT: vsqrtss 12(%rdi), %xmm3, %xmm3
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
+; CHECK-NEXT: retq
entry:
%0 = load float, float* %v, align 4
%call = tail call float @sqrtf(float %0) #2
diff --git a/test/CodeGen/X86/vector-trunc-math.ll b/test/CodeGen/X86/vector-trunc-math.ll
index f828ed0ba6e7..ab34ad6a613c 100644
--- a/test/CodeGen/X86/vector-trunc-math.ll
+++ b/test/CodeGen/X86/vector-trunc-math.ll
@@ -42,6 +42,7 @@ define <4 x i32> @trunc_add_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = add <4 x i64> %a0, %a1
%2 = trunc <4 x i64> %1 to <4 x i32>
@@ -99,7 +100,7 @@ define <8 x i16> @trunc_add_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -109,6 +110,7 @@ define <8 x i16> @trunc_add_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = add <8 x i64> %a0, %a1
%2 = trunc <8 x i64> %1 to <8 x i16>
@@ -143,7 +145,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-LABEL: trunc_add_v8i32_v8i16:
; AVX2: # BB#0:
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -154,6 +156,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = add <8 x i32> %a0, %a1
%2 = trunc <8 x i32> %1 to <8 x i16>
@@ -237,7 +240,7 @@ define <16 x i8> @trunc_add_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -262,6 +265,7 @@ define <16 x i8> @trunc_add_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_add_v16i64_v16i8:
@@ -272,6 +276,7 @@ define <16 x i8> @trunc_add_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_add_v16i64_v16i8:
@@ -282,6 +287,7 @@ define <16 x i8> @trunc_add_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX512DQ-NEXT: vpmovqd %zmm1, %ymm1
; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = add <16 x i64> %a0, %a1
%2 = trunc <16 x i64> %1 to <16 x i8>
@@ -330,7 +336,7 @@ define <16 x i8> @trunc_add_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX2: # BB#0:
; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -346,6 +352,7 @@ define <16 x i8> @trunc_add_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX512: # BB#0:
; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = add <16 x i32> %a0, %a1
%2 = trunc <16 x i32> %1 to <16 x i8>
@@ -392,6 +399,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512F-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_add_v16i16_v16i8:
@@ -399,6 +407,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_add_v16i16_v16i8:
@@ -406,12 +415,62 @@ define <16 x i8> @trunc_add_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512DQ-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = add <16 x i16> %a0, %a1
%2 = trunc <16 x i16> %1 to <16 x i8>
ret <16 x i8> %2
}
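+; The new test below exercises a truncated add where one operand is the low
+; half of a v16i8 value sign-extended to v8i32: %a1 is narrowed to words and
+; the add is performed at i16 width (paddw) rather than as full i32 arithmetic.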
+define <8 x i16> @trunc_add_v8i32_v8i16_sext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
+; SSE-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
+; SSE: # BB#0:
+; SSE-NEXT: pslld $16, %xmm2
+; SSE-NEXT: psrad $16, %xmm2
+; SSE-NEXT: pslld $16, %xmm1
+; SSE-NEXT: psrad $16, %xmm1
+; SSE-NEXT: packssdw %xmm2, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: psraw $8, %xmm0
+; SSE-NEXT: paddw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; AVX512-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %2 = sext <8 x i8> %1 to <8 x i32>
+ %3 = add <8 x i32> %2, %a1
+ %4 = trunc <8 x i32> %3 to <8 x i16>
+ ret <8 x i16> %4
+}
+
;
; add to constant
;
@@ -444,6 +503,7 @@ define <4 x i32> @trunc_add_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = add <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
%2 = trunc <4 x i64> %1 to <4 x i32>
@@ -491,7 +551,7 @@ define <8 x i16> @trunc_add_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
@@ -501,6 +561,7 @@ define <8 x i16> @trunc_add_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = add <8 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
%2 = trunc <8 x i64> %1 to <8 x i16>
@@ -531,7 +592,7 @@ define <8 x i16> @trunc_add_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
;
; AVX2-LABEL: trunc_add_const_v8i32_v8i16:
; AVX2: # BB#0:
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
@@ -542,6 +603,7 @@ define <8 x i16> @trunc_add_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = add <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%2 = trunc <8 x i32> %1 to <8 x i16>
@@ -573,22 +635,22 @@ define <16 x i8> @trunc_add_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX1-LABEL: trunc_add_const_v16i64_v16i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vmovaps {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
-; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -603,7 +665,7 @@ define <16 x i8> @trunc_add_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -628,6 +690,7 @@ define <16 x i8> @trunc_add_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_add_const_v16i64_v16i8:
@@ -637,6 +700,7 @@ define <16 x i8> @trunc_add_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
; AVX512BW-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_add_const_v16i64_v16i8:
@@ -646,6 +710,7 @@ define <16 x i8> @trunc_add_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = add <16 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>
%2 = trunc <16 x i64> %1 to <16 x i8>
@@ -669,13 +734,13 @@ define <16 x i8> @trunc_add_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX1-LABEL: trunc_add_const_v16i32_v16i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
@@ -684,7 +749,7 @@ define <16 x i8> @trunc_add_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
;
; AVX2-LABEL: trunc_add_const_v16i32_v16i8:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -701,6 +766,7 @@ define <16 x i8> @trunc_add_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = add <16 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%2 = trunc <16 x i32> %1 to <16 x i8>
@@ -744,6 +810,7 @@ define <16 x i8> @trunc_add_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_add_const_v16i16_v16i8:
@@ -751,6 +818,7 @@ define <16 x i8> @trunc_add_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_add_const_v16i16_v16i8:
@@ -758,6 +826,7 @@ define <16 x i8> @trunc_add_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = add <16 x i16> %a0, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
%2 = trunc <16 x i16> %1 to <16 x i8>
@@ -800,6 +869,7 @@ define <4 x i32> @trunc_sub_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <4 x i64> %a0, %a1
%2 = trunc <4 x i64> %1 to <4 x i32>
@@ -857,7 +927,7 @@ define <8 x i16> @trunc_sub_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -867,6 +937,7 @@ define <8 x i16> @trunc_sub_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vpsubq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <8 x i64> %a0, %a1
%2 = trunc <8 x i64> %1 to <8 x i16>
@@ -901,7 +972,7 @@ define <8 x i16> @trunc_sub_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-LABEL: trunc_sub_v8i32_v8i16:
; AVX2: # BB#0:
; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -912,6 +983,7 @@ define <8 x i16> @trunc_sub_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <8 x i32> %a0, %a1
%2 = trunc <8 x i32> %1 to <8 x i16>
@@ -995,7 +1067,7 @@ define <16 x i8> @trunc_sub_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -1020,6 +1092,7 @@ define <16 x i8> @trunc_sub_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_sub_v16i64_v16i8:
@@ -1030,6 +1103,7 @@ define <16 x i8> @trunc_sub_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_sub_v16i64_v16i8:
@@ -1040,6 +1114,7 @@ define <16 x i8> @trunc_sub_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX512DQ-NEXT: vpmovqd %zmm1, %ymm1
; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = sub <16 x i64> %a0, %a1
%2 = trunc <16 x i64> %1 to <16 x i8>
@@ -1088,7 +1163,7 @@ define <16 x i8> @trunc_sub_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX2: # BB#0:
; AVX2-NEXT: vpsubd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsubd %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -1104,6 +1179,7 @@ define <16 x i8> @trunc_sub_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX512: # BB#0:
; AVX512-NEXT: vpsubd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <16 x i32> %a0, %a1
%2 = trunc <16 x i32> %1 to <16 x i8>
@@ -1150,6 +1226,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512F-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_sub_v16i16_v16i8:
@@ -1157,6 +1234,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_sub_v16i16_v16i8:
@@ -1164,6 +1242,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512DQ-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = sub <16 x i16> %a0, %a1
%2 = trunc <16 x i16> %1 to <16 x i8>
@@ -1211,6 +1290,7 @@ define <4 x i32> @trunc_sub_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX512-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
%2 = trunc <4 x i64> %1 to <4 x i32>
@@ -1272,7 +1352,7 @@ define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -1282,6 +1362,7 @@ define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vpsubq {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <8 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
%2 = trunc <8 x i64> %1 to <8 x i16>
@@ -1315,7 +1396,7 @@ define <8 x i16> @trunc_sub_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX2-LABEL: trunc_sub_const_v8i32_v8i16:
; AVX2: # BB#0:
; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -1326,6 +1407,7 @@ define <8 x i16> @trunc_sub_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX512-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%2 = trunc <8 x i32> %1 to <8 x i16>
@@ -1411,7 +1493,7 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -1436,6 +1518,7 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_sub_const_v16i64_v16i8:
@@ -1446,6 +1529,7 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_sub_const_v16i64_v16i8:
@@ -1456,6 +1540,7 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX512DQ-NEXT: vpmovqd %zmm1, %ymm1
; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = sub <16 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>
%2 = trunc <16 x i64> %1 to <16 x i8>
@@ -1502,7 +1587,7 @@ define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -1518,6 +1603,7 @@ define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vpsubd {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <16 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%2 = trunc <16 x i32> %1 to <16 x i8>
@@ -1563,6 +1649,7 @@ define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512F-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_sub_const_v16i16_v16i8:
@@ -1570,6 +1657,7 @@ define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512BW-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_sub_const_v16i16_v16i8:
@@ -1577,6 +1665,7 @@ define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512DQ-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = sub <16 x i16> %a0, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
%2 = trunc <16 x i16> %1 to <16 x i8>
@@ -1640,6 +1729,7 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_mul_v4i64_v4i32:
@@ -1649,6 +1739,7 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_mul_v4i64_v4i32:
@@ -1658,6 +1749,7 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = mul <4 x i64> %a0, %a1
%2 = trunc <4 x i64> %1 to <4 x i32>
@@ -1725,7 +1817,7 @@ define <8 x i16> @trunc_mul_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
@@ -1744,6 +1836,7 @@ define <8 x i16> @trunc_mul_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX512F-NEXT: vpmovqw %zmm1, %xmm1
; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
; AVX512F-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_mul_v8i64_v8i16:
@@ -1751,12 +1844,14 @@ define <8 x i16> @trunc_mul_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX512BW-NEXT: vpmovqw %zmm1, %xmm1
; AVX512BW-NEXT: vpmovqw %zmm0, %xmm0
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_mul_v8i64_v8i16:
; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = mul <8 x i64> %a0, %a1
%2 = trunc <8 x i64> %1 to <8 x i16>
@@ -1803,7 +1898,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-LABEL: trunc_mul_v8i32_v8i16:
; AVX2: # BB#0:
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -1814,6 +1909,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = mul <8 x i32> %a0, %a1
%2 = trunc <8 x i32> %1 to <8 x i16>
@@ -2035,7 +2131,7 @@ define <16 x i8> @trunc_mul_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vpmulld %xmm6, %xmm2, %xmm2
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -2068,6 +2164,7 @@ define <16 x i8> @trunc_mul_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX512F-NEXT: vpmulld %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_mul_v16i64_v16i8:
@@ -2080,6 +2177,7 @@ define <16 x i8> @trunc_mul_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX512BW-NEXT: vpmulld %ymm2, %ymm0, %ymm0
; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_mul_v16i64_v16i8:
@@ -2090,6 +2188,7 @@ define <16 x i8> @trunc_mul_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX512DQ-NEXT: vpmovqd %zmm1, %ymm1
; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = mul <16 x i64> %a0, %a1
%2 = trunc <16 x i64> %1 to <16 x i8>
@@ -2162,7 +2261,7 @@ define <16 x i8> @trunc_mul_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX2: # BB#0:
; AVX2-NEXT: vpmulld %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmulld %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -2178,6 +2277,7 @@ define <16 x i8> @trunc_mul_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX512: # BB#0:
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = mul <16 x i32> %a0, %a1
%2 = trunc <16 x i32> %1 to <16 x i8>
@@ -2224,6 +2324,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512F-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_mul_v16i16_v16i8:
@@ -2231,6 +2332,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_mul_v16i16_v16i8:
@@ -2238,12 +2340,62 @@ define <16 x i8> @trunc_mul_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = mul <16 x i16> %a0, %a1
%2 = trunc <16 x i16> %1 to <16 x i8>
ret <16 x i8> %2
}
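+; Same pattern as the sext add test above, but with zero extension and multiply:
+; the low 8 bytes of %a0 are zero-extended, multiplied by the narrowed %a1, and
+; the checked codegen does the multiply at i16 width (pmullw).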
+define <8 x i16> @trunc_mul_v8i32_v8i16_zext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
+; SSE-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
+; SSE: # BB#0:
+; SSE-NEXT: pxor %xmm3, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE-NEXT: pslld $16, %xmm2
+; SSE-NEXT: psrad $16, %xmm2
+; SSE-NEXT: pslld $16, %xmm1
+; SSE-NEXT: psrad $16, %xmm1
+; SSE-NEXT: packssdw %xmm2, %xmm1
+; SSE-NEXT: pmullw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; AVX512-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %2 = zext <8 x i8> %1 to <8 x i32>
+ %3 = mul <8 x i32> %2, %a1
+ %4 = trunc <8 x i32> %3 to <8 x i16>
+ ret <8 x i16> %4
+}
+
;
; mul to constant
;
@@ -2291,6 +2443,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = mul <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
%2 = trunc <4 x i64> %1 to <4 x i32>
@@ -2338,7 +2491,7 @@ define <8 x i16> @trunc_mul_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
@@ -2348,6 +2501,7 @@ define <8 x i16> @trunc_mul_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = mul <8 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
%2 = trunc <8 x i64> %1 to <8 x i16>
@@ -2378,7 +2532,7 @@ define <8 x i16> @trunc_mul_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
;
; AVX2-LABEL: trunc_mul_const_v8i32_v8i16:
; AVX2: # BB#0:
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
@@ -2389,6 +2543,7 @@ define <8 x i16> @trunc_mul_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = mul <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%2 = trunc <8 x i32> %1 to <8 x i16>
@@ -2558,7 +2713,7 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm3, %xmm3
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -2585,6 +2740,7 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX512F-NEXT: vpmulld {{.*}}(%rip), %ymm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_mul_const_v16i64_v16i8:
@@ -2595,6 +2751,7 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX512BW-NEXT: vpmulld {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_mul_const_v16i64_v16i8:
@@ -2605,6 +2762,7 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX512DQ-NEXT: vpmulld {{.*}}(%rip), %ymm1, %ymm1
; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = mul <16 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>
%2 = trunc <16 x i64> %1 to <16 x i8>
@@ -2677,7 +2835,7 @@ define <16 x i8> @trunc_mul_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
;
; AVX2-LABEL: trunc_mul_const_v16i32_v16i8:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm1, %xmm1
@@ -2695,6 +2853,7 @@ define <16 x i8> @trunc_mul_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vpmulld {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = mul <16 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%2 = trunc <16 x i32> %1 to <16 x i8>
@@ -2740,6 +2899,7 @@ define <16 x i8> @trunc_mul_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512F-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_mul_const_v16i16_v16i8:
@@ -2747,6 +2907,7 @@ define <16 x i8> @trunc_mul_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_mul_const_v16i16_v16i8:
@@ -2754,6 +2915,7 @@ define <16 x i8> @trunc_mul_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512DQ-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = mul <16 x i16> %a0, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
%2 = trunc <16 x i16> %1 to <16 x i8>
@@ -2794,6 +2956,7 @@ define <4 x i32> @trunc_and_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = and <4 x i64> %a0, %a1
%2 = trunc <4 x i64> %1 to <4 x i32>
@@ -2847,7 +3010,7 @@ define <8 x i16> @trunc_and_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -2857,6 +3020,7 @@ define <8 x i16> @trunc_and_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = and <8 x i64> %a0, %a1
%2 = trunc <8 x i64> %1 to <8 x i16>
@@ -2889,7 +3053,7 @@ define <8 x i16> @trunc_and_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-LABEL: trunc_and_v8i32_v8i16:
; AVX2: # BB#0:
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -2900,6 +3064,7 @@ define <8 x i16> @trunc_and_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = and <8 x i32> %a0, %a1
%2 = trunc <8 x i32> %1 to <8 x i16>
@@ -2975,7 +3140,7 @@ define <16 x i8> @trunc_and_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -3000,6 +3165,7 @@ define <16 x i8> @trunc_and_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_and_v16i64_v16i8:
@@ -3010,6 +3176,7 @@ define <16 x i8> @trunc_and_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_and_v16i64_v16i8:
@@ -3020,6 +3187,7 @@ define <16 x i8> @trunc_and_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX512DQ-NEXT: vpmovqd %zmm1, %ymm1
; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = and <16 x i64> %a0, %a1
%2 = trunc <16 x i64> %1 to <16 x i8>
@@ -3064,7 +3232,7 @@ define <16 x i8> @trunc_and_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX2: # BB#0:
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -3080,6 +3248,7 @@ define <16 x i8> @trunc_and_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX512: # BB#0:
; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = and <16 x i32> %a0, %a1
%2 = trunc <16 x i32> %1 to <16 x i8>
@@ -3124,6 +3293,7 @@ define <16 x i8> @trunc_and_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_and_v16i16_v16i8:
@@ -3131,6 +3301,7 @@ define <16 x i8> @trunc_and_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_and_v16i16_v16i8:
@@ -3138,6 +3309,7 @@ define <16 x i8> @trunc_and_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512DQ-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = and <16 x i16> %a0, %a1
%2 = trunc <16 x i16> %1 to <16 x i8>
@@ -3176,6 +3348,7 @@ define <4 x i32> @trunc_and_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = and <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
%2 = trunc <4 x i64> %1 to <4 x i32>
@@ -3223,7 +3396,7 @@ define <8 x i16> @trunc_and_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
@@ -3233,6 +3406,7 @@ define <8 x i16> @trunc_and_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = and <8 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
%2 = trunc <8 x i64> %1 to <8 x i16>
@@ -3263,7 +3437,7 @@ define <8 x i16> @trunc_and_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
;
; AVX2-LABEL: trunc_and_const_v8i32_v8i16:
; AVX2: # BB#0:
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
@@ -3274,6 +3448,7 @@ define <8 x i16> @trunc_and_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = and <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%2 = trunc <8 x i32> %1 to <8 x i16>
@@ -3305,22 +3480,22 @@ define <16 x i8> @trunc_and_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX1-LABEL: trunc_and_const_v16i64_v16i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vmovaps {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
-; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -3335,7 +3510,7 @@ define <16 x i8> @trunc_and_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -3360,6 +3535,7 @@ define <16 x i8> @trunc_and_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_and_const_v16i64_v16i8:
@@ -3369,6 +3545,7 @@ define <16 x i8> @trunc_and_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_and_const_v16i64_v16i8:
@@ -3378,6 +3555,7 @@ define <16 x i8> @trunc_and_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = and <16 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>
%2 = trunc <16 x i64> %1 to <16 x i8>
@@ -3401,13 +3579,13 @@ define <16 x i8> @trunc_and_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX1-LABEL: trunc_and_const_v16i32_v16i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
@@ -3416,7 +3594,7 @@ define <16 x i8> @trunc_and_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
;
; AVX2-LABEL: trunc_and_const_v16i32_v16i8:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -3433,6 +3611,7 @@ define <16 x i8> @trunc_and_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = and <16 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%2 = trunc <16 x i32> %1 to <16 x i8>
@@ -3476,6 +3655,7 @@ define <16 x i8> @trunc_and_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_and_const_v16i16_v16i8:
@@ -3483,6 +3663,7 @@ define <16 x i8> @trunc_and_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_and_const_v16i16_v16i8:
@@ -3490,6 +3671,7 @@ define <16 x i8> @trunc_and_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = and <16 x i16> %a0, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
%2 = trunc <16 x i16> %1 to <16 x i8>
@@ -3530,6 +3712,7 @@ define <4 x i32> @trunc_xor_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = xor <4 x i64> %a0, %a1
%2 = trunc <4 x i64> %1 to <4 x i32>
@@ -3583,7 +3766,7 @@ define <8 x i16> @trunc_xor_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -3593,6 +3776,7 @@ define <8 x i16> @trunc_xor_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = xor <8 x i64> %a0, %a1
%2 = trunc <8 x i64> %1 to <8 x i16>
@@ -3625,7 +3809,7 @@ define <8 x i16> @trunc_xor_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-LABEL: trunc_xor_v8i32_v8i16:
; AVX2: # BB#0:
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -3636,6 +3820,7 @@ define <8 x i16> @trunc_xor_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = xor <8 x i32> %a0, %a1
%2 = trunc <8 x i32> %1 to <8 x i16>
@@ -3711,7 +3896,7 @@ define <16 x i8> @trunc_xor_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -3736,6 +3921,7 @@ define <16 x i8> @trunc_xor_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_xor_v16i64_v16i8:
@@ -3746,6 +3932,7 @@ define <16 x i8> @trunc_xor_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_xor_v16i64_v16i8:
@@ -3756,6 +3943,7 @@ define <16 x i8> @trunc_xor_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
; AVX512DQ-NEXT: vpmovqd %zmm1, %ymm1
; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = xor <16 x i64> %a0, %a1
%2 = trunc <16 x i64> %1 to <16 x i8>
@@ -3800,7 +3988,7 @@ define <16 x i8> @trunc_xor_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX2: # BB#0:
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -3816,6 +4004,7 @@ define <16 x i8> @trunc_xor_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwin
; AVX512: # BB#0:
; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = xor <16 x i32> %a0, %a1
%2 = trunc <16 x i32> %1 to <16 x i8>
@@ -3860,6 +4049,7 @@ define <16 x i8> @trunc_xor_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512F-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_xor_v16i16_v16i8:
@@ -3867,6 +4057,7 @@ define <16 x i8> @trunc_xor_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_xor_v16i16_v16i8:
@@ -3874,6 +4065,7 @@ define <16 x i8> @trunc_xor_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512DQ-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = xor <16 x i16> %a0, %a1
%2 = trunc <16 x i16> %1 to <16 x i8>
@@ -3912,6 +4104,7 @@ define <4 x i32> @trunc_xor_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = xor <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
%2 = trunc <4 x i64> %1 to <4 x i32>
@@ -3959,7 +4152,7 @@ define <8 x i16> @trunc_xor_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
@@ -3969,6 +4162,7 @@ define <8 x i16> @trunc_xor_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = xor <8 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
%2 = trunc <8 x i64> %1 to <8 x i16>
@@ -3999,7 +4193,7 @@ define <8 x i16> @trunc_xor_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
;
; AVX2-LABEL: trunc_xor_const_v8i32_v8i16:
; AVX2: # BB#0:
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
@@ -4010,6 +4204,7 @@ define <8 x i16> @trunc_xor_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = xor <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%2 = trunc <8 x i32> %1 to <8 x i16>
@@ -4041,22 +4236,22 @@ define <16 x i8> @trunc_xor_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX1-LABEL: trunc_xor_const_v16i64_v16i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vmovaps {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
-; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -4071,7 +4266,7 @@ define <16 x i8> @trunc_xor_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -4096,6 +4291,7 @@ define <16 x i8> @trunc_xor_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_xor_const_v16i64_v16i8:
@@ -4105,6 +4301,7 @@ define <16 x i8> @trunc_xor_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
; AVX512BW-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_xor_const_v16i64_v16i8:
@@ -4114,6 +4311,7 @@ define <16 x i8> @trunc_xor_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = xor <16 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>
%2 = trunc <16 x i64> %1 to <16 x i8>
@@ -4137,13 +4335,13 @@ define <16 x i8> @trunc_xor_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX1-LABEL: trunc_xor_const_v16i32_v16i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
@@ -4152,7 +4350,7 @@ define <16 x i8> @trunc_xor_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
;
; AVX2-LABEL: trunc_xor_const_v16i32_v16i8:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -4169,6 +4367,7 @@ define <16 x i8> @trunc_xor_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = xor <16 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%2 = trunc <16 x i32> %1 to <16 x i8>
@@ -4212,6 +4411,7 @@ define <16 x i8> @trunc_xor_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_xor_const_v16i16_v16i8:
@@ -4219,6 +4419,7 @@ define <16 x i8> @trunc_xor_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_xor_const_v16i16_v16i8:
@@ -4226,6 +4427,7 @@ define <16 x i8> @trunc_xor_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = xor <16 x i16> %a0, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
%2 = trunc <16 x i16> %1 to <16 x i8>
@@ -4266,6 +4468,7 @@ define <4 x i32> @trunc_or_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = or <4 x i64> %a0, %a1
%2 = trunc <4 x i64> %1 to <4 x i32>
@@ -4319,7 +4522,7 @@ define <8 x i16> @trunc_or_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -4329,6 +4532,7 @@ define <8 x i16> @trunc_or_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = or <8 x i64> %a0, %a1
%2 = trunc <8 x i64> %1 to <8 x i16>
@@ -4361,7 +4565,7 @@ define <8 x i16> @trunc_or_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-LABEL: trunc_or_v8i32_v8i16:
; AVX2: # BB#0:
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -4372,6 +4576,7 @@ define <8 x i16> @trunc_or_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = or <8 x i32> %a0, %a1
%2 = trunc <8 x i32> %1 to <8 x i16>
@@ -4447,7 +4652,7 @@ define <16 x i8> @trunc_or_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -4472,6 +4677,7 @@ define <16 x i8> @trunc_or_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_or_v16i64_v16i8:
@@ -4482,6 +4688,7 @@ define <16 x i8> @trunc_or_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_or_v16i64_v16i8:
@@ -4492,6 +4699,7 @@ define <16 x i8> @trunc_or_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind
; AVX512DQ-NEXT: vpmovqd %zmm1, %ymm1
; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = or <16 x i64> %a0, %a1
%2 = trunc <16 x i64> %1 to <16 x i8>
@@ -4536,7 +4744,7 @@ define <16 x i8> @trunc_or_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind
; AVX2: # BB#0:
; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -4552,6 +4760,7 @@ define <16 x i8> @trunc_or_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind
; AVX512: # BB#0:
; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = or <16 x i32> %a0, %a1
%2 = trunc <16 x i32> %1 to <16 x i8>
@@ -4596,6 +4805,7 @@ define <16 x i8> @trunc_or_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind
; AVX512F-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_or_v16i16_v16i8:
@@ -4603,6 +4813,7 @@ define <16 x i8> @trunc_or_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind
; AVX512BW-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_or_v16i16_v16i8:
@@ -4610,6 +4821,7 @@ define <16 x i8> @trunc_or_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind
; AVX512DQ-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = or <16 x i16> %a0, %a1
%2 = trunc <16 x i16> %1 to <16 x i8>
@@ -4648,6 +4860,7 @@ define <4 x i32> @trunc_or_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = or <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
%2 = trunc <4 x i64> %1 to <4 x i32>
@@ -4695,7 +4908,7 @@ define <8 x i16> @trunc_or_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
@@ -4705,6 +4918,7 @@ define <8 x i16> @trunc_or_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = or <8 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
%2 = trunc <8 x i64> %1 to <8 x i16>
@@ -4735,7 +4949,7 @@ define <8 x i16> @trunc_or_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
;
; AVX2-LABEL: trunc_or_const_v8i32_v8i16:
; AVX2: # BB#0:
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
@@ -4746,6 +4960,7 @@ define <8 x i16> @trunc_or_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = or <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%2 = trunc <8 x i32> %1 to <8 x i16>
@@ -4777,22 +4992,22 @@ define <16 x i8> @trunc_or_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX1-LABEL: trunc_or_const_v16i64_v16i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vmovaps {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
-; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -4807,7 +5022,7 @@ define <16 x i8> @trunc_or_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -4832,6 +5047,7 @@ define <16 x i8> @trunc_or_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_or_const_v16i64_v16i8:
@@ -4841,6 +5057,7 @@ define <16 x i8> @trunc_or_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
; AVX512BW-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_or_const_v16i64_v16i8:
@@ -4850,6 +5067,7 @@ define <16 x i8> @trunc_or_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = or <16 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>
%2 = trunc <16 x i64> %1 to <16 x i8>
@@ -4873,13 +5091,13 @@ define <16 x i8> @trunc_or_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX1-LABEL: trunc_or_const_v16i32_v16i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
@@ -4888,7 +5106,7 @@ define <16 x i8> @trunc_or_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
;
; AVX2-LABEL: trunc_or_const_v16i32_v16i8:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -4905,6 +5123,7 @@ define <16 x i8> @trunc_or_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX512: # BB#0:
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = or <16 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%2 = trunc <16 x i32> %1 to <16 x i8>
@@ -4948,6 +5167,7 @@ define <16 x i8> @trunc_or_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_or_const_v16i16_v16i8:
@@ -4955,6 +5175,7 @@ define <16 x i8> @trunc_or_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_or_const_v16i16_v16i8:
@@ -4962,6 +5183,7 @@ define <16 x i8> @trunc_or_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = or <16 x i16> %a0, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
%2 = trunc <16 x i16> %1 to <16 x i8>
@@ -5004,38 +5226,11 @@ define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwi
; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
-; AVX1-LABEL: mul_add_const_v4i64_v4i32:
-; AVX1: # BB#0:
-; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: mul_add_const_v4i64_v4i32:
-; AVX2: # BB#0:
-; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: mul_add_const_v4i64_v4i32:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: mul_add_const_v4i64_v4i32:
-; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
-; AVX512BW-NEXT: retq
-;
-; AVX512DQ-LABEL: mul_add_const_v4i64_v4i32:
-; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX512DQ-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512DQ-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
-; AVX512DQ-NEXT: retq
+; AVX-LABEL: mul_add_const_v4i64_v4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
%1 = sext <4 x i32> %a0 to <4 x i64>
%2 = sext <4 x i32> %a1 to <4 x i64>
%3 = mul <4 x i64> %1, %2
@@ -5086,38 +5281,11 @@ define <4 x i32> @mul_add_self_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwin
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX1-LABEL: mul_add_self_v4i64_v4i32:
-; AVX1: # BB#0:
-; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: mul_add_self_v4i64_v4i32:
-; AVX2: # BB#0:
-; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: mul_add_self_v4i64_v4i32:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT: vpaddd %xmm0, %xmm0, %xmm0
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: mul_add_self_v4i64_v4i32:
-; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpaddd %xmm0, %xmm0, %xmm0
-; AVX512BW-NEXT: retq
-;
-; AVX512DQ-LABEL: mul_add_self_v4i64_v4i32:
-; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX512DQ-NEXT: vpmovsxdq %xmm1, %ymm1
-; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512DQ-NEXT: vpaddd %xmm0, %xmm0, %xmm0
-; AVX512DQ-NEXT: retq
+; AVX-LABEL: mul_add_self_v4i64_v4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
%1 = sext <4 x i32> %a0 to <4 x i64>
%2 = sext <4 x i32> %a1 to <4 x i64>
%3 = mul <4 x i64> %1, %2
@@ -5129,102 +5297,39 @@ define <4 x i32> @mul_add_self_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwin
define <4 x i32> @mul_add_multiuse_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE-LABEL: mul_add_multiuse_v4i64_v4i32:
; SSE: # BB#0:
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrad $31, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: psrad $31, %xmm2
-; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrad $31, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: psrad $31, %xmm2
-; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: pmuludq %xmm4, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,1,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
; SSE-NEXT: movdqa %xmm3, %xmm5
-; SSE-NEXT: pmuludq %xmm4, %xmm5
-; SSE-NEXT: psrlq $32, %xmm4
-; SSE-NEXT: pmuludq %xmm3, %xmm4
-; SSE-NEXT: paddq %xmm2, %xmm4
-; SSE-NEXT: psllq $32, %xmm4
-; SSE-NEXT: movdqa %xmm0, %xmm6
+; SSE-NEXT: psrlq $32, %xmm5
+; SSE-NEXT: pmuludq %xmm1, %xmm5
+; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: psrlq $32, %xmm6
-; SSE-NEXT: pmuludq %xmm1, %xmm6
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: pmuludq %xmm3, %xmm6
+; SSE-NEXT: paddq %xmm5, %xmm6
+; SSE-NEXT: psllq $32, %xmm6
+; SSE-NEXT: pmuludq %xmm1, %xmm3
+; SSE-NEXT: paddq %xmm6, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: psrlq $32, %xmm1
-; SSE-NEXT: pmuludq %xmm0, %xmm1
-; SSE-NEXT: paddq %xmm6, %xmm1
-; SSE-NEXT: psllq $32, %xmm1
-; SSE-NEXT: paddq %xmm0, %xmm1
-; SSE-NEXT: paddq %xmm1, %xmm2
-; SSE-NEXT: paddq %xmm3, %xmm4
-; SSE-NEXT: paddq %xmm5, %xmm4
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[0,2]
-; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: pmuludq %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm4, %xmm5
+; SSE-NEXT: psrlq $32, %xmm5
+; SSE-NEXT: pmuludq %xmm2, %xmm5
+; SSE-NEXT: paddq %xmm1, %xmm5
+; SSE-NEXT: psllq $32, %xmm5
+; SSE-NEXT: pmuludq %xmm4, %xmm2
+; SSE-NEXT: paddq %xmm5, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
+; SSE-NEXT: paddd %xmm2, %xmm0
; SSE-NEXT: retq
;
-; AVX1-LABEL: mul_add_multiuse_v4i64_v4i32:
-; AVX1: # BB#0:
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
-; AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vpmuldq %xmm3, %xmm2, %xmm3
-; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm0[0,2]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: mul_add_multiuse_v4i64_v4i32:
-; AVX2: # BB#0:
-; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
-; AVX2-NEXT: vpmuldq %ymm1, %ymm0, %ymm1
-; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: mul_add_multiuse_v4i64_v4i32:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX512F-NEXT: vpmovsxdq %xmm1, %ymm1
-; AVX512F-NEXT: vpmuldq %ymm1, %ymm0, %ymm1
-; AVX512F-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: mul_add_multiuse_v4i64_v4i32:
-; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX512BW-NEXT: vpmovsxdq %xmm1, %ymm1
-; AVX512BW-NEXT: vpmuldq %ymm1, %ymm0, %ymm1
-; AVX512BW-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
-; AVX512BW-NEXT: retq
-;
-; AVX512DQ-LABEL: mul_add_multiuse_v4i64_v4i32:
-; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX512DQ-NEXT: vpmovsxdq %xmm1, %ymm1
-; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm1
-; AVX512DQ-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
-; AVX512DQ-NEXT: retq
+; AVX-LABEL: mul_add_multiuse_v4i64_v4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
%1 = sext <4 x i32> %a0 to <4 x i64>
%2 = sext <4 x i32> %a1 to <4 x i64>
%3 = mul <4 x i64> %1, %2
diff --git a/test/CodeGen/X86/vector-trunc.ll b/test/CodeGen/X86/vector-trunc.ll
index 2571a21ce218..d39a90b066f5 100644
--- a/test/CodeGen/X86/vector-trunc.ll
+++ b/test/CodeGen/X86/vector-trunc.ll
@@ -111,7 +111,7 @@ define <8 x i16> @trunc8i64_8i16(<8 x i64> %a) {
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -120,6 +120,7 @@ define <8 x i16> @trunc8i64_8i16(<8 x i64> %a) {
; AVX512-LABEL: trunc8i64_8i16:
; AVX512: # BB#0: # %entry
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
%0 = trunc <8 x i64> %a to <8 x i16>
@@ -144,13 +145,13 @@ define void @trunc8i64_8i8(<8 x i64> %a) {
; AVX1-LABEL: trunc8i64_8i8:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
-; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
@@ -175,6 +176,7 @@ define void @trunc8i64_8i8(<8 x i64> %a) {
; AVX512-LABEL: trunc8i64_8i8:
; AVX512: # BB#0: # %entry
; AVX512-NEXT: vpmovqb %zmm0, (%rax)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
%0 = trunc <8 x i64> %a to <8 x i8>
@@ -220,7 +222,7 @@ define <8 x i16> @trunc8i32_8i16(<8 x i32> %a) {
;
; AVX2-LABEL: trunc8i32_8i16:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
@@ -231,11 +233,13 @@ define <8 x i16> @trunc8i32_8i16(<8 x i32> %a) {
; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc8i32_8i16:
; AVX512VL: # BB#0: # %entry
; AVX512VL-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc8i32_8i16:
@@ -243,11 +247,13 @@ define <8 x i16> @trunc8i32_8i16(<8 x i32> %a) {
; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc8i32_8i16:
; AVX512BWVL: # BB#0: # %entry
; AVX512BWVL-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
entry:
%0 = trunc <8 x i32> %a to <8 x i16>
@@ -296,7 +302,7 @@ define void @trunc8i32_8i8(<8 x i32> %a) {
;
; AVX2-LABEL: trunc8i32_8i8:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vmovq %xmm0, (%rax)
@@ -309,11 +315,13 @@ define void @trunc8i32_8i8(<8 x i32> %a) {
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovq %xmm0, (%rax)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc8i32_8i8:
; AVX512VL: # BB#0: # %entry
; AVX512VL-NEXT: vpmovdb %ymm0, (%rax)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc8i32_8i8:
@@ -322,11 +330,13 @@ define void @trunc8i32_8i8(<8 x i32> %a) {
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovq %xmm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc8i32_8i8:
; AVX512BWVL: # BB#0: # %entry
; AVX512BWVL-NEXT: vpmovdb %ymm0, (%rax)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
entry:
%0 = trunc <8 x i32> %a to <8 x i8>
@@ -398,7 +408,7 @@ define void @trunc16i32_16i16(<16 x i32> %a) {
;
; AVX2-LABEL: trunc16i32_16i16:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
@@ -411,6 +421,7 @@ define void @trunc16i32_16i16(<16 x i32> %a) {
; AVX512-LABEL: trunc16i32_16i16:
; AVX512: # BB#0: # %entry
; AVX512-NEXT: vpmovdw %zmm0, (%rax)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
%0 = trunc <16 x i32> %a to <16 x i16>
@@ -435,13 +446,13 @@ define void @trunc16i32_16i8(<16 x i32> %a) {
; AVX1-LABEL: trunc16i32_16i8:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqu %xmm0, (%rax)
@@ -450,7 +461,7 @@ define void @trunc16i32_16i8(<16 x i32> %a) {
;
; AVX2-LABEL: trunc16i32_16i8:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -466,6 +477,7 @@ define void @trunc16i32_16i8(<16 x i32> %a) {
; AVX512-LABEL: trunc16i32_16i8:
; AVX512: # BB#0: # %entry
; AVX512-NEXT: vpmovdb %zmm0, (%rax)
+; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
%0 = trunc <16 x i32> %a to <16 x i8>
@@ -529,6 +541,7 @@ define void @trunc16i16_16i8(<16 x i16> %a) {
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vmovdqu %xmm0, (%rax)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc16i16_16i8:
@@ -536,6 +549,7 @@ define void @trunc16i16_16i8(<16 x i16> %a) {
; AVX512VL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512VL-NEXT: vmovdqu %xmm0, (%rax)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc16i16_16i8:
@@ -543,11 +557,13 @@ define void @trunc16i16_16i8(<16 x i16> %a) {
; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vmovdqu %xmm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc16i16_16i8:
; AVX512BWVL: # BB#0: # %entry
; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rax)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
entry:
%0 = trunc <16 x i16> %a to <16 x i8>
@@ -635,6 +651,7 @@ define void @trunc32i16_32i8(<32 x i16> %a) {
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc32i16_32i8:
@@ -645,16 +662,19 @@ define void @trunc32i16_32i8(<32 x i16> %a) {
; AVX512VL-NEXT: vpmovdb %zmm1, %xmm1
; AVX512VL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc32i16_32i8:
; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: vpmovwb %zmm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc32i16_32i8:
; AVX512BWVL: # BB#0: # %entry
; AVX512BWVL-NEXT: vpmovwb %zmm0, (%rax)
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
entry:
%0 = trunc <32 x i16> %a to <32 x i8>
@@ -810,6 +830,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x i64> %a, <4 x i64> %b) {
; AVX512F-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc2x4i64_8i16:
@@ -823,6 +844,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x i64> %a, <4 x i64> %b) {
; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc2x4i64_8i16:
@@ -835,6 +857,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x i64> %a, <4 x i64> %b) {
; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc2x4i64_8i16:
@@ -848,6 +871,7 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x i64> %a, <4 x i64> %b) {
; AVX512BWVL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512BWVL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
entry:
%0 = trunc <4 x i64> %a to <4 x i16>
diff --git a/test/CodeGen/X86/vector-tzcnt-128.ll b/test/CodeGen/X86/vector-tzcnt-128.ll
index bf32e672138c..56f634c4188f 100644
--- a/test/CodeGen/X86/vector-tzcnt-128.ll
+++ b/test/CodeGen/X86/vector-tzcnt-128.ll
@@ -281,6 +281,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
; AVX512CD-NEXT: vmovdqa {{.*#+}} xmm1 = [63,63]
; AVX512CD-NEXT: vpsubq %xmm0, %xmm1, %xmm0
+; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
;
; X32-SSE-LABEL: testv2i64u:
@@ -696,6 +697,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
; AVX512CD-NEXT: vpsubd %xmm0, %xmm1, %xmm0
+; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
;
; X32-SSE-LABEL: testv4i32u:
@@ -1258,23 +1260,8 @@ define <2 x i64> @foldv2i64() nounwind {
;
; X32-SSE-LABEL: foldv2i64:
; X32-SSE: # BB#0:
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [256,0,4294967295,4294967295]
-; X32-SSE-NEXT: pxor %xmm1, %xmm1
-; X32-SSE-NEXT: pxor %xmm2, %xmm2
-; X32-SSE-NEXT: psubq %xmm0, %xmm2
-; X32-SSE-NEXT: pand %xmm0, %xmm2
-; X32-SSE-NEXT: psubq {{\.LCPI.*}}, %xmm2
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-SSE-NEXT: movdqa %xmm2, %xmm4
-; X32-SSE-NEXT: pand %xmm3, %xmm4
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm5
-; X32-SSE-NEXT: pshufb %xmm4, %xmm5
-; X32-SSE-NEXT: psrlw $4, %xmm2
-; X32-SSE-NEXT: pand %xmm3, %xmm2
-; X32-SSE-NEXT: pshufb %xmm2, %xmm0
-; X32-SSE-NEXT: paddb %xmm5, %xmm0
-; X32-SSE-NEXT: psadbw %xmm1, %xmm0
+; X32-SSE-NEXT: movl $8, %eax
+; X32-SSE-NEXT: movd %eax, %xmm0
; X32-SSE-NEXT: retl
%out = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 0)
ret <2 x i64> %out
@@ -1295,23 +1282,8 @@ define <2 x i64> @foldv2i64u() nounwind {
;
; X32-SSE-LABEL: foldv2i64u:
; X32-SSE: # BB#0:
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [256,0,4294967295,4294967295]
-; X32-SSE-NEXT: pxor %xmm1, %xmm1
-; X32-SSE-NEXT: pxor %xmm2, %xmm2
-; X32-SSE-NEXT: psubq %xmm0, %xmm2
-; X32-SSE-NEXT: pand %xmm0, %xmm2
-; X32-SSE-NEXT: psubq {{\.LCPI.*}}, %xmm2
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-SSE-NEXT: movdqa %xmm2, %xmm4
-; X32-SSE-NEXT: pand %xmm3, %xmm4
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm5
-; X32-SSE-NEXT: pshufb %xmm4, %xmm5
-; X32-SSE-NEXT: psrlw $4, %xmm2
-; X32-SSE-NEXT: pand %xmm3, %xmm2
-; X32-SSE-NEXT: pshufb %xmm2, %xmm0
-; X32-SSE-NEXT: paddb %xmm5, %xmm0
-; X32-SSE-NEXT: psadbw %xmm1, %xmm0
+; X32-SSE-NEXT: movl $8, %eax
+; X32-SSE-NEXT: movd %eax, %xmm0
; X32-SSE-NEXT: retl
%out = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 -1)
ret <2 x i64> %out
diff --git a/test/CodeGen/X86/vector-tzcnt-256.ll b/test/CodeGen/X86/vector-tzcnt-256.ll
index 0ced0c5b263f..a0b277ddd732 100644
--- a/test/CodeGen/X86/vector-tzcnt-256.ll
+++ b/test/CodeGen/X86/vector-tzcnt-256.ll
@@ -871,20 +871,7 @@ define <4 x i64> @foldv4i64() nounwind {
;
; X32-AVX-LABEL: foldv4i64:
; X32-AVX: # BB#0:
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [256,0,4294967295,4294967295,0,0,255,0]
-; X32-AVX-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; X32-AVX-NEXT: vpsubq %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsubq {{\.LCPI.*}}, %ymm0, %ymm0
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm3
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-AVX-NEXT: vpshufb %ymm3, %ymm4, %ymm3
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm4, %ymm0
-; X32-AVX-NEXT: vpaddb %ymm3, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
+; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,0,0,64,0,0,0]
; X32-AVX-NEXT: retl
%out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
ret <4 x i64> %out
@@ -898,20 +885,7 @@ define <4 x i64> @foldv4i64u() nounwind {
;
; X32-AVX-LABEL: foldv4i64u:
; X32-AVX: # BB#0:
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [256,0,4294967295,4294967295,0,0,255,0]
-; X32-AVX-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; X32-AVX-NEXT: vpsubq %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsubq {{\.LCPI.*}}, %ymm0, %ymm0
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm3
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-AVX-NEXT: vpshufb %ymm3, %ymm4, %ymm3
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm4, %ymm0
-; X32-AVX-NEXT: vpaddb %ymm3, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
+; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,0,0,64,0,0,0]
; X32-AVX-NEXT: retl
%out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
ret <4 x i64> %out
diff --git a/test/CodeGen/X86/vector-zext.ll b/test/CodeGen/X86/vector-zext.ll
index 1febf559bdea..fe3523de3575 100644
--- a/test/CodeGen/X86/vector-zext.ll
+++ b/test/CodeGen/X86/vector-zext.ll
@@ -55,18 +55,18 @@ define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %A) {
;
; SSE41-LABEL: zext_16i8_to_16i16:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_16i8_to_16i16:
; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_16i8_to_16i16:
@@ -110,25 +110,27 @@ define <32 x i16> @zext_32i8_to_32i16(<32 x i8> %A) {
;
; SSE41-LABEL: zext_32i8_to_32i16:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pxor %xmm4, %xmm4
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT: movdqa %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm4, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_32i8_to_32i16:
; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
; AVX1-NEXT: vmovaps %ymm2, %ymm0
; AVX1-NEXT: retq
;
@@ -242,14 +244,14 @@ entry:
define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_16i8_to_16i32:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
@@ -257,14 +259,14 @@ define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
;
; SSSE3-LABEL: zext_16i8_to_16i32:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pxor %xmm4, %xmm4
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; SSSE3-NEXT: movdqa %xmm3, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
; SSSE3-NEXT: movdqa %xmm3, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
@@ -458,16 +460,10 @@ define <8 x i64> @zext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; AVX2-NEXT: vmovdqa %ymm2, %ymm0
; AVX2-NEXT: retq
;
-; AVX512F-LABEL: zext_16i8_to_8i64:
-; AVX512F: # BB#0: # %entry
-; AVX512F-NEXT: vpmovzxbq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: zext_16i8_to_8i64:
-; AVX512BW: # BB#0: # %entry
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; AVX512BW-NEXT: vpmovzxbq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-NEXT: retq
+; AVX512-LABEL: zext_16i8_to_8i64:
+; AVX512: # BB#0: # %entry
+; AVX512-NEXT: vpmovzxbq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: retq
entry:
%B = shufflevector <16 x i8> %A, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%C = zext <8 x i8> %B to <8 x i64>
@@ -521,18 +517,18 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp
;
; SSE41-LABEL: zext_8i16_to_8i32:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_8i16_to_8i32:
; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i16_to_8i32:
@@ -576,25 +572,27 @@ define <16 x i32> @zext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone
;
; SSE41-LABEL: zext_16i16_to_16i32:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pxor %xmm4, %xmm4
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; SSE41-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: movdqa %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm4, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_16i16_to_16i32:
; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
; AVX1-NEXT: vmovaps %ymm2, %ymm0
; AVX1-NEXT: retq
;
@@ -700,14 +698,14 @@ entry:
define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_8i16_to_8i64:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
@@ -715,14 +713,14 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
;
; SSSE3-LABEL: zext_8i16_to_8i64:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pxor %xmm4, %xmm4
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; SSSE3-NEXT: movdqa %xmm3, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSSE3-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
; SSSE3-NEXT: movdqa %xmm3, %xmm2
; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
; SSSE3-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
@@ -818,18 +816,18 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp
;
; SSE41-LABEL: zext_4i32_to_4i64:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_4i32_to_4i64:
; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_4i32_to_4i64:
@@ -873,25 +871,27 @@ define <8 x i64> @zext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
;
; SSE41-LABEL: zext_8i32_to_8i64:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pxor %xmm4, %xmm4
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: movdqa %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm4, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_8i32_to_8i64:
; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
; AVX1-NEXT: vmovaps %ymm2, %ymm0
; AVX1-NEXT: retq
;
@@ -1529,20 +1529,20 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
;
; SSE41-LABEL: zext_8i8_to_8i32:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_8i8_to_8i32:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i8_to_8i32:
@@ -1636,11 +1636,10 @@ define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone
;
; AVX1-LABEL: shuf_zext_4i32_to_4i64:
; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuf_zext_4i32_to_4i64:
@@ -2097,3 +2096,146 @@ entry:
%Z = bitcast <8 x i32> %B to <4 x i64>
ret <4 x i64> %Z
}
+
+define <32 x i32> @zext_32i8_to_32i32(<32 x i8> %x) {
+; SSE2-LABEL: zext_32i8_to_32i32:
+; SSE2: # BB#0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-NEXT: movdqa %xmm3, %xmm8
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm6
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
+; SSE2-NEXT: movdqa %xmm6, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: movdqa %xmm1, 112(%rdi)
+; SSE2-NEXT: movdqa %xmm4, 96(%rdi)
+; SSE2-NEXT: movdqa %xmm6, 80(%rdi)
+; SSE2-NEXT: movdqa %xmm7, 64(%rdi)
+; SSE2-NEXT: movdqa %xmm0, 48(%rdi)
+; SSE2-NEXT: movdqa %xmm5, 32(%rdi)
+; SSE2-NEXT: movdqa %xmm3, 16(%rdi)
+; SSE2-NEXT: movdqa %xmm8, (%rdi)
+; SSE2-NEXT: movq %rdi, %rax
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_32i8_to_32i32:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: movdqa %xmm0, %xmm3
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSSE3-NEXT: movdqa %xmm3, %xmm8
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSSE3-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; SSSE3-NEXT: movdqa %xmm0, %xmm5
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: movdqa %xmm1, %xmm6
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
+; SSSE3-NEXT: movdqa %xmm6, %xmm7
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
+; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; SSSE3-NEXT: movdqa %xmm1, %xmm4
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: movdqa %xmm1, 112(%rdi)
+; SSSE3-NEXT: movdqa %xmm4, 96(%rdi)
+; SSSE3-NEXT: movdqa %xmm6, 80(%rdi)
+; SSSE3-NEXT: movdqa %xmm7, 64(%rdi)
+; SSSE3-NEXT: movdqa %xmm0, 48(%rdi)
+; SSSE3-NEXT: movdqa %xmm5, 32(%rdi)
+; SSSE3-NEXT: movdqa %xmm3, 16(%rdi)
+; SSSE3-NEXT: movdqa %xmm8, (%rdi)
+; SSSE3-NEXT: movq %rdi, %rax
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_32i8_to_32i32:
+; SSE41: # BB#0:
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm1[1,1,2,3]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm1[2,3,0,1]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE41-NEXT: movdqa %xmm1, 112(%rdi)
+; SSE41-NEXT: movdqa %xmm7, 96(%rdi)
+; SSE41-NEXT: movdqa %xmm6, 80(%rdi)
+; SSE41-NEXT: movdqa %xmm5, 64(%rdi)
+; SSE41-NEXT: movdqa %xmm0, 48(%rdi)
+; SSE41-NEXT: movdqa %xmm4, 32(%rdi)
+; SSE41-NEXT: movdqa %xmm3, 16(%rdi)
+; SSE41-NEXT: movdqa %xmm2, (%rdi)
+; SSE41-NEXT: movq %rdi, %rax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: zext_32i8_to_32i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[1,1,2,3]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX1-NEXT: vmovaps %ymm4, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: zext_32i8_to_32i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
+; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[3,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
+; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm3
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm4 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-NEXT: vmovdqa %ymm4, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: zext_32i8_to_32i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %res = zext <32 x i8> %x to <32 x i32>
+ ret <32 x i32> %res
+}
diff --git a/test/CodeGen/X86/vectorcall.ll b/test/CodeGen/X86/vectorcall.ll
index 6ba5e10dd21e..598a339ee2f7 100644
--- a/test/CodeGen/X86/vectorcall.ll
+++ b/test/CodeGen/X86/vectorcall.ll
@@ -103,7 +103,7 @@ entry:
}
; CHECK-LABEL: test_mixed_1
; CHECK: movaps %xmm1, 16(%{{(e|r)}}sp)
-; CHECK: movaps 16(%{{(e|r)}}sp), %xmm0
+; CHECK: movaps %xmm1, %xmm0
; CHECK: ret{{q|l}}
define x86_vectorcallcc <4 x float> @test_mixed_2(%struct.HVA4 inreg %a, %struct.HVA4* %b, <4 x float> %c) {
@@ -149,7 +149,7 @@ entry:
}
; CHECK-LABEL: test_mixed_5
; CHECK: movaps %xmm5, 16(%{{(e|r)}}sp)
-; CHECK: movaps 16(%{{(e|r)}}sp), %xmm0
+; CHECK: movaps %xmm5, %xmm0
; CHECK: ret{{[ql]}}
define x86_vectorcallcc %struct.HVA4 @test_mixed_6(%struct.HVA4 inreg %a, %struct.HVA4* %b) {
diff --git a/test/CodeGen/X86/viabs.ll b/test/CodeGen/X86/viabs.ll
index 61fb66bbcbbd..34a9df1782a4 100644
--- a/test/CodeGen/X86/viabs.ll
+++ b/test/CodeGen/X86/viabs.ll
@@ -147,14 +147,10 @@ define <8 x i32> @test_abs_gt_v8i32(<8 x i32> %a) nounwind {
;
; AVX1-LABEL: test_abs_gt_v8i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
-; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm1
-; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpabsd %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpabsd %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_gt_v8i32:
@@ -193,14 +189,10 @@ define <8 x i32> @test_abs_ge_v8i32(<8 x i32> %a) nounwind {
;
; AVX1-LABEL: test_abs_ge_v8i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
-; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm1
-; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpabsd %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpabsd %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_ge_v8i32:
@@ -239,14 +231,10 @@ define <16 x i16> @test_abs_gt_v16i16(<16 x i16> %a) nounwind {
;
; AVX1-LABEL: test_abs_gt_v16i16:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpsraw $15, %xmm1, %xmm2
-; AVX1-NEXT: vpaddw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsraw $15, %xmm0, %xmm3
-; AVX1-NEXT: vpaddw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm1
-; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpabsw %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpabsw %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_gt_v16i16:
@@ -285,15 +273,10 @@ define <32 x i8> @test_abs_lt_v32i8(<32 x i8> %a) nounwind {
;
; AVX1-LABEL: test_abs_lt_v32i8:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm3
-; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm4
-; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: vxorps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vpabsb %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpabsb %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_lt_v32i8:
@@ -332,14 +315,10 @@ define <8 x i32> @test_abs_le_v8i32(<8 x i32> %a) nounwind {
;
; AVX1-LABEL: test_abs_le_v8i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
-; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm1
-; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpabsd %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpabsd %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_le_v8i32:
@@ -388,22 +367,14 @@ define <16 x i32> @test_abs_le_16i32(<16 x i32> %a) nounwind {
;
; AVX1-LABEL: test_abs_le_16i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpsrad $31, %xmm2, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm4
-; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm2
-; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vpsrad $31, %xmm2, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsrad $31, %xmm1, %xmm4
-; AVX1-NEXT: vpaddd %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm2
-; AVX1-NEXT: vxorps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vpabsd %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpabsd %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vpabsd %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpabsd %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_le_16i32:
@@ -450,9 +421,7 @@ define <2 x i64> @test_abs_ge_v2i64(<2 x i64> %a) nounwind {
;
; AVX512-LABEL: test_abs_ge_v2i64:
; AVX512: # BB#0:
-; AVX512-NEXT: vpsraq $63, %xmm0, %xmm1
-; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpabsq %xmm0, %xmm0
; AVX512-NEXT: retq
%tmp1neg = sub <2 x i64> zeroinitializer, %a
%b = icmp sge <2 x i64> %a, zeroinitializer
@@ -499,9 +468,7 @@ define <4 x i64> @test_abs_gt_v4i64(<4 x i64> %a) nounwind {
;
; AVX512-LABEL: test_abs_gt_v4i64:
; AVX512: # BB#0:
-; AVX512-NEXT: vpsraq $63, %ymm0, %ymm1
-; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpabsq %ymm0, %ymm0
; AVX512-NEXT: retq
%tmp1neg = sub <4 x i64> zeroinitializer, %a
%b = icmp sgt <4 x i64> %a, <i64 -1, i64 -1, i64 -1, i64 -1>
@@ -611,8 +578,8 @@ define <8 x i64> @test_abs_le_v8i64_fold(<8 x i64>* %a.ptr) nounwind {
;
; AVX1-LABEL: test_abs_le_v8i64_fold:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovups (%rdi), %ymm0
-; AVX1-NEXT: vmovups 32(%rdi), %ymm1
+; AVX1-NEXT: vmovdqu (%rdi), %ymm0
+; AVX1-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrad $31, %xmm2, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
@@ -691,23 +658,14 @@ define <64 x i8> @test_abs_lt_v64i8(<64 x i8> %a) nounwind {
;
; AVX1-LABEL: test_abs_lt_v64i8:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm4
-; AVX1-NEXT: vpcmpgtb %xmm0, %xmm3, %xmm5
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm6
-; AVX1-NEXT: vpaddb %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpaddb %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vxorps %ymm6, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm4
-; AVX1-NEXT: vpcmpgtb %xmm1, %xmm3, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm5
-; AVX1-NEXT: vpaddb %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-NEXT: vxorps %ymm5, %ymm1, %ymm1
+; AVX1-NEXT: vpabsb %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpabsb %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vpabsb %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpabsb %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_lt_v64i8:
@@ -763,22 +721,14 @@ define <32 x i16> @test_abs_gt_v32i16(<32 x i16> %a) nounwind {
;
; AVX1-LABEL: test_abs_gt_v32i16:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpsraw $15, %xmm2, %xmm3
-; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsraw $15, %xmm0, %xmm4
-; AVX1-NEXT: vpaddw %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm2
-; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vpsraw $15, %xmm2, %xmm3
-; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsraw $15, %xmm1, %xmm4
-; AVX1-NEXT: vpaddw %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm2
-; AVX1-NEXT: vxorps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vpabsw %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpabsw %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vpabsw %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpabsw %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_gt_v32i16:
diff --git a/test/CodeGen/X86/vselect-minmax.ll b/test/CodeGen/X86/vselect-minmax.ll
index 8e9f1d980913..5524eaf397c9 100644
--- a/test/CodeGen/X86/vselect-minmax.ll
+++ b/test/CodeGen/X86/vselect-minmax.ll
@@ -4839,13 +4839,13 @@ define <8 x i64> @test121(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: pcmpgtq %xmm1, %xmm11
; SSE4-NEXT: movdqa %xmm4, %xmm0
; SSE4-NEXT: pcmpgtq %xmm8, %xmm0
-; SSE4-NEXT: blendvpd %xmm8, %xmm4
+; SSE4-NEXT: blendvpd %xmm0, %xmm8, %xmm4
; SSE4-NEXT: movdqa %xmm11, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm5
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm5
; SSE4-NEXT: movdqa %xmm10, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm6
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm6
; SSE4-NEXT: movdqa %xmm9, %xmm0
-; SSE4-NEXT: blendvpd %xmm3, %xmm7
+; SSE4-NEXT: blendvpd %xmm0, %xmm3, %xmm7
; SSE4-NEXT: movapd %xmm4, %xmm0
; SSE4-NEXT: movapd %xmm5, %xmm1
; SSE4-NEXT: movapd %xmm6, %xmm2
@@ -4983,13 +4983,13 @@ define <8 x i64> @test122(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: pxor %xmm12, %xmm11
; SSE4-NEXT: pcmpgtq %xmm4, %xmm0
; SSE4-NEXT: pxor %xmm12, %xmm0
-; SSE4-NEXT: blendvpd %xmm8, %xmm4
+; SSE4-NEXT: blendvpd %xmm0, %xmm8, %xmm4
; SSE4-NEXT: movdqa %xmm11, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm5
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm5
; SSE4-NEXT: movdqa %xmm10, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm6
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm6
; SSE4-NEXT: movdqa %xmm9, %xmm0
-; SSE4-NEXT: blendvpd %xmm3, %xmm7
+; SSE4-NEXT: blendvpd %xmm0, %xmm3, %xmm7
; SSE4-NEXT: movapd %xmm4, %xmm0
; SSE4-NEXT: movapd %xmm5, %xmm1
; SSE4-NEXT: movapd %xmm6, %xmm2
@@ -5113,13 +5113,13 @@ define <8 x i64> @test123(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: movdqa %xmm1, %xmm11
; SSE4-NEXT: pcmpgtq %xmm5, %xmm11
; SSE4-NEXT: pcmpgtq %xmm4, %xmm0
-; SSE4-NEXT: blendvpd %xmm8, %xmm4
+; SSE4-NEXT: blendvpd %xmm0, %xmm8, %xmm4
; SSE4-NEXT: movdqa %xmm11, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm5
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm5
; SSE4-NEXT: movdqa %xmm10, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm6
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm6
; SSE4-NEXT: movdqa %xmm9, %xmm0
-; SSE4-NEXT: blendvpd %xmm3, %xmm7
+; SSE4-NEXT: blendvpd %xmm0, %xmm3, %xmm7
; SSE4-NEXT: movapd %xmm4, %xmm0
; SSE4-NEXT: movapd %xmm5, %xmm1
; SSE4-NEXT: movapd %xmm6, %xmm2
@@ -5259,13 +5259,13 @@ define <8 x i64> @test124(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: movdqa %xmm4, %xmm12
; SSE4-NEXT: pcmpgtq %xmm8, %xmm12
; SSE4-NEXT: pxor %xmm12, %xmm0
-; SSE4-NEXT: blendvpd %xmm8, %xmm4
+; SSE4-NEXT: blendvpd %xmm0, %xmm8, %xmm4
; SSE4-NEXT: movdqa %xmm11, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm5
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm5
; SSE4-NEXT: movdqa %xmm10, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm6
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm6
; SSE4-NEXT: movdqa %xmm9, %xmm0
-; SSE4-NEXT: blendvpd %xmm3, %xmm7
+; SSE4-NEXT: blendvpd %xmm0, %xmm3, %xmm7
; SSE4-NEXT: movapd %xmm4, %xmm0
; SSE4-NEXT: movapd %xmm5, %xmm1
; SSE4-NEXT: movapd %xmm6, %xmm2
@@ -5402,13 +5402,13 @@ define <8 x i64> @test125(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: pxor %xmm0, %xmm12
; SSE4-NEXT: pxor %xmm4, %xmm0
; SSE4-NEXT: pcmpgtq %xmm12, %xmm0
-; SSE4-NEXT: blendvpd %xmm8, %xmm4
+; SSE4-NEXT: blendvpd %xmm0, %xmm8, %xmm4
; SSE4-NEXT: movdqa %xmm11, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm5
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm5
; SSE4-NEXT: movdqa %xmm10, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm6
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm6
; SSE4-NEXT: movdqa %xmm9, %xmm0
-; SSE4-NEXT: blendvpd %xmm3, %xmm7
+; SSE4-NEXT: blendvpd %xmm0, %xmm3, %xmm7
; SSE4-NEXT: movapd %xmm4, %xmm0
; SSE4-NEXT: movapd %xmm5, %xmm1
; SSE4-NEXT: movapd %xmm6, %xmm2
@@ -5418,22 +5418,22 @@ define <8 x i64> @test125(<8 x i64> %a, <8 x i64> %b) {
; AVX1-LABEL: test125:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vmovaps {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT: vxorps %xmm5, %xmm1, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm3, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm7, %xmm6
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm6, %ymm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm7, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm7, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm7, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm0, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm2, %xmm5
+; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5
; AVX1-NEXT: vblendvpd %ymm5, %ymm0, %ymm2, %ymm0
@@ -5573,13 +5573,13 @@ define <8 x i64> @test126(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: pxor %xmm9, %xmm0
; SSE4-NEXT: pcmpgtq %xmm13, %xmm0
; SSE4-NEXT: pxor %xmm12, %xmm0
-; SSE4-NEXT: blendvpd %xmm9, %xmm4
+; SSE4-NEXT: blendvpd %xmm0, %xmm9, %xmm4
; SSE4-NEXT: movdqa %xmm11, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm5
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm5
; SSE4-NEXT: movdqa %xmm10, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm6
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm6
; SSE4-NEXT: movdqa %xmm8, %xmm0
-; SSE4-NEXT: blendvpd %xmm3, %xmm7
+; SSE4-NEXT: blendvpd %xmm0, %xmm3, %xmm7
; SSE4-NEXT: movapd %xmm4, %xmm0
; SSE4-NEXT: movapd %xmm5, %xmm1
; SSE4-NEXT: movapd %xmm6, %xmm2
@@ -5589,26 +5589,26 @@ define <8 x i64> @test126(<8 x i64> %a, <8 x i64> %b) {
; AVX1-LABEL: test126:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vmovaps {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm8, %xmm8, %xmm8
; AVX1-NEXT: vpxor %xmm8, %xmm4, %xmm4
-; AVX1-NEXT: vxorps %xmm5, %xmm3, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm1, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm6, %xmm6
; AVX1-NEXT: vpxor %xmm8, %xmm6, %xmm6
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm6, %ymm4
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm7, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm7, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm7, %xmm6
; AVX1-NEXT: vpxor %xmm8, %xmm6, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm2, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm0, %xmm5
+; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpxor %xmm8, %xmm5, %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5
@@ -5730,13 +5730,13 @@ define <8 x i64> @test127(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: pxor %xmm0, %xmm12
; SSE4-NEXT: pxor %xmm8, %xmm0
; SSE4-NEXT: pcmpgtq %xmm12, %xmm0
-; SSE4-NEXT: blendvpd %xmm8, %xmm4
+; SSE4-NEXT: blendvpd %xmm0, %xmm8, %xmm4
; SSE4-NEXT: movdqa %xmm11, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm5
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm5
; SSE4-NEXT: movdqa %xmm10, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm6
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm6
; SSE4-NEXT: movdqa %xmm9, %xmm0
-; SSE4-NEXT: blendvpd %xmm3, %xmm7
+; SSE4-NEXT: blendvpd %xmm0, %xmm3, %xmm7
; SSE4-NEXT: movapd %xmm4, %xmm0
; SSE4-NEXT: movapd %xmm5, %xmm1
; SSE4-NEXT: movapd %xmm6, %xmm2
@@ -5746,22 +5746,22 @@ define <8 x i64> @test127(<8 x i64> %a, <8 x i64> %b) {
; AVX1-LABEL: test127:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vmovaps {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT: vxorps %xmm5, %xmm3, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm1, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm7, %xmm6
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm6, %ymm4
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm7, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm7, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm7, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm2, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm0, %xmm5
+; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5
; AVX1-NEXT: vblendvpd %ymm5, %ymm0, %ymm2, %ymm0
@@ -5902,13 +5902,13 @@ define <8 x i64> @test128(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: pxor %xmm4, %xmm0
; SSE4-NEXT: pcmpgtq %xmm13, %xmm0
; SSE4-NEXT: pxor %xmm12, %xmm0
-; SSE4-NEXT: blendvpd %xmm9, %xmm4
+; SSE4-NEXT: blendvpd %xmm0, %xmm9, %xmm4
; SSE4-NEXT: movdqa %xmm11, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm5
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm5
; SSE4-NEXT: movdqa %xmm10, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm6
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm6
; SSE4-NEXT: movdqa %xmm8, %xmm0
-; SSE4-NEXT: blendvpd %xmm3, %xmm7
+; SSE4-NEXT: blendvpd %xmm0, %xmm3, %xmm7
; SSE4-NEXT: movapd %xmm4, %xmm0
; SSE4-NEXT: movapd %xmm5, %xmm1
; SSE4-NEXT: movapd %xmm6, %xmm2
@@ -5918,26 +5918,26 @@ define <8 x i64> @test128(<8 x i64> %a, <8 x i64> %b) {
; AVX1-LABEL: test128:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vmovaps {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm8, %xmm8, %xmm8
; AVX1-NEXT: vpxor %xmm8, %xmm4, %xmm4
-; AVX1-NEXT: vxorps %xmm5, %xmm1, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm3, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm6, %xmm6
; AVX1-NEXT: vpxor %xmm8, %xmm6, %xmm6
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm6, %ymm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm7, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm7, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm7, %xmm6
; AVX1-NEXT: vpxor %xmm8, %xmm6, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm0, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm2, %xmm5
+; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpxor %xmm8, %xmm5, %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5
@@ -7562,13 +7562,13 @@ define <8 x i64> @test153(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: pcmpgtq %xmm1, %xmm11
; SSE4-NEXT: movdqa %xmm4, %xmm0
; SSE4-NEXT: pcmpgtq %xmm8, %xmm0
-; SSE4-NEXT: blendvpd %xmm4, %xmm8
+; SSE4-NEXT: blendvpd %xmm0, %xmm4, %xmm8
; SSE4-NEXT: movdqa %xmm11, %xmm0
-; SSE4-NEXT: blendvpd %xmm5, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm5, %xmm1
; SSE4-NEXT: movdqa %xmm10, %xmm0
-; SSE4-NEXT: blendvpd %xmm6, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm6, %xmm2
; SSE4-NEXT: movdqa %xmm9, %xmm0
-; SSE4-NEXT: blendvpd %xmm7, %xmm3
+; SSE4-NEXT: blendvpd %xmm0, %xmm7, %xmm3
; SSE4-NEXT: movapd %xmm8, %xmm0
; SSE4-NEXT: retq
;
@@ -7703,13 +7703,13 @@ define <8 x i64> @test154(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: pxor %xmm12, %xmm11
; SSE4-NEXT: pcmpgtq %xmm4, %xmm0
; SSE4-NEXT: pxor %xmm12, %xmm0
-; SSE4-NEXT: blendvpd %xmm4, %xmm8
+; SSE4-NEXT: blendvpd %xmm0, %xmm4, %xmm8
; SSE4-NEXT: movdqa %xmm11, %xmm0
-; SSE4-NEXT: blendvpd %xmm5, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm5, %xmm1
; SSE4-NEXT: movdqa %xmm10, %xmm0
-; SSE4-NEXT: blendvpd %xmm6, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm6, %xmm2
; SSE4-NEXT: movdqa %xmm9, %xmm0
-; SSE4-NEXT: blendvpd %xmm7, %xmm3
+; SSE4-NEXT: blendvpd %xmm0, %xmm7, %xmm3
; SSE4-NEXT: movapd %xmm8, %xmm0
; SSE4-NEXT: retq
;
@@ -7834,13 +7834,13 @@ define <8 x i64> @test155(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: movdqa %xmm1, %xmm11
; SSE4-NEXT: pcmpgtq %xmm5, %xmm11
; SSE4-NEXT: pcmpgtq %xmm4, %xmm0
-; SSE4-NEXT: blendvpd %xmm4, %xmm8
+; SSE4-NEXT: blendvpd %xmm0, %xmm4, %xmm8
; SSE4-NEXT: movdqa %xmm11, %xmm0
-; SSE4-NEXT: blendvpd %xmm5, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm5, %xmm1
; SSE4-NEXT: movdqa %xmm10, %xmm0
-; SSE4-NEXT: blendvpd %xmm6, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm6, %xmm2
; SSE4-NEXT: movdqa %xmm9, %xmm0
-; SSE4-NEXT: blendvpd %xmm7, %xmm3
+; SSE4-NEXT: blendvpd %xmm0, %xmm7, %xmm3
; SSE4-NEXT: movapd %xmm8, %xmm0
; SSE4-NEXT: retq
;
@@ -7977,13 +7977,13 @@ define <8 x i64> @test156(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: movdqa %xmm4, %xmm12
; SSE4-NEXT: pcmpgtq %xmm8, %xmm12
; SSE4-NEXT: pxor %xmm12, %xmm0
-; SSE4-NEXT: blendvpd %xmm4, %xmm8
+; SSE4-NEXT: blendvpd %xmm0, %xmm4, %xmm8
; SSE4-NEXT: movdqa %xmm11, %xmm0
-; SSE4-NEXT: blendvpd %xmm5, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm5, %xmm1
; SSE4-NEXT: movdqa %xmm10, %xmm0
-; SSE4-NEXT: blendvpd %xmm6, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm6, %xmm2
; SSE4-NEXT: movdqa %xmm9, %xmm0
-; SSE4-NEXT: blendvpd %xmm7, %xmm3
+; SSE4-NEXT: blendvpd %xmm0, %xmm7, %xmm3
; SSE4-NEXT: movapd %xmm8, %xmm0
; SSE4-NEXT: retq
;
@@ -8121,35 +8121,35 @@ define <8 x i64> @test157(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: pxor %xmm0, %xmm12
; SSE4-NEXT: pxor %xmm4, %xmm0
; SSE4-NEXT: pcmpgtq %xmm12, %xmm0
-; SSE4-NEXT: blendvpd %xmm4, %xmm8
+; SSE4-NEXT: blendvpd %xmm0, %xmm4, %xmm8
; SSE4-NEXT: movdqa %xmm11, %xmm0
-; SSE4-NEXT: blendvpd %xmm5, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm5, %xmm1
; SSE4-NEXT: movdqa %xmm10, %xmm0
-; SSE4-NEXT: blendvpd %xmm6, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm6, %xmm2
; SSE4-NEXT: movdqa %xmm9, %xmm0
-; SSE4-NEXT: blendvpd %xmm7, %xmm3
+; SSE4-NEXT: blendvpd %xmm0, %xmm7, %xmm3
; SSE4-NEXT: movapd %xmm8, %xmm0
; SSE4-NEXT: retq
;
; AVX1-LABEL: test157:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vmovaps {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT: vxorps %xmm5, %xmm1, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm3, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm7, %xmm6
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm6, %ymm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm7, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm7, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm7, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm0, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm2, %xmm5
+; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5
; AVX1-NEXT: vblendvpd %ymm5, %ymm2, %ymm0, %ymm0
@@ -8289,39 +8289,39 @@ define <8 x i64> @test158(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: pxor %xmm8, %xmm0
; SSE4-NEXT: pcmpgtq %xmm13, %xmm0
; SSE4-NEXT: pxor %xmm12, %xmm0
-; SSE4-NEXT: blendvpd %xmm4, %xmm8
+; SSE4-NEXT: blendvpd %xmm0, %xmm4, %xmm8
; SSE4-NEXT: movdqa %xmm11, %xmm0
-; SSE4-NEXT: blendvpd %xmm5, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm5, %xmm1
; SSE4-NEXT: movdqa %xmm10, %xmm0
-; SSE4-NEXT: blendvpd %xmm6, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm6, %xmm2
; SSE4-NEXT: movdqa %xmm9, %xmm0
-; SSE4-NEXT: blendvpd %xmm7, %xmm3
+; SSE4-NEXT: blendvpd %xmm0, %xmm7, %xmm3
; SSE4-NEXT: movapd %xmm8, %xmm0
; SSE4-NEXT: retq
;
; AVX1-LABEL: test158:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vmovaps {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm8, %xmm8, %xmm8
; AVX1-NEXT: vpxor %xmm8, %xmm4, %xmm4
-; AVX1-NEXT: vxorps %xmm5, %xmm3, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm1, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm6, %xmm6
; AVX1-NEXT: vpxor %xmm8, %xmm6, %xmm6
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm6, %ymm4
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm7, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm7, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm7, %xmm6
; AVX1-NEXT: vpxor %xmm8, %xmm6, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm2, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm0, %xmm5
+; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpxor %xmm8, %xmm5, %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5
@@ -8447,35 +8447,35 @@ define <8 x i64> @test159(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: pxor %xmm0, %xmm12
; SSE4-NEXT: pxor %xmm8, %xmm0
; SSE4-NEXT: pcmpgtq %xmm12, %xmm0
-; SSE4-NEXT: blendvpd %xmm4, %xmm8
+; SSE4-NEXT: blendvpd %xmm0, %xmm4, %xmm8
; SSE4-NEXT: movdqa %xmm11, %xmm0
-; SSE4-NEXT: blendvpd %xmm5, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm5, %xmm1
; SSE4-NEXT: movdqa %xmm10, %xmm0
-; SSE4-NEXT: blendvpd %xmm6, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm6, %xmm2
; SSE4-NEXT: movdqa %xmm9, %xmm0
-; SSE4-NEXT: blendvpd %xmm7, %xmm3
+; SSE4-NEXT: blendvpd %xmm0, %xmm7, %xmm3
; SSE4-NEXT: movapd %xmm8, %xmm0
; SSE4-NEXT: retq
;
; AVX1-LABEL: test159:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vmovaps {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT: vxorps %xmm5, %xmm3, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm1, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm7, %xmm6
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm6, %ymm4
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm7, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm7, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm7, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm2, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm0, %xmm5
+; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5
; AVX1-NEXT: vblendvpd %ymm5, %ymm2, %ymm0, %ymm0
@@ -8616,39 +8616,39 @@ define <8 x i64> @test160(<8 x i64> %a, <8 x i64> %b) {
; SSE4-NEXT: pxor %xmm4, %xmm0
; SSE4-NEXT: pcmpgtq %xmm13, %xmm0
; SSE4-NEXT: pxor %xmm12, %xmm0
-; SSE4-NEXT: blendvpd %xmm4, %xmm8
+; SSE4-NEXT: blendvpd %xmm0, %xmm4, %xmm8
; SSE4-NEXT: movdqa %xmm11, %xmm0
-; SSE4-NEXT: blendvpd %xmm5, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm5, %xmm1
; SSE4-NEXT: movdqa %xmm10, %xmm0
-; SSE4-NEXT: blendvpd %xmm6, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm6, %xmm2
; SSE4-NEXT: movdqa %xmm9, %xmm0
-; SSE4-NEXT: blendvpd %xmm7, %xmm3
+; SSE4-NEXT: blendvpd %xmm0, %xmm7, %xmm3
; SSE4-NEXT: movapd %xmm8, %xmm0
; SSE4-NEXT: retq
;
; AVX1-LABEL: test160:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vmovaps {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm8, %xmm8, %xmm8
; AVX1-NEXT: vpxor %xmm8, %xmm4, %xmm4
-; AVX1-NEXT: vxorps %xmm5, %xmm1, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm3, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm6, %xmm6
; AVX1-NEXT: vpxor %xmm8, %xmm6, %xmm6
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm6, %ymm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm7, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm7, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm7, %xmm6
; AVX1-NEXT: vpxor %xmm8, %xmm6, %xmm6
-; AVX1-NEXT: vxorps %xmm5, %xmm0, %xmm7
-; AVX1-NEXT: vxorps %xmm5, %xmm2, %xmm5
+; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpxor %xmm8, %xmm5, %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5
@@ -8724,9 +8724,9 @@ define <4 x i64> @test161(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: pcmpgtq %xmm1, %xmm5
; SSE4-NEXT: movdqa %xmm2, %xmm0
; SSE4-NEXT: pcmpgtq %xmm4, %xmm0
-; SSE4-NEXT: blendvpd %xmm4, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm4, %xmm2
; SSE4-NEXT: movdqa %xmm5, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm3
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE4-NEXT: movapd %xmm2, %xmm0
; SSE4-NEXT: movapd %xmm3, %xmm1
; SSE4-NEXT: retq
@@ -8807,9 +8807,9 @@ define <4 x i64> @test162(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: pxor %xmm6, %xmm5
; SSE4-NEXT: pcmpgtq %xmm2, %xmm0
; SSE4-NEXT: pxor %xmm6, %xmm0
-; SSE4-NEXT: blendvpd %xmm4, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm4, %xmm2
; SSE4-NEXT: movdqa %xmm5, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm3
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE4-NEXT: movapd %xmm2, %xmm0
; SSE4-NEXT: movapd %xmm3, %xmm1
; SSE4-NEXT: retq
@@ -8886,9 +8886,9 @@ define <4 x i64> @test163(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: movdqa %xmm1, %xmm5
; SSE4-NEXT: pcmpgtq %xmm3, %xmm5
; SSE4-NEXT: pcmpgtq %xmm2, %xmm0
-; SSE4-NEXT: blendvpd %xmm4, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm4, %xmm2
; SSE4-NEXT: movdqa %xmm5, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm3
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE4-NEXT: movapd %xmm2, %xmm0
; SSE4-NEXT: movapd %xmm3, %xmm1
; SSE4-NEXT: retq
@@ -8970,9 +8970,9 @@ define <4 x i64> @test164(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: movdqa %xmm2, %xmm6
; SSE4-NEXT: pcmpgtq %xmm4, %xmm6
; SSE4-NEXT: pxor %xmm6, %xmm0
-; SSE4-NEXT: blendvpd %xmm4, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm4, %xmm2
; SSE4-NEXT: movdqa %xmm5, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm3
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE4-NEXT: movapd %xmm2, %xmm0
; SSE4-NEXT: movapd %xmm3, %xmm1
; SSE4-NEXT: retq
@@ -9056,9 +9056,9 @@ define <4 x i64> @test165(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: pxor %xmm0, %xmm6
; SSE4-NEXT: pxor %xmm2, %xmm0
; SSE4-NEXT: pcmpgtq %xmm6, %xmm0
-; SSE4-NEXT: blendvpd %xmm4, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm4, %xmm2
; SSE4-NEXT: movdqa %xmm5, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm3
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE4-NEXT: movapd %xmm2, %xmm0
; SSE4-NEXT: movapd %xmm3, %xmm1
; SSE4-NEXT: retq
@@ -9066,13 +9066,13 @@ define <4 x i64> @test165(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: test165:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vxorps %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vxorps %xmm3, %xmm0, %xmm4
-; AVX1-NEXT: vxorps %xmm3, %xmm1, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm4
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
@@ -9154,9 +9154,9 @@ define <4 x i64> @test166(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: pxor %xmm4, %xmm0
; SSE4-NEXT: pcmpgtq %xmm7, %xmm0
; SSE4-NEXT: pxor %xmm6, %xmm0
-; SSE4-NEXT: blendvpd %xmm4, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm4, %xmm2
; SSE4-NEXT: movdqa %xmm5, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm3
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE4-NEXT: movapd %xmm2, %xmm0
; SSE4-NEXT: movapd %xmm3, %xmm1
; SSE4-NEXT: retq
@@ -9164,15 +9164,15 @@ define <4 x i64> @test166(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: test166:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vxorps %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vxorps %xmm3, %xmm1, %xmm5
-; AVX1-NEXT: vxorps %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm5
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
@@ -9248,9 +9248,9 @@ define <4 x i64> @test167(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: pxor %xmm0, %xmm6
; SSE4-NEXT: pxor %xmm4, %xmm0
; SSE4-NEXT: pcmpgtq %xmm6, %xmm0
-; SSE4-NEXT: blendvpd %xmm4, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm4, %xmm2
; SSE4-NEXT: movdqa %xmm5, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm3
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE4-NEXT: movapd %xmm2, %xmm0
; SSE4-NEXT: movapd %xmm3, %xmm1
; SSE4-NEXT: retq
@@ -9258,13 +9258,13 @@ define <4 x i64> @test167(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: test167:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vxorps %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vxorps %xmm3, %xmm1, %xmm4
-; AVX1-NEXT: vxorps %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm4
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
@@ -9346,9 +9346,9 @@ define <4 x i64> @test168(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: pxor %xmm2, %xmm0
; SSE4-NEXT: pcmpgtq %xmm7, %xmm0
; SSE4-NEXT: pxor %xmm6, %xmm0
-; SSE4-NEXT: blendvpd %xmm4, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm4, %xmm2
; SSE4-NEXT: movdqa %xmm5, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm3
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE4-NEXT: movapd %xmm2, %xmm0
; SSE4-NEXT: movapd %xmm3, %xmm1
; SSE4-NEXT: retq
@@ -9356,15 +9356,15 @@ define <4 x i64> @test168(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: test168:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vxorps %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vxorps %xmm3, %xmm0, %xmm5
-; AVX1-NEXT: vxorps %xmm3, %xmm1, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm5
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
@@ -9436,9 +9436,9 @@ define <4 x i64> @test169(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: pcmpgtq %xmm1, %xmm5
; SSE4-NEXT: movdqa %xmm2, %xmm0
; SSE4-NEXT: pcmpgtq %xmm4, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm4
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm4
; SSE4-NEXT: movdqa %xmm5, %xmm0
-; SSE4-NEXT: blendvpd %xmm3, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm3, %xmm1
; SSE4-NEXT: movapd %xmm4, %xmm0
; SSE4-NEXT: retq
;
@@ -9518,9 +9518,9 @@ define <4 x i64> @test170(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: pxor %xmm6, %xmm5
; SSE4-NEXT: pcmpgtq %xmm2, %xmm0
; SSE4-NEXT: pxor %xmm6, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm4
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm4
; SSE4-NEXT: movdqa %xmm5, %xmm0
-; SSE4-NEXT: blendvpd %xmm3, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm3, %xmm1
; SSE4-NEXT: movapd %xmm4, %xmm0
; SSE4-NEXT: retq
;
@@ -9598,9 +9598,9 @@ define <4 x i64> @test171(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: movdqa %xmm1, %xmm5
; SSE4-NEXT: pcmpgtq %xmm3, %xmm5
; SSE4-NEXT: pcmpgtq %xmm2, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm4
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm4
; SSE4-NEXT: movdqa %xmm5, %xmm0
-; SSE4-NEXT: blendvpd %xmm3, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm3, %xmm1
; SSE4-NEXT: movapd %xmm4, %xmm0
; SSE4-NEXT: retq
;
@@ -9681,9 +9681,9 @@ define <4 x i64> @test172(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: movdqa %xmm2, %xmm6
; SSE4-NEXT: pcmpgtq %xmm4, %xmm6
; SSE4-NEXT: pxor %xmm6, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm4
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm4
; SSE4-NEXT: movdqa %xmm5, %xmm0
-; SSE4-NEXT: blendvpd %xmm3, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm3, %xmm1
; SSE4-NEXT: movapd %xmm4, %xmm0
; SSE4-NEXT: retq
;
@@ -9768,22 +9768,22 @@ define <4 x i64> @test173(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: pxor %xmm0, %xmm6
; SSE4-NEXT: pxor %xmm2, %xmm0
; SSE4-NEXT: pcmpgtq %xmm6, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm4
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm4
; SSE4-NEXT: movdqa %xmm5, %xmm0
-; SSE4-NEXT: blendvpd %xmm3, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm3, %xmm1
; SSE4-NEXT: movapd %xmm4, %xmm0
; SSE4-NEXT: retq
;
; AVX1-LABEL: test173:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vxorps %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vxorps %xmm3, %xmm0, %xmm4
-; AVX1-NEXT: vxorps %xmm3, %xmm1, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm4
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
@@ -9865,24 +9865,24 @@ define <4 x i64> @test174(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: pxor %xmm4, %xmm0
; SSE4-NEXT: pcmpgtq %xmm7, %xmm0
; SSE4-NEXT: pxor %xmm6, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm4
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm4
; SSE4-NEXT: movdqa %xmm5, %xmm0
-; SSE4-NEXT: blendvpd %xmm3, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm3, %xmm1
; SSE4-NEXT: movapd %xmm4, %xmm0
; SSE4-NEXT: retq
;
; AVX1-LABEL: test174:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vxorps %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vxorps %xmm3, %xmm1, %xmm5
-; AVX1-NEXT: vxorps %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm5
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
@@ -9960,22 +9960,22 @@ define <4 x i64> @test175(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: pxor %xmm0, %xmm6
; SSE4-NEXT: pxor %xmm4, %xmm0
; SSE4-NEXT: pcmpgtq %xmm6, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm4
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm4
; SSE4-NEXT: movdqa %xmm5, %xmm0
-; SSE4-NEXT: blendvpd %xmm3, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm3, %xmm1
; SSE4-NEXT: movapd %xmm4, %xmm0
; SSE4-NEXT: retq
;
; AVX1-LABEL: test175:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vxorps %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vxorps %xmm3, %xmm1, %xmm4
-; AVX1-NEXT: vxorps %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm4
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
@@ -10057,24 +10057,24 @@ define <4 x i64> @test176(<4 x i64> %a, <4 x i64> %b) {
; SSE4-NEXT: pxor %xmm2, %xmm0
; SSE4-NEXT: pcmpgtq %xmm7, %xmm0
; SSE4-NEXT: pxor %xmm6, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm4
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm4
; SSE4-NEXT: movdqa %xmm5, %xmm0
-; SSE4-NEXT: blendvpd %xmm3, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm3, %xmm1
; SSE4-NEXT: movapd %xmm4, %xmm0
; SSE4-NEXT: retq
;
; AVX1-LABEL: test176:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vxorps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vxorps %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vxorps %xmm3, %xmm0, %xmm5
-; AVX1-NEXT: vxorps %xmm3, %xmm1, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm5
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
@@ -10127,7 +10127,7 @@ define <2 x i64> @test177(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa %xmm1, %xmm0
; SSE4-NEXT: pcmpgtq %xmm2, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE4-NEXT: movapd %xmm1, %xmm0
; SSE4-NEXT: retq
;
@@ -10182,7 +10182,7 @@ define <2 x i64> @test178(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: pcmpgtq %xmm1, %xmm0
; SSE4-NEXT: pcmpeqd %xmm3, %xmm3
; SSE4-NEXT: pxor %xmm3, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE4-NEXT: movapd %xmm1, %xmm0
; SSE4-NEXT: retq
;
@@ -10236,7 +10236,7 @@ define <2 x i64> @test179(<2 x i64> %a, <2 x i64> %b) {
; SSE4: # BB#0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: pcmpgtq %xmm1, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE4-NEXT: movapd %xmm1, %xmm0
; SSE4-NEXT: retq
;
@@ -10292,7 +10292,7 @@ define <2 x i64> @test180(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: pcmpgtq %xmm2, %xmm3
; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
; SSE4-NEXT: pxor %xmm3, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE4-NEXT: movapd %xmm1, %xmm0
; SSE4-NEXT: retq
;
@@ -10350,7 +10350,7 @@ define <2 x i64> @test181(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: pxor %xmm0, %xmm3
; SSE4-NEXT: pxor %xmm1, %xmm0
; SSE4-NEXT: pcmpgtq %xmm3, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE4-NEXT: movapd %xmm1, %xmm0
; SSE4-NEXT: retq
;
@@ -10415,7 +10415,7 @@ define <2 x i64> @test182(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: pcmpgtq %xmm0, %xmm3
; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
; SSE4-NEXT: pxor %xmm3, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE4-NEXT: movapd %xmm1, %xmm0
; SSE4-NEXT: retq
;
@@ -10479,7 +10479,7 @@ define <2 x i64> @test183(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: pxor %xmm0, %xmm3
; SSE4-NEXT: pxor %xmm2, %xmm0
; SSE4-NEXT: pcmpgtq %xmm3, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE4-NEXT: movapd %xmm1, %xmm0
; SSE4-NEXT: retq
;
@@ -10543,7 +10543,7 @@ define <2 x i64> @test184(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: pcmpgtq %xmm0, %xmm3
; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
; SSE4-NEXT: pxor %xmm3, %xmm0
-; SSE4-NEXT: blendvpd %xmm2, %xmm1
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE4-NEXT: movapd %xmm1, %xmm0
; SSE4-NEXT: retq
;
@@ -10605,7 +10605,7 @@ define <2 x i64> @test185(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa %xmm1, %xmm0
; SSE4-NEXT: pcmpgtq %xmm2, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm2
; SSE4-NEXT: movapd %xmm2, %xmm0
; SSE4-NEXT: retq
;
@@ -10660,7 +10660,7 @@ define <2 x i64> @test186(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: pcmpgtq %xmm1, %xmm0
; SSE4-NEXT: pcmpeqd %xmm3, %xmm3
; SSE4-NEXT: pxor %xmm3, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm2
; SSE4-NEXT: movapd %xmm2, %xmm0
; SSE4-NEXT: retq
;
@@ -10715,7 +10715,7 @@ define <2 x i64> @test187(<2 x i64> %a, <2 x i64> %b) {
; SSE4: # BB#0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: pcmpgtq %xmm1, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm2
; SSE4-NEXT: movapd %xmm2, %xmm0
; SSE4-NEXT: retq
;
@@ -10771,7 +10771,7 @@ define <2 x i64> @test188(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: pcmpgtq %xmm2, %xmm3
; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
; SSE4-NEXT: pxor %xmm3, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm2
; SSE4-NEXT: movapd %xmm2, %xmm0
; SSE4-NEXT: retq
;
@@ -10830,7 +10830,7 @@ define <2 x i64> @test189(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: pxor %xmm0, %xmm3
; SSE4-NEXT: pxor %xmm1, %xmm0
; SSE4-NEXT: pcmpgtq %xmm3, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm2
; SSE4-NEXT: movapd %xmm2, %xmm0
; SSE4-NEXT: retq
;
@@ -10895,7 +10895,7 @@ define <2 x i64> @test190(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: pcmpgtq %xmm0, %xmm3
; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
; SSE4-NEXT: pxor %xmm3, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm2
; SSE4-NEXT: movapd %xmm2, %xmm0
; SSE4-NEXT: retq
;
@@ -10960,7 +10960,7 @@ define <2 x i64> @test191(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: pxor %xmm0, %xmm3
; SSE4-NEXT: pxor %xmm2, %xmm0
; SSE4-NEXT: pcmpgtq %xmm3, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm2
; SSE4-NEXT: movapd %xmm2, %xmm0
; SSE4-NEXT: retq
;
@@ -11024,7 +11024,7 @@ define <2 x i64> @test192(<2 x i64> %a, <2 x i64> %b) {
; SSE4-NEXT: pcmpgtq %xmm0, %xmm3
; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
; SSE4-NEXT: pxor %xmm3, %xmm0
-; SSE4-NEXT: blendvpd %xmm1, %xmm2
+; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm2
; SSE4-NEXT: movapd %xmm2, %xmm0
; SSE4-NEXT: retq
;
diff --git a/test/CodeGen/X86/vselect-pcmp.ll b/test/CodeGen/X86/vselect-pcmp.ll
new file mode 100644
index 000000000000..d33fda4f49c2
--- /dev/null
+++ b/test/CodeGen/X86/vselect-pcmp.ll
@@ -0,0 +1,323 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX12F --check-prefix=AVX12 --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX12F --check-prefix=AVX12 --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX12F --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512VL
+
+; The condition vector for BLENDV* only cares about the sign bit of each element.
+; So in these tests, if we generate BLENDV*, we should be able to remove the redundant cmp op.
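+; For reference, each test below is an instance of the pattern
+;   %tr = icmp slt <N x iM> %mask, zeroinitializer  ; i.e. "sign bit set?"
+;   %z  = select <N x i1> %tr, <N x iM> %x, <N x iM> %y
+; (<N x iM> is a placeholder for the per-test type). Since BLENDV* keys on
+; each element's sign bit alone, the compare that materializes an
+; all-ones/all-zeros mask adds nothing the blend does not already read.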
+
+; Test 128-bit vectors for all legal element types.
+
+; FIXME: Why didn't AVX-512 optimize this too?
+
+define <16 x i8> @signbit_sel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %mask) {
+; AVX12-LABEL: signbit_sel_v16i8:
+; AVX12: # BB#0:
+; AVX12-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX12-NEXT: retq
+;
+; AVX512-LABEL: signbit_sel_v16i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX512-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
+; AVX512-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
+ %tr = icmp slt <16 x i8> %mask, zeroinitializer
+ %z = select <16 x i1> %tr, <16 x i8> %x, <16 x i8> %y
+ ret <16 x i8> %z
+}
+
+; Sorry 16-bit, you're not important enough to support?
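+; (A hedged guess at why: there is no word-granular BLENDV instruction, and
+; the fold to a byte blend is simply not implemented here, even though
+; vpcmpgtw already yields an all-ones/all-zeros mask per 16-bit lane. So the
+; lowering below falls back to the and/andn/or sequence.)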
+
+define <8 x i16> @signbit_sel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %mask) {
+; AVX-LABEL: signbit_sel_v8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
+; AVX-NEXT: vpandn %xmm1, %xmm2, %xmm1
+; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %tr = icmp slt <8 x i16> %mask, zeroinitializer
+ %z = select <8 x i1> %tr, <8 x i16> %x, <8 x i16> %y
+ ret <8 x i16> %z
+}
+
+define <4 x i32> @signbit_sel_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) {
+; AVX12F-LABEL: signbit_sel_v4i32:
+; AVX12F: # BB#0:
+; AVX12F-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX12F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v4i32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT: vpcmpgtd %xmm2, %xmm3, %k1
+; AVX512VL-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; AVX512VL-NEXT: retq
+ %tr = icmp slt <4 x i32> %mask, zeroinitializer
+ %z = select <4 x i1> %tr, <4 x i32> %x, <4 x i32> %y
+ ret <4 x i32> %z
+}
+
+define <2 x i64> @signbit_sel_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %mask) {
+; AVX12F-LABEL: signbit_sel_v2i64:
+; AVX12F: # BB#0:
+; AVX12F-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX12F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v2i64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT: vpcmpgtq %xmm2, %xmm3, %k1
+; AVX512VL-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
+; AVX512VL-NEXT: retq
+ %tr = icmp slt <2 x i64> %mask, zeroinitializer
+ %z = select <2 x i1> %tr, <2 x i64> %x, <2 x i64> %y
+ ret <2 x i64> %z
+}
+
+define <4 x float> @signbit_sel_v4f32(<4 x float> %x, <4 x float> %y, <4 x i32> %mask) {
+; AVX12F-LABEL: signbit_sel_v4f32:
+; AVX12F: # BB#0:
+; AVX12F-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX12F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v4f32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT: vpcmpgtd %xmm2, %xmm3, %k1
+; AVX512VL-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
+; AVX512VL-NEXT: retq
+ %tr = icmp slt <4 x i32> %mask, zeroinitializer
+ %z = select <4 x i1> %tr, <4 x float> %x, <4 x float> %y
+ ret <4 x float> %z
+}
+
+define <2 x double> @signbit_sel_v2f64(<2 x double> %x, <2 x double> %y, <2 x i64> %mask) {
+; AVX12F-LABEL: signbit_sel_v2f64:
+; AVX12F: # BB#0:
+; AVX12F-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX12F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v2f64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT: vpcmpgtq %xmm2, %xmm3, %k1
+; AVX512VL-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
+; AVX512VL-NEXT: retq
+ %tr = icmp slt <2 x i64> %mask, zeroinitializer
+ %z = select <2 x i1> %tr, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %z
+}
+
+; Test 256-bit vectors to see differences between AVX1 and AVX2.
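+; (AVX1 has no 256-bit integer compare, so the AVX1 lowering below splits
+; the mask into 128-bit halves, compares each half with vpcmpgtb, and
+; rejoins them with vinsertf128 before the logical select.)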
+
+define <32 x i8> @signbit_sel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %mask) {
+; AVX1-LABEL: signbit_sel_v32i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpcmpgtb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT: vandnps %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: signbit_sel_v32i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: signbit_sel_v32i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX512-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm2
+; AVX512-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
+ %tr = icmp slt <32 x i8> %mask, zeroinitializer
+ %z = select <32 x i1> %tr, <32 x i8> %x, <32 x i8> %y
+ ret <32 x i8> %z
+}
+
+; Sorry 16-bit, you'll never be important enough to support?
+
+define <16 x i16> @signbit_sel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %mask) {
+; AVX1-LABEL: signbit_sel_v16i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpcmpgtw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT: vandnps %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: signbit_sel_v16i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX2-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vpandn %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: signbit_sel_v16i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX512-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm2
+; AVX512-NEXT: vpandn %ymm1, %ymm2, %ymm1
+; AVX512-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %tr = icmp slt <16 x i16> %mask, zeroinitializer
+ %z = select <16 x i1> %tr, <16 x i16> %x, <16 x i16> %y
+ ret <16 x i16> %z
+}
+
+define <8 x i32> @signbit_sel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %mask) {
+; AVX12-LABEL: signbit_sel_v8i32:
+; AVX12: # BB#0:
+; AVX12-NEXT: vblendvps %ymm2, %ymm0, %ymm1, %ymm0
+; AVX12-NEXT: retq
+;
+; AVX512F-LABEL: signbit_sel_v8i32:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<def>
+; AVX512F-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX512F-NEXT: vpcmpgtd %zmm2, %zmm3, %k1
+; AVX512F-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v8i32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT: vpcmpgtd %ymm2, %ymm3, %k1
+; AVX512VL-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; AVX512VL-NEXT: retq
+ %tr = icmp slt <8 x i32> %mask, zeroinitializer
+ %z = select <8 x i1> %tr, <8 x i32> %x, <8 x i32> %y
+ ret <8 x i32> %z
+}
+
+define <4 x i64> @signbit_sel_v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> %mask) {
+; AVX12F-LABEL: signbit_sel_v4i64:
+; AVX12F: # BB#0:
+; AVX12F-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX12F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v4i64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT: vpcmpgtq %ymm2, %ymm3, %k1
+; AVX512VL-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
+; AVX512VL-NEXT: retq
+ %tr = icmp slt <4 x i64> %mask, zeroinitializer
+ %z = select <4 x i1> %tr, <4 x i64> %x, <4 x i64> %y
+ ret <4 x i64> %z
+}
+
+define <4 x double> @signbit_sel_v4f64(<4 x double> %x, <4 x double> %y, <4 x i64> %mask) {
+; AVX12F-LABEL: signbit_sel_v4f64:
+; AVX12F: # BB#0:
+; AVX12F-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX12F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v4f64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT: vpcmpgtq %ymm2, %ymm3, %k1
+; AVX512VL-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
+; AVX512VL-NEXT: retq
+ %tr = icmp slt <4 x i64> %mask, zeroinitializer
+ %z = select <4 x i1> %tr, <4 x double> %x, <4 x double> %y
+ ret <4 x double> %z
+}
+
+; Try a condition with a different type than the select operands.
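+; (A <4 x i32> mask must be widened to drive a 256-bit vblendvpd; sign
+; extension via vpmovsxdq copies each element's sign bit into bit 63 of the
+; corresponding 64-bit lane, so the blend still keys off the original sign.)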
+
+define <4 x double> @signbit_sel_v4f64_small_mask(<4 x double> %x, <4 x double> %y, <4 x i32> %mask) {
+; AVX1-LABEL: signbit_sel_v4f64_small_mask:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpmovsxdq %xmm2, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: signbit_sel_v4f64_small_mask:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2
+; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: signbit_sel_v4f64_small_mask:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpmovsxdq %xmm2, %ymm2
+; AVX512F-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v4f64_small_mask:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT: vpcmpgtd %xmm2, %xmm3, %k1
+; AVX512VL-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
+; AVX512VL-NEXT: retq
+ %tr = icmp slt <4 x i32> %mask, zeroinitializer
+ %z = select <4 x i1> %tr, <4 x double> %x, <4 x double> %y
+ ret <4 x double> %z
+}
+
+; Try a 512-bit vector to make sure AVX-512 is handled as expected.
+
+define <8 x double> @signbit_sel_v8f64(<8 x double> %x, <8 x double> %y, <8 x i64> %mask) {
+; AVX12-LABEL: signbit_sel_v8f64:
+; AVX12: # BB#0:
+; AVX12-NEXT: vblendvpd %ymm4, %ymm0, %ymm2, %ymm0
+; AVX12-NEXT: vblendvpd %ymm5, %ymm1, %ymm3, %ymm1
+; AVX12-NEXT: retq
+;
+; AVX512-LABEL: signbit_sel_v8f64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpxord %zmm3, %zmm3, %zmm3
+; AVX512-NEXT: vpcmpgtq %zmm2, %zmm3, %k1
+; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
+; AVX512-NEXT: retq
+ %tr = icmp slt <8 x i64> %mask, zeroinitializer
+ %z = select <8 x i1> %tr, <8 x double> %x, <8 x double> %y
+ ret <8 x double> %z
+}
+
+; If we have a floating-point compare:
+; (1) Don't die.
+; (2) FIXME: If we don't care about signed-zero (and NaN?), the compare should still get folded.
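+; (Why the fold is not free here: for %x = -0.0, fcmp olt %x, zero is false
+; even though the sign bit of %x is set, and a NaN %x compares false under
+; an ordered predicate regardless of its sign bit. So the compare only
+; matches a pure sign-bit test under assumptions like the
+; "no-nans-fp-math" attribute used below.)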
+
+define <4 x float> @signbit_sel_v4f32_fcmp(<4 x float> %x, <4 x float> %y, <4 x float> %mask) #0 {
+; AVX12F-LABEL: signbit_sel_v4f32_fcmp:
+; AVX12F: # BB#0:
+; AVX12F-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX12F-NEXT: vcmpltps %xmm2, %xmm0, %xmm2
+; AVX12F-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX12F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v4f32_fcmp:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vcmpltps %xmm2, %xmm0, %k1
+; AVX512VL-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
+; AVX512VL-NEXT: retq
+ %cmp = fcmp olt <4 x float> %x, zeroinitializer
+ %sel = select <4 x i1> %cmp, <4 x float> %x, <4 x float> %y
+ ret <4 x float> %sel
+}
+
+attributes #0 = { "no-nans-fp-math"="true" }
diff --git a/test/CodeGen/X86/vsplit-and.ll b/test/CodeGen/X86/vsplit-and.ll
index 05d6c60b88f0..e62698221973 100644
--- a/test/CodeGen/X86/vsplit-and.ll
+++ b/test/CodeGen/X86/vsplit-and.ll
@@ -6,11 +6,11 @@ define void @t0(<2 x i64>* %dst, <2 x i64> %src1, <2 x i64> %src2) nounwind read
; CHECK: # BB#0:
; CHECK-NEXT: pxor %xmm2, %xmm2
; CHECK-NEXT: pcmpeqq %xmm2, %xmm0
-; CHECK-NEXT: pcmpeqd %xmm3, %xmm3
-; CHECK-NEXT: pxor %xmm0, %xmm3
; CHECK-NEXT: pcmpeqq %xmm2, %xmm1
-; CHECK-NEXT: pandn %xmm3, %xmm1
-; CHECK-NEXT: movdqa %xmm1, (%rdi)
+; CHECK-NEXT: pcmpeqd %xmm2, %xmm2
+; CHECK-NEXT: pxor %xmm1, %xmm2
+; CHECK-NEXT: pandn %xmm2, %xmm0
+; CHECK-NEXT: movdqa %xmm0, (%rdi)
; CHECK-NEXT: retq
%cmp1 = icmp ne <2 x i64> %src1, zeroinitializer
%cmp2 = icmp ne <2 x i64> %src2, zeroinitializer
diff --git a/test/CodeGen/X86/wide-integer-cmp.ll b/test/CodeGen/X86/wide-integer-cmp.ll
index c45a0541e6a7..fbaf500e8333 100644
--- a/test/CodeGen/X86/wide-integer-cmp.ll
+++ b/test/CodeGen/X86/wide-integer-cmp.ll
@@ -1,7 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-linux-gnu %s -o - | FileCheck %s
define i32 @branch_eq(i64 %a, i64 %b) {
+; CHECK-LABEL: branch_eq:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: xorl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: orl %ecx, %eax
+; CHECK-NEXT: jne .LBB0_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: movl $1, %eax
+; CHECK-NEXT: retl
+; CHECK-NEXT: .LBB0_2: # %bb2
+; CHECK-NEXT: movl $2, %eax
+; CHECK-NEXT: retl
entry:
%cmp = icmp eq i64 %a, %b
br i1 %cmp, label %bb1, label %bb2
@@ -9,22 +24,22 @@ bb1:
ret i32 1
bb2:
ret i32 2
-
-; CHECK-LABEL: branch_eq:
-; CHECK: movl 4(%esp), [[LHSLo:%[a-z]+]]
-; CHECK: movl 8(%esp), [[LHSHi:%[a-z]+]]
-; CHECK: xorl 16(%esp), [[LHSHi]]
-; CHECK: xorl 12(%esp), [[LHSLo]]
-; CHECK: orl [[LHSHi]], [[LHSLo]]
-; CHECK: jne [[FALSE:.LBB[0-9_]+]]
-; CHECK: movl $1, %eax
-; CHECK: retl
-; CHECK: [[FALSE]]:
-; CHECK: movl $2, %eax
-; CHECK: retl
}
define i32 @branch_slt(i64 %a, i64 %b) {
+; CHECK-LABEL: branch_slt:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: jge .LBB1_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: movl $1, %eax
+; CHECK-NEXT: retl
+; CHECK-NEXT: .LBB1_2: # %bb2
+; CHECK-NEXT: movl $2, %eax
+; CHECK-NEXT: retl
entry:
%cmp = icmp slt i64 %a, %b
br i1 %cmp, label %bb1, label %bb2
@@ -32,21 +47,22 @@ bb1:
ret i32 1
bb2:
ret i32 2
-
-; CHECK-LABEL: branch_slt:
-; CHECK: movl 4(%esp), [[LHSLo:%[a-z]+]]
-; CHECK: movl 8(%esp), [[LHSHi:%[a-z]+]]
-; CHECK: cmpl 12(%esp), [[LHSLo]]
-; CHECK: sbbl 16(%esp), [[LHSHi]]
-; CHECK: jge [[FALSE:.LBB[0-9_]+]]
-; CHECK: movl $1, %eax
-; CHECK: retl
-; CHECK: [[FALSE]]:
-; CHECK: movl $2, %eax
-; CHECK: retl
}
define i32 @branch_ule(i64 %a, i64 %b) {
+; CHECK-LABEL: branch_ule:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: jb .LBB2_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: movl $1, %eax
+; CHECK-NEXT: retl
+; CHECK-NEXT: .LBB2_2: # %bb2
+; CHECK-NEXT: movl $2, %eax
+; CHECK-NEXT: retl
entry:
%cmp = icmp ule i64 %a, %b
br i1 %cmp, label %bb1, label %bb2
@@ -54,36 +70,49 @@ bb1:
ret i32 1
bb2:
ret i32 2
-
-; CHECK-LABEL: branch_ule:
-; CHECK: movl 12(%esp), [[RHSLo:%[a-z]+]]
-; CHECK: movl 16(%esp), [[RHSHi:%[a-z]+]]
-; CHECK: cmpl 4(%esp), [[RHSLo]]
-; CHECK: sbbl 8(%esp), [[RHSHi]]
-; CHECK: jb [[FALSE:.LBB[0-9_]+]]
-; CHECK: movl $1, %eax
-; CHECK: retl
-; CHECK: [[FALSE]]:
-; CHECK: movl $2, %eax
-; CHECK: retl
}
define i32 @set_gt(i64 %a, i64 %b) {
+; CHECK-LABEL: set_gt:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: setl %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: retl
entry:
%cmp = icmp sgt i64 %a, %b
%res = select i1 %cmp, i32 1, i32 0
ret i32 %res
-
-; CHECK-LABEL: set_gt:
-; CHECK: movl 12(%esp), [[RHSLo:%[a-z]+]]
-; CHECK: movl 16(%esp), [[RHSHi:%[a-z]+]]
-; CHECK: cmpl 4(%esp), [[RHSLo]]
-; CHECK: sbbl 8(%esp), [[RHSHi]]
-; CHECK: setl %al
-; CHECK: retl
}
define i32 @test_wide(i128 %a, i128 %b) {
+; CHECK-LABEL: test_wide:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: .Lcfi0:
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: .Lcfi1:
+; CHECK-NEXT: .cfi_offset %esi, -8
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi
+; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %esi
+; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: sbbl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: jge .LBB4_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: movl $1, %eax
+; CHECK-NEXT: popl %esi
+; CHECK-NEXT: retl
+; CHECK-NEXT: .LBB4_2: # %bb2
+; CHECK-NEXT: movl $2, %eax
+; CHECK-NEXT: popl %esi
+; CHECK-NEXT: retl
entry:
%cmp = icmp slt i128 %a, %b
br i1 %cmp, label %bb1, label %bb2
@@ -91,21 +120,22 @@ bb1:
ret i32 1
bb2:
ret i32 2
-
-; CHECK-LABEL: test_wide:
-; CHECK: cmpl 24(%esp)
-; CHECK: sbbl 28(%esp)
-; CHECK: sbbl 32(%esp)
-; CHECK: sbbl 36(%esp)
-; CHECK: jge [[FALSE:.LBB[0-9_]+]]
-; CHECK: movl $1, %eax
-; CHECK: retl
-; CHECK: [[FALSE]]:
-; CHECK: movl $2, %eax
-; CHECK: retl
}
+; The comparison of the low bits will be folded to a CARRY_FALSE node. Make
+; sure the code can handle that.
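+; (Both operands are masked with 0xffffffff00000000 below, so the low
+; 32-bit halves are known zero; subtracting them can never borrow, the
+; carry collapses to a constant CARRY_FALSE node, and only the single
+; high-word cmpl survives, as the CHECK lines show.)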
define i32 @test_carry_false(i64 %a, i64 %b) {
+; CHECK-LABEL: test_carry_false:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: jge .LBB5_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: movl $1, %eax
+; CHECK-NEXT: retl
+; CHECK-NEXT: .LBB5_2: # %bb2
+; CHECK-NEXT: movl $2, %eax
+; CHECK-NEXT: retl
entry:
%x = and i64 %a, -4294967296 ;0xffffffff00000000
%y = and i64 %b, -4294967296
@@ -115,16 +145,4 @@ bb1:
ret i32 1
bb2:
ret i32 2
-
-; The comparison of the low bits will be folded to a CARRY_FALSE node. Make
-; sure the code can handle that.
-; CHECK-LABEL: carry_false:
-; CHECK: movl 8(%esp), [[LHSHi:%[a-z]+]]
-; CHECK: cmpl 16(%esp), [[LHSHi]]
-; CHECK: jge [[FALSE:.LBB[0-9_]+]]
-; CHECK: movl $1, %eax
-; CHECK: retl
-; CHECK: [[FALSE]]:
-; CHECK: movl $2, %eax
-; CHECK: retl
}
diff --git a/test/CodeGen/X86/widen_bitops-0.ll b/test/CodeGen/X86/widen_bitops-0.ll
index f8316d0e1ea2..132a2fd928f2 100644
--- a/test/CodeGen/X86/widen_bitops-0.ll
+++ b/test/CodeGen/X86/widen_bitops-0.ll
@@ -131,10 +131,10 @@ define i24 @or_i24_as_v8i3(i24 %a, i24 %b) nounwind {
define <3 x i8> @and_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
; X32-SSE-LABEL: and_v3i8_as_i24:
; X32-SSE: # BB#0:
-; X32-SSE-NEXT: pinsrb $0, {{[0-9]+}}(%esp), %xmm0
+; X32-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE-NEXT: pinsrb $4, {{[0-9]+}}(%esp), %xmm0
; X32-SSE-NEXT: pinsrb $8, {{[0-9]+}}(%esp), %xmm0
-; X32-SSE-NEXT: pinsrb $0, {{[0-9]+}}(%esp), %xmm1
+; X32-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-SSE-NEXT: pinsrb $4, {{[0-9]+}}(%esp), %xmm1
; X32-SSE-NEXT: pinsrb $8, {{[0-9]+}}(%esp), %xmm1
; X32-SSE-NEXT: pand %xmm0, %xmm1
@@ -172,10 +172,10 @@ define <3 x i8> @and_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
define <3 x i8> @xor_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
; X32-SSE-LABEL: xor_v3i8_as_i24:
; X32-SSE: # BB#0:
-; X32-SSE-NEXT: pinsrb $0, {{[0-9]+}}(%esp), %xmm0
+; X32-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE-NEXT: pinsrb $4, {{[0-9]+}}(%esp), %xmm0
; X32-SSE-NEXT: pinsrb $8, {{[0-9]+}}(%esp), %xmm0
-; X32-SSE-NEXT: pinsrb $0, {{[0-9]+}}(%esp), %xmm1
+; X32-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-SSE-NEXT: pinsrb $4, {{[0-9]+}}(%esp), %xmm1
; X32-SSE-NEXT: pinsrb $8, {{[0-9]+}}(%esp), %xmm1
; X32-SSE-NEXT: pxor %xmm0, %xmm1
@@ -213,10 +213,10 @@ define <3 x i8> @xor_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
define <3 x i8> @or_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
; X32-SSE-LABEL: or_v3i8_as_i24:
; X32-SSE: # BB#0:
-; X32-SSE-NEXT: pinsrb $0, {{[0-9]+}}(%esp), %xmm0
+; X32-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE-NEXT: pinsrb $4, {{[0-9]+}}(%esp), %xmm0
; X32-SSE-NEXT: pinsrb $8, {{[0-9]+}}(%esp), %xmm0
-; X32-SSE-NEXT: pinsrb $0, {{[0-9]+}}(%esp), %xmm1
+; X32-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-SSE-NEXT: pinsrb $4, {{[0-9]+}}(%esp), %xmm1
; X32-SSE-NEXT: pinsrb $8, {{[0-9]+}}(%esp), %xmm1
; X32-SSE-NEXT: por %xmm0, %xmm1
diff --git a/test/CodeGen/X86/widen_conv-1.ll b/test/CodeGen/X86/widen_conv-1.ll
index aa2cef4ff814..a672e84fcde4 100644
--- a/test/CodeGen/X86/widen_conv-1.ll
+++ b/test/CodeGen/X86/widen_conv-1.ll
@@ -38,7 +38,6 @@ define void @convert_v3i32_to_v3i8(<3 x i8>* %dst.addr, <3 x i32>* %src.addr) no
; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
; X86-NEXT: pextrb $8, %xmm0, 2(%eax)
; X86-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
-; X86-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; X86-NEXT: pextrw $0, %xmm0, (%eax)
; X86-NEXT: popl %eax
; X86-NEXT: retl
@@ -49,7 +48,6 @@ define void @convert_v3i32_to_v3i8(<3 x i8>* %dst.addr, <3 x i32>* %src.addr) no
; X64-NEXT: paddd {{.*}}(%rip), %xmm0
; X64-NEXT: pextrb $8, %xmm0, 2(%rdi)
; X64-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
-; X64-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; X64-NEXT: pextrw $0, %xmm0, (%rdi)
; X64-NEXT: retq
entry:
@@ -75,7 +73,6 @@ define void @convert_v5i16_to_v5i8(<5 x i8>* %dst.addr, <5 x i16>* %src.addr) no
; X86-NEXT: paddw {{\.LCPI.*}}, %xmm0
; X86-NEXT: pextrb $8, %xmm0, 4(%eax)
; X86-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
-; X86-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; X86-NEXT: movd %xmm0, (%eax)
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
@@ -87,7 +84,6 @@ define void @convert_v5i16_to_v5i8(<5 x i8>* %dst.addr, <5 x i16>* %src.addr) no
; X64-NEXT: paddw {{.*}}(%rip), %xmm0
; X64-NEXT: pextrb $8, %xmm0, 4(%rdi)
; X64-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
-; X64-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; X64-NEXT: movd %xmm0, (%rdi)
; X64-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/widen_conv-3.ll b/test/CodeGen/X86/widen_conv-3.ll
index f2e29337e6ad..504485440eff 100644
--- a/test/CodeGen/X86/widen_conv-3.ll
+++ b/test/CodeGen/X86/widen_conv-3.ll
@@ -65,7 +65,7 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
; X86-SSE2-NEXT: shll $8, %edx
; X86-SSE2-NEXT: movzbl (%esp), %esi
; X86-SSE2-NEXT: orl %edx, %esi
-; X86-SSE2-NEXT: pinsrw $0, %esi, %xmm0
+; X86-SSE2-NEXT: movd %esi, %xmm0
; X86-SSE2-NEXT: movzbl 2(%ecx), %ecx
; X86-SSE2-NEXT: pinsrw $1, %ecx, %xmm0
; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -106,8 +106,6 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
; X64-SSE2: # BB#0: # %entry
; X64-SSE2-NEXT: movzwl (%rsi), %eax
; X64-SSE2-NEXT: movd %rax, %xmm0
-; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; X64-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X64-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; X64-SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
@@ -115,7 +113,7 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
; X64-SSE2-NEXT: shll $8, %eax
; X64-SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; X64-SSE2-NEXT: orl %eax, %ecx
-; X64-SSE2-NEXT: pinsrw $0, %ecx, %xmm0
+; X64-SSE2-NEXT: movd %ecx, %xmm0
; X64-SSE2-NEXT: movzbl 2(%rsi), %eax
; X64-SSE2-NEXT: pinsrw $1, %eax, %xmm0
; X64-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -132,8 +130,6 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
; X64-SSE42-NEXT: movzbl 2(%rsi), %eax
; X64-SSE42-NEXT: movzwl (%rsi), %ecx
; X64-SSE42-NEXT: movd %rcx, %xmm0
-; X64-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; X64-SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; X64-SSE42-NEXT: pinsrd $2, %eax, %xmm0
; X64-SSE42-NEXT: pslld $24, %xmm0
diff --git a/test/CodeGen/X86/widen_conv-4.ll b/test/CodeGen/X86/widen_conv-4.ll
index 90c4bbe6bb70..ef56692e947c 100644
--- a/test/CodeGen/X86/widen_conv-4.ll
+++ b/test/CodeGen/X86/widen_conv-4.ll
@@ -91,7 +91,7 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
; X86-SSE2-NEXT: shll $8, %edx
; X86-SSE2-NEXT: movzbl (%esp), %esi
; X86-SSE2-NEXT: orl %edx, %esi
-; X86-SSE2-NEXT: pinsrw $0, %esi, %xmm0
+; X86-SSE2-NEXT: movd %esi, %xmm0
; X86-SSE2-NEXT: movzbl 2(%ecx), %ecx
; X86-SSE2-NEXT: pinsrw $1, %ecx, %xmm0
; X86-SSE2-NEXT: pxor %xmm1, %xmm1
@@ -131,8 +131,6 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
; X64-SSE2: # BB#0: # %entry
; X64-SSE2-NEXT: movzwl (%rsi), %eax
; X64-SSE2-NEXT: movd %rax, %xmm0
-; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; X64-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X64-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; X64-SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
@@ -140,7 +138,7 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
; X64-SSE2-NEXT: shll $8, %eax
; X64-SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; X64-SSE2-NEXT: orl %eax, %ecx
-; X64-SSE2-NEXT: pinsrw $0, %ecx, %xmm0
+; X64-SSE2-NEXT: movd %ecx, %xmm0
; X64-SSE2-NEXT: movzbl 2(%rsi), %eax
; X64-SSE2-NEXT: pinsrw $1, %eax, %xmm0
; X64-SSE2-NEXT: pxor %xmm1, %xmm1
@@ -157,8 +155,6 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
; X64-SSE42-NEXT: movzbl 2(%rsi), %eax
; X64-SSE42-NEXT: movzwl (%rsi), %ecx
; X64-SSE42-NEXT: movd %rcx, %xmm0
-; X64-SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; X64-SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; X64-SSE42-NEXT: pinsrd $2, %eax, %xmm0
; X64-SSE42-NEXT: pand {{.*}}(%rip), %xmm0
diff --git a/test/CodeGen/X86/widen_load-2.ll b/test/CodeGen/X86/widen_load-2.ll
index 61297cc11d32..9fc0805b899c 100644
--- a/test/CodeGen/X86/widen_load-2.ll
+++ b/test/CodeGen/X86/widen_load-2.ll
@@ -164,8 +164,7 @@ define void @add3i16(%i16vec3* nocapture sret %ret, %i16vec3* %ap, %i16vec3* %bp
; X86-NEXT: paddd %xmm0, %xmm1
; X86-NEXT: pextrw $4, %xmm1, 4(%eax)
; X86-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; X86-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
-; X86-NEXT: movd %xmm0, (%eax)
+; X86-NEXT: movd %xmm1, (%eax)
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
@@ -177,8 +176,7 @@ define void @add3i16(%i16vec3* nocapture sret %ret, %i16vec3* %ap, %i16vec3* %bp
; X64-NEXT: paddd %xmm0, %xmm1
; X64-NEXT: pextrw $4, %xmm1, 4(%rdi)
; X64-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; X64-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
-; X64-NEXT: movd %xmm0, (%rdi)
+; X64-NEXT: movd %xmm1, (%rdi)
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
%a = load %i16vec3, %i16vec3* %ap, align 16
@@ -301,8 +299,7 @@ define void @add3i8(%i8vec3* nocapture sret %ret, %i8vec3* %ap, %i8vec3* %bp) no
; X86-NEXT: paddd %xmm0, %xmm1
; X86-NEXT: pextrb $8, %xmm1, 2(%eax)
; X86-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
-; X86-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; X86-NEXT: pextrw $0, %xmm0, (%eax)
+; X86-NEXT: pextrw $0, %xmm1, (%eax)
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl $4
;
@@ -313,8 +310,7 @@ define void @add3i8(%i8vec3* nocapture sret %ret, %i8vec3* %ap, %i8vec3* %bp) no
; X64-NEXT: paddd %xmm0, %xmm1
; X64-NEXT: pextrb $8, %xmm1, 2(%rdi)
; X64-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
-; X64-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; X64-NEXT: pextrw $0, %xmm0, (%rdi)
+; X64-NEXT: pextrw $0, %xmm1, (%rdi)
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
%a = load %i8vec3, %i8vec3* %ap, align 16
@@ -372,38 +368,36 @@ define void @rot(%i8vec3pack* nocapture sret %result, %i8vec3pack* %X, %i8vec3pa
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: pmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
+; X86-NEXT: movdqa {{.*#+}} xmm0 = [40606,0,158,0]
; X86-NEXT: pextrw $0, %xmm0, (%edx)
; X86-NEXT: movb $-98, 2(%edx)
-; X86-NEXT: pmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
+; X86-NEXT: movdqa {{.*#+}} xmm0 = [257,0,1,0]
; X86-NEXT: pextrw $0, %xmm0, (%ecx)
; X86-NEXT: movb $1, 2(%ecx)
; X86-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; X86-NEXT: movdqa %xmm0, %xmm1
; X86-NEXT: psrld $1, %xmm1
-; X86-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5],xmm0[6,7]
+; X86-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
+; X86-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; X86-NEXT: pextrb $8, %xmm1, 2(%eax)
-; X86-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
-; X86-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; X86-NEXT: pextrw $0, %xmm0, (%eax)
; X86-NEXT: addl $16, %esp
; X86-NEXT: retl $4
;
; X64-LABEL: rot:
; X64: # BB#0: # %entry
-; X64-NEXT: pmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
+; X64-NEXT: movdqa {{.*#+}} xmm0 = [40606,158]
; X64-NEXT: pextrw $0, %xmm0, (%rsi)
; X64-NEXT: movb $-98, 2(%rsi)
-; X64-NEXT: pmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
+; X64-NEXT: movdqa {{.*#+}} xmm0 = [257,1]
; X64-NEXT: pextrw $0, %xmm0, (%rdx)
; X64-NEXT: movb $1, 2(%rdx)
; X64-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: psrld $1, %xmm1
-; X64-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5],xmm0[6,7]
+; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
+; X64-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; X64-NEXT: pextrb $8, %xmm1, 2(%rdi)
-; X64-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
-; X64-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; X64-NEXT: pextrw $0, %xmm0, (%rdi)
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/widened-broadcast.ll b/test/CodeGen/X86/widened-broadcast.ll
index 5bce383d9bf1..900a7546f15b 100644
--- a/test/CodeGen/X86/widened-broadcast.ll
+++ b/test/CodeGen/X86/widened-broadcast.ll
@@ -51,14 +51,12 @@ define <8 x float> @load_splat_8f32_4f32_01010101(<4 x float>* %ptr) nounwind uw
;
; AVX2-LABEL: load_splat_8f32_4f32_01010101:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vmovaps (%rdi), %xmm0
-; AVX2-NEXT: vbroadcastsd %xmm0, %ymm0
+; AVX2-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_8f32_4f32_01010101:
; AVX512: # BB#0: # %entry
-; AVX512-NEXT: vmovaps (%rdi), %xmm0
-; AVX512-NEXT: vbroadcastsd %xmm0, %ymm0
+; AVX512-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
%ld = load <4 x float>, <4 x float>* %ptr
@@ -131,14 +129,12 @@ define <8 x i32> @load_splat_8i32_4i32_01010101(<4 x i32>* %ptr) nounwind uwtabl
;
; AVX2-LABEL: load_splat_8i32_4i32_01010101:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vmovaps (%rdi), %xmm0
-; AVX2-NEXT: vbroadcastsd %xmm0, %ymm0
+; AVX2-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_8i32_4i32_01010101:
; AVX512: # BB#0: # %entry
-; AVX512-NEXT: vmovaps (%rdi), %xmm0
-; AVX512-NEXT: vbroadcastsd %xmm0, %ymm0
+; AVX512-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
%ld = load <4 x i32>, <4 x i32>* %ptr
@@ -242,14 +238,12 @@ define <16 x i16> @load_splat_16i16_8i16_0101010101010101(<8 x i16>* %ptr) nounw
;
; AVX2-LABEL: load_splat_16i16_8i16_0101010101010101:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vmovaps (%rdi), %xmm0
-; AVX2-NEXT: vbroadcastss %xmm0, %ymm0
+; AVX2-NEXT: vbroadcastss (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_16i16_8i16_0101010101010101:
; AVX512: # BB#0: # %entry
-; AVX512-NEXT: vmovaps (%rdi), %xmm0
-; AVX512-NEXT: vbroadcastss %xmm0, %ymm0
+; AVX512-NEXT: vbroadcastss (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
%ld = load <8 x i16>, <8 x i16>* %ptr
@@ -272,14 +266,12 @@ define <16 x i16> @load_splat_16i16_8i16_0123012301230123(<8 x i16>* %ptr) nounw
;
; AVX2-LABEL: load_splat_16i16_8i16_0123012301230123:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vmovaps (%rdi), %xmm0
-; AVX2-NEXT: vbroadcastsd %xmm0, %ymm0
+; AVX2-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_16i16_8i16_0123012301230123:
; AVX512: # BB#0: # %entry
-; AVX512-NEXT: vmovaps (%rdi), %xmm0
-; AVX512-NEXT: vbroadcastsd %xmm0, %ymm0
+; AVX512-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
%ld = load <8 x i16>, <8 x i16>* %ptr
@@ -442,14 +434,12 @@ define <32 x i8> @load_splat_32i8_16i8_01010101010101010101010101010101(<16 x i8
;
; AVX2-LABEL: load_splat_32i8_16i8_01010101010101010101010101010101:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vmovdqa (%rdi), %xmm0
-; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
+; AVX2-NEXT: vpbroadcastw (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_32i8_16i8_01010101010101010101010101010101:
; AVX512: # BB#0: # %entry
-; AVX512-NEXT: vmovdqa (%rdi), %xmm0
-; AVX512-NEXT: vpbroadcastw %xmm0, %ymm0
+; AVX512-NEXT: vpbroadcastw (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
%ld = load <16 x i8>, <16 x i8>* %ptr
@@ -472,14 +462,12 @@ define <32 x i8> @load_splat_32i8_16i8_01230123012301230123012301230123(<16 x i8
;
; AVX2-LABEL: load_splat_32i8_16i8_01230123012301230123012301230123:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vmovaps (%rdi), %xmm0
-; AVX2-NEXT: vbroadcastss %xmm0, %ymm0
+; AVX2-NEXT: vbroadcastss (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_32i8_16i8_01230123012301230123012301230123:
; AVX512: # BB#0: # %entry
-; AVX512-NEXT: vmovaps (%rdi), %xmm0
-; AVX512-NEXT: vbroadcastss %xmm0, %ymm0
+; AVX512-NEXT: vbroadcastss (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
%ld = load <16 x i8>, <16 x i8>* %ptr
@@ -502,14 +490,12 @@ define <32 x i8> @load_splat_32i8_16i8_01234567012345670123456701234567(<16 x i8
;
; AVX2-LABEL: load_splat_32i8_16i8_01234567012345670123456701234567:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vmovaps (%rdi), %xmm0
-; AVX2-NEXT: vbroadcastsd %xmm0, %ymm0
+; AVX2-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_32i8_16i8_01234567012345670123456701234567:
; AVX512: # BB#0: # %entry
-; AVX512-NEXT: vmovaps (%rdi), %xmm0
-; AVX512-NEXT: vbroadcastsd %xmm0, %ymm0
+; AVX512-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
%ld = load <16 x i8>, <16 x i8>* %ptr
diff --git a/test/CodeGen/X86/win-alloca-expander.ll b/test/CodeGen/X86/win-alloca-expander.ll
index 45ca3b214ab8..4b6e3bb18e60 100644
--- a/test/CodeGen/X86/win-alloca-expander.ll
+++ b/test/CodeGen/X86/win-alloca-expander.ll
@@ -115,34 +115,36 @@ define void @cfg(i1 %x, i1 %y) {
; Test that the blocks are analyzed in the correct order.
; CHECK-LABEL: cfg:
entry:
- br i1 %x, label %bb1, label %bb2
+ br i1 %x, label %bb1, label %bb3
bb1:
%p1 = alloca %struct.S
; CHECK: pushl %eax
; CHECK: subl $1020, %esp
- br label %bb3
+ br label %bb4
+
bb2:
- %p2 = alloca %struct.T
+ %p5 = alloca %struct.T
; CHECK: pushl %eax
; CHECK: subl $2996, %esp
- br label %bb3
+ call void @g(%struct.T* %p5)
+ ret void
bb3:
- br i1 %y, label %bb4, label %bb5
+ %p2 = alloca %struct.T
+; CHECK: pushl %eax
+; CHECK: subl $2996, %esp
+ br label %bb4
bb4:
+ br i1 %y, label %bb5, label %bb2
+
+bb5:
%p4 = alloca %struct.S
; CHECK: subl $1024, %esp
call void @f(%struct.S* %p4)
ret void
-bb5:
- %p5 = alloca %struct.T
-; CHECK: pushl %eax
-; CHECK: subl $2996, %esp
- call void @g(%struct.T* %p5)
- ret void
}
diff --git a/test/CodeGen/X86/win32-eh.ll b/test/CodeGen/X86/win32-eh.ll
index 88403c687403..de8464e4f8b8 100644
--- a/test/CodeGen/X86/win32-eh.ll
+++ b/test/CodeGen/X86/win32-eh.ll
@@ -27,23 +27,26 @@ catch:
; CHECK-LABEL: _use_except_handler3:
; CHECK: pushl %ebp
-; CHECK: movl %esp, %ebp
-; CHECK: pushl %ebx
-; CHECK: pushl %edi
-; CHECK: pushl %esi
-; CHECK: subl ${{[0-9]+}}, %esp
-; CHECK: movl $-1, -16(%ebp)
-; CHECK: movl $L__ehtable$use_except_handler3, -20(%ebp)
-; CHECK: leal -28(%ebp), %[[node:[^ ,]*]]
-; CHECK: movl $__except_handler3, -24(%ebp)
-; CHECK: movl %fs:0, %[[next:[^ ,]*]]
-; CHECK: movl %[[next]], -28(%ebp)
-; CHECK: movl %[[node]], %fs:0
-; CHECK: calll _may_throw_or_crash
+; CHECK-NEXT: movl %esp, %ebp
+; CHECK-NEXT: pushl %ebx
+; CHECK-NEXT: pushl %edi
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: subl ${{[0-9]+}}, %esp
+; CHECK-NEXT: movl %esp, -36(%ebp)
+; CHECK-NEXT: movl $-1, -16(%ebp)
+; CHECK-NEXT: movl $L__ehtable$use_except_handler3, -20(%ebp)
+; CHECK-NEXT: leal -28(%ebp), %[[node:[^ ,]*]]
+; CHECK-NEXT: movl $__except_handler3, -24(%ebp)
+; CHECK-NEXT: movl %fs:0, %[[next:[^ ,]*]]
+; CHECK-NEXT: movl %[[next]], -28(%ebp)
+; CHECK-NEXT: movl %[[node]], %fs:0
+; CHECK-NEXT: movl $0, -16(%ebp)
+; CHECK-NEXT: calll _may_throw_or_crash
+
; CHECK: movl -28(%ebp), %[[next:[^ ,]*]]
-; CHECK: movl %[[next]], %fs:0
+; CHECK-NEXT: movl %[[next]], %fs:0
; CHECK: retl
-; CHECK: LBB1_2: # %catch{{$}}
+; CHECK-NEXT: LBB1_2: # %catch{{$}}
; CHECK: .section .xdata,"dr"
; CHECK-LABEL: L__ehtable$use_except_handler3:
@@ -66,23 +69,37 @@ catch:
; CHECK-LABEL: _use_except_handler4:
; CHECK: pushl %ebp
-; CHECK: movl %esp, %ebp
-; CHECK: subl ${{[0-9]+}}, %esp
-; CHECK: movl %esp, -36(%ebp)
-; CHECK: movl $-2, -16(%ebp)
-; CHECK: movl $L__ehtable$use_except_handler4, %[[lsda:[^ ,]*]]
-; CHECK: xorl ___security_cookie, %[[lsda]]
-; CHECK: movl %[[lsda]], -20(%ebp)
-; CHECK: leal -28(%ebp), %[[node:[^ ,]*]]
-; CHECK: movl $__except_handler4, -24(%ebp)
-; CHECK: movl %fs:0, %[[next:[^ ,]*]]
-; CHECK: movl %[[next]], -28(%ebp)
-; CHECK: movl %[[node]], %fs:0
-; CHECK: calll _may_throw_or_crash
+; CHECK-NEXT: movl %esp, %ebp
+; CHECK-NEXT: pushl %ebx
+; CHECK-NEXT: pushl %edi
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: subl ${{[0-9]+}}, %esp
+; CHECK-NEXT: movl %ebp, %eax
+; CHECK-NEXT: movl %esp, -36(%ebp)
+; CHECK-NEXT: movl $-2, -16(%ebp)
+; CHECK-NEXT: movl $L__ehtable$use_except_handler4, %[[lsda:[^ ,]*]]
+; CHECK-NEXT: movl ___security_cookie, %[[seccookie:[^ ,]*]]
+; CHECK-NEXT: xorl %[[seccookie]], %[[lsda]]
+; CHECK-NEXT: movl %[[lsda]], -20(%ebp)
+; CHECK-NEXT: xorl %[[seccookie]], %[[tmp1:[^ ,]*]]
+; CHECK-NEXT: movl %[[tmp1]], -40(%ebp)
+; CHECK-NEXT: leal -28(%ebp), %[[node:[^ ,]*]]
+; CHECK-NEXT: movl $__except_handler4, -24(%ebp)
+; CHECK-NEXT: movl %fs:0, %[[next:[^ ,]*]]
+; CHECK-NEXT: movl %[[next]], -28(%ebp)
+; CHECK-NEXT: movl %[[node]], %fs:0
+; CHECK-NEXT: movl $0, -16(%ebp)
+; CHECK-NEXT: calll _may_throw_or_crash
+
; CHECK: movl -28(%ebp), %[[next:[^ ,]*]]
-; CHECK: movl %[[next]], %fs:0
-; CHECK: retl
-; CHECK: LBB2_2: # %catch{{$}}
+; CHECK-NEXT: movl %[[next]], %fs:0
+; CHECK-NEXT: addl $28, %esp
+; CHECK-NEXT: popl %esi
+; CHECK-NEXT: popl %edi
+; CHECK-NEXT: popl %ebx
+; CHECK-NEXT: popl %ebp
+; CHECK-NEXT: retl
+; CHECK-NEXT: LBB2_2: # %catch{{$}}
; CHECK: .section .xdata,"dr"
; CHECK-LABEL: L__ehtable$use_except_handler4:
@@ -109,26 +126,33 @@ catch:
; CHECK-LABEL: _use_except_handler4_ssp:
; CHECK: pushl %ebp
-; CHECK: movl %esp, %ebp
-; CHECK: subl ${{[0-9]+}}, %esp
-; CHECK: movl %ebp, %[[ehguard:[^ ,]*]]
-; CHECK: movl %esp, -36(%ebp)
-; CHECK: movl $-2, -16(%ebp)
-; CHECK: movl $L__ehtable$use_except_handler4_ssp, %[[lsda:[^ ,]*]]
-; CHECK: xorl ___security_cookie, %[[lsda]]
-; CHECK: movl %[[lsda]], -20(%ebp)
-; CHECK: xorl ___security_cookie, %[[ehguard]]
-; CHECK: movl %[[ehguard]], -40(%ebp)
-; CHECK: leal -28(%ebp), %[[node:[^ ,]*]]
-; CHECK: movl $__except_handler4, -24(%ebp)
-; CHECK: movl %fs:0, %[[next:[^ ,]*]]
-; CHECK: movl %[[next]], -28(%ebp)
-; CHECK: movl %[[node]], %fs:0
-; CHECK: calll _may_throw_or_crash
+; CHECK-NEXT: movl %esp, %ebp
+; CHECK-NEXT: pushl %ebx
+; CHECK-NEXT: pushl %edi
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: subl ${{[0-9]+}}, %esp
+; CHECK-NEXT: movl %ebp, %[[ehguard:[^ ,]*]]
+; CHECK-NEXT: movl %esp, -36(%ebp)
+; CHECK-NEXT: movl $-2, -16(%ebp)
+; CHECK-NEXT: movl $L__ehtable$use_except_handler4_ssp, %[[lsda:[^ ,]*]]
+; CHECK-NEXT: movl ___security_cookie, %[[seccookie:[^ ,]*]]
+; CHECK-NEXT: xorl %[[seccookie]], %[[lsda]]
+; CHECK-NEXT: movl %[[lsda]], -20(%ebp)
+; CHECK-NEXT: xorl %[[seccookie]], %[[ehguard]]
+; CHECK-NEXT: movl %[[ehguard]], -40(%ebp)
+; CHECK-NEXT: leal -28(%ebp), %[[node:[^ ,]*]]
+; CHECK-NEXT: movl $__except_handler4, -24(%ebp)
+; CHECK-NEXT: movl %fs:0, %[[next:[^ ,]*]]
+; CHECK-NEXT: movl %[[next]], -28(%ebp)
+; CHECK-NEXT: movl %[[node]], %fs:0
+; CHECK-NEXT: movl $0, -16(%ebp)
+; CHECK-NEXT: calll _may_throw_or_crash
; CHECK: movl -28(%ebp), %[[next:[^ ,]*]]
-; CHECK: movl %[[next]], %fs:0
+; CHECK-NEXT: movl %[[next]], %fs:0
; CHECK: retl
-; CHECK: [[catch:[^ ,]*]]: # %catch{{$}}
+; CHECK-NEXT: [[catch:[^ ,]*]]: # %catch{{$}}
+
+
; CHECK: .section .xdata,"dr"
; CHECK-LABEL: L__ehtable$use_except_handler4_ssp:
@@ -155,23 +179,26 @@ catch:
; CHECK-LABEL: _use_CxxFrameHandler3:
; CHECK: pushl %ebp
-; CHECK: movl %esp, %ebp
-; CHECK: subl ${{[0-9]+}}, %esp
-; CHECK: movl %esp, -28(%ebp)
-; CHECK: movl $-1, -16(%ebp)
-; CHECK: leal -24(%ebp), %[[node:[^ ,]*]]
-; CHECK: movl $___ehhandler$use_CxxFrameHandler3, -20(%ebp)
-; CHECK: movl %fs:0, %[[next:[^ ,]*]]
-; CHECK: movl %[[next]], -24(%ebp)
-; CHECK: movl %[[node]], %fs:0
-; CHECK: movl $0, -16(%ebp)
-; CHECK: calll _may_throw_or_crash
+; CHECK-NEXT: movl %esp, %ebp
+; CHECK-NEXT: pushl %ebx
+; CHECK-NEXT: pushl %edi
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: subl ${{[0-9]+}}, %esp
+; CHECK-NEXT: movl %esp, -28(%ebp)
+; CHECK-NEXT: movl $-1, -16(%ebp)
+; CHECK-NEXT: leal -24(%ebp), %[[node:[^ ,]*]]
+; CHECK-NEXT: movl $___ehhandler$use_CxxFrameHandler3, -20(%ebp)
+; CHECK-NEXT: movl %fs:0, %[[next:[^ ,]*]]
+; CHECK-NEXT: movl %[[next]], -24(%ebp)
+; CHECK-NEXT: movl %[[node]], %fs:0
+; CHECK-NEXT: movl $0, -16(%ebp)
+; CHECK-NEXT: calll _may_throw_or_crash
; CHECK: movl -24(%ebp), %[[next:[^ ,]*]]
-; CHECK: movl %[[next]], %fs:0
+; CHECK-NEXT: movl %[[next]], %fs:0
; CHECK: retl
; CHECK: .section .xdata,"dr"
-; CHECK: .p2align 2
+; CHECK-NEXT: .p2align 2
; CHECK-LABEL: L__ehtable$use_CxxFrameHandler3:
; CHECK-NEXT: .long 429065506
; CHECK-NEXT: .long 2
@@ -185,8 +212,8 @@ catch:
; CHECK-LABEL: ___ehhandler$use_CxxFrameHandler3:
; CHECK: movl $L__ehtable$use_CxxFrameHandler3, %eax
-; CHECK: jmp ___CxxFrameHandler3 # TAILCALL
+; CHECK-NEXT: jmp ___CxxFrameHandler3 # TAILCALL
; CHECK: .safeseh __except_handler3
-; CHECK: .safeseh __except_handler4
-; CHECK: .safeseh ___ehhandler$use_CxxFrameHandler3
+; CHECK-NEXT: .safeseh __except_handler4
+; CHECK-NEXT: .safeseh ___ehhandler$use_CxxFrameHandler3
diff --git a/test/CodeGen/X86/win64_eh_leaf2.ll b/test/CodeGen/X86/win64_eh_leaf2.ll
new file mode 100644
index 000000000000..a840d948518b
--- /dev/null
+++ b/test/CodeGen/X86/win64_eh_leaf2.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -O1 -mtriple=x86_64-pc-win32 | FileCheck %s
+
+; Neither of these functions needs .seh_ directives. We used to crash.
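+; (A function with an empty prologue has no stack adjustments to describe,
+; so no .seh_ directives should be emitted even under uwtable.)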
+
+target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-windows-msvc"
+
+declare i32 @__CxxFrameHandler3(...)
+
+define void @f1() uwtable nounwind personality i32 (...)* @__CxxFrameHandler3 {
+ ret void
+}
+
+; CHECK-LABEL: f1:
+; CHECK-NOT: .seh_
+
+define void @f2() uwtable {
+ ret void
+}
+
+; CHECK-LABEL: f2:
+; CHECK-NOT: .seh_
diff --git a/test/CodeGen/X86/x32-va_start.ll b/test/CodeGen/X86/x32-va_start.ll
index a48468880507..7202a3fb4cdc 100644
--- a/test/CodeGen/X86/x32-va_start.ll
+++ b/test/CodeGen/X86/x32-va_start.ll
@@ -24,7 +24,7 @@ define i32 @foo(float %a, i8* nocapture readnone %fmt, ...) nounwind {
entry:
%ap = alloca [1 x %struct.__va_list_tag], align 16
%0 = bitcast [1 x %struct.__va_list_tag]* %ap to i8*
- call void @llvm.lifetime.start(i64 16, i8* %0) #2
+ call void @llvm.lifetime.start.p0i8(i64 16, i8* %0) #2
call void @llvm.va_start(i8* %0)
; SSE: subl $72, %esp
; SSE: testb %al, %al
@@ -79,14 +79,14 @@ vaarg.end: ; preds = %vaarg.in_mem, %vaar
%vaarg.addr = bitcast i8* %vaarg.addr.in to i32*
%4 = load i32, i32* %vaarg.addr, align 4
call void @llvm.va_end(i8* %0)
- call void @llvm.lifetime.end(i64 16, i8* %0) #2
+ call void @llvm.lifetime.end.p0i8(i64 16, i8* %0) #2
ret i32 %4
; SSE: movl ([[ADDR]]), %eax
; SSE: retq
}
; Function Attrs: nounwind argmemonly
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
; Function Attrs: nounwind
declare void @llvm.va_start(i8*) nounwind
@@ -95,5 +95,5 @@ declare void @llvm.va_start(i8*) nounwind
declare void @llvm.va_end(i8*) nounwind
; Function Attrs: nounwind argmemonly
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
diff --git a/test/CodeGen/X86/x86-64-intrcc-nosse.ll b/test/CodeGen/X86/x86-64-intrcc-nosse.ll
new file mode 100644
index 000000000000..ab84088c3444
--- /dev/null
+++ b/test/CodeGen/X86/x86-64-intrcc-nosse.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=-sse < %s | FileCheck %s
+
+%struct.interrupt_frame = type { i64, i64, i64, i64, i64 }
+
+@llvm.used = appending global [1 x i8*] [i8* bitcast (void (%struct.interrupt_frame*, i64)* @test_isr_sse_clobbers to i8*)], section "llvm.metadata"
+
+; Clobbered SSE registers must not be saved when the target doesn't support SSE.
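+; (With -sse the xmm registers are not legal to spill, so the ISR prologue
+; saves no vector registers; the checks below contain no xmm loads/stores.)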
+define x86_intrcc void @test_isr_sse_clobbers(%struct.interrupt_frame* %frame, i64 %ecode) {
+ ; CHECK-LABEL: test_isr_sse_clobbers:
+ ; CHECK: # BB#0:
+ ; CHECK-NEXT: pushq %rax
+ ; CHECK-NEXT: cld
+ ; CHECK-NEXT: #APP
+ ; CHECK-NEXT: #NO_APP
+ ; CHECK-NEXT: addq $16, %rsp
+ ; CHECK-NEXT: iretq
+ call void asm sideeffect "", "~{xmm0},~{xmm6}"()
+ ret void
+}
diff --git a/test/CodeGen/X86/x86-64-intrcc.ll b/test/CodeGen/X86/x86-64-intrcc.ll
index 2bcf3cde478a..c8bc9e716ce5 100644
--- a/test/CodeGen/X86/x86-64-intrcc.ll
+++ b/test/CodeGen/X86/x86-64-intrcc.ll
@@ -30,22 +30,24 @@ define x86_intrcc void @test_isr_no_ecode(%struct.interrupt_frame* %frame) {
define x86_intrcc void @test_isr_ecode(%struct.interrupt_frame* %frame, i64 %ecode) {
; CHECK-LABEL: test_isr_ecode
; CHECK: pushq %rax
+ ; CHECK: pushq %rax
; CHECK: pushq %rcx
- ; CHECK: movq 16(%rsp), %rax
- ; CHECK: movq 40(%rsp), %rcx
+ ; CHECK: movq 24(%rsp), %rax
+ ; CHECK: movq 48(%rsp), %rcx
; CHECK: popq %rcx
; CHECK: popq %rax
- ; CHECK: addq $8, %rsp
+ ; CHECK: addq $16, %rsp
; CHECK: iretq
; CHECK0-LABEL: test_isr_ecode
; CHECK0: pushq %rax
+ ; CHECK0: pushq %rax
; CHECK0: pushq %rcx
- ; CHECK0: movq 16(%rsp), %rax
- ; CHECK0: leaq 24(%rsp), %rcx
+ ; CHECK0: movq 24(%rsp), %rax
+ ; CHECK0: leaq 32(%rsp), %rcx
; CHECK0: movq 16(%rcx), %rcx
; CHECK0: popq %rcx
; CHECK0: popq %rax
- ; CHECK0: addq $8, %rsp
+ ; CHECK0: addq $16, %rsp
; CHECK0: iretq
%pflags = getelementptr inbounds %struct.interrupt_frame, %struct.interrupt_frame* %frame, i32 0, i32 2
%flags = load i64, i64* %pflags, align 4
@@ -58,6 +60,7 @@ define x86_intrcc void @test_isr_clobbers(%struct.interrupt_frame* %frame, i64 %
call void asm sideeffect "", "~{rax},~{rbx},~{rbp},~{r11},~{xmm0}"()
; CHECK-LABEL: test_isr_clobbers
; CHECK-SSE-NEXT: pushq %rax
+ ; CHECK-SSE-NEXT: pushq %rax
 ; CHECK-SSE-NEXT: pushq %r11
; CHECK-SSE-NEXT: pushq %rbp
; CHECK-SSE-NEXT: pushq %rbx
@@ -80,7 +83,7 @@ define x86_intrcc void @test_isr_clobbers(%struct.interrupt_frame* %frame, i64 %
; CHECK0-SSE-NEXT: popq %rbp
; CHECK0-SSE-NEXT: popq %r11
; CHECK0-SSE-NEXT: popq %rax
- ; CHECK0-SSE-NEXT: addq $8, %rsp
+ ; CHECK0-SSE-NEXT: addq $16, %rsp
; CHECK0-SSE-NEXT: iretq
ret void
}
diff --git a/test/CodeGen/X86/x86-interleaved-access.ll b/test/CodeGen/X86/x86-interleaved-access.ll
index 1fc1b43b0402..6fbec91e77a3 100644
--- a/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/test/CodeGen/X86/x86-interleaved-access.ll
@@ -53,17 +53,29 @@ define <4 x double> @load_factorf64_2(<16 x double>* %ptr) {
}
define <4 x double> @load_factorf64_1(<16 x double>* %ptr) {
-; AVX-LABEL: load_factorf64_1:
-; AVX: # BB#0:
-; AVX-NEXT: vmovupd (%rdi), %ymm0
-; AVX-NEXT: vmovupd 32(%rdi), %ymm1
-; AVX-NEXT: vmovupd 64(%rdi), %ymm2
-; AVX-NEXT: vmovupd 96(%rdi), %ymm3
-; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX-NEXT: vmulpd %ymm0, %ymm0, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: load_factorf64_1:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovups (%rdi), %ymm0
+; AVX1-NEXT: vmovups 32(%rdi), %ymm1
+; AVX1-NEXT: vmovups 64(%rdi), %ymm2
+; AVX1-NEXT: vmovups 96(%rdi), %ymm3
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-NEXT: vmulpd %ymm0, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_factorf64_1:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovupd (%rdi), %ymm0
+; AVX2-NEXT: vmovupd 32(%rdi), %ymm1
+; AVX2-NEXT: vmovupd 64(%rdi), %ymm2
+; AVX2-NEXT: vmovupd 96(%rdi), %ymm3
+; AVX2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX2-NEXT: vmulpd %ymm0, %ymm0, %ymm0
+; AVX2-NEXT: retq
%wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
%strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
%strided.v3 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
diff --git a/test/CodeGen/X86/x86-sanitizer-shrink-wrapping.ll b/test/CodeGen/X86/x86-sanitizer-shrink-wrapping.ll
index 4cb11bf3f5cd..db3bed6e60ff 100644
--- a/test/CodeGen/X86/x86-sanitizer-shrink-wrapping.ll
+++ b/test/CodeGen/X86/x86-sanitizer-shrink-wrapping.ll
@@ -12,7 +12,7 @@ target triple = "x86_64-apple-macosx"
; CHECK: popq
; CHECK-NEXT: retq
; CHECK: movl $40, %edi
-; CHECK-NEXT callq ___asan_report_load4
+; CHECK-NEXT: callq ___asan_report_load4
define void @sanitize() #0 {
entry:
%tmp = load i8, i8* inttoptr (i64 17592186044421 to i8*)
diff --git a/test/CodeGen/X86/xaluo.ll b/test/CodeGen/X86/xaluo.ll
index 57e6b4a50429..25fd21d80c60 100644
--- a/test/CodeGen/X86/xaluo.ll
+++ b/test/CodeGen/X86/xaluo.ll
@@ -1,16 +1,35 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-darwin-unknown < %s | FileCheck %s --check-prefix=CHECK --check-prefix=SDAG
-; RUN: llc -mtriple=x86_64-darwin-unknown -fast-isel -fast-isel-abort=1 < %s | FileCheck %s --check-prefix=CHECK --check-prefix=FAST
+; RUN: llc -mtriple=x86_64-darwin-unknown < %s | FileCheck %s --check-prefix=SDAG
+; RUN: llc -mtriple=x86_64-darwin-unknown -fast-isel -fast-isel-abort=1 < %s | FileCheck %s --check-prefix=FAST
; RUN: llc -mtriple=x86_64-darwin-unknown -mcpu=knl < %s | FileCheck %s --check-prefix=KNL
+
;
; Get the actual value of the overflow bit.
;
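; On x86 the signed variants map to seto/jo/cmovo on the overflow flag and
; the unsigned variants to setb/jb/cmovb on the carry flag. The FAST lines
; additionally mask the result with andb $1/movzbl because FastISel
; materializes the zeroext i1 explicitly.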
; SADDO reg, reg
-define zeroext i1 @saddo.i8(i8 signext %v1, i8 signext %v2, i8* %res) {
-entry:
-; CHECK-LABEL: saddo.i8
-; CHECK: addb %sil, %dil
-; CHECK-NEXT: seto %al
+define zeroext i1 @saddoi8(i8 signext %v1, i8 signext %v2, i8* %res) {
+; SDAG-LABEL: saddoi8:
+; SDAG: ## BB#0:
+; SDAG-NEXT: addb %sil, %dil
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: movb %dil, (%rdx)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: saddoi8:
+; FAST: ## BB#0:
+; FAST-NEXT: addb %sil, %dil
+; FAST-NEXT: seto %al
+; FAST-NEXT: movb %dil, (%rdx)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: saddoi8:
+; KNL: ## BB#0:
+; KNL-NEXT: addb %sil, %dil
+; KNL-NEXT: seto %al
+; KNL-NEXT: movb %dil, (%rdx)
+; KNL-NEXT: retq
%t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v1, i8 %v2)
%val = extractvalue {i8, i1} %t, 0
%obit = extractvalue {i8, i1} %t, 1
@@ -18,11 +37,29 @@ entry:
ret i1 %obit
}
-define zeroext i1 @saddo.i16(i16 %v1, i16 %v2, i16* %res) {
-entry:
-; CHECK-LABEL: saddo.i16
-; CHECK: addw %si, %di
-; CHECK-NEXT: seto %al
+define zeroext i1 @saddoi16(i16 %v1, i16 %v2, i16* %res) {
+; SDAG-LABEL: saddoi16:
+; SDAG: ## BB#0:
+; SDAG-NEXT: addw %si, %di
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: movw %di, (%rdx)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: saddoi16:
+; FAST: ## BB#0:
+; FAST-NEXT: addw %si, %di
+; FAST-NEXT: seto %al
+; FAST-NEXT: movw %di, (%rdx)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: saddoi16:
+; KNL: ## BB#0:
+; KNL-NEXT: addw %si, %di
+; KNL-NEXT: seto %al
+; KNL-NEXT: movw %di, (%rdx)
+; KNL-NEXT: retq
%t = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %v1, i16 %v2)
%val = extractvalue {i16, i1} %t, 0
%obit = extractvalue {i16, i1} %t, 1
@@ -30,11 +67,29 @@ entry:
ret i1 %obit
}
-define zeroext i1 @saddo.i32(i32 %v1, i32 %v2, i32* %res) {
-entry:
-; CHECK-LABEL: saddo.i32
-; CHECK: addl %esi, %edi
-; CHECK-NEXT: seto %al
+define zeroext i1 @saddoi32(i32 %v1, i32 %v2, i32* %res) {
+; SDAG-LABEL: saddoi32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: addl %esi, %edi
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: movl %edi, (%rdx)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: saddoi32:
+; FAST: ## BB#0:
+; FAST-NEXT: addl %esi, %edi
+; FAST-NEXT: seto %al
+; FAST-NEXT: movl %edi, (%rdx)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: saddoi32:
+; KNL: ## BB#0:
+; KNL-NEXT: addl %esi, %edi
+; KNL-NEXT: seto %al
+; KNL-NEXT: movl %edi, (%rdx)
+; KNL-NEXT: retq
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
@@ -42,11 +97,29 @@ entry:
ret i1 %obit
}
-define zeroext i1 @saddo.i64(i64 %v1, i64 %v2, i64* %res) {
-entry:
-; CHECK-LABEL: saddo.i64
-; CHECK: addq %rsi, %rdi
-; CHECK-NEXT: seto %al
+define zeroext i1 @saddoi64(i64 %v1, i64 %v2, i64* %res) {
+; SDAG-LABEL: saddoi64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: addq %rsi, %rdi
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: movq %rdi, (%rdx)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: saddoi64:
+; FAST: ## BB#0:
+; FAST-NEXT: addq %rsi, %rdi
+; FAST-NEXT: seto %al
+; FAST-NEXT: movq %rdi, (%rdx)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: saddoi64:
+; KNL: ## BB#0:
+; KNL-NEXT: addq %rsi, %rdi
+; KNL-NEXT: seto %al
+; KNL-NEXT: movq %rdi, (%rdx)
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
@@ -55,11 +128,29 @@ entry:
}
; SADDO reg, 1 | INC
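; (inc updates the overflow flag, so the signed add-of-1 can still be
; shrunk to inc; contrast with the unsigned case further down.)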
-define zeroext i1 @saddo.inc.i8(i8 %v1, i8* %res) {
-entry:
-; CHECK-LABEL: saddo.inc.i8
-; CHECK: incb %dil
-; CHECK-NEXT: seto %al
+define zeroext i1 @saddoinci8(i8 %v1, i8* %res) {
+; SDAG-LABEL: saddoinci8:
+; SDAG: ## BB#0:
+; SDAG-NEXT: incb %dil
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: movb %dil, (%rsi)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: saddoinci8:
+; FAST: ## BB#0:
+; FAST-NEXT: incb %dil
+; FAST-NEXT: seto %al
+; FAST-NEXT: movb %dil, (%rsi)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: saddoinci8:
+; KNL: ## BB#0:
+; KNL-NEXT: incb %dil
+; KNL-NEXT: seto %al
+; KNL-NEXT: movb %dil, (%rsi)
+; KNL-NEXT: retq
%t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v1, i8 1)
%val = extractvalue {i8, i1} %t, 0
%obit = extractvalue {i8, i1} %t, 1
@@ -67,11 +158,29 @@ entry:
ret i1 %obit
}
-define zeroext i1 @saddo.inc.i16(i16 %v1, i16* %res) {
-entry:
-; CHECK-LABEL: saddo.inc.i16
-; CHECK: incw %di
-; CHECK-NEXT: seto %al
+define zeroext i1 @saddoinci16(i16 %v1, i16* %res) {
+; SDAG-LABEL: saddoinci16:
+; SDAG: ## BB#0:
+; SDAG-NEXT: incw %di
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: movw %di, (%rsi)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: saddoinci16:
+; FAST: ## BB#0:
+; FAST-NEXT: incw %di
+; FAST-NEXT: seto %al
+; FAST-NEXT: movw %di, (%rsi)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: saddoinci16:
+; KNL: ## BB#0:
+; KNL-NEXT: incw %di
+; KNL-NEXT: seto %al
+; KNL-NEXT: movw %di, (%rsi)
+; KNL-NEXT: retq
%t = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %v1, i16 1)
%val = extractvalue {i16, i1} %t, 0
%obit = extractvalue {i16, i1} %t, 1
@@ -79,11 +188,29 @@ entry:
ret i1 %obit
}
-define zeroext i1 @saddo.inc.i32(i32 %v1, i32* %res) {
-entry:
-; CHECK-LABEL: saddo.inc.i32
-; CHECK: incl %edi
-; CHECK-NEXT: seto %al
+define zeroext i1 @saddoinci32(i32 %v1, i32* %res) {
+; SDAG-LABEL: saddoinci32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: incl %edi
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: movl %edi, (%rsi)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: saddoinci32:
+; FAST: ## BB#0:
+; FAST-NEXT: incl %edi
+; FAST-NEXT: seto %al
+; FAST-NEXT: movl %edi, (%rsi)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: saddoinci32:
+; KNL: ## BB#0:
+; KNL-NEXT: incl %edi
+; KNL-NEXT: seto %al
+; KNL-NEXT: movl %edi, (%rsi)
+; KNL-NEXT: retq
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 1)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
@@ -91,11 +218,29 @@ entry:
ret i1 %obit
}
-define zeroext i1 @saddo.inc.i64(i64 %v1, i64* %res) {
-entry:
-; CHECK-LABEL: saddo.inc.i64
-; CHECK: incq %rdi
-; CHECK-NEXT: seto %al
+define zeroext i1 @saddoinci64(i64 %v1, i64* %res) {
+; SDAG-LABEL: saddoinci64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: incq %rdi
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: movq %rdi, (%rsi)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: saddoinci64:
+; FAST: ## BB#0:
+; FAST-NEXT: incq %rdi
+; FAST-NEXT: seto %al
+; FAST-NEXT: movq %rdi, (%rsi)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: saddoinci64:
+; KNL: ## BB#0:
+; KNL-NEXT: incq %rdi
+; KNL-NEXT: seto %al
+; KNL-NEXT: movq %rdi, (%rsi)
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 1)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
@@ -105,15 +250,31 @@ entry:
; SADDO reg, imm | imm, reg
; FIXME: DAG doesn't optimize immediates on the LHS.
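; (The add is commutative, so the constant could be commuted to the RHS and
; folded as an add-with-immediate; FastISel already does this, as the FAST
; lines show, while SDAG materializes $2 in a register first.)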
-define zeroext i1 @saddo.i64imm1(i64 %v1, i64* %res) {
-entry:
-; SDAG-LABEL: saddo.i64imm1
-; SDAG: mov
-; SDAG-NEXT: addq
-; SDAG-NEXT: seto
-; FAST-LABEL: saddo.i64imm1
-; FAST: addq $2, %rdi
-; FAST-NEXT: seto %al
+define zeroext i1 @saddoi64imm1(i64 %v1, i64* %res) {
+; SDAG-LABEL: saddoi64imm1:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movl $2, %ecx
+; SDAG-NEXT: addq %rdi, %rcx
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: movq %rcx, (%rsi)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: saddoi64imm1:
+; FAST: ## BB#0:
+; FAST-NEXT: addq $2, %rdi
+; FAST-NEXT: seto %al
+; FAST-NEXT: movq %rdi, (%rsi)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: saddoi64imm1:
+; KNL: ## BB#0:
+; KNL-NEXT: movl $2, %ecx
+; KNL-NEXT: addq %rdi, %rcx
+; KNL-NEXT: seto %al
+; KNL-NEXT: movq %rcx, (%rsi)
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 2, i64 %v1)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
@@ -122,11 +283,29 @@ entry:
}
; Check boundary conditions for large immediates.
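; (addq sign-extends a 32-bit immediate: INT32_MIN and INT32_MAX fold
; directly, 2147483648 needs a zero-extending movl into a register first,
; and constants outside 32 bits need movabsq.)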
-define zeroext i1 @saddo.i64imm2(i64 %v1, i64* %res) {
-entry:
-; CHECK-LABEL: saddo.i64imm2
-; CHECK: addq $-2147483648, %rdi
-; CHECK-NEXT: seto %al
+define zeroext i1 @saddoi64imm2(i64 %v1, i64* %res) {
+; SDAG-LABEL: saddoi64imm2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: addq $-2147483648, %rdi ## imm = 0x80000000
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: movq %rdi, (%rsi)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: saddoi64imm2:
+; FAST: ## BB#0:
+; FAST-NEXT: addq $-2147483648, %rdi ## imm = 0x80000000
+; FAST-NEXT: seto %al
+; FAST-NEXT: movq %rdi, (%rsi)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: saddoi64imm2:
+; KNL: ## BB#0:
+; KNL-NEXT: addq $-2147483648, %rdi ## imm = 0x80000000
+; KNL-NEXT: seto %al
+; KNL-NEXT: movq %rdi, (%rsi)
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -2147483648)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
@@ -134,12 +313,32 @@ entry:
ret i1 %obit
}
-define zeroext i1 @saddo.i64imm3(i64 %v1, i64* %res) {
-entry:
-; CHECK-LABEL: saddo.i64imm3
-; CHECK: movabsq $-21474836489, %[[REG:[a-z]+]]
-; CHECK-NEXT: addq %rdi, %[[REG]]
-; CHECK-NEXT: seto
+define zeroext i1 @saddoi64imm3(i64 %v1, i64* %res) {
+; SDAG-LABEL: saddoi64imm3:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movabsq $-21474836489, %rcx ## imm = 0xFFFFFFFAFFFFFFF7
+; SDAG-NEXT: addq %rdi, %rcx
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: movq %rcx, (%rsi)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: saddoi64imm3:
+; FAST: ## BB#0:
+; FAST-NEXT: movabsq $-21474836489, %rax ## imm = 0xFFFFFFFAFFFFFFF7
+; FAST-NEXT: addq %rdi, %rax
+; FAST-NEXT: seto %cl
+; FAST-NEXT: movq %rax, (%rsi)
+; FAST-NEXT: andb $1, %cl
+; FAST-NEXT: movzbl %cl, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: saddoi64imm3:
+; KNL: ## BB#0:
+; KNL-NEXT: movabsq $-21474836489, %rcx ## imm = 0xFFFFFFFAFFFFFFF7
+; KNL-NEXT: addq %rdi, %rcx
+; KNL-NEXT: seto %al
+; KNL-NEXT: movq %rcx, (%rsi)
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -21474836489)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
@@ -147,11 +346,29 @@ entry:
ret i1 %obit
}
-define zeroext i1 @saddo.i64imm4(i64 %v1, i64* %res) {
-entry:
-; CHECK-LABEL: saddo.i64imm4
-; CHECK: addq $2147483647, %rdi
-; CHECK-NEXT: seto
+define zeroext i1 @saddoi64imm4(i64 %v1, i64* %res) {
+; SDAG-LABEL: saddoi64imm4:
+; SDAG: ## BB#0:
+; SDAG-NEXT: addq $2147483647, %rdi ## imm = 0x7FFFFFFF
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: movq %rdi, (%rsi)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: saddoi64imm4:
+; FAST: ## BB#0:
+; FAST-NEXT: addq $2147483647, %rdi ## imm = 0x7FFFFFFF
+; FAST-NEXT: seto %al
+; FAST-NEXT: movq %rdi, (%rsi)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: saddoi64imm4:
+; KNL: ## BB#0:
+; KNL-NEXT: addq $2147483647, %rdi ## imm = 0x7FFFFFFF
+; KNL-NEXT: seto %al
+; KNL-NEXT: movq %rdi, (%rsi)
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 2147483647)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
@@ -159,12 +376,32 @@ entry:
ret i1 %obit
}
-define zeroext i1 @saddo.i64imm5(i64 %v1, i64* %res) {
-entry:
-; CHECK-LABEL: saddo.i64imm5
-; CHECK: movl $2147483648
-; CHECK: addq %rdi
-; CHECK-NEXT: seto
+define zeroext i1 @saddoi64imm5(i64 %v1, i64* %res) {
+; SDAG-LABEL: saddoi64imm5:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movl $2147483648, %ecx ## imm = 0x80000000
+; SDAG-NEXT: addq %rdi, %rcx
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: movq %rcx, (%rsi)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: saddoi64imm5:
+; FAST: ## BB#0:
+; FAST-NEXT: movl $2147483648, %eax ## imm = 0x80000000
+; FAST-NEXT: addq %rdi, %rax
+; FAST-NEXT: seto %cl
+; FAST-NEXT: movq %rax, (%rsi)
+; FAST-NEXT: andb $1, %cl
+; FAST-NEXT: movzbl %cl, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: saddoi64imm5:
+; KNL: ## BB#0:
+; KNL-NEXT: movl $2147483648, %ecx ## imm = 0x80000000
+; KNL-NEXT: addq %rdi, %rcx
+; KNL-NEXT: seto %al
+; KNL-NEXT: movq %rcx, (%rsi)
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 2147483648)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
@@ -173,11 +410,29 @@ entry:
}
; UADDO
-define zeroext i1 @uaddo.i32(i32 %v1, i32 %v2, i32* %res) {
-entry:
-; CHECK-LABEL: uaddo.i32
-; CHECK: addl %esi, %edi
-; CHECK-NEXT: setb %al
+define zeroext i1 @uaddoi32(i32 %v1, i32 %v2, i32* %res) {
+; SDAG-LABEL: uaddoi32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: addl %esi, %edi
+; SDAG-NEXT: setb %al
+; SDAG-NEXT: movl %edi, (%rdx)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: uaddoi32:
+; FAST: ## BB#0:
+; FAST-NEXT: addl %esi, %edi
+; FAST-NEXT: setb %al
+; FAST-NEXT: movl %edi, (%rdx)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: uaddoi32:
+; KNL: ## BB#0:
+; KNL-NEXT: addl %esi, %edi
+; KNL-NEXT: setb %al
+; KNL-NEXT: movl %edi, (%rdx)
+; KNL-NEXT: retq
%t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
@@ -185,11 +440,29 @@ entry:
ret i1 %obit
}
-define zeroext i1 @uaddo.i64(i64 %v1, i64 %v2, i64* %res) {
-entry:
-; CHECK-LABEL: uaddo.i64
-; CHECK: addq %rsi, %rdi
-; CHECK-NEXT: setb %al
+define zeroext i1 @uaddoi64(i64 %v1, i64 %v2, i64* %res) {
+; SDAG-LABEL: uaddoi64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: addq %rsi, %rdi
+; SDAG-NEXT: setb %al
+; SDAG-NEXT: movq %rdi, (%rdx)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: uaddoi64:
+; FAST: ## BB#0:
+; FAST-NEXT: addq %rsi, %rdi
+; FAST-NEXT: setb %al
+; FAST-NEXT: movq %rdi, (%rdx)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: uaddoi64:
+; KNL: ## BB#0:
+; KNL-NEXT: addq %rsi, %rdi
+; KNL-NEXT: setb %al
+; KNL-NEXT: movq %rdi, (%rdx)
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
@@ -198,10 +471,29 @@ entry:
}
; UADDO reg, 1 | NOT INC
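; (inc leaves the carry flag untouched, so a setb after inc would read a
; stale CF; the unsigned add-of-1 must use an explicit add $1 instead.)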
-define zeroext i1 @uaddo.inc.i8(i8 %v1, i8* %res) {
-entry:
-; CHECK-LABEL: uaddo.inc.i8
-; CHECK-NOT: incb %dil
+define zeroext i1 @uaddoinci8(i8 %v1, i8* %res) {
+; SDAG-LABEL: uaddoinci8:
+; SDAG: ## BB#0:
+; SDAG-NEXT: addb $1, %dil
+; SDAG-NEXT: setb %al
+; SDAG-NEXT: movb %dil, (%rsi)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: uaddoinci8:
+; FAST: ## BB#0:
+; FAST-NEXT: addb $1, %dil
+; FAST-NEXT: setb %al
+; FAST-NEXT: movb %dil, (%rsi)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: uaddoinci8:
+; KNL: ## BB#0:
+; KNL-NEXT: addb $1, %dil
+; KNL-NEXT: setb %al
+; KNL-NEXT: movb %dil, (%rsi)
+; KNL-NEXT: retq
%t = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %v1, i8 1)
%val = extractvalue {i8, i1} %t, 0
%obit = extractvalue {i8, i1} %t, 1
@@ -209,10 +501,29 @@ entry:
ret i1 %obit
}
-define zeroext i1 @uaddo.inc.i16(i16 %v1, i16* %res) {
-entry:
-; CHECK-LABEL: uaddo.inc.i16
-; CHECK-NOT: incw %di
+define zeroext i1 @uaddoinci16(i16 %v1, i16* %res) {
+; SDAG-LABEL: uaddoinci16:
+; SDAG: ## BB#0:
+; SDAG-NEXT: addw $1, %di
+; SDAG-NEXT: setb %al
+; SDAG-NEXT: movw %di, (%rsi)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: uaddoinci16:
+; FAST: ## BB#0:
+; FAST-NEXT: addw $1, %di
+; FAST-NEXT: setb %al
+; FAST-NEXT: movw %di, (%rsi)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: uaddoinci16:
+; KNL: ## BB#0:
+; KNL-NEXT: addw $1, %di
+; KNL-NEXT: setb %al
+; KNL-NEXT: movw %di, (%rsi)
+; KNL-NEXT: retq
%t = call {i16, i1} @llvm.uadd.with.overflow.i16(i16 %v1, i16 1)
%val = extractvalue {i16, i1} %t, 0
%obit = extractvalue {i16, i1} %t, 1
@@ -220,10 +531,29 @@ entry:
ret i1 %obit
}
-define zeroext i1 @uaddo.inc.i32(i32 %v1, i32* %res) {
-entry:
-; CHECK-LABEL: uaddo.inc.i32
-; CHECK-NOT: incl %edi
+define zeroext i1 @uaddoinci32(i32 %v1, i32* %res) {
+; SDAG-LABEL: uaddoinci32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: addl $1, %edi
+; SDAG-NEXT: setb %al
+; SDAG-NEXT: movl %edi, (%rsi)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: uaddoinci32:
+; FAST: ## BB#0:
+; FAST-NEXT: addl $1, %edi
+; FAST-NEXT: setb %al
+; FAST-NEXT: movl %edi, (%rsi)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: uaddoinci32:
+; KNL: ## BB#0:
+; KNL-NEXT: addl $1, %edi
+; KNL-NEXT: setb %al
+; KNL-NEXT: movl %edi, (%rsi)
+; KNL-NEXT: retq
%t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 1)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
@@ -231,10 +561,29 @@ entry:
ret i1 %obit
}
-define zeroext i1 @uaddo.inc.i64(i64 %v1, i64* %res) {
-entry:
-; CHECK-LABEL: uaddo.inc.i64
-; CHECK-NOT: incq %rdi
+define zeroext i1 @uaddoinci64(i64 %v1, i64* %res) {
+; SDAG-LABEL: uaddoinci64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: addq $1, %rdi
+; SDAG-NEXT: setb %al
+; SDAG-NEXT: movq %rdi, (%rsi)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: uaddoinci64:
+; FAST: ## BB#0:
+; FAST-NEXT: addq $1, %rdi
+; FAST-NEXT: setb %al
+; FAST-NEXT: movq %rdi, (%rsi)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: uaddoinci64:
+; KNL: ## BB#0:
+; KNL-NEXT: addq $1, %rdi
+; KNL-NEXT: setb %al
+; KNL-NEXT: movq %rdi, (%rsi)
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 1)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
@@ -243,11 +592,29 @@ entry:
}
; SSUBO
-define zeroext i1 @ssubo.i32(i32 %v1, i32 %v2, i32* %res) {
-entry:
-; CHECK-LABEL: ssubo.i32
-; CHECK: subl %esi, %edi
-; CHECK-NEXT: seto %al
+define zeroext i1 @ssuboi32(i32 %v1, i32 %v2, i32* %res) {
+; SDAG-LABEL: ssuboi32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: subl %esi, %edi
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: movl %edi, (%rdx)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: ssuboi32:
+; FAST: ## BB#0:
+; FAST-NEXT: subl %esi, %edi
+; FAST-NEXT: seto %al
+; FAST-NEXT: movl %edi, (%rdx)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: ssuboi32:
+; KNL: ## BB#0:
+; KNL-NEXT: subl %esi, %edi
+; KNL-NEXT: seto %al
+; KNL-NEXT: movl %edi, (%rdx)
+; KNL-NEXT: retq
%t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
@@ -255,11 +622,29 @@ entry:
ret i1 %obit
}
-define zeroext i1 @ssubo.i64(i64 %v1, i64 %v2, i64* %res) {
-entry:
-; CHECK-LABEL: ssubo.i64
-; CHECK: subq %rsi, %rdi
-; CHECK-NEXT: seto %al
+define zeroext i1 @ssuboi64(i64 %v1, i64 %v2, i64* %res) {
+; SDAG-LABEL: ssuboi64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: subq %rsi, %rdi
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: movq %rdi, (%rdx)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: ssuboi64:
+; FAST: ## BB#0:
+; FAST-NEXT: subq %rsi, %rdi
+; FAST-NEXT: seto %al
+; FAST-NEXT: movq %rdi, (%rdx)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: ssuboi64:
+; KNL: ## BB#0:
+; KNL-NEXT: subq %rsi, %rdi
+; KNL-NEXT: seto %al
+; KNL-NEXT: movq %rdi, (%rdx)
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
@@ -268,11 +653,29 @@ entry:
}
; USUBO
-define zeroext i1 @usubo.i32(i32 %v1, i32 %v2, i32* %res) {
-entry:
-; CHECK-LABEL: usubo.i32
-; CHECK: subl %esi, %edi
-; CHECK-NEXT: setb %al
+define zeroext i1 @usuboi32(i32 %v1, i32 %v2, i32* %res) {
+; SDAG-LABEL: usuboi32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: subl %esi, %edi
+; SDAG-NEXT: setb %al
+; SDAG-NEXT: movl %edi, (%rdx)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: usuboi32:
+; FAST: ## BB#0:
+; FAST-NEXT: subl %esi, %edi
+; FAST-NEXT: setb %al
+; FAST-NEXT: movl %edi, (%rdx)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: usuboi32:
+; KNL: ## BB#0:
+; KNL-NEXT: subl %esi, %edi
+; KNL-NEXT: setb %al
+; KNL-NEXT: movl %edi, (%rdx)
+; KNL-NEXT: retq
%t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
@@ -280,11 +683,29 @@ entry:
ret i1 %obit
}
-define zeroext i1 @usubo.i64(i64 %v1, i64 %v2, i64* %res) {
-entry:
-; CHECK-LABEL: usubo.i64
-; CHECK: subq %rsi, %rdi
-; CHECK-NEXT: setb %al
+define zeroext i1 @usuboi64(i64 %v1, i64 %v2, i64* %res) {
+; SDAG-LABEL: usuboi64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: subq %rsi, %rdi
+; SDAG-NEXT: setb %al
+; SDAG-NEXT: movq %rdi, (%rdx)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: usuboi64:
+; FAST: ## BB#0:
+; FAST-NEXT: subq %rsi, %rdi
+; FAST-NEXT: setb %al
+; FAST-NEXT: movq %rdi, (%rdx)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: usuboi64:
+; KNL: ## BB#0:
+; KNL-NEXT: subq %rsi, %rdi
+; KNL-NEXT: setb %al
+; KNL-NEXT: movq %rdi, (%rdx)
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
@@ -292,250 +713,277 @@ entry:
ret i1 %obit
}
-; SMULO
-define zeroext i1 @smulo.i8(i8 %v1, i8 %v2, i8* %res) {
-entry:
-; CHECK-LABEL: smulo.i8
-; CHECK: movl %edi, %eax
-; CHECK-NEXT: imulb %sil
-; CHECK-NEXT: seto %cl
- %t = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %v1, i8 %v2)
- %val = extractvalue {i8, i1} %t, 0
- %obit = extractvalue {i8, i1} %t, 1
- store i8 %val, i8* %res
- ret i1 %obit
-}
-
-define zeroext i1 @smulo.i16(i16 %v1, i16 %v2, i16* %res) {
-entry:
-; CHECK-LABEL: smulo.i16
-; CHECK: imulw %si, %di
-; CHECK-NEXT: seto %al
- %t = call {i16, i1} @llvm.smul.with.overflow.i16(i16 %v1, i16 %v2)
- %val = extractvalue {i16, i1} %t, 0
- %obit = extractvalue {i16, i1} %t, 1
- store i16 %val, i16* %res
- ret i1 %obit
-}
-
-define zeroext i1 @smulo.i32(i32 %v1, i32 %v2, i32* %res) {
-entry:
-; CHECK-LABEL: smulo.i32
-; CHECK: imull %esi, %edi
-; CHECK-NEXT: seto %al
- %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- store i32 %val, i32* %res
- ret i1 %obit
-}
-
-define zeroext i1 @smulo.i64(i64 %v1, i64 %v2, i64* %res) {
-entry:
-; CHECK-LABEL: smulo.i64
-; CHECK: imulq %rsi, %rdi
-; CHECK-NEXT: seto %al
- %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- store i64 %val, i64* %res
- ret i1 %obit
-}
-
-; UMULO
-define zeroext i1 @umulo.i8(i8 %v1, i8 %v2, i8* %res) {
-entry:
-; CHECK-LABEL: umulo.i8
-; CHECK: movl %edi, %eax
-; CHECK-NEXT: mulb %sil
-; CHECK-NEXT: seto %cl
- %t = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %v1, i8 %v2)
- %val = extractvalue {i8, i1} %t, 0
- %obit = extractvalue {i8, i1} %t, 1
- store i8 %val, i8* %res
- ret i1 %obit
-}
-
-define zeroext i1 @umulo.i16(i16 %v1, i16 %v2, i16* %res) {
-entry:
-; CHECK-LABEL: umulo.i16
-; CHECK: mulw %si
-; CHECK-NEXT: seto
- %t = call {i16, i1} @llvm.umul.with.overflow.i16(i16 %v1, i16 %v2)
- %val = extractvalue {i16, i1} %t, 0
- %obit = extractvalue {i16, i1} %t, 1
- store i16 %val, i16* %res
- ret i1 %obit
-}
-
-define zeroext i1 @umulo.i32(i32 %v1, i32 %v2, i32* %res) {
-entry:
-; CHECK-LABEL: umulo.i32
-; CHECK: mull %esi
-; CHECK-NEXT: seto
- %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- store i32 %val, i32* %res
- ret i1 %obit
-}
-
-define zeroext i1 @umulo.i64(i64 %v1, i64 %v2, i64* %res) {
-entry:
-; CHECK-LABEL: umulo.i64
-; CHECK: mulq %rsi
-; CHECK-NEXT: seto
- %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- store i64 %val, i64* %res
- ret i1 %obit
-}
-
;
; Check the use of the overflow bit in combination with a select instruction.
;
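; The flags set by the add/cmp feed cmovo/cmovb directly, so no setcc needs
; to be materialized; and when only the overflow bit of a subtraction is
; used, the sub itself degrades to a cmp, as the ssubo/usubo cases show.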
-define i32 @saddo.select.i32(i32 %v1, i32 %v2) {
-entry:
-; CHECK-LABEL: saddo.select.i32
-; CHECK: addl %esi, %eax
-; CHECK-NEXT: cmovol %edi, %esi
+define i32 @saddoselecti32(i32 %v1, i32 %v2) {
+; SDAG-LABEL: saddoselecti32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movl %edi, %eax
+; SDAG-NEXT: addl %esi, %eax
+; SDAG-NEXT: cmovol %edi, %esi
+; SDAG-NEXT: movl %esi, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: saddoselecti32:
+; FAST: ## BB#0:
+; FAST-NEXT: movl %edi, %eax
+; FAST-NEXT: addl %esi, %eax
+; FAST-NEXT: cmovol %edi, %esi
+; FAST-NEXT: movl %esi, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: saddoselecti32:
+; KNL: ## BB#0:
+; KNL-NEXT: movl %edi, %eax
+; KNL-NEXT: addl %esi, %eax
+; KNL-NEXT: cmovol %edi, %esi
+; KNL-NEXT: movl %esi, %eax
+; KNL-NEXT: retq
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
%obit = extractvalue {i32, i1} %t, 1
%ret = select i1 %obit, i32 %v1, i32 %v2
ret i32 %ret
}
-define i64 @saddo.select.i64(i64 %v1, i64 %v2) {
-entry:
-; CHECK-LABEL: saddo.select.i64
-; CHECK: addq %rsi, %rax
-; CHECK-NEXT: cmovoq %rdi, %rsi
+define i64 @saddoselecti64(i64 %v1, i64 %v2) {
+; SDAG-LABEL: saddoselecti64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movq %rdi, %rax
+; SDAG-NEXT: addq %rsi, %rax
+; SDAG-NEXT: cmovoq %rdi, %rsi
+; SDAG-NEXT: movq %rsi, %rax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: saddoselecti64:
+; FAST: ## BB#0:
+; FAST-NEXT: movq %rdi, %rax
+; FAST-NEXT: addq %rsi, %rax
+; FAST-NEXT: cmovoq %rdi, %rsi
+; FAST-NEXT: movq %rsi, %rax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: saddoselecti64:
+; KNL: ## BB#0:
+; KNL-NEXT: movq %rdi, %rax
+; KNL-NEXT: addq %rsi, %rax
+; KNL-NEXT: cmovoq %rdi, %rsi
+; KNL-NEXT: movq %rsi, %rax
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
%obit = extractvalue {i64, i1} %t, 1
%ret = select i1 %obit, i64 %v1, i64 %v2
ret i64 %ret
}
-define i32 @uaddo.select.i32(i32 %v1, i32 %v2) {
-entry:
-; CHECK-LABEL: uaddo.select.i32
-; CHECK: addl %esi, %eax
-; CHECK-NEXT: cmovbl %edi, %esi
+define i32 @uaddoselecti32(i32 %v1, i32 %v2) {
+; SDAG-LABEL: uaddoselecti32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movl %edi, %eax
+; SDAG-NEXT: addl %esi, %eax
+; SDAG-NEXT: cmovbl %edi, %esi
+; SDAG-NEXT: movl %esi, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: uaddoselecti32:
+; FAST: ## BB#0:
+; FAST-NEXT: movl %edi, %eax
+; FAST-NEXT: addl %esi, %eax
+; FAST-NEXT: cmovbl %edi, %esi
+; FAST-NEXT: movl %esi, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: uaddoselecti32:
+; KNL: ## BB#0:
+; KNL-NEXT: movl %edi, %eax
+; KNL-NEXT: addl %esi, %eax
+; KNL-NEXT: cmovbl %edi, %esi
+; KNL-NEXT: movl %esi, %eax
+; KNL-NEXT: retq
%t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
%obit = extractvalue {i32, i1} %t, 1
%ret = select i1 %obit, i32 %v1, i32 %v2
ret i32 %ret
}
-define i64 @uaddo.select.i64(i64 %v1, i64 %v2) {
-entry:
-; CHECK-LABEL: uaddo.select.i64
-; CHECK: addq %rsi, %rax
-; CHECK-NEXT: cmovbq %rdi, %rsi
+define i64 @uaddoselecti64(i64 %v1, i64 %v2) {
+; SDAG-LABEL: uaddoselecti64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movq %rdi, %rax
+; SDAG-NEXT: addq %rsi, %rax
+; SDAG-NEXT: cmovbq %rdi, %rsi
+; SDAG-NEXT: movq %rsi, %rax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: uaddoselecti64:
+; FAST: ## BB#0:
+; FAST-NEXT: movq %rdi, %rax
+; FAST-NEXT: addq %rsi, %rax
+; FAST-NEXT: cmovbq %rdi, %rsi
+; FAST-NEXT: movq %rsi, %rax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: uaddoselecti64:
+; KNL: ## BB#0:
+; KNL-NEXT: movq %rdi, %rax
+; KNL-NEXT: addq %rsi, %rax
+; KNL-NEXT: cmovbq %rdi, %rsi
+; KNL-NEXT: movq %rsi, %rax
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
%obit = extractvalue {i64, i1} %t, 1
%ret = select i1 %obit, i64 %v1, i64 %v2
ret i64 %ret
}
-define i32 @ssubo.select.i32(i32 %v1, i32 %v2) {
-entry:
-; CHECK-LABEL: ssubo.select.i32
-; CHECK: cmpl %esi, %edi
-; CHECK-NEXT: cmovol %edi, %esi
+define i32 @ssuboselecti32(i32 %v1, i32 %v2) {
+; SDAG-LABEL: ssuboselecti32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpl %esi, %edi
+; SDAG-NEXT: cmovol %edi, %esi
+; SDAG-NEXT: movl %esi, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: ssuboselecti32:
+; FAST: ## BB#0:
+; FAST-NEXT: cmpl %esi, %edi
+; FAST-NEXT: cmovol %edi, %esi
+; FAST-NEXT: movl %esi, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: ssuboselecti32:
+; KNL: ## BB#0:
+; KNL-NEXT: cmpl %esi, %edi
+; KNL-NEXT: cmovol %edi, %esi
+; KNL-NEXT: movl %esi, %eax
+; KNL-NEXT: retq
%t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
%obit = extractvalue {i32, i1} %t, 1
%ret = select i1 %obit, i32 %v1, i32 %v2
ret i32 %ret
}
-define i64 @ssubo.select.i64(i64 %v1, i64 %v2) {
-entry:
-; CHECK-LABEL: ssubo.select.i64
-; CHECK: cmpq %rsi, %rdi
-; CHECK-NEXT: cmovoq %rdi, %rsi
+define i64 @ssuboselecti64(i64 %v1, i64 %v2) {
+; SDAG-LABEL: ssuboselecti64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpq %rsi, %rdi
+; SDAG-NEXT: cmovoq %rdi, %rsi
+; SDAG-NEXT: movq %rsi, %rax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: ssuboselecti64:
+; FAST: ## BB#0:
+; FAST-NEXT: cmpq %rsi, %rdi
+; FAST-NEXT: cmovoq %rdi, %rsi
+; FAST-NEXT: movq %rsi, %rax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: ssuboselecti64:
+; KNL: ## BB#0:
+; KNL-NEXT: cmpq %rsi, %rdi
+; KNL-NEXT: cmovoq %rdi, %rsi
+; KNL-NEXT: movq %rsi, %rax
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
%obit = extractvalue {i64, i1} %t, 1
%ret = select i1 %obit, i64 %v1, i64 %v2
ret i64 %ret
}
-define i32 @usubo.select.i32(i32 %v1, i32 %v2) {
-entry:
-; CHECK-LABEL: usubo.select.i32
-; CHECK: cmpl %esi, %edi
-; CHECK-NEXT: cmovbl %edi, %esi
+define i32 @usuboselecti32(i32 %v1, i32 %v2) {
+; SDAG-LABEL: usuboselecti32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpl %esi, %edi
+; SDAG-NEXT: cmovbl %edi, %esi
+; SDAG-NEXT: movl %esi, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: usuboselecti32:
+; FAST: ## BB#0:
+; FAST-NEXT: cmpl %esi, %edi
+; FAST-NEXT: cmovbl %edi, %esi
+; FAST-NEXT: movl %esi, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: usuboselecti32:
+; KNL: ## BB#0:
+; KNL-NEXT: cmpl %esi, %edi
+; KNL-NEXT: cmovbl %edi, %esi
+; KNL-NEXT: movl %esi, %eax
+; KNL-NEXT: retq
%t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
%obit = extractvalue {i32, i1} %t, 1
%ret = select i1 %obit, i32 %v1, i32 %v2
ret i32 %ret
}
-define i64 @usubo.select.i64(i64 %v1, i64 %v2) {
-entry:
-; CHECK-LABEL: usubo.select.i64
-; CHECK: cmpq %rsi, %rdi
-; CHECK-NEXT: cmovbq %rdi, %rsi
+define i64 @usuboselecti64(i64 %v1, i64 %v2) {
+; SDAG-LABEL: usuboselecti64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpq %rsi, %rdi
+; SDAG-NEXT: cmovbq %rdi, %rsi
+; SDAG-NEXT: movq %rsi, %rax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: usuboselecti64:
+; FAST: ## BB#0:
+; FAST-NEXT: cmpq %rsi, %rdi
+; FAST-NEXT: cmovbq %rdi, %rsi
+; FAST-NEXT: movq %rsi, %rax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: usuboselecti64:
+; KNL: ## BB#0:
+; KNL-NEXT: cmpq %rsi, %rdi
+; KNL-NEXT: cmovbq %rdi, %rsi
+; KNL-NEXT: movq %rsi, %rax
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
%obit = extractvalue {i64, i1} %t, 1
%ret = select i1 %obit, i64 %v1, i64 %v2
ret i64 %ret
}
-define i32 @smulo.select.i32(i32 %v1, i32 %v2) {
-entry:
-; CHECK-LABEL: smulo.select.i32
-; CHECK: imull %esi, %eax
-; CHECK-NEXT: cmovol %edi, %esi
- %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
- %obit = extractvalue {i32, i1} %t, 1
- %ret = select i1 %obit, i32 %v1, i32 %v2
- ret i32 %ret
-}
-
-define i64 @smulo.select.i64(i64 %v1, i64 %v2) {
-entry:
-; CHECK-LABEL: smulo.select.i64
-; CHECK: imulq %rsi, %rax
-; CHECK-NEXT: cmovoq %rdi, %rsi
- %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
- %obit = extractvalue {i64, i1} %t, 1
- %ret = select i1 %obit, i64 %v1, i64 %v2
- ret i64 %ret
-}
-
-define i32 @umulo.select.i32(i32 %v1, i32 %v2) {
-entry:
-; CHECK-LABEL: umulo.select.i32
-; CHECK: mull %esi
-; CHECK-NEXT: cmovol %edi, %esi
- %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
- %obit = extractvalue {i32, i1} %t, 1
- %ret = select i1 %obit, i32 %v1, i32 %v2
- ret i32 %ret
-}
-
-define i64 @umulo.select.i64(i64 %v1, i64 %v2) {
-entry:
-; CHECK-LABEL: umulo.select.i64
-; CHECK: mulq %rsi
-; CHECK-NEXT: cmovoq %rdi, %rsi
- %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
- %obit = extractvalue {i64, i1} %t, 1
- %ret = select i1 %obit, i64 %v1, i64 %v2
- ret i64 %ret
-}
-
-
;
; Check the use of the overflow bit in combination with a branch instruction.
;
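; The overflow test folds into jo/jb on the flags from the add/cmp, and the
; generated layout keeps the continue path as the fall-through.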
-define zeroext i1 @saddo.br.i32(i32 %v1, i32 %v2) {
-entry:
-; CHECK-LABEL: saddo.br.i32
-; CHECK: addl %esi, %edi
-; CHECK-NEXT: jo
+define zeroext i1 @saddobri32(i32 %v1, i32 %v2) {
+; SDAG-LABEL: saddobri32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: addl %esi, %edi
+; SDAG-NEXT: jo LBB31_1
+; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+; SDAG-NEXT: LBB31_1: ## %overflow
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: saddobri32:
+; FAST: ## BB#0:
+; FAST-NEXT: addl %esi, %edi
+; FAST-NEXT: jo LBB31_1
+; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+; FAST-NEXT: LBB31_1: ## %overflow
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: saddobri32:
+; KNL: ## BB#0:
+; KNL-NEXT: addl %esi, %edi
+; KNL-NEXT: jo LBB31_1
+; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: movb $1, %al
+; KNL-NEXT: retq
+; KNL-NEXT: LBB31_1: ## %overflow
+; KNL-NEXT: xorl %eax, %eax
+; KNL-NEXT: retq
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
@@ -548,11 +996,43 @@ continue:
ret i1 true
}
-define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
-entry:
-; CHECK-LABEL: saddo.br.i64
-; CHECK: addq %rsi, %rdi
-; CHECK-NEXT: jo
+define zeroext i1 @saddobri64(i64 %v1, i64 %v2) {
+; SDAG-LABEL: saddobri64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: addq %rsi, %rdi
+; SDAG-NEXT: jo LBB32_1
+; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+; SDAG-NEXT: LBB32_1: ## %overflow
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: saddobri64:
+; FAST: ## BB#0:
+; FAST-NEXT: addq %rsi, %rdi
+; FAST-NEXT: jo LBB32_1
+; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+; FAST-NEXT: LBB32_1: ## %overflow
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: saddobri64:
+; KNL: ## BB#0:
+; KNL-NEXT: addq %rsi, %rdi
+; KNL-NEXT: jo LBB32_1
+; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: movb $1, %al
+; KNL-NEXT: retq
+; KNL-NEXT: LBB32_1: ## %overflow
+; KNL-NEXT: xorl %eax, %eax
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
@@ -565,11 +1045,43 @@ continue:
ret i1 true
}
-define zeroext i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
-entry:
-; CHECK-LABEL: uaddo.br.i32
-; CHECK: addl %esi, %edi
-; CHECK-NEXT: jb
+define zeroext i1 @uaddobri32(i32 %v1, i32 %v2) {
+; SDAG-LABEL: uaddobri32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: addl %esi, %edi
+; SDAG-NEXT: jb LBB33_1
+; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+; SDAG-NEXT: LBB33_1: ## %overflow
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: uaddobri32:
+; FAST: ## BB#0:
+; FAST-NEXT: addl %esi, %edi
+; FAST-NEXT: jb LBB33_1
+; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+; FAST-NEXT: LBB33_1: ## %overflow
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: uaddobri32:
+; KNL: ## BB#0:
+; KNL-NEXT: addl %esi, %edi
+; KNL-NEXT: jb LBB33_1
+; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: movb $1, %al
+; KNL-NEXT: retq
+; KNL-NEXT: LBB33_1: ## %overflow
+; KNL-NEXT: xorl %eax, %eax
+; KNL-NEXT: retq
%t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
@@ -582,11 +1094,43 @@ continue:
ret i1 true
}
-define zeroext i1 @uaddo.br.i64(i64 %v1, i64 %v2) {
-entry:
-; CHECK-LABEL: uaddo.br.i64
-; CHECK: addq %rsi, %rdi
-; CHECK-NEXT: jb
+define zeroext i1 @uaddobri64(i64 %v1, i64 %v2) {
+; SDAG-LABEL: uaddobri64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: addq %rsi, %rdi
+; SDAG-NEXT: jb LBB34_1
+; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+; SDAG-NEXT: LBB34_1: ## %overflow
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: uaddobri64:
+; FAST: ## BB#0:
+; FAST-NEXT: addq %rsi, %rdi
+; FAST-NEXT: jb LBB34_1
+; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+; FAST-NEXT: LBB34_1: ## %overflow
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: uaddobri64:
+; KNL: ## BB#0:
+; KNL-NEXT: addq %rsi, %rdi
+; KNL-NEXT: jb LBB34_1
+; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: movb $1, %al
+; KNL-NEXT: retq
+; KNL-NEXT: LBB34_1: ## %overflow
+; KNL-NEXT: xorl %eax, %eax
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
@@ -599,11 +1143,43 @@ continue:
ret i1 true
}
-define zeroext i1 @ssubo.br.i32(i32 %v1, i32 %v2) {
-entry:
-; CHECK-LABEL: ssubo.br.i32
-; CHECK: cmpl %esi, %edi
-; CHECK-NEXT: jo
+define zeroext i1 @ssubobri32(i32 %v1, i32 %v2) {
+; SDAG-LABEL: ssubobri32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpl %esi, %edi
+; SDAG-NEXT: jo LBB35_1
+; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+; SDAG-NEXT: LBB35_1: ## %overflow
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: ssubobri32:
+; FAST: ## BB#0:
+; FAST-NEXT: cmpl %esi, %edi
+; FAST-NEXT: jo LBB35_1
+; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+; FAST-NEXT: LBB35_1: ## %overflow
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: ssubobri32:
+; KNL: ## BB#0:
+; KNL-NEXT: cmpl %esi, %edi
+; KNL-NEXT: jo LBB35_1
+; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: movb $1, %al
+; KNL-NEXT: retq
+; KNL-NEXT: LBB35_1: ## %overflow
+; KNL-NEXT: xorl %eax, %eax
+; KNL-NEXT: retq
%t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
@@ -616,11 +1192,43 @@ continue:
ret i1 true
}
-define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
-entry:
-; CHECK-LABEL: ssubo.br.i64
-; CHECK: cmpq %rsi, %rdi
-; CHECK-NEXT: jo
+define zeroext i1 @ssubobri64(i64 %v1, i64 %v2) {
+; SDAG-LABEL: ssubobri64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpq %rsi, %rdi
+; SDAG-NEXT: jo LBB36_1
+; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+; SDAG-NEXT: LBB36_1: ## %overflow
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: ssubobri64:
+; FAST: ## BB#0:
+; FAST-NEXT: cmpq %rsi, %rdi
+; FAST-NEXT: jo LBB36_1
+; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+; FAST-NEXT: LBB36_1: ## %overflow
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: ssubobri64:
+; KNL: ## BB#0:
+; KNL-NEXT: cmpq %rsi, %rdi
+; KNL-NEXT: jo LBB36_1
+; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: movb $1, %al
+; KNL-NEXT: retq
+; KNL-NEXT: LBB36_1: ## %overflow
+; KNL-NEXT: xorl %eax, %eax
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
@@ -633,11 +1241,43 @@ continue:
ret i1 true
}
-define zeroext i1 @usubo.br.i32(i32 %v1, i32 %v2) {
-entry:
-; CHECK-LABEL: usubo.br.i32
-; CHECK: cmpl %esi, %edi
-; CHECK-NEXT: jb
+define zeroext i1 @usubobri32(i32 %v1, i32 %v2) {
+; SDAG-LABEL: usubobri32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpl %esi, %edi
+; SDAG-NEXT: jb LBB37_1
+; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+; SDAG-NEXT: LBB37_1: ## %overflow
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: usubobri32:
+; FAST: ## BB#0:
+; FAST-NEXT: cmpl %esi, %edi
+; FAST-NEXT: jb LBB37_1
+; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+; FAST-NEXT: LBB37_1: ## %overflow
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: usubobri32:
+; KNL: ## BB#0:
+; KNL-NEXT: cmpl %esi, %edi
+; KNL-NEXT: jb LBB37_1
+; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: movb $1, %al
+; KNL-NEXT: retq
+; KNL-NEXT: LBB37_1: ## %overflow
+; KNL-NEXT: xorl %eax, %eax
+; KNL-NEXT: retq
%t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
%val = extractvalue {i32, i1} %t, 0
%obit = extractvalue {i32, i1} %t, 1
@@ -650,11 +1290,43 @@ continue:
ret i1 true
}
-define zeroext i1 @usubo.br.i64(i64 %v1, i64 %v2) {
-entry:
-; CHECK-LABEL: usubo.br.i64
-; CHECK: cmpq %rsi, %rdi
-; CHECK-NEXT: jb
+define zeroext i1 @usubobri64(i64 %v1, i64 %v2) {
+; SDAG-LABEL: usubobri64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: cmpq %rsi, %rdi
+; SDAG-NEXT: jb LBB38_1
+; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+; SDAG-NEXT: LBB38_1: ## %overflow
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: usubobri64:
+; FAST: ## BB#0:
+; FAST-NEXT: cmpq %rsi, %rdi
+; FAST-NEXT: jb LBB38_1
+; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+; FAST-NEXT: LBB38_1: ## %overflow
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: usubobri64:
+; KNL: ## BB#0:
+; KNL-NEXT: cmpq %rsi, %rdi
+; KNL-NEXT: jb LBB38_1
+; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: movb $1, %al
+; KNL-NEXT: retq
+; KNL-NEXT: LBB38_1: ## %overflow
+; KNL-NEXT: xorl %eax, %eax
+; KNL-NEXT: retq
%t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
@@ -667,102 +1339,70 @@ continue:
ret i1 true
}
-define zeroext i1 @smulo.br.i32(i32 %v1, i32 %v2) {
-entry:
-; CHECK-LABEL: smulo.br.i32
-; CHECK: imull %esi, %edi
-; CHECK-NEXT: jo
- %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- br i1 %obit, label %overflow, label %continue, !prof !0
-
-overflow:
- ret i1 false
-
-continue:
- ret i1 true
-}
-
-define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
-entry:
-; CHECK-LABEL: smulo.br.i64
-; CHECK: imulq %rsi, %rdi
-; CHECK-NEXT: jo
- %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- br i1 %obit, label %overflow, label %continue, !prof !0
-
-overflow:
- ret i1 false
-
-continue:
- ret i1 true
-}
-
-define zeroext i1 @umulo.br.i32(i32 %v1, i32 %v2) {
-entry:
-; CHECK-LABEL: umulo.br.i32
-; CHECK: mull %esi
-; CHECK-NEXT: jo
- %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
- %val = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- br i1 %obit, label %overflow, label %continue, !prof !0
-
-overflow:
- ret i1 false
-
-continue:
- ret i1 true
-}
-
-define zeroext i1 @umulo.br.i64(i64 %v1, i64 %v2) {
-entry:
-; CHECK-LABEL: umulo.br.i64
-; CHECK: mulq %rsi
-; CHECK-NEXT: jo
- %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
- %val = extractvalue {i64, i1} %t, 0
- %obit = extractvalue {i64, i1} %t, 1
- br i1 %obit, label %overflow, label %continue, !prof !0
-
-overflow:
- ret i1 false
-
-continue:
- ret i1 true
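+; Both operands are masked to 8 bits, so the i64 add can never overflow and
+; the overflow bit should fold to a constant zero (note the xorl %edx, %edx
+; in the checks below).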
+define {i64, i1} @uaddoovf(i64 %a, i64 %b) {
+; SDAG-LABEL: uaddoovf:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movzbl %dil, %ecx
+; SDAG-NEXT: movzbl %sil, %eax
+; SDAG-NEXT: addq %rcx, %rax
+; SDAG-NEXT: xorl %edx, %edx
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: uaddoovf:
+; FAST: ## BB#0:
+; FAST-NEXT: movzbl %dil, %ecx
+; FAST-NEXT: movzbl %sil, %eax
+; FAST-NEXT: addq %rcx, %rax
+; FAST-NEXT: xorl %edx, %edx
+; FAST-NEXT: retq
+;
+; KNL-LABEL: uaddoovf:
+; KNL: ## BB#0:
+; KNL-NEXT: movzbl %dil, %ecx
+; KNL-NEXT: movzbl %sil, %eax
+; KNL-NEXT: addq %rcx, %rax
+; KNL-NEXT: xorl %edx, %edx
+; KNL-NEXT: retq
+ %1 = and i64 %a, 255
+ %2 = and i64 %b, 255
+ %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %1, i64 %2)
+ ret {i64, i1} %t
}
-define i1 @bug27873(i64 %c1, i1 %c2) {
-; CHECK-LABEL: bug27873:
-; CHECK: ## BB#0:
-; CHECK-NEXT: movl $160, %ecx
-; CHECK-NEXT: movq %rdi, %rax
-; CHECK-NEXT: mulq %rcx
-; CHECK-NEXT: seto %al
-; CHECK-NEXT: orb %sil, %al
-; CHECK-NEXT: retq
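+; Unsigned %a - %a and -1 - %b can never overflow, so the whole chain should
+; fold to ~%b with a constant-zero overflow bit (notq + xorl %edx, %edx below).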
+define {i64, i1} @usuboovf(i64 %a, i64 %b) {
+; SDAG-LABEL: usuboovf:
+; SDAG: ## BB#0:
+; SDAG-NEXT: notq %rsi
+; SDAG-NEXT: xorl %edx, %edx
+; SDAG-NEXT: movq %rsi, %rax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: usuboovf:
+; FAST: ## BB#0:
+; FAST-NEXT: notq %rsi
+; FAST-NEXT: xorl %edx, %edx
+; FAST-NEXT: movq %rsi, %rax
+; FAST-NEXT: retq
;
-; KNL-LABEL: bug27873:
+; KNL-LABEL: usuboovf:
; KNL: ## BB#0:
-; KNL-NEXT: andl $1, %esi
-; KNL-NEXT: kmovw %esi, %k0
-; KNL-NEXT: movl $160, %ecx
-; KNL-NEXT: movq %rdi, %rax
-; KNL-NEXT: mulq %rcx
-; KNL-NEXT: seto %al
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: korw %k1, %k0, %k0
-; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; KNL-NEXT: notq %rsi
+; KNL-NEXT: xorl %edx, %edx
+; KNL-NEXT: movq %rsi, %rax
; KNL-NEXT: retq
- %mul = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %c1, i64 160)
- %mul.overflow = extractvalue { i64, i1 } %mul, 1
- %x1 = or i1 %c2, %mul.overflow
- ret i1 %x1
+ %t0 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %a)
+ %v0 = extractvalue {i64, i1} %t0, 0
+ %o0 = extractvalue {i64, i1} %t0, 1
+ %t1 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 -1, i64 %b)
+ %v1 = extractvalue {i64, i1} %t1, 0
+ %o1 = extractvalue {i64, i1} %t1, 1
+ %oo = or i1 %o0, %o1
+ %t2 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v0)
+ %v2 = extractvalue {i64, i1} %t2, 0
+ %o2 = extractvalue {i64, i1} %t2, 1
+ %ooo = or i1 %oo, %o2
+ %t = insertvalue {i64, i1} %t2, i1 %ooo, 1
+ ret {i64, i1} %t
}
declare {i8, i1} @llvm.sadd.with.overflow.i8 (i8, i8 ) nounwind readnone
@@ -777,13 +1417,5 @@ declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
-declare {i8, i1} @llvm.smul.with.overflow.i8 (i8, i8 ) nounwind readnone
-declare {i16, i1} @llvm.smul.with.overflow.i16(i16, i16) nounwind readnone
-declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
-declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone
-declare {i8, i1} @llvm.umul.with.overflow.i8 (i8, i8 ) nounwind readnone
-declare {i16, i1} @llvm.umul.with.overflow.i16(i16, i16) nounwind readnone
-declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
-declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
!0 = !{!"branch_weights", i32 0, i32 2147483647}
diff --git a/test/CodeGen/X86/xmulo.ll b/test/CodeGen/X86/xmulo.ll
index 76a7e72ca961..aed305058f0b 100644
--- a/test/CodeGen/X86/xmulo.ll
+++ b/test/CodeGen/X86/xmulo.ll
@@ -1,50 +1,742 @@
-; RUN: llc %s -o - | FileCheck %s
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
-target triple = "i386-apple-macosx10.8.0"
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-darwin-unknown < %s | FileCheck %s --check-prefix=SDAG
+; RUN: llc -mtriple=x86_64-darwin-unknown -fast-isel -fast-isel-abort=1 < %s | FileCheck %s --check-prefix=FAST
+; RUN: llc -mtriple=x86_64-darwin-unknown -mcpu=knl < %s | FileCheck %s --check-prefix=KNL
-declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
-declare i32 @printf(i8*, ...)
-
-@.str = private unnamed_addr constant [10 x i8] c"%llx, %d\0A\00", align 1
-
-define i32 @t1() nounwind {
-; CHECK-LABEL: t1:
-; CHECK: pushl $0
-; CHECK: pushl $0
-; CHECK: pushl $72
-
- %1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 9, i64 8)
- %2 = extractvalue {i64, i1} %1, 0
- %3 = extractvalue {i64, i1} %1, 1
- %4 = zext i1 %3 to i32
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i32 0, i32 0), i64 %2, i32 %4)
- ret i32 0
-}
-
-define i32 @t2() nounwind {
-; CHECK-LABEL: t2:
-; CHECK: pushl $0
-; CHECK: pushl $0
-; CHECK: pushl $0
-
- %1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 9, i64 0)
- %2 = extractvalue {i64, i1} %1, 0
- %3 = extractvalue {i64, i1} %1, 1
- %4 = zext i1 %3 to i32
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i32 0, i32 0), i64 %2, i32 %4)
- ret i32 0
-}
-
-define i32 @t3() nounwind {
-; CHECK-LABEL: t3:
-; CHECK: pushl $1
-; CHECK: pushl $-1
-; CHECK: pushl $-9
-
- %1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 9, i64 -1)
- %2 = extractvalue {i64, i1} %1, 0
- %3 = extractvalue {i64, i1} %1, 1
- %4 = zext i1 %3 to i32
- %5 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i32 0, i32 0), i64 %2, i32 %4)
- ret i32 0
+define {i64, i1} @t1() nounwind {
+; SDAG-LABEL: t1:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movl $8, %ecx
+; SDAG-NEXT: movl $9, %eax
+; SDAG-NEXT: mulq %rcx
+; SDAG-NEXT: seto %dl
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: t1:
+; FAST: ## BB#0:
+; FAST-NEXT: movl $8, %ecx
+; FAST-NEXT: movl $9, %eax
+; FAST-NEXT: mulq %rcx
+; FAST-NEXT: seto %dl
+; FAST-NEXT: retq
+;
+; KNL-LABEL: t1:
+; KNL: ## BB#0:
+; KNL-NEXT: movl $8, %ecx
+; KNL-NEXT: movl $9, %eax
+; KNL-NEXT: mulq %rcx
+; KNL-NEXT: seto %dl
+; KNL-NEXT: retq
+ %1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 9, i64 8)
+ ret {i64, i1} %1
+}
+
+define {i64, i1} @t2() nounwind {
+; SDAG-LABEL: t2:
+; SDAG: ## BB#0:
+; SDAG-NEXT: xorl %ecx, %ecx
+; SDAG-NEXT: movl $9, %eax
+; SDAG-NEXT: mulq %rcx
+; SDAG-NEXT: seto %dl
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: t2:
+; FAST: ## BB#0:
+; FAST-NEXT: xorl %ecx, %ecx
+; FAST-NEXT: movl $9, %eax
+; FAST-NEXT: mulq %rcx
+; FAST-NEXT: seto %dl
+; FAST-NEXT: retq
+;
+; KNL-LABEL: t2:
+; KNL: ## BB#0:
+; KNL-NEXT: xorl %ecx, %ecx
+; KNL-NEXT: movl $9, %eax
+; KNL-NEXT: mulq %rcx
+; KNL-NEXT: seto %dl
+; KNL-NEXT: retq
+ %1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 9, i64 0)
+ ret {i64, i1} %1
+}
+
+define {i64, i1} @t3() nounwind {
+; SDAG-LABEL: t3:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movq $-1, %rcx
+; SDAG-NEXT: movl $9, %eax
+; SDAG-NEXT: mulq %rcx
+; SDAG-NEXT: seto %dl
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: t3:
+; FAST: ## BB#0:
+; FAST-NEXT: movq $-1, %rcx
+; FAST-NEXT: movl $9, %eax
+; FAST-NEXT: mulq %rcx
+; FAST-NEXT: seto %dl
+; FAST-NEXT: retq
+;
+; KNL-LABEL: t3:
+; KNL: ## BB#0:
+; KNL-NEXT: movq $-1, %rcx
+; KNL-NEXT: movl $9, %eax
+; KNL-NEXT: mulq %rcx
+; KNL-NEXT: seto %dl
+; KNL-NEXT: retq
+ %1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 9, i64 -1)
+ ret {i64, i1} %1
+}
+
+; SMULO
+define zeroext i1 @smuloi8(i8 %v1, i8 %v2, i8* %res) {
+; SDAG-LABEL: smuloi8:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movl %edi, %eax
+; SDAG-NEXT: imulb %sil
+; SDAG-NEXT: seto %cl
+; SDAG-NEXT: movb %al, (%rdx)
+; SDAG-NEXT: movl %ecx, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: smuloi8:
+; FAST: ## BB#0:
+; FAST-NEXT: movl %edi, %eax
+; FAST-NEXT: imulb %sil
+; FAST-NEXT: seto %cl
+; FAST-NEXT: movb %al, (%rdx)
+; FAST-NEXT: andb $1, %cl
+; FAST-NEXT: movzbl %cl, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: smuloi8:
+; KNL: ## BB#0:
+; KNL-NEXT: movl %edi, %eax
+; KNL-NEXT: imulb %sil
+; KNL-NEXT: seto %cl
+; KNL-NEXT: movb %al, (%rdx)
+; KNL-NEXT: movl %ecx, %eax
+; KNL-NEXT: retq
+ %t = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %v1, i8 %v2)
+ %val = extractvalue {i8, i1} %t, 0
+ %obit = extractvalue {i8, i1} %t, 1
+ store i8 %val, i8* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @smuloi16(i16 %v1, i16 %v2, i16* %res) {
+; SDAG-LABEL: smuloi16:
+; SDAG: ## BB#0:
+; SDAG-NEXT: imulw %si, %di
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: movw %di, (%rdx)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: smuloi16:
+; FAST: ## BB#0:
+; FAST-NEXT: imulw %si, %di
+; FAST-NEXT: seto %al
+; FAST-NEXT: movw %di, (%rdx)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: smuloi16:
+; KNL: ## BB#0:
+; KNL-NEXT: imulw %si, %di
+; KNL-NEXT: seto %al
+; KNL-NEXT: movw %di, (%rdx)
+; KNL-NEXT: retq
+ %t = call {i16, i1} @llvm.smul.with.overflow.i16(i16 %v1, i16 %v2)
+ %val = extractvalue {i16, i1} %t, 0
+ %obit = extractvalue {i16, i1} %t, 1
+ store i16 %val, i16* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @smuloi32(i32 %v1, i32 %v2, i32* %res) {
+; SDAG-LABEL: smuloi32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: imull %esi, %edi
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: movl %edi, (%rdx)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: smuloi32:
+; FAST: ## BB#0:
+; FAST-NEXT: imull %esi, %edi
+; FAST-NEXT: seto %al
+; FAST-NEXT: movl %edi, (%rdx)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: smuloi32:
+; KNL: ## BB#0:
+; KNL-NEXT: imull %esi, %edi
+; KNL-NEXT: seto %al
+; KNL-NEXT: movl %edi, (%rdx)
+; KNL-NEXT: retq
+ %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @smuloi64(i64 %v1, i64 %v2, i64* %res) {
+; SDAG-LABEL: smuloi64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: imulq %rsi, %rdi
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: movq %rdi, (%rdx)
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: smuloi64:
+; FAST: ## BB#0:
+; FAST-NEXT: imulq %rsi, %rdi
+; FAST-NEXT: seto %al
+; FAST-NEXT: movq %rdi, (%rdx)
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: smuloi64:
+; KNL: ## BB#0:
+; KNL-NEXT: imulq %rsi, %rdi
+; KNL-NEXT: seto %al
+; KNL-NEXT: movq %rdi, (%rdx)
+; KNL-NEXT: retq
+ %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; UMULO
+define zeroext i1 @umuloi8(i8 %v1, i8 %v2, i8* %res) {
+; SDAG-LABEL: umuloi8:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movl %edi, %eax
+; SDAG-NEXT: mulb %sil
+; SDAG-NEXT: seto %cl
+; SDAG-NEXT: movb %al, (%rdx)
+; SDAG-NEXT: movl %ecx, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: umuloi8:
+; FAST: ## BB#0:
+; FAST-NEXT: movl %edi, %eax
+; FAST-NEXT: mulb %sil
+; FAST-NEXT: seto %cl
+; FAST-NEXT: movb %al, (%rdx)
+; FAST-NEXT: andb $1, %cl
+; FAST-NEXT: movzbl %cl, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: umuloi8:
+; KNL: ## BB#0:
+; KNL-NEXT: movl %edi, %eax
+; KNL-NEXT: mulb %sil
+; KNL-NEXT: seto %cl
+; KNL-NEXT: movb %al, (%rdx)
+; KNL-NEXT: movl %ecx, %eax
+; KNL-NEXT: retq
+ %t = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %v1, i8 %v2)
+ %val = extractvalue {i8, i1} %t, 0
+ %obit = extractvalue {i8, i1} %t, 1
+ store i8 %val, i8* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @umuloi16(i16 %v1, i16 %v2, i16* %res) {
+; SDAG-LABEL: umuloi16:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movq %rdx, %rcx
+; SDAG-NEXT: movl %edi, %eax
+; SDAG-NEXT: mulw %si
+; SDAG-NEXT: seto %dl
+; SDAG-NEXT: movw %ax, (%rcx)
+; SDAG-NEXT: movl %edx, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: umuloi16:
+; FAST: ## BB#0:
+; FAST-NEXT: movq %rdx, %rcx
+; FAST-NEXT: movl %edi, %eax
+; FAST-NEXT: mulw %si
+; FAST-NEXT: seto %dl
+; FAST-NEXT: movw %ax, (%rcx)
+; FAST-NEXT: andb $1, %dl
+; FAST-NEXT: movzbl %dl, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: umuloi16:
+; KNL: ## BB#0:
+; KNL-NEXT: movq %rdx, %rcx
+; KNL-NEXT: movl %edi, %eax
+; KNL-NEXT: mulw %si
+; KNL-NEXT: seto %dl
+; KNL-NEXT: movw %ax, (%rcx)
+; KNL-NEXT: movl %edx, %eax
+; KNL-NEXT: retq
+ %t = call {i16, i1} @llvm.umul.with.overflow.i16(i16 %v1, i16 %v2)
+ %val = extractvalue {i16, i1} %t, 0
+ %obit = extractvalue {i16, i1} %t, 1
+ store i16 %val, i16* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @umuloi32(i32 %v1, i32 %v2, i32* %res) {
+; SDAG-LABEL: umuloi32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movq %rdx, %rcx
+; SDAG-NEXT: movl %edi, %eax
+; SDAG-NEXT: mull %esi
+; SDAG-NEXT: seto %dl
+; SDAG-NEXT: movl %eax, (%rcx)
+; SDAG-NEXT: movl %edx, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: umuloi32:
+; FAST: ## BB#0:
+; FAST-NEXT: movq %rdx, %rcx
+; FAST-NEXT: movl %edi, %eax
+; FAST-NEXT: mull %esi
+; FAST-NEXT: seto %dl
+; FAST-NEXT: movl %eax, (%rcx)
+; FAST-NEXT: andb $1, %dl
+; FAST-NEXT: movzbl %dl, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: umuloi32:
+; KNL: ## BB#0:
+; KNL-NEXT: movq %rdx, %rcx
+; KNL-NEXT: movl %edi, %eax
+; KNL-NEXT: mull %esi
+; KNL-NEXT: seto %dl
+; KNL-NEXT: movl %eax, (%rcx)
+; KNL-NEXT: movl %edx, %eax
+; KNL-NEXT: retq
+ %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @umuloi64(i64 %v1, i64 %v2, i64* %res) {
+; SDAG-LABEL: umuloi64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movq %rdx, %rcx
+; SDAG-NEXT: movq %rdi, %rax
+; SDAG-NEXT: mulq %rsi
+; SDAG-NEXT: seto %dl
+; SDAG-NEXT: movq %rax, (%rcx)
+; SDAG-NEXT: movl %edx, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: umuloi64:
+; FAST: ## BB#0:
+; FAST-NEXT: movq %rdx, %rcx
+; FAST-NEXT: movq %rdi, %rax
+; FAST-NEXT: mulq %rsi
+; FAST-NEXT: seto %dl
+; FAST-NEXT: movq %rax, (%rcx)
+; FAST-NEXT: andb $1, %dl
+; FAST-NEXT: movzbl %dl, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: umuloi64:
+; KNL: ## BB#0:
+; KNL-NEXT: movq %rdx, %rcx
+; KNL-NEXT: movq %rdi, %rax
+; KNL-NEXT: mulq %rsi
+; KNL-NEXT: seto %dl
+; KNL-NEXT: movq %rax, (%rcx)
+; KNL-NEXT: movl %edx, %eax
+; KNL-NEXT: retq
+ %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+;
+; Check the use of the overflow bit in combination with a select instruction.
+;
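+; A roughly equivalent C pattern (illustrative only, using the Clang/GCC
+; checked-arithmetic builtins) is:
+;   int r;
+;   return __builtin_smul_overflow(v1, v2, &r) ? v1 : v2;
+;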
+define i32 @smuloselecti32(i32 %v1, i32 %v2) {
+; SDAG-LABEL: smuloselecti32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movl %edi, %eax
+; SDAG-NEXT: imull %esi, %eax
+; SDAG-NEXT: cmovol %edi, %esi
+; SDAG-NEXT: movl %esi, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: smuloselecti32:
+; FAST: ## BB#0:
+; FAST-NEXT: movl %edi, %eax
+; FAST-NEXT: imull %esi, %eax
+; FAST-NEXT: cmovol %edi, %esi
+; FAST-NEXT: movl %esi, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: smuloselecti32:
+; KNL: ## BB#0:
+; KNL-NEXT: movl %edi, %eax
+; KNL-NEXT: imull %esi, %eax
+; KNL-NEXT: cmovol %edi, %esi
+; KNL-NEXT: movl %esi, %eax
+; KNL-NEXT: retq
+ %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @smuloselecti64(i64 %v1, i64 %v2) {
+; SDAG-LABEL: smuloselecti64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movq %rdi, %rax
+; SDAG-NEXT: imulq %rsi, %rax
+; SDAG-NEXT: cmovoq %rdi, %rsi
+; SDAG-NEXT: movq %rsi, %rax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: smuloselecti64:
+; FAST: ## BB#0:
+; FAST-NEXT: movq %rdi, %rax
+; FAST-NEXT: imulq %rsi, %rax
+; FAST-NEXT: cmovoq %rdi, %rsi
+; FAST-NEXT: movq %rsi, %rax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: smuloselecti64:
+; KNL: ## BB#0:
+; KNL-NEXT: movq %rdi, %rax
+; KNL-NEXT: imulq %rsi, %rax
+; KNL-NEXT: cmovoq %rdi, %rsi
+; KNL-NEXT: movq %rsi, %rax
+; KNL-NEXT: retq
+ %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
}
+
+define i32 @umuloselecti32(i32 %v1, i32 %v2) {
+; SDAG-LABEL: umuloselecti32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movl %edi, %eax
+; SDAG-NEXT: mull %esi
+; SDAG-NEXT: cmovol %edi, %esi
+; SDAG-NEXT: movl %esi, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: umuloselecti32:
+; FAST: ## BB#0:
+; FAST-NEXT: movl %edi, %eax
+; FAST-NEXT: mull %esi
+; FAST-NEXT: cmovol %edi, %esi
+; FAST-NEXT: movl %esi, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: umuloselecti32:
+; KNL: ## BB#0:
+; KNL-NEXT: movl %edi, %eax
+; KNL-NEXT: mull %esi
+; KNL-NEXT: cmovol %edi, %esi
+; KNL-NEXT: movl %esi, %eax
+; KNL-NEXT: retq
+ %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @umuloselecti64(i64 %v1, i64 %v2) {
+; SDAG-LABEL: umuloselecti64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movq %rdi, %rax
+; SDAG-NEXT: mulq %rsi
+; SDAG-NEXT: cmovoq %rdi, %rsi
+; SDAG-NEXT: movq %rsi, %rax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: umuloselecti64:
+; FAST: ## BB#0:
+; FAST-NEXT: movq %rdi, %rax
+; FAST-NEXT: mulq %rsi
+; FAST-NEXT: cmovoq %rdi, %rsi
+; FAST-NEXT: movq %rsi, %rax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: umuloselecti64:
+; KNL: ## BB#0:
+; KNL-NEXT: movq %rdi, %rax
+; KNL-NEXT: mulq %rsi
+; KNL-NEXT: cmovoq %rdi, %rsi
+; KNL-NEXT: movq %rsi, %rax
+; KNL-NEXT: retq
+ %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+;
+; Check the use of the overflow bit in combination with a branch instruction.
+;
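+; A roughly equivalent C pattern (illustrative only) is:
+;   int r;
+;   if (__builtin_smul_overflow(v1, v2, &r)) return 0; /* overflow */
+;   return 1;                                          /* continue */
+;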
+define zeroext i1 @smulobri32(i32 %v1, i32 %v2) {
+; SDAG-LABEL: smulobri32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: imull %esi, %edi
+; SDAG-NEXT: jo LBB15_1
+; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+; SDAG-NEXT: LBB15_1: ## %overflow
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: smulobri32:
+; FAST: ## BB#0:
+; FAST-NEXT: imull %esi, %edi
+; FAST-NEXT: jo LBB15_1
+; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+; FAST-NEXT: LBB15_1: ## %overflow
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: smulobri32:
+; KNL: ## BB#0:
+; KNL-NEXT: imull %esi, %edi
+; KNL-NEXT: jo LBB15_1
+; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: movb $1, %al
+; KNL-NEXT: retq
+; KNL-NEXT: LBB15_1: ## %overflow
+; KNL-NEXT: xorl %eax, %eax
+; KNL-NEXT: retq
+ %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @smulobri64(i64 %v1, i64 %v2) {
+; SDAG-LABEL: smulobri64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: imulq %rsi, %rdi
+; SDAG-NEXT: jo LBB16_1
+; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+; SDAG-NEXT: LBB16_1: ## %overflow
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: smulobri64:
+; FAST: ## BB#0:
+; FAST-NEXT: imulq %rsi, %rdi
+; FAST-NEXT: jo LBB16_1
+; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+; FAST-NEXT: LBB16_1: ## %overflow
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: smulobri64:
+; KNL: ## BB#0:
+; KNL-NEXT: imulq %rsi, %rdi
+; KNL-NEXT: jo LBB16_1
+; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: movb $1, %al
+; KNL-NEXT: retq
+; KNL-NEXT: LBB16_1: ## %overflow
+; KNL-NEXT: xorl %eax, %eax
+; KNL-NEXT: retq
+ %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @umulobri32(i32 %v1, i32 %v2) {
+; SDAG-LABEL: umulobri32:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movl %edi, %eax
+; SDAG-NEXT: mull %esi
+; SDAG-NEXT: jo LBB17_1
+; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+; SDAG-NEXT: LBB17_1: ## %overflow
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: umulobri32:
+; FAST: ## BB#0:
+; FAST-NEXT: movl %edi, %eax
+; FAST-NEXT: mull %esi
+; FAST-NEXT: jo LBB17_1
+; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+; FAST-NEXT: LBB17_1: ## %overflow
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: umulobri32:
+; KNL: ## BB#0:
+; KNL-NEXT: movl %edi, %eax
+; KNL-NEXT: mull %esi
+; KNL-NEXT: jo LBB17_1
+; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: movb $1, %al
+; KNL-NEXT: retq
+; KNL-NEXT: LBB17_1: ## %overflow
+; KNL-NEXT: xorl %eax, %eax
+; KNL-NEXT: retq
+ %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @umulobri64(i64 %v1, i64 %v2) {
+; SDAG-LABEL: umulobri64:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movq %rdi, %rax
+; SDAG-NEXT: mulq %rsi
+; SDAG-NEXT: jo LBB18_1
+; SDAG-NEXT: ## BB#2: ## %continue
+; SDAG-NEXT: movb $1, %al
+; SDAG-NEXT: retq
+; SDAG-NEXT: LBB18_1: ## %overflow
+; SDAG-NEXT: xorl %eax, %eax
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: umulobri64:
+; FAST: ## BB#0:
+; FAST-NEXT: movq %rdi, %rax
+; FAST-NEXT: mulq %rsi
+; FAST-NEXT: jo LBB18_1
+; FAST-NEXT: ## BB#2: ## %continue
+; FAST-NEXT: movb $1, %al
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+; FAST-NEXT: LBB18_1: ## %overflow
+; FAST-NEXT: xorl %eax, %eax
+; FAST-NEXT: andb $1, %al
+; FAST-NEXT: movzbl %al, %eax
+; FAST-NEXT: retq
+;
+; KNL-LABEL: umulobri64:
+; KNL: ## BB#0:
+; KNL-NEXT: movq %rdi, %rax
+; KNL-NEXT: mulq %rsi
+; KNL-NEXT: jo LBB18_1
+; KNL-NEXT: ## BB#2: ## %continue
+; KNL-NEXT: movb $1, %al
+; KNL-NEXT: retq
+; KNL-NEXT: LBB18_1: ## %overflow
+; KNL-NEXT: xorl %eax, %eax
+; KNL-NEXT: retq
+ %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define i1 @bug27873(i64 %c1, i1 %c2) {
+; SDAG-LABEL: bug27873:
+; SDAG: ## BB#0:
+; SDAG-NEXT: movl $160, %ecx
+; SDAG-NEXT: movq %rdi, %rax
+; SDAG-NEXT: mulq %rcx
+; SDAG-NEXT: seto %al
+; SDAG-NEXT: orb %sil, %al
+; SDAG-NEXT: retq
+;
+; FAST-LABEL: bug27873:
+; FAST: ## BB#0:
+; FAST-NEXT: movl $160, %ecx
+; FAST-NEXT: movq %rdi, %rax
+; FAST-NEXT: mulq %rcx
+; FAST-NEXT: seto %al
+; FAST-NEXT: orb %sil, %al
+; FAST-NEXT: retq
+;
+; KNL-LABEL: bug27873:
+; KNL: ## BB#0:
+; KNL-NEXT: andl $1, %esi
+; KNL-NEXT: movl $160, %ecx
+; KNL-NEXT: movq %rdi, %rax
+; KNL-NEXT: mulq %rcx
+; KNL-NEXT: kmovw %esi, %k0
+; KNL-NEXT: seto %al
+; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: korw %k1, %k0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; KNL-NEXT: retq
+ %mul = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %c1, i64 160)
+ %mul.overflow = extractvalue { i64, i1 } %mul, 1
+ %x1 = or i1 %c2, %mul.overflow
+ ret i1 %x1
+}
+
+declare {i8, i1} @llvm.smul.with.overflow.i8 (i8, i8 ) nounwind readnone
+declare {i16, i1} @llvm.smul.with.overflow.i16(i16, i16) nounwind readnone
+declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone
+declare {i8, i1} @llvm.umul.with.overflow.i8 (i8, i8 ) nounwind readnone
+declare {i16, i1} @llvm.umul.with.overflow.i16(i16, i16) nounwind readnone
+declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
+
+!0 = !{!"branch_weights", i32 0, i32 2147483647}
diff --git a/test/CodeGen/X86/xop-ifma.ll b/test/CodeGen/X86/xop-ifma.ll
new file mode 100644
index 000000000000..83291095b876
--- /dev/null
+++ b/test/CodeGen/X86/xop-ifma.ll
@@ -0,0 +1,129 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefix=XOP --check-prefix=XOP-AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+xop | FileCheck %s --check-prefix=XOP --check-prefix=XOP-AVX2
+
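+; Check that (mul x, y) + z patterns select the XOP integer multiply-accumulate
+; instructions (vpmacsww, vpmacsdd, vpmacsdql/vpmacsdqh, vpmadcswd).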
+define <8 x i16> @test_mul_v8i16_add_v8i16(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2) {
+; XOP-LABEL: test_mul_v8i16_add_v8i16:
+; XOP: # BB#0:
+; XOP-NEXT: vpmacsww %xmm2, %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+ %1 = mul <8 x i16> %a0, %a1
+ %2 = add <8 x i16> %1, %a2
+ ret <8 x i16> %2
+}
+
+define <16 x i16> @test_mul_v16i16_add_v16i16(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2) {
+; XOP-AVX1-LABEL: test_mul_v16i16_add_v16i16:
+; XOP-AVX1: # BB#0:
+; XOP-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; XOP-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; XOP-AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
+; XOP-AVX1-NEXT: vpmacsww %xmm5, %xmm3, %xmm4, %xmm3
+; XOP-AVX1-NEXT: vpmacsww %xmm2, %xmm1, %xmm0, %xmm0
+; XOP-AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; XOP-AVX1-NEXT: retq
+;
+; XOP-AVX2-LABEL: test_mul_v16i16_add_v16i16:
+; XOP-AVX2: # BB#0:
+; XOP-AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
+; XOP-AVX2-NEXT: vpaddw %ymm0, %ymm2, %ymm0
+; XOP-AVX2-NEXT: retq
+ %1 = mul <16 x i16> %a0, %a1
+ %2 = add <16 x i16> %a2, %1
+ ret <16 x i16> %2
+}
+
+define <4 x i32> @test_mul_v4i32_add_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
+; XOP-LABEL: test_mul_v4i32_add_v4i32:
+; XOP: # BB#0:
+; XOP-NEXT: vpmacsdd %xmm2, %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+ %1 = mul <4 x i32> %a0, %a1
+ %2 = add <4 x i32> %1, %a2
+ ret <4 x i32> %2
+}
+
+define <8 x i32> @test_mul_v8i32_add_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) {
+; XOP-AVX1-LABEL: test_mul_v8i32_add_v8i32:
+; XOP-AVX1: # BB#0:
+; XOP-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; XOP-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; XOP-AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
+; XOP-AVX1-NEXT: vpmacsdd %xmm5, %xmm3, %xmm4, %xmm3
+; XOP-AVX1-NEXT: vpmacsdd %xmm2, %xmm1, %xmm0, %xmm0
+; XOP-AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; XOP-AVX1-NEXT: retq
+;
+; XOP-AVX2-LABEL: test_mul_v8i32_add_v8i32:
+; XOP-AVX2: # BB#0:
+; XOP-AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
+; XOP-AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm0
+; XOP-AVX2-NEXT: retq
+ %1 = mul <8 x i32> %a0, %a1
+ %2 = add <8 x i32> %a2, %1
+ ret <8 x i32> %2
+}
+
+define <4 x i64> @test_mulx_v4i32_add_v4i64(<4 x i32> %a0, <4 x i32> %a1, <4 x i64> %a2) {
+; XOP-AVX1-LABEL: test_mulx_v4i32_add_v4i64:
+; XOP-AVX1: # BB#0:
+; XOP-AVX1-NEXT: vpmovsxdq %xmm0, %xmm3
+; XOP-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; XOP-AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; XOP-AVX1-NEXT: vpmovsxdq %xmm1, %xmm4
+; XOP-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; XOP-AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; XOP-AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
+; XOP-AVX1-NEXT: vpmacsdql %xmm5, %xmm1, %xmm0, %xmm0
+; XOP-AVX1-NEXT: vpmacsdql %xmm2, %xmm4, %xmm3, %xmm1
+; XOP-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOP-AVX1-NEXT: retq
+;
+; XOP-AVX2-LABEL: test_mulx_v4i32_add_v4i64:
+; XOP-AVX2: # BB#0:
+; XOP-AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; XOP-AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; XOP-AVX2-NEXT: vpmuldq %ymm1, %ymm0, %ymm0
+; XOP-AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; XOP-AVX2-NEXT: retq
+ %1 = sext <4 x i32> %a0 to <4 x i64>
+ %2 = sext <4 x i32> %a1 to <4 x i64>
+ %3 = mul <4 x i64> %1, %2
+ %4 = add <4 x i64> %3, %a2
+ ret <4 x i64> %4
+}
+
+define <2 x i64> @test_pmuldq_lo_v4i32_add_v2i64(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2) {
+; XOP-LABEL: test_pmuldq_lo_v4i32_add_v2i64:
+; XOP: # BB#0:
+; XOP-NEXT: vpmacsdql %xmm2, %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+ %1 = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %a0, <4 x i32> %a1)
+ %2 = add <2 x i64> %1, %a2
+ ret <2 x i64> %2
+}
+
+define <2 x i64> @test_pmuldq_hi_v4i32_add_v2i64(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2) {
+; XOP-LABEL: test_pmuldq_hi_v4i32_add_v2i64:
+; XOP: # BB#0:
+; XOP-NEXT: vpmacsdqh %xmm2, %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+ %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
+ %2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
+ %3 = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %1, <4 x i32> %2)
+ %4 = add <2 x i64> %3, %a2
+ ret <2 x i64> %4
+}
+
+define <4 x i32> @test_pmaddwd_v8i16_add_v4i32(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2) {
+; XOP-LABEL: test_pmaddwd_v8i16_add_v4i32:
+; XOP: # BB#0:
+; XOP-NEXT: vpmadcswd %xmm2, %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+ %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1)
+ %2 = add <4 x i32> %1, %a2
+ ret <4 x i32> %2
+}
+
+declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnone
+declare <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32>, <4 x i32>) nounwind readnone
diff --git a/test/CodeGen/X86/xop-intrinsics-fast-isel.ll b/test/CodeGen/X86/xop-intrinsics-fast-isel.ll
index a9287e7d8c91..a100a1425dd1 100644
--- a/test/CodeGen/X86/xop-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/xop-intrinsics-fast-isel.ll
@@ -499,12 +499,22 @@ declare <2 x i64> @llvm.x86.xop.vpcmov(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
define <4 x i64> @test_mm256_cmov_si256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2) {
; X32-LABEL: test_mm256_cmov_si256:
; X32: # BB#0:
-; X32-NEXT: vpcmov %ymm2, %ymm1, %ymm0, %ymm0
+; X32-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; X32-NEXT: vinsertf128 $1, %xmm3, %ymm3, %ymm3
+; X32-NEXT: vxorps %ymm3, %ymm2, %ymm3
+; X32-NEXT: vandps %ymm2, %ymm0, %ymm0
+; X32-NEXT: vandps %ymm3, %ymm1, %ymm1
+; X32-NEXT: vorps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_cmov_si256:
; X64: # BB#0:
-; X64-NEXT: vpcmov %ymm2, %ymm1, %ymm0, %ymm0
+; X64-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; X64-NEXT: vinsertf128 $1, %xmm3, %ymm3, %ymm3
+; X64-NEXT: vxorps %ymm3, %ymm2, %ymm3
+; X64-NEXT: vandps %ymm2, %ymm0, %ymm0
+; X64-NEXT: vandps %ymm3, %ymm1, %ymm1
+; X64-NEXT: vorps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2)
ret <4 x i64> %res
diff --git a/test/CodeGen/X86/xop-intrinsics-x86_64-upgrade.ll b/test/CodeGen/X86/xop-intrinsics-x86_64-upgrade.ll
index 6fba72f2681b..2369beffb6b0 100644
--- a/test/CodeGen/X86/xop-intrinsics-x86_64-upgrade.ll
+++ b/test/CodeGen/X86/xop-intrinsics-x86_64-upgrade.ll
@@ -725,3 +725,42 @@ define <8 x i16> @test_int_x86_xop_vpcomtruew(<8 x i16> %a0, <8 x i16> %a1) {
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.xop.vpcomtruew(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <2 x i64> @test_int_x86_xop_vpcmov(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) {
+; CHECK-LABEL: test_int_x86_xop_vpcmov:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpcmov %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: retq
+ %res = call <2 x i64> @llvm.x86.xop.vpcmov(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) ;
+ ret <2 x i64> %res
+}
+declare <2 x i64> @llvm.x86.xop.vpcmov(<2 x i64>, <2 x i64>, <2 x i64>) nounwind readnone
+
+define <4 x i64> @test_int_x86_xop_vpcmov_256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2) {
+; CHECK-LABEL: test_int_x86_xop_vpcmov_256:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpcmov %ymm2, %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: retq
+ %res = call <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2) ;
+ ret <4 x i64> %res
+}
+define <4 x i64> @test_int_x86_xop_vpcmov_256_mr(<4 x i64> %a0, <4 x i64>* %a1, <4 x i64> %a2) {
+; CHECK-LABEL: test_int_x86_xop_vpcmov_256_mr:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpcmov %ymm1, (%rdi), %ymm0, %ymm0
+; CHECK-NEXT: retq
+ %vec = load <4 x i64>, <4 x i64>* %a1
+ %res = call <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64> %a0, <4 x i64> %vec, <4 x i64> %a2) ;
+ ret <4 x i64> %res
+}
+define <4 x i64> @test_int_x86_xop_vpcmov_256_rm(<4 x i64> %a0, <4 x i64> %a1, <4 x i64>* %a2) {
+; CHECK-LABEL: test_int_x86_xop_vpcmov_256_rm:
+; CHECK: # BB#0:
+; CHECK-NEXT: vpcmov (%rdi), %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: retq
+ %vec = load <4 x i64>, <4 x i64>* %a2
+ %res = call <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %vec) ;
+ ret <4 x i64> %res
+}
+declare <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64>, <4 x i64>, <4 x i64>) nounwind readnone
+
diff --git a/test/CodeGen/X86/xop-intrinsics-x86_64.ll b/test/CodeGen/X86/xop-intrinsics-x86_64.ll
index bb6ef50cdc6c..76286a26ffa9 100644
--- a/test/CodeGen/X86/xop-intrinsics-x86_64.ll
+++ b/test/CodeGen/X86/xop-intrinsics-x86_64.ll
@@ -82,18 +82,23 @@ define <2 x i64> @test_int_x86_xop_vpcmov(<2 x i64> %a0, <2 x i64> %a1, <2 x i64
; CHECK: # BB#0:
; CHECK-NEXT: vpcmov %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
- %res = call <2 x i64> @llvm.x86.xop.vpcmov(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) ;
- ret <2 x i64> %res
+ %1 = xor <2 x i64> %a2, <i64 -1, i64 -1>
+ %2 = and <2 x i64> %a0, %a2
+ %3 = and <2 x i64> %a1, %1
+ %4 = or <2 x i64> %2, %3
+ ret <2 x i64> %4
}
-declare <2 x i64> @llvm.x86.xop.vpcmov(<2 x i64>, <2 x i64>, <2 x i64>) nounwind readnone
define <4 x i64> @test_int_x86_xop_vpcmov_256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpcmov_256:
; CHECK: # BB#0:
; CHECK-NEXT: vpcmov %ymm2, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
- %res = call <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2) ;
- ret <4 x i64> %res
+ %1 = xor <4 x i64> %a2, <i64 -1, i64 -1, i64 -1, i64 -1>
+ %2 = and <4 x i64> %a0, %a2
+ %3 = and <4 x i64> %a1, %1
+ %4 = or <4 x i64> %2, %3
+ ret <4 x i64> %4
}
define <4 x i64> @test_int_x86_xop_vpcmov_256_mr(<4 x i64> %a0, <4 x i64>* %a1, <4 x i64> %a2) {
; CHECK-LABEL: test_int_x86_xop_vpcmov_256_mr:
@@ -101,19 +106,24 @@ define <4 x i64> @test_int_x86_xop_vpcmov_256_mr(<4 x i64> %a0, <4 x i64>* %a1,
; CHECK-NEXT: vpcmov %ymm1, (%rdi), %ymm0, %ymm0
; CHECK-NEXT: retq
%vec = load <4 x i64>, <4 x i64>* %a1
- %res = call <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64> %a0, <4 x i64> %vec, <4 x i64> %a2) ;
- ret <4 x i64> %res
+ %1 = xor <4 x i64> %a2, <i64 -1, i64 -1, i64 -1, i64 -1>
+ %2 = and <4 x i64> %a0, %a2
+ %3 = and <4 x i64> %vec, %1
+ %4 = or <4 x i64> %2, %3
+ ret <4 x i64> %4
}
define <4 x i64> @test_int_x86_xop_vpcmov_256_rm(<4 x i64> %a0, <4 x i64> %a1, <4 x i64>* %a2) {
; CHECK-LABEL: test_int_x86_xop_vpcmov_256_rm:
; CHECK: # BB#0:
; CHECK-NEXT: vpcmov (%rdi), %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
- %vec = load <4 x i64>, <4 x i64>* %a2
- %res = call <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %vec) ;
- ret <4 x i64> %res
+ %vec = load <4 x i64>, <4 x i64>* %a2
+ %1 = xor <4 x i64> %vec, <i64 -1, i64 -1, i64 -1, i64 -1>
+ %2 = and <4 x i64> %a0, %vec
+ %3 = and <4 x i64> %a1, %1
+ %4 = or <4 x i64> %2, %3
+ ret <4 x i64> %4
}
-declare <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64>, <4 x i64>, <4 x i64>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vphaddbd(<16 x i8> %a0) {
; CHECK-LABEL: test_int_x86_xop_vphaddbd:
diff --git a/test/CodeGen/X86/xop-mask-comments.ll b/test/CodeGen/X86/xop-mask-comments.ll
index 14c18c311a6f..4ba47380f89a 100644
--- a/test/CodeGen/X86/xop-mask-comments.ll
+++ b/test/CodeGen/X86/xop-mask-comments.ll
@@ -95,15 +95,19 @@ define <16 x i8> @vpperm_shuffle_general(<16 x i8> %a0, <16 x i8> %a1) {
; VPERMIL2
;
+; Note: _mm_permute2_pd shouldn't be used for constant shuffles as there will always
+; be a quicker (and smaller) alternative.
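+; Here, for example, the constant selection <zero,xmm0[0]> is expected to
+; lower to vxorpd+vunpcklpd rather than vpermil2pd.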
define <2 x double> @vpermil2pd_21(<2 x double> %a0, <2 x double> %a1) {
; X32-LABEL: vpermil2pd_21:
; X32: # BB#0:
-; X32-NEXT: vpermil2pd {{.*#+}} xmm0 = zero,xmm0[0]
+; X32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; X32-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; X32-NEXT: retl
;
; X64-LABEL: vpermil2pd_21:
; X64: # BB#0:
-; X64-NEXT: vpermil2pd {{.*#+}} xmm0 = zero,xmm0[0]
+; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; X64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; X64-NEXT: retq
%1 = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> %a0, <2 x double> %a1, <2 x i64> <i64 10, i64 1>, i8 2)
ret <2 x double> %1
diff --git a/test/CodeGen/X86/xor-combine-debugloc.ll b/test/CodeGen/X86/xor-combine-debugloc.ll
new file mode 100644
index 000000000000..21777c1c572f
--- /dev/null
+++ b/test/CodeGen/X86/xor-combine-debugloc.ll
@@ -0,0 +1,69 @@
+; RUN: llc -stop-after=expand-isel-pseudos < %s | FileCheck %s
+;
+; Make sure that when the entry block of the IR below is lowered, the
+; instruction that implicitly defines %eflags gets the same debug location as
+; the icmp instruction, and the branch instructions get the same debug
+; location as the br instruction.
+;
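+; The debug locations roughly correspond to C source reconstructed from the
+; !DILocation metadata below (for orientation only):
+;   int foo(int x, int y) {
+;     if (x != y)        // line 5
+;       return bar();    // line 6
+;     else
+;       return baz();    // line 8
+;   }
+;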
+; CHECK: [[DLOC1:![0-9]+]] = !DILocation(line: 5, column: 9, scope: !{{[0-9]+}})
+; CHECK: [[DLOC2:![0-9]+]] = !DILocation(line: 5, column: 7, scope: !{{[0-9]+}})
+; CHECK-DAG: [[VREG1:%[^ ]+]] = COPY %esi
+; CHECK-DAG: [[VREG2:%[^ ]+]] = COPY %edi
+; CHECK: SUB32rr [[VREG2]], [[VREG1]], implicit-def %eflags, debug-location [[DLOC1]]
+; CHECK-NEXT: JE_1{{.*}} implicit %eflags, debug-location [[DLOC2]]
+; CHECK-NEXT: JMP_1{{.*}} debug-location [[DLOC2]]
+
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: nounwind uwtable
+define i32 @foo(i32 %x, i32 %y) !dbg !4 {
+entry:
+ tail call void @llvm.dbg.value(metadata i32 %x, i64 0, metadata !9, metadata !11), !dbg !12
+ tail call void @llvm.dbg.value(metadata i32 %y, i64 0, metadata !10, metadata !11), !dbg !13
+ %cmp = icmp ne i32 %x, %y, !dbg !14
+ br i1 %cmp, label %if.then, label %if.else, !dbg !16
+
+if.then: ; preds = %entry
+ %call = tail call i32 (...) @bar() #3, !dbg !17
+ br label %return, !dbg !18
+
+if.else: ; preds = %entry
+ %call1 = tail call i32 (...) @baz() #3, !dbg !19
+ br label %return, !dbg !20
+
+return: ; preds = %if.else, %if.then
+ %retval.0 = phi i32 [ %call, %if.then ], [ %call1, %if.else ]
+ ret i32 %retval.0, !dbg !21
+}
+
+declare i32 @bar(...)
+declare i32 @baz(...)
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, emissionKind: FullDebug)
+!1 = !DIFile(filename: "foo.c", directory: "b/")
+!2 = !{i32 2, !"Dwarf Version", i32 4}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 4, type: !5, isLocal: false, isDefinition: true, scopeLine: 4, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !8)
+!5 = !DISubroutineType(types: !6)
+!6 = !{!7, !7, !7}
+!7 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!8 = !{!9, !10}
+!9 = !DILocalVariable(name: "x", arg: 1, scope: !4, file: !1, line: 4, type: !7)
+!10 = !DILocalVariable(name: "y", arg: 2, scope: !4, file: !1, line: 4, type: !7)
+!11 = !DIExpression()
+!12 = !DILocation(line: 4, column: 13, scope: !4)
+!13 = !DILocation(line: 4, column: 20, scope: !4)
+!14 = !DILocation(line: 5, column: 9, scope: !15)
+!15 = distinct !DILexicalBlock(scope: !4, file: !1, line: 5, column: 7)
+!16 = !DILocation(line: 5, column: 7, scope: !4)
+!17 = !DILocation(line: 6, column: 12, scope: !15)
+!18 = !DILocation(line: 6, column: 5, scope: !15)
+!19 = !DILocation(line: 8, column: 12, scope: !15)
+!20 = !DILocation(line: 8, column: 5, scope: !15)
+!21 = !DILocation(line: 9, column: 1, scope: !4)
diff --git a/test/CodeGen/X86/xray-log-args.ll b/test/CodeGen/X86/xray-log-args.ll
new file mode 100644
index 000000000000..a551868ffb4a
--- /dev/null
+++ b/test/CodeGen/X86/xray-log-args.ll
@@ -0,0 +1,35 @@
+; When argument logging is specified, emit the entry sled accordingly.
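+; The third field of each sled record below is the sled kind; reading the
+; encoding as in the XRay ABI (an assumption here): 0 = function entry,
+; 1 = function exit, 2 = tail-call exit, 3 = entry that also logs arguments.
+; With "xray-log-args"="1" the entry sleds are therefore emitted as kind 3.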
+
+; RUN: llc -filetype=asm -o - -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -filetype=asm -o - -mtriple=x86_64-darwin-unknown < %s | FileCheck %s
+
+define i32 @callee(i32 %arg) nounwind noinline uwtable "function-instrument"="xray-always" "xray-log-args"="1" {
+ ret i32 %arg
+}
+; CHECK-LABEL: Lxray_synthetic_0:
+; CHECK: .quad {{\.?}}Lxray_sled_0
+; CHECK: .quad {{_?}}callee
+; CHECK: .byte 3
+; CHECK: .byte 1
+; CHECK: .{{(zero|space)}} 14
+; CHECK: .quad {{\.?}}Lxray_sled_1
+; CHECK: .quad {{_?}}callee
+; CHECK: .byte 1
+; CHECK: .byte 1
+; CHECK: .{{(zero|space)}} 14
+
+define i32 @caller(i32 %arg) nounwind noinline uwtable "function-instrument"="xray-always" "xray-log-args"="1" {
+ %retval = tail call i32 @callee(i32 %arg)
+ ret i32 %retval
+}
+; CHECK-LABEL: Lxray_synthetic_1:
+; CHECK: .quad {{\.?}}Lxray_sled_2
+; CHECK: .quad {{_?}}caller
+; CHECK: .byte 3
+; CHECK: .byte 1
+; CHECK: .{{(zero|space)}} 14
+; CHECK: .quad {{\.?}}Lxray_sled_3
+; CHECK: .quad {{_?}}caller
+; CHECK: .byte 2
+; CHECK: .byte 1
+; CHECK: .{{(zero|space)}} 14
diff --git a/test/CodeGen/XCore/fneg.ll b/test/CodeGen/XCore/fneg.ll
index 67ab6195aad2..20433da3bbe5 100644
--- a/test/CodeGen/XCore/fneg.ll
+++ b/test/CodeGen/XCore/fneg.ll
@@ -1,9 +1,9 @@
; RUN: llc < %s -march=xcore | FileCheck %s
-define i1 @test(double %F) nounwind {
+define i1 @test(double %F, double %G) nounwind {
entry:
; CHECK-LABEL: test:
; CHECK: xor
%0 = fsub double -0.000000e+00, %F
- %1 = fcmp olt double 0.000000e+00, %0
+ %1 = fcmp olt double %G, %0
ret i1 %1
}
diff --git a/test/CodeGen/XCore/section-name.ll b/test/CodeGen/XCore/section-name.ll
new file mode 100644
index 000000000000..65161db34bea
--- /dev/null
+++ b/test/CodeGen/XCore/section-name.ll
@@ -0,0 +1,9 @@
+; RUN: not llc < %s -march=xcore 2>&1 | FileCheck %s
+
+@bar = internal global i32 zeroinitializer
+
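+; @bar is presumably placed in the XCore .dp.bss section, so a function
+; named ".dp.bss" would redefine that section's symbol.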
+define void @".dp.bss"() {
+ ret void
+}
+
+; CHECK: LLVM ERROR: invalid symbol redefinition
diff --git a/test/CodeGen/XCore/varargs.ll b/test/CodeGen/XCore/varargs.ll
index 28c293390c59..2e364b275610 100644
--- a/test/CodeGen/XCore/varargs.ll
+++ b/test/CodeGen/XCore/varargs.ll
@@ -26,10 +26,10 @@ entry:
; CHECK-LABEL: test_vararg
; CHECK: extsp 6
; CHECK: stw lr, sp[1]
+; CHECK: stw r3, sp[6]
; CHECK: stw r0, sp[3]
; CHECK: stw r1, sp[4]
; CHECK: stw r2, sp[5]
-; CHECK: stw r3, sp[6]
; CHECK: ldaw r0, sp[3]
; CHECK: stw r0, sp[2]
%list = alloca i8*, align 4
diff --git a/test/DebugInfo/AArch64/asan-stack-vars.ll b/test/DebugInfo/AArch64/asan-stack-vars.ll
new file mode 100644
index 000000000000..1dff15cb588b
--- /dev/null
+++ b/test/DebugInfo/AArch64/asan-stack-vars.ll
@@ -0,0 +1,326 @@
+; RUN: llc -O0 -filetype=obj -o - %s | llvm-dwarfdump - | FileCheck %s
+;
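+; Verify that the location of "imageSize", which ASan moved into its own
+; stack frame, covers the entire range of the function.
+;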
+; Derived from (clang -O0 -g -fsanitize=address -fobjc-arc)
+; @protocol NSObject
+; @end
+; @interface NSObject<NSObject>{}
+; + (instancetype)alloc;
+; @end
+; struct CGSize {
+; double width;
+; double height;
+; };
+; typedef struct CGSize CGSize;
+; @interface Object : NSObject
+; - (instancetype)initWithSize:(CGSize)size;
+; - (id)aMessage;
+; @end
+; @implementation MyObject
+; + (id)doWithSize:(CGSize)imageSize andObject:(id)object {
+; return [object aMessage];
+; }
+; @end
+;
+; CHECK: .debug_info contents:
+; CHECK: DW_TAG_subprogram
+; CHECK-NEXT: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
+; CHECK-NEXT: DW_AT_high_pc [DW_FORM_addr] ([[FN_END:.*]])
+; CHECK: "_cmd"
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NEXT: DW_AT_location {{.*}} ([[OFS:.*]])
+; CHECK-NEXT: DW_AT_name {{.*}}"imageSize"
+;
+; CHECK: .debug_loc contents:
+; CHECK: [[OFS]]: Beginning address offset: 0x0000000000000000
+; CHECK-NOT: 0x{{.*}}: Beginning
+; CHECK: Ending address offset: [[FN_END]]
+
+; ModuleID = 'm.m'
+source_filename = "m.m"
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "arm64-apple-ios"
+
+%0 = type opaque
+%struct._class_t = type { %struct._class_t*, %struct._class_t*, %struct._objc_cache*, i8* (i8*, i8*)**, %struct._class_ro_t* }
+%struct._objc_cache = type opaque
+%struct._class_ro_t = type { i32, i32, i32, i8*, i8*, %struct.__method_list_t*, %struct._objc_protocol_list*, %struct._ivar_list_t*, i8*, %struct._prop_list_t* }
+%struct.__method_list_t = type { i32, i32, [0 x %struct._objc_method] }
+%struct._objc_method = type { i8*, i8*, i8* }
+%struct._objc_protocol_list = type { i64, [0 x %struct._protocol_t*] }
+%struct._protocol_t = type { i8*, i8*, %struct._objc_protocol_list*, %struct.__method_list_t*, %struct.__method_list_t*, %struct.__method_list_t*, %struct.__method_list_t*, %struct._prop_list_t*, i32, i32, i8**, i8*, %struct._prop_list_t* }
+%struct._ivar_list_t = type { i32, i32, [0 x %struct._ivar_t] }
+%struct._ivar_t = type { i32*, i8*, i8*, i32, i32 }
+%struct._prop_list_t = type { i32, i32, [0 x %struct._prop_t] }
+%struct._prop_t = type { i8*, i8* }
+%struct.CGSize = type { double, double }
+
+@"OBJC_CLASS_$_Object" = external global %struct._class_t
+@"OBJC_CLASSLIST_REFERENCES_$_" = private global %struct._class_t* @"OBJC_CLASS_$_Object", section "__DATA, __objc_classrefs, regular, no_dead_strip", align 8
+@OBJC_METH_VAR_NAME_ = private unnamed_addr constant [6 x i8] c"alloc\00", section "__TEXT,__objc_methname,cstring_literals", align 1
+@OBJC_SELECTOR_REFERENCES_ = private externally_initialized global i8* getelementptr inbounds ([6 x i8], [6 x i8]* @OBJC_METH_VAR_NAME_, i32 0, i32 0), section "__DATA, __objc_selrefs, literal_pointers, no_dead_strip", align 8
+@OBJC_METH_VAR_NAME_.1 = private unnamed_addr constant [14 x i8] c"initWithSize:\00", section "__TEXT,__objc_methname,cstring_literals", align 1
+@OBJC_SELECTOR_REFERENCES_.2 = private externally_initialized global i8* getelementptr inbounds ([14 x i8], [14 x i8]* @OBJC_METH_VAR_NAME_.1, i32 0, i32 0), section "__DATA, __objc_selrefs, literal_pointers, no_dead_strip", align 8
+@OBJC_METH_VAR_NAME_.3 = private unnamed_addr constant [9 x i8] c"aMessage\00", section "__TEXT,__objc_methname,cstring_literals", align 1
+@OBJC_SELECTOR_REFERENCES_.4 = private externally_initialized global i8* getelementptr inbounds ([9 x i8], [9 x i8]* @OBJC_METH_VAR_NAME_.3, i32 0, i32 0), section "__DATA, __objc_selrefs, literal_pointers, no_dead_strip", align 8
+@_objc_empty_cache = external global %struct._objc_cache
+@"OBJC_CLASS_$_MyObject" = global %struct._class_t { %struct._class_t* @"OBJC_METACLASS_$_MyObject", %struct._class_t* null, %struct._objc_cache* @_objc_empty_cache, i8* (i8*, i8*)** null, %struct._class_ro_t* @"\01l_OBJC_CLASS_RO_$_MyObject" }, section "__DATA, __objc_data", align 8
+@"OBJC_METACLASS_$_MyObject" = global %struct._class_t { %struct._class_t* @"OBJC_METACLASS_$_MyObject", %struct._class_t* @"OBJC_CLASS_$_MyObject", %struct._objc_cache* @_objc_empty_cache, i8* (i8*, i8*)** null, %struct._class_ro_t* @"\01l_OBJC_METACLASS_RO_$_MyObject" }, section "__DATA, __objc_data", align 8
+@OBJC_CLASS_NAME_ = private unnamed_addr constant [9 x i8] c"MyObject\00", section "__TEXT,__objc_classname,cstring_literals", align 1
+@OBJC_METH_VAR_NAME_.5 = private unnamed_addr constant [12 x i8] c"doWithSize:\00", section "__TEXT,__objc_methname,cstring_literals", align 1
+@OBJC_METH_VAR_TYPE_ = private unnamed_addr constant [21 x i8] c"@32@0:8{CGSize=dd}16\00", section "__TEXT,__objc_methtype,cstring_literals", align 1
+@"\01l_OBJC_$_CLASS_METHODS_MyObject" = private global { i32, i32, [1 x %struct._objc_method] } { i32 24, i32 1, [1 x %struct._objc_method] [%struct._objc_method { i8* getelementptr inbounds ([12 x i8], [12 x i8]* @OBJC_METH_VAR_NAME_.5, i32 0, i32 0), i8* getelementptr inbounds ([21 x i8], [21 x i8]* @OBJC_METH_VAR_TYPE_, i32 0, i32 0), i8* bitcast (i8* (i8*, i8*, [2 x double])* @"\01+[MyObject doWithSize:]" to i8*) }] }, section "__DATA, __objc_const", align 8
+@"\01l_OBJC_METACLASS_RO_$_MyObject" = private global %struct._class_ro_t { i32 131, i32 40, i32 40, i8* null, i8* getelementptr inbounds ([9 x i8], [9 x i8]* @OBJC_CLASS_NAME_, i32 0, i32 0), %struct.__method_list_t* bitcast ({ i32, i32, [1 x %struct._objc_method] }* @"\01l_OBJC_$_CLASS_METHODS_MyObject" to %struct.__method_list_t*), %struct._objc_protocol_list* null, %struct._ivar_list_t* null, i8* null, %struct._prop_list_t* null }, section "__DATA, __objc_const", align 8
+@"\01l_OBJC_CLASS_RO_$_MyObject" = private global %struct._class_ro_t { i32 130, i32 0, i32 0, i8* null, i8* getelementptr inbounds ([9 x i8], [9 x i8]* @OBJC_CLASS_NAME_, i32 0, i32 0), %struct.__method_list_t* null, %struct._objc_protocol_list* null, %struct._ivar_list_t* null, i8* null, %struct._prop_list_t* null }, section "__DATA, __objc_const", align 8
+@"OBJC_LABEL_CLASS_$" = private global [1 x i8*] [i8* bitcast (%struct._class_t* @"OBJC_CLASS_$_MyObject" to i8*)], section "__DATA, __objc_classlist, regular, no_dead_strip", align 8
+@llvm.compiler.used = appending global [12 x i8*] [i8* bitcast (%struct._class_t** @"OBJC_CLASSLIST_REFERENCES_$_" to i8*), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @OBJC_METH_VAR_NAME_, i32 0, i32 0), i8* bitcast (i8** @OBJC_SELECTOR_REFERENCES_ to i8*), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @OBJC_METH_VAR_NAME_.1, i32 0, i32 0), i8* bitcast (i8** @OBJC_SELECTOR_REFERENCES_.2 to i8*), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @OBJC_METH_VAR_NAME_.3, i32 0, i32 0), i8* bitcast (i8** @OBJC_SELECTOR_REFERENCES_.4 to i8*), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @OBJC_CLASS_NAME_, i32 0, i32 0), i8* getelementptr inbounds ([12 x i8], [12 x i8]* @OBJC_METH_VAR_NAME_.5, i32 0, i32 0), i8* getelementptr inbounds ([21 x i8], [21 x i8]* @OBJC_METH_VAR_TYPE_, i32 0, i32 0), i8* bitcast ({ i32, i32, [1 x %struct._objc_method] }* @"\01l_OBJC_$_CLASS_METHODS_MyObject" to i8*), i8* bitcast ([1 x i8*]* @"OBJC_LABEL_CLASS_$" to i8*)], section "llvm.metadata"
+@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* @asan.module_ctor, i8* null }]
+@__asan_shadow_memory_dynamic_address = external global i64
+@__asan_gen_ = private unnamed_addr constant [34 x i8] c"2 32 16 9 imageSize 64 8 6 object\00", align 1
+
+; Function Attrs: noinline sanitize_address ssp uwtable
+define internal i8* @"\01+[MyObject doWithSize:]"(i8* %self, i8* %_cmd, [2 x double] %imageSize.coerce) #0 !dbg !14 {
+entry:
+ %0 = load i64, i64* @__asan_shadow_memory_dynamic_address
+ %self.addr = alloca i8*, align 8
+ %_cmd.addr = alloca i8*, align 8
+ %MyAlloca = alloca [96 x i8], align 32, !dbg !35
+ %1 = ptrtoint [96 x i8]* %MyAlloca to i64, !dbg !35
+ %2 = add i64 %1, 32, !dbg !35
+ %3 = inttoptr i64 %2 to %struct.CGSize*, !dbg !35
+ %4 = add i64 %1, 64, !dbg !35
+ %5 = inttoptr i64 %4 to %0**, !dbg !35
+ %6 = inttoptr i64 %1 to i64*, !dbg !35
+ store i64 1102416563, i64* %6, !dbg !35
+ %7 = add i64 %1, 8, !dbg !35
+ %8 = inttoptr i64 %7 to i64*, !dbg !35
+ store i64 ptrtoint ([34 x i8]* @__asan_gen_ to i64), i64* %8, !dbg !35
+ %9 = add i64 %1, 16, !dbg !35
+ %10 = inttoptr i64 %9 to i64*, !dbg !35
+ store i64 ptrtoint (i8* (i8*, i8*, [2 x double])* @"\01+[MyObject doWithSize:]" to i64), i64* %10, !dbg !35
+ %11 = lshr i64 %1, 3, !dbg !35
+ %12 = add i64 %11, %0, !dbg !35
+ %13 = add i64 %12, 0, !dbg !35
+ %14 = inttoptr i64 %13 to i64*, !dbg !35
+ store i64 -940689368107847183, i64* %14, align 1, !dbg !35
+ %15 = add i64 %12, 9, !dbg !35
+ %16 = inttoptr i64 %15 to i16*, !dbg !35
+ store i16 -3085, i16* %16, align 1, !dbg !35
+ %17 = add i64 %12, 11, !dbg !35
+ %18 = inttoptr i64 %17 to i8*, !dbg !35
+ store i8 -13, i8* %18, align 1, !dbg !35
+ call void @llvm.dbg.declare(metadata %struct.CGSize* %3, metadata !36, metadata !37), !dbg !38
+ call void @llvm.dbg.declare(metadata %0** %5, metadata !39, metadata !37), !dbg !45
+ %19 = bitcast %struct.CGSize* %3 to [2 x double]*
+ %20 = ptrtoint [2 x double]* %19 to i64
+ %21 = lshr i64 %20, 3
+ %22 = add i64 %21, %0
+ %23 = inttoptr i64 %22 to i16*
+ %24 = load i16, i16* %23
+ %25 = icmp ne i16 %24, 0
+ br i1 %25, label %26, label %27
+
+; <label>:26: ; preds = %entry
+ call void @__asan_report_store16(i64 %20)
+ call void asm sideeffect "", ""()
+ unreachable
+
+; <label>:27: ; preds = %entry
+ store [2 x double] %imageSize.coerce, [2 x double]* %19, align 8
+ store i8* %self, i8** %self.addr, align 8
+ call void @llvm.dbg.declare(metadata i8** %self.addr, metadata !46, metadata !48), !dbg !49
+ store i8* %_cmd, i8** %_cmd.addr, align 8
+ call void @llvm.dbg.declare(metadata i8** %_cmd.addr, metadata !50, metadata !48), !dbg !49
+ %28 = load %struct._class_t*, %struct._class_t** @"OBJC_CLASSLIST_REFERENCES_$_", align 8, !dbg !52
+ %29 = add i64 lshr (i64 ptrtoint (i8** @OBJC_SELECTOR_REFERENCES_ to i64), i64 3), %0, !dbg !52
+ %30 = inttoptr i64 %29 to i8*, !dbg !52
+ %31 = load i8, i8* %30, !dbg !52
+ %32 = icmp ne i8 %31, 0, !dbg !52
+ br i1 %32, label %33, label %34, !dbg !52
+
+; <label>:33: ; preds = %27
+ call void @__asan_report_load8(i64 ptrtoint (i8** @OBJC_SELECTOR_REFERENCES_ to i64)), !dbg !52
+ call void asm sideeffect "", ""(), !dbg !52
+ unreachable, !dbg !52
+
+; <label>:34: ; preds = %27
+ %35 = load i8*, i8** @OBJC_SELECTOR_REFERENCES_, align 8, !dbg !52, !invariant.load !2
+ %36 = bitcast %struct._class_t* %28 to i8*, !dbg !52
+ %call = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %36, i8* %35), !dbg !52
+ %37 = bitcast i8* %call to %0*, !dbg !52
+ %38 = add i64 lshr (i64 ptrtoint (i8** @OBJC_SELECTOR_REFERENCES_.2 to i64), i64 3), %0, !dbg !53
+ %39 = inttoptr i64 %38 to i8*, !dbg !53
+ %40 = load i8, i8* %39, !dbg !53
+ %41 = icmp ne i8 %40, 0, !dbg !53
+ br i1 %41, label %42, label %43, !dbg !53
+
+; <label>:42: ; preds = %34
+ call void @__asan_report_load8(i64 ptrtoint (i8** @OBJC_SELECTOR_REFERENCES_.2 to i64)), !dbg !53
+ call void asm sideeffect "", ""(), !dbg !53
+ unreachable, !dbg !53
+
+; <label>:43: ; preds = %34
+ %44 = load i8*, i8** @OBJC_SELECTOR_REFERENCES_.2, align 8, !dbg !53, !invariant.load !2
+ %45 = bitcast %0* %37 to i8*, !dbg !53
+ %46 = bitcast %struct.CGSize* %3 to [2 x double]*, !dbg !53
+ %47 = ptrtoint [2 x double]* %46 to i64, !dbg !53
+ %48 = lshr i64 %47, 3, !dbg !53
+ %49 = add i64 %48, %0, !dbg !53
+ %50 = inttoptr i64 %49 to i16*, !dbg !53
+ %51 = load i16, i16* %50, !dbg !53
+ %52 = icmp ne i16 %51, 0, !dbg !53
+ br i1 %52, label %53, label %54, !dbg !53
+
+; <label>:53: ; preds = %43
+ call void @__asan_report_load16(i64 %47), !dbg !53
+ call void asm sideeffect "", ""(), !dbg !53
+ unreachable, !dbg !53
+
+; <label>:54: ; preds = %43
+ %55 = load [2 x double], [2 x double]* %46, align 8, !dbg !53
+ %call1 = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*, [2 x double])*)(i8* %45, i8* %44, [2 x double] %55), !dbg !53
+ %56 = bitcast i8* %call1 to %0*, !dbg !53
+ %57 = ptrtoint %0** %5 to i64, !dbg !45
+ %58 = lshr i64 %57, 3, !dbg !45
+ %59 = add i64 %58, %0, !dbg !45
+ %60 = inttoptr i64 %59 to i8*, !dbg !45
+ %61 = load i8, i8* %60, !dbg !45
+ %62 = icmp ne i8 %61, 0, !dbg !45
+ br i1 %62, label %63, label %64, !dbg !45
+
+; <label>:63: ; preds = %54
+ call void @__asan_report_store8(i64 %57), !dbg !45
+ call void asm sideeffect "", ""(), !dbg !45
+ unreachable, !dbg !45
+
+; <label>:64: ; preds = %54
+ store %0* %56, %0** %5, align 8, !dbg !45
+ %65 = load %0*, %0** %5, align 8, !dbg !54
+ %66 = add i64 lshr (i64 ptrtoint (i8** @OBJC_SELECTOR_REFERENCES_.4 to i64), i64 3), %0, !dbg !55
+ %67 = inttoptr i64 %66 to i8*, !dbg !55
+ %68 = load i8, i8* %67, !dbg !55
+ %69 = icmp ne i8 %68, 0, !dbg !55
+ br i1 %69, label %70, label %71, !dbg !55
+
+; <label>:70: ; preds = %64
+ call void @__asan_report_load8(i64 ptrtoint (i8** @OBJC_SELECTOR_REFERENCES_.4 to i64)), !dbg !55
+ call void asm sideeffect "", ""(), !dbg !55
+ unreachable, !dbg !55
+
+; <label>:71: ; preds = %64
+ %72 = load i8*, i8** @OBJC_SELECTOR_REFERENCES_.4, align 8, !dbg !55, !invariant.load !2
+ %73 = bitcast %0* %65 to i8*, !dbg !55
+ %call2 = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %73, i8* %72), !dbg !55
+ call void asm sideeffect "mov\09fp, fp\09\09# marker for objc_retainAutoreleaseReturnValue", ""(), !dbg !55
+ %74 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call2) #3, !dbg !55
+ %75 = bitcast %0** %5 to i8**, !dbg !56
+ call void @objc_storeStrong(i8** %75, i8* null) #3, !dbg !56
+ %76 = tail call i8* @objc_autoreleaseReturnValue(i8* %74) #3, !dbg !56
+ store i64 1172321806, i64* %6, !dbg !56
+ %77 = add i64 %12, 0, !dbg !56
+ %78 = inttoptr i64 %77 to i64*, !dbg !56
+ store i64 0, i64* %78, align 1, !dbg !56
+ %79 = add i64 %12, 9, !dbg !56
+ %80 = inttoptr i64 %79 to i16*, !dbg !56
+ store i16 0, i16* %80, align 1, !dbg !56
+ %81 = add i64 %12, 11, !dbg !56
+ %82 = inttoptr i64 %81 to i8*, !dbg !56
+ store i8 0, i8* %82, align 1, !dbg !56
+ ret i8* %76, !dbg !56
+}
+
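+; The instrumentation above follows the standard ASan shadow mapping,
+; Shadow = (Addr >> 3) + Offset (the recurring lshr-by-3 plus %0 pattern).
+; On entry the frame is tagged with the current-frame magic 0x41B58AB3
+; (1102416563) and its redzone shadow bytes are poisoned with nonzero
+; markers; before the ret the tag is rewritten to the retired-frame magic
+; 0x45E0360E (1172321806) and the shadow bytes are zeroed again.
+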
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+; Function Attrs: nonlazybind
+declare i8* @objc_msgSend(i8*, i8*, ...) #2
+
+declare i8* @objc_retainAutoreleasedReturnValue(i8* returned)
+
+declare void @objc_storeStrong(i8**, i8*)
+
+declare i8* @objc_autoreleaseReturnValue(i8* returned)
+
+define internal void @asan.module_ctor() {
+ call void @__asan_init()
+ call void @__asan_version_mismatch_check_v8()
+ ret void
+}
+
+declare void @__asan_init()
+
+declare void @__asan_version_mismatch_check_v8()
+
+declare void @__asan_report_load8(i64)
+
+declare void @__asan_report_load16(i64)
+
+declare void @__asan_report_store8(i64)
+
+declare void @__asan_report_store16(i64)
+
+attributes #0 = { noinline sanitize_address ssp uwtable }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nonlazybind }
+attributes #3 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!5, !6, !7, !8, !9, !10, !11, !12}
+!llvm.ident = !{!13}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_ObjC, file: !1, producer: "clang version 5.0.0 (trunk 295779) (llvm/trunk 295777)", isOptimized: false, runtimeVersion: 2, emissionKind: FullDebug, enums: !2, retainedTypes: !3)
+!1 = !DIFile(filename: "m.m", directory: "/")
+!2 = !{}
+!3 = !{!4}
+!4 = !DICompositeType(tag: DW_TAG_structure_type, name: "MyObject", scope: !1, file: !1, line: 15, flags: DIFlagObjcClassComplete, elements: !2, runtimeLang: DW_LANG_ObjC)
+!5 = !{i32 1, !"Objective-C Version", i32 2}
+!6 = !{i32 1, !"Objective-C Image Info Version", i32 0}
+!7 = !{i32 1, !"Objective-C Image Info Section", !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
+!8 = !{i32 4, !"Objective-C Garbage Collection", i32 0}
+!9 = !{i32 1, !"Objective-C Class Properties", i32 64}
+!10 = !{i32 2, !"Dwarf Version", i32 2}
+!11 = !{i32 2, !"Debug Info Version", i32 3}
+!12 = !{i32 1, !"PIC Level", i32 2}
+!13 = !{!"clang version 5.0.0 (trunk 295779) (llvm/trunk 295777)"}
+!14 = distinct !DISubprogram(name: "+[MyObject doWithSize:]", scope: !1, file: !1, line: 16, type: !15, isLocal: true, isDefinition: true, scopeLine: 16, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!15 = !DISubroutineType(types: !16)
+!16 = !{!17, !24, !26, !29}
+!17 = !DIDerivedType(tag: DW_TAG_typedef, name: "id", file: !1, baseType: !18)
+!18 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !19, size: 64)
+!19 = !DICompositeType(tag: DW_TAG_structure_type, name: "objc_object", file: !1, elements: !20)
+!20 = !{!21}
+!21 = !DIDerivedType(tag: DW_TAG_member, name: "isa", scope: !19, file: !1, baseType: !22, size: 64)
+!22 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !23, size: 64)
+!23 = !DICompositeType(tag: DW_TAG_structure_type, name: "objc_class", file: !1, flags: DIFlagFwdDecl)
+!24 = !DIDerivedType(tag: DW_TAG_const_type, baseType: !25, flags: DIFlagArtificial | DIFlagObjectPointer)
+!25 = !DIDerivedType(tag: DW_TAG_typedef, name: "Class", file: !1, baseType: !22)
+!26 = !DIDerivedType(tag: DW_TAG_typedef, name: "SEL", file: !1, baseType: !27, flags: DIFlagArtificial)
+!27 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !28, size: 64)
+!28 = !DICompositeType(tag: DW_TAG_structure_type, name: "objc_selector", file: !1, flags: DIFlagFwdDecl)
+!29 = !DIDerivedType(tag: DW_TAG_typedef, name: "CGSize", file: !1, line: 10, baseType: !30)
+!30 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "CGSize", file: !1, line: 6, size: 128, elements: !31)
+!31 = !{!32, !34}
+!32 = !DIDerivedType(tag: DW_TAG_member, name: "width", scope: !30, file: !1, line: 7, baseType: !33, size: 64)
+!33 = !DIBasicType(name: "double", size: 64, encoding: DW_ATE_float)
+!34 = !DIDerivedType(tag: DW_TAG_member, name: "height", scope: !30, file: !1, line: 8, baseType: !33, size: 64, offset: 64)
+!35 = !DILocation(line: 16, scope: !14)
+!36 = !DILocalVariable(name: "imageSize", arg: 3, scope: !14, file: !1, line: 16, type: !29)
+!37 = !DIExpression(DW_OP_deref)
+!38 = !DILocation(line: 16, column: 26, scope: !14)
+!39 = !DILocalVariable(name: "object", scope: !14, file: !1, line: 17, type: !40)
+!40 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !41, size: 64)
+!41 = !DICompositeType(tag: DW_TAG_structure_type, name: "Object", scope: !1, file: !1, line: 11, elements: !42, runtimeLang: DW_LANG_ObjC)
+!42 = !{!43}
+!43 = !DIDerivedType(tag: DW_TAG_inheritance, scope: !41, baseType: !44)
+!44 = !DICompositeType(tag: DW_TAG_structure_type, name: "NSObject", scope: !1, file: !1, line: 3, elements: !2, runtimeLang: DW_LANG_ObjC)
+!45 = !DILocation(line: 17, column: 11, scope: !14)
+!46 = !DILocalVariable(name: "self", arg: 1, scope: !14, type: !47, flags: DIFlagArtificial | DIFlagObjectPointer)
+!47 = !DIDerivedType(tag: DW_TAG_const_type, baseType: !25)
+!48 = !DIExpression()
+!49 = !DILocation(line: 0, scope: !14)
+!50 = !DILocalVariable(name: "_cmd", arg: 2, scope: !14, type: !51, flags: DIFlagArtificial)
+!51 = !DIDerivedType(tag: DW_TAG_typedef, name: "SEL", file: !1, baseType: !27)
+!52 = !DILocation(line: 17, column: 21, scope: !14)
+!53 = !DILocation(line: 17, column: 20, scope: !14)
+!54 = !DILocation(line: 18, column: 11, scope: !14)
+!55 = !DILocation(line: 18, column: 10, scope: !14)
+!56 = !DILocation(line: 19, column: 1, scope: !14)
diff --git a/test/DebugInfo/AMDGPU/lit.local.cfg b/test/DebugInfo/AMDGPU/lit.local.cfg
new file mode 100644
index 000000000000..2a665f06be72
--- /dev/null
+++ b/test/DebugInfo/AMDGPU/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'AMDGPU' in config.root.targets:
+ config.unsupported = True
diff --git a/test/DebugInfo/AMDGPU/pointer-address-space-dwarf-v1.ll b/test/DebugInfo/AMDGPU/pointer-address-space-dwarf-v1.ll
new file mode 100644
index 000000000000..cbd5e7688a5a
--- /dev/null
+++ b/test/DebugInfo/AMDGPU/pointer-address-space-dwarf-v1.ll
@@ -0,0 +1,70 @@
+; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+
+; LLVM IR generated with the following command and OpenCL source:
+;
+; $clang -cl-std=CL2.0 -g -O0 -target amdgcn-amd-amdhsa -S -emit-llvm <path-to-file>
+;
+; kernel void kernel1() {
+; global int *FuncVar0 = 0;
+; constant int *FuncVar1 = 0;
+; local int *FuncVar2 = 0;
+; private int *FuncVar3 = 0;
+; int *FuncVar4 = 0;
+; }
+
+; DW_AT_address_class only exists starting with Dwarf Version 2; this module
+; requests Dwarf Version 1 (!4 below), so the attribute must not be emitted.
+; CHECK-NOT: DW_AT_address_class
+
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+define amdgpu_kernel void @kernel1() #0 !dbg !7 {
+entry:
+ %FuncVar0 = alloca i32 addrspace(1)*, align 4
+ %FuncVar1 = alloca i32 addrspace(2)*, align 4
+ %FuncVar2 = alloca i32 addrspace(3)*, align 4
+ %FuncVar3 = alloca i32*, align 4
+ %FuncVar4 = alloca i32 addrspace(4)*, align 4
+ call void @llvm.dbg.declare(metadata i32 addrspace(1)** %FuncVar0, metadata !10, metadata !13), !dbg !14
+ store i32 addrspace(1)* null, i32 addrspace(1)** %FuncVar0, align 4, !dbg !14
+ call void @llvm.dbg.declare(metadata i32 addrspace(2)** %FuncVar1, metadata !15, metadata !13), !dbg !16
+ store i32 addrspace(2)* null, i32 addrspace(2)** %FuncVar1, align 4, !dbg !16
+ call void @llvm.dbg.declare(metadata i32 addrspace(3)** %FuncVar2, metadata !17, metadata !13), !dbg !19
+ store i32 addrspace(3)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*), i32 addrspace(3)** %FuncVar2, align 4, !dbg !19
+ call void @llvm.dbg.declare(metadata i32** %FuncVar3, metadata !20, metadata !13), !dbg !22
+ store i32* addrspacecast (i32 addrspace(4)* null to i32*), i32** %FuncVar3, align 4, !dbg !22
+ call void @llvm.dbg.declare(metadata i32 addrspace(4)** %FuncVar4, metadata !23, metadata !13), !dbg !24
+ store i32 addrspace(4)* null, i32 addrspace(4)** %FuncVar4, align 4, !dbg !24
+ ret void, !dbg !25
+}
+
+!llvm.dbg.cu = !{!0}
+!opencl.ocl.version = !{!3}
+!llvm.module.flags = !{!4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "pointer-address-space-dwarf-v1.cl", directory: "/some/random/directory")
+!2 = !{}
+!3 = !{i32 2, i32 0}
+!4 = !{i32 2, !"Dwarf Version", i32 1}
+!5 = !{i32 2, !"Debug Info Version", i32 3}
+!6 = !{!""}
+!7 = distinct !DISubprogram(name: "kernel1", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: false, unit: !0, variables: !2)
+!8 = !DISubroutineType(types: !9)
+!9 = !{null}
+!10 = !DILocalVariable(name: "FuncVar0", scope: !7, file: !1, line: 2, type: !11)
+!11 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !12, size: 64)
+!12 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!13 = !DIExpression()
+!14 = !DILocation(line: 2, column: 15, scope: !7)
+!15 = !DILocalVariable(name: "FuncVar1", scope: !7, file: !1, line: 3, type: !11)
+!16 = !DILocation(line: 3, column: 17, scope: !7)
+!17 = !DILocalVariable(name: "FuncVar2", scope: !7, file: !1, line: 4, type: !18)
+!18 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !12, size: 32, dwarfAddressSpace: 2)
+!19 = !DILocation(line: 4, column: 14, scope: !7)
+!20 = !DILocalVariable(name: "FuncVar3", scope: !7, file: !1, line: 5, type: !21)
+!21 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !12, size: 32, dwarfAddressSpace: 1)
+!22 = !DILocation(line: 5, column: 16, scope: !7)
+!23 = !DILocalVariable(name: "FuncVar4", scope: !7, file: !1, line: 6, type: !11)
+!24 = !DILocation(line: 6, column: 8, scope: !7)
+!25 = !DILocation(line: 7, column: 1, scope: !7)
diff --git a/test/DebugInfo/AMDGPU/pointer-address-space.ll b/test/DebugInfo/AMDGPU/pointer-address-space.ll
new file mode 100644
index 000000000000..a99d690935a1
--- /dev/null
+++ b/test/DebugInfo/AMDGPU/pointer-address-space.ll
@@ -0,0 +1,104 @@
+; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+
+; LLVM IR generated with the following command and OpenCL source:
+;
+; $clang -cl-std=CL2.0 -g -O0 -target amdgcn-amd-amdhsa -S -emit-llvm <path-to-file>
+;
+; kernel void kernel1() {
+; global int *FuncVar0 = 0;
+; constant int *FuncVar1 = 0;
+; local int *FuncVar2 = 0;
+; private int *FuncVar3 = 0;
+; int *FuncVar4 = 0;
+; }
+
+; CHECK: DW_AT_name {{.*}}"FuncVar0"
+; CHECK-NEXT: DW_AT_decl_file
+; CHECK-NEXT: DW_AT_decl_line
+; CHECK-NEXT: DW_AT_type [DW_FORM_ref4] (cu + 0x{{[a-f0-9]+}} => {0x[[NONE:[a-f0-9]+]]})
+
+; CHECK: DW_AT_name {{.*}}"FuncVar1"
+; CHECK-NEXT: DW_AT_decl_file
+; CHECK-NEXT: DW_AT_decl_line
+; CHECK-NEXT: DW_AT_type [DW_FORM_ref4] (cu + 0x{{[a-f0-9]+}} => {0x[[NONE]]})
+
+; CHECK: DW_AT_name {{.*}}"FuncVar2"
+; CHECK-NEXT: DW_AT_decl_file
+; CHECK-NEXT: DW_AT_decl_line
+; CHECK-NEXT: DW_AT_type [DW_FORM_ref4] (cu + 0x{{[a-f0-9]+}} => {0x[[LOCAL:[a-f0-9]+]]})
+
+; CHECK: DW_AT_name {{.*}}"FuncVar3"
+; CHECK-NEXT: DW_AT_decl_file
+; CHECK-NEXT: DW_AT_decl_line
+; CHECK-NEXT: DW_AT_type [DW_FORM_ref4] (cu + 0x{{[a-f0-9]+}} => {0x[[PRIVATE:[a-f0-9]+]]})
+
+; CHECK: DW_AT_name {{.*}}"FuncVar4"
+; CHECK-NEXT: DW_AT_decl_file
+; CHECK-NEXT: DW_AT_decl_line
+; CHECK-NEXT: DW_AT_type [DW_FORM_ref4] (cu + 0x{{[a-f0-9]+}} => {0x[[NONE]]})
+
+; CHECK: 0x[[NONE]]: DW_TAG_pointer_type
+; CHECK-NEXT: DW_AT_type
+; CHECK-NOT: DW_AT_address_class
+
+; CHECK: 0x[[LOCAL]]: DW_TAG_pointer_type
+; CHECK-NEXT: DW_AT_type
+; CHECK-NEXT: DW_AT_address_class [DW_FORM_data4] (0x00000002)
+
+; CHECK: 0x[[PRIVATE]]: DW_TAG_pointer_type
+; CHECK-NEXT: DW_AT_type
+; CHECK-NEXT: DW_AT_address_class [DW_FORM_data4] (0x00000001)
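+;
+; These address_class values come straight from the pointer metadata below:
+; the local pointer (FuncVar2) uses dwarfAddressSpace: 2 and the private
+; pointer (FuncVar3) uses dwarfAddressSpace: 1, while the global, constant,
+; and generic pointers (FuncVar0/1/4) share one pointer type with no address
+; class.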
+
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
+
+define amdgpu_kernel void @kernel1() !dbg !7 {
+entry:
+ %FuncVar0 = alloca i32 addrspace(1)*, align 4
+ %FuncVar1 = alloca i32 addrspace(2)*, align 4
+ %FuncVar2 = alloca i32 addrspace(3)*, align 4
+ %FuncVar3 = alloca i32*, align 4
+ %FuncVar4 = alloca i32 addrspace(4)*, align 4
+ call void @llvm.dbg.declare(metadata i32 addrspace(1)** %FuncVar0, metadata !10, metadata !13), !dbg !14
+ store i32 addrspace(1)* null, i32 addrspace(1)** %FuncVar0, align 4, !dbg !14
+ call void @llvm.dbg.declare(metadata i32 addrspace(2)** %FuncVar1, metadata !15, metadata !13), !dbg !16
+ store i32 addrspace(2)* null, i32 addrspace(2)** %FuncVar1, align 4, !dbg !16
+ call void @llvm.dbg.declare(metadata i32 addrspace(3)** %FuncVar2, metadata !17, metadata !13), !dbg !19
+ store i32 addrspace(3)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*), i32 addrspace(3)** %FuncVar2, align 4, !dbg !19
+ call void @llvm.dbg.declare(metadata i32** %FuncVar3, metadata !20, metadata !13), !dbg !22
+ store i32* addrspacecast (i32 addrspace(4)* null to i32*), i32** %FuncVar3, align 4, !dbg !22
+ call void @llvm.dbg.declare(metadata i32 addrspace(4)** %FuncVar4, metadata !23, metadata !13), !dbg !24
+ store i32 addrspace(4)* null, i32 addrspace(4)** %FuncVar4, align 4, !dbg !24
+ ret void, !dbg !25
+}
+
+!llvm.dbg.cu = !{!0}
+!opencl.ocl.version = !{!3}
+!llvm.module.flags = !{!4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "pointer-address-space.ll", directory: "/some/random/directory")
+!2 = !{}
+!3 = !{i32 2, i32 0}
+!4 = !{i32 2, !"Dwarf Version", i32 2}
+!5 = !{i32 2, !"Debug Info Version", i32 3}
+!6 = !{!""}
+!7 = distinct !DISubprogram(name: "kernel1", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: false, unit: !0, variables: !2)
+!8 = !DISubroutineType(types: !9)
+!9 = !{null}
+!10 = !DILocalVariable(name: "FuncVar0", scope: !7, file: !1, line: 2, type: !11)
+!11 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !12, size: 64)
+!12 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!13 = !DIExpression()
+!14 = !DILocation(line: 2, column: 15, scope: !7)
+!15 = !DILocalVariable(name: "FuncVar1", scope: !7, file: !1, line: 3, type: !11)
+!16 = !DILocation(line: 3, column: 17, scope: !7)
+!17 = !DILocalVariable(name: "FuncVar2", scope: !7, file: !1, line: 4, type: !18)
+!18 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !12, size: 32, dwarfAddressSpace: 2)
+!19 = !DILocation(line: 4, column: 14, scope: !7)
+!20 = !DILocalVariable(name: "FuncVar3", scope: !7, file: !1, line: 5, type: !21)
+!21 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !12, size: 32, dwarfAddressSpace: 1)
+!22 = !DILocation(line: 5, column: 16, scope: !7)
+!23 = !DILocalVariable(name: "FuncVar4", scope: !7, file: !1, line: 6, type: !11)
+!24 = !DILocation(line: 6, column: 8, scope: !7)
+!25 = !DILocation(line: 7, column: 1, scope: !7)
diff --git a/test/DebugInfo/AMDGPU/variable-locations-dwarf-v1.ll b/test/DebugInfo/AMDGPU/variable-locations-dwarf-v1.ll
new file mode 100644
index 000000000000..d04a8eb74656
--- /dev/null
+++ b/test/DebugInfo/AMDGPU/variable-locations-dwarf-v1.ll
@@ -0,0 +1,92 @@
+; RUN: llc -O0 -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+
+; LLVM IR generated with the following command and OpenCL source:
+;
+; $clang -cl-std=CL2.0 -g -O0 -target amdgcn-amd-amdhsa -S -emit-llvm <path-to-file>
+;
+; global int GlobA;
+; global int GlobB;
+;
+; kernel void kernel1(unsigned int ArgN, global int *ArgA, global int *ArgB) {
+; ArgA[ArgN] += ArgB[ArgN];
+; }
+
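+; With the module requesting Dwarf Version 1 (!10 below), the location
+; blocks that variable-locations.ll expects under DWARF v2 must not appear;
+; each CHECK-NOT below matches one of those blocks.
+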
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
+
+; CHECK-NOT: DW_AT_location [DW_FORM_block1] (<0x05> 03 00 00 00 00 )
+@GlobA = common addrspace(1) global i32 0, align 4, !dbg !0
+; CHECK-NOT: DW_AT_location [DW_FORM_block1] (<0x05> 03 00 00 00 00 )
+@GlobB = common addrspace(1) global i32 0, align 4, !dbg !6
+
+define amdgpu_kernel void @kernel1(
+; CHECK-NOT: DW_AT_location [DW_FORM_block1] (<0x06> 91 04 10 01 16 18 )
+ i32 %ArgN,
+; CHECK-NOT: DW_AT_location [DW_FORM_block1] (<0x06> 91 08 10 01 16 18 )
+ i32 addrspace(1)* %ArgA,
+; CHECK-NOT: DW_AT_location [DW_FORM_block1] (<0x06> 91 10 10 01 16 18 )
+ i32 addrspace(1)* %ArgB) !dbg !13 {
+entry:
+ %ArgN.addr = alloca i32, align 4
+ %ArgA.addr = alloca i32 addrspace(1)*, align 4
+ %ArgB.addr = alloca i32 addrspace(1)*, align 4
+ store i32 %ArgN, i32* %ArgN.addr, align 4
+ call void @llvm.dbg.declare(metadata i32* %ArgN.addr, metadata !22, metadata !23), !dbg !24
+ store i32 addrspace(1)* %ArgA, i32 addrspace(1)** %ArgA.addr, align 4
+ call void @llvm.dbg.declare(metadata i32 addrspace(1)** %ArgA.addr, metadata !25, metadata !23), !dbg !26
+ store i32 addrspace(1)* %ArgB, i32 addrspace(1)** %ArgB.addr, align 4
+ call void @llvm.dbg.declare(metadata i32 addrspace(1)** %ArgB.addr, metadata !27, metadata !23), !dbg !28
+ %0 = load i32 addrspace(1)*, i32 addrspace(1)** %ArgB.addr, align 4, !dbg !29
+ %1 = load i32, i32* %ArgN.addr, align 4, !dbg !30
+ %idxprom = zext i32 %1 to i64, !dbg !29
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 %idxprom, !dbg !29
+ %2 = load i32, i32 addrspace(1)* %arrayidx, align 4, !dbg !29
+ %3 = load i32 addrspace(1)*, i32 addrspace(1)** %ArgA.addr, align 4, !dbg !31
+ %4 = load i32, i32* %ArgN.addr, align 4, !dbg !32
+ %idxprom1 = zext i32 %4 to i64, !dbg !31
+ %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %3, i64 %idxprom1, !dbg !31
+ %5 = load i32, i32 addrspace(1)* %arrayidx2, align 4, !dbg !33
+ %add = add nsw i32 %5, %2, !dbg !33
+ store i32 %add, i32 addrspace(1)* %arrayidx2, align 4, !dbg !33
+ ret void, !dbg !34
+}
+
+!llvm.dbg.cu = !{!2}
+!opencl.ocl.version = !{!9}
+!llvm.module.flags = !{!10, !11}
+!llvm.ident = !{!12}
+
+!0 = !DIGlobalVariableExpression(var: !1)
+!1 = distinct !DIGlobalVariable(name: "GlobA", scope: !2, file: !3, line: 1, type: !8, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 5.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
+!3 = !DIFile(filename: "variable-locations-dwarf-v1.cl", directory: "/some/random/directory")
+!4 = !{}
+!5 = !{!0, !6}
+!6 = !DIGlobalVariableExpression(var: !7)
+!7 = distinct !DIGlobalVariable(name: "GlobB", scope: !2, file: !3, line: 2, type: !8, isLocal: false, isDefinition: true)
+!8 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!9 = !{i32 2, i32 0}
+!10 = !{i32 2, !"Dwarf Version", i32 1}
+!11 = !{i32 2, !"Debug Info Version", i32 3}
+!12 = !{!"clang version 5.0.0"}
+!13 = distinct !DISubprogram(name: "kernel1", scope: !3, file: !3, line: 4, type: !14, isLocal: false, isDefinition: true, scopeLine: 4, flags: DIFlagPrototyped, isOptimized: false, unit: !2, variables: !4)
+!14 = !DISubroutineType(types: !15)
+!15 = !{null, !16, !17, !17}
+!16 = !DIBasicType(name: "unsigned int", size: 32, encoding: DW_ATE_unsigned)
+!17 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !8, size: 64)
+!18 = !{i32 0, i32 1, i32 1}
+!19 = !{!"none", !"none", !"none"}
+!20 = !{!"uint", !"int*", !"int*"}
+!21 = !{!"", !"", !""}
+!22 = !DILocalVariable(name: "ArgN", arg: 1, scope: !13, file: !3, line: 4, type: !16)
+!23 = !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)
+!24 = !DILocation(line: 4, column: 34, scope: !13)
+!25 = !DILocalVariable(name: "ArgA", arg: 2, scope: !13, file: !3, line: 4, type: !17)
+!26 = !DILocation(line: 4, column: 52, scope: !13)
+!27 = !DILocalVariable(name: "ArgB", arg: 3, scope: !13, file: !3, line: 4, type: !17)
+!28 = !DILocation(line: 4, column: 70, scope: !13)
+!29 = !DILocation(line: 5, column: 17, scope: !13)
+!30 = !DILocation(line: 5, column: 22, scope: !13)
+!31 = !DILocation(line: 5, column: 3, scope: !13)
+!32 = !DILocation(line: 5, column: 8, scope: !13)
+!33 = !DILocation(line: 5, column: 14, scope: !13)
+!34 = !DILocation(line: 6, column: 1, scope: !13)
diff --git a/test/DebugInfo/AMDGPU/variable-locations.ll b/test/DebugInfo/AMDGPU/variable-locations.ll
new file mode 100644
index 000000000000..1aab40f946c6
--- /dev/null
+++ b/test/DebugInfo/AMDGPU/variable-locations.ll
@@ -0,0 +1,111 @@
+; RUN: llc -O0 -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+
+; LLVM IR generated with the following command and OpenCL source:
+;
+; $clang -cl-std=CL2.0 -g -O0 -target amdgcn-amd-amdhsa -S -emit-llvm <path-to-file>
+;
+; global int GlobA;
+; global int GlobB;
+;
+; kernel void kernel1(unsigned int ArgN, global int *ArgA, global int *ArgB) {
+; ArgA[ArgN] += ArgB[ArgN];
+; }
+
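+; The expected location expressions decode as follows: "03 <4 zero bytes>"
+; is DW_OP_addr with a to-be-relocated address for each global, and
+; "91 <off> 10 01 16 18" is DW_OP_fbreg <off>, DW_OP_constu 1, DW_OP_swap,
+; DW_OP_xderef for each argument, matching the
+; DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef) used by the
+; parameter declares below.
+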
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
+
+; CHECK: {{.*}}DW_TAG_variable
+; CHECK-NEXT: DW_AT_name {{.*}}"GlobA"
+; CHECK-NEXT: DW_AT_type
+; CHECK-NEXT: DW_AT_external
+; CHECK-NEXT: DW_AT_decl_file
+; CHECK-NEXT: DW_AT_decl_line
+; CHECK-NEXT: DW_AT_location [DW_FORM_block1] (<0x05> 03 00 00 00 00 )
+@GlobA = common addrspace(1) global i32 0, align 4, !dbg !0
+
+; CHECK: {{.*}}DW_TAG_variable
+; CHECK-NEXT: DW_AT_name {{.*}}"GlobB"
+; CHECK-NEXT: DW_AT_type
+; CHECK-NEXT: DW_AT_external
+; CHECK-NEXT: DW_AT_decl_file
+; CHECK-NEXT: DW_AT_decl_line
+; CHECK-NEXT: DW_AT_location [DW_FORM_block1] (<0x05> 03 00 00 00 00 )
+@GlobB = common addrspace(1) global i32 0, align 4, !dbg !6
+
+define amdgpu_kernel void @kernel1(
+; CHECK: {{.*}}DW_TAG_formal_parameter
+; CHECK-NEXT: DW_AT_location [DW_FORM_block1] (<0x06> 91 04 10 01 16 18 )
+; CHECK-NEXT: DW_AT_name {{.*}}"ArgN"
+ i32 %ArgN,
+; CHECK: {{.*}}DW_TAG_formal_parameter
+; CHECK-NEXT: DW_AT_location [DW_FORM_block1] (<0x06> 91 08 10 01 16 18 )
+; CHECK-NEXT: DW_AT_name {{.*}}"ArgA"
+ i32 addrspace(1)* %ArgA,
+; CHECK: {{.*}}DW_TAG_formal_parameter
+; CHECK-NEXT: DW_AT_location [DW_FORM_block1] (<0x06> 91 10 10 01 16 18 )
+; CHECK-NEXT: DW_AT_name {{.*}}"ArgB"
+ i32 addrspace(1)* %ArgB) !dbg !13 {
+entry:
+ %ArgN.addr = alloca i32, align 4
+ %ArgA.addr = alloca i32 addrspace(1)*, align 4
+ %ArgB.addr = alloca i32 addrspace(1)*, align 4
+ store i32 %ArgN, i32* %ArgN.addr, align 4
+ call void @llvm.dbg.declare(metadata i32* %ArgN.addr, metadata !22, metadata !23), !dbg !24
+ store i32 addrspace(1)* %ArgA, i32 addrspace(1)** %ArgA.addr, align 4
+ call void @llvm.dbg.declare(metadata i32 addrspace(1)** %ArgA.addr, metadata !25, metadata !23), !dbg !26
+ store i32 addrspace(1)* %ArgB, i32 addrspace(1)** %ArgB.addr, align 4
+ call void @llvm.dbg.declare(metadata i32 addrspace(1)** %ArgB.addr, metadata !27, metadata !23), !dbg !28
+ %0 = load i32 addrspace(1)*, i32 addrspace(1)** %ArgB.addr, align 4, !dbg !29
+ %1 = load i32, i32* %ArgN.addr, align 4, !dbg !30
+ %idxprom = zext i32 %1 to i64, !dbg !29
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 %idxprom, !dbg !29
+ %2 = load i32, i32 addrspace(1)* %arrayidx, align 4, !dbg !29
+ %3 = load i32 addrspace(1)*, i32 addrspace(1)** %ArgA.addr, align 4, !dbg !31
+ %4 = load i32, i32* %ArgN.addr, align 4, !dbg !32
+ %idxprom1 = zext i32 %4 to i64, !dbg !31
+ %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %3, i64 %idxprom1, !dbg !31
+ %5 = load i32, i32 addrspace(1)* %arrayidx2, align 4, !dbg !33
+ %add = add nsw i32 %5, %2, !dbg !33
+ store i32 %add, i32 addrspace(1)* %arrayidx2, align 4, !dbg !33
+ ret void, !dbg !34
+}
+
+!llvm.dbg.cu = !{!2}
+!opencl.ocl.version = !{!9}
+!llvm.module.flags = !{!10, !11}
+!llvm.ident = !{!12}
+
+!0 = !DIGlobalVariableExpression(var: !1)
+!1 = distinct !DIGlobalVariable(name: "GlobA", scope: !2, file: !3, line: 1, type: !8, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 5.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
+!3 = !DIFile(filename: "variable-locations.cl", directory: "/some/random/directory")
+!4 = !{}
+!5 = !{!0, !6}
+!6 = !DIGlobalVariableExpression(var: !7)
+!7 = distinct !DIGlobalVariable(name: "GlobB", scope: !2, file: !3, line: 2, type: !8, isLocal: false, isDefinition: true)
+!8 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!9 = !{i32 2, i32 0}
+!10 = !{i32 2, !"Dwarf Version", i32 2}
+!11 = !{i32 2, !"Debug Info Version", i32 3}
+!12 = !{!"clang version 5.0.0"}
+!13 = distinct !DISubprogram(name: "kernel1", scope: !3, file: !3, line: 4, type: !14, isLocal: false, isDefinition: true, scopeLine: 4, flags: DIFlagPrototyped, isOptimized: false, unit: !2, variables: !4)
+!14 = !DISubroutineType(types: !15)
+!15 = !{null, !16, !17, !17}
+!16 = !DIBasicType(name: "unsigned int", size: 32, encoding: DW_ATE_unsigned)
+!17 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !8, size: 64)
+!18 = !{i32 0, i32 1, i32 1}
+!19 = !{!"none", !"none", !"none"}
+!20 = !{!"uint", !"int*", !"int*"}
+!21 = !{!"", !"", !""}
+!22 = !DILocalVariable(name: "ArgN", arg: 1, scope: !13, file: !3, line: 4, type: !16)
+!23 = !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)
+!24 = !DILocation(line: 4, column: 34, scope: !13)
+!25 = !DILocalVariable(name: "ArgA", arg: 2, scope: !13, file: !3, line: 4, type: !17)
+!26 = !DILocation(line: 4, column: 52, scope: !13)
+!27 = !DILocalVariable(name: "ArgB", arg: 3, scope: !13, file: !3, line: 4, type: !17)
+!28 = !DILocation(line: 4, column: 70, scope: !13)
+!29 = !DILocation(line: 5, column: 17, scope: !13)
+!30 = !DILocation(line: 5, column: 22, scope: !13)
+!31 = !DILocation(line: 5, column: 3, scope: !13)
+!32 = !DILocation(line: 5, column: 8, scope: !13)
+!33 = !DILocation(line: 5, column: 14, scope: !13)
+!34 = !DILocation(line: 6, column: 1, scope: !13)
diff --git a/test/DebugInfo/ARM/s-super-register.ll b/test/DebugInfo/ARM/s-super-register.ll
index ef2bc9ac1ec3..de0284a9a557 100644
--- a/test/DebugInfo/ARM/s-super-register.ll
+++ b/test/DebugInfo/ARM/s-super-register.ll
@@ -5,9 +5,7 @@ target triple = "thumbv7-apple-macosx10.6.7"
; The S registers on ARM are expressed as pieces of their super-registers in DWARF.
;
; 0x90 DW_OP_regx of super-register
-; 0x93 DW_OP_piece
-; 0x9d DW_OP_bit_piece
-; CHECK: Location description: 90 {{.. .. ((93 ..)|(9d .. ..)) $}}
+; CHECK: Location description: 90
define void @_Z3foov() optsize ssp !dbg !1 {
entry:
diff --git a/test/DebugInfo/COFF/array-odr-violation.ll b/test/DebugInfo/COFF/array-odr-violation.ll
new file mode 100644
index 000000000000..471c18f00afd
--- /dev/null
+++ b/test/DebugInfo/COFF/array-odr-violation.ll
@@ -0,0 +1,100 @@
+; This tests that emitting CodeView arrays doesn't assert when an ODR violation
+; makes our array dimension size calculations inaccurate. (PR32383)
+
+; Here was the scenario:
+; $ cat a.cpp
+; typedef union YYSTYPE { int x; } YYSTYPE;
+; YYSTYPE a;
+; $ cat b.cpp
+; typedef union YYSTYPE { char x; } YYSTYPE;
+; void fn1() { YYSTYPE a[1]; }
+; $ clang-cl -c -Zi -flto a.cpp b.cpp
+; $ llvm-link a.obj b.obj -S -o t.ll # This is the test case IR.
+; $ llc t.ll # Used to assert
+
+; RUN: llc < %s | FileCheck %s
+
+; FIXME: sizeof(a) in the user program is 1, but we claim it is 4 because
+; sometimes the frontend lies to us. See array-types-advanced.ll for an example.
+;
+; CHECK: Array ({{.*}}) {
+; CHECK: TypeLeafKind: LF_ARRAY (0x1503)
+; CHECK: ElementType: YYSTYPE ({{.*}})
+; CHECK: IndexType: unsigned __int64 (0x23)
+; CHECK: SizeOf: 4
+; CHECK: Name:
+; CHECK: }
+
+; sizeof(YYSTYPE) == 4
+; CHECK: Union ({{.*}}) {
+; CHECK: TypeLeafKind: LF_UNION (0x1506)
+; CHECK: MemberCount: 1
+; CHECK: Properties [ (0x600)
+; CHECK: HasUniqueName (0x200)
+; CHECK: Sealed (0x400)
+; CHECK: ]
+; CHECK: FieldList: <field list>
+; CHECK: SizeOf: 4
+; CHECK: Name: YYSTYPE
+; CHECK: LinkageName: .?ATYYSTYPE@@
+; CHECK: }
+
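+; Both definitions share the identifier ".?ATYYSTYPE@@", so ODR type
+; uniquing during llvm-link resolves b.cpp's typedef (!26) to a.cpp's 4-byte
+; union (!7). The array metadata (!25) still records a size of 8 bits, but
+; CodeView derives the array dimensions from the 4-byte element type, hence
+; SizeOf: 4 above.
+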
+; ModuleID = 'llvm-link'
+source_filename = "llvm-link"
+target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-windows-msvc19.10.24728"
+
+%union.YYSTYPE = type { i32 }
+%union.YYSTYPE.0 = type { i8 }
+
+@"\01?a@@3TYYSTYPE@@A" = global %union.YYSTYPE zeroinitializer, align 4, !dbg !0
+
+; Function Attrs: noinline nounwind sspstrong uwtable
+define void @"\01?fn1@@YAXXZ"() #0 !dbg !21 {
+entry:
+ %a = alloca [1 x %union.YYSTYPE.0], align 1
+ call void @llvm.dbg.declare(metadata [1 x %union.YYSTYPE.0]* %a, metadata !24, metadata !29), !dbg !30
+ ret void, !dbg !30
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+attributes #0 = { noinline nounwind sspstrong uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!2, !11}
+!llvm.ident = !{!13, !13}
+!llvm.module.flags = !{!14, !18, !19, !20}
+
+!0 = !DIGlobalVariableExpression(var: !1)
+!1 = distinct !DIGlobalVariable(name: "a", linkageName: "\01?a@@3TYYSTYPE@@A", scope: !2, file: !3, line: 2, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 5.0.0 ", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
+!3 = !DIFile(filename: "a.cpp", directory: "C:\5Csrc\5Cllvm-project\5Cbuild", checksumkind: CSK_MD5, checksum: "c0005139aa3df153c30d8c6953390a4b")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIDerivedType(tag: DW_TAG_typedef, name: "YYSTYPE", file: !3, line: 1, baseType: !7)
+!7 = distinct !DICompositeType(tag: DW_TAG_union_type, name: "YYSTYPE", file: !3, line: 1, size: 32, elements: !8, identifier: ".?ATYYSTYPE@@")
+!8 = !{!9}
+!9 = !DIDerivedType(tag: DW_TAG_member, name: "x", scope: !7, file: !3, line: 1, baseType: !10, size: 32)
+!10 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!11 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !12, producer: "clang version 5.0.0 ", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4)
+!12 = !DIFile(filename: "b.cpp", directory: "C:\5Csrc\5Cllvm-project\5Cbuild", checksumkind: CSK_MD5, checksum: "9cfd390d8827beab36769147bb037abc")
+!13 = !{!"clang version 5.0.0 "}
+!14 = !{i32 6, !"Linker Options", !15}
+!15 = !{!16, !17}
+!16 = !{!"/DEFAULTLIB:libcmt.lib"}
+!17 = !{!"/DEFAULTLIB:oldnames.lib"}
+!18 = !{i32 2, !"CodeView", i32 1}
+!19 = !{i32 2, !"Debug Info Version", i32 3}
+!20 = !{i32 1, !"PIC Level", i32 2}
+!21 = distinct !DISubprogram(name: "fn1", linkageName: "\01?fn1@@YAXXZ", scope: !12, file: !12, line: 2, type: !22, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: false, unit: !11, variables: !4)
+!22 = !DISubroutineType(types: !23)
+!23 = !{null}
+!24 = !DILocalVariable(name: "a", scope: !21, file: !12, line: 2, type: !25)
+!25 = !DICompositeType(tag: DW_TAG_array_type, baseType: !26, size: 8, elements: !27)
+!26 = !DIDerivedType(tag: DW_TAG_typedef, name: "YYSTYPE", file: !12, line: 1, baseType: !7)
+!27 = !{!28}
+!28 = !DISubrange(count: 1)
+!29 = !DIExpression()
+!30 = !DILocation(line: 2, scope: !21)
diff --git a/test/DebugInfo/COFF/globals.ll b/test/DebugInfo/COFF/globals.ll
index e560e4f9806f..aadf6ab557f9 100644
--- a/test/DebugInfo/COFF/globals.ll
+++ b/test/DebugInfo/COFF/globals.ll
@@ -96,7 +96,7 @@
; OBJ: ]
; OBJ: ]
; OBJ: CodeViewDebugInfo [
-; OBJ: Section: .debug$S (7)
+; OBJ: Section: .debug$S (8)
; OBJ: Magic: 0x4
; OBJ: Subsection [
; OBJ: SubSectionType: Symbols (0xF1)
diff --git a/test/DebugInfo/COFF/typedef.ll b/test/DebugInfo/COFF/typedef.ll
index 1c4fe7ac7610..cf4e3df257de 100644
--- a/test/DebugInfo/COFF/typedef.ll
+++ b/test/DebugInfo/COFF/typedef.ll
@@ -31,7 +31,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4}
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1)
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, emissionKind: FullDebug)
!1 = !DIFile(filename: "-", directory: "/usr/local/google/home/majnemer/llvm/src")
!3 = !{i32 2, !"CodeView", i32 1}
!4 = !{i32 2, !"Debug Info Version", i32 3}
diff --git a/test/DebugInfo/COFF/types-data-members.ll b/test/DebugInfo/COFF/types-data-members.ll
index 9276b962ac8c..275af969a48e 100644
--- a/test/DebugInfo/COFF/types-data-members.ll
+++ b/test/DebugInfo/COFF/types-data-members.ll
@@ -37,7 +37,7 @@
; $ clang t.cpp -S -emit-llvm -g -gcodeview -o t.ll
; CHECK: CodeViewTypes [
-; CHECK: Section: .debug$T (10)
+; CHECK: Section: .debug$T (8)
; CHECK: Magic: 0x4
; CHECK: ArgList (0x1000) {
; CHECK: TypeLeafKind: LF_ARGLIST (0x1201)
diff --git a/test/DebugInfo/Generic/2010-01-05-DbgScope.ll b/test/DebugInfo/Generic/2010-01-05-DbgScope.ll
index 008fd8fbd637..031d64e17f69 100644
--- a/test/DebugInfo/Generic/2010-01-05-DbgScope.ll
+++ b/test/DebugInfo/Generic/2010-01-05-DbgScope.ll
@@ -13,7 +13,7 @@ entry:
!0 = !DILocation(line: 571, column: 3, scope: !1)
!1 = distinct !DILexicalBlock(line: 1, column: 1, file: !11, scope: !2)
-!2 = distinct !DISubprogram(name: "foo", linkageName: "foo", line: 561, isLocal: false, isDefinition: true, virtualIndex: 6, isOptimized: false, unit: !3, scope: !3, type: !4)
+!2 = distinct !DISubprogram(name: "foo", linkageName: "foo", file: !11, line: 561, isLocal: false, isDefinition: true, virtualIndex: 6, isOptimized: false, unit: !3, scope: !3, type: !4)
!3 = distinct !DICompileUnit(language: DW_LANG_C99, producer: "clang 1.1", isOptimized: true, emissionKind: FullDebug, file: !11, enums: !12, retainedTypes: !12)
!4 = !DISubroutineType(types: !5)
!5 = !{!6}
diff --git a/test/DebugInfo/Generic/2010-06-29-InlinedFnLocalVar.ll b/test/DebugInfo/Generic/2010-06-29-InlinedFnLocalVar.ll
index 31d3487db7a6..0996cab00da2 100644
--- a/test/DebugInfo/Generic/2010-06-29-InlinedFnLocalVar.ll
+++ b/test/DebugInfo/Generic/2010-06-29-InlinedFnLocalVar.ll
@@ -1,7 +1,7 @@
; RUN: %llc_dwarf -O2 %s -o - | FileCheck %s
; Check struct X for dead variable xyz from inlined function foo.
-; CHECK: section_info
+; CHECK: debug_info,
; CHECK: DW_TAG_structure_type
; CHECK-NEXT: DW_AT_name
diff --git a/test/DebugInfo/Generic/array.ll b/test/DebugInfo/Generic/array.ll
index 7b4ff7cb805a..c3c592885184 100644
--- a/test/DebugInfo/Generic/array.ll
+++ b/test/DebugInfo/Generic/array.ll
@@ -25,7 +25,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone
!7 = distinct !DILexicalBlock(line: 3, column: 12, file: !14, scope: !0)
!8 = !DICompositeType(tag: DW_TAG_array_type, align: 32, file: !14, scope: !2, baseType: !5, elements: !9)
!9 = !{!10}
-;CHECK: section_info:
+;CHECK: debug_info,
;CHECK: DW_TAG_subrange_type
;CHECK-NEXT: DW_AT_type
;CHECK-NOT: DW_AT_lower_bound
diff --git a/test/DebugInfo/Generic/debuginfofinder-inlined-cu.ll b/test/DebugInfo/Generic/debuginfofinder-inlined-cu.ll
new file mode 100644
index 000000000000..313e22d84f35
--- /dev/null
+++ b/test/DebugInfo/Generic/debuginfofinder-inlined-cu.ll
@@ -0,0 +1,31 @@
+; RUN: opt -analyze -module-debuginfo < %s | FileCheck %s
+
+; Verify that both compile units are found, even though one compile unit's
+; functions were entirely inlined into the other.
+;CHECK: Compile unit: DW_LANG_C99 from /tmp/test1.c
+;CHECK: Compile unit: DW_LANG_C99 from /tmp/test2.c
+;CHECK: Subprogram: f from /tmp/test1.c:1
+;CHECK: Subprogram: g from /tmp/test2.c:1
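+; Only @f still has a body; the second compile unit is reached solely
+; through the inlinedAt chain of !15, which points into g's DISubprogram
+; (!11) and from there at unit !8.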
+
+define void @f() !dbg !4 {
+ ret void, !dbg !15
+}
+
+!llvm.dbg.cu = !{!0, !8}
+!llvm.module.flags = !{!13, !16}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, producer: "clang version 3.4 (192092)", isOptimized: false, emissionKind: FullDebug, file: !1, enums: !2, retainedTypes: !2, globals: !2, imports: !2)
+!1 = !DIFile(filename: "test1.c", directory: "/tmp")
+!2 = !{}
+!4 = distinct !DISubprogram(name: "f", line: 1, isLocal: false, isDefinition: true, virtualIndex: 6, isOptimized: false, unit: !0, scopeLine: 1, file: !1, scope: !5, type: !6, variables: !2)
+!5 = !DIFile(filename: "test1.c", directory: "/tmp")
+!6 = !DISubroutineType(types: !7)
+!7 = !{null}
+!8 = distinct !DICompileUnit(language: DW_LANG_C99, producer: "clang version 3.4 (192092)", isOptimized: false, emissionKind: FullDebug, file: !9, enums: !2, retainedTypes: !2, globals: !2, imports: !2)
+!9 = !DIFile(filename: "test2.c", directory: "/tmp")
+!11 = distinct !DISubprogram(name: "g", line: 1, isLocal: false, isDefinition: true, virtualIndex: 6, isOptimized: false, unit: !8, scopeLine: 1, file: !9, scope: !12, type: !6, variables: !2)
+!12 = !DIFile(filename: "test2.c", directory: "/tmp")
+!13 = !{i32 2, !"Dwarf Version", i32 4}
+!14 = !DILocation(line: 1, scope: !4)
+!15 = !DILocation(line: 1, scope: !11, inlinedAt: !14)
+!16 = !{i32 1, !"Debug Info Version", i32 3}
diff --git a/test/DebugInfo/Generic/gmlt_profiling.ll b/test/DebugInfo/Generic/gmlt_profiling.ll
new file mode 100644
index 000000000000..551959caa15e
--- /dev/null
+++ b/test/DebugInfo/Generic/gmlt_profiling.ll
@@ -0,0 +1,32 @@
+; REQUIRES: object-emission
+; RUN: %llc_dwarf -O0 -filetype=obj < %S/gmlt_profiling.ll | llvm-dwarfdump - | FileCheck %S/gmlt_profiling.ll
+
+; CHECK: .debug_info
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "f1"
+; With the debugInfoForProfiling attribute set on the compile unit, we need
+; to emit decl_file and decl_line for the subprogram.
+; CHECK-NEXT: DW_AT_decl_file
+; CHECK-NEXT: DW_AT_decl_line
+
+; Function Attrs: nounwind uwtable
+define void @_Z2f1v() !dbg !4 {
+entry:
+ ret void, !dbg !13
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!10, !11}
+!llvm.ident = !{!12}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.6.0 ", isOptimized: false, emissionKind: LineTablesOnly, file: !1, enums: !2, retainedTypes: !2, globals: !2, imports: !2, debugInfoForProfiling: true)
+!1 = !DIFile(filename: "gmlt.cpp", directory: "/tmp/dbginfo")
+!2 = !{}
+!4 = distinct !DISubprogram(name: "f1", line: 1, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: false, unit: !0, scopeLine: 1, file: !1, scope: !5, type: !6, variables: !2)
+!5 = !DIFile(filename: "gmlt.cpp", directory: "/tmp/dbginfo")
+!6 = !DISubroutineType(types: !2)
+!10 = !{i32 2, !"Dwarf Version", i32 4}
+!11 = !{i32 2, !"Debug Info Version", i32 3}
+!12 = !{!"clang version 3.6.0 "}
+!13 = !DILocation(line: 1, column: 12, scope: !4)
diff --git a/test/DebugInfo/Generic/invalid.ll b/test/DebugInfo/Generic/invalid.ll
new file mode 100644
index 000000000000..fdb68d9cca3f
--- /dev/null
+++ b/test/DebugInfo/Generic/invalid.ll
@@ -0,0 +1,17 @@
+; RUN: not opt -verify %s 2>&1 | FileCheck %s
+
+; Make sure we emit this diagnostic only once (which means we don't visit the
+; same DISubprogram twice).
+; CHECK: subprogram definitions must have a compile unit
+; CHECK-NEXT: !3 = distinct !DISubprogram(name: "patatino", scope: null, isLocal: false, isDefinition: true, isOptimized: false)
+; CHECK-NOT: subprogram definitions must have a compile unit
+; CHECK-NOT: !3 = distinct !DISubprogram(name: "patatino", scope: null, isLocal: false, isDefinition: true, isOptimized: false)
+
+define void @tinkywinky() !dbg !3 { ret void }
+
+!llvm.module.flags = !{!4}
+!llvm.dbg.cu = !{!0}
+!0 = distinct !DICompileUnit(language: 12, file: !1)
+!1 = !DIFile(filename: "/home/davide", directory: "/home/davide")
+!3 = distinct !DISubprogram(name: "patatino", isDefinition: true)
+!4 = !{i32 2, !"Debug Info Version", i32 3}
diff --git a/test/DebugInfo/Generic/store-tail-merge.ll b/test/DebugInfo/Generic/store-tail-merge.ll
new file mode 100644
index 000000000000..624f30416e0f
--- /dev/null
+++ b/test/DebugInfo/Generic/store-tail-merge.ll
@@ -0,0 +1,72 @@
+; RUN: opt -instcombine -S < %s | FileCheck %s
+;
+; Generated with:
+;
+; clang -S -gmlt -emit-llvm test.c -o 1.ll
+; opt -sroa -S 1.ll -o test.ll
+;
+; extern int bar(int i);
+; extern int bar2(int i);
+;
+; int foo(int a, int *d) {
+; if(a) {
+; *d = bar(a);
+; } else {
+; *d = bar2(a);
+; }
+;
+; return a;
+; }
+;
+; CHECK: define {{.*}}@foo
+; CHECK: if.end:
+; CHECK-NEXT: %storemerge = phi
+; This final check is the "real" test: verify there is no !dbg on the store.
+; CHECK-NEXT: store i32 %storemerge{{.*}}, align 4{{$}}
+;
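+; When the two stores from if.then/if.else are sunk into if.end and merged,
+; neither branch's source line is a faithful location for the combined
+; store, so the merged instruction's debug location is dropped rather than
+; picked arbitrarily.
+;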
+; ModuleID = 'test1.ll'
+source_filename = "test.c"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: noinline nounwind uwtable
+define i32 @foo(i32 %a, i32* %d) !dbg !6 {
+entry:
+ %tobool = icmp ne i32 %a, 0, !dbg !8
+ br i1 %tobool, label %if.then, label %if.else, !dbg !8
+
+if.then: ; preds = %entry
+ %call = call i32 @bar(i32 %a), !dbg !9
+ store i32 %call, i32* %d, align 4, !dbg !10
+ br label %if.end, !dbg !11
+
+if.else: ; preds = %entry
+ %call1 = call i32 @bar2(i32 %a), !dbg !12
+ store i32 %call1, i32* %d, align 4, !dbg !13
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ ret i32 %a, !dbg !14
+}
+
+declare i32 @bar(i32)
+
+declare i32 @bar2(i32)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "", isOptimized: false, runtimeVersion: 0, emissionKind: LineTablesOnly, enums: !2)
+!1 = !DIFile(filename: "test.c", directory: "/home/probinson/projects/scratch")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!6 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 4, type: !7, isLocal: false, isDefinition: true, scopeLine: 4, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!7 = !DISubroutineType(types: !2)
+!8 = !DILocation(line: 5, column: 6, scope: !6)
+!9 = !DILocation(line: 6, column: 12, scope: !6)
+!10 = !DILocation(line: 6, column: 10, scope: !6)
+!11 = !DILocation(line: 7, column: 3, scope: !6)
+!12 = !DILocation(line: 8, column: 12, scope: !6)
+!13 = !DILocation(line: 8, column: 10, scope: !6)
+!14 = !DILocation(line: 10, column: 3, scope: !6)
diff --git a/test/DebugInfo/Inputs/dwarfdump-header.elf-x86-64 b/test/DebugInfo/Inputs/dwarfdump-header.elf-x86-64
new file mode 100644
index 000000000000..447813419e3e
--- /dev/null
+++ b/test/DebugInfo/Inputs/dwarfdump-header.elf-x86-64
Binary files differ
diff --git a/test/DebugInfo/Inputs/dwarfdump-header.s b/test/DebugInfo/Inputs/dwarfdump-header.s
new file mode 100644
index 000000000000..ce51e987f38a
--- /dev/null
+++ b/test/DebugInfo/Inputs/dwarfdump-header.s
@@ -0,0 +1,149 @@
+# Test object to verify dwarfdump handles v4 and v5 CU/TU headers.
+# We have a representative set of units: v4 CU, v5 CU, v4 TU, v5 split TU.
+#
+# To generate the test object:
+# llvm-mc -triple x86_64-unknown-linux dwarfdump-header.s -filetype=obj \
+# -o dwarfdump-header.elf-x86-64
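+#
+# The v5 headers differ from v4 in two ways: a unit-type byte is added, and
+# the address size is moved in front of the abbrev-section offset (compare
+# the CU_4 and CU_5 headers below).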
+
+ .section .debug_str,"MS",@progbits,1
+str_producer:
+ .asciz "Handmade DWARF producer"
+str_CU_4:
+ .asciz "V4_compile_unit"
+str_CU_5:
+ .asciz "V5_compile_unit"
+str_TU_4:
+ .asciz "V4_type_unit"
+
+ .section .debug_str.dwo,"MS",@progbits,1
+dwo_TU_5:
+ .asciz "V5_split_type_unit"
+
+# All CUs/TUs use the same abbrev section for simplicity.
+ .section .debug_abbrev,"",@progbits
+ .byte 0x01 # Abbrev code
+ .byte 0x11 # DW_TAG_compile_unit
+ .byte 0x00 # DW_CHILDREN_no
+ .byte 0x25 # DW_AT_producer
+ .byte 0x0e # DW_FORM_strp
+ .byte 0x03 # DW_AT_name
+ .byte 0x0e # DW_FORM_strp
+ .byte 0x00 # EOM(1)
+ .byte 0x00 # EOM(2)
+ .byte 0x02 # Abbrev code
+ .byte 0x41 # DW_TAG_type_unit
+ .byte 0x01 # DW_CHILDREN_yes
+ .byte 0x03 # DW_AT_name
+ .byte 0x0e # DW_FORM_strp
+ .byte 0x00 # EOM(1)
+ .byte 0x00 # EOM(2)
+ .byte 0x03 # Abbrev code
+ .byte 0x13 # DW_TAG_structure_type
+ .byte 0x00 # DW_CHILDREN_no (no members)
+ .byte 0x03 # DW_AT_name
+ .byte 0x0e # DW_FORM_strp
+ .byte 0x00 # EOM(1)
+ .byte 0x00 # EOM(2)
+ .byte 0x00 # EOM(3)
+
+# And a .dwo copy for the .dwo sections.
+ .section .debug_abbrev.dwo,"",@progbits
+ .byte 0x01 # Abbrev code
+ .byte 0x11 # DW_TAG_compile_unit
+ .byte 0x00 # DW_CHILDREN_no
+ .byte 0x25 # DW_AT_producer
+ .byte 0x0e # DW_FORM_strp
+ .byte 0x03 # DW_AT_name
+ .byte 0x0e # DW_FORM_strp
+ .byte 0x00 # EOM(1)
+ .byte 0x00 # EOM(2)
+ .byte 0x02 # Abbrev code
+ .byte 0x41 # DW_TAG_type_unit
+ .byte 0x01 # DW_CHILDREN_yes
+ .byte 0x03 # DW_AT_name
+ .byte 0x0e # DW_FORM_strp
+ .byte 0x00 # EOM(1)
+ .byte 0x00 # EOM(2)
+ .byte 0x03 # Abbrev code
+ .byte 0x13 # DW_TAG_structure_type
+ .byte 0x00 # DW_CHILDREN_no (no members)
+ .byte 0x03 # DW_AT_name
+ .byte 0x0e # DW_FORM_strp
+ .byte 0x00 # EOM(1)
+ .byte 0x00 # EOM(2)
+ .byte 0x00 # EOM(3)
+
+ .section .debug_info,"",@progbits
+
+# DWARF v4 CU header. V4 CU headers all look the same, so we do only one.
+ .long CU_4_end-CU_4_version # Length of Unit
+CU_4_version:
+ .short 4 # DWARF version number
+ .long .debug_abbrev # Offset Into Abbrev. Section
+ .byte 8 # Address Size (in bytes)
+# The compile-unit DIE, which has just DW_AT_producer and DW_AT_name.
+ .byte 1
+ .long str_producer
+ .long str_CU_4
+ .byte 0 # NULL
+CU_4_end:
+
+# DWARF v5 normal CU header.
+ .long CU_5_end-CU_5_version # Length of Unit
+CU_5_version:
+ .short 5 # DWARF version number
+ .byte 1 # DWARF Unit Type (DW_UT_compile)
+ .byte 8 # Address Size (in bytes)
+ .long .debug_abbrev # Offset Into Abbrev. Section
+# The compile-unit DIE, which has just DW_AT_producer and DW_AT_name.
+ .byte 1
+ .long str_producer
+ .long str_CU_5
+ .byte 0 # NULL
+CU_5_end:
+
+ .section .debug_types,"",@progbits
+
+# DWARF v4 Type unit header. Normal/split are identical, so we do only one.
+TU_4_start:
+ .long TU_4_end-TU_4_version # Length of Unit
+TU_4_version:
+ .short 4 # DWARF version number
+ .long .debug_abbrev # Offset Into Abbrev. Section
+ .byte 8 # Address Size (in bytes)
+ .quad 0x0011223344556677 # Type Signature
+ .long TU_4_type-TU_4_start # Type offset
+# The type-unit DIE, which has a name.
+ .byte 2
+ .long str_TU_4
+# The type DIE, which has a name.
+TU_4_type:
+ .byte 3
+ .long str_TU_4
+ .byte 0 # NULL
+ .byte 0 # NULL
+TU_4_end:
+
+ .section .debug_types.dwo,"",@progbits
+# FIXME: DWARF v5 wants type units in .debug_info[.dwo] not .debug_types[.dwo].
+
+# DWARF v5 split type unit header.
+TU_split_5_start:
+ .long TU_split_5_end-TU_split_5_version # Length of Unit
+TU_split_5_version:
+ .short 5 # DWARF version number
+ .byte 6 # DWARF Unit Type (DW_UT_split_type)
+ .byte 8 # Address Size (in bytes)
+ .long .debug_abbrev.dwo # Offset Into Abbrev. Section
+ .quad 0x8899aabbccddeeff # Type Signature
+ .long TU_split_5_type-TU_split_5_start # Type offset
+# The type-unit DIE, which has a name.
+ .byte 2
+ .long dwo_TU_5
+# The type DIE, which has a name.
+TU_split_5_type:
+ .byte 3
+ .long dwo_TU_5
+ .byte 0 # NULL
+ .byte 0 # NULL
+TU_split_5_end:
diff --git a/test/DebugInfo/Inputs/gmlt.ll b/test/DebugInfo/Inputs/gmlt.ll
index dc02e77909f6..116cd75b8110 100644
--- a/test/DebugInfo/Inputs/gmlt.ll
+++ b/test/DebugInfo/Inputs/gmlt.ll
@@ -76,7 +76,6 @@
; CHECK-NOT: {{DW_TAG|DW_AT}}
; CHECK: NULL
-
; CHECK: .debug_ranges contents:
; ... some addresses (depends on platform (such as platforms with function
diff --git a/test/DebugInfo/MIR/ARM/split-superreg-complex.mir b/test/DebugInfo/MIR/ARM/split-superreg-complex.mir
new file mode 100644
index 000000000000..2e8d9977a649
--- /dev/null
+++ b/test/DebugInfo/MIR/ARM/split-superreg-complex.mir
@@ -0,0 +1,122 @@
+# RUN: llc -filetype=obj -o - %s | llvm-dwarfdump - | FileCheck %s
+#
+# This is an artificial example of a debug value residing in a composite
+# location with a complex expression. Because the semantics of applying a DWARF
+# expression to a composite location are ill-defined, the compiler should bail
+# out of emitting a location.
+#
+# CHECK: .debug_info contents:
+# CHECK: DW_TAG_variable
+# CHECK-NOT: DW_AT_location
+# CHECK: DW_TAG
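+#
+# Here the composite location is %q0, which is only available as the
+# subregister pair %d0/%d1, and the complex expression is the
+# DIExpression(DW_OP_plus, 1, DW_OP_minus, 1) attached to the DBG_VALUE, so
+# no DW_AT_location may be emitted for "vec".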
+--- |
+ ; Generated from:
+ ; typedef float vec2 __attribute__((vector_size(16)));
+ ; vec2 v();
+ ; float f() {
+ ; vec2 vec = v();
+ ; return vec[0] + vec[1];
+ ; }
+
+ target datalayout = "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
+ target triple = "thumbv7s-apple-ios5.0.0"
+
+ define float @f() local_unnamed_addr #0 !dbg !9 {
+ entry:
+ %call = tail call <4 x float> bitcast (<4 x float> (...)* @v to <4 x float> ()*)() #0, !dbg !19
+ tail call void @llvm.dbg.value(metadata <4 x float> %call, i64 0, metadata !14, metadata !20), !dbg !21
+ %vecext = extractelement <4 x float> %call, i32 0, !dbg !22
+ %vecext1 = extractelement <4 x float> %call, i32 1, !dbg !23
+ %add = fadd float %vecext, %vecext1, !dbg !24
+ ret float %add, !dbg !25
+ }
+
+ declare arm_aapcs_vfpcc <4 x float> @v(...) local_unnamed_addr #0
+ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #0
+
+ attributes #0 = { nounwind readnone }
+
+ !llvm.dbg.cu = !{!0}
+ !llvm.module.flags = !{!3, !4}
+ !llvm.ident = !{!8}
+
+ !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 4.0.0 (trunk 286322) (llvm/trunk 286305)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+ !1 = !DIFile(filename: "v.c", directory: "/")
+ !2 = !{}
+ !3 = !{i32 2, !"Dwarf Version", i32 2}
+ !4 = !{i32 2, !"Debug Info Version", i32 3}
+ !8 = !{!"clang version 4.0.0 (trunk 286322) (llvm/trunk 286305)"}
+ !9 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 3, type: !10, isLocal: false, isDefinition: true, scopeLine: 3, isOptimized: true, unit: !0, variables: !13)
+ !10 = !DISubroutineType(types: !11)
+ !11 = !{!12}
+ !12 = !DIBasicType(name: "float", size: 32, encoding: DW_ATE_float)
+ !13 = !{!14}
+ !14 = !DILocalVariable(name: "vec", scope: !9, file: !1, line: 4, type: !15)
+ !15 = !DIDerivedType(tag: DW_TAG_typedef, name: "vec2", file: !1, line: 1, baseType: !16)
+ !16 = !DICompositeType(tag: DW_TAG_array_type, baseType: !12, size: 128, flags: DIFlagVector, elements: !17)
+ !17 = !{!18}
+ !18 = !DISubrange(count: 4)
+ !19 = !DILocation(line: 4, column: 13, scope: !9)
+ !20 = !DIExpression(DW_OP_plus, 1, DW_OP_minus, 1)
+ !21 = !DILocation(line: 4, column: 7, scope: !9)
+ !22 = !DILocation(line: 5, column: 9, scope: !9)
+ !23 = !DILocation(line: 5, column: 18, scope: !9)
+ !24 = !DILocation(line: 5, column: 16, scope: !9)
+ !25 = !DILocation(line: 5, column: 2, scope: !9)
+
+...
+---
+name: f
+alignment: 1
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+calleeSavedRegisters: [ '%lr', '%d8', '%d9', '%d10', '%d11', '%d12', '%d13',
+ '%d14', '%d15', '%q4', '%q5', '%q6', '%q7', '%r4',
+ '%r5', '%r6', '%r7', '%r8', '%r10', '%r11', '%s16',
+ '%s17', '%s18', '%s19', '%s20', '%s21', '%s22',
+ '%s23', '%s24', '%s25', '%s26', '%s27', '%s28',
+ '%s29', '%s30', '%s31', '%d8_d10', '%d9_d11', '%d10_d12',
+ '%d11_d13', '%d12_d14', '%d13_d15', '%q4_q5', '%q5_q6',
+ '%q6_q7', '%q4_q5_q6_q7', '%r4_r5', '%r6_r7', '%r10_r11',
+ '%d8_d9_d10', '%d9_d10_d11', '%d10_d11_d12', '%d11_d12_d13',
+ '%d12_d13_d14', '%d13_d14_d15', '%d8_d10_d12',
+ '%d9_d11_d13', '%d10_d12_d14', '%d11_d13_d15',
+ '%d8_d10_d12_d14', '%d9_d11_d13_d15', '%d9_d10',
+ '%d11_d12', '%d13_d14', '%d9_d10_d11_d12', '%d11_d12_d13_d14' ]
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 4
+ offsetAdjustment: 0
+ maxAlignment: 4
+ adjustsStack: true
+ hasCalls: true
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+stack:
+ - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '%lr' }
+body: |
+ bb.0.entry:
+ liveins: %lr
+
+ early-clobber %sp = frame-setup t2STR_PRE killed undef %lr, %sp, -4, 14, _
+ frame-setup CFI_INSTRUCTION def_cfa_offset 4
+ frame-setup CFI_INSTRUCTION offset %lr, -4
+ tBL 14, _, @v, csr_ios, implicit-def dead %lr, implicit %sp, implicit-def %sp, implicit-def %r0, implicit-def %r1, implicit-def %r2, implicit-def %r3, debug-location !19
+ %d1 = VMOVDRR killed %r2, killed %r3, 14, _, implicit-def %q0, debug-location !19
+ %d0 = VMOVDRR killed %r0, killed %r1, 14, _, implicit killed %q0, implicit-def %q0, debug-location !19
+ DBG_VALUE debug-use %q0, debug-use _, !14, !20, debug-location !21
+ %s4 = VMOVS %s1, 14, _, implicit-def %d2, debug-location !24
+ %d0 = VADDfd %d0, killed %d2, 14, _, implicit killed %q0, debug-location !24
+ %r0 = VMOVRS %s0, 14, _, implicit killed %d0, debug-location !25
+ %lr, %sp = t2LDR_POST %sp, 4, 14, _, debug-location !25
+ tBX_RET 14, _, implicit %r0, debug-location !25
+
+...
diff --git a/test/DebugInfo/MIR/X86/live-debug-values-spill.mir b/test/DebugInfo/MIR/X86/live-debug-values-spill.mir
new file mode 100644
index 000000000000..c0d0d7010564
--- /dev/null
+++ b/test/DebugInfo/MIR/X86/live-debug-values-spill.mir
@@ -0,0 +1,468 @@
+# RUN: llc -run-pass=livedebugvalues -march=x86-64 -o - %s | FileCheck -check-prefix=GENERATE %s
+# RUN: llc -run-pass=livedebugvalues -march=x86-64 -o - %s | FileCheck -check-prefix=TERMINATE %s
+#
+# Check that spills are recognized in the Live Debug Values pass and that
+# DBG_VALUE instructions are generated to keep track of spilled user
+# variables.
+# In addition we check that the ranges of spilled debug values are properly
+# extended.
+#
+# Test case generated from:
+#
+# extern void use (int);
+# extern void set (int *, int *, int *);
+#
+# int glob0, glob1, glob2, glob3, glob4, glob5;
+#
+# void foo(int b0, int b1, int int0, int int1, int int2,
+# int int3, int int4)
+# {
+# int inta = glob0;
+# int intb = glob1;
+# int intc = glob2;
+# int intd = glob3;
+# int inte = glob4;
+# int intf = glob5;
+# int intg;
+#
+# if (b0)
+# return;
+#
+# int0 += (int1 + int2 + int3) * int4;
+# use(intf);
+# use(inte);
+#
+# if (b1) {
+# set(&inte, &intf, &intg);
+# int0 = (int1 + int2 + int3) * int4;
+# inta = (intb*inte + intc*inte + intd) * inte;
+# }
+# int0 += int4 * inta;
+# use(int0);
+# }
+#
+#
+# Generated with
+# clang -g -O2 -S -emit-llvm -fno-omit-frame-pointer spill1.c
+# llc -stop-after=funclet-layout < spill1.ll > spill1.mir
+#
+# Make sure that we generated DBG_VALUE instructions for the spills
+# GENERATE: bb.1.if.end:
+# GENERATE: MOV32mr %rbp, 1, _, -48, _, killed %edx :: (store 4 into %stack.5)
+# GENERATE-NEXT: DBG_VALUE debug-use %rbp, -48, !26, !38
+# GENERATE: MOV32mr %rbp, 1, _, -52, _, killed %r8d :: (store 4 into %stack.4)
+# GENERATE-NEXT: DBG_VALUE debug-use %rbp, -52, !32, !38
+# GENERATE: MOV32mr %rbp, 1, _, -56, _, killed %esi :: (store 4 into %stack.3)
+# GENERATE-NEXT: DBG_VALUE debug-use %rbp, -56, !34, !38
+#
+# Check that the spill locations that are valid at the end of bb.1.if.end are
+# propagated to subsequent BBs.
+#
+# GENERATE: bb.2.if.then4:
+# GENERATE-NOT: bb.3:
+# GENERATE-DAG: DBG_VALUE debug-use %rbp, -56, !34, !38
+# GENERATE-DAG: DBG_VALUE debug-use %rbp, -52, !32, !38
+#
+# GENERATE: bb.3:
+# GENERATE-NOT: bb.4.if.end13:
+# GENERATE-DAG: DBG_VALUE debug-use %rbp, -56, !34, !38
+# GENERATE-DAG: DBG_VALUE debug-use %rbp, -52, !32, !38
+#
+# GENERATE: bb.4.if.end13:
+# GENERATE-NOT: bb.5.cleanup:
+# GENERATE-DAG: DBG_VALUE debug-use %rbp, -56, !34, !38
+# GENERATE-DAG: DBG_VALUE debug-use %rbp, -52, !32, !38
+#
+# Check that the spill location rbp-48 (the variable int0) is not propagated
+# because int0 is redefined within the same basic block.
+#
+# TERMINATE: bb.2.if.then4:
+# TERMINATE-NOT: DBG_VALUE debug-use %rbp, -48, !26, !38
+--- |
+ ; ModuleID = '<stdin>'
+ source_filename = "spill1.c"
+ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+ target triple = "x86_64-unknown-linux-gnu"
+
+ @glob0 = common local_unnamed_addr global i32 0, align 4, !dbg !0
+ @glob1 = common local_unnamed_addr global i32 0, align 4, !dbg !6
+ @glob2 = common local_unnamed_addr global i32 0, align 4, !dbg !9
+ @glob3 = common local_unnamed_addr global i32 0, align 4, !dbg !11
+ @glob4 = common local_unnamed_addr global i32 0, align 4, !dbg !13
+ @glob5 = common local_unnamed_addr global i32 0, align 4, !dbg !15
+
+ ; Function Attrs: nounwind uwtable
+ define void @foo(i32 %b0, i32 %b1, i32 %int0, i32 %int1, i32 %int2, i32 %int3, i32 %int4) local_unnamed_addr #0 !dbg !20 {
+ entry:
+ %inte = alloca i32, align 4
+ %intf = alloca i32, align 4
+ %intg = alloca i32, align 4
+ tail call void @llvm.dbg.value(metadata i32 %b0, i64 0, metadata !24, metadata !38), !dbg !39
+ tail call void @llvm.dbg.value(metadata i32 %b1, i64 0, metadata !25, metadata !38), !dbg !40
+ tail call void @llvm.dbg.value(metadata i32 %int0, i64 0, metadata !26, metadata !38), !dbg !41
+ tail call void @llvm.dbg.value(metadata i32 %int1, i64 0, metadata !27, metadata !38), !dbg !42
+ tail call void @llvm.dbg.value(metadata i32 %int2, i64 0, metadata !28, metadata !38), !dbg !43
+ tail call void @llvm.dbg.value(metadata i32 %int3, i64 0, metadata !29, metadata !38), !dbg !44
+ tail call void @llvm.dbg.value(metadata i32 %int4, i64 0, metadata !30, metadata !38), !dbg !45
+ %0 = load i32, i32* @glob0, align 4, !dbg !46, !tbaa !47
+ tail call void @llvm.dbg.value(metadata i32 %0, i64 0, metadata !31, metadata !38), !dbg !51
+ %1 = load i32, i32* @glob1, align 4, !dbg !52, !tbaa !47
+ tail call void @llvm.dbg.value(metadata i32 %1, i64 0, metadata !32, metadata !38), !dbg !53
+ %2 = load i32, i32* @glob2, align 4, !dbg !54, !tbaa !47
+ tail call void @llvm.dbg.value(metadata i32 %2, i64 0, metadata !33, metadata !38), !dbg !55
+ %3 = load i32, i32* @glob3, align 4, !dbg !56, !tbaa !47
+ tail call void @llvm.dbg.value(metadata i32 %3, i64 0, metadata !34, metadata !38), !dbg !57
+ %4 = bitcast i32* %inte to i8*, !dbg !58
+ call void @llvm.lifetime.start(i64 4, i8* nonnull %4) #4, !dbg !58
+ %5 = load i32, i32* @glob4, align 4, !dbg !59, !tbaa !47
+ tail call void @llvm.dbg.value(metadata i32 %5, i64 0, metadata !35, metadata !38), !dbg !60
+ tail call void @llvm.dbg.value(metadata i32 %5, i64 0, metadata !35, metadata !38), !dbg !60
+ store i32 %5, i32* %inte, align 4, !dbg !60, !tbaa !47
+ %6 = bitcast i32* %intf to i8*, !dbg !61
+ call void @llvm.lifetime.start(i64 4, i8* nonnull %6) #4, !dbg !61
+ %7 = load i32, i32* @glob5, align 4, !dbg !62, !tbaa !47
+ tail call void @llvm.dbg.value(metadata i32 %7, i64 0, metadata !36, metadata !38), !dbg !63
+ tail call void @llvm.dbg.value(metadata i32 %7, i64 0, metadata !36, metadata !38), !dbg !63
+ store i32 %7, i32* %intf, align 4, !dbg !63, !tbaa !47
+ %8 = bitcast i32* %intg to i8*, !dbg !64
+ call void @llvm.lifetime.start(i64 4, i8* nonnull %8) #4, !dbg !64
+ %tobool = icmp eq i32 %b0, 0, !dbg !65
+ br i1 %tobool, label %if.end, label %cleanup, !dbg !67
+
+ if.end: ; preds = %entry
+ %add = add nsw i32 %int2, %int1, !dbg !68
+ %add1 = add nsw i32 %add, %int3, !dbg !69
+ %mul = mul nsw i32 %add1, %int4, !dbg !70
+ call void @llvm.dbg.value(metadata i32 %mul, i64 0, metadata !26, metadata !38), !dbg !41
+ %add2 = add nsw i32 %mul, %int0, !dbg !71
+ tail call void @llvm.dbg.value(metadata i32 %add2, i64 0, metadata !26, metadata !38), !dbg !41
+ tail call void @use(i32 %7) #4, !dbg !72
+ tail call void @use(i32 %5) #4, !dbg !73
+ %tobool3 = icmp eq i32 %b1, 0, !dbg !74
+ br i1 %tobool3, label %if.end13, label %if.then4, !dbg !76
+
+ if.then4: ; preds = %if.end
+ tail call void @llvm.dbg.value(metadata i32* %inte, i64 0, metadata !35, metadata !77), !dbg !60
+ tail call void @llvm.dbg.value(metadata i32* %intf, i64 0, metadata !36, metadata !77), !dbg !63
+ tail call void @llvm.dbg.value(metadata i32* %intg, i64 0, metadata !37, metadata !77), !dbg !78
+ call void @set(i32* nonnull %inte, i32* nonnull %intf, i32* nonnull %intg) #4, !dbg !79
+ %9 = load i32, i32* %inte, align 4, !dbg !81, !tbaa !47
+ call void @llvm.dbg.value(metadata i32 %9, i64 0, metadata !35, metadata !38), !dbg !60
+ %mul833 = add i32 %2, %1, !dbg !82
+ %add10 = mul i32 %9, %mul833, !dbg !82
+ %add11 = add nsw i32 %add10, %3, !dbg !83
+ %mul12 = mul nsw i32 %add11, %9, !dbg !84
+ call void @llvm.dbg.value(metadata i32 %mul12, i64 0, metadata !31, metadata !38), !dbg !51
+ br label %if.end13, !dbg !85
+
+ if.end13: ; preds = %if.then4, %if.end
+ %inta.0 = phi i32 [ %mul12, %if.then4 ], [ %0, %if.end ]
+ %int0.addr.0 = phi i32 [ %mul, %if.then4 ], [ %add2, %if.end ]
+ call void @llvm.dbg.value(metadata i32 %inta.0, i64 0, metadata !31, metadata !38), !dbg !51
+ call void @llvm.dbg.value(metadata i32 %int0.addr.0, i64 0, metadata !26, metadata !38), !dbg !41
+ %mul14 = mul nsw i32 %inta.0, %int4, !dbg !86
+ %add15 = add nsw i32 %int0.addr.0, %mul14, !dbg !87
+ call void @llvm.dbg.value(metadata i32 %add15, i64 0, metadata !26, metadata !38), !dbg !41
+ call void @use(i32 %add15) #4, !dbg !88
+ br label %cleanup, !dbg !89
+
+ cleanup: ; preds = %if.end13, %entry
+ %10 = bitcast i32* %intg to i8*
+ %11 = bitcast i32* %intf to i8*
+ %12 = bitcast i32* %inte to i8*
+ call void @llvm.lifetime.end(i64 4, i8* nonnull %10) #4, !dbg !89
+ call void @llvm.lifetime.end(i64 4, i8* nonnull %11) #4, !dbg !89
+ call void @llvm.lifetime.end(i64 4, i8* nonnull %12) #4, !dbg !89
+ ret void, !dbg !90
+ }
+
+ ; Function Attrs: argmemonly nounwind
+ declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+
+ declare void @use(i32) local_unnamed_addr #2
+
+ declare void @set(i32*, i32*, i32*) local_unnamed_addr #2
+
+ ; Function Attrs: argmemonly nounwind
+ declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+
+ ; Function Attrs: nounwind readnone
+ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #3
+
+ ; Function Attrs: nounwind
+ declare void @llvm.stackprotector(i8*, i8**) #4
+
+ attributes #0 = { nounwind uwtable "no-frame-pointer-elim-non-leaf" }
+ attributes #1 = { argmemonly nounwind }
+ attributes #2 = { "no-frame-pointer-elim-non-leaf" }
+ attributes #3 = { nounwind readnone }
+ attributes #4 = { nounwind }
+
+ !llvm.dbg.cu = !{!2}
+ !llvm.module.flags = !{!17, !18}
+ !llvm.ident = !{!19}
+
+ !0 = !DIGlobalVariableExpression(var: !1)
+ !1 = distinct !DIGlobalVariable(name: "glob0", scope: !2, file: !3, line: 4, type: !8, isLocal: false, isDefinition: true)
+ !2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 5.0.0 (trunk 292962)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
+ !3 = !DIFile(filename: "spill1.c", directory: "/home/test")
+ !4 = !{}
+ !5 = !{!0, !6, !9, !11, !13, !15}
+ !6 = !DIGlobalVariableExpression(var: !7)
+ !7 = distinct !DIGlobalVariable(name: "glob1", scope: !2, file: !3, line: 4, type: !8, isLocal: false, isDefinition: true)
+ !8 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+ !9 = !DIGlobalVariableExpression(var: !10)
+ !10 = distinct !DIGlobalVariable(name: "glob2", scope: !2, file: !3, line: 4, type: !8, isLocal: false, isDefinition: true)
+ !11 = !DIGlobalVariableExpression(var: !12)
+ !12 = distinct !DIGlobalVariable(name: "glob3", scope: !2, file: !3, line: 4, type: !8, isLocal: false, isDefinition: true)
+ !13 = !DIGlobalVariableExpression(var: !14)
+ !14 = distinct !DIGlobalVariable(name: "glob4", scope: !2, file: !3, line: 4, type: !8, isLocal: false, isDefinition: true)
+ !15 = !DIGlobalVariableExpression(var: !16)
+ !16 = distinct !DIGlobalVariable(name: "glob5", scope: !2, file: !3, line: 4, type: !8, isLocal: false, isDefinition: true)
+ !17 = !{i32 2, !"Dwarf Version", i32 4}
+ !18 = !{i32 2, !"Debug Info Version", i32 3}
+ !19 = !{!"clang version 5.0.0 (trunk 292962)"}
+ !20 = distinct !DISubprogram(name: "foo", scope: !3, file: !3, line: 6, type: !21, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !2, variables: !23)
+ !21 = !DISubroutineType(types: !22)
+ !22 = !{null, !8, !8, !8, !8, !8, !8, !8}
+ !23 = !{!24, !25, !26, !27, !28, !29, !30, !31, !32, !33, !34, !35, !36, !37}
+ !24 = !DILocalVariable(name: "b0", arg: 1, scope: !20, file: !3, line: 6, type: !8)
+ !25 = !DILocalVariable(name: "b1", arg: 2, scope: !20, file: !3, line: 6, type: !8)
+ !26 = !DILocalVariable(name: "int0", arg: 3, scope: !20, file: !3, line: 6, type: !8)
+ !27 = !DILocalVariable(name: "int1", arg: 4, scope: !20, file: !3, line: 6, type: !8)
+ !28 = !DILocalVariable(name: "int2", arg: 5, scope: !20, file: !3, line: 6, type: !8)
+ !29 = !DILocalVariable(name: "int3", arg: 6, scope: !20, file: !3, line: 7, type: !8)
+ !30 = !DILocalVariable(name: "int4", arg: 7, scope: !20, file: !3, line: 7, type: !8)
+ !31 = !DILocalVariable(name: "inta", scope: !20, file: !3, line: 9, type: !8)
+ !32 = !DILocalVariable(name: "intb", scope: !20, file: !3, line: 10, type: !8)
+ !33 = !DILocalVariable(name: "intc", scope: !20, file: !3, line: 11, type: !8)
+ !34 = !DILocalVariable(name: "intd", scope: !20, file: !3, line: 12, type: !8)
+ !35 = !DILocalVariable(name: "inte", scope: !20, file: !3, line: 13, type: !8)
+ !36 = !DILocalVariable(name: "intf", scope: !20, file: !3, line: 14, type: !8)
+ !37 = !DILocalVariable(name: "intg", scope: !20, file: !3, line: 15, type: !8)
+ !38 = !DIExpression()
+ !39 = !DILocation(line: 6, column: 14, scope: !20)
+ !40 = !DILocation(line: 6, column: 22, scope: !20)
+ !41 = !DILocation(line: 6, column: 30, scope: !20)
+ !42 = !DILocation(line: 6, column: 40, scope: !20)
+ !43 = !DILocation(line: 6, column: 50, scope: !20)
+ !44 = !DILocation(line: 7, column: 14, scope: !20)
+ !45 = !DILocation(line: 7, column: 24, scope: !20)
+ !46 = !DILocation(line: 9, column: 14, scope: !20)
+ !47 = !{!48, !48, i64 0}
+ !48 = !{!"int", !49, i64 0}
+ !49 = !{!"omnipotent char", !50, i64 0}
+ !50 = !{!"Simple C/C++ TBAA"}
+ !51 = !DILocation(line: 9, column: 7, scope: !20)
+ !52 = !DILocation(line: 10, column: 14, scope: !20)
+ !53 = !DILocation(line: 10, column: 7, scope: !20)
+ !54 = !DILocation(line: 11, column: 14, scope: !20)
+ !55 = !DILocation(line: 11, column: 7, scope: !20)
+ !56 = !DILocation(line: 12, column: 14, scope: !20)
+ !57 = !DILocation(line: 12, column: 7, scope: !20)
+ !58 = !DILocation(line: 13, column: 3, scope: !20)
+ !59 = !DILocation(line: 13, column: 14, scope: !20)
+ !60 = !DILocation(line: 13, column: 7, scope: !20)
+ !61 = !DILocation(line: 14, column: 3, scope: !20)
+ !62 = !DILocation(line: 14, column: 14, scope: !20)
+ !63 = !DILocation(line: 14, column: 7, scope: !20)
+ !64 = !DILocation(line: 15, column: 3, scope: !20)
+ !65 = !DILocation(line: 17, column: 7, scope: !66)
+ !66 = distinct !DILexicalBlock(scope: !20, file: !3, line: 17, column: 7)
+ !67 = !DILocation(line: 17, column: 7, scope: !20)
+ !68 = !DILocation(line: 20, column: 17, scope: !20)
+ !69 = !DILocation(line: 20, column: 24, scope: !20)
+ !70 = !DILocation(line: 20, column: 32, scope: !20)
+ !71 = !DILocation(line: 20, column: 8, scope: !20)
+ !72 = !DILocation(line: 21, column: 3, scope: !20)
+ !73 = !DILocation(line: 22, column: 3, scope: !20)
+ !74 = !DILocation(line: 24, column: 7, scope: !75)
+ !75 = distinct !DILexicalBlock(scope: !20, file: !3, line: 24, column: 7)
+ !76 = !DILocation(line: 24, column: 7, scope: !20)
+ !77 = !DIExpression(DW_OP_deref)
+ !78 = !DILocation(line: 15, column: 7, scope: !20)
+ !79 = !DILocation(line: 25, column: 5, scope: !80)
+ !80 = distinct !DILexicalBlock(scope: !75, file: !3, line: 24, column: 11)
+ !81 = !DILocation(line: 27, column: 18, scope: !80)
+ !82 = !DILocation(line: 27, column: 23, scope: !80)
+ !83 = !DILocation(line: 27, column: 35, scope: !80)
+ !84 = !DILocation(line: 27, column: 43, scope: !80)
+ !85 = !DILocation(line: 28, column: 3, scope: !80)
+ !86 = !DILocation(line: 29, column: 16, scope: !20)
+ !87 = !DILocation(line: 29, column: 8, scope: !20)
+ !88 = !DILocation(line: 30, column: 3, scope: !20)
+ !89 = !DILocation(line: 31, column: 1, scope: !20)
+ !90 = !DILocation(line: 31, column: 1, scope: !91)
+ !91 = !DILexicalBlockFile(scope: !20, file: !3, discriminator: 2)
+
+...
+---
+name: foo
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%edi' }
+ - { reg: '%esi' }
+ - { reg: '%edx' }
+ - { reg: '%ecx' }
+ - { reg: '%r8d' }
+ - { reg: '%r9d' }
+calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx',
+ '%rbp', '%rbx', '%r12', '%r13', '%r14', '%r15',
+ '%r12b', '%r13b', '%r14b', '%r15b', '%r12d', '%r13d',
+ '%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ]
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 72
+ offsetAdjustment: -24
+ maxAlignment: 8
+ adjustsStack: true
+ hasCalls: true
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+fixedStack:
+ - { id: 0, type: spill-slot, offset: -56, size: 8, alignment: 8, callee-saved-register: '%rbx' }
+ - { id: 1, type: spill-slot, offset: -48, size: 8, alignment: 16, callee-saved-register: '%r12' }
+ - { id: 2, type: spill-slot, offset: -40, size: 8, alignment: 8, callee-saved-register: '%r13' }
+ - { id: 3, type: spill-slot, offset: -32, size: 8, alignment: 16, callee-saved-register: '%r14' }
+ - { id: 4, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '%r15' }
+ - { id: 5, type: spill-slot, offset: -16, size: 8, alignment: 16 }
+ - { id: 6, offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
+stack:
+ - { id: 0, name: inte, offset: -60, size: 4, alignment: 4 }
+ - { id: 1, name: intf, offset: -76, size: 4, alignment: 4 }
+ - { id: 2, name: intg, offset: -80, size: 4, alignment: 4 }
+ - { id: 3, type: spill-slot, offset: -72, size: 4, alignment: 4 }
+ - { id: 4, type: spill-slot, offset: -68, size: 4, alignment: 4 }
+ - { id: 5, type: spill-slot, offset: -64, size: 4, alignment: 4 }
+body: |
+ bb.0.entry:
+ successors: %bb.1.if.end(0x30000000), %bb.5.cleanup(0x50000000)
+ liveins: %ecx, %edi, %edx, %esi, %r8d, %r9d, %r15, %r14, %r13, %r12, %rbx, %rbp
+
+ frame-setup PUSH64r killed %rbp, implicit-def %rsp, implicit %rsp
+ CFI_INSTRUCTION def_cfa_offset 16
+ CFI_INSTRUCTION offset %rbp, -16
+ %rbp = frame-setup MOV64rr %rsp
+ CFI_INSTRUCTION def_cfa_register %rbp
+ frame-setup PUSH64r killed %r15, implicit-def %rsp, implicit %rsp
+ frame-setup PUSH64r killed %r14, implicit-def %rsp, implicit %rsp
+ frame-setup PUSH64r killed %r13, implicit-def %rsp, implicit %rsp
+ frame-setup PUSH64r killed %r12, implicit-def %rsp, implicit %rsp
+ frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp
+ %rsp = frame-setup SUB64ri8 %rsp, 24, implicit-def dead %eflags
+ CFI_INSTRUCTION offset %rbx, -56
+ CFI_INSTRUCTION offset %r12, -48
+ CFI_INSTRUCTION offset %r13, -40
+ CFI_INSTRUCTION offset %r14, -32
+ CFI_INSTRUCTION offset %r15, -24
+ DBG_VALUE debug-use %edi, debug-use _, !24, !38, debug-location !39
+ DBG_VALUE debug-use %esi, debug-use _, !25, !38, debug-location !40
+ DBG_VALUE debug-use %edx, debug-use _, !26, !38, debug-location !41
+ DBG_VALUE debug-use %ecx, debug-use _, !27, !38, debug-location !42
+ DBG_VALUE debug-use %r8d, debug-use _, !28, !38, debug-location !43
+ DBG_VALUE debug-use %r9d, debug-use _, !29, !38, debug-location !44
+ %r14d = MOV32rr %r8d
+ DBG_VALUE debug-use %r14d, debug-use _, !28, !38, debug-location !43
+ %r12d = MOV32rr %esi
+ DBG_VALUE debug-use %r12d, debug-use _, !25, !38, debug-location !40
+ %eax = MOV32rr %edi
+ DBG_VALUE debug-use %eax, debug-use _, !24, !38, debug-location !39
+ %r13d = MOV32rm %rip, 1, _, @glob0, _, debug-location !46 :: (dereferenceable load 4 from @glob0, !tbaa !47)
+ DBG_VALUE debug-use %r13d, debug-use _, !31, !38, debug-location !51
+ %r8d = MOV32rm %rip, 1, _, @glob1, _, debug-location !52 :: (dereferenceable load 4 from @glob1, !tbaa !47)
+ DBG_VALUE debug-use %r8d, debug-use _, !32, !38, debug-location !53
+ %r15d = MOV32rm %rip, 1, _, @glob2, _, debug-location !54 :: (dereferenceable load 4 from @glob2, !tbaa !47)
+ DBG_VALUE debug-use %r15d, debug-use _, !33, !38, debug-location !55
+ %esi = MOV32rm %rip, 1, _, @glob3, _, debug-location !56 :: (dereferenceable load 4 from @glob3, !tbaa !47)
+ DBG_VALUE debug-use %esi, debug-use _, !34, !38, debug-location !57
+ %ebx = MOV32rm %rip, 1, _, @glob4, _, debug-location !59 :: (dereferenceable load 4 from @glob4, !tbaa !47)
+ DBG_VALUE debug-use %ebx, debug-use _, !35, !38, debug-location !60
+ MOV32mr %rbp, 1, _, -44, _, %ebx, debug-location !60 :: (store 4 into %ir.inte, !tbaa !47)
+ %edi = MOV32rm %rip, 1, _, @glob5, _, debug-location !62 :: (dereferenceable load 4 from @glob5, !tbaa !47)
+ DBG_VALUE debug-use %edi, debug-use _, !36, !38, debug-location !63
+ MOV32mr %rbp, 1, _, -60, _, %edi, debug-location !63 :: (store 4 into %ir.intf, !tbaa !47)
+ TEST32rr killed %eax, %eax, implicit-def %eflags, debug-location !67
+ JNE_1 %bb.5.cleanup, implicit %eflags
+
+ bb.1.if.end:
+ successors: %bb.2(0x30000000), %bb.3.if.then4(0x50000000)
+ liveins: %ebx, %ecx, %edi, %edx, %esi, %r8d, %r9d, %r12d, %r13d, %r14d, %r15d, %rbp
+
+ MOV32mr %rbp, 1, _, -48, _, killed %edx :: (store 4 into %stack.5)
+ MOV32mr %rbp, 1, _, -52, _, killed %r8d :: (store 4 into %stack.4)
+ MOV32mr %rbp, 1, _, -56, _, killed %esi :: (store 4 into %stack.3)
+ DBG_VALUE debug-use _, debug-use _, !30, !38, debug-location !45
+ %r14d = ADD32rr killed %r14d, killed %ecx, implicit-def dead %eflags, debug-location !68
+ %r14d = ADD32rr killed %r14d, killed %r9d, implicit-def dead %eflags, debug-location !69
+ %r14d = IMUL32rm killed %r14d, %rbp, 1, _, 16, _, implicit-def dead %eflags, debug-location !70 :: (load 4 from %fixed-stack.6, align 16)
+ DBG_VALUE debug-use %r14d, debug-use _, !26, !38, debug-location !41
+ CALL64pcrel32 @use, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, debug-location !72
+ %edi = MOV32rr killed %ebx, debug-location !73
+ CALL64pcrel32 @use, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, debug-location !73
+ TEST32rr killed %r12d, %r12d, implicit-def %eflags, debug-location !74
+ JE_1 %bb.2, implicit %eflags
+
+ bb.3.if.then4:
+ successors: %bb.4.if.end13(0x80000000)
+ liveins: %r14d, %r15d, %rbp
+
+ %rdi = LEA64r %rbp, 1, _, -44, _
+ DBG_VALUE %rbp, -44, !35, !38, debug-location !60
+ %rsi = LEA64r %rbp, 1, _, -60, _
+ DBG_VALUE %rbp, -60, !36, !38, debug-location !63
+ %rdx = LEA64r %rbp, 1, _, -64, _
+ DBG_VALUE %rbp, -64, !37, !38, debug-location !78
+ CALL64pcrel32 @set, csr_64, implicit %rsp, implicit %rdi, implicit %rsi, implicit %rdx, implicit-def %rsp, debug-location !79
+ %eax = MOV32rm %rbp, 1, _, -44, _, debug-location !81 :: (dereferenceable load 4 from %ir.inte, !tbaa !47)
+ DBG_VALUE debug-use %eax, debug-use _, !35, !38, debug-location !60
+ %r15d = ADD32rm killed %r15d, %rbp, 1, _, -52, _, implicit-def dead %eflags, debug-location !82 :: (load 4 from %stack.4)
+ %r15d = IMUL32rr killed %r15d, %eax, implicit-def dead %eflags, debug-location !82
+ %r15d = ADD32rm killed %r15d, %rbp, 1, _, -56, _, implicit-def dead %eflags, debug-location !83 :: (load 4 from %stack.3)
+ %r15d = IMUL32rr killed %r15d, killed %eax, implicit-def dead %eflags, debug-location !84
+ DBG_VALUE debug-use %r15d, debug-use _, !31, !38, debug-location !51
+ %r13d = MOV32rr killed %r15d
+ DBG_VALUE debug-use %r13d, debug-use _, !31, !38, debug-location !51
+ JMP_1 %bb.4.if.end13
+
+ bb.2:
+ successors: %bb.4.if.end13(0x80000000)
+ liveins: %r13d, %r14d, %rbp
+
+ %r14d = ADD32rm killed %r14d, %rbp, 1, _, -48, _, implicit-def dead %eflags, debug-location !71 :: (load 4 from %stack.5)
+ DBG_VALUE debug-use %r14d, debug-use _, !26, !38, debug-location !41
+
+ bb.4.if.end13:
+ successors: %bb.5.cleanup(0x80000000)
+ liveins: %r13d, %r14d, %rbp
+
+ DBG_VALUE debug-use %r14d, debug-use _, !26, !38, debug-location !41
+ DBG_VALUE debug-use %r13d, debug-use _, !31, !38, debug-location !51
+ %r13d = IMUL32rm killed %r13d, %rbp, 1, _, 16, _, implicit-def dead %eflags, debug-location !86 :: (load 4 from %fixed-stack.6, align 16)
+ %r13d = ADD32rr killed %r13d, killed %r14d, implicit-def dead %eflags, debug-location !87
+ DBG_VALUE debug-use %r13d, debug-use _, !26, !38, debug-location !41
+ %edi = MOV32rr killed %r13d, debug-location !88
+ CALL64pcrel32 @use, csr_64, implicit %rsp, implicit %edi, implicit-def %rsp, debug-location !88
+
+ bb.5.cleanup:
+ liveins: %rbp
+
+ %rsp = ADD64ri8 %rsp, 24, implicit-def dead %eflags, debug-location !90
+ %rbx = POP64r implicit-def %rsp, implicit %rsp, debug-location !90
+ %r12 = POP64r implicit-def %rsp, implicit %rsp, debug-location !90
+ %r13 = POP64r implicit-def %rsp, implicit %rsp, debug-location !90
+ %r14 = POP64r implicit-def %rsp, implicit %rsp, debug-location !90
+ %r15 = POP64r implicit-def %rsp, implicit %rsp, debug-location !90
+ %rbp = POP64r implicit-def %rsp, implicit %rsp, debug-location !90
+ RETQ debug-location !90
+
+...
diff --git a/test/DebugInfo/Mips/InlinedFnLocalVar.ll b/test/DebugInfo/Mips/InlinedFnLocalVar.ll
index 51b319c3a5b5..cd5a03159ef2 100644
--- a/test/DebugInfo/Mips/InlinedFnLocalVar.ll
+++ b/test/DebugInfo/Mips/InlinedFnLocalVar.ll
@@ -1,7 +1,7 @@
; RUN: llc -mtriple mips-linux-gnu -O2 %s -o - | FileCheck %s
; Check struct X for dead variable xyz from inlined function foo.
-; CHECK: section_info
+; CHECK: .section .debug_info,"",@0x7000001e
; CHECK: DW_TAG_structure_type
; CHECK-NEXT: info_string
diff --git a/test/DebugInfo/PDB/DIA/pdbdump-linenumbers.test b/test/DebugInfo/PDB/DIA/pdbdump-linenumbers.test
index 780e0db84665..2a596e4af149 100644
--- a/test/DebugInfo/PDB/DIA/pdbdump-linenumbers.test
+++ b/test/DebugInfo/PDB/DIA/pdbdump-linenumbers.test
@@ -1,12 +1,14 @@
+; RUN: llvm-pdbdump pretty -lines %p/../Inputs/symbolformat.pdb | FileCheck --check-prefix=LINE_NUMS_FPO %s
; RUN: llvm-pdbdump pretty -lines %p/../Inputs/symbolformat.pdb | FileCheck --check-prefix=LINE_NUMS %s
-; LINE_NUMS: llvm\test\debuginfo\pdb\inputs\symbolformat-fpo.cpp
-; LINE_NUMS: Line 5, Address: [0x000011a0 - 0x000011a5] (6 bytes)
-; LINE_NUMS: Line 6, Address: [0x000011a6 - 0x000011a6] (1 bytes)
+; LINE_NUMS_FPO: llvm\test\debuginfo\pdb\inputs\symbolformat-fpo.cpp
+; LINE_NUMS_FPO: Line 5, Address: [0x000011a0 - 0x000011a5] (6 bytes)
+; LINE_NUMS_FPO: Line 6, Address: [0x000011a6 - 0x000011a6] (1 bytes)
+
; LINE_NUMS: llvm\test\debuginfo\pdb\inputs\symbolformat.cpp
; LINE_NUMS: Line 6, Address: [0x00001060 - 0x00001066] (7 bytes)
-; LINE_NUMS: Line 72, Address: [0x000010d0 - 0x000010d1] (2 bytes)
-; LINE_NUMS: Line 73, Address: [0x000010d2 - 0x000010d5] (4 bytes)
+; LINE_NUMS: Line 80, Address: [0x000010d0 - 0x000010d1] (2 bytes)
+; LINE_NUMS: Line 81, Address: [0x000010d2 - 0x000010d5] (4 bytes)
; LINE_NUMS: Line 28, Address: [0x00001170 - 0x0000117a] (11 bytes)
; LINE_NUMS: Line 21, Address: [0x00001180 - 0x0000118a] (11 bytes)
-; LINE_NUMS: Line 20, Address: [0x00001190 - 0x0000119a] (11 bytes)
\ No newline at end of file
+; LINE_NUMS: Line 20, Address: [0x00001190 - 0x0000119a] (11 bytes)
diff --git a/test/DebugInfo/PDB/DIA/pdbdump-symbol-format.test b/test/DebugInfo/PDB/DIA/pdbdump-symbol-format.test
index e729e8bc8944..0bb3e001d3a4 100644
--- a/test/DebugInfo/PDB/DIA/pdbdump-symbol-format.test
+++ b/test/DebugInfo/PDB/DIA/pdbdump-symbol-format.test
@@ -1,3 +1,4 @@
+; RUN: llvm-pdbdump pretty -symbols %p/../Inputs/symbolformat.pdb | FileCheck --check-prefix=SYM_FORMAT_FPO %s
; RUN: llvm-pdbdump pretty -symbols %p/../Inputs/symbolformat.pdb | FileCheck --check-prefix=SYM_FORMAT %s
; RUN: llvm-pdbdump pretty -types %p/../Inputs/symbolformat.pdb > %t.types
; RUN: FileCheck --check-prefix=TYPES_FORMAT %s < %t.types
@@ -7,9 +8,11 @@
; RUN: llvm-pdbdump pretty -globals %p/../Inputs/symbolformat.pdb | FileCheck --check-prefix=GLOBALS %s
; The format is func [0x<rva_start>+<prologue_length> - 0x<rva_end>-<epilogue_length>]
+; SYM_FORMAT_FPO: ---SYMBOLS---
+; SYM_FORMAT_FPO: symbolformat-fpo.obj
+; SYM_FORMAT_FPO: func [{{.*}}] (FPO) unsigned int __cdecl fpo_func(unsigned int n)
+
; SYM_FORMAT: ---SYMBOLS---
-; SYM_FORMAT: symbolformat-fpo.obj
-; SYM_FORMAT-DAG: func [{{.*}}] (FPO) unsigned int __cdecl fpo_func(unsigned int n)
; SYM_FORMAT: symbolformat.obj
; SYM_FORMAT-DAG: func [{{.*}}] (EBP) int __cdecl _purecall()
; SYM_FORMAT-DAG: func [{{.*}}] (EBP) int __cdecl main(int argc, char** argv)
@@ -29,32 +32,39 @@
; TYPES_FORMAT-DAG: typedef class A ClassAType
; TYPES_1: Classes
-; TYPES_1: struct A {
-; TYPES_1: public:
+; TYPES_1: struct A [sizeof = 4] {
; TYPES_1: virtual void PureFunc() = 0
; TYPES_1: virtual void VirtualFunc()
; TYPES_1: void RegularFunc()
; TYPES_1: }
; TYPES_2: Classes
-; TYPES_2: struct MemberTest {
-; TYPES_2: data +0x00 MemberTest::NestedEnum m_nested_enum
-; TYPES_2: data +0x04 int m_typedef
-; TYPES_2: data +0x08 bool m_bool
-; TYPES_2: data +0x09 char m_char
-; TYPES_2: data +0x0a wchar_t m_wchar_t
-; TYPES_2: data +0x0c int m_int
-; TYPES_2: data +0x10 unsigned int m_unsigned
-; TYPES_2: data +0x14 long m_long
-; TYPES_2: data +0x18 unsigned long m_unsigned_long
-; TYPES_2: data +0x20 __int64 m_int64
-; TYPES_2: data +0x28 unsigned __int64 m_unsigned_int64
-; TYPES_2: data +0x30 float m_float
-; TYPES_2: data +0x38 double m_double
-; TYPES_2: data +0x40 void (__cdecl *m_pfn_2_args)(int, double)
+; TYPES_2: struct MemberTest [sizeof = 96] {
+; TYPES_2: data +0x00 [sizeof=4] MemberTest::NestedEnum m_nested_enum
+; TYPES_2: data +0x04 [sizeof=4] int m_typedef
+; TYPES_2: data +0x08 [sizeof=1] bool m_bool
+; TYPES_2: data +0x09 [sizeof=1] char m_char
+; TYPES_2: data +0x0a [sizeof=2] wchar_t m_wchar_t
+; TYPES_2: data +0x0c [sizeof=4] int m_int
+; TYPES_2: data +0x10 [sizeof=4] unsigned int m_unsigned
+; TYPES_2: data +0x14 [sizeof=4] long m_long
+; TYPES_2: data +0x18 [sizeof=4] unsigned long m_unsigned_long
+; TYPES_2: <padding> (4 bytes)
+; TYPES_2: data +0x20 [sizeof=8] __int64 m_int64
+; TYPES_2: data +0x28 [sizeof=8] unsigned __int64 m_unsigned_int64
+; TYPES_2: data +0x30 [sizeof=4] float m_float
+; TYPES_2: <padding> (4 bytes)
+; TYPES_2: data +0x38 [sizeof=8] double m_double
+; TYPES_2: data +0x40 [sizeof=4] void (__cdecl * m_pfn_2_args)(int, double)
+; TYPES_2: data +0x44 [sizeof=24] int m_multidimensional_array[2][3]
; TYPES_2: }
; GLOBALS: ---GLOBALS---
; GLOBALS-DAG: func [{{.*}}] (FPO) unsigned int __cdecl fpo_func(unsigned int n)
-; GLOBALS-DAG: data [{{.*}}] static void* g_global_pointer
-; GLOBALS-DAG: data [{{.*}}] static int g_global_int
+; GLOBALS-DAG: data [{{.*}}, sizeof=4] static void* g_global_pointer
+; GLOBALS-DAG: data [{{.*}}, sizeof=4] static int g_global_int
+; GLOBALS-DAG: data [{{.*}}, sizeof=12] static int g_array[3]
+; GLOBALS-DAG: data [{{.*}}, sizeof=4] static int (* g_pointer_to_array)[3]
+; GLOBALS-DAG: data [{{.*}}, sizeof=4] static const int* g_pointer_to_const_int
+; GLOBALS-DAG: data [sizeof=4] int* const g_const_pointer_to_int = 0
+; GLOBALS-DAG: data [sizeof=4] const int* const g_const_pointer_to_const_int = 0
diff --git a/test/DebugInfo/PDB/Inputs/longname-truncation.yaml b/test/DebugInfo/PDB/Inputs/longname-truncation.yaml
new file mode 100644
index 000000000000..3d6639edc581
--- /dev/null
+++ b/test/DebugInfo/PDB/Inputs/longname-truncation.yaml
@@ -0,0 +1,26 @@
+---
+TpiStream:
+ Version: VC80
+ Records:
+ - Kind: LF_STRUCTURE
+ Class:
+ MemberCount: 0
+ Options: [ None, HasUniqueName ]
+ FieldList: 0
+ Name: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaa[... long run of 'a' characters elided; continuation of a single long test-string literal wrapped across multiple lines by extraction ...]'
+ UniqueName: 'bbbb[... long run of 'b' characters elided; single long test-string literal wrapped across multiple lines by extraction ...]
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'
+ DerivationList: 0
+ VTableShape: 0
+ Size: 1
+
+ - Kind: LF_STRUCTURE
+ Class:
+ MemberCount: 0
+ Options: [ None ]
+ FieldList: 0
+ Name: 'ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff'
+ UniqueName: ''
+ DerivationList: 0
+ VTableShape: 0
+ Size: 8
+...
diff --git a/test/DebugInfo/PDB/Inputs/one-symbol.yaml b/test/DebugInfo/PDB/Inputs/one-symbol.yaml
new file mode 100644
index 000000000000..5728f05d490c
--- /dev/null
+++ b/test/DebugInfo/PDB/Inputs/one-symbol.yaml
@@ -0,0 +1,11 @@
+---
+DbiStream:
+ Modules:
+ - Module: one-symbol.yaml
+ Modi:
+ Records:
+ - Kind: S_OBJNAME
+ ObjNameSym:
+ Signature: 0
+ ObjectName: 'c:\foo\one-symbol.yaml'
+...
diff --git a/test/DebugInfo/PDB/Inputs/symbolformat.cpp b/test/DebugInfo/PDB/Inputs/symbolformat.cpp
index 5479b717cd9d..14b44ae3c316 100644
--- a/test/DebugInfo/PDB/Inputs/symbolformat.cpp
+++ b/test/DebugInfo/PDB/Inputs/symbolformat.cpp
@@ -50,6 +50,7 @@ struct MemberTest {
float m_float;
double m_double;
void (*m_pfn_2_args)(int, double);
+ int m_multidimensional_array[2][3];
};
typedef int IntType;
@@ -58,6 +59,13 @@ typedef A ClassAType;
int g_global_int;
void *g_global_pointer = nullptr;
+typedef int int_array[3];
+int_array g_array = { 1, 2, 3 };
+int_array *g_pointer_to_array = &g_array;
+const int *g_pointer_to_const_int = nullptr;
+int * const g_const_pointer_to_int = nullptr;
+const int * const g_const_pointer_to_const_int = nullptr;
+
int main(int argc, char **argv) {
// Force symbol references so the linker generates debug info
B b;
diff --git a/test/DebugInfo/PDB/Inputs/symbolformat.pdb b/test/DebugInfo/PDB/Inputs/symbolformat.pdb
index 53d8a1b31a39..9272f318258b 100644
--- a/test/DebugInfo/PDB/Inputs/symbolformat.pdb
+++ b/test/DebugInfo/PDB/Inputs/symbolformat.pdb
Binary files differ
diff --git a/test/DebugInfo/PDB/Native/pdb-native-compilands.test b/test/DebugInfo/PDB/Native/pdb-native-compilands.test
new file mode 100644
index 000000000000..38234d719e50
--- /dev/null
+++ b/test/DebugInfo/PDB/Native/pdb-native-compilands.test
@@ -0,0 +1,65 @@
+; Test that the native PDB reader can enumerate the compilands.
+; RUN: llvm-pdbdump pretty -native -compilands %p/../Inputs/empty.pdb \
+; RUN: | FileCheck -check-prefix=EMPTY %s
+; RUN: llvm-pdbdump pretty -native -compilands %p/../Inputs/big-read.pdb \
+; RUN: | FileCheck -check-prefix=BIGREAD %s
+
+; Reference output was generated with the DIA reader to ensure that the
+; `-native` option produces identical output. The paths in the output will
+; have backslashes even on non-Windows platforms because they come from PDBs
+; built on Windows. The path prefixes have been elided because they may be
+; machine-specific.
+
+EMPTY:---COMPILANDS---
+EMPTY: \llvm\test\DebugInfo\PDB\Inputs\empty.obj
+EMPTY: * Linker *
+
+BIGREAD:---COMPILANDS---
+BIGREAD: \llvm\test\tools\llvm-symbolizer\pdb\Inputs\test.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\_cpu_disp_.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\_initsect_.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\_sehprolg4_.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\_chandler4gs_.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\_secchk_.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\gs_cookie.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\gs_report.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\gs_support.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\checkcfg.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\guard_support.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\loadcfg.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\dyn_tls_dtor.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\dyn_tls_init.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\matherr_detection.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\ucrt_detection.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\argv_mode.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\commit_mode.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\default_local_stdio_options.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\denormal_control.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\env_mode.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\file_mode.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\invalid_parameter_handler.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\matherr.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\new_mode.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\thread_locale.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\tncleanup.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\exe_main.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\initializers.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\utility.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\ucrt_stubs.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\utility_desktop.obj
+BIGREAD: f:\dd\vctools\crt\vcstartup\build\md\msvcrt_kernel32\obj1r\i386\default_precision.obj
+BIGREAD: Import:KERNEL32.dll
+BIGREAD: KERNEL32.dll
+BIGREAD: Import:VCRUNTIME140.dll
+BIGREAD: VCRUNTIME140.dll
+BIGREAD: Import:api-ms-win-crt-stdio-l1-1-0.dll
+BIGREAD: api-ms-win-crt-stdio-l1-1-0.dll
+BIGREAD: Import:api-ms-win-crt-runtime-l1-1-0.dll
+BIGREAD: api-ms-win-crt-runtime-l1-1-0.dll
+BIGREAD: Import:api-ms-win-crt-math-l1-1-0.dll
+BIGREAD: api-ms-win-crt-math-l1-1-0.dll
+BIGREAD: Import:api-ms-win-crt-locale-l1-1-0.dll
+BIGREAD: api-ms-win-crt-locale-l1-1-0.dll
+BIGREAD: Import:api-ms-win-crt-heap-l1-1-0.dll
+BIGREAD: api-ms-win-crt-heap-l1-1-0.dll
+BIGREAD: * Linker *
diff --git a/test/DebugInfo/PDB/Native/pdb-native-summary.test b/test/DebugInfo/PDB/Native/pdb-native-summary.test
new file mode 100644
index 000000000000..bd32f198a390
--- /dev/null
+++ b/test/DebugInfo/PDB/Native/pdb-native-summary.test
@@ -0,0 +1,11 @@
+; Test that the native PDB reader gets the PDB summary correct.
+; RUN: llvm-pdbdump pretty -native -color-output=false %p/../Inputs/empty.pdb \
+; RUN: | FileCheck -check-prefix=EMPTY %s
+
+; Reference output was generated with the DIA reader to ensure that the
+; `-native` option produces identical output.
+
+; EMPTY: Size: 102400 bytes
+; EMPTY: Guid: {0B355641-86A0-A249-896F-9988FAE52FF0}
+; EMPTY: Age: 1
+; EMPTY: Attributes: HasPrivateSymbols
diff --git a/test/DebugInfo/PDB/pdb-longname-truncation.test b/test/DebugInfo/PDB/pdb-longname-truncation.test
new file mode 100644
index 000000000000..2e0284fbe916
--- /dev/null
+++ b/test/DebugInfo/PDB/pdb-longname-truncation.test
@@ -0,0 +1,3 @@
+; For now, just verify that this doesn't cause an error. Later, when pdbdump
+; can do type lookup, we can verify that the name matches what we expect.
+; RUN: llvm-pdbdump yaml2pdb -pdb=%t.pdb %p/Inputs/longname-truncation.yaml
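[Editor's sketch] The truncation this test smoke-checks stems from CodeView type records carrying a 16-bit length prefix, so a record, and therefore any name embedded in it, cannot exceed 0xFFFF bytes. The C++ below is a minimal illustration of that constraint under stated assumptions, not LLVM's actual implementation; the function name and the FixedSize parameter are hypothetical.

  #include <cstddef>
  #include <string>

  // CodeView type records begin with a uint16_t length, so a record can
  // never exceed 0xFFFF bytes; a very long class name must be truncated to
  // fit. FixedSize models the record's non-name bytes (kind, properties,
  // and so on) and is a hypothetical placeholder here.
  std::string truncateNameForRecord(std::string Name, size_t FixedSize) {
    const size_t MaxRecordLen = 0xFFFF;             // 16-bit length field
    if (FixedSize + Name.size() + 1 > MaxRecordLen) // +1 for the NUL
      Name.resize(MaxRecordLen - FixedSize - 1);
    return Name;
  }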
diff --git a/test/DebugInfo/PDB/pdb-minimal-construct.test b/test/DebugInfo/PDB/pdb-minimal-construct.test
new file mode 100644
index 000000000000..d75c51056c9f
--- /dev/null
+++ b/test/DebugInfo/PDB/pdb-minimal-construct.test
@@ -0,0 +1,11 @@
+; This testcase verifies that we can produce a minimal PDB, while
+; serving as an example of how to construct a minimal PDB for other
+; testcases. It takes as input a small fragment of hand-written yaml
+; that specifies nothing about the PDB other than a definition of one
+; symbol that it contains. It then produces a PDB, converts the
+; resulting PDB back to yaml, and verifies that the resulting yaml
+; is identical.
+
+; RUN: llvm-pdbdump yaml2pdb -pdb=%t.pdb %p/Inputs/one-symbol.yaml
+; RUN: llvm-pdbdump pdb2yaml -minimal -dbi-module-syms -no-file-headers %t.pdb > %t.pdb.yaml
+; RUN: diff -b %p/Inputs/one-symbol.yaml %t.pdb.yaml
diff --git a/test/DebugInfo/PDB/pdb-yaml-types.test b/test/DebugInfo/PDB/pdb-yaml-types.test
new file mode 100644
index 000000000000..b3108591271e
--- /dev/null
+++ b/test/DebugInfo/PDB/pdb-yaml-types.test
@@ -0,0 +1,74 @@
+RUN: llvm-pdbdump pdb2yaml -tpi-stream %p/Inputs/big-read.pdb > %t.yaml
+RUN: FileCheck -check-prefix=YAML %s < %t.yaml
+RUN: llvm-pdbdump yaml2pdb %t.yaml -pdb %t.pdb
+RUN: llvm-pdbdump raw -tpi-records %t.pdb | FileCheck %s --check-prefix=PDB
+
+Only verify the beginning of the type stream.
+
+YAML: TpiStream:
+YAML-NEXT: Version: VC80
+YAML-NEXT: Records:
+YAML-NEXT: - Kind: LF_ARGLIST
+YAML-NEXT: ArgList:
+YAML-NEXT: ArgIndices: [ ]
+YAML-NEXT: - Kind: LF_PROCEDURE
+YAML-NEXT: Procedure:
+YAML-NEXT: ReturnType: 3
+YAML-NEXT: CallConv: NearC
+YAML-NEXT: Options: [ None ]
+YAML-NEXT: ParameterCount: 0
+YAML-NEXT: ArgumentList: 4096
+YAML-NEXT: - Kind: LF_PROCEDURE
+YAML-NEXT: Procedure:
+YAML-NEXT: ReturnType: 116
+YAML-NEXT: CallConv: NearC
+YAML-NEXT: Options: [ None ]
+YAML-NEXT: ParameterCount: 0
+YAML-NEXT: ArgumentList: 4096
+
+This test mostly checks that we include the type index offset table, and
+eventually hash codes. The type index offsets should be similar to those
+already present in big-read.pdb.
+
+PDB: Type Info Stream (TPI) {
+PDB-NEXT: TPI Version: 20040203
+PDB-NEXT: Record count: 728
+PDB-NEXT: Records [
+PDB-NEXT: {
+PDB-NEXT: ArgList (0x1000) {
+PDB-NEXT: TypeLeafKind: LF_ARGLIST (0x1201)
+PDB-NEXT: NumArgs: 0
+PDB-NEXT: Arguments [
+PDB-NEXT: ]
+PDB-NEXT: }
+PDB-NEXT: }
+PDB-NEXT: {
+PDB-NEXT: Procedure (0x1001) {
+PDB-NEXT: TypeLeafKind: LF_PROCEDURE (0x1008)
+PDB-NEXT: ReturnType: void (0x3)
+PDB-NEXT: CallingConvention: NearC (0x0)
+PDB-NEXT: FunctionOptions [ (0x0)
+PDB-NEXT: ]
+PDB-NEXT: NumParameters: 0
+PDB-NEXT: ArgListType: () (0x1000)
+PDB-NEXT: }
+PDB-NEXT: }
+PDB-NEXT: {
+PDB-NEXT: Procedure (0x1002) {
+PDB-NEXT: TypeLeafKind: LF_PROCEDURE (0x1008)
+PDB-NEXT: ReturnType: int (0x74)
+PDB-NEXT: CallingConvention: NearC (0x0)
+PDB-NEXT: FunctionOptions [ (0x0)
+PDB-NEXT: ]
+PDB-NEXT: NumParameters: 0
+PDB-NEXT: ArgListType: () (0x1000)
+PDB-NEXT: }
+PDB-NEXT: }
+...
+PDB: TypeIndexOffsets [
+PDB-NEXT: Index: 0x1000, Offset: 0
+PDB-NEXT: Index: 0x106c, Offset: 8,116
+PDB-NEXT: Index: 0x1118, Offset: 16,372
+PDB-NEXT: Index: 0x11df, Offset: 24,564
+PDB-NEXT: Index: 0x128e, Offset: 32,752
+PDB-NEXT: ]
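[Editor's sketch] The type index offset table checked above is what lets a reader locate a record such as 0x128e without decoding every record before it: it stores (first type index, stream offset) pairs at intervals, so lookup is a binary search followed by a short linear scan. A minimal C++ sketch under that assumption; the names are illustrative, not LLVM's API.

  #include <algorithm>
  #include <cstdint>
  #include <iterator>
  #include <vector>

  struct TypeIndexOffset {
    uint32_t Index;  // first type index covered by this entry
    uint32_t Offset; // byte offset of that run of records in the stream
  };

  // Return the stream offset from which to linearly scan for type index TI.
  // Assumes Entries is sorted by Index, as in the TypeIndexOffsets dumps.
  uint32_t seekOffsetFor(const std::vector<TypeIndexOffset> &Entries,
                         uint32_t TI) {
    auto It = std::upper_bound(
        Entries.begin(), Entries.end(), TI,
        [](uint32_t V, const TypeIndexOffset &E) { return V < E.Index; });
    return It == Entries.begin() ? 0 : std::prev(It)->Offset;
  }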
diff --git a/test/DebugInfo/PDB/pdbdump-headers.test b/test/DebugInfo/PDB/pdbdump-headers.test
index edbaedb6b526..4152f0f9da00 100644
--- a/test/DebugInfo/PDB/pdbdump-headers.test
+++ b/test/DebugInfo/PDB/pdbdump-headers.test
@@ -1,4 +1,4 @@
-; RUN: llvm-pdbdump raw -headers -tpi-records -tpi-record-bytes -module-syms \
+; RUN: llvm-pdbdump raw -headers -string-table -tpi-records -tpi-record-bytes -module-syms \
; RUN: -sym-record-bytes -globals -publics -module-files \
; RUN: -stream-summary -stream-blocks -ipi-records -ipi-record-bytes \
; RUN: -section-contribs -section-map -section-headers -line-info \
@@ -61,11 +61,22 @@
; EMPTY-NEXT: Stream 15: [21]
; EMPTY-NEXT: Stream 16: [22]
; EMPTY-NEXT: ]
+; EMPTY-NEXT: String Table {
+; EMPTY-NEXT: 'd:\src\llvm\test\debuginfo\pdb\inputs\predefined c++ attributes (compiler internal)'
+; EMPTY-NEXT: 'd:\src\llvm\test\debuginfo\pdb\inputs\empty.cpp'
+; EMPTY-NEXT: '$T0 $ebp = $eip $T0 4 + ^ = $ebp $T0 ^ = $esp $T0 8 + = '
+; EMPTY-NEXT: }
; EMPTY-NEXT: PDB Stream {
; EMPTY-NEXT: Version: 20000404
; EMPTY-NEXT: Signature: 0x54E507E2
; EMPTY-NEXT: Age: 1
; EMPTY-NEXT: Guid: {0B355641-86A0-A249-896F-9988FAE52FF0}
+; EMPTY-NEXT: Features: 0x1
+; EMPTY-NEXT: Named Streams {
+; EMPTY-NEXT: /names: 13
+; EMPTY-NEXT: /LinkInfo: 5
+; EMPTY-NEXT: /src/headerblock: 9
+; EMPTY-NEXT: }
; EMPTY-NEXT: }
; EMPTY-NEXT: Type Info Stream (TPI) {
; EMPTY-NEXT: TPI Version: 20040203
@@ -142,15 +153,18 @@
; EMPTY-NEXT: Number of Hash Buckets: 262143
; EMPTY-NEXT: Hash Key Size: 4
; EMPTY-NEXT: Values: [205956, 163561, 59811, 208239, 16377, 247078, 194342, 254156, 194536, 167492, 185421, 119540, 261871, 198119, 48056, 251486, 134580, 148190, 113636, 53336, 55779, 220695, 198114, 148734, 81128, 60158, 217249, 174209, 159978, 249504, 141941, 238785, 6214, 94935, 151449, 135589, 73373, 96512, 254299, 17744, 239514, 173189, 130544, 204437, 238560, 144673, 115151, 197306, 256035, 101096, 231280, 52156, 48854, 170035, 177041, 102745, 16947, 183703, 98548, 35693, 171328, 203640, 139292, 49018, 43821, 202555, 165040, 215835, 142625, 52534, 44186, 103930, 110942, 17991, 213215]
-; EMPTY-NEXT: Type Index Offsets: [{4096, 0}]
-; EMPTY-NEXT: Hash Adjustments: []
+; EMPTY-NEXT: Adjusters [
+; EMPTY-NEXT: ]
; EMPTY-NEXT: }
+; EMPTY-NEXT: TypeIndexOffsets [
+; EMPTY-NEXT: Index: 0x1000, Offset: 0
+; EMPTY-NEXT: ]
; EMPTY: Type Info Stream (IPI) {
; EMPTY-NEXT: IPI Version: 20040203
; EMPTY-NEXT: Record count: 15
; EMPTY-NEXT: Records [
; EMPTY-NEXT: {
-; EMPTY-NEXT: UdtModSourceLine (0x104B) {
+; EMPTY-NEXT: UdtModSourceLine (0x1000) {
; EMPTY-NEXT: TypeLeafKind: LF_UDT_MOD_SRC_LINE (0x1607)
; EMPTY-NEXT: UDT: __vc_attributes::threadingAttribute (0x100B)
; EMPTY-NEXT: SourceFile: <unknown simple type> (0x1)
@@ -162,7 +176,7 @@
; EMPTY-NEXT: )
; EMPTY-NEXT: }
; EMPTY-NEXT: {
-; EMPTY-NEXT: UdtModSourceLine (0x104C) {
+; EMPTY-NEXT: UdtModSourceLine (0x1001) {
; EMPTY-NEXT: TypeLeafKind: LF_UDT_MOD_SRC_LINE (0x1607)
; EMPTY-NEXT: UDT: __vc_attributes::event_receiverAttribute (0x1017)
; EMPTY-NEXT: SourceFile: <unknown simple type> (0x1)
@@ -174,7 +188,7 @@
; EMPTY-NEXT: )
; EMPTY-NEXT: }
; EMPTY-NEXT: {
-; EMPTY-NEXT: UdtModSourceLine (0x104D) {
+; EMPTY-NEXT: UdtModSourceLine (0x1002) {
; EMPTY-NEXT: TypeLeafKind: LF_UDT_MOD_SRC_LINE (0x1607)
; EMPTY-NEXT: UDT: __vc_attributes::aggregatableAttribute (0x1021)
; EMPTY-NEXT: SourceFile: <unknown simple type> (0x1)
@@ -186,7 +200,7 @@
; EMPTY-NEXT: )
; EMPTY-NEXT: }
; EMPTY-NEXT: {
-; EMPTY-NEXT: UdtModSourceLine (0x104E) {
+; EMPTY-NEXT: UdtModSourceLine (0x1003) {
; EMPTY-NEXT: TypeLeafKind: LF_UDT_MOD_SRC_LINE (0x1607)
; EMPTY-NEXT: UDT: __vc_attributes::event_sourceAttribute (0x102C)
; EMPTY-NEXT: SourceFile: <unknown simple type> (0x1)
@@ -198,7 +212,7 @@
; EMPTY-NEXT: )
; EMPTY-NEXT: }
; EMPTY-NEXT: {
-; EMPTY-NEXT: UdtModSourceLine (0x104F) {
+; EMPTY-NEXT: UdtModSourceLine (0x1004) {
; EMPTY-NEXT: TypeLeafKind: LF_UDT_MOD_SRC_LINE (0x1607)
; EMPTY-NEXT: UDT: __vc_attributes::moduleAttribute (0x103A)
; EMPTY-NEXT: SourceFile: <unknown simple type> (0x1)
@@ -210,7 +224,7 @@
; EMPTY-NEXT: )
; EMPTY-NEXT: }
; EMPTY-NEXT: {
-; EMPTY-NEXT: UdtModSourceLine (0x1050) {
+; EMPTY-NEXT: UdtModSourceLine (0x1005) {
; EMPTY-NEXT: TypeLeafKind: LF_UDT_MOD_SRC_LINE (0x1607)
; EMPTY-NEXT: UDT: __vc_attributes::helper_attributes::usageAttribute (0x1042)
; EMPTY-NEXT: SourceFile: <unknown simple type> (0x1)
@@ -221,13 +235,141 @@
; EMPTY-NEXT: 0000: 42100000 01000000 6C000000 0100F2F1 |B.......l.......|
; EMPTY-NEXT: )
; EMPTY-NEXT: }
-; EMPTY: Hash {
-; EMPTY-NEXT: Number of Hash Buckets: 262143
-; EMPTY-NEXT: Hash Key Size: 4
-; EMPTY-NEXT: Values: [7186, 7198, 7180, 7191, 7201, 7241, 7249, 80727, 154177, 75189, 253662, 193467, 222705, 186099, 257108]
-; EMPTY-NEXT: Type Index Offsets: [{4096, 0}]
-; EMPTY-NEXT: Hash Adjustments: []
-; EMPTY-NEXT: }
+; EMPTY-NEXT: {
+; EMPTY-NEXT: UdtModSourceLine (0x1006) {
+; EMPTY-NEXT: TypeLeafKind: LF_UDT_MOD_SRC_LINE (0x1607)
+; EMPTY-NEXT: UDT: __vc_attributes::helper_attributes::v1_alttypeAttribute (0x104A)
+; EMPTY-NEXT: SourceFile: <unknown simple type> (0x1)
+; EMPTY-NEXT: LineNumber: 96
+; EMPTY-NEXT: Module: 1
+; EMPTY-NEXT: }
+; EMPTY-NEXT: Bytes (
+; EMPTY-NEXT: 0000: 4A100000 01000000 60000000 0100F2F1 |J.......`.......|
+; EMPTY-NEXT: )
+; EMPTY-NEXT: }
+; EMPTY-NEXT: {
+; EMPTY-NEXT: StringId (0x1007) {
+; EMPTY-NEXT: TypeLeafKind: LF_STRING_ID (0x1605)
+; EMPTY-NEXT: Id: 0x0
+; EMPTY-NEXT: StringData: d:\src\llvm\test\DebugInfo\PDB\Inputs
+; EMPTY-NEXT: }
+; EMPTY-NEXT: Bytes (
+; EMPTY-NEXT: 0000: 00000000 643A5C73 72635C6C 6C766D5C |....d:\src\llvm\|
+; EMPTY-NEXT: 0010: 74657374 5C446562 7567496E 666F5C50 |test\DebugInfo\P|
+; EMPTY-NEXT: 0020: 44425C49 6E707574 7300F2F1 |DB\Inputs...|
+; EMPTY-NEXT: )
+; EMPTY-NEXT: }
+; EMPTY-NEXT: {
+; EMPTY-NEXT: StringId (0x1008) {
+; EMPTY-NEXT: TypeLeafKind: LF_STRING_ID (0x1605)
+; EMPTY-NEXT: Id: 0x0
+; EMPTY-NEXT: StringData: C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\BIN\cl.exe
+; EMPTY-NEXT: }
+; EMPTY-NEXT: Bytes (
+; EMPTY-NEXT: 0000: 00000000 433A5C50 726F6772 616D2046 |....C:\Program F|
+; EMPTY-NEXT: 0010: 696C6573 20287838 36295C4D 6963726F |iles (x86)\Micro|
+; EMPTY-NEXT: 0020: 736F6674 20566973 75616C20 53747564 |soft Visual Stud|
+; EMPTY-NEXT: 0030: 696F2031 322E305C 56435C42 494E5C63 |io 12.0\VC\BIN\c|
+; EMPTY-NEXT: 0040: 6C2E6578 6500F2F1 |l.exe...|
+; EMPTY-NEXT: )
+; EMPTY-NEXT: }
+; EMPTY-NEXT: {
+; EMPTY-NEXT: StringId (0x1009) {
+; EMPTY-NEXT: TypeLeafKind: LF_STRING_ID (0x1605)
+; EMPTY-NEXT: Id: 0x0
+; EMPTY-NEXT: StringData: empty.cpp
+; EMPTY-NEXT: }
+; EMPTY-NEXT: Bytes (
+; EMPTY-NEXT: 0000: 00000000 656D7074 792E6370 7000F2F1 |....empty.cpp...|
+; EMPTY-NEXT: )
+; EMPTY-NEXT: }
+; EMPTY-NEXT: {
+; EMPTY-NEXT: StringId (0x100A) {
+; EMPTY-NEXT: TypeLeafKind: LF_STRING_ID (0x1605)
+; EMPTY-NEXT: Id: 0x0
+; EMPTY-NEXT: StringData: d:\src\llvm\test\DebugInfo\PDB\Inputs\vc120.pdb
+; EMPTY-NEXT: }
+; EMPTY-NEXT: Bytes (
+; EMPTY-NEXT: 0000: 00000000 643A5C73 72635C6C 6C766D5C |....d:\src\llvm\|
+; EMPTY-NEXT: 0010: 74657374 5C446562 7567496E 666F5C50 |test\DebugInfo\P|
+; EMPTY-NEXT: 0020: 44425C49 6E707574 735C7663 3132302E |DB\Inputs\vc120.|
+; EMPTY-NEXT: 0030: 70646200 |pdb.|
+; EMPTY-NEXT: )
+; EMPTY-NEXT: }
+; EMPTY-NEXT: {
+; EMPTY-NEXT: StringId (0x100B) {
+; EMPTY-NEXT: TypeLeafKind: LF_STRING_ID (0x1605)
+; EMPTY-NEXT: Id: 0x0
+; EMPTY-NEXT: StringData: -Zi -MT -I"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\INCLUDE" -I"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\ATLMFC\INCLUDE" -I"C:\Program Files (x86)\Windows Kits\8.1\include\shared" -I"C:\Program Files (x86)\Windows
+; EMPTY-NEXT: }
+; EMPTY-NEXT: Bytes (
+; EMPTY-NEXT: 0000: 00000000 2D5A6920 2D4D5420 2D492243 |....-Zi -MT -I"C|
+; EMPTY-NEXT: 0010: 3A5C5072 6F677261 6D204669 6C657320 |:\Program Files |
+; EMPTY-NEXT: 0020: 28783836 295C4D69 63726F73 6F667420 |(x86)\Microsoft |
+; EMPTY-NEXT: 0030: 56697375 616C2053 74756469 6F203132 |Visual Studio 12|
+; EMPTY-NEXT: 0040: 2E305C56 435C494E 434C5544 4522202D |.0\VC\INCLUDE" -|
+; EMPTY-NEXT: 0050: 4922433A 5C50726F 6772616D 2046696C |I"C:\Program Fil|
+; EMPTY-NEXT: 0060: 65732028 78383629 5C4D6963 726F736F |es (x86)\Microso|
+; EMPTY-NEXT: 0070: 66742056 69737561 6C205374 7564696F |ft Visual Studio|
+; EMPTY-NEXT: 0080: 2031322E 305C5643 5C41544C 4D46435C | 12.0\VC\ATLMFC\|
+; EMPTY-NEXT: 0090: 494E434C 55444522 202D4922 433A5C50 |INCLUDE" -I"C:\P|
+; EMPTY-NEXT: 00A0: 726F6772 616D2046 696C6573 20287838 |rogram Files (x8|
+; EMPTY-NEXT: 00B0: 36295C57 696E646F 7773204B 6974735C |6)\Windows Kits\|
+; EMPTY-NEXT: 00C0: 382E315C 696E636C 7564655C 73686172 |8.1\include\shar|
+; EMPTY-NEXT: 00D0: 65642220 2D492243 3A5C5072 6F677261 |ed" -I"C:\Progra|
+; EMPTY-NEXT: 00E0: 6D204669 6C657320 28783836 295C5769 |m Files (x86)\Wi|
+; EMPTY-NEXT: 00F0: 6E646F77 7300F2F1 |ndows...|
+; EMPTY-NEXT: )
+; EMPTY-NEXT: }
+; EMPTY-NEXT: {
+; EMPTY-NEXT: StringList (0x100C) {
+; EMPTY-NEXT: TypeLeafKind: LF_SUBSTR_LIST (0x1604)
+; EMPTY-NEXT: NumStrings: 1
+; EMPTY-NEXT: Strings [
+; EMPTY-NEXT: String: __vc_attributes::threadingAttribute (0x100B)
+; EMPTY-NEXT: ]
+; EMPTY-NEXT: }
+; EMPTY-NEXT: Bytes (
+; EMPTY-NEXT: 0000: 01000000 0B100000 |........|
+; EMPTY-NEXT: )
+; EMPTY-NEXT: }
+; EMPTY-NEXT: {
+; EMPTY-NEXT: StringId (0x100D) {
+; EMPTY-NEXT: TypeLeafKind: LF_STRING_ID (0x1605)
+; EMPTY-NEXT: Id: "-Zi -MT -I"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\INCLUDE" -I"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\ATLMFC\INCLUDE" -I"C:\Program Files (x86)\Windows Kits\8.1\include\shared" -I"C:\Program Files (x86)\Windows" (0x100C)
+; EMPTY-NEXT: StringData: Kits\8.1\include\um" -I"C:\Program Files (x86)\Windows Kits\8.1\include\winrt" -TP -X
+; EMPTY-NEXT: }
+; EMPTY-NEXT: Bytes (
+; EMPTY-NEXT: 0000: 0C100000 204B6974 735C382E 315C696E |.... Kits\8.1\in|
+; EMPTY-NEXT: 0010: 636C7564 655C756D 22202D49 22433A5C |clude\um" -I"C:\|
+; EMPTY-NEXT: 0020: 50726F67 72616D20 46696C65 73202878 |Program Files (x|
+; EMPTY-NEXT: 0030: 3836295C 57696E64 6F777320 4B697473 |86)\Windows Kits|
+; EMPTY-NEXT: 0040: 5C382E31 5C696E63 6C756465 5C77696E |\8.1\include\win|
+; EMPTY-NEXT: 0050: 72742220 2D545020 2D5800F1 |rt" -TP -X..|
+; EMPTY-NEXT: )
+; EMPTY-NEXT: }
+; EMPTY-NEXT: {
+; EMPTY-NEXT: BuildInfo (0x100E) {
+; EMPTY-NEXT: TypeLeafKind: LF_BUILDINFO (0x1603)
+; EMPTY-NEXT: NumArgs: 5
+; EMPTY-NEXT: Arguments [
+; EMPTY-NEXT: ArgType: d:\src\llvm\test\DebugInfo\PDB\Inputs (0x1007)
+; EMPTY-NEXT: ArgType: C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\BIN\cl.exe (0x1008)
+; EMPTY-NEXT: ArgType: empty.cpp (0x1009)
+; EMPTY-NEXT: ArgType: d:\src\llvm\test\DebugInfo\PDB\Inputs\vc120.pdb (0x100A)
+; EMPTY-NEXT: ArgType: Kits\8.1\include\um" -I"C:\Program Files (x86)\Windows Kits\8.1\include\winrt" -TP -X (0x100D)
+; EMPTY-NEXT: ]
+; EMPTY-NEXT: }
+; EMPTY-NEXT: Bytes (
+; EMPTY-NEXT: 0000: 05000710 00000810 00000910 00000A10 |................|
+; EMPTY-NEXT: 0010: 00000D10 0000F2F1 |........|
+; EMPTY-NEXT: )
+; EMPTY-NEXT: }
+; EMPTY-NEXT: TypeIndexOffsets [
+; EMPTY-NEXT: Index: 0x1000, Offset: 0
+; EMPTY-NEXT: ]
+; EMPTY-NEXT: ]
+; EMPTY-NEXT: }
; EMPTY: DBI Stream {
; EMPTY-NEXT: Dbi Version: 19990903
; EMPTY-NEXT: Age: 1
@@ -994,13 +1136,14 @@
; ALL: Signature: 0x54E507E2
; ALL: Age: 1
; ALL: Guid: {0B355641-86A0-A249-896F-9988FAE52FF0}
+; ALL: Features: 0x1
; ALL: }
; ALL: Type Info Stream (IPI) {
; ALL: IPI Version: 20040203
; ALL: Record count: 15
; ALL: Records [
; ALL: {
-; ALL: UdtModSourceLine (0x104B) {
+; ALL: UdtModSourceLine (0x1000) {
; ALL: TypeLeafKind: LF_UDT_MOD_SRC_LINE (0x1607)
; ALL: UDT: __vc_attributes::threadingAttribute (0x100B)
; ALL: SourceFile: <unknown simple type> (0x1)
@@ -1009,7 +1152,7 @@
; ALL: }
; ALL: }
; ALL: {
-; ALL: UdtModSourceLine (0x104C) {
+; ALL: UdtModSourceLine (0x1001) {
; ALL: TypeLeafKind: LF_UDT_MOD_SRC_LINE (0x1607)
; ALL: UDT: __vc_attributes::event_receiverAttribute (0x1017)
; ALL: SourceFile: <unknown simple type> (0x1)
@@ -1018,7 +1161,7 @@
; ALL: }
; ALL: }
; ALL: {
-; ALL: UdtModSourceLine (0x104D) {
+; ALL: UdtModSourceLine (0x1002) {
; ALL: TypeLeafKind: LF_UDT_MOD_SRC_LINE (0x1607)
; ALL: UDT: __vc_attributes::aggregatableAttribute (0x1021)
; ALL: SourceFile: <unknown simple type> (0x1)
@@ -1027,7 +1170,7 @@
; ALL: }
; ALL: }
; ALL: {
-; ALL: UdtModSourceLine (0x104E) {
+; ALL: UdtModSourceLine (0x1003) {
; ALL: TypeLeafKind: LF_UDT_MOD_SRC_LINE (0x1607)
; ALL: UDT: __vc_attributes::event_sourceAttribute (0x102C)
; ALL: SourceFile: <unknown simple type> (0x1)
@@ -1036,7 +1179,7 @@
; ALL: }
; ALL: }
; ALL: {
-; ALL: UdtModSourceLine (0x104F) {
+; ALL: UdtModSourceLine (0x1004) {
; ALL: TypeLeafKind: LF_UDT_MOD_SRC_LINE (0x1607)
; ALL: UDT: __vc_attributes::moduleAttribute (0x103A)
; ALL: SourceFile: <unknown simple type> (0x1)
@@ -1045,7 +1188,7 @@
; ALL: }
; ALL: }
; ALL: {
-; ALL: UdtModSourceLine (0x1050) {
+; ALL: UdtModSourceLine (0x1005) {
; ALL: TypeLeafKind: LF_UDT_MOD_SRC_LINE (0x1607)
; ALL: UDT: __vc_attributes::helper_attributes::usageAttribute (0x1042)
; ALL: SourceFile: <unknown simple type> (0x1)
@@ -1054,7 +1197,7 @@
; ALL: }
; ALL: }
; ALL: {
-; ALL: UdtModSourceLine (0x1051) {
+; ALL: UdtModSourceLine (0x1006) {
; ALL: TypeLeafKind: LF_UDT_MOD_SRC_LINE (0x1607)
; ALL: UDT: __vc_attributes::helper_attributes::v1_alttypeAttribute (0x104A)
; ALL: SourceFile: <unknown simple type> (0x1)
@@ -1063,66 +1206,66 @@
; ALL: }
; ALL: }
; ALL: {
-; ALL: StringId (0x1052) {
+; ALL: StringId (0x1007) {
; ALL: TypeLeafKind: LF_STRING_ID (0x1605)
; ALL: Id: 0x0
; ALL: StringData: d:\src\llvm\test\DebugInfo\PDB\Inputs
; ALL: }
; ALL: }
; ALL: {
-; ALL: StringId (0x1053) {
+; ALL: StringId (0x1008) {
; ALL: TypeLeafKind: LF_STRING_ID (0x1605)
; ALL: Id: 0x0
; ALL: StringData: C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\BIN\cl.exe
; ALL: }
; ALL: }
; ALL: {
-; ALL: StringId (0x1054) {
+; ALL: StringId (0x1009) {
; ALL: TypeLeafKind: LF_STRING_ID (0x1605)
; ALL: Id: 0x0
; ALL: StringData: empty.cpp
; ALL: }
; ALL: }
; ALL: {
-; ALL: StringId (0x1055) {
+; ALL: StringId (0x100A) {
; ALL: TypeLeafKind: LF_STRING_ID (0x1605)
; ALL: Id: 0x0
; ALL: StringData: d:\src\llvm\test\DebugInfo\PDB\Inputs\vc120.pdb
; ALL: }
; ALL: }
; ALL: {
-; ALL: StringId (0x1056) {
+; ALL: StringId (0x100B) {
; ALL: TypeLeafKind: LF_STRING_ID (0x1605)
; ALL: Id: 0x0
; ALL: StringData: -Zi -MT -I"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\INCLUDE" -I"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\ATLMFC\INCLUDE" -I"C:\Program Files (x86)\Windows Kits\8.1\include\shared" -I"C:\Program Files (x86)\Windows
; ALL: }
; ALL: }
; ALL: {
-; ALL: StringList (0x1057) {
+; ALL: StringList (0x100C) {
; ALL: TypeLeafKind: LF_SUBSTR_LIST (0x1604)
-; ALL: NumArgs: 1
-; ALL: Arguments [
-; ALL: ArgType: __vc_attributes::threadingAttribute (0x100B)
+; ALL: NumStrings: 1
+; ALL: Strings [
+; ALL: String: __vc_attributes::threadingAttribute (0x100B)
; ALL: ]
; ALL: }
; ALL: }
; ALL: {
-; ALL: StringId (0x1058) {
+; ALL: StringId (0x100D) {
; ALL: TypeLeafKind: LF_STRING_ID (0x1605)
-; ALL: Id: <field list> (0x100C)
+; ALL: Id: "-Zi -MT -I"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\INCLUDE" -I"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\ATLMFC\INCLUDE" -I"C:\Program Files (x86)\Windows Kits\8.1\include\shared" -I"C:\Program Files (x86)\Windows" (0x100C)
; ALL: StringData: Kits\8.1\include\um" -I"C:\Program Files (x86)\Windows Kits\8.1\include\winrt" -TP -X
; ALL: }
; ALL: }
; ALL: {
-; ALL: BuildInfo (0x1059) {
+; ALL: BuildInfo (0x100E) {
; ALL: TypeLeafKind: LF_BUILDINFO (0x1603)
; ALL: NumArgs: 5
; ALL: Arguments [
-; ALL: ArgType: void __vc_attributes::threadingAttribute::(__vc_attributes::threadingAttribute::threading_e) (0x1007)
-; ALL: ArgType: void __vc_attributes::threadingAttribute::() (0x1008)
-; ALL: ArgType: 0x1009
-; ALL: ArgType: <field list> (0x100A)
-; ALL: ArgType: __vc_attributes::event_receiverAttribute::type_e (0x100D)
+; ALL: ArgType: d:\src\llvm\test\DebugInfo\PDB\Inputs (0x1007)
+; ALL: ArgType: C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\BIN\cl.exe (0x1008)
+; ALL: ArgType: empty.cpp (0x1009)
+; ALL: ArgType: d:\src\llvm\test\DebugInfo\PDB\Inputs\vc120.pdb (0x100A)
+; ALL: ArgType: Kits\8.1\include\um" -I"C:\Program Files (x86)\Windows Kits\8.1\include\winrt" -TP -X (0x100D)
; ALL: ]
; ALL: }
; ALL: }
@@ -1692,6 +1835,12 @@
; BIG-NEXT: Signature: 0x571FFE67
; BIG-NEXT: Age: 1
; BIG-NEXT: Guid: {880ECC89-DF81-0B4F-839C-58CBD052E937}
+; BIG-NEXT: Features: 0x1
+; BIG-NEXT: Named Streams {
+; BIG-NEXT: /names: 13
+; BIG-NEXT: /LinkInfo: 5
+; BIG-NEXT: /src/headerblock: 61
+; BIG-NEXT: }
; BIG-NEXT: }
; BIG-NEXT: DBI Stream {
; BIG-NEXT: Dbi Version: 19990903
diff --git a/test/DebugInfo/PDB/pdbdump-readwrite.test b/test/DebugInfo/PDB/pdbdump-readwrite.test
index b2f1debd5336..4756faf68c2d 100644
--- a/test/DebugInfo/PDB/pdbdump-readwrite.test
+++ b/test/DebugInfo/PDB/pdbdump-readwrite.test
@@ -1,10 +1,10 @@
RUN: llvm-pdbdump pdb2yaml -dbi-module-info -dbi-module-source-info \
-RUN: -dbi-stream -pdb-stream -tpi-stream -stream-directory \
+RUN: -dbi-stream -pdb-stream -string-table -tpi-stream -stream-directory \
RUN: -stream-metadata %p/Inputs/empty.pdb > %t.1
RUN: llvm-pdbdump yaml2pdb -pdb=%t.2 %t.1
-RUN: llvm-pdbdump raw -headers -tpi-records %p/Inputs/empty.pdb | FileCheck %s
-RUN: llvm-pdbdump raw -headers -tpi-records %t.2 | FileCheck %s
+RUN: llvm-pdbdump raw -headers -string-table -tpi-records %p/Inputs/empty.pdb | FileCheck %s
+RUN: llvm-pdbdump raw -headers -string-table -tpi-records %t.2 | FileCheck %s
CHECK: FileHeaders {
CHECK-NEXT: BlockSize: 4096
@@ -17,11 +17,20 @@ CHECK-NEXT: NumDirectoryBlocks: 1
CHECK-NEXT: DirectoryBlocks:
CHECK-NEXT: NumStreams:
CHECK-NEXT: }
+CHECK: String Table {
+CHECK-DAG: 'd:\src\llvm\test\debuginfo\pdb\inputs\predefined c++ attributes (compiler internal)'
+CHECK-DAG: 'd:\src\llvm\test\debuginfo\pdb\inputs\empty.cpp'
+CHECK-DAG: '$T0 $ebp = $eip $T0 4 + ^ = $ebp $T0 ^ = $esp $T0 8 + = '
+CHECK-NEXT: }
CHECK: PDB Stream {
CHECK-NEXT: Version: 20000404
CHECK-NEXT: Signature: 0x54E507E2
CHECK-NEXT: Age: 1
CHECK-NEXT: Guid: {0B355641-86A0-A249-896F-9988FAE52FF0}
+CHECK-NEXT: Features: 0x1
+CHECK-NEXT: Named Streams {
+CHECK: /names:
+CHECK: }
CHECK-NEXT: }
CHECK: Type Info Stream (TPI) {
CHECK-NEXT: TPI Version: 20040203
diff --git a/test/DebugInfo/PDB/pdbdump-write.test b/test/DebugInfo/PDB/pdbdump-write.test
index 6e29bdbc1cfe..f56b4fbe3624 100644
--- a/test/DebugInfo/PDB/pdbdump-write.test
+++ b/test/DebugInfo/PDB/pdbdump-write.test
@@ -10,8 +10,11 @@
; stream metadata, since the layout of the MSF file might be different
; (for example if we don't write the entire stream)
;
-; RUN: llvm-pdbdump pdb2yaml -stream-metadata -stream-directory -pdb-stream -tpi-stream %p/Inputs/empty.pdb > %t.1
+; RUN: llvm-pdbdump pdb2yaml -stream-metadata -stream-directory \
+; RUN: -pdb-stream -tpi-stream %p/Inputs/empty.pdb > %t.1
; RUN: llvm-pdbdump yaml2pdb -pdb=%t.2 %t.1
-; RUN: llvm-pdbdump pdb2yaml -pdb-stream -tpi-stream -no-file-headers %p/Inputs/empty.pdb > %t.3
-; RUN: llvm-pdbdump pdb2yaml -pdb-stream -tpi-stream -no-file-headers %t.2 > %t.4
+; RUN: llvm-pdbdump pdb2yaml -pdb-stream -tpi-stream \
+; RUN: -no-file-headers %p/Inputs/empty.pdb > %t.3
+; RUN: llvm-pdbdump pdb2yaml -pdb-stream -tpi-stream \
+; RUN: -no-file-headers %t.2 > %t.4
; RUN: diff %t.3 %t.4
diff --git a/test/DebugInfo/PDB/pdbdump-yaml-types.test b/test/DebugInfo/PDB/pdbdump-yaml-types.test
index 25895f3de2f3..7e6fcc1ca420 100644
--- a/test/DebugInfo/PDB/pdbdump-yaml-types.test
+++ b/test/DebugInfo/PDB/pdbdump-yaml-types.test
@@ -4,18 +4,14 @@
YAML: ---
YAML: MSF:
YAML: SuperBlock:
-YAML: BlockSize: 4096
-YAML: FreeBlockMap: 2
YAML: NumBlocks: 25
YAML: NumDirectoryBytes: 136
-YAML: Unknown1: 0
YAML: BlockMapAddr: 24
YAML: NumDirectoryBlocks: 1
YAML: DirectoryBlocks: [ 23 ]
YAML: NumStreams: 0
YAML: FileSize: 102400
YAML: TpiStream:
-YAML: Version: VC80
YAML: Records:
YAML: - Kind: LF_ARGLIST
YAML: ArgList:
diff --git a/test/DebugInfo/PDB/pdbdump-yaml.test b/test/DebugInfo/PDB/pdbdump-yaml.test
index 7d0c429fd1bf..44025be5bca7 100644
--- a/test/DebugInfo/PDB/pdbdump-yaml.test
+++ b/test/DebugInfo/PDB/pdbdump-yaml.test
@@ -1,5 +1,5 @@
-; RUN: llvm-pdbdump pdb2yaml -stream-metadata -stream-directory -pdb-stream %p/Inputs/empty.pdb \
-; RUN: | FileCheck -check-prefix=YAML %s
+; RUN: llvm-pdbdump pdb2yaml -stream-metadata -stream-directory -string-table -pdb-stream \
+; RUN: %p/Inputs/empty.pdb | FileCheck -check-prefix=YAML %s
; RUN: llvm-pdbdump pdb2yaml -no-file-headers -stream-metadata -stream-directory -pdb-stream \
; RUN: %p/Inputs/empty.pdb | FileCheck -check-prefix=NO-HEADERS %s
@@ -36,18 +36,16 @@
; YAML-NEXT: - Stream: [ 7 ]
; YAML-NEXT: - Stream: [ 21 ]
; YAML-NEXT: - Stream: [ 22 ]
+; YAML-NEXT: StringTable:
+; YAML-NEXT: - 'd:\src\llvm\test\debuginfo\pdb\inputs\predefined c++ attributes (compiler internal)'
+; YAML-NEXT: - 'd:\src\llvm\test\debuginfo\pdb\inputs\empty.cpp'
+; YAML-NEXT: - '$T0 $ebp = $eip $T0 4 + ^ = $ebp $T0 ^ = $esp $T0 8 + = '
; YAML-NEXT: PdbStream:
; YAML-NEXT: Age: 1
; YAML-NEXT: Guid: '{0B355641-86A0-A249-896F-9988FAE52FF0}'
; YAML-NEXT: Signature: 1424295906
+; YAML-NEXT: Features: [ VC110 ]
; YAML-NEXT: Version: VC70
-; YAML-NEXT: NamedStreams:
-; YAML-NEXT: - Name: /names
-; YAML-NEXT: StreamNum: 13
-; YAML-NEXT: - Name: /LinkInfo
-; YAML-NEXT: StreamNum: 5
-; YAML-NEXT: - Name: /src/headerblock
-; YAML-NEXT: StreamNum: 9
; YAML-NEXT: ...
; NO-HEADERS: ---
diff --git a/test/DebugInfo/PowerPC/tls-fission.ll b/test/DebugInfo/PowerPC/tls-fission.ll
index 358fd5b32c49..f456cbcb7146 100644
--- a/test/DebugInfo/PowerPC/tls-fission.ll
+++ b/test/DebugInfo/PowerPC/tls-fission.ll
@@ -13,8 +13,7 @@
; DW_OP_GNU_push_tls_address
; CHECK-NEXT: .byte 224
; check that the expected TLS address description is the first thing in the debug_addr section
-; CHECK: debug_addr
-; CHECK-NEXT: .Laddr_sec:
+; CHECK: .section .debug_addr,"",@progbits
; CHECK-NEXT: .quad tls@DTPREL+32768
source_filename = "test/DebugInfo/PowerPC/tls-fission.ll"
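[Editor's note] The +32768 in the checked directive plausibly cancels the 0x8000 bias the PPC64 ELF ABI applies to DTP-relative offsets (the bias lets signed 16-bit displacements span a full 64KB TLS block); debug info wants the unbiased offset. Under that assumption, in LaTeX form:

  \[
  \text{tls@DTPREL} = \mathrm{offset}(\text{tls}) - \texttt{0x8000},
  \qquad
  \text{tls@DTPREL} + 32768 = \mathrm{offset}(\text{tls}).
  \]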
diff --git a/test/DebugInfo/WebAssembly/dbg-declare.ll b/test/DebugInfo/WebAssembly/dbg-declare.ll
index c48b9122a1aa..d0f172c6988c 100644
--- a/test/DebugInfo/WebAssembly/dbg-declare.ll
+++ b/test/DebugInfo/WebAssembly/dbg-declare.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=wasm32-unknown-unknown | FileCheck %s
-; RUN: llc < %s -verify-machineinstrs -mtriple=wasm32-unknown-unknown -fast-isel | FileCheck --check-prefix=CHECK-FAST %s
+; RUN: llc < %s -verify-machineinstrs -mtriple=wasm32-unknown-unknown-wasm | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mtriple=wasm32-unknown-unknown-wasm -fast-isel | FileCheck --check-prefix=CHECK-FAST %s
; CHECK: #DEBUG_VALUE: decode:i <- [%vreg
; CHECK: #DEBUG_VALUE: decode:v <- [%vreg
; CHECK: DW_TAG_variable
@@ -9,7 +9,7 @@
source_filename = "test/DebugInfo/WebAssembly/dbg-declare.ll"
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
-target triple = "wasm32-unknown-unknown"
+target triple = "wasm32-unknown-unknown-wasm"
@key = external global [15 x i8], align 1
diff --git a/test/DebugInfo/X86/PR26148.ll b/test/DebugInfo/X86/PR26148.ll
index 1f66b7599faa..69e7bbd213b4 100644
--- a/test/DebugInfo/X86/PR26148.ll
+++ b/test/DebugInfo/X86/PR26148.ll
@@ -19,7 +19,7 @@
; As in 26163, we expect two ranges (as opposed to one), the first one being zero-sized
;
;
-; CHECK: 0x00000025: Beginning address offset: 0x0000000000000004
+; CHECK: Beginning address offset: 0x0000000000000004
; CHECK: Ending address offset: 0x0000000000000004
; CHECK: Location description: 10 03 93 04 55 93 02
; constu 0x00000003, piece 0x00000004, rdi, piece 0x00000002
diff --git a/test/DebugInfo/X86/dbg-abstract-vars-g-gmlt.ll b/test/DebugInfo/X86/dbg-abstract-vars-g-gmlt.ll
new file mode 100644
index 000000000000..a4e40a20da56
--- /dev/null
+++ b/test/DebugInfo/X86/dbg-abstract-vars-g-gmlt.ll
@@ -0,0 +1,105 @@
+; RUN: llc < %s -filetype=obj | llvm-dwarfdump - -debug-dump=info | FileCheck %s
+;
+; IR module created as follows:
+; clang -emit-llvm -S db-abs-1.cpp -o db-abs-1.ll -g
+; clang -emit-llvm -S db-abs-2.cpp -o db-abs-2.ll -gmlt
+; llvm-link db-abs-1.ll db-abs-2.ll -S -o db-abs-3.ll
+; --- db-abs-1.cpp ---
+; void f1();
+; inline __attribute__((always_inline)) void f2(int) {
+; f1();
+; }
+; void f3() {
+; f2(0);
+; }
+; --- db-abs-2.cpp ---
+; void f() {
+; }
+; ---
+; The point is that f3() is compiled -g and we get an abstract variable for the
+; unnamed parameter to f2(); then f() is compiled -gmlt and it's okay to have
+; the abstract variable still there.
+; PR31437.
+;
+; (The 'always_inline' attribute forces f2() to be inlined even at -O0, and the
+; 'inline' keyword means the non-inlined definition of f2() can be omitted from
+; the IR. These are just tactics to simplify the generated test case.)
+;
+; Verify we see the formal parameter in the first compile-unit, and nothing in
+; the second compile-unit.
+;
+; CHECK: DW_TAG_compile_unit
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "db-abs-1.cpp"
+; CHECK-NOT: NULL
+; CHECK: DW_TAG_subprogram
+; CHECK-NEXT: DW_AT_linkage_name {{.*}} "_Z2f2i"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_AT_name
+; CHECK: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_inlined_subroutine
+; CHECK-NEXT: DW_AT_abstract_origin {{.*}} "_Z2f2i"
+
+; CHECK: DW_TAG_compile_unit
+; CHECK-NOT: DW_TAG
+
+; ModuleID = 'llvm-link'
+source_filename = "llvm-link"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: noinline uwtable
+define void @_Z2f3v() #0 !dbg !8 {
+entry:
+ %.addr.i = alloca i32, align 4
+ call void @llvm.dbg.declare(metadata i32* %.addr.i, metadata !11, metadata !16), !dbg !17
+ store i32 0, i32* %.addr.i, align 4
+ call void @_Z2f1v(), !dbg !19
+ ret void, !dbg !20
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+declare void @_Z2f1v() #2
+
+; Function Attrs: noinline nounwind uwtable
+define void @_Z1fv() #3 !dbg !21 {
+entry:
+ ret void, !dbg !23
+}
+
+attributes #0 = { noinline uwtable }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
+attributes #3 = { noinline nounwind uwtable }
+
+!llvm.dbg.cu = !{!0, !3}
+!llvm.ident = !{!5, !5}
+!llvm.module.flags = !{!6, !7}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 5.0.0 (trunk 293745)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "db-abs-1.cpp", directory: "/home/probinson/projects/scratch/pr31437")
+!2 = !{}
+!3 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !4, producer: "clang version 5.0.0 (trunk 293745)", isOptimized: false, runtimeVersion: 0, emissionKind: LineTablesOnly, enums: !2)
+!4 = !DIFile(filename: "db-abs-2.cpp", directory: "/home/probinson/projects/scratch/pr31437")
+!5 = !{!"clang version 5.0.0 (trunk 293745)"}
+!6 = !{i32 2, !"Dwarf Version", i32 4}
+!7 = !{i32 2, !"Debug Info Version", i32 3}
+!8 = distinct !DISubprogram(name: "f3", linkageName: "_Z2f3v", scope: !1, file: !1, line: 5, type: !9, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!9 = !DISubroutineType(types: !10)
+!10 = !{null}
+!11 = !DILocalVariable(arg: 1, scope: !12, file: !1, line: 2, type: !15)
+!12 = distinct !DISubprogram(name: "f2", linkageName: "_Z2f2i", scope: !1, file: !1, line: 2, type: !13, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!13 = !DISubroutineType(types: !14)
+!14 = !{null, !15}
+!15 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!16 = !DIExpression()
+!17 = !DILocation(line: 2, column: 50, scope: !12, inlinedAt: !18)
+!18 = distinct !DILocation(line: 6, column: 3, scope: !8)
+!19 = !DILocation(line: 3, column: 3, scope: !12, inlinedAt: !18)
+!20 = !DILocation(line: 7, column: 1, scope: !8)
+!21 = distinct !DISubprogram(name: "f", scope: !4, file: !4, line: 1, type: !22, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !3, variables: !2)
+!22 = !DISubroutineType(types: !2)
+!23 = !DILocation(line: 2, column: 1, scope: !21)
diff --git a/test/DebugInfo/X86/dbg-value-const-byref.ll b/test/DebugInfo/X86/dbg-value-const-byref.ll
index 40b9f726f31e..77e243702a8a 100644
--- a/test/DebugInfo/X86/dbg-value-const-byref.ll
+++ b/test/DebugInfo/X86/dbg-value-const-byref.ll
@@ -34,10 +34,10 @@
; CHECK: Beginning address offset: [[C1]]
; CHECK: Ending address offset: [[C2:.*]]
; CHECK: Location description: 11 07
-; rax, piece 0x00000004
+; rax
; CHECK: Beginning address offset: [[C2]]
; CHECK: Ending address offset: [[R1:.*]]
-; CHECK: Location description: 50 93 04
+; CHECK: Location description: 50
; rdi+0
; CHECK: Beginning address offset: [[R1]]
; CHECK: Ending address offset: [[R2:.*]]
diff --git a/test/DebugInfo/X86/dbg-value-g-gmlt.ll b/test/DebugInfo/X86/dbg-value-g-gmlt.ll
new file mode 100644
index 000000000000..45b9b0a16862
--- /dev/null
+++ b/test/DebugInfo/X86/dbg-value-g-gmlt.ll
@@ -0,0 +1,100 @@
+; RUN: llc < %s -filetype=obj | llvm-dwarfdump - -debug-dump=info | FileCheck %s
+;
+; IR module created as follows:
+; clang -emit-llvm -S -O2 foo.cpp -o foo.ll -g
+; clang -emit-llvm -S -O2 bar.cpp -o bar.ll -gmlt
+; llvm-link foo.ll bar.ll -S -o linked.ll
+; opt -std-link-opts linked.ll -S -o opt.ll
+; --- foo.cpp ---
+; void f();
+; void foo(int param) {
+; if (param) f();
+; }
+; --- bar.cpp ---
+; void foo(int);
+; void bar() {
+; foo(0);
+; }
+; ---
+; The point is that bar() is compiled -gmlt and calls foo() with a constant 0.
+; foo() is compiled -g and gets inlined into bar(); foo's body is then
+; optimized away, leaving only a dbg.value call describing the inlined copy
+; of 'param', which should be benign.
+; That is, the compile-unit for bar.cpp should have nothing in it.
+; PR31437.
+
+; foo.cpp's unit comes first; skip past it, the second unit should be empty.
+; CHECK: DW_TAG_compile_unit
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_AT_name {{.*}} "foo.cpp"
+; CHECK: DW_TAG_compile_unit
+; CHECK-NOT: DW_TAG
+
+; ModuleID = 'linked.ll'
+source_filename = "llvm-link"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: uwtable
+define void @_Z3fooi(i32 %param) local_unnamed_addr #0 !dbg !8 {
+entry:
+ tail call void @llvm.dbg.value(metadata i32 %param, i64 0, metadata !13, metadata !14), !dbg !15
+ %tobool = icmp eq i32 %param, 0, !dbg !16
+ br i1 %tobool, label %if.end, label %if.then, !dbg !18
+
+if.then: ; preds = %entry
+ tail call void @_Z1fv(), !dbg !19
+ br label %if.end, !dbg !19
+
+if.end: ; preds = %if.then, %entry
+ ret void, !dbg !21
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
+
+declare void @_Z1fv() local_unnamed_addr #2
+
+; Function Attrs: nounwind readnone uwtable
+define void @_Z3barv() local_unnamed_addr #3 !dbg !22 {
+entry:
+ tail call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !13, metadata !14), !dbg !24
+ ret void, !dbg !26
+}
+
+attributes #0 = { uwtable }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
+attributes #3 = { nounwind readnone uwtable }
+
+!llvm.dbg.cu = !{!0, !3}
+!llvm.ident = !{!5, !5}
+!llvm.module.flags = !{!6, !7}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 5.0.0 (trunk 293745)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "foo.cpp", directory: "/home/probinson/projects/scratch/pr31437")
+!2 = !{}
+!3 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !4, producer: "clang version 5.0.0 (trunk 293745)", isOptimized: true, runtimeVersion: 0, emissionKind: LineTablesOnly, enums: !2)
+!4 = !DIFile(filename: "bar.cpp", directory: "/home/probinson/projects/scratch/pr31437")
+!5 = !{!"clang version 5.0.0 (trunk 293745)"}
+!6 = !{i32 2, !"Dwarf Version", i32 4}
+!7 = !{i32 2, !"Debug Info Version", i32 3}
+!8 = distinct !DISubprogram(name: "foo", linkageName: "_Z3fooi", scope: !1, file: !1, line: 2, type: !9, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !12)
+!9 = !DISubroutineType(types: !10)
+!10 = !{null, !11}
+!11 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!12 = !{!13}
+!13 = !DILocalVariable(name: "param", arg: 1, scope: !8, file: !1, line: 2, type: !11)
+!14 = !DIExpression()
+!15 = !DILocation(line: 2, column: 14, scope: !8)
+!16 = !DILocation(line: 3, column: 7, scope: !17)
+!17 = distinct !DILexicalBlock(scope: !8, file: !1, line: 3, column: 7)
+!18 = !DILocation(line: 3, column: 7, scope: !8)
+!19 = !DILocation(line: 3, column: 14, scope: !20)
+!20 = !DILexicalBlockFile(scope: !17, file: !1, discriminator: 1)
+!21 = !DILocation(line: 4, column: 1, scope: !8)
+!22 = distinct !DISubprogram(name: "bar", scope: !4, file: !4, line: 2, type: !23, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: true, unit: !3, variables: !2)
+!23 = !DISubroutineType(types: !2)
+!24 = !DILocation(line: 2, column: 14, scope: !8, inlinedAt: !25)
+!25 = distinct !DILocation(line: 3, column: 3, scope: !22)
+!26 = !DILocation(line: 4, column: 1, scope: !22)
diff --git a/test/DebugInfo/X86/dbg-value-regmask-clobber.ll b/test/DebugInfo/X86/dbg-value-regmask-clobber.ll
index 93543e5ed948..b958f080d02e 100644
--- a/test/DebugInfo/X86/dbg-value-regmask-clobber.ll
+++ b/test/DebugInfo/X86/dbg-value-regmask-clobber.ll
@@ -16,10 +16,8 @@
; ASM: .Ldebug_loc1:
; ASM-NEXT: .quad .Lfunc_begin0-.Lfunc_begin0
; ASM-NEXT: .quad [[argc_range_end]]-.Lfunc_begin0
-; ASM-NEXT: .short 3 # Loc expr size
+; ASM-NEXT: .short 1 # Loc expr size
; ASM-NEXT: .byte 82 # super-register DW_OP_reg2
-; ASM-NEXT: .byte 147 # DW_OP_piece
-; ASM-NEXT: .byte 4 # 4
; argc is the first formal parameter.
; DWARF: .debug_info contents:
@@ -30,7 +28,7 @@
; DWARF: .debug_loc contents:
; DWARF: [[argc_loc_offset]]: Beginning address offset: 0x0000000000000000
; DWARF-NEXT: Ending address offset: 0x0000000000000013
-; DWARF-NEXT: Location description: 52 93 04
+; DWARF-NEXT: Location description: 52
; ModuleID = 't.cpp'
source_filename = "test/DebugInfo/X86/dbg-value-regmask-clobber.ll"
diff --git a/test/DebugInfo/X86/debug-info-producer-with-flags.ll b/test/DebugInfo/X86/debug-info-producer-with-flags.ll
new file mode 100644
index 000000000000..c004c8a9067b
--- /dev/null
+++ b/test/DebugInfo/X86/debug-info-producer-with-flags.ll
@@ -0,0 +1,44 @@
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu %s -o %t -filetype=obj
+; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck %s
+;
+; Test the DW_AT_producer DWARF attribute.
+; When producer and flags are both given in DICompileUnit, set DW_AT_producer
+; to the two values combined.
+;
+; The test is split into two parts; this is the LLVM part. The frontend part can be
+; found at llvm/tools/clang/test/Driver/debug-options.c.
+;
+; Generated and reduced from:
+; clang++ -g -grecord-gcc-switches test.cc -S -emit-llvm -o -
+;
+; test.cc:
+; int main() {
+; return 0;
+; }
+
+; CHECK: DW_AT_producer
+; CHECK-SAME: "clang++ -g -grecord-gcc-switches test.cc -S -emit-llvm -o -"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @main() !dbg !6 {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval, align 4
+ ret i32 0, !dbg !10
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+!llvm.ident = !{!5}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang++", isOptimized: false, flags: "-g -grecord-gcc-switches test.cc -S -emit-llvm -o -", runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "test.cc", directory: "d")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{!"clang"}
+!6 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 4, type: !7, isLocal: false, isDefinition: true, scopeLine: 4, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!7 = !DISubroutineType(types: !8)
+!8 = !{!9}
+!9 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!10 = !DILocation(line: 5, column: 3, scope: !6)
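[Editor's sketch] How the combined DW_AT_producer value above is plausibly formed from the DICompileUnit fields (producer "clang++", flags "-g -grecord-gcc-switches test.cc -S -emit-llvm -o -"): a simple space-join. This is an illustration, not the actual DwarfUnit code; the helper name is hypothetical.

  #include <string>

  // Concatenate producer and recorded flags with a single space, matching
  // the CHECK-SAME line: "clang++ -g -grecord-gcc-switches ... -o -".
  std::string producerWithFlags(const std::string &Producer,
                                const std::string &Flags) {
    return Flags.empty() ? Producer : Producer + " " + Flags;
  }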
diff --git a/test/DebugInfo/X86/debug_and_nodebug_CUs.ll b/test/DebugInfo/X86/debug_and_nodebug_CUs.ll
new file mode 100644
index 000000000000..8347893ccf35
--- /dev/null
+++ b/test/DebugInfo/X86/debug_and_nodebug_CUs.ll
@@ -0,0 +1,82 @@
+; Test to ensure that a module containing both a NoDebug CU and one with
+; debug is handled correctly.
+
+; LLVM IR was generated the following way:
+; $ cat a.cpp
+; void f1();
+; __attribute__((always_inline)) void f2() {
+; f1();
+; }
+; void f3();
+; void f4() {
+; f3();
+; }
+; $ cat b.cpp
+; void f2();
+; __attribute__((always_inline)) void f3() {
+; f2();
+; }
+; $ clang++ -flto a.cpp -g -c
+; $ clang++ -flto b.cpp -Rpass=inline -c
+; $ llvm-link {a,b}.o -o - | opt -O2 - -o ab.bc
+; $ llvm-dis ab.bc
+
+; Ensure we can successfully generate assembly, and check that neither
+; the "b.cpp" nor the "f3" string shows up (they would be in the .debug_str
+; section if we had generated any lexical scopes and debug info for them).
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu %s -o - | FileCheck %s
+; CHECK-NOT: .asciz "b.cpp"
+; CHECK-NOT: .asciz "f3"
+
+; ModuleID = 'debug_and_nodebug_CUs.bc'
+source_filename = "debug_and_nodebug_CUs.ll"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @_Z2f2v() local_unnamed_addr !dbg !8 {
+entry:
+ tail call void @_Z2f1v(), !dbg !11
+ ret void, !dbg !12
+}
+
+declare void @_Z2f1v() local_unnamed_addr
+
+define void @_Z2f4v() local_unnamed_addr !dbg !13 {
+entry:
+ tail call void @_Z2f1v(), !dbg !14
+ ret void, !dbg !19
+}
+
+define void @_Z2f3v() local_unnamed_addr !dbg !16 {
+entry:
+ tail call void @_Z2f1v(), !dbg !20
+ ret void, !dbg !22
+}
+
+!llvm.dbg.cu = !{!0, !3}
+!llvm.ident = !{!5, !5}
+!llvm.module.flags = !{!6, !7}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 5.0.0 (trunk 294362) (llvm/trunk 294367)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "a.cpp", directory: ".")
+!2 = !{}
+!3 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !4, producer: "clang version 5.0.0 (trunk 294362) (llvm/trunk 294367)", isOptimized: false, runtimeVersion: 0, emissionKind: NoDebug, enums: !2)
+!4 = !DIFile(filename: "b.cpp", directory: ".")
+!5 = !{!"clang version 5.0.0 (trunk 294362) (llvm/trunk 294367)"}
+!6 = !{i32 2, !"Dwarf Version", i32 4}
+!7 = !{i32 2, !"Debug Info Version", i32 3}
+!8 = distinct !DISubprogram(name: "f2", linkageName: "_Z2f2v", scope: !1, file: !1, line: 2, type: !9, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!9 = !DISubroutineType(types: !10)
+!10 = !{null}
+!11 = !DILocation(line: 3, column: 3, scope: !8)
+!12 = !DILocation(line: 4, column: 1, scope: !8)
+!13 = distinct !DISubprogram(name: "f4", linkageName: "_Z2f4v", scope: !1, file: !1, line: 6, type: !9, isLocal: false, isDefinition: true, scopeLine: 6, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!14 = !DILocation(line: 3, column: 3, scope: !8, inlinedAt: !15)
+!15 = distinct !DILocation(line: 3, column: 3, scope: !16, inlinedAt: !18)
+!16 = distinct !DISubprogram(name: "f3", scope: !4, file: !4, line: 2, type: !17, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: false, unit: !3, variables: !2)
+!17 = !DISubroutineType(types: !2)
+!18 = distinct !DILocation(line: 7, column: 3, scope: !13)
+!19 = !DILocation(line: 8, column: 1, scope: !13)
+!20 = !DILocation(line: 3, column: 3, scope: !8, inlinedAt: !21)
+!21 = distinct !DILocation(line: 3, column: 3, scope: !16)
+!22 = !DILocation(line: 4, column: 1, scope: !16)
diff --git a/test/DebugInfo/X86/default-subrange-array.ll b/test/DebugInfo/X86/default-subrange-array.ll
new file mode 100644
index 000000000000..564e195a36f6
--- /dev/null
+++ b/test/DebugInfo/X86/default-subrange-array.ll
@@ -0,0 +1,53 @@
+; RUN: llc -mtriple=x86_64-apple-darwin -O0 -filetype=obj -dwarf-version 4 \
+; RUN: -o - < %s | llvm-dwarfdump - -debug-dump=info \
+; RUN: | FileCheck %s -check-prefixes=CHECK,DWARF4
+; RUN: llc -mtriple=x86_64-apple-darwin -O0 -filetype=obj -dwarf-version 5 \
+; RUN: -o - < %s | llvm-dwarfdump - -debug-dump=info \
+; RUN: | FileCheck %s -check-prefixes=CHECK,DWARF5
+
+; Check that we can omit default array lower-bounds.
+; DW_LANG_C_plus_plus_11 is new in DWARF v5, so if we use that with
+; DWARF v4, we should get the DW_AT_lower_bound attribute.
+
+source_filename = "test/DebugInfo/X86/default-subrange-array.ll"
+
+%class.A = type { [42 x i32] }
+
+@a = global %class.A zeroinitializer, align 4, !dbg !0
+
+; CHECK: DW_TAG_class_type
+; CHECK: DW_TAG_member
+; CHECK-NEXT: DW_AT_name {{.*}} "x"
+; CHECK-NEXT: DW_AT_type [DW_FORM_ref4] {{.*}} => {[[ARRAY:0x[0-9a-f]+]]})
+
+; CHECK: [[ARRAY]]: DW_TAG_array_type
+; CHECK-NEXT: DW_AT_type
+; CHECK: DW_TAG_subrange_type
+; CHECK-NEXT: DW_AT_type
+; DWARF4-NEXT: DW_AT_lower_bound [DW_FORM_data1] (0x00)
+; CHECK-NEXT: DW_AT_count [DW_FORM_data1] (0x2a)
+; DWARF5-NOT: DW_AT_lower_bound
+
+
+!llvm.dbg.cu = !{!14}
+!llvm.module.flags = !{!17}
+
+!0 = !DIGlobalVariableExpression(var: !1)
+!1 = !DIGlobalVariable(name: "a", scope: null, file: !2, line: 1, type: !3, isLocal: false, isDefinition: true)
+!2 = !DIFile(filename: "t.cpp", directory: "/Volumes/Sandbox/llvm")
+!3 = !DICompositeType(tag: DW_TAG_class_type, name: "A", file: !2, line: 1, align: 32, elements: !4)
+!4 = !{!5, !10}
+!5 = !DIDerivedType(tag: DW_TAG_member, name: "x", scope: !3, file: !2, line: 1, baseType: !6, flags: DIFlagPrivate)
+!6 = !DICompositeType(tag: DW_TAG_array_type, baseType: !7, align: 32, elements: !8)
+!7 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
+!8 = !{!9}
+!9 = !DISubrange(count: 42, lowerBound: 0)
+!10 = !DISubprogram(name: "A", scope: !3, file: !2, line: 1, type: !11, isLocal: false, isDefinition: false, scopeLine: 1, virtualIndex: 6, flags: DIFlagArtificial | DIFlagPrototyped, isOptimized: false)
+!11 = !DISubroutineType(types: !12)
+!12 = !{null, !13}
+!13 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !3, size: 64, align: 64, flags: DIFlagArtificial | DIFlagObjectPointer)
+!14 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_11, file: !2, producer: "clang version 3.3 (trunk 169136)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !15, retainedTypes: !15, globals: !16, imports: !15)
+!15 = !{}
+!16 = !{!0}
+!17 = !{i32 1, !"Debug Info Version", i32 3}
+
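For reference, the lowerBound field above is what the two RUN prefixes pivot on: for C-family languages the default array lower bound is 0, so DISubrange may omit it entirely. A minimal metadata sketch (node numbers hypothetical, not from this test):

!100 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
!101 = !DISubrange(count: 42, lowerBound: 0) ; lower bound spelled out explicitly
!102 = !DISubrange(count: 42)                ; omitted: 0 is assumed for C/C++
!103 = !DICompositeType(tag: DW_TAG_array_type, baseType: !100, align: 32, elements: !104)
!104 = !{!102}

Whether the backend then re-materializes DW_AT_lower_bound depends on the DWARF version, which is exactly what the DWARF4/DWARF5 check prefixes above distinguish.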
diff --git a/test/DebugInfo/X86/discriminator.ll b/test/DebugInfo/X86/discriminator.ll
index 49b2326ac744..a040137adec4 100644
--- a/test/DebugInfo/X86/discriminator.ll
+++ b/test/DebugInfo/X86/discriminator.ll
@@ -59,4 +59,4 @@ attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointe
; CHECK: Address Line Column File ISA Discriminator Flags
; CHECK: ------------------ ------ ------ ------ --- ------------- -------------
-; CHECK: 0x0000000000000011 2 0 1 0 42 {{$}}
+; CHECK: 0x000000000000000a 2 0 1 0 42 {{$}}
diff --git a/test/DebugInfo/X86/dw_op_minus_direct.ll b/test/DebugInfo/X86/dw_op_minus_direct.ll
index a84c506b90a7..29e07213abbb 100644
--- a/test/DebugInfo/X86/dw_op_minus_direct.ll
+++ b/test/DebugInfo/X86/dw_op_minus_direct.ll
@@ -8,8 +8,8 @@
; CHECK: Beginning address offset: 0x0000000000000000
; CHECK: Ending address offset: 0x0000000000000004
-; CHECK: Location description: 50 10 01 1c 93 04
-; rax, constu 0x00000001, minus, piece 0x00000004
+; CHECK: Location description: 50 10 ff ff ff ff 0f 1a 10 01 1c
+; rax, constu 0xffffffff, and, constu 0x00000001, minus
source_filename = "minus.c"
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.12.0"
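Decoding the two byte strings above (opcode values per the DWARF 4 specification; an informal annotation, not part of the test):

; old: 50 10 01 1c 93 04
;   50                 DW_OP_reg0 (rax)
;   10 01              DW_OP_constu 0x1
;   1c                 DW_OP_minus
;   93 04              DW_OP_piece 4
; new: 50 10 ff ff ff ff 0f 1a 10 01 1c
;   50                 DW_OP_reg0 (rax)
;   10 ff ff ff ff 0f  DW_OP_constu 0xffffffff (ULEB128-encoded)
;   1a                 DW_OP_and          ; truncate the subtraction to 32 bits
;   10 01              DW_OP_constu 0x1
;   1c                 DW_OP_minus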
diff --git a/test/DebugInfo/X86/externaltyperef.ll b/test/DebugInfo/X86/externaltyperef.ll
deleted file mode 100644
index 4cd7f8c00c77..000000000000
--- a/test/DebugInfo/X86/externaltyperef.ll
+++ /dev/null
@@ -1,52 +0,0 @@
-; REQUIRES: object-emission
-; RUN: %llc_dwarf -filetype=obj -O0 < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
-; Manually derived by externalizing the composite types from:
-;
-; namespace N { class B; }
-; using N::B;
-; class A;
-; A *a;
-;
-; Test the direct use of an external type.
-; CHECK: DW_TAG_variable
-; CHECK: DW_AT_type [DW_FORM_ref4] {{.*}}{[[PTR:.*]]}
-; CHECK: [[PTR]]: DW_TAG_pointer_type
-; CHECK: DW_AT_type [DW_FORM_ref4] {{.*}}{[[A:.*]]}
-; CHECK: [[A]]: DW_TAG_class_type
-; CHECK: DW_AT_declaration [DW_FORM_flag] (0x01)
-; CHECK: DW_AT_signature [DW_FORM_ref_sig8] (0x4e834ea939695c24)
-; CHECK: [[B:.*]]: DW_TAG_class_type
-; CHECK: DW_AT_declaration [DW_FORM_flag] (0x01)
-; CHECK: DW_AT_signature [DW_FORM_ref_sig8] (0x942e51c7addda5f7)
-; CHECK: DW_TAG_imported_declaration
-; CHECK: DW_AT_import [DW_FORM_ref4] {{.*}}[[B]]
-
-source_filename = "test/DebugInfo/X86/externaltyperef.ll"
-target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-apple-macosx10.10.0"
-
-%class.A = type opaque
-
-@a = global %class.A* null, align 8, !dbg !0
-
-!llvm.dbg.cu = !{!2}
-!llvm.module.flags = !{!12, !13, !14}
-!llvm.ident = !{!15}
-
-!0 = !DIGlobalVariableExpression(var: !1)
-!1 = !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 2, type: !11, isLocal: false, isDefinition: true)
-!2 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 3.7.0 (trunk 242039) (llvm/trunk 242046)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, retainedTypes: !5, globals: !8, imports: !9)
-!3 = !DIFile(filename: "test.cpp", directory: "/")
-!4 = !{}
-!5 = !{!6, !7}
-!6 = !DICompositeType(tag: DW_TAG_class_type, name: "A", file: !3, flags: DIFlagExternalTypeRef, identifier: "_ZTS1A")
-!7 = !DICompositeType(tag: DW_TAG_class_type, name: "A", file: !3, flags: DIFlagExternalTypeRef, identifier: "_ZTSN1N1BE")
-!8 = !{!0}
-!9 = !{!10}
-!10 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !2, entity: !7, line: 4)
-!11 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !6, size: 64, align: 64)
-!12 = !{i32 2, !"Dwarf Version", i32 2}
-!13 = !{i32 2, !"Debug Info Version", i32 3}
-!14 = !{i32 1, !"PIC Level", i32 2}
-!15 = !{!"clang version 3.7.0 (trunk 242039) (llvm/trunk 242046)"}
-
diff --git a/test/DebugInfo/X86/fission-ranges.ll b/test/DebugInfo/X86/fission-ranges.ll
index 0dfb13ab66b7..60d0f1777a43 100644
--- a/test/DebugInfo/X86/fission-ranges.ll
+++ b/test/DebugInfo/X86/fission-ranges.ll
@@ -30,16 +30,16 @@
; CHECK-NEXT: {{^$}}
; CHECK-NEXT: Beginning address index: 3
; CHECK-NEXT: Length: 25
-; CHECK-NEXT: Location description: 50 93 04
+; CHECK-NEXT: Location description: 50
; CHECK: [[E]]: Beginning address index: 4
; CHECK-NEXT: Length: 19
-; CHECK-NEXT: Location description: 50 93 04
+; CHECK-NEXT: Location description: 50
; CHECK: [[B]]: Beginning address index: 5
; CHECK-NEXT: Length: 17
-; CHECK-NEXT: Location description: 50 93 04
+; CHECK-NEXT: Location description: 50
; CHECK: [[D]]: Beginning address index: 6
; CHECK-NEXT: Length: 17
-; CHECK-NEXT: Location description: 50 93 04
+; CHECK-NEXT: Location description: 50
; Make sure we don't produce any relocations in any .dwo section (in particular, debug_info.dwo)
; HDR-NOT: .rela.{{.*}}.dwo
diff --git a/test/DebugInfo/X86/gnu-public-names-tu.ll b/test/DebugInfo/X86/gnu-public-names-tu.ll
new file mode 100644
index 000000000000..0b7647aa8c78
--- /dev/null
+++ b/test/DebugInfo/X86/gnu-public-names-tu.ll
@@ -0,0 +1,54 @@
+; RUN: llc -mtriple=x86_64-pc-linux-gnu -generate-type-units -generate-gnu-dwarf-pub-sections -filetype=obj < %s | llvm-dwarfdump - | FileCheck %s
+
+; Generated from:
+
+; namespace ns {
+; struct foo {
+; };
+; }
+; struct bar {
+; ns::foo f;
+; };
+; bar b;
+
+; CHECK-LABEL: .debug_info contents:
+; CHECK: [[CU:0x[0-9a-f]+]]: DW_TAG_compile_unit
+; CHECK: [[BAR:0x[0-9a-f]+]]: DW_TAG_structure_type
+
+
+; CHECK-LABEL: .debug_gnu_pubnames contents:
+; CHECK-NEXT: length = {{.*}} version = 0x0002 unit_offset = 0x00000000 unit_size = {{.*}}
+; CHECK-NEXT: Offset Linkage Kind Name
+; CHECK-NEXT: [[CU]] EXTERNAL TYPE "ns"
+; CHECK-NEXT: {{.*}} EXTERNAL VARIABLE "b"
+
+; CHECK-LABEL: .debug_gnu_pubtypes contents:
+; CHECK-NEXT: length = {{.*}} version = 0x0002 unit_offset = 0x00000000 unit_size = {{.*}}
+; CHECK-NEXT: Offset Linkage Kind Name
+; CHECK-NEXT: [[BAR]] EXTERNAL TYPE "bar"
+; CHECK-NEXT: [[CU]] EXTERNAL TYPE "ns::foo"
+
+%struct.bar = type { %"struct.ns::foo" }
+%"struct.ns::foo" = type { i8 }
+
+@b = global %struct.bar zeroinitializer, align 1, !dbg !0
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!11, !12}
+!llvm.ident = !{!13}
+
+!0 = !DIGlobalVariableExpression(var: !1)
+!1 = distinct !DIGlobalVariable(name: "b", scope: !2, file: !3, line: 8, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 5.0.0 (trunk 293904) (llvm/trunk 293908)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
+!3 = !DIFile(filename: "type.cpp", directory: "/tmp/dbginfo")
+!4 = !{}
+!5 = !{!0}
+!6 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "bar", file: !3, line: 5, size: 8, elements: !7, identifier: "_ZTS3bar")
+!7 = !{!8}
+!8 = !DIDerivedType(tag: DW_TAG_member, name: "f", scope: !6, file: !3, line: 6, baseType: !9, size: 8)
+!9 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "foo", scope: !10, file: !3, line: 2, size: 8, elements: !4, identifier: "_ZTSN2ns3fooE")
+!10 = !DINamespace(name: "ns", scope: null, file: !3, line: 1)
+!11 = !{i32 2, !"Dwarf Version", i32 4}
+!12 = !{i32 2, !"Debug Info Version", i32 3}
+!13 = !{!"clang version 5.0.0 (trunk 293904) (llvm/trunk 293908)"}
+
diff --git a/test/DebugInfo/X86/ref_addr_relocation.ll b/test/DebugInfo/X86/ref_addr_relocation.ll
index 58fc236e6bbb..373ccfd2dea4 100644
--- a/test/DebugInfo/X86/ref_addr_relocation.ll
+++ b/test/DebugInfo/X86/ref_addr_relocation.ll
@@ -46,7 +46,7 @@
; CHECK: DW_TAG_variable
; Make sure this is relocatable.
; and test that we don't create the labels to emit a correct COFF relocation
-; ELF-ASM: .quad .Lsection_info+[[TYPE]] # DW_AT_type
+; ELF-ASM: .quad .debug_info+[[TYPE]] # DW_AT_type
; COFF-ASM: .secrel32 .Lsection_info+[[TYPE]] # DW_AT_type
; DARWIN-ASM2: .quad [[TYPE]] ## DW_AT_type
; DARWIN-ASM4: .long [[TYPE]] ## DW_AT_type
diff --git a/test/DebugInfo/X86/single-dbg_value.ll b/test/DebugInfo/X86/single-dbg_value.ll
index 0275c37d24e7..7f77e61092db 100644
--- a/test/DebugInfo/X86/single-dbg_value.ll
+++ b/test/DebugInfo/X86/single-dbg_value.ll
@@ -8,8 +8,8 @@
; CHECK-NEXT: DW_AT_location [DW_FORM_data4]
; CHECK-NEXT: DW_AT_name{{.*}}"a"
; CHECK: .debug_loc contents:
-; rax, piece 0x00000004
-; CHECK: Location description: 50 93 04
+; rax
+; CHECK: Location description: 50
; SANITY: DBG_VALUE
; SANITY-NOT: DBG_VALUE
; ModuleID = 'test.ll'
diff --git a/test/DebugInfo/X86/single-fi.ll b/test/DebugInfo/X86/single-fi.ll
new file mode 100644
index 000000000000..1de4a3bac595
--- /dev/null
+++ b/test/DebugInfo/X86/single-fi.ll
@@ -0,0 +1,40 @@
+; RUN: llc -mtriple=x86_64-apple-darwin -o - %s -filetype=obj \
+; RUN: | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+; A single frame-index (FI) location. This used to trigger an assertion in debug libstdc++.
+; CHECK: DW_TAG_formal_parameter
+; fbreg -8
+; CHECK-NEXT: DW_AT_location {{.*}} (<0x2> 91 78 )
+; CHECK-NEXT: DW_AT_name {{.*}} "dipsy"
+define void @tinkywinky(i8* %dipsy) !dbg !6 {
+entry:
+ %dipsy.addr = alloca i8*
+ store i8* %dipsy, i8** %dipsy.addr
+ call void @llvm.dbg.declare(metadata i8** %dipsy.addr, metadata !12, metadata
+!13), !dbg !14
+ ret void, !dbg !15
+}
+
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+!llvm.ident = !{!5}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 (trunk 297917) (llvm/trunk 297929)", isOptimized: false,
+runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "teletubbies.c", directory: "/home/davide/work/llvm/build-clang/bin")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{!"clang version 5.0.0 (trunk 297917) (llvm/trunk 297929)"}
+!6 = distinct !DISubprogram(name: "tinkywinky", scope: !1, file: !1, line: 1, type: !7, isLocal: false, isDefinition: true, scopeLine: 1, flags:
+DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!7 = !DISubroutineType(types: !8)
+!8 = !{null, !9}
+!9 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !10, size: 64)
+!10 = !DIDerivedType(tag: DW_TAG_const_type, baseType: !11)
+!11 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char)
+!12 = !DILocalVariable(name: "dipsy", arg: 1, scope: !6, file: !1, line: 1, type: !9)
+!13 = !DIExpression()
+!14 = !DILocation(line: 1, column: 29, scope: !6)
+!15 = !DILocation(line: 1, column: 37, scope: !6)
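The expected location (<0x2> 91 78) above decodes as follows (an informal annotation):

;   91   DW_OP_fbreg
;   78   SLEB128 operand: bit 6 is set, so sign-extend: 0x78 - 0x80 = -8
; i.e. the parameter lives at frame base - 8, matching the "fbreg -8" comment.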
diff --git a/test/DebugInfo/X86/split-global.ll b/test/DebugInfo/X86/split-global.ll
index 536ed045b5d6..3cdecdafc8d4 100644
--- a/test/DebugInfo/X86/split-global.ll
+++ b/test/DebugInfo/X86/split-global.ll
@@ -30,7 +30,7 @@ target triple = "x86_64-apple-macosx10.12.0"
@point.y = global i32 2, align 4, !dbg !13
@point.x = global i32 1, align 4, !dbg !12
-@part_const.x = global i32 1, align 4, !dbg !15
+@part_const.x = global i32 1, align 4, !dbg !14
!llvm.dbg.cu = !{!1}
!llvm.module.flags = !{!10, !11}
diff --git a/test/DebugInfo/X86/stack-value-dwarf4.ll b/test/DebugInfo/X86/stack-value-dwarf4.ll
index 5f9213f3bdd4..7ad7cceb7ff0 100644
--- a/test/DebugInfo/X86/stack-value-dwarf4.ll
+++ b/test/DebugInfo/X86/stack-value-dwarf4.ll
@@ -1,38 +1,42 @@
; RUN: llc -o - %s | FileCheck --check-prefix=CHECK-DWARF2 %s
; RUN: llc -dwarf-version=4 -o - %s | FileCheck --check-prefix=CHECK-DWARF4 %s
+; Exercise DW_OP_stack_value on global constants.
+
target datalayout = "e-p:64:64"
target triple = "x86_64-unknown-linux-gnu"
-; CHECK-DWARF2: .byte 13 # DW_AT_location
-; CHECK-DWARF2-NEXT: .byte 3
-; CHECK-DWARF2-NEXT: .quad g
-; CHECK-DWARF2-NEXT: .byte 16
-; CHECK-DWARF2-NEXT: .byte 4
-; CHECK-DWARF2-NEXT: .byte 16
-; CHECK-DWARF2-NEXT: .byte 4
-
-; CHECK-DWARF4: .byte 14 # DW_AT_location
-; CHECK-DWARF4-NEXT: .byte 3
-; CHECK-DWARF4-NEXT: .quad g
-; CHECK-DWARF4-NEXT: .byte 16
-; CHECK-DWARF4-NEXT: .byte 4
-; CHECK-DWARF4-NEXT: .byte 16
-; CHECK-DWARF4-NEXT: .byte 4
-; CHECK-DWARF4-NEXT: .byte 159
+; CHECK-DWARF2: .byte 8 # DW_AT_location
+; CHECK-DWARF2 .byte 16
+; CHECK-DWARF2 .byte 4
+; CHECK-DWARF2 .byte 147
+; CHECK-DWARF2 .byte 2
+; CHECK-DWARF2 .byte 16
+; CHECK-DWARF2 .byte 0
+; CHECK-DWARF2 .byte 147
+; CHECK-DWARF2 .byte 2
-@g = global i32 0, !dbg !2
+; CHECK-DWARF4: .byte 10 # DW_AT_location
+; CHECK-DWARF4-NEXT: .byte 16
+; CHECK-DWARF4-NEXT: .byte 4
+; CHECK-DWARF4-NEXT: .byte 159
+; CHECK-DWARF4-NEXT: .byte 147
+; CHECK-DWARF4-NEXT: .byte 2
+; CHECK-DWARF4-NEXT: .byte 16
+; CHECK-DWARF4-NEXT: .byte 0
+; CHECK-DWARF4-NEXT: .byte 159
!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang", file: !4, globals: !1, emissionKind: FullDebug)
-!1 = !{!2}
+!1 = !{!2, !10}
!2 = !DIGlobalVariableExpression(var: !8, expr: !3)
-!3 = !DIExpression(DW_OP_constu, 4, DW_OP_constu, 4, DW_OP_stack_value)
+!3 = !DIExpression(DW_OP_constu, 4, DW_OP_stack_value, DW_OP_LLVM_fragment, 0, 16)
!4 = !DIFile(filename: "<stdin>", directory: "/")
!5 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
-
!6 = !{i32 2, !"Dwarf Version", i32 2}
!7 = !{i32 2, !"Debug Info Version", i32 3}
!8 = distinct !DIGlobalVariable(name: "a", scope: null, isLocal: false, isDefinition: true, type: !5)
+!9 = !DIExpression(DW_OP_constu, 0, DW_OP_stack_value, DW_OP_LLVM_fragment, 16, 16)
+!10 = !DIGlobalVariableExpression(var: !8, expr: !9)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!6, !7}
diff --git a/test/DebugInfo/X86/subreg.ll b/test/DebugInfo/X86/subreg.ll
index 5e837edfd2bf..30c672396e4e 100644
--- a/test/DebugInfo/X86/subreg.ll
+++ b/test/DebugInfo/X86/subreg.ll
@@ -4,8 +4,9 @@
; being in its superregister.
; CHECK: .byte 80 # super-register DW_OP_reg0
-; CHECK-NEXT: .byte 147 # DW_OP_piece
-; CHECK-NEXT: .byte 2 # 2
+; No need to emit a piece at offset 0.
+; CHECK-NOT: DW_OP_piece
+; CHECK-NOT: DW_OP_bit_piece
define i16 @f(i16 signext %zzz) nounwind !dbg !1 {
entry:
diff --git a/test/DebugInfo/X86/subregisters.ll b/test/DebugInfo/X86/subregisters.ll
index d40be0d9e3c1..99f7a10e443b 100644
--- a/test/DebugInfo/X86/subregisters.ll
+++ b/test/DebugInfo/X86/subregisters.ll
@@ -2,7 +2,7 @@
; RUN: llvm-dwarfdump %t.o | FileCheck %s
;
; Test that on x86_64, the 32-bit subregister esi is emitted as
-; DW_OP_piece 32 of the 64-bit rsi.
+; subregister of the 64-bit rsi.
;
; rdar://problem/16015314
;
@@ -11,8 +11,8 @@
; CHECK-NEXT: DW_AT_location [DW_FORM_data4] (0x00000000)
; CHECK-NEXT: DW_AT_name [DW_FORM_strp]{{.*}} "a"
; CHECK: .debug_loc contents:
-; rsi, piece 0x00000004
-; CHECK: Location description: 54 93 04
+; rsi
+; CHECK: Location description: 54
;
; struct bar {
; int a;
diff --git a/test/DebugInfo/X86/tls.ll b/test/DebugInfo/X86/tls.ll
index 19570d0e0c0a..b6ea213dd748 100644
--- a/test/DebugInfo/X86/tls.ll
+++ b/test/DebugInfo/X86/tls.ll
@@ -78,7 +78,6 @@
; check that the expected TLS address description is the first thing in the debug_addr section
; FISSION: .section .debug_addr
-; FISSION: addr_sec:
; FISSION-NEXT: .quad tls@DTPOFF
; FISSION-NEXT: .quad glbl
; FISSION-NOT: .quad glbl
diff --git a/test/DebugInfo/dwarfdump-header.test b/test/DebugInfo/dwarfdump-header.test
new file mode 100644
index 000000000000..3947c8b438d2
--- /dev/null
+++ b/test/DebugInfo/dwarfdump-header.test
@@ -0,0 +1,29 @@
+RUN: llvm-dwarfdump %p/Inputs/dwarfdump-header.elf-x86-64 | FileCheck %s
+
+The input file is hand-coded assembler to generate all the units,
+so we're willing to make exact checks for offsets and such.
+
+CHECK-LABEL: .debug_info contents:
+
+The v4 CU header.
+
+CHECK: 0x00000000: Compile Unit: length = 0x00000011 version = 0x0004 abbr_offset = 0x0000 addr_size = 0x08 (next unit at 0x00000015)
+CHECK: 0x0000000b: DW_TAG_compile_unit
+
+The v5 normal CU header.
+
+CHECK: 0x00000015: Compile Unit: length = 0x00000012 version = 0x0005 unit_type = DW_UT_compile abbr_offset = 0x0000 addr_size = 0x08 (next unit at 0x0000002b)
+CHECK: 0x00000021: DW_TAG_compile_unit
+
+CHECK-LABEL: .debug_types contents:
+
+The v4 type unit header.
+
+CHECK: 0x00000000: Type Unit: length = 0x0000001f version = 0x0004 abbr_offset = 0x0000 addr_size = 0x08 name = 'V4_type_unit' type_signature = 0x0011223344556677 type_offset = 0x001c (next unit at 0x00000023)
+CHECK: 0x00000017: DW_TAG_type_unit
+
+FIXME: DWARF v5 wants type units in .debug_info[.dwo] not .debug_types[.dwo].
+CHECK: .debug_types.dwo contents:
+
+CHECK: 0x00000000: Type Unit: length = 0x00000020 version = 0x0005 unit_type = DW_UT_split_type abbr_offset = 0x0000 addr_size = 0x08 name = 'V5_split_type_unit' type_signature = 0x8899aabbccddeeff type_offset = 0x001d (next unit at 0x00000024)
+CHECK: 0x00000018: DW_TAG_type_unit
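The DIE offsets checked above follow directly from the two header layouts: DWARF v5 added a unit_type byte and moved address_size ahead of the abbreviation-table offset (a summary annotation, not part of the test):

; v4 CU header, 11 bytes: length(4) version(2) abbr_offset(4) addr_size(1)
;   -> first DIE at 0x0b
; v5 CU header, 12 bytes: length(4) version(2) unit_type(1) addr_size(1) abbr_offset(4)
;   -> first DIE at unit start + 0x0c (0x15 + 0xc = 0x21 above)
; Type units append type_signature(8) and type_offset(4): first DIE at 0x17 (v4) / 0x18 (v5).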
diff --git a/test/DebugInfo/strip-loop-metadata.ll b/test/DebugInfo/strip-loop-metadata.ll
new file mode 100644
index 000000000000..e0d8cdfaf469
--- /dev/null
+++ b/test/DebugInfo/strip-loop-metadata.ll
@@ -0,0 +1,124 @@
+; RUN: opt -S -strip-debug < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.12.0"
+
+; CHECK-LABEL: _Z5test1v
+; CHECK-NOT: br {{.*}} !llvm.loop
+define void @_Z5test1v() !dbg !7 {
+entry:
+ br label %while.body, !dbg !9
+
+while.body:
+ call void @_Z3barv(), !dbg !10
+ br label %while.body, !dbg !11, !llvm.loop !13
+
+return:
+ ret void, !dbg !14
+}
+
+declare void @_Z3barv()
+declare i1 @_Z3bazv()
+
+; CHECK-LABEL: _Z5test2v
+; CHECK: br {{.*}} !llvm.loop [[LOOP:![0-9]+]]
+define void @_Z5test2v() !dbg !15 {
+entry:
+ br label %while.body, !dbg !16
+
+while.body:
+ call void @_Z3barv(), !dbg !17
+ br label %while.body, !dbg !18, !llvm.loop !19
+
+return:
+ ret void, !dbg !21
+}
+
+; CHECK-LABEL: _Z5test3v
+define void @_Z5test3v() !dbg !22 {
+entry:
+ br label %while.body, !dbg !23
+
+while.body:
+ %c = call i1 @_Z3bazv()
+ br i1 %c, label %if, label %then
+
+if:
+ call void @_Z3barv(), !dbg !24
+; CHECK: br {{.*}} !llvm.loop [[LOOP2:![0-9]+]]
+ br label %while.body, !dbg !25, !llvm.loop !26
+
+then:
+; CHECK: br {{.*}} !llvm.loop [[LOOP2]]
+ br label %while.body, !dbg !25, !llvm.loop !26
+
+return:
+ ret void, !dbg !28
+}
+
+; CHECK-LABEL: _Z5test4v
+; CHECK-NOT: br {{.*}} !llvm.loop
+define void @_Z5test4v() !dbg !30 {
+entry:
+ br label %while.body, !dbg !31
+
+while.body:
+ call void @_Z3barv(), !dbg !32
+ br label %while.body, !dbg !33, !llvm.loop !34
+
+return:
+ ret void, !dbg !36
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 4.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: LineTablesOnly, enums: !2)
+!1 = !DIFile(filename: "test.cpp", directory: "/tmp")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 1, !"PIC Level", i32 2}
+!6 = !{!"clang version 4.0.0"}
+!7 = distinct !DISubprogram(name: "test1", scope: !1, file: !1, line: 3, type: !8, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!8 = !DISubroutineType(types: !2)
+!9 = !DILocation(line: 4, column: 3, scope: !7)
+!10 = !DILocation(line: 5, column: 5, scope: !7)
+!11 = !DILocation(line: 4, column: 3, scope: !12)
+!12 = !DILexicalBlockFile(scope: !7, file: !1, discriminator: 1)
+!13 = distinct !{!13, !9}
+!14 = !DILocation(line: 6, column: 1, scope: !7)
+!15 = distinct !DISubprogram(name: "test2", scope: !1, file: !1, line: 8, type: !8, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!16 = !DILocation(line: 8, column: 14, scope: !15)
+!17 = !DILocation(line: 11, column: 5, scope: !15)
+!18 = !DILocation(line: 10, column: 3, scope: !15)
+!19 = distinct !{!19, !16, !20}
+!20 = !{!"llvm.loop.unroll.enable"}
+!21 = !DILocation(line: 12, column: 1, scope: !15)
+!22 = distinct !DISubprogram(name: "test3", scope: !1, file: !1, line: 8, type: !8, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!23 = !DILocation(line: 8, column: 14, scope: !22)
+!24 = !DILocation(line: 11, column: 5, scope: !22)
+!25 = !DILocation(line: 10, column: 3, scope: !22)
+!26 = distinct !{!26, !23, !29, !27}
+!27 = !{!"llvm.loop.unroll.enable"}
+!28 = !DILocation(line: 12, column: 1, scope: !22)
+!29 = !DILocation(line: 12, column: 1, scope: !22)
+!30 = distinct !DISubprogram(name: "test4", scope: !1, file: !1, line: 8, type: !8, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!31 = !DILocation(line: 8, column: 14, scope: !30)
+!32 = !DILocation(line: 11, column: 5, scope: !30)
+!33 = !DILocation(line: 10, column: 3, scope: !30)
+!34 = distinct !{!34, !31, !35}
+!35 = !DILocation(line: 12, column: 1, scope: !30)
+!36 = !DILocation(line: 12, column: 1, scope: !30)
+
+; CHECK-NOT: !DICompileUnit
+; CHECK-NOT: !DIFile
+; CHECK-NOT: !DISubprogram
+; CHECK-NOT: !DISubroutineType
+; CHECK-NOT: !DILocation
+; CHECK-NOT: !DILexicalBlockFile
+; CHECK: [[LOOP]] = distinct !{[[LOOP]], [[LOOP_UNROLL:![0-9]+]]}
+; CHECK-NEXT: [[LOOP_UNROLL]] = !{!"llvm.loop.unroll.enable"}
+; CHECK: [[LOOP2]] = distinct !{[[LOOP2]], [[LOOP_UNROLL]]}
+; CHECK-NOT: !DILocation
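The transformation under test, in sketch form: !llvm.loop nodes are distinct and self-referential, and -strip-debug must filter out the DILocation operands while preserving loop hints (illustrative nodes, mirroring !19/!20 above):

; before -strip-debug:
!19 = distinct !{!19, !16, !20}   ; self-reference, a debug location, a hint
!20 = !{!"llvm.loop.unroll.enable"}
; after -strip-debug: the location is dropped, the hint kept, the node stays distinct
!19 = distinct !{!19, !20}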
diff --git a/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_PIC_relocations.s b/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_PIC_relocations.s
new file mode 100644
index 000000000000..ba00afc7ad99
--- /dev/null
+++ b/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_PIC_relocations.s
@@ -0,0 +1,46 @@
+# RUN: llvm-mc -triple=arm64-none-linux-gnu -filetype=obj -o %T/pic-reloc.o %s
+# RUN: llvm-rtdyld -triple=arm64-none-linux-gnu -verify -check=%s %T/pic-reloc.o \
+# RUN: -map-section pic-reloc.o,.got=0x20000 -dummy-extern f=0x1234 -dummy-extern g=0x5678
+
+_s:
+ nop
+_a1:
+ adrp x8, :got:f
+_a2:
+ adrp x9, :got:g
+_a3:
+ adrp x10, :got:_s
+_l1:
+ ldr x8, [x8, :got_lo12:f]
+_l2:
+ ldr x9, [x9, :got_lo12:g]
+_l3:
+ ldr x10, [x10, :got_lo12:_s]
+
+
+## We'll end up with two sections, .text and .got,
+## each located at the start of a memory page
+
+## Test that the .got section has three entries pointing to f, g and _s
+# *{8}section_addr(pic-reloc.o, .got) = f
+# *{8}(section_addr(pic-reloc.o, .got) + 8) = g
+# *{8}(section_addr(pic-reloc.o, .got) + 16) = _s
+
+## Test that the first adrp instruction really takes the address of
+## the .got section (the _s label is at the start of a page)
+# rtdyld-check: _s + (((*{4}_a1)[30:29] + ((*{4}_a1)[23:5] << 2)) << 12) = section_addr(pic-reloc.o, .got)
+
+## Test that the second adrp takes the address of .got
+# rtdyld-check: _s + (((*{4}_a2)[30:29] + ((*{4}_a2)[23:5] << 2)) << 12) = section_addr(pic-reloc.o, .got)
+
+## Test that the third adrp takes the address of .got
+# rtdyld-check: _s + (((*{4}_a3)[30:29] + ((*{4}_a3)[23:5] << 2)) << 12) = section_addr(pic-reloc.o, .got)
+
+## Test that the first ldr immediate value is 0 >> 3 = 0 (1st .got entry)
+# rtdyld-check: (*{4}_l1)[21:10] = 0
+
+## Test that the second ldr immediate value is 8 >> 3 = 1 (2nd .got entry)
+# rtdyld-check: (*{4}_l2)[21:10] = 1
+
+## Test that the third ldr immediate value is 16 >> 3 = 2 (3rd .got entry, addend is 0)
+# rtdyld-check: (*{4}_l3)[21:10] = 2
diff --git a/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_relocations.s b/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_relocations.s
index 069170bdf36b..1e356ea200f8 100644
--- a/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_relocations.s
+++ b/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_relocations.s
@@ -20,10 +20,16 @@ g:
# R_AARCH64_MOVW_UABS_G0_NC
movk x0, #:abs_g0_nc:f
l:
+# R_AARCH64_LDST8_ABS_LO12_NC
+ ldrsb x4, [x5, :lo12:a+1]
+# R_AARCH64_LDST16_ABS_LO12_NC
+ ldrh w4, [x5, :lo12:a+2]
# R_AARCH64_LDST32_ABS_LO12_NC
ldr s4, [x5, :lo12:a]
# R_AARCH64_LDST64_ABS_LO12_NC
ldr x4, [x5, :lo12:a]
+# R_AARCH64_LDST128_ABS_LO12_NC
+ ldr q4, [x5, :lo12:a]
p:
# R_AARCH64_ADR_PREL_PG_HI21
# Test both low and high immediate values
@@ -57,9 +63,12 @@ r:
# rtdyld-check: *{4}(g + 8) = 0xf2b13560
# rtdyld-check: *{4}(g + 12) = 0xf299bde0
-## Check LDST32_ABS_LO12_NC and LDST64_ABS_LO12_NC
-# rtdyld-check: (*{4}l)[21:10] = a[11:2]
-# rtdyld-check: (*{4}(l+4))[21:10] = a[11:3]
+## Check LDSTXX_ABS_LO12_NC
+# rtdyld-check: (*{4}l)[21:10] = (a+1)[11:0]
+# rtdyld-check: (*{4}(l+4))[21:10] = (a+2)[11:1]
+# rtdyld-check: (*{4}(l+8))[21:10] = a[11:2]
+# rtdyld-check: (*{4}(l+12))[21:10] = a[11:3]
+# rtdyld-check: (*{4}(l+16))[21:10] = a[11:4]
## Check ADR_PREL_PG_HI21. Low order bits of immediate value
## go to bits 30:29. High order bits go to bits 23:5
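The rtdyld-check bit ranges above all instantiate one rule: LDSTn_ABS_LO12_NC writes the low 12 bits of the target address into insn[21:10], pre-shifted right by log2 of the access size (a summary annotation, not part of the test):

# ldrsb, 1-byte access:  imm12 = (a+1)[11:0]
# ldrh,  2-byte access:  imm12 = (a+2)[11:1]
# ldr s, 4-byte access:  imm12 = a[11:2]
# ldr x, 8-byte access:  imm12 = a[11:3]
# ldr q, 16-byte access: imm12 = a[11:4]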
diff --git a/test/ExecutionEngine/RuntimeDyld/X86/ELF_x86-64_none.yaml b/test/ExecutionEngine/RuntimeDyld/X86/ELF_x86-64_none.yaml
new file mode 100644
index 000000000000..7732c7f1ab98
--- /dev/null
+++ b/test/ExecutionEngine/RuntimeDyld/X86/ELF_x86-64_none.yaml
@@ -0,0 +1,30 @@
+# RUN: yaml2obj %s -o %t.o
+# RUN: llvm-rtdyld -triple=x86_64-pc-linux %t.o -printline
+
+# Verify rtdyld doesn't abort in the presence of R_X86_64_NONE
+
+!ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ OSABI: ELFOSABI_FREEBSD
+ Type: ET_REL
+ Machine: EM_X86_64
+Sections:
+ - Name: .text
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC ]
+ - Name: .rela.text
+ Type: SHT_RELA
+ Link: .symtab
+ Info: .text
+ Relocations:
+ - Offset: 0x0000000000000000
+ Symbol: ''
+ Type: R_X86_64_NONE
+Symbols:
+ Global:
+ - Name: _main
+ Section: .text
+ Value: 0
+ Size: 4
diff --git a/test/Feature/OperandBundles/dse.ll b/test/Feature/OperandBundles/dse.ll
index 9ddf7f02e384..a2183d0457c7 100644
--- a/test/Feature/OperandBundles/dse.ll
+++ b/test/Feature/OperandBundles/dse.ll
@@ -39,7 +39,7 @@ define void @test_2() {
ret void
; CHECK: tail call void @f() [ "deopt"(i8* %m) ]
-; CHECK-NEXT ret void
+; CHECK-NEXT: ret void
}
define i8* @test_3() {
diff --git a/test/Feature/fp-intrinsics.ll b/test/Feature/fp-intrinsics.ll
new file mode 100644
index 000000000000..960bfb5ca105
--- /dev/null
+++ b/test/Feature/fp-intrinsics.ll
@@ -0,0 +1,102 @@
+; RUN: opt -O3 -S < %s | FileCheck %s
+
+; Test to verify that constants aren't folded when the rounding mode is unknown.
+; CHECK-LABEL: @f1
+; CHECK: call double @llvm.experimental.constrained.fdiv.f64
+define double @f1() {
+entry:
+ %div = call double @llvm.experimental.constrained.fdiv.f64(
+ double 1.000000e+00,
+ double 1.000000e+01,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict")
+ ret double %div
+}
+
+; Verify that 'a - 0' isn't simplified to 'a' when the rounding mode is unknown.
+;
+; double f2(double a) {
+; // Because the result of '0 - 0' is negative zero if the rounding mode is
+; // downward, this shouldn't be simplified.
+; return a - 0.0;
+; }
+;
+; CHECK-LABEL: @f2
+; CHECK: call double @llvm.experimental.constrained.fsub.f64
+define double @f2(double %a) {
+entry:
+ %div = call double @llvm.experimental.constrained.fsub.f64(
+ double %a, double 0.000000e+00,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict")
+ ret double %div
+}
+
+; Verify that '-((-a)*b)' isn't simplified to 'a*b' when the rounding mode is
+; unknown.
+;
+; double f3(double a, double b) {
+; // Because the intermediate value involved in this calculation may require
+; // rounding, this shouldn't be simplified.
+; return -((-a)*b);
+; }
+;
+; CHECK-LABEL: @f3
+; CHECK: call double @llvm.experimental.constrained.fsub.f64
+; CHECK: call double @llvm.experimental.constrained.fmul.f64
+; CHECK: call double @llvm.experimental.constrained.fsub.f64
+define double @f3(double %a, double %b) {
+entry:
+ %sub = call double @llvm.experimental.constrained.fsub.f64(
+ double -0.000000e+00, double %a,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict")
+ %mul = call double @llvm.experimental.constrained.fmul.f64(
+ double %sub, double %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict")
+ %ret = call double @llvm.experimental.constrained.fsub.f64(
+ double -0.000000e+00,
+ double %mul,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict")
+ ret double %ret
+}
+
+; Verify that FP operations are not performed speculatively when FP exceptions
+; are not being ignored.
+;
+; double f4(int n, double a) {
+; // Because a + 1 may overflow, this should not be simplified.
+; if (n > 0)
+; return a + 1.0;
+; return a;
+; }
+;
+;
+; CHECK-LABEL: @f4
+; CHECK-NOT: select
+; CHECK: br i1 %cmp
+define double @f4(i32 %n, double %a) {
+entry:
+ %cmp = icmp sgt i32 %n, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %add = call double @llvm.experimental.constrained.fadd.f64(
+ double 1.000000e+00, double %a,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict")
+ br label %if.end
+
+if.end:
+ %a.0 = phi double [%add, %if.then], [ %a, %entry ]
+ ret double %a.0
+}
+
+
+@llvm.fp.env = thread_local global i8 zeroinitializer, section "llvm.metadata"
+declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
+declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
+declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
+declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata)
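All four intrinsics above use the most conservative metadata pair. The last two operands select the rounding mode and the exception semantics; with anything weaker, some of the folds this test forbids become legal again. A hedged sketch of a relaxed call (illustrative, not part of the test):

  %sum = call double @llvm.experimental.constrained.fadd.f64(
           double %a, double %b,
           metadata !"round.tonearest",  ; rounding known: constant folding is allowed
           metadata !"fpexcept.ignore")  ; exceptions ignored: speculation is allowed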
diff --git a/test/Feature/optnone-opt.ll b/test/Feature/optnone-opt.ll
index f53877d4aea9..a00013ec1797 100644
--- a/test/Feature/optnone-opt.ll
+++ b/test/Feature/optnone-opt.ll
@@ -41,6 +41,7 @@ attributes #0 = { optnone noinline }
; OPT-O1-DAG: Skipping pass 'Combine redundant instructions'
; OPT-O1-DAG: Skipping pass 'Dead Store Elimination'
; OPT-O1-DAG: Skipping pass 'Early CSE'
+; OPT-O1-DAG: Skipping pass 'Early GVN Hoisting of Expressions'
; OPT-O1-DAG: Skipping pass 'Jump Threading'
; OPT-O1-DAG: Skipping pass 'MemCpy Optimization'
; OPT-O1-DAG: Skipping pass 'Reassociate expressions'
diff --git a/test/FileCheck/line-count.txt b/test/FileCheck/line-count.txt
index 6f91c2050bf8..d39663e2dbad 100644
--- a/test/FileCheck/line-count.txt
+++ b/test/FileCheck/line-count.txt
@@ -1,15 +1,15 @@
; RUN: FileCheck -input-file %s %s
-2
-3 aaa
-4 bbb
-5 ccc
-6 CHECK: [[@LINE-3]] {{a}}aa
-7 CHECK: [[@LINE-3]] {{b}}bb
-8 CHECK: [[@LINE-3]] {{c}}cc
-9 foobar
-10 CHECK: [[@LINE-1]] {{foo}}bar
-11
-12 arst CHECK: [[@LINE]] {{a}}rst
-13
+; RUN: not FileCheck -check-prefix BAD -input-file %s %s
+3
+4 aaa
+5 bbb
+6 ccc
+7 CHECK: [[@LINE-3]] {{a}}aa
+8 CHECK: [[@LINE-3]] {{b}}bb
+9 CHECK: [[@LINE-3]] {{c}}cc
+10 foobar
+11 CHECK: [[@LINE-1]] {{foo}}bar
+12
+13 arst CHECK: [[@LINE]] {{a}}rst
14
-
+15 BAD: [[@LINE:cant-have-regex]]
diff --git a/test/FileCheck/regex-scope.txt b/test/FileCheck/regex-scope.txt
new file mode 100644
index 000000000000..e77f3f6513a8
--- /dev/null
+++ b/test/FileCheck/regex-scope.txt
@@ -0,0 +1,23 @@
+// RUN: FileCheck -check-prefix CHECK -input-file %s %s
+// RUN: FileCheck -check-prefixes CHECK,GLOBAL -input-file %s %s
+// RUN: FileCheck -check-prefixes CHECK,LOCAL -input-file %s %s
+// RUN: FileCheck -check-prefixes CHECK,GLOBAL --enable-var-scope -input-file %s %s
+// RUN: not FileCheck -check-prefixes CHECK,LOCAL --enable-var-scope -input-file %s %s
+
+local
+global
+; CHECK: [[LOCAL:loc.*]]
+; CHECK: [[$GLOBAL:glo.*]]
+
+local2
+global2
+; CHECK: [[LOCAL]]2
+; CHECK: [[$GLOBAL]]2
+
+barrier:
+; CHECK-LABEL: barrier
+
+local3
+global3
+; LOCAL: [[LOCAL]]3
+; GLOBAL: [[$GLOBAL]]3
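What the five RUN lines exercise: with --enable-var-scope, plain [[VAR]] substitutions are discarded at every CHECK-LABEL, while $-prefixed ones persist, so only the LOCAL run is expected to fail. An annotated sketch of the two forms:

; CHECK: [[LOCAL:loc.*]]   ; plain variable: cleared at the next CHECK-LABEL
; CHECK: [[$GLOBAL:glo.*]] ; '$' prefix marks a global: survives the label barrier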
diff --git a/test/Instrumentation/AddressSanitizer/freebsd.ll b/test/Instrumentation/AddressSanitizer/freebsd.ll
index 5178432d63aa..f940b52b41f3 100644
--- a/test/Instrumentation/AddressSanitizer/freebsd.ll
+++ b/test/Instrumentation/AddressSanitizer/freebsd.ll
@@ -1,11 +1,11 @@
; RUN: opt < %s -asan -asan-module -S \
; RUN: -mtriple=i386-unknown-freebsd \
-; RUN: -default-data-layout="e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128" | \
+; RUN: -data-layout="e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128" | \
; RUN: FileCheck --check-prefix=CHECK-32 %s
; RUN: opt < %s -asan -asan-module -S \
; RUN: -mtriple=x86_64-unknown-freebsd \
-; RUN: -default-data-layout="e-m:e-i64:64-f80:128-n8:16:32:64-S128" | \
+; RUN: -data-layout="e-m:e-i64:64-f80:128-n8:16:32:64-S128" | \
; RUN: FileCheck --check-prefix=CHECK-64 %s
define i32 @read_4_bytes(i32* %a) sanitize_address {
diff --git a/test/Instrumentation/AddressSanitizer/global_metadata_windows.ll b/test/Instrumentation/AddressSanitizer/global_metadata_windows.ll
index 686b506a96c4..27cbd61ef81f 100644
--- a/test/Instrumentation/AddressSanitizer/global_metadata_windows.ll
+++ b/test/Instrumentation/AddressSanitizer/global_metadata_windows.ll
@@ -13,7 +13,7 @@ $mystr = comdat any
; CHECK: $dead_global = comdat noduplicates
; CHECK: @dead_global = local_unnamed_addr global { i32, [60 x i8] } { i32 42, [60 x i8] zeroinitializer }, comdat, align 32
-; CHECK: @__asan_global_dead_global = internal global { {{.*}} }, section ".ASAN$GL", comdat($dead_global), align 64
+; CHECK: @__asan_global_dead_global = private global { {{.*}} }, section ".ASAN$GL", comdat($dead_global), align 64
@dead_global = local_unnamed_addr global i32 42, align 4
@mystr = linkonce_odr unnamed_addr constant [5 x i8] c"main\00", comdat, align 1
diff --git a/test/Instrumentation/AddressSanitizer/instrument_load_then_store.ll b/test/Instrumentation/AddressSanitizer/instrument_load_then_store.ll
index 01a7a6610caf..8341697ff48c 100644
--- a/test/Instrumentation/AddressSanitizer/instrument_load_then_store.ll
+++ b/test/Instrumentation/AddressSanitizer/instrument_load_then_store.ll
@@ -16,10 +16,10 @@ entry:
; OPT1: IncrementMe
; OPT1: __asan_report_
; OPT1-NOT: __asan_report_
-; OPT1: asan.module_ctor
+; OPT1: ret void
; Without optimizations we should see two calls to __asan_report_*
; OPT0: IncrementMe
; OPT0: __asan_report_
; OPT0: __asan_report_
-; OPT0: asan.module_ctor
+; OPT0: ret void
diff --git a/test/Instrumentation/AddressSanitizer/lifetime-throw.ll b/test/Instrumentation/AddressSanitizer/lifetime-throw.ll
index 6d0cbd9ad5aa..ff03d10c7c5d 100644
--- a/test/Instrumentation/AddressSanitizer/lifetime-throw.ll
+++ b/test/Instrumentation/AddressSanitizer/lifetime-throw.ll
@@ -23,7 +23,7 @@ entry:
; Poison memory in prologue: F1F1F1F1F8F3F3F3
; CHECK: store i64 -868082052615769615, i64* %{{[0-9]+}}
- call void @llvm.lifetime.start(i64 4, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %0)
; CHECK: store i8 4, i8* %{{[0-9]+}}
; CHECK-NEXT: @llvm.lifetime.start
@@ -37,7 +37,7 @@ lpad:
%1 = landingpad { i8*, i32 }
cleanup
call void @_ZN3ABCD2Ev(%struct.ABC* nonnull %x)
- call void @llvm.lifetime.end(i64 4, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %0)
; CHECK: store i8 -8, i8* %{{[0-9]+}}
; CHECK-NEXT: @llvm.lifetime.end
@@ -77,7 +77,7 @@ entry:
; Poison memory in prologue: F1F1F1F1F8F304F2
; CHECK: store i64 -935355671561244175, i64* %{{[0-9]+}}
- call void @llvm.lifetime.start(i64 4, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %0)
; CHECK: store i8 4, i8* %{{[0-9]+}}
; CHECK-NEXT: @llvm.lifetime.start
@@ -90,7 +90,7 @@ entry:
ehcleanup:
%2 = cleanuppad within none []
call void @"\01??1ABC@@QEAA@XZ"(%struct.ABC* nonnull %x) [ "funclet"(token %2) ]
- call void @llvm.lifetime.end(i64 4, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %0)
; CHECK: store i8 -8, i8* %{{[0-9]+}}
; CHECK-NEXT: @llvm.lifetime.end
@@ -104,8 +104,8 @@ unreachable:
declare i32 @__gxx_personality_v0(...)
-declare void @llvm.lifetime.start(i64, i8* nocapture)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
declare void @__cxa_throw(i8*, i8*, i8*) local_unnamed_addr
declare i8* @__cxa_allocate_exception(i64) local_unnamed_addr
declare void @_ZN3ABCD2Ev(%struct.ABC* %this) unnamed_addr
diff --git a/test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll b/test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll
index 93708e350fa3..437b6a94185b 100644
--- a/test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll
+++ b/test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll
@@ -6,8 +6,8 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
define i32 @basic_test() sanitize_address {
; CHECK-LABEL: define i32 @basic_test()
@@ -19,14 +19,14 @@ entry:
; Memory is poisoned in prologue: F1F1F1F104F3F8F2
; CHECK-UAS: store i64 -866676825215864335, i64* %{{[0-9]+}}
- call void @llvm.lifetime.start(i64 1, i8* %c)
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %c)
; Memory is unpoisoned at llvm.lifetime.start: 01
; CHECK-UAS: store i8 1, i8* %{{[0-9]+}}
store volatile i32 0, i32* %retval
store volatile i8 0, i8* %c, align 1
- call void @llvm.lifetime.end(i64 1, i8* %c)
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %c)
; Memory is poisoned at llvm.lifetime.end: F8
; CHECK-UAS: store i8 -8, i8* %{{[0-9]+}}
diff --git a/test/Instrumentation/AddressSanitizer/lifetime.ll b/test/Instrumentation/AddressSanitizer/lifetime.ll
index be72124f3ab6..b951afdc670f 100644
--- a/test/Instrumentation/AddressSanitizer/lifetime.ll
+++ b/test/Instrumentation/AddressSanitizer/lifetime.ll
@@ -5,8 +5,8 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
define void @lifetime_no_size() sanitize_address {
; CHECK-LABEL: define void @lifetime_no_size()
@@ -17,7 +17,7 @@ entry:
; Poison memory in prologue: F1F1F1F104F3F3F3
; CHECK: store i64 -868083100587789839, i64* %{{[0-9]+}}
- call void @llvm.lifetime.start(i64 -1, i8* %i.ptr)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %i.ptr)
; Check that lifetimes with no size are ignored.
; CHECK-NOT: store
; CHECK: call void @llvm.lifetime.start
@@ -25,7 +25,7 @@ entry:
store volatile i8 0, i8* %i.ptr
; CHECK: store volatile
- call void @llvm.lifetime.end(i64 -1, i8* %i.ptr)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %i.ptr)
; Check that lifetimes with no size are ignored.
; CHECK-NOT: store
; CHECK: call void @llvm.lifetime.end
@@ -48,19 +48,19 @@ define void @lifetime() sanitize_address {
; CHECK: store i64 -868082052615769615, i64* %{{[0-9]+}}
; Memory is unpoisoned at llvm.lifetime.start
- call void @llvm.lifetime.start(i64 3, i8* %i.ptr)
+ call void @llvm.lifetime.start.p0i8(i64 3, i8* %i.ptr)
; CHECK: store i8 4, i8* %{{[0-9]+}}
; CHECK-NEXT: llvm.lifetime.start
store volatile i8 0, i8* %i.ptr
; CHECK: store volatile
- call void @llvm.lifetime.end(i64 4, i8* %i.ptr)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %i.ptr)
; CHECK: store i8 -8, i8* %{{[0-9]+}}
; CHECK-NEXT: call void @llvm.lifetime.end
; Memory is poisoned at every call to llvm.lifetime.end
- call void @llvm.lifetime.end(i64 2, i8* %i.ptr)
+ call void @llvm.lifetime.end.p0i8(i64 2, i8* %i.ptr)
; CHECK: store i8 -8, i8* %{{[0-9]+}}
; CHECK-NEXT: call void @llvm.lifetime.end
@@ -68,26 +68,26 @@ define void @lifetime() sanitize_address {
%arr = alloca [10 x i32], align 16
%arr.ptr = bitcast [10 x i32]* %arr to i8*
- call void @llvm.lifetime.start(i64 40, i8* %arr.ptr)
+ call void @llvm.lifetime.start.p0i8(i64 40, i8* %arr.ptr)
; CHECK-DEFAULT: call void @__asan_unpoison_stack_memory(i64 %{{[^ ]+}}, i64 40)
; CHECK-NO-DYNAMIC-NOT: call void @__asan_unpoison_stack_memory(i64 %{{[^ ]+}}, i64 40)
store volatile i8 0, i8* %arr.ptr
; CHECK: store volatile
- call void @llvm.lifetime.end(i64 40, i8* %arr.ptr)
+ call void @llvm.lifetime.end.p0i8(i64 40, i8* %arr.ptr)
; CHECK-DEFAULT: call void @__asan_poison_stack_memory(i64 %{{[^ ]+}}, i64 40)
; CHECK-NO-DYNAMIC-NOT: call void @__asan_poison_stack_memory(i64 %{{[^ ]+}}, i64 40)
; One more lifetime start/end for the same variable %i.
- call void @llvm.lifetime.start(i64 2, i8* %i.ptr)
+ call void @llvm.lifetime.start.p0i8(i64 2, i8* %i.ptr)
; CHECK: store i8 4, i8* %{{[0-9]+}}
; CHECK-NEXT: llvm.lifetime.start
store volatile i8 0, i8* %i.ptr
; CHECK: store volatile
- call void @llvm.lifetime.end(i64 4, i8* %i.ptr)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %i.ptr)
; CHECK: store i8 -8, i8* %{{[0-9]+}}
; CHECK-NEXT: llvm.lifetime.end
@@ -108,7 +108,7 @@ entry:
; Poison memory in prologue: F1F1F1F1F8F3F3F3
; CHECK: store i64 -868082052615769615, i64* %{{[0-9]+}}
- call void @llvm.lifetime.start(i64 8, i8* %i.ptr)
+ call void @llvm.lifetime.start.p0i8(i64 8, i8* %i.ptr)
; CHECK: store i8 0, i8* %{{[0-9]+}}
; CHECK-NEXT: llvm.lifetime.start
@@ -123,7 +123,7 @@ bb0:
bb1:
%i.phi = phi i8* [ %i.ptr, %entry ], [ %i.ptr2, %bb0 ]
- call void @llvm.lifetime.end(i64 8, i8* %i.phi)
+ call void @llvm.lifetime.end.p0i8(i64 8, i8* %i.phi)
; CHECK: store i8 -8, i8* %{{[0-9]+}}
; CHECK-NEXT: llvm.lifetime.end
@@ -147,14 +147,14 @@ entry:
; CHECK: store i64 -868082074056920077, i64* %{{[0-9]+}}
%0 = getelementptr inbounds [1024 x i8], [1024 x i8]* %x, i64 0, i64 0
- call void @llvm.lifetime.start(i64 1024, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 1024, i8* %0)
; CHECK: call void @__asan_set_shadow_00(i64 %{{[0-9]+}}, i64 128)
; CHECK-NEXT: call void @llvm.lifetime.start
store i8* %0, i8** %d, align 8
; CHECK: store i8
- call void @llvm.lifetime.end(i64 1024, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 1024, i8* %0)
; CHECK: call void @__asan_set_shadow_f8(i64 %{{[0-9]+}}, i64 128)
; CHECK-NEXT: call void @llvm.lifetime.end
@@ -172,12 +172,12 @@ entry:
store i64 %a, i64* %a.addr, align 8
%0 = bitcast [0 x i8]* %b to i8*
- call void @llvm.lifetime.start(i64 0, i8* %0) #2
+ call void @llvm.lifetime.start.p0i8(i64 0, i8* %0) #2
; CHECK: %{{[0-9]+}} = bitcast
; CHECK-NEXT: call void @llvm.lifetime.start
%1 = bitcast [0 x i8]* %b to i8*
- call void @llvm.lifetime.end(i64 0, i8* %1) #2
+ call void @llvm.lifetime.end.p0i8(i64 0, i8* %1) #2
; CHECK-NEXT: %{{[0-9]+}} = bitcast
; CHECK-NEXT: call void @llvm.lifetime.end
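The renaming applied throughout these tests reflects that llvm.lifetime.start/end became overloaded on their pointer operand; the suffix mangles the pointer's address space and pointee type. A sketch (the addrspace(5) variant is hypothetical, for illustration):

declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
; "p0i8" = pointer in address space 0 to i8; other pointer types mangle likewise:
declare void @llvm.lifetime.start.p5i8(i64, i8 addrspace(5)* nocapture)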
diff --git a/test/Instrumentation/AddressSanitizer/ps4.ll b/test/Instrumentation/AddressSanitizer/ps4.ll
new file mode 100644
index 000000000000..e160996866b4
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/ps4.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -asan -asan-module -S -mtriple=x86_64-scei-ps4 | FileCheck %s
+
+define i32 @read_4_bytes(i32* %a) sanitize_address {
+entry:
+ %tmp1 = load i32, i32* %a, align 4
+ ret i32 %tmp1
+}
+
+; CHECK: @read_4_bytes
+; CHECK-NOT: ret
+; Check for ASan's shadow offset on the PS4 (2^40 or 0x10000000000)
+; CHECK: lshr {{.*}} 3
+; CHECK-NEXT: {{1099511627776}}
+; CHECK: ret
diff --git a/test/Instrumentation/AddressSanitizer/stack-poisoning-and-lifetime-be.ll b/test/Instrumentation/AddressSanitizer/stack-poisoning-and-lifetime-be.ll
index 059c49a3457c..569a67d6d356 100644
--- a/test/Instrumentation/AddressSanitizer/stack-poisoning-and-lifetime-be.ll
+++ b/test/Instrumentation/AddressSanitizer/stack-poisoning-and-lifetime-be.ll
@@ -91,7 +91,7 @@ entry:
; CHECK-NEXT: %zz = getelementptr inbounds
- call void @llvm.lifetime.start(i64 650, i8* %xx)
+ call void @llvm.lifetime.start.p0i8(i64 650, i8* %xx)
; 0000...
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 4
; ENTRY-UAS-NEXT: call void @__asan_set_shadow_00(i64 [[OFFSET]], i64 81)
@@ -100,39 +100,39 @@ entry:
; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i8]]*
; ENTRY-UAS-NEXT: store [[TYPE]] 2, [[TYPE]]* [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.start(i64 650, i8* %xx)
+ ; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 650, i8* %xx)
call void @Foo(i8* %xx)
; CHECK-NEXT: call void @Foo(i8* %xx)
- call void @llvm.lifetime.end(i64 650, i8* %xx)
+ call void @llvm.lifetime.end.p0i8(i64 650, i8* %xx)
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 4
; ENTRY-UAS-NEXT: call void @__asan_set_shadow_f8(i64 [[OFFSET]], i64 82)
- ; CHECK-NEXT: call void @llvm.lifetime.end(i64 650, i8* %xx)
+ ; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 650, i8* %xx)
- call void @llvm.lifetime.start(i64 13, i8* %yy)
+ call void @llvm.lifetime.start.p0i8(i64 13, i8* %yy)
; 0005
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 102
; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i16]]*
; ENTRY-UAS-NEXT: store [[TYPE]] 5, [[TYPE]]* [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.start(i64 13, i8* %yy)
+ ; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 13, i8* %yy)
call void @Foo(i8* %yy)
; CHECK-NEXT: call void @Foo(i8* %yy)
- call void @llvm.lifetime.end(i64 13, i8* %yy)
+ call void @llvm.lifetime.end.p0i8(i64 13, i8* %yy)
; F8F8
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 102
; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i16]]*
; ENTRY-UAS-NEXT: store [[TYPE]] -1800, [[TYPE]]* [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.end(i64 13, i8* %yy)
+ ; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 13, i8* %yy)
- call void @llvm.lifetime.start(i64 40, i8* %zz)
+ call void @llvm.lifetime.start.p0i8(i64 40, i8* %zz)
; 00000000
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 106
; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
@@ -142,12 +142,12 @@ entry:
; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i8]]*
; ENTRY-UAS-NEXT: store [[TYPE]] 0, [[TYPE]]* [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.start(i64 40, i8* %zz)
+ ; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 40, i8* %zz)
call void @Foo(i8* %zz)
; CHECK-NEXT: call void @Foo(i8* %zz)
- call void @llvm.lifetime.end(i64 40, i8* %zz)
+ call void @llvm.lifetime.end.p0i8(i64 40, i8* %zz)
; F8F8F8F8
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 106
; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
@@ -157,7 +157,7 @@ entry:
; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i8]]*
; ENTRY-UAS-NEXT: store [[TYPE]] -8, [[TYPE]]* [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.end(i64 40, i8* %zz)
+ ; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 40, i8* %zz)
; CHECK-LABEL: <label>
@@ -209,8 +209,8 @@ entry:
; CHECK: ret void
}
-declare void @llvm.lifetime.start(i64, i8* nocapture)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
; CHECK-ON: declare void @__asan_set_shadow_00(i64, i64)
; CHECK-ON: declare void @__asan_set_shadow_f1(i64, i64)
diff --git a/test/Instrumentation/AddressSanitizer/stack-poisoning-and-lifetime.ll b/test/Instrumentation/AddressSanitizer/stack-poisoning-and-lifetime.ll
index 48d3b0e53ccb..0799b03e455e 100644
--- a/test/Instrumentation/AddressSanitizer/stack-poisoning-and-lifetime.ll
+++ b/test/Instrumentation/AddressSanitizer/stack-poisoning-and-lifetime.ll
@@ -91,7 +91,7 @@ entry:
; CHECK-NEXT: %zz = getelementptr inbounds
- call void @llvm.lifetime.start(i64 650, i8* %xx)
+ call void @llvm.lifetime.start.p0i8(i64 650, i8* %xx)
; 0000...
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 4
; ENTRY-UAS-NEXT: call void @__asan_set_shadow_00(i64 [[OFFSET]], i64 81)
@@ -100,39 +100,39 @@ entry:
; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i8]]*
; ENTRY-UAS-NEXT: store [[TYPE]] 2, [[TYPE]]* [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.start(i64 650, i8* %xx)
+ ; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 650, i8* %xx)
call void @Foo(i8* %xx)
; CHECK-NEXT: call void @Foo(i8* %xx)
- call void @llvm.lifetime.end(i64 650, i8* %xx)
+ call void @llvm.lifetime.end.p0i8(i64 650, i8* %xx)
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 4
; ENTRY-UAS-NEXT: call void @__asan_set_shadow_f8(i64 [[OFFSET]], i64 82)
- ; CHECK-NEXT: call void @llvm.lifetime.end(i64 650, i8* %xx)
+ ; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 650, i8* %xx)
- call void @llvm.lifetime.start(i64 13, i8* %yy)
+ call void @llvm.lifetime.start.p0i8(i64 13, i8* %yy)
; 0005
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 102
; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i16]]*
; ENTRY-UAS-NEXT: store [[TYPE]] 1280, [[TYPE]]* [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.start(i64 13, i8* %yy)
+ ; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 13, i8* %yy)
call void @Foo(i8* %yy)
; CHECK-NEXT: call void @Foo(i8* %yy)
- call void @llvm.lifetime.end(i64 13, i8* %yy)
+ call void @llvm.lifetime.end.p0i8(i64 13, i8* %yy)
; F8F8
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 102
; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i16]]*
; ENTRY-UAS-NEXT: store [[TYPE]] -1800, [[TYPE]]* [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.end(i64 13, i8* %yy)
+ ; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 13, i8* %yy)
- call void @llvm.lifetime.start(i64 40, i8* %zz)
+ call void @llvm.lifetime.start.p0i8(i64 40, i8* %zz)
; 00000000
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 106
; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
@@ -142,12 +142,12 @@ entry:
; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i8]]*
; ENTRY-UAS-NEXT: store [[TYPE]] 0, [[TYPE]]* [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.start(i64 40, i8* %zz)
+ ; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 40, i8* %zz)
call void @Foo(i8* %zz)
; CHECK-NEXT: call void @Foo(i8* %zz)
- call void @llvm.lifetime.end(i64 40, i8* %zz)
+ call void @llvm.lifetime.end.p0i8(i64 40, i8* %zz)
; F8F8F8F8
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 106
; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
@@ -157,7 +157,7 @@ entry:
; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i8]]*
; ENTRY-UAS-NEXT: store [[TYPE]] -8, [[TYPE]]* [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.end(i64 40, i8* %zz)
+ ; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 40, i8* %zz)
; CHECK-LABEL: <label>
@@ -209,8 +209,8 @@ entry:
; CHECK: ret void
}
-declare void @llvm.lifetime.start(i64, i8* nocapture)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
; CHECK-ON: declare void @__asan_set_shadow_00(i64, i64)
; CHECK-ON: declare void @__asan_set_shadow_f1(i64, i64)
diff --git a/test/Instrumentation/AddressSanitizer/stack_layout.ll b/test/Instrumentation/AddressSanitizer/stack_layout.ll
index 96706f70c83b..4e756f9ab2f2 100644
--- a/test/Instrumentation/AddressSanitizer/stack_layout.ll
+++ b/test/Instrumentation/AddressSanitizer/stack_layout.ll
@@ -9,8 +9,8 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
target triple = "x86_64-unknown-linux-gnu"
declare void @Use(i8*)
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
; CHECK: private unnamed_addr constant{{.*}}3 32 10 3 XXX 64 20 3 YYY 128 30 3 ZZZ\0
; CHECK: private unnamed_addr constant{{.*}}3 32 5 3 AAA 64 55 3 BBB 160 555 3 CCC\0
@@ -87,13 +87,13 @@ define void @Func5() sanitize_address #0 !dbg !11 {
%AAA = alloca i32, align 4 ; File is not the same as !11
%BBB = alloca i32, align 4 ; File is the same as !11
%BBB.ptr = bitcast i32* %BBB to i8*
- call void @llvm.lifetime.start(i64 4, i8* nonnull %BBB.ptr), !dbg !12
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %BBB.ptr), !dbg !12
store volatile i32 5, i32* %BBB, align 4
%AAA.ptr = bitcast i32* %AAA to i8*
- call void @llvm.lifetime.start(i64 4, i8* nonnull %AAA.ptr), !dbg !14
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %AAA.ptr), !dbg !14
store volatile i32 3, i32* %AAA, align 4
- call void @llvm.lifetime.end(i64 4, i8* nonnull %AAA.ptr), !dbg !17
- call void @llvm.lifetime.end(i64 4, i8* nonnull %BBB.ptr), !dbg !18
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %AAA.ptr), !dbg !17
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %BBB.ptr), !dbg !18
ret void
}
diff --git a/test/Instrumentation/InstrProfiling/PR23499.ll b/test/Instrumentation/InstrProfiling/PR23499.ll
index 47c60fd802ab..101fad19950e 100644
--- a/test/Instrumentation/InstrProfiling/PR23499.ll
+++ b/test/Instrumentation/InstrProfiling/PR23499.ll
@@ -13,15 +13,15 @@ $_Z3barIvEvv = comdat any
@__profn__Z3barIvEvv = linkonce_odr hidden constant [11 x i8] c"_Z3barIvEvv", align 1
-; CHECK: @__profn__Z3barIvEvv = private constant [11 x i8] c"_Z3barIvEvv", align 1
+; CHECK-NOT: __profn__Z3barIvEvv
; CHECK: @__profc__Z3barIvEvv = linkonce_odr hidden global [1 x i64] zeroinitializer, section "{{.*}}__llvm_prf_cnts", comdat($__profv__Z3barIvEvv), align 8
-; CHECK: @__profd__Z3barIvEvv = linkonce_odr hidden global { i64, i64, i64*, i8*, i8*, i32, [1 x i16] } { i64 4947693190065689389, i64 0, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc__Z3barIvEvv, i32 0, i32 0), i8*{{.*}}, i8* null, i32 1, [1 x i16] zeroinitializer }, section "{{.*}}__llvm_prf_data{{.*}}", comdat($__profv__Z3barIvEvv), align 8
+; CHECK: @__profd__Z3barIvEvv = linkonce_odr hidden global { i64, i64, i64*, i8*, i8*, i32, [2 x i16] } { i64 4947693190065689389, i64 0, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc__Z3barIvEvv, i32 0, i32 0), i8*{{.*}}, i8* null, i32 1, [2 x i16] zeroinitializer }, section "{{.*}}__llvm_prf_data{{.*}}", comdat($__profv__Z3barIvEvv), align 8
; CHECK: @__llvm_prf_nm = private constant [{{.*}} x i8] c"{{.*}}", section "{{.*}}__llvm_prf_names"
-; COFF: @__profn__Z3barIvEvv = private constant [11 x i8] c"_Z3barIvEvv", align 1
-; COFF: @__profc__Z3barIvEvv = linkonce_odr hidden global [1 x i64] zeroinitializer, section "{{.*}}__llvm_prf_cnts", comdat, align 8
-; COFF: @__profd__Z3barIvEvv = linkonce_odr hidden global { i64, i64, i64*, i8*, i8*, i32, [1 x i16] } { i64 4947693190065689389, i64 0, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc__Z3barIvEvv, i32 0, i32 0), i8*{{.*}}, i8* null, i32 1, [1 x i16] zeroinitializer }, section "{{.*}}__llvm_prf_data{{.*}}", comdat($__profc__Z3barIvEvv), align 8
+; COFF-NOT: __profn__Z3barIvEvv
+; COFF: @__profc__Z3barIvEvv = linkonce_odr hidden global [1 x i64] zeroinitializer, section "{{.*}}prfc", comdat, align 8
+; COFF: @__profd__Z3barIvEvv = linkonce_odr hidden global { i64, i64, i64*, i8*, i8*, i32, [2 x i16] } { i64 4947693190065689389, i64 0, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc__Z3barIvEvv, i32 0, i32 0), i8*{{.*}}, i8* null, i32 1, [2 x i16] zeroinitializer }, section "{{.*}}prfd{{.*}}", comdat($__profc__Z3barIvEvv), align 8
declare void @llvm.instrprof.increment(i8*, i64, i32, i32) #1
diff --git a/test/Instrumentation/InstrProfiling/icall.ll b/test/Instrumentation/InstrProfiling/icall.ll
index 529ad9ce12f0..d92de47421d4 100644
--- a/test/Instrumentation/InstrProfiling/icall.ll
+++ b/test/Instrumentation/InstrProfiling/icall.ll
@@ -37,9 +37,9 @@ attributes #0 = { nounwind }
; DYN-NOT: @__profvp_foo
; DYN-NOT: @__llvm_prf_vnodes
-; STATIC: call void @__llvm_profile_instrument_target(i64 %3, i8* bitcast ({ i64, i64, i64*, i8*, i8*, i32, [1 x i16] }* @__profd_foo to i8*), i32 0)
-; STATIC-EXT: call void @__llvm_profile_instrument_target(i64 %3, i8* bitcast ({ i64, i64, i64*, i8*, i8*, i32, [1 x i16] }* @__profd_foo to i8*), i32 zeroext 0)
-; STATIC-SEXT: call void @__llvm_profile_instrument_target(i64 %3, i8* bitcast ({ i64, i64, i64*, i8*, i8*, i32, [1 x i16] }* @__profd_foo to i8*), i32 signext 0)
+; STATIC: call void @__llvm_profile_instrument_target(i64 %3, i8* bitcast ({ i64, i64, i64*, i8*, i8*, i32, [2 x i16] }* @__profd_foo to i8*), i32 0)
+; STATIC-EXT: call void @__llvm_profile_instrument_target(i64 %3, i8* bitcast ({ i64, i64, i64*, i8*, i8*, i32, [2 x i16] }* @__profd_foo to i8*), i32 zeroext 0)
+; STATIC-SEXT: call void @__llvm_profile_instrument_target(i64 %3, i8* bitcast ({ i64, i64, i64*, i8*, i8*, i32, [2 x i16] }* @__profd_foo to i8*), i32 signext 0)
; STATIC: declare void @__llvm_profile_instrument_target(i64, i8*, i32)
; STATIC-EXT: declare void @__llvm_profile_instrument_target(i64, i8*, i32 zeroext)
diff --git a/test/Instrumentation/InstrProfiling/platform.ll b/test/Instrumentation/InstrProfiling/platform.ll
index b731fc3e5ff5..c0c711054ff1 100644
--- a/test/Instrumentation/InstrProfiling/platform.ll
+++ b/test/Instrumentation/InstrProfiling/platform.ll
@@ -12,8 +12,8 @@
; RUN: opt < %s -mtriple=x86_64-pc-solaris -passes=instrprof -S | FileCheck %s -check-prefix=SOLARIS
@__profn_foo = hidden constant [3 x i8] c"foo"
-; MACHO: @__profn_foo = private constant [3 x i8] c"foo"
-; ELF: @__profn_foo = private constant [3 x i8] c"foo"
+; MACHO-NOT: __profn_foo
+; ELF-NOT: __profn_foo
; MACHO: @__profc_foo = hidden global [1 x i64] zeroinitializer, section "__DATA,__llvm_prf_cnts", align 8
; ELF: @__profc_foo = hidden global [1 x i64] zeroinitializer, section "__llvm_prf_cnts", align 8
diff --git a/test/Instrumentation/InstrProfiling/profiling.ll b/test/Instrumentation/InstrProfiling/profiling.ll
index 508d3ef8dea0..c4cc1d9ce438 100644
--- a/test/Instrumentation/InstrProfiling/profiling.ll
+++ b/test/Instrumentation/InstrProfiling/profiling.ll
@@ -4,11 +4,11 @@
target triple = "x86_64-apple-macosx10.10.0"
@__profn_foo = hidden constant [3 x i8] c"foo"
-; CHECK: @__profn_foo = private constant [3 x i8] c"foo"
+; CHECK-NOT: __profn_foo
@__profn_bar = hidden constant [4 x i8] c"bar\00"
-; CHECK: @__profn_bar = private constant [4 x i8] c"bar\00"
+; CHECK-NOT: __profn_bar
@__profn_baz = hidden constant [3 x i8] c"baz"
-; CHECK: @__profn_baz = private constant [3 x i8] c"baz"
+; CHECK-NOT: __profn_baz
; CHECK: @__profc_foo = hidden global [1 x i64] zeroinitializer, section "__DATA,__llvm_prf_cnts", align 8
; CHECK: @__profd_foo = hidden {{.*}}, section "__DATA,__llvm_prf_data,regular,live_support", align 8
diff --git a/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll b/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
index 99b460456239..18d2c3bfe4d8 100644
--- a/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
+++ b/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
@@ -8,10 +8,10 @@ target triple = "aarch64-unknown-linux-gnu"
define i32 @foo(i32 %guard, ...) {
%vl = alloca %struct.__va_list, align 8
%1 = bitcast %struct.__va_list* %vl to i8*
- call void @llvm.lifetime.start(i64 32, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
call void @llvm.va_start(i8* %1)
call void @llvm.va_end(i8* %1)
- call void @llvm.lifetime.end(i64 32, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
ret i32 0
}
@@ -46,10 +46,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[STACK:%.*]] = getelementptr inbounds i8, i8* {{%.*}}, i32 192
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{%.*}}, i8* [[STACK]], i64 {{%.*}}, i32 16, i1 false)
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare void @llvm.va_start(i8*) #2
declare void @llvm.va_end(i8*) #2
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i32 2, double 3.000000e+00,
diff --git a/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll b/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
index 71397f1db5a4..46e840c607f9 100644
--- a/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
+++ b/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
@@ -6,10 +6,10 @@ target triple = "mips64--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca i8*, align 8
%1 = bitcast i8** %vl to i8*
- call void @llvm.lifetime.start(i64 32, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
call void @llvm.va_start(i8* %1)
call void @llvm.va_end(i8* %1)
- call void @llvm.lifetime.end(i64 32, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
ret i32 0
}
@@ -23,10 +23,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[STACK:%.*]] = bitcast {{.*}} @__msan_va_arg_tls to i8*
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[C]], i8* [[STACK]], i64 [[B]], i32 8, i1 false)
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare void @llvm.va_start(i8*) #2
declare void @llvm.va_end(i8*) #2
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
diff --git a/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll b/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
index 9931b13baacb..e0177b63d68d 100644
--- a/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
+++ b/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
@@ -6,10 +6,10 @@ target triple = "mips64el--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca i8*, align 8
%1 = bitcast i8** %vl to i8*
- call void @llvm.lifetime.start(i64 32, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
call void @llvm.va_start(i8* %1)
call void @llvm.va_end(i8* %1)
- call void @llvm.lifetime.end(i64 32, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
ret i32 0
}
@@ -23,10 +23,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[STACK:%.*]] = bitcast {{.*}} @__msan_va_arg_tls to i8*
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[C]], i8* [[STACK]], i64 [[B]], i32 8, i1 false)
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare void @llvm.va_start(i8*) #2
declare void @llvm.va_end(i8*) #2
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
diff --git a/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll b/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
index 71f4b3466595..afc4b775de35 100644
--- a/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
+++ b/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
@@ -6,10 +6,10 @@ target triple = "powerpc64--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca i8*, align 8
%1 = bitcast i8** %vl to i8*
- call void @llvm.lifetime.start(i64 32, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
call void @llvm.va_start(i8* %1)
call void @llvm.va_end(i8* %1)
- call void @llvm.lifetime.end(i64 32, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
ret i32 0
}
@@ -23,10 +23,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[STACK:%.*]] = bitcast {{.*}} @__msan_va_arg_tls to i8*
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[C]], i8* [[STACK]], i64 [[B]], i32 8, i1 false)
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare void @llvm.va_start(i8*) #2
declare void @llvm.va_end(i8*) #2
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
diff --git a/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll b/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
index 6e844dce5491..1afe778ad79a 100644
--- a/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
+++ b/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
@@ -6,10 +6,10 @@ target triple = "powerpc64le--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca i8*, align 8
%1 = bitcast i8** %vl to i8*
- call void @llvm.lifetime.start(i64 32, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
call void @llvm.va_start(i8* %1)
call void @llvm.va_end(i8* %1)
- call void @llvm.lifetime.end(i64 32, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
ret i32 0
}
@@ -23,10 +23,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[STACK:%.*]] = bitcast {{.*}} @__msan_va_arg_tls to i8*
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[C]], i8* [[STACK]], i64 [[B]], i32 8, i1 false)
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare void @llvm.va_start(i8*) #2
declare void @llvm.va_end(i8*) #2
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
diff --git a/test/Instrumentation/MemorySanitizer/alloca.ll b/test/Instrumentation/MemorySanitizer/alloca.ll
new file mode 100644
index 000000000000..57ee9120ae83
--- /dev/null
+++ b/test/Instrumentation/MemorySanitizer/alloca.ll
@@ -0,0 +1,59 @@
+; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s --check-prefixes=CHECK,INLINE
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-poison-stack-with-call=1 -S | FileCheck %s --check-prefixes=CHECK,CALL
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck %s --check-prefixes=CHECK,ORIGIN
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=2 -S | FileCheck %s --check-prefixes=CHECK,ORIGIN
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @static() sanitize_memory {
+entry:
+ %x = alloca i32, align 4
+ ret void
+}
+
+; CHECK-LABEL: define void @static(
+; INLINE: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 -1, i64 4, i32 4, i1 false)
+; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 4)
+; ORIGIN: call void @__msan_set_alloca_origin4(i8* {{.*}}, i64 4,
+; CHECK: ret void
+
+
+define void @dynamic() sanitize_memory {
+entry:
+ br label %l
+l:
+ %x = alloca i32, align 4
+ ret void
+}
+
+; CHECK-LABEL: define void @dynamic(
+; INLINE: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 -1, i64 4, i32 4, i1 false)
+; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 4)
+; ORIGIN: call void @__msan_set_alloca_origin4(i8* {{.*}}, i64 4,
+; CHECK: ret void
+
+define void @array() sanitize_memory {
+entry:
+ %x = alloca i32, i64 5, align 4
+ ret void
+}
+
+; CHECK-LABEL: define void @array(
+; INLINE: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 -1, i64 20, i32 4, i1 false)
+; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 20)
+; ORIGIN: call void @__msan_set_alloca_origin4(i8* {{.*}}, i64 20,
+; CHECK: ret void
+
+define void @array_non_const(i64 %cnt) sanitize_memory {
+entry:
+ %x = alloca i32, i64 %cnt, align 4
+ ret void
+}
+
+; CHECK-LABEL: define void @array_non_const(
+; CHECK: %[[A:.*]] = mul i64 4, %cnt
+; INLINE: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 -1, i64 %[[A]], i32 4, i1 false)
+; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 %[[A]])
+; ORIGIN: call void @__msan_set_alloca_origin4(i8* {{.*}}, i64 %[[A]],
+; CHECK: ret void
diff --git a/test/Instrumentation/MemorySanitizer/csr.ll b/test/Instrumentation/MemorySanitizer/csr.ll
new file mode 100644
index 000000000000..c4e3a3f73920
--- /dev/null
+++ b/test/Instrumentation/MemorySanitizer/csr.ll
@@ -0,0 +1,52 @@
+; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+; RUN: opt < %s -msan -msan-check-access-address=1 -S | FileCheck %s --check-prefix=ADDR
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare void @llvm.x86.sse.stmxcsr(i8*)
+declare void @llvm.x86.sse.ldmxcsr(i8*)
+
+define void @getcsr(i32 *%p) sanitize_memory {
+entry:
+ %0 = bitcast i32* %p to i8*
+ call void @llvm.x86.sse.stmxcsr(i8* %0)
+ ret void
+}
+
+; CHECK-LABEL: @getcsr(
+; CHECK: store i32 0, i32*
+; CHECK: call void @llvm.x86.sse.stmxcsr(
+; CHECK: ret void
+
+; ADDR-LABEL: @getcsr(
+; ADDR: %[[A:.*]] = load i64, i64* getelementptr inbounds {{.*}} @__msan_param_tls, i32 0, i32 0), align 8
+; ADDR: %[[B:.*]] = icmp ne i64 %[[A]], 0
+; ADDR: br i1 %[[B]], label {{.*}}, label
+; ADDR: call void @__msan_warning_noreturn()
+; ADDR: call void @llvm.x86.sse.stmxcsr(
+; ADDR: ret void
+
+; Function Attrs: nounwind uwtable
+define void @setcsr(i32 *%p) sanitize_memory {
+entry:
+ %0 = bitcast i32* %p to i8*
+ call void @llvm.x86.sse.ldmxcsr(i8* %0)
+ ret void
+}
+
+; CHECK-LABEL: @setcsr(
+; CHECK: %[[A:.*]] = load i32, i32* %{{.*}}, align 1
+; CHECK: %[[B:.*]] = icmp ne i32 %[[A]], 0
+; CHECK: br i1 %[[B]], label {{.*}}, label
+; CHECK: call void @__msan_warning_noreturn()
+; CHECK: call void @llvm.x86.sse.ldmxcsr(
+; CHECK: ret void
+
+; ADDR-LABEL: @setcsr(
+; ADDR: %[[A:.*]] = load i64, i64* getelementptr inbounds {{.*}} @__msan_param_tls, i32 0, i32 0), align 8
+; ADDR: %[[B:.*]] = icmp ne i64 %[[A]], 0
+; ADDR: br i1 %[[B]], label {{.*}}, label
+; ADDR: call void @__msan_warning_noreturn()
+; ADDR: call void @llvm.x86.sse.ldmxcsr(
+; ADDR: ret void
diff --git a/test/Instrumentation/SanitizerCoverage/coverage.ll b/test/Instrumentation/SanitizerCoverage/coverage.ll
index a2a92a02c718..75a341da021c 100644
--- a/test/Instrumentation/SanitizerCoverage/coverage.ll
+++ b/test/Instrumentation/SanitizerCoverage/coverage.ll
@@ -1,13 +1,12 @@
; RUN: opt < %s -sancov -sanitizer-coverage-level=0 -S | FileCheck %s --check-prefix=CHECK0
; RUN: opt < %s -sancov -sanitizer-coverage-level=1 -S | FileCheck %s --check-prefix=CHECK1
-; RUN: opt < %s -sancov -sanitizer-coverage-level=2 -S | FileCheck %s --check-prefix=CHECK2
+; RUN: opt < %s -sancov -sanitizer-coverage-level=2 -S | FileCheck %s --check-prefix=CHECK_WITH_CHECK
; RUN: opt < %s -sancov -sanitizer-coverage-level=2 -sanitizer-coverage-block-threshold=10 -S | FileCheck %s --check-prefix=CHECK2
; RUN: opt < %s -sancov -sanitizer-coverage-level=2 -sanitizer-coverage-block-threshold=0 -S | FileCheck %s --check-prefix=CHECK_WITH_CHECK
; RUN: opt < %s -sancov -sanitizer-coverage-level=2 -sanitizer-coverage-block-threshold=1 -S | FileCheck %s --check-prefix=CHECK_WITH_CHECK
; RUN: opt < %s -sancov -sanitizer-coverage-level=3 -sanitizer-coverage-block-threshold=10 -S | FileCheck %s --check-prefix=CHECK3
; RUN: opt < %s -sancov -sanitizer-coverage-level=4 -S | FileCheck %s --check-prefix=CHECK4
; RUN: opt < %s -sancov -sanitizer-coverage-level=4 -sanitizer-coverage-trace-pc -S | FileCheck %s --check-prefix=CHECK_TRACE_PC
-; RUN: opt < %s -sancov -sanitizer-coverage-level=4 -sanitizer-coverage-trace-pc-guard -S | FileCheck %s --check-prefix=CHECK_TRACE_PC
; RUN: opt < %s -sancov -sanitizer-coverage-level=3 -sanitizer-coverage-8bit-counters=1 -S | FileCheck %s --check-prefix=CHECK-8BIT
; RUN: opt < %s -sancov -sanitizer-coverage-level=2 -sanitizer-coverage-block-threshold=10 \
diff --git a/test/Instrumentation/SanitizerCoverage/coverage2-dbg.ll b/test/Instrumentation/SanitizerCoverage/coverage2-dbg.ll
index 37eca1065fdc..fde1904259d5 100644
--- a/test/Instrumentation/SanitizerCoverage/coverage2-dbg.ll
+++ b/test/Instrumentation/SanitizerCoverage/coverage2-dbg.ll
@@ -17,8 +17,8 @@ target triple = "x86_64-unknown-linux-gnu"
; Check that the __sanitizer_cov calls have !dbg attachments pointing to the beginning
; of the appropriate basic blocks.
; CHECK-LABEL:_Z3fooPi
-; CHECK: call void @__sanitizer_cov(i32*{{.*}}), !dbg [[A:!.*]]
-; CHECK: call void @__sanitizer_cov(i32*{{.*}}), !dbg [[B:!.*]]
+; CHECK: call void @__sanitizer_cov{{.*}}(i32*{{.*}}), !dbg [[A:!.*]]
+; CHECK: call void @__sanitizer_cov{{.*}}(i32*{{.*}}), !dbg [[B:!.*]]
; CHECK: ret void
; CHECK: [[A]] = !DILocation(line: 1, scope: !{{.*}})
; CHECK: [[B]] = !DILocation(line: 3, column: 5, scope: !{{.*}})
diff --git a/test/Instrumentation/SanitizerCoverage/trace-pc-guard-comdat.ll b/test/Instrumentation/SanitizerCoverage/trace-pc-guard-comdat.ll
new file mode 100644
index 000000000000..8ab5f4961b1b
--- /dev/null
+++ b/test/Instrumentation/SanitizerCoverage/trace-pc-guard-comdat.ll
@@ -0,0 +1,42 @@
+; RUN: opt < %s -sancov -sanitizer-coverage-level=4 -sanitizer-coverage-trace-pc-guard -S | FileCheck %s --check-prefix=CHECK_TRACE_PC_GUARD
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-unknown-linux-gnu"
+define void @foo(i32* %a) sanitize_address {
+entry:
+ %tobool = icmp eq i32* %a, null
+ br i1 %tobool, label %if.end, label %if.then
+
+ if.then: ; preds = %entry
+ store i32 0, i32* %a, align 4
+ br label %if.end
+
+ if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+%struct.StructWithVptr = type { i32 (...)** }
+
+define void @CallViaVptr(%struct.StructWithVptr* %foo) uwtable sanitize_address {
+entry:
+ %0 = bitcast %struct.StructWithVptr* %foo to void (%struct.StructWithVptr*)***
+ %vtable = load void (%struct.StructWithVptr*)**, void (%struct.StructWithVptr*)*** %0, align 8
+ %1 = load void (%struct.StructWithVptr*)*, void (%struct.StructWithVptr*)** %vtable, align 8
+ tail call void %1(%struct.StructWithVptr* %foo)
+ tail call void %1(%struct.StructWithVptr* %foo)
+ tail call void asm sideeffect "", ""()
+ ret void
+}
+
+; CHECK_TRACE_PC_GUARD-LABEL: define void @foo
+; CHECK_TRACE_PC_GUARD: call void @__sanitizer_cov_trace_pc
+; CHECK_TRACE_PC_GUARD: call void asm sideeffect "", ""()
+; CHECK_TRACE_PC_GUARD: ret void
+
+; CHECK_TRACE_PC_GUARD-LABEL: define void @CallViaVptr
+; CHECK_TRACE_PC_GUARD: call void @__sanitizer_cov_trace_pc_indir
+; CHECK_TRACE_PC_GUARD: call void @__sanitizer_cov_trace_pc_indir
+; CHECK_TRACE_PC_GUARD: ret void
+
+; CHECK_TRACE_PC_GUARD-LABEL: define internal void @sancov.module_ctor() comdat
+
diff --git a/test/Instrumentation/SanitizerCoverage/trace-pc-guard-nocomdat.ll b/test/Instrumentation/SanitizerCoverage/trace-pc-guard-nocomdat.ll
new file mode 100644
index 000000000000..392ff8d29327
--- /dev/null
+++ b/test/Instrumentation/SanitizerCoverage/trace-pc-guard-nocomdat.ll
@@ -0,0 +1,42 @@
+; RUN: opt < %s -sancov -sanitizer-coverage-level=4 -sanitizer-coverage-trace-pc-guard -S | FileCheck %s --check-prefix=CHECK_TRACE_PC_GUARD
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-darwin10.0.0"
+define void @foo(i32* %a) sanitize_address {
+entry:
+ %tobool = icmp eq i32* %a, null
+ br i1 %tobool, label %if.end, label %if.then
+
+ if.then: ; preds = %entry
+ store i32 0, i32* %a, align 4
+ br label %if.end
+
+ if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+%struct.StructWithVptr = type { i32 (...)** }
+
+define void @CallViaVptr(%struct.StructWithVptr* %foo) uwtable sanitize_address {
+entry:
+ %0 = bitcast %struct.StructWithVptr* %foo to void (%struct.StructWithVptr*)***
+ %vtable = load void (%struct.StructWithVptr*)**, void (%struct.StructWithVptr*)*** %0, align 8
+ %1 = load void (%struct.StructWithVptr*)*, void (%struct.StructWithVptr*)** %vtable, align 8
+ tail call void %1(%struct.StructWithVptr* %foo)
+ tail call void %1(%struct.StructWithVptr* %foo)
+ tail call void asm sideeffect "", ""()
+ ret void
+}
+
+; CHECK_TRACE_PC_GUARD-LABEL: define void @foo
+; CHECK_TRACE_PC_GUARD: call void @__sanitizer_cov_trace_pc
+; CHECK_TRACE_PC_GUARD: call void asm sideeffect "", ""()
+; CHECK_TRACE_PC_GUARD: ret void
+
+; CHECK_TRACE_PC_GUARD-LABEL: define void @CallViaVptr
+; CHECK_TRACE_PC_GUARD: call void @__sanitizer_cov_trace_pc_indir
+; CHECK_TRACE_PC_GUARD: call void @__sanitizer_cov_trace_pc_indir
+; CHECK_TRACE_PC_GUARD: ret void
+
+; CHECK_TRACE_PC_GUARD-LABEL: define internal void @sancov.module_ctor() {
+
diff --git a/test/Instrumentation/SanitizerCoverage/tracing.ll b/test/Instrumentation/SanitizerCoverage/tracing.ll
index 49c2a1a63527..9e153472eaba 100644
--- a/test/Instrumentation/SanitizerCoverage/tracing.ll
+++ b/test/Instrumentation/SanitizerCoverage/tracing.ll
@@ -3,6 +3,7 @@
; RUN: opt < %s -sancov -sanitizer-coverage-level=3 -sanitizer-coverage-experimental-tracing -S | FileCheck %s --check-prefix=CHECK3
; RUN: opt < %s -sancov -sanitizer-coverage-level=3 -sanitizer-coverage-trace-pc -S | FileCheck %s --check-prefix=CHECK_PC
; RUN: opt < %s -sancov -sanitizer-coverage-level=3 -sanitizer-coverage-trace-pc-guard -S | FileCheck %s --check-prefix=CHECK_PC_GUARD
+; RUN: opt < %s -sancov -sanitizer-coverage-level=3 -sanitizer-coverage-trace-pc-guard -S -mtriple=x86_64-apple-macosx | FileCheck %s --check-prefix=CHECK_PC_GUARD_DARWIN
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
@@ -47,3 +48,11 @@ entry:
; CHECK_PC_GUARD-NOT: call void @__sanitizer_cov_trace_pc
; CHECK_PC_GUARD: ret void
; CHECK_PC_GUARD: call void @__sanitizer_cov_trace_pc_guard_init(i32* bitcast (i32** @__start___sancov_guards to i32*), i32* bitcast (i32** @__stop___sancov_guards to i32*))
+
+; CHECK_PC_GUARD_DARWIN-LABEL: define void @foo
+; CHECK_PC_GUARD_DARWIN: call void @__sanitizer_cov_trace_pc_guard
+; CHECK_PC_GUARD_DARWIN: call void @__sanitizer_cov_trace_pc_guard
+; CHECK_PC_GUARD_DARWIN: call void @__sanitizer_cov_trace_pc_guard
+; CHECK_PC_GUARD_DARWIN-NOT: call void @__sanitizer_cov_trace_pc
+; CHECK_PC_GUARD_DARWIN: ret void
+; CHECK_PC_GUARD_DARWIN: call void @__sanitizer_cov_trace_pc_guard_init(i32* bitcast (i32** @"\01section$start$__DATA$__sancov_guards" to i32*), i32* bitcast (i32** @"\01section$end$__DATA$__sancov_guards" to i32*))
diff --git a/test/Instrumentation/SanitizerCoverage/wineh.ll b/test/Instrumentation/SanitizerCoverage/wineh.ll
new file mode 100644
index 000000000000..87b44be5544f
--- /dev/null
+++ b/test/Instrumentation/SanitizerCoverage/wineh.ll
@@ -0,0 +1,111 @@
+; RUN: opt < %s -sancov -sanitizer-coverage-level=3 -sanitizer-coverage-trace-pc -S | FileCheck %s --check-prefix=CHECK
+
+; Generated from this C++ source:
+; $ clang -O2 t.cpp -S -emit-llvm
+; void g();
+; struct Foo { Foo(); ~Foo(); };
+; int f() {
+; Foo v;
+; g();
+; try {
+; g();
+; } catch (int e) {
+; g();
+; } catch (...) {
+; g();
+; }
+; return 0;
+; }
+
+; FIXME: We need to do more than this. In particular, __sanitizer_cov callbacks
+; in funclets need token bundles.
+
+; CHECK-LABEL: define i32 @"\01?f@@YAHXZ"()
+; CHECK: catch.dispatch:
+; CHECK-NEXT: catchswitch within none [label %catch3, label %catch] unwind label %ehcleanup
+
+; ModuleID = 't.cpp'
+source_filename = "t.cpp"
+target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-windows-msvc19.10.24728"
+
+%rtti.TypeDescriptor2 = type { i8**, i8*, [3 x i8] }
+%struct.Foo = type { i8 }
+
+$"\01??_R0H@8" = comdat any
+
+@"\01??_7type_info@@6B@" = external constant i8*
+@"\01??_R0H@8" = linkonce_odr global %rtti.TypeDescriptor2 { i8** @"\01??_7type_info@@6B@", i8* null, [3 x i8] c".H\00" }, comdat
+
+; Function Attrs: uwtable
+define i32 @"\01?f@@YAHXZ"() local_unnamed_addr #0 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+entry:
+ %v = alloca %struct.Foo, align 1
+ %e = alloca i32, align 4
+ %0 = getelementptr inbounds %struct.Foo, %struct.Foo* %v, i64 0, i32 0
+ call void @llvm.lifetime.start(i64 1, i8* nonnull %0) #4
+ %call = call %struct.Foo* @"\01??0Foo@@QEAA@XZ"(%struct.Foo* nonnull %v)
+ invoke void @"\01?g@@YAXXZ"()
+ to label %invoke.cont unwind label %ehcleanup
+
+invoke.cont: ; preds = %entry
+ invoke void @"\01?g@@YAXXZ"()
+ to label %try.cont unwind label %catch.dispatch
+
+catch.dispatch: ; preds = %invoke.cont
+ %1 = catchswitch within none [label %catch3, label %catch] unwind label %ehcleanup
+
+catch3: ; preds = %catch.dispatch
+ %2 = catchpad within %1 [%rtti.TypeDescriptor2* @"\01??_R0H@8", i32 0, i32* %e]
+ invoke void @"\01?g@@YAXXZ"() [ "funclet"(token %2) ]
+ to label %invoke.cont4 unwind label %ehcleanup
+
+invoke.cont4: ; preds = %catch3
+ catchret from %2 to label %try.cont
+
+try.cont: ; preds = %invoke.cont, %invoke.cont2, %invoke.cont4
+ call void @"\01??1Foo@@QEAA@XZ"(%struct.Foo* nonnull %v) #4
+ call void @llvm.lifetime.end(i64 1, i8* nonnull %0) #4
+ ret i32 0
+
+catch: ; preds = %catch.dispatch
+ %3 = catchpad within %1 [i8* null, i32 64, i8* null]
+ invoke void @"\01?g@@YAXXZ"() [ "funclet"(token %3) ]
+ to label %invoke.cont2 unwind label %ehcleanup
+
+invoke.cont2: ; preds = %catch
+ catchret from %3 to label %try.cont
+
+ehcleanup: ; preds = %catch3, %catch, %catch.dispatch, %entry
+ %4 = cleanuppad within none []
+ call void @"\01??1Foo@@QEAA@XZ"(%struct.Foo* nonnull %v) #4 [ "funclet"(token %4) ]
+ call void @llvm.lifetime.end(i64 1, i8* nonnull %0) #4
+ cleanupret from %4 unwind to caller
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+
+declare %struct.Foo* @"\01??0Foo@@QEAA@XZ"(%struct.Foo* returned) unnamed_addr #2
+
+declare void @"\01?g@@YAXXZ"() local_unnamed_addr #2
+
+declare i32 @__CxxFrameHandler3(...)
+
+; Function Attrs: nounwind
+declare void @"\01??1Foo@@QEAA@XZ"(%struct.Foo*) unnamed_addr #3
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+
+attributes #0 = { uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { argmemonly nounwind }
+attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #4 = { nounwind }
+
+!llvm.module.flags = !{!0}
+!llvm.ident = !{!1}
+
+!0 = !{i32 1, !"PIC Level", i32 2}
+!1 = !{!"clang version 5.0.0 "}
diff --git a/test/LTO/Resolution/X86/Inputs/link-odr-availextern-ae.ll b/test/LTO/Resolution/X86/Inputs/link-odr-availextern-ae.ll
new file mode 100644
index 000000000000..f2d180afc82d
--- /dev/null
+++ b/test/LTO/Resolution/X86/Inputs/link-odr-availextern-ae.ll
@@ -0,0 +1,6 @@
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define available_externally i32 @f() {
+ ret i32 2
+}
diff --git a/test/LTO/Resolution/X86/Inputs/link-odr-availextern-odr.ll b/test/LTO/Resolution/X86/Inputs/link-odr-availextern-odr.ll
new file mode 100644
index 000000000000..76e745a444fe
--- /dev/null
+++ b/test/LTO/Resolution/X86/Inputs/link-odr-availextern-odr.ll
@@ -0,0 +1,6 @@
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define linkonce_odr i32 @f() {
+ ret i32 2
+}
diff --git a/test/LTO/Resolution/X86/alias.ll b/test/LTO/Resolution/X86/alias.ll
index 2056112e145d..886eadcfc82e 100644
--- a/test/LTO/Resolution/X86/alias.ll
+++ b/test/LTO/Resolution/X86/alias.ll
@@ -1,6 +1,6 @@
; RUN: llvm-as %s -o %t1.o
; RUN: llvm-as %p/Inputs/alias-1.ll -o %t2.o
-; RUN: llvm-lto2 -o %t3.o %t2.o %t1.o -r %t2.o,a,px -r %t1.o,a, -r %t1.o,b,px -save-temps
+; RUN: llvm-lto2 run -o %t3.o %t2.o %t1.o -r %t2.o,a,px -r %t1.o,a, -r %t1.o,b,px -save-temps
; RUN: llvm-dis < %t3.o.0.0.preopt.bc -o - | FileCheck %s
; RUN: FileCheck --check-prefix=RES %s < %t3.o.resolution.txt
diff --git a/test/LTO/Resolution/X86/asm-output.ll b/test/LTO/Resolution/X86/asm-output.ll
new file mode 100644
index 000000000000..41d293501dd1
--- /dev/null
+++ b/test/LTO/Resolution/X86/asm-output.ll
@@ -0,0 +1,19 @@
+; Test the ability to emit assembly code from the resolution-based LTO API
+;
+; RUN: llvm-as < %s > %t1.bc
+;
+; RUN: llvm-lto2 run -filetype=asm -r %t1.bc,main,px -o %t2 %t1.bc
+; RUN: FileCheck --check-prefix=ASM %s < %t2.0
+; RUN: llvm-lto2 run -filetype=obj -r %t1.bc,main,px -o %t2 %t1.bc
+; RUN: llvm-objdump -d %t2.0 | FileCheck --check-prefix=ASM %s
+;
+; ASM: main:
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @main() {
+entry:
+ ret i32 23
+}
+
diff --git a/test/LTO/Resolution/X86/comdat.ll b/test/LTO/Resolution/X86/comdat.ll
index 5124b951bed4..60d082b3e0f7 100644
--- a/test/LTO/Resolution/X86/comdat.ll
+++ b/test/LTO/Resolution/X86/comdat.ll
@@ -1,6 +1,6 @@
; RUN: llvm-as %s -o %t.o
; RUN: llvm-as %p/Inputs/comdat.ll -o %t2.o
-; RUN: llvm-lto2 -save-temps -o %t3.o %t.o %t2.o \
+; RUN: llvm-lto2 run -save-temps -o %t3.o %t.o %t2.o \
; RUN: -r=%t.o,f1,plx \
; RUN: -r=%t.o,v1,px \
; RUN: -r=%t.o,r11,px \
diff --git a/test/LTO/Resolution/X86/common2.ll b/test/LTO/Resolution/X86/common2.ll
index 3328d7c5ec36..3cb0a992d9ac 100644
--- a/test/LTO/Resolution/X86/common2.ll
+++ b/test/LTO/Resolution/X86/common2.ll
@@ -4,7 +4,7 @@
; Test that the common merging (size + alignment) is properly handled
; Client marked the "large with little alignment" one as prevailing
-; RUN: llvm-lto2 %t1.bc %t2.bc -o %t.o -save-temps \
+; RUN: llvm-lto2 run %t1.bc %t2.bc -o %t.o -save-temps \
; RUN: -r %t1.bc,v,x \
; RUN: -r %t2.bc,v,px \
; RUN: -r %t1.bc,foo,px \
@@ -12,7 +12,7 @@
; RUN: llvm-dis < %t.o.0.0.preopt.bc | FileCheck %s --check-prefix=LARGE-PREVAILED
; Same as before, but reversing the order of the inputs
-; RUN: llvm-lto2 %t2.bc %t1.bc -o %t.o -save-temps \
+; RUN: llvm-lto2 run %t2.bc %t1.bc -o %t.o -save-temps \
; RUN: -r %t1.bc,v,x \
; RUN: -r %t2.bc,v,px \
; RUN: -r %t1.bc,foo,px \
@@ -20,7 +20,7 @@
; RUN: llvm-dis < %t.o.0.0.preopt.bc | FileCheck %s --check-prefix=LARGE-PREVAILED
; Client marked the "small with large alignment" one as prevailing
-; RUN: llvm-lto2 %t1.bc %t2.bc -o %t.o -save-temps \
+; RUN: llvm-lto2 run %t1.bc %t2.bc -o %t.o -save-temps \
; RUN: -r %t1.bc,v,px \
; RUN: -r %t2.bc,v,x \
; RUN: -r %t1.bc,foo,px \
@@ -28,7 +28,7 @@
; RUN: llvm-dis < %t.o.0.0.preopt.bc | FileCheck %s --check-prefix=SMALL-PREVAILED
; Same as before, but reversing the order of the inputs
-; RUN: llvm-lto2 %t2.bc %t1.bc -o %t.o -save-temps \
+; RUN: llvm-lto2 run %t2.bc %t1.bc -o %t.o -save-temps \
; RUN: -r %t1.bc,v,px \
; RUN: -r %t2.bc,v,x \
; RUN: -r %t1.bc,foo,px \
@@ -37,7 +37,7 @@
; Client didn't mark any as prevailing, we keep the first one we see as "external"
-; RUN: llvm-lto2 %t1.bc %t2.bc -o %t.o -save-temps \
+; RUN: llvm-lto2 run %t1.bc %t2.bc -o %t.o -save-temps \
; RUN: -r %t1.bc,v,x \
; RUN: -r %t2.bc,v,x \
; RUN: -r %t1.bc,foo,px \
@@ -45,7 +45,7 @@
; RUN: llvm-dis < %t.o.0.0.preopt.bc | FileCheck %s --check-prefix=NONE-PREVAILED1
; Same as before, but reversing the order of the inputs
-; RUN: llvm-lto2 %t2.bc %t1.bc -o %t.o -save-temps \
+; RUN: llvm-lto2 run %t2.bc %t1.bc -o %t.o -save-temps \
; RUN: -r %t1.bc,v,x \
; RUN: -r %t2.bc,v,x \
; RUN: -r %t1.bc,foo,px \
@@ -55,7 +55,7 @@
; Client marked both as prevailing
-; RUN: llvm-lto2 %t1.bc %t2.bc -o %t.o -save-temps \
+; RUN: llvm-lto2 run %t1.bc %t2.bc -o %t.o -save-temps \
; RUN: -r %t1.bc,v,px \
; RUN: -r %t2.bc,v,px \
; RUN: -r %t1.bc,foo,px \
@@ -63,7 +63,7 @@
; RUN: llvm-dis < %t.o.0.0.preopt.bc | FileCheck %s --check-prefix=BOTH-PREVAILED1
; Same as before, but reversing the order of the inputs
-; RUN: llvm-lto2 %t2.bc %t1.bc -o %t.o -save-temps \
+; RUN: llvm-lto2 run %t2.bc %t1.bc -o %t.o -save-temps \
; RUN: -r %t1.bc,v,px \
; RUN: -r %t2.bc,v,px \
; RUN: -r %t1.bc,foo,px \
diff --git a/test/LTO/Resolution/X86/commons.ll b/test/LTO/Resolution/X86/commons.ll
index b3e504835afb..28bf1ada4a86 100644
--- a/test/LTO/Resolution/X86/commons.ll
+++ b/test/LTO/Resolution/X86/commons.ll
@@ -1,6 +1,6 @@
; RUN: llvm-as -o %t1.bc %s
; RUN: llvm-as -o %t2.bc %p/Inputs/commons.ll
-; RUN: llvm-lto2 %t1.bc -r=%t1.bc,x,l %t2.bc -r=%t2.bc,x,pl -o %t.out -save-temps
+; RUN: llvm-lto2 run %t1.bc -r=%t1.bc,x,l %t2.bc -r=%t2.bc,x,pl -o %t.out -save-temps
; RUN: llvm-dis -o - %t.out.0.0.preopt.bc | FileCheck %s
; A strong definition should override the common
diff --git a/test/LTO/Resolution/X86/diagnostic-handler-remarks-with-hotness.ll b/test/LTO/Resolution/X86/diagnostic-handler-remarks-with-hotness.ll
new file mode 100644
index 000000000000..2469570c26b3
--- /dev/null
+++ b/test/LTO/Resolution/X86/diagnostic-handler-remarks-with-hotness.ll
@@ -0,0 +1,37 @@
+; RUN: llvm-as < %s >%t.bc
+
+; RUN: rm -f %t.yaml
+; RUN: llvm-lto2 run -pass-remarks-output=%t.yaml \
+; RUN: -pass-remarks-with-hotness \
+; RUN: -r %t.bc,tinkywinky,p \
+; RUN: -r %t.bc,patatino,px \
+; RUN: -r %t.bc,main,px -o %t.o %t.bc
+; RUN: cat %t.yaml | FileCheck %s -check-prefix=YAML
+
+; YAML: --- !Passed
+; YAML-NEXT: Pass: inline
+; YAML-NEXT: Name: Inlined
+; YAML-NEXT: Function: main
+; YAML-NEXT: Hotness: 300
+; YAML-NEXT: Args:
+; YAML-NEXT: - Callee: tinkywinky
+; YAML-NEXT: - String: ' inlined into '
+; YAML-NEXT: - Caller: main
+; YAML-NEXT: ...
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-scei-ps4"
+
+declare i32 @patatino()
+
+define i32 @tinkywinky() {
+ %a = call i32 @patatino()
+ ret i32 %a
+}
+
+define i32 @main() !prof !0 {
+ %i = call i32 @tinkywinky()
+ ret i32 %i
+}
+
+!0 = !{!"function_entry_count", i64 300}
diff --git a/test/LTO/Resolution/X86/diagnostic-handler-remarks.ll b/test/LTO/Resolution/X86/diagnostic-handler-remarks.ll
new file mode 100644
index 000000000000..eb1bca3670c6
--- /dev/null
+++ b/test/LTO/Resolution/X86/diagnostic-handler-remarks.ll
@@ -0,0 +1,33 @@
+; RUN: llvm-as < %s >%t.bc
+
+; RUN: rm -f %t.yaml
+; RUN: llvm-lto2 run -pass-remarks-output=%t.yaml \
+; RUN: -r %t.bc,tinkywinky,p \
+; RUN: -r %t.bc,patatino,px \
+; RUN: -r %t.bc,main,px -o %t.o %t.bc
+; RUN: cat %t.yaml | FileCheck %s -check-prefix=YAML
+
+; YAML: --- !Passed
+; YAML-NEXT: Pass: inline
+; YAML-NEXT: Name: Inlined
+; YAML-NEXT: Function: main
+; YAML-NEXT: Args:
+; YAML-NEXT: - Callee: tinkywinky
+; YAML-NEXT: - String: ' inlined into '
+; YAML-NEXT: - Caller: main
+; YAML-NEXT: ...
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-scei-ps4"
+
+declare i32 @patatino()
+
+define i32 @tinkywinky() {
+ %a = call i32 @patatino()
+ ret i32 %a
+}
+
+define i32 @main() {
+ %i = call i32 @tinkywinky()
+ ret i32 %i
+}
diff --git a/test/LTO/Resolution/X86/empty-bitcode.test b/test/LTO/Resolution/X86/empty-bitcode.test
index c98c54499ef6..c05c5e3824b6 100644
--- a/test/LTO/Resolution/X86/empty-bitcode.test
+++ b/test/LTO/Resolution/X86/empty-bitcode.test
@@ -1,3 +1,3 @@
RUN: llvm-cat -o %t.o
-RUN: not llvm-lto2 -o %t2 %t.o 2>&1 | FileCheck %s
+RUN: not llvm-lto2 run -o %t2 %t.o 2>&1 | FileCheck %s
CHECK: Bitcode file does not contain any modules
diff --git a/test/LTO/Resolution/X86/intrinsic.ll b/test/LTO/Resolution/X86/intrinsic.ll
index f785f8f4f714..dc287ace0f87 100644
--- a/test/LTO/Resolution/X86/intrinsic.ll
+++ b/test/LTO/Resolution/X86/intrinsic.ll
@@ -1,6 +1,6 @@
; RUN: llvm-as %s -o %t1.o
; RUN: llvm-as %p/Inputs/intrinsic.ll -o %t2.o
-; RUN: llvm-lto2 -o %t3.o %t1.o %t2.o -r %t1.o,foo
+; RUN: llvm-lto2 run -o %t3.o %t1.o %t2.o -r %t1.o,foo
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/LTO/Resolution/X86/link-odr-availextern.ll b/test/LTO/Resolution/X86/link-odr-availextern.ll
new file mode 100644
index 000000000000..cc360338d6d1
--- /dev/null
+++ b/test/LTO/Resolution/X86/link-odr-availextern.ll
@@ -0,0 +1,38 @@
+; Tests for correct behavior for non-prevailing resolutions in cases involving
+; *_odr and available_externally linkages.
+
+; RUN: llvm-as %s -o %t1
+; RUN: llvm-as %S/Inputs/link-odr-availextern-ae.ll -o %t2ae
+; RUN: llvm-as %S/Inputs/link-odr-availextern-odr.ll -o %t2odr
+
+; RUN: llvm-lto2 run -o %t3 %t1 %t2ae -r %t1,f,p -r %t2ae,f, -save-temps
+; RUN: llvm-dis < %t3.0.0.preopt.bc -o - | FileCheck --check-prefix=PREVAILING %s
+
+; RUN: llvm-lto2 run -o %t3 %t1 %t2odr -r %t1,f,p -r %t2odr,f, -save-temps
+; RUN: llvm-dis < %t3.0.0.preopt.bc -o - | FileCheck --check-prefix=PREVAILING %s
+
+; RUN: llvm-lto2 run -o %t3 %t2ae %t1 -r %t1,f,p -r %t2ae,f, -save-temps
+; RUN: llvm-dis < %t3.0.0.preopt.bc -o - | FileCheck --check-prefix=PREVAILING %s
+
+; RUN: llvm-lto2 run -o %t3 %t2odr %t1 -r %t1,f,p -r %t2odr,f, -save-temps
+; RUN: llvm-dis < %t3.0.0.preopt.bc -o - | FileCheck --check-prefix=PREVAILING %s
+
+; RUN: llvm-lto2 run -o %t3 %t2ae -r %t2ae,f, -save-temps
+; RUN: llvm-dis < %t3.0.0.preopt.bc -o - | FileCheck --check-prefix=NONPREVAILING %s
+
+; RUN: llvm-lto2 run -o %t3 %t2odr -r %t2odr,f, -save-temps
+; RUN: llvm-dis < %t3.0.0.preopt.bc -o - | FileCheck --check-prefix=NONPREVAILING %s
+
+; RUN: llvm-lto2 run -o %t3 %t2odr %t1 -r %t1,f, -r %t2odr,f, -save-temps
+; RUN: llvm-dis < %t3.0.0.preopt.bc -o - | FileCheck --check-prefix=NONPREVAILING %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; PREVAILING: define weak_odr i32 @f()
+; PREVAILING-NEXT: ret i32 1
+; NONPREVAILING: define available_externally i32 @f()
+; NONPREVAILING-NEXT: ret i32 2
+define linkonce_odr i32 @f() {
+ ret i32 1
+}
diff --git a/test/LTO/Resolution/X86/lowertypetests.ll b/test/LTO/Resolution/X86/lowertypetests.ll
new file mode 100644
index 000000000000..c84a786e66fc
--- /dev/null
+++ b/test/LTO/Resolution/X86/lowertypetests.ll
@@ -0,0 +1,21 @@
+; RUN: opt -thinlto-bc -o %t %s
+; RUN: llvm-lto2 run -r %t,f,plx -r %t,foo,lx -r %t,foo,plx -o %t1 %t
+; RUN: llvm-nm %t1.0 | FileCheck --check-prefix=MERGED %s
+; RUN: llvm-nm %t1.1 | FileCheck %s
+
+; MERGED: R __typeid_foo_global_addr
+; CHECK: U __typeid_foo_global_addr
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@foo = global i32 0, !type !0
+
+define i1 @f(i8* %ptr) {
+ %p = call i1 @llvm.type.test(i8* %ptr, metadata !"foo")
+ ret i1 %p
+}
+
+declare i1 @llvm.type.test(i8* %ptr, metadata %typeid) nounwind readnone
+
+!0 = !{i32 0, !"foo"}
diff --git a/test/LTO/Resolution/X86/mixed_lto.ll b/test/LTO/Resolution/X86/mixed_lto.ll
index 02b15c611544..aa686a8114c9 100644
--- a/test/LTO/Resolution/X86/mixed_lto.ll
+++ b/test/LTO/Resolution/X86/mixed_lto.ll
@@ -2,7 +2,7 @@
; RUN: opt %s -o %t1.o
; RUN: opt -module-summary %p/Inputs/mixed_lto.ll -o %t2.o
-; RUN: llvm-lto2 -o %t3.o %t2.o %t1.o -r %t2.o,main,px -r %t2.o,g, -r %t1.o,g,px
+; RUN: llvm-lto2 run -o %t3.o %t2.o %t1.o -r %t2.o,main,px -r %t2.o,g, -r %t1.o,g,px
; Task 0 is the regular LTO file (this file)
; RUN: llvm-nm %t3.o.0 | FileCheck %s --check-prefix=NM0
@@ -15,7 +15,7 @@
; Do the same test again, but with the regular and thin LTO modules in the same file.
; RUN: llvm-cat -b -o %t4.o %t2.o %t1.o
-; RUN: llvm-lto2 -o %t5.o %t4.o -r %t4.o,main,px -r %t4.o,g, -r %t4.o,g,px
+; RUN: llvm-lto2 run -o %t5.o %t4.o -r %t4.o,main,px -r %t4.o,g, -r %t4.o,g,px
; RUN: llvm-nm %t5.o.0 | FileCheck %s --check-prefix=NM0
; RUN: llvm-nm %t5.o.1 | FileCheck %s --check-prefix=NM1
diff --git a/test/LTO/Resolution/X86/multi-thinlto.ll b/test/LTO/Resolution/X86/multi-thinlto.ll
index 8af73a328a13..06150e44a167 100644
--- a/test/LTO/Resolution/X86/multi-thinlto.ll
+++ b/test/LTO/Resolution/X86/multi-thinlto.ll
@@ -1,6 +1,6 @@
; RUN: opt -module-summary %s -o %t.o
; RUN: llvm-cat -b -o %t2.o %t.o %t.o
-; RUN: not llvm-lto2 -o %t3.o %t2.o 2>&1 | FileCheck %s
+; RUN: not llvm-lto2 run -o %t3.o %t2.o 2>&1 | FileCheck %s
; CHECK: Expected at most one ThinLTO module per bitcode file
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/test/LTO/Resolution/X86/symtab-elf.ll b/test/LTO/Resolution/X86/symtab-elf.ll
new file mode 100644
index 000000000000..1683b061c6d6
--- /dev/null
+++ b/test/LTO/Resolution/X86/symtab-elf.ll
@@ -0,0 +1,15 @@
+; RUN: llvm-as -o %t %s
+; RUN: llvm-lto2 dump-symtab %t | FileCheck %s
+
+; CHECK: target triple: x86_64-unknown-linux-gnu
+target triple = "x86_64-unknown-linux-gnu"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; CHECK-NOT: linker opts:
+!0 = !{i32 6, !"Linker Options", !{!{!"/include:foo"}}}
+!llvm.module.flags = !{ !0 }
+
+@g1 = global i32 0
+
+; CHECK-NOT: fallback g1
+@g2 = weak alias i32, i32* @g1
diff --git a/test/LTO/Resolution/X86/symtab.ll b/test/LTO/Resolution/X86/symtab.ll
new file mode 100644
index 000000000000..b7bc11749016
--- /dev/null
+++ b/test/LTO/Resolution/X86/symtab.ll
@@ -0,0 +1,53 @@
+; RUN: llvm-as -o %t %s
+; RUN: llvm-lto2 dump-symtab %t | FileCheck %s
+
+; CHECK: target triple: i686-pc-windows-msvc18.0.0
+target triple = "i686-pc-windows-msvc18.0.0"
+target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
+
+; CHECK: source filename: src.c
+source_filename = "src.c"
+
+; CHECK: linker opts: /include:foo
+!0 = !{i32 6, !"Linker Options", !{!{!"/include:foo"}}}
+!llvm.module.flags = !{ !0 }
+
+; CHECK: D------X _fun
+define i32 @fun() {
+ ret i32 0
+}
+
+; CHECK: H------- _g1
+@g1 = hidden global i32 0
+
+; CHECK: P------- _g2
+@g2 = protected global i32 0
+
+; CHECK: D------- _g3
+@g3 = global i32 0
+
+; CHECK: DU------ _g4
+@g4 = external global i32
+
+; CHECK: D--W---- _g5
+@g5 = weak global i32 0
+
+; CHECK: D--W-O-- _g6
+@g6 = linkonce_odr unnamed_addr global i32 0
+
+; CHECK: D-----T- _g7
+@g7 = thread_local global i32 0
+
+; CHECK: D-C----- _g8
+; CHECK-NEXT: size 4 align 8
+@g8 = common global i32 0, align 8
+
+; CHECK: D------- _g9
+; CHECK-NEXT: comdat g9
+$g9 = comdat any
+@g9 = global i32 0, comdat
+
+; CHECK: D--WI--- _g10
+; CHECK-NEXT: comdat g9
+; CHECK-NEXT: fallback _g9
+@g10 = weak alias i32, i32* @g9
diff --git a/test/LTO/X86/diagnostic-handler-remarks-with-hotness.ll b/test/LTO/X86/diagnostic-handler-remarks-with-hotness.ll
index e5d53c7774a1..5d0a9b0a4e22 100644
--- a/test/LTO/X86/diagnostic-handler-remarks-with-hotness.ll
+++ b/test/LTO/X86/diagnostic-handler-remarks-with-hotness.ll
@@ -2,9 +2,9 @@
; with -lto-pass-remarks-with-hotness.
; RUN: llvm-as < %s >%t.bc
+; RUN: rm -f %t.yaml
; RUN: llvm-lto -lto-pass-remarks-output=%t.yaml \
; RUN: -lto-pass-remarks-with-hotness \
-; RUN: -exported-symbol _func2 \
; RUN: -exported-symbol _main -o %t.o %t.bc
; RUN: cat %t.yaml | FileCheck -check-prefix=YAML %s
@@ -34,45 +34,4 @@ define i32 @main() !prof !0 {
ret i32 %i
}
-define i32 @func2(i32* %out, i32* %out2, i32* %A, i32* %B, i32* %C, i32* %D, i32* %E, i32* %F) {
-entry:
- br label %for.body
-
-for.body: ; preds = %for.body, %entry
- %i.037 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.037
- %0 = load i32, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32, i32* %B, i64 %i.037
- %1 = load i32, i32* %arrayidx1, align 4
- %add = add nsw i32 %1, %0
- %arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %i.037
- %2 = load i32, i32* %arrayidx2, align 4
- %add3 = add nsw i32 %add, %2
- %arrayidx4 = getelementptr inbounds i32, i32* %E, i64 %i.037
- %3 = load i32, i32* %arrayidx4, align 4
- %add5 = add nsw i32 %add3, %3
- %arrayidx6 = getelementptr inbounds i32, i32* %F, i64 %i.037
- %4 = load i32, i32* %arrayidx6, align 4
- %add7 = add nsw i32 %add5, %4
- %arrayidx8 = getelementptr inbounds i32, i32* %out, i64 %i.037
- store i32 %add7, i32* %arrayidx8, align 4
- %5 = load i32, i32* %arrayidx, align 4
- %6 = load i32, i32* %arrayidx1, align 4
- %add11 = add nsw i32 %6, %5
- %7 = load i32, i32* %arrayidx2, align 4
- %add13 = add nsw i32 %add11, %7
- %8 = load i32, i32* %arrayidx4, align 4
- %add15 = add nsw i32 %add13, %8
- %9 = load i32, i32* %arrayidx6, align 4
- %add17 = add nsw i32 %add15, %9
- %arrayidx18 = getelementptr inbounds i32, i32* %out2, i64 %i.037
- store i32 %add17, i32* %arrayidx18, align 4
- %inc = add i64 %i.037, 1
- %exitcond = icmp eq i64 %inc, 256
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body
- ret i32 undef
-}
-
!0 = !{!"function_entry_count", i64 300}
diff --git a/test/LTO/X86/diagnostic-handler-remarks.ll b/test/LTO/X86/diagnostic-handler-remarks.ll
index 456bdb5419c8..82627fd24ab4 100644
--- a/test/LTO/X86/diagnostic-handler-remarks.ll
+++ b/test/LTO/X86/diagnostic-handler-remarks.ll
@@ -28,6 +28,7 @@
; RUN: llvm-nm %t.o | FileCheck %s -check-prefix NM
; Optimization records are collected regardless of the diagnostic handler
+; RUN: rm -f %t.yaml
; RUN: llvm-lto -lto-pass-remarks-output=%t.yaml \
; RUN: -exported-symbol _func2 \
; RUN: -exported-symbol _main -o %t.o %t.bc 2>&1 | \
diff --git a/test/LTO/X86/remangle_intrinsics_tbaa.ll b/test/LTO/X86/remangle_intrinsics_tbaa.ll
index 189674b5b068..cac72f4330b3 100644
--- a/test/LTO/X86/remangle_intrinsics_tbaa.ll
+++ b/test/LTO/X86/remangle_intrinsics_tbaa.ll
@@ -3,7 +3,7 @@
; RUN: llvm-link -disable-lazy-loading %t2.bc %t1.bc -S | FileCheck %s
; Verify that we correctly rename the intrinsic and don't crash
-; CHECK: @llvm.masked.store.v4p0some_named_struct.0.p0v4p0some_named_struct.0
+; CHECK: @llvm.masked.store.v4p0s_some_named_struct.0s.p0v4p0s_some_named_struct.0s
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.11.0"
diff --git a/test/LTO/X86/strip-debug-info-no-call-loc.ll b/test/LTO/X86/strip-debug-info-no-call-loc.ll
new file mode 100644
index 000000000000..39b8c40c3a47
--- /dev/null
+++ b/test/LTO/X86/strip-debug-info-no-call-loc.ll
@@ -0,0 +1,56 @@
+; RUN: llvm-as %s -disable-verify -o %t.bc
+; RUN: llvm-lto -lto-strip-invalid-debug-info=true \
+; RUN: -exported-symbol f -exported-symbol _f \
+; RUN: -o %t.o %t.bc 2>&1 | \
+; RUN: FileCheck %s -allow-empty -check-prefix=CHECK-WARN
+; RUN: llvm-nm %t.o | FileCheck %s
+
+; Check that missing debug locations on inlinable calls are a
+; recoverable error.
+
+; CHECK-WARN: Invalid debug info found, debug info will be stripped
+; CHECK: {{f$}}
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx"
+
+define void @h() #0 !dbg !7 {
+entry:
+ call void (...) @i(), !dbg !9
+ ret void, !dbg !10
+}
+
+declare void @i(...) #1
+
+define void @g() #0 !dbg !11 {
+entry:
+; Manually removed !dbg.
+ call void @h()
+ ret void, !dbg !13
+}
+
+define void @f() #0 !dbg !14 {
+entry:
+ call void @g(), !dbg !15
+ ret void, !dbg !16
+}
+
+attributes #0 = { nounwind ssp uwtable }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, isOptimized: false, emissionKind: LineTablesOnly, enums: !2)
+!1 = !DIFile(filename: "test.c", directory: "/")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 2}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!7 = distinct !DISubprogram(name: "h", scope: !1, file: !1, line: 2, type: !8, isLocal: false, isDefinition: true, scopeLine: 2, isOptimized: false, unit: !0, variables: !2)
+!8 = !DISubroutineType(types: !2)
+!9 = !DILocation(line: 2, column: 12, scope: !7)
+!10 = !DILocation(line: 2, column: 17, scope: !7)
+!11 = distinct !DISubprogram(name: "g", scope: !1, file: !1, line: 3, type: !8, isLocal: false, isDefinition: true, scopeLine: 3, isOptimized: false, unit: !0, variables: !2)
+!12 = !DILocation(line: 3, column: 12, scope: !11)
+!13 = !DILocation(line: 3, column: 17, scope: !11)
+!14 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 4, type: !8, isLocal: false, isDefinition: true, scopeLine: 4, isOptimized: false, unit: !0, variables: !2)
+!15 = !DILocation(line: 4, column: 12, scope: !14)
+!16 = !DILocation(line: 4, column: 17, scope: !14)
diff --git a/test/LTO/X86/symver-asm.ll b/test/LTO/X86/symver-asm.ll
index 03dda2bedd96..4841892724af 100644
--- a/test/LTO/X86/symver-asm.ll
+++ b/test/LTO/X86/symver-asm.ll
@@ -1,16 +1,47 @@
; RUN: llvm-as < %s >%t1
-; RUN: llvm-lto -o %t2 %t1
+; RUN: llvm-lto -exported-symbol=io_cancel_0_4 -exported-symbol=io_cancel_weak_0_4 -exported-symbol=foo -o %t2 %t1
; RUN: llvm-nm %t2 | FileCheck %s
+; RUN: llvm-lto2 run -r %t1,io_cancel_0_4,plx -r %t1,io_cancel_0_4,plx -r %t1,io_cancel_local_0_4,plx -r %t1,io_cancel_weak_0_4,plx -r %t1,io_cancel_weak_0_4,plx -r %t1,io_cancel@@LIBAIO_0.4,plx -r %t1,io_cancel_weak@@LIBAIO_0.4,plx -r %t1,io_cancel_weak@@LIBAIO_0.4.1,plx -r %t1,foo,plx -r %t1,foo,plx -r %t1,foo@@VER1,plx -o %t3 %t1 -save-temps
+; RUN: llvm-nm %t3.0 | FileCheck %s
+; RUN: llvm-dis %t3.0.2.internalize.bc -o - | FileCheck %s --check-prefix=INTERN
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
module asm ".symver io_cancel_0_4,io_cancel@@LIBAIO_0.4"
+module asm ".symver io_cancel_local_0_4,io_cancel_local@@LIBAIO_0.4"
+module asm ".symver io_cancel_weak_0_4,io_cancel_weak@@LIBAIO_0.4"
+; Ensure we handle the case of the same aliasee with two version aliases.
+module asm ".symver io_cancel_weak_0_4,io_cancel_weak@@LIBAIO_0.4.1"
+module asm ".symver foo,foo@@VER1"
+
+; Local values used in inline assembly must be listed in
+; llvm.compiler.used so they aren't incorrectly DCE'd during module linking.
+@llvm.compiler.used = appending global [1 x i8*] [i8* bitcast (i32 ()* @io_cancel_local_0_4 to i8*)], section "llvm.metadata"
-; Even without -exported-symbol, io_cancel_0_4 should be noticed by LTOModule's
-; RecordStreamer, so it shouldn't get eliminated. However, the object file will
-; contain the aliased symver as well as the original.
define i32 @io_cancel_0_4() {
-; CHECK: io_cancel@@LIBAIO_0.4
-; CHECK: io_cancel_0_4
+; CHECK-DAG: T io_cancel@@LIBAIO_0.4
+; CHECK-DAG: T io_cancel_0_4
+ ret i32 0
+}
+
+define internal i32 @io_cancel_local_0_4() {
+; INTERN: llvm.compiler.used {{.*}} @io_cancel_local_0_4
+; INTERN: define internal i32 @io_cancel_local_0_4()
+; CHECK-DAG: t io_cancel_local@@LIBAIO_0.4
+; CHECK-DAG: t io_cancel_local_0_4
+ ret i32 0
+}
+
+define weak i32 @io_cancel_weak_0_4() {
+; CHECK-DAG: W io_cancel_weak@@LIBAIO_0.4
+; CHECK-DAG: W io_cancel_weak@@LIBAIO_0.4.1
+; CHECK-DAG: W io_cancel_weak_0_4
+ret i32 0
+}
+
+define i32 @"\01foo"() {
+; CHECK-DAG: T foo@@VER1
+; CHECK-DAG: T foo
ret i32 0
}
diff --git a/test/LTO/X86/symver-asm2.ll b/test/LTO/X86/symver-asm2.ll
new file mode 100644
index 000000000000..42d6e54bd06a
--- /dev/null
+++ b/test/LTO/X86/symver-asm2.ll
@@ -0,0 +1,30 @@
+; Test to ensure symbol binding works correctly for symver directives,
+; when the aliased symbols are defined in inline assembly, including
+; cases when the symbol attributes are provided after the .symver
+; directive.
+
+; RUN: llvm-as < %s >%t1
+; RUN: llvm-lto -o %t2 %t1
+; RUN: llvm-nm %t2 | FileCheck %s
+; RUN: llvm-lto2 run -r %t1,_start,plx -r %t1,_start3,plx -r %t1,foo@@SOME_VERSION -r %t1,foo@SOME_VERSION3 -o %t3 %t1 -save-temps
+; RUN: llvm-nm %t3.0 | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+module asm ".global _start"
+module asm "_start:"
+module asm "_start2:"
+module asm "_start3:"
+module asm ".symver _start, foo@@SOME_VERSION"
+module asm ".symver _start2, foo@SOME_VERSION2"
+module asm ".symver _start3, foo@SOME_VERSION3"
+module asm ".local _start2"
+module asm ".weak _start3"
+
+; CHECK-DAG: T _start
+; CHECK-DAG: t _start2
+; CHECK-DAG: W _start3
+; CHECK-DAG: T foo@@SOME_VERSION
+; CHECK-DAG: t foo@SOME_VERSION2
+; CHECK-DAG: W foo@SOME_VERSION3
diff --git a/test/Linker/2011-08-18-unique-class-type2.ll b/test/Linker/2011-08-18-unique-class-type2.ll
index f5cd6333b670..a933cc3fd7d8 100644
--- a/test/Linker/2011-08-18-unique-class-type2.ll
+++ b/test/Linker/2011-08-18-unique-class-type2.ll
@@ -21,7 +21,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone
!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.0 (trunk 137954)", isOptimized: true, emissionKind: FullDebug, file: !16, enums: !2, retainedTypes: !2, globals: !2)
!1 = !{!2}
!2 = !{}
-!5 = distinct !DISubprogram(name: "bar", linkageName: "_Z3barN2N11AE", line: 4, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: false, unit: !0, scope: !6, type: !7)
+!5 = distinct !DISubprogram(name: "bar", linkageName: "_Z3barN2N11AE", file: !16, line: 4, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: false, unit: !0, scope: !6, type: !7)
!6 = !DIFile(filename: "n2.c", directory: "/private/tmp")
!7 = !DISubroutineType(types: !8)
!8 = !{null}
diff --git a/test/Linker/Inputs/linkage.d.ll b/test/Linker/Inputs/linkage.d.ll
new file mode 100644
index 000000000000..aaf010d38855
--- /dev/null
+++ b/test/Linker/Inputs/linkage.d.ll
@@ -0,0 +1,5 @@
+@Y = global i8 42
+
+define i64 @foo() { ret i64 7 }
+
+@llvm.used = appending global [2 x i8*] [i8* @Y, i8* bitcast (i64 ()* @foo to i8*)], section "llvm.metadata"
diff --git a/test/Linker/available_externally_a.ll b/test/Linker/available_externally_a.ll
index 3ae4ce29140a..7a000b6a4aa5 100644
--- a/test/Linker/available_externally_a.ll
+++ b/test/Linker/available_externally_a.ll
@@ -1,5 +1,7 @@
; RUN: llvm-link %s %p/available_externally_b.ll -S -o - | FileCheck %s
+; RUN: llvm-link %s -S -o - | FileCheck --check-prefix=AE-ONLY %s
@foo = available_externally unnamed_addr constant i32 0
; CHECK: @foo = hidden unnamed_addr constant i32 0
+; AE-ONLY-NOT: @foo
diff --git a/test/Linker/link-flags.ll b/test/Linker/link-flags.ll
index c901b699575a..1a57e8aa4d28 100644
--- a/test/Linker/link-flags.ll
+++ b/test/Linker/link-flags.ll
@@ -2,12 +2,15 @@
; RUN: llvm-link -S -only-needed %S/Inputs/linkage.b.ll %S/Inputs/linkage.c.ll | FileCheck %s -check-prefix=B -check-prefix=C -check-prefix=CN
; RUN: llvm-link -S -internalize %S/Inputs/linkage.b.ll %S/Inputs/linkage.c.ll | FileCheck %s -check-prefix=B -check-prefix=CI
; RUN: llvm-link -S -internalize -only-needed %S/Inputs/linkage.b.ll %S/Inputs/linkage.c.ll | FileCheck %s -check-prefix=B -check-prefix=CN
+; RUN: llvm-link -S -internalize %S/Inputs/linkage.b.ll %S/Inputs/linkage.c.ll %S/Inputs/linkage.d.ll | FileCheck %s -check-prefix=B -check-prefix=DI
C-LABEL: @X = global i32 5
CI-LABEL: @X = internal global i32 5
CU-LABEL:@U = global i32 6
CI-LABEL:@U = internal global i32 6
CN-NOT:@U
+DI-LABEL: @Y = global i8 42
+DI-LABEL: @llvm.used = appending global [2 x i8*] [i8* @Y, i8* bitcast (i64 ()* @foo to i8*)], section "llvm.metadata"
B-LABEL: define void @bar() {
@@ -17,3 +20,6 @@ CI-LABEL: define internal i32 @foo()
CU-LABEL:define i32 @unused() {
CI-LABEL:define internal i32 @unused() {
CN-NOT:@unused()
+
+DI-LABEL: define internal i32 @foo.6()
+DI-LABEL: define i64 @foo()
diff --git a/test/MC/AArch64/alias-addsubimm.s b/test/MC/AArch64/alias-addsubimm.s
index 75e0a185572e..5c1c4799828c 100644
--- a/test/MC/AArch64/alias-addsubimm.s
+++ b/test/MC/AArch64/alias-addsubimm.s
@@ -1,19 +1,24 @@
// RUN: llvm-mc -triple=aarch64-none-linux-gnu < %s | FileCheck %s
+// RUN: not llvm-mc -mattr=+no-neg-immediates -triple=aarch64-none-linux-gnu < %s 2>&1 | FileCheck %s --check-prefix=CHECK-NO-NEG-IMM
// CHECK: sub w0, w2, #2, lsl #12
// CHECK: sub w0, w2, #2, lsl #12
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
sub w0, w2, #2, lsl 12
add w0, w2, #-2, lsl 12
// CHECK: sub x1, x3, #2, lsl #12
// CHECK: sub x1, x3, #2, lsl #12
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
sub x1, x3, #2, lsl 12
add x1, x3, #-2, lsl 12
// CHECK: sub x1, x3, #4
// CHECK: sub x1, x3, #4
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
sub x1, x3, #4
add x1, x3, #-4
// CHECK: sub x1, x3, #4095
// CHECK: sub x1, x3, #4095
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
sub x1, x3, #4095, lsl 0
add x1, x3, #-4095, lsl 0
// CHECK: sub x3, x4, #0
@@ -21,18 +26,22 @@
// CHECK: add w0, w2, #2, lsl #12
// CHECK: add w0, w2, #2, lsl #12
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
add w0, w2, #2, lsl 12
sub w0, w2, #-2, lsl 12
// CHECK: add x1, x3, #2, lsl #12
// CHECK: add x1, x3, #2, lsl #12
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
add x1, x3, #2, lsl 12
sub x1, x3, #-2, lsl 12
// CHECK: add x1, x3, #4
// CHECK: add x1, x3, #4
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
add x1, x3, #4
sub x1, x3, #-4
// CHECK: add x1, x3, #4095
// CHECK: add x1, x3, #4095
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
add x1, x3, #4095, lsl 0
sub x1, x3, #-4095, lsl 0
// CHECK: add x2, x5, #0
@@ -40,18 +49,22 @@
// CHECK: subs w0, w2, #2, lsl #12
// CHECK: subs w0, w2, #2, lsl #12
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
subs w0, w2, #2, lsl 12
adds w0, w2, #-2, lsl 12
// CHECK: subs x1, x3, #2, lsl #12
// CHECK: subs x1, x3, #2, lsl #12
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
subs x1, x3, #2, lsl 12
adds x1, x3, #-2, lsl 12
// CHECK: subs x1, x3, #4
// CHECK: subs x1, x3, #4
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
subs x1, x3, #4
adds x1, x3, #-4
// CHECK: subs x1, x3, #4095
// CHECK: subs x1, x3, #4095
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
subs x1, x3, #4095, lsl 0
adds x1, x3, #-4095, lsl 0
// CHECK: subs x3, x4, #0
@@ -59,18 +72,22 @@
// CHECK: adds w0, w2, #2, lsl #12
// CHECK: adds w0, w2, #2, lsl #12
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
adds w0, w2, #2, lsl 12
subs w0, w2, #-2, lsl 12
// CHECK: adds x1, x3, #2, lsl #12
// CHECK: adds x1, x3, #2, lsl #12
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
adds x1, x3, #2, lsl 12
subs x1, x3, #-2, lsl 12
// CHECK: adds x1, x3, #4
// CHECK: adds x1, x3, #4
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
adds x1, x3, #4
subs x1, x3, #-4
// CHECK: adds x1, x3, #4095
// CHECK: adds x1, x3, #4095
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
adds x1, x3, #4095, lsl 0
subs x1, x3, #-4095, lsl 0
// CHECK: adds x2, x5, #0
@@ -78,17 +95,21 @@
// CHECK: {{adds xzr,|cmn}} x5, #5
// CHECK: {{adds xzr,|cmn}} x5, #5
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
cmn x5, #5
cmp x5, #-5
// CHECK: {{subs xzr,|cmp}} x6, #4095
// CHECK: {{subs xzr,|cmp}} x6, #4095
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
cmp x6, #4095
cmn x6, #-4095
// CHECK: {{adds wzr,|cmn}} w7, #5
// CHECK: {{adds wzr,|cmn}} w7, #5
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
cmn w7, #5
cmp w7, #-5
// CHECK: {{subs wzr,|cmp}} w8, #4095
// CHECK: {{subs wzr,|cmp}} w8, #4095
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
cmp w8, #4095
cmn w8, #-4095
diff --git a/test/MC/AArch64/alias-logicalimm.s b/test/MC/AArch64/alias-logicalimm.s
index 28ec40beac4d..427a06d6514f 100644
--- a/test/MC/AArch64/alias-logicalimm.s
+++ b/test/MC/AArch64/alias-logicalimm.s
@@ -1,41 +1,50 @@
// RUN: llvm-mc -triple=aarch64-none-linux-gnu < %s | FileCheck %s
+// RUN: not llvm-mc -mattr=+no-neg-immediates -triple=aarch64-none-linux-gnu < %s 2>&1 | FileCheck %s --check-prefix=CHECK-NO-NEG-IMM
// CHECK: and x0, x1, #0xfffffffffffffffd
// CHECK: and x0, x1, #0xfffffffffffffffd
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
and x0, x1, #~2
bic x0, x1, #2
// CHECK: and w0, w1, #0xfffffffd
// CHECK: and w0, w1, #0xfffffffd
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
and w0, w1, #~2
bic w0, w1, #2
// CHECK: ands x0, x1, #0xfffffffffffffffd
// CHECK: ands x0, x1, #0xfffffffffffffffd
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
ands x0, x1, #~2
bics x0, x1, #2
// CHECK: ands w0, w1, #0xfffffffd
// CHECK: ands w0, w1, #0xfffffffd
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
ands w0, w1, #~2
bics w0, w1, #2
// CHECK: orr x0, x1, #0xfffffffffffffffd
// CHECK: orr x0, x1, #0xfffffffffffffffd
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
orr x0, x1, #~2
orn x0, x1, #2
// CHECK: orr w2, w1, #0xfffffffc
// CHECK: orr w2, w1, #0xfffffffc
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
orr w2, w1, #~3
orn w2, w1, #3
// CHECK: eor x0, x1, #0xfffffffffffffffd
// CHECK: eor x0, x1, #0xfffffffffffffffd
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
eor x0, x1, #~2
eon x0, x1, #2
// CHECK: eor w2, w1, #0xfffffffc
// CHECK: eor w2, w1, #0xfffffffc
+// CHECK-NO-NEG-IMM: instruction requires: NegativeImmediates
eor w2, w1, #~3
eon w2, w1, #3
diff --git a/test/MC/AArch64/armv8.1a-lse.s b/test/MC/AArch64/armv8.1a-lse.s
new file mode 100644
index 000000000000..6143d0e13800
--- /dev/null
+++ b/test/MC/AArch64/armv8.1a-lse.s
@@ -0,0 +1,5175 @@
+// RUN: not llvm-mc -triple aarch64-none-linux-gnu -mattr=+v8.1a,+lse -show-encoding < %s 2> %t | FileCheck %s
+// RUN: FileCheck -check-prefix=CHECK-ERROR < %t %s
+ .text
+
+ cas w0, w1, [x2]
+ cas w2, w3, [sp]
+ casa w0, w1, [x2]
+ casa w2, w3, [sp]
+ casl w0, w1, [x2]
+ casl w2, w3, [sp]
+ casal w0, w1, [x2]
+ casal w2, w3, [sp]
+ // CHECK: cas w0, w1, [x2] // encoding: [0x41,0x7c,0xa0,0x88]
+ // CHECK: cas w2, w3, [sp] // encoding: [0xe3,0x7f,0xa2,0x88]
+ // CHECK: casa w0, w1, [x2] // encoding: [0x41,0x7c,0xe0,0x88]
+ // CHECK: casa w2, w3, [sp] // encoding: [0xe3,0x7f,0xe2,0x88]
+ // CHECK: casl w0, w1, [x2] // encoding: [0x41,0xfc,0xa0,0x88]
+ // CHECK: casl w2, w3, [sp] // encoding: [0xe3,0xff,0xa2,0x88]
+ // CHECK: casal w0, w1, [x2] // encoding: [0x41,0xfc,0xe0,0x88]
+ // CHECK: casal w2, w3, [sp] // encoding: [0xe3,0xff,0xe2,0x88]
+
+ casb w0, w1, [x2]
+ casb w2, w3, [sp]
+ cash w0, w1, [x2]
+ cash w2, w3, [sp]
+ casab w0, w1, [x2]
+ casab w2, w3, [sp]
+ caslb w0, w1, [x2]
+ caslb w2, w3, [sp]
+ // CHECK: casb w0, w1, [x2] // encoding: [0x41,0x7c,0xa0,0x08]
+ // CHECK: casb w2, w3, [sp] // encoding: [0xe3,0x7f,0xa2,0x08]
+ // CHECK: cash w0, w1, [x2] // encoding: [0x41,0x7c,0xa0,0x48]
+ // CHECK: cash w2, w3, [sp] // encoding: [0xe3,0x7f,0xa2,0x48]
+ // CHECK: casab w0, w1, [x2] // encoding: [0x41,0x7c,0xe0,0x08]
+ // CHECK: casab w2, w3, [sp] // encoding: [0xe3,0x7f,0xe2,0x08]
+ // CHECK: caslb w0, w1, [x2] // encoding: [0x41,0xfc,0xa0,0x08]
+ // CHECK: caslb w2, w3, [sp] // encoding: [0xe3,0xff,0xa2,0x08]
+
+ casalb w0, w1, [x2]
+ casalb w2, w3, [sp]
+ casah w0, w1, [x2]
+ casah w2, w3, [sp]
+ caslh w0, w1, [x2]
+ caslh w2, w3, [sp]
+ casalh w0, w1, [x2]
+ casalh w2, w3, [sp]
+ // CHECK: casalb w0, w1, [x2] // encoding: [0x41,0xfc,0xe0,0x08]
+ // CHECK: casalb w2, w3, [sp] // encoding: [0xe3,0xff,0xe2,0x08]
+ // CHECK: casah w0, w1, [x2] // encoding: [0x41,0x7c,0xe0,0x48]
+ // CHECK: casah w2, w3, [sp] // encoding: [0xe3,0x7f,0xe2,0x48]
+ // CHECK: caslh w0, w1, [x2] // encoding: [0x41,0xfc,0xa0,0x48]
+ // CHECK: caslh w2, w3, [sp] // encoding: [0xe3,0xff,0xa2,0x48]
+ // CHECK: casalh w0, w1, [x2] // encoding: [0x41,0xfc,0xe0,0x48]
+ // CHECK: casalh w2, w3, [sp] // encoding: [0xe3,0xff,0xe2,0x48]
+
+ cas x0, x1, [x2]
+ cas x2, x3, [sp]
+ casa x0, x1, [x2]
+ casa x2, x3, [sp]
+ casl x0, x1, [x2]
+ casl x2, x3, [sp]
+ casal x0, x1, [x2]
+ casal x2, x3, [sp]
+ // CHECK: cas x0, x1, [x2] // encoding: [0x41,0x7c,0xa0,0xc8]
+ // CHECK: cas x2, x3, [sp] // encoding: [0xe3,0x7f,0xa2,0xc8]
+ // CHECK: casa x0, x1, [x2] // encoding: [0x41,0x7c,0xe0,0xc8]
+ // CHECK: casa x2, x3, [sp] // encoding: [0xe3,0x7f,0xe2,0xc8]
+ // CHECK: casl x0, x1, [x2] // encoding: [0x41,0xfc,0xa0,0xc8]
+ // CHECK: casl x2, x3, [sp] // encoding: [0xe3,0xff,0xa2,0xc8]
+ // CHECK: casal x0, x1, [x2] // encoding: [0x41,0xfc,0xe0,0xc8]
+ // CHECK: casal x2, x3, [sp] // encoding: [0xe3,0xff,0xe2,0xc8]
+
+ swp w0, w1, [x2]
+ swp w2, w3, [sp]
+ swpa w0, w1, [x2]
+ swpa w2, w3, [sp]
+ swpl w0, w1, [x2]
+ swpl w2, w3, [sp]
+ swpal w0, w1, [x2]
+ swpal w2, w3, [sp]
+ // CHECK: swp w0, w1, [x2] // encoding: [0x41,0x80,0x20,0xb8]
+ // CHECK: swp w2, w3, [sp] // encoding: [0xe3,0x83,0x22,0xb8]
+ // CHECK: swpa w0, w1, [x2] // encoding: [0x41,0x80,0xa0,0xb8]
+ // CHECK: swpa w2, w3, [sp] // encoding: [0xe3,0x83,0xa2,0xb8]
+ // CHECK: swpl w0, w1, [x2] // encoding: [0x41,0x80,0x60,0xb8]
+ // CHECK: swpl w2, w3, [sp] // encoding: [0xe3,0x83,0x62,0xb8]
+ // CHECK: swpal w0, w1, [x2] // encoding: [0x41,0x80,0xe0,0xb8]
+ // CHECK: swpal w2, w3, [sp] // encoding: [0xe3,0x83,0xe2,0xb8]
+
+ swpb w0, w1, [x2]
+ swpb w2, w3, [sp]
+ swph w0, w1, [x2]
+ swph w2, w3, [sp]
+ swpab w0, w1, [x2]
+ swpab w2, w3, [sp]
+ swplb w0, w1, [x2]
+ swplb w2, w3, [sp]
+ // CHECK: swpb w0, w1, [x2] // encoding: [0x41,0x80,0x20,0x38]
+ // CHECK: swpb w2, w3, [sp] // encoding: [0xe3,0x83,0x22,0x38]
+ // CHECK: swph w0, w1, [x2] // encoding: [0x41,0x80,0x20,0x78]
+ // CHECK: swph w2, w3, [sp] // encoding: [0xe3,0x83,0x22,0x78]
+ // CHECK: swpab w0, w1, [x2] // encoding: [0x41,0x80,0xa0,0x38]
+ // CHECK: swpab w2, w3, [sp] // encoding: [0xe3,0x83,0xa2,0x38]
+ // CHECK: swplb w0, w1, [x2] // encoding: [0x41,0x80,0x60,0x38]
+ // CHECK: swplb w2, w3, [sp] // encoding: [0xe3,0x83,0x62,0x38]
+
+ swpalb w0, w1, [x2]
+ swpalb w2, w3, [sp]
+ swpah w0, w1, [x2]
+ swpah w2, w3, [sp]
+ swplh w0, w1, [x2]
+ swplh w2, w3, [sp]
+ swpalh w0, w1, [x2]
+ swpalh w2, w3, [sp]
+ // CHECK: swpalb w0, w1, [x2] // encoding: [0x41,0x80,0xe0,0x38]
+ // CHECK: swpalb w2, w3, [sp] // encoding: [0xe3,0x83,0xe2,0x38]
+ // CHECK: swpah w0, w1, [x2] // encoding: [0x41,0x80,0xa0,0x78]
+ // CHECK: swpah w2, w3, [sp] // encoding: [0xe3,0x83,0xa2,0x78]
+ // CHECK: swplh w0, w1, [x2] // encoding: [0x41,0x80,0x60,0x78]
+ // CHECK: swplh w2, w3, [sp] // encoding: [0xe3,0x83,0x62,0x78]
+ // CHECK: swpalh w0, w1, [x2] // encoding: [0x41,0x80,0xe0,0x78]
+ // CHECK: swpalh w2, w3, [sp] // encoding: [0xe3,0x83,0xe2,0x78]
+
+ swp x0, x1, [x2]
+ swp x2, x3, [sp]
+ swpa x0, x1, [x2]
+ swpa x2, x3, [sp]
+ swpl x0, x1, [x2]
+ swpl x2, x3, [sp]
+ swpal x0, x1, [x2]
+ swpal x2, x3, [sp]
+ // CHECK: swp x0, x1, [x2] // encoding: [0x41,0x80,0x20,0xf8]
+ // CHECK: swp x2, x3, [sp] // encoding: [0xe3,0x83,0x22,0xf8]
+ // CHECK: swpa x0, x1, [x2] // encoding: [0x41,0x80,0xa0,0xf8]
+ // CHECK: swpa x2, x3, [sp] // encoding: [0xe3,0x83,0xa2,0xf8]
+ // CHECK: swpl x0, x1, [x2] // encoding: [0x41,0x80,0x60,0xf8]
+ // CHECK: swpl x2, x3, [sp] // encoding: [0xe3,0x83,0x62,0xf8]
+ // CHECK: swpal x0, x1, [x2] // encoding: [0x41,0x80,0xe0,0xf8]
+ // CHECK: swpal x2, x3, [sp] // encoding: [0xe3,0x83,0xe2,0xf8]
+
+ casp w0, w1, w2, w3, [x5]
+ casp w4, w5, w6, w7, [sp]
+ casp x0, x1, x2, x3, [x2]
+ casp x4, x5, x6, x7, [sp]
+ caspa w0, w1, w2, w3, [x5]
+ caspa w4, w5, w6, w7, [sp]
+ caspa x0, x1, x2, x3, [x2]
+ caspa x4, x5, x6, x7, [sp]
+ // CHECK: casp w0, w1, w2, w3, [x5] // encoding: [0xa2,0x7c,0x20,0x08]
+ // CHECK: casp w4, w5, w6, w7, [sp] // encoding: [0xe6,0x7f,0x24,0x08]
+ // CHECK: casp x0, x1, x2, x3, [x2] // encoding: [0x42,0x7c,0x20,0x48]
+ // CHECK: casp x4, x5, x6, x7, [sp] // encoding: [0xe6,0x7f,0x24,0x48]
+ // CHECK: caspa w0, w1, w2, w3, [x5] // encoding: [0xa2,0x7c,0x60,0x08]
+ // CHECK: caspa w4, w5, w6, w7, [sp] // encoding: [0xe6,0x7f,0x64,0x08]
+ // CHECK: caspa x0, x1, x2, x3, [x2] // encoding: [0x42,0x7c,0x60,0x48]
+ // CHECK: caspa x4, x5, x6, x7, [sp] // encoding: [0xe6,0x7f,0x64,0x48]
+
+ caspl w0, w1, w2, w3, [x5]
+ caspl w4, w5, w6, w7, [sp]
+ caspl x0, x1, x2, x3, [x2]
+ caspl x4, x5, x6, x7, [sp]
+ caspal w0, w1, w2, w3, [x5]
+ caspal w4, w5, w6, w7, [sp]
+ caspal x0, x1, x2, x3, [x2]
+ caspal x4, x5, x6, x7, [sp]
+ // CHECK: caspl w0, w1, w2, w3, [x5] // encoding: [0xa2,0xfc,0x20,0x08]
+ // CHECK: caspl w4, w5, w6, w7, [sp] // encoding: [0xe6,0xff,0x24,0x08]
+ // CHECK: caspl x0, x1, x2, x3, [x2] // encoding: [0x42,0xfc,0x20,0x48]
+ // CHECK: caspl x4, x5, x6, x7, [sp] // encoding: [0xe6,0xff,0x24,0x48]
+ // CHECK: caspal w0, w1, w2, w3, [x5] // encoding: [0xa2,0xfc,0x60,0x08]
+ // CHECK: caspal w4, w5, w6, w7, [sp] // encoding: [0xe6,0xff,0x64,0x08]
+ // CHECK: caspal x0, x1, x2, x3, [x2] // encoding: [0x42,0xfc,0x60,0x48]
+ // CHECK: caspal x4, x5, x6, x7, [sp] // encoding: [0xe6,0xff,0x64,0x48]
+
+ ldadd w0, w1, [x2]
+ ldadd w2, w3, [sp]
+ ldadda w0, w1, [x2]
+ ldadda w2, w3, [sp]
+ ldaddl w0, w1, [x2]
+ ldaddl w2, w3, [sp]
+ ldaddal w0, w1, [x2]
+ ldaddal w2, w3, [sp]
+ // CHECK: ldadd w0, w1, [x2] // encoding: [0x41,0x00,0x20,0xb8]
+ // CHECK: ldadd w2, w3, [sp] // encoding: [0xe3,0x03,0x22,0xb8]
+ // CHECK: ldadda w0, w1, [x2] // encoding: [0x41,0x00,0xa0,0xb8]
+ // CHECK: ldadda w2, w3, [sp] // encoding: [0xe3,0x03,0xa2,0xb8]
+ // CHECK: ldaddl w0, w1, [x2] // encoding: [0x41,0x00,0x60,0xb8]
+ // CHECK: ldaddl w2, w3, [sp] // encoding: [0xe3,0x03,0x62,0xb8]
+ // CHECK: ldaddal w0, w1, [x2] // encoding: [0x41,0x00,0xe0,0xb8]
+ // CHECK: ldaddal w2, w3, [sp] // encoding: [0xe3,0x03,0xe2,0xb8]
+
+ ldaddb w0, w1, [x2]
+ ldaddb w2, w3, [sp]
+ ldaddh w0, w1, [x2]
+ ldaddh w2, w3, [sp]
+ ldaddab w0, w1, [x2]
+ ldaddab w2, w3, [sp]
+ ldaddlb w0, w1, [x2]
+ ldaddlb w2, w3, [sp]
+ // CHECK: ldaddb w0, w1, [x2] // encoding: [0x41,0x00,0x20,0x38]
+ // CHECK: ldaddb w2, w3, [sp] // encoding: [0xe3,0x03,0x22,0x38]
+ // CHECK: ldaddh w0, w1, [x2] // encoding: [0x41,0x00,0x20,0x78]
+ // CHECK: ldaddh w2, w3, [sp] // encoding: [0xe3,0x03,0x22,0x78]
+ // CHECK: ldaddab w0, w1, [x2] // encoding: [0x41,0x00,0xa0,0x38]
+ // CHECK: ldaddab w2, w3, [sp] // encoding: [0xe3,0x03,0xa2,0x38]
+ // CHECK: ldaddlb w0, w1, [x2] // encoding: [0x41,0x00,0x60,0x38]
+ // CHECK: ldaddlb w2, w3, [sp] // encoding: [0xe3,0x03,0x62,0x38]
+
+ ldaddalb w0, w1, [x2]
+ ldaddalb w2, w3, [sp]
+ ldaddah w0, w1, [x2]
+ ldaddah w2, w3, [sp]
+ ldaddlh w0, w1, [x2]
+ ldaddlh w2, w3, [sp]
+ ldaddalh w0, w1, [x2]
+ ldaddalh w2, w3, [sp]
+ // CHECK: ldaddalb w0, w1, [x2] // encoding: [0x41,0x00,0xe0,0x38]
+ // CHECK: ldaddalb w2, w3, [sp] // encoding: [0xe3,0x03,0xe2,0x38]
+ // CHECK: ldaddah w0, w1, [x2] // encoding: [0x41,0x00,0xa0,0x78]
+ // CHECK: ldaddah w2, w3, [sp] // encoding: [0xe3,0x03,0xa2,0x78]
+ // CHECK: ldaddlh w0, w1, [x2] // encoding: [0x41,0x00,0x60,0x78]
+ // CHECK: ldaddlh w2, w3, [sp] // encoding: [0xe3,0x03,0x62,0x78]
+ // CHECK: ldaddalh w0, w1, [x2] // encoding: [0x41,0x00,0xe0,0x78]
+ // CHECK: ldaddalh w2, w3, [sp] // encoding: [0xe3,0x03,0xe2,0x78]
+
+ ldadd x0, x1, [x2]
+ ldadd x2, x3, [sp]
+ ldadda x0, x1, [x2]
+ ldadda x2, x3, [sp]
+ ldaddl x0, x1, [x2]
+ ldaddl x2, x3, [sp]
+ ldaddal x0, x1, [x2]
+ ldaddal x2, x3, [sp]
+ // CHECK: ldadd x0, x1, [x2] // encoding: [0x41,0x00,0x20,0xf8]
+ // CHECK: ldadd x2, x3, [sp] // encoding: [0xe3,0x03,0x22,0xf8]
+ // CHECK: ldadda x0, x1, [x2] // encoding: [0x41,0x00,0xa0,0xf8]
+ // CHECK: ldadda x2, x3, [sp] // encoding: [0xe3,0x03,0xa2,0xf8]
+ // CHECK: ldaddl x0, x1, [x2] // encoding: [0x41,0x00,0x60,0xf8]
+ // CHECK: ldaddl x2, x3, [sp] // encoding: [0xe3,0x03,0x62,0xf8]
+ // CHECK: ldaddal x0, x1, [x2] // encoding: [0x41,0x00,0xe0,0xf8]
+ // CHECK: ldaddal x2, x3, [sp] // encoding: [0xe3,0x03,0xe2,0xf8]
+
+ ldclr w0, w1, [x2]
+ ldclr w2, w3, [sp]
+ ldclra w0, w1, [x2]
+ ldclra w2, w3, [sp]
+ ldclrl w0, w1, [x2]
+ ldclrl w2, w3, [sp]
+ ldclral w0, w1, [x2]
+ ldclral w2, w3, [sp]
+ // CHECK: ldclr w0, w1, [x2] // encoding: [0x41,0x10,0x20,0xb8]
+ // CHECK: ldclr w2, w3, [sp] // encoding: [0xe3,0x13,0x22,0xb8]
+ // CHECK: ldclra w0, w1, [x2] // encoding: [0x41,0x10,0xa0,0xb8]
+ // CHECK: ldclra w2, w3, [sp] // encoding: [0xe3,0x13,0xa2,0xb8]
+ // CHECK: ldclrl w0, w1, [x2] // encoding: [0x41,0x10,0x60,0xb8]
+ // CHECK: ldclrl w2, w3, [sp] // encoding: [0xe3,0x13,0x62,0xb8]
+ // CHECK: ldclral w0, w1, [x2] // encoding: [0x41,0x10,0xe0,0xb8]
+ // CHECK: ldclral w2, w3, [sp] // encoding: [0xe3,0x13,0xe2,0xb8]
+
+ ldclrb w0, w1, [x2]
+ ldclrb w2, w3, [sp]
+ ldclrh w0, w1, [x2]
+ ldclrh w2, w3, [sp]
+ ldclrab w0, w1, [x2]
+ ldclrab w2, w3, [sp]
+ ldclrlb w0, w1, [x2]
+ ldclrlb w2, w3, [sp]
+ // CHECK: ldclrb w0, w1, [x2] // encoding: [0x41,0x10,0x20,0x38]
+ // CHECK: ldclrb w2, w3, [sp] // encoding: [0xe3,0x13,0x22,0x38]
+ // CHECK: ldclrh w0, w1, [x2] // encoding: [0x41,0x10,0x20,0x78]
+ // CHECK: ldclrh w2, w3, [sp] // encoding: [0xe3,0x13,0x22,0x78]
+ // CHECK: ldclrab w0, w1, [x2] // encoding: [0x41,0x10,0xa0,0x38]
+ // CHECK: ldclrab w2, w3, [sp] // encoding: [0xe3,0x13,0xa2,0x38]
+ // CHECK: ldclrlb w0, w1, [x2] // encoding: [0x41,0x10,0x60,0x38]
+ // CHECK: ldclrlb w2, w3, [sp] // encoding: [0xe3,0x13,0x62,0x38]
+
+ ldclralb w0, w1, [x2]
+ ldclralb w2, w3, [sp]
+ ldclrah w0, w1, [x2]
+ ldclrah w2, w3, [sp]
+ ldclrlh w0, w1, [x2]
+ ldclrlh w2, w3, [sp]
+ ldclralh w0, w1, [x2]
+ ldclralh w2, w3, [sp]
+ // CHECK: ldclralb w0, w1, [x2] // encoding: [0x41,0x10,0xe0,0x38]
+ // CHECK: ldclralb w2, w3, [sp] // encoding: [0xe3,0x13,0xe2,0x38]
+ // CHECK: ldclrah w0, w1, [x2] // encoding: [0x41,0x10,0xa0,0x78]
+ // CHECK: ldclrah w2, w3, [sp] // encoding: [0xe3,0x13,0xa2,0x78]
+ // CHECK: ldclrlh w0, w1, [x2] // encoding: [0x41,0x10,0x60,0x78]
+ // CHECK: ldclrlh w2, w3, [sp] // encoding: [0xe3,0x13,0x62,0x78]
+ // CHECK: ldclralh w0, w1, [x2] // encoding: [0x41,0x10,0xe0,0x78]
+ // CHECK: ldclralh w2, w3, [sp] // encoding: [0xe3,0x13,0xe2,0x78]
+
+ ldclr x0, x1, [x2]
+ ldclr x2, x3, [sp]
+ ldclra x0, x1, [x2]
+ ldclra x2, x3, [sp]
+ ldclrl x0, x1, [x2]
+ ldclrl x2, x3, [sp]
+ ldclral x0, x1, [x2]
+ ldclral x2, x3, [sp]
+ // CHECK: ldclr x0, x1, [x2] // encoding: [0x41,0x10,0x20,0xf8]
+ // CHECK: ldclr x2, x3, [sp] // encoding: [0xe3,0x13,0x22,0xf8]
+ // CHECK: ldclra x0, x1, [x2] // encoding: [0x41,0x10,0xa0,0xf8]
+ // CHECK: ldclra x2, x3, [sp] // encoding: [0xe3,0x13,0xa2,0xf8]
+ // CHECK: ldclrl x0, x1, [x2] // encoding: [0x41,0x10,0x60,0xf8]
+ // CHECK: ldclrl x2, x3, [sp] // encoding: [0xe3,0x13,0x62,0xf8]
+ // CHECK: ldclral x0, x1, [x2] // encoding: [0x41,0x10,0xe0,0xf8]
+ // CHECK: ldclral x2, x3, [sp] // encoding: [0xe3,0x13,0xe2,0xf8]
+
+ ldeor w0, w1, [x2]
+ ldeor w2, w3, [sp]
+ ldeora w0, w1, [x2]
+ ldeora w2, w3, [sp]
+ ldeorl w0, w1, [x2]
+ ldeorl w2, w3, [sp]
+ ldeoral w0, w1, [x2]
+ ldeoral w2, w3, [sp]
+ // CHECK: ldeor w0, w1, [x2] // encoding: [0x41,0x20,0x20,0xb8]
+ // CHECK: ldeor w2, w3, [sp] // encoding: [0xe3,0x23,0x22,0xb8]
+ // CHECK: ldeora w0, w1, [x2] // encoding: [0x41,0x20,0xa0,0xb8]
+ // CHECK: ldeora w2, w3, [sp] // encoding: [0xe3,0x23,0xa2,0xb8]
+ // CHECK: ldeorl w0, w1, [x2] // encoding: [0x41,0x20,0x60,0xb8]
+ // CHECK: ldeorl w2, w3, [sp] // encoding: [0xe3,0x23,0x62,0xb8]
+ // CHECK: ldeoral w0, w1, [x2] // encoding: [0x41,0x20,0xe0,0xb8]
+ // CHECK: ldeoral w2, w3, [sp] // encoding: [0xe3,0x23,0xe2,0xb8]
+
+ ldeorb w0, w1, [x2]
+ ldeorb w2, w3, [sp]
+ ldeorh w0, w1, [x2]
+ ldeorh w2, w3, [sp]
+ ldeorab w0, w1, [x2]
+ ldeorab w2, w3, [sp]
+ ldeorlb w0, w1, [x2]
+ ldeorlb w2, w3, [sp]
+ // CHECK: ldeorb w0, w1, [x2] // encoding: [0x41,0x20,0x20,0x38]
+ // CHECK: ldeorb w2, w3, [sp] // encoding: [0xe3,0x23,0x22,0x38]
+ // CHECK: ldeorh w0, w1, [x2] // encoding: [0x41,0x20,0x20,0x78]
+ // CHECK: ldeorh w2, w3, [sp] // encoding: [0xe3,0x23,0x22,0x78]
+ // CHECK: ldeorab w0, w1, [x2] // encoding: [0x41,0x20,0xa0,0x38]
+ // CHECK: ldeorab w2, w3, [sp] // encoding: [0xe3,0x23,0xa2,0x38]
+ // CHECK: ldeorlb w0, w1, [x2] // encoding: [0x41,0x20,0x60,0x38]
+ // CHECK: ldeorlb w2, w3, [sp] // encoding: [0xe3,0x23,0x62,0x38]
+
+ ldeoralb w0, w1, [x2]
+ ldeoralb w2, w3, [sp]
+ ldeorah w0, w1, [x2]
+ ldeorah w2, w3, [sp]
+ ldeorlh w0, w1, [x2]
+ ldeorlh w2, w3, [sp]
+ ldeoralh w0, w1, [x2]
+ ldeoralh w2, w3, [sp]
+ // CHECK: ldeoralb w0, w1, [x2] // encoding: [0x41,0x20,0xe0,0x38]
+ // CHECK: ldeoralb w2, w3, [sp] // encoding: [0xe3,0x23,0xe2,0x38]
+ // CHECK: ldeorah w0, w1, [x2] // encoding: [0x41,0x20,0xa0,0x78]
+ // CHECK: ldeorah w2, w3, [sp] // encoding: [0xe3,0x23,0xa2,0x78]
+ // CHECK: ldeorlh w0, w1, [x2] // encoding: [0x41,0x20,0x60,0x78]
+ // CHECK: ldeorlh w2, w3, [sp] // encoding: [0xe3,0x23,0x62,0x78]
+ // CHECK: ldeoralh w0, w1, [x2] // encoding: [0x41,0x20,0xe0,0x78]
+ // CHECK: ldeoralh w2, w3, [sp] // encoding: [0xe3,0x23,0xe2,0x78]
+
+ ldeor x0, x1, [x2]
+ ldeor x2, x3, [sp]
+ ldeora x0, x1, [x2]
+ ldeora x2, x3, [sp]
+ ldeorl x0, x1, [x2]
+ ldeorl x2, x3, [sp]
+ ldeoral x0, x1, [x2]
+ ldeoral x2, x3, [sp]
+ // CHECK: ldeor x0, x1, [x2] // encoding: [0x41,0x20,0x20,0xf8]
+ // CHECK: ldeor x2, x3, [sp] // encoding: [0xe3,0x23,0x22,0xf8]
+ // CHECK: ldeora x0, x1, [x2] // encoding: [0x41,0x20,0xa0,0xf8]
+ // CHECK: ldeora x2, x3, [sp] // encoding: [0xe3,0x23,0xa2,0xf8]
+ // CHECK: ldeorl x0, x1, [x2] // encoding: [0x41,0x20,0x60,0xf8]
+ // CHECK: ldeorl x2, x3, [sp] // encoding: [0xe3,0x23,0x62,0xf8]
+ // CHECK: ldeoral x0, x1, [x2] // encoding: [0x41,0x20,0xe0,0xf8]
+ // CHECK: ldeoral x2, x3, [sp] // encoding: [0xe3,0x23,0xe2,0xf8]
+
+ ldset w0, w1, [x2]
+ ldset w2, w3, [sp]
+ ldseta w0, w1, [x2]
+ ldseta w2, w3, [sp]
+ ldsetl w0, w1, [x2]
+ ldsetl w2, w3, [sp]
+ ldsetal w0, w1, [x2]
+ ldsetal w2, w3, [sp]
+ // CHECK: ldset w0, w1, [x2] // encoding: [0x41,0x30,0x20,0xb8]
+ // CHECK: ldset w2, w3, [sp] // encoding: [0xe3,0x33,0x22,0xb8]
+ // CHECK: ldseta w0, w1, [x2] // encoding: [0x41,0x30,0xa0,0xb8]
+ // CHECK: ldseta w2, w3, [sp] // encoding: [0xe3,0x33,0xa2,0xb8]
+ // CHECK: ldsetl w0, w1, [x2] // encoding: [0x41,0x30,0x60,0xb8]
+ // CHECK: ldsetl w2, w3, [sp] // encoding: [0xe3,0x33,0x62,0xb8]
+ // CHECK: ldsetal w0, w1, [x2] // encoding: [0x41,0x30,0xe0,0xb8]
+ // CHECK: ldsetal w2, w3, [sp] // encoding: [0xe3,0x33,0xe2,0xb8]
+
+ ldsetb w0, w1, [x2]
+ ldsetb w2, w3, [sp]
+ ldseth w0, w1, [x2]
+ ldseth w2, w3, [sp]
+ ldsetab w0, w1, [x2]
+ ldsetab w2, w3, [sp]
+ ldsetlb w0, w1, [x2]
+ ldsetlb w2, w3, [sp]
+ // CHECK: ldsetb w0, w1, [x2] // encoding: [0x41,0x30,0x20,0x38]
+ // CHECK: ldsetb w2, w3, [sp] // encoding: [0xe3,0x33,0x22,0x38]
+ // CHECK: ldseth w0, w1, [x2] // encoding: [0x41,0x30,0x20,0x78]
+ // CHECK: ldseth w2, w3, [sp] // encoding: [0xe3,0x33,0x22,0x78]
+ // CHECK: ldsetab w0, w1, [x2] // encoding: [0x41,0x30,0xa0,0x38]
+ // CHECK: ldsetab w2, w3, [sp] // encoding: [0xe3,0x33,0xa2,0x38]
+ // CHECK: ldsetlb w0, w1, [x2] // encoding: [0x41,0x30,0x60,0x38]
+ // CHECK: ldsetlb w2, w3, [sp] // encoding: [0xe3,0x33,0x62,0x38]
+
+ ldsetalb w0, w1, [x2]
+ ldsetalb w2, w3, [sp]
+ ldsetah w0, w1, [x2]
+ ldsetah w2, w3, [sp]
+ ldsetlh w0, w1, [x2]
+ ldsetlh w2, w3, [sp]
+ ldsetalh w0, w1, [x2]
+ ldsetalh w2, w3, [sp]
+ // CHECK: ldsetalb w0, w1, [x2] // encoding: [0x41,0x30,0xe0,0x38]
+ // CHECK: ldsetalb w2, w3, [sp] // encoding: [0xe3,0x33,0xe2,0x38]
+ // CHECK: ldsetah w0, w1, [x2] // encoding: [0x41,0x30,0xa0,0x78]
+ // CHECK: ldsetah w2, w3, [sp] // encoding: [0xe3,0x33,0xa2,0x78]
+ // CHECK: ldsetlh w0, w1, [x2] // encoding: [0x41,0x30,0x60,0x78]
+ // CHECK: ldsetlh w2, w3, [sp] // encoding: [0xe3,0x33,0x62,0x78]
+ // CHECK: ldsetalh w0, w1, [x2] // encoding: [0x41,0x30,0xe0,0x78]
+ // CHECK: ldsetalh w2, w3, [sp] // encoding: [0xe3,0x33,0xe2,0x78]
+
+ ldset x0, x1, [x2]
+ ldset x2, x3, [sp]
+ ldseta x0, x1, [x2]
+ ldseta x2, x3, [sp]
+ ldsetl x0, x1, [x2]
+ ldsetl x2, x3, [sp]
+ ldsetal x0, x1, [x2]
+ ldsetal x2, x3, [sp]
+ // CHECK: ldset x0, x1, [x2] // encoding: [0x41,0x30,0x20,0xf8]
+ // CHECK: ldset x2, x3, [sp] // encoding: [0xe3,0x33,0x22,0xf8]
+ // CHECK: ldseta x0, x1, [x2] // encoding: [0x41,0x30,0xa0,0xf8]
+ // CHECK: ldseta x2, x3, [sp] // encoding: [0xe3,0x33,0xa2,0xf8]
+ // CHECK: ldsetl x0, x1, [x2] // encoding: [0x41,0x30,0x60,0xf8]
+ // CHECK: ldsetl x2, x3, [sp] // encoding: [0xe3,0x33,0x62,0xf8]
+ // CHECK: ldsetal x0, x1, [x2] // encoding: [0x41,0x30,0xe0,0xf8]
+ // CHECK: ldsetal x2, x3, [sp] // encoding: [0xe3,0x33,0xe2,0xf8]
+
+ ldsmax w0, w1, [x2]
+ ldsmax w2, w3, [sp]
+ ldsmaxa w0, w1, [x2]
+ ldsmaxa w2, w3, [sp]
+ ldsmaxl w0, w1, [x2]
+ ldsmaxl w2, w3, [sp]
+ ldsmaxal w0, w1, [x2]
+ ldsmaxal w2, w3, [sp]
+ // CHECK: ldsmax w0, w1, [x2] // encoding: [0x41,0x40,0x20,0xb8]
+ // CHECK: ldsmax w2, w3, [sp] // encoding: [0xe3,0x43,0x22,0xb8]
+ // CHECK: ldsmaxa w0, w1, [x2] // encoding: [0x41,0x40,0xa0,0xb8]
+ // CHECK: ldsmaxa w2, w3, [sp] // encoding: [0xe3,0x43,0xa2,0xb8]
+ // CHECK: ldsmaxl w0, w1, [x2] // encoding: [0x41,0x40,0x60,0xb8]
+ // CHECK: ldsmaxl w2, w3, [sp] // encoding: [0xe3,0x43,0x62,0xb8]
+ // CHECK: ldsmaxal w0, w1, [x2] // encoding: [0x41,0x40,0xe0,0xb8]
+ // CHECK: ldsmaxal w2, w3, [sp] // encoding: [0xe3,0x43,0xe2,0xb8]
+
+ ldsmaxb w0, w1, [x2]
+ ldsmaxb w2, w3, [sp]
+ ldsmaxh w0, w1, [x2]
+ ldsmaxh w2, w3, [sp]
+ ldsmaxab w0, w1, [x2]
+ ldsmaxab w2, w3, [sp]
+ ldsmaxlb w0, w1, [x2]
+ ldsmaxlb w2, w3, [sp]
+ // CHECK: ldsmaxb w0, w1, [x2] // encoding: [0x41,0x40,0x20,0x38]
+ // CHECK: ldsmaxb w2, w3, [sp] // encoding: [0xe3,0x43,0x22,0x38]
+ // CHECK: ldsmaxh w0, w1, [x2] // encoding: [0x41,0x40,0x20,0x78]
+ // CHECK: ldsmaxh w2, w3, [sp] // encoding: [0xe3,0x43,0x22,0x78]
+ // CHECK: ldsmaxab w0, w1, [x2] // encoding: [0x41,0x40,0xa0,0x38]
+ // CHECK: ldsmaxab w2, w3, [sp] // encoding: [0xe3,0x43,0xa2,0x38]
+ // CHECK: ldsmaxlb w0, w1, [x2] // encoding: [0x41,0x40,0x60,0x38]
+ // CHECK: ldsmaxlb w2, w3, [sp] // encoding: [0xe3,0x43,0x62,0x38]
+
+ ldsmaxalb w0, w1, [x2]
+ ldsmaxalb w2, w3, [sp]
+ ldsmaxah w0, w1, [x2]
+ ldsmaxah w2, w3, [sp]
+ ldsmaxlh w0, w1, [x2]
+ ldsmaxlh w2, w3, [sp]
+ ldsmaxalh w0, w1, [x2]
+ ldsmaxalh w2, w3, [sp]
+ // CHECK: ldsmaxalb w0, w1, [x2] // encoding: [0x41,0x40,0xe0,0x38]
+ // CHECK: ldsmaxalb w2, w3, [sp] // encoding: [0xe3,0x43,0xe2,0x38]
+ // CHECK: ldsmaxah w0, w1, [x2] // encoding: [0x41,0x40,0xa0,0x78]
+ // CHECK: ldsmaxah w2, w3, [sp] // encoding: [0xe3,0x43,0xa2,0x78]
+ // CHECK: ldsmaxlh w0, w1, [x2] // encoding: [0x41,0x40,0x60,0x78]
+ // CHECK: ldsmaxlh w2, w3, [sp] // encoding: [0xe3,0x43,0x62,0x78]
+ // CHECK: ldsmaxalh w0, w1, [x2] // encoding: [0x41,0x40,0xe0,0x78]
+ // CHECK: ldsmaxalh w2, w3, [sp] // encoding: [0xe3,0x43,0xe2,0x78]
+
+ ldsmax x0, x1, [x2]
+ ldsmax x2, x3, [sp]
+ ldsmaxa x0, x1, [x2]
+ ldsmaxa x2, x3, [sp]
+ ldsmaxl x0, x1, [x2]
+ ldsmaxl x2, x3, [sp]
+ ldsmaxal x0, x1, [x2]
+ ldsmaxal x2, x3, [sp]
+ // CHECK: ldsmax x0, x1, [x2] // encoding: [0x41,0x40,0x20,0xf8]
+ // CHECK: ldsmax x2, x3, [sp] // encoding: [0xe3,0x43,0x22,0xf8]
+ // CHECK: ldsmaxa x0, x1, [x2] // encoding: [0x41,0x40,0xa0,0xf8]
+ // CHECK: ldsmaxa x2, x3, [sp] // encoding: [0xe3,0x43,0xa2,0xf8]
+ // CHECK: ldsmaxl x0, x1, [x2] // encoding: [0x41,0x40,0x60,0xf8]
+ // CHECK: ldsmaxl x2, x3, [sp] // encoding: [0xe3,0x43,0x62,0xf8]
+ // CHECK: ldsmaxal x0, x1, [x2] // encoding: [0x41,0x40,0xe0,0xf8]
+ // CHECK: ldsmaxal x2, x3, [sp] // encoding: [0xe3,0x43,0xe2,0xf8]
+
+ ldsmin w0, w1, [x2]
+ ldsmin w2, w3, [sp]
+ ldsmina w0, w1, [x2]
+ ldsmina w2, w3, [sp]
+ ldsminl w0, w1, [x2]
+ ldsminl w2, w3, [sp]
+ ldsminal w0, w1, [x2]
+ ldsminal w2, w3, [sp]
+ // CHECK: ldsmin w0, w1, [x2] // encoding: [0x41,0x50,0x20,0xb8]
+ // CHECK: ldsmin w2, w3, [sp] // encoding: [0xe3,0x53,0x22,0xb8]
+ // CHECK: ldsmina w0, w1, [x2] // encoding: [0x41,0x50,0xa0,0xb8]
+ // CHECK: ldsmina w2, w3, [sp] // encoding: [0xe3,0x53,0xa2,0xb8]
+ // CHECK: ldsminl w0, w1, [x2] // encoding: [0x41,0x50,0x60,0xb8]
+ // CHECK: ldsminl w2, w3, [sp] // encoding: [0xe3,0x53,0x62,0xb8]
+ // CHECK: ldsminal w0, w1, [x2] // encoding: [0x41,0x50,0xe0,0xb8]
+ // CHECK: ldsminal w2, w3, [sp] // encoding: [0xe3,0x53,0xe2,0xb8]
+
+ ldsminb w0, w1, [x2]
+ ldsminb w2, w3, [sp]
+ ldsminh w0, w1, [x2]
+ ldsminh w2, w3, [sp]
+ ldsminab w0, w1, [x2]
+ ldsminab w2, w3, [sp]
+ ldsminlb w0, w1, [x2]
+ ldsminlb w2, w3, [sp]
+ // CHECK: ldsminb w0, w1, [x2] // encoding: [0x41,0x50,0x20,0x38]
+ // CHECK: ldsminb w2, w3, [sp] // encoding: [0xe3,0x53,0x22,0x38]
+ // CHECK: ldsminh w0, w1, [x2] // encoding: [0x41,0x50,0x20,0x78]
+ // CHECK: ldsminh w2, w3, [sp] // encoding: [0xe3,0x53,0x22,0x78]
+ // CHECK: ldsminab w0, w1, [x2] // encoding: [0x41,0x50,0xa0,0x38]
+ // CHECK: ldsminab w2, w3, [sp] // encoding: [0xe3,0x53,0xa2,0x38]
+ // CHECK: ldsminlb w0, w1, [x2] // encoding: [0x41,0x50,0x60,0x38]
+ // CHECK: ldsminlb w2, w3, [sp] // encoding: [0xe3,0x53,0x62,0x38]
+
+ ldsminalb w0, w1, [x2]
+ ldsminalb w2, w3, [sp]
+ ldsminah w0, w1, [x2]
+ ldsminah w2, w3, [sp]
+ ldsminlh w0, w1, [x2]
+ ldsminlh w2, w3, [sp]
+ ldsminalh w0, w1, [x2]
+ ldsminalh w2, w3, [sp]
+ // CHECK: ldsminalb w0, w1, [x2] // encoding: [0x41,0x50,0xe0,0x38]
+ // CHECK: ldsminalb w2, w3, [sp] // encoding: [0xe3,0x53,0xe2,0x38]
+ // CHECK: ldsminah w0, w1, [x2] // encoding: [0x41,0x50,0xa0,0x78]
+ // CHECK: ldsminah w2, w3, [sp] // encoding: [0xe3,0x53,0xa2,0x78]
+ // CHECK: ldsminlh w0, w1, [x2] // encoding: [0x41,0x50,0x60,0x78]
+ // CHECK: ldsminlh w2, w3, [sp] // encoding: [0xe3,0x53,0x62,0x78]
+ // CHECK: ldsminalh w0, w1, [x2] // encoding: [0x41,0x50,0xe0,0x78]
+ // CHECK: ldsminalh w2, w3, [sp] // encoding: [0xe3,0x53,0xe2,0x78]
+
+ ldsmin x0, x1, [x2]
+ ldsmin x2, x3, [sp]
+ ldsmina x0, x1, [x2]
+ ldsmina x2, x3, [sp]
+ ldsminl x0, x1, [x2]
+ ldsminl x2, x3, [sp]
+ ldsminal x0, x1, [x2]
+ ldsminal x2, x3, [sp]
+ // CHECK: ldsmin x0, x1, [x2] // encoding: [0x41,0x50,0x20,0xf8]
+ // CHECK: ldsmin x2, x3, [sp] // encoding: [0xe3,0x53,0x22,0xf8]
+ // CHECK: ldsmina x0, x1, [x2] // encoding: [0x41,0x50,0xa0,0xf8]
+ // CHECK: ldsmina x2, x3, [sp] // encoding: [0xe3,0x53,0xa2,0xf8]
+ // CHECK: ldsminl x0, x1, [x2] // encoding: [0x41,0x50,0x60,0xf8]
+ // CHECK: ldsminl x2, x3, [sp] // encoding: [0xe3,0x53,0x62,0xf8]
+ // CHECK: ldsminal x0, x1, [x2] // encoding: [0x41,0x50,0xe0,0xf8]
+ // CHECK: ldsminal x2, x3, [sp] // encoding: [0xe3,0x53,0xe2,0xf8]
+
+ ldumax w0, w1, [x2]
+ ldumax w2, w3, [sp]
+ ldumaxa w0, w1, [x2]
+ ldumaxa w2, w3, [sp]
+ ldumaxl w0, w1, [x2]
+ ldumaxl w2, w3, [sp]
+ ldumaxal w0, w1, [x2]
+ ldumaxal w2, w3, [sp]
+ // CHECK: ldumax w0, w1, [x2] // encoding: [0x41,0x60,0x20,0xb8]
+ // CHECK: ldumax w2, w3, [sp] // encoding: [0xe3,0x63,0x22,0xb8]
+ // CHECK: ldumaxa w0, w1, [x2] // encoding: [0x41,0x60,0xa0,0xb8]
+ // CHECK: ldumaxa w2, w3, [sp] // encoding: [0xe3,0x63,0xa2,0xb8]
+ // CHECK: ldumaxl w0, w1, [x2] // encoding: [0x41,0x60,0x60,0xb8]
+ // CHECK: ldumaxl w2, w3, [sp] // encoding: [0xe3,0x63,0x62,0xb8]
+ // CHECK: ldumaxal w0, w1, [x2] // encoding: [0x41,0x60,0xe0,0xb8]
+ // CHECK: ldumaxal w2, w3, [sp] // encoding: [0xe3,0x63,0xe2,0xb8]
+
+ ldumaxb w0, w1, [x2]
+ ldumaxb w2, w3, [sp]
+ ldumaxh w0, w1, [x2]
+ ldumaxh w2, w3, [sp]
+ ldumaxab w0, w1, [x2]
+ ldumaxab w2, w3, [sp]
+ ldumaxlb w0, w1, [x2]
+ ldumaxlb w2, w3, [sp]
+ // CHECK: ldumaxb w0, w1, [x2] // encoding: [0x41,0x60,0x20,0x38]
+ // CHECK: ldumaxb w2, w3, [sp] // encoding: [0xe3,0x63,0x22,0x38]
+ // CHECK: ldumaxh w0, w1, [x2] // encoding: [0x41,0x60,0x20,0x78]
+ // CHECK: ldumaxh w2, w3, [sp] // encoding: [0xe3,0x63,0x22,0x78]
+ // CHECK: ldumaxab w0, w1, [x2] // encoding: [0x41,0x60,0xa0,0x38]
+ // CHECK: ldumaxab w2, w3, [sp] // encoding: [0xe3,0x63,0xa2,0x38]
+ // CHECK: ldumaxlb w0, w1, [x2] // encoding: [0x41,0x60,0x60,0x38]
+ // CHECK: ldumaxlb w2, w3, [sp] // encoding: [0xe3,0x63,0x62,0x38]
+
+ ldumaxalb w0, w1, [x2]
+ ldumaxalb w2, w3, [sp]
+ ldumaxah w0, w1, [x2]
+ ldumaxah w2, w3, [sp]
+ ldumaxlh w0, w1, [x2]
+ ldumaxlh w2, w3, [sp]
+ ldumaxalh w0, w1, [x2]
+ ldumaxalh w2, w3, [sp]
+ // CHECK: ldumaxalb w0, w1, [x2] // encoding: [0x41,0x60,0xe0,0x38]
+ // CHECK: ldumaxalb w2, w3, [sp] // encoding: [0xe3,0x63,0xe2,0x38]
+ // CHECK: ldumaxah w0, w1, [x2] // encoding: [0x41,0x60,0xa0,0x78]
+ // CHECK: ldumaxah w2, w3, [sp] // encoding: [0xe3,0x63,0xa2,0x78]
+ // CHECK: ldumaxlh w0, w1, [x2] // encoding: [0x41,0x60,0x60,0x78]
+ // CHECK: ldumaxlh w2, w3, [sp] // encoding: [0xe3,0x63,0x62,0x78]
+ // CHECK: ldumaxalh w0, w1, [x2] // encoding: [0x41,0x60,0xe0,0x78]
+ // CHECK: ldumaxalh w2, w3, [sp] // encoding: [0xe3,0x63,0xe2,0x78]
+
+ ldumax x0, x1, [x2]
+ ldumax x2, x3, [sp]
+ ldumaxa x0, x1, [x2]
+ ldumaxa x2, x3, [sp]
+ ldumaxl x0, x1, [x2]
+ ldumaxl x2, x3, [sp]
+ ldumaxal x0, x1, [x2]
+ ldumaxal x2, x3, [sp]
+ // CHECK: ldumax x0, x1, [x2] // encoding: [0x41,0x60,0x20,0xf8]
+ // CHECK: ldumax x2, x3, [sp] // encoding: [0xe3,0x63,0x22,0xf8]
+ // CHECK: ldumaxa x0, x1, [x2] // encoding: [0x41,0x60,0xa0,0xf8]
+ // CHECK: ldumaxa x2, x3, [sp] // encoding: [0xe3,0x63,0xa2,0xf8]
+ // CHECK: ldumaxl x0, x1, [x2] // encoding: [0x41,0x60,0x60,0xf8]
+ // CHECK: ldumaxl x2, x3, [sp] // encoding: [0xe3,0x63,0x62,0xf8]
+ // CHECK: ldumaxal x0, x1, [x2] // encoding: [0x41,0x60,0xe0,0xf8]
+ // CHECK: ldumaxal x2, x3, [sp] // encoding: [0xe3,0x63,0xe2,0xf8]
+
+ ldumin w0, w1, [x2]
+ ldumin w2, w3, [sp]
+ ldumina w0, w1, [x2]
+ ldumina w2, w3, [sp]
+ lduminl w0, w1, [x2]
+ lduminl w2, w3, [sp]
+ lduminal w0, w1, [x2]
+ lduminal w2, w3, [sp]
+ // CHECK: ldumin w0, w1, [x2] // encoding: [0x41,0x70,0x20,0xb8]
+ // CHECK: ldumin w2, w3, [sp] // encoding: [0xe3,0x73,0x22,0xb8]
+ // CHECK: ldumina w0, w1, [x2] // encoding: [0x41,0x70,0xa0,0xb8]
+ // CHECK: ldumina w2, w3, [sp] // encoding: [0xe3,0x73,0xa2,0xb8]
+ // CHECK: lduminl w0, w1, [x2] // encoding: [0x41,0x70,0x60,0xb8]
+ // CHECK: lduminl w2, w3, [sp] // encoding: [0xe3,0x73,0x62,0xb8]
+ // CHECK: lduminal w0, w1, [x2] // encoding: [0x41,0x70,0xe0,0xb8]
+ // CHECK: lduminal w2, w3, [sp] // encoding: [0xe3,0x73,0xe2,0xb8]
+
+ lduminb w0, w1, [x2]
+ lduminb w2, w3, [sp]
+ lduminh w0, w1, [x2]
+ lduminh w2, w3, [sp]
+ lduminab w0, w1, [x2]
+ lduminab w2, w3, [sp]
+ lduminlb w0, w1, [x2]
+ lduminlb w2, w3, [sp]
+ // CHECK: lduminb w0, w1, [x2] // encoding: [0x41,0x70,0x20,0x38]
+ // CHECK: lduminb w2, w3, [sp] // encoding: [0xe3,0x73,0x22,0x38]
+ // CHECK: lduminh w0, w1, [x2] // encoding: [0x41,0x70,0x20,0x78]
+ // CHECK: lduminh w2, w3, [sp] // encoding: [0xe3,0x73,0x22,0x78]
+ // CHECK: lduminab w0, w1, [x2] // encoding: [0x41,0x70,0xa0,0x38]
+ // CHECK: lduminab w2, w3, [sp] // encoding: [0xe3,0x73,0xa2,0x38]
+ // CHECK: lduminlb w0, w1, [x2] // encoding: [0x41,0x70,0x60,0x38]
+ // CHECK: lduminlb w2, w3, [sp] // encoding: [0xe3,0x73,0x62,0x38]
+
+ lduminalb w0, w1, [x2]
+ lduminalb w2, w3, [sp]
+ lduminah w0, w1, [x2]
+ lduminah w2, w3, [sp]
+ lduminlh w0, w1, [x2]
+ lduminlh w2, w3, [sp]
+ lduminalh w0, w1, [x2]
+ lduminalh w2, w3, [sp]
+ // CHECK: lduminalb w0, w1, [x2] // encoding: [0x41,0x70,0xe0,0x38]
+ // CHECK: lduminalb w2, w3, [sp] // encoding: [0xe3,0x73,0xe2,0x38]
+ // CHECK: lduminah w0, w1, [x2] // encoding: [0x41,0x70,0xa0,0x78]
+ // CHECK: lduminah w2, w3, [sp] // encoding: [0xe3,0x73,0xa2,0x78]
+ // CHECK: lduminlh w0, w1, [x2] // encoding: [0x41,0x70,0x60,0x78]
+ // CHECK: lduminlh w2, w3, [sp] // encoding: [0xe3,0x73,0x62,0x78]
+ // CHECK: lduminalh w0, w1, [x2] // encoding: [0x41,0x70,0xe0,0x78]
+ // CHECK: lduminalh w2, w3, [sp] // encoding: [0xe3,0x73,0xe2,0x78]
+
+ ldumin x0, x1, [x2]
+ ldumin x2, x3, [sp]
+ ldumina x0, x1, [x2]
+ ldumina x2, x3, [sp]
+ lduminl x0, x1, [x2]
+ lduminl x2, x3, [sp]
+ lduminal x0, x1, [x2]
+ lduminal x2, x3, [sp]
+ // CHECK: ldumin x0, x1, [x2] // encoding: [0x41,0x70,0x20,0xf8]
+ // CHECK: ldumin x2, x3, [sp] // encoding: [0xe3,0x73,0x22,0xf8]
+ // CHECK: ldumina x0, x1, [x2] // encoding: [0x41,0x70,0xa0,0xf8]
+ // CHECK: ldumina x2, x3, [sp] // encoding: [0xe3,0x73,0xa2,0xf8]
+ // CHECK: lduminl x0, x1, [x2] // encoding: [0x41,0x70,0x60,0xf8]
+ // CHECK: lduminl x2, x3, [sp] // encoding: [0xe3,0x73,0x62,0xf8]
+ // CHECK: lduminal x0, x1, [x2] // encoding: [0x41,0x70,0xe0,0xf8]
+ // CHECK: lduminal x2, x3, [sp] // encoding: [0xe3,0x73,0xe2,0xf8]
+
+ stadd w0, [x2]
+ stadd w2, [sp]
+ staddl w0, [x2]
+ staddl w2, [sp]
+ staddb w0, [x2]
+ staddb w2, [sp]
+ staddh w0, [x2]
+ staddh w2, [sp]
+ // CHECK: stadd w0, [x2] // encoding: [0x5f,0x00,0x20,0xb8]
+ // CHECK: stadd w2, [sp] // encoding: [0xff,0x03,0x22,0xb8]
+ // CHECK: staddl w0, [x2] // encoding: [0x5f,0x00,0x60,0xb8]
+ // CHECK: staddl w2, [sp] // encoding: [0xff,0x03,0x62,0xb8]
+ // CHECK: staddb w0, [x2] // encoding: [0x5f,0x00,0x20,0x38]
+ // CHECK: staddb w2, [sp] // encoding: [0xff,0x03,0x22,0x38]
+ // CHECK: staddh w0, [x2] // encoding: [0x5f,0x00,0x20,0x78]
+ // CHECK: staddh w2, [sp] // encoding: [0xff,0x03,0x22,0x78]
+
+ staddlb w0, [x2]
+ staddlb w2, [sp]
+ staddlh w0, [x2]
+ staddlh w2, [sp]
+ stadd x0, [x2]
+ stadd x2, [sp]
+ staddl x0, [x2]
+ staddl x2, [sp]
+ // CHECK: staddlb w0, [x2] // encoding: [0x5f,0x00,0x60,0x38]
+ // CHECK: staddlb w2, [sp] // encoding: [0xff,0x03,0x62,0x38]
+ // CHECK: staddlh w0, [x2] // encoding: [0x5f,0x00,0x60,0x78]
+ // CHECK: staddlh w2, [sp] // encoding: [0xff,0x03,0x62,0x78]
+ // CHECK: stadd x0, [x2] // encoding: [0x5f,0x00,0x20,0xf8]
+ // CHECK: stadd x2, [sp] // encoding: [0xff,0x03,0x22,0xf8]
+ // CHECK: staddl x0, [x2] // encoding: [0x5f,0x00,0x60,0xf8]
+ // CHECK: staddl x2, [sp] // encoding: [0xff,0x03,0x62,0xf8]
+
+ stclr w0, [x2]
+ stclr w2, [sp]
+ stclrl w0, [x2]
+ stclrl w2, [sp]
+ stclrb w0, [x2]
+ stclrb w2, [sp]
+ stclrh w0, [x2]
+ stclrh w2, [sp]
+ // CHECK: stclr w0, [x2] // encoding: [0x5f,0x10,0x20,0xb8]
+ // CHECK: stclr w2, [sp] // encoding: [0xff,0x13,0x22,0xb8]
+ // CHECK: stclrl w0, [x2] // encoding: [0x5f,0x10,0x60,0xb8]
+ // CHECK: stclrl w2, [sp] // encoding: [0xff,0x13,0x62,0xb8]
+ // CHECK: stclrb w0, [x2] // encoding: [0x5f,0x10,0x20,0x38]
+ // CHECK: stclrb w2, [sp] // encoding: [0xff,0x13,0x22,0x38]
+ // CHECK: stclrh w0, [x2] // encoding: [0x5f,0x10,0x20,0x78]
+ // CHECK: stclrh w2, [sp] // encoding: [0xff,0x13,0x22,0x78]
+
+ stclrlb w0, [x2]
+ stclrlb w2, [sp]
+ stclrlh w0, [x2]
+ stclrlh w2, [sp]
+ stclr x0, [x2]
+ stclr x2, [sp]
+ stclrl x0, [x2]
+ stclrl x2, [sp]
+ // CHECK: stclrlb w0, [x2] // encoding: [0x5f,0x10,0x60,0x38]
+ // CHECK: stclrlb w2, [sp] // encoding: [0xff,0x13,0x62,0x38]
+ // CHECK: stclrlh w0, [x2] // encoding: [0x5f,0x10,0x60,0x78]
+ // CHECK: stclrlh w2, [sp] // encoding: [0xff,0x13,0x62,0x78]
+ // CHECK: stclr x0, [x2] // encoding: [0x5f,0x10,0x20,0xf8]
+ // CHECK: stclr x2, [sp] // encoding: [0xff,0x13,0x22,0xf8]
+ // CHECK: stclrl x0, [x2] // encoding: [0x5f,0x10,0x60,0xf8]
+ // CHECK: stclrl x2, [sp] // encoding: [0xff,0x13,0x62,0xf8]
+
+ steor w0, [x2]
+ steor w2, [sp]
+ steorl w0, [x2]
+ steorl w2, [sp]
+ steorb w0, [x2]
+ steorb w2, [sp]
+ steorh w0, [x2]
+ steorh w2, [sp]
+ // CHECK: steor w0, [x2] // encoding: [0x5f,0x20,0x20,0xb8]
+ // CHECK: steor w2, [sp] // encoding: [0xff,0x23,0x22,0xb8]
+ // CHECK: steorl w0, [x2] // encoding: [0x5f,0x20,0x60,0xb8]
+ // CHECK: steorl w2, [sp] // encoding: [0xff,0x23,0x62,0xb8]
+ // CHECK: steorb w0, [x2] // encoding: [0x5f,0x20,0x20,0x38]
+ // CHECK: steorb w2, [sp] // encoding: [0xff,0x23,0x22,0x38]
+ // CHECK: steorh w0, [x2] // encoding: [0x5f,0x20,0x20,0x78]
+ // CHECK: steorh w2, [sp] // encoding: [0xff,0x23,0x22,0x78]
+
+ steorlb w0, [x2]
+ steorlb w2, [sp]
+ steorlh w0, [x2]
+ steorlh w2, [sp]
+ steor x0, [x2]
+ steor x2, [sp]
+ steorl x0, [x2]
+ steorl x2, [sp]
+ // CHECK: steorlb w0, [x2] // encoding: [0x5f,0x20,0x60,0x38]
+ // CHECK: steorlb w2, [sp] // encoding: [0xff,0x23,0x62,0x38]
+ // CHECK: steorlh w0, [x2] // encoding: [0x5f,0x20,0x60,0x78]
+ // CHECK: steorlh w2, [sp] // encoding: [0xff,0x23,0x62,0x78]
+ // CHECK: steor x0, [x2] // encoding: [0x5f,0x20,0x20,0xf8]
+ // CHECK: steor x2, [sp] // encoding: [0xff,0x23,0x22,0xf8]
+ // CHECK: steorl x0, [x2] // encoding: [0x5f,0x20,0x60,0xf8]
+ // CHECK: steorl x2, [sp] // encoding: [0xff,0x23,0x62,0xf8]
+
+ stset w0, [x2]
+ stset w2, [sp]
+ stsetl w0, [x2]
+ stsetl w2, [sp]
+ stsetb w0, [x2]
+ stsetb w2, [sp]
+ stseth w0, [x2]
+ stseth w2, [sp]
+ // CHECK: stset w0, [x2] // encoding: [0x5f,0x30,0x20,0xb8]
+ // CHECK: stset w2, [sp] // encoding: [0xff,0x33,0x22,0xb8]
+ // CHECK: stsetl w0, [x2] // encoding: [0x5f,0x30,0x60,0xb8]
+ // CHECK: stsetl w2, [sp] // encoding: [0xff,0x33,0x62,0xb8]
+ // CHECK: stsetb w0, [x2] // encoding: [0x5f,0x30,0x20,0x38]
+ // CHECK: stsetb w2, [sp] // encoding: [0xff,0x33,0x22,0x38]
+ // CHECK: stseth w0, [x2] // encoding: [0x5f,0x30,0x20,0x78]
+ // CHECK: stseth w2, [sp] // encoding: [0xff,0x33,0x22,0x78]
+
+ stsetlb w0, [x2]
+ stsetlb w2, [sp]
+ stsetlh w0, [x2]
+ stsetlh w2, [sp]
+ stset x0, [x2]
+ stset x2, [sp]
+ stsetl x0, [x2]
+ stsetl x2, [sp]
+ // CHECK: stsetlb w0, [x2] // encoding: [0x5f,0x30,0x60,0x38]
+ // CHECK: stsetlb w2, [sp] // encoding: [0xff,0x33,0x62,0x38]
+ // CHECK: stsetlh w0, [x2] // encoding: [0x5f,0x30,0x60,0x78]
+ // CHECK: stsetlh w2, [sp] // encoding: [0xff,0x33,0x62,0x78]
+ // CHECK: stset x0, [x2] // encoding: [0x5f,0x30,0x20,0xf8]
+ // CHECK: stset x2, [sp] // encoding: [0xff,0x33,0x22,0xf8]
+ // CHECK: stsetl x0, [x2] // encoding: [0x5f,0x30,0x60,0xf8]
+ // CHECK: stsetl x2, [sp] // encoding: [0xff,0x33,0x62,0xf8]
+
+ stsmax w0, [x2]
+ stsmax w2, [sp]
+ stsmaxl w0, [x2]
+ stsmaxl w2, [sp]
+ stsmaxb w0, [x2]
+ stsmaxb w2, [sp]
+ stsmaxh w0, [x2]
+ stsmaxh w2, [sp]
+ // CHECK: stsmax w0, [x2] // encoding: [0x5f,0x40,0x20,0xb8]
+ // CHECK: stsmax w2, [sp] // encoding: [0xff,0x43,0x22,0xb8]
+ // CHECK: stsmaxl w0, [x2] // encoding: [0x5f,0x40,0x60,0xb8]
+ // CHECK: stsmaxl w2, [sp] // encoding: [0xff,0x43,0x62,0xb8]
+ // CHECK: stsmaxb w0, [x2] // encoding: [0x5f,0x40,0x20,0x38]
+ // CHECK: stsmaxb w2, [sp] // encoding: [0xff,0x43,0x22,0x38]
+ // CHECK: stsmaxh w0, [x2] // encoding: [0x5f,0x40,0x20,0x78]
+ // CHECK: stsmaxh w2, [sp] // encoding: [0xff,0x43,0x22,0x78]
+
+ stsmaxlb w0, [x2]
+ stsmaxlb w2, [sp]
+ stsmaxlh w0, [x2]
+ stsmaxlh w2, [sp]
+ stsmax x0, [x2]
+ stsmax x2, [sp]
+ stsmaxl x0, [x2]
+ stsmaxl x2, [sp]
+ // CHECK: stsmaxlb w0, [x2] // encoding: [0x5f,0x40,0x60,0x38]
+ // CHECK: stsmaxlb w2, [sp] // encoding: [0xff,0x43,0x62,0x38]
+ // CHECK: stsmaxlh w0, [x2] // encoding: [0x5f,0x40,0x60,0x78]
+ // CHECK: stsmaxlh w2, [sp] // encoding: [0xff,0x43,0x62,0x78]
+ // CHECK: stsmax x0, [x2] // encoding: [0x5f,0x40,0x20,0xf8]
+ // CHECK: stsmax x2, [sp] // encoding: [0xff,0x43,0x22,0xf8]
+ // CHECK: stsmaxl x0, [x2] // encoding: [0x5f,0x40,0x60,0xf8]
+ // CHECK: stsmaxl x2, [sp] // encoding: [0xff,0x43,0x62,0xf8]
+
+ stsmin w0, [x2]
+ stsmin w2, [sp]
+ stsminl w0, [x2]
+ stsminl w2, [sp]
+ stsminb w0, [x2]
+ stsminb w2, [sp]
+ stsminh w0, [x2]
+ stsminh w2, [sp]
+ // CHECK: stsmin w0, [x2] // encoding: [0x5f,0x50,0x20,0xb8]
+ // CHECK: stsmin w2, [sp] // encoding: [0xff,0x53,0x22,0xb8]
+ // CHECK: stsminl w0, [x2] // encoding: [0x5f,0x50,0x60,0xb8]
+ // CHECK: stsminl w2, [sp] // encoding: [0xff,0x53,0x62,0xb8]
+ // CHECK: stsminb w0, [x2] // encoding: [0x5f,0x50,0x20,0x38]
+ // CHECK: stsminb w2, [sp] // encoding: [0xff,0x53,0x22,0x38]
+ // CHECK: stsminh w0, [x2] // encoding: [0x5f,0x50,0x20,0x78]
+ // CHECK: stsminh w2, [sp] // encoding: [0xff,0x53,0x22,0x78]
+
+ stsminlb w0, [x2]
+ stsminlb w2, [sp]
+ stsminlh w0, [x2]
+ stsminlh w2, [sp]
+ stsmin x0, [x2]
+ stsmin x2, [sp]
+ stsminl x0, [x2]
+ stsminl x2, [sp]
+ // CHECK: stsminlb w0, [x2] // encoding: [0x5f,0x50,0x60,0x38]
+ // CHECK: stsminlb w2, [sp] // encoding: [0xff,0x53,0x62,0x38]
+ // CHECK: stsminlh w0, [x2] // encoding: [0x5f,0x50,0x60,0x78]
+ // CHECK: stsminlh w2, [sp] // encoding: [0xff,0x53,0x62,0x78]
+ // CHECK: stsmin x0, [x2] // encoding: [0x5f,0x50,0x20,0xf8]
+ // CHECK: stsmin x2, [sp] // encoding: [0xff,0x53,0x22,0xf8]
+ // CHECK: stsminl x0, [x2] // encoding: [0x5f,0x50,0x60,0xf8]
+ // CHECK: stsminl x2, [sp] // encoding: [0xff,0x53,0x62,0xf8]
+
+ stumax w0, [x2]
+ stumax w2, [sp]
+ stumaxl w0, [x2]
+ stumaxl w2, [sp]
+ stumaxb w0, [x2]
+ stumaxb w2, [sp]
+ stumaxh w0, [x2]
+ stumaxh w2, [sp]
+ // CHECK: stumax w0, [x2] // encoding: [0x5f,0x60,0x20,0xb8]
+ // CHECK: stumax w2, [sp] // encoding: [0xff,0x63,0x22,0xb8]
+ // CHECK: stumaxl w0, [x2] // encoding: [0x5f,0x60,0x60,0xb8]
+ // CHECK: stumaxl w2, [sp] // encoding: [0xff,0x63,0x62,0xb8]
+ // CHECK: stumaxb w0, [x2] // encoding: [0x5f,0x60,0x20,0x38]
+ // CHECK: stumaxb w2, [sp] // encoding: [0xff,0x63,0x22,0x38]
+ // CHECK: stumaxh w0, [x2] // encoding: [0x5f,0x60,0x20,0x78]
+ // CHECK: stumaxh w2, [sp] // encoding: [0xff,0x63,0x22,0x78]
+
+ stumaxlb w0, [x2]
+ stumaxlb w2, [sp]
+ stumaxlh w0, [x2]
+ stumaxlh w2, [sp]
+ stumax x0, [x2]
+ stumax x2, [sp]
+ stumaxl x0, [x2]
+ stumaxl x2, [sp]
+ // CHECK: stumaxlb w0, [x2] // encoding: [0x5f,0x60,0x60,0x38]
+ // CHECK: stumaxlb w2, [sp] // encoding: [0xff,0x63,0x62,0x38]
+ // CHECK: stumaxlh w0, [x2] // encoding: [0x5f,0x60,0x60,0x78]
+ // CHECK: stumaxlh w2, [sp] // encoding: [0xff,0x63,0x62,0x78]
+ // CHECK: stumax x0, [x2] // encoding: [0x5f,0x60,0x20,0xf8]
+ // CHECK: stumax x2, [sp] // encoding: [0xff,0x63,0x22,0xf8]
+ // CHECK: stumaxl x0, [x2] // encoding: [0x5f,0x60,0x60,0xf8]
+ // CHECK: stumaxl x2, [sp] // encoding: [0xff,0x63,0x62,0xf8]
+
+ stumin w0, [x2]
+ stumin w2, [sp]
+ stuminl w0, [x2]
+ stuminl w2, [sp]
+ stuminb w0, [x2]
+ stuminb w2, [sp]
+ stuminh w0, [x2]
+ stuminh w2, [sp]
+ // CHECK: stumin w0, [x2] // encoding: [0x5f,0x70,0x20,0xb8]
+ // CHECK: stumin w2, [sp] // encoding: [0xff,0x73,0x22,0xb8]
+ // CHECK: stuminl w0, [x2] // encoding: [0x5f,0x70,0x60,0xb8]
+ // CHECK: stuminl w2, [sp] // encoding: [0xff,0x73,0x62,0xb8]
+ // CHECK: stuminb w0, [x2] // encoding: [0x5f,0x70,0x20,0x38]
+ // CHECK: stuminb w2, [sp] // encoding: [0xff,0x73,0x22,0x38]
+ // CHECK: stuminh w0, [x2] // encoding: [0x5f,0x70,0x20,0x78]
+ // CHECK: stuminh w2, [sp] // encoding: [0xff,0x73,0x22,0x78]
+
+ cas b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: cas b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ cas b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: cas b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ cas h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: cas h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ cas h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: cas h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ casa b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casa b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ casa b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casa b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ casa h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casa h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ casa h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casa h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ casb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ casb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ casb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ casb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ cash b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: cash b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ cash b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: cash b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ cash h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: cash h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ cash h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: cash h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ casah b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casah b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ casah b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casah b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ casah h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casah h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ casah h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casah h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ casalh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casalh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ casalh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casalh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ casalh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casalh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ casalh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casalh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+
+ casl b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casl b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ casl b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casl b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ casl h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casl h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ casl h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casl h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ caslb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: caslb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ caslb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: caslb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ caslb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: caslb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ caslb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: caslb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+
+ casalb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casalb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ casalb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casalb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ casalb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casalb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ casalb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casalb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ casalh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casalh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ casalh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casalh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ casalh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casalh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ casalh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casalh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ cas v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ casa v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ casl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ casal v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casal v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ casb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ casab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ caslb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: caslb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ casalb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casalb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ casah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ caslh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: caslh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ casalh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: casalh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ casp b0, b1, [x2]
+ // CHECK-ERROR: error: expected first even register of a consecutive same-size even/odd register pair
+ // CHECK-ERROR: casp b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ casp b2, b3, [sp]
+ // CHECK-ERROR: error: expected first even register of a consecutive same-size even/odd register pair
+ // CHECK-ERROR: casp b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ casp h0, h1, [x2]
+ // CHECK-ERROR: error: expected first even register of a consecutive same-size even/odd register pair
+ // CHECK-ERROR: casp h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ casp h2, h3, [sp]
+ // CHECK-ERROR: error: expected first even register of a consecutive same-size even/odd register pair
+ // CHECK-ERROR: casp h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ caspa b0, b1, [x2]
+ // CHECK-ERROR: error: expected first even register of a consecutive same-size even/odd register pair
+ // CHECK-ERROR: caspa b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ caspa b2, b3, [sp]
+ // CHECK-ERROR: error: expected first even register of a consecutive same-size even/odd register pair
+ // CHECK-ERROR: caspa b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ caspa h0, h1, [x2]
+ // CHECK-ERROR: error: expected first even register of a consecutive same-size even/odd register pair
+ // CHECK-ERROR: caspa h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ caspa h2, h3, [sp]
+ // CHECK-ERROR: error: expected first even register of a consecutive same-size even/odd register pair
+ // CHECK-ERROR: caspa h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ caspl b0, b1, [x2]
+ // CHECK-ERROR: error: expected first even register of a consecutive same-size even/odd register pair
+ // CHECK-ERROR: caspl b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ caspl b2, b3, [sp]
+ // CHECK-ERROR: error: expected first even register of a consecutive same-size even/odd register pair
+ // CHECK-ERROR: caspl b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ caspl h0, h1, [x2]
+ // CHECK-ERROR: error: expected first even register of a consecutive same-size even/odd register pair
+ // CHECK-ERROR: caspl h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ caspl h2, h3, [sp]
+ // CHECK-ERROR: error: expected first even register of a consecutive same-size even/odd register pair
+ // CHECK-ERROR: caspl h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ caspal b0, b1, [x2]
+ // CHECK-ERROR: error: expected first even register of a consecutive same-size even/odd register pair
+ // CHECK-ERROR: caspal b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ caspal b2, b3, [sp]
+ // CHECK-ERROR: error: expected first even register of a consecutive same-size even/odd register pair
+ // CHECK-ERROR: caspal b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ caspal h0, h1, [x2]
+ // CHECK-ERROR: error: expected first even register of a consecutive same-size even/odd register pair
+ // CHECK-ERROR: caspal h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ caspal h2, h3, [sp]
+ // CHECK-ERROR: error: expected first even register of a consecutive same-size even/odd register pair
+ // CHECK-ERROR: caspal h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ swp b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swp b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ swp b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swp b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ swpa b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpa b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ swpa b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpa b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ swpah b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpah b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ swpah b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpah b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ swpl b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpl b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ swpl b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpl b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ swpal b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpal b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ swpal b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpal b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ swpalb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpalb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ swpalb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpalb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ swpalh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpalh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ swpalh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpalh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ swpb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ swpb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ swpab b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpab b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ swpab b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpab b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ swplb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swplb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ swplb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swplb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ swph b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swph b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ swph b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swph b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ swp v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swp v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ swpa v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpa v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ swpah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ swpl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ swpal v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpal v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ swpalb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpalb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ swpalh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpalh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ swpb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ swpab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swpab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ swplb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swplb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ swph v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: swph v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
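+ // LDADD (atomic add) variants: general-purpose register operands only.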
+ ldadd b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldadd b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldadd b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldadd b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldadd h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldadd h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldadd h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldadd h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldadd v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldadd v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldadda b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldadda b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldadda b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldadda b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldadda h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldadda h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldadda h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldadda h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldadda v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldadda v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldaddl b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddl b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddl b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddl b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddl h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddl h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddl h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddl h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldaddal b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddal b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddal b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddal b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddal h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddal h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddal h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddal h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddal v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddal v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldaddb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldaddh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldaddab b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddab b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddab b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddab b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddab h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddab h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddab h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddab h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldaddlb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddlb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddlb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddlb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddlb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddlb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddlb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddlb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddlb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddlb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldaddalb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddalb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddalb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddalb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddalb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddalb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddalb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddalb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddalb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddalb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldaddah b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddah b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddah b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddah b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddah h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddah h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddah h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddah h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldaddlh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddlh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddlh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddlh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddlh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddlh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddlh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddlh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddlh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddlh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldaddalh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddalh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddalh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddalh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddalh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddalh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldaddalh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddalh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldaddalh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldaddalh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
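+ // LDCLR (atomic bit clear) variants: general-purpose register operands only.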
+ ldclr b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclr b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclr b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclr b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclr h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclr h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclr h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclr h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclr v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclr v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldclra b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclra b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclra b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclra b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclra h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclra h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclra h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclra h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclra v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclra v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldclrl b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrl b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclrl b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrl b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclrl h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrl h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclrl h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrl h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclrl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldclral b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclral b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclral b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclral b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclral h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclral h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclral h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclral h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclral v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclral v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldclrb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclrb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclrb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclrb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclrb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldclrh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclrh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclrh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclrh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclrh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldclrab b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrab b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclrab b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrab b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclrab h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrab h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclrab h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrab h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclrab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldclrlb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrlb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclrlb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrlb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclrlb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrlb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclrlb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrlb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclrlb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrlb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldclralb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclralb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclralb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclralb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclralb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclralb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclralb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclralb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclralb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclralb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldclrah b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrah b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclrah b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrah b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclrah h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrah h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclrah h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrah h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclrah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldclrlh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrlh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclrlh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrlh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclrlh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrlh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclrlh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrlh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclrlh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclrlh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldclralh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclralh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclralh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclralh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclralh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclralh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldclralh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclralh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldclralh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldclralh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
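+ // LDEOR (atomic exclusive OR) variants: general-purpose register operands only.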
+ ldeor b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeor b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeor b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeor b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeor h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeor h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeor h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeor h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeor v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeor v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldeora b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeora b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeora b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeora b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeora h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeora h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeora h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeora h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeora v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeora v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldeorl b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorl b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeorl b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorl b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeorl h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorl h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeorl h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorl h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeorl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldeoral b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeoral b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeoral b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeoral b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeoral h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeoral h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeoral h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeoral h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeoral v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeoral v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldeorb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeorb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeorb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeorb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeorb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldeorh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeorh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeorh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeorh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeorh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldeorab b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorab b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeorab b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorab b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeorab h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorab h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeorab h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorab h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeorab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldeorlb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorlb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeorlb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorlb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeorlb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorlb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeorlb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorlb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeorlb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorlb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldeoralb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeoralb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeoralb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeoralb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeoralb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeoralb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeoralb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeoralb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeoralb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeoralb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldeorah b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorah b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeorah b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorah b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeorah h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorah h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeorah h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorah h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeorah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldeorlh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorlh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeorlh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorlh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeorlh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorlh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeorlh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorlh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeorlh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeorlh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldeoralh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeoralh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeoralh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeoralh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeoralh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeoralh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldeoralh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeoralh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldeoralh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldeoralh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
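+ // LDSET (atomic bit set) variants: general-purpose register operands only.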
+ ldset b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldset b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldset b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldset b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldset h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldset h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldset h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldset h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldset v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldset v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldseta b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldseta b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldseta b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldseta b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldseta h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldseta h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldseta h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldseta h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldseta v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldseta v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsetl b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetl b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsetl b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetl b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsetl h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetl h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsetl h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetl h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsetl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsetal b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetal b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsetal b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetal b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsetal h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetal h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsetal h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetal h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsetal v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetal v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsetb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsetb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsetb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsetb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsetb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldseth b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldseth b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldseth b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldseth b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldseth h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldseth h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldseth h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldseth h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldseth v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldseth v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsetab b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetab b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsetab b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetab b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsetab h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetab h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsetab h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetab h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsetab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsetlb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetlb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsetlb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetlb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsetlb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetlb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsetlb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetlb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsetlb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetlb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsetalb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetalb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsetalb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetalb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsetalb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetalb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsetalb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetalb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsetalb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetalb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsetah b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetah b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsetah b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetah b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsetah h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetah h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsetah h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetah h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsetah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsetlh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetlh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsetlh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetlh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsetlh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetlh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsetlh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetlh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsetlh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetlh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsetalh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetalh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsetalh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetalh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsetalh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetalh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsetalh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetalh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsetalh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsetalh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
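+ // LDSMAX (atomic signed maximum) variants: general-purpose register operands only.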
+ ldsmax b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmax b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmax b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmax b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmax h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmax h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmax h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmax h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmax v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmax v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsmaxa b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxa b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxa b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxa b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxa h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxa h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxa h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxa h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxa v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxa v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsmaxl b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxl b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxl b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxl b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxl h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxl h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxl h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxl h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsmaxal b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxal b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxal b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxal b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxal h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxal h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxal h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxal h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxal v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxal v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsmaxb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsmaxh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsmaxab b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxab b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxab b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxab b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxab h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxab h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxab h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxab h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsmaxlb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxlb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxlb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxlb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxlb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxlb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxlb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxlb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxlb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxlb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsmaxalb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxalb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxalb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxalb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxalb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxalb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxalb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxalb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxalb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxalb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsmaxah b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxah b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxah b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxah b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxah h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxah h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxah h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxah h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsmaxlh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxlh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxlh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxlh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxlh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxlh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxlh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxlh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxlh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxlh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsmaxalh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxalh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxalh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxalh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxalh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxalh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmaxalh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxalh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmaxalh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmaxalh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
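+ // LDSMIN (atomic signed minimum) variants: general-purpose register operands only.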
+ ldsmin b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmin b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmin b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmin b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmin h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmin h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmin h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmin h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmin v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmin v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsmina b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmina b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmina b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmina b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmina h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmina h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsmina h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmina h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsmina v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsmina v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsminl b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminl b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminl b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminl b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminl h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminl h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminl h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminl h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsminal b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminal b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminal b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminal b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminal h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminal h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminal h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminal h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminal v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminal v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsminb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsminh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsminab b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminab b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminab b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminab b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminab h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminab h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminab h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminab h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsminlb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminlb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminlb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminlb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminlb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminlb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminlb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminlb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminlb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminlb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsminalb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminalb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminalb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminalb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminalb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminalb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminalb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminalb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminalb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminalb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsminah b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminah b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminah b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminah b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminah h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminah h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminah h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminah h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsminlh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminlh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminlh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminlh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminlh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminlh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminlh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminlh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminlh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminlh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldsminalh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminalh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminalh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminalh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminalh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminalh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldsminalh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminalh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldsminalh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldsminalh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldumax b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumax b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumax b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumax b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumax h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumax h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumax h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumax h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumax v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumax v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldumaxa b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxa b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxa b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxa b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxa h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxa h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxa h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxa h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxa v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxa v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldumaxl b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxl b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxl b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxl b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxl h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxl h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxl h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxl h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldumaxal b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxal b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxal b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxal b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxal h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxal h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxal h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxal h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxal v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxal v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldumaxb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldumaxh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldumaxab b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxab b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxab b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxab b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxab h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxab h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxab h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxab h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldumaxlb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxlb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxlb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxlb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxlb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxlb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxlb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxlb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxlb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxlb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldumaxalb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxalb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxalb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxalb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxalb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxalb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxalb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxalb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxalb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxalb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldumaxah b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxah b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxah b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxah b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxah h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxah h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxah h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxah h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldumaxlh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxlh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxlh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxlh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxlh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxlh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxlh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxlh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxlh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxlh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldumaxalh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxalh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxalh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxalh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxalh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxalh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumaxalh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxalh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumaxalh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumaxalh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldumin b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumin b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumin b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumin b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumin h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumin h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumin h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumin h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumin v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumin v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ ldumina b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumina b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumina b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumina b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumina h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumina h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ ldumina h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumina h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ ldumina v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: ldumina v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ lduminl b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminl b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminl b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminl b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminl h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminl h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminl h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminl h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminl v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ lduminal b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminal b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminal b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminal b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminal h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminal h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminal h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminal h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminal v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminal v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ lduminb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ lduminh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ lduminab b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminab b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminab b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminab b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminab h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminab h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminab h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminab h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminab v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ lduminlb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminlb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminlb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminlb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminlb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminlb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminlb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminlb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminlb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminlb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ lduminalb b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminalb b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminalb b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminalb b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminalb h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminalb h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminalb h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminalb h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminalb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminalb v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ lduminah b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminah b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminah b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminah b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminah h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminah h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminah h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminah h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminah v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ lduminlh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminlh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminlh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminlh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminlh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminlh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminlh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminlh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminlh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminlh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ lduminalh b0, b1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminalh b0, b1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminalh b2, b3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminalh b2, b3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminalh h0, h1, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminalh h0, h1, [x2]
+ // CHECK-ERROR: ^
+
+ lduminalh h2, h3, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminalh h2, h3, [sp]
+ // CHECK-ERROR: ^
+
+ lduminalh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: lduminalh v0.4h, v1.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stadd b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stadd b0, [x2]
+ // CHECK-ERROR: ^
+
+ stadd b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stadd b2, [sp]
+ // CHECK-ERROR: ^
+
+ stadd h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stadd h0, [x2]
+ // CHECK-ERROR: ^
+
+ stadd h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stadd h2, [sp]
+ // CHECK-ERROR: ^
+
+ stadd v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stadd v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ staddl b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddl b0, [x2]
+ // CHECK-ERROR: ^
+
+ staddl b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddl b2, [sp]
+ // CHECK-ERROR: ^
+
+ staddl h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddl h0, [x2]
+ // CHECK-ERROR: ^
+
+ staddl h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddl h2, [sp]
+ // CHECK-ERROR: ^
+
+ staddl v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddl v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ staddb b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddb b0, [x2]
+ // CHECK-ERROR: ^
+
+ staddb b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddb b2, [sp]
+ // CHECK-ERROR: ^
+
+ staddb h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddb h0, [x2]
+ // CHECK-ERROR: ^
+
+ staddb h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddb h2, [sp]
+ // CHECK-ERROR: ^
+
+ staddb v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddb v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ staddh b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddh b0, [x2]
+ // CHECK-ERROR: ^
+
+ staddh b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddh b2, [sp]
+ // CHECK-ERROR: ^
+
+ staddh h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddh h0, [x2]
+ // CHECK-ERROR: ^
+
+ staddh h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddh h2, [sp]
+ // CHECK-ERROR: ^
+
+ staddh v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddh v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ staddlb b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddlb b0, [x2]
+ // CHECK-ERROR: ^
+
+ staddlb b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddlb b2, [sp]
+ // CHECK-ERROR: ^
+
+ staddlb h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddlb h0, [x2]
+ // CHECK-ERROR: ^
+
+ staddlb h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddlb h2, [sp]
+ // CHECK-ERROR: ^
+
+ staddlb v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddlb v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ staddlh b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddlh b0, [x2]
+ // CHECK-ERROR: ^
+
+ staddlh b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddlh b2, [sp]
+ // CHECK-ERROR: ^
+
+ staddlh h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddlh h0, [x2]
+ // CHECK-ERROR: ^
+
+ staddlh h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddlh h2, [sp]
+ // CHECK-ERROR: ^
+
+ staddlh v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: staddlh v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stclr b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclr b0, [x2]
+ // CHECK-ERROR: ^
+
+ stclr b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclr b2, [sp]
+ // CHECK-ERROR: ^
+
+ stclr h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclr h0, [x2]
+ // CHECK-ERROR: ^
+
+ stclr h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclr h2, [sp]
+ // CHECK-ERROR: ^
+
+ stclr v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclr v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stclrl b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrl b0, [x2]
+ // CHECK-ERROR: ^
+
+ stclrl b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrl b2, [sp]
+ // CHECK-ERROR: ^
+
+ stclrl h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrl h0, [x2]
+ // CHECK-ERROR: ^
+
+ stclrl h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrl h2, [sp]
+ // CHECK-ERROR: ^
+
+ stclrl v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrl v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stclrb b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrb b0, [x2]
+ // CHECK-ERROR: ^
+
+ stclrb b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrb b2, [sp]
+ // CHECK-ERROR: ^
+
+ stclrb h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrb h0, [x2]
+ // CHECK-ERROR: ^
+
+ stclrb h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrb h2, [sp]
+ // CHECK-ERROR: ^
+
+ stclrb v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrb v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stclrh b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrh b0, [x2]
+ // CHECK-ERROR: ^
+
+ stclrh b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrh b2, [sp]
+ // CHECK-ERROR: ^
+
+ stclrh h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrh h0, [x2]
+ // CHECK-ERROR: ^
+
+ stclrh h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrh h2, [sp]
+ // CHECK-ERROR: ^
+
+ stclrh v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrh v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stclrlb b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrlb b0, [x2]
+ // CHECK-ERROR: ^
+
+ stclrlb b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrlb b2, [sp]
+ // CHECK-ERROR: ^
+
+ stclrlb h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrlb h0, [x2]
+ // CHECK-ERROR: ^
+
+ stclrlb h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrlb h2, [sp]
+ // CHECK-ERROR: ^
+
+ stclrlb v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrlb v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stclrlh b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrlh b0, [x2]
+ // CHECK-ERROR: ^
+
+ stclrlh b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrlh b2, [sp]
+ // CHECK-ERROR: ^
+
+ stclrlh h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrlh h0, [x2]
+ // CHECK-ERROR: ^
+
+ stclrlh h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrlh h2, [sp]
+ // CHECK-ERROR: ^
+
+ stclrlh v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stclrlh v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ steor b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steor b0, [x2]
+ // CHECK-ERROR: ^
+
+ steor b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steor b2, [sp]
+ // CHECK-ERROR: ^
+
+ steor h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steor h0, [x2]
+ // CHECK-ERROR: ^
+
+ steor h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steor h2, [sp]
+ // CHECK-ERROR: ^
+
+ steor v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steor v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ steorl b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorl b0, [x2]
+ // CHECK-ERROR: ^
+
+ steorl b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorl b2, [sp]
+ // CHECK-ERROR: ^
+
+ steorl h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorl h0, [x2]
+ // CHECK-ERROR: ^
+
+ steorl h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorl h2, [sp]
+ // CHECK-ERROR: ^
+
+ steorl v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorl v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ steorb b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorb b0, [x2]
+ // CHECK-ERROR: ^
+
+ steorb b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorb b2, [sp]
+ // CHECK-ERROR: ^
+
+ steorb h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorb h0, [x2]
+ // CHECK-ERROR: ^
+
+ steorb h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorb h2, [sp]
+ // CHECK-ERROR: ^
+
+ steorb v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorb v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ steorh b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorh b0, [x2]
+ // CHECK-ERROR: ^
+
+ steorh b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorh b2, [sp]
+ // CHECK-ERROR: ^
+
+ steorh h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorh h0, [x2]
+ // CHECK-ERROR: ^
+
+ steorh h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorh h2, [sp]
+ // CHECK-ERROR: ^
+
+ steorh v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorh v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ steorlb b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorlb b0, [x2]
+ // CHECK-ERROR: ^
+
+ steorlb b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorlb b2, [sp]
+ // CHECK-ERROR: ^
+
+ steorlb h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorlb h0, [x2]
+ // CHECK-ERROR: ^
+
+ steorlb h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorlb h2, [sp]
+ // CHECK-ERROR: ^
+
+ steorlb v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorlb v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ steorlh b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorlh b0, [x2]
+ // CHECK-ERROR: ^
+
+ steorlh b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorlh b2, [sp]
+ // CHECK-ERROR: ^
+
+ steorlh h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorlh h0, [x2]
+ // CHECK-ERROR: ^
+
+ steorlh h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorlh h2, [sp]
+ // CHECK-ERROR: ^
+
+ steorlh v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: steorlh v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stset b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stset b0, [x2]
+ // CHECK-ERROR: ^
+
+ stset b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stset b2, [sp]
+ // CHECK-ERROR: ^
+
+ stset h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stset h0, [x2]
+ // CHECK-ERROR: ^
+
+ stset h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stset h2, [sp]
+ // CHECK-ERROR: ^
+
+ stset v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stset v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stsetl b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetl b0, [x2]
+ // CHECK-ERROR: ^
+
+ stsetl b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetl b2, [sp]
+ // CHECK-ERROR: ^
+
+ stsetl h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetl h0, [x2]
+ // CHECK-ERROR: ^
+
+ stsetl h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetl h2, [sp]
+ // CHECK-ERROR: ^
+
+ stsetl v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetl v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stsetb b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetb b0, [x2]
+ // CHECK-ERROR: ^
+
+ stsetb b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetb b2, [sp]
+ // CHECK-ERROR: ^
+
+ stsetb h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetb h0, [x2]
+ // CHECK-ERROR: ^
+
+ stsetb h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetb h2, [sp]
+ // CHECK-ERROR: ^
+
+ stsetb v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetb v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stseth b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stseth b0, [x2]
+ // CHECK-ERROR: ^
+
+ stseth b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stseth b2, [sp]
+ // CHECK-ERROR: ^
+
+ stseth h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stseth h0, [x2]
+ // CHECK-ERROR: ^
+
+ stseth h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stseth h2, [sp]
+ // CHECK-ERROR: ^
+
+ stseth v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stseth v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stsetlb b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetlb b0, [x2]
+ // CHECK-ERROR: ^
+
+ stsetlb b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetlb b2, [sp]
+ // CHECK-ERROR: ^
+
+ stsetlb h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetlb h0, [x2]
+ // CHECK-ERROR: ^
+
+ stsetlb h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetlb h2, [sp]
+ // CHECK-ERROR: ^
+
+ stsetlb v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetlb v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stsetlh b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetlh b0, [x2]
+ // CHECK-ERROR: ^
+
+ stsetlh b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetlh b2, [sp]
+ // CHECK-ERROR: ^
+
+ stsetlh h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetlh h0, [x2]
+ // CHECK-ERROR: ^
+
+ stsetlh h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetlh h2, [sp]
+ // CHECK-ERROR: ^
+
+ stsetlh v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsetlh v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stsmax b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmax b0, [x2]
+ // CHECK-ERROR: ^
+
+ stsmax b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmax b2, [sp]
+ // CHECK-ERROR: ^
+
+ stsmax h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmax h0, [x2]
+ // CHECK-ERROR: ^
+
+ stsmax h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmax h2, [sp]
+ // CHECK-ERROR: ^
+
+ stsmax v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmax v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stsmaxl b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxl b0, [x2]
+ // CHECK-ERROR: ^
+
+ stsmaxl b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxl b2, [sp]
+ // CHECK-ERROR: ^
+
+ stsmaxl h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxl h0, [x2]
+ // CHECK-ERROR: ^
+
+ stsmaxl h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxl h2, [sp]
+ // CHECK-ERROR: ^
+
+ stsmaxl v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxl v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stsmaxb b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxb b0, [x2]
+ // CHECK-ERROR: ^
+
+ stsmaxb b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxb b2, [sp]
+ // CHECK-ERROR: ^
+
+ stsmaxb h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxb h0, [x2]
+ // CHECK-ERROR: ^
+
+ stsmaxb h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxb h2, [sp]
+ // CHECK-ERROR: ^
+
+ stsmaxb v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxb v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stsmaxh b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxh b0, [x2]
+ // CHECK-ERROR: ^
+
+ stsmaxh b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxh b2, [sp]
+ // CHECK-ERROR: ^
+
+ stsmaxh h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxh h0, [x2]
+ // CHECK-ERROR: ^
+
+ stsmaxh h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxh h2, [sp]
+ // CHECK-ERROR: ^
+
+ stsmaxh v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxh v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stsmaxlb b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxlb b0, [x2]
+ // CHECK-ERROR: ^
+
+ stsmaxlb b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxlb b2, [sp]
+ // CHECK-ERROR: ^
+
+ stsmaxlb h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxlb h0, [x2]
+ // CHECK-ERROR: ^
+
+ stsmaxlb h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxlb h2, [sp]
+ // CHECK-ERROR: ^
+
+ stsmaxlb v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxlb v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stsmaxlh b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxlh b0, [x2]
+ // CHECK-ERROR: ^
+
+ stsmaxlh b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxlh b2, [sp]
+ // CHECK-ERROR: ^
+
+ stsmaxlh h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxlh h0, [x2]
+ // CHECK-ERROR: ^
+
+ stsmaxlh h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxlh h2, [sp]
+ // CHECK-ERROR: ^
+
+ stsmaxlh v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmaxlh v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stsmin b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmin b0, [x2]
+ // CHECK-ERROR: ^
+
+ stsmin b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmin b2, [sp]
+ // CHECK-ERROR: ^
+
+ stsmin h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmin h0, [x2]
+ // CHECK-ERROR: ^
+
+ stsmin h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmin h2, [sp]
+ // CHECK-ERROR: ^
+
+ stsmin v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsmin v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stsminl b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminl b0, [x2]
+ // CHECK-ERROR: ^
+
+ stsminl b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminl b2, [sp]
+ // CHECK-ERROR: ^
+
+ stsminl h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminl h0, [x2]
+ // CHECK-ERROR: ^
+
+ stsminl h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminl h2, [sp]
+ // CHECK-ERROR: ^
+
+ stsminl v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminl v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stsminb b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminb b0, [x2]
+ // CHECK-ERROR: ^
+
+ stsminb b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminb b2, [sp]
+ // CHECK-ERROR: ^
+
+ stsminb h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminb h0, [x2]
+ // CHECK-ERROR: ^
+
+ stsminb h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminb h2, [sp]
+ // CHECK-ERROR: ^
+
+ stsminb v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminb v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stsminh b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminh b0, [x2]
+ // CHECK-ERROR: ^
+
+ stsminh b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminh b2, [sp]
+ // CHECK-ERROR: ^
+
+ stsminh h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminh h0, [x2]
+ // CHECK-ERROR: ^
+
+ stsminh h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminh h2, [sp]
+ // CHECK-ERROR: ^
+
+ stsminh v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminh v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stsminlb b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminlb b0, [x2]
+ // CHECK-ERROR: ^
+
+ stsminlb b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminlb b2, [sp]
+ // CHECK-ERROR: ^
+
+ stsminlb h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminlb h0, [x2]
+ // CHECK-ERROR: ^
+
+ stsminlb h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminlb h2, [sp]
+ // CHECK-ERROR: ^
+
+ stsminlb v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminlb v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stsminlh b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminlh b0, [x2]
+ // CHECK-ERROR: ^
+
+ stsminlh b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminlh b2, [sp]
+ // CHECK-ERROR: ^
+
+ stsminlh h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminlh h0, [x2]
+ // CHECK-ERROR: ^
+
+ stsminlh h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminlh h2, [sp]
+ // CHECK-ERROR: ^
+
+ stsminlh v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stsminlh v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stumax b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumax b0, [x2]
+ // CHECK-ERROR: ^
+
+ stumax b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumax b2, [sp]
+ // CHECK-ERROR: ^
+
+ stumax h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumax h0, [x2]
+ // CHECK-ERROR: ^
+
+ stumax h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumax h2, [sp]
+ // CHECK-ERROR: ^
+
+ stumax v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumax v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stumaxl b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxl b0, [x2]
+ // CHECK-ERROR: ^
+
+ stumaxl b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxl b2, [sp]
+ // CHECK-ERROR: ^
+
+ stumaxl h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxl h0, [x2]
+ // CHECK-ERROR: ^
+
+ stumaxl h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxl h2, [sp]
+ // CHECK-ERROR: ^
+
+ stumaxl v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxl v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stumaxb b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxb b0, [x2]
+ // CHECK-ERROR: ^
+
+ stumaxb b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxb b2, [sp]
+ // CHECK-ERROR: ^
+
+ stumaxb h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxb h0, [x2]
+ // CHECK-ERROR: ^
+
+ stumaxb h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxb h2, [sp]
+ // CHECK-ERROR: ^
+
+ stumaxb v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxb v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stumaxh b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxh b0, [x2]
+ // CHECK-ERROR: ^
+
+ stumaxh b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxh b2, [sp]
+ // CHECK-ERROR: ^
+
+ stumaxh h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxh h0, [x2]
+ // CHECK-ERROR: ^
+
+ stumaxh h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxh h2, [sp]
+ // CHECK-ERROR: ^
+
+ stumaxh v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxh v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stumaxlb b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxlb b0, [x2]
+ // CHECK-ERROR: ^
+
+ stumaxlb b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxlb b2, [sp]
+ // CHECK-ERROR: ^
+
+ stumaxlb h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxlb h0, [x2]
+ // CHECK-ERROR: ^
+
+ stumaxlb h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxlb h2, [sp]
+ // CHECK-ERROR: ^
+
+ stumaxlb v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxlb v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stumaxlh b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxlh b0, [x2]
+ // CHECK-ERROR: ^
+
+ stumaxlh b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxlh b2, [sp]
+ // CHECK-ERROR: ^
+
+ stumaxlh h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxlh h0, [x2]
+ // CHECK-ERROR: ^
+
+ stumaxlh h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxlh h2, [sp]
+ // CHECK-ERROR: ^
+
+ stumaxlh v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumaxlh v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stumin b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumin b0, [x2]
+ // CHECK-ERROR: ^
+
+ stumin b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumin b2, [sp]
+ // CHECK-ERROR: ^
+
+ stumin h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumin h0, [x2]
+ // CHECK-ERROR: ^
+
+ stumin h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumin h2, [sp]
+ // CHECK-ERROR: ^
+
+ stumin v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stumin v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stuminl b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stuminl b0, [x2]
+ // CHECK-ERROR: ^
+
+ stuminl b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stuminl b2, [sp]
+ // CHECK-ERROR: ^
+
+ stuminl h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stuminl h0, [x2]
+ // CHECK-ERROR: ^
+
+ stuminl h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stuminl h2, [sp]
+ // CHECK-ERROR: ^
+
+ stuminl v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stuminl v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stuminb b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stuminb b0, [x2]
+ // CHECK-ERROR: ^
+
+ stuminb b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stuminb b2, [sp]
+ // CHECK-ERROR: ^
+
+ stuminb h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stuminb h0, [x2]
+ // CHECK-ERROR: ^
+
+ stuminb h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stuminb h2, [sp]
+ // CHECK-ERROR: ^
+
+ stuminb v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stuminb v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
+ stuminh b0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stuminh b0, [x2]
+ // CHECK-ERROR: ^
+
+ stuminh b2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stuminh b2, [sp]
+ // CHECK-ERROR: ^
+
+ stuminh h0, [x2]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stuminh h0, [x2]
+ // CHECK-ERROR: ^
+
+ stuminh h2, [sp]
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stuminh h2, [sp]
+ // CHECK-ERROR: ^
+
+ stuminh v0.4h, v2.4h
+ // CHECK-ERROR: error: invalid operand for instruction
+ // CHECK-ERROR: stuminh v0.4h, v2.4h
+ // CHECK-ERROR: ^
+
diff --git a/test/MC/AArch64/error-location-post-layout.s b/test/MC/AArch64/error-location-post-layout.s
index 8b41c6780b36..ac176ca9362f 100644
--- a/test/MC/AArch64/error-location-post-layout.s
+++ b/test/MC/AArch64/error-location-post-layout.s
@@ -1,14 +1,11 @@
// RUN: not llvm-mc -triple aarch64--none-eabi -filetype obj < %s -o /dev/null 2>&1 | FileCheck %s

-// Note: These errors are not always emitted in the order in which the relevant
-// source appears, this file is carefully ordered so that that is the case.
-
-// CHECK: <unknown>:0: error: expression could not be evaluated
.set v1, -undef
+// CHECK: 3:12: error: expression could not be evaluated

.comm common, 4
-// CHECK: <unknown>:0: error: Common symbol 'common' cannot be used in assignment expr
.set v3, common
+// CHECK: 7:12: error: Common symbol 'common' cannot be used in assignment expr

-// CHECK: <unknown>:0: error: symbol 'undef' could not be evaluated in a subtraction expression
.set v2, a-undef
+// CHECK: 10:13: error: symbol 'undef' could not be evaluated in a subtraction expression
diff --git a/test/MC/AArch64/label-arithmetic-diags-elf.s b/test/MC/AArch64/label-arithmetic-diags-elf.s
index 6e928bdf094c..e9d92d591fac 100644
--- a/test/MC/AArch64/label-arithmetic-diags-elf.s
+++ b/test/MC/AArch64/label-arithmetic-diags-elf.s
@@ -63,9 +63,9 @@ end_across_sec:
add w0, w1, #(sec_y - sec_x)
cmp w0, #(sec_y - sec_x)
- // CHECK: error: symbol 'sec_x' can not be undefined in a subtraction expression
+ // CHECK: error: Cannot represent a difference across sections
// CHECK-NEXT: add w0, w1, #(sec_y - sec_x)
// CHECK-NEXT: ^
- // CHECK: error: symbol 'sec_x' can not be undefined in a subtraction expression
+ // CHECK: error: Cannot represent a difference across sections
// CHECK-NEXT: cmp w0, #(sec_y - sec_x)
// CHECK-NEXT: ^
diff --git a/test/MC/AArch64/neon-add-sub-instructions.s b/test/MC/AArch64/neon-add-sub-instructions.s
index 0d8416537022..b95ea6af0181 100644
--- a/test/MC/AArch64/neon-add-sub-instructions.s
+++ b/test/MC/AArch64/neon-add-sub-instructions.s
@@ -67,7 +67,7 @@
fsub v0.2d, v1.2d, v2.2d

// CHECK: fsub v0.4h, v1.4h, v2.4h // encoding: [0x20,0x14,0xc2,0x0e]
-// CHECK; fsub v0.8h, v1.8h, v2.8h // encoding: [0x20,0x14,0xc2,0x4e]
+// CHECK: fsub v0.8h, v1.8h, v2.8h // encoding: [0x20,0x14,0xc2,0x4e]
// CHECK: fsub v0.2s, v1.2s, v2.2s // encoding: [0x20,0xd4,0xa2,0x0e]
// CHECK: fsub v0.4s, v1.4s, v2.4s // encoding: [0x20,0xd4,0xa2,0x4e]
// CHECK: fsub v0.2d, v1.2d, v2.2d // encoding: [0x20,0xd4,0xe2,0x4e]
diff --git a/test/MC/AArch64/neon-diagnostics.s b/test/MC/AArch64/neon-diagnostics.s
index a51243dfb344..1172903b0342 100644
--- a/test/MC/AArch64/neon-diagnostics.s
+++ b/test/MC/AArch64/neon-diagnostics.s
@@ -81,7 +81,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: and v0.8b, v1.16b, v2.8b
// CHECK-ERROR: ^
-// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: error: immediate must be an integer in range [0, 255]
// CHECK-ERROR: orr v0.4h, v1.4h, v2.4h
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -152,10 +152,10 @@
// invalid vector type (2s, 4s, 4h, 8h)
movi v5.8b, #1, lsl #8

-// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: error: immediate must be an integer in range [0, 255]
// CHECK-ERROR: movi v0.2s, #-1
// CHECK-ERROR: ^
-// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: error: immediate must be an integer in range [0, 255]
// CHECK-ERROR: mvni v1.4s, #256
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -183,10 +183,10 @@
// invalid vector type (2s, 4s)
movi v5.4h, #31, msl #8

-// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: error: immediate must be an integer in range [0, 255]
// CHECK-ERROR: movi v0.2s, #-1, msl #8
// CHECK-ERROR: ^
-// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: error: immediate must be an integer in range [0, 255]
// CHECK-ERROR: mvni v7.4s, #256, msl #16
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -206,10 +206,10 @@
movi v0.8b, #-1
movi v1.16b, #256

-// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: error: immediate must be an integer in range [0, 255]
// CHECK-ERROR: movi v0.8b, #-1
// CHECK-ERROR: ^
-// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: error: immediate must be an integer in range [0, 255]
// CHECK-ERROR: movi v1.16b, #256
// CHECK-ERROR: ^
diff --git a/test/MC/AArch64/nofp-crypto-diagnostic.s b/test/MC/AArch64/nofp-crypto-diagnostic.s
new file mode 100644
index 000000000000..36da8a83128d
--- /dev/null
+++ b/test/MC/AArch64/nofp-crypto-diagnostic.s
@@ -0,0 +1,8 @@
+// RUN: not llvm-mc -triple aarch64-none-linux-gnu -mattr=+neon,+crypto,-fp-armv8 < %s 2> %t
+// RUN: FileCheck --check-prefix=CHECK-ERROR < %t %s
+
+ sha1h s0, s1
+
+// CHECK-ERROR: error: instruction requires: crypto
+// CHECK-ERROR-NEXT: sha1h s0, s1
+// CHECK-ERROR-NEXT: ^
diff --git a/test/MC/AMDGPU/code-object-metadata-kernel-args.s b/test/MC/AMDGPU/code-object-metadata-kernel-args.s
new file mode 100644
index 000000000000..90915e61f99a
--- /dev/null
+++ b/test/MC/AMDGPU/code-object-metadata-kernel-args.s
@@ -0,0 +1,68 @@
+// RUN: llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx700 -show-encoding %s | FileCheck --check-prefix=CHECK --check-prefix=GFX700 %s
+// RUN: llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx800 -show-encoding %s | FileCheck --check-prefix=CHECK --check-prefix=GFX800 %s
+// RUN: llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx900 -show-encoding %s | FileCheck --check-prefix=CHECK --check-prefix=GFX900 %s
+
+// CHECK: .amdgpu_code_object_metadata
+// CHECK: Version: [ 1, 0 ]
+// CHECK: Printf: [ '1:1:4:%d\n', '2:1:8:%g\n' ]
+// CHECK: Kernels:
+// CHECK: - Name: test_kernel
+// CHECK: Language: OpenCL C
+// CHECK: LanguageVersion: [ 2, 0 ]
+// CHECK: Args:
+// CHECK: - Size: 1
+// CHECK: Align: 1
+// CHECK: ValueKind: ByValue
+// CHECK: ValueType: I8
+// CHECK: AccQual: Default
+// CHECK: TypeName: char
+// CHECK: - Size: 8
+// CHECK: Align: 8
+// CHECK: ValueKind: HiddenGlobalOffsetX
+// CHECK: ValueType: I64
+// CHECK: - Size: 8
+// CHECK: Align: 8
+// CHECK: ValueKind: HiddenGlobalOffsetY
+// CHECK: ValueType: I64
+// CHECK: - Size: 8
+// CHECK: Align: 8
+// CHECK: ValueKind: HiddenGlobalOffsetZ
+// CHECK: ValueType: I64
+// CHECK: - Size: 8
+// CHECK: Align: 8
+// CHECK: ValueKind: HiddenPrintfBuffer
+// CHECK: ValueType: I8
+// CHECK: AddrSpaceQual: Global
+// CHECK: .end_amdgpu_code_object_metadata
+.amdgpu_code_object_metadata
+ Version: [ 1, 0 ]
+ Printf: [ '1:1:4:%d\n', '2:1:8:%g\n' ]
+ Kernels:
+ - Name: test_kernel
+ Language: OpenCL C
+ LanguageVersion: [ 2, 0 ]
+ Args:
+ - Size: 1
+ Align: 1
+ ValueKind: ByValue
+ ValueType: I8
+ AccQual: Default
+ TypeName: char
+ - Size: 8
+ Align: 8
+ ValueKind: HiddenGlobalOffsetX
+ ValueType: I64
+ - Size: 8
+ Align: 8
+ ValueKind: HiddenGlobalOffsetY
+ ValueType: I64
+ - Size: 8
+ Align: 8
+ ValueKind: HiddenGlobalOffsetZ
+ ValueType: I64
+ - Size: 8
+ Align: 8
+ ValueKind: HiddenPrintfBuffer
+ ValueType: I8
+ AddrSpaceQual: Global
+.end_amdgpu_code_object_metadata
diff --git a/test/MC/AMDGPU/code-object-metadata-kernel-attrs.s b/test/MC/AMDGPU/code-object-metadata-kernel-attrs.s
new file mode 100644
index 000000000000..9669fcf53939
--- /dev/null
+++ b/test/MC/AMDGPU/code-object-metadata-kernel-attrs.s
@@ -0,0 +1,28 @@
+// RUN: llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx700 -show-encoding %s | FileCheck --check-prefix=CHECK --check-prefix=GFX700 %s
+// RUN: llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx800 -show-encoding %s | FileCheck --check-prefix=CHECK --check-prefix=GFX800 %s
+// RUN: llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx900 -show-encoding %s | FileCheck --check-prefix=CHECK --check-prefix=GFX900 %s
+
+// CHECK: .amdgpu_code_object_metadata
+// CHECK: Version: [ 1, 0 ]
+// CHECK: Printf: [ '1:1:4:%d\n', '2:1:8:%g\n' ]
+// CHECK: Kernels:
+// CHECK: - Name: test_kernel
+// CHECK: Language: OpenCL C
+// CHECK: LanguageVersion: [ 2, 0 ]
+// CHECK: Attrs:
+// CHECK: ReqdWorkGroupSize: [ 1, 2, 4 ]
+// CHECK: WorkGroupSizeHint: [ 8, 16, 32 ]
+// CHECK: VecTypeHint: int
+// CHECK: .end_amdgpu_code_object_metadata
+.amdgpu_code_object_metadata
+ Version: [ 1, 0 ]
+ Printf: [ '1:1:4:%d\n', '2:1:8:%g\n' ]
+ Kernels:
+ - Name: test_kernel
+ Language: OpenCL C
+ LanguageVersion: [ 2, 0 ]
+ Attrs:
+ ReqdWorkGroupSize: [ 1, 2, 4 ]
+ WorkGroupSizeHint: [ 8, 16, 32 ]
+ VecTypeHint: int
+.end_amdgpu_code_object_metadata
diff --git a/test/MC/AMDGPU/code-object-metadata-kernel-code-props.s b/test/MC/AMDGPU/code-object-metadata-kernel-code-props.s
new file mode 100644
index 000000000000..da4c8c1028d7
--- /dev/null
+++ b/test/MC/AMDGPU/code-object-metadata-kernel-code-props.s
@@ -0,0 +1,24 @@
+// RUN: llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx700 -show-encoding %s | FileCheck --check-prefix=CHECK --check-prefix=GFX700 %s
+// RUN: llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx800 -show-encoding %s | FileCheck --check-prefix=CHECK --check-prefix=GFX800 %s
+// RUN: llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx900 -show-encoding %s | FileCheck --check-prefix=CHECK --check-prefix=GFX900 %s
+
+// CHECK: .amdgpu_code_object_metadata
+// CHECK: Version: [ 1, 0 ]
+// CHECK: Kernels:
+// CHECK: - Name: test_kernel
+// CHECK: CodeProps:
+// CHECK: KernargSegmentSize: 24
+// CHECK: WorkitemPrivateSegmentSize: 16
+// CHECK: WavefrontNumSGPRs: 6
+// CHECK: WorkitemNumVGPRs: 12
+.amdgpu_code_object_metadata
+ Version: [ 1, 0 ]
+ Printf: [ '1:1:4:%d\n', '2:1:8:%g\n' ]
+ Kernels:
+ - Name: test_kernel
+ CodeProps:
+ KernargSegmentSize: 24
+ WorkitemPrivateSegmentSize: 16
+ WavefrontNumSGPRs: 6
+ WorkitemNumVGPRs: 12
+.end_amdgpu_code_object_metadata
diff --git a/test/MC/AMDGPU/code-object-metadata-kernel-debug-props.s b/test/MC/AMDGPU/code-object-metadata-kernel-debug-props.s
new file mode 100644
index 000000000000..4153737bf33a
--- /dev/null
+++ b/test/MC/AMDGPU/code-object-metadata-kernel-debug-props.s
@@ -0,0 +1,26 @@
+// RUN: llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx700 -show-encoding %s | FileCheck --check-prefix=CHECK --check-prefix=GFX700 %s
+// RUN: llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx800 -show-encoding %s | FileCheck --check-prefix=CHECK --check-prefix=GFX800 %s
+// RUN: llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx900 -show-encoding %s | FileCheck --check-prefix=CHECK --check-prefix=GFX900 %s
+
+// CHECK: .amdgpu_code_object_metadata
+// CHECK: Version: [ 1, 0 ]
+// CHECK: Kernels:
+// CHECK: - Name: test_kernel
+// CHECK: DebugProps:
+// CHECK: DebuggerABIVersion: [ 1, 0 ]
+// CHECK: ReservedNumVGPRs: 4
+// CHECK: ReservedFirstVGPR: 11
+// CHECK: PrivateSegmentBufferSGPR: 0
+// CHECK: WavefrontPrivateSegmentOffsetSGPR: 11
+.amdgpu_code_object_metadata
+ Version: [ 1, 0 ]
+ Printf: [ '1:1:4:%d\n', '2:1:8:%g\n' ]
+ Kernels:
+ - Name: test_kernel
+ DebugProps:
+ DebuggerABIVersion: [ 1, 0 ]
+ ReservedNumVGPRs: 4
+ ReservedFirstVGPR: 11
+ PrivateSegmentBufferSGPR: 0
+ WavefrontPrivateSegmentOffsetSGPR: 11
+.end_amdgpu_code_object_metadata
\ No newline at end of file
diff --git a/test/MC/AMDGPU/code-object-metadata-unknown-key.s b/test/MC/AMDGPU/code-object-metadata-unknown-key.s
new file mode 100644
index 000000000000..9add19f6e55c
--- /dev/null
+++ b/test/MC/AMDGPU/code-object-metadata-unknown-key.s
@@ -0,0 +1,41 @@
+// RUN: not llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx700 %s 2>&1 | FileCheck %s
+// RUN: not llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx800 %s 2>&1 | FileCheck %s
+// RUN: not llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx900 %s 2>&1 | FileCheck %s
+// RUN: not llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx700 -filetype=obj %s 2>&1 | FileCheck %s
+// RUN: not llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx800 -filetype=obj %s 2>&1 | FileCheck %s
+// RUN: not llvm-mc -triple=amdgcn-amd-amdhsa -mcpu=gfx900 -filetype=obj %s 2>&1 | FileCheck %s
+
+// CHECK: error: unknown key 'UnknownKey'
+.amdgpu_code_object_metadata
+ UnknownKey: [ 2, 0 ]
+ Version: [ 1, 0 ]
+ Printf: [ '1:1:4:%d\n', '2:1:8:%g\n' ]
+ Kernels:
+ - Name: test_kernel
+ Language: OpenCL C
+ LanguageVersion: [ 2, 0 ]
+ Args:
+ - Size: 1
+ Align: 1
+ ValueKind: ByValue
+ ValueType: I8
+ AccQual: Default
+ TypeName: char
+ - Size: 8
+ Align: 8
+ ValueKind: HiddenGlobalOffsetX
+ ValueType: I64
+ - Size: 8
+ Align: 8
+ ValueKind: HiddenGlobalOffsetY
+ ValueType: I64
+ - Size: 8
+ Align: 8
+ ValueKind: HiddenGlobalOffsetZ
+ ValueType: I64
+ - Size: 8
+ Align: 8
+ ValueKind: HiddenPrintfBuffer
+ ValueType: I8
+ AddrSpaceQual: Global
+.end_amdgpu_code_object_metadata
diff --git a/test/MC/AMDGPU/ds.s b/test/MC/AMDGPU/ds.s
index 4b68a823dd22..18e4957e32d7 100644
--- a/test/MC/AMDGPU/ds.s
+++ b/test/MC/AMDGPU/ds.s
@@ -19,13 +19,13 @@ ds_add_u32 v2, v4 offset:16
// Checks for 2 8-bit Offsets
//===----------------------------------------------------------------------===//
-ds_write_src2_b32 v2 offset0:4 offset1:8
-// SICI: ds_write_src2_b32 v2 offset0:4 offset1:8 ; encoding: [0x04,0x08,0x34,0xda,0x02,0x00,0x00,0x00]
-// VI: ds_write_src2_b32 v2 offset0:4 offset1:8 ; encoding: [0x04,0x08,0x1a,0xd9,0x02,0x00,0x00,0x00]
+ds_write_src2_b32 v2 offset:2052
+// SICI: ds_write_src2_b32 v2 offset:2052 ; encoding: [0x04,0x08,0x34,0xda,0x02,0x00,0x00,0x00]
+// VI: ds_write_src2_b32 v2 offset:2052 ; encoding: [0x04,0x08,0x1a,0xd9,0x02,0x00,0x00,0x00]
-ds_write_src2_b64 v2 offset0:4 offset1:8
-// SICI: ds_write_src2_b64 v2 offset0:4 offset1:8 ; encoding: [0x04,0x08,0x34,0xdb,0x02,0x00,0x00,0x00]
-// VI: ds_write_src2_b64 v2 offset0:4 offset1:8 ; encoding: [0x04,0x08,0x9a,0xd9,0x02,0x00,0x00,0x00]
+ds_write_src2_b64 v2 offset:2052
+// SICI: ds_write_src2_b64 v2 offset:2052 ; encoding: [0x04,0x08,0x34,0xdb,0x02,0x00,0x00,0x00]
+// VI: ds_write_src2_b64 v2 offset:2052 ; encoding: [0x04,0x08,0x9a,0xd9,0x02,0x00,0x00,0x00]
ds_write2_b32 v2, v4, v6 offset0:4
// SICI: ds_write2_b32 v2, v4, v6 offset0:4 ; encoding: [0x04,0x00,0x38,0xd8,0x02,0x04,0x06,0x00]
@@ -140,24 +140,32 @@ ds_max_f32 v2, v4
// VI: ds_max_f32 v2, v4 ; encoding: [0x00,0x00,0x26,0xd8,0x02,0x04,0x00,0x00]
ds_gws_init v2 gds
-// SICI: ds_gws_init v2 gds ; encoding: [0x00,0x00,0x66,0xd8,0x02,0x00,0x00,0x00]
-// VI: ds_gws_init v2 gds ; encoding: [0x00,0x00,0x33,0xd8,0x02,0x00,0x00,0x00]
+// SICI: ds_gws_init v2 gds ; encoding: [0x00,0x00,0x66,0xd8,0x00,0x02,0x00,0x00]
+// VI: ds_gws_init v2 gds ; encoding: [0x00,0x00,0x33,0xd9,0x00,0x02,0x00,0x00]
-ds_gws_sema_v v2 gds
-// SICI: ds_gws_sema_v v2 gds ; encoding: [0x00,0x00,0x6a,0xd8,0x02,0x00,0x00,0x00]
-// VI: ds_gws_sema_v v2 gds ; encoding: [0x00,0x00,0x35,0xd8,0x02,0x00,0x00,0x00]
+ds_gws_init v3 offset:12345 gds
+// SICI: ds_gws_init v3 offset:12345 gds ; encoding: [0x39,0x30,0x66,0xd8,0x00,0x03,0x00,0x00]
+// VI: ds_gws_init v3 offset:12345 gds ; encoding: [0x39,0x30,0x33,0xd9,0x00,0x03,0x00,0x00]
+
+ds_gws_sema_v gds
+// SICI: ds_gws_sema_v gds ; encoding: [0x00,0x00,0x6a,0xd8,0x00,0x00,0x00,0x00]
+// VI: ds_gws_sema_v gds ; encoding: [0x00,0x00,0x35,0xd9,0x00,0x00,0x00,0x00]
+
+ds_gws_sema_v offset:257 gds
+// SICI: ds_gws_sema_v offset:257 gds ; encoding: [0x01,0x01,0x6a,0xd8,0x00,0x00,0x00,0x00]
+// VI: ds_gws_sema_v offset:257 gds ; encoding: [0x01,0x01,0x35,0xd9,0x00,0x00,0x00,0x00]
ds_gws_sema_br v2 gds
-// SICI: ds_gws_sema_br v2 gds ; encoding: [0x00,0x00,0x6e,0xd8,0x02,0x00,0x00,0x00]
-// VI: ds_gws_sema_br v2 gds ; encoding: [0x00,0x00,0x37,0xd8,0x02,0x00,0x00,0x00]
+// SICI: ds_gws_sema_br v2 gds ; encoding: [0x00,0x00,0x6e,0xd8,0x00,0x02,0x00,0x00]
+// VI: ds_gws_sema_br v2 gds ; encoding: [0x00,0x00,0x37,0xd9,0x00,0x02,0x00,0x00]
-ds_gws_sema_p v2 gds
-// SICI: ds_gws_sema_p v2 gds ; encoding: [0x00,0x00,0x72,0xd8,0x02,0x00,0x00,0x00]
-// VI: ds_gws_sema_p v2 gds ; encoding: [0x00,0x00,0x39,0xd8,0x02,0x00,0x00,0x00]
+ds_gws_sema_p gds
+// SICI: ds_gws_sema_p gds ; encoding: [0x00,0x00,0x72,0xd8,0x00,0x00,0x00,0x00]
+// VI: ds_gws_sema_p gds ; encoding: [0x00,0x00,0x39,0xd9,0x00,0x00,0x00,0x00]
ds_gws_barrier v2 gds
-// SICI: ds_gws_barrier v2 gds ; encoding: [0x00,0x00,0x76,0xd8,0x02,0x00,0x00,0x00]
-// VI: ds_gws_barrier v2 gds ; encoding: [0x00,0x00,0x3b,0xd8,0x02,0x00,0x00,0x00]
+// SICI: ds_gws_barrier v2 gds ; encoding: [0x00,0x00,0x76,0xd8,0x00,0x02,0x00,0x00]
+// VI: ds_gws_barrier v2 gds ; encoding: [0x00,0x00,0x3b,0xd9,0x00,0x02,0x00,0x00]
ds_write_b8 v2, v4
// SICI: ds_write_b8 v2, v4 ; encoding: [0x00,0x00,0x78,0xd8,0x02,0x04,0x00,0x00]
@@ -231,10 +239,18 @@ ds_wrxchg2_rtn_b32 v[8:9], v2, v4, v6
// SICI: ds_wrxchg2_rtn_b32 v[8:9], v2, v4, v6 ; encoding: [0x00,0x00,0xb8,0xd8,0x02,0x04,0x06,0x08]
// VI: ds_wrxchg2_rtn_b32 v[8:9], v2, v4, v6 ; encoding: [0x00,0x00,0x5c,0xd8,0x02,0x04,0x06,0x08]
+ds_wrxchg2_rtn_b32 v[0:1], v0, v0, v0 offset0:127 offset1:255
+// SICI: ds_wrxchg2_rtn_b32 v[0:1], v0, v0, v0 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xb8,0xd8,0x00,0x00,0x00,0x00]
+// VI: ds_wrxchg2_rtn_b32 v[0:1], v0, v0, v0 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x5c,0xd8,0x00,0x00,0x00,0x00]
+
ds_wrxchg2st64_rtn_b32 v[8:9] v2, v4, v6
// SICI: ds_wrxchg2st64_rtn_b32 v[8:9], v2, v4, v6 ; encoding: [0x00,0x00,0xbc,0xd8,0x02,0x04,0x06,0x08]
// VI: ds_wrxchg2st64_rtn_b32 v[8:9], v2, v4, v6 ; encoding: [0x00,0x00,0x5e,0xd8,0x02,0x04,0x06,0x08]
+ds_wrxchg2st64_rtn_b32 v[0:1], v0, v255, v0 offset0:127 offset1:255
+// SICI: ds_wrxchg2st64_rtn_b32 v[0:1], v0, v255, v0 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xbc,0xd8,0x00,0xff,0x00,0x00]
+// VI: ds_wrxchg2st64_rtn_b32 v[0:1], v0, v255, v0 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x5e,0xd8,0x00,0xff,0x00,0x00]
+
ds_cmpst_rtn_b32 v8, v2, v4, v6
// SICI: ds_cmpst_rtn_b32 v8, v2, v4, v6 ; encoding: [0x00,0x00,0xc0,0xd8,0x02,0x04,0x06,0x08]
// VI: ds_cmpst_rtn_b32 v8, v2, v4, v6 ; encoding: [0x00,0x00,0x60,0xd8,0x02,0x04,0x06,0x08]
@@ -284,17 +300,17 @@ ds_read_u16 v8, v2
// VI: ds_read_u16 v8, v2 ; encoding: [0x00,0x00,0x78,0xd8,0x02,0x00,0x00,0x08]
-//ds_consume v8
-// FIXMESICI: ds_consume v8 ; encoding: [0x00,0x00,0xf4,0xd8,0x00,0x00,0x00,0x08]
-// FIXMEVI: ds_consume v8 ; encoding: [0x00,0x00,0x7a,0xd8,0x00,0x00,0x00,0x08]
+ds_consume v8
+// SICI: ds_consume v8 ; encoding: [0x00,0x00,0xf4,0xd8,0x00,0x00,0x00,0x08]
+// VI: ds_consume v8 ; encoding: [0x00,0x00,0x7a,0xd9,0x00,0x00,0x00,0x08]
-//ds_append v8
-// FIXMESICI: ds_append v8 ; encoding: [0x00,0x00,0xf8,0xd8,0x00,0x00,0x00,0x08]
-// FIXMEVI: ds_append v8 ; encoding: [0x00,0x00,0x7c,0xd8,0x00,0x00,0x00,0x08]
+ds_append v8
+// SICI: ds_append v8 ; encoding: [0x00,0x00,0xf8,0xd8,0x00,0x00,0x00,0x08]
+// VI: ds_append v8 ; encoding: [0x00,0x00,0x7c,0xd9,0x00,0x00,0x00,0x08]
-//ds_ordered_count v8, v2 gds
-// FIXMESICI: ds_ordered_count v8, v2 gds ; encoding: [0x00,0x00,0xfe,0xd8,0x02,0x00,0x00,0x08]
-// FIXMEVI: ds_ordered_count v8, v2 gds ; encoding: [0x00,0x00,0x7f,0xd8,0x02,0x00,0x00,0x08]
+ds_ordered_count v8, v2 gds
+// SICI: ds_ordered_count v8, v2 gds ; encoding: [0x00,0x00,0xfe,0xd8,0x02,0x00,0x00,0x08]
+// VI: ds_ordered_count v8, v2 gds ; encoding: [0x00,0x00,0x7f,0xd9,0x02,0x00,0x00,0x08]
ds_add_u64 v2, v[4:5]
// SICI: ds_add_u64 v2, v[4:5] ; encoding: [0x00,0x00,0x00,0xd9,0x02,0x04,0x00,0x00]
@@ -436,10 +452,18 @@ ds_wrxchg2_rtn_b64 v[8:11], v2, v[4:5], v[6:7]
// SICI: ds_wrxchg2_rtn_b64 v[8:11], v2, v[4:5], v[6:7] ; encoding: [0x00,0x00,0xb8,0xd9,0x02,0x04,0x06,0x08]
// VI: ds_wrxchg2_rtn_b64 v[8:11], v2, v[4:5], v[6:7] ; encoding: [0x00,0x00,0xdc,0xd8,0x02,0x04,0x06,0x08]
+ds_wrxchg2_rtn_b64 v[0:3], v0, v[1:2], v[0:1] offset0:127 offset1:255
+// SICI: ds_wrxchg2_rtn_b64 v[0:3], v0, v[1:2], v[0:1] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xb8,0xd9,0x00,0x01,0x00,0x00]
+// VI: ds_wrxchg2_rtn_b64 v[0:3], v0, v[1:2], v[0:1] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xdc,0xd8,0x00,0x01,0x00,0x00]
+
ds_wrxchg2st64_rtn_b64 v[8:11], v2, v[4:5], v[6:7]
// SICI: ds_wrxchg2st64_rtn_b64 v[8:11], v2, v[4:5], v[6:7] ; encoding: [0x00,0x00,0xbc,0xd9,0x02,0x04,0x06,0x08]
// VI: ds_wrxchg2st64_rtn_b64 v[8:11], v2, v[4:5], v[6:7] ; encoding: [0x00,0x00,0xde,0xd8,0x02,0x04,0x06,0x08]
+ds_wrxchg2st64_rtn_b64 v[0:3], v255, v[0:1], v[0:1] offset0:127 offset1:255
+// SICI: ds_wrxchg2st64_rtn_b64 v[0:3], v255, v[0:1], v[0:1] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xbc,0xd9,0xff,0x00,0x00,0x00]
+// VI: ds_wrxchg2st64_rtn_b64 v[0:3], v255, v[0:1], v[0:1] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xde,0xd8,0xff,0x00,0x00,0x00]
+
ds_cmpst_rtn_b64 v[8:9], v2, v[4:5], v[6:7]
// SICI: ds_cmpst_rtn_b64 v[8:9], v2, v[4:5], v[6:7] ; encoding: [0x00,0x00,0xc0,0xd9,0x02,0x04,0x06,0x08]
// VI: ds_cmpst_rtn_b64 v[8:9], v2, v[4:5], v[6:7] ; encoding: [0x00,0x00,0xe0,0xd8,0x02,0x04,0x06,0x08]
@@ -468,3 +492,17 @@ ds_read2st64_b64 v[8:11], v2
// SICI: ds_read2st64_b64 v[8:11], v2 ; encoding: [0x00,0x00,0xe0,0xd9,0x02,0x00,0x00,0x08]
// VI: ds_read2st64_b64 v[8:11], v2 ; encoding: [0x00,0x00,0xf0,0xd8,0x02,0x00,0x00,0x08]
+ds_read_b128 v[8:11], v2
+// NOSI: error: instruction not supported on this GPU
+// CI: ds_read_b128 v[8:11], v2 ; encoding: [0x00,0x00,0xfc,0xdb,0x02,0x00,0x00,0x08]
+// VI: ds_read_b128 v[8:11], v2 ; encoding: [0x00,0x00,0xfe,0xd9,0x02,0x00,0x00,0x08]
+
+ds_write_b128 v2, v[4:7]
+// NOSI: error: instruction not supported on this GPU
+// CI: ds_write_b128 v2, v[4:7] ; encoding: [0x00,0x00,0x7c,0xdb,0x02,0x04,0x00,0x00]
+// VI: ds_write_b128 v2, v[4:7] ; encoding: [0x00,0x00,0xbe,0xd9,0x02,0x04,0x00,0x00]
+
+ds_nop
+// NOSI: error: instruction not supported on this GPU
+// CI: ds_nop ; encoding: [0x00,0x00,0x50,0xd8,0x00,0x00,0x00,0x00]
+// VI: ds_nop ; encoding: [0x00,0x00,0x28,0xd8,0x00,0x00,0x00,0x00]
diff --git a/test/MC/AMDGPU/exp.s b/test/MC/AMDGPU/exp.s
index 4dc379987292..710a777ab217 100644
--- a/test/MC/AMDGPU/exp.s
+++ b/test/MC/AMDGPU/exp.s
@@ -1,86 +1,114 @@
-// RUN: llvm-mc -arch=amdgcn -show-encoding %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-// RUN: llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+// RUN: llvm-mc -arch=amdgcn -show-encoding %s | FileCheck -check-prefix=SI %s
+// RUN: llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s | FileCheck -check-prefix=VI %s
exp mrt0 off, off, off, off
-// GCN: exp mrt0 off, off, off, off ; encoding: [0x00,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+// SI: exp mrt0 off, off, off, off ; encoding: [0x00,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+// VI: exp mrt0 off, off, off, off ; encoding: [0x00,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
exp mrt0 off, off, off, off done
-// GCN: exp mrt0 off, off, off, off done ; encoding: [0x00,0x08,0x00,0xf8,0x00,0x00,0x00,0x00]
+// SI: exp mrt0 off, off, off, off done ; encoding: [0x00,0x08,0x00,0xf8,0x00,0x00,0x00,0x00]
+// VI: exp mrt0 off, off, off, off done ; encoding: [0x00,0x08,0x00,0xc4,0x00,0x00,0x00,0x00]
exp mrt0 v4, off, off, off done
-// GCN: exp mrt0 v4, off, off, off done ; encoding: [0x01,0x08,0x00,0xf8,0x04,0x00,0x00,0x00]
+// SI: exp mrt0 v4, off, off, off done ; encoding: [0x01,0x08,0x00,0xf8,0x04,0x00,0x00,0x00]
+// VI: exp mrt0 v4, off, off, off done ; encoding: [0x01,0x08,0x00,0xc4,0x04,0x00,0x00,0x00]
exp mrt0 off, v3, off, off done
-// GCN: exp mrt0 off, v3, off, off done ; encoding: [0x02,0x08,0x00,0xf8,0x00,0x03,0x00,0x00]
+// SI: exp mrt0 off, v3, off, off done ; encoding: [0x02,0x08,0x00,0xf8,0x00,0x03,0x00,0x00]
+// VI: exp mrt0 off, v3, off, off done ; encoding: [0x02,0x08,0x00,0xc4,0x00,0x03,0x00,0x00]
exp mrt0 off, off, v2, off done
-// GCN: exp mrt0 off, off, v2, off done ; encoding: [0x04,0x08,0x00,0xf8,0x00,0x00,0x02,0x00]
+// SI: exp mrt0 off, off, v2, off done ; encoding: [0x04,0x08,0x00,0xf8,0x00,0x00,0x02,0x00]
+// VI: exp mrt0 off, off, v2, off done ; encoding: [0x04,0x08,0x00,0xc4,0x00,0x00,0x02,0x00]
exp mrt0 off, off, off, v1 done
-// GCN: exp mrt0 off, off, off, v1 done ; encoding: [0x08,0x08,0x00,0xf8,0x00,0x00,0x00,0x01]
+// SI: exp mrt0 off, off, off, v1 done ; encoding: [0x08,0x08,0x00,0xf8,0x00,0x00,0x00,0x01]
+// VI: exp mrt0 off, off, off, v1 done ; encoding: [0x08,0x08,0x00,0xc4,0x00,0x00,0x00,0x01]
exp mrt0 v4, v3, off, off done
-// GCN: exp mrt0 v4, v3, off, off done ; encoding: [0x03,0x08,0x00,0xf8,0x04,0x03,0x00,0x00]
+// SI: exp mrt0 v4, v3, off, off done ; encoding: [0x03,0x08,0x00,0xf8,0x04,0x03,0x00,0x00]
+// VI: exp mrt0 v4, v3, off, off done ; encoding: [0x03,0x08,0x00,0xc4,0x04,0x03,0x00,0x00]
exp mrt0 v4, off, v2, off done
-// GCN: exp mrt0 v4, off, v2, off done ; encoding: [0x05,0x08,0x00,0xf8,0x04,0x00,0x02,0x00]
+// SI: exp mrt0 v4, off, v2, off done ; encoding: [0x05,0x08,0x00,0xf8,0x04,0x00,0x02,0x00]
+// VI: exp mrt0 v4, off, v2, off done ; encoding: [0x05,0x08,0x00,0xc4,0x04,0x00,0x02,0x00]
exp mrt0 v4, off, off, v1
-// GCN: exp mrt0 v4, off, off, v1 ; encoding: [0x09,0x00,0x00,0xf8,0x04,0x00,0x00,0x01]
+// SI: exp mrt0 v4, off, off, v1 ; encoding: [0x09,0x00,0x00,0xf8,0x04,0x00,0x00,0x01]
+// VI: exp mrt0 v4, off, off, v1 ; encoding: [0x09,0x00,0x00,0xc4,0x04,0x00,0x00,0x01]
exp mrt0 v4, off, off, v1 done
-// GCN: exp mrt0 v4, off, off, v1 done ; encoding: [0x09,0x08,0x00,0xf8,0x04,0x00,0x00,0x01]
+// SI: exp mrt0 v4, off, off, v1 done ; encoding: [0x09,0x08,0x00,0xf8,0x04,0x00,0x00,0x01]
+// VI: exp mrt0 v4, off, off, v1 done ; encoding: [0x09,0x08,0x00,0xc4,0x04,0x00,0x00,0x01]
exp mrt0 v4, v3, v2, v1
-// GCN: exp mrt0 v4, v3, v2, v1 ; encoding: [0x0f,0x00,0x00,0xf8,0x04,0x03,0x02,0x01]
+// SI: exp mrt0 v4, v3, v2, v1 ; encoding: [0x0f,0x00,0x00,0xf8,0x04,0x03,0x02,0x01]
+// VI: exp mrt0 v4, v3, v2, v1 ; encoding: [0x0f,0x00,0x00,0xc4,0x04,0x03,0x02,0x01]
exp mrt0 v4, v3, v2, v1 done
-// GCN: exp mrt0 v4, v3, v2, v1 done ; encoding: [0x0f,0x08,0x00,0xf8,0x04,0x03,0x02,0x01]
+// SI: exp mrt0 v4, v3, v2, v1 done ; encoding: [0x0f,0x08,0x00,0xf8,0x04,0x03,0x02,0x01]
+// VI: exp mrt0 v4, v3, v2, v1 done ; encoding: [0x0f,0x08,0x00,0xc4,0x04,0x03,0x02,0x01]
exp mrt7 v1, v1, v1, v1
-// GCN: exp mrt7 v1, v1, v1, v1 ; encoding: [0x7f,0x00,0x00,0xf8,0x01,0x01,0x01,0x01]
+// SI: exp mrt7 v1, v1, v1, v1 ; encoding: [0x7f,0x00,0x00,0xf8,0x01,0x01,0x01,0x01]
+// VI: exp mrt7 v1, v1, v1, v1 ; encoding: [0x7f,0x00,0x00,0xc4,0x01,0x01,0x01,0x01]
exp mrt7 v1, v1, v1, v1 done
-// GCN: exp mrt7 v1, v1, v1, v1 done ; encoding: [0x7f,0x08,0x00,0xf8,0x01,0x01,0x01,0x01]
+// SI: exp mrt7 v1, v1, v1, v1 done ; encoding: [0x7f,0x08,0x00,0xf8,0x01,0x01,0x01,0x01]
+// VI: exp mrt7 v1, v1, v1, v1 done ; encoding: [0x7f,0x08,0x00,0xc4,0x01,0x01,0x01,0x01]
exp mrtz v4, v3, v2, v1
-// GCN: exp mrtz v4, v3, v2, v1 ; encoding: [0x8f,0x00,0x00,0xf8,0x04,0x03,0x02,0x01]
+// SI: exp mrtz v4, v3, v2, v1 ; encoding: [0x8f,0x00,0x00,0xf8,0x04,0x03,0x02,0x01]
+// VI: exp mrtz v4, v3, v2, v1 ; encoding: [0x8f,0x00,0x00,0xc4,0x04,0x03,0x02,0x01]
exp mrtz v4, v3, v2, v1 done
-// GCN: exp mrtz v4, v3, v2, v1 done ; encoding: [0x8f,0x08,0x00,0xf8,0x04,0x03,0x02,0x01]
+// SI: exp mrtz v4, v3, v2, v1 done ; encoding: [0x8f,0x08,0x00,0xf8,0x04,0x03,0x02,0x01]
+// VI: exp mrtz v4, v3, v2, v1 done ; encoding: [0x8f,0x08,0x00,0xc4,0x04,0x03,0x02,0x01]
exp null v4, v3, v2, v1
-// GCN: exp null v4, v3, v2, v1 ; encoding: [0x9f,0x00,0x00,0xf8,0x04,0x03,0x02,0x01]
+// SI: exp null v4, v3, v2, v1 ; encoding: [0x9f,0x00,0x00,0xf8,0x04,0x03,0x02,0x01]
+// VI: exp null v4, v3, v2, v1 ; encoding: [0x9f,0x00,0x00,0xc4,0x04,0x03,0x02,0x01]
exp null v4, v3, v2, v1 done
-// GCN: exp null v4, v3, v2, v1 done ; encoding: [0x9f,0x08,0x00,0xf8,0x04,0x03,0x02,0x01]
+// SI: exp null v4, v3, v2, v1 done ; encoding: [0x9f,0x08,0x00,0xf8,0x04,0x03,0x02,0x01]
+// VI: exp null v4, v3, v2, v1 done ; encoding: [0x9f,0x08,0x00,0xc4,0x04,0x03,0x02,0x01]
exp pos0 v4, v3, v2, v1
-// GCN: exp pos0 v4, v3, v2, v1 ; encoding: [0xcf,0x00,0x00,0xf8,0x04,0x03,0x02,0x01]
+// SI: exp pos0 v4, v3, v2, v1 ; encoding: [0xcf,0x00,0x00,0xf8,0x04,0x03,0x02,0x01]
+// VI: exp pos0 v4, v3, v2, v1 ; encoding: [0xcf,0x00,0x00,0xc4,0x04,0x03,0x02,0x01]
exp pos0 v4, v3, v2, v1 done
-// GCN: exp pos0 v4, v3, v2, v1 done ; encoding: [0xcf,0x08,0x00,0xf8,0x04,0x03,0x02,0x01]
+// SI: exp pos0 v4, v3, v2, v1 done ; encoding: [0xcf,0x08,0x00,0xf8,0x04,0x03,0x02,0x01]
+// VI: exp pos0 v4, v3, v2, v1 done ; encoding: [0xcf,0x08,0x00,0xc4,0x04,0x03,0x02,0x01]
exp pos3 v4, v3, v2, v1
-// GCN: exp pos3 v4, v3, v2, v1 ; encoding: [0xff,0x00,0x00,0xf8,0x04,0x03,0x02,0x01]
+// SI: exp pos3 v4, v3, v2, v1 ; encoding: [0xff,0x00,0x00,0xf8,0x04,0x03,0x02,0x01]
+// VI: exp pos3 v4, v3, v2, v1 ; encoding: [0xff,0x00,0x00,0xc4,0x04,0x03,0x02,0x01]
exp pos3 v4, v3, v2, v1 done
-// GCN: exp pos3 v4, v3, v2, v1 done ; encoding: [0xff,0x08,0x00,0xf8,0x04,0x03,0x02,0x01]
+// SI: exp pos3 v4, v3, v2, v1 done ; encoding: [0xff,0x08,0x00,0xf8,0x04,0x03,0x02,0x01]
+// VI: exp pos3 v4, v3, v2, v1 done ; encoding: [0xff,0x08,0x00,0xc4,0x04,0x03,0x02,0x01]
exp param0 v4, v3, v2, v1
-// GCN: exp param0 v4, v3, v2, v1 ; encoding: [0x0f,0x02,0x00,0xf8,0x04,0x03,0x02,0x01]
+// SI: exp param0 v4, v3, v2, v1 ; encoding: [0x0f,0x02,0x00,0xf8,0x04,0x03,0x02,0x01]
+// VI: exp param0 v4, v3, v2, v1 ; encoding: [0x0f,0x02,0x00,0xc4,0x04,0x03,0x02,0x01]
exp param0 v4, v3, v2, v1 done
-// GCN: exp param0 v4, v3, v2, v1 done ; encoding: [0x0f,0x0a,0x00,0xf8,0x04,0x03,0x02,0x01]
+// SI: exp param0 v4, v3, v2, v1 done ; encoding: [0x0f,0x0a,0x00,0xf8,0x04,0x03,0x02,0x01]
+// VI: exp param0 v4, v3, v2, v1 done ; encoding: [0x0f,0x0a,0x00,0xc4,0x04,0x03,0x02,0x01]
exp param31 v4, v3, v2, v1
-// GCN: exp param31 v4, v3, v2, v1 ; encoding: [0xff,0x03,0x00,0xf8,0x04,0x03,0x02,0x01]
+// SI: exp param31 v4, v3, v2, v1 ; encoding: [0xff,0x03,0x00,0xf8,0x04,0x03,0x02,0x01]
+// VI: exp param31 v4, v3, v2, v1 ; encoding: [0xff,0x03,0x00,0xc4,0x04,0x03,0x02,0x01]
exp param31 v4, v3, v2, v1 done
-// GCN: exp param31 v4, v3, v2, v1 done ; encoding: [0xff,0x0b,0x00,0xf8,0x04,0x03,0x02,0x01]
+// SI: exp param31 v4, v3, v2, v1 done ; encoding: [0xff,0x0b,0x00,0xf8,0x04,0x03,0x02,0x01]
+// VI: exp param31 v4, v3, v2, v1 done ; encoding: [0xff,0x0b,0x00,0xc4,0x04,0x03,0x02,0x01]
exp mrt0 v4, v3, v2, v1 vm
-// GCN: exp mrt0 v4, v3, v2, v1 vm ; encoding: [0x0f,0x10,0x00,0xf8,0x04,0x03,0x02,0x01]
+// SI: exp mrt0 v4, v3, v2, v1 vm ; encoding: [0x0f,0x10,0x00,0xf8,0x04,0x03,0x02,0x01]
+// VI: exp mrt0 v4, v3, v2, v1 vm ; encoding: [0x0f,0x10,0x00,0xc4,0x04,0x03,0x02,0x01]
exp mrt0 v4, v3, v2, v1 done vm
-// GCN: exp mrt0 v4, v3, v2, v1 done vm ; encoding: [0x0f,0x18,0x00,0xf8,0x04,0x03,0x02,0x01]
+// SI: exp mrt0 v4, v3, v2, v1 done vm ; encoding: [0x0f,0x18,0x00,0xf8,0x04,0x03,0x02,0x01]
+// VI: exp mrt0 v4, v3, v2, v1 done vm ; encoding: [0x0f,0x18,0x00,0xc4,0x04,0x03,0x02,0x01]
diff --git a/test/MC/AMDGPU/expressions.s b/test/MC/AMDGPU/expressions.s
index 9fc956628f1f..e593bcd75610 100644
--- a/test/MC/AMDGPU/expressions.s
+++ b/test/MC/AMDGPU/expressions.s
@@ -11,7 +11,7 @@ s_mov_b32 s0, global
// Use a token with the same name as a global
ds_gws_init v2 gds
-// VI: ds_gws_init v2 gds ; encoding: [0x00,0x00,0x33,0xd8,0x02,0x00,0x00,0x00]
+// VI: ds_gws_init v2 gds ; encoding: [0x00,0x00,0x33,0xd9,0x00,0x02,0x00,0x00]
// Use a global with the same name as a token
s_mov_b32 s0, gds
diff --git a/test/MC/AMDGPU/gfx7_asm_all.s b/test/MC/AMDGPU/gfx7_asm_all.s
new file mode 100644
index 000000000000..d1d864c3ffeb
--- /dev/null
+++ b/test/MC/AMDGPU/gfx7_asm_all.s
@@ -0,0 +1,68629 @@
+// RUN: llvm-mc -arch=amdgcn -mcpu=bonaire -show-encoding %s | FileCheck %s
+
+// *** GENERATED BY TESTGEN, DO NOT EDIT! ***
+
+ds_add_u32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x00,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_u32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x00,0xd8,0xff,0x02,0x00,0x00]
+
+ds_add_u32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x00,0xd8,0x01,0xff,0x00,0x00]
+
+ds_add_u32 v1, v2
+// CHECK: [0x00,0x00,0x00,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_u32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x00,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_u32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x00,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_u32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x02,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x04,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x04,0xd8,0xff,0x02,0x00,0x00]
+
+ds_sub_u32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x04,0xd8,0x01,0xff,0x00,0x00]
+
+ds_sub_u32 v1, v2
+// CHECK: [0x00,0x00,0x04,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x04,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x04,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x06,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x08,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x08,0xd8,0xff,0x02,0x00,0x00]
+
+ds_rsub_u32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x08,0xd8,0x01,0xff,0x00,0x00]
+
+ds_rsub_u32 v1, v2
+// CHECK: [0x00,0x00,0x08,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x08,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x08,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x0a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x0c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x0c,0xd8,0xff,0x02,0x00,0x00]
+
+ds_inc_u32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x0c,0xd8,0x01,0xff,0x00,0x00]
+
+ds_inc_u32 v1, v2
+// CHECK: [0x00,0x00,0x0c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x0c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x0c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x0e,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x10,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x10,0xd8,0xff,0x02,0x00,0x00]
+
+ds_dec_u32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x10,0xd8,0x01,0xff,0x00,0x00]
+
+ds_dec_u32 v1, v2
+// CHECK: [0x00,0x00,0x10,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x10,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x10,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x12,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x14,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x14,0xd8,0xff,0x02,0x00,0x00]
+
+ds_min_i32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x14,0xd8,0x01,0xff,0x00,0x00]
+
+ds_min_i32 v1, v2
+// CHECK: [0x00,0x00,0x14,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x14,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x14,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x16,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x18,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x18,0xd8,0xff,0x02,0x00,0x00]
+
+ds_max_i32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x18,0xd8,0x01,0xff,0x00,0x00]
+
+ds_max_i32 v1, v2
+// CHECK: [0x00,0x00,0x18,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x18,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x18,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x1a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x1c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x1c,0xd8,0xff,0x02,0x00,0x00]
+
+ds_min_u32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x1c,0xd8,0x01,0xff,0x00,0x00]
+
+ds_min_u32 v1, v2
+// CHECK: [0x00,0x00,0x1c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x1c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x1c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x1e,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x20,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x20,0xd8,0xff,0x02,0x00,0x00]
+
+ds_max_u32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x20,0xd8,0x01,0xff,0x00,0x00]
+
+ds_max_u32 v1, v2
+// CHECK: [0x00,0x00,0x20,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x20,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x20,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x22,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x24,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x24,0xd8,0xff,0x02,0x00,0x00]
+
+ds_and_b32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x24,0xd8,0x01,0xff,0x00,0x00]
+
+ds_and_b32 v1, v2
+// CHECK: [0x00,0x00,0x24,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x24,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x24,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x26,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x28,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x28,0xd8,0xff,0x02,0x00,0x00]
+
+ds_or_b32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x28,0xd8,0x01,0xff,0x00,0x00]
+
+ds_or_b32 v1, v2
+// CHECK: [0x00,0x00,0x28,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x28,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x28,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x2a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x2c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x2c,0xd8,0xff,0x02,0x00,0x00]
+
+ds_xor_b32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x2c,0xd8,0x01,0xff,0x00,0x00]
+
+ds_xor_b32 v1, v2
+// CHECK: [0x00,0x00,0x2c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x2c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x2c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x2e,0xd8,0x01,0x02,0x00,0x00]
+
+ds_mskor_b32 v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x30,0xd8,0x01,0x02,0x03,0x00]
+
+ds_mskor_b32 v255, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x30,0xd8,0xff,0x02,0x03,0x00]
+
+ds_mskor_b32 v1, v255, v3 offset:65535
+// CHECK: [0xff,0xff,0x30,0xd8,0x01,0xff,0x03,0x00]
+
+ds_mskor_b32 v1, v2, v255 offset:65535
+// CHECK: [0xff,0xff,0x30,0xd8,0x01,0x02,0xff,0x00]
+
+ds_mskor_b32 v1, v2, v3
+// CHECK: [0x00,0x00,0x30,0xd8,0x01,0x02,0x03,0x00]
+
+ds_mskor_b32 v1, v2, v3 offset:0
+// CHECK: [0x00,0x00,0x30,0xd8,0x01,0x02,0x03,0x00]
+
+ds_mskor_b32 v1, v2, v3 offset:4
+// CHECK: [0x04,0x00,0x30,0xd8,0x01,0x02,0x03,0x00]
+
+ds_mskor_b32 v1, v2, v3 offset:65535 gds
+// CHECK: [0xff,0xff,0x32,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write_b32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x34,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x34,0xd8,0xff,0x02,0x00,0x00]
+
+ds_write_b32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x34,0xd8,0x01,0xff,0x00,0x00]
+
+ds_write_b32 v1, v2
+// CHECK: [0x00,0x00,0x34,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x34,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x34,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x36,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write2_b32 v1, v2, v3 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v255, v2, v3 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x38,0xd8,0xff,0x02,0x03,0x00]
+
+ds_write2_b32 v1, v255, v3 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x38,0xd8,0x01,0xff,0x03,0x00]
+
+ds_write2_b32 v1, v2, v255 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x38,0xd8,0x01,0x02,0xff,0x00]
+
+ds_write2_b32 v1, v2, v3 offset1:255
+// CHECK: [0x00,0xff,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v1, v2, v3 offset0:0 offset1:255
+// CHECK: [0x00,0xff,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v1, v2, v3 offset0:16 offset1:255
+// CHECK: [0x10,0xff,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v1, v2, v3 offset0:127
+// CHECK: [0x7f,0x00,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v1, v2, v3 offset0:127 offset1:0
+// CHECK: [0x7f,0x00,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v1, v2, v3 offset0:127 offset1:1
+// CHECK: [0x7f,0x01,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v1, v2, v3 offset0:127 offset1:255 gds
+// CHECK: [0x7f,0xff,0x3a,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v255, v2, v3 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x3c,0xd8,0xff,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v255, v3 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x3c,0xd8,0x01,0xff,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v255 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x3c,0xd8,0x01,0x02,0xff,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset1:255
+// CHECK: [0x00,0xff,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset0:0 offset1:255
+// CHECK: [0x00,0xff,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset0:16 offset1:255
+// CHECK: [0x10,0xff,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset0:127
+// CHECK: [0x7f,0x00,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset0:127 offset1:0
+// CHECK: [0x7f,0x00,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset0:127 offset1:1
+// CHECK: [0x7f,0x01,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset0:127 offset1:255 gds
+// CHECK: [0x7f,0xff,0x3e,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b32 v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x40,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b32 v255, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x40,0xd8,0xff,0x02,0x03,0x00]
+
+ds_cmpst_b32 v1, v255, v3 offset:65535
+// CHECK: [0xff,0xff,0x40,0xd8,0x01,0xff,0x03,0x00]
+
+ds_cmpst_b32 v1, v2, v255 offset:65535
+// CHECK: [0xff,0xff,0x40,0xd8,0x01,0x02,0xff,0x00]
+
+ds_cmpst_b32 v1, v2, v3
+// CHECK: [0x00,0x00,0x40,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b32 v1, v2, v3 offset:0
+// CHECK: [0x00,0x00,0x40,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b32 v1, v2, v3 offset:4
+// CHECK: [0x04,0x00,0x40,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b32 v1, v2, v3 offset:65535 gds
+// CHECK: [0xff,0xff,0x42,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f32 v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x44,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f32 v255, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x44,0xd8,0xff,0x02,0x03,0x00]
+
+ds_cmpst_f32 v1, v255, v3 offset:65535
+// CHECK: [0xff,0xff,0x44,0xd8,0x01,0xff,0x03,0x00]
+
+ds_cmpst_f32 v1, v2, v255 offset:65535
+// CHECK: [0xff,0xff,0x44,0xd8,0x01,0x02,0xff,0x00]
+
+ds_cmpst_f32 v1, v2, v3
+// CHECK: [0x00,0x00,0x44,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f32 v1, v2, v3 offset:0
+// CHECK: [0x00,0x00,0x44,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f32 v1, v2, v3 offset:4
+// CHECK: [0x04,0x00,0x44,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f32 v1, v2, v3 offset:65535 gds
+// CHECK: [0xff,0xff,0x46,0xd8,0x01,0x02,0x03,0x00]
+
+ds_min_f32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x48,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_f32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x48,0xd8,0xff,0x02,0x00,0x00]
+
+ds_min_f32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x48,0xd8,0x01,0xff,0x00,0x00]
+
+ds_min_f32 v1, v2
+// CHECK: [0x00,0x00,0x48,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_f32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x48,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_f32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x48,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_f32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x4a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_f32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x4c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_f32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x4c,0xd8,0xff,0x02,0x00,0x00]
+
+ds_max_f32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x4c,0xd8,0x01,0xff,0x00,0x00]
+
+ds_max_f32 v1, v2
+// CHECK: [0x00,0x00,0x4c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_f32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x4c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_f32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x4c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_f32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x4e,0xd8,0x01,0x02,0x00,0x00]
+
+ds_gws_init v1 gds
+// CHECK: [0x00,0x00,0x66,0xd8,0x00,0x01,0x00,0x00]
+
+ds_gws_sema_v gds
+// CHECK: [0x00,0x00,0x6a,0xd8,0x00,0x00,0x00,0x00]
+
+ds_gws_sema_br v1 gds
+// CHECK: [0x00,0x00,0x6e,0xd8,0x00,0x01,0x00,0x00]
+
+ds_gws_sema_p gds
+// CHECK: [0x00,0x00,0x72,0xd8,0x00,0x00,0x00,0x00]
+
+ds_gws_barrier v1 gds
+// CHECK: [0x00,0x00,0x76,0xd8,0x00,0x01,0x00,0x00]
+
+ds_gws_sema_release_all offset:65535 gds
+// CHECK: [0xff,0xff,0x62,0xd8,0x00,0x00,0x00,0x00]
+
+ds_gws_sema_release_all gds
+// CHECK: [0x00,0x00,0x62,0xd8,0x00,0x00,0x00,0x00]
+
+ds_write_b8 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x78,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b8 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x78,0xd8,0xff,0x02,0x00,0x00]
+
+ds_write_b8 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x78,0xd8,0x01,0xff,0x00,0x00]
+
+ds_write_b8 v1, v2
+// CHECK: [0x00,0x00,0x78,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b8 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x78,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b8 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x78,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b8 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x7a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b16 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x7c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b16 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x7c,0xd8,0xff,0x02,0x00,0x00]
+
+ds_write_b16 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x7c,0xd8,0x01,0xff,0x00,0x00]
+
+ds_write_b16 v1, v2
+// CHECK: [0x00,0x00,0x7c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b16 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x7c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b16 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x7c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b16 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x7e,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_rtn_u32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x80,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x80,0xd8,0x01,0x02,0x00,0xff]
+
+ds_add_rtn_u32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x80,0xd8,0xff,0x02,0x00,0x05]
+
+ds_add_rtn_u32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x80,0xd8,0x01,0xff,0x00,0x05]
+
+ds_add_rtn_u32 v5, v1, v2
+// CHECK: [0x00,0x00,0x80,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x80,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x80,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x82,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x84,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x84,0xd8,0x01,0x02,0x00,0xff]
+
+ds_sub_rtn_u32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x84,0xd8,0xff,0x02,0x00,0x05]
+
+ds_sub_rtn_u32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x84,0xd8,0x01,0xff,0x00,0x05]
+
+ds_sub_rtn_u32 v5, v1, v2
+// CHECK: [0x00,0x00,0x84,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x84,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x84,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x86,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x88,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x88,0xd8,0x01,0x02,0x00,0xff]
+
+ds_rsub_rtn_u32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x88,0xd8,0xff,0x02,0x00,0x05]
+
+ds_rsub_rtn_u32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x88,0xd8,0x01,0xff,0x00,0x05]
+
+ds_rsub_rtn_u32 v5, v1, v2
+// CHECK: [0x00,0x00,0x88,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x88,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x88,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x8a,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x8c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x8c,0xd8,0x01,0x02,0x00,0xff]
+
+ds_inc_rtn_u32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x8c,0xd8,0xff,0x02,0x00,0x05]
+
+ds_inc_rtn_u32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x8c,0xd8,0x01,0xff,0x00,0x05]
+
+ds_inc_rtn_u32 v5, v1, v2
+// CHECK: [0x00,0x00,0x8c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x8c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x8c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x8e,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x90,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x90,0xd8,0x01,0x02,0x00,0xff]
+
+ds_dec_rtn_u32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x90,0xd8,0xff,0x02,0x00,0x05]
+
+ds_dec_rtn_u32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x90,0xd8,0x01,0xff,0x00,0x05]
+
+ds_dec_rtn_u32 v5, v1, v2
+// CHECK: [0x00,0x00,0x90,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x90,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x90,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x92,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x94,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x94,0xd8,0x01,0x02,0x00,0xff]
+
+ds_min_rtn_i32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x94,0xd8,0xff,0x02,0x00,0x05]
+
+ds_min_rtn_i32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x94,0xd8,0x01,0xff,0x00,0x05]
+
+ds_min_rtn_i32 v5, v1, v2
+// CHECK: [0x00,0x00,0x94,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x94,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x94,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x96,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x98,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x98,0xd8,0x01,0x02,0x00,0xff]
+
+ds_max_rtn_i32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x98,0xd8,0xff,0x02,0x00,0x05]
+
+ds_max_rtn_i32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x98,0xd8,0x01,0xff,0x00,0x05]
+
+ds_max_rtn_i32 v5, v1, v2
+// CHECK: [0x00,0x00,0x98,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x98,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x98,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x9a,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x9c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x9c,0xd8,0x01,0x02,0x00,0xff]
+
+ds_min_rtn_u32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x9c,0xd8,0xff,0x02,0x00,0x05]
+
+ds_min_rtn_u32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x9c,0xd8,0x01,0xff,0x00,0x05]
+
+ds_min_rtn_u32 v5, v1, v2
+// CHECK: [0x00,0x00,0x9c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x9c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x9c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x9e,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0xa0,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0xa0,0xd8,0x01,0x02,0x00,0xff]
+
+ds_max_rtn_u32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0xa0,0xd8,0xff,0x02,0x00,0x05]
+
+ds_max_rtn_u32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0xa0,0xd8,0x01,0xff,0x00,0x05]
+
+ds_max_rtn_u32 v5, v1, v2
+// CHECK: [0x00,0x00,0xa0,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0xa0,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0xa0,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0xa2,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0xa4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0xa4,0xd8,0x01,0x02,0x00,0xff]
+
+ds_and_rtn_b32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0xa4,0xd8,0xff,0x02,0x00,0x05]
+
+ds_and_rtn_b32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0xa4,0xd8,0x01,0xff,0x00,0x05]
+
+ds_and_rtn_b32 v5, v1, v2
+// CHECK: [0x00,0x00,0xa4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0xa4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0xa4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0xa6,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0xa8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0xa8,0xd8,0x01,0x02,0x00,0xff]
+
+ds_or_rtn_b32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0xa8,0xd8,0xff,0x02,0x00,0x05]
+
+ds_or_rtn_b32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0xa8,0xd8,0x01,0xff,0x00,0x05]
+
+ds_or_rtn_b32 v5, v1, v2
+// CHECK: [0x00,0x00,0xa8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0xa8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0xa8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0xaa,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0xac,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0xac,0xd8,0x01,0x02,0x00,0xff]
+
+ds_xor_rtn_b32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0xac,0xd8,0xff,0x02,0x00,0x05]
+
+ds_xor_rtn_b32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0xac,0xd8,0x01,0xff,0x00,0x05]
+
+ds_xor_rtn_b32 v5, v1, v2
+// CHECK: [0x00,0x00,0xac,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0xac,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0xac,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0xae,0xd8,0x01,0x02,0x00,0x05]
+
+ds_mskor_rtn_b32 v5, v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0xb0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b32 v255, v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0xb0,0xd8,0x01,0x02,0x03,0xff]
+
+ds_mskor_rtn_b32 v5, v255, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0xb0,0xd8,0xff,0x02,0x03,0x05]
+
+ds_mskor_rtn_b32 v5, v1, v255, v3 offset:65535
+// CHECK: [0xff,0xff,0xb0,0xd8,0x01,0xff,0x03,0x05]
+
+ds_mskor_rtn_b32 v5, v1, v2, v255 offset:65535
+// CHECK: [0xff,0xff,0xb0,0xd8,0x01,0x02,0xff,0x05]
+
+ds_mskor_rtn_b32 v5, v1, v2, v3
+// CHECK: [0x00,0x00,0xb0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b32 v5, v1, v2, v3 offset:0
+// CHECK: [0x00,0x00,0xb0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b32 v5, v1, v2, v3 offset:4
+// CHECK: [0x04,0x00,0xb0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b32 v5, v1, v2, v3 offset:65535 gds
+// CHECK: [0xff,0xff,0xb2,0xd8,0x01,0x02,0x03,0x05]
+
+ds_wrxchg_rtn_b32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0xb4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0xb4,0xd8,0x01,0x02,0x00,0xff]
+
+ds_wrxchg_rtn_b32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0xb4,0xd8,0xff,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0xb4,0xd8,0x01,0xff,0x00,0x05]
+
+ds_wrxchg_rtn_b32 v5, v1, v2
+// CHECK: [0x00,0x00,0xb4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0xb4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0xb4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0xb6,0xd8,0x01,0x02,0x00,0x05]
+
+ds_cmpst_rtn_b32 v5, v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0xc0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b32 v255, v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0xc0,0xd8,0x01,0x02,0x03,0xff]
+
+ds_cmpst_rtn_b32 v5, v255, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0xc0,0xd8,0xff,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b32 v5, v1, v255, v3 offset:65535
+// CHECK: [0xff,0xff,0xc0,0xd8,0x01,0xff,0x03,0x05]
+
+ds_cmpst_rtn_b32 v5, v1, v2, v255 offset:65535
+// CHECK: [0xff,0xff,0xc0,0xd8,0x01,0x02,0xff,0x05]
+
+ds_cmpst_rtn_b32 v5, v1, v2, v3
+// CHECK: [0x00,0x00,0xc0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b32 v5, v1, v2, v3 offset:0
+// CHECK: [0x00,0x00,0xc0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b32 v5, v1, v2, v3 offset:4
+// CHECK: [0x04,0x00,0xc0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b32 v5, v1, v2, v3 offset:65535 gds
+// CHECK: [0xff,0xff,0xc2,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f32 v5, v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0xc4,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f32 v255, v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0xc4,0xd8,0x01,0x02,0x03,0xff]
+
+ds_cmpst_rtn_f32 v5, v255, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0xc4,0xd8,0xff,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f32 v5, v1, v255, v3 offset:65535
+// CHECK: [0xff,0xff,0xc4,0xd8,0x01,0xff,0x03,0x05]
+
+ds_cmpst_rtn_f32 v5, v1, v2, v255 offset:65535
+// CHECK: [0xff,0xff,0xc4,0xd8,0x01,0x02,0xff,0x05]
+
+ds_cmpst_rtn_f32 v5, v1, v2, v3
+// CHECK: [0x00,0x00,0xc4,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f32 v5, v1, v2, v3 offset:0
+// CHECK: [0x00,0x00,0xc4,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f32 v5, v1, v2, v3 offset:4
+// CHECK: [0x04,0x00,0xc4,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f32 v5, v1, v2, v3 offset:65535 gds
+// CHECK: [0xff,0xff,0xc6,0xd8,0x01,0x02,0x03,0x05]
+
+ds_min_rtn_f32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0xc8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_f32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0xc8,0xd8,0x01,0x02,0x00,0xff]
+
+ds_min_rtn_f32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0xc8,0xd8,0xff,0x02,0x00,0x05]
+
+ds_min_rtn_f32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0xc8,0xd8,0x01,0xff,0x00,0x05]
+
+ds_min_rtn_f32 v5, v1, v2
+// CHECK: [0x00,0x00,0xc8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_f32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0xc8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_f32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0xc8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_f32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0xca,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0xcc,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0xcc,0xd8,0x01,0x02,0x00,0xff]
+
+ds_max_rtn_f32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0xcc,0xd8,0xff,0x02,0x00,0x05]
+
+ds_max_rtn_f32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0xcc,0xd8,0x01,0xff,0x00,0x05]
+
+ds_max_rtn_f32 v5, v1, v2
+// CHECK: [0x00,0x00,0xcc,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0xcc,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0xcc,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0xce,0xd8,0x01,0x02,0x00,0x05]
+
+ds_swizzle_b32 v5, v1 offset:65535
+// CHECK: [0xff,0xff,0xd4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_swizzle_b32 v255, v1 offset:65535
+// CHECK: [0xff,0xff,0xd4,0xd8,0x01,0x00,0x00,0xff]
+
+ds_swizzle_b32 v5, v255 offset:65535
+// CHECK: [0xff,0xff,0xd4,0xd8,0xff,0x00,0x00,0x05]
+
+ds_swizzle_b32 v5, v1
+// CHECK: [0x00,0x00,0xd4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_swizzle_b32 v5, v1 offset:0
+// CHECK: [0x00,0x00,0xd4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_swizzle_b32 v5, v1 offset:4
+// CHECK: [0x04,0x00,0xd4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_swizzle_b32 v5, v1 offset:65535 gds
+// CHECK: [0xff,0xff,0xd6,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b32 v5, v1 offset:65535
+// CHECK: [0xff,0xff,0xd8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b32 v255, v1 offset:65535
+// CHECK: [0xff,0xff,0xd8,0xd8,0x01,0x00,0x00,0xff]
+
+ds_read_b32 v5, v255 offset:65535
+// CHECK: [0xff,0xff,0xd8,0xd8,0xff,0x00,0x00,0x05]
+
+ds_read_b32 v5, v1
+// CHECK: [0x00,0x00,0xd8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b32 v5, v1 offset:0
+// CHECK: [0x00,0x00,0xd8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b32 v5, v1 offset:4
+// CHECK: [0x04,0x00,0xd8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b32 v5, v1 offset:65535 gds
+// CHECK: [0xff,0xff,0xda,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b32 v[5:6], v1 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0xdc,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b32 v[254:255], v1 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0xdc,0xd8,0x01,0x00,0x00,0xfe]
+
+ds_read2_b32 v[5:6], v255 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0xdc,0xd8,0xff,0x00,0x00,0x05]
+
+ds_read2_b32 v[5:6], v1 offset1:255
+// CHECK: [0x00,0xff,0xdc,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b32 v[5:6], v1 offset0:0 offset1:255
+// CHECK: [0x00,0xff,0xdc,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b32 v[5:6], v1 offset0:16 offset1:255
+// CHECK: [0x10,0xff,0xdc,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b32 v[5:6], v1 offset0:127
+// CHECK: [0x7f,0x00,0xdc,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b32 v[5:6], v1 offset0:127 offset1:0
+// CHECK: [0x7f,0x00,0xdc,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b32 v[5:6], v1 offset0:127 offset1:1
+// CHECK: [0x7f,0x01,0xdc,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b32 v[5:6], v1 offset0:127 offset1:255 gds
+// CHECK: [0x7f,0xff,0xde,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b32 v[5:6], v1 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0xe0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b32 v[254:255], v1 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0xe0,0xd8,0x01,0x00,0x00,0xfe]
+
+ds_read2st64_b32 v[5:6], v255 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0xe0,0xd8,0xff,0x00,0x00,0x05]
+
+ds_read2st64_b32 v[5:6], v1 offset1:255
+// CHECK: [0x00,0xff,0xe0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b32 v[5:6], v1 offset0:0 offset1:255
+// CHECK: [0x00,0xff,0xe0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b32 v[5:6], v1 offset0:16 offset1:255
+// CHECK: [0x10,0xff,0xe0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b32 v[5:6], v1 offset0:127
+// CHECK: [0x7f,0x00,0xe0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b32 v[5:6], v1 offset0:127 offset1:0
+// CHECK: [0x7f,0x00,0xe0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b32 v[5:6], v1 offset0:127 offset1:1
+// CHECK: [0x7f,0x01,0xe0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b32 v[5:6], v1 offset0:127 offset1:255 gds
+// CHECK: [0x7f,0xff,0xe2,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i8 v5, v1 offset:65535
+// CHECK: [0xff,0xff,0xe4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i8 v255, v1 offset:65535
+// CHECK: [0xff,0xff,0xe4,0xd8,0x01,0x00,0x00,0xff]
+
+ds_read_i8 v5, v255 offset:65535
+// CHECK: [0xff,0xff,0xe4,0xd8,0xff,0x00,0x00,0x05]
+
+ds_read_i8 v5, v1
+// CHECK: [0x00,0x00,0xe4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i8 v5, v1 offset:0
+// CHECK: [0x00,0x00,0xe4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i8 v5, v1 offset:4
+// CHECK: [0x04,0x00,0xe4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i8 v5, v1 offset:65535 gds
+// CHECK: [0xff,0xff,0xe6,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u8 v5, v1 offset:65535
+// CHECK: [0xff,0xff,0xe8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u8 v255, v1 offset:65535
+// CHECK: [0xff,0xff,0xe8,0xd8,0x01,0x00,0x00,0xff]
+
+ds_read_u8 v5, v255 offset:65535
+// CHECK: [0xff,0xff,0xe8,0xd8,0xff,0x00,0x00,0x05]
+
+ds_read_u8 v5, v1
+// CHECK: [0x00,0x00,0xe8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u8 v5, v1 offset:0
+// CHECK: [0x00,0x00,0xe8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u8 v5, v1 offset:4
+// CHECK: [0x04,0x00,0xe8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u8 v5, v1 offset:65535 gds
+// CHECK: [0xff,0xff,0xea,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i16 v5, v1 offset:65535
+// CHECK: [0xff,0xff,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i16 v255, v1 offset:65535
+// CHECK: [0xff,0xff,0xec,0xd8,0x01,0x00,0x00,0xff]
+
+ds_read_i16 v5, v255 offset:65535
+// CHECK: [0xff,0xff,0xec,0xd8,0xff,0x00,0x00,0x05]
+
+ds_read_i16 v5, v1
+// CHECK: [0x00,0x00,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i16 v5, v1 offset:0
+// CHECK: [0x00,0x00,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i16 v5, v1 offset:4
+// CHECK: [0x04,0x00,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i16 v5, v1 offset:65535 gds
+// CHECK: [0xff,0xff,0xee,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u16 v5, v1 offset:65535
+// CHECK: [0xff,0xff,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u16 v255, v1 offset:65535
+// CHECK: [0xff,0xff,0xf0,0xd8,0x01,0x00,0x00,0xff]
+
+ds_read_u16 v5, v255 offset:65535
+// CHECK: [0xff,0xff,0xf0,0xd8,0xff,0x00,0x00,0x05]
+
+ds_read_u16 v5, v1
+// CHECK: [0x00,0x00,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u16 v5, v1 offset:0
+// CHECK: [0x00,0x00,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u16 v5, v1 offset:4
+// CHECK: [0x04,0x00,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u16 v5, v1 offset:65535 gds
+// CHECK: [0xff,0xff,0xf2,0xd8,0x01,0x00,0x00,0x05]
+
+ds_consume v5 offset:65535
+// CHECK: [0xff,0xff,0xf4,0xd8,0x00,0x00,0x00,0x05]
+
+ds_consume v255 offset:65535
+// CHECK: [0xff,0xff,0xf4,0xd8,0x00,0x00,0x00,0xff]
+
+ds_consume v5
+// CHECK: [0x00,0x00,0xf4,0xd8,0x00,0x00,0x00,0x05]
+
+ds_consume v5 offset:0
+// CHECK: [0x00,0x00,0xf4,0xd8,0x00,0x00,0x00,0x05]
+
+ds_consume v5 offset:4
+// CHECK: [0x04,0x00,0xf4,0xd8,0x00,0x00,0x00,0x05]
+
+ds_consume v5 offset:65535 gds
+// CHECK: [0xff,0xff,0xf6,0xd8,0x00,0x00,0x00,0x05]
+
+ds_append v5 offset:65535
+// CHECK: [0xff,0xff,0xf8,0xd8,0x00,0x00,0x00,0x05]
+
+ds_append v255 offset:65535
+// CHECK: [0xff,0xff,0xf8,0xd8,0x00,0x00,0x00,0xff]
+
+ds_append v5
+// CHECK: [0x00,0x00,0xf8,0xd8,0x00,0x00,0x00,0x05]
+
+ds_append v5 offset:0
+// CHECK: [0x00,0x00,0xf8,0xd8,0x00,0x00,0x00,0x05]
+
+ds_append v5 offset:4
+// CHECK: [0x04,0x00,0xf8,0xd8,0x00,0x00,0x00,0x05]
+
+ds_append v5 offset:65535 gds
+// CHECK: [0xff,0xff,0xfa,0xd8,0x00,0x00,0x00,0x05]
+
+ds_ordered_count v5, v1 offset:65535 gds
+// CHECK: [0xff,0xff,0xfe,0xd8,0x01,0x00,0x00,0x05]
+
+ds_ordered_count v255, v1 offset:65535 gds
+// CHECK: [0xff,0xff,0xfe,0xd8,0x01,0x00,0x00,0xff]
+
+ds_ordered_count v5, v255 offset:65535 gds
+// CHECK: [0xff,0xff,0xfe,0xd8,0xff,0x00,0x00,0x05]
+
+ds_ordered_count v5, v1 gds
+// CHECK: [0x00,0x00,0xfe,0xd8,0x01,0x00,0x00,0x05]
+
+ds_ordered_count v5, v1 offset:0 gds
+// CHECK: [0x00,0x00,0xfe,0xd8,0x01,0x00,0x00,0x05]
+
+ds_ordered_count v5, v1 offset:4 gds
+// CHECK: [0x04,0x00,0xfe,0xd8,0x01,0x00,0x00,0x05]
+
+ds_add_u64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x00,0xd9,0x01,0x02,0x00,0x00]
+
+ds_add_u64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x00,0xd9,0xff,0x02,0x00,0x00]
+
+ds_add_u64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x00,0xd9,0x01,0xfe,0x00,0x00]
+
+ds_add_u64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x00,0xd9,0x01,0x02,0x00,0x00]
+
+ds_add_u64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x00,0xd9,0x01,0x02,0x00,0x00]
+
+ds_add_u64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x00,0xd9,0x01,0x02,0x00,0x00]
+
+ds_add_u64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x02,0xd9,0x01,0x02,0x00,0x00]
+
+ds_sub_u64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x04,0xd9,0x01,0x02,0x00,0x00]
+
+ds_sub_u64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x04,0xd9,0xff,0x02,0x00,0x00]
+
+ds_sub_u64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x04,0xd9,0x01,0xfe,0x00,0x00]
+
+ds_sub_u64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x04,0xd9,0x01,0x02,0x00,0x00]
+
+ds_sub_u64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x04,0xd9,0x01,0x02,0x00,0x00]
+
+ds_sub_u64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x04,0xd9,0x01,0x02,0x00,0x00]
+
+ds_sub_u64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x06,0xd9,0x01,0x02,0x00,0x00]
+
+ds_rsub_u64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x08,0xd9,0x01,0x02,0x00,0x00]
+
+ds_rsub_u64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x08,0xd9,0xff,0x02,0x00,0x00]
+
+ds_rsub_u64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x08,0xd9,0x01,0xfe,0x00,0x00]
+
+ds_rsub_u64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x08,0xd9,0x01,0x02,0x00,0x00]
+
+ds_rsub_u64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x08,0xd9,0x01,0x02,0x00,0x00]
+
+ds_rsub_u64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x08,0xd9,0x01,0x02,0x00,0x00]
+
+ds_rsub_u64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x0a,0xd9,0x01,0x02,0x00,0x00]
+
+ds_inc_u64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x0c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_inc_u64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x0c,0xd9,0xff,0x02,0x00,0x00]
+
+ds_inc_u64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x0c,0xd9,0x01,0xfe,0x00,0x00]
+
+ds_inc_u64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x0c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_inc_u64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x0c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_inc_u64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x0c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_inc_u64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x0e,0xd9,0x01,0x02,0x00,0x00]
+
+ds_dec_u64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x10,0xd9,0x01,0x02,0x00,0x00]
+
+ds_dec_u64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x10,0xd9,0xff,0x02,0x00,0x00]
+
+ds_dec_u64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x10,0xd9,0x01,0xfe,0x00,0x00]
+
+ds_dec_u64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x10,0xd9,0x01,0x02,0x00,0x00]
+
+ds_dec_u64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x10,0xd9,0x01,0x02,0x00,0x00]
+
+ds_dec_u64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x10,0xd9,0x01,0x02,0x00,0x00]
+
+ds_dec_u64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x12,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_i64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x14,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_i64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x14,0xd9,0xff,0x02,0x00,0x00]
+
+ds_min_i64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x14,0xd9,0x01,0xfe,0x00,0x00]
+
+ds_min_i64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x14,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_i64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x14,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_i64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x14,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_i64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x16,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_i64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x18,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_i64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x18,0xd9,0xff,0x02,0x00,0x00]
+
+ds_max_i64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x18,0xd9,0x01,0xfe,0x00,0x00]
+
+ds_max_i64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x18,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_i64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x18,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_i64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x18,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_i64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x1a,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_u64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x1c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_u64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x1c,0xd9,0xff,0x02,0x00,0x00]
+
+ds_min_u64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x1c,0xd9,0x01,0xfe,0x00,0x00]
+
+ds_min_u64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x1c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_u64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x1c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_u64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x1c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_u64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x1e,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_u64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x20,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_u64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x20,0xd9,0xff,0x02,0x00,0x00]
+
+ds_max_u64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x20,0xd9,0x01,0xfe,0x00,0x00]
+
+ds_max_u64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x20,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_u64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x20,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_u64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x20,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_u64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x22,0xd9,0x01,0x02,0x00,0x00]
+
+ds_and_b64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x24,0xd9,0x01,0x02,0x00,0x00]
+
+ds_and_b64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x24,0xd9,0xff,0x02,0x00,0x00]
+
+ds_and_b64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x24,0xd9,0x01,0xfe,0x00,0x00]
+
+ds_and_b64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x24,0xd9,0x01,0x02,0x00,0x00]
+
+ds_and_b64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x24,0xd9,0x01,0x02,0x00,0x00]
+
+ds_and_b64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x24,0xd9,0x01,0x02,0x00,0x00]
+
+ds_and_b64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x26,0xd9,0x01,0x02,0x00,0x00]
+
+ds_or_b64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x28,0xd9,0x01,0x02,0x00,0x00]
+
+ds_or_b64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x28,0xd9,0xff,0x02,0x00,0x00]
+
+ds_or_b64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x28,0xd9,0x01,0xfe,0x00,0x00]
+
+ds_or_b64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x28,0xd9,0x01,0x02,0x00,0x00]
+
+ds_or_b64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x28,0xd9,0x01,0x02,0x00,0x00]
+
+ds_or_b64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x28,0xd9,0x01,0x02,0x00,0x00]
+
+ds_or_b64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x2a,0xd9,0x01,0x02,0x00,0x00]
+
+ds_xor_b64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x2c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_xor_b64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x2c,0xd9,0xff,0x02,0x00,0x00]
+
+ds_xor_b64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x2c,0xd9,0x01,0xfe,0x00,0x00]
+
+ds_xor_b64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x2c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_xor_b64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x2c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_xor_b64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x2c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_xor_b64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x2e,0xd9,0x01,0x02,0x00,0x00]
+
+ds_mskor_b64 v1, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0x30,0xd9,0x01,0x02,0x03,0x00]
+
+ds_mskor_b64 v255, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0x30,0xd9,0xff,0x02,0x03,0x00]
+
+ds_mskor_b64 v1, v[254:255], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0x30,0xd9,0x01,0xfe,0x03,0x00]
+
+ds_mskor_b64 v1, v[2:3], v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x30,0xd9,0x01,0x02,0xfe,0x00]
+
+ds_mskor_b64 v1, v[2:3], v[3:4]
+// CHECK: [0x00,0x00,0x30,0xd9,0x01,0x02,0x03,0x00]
+
+ds_mskor_b64 v1, v[2:3], v[3:4] offset:0
+// CHECK: [0x00,0x00,0x30,0xd9,0x01,0x02,0x03,0x00]
+
+ds_mskor_b64 v1, v[2:3], v[3:4] offset:4
+// CHECK: [0x04,0x00,0x30,0xd9,0x01,0x02,0x03,0x00]
+
+ds_mskor_b64 v1, v[2:3], v[3:4] offset:65535 gds
+// CHECK: [0xff,0xff,0x32,0xd9,0x01,0x02,0x03,0x00]
+
+ds_write_b64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x34,0xd9,0x01,0x02,0x00,0x00]
+
+ds_write_b64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x34,0xd9,0xff,0x02,0x00,0x00]
+
+ds_write_b64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x34,0xd9,0x01,0xfe,0x00,0x00]
+
+ds_write_b64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x34,0xd9,0x01,0x02,0x00,0x00]
+
+ds_write_b64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x34,0xd9,0x01,0x02,0x00,0x00]
+
+ds_write_b64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x34,0xd9,0x01,0x02,0x00,0x00]
+
+ds_write_b64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x36,0xd9,0x01,0x02,0x00,0x00]
+
+ds_write2_b64 v1, v[2:3], v[3:4] offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x38,0xd9,0x01,0x02,0x03,0x00]
+
+ds_write2_b64 v255, v[2:3], v[3:4] offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x38,0xd9,0xff,0x02,0x03,0x00]
+
+ds_write2_b64 v1, v[254:255], v[3:4] offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x38,0xd9,0x01,0xfe,0x03,0x00]
+
+ds_write2_b64 v1, v[2:3], v[254:255] offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x38,0xd9,0x01,0x02,0xfe,0x00]
+
+ds_write2_b64 v1, v[2:3], v[3:4] offset1:255
+// CHECK: [0x00,0xff,0x38,0xd9,0x01,0x02,0x03,0x00]
+
+ds_write2_b64 v1, v[2:3], v[3:4] offset0:0 offset1:255
+// CHECK: [0x00,0xff,0x38,0xd9,0x01,0x02,0x03,0x00]
+
+ds_write2_b64 v1, v[2:3], v[3:4] offset0:16 offset1:255
+// CHECK: [0x10,0xff,0x38,0xd9,0x01,0x02,0x03,0x00]
+
+ds_write2_b64 v1, v[2:3], v[3:4] offset0:127
+// CHECK: [0x7f,0x00,0x38,0xd9,0x01,0x02,0x03,0x00]
+
+ds_write2_b64 v1, v[2:3], v[3:4] offset0:127 offset1:0
+// CHECK: [0x7f,0x00,0x38,0xd9,0x01,0x02,0x03,0x00]
+
+ds_write2_b64 v1, v[2:3], v[3:4] offset0:127 offset1:1
+// CHECK: [0x7f,0x01,0x38,0xd9,0x01,0x02,0x03,0x00]
+
+ds_write2_b64 v1, v[2:3], v[3:4] offset0:127 offset1:255 gds
+// CHECK: [0x7f,0xff,0x3a,0xd9,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[3:4] offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x3c,0xd9,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b64 v255, v[2:3], v[3:4] offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x3c,0xd9,0xff,0x02,0x03,0x00]
+
+ds_write2st64_b64 v1, v[254:255], v[3:4] offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x3c,0xd9,0x01,0xfe,0x03,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[254:255] offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x3c,0xd9,0x01,0x02,0xfe,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[3:4] offset1:255
+// CHECK: [0x00,0xff,0x3c,0xd9,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[3:4] offset0:0 offset1:255
+// CHECK: [0x00,0xff,0x3c,0xd9,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[3:4] offset0:16 offset1:255
+// CHECK: [0x10,0xff,0x3c,0xd9,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[3:4] offset0:127
+// CHECK: [0x7f,0x00,0x3c,0xd9,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[3:4] offset0:127 offset1:0
+// CHECK: [0x7f,0x00,0x3c,0xd9,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[3:4] offset0:127 offset1:1
+// CHECK: [0x7f,0x01,0x3c,0xd9,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[3:4] offset0:127 offset1:255 gds
+// CHECK: [0x7f,0xff,0x3e,0xd9,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b64 v1, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0x40,0xd9,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b64 v255, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0x40,0xd9,0xff,0x02,0x03,0x00]
+
+ds_cmpst_b64 v1, v[254:255], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0x40,0xd9,0x01,0xfe,0x03,0x00]
+
+ds_cmpst_b64 v1, v[2:3], v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x40,0xd9,0x01,0x02,0xfe,0x00]
+
+ds_cmpst_b64 v1, v[2:3], v[3:4]
+// CHECK: [0x00,0x00,0x40,0xd9,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b64 v1, v[2:3], v[3:4] offset:0
+// CHECK: [0x00,0x00,0x40,0xd9,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b64 v1, v[2:3], v[3:4] offset:4
+// CHECK: [0x04,0x00,0x40,0xd9,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b64 v1, v[2:3], v[3:4] offset:65535 gds
+// CHECK: [0xff,0xff,0x42,0xd9,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f64 v1, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0x44,0xd9,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f64 v255, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0x44,0xd9,0xff,0x02,0x03,0x00]
+
+ds_cmpst_f64 v1, v[254:255], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0x44,0xd9,0x01,0xfe,0x03,0x00]
+
+ds_cmpst_f64 v1, v[2:3], v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x44,0xd9,0x01,0x02,0xfe,0x00]
+
+ds_cmpst_f64 v1, v[2:3], v[3:4]
+// CHECK: [0x00,0x00,0x44,0xd9,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f64 v1, v[2:3], v[3:4] offset:0
+// CHECK: [0x00,0x00,0x44,0xd9,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f64 v1, v[2:3], v[3:4] offset:4
+// CHECK: [0x04,0x00,0x44,0xd9,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f64 v1, v[2:3], v[3:4] offset:65535 gds
+// CHECK: [0xff,0xff,0x46,0xd9,0x01,0x02,0x03,0x00]
+
+ds_min_f64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x48,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_f64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x48,0xd9,0xff,0x02,0x00,0x00]
+
+ds_min_f64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x48,0xd9,0x01,0xfe,0x00,0x00]
+
+ds_min_f64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x48,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_f64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x48,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_f64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x48,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_f64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x4a,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_f64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x4c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_f64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x4c,0xd9,0xff,0x02,0x00,0x00]
+
+ds_max_f64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x4c,0xd9,0x01,0xfe,0x00,0x00]
+
+ds_max_f64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x4c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_f64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x4c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_f64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x4c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_f64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x4e,0xd9,0x01,0x02,0x00,0x00]
+
+ds_add_rtn_u64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x80,0xd9,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x80,0xd9,0x01,0x02,0x00,0xfe]
+
+ds_add_rtn_u64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x80,0xd9,0xff,0x02,0x00,0x05]
+
+ds_add_rtn_u64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x80,0xd9,0x01,0xfe,0x00,0x05]
+
+ds_add_rtn_u64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0x80,0xd9,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x80,0xd9,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x80,0xd9,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x82,0xd9,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x84,0xd9,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x84,0xd9,0x01,0x02,0x00,0xfe]
+
+ds_sub_rtn_u64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x84,0xd9,0xff,0x02,0x00,0x05]
+
+ds_sub_rtn_u64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x84,0xd9,0x01,0xfe,0x00,0x05]
+
+ds_sub_rtn_u64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0x84,0xd9,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x84,0xd9,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x84,0xd9,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x86,0xd9,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x88,0xd9,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x88,0xd9,0x01,0x02,0x00,0xfe]
+
+ds_rsub_rtn_u64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x88,0xd9,0xff,0x02,0x00,0x05]
+
+ds_rsub_rtn_u64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x88,0xd9,0x01,0xfe,0x00,0x05]
+
+ds_rsub_rtn_u64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0x88,0xd9,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x88,0xd9,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x88,0xd9,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x8a,0xd9,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x8c,0xd9,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x8c,0xd9,0x01,0x02,0x00,0xfe]
+
+ds_inc_rtn_u64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x8c,0xd9,0xff,0x02,0x00,0x05]
+
+ds_inc_rtn_u64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x8c,0xd9,0x01,0xfe,0x00,0x05]
+
+ds_inc_rtn_u64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0x8c,0xd9,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x8c,0xd9,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x8c,0xd9,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x8e,0xd9,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x90,0xd9,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x90,0xd9,0x01,0x02,0x00,0xfe]
+
+ds_dec_rtn_u64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x90,0xd9,0xff,0x02,0x00,0x05]
+
+ds_dec_rtn_u64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x90,0xd9,0x01,0xfe,0x00,0x05]
+
+ds_dec_rtn_u64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0x90,0xd9,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x90,0xd9,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x90,0xd9,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x92,0xd9,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x94,0xd9,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x94,0xd9,0x01,0x02,0x00,0xfe]
+
+ds_min_rtn_i64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x94,0xd9,0xff,0x02,0x00,0x05]
+
+ds_min_rtn_i64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x94,0xd9,0x01,0xfe,0x00,0x05]
+
+ds_min_rtn_i64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0x94,0xd9,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x94,0xd9,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x94,0xd9,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x96,0xd9,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x98,0xd9,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x98,0xd9,0x01,0x02,0x00,0xfe]
+
+ds_max_rtn_i64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x98,0xd9,0xff,0x02,0x00,0x05]
+
+ds_max_rtn_i64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x98,0xd9,0x01,0xfe,0x00,0x05]
+
+ds_max_rtn_i64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0x98,0xd9,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x98,0xd9,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x98,0xd9,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x9a,0xd9,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x9c,0xd9,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x9c,0xd9,0x01,0x02,0x00,0xfe]
+
+ds_min_rtn_u64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x9c,0xd9,0xff,0x02,0x00,0x05]
+
+ds_min_rtn_u64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x9c,0xd9,0x01,0xfe,0x00,0x05]
+
+ds_min_rtn_u64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0x9c,0xd9,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x9c,0xd9,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x9c,0xd9,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x9e,0xd9,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xa0,0xd9,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xa0,0xd9,0x01,0x02,0x00,0xfe]
+
+ds_max_rtn_u64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xa0,0xd9,0xff,0x02,0x00,0x05]
+
+ds_max_rtn_u64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xa0,0xd9,0x01,0xfe,0x00,0x05]
+
+ds_max_rtn_u64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xa0,0xd9,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xa0,0xd9,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xa0,0xd9,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xa2,0xd9,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xa4,0xd9,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xa4,0xd9,0x01,0x02,0x00,0xfe]
+
+ds_and_rtn_b64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xa4,0xd9,0xff,0x02,0x00,0x05]
+
+ds_and_rtn_b64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xa4,0xd9,0x01,0xfe,0x00,0x05]
+
+ds_and_rtn_b64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xa4,0xd9,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xa4,0xd9,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xa4,0xd9,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xa6,0xd9,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xa8,0xd9,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xa8,0xd9,0x01,0x02,0x00,0xfe]
+
+ds_or_rtn_b64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xa8,0xd9,0xff,0x02,0x00,0x05]
+
+ds_or_rtn_b64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xa8,0xd9,0x01,0xfe,0x00,0x05]
+
+ds_or_rtn_b64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xa8,0xd9,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xa8,0xd9,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xa8,0xd9,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xaa,0xd9,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xac,0xd9,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xac,0xd9,0x01,0x02,0x00,0xfe]
+
+ds_xor_rtn_b64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xac,0xd9,0xff,0x02,0x00,0x05]
+
+ds_xor_rtn_b64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xac,0xd9,0x01,0xfe,0x00,0x05]
+
+ds_xor_rtn_b64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xac,0xd9,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xac,0xd9,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xac,0xd9,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xae,0xd9,0x01,0x02,0x00,0x05]
+
+ds_mskor_rtn_b64 v[5:6], v1, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xb0,0xd9,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b64 v[254:255], v1, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xb0,0xd9,0x01,0x02,0x03,0xfe]
+
+ds_mskor_rtn_b64 v[5:6], v255, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xb0,0xd9,0xff,0x02,0x03,0x05]
+
+ds_mskor_rtn_b64 v[5:6], v1, v[254:255], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xb0,0xd9,0x01,0xfe,0x03,0x05]
+
+ds_mskor_rtn_b64 v[5:6], v1, v[2:3], v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xb0,0xd9,0x01,0x02,0xfe,0x05]
+
+ds_mskor_rtn_b64 v[5:6], v1, v[2:3], v[3:4]
+// CHECK: [0x00,0x00,0xb0,0xd9,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b64 v[5:6], v1, v[2:3], v[3:4] offset:0
+// CHECK: [0x00,0x00,0xb0,0xd9,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b64 v[5:6], v1, v[2:3], v[3:4] offset:4
+// CHECK: [0x04,0x00,0xb0,0xd9,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b64 v[5:6], v1, v[2:3], v[3:4] offset:65535 gds
+// CHECK: [0xff,0xff,0xb2,0xd9,0x01,0x02,0x03,0x05]
+
+ds_wrxchg_rtn_b64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xb4,0xd9,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xb4,0xd9,0x01,0x02,0x00,0xfe]
+
+ds_wrxchg_rtn_b64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xb4,0xd9,0xff,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xb4,0xd9,0x01,0xfe,0x00,0x05]
+
+ds_wrxchg_rtn_b64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xb4,0xd9,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xb4,0xd9,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xb4,0xd9,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xb6,0xd9,0x01,0x02,0x00,0x05]
+
+ds_cmpst_rtn_b64 v[5:6], v1, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xc0,0xd9,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b64 v[254:255], v1, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xc0,0xd9,0x01,0x02,0x03,0xfe]
+
+ds_cmpst_rtn_b64 v[5:6], v255, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xc0,0xd9,0xff,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b64 v[5:6], v1, v[254:255], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xc0,0xd9,0x01,0xfe,0x03,0x05]
+
+ds_cmpst_rtn_b64 v[5:6], v1, v[2:3], v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xc0,0xd9,0x01,0x02,0xfe,0x05]
+
+ds_cmpst_rtn_b64 v[5:6], v1, v[2:3], v[3:4]
+// CHECK: [0x00,0x00,0xc0,0xd9,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b64 v[5:6], v1, v[2:3], v[3:4] offset:0
+// CHECK: [0x00,0x00,0xc0,0xd9,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b64 v[5:6], v1, v[2:3], v[3:4] offset:4
+// CHECK: [0x04,0x00,0xc0,0xd9,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b64 v[5:6], v1, v[2:3], v[3:4] offset:65535 gds
+// CHECK: [0xff,0xff,0xc2,0xd9,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f64 v[5:6], v1, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xc4,0xd9,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f64 v[254:255], v1, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xc4,0xd9,0x01,0x02,0x03,0xfe]
+
+ds_cmpst_rtn_f64 v[5:6], v255, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xc4,0xd9,0xff,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f64 v[5:6], v1, v[254:255], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xc4,0xd9,0x01,0xfe,0x03,0x05]
+
+ds_cmpst_rtn_f64 v[5:6], v1, v[2:3], v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xc4,0xd9,0x01,0x02,0xfe,0x05]
+
+ds_cmpst_rtn_f64 v[5:6], v1, v[2:3], v[3:4]
+// CHECK: [0x00,0x00,0xc4,0xd9,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f64 v[5:6], v1, v[2:3], v[3:4] offset:0
+// CHECK: [0x00,0x00,0xc4,0xd9,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f64 v[5:6], v1, v[2:3], v[3:4] offset:4
+// CHECK: [0x04,0x00,0xc4,0xd9,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f64 v[5:6], v1, v[2:3], v[3:4] offset:65535 gds
+// CHECK: [0xff,0xff,0xc6,0xd9,0x01,0x02,0x03,0x05]
+
+ds_min_rtn_f64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xc8,0xd9,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_f64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xc8,0xd9,0x01,0x02,0x00,0xfe]
+
+ds_min_rtn_f64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xc8,0xd9,0xff,0x02,0x00,0x05]
+
+ds_min_rtn_f64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xc8,0xd9,0x01,0xfe,0x00,0x05]
+
+ds_min_rtn_f64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xc8,0xd9,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_f64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xc8,0xd9,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_f64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xc8,0xd9,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_f64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xca,0xd9,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xcc,0xd9,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xcc,0xd9,0x01,0x02,0x00,0xfe]
+
+ds_max_rtn_f64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xcc,0xd9,0xff,0x02,0x00,0x05]
+
+ds_max_rtn_f64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xcc,0xd9,0x01,0xfe,0x00,0x05]
+
+ds_max_rtn_f64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xcc,0xd9,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xcc,0xd9,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xcc,0xd9,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xce,0xd9,0x01,0x02,0x00,0x05]
+
+ds_read_b64 v[5:6], v1 offset:65535
+// CHECK: [0xff,0xff,0xd8,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read_b64 v[254:255], v1 offset:65535
+// CHECK: [0xff,0xff,0xd8,0xd9,0x01,0x00,0x00,0xfe]
+
+ds_read_b64 v[5:6], v255 offset:65535
+// CHECK: [0xff,0xff,0xd8,0xd9,0xff,0x00,0x00,0x05]
+
+ds_read_b64 v[5:6], v1
+// CHECK: [0x00,0x00,0xd8,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read_b64 v[5:6], v1 offset:0
+// CHECK: [0x00,0x00,0xd8,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read_b64 v[5:6], v1 offset:4
+// CHECK: [0x04,0x00,0xd8,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read_b64 v[5:6], v1 offset:65535 gds
+// CHECK: [0xff,0xff,0xda,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read2_b64 v[5:8], v1 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0xdc,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read2_b64 v[252:255], v1 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0xdc,0xd9,0x01,0x00,0x00,0xfc]
+
+ds_read2_b64 v[5:8], v255 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0xdc,0xd9,0xff,0x00,0x00,0x05]
+
+ds_read2_b64 v[5:8], v1 offset1:255
+// CHECK: [0x00,0xff,0xdc,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read2_b64 v[5:8], v1 offset0:0 offset1:255
+// CHECK: [0x00,0xff,0xdc,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read2_b64 v[5:8], v1 offset0:16 offset1:255
+// CHECK: [0x10,0xff,0xdc,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read2_b64 v[5:8], v1 offset0:127
+// CHECK: [0x7f,0x00,0xdc,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read2_b64 v[5:8], v1 offset0:127 offset1:0
+// CHECK: [0x7f,0x00,0xdc,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read2_b64 v[5:8], v1 offset0:127 offset1:1
+// CHECK: [0x7f,0x01,0xdc,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read2_b64 v[5:8], v1 offset0:127 offset1:255 gds
+// CHECK: [0x7f,0xff,0xde,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b64 v[5:8], v1 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0xe0,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b64 v[252:255], v1 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0xe0,0xd9,0x01,0x00,0x00,0xfc]
+
+ds_read2st64_b64 v[5:8], v255 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0xe0,0xd9,0xff,0x00,0x00,0x05]
+
+ds_read2st64_b64 v[5:8], v1 offset1:255
+// CHECK: [0x00,0xff,0xe0,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b64 v[5:8], v1 offset0:0 offset1:255
+// CHECK: [0x00,0xff,0xe0,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b64 v[5:8], v1 offset0:16 offset1:255
+// CHECK: [0x10,0xff,0xe0,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b64 v[5:8], v1 offset0:127
+// CHECK: [0x7f,0x00,0xe0,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b64 v[5:8], v1 offset0:127 offset1:0
+// CHECK: [0x7f,0x00,0xe0,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b64 v[5:8], v1 offset0:127 offset1:1
+// CHECK: [0x7f,0x01,0xe0,0xd9,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b64 v[5:8], v1 offset0:127 offset1:255 gds
+// CHECK: [0x7f,0xff,0xe2,0xd9,0x01,0x00,0x00,0x05]
+
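+// The *_src2 operations below take only an address register, so the two
+// data-operand bytes stay 0x00 throughout; as in the DS tests above, the
+// gds variant sets bit 1 of the third encoding byte (e.g. 0x00 -> 0x02
+// for ds_add_src2_u32).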
+ds_add_src2_u32 v1 offset:65535
+// CHECK: [0xff,0xff,0x00,0xda,0x01,0x00,0x00,0x00]
+
+ds_add_src2_u32 v255 offset:65535
+// CHECK: [0xff,0xff,0x00,0xda,0xff,0x00,0x00,0x00]
+
+ds_add_src2_u32 v1
+// CHECK: [0x00,0x00,0x00,0xda,0x01,0x00,0x00,0x00]
+
+ds_add_src2_u32 v1 offset:0
+// CHECK: [0x00,0x00,0x00,0xda,0x01,0x00,0x00,0x00]
+
+ds_add_src2_u32 v1 offset:4
+// CHECK: [0x04,0x00,0x00,0xda,0x01,0x00,0x00,0x00]
+
+ds_add_src2_u32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x02,0xda,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u32 v1 offset:65535
+// CHECK: [0xff,0xff,0x04,0xda,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u32 v255 offset:65535
+// CHECK: [0xff,0xff,0x04,0xda,0xff,0x00,0x00,0x00]
+
+ds_sub_src2_u32 v1
+// CHECK: [0x00,0x00,0x04,0xda,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u32 v1 offset:0
+// CHECK: [0x00,0x00,0x04,0xda,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u32 v1 offset:4
+// CHECK: [0x04,0x00,0x04,0xda,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x06,0xda,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u32 v1 offset:65535
+// CHECK: [0xff,0xff,0x08,0xda,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u32 v255 offset:65535
+// CHECK: [0xff,0xff,0x08,0xda,0xff,0x00,0x00,0x00]
+
+ds_rsub_src2_u32 v1
+// CHECK: [0x00,0x00,0x08,0xda,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u32 v1 offset:0
+// CHECK: [0x00,0x00,0x08,0xda,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u32 v1 offset:4
+// CHECK: [0x04,0x00,0x08,0xda,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x0a,0xda,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u32 v1 offset:65535
+// CHECK: [0xff,0xff,0x0c,0xda,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u32 v255 offset:65535
+// CHECK: [0xff,0xff,0x0c,0xda,0xff,0x00,0x00,0x00]
+
+ds_inc_src2_u32 v1
+// CHECK: [0x00,0x00,0x0c,0xda,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u32 v1 offset:0
+// CHECK: [0x00,0x00,0x0c,0xda,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u32 v1 offset:4
+// CHECK: [0x04,0x00,0x0c,0xda,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x0e,0xda,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u32 v1 offset:65535
+// CHECK: [0xff,0xff,0x10,0xda,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u32 v255 offset:65535
+// CHECK: [0xff,0xff,0x10,0xda,0xff,0x00,0x00,0x00]
+
+ds_dec_src2_u32 v1
+// CHECK: [0x00,0x00,0x10,0xda,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u32 v1 offset:0
+// CHECK: [0x00,0x00,0x10,0xda,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u32 v1 offset:4
+// CHECK: [0x04,0x00,0x10,0xda,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x12,0xda,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i32 v1 offset:65535
+// CHECK: [0xff,0xff,0x14,0xda,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i32 v255 offset:65535
+// CHECK: [0xff,0xff,0x14,0xda,0xff,0x00,0x00,0x00]
+
+ds_min_src2_i32 v1
+// CHECK: [0x00,0x00,0x14,0xda,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i32 v1 offset:0
+// CHECK: [0x00,0x00,0x14,0xda,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i32 v1 offset:4
+// CHECK: [0x04,0x00,0x14,0xda,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x16,0xda,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i32 v1 offset:65535
+// CHECK: [0xff,0xff,0x18,0xda,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i32 v255 offset:65535
+// CHECK: [0xff,0xff,0x18,0xda,0xff,0x00,0x00,0x00]
+
+ds_max_src2_i32 v1
+// CHECK: [0x00,0x00,0x18,0xda,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i32 v1 offset:0
+// CHECK: [0x00,0x00,0x18,0xda,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i32 v1 offset:4
+// CHECK: [0x04,0x00,0x18,0xda,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x1a,0xda,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u32 v1 offset:65535
+// CHECK: [0xff,0xff,0x1c,0xda,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u32 v255 offset:65535
+// CHECK: [0xff,0xff,0x1c,0xda,0xff,0x00,0x00,0x00]
+
+ds_min_src2_u32 v1
+// CHECK: [0x00,0x00,0x1c,0xda,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u32 v1 offset:0
+// CHECK: [0x00,0x00,0x1c,0xda,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u32 v1 offset:4
+// CHECK: [0x04,0x00,0x1c,0xda,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x1e,0xda,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u32 v1 offset:65535
+// CHECK: [0xff,0xff,0x20,0xda,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u32 v255 offset:65535
+// CHECK: [0xff,0xff,0x20,0xda,0xff,0x00,0x00,0x00]
+
+ds_max_src2_u32 v1
+// CHECK: [0x00,0x00,0x20,0xda,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u32 v1 offset:0
+// CHECK: [0x00,0x00,0x20,0xda,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u32 v1 offset:4
+// CHECK: [0x04,0x00,0x20,0xda,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x22,0xda,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b32 v1 offset:65535
+// CHECK: [0xff,0xff,0x28,0xda,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b32 v255 offset:65535
+// CHECK: [0xff,0xff,0x28,0xda,0xff,0x00,0x00,0x00]
+
+ds_or_src2_b32 v1
+// CHECK: [0x00,0x00,0x28,0xda,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b32 v1 offset:0
+// CHECK: [0x00,0x00,0x28,0xda,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b32 v1 offset:4
+// CHECK: [0x04,0x00,0x28,0xda,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x2a,0xda,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b32 v1 offset:65535
+// CHECK: [0xff,0xff,0x2c,0xda,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b32 v255 offset:65535
+// CHECK: [0xff,0xff,0x2c,0xda,0xff,0x00,0x00,0x00]
+
+ds_xor_src2_b32 v1
+// CHECK: [0x00,0x00,0x2c,0xda,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b32 v1 offset:0
+// CHECK: [0x00,0x00,0x2c,0xda,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b32 v1 offset:4
+// CHECK: [0x04,0x00,0x2c,0xda,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x2e,0xda,0x01,0x00,0x00,0x00]
+
+ds_write_src2_b32 v1
+// CHECK: [0x00,0x00,0x34,0xda,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f32 v1 offset:65535
+// CHECK: [0xff,0xff,0x48,0xda,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f32 v255 offset:65535
+// CHECK: [0xff,0xff,0x48,0xda,0xff,0x00,0x00,0x00]
+
+ds_min_src2_f32 v1
+// CHECK: [0x00,0x00,0x48,0xda,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f32 v1 offset:0
+// CHECK: [0x00,0x00,0x48,0xda,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f32 v1 offset:4
+// CHECK: [0x04,0x00,0x48,0xda,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x4a,0xda,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f32 v1 offset:65535
+// CHECK: [0xff,0xff,0x4c,0xda,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f32 v255 offset:65535
+// CHECK: [0xff,0xff,0x4c,0xda,0xff,0x00,0x00,0x00]
+
+ds_max_src2_f32 v1
+// CHECK: [0x00,0x00,0x4c,0xda,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f32 v1 offset:0
+// CHECK: [0x00,0x00,0x4c,0xda,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f32 v1 offset:4
+// CHECK: [0x04,0x00,0x4c,0xda,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x4e,0xda,0x01,0x00,0x00,0x00]
+
+ds_add_src2_u64 v1 offset:65535
+// CHECK: [0xff,0xff,0x00,0xdb,0x01,0x00,0x00,0x00]
+
+ds_add_src2_u64 v255 offset:65535
+// CHECK: [0xff,0xff,0x00,0xdb,0xff,0x00,0x00,0x00]
+
+ds_add_src2_u64 v1
+// CHECK: [0x00,0x00,0x00,0xdb,0x01,0x00,0x00,0x00]
+
+ds_add_src2_u64 v1 offset:0
+// CHECK: [0x00,0x00,0x00,0xdb,0x01,0x00,0x00,0x00]
+
+ds_add_src2_u64 v1 offset:4
+// CHECK: [0x04,0x00,0x00,0xdb,0x01,0x00,0x00,0x00]
+
+ds_add_src2_u64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x02,0xdb,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u64 v1 offset:65535
+// CHECK: [0xff,0xff,0x04,0xdb,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u64 v255 offset:65535
+// CHECK: [0xff,0xff,0x04,0xdb,0xff,0x00,0x00,0x00]
+
+ds_sub_src2_u64 v1
+// CHECK: [0x00,0x00,0x04,0xdb,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u64 v1 offset:0
+// CHECK: [0x00,0x00,0x04,0xdb,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u64 v1 offset:4
+// CHECK: [0x04,0x00,0x04,0xdb,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x06,0xdb,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u64 v1 offset:65535
+// CHECK: [0xff,0xff,0x08,0xdb,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u64 v255 offset:65535
+// CHECK: [0xff,0xff,0x08,0xdb,0xff,0x00,0x00,0x00]
+
+ds_rsub_src2_u64 v1
+// CHECK: [0x00,0x00,0x08,0xdb,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u64 v1 offset:0
+// CHECK: [0x00,0x00,0x08,0xdb,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u64 v1 offset:4
+// CHECK: [0x04,0x00,0x08,0xdb,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x0a,0xdb,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u64 v1 offset:65535
+// CHECK: [0xff,0xff,0x0c,0xdb,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u64 v255 offset:65535
+// CHECK: [0xff,0xff,0x0c,0xdb,0xff,0x00,0x00,0x00]
+
+ds_inc_src2_u64 v1
+// CHECK: [0x00,0x00,0x0c,0xdb,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u64 v1 offset:0
+// CHECK: [0x00,0x00,0x0c,0xdb,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u64 v1 offset:4
+// CHECK: [0x04,0x00,0x0c,0xdb,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x0e,0xdb,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u64 v1 offset:65535
+// CHECK: [0xff,0xff,0x10,0xdb,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u64 v255 offset:65535
+// CHECK: [0xff,0xff,0x10,0xdb,0xff,0x00,0x00,0x00]
+
+ds_dec_src2_u64 v1
+// CHECK: [0x00,0x00,0x10,0xdb,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u64 v1 offset:0
+// CHECK: [0x00,0x00,0x10,0xdb,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u64 v1 offset:4
+// CHECK: [0x04,0x00,0x10,0xdb,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x12,0xdb,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i64 v1 offset:65535
+// CHECK: [0xff,0xff,0x14,0xdb,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i64 v255 offset:65535
+// CHECK: [0xff,0xff,0x14,0xdb,0xff,0x00,0x00,0x00]
+
+ds_min_src2_i64 v1
+// CHECK: [0x00,0x00,0x14,0xdb,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i64 v1 offset:0
+// CHECK: [0x00,0x00,0x14,0xdb,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i64 v1 offset:4
+// CHECK: [0x04,0x00,0x14,0xdb,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x16,0xdb,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i64 v1 offset:65535
+// CHECK: [0xff,0xff,0x18,0xdb,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i64 v255 offset:65535
+// CHECK: [0xff,0xff,0x18,0xdb,0xff,0x00,0x00,0x00]
+
+ds_max_src2_i64 v1
+// CHECK: [0x00,0x00,0x18,0xdb,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i64 v1 offset:0
+// CHECK: [0x00,0x00,0x18,0xdb,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i64 v1 offset:4
+// CHECK: [0x04,0x00,0x18,0xdb,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x1a,0xdb,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u64 v1 offset:65535
+// CHECK: [0xff,0xff,0x1c,0xdb,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u64 v255 offset:65535
+// CHECK: [0xff,0xff,0x1c,0xdb,0xff,0x00,0x00,0x00]
+
+ds_min_src2_u64 v1
+// CHECK: [0x00,0x00,0x1c,0xdb,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u64 v1 offset:0
+// CHECK: [0x00,0x00,0x1c,0xdb,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u64 v1 offset:4
+// CHECK: [0x04,0x00,0x1c,0xdb,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x1e,0xdb,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u64 v1 offset:65535
+// CHECK: [0xff,0xff,0x20,0xdb,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u64 v255 offset:65535
+// CHECK: [0xff,0xff,0x20,0xdb,0xff,0x00,0x00,0x00]
+
+ds_max_src2_u64 v1
+// CHECK: [0x00,0x00,0x20,0xdb,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u64 v1 offset:0
+// CHECK: [0x00,0x00,0x20,0xdb,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u64 v1 offset:4
+// CHECK: [0x04,0x00,0x20,0xdb,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x22,0xdb,0x01,0x00,0x00,0x00]
+
+ds_and_src2_b64 v1 offset:65535
+// CHECK: [0xff,0xff,0x24,0xdb,0x01,0x00,0x00,0x00]
+
+ds_and_src2_b64 v255 offset:65535
+// CHECK: [0xff,0xff,0x24,0xdb,0xff,0x00,0x00,0x00]
+
+ds_and_src2_b64 v1
+// CHECK: [0x00,0x00,0x24,0xdb,0x01,0x00,0x00,0x00]
+
+ds_and_src2_b64 v1 offset:0
+// CHECK: [0x00,0x00,0x24,0xdb,0x01,0x00,0x00,0x00]
+
+ds_and_src2_b64 v1 offset:4
+// CHECK: [0x04,0x00,0x24,0xdb,0x01,0x00,0x00,0x00]
+
+ds_and_src2_b64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x26,0xdb,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b64 v1 offset:65535
+// CHECK: [0xff,0xff,0x28,0xdb,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b64 v255 offset:65535
+// CHECK: [0xff,0xff,0x28,0xdb,0xff,0x00,0x00,0x00]
+
+ds_or_src2_b64 v1
+// CHECK: [0x00,0x00,0x28,0xdb,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b64 v1 offset:0
+// CHECK: [0x00,0x00,0x28,0xdb,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b64 v1 offset:4
+// CHECK: [0x04,0x00,0x28,0xdb,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x2a,0xdb,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b64 v1 offset:65535
+// CHECK: [0xff,0xff,0x2c,0xdb,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b64 v255 offset:65535
+// CHECK: [0xff,0xff,0x2c,0xdb,0xff,0x00,0x00,0x00]
+
+ds_xor_src2_b64 v1
+// CHECK: [0x00,0x00,0x2c,0xdb,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b64 v1 offset:0
+// CHECK: [0x00,0x00,0x2c,0xdb,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b64 v1 offset:4
+// CHECK: [0x04,0x00,0x2c,0xdb,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x2e,0xdb,0x01,0x00,0x00,0x00]
+
+ds_write_src2_b64 v1
+// CHECK: [0x00,0x00,0x34,0xdb,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f64 v1 offset:65535
+// CHECK: [0xff,0xff,0x48,0xdb,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f64 v255 offset:65535
+// CHECK: [0xff,0xff,0x48,0xdb,0xff,0x00,0x00,0x00]
+
+ds_min_src2_f64 v1
+// CHECK: [0x00,0x00,0x48,0xdb,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f64 v1 offset:0
+// CHECK: [0x00,0x00,0x48,0xdb,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f64 v1 offset:4
+// CHECK: [0x04,0x00,0x48,0xdb,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x4a,0xdb,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f64 v1 offset:65535
+// CHECK: [0xff,0xff,0x4c,0xdb,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f64 v255 offset:65535
+// CHECK: [0xff,0xff,0x4c,0xdb,0xff,0x00,0x00,0x00]
+
+ds_max_src2_f64 v1
+// CHECK: [0x00,0x00,0x4c,0xdb,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f64 v1 offset:0
+// CHECK: [0x00,0x00,0x4c,0xdb,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f64 v1 offset:4
+// CHECK: [0x04,0x00,0x4c,0xdb,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x4e,0xdb,0x01,0x00,0x00,0x00]
+
+ds_wrap_rtn_b32 v255, v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0xd0,0xd8,0x01,0x02,0x03,0xff]
+
+ds_wrap_rtn_b32 v255, v1, v2, v3 offset:65535 gds
+// CHECK: [0xff,0xff,0xd2,0xd8,0x01,0x02,0x03,0xff]
+
+ds_wrap_rtn_b32 v255, v1, v2, v3
+// CHECK: [0x00,0x00,0xd0,0xd8,0x01,0x02,0x03,0xff]
+
+ds_condxchg32_rtn_b64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xf8,0xd9,0x01,0x02,0x00,0x05]
+
+ds_condxchg32_rtn_b64 v[5:6], v1, v[2:3] gds
+// CHECK: [0x00,0x00,0xfa,0xd9,0x01,0x02,0x00,0x05]
+
+ds_condxchg32_rtn_b64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xf8,0xd9,0x01,0xfe,0x00,0x05]
+
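+// Export tests: the low nibble of the first encoding byte is the enable
+// mask, one bit per source operand, cleared by "off". The target field
+// sits above it, so mrtz yields 0x8f, null 0x9f, pos0 0xcf, and param0
+// carries into the second byte (0x02); vm appears as 0x10 in that byte.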
+exp mrt0, v0, v0, v0, v0
+// CHECK: [0x0f,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp mrtz, v0, v0, v0, v0
+// CHECK: [0x8f,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp null, v0, v0, v0, v0
+// CHECK: [0x9f,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp pos0, v0, v0, v0, v0
+// CHECK: [0xcf,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp param0, v0, v0, v0, v0
+// CHECK: [0x0f,0x02,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp mrt0, v255, v0, v0, v0
+// CHECK: [0x0f,0x00,0x00,0xf8,0xff,0x00,0x00,0x00]
+
+exp mrt0, v0, v255, v0, v0
+// CHECK: [0x0f,0x00,0x00,0xf8,0x00,0xff,0x00,0x00]
+
+exp mrt0, v0, v0, v255, v0
+// CHECK: [0x0f,0x00,0x00,0xf8,0x00,0x00,0xff,0x00]
+
+exp mrt0, v0, v0, v0, v255
+// CHECK: [0x0f,0x00,0x00,0xf8,0x00,0x00,0x00,0xff]
+
+exp mrt0, v0, off, off, off
+// CHECK: [0x01,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp mrt0, off, v0, off, off
+// CHECK: [0x02,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp mrt0, v0, v0, off, off
+// CHECK: [0x03,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp mrt0, off, off, v0, off
+// CHECK: [0x04,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp mrt0, v0, off, v0, off
+// CHECK: [0x05,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp mrt0, off, v0, v0, off
+// CHECK: [0x06,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp mrt0, v0, v0, v0, off
+// CHECK: [0x07,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp mrt0, off, off, off, v0
+// CHECK: [0x08,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp mrt0, v0, off, off, v0
+// CHECK: [0x09,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp mrt0, off, v0, off, v0
+// CHECK: [0x0a,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp mrt0, v0, v0, off, v0
+// CHECK: [0x0b,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp mrt0, off, off, v0, v0
+// CHECK: [0x0c,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp mrt0, v0, off, v0, v0
+// CHECK: [0x0d,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp mrt0, off, v0, v0, v0
+// CHECK: [0x0e,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp mrt0, off, off, off, off
+// CHECK: [0x00,0x00,0x00,0xf8,0x00,0x00,0x00,0x00]
+
+exp mrt0, v0, v0, v0, v0 vm
+// CHECK: [0x0f,0x10,0x00,0xf8,0x00,0x00,0x00,0x00]
+
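+// FLAT tests: glc and slc set bits 0 and 1 of the third encoding byte,
+// e.g. flat_load_ubyte goes from 0x20 to 0x21 (glc) and 0x22 (slc).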
+flat_load_ubyte v5, v[1:2]
+// CHECK: [0x00,0x00,0x20,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_ubyte v255, v[1:2]
+// CHECK: [0x00,0x00,0x20,0xdc,0x01,0x00,0x00,0xff]
+
+flat_load_ubyte v5, v[254:255]
+// CHECK: [0x00,0x00,0x20,0xdc,0xfe,0x00,0x00,0x05]
+
+flat_load_ubyte v5, v[1:2] glc
+// CHECK: [0x00,0x00,0x21,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_ubyte v5, v[1:2] slc
+// CHECK: [0x00,0x00,0x22,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_sbyte v5, v[1:2]
+// CHECK: [0x00,0x00,0x24,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_sbyte v255, v[1:2]
+// CHECK: [0x00,0x00,0x24,0xdc,0x01,0x00,0x00,0xff]
+
+flat_load_sbyte v5, v[254:255]
+// CHECK: [0x00,0x00,0x24,0xdc,0xfe,0x00,0x00,0x05]
+
+flat_load_sbyte v5, v[1:2] glc
+// CHECK: [0x00,0x00,0x25,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_sbyte v5, v[1:2] slc
+// CHECK: [0x00,0x00,0x26,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_ushort v5, v[1:2]
+// CHECK: [0x00,0x00,0x28,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_ushort v255, v[1:2]
+// CHECK: [0x00,0x00,0x28,0xdc,0x01,0x00,0x00,0xff]
+
+flat_load_ushort v5, v[254:255]
+// CHECK: [0x00,0x00,0x28,0xdc,0xfe,0x00,0x00,0x05]
+
+flat_load_ushort v5, v[1:2] glc
+// CHECK: [0x00,0x00,0x29,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_ushort v5, v[1:2] slc
+// CHECK: [0x00,0x00,0x2a,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_sshort v5, v[1:2]
+// CHECK: [0x00,0x00,0x2c,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_sshort v255, v[1:2]
+// CHECK: [0x00,0x00,0x2c,0xdc,0x01,0x00,0x00,0xff]
+
+flat_load_sshort v5, v[254:255]
+// CHECK: [0x00,0x00,0x2c,0xdc,0xfe,0x00,0x00,0x05]
+
+flat_load_sshort v5, v[1:2] glc
+// CHECK: [0x00,0x00,0x2d,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_sshort v5, v[1:2] slc
+// CHECK: [0x00,0x00,0x2e,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dword v5, v[1:2]
+// CHECK: [0x00,0x00,0x30,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dword v255, v[1:2]
+// CHECK: [0x00,0x00,0x30,0xdc,0x01,0x00,0x00,0xff]
+
+flat_load_dword v5, v[254:255]
+// CHECK: [0x00,0x00,0x30,0xdc,0xfe,0x00,0x00,0x05]
+
+flat_load_dword v5, v[1:2] glc
+// CHECK: [0x00,0x00,0x31,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dword v5, v[1:2] slc
+// CHECK: [0x00,0x00,0x32,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dwordx2 v[5:6], v[1:2]
+// CHECK: [0x00,0x00,0x34,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dwordx2 v[254:255], v[1:2]
+// CHECK: [0x00,0x00,0x34,0xdc,0x01,0x00,0x00,0xfe]
+
+flat_load_dwordx2 v[5:6], v[254:255]
+// CHECK: [0x00,0x00,0x34,0xdc,0xfe,0x00,0x00,0x05]
+
+flat_load_dwordx2 v[5:6], v[1:2] glc
+// CHECK: [0x00,0x00,0x35,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dwordx2 v[5:6], v[1:2] slc
+// CHECK: [0x00,0x00,0x36,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dwordx4 v[5:8], v[1:2]
+// CHECK: [0x00,0x00,0x38,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dwordx4 v[252:255], v[1:2]
+// CHECK: [0x00,0x00,0x38,0xdc,0x01,0x00,0x00,0xfc]
+
+flat_load_dwordx4 v[5:8], v[254:255]
+// CHECK: [0x00,0x00,0x38,0xdc,0xfe,0x00,0x00,0x05]
+
+flat_load_dwordx4 v[5:8], v[1:2] glc
+// CHECK: [0x00,0x00,0x39,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dwordx4 v[5:8], v[1:2] slc
+// CHECK: [0x00,0x00,0x3a,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dwordx3 v[5:7], v[1:2]
+// CHECK: [0x00,0x00,0x3c,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dwordx3 v[253:255], v[1:2]
+// CHECK: [0x00,0x00,0x3c,0xdc,0x01,0x00,0x00,0xfd]
+
+flat_load_dwordx3 v[5:7], v[254:255]
+// CHECK: [0x00,0x00,0x3c,0xdc,0xfe,0x00,0x00,0x05]
+
+flat_load_dwordx3 v[5:7], v[1:2] glc
+// CHECK: [0x00,0x00,0x3d,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dwordx3 v[5:7], v[1:2] slc
+// CHECK: [0x00,0x00,0x3e,0xdc,0x01,0x00,0x00,0x05]
+
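+// Store forms carry the source register in the DATA field (the sixth byte
+// of each check) and leave VDST clear, so v255 appears there rather than
+// in the final byte as it does for the loads above.
+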
+flat_store_byte v[1:2], v2
+// CHECK: [0x00,0x00,0x60,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_byte v[254:255], v2
+// CHECK: [0x00,0x00,0x60,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_store_byte v[1:2], v255
+// CHECK: [0x00,0x00,0x60,0xdc,0x01,0xff,0x00,0x00]
+
+flat_store_byte v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x61,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_byte v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x62,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_short v[1:2], v2
+// CHECK: [0x00,0x00,0x68,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_short v[254:255], v2
+// CHECK: [0x00,0x00,0x68,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_store_short v[1:2], v255
+// CHECK: [0x00,0x00,0x68,0xdc,0x01,0xff,0x00,0x00]
+
+flat_store_short v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x69,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_short v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x6a,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dword v[1:2], v2
+// CHECK: [0x00,0x00,0x70,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dword v[254:255], v2
+// CHECK: [0x00,0x00,0x70,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_store_dword v[1:2], v255
+// CHECK: [0x00,0x00,0x70,0xdc,0x01,0xff,0x00,0x00]
+
+flat_store_dword v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x71,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dword v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x72,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dwordx2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x74,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dwordx2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x74,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_store_dwordx2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x74,0xdc,0x01,0xfe,0x00,0x00]
+
+flat_store_dwordx2 v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x75,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dwordx2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x76,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dwordx4 v[1:2], v[2:5]
+// CHECK: [0x00,0x00,0x78,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dwordx4 v[254:255], v[2:5]
+// CHECK: [0x00,0x00,0x78,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_store_dwordx4 v[1:2], v[252:255]
+// CHECK: [0x00,0x00,0x78,0xdc,0x01,0xfc,0x00,0x00]
+
+flat_store_dwordx4 v[1:2], v[2:5] glc
+// CHECK: [0x00,0x00,0x79,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dwordx4 v[1:2], v[2:5] slc
+// CHECK: [0x00,0x00,0x7a,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dwordx3 v[1:2], v[2:4]
+// CHECK: [0x00,0x00,0x7c,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dwordx3 v[254:255], v[2:4]
+// CHECK: [0x00,0x00,0x7c,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_store_dwordx3 v[1:2], v[253:255]
+// CHECK: [0x00,0x00,0x7c,0xdc,0x01,0xfd,0x00,0x00]
+
+flat_store_dwordx3 v[1:2], v[2:4] glc
+// CHECK: [0x00,0x00,0x7d,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dwordx3 v[1:2], v[2:4] slc
+// CHECK: [0x00,0x00,0x7e,0xdc,0x01,0x02,0x00,0x00]
+
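+// Atomics write a result register only when GLC is set (the old memory
+// value is returned), so the glc variants below take an extra leading
+// VDST operand that the non-returning forms omit.
+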
+flat_atomic_swap v[1:2], v2
+// CHECK: [0x00,0x00,0xc0,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_swap v[254:255], v2
+// CHECK: [0x00,0x00,0xc0,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_atomic_swap v[1:2], v255
+// CHECK: [0x00,0x00,0xc0,0xdc,0x01,0xff,0x00,0x00]
+
+flat_atomic_swap v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0xc1,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_swap v[1:2], v2 slc
+// CHECK: [0x00,0x00,0xc2,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_cmpswap v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0xc4,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_cmpswap v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0xc4,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_atomic_cmpswap v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0xc4,0xdc,0x01,0xfe,0x00,0x00]
+
+flat_atomic_cmpswap v0, v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0xc5,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_cmpswap v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0xc6,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_add v[1:2], v2
+// CHECK: [0x00,0x00,0xc8,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_add v[254:255], v2
+// CHECK: [0x00,0x00,0xc8,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_atomic_add v[1:2], v255
+// CHECK: [0x00,0x00,0xc8,0xdc,0x01,0xff,0x00,0x00]
+
+flat_atomic_add v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0xc9,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_add v[1:2], v2 slc
+// CHECK: [0x00,0x00,0xca,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_sub v[1:2], v2
+// CHECK: [0x00,0x00,0xcc,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_sub v[254:255], v2
+// CHECK: [0x00,0x00,0xcc,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_atomic_sub v[1:2], v255
+// CHECK: [0x00,0x00,0xcc,0xdc,0x01,0xff,0x00,0x00]
+
+flat_atomic_sub v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0xcd,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_sub v[1:2], v2 slc
+// CHECK: [0x00,0x00,0xce,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_smin v[1:2], v2
+// CHECK: [0x00,0x00,0xd4,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_smin v[254:255], v2
+// CHECK: [0x00,0x00,0xd4,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_atomic_smin v[1:2], v255
+// CHECK: [0x00,0x00,0xd4,0xdc,0x01,0xff,0x00,0x00]
+
+flat_atomic_smin v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0xd5,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_smin v[1:2], v2 slc
+// CHECK: [0x00,0x00,0xd6,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_umin v[1:2], v2
+// CHECK: [0x00,0x00,0xd8,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_umin v[254:255], v2
+// CHECK: [0x00,0x00,0xd8,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_atomic_umin v[1:2], v255
+// CHECK: [0x00,0x00,0xd8,0xdc,0x01,0xff,0x00,0x00]
+
+flat_atomic_umin v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0xd9,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_umin v[1:2], v2 slc
+// CHECK: [0x00,0x00,0xda,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_smax v[1:2], v2
+// CHECK: [0x00,0x00,0xdc,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_smax v[254:255], v2
+// CHECK: [0x00,0x00,0xdc,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_atomic_smax v[1:2], v255
+// CHECK: [0x00,0x00,0xdc,0xdc,0x01,0xff,0x00,0x00]
+
+flat_atomic_smax v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0xdd,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_smax v[1:2], v2 slc
+// CHECK: [0x00,0x00,0xde,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_umax v[1:2], v2
+// CHECK: [0x00,0x00,0xe0,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_umax v[254:255], v2
+// CHECK: [0x00,0x00,0xe0,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_atomic_umax v[1:2], v255
+// CHECK: [0x00,0x00,0xe0,0xdc,0x01,0xff,0x00,0x00]
+
+flat_atomic_umax v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0xe1,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_umax v[1:2], v2 slc
+// CHECK: [0x00,0x00,0xe2,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_and v[1:2], v2
+// CHECK: [0x00,0x00,0xe4,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_and v[254:255], v2
+// CHECK: [0x00,0x00,0xe4,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_atomic_and v[1:2], v255
+// CHECK: [0x00,0x00,0xe4,0xdc,0x01,0xff,0x00,0x00]
+
+flat_atomic_and v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0xe5,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_and v[1:2], v2 slc
+// CHECK: [0x00,0x00,0xe6,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_or v[1:2], v2
+// CHECK: [0x00,0x00,0xe8,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_or v[254:255], v2
+// CHECK: [0x00,0x00,0xe8,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_atomic_or v[1:2], v255
+// CHECK: [0x00,0x00,0xe8,0xdc,0x01,0xff,0x00,0x00]
+
+flat_atomic_or v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0xe9,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_or v[1:2], v2 slc
+// CHECK: [0x00,0x00,0xea,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_xor v[1:2], v2
+// CHECK: [0x00,0x00,0xec,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_xor v[254:255], v2
+// CHECK: [0x00,0x00,0xec,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_atomic_xor v[1:2], v255
+// CHECK: [0x00,0x00,0xec,0xdc,0x01,0xff,0x00,0x00]
+
+flat_atomic_xor v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0xed,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_xor v[1:2], v2 slc
+// CHECK: [0x00,0x00,0xee,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_inc v[1:2], v2
+// CHECK: [0x00,0x00,0xf0,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_inc v[254:255], v2
+// CHECK: [0x00,0x00,0xf0,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_atomic_inc v[1:2], v255
+// CHECK: [0x00,0x00,0xf0,0xdc,0x01,0xff,0x00,0x00]
+
+flat_atomic_inc v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0xf1,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_inc v[1:2], v2 slc
+// CHECK: [0x00,0x00,0xf2,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_dec v[1:2], v2
+// CHECK: [0x00,0x00,0xf4,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_dec v[254:255], v2
+// CHECK: [0x00,0x00,0xf4,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_atomic_dec v[1:2], v255
+// CHECK: [0x00,0x00,0xf4,0xdc,0x01,0xff,0x00,0x00]
+
+flat_atomic_dec v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0xf5,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_dec v[1:2], v2 slc
+// CHECK: [0x00,0x00,0xf6,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_fcmpswap v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0xf8,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_fcmpswap v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0xf8,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_atomic_fcmpswap v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0xf8,0xdc,0x01,0xfe,0x00,0x00]
+
+flat_atomic_fcmpswap v0, v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0xf9,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_fcmpswap v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0xfa,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_fmin v[1:2], v2
+// CHECK: [0x00,0x00,0xfc,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_fmin v[254:255], v2
+// CHECK: [0x00,0x00,0xfc,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_atomic_fmin v[1:2], v255
+// CHECK: [0x00,0x00,0xfc,0xdc,0x01,0xff,0x00,0x00]
+
+flat_atomic_fmin v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0xfd,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_fmin v[1:2], v2 slc
+// CHECK: [0x00,0x00,0xfe,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_fmax v[1:2], v2
+// CHECK: [0x00,0x00,0x00,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_fmax v[254:255], v2
+// CHECK: [0x00,0x00,0x00,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_fmax v[1:2], v255
+// CHECK: [0x00,0x00,0x00,0xdd,0x01,0xff,0x00,0x00]
+
+flat_atomic_fmax v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x01,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_fmax v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x02,0xdd,0x01,0x02,0x00,0x00]
+
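+// The _x2 atomics operate on 64-bit data: DATA and VDST become register
+// pairs, while the field layout stays the same.
+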
+flat_atomic_swap_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x40,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_swap_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x40,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_swap_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x40,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_swap_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x41,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_swap_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x42,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_cmpswap_x2 v[1:2], v[2:5]
+// CHECK: [0x00,0x00,0x44,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_cmpswap_x2 v[254:255], v[2:5]
+// CHECK: [0x00,0x00,0x44,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_cmpswap_x2 v[1:2], v[252:255]
+// CHECK: [0x00,0x00,0x44,0xdd,0x01,0xfc,0x00,0x00]
+
+flat_atomic_cmpswap_x2 v[0:1], v[1:2], v[2:5] glc
+// CHECK: [0x00,0x00,0x45,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_cmpswap_x2 v[1:2], v[2:5] slc
+// CHECK: [0x00,0x00,0x46,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_add_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x48,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_add_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x48,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_add_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x48,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_add_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x49,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_add_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x4a,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_sub_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x4c,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_sub_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x4c,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_sub_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x4c,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_sub_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x4d,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_sub_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x4e,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_smin_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x54,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_smin_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x54,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_smin_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x54,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_smin_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x55,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_smin_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x56,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_umin_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x58,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_umin_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x58,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_umin_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x58,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_umin_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x59,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_umin_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x5a,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_smax_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x5c,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_smax_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x5c,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_smax_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x5c,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_smax_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x5d,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_smax_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x5e,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_umax_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x60,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_umax_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x60,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_umax_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x60,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_umax_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x61,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_umax_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x62,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_and_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x64,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_and_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x64,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_and_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x64,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_and_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x65,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_and_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x66,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_or_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x68,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_or_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x68,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_or_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x68,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_or_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x69,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_or_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x6a,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_xor_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x6c,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_xor_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x6c,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_xor_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x6c,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_xor_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x6d,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_xor_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x6e,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_inc_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x70,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_inc_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x70,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_inc_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x70,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_inc_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x71,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_inc_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x72,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_dec_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x74,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_dec_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x74,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_dec_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x74,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_dec_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x75,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_dec_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x76,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_fcmpswap_x2 v[1:2], v[2:5]
+// CHECK: [0x00,0x00,0x78,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_fcmpswap_x2 v[254:255], v[2:5]
+// CHECK: [0x00,0x00,0x78,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_fcmpswap_x2 v[1:2], v[252:255]
+// CHECK: [0x00,0x00,0x78,0xdd,0x01,0xfc,0x00,0x00]
+
+flat_atomic_fcmpswap_x2 v[0:1], v[1:2], v[2:5] glc
+// CHECK: [0x00,0x00,0x79,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_fcmpswap_x2 v[1:2], v[2:5] slc
+// CHECK: [0x00,0x00,0x7a,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_fmin_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x7c,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_fmin_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x7c,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_fmin_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x7c,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_fmin_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x7d,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_fmin_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x7e,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_fmax_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x80,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_fmax_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x80,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_fmax_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x80,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_fmax_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x81,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_fmax_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x82,0xdd,0x01,0x02,0x00,0x00]
+
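+// MIMG encoding, for reference (again inferred from the checks): word 0
+// holds DMASK in bits [11:8], UNORM at bit 12, GLC at bit 13 and the
+// opcode in bits [24:18]; word 1 holds VADDR in bits [7:0], VDATA in bits
+// [15:8] and SRSRC (the resource SGPR base divided by four) in bits
+// [20:16]. The VDATA register count follows the number of bits set in
+// DMASK.
+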
+image_load v5, v[1:4], s[8:15] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v252, v[1:4], s[8:15] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf0,0x01,0xfc,0x02,0x00]
+
+image_load v5, v[252:255], s[8:15] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf0,0xfc,0x05,0x02,0x00]
+
+image_load v5, v[1:4], s[12:19] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf0,0x01,0x05,0x03,0x00]
+
+image_load v5, v[1:4], s[96:103] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf0,0x01,0x05,0x18,0x00]
+
+image_load v5, v[1:4], s[8:15] dmask:0x2
+// CHECK: [0x00,0x02,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:6], v[1:4], s[8:15] dmask:0x3
+// CHECK: [0x00,0x03,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v5, v[1:4], s[8:15] dmask:0x4
+// CHECK: [0x00,0x04,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:6], v[1:4], s[8:15] dmask:0x5
+// CHECK: [0x00,0x05,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:6], v[1:4], s[8:15] dmask:0x6
+// CHECK: [0x00,0x06,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:7], v[1:4], s[8:15] dmask:0x7
+// CHECK: [0x00,0x07,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v5, v[1:4], s[8:15] dmask:0x8
+// CHECK: [0x00,0x08,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:6], v[1:4], s[8:15] dmask:0x9
+// CHECK: [0x00,0x09,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:6], v[1:4], s[8:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:7], v[1:4], s[8:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:6], v[1:4], s[8:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:7], v[1:4], s[8:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:7], v[1:4], s[8:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:8], v[1:4], s[8:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v5, v[1:4], s[8:15] dmask:0x0
+// CHECK: [0x00,0x00,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v5, v[1:4], s[8:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v5, v[1:4], s[8:15] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v252, v[1:4], s[8:15] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf0,0x01,0xfc,0x02,0x00]
+
+image_load_mip v5, v[252:255], s[8:15] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf0,0xfc,0x05,0x02,0x00]
+
+image_load_mip v5, v[1:4], s[12:19] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf0,0x01,0x05,0x03,0x00]
+
+image_load_mip v5, v[1:4], s[96:103] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf0,0x01,0x05,0x18,0x00]
+
+image_load_mip v5, v[1:4], s[8:15] dmask:0x2
+// CHECK: [0x00,0x02,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:6], v[1:4], s[8:15] dmask:0x3
+// CHECK: [0x00,0x03,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v5, v[1:4], s[8:15] dmask:0x4
+// CHECK: [0x00,0x04,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:6], v[1:4], s[8:15] dmask:0x5
+// CHECK: [0x00,0x05,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:6], v[1:4], s[8:15] dmask:0x6
+// CHECK: [0x00,0x06,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:7], v[1:4], s[8:15] dmask:0x7
+// CHECK: [0x00,0x07,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v5, v[1:4], s[8:15] dmask:0x8
+// CHECK: [0x00,0x08,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:6], v[1:4], s[8:15] dmask:0x9
+// CHECK: [0x00,0x09,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:6], v[1:4], s[8:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:7], v[1:4], s[8:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:6], v[1:4], s[8:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:7], v[1:4], s[8:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:7], v[1:4], s[8:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:8], v[1:4], s[8:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v5, v[1:4], s[8:15] dmask:0x0
+// CHECK: [0x00,0x00,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v5, v[1:4], s[8:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_store v1, v[2:5], s[12:19] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v252, v[2:5], s[12:19] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x20,0xf0,0x02,0xfc,0x03,0x00]
+
+image_store v1, v[252:255], s[12:19] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x20,0xf0,0xfc,0x01,0x03,0x00]
+
+image_store v1, v[2:5], s[16:23] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x20,0xf0,0x02,0x01,0x04,0x00]
+
+image_store v1, v[2:5], s[96:103] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x20,0xf0,0x02,0x01,0x18,0x00]
+
+image_store v1, v[2:5], s[12:19] dmask:0x2 unorm
+// CHECK: [0x00,0x12,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:2], v[2:5], s[12:19] dmask:0x3 unorm
+// CHECK: [0x00,0x13,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v1, v[2:5], s[12:19] dmask:0x4 unorm
+// CHECK: [0x00,0x14,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:2], v[2:5], s[12:19] dmask:0x5 unorm
+// CHECK: [0x00,0x15,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:2], v[2:5], s[12:19] dmask:0x6 unorm
+// CHECK: [0x00,0x16,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:3], v[2:5], s[12:19] dmask:0x7 unorm
+// CHECK: [0x00,0x17,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v1, v[2:5], s[12:19] dmask:0x8 unorm
+// CHECK: [0x00,0x18,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:2], v[2:5], s[12:19] dmask:0x9 unorm
+// CHECK: [0x00,0x19,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:2], v[2:5], s[12:19] dmask:0xa unorm
+// CHECK: [0x00,0x1a,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:3], v[2:5], s[12:19] dmask:0xb unorm
+// CHECK: [0x00,0x1b,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:2], v[2:5], s[12:19] dmask:0xc unorm
+// CHECK: [0x00,0x1c,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:3], v[2:5], s[12:19] dmask:0xd unorm
+// CHECK: [0x00,0x1d,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:3], v[2:5], s[12:19] dmask:0xe unorm
+// CHECK: [0x00,0x1e,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:4], v[2:5], s[12:19] dmask:0xf unorm
+// CHECK: [0x00,0x1f,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v1, v[2:5], s[12:19] dmask:0x0 unorm
+// CHECK: [0x00,0x10,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v1, v[2:5], s[12:19] dmask:0x1 unorm glc
+// CHECK: [0x00,0x31,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v1, v[2:5], s[12:19] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v252, v[2:5], s[12:19] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x24,0xf0,0x02,0xfc,0x03,0x00]
+
+image_store_mip v1, v[252:255], s[12:19] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x24,0xf0,0xfc,0x01,0x03,0x00]
+
+image_store_mip v1, v[2:5], s[16:23] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x24,0xf0,0x02,0x01,0x04,0x00]
+
+image_store_mip v1, v[2:5], s[96:103] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x24,0xf0,0x02,0x01,0x18,0x00]
+
+image_store_mip v1, v[2:5], s[12:19] dmask:0x2 unorm
+// CHECK: [0x00,0x12,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:2], v[2:5], s[12:19] dmask:0x3 unorm
+// CHECK: [0x00,0x13,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v1, v[2:5], s[12:19] dmask:0x4 unorm
+// CHECK: [0x00,0x14,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:2], v[2:5], s[12:19] dmask:0x5 unorm
+// CHECK: [0x00,0x15,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:2], v[2:5], s[12:19] dmask:0x6 unorm
+// CHECK: [0x00,0x16,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:3], v[2:5], s[12:19] dmask:0x7 unorm
+// CHECK: [0x00,0x17,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v1, v[2:5], s[12:19] dmask:0x8 unorm
+// CHECK: [0x00,0x18,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:2], v[2:5], s[12:19] dmask:0x9 unorm
+// CHECK: [0x00,0x19,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:2], v[2:5], s[12:19] dmask:0xa unorm
+// CHECK: [0x00,0x1a,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:3], v[2:5], s[12:19] dmask:0xb unorm
+// CHECK: [0x00,0x1b,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:2], v[2:5], s[12:19] dmask:0xc unorm
+// CHECK: [0x00,0x1c,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:3], v[2:5], s[12:19] dmask:0xd unorm
+// CHECK: [0x00,0x1d,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:3], v[2:5], s[12:19] dmask:0xe unorm
+// CHECK: [0x00,0x1e,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:4], v[2:5], s[12:19] dmask:0xf unorm
+// CHECK: [0x00,0x1f,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v1, v[2:5], s[12:19] dmask:0x0 unorm
+// CHECK: [0x00,0x10,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v1, v[2:5], s[12:19] dmask:0x1 unorm glc
+// CHECK: [0x00,0x31,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_get_resinfo v5, v[1:4], s[8:15] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v252, v[1:4], s[8:15] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf0,0x01,0xfc,0x02,0x00]
+
+image_get_resinfo v5, v[252:255], s[8:15] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf0,0xfc,0x05,0x02,0x00]
+
+image_get_resinfo v5, v[1:4], s[12:19] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf0,0x01,0x05,0x03,0x00]
+
+image_get_resinfo v5, v[1:4], s[96:103] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf0,0x01,0x05,0x18,0x00]
+
+image_get_resinfo v5, v[1:4], s[8:15] dmask:0x2
+// CHECK: [0x00,0x02,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:6], v[1:4], s[8:15] dmask:0x3
+// CHECK: [0x00,0x03,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v5, v[1:4], s[8:15] dmask:0x4
+// CHECK: [0x00,0x04,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:6], v[1:4], s[8:15] dmask:0x5
+// CHECK: [0x00,0x05,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:6], v[1:4], s[8:15] dmask:0x6
+// CHECK: [0x00,0x06,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:7], v[1:4], s[8:15] dmask:0x7
+// CHECK: [0x00,0x07,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v5, v[1:4], s[8:15] dmask:0x8
+// CHECK: [0x00,0x08,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:6], v[1:4], s[8:15] dmask:0x9
+// CHECK: [0x00,0x09,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:6], v[1:4], s[8:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:7], v[1:4], s[8:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:6], v[1:4], s[8:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:7], v[1:4], s[8:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:7], v[1:4], s[8:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:8], v[1:4], s[8:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v5, v[1:4], s[8:15] dmask:0x0
+// CHECK: [0x00,0x00,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v5, v[1:4], s[8:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x38,0xf0,0x01,0x05,0x02,0x00]
+
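+// The sample variants add a sampler descriptor: SSAMP (sampler SGPR base
+// divided by four) occupies bits [25:21] of word 1, so s[12:15] adds 0x60
+// to the SRSRC byte (0x02 becoming 0x62), and high bases such as
+// s[100:103] or ttmp[8:11] spill over into the final byte.
+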
+image_sample v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample v5, v[1:4], s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf0,0x01,0x05,0x78,0x00]
+
+image_sample v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample v5, v[1:4], s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf0,0x01,0x05,0x22,0x03]
+
+image_sample v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x84,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_cl v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x84,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_cl v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x84,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_cl v5, v[1:4], s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x84,0xf0,0x01,0x05,0x78,0x00]
+
+image_sample_cl v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x84,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_cl v5, v[1:4], s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x84,0xf0,0x01,0x05,0x22,0x03]
+
+image_sample_cl v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x84,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x90,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_l v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x90,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_l v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x90,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_l v5, v[1:4], s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x90,0xf0,0x01,0x05,0x78,0x00]
+
+image_sample_l v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x90,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_l v5, v[1:4], s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x90,0xf0,0x01,0x05,0x22,0x03]
+
+image_sample_l v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x90,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_l v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x94,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_b v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x94,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_b v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x94,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_b v5, v[1:4], s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x94,0xf0,0x01,0x05,0x78,0x00]
+
+image_sample_b v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x94,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_b v5, v[1:4], s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x94,0xf0,0x01,0x05,0x22,0x03]
+
+image_sample_b v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x94,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_b v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x98,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_b_cl v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x98,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_b_cl v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x98,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_b_cl v5, v[1:4], s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x98,0xf0,0x01,0x05,0x78,0x00]
+
+image_sample_b_cl v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x98,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_b_cl v5, v[1:4], s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x98,0xf0,0x01,0x05,0x22,0x03]
+
+image_sample_b_cl v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x98,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x9c,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_lz v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x9c,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_lz v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x9c,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_lz v5, v[1:4], s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x9c,0xf0,0x01,0x05,0x78,0x00]
+
+image_sample_lz v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x9c,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_lz v5, v[1:4], s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x9c,0xf0,0x01,0x05,0x22,0x03]
+
+image_sample_lz v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x9c,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa0,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_c v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa0,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_c v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa0,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_c v5, v[1:4], s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa0,0xf0,0x01,0x05,0x78,0x00]
+
+image_sample_c v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0xa0,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_c v5, v[1:4], s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0xa0,0xf0,0x01,0x05,0x22,0x03]
+
+image_sample_c v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0xa0,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_c v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa4,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_c_cl v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa4,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_c_cl v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa4,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_c_cl v5, v[1:4], s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa4,0xf0,0x01,0x05,0x78,0x00]
+
+image_sample_c_cl v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0xa4,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_c_cl v5, v[1:4], s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0xa4,0xf0,0x01,0x05,0x22,0x03]
+
+image_sample_c_cl v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0xa4,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_c_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_d v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_d v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa8,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_c_d v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa8,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_c_d v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa8,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_c_d v5, v[1:4], s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa8,0xf0,0x01,0x05,0x78,0x00]
+
+image_sample_c_d v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0xa8,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_c_d v5, v[1:4], s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0xa8,0xf0,0x01,0x05,0x22,0x03]
+
+image_sample_c_d v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0xa8,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_c_d v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0xa8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_d v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0xa8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_d v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0xa8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_d v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0xa8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_d v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0xa8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_d v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0xa8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_d v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0xa8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_d v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0xa8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_d v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0xa8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_d v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0xa8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_d v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0xa8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_d v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0xa8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_d v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0xa8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_d v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0xa8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_d v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0xa8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_d v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0xa8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb0,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_c_l v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb0,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_c_l v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb0,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_c_l v5, v[1:4], s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb0,0xf0,0x01,0x05,0x78,0x00]
+
+image_sample_c_l v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0xb0,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_c_l v5, v[1:4], s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0xb0,0xf0,0x01,0x05,0x22,0x03]
+
+image_sample_c_l v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0xb0,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_c_l v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb4,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_c_b v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb4,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_c_b v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb4,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_c_b v5, v[1:4], s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb4,0xf0,0x01,0x05,0x78,0x00]
+
+image_sample_c_b v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0xb4,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_c_b v5, v[1:4], s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0xb4,0xf0,0x01,0x05,0x22,0x03]
+
+image_sample_c_b v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0xb4,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_c_b v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb8,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_c_b_cl v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb8,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_c_b_cl v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb8,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_c_b_cl v5, v[1:4], s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb8,0xf0,0x01,0x05,0x78,0x00]
+
+image_sample_c_b_cl v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0xb8,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_c_b_cl v5, v[1:4], s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0xb8,0xf0,0x01,0x05,0x22,0x03]
+
+image_sample_c_b_cl v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0xb8,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_c_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xbc,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_c_lz v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xbc,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_c_lz v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xbc,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_c_lz v5, v[1:4], s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xbc,0xf0,0x01,0x05,0x78,0x00]
+
+image_sample_c_lz v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0xbc,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_c_lz v5, v[1:4], s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0xbc,0xf0,0x01,0x05,0x22,0x03]
+
+image_sample_c_lz v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0xbc,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_c_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4 v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4 v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4 v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4 v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4 v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x00,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x01,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x02,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_cl v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_cl v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_cl v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x04,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x05,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x06,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x10,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_l v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x10,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x10,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_l v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x10,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x10,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x10,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_l v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x10,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x10,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x11,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x12,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x14,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_b v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x14,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x14,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_b v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x14,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x14,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x14,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_b v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x14,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x14,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x15,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x16,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x18,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_b_cl v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x18,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x18,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_b_cl v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x18,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x18,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x18,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_b_cl v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x18,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x18,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x19,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x1a,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x1c,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_lz v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x1c,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x1c,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_lz v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x1c,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x1c,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x1c,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_lz v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x1c,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x1c,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x1d,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x1e,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x20,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x20,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x20,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x20,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x20,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x20,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_c v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x20,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x20,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x21,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x22,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x24,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_cl v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x24,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x24,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_cl v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x24,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x24,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x24,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_c_cl v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x24,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x24,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x25,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x26,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x30,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_l v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x30,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x30,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_l v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x30,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x30,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x30,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_c_l v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x30,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x30,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x31,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x32,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x34,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_b v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x34,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x34,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_b v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x34,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x34,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x34,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_c_b v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x34,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x34,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x35,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x36,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_b_cl v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_b_cl v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_c_b_cl v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x38,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x39,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x3a,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x3c,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_lz v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x3c,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x3c,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_lz v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x3c,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x3c,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x3c,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_c_lz v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x3c,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x3c,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x3d,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x3e,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x40,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x40,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x40,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_o v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x40,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x40,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x40,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x40,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x40,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x41,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x42,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x44,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_cl_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x44,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x44,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_cl_o v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x44,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x44,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x44,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_cl_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x44,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x44,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x45,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x46,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x50,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_l_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x50,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x50,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_l_o v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x50,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x50,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x50,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_l_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x50,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x50,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x51,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x52,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x54,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_b_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x54,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x54,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_b_o v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x54,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x54,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x54,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_b_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x54,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x54,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x55,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x56,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x58,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_b_cl_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x58,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x58,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_b_cl_o v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x58,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x58,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x58,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_b_cl_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x58,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x58,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x59,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x5a,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x5c,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_lz_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x5c,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x5c,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_lz_o v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x5c,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x5c,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x5c,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_lz_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x5c,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x5c,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x5d,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x5e,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x60,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x60,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x60,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_o v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x60,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x60,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x60,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_c_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x60,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x60,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x61,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x62,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x64,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_cl_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x64,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x64,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_cl_o v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x64,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x64,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x64,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_c_cl_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x64,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x64,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x65,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x66,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x70,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_l_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x70,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x70,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_l_o v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x70,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x70,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x70,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_c_l_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x70,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x70,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x71,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x72,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x74,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_b_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x74,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x74,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_b_o v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x74,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x74,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x74,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_c_b_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x74,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x74,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x75,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x76,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x78,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x78,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x78,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x78,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x78,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x78,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x78,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x78,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x79,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x7a,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x7c,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_lz_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x7c,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x7c,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_lz_o v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x7c,0xf1,0x01,0x05,0x78,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x7c,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x7c,0xf1,0x01,0x05,0x22,0x03]
+
+image_gather4_c_lz_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x7c,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x7c,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x7d,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x7e,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf1,0x01,0xfc,0x62,0x00]
+
+image_get_lod v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf1,0xff,0x05,0x62,0x00]
+
+image_get_lod v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf1,0x01,0x05,0x63,0x00]
+
+image_get_lod v5, v1, s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf1,0x01,0x05,0x78,0x00]
+
+image_get_lod v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf1,0x01,0x05,0x82,0x00]
+
+image_get_lod v5, v1, s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf1,0x01,0x05,0x22,0x03]
+
+image_get_lod v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf1,0x01,0x05,0xc2,0x03]
+
+image_get_lod v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:8], v1, s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_sample_c_cd v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa8,0xf1,0x01,0x05,0x62,0x00]
+
+image_sample_c_cd v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa8,0xf1,0x01,0xfc,0x62,0x00]
+
+image_sample_c_cd v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa8,0xf1,0xfc,0x05,0x62,0x00]
+
+image_sample_c_cd v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa8,0xf1,0x01,0x05,0x63,0x00]
+
+image_sample_c_cd v5, v[1:4], s[96:103], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa8,0xf1,0x01,0x05,0x78,0x00]
+
+image_sample_c_cd v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0xa8,0xf1,0x01,0x05,0x82,0x00]
+
+image_sample_c_cd v5, v[1:4], s[8:15], s[100:103] dmask:0x1
+// CHECK: [0x00,0x01,0xa8,0xf1,0x01,0x05,0x22,0x03]
+
+image_sample_c_cd v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0xa8,0xf1,0x01,0x05,0xc2,0x03]
+
+image_sample_c_cd v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0xa8,0xf1,0x01,0x05,0x62,0x00]
+
+image_sample_c_cd v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0xa8,0xf1,0x01,0x05,0x62,0x00]
+
+image_sample_c_cd v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0xa8,0xf1,0x01,0x05,0x62,0x00]
+
+image_sample_c_cd v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0xa8,0xf1,0x01,0x05,0x62,0x00]
+
+image_sample_c_cd v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0xa8,0xf1,0x01,0x05,0x62,0x00]
+
+image_sample_c_cd v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0xa8,0xf1,0x01,0x05,0x62,0x00]
+
+image_sample_c_cd v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0xa8,0xf1,0x01,0x05,0x62,0x00]
+
+image_sample_c_cd v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0xa8,0xf1,0x01,0x05,0x62,0x00]
+
+image_sample_c_cd v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0xa8,0xf1,0x01,0x05,0x62,0x00]
+
+image_sample_c_cd v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0xa8,0xf1,0x01,0x05,0x62,0x00]
+
+image_sample_c_cd v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0xa8,0xf1,0x01,0x05,0x62,0x00]
+
+image_sample_c_cd v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0xa8,0xf1,0x01,0x05,0x62,0x00]
+
+image_sample_c_cd v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0xa8,0xf1,0x01,0x05,0x62,0x00]
+
+image_sample_c_cd v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0xa8,0xf1,0x01,0x05,0x62,0x00]
+
+image_sample_c_cd v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0xa8,0xf1,0x01,0x05,0x62,0x00]
+
+image_sample_c_cd v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0xa8,0xf1,0x01,0x05,0x62,0x00]
+
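+// A reader's note, inferred only from the generated encodings above rather
+// than from any format documentation: in these MIMG words the second byte
+// carries dmask in its low nibble, with unorm, glc and da ORing in 0x10,
+// 0x20 and 0x40 respectively; tfe and lwe set bits 0 and 1 of the third
+// byte, and slc shows up as the fourth byte changing from 0xf1 to 0xf3.
+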
+buffer_load_format_x v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_x v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0xff,0x02,0x03]
+
+buffer_load_format_x v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_format_x v5, off, s[100:103], s3 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x19,0x03]
+
+buffer_load_format_x v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_format_x v5, off, s[8:11], s103 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x02,0x67]
+
+buffer_load_format_x v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_format_x v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_format_x v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_format_x v5, off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_format_x v5, off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_format_x v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x00,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_x v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x00,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_x v5, v[0:1], s[8:11], s3 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x00,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_x v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x00,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_x v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x00,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_x v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x00,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_x v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x00,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_x v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x42,0x03]
+
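+// Likewise inferred from the checks in the block above, not from a spec:
+// the MUBUF offset occupies the low 12 bits of the first two bytes
+// (offset:4095 -> 0xff,0x0f; offset:7 -> 0x07,0x00), offen/idxen/glc/addr64
+// set bits 4-7 of the second byte, slc sets bit 6 of the seventh byte
+// (0x02 -> 0x42), and the last byte encodes the soffset operand
+// (s3 -> 0x03, m0 -> 0x7c, -1 -> 0xc1).
+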
+buffer_load_format_xy v[5:6], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xy v[254:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0xfe,0x02,0x03]
+
+buffer_load_format_xy v[5:6], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_format_xy v[5:6], off, s[100:103], s3 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x19,0x03]
+
+buffer_load_format_xy v[5:6], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_format_xy v[5:6], off, s[8:11], s103 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x02,0x67]
+
+buffer_load_format_xy v[5:6], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_format_xy v[5:6], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_format_xy v[5:6], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_format_xy v[5:6], off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_format_xy v[5:6], off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_format_xy v[5:6], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x04,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xy v[5:6], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x04,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xy v[5:6], v[0:1], s[8:11], s3 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x04,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xy v[5:6], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x04,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xy v[5:6], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x04,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xy v[5:6], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x04,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xy v[5:6], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x04,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xy v[5:6], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x42,0x03]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyz v[253:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0xfd,0x02,0x03]
+
+buffer_load_format_xyz v[5:7], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_format_xyz v[5:7], off, s[100:103], s3 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x19,0x03]
+
+buffer_load_format_xyz v[5:7], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], s103 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x02,0x67]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_format_xyz v[5:7], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x08,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyz v[5:7], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x08,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyz v[5:7], v[0:1], s[8:11], s3 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x08,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x08,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x08,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x08,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x08,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x42,0x03]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyzw v[252:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0xfc,0x02,0x03]
+
+buffer_load_format_xyzw v[5:8], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_format_xyzw v[5:8], off, s[100:103], s3 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x19,0x03]
+
+buffer_load_format_xyzw v[5:8], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], s103 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x02,0x67]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_format_xyzw v[5:8], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x0c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyzw v[5:8], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x0c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyzw v[5:8], v[0:1], s[8:11], s3 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x0c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x0c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x0c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x0c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x0c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x42,0x03]
+
+buffer_store_format_x v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_x v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_store_format_x v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_format_x v1, off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_store_format_x v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_format_x v1, off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_store_format_x v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_format_x v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_format_x v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_format_x v1, off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_format_x v1, off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_format_x v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x10,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_x v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x10,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_x v1, v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x10,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_x v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0x10,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_x v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x10,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_x v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x10,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_x v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x10,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_x v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_store_format_xy v[1:2], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xy v[254:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0xfe,0x03,0x04]
+
+buffer_store_format_xy v[1:2], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_format_xy v[1:2], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_store_format_xy v[1:2], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_format_xy v[1:2], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_store_format_xy v[1:2], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_format_xy v[1:2], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_format_xy v[1:2], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_format_xy v[1:2], off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_format_xy v[1:2], off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_format_xy v[1:2], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x14,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xy v[1:2], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x14,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xy v[1:2], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x14,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xy v[1:2], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x14,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xy v[1:2], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x14,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xy v[1:2], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x14,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xy v[1:2], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x14,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xy v[1:2], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyz v[253:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0xfd,0x03,0x04]
+
+buffer_store_format_xyz v[1:3], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_format_xyz v[1:3], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_store_format_xyz v[1:3], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_format_xyz v[1:3], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x18,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyz v[1:3], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x18,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyz v[1:3], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x18,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x18,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x18,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x18,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x18,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyzw v[252:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0xfc,0x03,0x04]
+
+buffer_store_format_xyzw v[1:4], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_format_xyzw v[1:4], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_store_format_xyzw v[1:4], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_format_xyzw v[1:4], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x1c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyzw v[1:4], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x1c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyzw v[1:4], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x1c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x1c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x1c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x1c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x1c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_load_ubyte v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ubyte v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe0,0x00,0xff,0x02,0x03]
+
+buffer_load_ubyte v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_ubyte v5, off, s[100:103], s3 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe0,0x00,0x05,0x19,0x03]
+
+buffer_load_ubyte v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_ubyte v5, off, s[8:11], s103 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe0,0x00,0x05,0x02,0x67]
+
+buffer_load_ubyte v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_ubyte v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_ubyte v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_ubyte v5, off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_ubyte v5, off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_ubyte v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x20,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ubyte v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x20,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ubyte v5, v[0:1], s[8:11], s3 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x20,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ubyte v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x20,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ubyte v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x20,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ubyte v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x20,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ubyte v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x20,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ubyte v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x20,0xe0,0x00,0x05,0x42,0x03]
+
+buffer_load_sbyte v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sbyte v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe0,0x00,0xff,0x02,0x03]
+
+buffer_load_sbyte v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_sbyte v5, off, s[100:103], s3 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe0,0x00,0x05,0x19,0x03]
+
+buffer_load_sbyte v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_sbyte v5, off, s[8:11], s103 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe0,0x00,0x05,0x02,0x67]
+
+buffer_load_sbyte v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_sbyte v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_sbyte v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_sbyte v5, off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_sbyte v5, off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_sbyte v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x24,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sbyte v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x24,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sbyte v5, v[0:1], s[8:11], s3 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x24,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sbyte v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x24,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sbyte v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x24,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sbyte v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x24,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sbyte v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x24,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sbyte v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x24,0xe0,0x00,0x05,0x42,0x03]
+
+buffer_load_ushort v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ushort v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe0,0x00,0xff,0x02,0x03]
+
+buffer_load_ushort v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_ushort v5, off, s[100:103], s3 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe0,0x00,0x05,0x19,0x03]
+
+buffer_load_ushort v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_ushort v5, off, s[8:11], s103 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe0,0x00,0x05,0x02,0x67]
+
+buffer_load_ushort v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_ushort v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_ushort v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_ushort v5, off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_ushort v5, off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_ushort v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x28,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ushort v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x28,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ushort v5, v[0:1], s[8:11], s3 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x28,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ushort v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x28,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ushort v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x28,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ushort v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x28,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ushort v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x28,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ushort v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x28,0xe0,0x00,0x05,0x42,0x03]
+
+buffer_load_sshort v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sshort v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe0,0x00,0xff,0x02,0x03]
+
+buffer_load_sshort v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_sshort v5, off, s[100:103], s3 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe0,0x00,0x05,0x19,0x03]
+
+buffer_load_sshort v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_sshort v5, off, s[8:11], s103 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe0,0x00,0x05,0x02,0x67]
+
+buffer_load_sshort v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_sshort v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_sshort v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_sshort v5, off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_sshort v5, off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_sshort v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x2c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sshort v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x2c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sshort v5, v[0:1], s[8:11], s3 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x2c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sshort v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x2c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sshort v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x2c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sshort v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x2c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sshort v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x2c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sshort v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x2c,0xe0,0x00,0x05,0x42,0x03]
+
+buffer_load_dword v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dword v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe0,0x00,0xff,0x02,0x03]
+
+buffer_load_dword v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_dword v5, off, s[100:103], s3 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe0,0x00,0x05,0x19,0x03]
+
+buffer_load_dword v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_dword v5, off, s[8:11], s103 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe0,0x00,0x05,0x02,0x67]
+
+buffer_load_dword v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_dword v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_dword v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_dword v5, off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_dword v5, off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_dword v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x30,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dword v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x30,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dword v5, v[0:1], s[8:11], s3 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x30,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dword v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x30,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dword v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x30,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dword v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x30,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dword v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x30,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dword v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x30,0xe0,0x00,0x05,0x42,0x03]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x34,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx2 v[254:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x34,0xe0,0x00,0xfe,0x02,0x03]
+
+buffer_load_dwordx2 v[5:6], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x34,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_dwordx2 v[5:6], off, s[100:103], s3 offset:4095
+// CHECK: [0xff,0x0f,0x34,0xe0,0x00,0x05,0x19,0x03]
+
+buffer_load_dwordx2 v[5:6], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x34,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], s103 offset:4095
+// CHECK: [0xff,0x0f,0x34,0xe0,0x00,0x05,0x02,0x67]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x34,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x34,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x34,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x34,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x34,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_dwordx2 v[5:6], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x34,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx2 v[5:6], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x34,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx2 v[5:6], v[0:1], s[8:11], s3 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x34,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x34,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x34,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x34,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x34,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x34,0xe0,0x00,0x05,0x42,0x03]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x38,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx4 v[252:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x38,0xe0,0x00,0xfc,0x02,0x03]
+
+buffer_load_dwordx4 v[5:8], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x38,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_dwordx4 v[5:8], off, s[100:103], s3 offset:4095
+// CHECK: [0xff,0x0f,0x38,0xe0,0x00,0x05,0x19,0x03]
+
+buffer_load_dwordx4 v[5:8], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x38,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], s103 offset:4095
+// CHECK: [0xff,0x0f,0x38,0xe0,0x00,0x05,0x02,0x67]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x38,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x38,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x38,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x38,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x38,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_dwordx4 v[5:8], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x38,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx4 v[5:8], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x38,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx4 v[5:8], v[0:1], s[8:11], s3 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x38,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x38,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x38,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x38,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x38,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x38,0xe0,0x00,0x05,0x42,0x03]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x3c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx3 v[253:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x3c,0xe0,0x00,0xfd,0x02,0x03]
+
+buffer_load_dwordx3 v[5:7], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x3c,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_dwordx3 v[5:7], off, s[100:103], s3 offset:4095
+// CHECK: [0xff,0x0f,0x3c,0xe0,0x00,0x05,0x19,0x03]
+
+buffer_load_dwordx3 v[5:7], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x3c,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], s103 offset:4095
+// CHECK: [0xff,0x0f,0x3c,0xe0,0x00,0x05,0x02,0x67]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x3c,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x3c,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x3c,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x3c,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x3c,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_dwordx3 v[5:7], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x3c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx3 v[5:7], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x3c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx3 v[5:7], v[0:1], s[8:11], s3 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x3c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x3c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x3c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x3c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x3c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x3c,0xe0,0x00,0x05,0x42,0x03]
+
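+// MUBUF store instructions (byte, short, dword, dwordx2/x4/x3), exercising the same srsrc/soffset/addressing/offset/glc/slc variants as the loads above.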
+buffer_store_byte v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_byte v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_store_byte v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_byte v1, off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_store_byte v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_byte v1, off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_store_byte v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_byte v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_byte v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_byte v1, off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_byte v1, off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_byte v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x60,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_byte v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x60,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_byte v1, v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x60,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_byte v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0x60,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_byte v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x60,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_byte v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x60,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_byte v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x60,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_byte v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_store_short v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_short v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_store_short v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_short v1, off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_store_short v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_short v1, off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_store_short v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_short v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_short v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_short v1, off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_short v1, off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_short v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x68,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_short v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x68,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_short v1, v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x68,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_short v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0x68,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_short v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x68,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_short v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x68,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_short v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x68,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_short v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_store_dword v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dword v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_store_dword v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_dword v1, off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_store_dword v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_dword v1, off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_store_dword v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_dword v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_dword v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_dword v1, off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_dword v1, off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_dword v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x70,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dword v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x70,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dword v1, v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x70,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dword v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0x70,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dword v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x70,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dword v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x70,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dword v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x70,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dword v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx2 v[254:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0xfe,0x03,0x04]
+
+buffer_store_dwordx2 v[1:2], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_dwordx2 v[1:2], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_store_dwordx2 v[1:2], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_dwordx2 v[1:2], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x74,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx2 v[1:2], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x74,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx2 v[1:2], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x74,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x74,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x74,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x74,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x74,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx4 v[252:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0xfc,0x03,0x04]
+
+buffer_store_dwordx4 v[1:4], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_dwordx4 v[1:4], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_store_dwordx4 v[1:4], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_dwordx4 v[1:4], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x78,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx4 v[1:4], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x78,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx4 v[1:4], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x78,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x78,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x78,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x78,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x78,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx3 v[253:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0xfd,0x03,0x04]
+
+buffer_store_dwordx3 v[1:3], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_dwordx3 v[1:3], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_store_dwordx3 v[1:3], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_dwordx3 v[1:3], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x7c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx3 v[1:3], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x7c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx3 v[1:3], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x7c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x7c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x7c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x7c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x7c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x43,0x04]
+
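+// MUBUF atomic instructions on dword data; cmpswap takes a data/compare VGPR pair.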
+buffer_atomic_swap v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xc0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_swap v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xc0,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_atomic_swap v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0xc0,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_atomic_swap v1, off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0xc0,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_atomic_swap v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0xc0,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_swap v1, off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0xc0,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_atomic_swap v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0xc0,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_swap v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0xc0,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_atomic_swap v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0xc0,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_swap v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0xc0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_swap v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0xc0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_swap v1, v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0xc0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_swap v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0xc0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_swap v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0xc0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_swap v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0xc0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_swap v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0xc0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_swap v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0xc0,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_atomic_cmpswap v[1:2], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xc4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_cmpswap v[254:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xc4,0xe0,0x00,0xfe,0x03,0x04]
+
+buffer_atomic_cmpswap v[1:2], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0xc4,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_atomic_cmpswap v[1:2], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0xc4,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_atomic_cmpswap v[1:2], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0xc4,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_cmpswap v[1:2], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0xc4,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_atomic_cmpswap v[1:2], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0xc4,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_cmpswap v[1:2], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0xc4,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_atomic_cmpswap v[1:2], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0xc4,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_cmpswap v[1:2], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0xc4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_cmpswap v[1:2], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0xc4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_cmpswap v[1:2], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0xc4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_cmpswap v[1:2], off, s[12:15], s4
+// CHECK: [0x00,0x00,0xc4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_cmpswap v[1:2], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0xc4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_cmpswap v[1:2], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0xc4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_cmpswap v[1:2], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0xc4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_cmpswap v[1:2], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0xc4,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_atomic_add v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xc8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_add v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xc8,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_atomic_add v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0xc8,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_atomic_add v1, off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0xc8,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_atomic_add v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0xc8,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_add v1, off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0xc8,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_atomic_add v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0xc8,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_add v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0xc8,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_atomic_add v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0xc8,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_add v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0xc8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_add v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0xc8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_add v1, v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0xc8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_add v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0xc8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_add v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0xc8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_add v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0xc8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_add v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0xc8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_add v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0xc8,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_atomic_sub v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xcc,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_sub v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xcc,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_atomic_sub v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0xcc,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_atomic_sub v1, off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0xcc,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_atomic_sub v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0xcc,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_sub v1, off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0xcc,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_atomic_sub v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0xcc,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_sub v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0xcc,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_atomic_sub v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0xcc,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_sub v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0xcc,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_sub v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0xcc,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_sub v1, v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0xcc,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_sub v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0xcc,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_sub v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0xcc,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_sub v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0xcc,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_sub v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0xcc,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_sub v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0xcc,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_atomic_smin v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xd4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smin v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xd4,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_atomic_smin v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0xd4,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_atomic_smin v1, off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0xd4,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_atomic_smin v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0xd4,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_smin v1, off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0xd4,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_atomic_smin v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0xd4,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_smin v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0xd4,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_atomic_smin v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0xd4,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_smin v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0xd4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smin v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0xd4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smin v1, v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0xd4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smin v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0xd4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smin v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0xd4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smin v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0xd4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smin v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0xd4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smin v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0xd4,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_atomic_umin v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xd8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umin v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xd8,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_atomic_umin v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0xd8,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_atomic_umin v1, off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0xd8,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_atomic_umin v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0xd8,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_umin v1, off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0xd8,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_atomic_umin v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0xd8,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_umin v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0xd8,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_atomic_umin v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0xd8,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_umin v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0xd8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umin v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0xd8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umin v1, v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0xd8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umin v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0xd8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umin v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0xd8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umin v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0xd8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umin v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0xd8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umin v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0xd8,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_atomic_smax v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xdc,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smax v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xdc,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_atomic_smax v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0xdc,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_atomic_smax v1, off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0xdc,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_atomic_smax v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0xdc,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_smax v1, off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0xdc,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_atomic_smax v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0xdc,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_smax v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0xdc,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_atomic_smax v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0xdc,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_smax v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0xdc,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smax v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0xdc,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smax v1, v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0xdc,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smax v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0xdc,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smax v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0xdc,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smax v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0xdc,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smax v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0xdc,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smax v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0xdc,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_atomic_umax v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xe0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umax v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xe0,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_atomic_umax v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0xe0,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_atomic_umax v1, off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0xe0,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_atomic_umax v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0xe0,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_umax v1, off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0xe0,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_atomic_umax v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0xe0,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_umax v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0xe0,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_atomic_umax v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0xe0,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_umax v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0xe0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umax v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0xe0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umax v1, v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0xe0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umax v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0xe0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umax v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0xe0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umax v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0xe0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umax v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0xe0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umax v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0xe0,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_atomic_and v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xe4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_and v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xe4,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_atomic_and v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0xe4,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_atomic_and v1, off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0xe4,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_atomic_and v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0xe4,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_and v1, off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0xe4,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_atomic_and v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0xe4,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_and v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0xe4,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_atomic_and v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0xe4,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_and v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0xe4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_and v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0xe4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_and v1, v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0xe4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_and v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0xe4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_and v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0xe4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_and v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0xe4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_and v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0xe4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_and v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0xe4,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_atomic_or v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xe8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_or v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xe8,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_atomic_or v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0xe8,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_atomic_or v1, off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0xe8,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_atomic_or v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0xe8,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_or v1, off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0xe8,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_atomic_or v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0xe8,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_or v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0xe8,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_atomic_or v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0xe8,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_or v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0xe8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_or v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0xe8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_or v1, v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0xe8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_or v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0xe8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_or v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0xe8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_or v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0xe8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_or v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0xe8,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_or v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0xe8,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_atomic_xor v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xec,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_xor v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xec,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_atomic_xor v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0xec,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_atomic_xor v1, off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0xec,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_atomic_xor v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0xec,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_xor v1, off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0xec,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_atomic_xor v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0xec,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_xor v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0xec,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_atomic_xor v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0xec,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_xor v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0xec,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_xor v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0xec,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_xor v1, v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0xec,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_xor v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0xec,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_xor v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0xec,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_xor v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0xec,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_xor v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0xec,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_xor v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0xec,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_atomic_inc v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xf0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_inc v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xf0,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_atomic_inc v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0xf0,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_atomic_inc v1, off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0xf0,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_atomic_inc v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0xf0,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_inc v1, off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0xf0,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_atomic_inc v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0xf0,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_inc v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0xf0,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_atomic_inc v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0xf0,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_inc v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0xf0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_inc v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0xf0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_inc v1, v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0xf0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_inc v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0xf0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_inc v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0xf0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_inc v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0xf0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_inc v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0xf0,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_inc v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0xf0,0xe0,0x00,0x01,0x43,0x04]
+
+buffer_atomic_dec v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xf4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_dec v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0xf4,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_atomic_dec v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0xf4,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_atomic_dec v1, off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0xf4,0xe0,0x00,0x01,0x19,0x04]
+
+buffer_atomic_dec v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0xf4,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_dec v1, off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0xf4,0xe0,0x00,0x01,0x03,0x67]
+
+buffer_atomic_dec v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0xf4,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_dec v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0xf4,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_atomic_dec v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0xf4,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_dec v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0xf4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_dec v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0xf4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_dec v1, v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0xf4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_dec v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0xf4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_dec v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0xf4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_dec v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0xf4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_dec v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0xf4,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_atomic_dec v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0xf4,0xe0,0x00,0x01,0x43,0x04]
+
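+// MUBUF _x2 atomic instructions on qword data in VGPR pairs; cmpswap_x2 takes two pairs.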
+buffer_atomic_swap_x2 v[1:2], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_swap_x2 v[254:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe1,0x00,0xfe,0x03,0x04]
+
+buffer_atomic_swap_x2 v[1:2], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe1,0x00,0x01,0x04,0x04]
+
+buffer_atomic_swap_x2 v[1:2], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe1,0x00,0x01,0x19,0x04]
+
+buffer_atomic_swap_x2 v[1:2], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe1,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_swap_x2 v[1:2], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe1,0x00,0x01,0x03,0x67]
+
+buffer_atomic_swap_x2 v[1:2], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe1,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_swap_x2 v[1:2], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe1,0x00,0x01,0x03,0x80]
+
+buffer_atomic_swap_x2 v[1:2], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe1,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_swap_x2 v[1:2], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x40,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_swap_x2 v[1:2], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x40,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_swap_x2 v[1:2], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x40,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_swap_x2 v[1:2], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x40,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_swap_x2 v[1:2], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x40,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_swap_x2 v[1:2], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x40,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_swap_x2 v[1:2], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x40,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_swap_x2 v[1:2], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x40,0xe1,0x00,0x01,0x43,0x04]
+
+buffer_atomic_cmpswap_x2 v[1:4], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_cmpswap_x2 v[252:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe1,0x00,0xfc,0x03,0x04]
+
+buffer_atomic_cmpswap_x2 v[1:4], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe1,0x00,0x01,0x04,0x04]
+
+buffer_atomic_cmpswap_x2 v[1:4], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe1,0x00,0x01,0x19,0x04]
+
+buffer_atomic_cmpswap_x2 v[1:4], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe1,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_cmpswap_x2 v[1:4], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe1,0x00,0x01,0x03,0x67]
+
+buffer_atomic_cmpswap_x2 v[1:4], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe1,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_cmpswap_x2 v[1:4], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe1,0x00,0x01,0x03,0x80]
+
+buffer_atomic_cmpswap_x2 v[1:4], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe1,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_cmpswap_x2 v[1:4], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x44,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_cmpswap_x2 v[1:4], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x44,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_cmpswap_x2 v[1:4], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x44,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_cmpswap_x2 v[1:4], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x44,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_cmpswap_x2 v[1:4], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x44,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_cmpswap_x2 v[1:4], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x44,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_cmpswap_x2 v[1:4], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x44,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_cmpswap_x2 v[1:4], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x44,0xe1,0x00,0x01,0x43,0x04]
+
+buffer_atomic_add_x2 v[1:2], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_add_x2 v[254:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe1,0x00,0xfe,0x03,0x04]
+
+buffer_atomic_add_x2 v[1:2], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe1,0x00,0x01,0x04,0x04]
+
+buffer_atomic_add_x2 v[1:2], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe1,0x00,0x01,0x19,0x04]
+
+buffer_atomic_add_x2 v[1:2], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe1,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_add_x2 v[1:2], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe1,0x00,0x01,0x03,0x67]
+
+buffer_atomic_add_x2 v[1:2], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe1,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_add_x2 v[1:2], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe1,0x00,0x01,0x03,0x80]
+
+buffer_atomic_add_x2 v[1:2], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe1,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_add_x2 v[1:2], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x48,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_add_x2 v[1:2], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x48,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_add_x2 v[1:2], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x48,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_add_x2 v[1:2], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x48,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_add_x2 v[1:2], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x48,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_add_x2 v[1:2], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x48,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_add_x2 v[1:2], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x48,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_add_x2 v[1:2], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x48,0xe1,0x00,0x01,0x43,0x04]
+
+buffer_atomic_sub_x2 v[1:2], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_sub_x2 v[254:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe1,0x00,0xfe,0x03,0x04]
+
+buffer_atomic_sub_x2 v[1:2], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe1,0x00,0x01,0x04,0x04]
+
+buffer_atomic_sub_x2 v[1:2], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe1,0x00,0x01,0x19,0x04]
+
+buffer_atomic_sub_x2 v[1:2], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe1,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_sub_x2 v[1:2], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe1,0x00,0x01,0x03,0x67]
+
+buffer_atomic_sub_x2 v[1:2], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe1,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_sub_x2 v[1:2], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe1,0x00,0x01,0x03,0x80]
+
+buffer_atomic_sub_x2 v[1:2], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe1,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_sub_x2 v[1:2], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x4c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_sub_x2 v[1:2], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x4c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_sub_x2 v[1:2], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x4c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_sub_x2 v[1:2], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x4c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_sub_x2 v[1:2], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x4c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_sub_x2 v[1:2], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x4c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_sub_x2 v[1:2], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x4c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_sub_x2 v[1:2], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x4c,0xe1,0x00,0x01,0x43,0x04]
+
+buffer_atomic_smin_x2 v[1:2], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smin_x2 v[254:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe1,0x00,0xfe,0x03,0x04]
+
+buffer_atomic_smin_x2 v[1:2], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe1,0x00,0x01,0x04,0x04]
+
+buffer_atomic_smin_x2 v[1:2], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe1,0x00,0x01,0x19,0x04]
+
+buffer_atomic_smin_x2 v[1:2], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe1,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_smin_x2 v[1:2], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe1,0x00,0x01,0x03,0x67]
+
+buffer_atomic_smin_x2 v[1:2], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe1,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_smin_x2 v[1:2], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe1,0x00,0x01,0x03,0x80]
+
+buffer_atomic_smin_x2 v[1:2], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe1,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_smin_x2 v[1:2], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x54,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smin_x2 v[1:2], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x54,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smin_x2 v[1:2], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x54,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smin_x2 v[1:2], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x54,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smin_x2 v[1:2], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x54,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smin_x2 v[1:2], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x54,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smin_x2 v[1:2], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x54,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smin_x2 v[1:2], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x54,0xe1,0x00,0x01,0x43,0x04]
+
+buffer_atomic_umin_x2 v[1:2], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umin_x2 v[254:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe1,0x00,0xfe,0x03,0x04]
+
+buffer_atomic_umin_x2 v[1:2], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe1,0x00,0x01,0x04,0x04]
+
+buffer_atomic_umin_x2 v[1:2], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe1,0x00,0x01,0x19,0x04]
+
+buffer_atomic_umin_x2 v[1:2], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe1,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_umin_x2 v[1:2], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe1,0x00,0x01,0x03,0x67]
+
+buffer_atomic_umin_x2 v[1:2], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe1,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_umin_x2 v[1:2], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe1,0x00,0x01,0x03,0x80]
+
+buffer_atomic_umin_x2 v[1:2], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe1,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_umin_x2 v[1:2], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x58,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umin_x2 v[1:2], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x58,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umin_x2 v[1:2], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x58,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umin_x2 v[1:2], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x58,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umin_x2 v[1:2], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x58,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umin_x2 v[1:2], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x58,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umin_x2 v[1:2], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x58,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umin_x2 v[1:2], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x58,0xe1,0x00,0x01,0x43,0x04]
+
+buffer_atomic_smax_x2 v[1:2], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smax_x2 v[254:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe1,0x00,0xfe,0x03,0x04]
+
+buffer_atomic_smax_x2 v[1:2], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe1,0x00,0x01,0x04,0x04]
+
+buffer_atomic_smax_x2 v[1:2], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe1,0x00,0x01,0x19,0x04]
+
+buffer_atomic_smax_x2 v[1:2], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe1,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_smax_x2 v[1:2], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe1,0x00,0x01,0x03,0x67]
+
+buffer_atomic_smax_x2 v[1:2], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe1,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_smax_x2 v[1:2], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe1,0x00,0x01,0x03,0x80]
+
+buffer_atomic_smax_x2 v[1:2], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe1,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_smax_x2 v[1:2], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x5c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smax_x2 v[1:2], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x5c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smax_x2 v[1:2], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x5c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smax_x2 v[1:2], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x5c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smax_x2 v[1:2], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x5c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smax_x2 v[1:2], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x5c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smax_x2 v[1:2], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x5c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_smax_x2 v[1:2], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x5c,0xe1,0x00,0x01,0x43,0x04]
+
+buffer_atomic_umax_x2 v[1:2], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umax_x2 v[254:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe1,0x00,0xfe,0x03,0x04]
+
+buffer_atomic_umax_x2 v[1:2], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe1,0x00,0x01,0x04,0x04]
+
+buffer_atomic_umax_x2 v[1:2], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe1,0x00,0x01,0x19,0x04]
+
+buffer_atomic_umax_x2 v[1:2], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe1,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_umax_x2 v[1:2], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe1,0x00,0x01,0x03,0x67]
+
+buffer_atomic_umax_x2 v[1:2], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe1,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_umax_x2 v[1:2], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe1,0x00,0x01,0x03,0x80]
+
+buffer_atomic_umax_x2 v[1:2], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe1,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_umax_x2 v[1:2], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x60,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umax_x2 v[1:2], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x60,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umax_x2 v[1:2], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x60,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umax_x2 v[1:2], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x60,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umax_x2 v[1:2], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x60,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umax_x2 v[1:2], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x60,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umax_x2 v[1:2], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x60,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_umax_x2 v[1:2], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x60,0xe1,0x00,0x01,0x43,0x04]
+
+buffer_atomic_and_x2 v[1:2], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x64,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_and_x2 v[254:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x64,0xe1,0x00,0xfe,0x03,0x04]
+
+buffer_atomic_and_x2 v[1:2], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x64,0xe1,0x00,0x01,0x04,0x04]
+
+buffer_atomic_and_x2 v[1:2], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x64,0xe1,0x00,0x01,0x19,0x04]
+
+buffer_atomic_and_x2 v[1:2], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x64,0xe1,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_and_x2 v[1:2], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x64,0xe1,0x00,0x01,0x03,0x67]
+
+buffer_atomic_and_x2 v[1:2], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x64,0xe1,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_and_x2 v[1:2], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x64,0xe1,0x00,0x01,0x03,0x80]
+
+buffer_atomic_and_x2 v[1:2], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x64,0xe1,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_and_x2 v[1:2], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x64,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_and_x2 v[1:2], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x64,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_and_x2 v[1:2], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x64,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_and_x2 v[1:2], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x64,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_and_x2 v[1:2], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x64,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_and_x2 v[1:2], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x64,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_and_x2 v[1:2], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x64,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_and_x2 v[1:2], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x64,0xe1,0x00,0x01,0x43,0x04]
+
+buffer_atomic_or_x2 v[1:2], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_or_x2 v[254:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe1,0x00,0xfe,0x03,0x04]
+
+buffer_atomic_or_x2 v[1:2], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe1,0x00,0x01,0x04,0x04]
+
+buffer_atomic_or_x2 v[1:2], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe1,0x00,0x01,0x19,0x04]
+
+buffer_atomic_or_x2 v[1:2], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe1,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_or_x2 v[1:2], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe1,0x00,0x01,0x03,0x67]
+
+buffer_atomic_or_x2 v[1:2], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe1,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_or_x2 v[1:2], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe1,0x00,0x01,0x03,0x80]
+
+buffer_atomic_or_x2 v[1:2], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe1,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_or_x2 v[1:2], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x68,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_or_x2 v[1:2], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x68,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_or_x2 v[1:2], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x68,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_or_x2 v[1:2], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x68,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_or_x2 v[1:2], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x68,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_or_x2 v[1:2], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x68,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_or_x2 v[1:2], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x68,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_or_x2 v[1:2], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x68,0xe1,0x00,0x01,0x43,0x04]
+
+buffer_atomic_xor_x2 v[1:2], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x6c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_xor_x2 v[254:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x6c,0xe1,0x00,0xfe,0x03,0x04]
+
+buffer_atomic_xor_x2 v[1:2], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x6c,0xe1,0x00,0x01,0x04,0x04]
+
+buffer_atomic_xor_x2 v[1:2], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x6c,0xe1,0x00,0x01,0x19,0x04]
+
+buffer_atomic_xor_x2 v[1:2], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x6c,0xe1,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_xor_x2 v[1:2], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x6c,0xe1,0x00,0x01,0x03,0x67]
+
+buffer_atomic_xor_x2 v[1:2], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x6c,0xe1,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_xor_x2 v[1:2], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x6c,0xe1,0x00,0x01,0x03,0x80]
+
+buffer_atomic_xor_x2 v[1:2], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x6c,0xe1,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_xor_x2 v[1:2], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x6c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_xor_x2 v[1:2], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x6c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_xor_x2 v[1:2], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x6c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_xor_x2 v[1:2], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x6c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_xor_x2 v[1:2], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x6c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_xor_x2 v[1:2], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x6c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_xor_x2 v[1:2], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x6c,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_xor_x2 v[1:2], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x6c,0xe1,0x00,0x01,0x43,0x04]
+
+buffer_atomic_inc_x2 v[1:2], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_inc_x2 v[254:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe1,0x00,0xfe,0x03,0x04]
+
+buffer_atomic_inc_x2 v[1:2], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe1,0x00,0x01,0x04,0x04]
+
+buffer_atomic_inc_x2 v[1:2], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe1,0x00,0x01,0x19,0x04]
+
+buffer_atomic_inc_x2 v[1:2], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe1,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_inc_x2 v[1:2], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe1,0x00,0x01,0x03,0x67]
+
+buffer_atomic_inc_x2 v[1:2], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe1,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_inc_x2 v[1:2], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe1,0x00,0x01,0x03,0x80]
+
+buffer_atomic_inc_x2 v[1:2], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe1,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_inc_x2 v[1:2], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x70,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_inc_x2 v[1:2], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x70,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_inc_x2 v[1:2], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x70,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_inc_x2 v[1:2], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x70,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_inc_x2 v[1:2], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x70,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_inc_x2 v[1:2], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x70,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_inc_x2 v[1:2], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x70,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_inc_x2 v[1:2], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x70,0xe1,0x00,0x01,0x43,0x04]
+
+buffer_atomic_dec_x2 v[1:2], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_dec_x2 v[254:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe1,0x00,0xfe,0x03,0x04]
+
+buffer_atomic_dec_x2 v[1:2], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe1,0x00,0x01,0x04,0x04]
+
+buffer_atomic_dec_x2 v[1:2], off, s[100:103], s4 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe1,0x00,0x01,0x19,0x04]
+
+buffer_atomic_dec_x2 v[1:2], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe1,0x00,0x01,0x1e,0x04]
+
+buffer_atomic_dec_x2 v[1:2], off, s[12:15], s103 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe1,0x00,0x01,0x03,0x67]
+
+buffer_atomic_dec_x2 v[1:2], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe1,0x00,0x01,0x03,0x7c]
+
+buffer_atomic_dec_x2 v[1:2], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe1,0x00,0x01,0x03,0x80]
+
+buffer_atomic_dec_x2 v[1:2], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe1,0x00,0x01,0x03,0xc1]
+
+buffer_atomic_dec_x2 v[1:2], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x74,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_dec_x2 v[1:2], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x74,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_dec_x2 v[1:2], v[0:1], s[12:15], s4 addr64 offset:4095
+// CHECK: [0xff,0x8f,0x74,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_dec_x2 v[1:2], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x74,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_dec_x2 v[1:2], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x74,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_dec_x2 v[1:2], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x74,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_dec_x2 v[1:2], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x74,0xe1,0x00,0x01,0x03,0x04]
+
+buffer_atomic_dec_x2 v[1:2], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x74,0xe1,0x00,0x01,0x43,0x04]
+
+buffer_wbinvl1_vol
+// CHECK: [0x00,0x00,0xc0,0xe1,0x00,0x00,0x00,0x00]
+
+buffer_wbinvl1
+// CHECK: [0x00,0x00,0xc4,0xe1,0x00,0x00,0x00,0x00]
+
+s_load_dword s5, s[2:3], s2
+// CHECK: [0x02,0x82,0x02,0xc0]
+
+s_load_dword s103, s[2:3], s2
+// CHECK: [0x02,0x82,0x33,0xc0]
+
+s_load_dword vcc_lo, s[2:3], s2
+// CHECK: [0x02,0x02,0x35,0xc0]
+
+s_load_dword vcc_hi, s[2:3], s2
+// CHECK: [0x02,0x82,0x35,0xc0]
+
+s_load_dword s5, s[4:5], s2
+// CHECK: [0x02,0x84,0x02,0xc0]
+
+s_load_dword s5, s[102:103], s2
+// CHECK: [0x02,0xe6,0x02,0xc0]
+
+s_load_dword s5, flat_scratch, s2
+// CHECK: [0x02,0xe8,0x02,0xc0]
+
+s_load_dword s5, vcc, s2
+// CHECK: [0x02,0xea,0x02,0xc0]
+
+s_load_dword s5, tba, s2
+// CHECK: [0x02,0xec,0x02,0xc0]
+
+s_load_dword s5, tma, s2
+// CHECK: [0x02,0xee,0x02,0xc0]
+
+s_load_dword s5, ttmp[10:11], s2
+// CHECK: [0x02,0xfa,0x02,0xc0]
+
+s_load_dword s5, s[2:3], s103
+// CHECK: [0x67,0x82,0x02,0xc0]
+
+s_load_dword s5, s[2:3], flat_scratch_lo
+// CHECK: [0x68,0x82,0x02,0xc0]
+
+s_load_dword s5, s[2:3], flat_scratch_hi
+// CHECK: [0x69,0x82,0x02,0xc0]
+
+s_load_dword s5, s[2:3], vcc_lo
+// CHECK: [0x6a,0x82,0x02,0xc0]
+
+s_load_dword s5, s[2:3], vcc_hi
+// CHECK: [0x6b,0x82,0x02,0xc0]
+
+s_load_dword s5, s[2:3], tba_lo
+// CHECK: [0x6c,0x82,0x02,0xc0]
+
+s_load_dword s5, s[2:3], tba_hi
+// CHECK: [0x6d,0x82,0x02,0xc0]
+
+s_load_dword s5, s[2:3], tma_lo
+// CHECK: [0x6e,0x82,0x02,0xc0]
+
+s_load_dword s5, s[2:3], tma_hi
+// CHECK: [0x6f,0x82,0x02,0xc0]
+
+s_load_dword s5, s[2:3], ttmp11
+// CHECK: [0x7b,0x82,0x02,0xc0]
+
+s_load_dword s5, s[2:3], 0xaf123456
+// CHECK: [0xff,0x82,0x02,0xc0,0x56,0x34,0x12,0xaf]
+
+s_load_dword s5, s[2:3], 0x3f717273
+// CHECK: [0xff,0x82,0x02,0xc0,0x73,0x72,0x71,0x3f]
+
+s_load_dword s5, s[2:3], 0x7f
+// CHECK: [0x7f,0x83,0x02,0xc0]
+
+s_load_dwordx2 s[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0x45,0xc0]
+
+s_load_dwordx2 s[12:13], s[2:3], s2
+// CHECK: [0x02,0x02,0x46,0xc0]
+
+s_load_dwordx2 s[102:103], s[2:3], s2
+// CHECK: [0x02,0x02,0x73,0xc0]
+
+s_load_dwordx2 vcc, s[2:3], s2
+// CHECK: [0x02,0x02,0x75,0xc0]
+
+s_load_dwordx2 s[10:11], s[4:5], s2
+// CHECK: [0x02,0x04,0x45,0xc0]
+
+s_load_dwordx2 s[10:11], s[102:103], s2
+// CHECK: [0x02,0x66,0x45,0xc0]
+
+s_load_dwordx2 s[10:11], flat_scratch, s2
+// CHECK: [0x02,0x68,0x45,0xc0]
+
+s_load_dwordx2 s[10:11], vcc, s2
+// CHECK: [0x02,0x6a,0x45,0xc0]
+
+s_load_dwordx2 s[10:11], tba, s2
+// CHECK: [0x02,0x6c,0x45,0xc0]
+
+s_load_dwordx2 s[10:11], tma, s2
+// CHECK: [0x02,0x6e,0x45,0xc0]
+
+s_load_dwordx2 s[10:11], ttmp[10:11], s2
+// CHECK: [0x02,0x7a,0x45,0xc0]
+
+s_load_dwordx2 s[10:11], s[2:3], s103
+// CHECK: [0x67,0x02,0x45,0xc0]
+
+s_load_dwordx2 s[10:11], s[2:3], flat_scratch_lo
+// CHECK: [0x68,0x02,0x45,0xc0]
+
+s_load_dwordx2 s[10:11], s[2:3], flat_scratch_hi
+// CHECK: [0x69,0x02,0x45,0xc0]
+
+s_load_dwordx2 s[10:11], s[2:3], vcc_lo
+// CHECK: [0x6a,0x02,0x45,0xc0]
+
+s_load_dwordx2 s[10:11], s[2:3], vcc_hi
+// CHECK: [0x6b,0x02,0x45,0xc0]
+
+s_load_dwordx2 s[10:11], s[2:3], tba_lo
+// CHECK: [0x6c,0x02,0x45,0xc0]
+
+s_load_dwordx2 s[10:11], s[2:3], tba_hi
+// CHECK: [0x6d,0x02,0x45,0xc0]
+
+s_load_dwordx2 s[10:11], s[2:3], tma_lo
+// CHECK: [0x6e,0x02,0x45,0xc0]
+
+s_load_dwordx2 s[10:11], s[2:3], tma_hi
+// CHECK: [0x6f,0x02,0x45,0xc0]
+
+s_load_dwordx2 s[10:11], s[2:3], ttmp11
+// CHECK: [0x7b,0x02,0x45,0xc0]
+
+s_load_dwordx2 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0xff,0x02,0x45,0xc0,0x56,0x34,0x12,0xaf]
+
+s_load_dwordx2 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0xff,0x02,0x45,0xc0,0x73,0x72,0x71,0x3f]
+
+s_load_dwordx2 s[10:11], s[2:3], 0x7f
+// CHECK: [0x7f,0x03,0x45,0xc0]
+
+s_load_dwordx4 s[20:23], s[2:3], s2
+// CHECK: [0x02,0x02,0x8a,0xc0]
+
+s_load_dwordx4 s[24:27], s[2:3], s2
+// CHECK: [0x02,0x02,0x8c,0xc0]
+
+s_load_dwordx4 s[100:103], s[2:3], s2
+// CHECK: [0x02,0x02,0xb2,0xc0]
+
+s_load_dwordx4 s[20:23], s[4:5], s2
+// CHECK: [0x02,0x04,0x8a,0xc0]
+
+s_load_dwordx4 s[20:23], s[102:103], s2
+// CHECK: [0x02,0x66,0x8a,0xc0]
+
+s_load_dwordx4 s[20:23], flat_scratch, s2
+// CHECK: [0x02,0x68,0x8a,0xc0]
+
+s_load_dwordx4 s[20:23], vcc, s2
+// CHECK: [0x02,0x6a,0x8a,0xc0]
+
+s_load_dwordx4 s[20:23], tba, s2
+// CHECK: [0x02,0x6c,0x8a,0xc0]
+
+s_load_dwordx4 s[20:23], tma, s2
+// CHECK: [0x02,0x6e,0x8a,0xc0]
+
+s_load_dwordx4 s[20:23], ttmp[10:11], s2
+// CHECK: [0x02,0x7a,0x8a,0xc0]
+
+s_load_dwordx4 s[20:23], s[2:3], s103
+// CHECK: [0x67,0x02,0x8a,0xc0]
+
+s_load_dwordx4 s[20:23], s[2:3], flat_scratch_lo
+// CHECK: [0x68,0x02,0x8a,0xc0]
+
+s_load_dwordx4 s[20:23], s[2:3], flat_scratch_hi
+// CHECK: [0x69,0x02,0x8a,0xc0]
+
+s_load_dwordx4 s[20:23], s[2:3], vcc_lo
+// CHECK: [0x6a,0x02,0x8a,0xc0]
+
+s_load_dwordx4 s[20:23], s[2:3], vcc_hi
+// CHECK: [0x6b,0x02,0x8a,0xc0]
+
+s_load_dwordx4 s[20:23], s[2:3], tba_lo
+// CHECK: [0x6c,0x02,0x8a,0xc0]
+
+s_load_dwordx4 s[20:23], s[2:3], tba_hi
+// CHECK: [0x6d,0x02,0x8a,0xc0]
+
+s_load_dwordx4 s[20:23], s[2:3], tma_lo
+// CHECK: [0x6e,0x02,0x8a,0xc0]
+
+s_load_dwordx4 s[20:23], s[2:3], tma_hi
+// CHECK: [0x6f,0x02,0x8a,0xc0]
+
+s_load_dwordx4 s[20:23], s[2:3], ttmp11
+// CHECK: [0x7b,0x02,0x8a,0xc0]
+
+s_load_dwordx4 s[20:23], s[2:3], 0xaf123456
+// CHECK: [0xff,0x02,0x8a,0xc0,0x56,0x34,0x12,0xaf]
+
+s_load_dwordx4 s[20:23], s[2:3], 0x3f717273
+// CHECK: [0xff,0x02,0x8a,0xc0,0x73,0x72,0x71,0x3f]
+
+s_load_dwordx4 s[20:23], s[2:3], 0x7f
+// CHECK: [0x7f,0x03,0x8a,0xc0]
+
+s_load_dwordx8 s[20:27], s[2:3], s2
+// CHECK: [0x02,0x02,0xca,0xc0]
+
+s_load_dwordx8 s[24:31], s[2:3], s2
+// CHECK: [0x02,0x02,0xcc,0xc0]
+
+s_load_dwordx8 s[96:103], s[2:3], s2
+// CHECK: [0x02,0x02,0xf0,0xc0]
+
+s_load_dwordx8 s[20:27], s[4:5], s2
+// CHECK: [0x02,0x04,0xca,0xc0]
+
+s_load_dwordx8 s[20:27], s[102:103], s2
+// CHECK: [0x02,0x66,0xca,0xc0]
+
+s_load_dwordx8 s[20:27], flat_scratch, s2
+// CHECK: [0x02,0x68,0xca,0xc0]
+
+s_load_dwordx8 s[20:27], vcc, s2
+// CHECK: [0x02,0x6a,0xca,0xc0]
+
+s_load_dwordx8 s[20:27], tba, s2
+// CHECK: [0x02,0x6c,0xca,0xc0]
+
+s_load_dwordx8 s[20:27], tma, s2
+// CHECK: [0x02,0x6e,0xca,0xc0]
+
+s_load_dwordx8 s[20:27], ttmp[10:11], s2
+// CHECK: [0x02,0x7a,0xca,0xc0]
+
+s_load_dwordx8 s[20:27], s[2:3], s103
+// CHECK: [0x67,0x02,0xca,0xc0]
+
+s_load_dwordx8 s[20:27], s[2:3], flat_scratch_lo
+// CHECK: [0x68,0x02,0xca,0xc0]
+
+s_load_dwordx8 s[20:27], s[2:3], flat_scratch_hi
+// CHECK: [0x69,0x02,0xca,0xc0]
+
+s_load_dwordx8 s[20:27], s[2:3], vcc_lo
+// CHECK: [0x6a,0x02,0xca,0xc0]
+
+s_load_dwordx8 s[20:27], s[2:3], vcc_hi
+// CHECK: [0x6b,0x02,0xca,0xc0]
+
+s_load_dwordx8 s[20:27], s[2:3], tba_lo
+// CHECK: [0x6c,0x02,0xca,0xc0]
+
+s_load_dwordx8 s[20:27], s[2:3], tba_hi
+// CHECK: [0x6d,0x02,0xca,0xc0]
+
+s_load_dwordx8 s[20:27], s[2:3], tma_lo
+// CHECK: [0x6e,0x02,0xca,0xc0]
+
+s_load_dwordx8 s[20:27], s[2:3], tma_hi
+// CHECK: [0x6f,0x02,0xca,0xc0]
+
+s_load_dwordx8 s[20:27], s[2:3], ttmp11
+// CHECK: [0x7b,0x02,0xca,0xc0]
+
+s_load_dwordx8 s[20:27], s[2:3], 0xaf123456
+// CHECK: [0xff,0x02,0xca,0xc0,0x56,0x34,0x12,0xaf]
+
+s_load_dwordx8 s[20:27], s[2:3], 0x3f717273
+// CHECK: [0xff,0x02,0xca,0xc0,0x73,0x72,0x71,0x3f]
+
+s_load_dwordx8 s[20:27], s[2:3], 0x7f
+// CHECK: [0x7f,0x03,0xca,0xc0]
+
+s_load_dwordx16 s[20:35], s[2:3], s2
+// CHECK: [0x02,0x02,0x0a,0xc1]
+
+s_load_dwordx16 s[24:39], s[2:3], s2
+// CHECK: [0x02,0x02,0x0c,0xc1]
+
+s_load_dwordx16 s[88:103], s[2:3], s2
+// CHECK: [0x02,0x02,0x2c,0xc1]
+
+s_load_dwordx16 s[20:35], s[4:5], s2
+// CHECK: [0x02,0x04,0x0a,0xc1]
+
+s_load_dwordx16 s[20:35], s[102:103], s2
+// CHECK: [0x02,0x66,0x0a,0xc1]
+
+s_load_dwordx16 s[20:35], flat_scratch, s2
+// CHECK: [0x02,0x68,0x0a,0xc1]
+
+s_load_dwordx16 s[20:35], vcc, s2
+// CHECK: [0x02,0x6a,0x0a,0xc1]
+
+s_load_dwordx16 s[20:35], tba, s2
+// CHECK: [0x02,0x6c,0x0a,0xc1]
+
+s_load_dwordx16 s[20:35], tma, s2
+// CHECK: [0x02,0x6e,0x0a,0xc1]
+
+s_load_dwordx16 s[20:35], ttmp[10:11], s2
+// CHECK: [0x02,0x7a,0x0a,0xc1]
+
+s_load_dwordx16 s[20:35], s[2:3], s103
+// CHECK: [0x67,0x02,0x0a,0xc1]
+
+s_load_dwordx16 s[20:35], s[2:3], flat_scratch_lo
+// CHECK: [0x68,0x02,0x0a,0xc1]
+
+s_load_dwordx16 s[20:35], s[2:3], flat_scratch_hi
+// CHECK: [0x69,0x02,0x0a,0xc1]
+
+s_load_dwordx16 s[20:35], s[2:3], vcc_lo
+// CHECK: [0x6a,0x02,0x0a,0xc1]
+
+s_load_dwordx16 s[20:35], s[2:3], vcc_hi
+// CHECK: [0x6b,0x02,0x0a,0xc1]
+
+s_load_dwordx16 s[20:35], s[2:3], tba_lo
+// CHECK: [0x6c,0x02,0x0a,0xc1]
+
+s_load_dwordx16 s[20:35], s[2:3], tba_hi
+// CHECK: [0x6d,0x02,0x0a,0xc1]
+
+s_load_dwordx16 s[20:35], s[2:3], tma_lo
+// CHECK: [0x6e,0x02,0x0a,0xc1]
+
+s_load_dwordx16 s[20:35], s[2:3], tma_hi
+// CHECK: [0x6f,0x02,0x0a,0xc1]
+
+s_load_dwordx16 s[20:35], s[2:3], ttmp11
+// CHECK: [0x7b,0x02,0x0a,0xc1]
+
+s_load_dwordx16 s[20:35], s[2:3], 0xaf123456
+// CHECK: [0xff,0x02,0x0a,0xc1,0x56,0x34,0x12,0xaf]
+
+s_load_dwordx16 s[20:35], s[2:3], 0x3f717273
+// CHECK: [0xff,0x02,0x0a,0xc1,0x73,0x72,0x71,0x3f]
+
+s_load_dwordx16 s[20:35], s[2:3], 0x7f
+// CHECK: [0x7f,0x03,0x0a,0xc1]
+
+s_buffer_load_dword s5, s[4:7], s2
+// CHECK: [0x02,0x84,0x02,0xc2]
+
+s_buffer_load_dword s103, s[4:7], s2
+// CHECK: [0x02,0x84,0x33,0xc2]
+
+s_buffer_load_dword vcc_lo, s[4:7], s2
+// CHECK: [0x02,0x04,0x35,0xc2]
+
+s_buffer_load_dword vcc_hi, s[4:7], s2
+// CHECK: [0x02,0x84,0x35,0xc2]
+
+s_buffer_load_dword s5, s[8:11], s2
+// CHECK: [0x02,0x88,0x02,0xc2]
+
+s_buffer_load_dword s5, s[100:103], s2
+// CHECK: [0x02,0xe4,0x02,0xc2]
+
+s_buffer_load_dword s5, ttmp[8:11], s2
+// CHECK: [0x02,0xf8,0x02,0xc2]
+
+s_buffer_load_dword s5, s[4:7], s103
+// CHECK: [0x67,0x84,0x02,0xc2]
+
+s_buffer_load_dword s5, s[4:7], flat_scratch_lo
+// CHECK: [0x68,0x84,0x02,0xc2]
+
+s_buffer_load_dword s5, s[4:7], flat_scratch_hi
+// CHECK: [0x69,0x84,0x02,0xc2]
+
+s_buffer_load_dword s5, s[4:7], vcc_lo
+// CHECK: [0x6a,0x84,0x02,0xc2]
+
+s_buffer_load_dword s5, s[4:7], vcc_hi
+// CHECK: [0x6b,0x84,0x02,0xc2]
+
+s_buffer_load_dword s5, s[4:7], tba_lo
+// CHECK: [0x6c,0x84,0x02,0xc2]
+
+s_buffer_load_dword s5, s[4:7], tba_hi
+// CHECK: [0x6d,0x84,0x02,0xc2]
+
+s_buffer_load_dword s5, s[4:7], tma_lo
+// CHECK: [0x6e,0x84,0x02,0xc2]
+
+s_buffer_load_dword s5, s[4:7], tma_hi
+// CHECK: [0x6f,0x84,0x02,0xc2]
+
+s_buffer_load_dword s5, s[4:7], ttmp11
+// CHECK: [0x7b,0x84,0x02,0xc2]
+
+s_buffer_load_dword s5, s[4:7], 0xaf123456
+// CHECK: [0xff,0x84,0x02,0xc2,0x56,0x34,0x12,0xaf]
+
+s_buffer_load_dword s5, s[4:7], 0x3f717273
+// CHECK: [0xff,0x84,0x02,0xc2,0x73,0x72,0x71,0x3f]
+
+s_buffer_load_dword s5, s[4:7], 0x7f
+// CHECK: [0x7f,0x85,0x02,0xc2]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], s2
+// CHECK: [0x02,0x04,0x45,0xc2]
+
+s_buffer_load_dwordx2 s[12:13], s[4:7], s2
+// CHECK: [0x02,0x04,0x46,0xc2]
+
+s_buffer_load_dwordx2 s[102:103], s[4:7], s2
+// CHECK: [0x02,0x04,0x73,0xc2]
+
+s_buffer_load_dwordx2 vcc, s[4:7], s2
+// CHECK: [0x02,0x04,0x75,0xc2]
+
+s_buffer_load_dwordx2 s[10:11], s[8:11], s2
+// CHECK: [0x02,0x08,0x45,0xc2]
+
+s_buffer_load_dwordx2 s[10:11], s[100:103], s2
+// CHECK: [0x02,0x64,0x45,0xc2]
+
+s_buffer_load_dwordx2 s[10:11], ttmp[8:11], s2
+// CHECK: [0x02,0x78,0x45,0xc2]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], s103
+// CHECK: [0x67,0x04,0x45,0xc2]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], flat_scratch_lo
+// CHECK: [0x68,0x04,0x45,0xc2]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], flat_scratch_hi
+// CHECK: [0x69,0x04,0x45,0xc2]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], vcc_lo
+// CHECK: [0x6a,0x04,0x45,0xc2]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], vcc_hi
+// CHECK: [0x6b,0x04,0x45,0xc2]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], tba_lo
+// CHECK: [0x6c,0x04,0x45,0xc2]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], tba_hi
+// CHECK: [0x6d,0x04,0x45,0xc2]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], tma_lo
+// CHECK: [0x6e,0x04,0x45,0xc2]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], tma_hi
+// CHECK: [0x6f,0x04,0x45,0xc2]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], ttmp11
+// CHECK: [0x7b,0x04,0x45,0xc2]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], 0xaf123456
+// CHECK: [0xff,0x04,0x45,0xc2,0x56,0x34,0x12,0xaf]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], 0x3f717273
+// CHECK: [0xff,0x04,0x45,0xc2,0x73,0x72,0x71,0x3f]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], 0x7f
+// CHECK: [0x7f,0x05,0x45,0xc2]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], s2
+// CHECK: [0x02,0x04,0x8a,0xc2]
+
+s_buffer_load_dwordx4 s[24:27], s[4:7], s2
+// CHECK: [0x02,0x04,0x8c,0xc2]
+
+s_buffer_load_dwordx4 s[100:103], s[4:7], s2
+// CHECK: [0x02,0x04,0xb2,0xc2]
+
+s_buffer_load_dwordx4 s[20:23], s[8:11], s2
+// CHECK: [0x02,0x08,0x8a,0xc2]
+
+s_buffer_load_dwordx4 s[20:23], s[100:103], s2
+// CHECK: [0x02,0x64,0x8a,0xc2]
+
+s_buffer_load_dwordx4 s[20:23], ttmp[8:11], s2
+// CHECK: [0x02,0x78,0x8a,0xc2]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], s103
+// CHECK: [0x67,0x04,0x8a,0xc2]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], flat_scratch_lo
+// CHECK: [0x68,0x04,0x8a,0xc2]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], flat_scratch_hi
+// CHECK: [0x69,0x04,0x8a,0xc2]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], vcc_lo
+// CHECK: [0x6a,0x04,0x8a,0xc2]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], vcc_hi
+// CHECK: [0x6b,0x04,0x8a,0xc2]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], tba_lo
+// CHECK: [0x6c,0x04,0x8a,0xc2]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], tba_hi
+// CHECK: [0x6d,0x04,0x8a,0xc2]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], tma_lo
+// CHECK: [0x6e,0x04,0x8a,0xc2]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], tma_hi
+// CHECK: [0x6f,0x04,0x8a,0xc2]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], ttmp11
+// CHECK: [0x7b,0x04,0x8a,0xc2]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], 0xaf123456
+// CHECK: [0xff,0x04,0x8a,0xc2,0x56,0x34,0x12,0xaf]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], 0x3f717273
+// CHECK: [0xff,0x04,0x8a,0xc2,0x73,0x72,0x71,0x3f]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], 0x7f
+// CHECK: [0x7f,0x05,0x8a,0xc2]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], s2
+// CHECK: [0x02,0x04,0xca,0xc2]
+
+s_buffer_load_dwordx8 s[24:31], s[4:7], s2
+// CHECK: [0x02,0x04,0xcc,0xc2]
+
+s_buffer_load_dwordx8 s[96:103], s[4:7], s2
+// CHECK: [0x02,0x04,0xf0,0xc2]
+
+s_buffer_load_dwordx8 s[20:27], s[8:11], s2
+// CHECK: [0x02,0x08,0xca,0xc2]
+
+s_buffer_load_dwordx8 s[20:27], s[100:103], s2
+// CHECK: [0x02,0x64,0xca,0xc2]
+
+s_buffer_load_dwordx8 s[20:27], ttmp[8:11], s2
+// CHECK: [0x02,0x78,0xca,0xc2]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], s103
+// CHECK: [0x67,0x04,0xca,0xc2]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], flat_scratch_lo
+// CHECK: [0x68,0x04,0xca,0xc2]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], flat_scratch_hi
+// CHECK: [0x69,0x04,0xca,0xc2]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], vcc_lo
+// CHECK: [0x6a,0x04,0xca,0xc2]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], vcc_hi
+// CHECK: [0x6b,0x04,0xca,0xc2]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], tba_lo
+// CHECK: [0x6c,0x04,0xca,0xc2]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], tba_hi
+// CHECK: [0x6d,0x04,0xca,0xc2]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], tma_lo
+// CHECK: [0x6e,0x04,0xca,0xc2]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], tma_hi
+// CHECK: [0x6f,0x04,0xca,0xc2]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], ttmp11
+// CHECK: [0x7b,0x04,0xca,0xc2]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], 0xaf123456
+// CHECK: [0xff,0x04,0xca,0xc2,0x56,0x34,0x12,0xaf]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], 0x3f717273
+// CHECK: [0xff,0x04,0xca,0xc2,0x73,0x72,0x71,0x3f]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], 0x7f
+// CHECK: [0x7f,0x05,0xca,0xc2]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], s2
+// CHECK: [0x02,0x04,0x0a,0xc3]
+
+s_buffer_load_dwordx16 s[24:39], s[4:7], s2
+// CHECK: [0x02,0x04,0x0c,0xc3]
+
+s_buffer_load_dwordx16 s[88:103], s[4:7], s2
+// CHECK: [0x02,0x04,0x2c,0xc3]
+
+s_buffer_load_dwordx16 s[20:35], s[8:11], s2
+// CHECK: [0x02,0x08,0x0a,0xc3]
+
+s_buffer_load_dwordx16 s[20:35], s[100:103], s2
+// CHECK: [0x02,0x64,0x0a,0xc3]
+
+s_buffer_load_dwordx16 s[20:35], ttmp[8:11], s2
+// CHECK: [0x02,0x78,0x0a,0xc3]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], s103
+// CHECK: [0x67,0x04,0x0a,0xc3]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], flat_scratch_lo
+// CHECK: [0x68,0x04,0x0a,0xc3]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], flat_scratch_hi
+// CHECK: [0x69,0x04,0x0a,0xc3]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], vcc_lo
+// CHECK: [0x6a,0x04,0x0a,0xc3]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], vcc_hi
+// CHECK: [0x6b,0x04,0x0a,0xc3]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], tba_lo
+// CHECK: [0x6c,0x04,0x0a,0xc3]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], tba_hi
+// CHECK: [0x6d,0x04,0x0a,0xc3]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], tma_lo
+// CHECK: [0x6e,0x04,0x0a,0xc3]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], tma_hi
+// CHECK: [0x6f,0x04,0x0a,0xc3]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], ttmp11
+// CHECK: [0x7b,0x04,0x0a,0xc3]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], 0xaf123456
+// CHECK: [0xff,0x04,0x0a,0xc3,0x56,0x34,0x12,0xaf]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], 0x3f717273
+// CHECK: [0xff,0x04,0x0a,0xc3,0x73,0x72,0x71,0x3f]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], 0x7f
+// CHECK: [0x7f,0x05,0x0a,0xc3]
+
+s_dcache_inv_vol
+// CHECK: [0x00,0x00,0x40,0xc7]
+
+s_memtime s[10:11]
+// CHECK: [0x00,0x00,0x85,0xc7]
+
+s_memtime s[12:13]
+// CHECK: [0x00,0x00,0x86,0xc7]
+
+s_memtime s[102:103]
+// CHECK: [0x00,0x00,0xb3,0xc7]
+
+s_memtime vcc
+// CHECK: [0x00,0x00,0xb5,0xc7]
+
+s_dcache_inv
+// CHECK: [0x00,0x00,0xc0,0xc7]
+
+s_mov_b32 s5, s1
+// CHECK: [0x01,0x03,0x85,0xbe]
+
+s_mov_b32 s103, s1
+// CHECK: [0x01,0x03,0xe7,0xbe]
+
+s_mov_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x03,0xe8,0xbe]
+
+s_mov_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x03,0xe9,0xbe]
+
+s_mov_b32 vcc_lo, s1
+// CHECK: [0x01,0x03,0xea,0xbe]
+
+s_mov_b32 vcc_hi, s1
+// CHECK: [0x01,0x03,0xeb,0xbe]
+
+s_mov_b32 tba_lo, s1
+// CHECK: [0x01,0x03,0xec,0xbe]
+
+s_mov_b32 tba_hi, s1
+// CHECK: [0x01,0x03,0xed,0xbe]
+
+s_mov_b32 tma_lo, s1
+// CHECK: [0x01,0x03,0xee,0xbe]
+
+s_mov_b32 tma_hi, s1
+// CHECK: [0x01,0x03,0xef,0xbe]
+
+s_mov_b32 ttmp11, s1
+// CHECK: [0x01,0x03,0xfb,0xbe]
+
+s_mov_b32 m0, s1
+// CHECK: [0x01,0x03,0xfc,0xbe]
+
+s_mov_b32 exec_lo, s1
+// CHECK: [0x01,0x03,0xfe,0xbe]
+
+s_mov_b32 exec_hi, s1
+// CHECK: [0x01,0x03,0xff,0xbe]
+
+s_mov_b32 s5, s103
+// CHECK: [0x67,0x03,0x85,0xbe]
+
+s_mov_b32 s5, flat_scratch_lo
+// CHECK: [0x68,0x03,0x85,0xbe]
+
+s_mov_b32 s5, flat_scratch_hi
+// CHECK: [0x69,0x03,0x85,0xbe]
+
+s_mov_b32 s5, vcc_lo
+// CHECK: [0x6a,0x03,0x85,0xbe]
+
+s_mov_b32 s5, vcc_hi
+// CHECK: [0x6b,0x03,0x85,0xbe]
+
+s_mov_b32 s5, tba_lo
+// CHECK: [0x6c,0x03,0x85,0xbe]
+
+s_mov_b32 s5, tba_hi
+// CHECK: [0x6d,0x03,0x85,0xbe]
+
+s_mov_b32 s5, tma_lo
+// CHECK: [0x6e,0x03,0x85,0xbe]
+
+s_mov_b32 s5, tma_hi
+// CHECK: [0x6f,0x03,0x85,0xbe]
+
+s_mov_b32 s5, ttmp11
+// CHECK: [0x7b,0x03,0x85,0xbe]
+
+s_mov_b32 s5, m0
+// CHECK: [0x7c,0x03,0x85,0xbe]
+
+s_mov_b32 s5, exec_lo
+// CHECK: [0x7e,0x03,0x85,0xbe]
+
+s_mov_b32 s5, exec_hi
+// CHECK: [0x7f,0x03,0x85,0xbe]
+
+s_mov_b32 s5, 0
+// CHECK: [0x80,0x03,0x85,0xbe]
+
+s_mov_b32 s5, -1
+// CHECK: [0xc1,0x03,0x85,0xbe]
+
+s_mov_b32 s5, 0.5
+// CHECK: [0xf0,0x03,0x85,0xbe]
+
+s_mov_b32 s5, -4.0
+// CHECK: [0xf7,0x03,0x85,0xbe]
+
+s_mov_b32 s5, 0xaf123456
+// CHECK: [0xff,0x03,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_mov_b32 s5, 0x3f717273
+// CHECK: [0xff,0x03,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_mov_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x04,0x8a,0xbe]
+
+s_mov_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x04,0x8c,0xbe]
+
+s_mov_b64 s[102:103], s[2:3]
+// CHECK: [0x02,0x04,0xe6,0xbe]
+
+s_mov_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x04,0xe8,0xbe]
+
+s_mov_b64 vcc, s[2:3]
+// CHECK: [0x02,0x04,0xea,0xbe]
+
+s_mov_b64 tba, s[2:3]
+// CHECK: [0x02,0x04,0xec,0xbe]
+
+s_mov_b64 tma, s[2:3]
+// CHECK: [0x02,0x04,0xee,0xbe]
+
+s_mov_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x04,0xfa,0xbe]
+
+s_mov_b64 exec, s[2:3]
+// CHECK: [0x02,0x04,0xfe,0xbe]
+
+s_mov_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0xbe]
+
+s_mov_b64 s[10:11], s[102:103]
+// CHECK: [0x66,0x04,0x8a,0xbe]
+
+s_mov_b64 s[10:11], flat_scratch
+// CHECK: [0x68,0x04,0x8a,0xbe]
+
+s_mov_b64 s[10:11], vcc
+// CHECK: [0x6a,0x04,0x8a,0xbe]
+
+s_mov_b64 s[10:11], tba
+// CHECK: [0x6c,0x04,0x8a,0xbe]
+
+s_mov_b64 s[10:11], tma
+// CHECK: [0x6e,0x04,0x8a,0xbe]
+
+s_mov_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x04,0x8a,0xbe]
+
+s_mov_b64 s[10:11], exec
+// CHECK: [0x7e,0x04,0x8a,0xbe]
+
+s_mov_b64 s[10:11], 0
+// CHECK: [0x80,0x04,0x8a,0xbe]
+
+s_mov_b64 s[10:11], -1
+// CHECK: [0xc1,0x04,0x8a,0xbe]
+
+s_mov_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x04,0x8a,0xbe]
+
+s_mov_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x04,0x8a,0xbe]
+
+s_mov_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x04,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_mov_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x04,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_cmov_b32 s5, s1
+// CHECK: [0x01,0x05,0x85,0xbe]
+
+s_cmov_b32 s103, s1
+// CHECK: [0x01,0x05,0xe7,0xbe]
+
+s_cmov_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x05,0xe8,0xbe]
+
+s_cmov_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x05,0xe9,0xbe]
+
+s_cmov_b32 vcc_lo, s1
+// CHECK: [0x01,0x05,0xea,0xbe]
+
+s_cmov_b32 vcc_hi, s1
+// CHECK: [0x01,0x05,0xeb,0xbe]
+
+s_cmov_b32 tba_lo, s1
+// CHECK: [0x01,0x05,0xec,0xbe]
+
+s_cmov_b32 tba_hi, s1
+// CHECK: [0x01,0x05,0xed,0xbe]
+
+s_cmov_b32 tma_lo, s1
+// CHECK: [0x01,0x05,0xee,0xbe]
+
+s_cmov_b32 tma_hi, s1
+// CHECK: [0x01,0x05,0xef,0xbe]
+
+s_cmov_b32 ttmp11, s1
+// CHECK: [0x01,0x05,0xfb,0xbe]
+
+s_cmov_b32 m0, s1
+// CHECK: [0x01,0x05,0xfc,0xbe]
+
+s_cmov_b32 exec_lo, s1
+// CHECK: [0x01,0x05,0xfe,0xbe]
+
+s_cmov_b32 exec_hi, s1
+// CHECK: [0x01,0x05,0xff,0xbe]
+
+s_cmov_b32 s5, s103
+// CHECK: [0x67,0x05,0x85,0xbe]
+
+s_cmov_b32 s5, flat_scratch_lo
+// CHECK: [0x68,0x05,0x85,0xbe]
+
+s_cmov_b32 s5, flat_scratch_hi
+// CHECK: [0x69,0x05,0x85,0xbe]
+
+s_cmov_b32 s5, vcc_lo
+// CHECK: [0x6a,0x05,0x85,0xbe]
+
+s_cmov_b32 s5, vcc_hi
+// CHECK: [0x6b,0x05,0x85,0xbe]
+
+s_cmov_b32 s5, tba_lo
+// CHECK: [0x6c,0x05,0x85,0xbe]
+
+s_cmov_b32 s5, tba_hi
+// CHECK: [0x6d,0x05,0x85,0xbe]
+
+s_cmov_b32 s5, tma_lo
+// CHECK: [0x6e,0x05,0x85,0xbe]
+
+s_cmov_b32 s5, tma_hi
+// CHECK: [0x6f,0x05,0x85,0xbe]
+
+s_cmov_b32 s5, ttmp11
+// CHECK: [0x7b,0x05,0x85,0xbe]
+
+s_cmov_b32 s5, m0
+// CHECK: [0x7c,0x05,0x85,0xbe]
+
+s_cmov_b32 s5, exec_lo
+// CHECK: [0x7e,0x05,0x85,0xbe]
+
+s_cmov_b32 s5, exec_hi
+// CHECK: [0x7f,0x05,0x85,0xbe]
+
+s_cmov_b32 s5, 0
+// CHECK: [0x80,0x05,0x85,0xbe]
+
+s_cmov_b32 s5, -1
+// CHECK: [0xc1,0x05,0x85,0xbe]
+
+s_cmov_b32 s5, 0.5
+// CHECK: [0xf0,0x05,0x85,0xbe]
+
+s_cmov_b32 s5, -4.0
+// CHECK: [0xf7,0x05,0x85,0xbe]
+
+s_cmov_b32 s5, 0xaf123456
+// CHECK: [0xff,0x05,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_cmov_b32 s5, 0x3f717273
+// CHECK: [0xff,0x05,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_cmov_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x06,0x8a,0xbe]
+
+s_cmov_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x06,0x8c,0xbe]
+
+s_cmov_b64 s[102:103], s[2:3]
+// CHECK: [0x02,0x06,0xe6,0xbe]
+
+s_cmov_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x06,0xe8,0xbe]
+
+s_cmov_b64 vcc, s[2:3]
+// CHECK: [0x02,0x06,0xea,0xbe]
+
+s_cmov_b64 tba, s[2:3]
+// CHECK: [0x02,0x06,0xec,0xbe]
+
+s_cmov_b64 tma, s[2:3]
+// CHECK: [0x02,0x06,0xee,0xbe]
+
+s_cmov_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x06,0xfa,0xbe]
+
+s_cmov_b64 exec, s[2:3]
+// CHECK: [0x02,0x06,0xfe,0xbe]
+
+s_cmov_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x06,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], s[102:103]
+// CHECK: [0x66,0x06,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], flat_scratch
+// CHECK: [0x68,0x06,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], vcc
+// CHECK: [0x6a,0x06,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], tba
+// CHECK: [0x6c,0x06,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], tma
+// CHECK: [0x6e,0x06,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x06,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], exec
+// CHECK: [0x7e,0x06,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], 0
+// CHECK: [0x80,0x06,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], -1
+// CHECK: [0xc1,0x06,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x06,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x06,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x06,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_cmov_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x06,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_not_b32 s5, s1
+// CHECK: [0x01,0x07,0x85,0xbe]
+
+s_not_b32 s103, s1
+// CHECK: [0x01,0x07,0xe7,0xbe]
+
+s_not_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x07,0xe8,0xbe]
+
+s_not_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x07,0xe9,0xbe]
+
+s_not_b32 vcc_lo, s1
+// CHECK: [0x01,0x07,0xea,0xbe]
+
+s_not_b32 vcc_hi, s1
+// CHECK: [0x01,0x07,0xeb,0xbe]
+
+s_not_b32 tba_lo, s1
+// CHECK: [0x01,0x07,0xec,0xbe]
+
+s_not_b32 tba_hi, s1
+// CHECK: [0x01,0x07,0xed,0xbe]
+
+s_not_b32 tma_lo, s1
+// CHECK: [0x01,0x07,0xee,0xbe]
+
+s_not_b32 tma_hi, s1
+// CHECK: [0x01,0x07,0xef,0xbe]
+
+s_not_b32 ttmp11, s1
+// CHECK: [0x01,0x07,0xfb,0xbe]
+
+s_not_b32 m0, s1
+// CHECK: [0x01,0x07,0xfc,0xbe]
+
+s_not_b32 exec_lo, s1
+// CHECK: [0x01,0x07,0xfe,0xbe]
+
+s_not_b32 exec_hi, s1
+// CHECK: [0x01,0x07,0xff,0xbe]
+
+s_not_b32 s5, s103
+// CHECK: [0x67,0x07,0x85,0xbe]
+
+s_not_b32 s5, flat_scratch_lo
+// CHECK: [0x68,0x07,0x85,0xbe]
+
+s_not_b32 s5, flat_scratch_hi
+// CHECK: [0x69,0x07,0x85,0xbe]
+
+s_not_b32 s5, vcc_lo
+// CHECK: [0x6a,0x07,0x85,0xbe]
+
+s_not_b32 s5, vcc_hi
+// CHECK: [0x6b,0x07,0x85,0xbe]
+
+s_not_b32 s5, tba_lo
+// CHECK: [0x6c,0x07,0x85,0xbe]
+
+s_not_b32 s5, tba_hi
+// CHECK: [0x6d,0x07,0x85,0xbe]
+
+s_not_b32 s5, tma_lo
+// CHECK: [0x6e,0x07,0x85,0xbe]
+
+s_not_b32 s5, tma_hi
+// CHECK: [0x6f,0x07,0x85,0xbe]
+
+s_not_b32 s5, ttmp11
+// CHECK: [0x7b,0x07,0x85,0xbe]
+
+s_not_b32 s5, m0
+// CHECK: [0x7c,0x07,0x85,0xbe]
+
+s_not_b32 s5, exec_lo
+// CHECK: [0x7e,0x07,0x85,0xbe]
+
+s_not_b32 s5, exec_hi
+// CHECK: [0x7f,0x07,0x85,0xbe]
+
+s_not_b32 s5, 0
+// CHECK: [0x80,0x07,0x85,0xbe]
+
+s_not_b32 s5, -1
+// CHECK: [0xc1,0x07,0x85,0xbe]
+
+s_not_b32 s5, 0.5
+// CHECK: [0xf0,0x07,0x85,0xbe]
+
+s_not_b32 s5, -4.0
+// CHECK: [0xf7,0x07,0x85,0xbe]
+
+s_not_b32 s5, 0xaf123456
+// CHECK: [0xff,0x07,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_not_b32 s5, 0x3f717273
+// CHECK: [0xff,0x07,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_not_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x08,0x8a,0xbe]
+
+s_not_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x08,0x8c,0xbe]
+
+s_not_b64 s[102:103], s[2:3]
+// CHECK: [0x02,0x08,0xe6,0xbe]
+
+s_not_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x08,0xe8,0xbe]
+
+s_not_b64 vcc, s[2:3]
+// CHECK: [0x02,0x08,0xea,0xbe]
+
+s_not_b64 tba, s[2:3]
+// CHECK: [0x02,0x08,0xec,0xbe]
+
+s_not_b64 tma, s[2:3]
+// CHECK: [0x02,0x08,0xee,0xbe]
+
+s_not_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x08,0xfa,0xbe]
+
+s_not_b64 exec, s[2:3]
+// CHECK: [0x02,0x08,0xfe,0xbe]
+
+s_not_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x08,0x8a,0xbe]
+
+s_not_b64 s[10:11], s[102:103]
+// CHECK: [0x66,0x08,0x8a,0xbe]
+
+s_not_b64 s[10:11], flat_scratch
+// CHECK: [0x68,0x08,0x8a,0xbe]
+
+s_not_b64 s[10:11], vcc
+// CHECK: [0x6a,0x08,0x8a,0xbe]
+
+s_not_b64 s[10:11], tba
+// CHECK: [0x6c,0x08,0x8a,0xbe]
+
+s_not_b64 s[10:11], tma
+// CHECK: [0x6e,0x08,0x8a,0xbe]
+
+s_not_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x08,0x8a,0xbe]
+
+s_not_b64 s[10:11], exec
+// CHECK: [0x7e,0x08,0x8a,0xbe]
+
+s_not_b64 s[10:11], 0
+// CHECK: [0x80,0x08,0x8a,0xbe]
+
+s_not_b64 s[10:11], -1
+// CHECK: [0xc1,0x08,0x8a,0xbe]
+
+s_not_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x08,0x8a,0xbe]
+
+s_not_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x08,0x8a,0xbe]
+
+s_not_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x08,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_not_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x08,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_wqm_b32 s5, s1
+// CHECK: [0x01,0x09,0x85,0xbe]
+
+s_wqm_b32 s103, s1
+// CHECK: [0x01,0x09,0xe7,0xbe]
+
+s_wqm_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x09,0xe8,0xbe]
+
+s_wqm_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x09,0xe9,0xbe]
+
+s_wqm_b32 vcc_lo, s1
+// CHECK: [0x01,0x09,0xea,0xbe]
+
+s_wqm_b32 vcc_hi, s1
+// CHECK: [0x01,0x09,0xeb,0xbe]
+
+s_wqm_b32 tba_lo, s1
+// CHECK: [0x01,0x09,0xec,0xbe]
+
+s_wqm_b32 tba_hi, s1
+// CHECK: [0x01,0x09,0xed,0xbe]
+
+s_wqm_b32 tma_lo, s1
+// CHECK: [0x01,0x09,0xee,0xbe]
+
+s_wqm_b32 tma_hi, s1
+// CHECK: [0x01,0x09,0xef,0xbe]
+
+s_wqm_b32 ttmp11, s1
+// CHECK: [0x01,0x09,0xfb,0xbe]
+
+s_wqm_b32 m0, s1
+// CHECK: [0x01,0x09,0xfc,0xbe]
+
+s_wqm_b32 exec_lo, s1
+// CHECK: [0x01,0x09,0xfe,0xbe]
+
+s_wqm_b32 exec_hi, s1
+// CHECK: [0x01,0x09,0xff,0xbe]
+
+s_wqm_b32 s5, s103
+// CHECK: [0x67,0x09,0x85,0xbe]
+
+s_wqm_b32 s5, flat_scratch_lo
+// CHECK: [0x68,0x09,0x85,0xbe]
+
+s_wqm_b32 s5, flat_scratch_hi
+// CHECK: [0x69,0x09,0x85,0xbe]
+
+s_wqm_b32 s5, vcc_lo
+// CHECK: [0x6a,0x09,0x85,0xbe]
+
+s_wqm_b32 s5, vcc_hi
+// CHECK: [0x6b,0x09,0x85,0xbe]
+
+s_wqm_b32 s5, tba_lo
+// CHECK: [0x6c,0x09,0x85,0xbe]
+
+s_wqm_b32 s5, tba_hi
+// CHECK: [0x6d,0x09,0x85,0xbe]
+
+s_wqm_b32 s5, tma_lo
+// CHECK: [0x6e,0x09,0x85,0xbe]
+
+s_wqm_b32 s5, tma_hi
+// CHECK: [0x6f,0x09,0x85,0xbe]
+
+s_wqm_b32 s5, ttmp11
+// CHECK: [0x7b,0x09,0x85,0xbe]
+
+s_wqm_b32 s5, m0
+// CHECK: [0x7c,0x09,0x85,0xbe]
+
+s_wqm_b32 s5, exec_lo
+// CHECK: [0x7e,0x09,0x85,0xbe]
+
+s_wqm_b32 s5, exec_hi
+// CHECK: [0x7f,0x09,0x85,0xbe]
+
+s_wqm_b32 s5, 0
+// CHECK: [0x80,0x09,0x85,0xbe]
+
+s_wqm_b32 s5, -1
+// CHECK: [0xc1,0x09,0x85,0xbe]
+
+s_wqm_b32 s5, 0.5
+// CHECK: [0xf0,0x09,0x85,0xbe]
+
+s_wqm_b32 s5, -4.0
+// CHECK: [0xf7,0x09,0x85,0xbe]
+
+s_wqm_b32 s5, 0xaf123456
+// CHECK: [0xff,0x09,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_wqm_b32 s5, 0x3f717273
+// CHECK: [0xff,0x09,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_wqm_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x0a,0x8a,0xbe]
+
+s_wqm_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x0a,0x8c,0xbe]
+
+s_wqm_b64 s[102:103], s[2:3]
+// CHECK: [0x02,0x0a,0xe6,0xbe]
+
+s_wqm_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x0a,0xe8,0xbe]
+
+s_wqm_b64 vcc, s[2:3]
+// CHECK: [0x02,0x0a,0xea,0xbe]
+
+s_wqm_b64 tba, s[2:3]
+// CHECK: [0x02,0x0a,0xec,0xbe]
+
+s_wqm_b64 tma, s[2:3]
+// CHECK: [0x02,0x0a,0xee,0xbe]
+
+s_wqm_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x0a,0xfa,0xbe]
+
+s_wqm_b64 exec, s[2:3]
+// CHECK: [0x02,0x0a,0xfe,0xbe]
+
+s_wqm_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x0a,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], s[102:103]
+// CHECK: [0x66,0x0a,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], flat_scratch
+// CHECK: [0x68,0x0a,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], vcc
+// CHECK: [0x6a,0x0a,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], tba
+// CHECK: [0x6c,0x0a,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], tma
+// CHECK: [0x6e,0x0a,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x0a,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], exec
+// CHECK: [0x7e,0x0a,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], 0
+// CHECK: [0x80,0x0a,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], -1
+// CHECK: [0xc1,0x0a,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x0a,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x0a,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x0a,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_wqm_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x0a,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_brev_b32 s5, s1
+// CHECK: [0x01,0x0b,0x85,0xbe]
+
+s_brev_b32 s103, s1
+// CHECK: [0x01,0x0b,0xe7,0xbe]
+
+s_brev_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x0b,0xe8,0xbe]
+
+s_brev_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x0b,0xe9,0xbe]
+
+s_brev_b32 vcc_lo, s1
+// CHECK: [0x01,0x0b,0xea,0xbe]
+
+s_brev_b32 vcc_hi, s1
+// CHECK: [0x01,0x0b,0xeb,0xbe]
+
+s_brev_b32 tba_lo, s1
+// CHECK: [0x01,0x0b,0xec,0xbe]
+
+s_brev_b32 tba_hi, s1
+// CHECK: [0x01,0x0b,0xed,0xbe]
+
+s_brev_b32 tma_lo, s1
+// CHECK: [0x01,0x0b,0xee,0xbe]
+
+s_brev_b32 tma_hi, s1
+// CHECK: [0x01,0x0b,0xef,0xbe]
+
+s_brev_b32 ttmp11, s1
+// CHECK: [0x01,0x0b,0xfb,0xbe]
+
+s_brev_b32 m0, s1
+// CHECK: [0x01,0x0b,0xfc,0xbe]
+
+s_brev_b32 exec_lo, s1
+// CHECK: [0x01,0x0b,0xfe,0xbe]
+
+s_brev_b32 exec_hi, s1
+// CHECK: [0x01,0x0b,0xff,0xbe]
+
+s_brev_b32 s5, s103
+// CHECK: [0x67,0x0b,0x85,0xbe]
+
+s_brev_b32 s5, flat_scratch_lo
+// CHECK: [0x68,0x0b,0x85,0xbe]
+
+s_brev_b32 s5, flat_scratch_hi
+// CHECK: [0x69,0x0b,0x85,0xbe]
+
+s_brev_b32 s5, vcc_lo
+// CHECK: [0x6a,0x0b,0x85,0xbe]
+
+s_brev_b32 s5, vcc_hi
+// CHECK: [0x6b,0x0b,0x85,0xbe]
+
+s_brev_b32 s5, tba_lo
+// CHECK: [0x6c,0x0b,0x85,0xbe]
+
+s_brev_b32 s5, tba_hi
+// CHECK: [0x6d,0x0b,0x85,0xbe]
+
+s_brev_b32 s5, tma_lo
+// CHECK: [0x6e,0x0b,0x85,0xbe]
+
+s_brev_b32 s5, tma_hi
+// CHECK: [0x6f,0x0b,0x85,0xbe]
+
+s_brev_b32 s5, ttmp11
+// CHECK: [0x7b,0x0b,0x85,0xbe]
+
+s_brev_b32 s5, m0
+// CHECK: [0x7c,0x0b,0x85,0xbe]
+
+s_brev_b32 s5, exec_lo
+// CHECK: [0x7e,0x0b,0x85,0xbe]
+
+s_brev_b32 s5, exec_hi
+// CHECK: [0x7f,0x0b,0x85,0xbe]
+
+s_brev_b32 s5, 0
+// CHECK: [0x80,0x0b,0x85,0xbe]
+
+s_brev_b32 s5, -1
+// CHECK: [0xc1,0x0b,0x85,0xbe]
+
+s_brev_b32 s5, 0.5
+// CHECK: [0xf0,0x0b,0x85,0xbe]
+
+s_brev_b32 s5, -4.0
+// CHECK: [0xf7,0x0b,0x85,0xbe]
+
+s_brev_b32 s5, 0xaf123456
+// CHECK: [0xff,0x0b,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_brev_b32 s5, 0x3f717273
+// CHECK: [0xff,0x0b,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_brev_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x0c,0x8a,0xbe]
+
+s_brev_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x0c,0x8c,0xbe]
+
+s_brev_b64 s[102:103], s[2:3]
+// CHECK: [0x02,0x0c,0xe6,0xbe]
+
+s_brev_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x0c,0xe8,0xbe]
+
+s_brev_b64 vcc, s[2:3]
+// CHECK: [0x02,0x0c,0xea,0xbe]
+
+s_brev_b64 tba, s[2:3]
+// CHECK: [0x02,0x0c,0xec,0xbe]
+
+s_brev_b64 tma, s[2:3]
+// CHECK: [0x02,0x0c,0xee,0xbe]
+
+s_brev_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x0c,0xfa,0xbe]
+
+s_brev_b64 exec, s[2:3]
+// CHECK: [0x02,0x0c,0xfe,0xbe]
+
+s_brev_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x0c,0x8a,0xbe]
+
+s_brev_b64 s[10:11], s[102:103]
+// CHECK: [0x66,0x0c,0x8a,0xbe]
+
+s_brev_b64 s[10:11], flat_scratch
+// CHECK: [0x68,0x0c,0x8a,0xbe]
+
+s_brev_b64 s[10:11], vcc
+// CHECK: [0x6a,0x0c,0x8a,0xbe]
+
+s_brev_b64 s[10:11], tba
+// CHECK: [0x6c,0x0c,0x8a,0xbe]
+
+s_brev_b64 s[10:11], tma
+// CHECK: [0x6e,0x0c,0x8a,0xbe]
+
+s_brev_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x0c,0x8a,0xbe]
+
+s_brev_b64 s[10:11], exec
+// CHECK: [0x7e,0x0c,0x8a,0xbe]
+
+s_brev_b64 s[10:11], 0
+// CHECK: [0x80,0x0c,0x8a,0xbe]
+
+s_brev_b64 s[10:11], -1
+// CHECK: [0xc1,0x0c,0x8a,0xbe]
+
+s_brev_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x0c,0x8a,0xbe]
+
+s_brev_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x0c,0x8a,0xbe]
+
+s_brev_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x0c,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_brev_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x0c,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_bcnt0_i32_b32 s5, s1
+// CHECK: [0x01,0x0d,0x85,0xbe]
+
+s_bcnt0_i32_b32 s103, s1
+// CHECK: [0x01,0x0d,0xe7,0xbe]
+
+s_bcnt0_i32_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x0d,0xe8,0xbe]
+
+s_bcnt0_i32_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x0d,0xe9,0xbe]
+
+s_bcnt0_i32_b32 vcc_lo, s1
+// CHECK: [0x01,0x0d,0xea,0xbe]
+
+s_bcnt0_i32_b32 vcc_hi, s1
+// CHECK: [0x01,0x0d,0xeb,0xbe]
+
+s_bcnt0_i32_b32 tba_lo, s1
+// CHECK: [0x01,0x0d,0xec,0xbe]
+
+s_bcnt0_i32_b32 tba_hi, s1
+// CHECK: [0x01,0x0d,0xed,0xbe]
+
+s_bcnt0_i32_b32 tma_lo, s1
+// CHECK: [0x01,0x0d,0xee,0xbe]
+
+s_bcnt0_i32_b32 tma_hi, s1
+// CHECK: [0x01,0x0d,0xef,0xbe]
+
+s_bcnt0_i32_b32 ttmp11, s1
+// CHECK: [0x01,0x0d,0xfb,0xbe]
+
+s_bcnt0_i32_b32 m0, s1
+// CHECK: [0x01,0x0d,0xfc,0xbe]
+
+s_bcnt0_i32_b32 exec_lo, s1
+// CHECK: [0x01,0x0d,0xfe,0xbe]
+
+s_bcnt0_i32_b32 exec_hi, s1
+// CHECK: [0x01,0x0d,0xff,0xbe]
+
+s_bcnt0_i32_b32 s5, s103
+// CHECK: [0x67,0x0d,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, flat_scratch_lo
+// CHECK: [0x68,0x0d,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, flat_scratch_hi
+// CHECK: [0x69,0x0d,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, vcc_lo
+// CHECK: [0x6a,0x0d,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, vcc_hi
+// CHECK: [0x6b,0x0d,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, tba_lo
+// CHECK: [0x6c,0x0d,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, tba_hi
+// CHECK: [0x6d,0x0d,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, tma_lo
+// CHECK: [0x6e,0x0d,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, tma_hi
+// CHECK: [0x6f,0x0d,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, ttmp11
+// CHECK: [0x7b,0x0d,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, m0
+// CHECK: [0x7c,0x0d,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, exec_lo
+// CHECK: [0x7e,0x0d,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, exec_hi
+// CHECK: [0x7f,0x0d,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, 0
+// CHECK: [0x80,0x0d,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, -1
+// CHECK: [0xc1,0x0d,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, 0.5
+// CHECK: [0xf0,0x0d,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, -4.0
+// CHECK: [0xf7,0x0d,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, 0xaf123456
+// CHECK: [0xff,0x0d,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_bcnt0_i32_b32 s5, 0x3f717273
+// CHECK: [0xff,0x0d,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_bcnt0_i32_b64 s5, s[2:3]
+// CHECK: [0x02,0x0e,0x85,0xbe]
+
+s_bcnt0_i32_b64 s103, s[2:3]
+// CHECK: [0x02,0x0e,0xe7,0xbe]
+
+s_bcnt0_i32_b64 flat_scratch_lo, s[2:3]
+// CHECK: [0x02,0x0e,0xe8,0xbe]
+
+s_bcnt0_i32_b64 flat_scratch_hi, s[2:3]
+// CHECK: [0x02,0x0e,0xe9,0xbe]
+
+s_bcnt0_i32_b64 vcc_lo, s[2:3]
+// CHECK: [0x02,0x0e,0xea,0xbe]
+
+s_bcnt0_i32_b64 vcc_hi, s[2:3]
+// CHECK: [0x02,0x0e,0xeb,0xbe]
+
+s_bcnt0_i32_b64 tba_lo, s[2:3]
+// CHECK: [0x02,0x0e,0xec,0xbe]
+
+s_bcnt0_i32_b64 tba_hi, s[2:3]
+// CHECK: [0x02,0x0e,0xed,0xbe]
+
+s_bcnt0_i32_b64 tma_lo, s[2:3]
+// CHECK: [0x02,0x0e,0xee,0xbe]
+
+s_bcnt0_i32_b64 tma_hi, s[2:3]
+// CHECK: [0x02,0x0e,0xef,0xbe]
+
+s_bcnt0_i32_b64 ttmp11, s[2:3]
+// CHECK: [0x02,0x0e,0xfb,0xbe]
+
+s_bcnt0_i32_b64 m0, s[2:3]
+// CHECK: [0x02,0x0e,0xfc,0xbe]
+
+s_bcnt0_i32_b64 exec_lo, s[2:3]
+// CHECK: [0x02,0x0e,0xfe,0xbe]
+
+s_bcnt0_i32_b64 exec_hi, s[2:3]
+// CHECK: [0x02,0x0e,0xff,0xbe]
+
+s_bcnt0_i32_b64 s5, s[4:5]
+// CHECK: [0x04,0x0e,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, s[102:103]
+// CHECK: [0x66,0x0e,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, flat_scratch
+// CHECK: [0x68,0x0e,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, vcc
+// CHECK: [0x6a,0x0e,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, tba
+// CHECK: [0x6c,0x0e,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, tma
+// CHECK: [0x6e,0x0e,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, ttmp[10:11]
+// CHECK: [0x7a,0x0e,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, exec
+// CHECK: [0x7e,0x0e,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, 0
+// CHECK: [0x80,0x0e,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, -1
+// CHECK: [0xc1,0x0e,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, 0.5
+// CHECK: [0xf0,0x0e,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, -4.0
+// CHECK: [0xf7,0x0e,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, 0xaf123456
+// CHECK: [0xff,0x0e,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_bcnt0_i32_b64 s5, 0x3f717273
+// CHECK: [0xff,0x0e,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_bcnt1_i32_b32 s5, s1
+// CHECK: [0x01,0x0f,0x85,0xbe]
+
+s_bcnt1_i32_b32 s103, s1
+// CHECK: [0x01,0x0f,0xe7,0xbe]
+
+s_bcnt1_i32_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x0f,0xe8,0xbe]
+
+s_bcnt1_i32_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x0f,0xe9,0xbe]
+
+s_bcnt1_i32_b32 vcc_lo, s1
+// CHECK: [0x01,0x0f,0xea,0xbe]
+
+s_bcnt1_i32_b32 vcc_hi, s1
+// CHECK: [0x01,0x0f,0xeb,0xbe]
+
+s_bcnt1_i32_b32 tba_lo, s1
+// CHECK: [0x01,0x0f,0xec,0xbe]
+
+s_bcnt1_i32_b32 tba_hi, s1
+// CHECK: [0x01,0x0f,0xed,0xbe]
+
+s_bcnt1_i32_b32 tma_lo, s1
+// CHECK: [0x01,0x0f,0xee,0xbe]
+
+s_bcnt1_i32_b32 tma_hi, s1
+// CHECK: [0x01,0x0f,0xef,0xbe]
+
+s_bcnt1_i32_b32 ttmp11, s1
+// CHECK: [0x01,0x0f,0xfb,0xbe]
+
+s_bcnt1_i32_b32 m0, s1
+// CHECK: [0x01,0x0f,0xfc,0xbe]
+
+s_bcnt1_i32_b32 exec_lo, s1
+// CHECK: [0x01,0x0f,0xfe,0xbe]
+
+s_bcnt1_i32_b32 exec_hi, s1
+// CHECK: [0x01,0x0f,0xff,0xbe]
+
+s_bcnt1_i32_b32 s5, s103
+// CHECK: [0x67,0x0f,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, flat_scratch_lo
+// CHECK: [0x68,0x0f,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, flat_scratch_hi
+// CHECK: [0x69,0x0f,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, vcc_lo
+// CHECK: [0x6a,0x0f,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, vcc_hi
+// CHECK: [0x6b,0x0f,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, tba_lo
+// CHECK: [0x6c,0x0f,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, tba_hi
+// CHECK: [0x6d,0x0f,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, tma_lo
+// CHECK: [0x6e,0x0f,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, tma_hi
+// CHECK: [0x6f,0x0f,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, ttmp11
+// CHECK: [0x7b,0x0f,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, m0
+// CHECK: [0x7c,0x0f,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, exec_lo
+// CHECK: [0x7e,0x0f,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, exec_hi
+// CHECK: [0x7f,0x0f,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, 0
+// CHECK: [0x80,0x0f,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, -1
+// CHECK: [0xc1,0x0f,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, 0.5
+// CHECK: [0xf0,0x0f,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, -4.0
+// CHECK: [0xf7,0x0f,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, 0xaf123456
+// CHECK: [0xff,0x0f,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_bcnt1_i32_b32 s5, 0x3f717273
+// CHECK: [0xff,0x0f,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_bcnt1_i32_b64 s5, s[2:3]
+// CHECK: [0x02,0x10,0x85,0xbe]
+
+s_bcnt1_i32_b64 s103, s[2:3]
+// CHECK: [0x02,0x10,0xe7,0xbe]
+
+s_bcnt1_i32_b64 flat_scratch_lo, s[2:3]
+// CHECK: [0x02,0x10,0xe8,0xbe]
+
+s_bcnt1_i32_b64 flat_scratch_hi, s[2:3]
+// CHECK: [0x02,0x10,0xe9,0xbe]
+
+s_bcnt1_i32_b64 vcc_lo, s[2:3]
+// CHECK: [0x02,0x10,0xea,0xbe]
+
+s_bcnt1_i32_b64 vcc_hi, s[2:3]
+// CHECK: [0x02,0x10,0xeb,0xbe]
+
+s_bcnt1_i32_b64 tba_lo, s[2:3]
+// CHECK: [0x02,0x10,0xec,0xbe]
+
+s_bcnt1_i32_b64 tba_hi, s[2:3]
+// CHECK: [0x02,0x10,0xed,0xbe]
+
+s_bcnt1_i32_b64 tma_lo, s[2:3]
+// CHECK: [0x02,0x10,0xee,0xbe]
+
+s_bcnt1_i32_b64 tma_hi, s[2:3]
+// CHECK: [0x02,0x10,0xef,0xbe]
+
+s_bcnt1_i32_b64 ttmp11, s[2:3]
+// CHECK: [0x02,0x10,0xfb,0xbe]
+
+s_bcnt1_i32_b64 m0, s[2:3]
+// CHECK: [0x02,0x10,0xfc,0xbe]
+
+s_bcnt1_i32_b64 exec_lo, s[2:3]
+// CHECK: [0x02,0x10,0xfe,0xbe]
+
+s_bcnt1_i32_b64 exec_hi, s[2:3]
+// CHECK: [0x02,0x10,0xff,0xbe]
+
+s_bcnt1_i32_b64 s5, s[4:5]
+// CHECK: [0x04,0x10,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, s[102:103]
+// CHECK: [0x66,0x10,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, flat_scratch
+// CHECK: [0x68,0x10,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, vcc
+// CHECK: [0x6a,0x10,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, tba
+// CHECK: [0x6c,0x10,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, tma
+// CHECK: [0x6e,0x10,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, ttmp[10:11]
+// CHECK: [0x7a,0x10,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, exec
+// CHECK: [0x7e,0x10,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, 0
+// CHECK: [0x80,0x10,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, -1
+// CHECK: [0xc1,0x10,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, 0.5
+// CHECK: [0xf0,0x10,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, -4.0
+// CHECK: [0xf7,0x10,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, 0xaf123456
+// CHECK: [0xff,0x10,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_bcnt1_i32_b64 s5, 0x3f717273
+// CHECK: [0xff,0x10,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_ff0_i32_b32 s5, s1
+// CHECK: [0x01,0x11,0x85,0xbe]
+
+s_ff0_i32_b32 s103, s1
+// CHECK: [0x01,0x11,0xe7,0xbe]
+
+s_ff0_i32_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x11,0xe8,0xbe]
+
+s_ff0_i32_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x11,0xe9,0xbe]
+
+s_ff0_i32_b32 vcc_lo, s1
+// CHECK: [0x01,0x11,0xea,0xbe]
+
+s_ff0_i32_b32 vcc_hi, s1
+// CHECK: [0x01,0x11,0xeb,0xbe]
+
+s_ff0_i32_b32 tba_lo, s1
+// CHECK: [0x01,0x11,0xec,0xbe]
+
+s_ff0_i32_b32 tba_hi, s1
+// CHECK: [0x01,0x11,0xed,0xbe]
+
+s_ff0_i32_b32 tma_lo, s1
+// CHECK: [0x01,0x11,0xee,0xbe]
+
+s_ff0_i32_b32 tma_hi, s1
+// CHECK: [0x01,0x11,0xef,0xbe]
+
+s_ff0_i32_b32 ttmp11, s1
+// CHECK: [0x01,0x11,0xfb,0xbe]
+
+s_ff0_i32_b32 m0, s1
+// CHECK: [0x01,0x11,0xfc,0xbe]
+
+s_ff0_i32_b32 exec_lo, s1
+// CHECK: [0x01,0x11,0xfe,0xbe]
+
+s_ff0_i32_b32 exec_hi, s1
+// CHECK: [0x01,0x11,0xff,0xbe]
+
+s_ff0_i32_b32 s5, s103
+// CHECK: [0x67,0x11,0x85,0xbe]
+
+s_ff0_i32_b32 s5, flat_scratch_lo
+// CHECK: [0x68,0x11,0x85,0xbe]
+
+s_ff0_i32_b32 s5, flat_scratch_hi
+// CHECK: [0x69,0x11,0x85,0xbe]
+
+s_ff0_i32_b32 s5, vcc_lo
+// CHECK: [0x6a,0x11,0x85,0xbe]
+
+s_ff0_i32_b32 s5, vcc_hi
+// CHECK: [0x6b,0x11,0x85,0xbe]
+
+s_ff0_i32_b32 s5, tba_lo
+// CHECK: [0x6c,0x11,0x85,0xbe]
+
+s_ff0_i32_b32 s5, tba_hi
+// CHECK: [0x6d,0x11,0x85,0xbe]
+
+s_ff0_i32_b32 s5, tma_lo
+// CHECK: [0x6e,0x11,0x85,0xbe]
+
+s_ff0_i32_b32 s5, tma_hi
+// CHECK: [0x6f,0x11,0x85,0xbe]
+
+s_ff0_i32_b32 s5, ttmp11
+// CHECK: [0x7b,0x11,0x85,0xbe]
+
+s_ff0_i32_b32 s5, m0
+// CHECK: [0x7c,0x11,0x85,0xbe]
+
+s_ff0_i32_b32 s5, exec_lo
+// CHECK: [0x7e,0x11,0x85,0xbe]
+
+s_ff0_i32_b32 s5, exec_hi
+// CHECK: [0x7f,0x11,0x85,0xbe]
+
+s_ff0_i32_b32 s5, 0
+// CHECK: [0x80,0x11,0x85,0xbe]
+
+s_ff0_i32_b32 s5, -1
+// CHECK: [0xc1,0x11,0x85,0xbe]
+
+s_ff0_i32_b32 s5, 0.5
+// CHECK: [0xf0,0x11,0x85,0xbe]
+
+s_ff0_i32_b32 s5, -4.0
+// CHECK: [0xf7,0x11,0x85,0xbe]
+
+s_ff0_i32_b32 s5, 0xaf123456
+// CHECK: [0xff,0x11,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_ff0_i32_b32 s5, 0x3f717273
+// CHECK: [0xff,0x11,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_ff0_i32_b64 s5, s[2:3]
+// CHECK: [0x02,0x12,0x85,0xbe]
+
+s_ff0_i32_b64 s103, s[2:3]
+// CHECK: [0x02,0x12,0xe7,0xbe]
+
+s_ff0_i32_b64 flat_scratch_lo, s[2:3]
+// CHECK: [0x02,0x12,0xe8,0xbe]
+
+s_ff0_i32_b64 flat_scratch_hi, s[2:3]
+// CHECK: [0x02,0x12,0xe9,0xbe]
+
+s_ff0_i32_b64 vcc_lo, s[2:3]
+// CHECK: [0x02,0x12,0xea,0xbe]
+
+s_ff0_i32_b64 vcc_hi, s[2:3]
+// CHECK: [0x02,0x12,0xeb,0xbe]
+
+s_ff0_i32_b64 tba_lo, s[2:3]
+// CHECK: [0x02,0x12,0xec,0xbe]
+
+s_ff0_i32_b64 tba_hi, s[2:3]
+// CHECK: [0x02,0x12,0xed,0xbe]
+
+s_ff0_i32_b64 tma_lo, s[2:3]
+// CHECK: [0x02,0x12,0xee,0xbe]
+
+s_ff0_i32_b64 tma_hi, s[2:3]
+// CHECK: [0x02,0x12,0xef,0xbe]
+
+s_ff0_i32_b64 ttmp11, s[2:3]
+// CHECK: [0x02,0x12,0xfb,0xbe]
+
+s_ff0_i32_b64 m0, s[2:3]
+// CHECK: [0x02,0x12,0xfc,0xbe]
+
+s_ff0_i32_b64 exec_lo, s[2:3]
+// CHECK: [0x02,0x12,0xfe,0xbe]
+
+s_ff0_i32_b64 exec_hi, s[2:3]
+// CHECK: [0x02,0x12,0xff,0xbe]
+
+s_ff0_i32_b64 s5, s[4:5]
+// CHECK: [0x04,0x12,0x85,0xbe]
+
+s_ff0_i32_b64 s5, s[102:103]
+// CHECK: [0x66,0x12,0x85,0xbe]
+
+s_ff0_i32_b64 s5, flat_scratch
+// CHECK: [0x68,0x12,0x85,0xbe]
+
+s_ff0_i32_b64 s5, vcc
+// CHECK: [0x6a,0x12,0x85,0xbe]
+
+s_ff0_i32_b64 s5, tba
+// CHECK: [0x6c,0x12,0x85,0xbe]
+
+s_ff0_i32_b64 s5, tma
+// CHECK: [0x6e,0x12,0x85,0xbe]
+
+s_ff0_i32_b64 s5, ttmp[10:11]
+// CHECK: [0x7a,0x12,0x85,0xbe]
+
+s_ff0_i32_b64 s5, exec
+// CHECK: [0x7e,0x12,0x85,0xbe]
+
+s_ff0_i32_b64 s5, 0
+// CHECK: [0x80,0x12,0x85,0xbe]
+
+s_ff0_i32_b64 s5, -1
+// CHECK: [0xc1,0x12,0x85,0xbe]
+
+s_ff0_i32_b64 s5, 0.5
+// CHECK: [0xf0,0x12,0x85,0xbe]
+
+s_ff0_i32_b64 s5, -4.0
+// CHECK: [0xf7,0x12,0x85,0xbe]
+
+s_ff0_i32_b64 s5, 0xaf123456
+// CHECK: [0xff,0x12,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_ff0_i32_b64 s5, 0x3f717273
+// CHECK: [0xff,0x12,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_ff1_i32_b32 s5, s1
+// CHECK: [0x01,0x13,0x85,0xbe]
+
+s_ff1_i32_b32 s103, s1
+// CHECK: [0x01,0x13,0xe7,0xbe]
+
+s_ff1_i32_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x13,0xe8,0xbe]
+
+s_ff1_i32_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x13,0xe9,0xbe]
+
+s_ff1_i32_b32 vcc_lo, s1
+// CHECK: [0x01,0x13,0xea,0xbe]
+
+s_ff1_i32_b32 vcc_hi, s1
+// CHECK: [0x01,0x13,0xeb,0xbe]
+
+s_ff1_i32_b32 tba_lo, s1
+// CHECK: [0x01,0x13,0xec,0xbe]
+
+s_ff1_i32_b32 tba_hi, s1
+// CHECK: [0x01,0x13,0xed,0xbe]
+
+s_ff1_i32_b32 tma_lo, s1
+// CHECK: [0x01,0x13,0xee,0xbe]
+
+s_ff1_i32_b32 tma_hi, s1
+// CHECK: [0x01,0x13,0xef,0xbe]
+
+s_ff1_i32_b32 ttmp11, s1
+// CHECK: [0x01,0x13,0xfb,0xbe]
+
+s_ff1_i32_b32 m0, s1
+// CHECK: [0x01,0x13,0xfc,0xbe]
+
+s_ff1_i32_b32 exec_lo, s1
+// CHECK: [0x01,0x13,0xfe,0xbe]
+
+s_ff1_i32_b32 exec_hi, s1
+// CHECK: [0x01,0x13,0xff,0xbe]
+
+s_ff1_i32_b32 s5, s103
+// CHECK: [0x67,0x13,0x85,0xbe]
+
+s_ff1_i32_b32 s5, flat_scratch_lo
+// CHECK: [0x68,0x13,0x85,0xbe]
+
+s_ff1_i32_b32 s5, flat_scratch_hi
+// CHECK: [0x69,0x13,0x85,0xbe]
+
+s_ff1_i32_b32 s5, vcc_lo
+// CHECK: [0x6a,0x13,0x85,0xbe]
+
+s_ff1_i32_b32 s5, vcc_hi
+// CHECK: [0x6b,0x13,0x85,0xbe]
+
+s_ff1_i32_b32 s5, tba_lo
+// CHECK: [0x6c,0x13,0x85,0xbe]
+
+s_ff1_i32_b32 s5, tba_hi
+// CHECK: [0x6d,0x13,0x85,0xbe]
+
+s_ff1_i32_b32 s5, tma_lo
+// CHECK: [0x6e,0x13,0x85,0xbe]
+
+s_ff1_i32_b32 s5, tma_hi
+// CHECK: [0x6f,0x13,0x85,0xbe]
+
+s_ff1_i32_b32 s5, ttmp11
+// CHECK: [0x7b,0x13,0x85,0xbe]
+
+s_ff1_i32_b32 s5, m0
+// CHECK: [0x7c,0x13,0x85,0xbe]
+
+s_ff1_i32_b32 s5, exec_lo
+// CHECK: [0x7e,0x13,0x85,0xbe]
+
+s_ff1_i32_b32 s5, exec_hi
+// CHECK: [0x7f,0x13,0x85,0xbe]
+
+s_ff1_i32_b32 s5, 0
+// CHECK: [0x80,0x13,0x85,0xbe]
+
+s_ff1_i32_b32 s5, -1
+// CHECK: [0xc1,0x13,0x85,0xbe]
+
+s_ff1_i32_b32 s5, 0.5
+// CHECK: [0xf0,0x13,0x85,0xbe]
+
+s_ff1_i32_b32 s5, -4.0
+// CHECK: [0xf7,0x13,0x85,0xbe]
+
+s_ff1_i32_b32 s5, 0xaf123456
+// CHECK: [0xff,0x13,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_ff1_i32_b32 s5, 0x3f717273
+// CHECK: [0xff,0x13,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_ff1_i32_b64 s5, s[2:3]
+// CHECK: [0x02,0x14,0x85,0xbe]
+
+s_ff1_i32_b64 s103, s[2:3]
+// CHECK: [0x02,0x14,0xe7,0xbe]
+
+s_ff1_i32_b64 flat_scratch_lo, s[2:3]
+// CHECK: [0x02,0x14,0xe8,0xbe]
+
+s_ff1_i32_b64 flat_scratch_hi, s[2:3]
+// CHECK: [0x02,0x14,0xe9,0xbe]
+
+s_ff1_i32_b64 vcc_lo, s[2:3]
+// CHECK: [0x02,0x14,0xea,0xbe]
+
+s_ff1_i32_b64 vcc_hi, s[2:3]
+// CHECK: [0x02,0x14,0xeb,0xbe]
+
+s_ff1_i32_b64 tba_lo, s[2:3]
+// CHECK: [0x02,0x14,0xec,0xbe]
+
+s_ff1_i32_b64 tba_hi, s[2:3]
+// CHECK: [0x02,0x14,0xed,0xbe]
+
+s_ff1_i32_b64 tma_lo, s[2:3]
+// CHECK: [0x02,0x14,0xee,0xbe]
+
+s_ff1_i32_b64 tma_hi, s[2:3]
+// CHECK: [0x02,0x14,0xef,0xbe]
+
+s_ff1_i32_b64 ttmp11, s[2:3]
+// CHECK: [0x02,0x14,0xfb,0xbe]
+
+s_ff1_i32_b64 m0, s[2:3]
+// CHECK: [0x02,0x14,0xfc,0xbe]
+
+s_ff1_i32_b64 exec_lo, s[2:3]
+// CHECK: [0x02,0x14,0xfe,0xbe]
+
+s_ff1_i32_b64 exec_hi, s[2:3]
+// CHECK: [0x02,0x14,0xff,0xbe]
+
+s_ff1_i32_b64 s5, s[4:5]
+// CHECK: [0x04,0x14,0x85,0xbe]
+
+s_ff1_i32_b64 s5, s[102:103]
+// CHECK: [0x66,0x14,0x85,0xbe]
+
+s_ff1_i32_b64 s5, flat_scratch
+// CHECK: [0x68,0x14,0x85,0xbe]
+
+s_ff1_i32_b64 s5, vcc
+// CHECK: [0x6a,0x14,0x85,0xbe]
+
+s_ff1_i32_b64 s5, tba
+// CHECK: [0x6c,0x14,0x85,0xbe]
+
+s_ff1_i32_b64 s5, tma
+// CHECK: [0x6e,0x14,0x85,0xbe]
+
+s_ff1_i32_b64 s5, ttmp[10:11]
+// CHECK: [0x7a,0x14,0x85,0xbe]
+
+s_ff1_i32_b64 s5, exec
+// CHECK: [0x7e,0x14,0x85,0xbe]
+
+s_ff1_i32_b64 s5, 0
+// CHECK: [0x80,0x14,0x85,0xbe]
+
+s_ff1_i32_b64 s5, -1
+// CHECK: [0xc1,0x14,0x85,0xbe]
+
+s_ff1_i32_b64 s5, 0.5
+// CHECK: [0xf0,0x14,0x85,0xbe]
+
+s_ff1_i32_b64 s5, -4.0
+// CHECK: [0xf7,0x14,0x85,0xbe]
+
+s_ff1_i32_b64 s5, 0xaf123456
+// CHECK: [0xff,0x14,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_ff1_i32_b64 s5, 0x3f717273
+// CHECK: [0xff,0x14,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_flbit_i32_b32 s5, s1
+// CHECK: [0x01,0x15,0x85,0xbe]
+
+s_flbit_i32_b32 s103, s1
+// CHECK: [0x01,0x15,0xe7,0xbe]
+
+s_flbit_i32_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x15,0xe8,0xbe]
+
+s_flbit_i32_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x15,0xe9,0xbe]
+
+s_flbit_i32_b32 vcc_lo, s1
+// CHECK: [0x01,0x15,0xea,0xbe]
+
+s_flbit_i32_b32 vcc_hi, s1
+// CHECK: [0x01,0x15,0xeb,0xbe]
+
+s_flbit_i32_b32 tba_lo, s1
+// CHECK: [0x01,0x15,0xec,0xbe]
+
+s_flbit_i32_b32 tba_hi, s1
+// CHECK: [0x01,0x15,0xed,0xbe]
+
+s_flbit_i32_b32 tma_lo, s1
+// CHECK: [0x01,0x15,0xee,0xbe]
+
+s_flbit_i32_b32 tma_hi, s1
+// CHECK: [0x01,0x15,0xef,0xbe]
+
+s_flbit_i32_b32 ttmp11, s1
+// CHECK: [0x01,0x15,0xfb,0xbe]
+
+s_flbit_i32_b32 m0, s1
+// CHECK: [0x01,0x15,0xfc,0xbe]
+
+s_flbit_i32_b32 exec_lo, s1
+// CHECK: [0x01,0x15,0xfe,0xbe]
+
+s_flbit_i32_b32 exec_hi, s1
+// CHECK: [0x01,0x15,0xff,0xbe]
+
+s_flbit_i32_b32 s5, s103
+// CHECK: [0x67,0x15,0x85,0xbe]
+
+s_flbit_i32_b32 s5, flat_scratch_lo
+// CHECK: [0x68,0x15,0x85,0xbe]
+
+s_flbit_i32_b32 s5, flat_scratch_hi
+// CHECK: [0x69,0x15,0x85,0xbe]
+
+s_flbit_i32_b32 s5, vcc_lo
+// CHECK: [0x6a,0x15,0x85,0xbe]
+
+s_flbit_i32_b32 s5, vcc_hi
+// CHECK: [0x6b,0x15,0x85,0xbe]
+
+s_flbit_i32_b32 s5, tba_lo
+// CHECK: [0x6c,0x15,0x85,0xbe]
+
+s_flbit_i32_b32 s5, tba_hi
+// CHECK: [0x6d,0x15,0x85,0xbe]
+
+s_flbit_i32_b32 s5, tma_lo
+// CHECK: [0x6e,0x15,0x85,0xbe]
+
+s_flbit_i32_b32 s5, tma_hi
+// CHECK: [0x6f,0x15,0x85,0xbe]
+
+s_flbit_i32_b32 s5, ttmp11
+// CHECK: [0x7b,0x15,0x85,0xbe]
+
+s_flbit_i32_b32 s5, m0
+// CHECK: [0x7c,0x15,0x85,0xbe]
+
+s_flbit_i32_b32 s5, exec_lo
+// CHECK: [0x7e,0x15,0x85,0xbe]
+
+s_flbit_i32_b32 s5, exec_hi
+// CHECK: [0x7f,0x15,0x85,0xbe]
+
+s_flbit_i32_b32 s5, 0
+// CHECK: [0x80,0x15,0x85,0xbe]
+
+s_flbit_i32_b32 s5, -1
+// CHECK: [0xc1,0x15,0x85,0xbe]
+
+s_flbit_i32_b32 s5, 0.5
+// CHECK: [0xf0,0x15,0x85,0xbe]
+
+s_flbit_i32_b32 s5, -4.0
+// CHECK: [0xf7,0x15,0x85,0xbe]
+
+s_flbit_i32_b32 s5, 0xaf123456
+// CHECK: [0xff,0x15,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_flbit_i32_b32 s5, 0x3f717273
+// CHECK: [0xff,0x15,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_flbit_i32_b64 s5, s[2:3]
+// CHECK: [0x02,0x16,0x85,0xbe]
+
+s_flbit_i32_b64 s103, s[2:3]
+// CHECK: [0x02,0x16,0xe7,0xbe]
+
+s_flbit_i32_b64 flat_scratch_lo, s[2:3]
+// CHECK: [0x02,0x16,0xe8,0xbe]
+
+s_flbit_i32_b64 flat_scratch_hi, s[2:3]
+// CHECK: [0x02,0x16,0xe9,0xbe]
+
+s_flbit_i32_b64 vcc_lo, s[2:3]
+// CHECK: [0x02,0x16,0xea,0xbe]
+
+s_flbit_i32_b64 vcc_hi, s[2:3]
+// CHECK: [0x02,0x16,0xeb,0xbe]
+
+s_flbit_i32_b64 tba_lo, s[2:3]
+// CHECK: [0x02,0x16,0xec,0xbe]
+
+s_flbit_i32_b64 tba_hi, s[2:3]
+// CHECK: [0x02,0x16,0xed,0xbe]
+
+s_flbit_i32_b64 tma_lo, s[2:3]
+// CHECK: [0x02,0x16,0xee,0xbe]
+
+s_flbit_i32_b64 tma_hi, s[2:3]
+// CHECK: [0x02,0x16,0xef,0xbe]
+
+s_flbit_i32_b64 ttmp11, s[2:3]
+// CHECK: [0x02,0x16,0xfb,0xbe]
+
+s_flbit_i32_b64 m0, s[2:3]
+// CHECK: [0x02,0x16,0xfc,0xbe]
+
+s_flbit_i32_b64 exec_lo, s[2:3]
+// CHECK: [0x02,0x16,0xfe,0xbe]
+
+s_flbit_i32_b64 exec_hi, s[2:3]
+// CHECK: [0x02,0x16,0xff,0xbe]
+
+s_flbit_i32_b64 s5, s[4:5]
+// CHECK: [0x04,0x16,0x85,0xbe]
+
+s_flbit_i32_b64 s5, s[102:103]
+// CHECK: [0x66,0x16,0x85,0xbe]
+
+s_flbit_i32_b64 s5, flat_scratch
+// CHECK: [0x68,0x16,0x85,0xbe]
+
+s_flbit_i32_b64 s5, vcc
+// CHECK: [0x6a,0x16,0x85,0xbe]
+
+s_flbit_i32_b64 s5, tba
+// CHECK: [0x6c,0x16,0x85,0xbe]
+
+s_flbit_i32_b64 s5, tma
+// CHECK: [0x6e,0x16,0x85,0xbe]
+
+s_flbit_i32_b64 s5, ttmp[10:11]
+// CHECK: [0x7a,0x16,0x85,0xbe]
+
+s_flbit_i32_b64 s5, exec
+// CHECK: [0x7e,0x16,0x85,0xbe]
+
+s_flbit_i32_b64 s5, 0
+// CHECK: [0x80,0x16,0x85,0xbe]
+
+s_flbit_i32_b64 s5, -1
+// CHECK: [0xc1,0x16,0x85,0xbe]
+
+s_flbit_i32_b64 s5, 0.5
+// CHECK: [0xf0,0x16,0x85,0xbe]
+
+s_flbit_i32_b64 s5, -4.0
+// CHECK: [0xf7,0x16,0x85,0xbe]
+
+s_flbit_i32_b64 s5, 0xaf123456
+// CHECK: [0xff,0x16,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_flbit_i32_b64 s5, 0x3f717273
+// CHECK: [0xff,0x16,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_flbit_i32 s5, s1
+// CHECK: [0x01,0x17,0x85,0xbe]
+
+s_flbit_i32 s103, s1
+// CHECK: [0x01,0x17,0xe7,0xbe]
+
+s_flbit_i32 flat_scratch_lo, s1
+// CHECK: [0x01,0x17,0xe8,0xbe]
+
+s_flbit_i32 flat_scratch_hi, s1
+// CHECK: [0x01,0x17,0xe9,0xbe]
+
+s_flbit_i32 vcc_lo, s1
+// CHECK: [0x01,0x17,0xea,0xbe]
+
+s_flbit_i32 vcc_hi, s1
+// CHECK: [0x01,0x17,0xeb,0xbe]
+
+s_flbit_i32 tba_lo, s1
+// CHECK: [0x01,0x17,0xec,0xbe]
+
+s_flbit_i32 tba_hi, s1
+// CHECK: [0x01,0x17,0xed,0xbe]
+
+s_flbit_i32 tma_lo, s1
+// CHECK: [0x01,0x17,0xee,0xbe]
+
+s_flbit_i32 tma_hi, s1
+// CHECK: [0x01,0x17,0xef,0xbe]
+
+s_flbit_i32 ttmp11, s1
+// CHECK: [0x01,0x17,0xfb,0xbe]
+
+s_flbit_i32 m0, s1
+// CHECK: [0x01,0x17,0xfc,0xbe]
+
+s_flbit_i32 exec_lo, s1
+// CHECK: [0x01,0x17,0xfe,0xbe]
+
+s_flbit_i32 exec_hi, s1
+// CHECK: [0x01,0x17,0xff,0xbe]
+
+s_flbit_i32 s5, s103
+// CHECK: [0x67,0x17,0x85,0xbe]
+
+s_flbit_i32 s5, flat_scratch_lo
+// CHECK: [0x68,0x17,0x85,0xbe]
+
+s_flbit_i32 s5, flat_scratch_hi
+// CHECK: [0x69,0x17,0x85,0xbe]
+
+s_flbit_i32 s5, vcc_lo
+// CHECK: [0x6a,0x17,0x85,0xbe]
+
+s_flbit_i32 s5, vcc_hi
+// CHECK: [0x6b,0x17,0x85,0xbe]
+
+s_flbit_i32 s5, tba_lo
+// CHECK: [0x6c,0x17,0x85,0xbe]
+
+s_flbit_i32 s5, tba_hi
+// CHECK: [0x6d,0x17,0x85,0xbe]
+
+s_flbit_i32 s5, tma_lo
+// CHECK: [0x6e,0x17,0x85,0xbe]
+
+s_flbit_i32 s5, tma_hi
+// CHECK: [0x6f,0x17,0x85,0xbe]
+
+s_flbit_i32 s5, ttmp11
+// CHECK: [0x7b,0x17,0x85,0xbe]
+
+s_flbit_i32 s5, m0
+// CHECK: [0x7c,0x17,0x85,0xbe]
+
+s_flbit_i32 s5, exec_lo
+// CHECK: [0x7e,0x17,0x85,0xbe]
+
+s_flbit_i32 s5, exec_hi
+// CHECK: [0x7f,0x17,0x85,0xbe]
+
+s_flbit_i32 s5, 0
+// CHECK: [0x80,0x17,0x85,0xbe]
+
+s_flbit_i32 s5, -1
+// CHECK: [0xc1,0x17,0x85,0xbe]
+
+s_flbit_i32 s5, 0.5
+// CHECK: [0xf0,0x17,0x85,0xbe]
+
+s_flbit_i32 s5, -4.0
+// CHECK: [0xf7,0x17,0x85,0xbe]
+
+s_flbit_i32 s5, 0xaf123456
+// CHECK: [0xff,0x17,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_flbit_i32 s5, 0x3f717273
+// CHECK: [0xff,0x17,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_flbit_i32_i64 s5, s[2:3]
+// CHECK: [0x02,0x18,0x85,0xbe]
+
+s_flbit_i32_i64 s103, s[2:3]
+// CHECK: [0x02,0x18,0xe7,0xbe]
+
+s_flbit_i32_i64 flat_scratch_lo, s[2:3]
+// CHECK: [0x02,0x18,0xe8,0xbe]
+
+s_flbit_i32_i64 flat_scratch_hi, s[2:3]
+// CHECK: [0x02,0x18,0xe9,0xbe]
+
+s_flbit_i32_i64 vcc_lo, s[2:3]
+// CHECK: [0x02,0x18,0xea,0xbe]
+
+s_flbit_i32_i64 vcc_hi, s[2:3]
+// CHECK: [0x02,0x18,0xeb,0xbe]
+
+s_flbit_i32_i64 tba_lo, s[2:3]
+// CHECK: [0x02,0x18,0xec,0xbe]
+
+s_flbit_i32_i64 tba_hi, s[2:3]
+// CHECK: [0x02,0x18,0xed,0xbe]
+
+s_flbit_i32_i64 tma_lo, s[2:3]
+// CHECK: [0x02,0x18,0xee,0xbe]
+
+s_flbit_i32_i64 tma_hi, s[2:3]
+// CHECK: [0x02,0x18,0xef,0xbe]
+
+s_flbit_i32_i64 ttmp11, s[2:3]
+// CHECK: [0x02,0x18,0xfb,0xbe]
+
+s_flbit_i32_i64 m0, s[2:3]
+// CHECK: [0x02,0x18,0xfc,0xbe]
+
+s_flbit_i32_i64 exec_lo, s[2:3]
+// CHECK: [0x02,0x18,0xfe,0xbe]
+
+s_flbit_i32_i64 exec_hi, s[2:3]
+// CHECK: [0x02,0x18,0xff,0xbe]
+
+s_flbit_i32_i64 s5, s[4:5]
+// CHECK: [0x04,0x18,0x85,0xbe]
+
+s_flbit_i32_i64 s5, s[102:103]
+// CHECK: [0x66,0x18,0x85,0xbe]
+
+s_flbit_i32_i64 s5, flat_scratch
+// CHECK: [0x68,0x18,0x85,0xbe]
+
+s_flbit_i32_i64 s5, vcc
+// CHECK: [0x6a,0x18,0x85,0xbe]
+
+s_flbit_i32_i64 s5, tba
+// CHECK: [0x6c,0x18,0x85,0xbe]
+
+s_flbit_i32_i64 s5, tma
+// CHECK: [0x6e,0x18,0x85,0xbe]
+
+s_flbit_i32_i64 s5, ttmp[10:11]
+// CHECK: [0x7a,0x18,0x85,0xbe]
+
+s_flbit_i32_i64 s5, exec
+// CHECK: [0x7e,0x18,0x85,0xbe]
+
+s_flbit_i32_i64 s5, 0
+// CHECK: [0x80,0x18,0x85,0xbe]
+
+s_flbit_i32_i64 s5, -1
+// CHECK: [0xc1,0x18,0x85,0xbe]
+
+s_flbit_i32_i64 s5, 0.5
+// CHECK: [0xf0,0x18,0x85,0xbe]
+
+s_flbit_i32_i64 s5, -4.0
+// CHECK: [0xf7,0x18,0x85,0xbe]
+
+s_flbit_i32_i64 s5, 0xaf123456
+// CHECK: [0xff,0x18,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_flbit_i32_i64 s5, 0x3f717273
+// CHECK: [0xff,0x18,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_sext_i32_i8 s5, s1
+// CHECK: [0x01,0x19,0x85,0xbe]
+
+s_sext_i32_i8 s103, s1
+// CHECK: [0x01,0x19,0xe7,0xbe]
+
+s_sext_i32_i8 flat_scratch_lo, s1
+// CHECK: [0x01,0x19,0xe8,0xbe]
+
+s_sext_i32_i8 flat_scratch_hi, s1
+// CHECK: [0x01,0x19,0xe9,0xbe]
+
+s_sext_i32_i8 vcc_lo, s1
+// CHECK: [0x01,0x19,0xea,0xbe]
+
+s_sext_i32_i8 vcc_hi, s1
+// CHECK: [0x01,0x19,0xeb,0xbe]
+
+s_sext_i32_i8 tba_lo, s1
+// CHECK: [0x01,0x19,0xec,0xbe]
+
+s_sext_i32_i8 tba_hi, s1
+// CHECK: [0x01,0x19,0xed,0xbe]
+
+s_sext_i32_i8 tma_lo, s1
+// CHECK: [0x01,0x19,0xee,0xbe]
+
+s_sext_i32_i8 tma_hi, s1
+// CHECK: [0x01,0x19,0xef,0xbe]
+
+s_sext_i32_i8 ttmp11, s1
+// CHECK: [0x01,0x19,0xfb,0xbe]
+
+s_sext_i32_i8 m0, s1
+// CHECK: [0x01,0x19,0xfc,0xbe]
+
+s_sext_i32_i8 exec_lo, s1
+// CHECK: [0x01,0x19,0xfe,0xbe]
+
+s_sext_i32_i8 exec_hi, s1
+// CHECK: [0x01,0x19,0xff,0xbe]
+
+s_sext_i32_i8 s5, s103
+// CHECK: [0x67,0x19,0x85,0xbe]
+
+s_sext_i32_i8 s5, flat_scratch_lo
+// CHECK: [0x68,0x19,0x85,0xbe]
+
+s_sext_i32_i8 s5, flat_scratch_hi
+// CHECK: [0x69,0x19,0x85,0xbe]
+
+s_sext_i32_i8 s5, vcc_lo
+// CHECK: [0x6a,0x19,0x85,0xbe]
+
+s_sext_i32_i8 s5, vcc_hi
+// CHECK: [0x6b,0x19,0x85,0xbe]
+
+s_sext_i32_i8 s5, tba_lo
+// CHECK: [0x6c,0x19,0x85,0xbe]
+
+s_sext_i32_i8 s5, tba_hi
+// CHECK: [0x6d,0x19,0x85,0xbe]
+
+s_sext_i32_i8 s5, tma_lo
+// CHECK: [0x6e,0x19,0x85,0xbe]
+
+s_sext_i32_i8 s5, tma_hi
+// CHECK: [0x6f,0x19,0x85,0xbe]
+
+s_sext_i32_i8 s5, ttmp11
+// CHECK: [0x7b,0x19,0x85,0xbe]
+
+s_sext_i32_i8 s5, m0
+// CHECK: [0x7c,0x19,0x85,0xbe]
+
+s_sext_i32_i8 s5, exec_lo
+// CHECK: [0x7e,0x19,0x85,0xbe]
+
+s_sext_i32_i8 s5, exec_hi
+// CHECK: [0x7f,0x19,0x85,0xbe]
+
+s_sext_i32_i8 s5, 0
+// CHECK: [0x80,0x19,0x85,0xbe]
+
+s_sext_i32_i8 s5, -1
+// CHECK: [0xc1,0x19,0x85,0xbe]
+
+s_sext_i32_i8 s5, 0x71
+// CHECK: [0xff,0x19,0x85,0xbe,0x71,0x00,0x00,0x00]
+
+s_sext_i32_i8 s5, 0xf0
+// CHECK: [0xff,0x19,0x85,0xbe,0xf0,0x00,0x00,0x00]
+
+s_sext_i32_i16 s5, s1
+// CHECK: [0x01,0x1a,0x85,0xbe]
+
+s_sext_i32_i16 s103, s1
+// CHECK: [0x01,0x1a,0xe7,0xbe]
+
+s_sext_i32_i16 flat_scratch_lo, s1
+// CHECK: [0x01,0x1a,0xe8,0xbe]
+
+s_sext_i32_i16 flat_scratch_hi, s1
+// CHECK: [0x01,0x1a,0xe9,0xbe]
+
+s_sext_i32_i16 vcc_lo, s1
+// CHECK: [0x01,0x1a,0xea,0xbe]
+
+s_sext_i32_i16 vcc_hi, s1
+// CHECK: [0x01,0x1a,0xeb,0xbe]
+
+s_sext_i32_i16 tba_lo, s1
+// CHECK: [0x01,0x1a,0xec,0xbe]
+
+s_sext_i32_i16 tba_hi, s1
+// CHECK: [0x01,0x1a,0xed,0xbe]
+
+s_sext_i32_i16 tma_lo, s1
+// CHECK: [0x01,0x1a,0xee,0xbe]
+
+s_sext_i32_i16 tma_hi, s1
+// CHECK: [0x01,0x1a,0xef,0xbe]
+
+s_sext_i32_i16 ttmp11, s1
+// CHECK: [0x01,0x1a,0xfb,0xbe]
+
+s_sext_i32_i16 m0, s1
+// CHECK: [0x01,0x1a,0xfc,0xbe]
+
+s_sext_i32_i16 exec_lo, s1
+// CHECK: [0x01,0x1a,0xfe,0xbe]
+
+s_sext_i32_i16 exec_hi, s1
+// CHECK: [0x01,0x1a,0xff,0xbe]
+
+s_sext_i32_i16 s5, s103
+// CHECK: [0x67,0x1a,0x85,0xbe]
+
+s_sext_i32_i16 s5, flat_scratch_lo
+// CHECK: [0x68,0x1a,0x85,0xbe]
+
+s_sext_i32_i16 s5, flat_scratch_hi
+// CHECK: [0x69,0x1a,0x85,0xbe]
+
+s_sext_i32_i16 s5, vcc_lo
+// CHECK: [0x6a,0x1a,0x85,0xbe]
+
+s_sext_i32_i16 s5, vcc_hi
+// CHECK: [0x6b,0x1a,0x85,0xbe]
+
+s_sext_i32_i16 s5, tba_lo
+// CHECK: [0x6c,0x1a,0x85,0xbe]
+
+s_sext_i32_i16 s5, tba_hi
+// CHECK: [0x6d,0x1a,0x85,0xbe]
+
+s_sext_i32_i16 s5, tma_lo
+// CHECK: [0x6e,0x1a,0x85,0xbe]
+
+s_sext_i32_i16 s5, tma_hi
+// CHECK: [0x6f,0x1a,0x85,0xbe]
+
+s_sext_i32_i16 s5, ttmp11
+// CHECK: [0x7b,0x1a,0x85,0xbe]
+
+s_sext_i32_i16 s5, m0
+// CHECK: [0x7c,0x1a,0x85,0xbe]
+
+s_sext_i32_i16 s5, exec_lo
+// CHECK: [0x7e,0x1a,0x85,0xbe]
+
+s_sext_i32_i16 s5, exec_hi
+// CHECK: [0x7f,0x1a,0x85,0xbe]
+
+s_sext_i32_i16 s5, 0
+// CHECK: [0x80,0x1a,0x85,0xbe]
+
+s_sext_i32_i16 s5, -1
+// CHECK: [0xc1,0x1a,0x85,0xbe]
+
+s_sext_i32_i16 s5, 0xfe0b
+// CHECK: [0xff,0x1a,0x85,0xbe,0x0b,0xfe,0x00,0x00]
+
+s_sext_i32_i16 s5, 0x3456
+// CHECK: [0xff,0x1a,0x85,0xbe,0x56,0x34,0x00,0x00]
+
+s_bitset0_b32 s5, s1
+// CHECK: [0x01,0x1b,0x85,0xbe]
+
+s_bitset0_b32 s103, s1
+// CHECK: [0x01,0x1b,0xe7,0xbe]
+
+s_bitset0_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x1b,0xe8,0xbe]
+
+s_bitset0_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x1b,0xe9,0xbe]
+
+s_bitset0_b32 vcc_lo, s1
+// CHECK: [0x01,0x1b,0xea,0xbe]
+
+s_bitset0_b32 vcc_hi, s1
+// CHECK: [0x01,0x1b,0xeb,0xbe]
+
+s_bitset0_b32 tba_lo, s1
+// CHECK: [0x01,0x1b,0xec,0xbe]
+
+s_bitset0_b32 tba_hi, s1
+// CHECK: [0x01,0x1b,0xed,0xbe]
+
+s_bitset0_b32 tma_lo, s1
+// CHECK: [0x01,0x1b,0xee,0xbe]
+
+s_bitset0_b32 tma_hi, s1
+// CHECK: [0x01,0x1b,0xef,0xbe]
+
+s_bitset0_b32 ttmp11, s1
+// CHECK: [0x01,0x1b,0xfb,0xbe]
+
+s_bitset0_b32 m0, s1
+// CHECK: [0x01,0x1b,0xfc,0xbe]
+
+s_bitset0_b32 exec_lo, s1
+// CHECK: [0x01,0x1b,0xfe,0xbe]
+
+s_bitset0_b32 exec_hi, s1
+// CHECK: [0x01,0x1b,0xff,0xbe]
+
+s_bitset0_b32 s5, s103
+// CHECK: [0x67,0x1b,0x85,0xbe]
+
+s_bitset0_b32 s5, flat_scratch_lo
+// CHECK: [0x68,0x1b,0x85,0xbe]
+
+s_bitset0_b32 s5, flat_scratch_hi
+// CHECK: [0x69,0x1b,0x85,0xbe]
+
+s_bitset0_b32 s5, vcc_lo
+// CHECK: [0x6a,0x1b,0x85,0xbe]
+
+s_bitset0_b32 s5, vcc_hi
+// CHECK: [0x6b,0x1b,0x85,0xbe]
+
+s_bitset0_b32 s5, tba_lo
+// CHECK: [0x6c,0x1b,0x85,0xbe]
+
+s_bitset0_b32 s5, tba_hi
+// CHECK: [0x6d,0x1b,0x85,0xbe]
+
+s_bitset0_b32 s5, tma_lo
+// CHECK: [0x6e,0x1b,0x85,0xbe]
+
+s_bitset0_b32 s5, tma_hi
+// CHECK: [0x6f,0x1b,0x85,0xbe]
+
+s_bitset0_b32 s5, ttmp11
+// CHECK: [0x7b,0x1b,0x85,0xbe]
+
+s_bitset0_b32 s5, m0
+// CHECK: [0x7c,0x1b,0x85,0xbe]
+
+s_bitset0_b32 s5, exec_lo
+// CHECK: [0x7e,0x1b,0x85,0xbe]
+
+s_bitset0_b32 s5, exec_hi
+// CHECK: [0x7f,0x1b,0x85,0xbe]
+
+s_bitset0_b32 s5, 0
+// CHECK: [0x80,0x1b,0x85,0xbe]
+
+s_bitset0_b32 s5, -1
+// CHECK: [0xc1,0x1b,0x85,0xbe]
+
+s_bitset0_b32 s5, 0.5
+// CHECK: [0xf0,0x1b,0x85,0xbe]
+
+s_bitset0_b32 s5, -4.0
+// CHECK: [0xf7,0x1b,0x85,0xbe]
+
+s_bitset0_b32 s5, 0xaf123456
+// CHECK: [0xff,0x1b,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_bitset0_b32 s5, 0x3f717273
+// CHECK: [0xff,0x1b,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_bitset0_b64 s[10:11], s1
+// CHECK: [0x01,0x1c,0x8a,0xbe]
+
+s_bitset0_b64 s[12:13], s1
+// CHECK: [0x01,0x1c,0x8c,0xbe]
+
+s_bitset0_b64 s[102:103], s1
+// CHECK: [0x01,0x1c,0xe6,0xbe]
+
+s_bitset0_b64 flat_scratch, s1
+// CHECK: [0x01,0x1c,0xe8,0xbe]
+
+s_bitset0_b64 vcc, s1
+// CHECK: [0x01,0x1c,0xea,0xbe]
+
+s_bitset0_b64 tba, s1
+// CHECK: [0x01,0x1c,0xec,0xbe]
+
+s_bitset0_b64 tma, s1
+// CHECK: [0x01,0x1c,0xee,0xbe]
+
+s_bitset0_b64 ttmp[10:11], s1
+// CHECK: [0x01,0x1c,0xfa,0xbe]
+
+s_bitset0_b64 exec, s1
+// CHECK: [0x01,0x1c,0xfe,0xbe]
+
+s_bitset0_b64 s[10:11], s103
+// CHECK: [0x67,0x1c,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], flat_scratch_lo
+// CHECK: [0x68,0x1c,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], flat_scratch_hi
+// CHECK: [0x69,0x1c,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], vcc_lo
+// CHECK: [0x6a,0x1c,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], vcc_hi
+// CHECK: [0x6b,0x1c,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], tba_lo
+// CHECK: [0x6c,0x1c,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], tba_hi
+// CHECK: [0x6d,0x1c,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], tma_lo
+// CHECK: [0x6e,0x1c,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], tma_hi
+// CHECK: [0x6f,0x1c,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], ttmp11
+// CHECK: [0x7b,0x1c,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], m0
+// CHECK: [0x7c,0x1c,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], exec_lo
+// CHECK: [0x7e,0x1c,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], exec_hi
+// CHECK: [0x7f,0x1c,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], 0
+// CHECK: [0x80,0x1c,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], -1
+// CHECK: [0xc1,0x1c,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x1c,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x1c,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x1c,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_bitset0_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x1c,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_bitset1_b32 s5, s1
+// CHECK: [0x01,0x1d,0x85,0xbe]
+
+s_bitset1_b32 s103, s1
+// CHECK: [0x01,0x1d,0xe7,0xbe]
+
+s_bitset1_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x1d,0xe8,0xbe]
+
+s_bitset1_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x1d,0xe9,0xbe]
+
+s_bitset1_b32 vcc_lo, s1
+// CHECK: [0x01,0x1d,0xea,0xbe]
+
+s_bitset1_b32 vcc_hi, s1
+// CHECK: [0x01,0x1d,0xeb,0xbe]
+
+s_bitset1_b32 tba_lo, s1
+// CHECK: [0x01,0x1d,0xec,0xbe]
+
+s_bitset1_b32 tba_hi, s1
+// CHECK: [0x01,0x1d,0xed,0xbe]
+
+s_bitset1_b32 tma_lo, s1
+// CHECK: [0x01,0x1d,0xee,0xbe]
+
+s_bitset1_b32 tma_hi, s1
+// CHECK: [0x01,0x1d,0xef,0xbe]
+
+s_bitset1_b32 ttmp11, s1
+// CHECK: [0x01,0x1d,0xfb,0xbe]
+
+s_bitset1_b32 m0, s1
+// CHECK: [0x01,0x1d,0xfc,0xbe]
+
+s_bitset1_b32 exec_lo, s1
+// CHECK: [0x01,0x1d,0xfe,0xbe]
+
+s_bitset1_b32 exec_hi, s1
+// CHECK: [0x01,0x1d,0xff,0xbe]
+
+s_bitset1_b32 s5, s103
+// CHECK: [0x67,0x1d,0x85,0xbe]
+
+s_bitset1_b32 s5, flat_scratch_lo
+// CHECK: [0x68,0x1d,0x85,0xbe]
+
+s_bitset1_b32 s5, flat_scratch_hi
+// CHECK: [0x69,0x1d,0x85,0xbe]
+
+s_bitset1_b32 s5, vcc_lo
+// CHECK: [0x6a,0x1d,0x85,0xbe]
+
+s_bitset1_b32 s5, vcc_hi
+// CHECK: [0x6b,0x1d,0x85,0xbe]
+
+s_bitset1_b32 s5, tba_lo
+// CHECK: [0x6c,0x1d,0x85,0xbe]
+
+s_bitset1_b32 s5, tba_hi
+// CHECK: [0x6d,0x1d,0x85,0xbe]
+
+s_bitset1_b32 s5, tma_lo
+// CHECK: [0x6e,0x1d,0x85,0xbe]
+
+s_bitset1_b32 s5, tma_hi
+// CHECK: [0x6f,0x1d,0x85,0xbe]
+
+s_bitset1_b32 s5, ttmp11
+// CHECK: [0x7b,0x1d,0x85,0xbe]
+
+s_bitset1_b32 s5, m0
+// CHECK: [0x7c,0x1d,0x85,0xbe]
+
+s_bitset1_b32 s5, exec_lo
+// CHECK: [0x7e,0x1d,0x85,0xbe]
+
+s_bitset1_b32 s5, exec_hi
+// CHECK: [0x7f,0x1d,0x85,0xbe]
+
+s_bitset1_b32 s5, 0
+// CHECK: [0x80,0x1d,0x85,0xbe]
+
+s_bitset1_b32 s5, -1
+// CHECK: [0xc1,0x1d,0x85,0xbe]
+
+s_bitset1_b32 s5, 0.5
+// CHECK: [0xf0,0x1d,0x85,0xbe]
+
+s_bitset1_b32 s5, -4.0
+// CHECK: [0xf7,0x1d,0x85,0xbe]
+
+s_bitset1_b32 s5, 0xaf123456
+// CHECK: [0xff,0x1d,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_bitset1_b32 s5, 0x3f717273
+// CHECK: [0xff,0x1d,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_bitset1_b64 s[10:11], s1
+// CHECK: [0x01,0x1e,0x8a,0xbe]
+
+s_bitset1_b64 s[12:13], s1
+// CHECK: [0x01,0x1e,0x8c,0xbe]
+
+s_bitset1_b64 s[102:103], s1
+// CHECK: [0x01,0x1e,0xe6,0xbe]
+
+s_bitset1_b64 flat_scratch, s1
+// CHECK: [0x01,0x1e,0xe8,0xbe]
+
+s_bitset1_b64 vcc, s1
+// CHECK: [0x01,0x1e,0xea,0xbe]
+
+s_bitset1_b64 tba, s1
+// CHECK: [0x01,0x1e,0xec,0xbe]
+
+s_bitset1_b64 tma, s1
+// CHECK: [0x01,0x1e,0xee,0xbe]
+
+s_bitset1_b64 ttmp[10:11], s1
+// CHECK: [0x01,0x1e,0xfa,0xbe]
+
+s_bitset1_b64 exec, s1
+// CHECK: [0x01,0x1e,0xfe,0xbe]
+
+s_bitset1_b64 s[10:11], s103
+// CHECK: [0x67,0x1e,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], flat_scratch_lo
+// CHECK: [0x68,0x1e,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], flat_scratch_hi
+// CHECK: [0x69,0x1e,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], vcc_lo
+// CHECK: [0x6a,0x1e,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], vcc_hi
+// CHECK: [0x6b,0x1e,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], tba_lo
+// CHECK: [0x6c,0x1e,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], tba_hi
+// CHECK: [0x6d,0x1e,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], tma_lo
+// CHECK: [0x6e,0x1e,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], tma_hi
+// CHECK: [0x6f,0x1e,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], ttmp11
+// CHECK: [0x7b,0x1e,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], m0
+// CHECK: [0x7c,0x1e,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], exec_lo
+// CHECK: [0x7e,0x1e,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], exec_hi
+// CHECK: [0x7f,0x1e,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], 0
+// CHECK: [0x80,0x1e,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], -1
+// CHECK: [0xc1,0x1e,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x1e,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x1e,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x1e,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_bitset1_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x1e,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_getpc_b64 s[10:11]
+// CHECK: [0x00,0x1f,0x8a,0xbe]
+
+s_getpc_b64 s[12:13]
+// CHECK: [0x00,0x1f,0x8c,0xbe]
+
+s_getpc_b64 s[102:103]
+// CHECK: [0x00,0x1f,0xe6,0xbe]
+
+s_getpc_b64 flat_scratch
+// CHECK: [0x00,0x1f,0xe8,0xbe]
+
+s_getpc_b64 vcc
+// CHECK: [0x00,0x1f,0xea,0xbe]
+
+s_getpc_b64 tba
+// CHECK: [0x00,0x1f,0xec,0xbe]
+
+s_getpc_b64 tma
+// CHECK: [0x00,0x1f,0xee,0xbe]
+
+s_getpc_b64 ttmp[10:11]
+// CHECK: [0x00,0x1f,0xfa,0xbe]
+
+s_getpc_b64 exec
+// CHECK: [0x00,0x1f,0xfe,0xbe]
+
+s_setpc_b64 s[2:3]
+// CHECK: [0x02,0x20,0x80,0xbe]
+
+s_setpc_b64 s[4:5]
+// CHECK: [0x04,0x20,0x80,0xbe]
+
+s_setpc_b64 s[102:103]
+// CHECK: [0x66,0x20,0x80,0xbe]
+
+s_setpc_b64 flat_scratch
+// CHECK: [0x68,0x20,0x80,0xbe]
+
+s_setpc_b64 vcc
+// CHECK: [0x6a,0x20,0x80,0xbe]
+
+s_setpc_b64 tba
+// CHECK: [0x6c,0x20,0x80,0xbe]
+
+s_setpc_b64 tma
+// CHECK: [0x6e,0x20,0x80,0xbe]
+
+s_setpc_b64 ttmp[10:11]
+// CHECK: [0x7a,0x20,0x80,0xbe]
+
+s_swappc_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x21,0x8a,0xbe]
+
+s_swappc_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x21,0x8c,0xbe]
+
+s_swappc_b64 s[102:103], s[2:3]
+// CHECK: [0x02,0x21,0xe6,0xbe]
+
+s_swappc_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x21,0xe8,0xbe]
+
+s_swappc_b64 vcc, s[2:3]
+// CHECK: [0x02,0x21,0xea,0xbe]
+
+s_swappc_b64 tba, s[2:3]
+// CHECK: [0x02,0x21,0xec,0xbe]
+
+s_swappc_b64 tma, s[2:3]
+// CHECK: [0x02,0x21,0xee,0xbe]
+
+s_swappc_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x21,0xfa,0xbe]
+
+s_swappc_b64 exec, s[2:3]
+// CHECK: [0x02,0x21,0xfe,0xbe]
+
+s_swappc_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x21,0x8a,0xbe]
+
+s_swappc_b64 s[10:11], s[102:103]
+// CHECK: [0x66,0x21,0x8a,0xbe]
+
+s_swappc_b64 s[10:11], flat_scratch
+// CHECK: [0x68,0x21,0x8a,0xbe]
+
+s_swappc_b64 s[10:11], vcc
+// CHECK: [0x6a,0x21,0x8a,0xbe]
+
+s_swappc_b64 s[10:11], tba
+// CHECK: [0x6c,0x21,0x8a,0xbe]
+
+s_swappc_b64 s[10:11], tma
+// CHECK: [0x6e,0x21,0x8a,0xbe]
+
+s_swappc_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x21,0x8a,0xbe]
+
+s_rfe_b64 s[2:3]
+// CHECK: [0x02,0x22,0x80,0xbe]
+
+s_rfe_b64 s[4:5]
+// CHECK: [0x04,0x22,0x80,0xbe]
+
+s_rfe_b64 s[102:103]
+// CHECK: [0x66,0x22,0x80,0xbe]
+
+s_rfe_b64 flat_scratch
+// CHECK: [0x68,0x22,0x80,0xbe]
+
+s_rfe_b64 vcc
+// CHECK: [0x6a,0x22,0x80,0xbe]
+
+s_rfe_b64 tba
+// CHECK: [0x6c,0x22,0x80,0xbe]
+
+s_rfe_b64 tma
+// CHECK: [0x6e,0x22,0x80,0xbe]
+
+s_rfe_b64 ttmp[10:11]
+// CHECK: [0x7a,0x22,0x80,0xbe]
+
+s_and_saveexec_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x24,0x8a,0xbe]
+
+s_and_saveexec_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x24,0x8c,0xbe]
+
+s_and_saveexec_b64 s[102:103], s[2:3]
+// CHECK: [0x02,0x24,0xe6,0xbe]
+
+s_and_saveexec_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x24,0xe8,0xbe]
+
+s_and_saveexec_b64 vcc, s[2:3]
+// CHECK: [0x02,0x24,0xea,0xbe]
+
+s_and_saveexec_b64 tba, s[2:3]
+// CHECK: [0x02,0x24,0xec,0xbe]
+
+s_and_saveexec_b64 tma, s[2:3]
+// CHECK: [0x02,0x24,0xee,0xbe]
+
+s_and_saveexec_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x24,0xfa,0xbe]
+
+s_and_saveexec_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x24,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], s[102:103]
+// CHECK: [0x66,0x24,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], flat_scratch
+// CHECK: [0x68,0x24,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], vcc
+// CHECK: [0x6a,0x24,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], tba
+// CHECK: [0x6c,0x24,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], tma
+// CHECK: [0x6e,0x24,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x24,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], exec
+// CHECK: [0x7e,0x24,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], 0
+// CHECK: [0x80,0x24,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], -1
+// CHECK: [0xc1,0x24,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x24,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x24,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x24,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_and_saveexec_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x24,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_or_saveexec_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x25,0x8a,0xbe]
+
+s_or_saveexec_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x25,0x8c,0xbe]
+
+s_or_saveexec_b64 s[102:103], s[2:3]
+// CHECK: [0x02,0x25,0xe6,0xbe]
+
+s_or_saveexec_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x25,0xe8,0xbe]
+
+s_or_saveexec_b64 vcc, s[2:3]
+// CHECK: [0x02,0x25,0xea,0xbe]
+
+s_or_saveexec_b64 tba, s[2:3]
+// CHECK: [0x02,0x25,0xec,0xbe]
+
+s_or_saveexec_b64 tma, s[2:3]
+// CHECK: [0x02,0x25,0xee,0xbe]
+
+s_or_saveexec_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x25,0xfa,0xbe]
+
+s_or_saveexec_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x25,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], s[102:103]
+// CHECK: [0x66,0x25,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], flat_scratch
+// CHECK: [0x68,0x25,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], vcc
+// CHECK: [0x6a,0x25,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], tba
+// CHECK: [0x6c,0x25,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], tma
+// CHECK: [0x6e,0x25,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x25,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], exec
+// CHECK: [0x7e,0x25,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], 0
+// CHECK: [0x80,0x25,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], -1
+// CHECK: [0xc1,0x25,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x25,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x25,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x25,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_or_saveexec_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x25,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_xor_saveexec_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x26,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x26,0x8c,0xbe]
+
+s_xor_saveexec_b64 s[102:103], s[2:3]
+// CHECK: [0x02,0x26,0xe6,0xbe]
+
+s_xor_saveexec_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x26,0xe8,0xbe]
+
+s_xor_saveexec_b64 vcc, s[2:3]
+// CHECK: [0x02,0x26,0xea,0xbe]
+
+s_xor_saveexec_b64 tba, s[2:3]
+// CHECK: [0x02,0x26,0xec,0xbe]
+
+s_xor_saveexec_b64 tma, s[2:3]
+// CHECK: [0x02,0x26,0xee,0xbe]
+
+s_xor_saveexec_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x26,0xfa,0xbe]
+
+s_xor_saveexec_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x26,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], s[102:103]
+// CHECK: [0x66,0x26,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], flat_scratch
+// CHECK: [0x68,0x26,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], vcc
+// CHECK: [0x6a,0x26,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], tba
+// CHECK: [0x6c,0x26,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], tma
+// CHECK: [0x6e,0x26,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x26,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], exec
+// CHECK: [0x7e,0x26,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], 0
+// CHECK: [0x80,0x26,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], -1
+// CHECK: [0xc1,0x26,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x26,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x26,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x26,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_xor_saveexec_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x26,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_andn2_saveexec_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x27,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x27,0x8c,0xbe]
+
+s_andn2_saveexec_b64 s[102:103], s[2:3]
+// CHECK: [0x02,0x27,0xe6,0xbe]
+
+s_andn2_saveexec_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x27,0xe8,0xbe]
+
+s_andn2_saveexec_b64 vcc, s[2:3]
+// CHECK: [0x02,0x27,0xea,0xbe]
+
+s_andn2_saveexec_b64 tba, s[2:3]
+// CHECK: [0x02,0x27,0xec,0xbe]
+
+s_andn2_saveexec_b64 tma, s[2:3]
+// CHECK: [0x02,0x27,0xee,0xbe]
+
+s_andn2_saveexec_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x27,0xfa,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x27,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], s[102:103]
+// CHECK: [0x66,0x27,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], flat_scratch
+// CHECK: [0x68,0x27,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], vcc
+// CHECK: [0x6a,0x27,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], tba
+// CHECK: [0x6c,0x27,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], tma
+// CHECK: [0x6e,0x27,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x27,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], exec
+// CHECK: [0x7e,0x27,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], 0
+// CHECK: [0x80,0x27,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], -1
+// CHECK: [0xc1,0x27,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x27,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x27,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x27,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_andn2_saveexec_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x27,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_orn2_saveexec_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x28,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x28,0x8c,0xbe]
+
+s_orn2_saveexec_b64 s[102:103], s[2:3]
+// CHECK: [0x02,0x28,0xe6,0xbe]
+
+s_orn2_saveexec_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x28,0xe8,0xbe]
+
+s_orn2_saveexec_b64 vcc, s[2:3]
+// CHECK: [0x02,0x28,0xea,0xbe]
+
+s_orn2_saveexec_b64 tba, s[2:3]
+// CHECK: [0x02,0x28,0xec,0xbe]
+
+s_orn2_saveexec_b64 tma, s[2:3]
+// CHECK: [0x02,0x28,0xee,0xbe]
+
+s_orn2_saveexec_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x28,0xfa,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x28,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], s[102:103]
+// CHECK: [0x66,0x28,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], flat_scratch
+// CHECK: [0x68,0x28,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], vcc
+// CHECK: [0x6a,0x28,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], tba
+// CHECK: [0x6c,0x28,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], tma
+// CHECK: [0x6e,0x28,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x28,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], exec
+// CHECK: [0x7e,0x28,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], 0
+// CHECK: [0x80,0x28,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], -1
+// CHECK: [0xc1,0x28,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x28,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x28,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x28,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_orn2_saveexec_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x28,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_nand_saveexec_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x29,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x29,0x8c,0xbe]
+
+s_nand_saveexec_b64 s[102:103], s[2:3]
+// CHECK: [0x02,0x29,0xe6,0xbe]
+
+s_nand_saveexec_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x29,0xe8,0xbe]
+
+s_nand_saveexec_b64 vcc, s[2:3]
+// CHECK: [0x02,0x29,0xea,0xbe]
+
+s_nand_saveexec_b64 tba, s[2:3]
+// CHECK: [0x02,0x29,0xec,0xbe]
+
+s_nand_saveexec_b64 tma, s[2:3]
+// CHECK: [0x02,0x29,0xee,0xbe]
+
+s_nand_saveexec_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x29,0xfa,0xbe]
+
+s_nand_saveexec_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x29,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], s[102:103]
+// CHECK: [0x66,0x29,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], flat_scratch
+// CHECK: [0x68,0x29,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], vcc
+// CHECK: [0x6a,0x29,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], tba
+// CHECK: [0x6c,0x29,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], tma
+// CHECK: [0x6e,0x29,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x29,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], exec
+// CHECK: [0x7e,0x29,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], 0
+// CHECK: [0x80,0x29,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], -1
+// CHECK: [0xc1,0x29,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x29,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x29,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x29,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_nand_saveexec_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x29,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_nor_saveexec_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x2a,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x2a,0x8c,0xbe]
+
+s_nor_saveexec_b64 s[102:103], s[2:3]
+// CHECK: [0x02,0x2a,0xe6,0xbe]
+
+s_nor_saveexec_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x2a,0xe8,0xbe]
+
+s_nor_saveexec_b64 vcc, s[2:3]
+// CHECK: [0x02,0x2a,0xea,0xbe]
+
+s_nor_saveexec_b64 tba, s[2:3]
+// CHECK: [0x02,0x2a,0xec,0xbe]
+
+s_nor_saveexec_b64 tma, s[2:3]
+// CHECK: [0x02,0x2a,0xee,0xbe]
+
+s_nor_saveexec_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x2a,0xfa,0xbe]
+
+s_nor_saveexec_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x2a,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], s[102:103]
+// CHECK: [0x66,0x2a,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], flat_scratch
+// CHECK: [0x68,0x2a,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], vcc
+// CHECK: [0x6a,0x2a,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], tba
+// CHECK: [0x6c,0x2a,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], tma
+// CHECK: [0x6e,0x2a,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x2a,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], exec
+// CHECK: [0x7e,0x2a,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], 0
+// CHECK: [0x80,0x2a,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], -1
+// CHECK: [0xc1,0x2a,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x2a,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x2a,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x2a,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_nor_saveexec_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x2a,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_xnor_saveexec_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x2b,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x2b,0x8c,0xbe]
+
+s_xnor_saveexec_b64 s[102:103], s[2:3]
+// CHECK: [0x02,0x2b,0xe6,0xbe]
+
+s_xnor_saveexec_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x2b,0xe8,0xbe]
+
+s_xnor_saveexec_b64 vcc, s[2:3]
+// CHECK: [0x02,0x2b,0xea,0xbe]
+
+s_xnor_saveexec_b64 tba, s[2:3]
+// CHECK: [0x02,0x2b,0xec,0xbe]
+
+s_xnor_saveexec_b64 tma, s[2:3]
+// CHECK: [0x02,0x2b,0xee,0xbe]
+
+s_xnor_saveexec_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x2b,0xfa,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x2b,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], s[102:103]
+// CHECK: [0x66,0x2b,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], flat_scratch
+// CHECK: [0x68,0x2b,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], vcc
+// CHECK: [0x6a,0x2b,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], tba
+// CHECK: [0x6c,0x2b,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], tma
+// CHECK: [0x6e,0x2b,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x2b,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], exec
+// CHECK: [0x7e,0x2b,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], 0
+// CHECK: [0x80,0x2b,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], -1
+// CHECK: [0xc1,0x2b,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x2b,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x2b,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x2b,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_xnor_saveexec_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x2b,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_quadmask_b32 s5, s1
+// CHECK: [0x01,0x2c,0x85,0xbe]
+
+s_quadmask_b32 s103, s1
+// CHECK: [0x01,0x2c,0xe7,0xbe]
+
+s_quadmask_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x2c,0xe8,0xbe]
+
+s_quadmask_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x2c,0xe9,0xbe]
+
+s_quadmask_b32 vcc_lo, s1
+// CHECK: [0x01,0x2c,0xea,0xbe]
+
+s_quadmask_b32 vcc_hi, s1
+// CHECK: [0x01,0x2c,0xeb,0xbe]
+
+s_quadmask_b32 tba_lo, s1
+// CHECK: [0x01,0x2c,0xec,0xbe]
+
+s_quadmask_b32 tba_hi, s1
+// CHECK: [0x01,0x2c,0xed,0xbe]
+
+s_quadmask_b32 tma_lo, s1
+// CHECK: [0x01,0x2c,0xee,0xbe]
+
+s_quadmask_b32 tma_hi, s1
+// CHECK: [0x01,0x2c,0xef,0xbe]
+
+s_quadmask_b32 ttmp11, s1
+// CHECK: [0x01,0x2c,0xfb,0xbe]
+
+s_quadmask_b32 m0, s1
+// CHECK: [0x01,0x2c,0xfc,0xbe]
+
+s_quadmask_b32 exec_lo, s1
+// CHECK: [0x01,0x2c,0xfe,0xbe]
+
+s_quadmask_b32 exec_hi, s1
+// CHECK: [0x01,0x2c,0xff,0xbe]
+
+s_quadmask_b32 s5, s103
+// CHECK: [0x67,0x2c,0x85,0xbe]
+
+s_quadmask_b32 s5, flat_scratch_lo
+// CHECK: [0x68,0x2c,0x85,0xbe]
+
+s_quadmask_b32 s5, flat_scratch_hi
+// CHECK: [0x69,0x2c,0x85,0xbe]
+
+s_quadmask_b32 s5, vcc_lo
+// CHECK: [0x6a,0x2c,0x85,0xbe]
+
+s_quadmask_b32 s5, vcc_hi
+// CHECK: [0x6b,0x2c,0x85,0xbe]
+
+s_quadmask_b32 s5, tba_lo
+// CHECK: [0x6c,0x2c,0x85,0xbe]
+
+s_quadmask_b32 s5, tba_hi
+// CHECK: [0x6d,0x2c,0x85,0xbe]
+
+s_quadmask_b32 s5, tma_lo
+// CHECK: [0x6e,0x2c,0x85,0xbe]
+
+s_quadmask_b32 s5, tma_hi
+// CHECK: [0x6f,0x2c,0x85,0xbe]
+
+s_quadmask_b32 s5, ttmp11
+// CHECK: [0x7b,0x2c,0x85,0xbe]
+
+s_quadmask_b32 s5, m0
+// CHECK: [0x7c,0x2c,0x85,0xbe]
+
+s_quadmask_b32 s5, exec_lo
+// CHECK: [0x7e,0x2c,0x85,0xbe]
+
+s_quadmask_b32 s5, exec_hi
+// CHECK: [0x7f,0x2c,0x85,0xbe]
+
+s_quadmask_b32 s5, 0
+// CHECK: [0x80,0x2c,0x85,0xbe]
+
+s_quadmask_b32 s5, -1
+// CHECK: [0xc1,0x2c,0x85,0xbe]
+
+s_quadmask_b32 s5, 0.5
+// CHECK: [0xf0,0x2c,0x85,0xbe]
+
+s_quadmask_b32 s5, -4.0
+// CHECK: [0xf7,0x2c,0x85,0xbe]
+
+s_quadmask_b32 s5, 0xaf123456
+// CHECK: [0xff,0x2c,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_quadmask_b32 s5, 0x3f717273
+// CHECK: [0xff,0x2c,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_quadmask_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x2d,0x8a,0xbe]
+
+s_quadmask_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x2d,0x8c,0xbe]
+
+s_quadmask_b64 s[102:103], s[2:3]
+// CHECK: [0x02,0x2d,0xe6,0xbe]
+
+s_quadmask_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x2d,0xe8,0xbe]
+
+s_quadmask_b64 vcc, s[2:3]
+// CHECK: [0x02,0x2d,0xea,0xbe]
+
+s_quadmask_b64 tba, s[2:3]
+// CHECK: [0x02,0x2d,0xec,0xbe]
+
+s_quadmask_b64 tma, s[2:3]
+// CHECK: [0x02,0x2d,0xee,0xbe]
+
+s_quadmask_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x2d,0xfa,0xbe]
+
+s_quadmask_b64 exec, s[2:3]
+// CHECK: [0x02,0x2d,0xfe,0xbe]
+
+s_quadmask_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x2d,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], s[102:103]
+// CHECK: [0x66,0x2d,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], flat_scratch
+// CHECK: [0x68,0x2d,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], vcc
+// CHECK: [0x6a,0x2d,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], tba
+// CHECK: [0x6c,0x2d,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], tma
+// CHECK: [0x6e,0x2d,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x2d,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], exec
+// CHECK: [0x7e,0x2d,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], 0
+// CHECK: [0x80,0x2d,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], -1
+// CHECK: [0xc1,0x2d,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x2d,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x2d,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x2d,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_quadmask_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x2d,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_movrels_b32 s5, s1
+// CHECK: [0x01,0x2e,0x85,0xbe]
+
+s_movrels_b32 s103, s1
+// CHECK: [0x01,0x2e,0xe7,0xbe]
+
+s_movrels_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x2e,0xe8,0xbe]
+
+s_movrels_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x2e,0xe9,0xbe]
+
+s_movrels_b32 vcc_lo, s1
+// CHECK: [0x01,0x2e,0xea,0xbe]
+
+s_movrels_b32 vcc_hi, s1
+// CHECK: [0x01,0x2e,0xeb,0xbe]
+
+s_movrels_b32 tba_lo, s1
+// CHECK: [0x01,0x2e,0xec,0xbe]
+
+s_movrels_b32 tba_hi, s1
+// CHECK: [0x01,0x2e,0xed,0xbe]
+
+s_movrels_b32 tma_lo, s1
+// CHECK: [0x01,0x2e,0xee,0xbe]
+
+s_movrels_b32 tma_hi, s1
+// CHECK: [0x01,0x2e,0xef,0xbe]
+
+s_movrels_b32 ttmp11, s1
+// CHECK: [0x01,0x2e,0xfb,0xbe]
+
+s_movrels_b32 m0, s1
+// CHECK: [0x01,0x2e,0xfc,0xbe]
+
+s_movrels_b32 exec_lo, s1
+// CHECK: [0x01,0x2e,0xfe,0xbe]
+
+s_movrels_b32 exec_hi, s1
+// CHECK: [0x01,0x2e,0xff,0xbe]
+
+s_movrels_b32 s5, s103
+// CHECK: [0x67,0x2e,0x85,0xbe]
+
+s_movrels_b32 s5, flat_scratch_lo
+// CHECK: [0x68,0x2e,0x85,0xbe]
+
+s_movrels_b32 s5, flat_scratch_hi
+// CHECK: [0x69,0x2e,0x85,0xbe]
+
+s_movrels_b32 s5, vcc_lo
+// CHECK: [0x6a,0x2e,0x85,0xbe]
+
+s_movrels_b32 s5, vcc_hi
+// CHECK: [0x6b,0x2e,0x85,0xbe]
+
+s_movrels_b32 s5, tba_lo
+// CHECK: [0x6c,0x2e,0x85,0xbe]
+
+s_movrels_b32 s5, tba_hi
+// CHECK: [0x6d,0x2e,0x85,0xbe]
+
+s_movrels_b32 s5, tma_lo
+// CHECK: [0x6e,0x2e,0x85,0xbe]
+
+s_movrels_b32 s5, tma_hi
+// CHECK: [0x6f,0x2e,0x85,0xbe]
+
+s_movrels_b32 s5, ttmp11
+// CHECK: [0x7b,0x2e,0x85,0xbe]
+
+s_movrels_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x2f,0x8a,0xbe]
+
+s_movrels_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x2f,0x8c,0xbe]
+
+s_movrels_b64 s[102:103], s[2:3]
+// CHECK: [0x02,0x2f,0xe6,0xbe]
+
+s_movrels_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x2f,0xe8,0xbe]
+
+s_movrels_b64 vcc, s[2:3]
+// CHECK: [0x02,0x2f,0xea,0xbe]
+
+s_movrels_b64 tba, s[2:3]
+// CHECK: [0x02,0x2f,0xec,0xbe]
+
+s_movrels_b64 tma, s[2:3]
+// CHECK: [0x02,0x2f,0xee,0xbe]
+
+s_movrels_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x2f,0xfa,0xbe]
+
+s_movrels_b64 exec, s[2:3]
+// CHECK: [0x02,0x2f,0xfe,0xbe]
+
+s_movrels_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x2f,0x8a,0xbe]
+
+s_movrels_b64 s[10:11], s[102:103]
+// CHECK: [0x66,0x2f,0x8a,0xbe]
+
+s_movrels_b64 s[10:11], flat_scratch
+// CHECK: [0x68,0x2f,0x8a,0xbe]
+
+s_movrels_b64 s[10:11], vcc
+// CHECK: [0x6a,0x2f,0x8a,0xbe]
+
+s_movrels_b64 s[10:11], tba
+// CHECK: [0x6c,0x2f,0x8a,0xbe]
+
+s_movrels_b64 s[10:11], tma
+// CHECK: [0x6e,0x2f,0x8a,0xbe]
+
+s_movrels_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x2f,0x8a,0xbe]
+
+s_movreld_b32 s5, s1
+// CHECK: [0x01,0x30,0x85,0xbe]
+
+s_movreld_b32 s103, s1
+// CHECK: [0x01,0x30,0xe7,0xbe]
+
+s_movreld_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x30,0xe8,0xbe]
+
+s_movreld_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x30,0xe9,0xbe]
+
+s_movreld_b32 vcc_lo, s1
+// CHECK: [0x01,0x30,0xea,0xbe]
+
+s_movreld_b32 vcc_hi, s1
+// CHECK: [0x01,0x30,0xeb,0xbe]
+
+s_movreld_b32 tba_lo, s1
+// CHECK: [0x01,0x30,0xec,0xbe]
+
+s_movreld_b32 tba_hi, s1
+// CHECK: [0x01,0x30,0xed,0xbe]
+
+s_movreld_b32 tma_lo, s1
+// CHECK: [0x01,0x30,0xee,0xbe]
+
+s_movreld_b32 tma_hi, s1
+// CHECK: [0x01,0x30,0xef,0xbe]
+
+s_movreld_b32 ttmp11, s1
+// CHECK: [0x01,0x30,0xfb,0xbe]
+
+s_movreld_b32 s5, s103
+// CHECK: [0x67,0x30,0x85,0xbe]
+
+s_movreld_b32 s5, flat_scratch_lo
+// CHECK: [0x68,0x30,0x85,0xbe]
+
+s_movreld_b32 s5, flat_scratch_hi
+// CHECK: [0x69,0x30,0x85,0xbe]
+
+s_movreld_b32 s5, vcc_lo
+// CHECK: [0x6a,0x30,0x85,0xbe]
+
+s_movreld_b32 s5, vcc_hi
+// CHECK: [0x6b,0x30,0x85,0xbe]
+
+s_movreld_b32 s5, tba_lo
+// CHECK: [0x6c,0x30,0x85,0xbe]
+
+s_movreld_b32 s5, tba_hi
+// CHECK: [0x6d,0x30,0x85,0xbe]
+
+s_movreld_b32 s5, tma_lo
+// CHECK: [0x6e,0x30,0x85,0xbe]
+
+s_movreld_b32 s5, tma_hi
+// CHECK: [0x6f,0x30,0x85,0xbe]
+
+s_movreld_b32 s5, ttmp11
+// CHECK: [0x7b,0x30,0x85,0xbe]
+
+s_movreld_b32 s5, m0
+// CHECK: [0x7c,0x30,0x85,0xbe]
+
+s_movreld_b32 s5, 0
+// CHECK: [0x80,0x30,0x85,0xbe]
+
+s_movreld_b32 s5, -1
+// CHECK: [0xc1,0x30,0x85,0xbe]
+
+s_movreld_b32 s5, 0.5
+// CHECK: [0xf0,0x30,0x85,0xbe]
+
+s_movreld_b32 s5, -4.0
+// CHECK: [0xf7,0x30,0x85,0xbe]
+
+s_movreld_b32 s5, 0xaf123456
+// CHECK: [0xff,0x30,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_movreld_b32 s5, 0x3f717273
+// CHECK: [0xff,0x30,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_movreld_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x31,0x8a,0xbe]
+
+s_movreld_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x31,0x8c,0xbe]
+
+s_movreld_b64 s[102:103], s[2:3]
+// CHECK: [0x02,0x31,0xe6,0xbe]
+
+s_movreld_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x31,0xe8,0xbe]
+
+s_movreld_b64 vcc, s[2:3]
+// CHECK: [0x02,0x31,0xea,0xbe]
+
+s_movreld_b64 tba, s[2:3]
+// CHECK: [0x02,0x31,0xec,0xbe]
+
+s_movreld_b64 tma, s[2:3]
+// CHECK: [0x02,0x31,0xee,0xbe]
+
+s_movreld_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x31,0xfa,0xbe]
+
+s_movreld_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x31,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], s[102:103]
+// CHECK: [0x66,0x31,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], flat_scratch
+// CHECK: [0x68,0x31,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], vcc
+// CHECK: [0x6a,0x31,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], tba
+// CHECK: [0x6c,0x31,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], tma
+// CHECK: [0x6e,0x31,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x31,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], 0
+// CHECK: [0x80,0x31,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], -1
+// CHECK: [0xc1,0x31,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x31,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x31,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x31,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_movreld_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x31,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_abs_i32 s5, s1
+// CHECK: [0x01,0x34,0x85,0xbe]
+
+s_abs_i32 s103, s1
+// CHECK: [0x01,0x34,0xe7,0xbe]
+
+s_abs_i32 flat_scratch_lo, s1
+// CHECK: [0x01,0x34,0xe8,0xbe]
+
+s_abs_i32 flat_scratch_hi, s1
+// CHECK: [0x01,0x34,0xe9,0xbe]
+
+s_abs_i32 vcc_lo, s1
+// CHECK: [0x01,0x34,0xea,0xbe]
+
+s_abs_i32 vcc_hi, s1
+// CHECK: [0x01,0x34,0xeb,0xbe]
+
+s_abs_i32 tba_lo, s1
+// CHECK: [0x01,0x34,0xec,0xbe]
+
+s_abs_i32 tba_hi, s1
+// CHECK: [0x01,0x34,0xed,0xbe]
+
+s_abs_i32 tma_lo, s1
+// CHECK: [0x01,0x34,0xee,0xbe]
+
+s_abs_i32 tma_hi, s1
+// CHECK: [0x01,0x34,0xef,0xbe]
+
+s_abs_i32 ttmp11, s1
+// CHECK: [0x01,0x34,0xfb,0xbe]
+
+s_abs_i32 m0, s1
+// CHECK: [0x01,0x34,0xfc,0xbe]
+
+s_abs_i32 exec_lo, s1
+// CHECK: [0x01,0x34,0xfe,0xbe]
+
+s_abs_i32 exec_hi, s1
+// CHECK: [0x01,0x34,0xff,0xbe]
+
+s_abs_i32 s5, s103
+// CHECK: [0x67,0x34,0x85,0xbe]
+
+s_abs_i32 s5, flat_scratch_lo
+// CHECK: [0x68,0x34,0x85,0xbe]
+
+s_abs_i32 s5, flat_scratch_hi
+// CHECK: [0x69,0x34,0x85,0xbe]
+
+s_abs_i32 s5, vcc_lo
+// CHECK: [0x6a,0x34,0x85,0xbe]
+
+s_abs_i32 s5, vcc_hi
+// CHECK: [0x6b,0x34,0x85,0xbe]
+
+s_abs_i32 s5, tba_lo
+// CHECK: [0x6c,0x34,0x85,0xbe]
+
+s_abs_i32 s5, tba_hi
+// CHECK: [0x6d,0x34,0x85,0xbe]
+
+s_abs_i32 s5, tma_lo
+// CHECK: [0x6e,0x34,0x85,0xbe]
+
+s_abs_i32 s5, tma_hi
+// CHECK: [0x6f,0x34,0x85,0xbe]
+
+s_abs_i32 s5, ttmp11
+// CHECK: [0x7b,0x34,0x85,0xbe]
+
+s_abs_i32 s5, m0
+// CHECK: [0x7c,0x34,0x85,0xbe]
+
+s_abs_i32 s5, exec_lo
+// CHECK: [0x7e,0x34,0x85,0xbe]
+
+s_abs_i32 s5, exec_hi
+// CHECK: [0x7f,0x34,0x85,0xbe]
+
+s_abs_i32 s5, 0
+// CHECK: [0x80,0x34,0x85,0xbe]
+
+s_abs_i32 s5, -1
+// CHECK: [0xc1,0x34,0x85,0xbe]
+
+s_abs_i32 s5, 0.5
+// CHECK: [0xf0,0x34,0x85,0xbe]
+
+s_abs_i32 s5, -4.0
+// CHECK: [0xf7,0x34,0x85,0xbe]
+
+s_abs_i32 s5, 0xaf123456
+// CHECK: [0xff,0x34,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_abs_i32 s5, 0x3f717273
+// CHECK: [0xff,0x34,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_mov_fed_b32 s5, s1
+// CHECK: [0x01,0x35,0x85,0xbe]
+
+s_mov_fed_b32 s103, s1
+// CHECK: [0x01,0x35,0xe7,0xbe]
+
+s_mov_fed_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x35,0xe8,0xbe]
+
+s_mov_fed_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x35,0xe9,0xbe]
+
+s_mov_fed_b32 vcc_lo, s1
+// CHECK: [0x01,0x35,0xea,0xbe]
+
+s_mov_fed_b32 vcc_hi, s1
+// CHECK: [0x01,0x35,0xeb,0xbe]
+
+s_mov_fed_b32 tba_lo, s1
+// CHECK: [0x01,0x35,0xec,0xbe]
+
+s_mov_fed_b32 tba_hi, s1
+// CHECK: [0x01,0x35,0xed,0xbe]
+
+s_mov_fed_b32 tma_lo, s1
+// CHECK: [0x01,0x35,0xee,0xbe]
+
+s_mov_fed_b32 tma_hi, s1
+// CHECK: [0x01,0x35,0xef,0xbe]
+
+s_mov_fed_b32 ttmp11, s1
+// CHECK: [0x01,0x35,0xfb,0xbe]
+
+s_mov_fed_b32 m0, s1
+// CHECK: [0x01,0x35,0xfc,0xbe]
+
+s_mov_fed_b32 exec_lo, s1
+// CHECK: [0x01,0x35,0xfe,0xbe]
+
+s_mov_fed_b32 exec_hi, s1
+// CHECK: [0x01,0x35,0xff,0xbe]
+
+s_mov_fed_b32 s5, s103
+// CHECK: [0x67,0x35,0x85,0xbe]
+
+s_mov_fed_b32 s5, flat_scratch_lo
+// CHECK: [0x68,0x35,0x85,0xbe]
+
+s_mov_fed_b32 s5, flat_scratch_hi
+// CHECK: [0x69,0x35,0x85,0xbe]
+
+s_mov_fed_b32 s5, vcc_lo
+// CHECK: [0x6a,0x35,0x85,0xbe]
+
+s_mov_fed_b32 s5, vcc_hi
+// CHECK: [0x6b,0x35,0x85,0xbe]
+
+s_mov_fed_b32 s5, tba_lo
+// CHECK: [0x6c,0x35,0x85,0xbe]
+
+s_mov_fed_b32 s5, tba_hi
+// CHECK: [0x6d,0x35,0x85,0xbe]
+
+s_mov_fed_b32 s5, tma_lo
+// CHECK: [0x6e,0x35,0x85,0xbe]
+
+s_mov_fed_b32 s5, tma_hi
+// CHECK: [0x6f,0x35,0x85,0xbe]
+
+s_mov_fed_b32 s5, ttmp11
+// CHECK: [0x7b,0x35,0x85,0xbe]
+
+s_mov_fed_b32 s5, m0
+// CHECK: [0x7c,0x35,0x85,0xbe]
+
+s_mov_fed_b32 s5, exec_lo
+// CHECK: [0x7e,0x35,0x85,0xbe]
+
+s_mov_fed_b32 s5, exec_hi
+// CHECK: [0x7f,0x35,0x85,0xbe]
+
+s_mov_fed_b32 s5, 0
+// CHECK: [0x80,0x35,0x85,0xbe]
+
+s_mov_fed_b32 s5, -1
+// CHECK: [0xc1,0x35,0x85,0xbe]
+
+s_mov_fed_b32 s5, 0.5
+// CHECK: [0xf0,0x35,0x85,0xbe]
+
+s_mov_fed_b32 s5, -4.0
+// CHECK: [0xf7,0x35,0x85,0xbe]
+
+s_mov_fed_b32 s5, 0xaf123456
+// CHECK: [0xff,0x35,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_mov_fed_b32 s5, 0x3f717273
+// CHECK: [0xff,0x35,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
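+// From here on the tests use the SOP2 form: byte 0 = SSRC0, byte 1 = SSRC1,
+// byte 2 = SDST (7 bits) plus the opcode's low bit, byte 3 = the remaining
+// opcode bits under the fixed 0b10 encoding. That is why s_add_u32 (opcode 0)
+// ends in 0x80 while s_sub_u32 (opcode 1) encodes s5 as 0x85,0x80.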
+s_add_u32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x80]
+
+s_add_u32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x80]
+
+s_add_u32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x80]
+
+s_add_u32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x80]
+
+s_add_u32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x80]
+
+s_add_u32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x80]
+
+s_add_u32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x80]
+
+s_add_u32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x80]
+
+s_add_u32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x80]
+
+s_add_u32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x80]
+
+s_add_u32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x80]
+
+s_add_u32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x80]
+
+s_add_u32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x80]
+
+s_add_u32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x80]
+
+s_add_u32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x80]
+
+s_add_u32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x80]
+
+s_add_u32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x80]
+
+s_add_u32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x80]
+
+s_add_u32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x80]
+
+s_add_u32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x80]
+
+s_add_u32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x80]
+
+s_add_u32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x80]
+
+s_add_u32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x80]
+
+s_add_u32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x80]
+
+s_add_u32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x80]
+
+s_add_u32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x80]
+
+s_add_u32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x80]
+
+s_add_u32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x80]
+
+s_add_u32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x80]
+
+s_add_u32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x80]
+
+s_add_u32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x80]
+
+s_add_u32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x80,0x56,0x34,0x12,0xaf]
+
+s_add_u32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x80,0x73,0x72,0x71,0x3f]
+
+s_add_u32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x80]
+
+s_add_u32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x80]
+
+s_add_u32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x80]
+
+s_add_u32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x80]
+
+s_add_u32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x80]
+
+s_add_u32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x80]
+
+s_add_u32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x80]
+
+s_add_u32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x80]
+
+s_add_u32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x80]
+
+s_add_u32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x80]
+
+s_add_u32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x80]
+
+s_add_u32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x80]
+
+s_add_u32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x80]
+
+s_add_u32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x80]
+
+s_add_u32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x80]
+
+s_add_u32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x80]
+
+s_add_u32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x80]
+
+s_add_u32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x80,0x56,0x34,0x12,0xaf]
+
+s_add_u32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x80,0x73,0x72,0x71,0x3f]
+
+s_sub_u32 s5, s1, s2
+// CHECK: [0x01,0x02,0x85,0x80]
+
+s_sub_u32 s103, s1, s2
+// CHECK: [0x01,0x02,0xe7,0x80]
+
+s_sub_u32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0xe8,0x80]
+
+s_sub_u32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0xe9,0x80]
+
+s_sub_u32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0xea,0x80]
+
+s_sub_u32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0xeb,0x80]
+
+s_sub_u32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0xec,0x80]
+
+s_sub_u32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0xed,0x80]
+
+s_sub_u32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0xee,0x80]
+
+s_sub_u32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0xef,0x80]
+
+s_sub_u32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0xfb,0x80]
+
+s_sub_u32 m0, s1, s2
+// CHECK: [0x01,0x02,0xfc,0x80]
+
+s_sub_u32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0xfe,0x80]
+
+s_sub_u32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0xff,0x80]
+
+s_sub_u32 s5, s103, s2
+// CHECK: [0x67,0x02,0x85,0x80]
+
+s_sub_u32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x85,0x80]
+
+s_sub_u32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x85,0x80]
+
+s_sub_u32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x85,0x80]
+
+s_sub_u32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x85,0x80]
+
+s_sub_u32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x85,0x80]
+
+s_sub_u32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x85,0x80]
+
+s_sub_u32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x85,0x80]
+
+s_sub_u32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x85,0x80]
+
+s_sub_u32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x85,0x80]
+
+s_sub_u32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x85,0x80]
+
+s_sub_u32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x85,0x80]
+
+s_sub_u32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x85,0x80]
+
+s_sub_u32 s5, 0, s2
+// CHECK: [0x80,0x02,0x85,0x80]
+
+s_sub_u32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x85,0x80]
+
+s_sub_u32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x85,0x80]
+
+s_sub_u32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x85,0x80]
+
+s_sub_u32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x85,0x80,0x56,0x34,0x12,0xaf]
+
+s_sub_u32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x85,0x80,0x73,0x72,0x71,0x3f]
+
+s_sub_u32 s5, s1, s103
+// CHECK: [0x01,0x67,0x85,0x80]
+
+s_sub_u32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x85,0x80]
+
+s_sub_u32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x85,0x80]
+
+s_sub_u32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x85,0x80]
+
+s_sub_u32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x85,0x80]
+
+s_sub_u32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x85,0x80]
+
+s_sub_u32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x85,0x80]
+
+s_sub_u32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x85,0x80]
+
+s_sub_u32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x85,0x80]
+
+s_sub_u32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x85,0x80]
+
+s_sub_u32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x85,0x80]
+
+s_sub_u32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x85,0x80]
+
+s_sub_u32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x85,0x80]
+
+s_sub_u32 s5, s1, 0
+// CHECK: [0x01,0x80,0x85,0x80]
+
+s_sub_u32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x85,0x80]
+
+s_sub_u32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x85,0x80]
+
+s_sub_u32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x85,0x80]
+
+s_sub_u32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x85,0x80,0x56,0x34,0x12,0xaf]
+
+s_sub_u32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x85,0x80,0x73,0x72,0x71,0x3f]
+
+s_add_i32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x81]
+
+s_add_i32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x81]
+
+s_add_i32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x81]
+
+s_add_i32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x81]
+
+s_add_i32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x81]
+
+s_add_i32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x81]
+
+s_add_i32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x81]
+
+s_add_i32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x81]
+
+s_add_i32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x81]
+
+s_add_i32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x81]
+
+s_add_i32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x81]
+
+s_add_i32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x81]
+
+s_add_i32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x81]
+
+s_add_i32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x81]
+
+s_add_i32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x81]
+
+s_add_i32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x81]
+
+s_add_i32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x81]
+
+s_add_i32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x81]
+
+s_add_i32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x81]
+
+s_add_i32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x81]
+
+s_add_i32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x81]
+
+s_add_i32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x81]
+
+s_add_i32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x81]
+
+s_add_i32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x81]
+
+s_add_i32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x81]
+
+s_add_i32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x81]
+
+s_add_i32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x81]
+
+s_add_i32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x81]
+
+s_add_i32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x81]
+
+s_add_i32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x81]
+
+s_add_i32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x81]
+
+s_add_i32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x81,0x56,0x34,0x12,0xaf]
+
+s_add_i32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x81,0x73,0x72,0x71,0x3f]
+
+s_add_i32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x81]
+
+s_add_i32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x81]
+
+s_add_i32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x81]
+
+s_add_i32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x81]
+
+s_add_i32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x81]
+
+s_add_i32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x81]
+
+s_add_i32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x81]
+
+s_add_i32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x81]
+
+s_add_i32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x81]
+
+s_add_i32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x81]
+
+s_add_i32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x81]
+
+s_add_i32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x81]
+
+s_add_i32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x81]
+
+s_add_i32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x81]
+
+s_add_i32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x81]
+
+s_add_i32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x81]
+
+s_add_i32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x81]
+
+s_add_i32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x81,0x56,0x34,0x12,0xaf]
+
+s_add_i32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x81,0x73,0x72,0x71,0x3f]
+
+s_sub_i32 s5, s1, s2
+// CHECK: [0x01,0x02,0x85,0x81]
+
+s_sub_i32 s103, s1, s2
+// CHECK: [0x01,0x02,0xe7,0x81]
+
+s_sub_i32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0xe8,0x81]
+
+s_sub_i32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0xe9,0x81]
+
+s_sub_i32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0xea,0x81]
+
+s_sub_i32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0xeb,0x81]
+
+s_sub_i32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0xec,0x81]
+
+s_sub_i32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0xed,0x81]
+
+s_sub_i32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0xee,0x81]
+
+s_sub_i32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0xef,0x81]
+
+s_sub_i32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0xfb,0x81]
+
+s_sub_i32 m0, s1, s2
+// CHECK: [0x01,0x02,0xfc,0x81]
+
+s_sub_i32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0xfe,0x81]
+
+s_sub_i32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0xff,0x81]
+
+s_sub_i32 s5, s103, s2
+// CHECK: [0x67,0x02,0x85,0x81]
+
+s_sub_i32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x85,0x81]
+
+s_sub_i32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x85,0x81]
+
+s_sub_i32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x85,0x81]
+
+s_sub_i32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x85,0x81]
+
+s_sub_i32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x85,0x81]
+
+s_sub_i32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x85,0x81]
+
+s_sub_i32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x85,0x81]
+
+s_sub_i32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x85,0x81]
+
+s_sub_i32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x85,0x81]
+
+s_sub_i32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x85,0x81]
+
+s_sub_i32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x85,0x81]
+
+s_sub_i32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x85,0x81]
+
+s_sub_i32 s5, 0, s2
+// CHECK: [0x80,0x02,0x85,0x81]
+
+s_sub_i32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x85,0x81]
+
+s_sub_i32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x85,0x81]
+
+s_sub_i32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x85,0x81]
+
+s_sub_i32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x85,0x81,0x56,0x34,0x12,0xaf]
+
+s_sub_i32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x85,0x81,0x73,0x72,0x71,0x3f]
+
+s_sub_i32 s5, s1, s103
+// CHECK: [0x01,0x67,0x85,0x81]
+
+s_sub_i32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x85,0x81]
+
+s_sub_i32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x85,0x81]
+
+s_sub_i32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x85,0x81]
+
+s_sub_i32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x85,0x81]
+
+s_sub_i32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x85,0x81]
+
+s_sub_i32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x85,0x81]
+
+s_sub_i32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x85,0x81]
+
+s_sub_i32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x85,0x81]
+
+s_sub_i32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x85,0x81]
+
+s_sub_i32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x85,0x81]
+
+s_sub_i32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x85,0x81]
+
+s_sub_i32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x85,0x81]
+
+s_sub_i32 s5, s1, 0
+// CHECK: [0x01,0x80,0x85,0x81]
+
+s_sub_i32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x85,0x81]
+
+s_sub_i32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x85,0x81]
+
+s_sub_i32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x85,0x81]
+
+s_sub_i32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x85,0x81,0x56,0x34,0x12,0xaf]
+
+s_sub_i32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x85,0x81,0x73,0x72,0x71,0x3f]
+
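+// s_addc_u32 and s_subb_u32 (the next two blocks) are the carry/borrow halves
+// of 64-bit add/sub sequences: per the ISA they consume SCC as carry-in and
+// write the carry-out back to SCC, so the operand sweep matches s_add_u32's.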
+s_addc_u32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x82]
+
+s_addc_u32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x82]
+
+s_addc_u32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x82]
+
+s_addc_u32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x82]
+
+s_addc_u32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x82]
+
+s_addc_u32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x82]
+
+s_addc_u32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x82]
+
+s_addc_u32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x82]
+
+s_addc_u32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x82]
+
+s_addc_u32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x82]
+
+s_addc_u32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x82]
+
+s_addc_u32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x82]
+
+s_addc_u32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x82]
+
+s_addc_u32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x82]
+
+s_addc_u32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x82]
+
+s_addc_u32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x82]
+
+s_addc_u32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x82]
+
+s_addc_u32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x82]
+
+s_addc_u32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x82]
+
+s_addc_u32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x82]
+
+s_addc_u32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x82]
+
+s_addc_u32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x82]
+
+s_addc_u32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x82]
+
+s_addc_u32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x82]
+
+s_addc_u32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x82]
+
+s_addc_u32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x82]
+
+s_addc_u32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x82]
+
+s_addc_u32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x82]
+
+s_addc_u32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x82]
+
+s_addc_u32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x82]
+
+s_addc_u32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x82]
+
+s_addc_u32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x82,0x56,0x34,0x12,0xaf]
+
+s_addc_u32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x82,0x73,0x72,0x71,0x3f]
+
+s_addc_u32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x82]
+
+s_addc_u32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x82]
+
+s_addc_u32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x82]
+
+s_addc_u32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x82]
+
+s_addc_u32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x82]
+
+s_addc_u32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x82]
+
+s_addc_u32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x82]
+
+s_addc_u32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x82]
+
+s_addc_u32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x82]
+
+s_addc_u32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x82]
+
+s_addc_u32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x82]
+
+s_addc_u32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x82]
+
+s_addc_u32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x82]
+
+s_addc_u32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x82]
+
+s_addc_u32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x82]
+
+s_addc_u32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x82]
+
+s_addc_u32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x82]
+
+s_addc_u32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x82,0x56,0x34,0x12,0xaf]
+
+s_addc_u32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x82,0x73,0x72,0x71,0x3f]
+
+s_subb_u32 s5, s1, s2
+// CHECK: [0x01,0x02,0x85,0x82]
+
+s_subb_u32 s103, s1, s2
+// CHECK: [0x01,0x02,0xe7,0x82]
+
+s_subb_u32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0xe8,0x82]
+
+s_subb_u32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0xe9,0x82]
+
+s_subb_u32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0xea,0x82]
+
+s_subb_u32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0xeb,0x82]
+
+s_subb_u32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0xec,0x82]
+
+s_subb_u32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0xed,0x82]
+
+s_subb_u32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0xee,0x82]
+
+s_subb_u32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0xef,0x82]
+
+s_subb_u32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0xfb,0x82]
+
+s_subb_u32 m0, s1, s2
+// CHECK: [0x01,0x02,0xfc,0x82]
+
+s_subb_u32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0xfe,0x82]
+
+s_subb_u32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0xff,0x82]
+
+s_subb_u32 s5, s103, s2
+// CHECK: [0x67,0x02,0x85,0x82]
+
+s_subb_u32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x85,0x82]
+
+s_subb_u32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x85,0x82]
+
+s_subb_u32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x85,0x82]
+
+s_subb_u32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x85,0x82]
+
+s_subb_u32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x85,0x82]
+
+s_subb_u32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x85,0x82]
+
+s_subb_u32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x85,0x82]
+
+s_subb_u32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x85,0x82]
+
+s_subb_u32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x85,0x82]
+
+s_subb_u32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x85,0x82]
+
+s_subb_u32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x85,0x82]
+
+s_subb_u32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x85,0x82]
+
+s_subb_u32 s5, 0, s2
+// CHECK: [0x80,0x02,0x85,0x82]
+
+s_subb_u32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x85,0x82]
+
+s_subb_u32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x85,0x82]
+
+s_subb_u32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x85,0x82]
+
+s_subb_u32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x85,0x82,0x56,0x34,0x12,0xaf]
+
+s_subb_u32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x85,0x82,0x73,0x72,0x71,0x3f]
+
+s_subb_u32 s5, s1, s103
+// CHECK: [0x01,0x67,0x85,0x82]
+
+s_subb_u32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x85,0x82]
+
+s_subb_u32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x85,0x82]
+
+s_subb_u32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x85,0x82]
+
+s_subb_u32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x85,0x82]
+
+s_subb_u32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x85,0x82]
+
+s_subb_u32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x85,0x82]
+
+s_subb_u32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x85,0x82]
+
+s_subb_u32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x85,0x82]
+
+s_subb_u32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x85,0x82]
+
+s_subb_u32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x85,0x82]
+
+s_subb_u32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x85,0x82]
+
+s_subb_u32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x85,0x82]
+
+s_subb_u32 s5, s1, 0
+// CHECK: [0x01,0x80,0x85,0x82]
+
+s_subb_u32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x85,0x82]
+
+s_subb_u32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x85,0x82]
+
+s_subb_u32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x85,0x82]
+
+s_subb_u32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x85,0x82,0x56,0x34,0x12,0xaf]
+
+s_subb_u32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x85,0x82,0x73,0x72,0x71,0x3f]
+
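+// Per the ISA, s_min_* and s_max_* also update SCC, setting it when the
+// first source is the operand selected.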
+s_min_i32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x83]
+
+s_min_i32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x83]
+
+s_min_i32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x83]
+
+s_min_i32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x83]
+
+s_min_i32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x83]
+
+s_min_i32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x83]
+
+s_min_i32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x83]
+
+s_min_i32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x83]
+
+s_min_i32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x83]
+
+s_min_i32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x83]
+
+s_min_i32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x83]
+
+s_min_i32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x83]
+
+s_min_i32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x83]
+
+s_min_i32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x83]
+
+s_min_i32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x83]
+
+s_min_i32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x83]
+
+s_min_i32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x83]
+
+s_min_i32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x83]
+
+s_min_i32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x83]
+
+s_min_i32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x83]
+
+s_min_i32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x83]
+
+s_min_i32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x83]
+
+s_min_i32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x83]
+
+s_min_i32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x83]
+
+s_min_i32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x83]
+
+s_min_i32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x83]
+
+s_min_i32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x83]
+
+s_min_i32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x83]
+
+s_min_i32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x83]
+
+s_min_i32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x83]
+
+s_min_i32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x83]
+
+s_min_i32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x83,0x56,0x34,0x12,0xaf]
+
+s_min_i32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x83,0x73,0x72,0x71,0x3f]
+
+s_min_i32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x83]
+
+s_min_i32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x83]
+
+s_min_i32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x83]
+
+s_min_i32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x83]
+
+s_min_i32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x83]
+
+s_min_i32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x83]
+
+s_min_i32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x83]
+
+s_min_i32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x83]
+
+s_min_i32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x83]
+
+s_min_i32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x83]
+
+s_min_i32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x83]
+
+s_min_i32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x83]
+
+s_min_i32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x83]
+
+s_min_i32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x83]
+
+s_min_i32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x83]
+
+s_min_i32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x83]
+
+s_min_i32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x83]
+
+s_min_i32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x83,0x56,0x34,0x12,0xaf]
+
+s_min_i32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x83,0x73,0x72,0x71,0x3f]
+
+s_min_u32 s5, s1, s2
+// CHECK: [0x01,0x02,0x85,0x83]
+
+s_min_u32 s103, s1, s2
+// CHECK: [0x01,0x02,0xe7,0x83]
+
+s_min_u32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0xe8,0x83]
+
+s_min_u32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0xe9,0x83]
+
+s_min_u32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0xea,0x83]
+
+s_min_u32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0xeb,0x83]
+
+s_min_u32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0xec,0x83]
+
+s_min_u32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0xed,0x83]
+
+s_min_u32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0xee,0x83]
+
+s_min_u32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0xef,0x83]
+
+s_min_u32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0xfb,0x83]
+
+s_min_u32 m0, s1, s2
+// CHECK: [0x01,0x02,0xfc,0x83]
+
+s_min_u32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0xfe,0x83]
+
+s_min_u32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0xff,0x83]
+
+s_min_u32 s5, s103, s2
+// CHECK: [0x67,0x02,0x85,0x83]
+
+s_min_u32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x85,0x83]
+
+s_min_u32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x85,0x83]
+
+s_min_u32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x85,0x83]
+
+s_min_u32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x85,0x83]
+
+s_min_u32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x85,0x83]
+
+s_min_u32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x85,0x83]
+
+s_min_u32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x85,0x83]
+
+s_min_u32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x85,0x83]
+
+s_min_u32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x85,0x83]
+
+s_min_u32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x85,0x83]
+
+s_min_u32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x85,0x83]
+
+s_min_u32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x85,0x83]
+
+s_min_u32 s5, 0, s2
+// CHECK: [0x80,0x02,0x85,0x83]
+
+s_min_u32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x85,0x83]
+
+s_min_u32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x85,0x83]
+
+s_min_u32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x85,0x83]
+
+s_min_u32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x85,0x83,0x56,0x34,0x12,0xaf]
+
+s_min_u32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x85,0x83,0x73,0x72,0x71,0x3f]
+
+s_min_u32 s5, s1, s103
+// CHECK: [0x01,0x67,0x85,0x83]
+
+s_min_u32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x85,0x83]
+
+s_min_u32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x85,0x83]
+
+s_min_u32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x85,0x83]
+
+s_min_u32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x85,0x83]
+
+s_min_u32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x85,0x83]
+
+s_min_u32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x85,0x83]
+
+s_min_u32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x85,0x83]
+
+s_min_u32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x85,0x83]
+
+s_min_u32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x85,0x83]
+
+s_min_u32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x85,0x83]
+
+s_min_u32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x85,0x83]
+
+s_min_u32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x85,0x83]
+
+s_min_u32 s5, s1, 0
+// CHECK: [0x01,0x80,0x85,0x83]
+
+s_min_u32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x85,0x83]
+
+s_min_u32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x85,0x83]
+
+s_min_u32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x85,0x83]
+
+s_min_u32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x85,0x83,0x56,0x34,0x12,0xaf]
+
+s_min_u32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x85,0x83,0x73,0x72,0x71,0x3f]
+
+s_max_i32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x84]
+
+s_max_i32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x84]
+
+s_max_i32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x84]
+
+s_max_i32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x84]
+
+s_max_i32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x84]
+
+s_max_i32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x84]
+
+s_max_i32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x84]
+
+s_max_i32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x84]
+
+s_max_i32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x84]
+
+s_max_i32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x84]
+
+s_max_i32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x84]
+
+s_max_i32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x84]
+
+s_max_i32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x84]
+
+s_max_i32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x84]
+
+s_max_i32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x84]
+
+s_max_i32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x84]
+
+s_max_i32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x84]
+
+s_max_i32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x84]
+
+s_max_i32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x84]
+
+s_max_i32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x84]
+
+s_max_i32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x84]
+
+s_max_i32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x84]
+
+s_max_i32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x84]
+
+s_max_i32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x84]
+
+s_max_i32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x84]
+
+s_max_i32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x84]
+
+s_max_i32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x84]
+
+s_max_i32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x84]
+
+s_max_i32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x84]
+
+s_max_i32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x84]
+
+s_max_i32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x84]
+
+s_max_i32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x84,0x56,0x34,0x12,0xaf]
+
+s_max_i32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x84,0x73,0x72,0x71,0x3f]
+
+s_max_i32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x84]
+
+s_max_i32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x84]
+
+s_max_i32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x84]
+
+s_max_i32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x84]
+
+s_max_i32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x84]
+
+s_max_i32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x84]
+
+s_max_i32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x84]
+
+s_max_i32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x84]
+
+s_max_i32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x84]
+
+s_max_i32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x84]
+
+s_max_i32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x84]
+
+s_max_i32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x84]
+
+s_max_i32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x84]
+
+s_max_i32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x84]
+
+s_max_i32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x84]
+
+s_max_i32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x84]
+
+s_max_i32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x84]
+
+s_max_i32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x84,0x56,0x34,0x12,0xaf]
+
+s_max_i32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x84,0x73,0x72,0x71,0x3f]
+
+s_max_u32 s5, s1, s2
+// CHECK: [0x01,0x02,0x85,0x84]
+
+s_max_u32 s103, s1, s2
+// CHECK: [0x01,0x02,0xe7,0x84]
+
+s_max_u32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0xe8,0x84]
+
+s_max_u32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0xe9,0x84]
+
+s_max_u32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0xea,0x84]
+
+s_max_u32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0xeb,0x84]
+
+s_max_u32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0xec,0x84]
+
+s_max_u32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0xed,0x84]
+
+s_max_u32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0xee,0x84]
+
+s_max_u32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0xef,0x84]
+
+s_max_u32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0xfb,0x84]
+
+s_max_u32 m0, s1, s2
+// CHECK: [0x01,0x02,0xfc,0x84]
+
+s_max_u32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0xfe,0x84]
+
+s_max_u32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0xff,0x84]
+
+s_max_u32 s5, s103, s2
+// CHECK: [0x67,0x02,0x85,0x84]
+
+s_max_u32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x85,0x84]
+
+s_max_u32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x85,0x84]
+
+s_max_u32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x85,0x84]
+
+s_max_u32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x85,0x84]
+
+s_max_u32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x85,0x84]
+
+s_max_u32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x85,0x84]
+
+s_max_u32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x85,0x84]
+
+s_max_u32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x85,0x84]
+
+s_max_u32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x85,0x84]
+
+s_max_u32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x85,0x84]
+
+s_max_u32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x85,0x84]
+
+s_max_u32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x85,0x84]
+
+s_max_u32 s5, 0, s2
+// CHECK: [0x80,0x02,0x85,0x84]
+
+s_max_u32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x85,0x84]
+
+s_max_u32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x85,0x84]
+
+s_max_u32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x85,0x84]
+
+s_max_u32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x85,0x84,0x56,0x34,0x12,0xaf]
+
+s_max_u32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x85,0x84,0x73,0x72,0x71,0x3f]
+
+s_max_u32 s5, s1, s103
+// CHECK: [0x01,0x67,0x85,0x84]
+
+s_max_u32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x85,0x84]
+
+s_max_u32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x85,0x84]
+
+s_max_u32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x85,0x84]
+
+s_max_u32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x85,0x84]
+
+s_max_u32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x85,0x84]
+
+s_max_u32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x85,0x84]
+
+s_max_u32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x85,0x84]
+
+s_max_u32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x85,0x84]
+
+s_max_u32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x85,0x84]
+
+s_max_u32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x85,0x84]
+
+s_max_u32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x85,0x84]
+
+s_max_u32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x85,0x84]
+
+s_max_u32 s5, s1, 0
+// CHECK: [0x01,0x80,0x85,0x84]
+
+s_max_u32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x85,0x84]
+
+s_max_u32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x85,0x84]
+
+s_max_u32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x85,0x84]
+
+s_max_u32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x85,0x84,0x56,0x34,0x12,0xaf]
+
+s_max_u32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x85,0x84,0x73,0x72,0x71,0x3f]
+
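+// s_cselect_b32 selects on SCC (per the ISA: D = SCC ? S0 : S1), so unlike
+// the arithmetic blocks above it reads SCC rather than writing it.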
+s_cselect_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x85]
+
+s_cselect_b32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x85]
+
+s_cselect_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x85]
+
+s_cselect_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x85]
+
+s_cselect_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x85]
+
+s_cselect_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x85]
+
+s_cselect_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x85]
+
+s_cselect_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x85]
+
+s_cselect_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x85]
+
+s_cselect_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x85]
+
+s_cselect_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x85]
+
+s_cselect_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x85]
+
+s_cselect_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x85]
+
+s_cselect_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x85]
+
+s_cselect_b32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x85]
+
+s_cselect_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x85]
+
+s_cselect_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x85]
+
+s_cselect_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x85]
+
+s_cselect_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x85]
+
+s_cselect_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x85]
+
+s_cselect_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x85]
+
+s_cselect_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x85]
+
+s_cselect_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x85]
+
+s_cselect_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x85]
+
+s_cselect_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x85]
+
+s_cselect_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x85]
+
+s_cselect_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x85]
+
+s_cselect_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x85]
+
+s_cselect_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x85]
+
+s_cselect_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x85]
+
+s_cselect_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x85]
+
+s_cselect_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x85,0x56,0x34,0x12,0xaf]
+
+s_cselect_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x85,0x73,0x72,0x71,0x3f]
+
+s_cselect_b32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x85]
+
+s_cselect_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x85]
+
+s_cselect_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x85]
+
+s_cselect_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x85]
+
+s_cselect_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x85]
+
+s_cselect_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x85]
+
+s_cselect_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x85]
+
+s_cselect_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x85]
+
+s_cselect_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x85]
+
+s_cselect_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x85]
+
+s_cselect_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x85]
+
+s_cselect_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x85]
+
+s_cselect_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x85]
+
+s_cselect_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x85]
+
+s_cselect_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x85]
+
+s_cselect_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x85]
+
+s_cselect_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x85]
+
+s_cselect_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x85,0x56,0x34,0x12,0xaf]
+
+s_cselect_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x85,0x73,0x72,0x71,0x3f]
+
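+// 64-bit SOP2 forms: register pairs are encoded by their first register
+// (s[102:103] -> 0x66), and the special pairs keep their 32-bit lo codes:
+// flat_scratch = 0x68, vcc = 0x6a, tba = 0x6c, tma = 0x6e,
+// ttmp[10:11] = 0x7a, exec = 0x7e.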
+s_cselect_b64 s[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8a,0x85]
+
+s_cselect_b64 s[12:13], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8c,0x85]
+
+s_cselect_b64 s[102:103], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe6,0x85]
+
+s_cselect_b64 flat_scratch, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe8,0x85]
+
+s_cselect_b64 vcc, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xea,0x85]
+
+s_cselect_b64 tba, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xec,0x85]
+
+s_cselect_b64 tma, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xee,0x85]
+
+s_cselect_b64 ttmp[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfa,0x85]
+
+s_cselect_b64 exec, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfe,0x85]
+
+s_cselect_b64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[102:103], s[4:5]
+// CHECK: [0x66,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], flat_scratch, s[4:5]
+// CHECK: [0x68,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], tba, s[4:5]
+// CHECK: [0x6c,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], tma, s[4:5]
+// CHECK: [0x6e,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], exec, s[4:5]
+// CHECK: [0x7e,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], 0, s[4:5]
+// CHECK: [0x80,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], -1, s[4:5]
+// CHECK: [0xc1,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x85,0x56,0x34,0x12,0xaf]
+
+s_cselect_b64 s[10:11], 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x85,0x73,0x72,0x71,0x3f]
+
+s_cselect_b64 s[10:11], s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], s[102:103]
+// CHECK: [0x02,0x66,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], flat_scratch
+// CHECK: [0x02,0x68,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], vcc
+// CHECK: [0x02,0x6a,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], tba
+// CHECK: [0x02,0x6c,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], tma
+// CHECK: [0x02,0x6e,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], exec
+// CHECK: [0x02,0x7e,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x85,0x56,0x34,0x12,0xaf]
+
+s_cselect_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x85,0x73,0x72,0x71,0x3f]
+
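+// The remaining blocks cover the scalar bitwise family (s_and, s_or, s_xor,
+// s_andn2, ...), each of which writes SCC = (result != 0); the operand sweep
+// is identical, only the opcode bits in the top two bytes change.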
+s_and_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x87]
+
+s_and_b32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x87]
+
+s_and_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x87]
+
+s_and_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x87]
+
+s_and_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x87]
+
+s_and_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x87]
+
+s_and_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x87]
+
+s_and_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x87]
+
+s_and_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x87]
+
+s_and_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x87]
+
+s_and_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x87]
+
+s_and_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x87]
+
+s_and_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x87]
+
+s_and_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x87]
+
+s_and_b32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x87]
+
+s_and_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x87]
+
+s_and_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x87]
+
+s_and_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x87]
+
+s_and_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x87]
+
+s_and_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x87]
+
+s_and_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x87]
+
+s_and_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x87]
+
+s_and_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x87]
+
+s_and_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x87]
+
+s_and_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x87]
+
+s_and_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x87]
+
+s_and_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x87]
+
+s_and_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x87]
+
+s_and_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x87]
+
+s_and_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x87]
+
+s_and_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x87]
+
+s_and_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x87,0x56,0x34,0x12,0xaf]
+
+s_and_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x87,0x73,0x72,0x71,0x3f]
+
+s_and_b32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x87]
+
+s_and_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x87]
+
+s_and_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x87]
+
+s_and_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x87]
+
+s_and_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x87]
+
+s_and_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x87]
+
+s_and_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x87]
+
+s_and_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x87]
+
+s_and_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x87]
+
+s_and_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x87]
+
+s_and_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x87]
+
+s_and_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x87]
+
+s_and_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x87]
+
+s_and_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x87]
+
+s_and_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x87]
+
+s_and_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x87]
+
+s_and_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x87]
+
+s_and_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x87,0x56,0x34,0x12,0xaf]
+
+s_and_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x87,0x73,0x72,0x71,0x3f]
+
+s_and_b64 s[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8a,0x87]
+
+s_and_b64 s[12:13], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8c,0x87]
+
+s_and_b64 s[102:103], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe6,0x87]
+
+s_and_b64 flat_scratch, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe8,0x87]
+
+s_and_b64 vcc, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xea,0x87]
+
+s_and_b64 tba, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xec,0x87]
+
+s_and_b64 tma, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xee,0x87]
+
+s_and_b64 ttmp[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfa,0x87]
+
+s_and_b64 exec, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfe,0x87]
+
+s_and_b64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0x87]
+
+s_and_b64 s[10:11], s[102:103], s[4:5]
+// CHECK: [0x66,0x04,0x8a,0x87]
+
+s_and_b64 s[10:11], flat_scratch, s[4:5]
+// CHECK: [0x68,0x04,0x8a,0x87]
+
+s_and_b64 s[10:11], vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x8a,0x87]
+
+s_and_b64 s[10:11], tba, s[4:5]
+// CHECK: [0x6c,0x04,0x8a,0x87]
+
+s_and_b64 s[10:11], tma, s[4:5]
+// CHECK: [0x6e,0x04,0x8a,0x87]
+
+s_and_b64 s[10:11], ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x8a,0x87]
+
+s_and_b64 s[10:11], exec, s[4:5]
+// CHECK: [0x7e,0x04,0x8a,0x87]
+
+s_and_b64 s[10:11], 0, s[4:5]
+// CHECK: [0x80,0x04,0x8a,0x87]
+
+s_and_b64 s[10:11], -1, s[4:5]
+// CHECK: [0xc1,0x04,0x8a,0x87]
+
+s_and_b64 s[10:11], 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x8a,0x87]
+
+s_and_b64 s[10:11], -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x8a,0x87]
+
+s_and_b64 s[10:11], 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x87,0x56,0x34,0x12,0xaf]
+
+s_and_b64 s[10:11], 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x87,0x73,0x72,0x71,0x3f]
+
+s_and_b64 s[10:11], s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x8a,0x87]
+
+s_and_b64 s[10:11], s[2:3], s[102:103]
+// CHECK: [0x02,0x66,0x8a,0x87]
+
+s_and_b64 s[10:11], s[2:3], flat_scratch
+// CHECK: [0x02,0x68,0x8a,0x87]
+
+s_and_b64 s[10:11], s[2:3], vcc
+// CHECK: [0x02,0x6a,0x8a,0x87]
+
+s_and_b64 s[10:11], s[2:3], tba
+// CHECK: [0x02,0x6c,0x8a,0x87]
+
+s_and_b64 s[10:11], s[2:3], tma
+// CHECK: [0x02,0x6e,0x8a,0x87]
+
+s_and_b64 s[10:11], s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x8a,0x87]
+
+s_and_b64 s[10:11], s[2:3], exec
+// CHECK: [0x02,0x7e,0x8a,0x87]
+
+s_and_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x87]
+
+s_and_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x87]
+
+s_and_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x87]
+
+s_and_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x87]
+
+s_and_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x87,0x56,0x34,0x12,0xaf]
+
+s_and_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x87,0x73,0x72,0x71,0x3f]
+
+s_or_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x88]
+
+s_or_b32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x88]
+
+s_or_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x88]
+
+s_or_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x88]
+
+s_or_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x88]
+
+s_or_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x88]
+
+s_or_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x88]
+
+s_or_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x88]
+
+s_or_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x88]
+
+s_or_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x88]
+
+s_or_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x88]
+
+s_or_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x88]
+
+s_or_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x88]
+
+s_or_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x88]
+
+s_or_b32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x88]
+
+s_or_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x88]
+
+s_or_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x88]
+
+s_or_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x88]
+
+s_or_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x88]
+
+s_or_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x88]
+
+s_or_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x88]
+
+s_or_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x88]
+
+s_or_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x88]
+
+s_or_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x88]
+
+s_or_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x88]
+
+s_or_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x88]
+
+s_or_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x88]
+
+s_or_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x88]
+
+s_or_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x88]
+
+s_or_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x88]
+
+s_or_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x88]
+
+s_or_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x88,0x56,0x34,0x12,0xaf]
+
+s_or_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x88,0x73,0x72,0x71,0x3f]
+
+s_or_b32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x88]
+
+s_or_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x88]
+
+s_or_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x88]
+
+s_or_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x88]
+
+s_or_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x88]
+
+s_or_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x88]
+
+s_or_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x88]
+
+s_or_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x88]
+
+s_or_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x88]
+
+s_or_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x88]
+
+s_or_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x88]
+
+s_or_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x88]
+
+s_or_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x88]
+
+s_or_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x88]
+
+s_or_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x88]
+
+s_or_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x88]
+
+s_or_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x88]
+
+s_or_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x88,0x56,0x34,0x12,0xaf]
+
+s_or_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x88,0x73,0x72,0x71,0x3f]
+
+s_or_b64 s[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8a,0x88]
+
+s_or_b64 s[12:13], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8c,0x88]
+
+s_or_b64 s[102:103], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe6,0x88]
+
+s_or_b64 flat_scratch, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe8,0x88]
+
+s_or_b64 vcc, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xea,0x88]
+
+s_or_b64 tba, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xec,0x88]
+
+s_or_b64 tma, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xee,0x88]
+
+s_or_b64 ttmp[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfa,0x88]
+
+s_or_b64 exec, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfe,0x88]
+
+s_or_b64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0x88]
+
+s_or_b64 s[10:11], s[102:103], s[4:5]
+// CHECK: [0x66,0x04,0x8a,0x88]
+
+s_or_b64 s[10:11], flat_scratch, s[4:5]
+// CHECK: [0x68,0x04,0x8a,0x88]
+
+s_or_b64 s[10:11], vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x8a,0x88]
+
+s_or_b64 s[10:11], tba, s[4:5]
+// CHECK: [0x6c,0x04,0x8a,0x88]
+
+s_or_b64 s[10:11], tma, s[4:5]
+// CHECK: [0x6e,0x04,0x8a,0x88]
+
+s_or_b64 s[10:11], ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x8a,0x88]
+
+s_or_b64 s[10:11], exec, s[4:5]
+// CHECK: [0x7e,0x04,0x8a,0x88]
+
+s_or_b64 s[10:11], 0, s[4:5]
+// CHECK: [0x80,0x04,0x8a,0x88]
+
+s_or_b64 s[10:11], -1, s[4:5]
+// CHECK: [0xc1,0x04,0x8a,0x88]
+
+s_or_b64 s[10:11], 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x8a,0x88]
+
+s_or_b64 s[10:11], -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x8a,0x88]
+
+s_or_b64 s[10:11], 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x88,0x56,0x34,0x12,0xaf]
+
+s_or_b64 s[10:11], 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x88,0x73,0x72,0x71,0x3f]
+
+s_or_b64 s[10:11], s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x8a,0x88]
+
+s_or_b64 s[10:11], s[2:3], s[102:103]
+// CHECK: [0x02,0x66,0x8a,0x88]
+
+s_or_b64 s[10:11], s[2:3], flat_scratch
+// CHECK: [0x02,0x68,0x8a,0x88]
+
+s_or_b64 s[10:11], s[2:3], vcc
+// CHECK: [0x02,0x6a,0x8a,0x88]
+
+s_or_b64 s[10:11], s[2:3], tba
+// CHECK: [0x02,0x6c,0x8a,0x88]
+
+s_or_b64 s[10:11], s[2:3], tma
+// CHECK: [0x02,0x6e,0x8a,0x88]
+
+s_or_b64 s[10:11], s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x8a,0x88]
+
+s_or_b64 s[10:11], s[2:3], exec
+// CHECK: [0x02,0x7e,0x8a,0x88]
+
+s_or_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x88]
+
+s_or_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x88]
+
+s_or_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x88]
+
+s_or_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x88]
+
+s_or_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x88,0x56,0x34,0x12,0xaf]
+
+s_or_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x88,0x73,0x72,0x71,0x3f]
+
+s_xor_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x89]
+
+s_xor_b32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x89]
+
+s_xor_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x89]
+
+s_xor_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x89]
+
+s_xor_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x89]
+
+s_xor_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x89]
+
+s_xor_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x89]
+
+s_xor_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x89]
+
+s_xor_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x89]
+
+s_xor_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x89]
+
+s_xor_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x89]
+
+s_xor_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x89]
+
+s_xor_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x89]
+
+s_xor_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x89]
+
+s_xor_b32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x89]
+
+s_xor_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x89]
+
+s_xor_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x89]
+
+s_xor_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x89]
+
+s_xor_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x89]
+
+s_xor_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x89]
+
+s_xor_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x89]
+
+s_xor_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x89]
+
+s_xor_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x89]
+
+s_xor_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x89]
+
+s_xor_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x89]
+
+s_xor_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x89]
+
+s_xor_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x89]
+
+s_xor_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x89]
+
+s_xor_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x89]
+
+s_xor_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x89]
+
+s_xor_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x89]
+
+s_xor_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x89,0x56,0x34,0x12,0xaf]
+
+s_xor_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x89,0x73,0x72,0x71,0x3f]
+
+s_xor_b32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x89]
+
+s_xor_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x89]
+
+s_xor_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x89]
+
+s_xor_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x89]
+
+s_xor_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x89]
+
+s_xor_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x89]
+
+s_xor_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x89]
+
+s_xor_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x89]
+
+s_xor_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x89]
+
+s_xor_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x89]
+
+s_xor_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x89]
+
+s_xor_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x89]
+
+s_xor_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x89]
+
+s_xor_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x89]
+
+s_xor_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x89]
+
+s_xor_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x89]
+
+s_xor_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x89]
+
+s_xor_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x89,0x56,0x34,0x12,0xaf]
+
+s_xor_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x89,0x73,0x72,0x71,0x3f]
+
+s_xor_b64 s[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8a,0x89]
+
+s_xor_b64 s[12:13], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8c,0x89]
+
+s_xor_b64 s[102:103], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe6,0x89]
+
+s_xor_b64 flat_scratch, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe8,0x89]
+
+s_xor_b64 vcc, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xea,0x89]
+
+s_xor_b64 tba, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xec,0x89]
+
+s_xor_b64 tma, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xee,0x89]
+
+s_xor_b64 ttmp[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfa,0x89]
+
+s_xor_b64 exec, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfe,0x89]
+
+s_xor_b64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0x89]
+
+s_xor_b64 s[10:11], s[102:103], s[4:5]
+// CHECK: [0x66,0x04,0x8a,0x89]
+
+s_xor_b64 s[10:11], flat_scratch, s[4:5]
+// CHECK: [0x68,0x04,0x8a,0x89]
+
+s_xor_b64 s[10:11], vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x8a,0x89]
+
+s_xor_b64 s[10:11], tba, s[4:5]
+// CHECK: [0x6c,0x04,0x8a,0x89]
+
+s_xor_b64 s[10:11], tma, s[4:5]
+// CHECK: [0x6e,0x04,0x8a,0x89]
+
+s_xor_b64 s[10:11], ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x8a,0x89]
+
+s_xor_b64 s[10:11], exec, s[4:5]
+// CHECK: [0x7e,0x04,0x8a,0x89]
+
+s_xor_b64 s[10:11], 0, s[4:5]
+// CHECK: [0x80,0x04,0x8a,0x89]
+
+s_xor_b64 s[10:11], -1, s[4:5]
+// CHECK: [0xc1,0x04,0x8a,0x89]
+
+s_xor_b64 s[10:11], 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x8a,0x89]
+
+s_xor_b64 s[10:11], -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x8a,0x89]
+
+s_xor_b64 s[10:11], 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x89,0x56,0x34,0x12,0xaf]
+
+s_xor_b64 s[10:11], 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x89,0x73,0x72,0x71,0x3f]
+
+s_xor_b64 s[10:11], s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x8a,0x89]
+
+s_xor_b64 s[10:11], s[2:3], s[102:103]
+// CHECK: [0x02,0x66,0x8a,0x89]
+
+s_xor_b64 s[10:11], s[2:3], flat_scratch
+// CHECK: [0x02,0x68,0x8a,0x89]
+
+s_xor_b64 s[10:11], s[2:3], vcc
+// CHECK: [0x02,0x6a,0x8a,0x89]
+
+s_xor_b64 s[10:11], s[2:3], tba
+// CHECK: [0x02,0x6c,0x8a,0x89]
+
+s_xor_b64 s[10:11], s[2:3], tma
+// CHECK: [0x02,0x6e,0x8a,0x89]
+
+s_xor_b64 s[10:11], s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x8a,0x89]
+
+s_xor_b64 s[10:11], s[2:3], exec
+// CHECK: [0x02,0x7e,0x8a,0x89]
+
+s_xor_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x89]
+
+s_xor_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x89]
+
+s_xor_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x89]
+
+s_xor_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x89]
+
+s_xor_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x89,0x56,0x34,0x12,0xaf]
+
+s_xor_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x89,0x73,0x72,0x71,0x3f]
+
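+// s_andn2: D = S0 & ~S1 (32- and 64-bit forms below); SCC = (D != 0).
+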
+s_andn2_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x8a]
+
+s_andn2_b32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x8a]
+
+s_andn2_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x8a]
+
+s_andn2_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x8a]
+
+s_andn2_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x8a]
+
+s_andn2_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x8a]
+
+s_andn2_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x8a]
+
+s_andn2_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x8a]
+
+s_andn2_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x8a]
+
+s_andn2_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x8a]
+
+s_andn2_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x8a]
+
+s_andn2_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x8a]
+
+s_andn2_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x8a]
+
+s_andn2_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x8a]
+
+s_andn2_b32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x8a]
+
+s_andn2_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x8a]
+
+s_andn2_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x8a]
+
+s_andn2_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x8a]
+
+s_andn2_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x8a]
+
+s_andn2_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x8a]
+
+s_andn2_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x8a]
+
+s_andn2_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x8a]
+
+s_andn2_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x8a]
+
+s_andn2_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x8a]
+
+s_andn2_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x8a]
+
+s_andn2_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x8a]
+
+s_andn2_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x8a]
+
+s_andn2_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x8a]
+
+s_andn2_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x8a]
+
+s_andn2_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x8a]
+
+s_andn2_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x8a]
+
+s_andn2_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x8a,0x56,0x34,0x12,0xaf]
+
+s_andn2_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x8a,0x73,0x72,0x71,0x3f]
+
+s_andn2_b32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x8a]
+
+s_andn2_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x8a]
+
+s_andn2_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x8a]
+
+s_andn2_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x8a]
+
+s_andn2_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x8a]
+
+s_andn2_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x8a]
+
+s_andn2_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x8a]
+
+s_andn2_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x8a]
+
+s_andn2_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x8a]
+
+s_andn2_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x8a]
+
+s_andn2_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x8a]
+
+s_andn2_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x8a]
+
+s_andn2_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x8a]
+
+s_andn2_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x8a]
+
+s_andn2_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x8a]
+
+s_andn2_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x8a]
+
+s_andn2_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x8a]
+
+s_andn2_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x8a,0x56,0x34,0x12,0xaf]
+
+s_andn2_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x8a,0x73,0x72,0x71,0x3f]
+
+s_andn2_b64 s[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8a,0x8a]
+
+s_andn2_b64 s[12:13], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8c,0x8a]
+
+s_andn2_b64 s[102:103], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe6,0x8a]
+
+s_andn2_b64 flat_scratch, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe8,0x8a]
+
+s_andn2_b64 vcc, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xea,0x8a]
+
+s_andn2_b64 tba, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xec,0x8a]
+
+s_andn2_b64 tma, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xee,0x8a]
+
+s_andn2_b64 ttmp[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfa,0x8a]
+
+s_andn2_b64 exec, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfe,0x8a]
+
+s_andn2_b64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], s[102:103], s[4:5]
+// CHECK: [0x66,0x04,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], flat_scratch, s[4:5]
+// CHECK: [0x68,0x04,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], tba, s[4:5]
+// CHECK: [0x6c,0x04,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], tma, s[4:5]
+// CHECK: [0x6e,0x04,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], exec, s[4:5]
+// CHECK: [0x7e,0x04,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], 0, s[4:5]
+// CHECK: [0x80,0x04,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], -1, s[4:5]
+// CHECK: [0xc1,0x04,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x8a,0x56,0x34,0x12,0xaf]
+
+s_andn2_b64 s[10:11], 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x8a,0x73,0x72,0x71,0x3f]
+
+s_andn2_b64 s[10:11], s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], s[2:3], s[102:103]
+// CHECK: [0x02,0x66,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], s[2:3], flat_scratch
+// CHECK: [0x02,0x68,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], s[2:3], vcc
+// CHECK: [0x02,0x6a,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], s[2:3], tba
+// CHECK: [0x02,0x6c,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], s[2:3], tma
+// CHECK: [0x02,0x6e,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], s[2:3], exec
+// CHECK: [0x02,0x7e,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x8a]
+
+s_andn2_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x8a,0x56,0x34,0x12,0xaf]
+
+s_andn2_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x8a,0x73,0x72,0x71,0x3f]
+
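+// s_orn2: D = S0 | ~S1; SCC = (D != 0).
+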
+s_orn2_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x8b]
+
+s_orn2_b32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x8b]
+
+s_orn2_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x8b]
+
+s_orn2_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x8b]
+
+s_orn2_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x8b]
+
+s_orn2_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x8b]
+
+s_orn2_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x8b]
+
+s_orn2_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x8b]
+
+s_orn2_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x8b]
+
+s_orn2_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x8b]
+
+s_orn2_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x8b]
+
+s_orn2_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x8b]
+
+s_orn2_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x8b]
+
+s_orn2_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x8b]
+
+s_orn2_b32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x8b]
+
+s_orn2_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x8b]
+
+s_orn2_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x8b]
+
+s_orn2_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x8b]
+
+s_orn2_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x8b]
+
+s_orn2_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x8b]
+
+s_orn2_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x8b]
+
+s_orn2_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x8b]
+
+s_orn2_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x8b]
+
+s_orn2_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x8b]
+
+s_orn2_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x8b]
+
+s_orn2_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x8b]
+
+s_orn2_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x8b]
+
+s_orn2_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x8b]
+
+s_orn2_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x8b]
+
+s_orn2_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x8b]
+
+s_orn2_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x8b]
+
+s_orn2_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x8b,0x56,0x34,0x12,0xaf]
+
+s_orn2_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x8b,0x73,0x72,0x71,0x3f]
+
+s_orn2_b32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x8b]
+
+s_orn2_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x8b]
+
+s_orn2_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x8b]
+
+s_orn2_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x8b]
+
+s_orn2_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x8b]
+
+s_orn2_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x8b]
+
+s_orn2_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x8b]
+
+s_orn2_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x8b]
+
+s_orn2_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x8b]
+
+s_orn2_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x8b]
+
+s_orn2_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x8b]
+
+s_orn2_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x8b]
+
+s_orn2_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x8b]
+
+s_orn2_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x8b]
+
+s_orn2_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x8b]
+
+s_orn2_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x8b]
+
+s_orn2_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x8b]
+
+s_orn2_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x8b,0x56,0x34,0x12,0xaf]
+
+s_orn2_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x8b,0x73,0x72,0x71,0x3f]
+
+s_orn2_b64 s[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8a,0x8b]
+
+s_orn2_b64 s[12:13], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8c,0x8b]
+
+s_orn2_b64 s[102:103], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe6,0x8b]
+
+s_orn2_b64 flat_scratch, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe8,0x8b]
+
+s_orn2_b64 vcc, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xea,0x8b]
+
+s_orn2_b64 tba, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xec,0x8b]
+
+s_orn2_b64 tma, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xee,0x8b]
+
+s_orn2_b64 ttmp[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfa,0x8b]
+
+s_orn2_b64 exec, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfe,0x8b]
+
+s_orn2_b64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], s[102:103], s[4:5]
+// CHECK: [0x66,0x04,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], flat_scratch, s[4:5]
+// CHECK: [0x68,0x04,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], tba, s[4:5]
+// CHECK: [0x6c,0x04,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], tma, s[4:5]
+// CHECK: [0x6e,0x04,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], exec, s[4:5]
+// CHECK: [0x7e,0x04,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], 0, s[4:5]
+// CHECK: [0x80,0x04,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], -1, s[4:5]
+// CHECK: [0xc1,0x04,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x8b,0x56,0x34,0x12,0xaf]
+
+s_orn2_b64 s[10:11], 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x8b,0x73,0x72,0x71,0x3f]
+
+s_orn2_b64 s[10:11], s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], s[2:3], s[102:103]
+// CHECK: [0x02,0x66,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], s[2:3], flat_scratch
+// CHECK: [0x02,0x68,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], s[2:3], vcc
+// CHECK: [0x02,0x6a,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], s[2:3], tba
+// CHECK: [0x02,0x6c,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], s[2:3], tma
+// CHECK: [0x02,0x6e,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], s[2:3], exec
+// CHECK: [0x02,0x7e,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x8b]
+
+s_orn2_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x8b,0x56,0x34,0x12,0xaf]
+
+s_orn2_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x8b,0x73,0x72,0x71,0x3f]
+
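+// s_nand: D = ~(S0 & S1); SCC = (D != 0).
+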
+s_nand_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x8c]
+
+s_nand_b32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x8c]
+
+s_nand_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x8c]
+
+s_nand_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x8c]
+
+s_nand_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x8c]
+
+s_nand_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x8c]
+
+s_nand_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x8c]
+
+s_nand_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x8c]
+
+s_nand_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x8c]
+
+s_nand_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x8c]
+
+s_nand_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x8c]
+
+s_nand_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x8c]
+
+s_nand_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x8c]
+
+s_nand_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x8c]
+
+s_nand_b32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x8c]
+
+s_nand_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x8c]
+
+s_nand_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x8c]
+
+s_nand_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x8c]
+
+s_nand_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x8c]
+
+s_nand_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x8c]
+
+s_nand_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x8c]
+
+s_nand_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x8c]
+
+s_nand_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x8c]
+
+s_nand_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x8c]
+
+s_nand_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x8c]
+
+s_nand_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x8c]
+
+s_nand_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x8c]
+
+s_nand_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x8c]
+
+s_nand_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x8c]
+
+s_nand_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x8c]
+
+s_nand_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x8c]
+
+s_nand_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x8c,0x56,0x34,0x12,0xaf]
+
+s_nand_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x8c,0x73,0x72,0x71,0x3f]
+
+s_nand_b32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x8c]
+
+s_nand_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x8c]
+
+s_nand_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x8c]
+
+s_nand_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x8c]
+
+s_nand_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x8c]
+
+s_nand_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x8c]
+
+s_nand_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x8c]
+
+s_nand_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x8c]
+
+s_nand_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x8c]
+
+s_nand_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x8c]
+
+s_nand_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x8c]
+
+s_nand_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x8c]
+
+s_nand_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x8c]
+
+s_nand_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x8c]
+
+s_nand_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x8c]
+
+s_nand_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x8c]
+
+s_nand_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x8c]
+
+s_nand_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x8c,0x56,0x34,0x12,0xaf]
+
+s_nand_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x8c,0x73,0x72,0x71,0x3f]
+
+s_nand_b64 s[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8a,0x8c]
+
+s_nand_b64 s[12:13], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8c,0x8c]
+
+s_nand_b64 s[102:103], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe6,0x8c]
+
+s_nand_b64 flat_scratch, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe8,0x8c]
+
+s_nand_b64 vcc, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xea,0x8c]
+
+s_nand_b64 tba, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xec,0x8c]
+
+s_nand_b64 tma, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xee,0x8c]
+
+s_nand_b64 ttmp[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfa,0x8c]
+
+s_nand_b64 exec, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfe,0x8c]
+
+s_nand_b64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0x8c]
+
+s_nand_b64 s[10:11], s[102:103], s[4:5]
+// CHECK: [0x66,0x04,0x8a,0x8c]
+
+s_nand_b64 s[10:11], flat_scratch, s[4:5]
+// CHECK: [0x68,0x04,0x8a,0x8c]
+
+s_nand_b64 s[10:11], vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x8a,0x8c]
+
+s_nand_b64 s[10:11], tba, s[4:5]
+// CHECK: [0x6c,0x04,0x8a,0x8c]
+
+s_nand_b64 s[10:11], tma, s[4:5]
+// CHECK: [0x6e,0x04,0x8a,0x8c]
+
+s_nand_b64 s[10:11], ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x8a,0x8c]
+
+s_nand_b64 s[10:11], exec, s[4:5]
+// CHECK: [0x7e,0x04,0x8a,0x8c]
+
+s_nand_b64 s[10:11], 0, s[4:5]
+// CHECK: [0x80,0x04,0x8a,0x8c]
+
+s_nand_b64 s[10:11], -1, s[4:5]
+// CHECK: [0xc1,0x04,0x8a,0x8c]
+
+s_nand_b64 s[10:11], 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x8a,0x8c]
+
+s_nand_b64 s[10:11], -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x8a,0x8c]
+
+s_nand_b64 s[10:11], 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x8c,0x56,0x34,0x12,0xaf]
+
+s_nand_b64 s[10:11], 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x8c,0x73,0x72,0x71,0x3f]
+
+s_nand_b64 s[10:11], s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x8a,0x8c]
+
+s_nand_b64 s[10:11], s[2:3], s[102:103]
+// CHECK: [0x02,0x66,0x8a,0x8c]
+
+s_nand_b64 s[10:11], s[2:3], flat_scratch
+// CHECK: [0x02,0x68,0x8a,0x8c]
+
+s_nand_b64 s[10:11], s[2:3], vcc
+// CHECK: [0x02,0x6a,0x8a,0x8c]
+
+s_nand_b64 s[10:11], s[2:3], tba
+// CHECK: [0x02,0x6c,0x8a,0x8c]
+
+s_nand_b64 s[10:11], s[2:3], tma
+// CHECK: [0x02,0x6e,0x8a,0x8c]
+
+s_nand_b64 s[10:11], s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x8a,0x8c]
+
+s_nand_b64 s[10:11], s[2:3], exec
+// CHECK: [0x02,0x7e,0x8a,0x8c]
+
+s_nand_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x8c]
+
+s_nand_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x8c]
+
+s_nand_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x8c]
+
+s_nand_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x8c]
+
+s_nand_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x8c,0x56,0x34,0x12,0xaf]
+
+s_nand_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x8c,0x73,0x72,0x71,0x3f]
+
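+// s_nor: D = ~(S0 | S1); SCC = (D != 0).
+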
+s_nor_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x8d]
+
+s_nor_b32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x8d]
+
+s_nor_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x8d]
+
+s_nor_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x8d]
+
+s_nor_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x8d]
+
+s_nor_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x8d]
+
+s_nor_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x8d]
+
+s_nor_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x8d]
+
+s_nor_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x8d]
+
+s_nor_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x8d]
+
+s_nor_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x8d]
+
+s_nor_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x8d]
+
+s_nor_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x8d]
+
+s_nor_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x8d]
+
+s_nor_b32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x8d]
+
+s_nor_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x8d]
+
+s_nor_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x8d]
+
+s_nor_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x8d]
+
+s_nor_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x8d]
+
+s_nor_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x8d]
+
+s_nor_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x8d]
+
+s_nor_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x8d]
+
+s_nor_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x8d]
+
+s_nor_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x8d]
+
+s_nor_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x8d]
+
+s_nor_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x8d]
+
+s_nor_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x8d]
+
+s_nor_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x8d]
+
+s_nor_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x8d]
+
+s_nor_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x8d]
+
+s_nor_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x8d]
+
+s_nor_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x8d,0x56,0x34,0x12,0xaf]
+
+s_nor_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x8d,0x73,0x72,0x71,0x3f]
+
+s_nor_b32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x8d]
+
+s_nor_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x8d]
+
+s_nor_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x8d]
+
+s_nor_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x8d]
+
+s_nor_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x8d]
+
+s_nor_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x8d]
+
+s_nor_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x8d]
+
+s_nor_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x8d]
+
+s_nor_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x8d]
+
+s_nor_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x8d]
+
+s_nor_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x8d]
+
+s_nor_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x8d]
+
+s_nor_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x8d]
+
+s_nor_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x8d]
+
+s_nor_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x8d]
+
+s_nor_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x8d]
+
+s_nor_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x8d]
+
+s_nor_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x8d,0x56,0x34,0x12,0xaf]
+
+s_nor_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x8d,0x73,0x72,0x71,0x3f]
+
+s_nor_b64 s[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8a,0x8d]
+
+s_nor_b64 s[12:13], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8c,0x8d]
+
+s_nor_b64 s[102:103], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe6,0x8d]
+
+s_nor_b64 flat_scratch, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe8,0x8d]
+
+s_nor_b64 vcc, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xea,0x8d]
+
+s_nor_b64 tba, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xec,0x8d]
+
+s_nor_b64 tma, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xee,0x8d]
+
+s_nor_b64 ttmp[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfa,0x8d]
+
+s_nor_b64 exec, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfe,0x8d]
+
+s_nor_b64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0x8d]
+
+s_nor_b64 s[10:11], s[102:103], s[4:5]
+// CHECK: [0x66,0x04,0x8a,0x8d]
+
+s_nor_b64 s[10:11], flat_scratch, s[4:5]
+// CHECK: [0x68,0x04,0x8a,0x8d]
+
+s_nor_b64 s[10:11], vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x8a,0x8d]
+
+s_nor_b64 s[10:11], tba, s[4:5]
+// CHECK: [0x6c,0x04,0x8a,0x8d]
+
+s_nor_b64 s[10:11], tma, s[4:5]
+// CHECK: [0x6e,0x04,0x8a,0x8d]
+
+s_nor_b64 s[10:11], ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x8a,0x8d]
+
+s_nor_b64 s[10:11], exec, s[4:5]
+// CHECK: [0x7e,0x04,0x8a,0x8d]
+
+s_nor_b64 s[10:11], 0, s[4:5]
+// CHECK: [0x80,0x04,0x8a,0x8d]
+
+s_nor_b64 s[10:11], -1, s[4:5]
+// CHECK: [0xc1,0x04,0x8a,0x8d]
+
+s_nor_b64 s[10:11], 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x8a,0x8d]
+
+s_nor_b64 s[10:11], -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x8a,0x8d]
+
+s_nor_b64 s[10:11], 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x8d,0x56,0x34,0x12,0xaf]
+
+s_nor_b64 s[10:11], 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x8d,0x73,0x72,0x71,0x3f]
+
+s_nor_b64 s[10:11], s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x8a,0x8d]
+
+s_nor_b64 s[10:11], s[2:3], s[102:103]
+// CHECK: [0x02,0x66,0x8a,0x8d]
+
+s_nor_b64 s[10:11], s[2:3], flat_scratch
+// CHECK: [0x02,0x68,0x8a,0x8d]
+
+s_nor_b64 s[10:11], s[2:3], vcc
+// CHECK: [0x02,0x6a,0x8a,0x8d]
+
+s_nor_b64 s[10:11], s[2:3], tba
+// CHECK: [0x02,0x6c,0x8a,0x8d]
+
+s_nor_b64 s[10:11], s[2:3], tma
+// CHECK: [0x02,0x6e,0x8a,0x8d]
+
+s_nor_b64 s[10:11], s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x8a,0x8d]
+
+s_nor_b64 s[10:11], s[2:3], exec
+// CHECK: [0x02,0x7e,0x8a,0x8d]
+
+s_nor_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x8d]
+
+s_nor_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x8d]
+
+s_nor_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x8d]
+
+s_nor_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x8d]
+
+s_nor_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x8d,0x56,0x34,0x12,0xaf]
+
+s_nor_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x8d,0x73,0x72,0x71,0x3f]
+
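+// s_xnor: D = ~(S0 ^ S1); SCC = (D != 0).
+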
+s_xnor_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x8e]
+
+s_xnor_b32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x8e]
+
+s_xnor_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x8e]
+
+s_xnor_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x8e]
+
+s_xnor_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x8e]
+
+s_xnor_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x8e]
+
+s_xnor_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x8e]
+
+s_xnor_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x8e]
+
+s_xnor_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x8e]
+
+s_xnor_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x8e]
+
+s_xnor_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x8e]
+
+s_xnor_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x8e]
+
+s_xnor_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x8e]
+
+s_xnor_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x8e]
+
+s_xnor_b32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x8e]
+
+s_xnor_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x8e]
+
+s_xnor_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x8e]
+
+s_xnor_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x8e]
+
+s_xnor_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x8e]
+
+s_xnor_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x8e]
+
+s_xnor_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x8e]
+
+s_xnor_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x8e]
+
+s_xnor_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x8e]
+
+s_xnor_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x8e]
+
+s_xnor_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x8e]
+
+s_xnor_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x8e]
+
+s_xnor_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x8e]
+
+s_xnor_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x8e]
+
+s_xnor_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x8e]
+
+s_xnor_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x8e]
+
+s_xnor_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x8e]
+
+s_xnor_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x8e,0x56,0x34,0x12,0xaf]
+
+s_xnor_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x8e,0x73,0x72,0x71,0x3f]
+
+s_xnor_b32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x8e]
+
+s_xnor_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x8e]
+
+s_xnor_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x8e]
+
+s_xnor_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x8e]
+
+s_xnor_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x8e]
+
+s_xnor_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x8e]
+
+s_xnor_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x8e]
+
+s_xnor_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x8e]
+
+s_xnor_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x8e]
+
+s_xnor_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x8e]
+
+s_xnor_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x8e]
+
+s_xnor_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x8e]
+
+s_xnor_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x8e]
+
+s_xnor_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x8e]
+
+s_xnor_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x8e]
+
+s_xnor_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x8e]
+
+s_xnor_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x8e]
+
+s_xnor_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x8e,0x56,0x34,0x12,0xaf]
+
+s_xnor_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x8e,0x73,0x72,0x71,0x3f]
+
+s_xnor_b64 s[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8a,0x8e]
+
+s_xnor_b64 s[12:13], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8c,0x8e]
+
+s_xnor_b64 s[102:103], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe6,0x8e]
+
+s_xnor_b64 flat_scratch, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe8,0x8e]
+
+s_xnor_b64 vcc, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xea,0x8e]
+
+s_xnor_b64 tba, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xec,0x8e]
+
+s_xnor_b64 tma, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xee,0x8e]
+
+s_xnor_b64 ttmp[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfa,0x8e]
+
+s_xnor_b64 exec, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfe,0x8e]
+
+s_xnor_b64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], s[102:103], s[4:5]
+// CHECK: [0x66,0x04,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], flat_scratch, s[4:5]
+// CHECK: [0x68,0x04,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], tba, s[4:5]
+// CHECK: [0x6c,0x04,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], tma, s[4:5]
+// CHECK: [0x6e,0x04,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], exec, s[4:5]
+// CHECK: [0x7e,0x04,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], 0, s[4:5]
+// CHECK: [0x80,0x04,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], -1, s[4:5]
+// CHECK: [0xc1,0x04,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x8e,0x56,0x34,0x12,0xaf]
+
+s_xnor_b64 s[10:11], 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x8e,0x73,0x72,0x71,0x3f]
+
+s_xnor_b64 s[10:11], s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], s[2:3], s[102:103]
+// CHECK: [0x02,0x66,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], s[2:3], flat_scratch
+// CHECK: [0x02,0x68,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], s[2:3], vcc
+// CHECK: [0x02,0x6a,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], s[2:3], tba
+// CHECK: [0x02,0x6c,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], s[2:3], tma
+// CHECK: [0x02,0x6e,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], s[2:3], exec
+// CHECK: [0x02,0x7e,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x8e]
+
+s_xnor_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x8e,0x56,0x34,0x12,0xaf]
+
+s_xnor_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x8e,0x73,0x72,0x71,0x3f]
+
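+// s_lshl: D = S0 << S1[4:0] (b32) or S1[5:0] (b64); SCC = (D != 0). Note that
+// the b64 form further down still takes a 32-bit shift amount in ssrc1.
+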
+s_lshl_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x8f]
+
+s_lshl_b32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x8f]
+
+s_lshl_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x8f]
+
+s_lshl_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x8f]
+
+s_lshl_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x8f]
+
+s_lshl_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x8f]
+
+s_lshl_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x8f]
+
+s_lshl_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x8f]
+
+s_lshl_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x8f]
+
+s_lshl_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x8f]
+
+s_lshl_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x8f]
+
+s_lshl_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x8f]
+
+s_lshl_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x8f]
+
+s_lshl_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x8f]
+
+s_lshl_b32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x8f]
+
+s_lshl_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x8f]
+
+s_lshl_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x8f]
+
+s_lshl_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x8f]
+
+s_lshl_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x8f]
+
+s_lshl_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x8f]
+
+s_lshl_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x8f]
+
+s_lshl_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x8f]
+
+s_lshl_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x8f]
+
+s_lshl_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x8f]
+
+s_lshl_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x8f]
+
+s_lshl_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x8f]
+
+s_lshl_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x8f]
+
+s_lshl_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x8f]
+
+s_lshl_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x8f]
+
+s_lshl_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x8f]
+
+s_lshl_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x8f]
+
+s_lshl_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x8f,0x56,0x34,0x12,0xaf]
+
+s_lshl_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x8f,0x73,0x72,0x71,0x3f]
+
+s_lshl_b32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x8f]
+
+s_lshl_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x8f]
+
+s_lshl_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x8f]
+
+s_lshl_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x8f]
+
+s_lshl_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x8f]
+
+s_lshl_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x8f]
+
+s_lshl_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x8f]
+
+s_lshl_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x8f]
+
+s_lshl_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x8f]
+
+s_lshl_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x8f]
+
+s_lshl_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x8f]
+
+s_lshl_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x8f]
+
+s_lshl_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x8f]
+
+s_lshl_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x8f]
+
+s_lshl_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x8f]
+
+s_lshl_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x8f]
+
+s_lshl_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x8f]
+
+s_lshl_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x8f,0x56,0x34,0x12,0xaf]
+
+s_lshl_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x8f,0x73,0x72,0x71,0x3f]
+
+s_lshl_b64 s[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0x8a,0x8f]
+
+s_lshl_b64 s[12:13], s[2:3], s2
+// CHECK: [0x02,0x02,0x8c,0x8f]
+
+s_lshl_b64 s[102:103], s[2:3], s2
+// CHECK: [0x02,0x02,0xe6,0x8f]
+
+s_lshl_b64 flat_scratch, s[2:3], s2
+// CHECK: [0x02,0x02,0xe8,0x8f]
+
+s_lshl_b64 vcc, s[2:3], s2
+// CHECK: [0x02,0x02,0xea,0x8f]
+
+s_lshl_b64 tba, s[2:3], s2
+// CHECK: [0x02,0x02,0xec,0x8f]
+
+s_lshl_b64 tma, s[2:3], s2
+// CHECK: [0x02,0x02,0xee,0x8f]
+
+s_lshl_b64 ttmp[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0xfa,0x8f]
+
+s_lshl_b64 exec, s[2:3], s2
+// CHECK: [0x02,0x02,0xfe,0x8f]
+
+s_lshl_b64 s[10:11], s[4:5], s2
+// CHECK: [0x04,0x02,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], s[102:103], s2
+// CHECK: [0x66,0x02,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], flat_scratch, s2
+// CHECK: [0x68,0x02,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], vcc, s2
+// CHECK: [0x6a,0x02,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], tba, s2
+// CHECK: [0x6c,0x02,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], tma, s2
+// CHECK: [0x6e,0x02,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], ttmp[10:11], s2
+// CHECK: [0x7a,0x02,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], exec, s2
+// CHECK: [0x7e,0x02,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], 0, s2
+// CHECK: [0x80,0x02,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], -1, s2
+// CHECK: [0xc1,0x02,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], 0.5, s2
+// CHECK: [0xf0,0x02,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], -4.0, s2
+// CHECK: [0xf7,0x02,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], 0xaf123456, s2
+// CHECK: [0xff,0x02,0x8a,0x8f,0x56,0x34,0x12,0xaf]
+
+s_lshl_b64 s[10:11], 0x3f717273, s2
+// CHECK: [0xff,0x02,0x8a,0x8f,0x73,0x72,0x71,0x3f]
+
+s_lshl_b64 s[10:11], s[2:3], s103
+// CHECK: [0x02,0x67,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], s[2:3], flat_scratch_lo
+// CHECK: [0x02,0x68,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], s[2:3], flat_scratch_hi
+// CHECK: [0x02,0x69,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], s[2:3], vcc_lo
+// CHECK: [0x02,0x6a,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], s[2:3], vcc_hi
+// CHECK: [0x02,0x6b,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], s[2:3], tba_lo
+// CHECK: [0x02,0x6c,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], s[2:3], tba_hi
+// CHECK: [0x02,0x6d,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], s[2:3], tma_lo
+// CHECK: [0x02,0x6e,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], s[2:3], tma_hi
+// CHECK: [0x02,0x6f,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], s[2:3], ttmp11
+// CHECK: [0x02,0x7b,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], s[2:3], m0
+// CHECK: [0x02,0x7c,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], s[2:3], exec_lo
+// CHECK: [0x02,0x7e,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], s[2:3], exec_hi
+// CHECK: [0x02,0x7f,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x8f]
+
+s_lshl_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x8f,0x56,0x34,0x12,0xaf]
+
+s_lshl_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x8f,0x73,0x72,0x71,0x3f]
+
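+// s_lshr: logical right shift, D = S0 >> S1[4:0] (b32) or S1[5:0] (b64),
+// zero-filling from the left; SCC = (D != 0).
+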
+s_lshr_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x90]
+
+s_lshr_b32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x90]
+
+s_lshr_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x90]
+
+s_lshr_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x90]
+
+s_lshr_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x90]
+
+s_lshr_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x90]
+
+s_lshr_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x90]
+
+s_lshr_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x90]
+
+s_lshr_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x90]
+
+s_lshr_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x90]
+
+s_lshr_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x90]
+
+s_lshr_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x90]
+
+s_lshr_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x90]
+
+s_lshr_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x90]
+
+s_lshr_b32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x90]
+
+s_lshr_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x90]
+
+s_lshr_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x90]
+
+s_lshr_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x90]
+
+s_lshr_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x90]
+
+s_lshr_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x90]
+
+s_lshr_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x90]
+
+s_lshr_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x90]
+
+s_lshr_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x90]
+
+s_lshr_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x90]
+
+s_lshr_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x90]
+
+s_lshr_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x90]
+
+s_lshr_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x90]
+
+s_lshr_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x90]
+
+s_lshr_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x90]
+
+s_lshr_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x90]
+
+s_lshr_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x90]
+
+s_lshr_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x90,0x56,0x34,0x12,0xaf]
+
+s_lshr_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x90,0x73,0x72,0x71,0x3f]
+
+s_lshr_b32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x90]
+
+s_lshr_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x90]
+
+s_lshr_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x90]
+
+s_lshr_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x90]
+
+s_lshr_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x90]
+
+s_lshr_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x90]
+
+s_lshr_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x90]
+
+s_lshr_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x90]
+
+s_lshr_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x90]
+
+s_lshr_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x90]
+
+s_lshr_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x90]
+
+s_lshr_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x90]
+
+s_lshr_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x90]
+
+s_lshr_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x90]
+
+s_lshr_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x90]
+
+s_lshr_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x90]
+
+s_lshr_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x90]
+
+s_lshr_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x90,0x56,0x34,0x12,0xaf]
+
+s_lshr_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x90,0x73,0x72,0x71,0x3f]
+
+s_lshr_b64 s[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0x8a,0x90]
+
+s_lshr_b64 s[12:13], s[2:3], s2
+// CHECK: [0x02,0x02,0x8c,0x90]
+
+s_lshr_b64 s[102:103], s[2:3], s2
+// CHECK: [0x02,0x02,0xe6,0x90]
+
+s_lshr_b64 flat_scratch, s[2:3], s2
+// CHECK: [0x02,0x02,0xe8,0x90]
+
+s_lshr_b64 vcc, s[2:3], s2
+// CHECK: [0x02,0x02,0xea,0x90]
+
+s_lshr_b64 tba, s[2:3], s2
+// CHECK: [0x02,0x02,0xec,0x90]
+
+s_lshr_b64 tma, s[2:3], s2
+// CHECK: [0x02,0x02,0xee,0x90]
+
+s_lshr_b64 ttmp[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0xfa,0x90]
+
+s_lshr_b64 exec, s[2:3], s2
+// CHECK: [0x02,0x02,0xfe,0x90]
+
+s_lshr_b64 s[10:11], s[4:5], s2
+// CHECK: [0x04,0x02,0x8a,0x90]
+
+s_lshr_b64 s[10:11], s[102:103], s2
+// CHECK: [0x66,0x02,0x8a,0x90]
+
+s_lshr_b64 s[10:11], flat_scratch, s2
+// CHECK: [0x68,0x02,0x8a,0x90]
+
+s_lshr_b64 s[10:11], vcc, s2
+// CHECK: [0x6a,0x02,0x8a,0x90]
+
+s_lshr_b64 s[10:11], tba, s2
+// CHECK: [0x6c,0x02,0x8a,0x90]
+
+s_lshr_b64 s[10:11], tma, s2
+// CHECK: [0x6e,0x02,0x8a,0x90]
+
+s_lshr_b64 s[10:11], ttmp[10:11], s2
+// CHECK: [0x7a,0x02,0x8a,0x90]
+
+s_lshr_b64 s[10:11], exec, s2
+// CHECK: [0x7e,0x02,0x8a,0x90]
+
+s_lshr_b64 s[10:11], 0, s2
+// CHECK: [0x80,0x02,0x8a,0x90]
+
+s_lshr_b64 s[10:11], -1, s2
+// CHECK: [0xc1,0x02,0x8a,0x90]
+
+s_lshr_b64 s[10:11], 0.5, s2
+// CHECK: [0xf0,0x02,0x8a,0x90]
+
+s_lshr_b64 s[10:11], -4.0, s2
+// CHECK: [0xf7,0x02,0x8a,0x90]
+
+s_lshr_b64 s[10:11], 0xaf123456, s2
+// CHECK: [0xff,0x02,0x8a,0x90,0x56,0x34,0x12,0xaf]
+
+s_lshr_b64 s[10:11], 0x3f717273, s2
+// CHECK: [0xff,0x02,0x8a,0x90,0x73,0x72,0x71,0x3f]
+
+s_lshr_b64 s[10:11], s[2:3], s103
+// CHECK: [0x02,0x67,0x8a,0x90]
+
+s_lshr_b64 s[10:11], s[2:3], flat_scratch_lo
+// CHECK: [0x02,0x68,0x8a,0x90]
+
+s_lshr_b64 s[10:11], s[2:3], flat_scratch_hi
+// CHECK: [0x02,0x69,0x8a,0x90]
+
+s_lshr_b64 s[10:11], s[2:3], vcc_lo
+// CHECK: [0x02,0x6a,0x8a,0x90]
+
+s_lshr_b64 s[10:11], s[2:3], vcc_hi
+// CHECK: [0x02,0x6b,0x8a,0x90]
+
+s_lshr_b64 s[10:11], s[2:3], tba_lo
+// CHECK: [0x02,0x6c,0x8a,0x90]
+
+s_lshr_b64 s[10:11], s[2:3], tba_hi
+// CHECK: [0x02,0x6d,0x8a,0x90]
+
+s_lshr_b64 s[10:11], s[2:3], tma_lo
+// CHECK: [0x02,0x6e,0x8a,0x90]
+
+s_lshr_b64 s[10:11], s[2:3], tma_hi
+// CHECK: [0x02,0x6f,0x8a,0x90]
+
+s_lshr_b64 s[10:11], s[2:3], ttmp11
+// CHECK: [0x02,0x7b,0x8a,0x90]
+
+s_lshr_b64 s[10:11], s[2:3], m0
+// CHECK: [0x02,0x7c,0x8a,0x90]
+
+s_lshr_b64 s[10:11], s[2:3], exec_lo
+// CHECK: [0x02,0x7e,0x8a,0x90]
+
+s_lshr_b64 s[10:11], s[2:3], exec_hi
+// CHECK: [0x02,0x7f,0x8a,0x90]
+
+s_lshr_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x90]
+
+s_lshr_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x90]
+
+s_lshr_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x90]
+
+s_lshr_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x90]
+
+s_lshr_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x90,0x56,0x34,0x12,0xaf]
+
+s_lshr_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x90,0x73,0x72,0x71,0x3f]
+
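+// s_ashr: arithmetic right shift, sign-extending from bit 31 (i32) or
+// bit 63 (i64); SCC = (D != 0).
+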
+s_ashr_i32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x91]
+
+s_ashr_i32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x91]
+
+s_ashr_i32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x91]
+
+s_ashr_i32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x91]
+
+s_ashr_i32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x91]
+
+s_ashr_i32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x91]
+
+s_ashr_i32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x91]
+
+s_ashr_i32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x91]
+
+s_ashr_i32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x91]
+
+s_ashr_i32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x91]
+
+s_ashr_i32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x91]
+
+s_ashr_i32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x91]
+
+s_ashr_i32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x91]
+
+s_ashr_i32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x91]
+
+s_ashr_i32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x91]
+
+s_ashr_i32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x91]
+
+s_ashr_i32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x91]
+
+s_ashr_i32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x91]
+
+s_ashr_i32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x91]
+
+s_ashr_i32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x91]
+
+s_ashr_i32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x91]
+
+s_ashr_i32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x91]
+
+s_ashr_i32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x91]
+
+s_ashr_i32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x91]
+
+s_ashr_i32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x91]
+
+s_ashr_i32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x91]
+
+s_ashr_i32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x91]
+
+s_ashr_i32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x91]
+
+s_ashr_i32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x91]
+
+s_ashr_i32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x91]
+
+s_ashr_i32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x91]
+
+s_ashr_i32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x91,0x56,0x34,0x12,0xaf]
+
+s_ashr_i32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x91,0x73,0x72,0x71,0x3f]
+
+s_ashr_i32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x91]
+
+s_ashr_i32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x91]
+
+s_ashr_i32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x91]
+
+s_ashr_i32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x91]
+
+s_ashr_i32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x91]
+
+s_ashr_i32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x91]
+
+s_ashr_i32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x91]
+
+s_ashr_i32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x91]
+
+s_ashr_i32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x91]
+
+s_ashr_i32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x91]
+
+s_ashr_i32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x91]
+
+s_ashr_i32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x91]
+
+s_ashr_i32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x91]
+
+s_ashr_i32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x91]
+
+s_ashr_i32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x91]
+
+s_ashr_i32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x91]
+
+s_ashr_i32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x91]
+
+s_ashr_i32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x91,0x56,0x34,0x12,0xaf]
+
+s_ashr_i32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x91,0x73,0x72,0x71,0x3f]
+
+s_ashr_i64 s[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0x8a,0x91]
+
+s_ashr_i64 s[12:13], s[2:3], s2
+// CHECK: [0x02,0x02,0x8c,0x91]
+
+s_ashr_i64 s[102:103], s[2:3], s2
+// CHECK: [0x02,0x02,0xe6,0x91]
+
+s_ashr_i64 flat_scratch, s[2:3], s2
+// CHECK: [0x02,0x02,0xe8,0x91]
+
+s_ashr_i64 vcc, s[2:3], s2
+// CHECK: [0x02,0x02,0xea,0x91]
+
+s_ashr_i64 tba, s[2:3], s2
+// CHECK: [0x02,0x02,0xec,0x91]
+
+s_ashr_i64 tma, s[2:3], s2
+// CHECK: [0x02,0x02,0xee,0x91]
+
+s_ashr_i64 ttmp[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0xfa,0x91]
+
+s_ashr_i64 exec, s[2:3], s2
+// CHECK: [0x02,0x02,0xfe,0x91]
+
+s_ashr_i64 s[10:11], s[4:5], s2
+// CHECK: [0x04,0x02,0x8a,0x91]
+
+s_ashr_i64 s[10:11], s[102:103], s2
+// CHECK: [0x66,0x02,0x8a,0x91]
+
+s_ashr_i64 s[10:11], flat_scratch, s2
+// CHECK: [0x68,0x02,0x8a,0x91]
+
+s_ashr_i64 s[10:11], vcc, s2
+// CHECK: [0x6a,0x02,0x8a,0x91]
+
+s_ashr_i64 s[10:11], tba, s2
+// CHECK: [0x6c,0x02,0x8a,0x91]
+
+s_ashr_i64 s[10:11], tma, s2
+// CHECK: [0x6e,0x02,0x8a,0x91]
+
+s_ashr_i64 s[10:11], ttmp[10:11], s2
+// CHECK: [0x7a,0x02,0x8a,0x91]
+
+s_ashr_i64 s[10:11], exec, s2
+// CHECK: [0x7e,0x02,0x8a,0x91]
+
+s_ashr_i64 s[10:11], 0, s2
+// CHECK: [0x80,0x02,0x8a,0x91]
+
+s_ashr_i64 s[10:11], -1, s2
+// CHECK: [0xc1,0x02,0x8a,0x91]
+
+s_ashr_i64 s[10:11], 0.5, s2
+// CHECK: [0xf0,0x02,0x8a,0x91]
+
+s_ashr_i64 s[10:11], -4.0, s2
+// CHECK: [0xf7,0x02,0x8a,0x91]
+
+s_ashr_i64 s[10:11], 0xaf123456, s2
+// CHECK: [0xff,0x02,0x8a,0x91,0x56,0x34,0x12,0xaf]
+
+s_ashr_i64 s[10:11], 0x3f717273, s2
+// CHECK: [0xff,0x02,0x8a,0x91,0x73,0x72,0x71,0x3f]
+
+s_ashr_i64 s[10:11], s[2:3], s103
+// CHECK: [0x02,0x67,0x8a,0x91]
+
+s_ashr_i64 s[10:11], s[2:3], flat_scratch_lo
+// CHECK: [0x02,0x68,0x8a,0x91]
+
+s_ashr_i64 s[10:11], s[2:3], flat_scratch_hi
+// CHECK: [0x02,0x69,0x8a,0x91]
+
+s_ashr_i64 s[10:11], s[2:3], vcc_lo
+// CHECK: [0x02,0x6a,0x8a,0x91]
+
+s_ashr_i64 s[10:11], s[2:3], vcc_hi
+// CHECK: [0x02,0x6b,0x8a,0x91]
+
+s_ashr_i64 s[10:11], s[2:3], tba_lo
+// CHECK: [0x02,0x6c,0x8a,0x91]
+
+s_ashr_i64 s[10:11], s[2:3], tba_hi
+// CHECK: [0x02,0x6d,0x8a,0x91]
+
+s_ashr_i64 s[10:11], s[2:3], tma_lo
+// CHECK: [0x02,0x6e,0x8a,0x91]
+
+s_ashr_i64 s[10:11], s[2:3], tma_hi
+// CHECK: [0x02,0x6f,0x8a,0x91]
+
+s_ashr_i64 s[10:11], s[2:3], ttmp11
+// CHECK: [0x02,0x7b,0x8a,0x91]
+
+s_ashr_i64 s[10:11], s[2:3], m0
+// CHECK: [0x02,0x7c,0x8a,0x91]
+
+s_ashr_i64 s[10:11], s[2:3], exec_lo
+// CHECK: [0x02,0x7e,0x8a,0x91]
+
+s_ashr_i64 s[10:11], s[2:3], exec_hi
+// CHECK: [0x02,0x7f,0x8a,0x91]
+
+s_ashr_i64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x91]
+
+s_ashr_i64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x91]
+
+s_ashr_i64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x91]
+
+s_ashr_i64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x91]
+
+s_ashr_i64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x91,0x56,0x34,0x12,0xaf]
+
+s_ashr_i64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x91,0x73,0x72,0x71,0x3f]
+
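+// s_bfm: bitfield mask, D = ((1 << S0[4:0]) - 1) << S1[4:0] (width in S0,
+// offset in S1); the b64 form uses S0[5:0]/S1[5:0] but still reads 32-bit
+// source operands. SCC is not written.
+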
+s_bfm_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x92]
+
+s_bfm_b32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x92]
+
+s_bfm_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x92]
+
+s_bfm_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x92]
+
+s_bfm_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x92]
+
+s_bfm_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x92]
+
+s_bfm_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x92]
+
+s_bfm_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x92]
+
+s_bfm_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x92]
+
+s_bfm_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x92]
+
+s_bfm_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x92]
+
+s_bfm_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x92]
+
+s_bfm_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x92]
+
+s_bfm_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x92]
+
+s_bfm_b32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x92]
+
+s_bfm_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x92]
+
+s_bfm_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x92]
+
+s_bfm_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x92]
+
+s_bfm_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x92]
+
+s_bfm_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x92]
+
+s_bfm_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x92]
+
+s_bfm_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x92]
+
+s_bfm_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x92]
+
+s_bfm_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x92]
+
+s_bfm_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x92]
+
+s_bfm_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x92]
+
+s_bfm_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x92]
+
+s_bfm_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x92]
+
+s_bfm_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x92]
+
+s_bfm_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x92]
+
+s_bfm_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x92]
+
+s_bfm_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x92,0x56,0x34,0x12,0xaf]
+
+s_bfm_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x92,0x73,0x72,0x71,0x3f]
+
+s_bfm_b32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x92]
+
+s_bfm_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x92]
+
+s_bfm_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x92]
+
+s_bfm_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x92]
+
+s_bfm_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x92]
+
+s_bfm_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x92]
+
+s_bfm_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x92]
+
+s_bfm_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x92]
+
+s_bfm_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x92]
+
+s_bfm_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x92]
+
+s_bfm_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x92]
+
+s_bfm_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x92]
+
+s_bfm_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x92]
+
+s_bfm_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x92]
+
+s_bfm_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x92]
+
+s_bfm_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x92]
+
+s_bfm_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x92]
+
+s_bfm_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x92,0x56,0x34,0x12,0xaf]
+
+s_bfm_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x92,0x73,0x72,0x71,0x3f]
+
+s_bfm_b64 s[10:11], s1, s2
+// CHECK: [0x01,0x02,0x8a,0x92]
+
+s_bfm_b64 s[12:13], s1, s2
+// CHECK: [0x01,0x02,0x8c,0x92]
+
+s_bfm_b64 s[102:103], s1, s2
+// CHECK: [0x01,0x02,0xe6,0x92]
+
+s_bfm_b64 flat_scratch, s1, s2
+// CHECK: [0x01,0x02,0xe8,0x92]
+
+s_bfm_b64 vcc, s1, s2
+// CHECK: [0x01,0x02,0xea,0x92]
+
+s_bfm_b64 tba, s1, s2
+// CHECK: [0x01,0x02,0xec,0x92]
+
+s_bfm_b64 tma, s1, s2
+// CHECK: [0x01,0x02,0xee,0x92]
+
+s_bfm_b64 ttmp[10:11], s1, s2
+// CHECK: [0x01,0x02,0xfa,0x92]
+
+s_bfm_b64 exec, s1, s2
+// CHECK: [0x01,0x02,0xfe,0x92]
+
+s_bfm_b64 s[10:11], s103, s2
+// CHECK: [0x67,0x02,0x8a,0x92]
+
+s_bfm_b64 s[10:11], flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x8a,0x92]
+
+s_bfm_b64 s[10:11], flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x8a,0x92]
+
+s_bfm_b64 s[10:11], vcc_lo, s2
+// CHECK: [0x6a,0x02,0x8a,0x92]
+
+s_bfm_b64 s[10:11], vcc_hi, s2
+// CHECK: [0x6b,0x02,0x8a,0x92]
+
+s_bfm_b64 s[10:11], tba_lo, s2
+// CHECK: [0x6c,0x02,0x8a,0x92]
+
+s_bfm_b64 s[10:11], tba_hi, s2
+// CHECK: [0x6d,0x02,0x8a,0x92]
+
+s_bfm_b64 s[10:11], tma_lo, s2
+// CHECK: [0x6e,0x02,0x8a,0x92]
+
+s_bfm_b64 s[10:11], tma_hi, s2
+// CHECK: [0x6f,0x02,0x8a,0x92]
+
+s_bfm_b64 s[10:11], ttmp11, s2
+// CHECK: [0x7b,0x02,0x8a,0x92]
+
+s_bfm_b64 s[10:11], m0, s2
+// CHECK: [0x7c,0x02,0x8a,0x92]
+
+s_bfm_b64 s[10:11], exec_lo, s2
+// CHECK: [0x7e,0x02,0x8a,0x92]
+
+s_bfm_b64 s[10:11], exec_hi, s2
+// CHECK: [0x7f,0x02,0x8a,0x92]
+
+s_bfm_b64 s[10:11], 0, s2
+// CHECK: [0x80,0x02,0x8a,0x92]
+
+s_bfm_b64 s[10:11], -1, s2
+// CHECK: [0xc1,0x02,0x8a,0x92]
+
+s_bfm_b64 s[10:11], 0.5, s2
+// CHECK: [0xf0,0x02,0x8a,0x92]
+
+s_bfm_b64 s[10:11], -4.0, s2
+// CHECK: [0xf7,0x02,0x8a,0x92]
+
+s_bfm_b64 s[10:11], 0xaf123456, s2
+// CHECK: [0xff,0x02,0x8a,0x92,0x56,0x34,0x12,0xaf]
+
+s_bfm_b64 s[10:11], 0x3f717273, s2
+// CHECK: [0xff,0x02,0x8a,0x92,0x73,0x72,0x71,0x3f]
+
+s_bfm_b64 s[10:11], s1, s103
+// CHECK: [0x01,0x67,0x8a,0x92]
+
+s_bfm_b64 s[10:11], s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x8a,0x92]
+
+s_bfm_b64 s[10:11], s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x8a,0x92]
+
+s_bfm_b64 s[10:11], s1, vcc_lo
+// CHECK: [0x01,0x6a,0x8a,0x92]
+
+s_bfm_b64 s[10:11], s1, vcc_hi
+// CHECK: [0x01,0x6b,0x8a,0x92]
+
+s_bfm_b64 s[10:11], s1, tba_lo
+// CHECK: [0x01,0x6c,0x8a,0x92]
+
+s_bfm_b64 s[10:11], s1, tba_hi
+// CHECK: [0x01,0x6d,0x8a,0x92]
+
+s_bfm_b64 s[10:11], s1, tma_lo
+// CHECK: [0x01,0x6e,0x8a,0x92]
+
+s_bfm_b64 s[10:11], s1, tma_hi
+// CHECK: [0x01,0x6f,0x8a,0x92]
+
+s_bfm_b64 s[10:11], s1, ttmp11
+// CHECK: [0x01,0x7b,0x8a,0x92]
+
+s_bfm_b64 s[10:11], s1, m0
+// CHECK: [0x01,0x7c,0x8a,0x92]
+
+s_bfm_b64 s[10:11], s1, exec_lo
+// CHECK: [0x01,0x7e,0x8a,0x92]
+
+s_bfm_b64 s[10:11], s1, exec_hi
+// CHECK: [0x01,0x7f,0x8a,0x92]
+
+s_bfm_b64 s[10:11], s1, 0
+// CHECK: [0x01,0x80,0x8a,0x92]
+
+s_bfm_b64 s[10:11], s1, -1
+// CHECK: [0x01,0xc1,0x8a,0x92]
+
+s_bfm_b64 s[10:11], s1, 0.5
+// CHECK: [0x01,0xf0,0x8a,0x92]
+
+s_bfm_b64 s[10:11], s1, -4.0
+// CHECK: [0x01,0xf7,0x8a,0x92]
+
+s_bfm_b64 s[10:11], s1, 0xaf123456
+// CHECK: [0x01,0xff,0x8a,0x92,0x56,0x34,0x12,0xaf]
+
+s_bfm_b64 s[10:11], s1, 0x3f717273
+// CHECK: [0x01,0xff,0x8a,0x92,0x73,0x72,0x71,0x3f]
+
+s_mul_i32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x93]
+
+s_mul_i32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x93]
+
+s_mul_i32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x93]
+
+s_mul_i32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x93]
+
+s_mul_i32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x93]
+
+s_mul_i32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x93]
+
+s_mul_i32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x93]
+
+s_mul_i32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x93]
+
+s_mul_i32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x93]
+
+s_mul_i32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x93]
+
+s_mul_i32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x93]
+
+s_mul_i32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x93]
+
+s_mul_i32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x93]
+
+s_mul_i32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x93]
+
+s_mul_i32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x93]
+
+s_mul_i32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x93]
+
+s_mul_i32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x93]
+
+s_mul_i32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x93]
+
+s_mul_i32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x93]
+
+s_mul_i32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x93]
+
+s_mul_i32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x93]
+
+s_mul_i32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x93]
+
+s_mul_i32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x93]
+
+s_mul_i32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x93]
+
+s_mul_i32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x93]
+
+s_mul_i32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x93]
+
+s_mul_i32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x93]
+
+s_mul_i32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x93]
+
+s_mul_i32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x93]
+
+s_mul_i32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x93]
+
+s_mul_i32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x93]
+
+s_mul_i32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x93,0x56,0x34,0x12,0xaf]
+
+s_mul_i32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x93,0x73,0x72,0x71,0x3f]
+
+s_mul_i32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x93]
+
+s_mul_i32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x93]
+
+s_mul_i32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x93]
+
+s_mul_i32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x93]
+
+s_mul_i32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x93]
+
+s_mul_i32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x93]
+
+s_mul_i32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x93]
+
+s_mul_i32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x93]
+
+s_mul_i32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x93]
+
+s_mul_i32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x93]
+
+s_mul_i32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x93]
+
+s_mul_i32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x93]
+
+s_mul_i32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x93]
+
+s_mul_i32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x93]
+
+s_mul_i32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x93]
+
+s_mul_i32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x93]
+
+s_mul_i32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x93]
+
+s_mul_i32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x93,0x56,0x34,0x12,0xaf]
+
+s_mul_i32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x93,0x73,0x72,0x71,0x3f]
+
+s_bfe_u32 s5, s1, s2
+// CHECK: [0x01,0x02,0x85,0x93]
+
+s_bfe_u32 s103, s1, s2
+// CHECK: [0x01,0x02,0xe7,0x93]
+
+s_bfe_u32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0xe8,0x93]
+
+s_bfe_u32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0xe9,0x93]
+
+s_bfe_u32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0xea,0x93]
+
+s_bfe_u32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0xeb,0x93]
+
+s_bfe_u32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0xec,0x93]
+
+s_bfe_u32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0xed,0x93]
+
+s_bfe_u32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0xee,0x93]
+
+s_bfe_u32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0xef,0x93]
+
+s_bfe_u32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0xfb,0x93]
+
+s_bfe_u32 m0, s1, s2
+// CHECK: [0x01,0x02,0xfc,0x93]
+
+s_bfe_u32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0xfe,0x93]
+
+s_bfe_u32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0xff,0x93]
+
+s_bfe_u32 s5, s103, s2
+// CHECK: [0x67,0x02,0x85,0x93]
+
+s_bfe_u32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x85,0x93]
+
+s_bfe_u32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x85,0x93]
+
+s_bfe_u32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x85,0x93]
+
+s_bfe_u32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x85,0x93]
+
+s_bfe_u32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x85,0x93]
+
+s_bfe_u32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x85,0x93]
+
+s_bfe_u32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x85,0x93]
+
+s_bfe_u32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x85,0x93]
+
+s_bfe_u32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x85,0x93]
+
+s_bfe_u32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x85,0x93]
+
+s_bfe_u32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x85,0x93]
+
+s_bfe_u32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x85,0x93]
+
+s_bfe_u32 s5, 0, s2
+// CHECK: [0x80,0x02,0x85,0x93]
+
+s_bfe_u32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x85,0x93]
+
+s_bfe_u32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x85,0x93]
+
+s_bfe_u32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x85,0x93]
+
+s_bfe_u32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x85,0x93,0x56,0x34,0x12,0xaf]
+
+s_bfe_u32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x85,0x93,0x73,0x72,0x71,0x3f]
+
+s_bfe_u32 s5, s1, s103
+// CHECK: [0x01,0x67,0x85,0x93]
+
+s_bfe_u32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x85,0x93]
+
+s_bfe_u32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x85,0x93]
+
+s_bfe_u32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x85,0x93]
+
+s_bfe_u32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x85,0x93]
+
+s_bfe_u32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x85,0x93]
+
+s_bfe_u32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x85,0x93]
+
+s_bfe_u32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x85,0x93]
+
+s_bfe_u32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x85,0x93]
+
+s_bfe_u32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x85,0x93]
+
+s_bfe_u32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x85,0x93]
+
+s_bfe_u32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x85,0x93]
+
+s_bfe_u32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x85,0x93]
+
+s_bfe_u32 s5, s1, 0
+// CHECK: [0x01,0x80,0x85,0x93]
+
+s_bfe_u32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x85,0x93]
+
+s_bfe_u32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x85,0x93]
+
+s_bfe_u32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x85,0x93]
+
+s_bfe_u32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x85,0x93,0x56,0x34,0x12,0xaf]
+
+s_bfe_u32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x85,0x93,0x73,0x72,0x71,0x3f]
+
+s_bfe_i32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x94]
+
+s_bfe_i32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x94]
+
+s_bfe_i32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x94]
+
+s_bfe_i32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x94]
+
+s_bfe_i32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x94]
+
+s_bfe_i32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x94]
+
+s_bfe_i32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x94]
+
+s_bfe_i32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x94]
+
+s_bfe_i32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x94]
+
+s_bfe_i32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x94]
+
+s_bfe_i32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x94]
+
+s_bfe_i32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x94]
+
+s_bfe_i32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x94]
+
+s_bfe_i32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x94]
+
+s_bfe_i32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x94]
+
+s_bfe_i32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x94]
+
+s_bfe_i32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x94]
+
+s_bfe_i32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x94]
+
+s_bfe_i32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x94]
+
+s_bfe_i32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x94]
+
+s_bfe_i32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x94]
+
+s_bfe_i32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x94]
+
+s_bfe_i32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x94]
+
+s_bfe_i32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x94]
+
+s_bfe_i32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x94]
+
+s_bfe_i32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x94]
+
+s_bfe_i32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x94]
+
+s_bfe_i32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x94]
+
+s_bfe_i32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x94]
+
+s_bfe_i32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x94]
+
+s_bfe_i32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x94]
+
+s_bfe_i32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x94,0x56,0x34,0x12,0xaf]
+
+s_bfe_i32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x94,0x73,0x72,0x71,0x3f]
+
+s_bfe_i32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x94]
+
+s_bfe_i32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x94]
+
+s_bfe_i32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x94]
+
+s_bfe_i32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x94]
+
+s_bfe_i32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x94]
+
+s_bfe_i32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x94]
+
+s_bfe_i32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x94]
+
+s_bfe_i32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x94]
+
+s_bfe_i32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x94]
+
+s_bfe_i32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x94]
+
+s_bfe_i32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x94]
+
+s_bfe_i32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x94]
+
+s_bfe_i32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x94]
+
+s_bfe_i32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x94]
+
+s_bfe_i32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x94]
+
+s_bfe_i32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x94]
+
+s_bfe_i32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x94]
+
+s_bfe_i32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x94,0x56,0x34,0x12,0xaf]
+
+s_bfe_i32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x94,0x73,0x72,0x71,0x3f]
+
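+// In the 64-bit source forms below, the operand byte holds the even base
+// register of the aligned SGPR pair (e.g. s[102:103] -> 0x66), per the
+// CHECK encodings.
+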
+s_bfe_u64 s[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0x8a,0x94]
+
+s_bfe_u64 s[12:13], s[2:3], s2
+// CHECK: [0x02,0x02,0x8c,0x94]
+
+s_bfe_u64 s[102:103], s[2:3], s2
+// CHECK: [0x02,0x02,0xe6,0x94]
+
+s_bfe_u64 flat_scratch, s[2:3], s2
+// CHECK: [0x02,0x02,0xe8,0x94]
+
+s_bfe_u64 vcc, s[2:3], s2
+// CHECK: [0x02,0x02,0xea,0x94]
+
+s_bfe_u64 tba, s[2:3], s2
+// CHECK: [0x02,0x02,0xec,0x94]
+
+s_bfe_u64 tma, s[2:3], s2
+// CHECK: [0x02,0x02,0xee,0x94]
+
+s_bfe_u64 ttmp[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0xfa,0x94]
+
+s_bfe_u64 exec, s[2:3], s2
+// CHECK: [0x02,0x02,0xfe,0x94]
+
+s_bfe_u64 s[10:11], s[4:5], s2
+// CHECK: [0x04,0x02,0x8a,0x94]
+
+s_bfe_u64 s[10:11], s[102:103], s2
+// CHECK: [0x66,0x02,0x8a,0x94]
+
+s_bfe_u64 s[10:11], flat_scratch, s2
+// CHECK: [0x68,0x02,0x8a,0x94]
+
+s_bfe_u64 s[10:11], vcc, s2
+// CHECK: [0x6a,0x02,0x8a,0x94]
+
+s_bfe_u64 s[10:11], tba, s2
+// CHECK: [0x6c,0x02,0x8a,0x94]
+
+s_bfe_u64 s[10:11], tma, s2
+// CHECK: [0x6e,0x02,0x8a,0x94]
+
+s_bfe_u64 s[10:11], ttmp[10:11], s2
+// CHECK: [0x7a,0x02,0x8a,0x94]
+
+s_bfe_u64 s[10:11], exec, s2
+// CHECK: [0x7e,0x02,0x8a,0x94]
+
+s_bfe_u64 s[10:11], 0, s2
+// CHECK: [0x80,0x02,0x8a,0x94]
+
+s_bfe_u64 s[10:11], -1, s2
+// CHECK: [0xc1,0x02,0x8a,0x94]
+
+s_bfe_u64 s[10:11], 0.5, s2
+// CHECK: [0xf0,0x02,0x8a,0x94]
+
+s_bfe_u64 s[10:11], -4.0, s2
+// CHECK: [0xf7,0x02,0x8a,0x94]
+
+s_bfe_u64 s[10:11], 0xaf123456, s2
+// CHECK: [0xff,0x02,0x8a,0x94,0x56,0x34,0x12,0xaf]
+
+s_bfe_u64 s[10:11], 0x3f717273, s2
+// CHECK: [0xff,0x02,0x8a,0x94,0x73,0x72,0x71,0x3f]
+
+s_bfe_u64 s[10:11], s[2:3], s103
+// CHECK: [0x02,0x67,0x8a,0x94]
+
+s_bfe_u64 s[10:11], s[2:3], flat_scratch_lo
+// CHECK: [0x02,0x68,0x8a,0x94]
+
+s_bfe_u64 s[10:11], s[2:3], flat_scratch_hi
+// CHECK: [0x02,0x69,0x8a,0x94]
+
+s_bfe_u64 s[10:11], s[2:3], vcc_lo
+// CHECK: [0x02,0x6a,0x8a,0x94]
+
+s_bfe_u64 s[10:11], s[2:3], vcc_hi
+// CHECK: [0x02,0x6b,0x8a,0x94]
+
+s_bfe_u64 s[10:11], s[2:3], tba_lo
+// CHECK: [0x02,0x6c,0x8a,0x94]
+
+s_bfe_u64 s[10:11], s[2:3], tba_hi
+// CHECK: [0x02,0x6d,0x8a,0x94]
+
+s_bfe_u64 s[10:11], s[2:3], tma_lo
+// CHECK: [0x02,0x6e,0x8a,0x94]
+
+s_bfe_u64 s[10:11], s[2:3], tma_hi
+// CHECK: [0x02,0x6f,0x8a,0x94]
+
+s_bfe_u64 s[10:11], s[2:3], ttmp11
+// CHECK: [0x02,0x7b,0x8a,0x94]
+
+s_bfe_u64 s[10:11], s[2:3], m0
+// CHECK: [0x02,0x7c,0x8a,0x94]
+
+s_bfe_u64 s[10:11], s[2:3], exec_lo
+// CHECK: [0x02,0x7e,0x8a,0x94]
+
+s_bfe_u64 s[10:11], s[2:3], exec_hi
+// CHECK: [0x02,0x7f,0x8a,0x94]
+
+s_bfe_u64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x94]
+
+s_bfe_u64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x94]
+
+s_bfe_u64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x94]
+
+s_bfe_u64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x94]
+
+s_bfe_u64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x94,0x56,0x34,0x12,0xaf]
+
+s_bfe_u64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x94,0x73,0x72,0x71,0x3f]
+
+s_bfe_i64 s[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0x0a,0x95]
+
+s_bfe_i64 s[12:13], s[2:3], s2
+// CHECK: [0x02,0x02,0x0c,0x95]
+
+s_bfe_i64 s[102:103], s[2:3], s2
+// CHECK: [0x02,0x02,0x66,0x95]
+
+s_bfe_i64 flat_scratch, s[2:3], s2
+// CHECK: [0x02,0x02,0x68,0x95]
+
+s_bfe_i64 vcc, s[2:3], s2
+// CHECK: [0x02,0x02,0x6a,0x95]
+
+s_bfe_i64 tba, s[2:3], s2
+// CHECK: [0x02,0x02,0x6c,0x95]
+
+s_bfe_i64 tma, s[2:3], s2
+// CHECK: [0x02,0x02,0x6e,0x95]
+
+s_bfe_i64 ttmp[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0x7a,0x95]
+
+s_bfe_i64 exec, s[2:3], s2
+// CHECK: [0x02,0x02,0x7e,0x95]
+
+s_bfe_i64 s[10:11], s[4:5], s2
+// CHECK: [0x04,0x02,0x0a,0x95]
+
+s_bfe_i64 s[10:11], s[102:103], s2
+// CHECK: [0x66,0x02,0x0a,0x95]
+
+s_bfe_i64 s[10:11], flat_scratch, s2
+// CHECK: [0x68,0x02,0x0a,0x95]
+
+s_bfe_i64 s[10:11], vcc, s2
+// CHECK: [0x6a,0x02,0x0a,0x95]
+
+s_bfe_i64 s[10:11], tba, s2
+// CHECK: [0x6c,0x02,0x0a,0x95]
+
+s_bfe_i64 s[10:11], tma, s2
+// CHECK: [0x6e,0x02,0x0a,0x95]
+
+s_bfe_i64 s[10:11], ttmp[10:11], s2
+// CHECK: [0x7a,0x02,0x0a,0x95]
+
+s_bfe_i64 s[10:11], exec, s2
+// CHECK: [0x7e,0x02,0x0a,0x95]
+
+s_bfe_i64 s[10:11], 0, s2
+// CHECK: [0x80,0x02,0x0a,0x95]
+
+s_bfe_i64 s[10:11], -1, s2
+// CHECK: [0xc1,0x02,0x0a,0x95]
+
+s_bfe_i64 s[10:11], 0.5, s2
+// CHECK: [0xf0,0x02,0x0a,0x95]
+
+s_bfe_i64 s[10:11], -4.0, s2
+// CHECK: [0xf7,0x02,0x0a,0x95]
+
+s_bfe_i64 s[10:11], 0xaf123456, s2
+// CHECK: [0xff,0x02,0x0a,0x95,0x56,0x34,0x12,0xaf]
+
+s_bfe_i64 s[10:11], 0x3f717273, s2
+// CHECK: [0xff,0x02,0x0a,0x95,0x73,0x72,0x71,0x3f]
+
+s_bfe_i64 s[10:11], s[2:3], s103
+// CHECK: [0x02,0x67,0x0a,0x95]
+
+s_bfe_i64 s[10:11], s[2:3], flat_scratch_lo
+// CHECK: [0x02,0x68,0x0a,0x95]
+
+s_bfe_i64 s[10:11], s[2:3], flat_scratch_hi
+// CHECK: [0x02,0x69,0x0a,0x95]
+
+s_bfe_i64 s[10:11], s[2:3], vcc_lo
+// CHECK: [0x02,0x6a,0x0a,0x95]
+
+s_bfe_i64 s[10:11], s[2:3], vcc_hi
+// CHECK: [0x02,0x6b,0x0a,0x95]
+
+s_bfe_i64 s[10:11], s[2:3], tba_lo
+// CHECK: [0x02,0x6c,0x0a,0x95]
+
+s_bfe_i64 s[10:11], s[2:3], tba_hi
+// CHECK: [0x02,0x6d,0x0a,0x95]
+
+s_bfe_i64 s[10:11], s[2:3], tma_lo
+// CHECK: [0x02,0x6e,0x0a,0x95]
+
+s_bfe_i64 s[10:11], s[2:3], tma_hi
+// CHECK: [0x02,0x6f,0x0a,0x95]
+
+s_bfe_i64 s[10:11], s[2:3], ttmp11
+// CHECK: [0x02,0x7b,0x0a,0x95]
+
+s_bfe_i64 s[10:11], s[2:3], m0
+// CHECK: [0x02,0x7c,0x0a,0x95]
+
+s_bfe_i64 s[10:11], s[2:3], exec_lo
+// CHECK: [0x02,0x7e,0x0a,0x95]
+
+s_bfe_i64 s[10:11], s[2:3], exec_hi
+// CHECK: [0x02,0x7f,0x0a,0x95]
+
+s_bfe_i64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x0a,0x95]
+
+s_bfe_i64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x0a,0x95]
+
+s_bfe_i64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x0a,0x95]
+
+s_bfe_i64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x0a,0x95]
+
+s_bfe_i64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x0a,0x95,0x56,0x34,0x12,0xaf]
+
+s_bfe_i64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x0a,0x95,0x73,0x72,0x71,0x3f]
+
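+// s_cbranch_g_fork takes two 64-bit SGPR-pair operands and no scalar
+// destination; the third encoding byte stays 0x80 for every case below.
+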
+s_cbranch_g_fork s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x80,0x95]
+
+s_cbranch_g_fork s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x80,0x95]
+
+s_cbranch_g_fork s[102:103], s[4:5]
+// CHECK: [0x66,0x04,0x80,0x95]
+
+s_cbranch_g_fork flat_scratch, s[4:5]
+// CHECK: [0x68,0x04,0x80,0x95]
+
+s_cbranch_g_fork vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x80,0x95]
+
+s_cbranch_g_fork tba, s[4:5]
+// CHECK: [0x6c,0x04,0x80,0x95]
+
+s_cbranch_g_fork tma, s[4:5]
+// CHECK: [0x6e,0x04,0x80,0x95]
+
+s_cbranch_g_fork ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x80,0x95]
+
+s_cbranch_g_fork exec, s[4:5]
+// CHECK: [0x7e,0x04,0x80,0x95]
+
+s_cbranch_g_fork s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x80,0x95]
+
+s_cbranch_g_fork s[2:3], s[102:103]
+// CHECK: [0x02,0x66,0x80,0x95]
+
+s_cbranch_g_fork s[2:3], flat_scratch
+// CHECK: [0x02,0x68,0x80,0x95]
+
+s_cbranch_g_fork s[2:3], vcc
+// CHECK: [0x02,0x6a,0x80,0x95]
+
+s_cbranch_g_fork s[2:3], tba
+// CHECK: [0x02,0x6c,0x80,0x95]
+
+s_cbranch_g_fork s[2:3], tma
+// CHECK: [0x02,0x6e,0x80,0x95]
+
+s_cbranch_g_fork s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x80,0x95]
+
+s_cbranch_g_fork s[2:3], exec
+// CHECK: [0x02,0x7e,0x80,0x95]
+
+s_absdiff_i32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x96]
+
+s_absdiff_i32 s103, s1, s2
+// CHECK: [0x01,0x02,0x67,0x96]
+
+s_absdiff_i32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x68,0x96]
+
+s_absdiff_i32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x69,0x96]
+
+s_absdiff_i32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x96]
+
+s_absdiff_i32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x96]
+
+s_absdiff_i32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x96]
+
+s_absdiff_i32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x96]
+
+s_absdiff_i32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x96]
+
+s_absdiff_i32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x96]
+
+s_absdiff_i32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x96]
+
+s_absdiff_i32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x96]
+
+s_absdiff_i32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x96]
+
+s_absdiff_i32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x96]
+
+s_absdiff_i32 s5, s103, s2
+// CHECK: [0x67,0x02,0x05,0x96]
+
+s_absdiff_i32 s5, flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0x96]
+
+s_absdiff_i32 s5, flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0x96]
+
+s_absdiff_i32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x96]
+
+s_absdiff_i32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x96]
+
+s_absdiff_i32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x96]
+
+s_absdiff_i32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x96]
+
+s_absdiff_i32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x96]
+
+s_absdiff_i32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x96]
+
+s_absdiff_i32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x96]
+
+s_absdiff_i32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x96]
+
+s_absdiff_i32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x96]
+
+s_absdiff_i32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x96]
+
+s_absdiff_i32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x96]
+
+s_absdiff_i32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x96]
+
+s_absdiff_i32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x96]
+
+s_absdiff_i32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x96]
+
+s_absdiff_i32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x96,0x56,0x34,0x12,0xaf]
+
+s_absdiff_i32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x96,0x73,0x72,0x71,0x3f]
+
+s_absdiff_i32 s5, s1, s103
+// CHECK: [0x01,0x67,0x05,0x96]
+
+s_absdiff_i32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0x96]
+
+s_absdiff_i32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0x96]
+
+s_absdiff_i32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x96]
+
+s_absdiff_i32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x96]
+
+s_absdiff_i32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x96]
+
+s_absdiff_i32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x96]
+
+s_absdiff_i32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x96]
+
+s_absdiff_i32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x96]
+
+s_absdiff_i32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x96]
+
+s_absdiff_i32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x96]
+
+s_absdiff_i32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x96]
+
+s_absdiff_i32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x96]
+
+s_absdiff_i32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x96]
+
+s_absdiff_i32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x96]
+
+s_absdiff_i32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x96]
+
+s_absdiff_i32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x96]
+
+s_absdiff_i32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x96,0x56,0x34,0x12,0xaf]
+
+s_absdiff_i32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x96,0x73,0x72,0x71,0x3f]
+
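+// The s_cmp_*, s_bitcmp*, and s_setvskip cases below use the SOPC format:
+// there is no destination field (compares write SCC), the opcode sits in
+// the third byte, and the fourth byte is fixed at 0xbf, as the CHECK
+// encodings show.
+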
+s_cmp_eq_i32 s1, s2
+// CHECK: [0x01,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 s103, s2
+// CHECK: [0x67,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 m0, s2
+// CHECK: [0x7c,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 0, s2
+// CHECK: [0x80,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 -1, s2
+// CHECK: [0xc1,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 0.5, s2
+// CHECK: [0xf0,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 -4.0, s2
+// CHECK: [0xf7,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x00,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_eq_i32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x00,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_eq_i32 s1, s103
+// CHECK: [0x01,0x67,0x00,0xbf]
+
+s_cmp_eq_i32 s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x00,0xbf]
+
+s_cmp_eq_i32 s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x00,0xbf]
+
+s_cmp_eq_i32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x00,0xbf]
+
+s_cmp_eq_i32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x00,0xbf]
+
+s_cmp_eq_i32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x00,0xbf]
+
+s_cmp_eq_i32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x00,0xbf]
+
+s_cmp_eq_i32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x00,0xbf]
+
+s_cmp_eq_i32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x00,0xbf]
+
+s_cmp_eq_i32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x00,0xbf]
+
+s_cmp_eq_i32 s1, m0
+// CHECK: [0x01,0x7c,0x00,0xbf]
+
+s_cmp_eq_i32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x00,0xbf]
+
+s_cmp_eq_i32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x00,0xbf]
+
+s_cmp_eq_i32 s1, 0
+// CHECK: [0x01,0x80,0x00,0xbf]
+
+s_cmp_eq_i32 s1, -1
+// CHECK: [0x01,0xc1,0x00,0xbf]
+
+s_cmp_eq_i32 s1, 0.5
+// CHECK: [0x01,0xf0,0x00,0xbf]
+
+s_cmp_eq_i32 s1, -4.0
+// CHECK: [0x01,0xf7,0x00,0xbf]
+
+s_cmp_eq_i32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x00,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_eq_i32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x00,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_lg_i32 s1, s2
+// CHECK: [0x01,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 s103, s2
+// CHECK: [0x67,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 m0, s2
+// CHECK: [0x7c,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 0, s2
+// CHECK: [0x80,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 -1, s2
+// CHECK: [0xc1,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 0.5, s2
+// CHECK: [0xf0,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 -4.0, s2
+// CHECK: [0xf7,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x01,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_lg_i32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x01,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_lg_i32 s1, s103
+// CHECK: [0x01,0x67,0x01,0xbf]
+
+s_cmp_lg_i32 s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x01,0xbf]
+
+s_cmp_lg_i32 s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x01,0xbf]
+
+s_cmp_lg_i32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x01,0xbf]
+
+s_cmp_lg_i32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x01,0xbf]
+
+s_cmp_lg_i32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x01,0xbf]
+
+s_cmp_lg_i32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x01,0xbf]
+
+s_cmp_lg_i32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x01,0xbf]
+
+s_cmp_lg_i32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x01,0xbf]
+
+s_cmp_lg_i32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x01,0xbf]
+
+s_cmp_lg_i32 s1, m0
+// CHECK: [0x01,0x7c,0x01,0xbf]
+
+s_cmp_lg_i32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x01,0xbf]
+
+s_cmp_lg_i32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x01,0xbf]
+
+s_cmp_lg_i32 s1, 0
+// CHECK: [0x01,0x80,0x01,0xbf]
+
+s_cmp_lg_i32 s1, -1
+// CHECK: [0x01,0xc1,0x01,0xbf]
+
+s_cmp_lg_i32 s1, 0.5
+// CHECK: [0x01,0xf0,0x01,0xbf]
+
+s_cmp_lg_i32 s1, -4.0
+// CHECK: [0x01,0xf7,0x01,0xbf]
+
+s_cmp_lg_i32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x01,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_lg_i32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x01,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_gt_i32 s1, s2
+// CHECK: [0x01,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 s103, s2
+// CHECK: [0x67,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 m0, s2
+// CHECK: [0x7c,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 0, s2
+// CHECK: [0x80,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 -1, s2
+// CHECK: [0xc1,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 0.5, s2
+// CHECK: [0xf0,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 -4.0, s2
+// CHECK: [0xf7,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x02,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_gt_i32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x02,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_gt_i32 s1, s103
+// CHECK: [0x01,0x67,0x02,0xbf]
+
+s_cmp_gt_i32 s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x02,0xbf]
+
+s_cmp_gt_i32 s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x02,0xbf]
+
+s_cmp_gt_i32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x02,0xbf]
+
+s_cmp_gt_i32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x02,0xbf]
+
+s_cmp_gt_i32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x02,0xbf]
+
+s_cmp_gt_i32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x02,0xbf]
+
+s_cmp_gt_i32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x02,0xbf]
+
+s_cmp_gt_i32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x02,0xbf]
+
+s_cmp_gt_i32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x02,0xbf]
+
+s_cmp_gt_i32 s1, m0
+// CHECK: [0x01,0x7c,0x02,0xbf]
+
+s_cmp_gt_i32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x02,0xbf]
+
+s_cmp_gt_i32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x02,0xbf]
+
+s_cmp_gt_i32 s1, 0
+// CHECK: [0x01,0x80,0x02,0xbf]
+
+s_cmp_gt_i32 s1, -1
+// CHECK: [0x01,0xc1,0x02,0xbf]
+
+s_cmp_gt_i32 s1, 0.5
+// CHECK: [0x01,0xf0,0x02,0xbf]
+
+s_cmp_gt_i32 s1, -4.0
+// CHECK: [0x01,0xf7,0x02,0xbf]
+
+s_cmp_gt_i32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x02,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_gt_i32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x02,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_ge_i32 s1, s2
+// CHECK: [0x01,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 s103, s2
+// CHECK: [0x67,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 m0, s2
+// CHECK: [0x7c,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 0, s2
+// CHECK: [0x80,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 -1, s2
+// CHECK: [0xc1,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 0.5, s2
+// CHECK: [0xf0,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 -4.0, s2
+// CHECK: [0xf7,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x03,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_ge_i32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x03,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_ge_i32 s1, s103
+// CHECK: [0x01,0x67,0x03,0xbf]
+
+s_cmp_ge_i32 s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x03,0xbf]
+
+s_cmp_ge_i32 s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x03,0xbf]
+
+s_cmp_ge_i32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x03,0xbf]
+
+s_cmp_ge_i32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x03,0xbf]
+
+s_cmp_ge_i32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x03,0xbf]
+
+s_cmp_ge_i32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x03,0xbf]
+
+s_cmp_ge_i32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x03,0xbf]
+
+s_cmp_ge_i32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x03,0xbf]
+
+s_cmp_ge_i32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x03,0xbf]
+
+s_cmp_ge_i32 s1, m0
+// CHECK: [0x01,0x7c,0x03,0xbf]
+
+s_cmp_ge_i32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x03,0xbf]
+
+s_cmp_ge_i32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x03,0xbf]
+
+s_cmp_ge_i32 s1, 0
+// CHECK: [0x01,0x80,0x03,0xbf]
+
+s_cmp_ge_i32 s1, -1
+// CHECK: [0x01,0xc1,0x03,0xbf]
+
+s_cmp_ge_i32 s1, 0.5
+// CHECK: [0x01,0xf0,0x03,0xbf]
+
+s_cmp_ge_i32 s1, -4.0
+// CHECK: [0x01,0xf7,0x03,0xbf]
+
+s_cmp_ge_i32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x03,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_ge_i32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x03,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_lt_i32 s1, s2
+// CHECK: [0x01,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 s103, s2
+// CHECK: [0x67,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 m0, s2
+// CHECK: [0x7c,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 0, s2
+// CHECK: [0x80,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 -1, s2
+// CHECK: [0xc1,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 0.5, s2
+// CHECK: [0xf0,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 -4.0, s2
+// CHECK: [0xf7,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x04,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_lt_i32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x04,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_lt_i32 s1, s103
+// CHECK: [0x01,0x67,0x04,0xbf]
+
+s_cmp_lt_i32 s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x04,0xbf]
+
+s_cmp_lt_i32 s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x04,0xbf]
+
+s_cmp_lt_i32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x04,0xbf]
+
+s_cmp_lt_i32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x04,0xbf]
+
+s_cmp_lt_i32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x04,0xbf]
+
+s_cmp_lt_i32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x04,0xbf]
+
+s_cmp_lt_i32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x04,0xbf]
+
+s_cmp_lt_i32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x04,0xbf]
+
+s_cmp_lt_i32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x04,0xbf]
+
+s_cmp_lt_i32 s1, m0
+// CHECK: [0x01,0x7c,0x04,0xbf]
+
+s_cmp_lt_i32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x04,0xbf]
+
+s_cmp_lt_i32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x04,0xbf]
+
+s_cmp_lt_i32 s1, 0
+// CHECK: [0x01,0x80,0x04,0xbf]
+
+s_cmp_lt_i32 s1, -1
+// CHECK: [0x01,0xc1,0x04,0xbf]
+
+s_cmp_lt_i32 s1, 0.5
+// CHECK: [0x01,0xf0,0x04,0xbf]
+
+s_cmp_lt_i32 s1, -4.0
+// CHECK: [0x01,0xf7,0x04,0xbf]
+
+s_cmp_lt_i32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x04,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_lt_i32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x04,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_le_i32 s1, s2
+// CHECK: [0x01,0x02,0x05,0xbf]
+
+s_cmp_le_i32 s103, s2
+// CHECK: [0x67,0x02,0x05,0xbf]
+
+s_cmp_le_i32 flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x05,0xbf]
+
+s_cmp_le_i32 flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x05,0xbf]
+
+s_cmp_le_i32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0xbf]
+
+s_cmp_le_i32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0xbf]
+
+s_cmp_le_i32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0xbf]
+
+s_cmp_le_i32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0xbf]
+
+s_cmp_le_i32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0xbf]
+
+s_cmp_le_i32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0xbf]
+
+s_cmp_le_i32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0xbf]
+
+s_cmp_le_i32 m0, s2
+// CHECK: [0x7c,0x02,0x05,0xbf]
+
+s_cmp_le_i32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0xbf]
+
+s_cmp_le_i32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0xbf]
+
+s_cmp_le_i32 0, s2
+// CHECK: [0x80,0x02,0x05,0xbf]
+
+s_cmp_le_i32 -1, s2
+// CHECK: [0xc1,0x02,0x05,0xbf]
+
+s_cmp_le_i32 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0xbf]
+
+s_cmp_le_i32 -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0xbf]
+
+s_cmp_le_i32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_le_i32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_le_i32 s1, s103
+// CHECK: [0x01,0x67,0x05,0xbf]
+
+s_cmp_le_i32 s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x05,0xbf]
+
+s_cmp_le_i32 s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x05,0xbf]
+
+s_cmp_le_i32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0xbf]
+
+s_cmp_le_i32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0xbf]
+
+s_cmp_le_i32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0xbf]
+
+s_cmp_le_i32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0xbf]
+
+s_cmp_le_i32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0xbf]
+
+s_cmp_le_i32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0xbf]
+
+s_cmp_le_i32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0xbf]
+
+s_cmp_le_i32 s1, m0
+// CHECK: [0x01,0x7c,0x05,0xbf]
+
+s_cmp_le_i32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0xbf]
+
+s_cmp_le_i32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0xbf]
+
+s_cmp_le_i32 s1, 0
+// CHECK: [0x01,0x80,0x05,0xbf]
+
+s_cmp_le_i32 s1, -1
+// CHECK: [0x01,0xc1,0x05,0xbf]
+
+s_cmp_le_i32 s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0xbf]
+
+s_cmp_le_i32 s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0xbf]
+
+s_cmp_le_i32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_le_i32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_eq_u32 s1, s2
+// CHECK: [0x01,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 s103, s2
+// CHECK: [0x67,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 m0, s2
+// CHECK: [0x7c,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 0, s2
+// CHECK: [0x80,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 -1, s2
+// CHECK: [0xc1,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 0.5, s2
+// CHECK: [0xf0,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 -4.0, s2
+// CHECK: [0xf7,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x06,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_eq_u32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x06,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_eq_u32 s1, s103
+// CHECK: [0x01,0x67,0x06,0xbf]
+
+s_cmp_eq_u32 s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x06,0xbf]
+
+s_cmp_eq_u32 s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x06,0xbf]
+
+s_cmp_eq_u32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x06,0xbf]
+
+s_cmp_eq_u32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x06,0xbf]
+
+s_cmp_eq_u32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x06,0xbf]
+
+s_cmp_eq_u32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x06,0xbf]
+
+s_cmp_eq_u32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x06,0xbf]
+
+s_cmp_eq_u32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x06,0xbf]
+
+s_cmp_eq_u32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x06,0xbf]
+
+s_cmp_eq_u32 s1, m0
+// CHECK: [0x01,0x7c,0x06,0xbf]
+
+s_cmp_eq_u32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x06,0xbf]
+
+s_cmp_eq_u32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x06,0xbf]
+
+s_cmp_eq_u32 s1, 0
+// CHECK: [0x01,0x80,0x06,0xbf]
+
+s_cmp_eq_u32 s1, -1
+// CHECK: [0x01,0xc1,0x06,0xbf]
+
+s_cmp_eq_u32 s1, 0.5
+// CHECK: [0x01,0xf0,0x06,0xbf]
+
+s_cmp_eq_u32 s1, -4.0
+// CHECK: [0x01,0xf7,0x06,0xbf]
+
+s_cmp_eq_u32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x06,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_eq_u32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x06,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_lg_u32 s1, s2
+// CHECK: [0x01,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 s103, s2
+// CHECK: [0x67,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 m0, s2
+// CHECK: [0x7c,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 0, s2
+// CHECK: [0x80,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 -1, s2
+// CHECK: [0xc1,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 0.5, s2
+// CHECK: [0xf0,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 -4.0, s2
+// CHECK: [0xf7,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x07,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_lg_u32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x07,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_lg_u32 s1, s103
+// CHECK: [0x01,0x67,0x07,0xbf]
+
+s_cmp_lg_u32 s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x07,0xbf]
+
+s_cmp_lg_u32 s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x07,0xbf]
+
+s_cmp_lg_u32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x07,0xbf]
+
+s_cmp_lg_u32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x07,0xbf]
+
+s_cmp_lg_u32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x07,0xbf]
+
+s_cmp_lg_u32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x07,0xbf]
+
+s_cmp_lg_u32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x07,0xbf]
+
+s_cmp_lg_u32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x07,0xbf]
+
+s_cmp_lg_u32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x07,0xbf]
+
+s_cmp_lg_u32 s1, m0
+// CHECK: [0x01,0x7c,0x07,0xbf]
+
+s_cmp_lg_u32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x07,0xbf]
+
+s_cmp_lg_u32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x07,0xbf]
+
+s_cmp_lg_u32 s1, 0
+// CHECK: [0x01,0x80,0x07,0xbf]
+
+s_cmp_lg_u32 s1, -1
+// CHECK: [0x01,0xc1,0x07,0xbf]
+
+s_cmp_lg_u32 s1, 0.5
+// CHECK: [0x01,0xf0,0x07,0xbf]
+
+s_cmp_lg_u32 s1, -4.0
+// CHECK: [0x01,0xf7,0x07,0xbf]
+
+s_cmp_lg_u32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x07,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_lg_u32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x07,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_gt_u32 s1, s2
+// CHECK: [0x01,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 s103, s2
+// CHECK: [0x67,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 m0, s2
+// CHECK: [0x7c,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 0, s2
+// CHECK: [0x80,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 -1, s2
+// CHECK: [0xc1,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 0.5, s2
+// CHECK: [0xf0,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 -4.0, s2
+// CHECK: [0xf7,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x08,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_gt_u32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x08,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_gt_u32 s1, s103
+// CHECK: [0x01,0x67,0x08,0xbf]
+
+s_cmp_gt_u32 s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x08,0xbf]
+
+s_cmp_gt_u32 s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x08,0xbf]
+
+s_cmp_gt_u32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x08,0xbf]
+
+s_cmp_gt_u32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x08,0xbf]
+
+s_cmp_gt_u32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x08,0xbf]
+
+s_cmp_gt_u32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x08,0xbf]
+
+s_cmp_gt_u32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x08,0xbf]
+
+s_cmp_gt_u32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x08,0xbf]
+
+s_cmp_gt_u32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x08,0xbf]
+
+s_cmp_gt_u32 s1, m0
+// CHECK: [0x01,0x7c,0x08,0xbf]
+
+s_cmp_gt_u32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x08,0xbf]
+
+s_cmp_gt_u32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x08,0xbf]
+
+s_cmp_gt_u32 s1, 0
+// CHECK: [0x01,0x80,0x08,0xbf]
+
+s_cmp_gt_u32 s1, -1
+// CHECK: [0x01,0xc1,0x08,0xbf]
+
+s_cmp_gt_u32 s1, 0.5
+// CHECK: [0x01,0xf0,0x08,0xbf]
+
+s_cmp_gt_u32 s1, -4.0
+// CHECK: [0x01,0xf7,0x08,0xbf]
+
+s_cmp_gt_u32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x08,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_gt_u32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x08,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_ge_u32 s1, s2
+// CHECK: [0x01,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 s103, s2
+// CHECK: [0x67,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 m0, s2
+// CHECK: [0x7c,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 0, s2
+// CHECK: [0x80,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 -1, s2
+// CHECK: [0xc1,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 0.5, s2
+// CHECK: [0xf0,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 -4.0, s2
+// CHECK: [0xf7,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x09,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_ge_u32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x09,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_ge_u32 s1, s103
+// CHECK: [0x01,0x67,0x09,0xbf]
+
+s_cmp_ge_u32 s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x09,0xbf]
+
+s_cmp_ge_u32 s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x09,0xbf]
+
+s_cmp_ge_u32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x09,0xbf]
+
+s_cmp_ge_u32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x09,0xbf]
+
+s_cmp_ge_u32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x09,0xbf]
+
+s_cmp_ge_u32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x09,0xbf]
+
+s_cmp_ge_u32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x09,0xbf]
+
+s_cmp_ge_u32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x09,0xbf]
+
+s_cmp_ge_u32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x09,0xbf]
+
+s_cmp_ge_u32 s1, m0
+// CHECK: [0x01,0x7c,0x09,0xbf]
+
+s_cmp_ge_u32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x09,0xbf]
+
+s_cmp_ge_u32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x09,0xbf]
+
+s_cmp_ge_u32 s1, 0
+// CHECK: [0x01,0x80,0x09,0xbf]
+
+s_cmp_ge_u32 s1, -1
+// CHECK: [0x01,0xc1,0x09,0xbf]
+
+s_cmp_ge_u32 s1, 0.5
+// CHECK: [0x01,0xf0,0x09,0xbf]
+
+s_cmp_ge_u32 s1, -4.0
+// CHECK: [0x01,0xf7,0x09,0xbf]
+
+s_cmp_ge_u32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x09,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_ge_u32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x09,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_lt_u32 s1, s2
+// CHECK: [0x01,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 s103, s2
+// CHECK: [0x67,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 m0, s2
+// CHECK: [0x7c,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 0, s2
+// CHECK: [0x80,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 -1, s2
+// CHECK: [0xc1,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 0.5, s2
+// CHECK: [0xf0,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 -4.0, s2
+// CHECK: [0xf7,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x0a,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_lt_u32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x0a,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_lt_u32 s1, s103
+// CHECK: [0x01,0x67,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, m0
+// CHECK: [0x01,0x7c,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, 0
+// CHECK: [0x01,0x80,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, -1
+// CHECK: [0x01,0xc1,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, 0.5
+// CHECK: [0x01,0xf0,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, -4.0
+// CHECK: [0x01,0xf7,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x0a,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_lt_u32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x0a,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_le_u32 s1, s2
+// CHECK: [0x01,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 s103, s2
+// CHECK: [0x67,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 m0, s2
+// CHECK: [0x7c,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 0, s2
+// CHECK: [0x80,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 -1, s2
+// CHECK: [0xc1,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 0.5, s2
+// CHECK: [0xf0,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 -4.0, s2
+// CHECK: [0xf7,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x0b,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_le_u32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x0b,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_le_u32 s1, s103
+// CHECK: [0x01,0x67,0x0b,0xbf]
+
+s_cmp_le_u32 s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x0b,0xbf]
+
+s_cmp_le_u32 s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x0b,0xbf]
+
+s_cmp_le_u32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x0b,0xbf]
+
+s_cmp_le_u32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x0b,0xbf]
+
+s_cmp_le_u32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x0b,0xbf]
+
+s_cmp_le_u32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x0b,0xbf]
+
+s_cmp_le_u32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x0b,0xbf]
+
+s_cmp_le_u32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x0b,0xbf]
+
+s_cmp_le_u32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x0b,0xbf]
+
+s_cmp_le_u32 s1, m0
+// CHECK: [0x01,0x7c,0x0b,0xbf]
+
+s_cmp_le_u32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x0b,0xbf]
+
+s_cmp_le_u32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x0b,0xbf]
+
+s_cmp_le_u32 s1, 0
+// CHECK: [0x01,0x80,0x0b,0xbf]
+
+s_cmp_le_u32 s1, -1
+// CHECK: [0x01,0xc1,0x0b,0xbf]
+
+s_cmp_le_u32 s1, 0.5
+// CHECK: [0x01,0xf0,0x0b,0xbf]
+
+s_cmp_le_u32 s1, -4.0
+// CHECK: [0x01,0xf7,0x0b,0xbf]
+
+s_cmp_le_u32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x0b,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_le_u32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x0b,0xbf,0x73,0x72,0x71,0x3f]
+
+s_bitcmp0_b32 s1, s2
+// CHECK: [0x01,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 s103, s2
+// CHECK: [0x67,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 m0, s2
+// CHECK: [0x7c,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 0, s2
+// CHECK: [0x80,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 -1, s2
+// CHECK: [0xc1,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 0.5, s2
+// CHECK: [0xf0,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 -4.0, s2
+// CHECK: [0xf7,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x0c,0xbf,0x56,0x34,0x12,0xaf]
+
+s_bitcmp0_b32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x0c,0xbf,0x73,0x72,0x71,0x3f]
+
+s_bitcmp0_b32 s1, s103
+// CHECK: [0x01,0x67,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, m0
+// CHECK: [0x01,0x7c,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, 0
+// CHECK: [0x01,0x80,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, -1
+// CHECK: [0x01,0xc1,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, 0.5
+// CHECK: [0x01,0xf0,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, -4.0
+// CHECK: [0x01,0xf7,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x0c,0xbf,0x56,0x34,0x12,0xaf]
+
+s_bitcmp0_b32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x0c,0xbf,0x73,0x72,0x71,0x3f]
+
+s_bitcmp1_b32 s1, s2
+// CHECK: [0x01,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 s103, s2
+// CHECK: [0x67,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 m0, s2
+// CHECK: [0x7c,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 0, s2
+// CHECK: [0x80,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 -1, s2
+// CHECK: [0xc1,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 0.5, s2
+// CHECK: [0xf0,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 -4.0, s2
+// CHECK: [0xf7,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x0d,0xbf,0x56,0x34,0x12,0xaf]
+
+s_bitcmp1_b32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x0d,0xbf,0x73,0x72,0x71,0x3f]
+
+s_bitcmp1_b32 s1, s103
+// CHECK: [0x01,0x67,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, m0
+// CHECK: [0x01,0x7c,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, 0
+// CHECK: [0x01,0x80,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, -1
+// CHECK: [0x01,0xc1,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, 0.5
+// CHECK: [0x01,0xf0,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, -4.0
+// CHECK: [0x01,0xf7,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x0d,0xbf,0x56,0x34,0x12,0xaf]
+
+s_bitcmp1_b32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x0d,0xbf,0x73,0x72,0x71,0x3f]
+
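+// The b64 bitcmp forms take a 64-bit first operand (an aligned SGPR pair,
+// encoded by its base register) while the bit-index operand stays 32-bit.
+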
+s_bitcmp0_b64 s[2:3], s2
+// CHECK: [0x02,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 s[4:5], s2
+// CHECK: [0x04,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 s[102:103], s2
+// CHECK: [0x66,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 flat_scratch, s2
+// CHECK: [0x68,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 vcc, s2
+// CHECK: [0x6a,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 tba, s2
+// CHECK: [0x6c,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 tma, s2
+// CHECK: [0x6e,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 ttmp[10:11], s2
+// CHECK: [0x7a,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 exec, s2
+// CHECK: [0x7e,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 0, s2
+// CHECK: [0x80,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 -1, s2
+// CHECK: [0xc1,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 0.5, s2
+// CHECK: [0xf0,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 -4.0, s2
+// CHECK: [0xf7,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 0xaf123456, s2
+// CHECK: [0xff,0x02,0x0e,0xbf,0x56,0x34,0x12,0xaf]
+
+s_bitcmp0_b64 0x3f717273, s2
+// CHECK: [0xff,0x02,0x0e,0xbf,0x73,0x72,0x71,0x3f]
+
+s_bitcmp0_b64 s[2:3], s103
+// CHECK: [0x02,0x67,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], flat_scratch_lo
+// CHECK: [0x02,0x68,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], flat_scratch_hi
+// CHECK: [0x02,0x69,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], vcc_lo
+// CHECK: [0x02,0x6a,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], vcc_hi
+// CHECK: [0x02,0x6b,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], tba_lo
+// CHECK: [0x02,0x6c,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], tba_hi
+// CHECK: [0x02,0x6d,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], tma_lo
+// CHECK: [0x02,0x6e,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], tma_hi
+// CHECK: [0x02,0x6f,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], ttmp11
+// CHECK: [0x02,0x7b,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], m0
+// CHECK: [0x02,0x7c,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], exec_lo
+// CHECK: [0x02,0x7e,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], exec_hi
+// CHECK: [0x02,0x7f,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], 0
+// CHECK: [0x02,0x80,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], -1
+// CHECK: [0x02,0xc1,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x0e,0xbf,0x56,0x34,0x12,0xaf]
+
+s_bitcmp0_b64 s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x0e,0xbf,0x73,0x72,0x71,0x3f]
+
+s_bitcmp1_b64 s[2:3], s2
+// CHECK: [0x02,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 s[4:5], s2
+// CHECK: [0x04,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 s[102:103], s2
+// CHECK: [0x66,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 flat_scratch, s2
+// CHECK: [0x68,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 vcc, s2
+// CHECK: [0x6a,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 tba, s2
+// CHECK: [0x6c,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 tma, s2
+// CHECK: [0x6e,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 ttmp[10:11], s2
+// CHECK: [0x7a,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 exec, s2
+// CHECK: [0x7e,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 0, s2
+// CHECK: [0x80,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 -1, s2
+// CHECK: [0xc1,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 0.5, s2
+// CHECK: [0xf0,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 -4.0, s2
+// CHECK: [0xf7,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 0xaf123456, s2
+// CHECK: [0xff,0x02,0x0f,0xbf,0x56,0x34,0x12,0xaf]
+
+s_bitcmp1_b64 0x3f717273, s2
+// CHECK: [0xff,0x02,0x0f,0xbf,0x73,0x72,0x71,0x3f]
+
+s_bitcmp1_b64 s[2:3], s103
+// CHECK: [0x02,0x67,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], flat_scratch_lo
+// CHECK: [0x02,0x68,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], flat_scratch_hi
+// CHECK: [0x02,0x69,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], vcc_lo
+// CHECK: [0x02,0x6a,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], vcc_hi
+// CHECK: [0x02,0x6b,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], tba_lo
+// CHECK: [0x02,0x6c,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], tba_hi
+// CHECK: [0x02,0x6d,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], tma_lo
+// CHECK: [0x02,0x6e,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], tma_hi
+// CHECK: [0x02,0x6f,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], ttmp11
+// CHECK: [0x02,0x7b,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], m0
+// CHECK: [0x02,0x7c,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], exec_lo
+// CHECK: [0x02,0x7e,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], exec_hi
+// CHECK: [0x02,0x7f,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], 0
+// CHECK: [0x02,0x80,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], -1
+// CHECK: [0x02,0xc1,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x0f,0xbf,0x56,0x34,0x12,0xaf]
+
+s_bitcmp1_b64 s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x0f,0xbf,0x73,0x72,0x71,0x3f]
+
+s_setvskip s1, s2
+// CHECK: [0x01,0x02,0x10,0xbf]
+
+s_setvskip s103, s2
+// CHECK: [0x67,0x02,0x10,0xbf]
+
+s_setvskip flat_scratch_lo, s2
+// CHECK: [0x68,0x02,0x10,0xbf]
+
+s_setvskip flat_scratch_hi, s2
+// CHECK: [0x69,0x02,0x10,0xbf]
+
+s_setvskip vcc_lo, s2
+// CHECK: [0x6a,0x02,0x10,0xbf]
+
+s_setvskip vcc_hi, s2
+// CHECK: [0x6b,0x02,0x10,0xbf]
+
+s_setvskip tba_lo, s2
+// CHECK: [0x6c,0x02,0x10,0xbf]
+
+s_setvskip tba_hi, s2
+// CHECK: [0x6d,0x02,0x10,0xbf]
+
+s_setvskip tma_lo, s2
+// CHECK: [0x6e,0x02,0x10,0xbf]
+
+s_setvskip tma_hi, s2
+// CHECK: [0x6f,0x02,0x10,0xbf]
+
+s_setvskip ttmp11, s2
+// CHECK: [0x7b,0x02,0x10,0xbf]
+
+s_setvskip m0, s2
+// CHECK: [0x7c,0x02,0x10,0xbf]
+
+s_setvskip exec_lo, s2
+// CHECK: [0x7e,0x02,0x10,0xbf]
+
+s_setvskip exec_hi, s2
+// CHECK: [0x7f,0x02,0x10,0xbf]
+
+s_setvskip 0, s2
+// CHECK: [0x80,0x02,0x10,0xbf]
+
+s_setvskip -1, s2
+// CHECK: [0xc1,0x02,0x10,0xbf]
+
+s_setvskip 0.5, s2
+// CHECK: [0xf0,0x02,0x10,0xbf]
+
+s_setvskip -4.0, s2
+// CHECK: [0xf7,0x02,0x10,0xbf]
+
+s_setvskip 0xaf123456, s2
+// CHECK: [0xff,0x02,0x10,0xbf,0x56,0x34,0x12,0xaf]
+
+s_setvskip 0x3f717273, s2
+// CHECK: [0xff,0x02,0x10,0xbf,0x73,0x72,0x71,0x3f]
+
+s_setvskip s1, s103
+// CHECK: [0x01,0x67,0x10,0xbf]
+
+s_setvskip s1, flat_scratch_lo
+// CHECK: [0x01,0x68,0x10,0xbf]
+
+s_setvskip s1, flat_scratch_hi
+// CHECK: [0x01,0x69,0x10,0xbf]
+
+s_setvskip s1, vcc_lo
+// CHECK: [0x01,0x6a,0x10,0xbf]
+
+s_setvskip s1, vcc_hi
+// CHECK: [0x01,0x6b,0x10,0xbf]
+
+s_setvskip s1, tba_lo
+// CHECK: [0x01,0x6c,0x10,0xbf]
+
+s_setvskip s1, tba_hi
+// CHECK: [0x01,0x6d,0x10,0xbf]
+
+s_setvskip s1, tma_lo
+// CHECK: [0x01,0x6e,0x10,0xbf]
+
+s_setvskip s1, tma_hi
+// CHECK: [0x01,0x6f,0x10,0xbf]
+
+s_setvskip s1, ttmp11
+// CHECK: [0x01,0x7b,0x10,0xbf]
+
+s_setvskip s1, m0
+// CHECK: [0x01,0x7c,0x10,0xbf]
+
+s_setvskip s1, exec_lo
+// CHECK: [0x01,0x7e,0x10,0xbf]
+
+s_setvskip s1, exec_hi
+// CHECK: [0x01,0x7f,0x10,0xbf]
+
+s_setvskip s1, 0
+// CHECK: [0x01,0x80,0x10,0xbf]
+
+s_setvskip s1, -1
+// CHECK: [0x01,0xc1,0x10,0xbf]
+
+s_setvskip s1, 0.5
+// CHECK: [0x01,0xf0,0x10,0xbf]
+
+s_setvskip s1, -4.0
+// CHECK: [0x01,0xf7,0x10,0xbf]
+
+s_setvskip s1, 0xaf123456
+// CHECK: [0x01,0xff,0x10,0xbf,0x56,0x34,0x12,0xaf]
+
+s_setvskip s1, 0x3f717273
+// CHECK: [0x01,0xff,0x10,0xbf,0x73,0x72,0x71,0x3f]
+
+s_movk_i32 s5, 0x3141
+// CHECK: [0x41,0x31,0x05,0xb0]
+
+s_movk_i32 s103, 0x3141
+// CHECK: [0x41,0x31,0x67,0xb0]
+
+s_movk_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0x68,0xb0]
+
+s_movk_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0x69,0xb0]
+
+s_movk_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0x6a,0xb0]
+
+s_movk_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0x6b,0xb0]
+
+s_movk_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0x6c,0xb0]
+
+s_movk_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0x6d,0xb0]
+
+s_movk_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0x6e,0xb0]
+
+s_movk_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0x6f,0xb0]
+
+s_movk_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0x7b,0xb0]
+
+s_movk_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0x7c,0xb0]
+
+s_movk_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0x7e,0xb0]
+
+s_movk_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0x7f,0xb0]
+
+s_movk_i32 s5, 0xc1d1
+// CHECK: [0xd1,0xc1,0x05,0xb0]
+
+s_cmovk_i32 s5, 0x3141
+// CHECK: [0x41,0x31,0x05,0xb1]
+
+s_cmovk_i32 s103, 0x3141
+// CHECK: [0x41,0x31,0x67,0xb1]
+
+s_cmovk_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0x68,0xb1]
+
+s_cmovk_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0x69,0xb1]
+
+s_cmovk_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0x6a,0xb1]
+
+s_cmovk_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0x6b,0xb1]
+
+s_cmovk_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0x6c,0xb1]
+
+s_cmovk_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0x6d,0xb1]
+
+s_cmovk_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0x6e,0xb1]
+
+s_cmovk_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0x6f,0xb1]
+
+s_cmovk_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0x7b,0xb1]
+
+s_cmovk_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0x7c,0xb1]
+
+s_cmovk_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0x7e,0xb1]
+
+s_cmovk_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0x7f,0xb1]
+
+s_cmovk_i32 s5, 0xc1d1
+// CHECK: [0xd1,0xc1,0x05,0xb1]
+
+s_cmpk_eq_i32 s1, 0x3141
+// CHECK: [0x41,0x31,0x81,0xb1]
+
+s_cmpk_eq_i32 s103, 0x3141
+// CHECK: [0x41,0x31,0xe7,0xb1]
+
+s_cmpk_eq_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0xe8,0xb1]
+
+s_cmpk_eq_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0xe9,0xb1]
+
+s_cmpk_eq_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0xea,0xb1]
+
+s_cmpk_eq_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0xeb,0xb1]
+
+s_cmpk_eq_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0xec,0xb1]
+
+s_cmpk_eq_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0xed,0xb1]
+
+s_cmpk_eq_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0xee,0xb1]
+
+s_cmpk_eq_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0xef,0xb1]
+
+s_cmpk_eq_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0xfb,0xb1]
+
+s_cmpk_eq_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0xfc,0xb1]
+
+s_cmpk_eq_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0xfe,0xb1]
+
+s_cmpk_eq_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0xff,0xb1]
+
+s_cmpk_eq_i32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x81,0xb1]
+
+s_cmpk_lg_i32 s1, 0x3141
+// CHECK: [0x41,0x31,0x01,0xb2]
+
+s_cmpk_lg_i32 s103, 0x3141
+// CHECK: [0x41,0x31,0x67,0xb2]
+
+s_cmpk_lg_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0x68,0xb2]
+
+s_cmpk_lg_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0x69,0xb2]
+
+s_cmpk_lg_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0x6a,0xb2]
+
+s_cmpk_lg_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0x6b,0xb2]
+
+s_cmpk_lg_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0x6c,0xb2]
+
+s_cmpk_lg_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0x6d,0xb2]
+
+s_cmpk_lg_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0x6e,0xb2]
+
+s_cmpk_lg_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0x6f,0xb2]
+
+s_cmpk_lg_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0x7b,0xb2]
+
+s_cmpk_lg_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0x7c,0xb2]
+
+s_cmpk_lg_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0x7e,0xb2]
+
+s_cmpk_lg_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0x7f,0xb2]
+
+s_cmpk_lg_i32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x01,0xb2]
+
+s_cmpk_gt_i32 s1, 0x3141
+// CHECK: [0x41,0x31,0x81,0xb2]
+
+s_cmpk_gt_i32 s103, 0x3141
+// CHECK: [0x41,0x31,0xe7,0xb2]
+
+s_cmpk_gt_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0xe8,0xb2]
+
+s_cmpk_gt_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0xe9,0xb2]
+
+s_cmpk_gt_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0xea,0xb2]
+
+s_cmpk_gt_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0xeb,0xb2]
+
+s_cmpk_gt_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0xec,0xb2]
+
+s_cmpk_gt_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0xed,0xb2]
+
+s_cmpk_gt_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0xee,0xb2]
+
+s_cmpk_gt_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0xef,0xb2]
+
+s_cmpk_gt_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0xfb,0xb2]
+
+s_cmpk_gt_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0xfc,0xb2]
+
+s_cmpk_gt_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0xfe,0xb2]
+
+s_cmpk_gt_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0xff,0xb2]
+
+s_cmpk_gt_i32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x81,0xb2]
+
+s_cmpk_ge_i32 s1, 0x3141
+// CHECK: [0x41,0x31,0x01,0xb3]
+
+s_cmpk_ge_i32 s103, 0x3141
+// CHECK: [0x41,0x31,0x67,0xb3]
+
+s_cmpk_ge_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0x68,0xb3]
+
+s_cmpk_ge_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0x69,0xb3]
+
+s_cmpk_ge_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0x6a,0xb3]
+
+s_cmpk_ge_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0x6b,0xb3]
+
+s_cmpk_ge_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0x6c,0xb3]
+
+s_cmpk_ge_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0x6d,0xb3]
+
+s_cmpk_ge_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0x6e,0xb3]
+
+s_cmpk_ge_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0x6f,0xb3]
+
+s_cmpk_ge_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0x7b,0xb3]
+
+s_cmpk_ge_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0x7c,0xb3]
+
+s_cmpk_ge_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0x7e,0xb3]
+
+s_cmpk_ge_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0x7f,0xb3]
+
+s_cmpk_ge_i32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x01,0xb3]
+
+s_cmpk_lt_i32 s1, 0x3141
+// CHECK: [0x41,0x31,0x81,0xb3]
+
+s_cmpk_lt_i32 s103, 0x3141
+// CHECK: [0x41,0x31,0xe7,0xb3]
+
+s_cmpk_lt_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0xe8,0xb3]
+
+s_cmpk_lt_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0xe9,0xb3]
+
+s_cmpk_lt_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0xea,0xb3]
+
+s_cmpk_lt_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0xeb,0xb3]
+
+s_cmpk_lt_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0xec,0xb3]
+
+s_cmpk_lt_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0xed,0xb3]
+
+s_cmpk_lt_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0xee,0xb3]
+
+s_cmpk_lt_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0xef,0xb3]
+
+s_cmpk_lt_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0xfb,0xb3]
+
+s_cmpk_lt_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0xfc,0xb3]
+
+s_cmpk_lt_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0xfe,0xb3]
+
+s_cmpk_lt_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0xff,0xb3]
+
+s_cmpk_lt_i32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x81,0xb3]
+
+s_cmpk_le_i32 s1, 0x3141
+// CHECK: [0x41,0x31,0x01,0xb4]
+
+s_cmpk_le_i32 s103, 0x3141
+// CHECK: [0x41,0x31,0x67,0xb4]
+
+s_cmpk_le_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0x68,0xb4]
+
+s_cmpk_le_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0x69,0xb4]
+
+s_cmpk_le_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0x6a,0xb4]
+
+s_cmpk_le_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0x6b,0xb4]
+
+s_cmpk_le_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0x6c,0xb4]
+
+s_cmpk_le_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0x6d,0xb4]
+
+s_cmpk_le_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0x6e,0xb4]
+
+s_cmpk_le_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0x6f,0xb4]
+
+s_cmpk_le_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0x7b,0xb4]
+
+s_cmpk_le_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0x7c,0xb4]
+
+s_cmpk_le_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0x7e,0xb4]
+
+s_cmpk_le_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0x7f,0xb4]
+
+s_cmpk_le_i32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x01,0xb4]
+
+s_cmpk_eq_u32 s1, 0x3141
+// CHECK: [0x41,0x31,0x81,0xb4]
+
+s_cmpk_eq_u32 s103, 0x3141
+// CHECK: [0x41,0x31,0xe7,0xb4]
+
+s_cmpk_eq_u32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0xe8,0xb4]
+
+s_cmpk_eq_u32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0xe9,0xb4]
+
+s_cmpk_eq_u32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0xea,0xb4]
+
+s_cmpk_eq_u32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0xeb,0xb4]
+
+s_cmpk_eq_u32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0xec,0xb4]
+
+s_cmpk_eq_u32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0xed,0xb4]
+
+s_cmpk_eq_u32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0xee,0xb4]
+
+s_cmpk_eq_u32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0xef,0xb4]
+
+s_cmpk_eq_u32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0xfb,0xb4]
+
+s_cmpk_eq_u32 m0, 0x3141
+// CHECK: [0x41,0x31,0xfc,0xb4]
+
+s_cmpk_eq_u32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0xfe,0xb4]
+
+s_cmpk_eq_u32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0xff,0xb4]
+
+s_cmpk_eq_u32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x81,0xb4]
+
+s_cmpk_lg_u32 s1, 0x3141
+// CHECK: [0x41,0x31,0x01,0xb5]
+
+s_cmpk_lg_u32 s103, 0x3141
+// CHECK: [0x41,0x31,0x67,0xb5]
+
+s_cmpk_lg_u32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0x68,0xb5]
+
+s_cmpk_lg_u32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0x69,0xb5]
+
+s_cmpk_lg_u32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0x6a,0xb5]
+
+s_cmpk_lg_u32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0x6b,0xb5]
+
+s_cmpk_lg_u32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0x6c,0xb5]
+
+s_cmpk_lg_u32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0x6d,0xb5]
+
+s_cmpk_lg_u32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0x6e,0xb5]
+
+s_cmpk_lg_u32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0x6f,0xb5]
+
+s_cmpk_lg_u32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0x7b,0xb5]
+
+s_cmpk_lg_u32 m0, 0x3141
+// CHECK: [0x41,0x31,0x7c,0xb5]
+
+s_cmpk_lg_u32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0x7e,0xb5]
+
+s_cmpk_lg_u32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0x7f,0xb5]
+
+s_cmpk_lg_u32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x01,0xb5]
+
+s_cmpk_gt_u32 s1, 0x3141
+// CHECK: [0x41,0x31,0x81,0xb5]
+
+s_cmpk_gt_u32 s103, 0x3141
+// CHECK: [0x41,0x31,0xe7,0xb5]
+
+s_cmpk_gt_u32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0xe8,0xb5]
+
+s_cmpk_gt_u32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0xe9,0xb5]
+
+s_cmpk_gt_u32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0xea,0xb5]
+
+s_cmpk_gt_u32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0xeb,0xb5]
+
+s_cmpk_gt_u32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0xec,0xb5]
+
+s_cmpk_gt_u32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0xed,0xb5]
+
+s_cmpk_gt_u32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0xee,0xb5]
+
+s_cmpk_gt_u32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0xef,0xb5]
+
+s_cmpk_gt_u32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0xfb,0xb5]
+
+s_cmpk_gt_u32 m0, 0x3141
+// CHECK: [0x41,0x31,0xfc,0xb5]
+
+s_cmpk_gt_u32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0xfe,0xb5]
+
+s_cmpk_gt_u32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0xff,0xb5]
+
+s_cmpk_gt_u32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x81,0xb5]
+
+s_cmpk_ge_u32 s1, 0x3141
+// CHECK: [0x41,0x31,0x01,0xb6]
+
+s_cmpk_ge_u32 s103, 0x3141
+// CHECK: [0x41,0x31,0x67,0xb6]
+
+s_cmpk_ge_u32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0x68,0xb6]
+
+s_cmpk_ge_u32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0x69,0xb6]
+
+s_cmpk_ge_u32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0x6a,0xb6]
+
+s_cmpk_ge_u32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0x6b,0xb6]
+
+s_cmpk_ge_u32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0x6c,0xb6]
+
+s_cmpk_ge_u32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0x6d,0xb6]
+
+s_cmpk_ge_u32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0x6e,0xb6]
+
+s_cmpk_ge_u32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0x6f,0xb6]
+
+s_cmpk_ge_u32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0x7b,0xb6]
+
+s_cmpk_ge_u32 m0, 0x3141
+// CHECK: [0x41,0x31,0x7c,0xb6]
+
+s_cmpk_ge_u32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0x7e,0xb6]
+
+s_cmpk_ge_u32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0x7f,0xb6]
+
+s_cmpk_ge_u32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x01,0xb6]
+
+s_cmpk_lt_u32 s1, 0x3141
+// CHECK: [0x41,0x31,0x81,0xb6]
+
+s_cmpk_lt_u32 s103, 0x3141
+// CHECK: [0x41,0x31,0xe7,0xb6]
+
+s_cmpk_lt_u32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0xe8,0xb6]
+
+s_cmpk_lt_u32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0xe9,0xb6]
+
+s_cmpk_lt_u32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0xea,0xb6]
+
+s_cmpk_lt_u32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0xeb,0xb6]
+
+s_cmpk_lt_u32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0xec,0xb6]
+
+s_cmpk_lt_u32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0xed,0xb6]
+
+s_cmpk_lt_u32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0xee,0xb6]
+
+s_cmpk_lt_u32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0xef,0xb6]
+
+s_cmpk_lt_u32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0xfb,0xb6]
+
+s_cmpk_lt_u32 m0, 0x3141
+// CHECK: [0x41,0x31,0xfc,0xb6]
+
+s_cmpk_lt_u32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0xfe,0xb6]
+
+s_cmpk_lt_u32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0xff,0xb6]
+
+s_cmpk_lt_u32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x81,0xb6]
+
+s_cmpk_le_u32 s1, 0x3141
+// CHECK: [0x41,0x31,0x01,0xb7]
+
+s_cmpk_le_u32 s103, 0x3141
+// CHECK: [0x41,0x31,0x67,0xb7]
+
+s_cmpk_le_u32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0x68,0xb7]
+
+s_cmpk_le_u32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0x69,0xb7]
+
+s_cmpk_le_u32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0x6a,0xb7]
+
+s_cmpk_le_u32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0x6b,0xb7]
+
+s_cmpk_le_u32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0x6c,0xb7]
+
+s_cmpk_le_u32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0x6d,0xb7]
+
+s_cmpk_le_u32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0x6e,0xb7]
+
+s_cmpk_le_u32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0x6f,0xb7]
+
+s_cmpk_le_u32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0x7b,0xb7]
+
+s_cmpk_le_u32 m0, 0x3141
+// CHECK: [0x41,0x31,0x7c,0xb7]
+
+s_cmpk_le_u32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0x7e,0xb7]
+
+s_cmpk_le_u32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0x7f,0xb7]
+
+s_cmpk_le_u32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x01,0xb7]
+
+s_addk_i32 s5, 0x3141
+// CHECK: [0x41,0x31,0x85,0xb7]
+
+s_addk_i32 s103, 0x3141
+// CHECK: [0x41,0x31,0xe7,0xb7]
+
+s_addk_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0xe8,0xb7]
+
+s_addk_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0xe9,0xb7]
+
+s_addk_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0xea,0xb7]
+
+s_addk_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0xeb,0xb7]
+
+s_addk_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0xec,0xb7]
+
+s_addk_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0xed,0xb7]
+
+s_addk_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0xee,0xb7]
+
+s_addk_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0xef,0xb7]
+
+s_addk_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0xfb,0xb7]
+
+s_addk_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0xfc,0xb7]
+
+s_addk_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0xfe,0xb7]
+
+s_addk_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0xff,0xb7]
+
+s_addk_i32 s5, 0xc1d1
+// CHECK: [0xd1,0xc1,0x85,0xb7]
+
+s_mulk_i32 s5, 0x3141
+// CHECK: [0x41,0x31,0x05,0xb8]
+
+s_mulk_i32 s103, 0x3141
+// CHECK: [0x41,0x31,0x67,0xb8]
+
+s_mulk_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0x68,0xb8]
+
+s_mulk_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0x69,0xb8]
+
+s_mulk_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0x6a,0xb8]
+
+s_mulk_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0x6b,0xb8]
+
+s_mulk_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0x6c,0xb8]
+
+s_mulk_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0x6d,0xb8]
+
+s_mulk_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0x6e,0xb8]
+
+s_mulk_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0x6f,0xb8]
+
+s_mulk_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0x7b,0xb8]
+
+s_mulk_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0x7c,0xb8]
+
+s_mulk_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0x7e,0xb8]
+
+s_mulk_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0x7f,0xb8]
+
+s_mulk_i32 s5, 0xc1d1
+// CHECK: [0xd1,0xc1,0x05,0xb8]
+
+s_cbranch_i_fork s[2:3], 12609
+// CHECK: [0x41,0x31,0x82,0xb8]
+
+s_cbranch_i_fork s[4:5], 12609
+// CHECK: [0x41,0x31,0x84,0xb8]
+
+s_cbranch_i_fork s[102:103], 12609
+// CHECK: [0x41,0x31,0xe6,0xb8]
+
+s_cbranch_i_fork flat_scratch, 12609
+// CHECK: [0x41,0x31,0xe8,0xb8]
+
+s_cbranch_i_fork vcc, 12609
+// CHECK: [0x41,0x31,0xea,0xb8]
+
+s_cbranch_i_fork tba, 12609
+// CHECK: [0x41,0x31,0xec,0xb8]
+
+s_cbranch_i_fork tma, 12609
+// CHECK: [0x41,0x31,0xee,0xb8]
+
+s_cbranch_i_fork ttmp[10:11], 12609
+// CHECK: [0x41,0x31,0xfa,0xb8]
+
+s_cbranch_i_fork exec, 12609
+// CHECK: [0x41,0x31,0xfe,0xb8]
+
+s_cbranch_i_fork s[2:3], 49617
+// CHECK: [0xd1,0xc1,0x82,0xb8]
+
+s_getreg_b32 s5, 0x3141
+// CHECK: [0x41,0x31,0x05,0xb9]
+
+s_getreg_b32 s103, 0x3141
+// CHECK: [0x41,0x31,0x67,0xb9]
+
+s_getreg_b32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0x68,0xb9]
+
+s_getreg_b32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0x69,0xb9]
+
+s_getreg_b32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0x6a,0xb9]
+
+s_getreg_b32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0x6b,0xb9]
+
+s_getreg_b32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0x6c,0xb9]
+
+s_getreg_b32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0x6d,0xb9]
+
+s_getreg_b32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0x6e,0xb9]
+
+s_getreg_b32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0x6f,0xb9]
+
+s_getreg_b32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0x7b,0xb9]
+
+s_getreg_b32 m0, 0x3141
+// CHECK: [0x41,0x31,0x7c,0xb9]
+
+s_getreg_b32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0x7e,0xb9]
+
+s_getreg_b32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0x7f,0xb9]
+
+s_getreg_b32 s5, 0xc1d1
+// CHECK: [0xd1,0xc1,0x05,0xb9]
+
+s_setreg_b32 0x3141, s1
+// CHECK: [0x41,0x31,0x81,0xb9]
+
+s_setreg_b32 0xc1d1, s1
+// CHECK: [0xd1,0xc1,0x81,0xb9]
+
+s_setreg_b32 0x3141, s103
+// CHECK: [0x41,0x31,0xe7,0xb9]
+
+s_setreg_b32 0x3141, flat_scratch_lo
+// CHECK: [0x41,0x31,0xe8,0xb9]
+
+s_setreg_b32 0x3141, flat_scratch_hi
+// CHECK: [0x41,0x31,0xe9,0xb9]
+
+s_setreg_b32 0x3141, vcc_lo
+// CHECK: [0x41,0x31,0xea,0xb9]
+
+s_setreg_b32 0x3141, vcc_hi
+// CHECK: [0x41,0x31,0xeb,0xb9]
+
+s_setreg_b32 0x3141, tba_lo
+// CHECK: [0x41,0x31,0xec,0xb9]
+
+s_setreg_b32 0x3141, tba_hi
+// CHECK: [0x41,0x31,0xed,0xb9]
+
+s_setreg_b32 0x3141, tma_lo
+// CHECK: [0x41,0x31,0xee,0xb9]
+
+s_setreg_b32 0x3141, tma_hi
+// CHECK: [0x41,0x31,0xef,0xb9]
+
+s_setreg_b32 0x3141, ttmp11
+// CHECK: [0x41,0x31,0xfb,0xb9]
+
+s_setreg_b32 0x3141, m0
+// CHECK: [0x41,0x31,0xfc,0xb9]
+
+s_setreg_b32 0x3141, exec_lo
+// CHECK: [0x41,0x31,0xfe,0xb9]
+
+s_setreg_b32 0x3141, exec_hi
+// CHECK: [0x41,0x31,0xff,0xb9]
+
+s_setreg_imm32_b32 0x3141, 0x11213141
+// CHECK: [0x41,0x31,0x80,0xba,0x41,0x31,0x21,0x11]
+
+s_setreg_imm32_b32 0xc1d1, 0x11213141
+// CHECK: [0xd1,0xc1,0x80,0xba,0x41,0x31,0x21,0x11]
+
+s_setreg_imm32_b32 0x3141, 0xa1b1c1d1
+// CHECK: [0x41,0x31,0x80,0xba,0xd1,0xc1,0xb1,0xa1]
+
+s_nop 0x3141
+// CHECK: [0x41,0x31,0x80,0xbf]
+
+s_nop 0xc1d1
+// CHECK: [0xd1,0xc1,0x80,0xbf]
+
+s_endpgm
+// CHECK: [0x00,0x00,0x81,0xbf]
+
+s_branch 12609
+// CHECK: [0x41,0x31,0x82,0xbf]
+
+s_branch 49617
+// CHECK: [0xd1,0xc1,0x82,0xbf]
+
+s_cbranch_scc0 12609
+// CHECK: [0x41,0x31,0x84,0xbf]
+
+s_cbranch_scc0 49617
+// CHECK: [0xd1,0xc1,0x84,0xbf]
+
+s_cbranch_scc1 12609
+// CHECK: [0x41,0x31,0x85,0xbf]
+
+s_cbranch_scc1 49617
+// CHECK: [0xd1,0xc1,0x85,0xbf]
+
+s_cbranch_vccz 12609
+// CHECK: [0x41,0x31,0x86,0xbf]
+
+s_cbranch_vccz 49617
+// CHECK: [0xd1,0xc1,0x86,0xbf]
+
+s_cbranch_vccnz 12609
+// CHECK: [0x41,0x31,0x87,0xbf]
+
+s_cbranch_vccnz 49617
+// CHECK: [0xd1,0xc1,0x87,0xbf]
+
+s_cbranch_execz 12609
+// CHECK: [0x41,0x31,0x88,0xbf]
+
+s_cbranch_execz 49617
+// CHECK: [0xd1,0xc1,0x88,0xbf]
+
+s_cbranch_execnz 12609
+// CHECK: [0x41,0x31,0x89,0xbf]
+
+s_cbranch_execnz 49617
+// CHECK: [0xd1,0xc1,0x89,0xbf]
+
+s_barrier
+// CHECK: [0x00,0x00,0x8a,0xbf]
+
+s_waitcnt 0x3141
+// CHECK: [0x41,0x31,0x8c,0xbf]
+
+s_waitcnt 0xc1d1
+// CHECK: [0xd1,0xc1,0x8c,0xbf]
+
+s_sethalt 0x3141
+// CHECK: [0x41,0x31,0x8d,0xbf]
+
+s_sethalt 0xc1d1
+// CHECK: [0xd1,0xc1,0x8d,0xbf]
+
+s_sleep 0x3141
+// CHECK: [0x41,0x31,0x8e,0xbf]
+
+s_sleep 0xc1d1
+// CHECK: [0xd1,0xc1,0x8e,0xbf]
+
+s_setprio 0x3141
+// CHECK: [0x41,0x31,0x8f,0xbf]
+
+s_setprio 0xc1d1
+// CHECK: [0xd1,0xc1,0x8f,0xbf]
+
+s_sendmsg 0x3141
+// CHECK: [0x41,0x31,0x90,0xbf]
+
+s_sendmsg 0xc1d1
+// CHECK: [0xd1,0xc1,0x90,0xbf]
+
+s_sendmsghalt 0x3141
+// CHECK: [0x41,0x31,0x91,0xbf]
+
+s_sendmsghalt 0xc1d1
+// CHECK: [0xd1,0xc1,0x91,0xbf]
+
+s_trap 0x3141
+// CHECK: [0x41,0x31,0x92,0xbf]
+
+s_trap 0xc1d1
+// CHECK: [0xd1,0xc1,0x92,0xbf]
+
+s_icache_inv
+// CHECK: [0x00,0x00,0x93,0xbf]
+
+s_incperflevel 0x3141
+// CHECK: [0x41,0x31,0x94,0xbf]
+
+s_incperflevel 0xc1d1
+// CHECK: [0xd1,0xc1,0x94,0xbf]
+
+s_decperflevel 0x3141
+// CHECK: [0x41,0x31,0x95,0xbf]
+
+s_decperflevel 0xc1d1
+// CHECK: [0xd1,0xc1,0x95,0xbf]
+
+s_ttracedata
+// CHECK: [0x00,0x00,0x96,0xbf]
+
+v_interp_p1_f32 v5, v1, attr0.x
+// CHECK: [0x01,0x00,0x14,0xc8]
+
+v_interp_p1_f32 v255, v1, attr0.x
+// CHECK: [0x01,0x00,0xfc,0xcb]
+
+v_interp_p1_f32 v5, v255, attr0.x
+// CHECK: [0xff,0x00,0x14,0xc8]
+
+v_interp_p1_f32 v5, v1, attr1.x
+// CHECK: [0x01,0x04,0x14,0xc8]
+
+v_interp_p1_f32 v5, v1, attr31.x
+// CHECK: [0x01,0x7c,0x14,0xc8]
+
+v_interp_p1_f32 v5, v1, attr32.x
+// CHECK: [0x01,0x80,0x14,0xc8]
+
+v_interp_p1_f32 v5, v1, attr0.y
+// CHECK: [0x01,0x01,0x14,0xc8]
+
+v_interp_p1_f32 v5, v1, attr0.z
+// CHECK: [0x01,0x02,0x14,0xc8]
+
+v_interp_p1_f32 v5, v1, attr0.w
+// CHECK: [0x01,0x03,0x14,0xc8]
+
+v_interp_p2_f32 v5, v1, attr0.x
+// CHECK: [0x01,0x00,0x15,0xc8]
+
+v_interp_p2_f32 v255, v1, attr0.x
+// CHECK: [0x01,0x00,0xfd,0xcb]
+
+v_interp_p2_f32 v5, v255, attr0.x
+// CHECK: [0xff,0x00,0x15,0xc8]
+
+v_interp_p2_f32 v5, v1, attr1.x
+// CHECK: [0x01,0x04,0x15,0xc8]
+
+v_interp_p2_f32 v5, v1, attr31.x
+// CHECK: [0x01,0x7c,0x15,0xc8]
+
+v_interp_p2_f32 v5, v1, attr32.x
+// CHECK: [0x01,0x80,0x15,0xc8]
+
+v_interp_p2_f32 v5, v1, attr0.y
+// CHECK: [0x01,0x01,0x15,0xc8]
+
+v_interp_p2_f32 v5, v1, attr0.z
+// CHECK: [0x01,0x02,0x15,0xc8]
+
+v_interp_p2_f32 v5, v1, attr0.w
+// CHECK: [0x01,0x03,0x15,0xc8]
+
+v_interp_mov_f32 v5, p10, attr0.x
+// CHECK: [0x00,0x00,0x16,0xc8]
+
+v_interp_mov_f32 v255, p10, attr0.x
+// CHECK: [0x00,0x00,0xfe,0xcb]
+
+v_interp_mov_f32 v5, p20, attr0.x
+// CHECK: [0x01,0x00,0x16,0xc8]
+
+v_interp_mov_f32 v5, p0, attr0.x
+// CHECK: [0x02,0x00,0x16,0xc8]
+
+v_interp_mov_f32 v5, p10, attr1.x
+// CHECK: [0x00,0x04,0x16,0xc8]
+
+v_interp_mov_f32 v5, p10, attr31.x
+// CHECK: [0x00,0x7c,0x16,0xc8]
+
+v_interp_mov_f32 v5, p10, attr32.x
+// CHECK: [0x00,0x80,0x16,0xc8]
+
+v_interp_mov_f32 v5, p10, attr0.y
+// CHECK: [0x00,0x01,0x16,0xc8]
+
+v_interp_mov_f32 v5, p10, attr0.z
+// CHECK: [0x00,0x02,0x16,0xc8]
+
+v_interp_mov_f32 v5, p10, attr0.w
+// CHECK: [0x00,0x03,0x16,0xc8]
+
+v_nop
+// CHECK: [0x00,0x00,0x00,0x7e]
+
+v_nop_e64
+// CHECK: [0x00,0x00,0x00,0xd3,0x00,0x00,0x00,0x00]
+
+v_mov_b32 v5, s1
+// CHECK: [0x01,0x02,0x0a,0x7e]
+
+v_mov_b32 v255, s1
+// CHECK: [0x01,0x02,0xfe,0x7f]
+
+v_mov_b32 v5, s103
+// CHECK: [0x67,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, flat_scratch_lo
+// CHECK: [0x68,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, flat_scratch_hi
+// CHECK: [0x69,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, vcc_lo
+// CHECK: [0x6a,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, vcc_hi
+// CHECK: [0x6b,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, tba_lo
+// CHECK: [0x6c,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, tba_hi
+// CHECK: [0x6d,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, tma_lo
+// CHECK: [0x6e,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, tma_hi
+// CHECK: [0x6f,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, ttmp11
+// CHECK: [0x7b,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, m0
+// CHECK: [0x7c,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, exec_lo
+// CHECK: [0x7e,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, exec_hi
+// CHECK: [0x7f,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, 0
+// CHECK: [0x80,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, -1
+// CHECK: [0xc1,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, 0.5
+// CHECK: [0xf0,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, -4.0
+// CHECK: [0xf7,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, 0xaf123456
+// CHECK: [0xff,0x02,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_mov_b32 v5, 0x3f717273
+// CHECK: [0xff,0x02,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_mov_b32 v5, v1
+// CHECK: [0x01,0x03,0x0a,0x7e]
+
+v_mov_b32 v5, v255
+// CHECK: [0xff,0x03,0x0a,0x7e]
+
+v_mov_b32_e64 v5, s1
+// CHECK: [0x05,0x00,0x02,0xd3,0x01,0x00,0x00,0x00]
+
+v_mov_b32_e64 v255, s1
+// CHECK: [0xff,0x00,0x02,0xd3,0x01,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, s103
+// CHECK: [0x05,0x00,0x02,0xd3,0x67,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x02,0xd3,0x68,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x02,0xd3,0x69,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x02,0xd3,0x6a,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x02,0xd3,0x6b,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x02,0xd3,0x6c,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x02,0xd3,0x6d,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x02,0xd3,0x6e,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x02,0xd3,0x6f,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x02,0xd3,0x7b,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, m0
+// CHECK: [0x05,0x00,0x02,0xd3,0x7c,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x02,0xd3,0x7e,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x02,0xd3,0x7f,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, 0
+// CHECK: [0x05,0x00,0x02,0xd3,0x80,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, -1
+// CHECK: [0x05,0x00,0x02,0xd3,0xc1,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x02,0xd3,0xf0,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x02,0xd3,0xf7,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, v1
+// CHECK: [0x05,0x00,0x02,0xd3,0x01,0x01,0x00,0x00]
+
+v_mov_b32_e64 v5, v255
+// CHECK: [0x05,0x00,0x02,0xd3,0xff,0x01,0x00,0x00]
+
+v_readfirstlane_b32 s5, v1
+// CHECK: [0x01,0x05,0x0a,0x7e]
+
+v_readfirstlane_b32 s103, v1
+// CHECK: [0x01,0x05,0xce,0x7e]
+
+v_readfirstlane_b32 tba_lo, v1
+// CHECK: [0x01,0x05,0xd8,0x7e]
+
+v_readfirstlane_b32 tba_hi, v1
+// CHECK: [0x01,0x05,0xda,0x7e]
+
+v_readfirstlane_b32 tma_lo, v1
+// CHECK: [0x01,0x05,0xdc,0x7e]
+
+v_readfirstlane_b32 tma_hi, v1
+// CHECK: [0x01,0x05,0xde,0x7e]
+
+v_readfirstlane_b32 ttmp11, v1
+// CHECK: [0x01,0x05,0xf6,0x7e]
+
+v_readfirstlane_b32 s5, v255
+// CHECK: [0xff,0x05,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, s[2:3]
+// CHECK: [0x02,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v255, s[2:3]
+// CHECK: [0x02,0x06,0xfe,0x7f]
+
+v_cvt_i32_f64 v5, s[4:5]
+// CHECK: [0x04,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, s[102:103]
+// CHECK: [0x66,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, flat_scratch
+// CHECK: [0x68,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, vcc
+// CHECK: [0x6a,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, tba
+// CHECK: [0x6c,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, tma
+// CHECK: [0x6e,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, ttmp[10:11]
+// CHECK: [0x7a,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, exec
+// CHECK: [0x7e,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, 0
+// CHECK: [0x80,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, -1
+// CHECK: [0xc1,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, 0.5
+// CHECK: [0xf0,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, -4.0
+// CHECK: [0xf7,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, 0xaf123456
+// CHECK: [0xff,0x06,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_i32_f64 v5, 0x3f717273
+// CHECK: [0xff,0x06,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_i32_f64 v5, v[1:2]
+// CHECK: [0x01,0x07,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, v[254:255]
+// CHECK: [0xfe,0x07,0x0a,0x7e]
+
+v_cvt_i32_f64_e64 v5, s[2:3]
+// CHECK: [0x05,0x00,0x06,0xd3,0x02,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v255, s[2:3]
+// CHECK: [0xff,0x00,0x06,0xd3,0x02,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, s[4:5]
+// CHECK: [0x05,0x00,0x06,0xd3,0x04,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, s[102:103]
+// CHECK: [0x05,0x00,0x06,0xd3,0x66,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, flat_scratch
+// CHECK: [0x05,0x00,0x06,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, vcc
+// CHECK: [0x05,0x00,0x06,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, tba
+// CHECK: [0x05,0x00,0x06,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, tma
+// CHECK: [0x05,0x00,0x06,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, ttmp[10:11]
+// CHECK: [0x05,0x00,0x06,0xd3,0x7a,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, exec
+// CHECK: [0x05,0x00,0x06,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, scc
+// CHECK: [0x05,0x00,0x06,0xd3,0xfd,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, v[1:2]
+// CHECK: [0x05,0x00,0x06,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, v[254:255]
+// CHECK: [0x05,0x00,0x06,0xd3,0xfe,0x01,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, -s[2:3]
+// CHECK: [0x05,0x00,0x06,0xd3,0x02,0x00,0x00,0x20]
+
+v_cvt_i32_f64_e64 v5, |s[2:3]|
+// CHECK: [0x05,0x01,0x06,0xd3,0x02,0x00,0x00,0x00]
+
+v_cvt_f64_i32 v[5:6], s1
+// CHECK: [0x01,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[254:255], s1
+// CHECK: [0x01,0x08,0xfc,0x7f]
+
+v_cvt_f64_i32 v[5:6], s103
+// CHECK: [0x67,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], flat_scratch_lo
+// CHECK: [0x68,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], flat_scratch_hi
+// CHECK: [0x69,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], vcc_lo
+// CHECK: [0x6a,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], vcc_hi
+// CHECK: [0x6b,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], tba_lo
+// CHECK: [0x6c,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], tba_hi
+// CHECK: [0x6d,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], tma_lo
+// CHECK: [0x6e,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], tma_hi
+// CHECK: [0x6f,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], ttmp11
+// CHECK: [0x7b,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], m0
+// CHECK: [0x7c,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], exec_lo
+// CHECK: [0x7e,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], exec_hi
+// CHECK: [0x7f,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], 0
+// CHECK: [0x80,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], -1
+// CHECK: [0xc1,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], 0.5
+// CHECK: [0xf0,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], -4.0
+// CHECK: [0xf7,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], 0xaf123456
+// CHECK: [0xff,0x08,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f64_i32 v[5:6], 0x3f717273
+// CHECK: [0xff,0x08,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f64_i32 v[5:6], v1
+// CHECK: [0x01,0x09,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], v255
+// CHECK: [0xff,0x09,0x0a,0x7e]
+
+v_cvt_f64_i32_e64 v[5:6], s1
+// CHECK: [0x05,0x00,0x08,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[254:255], s1
+// CHECK: [0xfe,0x00,0x08,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], s103
+// CHECK: [0x05,0x00,0x08,0xd3,0x67,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], flat_scratch_lo
+// CHECK: [0x05,0x00,0x08,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], flat_scratch_hi
+// CHECK: [0x05,0x00,0x08,0xd3,0x69,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], vcc_lo
+// CHECK: [0x05,0x00,0x08,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], vcc_hi
+// CHECK: [0x05,0x00,0x08,0xd3,0x6b,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], tba_lo
+// CHECK: [0x05,0x00,0x08,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], tba_hi
+// CHECK: [0x05,0x00,0x08,0xd3,0x6d,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], tma_lo
+// CHECK: [0x05,0x00,0x08,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], tma_hi
+// CHECK: [0x05,0x00,0x08,0xd3,0x6f,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], ttmp11
+// CHECK: [0x05,0x00,0x08,0xd3,0x7b,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], m0
+// CHECK: [0x05,0x00,0x08,0xd3,0x7c,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], exec_lo
+// CHECK: [0x05,0x00,0x08,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], exec_hi
+// CHECK: [0x05,0x00,0x08,0xd3,0x7f,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], 0
+// CHECK: [0x05,0x00,0x08,0xd3,0x80,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], -1
+// CHECK: [0x05,0x00,0x08,0xd3,0xc1,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], 0.5
+// CHECK: [0x05,0x00,0x08,0xd3,0xf0,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], -4.0
+// CHECK: [0x05,0x00,0x08,0xd3,0xf7,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], v1
+// CHECK: [0x05,0x00,0x08,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], v255
+// CHECK: [0x05,0x00,0x08,0xd3,0xff,0x01,0x00,0x00]
+
+v_cvt_f32_i32 v5, s1
+// CHECK: [0x01,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v255, s1
+// CHECK: [0x01,0x0a,0xfe,0x7f]
+
+v_cvt_f32_i32 v5, s103
+// CHECK: [0x67,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, flat_scratch_lo
+// CHECK: [0x68,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, flat_scratch_hi
+// CHECK: [0x69,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, vcc_lo
+// CHECK: [0x6a,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, vcc_hi
+// CHECK: [0x6b,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, tba_lo
+// CHECK: [0x6c,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, tba_hi
+// CHECK: [0x6d,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, tma_lo
+// CHECK: [0x6e,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, tma_hi
+// CHECK: [0x6f,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, ttmp11
+// CHECK: [0x7b,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, m0
+// CHECK: [0x7c,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, exec_lo
+// CHECK: [0x7e,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, exec_hi
+// CHECK: [0x7f,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, 0
+// CHECK: [0x80,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, -1
+// CHECK: [0xc1,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, 0.5
+// CHECK: [0xf0,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, -4.0
+// CHECK: [0xf7,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, 0xaf123456
+// CHECK: [0xff,0x0a,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f32_i32 v5, 0x3f717273
+// CHECK: [0xff,0x0a,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f32_i32 v5, v1
+// CHECK: [0x01,0x0b,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, v255
+// CHECK: [0xff,0x0b,0x0a,0x7e]
+
+v_cvt_f32_i32_e64 v5, s1
+// CHECK: [0x05,0x00,0x0a,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v255, s1
+// CHECK: [0xff,0x00,0x0a,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, s103
+// CHECK: [0x05,0x00,0x0a,0xd3,0x67,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x0a,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x0a,0xd3,0x69,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x0a,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x0a,0xd3,0x6b,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x0a,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x0a,0xd3,0x6d,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x0a,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x0a,0xd3,0x6f,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x0a,0xd3,0x7b,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, m0
+// CHECK: [0x05,0x00,0x0a,0xd3,0x7c,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x0a,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x0a,0xd3,0x7f,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, 0
+// CHECK: [0x05,0x00,0x0a,0xd3,0x80,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, -1
+// CHECK: [0x05,0x00,0x0a,0xd3,0xc1,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x0a,0xd3,0xf0,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x0a,0xd3,0xf7,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, v1
+// CHECK: [0x05,0x00,0x0a,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, v255
+// CHECK: [0x05,0x00,0x0a,0xd3,0xff,0x01,0x00,0x00]
+
+v_cvt_f32_u32 v5, s1
+// CHECK: [0x01,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v255, s1
+// CHECK: [0x01,0x0c,0xfe,0x7f]
+
+v_cvt_f32_u32 v5, s103
+// CHECK: [0x67,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, flat_scratch_lo
+// CHECK: [0x68,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, flat_scratch_hi
+// CHECK: [0x69,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, vcc_lo
+// CHECK: [0x6a,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, vcc_hi
+// CHECK: [0x6b,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, tba_lo
+// CHECK: [0x6c,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, tba_hi
+// CHECK: [0x6d,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, tma_lo
+// CHECK: [0x6e,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, tma_hi
+// CHECK: [0x6f,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, ttmp11
+// CHECK: [0x7b,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, m0
+// CHECK: [0x7c,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, exec_lo
+// CHECK: [0x7e,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, exec_hi
+// CHECK: [0x7f,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, 0
+// CHECK: [0x80,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, -1
+// CHECK: [0xc1,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, 0.5
+// CHECK: [0xf0,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, -4.0
+// CHECK: [0xf7,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, 0xaf123456
+// CHECK: [0xff,0x0c,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f32_u32 v5, 0x3f717273
+// CHECK: [0xff,0x0c,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f32_u32 v5, v1
+// CHECK: [0x01,0x0d,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, v255
+// CHECK: [0xff,0x0d,0x0a,0x7e]
+
+v_cvt_f32_u32_e64 v5, s1
+// CHECK: [0x05,0x00,0x0c,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v255, s1
+// CHECK: [0xff,0x00,0x0c,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, s103
+// CHECK: [0x05,0x00,0x0c,0xd3,0x67,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x0c,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x0c,0xd3,0x69,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x0c,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x0c,0xd3,0x6b,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x0c,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x0c,0xd3,0x6d,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x0c,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x0c,0xd3,0x6f,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x0c,0xd3,0x7b,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, m0
+// CHECK: [0x05,0x00,0x0c,0xd3,0x7c,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x0c,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x0c,0xd3,0x7f,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, 0
+// CHECK: [0x05,0x00,0x0c,0xd3,0x80,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, -1
+// CHECK: [0x05,0x00,0x0c,0xd3,0xc1,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x0c,0xd3,0xf0,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x0c,0xd3,0xf7,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, v1
+// CHECK: [0x05,0x00,0x0c,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, v255
+// CHECK: [0x05,0x00,0x0c,0xd3,0xff,0x01,0x00,0x00]
+
+v_cvt_u32_f32 v5, s1
+// CHECK: [0x01,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v255, s1
+// CHECK: [0x01,0x0e,0xfe,0x7f]
+
+v_cvt_u32_f32 v5, s103
+// CHECK: [0x67,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, vcc_lo
+// CHECK: [0x6a,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, vcc_hi
+// CHECK: [0x6b,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, tba_lo
+// CHECK: [0x6c,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, tba_hi
+// CHECK: [0x6d,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, tma_lo
+// CHECK: [0x6e,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, tma_hi
+// CHECK: [0x6f,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, ttmp11
+// CHECK: [0x7b,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, m0
+// CHECK: [0x7c,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, exec_lo
+// CHECK: [0x7e,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, exec_hi
+// CHECK: [0x7f,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, 0
+// CHECK: [0x80,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, -1
+// CHECK: [0xc1,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, 0.5
+// CHECK: [0xf0,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, -4.0
+// CHECK: [0xf7,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, 0xaf123456
+// CHECK: [0xff,0x0e,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_u32_f32 v5, 0x3f717273
+// CHECK: [0xff,0x0e,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_u32_f32 v5, v1
+// CHECK: [0x01,0x0f,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, v255
+// CHECK: [0xff,0x0f,0x0a,0x7e]
+
+v_cvt_u32_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x0e,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x0e,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x0e,0xd3,0x67,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x0e,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x0e,0xd3,0x69,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x0e,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x0e,0xd3,0x6b,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x0e,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x0e,0xd3,0x6d,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x0e,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x0e,0xd3,0x6f,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x0e,0xd3,0x7b,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x0e,0xd3,0x7c,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x0e,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x0e,0xd3,0x7f,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x0e,0xd3,0xfd,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x0e,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x0e,0xd3,0xff,0x01,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x0e,0xd3,0x01,0x00,0x00,0x20]
+
+v_cvt_u32_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x0e,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_i32_f32 v5, s1
+// CHECK: [0x01,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v255, s1
+// CHECK: [0x01,0x10,0xfe,0x7f]
+
+v_cvt_i32_f32 v5, s103
+// CHECK: [0x67,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, vcc_lo
+// CHECK: [0x6a,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, vcc_hi
+// CHECK: [0x6b,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, tba_lo
+// CHECK: [0x6c,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, tba_hi
+// CHECK: [0x6d,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, tma_lo
+// CHECK: [0x6e,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, tma_hi
+// CHECK: [0x6f,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, ttmp11
+// CHECK: [0x7b,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, m0
+// CHECK: [0x7c,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, exec_lo
+// CHECK: [0x7e,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, exec_hi
+// CHECK: [0x7f,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, 0
+// CHECK: [0x80,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, -1
+// CHECK: [0xc1,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, 0.5
+// CHECK: [0xf0,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, -4.0
+// CHECK: [0xf7,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, 0xaf123456
+// CHECK: [0xff,0x10,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_i32_f32 v5, 0x3f717273
+// CHECK: [0xff,0x10,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_i32_f32 v5, v1
+// CHECK: [0x01,0x11,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, v255
+// CHECK: [0xff,0x11,0x0a,0x7e]
+
+v_cvt_i32_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x10,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x10,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x10,0xd3,0x67,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x10,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x10,0xd3,0x69,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x10,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x10,0xd3,0x6b,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x10,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x10,0xd3,0x6d,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x10,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x10,0xd3,0x6f,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x10,0xd3,0x7b,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x10,0xd3,0x7c,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x10,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x10,0xd3,0x7f,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x10,0xd3,0xfd,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x10,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x10,0xd3,0xff,0x01,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x10,0xd3,0x01,0x00,0x00,0x20]
+
+v_cvt_i32_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x10,0xd3,0x01,0x00,0x00,0x00]
+
+v_mov_fed_b32 v5, s1
+// CHECK: [0x01,0x12,0x0a,0x7e]
+
+v_mov_fed_b32 v255, s1
+// CHECK: [0x01,0x12,0xfe,0x7f]
+
+v_mov_fed_b32 v5, s103
+// CHECK: [0x67,0x12,0x0a,0x7e]
+
+v_mov_fed_b32 v5, flat_scratch_lo
+// CHECK: [0x68,0x12,0x0a,0x7e]
+
+v_mov_fed_b32 v5, flat_scratch_hi
+// CHECK: [0x69,0x12,0x0a,0x7e]
+
+v_mov_fed_b32 v5, vcc_lo
+// CHECK: [0x6a,0x12,0x0a,0x7e]
+
+v_mov_fed_b32 v5, vcc_hi
+// CHECK: [0x6b,0x12,0x0a,0x7e]
+
+v_mov_fed_b32 v5, tba_lo
+// CHECK: [0x6c,0x12,0x0a,0x7e]
+
+v_mov_fed_b32 v5, tba_hi
+// CHECK: [0x6d,0x12,0x0a,0x7e]
+
+v_mov_fed_b32 v5, tma_lo
+// CHECK: [0x6e,0x12,0x0a,0x7e]
+
+v_mov_fed_b32 v5, tma_hi
+// CHECK: [0x6f,0x12,0x0a,0x7e]
+
+v_mov_fed_b32 v5, ttmp11
+// CHECK: [0x7b,0x12,0x0a,0x7e]
+
+v_mov_fed_b32 v5, m0
+// CHECK: [0x7c,0x12,0x0a,0x7e]
+
+v_mov_fed_b32 v5, exec_lo
+// CHECK: [0x7e,0x12,0x0a,0x7e]
+
+v_mov_fed_b32 v5, exec_hi
+// CHECK: [0x7f,0x12,0x0a,0x7e]
+
+v_mov_fed_b32 v5, 0
+// CHECK: [0x80,0x12,0x0a,0x7e]
+
+v_mov_fed_b32 v5, -1
+// CHECK: [0xc1,0x12,0x0a,0x7e]
+
+v_mov_fed_b32 v5, 0.5
+// CHECK: [0xf0,0x12,0x0a,0x7e]
+
+v_mov_fed_b32 v5, -4.0
+// CHECK: [0xf7,0x12,0x0a,0x7e]
+
+v_mov_fed_b32 v5, 0xaf123456
+// CHECK: [0xff,0x12,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_mov_fed_b32 v5, 0x3f717273
+// CHECK: [0xff,0x12,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_mov_fed_b32 v5, v1
+// CHECK: [0x01,0x13,0x0a,0x7e]
+
+v_mov_fed_b32 v5, v255
+// CHECK: [0xff,0x13,0x0a,0x7e]
+
+v_mov_fed_b32_e64 v5, s1
+// CHECK: [0x05,0x00,0x12,0xd3,0x01,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v255, s1
+// CHECK: [0xff,0x00,0x12,0xd3,0x01,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, s103
+// CHECK: [0x05,0x00,0x12,0xd3,0x67,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x12,0xd3,0x68,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x12,0xd3,0x69,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x12,0xd3,0x6a,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x12,0xd3,0x6b,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x12,0xd3,0x6c,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x12,0xd3,0x6d,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x12,0xd3,0x6e,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x12,0xd3,0x6f,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x12,0xd3,0x7b,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, m0
+// CHECK: [0x05,0x00,0x12,0xd3,0x7c,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x12,0xd3,0x7e,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x12,0xd3,0x7f,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, 0
+// CHECK: [0x05,0x00,0x12,0xd3,0x80,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, -1
+// CHECK: [0x05,0x00,0x12,0xd3,0xc1,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x12,0xd3,0xf0,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x12,0xd3,0xf7,0x00,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, v1
+// CHECK: [0x05,0x00,0x12,0xd3,0x01,0x01,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, v255
+// CHECK: [0x05,0x00,0x12,0xd3,0xff,0x01,0x00,0x00]
+
+v_cvt_f16_f32 v5, s1
+// CHECK: [0x01,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v255, s1
+// CHECK: [0x01,0x14,0xfe,0x7f]
+
+v_cvt_f16_f32 v5, s103
+// CHECK: [0x67,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, vcc_lo
+// CHECK: [0x6a,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, vcc_hi
+// CHECK: [0x6b,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, tba_lo
+// CHECK: [0x6c,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, tba_hi
+// CHECK: [0x6d,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, tma_lo
+// CHECK: [0x6e,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, tma_hi
+// CHECK: [0x6f,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, ttmp11
+// CHECK: [0x7b,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, m0
+// CHECK: [0x7c,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, exec_lo
+// CHECK: [0x7e,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, exec_hi
+// CHECK: [0x7f,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, 0
+// CHECK: [0x80,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, -1
+// CHECK: [0xc1,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, 0.5
+// CHECK: [0xf0,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, -4.0
+// CHECK: [0xf7,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, 0xaf123456
+// CHECK: [0xff,0x14,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f16_f32 v5, 0x3f717273
+// CHECK: [0xff,0x14,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f16_f32 v5, v1
+// CHECK: [0x01,0x15,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, v255
+// CHECK: [0xff,0x15,0x0a,0x7e]
+
+v_cvt_f16_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x14,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x14,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x14,0xd3,0x67,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x14,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x14,0xd3,0x69,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x14,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x14,0xd3,0x6b,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x14,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x14,0xd3,0x6d,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x14,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x14,0xd3,0x6f,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x14,0xd3,0x7b,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x14,0xd3,0x7c,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x14,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x14,0xd3,0x7f,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x14,0xd3,0xfd,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x14,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x14,0xd3,0xff,0x01,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x14,0xd3,0x01,0x00,0x00,0x20]
+
+v_cvt_f16_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x14,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_f16 v5, s1
+// CHECK: [0x01,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v255, s1
+// CHECK: [0x01,0x16,0xfe,0x7f]
+
+v_cvt_f32_f16 v5, s103
+// CHECK: [0x67,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, flat_scratch_lo
+// CHECK: [0x68,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, flat_scratch_hi
+// CHECK: [0x69,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, vcc_lo
+// CHECK: [0x6a,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, vcc_hi
+// CHECK: [0x6b,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, tba_lo
+// CHECK: [0x6c,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, tba_hi
+// CHECK: [0x6d,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, tma_lo
+// CHECK: [0x6e,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, tma_hi
+// CHECK: [0x6f,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, ttmp11
+// CHECK: [0x7b,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, m0
+// CHECK: [0x7c,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, exec_lo
+// CHECK: [0x7e,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, exec_hi
+// CHECK: [0x7f,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, 0
+// CHECK: [0x80,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, -1
+// CHECK: [0xc1,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, v1
+// CHECK: [0x01,0x17,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, v255
+// CHECK: [0xff,0x17,0x0a,0x7e]
+
+v_cvt_f32_f16_e64 v5, s1
+// CHECK: [0x05,0x00,0x16,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v255, s1
+// CHECK: [0xff,0x00,0x16,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, s103
+// CHECK: [0x05,0x00,0x16,0xd3,0x67,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x16,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x16,0xd3,0x69,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x16,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x16,0xd3,0x6b,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x16,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x16,0xd3,0x6d,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x16,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x16,0xd3,0x6f,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x16,0xd3,0x7b,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, m0
+// CHECK: [0x05,0x00,0x16,0xd3,0x7c,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x16,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x16,0xd3,0x7f,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, scc
+// CHECK: [0x05,0x00,0x16,0xd3,0xfd,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, v1
+// CHECK: [0x05,0x00,0x16,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, v255
+// CHECK: [0x05,0x00,0x16,0xd3,0xff,0x01,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, s1 clamp
+// CHECK: [0x05,0x08,0x16,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x16,0xd3,0x01,0x00,0x00,0x08]
+
+v_cvt_f32_f16_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x16,0xd3,0x01,0x00,0x00,0x10]
+
+v_cvt_f32_f16_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x16,0xd3,0x01,0x00,0x00,0x18]
+
+v_cvt_rpi_i32_f32 v5, s1
+// CHECK: [0x01,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v255, s1
+// CHECK: [0x01,0x18,0xfe,0x7f]
+
+v_cvt_rpi_i32_f32 v5, s103
+// CHECK: [0x67,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, vcc_lo
+// CHECK: [0x6a,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, vcc_hi
+// CHECK: [0x6b,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, tba_lo
+// CHECK: [0x6c,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, tba_hi
+// CHECK: [0x6d,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, tma_lo
+// CHECK: [0x6e,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, tma_hi
+// CHECK: [0x6f,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, ttmp11
+// CHECK: [0x7b,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, m0
+// CHECK: [0x7c,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, exec_lo
+// CHECK: [0x7e,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, exec_hi
+// CHECK: [0x7f,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, 0
+// CHECK: [0x80,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, -1
+// CHECK: [0xc1,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, 0.5
+// CHECK: [0xf0,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, -4.0
+// CHECK: [0xf7,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, 0xaf123456
+// CHECK: [0xff,0x18,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_rpi_i32_f32 v5, 0x3f717273
+// CHECK: [0xff,0x18,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_rpi_i32_f32 v5, v1
+// CHECK: [0x01,0x19,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, v255
+// CHECK: [0xff,0x19,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x18,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x18,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x18,0xd3,0x67,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x18,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x18,0xd3,0x69,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x18,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x18,0xd3,0x6b,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x18,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x18,0xd3,0x6d,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x18,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x18,0xd3,0x6f,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x18,0xd3,0x7b,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x18,0xd3,0x7c,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x18,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x18,0xd3,0x7f,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x18,0xd3,0xfd,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x18,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x18,0xd3,0xff,0x01,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x18,0xd3,0x01,0x00,0x00,0x20]
+
+v_cvt_rpi_i32_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x18,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32 v5, s1
+// CHECK: [0x01,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v255, s1
+// CHECK: [0x01,0x1a,0xfe,0x7f]
+
+v_cvt_flr_i32_f32 v5, s103
+// CHECK: [0x67,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, vcc_lo
+// CHECK: [0x6a,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, vcc_hi
+// CHECK: [0x6b,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, tba_lo
+// CHECK: [0x6c,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, tba_hi
+// CHECK: [0x6d,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, tma_lo
+// CHECK: [0x6e,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, tma_hi
+// CHECK: [0x6f,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, ttmp11
+// CHECK: [0x7b,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, m0
+// CHECK: [0x7c,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, exec_lo
+// CHECK: [0x7e,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, exec_hi
+// CHECK: [0x7f,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, 0
+// CHECK: [0x80,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, -1
+// CHECK: [0xc1,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, 0.5
+// CHECK: [0xf0,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, -4.0
+// CHECK: [0xf7,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, 0xaf123456
+// CHECK: [0xff,0x1a,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_flr_i32_f32 v5, 0x3f717273
+// CHECK: [0xff,0x1a,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_flr_i32_f32 v5, v1
+// CHECK: [0x01,0x1b,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, v255
+// CHECK: [0xff,0x1b,0x0a,0x7e]
+
+v_cvt_flr_i32_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x1a,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x1a,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x1a,0xd3,0x67,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x1a,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x1a,0xd3,0x69,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x1a,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x1a,0xd3,0x6b,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x1a,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x1a,0xd3,0x6d,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x1a,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x1a,0xd3,0x6f,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x1a,0xd3,0x7b,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x1a,0xd3,0x7c,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x1a,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x1a,0xd3,0x7f,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x1a,0xd3,0xfd,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x1a,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x1a,0xd3,0xff,0x01,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x1a,0xd3,0x01,0x00,0x00,0x20]
+
+v_cvt_flr_i32_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x1a,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4 v5, s1
+// CHECK: [0x01,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v255, s1
+// CHECK: [0x01,0x1c,0xfe,0x7f]
+
+v_cvt_off_f32_i4 v5, s103
+// CHECK: [0x67,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, flat_scratch_lo
+// CHECK: [0x68,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, flat_scratch_hi
+// CHECK: [0x69,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, vcc_lo
+// CHECK: [0x6a,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, vcc_hi
+// CHECK: [0x6b,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, tba_lo
+// CHECK: [0x6c,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, tba_hi
+// CHECK: [0x6d,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, tma_lo
+// CHECK: [0x6e,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, tma_hi
+// CHECK: [0x6f,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, ttmp11
+// CHECK: [0x7b,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, m0
+// CHECK: [0x7c,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, exec_lo
+// CHECK: [0x7e,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, exec_hi
+// CHECK: [0x7f,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, 0
+// CHECK: [0x80,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, -1
+// CHECK: [0xc1,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, 0.5
+// CHECK: [0xf0,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, -4.0
+// CHECK: [0xf7,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, 0x4f
+// CHECK: [0xff,0x1c,0x0a,0x7e,0x4f,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4 v5, 0x41
+// CHECK: [0xff,0x1c,0x0a,0x7e,0x41,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4 v5, v1
+// CHECK: [0x01,0x1d,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, v255
+// CHECK: [0xff,0x1d,0x0a,0x7e]
+
+v_cvt_off_f32_i4_e64 v5, s1
+// CHECK: [0x05,0x00,0x1c,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v255, s1
+// CHECK: [0xff,0x00,0x1c,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, s103
+// CHECK: [0x05,0x00,0x1c,0xd3,0x67,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x1c,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x1c,0xd3,0x69,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x1c,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x1c,0xd3,0x6b,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x1c,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x1c,0xd3,0x6d,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x1c,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x1c,0xd3,0x6f,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x1c,0xd3,0x7b,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, m0
+// CHECK: [0x05,0x00,0x1c,0xd3,0x7c,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x1c,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x1c,0xd3,0x7f,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, 0
+// CHECK: [0x05,0x00,0x1c,0xd3,0x80,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, -1
+// CHECK: [0x05,0x00,0x1c,0xd3,0xc1,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x1c,0xd3,0xf0,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x1c,0xd3,0xf7,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, v1
+// CHECK: [0x05,0x00,0x1c,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, v255
+// CHECK: [0x05,0x00,0x1c,0xd3,0xff,0x01,0x00,0x00]
+
+v_cvt_f32_f64 v5, s[2:3]
+// CHECK: [0x02,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v255, s[2:3]
+// CHECK: [0x02,0x1e,0xfe,0x7f]
+
+v_cvt_f32_f64 v5, s[4:5]
+// CHECK: [0x04,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, s[102:103]
+// CHECK: [0x66,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, flat_scratch
+// CHECK: [0x68,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, vcc
+// CHECK: [0x6a,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, tba
+// CHECK: [0x6c,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, tma
+// CHECK: [0x6e,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, ttmp[10:11]
+// CHECK: [0x7a,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, exec
+// CHECK: [0x7e,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, 0
+// CHECK: [0x80,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, -1
+// CHECK: [0xc1,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, 0.5
+// CHECK: [0xf0,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, -4.0
+// CHECK: [0xf7,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, 0xaf123456
+// CHECK: [0xff,0x1e,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f32_f64 v5, 0x3f717273
+// CHECK: [0xff,0x1e,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f32_f64 v5, v[1:2]
+// CHECK: [0x01,0x1f,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, v[254:255]
+// CHECK: [0xfe,0x1f,0x0a,0x7e]
+
+v_cvt_f32_f64_e64 v5, s[2:3]
+// CHECK: [0x05,0x00,0x1e,0xd3,0x02,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v255, s[2:3]
+// CHECK: [0xff,0x00,0x1e,0xd3,0x02,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, s[4:5]
+// CHECK: [0x05,0x00,0x1e,0xd3,0x04,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, s[102:103]
+// CHECK: [0x05,0x00,0x1e,0xd3,0x66,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, flat_scratch
+// CHECK: [0x05,0x00,0x1e,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, vcc
+// CHECK: [0x05,0x00,0x1e,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, tba
+// CHECK: [0x05,0x00,0x1e,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, tma
+// CHECK: [0x05,0x00,0x1e,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, ttmp[10:11]
+// CHECK: [0x05,0x00,0x1e,0xd3,0x7a,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, exec
+// CHECK: [0x05,0x00,0x1e,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, scc
+// CHECK: [0x05,0x00,0x1e,0xd3,0xfd,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, v[1:2]
+// CHECK: [0x05,0x00,0x1e,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, v[254:255]
+// CHECK: [0x05,0x00,0x1e,0xd3,0xfe,0x01,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, -s[2:3]
+// CHECK: [0x05,0x00,0x1e,0xd3,0x02,0x00,0x00,0x20]
+
+v_cvt_f32_f64_e64 v5, |s[2:3]|
+// CHECK: [0x05,0x01,0x1e,0xd3,0x02,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, s[2:3] clamp
+// CHECK: [0x05,0x08,0x1e,0xd3,0x02,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, s[2:3] mul:2
+// CHECK: [0x05,0x00,0x1e,0xd3,0x02,0x00,0x00,0x08]
+
+v_cvt_f32_f64_e64 v5, s[2:3] mul:4
+// CHECK: [0x05,0x00,0x1e,0xd3,0x02,0x00,0x00,0x10]
+
+v_cvt_f32_f64_e64 v5, s[2:3] div:2
+// CHECK: [0x05,0x00,0x1e,0xd3,0x02,0x00,0x00,0x18]
+
+v_cvt_f64_f32 v[5:6], s1
+// CHECK: [0x01,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[254:255], s1
+// CHECK: [0x01,0x20,0xfc,0x7f]
+
+v_cvt_f64_f32 v[5:6], s103
+// CHECK: [0x67,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], flat_scratch_lo
+// CHECK: [0x68,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], flat_scratch_hi
+// CHECK: [0x69,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], vcc_lo
+// CHECK: [0x6a,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], vcc_hi
+// CHECK: [0x6b,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], tba_lo
+// CHECK: [0x6c,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], tba_hi
+// CHECK: [0x6d,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], tma_lo
+// CHECK: [0x6e,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], tma_hi
+// CHECK: [0x6f,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], ttmp11
+// CHECK: [0x7b,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], m0
+// CHECK: [0x7c,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], exec_lo
+// CHECK: [0x7e,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], exec_hi
+// CHECK: [0x7f,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], 0
+// CHECK: [0x80,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], -1
+// CHECK: [0xc1,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], 0.5
+// CHECK: [0xf0,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], -4.0
+// CHECK: [0xf7,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], 0xaf123456
+// CHECK: [0xff,0x20,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f64_f32 v[5:6], 0x3f717273
+// CHECK: [0xff,0x20,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f64_f32 v[5:6], v1
+// CHECK: [0x01,0x21,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], v255
+// CHECK: [0xff,0x21,0x0a,0x7e]
+
+v_cvt_f64_f32_e64 v[5:6], s1
+// CHECK: [0x05,0x00,0x20,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[254:255], s1
+// CHECK: [0xfe,0x00,0x20,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], s103
+// CHECK: [0x05,0x00,0x20,0xd3,0x67,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], flat_scratch_lo
+// CHECK: [0x05,0x00,0x20,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], flat_scratch_hi
+// CHECK: [0x05,0x00,0x20,0xd3,0x69,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], vcc_lo
+// CHECK: [0x05,0x00,0x20,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], vcc_hi
+// CHECK: [0x05,0x00,0x20,0xd3,0x6b,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], tba_lo
+// CHECK: [0x05,0x00,0x20,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], tba_hi
+// CHECK: [0x05,0x00,0x20,0xd3,0x6d,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], tma_lo
+// CHECK: [0x05,0x00,0x20,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], tma_hi
+// CHECK: [0x05,0x00,0x20,0xd3,0x6f,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], ttmp11
+// CHECK: [0x05,0x00,0x20,0xd3,0x7b,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], m0
+// CHECK: [0x05,0x00,0x20,0xd3,0x7c,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], exec_lo
+// CHECK: [0x05,0x00,0x20,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], exec_hi
+// CHECK: [0x05,0x00,0x20,0xd3,0x7f,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x20,0xd3,0xfd,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], v1
+// CHECK: [0x05,0x00,0x20,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], v255
+// CHECK: [0x05,0x00,0x20,0xd3,0xff,0x01,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], -s1
+// CHECK: [0x05,0x00,0x20,0xd3,0x01,0x00,0x00,0x20]
+
+v_cvt_f64_f32_e64 v[5:6], |s1|
+// CHECK: [0x05,0x01,0x20,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], s1 clamp
+// CHECK: [0x05,0x08,0x20,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], s1 mul:2
+// CHECK: [0x05,0x00,0x20,0xd3,0x01,0x00,0x00,0x08]
+
+v_cvt_f64_f32_e64 v[5:6], s1 mul:4
+// CHECK: [0x05,0x00,0x20,0xd3,0x01,0x00,0x00,0x10]
+
+v_cvt_f64_f32_e64 v[5:6], s1 div:2
+// CHECK: [0x05,0x00,0x20,0xd3,0x01,0x00,0x00,0x18]
+
+v_cvt_f32_ubyte0 v5, s1
+// CHECK: [0x01,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v255, s1
+// CHECK: [0x01,0x22,0xfe,0x7f]
+
+v_cvt_f32_ubyte0 v5, s103
+// CHECK: [0x67,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, flat_scratch_lo
+// CHECK: [0x68,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, flat_scratch_hi
+// CHECK: [0x69,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, vcc_lo
+// CHECK: [0x6a,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, vcc_hi
+// CHECK: [0x6b,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, tba_lo
+// CHECK: [0x6c,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, tba_hi
+// CHECK: [0x6d,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, tma_lo
+// CHECK: [0x6e,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, tma_hi
+// CHECK: [0x6f,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, ttmp11
+// CHECK: [0x7b,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, m0
+// CHECK: [0x7c,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, exec_lo
+// CHECK: [0x7e,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, exec_hi
+// CHECK: [0x7f,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, 0
+// CHECK: [0x80,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, -1
+// CHECK: [0xc1,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, 0.5
+// CHECK: [0xf0,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, -4.0
+// CHECK: [0xf7,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, 0xaf123456
+// CHECK: [0xff,0x22,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f32_ubyte0 v5, 0x3f717273
+// CHECK: [0xff,0x22,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f32_ubyte0 v5, v1
+// CHECK: [0x01,0x23,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, v255
+// CHECK: [0xff,0x23,0x0a,0x7e]
+
+v_cvt_f32_ubyte0_e64 v5, s1
+// CHECK: [0x05,0x00,0x22,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v255, s1
+// CHECK: [0xff,0x00,0x22,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, s103
+// CHECK: [0x05,0x00,0x22,0xd3,0x67,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x22,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x22,0xd3,0x69,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x22,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x22,0xd3,0x6b,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x22,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x22,0xd3,0x6d,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x22,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x22,0xd3,0x6f,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x22,0xd3,0x7b,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, m0
+// CHECK: [0x05,0x00,0x22,0xd3,0x7c,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x22,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x22,0xd3,0x7f,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, 0
+// CHECK: [0x05,0x00,0x22,0xd3,0x80,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, -1
+// CHECK: [0x05,0x00,0x22,0xd3,0xc1,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x22,0xd3,0xf0,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x22,0xd3,0xf7,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, v1
+// CHECK: [0x05,0x00,0x22,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, v255
+// CHECK: [0x05,0x00,0x22,0xd3,0xff,0x01,0x00,0x00]
+
+v_cvt_f32_ubyte1 v5, s1
+// CHECK: [0x01,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v255, s1
+// CHECK: [0x01,0x24,0xfe,0x7f]
+
+v_cvt_f32_ubyte1 v5, s103
+// CHECK: [0x67,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, flat_scratch_lo
+// CHECK: [0x68,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, flat_scratch_hi
+// CHECK: [0x69,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, vcc_lo
+// CHECK: [0x6a,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, vcc_hi
+// CHECK: [0x6b,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, tba_lo
+// CHECK: [0x6c,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, tba_hi
+// CHECK: [0x6d,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, tma_lo
+// CHECK: [0x6e,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, tma_hi
+// CHECK: [0x6f,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, ttmp11
+// CHECK: [0x7b,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, m0
+// CHECK: [0x7c,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, exec_lo
+// CHECK: [0x7e,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, exec_hi
+// CHECK: [0x7f,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, 0
+// CHECK: [0x80,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, -1
+// CHECK: [0xc1,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, 0.5
+// CHECK: [0xf0,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, -4.0
+// CHECK: [0xf7,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, 0xaf123456
+// CHECK: [0xff,0x24,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f32_ubyte1 v5, 0x3f717273
+// CHECK: [0xff,0x24,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f32_ubyte1 v5, v1
+// CHECK: [0x01,0x25,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, v255
+// CHECK: [0xff,0x25,0x0a,0x7e]
+
+v_cvt_f32_ubyte1_e64 v5, s1
+// CHECK: [0x05,0x00,0x24,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v255, s1
+// CHECK: [0xff,0x00,0x24,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, s103
+// CHECK: [0x05,0x00,0x24,0xd3,0x67,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x24,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x24,0xd3,0x69,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x24,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x24,0xd3,0x6b,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x24,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x24,0xd3,0x6d,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x24,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x24,0xd3,0x6f,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x24,0xd3,0x7b,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, m0
+// CHECK: [0x05,0x00,0x24,0xd3,0x7c,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x24,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x24,0xd3,0x7f,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, 0
+// CHECK: [0x05,0x00,0x24,0xd3,0x80,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, -1
+// CHECK: [0x05,0x00,0x24,0xd3,0xc1,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x24,0xd3,0xf0,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x24,0xd3,0xf7,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, v1
+// CHECK: [0x05,0x00,0x24,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, v255
+// CHECK: [0x05,0x00,0x24,0xd3,0xff,0x01,0x00,0x00]
+
+v_cvt_f32_ubyte2 v5, s1
+// CHECK: [0x01,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v255, s1
+// CHECK: [0x01,0x26,0xfe,0x7f]
+
+v_cvt_f32_ubyte2 v5, s103
+// CHECK: [0x67,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, flat_scratch_lo
+// CHECK: [0x68,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, flat_scratch_hi
+// CHECK: [0x69,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, vcc_lo
+// CHECK: [0x6a,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, vcc_hi
+// CHECK: [0x6b,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, tba_lo
+// CHECK: [0x6c,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, tba_hi
+// CHECK: [0x6d,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, tma_lo
+// CHECK: [0x6e,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, tma_hi
+// CHECK: [0x6f,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, ttmp11
+// CHECK: [0x7b,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, m0
+// CHECK: [0x7c,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, exec_lo
+// CHECK: [0x7e,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, exec_hi
+// CHECK: [0x7f,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, 0
+// CHECK: [0x80,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, -1
+// CHECK: [0xc1,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, 0.5
+// CHECK: [0xf0,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, -4.0
+// CHECK: [0xf7,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, 0xaf123456
+// CHECK: [0xff,0x26,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f32_ubyte2 v5, 0x3f717273
+// CHECK: [0xff,0x26,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f32_ubyte2 v5, v1
+// CHECK: [0x01,0x27,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, v255
+// CHECK: [0xff,0x27,0x0a,0x7e]
+
+v_cvt_f32_ubyte2_e64 v5, s1
+// CHECK: [0x05,0x00,0x26,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v255, s1
+// CHECK: [0xff,0x00,0x26,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, s103
+// CHECK: [0x05,0x00,0x26,0xd3,0x67,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x26,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x26,0xd3,0x69,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x26,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x26,0xd3,0x6b,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x26,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x26,0xd3,0x6d,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x26,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x26,0xd3,0x6f,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x26,0xd3,0x7b,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, m0
+// CHECK: [0x05,0x00,0x26,0xd3,0x7c,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x26,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x26,0xd3,0x7f,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, 0
+// CHECK: [0x05,0x00,0x26,0xd3,0x80,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, -1
+// CHECK: [0x05,0x00,0x26,0xd3,0xc1,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x26,0xd3,0xf0,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x26,0xd3,0xf7,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, v1
+// CHECK: [0x05,0x00,0x26,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, v255
+// CHECK: [0x05,0x00,0x26,0xd3,0xff,0x01,0x00,0x00]
+
+v_cvt_f32_ubyte3 v5, s1
+// CHECK: [0x01,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v255, s1
+// CHECK: [0x01,0x28,0xfe,0x7f]
+
+v_cvt_f32_ubyte3 v5, s103
+// CHECK: [0x67,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, flat_scratch_lo
+// CHECK: [0x68,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, flat_scratch_hi
+// CHECK: [0x69,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, vcc_lo
+// CHECK: [0x6a,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, vcc_hi
+// CHECK: [0x6b,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, tba_lo
+// CHECK: [0x6c,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, tba_hi
+// CHECK: [0x6d,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, tma_lo
+// CHECK: [0x6e,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, tma_hi
+// CHECK: [0x6f,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, ttmp11
+// CHECK: [0x7b,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, m0
+// CHECK: [0x7c,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, exec_lo
+// CHECK: [0x7e,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, exec_hi
+// CHECK: [0x7f,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, 0
+// CHECK: [0x80,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, -1
+// CHECK: [0xc1,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, 0.5
+// CHECK: [0xf0,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, -4.0
+// CHECK: [0xf7,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, 0xaf123456
+// CHECK: [0xff,0x28,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f32_ubyte3 v5, 0x3f717273
+// CHECK: [0xff,0x28,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f32_ubyte3 v5, v1
+// CHECK: [0x01,0x29,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, v255
+// CHECK: [0xff,0x29,0x0a,0x7e]
+
+v_cvt_f32_ubyte3_e64 v5, s1
+// CHECK: [0x05,0x00,0x28,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v255, s1
+// CHECK: [0xff,0x00,0x28,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, s103
+// CHECK: [0x05,0x00,0x28,0xd3,0x67,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x28,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x28,0xd3,0x69,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x28,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x28,0xd3,0x6b,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x28,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x28,0xd3,0x6d,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x28,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x28,0xd3,0x6f,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x28,0xd3,0x7b,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, m0
+// CHECK: [0x05,0x00,0x28,0xd3,0x7c,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x28,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x28,0xd3,0x7f,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, 0
+// CHECK: [0x05,0x00,0x28,0xd3,0x80,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, -1
+// CHECK: [0x05,0x00,0x28,0xd3,0xc1,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x28,0xd3,0xf0,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x28,0xd3,0xf7,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, v1
+// CHECK: [0x05,0x00,0x28,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, v255
+// CHECK: [0x05,0x00,0x28,0xd3,0xff,0x01,0x00,0x00]
+
+v_cvt_u32_f64 v5, s[2:3]
+// CHECK: [0x02,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v255, s[2:3]
+// CHECK: [0x02,0x2a,0xfe,0x7f]
+
+v_cvt_u32_f64 v5, s[4:5]
+// CHECK: [0x04,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, s[102:103]
+// CHECK: [0x66,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, flat_scratch
+// CHECK: [0x68,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, vcc
+// CHECK: [0x6a,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, tba
+// CHECK: [0x6c,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, tma
+// CHECK: [0x6e,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, ttmp[10:11]
+// CHECK: [0x7a,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, exec
+// CHECK: [0x7e,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, 0
+// CHECK: [0x80,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, -1
+// CHECK: [0xc1,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, 0.5
+// CHECK: [0xf0,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, -4.0
+// CHECK: [0xf7,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, 0xaf123456
+// CHECK: [0xff,0x2a,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_u32_f64 v5, 0x3f717273
+// CHECK: [0xff,0x2a,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_u32_f64 v5, v[1:2]
+// CHECK: [0x01,0x2b,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, v[254:255]
+// CHECK: [0xfe,0x2b,0x0a,0x7e]
+
+v_cvt_u32_f64_e64 v5, s[2:3]
+// CHECK: [0x05,0x00,0x2a,0xd3,0x02,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v255, s[2:3]
+// CHECK: [0xff,0x00,0x2a,0xd3,0x02,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, s[4:5]
+// CHECK: [0x05,0x00,0x2a,0xd3,0x04,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, s[102:103]
+// CHECK: [0x05,0x00,0x2a,0xd3,0x66,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, flat_scratch
+// CHECK: [0x05,0x00,0x2a,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, vcc
+// CHECK: [0x05,0x00,0x2a,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, tba
+// CHECK: [0x05,0x00,0x2a,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, tma
+// CHECK: [0x05,0x00,0x2a,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, ttmp[10:11]
+// CHECK: [0x05,0x00,0x2a,0xd3,0x7a,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, exec
+// CHECK: [0x05,0x00,0x2a,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, scc
+// CHECK: [0x05,0x00,0x2a,0xd3,0xfd,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, v[1:2]
+// CHECK: [0x05,0x00,0x2a,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, v[254:255]
+// CHECK: [0x05,0x00,0x2a,0xd3,0xfe,0x01,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, -s[2:3]
+// CHECK: [0x05,0x00,0x2a,0xd3,0x02,0x00,0x00,0x20]
+
+v_cvt_u32_f64_e64 v5, |s[2:3]|
+// CHECK: [0x05,0x01,0x2a,0xd3,0x02,0x00,0x00,0x00]
+
+v_cvt_f64_u32 v[5:6], s1
+// CHECK: [0x01,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[254:255], s1
+// CHECK: [0x01,0x2c,0xfc,0x7f]
+
+v_cvt_f64_u32 v[5:6], s103
+// CHECK: [0x67,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], flat_scratch_lo
+// CHECK: [0x68,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], flat_scratch_hi
+// CHECK: [0x69,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], vcc_lo
+// CHECK: [0x6a,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], vcc_hi
+// CHECK: [0x6b,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], tba_lo
+// CHECK: [0x6c,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], tba_hi
+// CHECK: [0x6d,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], tma_lo
+// CHECK: [0x6e,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], tma_hi
+// CHECK: [0x6f,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], ttmp11
+// CHECK: [0x7b,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], m0
+// CHECK: [0x7c,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], exec_lo
+// CHECK: [0x7e,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], exec_hi
+// CHECK: [0x7f,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], 0
+// CHECK: [0x80,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], -1
+// CHECK: [0xc1,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], 0.5
+// CHECK: [0xf0,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], -4.0
+// CHECK: [0xf7,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], 0xaf123456
+// CHECK: [0xff,0x2c,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f64_u32 v[5:6], 0x3f717273
+// CHECK: [0xff,0x2c,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f64_u32 v[5:6], v1
+// CHECK: [0x01,0x2d,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], v255
+// CHECK: [0xff,0x2d,0x0a,0x7e]
+
+v_cvt_f64_u32_e64 v[5:6], s1
+// CHECK: [0x05,0x00,0x2c,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[254:255], s1
+// CHECK: [0xfe,0x00,0x2c,0xd3,0x01,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], s103
+// CHECK: [0x05,0x00,0x2c,0xd3,0x67,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], flat_scratch_lo
+// CHECK: [0x05,0x00,0x2c,0xd3,0x68,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], flat_scratch_hi
+// CHECK: [0x05,0x00,0x2c,0xd3,0x69,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], vcc_lo
+// CHECK: [0x05,0x00,0x2c,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], vcc_hi
+// CHECK: [0x05,0x00,0x2c,0xd3,0x6b,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], tba_lo
+// CHECK: [0x05,0x00,0x2c,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], tba_hi
+// CHECK: [0x05,0x00,0x2c,0xd3,0x6d,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], tma_lo
+// CHECK: [0x05,0x00,0x2c,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], tma_hi
+// CHECK: [0x05,0x00,0x2c,0xd3,0x6f,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], ttmp11
+// CHECK: [0x05,0x00,0x2c,0xd3,0x7b,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], m0
+// CHECK: [0x05,0x00,0x2c,0xd3,0x7c,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], exec_lo
+// CHECK: [0x05,0x00,0x2c,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], exec_hi
+// CHECK: [0x05,0x00,0x2c,0xd3,0x7f,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], 0
+// CHECK: [0x05,0x00,0x2c,0xd3,0x80,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], -1
+// CHECK: [0x05,0x00,0x2c,0xd3,0xc1,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], 0.5
+// CHECK: [0x05,0x00,0x2c,0xd3,0xf0,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], -4.0
+// CHECK: [0x05,0x00,0x2c,0xd3,0xf7,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], v1
+// CHECK: [0x05,0x00,0x2c,0xd3,0x01,0x01,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], v255
+// CHECK: [0x05,0x00,0x2c,0xd3,0xff,0x01,0x00,0x00]
+
+v_trunc_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x2e,0xfc,0x7f]
+
+v_trunc_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], s[102:103]
+// CHECK: [0x66,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], flat_scratch
+// CHECK: [0x68,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], vcc
+// CHECK: [0x6a,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], tba
+// CHECK: [0x6c,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], tma
+// CHECK: [0x6e,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], exec
+// CHECK: [0x7e,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], 0
+// CHECK: [0x80,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], -1
+// CHECK: [0xc1,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x2e,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_trunc_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x2e,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_trunc_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x2f,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x2f,0x0a,0x7e]
+
+v_trunc_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x2e,0xd3,0x02,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x2e,0xd3,0x02,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x2e,0xd3,0x04,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], s[102:103]
+// CHECK: [0x05,0x00,0x2e,0xd3,0x66,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x2e,0xd3,0x68,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x2e,0xd3,0x6a,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x2e,0xd3,0x6c,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x2e,0xd3,0x6e,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x2e,0xd3,0x7a,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x2e,0xd3,0x7e,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x2e,0xd3,0xfd,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x2e,0xd3,0x01,0x01,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x2e,0xd3,0xfe,0x01,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x2e,0xd3,0x02,0x00,0x00,0x20]
+
+v_trunc_f64_e64 v[5:6], |s[2:3]|
+// CHECK: [0x05,0x01,0x2e,0xd3,0x02,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x08,0x2e,0xd3,0x02,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x2e,0xd3,0x02,0x00,0x00,0x08]
+
+v_trunc_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x2e,0xd3,0x02,0x00,0x00,0x10]
+
+v_trunc_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x2e,0xd3,0x02,0x00,0x00,0x18]
+
+v_ceil_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x30,0xfc,0x7f]
+
+v_ceil_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], s[102:103]
+// CHECK: [0x66,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], flat_scratch
+// CHECK: [0x68,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], vcc
+// CHECK: [0x6a,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], tba
+// CHECK: [0x6c,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], tma
+// CHECK: [0x6e,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], exec
+// CHECK: [0x7e,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], 0
+// CHECK: [0x80,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], -1
+// CHECK: [0xc1,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x30,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_ceil_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x30,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_ceil_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x31,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x31,0x0a,0x7e]
+
+v_ceil_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x30,0xd3,0x02,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x30,0xd3,0x02,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x30,0xd3,0x04,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], s[102:103]
+// CHECK: [0x05,0x00,0x30,0xd3,0x66,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x30,0xd3,0x68,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x30,0xd3,0x6a,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x30,0xd3,0x6c,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x30,0xd3,0x6e,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x30,0xd3,0x7a,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x30,0xd3,0x7e,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x30,0xd3,0xfd,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x30,0xd3,0x01,0x01,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x30,0xd3,0xfe,0x01,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x30,0xd3,0x02,0x00,0x00,0x20]
+
+v_ceil_f64_e64 v[5:6], |s[2:3]|
+// CHECK: [0x05,0x01,0x30,0xd3,0x02,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x08,0x30,0xd3,0x02,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x30,0xd3,0x02,0x00,0x00,0x08]
+
+v_ceil_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x30,0xd3,0x02,0x00,0x00,0x10]
+
+v_ceil_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x30,0xd3,0x02,0x00,0x00,0x18]
+
+v_rndne_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x32,0xfc,0x7f]
+
+v_rndne_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], s[102:103]
+// CHECK: [0x66,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], flat_scratch
+// CHECK: [0x68,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], vcc
+// CHECK: [0x6a,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], tba
+// CHECK: [0x6c,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], tma
+// CHECK: [0x6e,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], exec
+// CHECK: [0x7e,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], 0
+// CHECK: [0x80,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], -1
+// CHECK: [0xc1,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x32,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rndne_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x32,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rndne_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x33,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x33,0x0a,0x7e]
+
+v_rndne_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x32,0xd3,0x02,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x32,0xd3,0x02,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x32,0xd3,0x04,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], s[102:103]
+// CHECK: [0x05,0x00,0x32,0xd3,0x66,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x32,0xd3,0x68,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x32,0xd3,0x6a,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x32,0xd3,0x6c,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x32,0xd3,0x6e,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x32,0xd3,0x7a,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x32,0xd3,0x7e,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x32,0xd3,0xfd,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x32,0xd3,0x01,0x01,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x32,0xd3,0xfe,0x01,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x32,0xd3,0x02,0x00,0x00,0x20]
+
+v_rndne_f64_e64 v[5:6], |s[2:3]|
+// CHECK: [0x05,0x01,0x32,0xd3,0x02,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x08,0x32,0xd3,0x02,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x32,0xd3,0x02,0x00,0x00,0x08]
+
+v_rndne_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x32,0xd3,0x02,0x00,0x00,0x10]
+
+v_rndne_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x32,0xd3,0x02,0x00,0x00,0x18]
+
+v_floor_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x34,0x0a,0x7e]
+
+v_floor_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x34,0xfc,0x7f]
+
+v_floor_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], s[102:103]
+// CHECK: [0x66,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], flat_scratch
+// CHECK: [0x68,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], vcc
+// CHECK: [0x6a,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], tba
+// CHECK: [0x6c,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], tma
+// CHECK: [0x6e,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], exec
+// CHECK: [0x7e,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], 0
+// CHECK: [0x80,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], -1
+// CHECK: [0xc1,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x34,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_floor_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x34,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_floor_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x35,0x0a,0x7e]
+
+v_floor_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x35,0x0a,0x7e]
+
+v_floor_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x34,0xd3,0x02,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x34,0xd3,0x02,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x34,0xd3,0x04,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], s[102:103]
+// CHECK: [0x05,0x00,0x34,0xd3,0x66,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x34,0xd3,0x68,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x34,0xd3,0x6a,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x34,0xd3,0x6c,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x34,0xd3,0x6e,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x34,0xd3,0x7a,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x34,0xd3,0x7e,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x34,0xd3,0xfd,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x34,0xd3,0x01,0x01,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x34,0xd3,0xfe,0x01,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x34,0xd3,0x02,0x00,0x00,0x20]
+
+v_floor_f64_e64 v[5:6], |s[2:3]|
+// CHECK: [0x05,0x01,0x34,0xd3,0x02,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x08,0x34,0xd3,0x02,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x34,0xd3,0x02,0x00,0x00,0x08]
+
+v_floor_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x34,0xd3,0x02,0x00,0x00,0x10]
+
+v_floor_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x34,0xd3,0x02,0x00,0x00,0x18]
+
+v_fract_f32 v5, s1
+// CHECK: [0x01,0x40,0x0a,0x7e]
+
+v_fract_f32 v255, s1
+// CHECK: [0x01,0x40,0xfe,0x7f]
+
+v_fract_f32 v5, s103
+// CHECK: [0x67,0x40,0x0a,0x7e]
+
+v_fract_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x40,0x0a,0x7e]
+
+v_fract_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x40,0x0a,0x7e]
+
+v_fract_f32 v5, vcc_lo
+// CHECK: [0x6a,0x40,0x0a,0x7e]
+
+v_fract_f32 v5, vcc_hi
+// CHECK: [0x6b,0x40,0x0a,0x7e]
+
+v_fract_f32 v5, tba_lo
+// CHECK: [0x6c,0x40,0x0a,0x7e]
+
+v_fract_f32 v5, tba_hi
+// CHECK: [0x6d,0x40,0x0a,0x7e]
+
+v_fract_f32 v5, tma_lo
+// CHECK: [0x6e,0x40,0x0a,0x7e]
+
+v_fract_f32 v5, tma_hi
+// CHECK: [0x6f,0x40,0x0a,0x7e]
+
+v_fract_f32 v5, ttmp11
+// CHECK: [0x7b,0x40,0x0a,0x7e]
+
+v_fract_f32 v5, m0
+// CHECK: [0x7c,0x40,0x0a,0x7e]
+
+v_fract_f32 v5, exec_lo
+// CHECK: [0x7e,0x40,0x0a,0x7e]
+
+v_fract_f32 v5, exec_hi
+// CHECK: [0x7f,0x40,0x0a,0x7e]
+
+v_fract_f32 v5, 0
+// CHECK: [0x80,0x40,0x0a,0x7e]
+
+v_fract_f32 v5, -1
+// CHECK: [0xc1,0x40,0x0a,0x7e]
+
+v_fract_f32 v5, 0.5
+// CHECK: [0xf0,0x40,0x0a,0x7e]
+
+v_fract_f32 v5, -4.0
+// CHECK: [0xf7,0x40,0x0a,0x7e]
+
+v_fract_f32 v5, 0xaf123456
+// CHECK: [0xff,0x40,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_fract_f32 v5, 0x3f717273
+// CHECK: [0xff,0x40,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_fract_f32 v5, v1
+// CHECK: [0x01,0x41,0x0a,0x7e]
+
+v_fract_f32 v5, v255
+// CHECK: [0xff,0x41,0x0a,0x7e]
+
+v_fract_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x40,0xd3,0x01,0x00,0x00,0x00]
+
+v_fract_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x40,0xd3,0x01,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x40,0xd3,0x67,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x40,0xd3,0x68,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x40,0xd3,0x69,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x40,0xd3,0x6a,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x40,0xd3,0x6b,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x40,0xd3,0x6c,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x40,0xd3,0x6d,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x40,0xd3,0x6e,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x40,0xd3,0x6f,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x40,0xd3,0x7b,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x40,0xd3,0x7c,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x40,0xd3,0x7e,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x40,0xd3,0x7f,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x40,0xd3,0xfd,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x40,0xd3,0x01,0x01,0x00,0x00]
+
+v_fract_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x40,0xd3,0xff,0x01,0x00,0x00]
+
+v_fract_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x40,0xd3,0x01,0x00,0x00,0x20]
+
+v_fract_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x40,0xd3,0x01,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x08,0x40,0xd3,0x01,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x40,0xd3,0x01,0x00,0x00,0x08]
+
+v_fract_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x40,0xd3,0x01,0x00,0x00,0x10]
+
+v_fract_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x40,0xd3,0x01,0x00,0x00,0x18]
+
+v_trunc_f32 v5, s1
+// CHECK: [0x01,0x42,0x0a,0x7e]
+
+v_trunc_f32 v255, s1
+// CHECK: [0x01,0x42,0xfe,0x7f]
+
+v_trunc_f32 v5, s103
+// CHECK: [0x67,0x42,0x0a,0x7e]
+
+v_trunc_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x42,0x0a,0x7e]
+
+v_trunc_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x42,0x0a,0x7e]
+
+v_trunc_f32 v5, vcc_lo
+// CHECK: [0x6a,0x42,0x0a,0x7e]
+
+v_trunc_f32 v5, vcc_hi
+// CHECK: [0x6b,0x42,0x0a,0x7e]
+
+v_trunc_f32 v5, tba_lo
+// CHECK: [0x6c,0x42,0x0a,0x7e]
+
+v_trunc_f32 v5, tba_hi
+// CHECK: [0x6d,0x42,0x0a,0x7e]
+
+v_trunc_f32 v5, tma_lo
+// CHECK: [0x6e,0x42,0x0a,0x7e]
+
+v_trunc_f32 v5, tma_hi
+// CHECK: [0x6f,0x42,0x0a,0x7e]
+
+v_trunc_f32 v5, ttmp11
+// CHECK: [0x7b,0x42,0x0a,0x7e]
+
+v_trunc_f32 v5, m0
+// CHECK: [0x7c,0x42,0x0a,0x7e]
+
+v_trunc_f32 v5, exec_lo
+// CHECK: [0x7e,0x42,0x0a,0x7e]
+
+v_trunc_f32 v5, exec_hi
+// CHECK: [0x7f,0x42,0x0a,0x7e]
+
+v_trunc_f32 v5, 0
+// CHECK: [0x80,0x42,0x0a,0x7e]
+
+v_trunc_f32 v5, -1
+// CHECK: [0xc1,0x42,0x0a,0x7e]
+
+v_trunc_f32 v5, 0.5
+// CHECK: [0xf0,0x42,0x0a,0x7e]
+
+v_trunc_f32 v5, -4.0
+// CHECK: [0xf7,0x42,0x0a,0x7e]
+
+v_trunc_f32 v5, 0xaf123456
+// CHECK: [0xff,0x42,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_trunc_f32 v5, 0x3f717273
+// CHECK: [0xff,0x42,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_trunc_f32 v5, v1
+// CHECK: [0x01,0x43,0x0a,0x7e]
+
+v_trunc_f32 v5, v255
+// CHECK: [0xff,0x43,0x0a,0x7e]
+
+v_trunc_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x42,0xd3,0x01,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x42,0xd3,0x01,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x42,0xd3,0x67,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x42,0xd3,0x68,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x42,0xd3,0x69,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x42,0xd3,0x6a,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x42,0xd3,0x6b,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x42,0xd3,0x6c,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x42,0xd3,0x6d,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x42,0xd3,0x6e,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x42,0xd3,0x6f,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x42,0xd3,0x7b,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x42,0xd3,0x7c,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x42,0xd3,0x7e,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x42,0xd3,0x7f,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x42,0xd3,0xfd,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x42,0xd3,0x01,0x01,0x00,0x00]
+
+v_trunc_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x42,0xd3,0xff,0x01,0x00,0x00]
+
+v_trunc_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x42,0xd3,0x01,0x00,0x00,0x20]
+
+v_trunc_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x42,0xd3,0x01,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x08,0x42,0xd3,0x01,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x42,0xd3,0x01,0x00,0x00,0x08]
+
+v_trunc_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x42,0xd3,0x01,0x00,0x00,0x10]
+
+v_trunc_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x42,0xd3,0x01,0x00,0x00,0x18]
+
+v_ceil_f32 v5, s1
+// CHECK: [0x01,0x44,0x0a,0x7e]
+
+v_ceil_f32 v255, s1
+// CHECK: [0x01,0x44,0xfe,0x7f]
+
+v_ceil_f32 v5, s103
+// CHECK: [0x67,0x44,0x0a,0x7e]
+
+v_ceil_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x44,0x0a,0x7e]
+
+v_ceil_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x44,0x0a,0x7e]
+
+v_ceil_f32 v5, vcc_lo
+// CHECK: [0x6a,0x44,0x0a,0x7e]
+
+v_ceil_f32 v5, vcc_hi
+// CHECK: [0x6b,0x44,0x0a,0x7e]
+
+v_ceil_f32 v5, tba_lo
+// CHECK: [0x6c,0x44,0x0a,0x7e]
+
+v_ceil_f32 v5, tba_hi
+// CHECK: [0x6d,0x44,0x0a,0x7e]
+
+v_ceil_f32 v5, tma_lo
+// CHECK: [0x6e,0x44,0x0a,0x7e]
+
+v_ceil_f32 v5, tma_hi
+// CHECK: [0x6f,0x44,0x0a,0x7e]
+
+v_ceil_f32 v5, ttmp11
+// CHECK: [0x7b,0x44,0x0a,0x7e]
+
+v_ceil_f32 v5, m0
+// CHECK: [0x7c,0x44,0x0a,0x7e]
+
+v_ceil_f32 v5, exec_lo
+// CHECK: [0x7e,0x44,0x0a,0x7e]
+
+v_ceil_f32 v5, exec_hi
+// CHECK: [0x7f,0x44,0x0a,0x7e]
+
+v_ceil_f32 v5, 0
+// CHECK: [0x80,0x44,0x0a,0x7e]
+
+v_ceil_f32 v5, -1
+// CHECK: [0xc1,0x44,0x0a,0x7e]
+
+v_ceil_f32 v5, 0.5
+// CHECK: [0xf0,0x44,0x0a,0x7e]
+
+v_ceil_f32 v5, -4.0
+// CHECK: [0xf7,0x44,0x0a,0x7e]
+
+v_ceil_f32 v5, 0xaf123456
+// CHECK: [0xff,0x44,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_ceil_f32 v5, 0x3f717273
+// CHECK: [0xff,0x44,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_ceil_f32 v5, v1
+// CHECK: [0x01,0x45,0x0a,0x7e]
+
+v_ceil_f32 v5, v255
+// CHECK: [0xff,0x45,0x0a,0x7e]
+
+v_ceil_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x44,0xd3,0x01,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x44,0xd3,0x01,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x44,0xd3,0x67,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x44,0xd3,0x68,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x44,0xd3,0x69,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x44,0xd3,0x6a,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x44,0xd3,0x6b,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x44,0xd3,0x6c,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x44,0xd3,0x6d,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x44,0xd3,0x6e,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x44,0xd3,0x6f,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x44,0xd3,0x7b,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x44,0xd3,0x7c,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x44,0xd3,0x7e,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x44,0xd3,0x7f,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x44,0xd3,0xfd,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x44,0xd3,0x01,0x01,0x00,0x00]
+
+v_ceil_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x44,0xd3,0xff,0x01,0x00,0x00]
+
+v_ceil_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x44,0xd3,0x01,0x00,0x00,0x20]
+
+v_ceil_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x44,0xd3,0x01,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x08,0x44,0xd3,0x01,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x44,0xd3,0x01,0x00,0x00,0x08]
+
+v_ceil_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x44,0xd3,0x01,0x00,0x00,0x10]
+
+v_ceil_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x44,0xd3,0x01,0x00,0x00,0x18]
+
+v_rndne_f32 v5, s1
+// CHECK: [0x01,0x46,0x0a,0x7e]
+
+v_rndne_f32 v255, s1
+// CHECK: [0x01,0x46,0xfe,0x7f]
+
+v_rndne_f32 v5, s103
+// CHECK: [0x67,0x46,0x0a,0x7e]
+
+v_rndne_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x46,0x0a,0x7e]
+
+v_rndne_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x46,0x0a,0x7e]
+
+v_rndne_f32 v5, vcc_lo
+// CHECK: [0x6a,0x46,0x0a,0x7e]
+
+v_rndne_f32 v5, vcc_hi
+// CHECK: [0x6b,0x46,0x0a,0x7e]
+
+v_rndne_f32 v5, tba_lo
+// CHECK: [0x6c,0x46,0x0a,0x7e]
+
+v_rndne_f32 v5, tba_hi
+// CHECK: [0x6d,0x46,0x0a,0x7e]
+
+v_rndne_f32 v5, tma_lo
+// CHECK: [0x6e,0x46,0x0a,0x7e]
+
+v_rndne_f32 v5, tma_hi
+// CHECK: [0x6f,0x46,0x0a,0x7e]
+
+v_rndne_f32 v5, ttmp11
+// CHECK: [0x7b,0x46,0x0a,0x7e]
+
+v_rndne_f32 v5, m0
+// CHECK: [0x7c,0x46,0x0a,0x7e]
+
+v_rndne_f32 v5, exec_lo
+// CHECK: [0x7e,0x46,0x0a,0x7e]
+
+v_rndne_f32 v5, exec_hi
+// CHECK: [0x7f,0x46,0x0a,0x7e]
+
+v_rndne_f32 v5, 0
+// CHECK: [0x80,0x46,0x0a,0x7e]
+
+v_rndne_f32 v5, -1
+// CHECK: [0xc1,0x46,0x0a,0x7e]
+
+v_rndne_f32 v5, 0.5
+// CHECK: [0xf0,0x46,0x0a,0x7e]
+
+v_rndne_f32 v5, -4.0
+// CHECK: [0xf7,0x46,0x0a,0x7e]
+
+v_rndne_f32 v5, 0xaf123456
+// CHECK: [0xff,0x46,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rndne_f32 v5, 0x3f717273
+// CHECK: [0xff,0x46,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rndne_f32 v5, v1
+// CHECK: [0x01,0x47,0x0a,0x7e]
+
+v_rndne_f32 v5, v255
+// CHECK: [0xff,0x47,0x0a,0x7e]
+
+v_rndne_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x46,0xd3,0x01,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x46,0xd3,0x01,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x46,0xd3,0x67,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x46,0xd3,0x68,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x46,0xd3,0x69,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x46,0xd3,0x6a,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x46,0xd3,0x6b,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x46,0xd3,0x6c,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x46,0xd3,0x6d,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x46,0xd3,0x6e,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x46,0xd3,0x6f,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x46,0xd3,0x7b,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x46,0xd3,0x7c,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x46,0xd3,0x7e,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x46,0xd3,0x7f,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x46,0xd3,0xfd,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x46,0xd3,0x01,0x01,0x00,0x00]
+
+v_rndne_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x46,0xd3,0xff,0x01,0x00,0x00]
+
+v_rndne_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x46,0xd3,0x01,0x00,0x00,0x20]
+
+v_rndne_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x46,0xd3,0x01,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x08,0x46,0xd3,0x01,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x46,0xd3,0x01,0x00,0x00,0x08]
+
+v_rndne_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x46,0xd3,0x01,0x00,0x00,0x10]
+
+v_rndne_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x46,0xd3,0x01,0x00,0x00,0x18]
+
+v_floor_f32 v5, s1
+// CHECK: [0x01,0x48,0x0a,0x7e]
+
+v_floor_f32 v255, s1
+// CHECK: [0x01,0x48,0xfe,0x7f]
+
+v_floor_f32 v5, s103
+// CHECK: [0x67,0x48,0x0a,0x7e]
+
+v_floor_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x48,0x0a,0x7e]
+
+v_floor_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x48,0x0a,0x7e]
+
+v_floor_f32 v5, vcc_lo
+// CHECK: [0x6a,0x48,0x0a,0x7e]
+
+v_floor_f32 v5, vcc_hi
+// CHECK: [0x6b,0x48,0x0a,0x7e]
+
+v_floor_f32 v5, tba_lo
+// CHECK: [0x6c,0x48,0x0a,0x7e]
+
+v_floor_f32 v5, tba_hi
+// CHECK: [0x6d,0x48,0x0a,0x7e]
+
+v_floor_f32 v5, tma_lo
+// CHECK: [0x6e,0x48,0x0a,0x7e]
+
+v_floor_f32 v5, tma_hi
+// CHECK: [0x6f,0x48,0x0a,0x7e]
+
+v_floor_f32 v5, ttmp11
+// CHECK: [0x7b,0x48,0x0a,0x7e]
+
+v_floor_f32 v5, m0
+// CHECK: [0x7c,0x48,0x0a,0x7e]
+
+v_floor_f32 v5, exec_lo
+// CHECK: [0x7e,0x48,0x0a,0x7e]
+
+v_floor_f32 v5, exec_hi
+// CHECK: [0x7f,0x48,0x0a,0x7e]
+
+v_floor_f32 v5, 0
+// CHECK: [0x80,0x48,0x0a,0x7e]
+
+v_floor_f32 v5, -1
+// CHECK: [0xc1,0x48,0x0a,0x7e]
+
+v_floor_f32 v5, 0.5
+// CHECK: [0xf0,0x48,0x0a,0x7e]
+
+v_floor_f32 v5, -4.0
+// CHECK: [0xf7,0x48,0x0a,0x7e]
+
+v_floor_f32 v5, 0xaf123456
+// CHECK: [0xff,0x48,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_floor_f32 v5, 0x3f717273
+// CHECK: [0xff,0x48,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_floor_f32 v5, v1
+// CHECK: [0x01,0x49,0x0a,0x7e]
+
+v_floor_f32 v5, v255
+// CHECK: [0xff,0x49,0x0a,0x7e]
+
+v_floor_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x48,0xd3,0x01,0x00,0x00,0x00]
+
+v_floor_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x48,0xd3,0x01,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x48,0xd3,0x67,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x48,0xd3,0x68,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x48,0xd3,0x69,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x48,0xd3,0x6a,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x48,0xd3,0x6b,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x48,0xd3,0x6c,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x48,0xd3,0x6d,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x48,0xd3,0x6e,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x48,0xd3,0x6f,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x48,0xd3,0x7b,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x48,0xd3,0x7c,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x48,0xd3,0x7e,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x48,0xd3,0x7f,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x48,0xd3,0xfd,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x48,0xd3,0x01,0x01,0x00,0x00]
+
+v_floor_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x48,0xd3,0xff,0x01,0x00,0x00]
+
+v_floor_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x48,0xd3,0x01,0x00,0x00,0x20]
+
+v_floor_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x48,0xd3,0x01,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x08,0x48,0xd3,0x01,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x48,0xd3,0x01,0x00,0x00,0x08]
+
+v_floor_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x48,0xd3,0x01,0x00,0x00,0x10]
+
+v_floor_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x48,0xd3,0x01,0x00,0x00,0x18]
+
+v_exp_f32 v5, s1
+// CHECK: [0x01,0x4a,0x0a,0x7e]
+
+v_exp_f32 v255, s1
+// CHECK: [0x01,0x4a,0xfe,0x7f]
+
+v_exp_f32 v5, s103
+// CHECK: [0x67,0x4a,0x0a,0x7e]
+
+v_exp_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x4a,0x0a,0x7e]
+
+v_exp_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x4a,0x0a,0x7e]
+
+v_exp_f32 v5, vcc_lo
+// CHECK: [0x6a,0x4a,0x0a,0x7e]
+
+v_exp_f32 v5, vcc_hi
+// CHECK: [0x6b,0x4a,0x0a,0x7e]
+
+v_exp_f32 v5, tba_lo
+// CHECK: [0x6c,0x4a,0x0a,0x7e]
+
+v_exp_f32 v5, tba_hi
+// CHECK: [0x6d,0x4a,0x0a,0x7e]
+
+v_exp_f32 v5, tma_lo
+// CHECK: [0x6e,0x4a,0x0a,0x7e]
+
+v_exp_f32 v5, tma_hi
+// CHECK: [0x6f,0x4a,0x0a,0x7e]
+
+v_exp_f32 v5, ttmp11
+// CHECK: [0x7b,0x4a,0x0a,0x7e]
+
+v_exp_f32 v5, m0
+// CHECK: [0x7c,0x4a,0x0a,0x7e]
+
+v_exp_f32 v5, exec_lo
+// CHECK: [0x7e,0x4a,0x0a,0x7e]
+
+v_exp_f32 v5, exec_hi
+// CHECK: [0x7f,0x4a,0x0a,0x7e]
+
+v_exp_f32 v5, 0
+// CHECK: [0x80,0x4a,0x0a,0x7e]
+
+v_exp_f32 v5, -1
+// CHECK: [0xc1,0x4a,0x0a,0x7e]
+
+v_exp_f32 v5, 0.5
+// CHECK: [0xf0,0x4a,0x0a,0x7e]
+
+v_exp_f32 v5, -4.0
+// CHECK: [0xf7,0x4a,0x0a,0x7e]
+
+v_exp_f32 v5, 0xaf123456
+// CHECK: [0xff,0x4a,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_exp_f32 v5, 0x3f717273
+// CHECK: [0xff,0x4a,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_exp_f32 v5, v1
+// CHECK: [0x01,0x4b,0x0a,0x7e]
+
+v_exp_f32 v5, v255
+// CHECK: [0xff,0x4b,0x0a,0x7e]
+
+v_exp_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x4a,0xd3,0x01,0x00,0x00,0x00]
+
+v_exp_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x4a,0xd3,0x01,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x4a,0xd3,0x67,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x4a,0xd3,0x68,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x4a,0xd3,0x69,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x4a,0xd3,0x6a,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x4a,0xd3,0x6b,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x4a,0xd3,0x6c,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x4a,0xd3,0x6d,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x4a,0xd3,0x6e,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x4a,0xd3,0x6f,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x4a,0xd3,0x7b,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x4a,0xd3,0x7c,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x4a,0xd3,0x7e,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x4a,0xd3,0x7f,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, 0
+// CHECK: [0x05,0x00,0x4a,0xd3,0x80,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x4a,0xd3,0xf0,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x4a,0xd3,0xfd,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x4a,0xd3,0x01,0x01,0x00,0x00]
+
+v_exp_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x4a,0xd3,0xff,0x01,0x00,0x00]
+
+v_exp_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x4a,0xd3,0x01,0x00,0x00,0x20]
+
+v_exp_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x4a,0xd3,0x01,0x00,0x00,0x08]
+
+v_exp_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x4a,0xd3,0x01,0x00,0x00,0x10]
+
+v_exp_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x4a,0xd3,0x01,0x00,0x00,0x18]
+
+v_log_clamp_f32 v5, s1
+// CHECK: [0x01,0x4c,0x0a,0x7e]
+
+v_log_clamp_f32 v255, s1
+// CHECK: [0x01,0x4c,0xfe,0x7f]
+
+v_log_clamp_f32 v5, s103
+// CHECK: [0x67,0x4c,0x0a,0x7e]
+
+v_log_clamp_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x4c,0x0a,0x7e]
+
+v_log_clamp_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x4c,0x0a,0x7e]
+
+v_log_clamp_f32 v5, vcc_lo
+// CHECK: [0x6a,0x4c,0x0a,0x7e]
+
+v_log_clamp_f32 v5, vcc_hi
+// CHECK: [0x6b,0x4c,0x0a,0x7e]
+
+v_log_clamp_f32 v5, tba_lo
+// CHECK: [0x6c,0x4c,0x0a,0x7e]
+
+v_log_clamp_f32 v5, tba_hi
+// CHECK: [0x6d,0x4c,0x0a,0x7e]
+
+v_log_clamp_f32 v5, tma_lo
+// CHECK: [0x6e,0x4c,0x0a,0x7e]
+
+v_log_clamp_f32 v5, tma_hi
+// CHECK: [0x6f,0x4c,0x0a,0x7e]
+
+v_log_clamp_f32 v5, ttmp11
+// CHECK: [0x7b,0x4c,0x0a,0x7e]
+
+v_log_clamp_f32 v5, m0
+// CHECK: [0x7c,0x4c,0x0a,0x7e]
+
+v_log_clamp_f32 v5, exec_lo
+// CHECK: [0x7e,0x4c,0x0a,0x7e]
+
+v_log_clamp_f32 v5, exec_hi
+// CHECK: [0x7f,0x4c,0x0a,0x7e]
+
+v_log_clamp_f32 v5, 0
+// CHECK: [0x80,0x4c,0x0a,0x7e]
+
+v_log_clamp_f32 v5, -1
+// CHECK: [0xc1,0x4c,0x0a,0x7e]
+
+v_log_clamp_f32 v5, 0.5
+// CHECK: [0xf0,0x4c,0x0a,0x7e]
+
+v_log_clamp_f32 v5, -4.0
+// CHECK: [0xf7,0x4c,0x0a,0x7e]
+
+v_log_clamp_f32 v5, 0xaf123456
+// CHECK: [0xff,0x4c,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_log_clamp_f32 v5, 0x3f717273
+// CHECK: [0xff,0x4c,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_log_clamp_f32 v5, v1
+// CHECK: [0x01,0x4d,0x0a,0x7e]
+
+v_log_clamp_f32 v5, v255
+// CHECK: [0xff,0x4d,0x0a,0x7e]
+
+v_log_clamp_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x4c,0xd3,0x01,0x00,0x00,0x00]
+
+v_log_clamp_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x4c,0xd3,0x01,0x00,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x4c,0xd3,0x67,0x00,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x4c,0xd3,0x68,0x00,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x4c,0xd3,0x69,0x00,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x4c,0xd3,0x6a,0x00,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x4c,0xd3,0x6b,0x00,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x4c,0xd3,0x6c,0x00,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x4c,0xd3,0x6d,0x00,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x4c,0xd3,0x6e,0x00,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x4c,0xd3,0x6f,0x00,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x4c,0xd3,0x7b,0x00,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x4c,0xd3,0x7c,0x00,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x4c,0xd3,0x7e,0x00,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x4c,0xd3,0x7f,0x00,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, 0
+// CHECK: [0x05,0x00,0x4c,0xd3,0x80,0x00,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x4c,0xd3,0xf0,0x00,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x4c,0xd3,0xfd,0x00,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x4c,0xd3,0x01,0x01,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x4c,0xd3,0xff,0x01,0x00,0x00]
+
+v_log_clamp_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x4c,0xd3,0x01,0x00,0x00,0x20]
+
+v_log_clamp_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x4c,0xd3,0x01,0x00,0x00,0x08]
+
+v_log_clamp_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x4c,0xd3,0x01,0x00,0x00,0x10]
+
+v_log_clamp_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x4c,0xd3,0x01,0x00,0x00,0x18]
+
+v_log_f32 v5, s1
+// CHECK: [0x01,0x4e,0x0a,0x7e]
+
+v_log_f32 v255, s1
+// CHECK: [0x01,0x4e,0xfe,0x7f]
+
+v_log_f32 v5, s103
+// CHECK: [0x67,0x4e,0x0a,0x7e]
+
+v_log_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x4e,0x0a,0x7e]
+
+v_log_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x4e,0x0a,0x7e]
+
+v_log_f32 v5, vcc_lo
+// CHECK: [0x6a,0x4e,0x0a,0x7e]
+
+v_log_f32 v5, vcc_hi
+// CHECK: [0x6b,0x4e,0x0a,0x7e]
+
+v_log_f32 v5, tba_lo
+// CHECK: [0x6c,0x4e,0x0a,0x7e]
+
+v_log_f32 v5, tba_hi
+// CHECK: [0x6d,0x4e,0x0a,0x7e]
+
+v_log_f32 v5, tma_lo
+// CHECK: [0x6e,0x4e,0x0a,0x7e]
+
+v_log_f32 v5, tma_hi
+// CHECK: [0x6f,0x4e,0x0a,0x7e]
+
+v_log_f32 v5, ttmp11
+// CHECK: [0x7b,0x4e,0x0a,0x7e]
+
+v_log_f32 v5, m0
+// CHECK: [0x7c,0x4e,0x0a,0x7e]
+
+v_log_f32 v5, exec_lo
+// CHECK: [0x7e,0x4e,0x0a,0x7e]
+
+v_log_f32 v5, exec_hi
+// CHECK: [0x7f,0x4e,0x0a,0x7e]
+
+v_log_f32 v5, 0
+// CHECK: [0x80,0x4e,0x0a,0x7e]
+
+v_log_f32 v5, -1
+// CHECK: [0xc1,0x4e,0x0a,0x7e]
+
+v_log_f32 v5, 0.5
+// CHECK: [0xf0,0x4e,0x0a,0x7e]
+
+v_log_f32 v5, -4.0
+// CHECK: [0xf7,0x4e,0x0a,0x7e]
+
+v_log_f32 v5, 0xaf123456
+// CHECK: [0xff,0x4e,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_log_f32 v5, 0x3f717273
+// CHECK: [0xff,0x4e,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_log_f32 v5, v1
+// CHECK: [0x01,0x4f,0x0a,0x7e]
+
+v_log_f32 v5, v255
+// CHECK: [0xff,0x4f,0x0a,0x7e]
+
+v_log_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x4e,0xd3,0x01,0x00,0x00,0x00]
+
+v_log_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x4e,0xd3,0x01,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x4e,0xd3,0x67,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x4e,0xd3,0x68,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x4e,0xd3,0x69,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x4e,0xd3,0x6a,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x4e,0xd3,0x6b,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x4e,0xd3,0x6c,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x4e,0xd3,0x6d,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x4e,0xd3,0x6e,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x4e,0xd3,0x6f,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x4e,0xd3,0x7b,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x4e,0xd3,0x7c,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x4e,0xd3,0x7e,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x4e,0xd3,0x7f,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, 0
+// CHECK: [0x05,0x00,0x4e,0xd3,0x80,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x4e,0xd3,0xf0,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x4e,0xd3,0xfd,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x4e,0xd3,0x01,0x01,0x00,0x00]
+
+v_log_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x4e,0xd3,0xff,0x01,0x00,0x00]
+
+v_log_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x4e,0xd3,0x01,0x00,0x00,0x20]
+
+v_log_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x4e,0xd3,0x01,0x00,0x00,0x08]
+
+v_log_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x4e,0xd3,0x01,0x00,0x00,0x10]
+
+v_log_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x4e,0xd3,0x01,0x00,0x00,0x18]
+
+v_rcp_clamp_f32 v5, s1
+// CHECK: [0x01,0x50,0x0a,0x7e]
+
+v_rcp_clamp_f32 v255, s1
+// CHECK: [0x01,0x50,0xfe,0x7f]
+
+v_rcp_clamp_f32 v5, s103
+// CHECK: [0x67,0x50,0x0a,0x7e]
+
+v_rcp_clamp_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x50,0x0a,0x7e]
+
+v_rcp_clamp_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x50,0x0a,0x7e]
+
+v_rcp_clamp_f32 v5, vcc_lo
+// CHECK: [0x6a,0x50,0x0a,0x7e]
+
+v_rcp_clamp_f32 v5, vcc_hi
+// CHECK: [0x6b,0x50,0x0a,0x7e]
+
+v_rcp_clamp_f32 v5, tba_lo
+// CHECK: [0x6c,0x50,0x0a,0x7e]
+
+v_rcp_clamp_f32 v5, tba_hi
+// CHECK: [0x6d,0x50,0x0a,0x7e]
+
+v_rcp_clamp_f32 v5, tma_lo
+// CHECK: [0x6e,0x50,0x0a,0x7e]
+
+v_rcp_clamp_f32 v5, tma_hi
+// CHECK: [0x6f,0x50,0x0a,0x7e]
+
+v_rcp_clamp_f32 v5, ttmp11
+// CHECK: [0x7b,0x50,0x0a,0x7e]
+
+v_rcp_clamp_f32 v5, m0
+// CHECK: [0x7c,0x50,0x0a,0x7e]
+
+v_rcp_clamp_f32 v5, exec_lo
+// CHECK: [0x7e,0x50,0x0a,0x7e]
+
+v_rcp_clamp_f32 v5, exec_hi
+// CHECK: [0x7f,0x50,0x0a,0x7e]
+
+v_rcp_clamp_f32 v5, 0
+// CHECK: [0x80,0x50,0x0a,0x7e]
+
+v_rcp_clamp_f32 v5, -1
+// CHECK: [0xc1,0x50,0x0a,0x7e]
+
+v_rcp_clamp_f32 v5, 0.5
+// CHECK: [0xf0,0x50,0x0a,0x7e]
+
+v_rcp_clamp_f32 v5, -4.0
+// CHECK: [0xf7,0x50,0x0a,0x7e]
+
+v_rcp_clamp_f32 v5, 0xaf123456
+// CHECK: [0xff,0x50,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rcp_clamp_f32 v5, 0x3f717273
+// CHECK: [0xff,0x50,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rcp_clamp_f32 v5, v1
+// CHECK: [0x01,0x51,0x0a,0x7e]
+
+v_rcp_clamp_f32 v5, v255
+// CHECK: [0xff,0x51,0x0a,0x7e]
+
+v_rcp_clamp_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x50,0xd3,0x01,0x00,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x50,0xd3,0x01,0x00,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x50,0xd3,0x67,0x00,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x50,0xd3,0x68,0x00,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x50,0xd3,0x69,0x00,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x50,0xd3,0x6a,0x00,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x50,0xd3,0x6b,0x00,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x50,0xd3,0x6c,0x00,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x50,0xd3,0x6d,0x00,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x50,0xd3,0x6e,0x00,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x50,0xd3,0x6f,0x00,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x50,0xd3,0x7b,0x00,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x50,0xd3,0x7c,0x00,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x50,0xd3,0x7e,0x00,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x50,0xd3,0x7f,0x00,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, 0
+// CHECK: [0x05,0x00,0x50,0xd3,0x80,0x00,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x50,0xd3,0xf0,0x00,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x50,0xd3,0xfd,0x00,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x50,0xd3,0x01,0x01,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x50,0xd3,0xff,0x01,0x00,0x00]
+
+v_rcp_clamp_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x50,0xd3,0x01,0x00,0x00,0x20]
+
+v_rcp_clamp_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x50,0xd3,0x01,0x00,0x00,0x08]
+
+v_rcp_clamp_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x50,0xd3,0x01,0x00,0x00,0x10]
+
+v_rcp_clamp_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x50,0xd3,0x01,0x00,0x00,0x18]
+
+v_rcp_legacy_f32 v5, s1
+// CHECK: [0x01,0x52,0x0a,0x7e]
+
+v_rcp_legacy_f32 v255, s1
+// CHECK: [0x01,0x52,0xfe,0x7f]
+
+v_rcp_legacy_f32 v5, s103
+// CHECK: [0x67,0x52,0x0a,0x7e]
+
+v_rcp_legacy_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x52,0x0a,0x7e]
+
+v_rcp_legacy_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x52,0x0a,0x7e]
+
+v_rcp_legacy_f32 v5, vcc_lo
+// CHECK: [0x6a,0x52,0x0a,0x7e]
+
+v_rcp_legacy_f32 v5, vcc_hi
+// CHECK: [0x6b,0x52,0x0a,0x7e]
+
+v_rcp_legacy_f32 v5, tba_lo
+// CHECK: [0x6c,0x52,0x0a,0x7e]
+
+v_rcp_legacy_f32 v5, tba_hi
+// CHECK: [0x6d,0x52,0x0a,0x7e]
+
+v_rcp_legacy_f32 v5, tma_lo
+// CHECK: [0x6e,0x52,0x0a,0x7e]
+
+v_rcp_legacy_f32 v5, tma_hi
+// CHECK: [0x6f,0x52,0x0a,0x7e]
+
+v_rcp_legacy_f32 v5, ttmp11
+// CHECK: [0x7b,0x52,0x0a,0x7e]
+
+v_rcp_legacy_f32 v5, m0
+// CHECK: [0x7c,0x52,0x0a,0x7e]
+
+v_rcp_legacy_f32 v5, exec_lo
+// CHECK: [0x7e,0x52,0x0a,0x7e]
+
+v_rcp_legacy_f32 v5, exec_hi
+// CHECK: [0x7f,0x52,0x0a,0x7e]
+
+v_rcp_legacy_f32 v5, 0
+// CHECK: [0x80,0x52,0x0a,0x7e]
+
+v_rcp_legacy_f32 v5, -1
+// CHECK: [0xc1,0x52,0x0a,0x7e]
+
+v_rcp_legacy_f32 v5, 0.5
+// CHECK: [0xf0,0x52,0x0a,0x7e]
+
+v_rcp_legacy_f32 v5, -4.0
+// CHECK: [0xf7,0x52,0x0a,0x7e]
+
+v_rcp_legacy_f32 v5, 0xaf123456
+// CHECK: [0xff,0x52,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rcp_legacy_f32 v5, 0x3f717273
+// CHECK: [0xff,0x52,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rcp_legacy_f32 v5, v1
+// CHECK: [0x01,0x53,0x0a,0x7e]
+
+v_rcp_legacy_f32 v5, v255
+// CHECK: [0xff,0x53,0x0a,0x7e]
+
+v_rcp_legacy_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x52,0xd3,0x01,0x00,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x52,0xd3,0x01,0x00,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x52,0xd3,0x67,0x00,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x52,0xd3,0x68,0x00,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x52,0xd3,0x69,0x00,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x52,0xd3,0x6a,0x00,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x52,0xd3,0x6b,0x00,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x52,0xd3,0x6c,0x00,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x52,0xd3,0x6d,0x00,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x52,0xd3,0x6e,0x00,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x52,0xd3,0x6f,0x00,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x52,0xd3,0x7b,0x00,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x52,0xd3,0x7c,0x00,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x52,0xd3,0x7e,0x00,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x52,0xd3,0x7f,0x00,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, 0
+// CHECK: [0x05,0x00,0x52,0xd3,0x80,0x00,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x52,0xd3,0xf0,0x00,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x52,0xd3,0xfd,0x00,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x52,0xd3,0x01,0x01,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x52,0xd3,0xff,0x01,0x00,0x00]
+
+v_rcp_legacy_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x52,0xd3,0x01,0x00,0x00,0x20]
+
+v_rcp_legacy_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x52,0xd3,0x01,0x00,0x00,0x08]
+
+v_rcp_legacy_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x52,0xd3,0x01,0x00,0x00,0x10]
+
+v_rcp_legacy_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x52,0xd3,0x01,0x00,0x00,0x18]
+
+v_rcp_f32 v5, s1
+// CHECK: [0x01,0x54,0x0a,0x7e]
+
+v_rcp_f32 v255, s1
+// CHECK: [0x01,0x54,0xfe,0x7f]
+
+v_rcp_f32 v5, s103
+// CHECK: [0x67,0x54,0x0a,0x7e]
+
+v_rcp_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x54,0x0a,0x7e]
+
+v_rcp_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x54,0x0a,0x7e]
+
+v_rcp_f32 v5, vcc_lo
+// CHECK: [0x6a,0x54,0x0a,0x7e]
+
+v_rcp_f32 v5, vcc_hi
+// CHECK: [0x6b,0x54,0x0a,0x7e]
+
+v_rcp_f32 v5, tba_lo
+// CHECK: [0x6c,0x54,0x0a,0x7e]
+
+v_rcp_f32 v5, tba_hi
+// CHECK: [0x6d,0x54,0x0a,0x7e]
+
+v_rcp_f32 v5, tma_lo
+// CHECK: [0x6e,0x54,0x0a,0x7e]
+
+v_rcp_f32 v5, tma_hi
+// CHECK: [0x6f,0x54,0x0a,0x7e]
+
+v_rcp_f32 v5, ttmp11
+// CHECK: [0x7b,0x54,0x0a,0x7e]
+
+v_rcp_f32 v5, m0
+// CHECK: [0x7c,0x54,0x0a,0x7e]
+
+v_rcp_f32 v5, exec_lo
+// CHECK: [0x7e,0x54,0x0a,0x7e]
+
+v_rcp_f32 v5, exec_hi
+// CHECK: [0x7f,0x54,0x0a,0x7e]
+
+v_rcp_f32 v5, 0
+// CHECK: [0x80,0x54,0x0a,0x7e]
+
+v_rcp_f32 v5, -1
+// CHECK: [0xc1,0x54,0x0a,0x7e]
+
+v_rcp_f32 v5, 0.5
+// CHECK: [0xf0,0x54,0x0a,0x7e]
+
+v_rcp_f32 v5, -4.0
+// CHECK: [0xf7,0x54,0x0a,0x7e]
+
+v_rcp_f32 v5, 0xaf123456
+// CHECK: [0xff,0x54,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rcp_f32 v5, 0x3f717273
+// CHECK: [0xff,0x54,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rcp_f32 v5, v1
+// CHECK: [0x01,0x55,0x0a,0x7e]
+
+v_rcp_f32 v5, v255
+// CHECK: [0xff,0x55,0x0a,0x7e]
+
+v_rcp_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x54,0xd3,0x01,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x54,0xd3,0x01,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x54,0xd3,0x67,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x54,0xd3,0x68,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x54,0xd3,0x69,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x54,0xd3,0x6a,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x54,0xd3,0x6b,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x54,0xd3,0x6c,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x54,0xd3,0x6d,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x54,0xd3,0x6e,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x54,0xd3,0x6f,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x54,0xd3,0x7b,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x54,0xd3,0x7c,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x54,0xd3,0x7e,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x54,0xd3,0x7f,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, 0
+// CHECK: [0x05,0x00,0x54,0xd3,0x80,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x54,0xd3,0xf0,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x54,0xd3,0xfd,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x54,0xd3,0x01,0x01,0x00,0x00]
+
+v_rcp_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x54,0xd3,0xff,0x01,0x00,0x00]
+
+v_rcp_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x54,0xd3,0x01,0x00,0x00,0x20]
+
+v_rcp_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x54,0xd3,0x01,0x00,0x00,0x08]
+
+v_rcp_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x54,0xd3,0x01,0x00,0x00,0x10]
+
+v_rcp_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x54,0xd3,0x01,0x00,0x00,0x18]
+
+v_rcp_iflag_f32 v5, s1
+// CHECK: [0x01,0x56,0x0a,0x7e]
+
+v_rcp_iflag_f32 v255, s1
+// CHECK: [0x01,0x56,0xfe,0x7f]
+
+v_rcp_iflag_f32 v5, s103
+// CHECK: [0x67,0x56,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x56,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x56,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, vcc_lo
+// CHECK: [0x6a,0x56,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, vcc_hi
+// CHECK: [0x6b,0x56,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, tba_lo
+// CHECK: [0x6c,0x56,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, tba_hi
+// CHECK: [0x6d,0x56,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, tma_lo
+// CHECK: [0x6e,0x56,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, tma_hi
+// CHECK: [0x6f,0x56,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, ttmp11
+// CHECK: [0x7b,0x56,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, m0
+// CHECK: [0x7c,0x56,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, exec_lo
+// CHECK: [0x7e,0x56,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, exec_hi
+// CHECK: [0x7f,0x56,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, 0
+// CHECK: [0x80,0x56,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, -1
+// CHECK: [0xc1,0x56,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, 0.5
+// CHECK: [0xf0,0x56,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, -4.0
+// CHECK: [0xf7,0x56,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, 0xaf123456
+// CHECK: [0xff,0x56,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rcp_iflag_f32 v5, 0x3f717273
+// CHECK: [0xff,0x56,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rcp_iflag_f32 v5, v1
+// CHECK: [0x01,0x57,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, v255
+// CHECK: [0xff,0x57,0x0a,0x7e]
+
+v_rcp_iflag_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x56,0xd3,0x01,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x56,0xd3,0x01,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x56,0xd3,0x67,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x56,0xd3,0x68,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x56,0xd3,0x69,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x56,0xd3,0x6a,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x56,0xd3,0x6b,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x56,0xd3,0x6c,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x56,0xd3,0x6d,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x56,0xd3,0x6e,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x56,0xd3,0x6f,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x56,0xd3,0x7b,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x56,0xd3,0x7c,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x56,0xd3,0x7e,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x56,0xd3,0x7f,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x56,0xd3,0xfd,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x56,0xd3,0x01,0x01,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x56,0xd3,0xff,0x01,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x56,0xd3,0x01,0x00,0x00,0x20]
+
+v_rcp_iflag_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x56,0xd3,0x01,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x08,0x56,0xd3,0x01,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x56,0xd3,0x01,0x00,0x00,0x08]
+
+v_rcp_iflag_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x56,0xd3,0x01,0x00,0x00,0x10]
+
+v_rcp_iflag_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x56,0xd3,0x01,0x00,0x00,0x18]
+
+v_rsq_clamp_f32 v5, s1
+// CHECK: [0x01,0x58,0x0a,0x7e]
+
+v_rsq_clamp_f32 v255, s1
+// CHECK: [0x01,0x58,0xfe,0x7f]
+
+v_rsq_clamp_f32 v5, s103
+// CHECK: [0x67,0x58,0x0a,0x7e]
+
+v_rsq_clamp_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x58,0x0a,0x7e]
+
+v_rsq_clamp_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x58,0x0a,0x7e]
+
+v_rsq_clamp_f32 v5, vcc_lo
+// CHECK: [0x6a,0x58,0x0a,0x7e]
+
+v_rsq_clamp_f32 v5, vcc_hi
+// CHECK: [0x6b,0x58,0x0a,0x7e]
+
+v_rsq_clamp_f32 v5, tba_lo
+// CHECK: [0x6c,0x58,0x0a,0x7e]
+
+v_rsq_clamp_f32 v5, tba_hi
+// CHECK: [0x6d,0x58,0x0a,0x7e]
+
+v_rsq_clamp_f32 v5, tma_lo
+// CHECK: [0x6e,0x58,0x0a,0x7e]
+
+v_rsq_clamp_f32 v5, tma_hi
+// CHECK: [0x6f,0x58,0x0a,0x7e]
+
+v_rsq_clamp_f32 v5, ttmp11
+// CHECK: [0x7b,0x58,0x0a,0x7e]
+
+v_rsq_clamp_f32 v5, m0
+// CHECK: [0x7c,0x58,0x0a,0x7e]
+
+v_rsq_clamp_f32 v5, exec_lo
+// CHECK: [0x7e,0x58,0x0a,0x7e]
+
+v_rsq_clamp_f32 v5, exec_hi
+// CHECK: [0x7f,0x58,0x0a,0x7e]
+
+v_rsq_clamp_f32 v5, 0
+// CHECK: [0x80,0x58,0x0a,0x7e]
+
+v_rsq_clamp_f32 v5, -1
+// CHECK: [0xc1,0x58,0x0a,0x7e]
+
+v_rsq_clamp_f32 v5, 0.5
+// CHECK: [0xf0,0x58,0x0a,0x7e]
+
+v_rsq_clamp_f32 v5, -4.0
+// CHECK: [0xf7,0x58,0x0a,0x7e]
+
+v_rsq_clamp_f32 v5, 0xaf123456
+// CHECK: [0xff,0x58,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rsq_clamp_f32 v5, 0x3f717273
+// CHECK: [0xff,0x58,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rsq_clamp_f32 v5, v1
+// CHECK: [0x01,0x59,0x0a,0x7e]
+
+v_rsq_clamp_f32 v5, v255
+// CHECK: [0xff,0x59,0x0a,0x7e]
+
+v_rsq_clamp_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x58,0xd3,0x01,0x00,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x58,0xd3,0x01,0x00,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x58,0xd3,0x67,0x00,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x58,0xd3,0x68,0x00,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x58,0xd3,0x69,0x00,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x58,0xd3,0x6a,0x00,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x58,0xd3,0x6b,0x00,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x58,0xd3,0x6c,0x00,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x58,0xd3,0x6d,0x00,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x58,0xd3,0x6e,0x00,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x58,0xd3,0x6f,0x00,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x58,0xd3,0x7b,0x00,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x58,0xd3,0x7c,0x00,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x58,0xd3,0x7e,0x00,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x58,0xd3,0x7f,0x00,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x58,0xd3,0xfd,0x00,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x58,0xd3,0x01,0x01,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x58,0xd3,0xff,0x01,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x58,0xd3,0x01,0x00,0x00,0x20]
+
+v_rsq_clamp_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x58,0xd3,0x01,0x00,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x08,0x58,0xd3,0x01,0x00,0x00,0x00]
+
+v_rsq_clamp_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x58,0xd3,0x01,0x00,0x00,0x08]
+
+v_rsq_clamp_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x58,0xd3,0x01,0x00,0x00,0x10]
+
+v_rsq_clamp_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x58,0xd3,0x01,0x00,0x00,0x18]
+
+v_rsq_legacy_f32 v5, s1
+// CHECK: [0x01,0x5a,0x0a,0x7e]
+
+v_rsq_legacy_f32 v255, s1
+// CHECK: [0x01,0x5a,0xfe,0x7f]
+
+v_rsq_legacy_f32 v5, s103
+// CHECK: [0x67,0x5a,0x0a,0x7e]
+
+v_rsq_legacy_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x5a,0x0a,0x7e]
+
+v_rsq_legacy_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x5a,0x0a,0x7e]
+
+v_rsq_legacy_f32 v5, vcc_lo
+// CHECK: [0x6a,0x5a,0x0a,0x7e]
+
+v_rsq_legacy_f32 v5, vcc_hi
+// CHECK: [0x6b,0x5a,0x0a,0x7e]
+
+v_rsq_legacy_f32 v5, tba_lo
+// CHECK: [0x6c,0x5a,0x0a,0x7e]
+
+v_rsq_legacy_f32 v5, tba_hi
+// CHECK: [0x6d,0x5a,0x0a,0x7e]
+
+v_rsq_legacy_f32 v5, tma_lo
+// CHECK: [0x6e,0x5a,0x0a,0x7e]
+
+v_rsq_legacy_f32 v5, tma_hi
+// CHECK: [0x6f,0x5a,0x0a,0x7e]
+
+v_rsq_legacy_f32 v5, ttmp11
+// CHECK: [0x7b,0x5a,0x0a,0x7e]
+
+v_rsq_legacy_f32 v5, m0
+// CHECK: [0x7c,0x5a,0x0a,0x7e]
+
+v_rsq_legacy_f32 v5, exec_lo
+// CHECK: [0x7e,0x5a,0x0a,0x7e]
+
+v_rsq_legacy_f32 v5, exec_hi
+// CHECK: [0x7f,0x5a,0x0a,0x7e]
+
+v_rsq_legacy_f32 v5, 0
+// CHECK: [0x80,0x5a,0x0a,0x7e]
+
+v_rsq_legacy_f32 v5, -1
+// CHECK: [0xc1,0x5a,0x0a,0x7e]
+
+v_rsq_legacy_f32 v5, 0.5
+// CHECK: [0xf0,0x5a,0x0a,0x7e]
+
+v_rsq_legacy_f32 v5, -4.0
+// CHECK: [0xf7,0x5a,0x0a,0x7e]
+
+v_rsq_legacy_f32 v5, 0xaf123456
+// CHECK: [0xff,0x5a,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rsq_legacy_f32 v5, 0x3f717273
+// CHECK: [0xff,0x5a,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rsq_legacy_f32 v5, v1
+// CHECK: [0x01,0x5b,0x0a,0x7e]
+
+v_rsq_legacy_f32 v5, v255
+// CHECK: [0xff,0x5b,0x0a,0x7e]
+
+v_rsq_legacy_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x5a,0xd3,0x01,0x00,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x5a,0xd3,0x01,0x00,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x5a,0xd3,0x67,0x00,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x5a,0xd3,0x68,0x00,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x5a,0xd3,0x69,0x00,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x5a,0xd3,0x6a,0x00,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x5a,0xd3,0x6b,0x00,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x5a,0xd3,0x6c,0x00,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x5a,0xd3,0x6d,0x00,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x5a,0xd3,0x6e,0x00,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x5a,0xd3,0x6f,0x00,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x5a,0xd3,0x7b,0x00,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x5a,0xd3,0x7c,0x00,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x5a,0xd3,0x7e,0x00,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x5a,0xd3,0x7f,0x00,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x5a,0xd3,0xfd,0x00,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x5a,0xd3,0x01,0x01,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x5a,0xd3,0xff,0x01,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x5a,0xd3,0x01,0x00,0x00,0x20]
+
+v_rsq_legacy_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x5a,0xd3,0x01,0x00,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x08,0x5a,0xd3,0x01,0x00,0x00,0x00]
+
+v_rsq_legacy_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x5a,0xd3,0x01,0x00,0x00,0x08]
+
+v_rsq_legacy_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x5a,0xd3,0x01,0x00,0x00,0x10]
+
+v_rsq_legacy_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x5a,0xd3,0x01,0x00,0x00,0x18]
+
+v_rsq_f32 v5, s1
+// CHECK: [0x01,0x5c,0x0a,0x7e]
+
+v_rsq_f32 v255, s1
+// CHECK: [0x01,0x5c,0xfe,0x7f]
+
+v_rsq_f32 v5, s103
+// CHECK: [0x67,0x5c,0x0a,0x7e]
+
+v_rsq_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x5c,0x0a,0x7e]
+
+v_rsq_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x5c,0x0a,0x7e]
+
+v_rsq_f32 v5, vcc_lo
+// CHECK: [0x6a,0x5c,0x0a,0x7e]
+
+v_rsq_f32 v5, vcc_hi
+// CHECK: [0x6b,0x5c,0x0a,0x7e]
+
+v_rsq_f32 v5, tba_lo
+// CHECK: [0x6c,0x5c,0x0a,0x7e]
+
+v_rsq_f32 v5, tba_hi
+// CHECK: [0x6d,0x5c,0x0a,0x7e]
+
+v_rsq_f32 v5, tma_lo
+// CHECK: [0x6e,0x5c,0x0a,0x7e]
+
+v_rsq_f32 v5, tma_hi
+// CHECK: [0x6f,0x5c,0x0a,0x7e]
+
+v_rsq_f32 v5, ttmp11
+// CHECK: [0x7b,0x5c,0x0a,0x7e]
+
+v_rsq_f32 v5, m0
+// CHECK: [0x7c,0x5c,0x0a,0x7e]
+
+v_rsq_f32 v5, exec_lo
+// CHECK: [0x7e,0x5c,0x0a,0x7e]
+
+v_rsq_f32 v5, exec_hi
+// CHECK: [0x7f,0x5c,0x0a,0x7e]
+
+v_rsq_f32 v5, 0
+// CHECK: [0x80,0x5c,0x0a,0x7e]
+
+v_rsq_f32 v5, -1
+// CHECK: [0xc1,0x5c,0x0a,0x7e]
+
+v_rsq_f32 v5, 0.5
+// CHECK: [0xf0,0x5c,0x0a,0x7e]
+
+v_rsq_f32 v5, -4.0
+// CHECK: [0xf7,0x5c,0x0a,0x7e]
+
+v_rsq_f32 v5, 0xaf123456
+// CHECK: [0xff,0x5c,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rsq_f32 v5, 0x3f717273
+// CHECK: [0xff,0x5c,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rsq_f32 v5, v1
+// CHECK: [0x01,0x5d,0x0a,0x7e]
+
+v_rsq_f32 v5, v255
+// CHECK: [0xff,0x5d,0x0a,0x7e]
+
+v_rsq_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x5c,0xd3,0x01,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x5c,0xd3,0x01,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x5c,0xd3,0x67,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x5c,0xd3,0x68,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x5c,0xd3,0x69,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x5c,0xd3,0x6a,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x5c,0xd3,0x6b,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x5c,0xd3,0x6c,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x5c,0xd3,0x6d,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x5c,0xd3,0x6e,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x5c,0xd3,0x6f,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x5c,0xd3,0x7b,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x5c,0xd3,0x7c,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x5c,0xd3,0x7e,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x5c,0xd3,0x7f,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x5c,0xd3,0xfd,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x5c,0xd3,0x01,0x01,0x00,0x00]
+
+v_rsq_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x5c,0xd3,0xff,0x01,0x00,0x00]
+
+v_rsq_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x5c,0xd3,0x01,0x00,0x00,0x20]
+
+v_rsq_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x5c,0xd3,0x01,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x08,0x5c,0xd3,0x01,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x5c,0xd3,0x01,0x00,0x00,0x08]
+
+v_rsq_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x5c,0xd3,0x01,0x00,0x00,0x10]
+
+v_rsq_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x5c,0xd3,0x01,0x00,0x00,0x18]
+
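+// Note: the f64 forms below reuse the same layouts; a 64-bit source is
+// encoded by the index of its first register (s[2:3] -> 0x02,
+// ttmp[10:11] -> 0x7a, v[1:2] -> 0x101), and the destination names a
+// VGPR pair while vdst still encodes only the first register (v[5:6] -> 5).
+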
+v_rcp_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x5e,0x0a,0x7e]
+
+v_rcp_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x5e,0xfc,0x7f]
+
+v_rcp_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x5e,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], s[102:103]
+// CHECK: [0x66,0x5e,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], flat_scratch
+// CHECK: [0x68,0x5e,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], vcc
+// CHECK: [0x6a,0x5e,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], tba
+// CHECK: [0x6c,0x5e,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], tma
+// CHECK: [0x6e,0x5e,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x5e,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], exec
+// CHECK: [0x7e,0x5e,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], 0
+// CHECK: [0x80,0x5e,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], -1
+// CHECK: [0xc1,0x5e,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x5e,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x5e,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x5e,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rcp_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x5e,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rcp_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x5f,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x5f,0x0a,0x7e]
+
+v_rcp_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x5e,0xd3,0x02,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x5e,0xd3,0x02,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x5e,0xd3,0x04,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], s[102:103]
+// CHECK: [0x05,0x00,0x5e,0xd3,0x66,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x5e,0xd3,0x68,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x5e,0xd3,0x6a,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x5e,0xd3,0x6c,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x5e,0xd3,0x6e,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x5e,0xd3,0x7a,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x5e,0xd3,0x7e,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x5e,0xd3,0xfd,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x5e,0xd3,0x01,0x01,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x5e,0xd3,0xfe,0x01,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x5e,0xd3,0x02,0x00,0x00,0x20]
+
+v_rcp_f64_e64 v[5:6], |s[2:3]|
+// CHECK: [0x05,0x01,0x5e,0xd3,0x02,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x08,0x5e,0xd3,0x02,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x5e,0xd3,0x02,0x00,0x00,0x08]
+
+v_rcp_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x5e,0xd3,0x02,0x00,0x00,0x10]
+
+v_rcp_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x5e,0xd3,0x02,0x00,0x00,0x18]
+
+v_rcp_clamp_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x60,0x0a,0x7e]
+
+v_rcp_clamp_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x60,0xfc,0x7f]
+
+v_rcp_clamp_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x60,0x0a,0x7e]
+
+v_rcp_clamp_f64 v[5:6], s[102:103]
+// CHECK: [0x66,0x60,0x0a,0x7e]
+
+v_rcp_clamp_f64 v[5:6], flat_scratch
+// CHECK: [0x68,0x60,0x0a,0x7e]
+
+v_rcp_clamp_f64 v[5:6], vcc
+// CHECK: [0x6a,0x60,0x0a,0x7e]
+
+v_rcp_clamp_f64 v[5:6], tba
+// CHECK: [0x6c,0x60,0x0a,0x7e]
+
+v_rcp_clamp_f64 v[5:6], tma
+// CHECK: [0x6e,0x60,0x0a,0x7e]
+
+v_rcp_clamp_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x60,0x0a,0x7e]
+
+v_rcp_clamp_f64 v[5:6], exec
+// CHECK: [0x7e,0x60,0x0a,0x7e]
+
+v_rcp_clamp_f64 v[5:6], 0
+// CHECK: [0x80,0x60,0x0a,0x7e]
+
+v_rcp_clamp_f64 v[5:6], -1
+// CHECK: [0xc1,0x60,0x0a,0x7e]
+
+v_rcp_clamp_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x60,0x0a,0x7e]
+
+v_rcp_clamp_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x60,0x0a,0x7e]
+
+v_rcp_clamp_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x60,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rcp_clamp_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x60,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rcp_clamp_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x61,0x0a,0x7e]
+
+v_rcp_clamp_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x61,0x0a,0x7e]
+
+v_rcp_clamp_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x60,0xd3,0x02,0x00,0x00,0x00]
+
+v_rcp_clamp_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x60,0xd3,0x02,0x00,0x00,0x00]
+
+v_rcp_clamp_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x60,0xd3,0x04,0x00,0x00,0x00]
+
+v_rcp_clamp_f64_e64 v[5:6], s[102:103]
+// CHECK: [0x05,0x00,0x60,0xd3,0x66,0x00,0x00,0x00]
+
+v_rcp_clamp_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x60,0xd3,0x68,0x00,0x00,0x00]
+
+v_rcp_clamp_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x60,0xd3,0x6a,0x00,0x00,0x00]
+
+v_rcp_clamp_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x60,0xd3,0x6c,0x00,0x00,0x00]
+
+v_rcp_clamp_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x60,0xd3,0x6e,0x00,0x00,0x00]
+
+v_rcp_clamp_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x60,0xd3,0x7a,0x00,0x00,0x00]
+
+v_rcp_clamp_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x60,0xd3,0x7e,0x00,0x00,0x00]
+
+v_rcp_clamp_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x60,0xd3,0xfd,0x00,0x00,0x00]
+
+v_rcp_clamp_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x60,0xd3,0x01,0x01,0x00,0x00]
+
+v_rcp_clamp_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x60,0xd3,0xfe,0x01,0x00,0x00]
+
+v_rcp_clamp_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x60,0xd3,0x02,0x00,0x00,0x20]
+
+v_rcp_clamp_f64_e64 v[5:6], |s[2:3]|
+// CHECK: [0x05,0x01,0x60,0xd3,0x02,0x00,0x00,0x00]
+
+v_rcp_clamp_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x08,0x60,0xd3,0x02,0x00,0x00,0x00]
+
+v_rcp_clamp_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x60,0xd3,0x02,0x00,0x00,0x08]
+
+v_rcp_clamp_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x60,0xd3,0x02,0x00,0x00,0x10]
+
+v_rcp_clamp_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x60,0xd3,0x02,0x00,0x00,0x18]
+
+v_rsq_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x62,0x0a,0x7e]
+
+v_rsq_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x62,0xfc,0x7f]
+
+v_rsq_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x62,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], s[102:103]
+// CHECK: [0x66,0x62,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], flat_scratch
+// CHECK: [0x68,0x62,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], vcc
+// CHECK: [0x6a,0x62,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], tba
+// CHECK: [0x6c,0x62,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], tma
+// CHECK: [0x6e,0x62,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x62,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], exec
+// CHECK: [0x7e,0x62,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], 0
+// CHECK: [0x80,0x62,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], -1
+// CHECK: [0xc1,0x62,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x62,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x62,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x62,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rsq_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x62,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rsq_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x63,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x63,0x0a,0x7e]
+
+v_rsq_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x62,0xd3,0x02,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x62,0xd3,0x02,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x62,0xd3,0x04,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], s[102:103]
+// CHECK: [0x05,0x00,0x62,0xd3,0x66,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x62,0xd3,0x68,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x62,0xd3,0x6a,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x62,0xd3,0x6c,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x62,0xd3,0x6e,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x62,0xd3,0x7a,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x62,0xd3,0x7e,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x62,0xd3,0xfd,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x62,0xd3,0x01,0x01,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x62,0xd3,0xfe,0x01,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x62,0xd3,0x02,0x00,0x00,0x20]
+
+v_rsq_f64_e64 v[5:6], |s[2:3]|
+// CHECK: [0x05,0x01,0x62,0xd3,0x02,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x08,0x62,0xd3,0x02,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x62,0xd3,0x02,0x00,0x00,0x08]
+
+v_rsq_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x62,0xd3,0x02,0x00,0x00,0x10]
+
+v_rsq_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x62,0xd3,0x02,0x00,0x00,0x18]
+
+v_rsq_clamp_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x64,0x0a,0x7e]
+
+v_rsq_clamp_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x64,0xfc,0x7f]
+
+v_rsq_clamp_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x64,0x0a,0x7e]
+
+v_rsq_clamp_f64 v[5:6], s[102:103]
+// CHECK: [0x66,0x64,0x0a,0x7e]
+
+v_rsq_clamp_f64 v[5:6], flat_scratch
+// CHECK: [0x68,0x64,0x0a,0x7e]
+
+v_rsq_clamp_f64 v[5:6], vcc
+// CHECK: [0x6a,0x64,0x0a,0x7e]
+
+v_rsq_clamp_f64 v[5:6], tba
+// CHECK: [0x6c,0x64,0x0a,0x7e]
+
+v_rsq_clamp_f64 v[5:6], tma
+// CHECK: [0x6e,0x64,0x0a,0x7e]
+
+v_rsq_clamp_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x64,0x0a,0x7e]
+
+v_rsq_clamp_f64 v[5:6], exec
+// CHECK: [0x7e,0x64,0x0a,0x7e]
+
+v_rsq_clamp_f64 v[5:6], 0
+// CHECK: [0x80,0x64,0x0a,0x7e]
+
+v_rsq_clamp_f64 v[5:6], -1
+// CHECK: [0xc1,0x64,0x0a,0x7e]
+
+v_rsq_clamp_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x64,0x0a,0x7e]
+
+v_rsq_clamp_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x64,0x0a,0x7e]
+
+v_rsq_clamp_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x64,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rsq_clamp_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x64,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rsq_clamp_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x65,0x0a,0x7e]
+
+v_rsq_clamp_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x65,0x0a,0x7e]
+
+v_rsq_clamp_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x64,0xd3,0x02,0x00,0x00,0x00]
+
+v_rsq_clamp_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x64,0xd3,0x02,0x00,0x00,0x00]
+
+v_rsq_clamp_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x64,0xd3,0x04,0x00,0x00,0x00]
+
+v_rsq_clamp_f64_e64 v[5:6], s[102:103]
+// CHECK: [0x05,0x00,0x64,0xd3,0x66,0x00,0x00,0x00]
+
+v_rsq_clamp_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x64,0xd3,0x68,0x00,0x00,0x00]
+
+v_rsq_clamp_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x64,0xd3,0x6a,0x00,0x00,0x00]
+
+v_rsq_clamp_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x64,0xd3,0x6c,0x00,0x00,0x00]
+
+v_rsq_clamp_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x64,0xd3,0x6e,0x00,0x00,0x00]
+
+v_rsq_clamp_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x64,0xd3,0x7a,0x00,0x00,0x00]
+
+v_rsq_clamp_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x64,0xd3,0x7e,0x00,0x00,0x00]
+
+v_rsq_clamp_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x64,0xd3,0xfd,0x00,0x00,0x00]
+
+v_rsq_clamp_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x64,0xd3,0x01,0x01,0x00,0x00]
+
+v_rsq_clamp_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x64,0xd3,0xfe,0x01,0x00,0x00]
+
+v_rsq_clamp_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x64,0xd3,0x02,0x00,0x00,0x20]
+
+v_rsq_clamp_f64_e64 v[5:6], |s[2:3]|
+// CHECK: [0x05,0x01,0x64,0xd3,0x02,0x00,0x00,0x00]
+
+v_rsq_clamp_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x08,0x64,0xd3,0x02,0x00,0x00,0x00]
+
+v_rsq_clamp_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x64,0xd3,0x02,0x00,0x00,0x08]
+
+v_rsq_clamp_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x64,0xd3,0x02,0x00,0x00,0x10]
+
+v_rsq_clamp_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x64,0xd3,0x02,0x00,0x00,0x18]
+
+v_sqrt_f32 v5, s1
+// CHECK: [0x01,0x66,0x0a,0x7e]
+
+v_sqrt_f32 v255, s1
+// CHECK: [0x01,0x66,0xfe,0x7f]
+
+v_sqrt_f32 v5, s103
+// CHECK: [0x67,0x66,0x0a,0x7e]
+
+v_sqrt_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x66,0x0a,0x7e]
+
+v_sqrt_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x66,0x0a,0x7e]
+
+v_sqrt_f32 v5, vcc_lo
+// CHECK: [0x6a,0x66,0x0a,0x7e]
+
+v_sqrt_f32 v5, vcc_hi
+// CHECK: [0x6b,0x66,0x0a,0x7e]
+
+v_sqrt_f32 v5, tba_lo
+// CHECK: [0x6c,0x66,0x0a,0x7e]
+
+v_sqrt_f32 v5, tba_hi
+// CHECK: [0x6d,0x66,0x0a,0x7e]
+
+v_sqrt_f32 v5, tma_lo
+// CHECK: [0x6e,0x66,0x0a,0x7e]
+
+v_sqrt_f32 v5, tma_hi
+// CHECK: [0x6f,0x66,0x0a,0x7e]
+
+v_sqrt_f32 v5, ttmp11
+// CHECK: [0x7b,0x66,0x0a,0x7e]
+
+v_sqrt_f32 v5, m0
+// CHECK: [0x7c,0x66,0x0a,0x7e]
+
+v_sqrt_f32 v5, exec_lo
+// CHECK: [0x7e,0x66,0x0a,0x7e]
+
+v_sqrt_f32 v5, exec_hi
+// CHECK: [0x7f,0x66,0x0a,0x7e]
+
+v_sqrt_f32 v5, 0
+// CHECK: [0x80,0x66,0x0a,0x7e]
+
+v_sqrt_f32 v5, -1
+// CHECK: [0xc1,0x66,0x0a,0x7e]
+
+v_sqrt_f32 v5, 0.5
+// CHECK: [0xf0,0x66,0x0a,0x7e]
+
+v_sqrt_f32 v5, -4.0
+// CHECK: [0xf7,0x66,0x0a,0x7e]
+
+v_sqrt_f32 v5, 0xaf123456
+// CHECK: [0xff,0x66,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_sqrt_f32 v5, 0x3f717273
+// CHECK: [0xff,0x66,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_sqrt_f32 v5, v1
+// CHECK: [0x01,0x67,0x0a,0x7e]
+
+v_sqrt_f32 v5, v255
+// CHECK: [0xff,0x67,0x0a,0x7e]
+
+v_sqrt_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x66,0xd3,0x01,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x66,0xd3,0x01,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x66,0xd3,0x67,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x66,0xd3,0x68,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x66,0xd3,0x69,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x66,0xd3,0x6a,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x66,0xd3,0x6b,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x66,0xd3,0x6c,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x66,0xd3,0x6d,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x66,0xd3,0x6e,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x66,0xd3,0x6f,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x66,0xd3,0x7b,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x66,0xd3,0x7c,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x66,0xd3,0x7e,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x66,0xd3,0x7f,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x66,0xd3,0xfd,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x66,0xd3,0x01,0x01,0x00,0x00]
+
+v_sqrt_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x66,0xd3,0xff,0x01,0x00,0x00]
+
+v_sqrt_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x66,0xd3,0x01,0x00,0x00,0x20]
+
+v_sqrt_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x66,0xd3,0x01,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x08,0x66,0xd3,0x01,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x66,0xd3,0x01,0x00,0x00,0x08]
+
+v_sqrt_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x66,0xd3,0x01,0x00,0x00,0x10]
+
+v_sqrt_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x66,0xd3,0x01,0x00,0x00,0x18]
+
+v_sqrt_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x68,0x0a,0x7e]
+
+v_sqrt_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x68,0xfc,0x7f]
+
+v_sqrt_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x68,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], s[102:103]
+// CHECK: [0x66,0x68,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], flat_scratch
+// CHECK: [0x68,0x68,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], vcc
+// CHECK: [0x6a,0x68,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], tba
+// CHECK: [0x6c,0x68,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], tma
+// CHECK: [0x6e,0x68,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x68,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], exec
+// CHECK: [0x7e,0x68,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], 0
+// CHECK: [0x80,0x68,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], -1
+// CHECK: [0xc1,0x68,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x68,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x68,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x68,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_sqrt_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x68,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_sqrt_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x69,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x69,0x0a,0x7e]
+
+v_sqrt_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x68,0xd3,0x02,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x68,0xd3,0x02,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x68,0xd3,0x04,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], s[102:103]
+// CHECK: [0x05,0x00,0x68,0xd3,0x66,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x68,0xd3,0x68,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x68,0xd3,0x6a,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x68,0xd3,0x6c,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x68,0xd3,0x6e,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x68,0xd3,0x7a,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x68,0xd3,0x7e,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x68,0xd3,0xfd,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x68,0xd3,0x01,0x01,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x68,0xd3,0xfe,0x01,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x68,0xd3,0x02,0x00,0x00,0x20]
+
+v_sqrt_f64_e64 v[5:6], |s[2:3]|
+// CHECK: [0x05,0x01,0x68,0xd3,0x02,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x08,0x68,0xd3,0x02,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x68,0xd3,0x02,0x00,0x00,0x08]
+
+v_sqrt_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x68,0xd3,0x02,0x00,0x00,0x10]
+
+v_sqrt_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x68,0xd3,0x02,0x00,0x00,0x18]
+
+v_sin_f32 v5, s1
+// CHECK: [0x01,0x6a,0x0a,0x7e]
+
+v_sin_f32 v255, s1
+// CHECK: [0x01,0x6a,0xfe,0x7f]
+
+v_sin_f32 v5, s103
+// CHECK: [0x67,0x6a,0x0a,0x7e]
+
+v_sin_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x6a,0x0a,0x7e]
+
+v_sin_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x6a,0x0a,0x7e]
+
+v_sin_f32 v5, vcc_lo
+// CHECK: [0x6a,0x6a,0x0a,0x7e]
+
+v_sin_f32 v5, vcc_hi
+// CHECK: [0x6b,0x6a,0x0a,0x7e]
+
+v_sin_f32 v5, tba_lo
+// CHECK: [0x6c,0x6a,0x0a,0x7e]
+
+v_sin_f32 v5, tba_hi
+// CHECK: [0x6d,0x6a,0x0a,0x7e]
+
+v_sin_f32 v5, tma_lo
+// CHECK: [0x6e,0x6a,0x0a,0x7e]
+
+v_sin_f32 v5, tma_hi
+// CHECK: [0x6f,0x6a,0x0a,0x7e]
+
+v_sin_f32 v5, ttmp11
+// CHECK: [0x7b,0x6a,0x0a,0x7e]
+
+v_sin_f32 v5, m0
+// CHECK: [0x7c,0x6a,0x0a,0x7e]
+
+v_sin_f32 v5, exec_lo
+// CHECK: [0x7e,0x6a,0x0a,0x7e]
+
+v_sin_f32 v5, exec_hi
+// CHECK: [0x7f,0x6a,0x0a,0x7e]
+
+v_sin_f32 v5, 0
+// CHECK: [0x80,0x6a,0x0a,0x7e]
+
+v_sin_f32 v5, -1
+// CHECK: [0xc1,0x6a,0x0a,0x7e]
+
+v_sin_f32 v5, 0.5
+// CHECK: [0xf0,0x6a,0x0a,0x7e]
+
+v_sin_f32 v5, -4.0
+// CHECK: [0xf7,0x6a,0x0a,0x7e]
+
+v_sin_f32 v5, 0xaf123456
+// CHECK: [0xff,0x6a,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_sin_f32 v5, 0x3f717273
+// CHECK: [0xff,0x6a,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_sin_f32 v5, v1
+// CHECK: [0x01,0x6b,0x0a,0x7e]
+
+v_sin_f32 v5, v255
+// CHECK: [0xff,0x6b,0x0a,0x7e]
+
+v_sin_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x6a,0xd3,0x01,0x00,0x00,0x00]
+
+v_sin_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x6a,0xd3,0x01,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x6a,0xd3,0x67,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x6a,0xd3,0x68,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x6a,0xd3,0x69,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x6a,0xd3,0x6a,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x6a,0xd3,0x6b,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x6a,0xd3,0x6c,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x6a,0xd3,0x6d,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x6a,0xd3,0x6e,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x6a,0xd3,0x6f,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x6a,0xd3,0x7b,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x6a,0xd3,0x7c,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x6a,0xd3,0x7e,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x6a,0xd3,0x7f,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x6a,0xd3,0xfd,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x6a,0xd3,0x01,0x01,0x00,0x00]
+
+v_sin_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x6a,0xd3,0xff,0x01,0x00,0x00]
+
+v_sin_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x6a,0xd3,0x01,0x00,0x00,0x20]
+
+v_sin_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x6a,0xd3,0x01,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x08,0x6a,0xd3,0x01,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x6a,0xd3,0x01,0x00,0x00,0x08]
+
+v_sin_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x6a,0xd3,0x01,0x00,0x00,0x10]
+
+v_sin_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x6a,0xd3,0x01,0x00,0x00,0x18]
+
+v_cos_f32 v5, s1
+// CHECK: [0x01,0x6c,0x0a,0x7e]
+
+v_cos_f32 v255, s1
+// CHECK: [0x01,0x6c,0xfe,0x7f]
+
+v_cos_f32 v5, s103
+// CHECK: [0x67,0x6c,0x0a,0x7e]
+
+v_cos_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x6c,0x0a,0x7e]
+
+v_cos_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x6c,0x0a,0x7e]
+
+v_cos_f32 v5, vcc_lo
+// CHECK: [0x6a,0x6c,0x0a,0x7e]
+
+v_cos_f32 v5, vcc_hi
+// CHECK: [0x6b,0x6c,0x0a,0x7e]
+
+v_cos_f32 v5, tba_lo
+// CHECK: [0x6c,0x6c,0x0a,0x7e]
+
+v_cos_f32 v5, tba_hi
+// CHECK: [0x6d,0x6c,0x0a,0x7e]
+
+v_cos_f32 v5, tma_lo
+// CHECK: [0x6e,0x6c,0x0a,0x7e]
+
+v_cos_f32 v5, tma_hi
+// CHECK: [0x6f,0x6c,0x0a,0x7e]
+
+v_cos_f32 v5, ttmp11
+// CHECK: [0x7b,0x6c,0x0a,0x7e]
+
+v_cos_f32 v5, m0
+// CHECK: [0x7c,0x6c,0x0a,0x7e]
+
+v_cos_f32 v5, exec_lo
+// CHECK: [0x7e,0x6c,0x0a,0x7e]
+
+v_cos_f32 v5, exec_hi
+// CHECK: [0x7f,0x6c,0x0a,0x7e]
+
+v_cos_f32 v5, 0
+// CHECK: [0x80,0x6c,0x0a,0x7e]
+
+v_cos_f32 v5, -1
+// CHECK: [0xc1,0x6c,0x0a,0x7e]
+
+v_cos_f32 v5, 0.5
+// CHECK: [0xf0,0x6c,0x0a,0x7e]
+
+v_cos_f32 v5, -4.0
+// CHECK: [0xf7,0x6c,0x0a,0x7e]
+
+v_cos_f32 v5, 0xaf123456
+// CHECK: [0xff,0x6c,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cos_f32 v5, 0x3f717273
+// CHECK: [0xff,0x6c,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cos_f32 v5, v1
+// CHECK: [0x01,0x6d,0x0a,0x7e]
+
+v_cos_f32 v5, v255
+// CHECK: [0xff,0x6d,0x0a,0x7e]
+
+v_cos_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x6c,0xd3,0x01,0x00,0x00,0x00]
+
+v_cos_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x6c,0xd3,0x01,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x6c,0xd3,0x67,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x6c,0xd3,0x68,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x6c,0xd3,0x69,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x6c,0xd3,0x6a,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x6c,0xd3,0x6b,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x6c,0xd3,0x6c,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x6c,0xd3,0x6d,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x6c,0xd3,0x6e,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x6c,0xd3,0x6f,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x6c,0xd3,0x7b,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x6c,0xd3,0x7c,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x6c,0xd3,0x7e,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x6c,0xd3,0x7f,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x6c,0xd3,0xfd,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x6c,0xd3,0x01,0x01,0x00,0x00]
+
+v_cos_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x6c,0xd3,0xff,0x01,0x00,0x00]
+
+v_cos_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x6c,0xd3,0x01,0x00,0x00,0x20]
+
+v_cos_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x6c,0xd3,0x01,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x08,0x6c,0xd3,0x01,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x6c,0xd3,0x01,0x00,0x00,0x08]
+
+v_cos_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x6c,0xd3,0x01,0x00,0x00,0x10]
+
+v_cos_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x6c,0xd3,0x01,0x00,0x00,0x18]
+
+v_not_b32 v5, s1
+// CHECK: [0x01,0x6e,0x0a,0x7e]
+
+v_not_b32 v255, s1
+// CHECK: [0x01,0x6e,0xfe,0x7f]
+
+v_not_b32 v5, s103
+// CHECK: [0x67,0x6e,0x0a,0x7e]
+
+v_not_b32 v5, flat_scratch_lo
+// CHECK: [0x68,0x6e,0x0a,0x7e]
+
+v_not_b32 v5, flat_scratch_hi
+// CHECK: [0x69,0x6e,0x0a,0x7e]
+
+v_not_b32 v5, vcc_lo
+// CHECK: [0x6a,0x6e,0x0a,0x7e]
+
+v_not_b32 v5, vcc_hi
+// CHECK: [0x6b,0x6e,0x0a,0x7e]
+
+v_not_b32 v5, tba_lo
+// CHECK: [0x6c,0x6e,0x0a,0x7e]
+
+v_not_b32 v5, tba_hi
+// CHECK: [0x6d,0x6e,0x0a,0x7e]
+
+v_not_b32 v5, tma_lo
+// CHECK: [0x6e,0x6e,0x0a,0x7e]
+
+v_not_b32 v5, tma_hi
+// CHECK: [0x6f,0x6e,0x0a,0x7e]
+
+v_not_b32 v5, ttmp11
+// CHECK: [0x7b,0x6e,0x0a,0x7e]
+
+v_not_b32 v5, m0
+// CHECK: [0x7c,0x6e,0x0a,0x7e]
+
+v_not_b32 v5, exec_lo
+// CHECK: [0x7e,0x6e,0x0a,0x7e]
+
+v_not_b32 v5, exec_hi
+// CHECK: [0x7f,0x6e,0x0a,0x7e]
+
+v_not_b32 v5, 0
+// CHECK: [0x80,0x6e,0x0a,0x7e]
+
+v_not_b32 v5, -1
+// CHECK: [0xc1,0x6e,0x0a,0x7e]
+
+v_not_b32 v5, 0.5
+// CHECK: [0xf0,0x6e,0x0a,0x7e]
+
+v_not_b32 v5, -4.0
+// CHECK: [0xf7,0x6e,0x0a,0x7e]
+
+v_not_b32 v5, 0xaf123456
+// CHECK: [0xff,0x6e,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_not_b32 v5, 0x3f717273
+// CHECK: [0xff,0x6e,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_not_b32 v5, v1
+// CHECK: [0x01,0x6f,0x0a,0x7e]
+
+v_not_b32 v5, v255
+// CHECK: [0xff,0x6f,0x0a,0x7e]
+
+v_not_b32_e64 v5, s1
+// CHECK: [0x05,0x00,0x6e,0xd3,0x01,0x00,0x00,0x00]
+
+v_not_b32_e64 v255, s1
+// CHECK: [0xff,0x00,0x6e,0xd3,0x01,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, s103
+// CHECK: [0x05,0x00,0x6e,0xd3,0x67,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x6e,0xd3,0x68,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x6e,0xd3,0x69,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x6e,0xd3,0x6a,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x6e,0xd3,0x6b,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x6e,0xd3,0x6c,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x6e,0xd3,0x6d,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x6e,0xd3,0x6e,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x6e,0xd3,0x6f,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x6e,0xd3,0x7b,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, m0
+// CHECK: [0x05,0x00,0x6e,0xd3,0x7c,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x6e,0xd3,0x7e,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x6e,0xd3,0x7f,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, 0
+// CHECK: [0x05,0x00,0x6e,0xd3,0x80,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, -1
+// CHECK: [0x05,0x00,0x6e,0xd3,0xc1,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x6e,0xd3,0xf0,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x6e,0xd3,0xf7,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, v1
+// CHECK: [0x05,0x00,0x6e,0xd3,0x01,0x01,0x00,0x00]
+
+v_not_b32_e64 v5, v255
+// CHECK: [0x05,0x00,0x6e,0xd3,0xff,0x01,0x00,0x00]
+
+v_bfrev_b32 v5, s1
+// CHECK: [0x01,0x70,0x0a,0x7e]
+
+v_bfrev_b32 v255, s1
+// CHECK: [0x01,0x70,0xfe,0x7f]
+
+v_bfrev_b32 v5, s103
+// CHECK: [0x67,0x70,0x0a,0x7e]
+
+v_bfrev_b32 v5, flat_scratch_lo
+// CHECK: [0x68,0x70,0x0a,0x7e]
+
+v_bfrev_b32 v5, flat_scratch_hi
+// CHECK: [0x69,0x70,0x0a,0x7e]
+
+v_bfrev_b32 v5, vcc_lo
+// CHECK: [0x6a,0x70,0x0a,0x7e]
+
+v_bfrev_b32 v5, vcc_hi
+// CHECK: [0x6b,0x70,0x0a,0x7e]
+
+v_bfrev_b32 v5, tba_lo
+// CHECK: [0x6c,0x70,0x0a,0x7e]
+
+v_bfrev_b32 v5, tba_hi
+// CHECK: [0x6d,0x70,0x0a,0x7e]
+
+v_bfrev_b32 v5, tma_lo
+// CHECK: [0x6e,0x70,0x0a,0x7e]
+
+v_bfrev_b32 v5, tma_hi
+// CHECK: [0x6f,0x70,0x0a,0x7e]
+
+v_bfrev_b32 v5, ttmp11
+// CHECK: [0x7b,0x70,0x0a,0x7e]
+
+v_bfrev_b32 v5, m0
+// CHECK: [0x7c,0x70,0x0a,0x7e]
+
+v_bfrev_b32 v5, exec_lo
+// CHECK: [0x7e,0x70,0x0a,0x7e]
+
+v_bfrev_b32 v5, exec_hi
+// CHECK: [0x7f,0x70,0x0a,0x7e]
+
+v_bfrev_b32 v5, 0
+// CHECK: [0x80,0x70,0x0a,0x7e]
+
+v_bfrev_b32 v5, -1
+// CHECK: [0xc1,0x70,0x0a,0x7e]
+
+v_bfrev_b32 v5, 0.5
+// CHECK: [0xf0,0x70,0x0a,0x7e]
+
+v_bfrev_b32 v5, -4.0
+// CHECK: [0xf7,0x70,0x0a,0x7e]
+
+v_bfrev_b32 v5, 0xaf123456
+// CHECK: [0xff,0x70,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_bfrev_b32 v5, 0x3f717273
+// CHECK: [0xff,0x70,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_bfrev_b32 v5, v1
+// CHECK: [0x01,0x71,0x0a,0x7e]
+
+v_bfrev_b32 v5, v255
+// CHECK: [0xff,0x71,0x0a,0x7e]
+
+v_bfrev_b32_e64 v5, s1
+// CHECK: [0x05,0x00,0x70,0xd3,0x01,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v255, s1
+// CHECK: [0xff,0x00,0x70,0xd3,0x01,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, s103
+// CHECK: [0x05,0x00,0x70,0xd3,0x67,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x70,0xd3,0x68,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x70,0xd3,0x69,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x70,0xd3,0x6a,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x70,0xd3,0x6b,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x70,0xd3,0x6c,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x70,0xd3,0x6d,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x70,0xd3,0x6e,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x70,0xd3,0x6f,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x70,0xd3,0x7b,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, m0
+// CHECK: [0x05,0x00,0x70,0xd3,0x7c,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x70,0xd3,0x7e,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x70,0xd3,0x7f,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, 0
+// CHECK: [0x05,0x00,0x70,0xd3,0x80,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, -1
+// CHECK: [0x05,0x00,0x70,0xd3,0xc1,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x70,0xd3,0xf0,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x70,0xd3,0xf7,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, v1
+// CHECK: [0x05,0x00,0x70,0xd3,0x01,0x01,0x00,0x00]
+
+v_bfrev_b32_e64 v5, v255
+// CHECK: [0x05,0x00,0x70,0xd3,0xff,0x01,0x00,0x00]
+
+v_ffbh_u32 v5, s1
+// CHECK: [0x01,0x72,0x0a,0x7e]
+
+v_ffbh_u32 v255, s1
+// CHECK: [0x01,0x72,0xfe,0x7f]
+
+v_ffbh_u32 v5, s103
+// CHECK: [0x67,0x72,0x0a,0x7e]
+
+v_ffbh_u32 v5, flat_scratch_lo
+// CHECK: [0x68,0x72,0x0a,0x7e]
+
+v_ffbh_u32 v5, flat_scratch_hi
+// CHECK: [0x69,0x72,0x0a,0x7e]
+
+v_ffbh_u32 v5, vcc_lo
+// CHECK: [0x6a,0x72,0x0a,0x7e]
+
+v_ffbh_u32 v5, vcc_hi
+// CHECK: [0x6b,0x72,0x0a,0x7e]
+
+v_ffbh_u32 v5, tba_lo
+// CHECK: [0x6c,0x72,0x0a,0x7e]
+
+v_ffbh_u32 v5, tba_hi
+// CHECK: [0x6d,0x72,0x0a,0x7e]
+
+v_ffbh_u32 v5, tma_lo
+// CHECK: [0x6e,0x72,0x0a,0x7e]
+
+v_ffbh_u32 v5, tma_hi
+// CHECK: [0x6f,0x72,0x0a,0x7e]
+
+v_ffbh_u32 v5, ttmp11
+// CHECK: [0x7b,0x72,0x0a,0x7e]
+
+v_ffbh_u32 v5, m0
+// CHECK: [0x7c,0x72,0x0a,0x7e]
+
+v_ffbh_u32 v5, exec_lo
+// CHECK: [0x7e,0x72,0x0a,0x7e]
+
+v_ffbh_u32 v5, exec_hi
+// CHECK: [0x7f,0x72,0x0a,0x7e]
+
+v_ffbh_u32 v5, 0
+// CHECK: [0x80,0x72,0x0a,0x7e]
+
+v_ffbh_u32 v5, -1
+// CHECK: [0xc1,0x72,0x0a,0x7e]
+
+v_ffbh_u32 v5, 0.5
+// CHECK: [0xf0,0x72,0x0a,0x7e]
+
+v_ffbh_u32 v5, -4.0
+// CHECK: [0xf7,0x72,0x0a,0x7e]
+
+v_ffbh_u32 v5, 0xaf123456
+// CHECK: [0xff,0x72,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_ffbh_u32 v5, 0x3f717273
+// CHECK: [0xff,0x72,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_ffbh_u32 v5, v1
+// CHECK: [0x01,0x73,0x0a,0x7e]
+
+v_ffbh_u32 v5, v255
+// CHECK: [0xff,0x73,0x0a,0x7e]
+
+v_ffbh_u32_e64 v5, s1
+// CHECK: [0x05,0x00,0x72,0xd3,0x01,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v255, s1
+// CHECK: [0xff,0x00,0x72,0xd3,0x01,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, s103
+// CHECK: [0x05,0x00,0x72,0xd3,0x67,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x72,0xd3,0x68,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x72,0xd3,0x69,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x72,0xd3,0x6a,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x72,0xd3,0x6b,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x72,0xd3,0x6c,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x72,0xd3,0x6d,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x72,0xd3,0x6e,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x72,0xd3,0x6f,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x72,0xd3,0x7b,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, m0
+// CHECK: [0x05,0x00,0x72,0xd3,0x7c,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x72,0xd3,0x7e,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x72,0xd3,0x7f,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, 0
+// CHECK: [0x05,0x00,0x72,0xd3,0x80,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, -1
+// CHECK: [0x05,0x00,0x72,0xd3,0xc1,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x72,0xd3,0xf0,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x72,0xd3,0xf7,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, v1
+// CHECK: [0x05,0x00,0x72,0xd3,0x01,0x01,0x00,0x00]
+
+v_ffbh_u32_e64 v5, v255
+// CHECK: [0x05,0x00,0x72,0xd3,0xff,0x01,0x00,0x00]
+
+v_ffbl_b32 v5, s1
+// CHECK: [0x01,0x74,0x0a,0x7e]
+
+v_ffbl_b32 v255, s1
+// CHECK: [0x01,0x74,0xfe,0x7f]
+
+v_ffbl_b32 v5, s103
+// CHECK: [0x67,0x74,0x0a,0x7e]
+
+v_ffbl_b32 v5, flat_scratch_lo
+// CHECK: [0x68,0x74,0x0a,0x7e]
+
+v_ffbl_b32 v5, flat_scratch_hi
+// CHECK: [0x69,0x74,0x0a,0x7e]
+
+v_ffbl_b32 v5, vcc_lo
+// CHECK: [0x6a,0x74,0x0a,0x7e]
+
+v_ffbl_b32 v5, vcc_hi
+// CHECK: [0x6b,0x74,0x0a,0x7e]
+
+v_ffbl_b32 v5, tba_lo
+// CHECK: [0x6c,0x74,0x0a,0x7e]
+
+v_ffbl_b32 v5, tba_hi
+// CHECK: [0x6d,0x74,0x0a,0x7e]
+
+v_ffbl_b32 v5, tma_lo
+// CHECK: [0x6e,0x74,0x0a,0x7e]
+
+v_ffbl_b32 v5, tma_hi
+// CHECK: [0x6f,0x74,0x0a,0x7e]
+
+v_ffbl_b32 v5, ttmp11
+// CHECK: [0x7b,0x74,0x0a,0x7e]
+
+v_ffbl_b32 v5, m0
+// CHECK: [0x7c,0x74,0x0a,0x7e]
+
+v_ffbl_b32 v5, exec_lo
+// CHECK: [0x7e,0x74,0x0a,0x7e]
+
+v_ffbl_b32 v5, exec_hi
+// CHECK: [0x7f,0x74,0x0a,0x7e]
+
+v_ffbl_b32 v5, 0
+// CHECK: [0x80,0x74,0x0a,0x7e]
+
+v_ffbl_b32 v5, -1
+// CHECK: [0xc1,0x74,0x0a,0x7e]
+
+v_ffbl_b32 v5, 0.5
+// CHECK: [0xf0,0x74,0x0a,0x7e]
+
+v_ffbl_b32 v5, -4.0
+// CHECK: [0xf7,0x74,0x0a,0x7e]
+
+v_ffbl_b32 v5, 0xaf123456
+// CHECK: [0xff,0x74,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_ffbl_b32 v5, 0x3f717273
+// CHECK: [0xff,0x74,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_ffbl_b32 v5, v1
+// CHECK: [0x01,0x75,0x0a,0x7e]
+
+v_ffbl_b32 v5, v255
+// CHECK: [0xff,0x75,0x0a,0x7e]
+
+v_ffbl_b32_e64 v5, s1
+// CHECK: [0x05,0x00,0x74,0xd3,0x01,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v255, s1
+// CHECK: [0xff,0x00,0x74,0xd3,0x01,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, s103
+// CHECK: [0x05,0x00,0x74,0xd3,0x67,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x74,0xd3,0x68,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x74,0xd3,0x69,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x74,0xd3,0x6a,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x74,0xd3,0x6b,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x74,0xd3,0x6c,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x74,0xd3,0x6d,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x74,0xd3,0x6e,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x74,0xd3,0x6f,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x74,0xd3,0x7b,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, m0
+// CHECK: [0x05,0x00,0x74,0xd3,0x7c,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x74,0xd3,0x7e,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x74,0xd3,0x7f,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, 0
+// CHECK: [0x05,0x00,0x74,0xd3,0x80,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, -1
+// CHECK: [0x05,0x00,0x74,0xd3,0xc1,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x74,0xd3,0xf0,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x74,0xd3,0xf7,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, v1
+// CHECK: [0x05,0x00,0x74,0xd3,0x01,0x01,0x00,0x00]
+
+v_ffbl_b32_e64 v5, v255
+// CHECK: [0x05,0x00,0x74,0xd3,0xff,0x01,0x00,0x00]
+
+v_ffbh_i32 v5, s1
+// CHECK: [0x01,0x76,0x0a,0x7e]
+
+v_ffbh_i32 v255, s1
+// CHECK: [0x01,0x76,0xfe,0x7f]
+
+v_ffbh_i32 v5, s103
+// CHECK: [0x67,0x76,0x0a,0x7e]
+
+v_ffbh_i32 v5, flat_scratch_lo
+// CHECK: [0x68,0x76,0x0a,0x7e]
+
+v_ffbh_i32 v5, flat_scratch_hi
+// CHECK: [0x69,0x76,0x0a,0x7e]
+
+v_ffbh_i32 v5, vcc_lo
+// CHECK: [0x6a,0x76,0x0a,0x7e]
+
+v_ffbh_i32 v5, vcc_hi
+// CHECK: [0x6b,0x76,0x0a,0x7e]
+
+v_ffbh_i32 v5, tba_lo
+// CHECK: [0x6c,0x76,0x0a,0x7e]
+
+v_ffbh_i32 v5, tba_hi
+// CHECK: [0x6d,0x76,0x0a,0x7e]
+
+v_ffbh_i32 v5, tma_lo
+// CHECK: [0x6e,0x76,0x0a,0x7e]
+
+v_ffbh_i32 v5, tma_hi
+// CHECK: [0x6f,0x76,0x0a,0x7e]
+
+v_ffbh_i32 v5, ttmp11
+// CHECK: [0x7b,0x76,0x0a,0x7e]
+
+v_ffbh_i32 v5, m0
+// CHECK: [0x7c,0x76,0x0a,0x7e]
+
+v_ffbh_i32 v5, exec_lo
+// CHECK: [0x7e,0x76,0x0a,0x7e]
+
+v_ffbh_i32 v5, exec_hi
+// CHECK: [0x7f,0x76,0x0a,0x7e]
+
+v_ffbh_i32 v5, 0
+// CHECK: [0x80,0x76,0x0a,0x7e]
+
+v_ffbh_i32 v5, -1
+// CHECK: [0xc1,0x76,0x0a,0x7e]
+
+v_ffbh_i32 v5, 0.5
+// CHECK: [0xf0,0x76,0x0a,0x7e]
+
+v_ffbh_i32 v5, -4.0
+// CHECK: [0xf7,0x76,0x0a,0x7e]
+
+v_ffbh_i32 v5, 0xaf123456
+// CHECK: [0xff,0x76,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_ffbh_i32 v5, 0x3f717273
+// CHECK: [0xff,0x76,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_ffbh_i32 v5, v1
+// CHECK: [0x01,0x77,0x0a,0x7e]
+
+v_ffbh_i32 v5, v255
+// CHECK: [0xff,0x77,0x0a,0x7e]
+
+v_ffbh_i32_e64 v5, s1
+// CHECK: [0x05,0x00,0x76,0xd3,0x01,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v255, s1
+// CHECK: [0xff,0x00,0x76,0xd3,0x01,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, s103
+// CHECK: [0x05,0x00,0x76,0xd3,0x67,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x76,0xd3,0x68,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x76,0xd3,0x69,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x76,0xd3,0x6a,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x76,0xd3,0x6b,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x76,0xd3,0x6c,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x76,0xd3,0x6d,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x76,0xd3,0x6e,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x76,0xd3,0x6f,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x76,0xd3,0x7b,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, m0
+// CHECK: [0x05,0x00,0x76,0xd3,0x7c,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x76,0xd3,0x7e,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x76,0xd3,0x7f,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, 0
+// CHECK: [0x05,0x00,0x76,0xd3,0x80,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, -1
+// CHECK: [0x05,0x00,0x76,0xd3,0xc1,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x76,0xd3,0xf0,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x76,0xd3,0xf7,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, v1
+// CHECK: [0x05,0x00,0x76,0xd3,0x01,0x01,0x00,0x00]
+
+v_ffbh_i32_e64 v5, v255
+// CHECK: [0x05,0x00,0x76,0xd3,0xff,0x01,0x00,0x00]
+
+v_frexp_exp_i32_f64 v5, s[2:3]
+// CHECK: [0x02,0x78,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v255, s[2:3]
+// CHECK: [0x02,0x78,0xfe,0x7f]
+
+v_frexp_exp_i32_f64 v5, s[4:5]
+// CHECK: [0x04,0x78,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, s[102:103]
+// CHECK: [0x66,0x78,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, flat_scratch
+// CHECK: [0x68,0x78,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, vcc
+// CHECK: [0x6a,0x78,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, tba
+// CHECK: [0x6c,0x78,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, tma
+// CHECK: [0x6e,0x78,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, ttmp[10:11]
+// CHECK: [0x7a,0x78,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, exec
+// CHECK: [0x7e,0x78,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, 0
+// CHECK: [0x80,0x78,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, -1
+// CHECK: [0xc1,0x78,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, 0.5
+// CHECK: [0xf0,0x78,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, -4.0
+// CHECK: [0xf7,0x78,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, 0xaf123456
+// CHECK: [0xff,0x78,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_frexp_exp_i32_f64 v5, 0x3f717273
+// CHECK: [0xff,0x78,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_frexp_exp_i32_f64 v5, v[1:2]
+// CHECK: [0x01,0x79,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, v[254:255]
+// CHECK: [0xfe,0x79,0x0a,0x7e]
+
+v_frexp_exp_i32_f64_e64 v5, s[2:3]
+// CHECK: [0x05,0x00,0x78,0xd3,0x02,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v255, s[2:3]
+// CHECK: [0xff,0x00,0x78,0xd3,0x02,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, s[4:5]
+// CHECK: [0x05,0x00,0x78,0xd3,0x04,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, s[102:103]
+// CHECK: [0x05,0x00,0x78,0xd3,0x66,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, flat_scratch
+// CHECK: [0x05,0x00,0x78,0xd3,0x68,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, vcc
+// CHECK: [0x05,0x00,0x78,0xd3,0x6a,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, tba
+// CHECK: [0x05,0x00,0x78,0xd3,0x6c,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, tma
+// CHECK: [0x05,0x00,0x78,0xd3,0x6e,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, ttmp[10:11]
+// CHECK: [0x05,0x00,0x78,0xd3,0x7a,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, exec
+// CHECK: [0x05,0x00,0x78,0xd3,0x7e,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, scc
+// CHECK: [0x05,0x00,0x78,0xd3,0xfd,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, v[1:2]
+// CHECK: [0x05,0x00,0x78,0xd3,0x01,0x01,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, v[254:255]
+// CHECK: [0x05,0x00,0x78,0xd3,0xfe,0x01,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, -s[2:3]
+// CHECK: [0x05,0x00,0x78,0xd3,0x02,0x00,0x00,0x20]
+
+v_frexp_exp_i32_f64_e64 v5, |s[2:3]|
+// CHECK: [0x05,0x01,0x78,0xd3,0x02,0x00,0x00,0x00]
+
+v_frexp_mant_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x7a,0x0a,0x7e]
+
+v_frexp_mant_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x7a,0xfc,0x7f]
+
+v_frexp_mant_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x7a,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], s[102:103]
+// CHECK: [0x66,0x7a,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], flat_scratch
+// CHECK: [0x68,0x7a,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], vcc
+// CHECK: [0x6a,0x7a,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], tba
+// CHECK: [0x6c,0x7a,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], tma
+// CHECK: [0x6e,0x7a,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x7a,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], exec
+// CHECK: [0x7e,0x7a,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], 0
+// CHECK: [0x80,0x7a,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], -1
+// CHECK: [0xc1,0x7a,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x7a,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x7a,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x7a,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_frexp_mant_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x7a,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_frexp_mant_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x7b,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x7b,0x0a,0x7e]
+
+v_frexp_mant_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x7a,0xd3,0x02,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x7a,0xd3,0x02,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x7a,0xd3,0x04,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], s[102:103]
+// CHECK: [0x05,0x00,0x7a,0xd3,0x66,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x7a,0xd3,0x68,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x7a,0xd3,0x6a,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x7a,0xd3,0x6c,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x7a,0xd3,0x6e,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x7a,0xd3,0x7a,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x7a,0xd3,0x7e,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x7a,0xd3,0xfd,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x7a,0xd3,0x01,0x01,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x7a,0xd3,0xfe,0x01,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x7a,0xd3,0x02,0x00,0x00,0x20]
+
+v_frexp_mant_f64_e64 v[5:6], |s[2:3]|
+// CHECK: [0x05,0x01,0x7a,0xd3,0x02,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x08,0x7a,0xd3,0x02,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x7a,0xd3,0x02,0x00,0x00,0x08]
+
+v_frexp_mant_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x7a,0xd3,0x02,0x00,0x00,0x10]
+
+v_frexp_mant_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x7a,0xd3,0x02,0x00,0x00,0x18]
+
+v_fract_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x7c,0x0a,0x7e]
+
+v_fract_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x7c,0xfc,0x7f]
+
+v_fract_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x7c,0x0a,0x7e]
+
+v_fract_f64 v[5:6], s[102:103]
+// CHECK: [0x66,0x7c,0x0a,0x7e]
+
+v_fract_f64 v[5:6], flat_scratch
+// CHECK: [0x68,0x7c,0x0a,0x7e]
+
+v_fract_f64 v[5:6], vcc
+// CHECK: [0x6a,0x7c,0x0a,0x7e]
+
+v_fract_f64 v[5:6], tba
+// CHECK: [0x6c,0x7c,0x0a,0x7e]
+
+v_fract_f64 v[5:6], tma
+// CHECK: [0x6e,0x7c,0x0a,0x7e]
+
+v_fract_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x7c,0x0a,0x7e]
+
+v_fract_f64 v[5:6], exec
+// CHECK: [0x7e,0x7c,0x0a,0x7e]
+
+v_fract_f64 v[5:6], 0
+// CHECK: [0x80,0x7c,0x0a,0x7e]
+
+v_fract_f64 v[5:6], -1
+// CHECK: [0xc1,0x7c,0x0a,0x7e]
+
+v_fract_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x7c,0x0a,0x7e]
+
+v_fract_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x7c,0x0a,0x7e]
+
+v_fract_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x7c,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_fract_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x7c,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_fract_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x7d,0x0a,0x7e]
+
+v_fract_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x7d,0x0a,0x7e]
+
+v_fract_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x7c,0xd3,0x02,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x7c,0xd3,0x02,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x7c,0xd3,0x04,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], s[102:103]
+// CHECK: [0x05,0x00,0x7c,0xd3,0x66,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x7c,0xd3,0x68,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x7c,0xd3,0x6a,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x7c,0xd3,0x6c,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x7c,0xd3,0x6e,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x7c,0xd3,0x7a,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x7c,0xd3,0x7e,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x7c,0xd3,0xfd,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x7c,0xd3,0x01,0x01,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x7c,0xd3,0xfe,0x01,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x7c,0xd3,0x02,0x00,0x00,0x20]
+
+v_fract_f64_e64 v[5:6], |s[2:3]|
+// CHECK: [0x05,0x01,0x7c,0xd3,0x02,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x08,0x7c,0xd3,0x02,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x7c,0xd3,0x02,0x00,0x00,0x08]
+
+v_fract_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x7c,0xd3,0x02,0x00,0x00,0x10]
+
+v_fract_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x7c,0xd3,0x02,0x00,0x00,0x18]
+
+v_frexp_exp_i32_f32 v5, s1
+// CHECK: [0x01,0x7e,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v255, s1
+// CHECK: [0x01,0x7e,0xfe,0x7f]
+
+v_frexp_exp_i32_f32 v5, s103
+// CHECK: [0x67,0x7e,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x7e,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x7e,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, vcc_lo
+// CHECK: [0x6a,0x7e,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, vcc_hi
+// CHECK: [0x6b,0x7e,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, tba_lo
+// CHECK: [0x6c,0x7e,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, tba_hi
+// CHECK: [0x6d,0x7e,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, tma_lo
+// CHECK: [0x6e,0x7e,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, tma_hi
+// CHECK: [0x6f,0x7e,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, ttmp11
+// CHECK: [0x7b,0x7e,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, m0
+// CHECK: [0x7c,0x7e,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, exec_lo
+// CHECK: [0x7e,0x7e,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, exec_hi
+// CHECK: [0x7f,0x7e,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, 0
+// CHECK: [0x80,0x7e,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, -1
+// CHECK: [0xc1,0x7e,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, 0.5
+// CHECK: [0xf0,0x7e,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, -4.0
+// CHECK: [0xf7,0x7e,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, 0xaf123456
+// CHECK: [0xff,0x7e,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_frexp_exp_i32_f32 v5, 0x3f717273
+// CHECK: [0xff,0x7e,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_frexp_exp_i32_f32 v5, v1
+// CHECK: [0x01,0x7f,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, v255
+// CHECK: [0xff,0x7f,0x0a,0x7e]
+
+v_frexp_exp_i32_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x7e,0xd3,0x01,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x7e,0xd3,0x01,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x7e,0xd3,0x67,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x7e,0xd3,0x68,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x7e,0xd3,0x69,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x7e,0xd3,0x6a,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x7e,0xd3,0x6b,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x7e,0xd3,0x6c,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x7e,0xd3,0x6d,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x7e,0xd3,0x6e,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x7e,0xd3,0x6f,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x7e,0xd3,0x7b,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x7e,0xd3,0x7c,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x7e,0xd3,0x7e,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x7e,0xd3,0x7f,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, 0
+// CHECK: [0x05,0x00,0x7e,0xd3,0x80,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x7e,0xd3,0xf0,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x7e,0xd3,0xfd,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x7e,0xd3,0x01,0x01,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x7e,0xd3,0xff,0x01,0x00,0x00]
+
+v_frexp_mant_f32 v5, s1
+// CHECK: [0x01,0x80,0x0a,0x7e]
+
+v_frexp_mant_f32 v255, s1
+// CHECK: [0x01,0x80,0xfe,0x7f]
+
+v_frexp_mant_f32 v5, s103
+// CHECK: [0x67,0x80,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x80,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x80,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, vcc_lo
+// CHECK: [0x6a,0x80,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, vcc_hi
+// CHECK: [0x6b,0x80,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, tba_lo
+// CHECK: [0x6c,0x80,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, tba_hi
+// CHECK: [0x6d,0x80,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, tma_lo
+// CHECK: [0x6e,0x80,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, tma_hi
+// CHECK: [0x6f,0x80,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, ttmp11
+// CHECK: [0x7b,0x80,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, m0
+// CHECK: [0x7c,0x80,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, exec_lo
+// CHECK: [0x7e,0x80,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, exec_hi
+// CHECK: [0x7f,0x80,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, 0
+// CHECK: [0x80,0x80,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, -1
+// CHECK: [0xc1,0x80,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, 0.5
+// CHECK: [0xf0,0x80,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, -4.0
+// CHECK: [0xf7,0x80,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, 0xaf123456
+// CHECK: [0xff,0x80,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_frexp_mant_f32 v5, 0x3f717273
+// CHECK: [0xff,0x80,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_frexp_mant_f32 v5, v1
+// CHECK: [0x01,0x81,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, v255
+// CHECK: [0xff,0x81,0x0a,0x7e]
+
+v_frexp_mant_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x80,0xd3,0x01,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x80,0xd3,0x01,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x80,0xd3,0x67,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x80,0xd3,0x68,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x80,0xd3,0x69,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x80,0xd3,0x6a,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x80,0xd3,0x6b,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x80,0xd3,0x6c,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x80,0xd3,0x6d,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x80,0xd3,0x6e,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x80,0xd3,0x6f,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x80,0xd3,0x7b,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x80,0xd3,0x7c,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x80,0xd3,0x7e,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x80,0xd3,0x7f,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, 0
+// CHECK: [0x05,0x00,0x80,0xd3,0x80,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x80,0xd3,0xf0,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x80,0xd3,0xfd,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x80,0xd3,0x01,0x01,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x80,0xd3,0xff,0x01,0x00,0x00]
+
+v_clrexcp
+// CHECK: [0x00,0x82,0x00,0x7e]
+
+v_clrexcp_e64
+// CHECK: [0x00,0x00,0x82,0xd3,0x00,0x00,0x00,0x00]
+
+v_movreld_b32 v5, m0
+// CHECK: [0x7c,0x84,0x0a,0x7e]
+
+v_movreld_b32 v255, m0
+// CHECK: [0x7c,0x84,0xfe,0x7f]
+
+v_movreld_b32 v5, 0
+// CHECK: [0x80,0x84,0x0a,0x7e]
+
+v_movreld_b32 v5, -1
+// CHECK: [0xc1,0x84,0x0a,0x7e]
+
+v_movreld_b32 v5, 0.5
+// CHECK: [0xf0,0x84,0x0a,0x7e]
+
+v_movreld_b32 v5, -4.0
+// CHECK: [0xf7,0x84,0x0a,0x7e]
+
+v_movreld_b32 v5, v1
+// CHECK: [0x01,0x85,0x0a,0x7e]
+
+v_movreld_b32 v5, v255
+// CHECK: [0xff,0x85,0x0a,0x7e]
+
+v_movreld_b32_e64 v5, m0
+// CHECK: [0x05,0x00,0x84,0xd3,0x7c,0x00,0x00,0x00]
+
+v_movreld_b32_e64 v255, m0
+// CHECK: [0xff,0x00,0x84,0xd3,0x7c,0x00,0x00,0x00]
+
+v_movreld_b32_e64 v5, 0
+// CHECK: [0x05,0x00,0x84,0xd3,0x80,0x00,0x00,0x00]
+
+v_movreld_b32_e64 v5, -1
+// CHECK: [0x05,0x00,0x84,0xd3,0xc1,0x00,0x00,0x00]
+
+v_movreld_b32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x84,0xd3,0xf0,0x00,0x00,0x00]
+
+v_movreld_b32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x84,0xd3,0xf7,0x00,0x00,0x00]
+
+v_movreld_b32_e64 v5, v1
+// CHECK: [0x05,0x00,0x84,0xd3,0x01,0x01,0x00,0x00]
+
+v_movreld_b32_e64 v5, v255
+// CHECK: [0x05,0x00,0x84,0xd3,0xff,0x01,0x00,0x00]
+
+v_movrels_b32 v5, v1
+// CHECK: [0x01,0x87,0x0a,0x7e]
+
+v_movrels_b32 v255, v1
+// CHECK: [0x01,0x87,0xfe,0x7f]
+
+v_movrels_b32 v5, v255
+// CHECK: [0xff,0x87,0x0a,0x7e]
+
+v_movrels_b32_e64 v5, v1
+// CHECK: [0x05,0x00,0x86,0xd3,0x01,0x01,0x00,0x00]
+
+v_movrels_b32_e64 v255, v1
+// CHECK: [0xff,0x00,0x86,0xd3,0x01,0x01,0x00,0x00]
+
+v_movrels_b32_e64 v5, v255
+// CHECK: [0x05,0x00,0x86,0xd3,0xff,0x01,0x00,0x00]
+
+v_movrelsd_b32 v5, v1
+// CHECK: [0x01,0x89,0x0a,0x7e]
+
+v_movrelsd_b32 v255, v1
+// CHECK: [0x01,0x89,0xfe,0x7f]
+
+v_movrelsd_b32 v5, v255
+// CHECK: [0xff,0x89,0x0a,0x7e]
+
+v_movrelsd_b32_e64 v5, v1
+// CHECK: [0x05,0x00,0x88,0xd3,0x01,0x01,0x00,0x00]
+
+v_movrelsd_b32_e64 v255, v1
+// CHECK: [0xff,0x00,0x88,0xd3,0x01,0x01,0x00,0x00]
+
+v_movrelsd_b32_e64 v5, v255
+// CHECK: [0x05,0x00,0x88,0xd3,0xff,0x01,0x00,0x00]
+
+v_log_legacy_f32 v5, s1
+// CHECK: [0x01,0x8a,0x0a,0x7e]
+
+v_log_legacy_f32 v255, s1
+// CHECK: [0x01,0x8a,0xfe,0x7f]
+
+v_log_legacy_f32 v5, s103
+// CHECK: [0x67,0x8a,0x0a,0x7e]
+
+v_log_legacy_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x8a,0x0a,0x7e]
+
+v_log_legacy_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x8a,0x0a,0x7e]
+
+v_log_legacy_f32 v5, vcc_lo
+// CHECK: [0x6a,0x8a,0x0a,0x7e]
+
+v_log_legacy_f32 v5, vcc_hi
+// CHECK: [0x6b,0x8a,0x0a,0x7e]
+
+v_log_legacy_f32 v5, tba_lo
+// CHECK: [0x6c,0x8a,0x0a,0x7e]
+
+v_log_legacy_f32 v5, tba_hi
+// CHECK: [0x6d,0x8a,0x0a,0x7e]
+
+v_log_legacy_f32 v5, tma_lo
+// CHECK: [0x6e,0x8a,0x0a,0x7e]
+
+v_log_legacy_f32 v5, tma_hi
+// CHECK: [0x6f,0x8a,0x0a,0x7e]
+
+v_log_legacy_f32 v5, ttmp11
+// CHECK: [0x7b,0x8a,0x0a,0x7e]
+
+v_log_legacy_f32 v5, m0
+// CHECK: [0x7c,0x8a,0x0a,0x7e]
+
+v_log_legacy_f32 v5, exec_lo
+// CHECK: [0x7e,0x8a,0x0a,0x7e]
+
+v_log_legacy_f32 v5, exec_hi
+// CHECK: [0x7f,0x8a,0x0a,0x7e]
+
+v_log_legacy_f32 v5, 0
+// CHECK: [0x80,0x8a,0x0a,0x7e]
+
+v_log_legacy_f32 v5, -1
+// CHECK: [0xc1,0x8a,0x0a,0x7e]
+
+v_log_legacy_f32 v5, 0.5
+// CHECK: [0xf0,0x8a,0x0a,0x7e]
+
+v_log_legacy_f32 v5, -4.0
+// CHECK: [0xf7,0x8a,0x0a,0x7e]
+
+v_log_legacy_f32 v5, 0xaf123456
+// CHECK: [0xff,0x8a,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_log_legacy_f32 v5, 0x3f717273
+// CHECK: [0xff,0x8a,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_log_legacy_f32 v5, v1
+// CHECK: [0x01,0x8b,0x0a,0x7e]
+
+v_log_legacy_f32 v5, v255
+// CHECK: [0xff,0x8b,0x0a,0x7e]
+
+v_log_legacy_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x8a,0xd3,0x01,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x8a,0xd3,0x01,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x8a,0xd3,0x67,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x8a,0xd3,0x68,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x8a,0xd3,0x69,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x8a,0xd3,0x6a,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x8a,0xd3,0x6b,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x8a,0xd3,0x6c,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x8a,0xd3,0x6d,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x8a,0xd3,0x6e,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x8a,0xd3,0x6f,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x8a,0xd3,0x7b,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x8a,0xd3,0x7c,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x8a,0xd3,0x7e,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x8a,0xd3,0x7f,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x8a,0xd3,0xfd,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x8a,0xd3,0x01,0x01,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x8a,0xd3,0xff,0x01,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x8a,0xd3,0x01,0x00,0x00,0x20]
+
+v_log_legacy_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x8a,0xd3,0x01,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x08,0x8a,0xd3,0x01,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x8a,0xd3,0x01,0x00,0x00,0x08]
+
+v_log_legacy_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x8a,0xd3,0x01,0x00,0x00,0x10]
+
+v_log_legacy_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x8a,0xd3,0x01,0x00,0x00,0x18]
+
+v_exp_legacy_f32 v5, s1
+// CHECK: [0x01,0x8c,0x0a,0x7e]
+
+v_exp_legacy_f32 v255, s1
+// CHECK: [0x01,0x8c,0xfe,0x7f]
+
+v_exp_legacy_f32 v5, s103
+// CHECK: [0x67,0x8c,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, flat_scratch_lo
+// CHECK: [0x68,0x8c,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, flat_scratch_hi
+// CHECK: [0x69,0x8c,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, vcc_lo
+// CHECK: [0x6a,0x8c,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, vcc_hi
+// CHECK: [0x6b,0x8c,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, tba_lo
+// CHECK: [0x6c,0x8c,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, tba_hi
+// CHECK: [0x6d,0x8c,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, tma_lo
+// CHECK: [0x6e,0x8c,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, tma_hi
+// CHECK: [0x6f,0x8c,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, ttmp11
+// CHECK: [0x7b,0x8c,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, m0
+// CHECK: [0x7c,0x8c,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, exec_lo
+// CHECK: [0x7e,0x8c,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, exec_hi
+// CHECK: [0x7f,0x8c,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, 0
+// CHECK: [0x80,0x8c,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, -1
+// CHECK: [0xc1,0x8c,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, 0.5
+// CHECK: [0xf0,0x8c,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, -4.0
+// CHECK: [0xf7,0x8c,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, 0xaf123456
+// CHECK: [0xff,0x8c,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_exp_legacy_f32 v5, 0x3f717273
+// CHECK: [0xff,0x8c,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_exp_legacy_f32 v5, v1
+// CHECK: [0x01,0x8d,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, v255
+// CHECK: [0xff,0x8d,0x0a,0x7e]
+
+v_exp_legacy_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x8c,0xd3,0x01,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x8c,0xd3,0x01,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, s103
+// CHECK: [0x05,0x00,0x8c,0xd3,0x67,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x8c,0xd3,0x68,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x8c,0xd3,0x69,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x8c,0xd3,0x6a,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x8c,0xd3,0x6b,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x8c,0xd3,0x6c,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x8c,0xd3,0x6d,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x8c,0xd3,0x6e,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x8c,0xd3,0x6f,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x8c,0xd3,0x7b,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x8c,0xd3,0x7c,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x8c,0xd3,0x7e,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x8c,0xd3,0x7f,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x8c,0xd3,0xfd,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x8c,0xd3,0x01,0x01,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x8c,0xd3,0xff,0x01,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x8c,0xd3,0x01,0x00,0x00,0x20]
+
+v_exp_legacy_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x8c,0xd3,0x01,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x08,0x8c,0xd3,0x01,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x8c,0xd3,0x01,0x00,0x00,0x08]
+
+v_exp_legacy_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x8c,0xd3,0x01,0x00,0x00,0x10]
+
+v_exp_legacy_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x8c,0xd3,0x01,0x00,0x00,0x18]
+
+v_cndmask_b32 v5, 0, v2, vcc
+// CHECK: [0x80,0x04,0x0a,0x00]
+
+v_cndmask_b32 v255, 0, v2, vcc
+// CHECK: [0x80,0x04,0xfe,0x01]
+
+v_cndmask_b32 v5, -1, v2, vcc
+// CHECK: [0xc1,0x04,0x0a,0x00]
+
+v_cndmask_b32 v5, 0.5, v2, vcc
+// CHECK: [0xf0,0x04,0x0a,0x00]
+
+v_cndmask_b32 v5, -4.0, v2, vcc
+// CHECK: [0xf7,0x04,0x0a,0x00]
+
+v_cndmask_b32 v5, v1, v2, vcc
+// CHECK: [0x01,0x05,0x0a,0x00]
+
+v_cndmask_b32 v5, v255, v2, vcc
+// CHECK: [0xff,0x05,0x0a,0x00]
+
+v_cndmask_b32 v5, 0, v255, vcc
+// CHECK: [0x80,0xfe,0x0b,0x00]
+
+v_cndmask_b32_e64 v5, 0, 0, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd2,0x80,0x00,0x19,0x00]
+
+v_cndmask_b32_e64 v255, 0, 0, s[6:7]
+// CHECK: [0xff,0x00,0x00,0xd2,0x80,0x00,0x19,0x00]
+
+v_cndmask_b32_e64 v5, -1, 0, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd2,0xc1,0x00,0x19,0x00]
+
+v_cndmask_b32_e64 v5, 0.5, 0, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd2,0xf0,0x00,0x19,0x00]
+
+v_cndmask_b32_e64 v5, -4.0, 0, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd2,0xf7,0x00,0x19,0x00]
+
+v_cndmask_b32_e64 v5, v1, 0, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd2,0x01,0x01,0x19,0x00]
+
+v_cndmask_b32_e64 v5, v255, 0, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd2,0xff,0x01,0x19,0x00]
+
+v_cndmask_b32_e64 v5, 0, -1, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd2,0x80,0x82,0x19,0x00]
+
+v_cndmask_b32_e64 v5, 0, 0.5, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd2,0x80,0xe0,0x19,0x00]
+
+v_cndmask_b32_e64 v5, 0, -4.0, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd2,0x80,0xee,0x19,0x00]
+
+v_cndmask_b32_e64 v5, 0, v2, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd2,0x80,0x04,0x1a,0x00]
+
+v_cndmask_b32_e64 v5, 0, v255, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd2,0x80,0xfe,0x1b,0x00]
+
+v_cndmask_b32_e64 v5, 0, 0, s[8:9]
+// CHECK: [0x05,0x00,0x00,0xd2,0x80,0x00,0x21,0x00]
+
+v_cndmask_b32_e64 v5, 0, 0, s[102:103]
+// CHECK: [0x05,0x00,0x00,0xd2,0x80,0x00,0x99,0x01]
+
+v_cndmask_b32_e64 v5, 0, 0, flat_scratch
+// CHECK: [0x05,0x00,0x00,0xd2,0x80,0x00,0xa1,0x01]
+
+v_cndmask_b32_e64 v5, 0, 0, vcc
+// CHECK: [0x05,0x00,0x00,0xd2,0x80,0x00,0xa9,0x01]
+
+v_cndmask_b32_e64 v5, 0, 0, tba
+// CHECK: [0x05,0x00,0x00,0xd2,0x80,0x00,0xb1,0x01]
+
+v_cndmask_b32_e64 v5, 0, 0, tma
+// CHECK: [0x05,0x00,0x00,0xd2,0x80,0x00,0xb9,0x01]
+
+v_cndmask_b32_e64 v5, 0, 0, ttmp[10:11]
+// CHECK: [0x05,0x00,0x00,0xd2,0x80,0x00,0xe9,0x01]
+
+v_readlane_b32 s5, v1, s2
+// CHECK: [0x01,0x05,0x0a,0x02]
+
+v_readlane_b32 s103, v1, s2
+// CHECK: [0x01,0x05,0xce,0x02]
+
+v_readlane_b32 tba_lo, v1, s2
+// CHECK: [0x01,0x05,0xd8,0x02]
+
+v_readlane_b32 tba_hi, v1, s2
+// CHECK: [0x01,0x05,0xda,0x02]
+
+v_readlane_b32 tma_lo, v1, s2
+// CHECK: [0x01,0x05,0xdc,0x02]
+
+v_readlane_b32 tma_hi, v1, s2
+// CHECK: [0x01,0x05,0xde,0x02]
+
+v_readlane_b32 ttmp11, v1, s2
+// CHECK: [0x01,0x05,0xf6,0x02]
+
+v_readlane_b32 s5, v255, s2
+// CHECK: [0xff,0x05,0x0a,0x02]
+
+v_readlane_b32 s5, v1, s103
+// CHECK: [0x01,0xcf,0x0a,0x02]
+
+v_readlane_b32 s5, v1, flat_scratch_lo
+// CHECK: [0x01,0xd1,0x0a,0x02]
+
+v_readlane_b32 s5, v1, flat_scratch_hi
+// CHECK: [0x01,0xd3,0x0a,0x02]
+
+v_readlane_b32 s5, v1, vcc_lo
+// CHECK: [0x01,0xd5,0x0a,0x02]
+
+v_readlane_b32 s5, v1, vcc_hi
+// CHECK: [0x01,0xd7,0x0a,0x02]
+
+v_readlane_b32 s5, v1, tba_lo
+// CHECK: [0x01,0xd9,0x0a,0x02]
+
+v_readlane_b32 s5, v1, tba_hi
+// CHECK: [0x01,0xdb,0x0a,0x02]
+
+v_readlane_b32 s5, v1, tma_lo
+// CHECK: [0x01,0xdd,0x0a,0x02]
+
+v_readlane_b32 s5, v1, tma_hi
+// CHECK: [0x01,0xdf,0x0a,0x02]
+
+v_readlane_b32 s5, v1, ttmp11
+// CHECK: [0x01,0xf7,0x0a,0x02]
+
+v_readlane_b32 s5, v1, m0
+// CHECK: [0x01,0xf9,0x0a,0x02]
+
+v_readlane_b32 s5, v1, 0
+// CHECK: [0x01,0x01,0x0b,0x02]
+
+v_writelane_b32 v5, s1, 0
+// CHECK: [0x01,0x00,0x0b,0x04]
+
+v_writelane_b32 v255, s1, 0
+// CHECK: [0x01,0x00,0xff,0x05]
+
+v_writelane_b32 v5, s103, 0
+// CHECK: [0x67,0x00,0x0b,0x04]
+
+v_writelane_b32 v5, flat_scratch_lo, 0
+// CHECK: [0x68,0x00,0x0b,0x04]
+
+v_writelane_b32 v5, flat_scratch_hi, 0
+// CHECK: [0x69,0x00,0x0b,0x04]
+
+v_writelane_b32 v5, vcc_lo, 0
+// CHECK: [0x6a,0x00,0x0b,0x04]
+
+v_writelane_b32 v5, vcc_hi, 0
+// CHECK: [0x6b,0x00,0x0b,0x04]
+
+v_writelane_b32 v5, tba_lo, 0
+// CHECK: [0x6c,0x00,0x0b,0x04]
+
+v_writelane_b32 v5, tba_hi, 0
+// CHECK: [0x6d,0x00,0x0b,0x04]
+
+v_writelane_b32 v5, tma_lo, 0
+// CHECK: [0x6e,0x00,0x0b,0x04]
+
+v_writelane_b32 v5, tma_hi, 0
+// CHECK: [0x6f,0x00,0x0b,0x04]
+
+v_writelane_b32 v5, ttmp11, 0
+// CHECK: [0x7b,0x00,0x0b,0x04]
+
+v_writelane_b32 v5, m0, 0
+// CHECK: [0x7c,0x00,0x0b,0x04]
+
+v_writelane_b32 v5, exec_lo, 0
+// CHECK: [0x7e,0x00,0x0b,0x04]
+
+v_writelane_b32 v5, exec_hi, 0
+// CHECK: [0x7f,0x00,0x0b,0x04]
+
+v_add_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x06]
+
+v_add_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x07]
+
+v_add_f32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x06]
+
+v_add_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x06]
+
+v_add_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x06]
+
+v_add_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x06]
+
+v_add_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x06]
+
+v_add_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x06]
+
+v_add_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x06]
+
+v_add_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x06]
+
+v_add_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x06]
+
+v_add_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x06]
+
+v_add_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x06]
+
+v_add_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x06]
+
+v_add_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x06]
+
+v_add_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x06]
+
+v_add_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x06]
+
+v_add_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x06]
+
+v_add_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x06]
+
+v_add_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x06,0x56,0x34,0x12,0xaf]
+
+v_add_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x06,0x73,0x72,0x71,0x3f]
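+// src0 = 0xff in the two cases above selects a literal: the 32-bit value is
+// emitted little-endian after the instruction word, as the extra four bytes
+// show.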
+
+v_add_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x06]
+
+v_add_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x06]
+
+v_add_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x06]
+
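+// The _e64 (VOP3) forms are 64 bits wide. Comparing the cases below: the abs
+// modifiers and clamp set bits in byte 1 (|src0| = 0x01, |src1| = 0x02,
+// clamp = 0x08), while negation and the output modifier land in the final
+// byte (-src0 = 0x20, -src1 = 0x40, mul:2 = 0x08, mul:4 = 0x10,
+// div:2 = 0x18).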
+v_add_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0x05,0x00,0x00]
+
+v_add_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x06,0xd2,0x01,0x05,0x00,0x00]
+
+v_add_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x06,0xd2,0xff,0x05,0x00,0x00]
+
+v_add_f32_e64 v5, v1, s103
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xcf,0x00,0x00]
+
+v_add_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xd1,0x00,0x00]
+
+v_add_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xd3,0x00,0x00]
+
+v_add_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xd5,0x00,0x00]
+
+v_add_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xd7,0x00,0x00]
+
+v_add_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xd9,0x00,0x00]
+
+v_add_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xdb,0x00,0x00]
+
+v_add_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xdd,0x00,0x00]
+
+v_add_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xdf,0x00,0x00]
+
+v_add_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xf7,0x00,0x00]
+
+v_add_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xf9,0x00,0x00]
+
+v_add_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xfd,0x00,0x00]
+
+v_add_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xff,0x00,0x00]
+
+v_add_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xfb,0x01,0x00]
+
+v_add_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0x05,0x02,0x00]
+
+v_add_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0xff,0x03,0x00]
+
+v_add_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0x05,0x00,0x20]
+
+v_add_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0x05,0x00,0x40]
+
+v_add_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0x05,0x00,0x60]
+
+v_add_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x06,0xd2,0x01,0x05,0x00,0x00]
+
+v_add_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x06,0xd2,0x01,0x05,0x00,0x00]
+
+v_add_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x06,0xd2,0x01,0x05,0x00,0x00]
+
+v_add_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x08,0x06,0xd2,0x01,0x05,0x00,0x00]
+
+v_add_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0x05,0x00,0x08]
+
+v_add_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0x05,0x00,0x10]
+
+v_add_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x06,0xd2,0x01,0x05,0x00,0x18]
+
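+// From here the same operand sweep repeats per opcode, and only the opcode
+// field changes; it is cleanest to read in byte 2 of the _e64 encodings:
+// 0x06 (v_add_f32), 0x08 (v_sub_f32), 0x0a (v_subrev_f32), and so on in
+// steps of two.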
+v_sub_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x08]
+
+v_sub_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x09]
+
+v_sub_f32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x08]
+
+v_sub_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x08]
+
+v_sub_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x08]
+
+v_sub_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x08]
+
+v_sub_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x08]
+
+v_sub_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x08]
+
+v_sub_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x08]
+
+v_sub_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x08]
+
+v_sub_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x08]
+
+v_sub_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x08]
+
+v_sub_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x08]
+
+v_sub_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x08]
+
+v_sub_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x08]
+
+v_sub_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x08]
+
+v_sub_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x08]
+
+v_sub_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x08]
+
+v_sub_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x08]
+
+v_sub_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x08,0x56,0x34,0x12,0xaf]
+
+v_sub_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x08,0x73,0x72,0x71,0x3f]
+
+v_sub_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x08]
+
+v_sub_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x08]
+
+v_sub_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x08]
+
+v_sub_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0x05,0x00,0x00]
+
+v_sub_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x08,0xd2,0x01,0x05,0x00,0x00]
+
+v_sub_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x08,0xd2,0xff,0x05,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, s103
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xcf,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xd1,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xd3,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xd5,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xd7,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xd9,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xdb,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xdd,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xdf,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xf7,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xf9,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xfd,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xff,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xfb,0x01,0x00]
+
+v_sub_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0x05,0x02,0x00]
+
+v_sub_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0xff,0x03,0x00]
+
+v_sub_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0x05,0x00,0x20]
+
+v_sub_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0x05,0x00,0x40]
+
+v_sub_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0x05,0x00,0x60]
+
+v_sub_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x08,0xd2,0x01,0x05,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x08,0xd2,0x01,0x05,0x00,0x00]
+
+v_sub_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x08,0xd2,0x01,0x05,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x08,0x08,0xd2,0x01,0x05,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0x05,0x00,0x08]
+
+v_sub_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0x05,0x00,0x10]
+
+v_sub_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x08,0xd2,0x01,0x05,0x00,0x18]
+
+v_subrev_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x0a]
+
+v_subrev_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x0b]
+
+v_subrev_f32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x0a]
+
+v_subrev_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x0a]
+
+v_subrev_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x0a]
+
+v_subrev_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x0a]
+
+v_subrev_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x0a]
+
+v_subrev_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x0a]
+
+v_subrev_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x0a]
+
+v_subrev_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x0a]
+
+v_subrev_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x0a]
+
+v_subrev_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x0a]
+
+v_subrev_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x0a]
+
+v_subrev_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x0a]
+
+v_subrev_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x0a]
+
+v_subrev_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x0a]
+
+v_subrev_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x0a]
+
+v_subrev_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x0a]
+
+v_subrev_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x0a]
+
+v_subrev_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x0a,0x56,0x34,0x12,0xaf]
+
+v_subrev_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x0a,0x73,0x72,0x71,0x3f]
+
+v_subrev_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x0a]
+
+v_subrev_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x0a]
+
+v_subrev_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x0a]
+
+v_subrev_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0x05,0x00,0x00]
+
+v_subrev_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x0a,0xd2,0x01,0x05,0x00,0x00]
+
+v_subrev_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x0a,0xd2,0xff,0x05,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, s103
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xcf,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xd1,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xd3,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xd5,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xd7,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xd9,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xdb,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xdd,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xdf,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xf7,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xf9,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xfd,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xff,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xfb,0x01,0x00]
+
+v_subrev_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0x05,0x02,0x00]
+
+v_subrev_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0xff,0x03,0x00]
+
+v_subrev_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0x05,0x00,0x20]
+
+v_subrev_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0x05,0x00,0x40]
+
+v_subrev_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0x05,0x00,0x60]
+
+v_subrev_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x0a,0xd2,0x01,0x05,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x0a,0xd2,0x01,0x05,0x00,0x00]
+
+v_subrev_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x0a,0xd2,0x01,0x05,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x08,0x0a,0xd2,0x01,0x05,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0x05,0x00,0x08]
+
+v_subrev_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0x05,0x00,0x10]
+
+v_subrev_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x0a,0xd2,0x01,0x05,0x00,0x18]
+
+v_mac_legacy_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x0c]
+
+v_mac_legacy_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x0d]
+
+v_mac_legacy_f32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x0c,0x56,0x34,0x12,0xaf]
+
+v_mac_legacy_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x0c,0x73,0x72,0x71,0x3f]
+
+v_mac_legacy_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x0c]
+
+v_mac_legacy_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x0c]
+
+v_mac_legacy_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0x05,0x00,0x00]
+
+v_mac_legacy_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x0c,0xd2,0x01,0x05,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x0c,0xd2,0xff,0x05,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, s103
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xcf,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xd1,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xd3,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xd5,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xd7,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xd9,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xdb,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xdd,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xdf,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xf7,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xf9,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xfd,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xff,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xfb,0x01,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0x05,0x02,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0xff,0x03,0x00]
+
+v_mac_legacy_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0x05,0x00,0x20]
+
+v_mac_legacy_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0x05,0x00,0x40]
+
+v_mac_legacy_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0x05,0x00,0x60]
+
+v_mac_legacy_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x0c,0xd2,0x01,0x05,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x0c,0xd2,0x01,0x05,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x0c,0xd2,0x01,0x05,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x08,0x0c,0xd2,0x01,0x05,0x00,0x00]
+
+v_mac_legacy_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0x05,0x00,0x08]
+
+v_mac_legacy_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0x05,0x00,0x10]
+
+v_mac_legacy_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x0c,0xd2,0x01,0x05,0x00,0x18]
+
+v_mul_legacy_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x0e]
+
+v_mul_legacy_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x0f]
+
+v_mul_legacy_f32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x0e,0x56,0x34,0x12,0xaf]
+
+v_mul_legacy_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x0e,0x73,0x72,0x71,0x3f]
+
+v_mul_legacy_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x0e]
+
+v_mul_legacy_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x0e]
+
+v_mul_legacy_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_legacy_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x0e,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x0e,0xd2,0xff,0x05,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, s103
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xcf,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xd1,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xd3,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xd5,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xd7,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xd9,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xdb,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xdd,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xdf,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xf7,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xf9,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xfd,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xff,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xfb,0x01,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0x05,0x02,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0xff,0x03,0x00]
+
+v_mul_legacy_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0x05,0x00,0x20]
+
+v_mul_legacy_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0x05,0x00,0x40]
+
+v_mul_legacy_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0x05,0x00,0x60]
+
+v_mul_legacy_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x0e,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x0e,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x0e,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x08,0x0e,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0x05,0x00,0x08]
+
+v_mul_legacy_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0x05,0x00,0x10]
+
+v_mul_legacy_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x0e,0xd2,0x01,0x05,0x00,0x18]
+
+v_mul_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x10]
+
+v_mul_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x11]
+
+v_mul_f32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x10]
+
+v_mul_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x10]
+
+v_mul_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x10]
+
+v_mul_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x10]
+
+v_mul_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x10]
+
+v_mul_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x10]
+
+v_mul_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x10]
+
+v_mul_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x10]
+
+v_mul_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x10]
+
+v_mul_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x10]
+
+v_mul_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x10]
+
+v_mul_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x10]
+
+v_mul_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x10]
+
+v_mul_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x10]
+
+v_mul_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x10]
+
+v_mul_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x10]
+
+v_mul_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x10]
+
+v_mul_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x10,0x56,0x34,0x12,0xaf]
+
+v_mul_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x10,0x73,0x72,0x71,0x3f]
+
+v_mul_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x10]
+
+v_mul_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x10]
+
+v_mul_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x10]
+
+v_mul_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x10,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x10,0xd2,0xff,0x05,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, s103
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xcf,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xd1,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xd3,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xd5,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xd7,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xd9,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xdb,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xdd,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xdf,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xf7,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xf9,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xfd,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xff,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xfb,0x01,0x00]
+
+v_mul_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0x05,0x02,0x00]
+
+v_mul_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0xff,0x03,0x00]
+
+v_mul_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0x05,0x00,0x20]
+
+v_mul_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0x05,0x00,0x40]
+
+v_mul_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0x05,0x00,0x60]
+
+v_mul_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x10,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x10,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x10,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x08,0x10,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0x05,0x00,0x08]
+
+v_mul_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0x05,0x00,0x10]
+
+v_mul_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x10,0xd2,0x01,0x05,0x00,0x18]
+
+v_mul_i32_i24 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x12]
+
+v_mul_i32_i24 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x13]
+
+v_mul_i32_i24 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x12]
+
+v_mul_i32_i24 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x12]
+
+v_mul_i32_i24 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x12]
+
+v_mul_i32_i24 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x12]
+
+v_mul_i32_i24 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x12]
+
+v_mul_i32_i24 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x12]
+
+v_mul_i32_i24 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x12]
+
+v_mul_i32_i24 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x12]
+
+v_mul_i32_i24 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x12]
+
+v_mul_i32_i24 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x12]
+
+v_mul_i32_i24 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x12]
+
+v_mul_i32_i24 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x12]
+
+v_mul_i32_i24 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x12]
+
+v_mul_i32_i24 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x12]
+
+v_mul_i32_i24 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x12]
+
+v_mul_i32_i24 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x12]
+
+v_mul_i32_i24 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x12]
+
+v_mul_i32_i24 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x12,0x56,0x34,0x12,0xaf]
+
+v_mul_i32_i24 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x12,0x73,0x72,0x71,0x3f]
+
+v_mul_i32_i24 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x12]
+
+v_mul_i32_i24 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x12]
+
+v_mul_i32_i24 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x12]
+
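+// For the 24-bit integer multiplies the _e64 sweep holds src0 at the inline
+// constant 0 (0x80) rather than v1 and drops the neg/abs/clamp/omod cases,
+// presumably because these opcodes accept no floating-point modifiers.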
+v_mul_i32_i24_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_i32_i24_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x12,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x12,0xd2,0xc1,0x04,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x12,0xd2,0xf0,0x04,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x12,0xd2,0xf7,0x04,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x12,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x12,0xd2,0xff,0x05,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0xce,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0xd0,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0xd2,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0xd4,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0xd6,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0xd8,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0xda,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0xdc,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0xde,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0xf6,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0xf8,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0xfc,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0xfe,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0x00,0x01,0x00]
+
+v_mul_i32_i24_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0x82,0x01,0x00]
+
+v_mul_i32_i24_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0xe0,0x01,0x00]
+
+v_mul_i32_i24_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0xee,0x01,0x00]
+
+v_mul_i32_i24_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0x04,0x02,0x00]
+
+v_mul_i32_i24_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x12,0xd2,0x80,0xfe,0x03,0x00]
+
+v_mul_hi_i32_i24 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x14]
+
+v_mul_hi_i32_i24 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x15]
+
+v_mul_hi_i32_i24 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x14,0x56,0x34,0x12,0xaf]
+
+v_mul_hi_i32_i24 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x14,0x73,0x72,0x71,0x3f]
+
+v_mul_hi_i32_i24 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x14]
+
+v_mul_hi_i32_i24 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x14]
+
+v_mul_hi_i32_i24_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x14,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x14,0xd2,0xc1,0x04,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x14,0xd2,0xf0,0x04,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x14,0xd2,0xf7,0x04,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x14,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x14,0xd2,0xff,0x05,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0xce,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0xd0,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0xd2,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0xd4,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0xd6,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0xd8,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0xda,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0xdc,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0xde,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0xf6,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0xf8,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0xfc,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0xfe,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0x00,0x01,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0x82,0x01,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0xe0,0x01,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0xee,0x01,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0x04,0x02,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x14,0xd2,0x80,0xfe,0x03,0x00]
+
+v_mul_u32_u24 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x16]
+
+v_mul_u32_u24 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x17]
+
+v_mul_u32_u24 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x16]
+
+v_mul_u32_u24 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x16]
+
+v_mul_u32_u24 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x16]
+
+v_mul_u32_u24 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x16]
+
+v_mul_u32_u24 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x16]
+
+v_mul_u32_u24 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x16]
+
+v_mul_u32_u24 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x16]
+
+v_mul_u32_u24 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x16]
+
+v_mul_u32_u24 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x16]
+
+v_mul_u32_u24 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x16]
+
+v_mul_u32_u24 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x16]
+
+v_mul_u32_u24 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x16]
+
+v_mul_u32_u24 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x16]
+
+v_mul_u32_u24 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x16]
+
+v_mul_u32_u24 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x16]
+
+v_mul_u32_u24 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x16]
+
+v_mul_u32_u24 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x16]
+
+v_mul_u32_u24 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x16,0x56,0x34,0x12,0xaf]
+
+v_mul_u32_u24 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x16,0x73,0x72,0x71,0x3f]
+
+v_mul_u32_u24 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x16]
+
+v_mul_u32_u24 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x16]
+
+v_mul_u32_u24 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x16]
+
+v_mul_u32_u24_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_u32_u24_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x16,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x16,0xd2,0xc1,0x04,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x16,0xd2,0xf0,0x04,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x16,0xd2,0xf7,0x04,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x16,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x16,0xd2,0xff,0x05,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0xce,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0xd0,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0xd2,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0xd4,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0xd6,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0xd8,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0xda,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0xdc,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0xde,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0xf6,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0xf8,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0xfc,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0xfe,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0x00,0x01,0x00]
+
+v_mul_u32_u24_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0x82,0x01,0x00]
+
+v_mul_u32_u24_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0xe0,0x01,0x00]
+
+v_mul_u32_u24_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0xee,0x01,0x00]
+
+v_mul_u32_u24_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0x04,0x02,0x00]
+
+v_mul_u32_u24_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x16,0xd2,0x80,0xfe,0x03,0x00]
+
+v_mul_hi_u32_u24 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x18]
+
+v_mul_hi_u32_u24 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x19]
+
+v_mul_hi_u32_u24 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x18,0x56,0x34,0x12,0xaf]
+
+v_mul_hi_u32_u24 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x18,0x73,0x72,0x71,0x3f]
+
+v_mul_hi_u32_u24 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x18]
+
+v_mul_hi_u32_u24 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x18]
+
+v_mul_hi_u32_u24_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x18,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x18,0xd2,0xc1,0x04,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x18,0xd2,0xf0,0x04,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x18,0xd2,0xf7,0x04,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x18,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x18,0xd2,0xff,0x05,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0xce,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0xd0,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0xd2,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0xd4,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0xd6,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0xd8,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0xda,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0xdc,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0xde,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0xf6,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0xf8,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0xfc,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0xfe,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0x00,0x01,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0x82,0x01,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0xe0,0x01,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0xee,0x01,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0x04,0x02,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x18,0xd2,0x80,0xfe,0x03,0x00]
+
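+// v_min_legacy_f32 / v_max_legacy_f32 (opcodes 0x1a / 0x1c) get the full
+// float-modifier sweep again; "legacy" presumably refers to the pre-IEEE
+// (DX9-style) NaN handling, though these encoding tests do not depend on
+// that.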
+v_min_legacy_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x1a]
+
+v_min_legacy_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x1b]
+
+v_min_legacy_f32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x1a]
+
+v_min_legacy_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x1a]
+
+v_min_legacy_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x1a]
+
+v_min_legacy_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x1a]
+
+v_min_legacy_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x1a]
+
+v_min_legacy_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x1a]
+
+v_min_legacy_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x1a]
+
+v_min_legacy_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x1a]
+
+v_min_legacy_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x1a]
+
+v_min_legacy_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x1a]
+
+v_min_legacy_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x1a]
+
+v_min_legacy_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x1a]
+
+v_min_legacy_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x1a]
+
+v_min_legacy_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x1a]
+
+v_min_legacy_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x1a]
+
+v_min_legacy_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x1a]
+
+v_min_legacy_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x1a]
+
+v_min_legacy_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x1a,0x56,0x34,0x12,0xaf]
+
+v_min_legacy_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x1a,0x73,0x72,0x71,0x3f]
+
+v_min_legacy_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x1a]
+
+v_min_legacy_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x1a]
+
+v_min_legacy_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x1a]
+
+v_min_legacy_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0x05,0x00,0x00]
+
+v_min_legacy_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x1a,0xd2,0x01,0x05,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x1a,0xd2,0xff,0x05,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, v1, s103
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xcf,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xd1,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xd3,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xd5,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xd7,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xd9,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xdb,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xdd,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xdf,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xf7,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xf9,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xfd,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xff,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xfb,0x01,0x00]
+
+v_min_legacy_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0x05,0x02,0x00]
+
+v_min_legacy_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0xff,0x03,0x00]
+
+v_min_legacy_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0x05,0x00,0x20]
+
+v_min_legacy_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0x05,0x00,0x40]
+
+v_min_legacy_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0x05,0x00,0x60]
+
+v_min_legacy_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x1a,0xd2,0x01,0x05,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x1a,0xd2,0x01,0x05,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x1a,0xd2,0x01,0x05,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x08,0x1a,0xd2,0x01,0x05,0x00,0x00]
+
+v_min_legacy_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0x05,0x00,0x08]
+
+v_min_legacy_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0x05,0x00,0x10]
+
+v_min_legacy_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x1a,0xd2,0x01,0x05,0x00,0x18]
+
+v_max_legacy_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x1c]
+
+v_max_legacy_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x1d]
+
+v_max_legacy_f32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x1c]
+
+v_max_legacy_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x1c]
+
+v_max_legacy_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x1c]
+
+v_max_legacy_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x1c]
+
+v_max_legacy_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x1c]
+
+v_max_legacy_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x1c]
+
+v_max_legacy_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x1c]
+
+v_max_legacy_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x1c]
+
+v_max_legacy_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x1c]
+
+v_max_legacy_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x1c]
+
+v_max_legacy_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x1c]
+
+v_max_legacy_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x1c]
+
+v_max_legacy_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x1c]
+
+v_max_legacy_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x1c]
+
+v_max_legacy_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x1c]
+
+v_max_legacy_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x1c]
+
+v_max_legacy_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x1c]
+
+v_max_legacy_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x1c,0x56,0x34,0x12,0xaf]
+
+v_max_legacy_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x1c,0x73,0x72,0x71,0x3f]
+
+v_max_legacy_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x1c]
+
+v_max_legacy_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x1c]
+
+v_max_legacy_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x1c]
+
+v_max_legacy_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0x05,0x00,0x00]
+
+v_max_legacy_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x1c,0xd2,0x01,0x05,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x1c,0xd2,0xff,0x05,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, v1, s103
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xcf,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xd1,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xd3,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xd5,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xd7,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xd9,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xdb,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xdd,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xdf,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xf7,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xf9,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xfd,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xff,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xfb,0x01,0x00]
+
+v_max_legacy_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0x05,0x02,0x00]
+
+v_max_legacy_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0xff,0x03,0x00]
+
+v_max_legacy_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0x05,0x00,0x20]
+
+v_max_legacy_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0x05,0x00,0x40]
+
+v_max_legacy_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0x05,0x00,0x60]
+
+v_max_legacy_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x1c,0xd2,0x01,0x05,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x1c,0xd2,0x01,0x05,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x1c,0xd2,0x01,0x05,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x08,0x1c,0xd2,0x01,0x05,0x00,0x00]
+
+v_max_legacy_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0x05,0x00,0x08]
+
+v_max_legacy_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0x05,0x00,0x10]
+
+v_max_legacy_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x1c,0xd2,0x01,0x05,0x00,0x18]
+
+v_min_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x1e]
+
+v_min_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x1f]
+
+v_min_f32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x1e]
+
+v_min_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x1e]
+
+v_min_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x1e]
+
+v_min_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x1e]
+
+v_min_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x1e]
+
+v_min_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x1e]
+
+v_min_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x1e]
+
+v_min_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x1e]
+
+v_min_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x1e]
+
+v_min_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x1e]
+
+v_min_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x1e]
+
+v_min_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x1e]
+
+v_min_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x1e]
+
+v_min_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x1e]
+
+v_min_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x1e]
+
+v_min_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x1e]
+
+v_min_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x1e]
+
+v_min_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x1e,0x56,0x34,0x12,0xaf]
+
+v_min_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x1e,0x73,0x72,0x71,0x3f]
+
+v_min_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x1e]
+
+v_min_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x1e]
+
+v_min_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x1e]
+
+v_min_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0x05,0x00,0x00]
+
+v_min_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x1e,0xd2,0x01,0x05,0x00,0x00]
+
+v_min_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x1e,0xd2,0xff,0x05,0x00,0x00]
+
+v_min_f32_e64 v5, v1, s103
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xcf,0x00,0x00]
+
+v_min_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xd1,0x00,0x00]
+
+v_min_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xd3,0x00,0x00]
+
+v_min_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xd5,0x00,0x00]
+
+v_min_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xd7,0x00,0x00]
+
+v_min_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xd9,0x00,0x00]
+
+v_min_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xdb,0x00,0x00]
+
+v_min_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xdd,0x00,0x00]
+
+v_min_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xdf,0x00,0x00]
+
+v_min_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xf7,0x00,0x00]
+
+v_min_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xf9,0x00,0x00]
+
+v_min_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xfd,0x00,0x00]
+
+v_min_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xff,0x00,0x00]
+
+v_min_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xfb,0x01,0x00]
+
+v_min_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0x05,0x02,0x00]
+
+v_min_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0xff,0x03,0x00]
+
+v_min_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0x05,0x00,0x20]
+
+v_min_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0x05,0x00,0x40]
+
+v_min_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0x05,0x00,0x60]
+
+v_min_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x1e,0xd2,0x01,0x05,0x00,0x00]
+
+v_min_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x1e,0xd2,0x01,0x05,0x00,0x00]
+
+v_min_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x1e,0xd2,0x01,0x05,0x00,0x00]
+
+v_min_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x08,0x1e,0xd2,0x01,0x05,0x00,0x00]
+
+v_min_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0x05,0x00,0x08]
+
+v_min_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0x05,0x00,0x10]
+
+v_min_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x1e,0xd2,0x01,0x05,0x00,0x18]
+
+v_max_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x20]
+
+v_max_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x21]
+
+v_max_f32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x20]
+
+v_max_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x20]
+
+v_max_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x20]
+
+v_max_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x20]
+
+v_max_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x20]
+
+v_max_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x20]
+
+v_max_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x20]
+
+v_max_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x20]
+
+v_max_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x20]
+
+v_max_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x20]
+
+v_max_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x20]
+
+v_max_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x20]
+
+v_max_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x20]
+
+v_max_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x20]
+
+v_max_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x20]
+
+v_max_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x20]
+
+v_max_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x20]
+
+v_max_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x20,0x56,0x34,0x12,0xaf]
+
+v_max_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x20,0x73,0x72,0x71,0x3f]
+
+v_max_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x20]
+
+v_max_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x20]
+
+v_max_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x20]
+
+v_max_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0x05,0x00,0x00]
+
+v_max_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x20,0xd2,0x01,0x05,0x00,0x00]
+
+v_max_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x20,0xd2,0xff,0x05,0x00,0x00]
+
+v_max_f32_e64 v5, v1, s103
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xcf,0x00,0x00]
+
+v_max_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xd1,0x00,0x00]
+
+v_max_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xd3,0x00,0x00]
+
+v_max_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xd5,0x00,0x00]
+
+v_max_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xd7,0x00,0x00]
+
+v_max_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xd9,0x00,0x00]
+
+v_max_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xdb,0x00,0x00]
+
+v_max_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xdd,0x00,0x00]
+
+v_max_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xdf,0x00,0x00]
+
+v_max_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xf7,0x00,0x00]
+
+v_max_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xf9,0x00,0x00]
+
+v_max_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xfd,0x00,0x00]
+
+v_max_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xff,0x00,0x00]
+
+v_max_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xfb,0x01,0x00]
+
+v_max_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0x05,0x02,0x00]
+
+v_max_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0xff,0x03,0x00]
+
+v_max_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0x05,0x00,0x20]
+
+v_max_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0x05,0x00,0x40]
+
+v_max_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0x05,0x00,0x60]
+
+v_max_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x20,0xd2,0x01,0x05,0x00,0x00]
+
+v_max_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x20,0xd2,0x01,0x05,0x00,0x00]
+
+v_max_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x20,0xd2,0x01,0x05,0x00,0x00]
+
+v_max_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x08,0x20,0xd2,0x01,0x05,0x00,0x00]
+
+v_max_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0x05,0x00,0x08]
+
+v_max_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0x05,0x00,0x10]
+
+v_max_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x20,0xd2,0x01,0x05,0x00,0x18]
+
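+// The integer min/max opcodes below return to the constant-0 src0 sweep used
+// for the 24-bit multiplies, again with no source modifiers.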
+v_min_i32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x22]
+
+v_min_i32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x23]
+
+v_min_i32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x22]
+
+v_min_i32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x22]
+
+v_min_i32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x22]
+
+v_min_i32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x22]
+
+v_min_i32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x22]
+
+v_min_i32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x22]
+
+v_min_i32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x22]
+
+v_min_i32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x22]
+
+v_min_i32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x22]
+
+v_min_i32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x22]
+
+v_min_i32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x22]
+
+v_min_i32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x22]
+
+v_min_i32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x22]
+
+v_min_i32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x22]
+
+v_min_i32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x22]
+
+v_min_i32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x22]
+
+v_min_i32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x22]
+
+v_min_i32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x22,0x56,0x34,0x12,0xaf]
+
+v_min_i32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x22,0x73,0x72,0x71,0x3f]
+
+v_min_i32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x22]
+
+v_min_i32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x22]
+
+v_min_i32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x22]
+
+v_min_i32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0x04,0x00,0x00]
+
+v_min_i32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x22,0xd2,0x80,0x04,0x00,0x00]
+
+v_min_i32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x22,0xd2,0xc1,0x04,0x00,0x00]
+
+v_min_i32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x22,0xd2,0xf0,0x04,0x00,0x00]
+
+v_min_i32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x22,0xd2,0xf7,0x04,0x00,0x00]
+
+v_min_i32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x22,0xd2,0x01,0x05,0x00,0x00]
+
+v_min_i32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x22,0xd2,0xff,0x05,0x00,0x00]
+
+v_min_i32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0xce,0x00,0x00]
+
+v_min_i32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0xd0,0x00,0x00]
+
+v_min_i32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0xd2,0x00,0x00]
+
+v_min_i32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0xd4,0x00,0x00]
+
+v_min_i32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0xd6,0x00,0x00]
+
+v_min_i32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0xd8,0x00,0x00]
+
+v_min_i32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0xda,0x00,0x00]
+
+v_min_i32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0xdc,0x00,0x00]
+
+v_min_i32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0xde,0x00,0x00]
+
+v_min_i32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0xf6,0x00,0x00]
+
+v_min_i32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0xf8,0x00,0x00]
+
+v_min_i32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0xfc,0x00,0x00]
+
+v_min_i32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0xfe,0x00,0x00]
+
+v_min_i32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0x00,0x01,0x00]
+
+v_min_i32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0x82,0x01,0x00]
+
+v_min_i32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0xe0,0x01,0x00]
+
+v_min_i32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0xee,0x01,0x00]
+
+v_min_i32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0x04,0x02,0x00]
+
+v_min_i32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x22,0xd2,0x80,0xfe,0x03,0x00]
+
+v_max_i32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x24]
+
+v_max_i32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x25]
+
+v_max_i32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x24]
+
+v_max_i32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x24]
+
+v_max_i32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x24]
+
+v_max_i32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x24]
+
+v_max_i32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x24]
+
+v_max_i32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x24]
+
+v_max_i32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x24]
+
+v_max_i32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x24]
+
+v_max_i32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x24]
+
+v_max_i32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x24]
+
+v_max_i32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x24]
+
+v_max_i32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x24]
+
+v_max_i32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x24]
+
+v_max_i32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x24]
+
+v_max_i32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x24]
+
+v_max_i32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x24]
+
+v_max_i32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x24]
+
+v_max_i32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x24,0x56,0x34,0x12,0xaf]
+
+v_max_i32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x24,0x73,0x72,0x71,0x3f]
+
+v_max_i32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x24]
+
+v_max_i32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x24]
+
+v_max_i32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x24]
+
+v_max_i32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0x04,0x00,0x00]
+
+v_max_i32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x24,0xd2,0x80,0x04,0x00,0x00]
+
+v_max_i32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x24,0xd2,0xc1,0x04,0x00,0x00]
+
+v_max_i32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x24,0xd2,0xf0,0x04,0x00,0x00]
+
+v_max_i32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x24,0xd2,0xf7,0x04,0x00,0x00]
+
+v_max_i32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x24,0xd2,0x01,0x05,0x00,0x00]
+
+v_max_i32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x24,0xd2,0xff,0x05,0x00,0x00]
+
+v_max_i32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0xce,0x00,0x00]
+
+v_max_i32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0xd0,0x00,0x00]
+
+v_max_i32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0xd2,0x00,0x00]
+
+v_max_i32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0xd4,0x00,0x00]
+
+v_max_i32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0xd6,0x00,0x00]
+
+v_max_i32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0xd8,0x00,0x00]
+
+v_max_i32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0xda,0x00,0x00]
+
+v_max_i32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0xdc,0x00,0x00]
+
+v_max_i32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0xde,0x00,0x00]
+
+v_max_i32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0xf6,0x00,0x00]
+
+v_max_i32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0xf8,0x00,0x00]
+
+v_max_i32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0xfc,0x00,0x00]
+
+v_max_i32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0xfe,0x00,0x00]
+
+v_max_i32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0x00,0x01,0x00]
+
+v_max_i32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0x82,0x01,0x00]
+
+v_max_i32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0xe0,0x01,0x00]
+
+v_max_i32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0xee,0x01,0x00]
+
+v_max_i32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0x04,0x02,0x00]
+
+v_max_i32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x24,0xd2,0x80,0xfe,0x03,0x00]
+
+v_min_u32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x26]
+
+v_min_u32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x27]
+
+v_min_u32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x26]
+
+v_min_u32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x26]
+
+v_min_u32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x26]
+
+v_min_u32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x26]
+
+v_min_u32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x26]
+
+v_min_u32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x26]
+
+v_min_u32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x26]
+
+v_min_u32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x26]
+
+v_min_u32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x26]
+
+v_min_u32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x26]
+
+v_min_u32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x26]
+
+v_min_u32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x26]
+
+v_min_u32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x26]
+
+v_min_u32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x26]
+
+v_min_u32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x26]
+
+v_min_u32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x26]
+
+v_min_u32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x26]
+
+v_min_u32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x26,0x56,0x34,0x12,0xaf]
+
+v_min_u32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x26,0x73,0x72,0x71,0x3f]
+
+v_min_u32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x26]
+
+v_min_u32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x26]
+
+v_min_u32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x26]
+
+v_min_u32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0x04,0x00,0x00]
+
+v_min_u32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x26,0xd2,0x80,0x04,0x00,0x00]
+
+v_min_u32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x26,0xd2,0xc1,0x04,0x00,0x00]
+
+v_min_u32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x26,0xd2,0xf0,0x04,0x00,0x00]
+
+v_min_u32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x26,0xd2,0xf7,0x04,0x00,0x00]
+
+v_min_u32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x26,0xd2,0x01,0x05,0x00,0x00]
+
+v_min_u32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x26,0xd2,0xff,0x05,0x00,0x00]
+
+v_min_u32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0xce,0x00,0x00]
+
+v_min_u32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0xd0,0x00,0x00]
+
+v_min_u32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0xd2,0x00,0x00]
+
+v_min_u32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0xd4,0x00,0x00]
+
+v_min_u32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0xd6,0x00,0x00]
+
+v_min_u32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0xd8,0x00,0x00]
+
+v_min_u32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0xda,0x00,0x00]
+
+v_min_u32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0xdc,0x00,0x00]
+
+v_min_u32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0xde,0x00,0x00]
+
+v_min_u32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0xf6,0x00,0x00]
+
+v_min_u32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0xf8,0x00,0x00]
+
+v_min_u32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0xfc,0x00,0x00]
+
+v_min_u32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0xfe,0x00,0x00]
+
+v_min_u32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0x00,0x01,0x00]
+
+v_min_u32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0x82,0x01,0x00]
+
+v_min_u32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0xe0,0x01,0x00]
+
+v_min_u32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0xee,0x01,0x00]
+
+v_min_u32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0x04,0x02,0x00]
+
+v_min_u32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x26,0xd2,0x80,0xfe,0x03,0x00]
+
+v_max_u32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x28]
+
+v_max_u32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x29]
+
+v_max_u32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x28]
+
+v_max_u32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x28]
+
+v_max_u32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x28]
+
+v_max_u32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x28]
+
+v_max_u32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x28]
+
+v_max_u32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x28]
+
+v_max_u32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x28]
+
+v_max_u32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x28]
+
+v_max_u32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x28]
+
+v_max_u32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x28]
+
+v_max_u32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x28]
+
+v_max_u32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x28]
+
+v_max_u32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x28]
+
+v_max_u32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x28]
+
+v_max_u32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x28]
+
+v_max_u32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x28]
+
+v_max_u32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x28]
+
+v_max_u32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x28,0x56,0x34,0x12,0xaf]
+
+v_max_u32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x28,0x73,0x72,0x71,0x3f]
+
+v_max_u32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x28]
+
+v_max_u32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x28]
+
+v_max_u32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x28]
+
+v_max_u32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0x04,0x00,0x00]
+
+v_max_u32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x28,0xd2,0x80,0x04,0x00,0x00]
+
+v_max_u32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x28,0xd2,0xc1,0x04,0x00,0x00]
+
+v_max_u32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x28,0xd2,0xf0,0x04,0x00,0x00]
+
+v_max_u32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x28,0xd2,0xf7,0x04,0x00,0x00]
+
+v_max_u32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x28,0xd2,0x01,0x05,0x00,0x00]
+
+v_max_u32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x28,0xd2,0xff,0x05,0x00,0x00]
+
+v_max_u32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0xce,0x00,0x00]
+
+v_max_u32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0xd0,0x00,0x00]
+
+v_max_u32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0xd2,0x00,0x00]
+
+v_max_u32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0xd4,0x00,0x00]
+
+v_max_u32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0xd6,0x00,0x00]
+
+v_max_u32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0xd8,0x00,0x00]
+
+v_max_u32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0xda,0x00,0x00]
+
+v_max_u32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0xdc,0x00,0x00]
+
+v_max_u32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0xde,0x00,0x00]
+
+v_max_u32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0xf6,0x00,0x00]
+
+v_max_u32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0xf8,0x00,0x00]
+
+v_max_u32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0xfc,0x00,0x00]
+
+v_max_u32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0xfe,0x00,0x00]
+
+v_max_u32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0x00,0x01,0x00]
+
+v_max_u32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0x82,0x01,0x00]
+
+v_max_u32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0xe0,0x01,0x00]
+
+v_max_u32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0xee,0x01,0x00]
+
+v_max_u32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0x04,0x02,0x00]
+
+v_max_u32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x28,0xd2,0x80,0xfe,0x03,0x00]
+
+v_lshr_b32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x2a]
+
+v_lshr_b32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x2b]
+
+v_lshr_b32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x2a]
+
+v_lshr_b32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x2a]
+
+v_lshr_b32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x2a]
+
+v_lshr_b32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x2a]
+
+v_lshr_b32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x2a]
+
+v_lshr_b32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x2a]
+
+v_lshr_b32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x2a]
+
+v_lshr_b32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x2a]
+
+v_lshr_b32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x2a]
+
+v_lshr_b32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x2a]
+
+v_lshr_b32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x2a]
+
+v_lshr_b32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x2a]
+
+v_lshr_b32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x2a]
+
+v_lshr_b32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x2a]
+
+v_lshr_b32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x2a]
+
+v_lshr_b32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x2a]
+
+v_lshr_b32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x2a]
+
+v_lshr_b32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x2a,0x56,0x34,0x12,0xaf]
+
+v_lshr_b32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x2a,0x73,0x72,0x71,0x3f]
+
+v_lshr_b32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x2a]
+
+v_lshr_b32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x2a]
+
+v_lshr_b32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x2a]
+
+v_lshr_b32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0x04,0x00,0x00]
+
+v_lshr_b32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x2a,0xd2,0x80,0x04,0x00,0x00]
+
+v_lshr_b32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x2a,0xd2,0xc1,0x04,0x00,0x00]
+
+v_lshr_b32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x2a,0xd2,0xf0,0x04,0x00,0x00]
+
+v_lshr_b32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x2a,0xd2,0xf7,0x04,0x00,0x00]
+
+v_lshr_b32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x2a,0xd2,0x01,0x05,0x00,0x00]
+
+v_lshr_b32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x2a,0xd2,0xff,0x05,0x00,0x00]
+
+v_lshr_b32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0xce,0x00,0x00]
+
+v_lshr_b32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0xd0,0x00,0x00]
+
+v_lshr_b32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0xd2,0x00,0x00]
+
+v_lshr_b32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0xd4,0x00,0x00]
+
+v_lshr_b32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0xd6,0x00,0x00]
+
+v_lshr_b32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0xd8,0x00,0x00]
+
+v_lshr_b32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0xda,0x00,0x00]
+
+v_lshr_b32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0xdc,0x00,0x00]
+
+v_lshr_b32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0xde,0x00,0x00]
+
+v_lshr_b32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0xf6,0x00,0x00]
+
+v_lshr_b32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0xf8,0x00,0x00]
+
+v_lshr_b32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0xfc,0x00,0x00]
+
+v_lshr_b32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0xfe,0x00,0x00]
+
+v_lshr_b32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0x00,0x01,0x00]
+
+v_lshr_b32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0x82,0x01,0x00]
+
+v_lshr_b32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0xe0,0x01,0x00]
+
+v_lshr_b32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0xee,0x01,0x00]
+
+v_lshr_b32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0x04,0x02,0x00]
+
+v_lshr_b32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x2a,0xd2,0x80,0xfe,0x03,0x00]
+
+v_lshrrev_b32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x2c]
+
+v_lshrrev_b32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x2d]
+
+v_lshrrev_b32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x2c]
+
+v_lshrrev_b32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x2c]
+
+v_lshrrev_b32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x2c]
+
+v_lshrrev_b32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x2c]
+
+v_lshrrev_b32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x2c]
+
+v_lshrrev_b32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x2c]
+
+v_lshrrev_b32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x2c]
+
+v_lshrrev_b32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x2c]
+
+v_lshrrev_b32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x2c]
+
+v_lshrrev_b32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x2c]
+
+v_lshrrev_b32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x2c]
+
+v_lshrrev_b32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x2c]
+
+v_lshrrev_b32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x2c]
+
+v_lshrrev_b32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x2c]
+
+v_lshrrev_b32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x2c]
+
+v_lshrrev_b32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x2c]
+
+v_lshrrev_b32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x2c]
+
+v_lshrrev_b32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x2c,0x56,0x34,0x12,0xaf]
+
+v_lshrrev_b32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x2c,0x73,0x72,0x71,0x3f]
+
+v_lshrrev_b32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x2c]
+
+v_lshrrev_b32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x2c]
+
+v_lshrrev_b32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x2c]
+
+v_lshrrev_b32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0x04,0x00,0x00]
+
+v_lshrrev_b32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x2c,0xd2,0x80,0x04,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x2c,0xd2,0xc1,0x04,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x2c,0xd2,0xf0,0x04,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x2c,0xd2,0xf7,0x04,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x2c,0xd2,0x01,0x05,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x2c,0xd2,0xff,0x05,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0xce,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0xd0,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0xd2,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0xd4,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0xd6,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0xd8,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0xda,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0xdc,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0xde,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0xf6,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0xf8,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0xfc,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0xfe,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0x00,0x01,0x00]
+
+v_lshrrev_b32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0x82,0x01,0x00]
+
+v_lshrrev_b32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0xe0,0x01,0x00]
+
+v_lshrrev_b32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0xee,0x01,0x00]
+
+v_lshrrev_b32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0x04,0x02,0x00]
+
+v_lshrrev_b32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x2c,0xd2,0x80,0xfe,0x03,0x00]
+
+v_ashr_i32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x2e]
+
+v_ashr_i32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x2f]
+
+v_ashr_i32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x2e]
+
+v_ashr_i32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x2e]
+
+v_ashr_i32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x2e]
+
+v_ashr_i32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x2e]
+
+v_ashr_i32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x2e]
+
+v_ashr_i32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x2e]
+
+v_ashr_i32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x2e]
+
+v_ashr_i32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x2e]
+
+v_ashr_i32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x2e]
+
+v_ashr_i32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x2e]
+
+v_ashr_i32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x2e]
+
+v_ashr_i32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x2e]
+
+v_ashr_i32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x2e]
+
+v_ashr_i32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x2e]
+
+v_ashr_i32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x2e]
+
+v_ashr_i32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x2e]
+
+v_ashr_i32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x2e]
+
+v_ashr_i32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x2e,0x56,0x34,0x12,0xaf]
+
+v_ashr_i32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x2e,0x73,0x72,0x71,0x3f]
+
+v_ashr_i32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x2e]
+
+v_ashr_i32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x2e]
+
+v_ashr_i32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x2e]
+
+v_ashr_i32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0x04,0x00,0x00]
+
+v_ashr_i32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x2e,0xd2,0x80,0x04,0x00,0x00]
+
+v_ashr_i32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x2e,0xd2,0xc1,0x04,0x00,0x00]
+
+v_ashr_i32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x2e,0xd2,0xf0,0x04,0x00,0x00]
+
+v_ashr_i32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x2e,0xd2,0xf7,0x04,0x00,0x00]
+
+v_ashr_i32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x2e,0xd2,0x01,0x05,0x00,0x00]
+
+v_ashr_i32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x2e,0xd2,0xff,0x05,0x00,0x00]
+
+v_ashr_i32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0xce,0x00,0x00]
+
+v_ashr_i32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0xd0,0x00,0x00]
+
+v_ashr_i32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0xd2,0x00,0x00]
+
+v_ashr_i32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0xd4,0x00,0x00]
+
+v_ashr_i32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0xd6,0x00,0x00]
+
+v_ashr_i32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0xd8,0x00,0x00]
+
+v_ashr_i32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0xda,0x00,0x00]
+
+v_ashr_i32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0xdc,0x00,0x00]
+
+v_ashr_i32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0xde,0x00,0x00]
+
+v_ashr_i32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0xf6,0x00,0x00]
+
+v_ashr_i32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0xf8,0x00,0x00]
+
+v_ashr_i32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0xfc,0x00,0x00]
+
+v_ashr_i32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0xfe,0x00,0x00]
+
+v_ashr_i32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0x00,0x01,0x00]
+
+v_ashr_i32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0x82,0x01,0x00]
+
+v_ashr_i32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0xe0,0x01,0x00]
+
+v_ashr_i32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0xee,0x01,0x00]
+
+v_ashr_i32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0x04,0x02,0x00]
+
+v_ashr_i32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x2e,0xd2,0x80,0xfe,0x03,0x00]
+
+v_ashrrev_i32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x30]
+
+v_ashrrev_i32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x31]
+
+v_ashrrev_i32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x30]
+
+v_ashrrev_i32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x30]
+
+v_ashrrev_i32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x30]
+
+v_ashrrev_i32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x30]
+
+v_ashrrev_i32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x30]
+
+v_ashrrev_i32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x30]
+
+v_ashrrev_i32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x30]
+
+v_ashrrev_i32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x30]
+
+v_ashrrev_i32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x30]
+
+v_ashrrev_i32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x30]
+
+v_ashrrev_i32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x30]
+
+v_ashrrev_i32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x30]
+
+v_ashrrev_i32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x30]
+
+v_ashrrev_i32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x30]
+
+v_ashrrev_i32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x30]
+
+v_ashrrev_i32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x30]
+
+v_ashrrev_i32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x30]
+
+v_ashrrev_i32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x30,0x56,0x34,0x12,0xaf]
+
+v_ashrrev_i32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x30,0x73,0x72,0x71,0x3f]
+
+v_ashrrev_i32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x30]
+
+v_ashrrev_i32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x30]
+
+v_ashrrev_i32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x30]
+
+v_ashrrev_i32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0x04,0x00,0x00]
+
+v_ashrrev_i32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x30,0xd2,0x80,0x04,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x30,0xd2,0xc1,0x04,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x30,0xd2,0xf0,0x04,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x30,0xd2,0xf7,0x04,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x30,0xd2,0x01,0x05,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x30,0xd2,0xff,0x05,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0xce,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0xd0,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0xd2,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0xd4,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0xd6,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0xd8,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0xda,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0xdc,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0xde,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0xf6,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0xf8,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0xfc,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0xfe,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0x00,0x01,0x00]
+
+v_ashrrev_i32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0x82,0x01,0x00]
+
+v_ashrrev_i32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0xe0,0x01,0x00]
+
+v_ashrrev_i32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0xee,0x01,0x00]
+
+v_ashrrev_i32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0x04,0x02,0x00]
+
+v_ashrrev_i32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x30,0xd2,0x80,0xfe,0x03,0x00]
+
+v_lshl_b32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x32]
+
+v_lshl_b32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x33]
+
+v_lshl_b32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x32]
+
+v_lshl_b32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x32]
+
+v_lshl_b32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x32]
+
+v_lshl_b32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x32]
+
+v_lshl_b32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x32]
+
+v_lshl_b32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x32]
+
+v_lshl_b32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x32]
+
+v_lshl_b32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x32]
+
+v_lshl_b32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x32]
+
+v_lshl_b32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x32]
+
+v_lshl_b32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x32]
+
+v_lshl_b32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x32]
+
+v_lshl_b32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x32]
+
+v_lshl_b32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x32]
+
+v_lshl_b32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x32]
+
+v_lshl_b32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x32]
+
+v_lshl_b32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x32]
+
+v_lshl_b32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x32,0x56,0x34,0x12,0xaf]
+
+v_lshl_b32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x32,0x73,0x72,0x71,0x3f]
+
+v_lshl_b32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x32]
+
+v_lshl_b32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x32]
+
+v_lshl_b32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x32]
+
+v_lshl_b32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0x04,0x00,0x00]
+
+v_lshl_b32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x32,0xd2,0x80,0x04,0x00,0x00]
+
+v_lshl_b32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x32,0xd2,0xc1,0x04,0x00,0x00]
+
+v_lshl_b32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x32,0xd2,0xf0,0x04,0x00,0x00]
+
+v_lshl_b32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x32,0xd2,0xf7,0x04,0x00,0x00]
+
+v_lshl_b32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x32,0xd2,0x01,0x05,0x00,0x00]
+
+v_lshl_b32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x32,0xd2,0xff,0x05,0x00,0x00]
+
+v_lshl_b32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0xce,0x00,0x00]
+
+v_lshl_b32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0xd0,0x00,0x00]
+
+v_lshl_b32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0xd2,0x00,0x00]
+
+v_lshl_b32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0xd4,0x00,0x00]
+
+v_lshl_b32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0xd6,0x00,0x00]
+
+v_lshl_b32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0xd8,0x00,0x00]
+
+v_lshl_b32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0xda,0x00,0x00]
+
+v_lshl_b32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0xdc,0x00,0x00]
+
+v_lshl_b32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0xde,0x00,0x00]
+
+v_lshl_b32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0xf6,0x00,0x00]
+
+v_lshl_b32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0xf8,0x00,0x00]
+
+v_lshl_b32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0xfc,0x00,0x00]
+
+v_lshl_b32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0xfe,0x00,0x00]
+
+v_lshl_b32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0x00,0x01,0x00]
+
+v_lshl_b32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0x82,0x01,0x00]
+
+v_lshl_b32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0xe0,0x01,0x00]
+
+v_lshl_b32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0xee,0x01,0x00]
+
+v_lshl_b32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0x04,0x02,0x00]
+
+v_lshl_b32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x32,0xd2,0x80,0xfe,0x03,0x00]
+
+v_lshlrev_b32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x34]
+
+v_lshlrev_b32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x35]
+
+v_lshlrev_b32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x34]
+
+v_lshlrev_b32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x34]
+
+v_lshlrev_b32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x34]
+
+v_lshlrev_b32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x34]
+
+v_lshlrev_b32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x34]
+
+v_lshlrev_b32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x34]
+
+v_lshlrev_b32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x34]
+
+v_lshlrev_b32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x34]
+
+v_lshlrev_b32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x34]
+
+v_lshlrev_b32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x34]
+
+v_lshlrev_b32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x34]
+
+v_lshlrev_b32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x34]
+
+v_lshlrev_b32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x34]
+
+v_lshlrev_b32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x34]
+
+v_lshlrev_b32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x34]
+
+v_lshlrev_b32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x34]
+
+v_lshlrev_b32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x34]
+
+v_lshlrev_b32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x34,0x56,0x34,0x12,0xaf]
+
+v_lshlrev_b32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x34,0x73,0x72,0x71,0x3f]
+
+v_lshlrev_b32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x34]
+
+v_lshlrev_b32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x34]
+
+v_lshlrev_b32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x34]
+
+v_lshlrev_b32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0x04,0x00,0x00]
+
+v_lshlrev_b32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x34,0xd2,0x80,0x04,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x34,0xd2,0xc1,0x04,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x34,0xd2,0xf0,0x04,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x34,0xd2,0xf7,0x04,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x34,0xd2,0x01,0x05,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x34,0xd2,0xff,0x05,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0xce,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0xd0,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0xd2,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0xd4,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0xd6,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0xd8,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0xda,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0xdc,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0xde,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0xf6,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0xf8,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0xfc,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0xfe,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0x00,0x01,0x00]
+
+v_lshlrev_b32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0x82,0x01,0x00]
+
+v_lshlrev_b32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0xe0,0x01,0x00]
+
+v_lshlrev_b32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0xee,0x01,0x00]
+
+v_lshlrev_b32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0x04,0x02,0x00]
+
+v_lshlrev_b32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x34,0xd2,0x80,0xfe,0x03,0x00]
+
+v_and_b32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x36]
+
+v_and_b32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x37]
+
+v_and_b32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x36]
+
+v_and_b32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x36]
+
+v_and_b32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x36]
+
+v_and_b32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x36]
+
+v_and_b32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x36]
+
+v_and_b32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x36]
+
+v_and_b32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x36]
+
+v_and_b32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x36]
+
+v_and_b32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x36]
+
+v_and_b32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x36]
+
+v_and_b32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x36]
+
+v_and_b32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x36]
+
+v_and_b32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x36]
+
+v_and_b32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x36]
+
+v_and_b32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x36]
+
+v_and_b32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x36]
+
+v_and_b32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x36]
+
+v_and_b32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x36,0x56,0x34,0x12,0xaf]
+
+v_and_b32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x36,0x73,0x72,0x71,0x3f]
+
+v_and_b32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x36]
+
+v_and_b32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x36]
+
+v_and_b32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x36]
+
+v_and_b32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0x04,0x00,0x00]
+
+v_and_b32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x36,0xd2,0x80,0x04,0x00,0x00]
+
+v_and_b32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x36,0xd2,0xc1,0x04,0x00,0x00]
+
+v_and_b32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x36,0xd2,0xf0,0x04,0x00,0x00]
+
+v_and_b32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x36,0xd2,0xf7,0x04,0x00,0x00]
+
+v_and_b32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x36,0xd2,0x01,0x05,0x00,0x00]
+
+v_and_b32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x36,0xd2,0xff,0x05,0x00,0x00]
+
+v_and_b32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0xce,0x00,0x00]
+
+v_and_b32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0xd0,0x00,0x00]
+
+v_and_b32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0xd2,0x00,0x00]
+
+v_and_b32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0xd4,0x00,0x00]
+
+v_and_b32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0xd6,0x00,0x00]
+
+v_and_b32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0xd8,0x00,0x00]
+
+v_and_b32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0xda,0x00,0x00]
+
+v_and_b32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0xdc,0x00,0x00]
+
+v_and_b32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0xde,0x00,0x00]
+
+v_and_b32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0xf6,0x00,0x00]
+
+v_and_b32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0xf8,0x00,0x00]
+
+v_and_b32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0xfc,0x00,0x00]
+
+v_and_b32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0xfe,0x00,0x00]
+
+v_and_b32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0x00,0x01,0x00]
+
+v_and_b32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0x82,0x01,0x00]
+
+v_and_b32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0xe0,0x01,0x00]
+
+v_and_b32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0xee,0x01,0x00]
+
+v_and_b32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0x04,0x02,0x00]
+
+v_and_b32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x36,0xd2,0x80,0xfe,0x03,0x00]
+
+v_or_b32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x38]
+
+v_or_b32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x39]
+
+v_or_b32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x38]
+
+v_or_b32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x38]
+
+v_or_b32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x38]
+
+v_or_b32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x38]
+
+v_or_b32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x38]
+
+v_or_b32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x38]
+
+v_or_b32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x38]
+
+v_or_b32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x38]
+
+v_or_b32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x38]
+
+v_or_b32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x38]
+
+v_or_b32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x38]
+
+v_or_b32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x38]
+
+v_or_b32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x38]
+
+v_or_b32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x38]
+
+v_or_b32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x38]
+
+v_or_b32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x38]
+
+v_or_b32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x38]
+
+v_or_b32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x38,0x56,0x34,0x12,0xaf]
+
+v_or_b32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x38,0x73,0x72,0x71,0x3f]
+
+v_or_b32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x38]
+
+v_or_b32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x38]
+
+v_or_b32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x38]
+
+v_or_b32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0x04,0x00,0x00]
+
+v_or_b32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x38,0xd2,0x80,0x04,0x00,0x00]
+
+v_or_b32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x38,0xd2,0xc1,0x04,0x00,0x00]
+
+v_or_b32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x38,0xd2,0xf0,0x04,0x00,0x00]
+
+v_or_b32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x38,0xd2,0xf7,0x04,0x00,0x00]
+
+v_or_b32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x38,0xd2,0x01,0x05,0x00,0x00]
+
+v_or_b32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x38,0xd2,0xff,0x05,0x00,0x00]
+
+v_or_b32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0xce,0x00,0x00]
+
+v_or_b32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0xd0,0x00,0x00]
+
+v_or_b32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0xd2,0x00,0x00]
+
+v_or_b32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0xd4,0x00,0x00]
+
+v_or_b32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0xd6,0x00,0x00]
+
+v_or_b32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0xd8,0x00,0x00]
+
+v_or_b32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0xda,0x00,0x00]
+
+v_or_b32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0xdc,0x00,0x00]
+
+v_or_b32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0xde,0x00,0x00]
+
+v_or_b32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0xf6,0x00,0x00]
+
+v_or_b32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0xf8,0x00,0x00]
+
+v_or_b32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0xfc,0x00,0x00]
+
+v_or_b32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0xfe,0x00,0x00]
+
+v_or_b32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0x00,0x01,0x00]
+
+v_or_b32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0x82,0x01,0x00]
+
+v_or_b32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0xe0,0x01,0x00]
+
+v_or_b32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0xee,0x01,0x00]
+
+v_or_b32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0x04,0x02,0x00]
+
+v_or_b32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x38,0xd2,0x80,0xfe,0x03,0x00]
+
+v_xor_b32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x3a]
+
+v_xor_b32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x3b]
+
+v_xor_b32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x3a]
+
+v_xor_b32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x3a]
+
+v_xor_b32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x3a]
+
+v_xor_b32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x3a]
+
+v_xor_b32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x3a]
+
+v_xor_b32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x3a]
+
+v_xor_b32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x3a]
+
+v_xor_b32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x3a]
+
+v_xor_b32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x3a]
+
+v_xor_b32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x3a]
+
+v_xor_b32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x3a]
+
+v_xor_b32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x3a]
+
+v_xor_b32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x3a]
+
+v_xor_b32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x3a]
+
+v_xor_b32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x3a]
+
+v_xor_b32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x3a]
+
+v_xor_b32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x3a]
+
+v_xor_b32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x3a,0x56,0x34,0x12,0xaf]
+
+v_xor_b32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x3a,0x73,0x72,0x71,0x3f]
+
+v_xor_b32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x3a]
+
+v_xor_b32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x3a]
+
+v_xor_b32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x3a]
+
+v_xor_b32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0x04,0x00,0x00]
+
+v_xor_b32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x3a,0xd2,0x80,0x04,0x00,0x00]
+
+v_xor_b32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x3a,0xd2,0xc1,0x04,0x00,0x00]
+
+v_xor_b32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x3a,0xd2,0xf0,0x04,0x00,0x00]
+
+v_xor_b32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x3a,0xd2,0xf7,0x04,0x00,0x00]
+
+v_xor_b32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x3a,0xd2,0x01,0x05,0x00,0x00]
+
+v_xor_b32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x3a,0xd2,0xff,0x05,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0xce,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0xd0,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0xd2,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0xd4,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0xd6,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0xd8,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0xda,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0xdc,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0xde,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0xf6,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0xf8,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0xfc,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0xfe,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0x00,0x01,0x00]
+
+v_xor_b32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0x82,0x01,0x00]
+
+v_xor_b32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0xe0,0x01,0x00]
+
+v_xor_b32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0xee,0x01,0x00]
+
+v_xor_b32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0x04,0x02,0x00]
+
+v_xor_b32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x3a,0xd2,0x80,0xfe,0x03,0x00]
+
+v_bfm_b32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x3c]
+
+v_bfm_b32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x3d]
+
+v_bfm_b32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x3c]
+
+v_bfm_b32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x3c]
+
+v_bfm_b32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x3c]
+
+v_bfm_b32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x3c]
+
+v_bfm_b32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x3c]
+
+v_bfm_b32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x3c]
+
+v_bfm_b32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x3c]
+
+v_bfm_b32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x3c]
+
+v_bfm_b32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x3c]
+
+v_bfm_b32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x3c]
+
+v_bfm_b32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x3c]
+
+v_bfm_b32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x3c]
+
+v_bfm_b32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x3c]
+
+v_bfm_b32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x3c]
+
+v_bfm_b32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x3c]
+
+v_bfm_b32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x3c]
+
+v_bfm_b32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x3c]
+
+v_bfm_b32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x3c,0x56,0x34,0x12,0xaf]
+
+v_bfm_b32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x3c,0x73,0x72,0x71,0x3f]
+
+v_bfm_b32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x3c]
+
+v_bfm_b32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x3c]
+
+v_bfm_b32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x3c]
+
+v_bfm_b32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0x04,0x00,0x00]
+
+v_bfm_b32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x3c,0xd2,0x80,0x04,0x00,0x00]
+
+v_bfm_b32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x3c,0xd2,0xc1,0x04,0x00,0x00]
+
+v_bfm_b32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x3c,0xd2,0xf0,0x04,0x00,0x00]
+
+v_bfm_b32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x3c,0xd2,0xf7,0x04,0x00,0x00]
+
+v_bfm_b32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x3c,0xd2,0x01,0x05,0x00,0x00]
+
+v_bfm_b32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x3c,0xd2,0xff,0x05,0x00,0x00]
+
+v_bfm_b32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0xce,0x00,0x00]
+
+v_bfm_b32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0xd0,0x00,0x00]
+
+v_bfm_b32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0xd2,0x00,0x00]
+
+v_bfm_b32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0xd4,0x00,0x00]
+
+v_bfm_b32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0xd6,0x00,0x00]
+
+v_bfm_b32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0xd8,0x00,0x00]
+
+v_bfm_b32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0xda,0x00,0x00]
+
+v_bfm_b32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0xdc,0x00,0x00]
+
+v_bfm_b32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0xde,0x00,0x00]
+
+v_bfm_b32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0xf6,0x00,0x00]
+
+v_bfm_b32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0xf8,0x00,0x00]
+
+v_bfm_b32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0xfc,0x00,0x00]
+
+v_bfm_b32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0xfe,0x00,0x00]
+
+v_bfm_b32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0x00,0x01,0x00]
+
+v_bfm_b32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0x82,0x01,0x00]
+
+v_bfm_b32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0xe0,0x01,0x00]
+
+v_bfm_b32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0xee,0x01,0x00]
+
+v_bfm_b32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0x04,0x02,0x00]
+
+v_bfm_b32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x3c,0xd2,0x80,0xfe,0x03,0x00]
+
+v_mac_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x3e]
+
+v_mac_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x3f]
+
+v_mac_f32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x3e]
+
+v_mac_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x3e]
+
+v_mac_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x3e]
+
+v_mac_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x3e]
+
+v_mac_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x3e]
+
+v_mac_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x3e]
+
+v_mac_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x3e]
+
+v_mac_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x3e]
+
+v_mac_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x3e]
+
+v_mac_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x3e]
+
+v_mac_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x3e]
+
+v_mac_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x3e]
+
+v_mac_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x3e]
+
+v_mac_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x3e]
+
+v_mac_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x3e]
+
+v_mac_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x3e]
+
+v_mac_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x3e]
+
+v_mac_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x3e,0x56,0x34,0x12,0xaf]
+
+v_mac_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x3e,0x73,0x72,0x71,0x3f]
+
+v_mac_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x3e]
+
+v_mac_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x3e]
+
+v_mac_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x3e]
+
+v_mac_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0x05,0x00,0x00]
+
+v_mac_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x3e,0xd2,0x01,0x05,0x00,0x00]
+
+v_mac_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x3e,0xd2,0xff,0x05,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, s103
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xcf,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xd1,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xd3,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xd5,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xd7,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xd9,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xdb,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xdd,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xdf,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xf7,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xf9,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xfd,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xff,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xfb,0x01,0x00]
+
+v_mac_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0x05,0x02,0x00]
+
+v_mac_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0xff,0x03,0x00]
+
+v_mac_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0x05,0x00,0x20]
+
+v_mac_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0x05,0x00,0x40]
+
+v_mac_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0x05,0x00,0x60]
+
+v_mac_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x3e,0xd2,0x01,0x05,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x3e,0xd2,0x01,0x05,0x00,0x00]
+
+v_mac_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x3e,0xd2,0x01,0x05,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x08,0x3e,0xd2,0x01,0x05,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0x05,0x00,0x08]
+
+v_mac_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0x05,0x00,0x10]
+
+v_mac_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x3e,0xd2,0x01,0x05,0x00,0x18]
+
+v_madmk_f32 v5, 0, 0x11213141, v3
+// CHECK: [0x80,0x06,0x0a,0x40,0x41,0x31,0x21,0x11]
+
+v_madmk_f32 v255, 0, 0x11213141, v3
+// CHECK: [0x80,0x06,0xfe,0x41,0x41,0x31,0x21,0x11]
+
+v_madmk_f32 v5, -1, 0x11213141, v3
+// CHECK: [0xc1,0x06,0x0a,0x40,0x41,0x31,0x21,0x11]
+
+v_madmk_f32 v5, 0.5, 0x11213141, v3
+// CHECK: [0xf0,0x06,0x0a,0x40,0x41,0x31,0x21,0x11]
+
+v_madmk_f32 v5, -4.0, 0x11213141, v3
+// CHECK: [0xf7,0x06,0x0a,0x40,0x41,0x31,0x21,0x11]
+
+v_madmk_f32 v5, v1, 0x11213141, v3
+// CHECK: [0x01,0x07,0x0a,0x40,0x41,0x31,0x21,0x11]
+
+v_madmk_f32 v5, v255, 0x11213141, v3
+// CHECK: [0xff,0x07,0x0a,0x40,0x41,0x31,0x21,0x11]
+
+v_madmk_f32 v5, 0, 0xa1b1c1d1, v3
+// CHECK: [0x80,0x06,0x0a,0x40,0xd1,0xc1,0xb1,0xa1]
+
+v_madmk_f32 v5, 0, 0x11213141, v255
+// CHECK: [0x80,0xfe,0x0b,0x40,0x41,0x31,0x21,0x11]
+
+v_madak_f32 v5, 0, v2, 0x11213141
+// CHECK: [0x80,0x04,0x0a,0x42,0x41,0x31,0x21,0x11]
+
+v_madak_f32 v255, 0, v2, 0x11213141
+// CHECK: [0x80,0x04,0xfe,0x43,0x41,0x31,0x21,0x11]
+
+v_madak_f32 v5, -1, v2, 0x11213141
+// CHECK: [0xc1,0x04,0x0a,0x42,0x41,0x31,0x21,0x11]
+
+v_madak_f32 v5, 0.5, v2, 0x11213141
+// CHECK: [0xf0,0x04,0x0a,0x42,0x41,0x31,0x21,0x11]
+
+v_madak_f32 v5, -4.0, v2, 0x11213141
+// CHECK: [0xf7,0x04,0x0a,0x42,0x41,0x31,0x21,0x11]
+
+v_madak_f32 v5, v1, v2, 0x11213141
+// CHECK: [0x01,0x05,0x0a,0x42,0x41,0x31,0x21,0x11]
+
+v_madak_f32 v5, v255, v2, 0x11213141
+// CHECK: [0xff,0x05,0x0a,0x42,0x41,0x31,0x21,0x11]
+
+v_madak_f32 v5, 0, v255, 0x11213141
+// CHECK: [0x80,0xfe,0x0b,0x42,0x41,0x31,0x21,0x11]
+
+v_madak_f32 v5, 0, v2, 0xa1b1c1d1
+// CHECK: [0x80,0x04,0x0a,0x42,0xd1,0xc1,0xb1,0xa1]
+
+v_bcnt_u32_b32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x44]
+
+v_bcnt_u32_b32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x45]
+
+v_bcnt_u32_b32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x44,0x56,0x34,0x12,0xaf]
+
+v_bcnt_u32_b32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x44,0x73,0x72,0x71,0x3f]
+
+v_bcnt_u32_b32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x44]
+
+v_bcnt_u32_b32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x44]
+
+v_bcnt_u32_b32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0x04,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x44,0xd2,0x80,0x04,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x44,0xd2,0xc1,0x04,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x44,0xd2,0xf0,0x04,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x44,0xd2,0xf7,0x04,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x44,0xd2,0x01,0x05,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x44,0xd2,0xff,0x05,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0xce,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0xd0,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0xd2,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0xd4,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0xd6,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0xd8,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0xda,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0xdc,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0xde,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0xf6,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0xf8,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0xfc,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0xfe,0x00,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0x00,0x01,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0x82,0x01,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0xe0,0x01,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0xee,0x01,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0x04,0x02,0x00]
+
+v_bcnt_u32_b32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x44,0xd2,0x80,0xfe,0x03,0x00]
+
+v_mbcnt_lo_u32_b32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x47]
+
+v_mbcnt_lo_u32_b32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x46,0x56,0x34,0x12,0xaf]
+
+v_mbcnt_lo_u32_b32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x46,0x73,0x72,0x71,0x3f]
+
+v_mbcnt_lo_u32_b32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x46]
+
+v_mbcnt_lo_u32_b32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x46]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0x04,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x46,0xd2,0x80,0x04,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x46,0xd2,0xc1,0x04,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x46,0xd2,0xf0,0x04,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x46,0xd2,0xf7,0x04,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x46,0xd2,0x01,0x05,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x46,0xd2,0xff,0x05,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0xce,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0xd0,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0xd2,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0xd4,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0xd6,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0xd8,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0xda,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0xdc,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0xde,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0xf6,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0xf8,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0xfc,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0xfe,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0x00,0x01,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0x82,0x01,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0xe0,0x01,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0xee,0x01,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0x04,0x02,0x00]
+
+v_mbcnt_lo_u32_b32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x46,0xd2,0x80,0xfe,0x03,0x00]
+
+v_mbcnt_hi_u32_b32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x49]
+
+v_mbcnt_hi_u32_b32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x48,0x56,0x34,0x12,0xaf]
+
+v_mbcnt_hi_u32_b32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x48,0x73,0x72,0x71,0x3f]
+
+v_mbcnt_hi_u32_b32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x48]
+
+v_mbcnt_hi_u32_b32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x48]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0x04,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x48,0xd2,0x80,0x04,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x48,0xd2,0xc1,0x04,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x48,0xd2,0xf0,0x04,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x48,0xd2,0xf7,0x04,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x48,0xd2,0x01,0x05,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x48,0xd2,0xff,0x05,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0xce,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0xd0,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0xd2,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0xd4,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0xd6,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0xd8,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0xda,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0xdc,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0xde,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0xf6,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0xf8,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0xfc,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0xfe,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0x00,0x01,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0x82,0x01,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0xe0,0x01,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0xee,0x01,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0x04,0x02,0x00]
+
+v_mbcnt_hi_u32_b32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x48,0xd2,0x80,0xfe,0x03,0x00]
+
+v_add_i32 v5, vcc, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x4a]
+
+v_add_i32 v255, vcc, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x4b]
+
+v_add_i32 v5, vcc, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x4a]
+
+v_add_i32 v5, vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x4a]
+
+v_add_i32 v5, vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x4a]
+
+v_add_i32 v5, vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x4a]
+
+v_add_i32 v5, vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x4a]
+
+v_add_i32 v5, vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x4a]
+
+v_add_i32 v5, vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x4a]
+
+v_add_i32 v5, vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x4a]
+
+v_add_i32 v5, vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x4a]
+
+v_add_i32 v5, vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x4a]
+
+v_add_i32 v5, vcc, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x4a]
+
+v_add_i32 v5, vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x4a]
+
+v_add_i32 v5, vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x4a]
+
+v_add_i32 v5, vcc, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x4a]
+
+v_add_i32 v5, vcc, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x4a]
+
+v_add_i32 v5, vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x4a]
+
+v_add_i32 v5, vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x4a]
+
+v_add_i32 v5, vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x4a,0x56,0x34,0x12,0xaf]
+
+v_add_i32 v5, vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x4a,0x73,0x72,0x71,0x3f]
+
+v_add_i32 v5, vcc, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x4a]
+
+v_add_i32 v5, vcc, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x4a]
+
+v_add_i32 v5, vcc, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x4a]
+
+v_add_i32_e64 v5, s[12:13], 0, s2
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0x04,0x00,0x00]
+
+v_add_i32_e64 v255, s[12:13], 0, s2
+// CHECK: [0xff,0x0c,0x4a,0xd2,0x80,0x04,0x00,0x00]
+
+v_add_i32_e64 v5, s[14:15], 0, s2
+// CHECK: [0x05,0x0e,0x4a,0xd2,0x80,0x04,0x00,0x00]
+
+v_add_i32_e64 v5, s[102:103], 0, s2
+// CHECK: [0x05,0x66,0x4a,0xd2,0x80,0x04,0x00,0x00]
+
+v_add_i32_e64 v5, flat_scratch, 0, s2
+// CHECK: [0x05,0x68,0x4a,0xd2,0x80,0x04,0x00,0x00]
+
+v_add_i32_e64 v5, vcc, 0, s2
+// CHECK: [0x05,0x6a,0x4a,0xd2,0x80,0x04,0x00,0x00]
+
+v_add_i32_e64 v5, tba, 0, s2
+// CHECK: [0x05,0x6c,0x4a,0xd2,0x80,0x04,0x00,0x00]
+
+v_add_i32_e64 v5, tma, 0, s2
+// CHECK: [0x05,0x6e,0x4a,0xd2,0x80,0x04,0x00,0x00]
+
+v_add_i32_e64 v5, ttmp[10:11], 0, s2
+// CHECK: [0x05,0x7a,0x4a,0xd2,0x80,0x04,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], -1, s2
+// CHECK: [0x05,0x0c,0x4a,0xd2,0xc1,0x04,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0.5, s2
+// CHECK: [0x05,0x0c,0x4a,0xd2,0xf0,0x04,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], -4.0, s2
+// CHECK: [0x05,0x0c,0x4a,0xd2,0xf7,0x04,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], v1, s2
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x01,0x05,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], v255, s2
+// CHECK: [0x05,0x0c,0x4a,0xd2,0xff,0x05,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, s103
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0xce,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, flat_scratch_lo
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0xd0,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, flat_scratch_hi
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0xd2,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, vcc_lo
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0xd4,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, vcc_hi
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0xd6,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, tba_lo
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0xd8,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, tba_hi
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0xda,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, tma_lo
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0xdc,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, tma_hi
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0xde,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, ttmp11
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0xf6,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, m0
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0xf8,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, exec_lo
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0xfc,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, exec_hi
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0xfe,0x00,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, 0
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0x00,0x01,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, -1
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0x82,0x01,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, 0.5
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0xe0,0x01,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, -4.0
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0xee,0x01,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, v2
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0x04,0x02,0x00]
+
+v_add_i32_e64 v5, s[12:13], 0, v255
+// CHECK: [0x05,0x0c,0x4a,0xd2,0x80,0xfe,0x03,0x00]
+
+v_sub_i32 v5, vcc, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x4c]
+
+v_sub_i32 v255, vcc, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x4d]
+
+v_sub_i32 v5, vcc, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x4c,0x56,0x34,0x12,0xaf]
+
+v_sub_i32 v5, vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x4c,0x73,0x72,0x71,0x3f]
+
+v_sub_i32 v5, vcc, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x4c]
+
+v_sub_i32 v5, vcc, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x4c]
+
+v_sub_i32_e64 v5, s[12:13], 0, s2
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0x04,0x00,0x00]
+
+v_sub_i32_e64 v255, s[12:13], 0, s2
+// CHECK: [0xff,0x0c,0x4c,0xd2,0x80,0x04,0x00,0x00]
+
+v_sub_i32_e64 v5, s[14:15], 0, s2
+// CHECK: [0x05,0x0e,0x4c,0xd2,0x80,0x04,0x00,0x00]
+
+v_sub_i32_e64 v5, s[102:103], 0, s2
+// CHECK: [0x05,0x66,0x4c,0xd2,0x80,0x04,0x00,0x00]
+
+v_sub_i32_e64 v5, flat_scratch, 0, s2
+// CHECK: [0x05,0x68,0x4c,0xd2,0x80,0x04,0x00,0x00]
+
+v_sub_i32_e64 v5, vcc, 0, s2
+// CHECK: [0x05,0x6a,0x4c,0xd2,0x80,0x04,0x00,0x00]
+
+v_sub_i32_e64 v5, tba, 0, s2
+// CHECK: [0x05,0x6c,0x4c,0xd2,0x80,0x04,0x00,0x00]
+
+v_sub_i32_e64 v5, tma, 0, s2
+// CHECK: [0x05,0x6e,0x4c,0xd2,0x80,0x04,0x00,0x00]
+
+v_sub_i32_e64 v5, ttmp[10:11], 0, s2
+// CHECK: [0x05,0x7a,0x4c,0xd2,0x80,0x04,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], -1, s2
+// CHECK: [0x05,0x0c,0x4c,0xd2,0xc1,0x04,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0.5, s2
+// CHECK: [0x05,0x0c,0x4c,0xd2,0xf0,0x04,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], -4.0, s2
+// CHECK: [0x05,0x0c,0x4c,0xd2,0xf7,0x04,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], v1, s2
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x01,0x05,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], v255, s2
+// CHECK: [0x05,0x0c,0x4c,0xd2,0xff,0x05,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, s103
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0xce,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, flat_scratch_lo
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0xd0,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, flat_scratch_hi
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0xd2,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, vcc_lo
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0xd4,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, vcc_hi
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0xd6,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, tba_lo
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0xd8,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, tba_hi
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0xda,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, tma_lo
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0xdc,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, tma_hi
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0xde,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, ttmp11
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0xf6,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, m0
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0xf8,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, exec_lo
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0xfc,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, exec_hi
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0xfe,0x00,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, 0
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0x00,0x01,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, -1
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0x82,0x01,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, 0.5
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0xe0,0x01,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, -4.0
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0xee,0x01,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, v2
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0x04,0x02,0x00]
+
+v_sub_i32_e64 v5, s[12:13], 0, v255
+// CHECK: [0x05,0x0c,0x4c,0xd2,0x80,0xfe,0x03,0x00]
+
+v_subrev_i32 v5, vcc, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x4e]
+
+v_subrev_i32 v255, vcc, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x4f]
+
+v_subrev_i32 v5, vcc, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x4e,0x56,0x34,0x12,0xaf]
+
+v_subrev_i32 v5, vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x4e,0x73,0x72,0x71,0x3f]
+
+v_subrev_i32 v5, vcc, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x4e]
+
+v_subrev_i32 v5, vcc, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x4e]
+
+v_subrev_i32_e64 v5, s[12:13], 0, s2
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0x04,0x00,0x00]
+
+v_subrev_i32_e64 v255, s[12:13], 0, s2
+// CHECK: [0xff,0x0c,0x4e,0xd2,0x80,0x04,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[14:15], 0, s2
+// CHECK: [0x05,0x0e,0x4e,0xd2,0x80,0x04,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[102:103], 0, s2
+// CHECK: [0x05,0x66,0x4e,0xd2,0x80,0x04,0x00,0x00]
+
+v_subrev_i32_e64 v5, flat_scratch, 0, s2
+// CHECK: [0x05,0x68,0x4e,0xd2,0x80,0x04,0x00,0x00]
+
+v_subrev_i32_e64 v5, vcc, 0, s2
+// CHECK: [0x05,0x6a,0x4e,0xd2,0x80,0x04,0x00,0x00]
+
+v_subrev_i32_e64 v5, tba, 0, s2
+// CHECK: [0x05,0x6c,0x4e,0xd2,0x80,0x04,0x00,0x00]
+
+v_subrev_i32_e64 v5, tma, 0, s2
+// CHECK: [0x05,0x6e,0x4e,0xd2,0x80,0x04,0x00,0x00]
+
+v_subrev_i32_e64 v5, ttmp[10:11], 0, s2
+// CHECK: [0x05,0x7a,0x4e,0xd2,0x80,0x04,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], -1, s2
+// CHECK: [0x05,0x0c,0x4e,0xd2,0xc1,0x04,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0.5, s2
+// CHECK: [0x05,0x0c,0x4e,0xd2,0xf0,0x04,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], -4.0, s2
+// CHECK: [0x05,0x0c,0x4e,0xd2,0xf7,0x04,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], v1, s2
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x01,0x05,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], v255, s2
+// CHECK: [0x05,0x0c,0x4e,0xd2,0xff,0x05,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, s103
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0xce,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, flat_scratch_lo
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0xd0,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, flat_scratch_hi
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0xd2,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, vcc_lo
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0xd4,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, vcc_hi
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0xd6,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, tba_lo
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0xd8,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, tba_hi
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0xda,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, tma_lo
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0xdc,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, tma_hi
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0xde,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, ttmp11
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0xf6,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, m0
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0xf8,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, exec_lo
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0xfc,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, exec_hi
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0xfe,0x00,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, 0
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0x00,0x01,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, -1
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0x82,0x01,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, 0.5
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0xe0,0x01,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, -4.0
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0xee,0x01,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, v2
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0x04,0x02,0x00]
+
+v_subrev_i32_e64 v5, s[12:13], 0, v255
+// CHECK: [0x05,0x0c,0x4e,0xd2,0x80,0xfe,0x03,0x00]
+
+v_addc_u32 v5, vcc, 0, v2, vcc
+// CHECK: [0x80,0x04,0x0a,0x50]
+
+v_addc_u32 v255, vcc, 0, v2, vcc
+// CHECK: [0x80,0x04,0xfe,0x51]
+
+v_addc_u32 v5, vcc, -1, v2, vcc
+// CHECK: [0xc1,0x04,0x0a,0x50]
+
+v_addc_u32 v5, vcc, 0.5, v2, vcc
+// CHECK: [0xf0,0x04,0x0a,0x50]
+
+v_addc_u32 v5, vcc, -4.0, v2, vcc
+// CHECK: [0xf7,0x04,0x0a,0x50]
+
+v_addc_u32 v5, vcc, v1, v2, vcc
+// CHECK: [0x01,0x05,0x0a,0x50]
+
+v_addc_u32 v5, vcc, v255, v2, vcc
+// CHECK: [0xff,0x05,0x0a,0x50]
+
+v_addc_u32 v5, vcc, 0, v255, vcc
+// CHECK: [0x80,0xfe,0x0b,0x50]
+
+v_addc_u32_e64 v5, s[12:13], 0, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x50,0xd2,0x80,0x00,0x19,0x00]
+
+v_addc_u32_e64 v255, s[12:13], 0, 0, s[6:7]
+// CHECK: [0xff,0x0c,0x50,0xd2,0x80,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, s[14:15], 0, 0, s[6:7]
+// CHECK: [0x05,0x0e,0x50,0xd2,0x80,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, s[102:103], 0, 0, s[6:7]
+// CHECK: [0x05,0x66,0x50,0xd2,0x80,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, flat_scratch, 0, 0, s[6:7]
+// CHECK: [0x05,0x68,0x50,0xd2,0x80,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, vcc, 0, 0, s[6:7]
+// CHECK: [0x05,0x6a,0x50,0xd2,0x80,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, tba, 0, 0, s[6:7]
+// CHECK: [0x05,0x6c,0x50,0xd2,0x80,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, tma, 0, 0, s[6:7]
+// CHECK: [0x05,0x6e,0x50,0xd2,0x80,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, ttmp[10:11], 0, 0, s[6:7]
+// CHECK: [0x05,0x7a,0x50,0xd2,0x80,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, s[12:13], -1, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x50,0xd2,0xc1,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, s[12:13], 0.5, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x50,0xd2,0xf0,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, s[12:13], -4.0, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x50,0xd2,0xf7,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, s[12:13], v1, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x50,0xd2,0x01,0x01,0x19,0x00]
+
+v_addc_u32_e64 v5, s[12:13], v255, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x50,0xd2,0xff,0x01,0x19,0x00]
+
+v_addc_u32_e64 v5, s[12:13], 0, -1, s[6:7]
+// CHECK: [0x05,0x0c,0x50,0xd2,0x80,0x82,0x19,0x00]
+
+v_addc_u32_e64 v5, s[12:13], 0, 0.5, s[6:7]
+// CHECK: [0x05,0x0c,0x50,0xd2,0x80,0xe0,0x19,0x00]
+
+v_addc_u32_e64 v5, s[12:13], 0, -4.0, s[6:7]
+// CHECK: [0x05,0x0c,0x50,0xd2,0x80,0xee,0x19,0x00]
+
+v_addc_u32_e64 v5, s[12:13], 0, v2, s[6:7]
+// CHECK: [0x05,0x0c,0x50,0xd2,0x80,0x04,0x1a,0x00]
+
+v_addc_u32_e64 v5, s[12:13], 0, v255, s[6:7]
+// CHECK: [0x05,0x0c,0x50,0xd2,0x80,0xfe,0x1b,0x00]
+
+v_addc_u32_e64 v5, s[12:13], 0, 0, s[8:9]
+// CHECK: [0x05,0x0c,0x50,0xd2,0x80,0x00,0x21,0x00]
+
+v_addc_u32_e64 v5, s[12:13], 0, 0, s[102:103]
+// CHECK: [0x05,0x0c,0x50,0xd2,0x80,0x00,0x99,0x01]
+
+v_addc_u32_e64 v5, s[12:13], 0, 0, flat_scratch
+// CHECK: [0x05,0x0c,0x50,0xd2,0x80,0x00,0xa1,0x01]
+
+v_addc_u32_e64 v5, s[12:13], 0, 0, vcc
+// CHECK: [0x05,0x0c,0x50,0xd2,0x80,0x00,0xa9,0x01]
+
+v_addc_u32_e64 v5, s[12:13], 0, 0, tba
+// CHECK: [0x05,0x0c,0x50,0xd2,0x80,0x00,0xb1,0x01]
+
+v_addc_u32_e64 v5, s[12:13], 0, 0, tma
+// CHECK: [0x05,0x0c,0x50,0xd2,0x80,0x00,0xb9,0x01]
+
+v_addc_u32_e64 v5, s[12:13], 0, 0, ttmp[10:11]
+// CHECK: [0x05,0x0c,0x50,0xd2,0x80,0x00,0xe9,0x01]
+
+v_subb_u32 v5, vcc, 0, v2, vcc
+// CHECK: [0x80,0x04,0x0a,0x52]
+
+v_subb_u32 v255, vcc, 0, v2, vcc
+// CHECK: [0x80,0x04,0xfe,0x53]
+
+v_subb_u32 v5, vcc, -1, v2, vcc
+// CHECK: [0xc1,0x04,0x0a,0x52]
+
+v_subb_u32 v5, vcc, 0.5, v2, vcc
+// CHECK: [0xf0,0x04,0x0a,0x52]
+
+v_subb_u32 v5, vcc, -4.0, v2, vcc
+// CHECK: [0xf7,0x04,0x0a,0x52]
+
+v_subb_u32 v5, vcc, v1, v2, vcc
+// CHECK: [0x01,0x05,0x0a,0x52]
+
+v_subb_u32 v5, vcc, v255, v2, vcc
+// CHECK: [0xff,0x05,0x0a,0x52]
+
+v_subb_u32 v5, vcc, 0, v255, vcc
+// CHECK: [0x80,0xfe,0x0b,0x52]
+
+v_subb_u32_e64 v5, s[12:13], 0, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x52,0xd2,0x80,0x00,0x19,0x00]
+
+v_subb_u32_e64 v255, s[12:13], 0, 0, s[6:7]
+// CHECK: [0xff,0x0c,0x52,0xd2,0x80,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, s[14:15], 0, 0, s[6:7]
+// CHECK: [0x05,0x0e,0x52,0xd2,0x80,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, s[102:103], 0, 0, s[6:7]
+// CHECK: [0x05,0x66,0x52,0xd2,0x80,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, flat_scratch, 0, 0, s[6:7]
+// CHECK: [0x05,0x68,0x52,0xd2,0x80,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, vcc, 0, 0, s[6:7]
+// CHECK: [0x05,0x6a,0x52,0xd2,0x80,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, tba, 0, 0, s[6:7]
+// CHECK: [0x05,0x6c,0x52,0xd2,0x80,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, tma, 0, 0, s[6:7]
+// CHECK: [0x05,0x6e,0x52,0xd2,0x80,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, ttmp[10:11], 0, 0, s[6:7]
+// CHECK: [0x05,0x7a,0x52,0xd2,0x80,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, s[12:13], -1, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x52,0xd2,0xc1,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, s[12:13], 0.5, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x52,0xd2,0xf0,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, s[12:13], -4.0, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x52,0xd2,0xf7,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, s[12:13], v1, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x52,0xd2,0x01,0x01,0x19,0x00]
+
+v_subb_u32_e64 v5, s[12:13], v255, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x52,0xd2,0xff,0x01,0x19,0x00]
+
+v_subb_u32_e64 v5, s[12:13], 0, -1, s[6:7]
+// CHECK: [0x05,0x0c,0x52,0xd2,0x80,0x82,0x19,0x00]
+
+v_subb_u32_e64 v5, s[12:13], 0, 0.5, s[6:7]
+// CHECK: [0x05,0x0c,0x52,0xd2,0x80,0xe0,0x19,0x00]
+
+v_subb_u32_e64 v5, s[12:13], 0, -4.0, s[6:7]
+// CHECK: [0x05,0x0c,0x52,0xd2,0x80,0xee,0x19,0x00]
+
+v_subb_u32_e64 v5, s[12:13], 0, v2, s[6:7]
+// CHECK: [0x05,0x0c,0x52,0xd2,0x80,0x04,0x1a,0x00]
+
+v_subb_u32_e64 v5, s[12:13], 0, v255, s[6:7]
+// CHECK: [0x05,0x0c,0x52,0xd2,0x80,0xfe,0x1b,0x00]
+
+v_subb_u32_e64 v5, s[12:13], 0, 0, s[8:9]
+// CHECK: [0x05,0x0c,0x52,0xd2,0x80,0x00,0x21,0x00]
+
+v_subb_u32_e64 v5, s[12:13], 0, 0, s[102:103]
+// CHECK: [0x05,0x0c,0x52,0xd2,0x80,0x00,0x99,0x01]
+
+v_subb_u32_e64 v5, s[12:13], 0, 0, flat_scratch
+// CHECK: [0x05,0x0c,0x52,0xd2,0x80,0x00,0xa1,0x01]
+
+v_subb_u32_e64 v5, s[12:13], 0, 0, vcc
+// CHECK: [0x05,0x0c,0x52,0xd2,0x80,0x00,0xa9,0x01]
+
+v_subb_u32_e64 v5, s[12:13], 0, 0, tba
+// CHECK: [0x05,0x0c,0x52,0xd2,0x80,0x00,0xb1,0x01]
+
+v_subb_u32_e64 v5, s[12:13], 0, 0, tma
+// CHECK: [0x05,0x0c,0x52,0xd2,0x80,0x00,0xb9,0x01]
+
+v_subb_u32_e64 v5, s[12:13], 0, 0, ttmp[10:11]
+// CHECK: [0x05,0x0c,0x52,0xd2,0x80,0x00,0xe9,0x01]
+
+v_subbrev_u32 v5, vcc, 0, v2, vcc
+// CHECK: [0x80,0x04,0x0a,0x54]
+
+v_subbrev_u32 v255, vcc, 0, v2, vcc
+// CHECK: [0x80,0x04,0xfe,0x55]
+
+v_subbrev_u32 v5, vcc, -1, v2, vcc
+// CHECK: [0xc1,0x04,0x0a,0x54]
+
+v_subbrev_u32 v5, vcc, 0.5, v2, vcc
+// CHECK: [0xf0,0x04,0x0a,0x54]
+
+v_subbrev_u32 v5, vcc, -4.0, v2, vcc
+// CHECK: [0xf7,0x04,0x0a,0x54]
+
+v_subbrev_u32 v5, vcc, v1, v2, vcc
+// CHECK: [0x01,0x05,0x0a,0x54]
+
+v_subbrev_u32 v5, vcc, v255, v2, vcc
+// CHECK: [0xff,0x05,0x0a,0x54]
+
+v_subbrev_u32 v5, vcc, 0, v255, vcc
+// CHECK: [0x80,0xfe,0x0b,0x54]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x54,0xd2,0x80,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v255, s[12:13], 0, 0, s[6:7]
+// CHECK: [0xff,0x0c,0x54,0xd2,0x80,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[14:15], 0, 0, s[6:7]
+// CHECK: [0x05,0x0e,0x54,0xd2,0x80,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[102:103], 0, 0, s[6:7]
+// CHECK: [0x05,0x66,0x54,0xd2,0x80,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, flat_scratch, 0, 0, s[6:7]
+// CHECK: [0x05,0x68,0x54,0xd2,0x80,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, vcc, 0, 0, s[6:7]
+// CHECK: [0x05,0x6a,0x54,0xd2,0x80,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, tba, 0, 0, s[6:7]
+// CHECK: [0x05,0x6c,0x54,0xd2,0x80,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, tma, 0, 0, s[6:7]
+// CHECK: [0x05,0x6e,0x54,0xd2,0x80,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, ttmp[10:11], 0, 0, s[6:7]
+// CHECK: [0x05,0x7a,0x54,0xd2,0x80,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], -1, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x54,0xd2,0xc1,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], 0.5, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x54,0xd2,0xf0,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], -4.0, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x54,0xd2,0xf7,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], v1, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x54,0xd2,0x01,0x01,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], v255, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x54,0xd2,0xff,0x01,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, -1, s[6:7]
+// CHECK: [0x05,0x0c,0x54,0xd2,0x80,0x82,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, 0.5, s[6:7]
+// CHECK: [0x05,0x0c,0x54,0xd2,0x80,0xe0,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, -4.0, s[6:7]
+// CHECK: [0x05,0x0c,0x54,0xd2,0x80,0xee,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, v2, s[6:7]
+// CHECK: [0x05,0x0c,0x54,0xd2,0x80,0x04,0x1a,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, v255, s[6:7]
+// CHECK: [0x05,0x0c,0x54,0xd2,0x80,0xfe,0x1b,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, 0, s[8:9]
+// CHECK: [0x05,0x0c,0x54,0xd2,0x80,0x00,0x21,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, 0, s[102:103]
+// CHECK: [0x05,0x0c,0x54,0xd2,0x80,0x00,0x99,0x01]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, 0, flat_scratch
+// CHECK: [0x05,0x0c,0x54,0xd2,0x80,0x00,0xa1,0x01]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, 0, vcc
+// CHECK: [0x05,0x0c,0x54,0xd2,0x80,0x00,0xa9,0x01]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, 0, tba
+// CHECK: [0x05,0x0c,0x54,0xd2,0x80,0x00,0xb1,0x01]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, 0, tma
+// CHECK: [0x05,0x0c,0x54,0xd2,0x80,0x00,0xb9,0x01]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, 0, ttmp[10:11]
+// CHECK: [0x05,0x0c,0x54,0xd2,0x80,0x00,0xe9,0x01]
+
+v_ldexp_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x56]
+
+v_ldexp_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x57]
+
+v_ldexp_f32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x56]
+
+v_ldexp_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x56]
+
+v_ldexp_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x56]
+
+v_ldexp_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x56]
+
+v_ldexp_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x56]
+
+v_ldexp_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x56]
+
+v_ldexp_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x56]
+
+v_ldexp_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x56]
+
+v_ldexp_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x56]
+
+v_ldexp_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x56]
+
+v_ldexp_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x56]
+
+v_ldexp_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x56]
+
+v_ldexp_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x56]
+
+v_ldexp_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x56]
+
+v_ldexp_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x56]
+
+v_ldexp_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x56]
+
+v_ldexp_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x56]
+
+v_ldexp_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x56,0x56,0x34,0x12,0xaf]
+
+v_ldexp_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x56,0x73,0x72,0x71,0x3f]
+
+v_ldexp_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x56]
+
+v_ldexp_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x56]
+
+v_ldexp_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x56]
+
+v_ldexp_f32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0x04,0x00,0x00]
+
+v_ldexp_f32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x56,0xd2,0x80,0x04,0x00,0x00]
+
+v_ldexp_f32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x56,0xd2,0xf0,0x04,0x00,0x00]
+
+v_ldexp_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x56,0xd2,0x01,0x05,0x00,0x00]
+
+v_ldexp_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x56,0xd2,0xff,0x05,0x00,0x00]
+
+v_ldexp_f32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0xce,0x00,0x00]
+
+v_ldexp_f32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0xd0,0x00,0x00]
+
+v_ldexp_f32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0xd2,0x00,0x00]
+
+v_ldexp_f32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0xd4,0x00,0x00]
+
+v_ldexp_f32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0xd6,0x00,0x00]
+
+v_ldexp_f32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0xd8,0x00,0x00]
+
+v_ldexp_f32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0xda,0x00,0x00]
+
+v_ldexp_f32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0xdc,0x00,0x00]
+
+v_ldexp_f32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0xde,0x00,0x00]
+
+v_ldexp_f32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0xf6,0x00,0x00]
+
+v_ldexp_f32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0xf8,0x00,0x00]
+
+v_ldexp_f32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0xfc,0x00,0x00]
+
+v_ldexp_f32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0xfe,0x00,0x00]
+
+v_ldexp_f32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0x00,0x01,0x00]
+
+v_ldexp_f32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0x82,0x01,0x00]
+
+v_ldexp_f32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0xe0,0x01,0x00]
+
+v_ldexp_f32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0xee,0x01,0x00]
+
+v_ldexp_f32_e64 v5, 0, scc
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0xfa,0x01,0x00]
+
+v_ldexp_f32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0x04,0x02,0x00]
+
+v_ldexp_f32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x56,0xd2,0x80,0xfe,0x03,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x59]
+
+v_cvt_pkaccum_u8_f32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x58,0x56,0x34,0x12,0xaf]
+
+v_cvt_pkaccum_u8_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x58,0x73,0x72,0x71,0x3f]
+
+v_cvt_pkaccum_u8_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x58]
+
+v_cvt_pkaccum_u8_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x58]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x58,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x58,0xd2,0xff,0x05,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, s103
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xcf,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xd1,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xd3,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xd5,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xd7,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xd9,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xdb,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xdd,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xdf,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xf7,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xf9,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xfd,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xff,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, 0
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0x01,0x01,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, -1
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0x83,0x01,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, 0.5
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xe1,0x01,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, -4.0
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xef,0x01,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xfb,0x01,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0x05,0x02,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0xff,0x03,0x00]
+
+v_cvt_pkaccum_u8_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x58,0xd2,0x01,0x05,0x00,0x20]
+
+v_cvt_pkaccum_u8_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x58,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x5b]
+
+v_cvt_pknorm_i16_f32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x5a,0x56,0x34,0x12,0xaf]
+
+v_cvt_pknorm_i16_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x5a,0x73,0x72,0x71,0x3f]
+
+v_cvt_pknorm_i16_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x5a]
+
+v_cvt_pknorm_i16_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x5a]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x5a,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x5a,0xd2,0xff,0x05,0x00,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, s103
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xcf,0x00,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xd1,0x00,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xd3,0x00,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xd5,0x00,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xd7,0x00,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xd9,0x00,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xdb,0x00,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xdd,0x00,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xdf,0x00,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xf7,0x00,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xf9,0x00,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xfd,0x00,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xff,0x00,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xfb,0x01,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0x05,0x02,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0xff,0x03,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0x05,0x00,0x20]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0x05,0x00,0x40]
+
+v_cvt_pknorm_i16_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x5a,0xd2,0x01,0x05,0x00,0x60]
+
+v_cvt_pknorm_i16_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x5a,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x5a,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_i16_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x5a,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x5d]
+
+v_cvt_pknorm_u16_f32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x5c,0x56,0x34,0x12,0xaf]
+
+v_cvt_pknorm_u16_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x5c,0x73,0x72,0x71,0x3f]
+
+v_cvt_pknorm_u16_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x5c]
+
+v_cvt_pknorm_u16_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x5c]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x5c,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x5c,0xd2,0xff,0x05,0x00,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, s103
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xcf,0x00,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xd1,0x00,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xd3,0x00,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xd5,0x00,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xd7,0x00,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xd9,0x00,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xdb,0x00,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xdd,0x00,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xdf,0x00,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xf7,0x00,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xf9,0x00,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xfd,0x00,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xff,0x00,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xfb,0x01,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0x05,0x02,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0xff,0x03,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0x05,0x00,0x20]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0x05,0x00,0x40]
+
+v_cvt_pknorm_u16_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x5c,0xd2,0x01,0x05,0x00,0x60]
+
+v_cvt_pknorm_u16_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x5c,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x5c,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_u16_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x5c,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x5f]
+
+v_cvt_pkrtz_f16_f32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x5e,0x56,0x34,0x12,0xaf]
+
+v_cvt_pkrtz_f16_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x5e,0x73,0x72,0x71,0x3f]
+
+v_cvt_pkrtz_f16_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x5e]
+
+v_cvt_pkrtz_f16_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x5e]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x5e,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x5e,0xd2,0xff,0x05,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, s103
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xcf,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xd1,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xd3,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xd5,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xd7,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xd9,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xdb,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xdd,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xdf,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xf7,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xf9,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xfd,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xff,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xfb,0x01,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0x05,0x02,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0xff,0x03,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0x05,0x00,0x20]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0x05,0x00,0x40]
+
+v_cvt_pkrtz_f16_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x5e,0xd2,0x01,0x05,0x00,0x60]
+
+v_cvt_pkrtz_f16_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x5e,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x5e,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x5e,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x61]
+
+v_cvt_pk_u16_u32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x60,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_u16_u32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x60,0x73,0x72,0x71,0x3f]
+
+v_cvt_pk_u16_u32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x60]
+
+v_cvt_pk_u16_u32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x60]
+
+v_cvt_pk_u16_u32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0x04,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x60,0xd2,0x80,0x04,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x60,0xd2,0xc1,0x04,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x60,0xd2,0xf0,0x04,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x60,0xd2,0xf7,0x04,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x60,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x60,0xd2,0xff,0x05,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0xce,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0xd0,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0xd2,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0xd4,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0xd6,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0xd8,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0xda,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0xdc,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0xde,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0xf6,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0xf8,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0xfc,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0xfe,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0x00,0x01,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0x82,0x01,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0xe0,0x01,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0xee,0x01,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0x04,0x02,0x00]
+
+v_cvt_pk_u16_u32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x60,0xd2,0x80,0xfe,0x03,0x00]
+
+v_cvt_pk_i16_i32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x63]
+
+v_cvt_pk_i16_i32 v5, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x62,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_i16_i32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x62,0x73,0x72,0x71,0x3f]
+
+v_cvt_pk_i16_i32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x62]
+
+v_cvt_pk_i16_i32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x62]
+
+v_cvt_pk_i16_i32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0x04,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x62,0xd2,0x80,0x04,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x62,0xd2,0xc1,0x04,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x62,0xd2,0xf0,0x04,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x62,0xd2,0xf7,0x04,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x62,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x62,0xd2,0xff,0x05,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, s103
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0xce,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0xd0,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0xd2,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0xd4,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0xd6,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0xd8,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0xda,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0xdc,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0xde,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0xf6,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0xf8,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0xfc,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0xfe,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0x00,0x01,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0x82,0x01,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0xe0,0x01,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0xee,0x01,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0x04,0x02,0x00]
+
+v_cvt_pk_i16_i32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x62,0xd2,0x80,0xfe,0x03,0x00]
+
+v_mad_legacy_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0x80,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, s103, v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x67,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x68,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x69,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x6a,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x6b,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x6c,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x6d,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x6e,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x6f,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x7b,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x7c,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x7e,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x7f,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0xfd,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x05,0x0e,0x04]
+
+v_mad_legacy_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0xff,0x05,0x0e,0x04]
+
+v_mad_legacy_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x01,0xfe,0x0f,0x04]
+
+v_mad_legacy_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0xfe,0x07]
+
+v_mad_legacy_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0x0e,0x24]
+
+v_mad_legacy_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0x0e,0x44]
+
+v_mad_legacy_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0x0e,0x84]
+
+v_mad_legacy_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0x0e,0xe4]
+
+v_mad_legacy_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0x80,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0x80,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0x80,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0x80,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x08,0x80,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0x0e,0x0c]
+
+v_mad_legacy_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0x0e,0x14]
+
+v_mad_legacy_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x04,0x0e,0x1c]
+
+v_mad_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mad_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0x82,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mad_f32 v5, s103, v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x67,0x04,0x0e,0x04]
+
+v_mad_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x68,0x04,0x0e,0x04]
+
+v_mad_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x69,0x04,0x0e,0x04]
+
+v_mad_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x6a,0x04,0x0e,0x04]
+
+v_mad_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x6b,0x04,0x0e,0x04]
+
+v_mad_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x6c,0x04,0x0e,0x04]
+
+v_mad_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x6d,0x04,0x0e,0x04]
+
+v_mad_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x6e,0x04,0x0e,0x04]
+
+v_mad_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x6f,0x04,0x0e,0x04]
+
+v_mad_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x7b,0x04,0x0e,0x04]
+
+v_mad_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x7c,0x04,0x0e,0x04]
+
+v_mad_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x7e,0x04,0x0e,0x04]
+
+v_mad_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x7f,0x04,0x0e,0x04]
+
+v_mad_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0xfd,0x04,0x0e,0x04]
+
+v_mad_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x05,0x0e,0x04]
+
+v_mad_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0xff,0x05,0x0e,0x04]
+
+v_mad_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x01,0xfe,0x0f,0x04]
+
+v_mad_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0xfe,0x07]
+
+v_mad_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0x0e,0x24]
+
+v_mad_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0x0e,0x44]
+
+v_mad_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0x0e,0x84]
+
+v_mad_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0x0e,0xe4]
+
+v_mad_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0x82,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mad_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0x82,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mad_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0x82,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mad_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0x82,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mad_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x08,0x82,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mad_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0x0e,0x0c]
+
+v_mad_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0x0e,0x14]
+
+v_mad_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x04,0x0e,0x1c]
+
+v_mad_i32_i24 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x01,0x00,0x01,0x02]
+
+v_mad_i32_i24 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0x84,0xd2,0x01,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x67,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x68,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x69,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x6a,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x6b,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x6c,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x6d,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x6e,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x6f,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x7b,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x7c,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x7e,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x7f,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0xc1,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0xf0,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0xf7,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x01,0x01,0x01,0x02]
+
+v_mad_i32_i24 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0xff,0x01,0x01,0x02]
+
+v_mad_i32_i24 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x01,0x82,0x01,0x02]
+
+v_mad_i32_i24 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x01,0xe0,0x01,0x02]
+
+v_mad_i32_i24 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x01,0xee,0x01,0x02]
+
+v_mad_i32_i24 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x01,0x04,0x02,0x02]
+
+v_mad_i32_i24 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x01,0xfe,0x03,0x02]
+
+v_mad_i32_i24 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0x84,0xd2,0x01,0x00,0x05,0x03]
+
+v_mad_i32_i24 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0x84,0xd2,0x01,0x00,0xc1,0x03]
+
+v_mad_i32_i24 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0x84,0xd2,0x01,0x00,0xdd,0x03]
+
+v_mad_i32_i24 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0x84,0xd2,0x01,0x00,0x0d,0x04]
+
+v_mad_i32_i24 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0x84,0xd2,0x01,0x00,0xfd,0x07]
+
+v_mad_u32_u24 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x01,0x00,0x01,0x02]
+
+v_mad_u32_u24 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0x86,0xd2,0x01,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x67,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x68,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x69,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x6a,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x6b,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x6c,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x6d,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x6e,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x6f,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x7b,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x7c,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x7e,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x7f,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0xc1,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0xf0,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0xf7,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x01,0x01,0x01,0x02]
+
+v_mad_u32_u24 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0xff,0x01,0x01,0x02]
+
+v_mad_u32_u24 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x01,0x82,0x01,0x02]
+
+v_mad_u32_u24 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x01,0xe0,0x01,0x02]
+
+v_mad_u32_u24 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x01,0xee,0x01,0x02]
+
+v_mad_u32_u24 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x01,0x04,0x02,0x02]
+
+v_mad_u32_u24 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x01,0xfe,0x03,0x02]
+
+v_mad_u32_u24 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0x86,0xd2,0x01,0x00,0x05,0x03]
+
+v_mad_u32_u24 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0x86,0xd2,0x01,0x00,0xc1,0x03]
+
+v_mad_u32_u24 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0x86,0xd2,0x01,0x00,0xdd,0x03]
+
+v_mad_u32_u24 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0x86,0xd2,0x01,0x00,0x0d,0x04]
+
+v_mad_u32_u24 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0x86,0xd2,0x01,0x00,0xfd,0x07]
+
+v_cubeid_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubeid_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0x88,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, s103, v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x67,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x68,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x69,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x6a,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x6b,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x6c,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x6d,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x6e,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x6f,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x7b,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x7c,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x7e,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x7f,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0xfd,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x05,0x0e,0x04]
+
+v_cubeid_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0xff,0x05,0x0e,0x04]
+
+v_cubeid_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xfe,0x0f,0x04]
+
+v_cubeid_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0xfe,0x07]
+
+v_cubeid_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0x0e,0x24]
+
+v_cubeid_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0x0e,0x44]
+
+v_cubeid_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0x0e,0x84]
+
+v_cubeid_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0x0e,0xe4]
+
+v_cubeid_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0x88,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0x88,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0x88,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0x88,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x08,0x88,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0x0e,0x0c]
+
+v_cubeid_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0x0e,0x14]
+
+v_cubeid_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x04,0x0e,0x1c]
+
+v_cubesc_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubesc_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0x8a,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, s103, v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x67,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x68,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x69,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x6a,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x6b,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x6c,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x6d,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x6e,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x6f,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x7b,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x7c,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x7e,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x7f,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0xfd,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x05,0x0e,0x04]
+
+v_cubesc_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0xff,0x05,0x0e,0x04]
+
+v_cubesc_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0xfe,0x0f,0x04]
+
+v_cubesc_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0xfe,0x07]
+
+v_cubesc_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0x0e,0x24]
+
+v_cubesc_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0x0e,0x44]
+
+v_cubesc_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0x0e,0x84]
+
+v_cubesc_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0x0e,0xe4]
+
+v_cubesc_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0x8a,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0x8a,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0x8a,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0x8a,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x08,0x8a,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0x0e,0x0c]
+
+v_cubesc_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0x0e,0x14]
+
+v_cubesc_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x04,0x0e,0x1c]
+
+v_cubetc_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubetc_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0x8c,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, s103, v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x67,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x68,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x69,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x6a,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x6b,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x6c,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x6d,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x6e,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x6f,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x7b,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x7c,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x7e,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x7f,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0xfd,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x05,0x0e,0x04]
+
+v_cubetc_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0xff,0x05,0x0e,0x04]
+
+v_cubetc_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0xfe,0x0f,0x04]
+
+v_cubetc_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0xfe,0x07]
+
+v_cubetc_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0x0e,0x24]
+
+v_cubetc_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0x0e,0x44]
+
+v_cubetc_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0x0e,0x84]
+
+v_cubetc_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0x0e,0xe4]
+
+v_cubetc_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0x8c,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0x8c,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0x8c,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0x8c,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x08,0x8c,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0x0e,0x0c]
+
+v_cubetc_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0x0e,0x14]
+
+v_cubetc_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x04,0x0e,0x1c]
+
+v_cubema_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubema_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0x8e,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, s103, v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x67,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x68,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x69,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x6a,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x6b,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x6c,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x6d,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x6e,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x6f,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x7b,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x7c,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x7e,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x7f,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0xfd,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x05,0x0e,0x04]
+
+v_cubema_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0xff,0x05,0x0e,0x04]
+
+v_cubema_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0xfe,0x0f,0x04]
+
+v_cubema_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0xfe,0x07]
+
+v_cubema_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0x0e,0x24]
+
+v_cubema_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0x0e,0x44]
+
+v_cubema_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0x0e,0x84]
+
+v_cubema_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0x0e,0xe4]
+
+v_cubema_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0x8e,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0x8e,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0x8e,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0x8e,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x08,0x8e,0xd2,0x01,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0x0e,0x0c]
+
+v_cubema_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0x0e,0x14]
+
+v_cubema_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0x8e,0xd2,0x01,0x04,0x0e,0x1c]
+
+v_bfe_u32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x01,0x00,0x01,0x02]
+
+v_bfe_u32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0x90,0xd2,0x01,0x00,0x01,0x02]
+
+v_bfe_u32 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x67,0x00,0x01,0x02]
+
+v_bfe_u32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x68,0x00,0x01,0x02]
+
+v_bfe_u32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x69,0x00,0x01,0x02]
+
+v_bfe_u32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x6a,0x00,0x01,0x02]
+
+v_bfe_u32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x6b,0x00,0x01,0x02]
+
+v_bfe_u32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x6c,0x00,0x01,0x02]
+
+v_bfe_u32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x6d,0x00,0x01,0x02]
+
+v_bfe_u32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x6e,0x00,0x01,0x02]
+
+v_bfe_u32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x6f,0x00,0x01,0x02]
+
+v_bfe_u32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x7b,0x00,0x01,0x02]
+
+v_bfe_u32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x7c,0x00,0x01,0x02]
+
+v_bfe_u32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x7e,0x00,0x01,0x02]
+
+v_bfe_u32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x7f,0x00,0x01,0x02]
+
+v_bfe_u32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x80,0x00,0x01,0x02]
+
+v_bfe_u32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0xc1,0x00,0x01,0x02]
+
+v_bfe_u32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0xf0,0x00,0x01,0x02]
+
+v_bfe_u32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0xf7,0x00,0x01,0x02]
+
+v_bfe_u32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x01,0x01,0x01,0x02]
+
+v_bfe_u32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0xff,0x01,0x01,0x02]
+
+v_bfe_u32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x01,0x82,0x01,0x02]
+
+v_bfe_u32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x01,0xe0,0x01,0x02]
+
+v_bfe_u32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x01,0xee,0x01,0x02]
+
+v_bfe_u32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x01,0x04,0x02,0x02]
+
+v_bfe_u32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x01,0xfe,0x03,0x02]
+
+v_bfe_u32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0x90,0xd2,0x01,0x00,0x05,0x03]
+
+v_bfe_u32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0x90,0xd2,0x01,0x00,0xc1,0x03]
+
+v_bfe_u32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0x90,0xd2,0x01,0x00,0xdd,0x03]
+
+v_bfe_u32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0x90,0xd2,0x01,0x00,0x0d,0x04]
+
+v_bfe_u32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0x90,0xd2,0x01,0x00,0xfd,0x07]
+
+v_bfe_i32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x01,0x00,0x01,0x02]
+
+v_bfe_i32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0x92,0xd2,0x01,0x00,0x01,0x02]
+
+v_bfe_i32 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x67,0x00,0x01,0x02]
+
+v_bfe_i32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x68,0x00,0x01,0x02]
+
+v_bfe_i32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x69,0x00,0x01,0x02]
+
+v_bfe_i32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x6a,0x00,0x01,0x02]
+
+v_bfe_i32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x6b,0x00,0x01,0x02]
+
+v_bfe_i32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x6c,0x00,0x01,0x02]
+
+v_bfe_i32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x6d,0x00,0x01,0x02]
+
+v_bfe_i32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x6e,0x00,0x01,0x02]
+
+v_bfe_i32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x6f,0x00,0x01,0x02]
+
+v_bfe_i32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x7b,0x00,0x01,0x02]
+
+v_bfe_i32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x7c,0x00,0x01,0x02]
+
+v_bfe_i32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x7e,0x00,0x01,0x02]
+
+v_bfe_i32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x7f,0x00,0x01,0x02]
+
+v_bfe_i32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0x00,0x01,0x02]
+
+v_bfe_i32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0xc1,0x00,0x01,0x02]
+
+v_bfe_i32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0xf0,0x00,0x01,0x02]
+
+v_bfe_i32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0xf7,0x00,0x01,0x02]
+
+v_bfe_i32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x01,0x01,0x01,0x02]
+
+v_bfe_i32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0xff,0x01,0x01,0x02]
+
+v_bfe_i32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x01,0x82,0x01,0x02]
+
+v_bfe_i32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x01,0xe0,0x01,0x02]
+
+v_bfe_i32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x01,0xee,0x01,0x02]
+
+v_bfe_i32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x01,0x04,0x02,0x02]
+
+v_bfe_i32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x01,0xfe,0x03,0x02]
+
+v_bfe_i32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0x92,0xd2,0x01,0x00,0x05,0x03]
+
+v_bfe_i32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0x92,0xd2,0x01,0x00,0xc1,0x03]
+
+v_bfe_i32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0x92,0xd2,0x01,0x00,0xdd,0x03]
+
+v_bfe_i32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0x92,0xd2,0x01,0x00,0x0d,0x04]
+
+v_bfe_i32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0x92,0xd2,0x01,0x00,0xfd,0x07]
+
+v_bfi_b32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0x00,0x01,0x02]
+
+v_bfi_b32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0x94,0xd2,0x01,0x00,0x01,0x02]
+
+v_bfi_b32 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x67,0x00,0x01,0x02]
+
+v_bfi_b32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x68,0x00,0x01,0x02]
+
+v_bfi_b32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x69,0x00,0x01,0x02]
+
+v_bfi_b32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x6a,0x00,0x01,0x02]
+
+v_bfi_b32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x6b,0x00,0x01,0x02]
+
+v_bfi_b32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x6c,0x00,0x01,0x02]
+
+v_bfi_b32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x6d,0x00,0x01,0x02]
+
+v_bfi_b32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x6e,0x00,0x01,0x02]
+
+v_bfi_b32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x6f,0x00,0x01,0x02]
+
+v_bfi_b32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x7b,0x00,0x01,0x02]
+
+v_bfi_b32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x7c,0x00,0x01,0x02]
+
+v_bfi_b32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x7e,0x00,0x01,0x02]
+
+v_bfi_b32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x7f,0x00,0x01,0x02]
+
+v_bfi_b32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x80,0x00,0x01,0x02]
+
+v_bfi_b32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0xc1,0x00,0x01,0x02]
+
+v_bfi_b32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0xf0,0x00,0x01,0x02]
+
+v_bfi_b32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0xf7,0x00,0x01,0x02]
+
+v_bfi_b32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0x01,0x01,0x02]
+
+v_bfi_b32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0xff,0x01,0x01,0x02]
+
+v_bfi_b32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0x82,0x01,0x02]
+
+v_bfi_b32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xe0,0x01,0x02]
+
+v_bfi_b32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xee,0x01,0x02]
+
+v_bfi_b32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0x04,0x02,0x02]
+
+v_bfi_b32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xfe,0x03,0x02]
+
+v_bfi_b32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0x00,0x05,0x03]
+
+v_bfi_b32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0x00,0xc1,0x03]
+
+v_bfi_b32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0x00,0xdd,0x03]
+
+v_bfi_b32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0x00,0x0d,0x04]
+
+v_bfi_b32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0x00,0xfd,0x07]
+
+v_fma_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0x0e,0x04]
+
+v_fma_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0x96,0xd2,0x01,0x04,0x0e,0x04]
+
+v_fma_f32 v5, s103, v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x67,0x04,0x0e,0x04]
+
+v_fma_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x68,0x04,0x0e,0x04]
+
+v_fma_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x69,0x04,0x0e,0x04]
+
+v_fma_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x6a,0x04,0x0e,0x04]
+
+v_fma_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x6b,0x04,0x0e,0x04]
+
+v_fma_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x6c,0x04,0x0e,0x04]
+
+v_fma_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x6d,0x04,0x0e,0x04]
+
+v_fma_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x6e,0x04,0x0e,0x04]
+
+v_fma_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x6f,0x04,0x0e,0x04]
+
+v_fma_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x7b,0x04,0x0e,0x04]
+
+v_fma_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x7c,0x04,0x0e,0x04]
+
+v_fma_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x7e,0x04,0x0e,0x04]
+
+v_fma_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x7f,0x04,0x0e,0x04]
+
+v_fma_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0xfd,0x04,0x0e,0x04]
+
+v_fma_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x05,0x0e,0x04]
+
+v_fma_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0xff,0x05,0x0e,0x04]
+
+v_fma_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xfe,0x0f,0x04]
+
+v_fma_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0xfe,0x07]
+
+v_fma_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0x0e,0x24]
+
+v_fma_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0x0e,0x44]
+
+v_fma_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0x0e,0x84]
+
+v_fma_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0x0e,0xe4]
+
+v_fma_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0x96,0xd2,0x01,0x04,0x0e,0x04]
+
+v_fma_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0x96,0xd2,0x01,0x04,0x0e,0x04]
+
+v_fma_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0x96,0xd2,0x01,0x04,0x0e,0x04]
+
+v_fma_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0x96,0xd2,0x01,0x04,0x0e,0x04]
+
+v_fma_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x08,0x96,0xd2,0x01,0x04,0x0e,0x04]
+
+v_fma_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0x0e,0x0c]
+
+v_fma_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0x0e,0x14]
+
+v_fma_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x04,0x0e,0x1c]
+
+v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0x0e,0x04]
+
+v_fma_f64 v[254:255], s[2:3], v[2:3], v[3:4]
+// CHECK: [0xfe,0x00,0x98,0xd2,0x02,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], s[4:5], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0x98,0xd2,0x04,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], s[102:103], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0x98,0xd2,0x66,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], flat_scratch, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0x98,0xd2,0x68,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], vcc, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0x98,0xd2,0x6a,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], tba, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0x98,0xd2,0x6c,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], tma, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0x98,0xd2,0x6e,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], ttmp[10:11], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0x98,0xd2,0x7a,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], exec, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0x98,0xd2,0x7e,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], scc, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0x98,0xd2,0xfd,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], v[1:2], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0x98,0xd2,0x01,0x05,0x0e,0x04]
+
+v_fma_f64 v[5:6], v[254:255], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0x98,0xd2,0xfe,0x05,0x0e,0x04]
+
+v_fma_f64 v[5:6], s[2:3], v[254:255], v[3:4]
+// CHECK: [0x05,0x00,0x98,0xd2,0x02,0xfc,0x0f,0x04]
+
+v_fma_f64 v[5:6], s[2:3], v[2:3], v[254:255]
+// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0xfa,0x07]
+
+v_fma_f64 v[5:6], -s[2:3], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0x0e,0x24]
+
+v_fma_f64 v[5:6], s[2:3], -v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0x0e,0x44]
+
+v_fma_f64 v[5:6], s[2:3], v[2:3], -v[3:4]
+// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0x0e,0x84]
+
+v_fma_f64 v[5:6], -s[2:3], -v[2:3], -v[3:4]
+// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0x0e,0xe4]
+
+v_fma_f64 v[5:6], |s[2:3]|, v[2:3], v[3:4]
+// CHECK: [0x05,0x01,0x98,0xd2,0x02,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], s[2:3], |v[2:3]|, v[3:4]
+// CHECK: [0x05,0x02,0x98,0xd2,0x02,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], s[2:3], v[2:3], |v[3:4]|
+// CHECK: [0x05,0x04,0x98,0xd2,0x02,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], |s[2:3]|, |v[2:3]|, |v[3:4]|
+// CHECK: [0x05,0x07,0x98,0xd2,0x02,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4] clamp
+// CHECK: [0x05,0x08,0x98,0xd2,0x02,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4] mul:2
+// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0x0e,0x0c]
+
+v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4] mul:4
+// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0x0e,0x14]
+
+v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4] div:2
+// CHECK: [0x05,0x00,0x98,0xd2,0x02,0x04,0x0e,0x1c]
+
+v_lerp_u8 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x01,0x00,0x01,0x02]
+
+v_lerp_u8 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0x9a,0xd2,0x01,0x00,0x01,0x02]
+
+v_lerp_u8 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x67,0x00,0x01,0x02]
+
+v_lerp_u8 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x68,0x00,0x01,0x02]
+
+v_lerp_u8 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x69,0x00,0x01,0x02]
+
+v_lerp_u8 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x6a,0x00,0x01,0x02]
+
+v_lerp_u8 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x6b,0x00,0x01,0x02]
+
+v_lerp_u8 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x6c,0x00,0x01,0x02]
+
+v_lerp_u8 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x6d,0x00,0x01,0x02]
+
+v_lerp_u8 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x6e,0x00,0x01,0x02]
+
+v_lerp_u8 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x6f,0x00,0x01,0x02]
+
+v_lerp_u8 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x7b,0x00,0x01,0x02]
+
+v_lerp_u8 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x7c,0x00,0x01,0x02]
+
+v_lerp_u8 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x7e,0x00,0x01,0x02]
+
+v_lerp_u8 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x7f,0x00,0x01,0x02]
+
+v_lerp_u8 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x80,0x00,0x01,0x02]
+
+v_lerp_u8 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0xc1,0x00,0x01,0x02]
+
+v_lerp_u8 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x01,0x01,0x01,0x02]
+
+v_lerp_u8 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0xff,0x01,0x01,0x02]
+
+v_lerp_u8 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x01,0x82,0x01,0x02]
+
+v_lerp_u8 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x01,0x04,0x02,0x02]
+
+v_lerp_u8 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0x9a,0xd2,0x01,0xfe,0x03,0x02]
+
+v_lerp_u8 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0x9a,0xd2,0x01,0x00,0x05,0x03]
+
+v_lerp_u8 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0x9a,0xd2,0x01,0x00,0x0d,0x04]
+
+v_lerp_u8 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0x9a,0xd2,0x01,0x00,0xfd,0x07]
+
+v_alignbit_b32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x01,0x00,0x01,0x02]
+
+v_alignbit_b32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0x9c,0xd2,0x01,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x67,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x68,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x69,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x6a,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x6b,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x6c,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x6d,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x6e,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x6f,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x7b,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x7c,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x7e,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x7f,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x80,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0xc1,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0xf0,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0xf7,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x01,0x01,0x01,0x02]
+
+v_alignbit_b32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0xff,0x01,0x01,0x02]
+
+v_alignbit_b32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x01,0x82,0x01,0x02]
+
+v_alignbit_b32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x01,0xe0,0x01,0x02]
+
+v_alignbit_b32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x01,0xee,0x01,0x02]
+
+v_alignbit_b32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x01,0x04,0x02,0x02]
+
+v_alignbit_b32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x01,0xfe,0x03,0x02]
+
+v_alignbit_b32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0x9c,0xd2,0x01,0x00,0x05,0x03]
+
+v_alignbit_b32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0x9c,0xd2,0x01,0x00,0xc1,0x03]
+
+v_alignbit_b32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0x9c,0xd2,0x01,0x00,0xdd,0x03]
+
+v_alignbit_b32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0x9c,0xd2,0x01,0x00,0x0d,0x04]
+
+v_alignbit_b32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0x9c,0xd2,0x01,0x00,0xfd,0x07]
+
+v_alignbyte_b32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x01,0x00,0x01,0x02]
+
+v_alignbyte_b32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0x9e,0xd2,0x01,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x67,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x68,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x69,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x6a,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x6b,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x6c,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x6d,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x6e,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x6f,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x7b,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x7c,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x7e,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x7f,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x80,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0xc1,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0xf0,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0xf7,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x01,0x01,0x01,0x02]
+
+v_alignbyte_b32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0xff,0x01,0x01,0x02]
+
+v_alignbyte_b32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x01,0x82,0x01,0x02]
+
+v_alignbyte_b32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x01,0xe0,0x01,0x02]
+
+v_alignbyte_b32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x01,0xee,0x01,0x02]
+
+v_alignbyte_b32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x01,0x04,0x02,0x02]
+
+v_alignbyte_b32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x01,0xfe,0x03,0x02]
+
+v_alignbyte_b32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0x9e,0xd2,0x01,0x00,0x05,0x03]
+
+v_alignbyte_b32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0x9e,0xd2,0x01,0x00,0xc1,0x03]
+
+v_alignbyte_b32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0x9e,0xd2,0x01,0x00,0xdd,0x03]
+
+v_alignbyte_b32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0x9e,0xd2,0x01,0x00,0x0d,0x04]
+
+v_alignbyte_b32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0x9e,0xd2,0x01,0x00,0xfd,0x07]
+
+v_mullit_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mullit_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xa0,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, s103, v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x67,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x68,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x69,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x6a,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x6b,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x6c,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x6d,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x6e,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x6f,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x7b,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x7c,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x7e,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x7f,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0xfd,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x05,0x0e,0x04]
+
+v_mullit_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0xff,0x05,0x0e,0x04]
+
+v_mullit_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0xfe,0x0f,0x04]
+
+v_mullit_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0xfe,0x07]
+
+v_mullit_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0x0e,0x24]
+
+v_mullit_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0x0e,0x44]
+
+v_mullit_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0x0e,0x84]
+
+v_mullit_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0x0e,0xe4]
+
+v_mullit_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xa0,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xa0,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xa0,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xa0,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x08,0xa0,0xd2,0x01,0x04,0x0e,0x04]
+
+v_mullit_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0x0e,0x0c]
+
+v_mullit_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0x0e,0x14]
+
+v_mullit_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0xa0,0xd2,0x01,0x04,0x0e,0x1c]
+
+v_min3_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x04,0x0e,0x04]
+
+v_min3_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xa2,0xd2,0x01,0x04,0x0e,0x04]
+
+v_min3_f32 v5, s103, v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x67,0x04,0x0e,0x04]
+
+v_min3_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x68,0x04,0x0e,0x04]
+
+v_min3_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x69,0x04,0x0e,0x04]
+
+v_min3_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x6a,0x04,0x0e,0x04]
+
+v_min3_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x6b,0x04,0x0e,0x04]
+
+v_min3_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x6c,0x04,0x0e,0x04]
+
+v_min3_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x6d,0x04,0x0e,0x04]
+
+v_min3_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x6e,0x04,0x0e,0x04]
+
+v_min3_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x6f,0x04,0x0e,0x04]
+
+v_min3_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x7b,0x04,0x0e,0x04]
+
+v_min3_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x7c,0x04,0x0e,0x04]
+
+v_min3_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x7e,0x04,0x0e,0x04]
+
+v_min3_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x7f,0x04,0x0e,0x04]
+
+v_min3_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0xfd,0x04,0x0e,0x04]
+
+v_min3_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x05,0x0e,0x04]
+
+v_min3_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0xff,0x05,0x0e,0x04]
+
+v_min3_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0xfe,0x0f,0x04]
+
+v_min3_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x04,0xfe,0x07]
+
+v_min3_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x04,0x0e,0x24]
+
+v_min3_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x04,0x0e,0x44]
+
+v_min3_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x04,0x0e,0x84]
+
+v_min3_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x04,0x0e,0xe4]
+
+v_min3_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xa2,0xd2,0x01,0x04,0x0e,0x04]
+
+v_min3_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xa2,0xd2,0x01,0x04,0x0e,0x04]
+
+v_min3_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xa2,0xd2,0x01,0x04,0x0e,0x04]
+
+v_min3_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xa2,0xd2,0x01,0x04,0x0e,0x04]
+
+v_min3_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x08,0xa2,0xd2,0x01,0x04,0x0e,0x04]
+
+v_min3_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x04,0x0e,0x0c]
+
+v_min3_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x04,0x0e,0x14]
+
+v_min3_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0xa2,0xd2,0x01,0x04,0x0e,0x1c]
+
+v_min3_i32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x01,0x00,0x01,0x02]
+
+v_min3_i32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xa4,0xd2,0x01,0x00,0x01,0x02]
+
+v_min3_i32 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x67,0x00,0x01,0x02]
+
+v_min3_i32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x68,0x00,0x01,0x02]
+
+v_min3_i32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x69,0x00,0x01,0x02]
+
+v_min3_i32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x6a,0x00,0x01,0x02]
+
+v_min3_i32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x6b,0x00,0x01,0x02]
+
+v_min3_i32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x6c,0x00,0x01,0x02]
+
+v_min3_i32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x6d,0x00,0x01,0x02]
+
+v_min3_i32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x6e,0x00,0x01,0x02]
+
+v_min3_i32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x6f,0x00,0x01,0x02]
+
+v_min3_i32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x7b,0x00,0x01,0x02]
+
+v_min3_i32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x7c,0x00,0x01,0x02]
+
+v_min3_i32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x7e,0x00,0x01,0x02]
+
+v_min3_i32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x7f,0x00,0x01,0x02]
+
+v_min3_i32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x80,0x00,0x01,0x02]
+
+v_min3_i32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0xc1,0x00,0x01,0x02]
+
+v_min3_i32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0xf0,0x00,0x01,0x02]
+
+v_min3_i32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0xf7,0x00,0x01,0x02]
+
+v_min3_i32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x01,0x01,0x01,0x02]
+
+v_min3_i32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0xff,0x01,0x01,0x02]
+
+v_min3_i32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x01,0x82,0x01,0x02]
+
+v_min3_i32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x01,0xe0,0x01,0x02]
+
+v_min3_i32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x01,0xee,0x01,0x02]
+
+v_min3_i32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x01,0x04,0x02,0x02]
+
+v_min3_i32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x01,0xfe,0x03,0x02]
+
+v_min3_i32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xa4,0xd2,0x01,0x00,0x05,0x03]
+
+v_min3_i32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xa4,0xd2,0x01,0x00,0xc1,0x03]
+
+v_min3_i32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xa4,0xd2,0x01,0x00,0xdd,0x03]
+
+v_min3_i32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xa4,0xd2,0x01,0x00,0x0d,0x04]
+
+v_min3_i32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xa4,0xd2,0x01,0x00,0xfd,0x07]
+
+v_min3_u32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x01,0x00,0x01,0x02]
+
+v_min3_u32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xa6,0xd2,0x01,0x00,0x01,0x02]
+
+v_min3_u32 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x67,0x00,0x01,0x02]
+
+v_min3_u32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x68,0x00,0x01,0x02]
+
+v_min3_u32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x69,0x00,0x01,0x02]
+
+v_min3_u32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x6a,0x00,0x01,0x02]
+
+v_min3_u32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x6b,0x00,0x01,0x02]
+
+v_min3_u32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x6c,0x00,0x01,0x02]
+
+v_min3_u32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x6d,0x00,0x01,0x02]
+
+v_min3_u32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x6e,0x00,0x01,0x02]
+
+v_min3_u32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x6f,0x00,0x01,0x02]
+
+v_min3_u32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x7b,0x00,0x01,0x02]
+
+v_min3_u32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x7c,0x00,0x01,0x02]
+
+v_min3_u32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x7e,0x00,0x01,0x02]
+
+v_min3_u32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x7f,0x00,0x01,0x02]
+
+v_min3_u32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x80,0x00,0x01,0x02]
+
+v_min3_u32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0xc1,0x00,0x01,0x02]
+
+v_min3_u32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0xf0,0x00,0x01,0x02]
+
+v_min3_u32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0xf7,0x00,0x01,0x02]
+
+v_min3_u32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x01,0x01,0x01,0x02]
+
+v_min3_u32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0xff,0x01,0x01,0x02]
+
+v_min3_u32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x01,0x82,0x01,0x02]
+
+v_min3_u32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x01,0xe0,0x01,0x02]
+
+v_min3_u32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x01,0xee,0x01,0x02]
+
+v_min3_u32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x01,0x04,0x02,0x02]
+
+v_min3_u32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x01,0xfe,0x03,0x02]
+
+v_min3_u32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xa6,0xd2,0x01,0x00,0x05,0x03]
+
+v_min3_u32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xa6,0xd2,0x01,0x00,0xc1,0x03]
+
+v_min3_u32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xa6,0xd2,0x01,0x00,0xdd,0x03]
+
+v_min3_u32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xa6,0xd2,0x01,0x00,0x0d,0x04]
+
+v_min3_u32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xa6,0xd2,0x01,0x00,0xfd,0x07]
+
+v_max3_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0x0e,0x04]
+
+v_max3_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xa8,0xd2,0x01,0x04,0x0e,0x04]
+
+v_max3_f32 v5, s103, v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x67,0x04,0x0e,0x04]
+
+v_max3_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x68,0x04,0x0e,0x04]
+
+v_max3_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x69,0x04,0x0e,0x04]
+
+v_max3_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x6a,0x04,0x0e,0x04]
+
+v_max3_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x6b,0x04,0x0e,0x04]
+
+v_max3_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x6c,0x04,0x0e,0x04]
+
+v_max3_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x6d,0x04,0x0e,0x04]
+
+v_max3_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x6e,0x04,0x0e,0x04]
+
+v_max3_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x6f,0x04,0x0e,0x04]
+
+v_max3_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x7b,0x04,0x0e,0x04]
+
+v_max3_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x7c,0x04,0x0e,0x04]
+
+v_max3_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x7e,0x04,0x0e,0x04]
+
+v_max3_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x7f,0x04,0x0e,0x04]
+
+v_max3_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0xfd,0x04,0x0e,0x04]
+
+v_max3_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x05,0x0e,0x04]
+
+v_max3_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0xff,0x05,0x0e,0x04]
+
+v_max3_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0xfe,0x0f,0x04]
+
+v_max3_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0xfe,0x07]
+
+v_max3_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0x0e,0x24]
+
+v_max3_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0x0e,0x44]
+
+v_max3_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0x0e,0x84]
+
+v_max3_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0x0e,0xe4]
+
+v_max3_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xa8,0xd2,0x01,0x04,0x0e,0x04]
+
+v_max3_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xa8,0xd2,0x01,0x04,0x0e,0x04]
+
+v_max3_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xa8,0xd2,0x01,0x04,0x0e,0x04]
+
+v_max3_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xa8,0xd2,0x01,0x04,0x0e,0x04]
+
+v_max3_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x08,0xa8,0xd2,0x01,0x04,0x0e,0x04]
+
+v_max3_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0x0e,0x0c]
+
+v_max3_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0x0e,0x14]
+
+v_max3_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0xa8,0xd2,0x01,0x04,0x0e,0x1c]
+
+v_max3_i32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x01,0x00,0x01,0x02]
+
+v_max3_i32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xaa,0xd2,0x01,0x00,0x01,0x02]
+
+v_max3_i32 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x67,0x00,0x01,0x02]
+
+v_max3_i32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x68,0x00,0x01,0x02]
+
+v_max3_i32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x69,0x00,0x01,0x02]
+
+v_max3_i32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x6a,0x00,0x01,0x02]
+
+v_max3_i32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x6b,0x00,0x01,0x02]
+
+v_max3_i32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x6c,0x00,0x01,0x02]
+
+v_max3_i32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x6d,0x00,0x01,0x02]
+
+v_max3_i32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x6e,0x00,0x01,0x02]
+
+v_max3_i32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x6f,0x00,0x01,0x02]
+
+v_max3_i32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x7b,0x00,0x01,0x02]
+
+v_max3_i32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x7c,0x00,0x01,0x02]
+
+v_max3_i32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x7e,0x00,0x01,0x02]
+
+v_max3_i32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x7f,0x00,0x01,0x02]
+
+v_max3_i32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x80,0x00,0x01,0x02]
+
+v_max3_i32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0xc1,0x00,0x01,0x02]
+
+v_max3_i32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0xf0,0x00,0x01,0x02]
+
+v_max3_i32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0xf7,0x00,0x01,0x02]
+
+v_max3_i32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x01,0x01,0x01,0x02]
+
+v_max3_i32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0xff,0x01,0x01,0x02]
+
+v_max3_i32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x01,0x82,0x01,0x02]
+
+v_max3_i32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x01,0xe0,0x01,0x02]
+
+v_max3_i32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x01,0xee,0x01,0x02]
+
+v_max3_i32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x01,0x04,0x02,0x02]
+
+v_max3_i32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x01,0xfe,0x03,0x02]
+
+v_max3_i32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xaa,0xd2,0x01,0x00,0x05,0x03]
+
+v_max3_i32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xaa,0xd2,0x01,0x00,0xc1,0x03]
+
+v_max3_i32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xaa,0xd2,0x01,0x00,0xdd,0x03]
+
+v_max3_i32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xaa,0xd2,0x01,0x00,0x0d,0x04]
+
+v_max3_i32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xaa,0xd2,0x01,0x00,0xfd,0x07]
+
+v_max3_u32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x01,0x00,0x01,0x02]
+
+v_max3_u32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xac,0xd2,0x01,0x00,0x01,0x02]
+
+v_max3_u32 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x67,0x00,0x01,0x02]
+
+v_max3_u32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x68,0x00,0x01,0x02]
+
+v_max3_u32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x69,0x00,0x01,0x02]
+
+v_max3_u32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x6a,0x00,0x01,0x02]
+
+v_max3_u32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x6b,0x00,0x01,0x02]
+
+v_max3_u32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x6c,0x00,0x01,0x02]
+
+v_max3_u32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x6d,0x00,0x01,0x02]
+
+v_max3_u32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x6e,0x00,0x01,0x02]
+
+v_max3_u32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x6f,0x00,0x01,0x02]
+
+v_max3_u32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x7b,0x00,0x01,0x02]
+
+v_max3_u32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x7c,0x00,0x01,0x02]
+
+v_max3_u32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x7e,0x00,0x01,0x02]
+
+v_max3_u32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x7f,0x00,0x01,0x02]
+
+v_max3_u32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x80,0x00,0x01,0x02]
+
+v_max3_u32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0xc1,0x00,0x01,0x02]
+
+v_max3_u32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0xf0,0x00,0x01,0x02]
+
+v_max3_u32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0xf7,0x00,0x01,0x02]
+
+v_max3_u32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x01,0x01,0x01,0x02]
+
+v_max3_u32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0xff,0x01,0x01,0x02]
+
+v_max3_u32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x01,0x82,0x01,0x02]
+
+v_max3_u32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x01,0xe0,0x01,0x02]
+
+v_max3_u32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x01,0xee,0x01,0x02]
+
+v_max3_u32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x01,0x04,0x02,0x02]
+
+v_max3_u32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xac,0xd2,0x01,0xfe,0x03,0x02]
+
+v_max3_u32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xac,0xd2,0x01,0x00,0x05,0x03]
+
+v_max3_u32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xac,0xd2,0x01,0x00,0xc1,0x03]
+
+v_max3_u32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xac,0xd2,0x01,0x00,0xdd,0x03]
+
+v_max3_u32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xac,0xd2,0x01,0x00,0x0d,0x04]
+
+v_max3_u32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xac,0xd2,0x01,0x00,0xfd,0x07]
+
+v_med3_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0x0e,0x04]
+
+v_med3_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xae,0xd2,0x01,0x04,0x0e,0x04]
+
+v_med3_f32 v5, s103, v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x67,0x04,0x0e,0x04]
+
+v_med3_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x68,0x04,0x0e,0x04]
+
+v_med3_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x69,0x04,0x0e,0x04]
+
+v_med3_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x6a,0x04,0x0e,0x04]
+
+v_med3_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x6b,0x04,0x0e,0x04]
+
+v_med3_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x6c,0x04,0x0e,0x04]
+
+v_med3_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x6d,0x04,0x0e,0x04]
+
+v_med3_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x6e,0x04,0x0e,0x04]
+
+v_med3_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x6f,0x04,0x0e,0x04]
+
+v_med3_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x7b,0x04,0x0e,0x04]
+
+v_med3_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x7c,0x04,0x0e,0x04]
+
+v_med3_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x7e,0x04,0x0e,0x04]
+
+v_med3_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x7f,0x04,0x0e,0x04]
+
+v_med3_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0xfd,0x04,0x0e,0x04]
+
+v_med3_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x05,0x0e,0x04]
+
+v_med3_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0xff,0x05,0x0e,0x04]
+
+v_med3_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x01,0xfe,0x0f,0x04]
+
+v_med3_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0xfe,0x07]
+
+v_med3_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0x0e,0x24]
+
+v_med3_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0x0e,0x44]
+
+v_med3_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0x0e,0x84]
+
+v_med3_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0x0e,0xe4]
+
+v_med3_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xae,0xd2,0x01,0x04,0x0e,0x04]
+
+v_med3_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xae,0xd2,0x01,0x04,0x0e,0x04]
+
+v_med3_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xae,0xd2,0x01,0x04,0x0e,0x04]
+
+v_med3_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xae,0xd2,0x01,0x04,0x0e,0x04]
+
+v_med3_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x08,0xae,0xd2,0x01,0x04,0x0e,0x04]
+
+v_med3_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0x0e,0x0c]
+
+v_med3_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0x0e,0x14]
+
+v_med3_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0xae,0xd2,0x01,0x04,0x0e,0x1c]
+
+v_med3_i32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x01,0x00,0x01,0x02]
+
+v_med3_i32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xb0,0xd2,0x01,0x00,0x01,0x02]
+
+v_med3_i32 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x67,0x00,0x01,0x02]
+
+v_med3_i32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x68,0x00,0x01,0x02]
+
+v_med3_i32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x69,0x00,0x01,0x02]
+
+v_med3_i32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x6a,0x00,0x01,0x02]
+
+v_med3_i32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x6b,0x00,0x01,0x02]
+
+v_med3_i32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x6c,0x00,0x01,0x02]
+
+v_med3_i32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x6d,0x00,0x01,0x02]
+
+v_med3_i32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x6e,0x00,0x01,0x02]
+
+v_med3_i32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x6f,0x00,0x01,0x02]
+
+v_med3_i32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x7b,0x00,0x01,0x02]
+
+v_med3_i32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x7c,0x00,0x01,0x02]
+
+v_med3_i32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x7e,0x00,0x01,0x02]
+
+v_med3_i32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x7f,0x00,0x01,0x02]
+
+v_med3_i32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x80,0x00,0x01,0x02]
+
+v_med3_i32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0xc1,0x00,0x01,0x02]
+
+v_med3_i32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0xf0,0x00,0x01,0x02]
+
+v_med3_i32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0xf7,0x00,0x01,0x02]
+
+v_med3_i32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x01,0x01,0x01,0x02]
+
+v_med3_i32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0xff,0x01,0x01,0x02]
+
+v_med3_i32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x01,0x82,0x01,0x02]
+
+v_med3_i32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x01,0xe0,0x01,0x02]
+
+v_med3_i32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x01,0xee,0x01,0x02]
+
+v_med3_i32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x01,0x04,0x02,0x02]
+
+v_med3_i32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x01,0xfe,0x03,0x02]
+
+v_med3_i32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xb0,0xd2,0x01,0x00,0x05,0x03]
+
+v_med3_i32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xb0,0xd2,0x01,0x00,0xc1,0x03]
+
+v_med3_i32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xb0,0xd2,0x01,0x00,0xdd,0x03]
+
+v_med3_i32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xb0,0xd2,0x01,0x00,0x0d,0x04]
+
+v_med3_i32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xb0,0xd2,0x01,0x00,0xfd,0x07]
+
+v_med3_u32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x01,0x00,0x01,0x02]
+
+v_med3_u32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xb2,0xd2,0x01,0x00,0x01,0x02]
+
+v_med3_u32 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x67,0x00,0x01,0x02]
+
+v_med3_u32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x68,0x00,0x01,0x02]
+
+v_med3_u32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x69,0x00,0x01,0x02]
+
+v_med3_u32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x6a,0x00,0x01,0x02]
+
+v_med3_u32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x6b,0x00,0x01,0x02]
+
+v_med3_u32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x6c,0x00,0x01,0x02]
+
+v_med3_u32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x6d,0x00,0x01,0x02]
+
+v_med3_u32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x6e,0x00,0x01,0x02]
+
+v_med3_u32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x6f,0x00,0x01,0x02]
+
+v_med3_u32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x7b,0x00,0x01,0x02]
+
+v_med3_u32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x7c,0x00,0x01,0x02]
+
+v_med3_u32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x7e,0x00,0x01,0x02]
+
+v_med3_u32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x7f,0x00,0x01,0x02]
+
+v_med3_u32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x80,0x00,0x01,0x02]
+
+v_med3_u32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0xc1,0x00,0x01,0x02]
+
+v_med3_u32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0xf0,0x00,0x01,0x02]
+
+v_med3_u32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0xf7,0x00,0x01,0x02]
+
+v_med3_u32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x01,0x01,0x01,0x02]
+
+v_med3_u32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0xff,0x01,0x01,0x02]
+
+v_med3_u32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x01,0x82,0x01,0x02]
+
+v_med3_u32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x01,0xe0,0x01,0x02]
+
+v_med3_u32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x01,0xee,0x01,0x02]
+
+v_med3_u32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x01,0x04,0x02,0x02]
+
+v_med3_u32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x01,0xfe,0x03,0x02]
+
+v_med3_u32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xb2,0xd2,0x01,0x00,0x05,0x03]
+
+v_med3_u32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xb2,0xd2,0x01,0x00,0xc1,0x03]
+
+v_med3_u32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xb2,0xd2,0x01,0x00,0xdd,0x03]
+
+v_med3_u32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xb2,0xd2,0x01,0x00,0x0d,0x04]
+
+v_med3_u32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xb2,0xd2,0x01,0x00,0xfd,0x07]
+
+v_sad_u8 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x01,0x00,0x01,0x02]
+
+v_sad_u8 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xb4,0xd2,0x01,0x00,0x01,0x02]
+
+v_sad_u8 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x67,0x00,0x01,0x02]
+
+v_sad_u8 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x68,0x00,0x01,0x02]
+
+v_sad_u8 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x69,0x00,0x01,0x02]
+
+v_sad_u8 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x6a,0x00,0x01,0x02]
+
+v_sad_u8 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x6b,0x00,0x01,0x02]
+
+v_sad_u8 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x6c,0x00,0x01,0x02]
+
+v_sad_u8 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x6d,0x00,0x01,0x02]
+
+v_sad_u8 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x6e,0x00,0x01,0x02]
+
+v_sad_u8 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x6f,0x00,0x01,0x02]
+
+v_sad_u8 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x7b,0x00,0x01,0x02]
+
+v_sad_u8 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x7c,0x00,0x01,0x02]
+
+v_sad_u8 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x7e,0x00,0x01,0x02]
+
+v_sad_u8 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x7f,0x00,0x01,0x02]
+
+v_sad_u8 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x80,0x00,0x01,0x02]
+
+v_sad_u8 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0xc1,0x00,0x01,0x02]
+
+v_sad_u8 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x01,0x01,0x01,0x02]
+
+v_sad_u8 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0xff,0x01,0x01,0x02]
+
+v_sad_u8 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x01,0x82,0x01,0x02]
+
+v_sad_u8 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x01,0x04,0x02,0x02]
+
+v_sad_u8 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x01,0xfe,0x03,0x02]
+
+v_sad_u8 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xb4,0xd2,0x01,0x00,0x05,0x03]
+
+v_sad_u8 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xb4,0xd2,0x01,0x00,0xc1,0x03]
+
+v_sad_u8 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xb4,0xd2,0x01,0x00,0xdd,0x03]
+
+v_sad_u8 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xb4,0xd2,0x01,0x00,0x0d,0x04]
+
+v_sad_u8 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xb4,0xd2,0x01,0x00,0xfd,0x07]
+
+v_sad_hi_u8 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x01,0x00,0x01,0x02]
+
+v_sad_hi_u8 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xb6,0xd2,0x01,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x67,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x68,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x69,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x6a,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x6b,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x6c,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x6d,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x6e,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x6f,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x7b,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x7c,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x7e,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x7f,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x80,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0xc1,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x01,0x01,0x01,0x02]
+
+v_sad_hi_u8 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0xff,0x01,0x01,0x02]
+
+v_sad_hi_u8 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x01,0x82,0x01,0x02]
+
+v_sad_hi_u8 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x01,0x04,0x02,0x02]
+
+v_sad_hi_u8 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x01,0xfe,0x03,0x02]
+
+v_sad_hi_u8 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xb6,0xd2,0x01,0x00,0x05,0x03]
+
+v_sad_hi_u8 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xb6,0xd2,0x01,0x00,0xc1,0x03]
+
+v_sad_hi_u8 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xb6,0xd2,0x01,0x00,0xdd,0x03]
+
+v_sad_hi_u8 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xb6,0xd2,0x01,0x00,0x0d,0x04]
+
+v_sad_hi_u8 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xb6,0xd2,0x01,0x00,0xfd,0x07]
+
+v_sad_u16 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x01,0x00,0x01,0x02]
+
+v_sad_u16 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xb8,0xd2,0x01,0x00,0x01,0x02]
+
+v_sad_u16 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x67,0x00,0x01,0x02]
+
+v_sad_u16 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x68,0x00,0x01,0x02]
+
+v_sad_u16 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x69,0x00,0x01,0x02]
+
+v_sad_u16 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x6a,0x00,0x01,0x02]
+
+v_sad_u16 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x6b,0x00,0x01,0x02]
+
+v_sad_u16 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x6c,0x00,0x01,0x02]
+
+v_sad_u16 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x6d,0x00,0x01,0x02]
+
+v_sad_u16 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x6e,0x00,0x01,0x02]
+
+v_sad_u16 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x6f,0x00,0x01,0x02]
+
+v_sad_u16 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x7b,0x00,0x01,0x02]
+
+v_sad_u16 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x7c,0x00,0x01,0x02]
+
+v_sad_u16 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x7e,0x00,0x01,0x02]
+
+v_sad_u16 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x7f,0x00,0x01,0x02]
+
+v_sad_u16 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x80,0x00,0x01,0x02]
+
+v_sad_u16 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0xc1,0x00,0x01,0x02]
+
+v_sad_u16 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x01,0x01,0x01,0x02]
+
+v_sad_u16 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0xff,0x01,0x01,0x02]
+
+v_sad_u16 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x01,0x82,0x01,0x02]
+
+v_sad_u16 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x01,0x04,0x02,0x02]
+
+v_sad_u16 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x01,0xfe,0x03,0x02]
+
+v_sad_u16 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xb8,0xd2,0x01,0x00,0x05,0x03]
+
+v_sad_u16 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xb8,0xd2,0x01,0x00,0xc1,0x03]
+
+v_sad_u16 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xb8,0xd2,0x01,0x00,0xdd,0x03]
+
+v_sad_u16 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xb8,0xd2,0x01,0x00,0x0d,0x04]
+
+v_sad_u16 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xb8,0xd2,0x01,0x00,0xfd,0x07]
+
+v_sad_u32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x01,0x00,0x01,0x02]
+
+v_sad_u32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xba,0xd2,0x01,0x00,0x01,0x02]
+
+v_sad_u32 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x67,0x00,0x01,0x02]
+
+v_sad_u32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x68,0x00,0x01,0x02]
+
+v_sad_u32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x69,0x00,0x01,0x02]
+
+v_sad_u32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x6a,0x00,0x01,0x02]
+
+v_sad_u32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x6b,0x00,0x01,0x02]
+
+v_sad_u32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x6c,0x00,0x01,0x02]
+
+v_sad_u32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x6d,0x00,0x01,0x02]
+
+v_sad_u32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x6e,0x00,0x01,0x02]
+
+v_sad_u32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x6f,0x00,0x01,0x02]
+
+v_sad_u32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x7b,0x00,0x01,0x02]
+
+v_sad_u32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x7c,0x00,0x01,0x02]
+
+v_sad_u32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x7e,0x00,0x01,0x02]
+
+v_sad_u32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x7f,0x00,0x01,0x02]
+
+v_sad_u32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x80,0x00,0x01,0x02]
+
+v_sad_u32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0xc1,0x00,0x01,0x02]
+
+v_sad_u32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0xf0,0x00,0x01,0x02]
+
+v_sad_u32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0xf7,0x00,0x01,0x02]
+
+v_sad_u32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x01,0x01,0x01,0x02]
+
+v_sad_u32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0xff,0x01,0x01,0x02]
+
+v_sad_u32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x01,0x82,0x01,0x02]
+
+v_sad_u32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x01,0xe0,0x01,0x02]
+
+v_sad_u32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x01,0xee,0x01,0x02]
+
+v_sad_u32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x01,0x04,0x02,0x02]
+
+v_sad_u32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xba,0xd2,0x01,0xfe,0x03,0x02]
+
+v_sad_u32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xba,0xd2,0x01,0x00,0x05,0x03]
+
+v_sad_u32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xba,0xd2,0x01,0x00,0xc1,0x03]
+
+v_sad_u32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xba,0xd2,0x01,0x00,0xdd,0x03]
+
+v_sad_u32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xba,0xd2,0x01,0x00,0x0d,0x04]
+
+v_sad_u32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xba,0xd2,0x01,0x00,0xfd,0x07]
+
+v_cvt_pk_u8_f32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x01,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xbc,0xd2,0x01,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x67,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x68,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x69,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x6a,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x6b,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x6c,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x6d,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x6e,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x6f,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x7b,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x7c,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x7e,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x7f,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x80,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0xf0,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, scc, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0xfd,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x01,0x01,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0xff,0x01,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x01,0x82,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x01,0xe0,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x01,0xee,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x01,0x04,0x02,0x02]
+
+v_cvt_pk_u8_f32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x01,0xfe,0x03,0x02]
+
+v_cvt_pk_u8_f32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xbc,0xd2,0x01,0x00,0x05,0x03]
+
+v_cvt_pk_u8_f32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xbc,0xd2,0x01,0x00,0xc1,0x03]
+
+v_cvt_pk_u8_f32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xbc,0xd2,0x01,0x00,0xdd,0x03]
+
+v_cvt_pk_u8_f32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xbc,0xd2,0x01,0x00,0x0d,0x04]
+
+v_cvt_pk_u8_f32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xbc,0xd2,0x01,0x00,0xfd,0x07]
+
+v_div_fixup_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xbe,0xd2,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, s103, v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x67,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x68,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x69,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x6a,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x6b,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x6c,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x6d,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x6e,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x6f,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x7b,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x7c,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x7e,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x7f,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0xfd,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x05,0x0e,0x04]
+
+v_div_fixup_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0xff,0x05,0x0e,0x04]
+
+v_div_fixup_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0xfe,0x0f,0x04]
+
+v_div_fixup_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0xfe,0x07]
+
+v_div_fixup_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0x0e,0x24]
+
+v_div_fixup_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0x0e,0x44]
+
+v_div_fixup_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0x0e,0x84]
+
+v_div_fixup_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0x0e,0xe4]
+
+v_div_fixup_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xbe,0xd2,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xbe,0xd2,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xbe,0xd2,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xbe,0xd2,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x08,0xbe,0xd2,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0x0e,0x0c]
+
+v_div_fixup_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0x0e,0x14]
+
+v_div_fixup_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0xbe,0xd2,0x01,0x04,0x0e,0x1c]
+
+v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[254:255], s[2:3], v[2:3], v[3:4]
+// CHECK: [0xfe,0x00,0xc0,0xd2,0x02,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], s[4:5], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xc0,0xd2,0x04,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], s[102:103], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xc0,0xd2,0x66,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], flat_scratch, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xc0,0xd2,0x68,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], vcc, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xc0,0xd2,0x6a,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], tba, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xc0,0xd2,0x6c,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], tma, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xc0,0xd2,0x6e,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], ttmp[10:11], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xc0,0xd2,0x7a,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], exec, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xc0,0xd2,0x7e,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], scc, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xc0,0xd2,0xfd,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], v[1:2], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xc0,0xd2,0x01,0x05,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], v[254:255], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xc0,0xd2,0xfe,0x05,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], s[2:3], v[254:255], v[3:4]
+// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0xfc,0x0f,0x04]
+
+v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[254:255]
+// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0xfa,0x07]
+
+v_div_fixup_f64 v[5:6], -s[2:3], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0x0e,0x24]
+
+v_div_fixup_f64 v[5:6], s[2:3], -v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0x0e,0x44]
+
+v_div_fixup_f64 v[5:6], s[2:3], v[2:3], -v[3:4]
+// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0x0e,0x84]
+
+v_div_fixup_f64 v[5:6], -s[2:3], -v[2:3], -v[3:4]
+// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0x0e,0xe4]
+
+v_div_fixup_f64 v[5:6], |s[2:3]|, v[2:3], v[3:4]
+// CHECK: [0x05,0x01,0xc0,0xd2,0x02,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], s[2:3], |v[2:3]|, v[3:4]
+// CHECK: [0x05,0x02,0xc0,0xd2,0x02,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], s[2:3], v[2:3], |v[3:4]|
+// CHECK: [0x05,0x04,0xc0,0xd2,0x02,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], |s[2:3]|, |v[2:3]|, |v[3:4]|
+// CHECK: [0x05,0x07,0xc0,0xd2,0x02,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4] clamp
+// CHECK: [0x05,0x08,0xc0,0xd2,0x02,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4] mul:2
+// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0x0e,0x0c]
+
+v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4] mul:4
+// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0x0e,0x14]
+
+v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4] div:2
+// CHECK: [0x05,0x00,0xc0,0xd2,0x02,0x04,0x0e,0x1c]
+
+v_lshl_b64 v[5:6], 0, s2
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0x04,0x00,0x00]
+
+v_lshl_b64 v[254:255], 0, s2
+// CHECK: [0xfe,0x00,0xc2,0xd2,0x80,0x04,0x00,0x00]
+
+v_lshl_b64 v[5:6], -1, s2
+// CHECK: [0x05,0x00,0xc2,0xd2,0xc1,0x04,0x00,0x00]
+
+v_lshl_b64 v[5:6], 0.5, s2
+// CHECK: [0x05,0x00,0xc2,0xd2,0xf0,0x04,0x00,0x00]
+
+v_lshl_b64 v[5:6], -4.0, s2
+// CHECK: [0x05,0x00,0xc2,0xd2,0xf7,0x04,0x00,0x00]
+
+v_lshl_b64 v[5:6], v[1:2], s2
+// CHECK: [0x05,0x00,0xc2,0xd2,0x01,0x05,0x00,0x00]
+
+v_lshl_b64 v[5:6], v[254:255], s2
+// CHECK: [0x05,0x00,0xc2,0xd2,0xfe,0x05,0x00,0x00]
+
+v_lshl_b64 v[5:6], 0, s103
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0xce,0x00,0x00]
+
+v_lshl_b64 v[5:6], 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0xd0,0x00,0x00]
+
+v_lshl_b64 v[5:6], 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0xd2,0x00,0x00]
+
+v_lshl_b64 v[5:6], 0, vcc_lo
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0xd4,0x00,0x00]
+
+v_lshl_b64 v[5:6], 0, vcc_hi
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0xd6,0x00,0x00]
+
+v_lshl_b64 v[5:6], 0, tba_lo
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0xd8,0x00,0x00]
+
+v_lshl_b64 v[5:6], 0, tba_hi
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0xda,0x00,0x00]
+
+v_lshl_b64 v[5:6], 0, tma_lo
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0xdc,0x00,0x00]
+
+v_lshl_b64 v[5:6], 0, tma_hi
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0xde,0x00,0x00]
+
+v_lshl_b64 v[5:6], 0, ttmp11
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0xf6,0x00,0x00]
+
+v_lshl_b64 v[5:6], 0, m0
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0xf8,0x00,0x00]
+
+v_lshl_b64 v[5:6], 0, exec_lo
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0xfc,0x00,0x00]
+
+v_lshl_b64 v[5:6], 0, exec_hi
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0xfe,0x00,0x00]
+
+v_lshl_b64 v[5:6], 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0x00,0x01,0x00]
+
+v_lshl_b64 v[5:6], 0, -1
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0x82,0x01,0x00]
+
+v_lshl_b64 v[5:6], 0, 0.5
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0xe0,0x01,0x00]
+
+v_lshl_b64 v[5:6], 0, -4.0
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0xee,0x01,0x00]
+
+v_lshl_b64 v[5:6], 0, v2
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0x04,0x02,0x00]
+
+v_lshl_b64 v[5:6], 0, v255
+// CHECK: [0x05,0x00,0xc2,0xd2,0x80,0xfe,0x03,0x00]
+
+v_lshr_b64 v[5:6], 0, s2
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0x04,0x00,0x00]
+
+v_lshr_b64 v[254:255], 0, s2
+// CHECK: [0xfe,0x00,0xc4,0xd2,0x80,0x04,0x00,0x00]
+
+v_lshr_b64 v[5:6], -1, s2
+// CHECK: [0x05,0x00,0xc4,0xd2,0xc1,0x04,0x00,0x00]
+
+v_lshr_b64 v[5:6], 0.5, s2
+// CHECK: [0x05,0x00,0xc4,0xd2,0xf0,0x04,0x00,0x00]
+
+v_lshr_b64 v[5:6], -4.0, s2
+// CHECK: [0x05,0x00,0xc4,0xd2,0xf7,0x04,0x00,0x00]
+
+v_lshr_b64 v[5:6], v[1:2], s2
+// CHECK: [0x05,0x00,0xc4,0xd2,0x01,0x05,0x00,0x00]
+
+v_lshr_b64 v[5:6], v[254:255], s2
+// CHECK: [0x05,0x00,0xc4,0xd2,0xfe,0x05,0x00,0x00]
+
+v_lshr_b64 v[5:6], 0, s103
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0xce,0x00,0x00]
+
+v_lshr_b64 v[5:6], 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0xd0,0x00,0x00]
+
+v_lshr_b64 v[5:6], 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0xd2,0x00,0x00]
+
+v_lshr_b64 v[5:6], 0, vcc_lo
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0xd4,0x00,0x00]
+
+v_lshr_b64 v[5:6], 0, vcc_hi
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0xd6,0x00,0x00]
+
+v_lshr_b64 v[5:6], 0, tba_lo
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0xd8,0x00,0x00]
+
+v_lshr_b64 v[5:6], 0, tba_hi
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0xda,0x00,0x00]
+
+v_lshr_b64 v[5:6], 0, tma_lo
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0xdc,0x00,0x00]
+
+v_lshr_b64 v[5:6], 0, tma_hi
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0xde,0x00,0x00]
+
+v_lshr_b64 v[5:6], 0, ttmp11
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0xf6,0x00,0x00]
+
+v_lshr_b64 v[5:6], 0, m0
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0xf8,0x00,0x00]
+
+v_lshr_b64 v[5:6], 0, exec_lo
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0xfc,0x00,0x00]
+
+v_lshr_b64 v[5:6], 0, exec_hi
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0xfe,0x00,0x00]
+
+v_lshr_b64 v[5:6], 0, 0
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0x00,0x01,0x00]
+
+v_lshr_b64 v[5:6], 0, -1
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0x82,0x01,0x00]
+
+v_lshr_b64 v[5:6], 0, 0.5
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0xe0,0x01,0x00]
+
+v_lshr_b64 v[5:6], 0, -4.0
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0xee,0x01,0x00]
+
+v_lshr_b64 v[5:6], 0, v2
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0x04,0x02,0x00]
+
+v_lshr_b64 v[5:6], 0, v255
+// CHECK: [0x05,0x00,0xc4,0xd2,0x80,0xfe,0x03,0x00]
+
+v_ashr_i64 v[5:6], 0, s2
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0x04,0x00,0x00]
+
+v_ashr_i64 v[254:255], 0, s2
+// CHECK: [0xfe,0x00,0xc6,0xd2,0x80,0x04,0x00,0x00]
+
+v_ashr_i64 v[5:6], -1, s2
+// CHECK: [0x05,0x00,0xc6,0xd2,0xc1,0x04,0x00,0x00]
+
+v_ashr_i64 v[5:6], 0.5, s2
+// CHECK: [0x05,0x00,0xc6,0xd2,0xf0,0x04,0x00,0x00]
+
+v_ashr_i64 v[5:6], -4.0, s2
+// CHECK: [0x05,0x00,0xc6,0xd2,0xf7,0x04,0x00,0x00]
+
+v_ashr_i64 v[5:6], v[1:2], s2
+// CHECK: [0x05,0x00,0xc6,0xd2,0x01,0x05,0x00,0x00]
+
+v_ashr_i64 v[5:6], v[254:255], s2
+// CHECK: [0x05,0x00,0xc6,0xd2,0xfe,0x05,0x00,0x00]
+
+v_ashr_i64 v[5:6], 0, s103
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0xce,0x00,0x00]
+
+v_ashr_i64 v[5:6], 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0xd0,0x00,0x00]
+
+v_ashr_i64 v[5:6], 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0xd2,0x00,0x00]
+
+v_ashr_i64 v[5:6], 0, vcc_lo
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0xd4,0x00,0x00]
+
+v_ashr_i64 v[5:6], 0, vcc_hi
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0xd6,0x00,0x00]
+
+v_ashr_i64 v[5:6], 0, tba_lo
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0xd8,0x00,0x00]
+
+v_ashr_i64 v[5:6], 0, tba_hi
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0xda,0x00,0x00]
+
+v_ashr_i64 v[5:6], 0, tma_lo
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0xdc,0x00,0x00]
+
+v_ashr_i64 v[5:6], 0, tma_hi
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0xde,0x00,0x00]
+
+v_ashr_i64 v[5:6], 0, ttmp11
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0xf6,0x00,0x00]
+
+v_ashr_i64 v[5:6], 0, m0
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0xf8,0x00,0x00]
+
+v_ashr_i64 v[5:6], 0, exec_lo
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0xfc,0x00,0x00]
+
+v_ashr_i64 v[5:6], 0, exec_hi
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0xfe,0x00,0x00]
+
+v_ashr_i64 v[5:6], 0, 0
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0x00,0x01,0x00]
+
+v_ashr_i64 v[5:6], 0, -1
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0x82,0x01,0x00]
+
+v_ashr_i64 v[5:6], 0, 0.5
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0xe0,0x01,0x00]
+
+v_ashr_i64 v[5:6], 0, -4.0
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0xee,0x01,0x00]
+
+v_ashr_i64 v[5:6], 0, v2
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0x04,0x02,0x00]
+
+v_ashr_i64 v[5:6], 0, v255
+// CHECK: [0x05,0x00,0xc6,0xd2,0x80,0xfe,0x03,0x00]
+
+v_add_f64 v[5:6], s[4:5], s[4:5]
+// CHECK: [0x05,0x00,0xc8,0xd2,0x04,0x08,0x00,0x00]
+
+v_add_f64 v[254:255], s[4:5], s[4:5]
+// CHECK: [0xfe,0x00,0xc8,0xd2,0x04,0x08,0x00,0x00]
+
+v_add_f64 v[5:6], v[1:2], s[4:5]
+// CHECK: [0x05,0x00,0xc8,0xd2,0x01,0x09,0x00,0x00]
+
+v_add_f64 v[5:6], v[254:255], s[4:5]
+// CHECK: [0x05,0x00,0xc8,0xd2,0xfe,0x09,0x00,0x00]
+
+v_add_f64 v[5:6], s[4:5], v[2:3]
+// CHECK: [0x05,0x00,0xc8,0xd2,0x04,0x04,0x02,0x00]
+
+v_add_f64 v[5:6], s[4:5], v[254:255]
+// CHECK: [0x05,0x00,0xc8,0xd2,0x04,0xfc,0x03,0x00]
+
+v_add_f64 v[5:6], -s[4:5], s[4:5]
+// CHECK: [0x05,0x00,0xc8,0xd2,0x04,0x08,0x00,0x20]
+
+v_add_f64 v[5:6], s[4:5], -s[4:5]
+// CHECK: [0x05,0x00,0xc8,0xd2,0x04,0x08,0x00,0x40]
+
+v_add_f64 v[5:6], -s[4:5], -s[4:5]
+// CHECK: [0x05,0x00,0xc8,0xd2,0x04,0x08,0x00,0x60]
+
+v_add_f64 v[5:6], |s[4:5]|, s[4:5]
+// CHECK: [0x05,0x01,0xc8,0xd2,0x04,0x08,0x00,0x00]
+
+v_add_f64 v[5:6], s[4:5], |s[4:5]|
+// CHECK: [0x05,0x02,0xc8,0xd2,0x04,0x08,0x00,0x00]
+
+v_add_f64 v[5:6], |s[4:5]|, |s[4:5]|
+// CHECK: [0x05,0x03,0xc8,0xd2,0x04,0x08,0x00,0x00]
+
+v_add_f64 v[5:6], s[4:5], s[4:5] clamp
+// CHECK: [0x05,0x08,0xc8,0xd2,0x04,0x08,0x00,0x00]
+
+v_add_f64 v[5:6], s[4:5], s[4:5] mul:2
+// CHECK: [0x05,0x00,0xc8,0xd2,0x04,0x08,0x00,0x08]
+
+v_add_f64 v[5:6], s[4:5], s[4:5] mul:4
+// CHECK: [0x05,0x00,0xc8,0xd2,0x04,0x08,0x00,0x10]
+
+v_add_f64 v[5:6], s[4:5], s[4:5] div:2
+// CHECK: [0x05,0x00,0xc8,0xd2,0x04,0x08,0x00,0x18]
+
+v_mul_f64 v[5:6], s[4:5], s[4:5]
+// CHECK: [0x05,0x00,0xca,0xd2,0x04,0x08,0x00,0x00]
+
+v_mul_f64 v[254:255], s[4:5], s[4:5]
+// CHECK: [0xfe,0x00,0xca,0xd2,0x04,0x08,0x00,0x00]
+
+v_mul_f64 v[5:6], v[1:2], s[4:5]
+// CHECK: [0x05,0x00,0xca,0xd2,0x01,0x09,0x00,0x00]
+
+v_mul_f64 v[5:6], v[254:255], s[4:5]
+// CHECK: [0x05,0x00,0xca,0xd2,0xfe,0x09,0x00,0x00]
+
+v_mul_f64 v[5:6], s[4:5], v[2:3]
+// CHECK: [0x05,0x00,0xca,0xd2,0x04,0x04,0x02,0x00]
+
+v_mul_f64 v[5:6], s[4:5], v[254:255]
+// CHECK: [0x05,0x00,0xca,0xd2,0x04,0xfc,0x03,0x00]
+
+v_mul_f64 v[5:6], -s[4:5], s[4:5]
+// CHECK: [0x05,0x00,0xca,0xd2,0x04,0x08,0x00,0x20]
+
+v_mul_f64 v[5:6], s[4:5], -s[4:5]
+// CHECK: [0x05,0x00,0xca,0xd2,0x04,0x08,0x00,0x40]
+
+v_mul_f64 v[5:6], -s[4:5], -s[4:5]
+// CHECK: [0x05,0x00,0xca,0xd2,0x04,0x08,0x00,0x60]
+
+v_mul_f64 v[5:6], |s[4:5]|, s[4:5]
+// CHECK: [0x05,0x01,0xca,0xd2,0x04,0x08,0x00,0x00]
+
+v_mul_f64 v[5:6], s[4:5], |s[4:5]|
+// CHECK: [0x05,0x02,0xca,0xd2,0x04,0x08,0x00,0x00]
+
+v_mul_f64 v[5:6], |s[4:5]|, |s[4:5]|
+// CHECK: [0x05,0x03,0xca,0xd2,0x04,0x08,0x00,0x00]
+
+v_mul_f64 v[5:6], s[4:5], s[4:5] clamp
+// CHECK: [0x05,0x08,0xca,0xd2,0x04,0x08,0x00,0x00]
+
+v_mul_f64 v[5:6], s[4:5], s[4:5] mul:2
+// CHECK: [0x05,0x00,0xca,0xd2,0x04,0x08,0x00,0x08]
+
+v_mul_f64 v[5:6], s[4:5], s[4:5] mul:4
+// CHECK: [0x05,0x00,0xca,0xd2,0x04,0x08,0x00,0x10]
+
+v_mul_f64 v[5:6], s[4:5], s[4:5] div:2
+// CHECK: [0x05,0x00,0xca,0xd2,0x04,0x08,0x00,0x18]
+
+v_min_f64 v[5:6], s[4:5], s[4:5]
+// CHECK: [0x05,0x00,0xcc,0xd2,0x04,0x08,0x00,0x00]
+
+v_min_f64 v[254:255], s[4:5], s[4:5]
+// CHECK: [0xfe,0x00,0xcc,0xd2,0x04,0x08,0x00,0x00]
+
+v_min_f64 v[5:6], v[1:2], s[4:5]
+// CHECK: [0x05,0x00,0xcc,0xd2,0x01,0x09,0x00,0x00]
+
+v_min_f64 v[5:6], v[254:255], s[4:5]
+// CHECK: [0x05,0x00,0xcc,0xd2,0xfe,0x09,0x00,0x00]
+
+v_min_f64 v[5:6], s[4:5], v[2:3]
+// CHECK: [0x05,0x00,0xcc,0xd2,0x04,0x04,0x02,0x00]
+
+v_min_f64 v[5:6], s[4:5], v[254:255]
+// CHECK: [0x05,0x00,0xcc,0xd2,0x04,0xfc,0x03,0x00]
+
+v_min_f64 v[5:6], -s[4:5], s[4:5]
+// CHECK: [0x05,0x00,0xcc,0xd2,0x04,0x08,0x00,0x20]
+
+v_min_f64 v[5:6], s[4:5], -s[4:5]
+// CHECK: [0x05,0x00,0xcc,0xd2,0x04,0x08,0x00,0x40]
+
+v_min_f64 v[5:6], -s[4:5], -s[4:5]
+// CHECK: [0x05,0x00,0xcc,0xd2,0x04,0x08,0x00,0x60]
+
+v_min_f64 v[5:6], |s[4:5]|, s[4:5]
+// CHECK: [0x05,0x01,0xcc,0xd2,0x04,0x08,0x00,0x00]
+
+v_min_f64 v[5:6], s[4:5], |s[4:5]|
+// CHECK: [0x05,0x02,0xcc,0xd2,0x04,0x08,0x00,0x00]
+
+v_min_f64 v[5:6], |s[4:5]|, |s[4:5]|
+// CHECK: [0x05,0x03,0xcc,0xd2,0x04,0x08,0x00,0x00]
+
+v_min_f64 v[5:6], s[4:5], s[4:5] clamp
+// CHECK: [0x05,0x08,0xcc,0xd2,0x04,0x08,0x00,0x00]
+
+v_min_f64 v[5:6], s[4:5], s[4:5] mul:2
+// CHECK: [0x05,0x00,0xcc,0xd2,0x04,0x08,0x00,0x08]
+
+v_min_f64 v[5:6], s[4:5], s[4:5] mul:4
+// CHECK: [0x05,0x00,0xcc,0xd2,0x04,0x08,0x00,0x10]
+
+v_min_f64 v[5:6], s[4:5], s[4:5] div:2
+// CHECK: [0x05,0x00,0xcc,0xd2,0x04,0x08,0x00,0x18]
+
+v_max_f64 v[5:6], s[4:5], s[4:5]
+// CHECK: [0x05,0x00,0xce,0xd2,0x04,0x08,0x00,0x00]
+
+v_max_f64 v[254:255], s[4:5], s[4:5]
+// CHECK: [0xfe,0x00,0xce,0xd2,0x04,0x08,0x00,0x00]
+
+v_max_f64 v[5:6], v[1:2], s[4:5]
+// CHECK: [0x05,0x00,0xce,0xd2,0x01,0x09,0x00,0x00]
+
+v_max_f64 v[5:6], v[254:255], s[4:5]
+// CHECK: [0x05,0x00,0xce,0xd2,0xfe,0x09,0x00,0x00]
+
+v_max_f64 v[5:6], s[4:5], v[2:3]
+// CHECK: [0x05,0x00,0xce,0xd2,0x04,0x04,0x02,0x00]
+
+v_max_f64 v[5:6], s[4:5], v[254:255]
+// CHECK: [0x05,0x00,0xce,0xd2,0x04,0xfc,0x03,0x00]
+
+v_max_f64 v[5:6], -s[4:5], s[4:5]
+// CHECK: [0x05,0x00,0xce,0xd2,0x04,0x08,0x00,0x20]
+
+v_max_f64 v[5:6], s[4:5], -s[4:5]
+// CHECK: [0x05,0x00,0xce,0xd2,0x04,0x08,0x00,0x40]
+
+v_max_f64 v[5:6], -s[4:5], -s[4:5]
+// CHECK: [0x05,0x00,0xce,0xd2,0x04,0x08,0x00,0x60]
+
+v_max_f64 v[5:6], |s[4:5]|, s[4:5]
+// CHECK: [0x05,0x01,0xce,0xd2,0x04,0x08,0x00,0x00]
+
+v_max_f64 v[5:6], s[4:5], |s[4:5]|
+// CHECK: [0x05,0x02,0xce,0xd2,0x04,0x08,0x00,0x00]
+
+v_max_f64 v[5:6], |s[4:5]|, |s[4:5]|
+// CHECK: [0x05,0x03,0xce,0xd2,0x04,0x08,0x00,0x00]
+
+v_max_f64 v[5:6], s[4:5], s[4:5] clamp
+// CHECK: [0x05,0x08,0xce,0xd2,0x04,0x08,0x00,0x00]
+
+v_max_f64 v[5:6], s[4:5], s[4:5] mul:2
+// CHECK: [0x05,0x00,0xce,0xd2,0x04,0x08,0x00,0x08]
+
+v_max_f64 v[5:6], s[4:5], s[4:5] mul:4
+// CHECK: [0x05,0x00,0xce,0xd2,0x04,0x08,0x00,0x10]
+
+v_max_f64 v[5:6], s[4:5], s[4:5] div:2
+// CHECK: [0x05,0x00,0xce,0xd2,0x04,0x08,0x00,0x18]
+
+v_ldexp_f64 v[5:6], 0, s2
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0x04,0x00,0x00]
+
+v_ldexp_f64 v[254:255], 0, s2
+// CHECK: [0xfe,0x00,0xd0,0xd2,0x80,0x04,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0.5, s2
+// CHECK: [0x05,0x00,0xd0,0xd2,0xf0,0x04,0x00,0x00]
+
+v_ldexp_f64 v[5:6], v[1:2], s2
+// CHECK: [0x05,0x00,0xd0,0xd2,0x01,0x05,0x00,0x00]
+
+v_ldexp_f64 v[5:6], v[254:255], s2
+// CHECK: [0x05,0x00,0xd0,0xd2,0xfe,0x05,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, s103
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xce,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xd0,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xd2,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, vcc_lo
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xd4,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, vcc_hi
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xd6,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, tba_lo
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xd8,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, tba_hi
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xda,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, tma_lo
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xdc,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, tma_hi
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xde,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, ttmp11
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xf6,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, m0
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xf8,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, exec_lo
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xfc,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, exec_hi
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xfe,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, 0
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0x00,0x01,0x00]
+
+v_ldexp_f64 v[5:6], 0, -1
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0x82,0x01,0x00]
+
+v_ldexp_f64 v[5:6], 0, 0.5
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xe0,0x01,0x00]
+
+v_ldexp_f64 v[5:6], 0, -4.0
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xee,0x01,0x00]
+
+v_ldexp_f64 v[5:6], 0, scc
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xfa,0x01,0x00]
+
+v_ldexp_f64 v[5:6], 0, v2
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0x04,0x02,0x00]
+
+v_ldexp_f64 v[5:6], 0, v255
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0xfe,0x03,0x00]
+
+v_ldexp_f64 v[5:6], 0, s2 clamp
+// CHECK: [0x05,0x08,0xd0,0xd2,0x80,0x04,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, s2 mul:2
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0x04,0x00,0x08]
+
+v_ldexp_f64 v[5:6], 0, s2 mul:4
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0x04,0x00,0x10]
+
+v_ldexp_f64 v[5:6], 0, s2 div:2
+// CHECK: [0x05,0x00,0xd0,0xd2,0x80,0x04,0x00,0x18]
+
+v_mul_lo_u32 v5, 0, s2
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_lo_u32 v255, 0, s2
+// CHECK: [0xff,0x00,0xd2,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_lo_u32 v5, -1, s2
+// CHECK: [0x05,0x00,0xd2,0xd2,0xc1,0x04,0x00,0x00]
+
+v_mul_lo_u32 v5, 0.5, s2
+// CHECK: [0x05,0x00,0xd2,0xd2,0xf0,0x04,0x00,0x00]
+
+v_mul_lo_u32 v5, -4.0, s2
+// CHECK: [0x05,0x00,0xd2,0xd2,0xf7,0x04,0x00,0x00]
+
+v_mul_lo_u32 v5, v1, s2
+// CHECK: [0x05,0x00,0xd2,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_lo_u32 v5, v255, s2
+// CHECK: [0x05,0x00,0xd2,0xd2,0xff,0x05,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, s103
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0xce,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0xd0,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0xd2,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0xd4,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0xd6,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0xd8,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0xda,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0xdc,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0xde,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0xf6,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, m0
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0xf8,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0xfc,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0xfe,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0x00,0x01,0x00]
+
+v_mul_lo_u32 v5, 0, -1
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0x82,0x01,0x00]
+
+v_mul_lo_u32 v5, 0, 0.5
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0xe0,0x01,0x00]
+
+v_mul_lo_u32 v5, 0, -4.0
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0xee,0x01,0x00]
+
+v_mul_lo_u32 v5, 0, v2
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0x04,0x02,0x00]
+
+v_mul_lo_u32 v5, 0, v255
+// CHECK: [0x05,0x00,0xd2,0xd2,0x80,0xfe,0x03,0x00]
+
+v_mul_hi_u32 v5, 0, s2
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_hi_u32 v255, 0, s2
+// CHECK: [0xff,0x00,0xd4,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_hi_u32 v5, -1, s2
+// CHECK: [0x05,0x00,0xd4,0xd2,0xc1,0x04,0x00,0x00]
+
+v_mul_hi_u32 v5, 0.5, s2
+// CHECK: [0x05,0x00,0xd4,0xd2,0xf0,0x04,0x00,0x00]
+
+v_mul_hi_u32 v5, -4.0, s2
+// CHECK: [0x05,0x00,0xd4,0xd2,0xf7,0x04,0x00,0x00]
+
+v_mul_hi_u32 v5, v1, s2
+// CHECK: [0x05,0x00,0xd4,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_hi_u32 v5, v255, s2
+// CHECK: [0x05,0x00,0xd4,0xd2,0xff,0x05,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, s103
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0xce,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0xd0,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0xd2,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0xd4,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0xd6,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0xd8,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0xda,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0xdc,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0xde,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0xf6,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, m0
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0xf8,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0xfc,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0xfe,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0x00,0x01,0x00]
+
+v_mul_hi_u32 v5, 0, -1
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0x82,0x01,0x00]
+
+v_mul_hi_u32 v5, 0, 0.5
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0xe0,0x01,0x00]
+
+v_mul_hi_u32 v5, 0, -4.0
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0xee,0x01,0x00]
+
+v_mul_hi_u32 v5, 0, v2
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0x04,0x02,0x00]
+
+v_mul_hi_u32 v5, 0, v255
+// CHECK: [0x05,0x00,0xd4,0xd2,0x80,0xfe,0x03,0x00]
+
+v_mul_lo_i32 v5, 0, s2
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_lo_i32 v255, 0, s2
+// CHECK: [0xff,0x00,0xd6,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_lo_i32 v5, -1, s2
+// CHECK: [0x05,0x00,0xd6,0xd2,0xc1,0x04,0x00,0x00]
+
+v_mul_lo_i32 v5, 0.5, s2
+// CHECK: [0x05,0x00,0xd6,0xd2,0xf0,0x04,0x00,0x00]
+
+v_mul_lo_i32 v5, -4.0, s2
+// CHECK: [0x05,0x00,0xd6,0xd2,0xf7,0x04,0x00,0x00]
+
+v_mul_lo_i32 v5, v1, s2
+// CHECK: [0x05,0x00,0xd6,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_lo_i32 v5, v255, s2
+// CHECK: [0x05,0x00,0xd6,0xd2,0xff,0x05,0x00,0x00]
+
+v_mul_lo_i32 v5, 0, s103
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0xce,0x00,0x00]
+
+v_mul_lo_i32 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0xd0,0x00,0x00]
+
+v_mul_lo_i32 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0xd2,0x00,0x00]
+
+v_mul_lo_i32 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0xd4,0x00,0x00]
+
+v_mul_lo_i32 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0xd6,0x00,0x00]
+
+v_mul_lo_i32 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0xd8,0x00,0x00]
+
+v_mul_lo_i32 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0xda,0x00,0x00]
+
+v_mul_lo_i32 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0xdc,0x00,0x00]
+
+v_mul_lo_i32 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0xde,0x00,0x00]
+
+v_mul_lo_i32 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0xf6,0x00,0x00]
+
+v_mul_lo_i32 v5, 0, m0
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0xf8,0x00,0x00]
+
+v_mul_lo_i32 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0xfc,0x00,0x00]
+
+v_mul_lo_i32 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0xfe,0x00,0x00]
+
+v_mul_lo_i32 v5, 0, 0
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0x00,0x01,0x00]
+
+v_mul_lo_i32 v5, 0, -1
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0x82,0x01,0x00]
+
+v_mul_lo_i32 v5, 0, 0.5
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0xe0,0x01,0x00]
+
+v_mul_lo_i32 v5, 0, -4.0
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0xee,0x01,0x00]
+
+v_mul_lo_i32 v5, 0, v2
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0x04,0x02,0x00]
+
+v_mul_lo_i32 v5, 0, v255
+// CHECK: [0x05,0x00,0xd6,0xd2,0x80,0xfe,0x03,0x00]
+
+v_mul_hi_i32 v5, 0, s2
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_hi_i32 v255, 0, s2
+// CHECK: [0xff,0x00,0xd8,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_hi_i32 v5, -1, s2
+// CHECK: [0x05,0x00,0xd8,0xd2,0xc1,0x04,0x00,0x00]
+
+v_mul_hi_i32 v5, 0.5, s2
+// CHECK: [0x05,0x00,0xd8,0xd2,0xf0,0x04,0x00,0x00]
+
+v_mul_hi_i32 v5, -4.0, s2
+// CHECK: [0x05,0x00,0xd8,0xd2,0xf7,0x04,0x00,0x00]
+
+v_mul_hi_i32 v5, v1, s2
+// CHECK: [0x05,0x00,0xd8,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_hi_i32 v5, v255, s2
+// CHECK: [0x05,0x00,0xd8,0xd2,0xff,0x05,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, s103
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0xce,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0xd0,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0xd2,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0xd4,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0xd6,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0xd8,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0xda,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0xdc,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0xde,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0xf6,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, m0
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0xf8,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0xfc,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0xfe,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0x00,0x01,0x00]
+
+v_mul_hi_i32 v5, 0, -1
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0x82,0x01,0x00]
+
+v_mul_hi_i32 v5, 0, 0.5
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0xe0,0x01,0x00]
+
+v_mul_hi_i32 v5, 0, -4.0
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0xee,0x01,0x00]
+
+v_mul_hi_i32 v5, 0, v2
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0x04,0x02,0x00]
+
+v_mul_hi_i32 v5, 0, v255
+// CHECK: [0x05,0x00,0xd8,0xd2,0x80,0xfe,0x03,0x00]
+
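+// Note: the v_div_scale tests below exercise the VOP3b form, which pairs the
+// vector result with a scalar condition output (vcc here); that condition is
+// what v_div_fmas consumes later in the division expansion.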
+v_div_scale_f32 v5, vcc, s1, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x01,0x00,0x01,0x02]
+
+v_div_scale_f32 v255, vcc, s1, 0, 0
+// CHECK: [0xff,0x6a,0xda,0xd2,0x01,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, s103, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x67,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x68,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x69,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, vcc_lo, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x6a,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, vcc_hi, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x6b,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, tba_lo, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x6c,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, tba_hi, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x6d,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, tma_lo, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x6e,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, tma_hi, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x6f,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, ttmp11, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x7b,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, m0, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x7c,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, exec_lo, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x7e,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, exec_hi, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x7f,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, 0, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x80,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, -1, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0xc1,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, 0.5, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0xf0,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, -4.0, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0xf7,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, v1, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x01,0x01,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, v255, 0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0xff,0x01,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, s1, -1, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x01,0x82,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, s1, 0.5, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x01,0xe0,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, s1, -4.0, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x01,0xee,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, s1, v2, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x01,0x04,0x02,0x02]
+
+v_div_scale_f32 v5, vcc, s1, v255, 0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x01,0xfe,0x03,0x02]
+
+v_div_scale_f32 v5, vcc, s1, 0, -1
+// CHECK: [0x05,0x6a,0xda,0xd2,0x01,0x00,0x05,0x03]
+
+v_div_scale_f32 v5, vcc, s1, 0, 0.5
+// CHECK: [0x05,0x6a,0xda,0xd2,0x01,0x00,0xc1,0x03]
+
+v_div_scale_f32 v5, vcc, s1, 0, -4.0
+// CHECK: [0x05,0x6a,0xda,0xd2,0x01,0x00,0xdd,0x03]
+
+v_div_scale_f32 v5, vcc, s1, 0, v3
+// CHECK: [0x05,0x6a,0xda,0xd2,0x01,0x00,0x0d,0x04]
+
+v_div_scale_f32 v5, vcc, s1, 0, v255
+// CHECK: [0x05,0x6a,0xda,0xd2,0x01,0x00,0xfd,0x07]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], 0, 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x02,0x00,0x01,0x02]
+
+v_div_scale_f64 v[254:255], vcc, s[2:3], 0, 0
+// CHECK: [0xfe,0x6a,0xdc,0xd2,0x02,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, s[4:5], 0, 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x04,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, s[102:103], 0, 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x66,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, flat_scratch, 0, 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x68,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, vcc, 0, 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x6a,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, tba, 0, 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x6c,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, tma, 0, 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x6e,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, ttmp[10:11], 0, 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x7a,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, exec, 0, 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x7e,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, 0, 0, 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x80,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, -1, 0, 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0xc1,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, 0.5, 0, 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0xf0,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, -4.0, 0, 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0xf7,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, v[1:2], 0, 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x01,0x01,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, v[254:255], 0, 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0xfe,0x01,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], -1, 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x02,0x82,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], 0.5, 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x02,0xe0,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], -4.0, 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x02,0xee,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], v[2:3], 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x02,0x04,0x02,0x02]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], v[254:255], 0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x02,0xfc,0x03,0x02]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], 0, -1
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x02,0x00,0x05,0x03]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], 0, 0.5
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x02,0x00,0xc1,0x03]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], 0, -4.0
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x02,0x00,0xdd,0x03]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], 0, v[3:4]
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x02,0x00,0x0d,0x04]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], 0, v[254:255]
+// CHECK: [0x05,0x6a,0xdc,0xd2,0x02,0x00,0xf9,0x07]
+
+v_div_fmas_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x05,0x0e,0x04]
+
+v_div_fmas_f32 v255, v1, v2, v3
+// CHECK: [0xff,0x00,0xde,0xd2,0x01,0x05,0x0e,0x04]
+
+v_div_fmas_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd2,0xff,0x05,0x0e,0x04]
+
+v_div_fmas_f32 v5, v1, v255, v3
+// CHECK: [0x05,0x00,0xde,0xd2,0x01,0xff,0x0f,0x04]
+
+v_div_fmas_f32 v5, v1, v2, v255
+// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x05,0xfe,0x07]
+
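+// VOP3 operand modifiers: neg (-), abs (| |), clamp, and the output
+// multipliers mul:2/mul:4/div:2 live in dedicated modifier bits, so each
+// combination below changes only the modifier bytes of the encoding.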
+v_div_fmas_f32 v5, -v1, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x05,0x0e,0x24]
+
+v_div_fmas_f32 v5, v1, -v2, v3
+// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x05,0x0e,0x44]
+
+v_div_fmas_f32 v5, v1, v2, -v3
+// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x05,0x0e,0x84]
+
+v_div_fmas_f32 v5, -v1, -v2, -v3
+// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x05,0x0e,0xe4]
+
+v_div_fmas_f32 v5, |v1|, v2, v3
+// CHECK: [0x05,0x01,0xde,0xd2,0x01,0x05,0x0e,0x04]
+
+v_div_fmas_f32 v5, v1, |v2|, v3
+// CHECK: [0x05,0x02,0xde,0xd2,0x01,0x05,0x0e,0x04]
+
+v_div_fmas_f32 v5, v1, v2, |v3|
+// CHECK: [0x05,0x04,0xde,0xd2,0x01,0x05,0x0e,0x04]
+
+v_div_fmas_f32 v5, |v1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xde,0xd2,0x01,0x05,0x0e,0x04]
+
+v_div_fmas_f32 v5, v1, v2, v3 clamp
+// CHECK: [0x05,0x08,0xde,0xd2,0x01,0x05,0x0e,0x04]
+
+v_div_fmas_f32 v5, v1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x05,0x0e,0x0c]
+
+v_div_fmas_f32 v5, v1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x05,0x0e,0x14]
+
+v_div_fmas_f32 v5, v1, v2, v3 div:2
+// CHECK: [0x05,0x00,0xde,0xd2,0x01,0x05,0x0e,0x1c]
+
+v_div_fmas_f64 v[5:6], vcc, vcc, vcc
+// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xd4,0xa8,0x01]
+
+v_div_fmas_f64 v[254:255], vcc, vcc, vcc
+// CHECK: [0xfe,0x00,0xe0,0xd2,0x6a,0xd4,0xa8,0x01]
+
+v_div_fmas_f64 v[5:6], v[1:2], vcc, vcc
+// CHECK: [0x05,0x00,0xe0,0xd2,0x01,0xd5,0xa8,0x01]
+
+v_div_fmas_f64 v[5:6], v[254:255], vcc, vcc
+// CHECK: [0x05,0x00,0xe0,0xd2,0xfe,0xd5,0xa8,0x01]
+
+v_div_fmas_f64 v[5:6], vcc, v[2:3], vcc
+// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0x04,0xaa,0x01]
+
+v_div_fmas_f64 v[5:6], vcc, v[254:255], vcc
+// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xfc,0xab,0x01]
+
+v_div_fmas_f64 v[5:6], vcc, vcc, v[3:4]
+// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xd4,0x0c,0x04]
+
+v_div_fmas_f64 v[5:6], vcc, vcc, v[254:255]
+// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xd4,0xf8,0x07]
+
+v_div_fmas_f64 v[5:6], -vcc, vcc, vcc
+// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xd4,0xa8,0x21]
+
+v_div_fmas_f64 v[5:6], vcc, -vcc, vcc
+// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xd4,0xa8,0x41]
+
+v_div_fmas_f64 v[5:6], vcc, vcc, -vcc
+// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xd4,0xa8,0x81]
+
+v_div_fmas_f64 v[5:6], -vcc, -vcc, -vcc
+// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xd4,0xa8,0xe1]
+
+v_div_fmas_f64 v[5:6], |vcc|, vcc, vcc
+// CHECK: [0x05,0x01,0xe0,0xd2,0x6a,0xd4,0xa8,0x01]
+
+v_div_fmas_f64 v[5:6], vcc, |vcc|, vcc
+// CHECK: [0x05,0x02,0xe0,0xd2,0x6a,0xd4,0xa8,0x01]
+
+v_div_fmas_f64 v[5:6], vcc, vcc, |vcc|
+// CHECK: [0x05,0x04,0xe0,0xd2,0x6a,0xd4,0xa8,0x01]
+
+v_div_fmas_f64 v[5:6], |vcc|, |vcc|, |vcc|
+// CHECK: [0x05,0x07,0xe0,0xd2,0x6a,0xd4,0xa8,0x01]
+
+v_div_fmas_f64 v[5:6], vcc, vcc, vcc clamp
+// CHECK: [0x05,0x08,0xe0,0xd2,0x6a,0xd4,0xa8,0x01]
+
+v_div_fmas_f64 v[5:6], vcc, vcc, vcc mul:2
+// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xd4,0xa8,0x09]
+
+v_div_fmas_f64 v[5:6], vcc, vcc, vcc mul:4
+// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xd4,0xa8,0x11]
+
+v_div_fmas_f64 v[5:6], vcc, vcc, vcc div:2
+// CHECK: [0x05,0x00,0xe0,0xd2,0x6a,0xd4,0xa8,0x19]
+
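+// Sum-of-absolute-differences family: v_msad_u8 is the masked byte SAD, while
+// v_qsad_pk_u16_u8 and v_mqsad_pk_u16_u8 compute quad SADs that return packed
+// unsigned 16-bit results in a 64-bit destination pair.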
+v_msad_u8 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x01,0x00,0x01,0x02]
+
+v_msad_u8 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xe2,0xd2,0x01,0x00,0x01,0x02]
+
+v_msad_u8 v5, s103, 0, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x67,0x00,0x01,0x02]
+
+v_msad_u8 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x68,0x00,0x01,0x02]
+
+v_msad_u8 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x69,0x00,0x01,0x02]
+
+v_msad_u8 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x6a,0x00,0x01,0x02]
+
+v_msad_u8 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x6b,0x00,0x01,0x02]
+
+v_msad_u8 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x6c,0x00,0x01,0x02]
+
+v_msad_u8 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x6d,0x00,0x01,0x02]
+
+v_msad_u8 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x6e,0x00,0x01,0x02]
+
+v_msad_u8 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x6f,0x00,0x01,0x02]
+
+v_msad_u8 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x7b,0x00,0x01,0x02]
+
+v_msad_u8 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x7c,0x00,0x01,0x02]
+
+v_msad_u8 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x7e,0x00,0x01,0x02]
+
+v_msad_u8 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x7f,0x00,0x01,0x02]
+
+v_msad_u8 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x80,0x00,0x01,0x02]
+
+v_msad_u8 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0xc1,0x00,0x01,0x02]
+
+v_msad_u8 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x01,0x01,0x01,0x02]
+
+v_msad_u8 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0xff,0x01,0x01,0x02]
+
+v_msad_u8 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x01,0x82,0x01,0x02]
+
+v_msad_u8 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x01,0x04,0x02,0x02]
+
+v_msad_u8 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xe2,0xd2,0x01,0xfe,0x03,0x02]
+
+v_msad_u8 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xe2,0xd2,0x01,0x00,0x05,0x03]
+
+v_msad_u8 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xe2,0xd2,0x01,0x00,0x0d,0x04]
+
+v_msad_u8 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xe2,0xd2,0x01,0x00,0xfd,0x07]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd2,0x02,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[254:255], s[2:3], 0, 0
+// CHECK: [0xfe,0x00,0xe4,0xd2,0x02,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], s[4:5], 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd2,0x04,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], s[102:103], 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd2,0x66,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], flat_scratch, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd2,0x68,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], vcc, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd2,0x6a,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], tba, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd2,0x6c,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], tma, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd2,0x6e,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], ttmp[10:11], 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd2,0x7a,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], exec, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd2,0x7e,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], 0, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd2,0x80,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], -1, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd2,0xc1,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], v[1:2], 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd2,0x01,0x01,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], v[254:255], 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd2,0xfe,0x01,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], -1, 0
+// CHECK: [0x05,0x00,0xe4,0xd2,0x02,0x82,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], v2, 0
+// CHECK: [0x05,0x00,0xe4,0xd2,0x02,0x04,0x02,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], v255, 0
+// CHECK: [0x05,0x00,0xe4,0xd2,0x02,0xfe,0x03,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], 0, -1
+// CHECK: [0x05,0x00,0xe4,0xd2,0x02,0x00,0x05,0x03]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], 0, v[3:4]
+// CHECK: [0x05,0x00,0xe4,0xd2,0x02,0x00,0x0d,0x04]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], 0, v[254:255]
+// CHECK: [0x05,0x00,0xe4,0xd2,0x02,0x00,0xf9,0x07]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd2,0x02,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[254:255], s[2:3], 0, 0
+// CHECK: [0xfe,0x00,0xe6,0xd2,0x02,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], s[4:5], 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd2,0x04,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], s[102:103], 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd2,0x66,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], flat_scratch, 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd2,0x68,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], vcc, 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd2,0x6a,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], tba, 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd2,0x6c,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], tma, 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd2,0x6e,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], ttmp[10:11], 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd2,0x7a,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], exec, 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd2,0x7e,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], 0, 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd2,0x80,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], -1, 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd2,0xc1,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], v[1:2], 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd2,0x01,0x01,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], v[254:255], 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd2,0xfe,0x01,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], -1, 0
+// CHECK: [0x05,0x00,0xe6,0xd2,0x02,0x82,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], v2, 0
+// CHECK: [0x05,0x00,0xe6,0xd2,0x02,0x04,0x02,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], v255, 0
+// CHECK: [0x05,0x00,0xe6,0xd2,0x02,0xfe,0x03,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], 0, -1
+// CHECK: [0x05,0x00,0xe6,0xd2,0x02,0x00,0x05,0x03]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], 0, v[3:4]
+// CHECK: [0x05,0x00,0xe6,0xd2,0x02,0x00,0x0d,0x04]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], 0, v[254:255]
+// CHECK: [0x05,0x00,0xe6,0xd2,0x02,0x00,0xf9,0x07]
+
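+// v_trig_preop_f64 returns a segment of 2/pi (selected by src1) for
+// large-argument trig reduction; the trailing tests cover clamp and the
+// output modifiers on this opcode.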
+v_trig_preop_f64 v[5:6], 0, s2
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0x04,0x00,0x00]
+
+v_trig_preop_f64 v[254:255], 0, s2
+// CHECK: [0xfe,0x00,0xe8,0xd2,0x80,0x04,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0.5, s2
+// CHECK: [0x05,0x00,0xe8,0xd2,0xf0,0x04,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], v[1:2], s2
+// CHECK: [0x05,0x00,0xe8,0xd2,0x01,0x05,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], v[254:255], s2
+// CHECK: [0x05,0x00,0xe8,0xd2,0xfe,0x05,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, s103
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xce,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xd0,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xd2,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, vcc_lo
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xd4,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, vcc_hi
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xd6,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, tba_lo
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xd8,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, tba_hi
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xda,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, tma_lo
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xdc,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, tma_hi
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xde,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, ttmp11
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xf6,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, m0
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xf8,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, exec_lo
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xfc,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, exec_hi
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xfe,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, 0
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0x00,0x01,0x00]
+
+v_trig_preop_f64 v[5:6], 0, -1
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0x82,0x01,0x00]
+
+v_trig_preop_f64 v[5:6], 0, 0.5
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xe0,0x01,0x00]
+
+v_trig_preop_f64 v[5:6], 0, -4.0
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xee,0x01,0x00]
+
+v_trig_preop_f64 v[5:6], 0, scc
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xfa,0x01,0x00]
+
+v_trig_preop_f64 v[5:6], 0, v2
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0x04,0x02,0x00]
+
+v_trig_preop_f64 v[5:6], 0, v255
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0xfe,0x03,0x00]
+
+v_trig_preop_f64 v[5:6], 0, s2 clamp
+// CHECK: [0x05,0x08,0xe8,0xd2,0x80,0x04,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, s2 mul:2
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0x04,0x00,0x08]
+
+v_trig_preop_f64 v[5:6], 0, s2 mul:4
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0x04,0x00,0x10]
+
+v_trig_preop_f64 v[5:6], 0, s2 div:2
+// CHECK: [0x05,0x00,0xe8,0xd2,0x80,0x04,0x00,0x18]
+
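+// The v_cmp_* tests that follow use the compact 32-bit VOPC encoding, which
+// always writes the comparison mask to the implicit vcc destination.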
+v_cmp_f_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x00,0x7c]
+
+v_cmp_f_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x00,0x7c]
+
+v_cmp_f_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x00,0x7c]
+
+v_cmp_f_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x00,0x7c]
+
+v_cmp_f_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x00,0x7c]
+
+v_cmp_f_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x00,0x7c]
+
+v_cmp_f_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x00,0x7c]
+
+v_cmp_f_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x00,0x7c]
+
+v_cmp_f_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x00,0x7c]
+
+v_cmp_f_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x00,0x7c]
+
+v_cmp_f_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x00,0x7c]
+
+v_cmp_f_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x00,0x7c]
+
+v_cmp_f_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x00,0x7c]
+
+v_cmp_f_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x00,0x7c]
+
+v_cmp_f_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x00,0x7c]
+
+v_cmp_f_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x00,0x7c]
+
+v_cmp_f_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x00,0x7c]
+
+v_cmp_f_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x00,0x7c]
+
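+// A 32-bit value with no inline-constant encoding is emitted as source
+// operand 0xff followed by the literal dword, doubling the instruction
+// to 8 bytes.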
+v_cmp_f_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x00,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_f_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x00,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_f_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x00,0x7c]
+
+v_cmp_f_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x00,0x7c]
+
+v_cmp_f_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x01,0x7c]
+
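+// The _e64 suffix forces the 8-byte VOP3 encoding, which allows an arbitrary
+// SGPR pair (or vcc/tba/tma/ttmp) as the destination and accepts source
+// modifiers such as -s2.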
+v_cmp_f_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x00,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x00,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x00,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x00,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x00,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x00,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x00,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x00,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x00,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x00,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x00,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_lt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x02,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_lt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x02,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_lt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x02,0x7c]
+
+v_cmp_lt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x03,0x7c]
+
+v_cmp_lt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x02,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x02,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x02,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x02,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x02,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x02,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x02,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x02,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x02,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x02,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x02,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_eq_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x04,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_eq_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x04,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_eq_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x04,0x7c]
+
+v_cmp_eq_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x05,0x7c]
+
+v_cmp_eq_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x04,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x04,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x04,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x04,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x04,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x04,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x04,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x04,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x04,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x04,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x04,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_le_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x06,0x7c]
+
+v_cmp_le_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x06,0x7c]
+
+v_cmp_le_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x06,0x7c]
+
+v_cmp_le_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x06,0x7c]
+
+v_cmp_le_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x06,0x7c]
+
+v_cmp_le_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x06,0x7c]
+
+v_cmp_le_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x06,0x7c]
+
+v_cmp_le_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x06,0x7c]
+
+v_cmp_le_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x06,0x7c]
+
+v_cmp_le_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x06,0x7c]
+
+v_cmp_le_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x06,0x7c]
+
+v_cmp_le_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x06,0x7c]
+
+v_cmp_le_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x06,0x7c]
+
+v_cmp_le_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x06,0x7c]
+
+v_cmp_le_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x06,0x7c]
+
+v_cmp_le_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x06,0x7c]
+
+v_cmp_le_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x06,0x7c]
+
+v_cmp_le_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x06,0x7c]
+
+v_cmp_le_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x06,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_le_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x06,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_le_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x06,0x7c]
+
+v_cmp_le_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x06,0x7c]
+
+v_cmp_le_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x07,0x7c]
+
+v_cmp_le_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x06,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x06,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x06,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x06,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x06,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x06,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x06,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x06,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x06,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x06,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x06,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_gt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x08,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_gt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x08,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_gt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x08,0x7c]
+
+v_cmp_gt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x09,0x7c]
+
+v_cmp_gt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x08,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x08,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x08,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x08,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x08,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x08,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x08,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x08,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x08,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x08,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x08,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_lg_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_lg_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_lg_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x7c]
+
+v_cmp_lg_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x7c]
+
+v_cmp_lg_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x0a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x0a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x0a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x0a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x0a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x0a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x0a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x0a,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x0a,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x0a,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_ge_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0c,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_ge_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0c,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_ge_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x0c,0x7c]
+
+v_cmp_ge_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x0d,0x7c]
+
+v_cmp_ge_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x0c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x0c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x0c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x0c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x0c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x0c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x0c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x0c,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x0c,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x0c,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_o_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0e,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_o_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0e,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_o_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x0e,0x7c]
+
+v_cmp_o_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x0f,0x7c]
+
+v_cmp_o_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x0e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x0e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x0e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x0e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x0e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x0e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x0e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x0e,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x0e,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x0e,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_u_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x10,0x7c]
+
+v_cmp_u_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x10,0x7c]
+
+v_cmp_u_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x10,0x7c]
+
+v_cmp_u_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x10,0x7c]
+
+v_cmp_u_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x10,0x7c]
+
+v_cmp_u_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x10,0x7c]
+
+v_cmp_u_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x10,0x7c]
+
+v_cmp_u_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x10,0x7c]
+
+v_cmp_u_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x10,0x7c]
+
+v_cmp_u_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x10,0x7c]
+
+v_cmp_u_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x10,0x7c]
+
+v_cmp_u_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x10,0x7c]
+
+v_cmp_u_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x10,0x7c]
+
+v_cmp_u_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x10,0x7c]
+
+v_cmp_u_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x10,0x7c]
+
+v_cmp_u_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x10,0x7c]
+
+v_cmp_u_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x10,0x7c]
+
+v_cmp_u_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x10,0x7c]
+
+v_cmp_u_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x10,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_u_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x10,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_u_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x10,0x7c]
+
+v_cmp_u_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x10,0x7c]
+
+v_cmp_u_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x11,0x7c]
+
+v_cmp_u_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x10,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x10,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x10,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x10,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x10,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x10,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x10,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x10,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x10,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x10,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x10,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_nge_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x12,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_nge_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x12,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_nge_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x12,0x7c]
+
+v_cmp_nge_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x13,0x7c]
+
+v_cmp_nge_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x12,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x12,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x12,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x12,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x12,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x12,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x12,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x12,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x12,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x12,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x12,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_nlg_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x14,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_nlg_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x14,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_nlg_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x14,0x7c]
+
+v_cmp_nlg_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x15,0x7c]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x14,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x14,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x14,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x14,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x14,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x14,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x14,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x14,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x14,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x14,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x14,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_ngt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x16,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_ngt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x16,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_ngt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x16,0x7c]
+
+v_cmp_ngt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x17,0x7c]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x16,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x16,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x16,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x16,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x16,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x16,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x16,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x16,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x16,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x16,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x16,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_nle_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x18,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_nle_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x18,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_nle_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x18,0x7c]
+
+v_cmp_nle_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x19,0x7c]
+
+v_cmp_nle_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x18,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x18,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x18,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x18,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x18,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x18,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x18,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x18,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x18,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x18,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x18,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_neq_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x1a,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_neq_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x1a,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_neq_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x1a,0x7c]
+
+v_cmp_neq_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x1b,0x7c]
+
+v_cmp_neq_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x1a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x1a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x1a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x1a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x1a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x1a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x1a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x1a,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x1a,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x1a,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_nlt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x1c,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_nlt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x1c,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_nlt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x1c,0x7c]
+
+v_cmp_nlt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x1d,0x7c]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x1c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x1c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x1c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x1c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x1c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x1c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x1c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x1c,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x1c,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x1c,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_tru_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x1e,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_tru_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x1e,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_tru_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x1e,0x7c]
+
+v_cmp_tru_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x1f,0x7c]
+
+v_cmp_tru_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x1e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x1e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x1e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x1e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x1e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x1e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x1e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x1e,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x1e,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x1e,0xd0,0x80,0x04,0x00,0x40]
+
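+// The v_cmpx_* variants below reuse the v_cmp_* operand matrix; on SI/CI each
+// v_cmpx opcode is the corresponding v_cmp opcode plus 0x10 (visible as +0x20
+// in the third byte of the e32 encoding) and additionally writes EXEC.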
+v_cmpx_f_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x20,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_f_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x20,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_f_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x20,0x7c]
+
+v_cmpx_f_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x21,0x7c]
+
+v_cmpx_f_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x20,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x20,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x20,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x20,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x20,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x20,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x20,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x20,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x20,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x20,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_lt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x22,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x22,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_lt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x22,0x7c]
+
+v_cmpx_lt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x23,0x7c]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x22,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x22,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x22,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x22,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x22,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x22,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x22,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x22,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x22,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x22,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_eq_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x24,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x24,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_eq_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x24,0x7c]
+
+v_cmpx_eq_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x25,0x7c]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x24,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x24,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x24,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x24,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x24,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x24,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x24,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x24,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x24,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x24,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_le_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x26,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x26,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_le_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x26,0x7c]
+
+v_cmpx_le_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x27,0x7c]
+
+v_cmpx_le_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x26,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x26,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x26,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x26,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x26,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x26,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x26,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x26,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x26,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x26,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_gt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x28,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x28,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_gt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x28,0x7c]
+
+v_cmpx_gt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x29,0x7c]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x28,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x28,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x28,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x28,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x28,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x28,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x28,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x28,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x28,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x28,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_lg_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x2a,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lg_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x2a,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_lg_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x2a,0x7c]
+
+v_cmpx_lg_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x2b,0x7c]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x2a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x2a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x2a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x2a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x2a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x2a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x2a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x2a,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x2a,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_ge_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x2c,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x2c,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ge_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x2c,0x7c]
+
+v_cmpx_ge_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x2d,0x7c]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x2c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x2c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x2c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x2c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x2c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x2c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x2c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x2c,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x2c,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_o_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x2e,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_o_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x2e,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_o_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x2e,0x7c]
+
+v_cmpx_o_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x2f,0x7c]
+
+v_cmpx_o_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x2e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x2e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x2e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x2e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x2e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x2e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x2e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x2e,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x2e,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_u_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x30,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_u_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x30,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_u_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x30,0x7c]
+
+v_cmpx_u_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x31,0x7c]
+
+v_cmpx_u_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x30,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x30,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x30,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x30,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x30,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x30,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x30,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x30,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x30,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x30,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_nge_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x32,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nge_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x32,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_nge_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x32,0x7c]
+
+v_cmpx_nge_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x33,0x7c]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x32,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x32,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x32,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x32,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x32,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x32,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x32,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x32,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x32,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x32,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_nlg_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x34,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlg_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x34,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_nlg_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x34,0x7c]
+
+v_cmpx_nlg_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x35,0x7c]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x34,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x34,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x34,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x34,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x34,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x34,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x34,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x34,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x34,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x34,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_ngt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x36,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ngt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x36,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ngt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x36,0x7c]
+
+v_cmpx_ngt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x37,0x7c]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x36,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x36,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x36,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x36,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x36,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x36,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x36,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x36,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x36,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x36,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_nle_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x38,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nle_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x38,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_nle_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x38,0x7c]
+
+v_cmpx_nle_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x39,0x7c]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x38,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x38,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x38,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x38,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x38,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x38,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x38,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x38,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x38,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x38,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_neq_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x3a,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_neq_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x3a,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_neq_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x3a,0x7c]
+
+v_cmpx_neq_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x3b,0x7c]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x3a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x3a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x3a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x3a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x3a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x3a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x3a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x3a,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x3a,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_nlt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x3c,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x3c,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_nlt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x3c,0x7c]
+
+v_cmpx_nlt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x3d,0x7c]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x3c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x3c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x3c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x3c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x3c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x3c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x3c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x3c,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x3c,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_tru_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x3e,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_tru_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x3e,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_tru_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x3e,0x7c]
+
+v_cmpx_tru_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x3f,0x7c]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x3e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x3e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x3e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x3e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x3e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x3e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x3e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x3e,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x3e,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_f_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x40,0x7c]
+
+v_cmp_f_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x40,0x7c]
+
+v_cmp_f_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x40,0x7c]
+
+v_cmp_f_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x40,0x7c]
+
+v_cmp_f_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x40,0x7c]
+
+v_cmp_f_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x40,0x7c]
+
+v_cmp_f_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x40,0x7c]
+
+v_cmp_f_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x40,0x7c]
+
+v_cmp_f_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x40,0x7c]
+
+v_cmp_f_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x40,0x7c]
+
+v_cmp_f_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x40,0x7c]
+
+v_cmp_f_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x40,0x7c]
+
+v_cmp_f_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x40,0x7c]
+
+v_cmp_f_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x40,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_f_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x40,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_f_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x40,0x7c]
+
+v_cmp_f_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x40,0x7c]
+
+v_cmp_f_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x41,0x7c]
+
+v_cmp_f_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x40,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x40,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x40,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x40,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x40,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x40,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x40,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x40,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x40,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x40,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_f_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x40,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_f_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x40,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_f_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x40,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_f_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x40,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_f_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x40,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_f_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x40,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_f_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x40,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_f_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x40,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_lt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x42,0x7c]
+
+v_cmp_lt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x42,0x7c]
+
+v_cmp_lt_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x42,0x7c]
+
+v_cmp_lt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x42,0x7c]
+
+v_cmp_lt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x42,0x7c]
+
+v_cmp_lt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x42,0x7c]
+
+v_cmp_lt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x42,0x7c]
+
+v_cmp_lt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x42,0x7c]
+
+v_cmp_lt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x42,0x7c]
+
+v_cmp_lt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x42,0x7c]
+
+v_cmp_lt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x42,0x7c]
+
+v_cmp_lt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x42,0x7c]
+
+v_cmp_lt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x42,0x7c]
+
+v_cmp_lt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x42,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_lt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x42,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_lt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x42,0x7c]
+
+v_cmp_lt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x42,0x7c]
+
+v_cmp_lt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x43,0x7c]
+
+v_cmp_lt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x42,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x42,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x42,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x42,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x42,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x42,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x42,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x42,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x42,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x42,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_lt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x42,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_lt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x42,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_lt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x42,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_lt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x42,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_lt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x42,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_lt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x42,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_lt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x42,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_lt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x42,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_eq_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x44,0x7c]
+
+v_cmp_eq_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x44,0x7c]
+
+v_cmp_eq_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x44,0x7c]
+
+v_cmp_eq_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x44,0x7c]
+
+v_cmp_eq_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x44,0x7c]
+
+v_cmp_eq_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x44,0x7c]
+
+v_cmp_eq_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x44,0x7c]
+
+v_cmp_eq_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x44,0x7c]
+
+v_cmp_eq_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x44,0x7c]
+
+v_cmp_eq_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x44,0x7c]
+
+v_cmp_eq_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x44,0x7c]
+
+v_cmp_eq_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x44,0x7c]
+
+v_cmp_eq_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x44,0x7c]
+
+v_cmp_eq_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x44,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_eq_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x44,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_eq_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x44,0x7c]
+
+v_cmp_eq_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x44,0x7c]
+
+v_cmp_eq_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x45,0x7c]
+
+v_cmp_eq_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x44,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x44,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x44,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x44,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x44,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x44,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x44,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x44,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x44,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x44,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_eq_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x44,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_eq_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x44,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_eq_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x44,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_eq_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x44,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_eq_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x44,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_eq_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x44,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_eq_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x44,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_eq_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x44,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_le_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x46,0x7c]
+
+v_cmp_le_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x46,0x7c]
+
+v_cmp_le_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x46,0x7c]
+
+v_cmp_le_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x46,0x7c]
+
+v_cmp_le_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x46,0x7c]
+
+v_cmp_le_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x46,0x7c]
+
+v_cmp_le_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x46,0x7c]
+
+v_cmp_le_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x46,0x7c]
+
+v_cmp_le_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x46,0x7c]
+
+v_cmp_le_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x46,0x7c]
+
+v_cmp_le_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x46,0x7c]
+
+v_cmp_le_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x46,0x7c]
+
+v_cmp_le_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x46,0x7c]
+
+v_cmp_le_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x46,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_le_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x46,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_le_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x46,0x7c]
+
+v_cmp_le_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x46,0x7c]
+
+v_cmp_le_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x47,0x7c]
+
+v_cmp_le_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x46,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x46,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x46,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x46,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x46,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x46,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x46,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x46,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x46,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x46,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_le_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x46,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_le_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x46,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_le_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x46,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_le_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x46,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_le_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x46,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_le_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x46,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_le_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x46,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_le_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x46,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_gt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x48,0x7c]
+
+v_cmp_gt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x48,0x7c]
+
+v_cmp_gt_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x48,0x7c]
+
+v_cmp_gt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x48,0x7c]
+
+v_cmp_gt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x48,0x7c]
+
+v_cmp_gt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x48,0x7c]
+
+v_cmp_gt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x48,0x7c]
+
+v_cmp_gt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x48,0x7c]
+
+v_cmp_gt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x48,0x7c]
+
+v_cmp_gt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x48,0x7c]
+
+v_cmp_gt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x48,0x7c]
+
+v_cmp_gt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x48,0x7c]
+
+v_cmp_gt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x48,0x7c]
+
+v_cmp_gt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x48,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_gt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x48,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_gt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x48,0x7c]
+
+v_cmp_gt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x48,0x7c]
+
+v_cmp_gt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x49,0x7c]
+
+v_cmp_gt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x48,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x48,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x48,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x48,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x48,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x48,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x48,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x48,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x48,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x48,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_gt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x48,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_gt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x48,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_gt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x48,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_gt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x48,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_gt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x48,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_gt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x48,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_gt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x48,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_gt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x48,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_lg_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x4a,0x7c]
+
+v_cmp_lg_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x4a,0x7c]
+
+v_cmp_lg_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x4a,0x7c]
+
+v_cmp_lg_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x4a,0x7c]
+
+v_cmp_lg_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x4a,0x7c]
+
+v_cmp_lg_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x4a,0x7c]
+
+v_cmp_lg_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x4a,0x7c]
+
+v_cmp_lg_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x4a,0x7c]
+
+v_cmp_lg_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x4a,0x7c]
+
+v_cmp_lg_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x4a,0x7c]
+
+v_cmp_lg_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x4a,0x7c]
+
+v_cmp_lg_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x4a,0x7c]
+
+v_cmp_lg_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x4a,0x7c]
+
+v_cmp_lg_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x4a,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_lg_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x4a,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_lg_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x4a,0x7c]
+
+v_cmp_lg_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x4a,0x7c]
+
+v_cmp_lg_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x4b,0x7c]
+
+v_cmp_lg_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x4a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x4a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x4a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x4a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x4a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x4a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x4a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x4a,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_lg_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x4a,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_lg_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_lg_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_lg_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_lg_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_lg_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_lg_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_lg_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_ge_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x4c,0x7c]
+
+v_cmp_ge_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x4c,0x7c]
+
+v_cmp_ge_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x4c,0x7c]
+
+v_cmp_ge_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x4c,0x7c]
+
+v_cmp_ge_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x4c,0x7c]
+
+v_cmp_ge_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x4c,0x7c]
+
+v_cmp_ge_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x4c,0x7c]
+
+v_cmp_ge_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x4c,0x7c]
+
+v_cmp_ge_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x4c,0x7c]
+
+v_cmp_ge_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x4c,0x7c]
+
+v_cmp_ge_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x4c,0x7c]
+
+v_cmp_ge_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x4c,0x7c]
+
+v_cmp_ge_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x4c,0x7c]
+
+v_cmp_ge_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x4c,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_ge_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x4c,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_ge_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x4c,0x7c]
+
+v_cmp_ge_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x4c,0x7c]
+
+v_cmp_ge_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x4d,0x7c]
+
+v_cmp_ge_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x4c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x4c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x4c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x4c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x4c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x4c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x4c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x4c,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_ge_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x4c,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_ge_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_ge_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_ge_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_ge_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_ge_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_ge_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_ge_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_o_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x4e,0x7c]
+
+v_cmp_o_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x4e,0x7c]
+
+v_cmp_o_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x4e,0x7c]
+
+v_cmp_o_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x4e,0x7c]
+
+v_cmp_o_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x4e,0x7c]
+
+v_cmp_o_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x4e,0x7c]
+
+v_cmp_o_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x4e,0x7c]
+
+v_cmp_o_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x4e,0x7c]
+
+v_cmp_o_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x4e,0x7c]
+
+v_cmp_o_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x4e,0x7c]
+
+v_cmp_o_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x4e,0x7c]
+
+v_cmp_o_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x4e,0x7c]
+
+v_cmp_o_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x4e,0x7c]
+
+v_cmp_o_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x4e,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_o_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x4e,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_o_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x4e,0x7c]
+
+v_cmp_o_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x4e,0x7c]
+
+v_cmp_o_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x4f,0x7c]
+
+v_cmp_o_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x4e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x4e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x4e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x4e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x4e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x4e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x4e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x4e,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_o_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x4e,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_o_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_o_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_o_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_o_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_o_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_o_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_o_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_u_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x50,0x7c]
+
+v_cmp_u_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x50,0x7c]
+
+v_cmp_u_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x50,0x7c]
+
+v_cmp_u_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x50,0x7c]
+
+v_cmp_u_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x50,0x7c]
+
+v_cmp_u_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x50,0x7c]
+
+v_cmp_u_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x50,0x7c]
+
+v_cmp_u_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x50,0x7c]
+
+v_cmp_u_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x50,0x7c]
+
+v_cmp_u_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x50,0x7c]
+
+v_cmp_u_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x50,0x7c]
+
+v_cmp_u_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x50,0x7c]
+
+v_cmp_u_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x50,0x7c]
+
+v_cmp_u_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x50,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_u_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x50,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_u_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x50,0x7c]
+
+v_cmp_u_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x50,0x7c]
+
+v_cmp_u_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x51,0x7c]
+
+v_cmp_u_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x50,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x50,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x50,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x50,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x50,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x50,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x50,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x50,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x50,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x50,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_u_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x50,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_u_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x50,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_u_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x50,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_u_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x50,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_u_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x50,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_u_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x50,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_u_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x50,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_u_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x50,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_nge_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x52,0x7c]
+
+v_cmp_nge_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x52,0x7c]
+
+v_cmp_nge_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x52,0x7c]
+
+v_cmp_nge_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x52,0x7c]
+
+v_cmp_nge_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x52,0x7c]
+
+v_cmp_nge_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x52,0x7c]
+
+v_cmp_nge_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x52,0x7c]
+
+v_cmp_nge_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x52,0x7c]
+
+v_cmp_nge_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x52,0x7c]
+
+v_cmp_nge_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x52,0x7c]
+
+v_cmp_nge_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x52,0x7c]
+
+v_cmp_nge_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x52,0x7c]
+
+v_cmp_nge_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x52,0x7c]
+
+v_cmp_nge_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x52,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_nge_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x52,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_nge_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x52,0x7c]
+
+v_cmp_nge_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x52,0x7c]
+
+v_cmp_nge_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x53,0x7c]
+
+v_cmp_nge_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x52,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x52,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x52,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x52,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x52,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x52,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x52,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x52,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x52,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x52,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_nge_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x52,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_nge_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x52,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_nge_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x52,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_nge_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x52,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_nge_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x52,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_nge_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x52,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_nge_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x52,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_nge_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x52,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_nlg_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x54,0x7c]
+
+v_cmp_nlg_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x54,0x7c]
+
+v_cmp_nlg_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x54,0x7c]
+
+v_cmp_nlg_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x54,0x7c]
+
+v_cmp_nlg_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x54,0x7c]
+
+v_cmp_nlg_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x54,0x7c]
+
+v_cmp_nlg_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x54,0x7c]
+
+v_cmp_nlg_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x54,0x7c]
+
+v_cmp_nlg_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x54,0x7c]
+
+v_cmp_nlg_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x54,0x7c]
+
+v_cmp_nlg_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x54,0x7c]
+
+v_cmp_nlg_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x54,0x7c]
+
+v_cmp_nlg_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x54,0x7c]
+
+v_cmp_nlg_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x54,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_nlg_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x54,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_nlg_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x54,0x7c]
+
+v_cmp_nlg_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x54,0x7c]
+
+v_cmp_nlg_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x55,0x7c]
+
+v_cmp_nlg_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x54,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x54,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x54,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x54,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x54,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x54,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x54,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x54,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x54,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x54,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_nlg_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x54,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_nlg_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x54,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_nlg_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x54,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_nlg_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x54,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_nlg_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x54,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_nlg_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x54,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_nlg_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x54,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_nlg_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x54,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_ngt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x56,0x7c]
+
+v_cmp_ngt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x56,0x7c]
+
+v_cmp_ngt_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x56,0x7c]
+
+v_cmp_ngt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x56,0x7c]
+
+v_cmp_ngt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x56,0x7c]
+
+v_cmp_ngt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x56,0x7c]
+
+v_cmp_ngt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x56,0x7c]
+
+v_cmp_ngt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x56,0x7c]
+
+v_cmp_ngt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x56,0x7c]
+
+v_cmp_ngt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x56,0x7c]
+
+v_cmp_ngt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x56,0x7c]
+
+v_cmp_ngt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x56,0x7c]
+
+v_cmp_ngt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x56,0x7c]
+
+v_cmp_ngt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x56,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_ngt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x56,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_ngt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x56,0x7c]
+
+v_cmp_ngt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x56,0x7c]
+
+v_cmp_ngt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x57,0x7c]
+
+v_cmp_ngt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x56,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x56,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x56,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x56,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x56,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x56,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x56,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x56,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x56,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x56,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_ngt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x56,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_ngt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x56,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_ngt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x56,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_ngt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x56,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_ngt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x56,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_ngt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x56,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_ngt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x56,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_ngt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x56,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_nle_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x58,0x7c]
+
+v_cmp_nle_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x58,0x7c]
+
+v_cmp_nle_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x58,0x7c]
+
+v_cmp_nle_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x58,0x7c]
+
+v_cmp_nle_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x58,0x7c]
+
+v_cmp_nle_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x58,0x7c]
+
+v_cmp_nle_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x58,0x7c]
+
+v_cmp_nle_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x58,0x7c]
+
+v_cmp_nle_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x58,0x7c]
+
+v_cmp_nle_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x58,0x7c]
+
+v_cmp_nle_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x58,0x7c]
+
+v_cmp_nle_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x58,0x7c]
+
+v_cmp_nle_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x58,0x7c]
+
+v_cmp_nle_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x58,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_nle_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x58,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_nle_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x58,0x7c]
+
+v_cmp_nle_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x58,0x7c]
+
+v_cmp_nle_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x59,0x7c]
+
+v_cmp_nle_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x58,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x58,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x58,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x58,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x58,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x58,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x58,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x58,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x58,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x58,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_nle_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x58,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_nle_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x58,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_nle_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x58,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_nle_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x58,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_nle_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x58,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_nle_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x58,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_nle_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x58,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_nle_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x58,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_neq_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x5a,0x7c]
+
+v_cmp_neq_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x5a,0x7c]
+
+v_cmp_neq_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x5a,0x7c]
+
+v_cmp_neq_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x5a,0x7c]
+
+v_cmp_neq_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x5a,0x7c]
+
+v_cmp_neq_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x5a,0x7c]
+
+v_cmp_neq_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x5a,0x7c]
+
+v_cmp_neq_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x5a,0x7c]
+
+v_cmp_neq_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x5a,0x7c]
+
+v_cmp_neq_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x5a,0x7c]
+
+v_cmp_neq_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x5a,0x7c]
+
+v_cmp_neq_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x5a,0x7c]
+
+v_cmp_neq_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x5a,0x7c]
+
+v_cmp_neq_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x5a,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_neq_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x5a,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_neq_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x5a,0x7c]
+
+v_cmp_neq_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x5a,0x7c]
+
+v_cmp_neq_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x5b,0x7c]
+
+v_cmp_neq_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x5a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x5a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x5a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x5a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x5a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x5a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x5a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x5a,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_neq_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x5a,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_neq_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_neq_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_neq_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_neq_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_neq_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_neq_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_neq_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_nlt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x5c,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_nlt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x5c,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_nlt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x5c,0x7c]
+
+v_cmp_nlt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x5c,0x7c]
+
+v_cmp_nlt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x5d,0x7c]
+
+v_cmp_nlt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x5c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x5c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x5c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x5c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x5c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x5c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x5c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x5c,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_nlt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x5c,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_nlt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_nlt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_nlt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_nlt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_nlt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_nlt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_nlt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_tru_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x5e,0x7c]
+
+v_cmp_tru_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x5e,0x7c]
+
+v_cmp_tru_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x5e,0x7c]
+
+v_cmp_tru_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x5e,0x7c]
+
+v_cmp_tru_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x5e,0x7c]
+
+v_cmp_tru_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x5e,0x7c]
+
+v_cmp_tru_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x5e,0x7c]
+
+v_cmp_tru_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x5e,0x7c]
+
+v_cmp_tru_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x5e,0x7c]
+
+v_cmp_tru_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x5e,0x7c]
+
+v_cmp_tru_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x5e,0x7c]
+
+v_cmp_tru_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x5e,0x7c]
+
+v_cmp_tru_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x5e,0x7c]
+
+v_cmp_tru_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x5e,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_tru_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x5e,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_tru_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x5e,0x7c]
+
+v_cmp_tru_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x5e,0x7c]
+
+v_cmp_tru_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x5f,0x7c]
+
+v_cmp_tru_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x5e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x5e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x5e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x5e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x5e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x5e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x5e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x5e,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_tru_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x5e,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_tru_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_tru_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_tru_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_tru_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_tru_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_tru_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_tru_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_f_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x60,0x7c]
+
+v_cmpx_f_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x60,0x7c]
+
+v_cmpx_f_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x60,0x7c]
+
+v_cmpx_f_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x60,0x7c]
+
+v_cmpx_f_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x60,0x7c]
+
+v_cmpx_f_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x60,0x7c]
+
+v_cmpx_f_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x60,0x7c]
+
+v_cmpx_f_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x60,0x7c]
+
+v_cmpx_f_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x60,0x7c]
+
+v_cmpx_f_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x60,0x7c]
+
+v_cmpx_f_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x60,0x7c]
+
+v_cmpx_f_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x60,0x7c]
+
+v_cmpx_f_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x60,0x7c]
+
+v_cmpx_f_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x60,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_f_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x60,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_f_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x60,0x7c]
+
+v_cmpx_f_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x60,0x7c]
+
+v_cmpx_f_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x61,0x7c]
+
+v_cmpx_f_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x60,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x60,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x60,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x60,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x60,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x60,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x60,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_f_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_f_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_f_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_f_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_f_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_f_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_f_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_f_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_lt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x62,0x7c]
+
+v_cmpx_lt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x62,0x7c]
+
+v_cmpx_lt_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x62,0x7c]
+
+v_cmpx_lt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x62,0x7c]
+
+v_cmpx_lt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x62,0x7c]
+
+v_cmpx_lt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x62,0x7c]
+
+v_cmpx_lt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x62,0x7c]
+
+v_cmpx_lt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x62,0x7c]
+
+v_cmpx_lt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x62,0x7c]
+
+v_cmpx_lt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x62,0x7c]
+
+v_cmpx_lt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x62,0x7c]
+
+v_cmpx_lt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x62,0x7c]
+
+v_cmpx_lt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x62,0x7c]
+
+v_cmpx_lt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x62,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x62,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_lt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x62,0x7c]
+
+v_cmpx_lt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x62,0x7c]
+
+v_cmpx_lt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x63,0x7c]
+
+v_cmpx_lt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x62,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x62,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x62,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x62,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x62,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x62,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x62,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_lt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_lt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_lt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_lt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_lt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_lt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_lt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_lt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_eq_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x64,0x7c]
+
+v_cmpx_eq_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x64,0x7c]
+
+v_cmpx_eq_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x64,0x7c]
+
+v_cmpx_eq_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x64,0x7c]
+
+v_cmpx_eq_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x64,0x7c]
+
+v_cmpx_eq_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x64,0x7c]
+
+v_cmpx_eq_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x64,0x7c]
+
+v_cmpx_eq_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x64,0x7c]
+
+v_cmpx_eq_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x64,0x7c]
+
+v_cmpx_eq_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x64,0x7c]
+
+v_cmpx_eq_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x64,0x7c]
+
+v_cmpx_eq_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x64,0x7c]
+
+v_cmpx_eq_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x64,0x7c]
+
+v_cmpx_eq_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x64,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x64,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_eq_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x64,0x7c]
+
+v_cmpx_eq_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x64,0x7c]
+
+v_cmpx_eq_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x65,0x7c]
+
+v_cmpx_eq_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x64,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x64,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x64,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x64,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x64,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x64,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x64,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_eq_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_eq_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_eq_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_eq_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_eq_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_eq_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_eq_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_eq_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_le_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x66,0x7c]
+
+v_cmpx_le_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x66,0x7c]
+
+v_cmpx_le_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x66,0x7c]
+
+v_cmpx_le_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x66,0x7c]
+
+v_cmpx_le_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x66,0x7c]
+
+v_cmpx_le_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x66,0x7c]
+
+v_cmpx_le_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x66,0x7c]
+
+v_cmpx_le_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x66,0x7c]
+
+v_cmpx_le_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x66,0x7c]
+
+v_cmpx_le_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x66,0x7c]
+
+v_cmpx_le_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x66,0x7c]
+
+v_cmpx_le_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x66,0x7c]
+
+v_cmpx_le_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x66,0x7c]
+
+v_cmpx_le_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x66,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x66,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_le_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x66,0x7c]
+
+v_cmpx_le_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x66,0x7c]
+
+v_cmpx_le_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x67,0x7c]
+
+v_cmpx_le_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x66,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x66,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x66,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x66,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x66,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x66,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x66,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_le_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_le_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_le_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_le_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_le_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_le_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_le_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_le_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_gt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x68,0x7c]
+
+v_cmpx_gt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x68,0x7c]
+
+v_cmpx_gt_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x68,0x7c]
+
+v_cmpx_gt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x68,0x7c]
+
+v_cmpx_gt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x68,0x7c]
+
+v_cmpx_gt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x68,0x7c]
+
+v_cmpx_gt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x68,0x7c]
+
+v_cmpx_gt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x68,0x7c]
+
+v_cmpx_gt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x68,0x7c]
+
+v_cmpx_gt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x68,0x7c]
+
+v_cmpx_gt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x68,0x7c]
+
+v_cmpx_gt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x68,0x7c]
+
+v_cmpx_gt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x68,0x7c]
+
+v_cmpx_gt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x68,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x68,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_gt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x68,0x7c]
+
+v_cmpx_gt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x68,0x7c]
+
+v_cmpx_gt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x69,0x7c]
+
+v_cmpx_gt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x68,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x68,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x68,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x68,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x68,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x68,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x68,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_gt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_gt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_gt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_gt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_gt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_gt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_gt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_gt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_lg_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x6a,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lg_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x6a,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_lg_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x6a,0x7c]
+
+v_cmpx_lg_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x6a,0x7c]
+
+v_cmpx_lg_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x6b,0x7c]
+
+v_cmpx_lg_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x6a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x6a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x6a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x6a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x6a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x6a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x6a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_lg_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_lg_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_lg_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_lg_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_lg_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_lg_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_lg_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_lg_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_ge_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x6c,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x6c,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ge_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x6c,0x7c]
+
+v_cmpx_ge_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x6c,0x7c]
+
+v_cmpx_ge_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x6d,0x7c]
+
+v_cmpx_ge_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x6c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x6c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x6c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x6c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x6c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x6c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x6c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_ge_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_ge_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_ge_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_ge_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_ge_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_ge_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_ge_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_ge_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_o_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x6e,0x7c]
+
+v_cmpx_o_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x6e,0x7c]
+
+v_cmpx_o_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x6e,0x7c]
+
+v_cmpx_o_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x6e,0x7c]
+
+v_cmpx_o_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x6e,0x7c]
+
+v_cmpx_o_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x6e,0x7c]
+
+v_cmpx_o_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x6e,0x7c]
+
+v_cmpx_o_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x6e,0x7c]
+
+v_cmpx_o_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x6e,0x7c]
+
+v_cmpx_o_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x6e,0x7c]
+
+v_cmpx_o_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x6e,0x7c]
+
+v_cmpx_o_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x6e,0x7c]
+
+v_cmpx_o_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x6e,0x7c]
+
+v_cmpx_o_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x6e,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_o_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x6e,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_o_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x6e,0x7c]
+
+v_cmpx_o_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x6e,0x7c]
+
+v_cmpx_o_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x6f,0x7c]
+
+v_cmpx_o_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x6e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x6e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x6e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x6e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x6e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x6e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x6e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_o_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_o_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_o_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_o_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_o_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_o_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_o_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_o_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_u_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x70,0x7c]
+
+v_cmpx_u_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x70,0x7c]
+
+v_cmpx_u_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x70,0x7c]
+
+v_cmpx_u_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x70,0x7c]
+
+v_cmpx_u_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x70,0x7c]
+
+v_cmpx_u_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x70,0x7c]
+
+v_cmpx_u_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x70,0x7c]
+
+v_cmpx_u_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x70,0x7c]
+
+v_cmpx_u_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x70,0x7c]
+
+v_cmpx_u_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x70,0x7c]
+
+v_cmpx_u_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x70,0x7c]
+
+v_cmpx_u_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x70,0x7c]
+
+v_cmpx_u_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x70,0x7c]
+
+v_cmpx_u_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x70,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_u_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x70,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_u_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x70,0x7c]
+
+v_cmpx_u_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x70,0x7c]
+
+v_cmpx_u_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x71,0x7c]
+
+v_cmpx_u_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x70,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x70,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x70,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x70,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x70,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x70,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x70,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x70,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x70,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x70,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_u_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x70,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_u_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_u_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_u_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_u_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_u_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_u_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_u_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_nge_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x72,0x7c]
+
+v_cmpx_nge_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x72,0x7c]
+
+v_cmpx_nge_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x72,0x7c]
+
+v_cmpx_nge_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x72,0x7c]
+
+v_cmpx_nge_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x72,0x7c]
+
+v_cmpx_nge_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x72,0x7c]
+
+v_cmpx_nge_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x72,0x7c]
+
+v_cmpx_nge_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x72,0x7c]
+
+v_cmpx_nge_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x72,0x7c]
+
+v_cmpx_nge_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x72,0x7c]
+
+v_cmpx_nge_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x72,0x7c]
+
+v_cmpx_nge_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x72,0x7c]
+
+v_cmpx_nge_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x72,0x7c]
+
+v_cmpx_nge_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x72,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nge_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x72,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_nge_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x72,0x7c]
+
+v_cmpx_nge_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x72,0x7c]
+
+v_cmpx_nge_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x73,0x7c]
+
+v_cmpx_nge_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x72,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x72,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x72,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x72,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x72,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x72,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x72,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x72,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x72,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x72,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_nge_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x72,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_nge_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_nge_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_nge_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_nge_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_nge_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_nge_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_nge_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_nlg_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x74,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlg_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x74,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_nlg_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x74,0x7c]
+
+v_cmpx_nlg_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x74,0x7c]
+
+v_cmpx_nlg_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x75,0x7c]
+
+v_cmpx_nlg_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x74,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x74,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x74,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x74,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x74,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x74,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x74,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x74,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x74,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x74,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x74,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_nlg_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_nlg_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_nlg_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_nlg_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_nlg_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_nlg_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_ngt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x76,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ngt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x76,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ngt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x76,0x7c]
+
+v_cmpx_ngt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x76,0x7c]
+
+v_cmpx_ngt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x77,0x7c]
+
+v_cmpx_ngt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x76,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x76,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x76,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x76,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x76,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x76,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x76,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x76,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x76,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x76,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x76,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_ngt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_ngt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_ngt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_ngt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_ngt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_ngt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_nle_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x78,0x7c]
+
+v_cmpx_nle_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x78,0x7c]
+
+v_cmpx_nle_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x78,0x7c]
+
+v_cmpx_nle_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x78,0x7c]
+
+v_cmpx_nle_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x78,0x7c]
+
+v_cmpx_nle_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x78,0x7c]
+
+v_cmpx_nle_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x78,0x7c]
+
+v_cmpx_nle_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x78,0x7c]
+
+v_cmpx_nle_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x78,0x7c]
+
+v_cmpx_nle_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x78,0x7c]
+
+v_cmpx_nle_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x78,0x7c]
+
+v_cmpx_nle_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x78,0x7c]
+
+v_cmpx_nle_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x78,0x7c]
+
+v_cmpx_nle_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x78,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nle_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x78,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_nle_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x78,0x7c]
+
+v_cmpx_nle_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x78,0x7c]
+
+v_cmpx_nle_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x79,0x7c]
+
+v_cmpx_nle_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x78,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x78,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x78,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x78,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x78,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x78,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x78,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x78,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x78,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x78,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_nle_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x78,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_nle_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_nle_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_nle_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_nle_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_nle_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_nle_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_nle_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_neq_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x7a,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_neq_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x7a,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_neq_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x7a,0x7c]
+
+v_cmpx_neq_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x7a,0x7c]
+
+v_cmpx_neq_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x7b,0x7c]
+
+v_cmpx_neq_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x7a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x7a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x7a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x7a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x7a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x7a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x7a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_neq_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_neq_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_neq_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_neq_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_neq_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_neq_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_neq_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_neq_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_nlt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x7c,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x7c,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_nlt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x7c,0x7c]
+
+v_cmpx_nlt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x7c,0x7c]
+
+v_cmpx_nlt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x7d,0x7c]
+
+v_cmpx_nlt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x7c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x7c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x7c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x7c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x7c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x7c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x7c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_nlt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_nlt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_nlt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_nlt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_nlt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_nlt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_tru_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x7e,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_tru_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x7e,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_tru_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x7e,0x7c]
+
+v_cmpx_tru_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x7e,0x7c]
+
+v_cmpx_tru_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x7f,0x7c]
+
+v_cmpx_tru_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x7e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x7e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x7e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x7e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x7e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x7e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x7e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_tru_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_tru_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_tru_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_tru_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_tru_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_tru_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_tru_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_tru_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmps_f_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x80,0x7c]
+
+v_cmps_f_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x80,0x7c]
+
+v_cmps_f_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x80,0x7c]
+
+v_cmps_f_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x80,0x7c]
+
+v_cmps_f_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x80,0x7c]
+
+v_cmps_f_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x80,0x7c]
+
+v_cmps_f_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x80,0x7c]
+
+v_cmps_f_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x80,0x7c]
+
+v_cmps_f_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x80,0x7c]
+
+v_cmps_f_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x80,0x7c]
+
+v_cmps_f_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x80,0x7c]
+
+v_cmps_f_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x80,0x7c]
+
+v_cmps_f_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x80,0x7c]
+
+v_cmps_f_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x80,0x7c]
+
+v_cmps_f_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x80,0x7c]
+
+v_cmps_f_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x80,0x7c]
+
+v_cmps_f_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x80,0x7c]
+
+v_cmps_f_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x80,0x7c]
+
+v_cmps_f_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x80,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_f_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x80,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_f_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x80,0x7c]
+
+v_cmps_f_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x80,0x7c]
+
+v_cmps_f_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x81,0x7c]
+
+v_cmps_f_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_f_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x80,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_f_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x80,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_f_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x80,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_f_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x80,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_f_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x80,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_f_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x80,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_f_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x80,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x80,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmps_f_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x80,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmps_f_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x80,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmps_f_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x80,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmps_lt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x82,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_lt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x82,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_lt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x82,0x7c]
+
+v_cmps_lt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x83,0x7c]
+
+v_cmps_lt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x82,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x82,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_lt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x82,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_lt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x82,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_lt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x82,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_lt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x82,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_lt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x82,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x82,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x82,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x82,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmps_lt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x82,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmps_eq_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x84,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_eq_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x84,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_eq_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x84,0x7c]
+
+v_cmps_eq_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x85,0x7c]
+
+v_cmps_eq_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x84,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x84,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_eq_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x84,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_eq_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x84,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_eq_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x84,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_eq_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x84,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_eq_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x84,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x84,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x84,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x84,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmps_eq_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x84,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmps_le_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x86,0x7c]
+
+v_cmps_le_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x86,0x7c]
+
+v_cmps_le_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x86,0x7c]
+
+v_cmps_le_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x86,0x7c]
+
+v_cmps_le_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x86,0x7c]
+
+v_cmps_le_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x86,0x7c]
+
+v_cmps_le_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x86,0x7c]
+
+v_cmps_le_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x86,0x7c]
+
+v_cmps_le_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x86,0x7c]
+
+v_cmps_le_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x86,0x7c]
+
+v_cmps_le_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x86,0x7c]
+
+v_cmps_le_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x86,0x7c]
+
+v_cmps_le_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x86,0x7c]
+
+v_cmps_le_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x86,0x7c]
+
+v_cmps_le_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x86,0x7c]
+
+v_cmps_le_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x86,0x7c]
+
+v_cmps_le_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x86,0x7c]
+
+v_cmps_le_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x86,0x7c]
+
+v_cmps_le_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x86,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_le_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x86,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_le_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x86,0x7c]
+
+v_cmps_le_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x86,0x7c]
+
+v_cmps_le_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x87,0x7c]
+
+v_cmps_le_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_le_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x86,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_le_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x86,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_le_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x86,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_le_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x86,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_le_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x86,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_le_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x86,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_le_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x86,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x86,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmps_le_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x86,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmps_le_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x86,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmps_le_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x86,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmps_gt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x88,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_gt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x88,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_gt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x88,0x7c]
+
+v_cmps_gt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x89,0x7c]
+
+v_cmps_gt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x88,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x88,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_gt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x88,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_gt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x88,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_gt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x88,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_gt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x88,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_gt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x88,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x88,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x88,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x88,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmps_gt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x88,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmps_lg_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x8a,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_lg_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x8a,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_lg_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x8a,0x7c]
+
+v_cmps_lg_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x8b,0x7c]
+
+v_cmps_lg_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x8a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x8a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_lg_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x8a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_lg_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x8a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_lg_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x8a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_lg_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x8a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_lg_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x8a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x8a,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x8a,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmps_lg_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x8a,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmps_ge_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x8c,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_ge_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x8c,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_ge_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x8c,0x7c]
+
+v_cmps_ge_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x8d,0x7c]
+
+v_cmps_ge_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x8c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x8c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_ge_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x8c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_ge_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x8c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_ge_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x8c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_ge_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x8c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_ge_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x8c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x8c,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x8c,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmps_ge_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x8c,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmps_o_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x8e,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_o_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x8e,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_o_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x8e,0x7c]
+
+v_cmps_o_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x8f,0x7c]
+
+v_cmps_o_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_o_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x8e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_o_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x8e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_o_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x8e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_o_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x8e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_o_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x8e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_o_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x8e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_o_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x8e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x8e,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmps_o_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmps_o_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x8e,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmps_o_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x8e,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmps_u_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x90,0x7c]
+
+v_cmps_u_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x90,0x7c]
+
+v_cmps_u_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x90,0x7c]
+
+v_cmps_u_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x90,0x7c]
+
+v_cmps_u_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x90,0x7c]
+
+v_cmps_u_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x90,0x7c]
+
+v_cmps_u_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x90,0x7c]
+
+v_cmps_u_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x90,0x7c]
+
+v_cmps_u_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x90,0x7c]
+
+v_cmps_u_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x90,0x7c]
+
+v_cmps_u_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x90,0x7c]
+
+v_cmps_u_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x90,0x7c]
+
+v_cmps_u_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x90,0x7c]
+
+v_cmps_u_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x90,0x7c]
+
+v_cmps_u_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x90,0x7c]
+
+v_cmps_u_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x90,0x7c]
+
+v_cmps_u_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x90,0x7c]
+
+v_cmps_u_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x90,0x7c]
+
+v_cmps_u_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x90,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_u_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x90,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_u_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x90,0x7c]
+
+v_cmps_u_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x90,0x7c]
+
+v_cmps_u_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x91,0x7c]
+
+v_cmps_u_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_u_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x90,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_u_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x90,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_u_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x90,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_u_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x90,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_u_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x90,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_u_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x90,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_u_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x90,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x90,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmps_u_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x90,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmps_u_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x90,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmps_u_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x90,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmps_nge_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x92,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_nge_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x92,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_nge_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x92,0x7c]
+
+v_cmps_nge_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x93,0x7c]
+
+v_cmps_nge_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x92,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x92,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nge_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x92,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nge_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x92,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nge_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x92,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nge_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x92,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nge_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x92,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x92,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x92,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x92,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmps_nge_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x92,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmps_nlg_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x94,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_nlg_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x94,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_nlg_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x94,0x7c]
+
+v_cmps_nlg_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x95,0x7c]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x94,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x94,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nlg_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x94,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nlg_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x94,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nlg_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x94,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nlg_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x94,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nlg_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x94,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x94,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x94,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x94,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmps_nlg_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x94,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmps_ngt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x96,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_ngt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x96,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_ngt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x96,0x7c]
+
+v_cmps_ngt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x97,0x7c]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x96,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x96,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_ngt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x96,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_ngt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x96,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_ngt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x96,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_ngt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x96,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_ngt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x96,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x96,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x96,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x96,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmps_ngt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x96,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmps_nle_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x98,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_nle_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x98,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_nle_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x98,0x7c]
+
+v_cmps_nle_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x99,0x7c]
+
+v_cmps_nle_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x98,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x98,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nle_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x98,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nle_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x98,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nle_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x98,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nle_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x98,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nle_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x98,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x98,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x98,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x98,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmps_nle_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x98,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmps_neq_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x9a,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_neq_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x9a,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_neq_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x9a,0x7c]
+
+v_cmps_neq_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x9b,0x7c]
+
+v_cmps_neq_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x9a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x9a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_neq_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x9a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_neq_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x9a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_neq_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x9a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_neq_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x9a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_neq_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x9a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x9a,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x9a,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmps_neq_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x9a,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmps_nlt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x9c,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_nlt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x9c,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_nlt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x9c,0x7c]
+
+v_cmps_nlt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x9d,0x7c]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x9c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x9c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nlt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x9c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nlt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x9c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nlt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x9c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nlt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x9c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nlt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x9c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x9c,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x9c,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmps_nlt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x9c,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmps_tru_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x9e,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_tru_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x9e,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_tru_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x9e,0x7c]
+
+v_cmps_tru_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x9f,0x7c]
+
+v_cmps_tru_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x9e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x9e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_tru_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x9e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_tru_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x9e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_tru_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x9e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_tru_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x9e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_tru_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x9e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x9e,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x9e,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmps_tru_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x9e,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpsx_f_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa0,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_f_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa0,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_f_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa0,0x7c]
+
+v_cmpsx_f_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa1,0x7c]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xa0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_f_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xa0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_f_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_f_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_f_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_f_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa0,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa0,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpsx_f_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpsx_lt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa2,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_lt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa2,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_lt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa2,0x7c]
+
+v_cmpsx_lt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa3,0x7c]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xa2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xa2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa2,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa2,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpsx_lt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpsx_eq_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa4,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_eq_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa4,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_eq_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa4,0x7c]
+
+v_cmpsx_eq_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa5,0x7c]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xa4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xa4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa4,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa4,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpsx_eq_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0x04,0x00,0x40]
+
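+// v_cmpsx_le_f32: e32 (VOPC) form, then e64 (VOP3) form; opcode byte 0xa6 in both encodings.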
+v_cmpsx_le_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa6,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_le_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa6,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_le_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa6,0x7c]
+
+v_cmpsx_le_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa7,0x7c]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xa6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_le_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xa6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_le_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_le_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_le_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_le_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa6,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa6,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpsx_le_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0x04,0x00,0x40]
+
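+// v_cmpsx_gt_f32: e32 (VOPC) form, then e64 (VOP3) form; opcode byte 0xa8 in both encodings.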
+v_cmpsx_gt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa8,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_gt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa8,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_gt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa8,0x7c]
+
+v_cmpsx_gt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa9,0x7c]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xa8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xa8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa8,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa8,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpsx_gt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0x04,0x00,0x40]
+
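+// v_cmpsx_lg_f32: e32 (VOPC) form, then e64 (VOP3) form; opcode byte 0xaa in both encodings.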
+v_cmpsx_lg_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xaa,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_lg_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xaa,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_lg_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xaa,0x7c]
+
+v_cmpsx_lg_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xab,0x7c]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xaa,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xaa,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xaa,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xaa,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xaa,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xaa,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xaa,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xaa,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xaa,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpsx_lg_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0x04,0x00,0x40]
+
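+// v_cmpsx_ge_f32: e32 (VOPC) form, then e64 (VOP3) form; opcode byte 0xac in both encodings.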
+v_cmpsx_ge_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xac,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_ge_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xac,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_ge_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xac,0x7c]
+
+v_cmpsx_ge_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xad,0x7c]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xac,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xac,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xac,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xac,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xac,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xac,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xac,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xac,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xac,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xac,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpsx_ge_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0x04,0x00,0x40]
+
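+// v_cmpsx_o_f32: e32 (VOPC) form, then e64 (VOP3) form; opcode byte 0xae in both encodings.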
+v_cmpsx_o_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xae,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_o_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xae,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_o_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xae,0x7c]
+
+v_cmpsx_o_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xaf,0x7c]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xae,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xae,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_o_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xae,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_o_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xae,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_o_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xae,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_o_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xae,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_o_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xae,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xae,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xae,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xae,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpsx_o_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0x04,0x00,0x40]
+
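+// v_cmpsx_u_f32: e32 (VOPC) form, then e64 (VOP3) form; opcode byte 0xb0 in both encodings.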
+v_cmpsx_u_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xb0,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_u_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xb0,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_u_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xb0,0x7c]
+
+v_cmpsx_u_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xb1,0x7c]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xb0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xb0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_u_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xb0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_u_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xb0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_u_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xb0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_u_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xb0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_u_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xb0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xb0,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xb0,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpsx_u_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0x04,0x00,0x40]
+
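+// v_cmpsx_nge_f32: e32 (VOPC) form, then e64 (VOP3) form; opcode byte 0xb2 in both encodings.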
+v_cmpsx_nge_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xb2,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_nge_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xb2,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_nge_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xb2,0x7c]
+
+v_cmpsx_nge_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xb3,0x7c]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xb2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xb2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xb2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xb2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xb2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xb2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xb2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xb2,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xb2,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpsx_nge_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0x04,0x00,0x40]
+
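+// v_cmpsx_nlg_f32: e32 (VOPC) form, then e64 (VOP3) form; opcode byte 0xb4 in both encodings.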
+v_cmpsx_nlg_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xb4,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_nlg_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xb4,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_nlg_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xb4,0x7c]
+
+v_cmpsx_nlg_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xb5,0x7c]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xb4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xb4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xb4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xb4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xb4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xb4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xb4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xb4,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xb4,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpsx_nlg_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0x04,0x00,0x40]
+
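+// v_cmpsx_ngt_f32: e32 (VOPC) form, then e64 (VOP3) form; opcode byte 0xb6 in both encodings.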
+v_cmpsx_ngt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xb6,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_ngt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xb6,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_ngt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xb6,0x7c]
+
+v_cmpsx_ngt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xb7,0x7c]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xb6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xb6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xb6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xb6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xb6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xb6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xb6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xb6,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xb6,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpsx_ngt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0x04,0x00,0x40]
+
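+// v_cmpsx_nle_f32: e32 (VOPC) form, then e64 (VOP3) form; opcode byte 0xb8 in both encodings.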
+v_cmpsx_nle_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xb8,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_nle_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xb8,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_nle_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xb8,0x7c]
+
+v_cmpsx_nle_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xb9,0x7c]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xb8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xb8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xb8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xb8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xb8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xb8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xb8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xb8,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xb8,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpsx_nle_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0x04,0x00,0x40]
+
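+// v_cmpsx_neq_f32: e32 (VOPC) form, then e64 (VOP3) form; opcode byte 0xba in both encodings.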
+v_cmpsx_neq_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xba,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_neq_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xba,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_neq_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xba,0x7c]
+
+v_cmpsx_neq_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xbb,0x7c]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xba,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xba,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xba,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xba,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xba,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xba,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xba,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xba,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xba,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xba,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpsx_neq_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0x04,0x00,0x40]
+
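+// v_cmpsx_nlt_f32: e32 (VOPC) form, then e64 (VOP3) form; opcode byte 0xbc in both encodings.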
+v_cmpsx_nlt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xbc,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_nlt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xbc,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_nlt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xbc,0x7c]
+
+v_cmpsx_nlt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xbd,0x7c]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xbc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xbc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xbc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xbc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xbc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xbc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xbc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xbc,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xbc,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpsx_nlt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0x04,0x00,0x40]
+
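+// v_cmpsx_tru_f32: e32 (VOPC) form, then e64 (VOP3) form; opcode byte 0xbe in both encodings.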
+v_cmpsx_tru_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xbe,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_tru_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xbe,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_tru_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xbe,0x7c]
+
+v_cmpsx_tru_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xbf,0x7c]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xbe,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xbe,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xbe,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xbe,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xbe,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xbe,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xbe,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xbe,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xbe,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xd0,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xd2,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpsx_tru_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmps_f_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc0,0x7c]
+
+v_cmps_f_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc0,0x7c]
+
+v_cmps_f_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xc0,0x7c]
+
+v_cmps_f_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xc0,0x7c]
+
+v_cmps_f_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc0,0x7c]
+
+v_cmps_f_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc0,0x7c]
+
+v_cmps_f_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc0,0x7c]
+
+v_cmps_f_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc0,0x7c]
+
+v_cmps_f_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc0,0x7c]
+
+v_cmps_f_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc0,0x7c]
+
+v_cmps_f_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc0,0x7c]
+
+v_cmps_f_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc0,0x7c]
+
+v_cmps_f_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc0,0x7c]
+
+v_cmps_f_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc0,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_f_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc0,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_f_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc0,0x7c]
+
+v_cmps_f_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc0,0x7c]
+
+v_cmps_f_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc1,0x7c]
+
+v_cmps_f_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_f_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xc0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_f_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xc0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_f_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xc0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_f_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xc0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_f_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xc0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_f_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xc0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_f_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xc0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_f_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmps_f_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xc0,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmps_f_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmps_f_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xc0,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmps_f_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmps_f_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmps_f_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmps_f_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmps_f_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmps_f_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmps_f_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmps_lt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc2,0x7c]
+
+v_cmps_lt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc2,0x7c]
+
+v_cmps_lt_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xc2,0x7c]
+
+v_cmps_lt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xc2,0x7c]
+
+v_cmps_lt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc2,0x7c]
+
+v_cmps_lt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc2,0x7c]
+
+v_cmps_lt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc2,0x7c]
+
+v_cmps_lt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc2,0x7c]
+
+v_cmps_lt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc2,0x7c]
+
+v_cmps_lt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc2,0x7c]
+
+v_cmps_lt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc2,0x7c]
+
+v_cmps_lt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc2,0x7c]
+
+v_cmps_lt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc2,0x7c]
+
+v_cmps_lt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc2,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_lt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc2,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_lt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc2,0x7c]
+
+v_cmps_lt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc2,0x7c]
+
+v_cmps_lt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc3,0x7c]
+
+v_cmps_lt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_lt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xc2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_lt_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xc2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_lt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xc2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_lt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xc2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_lt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xc2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_lt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xc2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_lt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xc2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_lt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmps_lt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xc2,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmps_lt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmps_lt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xc2,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmps_lt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmps_lt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmps_lt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmps_lt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmps_lt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmps_lt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmps_lt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmps_eq_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc4,0x7c]
+
+v_cmps_eq_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc4,0x7c]
+
+v_cmps_eq_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xc4,0x7c]
+
+v_cmps_eq_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xc4,0x7c]
+
+v_cmps_eq_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc4,0x7c]
+
+v_cmps_eq_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc4,0x7c]
+
+v_cmps_eq_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc4,0x7c]
+
+v_cmps_eq_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc4,0x7c]
+
+v_cmps_eq_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc4,0x7c]
+
+v_cmps_eq_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc4,0x7c]
+
+v_cmps_eq_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc4,0x7c]
+
+v_cmps_eq_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc4,0x7c]
+
+v_cmps_eq_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc4,0x7c]
+
+v_cmps_eq_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc4,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_eq_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc4,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_eq_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc4,0x7c]
+
+v_cmps_eq_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc4,0x7c]
+
+v_cmps_eq_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc5,0x7c]
+
+v_cmps_eq_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_eq_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xc4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_eq_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xc4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_eq_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xc4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_eq_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xc4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_eq_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xc4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_eq_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xc4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_eq_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xc4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_eq_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmps_eq_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xc4,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmps_eq_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmps_eq_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xc4,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmps_eq_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmps_eq_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmps_eq_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmps_eq_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmps_eq_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmps_eq_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmps_eq_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmps_le_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc6,0x7c]
+
+v_cmps_le_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc6,0x7c]
+
+v_cmps_le_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xc6,0x7c]
+
+v_cmps_le_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xc6,0x7c]
+
+v_cmps_le_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc6,0x7c]
+
+v_cmps_le_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc6,0x7c]
+
+v_cmps_le_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc6,0x7c]
+
+v_cmps_le_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc6,0x7c]
+
+v_cmps_le_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc6,0x7c]
+
+v_cmps_le_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc6,0x7c]
+
+v_cmps_le_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc6,0x7c]
+
+v_cmps_le_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc6,0x7c]
+
+v_cmps_le_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc6,0x7c]
+
+v_cmps_le_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc6,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_le_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc6,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_le_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc6,0x7c]
+
+v_cmps_le_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc6,0x7c]
+
+v_cmps_le_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc7,0x7c]
+
+v_cmps_le_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_le_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xc6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_le_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xc6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_le_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xc6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_le_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xc6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_le_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xc6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_le_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xc6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_le_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xc6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_le_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmps_le_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xc6,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmps_le_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmps_le_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xc6,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmps_le_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmps_le_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmps_le_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmps_le_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmps_le_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmps_le_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmps_le_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmps_gt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc8,0x7c]
+
+v_cmps_gt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc8,0x7c]
+
+v_cmps_gt_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xc8,0x7c]
+
+v_cmps_gt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xc8,0x7c]
+
+v_cmps_gt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc8,0x7c]
+
+v_cmps_gt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc8,0x7c]
+
+v_cmps_gt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc8,0x7c]
+
+v_cmps_gt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc8,0x7c]
+
+v_cmps_gt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc8,0x7c]
+
+v_cmps_gt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc8,0x7c]
+
+v_cmps_gt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc8,0x7c]
+
+v_cmps_gt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc8,0x7c]
+
+v_cmps_gt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc8,0x7c]
+
+v_cmps_gt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc8,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_gt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc8,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_gt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc8,0x7c]
+
+v_cmps_gt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc8,0x7c]
+
+v_cmps_gt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc9,0x7c]
+
+v_cmps_gt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_gt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xc8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_gt_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xc8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_gt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xc8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_gt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xc8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_gt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xc8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_gt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xc8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_gt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xc8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_gt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmps_gt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xc8,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmps_gt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmps_gt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xc8,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmps_gt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmps_gt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmps_gt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmps_gt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmps_gt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmps_gt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmps_gt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmps_lg_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xca,0x7c]
+
+v_cmps_lg_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xca,0x7c]
+
+v_cmps_lg_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xca,0x7c]
+
+v_cmps_lg_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xca,0x7c]
+
+v_cmps_lg_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xca,0x7c]
+
+v_cmps_lg_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xca,0x7c]
+
+v_cmps_lg_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xca,0x7c]
+
+v_cmps_lg_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xca,0x7c]
+
+v_cmps_lg_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xca,0x7c]
+
+v_cmps_lg_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xca,0x7c]
+
+v_cmps_lg_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xca,0x7c]
+
+v_cmps_lg_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xca,0x7c]
+
+v_cmps_lg_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xca,0x7c]
+
+v_cmps_lg_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xca,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_lg_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xca,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_lg_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xca,0x7c]
+
+v_cmps_lg_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xca,0x7c]
+
+v_cmps_lg_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xcb,0x7c]
+
+v_cmps_lg_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xca,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_lg_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xca,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_lg_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xca,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_lg_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xca,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_lg_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xca,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_lg_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xca,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_lg_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xca,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_lg_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xca,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_lg_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmps_lg_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xca,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmps_lg_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xca,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmps_lg_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xca,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmps_lg_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xca,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmps_lg_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xca,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmps_lg_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xca,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmps_lg_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xca,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmps_lg_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xca,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmps_lg_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xca,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmps_lg_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xca,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmps_ge_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xcc,0x7c]
+
+v_cmps_ge_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xcc,0x7c]
+
+v_cmps_ge_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xcc,0x7c]
+
+v_cmps_ge_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xcc,0x7c]
+
+v_cmps_ge_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xcc,0x7c]
+
+v_cmps_ge_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xcc,0x7c]
+
+v_cmps_ge_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xcc,0x7c]
+
+v_cmps_ge_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xcc,0x7c]
+
+v_cmps_ge_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xcc,0x7c]
+
+v_cmps_ge_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xcc,0x7c]
+
+v_cmps_ge_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xcc,0x7c]
+
+v_cmps_ge_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xcc,0x7c]
+
+v_cmps_ge_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xcc,0x7c]
+
+v_cmps_ge_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xcc,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_ge_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xcc,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_ge_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xcc,0x7c]
+
+v_cmps_ge_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xcc,0x7c]
+
+v_cmps_ge_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xcd,0x7c]
+
+v_cmps_ge_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_ge_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xcc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_ge_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xcc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_ge_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xcc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_ge_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xcc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_ge_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xcc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_ge_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xcc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_ge_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xcc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_ge_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmps_ge_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xcc,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmps_ge_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmps_ge_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xcc,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmps_ge_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmps_ge_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmps_ge_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmps_ge_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmps_ge_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmps_ge_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmps_ge_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmps_o_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xce,0x7c]
+
+v_cmps_o_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xce,0x7c]
+
+v_cmps_o_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xce,0x7c]
+
+v_cmps_o_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xce,0x7c]
+
+v_cmps_o_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xce,0x7c]
+
+v_cmps_o_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xce,0x7c]
+
+v_cmps_o_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xce,0x7c]
+
+v_cmps_o_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xce,0x7c]
+
+v_cmps_o_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xce,0x7c]
+
+v_cmps_o_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xce,0x7c]
+
+v_cmps_o_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xce,0x7c]
+
+v_cmps_o_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xce,0x7c]
+
+v_cmps_o_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xce,0x7c]
+
+v_cmps_o_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xce,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_o_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xce,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_o_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xce,0x7c]
+
+v_cmps_o_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xce,0x7c]
+
+v_cmps_o_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xcf,0x7c]
+
+v_cmps_o_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xce,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_o_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xce,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_o_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xce,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_o_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xce,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_o_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xce,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_o_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xce,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_o_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xce,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_o_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xce,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_o_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmps_o_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xce,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmps_o_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xce,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmps_o_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xce,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmps_o_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xce,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmps_o_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xce,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmps_o_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xce,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmps_o_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xce,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmps_o_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xce,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmps_o_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xce,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmps_o_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xce,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmps_u_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xd0,0x7c]
+
+v_cmps_u_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xd0,0x7c]
+
+v_cmps_u_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xd0,0x7c]
+
+v_cmps_u_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xd0,0x7c]
+
+v_cmps_u_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xd0,0x7c]
+
+v_cmps_u_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xd0,0x7c]
+
+v_cmps_u_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xd0,0x7c]
+
+v_cmps_u_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xd0,0x7c]
+
+v_cmps_u_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xd0,0x7c]
+
+v_cmps_u_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xd0,0x7c]
+
+v_cmps_u_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xd0,0x7c]
+
+v_cmps_u_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xd0,0x7c]
+
+v_cmps_u_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xd0,0x7c]
+
+v_cmps_u_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xd0,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_u_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xd0,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_u_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xd0,0x7c]
+
+v_cmps_u_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xd0,0x7c]
+
+v_cmps_u_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xd1,0x7c]
+
+v_cmps_u_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_u_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xd0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_u_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xd0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_u_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xd0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_u_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xd0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_u_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xd0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_u_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xd0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_u_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xd0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_u_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmps_u_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xd0,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmps_u_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmps_u_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xd0,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmps_u_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmps_u_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmps_u_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmps_u_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmps_u_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmps_u_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmps_u_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmps_nge_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xd2,0x7c]
+
+v_cmps_nge_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xd2,0x7c]
+
+v_cmps_nge_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xd2,0x7c]
+
+v_cmps_nge_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xd2,0x7c]
+
+v_cmps_nge_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xd2,0x7c]
+
+v_cmps_nge_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xd2,0x7c]
+
+v_cmps_nge_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xd2,0x7c]
+
+v_cmps_nge_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xd2,0x7c]
+
+v_cmps_nge_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xd2,0x7c]
+
+v_cmps_nge_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xd2,0x7c]
+
+v_cmps_nge_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xd2,0x7c]
+
+v_cmps_nge_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xd2,0x7c]
+
+v_cmps_nge_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xd2,0x7c]
+
+v_cmps_nge_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xd2,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_nge_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xd2,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_nge_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xd2,0x7c]
+
+v_cmps_nge_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xd2,0x7c]
+
+v_cmps_nge_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xd3,0x7c]
+
+v_cmps_nge_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nge_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xd2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nge_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xd2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nge_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xd2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nge_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xd2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nge_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xd2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nge_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xd2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nge_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xd2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nge_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmps_nge_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xd2,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmps_nge_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmps_nge_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xd2,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmps_nge_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmps_nge_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmps_nge_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmps_nge_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmps_nge_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmps_nge_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmps_nge_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmps_nlg_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xd4,0x7c]
+
+v_cmps_nlg_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xd4,0x7c]
+
+v_cmps_nlg_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xd4,0x7c]
+
+v_cmps_nlg_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xd4,0x7c]
+
+v_cmps_nlg_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xd4,0x7c]
+
+v_cmps_nlg_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xd4,0x7c]
+
+v_cmps_nlg_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xd4,0x7c]
+
+v_cmps_nlg_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xd4,0x7c]
+
+v_cmps_nlg_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xd4,0x7c]
+
+v_cmps_nlg_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xd4,0x7c]
+
+v_cmps_nlg_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xd4,0x7c]
+
+v_cmps_nlg_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xd4,0x7c]
+
+v_cmps_nlg_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xd4,0x7c]
+
+v_cmps_nlg_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xd4,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_nlg_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xd4,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_nlg_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xd4,0x7c]
+
+v_cmps_nlg_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xd4,0x7c]
+
+v_cmps_nlg_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xd5,0x7c]
+
+v_cmps_nlg_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nlg_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xd4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nlg_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xd4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nlg_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xd4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nlg_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xd4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nlg_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xd4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nlg_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xd4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nlg_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xd4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nlg_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmps_nlg_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xd4,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmps_nlg_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmps_nlg_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xd4,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmps_nlg_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmps_nlg_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmps_nlg_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmps_nlg_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmps_nlg_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmps_nlg_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmps_nlg_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmps_ngt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xd6,0x7c]
+
+v_cmps_ngt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xd6,0x7c]
+
+v_cmps_ngt_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xd6,0x7c]
+
+v_cmps_ngt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xd6,0x7c]
+
+v_cmps_ngt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xd6,0x7c]
+
+v_cmps_ngt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xd6,0x7c]
+
+v_cmps_ngt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xd6,0x7c]
+
+v_cmps_ngt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xd6,0x7c]
+
+v_cmps_ngt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xd6,0x7c]
+
+v_cmps_ngt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xd6,0x7c]
+
+v_cmps_ngt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xd6,0x7c]
+
+v_cmps_ngt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xd6,0x7c]
+
+v_cmps_ngt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xd6,0x7c]
+
+v_cmps_ngt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xd6,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_ngt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xd6,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_ngt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xd6,0x7c]
+
+v_cmps_ngt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xd6,0x7c]
+
+v_cmps_ngt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xd7,0x7c]
+
+v_cmps_ngt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_ngt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xd6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_ngt_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xd6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_ngt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xd6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_ngt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xd6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_ngt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xd6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_ngt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xd6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_ngt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xd6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_ngt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmps_ngt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xd6,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmps_ngt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmps_ngt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xd6,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmps_ngt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmps_ngt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmps_ngt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmps_ngt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmps_ngt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmps_ngt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmps_ngt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmps_nle_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xd8,0x7c]
+
+v_cmps_nle_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xd8,0x7c]
+
+v_cmps_nle_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xd8,0x7c]
+
+v_cmps_nle_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xd8,0x7c]
+
+v_cmps_nle_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xd8,0x7c]
+
+v_cmps_nle_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xd8,0x7c]
+
+v_cmps_nle_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xd8,0x7c]
+
+v_cmps_nle_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xd8,0x7c]
+
+v_cmps_nle_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xd8,0x7c]
+
+v_cmps_nle_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xd8,0x7c]
+
+v_cmps_nle_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xd8,0x7c]
+
+v_cmps_nle_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xd8,0x7c]
+
+v_cmps_nle_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xd8,0x7c]
+
+v_cmps_nle_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xd8,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_nle_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xd8,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_nle_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xd8,0x7c]
+
+v_cmps_nle_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xd8,0x7c]
+
+v_cmps_nle_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xd9,0x7c]
+
+v_cmps_nle_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nle_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xd8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nle_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xd8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nle_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xd8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nle_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xd8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nle_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xd8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nle_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xd8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nle_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xd8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nle_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmps_nle_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xd8,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmps_nle_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmps_nle_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xd8,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmps_nle_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmps_nle_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmps_nle_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmps_nle_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmps_nle_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmps_nle_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmps_nle_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmps_neq_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xda,0x7c]
+
+v_cmps_neq_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xda,0x7c]
+
+v_cmps_neq_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xda,0x7c]
+
+v_cmps_neq_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xda,0x7c]
+
+v_cmps_neq_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xda,0x7c]
+
+v_cmps_neq_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xda,0x7c]
+
+v_cmps_neq_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xda,0x7c]
+
+v_cmps_neq_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xda,0x7c]
+
+v_cmps_neq_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xda,0x7c]
+
+v_cmps_neq_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xda,0x7c]
+
+v_cmps_neq_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xda,0x7c]
+
+v_cmps_neq_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xda,0x7c]
+
+v_cmps_neq_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xda,0x7c]
+
+v_cmps_neq_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xda,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_neq_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xda,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_neq_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xda,0x7c]
+
+v_cmps_neq_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xda,0x7c]
+
+v_cmps_neq_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xdb,0x7c]
+
+v_cmps_neq_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xda,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_neq_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xda,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_neq_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xda,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_neq_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xda,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_neq_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xda,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_neq_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xda,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_neq_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xda,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_neq_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xda,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_neq_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmps_neq_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xda,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmps_neq_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xda,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmps_neq_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xda,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmps_neq_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xda,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmps_neq_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xda,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmps_neq_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xda,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmps_neq_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xda,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmps_neq_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xda,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmps_neq_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xda,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmps_neq_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xda,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmps_nlt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xdc,0x7c]
+
+v_cmps_nlt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xdc,0x7c]
+
+v_cmps_nlt_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xdc,0x7c]
+
+v_cmps_nlt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xdc,0x7c]
+
+v_cmps_nlt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xdc,0x7c]
+
+v_cmps_nlt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xdc,0x7c]
+
+v_cmps_nlt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xdc,0x7c]
+
+v_cmps_nlt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xdc,0x7c]
+
+v_cmps_nlt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xdc,0x7c]
+
+v_cmps_nlt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xdc,0x7c]
+
+v_cmps_nlt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xdc,0x7c]
+
+v_cmps_nlt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xdc,0x7c]
+
+v_cmps_nlt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xdc,0x7c]
+
+v_cmps_nlt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xdc,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_nlt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xdc,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_nlt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xdc,0x7c]
+
+v_cmps_nlt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xdc,0x7c]
+
+v_cmps_nlt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xdd,0x7c]
+
+v_cmps_nlt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nlt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xdc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nlt_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xdc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nlt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xdc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nlt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xdc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nlt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xdc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nlt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xdc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nlt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xdc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_nlt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmps_nlt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xdc,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmps_nlt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmps_nlt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xdc,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmps_nlt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmps_nlt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmps_nlt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmps_nlt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmps_nlt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmps_nlt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmps_nlt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmps_tru_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xde,0x7c]
+
+v_cmps_tru_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xde,0x7c]
+
+v_cmps_tru_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xde,0x7c]
+
+v_cmps_tru_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xde,0x7c]
+
+v_cmps_tru_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xde,0x7c]
+
+v_cmps_tru_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xde,0x7c]
+
+v_cmps_tru_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xde,0x7c]
+
+v_cmps_tru_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xde,0x7c]
+
+v_cmps_tru_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xde,0x7c]
+
+v_cmps_tru_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xde,0x7c]
+
+v_cmps_tru_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xde,0x7c]
+
+v_cmps_tru_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xde,0x7c]
+
+v_cmps_tru_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xde,0x7c]
+
+v_cmps_tru_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xde,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmps_tru_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xde,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmps_tru_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xde,0x7c]
+
+v_cmps_tru_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xde,0x7c]
+
+v_cmps_tru_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xdf,0x7c]
+
+v_cmps_tru_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xde,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_tru_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xde,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_tru_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xde,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_tru_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xde,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_tru_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xde,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_tru_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xde,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_tru_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xde,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_tru_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xde,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmps_tru_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmps_tru_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xde,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmps_tru_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xde,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmps_tru_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xde,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmps_tru_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xde,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmps_tru_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xde,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmps_tru_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xde,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmps_tru_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xde,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmps_tru_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xde,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmps_tru_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xde,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmps_tru_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xde,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpsx_f_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe0,0x7c]
+
+v_cmpsx_f_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe0,0x7c]
+
+v_cmpsx_f_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xe0,0x7c]
+
+v_cmpsx_f_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xe0,0x7c]
+
+v_cmpsx_f_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe0,0x7c]
+
+v_cmpsx_f_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe0,0x7c]
+
+v_cmpsx_f_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe0,0x7c]
+
+v_cmpsx_f_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe0,0x7c]
+
+v_cmpsx_f_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe0,0x7c]
+
+v_cmpsx_f_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe0,0x7c]
+
+v_cmpsx_f_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe0,0x7c]
+
+v_cmpsx_f_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe0,0x7c]
+
+v_cmpsx_f_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe0,0x7c]
+
+v_cmpsx_f_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe0,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_f_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe0,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_f_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe0,0x7c]
+
+v_cmpsx_f_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe0,0x7c]
+
+v_cmpsx_f_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe1,0x7c]
+
+v_cmpsx_f_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_f_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_f_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_f_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xe0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_f_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_f_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_f_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_f_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_f_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpsx_f_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpsx_f_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpsx_f_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpsx_f_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpsx_f_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpsx_f_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpsx_f_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpsx_f_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpsx_f_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpsx_f_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpsx_lt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe2,0x7c]
+
+v_cmpsx_lt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe2,0x7c]
+
+v_cmpsx_lt_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xe2,0x7c]
+
+v_cmpsx_lt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xe2,0x7c]
+
+v_cmpsx_lt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe2,0x7c]
+
+v_cmpsx_lt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe2,0x7c]
+
+v_cmpsx_lt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe2,0x7c]
+
+v_cmpsx_lt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe2,0x7c]
+
+v_cmpsx_lt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe2,0x7c]
+
+v_cmpsx_lt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe2,0x7c]
+
+v_cmpsx_lt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe2,0x7c]
+
+v_cmpsx_lt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe2,0x7c]
+
+v_cmpsx_lt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe2,0x7c]
+
+v_cmpsx_lt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe2,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_lt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe2,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_lt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe2,0x7c]
+
+v_cmpsx_lt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe2,0x7c]
+
+v_cmpsx_lt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe3,0x7c]
+
+v_cmpsx_lt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_lt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_lt_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_lt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xe2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_lt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_lt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_lt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_lt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_lt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpsx_lt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpsx_lt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpsx_lt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpsx_lt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpsx_lt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpsx_lt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpsx_lt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpsx_lt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpsx_lt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpsx_lt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpsx_eq_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe4,0x7c]
+
+v_cmpsx_eq_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe4,0x7c]
+
+v_cmpsx_eq_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xe4,0x7c]
+
+v_cmpsx_eq_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xe4,0x7c]
+
+v_cmpsx_eq_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe4,0x7c]
+
+v_cmpsx_eq_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe4,0x7c]
+
+v_cmpsx_eq_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe4,0x7c]
+
+v_cmpsx_eq_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe4,0x7c]
+
+v_cmpsx_eq_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe4,0x7c]
+
+v_cmpsx_eq_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe4,0x7c]
+
+v_cmpsx_eq_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe4,0x7c]
+
+v_cmpsx_eq_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe4,0x7c]
+
+v_cmpsx_eq_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe4,0x7c]
+
+v_cmpsx_eq_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe4,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_eq_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe4,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_eq_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe4,0x7c]
+
+v_cmpsx_eq_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe4,0x7c]
+
+v_cmpsx_eq_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe5,0x7c]
+
+v_cmpsx_eq_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_eq_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_eq_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_eq_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xe4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_eq_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_eq_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_eq_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_eq_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_eq_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpsx_eq_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpsx_eq_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpsx_eq_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpsx_eq_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpsx_eq_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpsx_eq_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpsx_eq_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpsx_eq_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpsx_eq_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpsx_eq_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpsx_le_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe6,0x7c]
+
+v_cmpsx_le_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe6,0x7c]
+
+v_cmpsx_le_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xe6,0x7c]
+
+v_cmpsx_le_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xe6,0x7c]
+
+v_cmpsx_le_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe6,0x7c]
+
+v_cmpsx_le_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe6,0x7c]
+
+v_cmpsx_le_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe6,0x7c]
+
+v_cmpsx_le_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe6,0x7c]
+
+v_cmpsx_le_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe6,0x7c]
+
+v_cmpsx_le_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe6,0x7c]
+
+v_cmpsx_le_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe6,0x7c]
+
+v_cmpsx_le_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe6,0x7c]
+
+v_cmpsx_le_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe6,0x7c]
+
+v_cmpsx_le_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe6,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_le_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe6,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_le_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe6,0x7c]
+
+v_cmpsx_le_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe6,0x7c]
+
+v_cmpsx_le_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe7,0x7c]
+
+v_cmpsx_le_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_le_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_le_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_le_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xe6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_le_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_le_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_le_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_le_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_le_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpsx_le_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpsx_le_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpsx_le_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpsx_le_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpsx_le_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpsx_le_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpsx_le_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpsx_le_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpsx_le_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpsx_le_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpsx_gt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe8,0x7c]
+
+v_cmpsx_gt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe8,0x7c]
+
+v_cmpsx_gt_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xe8,0x7c]
+
+v_cmpsx_gt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xe8,0x7c]
+
+v_cmpsx_gt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe8,0x7c]
+
+v_cmpsx_gt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe8,0x7c]
+
+v_cmpsx_gt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe8,0x7c]
+
+v_cmpsx_gt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe8,0x7c]
+
+v_cmpsx_gt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe8,0x7c]
+
+v_cmpsx_gt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe8,0x7c]
+
+v_cmpsx_gt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe8,0x7c]
+
+v_cmpsx_gt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe8,0x7c]
+
+v_cmpsx_gt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe8,0x7c]
+
+v_cmpsx_gt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe8,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_gt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe8,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_gt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe8,0x7c]
+
+v_cmpsx_gt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe8,0x7c]
+
+v_cmpsx_gt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe9,0x7c]
+
+v_cmpsx_gt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_gt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_gt_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_gt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xe8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_gt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_gt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_gt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_gt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_gt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpsx_gt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpsx_gt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpsx_gt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpsx_gt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpsx_gt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpsx_gt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpsx_gt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpsx_gt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpsx_gt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpsx_gt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpsx_lg_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xea,0x7c]
+
+v_cmpsx_lg_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xea,0x7c]
+
+v_cmpsx_lg_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xea,0x7c]
+
+v_cmpsx_lg_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xea,0x7c]
+
+v_cmpsx_lg_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xea,0x7c]
+
+v_cmpsx_lg_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xea,0x7c]
+
+v_cmpsx_lg_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xea,0x7c]
+
+v_cmpsx_lg_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xea,0x7c]
+
+v_cmpsx_lg_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xea,0x7c]
+
+v_cmpsx_lg_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xea,0x7c]
+
+v_cmpsx_lg_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xea,0x7c]
+
+v_cmpsx_lg_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xea,0x7c]
+
+v_cmpsx_lg_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xea,0x7c]
+
+v_cmpsx_lg_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xea,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_lg_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xea,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_lg_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xea,0x7c]
+
+v_cmpsx_lg_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xea,0x7c]
+
+v_cmpsx_lg_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xeb,0x7c]
+
+v_cmpsx_lg_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_lg_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xea,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_lg_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xea,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_lg_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xea,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_lg_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xea,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_lg_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xea,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_lg_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xea,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_lg_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xea,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_lg_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpsx_lg_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpsx_lg_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpsx_lg_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpsx_lg_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xea,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpsx_lg_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xea,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpsx_lg_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xea,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpsx_lg_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xea,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpsx_lg_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpsx_lg_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpsx_lg_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpsx_ge_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xec,0x7c]
+
+v_cmpsx_ge_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xec,0x7c]
+
+v_cmpsx_ge_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xec,0x7c]
+
+v_cmpsx_ge_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xec,0x7c]
+
+v_cmpsx_ge_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xec,0x7c]
+
+v_cmpsx_ge_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xec,0x7c]
+
+v_cmpsx_ge_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xec,0x7c]
+
+v_cmpsx_ge_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xec,0x7c]
+
+v_cmpsx_ge_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xec,0x7c]
+
+v_cmpsx_ge_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xec,0x7c]
+
+v_cmpsx_ge_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xec,0x7c]
+
+v_cmpsx_ge_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xec,0x7c]
+
+v_cmpsx_ge_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xec,0x7c]
+
+v_cmpsx_ge_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xec,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_ge_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xec,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_ge_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xec,0x7c]
+
+v_cmpsx_ge_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xec,0x7c]
+
+v_cmpsx_ge_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xed,0x7c]
+
+v_cmpsx_ge_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_ge_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xec,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_ge_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xec,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_ge_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xec,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_ge_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xec,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_ge_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xec,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_ge_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xec,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_ge_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xec,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_ge_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpsx_ge_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpsx_ge_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpsx_ge_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpsx_ge_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xec,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpsx_ge_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xec,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpsx_ge_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xec,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpsx_ge_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xec,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpsx_ge_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpsx_ge_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpsx_ge_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpsx_o_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xee,0x7c]
+
+v_cmpsx_o_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xee,0x7c]
+
+v_cmpsx_o_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xee,0x7c]
+
+v_cmpsx_o_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xee,0x7c]
+
+v_cmpsx_o_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xee,0x7c]
+
+v_cmpsx_o_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xee,0x7c]
+
+v_cmpsx_o_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xee,0x7c]
+
+v_cmpsx_o_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xee,0x7c]
+
+v_cmpsx_o_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xee,0x7c]
+
+v_cmpsx_o_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xee,0x7c]
+
+v_cmpsx_o_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xee,0x7c]
+
+v_cmpsx_o_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xee,0x7c]
+
+v_cmpsx_o_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xee,0x7c]
+
+v_cmpsx_o_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xee,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_o_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xee,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_o_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xee,0x7c]
+
+v_cmpsx_o_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xee,0x7c]
+
+v_cmpsx_o_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xef,0x7c]
+
+v_cmpsx_o_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_o_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xee,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_o_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xee,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_o_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xee,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_o_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xee,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_o_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xee,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_o_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xee,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_o_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xee,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_o_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpsx_o_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpsx_o_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpsx_o_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpsx_o_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xee,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpsx_o_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xee,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpsx_o_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xee,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpsx_o_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xee,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpsx_o_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpsx_o_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpsx_o_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpsx_u_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xf0,0x7c]
+
+v_cmpsx_u_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xf0,0x7c]
+
+v_cmpsx_u_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xf0,0x7c]
+
+v_cmpsx_u_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xf0,0x7c]
+
+v_cmpsx_u_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xf0,0x7c]
+
+v_cmpsx_u_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xf0,0x7c]
+
+v_cmpsx_u_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xf0,0x7c]
+
+v_cmpsx_u_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xf0,0x7c]
+
+v_cmpsx_u_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xf0,0x7c]
+
+v_cmpsx_u_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xf0,0x7c]
+
+v_cmpsx_u_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xf0,0x7c]
+
+v_cmpsx_u_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xf0,0x7c]
+
+v_cmpsx_u_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xf0,0x7c]
+
+v_cmpsx_u_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xf0,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_u_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xf0,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_u_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xf0,0x7c]
+
+v_cmpsx_u_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xf0,0x7c]
+
+v_cmpsx_u_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xf1,0x7c]
+
+v_cmpsx_u_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_u_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xf0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_u_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xf0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_u_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xf0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_u_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xf0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_u_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xf0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_u_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xf0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_u_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xf0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_u_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpsx_u_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpsx_u_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpsx_u_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpsx_u_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpsx_u_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpsx_u_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpsx_u_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpsx_u_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpsx_u_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpsx_u_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpsx_nge_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xf2,0x7c]
+
+v_cmpsx_nge_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xf2,0x7c]
+
+v_cmpsx_nge_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xf2,0x7c]
+
+v_cmpsx_nge_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xf2,0x7c]
+
+v_cmpsx_nge_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xf2,0x7c]
+
+v_cmpsx_nge_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xf2,0x7c]
+
+v_cmpsx_nge_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xf2,0x7c]
+
+v_cmpsx_nge_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xf2,0x7c]
+
+v_cmpsx_nge_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xf2,0x7c]
+
+v_cmpsx_nge_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xf2,0x7c]
+
+v_cmpsx_nge_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xf2,0x7c]
+
+v_cmpsx_nge_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xf2,0x7c]
+
+v_cmpsx_nge_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xf2,0x7c]
+
+v_cmpsx_nge_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xf2,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_nge_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xf2,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_nge_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xf2,0x7c]
+
+v_cmpsx_nge_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xf2,0x7c]
+
+v_cmpsx_nge_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xf3,0x7c]
+
+v_cmpsx_nge_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nge_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xf2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nge_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xf2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nge_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xf2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nge_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xf2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nge_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xf2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nge_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xf2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nge_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xf2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nge_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpsx_nge_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpsx_nge_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpsx_nge_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpsx_nge_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpsx_nge_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpsx_nge_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpsx_nge_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpsx_nge_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpsx_nge_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpsx_nge_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpsx_nlg_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xf4,0x7c]
+
+v_cmpsx_nlg_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xf4,0x7c]
+
+v_cmpsx_nlg_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xf4,0x7c]
+
+v_cmpsx_nlg_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xf4,0x7c]
+
+v_cmpsx_nlg_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xf4,0x7c]
+
+v_cmpsx_nlg_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xf4,0x7c]
+
+v_cmpsx_nlg_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xf4,0x7c]
+
+v_cmpsx_nlg_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xf4,0x7c]
+
+v_cmpsx_nlg_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xf4,0x7c]
+
+v_cmpsx_nlg_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xf4,0x7c]
+
+v_cmpsx_nlg_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xf4,0x7c]
+
+v_cmpsx_nlg_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xf4,0x7c]
+
+v_cmpsx_nlg_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xf4,0x7c]
+
+v_cmpsx_nlg_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xf4,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_nlg_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xf4,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_nlg_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xf4,0x7c]
+
+v_cmpsx_nlg_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xf4,0x7c]
+
+v_cmpsx_nlg_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xf5,0x7c]
+
+v_cmpsx_nlg_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nlg_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xf4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nlg_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xf4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nlg_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xf4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nlg_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xf4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nlg_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xf4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nlg_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xf4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nlg_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xf4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nlg_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpsx_nlg_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpsx_nlg_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpsx_nlg_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpsx_nlg_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpsx_nlg_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpsx_nlg_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpsx_nlg_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpsx_nlg_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpsx_nlg_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpsx_nlg_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpsx_ngt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xf6,0x7c]
+
+v_cmpsx_ngt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xf6,0x7c]
+
+v_cmpsx_ngt_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xf6,0x7c]
+
+v_cmpsx_ngt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xf6,0x7c]
+
+v_cmpsx_ngt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xf6,0x7c]
+
+v_cmpsx_ngt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xf6,0x7c]
+
+v_cmpsx_ngt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xf6,0x7c]
+
+v_cmpsx_ngt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xf6,0x7c]
+
+v_cmpsx_ngt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xf6,0x7c]
+
+v_cmpsx_ngt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xf6,0x7c]
+
+v_cmpsx_ngt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xf6,0x7c]
+
+v_cmpsx_ngt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xf6,0x7c]
+
+v_cmpsx_ngt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xf6,0x7c]
+
+v_cmpsx_ngt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xf6,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_ngt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xf6,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_ngt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xf6,0x7c]
+
+v_cmpsx_ngt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xf6,0x7c]
+
+v_cmpsx_ngt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xf7,0x7c]
+
+v_cmpsx_ngt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_ngt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xf6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_ngt_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xf6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_ngt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xf6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_ngt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xf6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_ngt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xf6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_ngt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xf6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_ngt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xf6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_ngt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpsx_ngt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpsx_ngt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpsx_ngt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpsx_ngt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpsx_ngt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpsx_ngt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpsx_ngt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpsx_ngt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpsx_ngt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpsx_ngt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpsx_nle_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xf8,0x7c]
+
+v_cmpsx_nle_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xf8,0x7c]
+
+v_cmpsx_nle_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xf8,0x7c]
+
+v_cmpsx_nle_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xf8,0x7c]
+
+v_cmpsx_nle_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xf8,0x7c]
+
+v_cmpsx_nle_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xf8,0x7c]
+
+v_cmpsx_nle_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xf8,0x7c]
+
+v_cmpsx_nle_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xf8,0x7c]
+
+v_cmpsx_nle_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xf8,0x7c]
+
+v_cmpsx_nle_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xf8,0x7c]
+
+v_cmpsx_nle_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xf8,0x7c]
+
+v_cmpsx_nle_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xf8,0x7c]
+
+v_cmpsx_nle_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xf8,0x7c]
+
+v_cmpsx_nle_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xf8,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_nle_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xf8,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_nle_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xf8,0x7c]
+
+v_cmpsx_nle_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xf8,0x7c]
+
+v_cmpsx_nle_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xf9,0x7c]
+
+v_cmpsx_nle_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nle_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xf8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nle_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xf8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nle_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xf8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nle_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xf8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nle_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xf8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nle_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xf8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nle_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xf8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nle_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpsx_nle_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpsx_nle_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpsx_nle_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpsx_nle_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpsx_nle_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpsx_nle_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpsx_nle_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpsx_nle_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpsx_nle_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpsx_nle_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpsx_neq_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xfa,0x7c]
+
+v_cmpsx_neq_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xfa,0x7c]
+
+v_cmpsx_neq_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xfa,0x7c]
+
+v_cmpsx_neq_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xfa,0x7c]
+
+v_cmpsx_neq_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xfa,0x7c]
+
+v_cmpsx_neq_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xfa,0x7c]
+
+v_cmpsx_neq_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xfa,0x7c]
+
+v_cmpsx_neq_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xfa,0x7c]
+
+v_cmpsx_neq_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xfa,0x7c]
+
+v_cmpsx_neq_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xfa,0x7c]
+
+v_cmpsx_neq_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xfa,0x7c]
+
+v_cmpsx_neq_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xfa,0x7c]
+
+v_cmpsx_neq_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xfa,0x7c]
+
+v_cmpsx_neq_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xfa,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_neq_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xfa,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_neq_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xfa,0x7c]
+
+v_cmpsx_neq_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xfa,0x7c]
+
+v_cmpsx_neq_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xfb,0x7c]
+
+v_cmpsx_neq_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_neq_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xfa,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_neq_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xfa,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_neq_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xfa,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_neq_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xfa,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_neq_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xfa,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_neq_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xfa,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_neq_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xfa,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_neq_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpsx_neq_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpsx_neq_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpsx_neq_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpsx_neq_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpsx_neq_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpsx_neq_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpsx_neq_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpsx_neq_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpsx_neq_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpsx_neq_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpsx_nlt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xfc,0x7c]
+
+v_cmpsx_nlt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xfc,0x7c]
+
+v_cmpsx_nlt_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xfc,0x7c]
+
+v_cmpsx_nlt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xfc,0x7c]
+
+v_cmpsx_nlt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xfc,0x7c]
+
+v_cmpsx_nlt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xfc,0x7c]
+
+v_cmpsx_nlt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xfc,0x7c]
+
+v_cmpsx_nlt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xfc,0x7c]
+
+v_cmpsx_nlt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xfc,0x7c]
+
+v_cmpsx_nlt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xfc,0x7c]
+
+v_cmpsx_nlt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xfc,0x7c]
+
+v_cmpsx_nlt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xfc,0x7c]
+
+v_cmpsx_nlt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xfc,0x7c]
+
+v_cmpsx_nlt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xfc,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_nlt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xfc,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_nlt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xfc,0x7c]
+
+v_cmpsx_nlt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xfc,0x7c]
+
+v_cmpsx_nlt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xfd,0x7c]
+
+v_cmpsx_nlt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nlt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xfc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nlt_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xfc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nlt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xfc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nlt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xfc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nlt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xfc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nlt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xfc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nlt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xfc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_nlt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpsx_nlt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpsx_nlt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpsx_nlt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpsx_nlt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpsx_nlt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpsx_nlt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpsx_nlt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpsx_nlt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpsx_nlt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpsx_nlt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpsx_tru_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xfe,0x7c]
+
+v_cmpsx_tru_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xfe,0x7c]
+
+v_cmpsx_tru_f64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xfe,0x7c]
+
+v_cmpsx_tru_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xfe,0x7c]
+
+v_cmpsx_tru_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xfe,0x7c]
+
+v_cmpsx_tru_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xfe,0x7c]
+
+v_cmpsx_tru_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xfe,0x7c]
+
+v_cmpsx_tru_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xfe,0x7c]
+
+v_cmpsx_tru_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xfe,0x7c]
+
+v_cmpsx_tru_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xfe,0x7c]
+
+v_cmpsx_tru_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xfe,0x7c]
+
+v_cmpsx_tru_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xfe,0x7c]
+
+v_cmpsx_tru_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xfe,0x7c]
+
+v_cmpsx_tru_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xfe,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpsx_tru_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xfe,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpsx_tru_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xfe,0x7c]
+
+v_cmpsx_tru_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xfe,0x7c]
+
+v_cmpsx_tru_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xff,0x7c]
+
+v_cmpsx_tru_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_tru_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xfe,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_tru_f64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xfe,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_tru_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xfe,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_tru_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xfe,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_tru_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xfe,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_tru_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xfe,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_tru_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xfe,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpsx_tru_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpsx_tru_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpsx_tru_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpsx_tru_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpsx_tru_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpsx_tru_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpsx_tru_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpsx_tru_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpsx_tru_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpsx_tru_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpsx_tru_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0x08,0x00,0x60]
+
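+// From here the tests move from the f64 cmpsx family to the i32 compares;
+// these sit in the upper half of the VOPC opcode space (op >= 0x80 sets
+// bit 24), so the trailing encoding byte flips from 0x7c to 0x7d (and from
+// 0xd0 to 0xd1 in the _e64 forms).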
+v_cmp_f_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x00,0x7d]
+
+v_cmp_f_i32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x00,0x7d]
+
+v_cmp_f_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x00,0x7d]
+
+v_cmp_f_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x00,0x7d]
+
+v_cmp_f_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x00,0x7d]
+
+v_cmp_f_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x00,0x7d]
+
+v_cmp_f_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x00,0x7d]
+
+v_cmp_f_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x00,0x7d]
+
+v_cmp_f_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x00,0x7d]
+
+v_cmp_f_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x00,0x7d]
+
+v_cmp_f_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x00,0x7d]
+
+v_cmp_f_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x00,0x7d]
+
+v_cmp_f_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x00,0x7d]
+
+v_cmp_f_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x00,0x7d]
+
+v_cmp_f_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x00,0x7d]
+
+v_cmp_f_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x00,0x7d]
+
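+// Float inline constants are accepted for the integer compares as well;
+// the assembler emits the same operand codes (0.5 -> 0xf0, -4.0 -> 0xf7)
+// used by the floating-point forms.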
+v_cmp_f_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x00,0x7d]
+
+v_cmp_f_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x00,0x7d]
+
+v_cmp_f_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x00,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_f_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x00,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_f_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x00,0x7d]
+
+v_cmp_f_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x00,0x7d]
+
+v_cmp_f_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x01,0x7d]
+
+v_cmp_f_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x00,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x00,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x00,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x00,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x00,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x00,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x00,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x00,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x00,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x00,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x00,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x00,0xd1,0xff,0x05,0x00,0x00]
+
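+// SRC1 occupies bits [17:9] of the second dword in these _e64 encodings,
+// so scalar operands appear doubled in the sixth byte (s103 -> 0xce,
+// m0 -> 0xf8, exec_lo -> 0xfc) and a VGPR spills into the seventh
+// (v2 -> 0x04,0x02).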
+v_cmp_f_i32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x00,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmp_lt_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x02,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_lt_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x02,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_lt_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x02,0x7d]
+
+v_cmp_lt_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x03,0x7d]
+
+v_cmp_lt_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x02,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x02,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x02,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x02,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x02,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x02,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x02,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x02,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x02,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x02,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x02,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x02,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x02,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmp_eq_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x04,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_eq_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x04,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_eq_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x04,0x7d]
+
+v_cmp_eq_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x05,0x7d]
+
+v_cmp_eq_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x04,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x04,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x04,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x04,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x04,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x04,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x04,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x04,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x04,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x04,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x04,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x04,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x04,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmp_le_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x06,0x7d]
+
+v_cmp_le_i32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x06,0x7d]
+
+v_cmp_le_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x06,0x7d]
+
+v_cmp_le_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x06,0x7d]
+
+v_cmp_le_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x06,0x7d]
+
+v_cmp_le_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x06,0x7d]
+
+v_cmp_le_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x06,0x7d]
+
+v_cmp_le_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x06,0x7d]
+
+v_cmp_le_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x06,0x7d]
+
+v_cmp_le_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x06,0x7d]
+
+v_cmp_le_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x06,0x7d]
+
+v_cmp_le_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x06,0x7d]
+
+v_cmp_le_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x06,0x7d]
+
+v_cmp_le_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x06,0x7d]
+
+v_cmp_le_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x06,0x7d]
+
+v_cmp_le_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x06,0x7d]
+
+v_cmp_le_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x06,0x7d]
+
+v_cmp_le_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x06,0x7d]
+
+v_cmp_le_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x06,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_le_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x06,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_le_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x06,0x7d]
+
+v_cmp_le_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x06,0x7d]
+
+v_cmp_le_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x07,0x7d]
+
+v_cmp_le_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x06,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x06,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x06,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x06,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x06,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x06,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x06,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x06,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x06,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x06,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x06,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x06,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x06,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmp_gt_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x08,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_gt_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x08,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_gt_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x08,0x7d]
+
+v_cmp_gt_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x09,0x7d]
+
+v_cmp_gt_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x08,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x08,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x08,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x08,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x08,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x08,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x08,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x08,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x08,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x08,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x08,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x08,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x08,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmp_ne_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_ne_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_ne_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x7d]
+
+v_cmp_ne_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x7d]
+
+v_cmp_ne_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x0a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x0a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x0a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x0a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x0a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x0a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x0a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x0a,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x0a,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x0a,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x0a,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x0a,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmp_ge_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0c,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_ge_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0c,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_ge_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x0c,0x7d]
+
+v_cmp_ge_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x0d,0x7d]
+
+v_cmp_ge_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x0c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x0c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x0c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x0c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x0c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x0c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x0c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x0c,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x0c,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x0c,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x0c,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x0c,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmp_t_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0e,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_t_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0e,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_t_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x0e,0x7d]
+
+v_cmp_t_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x0f,0x7d]
+
+v_cmp_t_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x0e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x0e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x0e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x0e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x0e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x0e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x0e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x0e,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x0e,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x0e,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x0e,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x0e,0xd1,0x80,0xfe,0x03,0x00]
+
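+// From here the same operand sweeps are repeated for the v_cmpx_* opcodes,
+// which write the exec mask in addition to the SGPR destination. The
+// encodings differ only by a +0x10 VOPC opcode offset: the third byte of
+// each encoding steps through 0x20-0x2e below, versus 0x02-0x0e in the
+// v_cmp_* blocks above.
+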
+v_cmpx_f_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x20,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_f_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x20,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_f_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x20,0x7d]
+
+v_cmpx_f_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x21,0x7d]
+
+v_cmpx_f_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x20,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x20,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x20,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x20,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x20,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x20,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x20,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x20,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x20,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x20,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x20,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x20,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x20,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmpx_lt_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x22,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x22,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_lt_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x22,0x7d]
+
+v_cmpx_lt_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x23,0x7d]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x22,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x22,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x22,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x22,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x22,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x22,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x22,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x22,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x22,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x22,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x22,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x22,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x22,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmpx_eq_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x24,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x24,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_eq_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x24,0x7d]
+
+v_cmpx_eq_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x25,0x7d]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x24,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x24,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x24,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x24,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x24,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x24,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x24,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x24,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x24,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x24,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x24,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x24,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x24,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmpx_le_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x26,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x26,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_le_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x26,0x7d]
+
+v_cmpx_le_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x27,0x7d]
+
+v_cmpx_le_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x26,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x26,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x26,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x26,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x26,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x26,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x26,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x26,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x26,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x26,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x26,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x26,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x26,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmpx_gt_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x28,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x28,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_gt_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x28,0x7d]
+
+v_cmpx_gt_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x29,0x7d]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x28,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x28,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x28,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x28,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x28,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x28,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x28,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x28,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x28,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x28,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x28,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x28,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x28,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmpx_ne_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x2a,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ne_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x2a,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ne_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x2a,0x7d]
+
+v_cmpx_ne_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x2b,0x7d]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x2a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x2a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x2a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x2a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x2a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x2a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x2a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x2a,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x2a,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x2a,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x2a,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x2a,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmpx_ge_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x2c,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x2c,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ge_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x2c,0x7d]
+
+v_cmpx_ge_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x2d,0x7d]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x2c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x2c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x2c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x2c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x2c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x2c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x2c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x2c,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x2c,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x2c,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x2c,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x2c,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmpx_t_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x2e,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_t_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x2e,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_t_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x2e,0x7d]
+
+v_cmpx_t_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x2f,0x7d]
+
+v_cmpx_t_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x2e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x2e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x2e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x2e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x2e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x2e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x2e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x2e,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x2e,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x2e,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x2e,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x2e,0xd1,0x80,0xfe,0x03,0x00]
+
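+// The *_i64 comparisons below take 64-bit operands, so scalar and vector
+// sources are encoded as register pairs (e.g. s[2:3], v[1:2]); the field
+// layout is otherwise the same as in the 32-bit forms above.
+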
+v_cmp_f_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x40,0x7d]
+
+v_cmp_f_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x40,0x7d]
+
+v_cmp_f_i64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x40,0x7d]
+
+v_cmp_f_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x40,0x7d]
+
+v_cmp_f_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x40,0x7d]
+
+v_cmp_f_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x40,0x7d]
+
+v_cmp_f_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x40,0x7d]
+
+v_cmp_f_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x40,0x7d]
+
+v_cmp_f_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x40,0x7d]
+
+v_cmp_f_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x40,0x7d]
+
+v_cmp_f_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x40,0x7d]
+
+v_cmp_f_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x40,0x7d]
+
+v_cmp_f_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x40,0x7d]
+
+v_cmp_f_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x40,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_f_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x40,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_f_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x40,0x7d]
+
+v_cmp_f_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x40,0x7d]
+
+v_cmp_f_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x41,0x7d]
+
+v_cmp_f_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x40,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x40,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x40,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x40,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x40,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x40,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x40,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x40,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x40,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0x40,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x40,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0x40,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x40,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmp_f_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x40,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmp_f_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x40,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmp_f_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0x40,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmp_f_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x40,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmp_f_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0x40,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmp_f_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x40,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmp_f_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x40,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmp_lt_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x42,0x7d]
+
+v_cmp_lt_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x42,0x7d]
+
+v_cmp_lt_i64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x42,0x7d]
+
+v_cmp_lt_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x42,0x7d]
+
+v_cmp_lt_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x42,0x7d]
+
+v_cmp_lt_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x42,0x7d]
+
+v_cmp_lt_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x42,0x7d]
+
+v_cmp_lt_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x42,0x7d]
+
+v_cmp_lt_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x42,0x7d]
+
+v_cmp_lt_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x42,0x7d]
+
+v_cmp_lt_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x42,0x7d]
+
+v_cmp_lt_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x42,0x7d]
+
+v_cmp_lt_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x42,0x7d]
+
+v_cmp_lt_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x42,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_lt_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x42,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_lt_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x42,0x7d]
+
+v_cmp_lt_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x42,0x7d]
+
+v_cmp_lt_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x43,0x7d]
+
+v_cmp_lt_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x42,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x42,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x42,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x42,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x42,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x42,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x42,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x42,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x42,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0x42,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x42,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0x42,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x42,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x42,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x42,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0x42,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x42,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0x42,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x42,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x42,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmp_eq_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x44,0x7d]
+
+v_cmp_eq_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x44,0x7d]
+
+v_cmp_eq_i64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x44,0x7d]
+
+v_cmp_eq_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x44,0x7d]
+
+v_cmp_eq_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x44,0x7d]
+
+v_cmp_eq_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x44,0x7d]
+
+v_cmp_eq_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x44,0x7d]
+
+v_cmp_eq_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x44,0x7d]
+
+v_cmp_eq_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x44,0x7d]
+
+v_cmp_eq_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x44,0x7d]
+
+v_cmp_eq_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x44,0x7d]
+
+v_cmp_eq_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x44,0x7d]
+
+v_cmp_eq_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x44,0x7d]
+
+v_cmp_eq_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x44,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_eq_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x44,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_eq_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x44,0x7d]
+
+v_cmp_eq_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x44,0x7d]
+
+v_cmp_eq_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x45,0x7d]
+
+v_cmp_eq_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x44,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x44,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x44,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x44,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x44,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x44,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x44,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x44,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x44,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0x44,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x44,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0x44,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x44,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x44,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x44,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0x44,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x44,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0x44,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x44,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x44,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmp_le_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x46,0x7d]
+
+v_cmp_le_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x46,0x7d]
+
+v_cmp_le_i64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x46,0x7d]
+
+v_cmp_le_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x46,0x7d]
+
+v_cmp_le_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x46,0x7d]
+
+v_cmp_le_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x46,0x7d]
+
+v_cmp_le_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x46,0x7d]
+
+v_cmp_le_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x46,0x7d]
+
+v_cmp_le_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x46,0x7d]
+
+v_cmp_le_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x46,0x7d]
+
+v_cmp_le_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x46,0x7d]
+
+v_cmp_le_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x46,0x7d]
+
+v_cmp_le_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x46,0x7d]
+
+v_cmp_le_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x46,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_le_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x46,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_le_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x46,0x7d]
+
+v_cmp_le_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x46,0x7d]
+
+v_cmp_le_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x47,0x7d]
+
+v_cmp_le_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x46,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x46,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x46,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x46,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x46,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x46,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x46,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x46,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x46,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0x46,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x46,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0x46,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x46,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmp_le_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x46,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmp_le_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x46,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmp_le_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0x46,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmp_le_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x46,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmp_le_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0x46,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmp_le_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x46,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmp_le_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x46,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmp_gt_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x48,0x7d]
+
+v_cmp_gt_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x48,0x7d]
+
+v_cmp_gt_i64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x48,0x7d]
+
+v_cmp_gt_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x48,0x7d]
+
+v_cmp_gt_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x48,0x7d]
+
+v_cmp_gt_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x48,0x7d]
+
+v_cmp_gt_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x48,0x7d]
+
+v_cmp_gt_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x48,0x7d]
+
+v_cmp_gt_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x48,0x7d]
+
+v_cmp_gt_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x48,0x7d]
+
+v_cmp_gt_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x48,0x7d]
+
+v_cmp_gt_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x48,0x7d]
+
+v_cmp_gt_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x48,0x7d]
+
+v_cmp_gt_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x48,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_gt_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x48,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_gt_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x48,0x7d]
+
+v_cmp_gt_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x48,0x7d]
+
+v_cmp_gt_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x49,0x7d]
+
+v_cmp_gt_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x48,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x48,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x48,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x48,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x48,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x48,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x48,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x48,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x48,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0x48,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x48,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0x48,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x48,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x48,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x48,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0x48,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x48,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0x48,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x48,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x48,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmp_ne_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x4a,0x7d]
+
+v_cmp_ne_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x4a,0x7d]
+
+v_cmp_ne_i64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x4a,0x7d]
+
+v_cmp_ne_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x4a,0x7d]
+
+v_cmp_ne_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x4a,0x7d]
+
+v_cmp_ne_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x4a,0x7d]
+
+v_cmp_ne_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x4a,0x7d]
+
+v_cmp_ne_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x4a,0x7d]
+
+v_cmp_ne_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x4a,0x7d]
+
+v_cmp_ne_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x4a,0x7d]
+
+v_cmp_ne_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x4a,0x7d]
+
+v_cmp_ne_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x4a,0x7d]
+
+v_cmp_ne_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x4a,0x7d]
+
+v_cmp_ne_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x4a,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_ne_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x4a,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_ne_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x4a,0x7d]
+
+v_cmp_ne_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x4a,0x7d]
+
+v_cmp_ne_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x4b,0x7d]
+
+v_cmp_ne_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x4a,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x4a,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x4a,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x4a,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x4a,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x4a,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x4a,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x4a,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x4a,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0x4a,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x4a,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0x4a,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x4a,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x4a,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x4a,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0x4a,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x4a,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0x4a,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x4a,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x4a,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmp_ge_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x4c,0x7d]
+
+v_cmp_ge_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x4c,0x7d]
+
+v_cmp_ge_i64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x4c,0x7d]
+
+v_cmp_ge_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x4c,0x7d]
+
+v_cmp_ge_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x4c,0x7d]
+
+v_cmp_ge_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x4c,0x7d]
+
+v_cmp_ge_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x4c,0x7d]
+
+v_cmp_ge_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x4c,0x7d]
+
+v_cmp_ge_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x4c,0x7d]
+
+v_cmp_ge_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x4c,0x7d]
+
+v_cmp_ge_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x4c,0x7d]
+
+v_cmp_ge_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x4c,0x7d]
+
+v_cmp_ge_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x4c,0x7d]
+
+v_cmp_ge_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x4c,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_ge_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x4c,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_ge_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x4c,0x7d]
+
+v_cmp_ge_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x4c,0x7d]
+
+v_cmp_ge_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x4d,0x7d]
+
+v_cmp_ge_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x4c,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x4c,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x4c,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x4c,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x4c,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x4c,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x4c,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x4c,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x4c,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0x4c,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x4c,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0x4c,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x4c,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x4c,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x4c,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0x4c,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x4c,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0x4c,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x4c,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x4c,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmp_t_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x4e,0x7d]
+
+v_cmp_t_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x4e,0x7d]
+
+v_cmp_t_i64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x4e,0x7d]
+
+v_cmp_t_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x4e,0x7d]
+
+v_cmp_t_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x4e,0x7d]
+
+v_cmp_t_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x4e,0x7d]
+
+v_cmp_t_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x4e,0x7d]
+
+v_cmp_t_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x4e,0x7d]
+
+v_cmp_t_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x4e,0x7d]
+
+v_cmp_t_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x4e,0x7d]
+
+v_cmp_t_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x4e,0x7d]
+
+v_cmp_t_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x4e,0x7d]
+
+v_cmp_t_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x4e,0x7d]
+
+v_cmp_t_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x4e,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_t_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x4e,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_t_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x4e,0x7d]
+
+v_cmp_t_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x4e,0x7d]
+
+v_cmp_t_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x4f,0x7d]
+
+v_cmp_t_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x4e,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x4e,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x4e,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x4e,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x4e,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x4e,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x4e,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x4e,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x4e,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0x4e,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x4e,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0x4e,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x4e,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmp_t_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x4e,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmp_t_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x4e,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmp_t_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0x4e,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmp_t_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x4e,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmp_t_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0x4e,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmp_t_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x4e,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmp_t_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x4e,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmpx_f_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x60,0x7d]
+
+v_cmpx_f_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x60,0x7d]
+
+v_cmpx_f_i64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x60,0x7d]
+
+v_cmpx_f_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x60,0x7d]
+
+v_cmpx_f_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x60,0x7d]
+
+v_cmpx_f_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x60,0x7d]
+
+v_cmpx_f_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x60,0x7d]
+
+v_cmpx_f_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x60,0x7d]
+
+v_cmpx_f_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x60,0x7d]
+
+v_cmpx_f_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x60,0x7d]
+
+v_cmpx_f_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x60,0x7d]
+
+v_cmpx_f_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x60,0x7d]
+
+v_cmpx_f_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x60,0x7d]
+
+v_cmpx_f_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x60,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_f_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x60,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_f_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x60,0x7d]
+
+v_cmpx_f_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x60,0x7d]
+
+v_cmpx_f_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x61,0x7d]
+
+v_cmpx_f_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x60,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x60,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x60,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x60,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x60,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x60,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x60,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x60,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0x60,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x60,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0x60,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x60,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x60,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmpx_lt_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x62,0x7d]
+
+v_cmpx_lt_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x62,0x7d]
+
+v_cmpx_lt_i64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x62,0x7d]
+
+v_cmpx_lt_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x62,0x7d]
+
+v_cmpx_lt_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x62,0x7d]
+
+v_cmpx_lt_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x62,0x7d]
+
+v_cmpx_lt_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x62,0x7d]
+
+v_cmpx_lt_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x62,0x7d]
+
+v_cmpx_lt_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x62,0x7d]
+
+v_cmpx_lt_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x62,0x7d]
+
+v_cmpx_lt_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x62,0x7d]
+
+v_cmpx_lt_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x62,0x7d]
+
+v_cmpx_lt_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x62,0x7d]
+
+v_cmpx_lt_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x62,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x62,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_lt_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x62,0x7d]
+
+v_cmpx_lt_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x62,0x7d]
+
+v_cmpx_lt_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x63,0x7d]
+
+v_cmpx_lt_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x62,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x62,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x62,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x62,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x62,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x62,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x62,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x62,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0x62,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x62,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0x62,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x62,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x62,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmpx_eq_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x64,0x7d]
+
+v_cmpx_eq_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x64,0x7d]
+
+v_cmpx_eq_i64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x64,0x7d]
+
+v_cmpx_eq_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x64,0x7d]
+
+v_cmpx_eq_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x64,0x7d]
+
+v_cmpx_eq_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x64,0x7d]
+
+v_cmpx_eq_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x64,0x7d]
+
+v_cmpx_eq_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x64,0x7d]
+
+v_cmpx_eq_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x64,0x7d]
+
+v_cmpx_eq_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x64,0x7d]
+
+v_cmpx_eq_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x64,0x7d]
+
+v_cmpx_eq_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x64,0x7d]
+
+v_cmpx_eq_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x64,0x7d]
+
+v_cmpx_eq_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x64,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x64,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_eq_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x64,0x7d]
+
+v_cmpx_eq_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x64,0x7d]
+
+v_cmpx_eq_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x65,0x7d]
+
+v_cmpx_eq_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x64,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x64,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x64,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x64,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x64,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x64,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x64,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x64,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0x64,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x64,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0x64,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x64,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x64,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmpx_le_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x66,0x7d]
+
+v_cmpx_le_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x66,0x7d]
+
+v_cmpx_le_i64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x66,0x7d]
+
+v_cmpx_le_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x66,0x7d]
+
+v_cmpx_le_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x66,0x7d]
+
+v_cmpx_le_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x66,0x7d]
+
+v_cmpx_le_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x66,0x7d]
+
+v_cmpx_le_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x66,0x7d]
+
+v_cmpx_le_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x66,0x7d]
+
+v_cmpx_le_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x66,0x7d]
+
+v_cmpx_le_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x66,0x7d]
+
+v_cmpx_le_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x66,0x7d]
+
+v_cmpx_le_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x66,0x7d]
+
+v_cmpx_le_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x66,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x66,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_le_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x66,0x7d]
+
+v_cmpx_le_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x66,0x7d]
+
+v_cmpx_le_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x67,0x7d]
+
+v_cmpx_le_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x66,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x66,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x66,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x66,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x66,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x66,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x66,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x66,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0x66,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x66,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0x66,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x66,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x66,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmpx_gt_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x68,0x7d]
+
+v_cmpx_gt_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x68,0x7d]
+
+v_cmpx_gt_i64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x68,0x7d]
+
+v_cmpx_gt_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x68,0x7d]
+
+v_cmpx_gt_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x68,0x7d]
+
+v_cmpx_gt_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x68,0x7d]
+
+v_cmpx_gt_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x68,0x7d]
+
+v_cmpx_gt_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x68,0x7d]
+
+v_cmpx_gt_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x68,0x7d]
+
+v_cmpx_gt_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x68,0x7d]
+
+v_cmpx_gt_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x68,0x7d]
+
+v_cmpx_gt_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x68,0x7d]
+
+v_cmpx_gt_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x68,0x7d]
+
+v_cmpx_gt_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x68,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x68,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_gt_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x68,0x7d]
+
+v_cmpx_gt_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x68,0x7d]
+
+v_cmpx_gt_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x69,0x7d]
+
+v_cmpx_gt_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x68,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x68,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x68,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x68,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x68,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x68,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x68,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x68,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0x68,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x68,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0x68,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x68,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x68,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmpx_ne_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x6a,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ne_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x6a,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ne_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x6a,0x7d]
+
+v_cmpx_ne_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x6a,0x7d]
+
+v_cmpx_ne_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x6b,0x7d]
+
+v_cmpx_ne_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x6a,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x6a,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x6a,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x6a,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x6a,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x6a,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x6a,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x6a,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0x6a,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x6a,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0x6a,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x6a,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x6a,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmpx_ge_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x6c,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x6c,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ge_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x6c,0x7d]
+
+v_cmpx_ge_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x6c,0x7d]
+
+v_cmpx_ge_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x6d,0x7d]
+
+v_cmpx_ge_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x6c,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x6c,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x6c,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x6c,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x6c,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x6c,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x6c,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x6c,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0x6c,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x6c,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0x6c,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x6c,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x6c,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmpx_t_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0x6e,0x7d]
+
+v_cmpx_t_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0x6e,0x7d]
+
+v_cmpx_t_i64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0x6e,0x7d]
+
+v_cmpx_t_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0x6e,0x7d]
+
+v_cmpx_t_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0x6e,0x7d]
+
+v_cmpx_t_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0x6e,0x7d]
+
+v_cmpx_t_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0x6e,0x7d]
+
+v_cmpx_t_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0x6e,0x7d]
+
+v_cmpx_t_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0x6e,0x7d]
+
+v_cmpx_t_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0x6e,0x7d]
+
+v_cmpx_t_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0x6e,0x7d]
+
+v_cmpx_t_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0x6e,0x7d]
+
+v_cmpx_t_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0x6e,0x7d]
+
+v_cmpx_t_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0x6e,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_t_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0x6e,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_t_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0x6e,0x7d]
+
+v_cmpx_t_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0x6e,0x7d]
+
+v_cmpx_t_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0x6f,0x7d]
+
+v_cmpx_t_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x6e,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x6e,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0x6e,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x6e,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x6e,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x6e,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x6e,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x6e,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0x6e,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x6e,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0x6e,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x6e,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x6e,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmp_f_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x80,0x7d]
+
+v_cmp_f_u32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x80,0x7d]
+
+v_cmp_f_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x80,0x7d]
+
+v_cmp_f_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x80,0x7d]
+
+v_cmp_f_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x80,0x7d]
+
+v_cmp_f_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x80,0x7d]
+
+v_cmp_f_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x80,0x7d]
+
+v_cmp_f_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x80,0x7d]
+
+v_cmp_f_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x80,0x7d]
+
+v_cmp_f_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x80,0x7d]
+
+v_cmp_f_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x80,0x7d]
+
+v_cmp_f_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x80,0x7d]
+
+v_cmp_f_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x80,0x7d]
+
+v_cmp_f_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x80,0x7d]
+
+v_cmp_f_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x80,0x7d]
+
+v_cmp_f_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x80,0x7d]
+
+v_cmp_f_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x80,0x7d]
+
+v_cmp_f_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x80,0x7d]
+
+v_cmp_f_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x80,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_f_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x80,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_f_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x80,0x7d]
+
+v_cmp_f_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x80,0x7d]
+
+v_cmp_f_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x81,0x7d]
+
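+// The _e64 forms switch to the 8-byte VOP3a layout (0xd1 in the high byte
+// of the first word here), which encodes the SGPR-pair destination
+// explicitly in bits [7:0] instead of writing vcc implicitly.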
+v_cmp_f_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x80,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x80,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x80,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x80,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x80,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x80,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x80,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x80,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x80,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x80,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x80,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x80,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x80,0xd1,0x80,0xfe,0x03,0x00]
+
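+// The remaining unsigned 32-bit compares (lt, eq, le, gt, ne, ge, t) repeat
+// the same operand matrix; only the opcode changes, stepping from 0xc1 for
+// lt_u32 up to 0xc7 for t_u32 in the e32 encodings.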
+v_cmp_lt_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x82,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_lt_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x82,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_lt_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x82,0x7d]
+
+v_cmp_lt_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x83,0x7d]
+
+v_cmp_lt_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x82,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x82,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x82,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x82,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x82,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x82,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x82,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x82,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x82,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x82,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x82,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x82,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x82,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmp_eq_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x84,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_eq_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x84,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_eq_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x84,0x7d]
+
+v_cmp_eq_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x85,0x7d]
+
+v_cmp_eq_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x84,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x84,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x84,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x84,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x84,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x84,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x84,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x84,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x84,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x84,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x84,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x84,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x84,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmp_le_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x86,0x7d]
+
+v_cmp_le_u32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x86,0x7d]
+
+v_cmp_le_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x86,0x7d]
+
+v_cmp_le_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x86,0x7d]
+
+v_cmp_le_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x86,0x7d]
+
+v_cmp_le_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x86,0x7d]
+
+v_cmp_le_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x86,0x7d]
+
+v_cmp_le_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x86,0x7d]
+
+v_cmp_le_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x86,0x7d]
+
+v_cmp_le_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x86,0x7d]
+
+v_cmp_le_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x86,0x7d]
+
+v_cmp_le_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x86,0x7d]
+
+v_cmp_le_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x86,0x7d]
+
+v_cmp_le_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x86,0x7d]
+
+v_cmp_le_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x86,0x7d]
+
+v_cmp_le_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x86,0x7d]
+
+v_cmp_le_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x86,0x7d]
+
+v_cmp_le_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x86,0x7d]
+
+v_cmp_le_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x86,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_le_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x86,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_le_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x86,0x7d]
+
+v_cmp_le_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x86,0x7d]
+
+v_cmp_le_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x87,0x7d]
+
+v_cmp_le_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x86,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x86,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x86,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x86,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x86,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x86,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x86,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x86,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x86,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x86,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x86,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x86,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x86,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmp_gt_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x88,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_gt_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x88,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_gt_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x88,0x7d]
+
+v_cmp_gt_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x89,0x7d]
+
+v_cmp_gt_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x88,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x88,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x88,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x88,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x88,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x88,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x88,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x88,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x88,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x88,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x88,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x88,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x88,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmp_ne_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x8a,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_ne_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x8a,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_ne_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x8a,0x7d]
+
+v_cmp_ne_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x8b,0x7d]
+
+v_cmp_ne_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x8a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x8a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x8a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x8a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x8a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x8a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x8a,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x8a,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x8a,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x8a,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x8a,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x8a,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmp_ge_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x8c,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_ge_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x8c,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_ge_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x8c,0x7d]
+
+v_cmp_ge_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x8d,0x7d]
+
+v_cmp_ge_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x8c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x8c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x8c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x8c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x8c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x8c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x8c,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x8c,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x8c,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x8c,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x8c,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x8c,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmp_t_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x8e,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_t_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x8e,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_t_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x8e,0x7d]
+
+v_cmp_t_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x8f,0x7d]
+
+v_cmp_t_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x8e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0x8e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0x8e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x8e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x8e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x8e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x8e,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0x8e,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x8e,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0x8e,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x8e,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x8e,0xd1,0x80,0xfe,0x03,0x00]
+
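+// The v_cmpx_* forms below also update the EXEC mask with the comparison
+// result; their VOPC opcodes are the matching v_cmp_* opcodes plus 0x10
+// (e.g. 0xc0 -> 0xd0 for f_u32).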
+v_cmpx_f_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa0,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_f_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa0,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_f_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa0,0x7d]
+
+v_cmpx_f_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa1,0x7d]
+
+v_cmpx_f_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa0,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xa0,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xa0,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa0,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa0,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa0,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa0,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xa0,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa0,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xa0,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa0,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa0,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmpx_lt_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa2,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa2,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_lt_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa2,0x7d]
+
+v_cmpx_lt_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa3,0x7d]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa2,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xa2,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xa2,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa2,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa2,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa2,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa2,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xa2,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa2,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xa2,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa2,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa2,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmpx_eq_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa4,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa4,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_eq_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa4,0x7d]
+
+v_cmpx_eq_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa5,0x7d]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa4,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xa4,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xa4,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa4,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa4,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa4,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa4,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xa4,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa4,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xa4,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa4,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa4,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmpx_le_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa6,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa6,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_le_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa6,0x7d]
+
+v_cmpx_le_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa7,0x7d]
+
+v_cmpx_le_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa6,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xa6,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xa6,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa6,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa6,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa6,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa6,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xa6,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa6,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xa6,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa6,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa6,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmpx_gt_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa8,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa8,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_gt_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa8,0x7d]
+
+v_cmpx_gt_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa9,0x7d]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa8,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xa8,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xa8,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa8,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa8,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa8,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa8,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xa8,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa8,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xa8,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa8,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa8,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmpx_ne_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xaa,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ne_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xaa,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ne_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xaa,0x7d]
+
+v_cmpx_ne_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xab,0x7d]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xaa,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xaa,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xaa,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xaa,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xaa,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xaa,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xaa,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xaa,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xaa,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xaa,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xaa,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xaa,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmpx_ge_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xac,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xac,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ge_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xac,0x7d]
+
+v_cmpx_ge_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xad,0x7d]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xac,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xac,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xac,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xac,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xac,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xac,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xac,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xac,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xac,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xac,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xac,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xac,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xac,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmpx_t_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, s103, v2
+// CHECK: [0x67,0x04,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xae,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_t_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xae,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_t_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xae,0x7d]
+
+v_cmpx_t_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xaf,0x7d]
+
+v_cmpx_t_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xae,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[102:103], 0, s2
+// CHECK: [0x66,0x00,0xae,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x68,0x00,0xae,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xae,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xae,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xae,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xae,0xd1,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xae,0xd1,0xc1,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xae,0xd1,0xf0,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xae,0xd1,0xf7,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xae,0xd1,0x01,0x05,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xae,0xd1,0xff,0x05,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, s103
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0xce,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0xd0,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0xd2,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0xd4,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0xd6,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0xd8,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0xda,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0xdc,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0xde,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0xf6,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0xf8,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0xfc,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0xfe,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0x00,0x01,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0x82,0x01,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0xe0,0x01,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0xee,0x01,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0x04,0x02,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xae,0xd1,0x80,0xfe,0x03,0x00]
+
+v_cmp_f_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc0,0x7d]
+
+v_cmp_f_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc0,0x7d]
+
+v_cmp_f_u64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xc0,0x7d]
+
+v_cmp_f_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xc0,0x7d]
+
+v_cmp_f_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc0,0x7d]
+
+v_cmp_f_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc0,0x7d]
+
+v_cmp_f_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc0,0x7d]
+
+v_cmp_f_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc0,0x7d]
+
+v_cmp_f_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc0,0x7d]
+
+v_cmp_f_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc0,0x7d]
+
+v_cmp_f_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc0,0x7d]
+
+v_cmp_f_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc0,0x7d]
+
+v_cmp_f_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc0,0x7d]
+
+v_cmp_f_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc0,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_f_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc0,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_f_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc0,0x7d]
+
+v_cmp_f_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc0,0x7d]
+
+v_cmp_f_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc1,0x7d]
+
+v_cmp_f_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xc0,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xc0,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xc0,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xc0,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xc0,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xc0,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xc0,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xc0,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xc0,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xc0,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xc0,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xc0,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xc0,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmp_f_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xc0,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmp_f_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xc0,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmp_f_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xc0,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmp_f_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xc0,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmp_f_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xc0,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmp_f_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xc0,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmp_f_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xc0,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmp_lt_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc2,0x7d]
+
+v_cmp_lt_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc2,0x7d]
+
+v_cmp_lt_u64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xc2,0x7d]
+
+v_cmp_lt_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xc2,0x7d]
+
+v_cmp_lt_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc2,0x7d]
+
+v_cmp_lt_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc2,0x7d]
+
+v_cmp_lt_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc2,0x7d]
+
+v_cmp_lt_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc2,0x7d]
+
+v_cmp_lt_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc2,0x7d]
+
+v_cmp_lt_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc2,0x7d]
+
+v_cmp_lt_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc2,0x7d]
+
+v_cmp_lt_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc2,0x7d]
+
+v_cmp_lt_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc2,0x7d]
+
+v_cmp_lt_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc2,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_lt_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc2,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_lt_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc2,0x7d]
+
+v_cmp_lt_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc2,0x7d]
+
+v_cmp_lt_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc3,0x7d]
+
+v_cmp_lt_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xc2,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xc2,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xc2,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xc2,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xc2,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xc2,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xc2,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xc2,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xc2,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xc2,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xc2,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xc2,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xc2,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xc2,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xc2,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xc2,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xc2,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xc2,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xc2,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xc2,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmp_eq_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc4,0x7d]
+
+v_cmp_eq_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc4,0x7d]
+
+v_cmp_eq_u64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xc4,0x7d]
+
+v_cmp_eq_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xc4,0x7d]
+
+v_cmp_eq_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc4,0x7d]
+
+v_cmp_eq_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc4,0x7d]
+
+v_cmp_eq_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc4,0x7d]
+
+v_cmp_eq_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc4,0x7d]
+
+v_cmp_eq_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc4,0x7d]
+
+v_cmp_eq_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc4,0x7d]
+
+v_cmp_eq_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc4,0x7d]
+
+v_cmp_eq_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc4,0x7d]
+
+v_cmp_eq_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc4,0x7d]
+
+v_cmp_eq_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc4,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_eq_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc4,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_eq_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc4,0x7d]
+
+v_cmp_eq_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc4,0x7d]
+
+v_cmp_eq_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc5,0x7d]
+
+v_cmp_eq_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xc4,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xc4,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xc4,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xc4,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xc4,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xc4,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xc4,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xc4,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xc4,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xc4,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xc4,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xc4,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xc4,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xc4,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xc4,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xc4,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xc4,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xc4,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xc4,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xc4,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmp_le_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc6,0x7d]
+
+v_cmp_le_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc6,0x7d]
+
+v_cmp_le_u64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xc6,0x7d]
+
+v_cmp_le_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xc6,0x7d]
+
+v_cmp_le_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc6,0x7d]
+
+v_cmp_le_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc6,0x7d]
+
+v_cmp_le_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc6,0x7d]
+
+v_cmp_le_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc6,0x7d]
+
+v_cmp_le_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc6,0x7d]
+
+v_cmp_le_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc6,0x7d]
+
+v_cmp_le_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc6,0x7d]
+
+v_cmp_le_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc6,0x7d]
+
+v_cmp_le_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc6,0x7d]
+
+v_cmp_le_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc6,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_le_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc6,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_le_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc6,0x7d]
+
+v_cmp_le_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc6,0x7d]
+
+v_cmp_le_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc7,0x7d]
+
+v_cmp_le_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xc6,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xc6,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xc6,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xc6,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xc6,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xc6,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xc6,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xc6,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xc6,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xc6,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xc6,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xc6,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xc6,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmp_le_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xc6,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmp_le_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xc6,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmp_le_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xc6,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmp_le_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xc6,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmp_le_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xc6,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmp_le_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xc6,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmp_le_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xc6,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmp_gt_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc8,0x7d]
+
+v_cmp_gt_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc8,0x7d]
+
+v_cmp_gt_u64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xc8,0x7d]
+
+v_cmp_gt_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xc8,0x7d]
+
+v_cmp_gt_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc8,0x7d]
+
+v_cmp_gt_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc8,0x7d]
+
+v_cmp_gt_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc8,0x7d]
+
+v_cmp_gt_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc8,0x7d]
+
+v_cmp_gt_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc8,0x7d]
+
+v_cmp_gt_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc8,0x7d]
+
+v_cmp_gt_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc8,0x7d]
+
+v_cmp_gt_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc8,0x7d]
+
+v_cmp_gt_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc8,0x7d]
+
+v_cmp_gt_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc8,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_gt_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc8,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_gt_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc8,0x7d]
+
+v_cmp_gt_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc8,0x7d]
+
+v_cmp_gt_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc9,0x7d]
+
+v_cmp_gt_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xc8,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xc8,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xc8,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xc8,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xc8,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xc8,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xc8,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xc8,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xc8,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xc8,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xc8,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xc8,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xc8,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xc8,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xc8,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xc8,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xc8,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xc8,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xc8,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xc8,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmp_ne_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xca,0x7d]
+
+v_cmp_ne_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xca,0x7d]
+
+v_cmp_ne_u64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xca,0x7d]
+
+v_cmp_ne_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xca,0x7d]
+
+v_cmp_ne_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xca,0x7d]
+
+v_cmp_ne_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xca,0x7d]
+
+v_cmp_ne_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xca,0x7d]
+
+v_cmp_ne_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xca,0x7d]
+
+v_cmp_ne_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xca,0x7d]
+
+v_cmp_ne_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xca,0x7d]
+
+v_cmp_ne_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xca,0x7d]
+
+v_cmp_ne_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xca,0x7d]
+
+v_cmp_ne_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xca,0x7d]
+
+v_cmp_ne_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xca,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_ne_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xca,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_ne_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xca,0x7d]
+
+v_cmp_ne_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xca,0x7d]
+
+v_cmp_ne_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xcb,0x7d]
+
+v_cmp_ne_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xca,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xca,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xca,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xca,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xca,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xca,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xca,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xca,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xca,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xca,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xca,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xca,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xca,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xca,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xca,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xca,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xca,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xca,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xca,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xca,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmp_ge_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xcc,0x7d]
+
+v_cmp_ge_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xcc,0x7d]
+
+v_cmp_ge_u64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xcc,0x7d]
+
+v_cmp_ge_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xcc,0x7d]
+
+v_cmp_ge_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xcc,0x7d]
+
+v_cmp_ge_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xcc,0x7d]
+
+v_cmp_ge_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xcc,0x7d]
+
+v_cmp_ge_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xcc,0x7d]
+
+v_cmp_ge_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xcc,0x7d]
+
+v_cmp_ge_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xcc,0x7d]
+
+v_cmp_ge_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xcc,0x7d]
+
+v_cmp_ge_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xcc,0x7d]
+
+v_cmp_ge_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xcc,0x7d]
+
+v_cmp_ge_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xcc,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_ge_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xcc,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_ge_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xcc,0x7d]
+
+v_cmp_ge_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xcc,0x7d]
+
+v_cmp_ge_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xcd,0x7d]
+
+v_cmp_ge_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xcc,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xcc,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xcc,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xcc,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xcc,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xcc,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xcc,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xcc,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xcc,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xcc,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xcc,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xcc,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xcc,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xcc,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xcc,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xcc,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xcc,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xcc,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xcc,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xcc,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmp_t_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xce,0x7d]
+
+v_cmp_t_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xce,0x7d]
+
+v_cmp_t_u64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xce,0x7d]
+
+v_cmp_t_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xce,0x7d]
+
+v_cmp_t_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xce,0x7d]
+
+v_cmp_t_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xce,0x7d]
+
+v_cmp_t_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xce,0x7d]
+
+v_cmp_t_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xce,0x7d]
+
+v_cmp_t_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xce,0x7d]
+
+v_cmp_t_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xce,0x7d]
+
+v_cmp_t_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xce,0x7d]
+
+v_cmp_t_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xce,0x7d]
+
+v_cmp_t_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xce,0x7d]
+
+v_cmp_t_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xce,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_t_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xce,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_t_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xce,0x7d]
+
+v_cmp_t_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xce,0x7d]
+
+v_cmp_t_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xcf,0x7d]
+
+v_cmp_t_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xce,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xce,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xce,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xce,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xce,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xce,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xce,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xce,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xce,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xce,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xce,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xce,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xce,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmp_t_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xce,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmp_t_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xce,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmp_t_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xce,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmp_t_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xce,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmp_t_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xce,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmp_t_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xce,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmp_t_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xce,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmpx_f_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe0,0x7d]
+
+v_cmpx_f_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe0,0x7d]
+
+v_cmpx_f_u64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xe0,0x7d]
+
+v_cmpx_f_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xe0,0x7d]
+
+v_cmpx_f_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe0,0x7d]
+
+v_cmpx_f_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe0,0x7d]
+
+v_cmpx_f_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe0,0x7d]
+
+v_cmpx_f_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe0,0x7d]
+
+v_cmpx_f_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe0,0x7d]
+
+v_cmpx_f_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe0,0x7d]
+
+v_cmpx_f_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe0,0x7d]
+
+v_cmpx_f_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe0,0x7d]
+
+v_cmpx_f_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe0,0x7d]
+
+v_cmpx_f_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe0,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_f_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe0,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_f_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe0,0x7d]
+
+v_cmpx_f_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe0,0x7d]
+
+v_cmpx_f_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe1,0x7d]
+
+v_cmpx_f_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe0,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe0,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xe0,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe0,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe0,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe0,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe0,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe0,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xe0,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe0,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xe0,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe0,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe0,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmpx_lt_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe2,0x7d]
+
+v_cmpx_lt_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe2,0x7d]
+
+v_cmpx_lt_u64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xe2,0x7d]
+
+v_cmpx_lt_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xe2,0x7d]
+
+v_cmpx_lt_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe2,0x7d]
+
+v_cmpx_lt_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe2,0x7d]
+
+v_cmpx_lt_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe2,0x7d]
+
+v_cmpx_lt_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe2,0x7d]
+
+v_cmpx_lt_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe2,0x7d]
+
+v_cmpx_lt_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe2,0x7d]
+
+v_cmpx_lt_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe2,0x7d]
+
+v_cmpx_lt_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe2,0x7d]
+
+v_cmpx_lt_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe2,0x7d]
+
+v_cmpx_lt_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe2,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe2,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_lt_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe2,0x7d]
+
+v_cmpx_lt_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe2,0x7d]
+
+v_cmpx_lt_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe3,0x7d]
+
+v_cmpx_lt_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe2,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe2,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xe2,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe2,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe2,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe2,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe2,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe2,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xe2,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe2,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xe2,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe2,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe2,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmpx_eq_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe4,0x7d]
+
+v_cmpx_eq_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe4,0x7d]
+
+v_cmpx_eq_u64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xe4,0x7d]
+
+v_cmpx_eq_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xe4,0x7d]
+
+v_cmpx_eq_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe4,0x7d]
+
+v_cmpx_eq_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe4,0x7d]
+
+v_cmpx_eq_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe4,0x7d]
+
+v_cmpx_eq_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe4,0x7d]
+
+v_cmpx_eq_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe4,0x7d]
+
+v_cmpx_eq_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe4,0x7d]
+
+v_cmpx_eq_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe4,0x7d]
+
+v_cmpx_eq_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe4,0x7d]
+
+v_cmpx_eq_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe4,0x7d]
+
+v_cmpx_eq_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe4,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe4,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_eq_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe4,0x7d]
+
+v_cmpx_eq_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe4,0x7d]
+
+v_cmpx_eq_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe5,0x7d]
+
+v_cmpx_eq_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe4,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe4,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xe4,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe4,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe4,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe4,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe4,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe4,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xe4,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe4,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xe4,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe4,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe4,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmpx_le_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe6,0x7d]
+
+v_cmpx_le_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe6,0x7d]
+
+v_cmpx_le_u64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xe6,0x7d]
+
+v_cmpx_le_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xe6,0x7d]
+
+v_cmpx_le_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe6,0x7d]
+
+v_cmpx_le_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe6,0x7d]
+
+v_cmpx_le_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe6,0x7d]
+
+v_cmpx_le_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe6,0x7d]
+
+v_cmpx_le_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe6,0x7d]
+
+v_cmpx_le_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe6,0x7d]
+
+v_cmpx_le_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe6,0x7d]
+
+v_cmpx_le_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe6,0x7d]
+
+v_cmpx_le_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe6,0x7d]
+
+v_cmpx_le_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe6,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe6,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_le_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe6,0x7d]
+
+v_cmpx_le_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe6,0x7d]
+
+v_cmpx_le_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe7,0x7d]
+
+v_cmpx_le_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe6,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe6,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xe6,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe6,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe6,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe6,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe6,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe6,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xe6,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe6,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xe6,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe6,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe6,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmpx_gt_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe8,0x7d]
+
+v_cmpx_gt_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe8,0x7d]
+
+v_cmpx_gt_u64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xe8,0x7d]
+
+v_cmpx_gt_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xe8,0x7d]
+
+v_cmpx_gt_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe8,0x7d]
+
+v_cmpx_gt_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe8,0x7d]
+
+v_cmpx_gt_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe8,0x7d]
+
+v_cmpx_gt_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe8,0x7d]
+
+v_cmpx_gt_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe8,0x7d]
+
+v_cmpx_gt_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe8,0x7d]
+
+v_cmpx_gt_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe8,0x7d]
+
+v_cmpx_gt_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe8,0x7d]
+
+v_cmpx_gt_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe8,0x7d]
+
+v_cmpx_gt_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe8,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe8,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_gt_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe8,0x7d]
+
+v_cmpx_gt_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe8,0x7d]
+
+v_cmpx_gt_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe9,0x7d]
+
+v_cmpx_gt_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe8,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe8,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xe8,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe8,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe8,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe8,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe8,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe8,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xe8,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe8,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xe8,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe8,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe8,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmpx_ne_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xea,0x7d]
+
+v_cmpx_ne_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xea,0x7d]
+
+v_cmpx_ne_u64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xea,0x7d]
+
+v_cmpx_ne_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xea,0x7d]
+
+v_cmpx_ne_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xea,0x7d]
+
+v_cmpx_ne_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xea,0x7d]
+
+v_cmpx_ne_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xea,0x7d]
+
+v_cmpx_ne_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xea,0x7d]
+
+v_cmpx_ne_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xea,0x7d]
+
+v_cmpx_ne_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xea,0x7d]
+
+v_cmpx_ne_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xea,0x7d]
+
+v_cmpx_ne_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xea,0x7d]
+
+v_cmpx_ne_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xea,0x7d]
+
+v_cmpx_ne_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xea,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ne_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xea,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ne_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xea,0x7d]
+
+v_cmpx_ne_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xea,0x7d]
+
+v_cmpx_ne_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xeb,0x7d]
+
+v_cmpx_ne_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xea,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xea,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xea,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xea,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xea,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xea,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xea,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xea,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xea,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xea,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xea,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xea,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xea,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmpx_ge_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xec,0x7d]
+
+v_cmpx_ge_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xec,0x7d]
+
+v_cmpx_ge_u64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xec,0x7d]
+
+v_cmpx_ge_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xec,0x7d]
+
+v_cmpx_ge_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xec,0x7d]
+
+v_cmpx_ge_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xec,0x7d]
+
+v_cmpx_ge_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xec,0x7d]
+
+v_cmpx_ge_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xec,0x7d]
+
+v_cmpx_ge_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xec,0x7d]
+
+v_cmpx_ge_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xec,0x7d]
+
+v_cmpx_ge_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xec,0x7d]
+
+v_cmpx_ge_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xec,0x7d]
+
+v_cmpx_ge_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xec,0x7d]
+
+v_cmpx_ge_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xec,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xec,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ge_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xec,0x7d]
+
+v_cmpx_ge_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xec,0x7d]
+
+v_cmpx_ge_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xed,0x7d]
+
+v_cmpx_ge_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xec,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xec,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xec,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xec,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xec,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xec,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xec,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xec,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xec,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xec,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xec,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xec,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xec,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmpx_t_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xee,0x7d]
+
+v_cmpx_t_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xee,0x7d]
+
+v_cmpx_t_u64 vcc, s[102:103], v[2:3]
+// CHECK: [0x66,0x04,0xee,0x7d]
+
+v_cmpx_t_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x68,0x04,0xee,0x7d]
+
+v_cmpx_t_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xee,0x7d]
+
+v_cmpx_t_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xee,0x7d]
+
+v_cmpx_t_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xee,0x7d]
+
+v_cmpx_t_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xee,0x7d]
+
+v_cmpx_t_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xee,0x7d]
+
+v_cmpx_t_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xee,0x7d]
+
+v_cmpx_t_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xee,0x7d]
+
+v_cmpx_t_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xee,0x7d]
+
+v_cmpx_t_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xee,0x7d]
+
+v_cmpx_t_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xee,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_t_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xee,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_t_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xee,0x7d]
+
+v_cmpx_t_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xee,0x7d]
+
+v_cmpx_t_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xef,0x7d]
+
+v_cmpx_t_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xee,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 s[102:103], s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xee,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x68,0x00,0xee,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xee,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xee,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xee,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xee,0xd1,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd1,0x80,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd1,0xc1,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd1,0xf0,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd1,0xf7,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd1,0x01,0x09,0x00,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd1,0xfe,0x09,0x00,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xee,0xd1,0x04,0x00,0x01,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xee,0xd1,0x04,0x82,0x01,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xee,0xd1,0x04,0xe0,0x01,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xee,0xd1,0x04,0xee,0x01,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xee,0xd1,0x04,0x04,0x02,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xee,0xd1,0x04,0xfc,0x03,0x00]
+
+v_cmp_class_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x10,0x7d]
+
+v_cmp_class_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x10,0x7d]
+
+v_cmp_class_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x10,0x7d]
+
+v_cmp_class_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x10,0x7d]
+
+v_cmp_class_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x10,0x7d]
+
+v_cmp_class_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x10,0x7d]
+
+v_cmp_class_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x10,0x7d]
+
+v_cmp_class_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x10,0x7d]
+
+v_cmp_class_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x10,0x7d]
+
+v_cmp_class_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x10,0x7d]
+
+v_cmp_class_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x10,0x7d]
+
+v_cmp_class_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x10,0x7d]
+
+v_cmp_class_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x10,0x7d]
+
+v_cmp_class_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x10,0x7d]
+
+v_cmp_class_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x10,0x7d]
+
+v_cmp_class_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x10,0x7d]
+
+v_cmp_class_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x10,0x7d]
+
+v_cmp_class_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x10,0x7d]
+
+v_cmp_class_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x10,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_class_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x10,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_class_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x10,0x7d]
+
+v_cmp_class_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x10,0x7d]
+
+v_cmp_class_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x11,0x7d]
+
+v_cmpx_class_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, s103, v2
+// CHECK: [0x67,0x04,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x68,0x04,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x69,0x04,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x30,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_class_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x30,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_class_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x30,0x7d]
+
+v_cmpx_class_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x31,0x7d]
+
+v_cmp_class_f64 vcc, s[2:3], v2
+// CHECK: [0x02,0x04,0x50,0x7d]
+
+v_cmp_class_f64 vcc, s[4:5], v2
+// CHECK: [0x04,0x04,0x50,0x7d]
+
+v_cmp_class_f64 vcc, s[102:103], v2
+// CHECK: [0x66,0x04,0x50,0x7d]
+
+v_cmp_class_f64 vcc, flat_scratch, v2
+// CHECK: [0x68,0x04,0x50,0x7d]
+
+v_cmp_class_f64 vcc, vcc, v2
+// CHECK: [0x6a,0x04,0x50,0x7d]
+
+v_cmp_class_f64 vcc, tba, v2
+// CHECK: [0x6c,0x04,0x50,0x7d]
+
+v_cmp_class_f64 vcc, tma, v2
+// CHECK: [0x6e,0x04,0x50,0x7d]
+
+v_cmp_class_f64 vcc, ttmp[10:11], v2
+// CHECK: [0x7a,0x04,0x50,0x7d]
+
+v_cmp_class_f64 vcc, exec, v2
+// CHECK: [0x7e,0x04,0x50,0x7d]
+
+v_cmp_class_f64 vcc, 0, v2
+// CHECK: [0x80,0x04,0x50,0x7d]
+
+v_cmp_class_f64 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x50,0x7d]
+
+v_cmp_class_f64 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x50,0x7d]
+
+v_cmp_class_f64 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x50,0x7d]
+
+v_cmp_class_f64 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x50,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_class_f64 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x50,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_class_f64 vcc, v[1:2], v2
+// CHECK: [0x01,0x05,0x50,0x7d]
+
+v_cmp_class_f64 vcc, v[254:255], v2
+// CHECK: [0xfe,0x05,0x50,0x7d]
+
+v_cmp_class_f64 vcc, s[2:3], v255
+// CHECK: [0x02,0xfe,0x51,0x7d]
+
+v_cmpx_class_f64 vcc, s[2:3], v2
+// CHECK: [0x02,0x04,0x70,0x7d]
+
+v_cmpx_class_f64 vcc, s[4:5], v2
+// CHECK: [0x04,0x04,0x70,0x7d]
+
+v_cmpx_class_f64 vcc, s[102:103], v2
+// CHECK: [0x66,0x04,0x70,0x7d]
+
+v_cmpx_class_f64 vcc, flat_scratch, v2
+// CHECK: [0x68,0x04,0x70,0x7d]
+
+v_cmpx_class_f64 vcc, vcc, v2
+// CHECK: [0x6a,0x04,0x70,0x7d]
+
+v_cmpx_class_f64 vcc, tba, v2
+// CHECK: [0x6c,0x04,0x70,0x7d]
+
+v_cmpx_class_f64 vcc, tma, v2
+// CHECK: [0x6e,0x04,0x70,0x7d]
+
+v_cmpx_class_f64 vcc, ttmp[10:11], v2
+// CHECK: [0x7a,0x04,0x70,0x7d]
+
+v_cmpx_class_f64 vcc, exec, v2
+// CHECK: [0x7e,0x04,0x70,0x7d]
+
+v_cmpx_class_f64 vcc, 0, v2
+// CHECK: [0x80,0x04,0x70,0x7d]
+
+v_cmpx_class_f64 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x70,0x7d]
+
+v_cmpx_class_f64 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x70,0x7d]
+
+v_cmpx_class_f64 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x70,0x7d]
+
+v_cmpx_class_f64 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x70,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_class_f64 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x70,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_class_f64 vcc, v[1:2], v2
+// CHECK: [0x01,0x05,0x70,0x7d]
+
+v_cmpx_class_f64 vcc, v[254:255], v2
+// CHECK: [0xfe,0x05,0x70,0x7d]
+
+v_cmpx_class_f64 vcc, s[2:3], v255
+// CHECK: [0x02,0xfe,0x71,0x7d]
+
diff --git a/test/MC/AMDGPU/gfx8_asm_all.s b/test/MC/AMDGPU/gfx8_asm_all.s
new file mode 100644
index 000000000000..0a0d42c208f9
--- /dev/null
+++ b/test/MC/AMDGPU/gfx8_asm_all.s
@@ -0,0 +1,98847 @@
+// RUN: llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s | FileCheck %s
+
+// *** GENERATED BY TESTGEN, DO NOT EDIT! ***
+
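+// Each instruction below is paired with a CHECK line giving the encoding
+// bytes that llvm-mc is expected to print via -show-encoding; FileCheck
+// matches those bytes against this same file. A minimal sketch of running
+// the test by hand, with %s from the RUN line expanded (the path is
+// illustrative, not part of the test):
+//   llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding gfx8_asm_all.s \
+//     | FileCheck gfx8_asm_all.s
+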
+ds_add_u32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x00,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_u32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x00,0xd8,0xff,0x02,0x00,0x00]
+
+ds_add_u32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x00,0xd8,0x01,0xff,0x00,0x00]
+
+ds_add_u32 v1, v2
+// CHECK: [0x00,0x00,0x00,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_u32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x00,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_u32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x00,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_u32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x01,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x02,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x02,0xd8,0xff,0x02,0x00,0x00]
+
+ds_sub_u32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x02,0xd8,0x01,0xff,0x00,0x00]
+
+ds_sub_u32 v1, v2
+// CHECK: [0x00,0x00,0x02,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x02,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x02,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x03,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x04,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x04,0xd8,0xff,0x02,0x00,0x00]
+
+ds_rsub_u32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x04,0xd8,0x01,0xff,0x00,0x00]
+
+ds_rsub_u32 v1, v2
+// CHECK: [0x00,0x00,0x04,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x04,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x04,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x05,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x06,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x06,0xd8,0xff,0x02,0x00,0x00]
+
+ds_inc_u32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x06,0xd8,0x01,0xff,0x00,0x00]
+
+ds_inc_u32 v1, v2
+// CHECK: [0x00,0x00,0x06,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x06,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x06,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x07,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x08,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x08,0xd8,0xff,0x02,0x00,0x00]
+
+ds_dec_u32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x08,0xd8,0x01,0xff,0x00,0x00]
+
+ds_dec_u32 v1, v2
+// CHECK: [0x00,0x00,0x08,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x08,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x08,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x09,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x0a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x0a,0xd8,0xff,0x02,0x00,0x00]
+
+ds_min_i32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x0a,0xd8,0x01,0xff,0x00,0x00]
+
+ds_min_i32 v1, v2
+// CHECK: [0x00,0x00,0x0a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x0a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x0a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x0b,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x0c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x0c,0xd8,0xff,0x02,0x00,0x00]
+
+ds_max_i32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x0c,0xd8,0x01,0xff,0x00,0x00]
+
+ds_max_i32 v1, v2
+// CHECK: [0x00,0x00,0x0c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x0c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x0c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x0d,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x0e,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x0e,0xd8,0xff,0x02,0x00,0x00]
+
+ds_min_u32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x0e,0xd8,0x01,0xff,0x00,0x00]
+
+ds_min_u32 v1, v2
+// CHECK: [0x00,0x00,0x0e,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x0e,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x0e,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x0f,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x10,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x10,0xd8,0xff,0x02,0x00,0x00]
+
+ds_max_u32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x10,0xd8,0x01,0xff,0x00,0x00]
+
+ds_max_u32 v1, v2
+// CHECK: [0x00,0x00,0x10,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x10,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x10,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x11,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x12,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x12,0xd8,0xff,0x02,0x00,0x00]
+
+ds_and_b32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x12,0xd8,0x01,0xff,0x00,0x00]
+
+ds_and_b32 v1, v2
+// CHECK: [0x00,0x00,0x12,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x12,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x12,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x13,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x14,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x14,0xd8,0xff,0x02,0x00,0x00]
+
+ds_or_b32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x14,0xd8,0x01,0xff,0x00,0x00]
+
+ds_or_b32 v1, v2
+// CHECK: [0x00,0x00,0x14,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x14,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x14,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x15,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x16,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x16,0xd8,0xff,0x02,0x00,0x00]
+
+ds_xor_b32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x16,0xd8,0x01,0xff,0x00,0x00]
+
+ds_xor_b32 v1, v2
+// CHECK: [0x00,0x00,0x16,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x16,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x16,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x17,0xd8,0x01,0x02,0x00,0x00]
+
+ds_mskor_b32 v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x18,0xd8,0x01,0x02,0x03,0x00]
+
+ds_mskor_b32 v255, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x18,0xd8,0xff,0x02,0x03,0x00]
+
+ds_mskor_b32 v1, v255, v3 offset:65535
+// CHECK: [0xff,0xff,0x18,0xd8,0x01,0xff,0x03,0x00]
+
+ds_mskor_b32 v1, v2, v255 offset:65535
+// CHECK: [0xff,0xff,0x18,0xd8,0x01,0x02,0xff,0x00]
+
+ds_mskor_b32 v1, v2, v3
+// CHECK: [0x00,0x00,0x18,0xd8,0x01,0x02,0x03,0x00]
+
+ds_mskor_b32 v1, v2, v3 offset:0
+// CHECK: [0x00,0x00,0x18,0xd8,0x01,0x02,0x03,0x00]
+
+ds_mskor_b32 v1, v2, v3 offset:4
+// CHECK: [0x04,0x00,0x18,0xd8,0x01,0x02,0x03,0x00]
+
+ds_mskor_b32 v1, v2, v3 offset:65535 gds
+// CHECK: [0xff,0xff,0x19,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write_b32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x1a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x1a,0xd8,0xff,0x02,0x00,0x00]
+
+ds_write_b32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x1a,0xd8,0x01,0xff,0x00,0x00]
+
+ds_write_b32 v1, v2
+// CHECK: [0x00,0x00,0x1a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x1a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x1a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x1b,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write2_b32 v1, v2, v3 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x1c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v255, v2, v3 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x1c,0xd8,0xff,0x02,0x03,0x00]
+
+ds_write2_b32 v1, v255, v3 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x1c,0xd8,0x01,0xff,0x03,0x00]
+
+ds_write2_b32 v1, v2, v255 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x1c,0xd8,0x01,0x02,0xff,0x00]
+
+ds_write2_b32 v1, v2, v3 offset1:255
+// CHECK: [0x00,0xff,0x1c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v1, v2, v3 offset0:0 offset1:255
+// CHECK: [0x00,0xff,0x1c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v1, v2, v3 offset0:16 offset1:255
+// CHECK: [0x10,0xff,0x1c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v1, v2, v3 offset0:127
+// CHECK: [0x7f,0x00,0x1c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v1, v2, v3 offset0:127 offset1:0
+// CHECK: [0x7f,0x00,0x1c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v1, v2, v3 offset0:127 offset1:1
+// CHECK: [0x7f,0x01,0x1c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v1, v2, v3 offset0:127 offset1:255 gds
+// CHECK: [0x7f,0xff,0x1d,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x1e,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v255, v2, v3 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x1e,0xd8,0xff,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v255, v3 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x1e,0xd8,0x01,0xff,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v255 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x1e,0xd8,0x01,0x02,0xff,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset1:255
+// CHECK: [0x00,0xff,0x1e,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset0:0 offset1:255
+// CHECK: [0x00,0xff,0x1e,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset0:16 offset1:255
+// CHECK: [0x10,0xff,0x1e,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset0:127
+// CHECK: [0x7f,0x00,0x1e,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset0:127 offset1:0
+// CHECK: [0x7f,0x00,0x1e,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset0:127 offset1:1
+// CHECK: [0x7f,0x01,0x1e,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset0:127 offset1:255 gds
+// CHECK: [0x7f,0xff,0x1f,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b32 v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x20,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b32 v255, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x20,0xd8,0xff,0x02,0x03,0x00]
+
+ds_cmpst_b32 v1, v255, v3 offset:65535
+// CHECK: [0xff,0xff,0x20,0xd8,0x01,0xff,0x03,0x00]
+
+ds_cmpst_b32 v1, v2, v255 offset:65535
+// CHECK: [0xff,0xff,0x20,0xd8,0x01,0x02,0xff,0x00]
+
+ds_cmpst_b32 v1, v2, v3
+// CHECK: [0x00,0x00,0x20,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b32 v1, v2, v3 offset:0
+// CHECK: [0x00,0x00,0x20,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b32 v1, v2, v3 offset:4
+// CHECK: [0x04,0x00,0x20,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b32 v1, v2, v3 offset:65535 gds
+// CHECK: [0xff,0xff,0x21,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f32 v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x22,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f32 v255, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x22,0xd8,0xff,0x02,0x03,0x00]
+
+ds_cmpst_f32 v1, v255, v3 offset:65535
+// CHECK: [0xff,0xff,0x22,0xd8,0x01,0xff,0x03,0x00]
+
+ds_cmpst_f32 v1, v2, v255 offset:65535
+// CHECK: [0xff,0xff,0x22,0xd8,0x01,0x02,0xff,0x00]
+
+ds_cmpst_f32 v1, v2, v3
+// CHECK: [0x00,0x00,0x22,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f32 v1, v2, v3 offset:0
+// CHECK: [0x00,0x00,0x22,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f32 v1, v2, v3 offset:4
+// CHECK: [0x04,0x00,0x22,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f32 v1, v2, v3 offset:65535 gds
+// CHECK: [0xff,0xff,0x23,0xd8,0x01,0x02,0x03,0x00]
+
+ds_min_f32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x24,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_f32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x24,0xd8,0xff,0x02,0x00,0x00]
+
+ds_min_f32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x24,0xd8,0x01,0xff,0x00,0x00]
+
+ds_min_f32 v1, v2
+// CHECK: [0x00,0x00,0x24,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_f32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x24,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_f32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x24,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_f32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x25,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_f32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x26,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_f32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x26,0xd8,0xff,0x02,0x00,0x00]
+
+ds_max_f32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x26,0xd8,0x01,0xff,0x00,0x00]
+
+ds_max_f32 v1, v2
+// CHECK: [0x00,0x00,0x26,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_f32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x26,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_f32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x26,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_f32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x27,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_f32 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x2a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_f32 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x2a,0xd8,0xff,0x02,0x00,0x00]
+
+ds_add_f32 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x2a,0xd8,0x01,0xff,0x00,0x00]
+
+ds_add_f32 v1, v2
+// CHECK: [0x00,0x00,0x2a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_f32 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x2a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_f32 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x2a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_f32 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x2b,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b8 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x3c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b8 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x3c,0xd8,0xff,0x02,0x00,0x00]
+
+ds_write_b8 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x3c,0xd8,0x01,0xff,0x00,0x00]
+
+ds_write_b8 v1, v2
+// CHECK: [0x00,0x00,0x3c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b8 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x3c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b8 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x3c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b8 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x3d,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b16 v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x3e,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b16 v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x3e,0xd8,0xff,0x02,0x00,0x00]
+
+ds_write_b16 v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x3e,0xd8,0x01,0xff,0x00,0x00]
+
+ds_write_b16 v1, v2
+// CHECK: [0x00,0x00,0x3e,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b16 v1, v2 offset:0
+// CHECK: [0x00,0x00,0x3e,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b16 v1, v2 offset:4
+// CHECK: [0x04,0x00,0x3e,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b16 v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x3f,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_rtn_u32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x40,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x40,0xd8,0x01,0x02,0x00,0xff]
+
+ds_add_rtn_u32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x40,0xd8,0xff,0x02,0x00,0x05]
+
+ds_add_rtn_u32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x40,0xd8,0x01,0xff,0x00,0x05]
+
+ds_add_rtn_u32 v5, v1, v2
+// CHECK: [0x00,0x00,0x40,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x40,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x40,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x41,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x42,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x42,0xd8,0x01,0x02,0x00,0xff]
+
+ds_sub_rtn_u32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x42,0xd8,0xff,0x02,0x00,0x05]
+
+ds_sub_rtn_u32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x42,0xd8,0x01,0xff,0x00,0x05]
+
+ds_sub_rtn_u32 v5, v1, v2
+// CHECK: [0x00,0x00,0x42,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x42,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x42,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x43,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x44,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x44,0xd8,0x01,0x02,0x00,0xff]
+
+ds_rsub_rtn_u32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x44,0xd8,0xff,0x02,0x00,0x05]
+
+ds_rsub_rtn_u32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x44,0xd8,0x01,0xff,0x00,0x05]
+
+ds_rsub_rtn_u32 v5, v1, v2
+// CHECK: [0x00,0x00,0x44,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x44,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x44,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x45,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x46,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x46,0xd8,0x01,0x02,0x00,0xff]
+
+ds_inc_rtn_u32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x46,0xd8,0xff,0x02,0x00,0x05]
+
+ds_inc_rtn_u32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x46,0xd8,0x01,0xff,0x00,0x05]
+
+ds_inc_rtn_u32 v5, v1, v2
+// CHECK: [0x00,0x00,0x46,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x46,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x46,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x47,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x48,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x48,0xd8,0x01,0x02,0x00,0xff]
+
+ds_dec_rtn_u32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x48,0xd8,0xff,0x02,0x00,0x05]
+
+ds_dec_rtn_u32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x48,0xd8,0x01,0xff,0x00,0x05]
+
+ds_dec_rtn_u32 v5, v1, v2
+// CHECK: [0x00,0x00,0x48,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x48,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x48,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x49,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x4a,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x4a,0xd8,0x01,0x02,0x00,0xff]
+
+ds_min_rtn_i32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x4a,0xd8,0xff,0x02,0x00,0x05]
+
+ds_min_rtn_i32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x4a,0xd8,0x01,0xff,0x00,0x05]
+
+ds_min_rtn_i32 v5, v1, v2
+// CHECK: [0x00,0x00,0x4a,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x4a,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x4a,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x4b,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x4c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x4c,0xd8,0x01,0x02,0x00,0xff]
+
+ds_max_rtn_i32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x4c,0xd8,0xff,0x02,0x00,0x05]
+
+ds_max_rtn_i32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x4c,0xd8,0x01,0xff,0x00,0x05]
+
+ds_max_rtn_i32 v5, v1, v2
+// CHECK: [0x00,0x00,0x4c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x4c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x4c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x4d,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x4e,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x4e,0xd8,0x01,0x02,0x00,0xff]
+
+ds_min_rtn_u32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x4e,0xd8,0xff,0x02,0x00,0x05]
+
+ds_min_rtn_u32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x4e,0xd8,0x01,0xff,0x00,0x05]
+
+ds_min_rtn_u32 v5, v1, v2
+// CHECK: [0x00,0x00,0x4e,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x4e,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x4e,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x4f,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x50,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x50,0xd8,0x01,0x02,0x00,0xff]
+
+ds_max_rtn_u32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x50,0xd8,0xff,0x02,0x00,0x05]
+
+ds_max_rtn_u32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x50,0xd8,0x01,0xff,0x00,0x05]
+
+ds_max_rtn_u32 v5, v1, v2
+// CHECK: [0x00,0x00,0x50,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x50,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x50,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x51,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x52,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x52,0xd8,0x01,0x02,0x00,0xff]
+
+ds_and_rtn_b32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x52,0xd8,0xff,0x02,0x00,0x05]
+
+ds_and_rtn_b32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x52,0xd8,0x01,0xff,0x00,0x05]
+
+ds_and_rtn_b32 v5, v1, v2
+// CHECK: [0x00,0x00,0x52,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x52,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x52,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x53,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x54,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x54,0xd8,0x01,0x02,0x00,0xff]
+
+ds_or_rtn_b32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x54,0xd8,0xff,0x02,0x00,0x05]
+
+ds_or_rtn_b32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x54,0xd8,0x01,0xff,0x00,0x05]
+
+ds_or_rtn_b32 v5, v1, v2
+// CHECK: [0x00,0x00,0x54,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x54,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x54,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x55,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x56,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x56,0xd8,0x01,0x02,0x00,0xff]
+
+ds_xor_rtn_b32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x56,0xd8,0xff,0x02,0x00,0x05]
+
+ds_xor_rtn_b32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x56,0xd8,0x01,0xff,0x00,0x05]
+
+ds_xor_rtn_b32 v5, v1, v2
+// CHECK: [0x00,0x00,0x56,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x56,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x56,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x57,0xd8,0x01,0x02,0x00,0x05]
+
+ds_mskor_rtn_b32 v5, v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x58,0xd8,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b32 v255, v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x58,0xd8,0x01,0x02,0x03,0xff]
+
+ds_mskor_rtn_b32 v5, v255, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x58,0xd8,0xff,0x02,0x03,0x05]
+
+ds_mskor_rtn_b32 v5, v1, v255, v3 offset:65535
+// CHECK: [0xff,0xff,0x58,0xd8,0x01,0xff,0x03,0x05]
+
+ds_mskor_rtn_b32 v5, v1, v2, v255 offset:65535
+// CHECK: [0xff,0xff,0x58,0xd8,0x01,0x02,0xff,0x05]
+
+ds_mskor_rtn_b32 v5, v1, v2, v3
+// CHECK: [0x00,0x00,0x58,0xd8,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b32 v5, v1, v2, v3 offset:0
+// CHECK: [0x00,0x00,0x58,0xd8,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b32 v5, v1, v2, v3 offset:4
+// CHECK: [0x04,0x00,0x58,0xd8,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b32 v5, v1, v2, v3 offset:65535 gds
+// CHECK: [0xff,0xff,0x59,0xd8,0x01,0x02,0x03,0x05]
+
+ds_wrxchg_rtn_b32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x5a,0xd8,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x5a,0xd8,0x01,0x02,0x00,0xff]
+
+ds_wrxchg_rtn_b32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x5a,0xd8,0xff,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x5a,0xd8,0x01,0xff,0x00,0x05]
+
+ds_wrxchg_rtn_b32 v5, v1, v2
+// CHECK: [0x00,0x00,0x5a,0xd8,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x5a,0xd8,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x5a,0xd8,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x5b,0xd8,0x01,0x02,0x00,0x05]
+
+ds_cmpst_rtn_b32 v5, v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x60,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b32 v255, v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x60,0xd8,0x01,0x02,0x03,0xff]
+
+ds_cmpst_rtn_b32 v5, v255, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x60,0xd8,0xff,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b32 v5, v1, v255, v3 offset:65535
+// CHECK: [0xff,0xff,0x60,0xd8,0x01,0xff,0x03,0x05]
+
+ds_cmpst_rtn_b32 v5, v1, v2, v255 offset:65535
+// CHECK: [0xff,0xff,0x60,0xd8,0x01,0x02,0xff,0x05]
+
+ds_cmpst_rtn_b32 v5, v1, v2, v3
+// CHECK: [0x00,0x00,0x60,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b32 v5, v1, v2, v3 offset:0
+// CHECK: [0x00,0x00,0x60,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b32 v5, v1, v2, v3 offset:4
+// CHECK: [0x04,0x00,0x60,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b32 v5, v1, v2, v3 offset:65535 gds
+// CHECK: [0xff,0xff,0x61,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f32 v5, v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x62,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f32 v255, v1, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x62,0xd8,0x01,0x02,0x03,0xff]
+
+ds_cmpst_rtn_f32 v5, v255, v2, v3 offset:65535
+// CHECK: [0xff,0xff,0x62,0xd8,0xff,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f32 v5, v1, v255, v3 offset:65535
+// CHECK: [0xff,0xff,0x62,0xd8,0x01,0xff,0x03,0x05]
+
+ds_cmpst_rtn_f32 v5, v1, v2, v255 offset:65535
+// CHECK: [0xff,0xff,0x62,0xd8,0x01,0x02,0xff,0x05]
+
+ds_cmpst_rtn_f32 v5, v1, v2, v3
+// CHECK: [0x00,0x00,0x62,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f32 v5, v1, v2, v3 offset:0
+// CHECK: [0x00,0x00,0x62,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f32 v5, v1, v2, v3 offset:4
+// CHECK: [0x04,0x00,0x62,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f32 v5, v1, v2, v3 offset:65535 gds
+// CHECK: [0xff,0xff,0x63,0xd8,0x01,0x02,0x03,0x05]
+
+ds_min_rtn_f32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x64,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_f32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x64,0xd8,0x01,0x02,0x00,0xff]
+
+ds_min_rtn_f32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x64,0xd8,0xff,0x02,0x00,0x05]
+
+ds_min_rtn_f32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x64,0xd8,0x01,0xff,0x00,0x05]
+
+ds_min_rtn_f32 v5, v1, v2
+// CHECK: [0x00,0x00,0x64,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_f32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x64,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_f32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x64,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_f32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x65,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x66,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x66,0xd8,0x01,0x02,0x00,0xff]
+
+ds_max_rtn_f32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x66,0xd8,0xff,0x02,0x00,0x05]
+
+ds_max_rtn_f32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x66,0xd8,0x01,0xff,0x00,0x05]
+
+ds_max_rtn_f32 v5, v1, v2
+// CHECK: [0x00,0x00,0x66,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x66,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x66,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x67,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_f32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x6a,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_f32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x6a,0xd8,0x01,0x02,0x00,0xff]
+
+ds_add_rtn_f32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x6a,0xd8,0xff,0x02,0x00,0x05]
+
+ds_add_rtn_f32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x6a,0xd8,0x01,0xff,0x00,0x05]
+
+ds_add_rtn_f32 v5, v1, v2
+// CHECK: [0x00,0x00,0x6a,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_f32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x6a,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_f32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x6a,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_f32 v5, v1, v2 offset:65535 gds
+// CHECK: [0xff,0xff,0x6b,0xd8,0x01,0x02,0x00,0x05]
+
+ds_read_b32 v5, v1 offset:65535
+// CHECK: [0xff,0xff,0x6c,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b32 v255, v1 offset:65535
+// CHECK: [0xff,0xff,0x6c,0xd8,0x01,0x00,0x00,0xff]
+
+ds_read_b32 v5, v255 offset:65535
+// CHECK: [0xff,0xff,0x6c,0xd8,0xff,0x00,0x00,0x05]
+
+ds_read_b32 v5, v1
+// CHECK: [0x00,0x00,0x6c,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b32 v5, v1 offset:0
+// CHECK: [0x00,0x00,0x6c,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b32 v5, v1 offset:4
+// CHECK: [0x04,0x00,0x6c,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b32 v5, v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x6d,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b32 v[5:6], v1 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x6e,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b32 v[254:255], v1 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x6e,0xd8,0x01,0x00,0x00,0xfe]
+
+ds_read2_b32 v[5:6], v255 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x6e,0xd8,0xff,0x00,0x00,0x05]
+
+ds_read2_b32 v[5:6], v1 offset1:255
+// CHECK: [0x00,0xff,0x6e,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b32 v[5:6], v1 offset0:0 offset1:255
+// CHECK: [0x00,0xff,0x6e,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b32 v[5:6], v1 offset0:16 offset1:255
+// CHECK: [0x10,0xff,0x6e,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b32 v[5:6], v1 offset0:127
+// CHECK: [0x7f,0x00,0x6e,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b32 v[5:6], v1 offset0:127 offset1:0
+// CHECK: [0x7f,0x00,0x6e,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b32 v[5:6], v1 offset0:127 offset1:1
+// CHECK: [0x7f,0x01,0x6e,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b32 v[5:6], v1 offset0:127 offset1:255 gds
+// CHECK: [0x7f,0xff,0x6f,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b32 v[5:6], v1 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x70,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b32 v[254:255], v1 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x70,0xd8,0x01,0x00,0x00,0xfe]
+
+ds_read2st64_b32 v[5:6], v255 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x70,0xd8,0xff,0x00,0x00,0x05]
+
+ds_read2st64_b32 v[5:6], v1 offset1:255
+// CHECK: [0x00,0xff,0x70,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b32 v[5:6], v1 offset0:0 offset1:255
+// CHECK: [0x00,0xff,0x70,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b32 v[5:6], v1 offset0:16 offset1:255
+// CHECK: [0x10,0xff,0x70,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b32 v[5:6], v1 offset0:127
+// CHECK: [0x7f,0x00,0x70,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b32 v[5:6], v1 offset0:127 offset1:0
+// CHECK: [0x7f,0x00,0x70,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b32 v[5:6], v1 offset0:127 offset1:1
+// CHECK: [0x7f,0x01,0x70,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b32 v[5:6], v1 offset0:127 offset1:255 gds
+// CHECK: [0x7f,0xff,0x71,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i8 v5, v1 offset:65535
+// CHECK: [0xff,0xff,0x72,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i8 v255, v1 offset:65535
+// CHECK: [0xff,0xff,0x72,0xd8,0x01,0x00,0x00,0xff]
+
+ds_read_i8 v5, v255 offset:65535
+// CHECK: [0xff,0xff,0x72,0xd8,0xff,0x00,0x00,0x05]
+
+ds_read_i8 v5, v1
+// CHECK: [0x00,0x00,0x72,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i8 v5, v1 offset:0
+// CHECK: [0x00,0x00,0x72,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i8 v5, v1 offset:4
+// CHECK: [0x04,0x00,0x72,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i8 v5, v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x73,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u8 v5, v1 offset:65535
+// CHECK: [0xff,0xff,0x74,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u8 v255, v1 offset:65535
+// CHECK: [0xff,0xff,0x74,0xd8,0x01,0x00,0x00,0xff]
+
+ds_read_u8 v5, v255 offset:65535
+// CHECK: [0xff,0xff,0x74,0xd8,0xff,0x00,0x00,0x05]
+
+ds_read_u8 v5, v1
+// CHECK: [0x00,0x00,0x74,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u8 v5, v1 offset:0
+// CHECK: [0x00,0x00,0x74,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u8 v5, v1 offset:4
+// CHECK: [0x04,0x00,0x74,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u8 v5, v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x75,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i16 v5, v1 offset:65535
+// CHECK: [0xff,0xff,0x76,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i16 v255, v1 offset:65535
+// CHECK: [0xff,0xff,0x76,0xd8,0x01,0x00,0x00,0xff]
+
+ds_read_i16 v5, v255 offset:65535
+// CHECK: [0xff,0xff,0x76,0xd8,0xff,0x00,0x00,0x05]
+
+ds_read_i16 v5, v1
+// CHECK: [0x00,0x00,0x76,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i16 v5, v1 offset:0
+// CHECK: [0x00,0x00,0x76,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i16 v5, v1 offset:4
+// CHECK: [0x04,0x00,0x76,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i16 v5, v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x77,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u16 v5, v1 offset:65535
+// CHECK: [0xff,0xff,0x78,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u16 v255, v1 offset:65535
+// CHECK: [0xff,0xff,0x78,0xd8,0x01,0x00,0x00,0xff]
+
+ds_read_u16 v5, v255 offset:65535
+// CHECK: [0xff,0xff,0x78,0xd8,0xff,0x00,0x00,0x05]
+
+ds_read_u16 v5, v1
+// CHECK: [0x00,0x00,0x78,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u16 v5, v1 offset:0
+// CHECK: [0x00,0x00,0x78,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u16 v5, v1 offset:4
+// CHECK: [0x04,0x00,0x78,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u16 v5, v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x79,0xd8,0x01,0x00,0x00,0x05]
+
+ds_swizzle_b32 v5, v1 offset:65535
+// CHECK: [0xff,0xff,0x7a,0xd8,0x01,0x00,0x00,0x05]
+
+ds_swizzle_b32 v255, v1 offset:65535
+// CHECK: [0xff,0xff,0x7a,0xd8,0x01,0x00,0x00,0xff]
+
+ds_swizzle_b32 v5, v255 offset:65535
+// CHECK: [0xff,0xff,0x7a,0xd8,0xff,0x00,0x00,0x05]
+
+ds_swizzle_b32 v5, v1
+// CHECK: [0x00,0x00,0x7a,0xd8,0x01,0x00,0x00,0x05]
+
+ds_swizzle_b32 v5, v1 offset:0
+// CHECK: [0x00,0x00,0x7a,0xd8,0x01,0x00,0x00,0x05]
+
+ds_swizzle_b32 v5, v1 offset:4
+// CHECK: [0x04,0x00,0x7a,0xd8,0x01,0x00,0x00,0x05]
+
+ds_swizzle_b32 v5, v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x7b,0xd8,0x01,0x00,0x00,0x05]
+
+ds_permute_b32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x7c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_permute_b32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x7c,0xd8,0x01,0x02,0x00,0xff]
+
+ds_permute_b32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x7c,0xd8,0xff,0x02,0x00,0x05]
+
+ds_permute_b32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x7c,0xd8,0x01,0xff,0x00,0x05]
+
+ds_permute_b32 v5, v1, v2
+// CHECK: [0x00,0x00,0x7c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_permute_b32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x7c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_permute_b32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x7c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_bpermute_b32 v5, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x7e,0xd8,0x01,0x02,0x00,0x05]
+
+ds_bpermute_b32 v255, v1, v2 offset:65535
+// CHECK: [0xff,0xff,0x7e,0xd8,0x01,0x02,0x00,0xff]
+
+ds_bpermute_b32 v5, v255, v2 offset:65535
+// CHECK: [0xff,0xff,0x7e,0xd8,0xff,0x02,0x00,0x05]
+
+ds_bpermute_b32 v5, v1, v255 offset:65535
+// CHECK: [0xff,0xff,0x7e,0xd8,0x01,0xff,0x00,0x05]
+
+ds_bpermute_b32 v5, v1, v2
+// CHECK: [0x00,0x00,0x7e,0xd8,0x01,0x02,0x00,0x05]
+
+ds_bpermute_b32 v5, v1, v2 offset:0
+// CHECK: [0x00,0x00,0x7e,0xd8,0x01,0x02,0x00,0x05]
+
+ds_bpermute_b32 v5, v1, v2 offset:4
+// CHECK: [0x04,0x00,0x7e,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_u64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x80,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_u64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x80,0xd8,0xff,0x02,0x00,0x00]
+
+ds_add_u64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x80,0xd8,0x01,0xfe,0x00,0x00]
+
+ds_add_u64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x80,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_u64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x80,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_u64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x80,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_u64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x81,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x82,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x82,0xd8,0xff,0x02,0x00,0x00]
+
+ds_sub_u64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x82,0xd8,0x01,0xfe,0x00,0x00]
+
+ds_sub_u64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x82,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x82,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x82,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x83,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x84,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x84,0xd8,0xff,0x02,0x00,0x00]
+
+ds_rsub_u64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x84,0xd8,0x01,0xfe,0x00,0x00]
+
+ds_rsub_u64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x84,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x84,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x84,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x85,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x86,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x86,0xd8,0xff,0x02,0x00,0x00]
+
+ds_inc_u64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x86,0xd8,0x01,0xfe,0x00,0x00]
+
+ds_inc_u64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x86,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x86,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x86,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x87,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x88,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x88,0xd8,0xff,0x02,0x00,0x00]
+
+ds_dec_u64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x88,0xd8,0x01,0xfe,0x00,0x00]
+
+ds_dec_u64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x88,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x88,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x88,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x89,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x8a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x8a,0xd8,0xff,0x02,0x00,0x00]
+
+ds_min_i64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x8a,0xd8,0x01,0xfe,0x00,0x00]
+
+ds_min_i64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x8a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x8a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x8a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x8b,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x8c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x8c,0xd8,0xff,0x02,0x00,0x00]
+
+ds_max_i64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x8c,0xd8,0x01,0xfe,0x00,0x00]
+
+ds_max_i64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x8c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x8c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x8c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x8d,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x8e,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x8e,0xd8,0xff,0x02,0x00,0x00]
+
+ds_min_u64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x8e,0xd8,0x01,0xfe,0x00,0x00]
+
+ds_min_u64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x8e,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x8e,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x8e,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x8f,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x90,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x90,0xd8,0xff,0x02,0x00,0x00]
+
+ds_max_u64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x90,0xd8,0x01,0xfe,0x00,0x00]
+
+ds_max_u64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x90,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x90,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x90,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x91,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x92,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x92,0xd8,0xff,0x02,0x00,0x00]
+
+ds_and_b64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x92,0xd8,0x01,0xfe,0x00,0x00]
+
+ds_and_b64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x92,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x92,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x92,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x93,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x94,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x94,0xd8,0xff,0x02,0x00,0x00]
+
+ds_or_b64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x94,0xd8,0x01,0xfe,0x00,0x00]
+
+ds_or_b64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x94,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x94,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x94,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x95,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x96,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x96,0xd8,0xff,0x02,0x00,0x00]
+
+ds_xor_b64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x96,0xd8,0x01,0xfe,0x00,0x00]
+
+ds_xor_b64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x96,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x96,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x96,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x97,0xd8,0x01,0x02,0x00,0x00]
+
+ds_mskor_b64 v1, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0x98,0xd8,0x01,0x02,0x03,0x00]
+
+ds_mskor_b64 v255, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0x98,0xd8,0xff,0x02,0x03,0x00]
+
+ds_mskor_b64 v1, v[254:255], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0x98,0xd8,0x01,0xfe,0x03,0x00]
+
+ds_mskor_b64 v1, v[2:3], v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x98,0xd8,0x01,0x02,0xfe,0x00]
+
+ds_mskor_b64 v1, v[2:3], v[3:4]
+// CHECK: [0x00,0x00,0x98,0xd8,0x01,0x02,0x03,0x00]
+
+ds_mskor_b64 v1, v[2:3], v[3:4] offset:0
+// CHECK: [0x00,0x00,0x98,0xd8,0x01,0x02,0x03,0x00]
+
+ds_mskor_b64 v1, v[2:3], v[3:4] offset:4
+// CHECK: [0x04,0x00,0x98,0xd8,0x01,0x02,0x03,0x00]
+
+ds_mskor_b64 v1, v[2:3], v[3:4] offset:65535 gds
+// CHECK: [0xff,0xff,0x99,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write_b64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x9a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0x9a,0xd8,0xff,0x02,0x00,0x00]
+
+ds_write_b64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0x9a,0xd8,0x01,0xfe,0x00,0x00]
+
+ds_write_b64 v1, v[2:3]
+// CHECK: [0x00,0x00,0x9a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0x9a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0x9a,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0x9b,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write2_b64 v1, v[2:3], v[3:4] offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x9c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b64 v255, v[2:3], v[3:4] offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x9c,0xd8,0xff,0x02,0x03,0x00]
+
+ds_write2_b64 v1, v[254:255], v[3:4] offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x9c,0xd8,0x01,0xfe,0x03,0x00]
+
+ds_write2_b64 v1, v[2:3], v[254:255] offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x9c,0xd8,0x01,0x02,0xfe,0x00]
+
+ds_write2_b64 v1, v[2:3], v[3:4] offset1:255
+// CHECK: [0x00,0xff,0x9c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b64 v1, v[2:3], v[3:4] offset0:0 offset1:255
+// CHECK: [0x00,0xff,0x9c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b64 v1, v[2:3], v[3:4] offset0:16 offset1:255
+// CHECK: [0x10,0xff,0x9c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b64 v1, v[2:3], v[3:4] offset0:127
+// CHECK: [0x7f,0x00,0x9c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b64 v1, v[2:3], v[3:4] offset0:127 offset1:0
+// CHECK: [0x7f,0x00,0x9c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b64 v1, v[2:3], v[3:4] offset0:127 offset1:1
+// CHECK: [0x7f,0x01,0x9c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b64 v1, v[2:3], v[3:4] offset0:127 offset1:255 gds
+// CHECK: [0x7f,0xff,0x9d,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[3:4] offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x9e,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b64 v255, v[2:3], v[3:4] offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x9e,0xd8,0xff,0x02,0x03,0x00]
+
+ds_write2st64_b64 v1, v[254:255], v[3:4] offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x9e,0xd8,0x01,0xfe,0x03,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[254:255] offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0x9e,0xd8,0x01,0x02,0xfe,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[3:4] offset1:255
+// CHECK: [0x00,0xff,0x9e,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[3:4] offset0:0 offset1:255
+// CHECK: [0x00,0xff,0x9e,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[3:4] offset0:16 offset1:255
+// CHECK: [0x10,0xff,0x9e,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[3:4] offset0:127
+// CHECK: [0x7f,0x00,0x9e,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[3:4] offset0:127 offset1:0
+// CHECK: [0x7f,0x00,0x9e,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[3:4] offset0:127 offset1:1
+// CHECK: [0x7f,0x01,0x9e,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[3:4] offset0:127 offset1:255 gds
+// CHECK: [0x7f,0xff,0x9f,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b64 v1, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xa0,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b64 v255, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xa0,0xd8,0xff,0x02,0x03,0x00]
+
+ds_cmpst_b64 v1, v[254:255], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xa0,0xd8,0x01,0xfe,0x03,0x00]
+
+ds_cmpst_b64 v1, v[2:3], v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xa0,0xd8,0x01,0x02,0xfe,0x00]
+
+ds_cmpst_b64 v1, v[2:3], v[3:4]
+// CHECK: [0x00,0x00,0xa0,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b64 v1, v[2:3], v[3:4] offset:0
+// CHECK: [0x00,0x00,0xa0,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b64 v1, v[2:3], v[3:4] offset:4
+// CHECK: [0x04,0x00,0xa0,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_b64 v1, v[2:3], v[3:4] offset:65535 gds
+// CHECK: [0xff,0xff,0xa1,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f64 v1, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xa2,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f64 v255, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xa2,0xd8,0xff,0x02,0x03,0x00]
+
+ds_cmpst_f64 v1, v[254:255], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xa2,0xd8,0x01,0xfe,0x03,0x00]
+
+ds_cmpst_f64 v1, v[2:3], v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xa2,0xd8,0x01,0x02,0xfe,0x00]
+
+ds_cmpst_f64 v1, v[2:3], v[3:4]
+// CHECK: [0x00,0x00,0xa2,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f64 v1, v[2:3], v[3:4] offset:0
+// CHECK: [0x00,0x00,0xa2,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f64 v1, v[2:3], v[3:4] offset:4
+// CHECK: [0x04,0x00,0xa2,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpst_f64 v1, v[2:3], v[3:4] offset:65535 gds
+// CHECK: [0xff,0xff,0xa3,0xd8,0x01,0x02,0x03,0x00]
+
+ds_min_f64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xa4,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_f64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xa4,0xd8,0xff,0x02,0x00,0x00]
+
+ds_min_f64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xa4,0xd8,0x01,0xfe,0x00,0x00]
+
+ds_min_f64 v1, v[2:3]
+// CHECK: [0x00,0x00,0xa4,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_f64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xa4,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_f64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xa4,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_f64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xa5,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_f64 v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xa6,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_f64 v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xa6,0xd8,0xff,0x02,0x00,0x00]
+
+ds_max_f64 v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xa6,0xd8,0x01,0xfe,0x00,0x00]
+
+ds_max_f64 v1, v[2:3]
+// CHECK: [0x00,0x00,0xa6,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_f64 v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xa6,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_f64 v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xa6,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_f64 v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xa7,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_rtn_u64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xc0,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xc0,0xd8,0x01,0x02,0x00,0xfe]
+
+ds_add_rtn_u64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xc0,0xd8,0xff,0x02,0x00,0x05]
+
+ds_add_rtn_u64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xc0,0xd8,0x01,0xfe,0x00,0x05]
+
+ds_add_rtn_u64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xc0,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xc0,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xc0,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xc1,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xc2,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xc2,0xd8,0x01,0x02,0x00,0xfe]
+
+ds_sub_rtn_u64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xc2,0xd8,0xff,0x02,0x00,0x05]
+
+ds_sub_rtn_u64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xc2,0xd8,0x01,0xfe,0x00,0x05]
+
+ds_sub_rtn_u64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xc2,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xc2,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xc2,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xc3,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xc4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xc4,0xd8,0x01,0x02,0x00,0xfe]
+
+ds_rsub_rtn_u64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xc4,0xd8,0xff,0x02,0x00,0x05]
+
+ds_rsub_rtn_u64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xc4,0xd8,0x01,0xfe,0x00,0x05]
+
+ds_rsub_rtn_u64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xc4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xc4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xc4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xc5,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xc6,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xc6,0xd8,0x01,0x02,0x00,0xfe]
+
+ds_inc_rtn_u64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xc6,0xd8,0xff,0x02,0x00,0x05]
+
+ds_inc_rtn_u64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xc6,0xd8,0x01,0xfe,0x00,0x05]
+
+ds_inc_rtn_u64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xc6,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xc6,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xc6,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xc7,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xc8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xc8,0xd8,0x01,0x02,0x00,0xfe]
+
+ds_dec_rtn_u64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xc8,0xd8,0xff,0x02,0x00,0x05]
+
+ds_dec_rtn_u64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xc8,0xd8,0x01,0xfe,0x00,0x05]
+
+ds_dec_rtn_u64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xc8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xc8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xc8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xc9,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xca,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xca,0xd8,0x01,0x02,0x00,0xfe]
+
+ds_min_rtn_i64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xca,0xd8,0xff,0x02,0x00,0x05]
+
+ds_min_rtn_i64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xca,0xd8,0x01,0xfe,0x00,0x05]
+
+ds_min_rtn_i64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xca,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xca,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xca,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xcb,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xcc,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xcc,0xd8,0x01,0x02,0x00,0xfe]
+
+ds_max_rtn_i64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xcc,0xd8,0xff,0x02,0x00,0x05]
+
+ds_max_rtn_i64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xcc,0xd8,0x01,0xfe,0x00,0x05]
+
+ds_max_rtn_i64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xcc,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xcc,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xcc,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xcd,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xce,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xce,0xd8,0x01,0x02,0x00,0xfe]
+
+ds_min_rtn_u64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xce,0xd8,0xff,0x02,0x00,0x05]
+
+ds_min_rtn_u64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xce,0xd8,0x01,0xfe,0x00,0x05]
+
+ds_min_rtn_u64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xce,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xce,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xce,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xcf,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xd0,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xd0,0xd8,0x01,0x02,0x00,0xfe]
+
+ds_max_rtn_u64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xd0,0xd8,0xff,0x02,0x00,0x05]
+
+ds_max_rtn_u64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xd0,0xd8,0x01,0xfe,0x00,0x05]
+
+ds_max_rtn_u64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xd0,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xd0,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xd0,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xd1,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xd2,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xd2,0xd8,0x01,0x02,0x00,0xfe]
+
+ds_and_rtn_b64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xd2,0xd8,0xff,0x02,0x00,0x05]
+
+ds_and_rtn_b64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xd2,0xd8,0x01,0xfe,0x00,0x05]
+
+ds_and_rtn_b64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xd2,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xd2,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xd2,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xd3,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xd4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xd4,0xd8,0x01,0x02,0x00,0xfe]
+
+ds_or_rtn_b64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xd4,0xd8,0xff,0x02,0x00,0x05]
+
+ds_or_rtn_b64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xd4,0xd8,0x01,0xfe,0x00,0x05]
+
+ds_or_rtn_b64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xd4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xd4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xd4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xd5,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xd6,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xd6,0xd8,0x01,0x02,0x00,0xfe]
+
+ds_xor_rtn_b64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xd6,0xd8,0xff,0x02,0x00,0x05]
+
+ds_xor_rtn_b64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xd6,0xd8,0x01,0xfe,0x00,0x05]
+
+ds_xor_rtn_b64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xd6,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xd6,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xd6,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xd7,0xd8,0x01,0x02,0x00,0x05]
+
+ds_mskor_rtn_b64 v[5:6], v1, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xd8,0xd8,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b64 v[254:255], v1, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xd8,0xd8,0x01,0x02,0x03,0xfe]
+
+ds_mskor_rtn_b64 v[5:6], v255, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xd8,0xd8,0xff,0x02,0x03,0x05]
+
+ds_mskor_rtn_b64 v[5:6], v1, v[254:255], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xd8,0xd8,0x01,0xfe,0x03,0x05]
+
+ds_mskor_rtn_b64 v[5:6], v1, v[2:3], v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xd8,0xd8,0x01,0x02,0xfe,0x05]
+
+ds_mskor_rtn_b64 v[5:6], v1, v[2:3], v[3:4]
+// CHECK: [0x00,0x00,0xd8,0xd8,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b64 v[5:6], v1, v[2:3], v[3:4] offset:0
+// CHECK: [0x00,0x00,0xd8,0xd8,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b64 v[5:6], v1, v[2:3], v[3:4] offset:4
+// CHECK: [0x04,0x00,0xd8,0xd8,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b64 v[5:6], v1, v[2:3], v[3:4] offset:65535 gds
+// CHECK: [0xff,0xff,0xd9,0xd8,0x01,0x02,0x03,0x05]
+
+ds_wrxchg_rtn_b64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xda,0xd8,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xda,0xd8,0x01,0x02,0x00,0xfe]
+
+ds_wrxchg_rtn_b64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xda,0xd8,0xff,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xda,0xd8,0x01,0xfe,0x00,0x05]
+
+ds_wrxchg_rtn_b64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xda,0xd8,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xda,0xd8,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xda,0xd8,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xdb,0xd8,0x01,0x02,0x00,0x05]
+
+ds_cmpst_rtn_b64 v[5:6], v1, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xe0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b64 v[254:255], v1, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xe0,0xd8,0x01,0x02,0x03,0xfe]
+
+ds_cmpst_rtn_b64 v[5:6], v255, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xe0,0xd8,0xff,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b64 v[5:6], v1, v[254:255], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xe0,0xd8,0x01,0xfe,0x03,0x05]
+
+ds_cmpst_rtn_b64 v[5:6], v1, v[2:3], v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xe0,0xd8,0x01,0x02,0xfe,0x05]
+
+ds_cmpst_rtn_b64 v[5:6], v1, v[2:3], v[3:4]
+// CHECK: [0x00,0x00,0xe0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b64 v[5:6], v1, v[2:3], v[3:4] offset:0
+// CHECK: [0x00,0x00,0xe0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b64 v[5:6], v1, v[2:3], v[3:4] offset:4
+// CHECK: [0x04,0x00,0xe0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_b64 v[5:6], v1, v[2:3], v[3:4] offset:65535 gds
+// CHECK: [0xff,0xff,0xe1,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f64 v[5:6], v1, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xe2,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f64 v[254:255], v1, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xe2,0xd8,0x01,0x02,0x03,0xfe]
+
+ds_cmpst_rtn_f64 v[5:6], v255, v[2:3], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xe2,0xd8,0xff,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f64 v[5:6], v1, v[254:255], v[3:4] offset:65535
+// CHECK: [0xff,0xff,0xe2,0xd8,0x01,0xfe,0x03,0x05]
+
+ds_cmpst_rtn_f64 v[5:6], v1, v[2:3], v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xe2,0xd8,0x01,0x02,0xfe,0x05]
+
+ds_cmpst_rtn_f64 v[5:6], v1, v[2:3], v[3:4]
+// CHECK: [0x00,0x00,0xe2,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f64 v[5:6], v1, v[2:3], v[3:4] offset:0
+// CHECK: [0x00,0x00,0xe2,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f64 v[5:6], v1, v[2:3], v[3:4] offset:4
+// CHECK: [0x04,0x00,0xe2,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpst_rtn_f64 v[5:6], v1, v[2:3], v[3:4] offset:65535 gds
+// CHECK: [0xff,0xff,0xe3,0xd8,0x01,0x02,0x03,0x05]
+
+ds_min_rtn_f64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xe4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_f64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xe4,0xd8,0x01,0x02,0x00,0xfe]
+
+ds_min_rtn_f64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xe4,0xd8,0xff,0x02,0x00,0x05]
+
+ds_min_rtn_f64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xe4,0xd8,0x01,0xfe,0x00,0x05]
+
+ds_min_rtn_f64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xe4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_f64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xe4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_f64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xe4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_f64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xe5,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f64 v[5:6], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xe6,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f64 v[254:255], v1, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xe6,0xd8,0x01,0x02,0x00,0xfe]
+
+ds_max_rtn_f64 v[5:6], v255, v[2:3] offset:65535
+// CHECK: [0xff,0xff,0xe6,0xd8,0xff,0x02,0x00,0x05]
+
+ds_max_rtn_f64 v[5:6], v1, v[254:255] offset:65535
+// CHECK: [0xff,0xff,0xe6,0xd8,0x01,0xfe,0x00,0x05]
+
+ds_max_rtn_f64 v[5:6], v1, v[2:3]
+// CHECK: [0x00,0x00,0xe6,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f64 v[5:6], v1, v[2:3] offset:0
+// CHECK: [0x00,0x00,0xe6,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f64 v[5:6], v1, v[2:3] offset:4
+// CHECK: [0x04,0x00,0xe6,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_f64 v[5:6], v1, v[2:3] offset:65535 gds
+// CHECK: [0xff,0xff,0xe7,0xd8,0x01,0x02,0x00,0x05]
+
+ds_read_b64 v[5:6], v1 offset:65535
+// CHECK: [0xff,0xff,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b64 v[254:255], v1 offset:65535
+// CHECK: [0xff,0xff,0xec,0xd8,0x01,0x00,0x00,0xfe]
+
+ds_read_b64 v[5:6], v255 offset:65535
+// CHECK: [0xff,0xff,0xec,0xd8,0xff,0x00,0x00,0x05]
+
+ds_read_b64 v[5:6], v1
+// CHECK: [0x00,0x00,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b64 v[5:6], v1 offset:0
+// CHECK: [0x00,0x00,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b64 v[5:6], v1 offset:4
+// CHECK: [0x04,0x00,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b64 v[5:6], v1 offset:65535 gds
+// CHECK: [0xff,0xff,0xed,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b64 v[5:8], v1 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0xee,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b64 v[252:255], v1 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0xee,0xd8,0x01,0x00,0x00,0xfc]
+
+ds_read2_b64 v[5:8], v255 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0xee,0xd8,0xff,0x00,0x00,0x05]
+
+ds_read2_b64 v[5:8], v1 offset1:255
+// CHECK: [0x00,0xff,0xee,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b64 v[5:8], v1 offset0:0 offset1:255
+// CHECK: [0x00,0xff,0xee,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b64 v[5:8], v1 offset0:16 offset1:255
+// CHECK: [0x10,0xff,0xee,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b64 v[5:8], v1 offset0:127
+// CHECK: [0x7f,0x00,0xee,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b64 v[5:8], v1 offset0:127 offset1:0
+// CHECK: [0x7f,0x00,0xee,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b64 v[5:8], v1 offset0:127 offset1:1
+// CHECK: [0x7f,0x01,0xee,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2_b64 v[5:8], v1 offset0:127 offset1:255 gds
+// CHECK: [0x7f,0xff,0xef,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b64 v[5:8], v1 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b64 v[252:255], v1 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0xf0,0xd8,0x01,0x00,0x00,0xfc]
+
+ds_read2st64_b64 v[5:8], v255 offset0:127 offset1:255
+// CHECK: [0x7f,0xff,0xf0,0xd8,0xff,0x00,0x00,0x05]
+
+ds_read2st64_b64 v[5:8], v1 offset1:255
+// CHECK: [0x00,0xff,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b64 v[5:8], v1 offset0:0 offset1:255
+// CHECK: [0x00,0xff,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b64 v[5:8], v1 offset0:16 offset1:255
+// CHECK: [0x10,0xff,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b64 v[5:8], v1 offset0:127
+// CHECK: [0x7f,0x00,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b64 v[5:8], v1 offset0:127 offset1:0
+// CHECK: [0x7f,0x00,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b64 v[5:8], v1 offset0:127 offset1:1
+// CHECK: [0x7f,0x01,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read2st64_b64 v[5:8], v1 offset0:127 offset1:255 gds
+// CHECK: [0x7f,0xff,0xf1,0xd8,0x01,0x00,0x00,0x05]
+
+ds_add_src2_u32 v1 offset:65535
+// CHECK: [0xff,0xff,0x00,0xd9,0x01,0x00,0x00,0x00]
+
+ds_add_src2_u32 v255 offset:65535
+// CHECK: [0xff,0xff,0x00,0xd9,0xff,0x00,0x00,0x00]
+
+ds_add_src2_u32 v1
+// CHECK: [0x00,0x00,0x00,0xd9,0x01,0x00,0x00,0x00]
+
+ds_add_src2_u32 v1 offset:0
+// CHECK: [0x00,0x00,0x00,0xd9,0x01,0x00,0x00,0x00]
+
+ds_add_src2_u32 v1 offset:4
+// CHECK: [0x04,0x00,0x00,0xd9,0x01,0x00,0x00,0x00]
+
+ds_add_src2_u32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x01,0xd9,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u32 v1 offset:65535
+// CHECK: [0xff,0xff,0x02,0xd9,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u32 v255 offset:65535
+// CHECK: [0xff,0xff,0x02,0xd9,0xff,0x00,0x00,0x00]
+
+ds_sub_src2_u32 v1
+// CHECK: [0x00,0x00,0x02,0xd9,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u32 v1 offset:0
+// CHECK: [0x00,0x00,0x02,0xd9,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u32 v1 offset:4
+// CHECK: [0x04,0x00,0x02,0xd9,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x03,0xd9,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u32 v1 offset:65535
+// CHECK: [0xff,0xff,0x04,0xd9,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u32 v255 offset:65535
+// CHECK: [0xff,0xff,0x04,0xd9,0xff,0x00,0x00,0x00]
+
+ds_rsub_src2_u32 v1
+// CHECK: [0x00,0x00,0x04,0xd9,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u32 v1 offset:0
+// CHECK: [0x00,0x00,0x04,0xd9,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u32 v1 offset:4
+// CHECK: [0x04,0x00,0x04,0xd9,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x05,0xd9,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u32 v1 offset:65535
+// CHECK: [0xff,0xff,0x06,0xd9,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u32 v255 offset:65535
+// CHECK: [0xff,0xff,0x06,0xd9,0xff,0x00,0x00,0x00]
+
+ds_inc_src2_u32 v1
+// CHECK: [0x00,0x00,0x06,0xd9,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u32 v1 offset:0
+// CHECK: [0x00,0x00,0x06,0xd9,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u32 v1 offset:4
+// CHECK: [0x04,0x00,0x06,0xd9,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x07,0xd9,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u32 v1 offset:65535
+// CHECK: [0xff,0xff,0x08,0xd9,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u32 v255 offset:65535
+// CHECK: [0xff,0xff,0x08,0xd9,0xff,0x00,0x00,0x00]
+
+ds_dec_src2_u32 v1
+// CHECK: [0x00,0x00,0x08,0xd9,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u32 v1 offset:0
+// CHECK: [0x00,0x00,0x08,0xd9,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u32 v1 offset:4
+// CHECK: [0x04,0x00,0x08,0xd9,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x09,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i32 v1 offset:65535
+// CHECK: [0xff,0xff,0x0a,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i32 v255 offset:65535
+// CHECK: [0xff,0xff,0x0a,0xd9,0xff,0x00,0x00,0x00]
+
+ds_min_src2_i32 v1
+// CHECK: [0x00,0x00,0x0a,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i32 v1 offset:0
+// CHECK: [0x00,0x00,0x0a,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i32 v1 offset:4
+// CHECK: [0x04,0x00,0x0a,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x0b,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i32 v1 offset:65535
+// CHECK: [0xff,0xff,0x0c,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i32 v255 offset:65535
+// CHECK: [0xff,0xff,0x0c,0xd9,0xff,0x00,0x00,0x00]
+
+ds_max_src2_i32 v1
+// CHECK: [0x00,0x00,0x0c,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i32 v1 offset:0
+// CHECK: [0x00,0x00,0x0c,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i32 v1 offset:4
+// CHECK: [0x04,0x00,0x0c,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x0d,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u32 v1 offset:65535
+// CHECK: [0xff,0xff,0x0e,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u32 v255 offset:65535
+// CHECK: [0xff,0xff,0x0e,0xd9,0xff,0x00,0x00,0x00]
+
+ds_min_src2_u32 v1
+// CHECK: [0x00,0x00,0x0e,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u32 v1 offset:0
+// CHECK: [0x00,0x00,0x0e,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u32 v1 offset:4
+// CHECK: [0x04,0x00,0x0e,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x0f,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u32 v1 offset:65535
+// CHECK: [0xff,0xff,0x10,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u32 v255 offset:65535
+// CHECK: [0xff,0xff,0x10,0xd9,0xff,0x00,0x00,0x00]
+
+ds_max_src2_u32 v1
+// CHECK: [0x00,0x00,0x10,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u32 v1 offset:0
+// CHECK: [0x00,0x00,0x10,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u32 v1 offset:4
+// CHECK: [0x04,0x00,0x10,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x11,0xd9,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b32 v1 offset:65535
+// CHECK: [0xff,0xff,0x14,0xd9,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b32 v255 offset:65535
+// CHECK: [0xff,0xff,0x14,0xd9,0xff,0x00,0x00,0x00]
+
+ds_or_src2_b32 v1
+// CHECK: [0x00,0x00,0x14,0xd9,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b32 v1 offset:0
+// CHECK: [0x00,0x00,0x14,0xd9,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b32 v1 offset:4
+// CHECK: [0x04,0x00,0x14,0xd9,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x15,0xd9,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b32 v1 offset:65535
+// CHECK: [0xff,0xff,0x16,0xd9,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b32 v255 offset:65535
+// CHECK: [0xff,0xff,0x16,0xd9,0xff,0x00,0x00,0x00]
+
+ds_xor_src2_b32 v1
+// CHECK: [0x00,0x00,0x16,0xd9,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b32 v1 offset:0
+// CHECK: [0x00,0x00,0x16,0xd9,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b32 v1 offset:4
+// CHECK: [0x04,0x00,0x16,0xd9,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x17,0xd9,0x01,0x00,0x00,0x00]
+
+ds_write_src2_b32 v1
+// CHECK: [0x00,0x00,0x1a,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f32 v1 offset:65535
+// CHECK: [0xff,0xff,0x24,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f32 v255 offset:65535
+// CHECK: [0xff,0xff,0x24,0xd9,0xff,0x00,0x00,0x00]
+
+ds_min_src2_f32 v1
+// CHECK: [0x00,0x00,0x24,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f32 v1 offset:0
+// CHECK: [0x00,0x00,0x24,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f32 v1 offset:4
+// CHECK: [0x04,0x00,0x24,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x25,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f32 v1 offset:65535
+// CHECK: [0xff,0xff,0x26,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f32 v255 offset:65535
+// CHECK: [0xff,0xff,0x26,0xd9,0xff,0x00,0x00,0x00]
+
+ds_max_src2_f32 v1
+// CHECK: [0x00,0x00,0x26,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f32 v1 offset:0
+// CHECK: [0x00,0x00,0x26,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f32 v1 offset:4
+// CHECK: [0x04,0x00,0x26,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f32 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x27,0xd9,0x01,0x00,0x00,0x00]
+
+ds_add_src2_u64 v1 offset:65535
+// CHECK: [0xff,0xff,0x80,0xd9,0x01,0x00,0x00,0x00]
+
+ds_add_src2_u64 v255 offset:65535
+// CHECK: [0xff,0xff,0x80,0xd9,0xff,0x00,0x00,0x00]
+
+ds_add_src2_u64 v1
+// CHECK: [0x00,0x00,0x80,0xd9,0x01,0x00,0x00,0x00]
+
+ds_add_src2_u64 v1 offset:0
+// CHECK: [0x00,0x00,0x80,0xd9,0x01,0x00,0x00,0x00]
+
+ds_add_src2_u64 v1 offset:4
+// CHECK: [0x04,0x00,0x80,0xd9,0x01,0x00,0x00,0x00]
+
+ds_add_src2_u64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x81,0xd9,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u64 v1 offset:65535
+// CHECK: [0xff,0xff,0x82,0xd9,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u64 v255 offset:65535
+// CHECK: [0xff,0xff,0x82,0xd9,0xff,0x00,0x00,0x00]
+
+ds_sub_src2_u64 v1
+// CHECK: [0x00,0x00,0x82,0xd9,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u64 v1 offset:0
+// CHECK: [0x00,0x00,0x82,0xd9,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u64 v1 offset:4
+// CHECK: [0x04,0x00,0x82,0xd9,0x01,0x00,0x00,0x00]
+
+ds_sub_src2_u64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x83,0xd9,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u64 v1 offset:65535
+// CHECK: [0xff,0xff,0x84,0xd9,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u64 v255 offset:65535
+// CHECK: [0xff,0xff,0x84,0xd9,0xff,0x00,0x00,0x00]
+
+ds_rsub_src2_u64 v1
+// CHECK: [0x00,0x00,0x84,0xd9,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u64 v1 offset:0
+// CHECK: [0x00,0x00,0x84,0xd9,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u64 v1 offset:4
+// CHECK: [0x04,0x00,0x84,0xd9,0x01,0x00,0x00,0x00]
+
+ds_rsub_src2_u64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x85,0xd9,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u64 v1 offset:65535
+// CHECK: [0xff,0xff,0x86,0xd9,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u64 v255 offset:65535
+// CHECK: [0xff,0xff,0x86,0xd9,0xff,0x00,0x00,0x00]
+
+ds_inc_src2_u64 v1
+// CHECK: [0x00,0x00,0x86,0xd9,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u64 v1 offset:0
+// CHECK: [0x00,0x00,0x86,0xd9,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u64 v1 offset:4
+// CHECK: [0x04,0x00,0x86,0xd9,0x01,0x00,0x00,0x00]
+
+ds_inc_src2_u64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x87,0xd9,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u64 v1 offset:65535
+// CHECK: [0xff,0xff,0x88,0xd9,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u64 v255 offset:65535
+// CHECK: [0xff,0xff,0x88,0xd9,0xff,0x00,0x00,0x00]
+
+ds_dec_src2_u64 v1
+// CHECK: [0x00,0x00,0x88,0xd9,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u64 v1 offset:0
+// CHECK: [0x00,0x00,0x88,0xd9,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u64 v1 offset:4
+// CHECK: [0x04,0x00,0x88,0xd9,0x01,0x00,0x00,0x00]
+
+ds_dec_src2_u64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x89,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i64 v1 offset:65535
+// CHECK: [0xff,0xff,0x8a,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i64 v255 offset:65535
+// CHECK: [0xff,0xff,0x8a,0xd9,0xff,0x00,0x00,0x00]
+
+ds_min_src2_i64 v1
+// CHECK: [0x00,0x00,0x8a,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i64 v1 offset:0
+// CHECK: [0x00,0x00,0x8a,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i64 v1 offset:4
+// CHECK: [0x04,0x00,0x8a,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_i64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x8b,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i64 v1 offset:65535
+// CHECK: [0xff,0xff,0x8c,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i64 v255 offset:65535
+// CHECK: [0xff,0xff,0x8c,0xd9,0xff,0x00,0x00,0x00]
+
+ds_max_src2_i64 v1
+// CHECK: [0x00,0x00,0x8c,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i64 v1 offset:0
+// CHECK: [0x00,0x00,0x8c,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i64 v1 offset:4
+// CHECK: [0x04,0x00,0x8c,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_i64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x8d,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u64 v1 offset:65535
+// CHECK: [0xff,0xff,0x8e,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u64 v255 offset:65535
+// CHECK: [0xff,0xff,0x8e,0xd9,0xff,0x00,0x00,0x00]
+
+ds_min_src2_u64 v1
+// CHECK: [0x00,0x00,0x8e,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u64 v1 offset:0
+// CHECK: [0x00,0x00,0x8e,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u64 v1 offset:4
+// CHECK: [0x04,0x00,0x8e,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_u64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x8f,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u64 v1 offset:65535
+// CHECK: [0xff,0xff,0x90,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u64 v255 offset:65535
+// CHECK: [0xff,0xff,0x90,0xd9,0xff,0x00,0x00,0x00]
+
+ds_max_src2_u64 v1
+// CHECK: [0x00,0x00,0x90,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u64 v1 offset:0
+// CHECK: [0x00,0x00,0x90,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u64 v1 offset:4
+// CHECK: [0x04,0x00,0x90,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_u64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x91,0xd9,0x01,0x00,0x00,0x00]
+
+ds_and_src2_b64 v1 offset:65535
+// CHECK: [0xff,0xff,0x92,0xd9,0x01,0x00,0x00,0x00]
+
+ds_and_src2_b64 v255 offset:65535
+// CHECK: [0xff,0xff,0x92,0xd9,0xff,0x00,0x00,0x00]
+
+ds_and_src2_b64 v1
+// CHECK: [0x00,0x00,0x92,0xd9,0x01,0x00,0x00,0x00]
+
+ds_and_src2_b64 v1 offset:0
+// CHECK: [0x00,0x00,0x92,0xd9,0x01,0x00,0x00,0x00]
+
+ds_and_src2_b64 v1 offset:4
+// CHECK: [0x04,0x00,0x92,0xd9,0x01,0x00,0x00,0x00]
+
+ds_and_src2_b64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x93,0xd9,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b64 v1 offset:65535
+// CHECK: [0xff,0xff,0x94,0xd9,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b64 v255 offset:65535
+// CHECK: [0xff,0xff,0x94,0xd9,0xff,0x00,0x00,0x00]
+
+ds_or_src2_b64 v1
+// CHECK: [0x00,0x00,0x94,0xd9,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b64 v1 offset:0
+// CHECK: [0x00,0x00,0x94,0xd9,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b64 v1 offset:4
+// CHECK: [0x04,0x00,0x94,0xd9,0x01,0x00,0x00,0x00]
+
+ds_or_src2_b64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x95,0xd9,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b64 v1 offset:65535
+// CHECK: [0xff,0xff,0x96,0xd9,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b64 v255 offset:65535
+// CHECK: [0xff,0xff,0x96,0xd9,0xff,0x00,0x00,0x00]
+
+ds_xor_src2_b64 v1
+// CHECK: [0x00,0x00,0x96,0xd9,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b64 v1 offset:0
+// CHECK: [0x00,0x00,0x96,0xd9,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b64 v1 offset:4
+// CHECK: [0x04,0x00,0x96,0xd9,0x01,0x00,0x00,0x00]
+
+ds_xor_src2_b64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0x97,0xd9,0x01,0x00,0x00,0x00]
+
+ds_write_src2_b64 v1
+// CHECK: [0x00,0x00,0x9a,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f64 v1 offset:65535
+// CHECK: [0xff,0xff,0xa4,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f64 v255 offset:65535
+// CHECK: [0xff,0xff,0xa4,0xd9,0xff,0x00,0x00,0x00]
+
+ds_min_src2_f64 v1
+// CHECK: [0x00,0x00,0xa4,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f64 v1 offset:0
+// CHECK: [0x00,0x00,0xa4,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f64 v1 offset:4
+// CHECK: [0x04,0x00,0xa4,0xd9,0x01,0x00,0x00,0x00]
+
+ds_min_src2_f64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0xa5,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f64 v1 offset:65535
+// CHECK: [0xff,0xff,0xa6,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f64 v255 offset:65535
+// CHECK: [0xff,0xff,0xa6,0xd9,0xff,0x00,0x00,0x00]
+
+ds_max_src2_f64 v1
+// CHECK: [0x00,0x00,0xa6,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f64 v1 offset:0
+// CHECK: [0x00,0x00,0xa6,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f64 v1 offset:4
+// CHECK: [0x04,0x00,0xa6,0xd9,0x01,0x00,0x00,0x00]
+
+ds_max_src2_f64 v1 offset:65535 gds
+// CHECK: [0xff,0xff,0xa7,0xd9,0x01,0x00,0x00,0x00]
+
+ds_and_src2_b32 v1
+// CHECK: [0x00,0x00,0x12,0xd9,0x01,0x00,0x00,0x00]
+
+ds_and_src2_b32 v1 gds
+// CHECK: [0x00,0x00,0x13,0xd9,0x01,0x00,0x00,0x00]
+
+ds_and_src2_b32 v255 offset:65535
+// CHECK: [0xff,0xff,0x12,0xd9,0xff,0x00,0x00,0x00]
+
+ds_append v5
+// CHECK: [0x00,0x00,0x7c,0xd9,0x00,0x00,0x00,0x05]
+
+ds_append v5 gds
+// CHECK: [0x00,0x00,0x7d,0xd9,0x00,0x00,0x00,0x05]
+
+ds_append v255 offset:65535
+// CHECK: [0xff,0xff,0x7c,0xd9,0x00,0x00,0x00,0xff]
+
+ds_consume v5
+// CHECK: [0x00,0x00,0x7a,0xd9,0x00,0x00,0x00,0x05]
+
+ds_consume v5 gds
+// CHECK: [0x00,0x00,0x7b,0xd9,0x00,0x00,0x00,0x05]
+
+ds_consume v255 offset:65535
+// CHECK: [0xff,0xff,0x7a,0xd9,0x00,0x00,0x00,0xff]
+
+ds_ordered_count v5, v1 gds
+// CHECK: [0x00,0x00,0x7f,0xd9,0x01,0x00,0x00,0x05]
+
+ds_ordered_count v5, v255 offset:65535 gds
+// CHECK: [0xff,0xff,0x7f,0xd9,0xff,0x00,0x00,0x05]
+
+ds_ordered_count v5, v255 gds
+// CHECK: [0x00,0x00,0x7f,0xd9,0xff,0x00,0x00,0x05]
+
+ds_gws_barrier v1 gds
+// CHECK: [0x00,0x00,0x3b,0xd9,0x00,0x01,0x00,0x00]
+
+ds_gws_barrier v255 offset:65535 gds
+// CHECK: [0xff,0xff,0x3b,0xd9,0x00,0xff,0x00,0x00]
+
+ds_gws_init v1 gds
+// CHECK: [0x00,0x00,0x33,0xd9,0x00,0x01,0x00,0x00]
+
+ds_gws_init v255 offset:65535 gds
+// CHECK: [0xff,0xff,0x33,0xd9,0x00,0xff,0x00,0x00]
+
+ds_gws_sema_br v1 gds
+// CHECK: [0x00,0x00,0x37,0xd9,0x00,0x01,0x00,0x00]
+
+ds_gws_sema_br v255 offset:65535 gds
+// CHECK: [0xff,0xff,0x37,0xd9,0x00,0xff,0x00,0x00]
+
+ds_gws_sema_p offset:65535 gds
+// CHECK: [0xff,0xff,0x39,0xd9,0x00,0x00,0x00,0x00]
+
+ds_gws_sema_p gds
+// CHECK: [0x00,0x00,0x39,0xd9,0x00,0x00,0x00,0x00]
+
+ds_gws_sema_release_all offset:65535 gds
+// CHECK: [0xff,0xff,0x31,0xd9,0x00,0x00,0x00,0x00]
+
+ds_gws_sema_release_all gds
+// CHECK: [0x00,0x00,0x31,0xd9,0x00,0x00,0x00,0x00]
+
+ds_gws_sema_v offset:65535 gds
+// CHECK: [0xff,0xff,0x35,0xd9,0x00,0x00,0x00,0x00]
+
+ds_gws_sema_v gds
+// CHECK: [0x00,0x00,0x35,0xd9,0x00,0x00,0x00,0x00]
+
+ds_wrap_rtn_b32 v5, v255, v2, v3 gds
+// CHECK: [0x00,0x00,0x69,0xd8,0xff,0x02,0x03,0x05]
+
+ds_wrap_rtn_b32 v5, v255, v2, v255 offset:65535
+// CHECK: [0xff,0xff,0x68,0xd8,0xff,0x02,0xff,0x05]
+
+ds_condxchg32_rtn_b64 v[5:6], v1, v[254:255] offset:65535 gds
+// CHECK: [0xff,0xff,0xfd,0xd8,0x01,0xfe,0x00,0x05]
+
+ds_condxchg32_rtn_b64 v[5:6], v1, v[254:255]
+// CHECK: [0x00,0x00,0xfc,0xd8,0x01,0xfe,0x00,0x05]
+
+exp mrt0, v0, v0, v0, v0
+// CHECK: [0x0f,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp mrtz, v0, v0, v0, v0
+// CHECK: [0x8f,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp null, v0, v0, v0, v0
+// CHECK: [0x9f,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp pos0, v0, v0, v0, v0
+// CHECK: [0xcf,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp param0, v0, v0, v0, v0
+// CHECK: [0x0f,0x02,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp mrt0, v255, v0, v0, v0
+// CHECK: [0x0f,0x00,0x00,0xc4,0xff,0x00,0x00,0x00]
+
+exp mrt0, v0, v255, v0, v0
+// CHECK: [0x0f,0x00,0x00,0xc4,0x00,0xff,0x00,0x00]
+
+exp mrt0, v0, v0, v255, v0
+// CHECK: [0x0f,0x00,0x00,0xc4,0x00,0x00,0xff,0x00]
+
+exp mrt0, v0, v0, v0, v255
+// CHECK: [0x0f,0x00,0x00,0xc4,0x00,0x00,0x00,0xff]
+
+exp mrt0, v0, off, off, off
+// CHECK: [0x01,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp mrt0, off, v0, off, off
+// CHECK: [0x02,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp mrt0, v0, v0, off, off
+// CHECK: [0x03,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp mrt0, off, off, v0, off
+// CHECK: [0x04,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp mrt0, v0, off, v0, off
+// CHECK: [0x05,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp mrt0, off, v0, v0, off
+// CHECK: [0x06,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp mrt0, v0, v0, v0, off
+// CHECK: [0x07,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp mrt0, off, off, off, v0
+// CHECK: [0x08,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp mrt0, v0, off, off, v0
+// CHECK: [0x09,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp mrt0, off, v0, off, v0
+// CHECK: [0x0a,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp mrt0, v0, v0, off, v0
+// CHECK: [0x0b,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp mrt0, off, off, v0, v0
+// CHECK: [0x0c,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp mrt0, v0, off, v0, v0
+// CHECK: [0x0d,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp mrt0, off, v0, v0, v0
+// CHECK: [0x0e,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp mrt0, off, off, off, off
+// CHECK: [0x00,0x00,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+exp mrt0, v0, v0, v0, v0 vm
+// CHECK: [0x0f,0x10,0x00,0xc4,0x00,0x00,0x00,0x00]
+
+flat_load_ubyte v5, v[1:2]
+// CHECK: [0x00,0x00,0x40,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_ubyte v255, v[1:2]
+// CHECK: [0x00,0x00,0x40,0xdc,0x01,0x00,0x00,0xff]
+
+flat_load_ubyte v5, v[254:255]
+// CHECK: [0x00,0x00,0x40,0xdc,0xfe,0x00,0x00,0x05]
+
+flat_load_ubyte v5, v[1:2] glc
+// CHECK: [0x00,0x00,0x41,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_ubyte v5, v[1:2] slc
+// CHECK: [0x00,0x00,0x42,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_sbyte v5, v[1:2]
+// CHECK: [0x00,0x00,0x44,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_sbyte v255, v[1:2]
+// CHECK: [0x00,0x00,0x44,0xdc,0x01,0x00,0x00,0xff]
+
+flat_load_sbyte v5, v[254:255]
+// CHECK: [0x00,0x00,0x44,0xdc,0xfe,0x00,0x00,0x05]
+
+flat_load_sbyte v5, v[1:2] glc
+// CHECK: [0x00,0x00,0x45,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_sbyte v5, v[1:2] slc
+// CHECK: [0x00,0x00,0x46,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_ushort v5, v[1:2]
+// CHECK: [0x00,0x00,0x48,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_ushort v255, v[1:2]
+// CHECK: [0x00,0x00,0x48,0xdc,0x01,0x00,0x00,0xff]
+
+flat_load_ushort v5, v[254:255]
+// CHECK: [0x00,0x00,0x48,0xdc,0xfe,0x00,0x00,0x05]
+
+flat_load_ushort v5, v[1:2] glc
+// CHECK: [0x00,0x00,0x49,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_ushort v5, v[1:2] slc
+// CHECK: [0x00,0x00,0x4a,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_sshort v5, v[1:2]
+// CHECK: [0x00,0x00,0x4c,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_sshort v255, v[1:2]
+// CHECK: [0x00,0x00,0x4c,0xdc,0x01,0x00,0x00,0xff]
+
+flat_load_sshort v5, v[254:255]
+// CHECK: [0x00,0x00,0x4c,0xdc,0xfe,0x00,0x00,0x05]
+
+flat_load_sshort v5, v[1:2] glc
+// CHECK: [0x00,0x00,0x4d,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_sshort v5, v[1:2] slc
+// CHECK: [0x00,0x00,0x4e,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dword v5, v[1:2]
+// CHECK: [0x00,0x00,0x50,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dword v255, v[1:2]
+// CHECK: [0x00,0x00,0x50,0xdc,0x01,0x00,0x00,0xff]
+
+flat_load_dword v5, v[254:255]
+// CHECK: [0x00,0x00,0x50,0xdc,0xfe,0x00,0x00,0x05]
+
+flat_load_dword v5, v[1:2] glc
+// CHECK: [0x00,0x00,0x51,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dword v5, v[1:2] slc
+// CHECK: [0x00,0x00,0x52,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dwordx2 v[5:6], v[1:2]
+// CHECK: [0x00,0x00,0x54,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dwordx2 v[254:255], v[1:2]
+// CHECK: [0x00,0x00,0x54,0xdc,0x01,0x00,0x00,0xfe]
+
+flat_load_dwordx2 v[5:6], v[254:255]
+// CHECK: [0x00,0x00,0x54,0xdc,0xfe,0x00,0x00,0x05]
+
+flat_load_dwordx2 v[5:6], v[1:2] glc
+// CHECK: [0x00,0x00,0x55,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dwordx2 v[5:6], v[1:2] slc
+// CHECK: [0x00,0x00,0x56,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dwordx3 v[5:7], v[1:2]
+// CHECK: [0x00,0x00,0x58,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dwordx3 v[253:255], v[1:2]
+// CHECK: [0x00,0x00,0x58,0xdc,0x01,0x00,0x00,0xfd]
+
+flat_load_dwordx3 v[5:7], v[254:255]
+// CHECK: [0x00,0x00,0x58,0xdc,0xfe,0x00,0x00,0x05]
+
+flat_load_dwordx3 v[5:7], v[1:2] glc
+// CHECK: [0x00,0x00,0x59,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dwordx3 v[5:7], v[1:2] slc
+// CHECK: [0x00,0x00,0x5a,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dwordx4 v[5:8], v[1:2]
+// CHECK: [0x00,0x00,0x5c,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dwordx4 v[252:255], v[1:2]
+// CHECK: [0x00,0x00,0x5c,0xdc,0x01,0x00,0x00,0xfc]
+
+flat_load_dwordx4 v[5:8], v[254:255]
+// CHECK: [0x00,0x00,0x5c,0xdc,0xfe,0x00,0x00,0x05]
+
+flat_load_dwordx4 v[5:8], v[1:2] glc
+// CHECK: [0x00,0x00,0x5d,0xdc,0x01,0x00,0x00,0x05]
+
+flat_load_dwordx4 v[5:8], v[1:2] slc
+// CHECK: [0x00,0x00,0x5e,0xdc,0x01,0x00,0x00,0x05]
+
+flat_store_byte v[1:2], v2
+// CHECK: [0x00,0x00,0x60,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_byte v[254:255], v2
+// CHECK: [0x00,0x00,0x60,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_store_byte v[1:2], v255
+// CHECK: [0x00,0x00,0x60,0xdc,0x01,0xff,0x00,0x00]
+
+flat_store_byte v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x61,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_byte v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x62,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_short v[1:2], v2
+// CHECK: [0x00,0x00,0x68,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_short v[254:255], v2
+// CHECK: [0x00,0x00,0x68,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_store_short v[1:2], v255
+// CHECK: [0x00,0x00,0x68,0xdc,0x01,0xff,0x00,0x00]
+
+flat_store_short v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x69,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_short v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x6a,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dword v[1:2], v2
+// CHECK: [0x00,0x00,0x70,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dword v[254:255], v2
+// CHECK: [0x00,0x00,0x70,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_store_dword v[1:2], v255
+// CHECK: [0x00,0x00,0x70,0xdc,0x01,0xff,0x00,0x00]
+
+flat_store_dword v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x71,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dword v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x72,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dwordx2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x74,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dwordx2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x74,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_store_dwordx2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x74,0xdc,0x01,0xfe,0x00,0x00]
+
+flat_store_dwordx2 v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x75,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dwordx2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x76,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dwordx3 v[1:2], v[2:4]
+// CHECK: [0x00,0x00,0x78,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dwordx3 v[254:255], v[2:4]
+// CHECK: [0x00,0x00,0x78,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_store_dwordx3 v[1:2], v[253:255]
+// CHECK: [0x00,0x00,0x78,0xdc,0x01,0xfd,0x00,0x00]
+
+flat_store_dwordx3 v[1:2], v[2:4] glc
+// CHECK: [0x00,0x00,0x79,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dwordx3 v[1:2], v[2:4] slc
+// CHECK: [0x00,0x00,0x7a,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dwordx4 v[1:2], v[2:5]
+// CHECK: [0x00,0x00,0x7c,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dwordx4 v[254:255], v[2:5]
+// CHECK: [0x00,0x00,0x7c,0xdc,0xfe,0x02,0x00,0x00]
+
+flat_store_dwordx4 v[1:2], v[252:255]
+// CHECK: [0x00,0x00,0x7c,0xdc,0x01,0xfc,0x00,0x00]
+
+flat_store_dwordx4 v[1:2], v[2:5] glc
+// CHECK: [0x00,0x00,0x7d,0xdc,0x01,0x02,0x00,0x00]
+
+flat_store_dwordx4 v[1:2], v[2:5] slc
+// CHECK: [0x00,0x00,0x7e,0xdc,0x01,0x02,0x00,0x00]
+
+flat_atomic_swap v[1:2], v2
+// CHECK: [0x00,0x00,0x00,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_swap v[254:255], v2
+// CHECK: [0x00,0x00,0x00,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_swap v[1:2], v255
+// CHECK: [0x00,0x00,0x00,0xdd,0x01,0xff,0x00,0x00]
+
+flat_atomic_swap v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x01,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_swap v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x02,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_cmpswap v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x04,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_cmpswap v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x04,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_cmpswap v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x04,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_cmpswap v0, v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x05,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_cmpswap v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x06,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_add v[1:2], v2
+// CHECK: [0x00,0x00,0x08,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_add v[254:255], v2
+// CHECK: [0x00,0x00,0x08,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_add v[1:2], v255
+// CHECK: [0x00,0x00,0x08,0xdd,0x01,0xff,0x00,0x00]
+
+flat_atomic_add v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x09,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_add v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x0a,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_sub v[1:2], v2
+// CHECK: [0x00,0x00,0x0c,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_sub v[254:255], v2
+// CHECK: [0x00,0x00,0x0c,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_sub v[1:2], v255
+// CHECK: [0x00,0x00,0x0c,0xdd,0x01,0xff,0x00,0x00]
+
+flat_atomic_sub v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x0d,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_sub v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x0e,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_smin v[1:2], v2
+// CHECK: [0x00,0x00,0x10,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_smin v[254:255], v2
+// CHECK: [0x00,0x00,0x10,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_smin v[1:2], v255
+// CHECK: [0x00,0x00,0x10,0xdd,0x01,0xff,0x00,0x00]
+
+flat_atomic_smin v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x11,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_smin v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x12,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_umin v[1:2], v2
+// CHECK: [0x00,0x00,0x14,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_umin v[254:255], v2
+// CHECK: [0x00,0x00,0x14,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_umin v[1:2], v255
+// CHECK: [0x00,0x00,0x14,0xdd,0x01,0xff,0x00,0x00]
+
+flat_atomic_umin v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x15,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_umin v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x16,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_smax v[1:2], v2
+// CHECK: [0x00,0x00,0x18,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_smax v[254:255], v2
+// CHECK: [0x00,0x00,0x18,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_smax v[1:2], v255
+// CHECK: [0x00,0x00,0x18,0xdd,0x01,0xff,0x00,0x00]
+
+flat_atomic_smax v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x19,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_smax v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x1a,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_umax v[1:2], v2
+// CHECK: [0x00,0x00,0x1c,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_umax v[254:255], v2
+// CHECK: [0x00,0x00,0x1c,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_umax v[1:2], v255
+// CHECK: [0x00,0x00,0x1c,0xdd,0x01,0xff,0x00,0x00]
+
+flat_atomic_umax v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x1d,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_umax v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x1e,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_and v[1:2], v2
+// CHECK: [0x00,0x00,0x20,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_and v[254:255], v2
+// CHECK: [0x00,0x00,0x20,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_and v[1:2], v255
+// CHECK: [0x00,0x00,0x20,0xdd,0x01,0xff,0x00,0x00]
+
+flat_atomic_and v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x21,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_and v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x22,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_or v[1:2], v2
+// CHECK: [0x00,0x00,0x24,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_or v[254:255], v2
+// CHECK: [0x00,0x00,0x24,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_or v[1:2], v255
+// CHECK: [0x00,0x00,0x24,0xdd,0x01,0xff,0x00,0x00]
+
+flat_atomic_or v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x25,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_or v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x26,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_xor v[1:2], v2
+// CHECK: [0x00,0x00,0x28,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_xor v[254:255], v2
+// CHECK: [0x00,0x00,0x28,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_xor v[1:2], v255
+// CHECK: [0x00,0x00,0x28,0xdd,0x01,0xff,0x00,0x00]
+
+flat_atomic_xor v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x29,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_xor v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x2a,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_inc v[1:2], v2
+// CHECK: [0x00,0x00,0x2c,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_inc v[254:255], v2
+// CHECK: [0x00,0x00,0x2c,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_inc v[1:2], v255
+// CHECK: [0x00,0x00,0x2c,0xdd,0x01,0xff,0x00,0x00]
+
+flat_atomic_inc v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x2d,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_inc v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x2e,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_dec v[1:2], v2
+// CHECK: [0x00,0x00,0x30,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_dec v[254:255], v2
+// CHECK: [0x00,0x00,0x30,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_dec v[1:2], v255
+// CHECK: [0x00,0x00,0x30,0xdd,0x01,0xff,0x00,0x00]
+
+flat_atomic_dec v0, v[1:2], v2 glc
+// CHECK: [0x00,0x00,0x31,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_dec v[1:2], v2 slc
+// CHECK: [0x00,0x00,0x32,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_swap_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x80,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_swap_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x80,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_swap_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x80,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_swap_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x81,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_swap_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x82,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_cmpswap_x2 v[1:2], v[2:5]
+// CHECK: [0x00,0x00,0x84,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_cmpswap_x2 v[254:255], v[2:5]
+// CHECK: [0x00,0x00,0x84,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_cmpswap_x2 v[1:2], v[252:255]
+// CHECK: [0x00,0x00,0x84,0xdd,0x01,0xfc,0x00,0x00]
+
+flat_atomic_cmpswap_x2 v[0:1], v[1:2], v[2:5] glc
+// CHECK: [0x00,0x00,0x85,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_cmpswap_x2 v[1:2], v[2:5] slc
+// CHECK: [0x00,0x00,0x86,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_add_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x88,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_add_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x88,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_add_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x88,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_add_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x89,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_add_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x8a,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_sub_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x8c,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_sub_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x8c,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_sub_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x8c,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_sub_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x8d,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_sub_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x8e,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_smin_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x90,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_smin_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x90,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_smin_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x90,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_smin_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x91,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_smin_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x92,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_umin_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x94,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_umin_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x94,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_umin_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x94,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_umin_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x95,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_umin_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x96,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_smax_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x98,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_smax_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x98,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_smax_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x98,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_smax_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x99,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_smax_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x9a,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_umax_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0x9c,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_umax_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0x9c,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_umax_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0x9c,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_umax_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0x9d,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_umax_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0x9e,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_and_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0xa0,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_and_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0xa0,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_and_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0xa0,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_and_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0xa1,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_and_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0xa2,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_or_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0xa4,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_or_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0xa4,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_or_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0xa4,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_or_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0xa5,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_or_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0xa6,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_xor_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0xa8,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_xor_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0xa8,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_xor_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0xa8,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_xor_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0xa9,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_xor_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0xaa,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_inc_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0xac,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_inc_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0xac,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_inc_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0xac,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_inc_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0xad,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_inc_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0xae,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_dec_x2 v[1:2], v[2:3]
+// CHECK: [0x00,0x00,0xb0,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_dec_x2 v[254:255], v[2:3]
+// CHECK: [0x00,0x00,0xb0,0xdd,0xfe,0x02,0x00,0x00]
+
+flat_atomic_dec_x2 v[1:2], v[254:255]
+// CHECK: [0x00,0x00,0xb0,0xdd,0x01,0xfe,0x00,0x00]
+
+flat_atomic_dec_x2 v[0:1], v[1:2], v[2:3] glc
+// CHECK: [0x00,0x00,0xb1,0xdd,0x01,0x02,0x00,0x00]
+
+flat_atomic_dec_x2 v[1:2], v[2:3] slc
+// CHECK: [0x00,0x00,0xb2,0xdd,0x01,0x02,0x00,0x00]
+
+image_load v5, v[1:4], s[8:15] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v252, v[1:4], s[8:15] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf0,0x01,0xfc,0x02,0x00]
+
+image_load v5, v[252:255], s[8:15] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf0,0xfc,0x05,0x02,0x00]
+
+image_load v5, v[1:4], s[12:19] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf0,0x01,0x05,0x03,0x00]
+
+image_load v5, v[1:4], s[92:99] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf0,0x01,0x05,0x17,0x00]
+
+image_load v5, v[1:4], s[8:15] dmask:0x2
+// CHECK: [0x00,0x02,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:6], v[1:4], s[8:15] dmask:0x3
+// CHECK: [0x00,0x03,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v5, v[1:4], s[8:15] dmask:0x4
+// CHECK: [0x00,0x04,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:6], v[1:4], s[8:15] dmask:0x5
+// CHECK: [0x00,0x05,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:6], v[1:4], s[8:15] dmask:0x6
+// CHECK: [0x00,0x06,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:7], v[1:4], s[8:15] dmask:0x7
+// CHECK: [0x00,0x07,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v5, v[1:4], s[8:15] dmask:0x8
+// CHECK: [0x00,0x08,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:6], v[1:4], s[8:15] dmask:0x9
+// CHECK: [0x00,0x09,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:6], v[1:4], s[8:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:7], v[1:4], s[8:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:6], v[1:4], s[8:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:7], v[1:4], s[8:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:7], v[1:4], s[8:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v[5:8], v[1:4], s[8:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v5, v[1:4], s[8:15] dmask:0x0
+// CHECK: [0x00,0x00,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load v5, v[1:4], s[8:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x00,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v5, v[1:4], s[8:15] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v252, v[1:4], s[8:15] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf0,0x01,0xfc,0x02,0x00]
+
+image_load_mip v5, v[252:255], s[8:15] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf0,0xfc,0x05,0x02,0x00]
+
+image_load_mip v5, v[1:4], s[12:19] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf0,0x01,0x05,0x03,0x00]
+
+image_load_mip v5, v[1:4], s[92:99] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf0,0x01,0x05,0x17,0x00]
+
+image_load_mip v5, v[1:4], s[8:15] dmask:0x2
+// CHECK: [0x00,0x02,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:6], v[1:4], s[8:15] dmask:0x3
+// CHECK: [0x00,0x03,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v5, v[1:4], s[8:15] dmask:0x4
+// CHECK: [0x00,0x04,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:6], v[1:4], s[8:15] dmask:0x5
+// CHECK: [0x00,0x05,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:6], v[1:4], s[8:15] dmask:0x6
+// CHECK: [0x00,0x06,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:7], v[1:4], s[8:15] dmask:0x7
+// CHECK: [0x00,0x07,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v5, v[1:4], s[8:15] dmask:0x8
+// CHECK: [0x00,0x08,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:6], v[1:4], s[8:15] dmask:0x9
+// CHECK: [0x00,0x09,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:6], v[1:4], s[8:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:7], v[1:4], s[8:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:6], v[1:4], s[8:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:7], v[1:4], s[8:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:7], v[1:4], s[8:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v[5:8], v[1:4], s[8:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v5, v[1:4], s[8:15] dmask:0x0
+// CHECK: [0x00,0x00,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_load_mip v5, v[1:4], s[8:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x04,0xf0,0x01,0x05,0x02,0x00]
+
+image_store v1, v[2:5], s[12:19] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v252, v[2:5], s[12:19] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x20,0xf0,0x02,0xfc,0x03,0x00]
+
+image_store v1, v[252:255], s[12:19] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x20,0xf0,0xfc,0x01,0x03,0x00]
+
+image_store v1, v[2:5], s[16:23] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x20,0xf0,0x02,0x01,0x04,0x00]
+
+image_store v1, v[2:5], s[92:99] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x20,0xf0,0x02,0x01,0x17,0x00]
+
+image_store v1, v[2:5], s[12:19] dmask:0x2 unorm
+// CHECK: [0x00,0x12,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:2], v[2:5], s[12:19] dmask:0x3 unorm
+// CHECK: [0x00,0x13,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v1, v[2:5], s[12:19] dmask:0x4 unorm
+// CHECK: [0x00,0x14,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:2], v[2:5], s[12:19] dmask:0x5 unorm
+// CHECK: [0x00,0x15,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:2], v[2:5], s[12:19] dmask:0x6 unorm
+// CHECK: [0x00,0x16,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:3], v[2:5], s[12:19] dmask:0x7 unorm
+// CHECK: [0x00,0x17,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v1, v[2:5], s[12:19] dmask:0x8 unorm
+// CHECK: [0x00,0x18,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:2], v[2:5], s[12:19] dmask:0x9 unorm
+// CHECK: [0x00,0x19,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:2], v[2:5], s[12:19] dmask:0xa unorm
+// CHECK: [0x00,0x1a,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:3], v[2:5], s[12:19] dmask:0xb unorm
+// CHECK: [0x00,0x1b,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:2], v[2:5], s[12:19] dmask:0xc unorm
+// CHECK: [0x00,0x1c,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:3], v[2:5], s[12:19] dmask:0xd unorm
+// CHECK: [0x00,0x1d,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:3], v[2:5], s[12:19] dmask:0xe unorm
+// CHECK: [0x00,0x1e,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v[1:4], v[2:5], s[12:19] dmask:0xf unorm
+// CHECK: [0x00,0x1f,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v1, v[2:5], s[12:19] dmask:0x0 unorm
+// CHECK: [0x00,0x10,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store v1, v[2:5], s[12:19] dmask:0x1 unorm glc
+// CHECK: [0x00,0x31,0x20,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v1, v[2:5], s[12:19] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v252, v[2:5], s[12:19] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x24,0xf0,0x02,0xfc,0x03,0x00]
+
+image_store_mip v1, v[252:255], s[12:19] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x24,0xf0,0xfc,0x01,0x03,0x00]
+
+image_store_mip v1, v[2:5], s[16:23] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x24,0xf0,0x02,0x01,0x04,0x00]
+
+image_store_mip v1, v[2:5], s[92:99] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x24,0xf0,0x02,0x01,0x17,0x00]
+
+image_store_mip v1, v[2:5], s[12:19] dmask:0x2 unorm
+// CHECK: [0x00,0x12,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:2], v[2:5], s[12:19] dmask:0x3 unorm
+// CHECK: [0x00,0x13,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v1, v[2:5], s[12:19] dmask:0x4 unorm
+// CHECK: [0x00,0x14,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:2], v[2:5], s[12:19] dmask:0x5 unorm
+// CHECK: [0x00,0x15,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:2], v[2:5], s[12:19] dmask:0x6 unorm
+// CHECK: [0x00,0x16,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:3], v[2:5], s[12:19] dmask:0x7 unorm
+// CHECK: [0x00,0x17,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v1, v[2:5], s[12:19] dmask:0x8 unorm
+// CHECK: [0x00,0x18,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:2], v[2:5], s[12:19] dmask:0x9 unorm
+// CHECK: [0x00,0x19,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:2], v[2:5], s[12:19] dmask:0xa unorm
+// CHECK: [0x00,0x1a,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:3], v[2:5], s[12:19] dmask:0xb unorm
+// CHECK: [0x00,0x1b,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:2], v[2:5], s[12:19] dmask:0xc unorm
+// CHECK: [0x00,0x1c,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:3], v[2:5], s[12:19] dmask:0xd unorm
+// CHECK: [0x00,0x1d,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:3], v[2:5], s[12:19] dmask:0xe unorm
+// CHECK: [0x00,0x1e,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v[1:4], v[2:5], s[12:19] dmask:0xf unorm
+// CHECK: [0x00,0x1f,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v1, v[2:5], s[12:19] dmask:0x0 unorm
+// CHECK: [0x00,0x10,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_store_mip v1, v[2:5], s[12:19] dmask:0x1 unorm glc
+// CHECK: [0x00,0x31,0x24,0xf0,0x02,0x01,0x03,0x00]
+
+image_get_resinfo v5, v[1:4], s[8:15] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v252, v[1:4], s[8:15] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf0,0x01,0xfc,0x02,0x00]
+
+image_get_resinfo v5, v[252:255], s[8:15] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf0,0xfc,0x05,0x02,0x00]
+
+image_get_resinfo v5, v[1:4], s[12:19] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf0,0x01,0x05,0x03,0x00]
+
+image_get_resinfo v5, v[1:4], s[92:99] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf0,0x01,0x05,0x17,0x00]
+
+image_get_resinfo v5, v[1:4], s[8:15] dmask:0x2
+// CHECK: [0x00,0x02,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:6], v[1:4], s[8:15] dmask:0x3
+// CHECK: [0x00,0x03,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v5, v[1:4], s[8:15] dmask:0x4
+// CHECK: [0x00,0x04,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:6], v[1:4], s[8:15] dmask:0x5
+// CHECK: [0x00,0x05,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:6], v[1:4], s[8:15] dmask:0x6
+// CHECK: [0x00,0x06,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:7], v[1:4], s[8:15] dmask:0x7
+// CHECK: [0x00,0x07,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v5, v[1:4], s[8:15] dmask:0x8
+// CHECK: [0x00,0x08,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:6], v[1:4], s[8:15] dmask:0x9
+// CHECK: [0x00,0x09,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:6], v[1:4], s[8:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:7], v[1:4], s[8:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:6], v[1:4], s[8:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:7], v[1:4], s[8:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:7], v[1:4], s[8:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v[5:8], v[1:4], s[8:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v5, v[1:4], s[8:15] dmask:0x0
+// CHECK: [0x00,0x00,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_get_resinfo v5, v[1:4], s[8:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x38,0xf0,0x01,0x05,0x02,0x00]
+
+image_sample v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample v5, v[1:4], s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf0,0x01,0x05,0x77,0x00]
+
+image_sample v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample v5, v[1:4], s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf0,0x01,0x05,0x02,0x03]
+
+image_sample v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x80,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x84,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_cl v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x84,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_cl v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x84,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_cl v5, v[1:4], s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x84,0xf0,0x01,0x05,0x77,0x00]
+
+image_sample_cl v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x84,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_cl v5, v[1:4], s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x84,0xf0,0x01,0x05,0x02,0x03]
+
+image_sample_cl v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x84,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x84,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x90,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_l v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x90,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_l v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x90,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_l v5, v[1:4], s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x90,0xf0,0x01,0x05,0x77,0x00]
+
+image_sample_l v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x90,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_l v5, v[1:4], s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x90,0xf0,0x01,0x05,0x02,0x03]
+
+image_sample_l v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x90,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_l v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_l v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x90,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x94,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_b v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x94,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_b v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x94,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_b v5, v[1:4], s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x94,0xf0,0x01,0x05,0x77,0x00]
+
+image_sample_b v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x94,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_b v5, v[1:4], s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x94,0xf0,0x01,0x05,0x02,0x03]
+
+image_sample_b v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x94,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_b v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x94,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x98,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_b_cl v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x98,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_b_cl v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x98,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_b_cl v5, v[1:4], s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x98,0xf0,0x01,0x05,0x77,0x00]
+
+image_sample_b_cl v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x98,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_b_cl v5, v[1:4], s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x98,0xf0,0x01,0x05,0x02,0x03]
+
+image_sample_b_cl v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x98,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x98,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x9c,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_lz v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x9c,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_lz v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x9c,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_lz v5, v[1:4], s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x9c,0xf0,0x01,0x05,0x77,0x00]
+
+image_sample_lz v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x9c,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_lz v5, v[1:4], s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x9c,0xf0,0x01,0x05,0x02,0x03]
+
+image_sample_lz v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x9c,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x9c,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa0,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_c v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa0,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_c v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa0,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_c v5, v[1:4], s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa0,0xf0,0x01,0x05,0x77,0x00]
+
+image_sample_c v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0xa0,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_c v5, v[1:4], s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0xa0,0xf0,0x01,0x05,0x02,0x03]
+
+image_sample_c v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0xa0,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_c v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0xa0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa4,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_c_cl v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa4,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_c_cl v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa4,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_c_cl v5, v[1:4], s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xa4,0xf0,0x01,0x05,0x77,0x00]
+
+image_sample_c_cl v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0xa4,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_c_cl v5, v[1:4], s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0xa4,0xf0,0x01,0x05,0x02,0x03]
+
+image_sample_c_cl v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0xa4,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_c_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0xa4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb0,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_c_l v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb0,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_c_l v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb0,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_c_l v5, v[1:4], s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb0,0xf0,0x01,0x05,0x77,0x00]
+
+image_sample_c_l v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0xb0,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_c_l v5, v[1:4], s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0xb0,0xf0,0x01,0x05,0x02,0x03]
+
+image_sample_c_l v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0xb0,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_c_l v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_l v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0xb0,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb4,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_c_b v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb4,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_c_b v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb4,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_c_b v5, v[1:4], s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb4,0xf0,0x01,0x05,0x77,0x00]
+
+image_sample_c_b v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0xb4,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_c_b v5, v[1:4], s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0xb4,0xf0,0x01,0x05,0x02,0x03]
+
+image_sample_c_b v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0xb4,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_c_b v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0xb4,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb8,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_c_b_cl v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb8,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_c_b_cl v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb8,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_c_b_cl v5, v[1:4], s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xb8,0xf0,0x01,0x05,0x77,0x00]
+
+image_sample_c_b_cl v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0xb8,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_c_b_cl v5, v[1:4], s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0xb8,0xf0,0x01,0x05,0x02,0x03]
+
+image_sample_c_b_cl v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0xb8,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_c_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_b_cl v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0xb8,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v252, v[1:4], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xbc,0xf0,0x01,0xfc,0x62,0x00]
+
+image_sample_c_lz v5, v[252:255], s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xbc,0xf0,0xfc,0x05,0x62,0x00]
+
+image_sample_c_lz v5, v[1:4], s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xbc,0xf0,0x01,0x05,0x63,0x00]
+
+image_sample_c_lz v5, v[1:4], s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0xbc,0xf0,0x01,0x05,0x77,0x00]
+
+image_sample_c_lz v5, v[1:4], s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0xbc,0xf0,0x01,0x05,0x82,0x00]
+
+image_sample_c_lz v5, v[1:4], s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0xbc,0xf0,0x01,0x05,0x02,0x03]
+
+image_sample_c_lz v5, v[1:4], s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0xbc,0xf0,0x01,0x05,0xc2,0x03]
+
+image_sample_c_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:7], v[1:4], s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:6], v[1:4], s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:7], v[1:4], s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v[5:8], v[1:4], s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_sample_c_lz v5, v[1:4], s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0xbc,0xf0,0x01,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4 v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4 v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4 v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4 v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4 v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x00,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x00,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4 v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x01,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x02,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4 v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x00,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_cl v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_cl v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_cl v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x04,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x04,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x05,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x06,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x04,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x10,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_l v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x10,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x10,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_l v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x10,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x10,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x10,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_l v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x10,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x10,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_l v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x11,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x12,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x10,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x14,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_b v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x14,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x14,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_b v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x14,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x14,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x14,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_b v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x14,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x14,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_b v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x15,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x16,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x14,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x18,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_b_cl v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x18,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x18,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_b_cl v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x18,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x18,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x18,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_b_cl v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x18,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x18,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x19,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x1a,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x18,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x1c,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_lz v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x1c,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x1c,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_lz v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x1c,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x1c,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x1c,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_lz v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x1c,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x1c,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x1d,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x1e,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x1c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x20,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x20,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x20,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x20,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x20,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x20,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_c v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x20,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x20,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x21,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x22,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x20,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x24,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_cl v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x24,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x24,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_cl v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x24,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x24,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x24,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_c_cl v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x24,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x24,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x25,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x26,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x24,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x30,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_l v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x30,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x30,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_l v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x30,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x30,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x30,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_c_l v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x30,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x30,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x31,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x32,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x30,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x34,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_b v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x34,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x34,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_b v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x34,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x34,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x34,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_c_b v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x34,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x34,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x35,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x36,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x34,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_b_cl v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_b_cl v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_c_b_cl v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x38,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x38,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x39,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x3a,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x38,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x3c,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_lz v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x3c,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x3c,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_lz v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x3c,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x3c,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x3c,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_c_lz v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x3c,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x3c,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x3d,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x3e,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x3c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x40,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x40,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x40,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_o v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x40,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x40,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x40,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x40,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x40,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x41,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x42,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x40,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x44,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_cl_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x44,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x44,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_cl_o v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x44,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x44,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x44,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_cl_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x44,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x44,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x45,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x46,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x44,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x50,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_l_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x50,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x50,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_l_o v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x50,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x50,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x50,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_l_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x50,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x50,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x51,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x52,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x50,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x54,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_b_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x54,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x54,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_b_o v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x54,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x54,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x54,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_b_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x54,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x54,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x55,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x56,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x54,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x58,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_b_cl_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x58,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x58,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_b_cl_o v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x58,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x58,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x58,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_b_cl_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x58,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x58,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x59,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x5a,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x58,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x5c,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_lz_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x5c,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x5c,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_lz_o v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x5c,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x5c,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x5c,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_lz_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x5c,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x5c,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x5d,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x5e,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x5c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x60,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x60,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x60,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_o v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x60,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x60,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x60,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_c_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x60,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x60,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x61,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x62,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x60,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x64,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_cl_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x64,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x64,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_cl_o v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x64,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x64,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x64,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_c_cl_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x64,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x64,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x65,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x66,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x64,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x70,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_l_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x70,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x70,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_l_o v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x70,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x70,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x70,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_c_l_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x70,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x70,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x71,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x72,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_l_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x70,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x74,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_b_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x74,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x74,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_b_o v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x74,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x74,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x74,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_c_b_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x74,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x74,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x75,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x76,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x74,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x78,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x78,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x78,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x78,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x78,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x78,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x78,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x78,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x79,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x7a,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_b_cl_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x78,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x7c,0xf1,0x01,0xfc,0x62,0x00]
+
+image_gather4_c_lz_o v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x7c,0xf1,0xff,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x7c,0xf1,0x01,0x05,0x63,0x00]
+
+image_gather4_c_lz_o v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x7c,0xf1,0x01,0x05,0x77,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x7c,0xf1,0x01,0x05,0x82,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x7c,0xf1,0x01,0x05,0x02,0x03]
+
+image_gather4_c_lz_o v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x7c,0xf1,0x01,0x05,0xc2,0x03]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 glc
+// CHECK: [0x00,0x21,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 slc
+// CHECK: [0x00,0x01,0x7c,0xf3,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v[5:6], v1, s[8:15], s[12:15] dmask:0x1 tfe
+// CHECK: [0x00,0x01,0x7d,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 lwe
+// CHECK: [0x00,0x01,0x7e,0xf1,0x01,0x05,0x62,0x00]
+
+image_gather4_c_lz_o v5, v1, s[8:15], s[12:15] dmask:0x1 da
+// CHECK: [0x00,0x41,0x7c,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v5, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v252, v1, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf1,0x01,0xfc,0x62,0x00]
+
+image_get_lod v5, v255, s[8:15], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf1,0xff,0x05,0x62,0x00]
+
+image_get_lod v5, v1, s[12:19], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf1,0x01,0x05,0x63,0x00]
+
+image_get_lod v5, v1, s[92:99], s[12:15] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf1,0x01,0x05,0x77,0x00]
+
+image_get_lod v5, v1, s[8:15], s[16:19] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf1,0x01,0x05,0x82,0x00]
+
+image_get_lod v5, v1, s[8:15], s[96:99] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf1,0x01,0x05,0x02,0x03]
+
+image_get_lod v5, v1, s[8:15], ttmp[8:11] dmask:0x1
+// CHECK: [0x00,0x01,0x80,0xf1,0x01,0x05,0xc2,0x03]
+
+image_get_lod v5, v1, s[8:15], s[12:15] dmask:0x2
+// CHECK: [0x00,0x02,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:6], v1, s[8:15], s[12:15] dmask:0x3
+// CHECK: [0x00,0x03,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v5, v1, s[8:15], s[12:15] dmask:0x4
+// CHECK: [0x00,0x04,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:6], v1, s[8:15], s[12:15] dmask:0x5
+// CHECK: [0x00,0x05,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:6], v1, s[8:15], s[12:15] dmask:0x6
+// CHECK: [0x00,0x06,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:7], v1, s[8:15], s[12:15] dmask:0x7
+// CHECK: [0x00,0x07,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v5, v1, s[8:15], s[12:15] dmask:0x8
+// CHECK: [0x00,0x08,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:6], v1, s[8:15], s[12:15] dmask:0x9
+// CHECK: [0x00,0x09,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:6], v1, s[8:15], s[12:15] dmask:0xa
+// CHECK: [0x00,0x0a,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:7], v1, s[8:15], s[12:15] dmask:0xb
+// CHECK: [0x00,0x0b,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:6], v1, s[8:15], s[12:15] dmask:0xc
+// CHECK: [0x00,0x0c,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:7], v1, s[8:15], s[12:15] dmask:0xd
+// CHECK: [0x00,0x0d,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:7], v1, s[8:15], s[12:15] dmask:0xe
+// CHECK: [0x00,0x0e,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v[5:8], v1, s[8:15], s[12:15] dmask:0xf
+// CHECK: [0x00,0x0f,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v5, v1, s[8:15], s[12:15] dmask:0x0
+// CHECK: [0x00,0x00,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+image_get_lod v5, v1, s[8:15], s[12:15] dmask:0x1 unorm
+// CHECK: [0x00,0x11,0x80,0xf1,0x01,0x05,0x62,0x00]
+
+buffer_load_format_x v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_x v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0xff,0x02,0x03]
+
+buffer_load_format_x v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_format_x v5, off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x18,0x03]
+
+buffer_load_format_x v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_format_x v5, off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x02,0x65]
+
+buffer_load_format_x v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_format_x v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_format_x v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_format_x v5, off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_format_x v5, off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_format_x v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x00,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_x v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x00,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_x v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x00,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_x v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x00,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_x v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x00,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_x v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x00,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_x v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x02,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xy v[5:6], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xy v[254:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0xfe,0x02,0x03]
+
+buffer_load_format_xy v[5:6], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_format_xy v[5:6], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x18,0x03]
+
+buffer_load_format_xy v[5:6], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_format_xy v[5:6], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x02,0x65]
+
+buffer_load_format_xy v[5:6], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_format_xy v[5:6], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_format_xy v[5:6], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_format_xy v[5:6], off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_format_xy v[5:6], off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_format_xy v[5:6], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x04,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xy v[5:6], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x04,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xy v[5:6], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x04,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xy v[5:6], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x04,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xy v[5:6], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x04,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xy v[5:6], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x04,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xy v[5:6], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x06,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyz v[253:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0xfd,0x02,0x03]
+
+buffer_load_format_xyz v[5:7], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_format_xyz v[5:7], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x18,0x03]
+
+buffer_load_format_xyz v[5:7], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x02,0x65]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_format_xyz v[5:7], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x08,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyz v[5:7], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x08,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x08,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x08,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x08,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x08,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyz v[5:7], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x0a,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyzw v[252:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0xfc,0x02,0x03]
+
+buffer_load_format_xyzw v[5:8], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_format_xyzw v[5:8], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x18,0x03]
+
+buffer_load_format_xyzw v[5:8], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x02,0x65]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_format_xyzw v[5:8], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x0c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyzw v[5:8], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x0c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x0c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x0c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x0c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x0c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_format_xyzw v[5:8], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x0e,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_store_format_x v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_x v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_store_format_x v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_format_x v1, off, s[96:99], s4 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x18,0x04]
+
+buffer_store_format_x v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_format_x v1, off, s[12:15], s101 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x03,0x65]
+
+buffer_store_format_x v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_format_x v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_format_x v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_format_x v1, off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_format_x v1, off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_format_x v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x10,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_x v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x10,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_x v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0x10,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_x v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x10,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_x v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x10,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_x v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x10,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_x v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x12,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xy v[1:2], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xy v[254:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0xfe,0x03,0x04]
+
+buffer_store_format_xy v[1:2], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_format_xy v[1:2], off, s[96:99], s4 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x18,0x04]
+
+buffer_store_format_xy v[1:2], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_format_xy v[1:2], off, s[12:15], s101 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x03,0x65]
+
+buffer_store_format_xy v[1:2], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_format_xy v[1:2], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_format_xy v[1:2], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_format_xy v[1:2], off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_format_xy v[1:2], off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_format_xy v[1:2], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x14,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xy v[1:2], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x14,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xy v[1:2], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x14,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xy v[1:2], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x14,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xy v[1:2], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x14,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xy v[1:2], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x14,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xy v[1:2], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x16,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyz v[253:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0xfd,0x03,0x04]
+
+buffer_store_format_xyz v[1:3], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_format_xyz v[1:3], off, s[96:99], s4 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x18,0x04]
+
+buffer_store_format_xyz v[1:3], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], s101 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x03,0x65]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_format_xyz v[1:3], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x18,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyz v[1:3], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x18,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x18,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x18,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x18,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x18,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyz v[1:3], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x1a,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyzw v[252:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0xfc,0x03,0x04]
+
+buffer_store_format_xyzw v[1:4], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_format_xyzw v[1:4], off, s[96:99], s4 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x18,0x04]
+
+buffer_store_format_xyzw v[1:4], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], s101 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x03,0x65]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_format_xyzw v[1:4], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x1c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyzw v[1:4], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x1c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x1c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x1c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x1c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x1c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_format_xyzw v[1:4], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x1e,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_load_ubyte v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ubyte v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe0,0x00,0xff,0x02,0x03]
+
+buffer_load_ubyte v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_ubyte v5, off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe0,0x00,0x05,0x18,0x03]
+
+buffer_load_ubyte v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_ubyte v5, off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe0,0x00,0x05,0x02,0x65]
+
+buffer_load_ubyte v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_ubyte v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_ubyte v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_ubyte v5, off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_ubyte v5, off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x40,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_ubyte v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x40,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ubyte v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x40,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ubyte v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x40,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ubyte v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x40,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ubyte v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x40,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ubyte v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x40,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ubyte v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x42,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sbyte v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sbyte v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe0,0x00,0xff,0x02,0x03]
+
+buffer_load_sbyte v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_sbyte v5, off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe0,0x00,0x05,0x18,0x03]
+
+buffer_load_sbyte v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_sbyte v5, off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe0,0x00,0x05,0x02,0x65]
+
+buffer_load_sbyte v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_sbyte v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_sbyte v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_sbyte v5, off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_sbyte v5, off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x44,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_sbyte v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x44,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sbyte v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x44,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sbyte v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x44,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sbyte v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x44,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sbyte v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x44,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sbyte v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x44,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sbyte v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x46,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ushort v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ushort v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe0,0x00,0xff,0x02,0x03]
+
+buffer_load_ushort v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_ushort v5, off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe0,0x00,0x05,0x18,0x03]
+
+buffer_load_ushort v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_ushort v5, off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe0,0x00,0x05,0x02,0x65]
+
+buffer_load_ushort v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_ushort v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_ushort v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_ushort v5, off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_ushort v5, off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x48,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_ushort v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x48,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ushort v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x48,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ushort v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x48,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ushort v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x48,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ushort v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x48,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ushort v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x48,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_ushort v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x4a,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sshort v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sshort v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe0,0x00,0xff,0x02,0x03]
+
+buffer_load_sshort v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_sshort v5, off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe0,0x00,0x05,0x18,0x03]
+
+buffer_load_sshort v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_sshort v5, off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe0,0x00,0x05,0x02,0x65]
+
+buffer_load_sshort v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_sshort v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_sshort v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_sshort v5, off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_sshort v5, off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x4c,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_sshort v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x4c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sshort v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x4c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sshort v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x4c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sshort v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x4c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sshort v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x4c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sshort v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x4c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_sshort v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x4e,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dword v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x50,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dword v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x50,0xe0,0x00,0xff,0x02,0x03]
+
+buffer_load_dword v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x50,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_dword v5, off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x50,0xe0,0x00,0x05,0x18,0x03]
+
+buffer_load_dword v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x50,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_dword v5, off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x50,0xe0,0x00,0x05,0x02,0x65]
+
+buffer_load_dword v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x50,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_dword v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x50,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_dword v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x50,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_dword v5, off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x50,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_dword v5, off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x50,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_dword v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x50,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dword v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x50,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dword v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x50,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dword v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x50,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dword v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x50,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dword v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x50,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dword v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x52,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx2 v[254:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe0,0x00,0xfe,0x02,0x03]
+
+buffer_load_dwordx2 v[5:6], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_dwordx2 v[5:6], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe0,0x00,0x05,0x18,0x03]
+
+buffer_load_dwordx2 v[5:6], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe0,0x00,0x05,0x02,0x65]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x54,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_dwordx2 v[5:6], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x54,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx2 v[5:6], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x54,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x54,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x54,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x54,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x54,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx2 v[5:6], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x56,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx3 v[253:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe0,0x00,0xfd,0x02,0x03]
+
+buffer_load_dwordx3 v[5:7], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_dwordx3 v[5:7], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe0,0x00,0x05,0x18,0x03]
+
+buffer_load_dwordx3 v[5:7], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe0,0x00,0x05,0x02,0x65]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x58,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_dwordx3 v[5:7], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x58,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx3 v[5:7], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x58,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x58,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x58,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x58,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x58,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx3 v[5:7], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x5a,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx4 v[252:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe0,0x00,0xfc,0x02,0x03]
+
+buffer_load_dwordx4 v[5:8], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe0,0x00,0x05,0x03,0x03]
+
+buffer_load_dwordx4 v[5:8], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe0,0x00,0x05,0x18,0x03]
+
+buffer_load_dwordx4 v[5:8], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe0,0x00,0x05,0x1e,0x03]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe0,0x00,0x05,0x02,0x65]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe0,0x00,0x05,0x02,0x7c]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe0,0x00,0x05,0x02,0x80]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe0,0x00,0x05,0x02,0xc1]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe0,0x00,0x05,0x02,0xf0]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x5c,0xe0,0x00,0x05,0x02,0xf7]
+
+buffer_load_dwordx4 v[5:8], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x5c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx4 v[5:8], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x5c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x5c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x5c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x5c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x5c,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_load_dwordx4 v[5:8], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x5e,0xe0,0x00,0x05,0x02,0x03]
+
+buffer_store_byte v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_byte v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_store_byte v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_byte v1, off, s[96:99], s4 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x18,0x04]
+
+buffer_store_byte v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_byte v1, off, s[12:15], s101 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x03,0x65]
+
+buffer_store_byte v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_byte v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_byte v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_byte v1, off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_byte v1, off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x60,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_byte v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x60,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_byte v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x60,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_byte v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0x60,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_byte v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x60,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_byte v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x60,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_byte v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x60,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_byte v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x62,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_short v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_short v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_store_short v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_short v1, off, s[96:99], s4 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x18,0x04]
+
+buffer_store_short v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_short v1, off, s[12:15], s101 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x03,0x65]
+
+buffer_store_short v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_short v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_short v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_short v1, off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_short v1, off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x68,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_short v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x68,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_short v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x68,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_short v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0x68,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_short v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x68,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_short v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x68,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_short v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x68,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_short v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x6a,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dword v1, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dword v255, off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0xff,0x03,0x04]
+
+buffer_store_dword v1, off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_dword v1, off, s[96:99], s4 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x18,0x04]
+
+buffer_store_dword v1, off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_dword v1, off, s[12:15], s101 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x03,0x65]
+
+buffer_store_dword v1, off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_dword v1, off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_dword v1, off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_dword v1, off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_dword v1, off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x70,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_dword v1, v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x70,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dword v1, v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x70,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dword v1, off, s[12:15], s4
+// CHECK: [0x00,0x00,0x70,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dword v1, off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x70,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dword v1, off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x70,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dword v1, off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x70,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dword v1, off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x72,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx2 v[254:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0xfe,0x03,0x04]
+
+buffer_store_dwordx2 v[1:2], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_dwordx2 v[1:2], off, s[96:99], s4 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x18,0x04]
+
+buffer_store_dwordx2 v[1:2], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], s101 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x03,0x65]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x74,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_dwordx2 v[1:2], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x74,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx2 v[1:2], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x74,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x74,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x74,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x74,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x74,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx2 v[1:2], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x76,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx3 v[253:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0xfd,0x03,0x04]
+
+buffer_store_dwordx3 v[1:3], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_dwordx3 v[1:3], off, s[96:99], s4 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x18,0x04]
+
+buffer_store_dwordx3 v[1:3], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], s101 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x03,0x65]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x78,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_dwordx3 v[1:3], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x78,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx3 v[1:3], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x78,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x78,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x78,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x78,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x78,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx3 v[1:3], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x7a,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx4 v[252:255], off, s[12:15], s4 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0xfc,0x03,0x04]
+
+buffer_store_dwordx4 v[1:4], off, s[16:19], s4 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x04,0x04]
+
+buffer_store_dwordx4 v[1:4], off, s[96:99], s4 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x18,0x04]
+
+buffer_store_dwordx4 v[1:4], off, ttmp[8:11], s4 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x1e,0x04]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], s101 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x03,0x65]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], m0 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x03,0x7c]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], 0 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x03,0x80]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], -1 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x03,0xc1]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], 0.5 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x03,0xf0]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], -4.0 offset:4095
+// CHECK: [0xff,0x0f,0x7c,0xe0,0x00,0x01,0x03,0xf7]
+
+buffer_store_dwordx4 v[1:4], v0, s[12:15], s4 idxen offset:4095
+// CHECK: [0xff,0x2f,0x7c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx4 v[1:4], v0, s[12:15], s4 offen offset:4095
+// CHECK: [0xff,0x1f,0x7c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], s4
+// CHECK: [0x00,0x00,0x7c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], s4 offset:0
+// CHECK: [0x00,0x00,0x7c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], s4 offset:7
+// CHECK: [0x07,0x00,0x7c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], s4 offset:4095 glc
+// CHECK: [0xff,0x4f,0x7c,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_store_dwordx4 v[1:4], off, s[12:15], s4 offset:4095 slc
+// CHECK: [0xff,0x0f,0x7e,0xe0,0x00,0x01,0x03,0x04]
+
+buffer_wbinvl1
+// CHECK: [0x00,0x00,0xf8,0xe0,0x00,0x00,0x00,0x00]
+
+buffer_wbinvl1_vol
+// CHECK: [0x00,0x00,0xfc,0xe0,0x00,0x00,0x00,0x00]
+
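+// The store cases above toggle each MUBUF modifier in isolation. Reading
+// the expected bytes little-endian: offset sits in bits [11:0], offen is
+// bit 12, idxen bit 13, glc bit 14, and slc bit 17, which is why slc is
+// the one modifier that alters the third byte rather than the first two.
+// The atomic cases below repeat the same operand sweep for each opcode.
+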
+buffer_atomic_swap v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_swap v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe1,0x00,0xff,0x02,0x03]
+
+buffer_atomic_swap v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_swap v5, off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_swap v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_swap v5, off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_swap v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_swap v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_swap v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x00,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_swap v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x00,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_swap v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x00,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_swap v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x00,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_swap v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x00,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_swap v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x00,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_swap v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x00,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_swap v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x02,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_cmpswap v[5:6], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_cmpswap v[254:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe1,0x00,0xfe,0x02,0x03]
+
+buffer_atomic_cmpswap v[5:6], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_cmpswap v[5:6], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_cmpswap v[5:6], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_cmpswap v[5:6], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_cmpswap v[5:6], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_cmpswap v[5:6], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_cmpswap v[5:6], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x04,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_cmpswap v[5:6], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x04,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_cmpswap v[5:6], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x04,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_cmpswap v[5:6], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x04,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_cmpswap v[5:6], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x04,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_cmpswap v[5:6], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x04,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_cmpswap v[5:6], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x04,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_cmpswap v[5:6], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x06,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_add v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_add v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe1,0x00,0xff,0x02,0x03]
+
+buffer_atomic_add v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_add v5, off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_add v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_add v5, off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_add v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_add v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_add v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x08,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_add v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x08,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_add v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x08,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_add v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x08,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_add v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x08,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_add v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x08,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_add v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x08,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_add v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x0a,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_sub v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_sub v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe1,0x00,0xff,0x02,0x03]
+
+buffer_atomic_sub v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_sub v5, off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_sub v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_sub v5, off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_sub v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_sub v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_sub v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x0c,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_sub v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x0c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_sub v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x0c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_sub v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x0c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_sub v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x0c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_sub v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x0c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_sub v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x0c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_sub v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x0e,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smin v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smin v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe1,0x00,0xff,0x02,0x03]
+
+buffer_atomic_smin v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_smin v5, off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_smin v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_smin v5, off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_smin v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_smin v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_smin v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x10,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_smin v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x10,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smin v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x10,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smin v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x10,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smin v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x10,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smin v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x10,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smin v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x10,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smin v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x12,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umin v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umin v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe1,0x00,0xff,0x02,0x03]
+
+buffer_atomic_umin v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_umin v5, off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_umin v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_umin v5, off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_umin v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_umin v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_umin v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x14,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_umin v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x14,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umin v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x14,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umin v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x14,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umin v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x14,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umin v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x14,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umin v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x14,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umin v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x16,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smax v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smax v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe1,0x00,0xff,0x02,0x03]
+
+buffer_atomic_smax v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_smax v5, off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_smax v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_smax v5, off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_smax v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_smax v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_smax v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x18,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_smax v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x18,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smax v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x18,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smax v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x18,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smax v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x18,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smax v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x18,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smax v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x18,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smax v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x1a,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umax v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umax v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe1,0x00,0xff,0x02,0x03]
+
+buffer_atomic_umax v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_umax v5, off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_umax v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_umax v5, off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_umax v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_umax v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_umax v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x1c,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_umax v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x1c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umax v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x1c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umax v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x1c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umax v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x1c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umax v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x1c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umax v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x1c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umax v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x1e,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_and v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_and v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe1,0x00,0xff,0x02,0x03]
+
+buffer_atomic_and v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_and v5, off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_and v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_and v5, off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_and v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_and v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_and v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x20,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_and v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x20,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_and v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x20,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_and v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x20,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_and v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x20,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_and v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x20,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_and v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x20,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_and v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x22,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_or v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_or v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe1,0x00,0xff,0x02,0x03]
+
+buffer_atomic_or v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_or v5, off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_or v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_or v5, off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_or v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_or v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_or v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x24,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_or v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x24,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_or v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x24,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_or v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x24,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_or v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x24,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_or v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x24,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_or v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x24,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_or v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x26,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_xor v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_xor v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe1,0x00,0xff,0x02,0x03]
+
+buffer_atomic_xor v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_xor v5, off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_xor v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_xor v5, off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_xor v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_xor v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_xor v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x28,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_xor v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x28,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_xor v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x28,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_xor v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x28,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_xor v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x28,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_xor v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x28,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_xor v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x28,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_xor v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x2a,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_inc v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_inc v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe1,0x00,0xff,0x02,0x03]
+
+buffer_atomic_inc v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_inc v5, off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_inc v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_inc v5, off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_inc v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_inc v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_inc v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x2c,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_inc v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x2c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_inc v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x2c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_inc v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x2c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_inc v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x2c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_inc v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x2c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_inc v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x2c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_inc v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x2e,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_dec v5, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_dec v255, off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe1,0x00,0xff,0x02,0x03]
+
+buffer_atomic_dec v5, off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_dec v5, off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_dec v5, off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_dec v5, off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_dec v5, off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_dec v5, off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_dec v5, off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x30,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_dec v5, v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x30,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_dec v5, v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x30,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_dec v5, off, s[8:11], s3
+// CHECK: [0x00,0x00,0x30,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_dec v5, off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x30,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_dec v5, off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x30,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_dec v5, off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x30,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_dec v5, off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x32,0xe1,0x00,0x05,0x02,0x03]
+
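+// The _x2 atomics below rerun the sweep with 64-bit data in VGPR pairs;
+// buffer_atomic_cmpswap_x2 takes a quad (v[5:8]) since it carries both
+// the data and the compare value.
+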
+buffer_atomic_swap_x2 v[5:6], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x80,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_swap_x2 v[254:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x80,0xe1,0x00,0xfe,0x02,0x03]
+
+buffer_atomic_swap_x2 v[5:6], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x80,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_swap_x2 v[5:6], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x80,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_swap_x2 v[5:6], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x80,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_swap_x2 v[5:6], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x80,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_swap_x2 v[5:6], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x80,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_swap_x2 v[5:6], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x80,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_swap_x2 v[5:6], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x80,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_swap_x2 v[5:6], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x80,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_swap_x2 v[5:6], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x80,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_swap_x2 v[5:6], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x80,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_swap_x2 v[5:6], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x80,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_swap_x2 v[5:6], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x80,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_swap_x2 v[5:6], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x80,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_swap_x2 v[5:6], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x82,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_cmpswap_x2 v[5:8], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x84,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_cmpswap_x2 v[252:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x84,0xe1,0x00,0xfc,0x02,0x03]
+
+buffer_atomic_cmpswap_x2 v[5:8], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x84,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_cmpswap_x2 v[5:8], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x84,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_cmpswap_x2 v[5:8], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x84,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_cmpswap_x2 v[5:8], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x84,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_cmpswap_x2 v[5:8], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x84,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_cmpswap_x2 v[5:8], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x84,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_cmpswap_x2 v[5:8], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x84,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_cmpswap_x2 v[5:8], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x84,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_cmpswap_x2 v[5:8], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x84,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_cmpswap_x2 v[5:8], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x84,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_cmpswap_x2 v[5:8], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x84,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_cmpswap_x2 v[5:8], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x84,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_cmpswap_x2 v[5:8], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x84,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_cmpswap_x2 v[5:8], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x86,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_add_x2 v[5:6], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x88,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_add_x2 v[254:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x88,0xe1,0x00,0xfe,0x02,0x03]
+
+buffer_atomic_add_x2 v[5:6], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x88,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_add_x2 v[5:6], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x88,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_add_x2 v[5:6], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x88,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_add_x2 v[5:6], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x88,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_add_x2 v[5:6], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x88,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_add_x2 v[5:6], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x88,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_add_x2 v[5:6], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x88,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_add_x2 v[5:6], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x88,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_add_x2 v[5:6], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x88,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_add_x2 v[5:6], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x88,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_add_x2 v[5:6], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x88,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_add_x2 v[5:6], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x88,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_add_x2 v[5:6], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x88,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_add_x2 v[5:6], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x8a,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_sub_x2 v[5:6], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x8c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_sub_x2 v[254:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x8c,0xe1,0x00,0xfe,0x02,0x03]
+
+buffer_atomic_sub_x2 v[5:6], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x8c,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_sub_x2 v[5:6], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x8c,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_sub_x2 v[5:6], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x8c,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_sub_x2 v[5:6], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x8c,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_sub_x2 v[5:6], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x8c,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_sub_x2 v[5:6], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x8c,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_sub_x2 v[5:6], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x8c,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_sub_x2 v[5:6], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x8c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_sub_x2 v[5:6], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x8c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_sub_x2 v[5:6], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x8c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_sub_x2 v[5:6], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x8c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_sub_x2 v[5:6], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x8c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_sub_x2 v[5:6], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x8c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_sub_x2 v[5:6], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x8e,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smin_x2 v[5:6], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x90,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smin_x2 v[254:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x90,0xe1,0x00,0xfe,0x02,0x03]
+
+buffer_atomic_smin_x2 v[5:6], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x90,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_smin_x2 v[5:6], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x90,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_smin_x2 v[5:6], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x90,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_smin_x2 v[5:6], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x90,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_smin_x2 v[5:6], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x90,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_smin_x2 v[5:6], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x90,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_smin_x2 v[5:6], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x90,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_smin_x2 v[5:6], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x90,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smin_x2 v[5:6], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x90,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smin_x2 v[5:6], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x90,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smin_x2 v[5:6], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x90,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smin_x2 v[5:6], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x90,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smin_x2 v[5:6], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x90,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smin_x2 v[5:6], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x92,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umin_x2 v[5:6], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x94,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umin_x2 v[254:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x94,0xe1,0x00,0xfe,0x02,0x03]
+
+buffer_atomic_umin_x2 v[5:6], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x94,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_umin_x2 v[5:6], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x94,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_umin_x2 v[5:6], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x94,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_umin_x2 v[5:6], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x94,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_umin_x2 v[5:6], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x94,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_umin_x2 v[5:6], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x94,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_umin_x2 v[5:6], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x94,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_umin_x2 v[5:6], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x94,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umin_x2 v[5:6], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x94,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umin_x2 v[5:6], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x94,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umin_x2 v[5:6], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x94,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umin_x2 v[5:6], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x94,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umin_x2 v[5:6], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x94,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umin_x2 v[5:6], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x96,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smax_x2 v[5:6], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x98,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smax_x2 v[254:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x98,0xe1,0x00,0xfe,0x02,0x03]
+
+buffer_atomic_smax_x2 v[5:6], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x98,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_smax_x2 v[5:6], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x98,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_smax_x2 v[5:6], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x98,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_smax_x2 v[5:6], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x98,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_smax_x2 v[5:6], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x98,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_smax_x2 v[5:6], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x98,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_smax_x2 v[5:6], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x98,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_smax_x2 v[5:6], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x98,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smax_x2 v[5:6], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x98,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smax_x2 v[5:6], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x98,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smax_x2 v[5:6], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x98,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smax_x2 v[5:6], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x98,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smax_x2 v[5:6], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x98,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_smax_x2 v[5:6], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x9a,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umax_x2 v[5:6], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x9c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umax_x2 v[254:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x9c,0xe1,0x00,0xfe,0x02,0x03]
+
+buffer_atomic_umax_x2 v[5:6], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0x9c,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_umax_x2 v[5:6], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0x9c,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_umax_x2 v[5:6], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0x9c,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_umax_x2 v[5:6], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0x9c,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_umax_x2 v[5:6], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0x9c,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_umax_x2 v[5:6], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0x9c,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_umax_x2 v[5:6], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0x9c,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_umax_x2 v[5:6], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0x9c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umax_x2 v[5:6], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0x9c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umax_x2 v[5:6], off, s[8:11], s3
+// CHECK: [0x00,0x00,0x9c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umax_x2 v[5:6], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0x9c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umax_x2 v[5:6], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0x9c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umax_x2 v[5:6], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0x9c,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_umax_x2 v[5:6], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0x9e,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_and_x2 v[5:6], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0xa0,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_and_x2 v[254:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0xa0,0xe1,0x00,0xfe,0x02,0x03]
+
+buffer_atomic_and_x2 v[5:6], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0xa0,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_and_x2 v[5:6], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0xa0,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_and_x2 v[5:6], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0xa0,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_and_x2 v[5:6], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0xa0,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_and_x2 v[5:6], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0xa0,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_and_x2 v[5:6], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0xa0,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_and_x2 v[5:6], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0xa0,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_and_x2 v[5:6], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0xa0,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_and_x2 v[5:6], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0xa0,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_and_x2 v[5:6], off, s[8:11], s3
+// CHECK: [0x00,0x00,0xa0,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_and_x2 v[5:6], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0xa0,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_and_x2 v[5:6], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0xa0,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_and_x2 v[5:6], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0xa0,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_and_x2 v[5:6], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0xa2,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_or_x2 v[5:6], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0xa4,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_or_x2 v[254:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0xa4,0xe1,0x00,0xfe,0x02,0x03]
+
+buffer_atomic_or_x2 v[5:6], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0xa4,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_or_x2 v[5:6], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0xa4,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_or_x2 v[5:6], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0xa4,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_or_x2 v[5:6], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0xa4,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_or_x2 v[5:6], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0xa4,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_or_x2 v[5:6], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0xa4,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_or_x2 v[5:6], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0xa4,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_or_x2 v[5:6], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0xa4,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_or_x2 v[5:6], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0xa4,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_or_x2 v[5:6], off, s[8:11], s3
+// CHECK: [0x00,0x00,0xa4,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_or_x2 v[5:6], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0xa4,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_or_x2 v[5:6], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0xa4,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_or_x2 v[5:6], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0xa4,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_or_x2 v[5:6], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0xa6,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_xor_x2 v[5:6], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0xa8,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_xor_x2 v[254:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0xa8,0xe1,0x00,0xfe,0x02,0x03]
+
+buffer_atomic_xor_x2 v[5:6], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0xa8,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_xor_x2 v[5:6], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0xa8,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_xor_x2 v[5:6], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0xa8,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_xor_x2 v[5:6], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0xa8,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_xor_x2 v[5:6], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0xa8,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_xor_x2 v[5:6], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0xa8,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_xor_x2 v[5:6], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0xa8,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_xor_x2 v[5:6], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0xa8,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_xor_x2 v[5:6], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0xa8,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_xor_x2 v[5:6], off, s[8:11], s3
+// CHECK: [0x00,0x00,0xa8,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_xor_x2 v[5:6], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0xa8,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_xor_x2 v[5:6], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0xa8,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_xor_x2 v[5:6], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0xa8,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_xor_x2 v[5:6], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0xaa,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_inc_x2 v[5:6], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0xac,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_inc_x2 v[254:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0xac,0xe1,0x00,0xfe,0x02,0x03]
+
+buffer_atomic_inc_x2 v[5:6], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0xac,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_inc_x2 v[5:6], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0xac,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_inc_x2 v[5:6], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0xac,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_inc_x2 v[5:6], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0xac,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_inc_x2 v[5:6], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0xac,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_inc_x2 v[5:6], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0xac,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_inc_x2 v[5:6], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0xac,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_inc_x2 v[5:6], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0xac,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_inc_x2 v[5:6], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0xac,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_inc_x2 v[5:6], off, s[8:11], s3
+// CHECK: [0x00,0x00,0xac,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_inc_x2 v[5:6], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0xac,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_inc_x2 v[5:6], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0xac,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_inc_x2 v[5:6], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0xac,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_inc_x2 v[5:6], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0xae,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_dec_x2 v[5:6], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0xb0,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_dec_x2 v[254:255], off, s[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0xb0,0xe1,0x00,0xfe,0x02,0x03]
+
+buffer_atomic_dec_x2 v[5:6], off, s[12:15], s3 offset:4095
+// CHECK: [0xff,0x0f,0xb0,0xe1,0x00,0x05,0x03,0x03]
+
+buffer_atomic_dec_x2 v[5:6], off, s[96:99], s3 offset:4095
+// CHECK: [0xff,0x0f,0xb0,0xe1,0x00,0x05,0x18,0x03]
+
+buffer_atomic_dec_x2 v[5:6], off, ttmp[8:11], s3 offset:4095
+// CHECK: [0xff,0x0f,0xb0,0xe1,0x00,0x05,0x1e,0x03]
+
+buffer_atomic_dec_x2 v[5:6], off, s[8:11], s101 offset:4095
+// CHECK: [0xff,0x0f,0xb0,0xe1,0x00,0x05,0x02,0x65]
+
+buffer_atomic_dec_x2 v[5:6], off, s[8:11], m0 offset:4095
+// CHECK: [0xff,0x0f,0xb0,0xe1,0x00,0x05,0x02,0x7c]
+
+buffer_atomic_dec_x2 v[5:6], off, s[8:11], 0 offset:4095
+// CHECK: [0xff,0x0f,0xb0,0xe1,0x00,0x05,0x02,0x80]
+
+buffer_atomic_dec_x2 v[5:6], off, s[8:11], -1 offset:4095
+// CHECK: [0xff,0x0f,0xb0,0xe1,0x00,0x05,0x02,0xc1]
+
+buffer_atomic_dec_x2 v[5:6], v0, s[8:11], s3 idxen offset:4095
+// CHECK: [0xff,0x2f,0xb0,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_dec_x2 v[5:6], v0, s[8:11], s3 offen offset:4095
+// CHECK: [0xff,0x1f,0xb0,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_dec_x2 v[5:6], off, s[8:11], s3
+// CHECK: [0x00,0x00,0xb0,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_dec_x2 v[5:6], off, s[8:11], s3 offset:0
+// CHECK: [0x00,0x00,0xb0,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_dec_x2 v[5:6], off, s[8:11], s3 offset:7
+// CHECK: [0x07,0x00,0xb0,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_dec_x2 v[5:6], off, s[8:11], s3 offset:4095 glc
+// CHECK: [0xff,0x4f,0xb0,0xe1,0x00,0x05,0x02,0x03]
+
+buffer_atomic_dec_x2 v[5:6], off, s[8:11], s3 offset:4095 slc
+// CHECK: [0xff,0x0f,0xb2,0xe1,0x00,0x05,0x02,0x03]
+
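+// The SMEM loads below sweep sdata, then sbase, then soffset for each
+// width. In the expected bytes, the base register pair encodes as half
+// its register index in bits [5:0], sdata lands in bits [12:6], glc is
+// bit 16, and bit 17 flags a 20-bit immediate offset (the 0x7ffff cases)
+// carried in the second dword.
+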
+s_load_dword s5, s[2:3], s2
+// CHECK: [0x41,0x01,0x00,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dword s101, s[2:3], s2
+// CHECK: [0x41,0x19,0x00,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dword flat_scratch_lo, s[2:3], s2
+// CHECK: [0x81,0x19,0x00,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dword flat_scratch_hi, s[2:3], s2
+// CHECK: [0xc1,0x19,0x00,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dword vcc_lo, s[2:3], s2
+// CHECK: [0x81,0x1a,0x00,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dword vcc_hi, s[2:3], s2
+// CHECK: [0xc1,0x1a,0x00,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dword tba_lo, s[2:3], s2
+// CHECK: [0x01,0x1b,0x00,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dword tba_hi, s[2:3], s2
+// CHECK: [0x41,0x1b,0x00,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dword tma_lo, s[2:3], s2
+// CHECK: [0x81,0x1b,0x00,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dword tma_hi, s[2:3], s2
+// CHECK: [0xc1,0x1b,0x00,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dword ttmp11, s[2:3], s2
+// CHECK: [0xc1,0x1e,0x00,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dword s5, s[4:5], s2
+// CHECK: [0x42,0x01,0x00,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dword s5, s[100:101], s2
+// CHECK: [0x72,0x01,0x00,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dword s5, flat_scratch, s2
+// CHECK: [0x73,0x01,0x00,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dword s5, vcc, s2
+// CHECK: [0x75,0x01,0x00,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dword s5, tba, s2
+// CHECK: [0x76,0x01,0x00,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dword s5, tma, s2
+// CHECK: [0x77,0x01,0x00,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dword s5, ttmp[10:11], s2
+// CHECK: [0x7d,0x01,0x00,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dword s5, s[2:3], s101
+// CHECK: [0x41,0x01,0x00,0xc0,0x65,0x00,0x00,0x00]
+
+s_load_dword s5, s[2:3], flat_scratch_lo
+// CHECK: [0x41,0x01,0x00,0xc0,0x66,0x00,0x00,0x00]
+
+s_load_dword s5, s[2:3], flat_scratch_hi
+// CHECK: [0x41,0x01,0x00,0xc0,0x67,0x00,0x00,0x00]
+
+s_load_dword s5, s[2:3], vcc_lo
+// CHECK: [0x41,0x01,0x00,0xc0,0x6a,0x00,0x00,0x00]
+
+s_load_dword s5, s[2:3], vcc_hi
+// CHECK: [0x41,0x01,0x00,0xc0,0x6b,0x00,0x00,0x00]
+
+s_load_dword s5, s[2:3], tba_lo
+// CHECK: [0x41,0x01,0x00,0xc0,0x6c,0x00,0x00,0x00]
+
+s_load_dword s5, s[2:3], tba_hi
+// CHECK: [0x41,0x01,0x00,0xc0,0x6d,0x00,0x00,0x00]
+
+s_load_dword s5, s[2:3], tma_lo
+// CHECK: [0x41,0x01,0x00,0xc0,0x6e,0x00,0x00,0x00]
+
+s_load_dword s5, s[2:3], tma_hi
+// CHECK: [0x41,0x01,0x00,0xc0,0x6f,0x00,0x00,0x00]
+
+s_load_dword s5, s[2:3], ttmp11
+// CHECK: [0x41,0x01,0x00,0xc0,0x7b,0x00,0x00,0x00]
+
+s_load_dword s5, s[2:3], m0
+// CHECK: [0x41,0x01,0x00,0xc0,0x7c,0x00,0x00,0x00]
+
+s_load_dword s5, s[2:3], 0x7ffff
+// CHECK: [0x41,0x01,0x02,0xc0,0xff,0xff,0x07,0x00]
+
+s_load_dword s5, s[2:3], s2 glc
+// CHECK: [0x41,0x01,0x01,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], s[2:3], s2
+// CHECK: [0x81,0x02,0x04,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx2 s[12:13], s[2:3], s2
+// CHECK: [0x01,0x03,0x04,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx2 s[100:101], s[2:3], s2
+// CHECK: [0x01,0x19,0x04,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx2 flat_scratch, s[2:3], s2
+// CHECK: [0x81,0x19,0x04,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx2 vcc, s[2:3], s2
+// CHECK: [0x81,0x1a,0x04,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx2 tba, s[2:3], s2
+// CHECK: [0x01,0x1b,0x04,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx2 tma, s[2:3], s2
+// CHECK: [0x81,0x1b,0x04,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx2 ttmp[10:11], s[2:3], s2
+// CHECK: [0x81,0x1e,0x04,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], s[4:5], s2
+// CHECK: [0x82,0x02,0x04,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], s[100:101], s2
+// CHECK: [0xb2,0x02,0x04,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], flat_scratch, s2
+// CHECK: [0xb3,0x02,0x04,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], vcc, s2
+// CHECK: [0xb5,0x02,0x04,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], tba, s2
+// CHECK: [0xb6,0x02,0x04,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], tma, s2
+// CHECK: [0xb7,0x02,0x04,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], ttmp[10:11], s2
+// CHECK: [0xbd,0x02,0x04,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], s[2:3], s101
+// CHECK: [0x81,0x02,0x04,0xc0,0x65,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], s[2:3], flat_scratch_lo
+// CHECK: [0x81,0x02,0x04,0xc0,0x66,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], s[2:3], flat_scratch_hi
+// CHECK: [0x81,0x02,0x04,0xc0,0x67,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], s[2:3], vcc_lo
+// CHECK: [0x81,0x02,0x04,0xc0,0x6a,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], s[2:3], vcc_hi
+// CHECK: [0x81,0x02,0x04,0xc0,0x6b,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], s[2:3], tba_lo
+// CHECK: [0x81,0x02,0x04,0xc0,0x6c,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], s[2:3], tba_hi
+// CHECK: [0x81,0x02,0x04,0xc0,0x6d,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], s[2:3], tma_lo
+// CHECK: [0x81,0x02,0x04,0xc0,0x6e,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], s[2:3], tma_hi
+// CHECK: [0x81,0x02,0x04,0xc0,0x6f,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], s[2:3], ttmp11
+// CHECK: [0x81,0x02,0x04,0xc0,0x7b,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], s[2:3], m0
+// CHECK: [0x81,0x02,0x04,0xc0,0x7c,0x00,0x00,0x00]
+
+s_load_dwordx2 s[10:11], s[2:3], 0x7ffff
+// CHECK: [0x81,0x02,0x06,0xc0,0xff,0xff,0x07,0x00]
+
+s_load_dwordx2 s[10:11], s[2:3], s2 glc
+// CHECK: [0x81,0x02,0x05,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], s[2:3], s2
+// CHECK: [0x01,0x05,0x08,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx4 s[24:27], s[2:3], s2
+// CHECK: [0x01,0x06,0x08,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx4 s[96:99], s[2:3], s2
+// CHECK: [0x01,0x18,0x08,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx4 ttmp[8:11], s[2:3], s2
+// CHECK: [0x01,0x1e,0x08,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], s[4:5], s2
+// CHECK: [0x02,0x05,0x08,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], s[100:101], s2
+// CHECK: [0x32,0x05,0x08,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], flat_scratch, s2
+// CHECK: [0x33,0x05,0x08,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], vcc, s2
+// CHECK: [0x35,0x05,0x08,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], tba, s2
+// CHECK: [0x36,0x05,0x08,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], tma, s2
+// CHECK: [0x37,0x05,0x08,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], ttmp[10:11], s2
+// CHECK: [0x3d,0x05,0x08,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], s[2:3], s101
+// CHECK: [0x01,0x05,0x08,0xc0,0x65,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], s[2:3], flat_scratch_lo
+// CHECK: [0x01,0x05,0x08,0xc0,0x66,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], s[2:3], flat_scratch_hi
+// CHECK: [0x01,0x05,0x08,0xc0,0x67,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], s[2:3], vcc_lo
+// CHECK: [0x01,0x05,0x08,0xc0,0x6a,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], s[2:3], vcc_hi
+// CHECK: [0x01,0x05,0x08,0xc0,0x6b,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], s[2:3], tba_lo
+// CHECK: [0x01,0x05,0x08,0xc0,0x6c,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], s[2:3], tba_hi
+// CHECK: [0x01,0x05,0x08,0xc0,0x6d,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], s[2:3], tma_lo
+// CHECK: [0x01,0x05,0x08,0xc0,0x6e,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], s[2:3], tma_hi
+// CHECK: [0x01,0x05,0x08,0xc0,0x6f,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], s[2:3], ttmp11
+// CHECK: [0x01,0x05,0x08,0xc0,0x7b,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], s[2:3], m0
+// CHECK: [0x01,0x05,0x08,0xc0,0x7c,0x00,0x00,0x00]
+
+s_load_dwordx4 s[20:23], s[2:3], 0x7ffff
+// CHECK: [0x01,0x05,0x0a,0xc0,0xff,0xff,0x07,0x00]
+
+s_load_dwordx4 s[20:23], s[2:3], s2 glc
+// CHECK: [0x01,0x05,0x09,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], s[2:3], s2
+// CHECK: [0x01,0x05,0x0c,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx8 s[24:31], s[2:3], s2
+// CHECK: [0x01,0x06,0x0c,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx8 s[92:99], s[2:3], s2
+// CHECK: [0x01,0x17,0x0c,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], s[4:5], s2
+// CHECK: [0x02,0x05,0x0c,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], s[100:101], s2
+// CHECK: [0x32,0x05,0x0c,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], flat_scratch, s2
+// CHECK: [0x33,0x05,0x0c,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], vcc, s2
+// CHECK: [0x35,0x05,0x0c,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], tba, s2
+// CHECK: [0x36,0x05,0x0c,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], tma, s2
+// CHECK: [0x37,0x05,0x0c,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], ttmp[10:11], s2
+// CHECK: [0x3d,0x05,0x0c,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], s[2:3], s101
+// CHECK: [0x01,0x05,0x0c,0xc0,0x65,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], s[2:3], flat_scratch_lo
+// CHECK: [0x01,0x05,0x0c,0xc0,0x66,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], s[2:3], flat_scratch_hi
+// CHECK: [0x01,0x05,0x0c,0xc0,0x67,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], s[2:3], vcc_lo
+// CHECK: [0x01,0x05,0x0c,0xc0,0x6a,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], s[2:3], vcc_hi
+// CHECK: [0x01,0x05,0x0c,0xc0,0x6b,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], s[2:3], tba_lo
+// CHECK: [0x01,0x05,0x0c,0xc0,0x6c,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], s[2:3], tba_hi
+// CHECK: [0x01,0x05,0x0c,0xc0,0x6d,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], s[2:3], tma_lo
+// CHECK: [0x01,0x05,0x0c,0xc0,0x6e,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], s[2:3], tma_hi
+// CHECK: [0x01,0x05,0x0c,0xc0,0x6f,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], s[2:3], ttmp11
+// CHECK: [0x01,0x05,0x0c,0xc0,0x7b,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], s[2:3], m0
+// CHECK: [0x01,0x05,0x0c,0xc0,0x7c,0x00,0x00,0x00]
+
+s_load_dwordx8 s[20:27], s[2:3], 0x7ffff
+// CHECK: [0x01,0x05,0x0e,0xc0,0xff,0xff,0x07,0x00]
+
+s_load_dwordx8 s[20:27], s[2:3], s2 glc
+// CHECK: [0x01,0x05,0x0d,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], s[2:3], s2
+// CHECK: [0x01,0x05,0x10,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx16 s[24:39], s[2:3], s2
+// CHECK: [0x01,0x06,0x10,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx16 s[84:99], s[2:3], s2
+// CHECK: [0x01,0x15,0x10,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], s[4:5], s2
+// CHECK: [0x02,0x05,0x10,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], s[100:101], s2
+// CHECK: [0x32,0x05,0x10,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], flat_scratch, s2
+// CHECK: [0x33,0x05,0x10,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], vcc, s2
+// CHECK: [0x35,0x05,0x10,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], tba, s2
+// CHECK: [0x36,0x05,0x10,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], tma, s2
+// CHECK: [0x37,0x05,0x10,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], ttmp[10:11], s2
+// CHECK: [0x3d,0x05,0x10,0xc0,0x02,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], s[2:3], s101
+// CHECK: [0x01,0x05,0x10,0xc0,0x65,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], s[2:3], flat_scratch_lo
+// CHECK: [0x01,0x05,0x10,0xc0,0x66,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], s[2:3], flat_scratch_hi
+// CHECK: [0x01,0x05,0x10,0xc0,0x67,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], s[2:3], vcc_lo
+// CHECK: [0x01,0x05,0x10,0xc0,0x6a,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], s[2:3], vcc_hi
+// CHECK: [0x01,0x05,0x10,0xc0,0x6b,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], s[2:3], tba_lo
+// CHECK: [0x01,0x05,0x10,0xc0,0x6c,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], s[2:3], tba_hi
+// CHECK: [0x01,0x05,0x10,0xc0,0x6d,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], s[2:3], tma_lo
+// CHECK: [0x01,0x05,0x10,0xc0,0x6e,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], s[2:3], tma_hi
+// CHECK: [0x01,0x05,0x10,0xc0,0x6f,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], s[2:3], ttmp11
+// CHECK: [0x01,0x05,0x10,0xc0,0x7b,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], s[2:3], m0
+// CHECK: [0x01,0x05,0x10,0xc0,0x7c,0x00,0x00,0x00]
+
+s_load_dwordx16 s[20:35], s[2:3], 0x7ffff
+// CHECK: [0x01,0x05,0x12,0xc0,0xff,0xff,0x07,0x00]
+
+s_load_dwordx16 s[20:35], s[2:3], s2 glc
+// CHECK: [0x01,0x05,0x11,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dword s5, s[4:7], s2
+// CHECK: [0x42,0x01,0x20,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dword s101, s[4:7], s2
+// CHECK: [0x42,0x19,0x20,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dword flat_scratch_lo, s[4:7], s2
+// CHECK: [0x82,0x19,0x20,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dword flat_scratch_hi, s[4:7], s2
+// CHECK: [0xc2,0x19,0x20,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dword vcc_lo, s[4:7], s2
+// CHECK: [0x82,0x1a,0x20,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dword vcc_hi, s[4:7], s2
+// CHECK: [0xc2,0x1a,0x20,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dword tba_lo, s[4:7], s2
+// CHECK: [0x02,0x1b,0x20,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dword tba_hi, s[4:7], s2
+// CHECK: [0x42,0x1b,0x20,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dword tma_lo, s[4:7], s2
+// CHECK: [0x82,0x1b,0x20,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dword tma_hi, s[4:7], s2
+// CHECK: [0xc2,0x1b,0x20,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dword ttmp11, s[4:7], s2
+// CHECK: [0xc2,0x1e,0x20,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dword s5, s[8:11], s2
+// CHECK: [0x44,0x01,0x20,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dword s5, s[96:99], s2
+// CHECK: [0x70,0x01,0x20,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dword s5, ttmp[8:11], s2
+// CHECK: [0x7c,0x01,0x20,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dword s5, s[4:7], s101
+// CHECK: [0x42,0x01,0x20,0xc0,0x65,0x00,0x00,0x00]
+
+s_buffer_load_dword s5, s[4:7], flat_scratch_lo
+// CHECK: [0x42,0x01,0x20,0xc0,0x66,0x00,0x00,0x00]
+
+s_buffer_load_dword s5, s[4:7], flat_scratch_hi
+// CHECK: [0x42,0x01,0x20,0xc0,0x67,0x00,0x00,0x00]
+
+s_buffer_load_dword s5, s[4:7], vcc_lo
+// CHECK: [0x42,0x01,0x20,0xc0,0x6a,0x00,0x00,0x00]
+
+s_buffer_load_dword s5, s[4:7], vcc_hi
+// CHECK: [0x42,0x01,0x20,0xc0,0x6b,0x00,0x00,0x00]
+
+s_buffer_load_dword s5, s[4:7], tba_lo
+// CHECK: [0x42,0x01,0x20,0xc0,0x6c,0x00,0x00,0x00]
+
+s_buffer_load_dword s5, s[4:7], tba_hi
+// CHECK: [0x42,0x01,0x20,0xc0,0x6d,0x00,0x00,0x00]
+
+s_buffer_load_dword s5, s[4:7], tma_lo
+// CHECK: [0x42,0x01,0x20,0xc0,0x6e,0x00,0x00,0x00]
+
+s_buffer_load_dword s5, s[4:7], tma_hi
+// CHECK: [0x42,0x01,0x20,0xc0,0x6f,0x00,0x00,0x00]
+
+s_buffer_load_dword s5, s[4:7], ttmp11
+// CHECK: [0x42,0x01,0x20,0xc0,0x7b,0x00,0x00,0x00]
+
+s_buffer_load_dword s5, s[4:7], m0
+// CHECK: [0x42,0x01,0x20,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_load_dword s5, s[4:7], 0x7ffff
+// CHECK: [0x42,0x01,0x22,0xc0,0xff,0xff,0x07,0x00]
+
+s_buffer_load_dword s5, s[4:7], s2 glc
+// CHECK: [0x42,0x01,0x21,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], s2
+// CHECK: [0x82,0x02,0x24,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 s[12:13], s[4:7], s2
+// CHECK: [0x02,0x03,0x24,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 s[100:101], s[4:7], s2
+// CHECK: [0x02,0x19,0x24,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 flat_scratch, s[4:7], s2
+// CHECK: [0x82,0x19,0x24,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 vcc, s[4:7], s2
+// CHECK: [0x82,0x1a,0x24,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 tba, s[4:7], s2
+// CHECK: [0x02,0x1b,0x24,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 tma, s[4:7], s2
+// CHECK: [0x82,0x1b,0x24,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 ttmp[10:11], s[4:7], s2
+// CHECK: [0x82,0x1e,0x24,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 s[10:11], s[8:11], s2
+// CHECK: [0x84,0x02,0x24,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 s[10:11], s[96:99], s2
+// CHECK: [0xb0,0x02,0x24,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 s[10:11], ttmp[8:11], s2
+// CHECK: [0xbc,0x02,0x24,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], s101
+// CHECK: [0x82,0x02,0x24,0xc0,0x65,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], flat_scratch_lo
+// CHECK: [0x82,0x02,0x24,0xc0,0x66,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], flat_scratch_hi
+// CHECK: [0x82,0x02,0x24,0xc0,0x67,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], vcc_lo
+// CHECK: [0x82,0x02,0x24,0xc0,0x6a,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], vcc_hi
+// CHECK: [0x82,0x02,0x24,0xc0,0x6b,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], tba_lo
+// CHECK: [0x82,0x02,0x24,0xc0,0x6c,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], tba_hi
+// CHECK: [0x82,0x02,0x24,0xc0,0x6d,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], tma_lo
+// CHECK: [0x82,0x02,0x24,0xc0,0x6e,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], tma_hi
+// CHECK: [0x82,0x02,0x24,0xc0,0x6f,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], ttmp11
+// CHECK: [0x82,0x02,0x24,0xc0,0x7b,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], m0
+// CHECK: [0x82,0x02,0x24,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], 0x7ffff
+// CHECK: [0x82,0x02,0x26,0xc0,0xff,0xff,0x07,0x00]
+
+s_buffer_load_dwordx2 s[10:11], s[4:7], s2 glc
+// CHECK: [0x82,0x02,0x25,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], s2
+// CHECK: [0x02,0x05,0x28,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 s[24:27], s[4:7], s2
+// CHECK: [0x02,0x06,0x28,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 s[96:99], s[4:7], s2
+// CHECK: [0x02,0x18,0x28,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 ttmp[8:11], s[4:7], s2
+// CHECK: [0x02,0x1e,0x28,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 s[20:23], s[8:11], s2
+// CHECK: [0x04,0x05,0x28,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 s[20:23], s[96:99], s2
+// CHECK: [0x30,0x05,0x28,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 s[20:23], ttmp[8:11], s2
+// CHECK: [0x3c,0x05,0x28,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], s101
+// CHECK: [0x02,0x05,0x28,0xc0,0x65,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], flat_scratch_lo
+// CHECK: [0x02,0x05,0x28,0xc0,0x66,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], flat_scratch_hi
+// CHECK: [0x02,0x05,0x28,0xc0,0x67,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], vcc_lo
+// CHECK: [0x02,0x05,0x28,0xc0,0x6a,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], vcc_hi
+// CHECK: [0x02,0x05,0x28,0xc0,0x6b,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], tba_lo
+// CHECK: [0x02,0x05,0x28,0xc0,0x6c,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], tba_hi
+// CHECK: [0x02,0x05,0x28,0xc0,0x6d,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], tma_lo
+// CHECK: [0x02,0x05,0x28,0xc0,0x6e,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], tma_hi
+// CHECK: [0x02,0x05,0x28,0xc0,0x6f,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], ttmp11
+// CHECK: [0x02,0x05,0x28,0xc0,0x7b,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], m0
+// CHECK: [0x02,0x05,0x28,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], 0x7ffff
+// CHECK: [0x02,0x05,0x2a,0xc0,0xff,0xff,0x07,0x00]
+
+s_buffer_load_dwordx4 s[20:23], s[4:7], s2 glc
+// CHECK: [0x02,0x05,0x29,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], s2
+// CHECK: [0x02,0x05,0x2c,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx8 s[24:31], s[4:7], s2
+// CHECK: [0x02,0x06,0x2c,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx8 s[92:99], s[4:7], s2
+// CHECK: [0x02,0x17,0x2c,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx8 s[20:27], s[8:11], s2
+// CHECK: [0x04,0x05,0x2c,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx8 s[20:27], s[96:99], s2
+// CHECK: [0x30,0x05,0x2c,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx8 s[20:27], ttmp[8:11], s2
+// CHECK: [0x3c,0x05,0x2c,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], s101
+// CHECK: [0x02,0x05,0x2c,0xc0,0x65,0x00,0x00,0x00]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], flat_scratch_lo
+// CHECK: [0x02,0x05,0x2c,0xc0,0x66,0x00,0x00,0x00]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], flat_scratch_hi
+// CHECK: [0x02,0x05,0x2c,0xc0,0x67,0x00,0x00,0x00]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], vcc_lo
+// CHECK: [0x02,0x05,0x2c,0xc0,0x6a,0x00,0x00,0x00]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], vcc_hi
+// CHECK: [0x02,0x05,0x2c,0xc0,0x6b,0x00,0x00,0x00]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], tba_lo
+// CHECK: [0x02,0x05,0x2c,0xc0,0x6c,0x00,0x00,0x00]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], tba_hi
+// CHECK: [0x02,0x05,0x2c,0xc0,0x6d,0x00,0x00,0x00]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], tma_lo
+// CHECK: [0x02,0x05,0x2c,0xc0,0x6e,0x00,0x00,0x00]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], tma_hi
+// CHECK: [0x02,0x05,0x2c,0xc0,0x6f,0x00,0x00,0x00]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], ttmp11
+// CHECK: [0x02,0x05,0x2c,0xc0,0x7b,0x00,0x00,0x00]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], m0
+// CHECK: [0x02,0x05,0x2c,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], 0x7ffff
+// CHECK: [0x02,0x05,0x2e,0xc0,0xff,0xff,0x07,0x00]
+
+s_buffer_load_dwordx8 s[20:27], s[4:7], s2 glc
+// CHECK: [0x02,0x05,0x2d,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], s2
+// CHECK: [0x02,0x05,0x30,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx16 s[24:39], s[4:7], s2
+// CHECK: [0x02,0x06,0x30,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx16 s[84:99], s[4:7], s2
+// CHECK: [0x02,0x15,0x30,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx16 s[20:35], s[8:11], s2
+// CHECK: [0x04,0x05,0x30,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx16 s[20:35], s[96:99], s2
+// CHECK: [0x30,0x05,0x30,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx16 s[20:35], ttmp[8:11], s2
+// CHECK: [0x3c,0x05,0x30,0xc0,0x02,0x00,0x00,0x00]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], s101
+// CHECK: [0x02,0x05,0x30,0xc0,0x65,0x00,0x00,0x00]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], flat_scratch_lo
+// CHECK: [0x02,0x05,0x30,0xc0,0x66,0x00,0x00,0x00]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], flat_scratch_hi
+// CHECK: [0x02,0x05,0x30,0xc0,0x67,0x00,0x00,0x00]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], vcc_lo
+// CHECK: [0x02,0x05,0x30,0xc0,0x6a,0x00,0x00,0x00]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], vcc_hi
+// CHECK: [0x02,0x05,0x30,0xc0,0x6b,0x00,0x00,0x00]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], tba_lo
+// CHECK: [0x02,0x05,0x30,0xc0,0x6c,0x00,0x00,0x00]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], tba_hi
+// CHECK: [0x02,0x05,0x30,0xc0,0x6d,0x00,0x00,0x00]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], tma_lo
+// CHECK: [0x02,0x05,0x30,0xc0,0x6e,0x00,0x00,0x00]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], tma_hi
+// CHECK: [0x02,0x05,0x30,0xc0,0x6f,0x00,0x00,0x00]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], ttmp11
+// CHECK: [0x02,0x05,0x30,0xc0,0x7b,0x00,0x00,0x00]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], m0
+// CHECK: [0x02,0x05,0x30,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], 0x7ffff
+// CHECK: [0x02,0x05,0x32,0xc0,0xff,0xff,0x07,0x00]
+
+s_buffer_load_dwordx16 s[20:35], s[4:7], s2 glc
+// CHECK: [0x02,0x05,0x31,0xc0,0x02,0x00,0x00,0x00]
+
+s_store_dword s1, s[4:5], m0
+// CHECK: [0x42,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dword s101, s[4:5], m0
+// CHECK: [0x42,0x19,0x40,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dword flat_scratch_lo, s[4:5], m0
+// CHECK: [0x82,0x19,0x40,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dword flat_scratch_hi, s[4:5], m0
+// CHECK: [0xc2,0x19,0x40,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dword vcc_lo, s[4:5], m0
+// CHECK: [0x82,0x1a,0x40,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dword vcc_hi, s[4:5], m0
+// CHECK: [0xc2,0x1a,0x40,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dword tba_lo, s[4:5], m0
+// CHECK: [0x02,0x1b,0x40,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dword tba_hi, s[4:5], m0
+// CHECK: [0x42,0x1b,0x40,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dword tma_lo, s[4:5], m0
+// CHECK: [0x82,0x1b,0x40,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dword tma_hi, s[4:5], m0
+// CHECK: [0xc2,0x1b,0x40,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dword ttmp11, s[4:5], m0
+// CHECK: [0xc2,0x1e,0x40,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dword s1, s[6:7], m0
+// CHECK: [0x43,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dword s1, s[100:101], m0
+// CHECK: [0x72,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dword s1, flat_scratch, m0
+// CHECK: [0x73,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dword s1, vcc, m0
+// CHECK: [0x75,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dword s1, tba, m0
+// CHECK: [0x76,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dword s1, tma, m0
+// CHECK: [0x77,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dword s1, ttmp[10:11], m0
+// CHECK: [0x7d,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dword s1, s[4:5], 0x7ffff
+// CHECK: [0x42,0x00,0x42,0xc0,0xff,0xff,0x07,0x00]
+
+s_store_dword s1, s[4:5], m0 glc
+// CHECK: [0x42,0x00,0x41,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx2 s[2:3], s[4:5], m0
+// CHECK: [0x82,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx2 s[4:5], s[4:5], m0
+// CHECK: [0x02,0x01,0x44,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx2 s[100:101], s[4:5], m0
+// CHECK: [0x02,0x19,0x44,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx2 flat_scratch, s[4:5], m0
+// CHECK: [0x82,0x19,0x44,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx2 vcc, s[4:5], m0
+// CHECK: [0x82,0x1a,0x44,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx2 tba, s[4:5], m0
+// CHECK: [0x02,0x1b,0x44,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx2 tma, s[4:5], m0
+// CHECK: [0x82,0x1b,0x44,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx2 ttmp[10:11], s[4:5], m0
+// CHECK: [0x82,0x1e,0x44,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx2 s[2:3], s[6:7], m0
+// CHECK: [0x83,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx2 s[2:3], s[100:101], m0
+// CHECK: [0xb2,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx2 s[2:3], flat_scratch, m0
+// CHECK: [0xb3,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx2 s[2:3], vcc, m0
+// CHECK: [0xb5,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx2 s[2:3], tba, m0
+// CHECK: [0xb6,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx2 s[2:3], tma, m0
+// CHECK: [0xb7,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx2 s[2:3], ttmp[10:11], m0
+// CHECK: [0xbd,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx2 s[2:3], s[4:5], 0x7ffff
+// CHECK: [0x82,0x00,0x46,0xc0,0xff,0xff,0x07,0x00]
+
+s_store_dwordx2 s[2:3], s[4:5], m0 glc
+// CHECK: [0x82,0x00,0x45,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx4 s[4:7], s[4:5], m0
+// CHECK: [0x02,0x01,0x48,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx4 s[8:11], s[4:5], m0
+// CHECK: [0x02,0x02,0x48,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx4 s[96:99], s[4:5], m0
+// CHECK: [0x02,0x18,0x48,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx4 ttmp[8:11], s[4:5], m0
+// CHECK: [0x02,0x1e,0x48,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx4 s[4:7], s[6:7], m0
+// CHECK: [0x03,0x01,0x48,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx4 s[4:7], s[100:101], m0
+// CHECK: [0x32,0x01,0x48,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx4 s[4:7], flat_scratch, m0
+// CHECK: [0x33,0x01,0x48,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx4 s[4:7], vcc, m0
+// CHECK: [0x35,0x01,0x48,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx4 s[4:7], tba, m0
+// CHECK: [0x36,0x01,0x48,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx4 s[4:7], tma, m0
+// CHECK: [0x37,0x01,0x48,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx4 s[4:7], ttmp[10:11], m0
+// CHECK: [0x3d,0x01,0x48,0xc0,0x7c,0x00,0x00,0x00]
+
+s_store_dwordx4 s[4:7], s[4:5], 0x7ffff
+// CHECK: [0x02,0x01,0x4a,0xc0,0xff,0xff,0x07,0x00]
+
+s_store_dwordx4 s[4:7], s[4:5], m0 glc
+// CHECK: [0x02,0x01,0x49,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dword s1, s[8:11], m0
+// CHECK: [0x44,0x00,0x60,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dword s101, s[8:11], m0
+// CHECK: [0x44,0x19,0x60,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dword flat_scratch_lo, s[8:11], m0
+// CHECK: [0x84,0x19,0x60,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dword flat_scratch_hi, s[8:11], m0
+// CHECK: [0xc4,0x19,0x60,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dword vcc_lo, s[8:11], m0
+// CHECK: [0x84,0x1a,0x60,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dword vcc_hi, s[8:11], m0
+// CHECK: [0xc4,0x1a,0x60,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dword tba_lo, s[8:11], m0
+// CHECK: [0x04,0x1b,0x60,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dword tba_hi, s[8:11], m0
+// CHECK: [0x44,0x1b,0x60,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dword tma_lo, s[8:11], m0
+// CHECK: [0x84,0x1b,0x60,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dword tma_hi, s[8:11], m0
+// CHECK: [0xc4,0x1b,0x60,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dword ttmp11, s[8:11], m0
+// CHECK: [0xc4,0x1e,0x60,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dword s1, s[12:15], m0
+// CHECK: [0x46,0x00,0x60,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dword s1, s[96:99], m0
+// CHECK: [0x70,0x00,0x60,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dword s1, ttmp[8:11], m0
+// CHECK: [0x7c,0x00,0x60,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dword s1, s[8:11], 0x7ffff
+// CHECK: [0x44,0x00,0x62,0xc0,0xff,0xff,0x07,0x00]
+
+s_buffer_store_dword s1, s[8:11], m0 glc
+// CHECK: [0x44,0x00,0x61,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx2 s[2:3], s[8:11], m0
+// CHECK: [0x84,0x00,0x64,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx2 s[4:5], s[8:11], m0
+// CHECK: [0x04,0x01,0x64,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx2 s[100:101], s[8:11], m0
+// CHECK: [0x04,0x19,0x64,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx2 flat_scratch, s[8:11], m0
+// CHECK: [0x84,0x19,0x64,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx2 vcc, s[8:11], m0
+// CHECK: [0x84,0x1a,0x64,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx2 tba, s[8:11], m0
+// CHECK: [0x04,0x1b,0x64,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx2 tma, s[8:11], m0
+// CHECK: [0x84,0x1b,0x64,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx2 ttmp[10:11], s[8:11], m0
+// CHECK: [0x84,0x1e,0x64,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx2 s[2:3], s[12:15], m0
+// CHECK: [0x86,0x00,0x64,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx2 s[2:3], s[96:99], m0
+// CHECK: [0xb0,0x00,0x64,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx2 s[2:3], ttmp[8:11], m0
+// CHECK: [0xbc,0x00,0x64,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx2 s[2:3], s[8:11], 0x7ffff
+// CHECK: [0x84,0x00,0x66,0xc0,0xff,0xff,0x07,0x00]
+
+s_buffer_store_dwordx2 s[2:3], s[8:11], m0 glc
+// CHECK: [0x84,0x00,0x65,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx4 s[4:7], s[8:11], m0
+// CHECK: [0x04,0x01,0x68,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx4 s[8:11], s[8:11], m0
+// CHECK: [0x04,0x02,0x68,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx4 s[96:99], s[8:11], m0
+// CHECK: [0x04,0x18,0x68,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx4 ttmp[8:11], s[8:11], m0
+// CHECK: [0x04,0x1e,0x68,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx4 s[4:7], s[12:15], m0
+// CHECK: [0x06,0x01,0x68,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx4 s[4:7], s[96:99], m0
+// CHECK: [0x30,0x01,0x68,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx4 s[4:7], ttmp[8:11], m0
+// CHECK: [0x3c,0x01,0x68,0xc0,0x7c,0x00,0x00,0x00]
+
+s_buffer_store_dwordx4 s[4:7], s[8:11], 0x7ffff
+// CHECK: [0x04,0x01,0x6a,0xc0,0xff,0xff,0x07,0x00]
+
+s_buffer_store_dwordx4 s[4:7], s[8:11], m0 glc
+// CHECK: [0x04,0x01,0x69,0xc0,0x7c,0x00,0x00,0x00]
+
+s_dcache_inv
+// CHECK: [0x00,0x00,0x80,0xc0,0x00,0x00,0x00,0x00]
+
+s_dcache_wb
+// CHECK: [0x00,0x00,0x84,0xc0,0x00,0x00,0x00,0x00]
+
+s_dcache_inv_vol
+// CHECK: [0x00,0x00,0x88,0xc0,0x00,0x00,0x00,0x00]
+
+s_dcache_wb_vol
+// CHECK: [0x00,0x00,0x8c,0xc0,0x00,0x00,0x00,0x00]
+
+s_memtime s[10:11]
+// CHECK: [0x80,0x02,0x90,0xc0,0x00,0x00,0x00,0x00]
+
+s_memtime s[12:13]
+// CHECK: [0x00,0x03,0x90,0xc0,0x00,0x00,0x00,0x00]
+
+s_memtime s[100:101]
+// CHECK: [0x00,0x19,0x90,0xc0,0x00,0x00,0x00,0x00]
+
+s_memtime flat_scratch
+// CHECK: [0x80,0x19,0x90,0xc0,0x00,0x00,0x00,0x00]
+
+s_memtime vcc
+// CHECK: [0x80,0x1a,0x90,0xc0,0x00,0x00,0x00,0x00]
+
+s_memtime tba
+// CHECK: [0x00,0x1b,0x90,0xc0,0x00,0x00,0x00,0x00]
+
+s_memtime tma
+// CHECK: [0x80,0x1b,0x90,0xc0,0x00,0x00,0x00,0x00]
+
+s_memtime ttmp[10:11]
+// CHECK: [0x80,0x1e,0x90,0xc0,0x00,0x00,0x00,0x00]
+
+s_memrealtime s[10:11]
+// CHECK: [0x80,0x02,0x94,0xc0,0x00,0x00,0x00,0x00]
+
+s_memrealtime s[12:13]
+// CHECK: [0x00,0x03,0x94,0xc0,0x00,0x00,0x00,0x00]
+
+s_memrealtime s[100:101]
+// CHECK: [0x00,0x19,0x94,0xc0,0x00,0x00,0x00,0x00]
+
+s_memrealtime flat_scratch
+// CHECK: [0x80,0x19,0x94,0xc0,0x00,0x00,0x00,0x00]
+
+s_memrealtime vcc
+// CHECK: [0x80,0x1a,0x94,0xc0,0x00,0x00,0x00,0x00]
+
+s_memrealtime tba
+// CHECK: [0x00,0x1b,0x94,0xc0,0x00,0x00,0x00,0x00]
+
+s_memrealtime tma
+// CHECK: [0x80,0x1b,0x94,0xc0,0x00,0x00,0x00,0x00]
+
+s_memrealtime ttmp[10:11]
+// CHECK: [0x80,0x1e,0x94,0xc0,0x00,0x00,0x00,0x00]
+
+s_mov_b32 s5, s1
+// CHECK: [0x01,0x00,0x85,0xbe]
+
+s_mov_b32 s101, s1
+// CHECK: [0x01,0x00,0xe5,0xbe]
+
+s_mov_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x00,0xe6,0xbe]
+
+s_mov_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x00,0xe7,0xbe]
+
+s_mov_b32 vcc_lo, s1
+// CHECK: [0x01,0x00,0xea,0xbe]
+
+s_mov_b32 vcc_hi, s1
+// CHECK: [0x01,0x00,0xeb,0xbe]
+
+s_mov_b32 tba_lo, s1
+// CHECK: [0x01,0x00,0xec,0xbe]
+
+s_mov_b32 tba_hi, s1
+// CHECK: [0x01,0x00,0xed,0xbe]
+
+s_mov_b32 tma_lo, s1
+// CHECK: [0x01,0x00,0xee,0xbe]
+
+s_mov_b32 tma_hi, s1
+// CHECK: [0x01,0x00,0xef,0xbe]
+
+s_mov_b32 ttmp11, s1
+// CHECK: [0x01,0x00,0xfb,0xbe]
+
+s_mov_b32 m0, s1
+// CHECK: [0x01,0x00,0xfc,0xbe]
+
+s_mov_b32 exec_lo, s1
+// CHECK: [0x01,0x00,0xfe,0xbe]
+
+s_mov_b32 exec_hi, s1
+// CHECK: [0x01,0x00,0xff,0xbe]
+
+s_mov_b32 s5, s101
+// CHECK: [0x65,0x00,0x85,0xbe]
+
+s_mov_b32 s5, flat_scratch_lo
+// CHECK: [0x66,0x00,0x85,0xbe]
+
+s_mov_b32 s5, flat_scratch_hi
+// CHECK: [0x67,0x00,0x85,0xbe]
+
+s_mov_b32 s5, vcc_lo
+// CHECK: [0x6a,0x00,0x85,0xbe]
+
+s_mov_b32 s5, vcc_hi
+// CHECK: [0x6b,0x00,0x85,0xbe]
+
+s_mov_b32 s5, tba_lo
+// CHECK: [0x6c,0x00,0x85,0xbe]
+
+s_mov_b32 s5, tba_hi
+// CHECK: [0x6d,0x00,0x85,0xbe]
+
+s_mov_b32 s5, tma_lo
+// CHECK: [0x6e,0x00,0x85,0xbe]
+
+s_mov_b32 s5, tma_hi
+// CHECK: [0x6f,0x00,0x85,0xbe]
+
+s_mov_b32 s5, ttmp11
+// CHECK: [0x7b,0x00,0x85,0xbe]
+
+s_mov_b32 s5, m0
+// CHECK: [0x7c,0x00,0x85,0xbe]
+
+s_mov_b32 s5, exec_lo
+// CHECK: [0x7e,0x00,0x85,0xbe]
+
+s_mov_b32 s5, exec_hi
+// CHECK: [0x7f,0x00,0x85,0xbe]
+
+s_mov_b32 s5, 0
+// CHECK: [0x80,0x00,0x85,0xbe]
+
+s_mov_b32 s5, -1
+// CHECK: [0xc1,0x00,0x85,0xbe]
+
+s_mov_b32 s5, 0.5
+// CHECK: [0xf0,0x00,0x85,0xbe]
+
+s_mov_b32 s5, -4.0
+// CHECK: [0xf7,0x00,0x85,0xbe]
+
+s_mov_b32 s5, 0xaf123456
+// CHECK: [0xff,0x00,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_mov_b32 s5, 0x3f717273
+// CHECK: [0xff,0x00,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_mov_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x01,0x8a,0xbe]
+
+s_mov_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x01,0x8c,0xbe]
+
+s_mov_b64 s[100:101], s[2:3]
+// CHECK: [0x02,0x01,0xe4,0xbe]
+
+s_mov_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x01,0xe6,0xbe]
+
+s_mov_b64 vcc, s[2:3]
+// CHECK: [0x02,0x01,0xea,0xbe]
+
+s_mov_b64 tba, s[2:3]
+// CHECK: [0x02,0x01,0xec,0xbe]
+
+s_mov_b64 tma, s[2:3]
+// CHECK: [0x02,0x01,0xee,0xbe]
+
+s_mov_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x01,0xfa,0xbe]
+
+s_mov_b64 exec, s[2:3]
+// CHECK: [0x02,0x01,0xfe,0xbe]
+
+s_mov_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x01,0x8a,0xbe]
+
+s_mov_b64 s[10:11], s[100:101]
+// CHECK: [0x64,0x01,0x8a,0xbe]
+
+s_mov_b64 s[10:11], flat_scratch
+// CHECK: [0x66,0x01,0x8a,0xbe]
+
+s_mov_b64 s[10:11], vcc
+// CHECK: [0x6a,0x01,0x8a,0xbe]
+
+s_mov_b64 s[10:11], tba
+// CHECK: [0x6c,0x01,0x8a,0xbe]
+
+s_mov_b64 s[10:11], tma
+// CHECK: [0x6e,0x01,0x8a,0xbe]
+
+s_mov_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x01,0x8a,0xbe]
+
+s_mov_b64 s[10:11], exec
+// CHECK: [0x7e,0x01,0x8a,0xbe]
+
+s_mov_b64 s[10:11], 0
+// CHECK: [0x80,0x01,0x8a,0xbe]
+
+s_mov_b64 s[10:11], -1
+// CHECK: [0xc1,0x01,0x8a,0xbe]
+
+s_mov_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x01,0x8a,0xbe]
+
+s_mov_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x01,0x8a,0xbe]
+
+s_mov_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x01,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_mov_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x01,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_cmov_b32 s5, s1
+// CHECK: [0x01,0x02,0x85,0xbe]
+
+s_cmov_b32 s101, s1
+// CHECK: [0x01,0x02,0xe5,0xbe]
+
+s_cmov_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x02,0xe6,0xbe]
+
+s_cmov_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x02,0xe7,0xbe]
+
+s_cmov_b32 vcc_lo, s1
+// CHECK: [0x01,0x02,0xea,0xbe]
+
+s_cmov_b32 vcc_hi, s1
+// CHECK: [0x01,0x02,0xeb,0xbe]
+
+s_cmov_b32 tba_lo, s1
+// CHECK: [0x01,0x02,0xec,0xbe]
+
+s_cmov_b32 tba_hi, s1
+// CHECK: [0x01,0x02,0xed,0xbe]
+
+s_cmov_b32 tma_lo, s1
+// CHECK: [0x01,0x02,0xee,0xbe]
+
+s_cmov_b32 tma_hi, s1
+// CHECK: [0x01,0x02,0xef,0xbe]
+
+s_cmov_b32 ttmp11, s1
+// CHECK: [0x01,0x02,0xfb,0xbe]
+
+s_cmov_b32 m0, s1
+// CHECK: [0x01,0x02,0xfc,0xbe]
+
+s_cmov_b32 exec_lo, s1
+// CHECK: [0x01,0x02,0xfe,0xbe]
+
+s_cmov_b32 exec_hi, s1
+// CHECK: [0x01,0x02,0xff,0xbe]
+
+s_cmov_b32 s5, s101
+// CHECK: [0x65,0x02,0x85,0xbe]
+
+s_cmov_b32 s5, flat_scratch_lo
+// CHECK: [0x66,0x02,0x85,0xbe]
+
+s_cmov_b32 s5, flat_scratch_hi
+// CHECK: [0x67,0x02,0x85,0xbe]
+
+s_cmov_b32 s5, vcc_lo
+// CHECK: [0x6a,0x02,0x85,0xbe]
+
+s_cmov_b32 s5, vcc_hi
+// CHECK: [0x6b,0x02,0x85,0xbe]
+
+s_cmov_b32 s5, tba_lo
+// CHECK: [0x6c,0x02,0x85,0xbe]
+
+s_cmov_b32 s5, tba_hi
+// CHECK: [0x6d,0x02,0x85,0xbe]
+
+s_cmov_b32 s5, tma_lo
+// CHECK: [0x6e,0x02,0x85,0xbe]
+
+s_cmov_b32 s5, tma_hi
+// CHECK: [0x6f,0x02,0x85,0xbe]
+
+s_cmov_b32 s5, ttmp11
+// CHECK: [0x7b,0x02,0x85,0xbe]
+
+s_cmov_b32 s5, m0
+// CHECK: [0x7c,0x02,0x85,0xbe]
+
+s_cmov_b32 s5, exec_lo
+// CHECK: [0x7e,0x02,0x85,0xbe]
+
+s_cmov_b32 s5, exec_hi
+// CHECK: [0x7f,0x02,0x85,0xbe]
+
+s_cmov_b32 s5, 0
+// CHECK: [0x80,0x02,0x85,0xbe]
+
+s_cmov_b32 s5, -1
+// CHECK: [0xc1,0x02,0x85,0xbe]
+
+s_cmov_b32 s5, 0.5
+// CHECK: [0xf0,0x02,0x85,0xbe]
+
+s_cmov_b32 s5, -4.0
+// CHECK: [0xf7,0x02,0x85,0xbe]
+
+s_cmov_b32 s5, 0xaf123456
+// CHECK: [0xff,0x02,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_cmov_b32 s5, 0x3f717273
+// CHECK: [0xff,0x02,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_cmov_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x03,0x8a,0xbe]
+
+s_cmov_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x03,0x8c,0xbe]
+
+s_cmov_b64 s[100:101], s[2:3]
+// CHECK: [0x02,0x03,0xe4,0xbe]
+
+s_cmov_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x03,0xe6,0xbe]
+
+s_cmov_b64 vcc, s[2:3]
+// CHECK: [0x02,0x03,0xea,0xbe]
+
+s_cmov_b64 tba, s[2:3]
+// CHECK: [0x02,0x03,0xec,0xbe]
+
+s_cmov_b64 tma, s[2:3]
+// CHECK: [0x02,0x03,0xee,0xbe]
+
+s_cmov_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x03,0xfa,0xbe]
+
+s_cmov_b64 exec, s[2:3]
+// CHECK: [0x02,0x03,0xfe,0xbe]
+
+s_cmov_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x03,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], s[100:101]
+// CHECK: [0x64,0x03,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], flat_scratch
+// CHECK: [0x66,0x03,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], vcc
+// CHECK: [0x6a,0x03,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], tba
+// CHECK: [0x6c,0x03,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], tma
+// CHECK: [0x6e,0x03,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x03,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], exec
+// CHECK: [0x7e,0x03,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], 0
+// CHECK: [0x80,0x03,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], -1
+// CHECK: [0xc1,0x03,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x03,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x03,0x8a,0xbe]
+
+s_cmov_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x03,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_cmov_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x03,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_not_b32 s5, s1
+// CHECK: [0x01,0x04,0x85,0xbe]
+
+s_not_b32 s101, s1
+// CHECK: [0x01,0x04,0xe5,0xbe]
+
+s_not_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x04,0xe6,0xbe]
+
+s_not_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x04,0xe7,0xbe]
+
+s_not_b32 vcc_lo, s1
+// CHECK: [0x01,0x04,0xea,0xbe]
+
+s_not_b32 vcc_hi, s1
+// CHECK: [0x01,0x04,0xeb,0xbe]
+
+s_not_b32 tba_lo, s1
+// CHECK: [0x01,0x04,0xec,0xbe]
+
+s_not_b32 tba_hi, s1
+// CHECK: [0x01,0x04,0xed,0xbe]
+
+s_not_b32 tma_lo, s1
+// CHECK: [0x01,0x04,0xee,0xbe]
+
+s_not_b32 tma_hi, s1
+// CHECK: [0x01,0x04,0xef,0xbe]
+
+s_not_b32 ttmp11, s1
+// CHECK: [0x01,0x04,0xfb,0xbe]
+
+s_not_b32 m0, s1
+// CHECK: [0x01,0x04,0xfc,0xbe]
+
+s_not_b32 exec_lo, s1
+// CHECK: [0x01,0x04,0xfe,0xbe]
+
+s_not_b32 exec_hi, s1
+// CHECK: [0x01,0x04,0xff,0xbe]
+
+s_not_b32 s5, s101
+// CHECK: [0x65,0x04,0x85,0xbe]
+
+s_not_b32 s5, flat_scratch_lo
+// CHECK: [0x66,0x04,0x85,0xbe]
+
+s_not_b32 s5, flat_scratch_hi
+// CHECK: [0x67,0x04,0x85,0xbe]
+
+s_not_b32 s5, vcc_lo
+// CHECK: [0x6a,0x04,0x85,0xbe]
+
+s_not_b32 s5, vcc_hi
+// CHECK: [0x6b,0x04,0x85,0xbe]
+
+s_not_b32 s5, tba_lo
+// CHECK: [0x6c,0x04,0x85,0xbe]
+
+s_not_b32 s5, tba_hi
+// CHECK: [0x6d,0x04,0x85,0xbe]
+
+s_not_b32 s5, tma_lo
+// CHECK: [0x6e,0x04,0x85,0xbe]
+
+s_not_b32 s5, tma_hi
+// CHECK: [0x6f,0x04,0x85,0xbe]
+
+s_not_b32 s5, ttmp11
+// CHECK: [0x7b,0x04,0x85,0xbe]
+
+s_not_b32 s5, m0
+// CHECK: [0x7c,0x04,0x85,0xbe]
+
+s_not_b32 s5, exec_lo
+// CHECK: [0x7e,0x04,0x85,0xbe]
+
+s_not_b32 s5, exec_hi
+// CHECK: [0x7f,0x04,0x85,0xbe]
+
+s_not_b32 s5, 0
+// CHECK: [0x80,0x04,0x85,0xbe]
+
+s_not_b32 s5, -1
+// CHECK: [0xc1,0x04,0x85,0xbe]
+
+s_not_b32 s5, 0.5
+// CHECK: [0xf0,0x04,0x85,0xbe]
+
+s_not_b32 s5, -4.0
+// CHECK: [0xf7,0x04,0x85,0xbe]
+
+s_not_b32 s5, 0xaf123456
+// CHECK: [0xff,0x04,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_not_b32 s5, 0x3f717273
+// CHECK: [0xff,0x04,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_not_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x05,0x8a,0xbe]
+
+s_not_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x05,0x8c,0xbe]
+
+s_not_b64 s[100:101], s[2:3]
+// CHECK: [0x02,0x05,0xe4,0xbe]
+
+s_not_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x05,0xe6,0xbe]
+
+s_not_b64 vcc, s[2:3]
+// CHECK: [0x02,0x05,0xea,0xbe]
+
+s_not_b64 tba, s[2:3]
+// CHECK: [0x02,0x05,0xec,0xbe]
+
+s_not_b64 tma, s[2:3]
+// CHECK: [0x02,0x05,0xee,0xbe]
+
+s_not_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x05,0xfa,0xbe]
+
+s_not_b64 exec, s[2:3]
+// CHECK: [0x02,0x05,0xfe,0xbe]
+
+s_not_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x05,0x8a,0xbe]
+
+s_not_b64 s[10:11], s[100:101]
+// CHECK: [0x64,0x05,0x8a,0xbe]
+
+s_not_b64 s[10:11], flat_scratch
+// CHECK: [0x66,0x05,0x8a,0xbe]
+
+s_not_b64 s[10:11], vcc
+// CHECK: [0x6a,0x05,0x8a,0xbe]
+
+s_not_b64 s[10:11], tba
+// CHECK: [0x6c,0x05,0x8a,0xbe]
+
+s_not_b64 s[10:11], tma
+// CHECK: [0x6e,0x05,0x8a,0xbe]
+
+s_not_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x05,0x8a,0xbe]
+
+s_not_b64 s[10:11], exec
+// CHECK: [0x7e,0x05,0x8a,0xbe]
+
+s_not_b64 s[10:11], 0
+// CHECK: [0x80,0x05,0x8a,0xbe]
+
+s_not_b64 s[10:11], -1
+// CHECK: [0xc1,0x05,0x8a,0xbe]
+
+s_not_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x05,0x8a,0xbe]
+
+s_not_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x05,0x8a,0xbe]
+
+s_not_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x05,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_not_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x05,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_wqm_b32 s5, s1
+// CHECK: [0x01,0x06,0x85,0xbe]
+
+s_wqm_b32 s101, s1
+// CHECK: [0x01,0x06,0xe5,0xbe]
+
+s_wqm_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x06,0xe6,0xbe]
+
+s_wqm_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x06,0xe7,0xbe]
+
+s_wqm_b32 vcc_lo, s1
+// CHECK: [0x01,0x06,0xea,0xbe]
+
+s_wqm_b32 vcc_hi, s1
+// CHECK: [0x01,0x06,0xeb,0xbe]
+
+s_wqm_b32 tba_lo, s1
+// CHECK: [0x01,0x06,0xec,0xbe]
+
+s_wqm_b32 tba_hi, s1
+// CHECK: [0x01,0x06,0xed,0xbe]
+
+s_wqm_b32 tma_lo, s1
+// CHECK: [0x01,0x06,0xee,0xbe]
+
+s_wqm_b32 tma_hi, s1
+// CHECK: [0x01,0x06,0xef,0xbe]
+
+s_wqm_b32 ttmp11, s1
+// CHECK: [0x01,0x06,0xfb,0xbe]
+
+s_wqm_b32 m0, s1
+// CHECK: [0x01,0x06,0xfc,0xbe]
+
+s_wqm_b32 exec_lo, s1
+// CHECK: [0x01,0x06,0xfe,0xbe]
+
+s_wqm_b32 exec_hi, s1
+// CHECK: [0x01,0x06,0xff,0xbe]
+
+s_wqm_b32 s5, s101
+// CHECK: [0x65,0x06,0x85,0xbe]
+
+s_wqm_b32 s5, flat_scratch_lo
+// CHECK: [0x66,0x06,0x85,0xbe]
+
+s_wqm_b32 s5, flat_scratch_hi
+// CHECK: [0x67,0x06,0x85,0xbe]
+
+s_wqm_b32 s5, vcc_lo
+// CHECK: [0x6a,0x06,0x85,0xbe]
+
+s_wqm_b32 s5, vcc_hi
+// CHECK: [0x6b,0x06,0x85,0xbe]
+
+s_wqm_b32 s5, tba_lo
+// CHECK: [0x6c,0x06,0x85,0xbe]
+
+s_wqm_b32 s5, tba_hi
+// CHECK: [0x6d,0x06,0x85,0xbe]
+
+s_wqm_b32 s5, tma_lo
+// CHECK: [0x6e,0x06,0x85,0xbe]
+
+s_wqm_b32 s5, tma_hi
+// CHECK: [0x6f,0x06,0x85,0xbe]
+
+s_wqm_b32 s5, ttmp11
+// CHECK: [0x7b,0x06,0x85,0xbe]
+
+s_wqm_b32 s5, m0
+// CHECK: [0x7c,0x06,0x85,0xbe]
+
+s_wqm_b32 s5, exec_lo
+// CHECK: [0x7e,0x06,0x85,0xbe]
+
+s_wqm_b32 s5, exec_hi
+// CHECK: [0x7f,0x06,0x85,0xbe]
+
+s_wqm_b32 s5, 0
+// CHECK: [0x80,0x06,0x85,0xbe]
+
+s_wqm_b32 s5, -1
+// CHECK: [0xc1,0x06,0x85,0xbe]
+
+s_wqm_b32 s5, 0.5
+// CHECK: [0xf0,0x06,0x85,0xbe]
+
+s_wqm_b32 s5, -4.0
+// CHECK: [0xf7,0x06,0x85,0xbe]
+
+s_wqm_b32 s5, 0xaf123456
+// CHECK: [0xff,0x06,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_wqm_b32 s5, 0x3f717273
+// CHECK: [0xff,0x06,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_wqm_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x07,0x8a,0xbe]
+
+s_wqm_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x07,0x8c,0xbe]
+
+s_wqm_b64 s[100:101], s[2:3]
+// CHECK: [0x02,0x07,0xe4,0xbe]
+
+s_wqm_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x07,0xe6,0xbe]
+
+s_wqm_b64 vcc, s[2:3]
+// CHECK: [0x02,0x07,0xea,0xbe]
+
+s_wqm_b64 tba, s[2:3]
+// CHECK: [0x02,0x07,0xec,0xbe]
+
+s_wqm_b64 tma, s[2:3]
+// CHECK: [0x02,0x07,0xee,0xbe]
+
+s_wqm_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x07,0xfa,0xbe]
+
+s_wqm_b64 exec, s[2:3]
+// CHECK: [0x02,0x07,0xfe,0xbe]
+
+s_wqm_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x07,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], s[100:101]
+// CHECK: [0x64,0x07,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], flat_scratch
+// CHECK: [0x66,0x07,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], vcc
+// CHECK: [0x6a,0x07,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], tba
+// CHECK: [0x6c,0x07,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], tma
+// CHECK: [0x6e,0x07,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x07,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], exec
+// CHECK: [0x7e,0x07,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], 0
+// CHECK: [0x80,0x07,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], -1
+// CHECK: [0xc1,0x07,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x07,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x07,0x8a,0xbe]
+
+s_wqm_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x07,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_wqm_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x07,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_brev_b32 s5, s1
+// CHECK: [0x01,0x08,0x85,0xbe]
+
+s_brev_b32 s101, s1
+// CHECK: [0x01,0x08,0xe5,0xbe]
+
+s_brev_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x08,0xe6,0xbe]
+
+s_brev_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x08,0xe7,0xbe]
+
+s_brev_b32 vcc_lo, s1
+// CHECK: [0x01,0x08,0xea,0xbe]
+
+s_brev_b32 vcc_hi, s1
+// CHECK: [0x01,0x08,0xeb,0xbe]
+
+s_brev_b32 tba_lo, s1
+// CHECK: [0x01,0x08,0xec,0xbe]
+
+s_brev_b32 tba_hi, s1
+// CHECK: [0x01,0x08,0xed,0xbe]
+
+s_brev_b32 tma_lo, s1
+// CHECK: [0x01,0x08,0xee,0xbe]
+
+s_brev_b32 tma_hi, s1
+// CHECK: [0x01,0x08,0xef,0xbe]
+
+s_brev_b32 ttmp11, s1
+// CHECK: [0x01,0x08,0xfb,0xbe]
+
+s_brev_b32 m0, s1
+// CHECK: [0x01,0x08,0xfc,0xbe]
+
+s_brev_b32 exec_lo, s1
+// CHECK: [0x01,0x08,0xfe,0xbe]
+
+s_brev_b32 exec_hi, s1
+// CHECK: [0x01,0x08,0xff,0xbe]
+
+s_brev_b32 s5, s101
+// CHECK: [0x65,0x08,0x85,0xbe]
+
+s_brev_b32 s5, flat_scratch_lo
+// CHECK: [0x66,0x08,0x85,0xbe]
+
+s_brev_b32 s5, flat_scratch_hi
+// CHECK: [0x67,0x08,0x85,0xbe]
+
+s_brev_b32 s5, vcc_lo
+// CHECK: [0x6a,0x08,0x85,0xbe]
+
+s_brev_b32 s5, vcc_hi
+// CHECK: [0x6b,0x08,0x85,0xbe]
+
+s_brev_b32 s5, tba_lo
+// CHECK: [0x6c,0x08,0x85,0xbe]
+
+s_brev_b32 s5, tba_hi
+// CHECK: [0x6d,0x08,0x85,0xbe]
+
+s_brev_b32 s5, tma_lo
+// CHECK: [0x6e,0x08,0x85,0xbe]
+
+s_brev_b32 s5, tma_hi
+// CHECK: [0x6f,0x08,0x85,0xbe]
+
+s_brev_b32 s5, ttmp11
+// CHECK: [0x7b,0x08,0x85,0xbe]
+
+s_brev_b32 s5, m0
+// CHECK: [0x7c,0x08,0x85,0xbe]
+
+s_brev_b32 s5, exec_lo
+// CHECK: [0x7e,0x08,0x85,0xbe]
+
+s_brev_b32 s5, exec_hi
+// CHECK: [0x7f,0x08,0x85,0xbe]
+
+s_brev_b32 s5, 0
+// CHECK: [0x80,0x08,0x85,0xbe]
+
+s_brev_b32 s5, -1
+// CHECK: [0xc1,0x08,0x85,0xbe]
+
+s_brev_b32 s5, 0.5
+// CHECK: [0xf0,0x08,0x85,0xbe]
+
+s_brev_b32 s5, -4.0
+// CHECK: [0xf7,0x08,0x85,0xbe]
+
+s_brev_b32 s5, 0xaf123456
+// CHECK: [0xff,0x08,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_brev_b32 s5, 0x3f717273
+// CHECK: [0xff,0x08,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_brev_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x09,0x8a,0xbe]
+
+s_brev_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x09,0x8c,0xbe]
+
+s_brev_b64 s[100:101], s[2:3]
+// CHECK: [0x02,0x09,0xe4,0xbe]
+
+s_brev_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x09,0xe6,0xbe]
+
+s_brev_b64 vcc, s[2:3]
+// CHECK: [0x02,0x09,0xea,0xbe]
+
+s_brev_b64 tba, s[2:3]
+// CHECK: [0x02,0x09,0xec,0xbe]
+
+s_brev_b64 tma, s[2:3]
+// CHECK: [0x02,0x09,0xee,0xbe]
+
+s_brev_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x09,0xfa,0xbe]
+
+s_brev_b64 exec, s[2:3]
+// CHECK: [0x02,0x09,0xfe,0xbe]
+
+s_brev_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x09,0x8a,0xbe]
+
+s_brev_b64 s[10:11], s[100:101]
+// CHECK: [0x64,0x09,0x8a,0xbe]
+
+s_brev_b64 s[10:11], flat_scratch
+// CHECK: [0x66,0x09,0x8a,0xbe]
+
+s_brev_b64 s[10:11], vcc
+// CHECK: [0x6a,0x09,0x8a,0xbe]
+
+s_brev_b64 s[10:11], tba
+// CHECK: [0x6c,0x09,0x8a,0xbe]
+
+s_brev_b64 s[10:11], tma
+// CHECK: [0x6e,0x09,0x8a,0xbe]
+
+s_brev_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x09,0x8a,0xbe]
+
+s_brev_b64 s[10:11], exec
+// CHECK: [0x7e,0x09,0x8a,0xbe]
+
+s_brev_b64 s[10:11], 0
+// CHECK: [0x80,0x09,0x8a,0xbe]
+
+s_brev_b64 s[10:11], -1
+// CHECK: [0xc1,0x09,0x8a,0xbe]
+
+s_brev_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x09,0x8a,0xbe]
+
+s_brev_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x09,0x8a,0xbe]
+
+s_brev_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x09,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_brev_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x09,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_bcnt0_i32_b32 s5, s1
+// CHECK: [0x01,0x0a,0x85,0xbe]
+
+s_bcnt0_i32_b32 s101, s1
+// CHECK: [0x01,0x0a,0xe5,0xbe]
+
+s_bcnt0_i32_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x0a,0xe6,0xbe]
+
+s_bcnt0_i32_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x0a,0xe7,0xbe]
+
+s_bcnt0_i32_b32 vcc_lo, s1
+// CHECK: [0x01,0x0a,0xea,0xbe]
+
+s_bcnt0_i32_b32 vcc_hi, s1
+// CHECK: [0x01,0x0a,0xeb,0xbe]
+
+s_bcnt0_i32_b32 tba_lo, s1
+// CHECK: [0x01,0x0a,0xec,0xbe]
+
+s_bcnt0_i32_b32 tba_hi, s1
+// CHECK: [0x01,0x0a,0xed,0xbe]
+
+s_bcnt0_i32_b32 tma_lo, s1
+// CHECK: [0x01,0x0a,0xee,0xbe]
+
+s_bcnt0_i32_b32 tma_hi, s1
+// CHECK: [0x01,0x0a,0xef,0xbe]
+
+s_bcnt0_i32_b32 ttmp11, s1
+// CHECK: [0x01,0x0a,0xfb,0xbe]
+
+s_bcnt0_i32_b32 m0, s1
+// CHECK: [0x01,0x0a,0xfc,0xbe]
+
+s_bcnt0_i32_b32 exec_lo, s1
+// CHECK: [0x01,0x0a,0xfe,0xbe]
+
+s_bcnt0_i32_b32 exec_hi, s1
+// CHECK: [0x01,0x0a,0xff,0xbe]
+
+s_bcnt0_i32_b32 s5, s101
+// CHECK: [0x65,0x0a,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, flat_scratch_lo
+// CHECK: [0x66,0x0a,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, flat_scratch_hi
+// CHECK: [0x67,0x0a,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, vcc_lo
+// CHECK: [0x6a,0x0a,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, vcc_hi
+// CHECK: [0x6b,0x0a,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, tba_lo
+// CHECK: [0x6c,0x0a,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, tba_hi
+// CHECK: [0x6d,0x0a,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, tma_lo
+// CHECK: [0x6e,0x0a,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, tma_hi
+// CHECK: [0x6f,0x0a,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, ttmp11
+// CHECK: [0x7b,0x0a,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, m0
+// CHECK: [0x7c,0x0a,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, exec_lo
+// CHECK: [0x7e,0x0a,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, exec_hi
+// CHECK: [0x7f,0x0a,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, 0
+// CHECK: [0x80,0x0a,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, -1
+// CHECK: [0xc1,0x0a,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, 0.5
+// CHECK: [0xf0,0x0a,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, -4.0
+// CHECK: [0xf7,0x0a,0x85,0xbe]
+
+s_bcnt0_i32_b32 s5, 0xaf123456
+// CHECK: [0xff,0x0a,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_bcnt0_i32_b32 s5, 0x3f717273
+// CHECK: [0xff,0x0a,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_bcnt0_i32_b64 s5, s[2:3]
+// CHECK: [0x02,0x0b,0x85,0xbe]
+
+s_bcnt0_i32_b64 s101, s[2:3]
+// CHECK: [0x02,0x0b,0xe5,0xbe]
+
+s_bcnt0_i32_b64 flat_scratch_lo, s[2:3]
+// CHECK: [0x02,0x0b,0xe6,0xbe]
+
+s_bcnt0_i32_b64 flat_scratch_hi, s[2:3]
+// CHECK: [0x02,0x0b,0xe7,0xbe]
+
+s_bcnt0_i32_b64 vcc_lo, s[2:3]
+// CHECK: [0x02,0x0b,0xea,0xbe]
+
+s_bcnt0_i32_b64 vcc_hi, s[2:3]
+// CHECK: [0x02,0x0b,0xeb,0xbe]
+
+s_bcnt0_i32_b64 tba_lo, s[2:3]
+// CHECK: [0x02,0x0b,0xec,0xbe]
+
+s_bcnt0_i32_b64 tba_hi, s[2:3]
+// CHECK: [0x02,0x0b,0xed,0xbe]
+
+s_bcnt0_i32_b64 tma_lo, s[2:3]
+// CHECK: [0x02,0x0b,0xee,0xbe]
+
+s_bcnt0_i32_b64 tma_hi, s[2:3]
+// CHECK: [0x02,0x0b,0xef,0xbe]
+
+s_bcnt0_i32_b64 ttmp11, s[2:3]
+// CHECK: [0x02,0x0b,0xfb,0xbe]
+
+s_bcnt0_i32_b64 m0, s[2:3]
+// CHECK: [0x02,0x0b,0xfc,0xbe]
+
+s_bcnt0_i32_b64 exec_lo, s[2:3]
+// CHECK: [0x02,0x0b,0xfe,0xbe]
+
+s_bcnt0_i32_b64 exec_hi, s[2:3]
+// CHECK: [0x02,0x0b,0xff,0xbe]
+
+s_bcnt0_i32_b64 s5, s[4:5]
+// CHECK: [0x04,0x0b,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, s[100:101]
+// CHECK: [0x64,0x0b,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, flat_scratch
+// CHECK: [0x66,0x0b,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, vcc
+// CHECK: [0x6a,0x0b,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, tba
+// CHECK: [0x6c,0x0b,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, tma
+// CHECK: [0x6e,0x0b,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, ttmp[10:11]
+// CHECK: [0x7a,0x0b,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, exec
+// CHECK: [0x7e,0x0b,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, 0
+// CHECK: [0x80,0x0b,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, -1
+// CHECK: [0xc1,0x0b,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, 0.5
+// CHECK: [0xf0,0x0b,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, -4.0
+// CHECK: [0xf7,0x0b,0x85,0xbe]
+
+s_bcnt0_i32_b64 s5, 0xaf123456
+// CHECK: [0xff,0x0b,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_bcnt0_i32_b64 s5, 0x3f717273
+// CHECK: [0xff,0x0b,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_bcnt1_i32_b32 s5, s1
+// CHECK: [0x01,0x0c,0x85,0xbe]
+
+s_bcnt1_i32_b32 s101, s1
+// CHECK: [0x01,0x0c,0xe5,0xbe]
+
+s_bcnt1_i32_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x0c,0xe6,0xbe]
+
+s_bcnt1_i32_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x0c,0xe7,0xbe]
+
+s_bcnt1_i32_b32 vcc_lo, s1
+// CHECK: [0x01,0x0c,0xea,0xbe]
+
+s_bcnt1_i32_b32 vcc_hi, s1
+// CHECK: [0x01,0x0c,0xeb,0xbe]
+
+s_bcnt1_i32_b32 tba_lo, s1
+// CHECK: [0x01,0x0c,0xec,0xbe]
+
+s_bcnt1_i32_b32 tba_hi, s1
+// CHECK: [0x01,0x0c,0xed,0xbe]
+
+s_bcnt1_i32_b32 tma_lo, s1
+// CHECK: [0x01,0x0c,0xee,0xbe]
+
+s_bcnt1_i32_b32 tma_hi, s1
+// CHECK: [0x01,0x0c,0xef,0xbe]
+
+s_bcnt1_i32_b32 ttmp11, s1
+// CHECK: [0x01,0x0c,0xfb,0xbe]
+
+s_bcnt1_i32_b32 m0, s1
+// CHECK: [0x01,0x0c,0xfc,0xbe]
+
+s_bcnt1_i32_b32 exec_lo, s1
+// CHECK: [0x01,0x0c,0xfe,0xbe]
+
+s_bcnt1_i32_b32 exec_hi, s1
+// CHECK: [0x01,0x0c,0xff,0xbe]
+
+s_bcnt1_i32_b32 s5, s101
+// CHECK: [0x65,0x0c,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, flat_scratch_lo
+// CHECK: [0x66,0x0c,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, flat_scratch_hi
+// CHECK: [0x67,0x0c,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, vcc_lo
+// CHECK: [0x6a,0x0c,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, vcc_hi
+// CHECK: [0x6b,0x0c,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, tba_lo
+// CHECK: [0x6c,0x0c,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, tba_hi
+// CHECK: [0x6d,0x0c,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, tma_lo
+// CHECK: [0x6e,0x0c,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, tma_hi
+// CHECK: [0x6f,0x0c,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, ttmp11
+// CHECK: [0x7b,0x0c,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, m0
+// CHECK: [0x7c,0x0c,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, exec_lo
+// CHECK: [0x7e,0x0c,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, exec_hi
+// CHECK: [0x7f,0x0c,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, 0
+// CHECK: [0x80,0x0c,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, -1
+// CHECK: [0xc1,0x0c,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, 0.5
+// CHECK: [0xf0,0x0c,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, -4.0
+// CHECK: [0xf7,0x0c,0x85,0xbe]
+
+s_bcnt1_i32_b32 s5, 0xaf123456
+// CHECK: [0xff,0x0c,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_bcnt1_i32_b32 s5, 0x3f717273
+// CHECK: [0xff,0x0c,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_bcnt1_i32_b64 s5, s[2:3]
+// CHECK: [0x02,0x0d,0x85,0xbe]
+
+s_bcnt1_i32_b64 s101, s[2:3]
+// CHECK: [0x02,0x0d,0xe5,0xbe]
+
+s_bcnt1_i32_b64 flat_scratch_lo, s[2:3]
+// CHECK: [0x02,0x0d,0xe6,0xbe]
+
+s_bcnt1_i32_b64 flat_scratch_hi, s[2:3]
+// CHECK: [0x02,0x0d,0xe7,0xbe]
+
+s_bcnt1_i32_b64 vcc_lo, s[2:3]
+// CHECK: [0x02,0x0d,0xea,0xbe]
+
+s_bcnt1_i32_b64 vcc_hi, s[2:3]
+// CHECK: [0x02,0x0d,0xeb,0xbe]
+
+s_bcnt1_i32_b64 tba_lo, s[2:3]
+// CHECK: [0x02,0x0d,0xec,0xbe]
+
+s_bcnt1_i32_b64 tba_hi, s[2:3]
+// CHECK: [0x02,0x0d,0xed,0xbe]
+
+s_bcnt1_i32_b64 tma_lo, s[2:3]
+// CHECK: [0x02,0x0d,0xee,0xbe]
+
+s_bcnt1_i32_b64 tma_hi, s[2:3]
+// CHECK: [0x02,0x0d,0xef,0xbe]
+
+s_bcnt1_i32_b64 ttmp11, s[2:3]
+// CHECK: [0x02,0x0d,0xfb,0xbe]
+
+s_bcnt1_i32_b64 m0, s[2:3]
+// CHECK: [0x02,0x0d,0xfc,0xbe]
+
+s_bcnt1_i32_b64 exec_lo, s[2:3]
+// CHECK: [0x02,0x0d,0xfe,0xbe]
+
+s_bcnt1_i32_b64 exec_hi, s[2:3]
+// CHECK: [0x02,0x0d,0xff,0xbe]
+
+s_bcnt1_i32_b64 s5, s[4:5]
+// CHECK: [0x04,0x0d,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, s[100:101]
+// CHECK: [0x64,0x0d,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, flat_scratch
+// CHECK: [0x66,0x0d,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, vcc
+// CHECK: [0x6a,0x0d,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, tba
+// CHECK: [0x6c,0x0d,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, tma
+// CHECK: [0x6e,0x0d,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, ttmp[10:11]
+// CHECK: [0x7a,0x0d,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, exec
+// CHECK: [0x7e,0x0d,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, 0
+// CHECK: [0x80,0x0d,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, -1
+// CHECK: [0xc1,0x0d,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, 0.5
+// CHECK: [0xf0,0x0d,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, -4.0
+// CHECK: [0xf7,0x0d,0x85,0xbe]
+
+s_bcnt1_i32_b64 s5, 0xaf123456
+// CHECK: [0xff,0x0d,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_bcnt1_i32_b64 s5, 0x3f717273
+// CHECK: [0xff,0x0d,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_ff0_i32_b32 s5, s1
+// CHECK: [0x01,0x0e,0x85,0xbe]
+
+s_ff0_i32_b32 s101, s1
+// CHECK: [0x01,0x0e,0xe5,0xbe]
+
+s_ff0_i32_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x0e,0xe6,0xbe]
+
+s_ff0_i32_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x0e,0xe7,0xbe]
+
+s_ff0_i32_b32 vcc_lo, s1
+// CHECK: [0x01,0x0e,0xea,0xbe]
+
+s_ff0_i32_b32 vcc_hi, s1
+// CHECK: [0x01,0x0e,0xeb,0xbe]
+
+s_ff0_i32_b32 tba_lo, s1
+// CHECK: [0x01,0x0e,0xec,0xbe]
+
+s_ff0_i32_b32 tba_hi, s1
+// CHECK: [0x01,0x0e,0xed,0xbe]
+
+s_ff0_i32_b32 tma_lo, s1
+// CHECK: [0x01,0x0e,0xee,0xbe]
+
+s_ff0_i32_b32 tma_hi, s1
+// CHECK: [0x01,0x0e,0xef,0xbe]
+
+s_ff0_i32_b32 ttmp11, s1
+// CHECK: [0x01,0x0e,0xfb,0xbe]
+
+s_ff0_i32_b32 m0, s1
+// CHECK: [0x01,0x0e,0xfc,0xbe]
+
+s_ff0_i32_b32 exec_lo, s1
+// CHECK: [0x01,0x0e,0xfe,0xbe]
+
+s_ff0_i32_b32 exec_hi, s1
+// CHECK: [0x01,0x0e,0xff,0xbe]
+
+s_ff0_i32_b32 s5, s101
+// CHECK: [0x65,0x0e,0x85,0xbe]
+
+s_ff0_i32_b32 s5, flat_scratch_lo
+// CHECK: [0x66,0x0e,0x85,0xbe]
+
+s_ff0_i32_b32 s5, flat_scratch_hi
+// CHECK: [0x67,0x0e,0x85,0xbe]
+
+s_ff0_i32_b32 s5, vcc_lo
+// CHECK: [0x6a,0x0e,0x85,0xbe]
+
+s_ff0_i32_b32 s5, vcc_hi
+// CHECK: [0x6b,0x0e,0x85,0xbe]
+
+s_ff0_i32_b32 s5, tba_lo
+// CHECK: [0x6c,0x0e,0x85,0xbe]
+
+s_ff0_i32_b32 s5, tba_hi
+// CHECK: [0x6d,0x0e,0x85,0xbe]
+
+s_ff0_i32_b32 s5, tma_lo
+// CHECK: [0x6e,0x0e,0x85,0xbe]
+
+s_ff0_i32_b32 s5, tma_hi
+// CHECK: [0x6f,0x0e,0x85,0xbe]
+
+s_ff0_i32_b32 s5, ttmp11
+// CHECK: [0x7b,0x0e,0x85,0xbe]
+
+s_ff0_i32_b32 s5, m0
+// CHECK: [0x7c,0x0e,0x85,0xbe]
+
+s_ff0_i32_b32 s5, exec_lo
+// CHECK: [0x7e,0x0e,0x85,0xbe]
+
+s_ff0_i32_b32 s5, exec_hi
+// CHECK: [0x7f,0x0e,0x85,0xbe]
+
+s_ff0_i32_b32 s5, 0
+// CHECK: [0x80,0x0e,0x85,0xbe]
+
+s_ff0_i32_b32 s5, -1
+// CHECK: [0xc1,0x0e,0x85,0xbe]
+
+s_ff0_i32_b32 s5, 0.5
+// CHECK: [0xf0,0x0e,0x85,0xbe]
+
+s_ff0_i32_b32 s5, -4.0
+// CHECK: [0xf7,0x0e,0x85,0xbe]
+
+s_ff0_i32_b32 s5, 0xaf123456
+// CHECK: [0xff,0x0e,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_ff0_i32_b32 s5, 0x3f717273
+// CHECK: [0xff,0x0e,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_ff0_i32_b64 s5, s[2:3]
+// CHECK: [0x02,0x0f,0x85,0xbe]
+
+s_ff0_i32_b64 s101, s[2:3]
+// CHECK: [0x02,0x0f,0xe5,0xbe]
+
+s_ff0_i32_b64 flat_scratch_lo, s[2:3]
+// CHECK: [0x02,0x0f,0xe6,0xbe]
+
+s_ff0_i32_b64 flat_scratch_hi, s[2:3]
+// CHECK: [0x02,0x0f,0xe7,0xbe]
+
+s_ff0_i32_b64 vcc_lo, s[2:3]
+// CHECK: [0x02,0x0f,0xea,0xbe]
+
+s_ff0_i32_b64 vcc_hi, s[2:3]
+// CHECK: [0x02,0x0f,0xeb,0xbe]
+
+s_ff0_i32_b64 tba_lo, s[2:3]
+// CHECK: [0x02,0x0f,0xec,0xbe]
+
+s_ff0_i32_b64 tba_hi, s[2:3]
+// CHECK: [0x02,0x0f,0xed,0xbe]
+
+s_ff0_i32_b64 tma_lo, s[2:3]
+// CHECK: [0x02,0x0f,0xee,0xbe]
+
+s_ff0_i32_b64 tma_hi, s[2:3]
+// CHECK: [0x02,0x0f,0xef,0xbe]
+
+s_ff0_i32_b64 ttmp11, s[2:3]
+// CHECK: [0x02,0x0f,0xfb,0xbe]
+
+s_ff0_i32_b64 m0, s[2:3]
+// CHECK: [0x02,0x0f,0xfc,0xbe]
+
+s_ff0_i32_b64 exec_lo, s[2:3]
+// CHECK: [0x02,0x0f,0xfe,0xbe]
+
+s_ff0_i32_b64 exec_hi, s[2:3]
+// CHECK: [0x02,0x0f,0xff,0xbe]
+
+s_ff0_i32_b64 s5, s[4:5]
+// CHECK: [0x04,0x0f,0x85,0xbe]
+
+s_ff0_i32_b64 s5, s[100:101]
+// CHECK: [0x64,0x0f,0x85,0xbe]
+
+s_ff0_i32_b64 s5, flat_scratch
+// CHECK: [0x66,0x0f,0x85,0xbe]
+
+s_ff0_i32_b64 s5, vcc
+// CHECK: [0x6a,0x0f,0x85,0xbe]
+
+s_ff0_i32_b64 s5, tba
+// CHECK: [0x6c,0x0f,0x85,0xbe]
+
+s_ff0_i32_b64 s5, tma
+// CHECK: [0x6e,0x0f,0x85,0xbe]
+
+s_ff0_i32_b64 s5, ttmp[10:11]
+// CHECK: [0x7a,0x0f,0x85,0xbe]
+
+s_ff0_i32_b64 s5, exec
+// CHECK: [0x7e,0x0f,0x85,0xbe]
+
+s_ff0_i32_b64 s5, 0
+// CHECK: [0x80,0x0f,0x85,0xbe]
+
+s_ff0_i32_b64 s5, -1
+// CHECK: [0xc1,0x0f,0x85,0xbe]
+
+s_ff0_i32_b64 s5, 0.5
+// CHECK: [0xf0,0x0f,0x85,0xbe]
+
+s_ff0_i32_b64 s5, -4.0
+// CHECK: [0xf7,0x0f,0x85,0xbe]
+
+s_ff0_i32_b64 s5, 0xaf123456
+// CHECK: [0xff,0x0f,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_ff0_i32_b64 s5, 0x3f717273
+// CHECK: [0xff,0x0f,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_ff1_i32_b32 s5, s1
+// CHECK: [0x01,0x10,0x85,0xbe]
+
+s_ff1_i32_b32 s101, s1
+// CHECK: [0x01,0x10,0xe5,0xbe]
+
+s_ff1_i32_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x10,0xe6,0xbe]
+
+s_ff1_i32_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x10,0xe7,0xbe]
+
+s_ff1_i32_b32 vcc_lo, s1
+// CHECK: [0x01,0x10,0xea,0xbe]
+
+s_ff1_i32_b32 vcc_hi, s1
+// CHECK: [0x01,0x10,0xeb,0xbe]
+
+s_ff1_i32_b32 tba_lo, s1
+// CHECK: [0x01,0x10,0xec,0xbe]
+
+s_ff1_i32_b32 tba_hi, s1
+// CHECK: [0x01,0x10,0xed,0xbe]
+
+s_ff1_i32_b32 tma_lo, s1
+// CHECK: [0x01,0x10,0xee,0xbe]
+
+s_ff1_i32_b32 tma_hi, s1
+// CHECK: [0x01,0x10,0xef,0xbe]
+
+s_ff1_i32_b32 ttmp11, s1
+// CHECK: [0x01,0x10,0xfb,0xbe]
+
+s_ff1_i32_b32 m0, s1
+// CHECK: [0x01,0x10,0xfc,0xbe]
+
+s_ff1_i32_b32 exec_lo, s1
+// CHECK: [0x01,0x10,0xfe,0xbe]
+
+s_ff1_i32_b32 exec_hi, s1
+// CHECK: [0x01,0x10,0xff,0xbe]
+
+s_ff1_i32_b32 s5, s101
+// CHECK: [0x65,0x10,0x85,0xbe]
+
+s_ff1_i32_b32 s5, flat_scratch_lo
+// CHECK: [0x66,0x10,0x85,0xbe]
+
+s_ff1_i32_b32 s5, flat_scratch_hi
+// CHECK: [0x67,0x10,0x85,0xbe]
+
+s_ff1_i32_b32 s5, vcc_lo
+// CHECK: [0x6a,0x10,0x85,0xbe]
+
+s_ff1_i32_b32 s5, vcc_hi
+// CHECK: [0x6b,0x10,0x85,0xbe]
+
+s_ff1_i32_b32 s5, tba_lo
+// CHECK: [0x6c,0x10,0x85,0xbe]
+
+s_ff1_i32_b32 s5, tba_hi
+// CHECK: [0x6d,0x10,0x85,0xbe]
+
+s_ff1_i32_b32 s5, tma_lo
+// CHECK: [0x6e,0x10,0x85,0xbe]
+
+s_ff1_i32_b32 s5, tma_hi
+// CHECK: [0x6f,0x10,0x85,0xbe]
+
+s_ff1_i32_b32 s5, ttmp11
+// CHECK: [0x7b,0x10,0x85,0xbe]
+
+s_ff1_i32_b32 s5, m0
+// CHECK: [0x7c,0x10,0x85,0xbe]
+
+s_ff1_i32_b32 s5, exec_lo
+// CHECK: [0x7e,0x10,0x85,0xbe]
+
+s_ff1_i32_b32 s5, exec_hi
+// CHECK: [0x7f,0x10,0x85,0xbe]
+
+s_ff1_i32_b32 s5, 0
+// CHECK: [0x80,0x10,0x85,0xbe]
+
+s_ff1_i32_b32 s5, -1
+// CHECK: [0xc1,0x10,0x85,0xbe]
+
+s_ff1_i32_b32 s5, 0.5
+// CHECK: [0xf0,0x10,0x85,0xbe]
+
+s_ff1_i32_b32 s5, -4.0
+// CHECK: [0xf7,0x10,0x85,0xbe]
+
+s_ff1_i32_b32 s5, 0xaf123456
+// CHECK: [0xff,0x10,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_ff1_i32_b32 s5, 0x3f717273
+// CHECK: [0xff,0x10,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_ff1_i32_b64 s5, s[2:3]
+// CHECK: [0x02,0x11,0x85,0xbe]
+
+s_ff1_i32_b64 s101, s[2:3]
+// CHECK: [0x02,0x11,0xe5,0xbe]
+
+s_ff1_i32_b64 flat_scratch_lo, s[2:3]
+// CHECK: [0x02,0x11,0xe6,0xbe]
+
+s_ff1_i32_b64 flat_scratch_hi, s[2:3]
+// CHECK: [0x02,0x11,0xe7,0xbe]
+
+s_ff1_i32_b64 vcc_lo, s[2:3]
+// CHECK: [0x02,0x11,0xea,0xbe]
+
+s_ff1_i32_b64 vcc_hi, s[2:3]
+// CHECK: [0x02,0x11,0xeb,0xbe]
+
+s_ff1_i32_b64 tba_lo, s[2:3]
+// CHECK: [0x02,0x11,0xec,0xbe]
+
+s_ff1_i32_b64 tba_hi, s[2:3]
+// CHECK: [0x02,0x11,0xed,0xbe]
+
+s_ff1_i32_b64 tma_lo, s[2:3]
+// CHECK: [0x02,0x11,0xee,0xbe]
+
+s_ff1_i32_b64 tma_hi, s[2:3]
+// CHECK: [0x02,0x11,0xef,0xbe]
+
+s_ff1_i32_b64 ttmp11, s[2:3]
+// CHECK: [0x02,0x11,0xfb,0xbe]
+
+s_ff1_i32_b64 m0, s[2:3]
+// CHECK: [0x02,0x11,0xfc,0xbe]
+
+s_ff1_i32_b64 exec_lo, s[2:3]
+// CHECK: [0x02,0x11,0xfe,0xbe]
+
+s_ff1_i32_b64 exec_hi, s[2:3]
+// CHECK: [0x02,0x11,0xff,0xbe]
+
+s_ff1_i32_b64 s5, s[4:5]
+// CHECK: [0x04,0x11,0x85,0xbe]
+
+s_ff1_i32_b64 s5, s[100:101]
+// CHECK: [0x64,0x11,0x85,0xbe]
+
+s_ff1_i32_b64 s5, flat_scratch
+// CHECK: [0x66,0x11,0x85,0xbe]
+
+s_ff1_i32_b64 s5, vcc
+// CHECK: [0x6a,0x11,0x85,0xbe]
+
+s_ff1_i32_b64 s5, tba
+// CHECK: [0x6c,0x11,0x85,0xbe]
+
+s_ff1_i32_b64 s5, tma
+// CHECK: [0x6e,0x11,0x85,0xbe]
+
+s_ff1_i32_b64 s5, ttmp[10:11]
+// CHECK: [0x7a,0x11,0x85,0xbe]
+
+s_ff1_i32_b64 s5, exec
+// CHECK: [0x7e,0x11,0x85,0xbe]
+
+s_ff1_i32_b64 s5, 0
+// CHECK: [0x80,0x11,0x85,0xbe]
+
+s_ff1_i32_b64 s5, -1
+// CHECK: [0xc1,0x11,0x85,0xbe]
+
+s_ff1_i32_b64 s5, 0.5
+// CHECK: [0xf0,0x11,0x85,0xbe]
+
+s_ff1_i32_b64 s5, -4.0
+// CHECK: [0xf7,0x11,0x85,0xbe]
+
+s_ff1_i32_b64 s5, 0xaf123456
+// CHECK: [0xff,0x11,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_ff1_i32_b64 s5, 0x3f717273
+// CHECK: [0xff,0x11,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_flbit_i32_b32 s5, s1
+// CHECK: [0x01,0x12,0x85,0xbe]
+
+s_flbit_i32_b32 s101, s1
+// CHECK: [0x01,0x12,0xe5,0xbe]
+
+s_flbit_i32_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x12,0xe6,0xbe]
+
+s_flbit_i32_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x12,0xe7,0xbe]
+
+s_flbit_i32_b32 vcc_lo, s1
+// CHECK: [0x01,0x12,0xea,0xbe]
+
+s_flbit_i32_b32 vcc_hi, s1
+// CHECK: [0x01,0x12,0xeb,0xbe]
+
+s_flbit_i32_b32 tba_lo, s1
+// CHECK: [0x01,0x12,0xec,0xbe]
+
+s_flbit_i32_b32 tba_hi, s1
+// CHECK: [0x01,0x12,0xed,0xbe]
+
+s_flbit_i32_b32 tma_lo, s1
+// CHECK: [0x01,0x12,0xee,0xbe]
+
+s_flbit_i32_b32 tma_hi, s1
+// CHECK: [0x01,0x12,0xef,0xbe]
+
+s_flbit_i32_b32 ttmp11, s1
+// CHECK: [0x01,0x12,0xfb,0xbe]
+
+s_flbit_i32_b32 m0, s1
+// CHECK: [0x01,0x12,0xfc,0xbe]
+
+s_flbit_i32_b32 exec_lo, s1
+// CHECK: [0x01,0x12,0xfe,0xbe]
+
+s_flbit_i32_b32 exec_hi, s1
+// CHECK: [0x01,0x12,0xff,0xbe]
+
+s_flbit_i32_b32 s5, s101
+// CHECK: [0x65,0x12,0x85,0xbe]
+
+s_flbit_i32_b32 s5, flat_scratch_lo
+// CHECK: [0x66,0x12,0x85,0xbe]
+
+s_flbit_i32_b32 s5, flat_scratch_hi
+// CHECK: [0x67,0x12,0x85,0xbe]
+
+s_flbit_i32_b32 s5, vcc_lo
+// CHECK: [0x6a,0x12,0x85,0xbe]
+
+s_flbit_i32_b32 s5, vcc_hi
+// CHECK: [0x6b,0x12,0x85,0xbe]
+
+s_flbit_i32_b32 s5, tba_lo
+// CHECK: [0x6c,0x12,0x85,0xbe]
+
+s_flbit_i32_b32 s5, tba_hi
+// CHECK: [0x6d,0x12,0x85,0xbe]
+
+s_flbit_i32_b32 s5, tma_lo
+// CHECK: [0x6e,0x12,0x85,0xbe]
+
+s_flbit_i32_b32 s5, tma_hi
+// CHECK: [0x6f,0x12,0x85,0xbe]
+
+s_flbit_i32_b32 s5, ttmp11
+// CHECK: [0x7b,0x12,0x85,0xbe]
+
+s_flbit_i32_b32 s5, m0
+// CHECK: [0x7c,0x12,0x85,0xbe]
+
+s_flbit_i32_b32 s5, exec_lo
+// CHECK: [0x7e,0x12,0x85,0xbe]
+
+s_flbit_i32_b32 s5, exec_hi
+// CHECK: [0x7f,0x12,0x85,0xbe]
+
+s_flbit_i32_b32 s5, 0
+// CHECK: [0x80,0x12,0x85,0xbe]
+
+s_flbit_i32_b32 s5, -1
+// CHECK: [0xc1,0x12,0x85,0xbe]
+
+s_flbit_i32_b32 s5, 0.5
+// CHECK: [0xf0,0x12,0x85,0xbe]
+
+s_flbit_i32_b32 s5, -4.0
+// CHECK: [0xf7,0x12,0x85,0xbe]
+
+s_flbit_i32_b32 s5, 0xaf123456
+// CHECK: [0xff,0x12,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_flbit_i32_b32 s5, 0x3f717273
+// CHECK: [0xff,0x12,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_flbit_i32_b64 s5, s[2:3]
+// CHECK: [0x02,0x13,0x85,0xbe]
+
+s_flbit_i32_b64 s101, s[2:3]
+// CHECK: [0x02,0x13,0xe5,0xbe]
+
+s_flbit_i32_b64 flat_scratch_lo, s[2:3]
+// CHECK: [0x02,0x13,0xe6,0xbe]
+
+s_flbit_i32_b64 flat_scratch_hi, s[2:3]
+// CHECK: [0x02,0x13,0xe7,0xbe]
+
+s_flbit_i32_b64 vcc_lo, s[2:3]
+// CHECK: [0x02,0x13,0xea,0xbe]
+
+s_flbit_i32_b64 vcc_hi, s[2:3]
+// CHECK: [0x02,0x13,0xeb,0xbe]
+
+s_flbit_i32_b64 tba_lo, s[2:3]
+// CHECK: [0x02,0x13,0xec,0xbe]
+
+s_flbit_i32_b64 tba_hi, s[2:3]
+// CHECK: [0x02,0x13,0xed,0xbe]
+
+s_flbit_i32_b64 tma_lo, s[2:3]
+// CHECK: [0x02,0x13,0xee,0xbe]
+
+s_flbit_i32_b64 tma_hi, s[2:3]
+// CHECK: [0x02,0x13,0xef,0xbe]
+
+s_flbit_i32_b64 ttmp11, s[2:3]
+// CHECK: [0x02,0x13,0xfb,0xbe]
+
+s_flbit_i32_b64 m0, s[2:3]
+// CHECK: [0x02,0x13,0xfc,0xbe]
+
+s_flbit_i32_b64 exec_lo, s[2:3]
+// CHECK: [0x02,0x13,0xfe,0xbe]
+
+s_flbit_i32_b64 exec_hi, s[2:3]
+// CHECK: [0x02,0x13,0xff,0xbe]
+
+s_flbit_i32_b64 s5, s[4:5]
+// CHECK: [0x04,0x13,0x85,0xbe]
+
+s_flbit_i32_b64 s5, s[100:101]
+// CHECK: [0x64,0x13,0x85,0xbe]
+
+s_flbit_i32_b64 s5, flat_scratch
+// CHECK: [0x66,0x13,0x85,0xbe]
+
+s_flbit_i32_b64 s5, vcc
+// CHECK: [0x6a,0x13,0x85,0xbe]
+
+s_flbit_i32_b64 s5, tba
+// CHECK: [0x6c,0x13,0x85,0xbe]
+
+s_flbit_i32_b64 s5, tma
+// CHECK: [0x6e,0x13,0x85,0xbe]
+
+s_flbit_i32_b64 s5, ttmp[10:11]
+// CHECK: [0x7a,0x13,0x85,0xbe]
+
+s_flbit_i32_b64 s5, exec
+// CHECK: [0x7e,0x13,0x85,0xbe]
+
+s_flbit_i32_b64 s5, 0
+// CHECK: [0x80,0x13,0x85,0xbe]
+
+s_flbit_i32_b64 s5, -1
+// CHECK: [0xc1,0x13,0x85,0xbe]
+
+s_flbit_i32_b64 s5, 0.5
+// CHECK: [0xf0,0x13,0x85,0xbe]
+
+s_flbit_i32_b64 s5, -4.0
+// CHECK: [0xf7,0x13,0x85,0xbe]
+
+s_flbit_i32_b64 s5, 0xaf123456
+// CHECK: [0xff,0x13,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_flbit_i32_b64 s5, 0x3f717273
+// CHECK: [0xff,0x13,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_flbit_i32 s5, s1
+// CHECK: [0x01,0x14,0x85,0xbe]
+
+s_flbit_i32 s101, s1
+// CHECK: [0x01,0x14,0xe5,0xbe]
+
+s_flbit_i32 flat_scratch_lo, s1
+// CHECK: [0x01,0x14,0xe6,0xbe]
+
+s_flbit_i32 flat_scratch_hi, s1
+// CHECK: [0x01,0x14,0xe7,0xbe]
+
+s_flbit_i32 vcc_lo, s1
+// CHECK: [0x01,0x14,0xea,0xbe]
+
+s_flbit_i32 vcc_hi, s1
+// CHECK: [0x01,0x14,0xeb,0xbe]
+
+s_flbit_i32 tba_lo, s1
+// CHECK: [0x01,0x14,0xec,0xbe]
+
+s_flbit_i32 tba_hi, s1
+// CHECK: [0x01,0x14,0xed,0xbe]
+
+s_flbit_i32 tma_lo, s1
+// CHECK: [0x01,0x14,0xee,0xbe]
+
+s_flbit_i32 tma_hi, s1
+// CHECK: [0x01,0x14,0xef,0xbe]
+
+s_flbit_i32 ttmp11, s1
+// CHECK: [0x01,0x14,0xfb,0xbe]
+
+s_flbit_i32 m0, s1
+// CHECK: [0x01,0x14,0xfc,0xbe]
+
+s_flbit_i32 exec_lo, s1
+// CHECK: [0x01,0x14,0xfe,0xbe]
+
+s_flbit_i32 exec_hi, s1
+// CHECK: [0x01,0x14,0xff,0xbe]
+
+s_flbit_i32 s5, s101
+// CHECK: [0x65,0x14,0x85,0xbe]
+
+s_flbit_i32 s5, flat_scratch_lo
+// CHECK: [0x66,0x14,0x85,0xbe]
+
+s_flbit_i32 s5, flat_scratch_hi
+// CHECK: [0x67,0x14,0x85,0xbe]
+
+s_flbit_i32 s5, vcc_lo
+// CHECK: [0x6a,0x14,0x85,0xbe]
+
+s_flbit_i32 s5, vcc_hi
+// CHECK: [0x6b,0x14,0x85,0xbe]
+
+s_flbit_i32 s5, tba_lo
+// CHECK: [0x6c,0x14,0x85,0xbe]
+
+s_flbit_i32 s5, tba_hi
+// CHECK: [0x6d,0x14,0x85,0xbe]
+
+s_flbit_i32 s5, tma_lo
+// CHECK: [0x6e,0x14,0x85,0xbe]
+
+s_flbit_i32 s5, tma_hi
+// CHECK: [0x6f,0x14,0x85,0xbe]
+
+s_flbit_i32 s5, ttmp11
+// CHECK: [0x7b,0x14,0x85,0xbe]
+
+s_flbit_i32 s5, m0
+// CHECK: [0x7c,0x14,0x85,0xbe]
+
+s_flbit_i32 s5, exec_lo
+// CHECK: [0x7e,0x14,0x85,0xbe]
+
+s_flbit_i32 s5, exec_hi
+// CHECK: [0x7f,0x14,0x85,0xbe]
+
+s_flbit_i32 s5, 0
+// CHECK: [0x80,0x14,0x85,0xbe]
+
+s_flbit_i32 s5, -1
+// CHECK: [0xc1,0x14,0x85,0xbe]
+
+s_flbit_i32 s5, 0.5
+// CHECK: [0xf0,0x14,0x85,0xbe]
+
+s_flbit_i32 s5, -4.0
+// CHECK: [0xf7,0x14,0x85,0xbe]
+
+s_flbit_i32 s5, 0xaf123456
+// CHECK: [0xff,0x14,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_flbit_i32 s5, 0x3f717273
+// CHECK: [0xff,0x14,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_flbit_i32_i64 s5, s[2:3]
+// CHECK: [0x02,0x15,0x85,0xbe]
+
+s_flbit_i32_i64 s101, s[2:3]
+// CHECK: [0x02,0x15,0xe5,0xbe]
+
+s_flbit_i32_i64 flat_scratch_lo, s[2:3]
+// CHECK: [0x02,0x15,0xe6,0xbe]
+
+s_flbit_i32_i64 flat_scratch_hi, s[2:3]
+// CHECK: [0x02,0x15,0xe7,0xbe]
+
+s_flbit_i32_i64 vcc_lo, s[2:3]
+// CHECK: [0x02,0x15,0xea,0xbe]
+
+s_flbit_i32_i64 vcc_hi, s[2:3]
+// CHECK: [0x02,0x15,0xeb,0xbe]
+
+s_flbit_i32_i64 tba_lo, s[2:3]
+// CHECK: [0x02,0x15,0xec,0xbe]
+
+s_flbit_i32_i64 tba_hi, s[2:3]
+// CHECK: [0x02,0x15,0xed,0xbe]
+
+s_flbit_i32_i64 tma_lo, s[2:3]
+// CHECK: [0x02,0x15,0xee,0xbe]
+
+s_flbit_i32_i64 tma_hi, s[2:3]
+// CHECK: [0x02,0x15,0xef,0xbe]
+
+s_flbit_i32_i64 ttmp11, s[2:3]
+// CHECK: [0x02,0x15,0xfb,0xbe]
+
+s_flbit_i32_i64 m0, s[2:3]
+// CHECK: [0x02,0x15,0xfc,0xbe]
+
+s_flbit_i32_i64 exec_lo, s[2:3]
+// CHECK: [0x02,0x15,0xfe,0xbe]
+
+s_flbit_i32_i64 exec_hi, s[2:3]
+// CHECK: [0x02,0x15,0xff,0xbe]
+
+s_flbit_i32_i64 s5, s[4:5]
+// CHECK: [0x04,0x15,0x85,0xbe]
+
+s_flbit_i32_i64 s5, s[100:101]
+// CHECK: [0x64,0x15,0x85,0xbe]
+
+s_flbit_i32_i64 s5, flat_scratch
+// CHECK: [0x66,0x15,0x85,0xbe]
+
+s_flbit_i32_i64 s5, vcc
+// CHECK: [0x6a,0x15,0x85,0xbe]
+
+s_flbit_i32_i64 s5, tba
+// CHECK: [0x6c,0x15,0x85,0xbe]
+
+s_flbit_i32_i64 s5, tma
+// CHECK: [0x6e,0x15,0x85,0xbe]
+
+s_flbit_i32_i64 s5, ttmp[10:11]
+// CHECK: [0x7a,0x15,0x85,0xbe]
+
+s_flbit_i32_i64 s5, exec
+// CHECK: [0x7e,0x15,0x85,0xbe]
+
+s_flbit_i32_i64 s5, 0
+// CHECK: [0x80,0x15,0x85,0xbe]
+
+s_flbit_i32_i64 s5, -1
+// CHECK: [0xc1,0x15,0x85,0xbe]
+
+s_flbit_i32_i64 s5, 0.5
+// CHECK: [0xf0,0x15,0x85,0xbe]
+
+s_flbit_i32_i64 s5, -4.0
+// CHECK: [0xf7,0x15,0x85,0xbe]
+
+s_flbit_i32_i64 s5, 0xaf123456
+// CHECK: [0xff,0x15,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_flbit_i32_i64 s5, 0x3f717273
+// CHECK: [0xff,0x15,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_sext_i32_i8 s5, s1
+// CHECK: [0x01,0x16,0x85,0xbe]
+
+s_sext_i32_i8 s101, s1
+// CHECK: [0x01,0x16,0xe5,0xbe]
+
+s_sext_i32_i8 flat_scratch_lo, s1
+// CHECK: [0x01,0x16,0xe6,0xbe]
+
+s_sext_i32_i8 flat_scratch_hi, s1
+// CHECK: [0x01,0x16,0xe7,0xbe]
+
+s_sext_i32_i8 vcc_lo, s1
+// CHECK: [0x01,0x16,0xea,0xbe]
+
+s_sext_i32_i8 vcc_hi, s1
+// CHECK: [0x01,0x16,0xeb,0xbe]
+
+s_sext_i32_i8 tba_lo, s1
+// CHECK: [0x01,0x16,0xec,0xbe]
+
+s_sext_i32_i8 tba_hi, s1
+// CHECK: [0x01,0x16,0xed,0xbe]
+
+s_sext_i32_i8 tma_lo, s1
+// CHECK: [0x01,0x16,0xee,0xbe]
+
+s_sext_i32_i8 tma_hi, s1
+// CHECK: [0x01,0x16,0xef,0xbe]
+
+s_sext_i32_i8 ttmp11, s1
+// CHECK: [0x01,0x16,0xfb,0xbe]
+
+s_sext_i32_i8 m0, s1
+// CHECK: [0x01,0x16,0xfc,0xbe]
+
+s_sext_i32_i8 exec_lo, s1
+// CHECK: [0x01,0x16,0xfe,0xbe]
+
+s_sext_i32_i8 exec_hi, s1
+// CHECK: [0x01,0x16,0xff,0xbe]
+
+s_sext_i32_i8 s5, s101
+// CHECK: [0x65,0x16,0x85,0xbe]
+
+s_sext_i32_i8 s5, flat_scratch_lo
+// CHECK: [0x66,0x16,0x85,0xbe]
+
+s_sext_i32_i8 s5, flat_scratch_hi
+// CHECK: [0x67,0x16,0x85,0xbe]
+
+s_sext_i32_i8 s5, vcc_lo
+// CHECK: [0x6a,0x16,0x85,0xbe]
+
+s_sext_i32_i8 s5, vcc_hi
+// CHECK: [0x6b,0x16,0x85,0xbe]
+
+s_sext_i32_i8 s5, tba_lo
+// CHECK: [0x6c,0x16,0x85,0xbe]
+
+s_sext_i32_i8 s5, tba_hi
+// CHECK: [0x6d,0x16,0x85,0xbe]
+
+s_sext_i32_i8 s5, tma_lo
+// CHECK: [0x6e,0x16,0x85,0xbe]
+
+s_sext_i32_i8 s5, tma_hi
+// CHECK: [0x6f,0x16,0x85,0xbe]
+
+s_sext_i32_i8 s5, ttmp11
+// CHECK: [0x7b,0x16,0x85,0xbe]
+
+s_sext_i32_i8 s5, m0
+// CHECK: [0x7c,0x16,0x85,0xbe]
+
+s_sext_i32_i8 s5, exec_lo
+// CHECK: [0x7e,0x16,0x85,0xbe]
+
+s_sext_i32_i8 s5, exec_hi
+// CHECK: [0x7f,0x16,0x85,0xbe]
+
+s_sext_i32_i8 s5, 0
+// CHECK: [0x80,0x16,0x85,0xbe]
+
+s_sext_i32_i8 s5, -1
+// CHECK: [0xc1,0x16,0x85,0xbe]
+
+s_sext_i32_i8 s5, 0.5
+// CHECK: [0xf0,0x16,0x85,0xbe]
+
+s_sext_i32_i8 s5, -4.0
+// CHECK: [0xf7,0x16,0x85,0xbe]
+
+s_sext_i32_i8 s5, 0x71
+// CHECK: [0xff,0x16,0x85,0xbe,0x71,0x00,0x00,0x00]
+
+s_sext_i32_i8 s5, 0xf0
+// CHECK: [0xff,0x16,0x85,0xbe,0xf0,0x00,0x00,0x00]
+
+s_sext_i32_i16 s5, s1
+// CHECK: [0x01,0x17,0x85,0xbe]
+
+s_sext_i32_i16 s101, s1
+// CHECK: [0x01,0x17,0xe5,0xbe]
+
+s_sext_i32_i16 flat_scratch_lo, s1
+// CHECK: [0x01,0x17,0xe6,0xbe]
+
+s_sext_i32_i16 flat_scratch_hi, s1
+// CHECK: [0x01,0x17,0xe7,0xbe]
+
+s_sext_i32_i16 vcc_lo, s1
+// CHECK: [0x01,0x17,0xea,0xbe]
+
+s_sext_i32_i16 vcc_hi, s1
+// CHECK: [0x01,0x17,0xeb,0xbe]
+
+s_sext_i32_i16 tba_lo, s1
+// CHECK: [0x01,0x17,0xec,0xbe]
+
+s_sext_i32_i16 tba_hi, s1
+// CHECK: [0x01,0x17,0xed,0xbe]
+
+s_sext_i32_i16 tma_lo, s1
+// CHECK: [0x01,0x17,0xee,0xbe]
+
+s_sext_i32_i16 tma_hi, s1
+// CHECK: [0x01,0x17,0xef,0xbe]
+
+s_sext_i32_i16 ttmp11, s1
+// CHECK: [0x01,0x17,0xfb,0xbe]
+
+s_sext_i32_i16 m0, s1
+// CHECK: [0x01,0x17,0xfc,0xbe]
+
+s_sext_i32_i16 exec_lo, s1
+// CHECK: [0x01,0x17,0xfe,0xbe]
+
+s_sext_i32_i16 exec_hi, s1
+// CHECK: [0x01,0x17,0xff,0xbe]
+
+s_sext_i32_i16 s5, s101
+// CHECK: [0x65,0x17,0x85,0xbe]
+
+s_sext_i32_i16 s5, flat_scratch_lo
+// CHECK: [0x66,0x17,0x85,0xbe]
+
+s_sext_i32_i16 s5, flat_scratch_hi
+// CHECK: [0x67,0x17,0x85,0xbe]
+
+s_sext_i32_i16 s5, vcc_lo
+// CHECK: [0x6a,0x17,0x85,0xbe]
+
+s_sext_i32_i16 s5, vcc_hi
+// CHECK: [0x6b,0x17,0x85,0xbe]
+
+s_sext_i32_i16 s5, tba_lo
+// CHECK: [0x6c,0x17,0x85,0xbe]
+
+s_sext_i32_i16 s5, tba_hi
+// CHECK: [0x6d,0x17,0x85,0xbe]
+
+s_sext_i32_i16 s5, tma_lo
+// CHECK: [0x6e,0x17,0x85,0xbe]
+
+s_sext_i32_i16 s5, tma_hi
+// CHECK: [0x6f,0x17,0x85,0xbe]
+
+s_sext_i32_i16 s5, ttmp11
+// CHECK: [0x7b,0x17,0x85,0xbe]
+
+s_sext_i32_i16 s5, m0
+// CHECK: [0x7c,0x17,0x85,0xbe]
+
+s_sext_i32_i16 s5, exec_lo
+// CHECK: [0x7e,0x17,0x85,0xbe]
+
+s_sext_i32_i16 s5, exec_hi
+// CHECK: [0x7f,0x17,0x85,0xbe]
+
+s_sext_i32_i16 s5, 0
+// CHECK: [0x80,0x17,0x85,0xbe]
+
+s_sext_i32_i16 s5, -1
+// CHECK: [0xc1,0x17,0x85,0xbe]
+
+s_sext_i32_i16 s5, 0.5
+// CHECK: [0xf0,0x17,0x85,0xbe]
+
+s_sext_i32_i16 s5, -4.0
+// CHECK: [0xf7,0x17,0x85,0xbe]
+
+s_sext_i32_i16 s5, 0xaf123456
+// CHECK: [0xff,0x17,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_sext_i32_i16 s5, 0x3f717273
+// CHECK: [0xff,0x17,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_bitset0_b32 s5, s1
+// CHECK: [0x01,0x18,0x85,0xbe]
+
+s_bitset0_b32 s101, s1
+// CHECK: [0x01,0x18,0xe5,0xbe]
+
+s_bitset0_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x18,0xe6,0xbe]
+
+s_bitset0_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x18,0xe7,0xbe]
+
+s_bitset0_b32 vcc_lo, s1
+// CHECK: [0x01,0x18,0xea,0xbe]
+
+s_bitset0_b32 vcc_hi, s1
+// CHECK: [0x01,0x18,0xeb,0xbe]
+
+s_bitset0_b32 tba_lo, s1
+// CHECK: [0x01,0x18,0xec,0xbe]
+
+s_bitset0_b32 tba_hi, s1
+// CHECK: [0x01,0x18,0xed,0xbe]
+
+s_bitset0_b32 tma_lo, s1
+// CHECK: [0x01,0x18,0xee,0xbe]
+
+s_bitset0_b32 tma_hi, s1
+// CHECK: [0x01,0x18,0xef,0xbe]
+
+s_bitset0_b32 ttmp11, s1
+// CHECK: [0x01,0x18,0xfb,0xbe]
+
+s_bitset0_b32 m0, s1
+// CHECK: [0x01,0x18,0xfc,0xbe]
+
+s_bitset0_b32 exec_lo, s1
+// CHECK: [0x01,0x18,0xfe,0xbe]
+
+s_bitset0_b32 exec_hi, s1
+// CHECK: [0x01,0x18,0xff,0xbe]
+
+s_bitset0_b32 s5, s101
+// CHECK: [0x65,0x18,0x85,0xbe]
+
+s_bitset0_b32 s5, flat_scratch_lo
+// CHECK: [0x66,0x18,0x85,0xbe]
+
+s_bitset0_b32 s5, flat_scratch_hi
+// CHECK: [0x67,0x18,0x85,0xbe]
+
+s_bitset0_b32 s5, vcc_lo
+// CHECK: [0x6a,0x18,0x85,0xbe]
+
+s_bitset0_b32 s5, vcc_hi
+// CHECK: [0x6b,0x18,0x85,0xbe]
+
+s_bitset0_b32 s5, tba_lo
+// CHECK: [0x6c,0x18,0x85,0xbe]
+
+s_bitset0_b32 s5, tba_hi
+// CHECK: [0x6d,0x18,0x85,0xbe]
+
+s_bitset0_b32 s5, tma_lo
+// CHECK: [0x6e,0x18,0x85,0xbe]
+
+s_bitset0_b32 s5, tma_hi
+// CHECK: [0x6f,0x18,0x85,0xbe]
+
+s_bitset0_b32 s5, ttmp11
+// CHECK: [0x7b,0x18,0x85,0xbe]
+
+s_bitset0_b32 s5, m0
+// CHECK: [0x7c,0x18,0x85,0xbe]
+
+s_bitset0_b32 s5, exec_lo
+// CHECK: [0x7e,0x18,0x85,0xbe]
+
+s_bitset0_b32 s5, exec_hi
+// CHECK: [0x7f,0x18,0x85,0xbe]
+
+s_bitset0_b32 s5, 0
+// CHECK: [0x80,0x18,0x85,0xbe]
+
+s_bitset0_b32 s5, -1
+// CHECK: [0xc1,0x18,0x85,0xbe]
+
+s_bitset0_b32 s5, 0.5
+// CHECK: [0xf0,0x18,0x85,0xbe]
+
+s_bitset0_b32 s5, -4.0
+// CHECK: [0xf7,0x18,0x85,0xbe]
+
+s_bitset0_b32 s5, 0xaf123456
+// CHECK: [0xff,0x18,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_bitset0_b32 s5, 0x3f717273
+// CHECK: [0xff,0x18,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_bitset0_b64 s[10:11], s1
+// CHECK: [0x01,0x19,0x8a,0xbe]
+
+s_bitset0_b64 s[12:13], s1
+// CHECK: [0x01,0x19,0x8c,0xbe]
+
+s_bitset0_b64 s[100:101], s1
+// CHECK: [0x01,0x19,0xe4,0xbe]
+
+s_bitset0_b64 flat_scratch, s1
+// CHECK: [0x01,0x19,0xe6,0xbe]
+
+s_bitset0_b64 vcc, s1
+// CHECK: [0x01,0x19,0xea,0xbe]
+
+s_bitset0_b64 tba, s1
+// CHECK: [0x01,0x19,0xec,0xbe]
+
+s_bitset0_b64 tma, s1
+// CHECK: [0x01,0x19,0xee,0xbe]
+
+s_bitset0_b64 ttmp[10:11], s1
+// CHECK: [0x01,0x19,0xfa,0xbe]
+
+s_bitset0_b64 exec, s1
+// CHECK: [0x01,0x19,0xfe,0xbe]
+
+s_bitset0_b64 s[10:11], s101
+// CHECK: [0x65,0x19,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], flat_scratch_lo
+// CHECK: [0x66,0x19,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], flat_scratch_hi
+// CHECK: [0x67,0x19,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], vcc_lo
+// CHECK: [0x6a,0x19,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], vcc_hi
+// CHECK: [0x6b,0x19,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], tba_lo
+// CHECK: [0x6c,0x19,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], tba_hi
+// CHECK: [0x6d,0x19,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], tma_lo
+// CHECK: [0x6e,0x19,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], tma_hi
+// CHECK: [0x6f,0x19,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], ttmp11
+// CHECK: [0x7b,0x19,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], m0
+// CHECK: [0x7c,0x19,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], exec_lo
+// CHECK: [0x7e,0x19,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], exec_hi
+// CHECK: [0x7f,0x19,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], 0
+// CHECK: [0x80,0x19,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], -1
+// CHECK: [0xc1,0x19,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x19,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x19,0x8a,0xbe]
+
+s_bitset0_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x19,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_bitset0_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x19,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_bitset1_b32 s5, s1
+// CHECK: [0x01,0x1a,0x85,0xbe]
+
+s_bitset1_b32 s101, s1
+// CHECK: [0x01,0x1a,0xe5,0xbe]
+
+s_bitset1_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x1a,0xe6,0xbe]
+
+s_bitset1_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x1a,0xe7,0xbe]
+
+s_bitset1_b32 vcc_lo, s1
+// CHECK: [0x01,0x1a,0xea,0xbe]
+
+s_bitset1_b32 vcc_hi, s1
+// CHECK: [0x01,0x1a,0xeb,0xbe]
+
+s_bitset1_b32 tba_lo, s1
+// CHECK: [0x01,0x1a,0xec,0xbe]
+
+s_bitset1_b32 tba_hi, s1
+// CHECK: [0x01,0x1a,0xed,0xbe]
+
+s_bitset1_b32 tma_lo, s1
+// CHECK: [0x01,0x1a,0xee,0xbe]
+
+s_bitset1_b32 tma_hi, s1
+// CHECK: [0x01,0x1a,0xef,0xbe]
+
+s_bitset1_b32 ttmp11, s1
+// CHECK: [0x01,0x1a,0xfb,0xbe]
+
+s_bitset1_b32 m0, s1
+// CHECK: [0x01,0x1a,0xfc,0xbe]
+
+s_bitset1_b32 exec_lo, s1
+// CHECK: [0x01,0x1a,0xfe,0xbe]
+
+s_bitset1_b32 exec_hi, s1
+// CHECK: [0x01,0x1a,0xff,0xbe]
+
+s_bitset1_b32 s5, s101
+// CHECK: [0x65,0x1a,0x85,0xbe]
+
+s_bitset1_b32 s5, flat_scratch_lo
+// CHECK: [0x66,0x1a,0x85,0xbe]
+
+s_bitset1_b32 s5, flat_scratch_hi
+// CHECK: [0x67,0x1a,0x85,0xbe]
+
+s_bitset1_b32 s5, vcc_lo
+// CHECK: [0x6a,0x1a,0x85,0xbe]
+
+s_bitset1_b32 s5, vcc_hi
+// CHECK: [0x6b,0x1a,0x85,0xbe]
+
+s_bitset1_b32 s5, tba_lo
+// CHECK: [0x6c,0x1a,0x85,0xbe]
+
+s_bitset1_b32 s5, tba_hi
+// CHECK: [0x6d,0x1a,0x85,0xbe]
+
+s_bitset1_b32 s5, tma_lo
+// CHECK: [0x6e,0x1a,0x85,0xbe]
+
+s_bitset1_b32 s5, tma_hi
+// CHECK: [0x6f,0x1a,0x85,0xbe]
+
+s_bitset1_b32 s5, ttmp11
+// CHECK: [0x7b,0x1a,0x85,0xbe]
+
+s_bitset1_b32 s5, m0
+// CHECK: [0x7c,0x1a,0x85,0xbe]
+
+s_bitset1_b32 s5, exec_lo
+// CHECK: [0x7e,0x1a,0x85,0xbe]
+
+s_bitset1_b32 s5, exec_hi
+// CHECK: [0x7f,0x1a,0x85,0xbe]
+
+s_bitset1_b32 s5, 0
+// CHECK: [0x80,0x1a,0x85,0xbe]
+
+s_bitset1_b32 s5, -1
+// CHECK: [0xc1,0x1a,0x85,0xbe]
+
+s_bitset1_b32 s5, 0.5
+// CHECK: [0xf0,0x1a,0x85,0xbe]
+
+s_bitset1_b32 s5, -4.0
+// CHECK: [0xf7,0x1a,0x85,0xbe]
+
+s_bitset1_b32 s5, 0xaf123456
+// CHECK: [0xff,0x1a,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_bitset1_b32 s5, 0x3f717273
+// CHECK: [0xff,0x1a,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_bitset1_b64 s[10:11], s1
+// CHECK: [0x01,0x1b,0x8a,0xbe]
+
+s_bitset1_b64 s[12:13], s1
+// CHECK: [0x01,0x1b,0x8c,0xbe]
+
+s_bitset1_b64 s[100:101], s1
+// CHECK: [0x01,0x1b,0xe4,0xbe]
+
+s_bitset1_b64 flat_scratch, s1
+// CHECK: [0x01,0x1b,0xe6,0xbe]
+
+s_bitset1_b64 vcc, s1
+// CHECK: [0x01,0x1b,0xea,0xbe]
+
+s_bitset1_b64 tba, s1
+// CHECK: [0x01,0x1b,0xec,0xbe]
+
+s_bitset1_b64 tma, s1
+// CHECK: [0x01,0x1b,0xee,0xbe]
+
+s_bitset1_b64 ttmp[10:11], s1
+// CHECK: [0x01,0x1b,0xfa,0xbe]
+
+s_bitset1_b64 exec, s1
+// CHECK: [0x01,0x1b,0xfe,0xbe]
+
+s_bitset1_b64 s[10:11], s101
+// CHECK: [0x65,0x1b,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], flat_scratch_lo
+// CHECK: [0x66,0x1b,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], flat_scratch_hi
+// CHECK: [0x67,0x1b,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], vcc_lo
+// CHECK: [0x6a,0x1b,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], vcc_hi
+// CHECK: [0x6b,0x1b,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], tba_lo
+// CHECK: [0x6c,0x1b,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], tba_hi
+// CHECK: [0x6d,0x1b,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], tma_lo
+// CHECK: [0x6e,0x1b,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], tma_hi
+// CHECK: [0x6f,0x1b,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], ttmp11
+// CHECK: [0x7b,0x1b,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], m0
+// CHECK: [0x7c,0x1b,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], exec_lo
+// CHECK: [0x7e,0x1b,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], exec_hi
+// CHECK: [0x7f,0x1b,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], 0
+// CHECK: [0x80,0x1b,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], -1
+// CHECK: [0xc1,0x1b,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x1b,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x1b,0x8a,0xbe]
+
+s_bitset1_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x1b,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_bitset1_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x1b,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_getpc_b64 s[10:11]
+// CHECK: [0x00,0x1c,0x8a,0xbe]
+
+s_getpc_b64 s[12:13]
+// CHECK: [0x00,0x1c,0x8c,0xbe]
+
+s_getpc_b64 s[100:101]
+// CHECK: [0x00,0x1c,0xe4,0xbe]
+
+s_getpc_b64 flat_scratch
+// CHECK: [0x00,0x1c,0xe6,0xbe]
+
+s_getpc_b64 vcc
+// CHECK: [0x00,0x1c,0xea,0xbe]
+
+s_getpc_b64 tba
+// CHECK: [0x00,0x1c,0xec,0xbe]
+
+s_getpc_b64 tma
+// CHECK: [0x00,0x1c,0xee,0xbe]
+
+s_getpc_b64 ttmp[10:11]
+// CHECK: [0x00,0x1c,0xfa,0xbe]
+
+s_getpc_b64 exec
+// CHECK: [0x00,0x1c,0xfe,0xbe]
+
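+// s_getpc_b64 takes no source operand, so its SSRC0 byte is 0x00;
+// conversely, s_setpc_b64 below has no destination, so its SDST byte is
+// the bare encoding bit 0x80.
+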
+s_setpc_b64 s[2:3]
+// CHECK: [0x02,0x1d,0x80,0xbe]
+
+s_setpc_b64 s[4:5]
+// CHECK: [0x04,0x1d,0x80,0xbe]
+
+s_setpc_b64 s[100:101]
+// CHECK: [0x64,0x1d,0x80,0xbe]
+
+s_setpc_b64 flat_scratch
+// CHECK: [0x66,0x1d,0x80,0xbe]
+
+s_setpc_b64 vcc
+// CHECK: [0x6a,0x1d,0x80,0xbe]
+
+s_setpc_b64 tba
+// CHECK: [0x6c,0x1d,0x80,0xbe]
+
+s_setpc_b64 tma
+// CHECK: [0x6e,0x1d,0x80,0xbe]
+
+s_setpc_b64 ttmp[10:11]
+// CHECK: [0x7a,0x1d,0x80,0xbe]
+
+s_swappc_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x1e,0x8a,0xbe]
+
+s_swappc_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x1e,0x8c,0xbe]
+
+s_swappc_b64 s[100:101], s[2:3]
+// CHECK: [0x02,0x1e,0xe4,0xbe]
+
+s_swappc_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x1e,0xe6,0xbe]
+
+s_swappc_b64 vcc, s[2:3]
+// CHECK: [0x02,0x1e,0xea,0xbe]
+
+s_swappc_b64 tba, s[2:3]
+// CHECK: [0x02,0x1e,0xec,0xbe]
+
+s_swappc_b64 tma, s[2:3]
+// CHECK: [0x02,0x1e,0xee,0xbe]
+
+s_swappc_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x1e,0xfa,0xbe]
+
+s_swappc_b64 exec, s[2:3]
+// CHECK: [0x02,0x1e,0xfe,0xbe]
+
+s_swappc_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x1e,0x8a,0xbe]
+
+s_swappc_b64 s[10:11], s[100:101]
+// CHECK: [0x64,0x1e,0x8a,0xbe]
+
+s_swappc_b64 s[10:11], flat_scratch
+// CHECK: [0x66,0x1e,0x8a,0xbe]
+
+s_swappc_b64 s[10:11], vcc
+// CHECK: [0x6a,0x1e,0x8a,0xbe]
+
+s_swappc_b64 s[10:11], tba
+// CHECK: [0x6c,0x1e,0x8a,0xbe]
+
+s_swappc_b64 s[10:11], tma
+// CHECK: [0x6e,0x1e,0x8a,0xbe]
+
+s_swappc_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x1e,0x8a,0xbe]
+
+s_rfe_b64 s[2:3]
+// CHECK: [0x02,0x1f,0x80,0xbe]
+
+s_rfe_b64 s[4:5]
+// CHECK: [0x04,0x1f,0x80,0xbe]
+
+s_rfe_b64 s[100:101]
+// CHECK: [0x64,0x1f,0x80,0xbe]
+
+s_rfe_b64 flat_scratch
+// CHECK: [0x66,0x1f,0x80,0xbe]
+
+s_rfe_b64 vcc
+// CHECK: [0x6a,0x1f,0x80,0xbe]
+
+s_rfe_b64 tba
+// CHECK: [0x6c,0x1f,0x80,0xbe]
+
+s_rfe_b64 tma
+// CHECK: [0x6e,0x1f,0x80,0xbe]
+
+s_rfe_b64 ttmp[10:11]
+// CHECK: [0x7a,0x1f,0x80,0xbe]
+
+s_and_saveexec_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x20,0x8a,0xbe]
+
+s_and_saveexec_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x20,0x8c,0xbe]
+
+s_and_saveexec_b64 s[100:101], s[2:3]
+// CHECK: [0x02,0x20,0xe4,0xbe]
+
+s_and_saveexec_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x20,0xe6,0xbe]
+
+s_and_saveexec_b64 vcc, s[2:3]
+// CHECK: [0x02,0x20,0xea,0xbe]
+
+s_and_saveexec_b64 tba, s[2:3]
+// CHECK: [0x02,0x20,0xec,0xbe]
+
+s_and_saveexec_b64 tma, s[2:3]
+// CHECK: [0x02,0x20,0xee,0xbe]
+
+s_and_saveexec_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x20,0xfa,0xbe]
+
+s_and_saveexec_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x20,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], s[100:101]
+// CHECK: [0x64,0x20,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], flat_scratch
+// CHECK: [0x66,0x20,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], vcc
+// CHECK: [0x6a,0x20,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], tba
+// CHECK: [0x6c,0x20,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], tma
+// CHECK: [0x6e,0x20,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x20,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], exec
+// CHECK: [0x7e,0x20,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], 0
+// CHECK: [0x80,0x20,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], -1
+// CHECK: [0xc1,0x20,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x20,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x20,0x8a,0xbe]
+
+s_and_saveexec_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x20,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_and_saveexec_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x20,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_or_saveexec_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x21,0x8a,0xbe]
+
+s_or_saveexec_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x21,0x8c,0xbe]
+
+s_or_saveexec_b64 s[100:101], s[2:3]
+// CHECK: [0x02,0x21,0xe4,0xbe]
+
+s_or_saveexec_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x21,0xe6,0xbe]
+
+s_or_saveexec_b64 vcc, s[2:3]
+// CHECK: [0x02,0x21,0xea,0xbe]
+
+s_or_saveexec_b64 tba, s[2:3]
+// CHECK: [0x02,0x21,0xec,0xbe]
+
+s_or_saveexec_b64 tma, s[2:3]
+// CHECK: [0x02,0x21,0xee,0xbe]
+
+s_or_saveexec_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x21,0xfa,0xbe]
+
+s_or_saveexec_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x21,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], s[100:101]
+// CHECK: [0x64,0x21,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], flat_scratch
+// CHECK: [0x66,0x21,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], vcc
+// CHECK: [0x6a,0x21,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], tba
+// CHECK: [0x6c,0x21,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], tma
+// CHECK: [0x6e,0x21,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x21,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], exec
+// CHECK: [0x7e,0x21,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], 0
+// CHECK: [0x80,0x21,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], -1
+// CHECK: [0xc1,0x21,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x21,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x21,0x8a,0xbe]
+
+s_or_saveexec_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x21,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_or_saveexec_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x21,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_xor_saveexec_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x22,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x22,0x8c,0xbe]
+
+s_xor_saveexec_b64 s[100:101], s[2:3]
+// CHECK: [0x02,0x22,0xe4,0xbe]
+
+s_xor_saveexec_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x22,0xe6,0xbe]
+
+s_xor_saveexec_b64 vcc, s[2:3]
+// CHECK: [0x02,0x22,0xea,0xbe]
+
+s_xor_saveexec_b64 tba, s[2:3]
+// CHECK: [0x02,0x22,0xec,0xbe]
+
+s_xor_saveexec_b64 tma, s[2:3]
+// CHECK: [0x02,0x22,0xee,0xbe]
+
+s_xor_saveexec_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x22,0xfa,0xbe]
+
+s_xor_saveexec_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x22,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], s[100:101]
+// CHECK: [0x64,0x22,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], flat_scratch
+// CHECK: [0x66,0x22,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], vcc
+// CHECK: [0x6a,0x22,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], tba
+// CHECK: [0x6c,0x22,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], tma
+// CHECK: [0x6e,0x22,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x22,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], exec
+// CHECK: [0x7e,0x22,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], 0
+// CHECK: [0x80,0x22,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], -1
+// CHECK: [0xc1,0x22,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x22,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x22,0x8a,0xbe]
+
+s_xor_saveexec_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x22,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_xor_saveexec_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x22,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_andn2_saveexec_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x23,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x23,0x8c,0xbe]
+
+s_andn2_saveexec_b64 s[100:101], s[2:3]
+// CHECK: [0x02,0x23,0xe4,0xbe]
+
+s_andn2_saveexec_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x23,0xe6,0xbe]
+
+s_andn2_saveexec_b64 vcc, s[2:3]
+// CHECK: [0x02,0x23,0xea,0xbe]
+
+s_andn2_saveexec_b64 tba, s[2:3]
+// CHECK: [0x02,0x23,0xec,0xbe]
+
+s_andn2_saveexec_b64 tma, s[2:3]
+// CHECK: [0x02,0x23,0xee,0xbe]
+
+s_andn2_saveexec_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x23,0xfa,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x23,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], s[100:101]
+// CHECK: [0x64,0x23,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], flat_scratch
+// CHECK: [0x66,0x23,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], vcc
+// CHECK: [0x6a,0x23,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], tba
+// CHECK: [0x6c,0x23,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], tma
+// CHECK: [0x6e,0x23,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x23,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], exec
+// CHECK: [0x7e,0x23,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], 0
+// CHECK: [0x80,0x23,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], -1
+// CHECK: [0xc1,0x23,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x23,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x23,0x8a,0xbe]
+
+s_andn2_saveexec_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x23,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_andn2_saveexec_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x23,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_orn2_saveexec_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x24,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x24,0x8c,0xbe]
+
+s_orn2_saveexec_b64 s[100:101], s[2:3]
+// CHECK: [0x02,0x24,0xe4,0xbe]
+
+s_orn2_saveexec_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x24,0xe6,0xbe]
+
+s_orn2_saveexec_b64 vcc, s[2:3]
+// CHECK: [0x02,0x24,0xea,0xbe]
+
+s_orn2_saveexec_b64 tba, s[2:3]
+// CHECK: [0x02,0x24,0xec,0xbe]
+
+s_orn2_saveexec_b64 tma, s[2:3]
+// CHECK: [0x02,0x24,0xee,0xbe]
+
+s_orn2_saveexec_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x24,0xfa,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x24,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], s[100:101]
+// CHECK: [0x64,0x24,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], flat_scratch
+// CHECK: [0x66,0x24,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], vcc
+// CHECK: [0x6a,0x24,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], tba
+// CHECK: [0x6c,0x24,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], tma
+// CHECK: [0x6e,0x24,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x24,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], exec
+// CHECK: [0x7e,0x24,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], 0
+// CHECK: [0x80,0x24,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], -1
+// CHECK: [0xc1,0x24,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x24,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x24,0x8a,0xbe]
+
+s_orn2_saveexec_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x24,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_orn2_saveexec_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x24,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_nand_saveexec_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x25,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x25,0x8c,0xbe]
+
+s_nand_saveexec_b64 s[100:101], s[2:3]
+// CHECK: [0x02,0x25,0xe4,0xbe]
+
+s_nand_saveexec_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x25,0xe6,0xbe]
+
+s_nand_saveexec_b64 vcc, s[2:3]
+// CHECK: [0x02,0x25,0xea,0xbe]
+
+s_nand_saveexec_b64 tba, s[2:3]
+// CHECK: [0x02,0x25,0xec,0xbe]
+
+s_nand_saveexec_b64 tma, s[2:3]
+// CHECK: [0x02,0x25,0xee,0xbe]
+
+s_nand_saveexec_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x25,0xfa,0xbe]
+
+s_nand_saveexec_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x25,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], s[100:101]
+// CHECK: [0x64,0x25,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], flat_scratch
+// CHECK: [0x66,0x25,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], vcc
+// CHECK: [0x6a,0x25,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], tba
+// CHECK: [0x6c,0x25,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], tma
+// CHECK: [0x6e,0x25,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x25,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], exec
+// CHECK: [0x7e,0x25,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], 0
+// CHECK: [0x80,0x25,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], -1
+// CHECK: [0xc1,0x25,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x25,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x25,0x8a,0xbe]
+
+s_nand_saveexec_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x25,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_nand_saveexec_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x25,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_nor_saveexec_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x26,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x26,0x8c,0xbe]
+
+s_nor_saveexec_b64 s[100:101], s[2:3]
+// CHECK: [0x02,0x26,0xe4,0xbe]
+
+s_nor_saveexec_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x26,0xe6,0xbe]
+
+s_nor_saveexec_b64 vcc, s[2:3]
+// CHECK: [0x02,0x26,0xea,0xbe]
+
+s_nor_saveexec_b64 tba, s[2:3]
+// CHECK: [0x02,0x26,0xec,0xbe]
+
+s_nor_saveexec_b64 tma, s[2:3]
+// CHECK: [0x02,0x26,0xee,0xbe]
+
+s_nor_saveexec_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x26,0xfa,0xbe]
+
+s_nor_saveexec_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x26,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], s[100:101]
+// CHECK: [0x64,0x26,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], flat_scratch
+// CHECK: [0x66,0x26,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], vcc
+// CHECK: [0x6a,0x26,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], tba
+// CHECK: [0x6c,0x26,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], tma
+// CHECK: [0x6e,0x26,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x26,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], exec
+// CHECK: [0x7e,0x26,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], 0
+// CHECK: [0x80,0x26,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], -1
+// CHECK: [0xc1,0x26,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x26,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x26,0x8a,0xbe]
+
+s_nor_saveexec_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x26,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_nor_saveexec_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x26,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_xnor_saveexec_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x27,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x27,0x8c,0xbe]
+
+s_xnor_saveexec_b64 s[100:101], s[2:3]
+// CHECK: [0x02,0x27,0xe4,0xbe]
+
+s_xnor_saveexec_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x27,0xe6,0xbe]
+
+s_xnor_saveexec_b64 vcc, s[2:3]
+// CHECK: [0x02,0x27,0xea,0xbe]
+
+s_xnor_saveexec_b64 tba, s[2:3]
+// CHECK: [0x02,0x27,0xec,0xbe]
+
+s_xnor_saveexec_b64 tma, s[2:3]
+// CHECK: [0x02,0x27,0xee,0xbe]
+
+s_xnor_saveexec_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x27,0xfa,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x27,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], s[100:101]
+// CHECK: [0x64,0x27,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], flat_scratch
+// CHECK: [0x66,0x27,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], vcc
+// CHECK: [0x6a,0x27,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], tba
+// CHECK: [0x6c,0x27,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], tma
+// CHECK: [0x6e,0x27,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x27,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], exec
+// CHECK: [0x7e,0x27,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], 0
+// CHECK: [0x80,0x27,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], -1
+// CHECK: [0xc1,0x27,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x27,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x27,0x8a,0xbe]
+
+s_xnor_saveexec_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x27,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_xnor_saveexec_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x27,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
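+// The eight *_saveexec_b64 variants above (opcodes 0x20 through 0x27)
+// differ only in the OP byte; assuming GCN3 semantics, each saves the
+// current EXEC mask into SDST before combining SSRC0 with EXEC.
+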
+s_quadmask_b32 s5, s1
+// CHECK: [0x01,0x28,0x85,0xbe]
+
+s_quadmask_b32 s101, s1
+// CHECK: [0x01,0x28,0xe5,0xbe]
+
+s_quadmask_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x28,0xe6,0xbe]
+
+s_quadmask_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x28,0xe7,0xbe]
+
+s_quadmask_b32 vcc_lo, s1
+// CHECK: [0x01,0x28,0xea,0xbe]
+
+s_quadmask_b32 vcc_hi, s1
+// CHECK: [0x01,0x28,0xeb,0xbe]
+
+s_quadmask_b32 tba_lo, s1
+// CHECK: [0x01,0x28,0xec,0xbe]
+
+s_quadmask_b32 tba_hi, s1
+// CHECK: [0x01,0x28,0xed,0xbe]
+
+s_quadmask_b32 tma_lo, s1
+// CHECK: [0x01,0x28,0xee,0xbe]
+
+s_quadmask_b32 tma_hi, s1
+// CHECK: [0x01,0x28,0xef,0xbe]
+
+s_quadmask_b32 ttmp11, s1
+// CHECK: [0x01,0x28,0xfb,0xbe]
+
+s_quadmask_b32 m0, s1
+// CHECK: [0x01,0x28,0xfc,0xbe]
+
+s_quadmask_b32 exec_lo, s1
+// CHECK: [0x01,0x28,0xfe,0xbe]
+
+s_quadmask_b32 exec_hi, s1
+// CHECK: [0x01,0x28,0xff,0xbe]
+
+s_quadmask_b32 s5, s101
+// CHECK: [0x65,0x28,0x85,0xbe]
+
+s_quadmask_b32 s5, flat_scratch_lo
+// CHECK: [0x66,0x28,0x85,0xbe]
+
+s_quadmask_b32 s5, flat_scratch_hi
+// CHECK: [0x67,0x28,0x85,0xbe]
+
+s_quadmask_b32 s5, vcc_lo
+// CHECK: [0x6a,0x28,0x85,0xbe]
+
+s_quadmask_b32 s5, vcc_hi
+// CHECK: [0x6b,0x28,0x85,0xbe]
+
+s_quadmask_b32 s5, tba_lo
+// CHECK: [0x6c,0x28,0x85,0xbe]
+
+s_quadmask_b32 s5, tba_hi
+// CHECK: [0x6d,0x28,0x85,0xbe]
+
+s_quadmask_b32 s5, tma_lo
+// CHECK: [0x6e,0x28,0x85,0xbe]
+
+s_quadmask_b32 s5, tma_hi
+// CHECK: [0x6f,0x28,0x85,0xbe]
+
+s_quadmask_b32 s5, ttmp11
+// CHECK: [0x7b,0x28,0x85,0xbe]
+
+s_quadmask_b32 s5, m0
+// CHECK: [0x7c,0x28,0x85,0xbe]
+
+s_quadmask_b32 s5, exec_lo
+// CHECK: [0x7e,0x28,0x85,0xbe]
+
+s_quadmask_b32 s5, exec_hi
+// CHECK: [0x7f,0x28,0x85,0xbe]
+
+s_quadmask_b32 s5, 0
+// CHECK: [0x80,0x28,0x85,0xbe]
+
+s_quadmask_b32 s5, -1
+// CHECK: [0xc1,0x28,0x85,0xbe]
+
+s_quadmask_b32 s5, 0.5
+// CHECK: [0xf0,0x28,0x85,0xbe]
+
+s_quadmask_b32 s5, -4.0
+// CHECK: [0xf7,0x28,0x85,0xbe]
+
+s_quadmask_b32 s5, 0xaf123456
+// CHECK: [0xff,0x28,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_quadmask_b32 s5, 0x3f717273
+// CHECK: [0xff,0x28,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_quadmask_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x29,0x8a,0xbe]
+
+s_quadmask_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x29,0x8c,0xbe]
+
+s_quadmask_b64 s[100:101], s[2:3]
+// CHECK: [0x02,0x29,0xe4,0xbe]
+
+s_quadmask_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x29,0xe6,0xbe]
+
+s_quadmask_b64 vcc, s[2:3]
+// CHECK: [0x02,0x29,0xea,0xbe]
+
+s_quadmask_b64 tba, s[2:3]
+// CHECK: [0x02,0x29,0xec,0xbe]
+
+s_quadmask_b64 tma, s[2:3]
+// CHECK: [0x02,0x29,0xee,0xbe]
+
+s_quadmask_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x29,0xfa,0xbe]
+
+s_quadmask_b64 exec, s[2:3]
+// CHECK: [0x02,0x29,0xfe,0xbe]
+
+s_quadmask_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x29,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], s[100:101]
+// CHECK: [0x64,0x29,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], flat_scratch
+// CHECK: [0x66,0x29,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], vcc
+// CHECK: [0x6a,0x29,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], tba
+// CHECK: [0x6c,0x29,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], tma
+// CHECK: [0x6e,0x29,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x29,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], exec
+// CHECK: [0x7e,0x29,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], 0
+// CHECK: [0x80,0x29,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], -1
+// CHECK: [0xc1,0x29,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x29,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x29,0x8a,0xbe]
+
+s_quadmask_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x29,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_quadmask_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x29,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_movrels_b32 s5, s1
+// CHECK: [0x01,0x2a,0x85,0xbe]
+
+s_movrels_b32 s101, s1
+// CHECK: [0x01,0x2a,0xe5,0xbe]
+
+s_movrels_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x2a,0xe6,0xbe]
+
+s_movrels_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x2a,0xe7,0xbe]
+
+s_movrels_b32 vcc_lo, s1
+// CHECK: [0x01,0x2a,0xea,0xbe]
+
+s_movrels_b32 vcc_hi, s1
+// CHECK: [0x01,0x2a,0xeb,0xbe]
+
+s_movrels_b32 tba_lo, s1
+// CHECK: [0x01,0x2a,0xec,0xbe]
+
+s_movrels_b32 tba_hi, s1
+// CHECK: [0x01,0x2a,0xed,0xbe]
+
+s_movrels_b32 tma_lo, s1
+// CHECK: [0x01,0x2a,0xee,0xbe]
+
+s_movrels_b32 tma_hi, s1
+// CHECK: [0x01,0x2a,0xef,0xbe]
+
+s_movrels_b32 ttmp11, s1
+// CHECK: [0x01,0x2a,0xfb,0xbe]
+
+s_movrels_b32 m0, s1
+// CHECK: [0x01,0x2a,0xfc,0xbe]
+
+s_movrels_b32 exec_lo, s1
+// CHECK: [0x01,0x2a,0xfe,0xbe]
+
+s_movrels_b32 exec_hi, s1
+// CHECK: [0x01,0x2a,0xff,0xbe]
+
+s_movrels_b32 s5, s101
+// CHECK: [0x65,0x2a,0x85,0xbe]
+
+s_movrels_b32 s5, flat_scratch_lo
+// CHECK: [0x66,0x2a,0x85,0xbe]
+
+s_movrels_b32 s5, flat_scratch_hi
+// CHECK: [0x67,0x2a,0x85,0xbe]
+
+s_movrels_b32 s5, vcc_lo
+// CHECK: [0x6a,0x2a,0x85,0xbe]
+
+s_movrels_b32 s5, vcc_hi
+// CHECK: [0x6b,0x2a,0x85,0xbe]
+
+s_movrels_b32 s5, tba_lo
+// CHECK: [0x6c,0x2a,0x85,0xbe]
+
+s_movrels_b32 s5, tba_hi
+// CHECK: [0x6d,0x2a,0x85,0xbe]
+
+s_movrels_b32 s5, tma_lo
+// CHECK: [0x6e,0x2a,0x85,0xbe]
+
+s_movrels_b32 s5, tma_hi
+// CHECK: [0x6f,0x2a,0x85,0xbe]
+
+s_movrels_b32 s5, ttmp11
+// CHECK: [0x7b,0x2a,0x85,0xbe]
+
+s_movrels_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x2b,0x8a,0xbe]
+
+s_movrels_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x2b,0x8c,0xbe]
+
+s_movrels_b64 s[100:101], s[2:3]
+// CHECK: [0x02,0x2b,0xe4,0xbe]
+
+s_movrels_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x2b,0xe6,0xbe]
+
+s_movrels_b64 vcc, s[2:3]
+// CHECK: [0x02,0x2b,0xea,0xbe]
+
+s_movrels_b64 tba, s[2:3]
+// CHECK: [0x02,0x2b,0xec,0xbe]
+
+s_movrels_b64 tma, s[2:3]
+// CHECK: [0x02,0x2b,0xee,0xbe]
+
+s_movrels_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x2b,0xfa,0xbe]
+
+s_movrels_b64 exec, s[2:3]
+// CHECK: [0x02,0x2b,0xfe,0xbe]
+
+s_movrels_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x2b,0x8a,0xbe]
+
+s_movrels_b64 s[10:11], s[100:101]
+// CHECK: [0x64,0x2b,0x8a,0xbe]
+
+s_movrels_b64 s[10:11], flat_scratch
+// CHECK: [0x66,0x2b,0x8a,0xbe]
+
+s_movrels_b64 s[10:11], vcc
+// CHECK: [0x6a,0x2b,0x8a,0xbe]
+
+s_movrels_b64 s[10:11], tba
+// CHECK: [0x6c,0x2b,0x8a,0xbe]
+
+s_movrels_b64 s[10:11], tma
+// CHECK: [0x6e,0x2b,0x8a,0xbe]
+
+s_movrels_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x2b,0x8a,0xbe]
+
+s_movreld_b32 s5, s1
+// CHECK: [0x01,0x2c,0x85,0xbe]
+
+s_movreld_b32 s101, s1
+// CHECK: [0x01,0x2c,0xe5,0xbe]
+
+s_movreld_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x2c,0xe6,0xbe]
+
+s_movreld_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x2c,0xe7,0xbe]
+
+s_movreld_b32 vcc_lo, s1
+// CHECK: [0x01,0x2c,0xea,0xbe]
+
+s_movreld_b32 vcc_hi, s1
+// CHECK: [0x01,0x2c,0xeb,0xbe]
+
+s_movreld_b32 tba_lo, s1
+// CHECK: [0x01,0x2c,0xec,0xbe]
+
+s_movreld_b32 tba_hi, s1
+// CHECK: [0x01,0x2c,0xed,0xbe]
+
+s_movreld_b32 tma_lo, s1
+// CHECK: [0x01,0x2c,0xee,0xbe]
+
+s_movreld_b32 tma_hi, s1
+// CHECK: [0x01,0x2c,0xef,0xbe]
+
+s_movreld_b32 ttmp11, s1
+// CHECK: [0x01,0x2c,0xfb,0xbe]
+
+s_movreld_b32 s5, s101
+// CHECK: [0x65,0x2c,0x85,0xbe]
+
+s_movreld_b32 s5, flat_scratch_lo
+// CHECK: [0x66,0x2c,0x85,0xbe]
+
+s_movreld_b32 s5, flat_scratch_hi
+// CHECK: [0x67,0x2c,0x85,0xbe]
+
+s_movreld_b32 s5, vcc_lo
+// CHECK: [0x6a,0x2c,0x85,0xbe]
+
+s_movreld_b32 s5, vcc_hi
+// CHECK: [0x6b,0x2c,0x85,0xbe]
+
+s_movreld_b32 s5, tba_lo
+// CHECK: [0x6c,0x2c,0x85,0xbe]
+
+s_movreld_b32 s5, tba_hi
+// CHECK: [0x6d,0x2c,0x85,0xbe]
+
+s_movreld_b32 s5, tma_lo
+// CHECK: [0x6e,0x2c,0x85,0xbe]
+
+s_movreld_b32 s5, tma_hi
+// CHECK: [0x6f,0x2c,0x85,0xbe]
+
+s_movreld_b32 s5, ttmp11
+// CHECK: [0x7b,0x2c,0x85,0xbe]
+
+s_movreld_b32 s5, m0
+// CHECK: [0x7c,0x2c,0x85,0xbe]
+
+s_movreld_b32 s5, 0
+// CHECK: [0x80,0x2c,0x85,0xbe]
+
+s_movreld_b32 s5, -1
+// CHECK: [0xc1,0x2c,0x85,0xbe]
+
+s_movreld_b32 s5, 0.5
+// CHECK: [0xf0,0x2c,0x85,0xbe]
+
+s_movreld_b32 s5, -4.0
+// CHECK: [0xf7,0x2c,0x85,0xbe]
+
+s_movreld_b32 s5, 0xaf123456
+// CHECK: [0xff,0x2c,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_movreld_b32 s5, 0x3f717273
+// CHECK: [0xff,0x2c,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_movreld_b64 s[10:11], s[2:3]
+// CHECK: [0x02,0x2d,0x8a,0xbe]
+
+s_movreld_b64 s[12:13], s[2:3]
+// CHECK: [0x02,0x2d,0x8c,0xbe]
+
+s_movreld_b64 s[100:101], s[2:3]
+// CHECK: [0x02,0x2d,0xe4,0xbe]
+
+s_movreld_b64 flat_scratch, s[2:3]
+// CHECK: [0x02,0x2d,0xe6,0xbe]
+
+s_movreld_b64 vcc, s[2:3]
+// CHECK: [0x02,0x2d,0xea,0xbe]
+
+s_movreld_b64 tba, s[2:3]
+// CHECK: [0x02,0x2d,0xec,0xbe]
+
+s_movreld_b64 tma, s[2:3]
+// CHECK: [0x02,0x2d,0xee,0xbe]
+
+s_movreld_b64 ttmp[10:11], s[2:3]
+// CHECK: [0x02,0x2d,0xfa,0xbe]
+
+s_movreld_b64 s[10:11], s[4:5]
+// CHECK: [0x04,0x2d,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], s[100:101]
+// CHECK: [0x64,0x2d,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], flat_scratch
+// CHECK: [0x66,0x2d,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], vcc
+// CHECK: [0x6a,0x2d,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], tba
+// CHECK: [0x6c,0x2d,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], tma
+// CHECK: [0x6e,0x2d,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], ttmp[10:11]
+// CHECK: [0x7a,0x2d,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], 0
+// CHECK: [0x80,0x2d,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], -1
+// CHECK: [0xc1,0x2d,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], 0.5
+// CHECK: [0xf0,0x2d,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], -4.0
+// CHECK: [0xf7,0x2d,0x8a,0xbe]
+
+s_movreld_b64 s[10:11], 0xaf123456
+// CHECK: [0xff,0x2d,0x8a,0xbe,0x56,0x34,0x12,0xaf]
+
+s_movreld_b64 s[10:11], 0x3f717273
+// CHECK: [0xff,0x2d,0x8a,0xbe,0x73,0x72,0x71,0x3f]
+
+s_abs_i32 s5, s1
+// CHECK: [0x01,0x30,0x85,0xbe]
+
+s_abs_i32 s101, s1
+// CHECK: [0x01,0x30,0xe5,0xbe]
+
+s_abs_i32 flat_scratch_lo, s1
+// CHECK: [0x01,0x30,0xe6,0xbe]
+
+s_abs_i32 flat_scratch_hi, s1
+// CHECK: [0x01,0x30,0xe7,0xbe]
+
+s_abs_i32 vcc_lo, s1
+// CHECK: [0x01,0x30,0xea,0xbe]
+
+s_abs_i32 vcc_hi, s1
+// CHECK: [0x01,0x30,0xeb,0xbe]
+
+s_abs_i32 tba_lo, s1
+// CHECK: [0x01,0x30,0xec,0xbe]
+
+s_abs_i32 tba_hi, s1
+// CHECK: [0x01,0x30,0xed,0xbe]
+
+s_abs_i32 tma_lo, s1
+// CHECK: [0x01,0x30,0xee,0xbe]
+
+s_abs_i32 tma_hi, s1
+// CHECK: [0x01,0x30,0xef,0xbe]
+
+s_abs_i32 ttmp11, s1
+// CHECK: [0x01,0x30,0xfb,0xbe]
+
+s_abs_i32 m0, s1
+// CHECK: [0x01,0x30,0xfc,0xbe]
+
+s_abs_i32 exec_lo, s1
+// CHECK: [0x01,0x30,0xfe,0xbe]
+
+s_abs_i32 exec_hi, s1
+// CHECK: [0x01,0x30,0xff,0xbe]
+
+s_abs_i32 s5, s101
+// CHECK: [0x65,0x30,0x85,0xbe]
+
+s_abs_i32 s5, flat_scratch_lo
+// CHECK: [0x66,0x30,0x85,0xbe]
+
+s_abs_i32 s5, flat_scratch_hi
+// CHECK: [0x67,0x30,0x85,0xbe]
+
+s_abs_i32 s5, vcc_lo
+// CHECK: [0x6a,0x30,0x85,0xbe]
+
+s_abs_i32 s5, vcc_hi
+// CHECK: [0x6b,0x30,0x85,0xbe]
+
+s_abs_i32 s5, tba_lo
+// CHECK: [0x6c,0x30,0x85,0xbe]
+
+s_abs_i32 s5, tba_hi
+// CHECK: [0x6d,0x30,0x85,0xbe]
+
+s_abs_i32 s5, tma_lo
+// CHECK: [0x6e,0x30,0x85,0xbe]
+
+s_abs_i32 s5, tma_hi
+// CHECK: [0x6f,0x30,0x85,0xbe]
+
+s_abs_i32 s5, ttmp11
+// CHECK: [0x7b,0x30,0x85,0xbe]
+
+s_abs_i32 s5, m0
+// CHECK: [0x7c,0x30,0x85,0xbe]
+
+s_abs_i32 s5, exec_lo
+// CHECK: [0x7e,0x30,0x85,0xbe]
+
+s_abs_i32 s5, exec_hi
+// CHECK: [0x7f,0x30,0x85,0xbe]
+
+s_abs_i32 s5, 0
+// CHECK: [0x80,0x30,0x85,0xbe]
+
+s_abs_i32 s5, -1
+// CHECK: [0xc1,0x30,0x85,0xbe]
+
+s_abs_i32 s5, 0.5
+// CHECK: [0xf0,0x30,0x85,0xbe]
+
+s_abs_i32 s5, -4.0
+// CHECK: [0xf7,0x30,0x85,0xbe]
+
+s_abs_i32 s5, 0xaf123456
+// CHECK: [0xff,0x30,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_abs_i32 s5, 0x3f717273
+// CHECK: [0xff,0x30,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_mov_fed_b32 s5, s1
+// CHECK: [0x01,0x31,0x85,0xbe]
+
+s_mov_fed_b32 s101, s1
+// CHECK: [0x01,0x31,0xe5,0xbe]
+
+s_mov_fed_b32 flat_scratch_lo, s1
+// CHECK: [0x01,0x31,0xe6,0xbe]
+
+s_mov_fed_b32 flat_scratch_hi, s1
+// CHECK: [0x01,0x31,0xe7,0xbe]
+
+s_mov_fed_b32 vcc_lo, s1
+// CHECK: [0x01,0x31,0xea,0xbe]
+
+s_mov_fed_b32 vcc_hi, s1
+// CHECK: [0x01,0x31,0xeb,0xbe]
+
+s_mov_fed_b32 tba_lo, s1
+// CHECK: [0x01,0x31,0xec,0xbe]
+
+s_mov_fed_b32 tba_hi, s1
+// CHECK: [0x01,0x31,0xed,0xbe]
+
+s_mov_fed_b32 tma_lo, s1
+// CHECK: [0x01,0x31,0xee,0xbe]
+
+s_mov_fed_b32 tma_hi, s1
+// CHECK: [0x01,0x31,0xef,0xbe]
+
+s_mov_fed_b32 ttmp11, s1
+// CHECK: [0x01,0x31,0xfb,0xbe]
+
+s_mov_fed_b32 m0, s1
+// CHECK: [0x01,0x31,0xfc,0xbe]
+
+s_mov_fed_b32 exec_lo, s1
+// CHECK: [0x01,0x31,0xfe,0xbe]
+
+s_mov_fed_b32 exec_hi, s1
+// CHECK: [0x01,0x31,0xff,0xbe]
+
+s_mov_fed_b32 s5, s101
+// CHECK: [0x65,0x31,0x85,0xbe]
+
+s_mov_fed_b32 s5, flat_scratch_lo
+// CHECK: [0x66,0x31,0x85,0xbe]
+
+s_mov_fed_b32 s5, flat_scratch_hi
+// CHECK: [0x67,0x31,0x85,0xbe]
+
+s_mov_fed_b32 s5, vcc_lo
+// CHECK: [0x6a,0x31,0x85,0xbe]
+
+s_mov_fed_b32 s5, vcc_hi
+// CHECK: [0x6b,0x31,0x85,0xbe]
+
+s_mov_fed_b32 s5, tba_lo
+// CHECK: [0x6c,0x31,0x85,0xbe]
+
+s_mov_fed_b32 s5, tba_hi
+// CHECK: [0x6d,0x31,0x85,0xbe]
+
+s_mov_fed_b32 s5, tma_lo
+// CHECK: [0x6e,0x31,0x85,0xbe]
+
+s_mov_fed_b32 s5, tma_hi
+// CHECK: [0x6f,0x31,0x85,0xbe]
+
+s_mov_fed_b32 s5, ttmp11
+// CHECK: [0x7b,0x31,0x85,0xbe]
+
+s_mov_fed_b32 s5, m0
+// CHECK: [0x7c,0x31,0x85,0xbe]
+
+s_mov_fed_b32 s5, exec_lo
+// CHECK: [0x7e,0x31,0x85,0xbe]
+
+s_mov_fed_b32 s5, exec_hi
+// CHECK: [0x7f,0x31,0x85,0xbe]
+
+s_mov_fed_b32 s5, 0
+// CHECK: [0x80,0x31,0x85,0xbe]
+
+s_mov_fed_b32 s5, -1
+// CHECK: [0xc1,0x31,0x85,0xbe]
+
+s_mov_fed_b32 s5, 0.5
+// CHECK: [0xf0,0x31,0x85,0xbe]
+
+s_mov_fed_b32 s5, -4.0
+// CHECK: [0xf7,0x31,0x85,0xbe]
+
+s_mov_fed_b32 s5, 0xaf123456
+// CHECK: [0xff,0x31,0x85,0xbe,0x56,0x34,0x12,0xaf]
+
+s_mov_fed_b32 s5, 0x3f717273
+// CHECK: [0xff,0x31,0x85,0xbe,0x73,0x72,0x71,0x3f]
+
+s_set_gpr_idx_idx s1
+// CHECK: [0x01,0x32,0x80,0xbe]
+
+s_set_gpr_idx_idx s101
+// CHECK: [0x65,0x32,0x80,0xbe]
+
+s_set_gpr_idx_idx flat_scratch_lo
+// CHECK: [0x66,0x32,0x80,0xbe]
+
+s_set_gpr_idx_idx flat_scratch_hi
+// CHECK: [0x67,0x32,0x80,0xbe]
+
+s_set_gpr_idx_idx vcc_lo
+// CHECK: [0x6a,0x32,0x80,0xbe]
+
+s_set_gpr_idx_idx vcc_hi
+// CHECK: [0x6b,0x32,0x80,0xbe]
+
+s_set_gpr_idx_idx tba_lo
+// CHECK: [0x6c,0x32,0x80,0xbe]
+
+s_set_gpr_idx_idx tba_hi
+// CHECK: [0x6d,0x32,0x80,0xbe]
+
+s_set_gpr_idx_idx tma_lo
+// CHECK: [0x6e,0x32,0x80,0xbe]
+
+s_set_gpr_idx_idx tma_hi
+// CHECK: [0x6f,0x32,0x80,0xbe]
+
+s_set_gpr_idx_idx ttmp11
+// CHECK: [0x7b,0x32,0x80,0xbe]
+
+s_set_gpr_idx_idx m0
+// CHECK: [0x7c,0x32,0x80,0xbe]
+
+s_set_gpr_idx_idx 0
+// CHECK: [0x80,0x32,0x80,0xbe]
+
+s_set_gpr_idx_idx -1
+// CHECK: [0xc1,0x32,0x80,0xbe]
+
+s_set_gpr_idx_idx 0.5
+// CHECK: [0xf0,0x32,0x80,0xbe]
+
+s_set_gpr_idx_idx -4.0
+// CHECK: [0xf7,0x32,0x80,0xbe]
+
+s_set_gpr_idx_idx 0xaf123456
+// CHECK: [0xff,0x32,0x80,0xbe,0x56,0x34,0x12,0xaf]
+
+s_set_gpr_idx_idx 0x3f717273
+// CHECK: [0xff,0x32,0x80,0xbe,0x73,0x72,0x71,0x3f]
+
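+// From here on the checks use the two-source SOP2 layout (again assuming
+// the GCN3 encoding): SSRC0 [7:0], SSRC1 [15:8], SDST [22:16], OP [29:23],
+// and 0b10 in [31:30]. s_add_u32 is opcode 0 and s_sub_u32 opcode 1; the
+// opcode's low bit lands in the high bit of the SDST byte (0x05 vs 0x85
+// for s5), while s_add_i32 (opcode 2) shows up as the final byte 0x81.
+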
+s_add_u32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x80]
+
+s_add_u32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x80]
+
+s_add_u32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x80]
+
+s_add_u32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x80]
+
+s_add_u32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x80]
+
+s_add_u32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x80]
+
+s_add_u32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x80]
+
+s_add_u32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x80]
+
+s_add_u32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x80]
+
+s_add_u32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x80]
+
+s_add_u32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x80]
+
+s_add_u32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x80]
+
+s_add_u32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x80]
+
+s_add_u32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x80]
+
+s_add_u32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x80]
+
+s_add_u32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x80]
+
+s_add_u32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x80]
+
+s_add_u32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x80]
+
+s_add_u32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x80]
+
+s_add_u32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x80]
+
+s_add_u32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x80]
+
+s_add_u32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x80]
+
+s_add_u32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x80]
+
+s_add_u32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x80]
+
+s_add_u32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x80]
+
+s_add_u32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x80]
+
+s_add_u32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x80]
+
+s_add_u32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x80]
+
+s_add_u32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x80]
+
+s_add_u32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x80]
+
+s_add_u32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x80]
+
+s_add_u32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x80,0x56,0x34,0x12,0xaf]
+
+s_add_u32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x80,0x73,0x72,0x71,0x3f]
+
+s_add_u32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x80]
+
+s_add_u32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x80]
+
+s_add_u32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x80]
+
+s_add_u32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x80]
+
+s_add_u32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x80]
+
+s_add_u32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x80]
+
+s_add_u32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x80]
+
+s_add_u32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x80]
+
+s_add_u32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x80]
+
+s_add_u32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x80]
+
+s_add_u32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x80]
+
+s_add_u32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x80]
+
+s_add_u32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x80]
+
+s_add_u32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x80]
+
+s_add_u32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x80]
+
+s_add_u32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x80]
+
+s_add_u32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x80]
+
+s_add_u32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x80,0x56,0x34,0x12,0xaf]
+
+s_add_u32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x80,0x73,0x72,0x71,0x3f]
+
+s_sub_u32 s5, s1, s2
+// CHECK: [0x01,0x02,0x85,0x80]
+
+s_sub_u32 s101, s1, s2
+// CHECK: [0x01,0x02,0xe5,0x80]
+
+s_sub_u32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0xe6,0x80]
+
+s_sub_u32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0xe7,0x80]
+
+s_sub_u32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0xea,0x80]
+
+s_sub_u32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0xeb,0x80]
+
+s_sub_u32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0xec,0x80]
+
+s_sub_u32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0xed,0x80]
+
+s_sub_u32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0xee,0x80]
+
+s_sub_u32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0xef,0x80]
+
+s_sub_u32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0xfb,0x80]
+
+s_sub_u32 m0, s1, s2
+// CHECK: [0x01,0x02,0xfc,0x80]
+
+s_sub_u32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0xfe,0x80]
+
+s_sub_u32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0xff,0x80]
+
+s_sub_u32 s5, s101, s2
+// CHECK: [0x65,0x02,0x85,0x80]
+
+s_sub_u32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x85,0x80]
+
+s_sub_u32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x85,0x80]
+
+s_sub_u32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x85,0x80]
+
+s_sub_u32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x85,0x80]
+
+s_sub_u32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x85,0x80]
+
+s_sub_u32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x85,0x80]
+
+s_sub_u32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x85,0x80]
+
+s_sub_u32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x85,0x80]
+
+s_sub_u32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x85,0x80]
+
+s_sub_u32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x85,0x80]
+
+s_sub_u32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x85,0x80]
+
+s_sub_u32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x85,0x80]
+
+s_sub_u32 s5, 0, s2
+// CHECK: [0x80,0x02,0x85,0x80]
+
+s_sub_u32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x85,0x80]
+
+s_sub_u32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x85,0x80]
+
+s_sub_u32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x85,0x80]
+
+s_sub_u32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x85,0x80,0x56,0x34,0x12,0xaf]
+
+s_sub_u32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x85,0x80,0x73,0x72,0x71,0x3f]
+
+s_sub_u32 s5, s1, s101
+// CHECK: [0x01,0x65,0x85,0x80]
+
+s_sub_u32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x85,0x80]
+
+s_sub_u32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x85,0x80]
+
+s_sub_u32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x85,0x80]
+
+s_sub_u32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x85,0x80]
+
+s_sub_u32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x85,0x80]
+
+s_sub_u32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x85,0x80]
+
+s_sub_u32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x85,0x80]
+
+s_sub_u32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x85,0x80]
+
+s_sub_u32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x85,0x80]
+
+s_sub_u32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x85,0x80]
+
+s_sub_u32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x85,0x80]
+
+s_sub_u32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x85,0x80]
+
+s_sub_u32 s5, s1, 0
+// CHECK: [0x01,0x80,0x85,0x80]
+
+s_sub_u32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x85,0x80]
+
+s_sub_u32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x85,0x80]
+
+s_sub_u32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x85,0x80]
+
+s_sub_u32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x85,0x80,0x56,0x34,0x12,0xaf]
+
+s_sub_u32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x85,0x80,0x73,0x72,0x71,0x3f]
+
+s_add_i32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x81]
+
+s_add_i32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x81]
+
+s_add_i32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x81]
+
+s_add_i32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x81]
+
+s_add_i32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x81]
+
+s_add_i32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x81]
+
+s_add_i32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x81]
+
+s_add_i32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x81]
+
+s_add_i32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x81]
+
+s_add_i32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x81]
+
+s_add_i32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x81]
+
+s_add_i32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x81]
+
+s_add_i32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x81]
+
+s_add_i32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x81]
+
+s_add_i32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x81]
+
+s_add_i32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x81]
+
+s_add_i32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x81]
+
+s_add_i32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x81]
+
+s_add_i32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x81]
+
+s_add_i32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x81]
+
+s_add_i32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x81]
+
+s_add_i32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x81]
+
+s_add_i32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x81]
+
+s_add_i32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x81]
+
+s_add_i32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x81]
+
+s_add_i32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x81]
+
+s_add_i32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x81]
+
+s_add_i32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x81]
+
+s_add_i32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x81]
+
+s_add_i32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x81]
+
+s_add_i32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x81]
+
+s_add_i32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x81,0x56,0x34,0x12,0xaf]
+
+s_add_i32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x81,0x73,0x72,0x71,0x3f]
+
+s_add_i32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x81]
+
+s_add_i32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x81]
+
+s_add_i32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x81]
+
+s_add_i32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x81]
+
+s_add_i32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x81]
+
+s_add_i32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x81]
+
+s_add_i32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x81]
+
+s_add_i32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x81]
+
+s_add_i32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x81]
+
+s_add_i32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x81]
+
+s_add_i32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x81]
+
+s_add_i32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x81]
+
+s_add_i32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x81]
+
+s_add_i32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x81]
+
+s_add_i32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x81]
+
+s_add_i32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x81]
+
+s_add_i32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x81]
+
+s_add_i32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x81,0x56,0x34,0x12,0xaf]
+
+s_add_i32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x81,0x73,0x72,0x71,0x3f]
+
+s_sub_i32 s5, s1, s2
+// CHECK: [0x01,0x02,0x85,0x81]
+
+s_sub_i32 s101, s1, s2
+// CHECK: [0x01,0x02,0xe5,0x81]
+
+s_sub_i32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0xe6,0x81]
+
+s_sub_i32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0xe7,0x81]
+
+s_sub_i32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0xea,0x81]
+
+s_sub_i32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0xeb,0x81]
+
+s_sub_i32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0xec,0x81]
+
+s_sub_i32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0xed,0x81]
+
+s_sub_i32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0xee,0x81]
+
+s_sub_i32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0xef,0x81]
+
+s_sub_i32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0xfb,0x81]
+
+s_sub_i32 m0, s1, s2
+// CHECK: [0x01,0x02,0xfc,0x81]
+
+s_sub_i32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0xfe,0x81]
+
+s_sub_i32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0xff,0x81]
+
+s_sub_i32 s5, s101, s2
+// CHECK: [0x65,0x02,0x85,0x81]
+
+s_sub_i32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x85,0x81]
+
+s_sub_i32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x85,0x81]
+
+s_sub_i32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x85,0x81]
+
+s_sub_i32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x85,0x81]
+
+s_sub_i32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x85,0x81]
+
+s_sub_i32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x85,0x81]
+
+s_sub_i32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x85,0x81]
+
+s_sub_i32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x85,0x81]
+
+s_sub_i32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x85,0x81]
+
+s_sub_i32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x85,0x81]
+
+s_sub_i32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x85,0x81]
+
+s_sub_i32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x85,0x81]
+
+s_sub_i32 s5, 0, s2
+// CHECK: [0x80,0x02,0x85,0x81]
+
+s_sub_i32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x85,0x81]
+
+s_sub_i32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x85,0x81]
+
+s_sub_i32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x85,0x81]
+
+s_sub_i32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x85,0x81,0x56,0x34,0x12,0xaf]
+
+s_sub_i32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x85,0x81,0x73,0x72,0x71,0x3f]
+
+s_sub_i32 s5, s1, s101
+// CHECK: [0x01,0x65,0x85,0x81]
+
+s_sub_i32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x85,0x81]
+
+s_sub_i32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x85,0x81]
+
+s_sub_i32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x85,0x81]
+
+s_sub_i32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x85,0x81]
+
+s_sub_i32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x85,0x81]
+
+s_sub_i32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x85,0x81]
+
+s_sub_i32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x85,0x81]
+
+s_sub_i32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x85,0x81]
+
+s_sub_i32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x85,0x81]
+
+s_sub_i32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x85,0x81]
+
+s_sub_i32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x85,0x81]
+
+s_sub_i32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x85,0x81]
+
+s_sub_i32 s5, s1, 0
+// CHECK: [0x01,0x80,0x85,0x81]
+
+s_sub_i32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x85,0x81]
+
+s_sub_i32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x85,0x81]
+
+s_sub_i32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x85,0x81]
+
+s_sub_i32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x85,0x81,0x56,0x34,0x12,0xaf]
+
+s_sub_i32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x85,0x81,0x73,0x72,0x71,0x3f]
+
+s_addc_u32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x82]
+
+s_addc_u32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x82]
+
+s_addc_u32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x82]
+
+s_addc_u32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x82]
+
+s_addc_u32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x82]
+
+s_addc_u32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x82]
+
+s_addc_u32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x82]
+
+s_addc_u32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x82]
+
+s_addc_u32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x82]
+
+s_addc_u32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x82]
+
+s_addc_u32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x82]
+
+s_addc_u32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x82]
+
+s_addc_u32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x82]
+
+s_addc_u32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x82]
+
+s_addc_u32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x82]
+
+s_addc_u32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x82]
+
+s_addc_u32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x82]
+
+s_addc_u32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x82]
+
+s_addc_u32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x82]
+
+s_addc_u32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x82]
+
+s_addc_u32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x82]
+
+s_addc_u32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x82]
+
+s_addc_u32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x82]
+
+s_addc_u32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x82]
+
+s_addc_u32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x82]
+
+s_addc_u32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x82]
+
+s_addc_u32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x82]
+
+s_addc_u32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x82]
+
+s_addc_u32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x82]
+
+s_addc_u32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x82]
+
+s_addc_u32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x82]
+
+s_addc_u32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x82,0x56,0x34,0x12,0xaf]
+
+s_addc_u32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x82,0x73,0x72,0x71,0x3f]
+
+s_addc_u32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x82]
+
+s_addc_u32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x82]
+
+s_addc_u32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x82]
+
+s_addc_u32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x82]
+
+s_addc_u32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x82]
+
+s_addc_u32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x82]
+
+s_addc_u32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x82]
+
+s_addc_u32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x82]
+
+s_addc_u32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x82]
+
+s_addc_u32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x82]
+
+s_addc_u32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x82]
+
+s_addc_u32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x82]
+
+s_addc_u32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x82]
+
+s_addc_u32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x82]
+
+s_addc_u32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x82]
+
+s_addc_u32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x82]
+
+s_addc_u32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x82]
+
+s_addc_u32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x82,0x56,0x34,0x12,0xaf]
+
+s_addc_u32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x82,0x73,0x72,0x71,0x3f]
+
+s_subb_u32 s5, s1, s2
+// CHECK: [0x01,0x02,0x85,0x82]
+
+s_subb_u32 s101, s1, s2
+// CHECK: [0x01,0x02,0xe5,0x82]
+
+s_subb_u32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0xe6,0x82]
+
+s_subb_u32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0xe7,0x82]
+
+s_subb_u32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0xea,0x82]
+
+s_subb_u32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0xeb,0x82]
+
+s_subb_u32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0xec,0x82]
+
+s_subb_u32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0xed,0x82]
+
+s_subb_u32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0xee,0x82]
+
+s_subb_u32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0xef,0x82]
+
+s_subb_u32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0xfb,0x82]
+
+s_subb_u32 m0, s1, s2
+// CHECK: [0x01,0x02,0xfc,0x82]
+
+s_subb_u32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0xfe,0x82]
+
+s_subb_u32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0xff,0x82]
+
+s_subb_u32 s5, s101, s2
+// CHECK: [0x65,0x02,0x85,0x82]
+
+s_subb_u32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x85,0x82]
+
+s_subb_u32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x85,0x82]
+
+s_subb_u32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x85,0x82]
+
+s_subb_u32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x85,0x82]
+
+s_subb_u32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x85,0x82]
+
+s_subb_u32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x85,0x82]
+
+s_subb_u32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x85,0x82]
+
+s_subb_u32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x85,0x82]
+
+s_subb_u32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x85,0x82]
+
+s_subb_u32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x85,0x82]
+
+s_subb_u32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x85,0x82]
+
+s_subb_u32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x85,0x82]
+
+s_subb_u32 s5, 0, s2
+// CHECK: [0x80,0x02,0x85,0x82]
+
+s_subb_u32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x85,0x82]
+
+s_subb_u32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x85,0x82]
+
+s_subb_u32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x85,0x82]
+
+s_subb_u32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x85,0x82,0x56,0x34,0x12,0xaf]
+
+s_subb_u32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x85,0x82,0x73,0x72,0x71,0x3f]
+
+s_subb_u32 s5, s1, s101
+// CHECK: [0x01,0x65,0x85,0x82]
+
+s_subb_u32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x85,0x82]
+
+s_subb_u32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x85,0x82]
+
+s_subb_u32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x85,0x82]
+
+s_subb_u32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x85,0x82]
+
+s_subb_u32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x85,0x82]
+
+s_subb_u32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x85,0x82]
+
+s_subb_u32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x85,0x82]
+
+s_subb_u32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x85,0x82]
+
+s_subb_u32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x85,0x82]
+
+s_subb_u32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x85,0x82]
+
+s_subb_u32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x85,0x82]
+
+s_subb_u32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x85,0x82]
+
+s_subb_u32 s5, s1, 0
+// CHECK: [0x01,0x80,0x85,0x82]
+
+s_subb_u32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x85,0x82]
+
+s_subb_u32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x85,0x82]
+
+s_subb_u32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x85,0x82]
+
+s_subb_u32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x85,0x82,0x56,0x34,0x12,0xaf]
+
+s_subb_u32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x85,0x82,0x73,0x72,0x71,0x3f]
+
+s_min_i32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x83]
+
+s_min_i32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x83]
+
+s_min_i32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x83]
+
+s_min_i32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x83]
+
+s_min_i32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x83]
+
+s_min_i32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x83]
+
+s_min_i32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x83]
+
+s_min_i32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x83]
+
+s_min_i32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x83]
+
+s_min_i32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x83]
+
+s_min_i32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x83]
+
+s_min_i32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x83]
+
+s_min_i32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x83]
+
+s_min_i32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x83]
+
+s_min_i32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x83]
+
+s_min_i32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x83]
+
+s_min_i32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x83]
+
+s_min_i32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x83]
+
+s_min_i32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x83]
+
+s_min_i32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x83]
+
+s_min_i32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x83]
+
+s_min_i32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x83]
+
+s_min_i32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x83]
+
+s_min_i32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x83]
+
+s_min_i32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x83]
+
+s_min_i32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x83]
+
+s_min_i32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x83]
+
+s_min_i32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x83]
+
+s_min_i32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x83]
+
+s_min_i32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x83]
+
+s_min_i32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x83]
+
+s_min_i32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x83,0x56,0x34,0x12,0xaf]
+
+s_min_i32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x83,0x73,0x72,0x71,0x3f]
+
+s_min_i32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x83]
+
+s_min_i32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x83]
+
+s_min_i32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x83]
+
+s_min_i32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x83]
+
+s_min_i32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x83]
+
+s_min_i32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x83]
+
+s_min_i32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x83]
+
+s_min_i32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x83]
+
+s_min_i32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x83]
+
+s_min_i32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x83]
+
+s_min_i32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x83]
+
+s_min_i32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x83]
+
+s_min_i32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x83]
+
+s_min_i32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x83]
+
+s_min_i32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x83]
+
+s_min_i32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x83]
+
+s_min_i32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x83]
+
+s_min_i32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x83,0x56,0x34,0x12,0xaf]
+
+s_min_i32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x83,0x73,0x72,0x71,0x3f]
+
+s_min_u32 s5, s1, s2
+// CHECK: [0x01,0x02,0x85,0x83]
+
+s_min_u32 s101, s1, s2
+// CHECK: [0x01,0x02,0xe5,0x83]
+
+s_min_u32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0xe6,0x83]
+
+s_min_u32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0xe7,0x83]
+
+s_min_u32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0xea,0x83]
+
+s_min_u32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0xeb,0x83]
+
+s_min_u32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0xec,0x83]
+
+s_min_u32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0xed,0x83]
+
+s_min_u32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0xee,0x83]
+
+s_min_u32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0xef,0x83]
+
+s_min_u32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0xfb,0x83]
+
+s_min_u32 m0, s1, s2
+// CHECK: [0x01,0x02,0xfc,0x83]
+
+s_min_u32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0xfe,0x83]
+
+s_min_u32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0xff,0x83]
+
+s_min_u32 s5, s101, s2
+// CHECK: [0x65,0x02,0x85,0x83]
+
+s_min_u32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x85,0x83]
+
+s_min_u32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x85,0x83]
+
+s_min_u32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x85,0x83]
+
+s_min_u32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x85,0x83]
+
+s_min_u32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x85,0x83]
+
+s_min_u32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x85,0x83]
+
+s_min_u32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x85,0x83]
+
+s_min_u32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x85,0x83]
+
+s_min_u32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x85,0x83]
+
+s_min_u32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x85,0x83]
+
+s_min_u32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x85,0x83]
+
+s_min_u32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x85,0x83]
+
+s_min_u32 s5, 0, s2
+// CHECK: [0x80,0x02,0x85,0x83]
+
+s_min_u32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x85,0x83]
+
+s_min_u32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x85,0x83]
+
+s_min_u32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x85,0x83]
+
+s_min_u32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x85,0x83,0x56,0x34,0x12,0xaf]
+
+s_min_u32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x85,0x83,0x73,0x72,0x71,0x3f]
+
+s_min_u32 s5, s1, s101
+// CHECK: [0x01,0x65,0x85,0x83]
+
+s_min_u32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x85,0x83]
+
+s_min_u32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x85,0x83]
+
+s_min_u32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x85,0x83]
+
+s_min_u32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x85,0x83]
+
+s_min_u32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x85,0x83]
+
+s_min_u32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x85,0x83]
+
+s_min_u32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x85,0x83]
+
+s_min_u32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x85,0x83]
+
+s_min_u32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x85,0x83]
+
+s_min_u32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x85,0x83]
+
+s_min_u32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x85,0x83]
+
+s_min_u32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x85,0x83]
+
+s_min_u32 s5, s1, 0
+// CHECK: [0x01,0x80,0x85,0x83]
+
+s_min_u32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x85,0x83]
+
+s_min_u32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x85,0x83]
+
+s_min_u32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x85,0x83]
+
+s_min_u32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x85,0x83,0x56,0x34,0x12,0xaf]
+
+s_min_u32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x85,0x83,0x73,0x72,0x71,0x3f]
+
+s_max_i32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x84]
+
+s_max_i32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x84]
+
+s_max_i32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x84]
+
+s_max_i32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x84]
+
+s_max_i32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x84]
+
+s_max_i32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x84]
+
+s_max_i32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x84]
+
+s_max_i32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x84]
+
+s_max_i32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x84]
+
+s_max_i32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x84]
+
+s_max_i32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x84]
+
+s_max_i32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x84]
+
+s_max_i32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x84]
+
+s_max_i32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x84]
+
+s_max_i32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x84]
+
+s_max_i32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x84]
+
+s_max_i32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x84]
+
+s_max_i32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x84]
+
+s_max_i32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x84]
+
+s_max_i32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x84]
+
+s_max_i32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x84]
+
+s_max_i32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x84]
+
+s_max_i32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x84]
+
+s_max_i32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x84]
+
+s_max_i32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x84]
+
+s_max_i32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x84]
+
+s_max_i32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x84]
+
+s_max_i32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x84]
+
+s_max_i32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x84]
+
+s_max_i32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x84]
+
+s_max_i32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x84]
+
+s_max_i32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x84,0x56,0x34,0x12,0xaf]
+
+s_max_i32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x84,0x73,0x72,0x71,0x3f]
+
+s_max_i32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x84]
+
+s_max_i32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x84]
+
+s_max_i32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x84]
+
+s_max_i32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x84]
+
+s_max_i32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x84]
+
+s_max_i32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x84]
+
+s_max_i32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x84]
+
+s_max_i32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x84]
+
+s_max_i32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x84]
+
+s_max_i32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x84]
+
+s_max_i32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x84]
+
+s_max_i32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x84]
+
+s_max_i32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x84]
+
+s_max_i32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x84]
+
+s_max_i32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x84]
+
+s_max_i32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x84]
+
+s_max_i32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x84]
+
+s_max_i32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x84,0x56,0x34,0x12,0xaf]
+
+s_max_i32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x84,0x73,0x72,0x71,0x3f]
+
+s_max_u32 s5, s1, s2
+// CHECK: [0x01,0x02,0x85,0x84]
+
+s_max_u32 s101, s1, s2
+// CHECK: [0x01,0x02,0xe5,0x84]
+
+s_max_u32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0xe6,0x84]
+
+s_max_u32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0xe7,0x84]
+
+s_max_u32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0xea,0x84]
+
+s_max_u32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0xeb,0x84]
+
+s_max_u32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0xec,0x84]
+
+s_max_u32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0xed,0x84]
+
+s_max_u32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0xee,0x84]
+
+s_max_u32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0xef,0x84]
+
+s_max_u32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0xfb,0x84]
+
+s_max_u32 m0, s1, s2
+// CHECK: [0x01,0x02,0xfc,0x84]
+
+s_max_u32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0xfe,0x84]
+
+s_max_u32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0xff,0x84]
+
+s_max_u32 s5, s101, s2
+// CHECK: [0x65,0x02,0x85,0x84]
+
+s_max_u32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x85,0x84]
+
+s_max_u32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x85,0x84]
+
+s_max_u32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x85,0x84]
+
+s_max_u32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x85,0x84]
+
+s_max_u32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x85,0x84]
+
+s_max_u32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x85,0x84]
+
+s_max_u32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x85,0x84]
+
+s_max_u32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x85,0x84]
+
+s_max_u32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x85,0x84]
+
+s_max_u32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x85,0x84]
+
+s_max_u32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x85,0x84]
+
+s_max_u32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x85,0x84]
+
+s_max_u32 s5, 0, s2
+// CHECK: [0x80,0x02,0x85,0x84]
+
+s_max_u32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x85,0x84]
+
+s_max_u32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x85,0x84]
+
+s_max_u32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x85,0x84]
+
+s_max_u32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x85,0x84,0x56,0x34,0x12,0xaf]
+
+s_max_u32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x85,0x84,0x73,0x72,0x71,0x3f]
+
+s_max_u32 s5, s1, s101
+// CHECK: [0x01,0x65,0x85,0x84]
+
+s_max_u32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x85,0x84]
+
+s_max_u32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x85,0x84]
+
+s_max_u32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x85,0x84]
+
+s_max_u32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x85,0x84]
+
+s_max_u32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x85,0x84]
+
+s_max_u32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x85,0x84]
+
+s_max_u32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x85,0x84]
+
+s_max_u32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x85,0x84]
+
+s_max_u32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x85,0x84]
+
+s_max_u32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x85,0x84]
+
+s_max_u32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x85,0x84]
+
+s_max_u32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x85,0x84]
+
+s_max_u32 s5, s1, 0
+// CHECK: [0x01,0x80,0x85,0x84]
+
+s_max_u32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x85,0x84]
+
+s_max_u32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x85,0x84]
+
+s_max_u32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x85,0x84]
+
+s_max_u32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x85,0x84,0x56,0x34,0x12,0xaf]
+
+s_max_u32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x85,0x84,0x73,0x72,0x71,0x3f]
+
+s_cselect_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x85]
+
+s_cselect_b32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x85]
+
+s_cselect_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x85]
+
+s_cselect_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x85]
+
+s_cselect_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x85]
+
+s_cselect_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x85]
+
+s_cselect_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x85]
+
+s_cselect_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x85]
+
+s_cselect_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x85]
+
+s_cselect_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x85]
+
+s_cselect_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x85]
+
+s_cselect_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x85]
+
+s_cselect_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x85]
+
+s_cselect_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x85]
+
+s_cselect_b32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x85]
+
+s_cselect_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x85]
+
+s_cselect_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x85]
+
+s_cselect_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x85]
+
+s_cselect_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x85]
+
+s_cselect_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x85]
+
+s_cselect_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x85]
+
+s_cselect_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x85]
+
+s_cselect_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x85]
+
+s_cselect_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x85]
+
+s_cselect_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x85]
+
+s_cselect_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x85]
+
+s_cselect_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x85]
+
+s_cselect_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x85]
+
+s_cselect_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x85]
+
+s_cselect_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x85]
+
+s_cselect_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x85]
+
+s_cselect_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x85,0x56,0x34,0x12,0xaf]
+
+s_cselect_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x85,0x73,0x72,0x71,0x3f]
+
+s_cselect_b32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x85]
+
+s_cselect_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x85]
+
+s_cselect_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x85]
+
+s_cselect_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x85]
+
+s_cselect_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x85]
+
+s_cselect_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x85]
+
+s_cselect_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x85]
+
+s_cselect_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x85]
+
+s_cselect_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x85]
+
+s_cselect_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x85]
+
+s_cselect_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x85]
+
+s_cselect_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x85]
+
+s_cselect_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x85]
+
+s_cselect_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x85]
+
+s_cselect_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x85]
+
+s_cselect_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x85]
+
+s_cselect_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x85]
+
+s_cselect_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x85,0x56,0x34,0x12,0xaf]
+
+s_cselect_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x85,0x73,0x72,0x71,0x3f]
+
+s_cselect_b64 s[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8a,0x85]
+
+s_cselect_b64 s[12:13], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8c,0x85]
+
+s_cselect_b64 s[100:101], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe4,0x85]
+
+s_cselect_b64 flat_scratch, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe6,0x85]
+
+s_cselect_b64 vcc, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xea,0x85]
+
+s_cselect_b64 tba, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xec,0x85]
+
+s_cselect_b64 tma, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xee,0x85]
+
+s_cselect_b64 ttmp[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfa,0x85]
+
+s_cselect_b64 exec, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfe,0x85]
+
+s_cselect_b64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[100:101], s[4:5]
+// CHECK: [0x64,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], flat_scratch, s[4:5]
+// CHECK: [0x66,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], tba, s[4:5]
+// CHECK: [0x6c,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], tma, s[4:5]
+// CHECK: [0x6e,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], exec, s[4:5]
+// CHECK: [0x7e,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], 0, s[4:5]
+// CHECK: [0x80,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], -1, s[4:5]
+// CHECK: [0xc1,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x8a,0x85]
+
+s_cselect_b64 s[10:11], 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x85,0x56,0x34,0x12,0xaf]
+
+s_cselect_b64 s[10:11], 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x85,0x73,0x72,0x71,0x3f]
+
+s_cselect_b64 s[10:11], s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], s[100:101]
+// CHECK: [0x02,0x64,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], flat_scratch
+// CHECK: [0x02,0x66,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], vcc
+// CHECK: [0x02,0x6a,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], tba
+// CHECK: [0x02,0x6c,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], tma
+// CHECK: [0x02,0x6e,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], exec
+// CHECK: [0x02,0x7e,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x85]
+
+s_cselect_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x85,0x56,0x34,0x12,0xaf]
+
+s_cselect_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x85,0x73,0x72,0x71,0x3f]
+
+s_and_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x86]
+
+s_and_b32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x86]
+
+s_and_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x86]
+
+s_and_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x86]
+
+s_and_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x86]
+
+s_and_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x86]
+
+s_and_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x86]
+
+s_and_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x86]
+
+s_and_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x86]
+
+s_and_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x86]
+
+s_and_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x86]
+
+s_and_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x86]
+
+s_and_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x86]
+
+s_and_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x86]
+
+s_and_b32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x86]
+
+s_and_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x86]
+
+s_and_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x86]
+
+s_and_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x86]
+
+s_and_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x86]
+
+s_and_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x86]
+
+s_and_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x86]
+
+s_and_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x86]
+
+s_and_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x86]
+
+s_and_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x86]
+
+s_and_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x86]
+
+s_and_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x86]
+
+s_and_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x86]
+
+s_and_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x86]
+
+s_and_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x86]
+
+s_and_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x86]
+
+s_and_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x86]
+
+s_and_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x86,0x56,0x34,0x12,0xaf]
+
+s_and_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x86,0x73,0x72,0x71,0x3f]
+
+s_and_b32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x86]
+
+s_and_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x86]
+
+s_and_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x86]
+
+s_and_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x86]
+
+s_and_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x86]
+
+s_and_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x86]
+
+s_and_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x86]
+
+s_and_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x86]
+
+s_and_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x86]
+
+s_and_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x86]
+
+s_and_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x86]
+
+s_and_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x86]
+
+s_and_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x86]
+
+s_and_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x86]
+
+s_and_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x86]
+
+s_and_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x86]
+
+s_and_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x86]
+
+s_and_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x86,0x56,0x34,0x12,0xaf]
+
+s_and_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x86,0x73,0x72,0x71,0x3f]
+
+s_and_b64 s[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8a,0x86]
+
+s_and_b64 s[12:13], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8c,0x86]
+
+s_and_b64 s[100:101], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe4,0x86]
+
+s_and_b64 flat_scratch, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe6,0x86]
+
+s_and_b64 vcc, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xea,0x86]
+
+s_and_b64 tba, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xec,0x86]
+
+s_and_b64 tma, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xee,0x86]
+
+s_and_b64 ttmp[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfa,0x86]
+
+s_and_b64 exec, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfe,0x86]
+
+s_and_b64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0x86]
+
+s_and_b64 s[10:11], s[100:101], s[4:5]
+// CHECK: [0x64,0x04,0x8a,0x86]
+
+s_and_b64 s[10:11], flat_scratch, s[4:5]
+// CHECK: [0x66,0x04,0x8a,0x86]
+
+s_and_b64 s[10:11], vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x8a,0x86]
+
+s_and_b64 s[10:11], tba, s[4:5]
+// CHECK: [0x6c,0x04,0x8a,0x86]
+
+s_and_b64 s[10:11], tma, s[4:5]
+// CHECK: [0x6e,0x04,0x8a,0x86]
+
+s_and_b64 s[10:11], ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x8a,0x86]
+
+s_and_b64 s[10:11], exec, s[4:5]
+// CHECK: [0x7e,0x04,0x8a,0x86]
+
+s_and_b64 s[10:11], 0, s[4:5]
+// CHECK: [0x80,0x04,0x8a,0x86]
+
+s_and_b64 s[10:11], -1, s[4:5]
+// CHECK: [0xc1,0x04,0x8a,0x86]
+
+s_and_b64 s[10:11], 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x8a,0x86]
+
+s_and_b64 s[10:11], -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x8a,0x86]
+
+s_and_b64 s[10:11], 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x86,0x56,0x34,0x12,0xaf]
+
+s_and_b64 s[10:11], 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x86,0x73,0x72,0x71,0x3f]
+
+s_and_b64 s[10:11], s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x8a,0x86]
+
+s_and_b64 s[10:11], s[2:3], s[100:101]
+// CHECK: [0x02,0x64,0x8a,0x86]
+
+s_and_b64 s[10:11], s[2:3], flat_scratch
+// CHECK: [0x02,0x66,0x8a,0x86]
+
+s_and_b64 s[10:11], s[2:3], vcc
+// CHECK: [0x02,0x6a,0x8a,0x86]
+
+s_and_b64 s[10:11], s[2:3], tba
+// CHECK: [0x02,0x6c,0x8a,0x86]
+
+s_and_b64 s[10:11], s[2:3], tma
+// CHECK: [0x02,0x6e,0x8a,0x86]
+
+s_and_b64 s[10:11], s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x8a,0x86]
+
+s_and_b64 s[10:11], s[2:3], exec
+// CHECK: [0x02,0x7e,0x8a,0x86]
+
+s_and_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x86]
+
+s_and_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x86]
+
+s_and_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x86]
+
+s_and_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x86]
+
+s_and_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x86,0x56,0x34,0x12,0xaf]
+
+s_and_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x86,0x73,0x72,0x71,0x3f]
+
+s_or_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x87]
+
+s_or_b32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x87]
+
+s_or_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x87]
+
+s_or_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x87]
+
+s_or_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x87]
+
+s_or_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x87]
+
+s_or_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x87]
+
+s_or_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x87]
+
+s_or_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x87]
+
+s_or_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x87]
+
+s_or_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x87]
+
+s_or_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x87]
+
+s_or_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x87]
+
+s_or_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x87]
+
+s_or_b32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x87]
+
+s_or_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x87]
+
+s_or_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x87]
+
+s_or_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x87]
+
+s_or_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x87]
+
+s_or_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x87]
+
+s_or_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x87]
+
+s_or_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x87]
+
+s_or_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x87]
+
+s_or_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x87]
+
+s_or_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x87]
+
+s_or_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x87]
+
+s_or_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x87]
+
+s_or_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x87]
+
+s_or_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x87]
+
+s_or_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x87]
+
+s_or_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x87]
+
+s_or_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x87,0x56,0x34,0x12,0xaf]
+
+s_or_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x87,0x73,0x72,0x71,0x3f]
+
+s_or_b32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x87]
+
+s_or_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x87]
+
+s_or_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x87]
+
+s_or_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x87]
+
+s_or_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x87]
+
+s_or_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x87]
+
+s_or_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x87]
+
+s_or_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x87]
+
+s_or_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x87]
+
+s_or_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x87]
+
+s_or_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x87]
+
+s_or_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x87]
+
+s_or_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x87]
+
+s_or_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x87]
+
+s_or_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x87]
+
+s_or_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x87]
+
+s_or_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x87]
+
+s_or_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x87,0x56,0x34,0x12,0xaf]
+
+s_or_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x87,0x73,0x72,0x71,0x3f]
+
+s_or_b64 s[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8a,0x87]
+
+s_or_b64 s[12:13], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8c,0x87]
+
+s_or_b64 s[100:101], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe4,0x87]
+
+s_or_b64 flat_scratch, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe6,0x87]
+
+s_or_b64 vcc, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xea,0x87]
+
+s_or_b64 tba, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xec,0x87]
+
+s_or_b64 tma, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xee,0x87]
+
+s_or_b64 ttmp[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfa,0x87]
+
+s_or_b64 exec, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfe,0x87]
+
+s_or_b64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0x87]
+
+s_or_b64 s[10:11], s[100:101], s[4:5]
+// CHECK: [0x64,0x04,0x8a,0x87]
+
+s_or_b64 s[10:11], flat_scratch, s[4:5]
+// CHECK: [0x66,0x04,0x8a,0x87]
+
+s_or_b64 s[10:11], vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x8a,0x87]
+
+s_or_b64 s[10:11], tba, s[4:5]
+// CHECK: [0x6c,0x04,0x8a,0x87]
+
+s_or_b64 s[10:11], tma, s[4:5]
+// CHECK: [0x6e,0x04,0x8a,0x87]
+
+s_or_b64 s[10:11], ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x8a,0x87]
+
+s_or_b64 s[10:11], exec, s[4:5]
+// CHECK: [0x7e,0x04,0x8a,0x87]
+
+s_or_b64 s[10:11], 0, s[4:5]
+// CHECK: [0x80,0x04,0x8a,0x87]
+
+s_or_b64 s[10:11], -1, s[4:5]
+// CHECK: [0xc1,0x04,0x8a,0x87]
+
+s_or_b64 s[10:11], 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x8a,0x87]
+
+s_or_b64 s[10:11], -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x8a,0x87]
+
+s_or_b64 s[10:11], 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x87,0x56,0x34,0x12,0xaf]
+
+s_or_b64 s[10:11], 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x87,0x73,0x72,0x71,0x3f]
+
+s_or_b64 s[10:11], s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x8a,0x87]
+
+s_or_b64 s[10:11], s[2:3], s[100:101]
+// CHECK: [0x02,0x64,0x8a,0x87]
+
+s_or_b64 s[10:11], s[2:3], flat_scratch
+// CHECK: [0x02,0x66,0x8a,0x87]
+
+s_or_b64 s[10:11], s[2:3], vcc
+// CHECK: [0x02,0x6a,0x8a,0x87]
+
+s_or_b64 s[10:11], s[2:3], tba
+// CHECK: [0x02,0x6c,0x8a,0x87]
+
+s_or_b64 s[10:11], s[2:3], tma
+// CHECK: [0x02,0x6e,0x8a,0x87]
+
+s_or_b64 s[10:11], s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x8a,0x87]
+
+s_or_b64 s[10:11], s[2:3], exec
+// CHECK: [0x02,0x7e,0x8a,0x87]
+
+s_or_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x87]
+
+s_or_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x87]
+
+s_or_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x87]
+
+s_or_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x87]
+
+s_or_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x87,0x56,0x34,0x12,0xaf]
+
+s_or_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x87,0x73,0x72,0x71,0x3f]
+
+s_xor_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x88]
+
+s_xor_b32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x88]
+
+s_xor_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x88]
+
+s_xor_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x88]
+
+s_xor_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x88]
+
+s_xor_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x88]
+
+s_xor_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x88]
+
+s_xor_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x88]
+
+s_xor_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x88]
+
+s_xor_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x88]
+
+s_xor_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x88]
+
+s_xor_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x88]
+
+s_xor_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x88]
+
+s_xor_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x88]
+
+s_xor_b32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x88]
+
+s_xor_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x88]
+
+s_xor_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x88]
+
+s_xor_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x88]
+
+s_xor_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x88]
+
+s_xor_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x88]
+
+s_xor_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x88]
+
+s_xor_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x88]
+
+s_xor_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x88]
+
+s_xor_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x88]
+
+s_xor_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x88]
+
+s_xor_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x88]
+
+s_xor_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x88]
+
+s_xor_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x88]
+
+s_xor_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x88]
+
+s_xor_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x88]
+
+s_xor_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x88]
+
+s_xor_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x88,0x56,0x34,0x12,0xaf]
+
+s_xor_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x88,0x73,0x72,0x71,0x3f]
+
+s_xor_b32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x88]
+
+s_xor_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x88]
+
+s_xor_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x88]
+
+s_xor_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x88]
+
+s_xor_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x88]
+
+s_xor_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x88]
+
+s_xor_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x88]
+
+s_xor_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x88]
+
+s_xor_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x88]
+
+s_xor_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x88]
+
+s_xor_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x88]
+
+s_xor_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x88]
+
+s_xor_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x88]
+
+s_xor_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x88]
+
+s_xor_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x88]
+
+s_xor_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x88]
+
+s_xor_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x88]
+
+s_xor_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x88,0x56,0x34,0x12,0xaf]
+
+s_xor_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x88,0x73,0x72,0x71,0x3f]
+
+s_xor_b64 s[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8a,0x88]
+
+s_xor_b64 s[12:13], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8c,0x88]
+
+s_xor_b64 s[100:101], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe4,0x88]
+
+s_xor_b64 flat_scratch, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe6,0x88]
+
+s_xor_b64 vcc, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xea,0x88]
+
+s_xor_b64 tba, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xec,0x88]
+
+s_xor_b64 tma, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xee,0x88]
+
+s_xor_b64 ttmp[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfa,0x88]
+
+s_xor_b64 exec, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfe,0x88]
+
+s_xor_b64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0x88]
+
+s_xor_b64 s[10:11], s[100:101], s[4:5]
+// CHECK: [0x64,0x04,0x8a,0x88]
+
+s_xor_b64 s[10:11], flat_scratch, s[4:5]
+// CHECK: [0x66,0x04,0x8a,0x88]
+
+s_xor_b64 s[10:11], vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x8a,0x88]
+
+s_xor_b64 s[10:11], tba, s[4:5]
+// CHECK: [0x6c,0x04,0x8a,0x88]
+
+s_xor_b64 s[10:11], tma, s[4:5]
+// CHECK: [0x6e,0x04,0x8a,0x88]
+
+s_xor_b64 s[10:11], ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x8a,0x88]
+
+s_xor_b64 s[10:11], exec, s[4:5]
+// CHECK: [0x7e,0x04,0x8a,0x88]
+
+s_xor_b64 s[10:11], 0, s[4:5]
+// CHECK: [0x80,0x04,0x8a,0x88]
+
+s_xor_b64 s[10:11], -1, s[4:5]
+// CHECK: [0xc1,0x04,0x8a,0x88]
+
+s_xor_b64 s[10:11], 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x8a,0x88]
+
+s_xor_b64 s[10:11], -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x8a,0x88]
+
+s_xor_b64 s[10:11], 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x88,0x56,0x34,0x12,0xaf]
+
+s_xor_b64 s[10:11], 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x88,0x73,0x72,0x71,0x3f]
+
+s_xor_b64 s[10:11], s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x8a,0x88]
+
+s_xor_b64 s[10:11], s[2:3], s[100:101]
+// CHECK: [0x02,0x64,0x8a,0x88]
+
+s_xor_b64 s[10:11], s[2:3], flat_scratch
+// CHECK: [0x02,0x66,0x8a,0x88]
+
+s_xor_b64 s[10:11], s[2:3], vcc
+// CHECK: [0x02,0x6a,0x8a,0x88]
+
+s_xor_b64 s[10:11], s[2:3], tba
+// CHECK: [0x02,0x6c,0x8a,0x88]
+
+s_xor_b64 s[10:11], s[2:3], tma
+// CHECK: [0x02,0x6e,0x8a,0x88]
+
+s_xor_b64 s[10:11], s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x8a,0x88]
+
+s_xor_b64 s[10:11], s[2:3], exec
+// CHECK: [0x02,0x7e,0x8a,0x88]
+
+s_xor_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x88]
+
+s_xor_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x88]
+
+s_xor_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x88]
+
+s_xor_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x88]
+
+s_xor_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x88,0x56,0x34,0x12,0xaf]
+
+s_xor_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x88,0x73,0x72,0x71,0x3f]
+
+s_andn2_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x89]
+
+s_andn2_b32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x89]
+
+s_andn2_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x89]
+
+s_andn2_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x89]
+
+s_andn2_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x89]
+
+s_andn2_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x89]
+
+s_andn2_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x89]
+
+s_andn2_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x89]
+
+s_andn2_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x89]
+
+s_andn2_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x89]
+
+s_andn2_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x89]
+
+s_andn2_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x89]
+
+s_andn2_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x89]
+
+s_andn2_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x89]
+
+s_andn2_b32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x89]
+
+s_andn2_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x89]
+
+s_andn2_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x89]
+
+s_andn2_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x89]
+
+s_andn2_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x89]
+
+s_andn2_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x89]
+
+s_andn2_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x89]
+
+s_andn2_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x89]
+
+s_andn2_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x89]
+
+s_andn2_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x89]
+
+s_andn2_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x89]
+
+s_andn2_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x89]
+
+s_andn2_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x89]
+
+s_andn2_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x89]
+
+s_andn2_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x89]
+
+s_andn2_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x89]
+
+s_andn2_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x89]
+
+s_andn2_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x89,0x56,0x34,0x12,0xaf]
+
+s_andn2_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x89,0x73,0x72,0x71,0x3f]
+
+s_andn2_b32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x89]
+
+s_andn2_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x89]
+
+s_andn2_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x89]
+
+s_andn2_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x89]
+
+s_andn2_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x89]
+
+s_andn2_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x89]
+
+s_andn2_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x89]
+
+s_andn2_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x89]
+
+s_andn2_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x89]
+
+s_andn2_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x89]
+
+s_andn2_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x89]
+
+s_andn2_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x89]
+
+s_andn2_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x89]
+
+s_andn2_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x89]
+
+s_andn2_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x89]
+
+s_andn2_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x89]
+
+s_andn2_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x89]
+
+s_andn2_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x89,0x56,0x34,0x12,0xaf]
+
+s_andn2_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x89,0x73,0x72,0x71,0x3f]
+
+s_andn2_b64 s[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8a,0x89]
+
+s_andn2_b64 s[12:13], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8c,0x89]
+
+s_andn2_b64 s[100:101], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe4,0x89]
+
+s_andn2_b64 flat_scratch, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe6,0x89]
+
+s_andn2_b64 vcc, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xea,0x89]
+
+s_andn2_b64 tba, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xec,0x89]
+
+s_andn2_b64 tma, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xee,0x89]
+
+s_andn2_b64 ttmp[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfa,0x89]
+
+s_andn2_b64 exec, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfe,0x89]
+
+s_andn2_b64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0x89]
+
+s_andn2_b64 s[10:11], s[100:101], s[4:5]
+// CHECK: [0x64,0x04,0x8a,0x89]
+
+s_andn2_b64 s[10:11], flat_scratch, s[4:5]
+// CHECK: [0x66,0x04,0x8a,0x89]
+
+s_andn2_b64 s[10:11], vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x8a,0x89]
+
+s_andn2_b64 s[10:11], tba, s[4:5]
+// CHECK: [0x6c,0x04,0x8a,0x89]
+
+s_andn2_b64 s[10:11], tma, s[4:5]
+// CHECK: [0x6e,0x04,0x8a,0x89]
+
+s_andn2_b64 s[10:11], ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x8a,0x89]
+
+s_andn2_b64 s[10:11], exec, s[4:5]
+// CHECK: [0x7e,0x04,0x8a,0x89]
+
+s_andn2_b64 s[10:11], 0, s[4:5]
+// CHECK: [0x80,0x04,0x8a,0x89]
+
+s_andn2_b64 s[10:11], -1, s[4:5]
+// CHECK: [0xc1,0x04,0x8a,0x89]
+
+s_andn2_b64 s[10:11], 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x8a,0x89]
+
+s_andn2_b64 s[10:11], -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x8a,0x89]
+
+s_andn2_b64 s[10:11], 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x89,0x56,0x34,0x12,0xaf]
+
+s_andn2_b64 s[10:11], 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x89,0x73,0x72,0x71,0x3f]
+
+s_andn2_b64 s[10:11], s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x8a,0x89]
+
+s_andn2_b64 s[10:11], s[2:3], s[100:101]
+// CHECK: [0x02,0x64,0x8a,0x89]
+
+s_andn2_b64 s[10:11], s[2:3], flat_scratch
+// CHECK: [0x02,0x66,0x8a,0x89]
+
+s_andn2_b64 s[10:11], s[2:3], vcc
+// CHECK: [0x02,0x6a,0x8a,0x89]
+
+s_andn2_b64 s[10:11], s[2:3], tba
+// CHECK: [0x02,0x6c,0x8a,0x89]
+
+s_andn2_b64 s[10:11], s[2:3], tma
+// CHECK: [0x02,0x6e,0x8a,0x89]
+
+s_andn2_b64 s[10:11], s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x8a,0x89]
+
+s_andn2_b64 s[10:11], s[2:3], exec
+// CHECK: [0x02,0x7e,0x8a,0x89]
+
+s_andn2_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x89]
+
+s_andn2_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x89]
+
+s_andn2_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x89]
+
+s_andn2_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x89]
+
+s_andn2_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x89,0x56,0x34,0x12,0xaf]
+
+s_andn2_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x89,0x73,0x72,0x71,0x3f]
+
+s_orn2_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x8a]
+
+s_orn2_b32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x8a]
+
+s_orn2_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x8a]
+
+s_orn2_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x8a]
+
+s_orn2_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x8a]
+
+s_orn2_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x8a]
+
+s_orn2_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x8a]
+
+s_orn2_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x8a]
+
+s_orn2_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x8a]
+
+s_orn2_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x8a]
+
+s_orn2_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x8a]
+
+s_orn2_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x8a]
+
+s_orn2_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x8a]
+
+s_orn2_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x8a]
+
+s_orn2_b32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x8a]
+
+s_orn2_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x8a]
+
+s_orn2_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x8a]
+
+s_orn2_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x8a]
+
+s_orn2_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x8a]
+
+s_orn2_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x8a]
+
+s_orn2_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x8a]
+
+s_orn2_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x8a]
+
+s_orn2_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x8a]
+
+s_orn2_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x8a]
+
+s_orn2_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x8a]
+
+s_orn2_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x8a]
+
+s_orn2_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x8a]
+
+s_orn2_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x8a]
+
+s_orn2_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x8a]
+
+s_orn2_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x8a]
+
+s_orn2_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x8a]
+
+s_orn2_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x8a,0x56,0x34,0x12,0xaf]
+
+s_orn2_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x8a,0x73,0x72,0x71,0x3f]
+
+s_orn2_b32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x8a]
+
+s_orn2_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x8a]
+
+s_orn2_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x8a]
+
+s_orn2_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x8a]
+
+s_orn2_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x8a]
+
+s_orn2_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x8a]
+
+s_orn2_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x8a]
+
+s_orn2_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x8a]
+
+s_orn2_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x8a]
+
+s_orn2_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x8a]
+
+s_orn2_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x8a]
+
+s_orn2_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x8a]
+
+s_orn2_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x8a]
+
+s_orn2_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x8a]
+
+s_orn2_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x8a]
+
+s_orn2_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x8a]
+
+s_orn2_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x8a]
+
+s_orn2_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x8a,0x56,0x34,0x12,0xaf]
+
+s_orn2_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x8a,0x73,0x72,0x71,0x3f]
+
+s_orn2_b64 s[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8a,0x8a]
+
+s_orn2_b64 s[12:13], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8c,0x8a]
+
+s_orn2_b64 s[100:101], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe4,0x8a]
+
+s_orn2_b64 flat_scratch, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe6,0x8a]
+
+s_orn2_b64 vcc, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xea,0x8a]
+
+s_orn2_b64 tba, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xec,0x8a]
+
+s_orn2_b64 tma, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xee,0x8a]
+
+s_orn2_b64 ttmp[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfa,0x8a]
+
+s_orn2_b64 exec, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfe,0x8a]
+
+s_orn2_b64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], s[100:101], s[4:5]
+// CHECK: [0x64,0x04,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], flat_scratch, s[4:5]
+// CHECK: [0x66,0x04,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], tba, s[4:5]
+// CHECK: [0x6c,0x04,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], tma, s[4:5]
+// CHECK: [0x6e,0x04,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], exec, s[4:5]
+// CHECK: [0x7e,0x04,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], 0, s[4:5]
+// CHECK: [0x80,0x04,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], -1, s[4:5]
+// CHECK: [0xc1,0x04,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x8a,0x56,0x34,0x12,0xaf]
+
+s_orn2_b64 s[10:11], 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x8a,0x73,0x72,0x71,0x3f]
+
+s_orn2_b64 s[10:11], s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], s[2:3], s[100:101]
+// CHECK: [0x02,0x64,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], s[2:3], flat_scratch
+// CHECK: [0x02,0x66,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], s[2:3], vcc
+// CHECK: [0x02,0x6a,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], s[2:3], tba
+// CHECK: [0x02,0x6c,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], s[2:3], tma
+// CHECK: [0x02,0x6e,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], s[2:3], exec
+// CHECK: [0x02,0x7e,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x8a]
+
+s_orn2_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x8a,0x56,0x34,0x12,0xaf]
+
+s_orn2_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x8a,0x73,0x72,0x71,0x3f]
+
+s_nand_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x8b]
+
+s_nand_b32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x8b]
+
+s_nand_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x8b]
+
+s_nand_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x8b]
+
+s_nand_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x8b]
+
+s_nand_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x8b]
+
+s_nand_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x8b]
+
+s_nand_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x8b]
+
+s_nand_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x8b]
+
+s_nand_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x8b]
+
+s_nand_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x8b]
+
+s_nand_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x8b]
+
+s_nand_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x8b]
+
+s_nand_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x8b]
+
+s_nand_b32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x8b]
+
+s_nand_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x8b]
+
+s_nand_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x8b]
+
+s_nand_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x8b]
+
+s_nand_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x8b]
+
+s_nand_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x8b]
+
+s_nand_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x8b]
+
+s_nand_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x8b]
+
+s_nand_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x8b]
+
+s_nand_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x8b]
+
+s_nand_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x8b]
+
+s_nand_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x8b]
+
+s_nand_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x8b]
+
+s_nand_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x8b]
+
+s_nand_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x8b]
+
+s_nand_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x8b]
+
+s_nand_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x8b]
+
+s_nand_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x8b,0x56,0x34,0x12,0xaf]
+
+s_nand_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x8b,0x73,0x72,0x71,0x3f]
+
+s_nand_b32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x8b]
+
+s_nand_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x8b]
+
+s_nand_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x8b]
+
+s_nand_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x8b]
+
+s_nand_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x8b]
+
+s_nand_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x8b]
+
+s_nand_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x8b]
+
+s_nand_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x8b]
+
+s_nand_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x8b]
+
+s_nand_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x8b]
+
+s_nand_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x8b]
+
+s_nand_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x8b]
+
+s_nand_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x8b]
+
+s_nand_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x8b]
+
+s_nand_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x8b]
+
+s_nand_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x8b]
+
+s_nand_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x8b]
+
+s_nand_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x8b,0x56,0x34,0x12,0xaf]
+
+s_nand_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x8b,0x73,0x72,0x71,0x3f]
+
+s_nand_b64 s[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8a,0x8b]
+
+s_nand_b64 s[12:13], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8c,0x8b]
+
+s_nand_b64 s[100:101], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe4,0x8b]
+
+s_nand_b64 flat_scratch, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe6,0x8b]
+
+s_nand_b64 vcc, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xea,0x8b]
+
+s_nand_b64 tba, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xec,0x8b]
+
+s_nand_b64 tma, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xee,0x8b]
+
+s_nand_b64 ttmp[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfa,0x8b]
+
+s_nand_b64 exec, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfe,0x8b]
+
+s_nand_b64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0x8b]
+
+s_nand_b64 s[10:11], s[100:101], s[4:5]
+// CHECK: [0x64,0x04,0x8a,0x8b]
+
+s_nand_b64 s[10:11], flat_scratch, s[4:5]
+// CHECK: [0x66,0x04,0x8a,0x8b]
+
+s_nand_b64 s[10:11], vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x8a,0x8b]
+
+s_nand_b64 s[10:11], tba, s[4:5]
+// CHECK: [0x6c,0x04,0x8a,0x8b]
+
+s_nand_b64 s[10:11], tma, s[4:5]
+// CHECK: [0x6e,0x04,0x8a,0x8b]
+
+s_nand_b64 s[10:11], ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x8a,0x8b]
+
+s_nand_b64 s[10:11], exec, s[4:5]
+// CHECK: [0x7e,0x04,0x8a,0x8b]
+
+s_nand_b64 s[10:11], 0, s[4:5]
+// CHECK: [0x80,0x04,0x8a,0x8b]
+
+s_nand_b64 s[10:11], -1, s[4:5]
+// CHECK: [0xc1,0x04,0x8a,0x8b]
+
+s_nand_b64 s[10:11], 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x8a,0x8b]
+
+s_nand_b64 s[10:11], -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x8a,0x8b]
+
+s_nand_b64 s[10:11], 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x8b,0x56,0x34,0x12,0xaf]
+
+s_nand_b64 s[10:11], 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x8b,0x73,0x72,0x71,0x3f]
+
+s_nand_b64 s[10:11], s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x8a,0x8b]
+
+s_nand_b64 s[10:11], s[2:3], s[100:101]
+// CHECK: [0x02,0x64,0x8a,0x8b]
+
+s_nand_b64 s[10:11], s[2:3], flat_scratch
+// CHECK: [0x02,0x66,0x8a,0x8b]
+
+s_nand_b64 s[10:11], s[2:3], vcc
+// CHECK: [0x02,0x6a,0x8a,0x8b]
+
+s_nand_b64 s[10:11], s[2:3], tba
+// CHECK: [0x02,0x6c,0x8a,0x8b]
+
+s_nand_b64 s[10:11], s[2:3], tma
+// CHECK: [0x02,0x6e,0x8a,0x8b]
+
+s_nand_b64 s[10:11], s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x8a,0x8b]
+
+s_nand_b64 s[10:11], s[2:3], exec
+// CHECK: [0x02,0x7e,0x8a,0x8b]
+
+s_nand_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x8b]
+
+s_nand_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x8b]
+
+s_nand_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x8b]
+
+s_nand_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x8b]
+
+s_nand_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x8b,0x56,0x34,0x12,0xaf]
+
+s_nand_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x8b,0x73,0x72,0x71,0x3f]
+
+s_nor_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x8c]
+
+s_nor_b32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x8c]
+
+s_nor_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x8c]
+
+s_nor_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x8c]
+
+s_nor_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x8c]
+
+s_nor_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x8c]
+
+s_nor_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x8c]
+
+s_nor_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x8c]
+
+s_nor_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x8c]
+
+s_nor_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x8c]
+
+s_nor_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x8c]
+
+s_nor_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x8c]
+
+s_nor_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x8c]
+
+s_nor_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x8c]
+
+s_nor_b32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x8c]
+
+s_nor_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x8c]
+
+s_nor_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x8c]
+
+s_nor_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x8c]
+
+s_nor_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x8c]
+
+s_nor_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x8c]
+
+s_nor_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x8c]
+
+s_nor_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x8c]
+
+s_nor_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x8c]
+
+s_nor_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x8c]
+
+s_nor_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x8c]
+
+s_nor_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x8c]
+
+s_nor_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x8c]
+
+s_nor_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x8c]
+
+s_nor_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x8c]
+
+s_nor_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x8c]
+
+s_nor_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x8c]
+
+s_nor_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x8c,0x56,0x34,0x12,0xaf]
+
+s_nor_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x8c,0x73,0x72,0x71,0x3f]
+
+s_nor_b32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x8c]
+
+s_nor_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x8c]
+
+s_nor_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x8c]
+
+s_nor_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x8c]
+
+s_nor_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x8c]
+
+s_nor_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x8c]
+
+s_nor_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x8c]
+
+s_nor_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x8c]
+
+s_nor_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x8c]
+
+s_nor_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x8c]
+
+s_nor_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x8c]
+
+s_nor_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x8c]
+
+s_nor_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x8c]
+
+s_nor_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x8c]
+
+s_nor_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x8c]
+
+s_nor_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x8c]
+
+s_nor_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x8c]
+
+s_nor_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x8c,0x56,0x34,0x12,0xaf]
+
+s_nor_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x8c,0x73,0x72,0x71,0x3f]
+
+s_nor_b64 s[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8a,0x8c]
+
+s_nor_b64 s[12:13], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8c,0x8c]
+
+s_nor_b64 s[100:101], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe4,0x8c]
+
+s_nor_b64 flat_scratch, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe6,0x8c]
+
+s_nor_b64 vcc, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xea,0x8c]
+
+s_nor_b64 tba, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xec,0x8c]
+
+s_nor_b64 tma, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xee,0x8c]
+
+s_nor_b64 ttmp[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfa,0x8c]
+
+s_nor_b64 exec, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfe,0x8c]
+
+s_nor_b64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0x8c]
+
+s_nor_b64 s[10:11], s[100:101], s[4:5]
+// CHECK: [0x64,0x04,0x8a,0x8c]
+
+s_nor_b64 s[10:11], flat_scratch, s[4:5]
+// CHECK: [0x66,0x04,0x8a,0x8c]
+
+s_nor_b64 s[10:11], vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x8a,0x8c]
+
+s_nor_b64 s[10:11], tba, s[4:5]
+// CHECK: [0x6c,0x04,0x8a,0x8c]
+
+s_nor_b64 s[10:11], tma, s[4:5]
+// CHECK: [0x6e,0x04,0x8a,0x8c]
+
+s_nor_b64 s[10:11], ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x8a,0x8c]
+
+s_nor_b64 s[10:11], exec, s[4:5]
+// CHECK: [0x7e,0x04,0x8a,0x8c]
+
+s_nor_b64 s[10:11], 0, s[4:5]
+// CHECK: [0x80,0x04,0x8a,0x8c]
+
+s_nor_b64 s[10:11], -1, s[4:5]
+// CHECK: [0xc1,0x04,0x8a,0x8c]
+
+s_nor_b64 s[10:11], 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x8a,0x8c]
+
+s_nor_b64 s[10:11], -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x8a,0x8c]
+
+s_nor_b64 s[10:11], 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x8c,0x56,0x34,0x12,0xaf]
+
+s_nor_b64 s[10:11], 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x8c,0x73,0x72,0x71,0x3f]
+
+s_nor_b64 s[10:11], s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x8a,0x8c]
+
+s_nor_b64 s[10:11], s[2:3], s[100:101]
+// CHECK: [0x02,0x64,0x8a,0x8c]
+
+s_nor_b64 s[10:11], s[2:3], flat_scratch
+// CHECK: [0x02,0x66,0x8a,0x8c]
+
+s_nor_b64 s[10:11], s[2:3], vcc
+// CHECK: [0x02,0x6a,0x8a,0x8c]
+
+s_nor_b64 s[10:11], s[2:3], tba
+// CHECK: [0x02,0x6c,0x8a,0x8c]
+
+s_nor_b64 s[10:11], s[2:3], tma
+// CHECK: [0x02,0x6e,0x8a,0x8c]
+
+s_nor_b64 s[10:11], s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x8a,0x8c]
+
+s_nor_b64 s[10:11], s[2:3], exec
+// CHECK: [0x02,0x7e,0x8a,0x8c]
+
+s_nor_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x8c]
+
+s_nor_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x8c]
+
+s_nor_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x8c]
+
+s_nor_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x8c]
+
+s_nor_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x8c,0x56,0x34,0x12,0xaf]
+
+s_nor_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x8c,0x73,0x72,0x71,0x3f]
+
+s_xnor_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x8d]
+
+s_xnor_b32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x8d]
+
+s_xnor_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x8d]
+
+s_xnor_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x8d]
+
+s_xnor_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x8d]
+
+s_xnor_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x8d]
+
+s_xnor_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x8d]
+
+s_xnor_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x8d]
+
+s_xnor_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x8d]
+
+s_xnor_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x8d]
+
+s_xnor_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x8d]
+
+s_xnor_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x8d]
+
+s_xnor_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x8d]
+
+s_xnor_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x8d]
+
+s_xnor_b32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x8d]
+
+s_xnor_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x8d]
+
+s_xnor_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x8d]
+
+s_xnor_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x8d]
+
+s_xnor_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x8d]
+
+s_xnor_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x8d]
+
+s_xnor_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x8d]
+
+s_xnor_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x8d]
+
+s_xnor_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x8d]
+
+s_xnor_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x8d]
+
+s_xnor_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x8d]
+
+s_xnor_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x8d]
+
+s_xnor_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x8d]
+
+s_xnor_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x8d]
+
+s_xnor_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x8d]
+
+s_xnor_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x8d]
+
+s_xnor_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x8d]
+
+s_xnor_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x8d,0x56,0x34,0x12,0xaf]
+
+s_xnor_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x8d,0x73,0x72,0x71,0x3f]
+
+s_xnor_b32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x8d]
+
+s_xnor_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x8d]
+
+s_xnor_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x8d]
+
+s_xnor_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x8d]
+
+s_xnor_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x8d]
+
+s_xnor_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x8d]
+
+s_xnor_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x8d]
+
+s_xnor_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x8d]
+
+s_xnor_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x8d]
+
+s_xnor_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x8d]
+
+s_xnor_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x8d]
+
+s_xnor_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x8d]
+
+s_xnor_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x8d]
+
+s_xnor_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x8d]
+
+s_xnor_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x8d]
+
+s_xnor_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x8d]
+
+s_xnor_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x8d]
+
+s_xnor_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x8d,0x56,0x34,0x12,0xaf]
+
+s_xnor_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x8d,0x73,0x72,0x71,0x3f]
+
+s_xnor_b64 s[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8a,0x8d]
+
+s_xnor_b64 s[12:13], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x8c,0x8d]
+
+s_xnor_b64 s[100:101], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe4,0x8d]
+
+s_xnor_b64 flat_scratch, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xe6,0x8d]
+
+s_xnor_b64 vcc, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xea,0x8d]
+
+s_xnor_b64 tba, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xec,0x8d]
+
+s_xnor_b64 tma, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xee,0x8d]
+
+s_xnor_b64 ttmp[10:11], s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfa,0x8d]
+
+s_xnor_b64 exec, s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0xfe,0x8d]
+
+s_xnor_b64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], s[100:101], s[4:5]
+// CHECK: [0x64,0x04,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], flat_scratch, s[4:5]
+// CHECK: [0x66,0x04,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], tba, s[4:5]
+// CHECK: [0x6c,0x04,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], tma, s[4:5]
+// CHECK: [0x6e,0x04,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], exec, s[4:5]
+// CHECK: [0x7e,0x04,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], 0, s[4:5]
+// CHECK: [0x80,0x04,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], -1, s[4:5]
+// CHECK: [0xc1,0x04,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x8d,0x56,0x34,0x12,0xaf]
+
+s_xnor_b64 s[10:11], 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x8a,0x8d,0x73,0x72,0x71,0x3f]
+
+s_xnor_b64 s[10:11], s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], s[2:3], s[100:101]
+// CHECK: [0x02,0x64,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], s[2:3], flat_scratch
+// CHECK: [0x02,0x66,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], s[2:3], vcc
+// CHECK: [0x02,0x6a,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], s[2:3], tba
+// CHECK: [0x02,0x6c,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], s[2:3], tma
+// CHECK: [0x02,0x6e,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], s[2:3], exec
+// CHECK: [0x02,0x7e,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x8d]
+
+s_xnor_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x8d,0x56,0x34,0x12,0xaf]
+
+s_xnor_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x8d,0x73,0x72,0x71,0x3f]
+
+s_lshl_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x8e]
+
+s_lshl_b32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x8e]
+
+s_lshl_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x8e]
+
+s_lshl_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x8e]
+
+s_lshl_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x8e]
+
+s_lshl_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x8e]
+
+s_lshl_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x8e]
+
+s_lshl_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x8e]
+
+s_lshl_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x8e]
+
+s_lshl_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x8e]
+
+s_lshl_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x8e]
+
+s_lshl_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x8e]
+
+s_lshl_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x8e]
+
+s_lshl_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x8e]
+
+s_lshl_b32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x8e]
+
+s_lshl_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x8e]
+
+s_lshl_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x8e]
+
+s_lshl_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x8e]
+
+s_lshl_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x8e]
+
+s_lshl_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x8e]
+
+s_lshl_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x8e]
+
+s_lshl_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x8e]
+
+s_lshl_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x8e]
+
+s_lshl_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x8e]
+
+s_lshl_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x8e]
+
+s_lshl_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x8e]
+
+s_lshl_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x8e]
+
+s_lshl_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x8e]
+
+s_lshl_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x8e]
+
+s_lshl_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x8e]
+
+s_lshl_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x8e]
+
+s_lshl_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x8e,0x56,0x34,0x12,0xaf]
+
+s_lshl_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x8e,0x73,0x72,0x71,0x3f]
+
+s_lshl_b32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x8e]
+
+s_lshl_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x8e]
+
+s_lshl_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x8e]
+
+s_lshl_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x8e]
+
+s_lshl_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x8e]
+
+s_lshl_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x8e]
+
+s_lshl_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x8e]
+
+s_lshl_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x8e]
+
+s_lshl_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x8e]
+
+s_lshl_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x8e]
+
+s_lshl_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x8e]
+
+s_lshl_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x8e]
+
+s_lshl_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x8e]
+
+s_lshl_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x8e]
+
+s_lshl_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x8e]
+
+s_lshl_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x8e]
+
+s_lshl_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x8e]
+
+s_lshl_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x8e,0x56,0x34,0x12,0xaf]
+
+s_lshl_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x8e,0x73,0x72,0x71,0x3f]
+
+s_lshl_b64 s[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0x8a,0x8e]
+
+s_lshl_b64 s[12:13], s[2:3], s2
+// CHECK: [0x02,0x02,0x8c,0x8e]
+
+s_lshl_b64 s[100:101], s[2:3], s2
+// CHECK: [0x02,0x02,0xe4,0x8e]
+
+s_lshl_b64 flat_scratch, s[2:3], s2
+// CHECK: [0x02,0x02,0xe6,0x8e]
+
+s_lshl_b64 vcc, s[2:3], s2
+// CHECK: [0x02,0x02,0xea,0x8e]
+
+s_lshl_b64 tba, s[2:3], s2
+// CHECK: [0x02,0x02,0xec,0x8e]
+
+s_lshl_b64 tma, s[2:3], s2
+// CHECK: [0x02,0x02,0xee,0x8e]
+
+s_lshl_b64 ttmp[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0xfa,0x8e]
+
+s_lshl_b64 exec, s[2:3], s2
+// CHECK: [0x02,0x02,0xfe,0x8e]
+
+s_lshl_b64 s[10:11], s[4:5], s2
+// CHECK: [0x04,0x02,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], s[100:101], s2
+// CHECK: [0x64,0x02,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], flat_scratch, s2
+// CHECK: [0x66,0x02,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], vcc, s2
+// CHECK: [0x6a,0x02,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], tba, s2
+// CHECK: [0x6c,0x02,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], tma, s2
+// CHECK: [0x6e,0x02,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], ttmp[10:11], s2
+// CHECK: [0x7a,0x02,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], exec, s2
+// CHECK: [0x7e,0x02,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], 0, s2
+// CHECK: [0x80,0x02,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], -1, s2
+// CHECK: [0xc1,0x02,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], 0.5, s2
+// CHECK: [0xf0,0x02,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], -4.0, s2
+// CHECK: [0xf7,0x02,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], 0xaf123456, s2
+// CHECK: [0xff,0x02,0x8a,0x8e,0x56,0x34,0x12,0xaf]
+
+s_lshl_b64 s[10:11], 0x3f717273, s2
+// CHECK: [0xff,0x02,0x8a,0x8e,0x73,0x72,0x71,0x3f]
+
+s_lshl_b64 s[10:11], s[2:3], s101
+// CHECK: [0x02,0x65,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], s[2:3], flat_scratch_lo
+// CHECK: [0x02,0x66,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], s[2:3], flat_scratch_hi
+// CHECK: [0x02,0x67,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], s[2:3], vcc_lo
+// CHECK: [0x02,0x6a,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], s[2:3], vcc_hi
+// CHECK: [0x02,0x6b,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], s[2:3], tba_lo
+// CHECK: [0x02,0x6c,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], s[2:3], tba_hi
+// CHECK: [0x02,0x6d,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], s[2:3], tma_lo
+// CHECK: [0x02,0x6e,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], s[2:3], tma_hi
+// CHECK: [0x02,0x6f,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], s[2:3], ttmp11
+// CHECK: [0x02,0x7b,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], s[2:3], m0
+// CHECK: [0x02,0x7c,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], s[2:3], exec_lo
+// CHECK: [0x02,0x7e,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], s[2:3], exec_hi
+// CHECK: [0x02,0x7f,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x8e]
+
+s_lshl_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x8e,0x56,0x34,0x12,0xaf]
+
+s_lshl_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x8e,0x73,0x72,0x71,0x3f]
+
+s_lshr_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x8f]
+
+s_lshr_b32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x8f]
+
+s_lshr_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x8f]
+
+s_lshr_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x8f]
+
+s_lshr_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x8f]
+
+s_lshr_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x8f]
+
+s_lshr_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x8f]
+
+s_lshr_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x8f]
+
+s_lshr_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x8f]
+
+s_lshr_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x8f]
+
+s_lshr_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x8f]
+
+s_lshr_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x8f]
+
+s_lshr_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x8f]
+
+s_lshr_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x8f]
+
+s_lshr_b32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x8f]
+
+s_lshr_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x8f]
+
+s_lshr_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x8f]
+
+s_lshr_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x8f]
+
+s_lshr_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x8f]
+
+s_lshr_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x8f]
+
+s_lshr_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x8f]
+
+s_lshr_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x8f]
+
+s_lshr_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x8f]
+
+s_lshr_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x8f]
+
+s_lshr_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x8f]
+
+s_lshr_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x8f]
+
+s_lshr_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x8f]
+
+s_lshr_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x8f]
+
+s_lshr_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x8f]
+
+s_lshr_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x8f]
+
+s_lshr_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x8f]
+
+s_lshr_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x8f,0x56,0x34,0x12,0xaf]
+
+s_lshr_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x8f,0x73,0x72,0x71,0x3f]
+
+s_lshr_b32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x8f]
+
+s_lshr_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x8f]
+
+s_lshr_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x8f]
+
+s_lshr_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x8f]
+
+s_lshr_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x8f]
+
+s_lshr_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x8f]
+
+s_lshr_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x8f]
+
+s_lshr_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x8f]
+
+s_lshr_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x8f]
+
+s_lshr_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x8f]
+
+s_lshr_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x8f]
+
+s_lshr_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x8f]
+
+s_lshr_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x8f]
+
+s_lshr_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x8f]
+
+s_lshr_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x8f]
+
+s_lshr_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x8f]
+
+s_lshr_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x8f]
+
+s_lshr_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x8f,0x56,0x34,0x12,0xaf]
+
+s_lshr_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x8f,0x73,0x72,0x71,0x3f]
+
+s_lshr_b64 s[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0x8a,0x8f]
+
+s_lshr_b64 s[12:13], s[2:3], s2
+// CHECK: [0x02,0x02,0x8c,0x8f]
+
+s_lshr_b64 s[100:101], s[2:3], s2
+// CHECK: [0x02,0x02,0xe4,0x8f]
+
+s_lshr_b64 flat_scratch, s[2:3], s2
+// CHECK: [0x02,0x02,0xe6,0x8f]
+
+s_lshr_b64 vcc, s[2:3], s2
+// CHECK: [0x02,0x02,0xea,0x8f]
+
+s_lshr_b64 tba, s[2:3], s2
+// CHECK: [0x02,0x02,0xec,0x8f]
+
+s_lshr_b64 tma, s[2:3], s2
+// CHECK: [0x02,0x02,0xee,0x8f]
+
+s_lshr_b64 ttmp[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0xfa,0x8f]
+
+s_lshr_b64 exec, s[2:3], s2
+// CHECK: [0x02,0x02,0xfe,0x8f]
+
+s_lshr_b64 s[10:11], s[4:5], s2
+// CHECK: [0x04,0x02,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], s[100:101], s2
+// CHECK: [0x64,0x02,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], flat_scratch, s2
+// CHECK: [0x66,0x02,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], vcc, s2
+// CHECK: [0x6a,0x02,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], tba, s2
+// CHECK: [0x6c,0x02,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], tma, s2
+// CHECK: [0x6e,0x02,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], ttmp[10:11], s2
+// CHECK: [0x7a,0x02,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], exec, s2
+// CHECK: [0x7e,0x02,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], 0, s2
+// CHECK: [0x80,0x02,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], -1, s2
+// CHECK: [0xc1,0x02,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], 0.5, s2
+// CHECK: [0xf0,0x02,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], -4.0, s2
+// CHECK: [0xf7,0x02,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], 0xaf123456, s2
+// CHECK: [0xff,0x02,0x8a,0x8f,0x56,0x34,0x12,0xaf]
+
+s_lshr_b64 s[10:11], 0x3f717273, s2
+// CHECK: [0xff,0x02,0x8a,0x8f,0x73,0x72,0x71,0x3f]
+
+s_lshr_b64 s[10:11], s[2:3], s101
+// CHECK: [0x02,0x65,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], s[2:3], flat_scratch_lo
+// CHECK: [0x02,0x66,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], s[2:3], flat_scratch_hi
+// CHECK: [0x02,0x67,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], s[2:3], vcc_lo
+// CHECK: [0x02,0x6a,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], s[2:3], vcc_hi
+// CHECK: [0x02,0x6b,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], s[2:3], tba_lo
+// CHECK: [0x02,0x6c,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], s[2:3], tba_hi
+// CHECK: [0x02,0x6d,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], s[2:3], tma_lo
+// CHECK: [0x02,0x6e,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], s[2:3], tma_hi
+// CHECK: [0x02,0x6f,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], s[2:3], ttmp11
+// CHECK: [0x02,0x7b,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], s[2:3], m0
+// CHECK: [0x02,0x7c,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], s[2:3], exec_lo
+// CHECK: [0x02,0x7e,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], s[2:3], exec_hi
+// CHECK: [0x02,0x7f,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x8f]
+
+s_lshr_b64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x8f,0x56,0x34,0x12,0xaf]
+
+s_lshr_b64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x8f,0x73,0x72,0x71,0x3f]
+
+s_ashr_i32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x90]
+
+s_ashr_i32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x90]
+
+s_ashr_i32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x90]
+
+s_ashr_i32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x90]
+
+s_ashr_i32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x90]
+
+s_ashr_i32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x90]
+
+s_ashr_i32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x90]
+
+s_ashr_i32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x90]
+
+s_ashr_i32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x90]
+
+s_ashr_i32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x90]
+
+s_ashr_i32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x90]
+
+s_ashr_i32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x90]
+
+s_ashr_i32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x90]
+
+s_ashr_i32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x90]
+
+s_ashr_i32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x90]
+
+s_ashr_i32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x90]
+
+s_ashr_i32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x90]
+
+s_ashr_i32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x90]
+
+s_ashr_i32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x90]
+
+s_ashr_i32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x90]
+
+s_ashr_i32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x90]
+
+s_ashr_i32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x90]
+
+s_ashr_i32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x90]
+
+s_ashr_i32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x90]
+
+s_ashr_i32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x90]
+
+s_ashr_i32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x90]
+
+s_ashr_i32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x90]
+
+s_ashr_i32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x90]
+
+s_ashr_i32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x90]
+
+s_ashr_i32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x90]
+
+s_ashr_i32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x90]
+
+s_ashr_i32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x90,0x56,0x34,0x12,0xaf]
+
+s_ashr_i32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x90,0x73,0x72,0x71,0x3f]
+
+s_ashr_i32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x90]
+
+s_ashr_i32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x90]
+
+s_ashr_i32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x90]
+
+s_ashr_i32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x90]
+
+s_ashr_i32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x90]
+
+s_ashr_i32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x90]
+
+s_ashr_i32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x90]
+
+s_ashr_i32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x90]
+
+s_ashr_i32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x90]
+
+s_ashr_i32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x90]
+
+s_ashr_i32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x90]
+
+s_ashr_i32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x90]
+
+s_ashr_i32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x90]
+
+s_ashr_i32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x90]
+
+s_ashr_i32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x90]
+
+s_ashr_i32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x90]
+
+s_ashr_i32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x90]
+
+s_ashr_i32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x90,0x56,0x34,0x12,0xaf]
+
+s_ashr_i32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x90,0x73,0x72,0x71,0x3f]
+
+s_ashr_i64 s[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0x8a,0x90]
+
+s_ashr_i64 s[12:13], s[2:3], s2
+// CHECK: [0x02,0x02,0x8c,0x90]
+
+s_ashr_i64 s[100:101], s[2:3], s2
+// CHECK: [0x02,0x02,0xe4,0x90]
+
+s_ashr_i64 flat_scratch, s[2:3], s2
+// CHECK: [0x02,0x02,0xe6,0x90]
+
+s_ashr_i64 vcc, s[2:3], s2
+// CHECK: [0x02,0x02,0xea,0x90]
+
+s_ashr_i64 tba, s[2:3], s2
+// CHECK: [0x02,0x02,0xec,0x90]
+
+s_ashr_i64 tma, s[2:3], s2
+// CHECK: [0x02,0x02,0xee,0x90]
+
+s_ashr_i64 ttmp[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0xfa,0x90]
+
+s_ashr_i64 exec, s[2:3], s2
+// CHECK: [0x02,0x02,0xfe,0x90]
+
+s_ashr_i64 s[10:11], s[4:5], s2
+// CHECK: [0x04,0x02,0x8a,0x90]
+
+s_ashr_i64 s[10:11], s[100:101], s2
+// CHECK: [0x64,0x02,0x8a,0x90]
+
+s_ashr_i64 s[10:11], flat_scratch, s2
+// CHECK: [0x66,0x02,0x8a,0x90]
+
+s_ashr_i64 s[10:11], vcc, s2
+// CHECK: [0x6a,0x02,0x8a,0x90]
+
+s_ashr_i64 s[10:11], tba, s2
+// CHECK: [0x6c,0x02,0x8a,0x90]
+
+s_ashr_i64 s[10:11], tma, s2
+// CHECK: [0x6e,0x02,0x8a,0x90]
+
+s_ashr_i64 s[10:11], ttmp[10:11], s2
+// CHECK: [0x7a,0x02,0x8a,0x90]
+
+s_ashr_i64 s[10:11], exec, s2
+// CHECK: [0x7e,0x02,0x8a,0x90]
+
+s_ashr_i64 s[10:11], 0, s2
+// CHECK: [0x80,0x02,0x8a,0x90]
+
+s_ashr_i64 s[10:11], -1, s2
+// CHECK: [0xc1,0x02,0x8a,0x90]
+
+s_ashr_i64 s[10:11], 0.5, s2
+// CHECK: [0xf0,0x02,0x8a,0x90]
+
+s_ashr_i64 s[10:11], -4.0, s2
+// CHECK: [0xf7,0x02,0x8a,0x90]
+
+s_ashr_i64 s[10:11], 0xaf123456, s2
+// CHECK: [0xff,0x02,0x8a,0x90,0x56,0x34,0x12,0xaf]
+
+s_ashr_i64 s[10:11], 0x3f717273, s2
+// CHECK: [0xff,0x02,0x8a,0x90,0x73,0x72,0x71,0x3f]
+
+s_ashr_i64 s[10:11], s[2:3], s101
+// CHECK: [0x02,0x65,0x8a,0x90]
+
+s_ashr_i64 s[10:11], s[2:3], flat_scratch_lo
+// CHECK: [0x02,0x66,0x8a,0x90]
+
+s_ashr_i64 s[10:11], s[2:3], flat_scratch_hi
+// CHECK: [0x02,0x67,0x8a,0x90]
+
+s_ashr_i64 s[10:11], s[2:3], vcc_lo
+// CHECK: [0x02,0x6a,0x8a,0x90]
+
+s_ashr_i64 s[10:11], s[2:3], vcc_hi
+// CHECK: [0x02,0x6b,0x8a,0x90]
+
+s_ashr_i64 s[10:11], s[2:3], tba_lo
+// CHECK: [0x02,0x6c,0x8a,0x90]
+
+s_ashr_i64 s[10:11], s[2:3], tba_hi
+// CHECK: [0x02,0x6d,0x8a,0x90]
+
+s_ashr_i64 s[10:11], s[2:3], tma_lo
+// CHECK: [0x02,0x6e,0x8a,0x90]
+
+s_ashr_i64 s[10:11], s[2:3], tma_hi
+// CHECK: [0x02,0x6f,0x8a,0x90]
+
+s_ashr_i64 s[10:11], s[2:3], ttmp11
+// CHECK: [0x02,0x7b,0x8a,0x90]
+
+s_ashr_i64 s[10:11], s[2:3], m0
+// CHECK: [0x02,0x7c,0x8a,0x90]
+
+s_ashr_i64 s[10:11], s[2:3], exec_lo
+// CHECK: [0x02,0x7e,0x8a,0x90]
+
+s_ashr_i64 s[10:11], s[2:3], exec_hi
+// CHECK: [0x02,0x7f,0x8a,0x90]
+
+s_ashr_i64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x90]
+
+s_ashr_i64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x90]
+
+s_ashr_i64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x90]
+
+s_ashr_i64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x90]
+
+s_ashr_i64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x90,0x56,0x34,0x12,0xaf]
+
+s_ashr_i64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x90,0x73,0x72,0x71,0x3f]
+
+s_bfm_b32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x91]
+
+s_bfm_b32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x91]
+
+s_bfm_b32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x91]
+
+s_bfm_b32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x91]
+
+s_bfm_b32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x91]
+
+s_bfm_b32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x91]
+
+s_bfm_b32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x91]
+
+s_bfm_b32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x91]
+
+s_bfm_b32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x91]
+
+s_bfm_b32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x91]
+
+s_bfm_b32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x91]
+
+s_bfm_b32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x91]
+
+s_bfm_b32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x91]
+
+s_bfm_b32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x91]
+
+s_bfm_b32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x91]
+
+s_bfm_b32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x91]
+
+s_bfm_b32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x91]
+
+s_bfm_b32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x91]
+
+s_bfm_b32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x91]
+
+s_bfm_b32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x91]
+
+s_bfm_b32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x91]
+
+s_bfm_b32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x91]
+
+s_bfm_b32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x91]
+
+s_bfm_b32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x91]
+
+s_bfm_b32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x91]
+
+s_bfm_b32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x91]
+
+s_bfm_b32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x91]
+
+s_bfm_b32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x91]
+
+s_bfm_b32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x91]
+
+s_bfm_b32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x91]
+
+s_bfm_b32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x91]
+
+s_bfm_b32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x91,0x56,0x34,0x12,0xaf]
+
+s_bfm_b32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x91,0x73,0x72,0x71,0x3f]
+
+s_bfm_b32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x91]
+
+s_bfm_b32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x91]
+
+s_bfm_b32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x91]
+
+s_bfm_b32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x91]
+
+s_bfm_b32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x91]
+
+s_bfm_b32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x91]
+
+s_bfm_b32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x91]
+
+s_bfm_b32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x91]
+
+s_bfm_b32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x91]
+
+s_bfm_b32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x91]
+
+s_bfm_b32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x91]
+
+s_bfm_b32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x91]
+
+s_bfm_b32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x91]
+
+s_bfm_b32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x91]
+
+s_bfm_b32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x91]
+
+s_bfm_b32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x91]
+
+s_bfm_b32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x91]
+
+s_bfm_b32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x91,0x56,0x34,0x12,0xaf]
+
+s_bfm_b32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x91,0x73,0x72,0x71,0x3f]
+
+s_bfm_b64 s[10:11], s1, s2
+// CHECK: [0x01,0x02,0x8a,0x91]
+
+s_bfm_b64 s[12:13], s1, s2
+// CHECK: [0x01,0x02,0x8c,0x91]
+
+s_bfm_b64 s[100:101], s1, s2
+// CHECK: [0x01,0x02,0xe4,0x91]
+
+s_bfm_b64 flat_scratch, s1, s2
+// CHECK: [0x01,0x02,0xe6,0x91]
+
+s_bfm_b64 vcc, s1, s2
+// CHECK: [0x01,0x02,0xea,0x91]
+
+s_bfm_b64 tba, s1, s2
+// CHECK: [0x01,0x02,0xec,0x91]
+
+s_bfm_b64 tma, s1, s2
+// CHECK: [0x01,0x02,0xee,0x91]
+
+s_bfm_b64 ttmp[10:11], s1, s2
+// CHECK: [0x01,0x02,0xfa,0x91]
+
+s_bfm_b64 exec, s1, s2
+// CHECK: [0x01,0x02,0xfe,0x91]
+
+s_bfm_b64 s[10:11], s101, s2
+// CHECK: [0x65,0x02,0x8a,0x91]
+
+s_bfm_b64 s[10:11], flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x8a,0x91]
+
+s_bfm_b64 s[10:11], flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x8a,0x91]
+
+s_bfm_b64 s[10:11], vcc_lo, s2
+// CHECK: [0x6a,0x02,0x8a,0x91]
+
+s_bfm_b64 s[10:11], vcc_hi, s2
+// CHECK: [0x6b,0x02,0x8a,0x91]
+
+s_bfm_b64 s[10:11], tba_lo, s2
+// CHECK: [0x6c,0x02,0x8a,0x91]
+
+s_bfm_b64 s[10:11], tba_hi, s2
+// CHECK: [0x6d,0x02,0x8a,0x91]
+
+s_bfm_b64 s[10:11], tma_lo, s2
+// CHECK: [0x6e,0x02,0x8a,0x91]
+
+s_bfm_b64 s[10:11], tma_hi, s2
+// CHECK: [0x6f,0x02,0x8a,0x91]
+
+s_bfm_b64 s[10:11], ttmp11, s2
+// CHECK: [0x7b,0x02,0x8a,0x91]
+
+s_bfm_b64 s[10:11], m0, s2
+// CHECK: [0x7c,0x02,0x8a,0x91]
+
+s_bfm_b64 s[10:11], exec_lo, s2
+// CHECK: [0x7e,0x02,0x8a,0x91]
+
+s_bfm_b64 s[10:11], exec_hi, s2
+// CHECK: [0x7f,0x02,0x8a,0x91]
+
+s_bfm_b64 s[10:11], 0, s2
+// CHECK: [0x80,0x02,0x8a,0x91]
+
+s_bfm_b64 s[10:11], -1, s2
+// CHECK: [0xc1,0x02,0x8a,0x91]
+
+s_bfm_b64 s[10:11], 0.5, s2
+// CHECK: [0xf0,0x02,0x8a,0x91]
+
+s_bfm_b64 s[10:11], -4.0, s2
+// CHECK: [0xf7,0x02,0x8a,0x91]
+
+s_bfm_b64 s[10:11], 0xaf123456, s2
+// CHECK: [0xff,0x02,0x8a,0x91,0x56,0x34,0x12,0xaf]
+
+s_bfm_b64 s[10:11], 0x3f717273, s2
+// CHECK: [0xff,0x02,0x8a,0x91,0x73,0x72,0x71,0x3f]
+
+s_bfm_b64 s[10:11], s1, s101
+// CHECK: [0x01,0x65,0x8a,0x91]
+
+s_bfm_b64 s[10:11], s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x8a,0x91]
+
+s_bfm_b64 s[10:11], s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x8a,0x91]
+
+s_bfm_b64 s[10:11], s1, vcc_lo
+// CHECK: [0x01,0x6a,0x8a,0x91]
+
+s_bfm_b64 s[10:11], s1, vcc_hi
+// CHECK: [0x01,0x6b,0x8a,0x91]
+
+s_bfm_b64 s[10:11], s1, tba_lo
+// CHECK: [0x01,0x6c,0x8a,0x91]
+
+s_bfm_b64 s[10:11], s1, tba_hi
+// CHECK: [0x01,0x6d,0x8a,0x91]
+
+s_bfm_b64 s[10:11], s1, tma_lo
+// CHECK: [0x01,0x6e,0x8a,0x91]
+
+s_bfm_b64 s[10:11], s1, tma_hi
+// CHECK: [0x01,0x6f,0x8a,0x91]
+
+s_bfm_b64 s[10:11], s1, ttmp11
+// CHECK: [0x01,0x7b,0x8a,0x91]
+
+s_bfm_b64 s[10:11], s1, m0
+// CHECK: [0x01,0x7c,0x8a,0x91]
+
+s_bfm_b64 s[10:11], s1, exec_lo
+// CHECK: [0x01,0x7e,0x8a,0x91]
+
+s_bfm_b64 s[10:11], s1, exec_hi
+// CHECK: [0x01,0x7f,0x8a,0x91]
+
+s_bfm_b64 s[10:11], s1, 0
+// CHECK: [0x01,0x80,0x8a,0x91]
+
+s_bfm_b64 s[10:11], s1, -1
+// CHECK: [0x01,0xc1,0x8a,0x91]
+
+s_bfm_b64 s[10:11], s1, 0.5
+// CHECK: [0x01,0xf0,0x8a,0x91]
+
+s_bfm_b64 s[10:11], s1, -4.0
+// CHECK: [0x01,0xf7,0x8a,0x91]
+
+s_bfm_b64 s[10:11], s1, 0xaf123456
+// CHECK: [0x01,0xff,0x8a,0x91,0x56,0x34,0x12,0xaf]
+
+s_bfm_b64 s[10:11], s1, 0x3f717273
+// CHECK: [0x01,0xff,0x8a,0x91,0x73,0x72,0x71,0x3f]
+
+s_mul_i32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x92]
+
+s_mul_i32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x92]
+
+s_mul_i32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x92]
+
+s_mul_i32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x92]
+
+s_mul_i32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x92]
+
+s_mul_i32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x92]
+
+s_mul_i32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x92]
+
+s_mul_i32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x92]
+
+s_mul_i32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x92]
+
+s_mul_i32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x92]
+
+s_mul_i32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x92]
+
+s_mul_i32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x92]
+
+s_mul_i32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x92]
+
+s_mul_i32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x92]
+
+s_mul_i32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x92]
+
+s_mul_i32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x92]
+
+s_mul_i32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x92]
+
+s_mul_i32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x92]
+
+s_mul_i32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x92]
+
+s_mul_i32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x92]
+
+s_mul_i32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x92]
+
+s_mul_i32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x92]
+
+s_mul_i32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x92]
+
+s_mul_i32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x92]
+
+s_mul_i32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x92]
+
+s_mul_i32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x92]
+
+s_mul_i32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x92]
+
+s_mul_i32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x92]
+
+s_mul_i32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x92]
+
+s_mul_i32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x92]
+
+s_mul_i32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x92]
+
+s_mul_i32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x92,0x56,0x34,0x12,0xaf]
+
+s_mul_i32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x92,0x73,0x72,0x71,0x3f]
+
+s_mul_i32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x92]
+
+s_mul_i32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x92]
+
+s_mul_i32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x92]
+
+s_mul_i32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x92]
+
+s_mul_i32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x92]
+
+s_mul_i32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x92]
+
+s_mul_i32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x92]
+
+s_mul_i32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x92]
+
+s_mul_i32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x92]
+
+s_mul_i32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x92]
+
+s_mul_i32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x92]
+
+s_mul_i32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x92]
+
+s_mul_i32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x92]
+
+s_mul_i32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x92]
+
+s_mul_i32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x92]
+
+s_mul_i32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x92]
+
+s_mul_i32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x92]
+
+s_mul_i32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x92,0x56,0x34,0x12,0xaf]
+
+s_mul_i32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x92,0x73,0x72,0x71,0x3f]
+
+s_bfe_u32 s5, s1, s2
+// CHECK: [0x01,0x02,0x85,0x92]
+
+s_bfe_u32 s101, s1, s2
+// CHECK: [0x01,0x02,0xe5,0x92]
+
+s_bfe_u32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0xe6,0x92]
+
+s_bfe_u32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0xe7,0x92]
+
+s_bfe_u32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0xea,0x92]
+
+s_bfe_u32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0xeb,0x92]
+
+s_bfe_u32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0xec,0x92]
+
+s_bfe_u32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0xed,0x92]
+
+s_bfe_u32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0xee,0x92]
+
+s_bfe_u32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0xef,0x92]
+
+s_bfe_u32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0xfb,0x92]
+
+s_bfe_u32 m0, s1, s2
+// CHECK: [0x01,0x02,0xfc,0x92]
+
+s_bfe_u32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0xfe,0x92]
+
+s_bfe_u32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0xff,0x92]
+
+s_bfe_u32 s5, s101, s2
+// CHECK: [0x65,0x02,0x85,0x92]
+
+s_bfe_u32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x85,0x92]
+
+s_bfe_u32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x85,0x92]
+
+s_bfe_u32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x85,0x92]
+
+s_bfe_u32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x85,0x92]
+
+s_bfe_u32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x85,0x92]
+
+s_bfe_u32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x85,0x92]
+
+s_bfe_u32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x85,0x92]
+
+s_bfe_u32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x85,0x92]
+
+s_bfe_u32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x85,0x92]
+
+s_bfe_u32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x85,0x92]
+
+s_bfe_u32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x85,0x92]
+
+s_bfe_u32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x85,0x92]
+
+s_bfe_u32 s5, 0, s2
+// CHECK: [0x80,0x02,0x85,0x92]
+
+s_bfe_u32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x85,0x92]
+
+s_bfe_u32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x85,0x92]
+
+s_bfe_u32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x85,0x92]
+
+s_bfe_u32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x85,0x92,0x56,0x34,0x12,0xaf]
+
+s_bfe_u32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x85,0x92,0x73,0x72,0x71,0x3f]
+
+s_bfe_u32 s5, s1, s101
+// CHECK: [0x01,0x65,0x85,0x92]
+
+s_bfe_u32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x85,0x92]
+
+s_bfe_u32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x85,0x92]
+
+s_bfe_u32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x85,0x92]
+
+s_bfe_u32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x85,0x92]
+
+s_bfe_u32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x85,0x92]
+
+s_bfe_u32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x85,0x92]
+
+s_bfe_u32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x85,0x92]
+
+s_bfe_u32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x85,0x92]
+
+s_bfe_u32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x85,0x92]
+
+s_bfe_u32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x85,0x92]
+
+s_bfe_u32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x85,0x92]
+
+s_bfe_u32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x85,0x92]
+
+s_bfe_u32 s5, s1, 0
+// CHECK: [0x01,0x80,0x85,0x92]
+
+s_bfe_u32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x85,0x92]
+
+s_bfe_u32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x85,0x92]
+
+s_bfe_u32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x85,0x92]
+
+s_bfe_u32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x85,0x92,0x56,0x34,0x12,0xaf]
+
+s_bfe_u32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x85,0x92,0x73,0x72,0x71,0x3f]
+
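+// s_bfe_i32: signed bit-field extract; same operand layout as s_bfe_u32,
+// with the extracted field sign-extended into the destination.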
+s_bfe_i32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x93]
+
+s_bfe_i32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x93]
+
+s_bfe_i32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x93]
+
+s_bfe_i32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x93]
+
+s_bfe_i32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x93]
+
+s_bfe_i32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x93]
+
+s_bfe_i32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x93]
+
+s_bfe_i32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x93]
+
+s_bfe_i32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x93]
+
+s_bfe_i32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x93]
+
+s_bfe_i32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x93]
+
+s_bfe_i32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x93]
+
+s_bfe_i32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x93]
+
+s_bfe_i32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x93]
+
+s_bfe_i32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x93]
+
+s_bfe_i32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x93]
+
+s_bfe_i32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x93]
+
+s_bfe_i32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x93]
+
+s_bfe_i32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x93]
+
+s_bfe_i32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x93]
+
+s_bfe_i32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x93]
+
+s_bfe_i32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x93]
+
+s_bfe_i32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x93]
+
+s_bfe_i32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x93]
+
+s_bfe_i32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x93]
+
+s_bfe_i32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x93]
+
+s_bfe_i32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x93]
+
+s_bfe_i32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x93]
+
+s_bfe_i32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x93]
+
+s_bfe_i32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x93]
+
+s_bfe_i32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x93]
+
+s_bfe_i32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x93,0x56,0x34,0x12,0xaf]
+
+s_bfe_i32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x93,0x73,0x72,0x71,0x3f]
+
+s_bfe_i32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x93]
+
+s_bfe_i32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x93]
+
+s_bfe_i32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x93]
+
+s_bfe_i32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x93]
+
+s_bfe_i32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x93]
+
+s_bfe_i32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x93]
+
+s_bfe_i32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x93]
+
+s_bfe_i32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x93]
+
+s_bfe_i32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x93]
+
+s_bfe_i32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x93]
+
+s_bfe_i32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x93]
+
+s_bfe_i32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x93]
+
+s_bfe_i32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x93]
+
+s_bfe_i32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x93]
+
+s_bfe_i32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x93]
+
+s_bfe_i32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x93]
+
+s_bfe_i32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x93]
+
+s_bfe_i32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x93,0x56,0x34,0x12,0xaf]
+
+s_bfe_i32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x93,0x73,0x72,0x71,0x3f]
+
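+// s_bfe_u64: 64-bit unsigned bit-field extract; dst and src0 take 64-bit
+// operands (SGPR pairs or 64-bit aliases such as vcc, exec, flat_scratch),
+// while the field spec in src1 stays 32-bit.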
+s_bfe_u64 s[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0x8a,0x93]
+
+s_bfe_u64 s[12:13], s[2:3], s2
+// CHECK: [0x02,0x02,0x8c,0x93]
+
+s_bfe_u64 s[100:101], s[2:3], s2
+// CHECK: [0x02,0x02,0xe4,0x93]
+
+s_bfe_u64 flat_scratch, s[2:3], s2
+// CHECK: [0x02,0x02,0xe6,0x93]
+
+s_bfe_u64 vcc, s[2:3], s2
+// CHECK: [0x02,0x02,0xea,0x93]
+
+s_bfe_u64 tba, s[2:3], s2
+// CHECK: [0x02,0x02,0xec,0x93]
+
+s_bfe_u64 tma, s[2:3], s2
+// CHECK: [0x02,0x02,0xee,0x93]
+
+s_bfe_u64 ttmp[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0xfa,0x93]
+
+s_bfe_u64 exec, s[2:3], s2
+// CHECK: [0x02,0x02,0xfe,0x93]
+
+s_bfe_u64 s[10:11], s[4:5], s2
+// CHECK: [0x04,0x02,0x8a,0x93]
+
+s_bfe_u64 s[10:11], s[100:101], s2
+// CHECK: [0x64,0x02,0x8a,0x93]
+
+s_bfe_u64 s[10:11], flat_scratch, s2
+// CHECK: [0x66,0x02,0x8a,0x93]
+
+s_bfe_u64 s[10:11], vcc, s2
+// CHECK: [0x6a,0x02,0x8a,0x93]
+
+s_bfe_u64 s[10:11], tba, s2
+// CHECK: [0x6c,0x02,0x8a,0x93]
+
+s_bfe_u64 s[10:11], tma, s2
+// CHECK: [0x6e,0x02,0x8a,0x93]
+
+s_bfe_u64 s[10:11], ttmp[10:11], s2
+// CHECK: [0x7a,0x02,0x8a,0x93]
+
+s_bfe_u64 s[10:11], exec, s2
+// CHECK: [0x7e,0x02,0x8a,0x93]
+
+s_bfe_u64 s[10:11], 0, s2
+// CHECK: [0x80,0x02,0x8a,0x93]
+
+s_bfe_u64 s[10:11], -1, s2
+// CHECK: [0xc1,0x02,0x8a,0x93]
+
+s_bfe_u64 s[10:11], 0.5, s2
+// CHECK: [0xf0,0x02,0x8a,0x93]
+
+s_bfe_u64 s[10:11], -4.0, s2
+// CHECK: [0xf7,0x02,0x8a,0x93]
+
+s_bfe_u64 s[10:11], 0xaf123456, s2
+// CHECK: [0xff,0x02,0x8a,0x93,0x56,0x34,0x12,0xaf]
+
+s_bfe_u64 s[10:11], 0x3f717273, s2
+// CHECK: [0xff,0x02,0x8a,0x93,0x73,0x72,0x71,0x3f]
+
+s_bfe_u64 s[10:11], s[2:3], s101
+// CHECK: [0x02,0x65,0x8a,0x93]
+
+s_bfe_u64 s[10:11], s[2:3], flat_scratch_lo
+// CHECK: [0x02,0x66,0x8a,0x93]
+
+s_bfe_u64 s[10:11], s[2:3], flat_scratch_hi
+// CHECK: [0x02,0x67,0x8a,0x93]
+
+s_bfe_u64 s[10:11], s[2:3], vcc_lo
+// CHECK: [0x02,0x6a,0x8a,0x93]
+
+s_bfe_u64 s[10:11], s[2:3], vcc_hi
+// CHECK: [0x02,0x6b,0x8a,0x93]
+
+s_bfe_u64 s[10:11], s[2:3], tba_lo
+// CHECK: [0x02,0x6c,0x8a,0x93]
+
+s_bfe_u64 s[10:11], s[2:3], tba_hi
+// CHECK: [0x02,0x6d,0x8a,0x93]
+
+s_bfe_u64 s[10:11], s[2:3], tma_lo
+// CHECK: [0x02,0x6e,0x8a,0x93]
+
+s_bfe_u64 s[10:11], s[2:3], tma_hi
+// CHECK: [0x02,0x6f,0x8a,0x93]
+
+s_bfe_u64 s[10:11], s[2:3], ttmp11
+// CHECK: [0x02,0x7b,0x8a,0x93]
+
+s_bfe_u64 s[10:11], s[2:3], m0
+// CHECK: [0x02,0x7c,0x8a,0x93]
+
+s_bfe_u64 s[10:11], s[2:3], exec_lo
+// CHECK: [0x02,0x7e,0x8a,0x93]
+
+s_bfe_u64 s[10:11], s[2:3], exec_hi
+// CHECK: [0x02,0x7f,0x8a,0x93]
+
+s_bfe_u64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x8a,0x93]
+
+s_bfe_u64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x8a,0x93]
+
+s_bfe_u64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x8a,0x93]
+
+s_bfe_u64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x8a,0x93]
+
+s_bfe_u64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x8a,0x93,0x56,0x34,0x12,0xaf]
+
+s_bfe_u64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x8a,0x93,0x73,0x72,0x71,0x3f]
+
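+// s_bfe_i64: signed 64-bit variant of the bit-field extract above.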
+s_bfe_i64 s[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0x0a,0x94]
+
+s_bfe_i64 s[12:13], s[2:3], s2
+// CHECK: [0x02,0x02,0x0c,0x94]
+
+s_bfe_i64 s[100:101], s[2:3], s2
+// CHECK: [0x02,0x02,0x64,0x94]
+
+s_bfe_i64 flat_scratch, s[2:3], s2
+// CHECK: [0x02,0x02,0x66,0x94]
+
+s_bfe_i64 vcc, s[2:3], s2
+// CHECK: [0x02,0x02,0x6a,0x94]
+
+s_bfe_i64 tba, s[2:3], s2
+// CHECK: [0x02,0x02,0x6c,0x94]
+
+s_bfe_i64 tma, s[2:3], s2
+// CHECK: [0x02,0x02,0x6e,0x94]
+
+s_bfe_i64 ttmp[10:11], s[2:3], s2
+// CHECK: [0x02,0x02,0x7a,0x94]
+
+s_bfe_i64 exec, s[2:3], s2
+// CHECK: [0x02,0x02,0x7e,0x94]
+
+s_bfe_i64 s[10:11], s[4:5], s2
+// CHECK: [0x04,0x02,0x0a,0x94]
+
+s_bfe_i64 s[10:11], s[100:101], s2
+// CHECK: [0x64,0x02,0x0a,0x94]
+
+s_bfe_i64 s[10:11], flat_scratch, s2
+// CHECK: [0x66,0x02,0x0a,0x94]
+
+s_bfe_i64 s[10:11], vcc, s2
+// CHECK: [0x6a,0x02,0x0a,0x94]
+
+s_bfe_i64 s[10:11], tba, s2
+// CHECK: [0x6c,0x02,0x0a,0x94]
+
+s_bfe_i64 s[10:11], tma, s2
+// CHECK: [0x6e,0x02,0x0a,0x94]
+
+s_bfe_i64 s[10:11], ttmp[10:11], s2
+// CHECK: [0x7a,0x02,0x0a,0x94]
+
+s_bfe_i64 s[10:11], exec, s2
+// CHECK: [0x7e,0x02,0x0a,0x94]
+
+s_bfe_i64 s[10:11], 0, s2
+// CHECK: [0x80,0x02,0x0a,0x94]
+
+s_bfe_i64 s[10:11], -1, s2
+// CHECK: [0xc1,0x02,0x0a,0x94]
+
+s_bfe_i64 s[10:11], 0.5, s2
+// CHECK: [0xf0,0x02,0x0a,0x94]
+
+s_bfe_i64 s[10:11], -4.0, s2
+// CHECK: [0xf7,0x02,0x0a,0x94]
+
+s_bfe_i64 s[10:11], 0xaf123456, s2
+// CHECK: [0xff,0x02,0x0a,0x94,0x56,0x34,0x12,0xaf]
+
+s_bfe_i64 s[10:11], 0x3f717273, s2
+// CHECK: [0xff,0x02,0x0a,0x94,0x73,0x72,0x71,0x3f]
+
+s_bfe_i64 s[10:11], s[2:3], s101
+// CHECK: [0x02,0x65,0x0a,0x94]
+
+s_bfe_i64 s[10:11], s[2:3], flat_scratch_lo
+// CHECK: [0x02,0x66,0x0a,0x94]
+
+s_bfe_i64 s[10:11], s[2:3], flat_scratch_hi
+// CHECK: [0x02,0x67,0x0a,0x94]
+
+s_bfe_i64 s[10:11], s[2:3], vcc_lo
+// CHECK: [0x02,0x6a,0x0a,0x94]
+
+s_bfe_i64 s[10:11], s[2:3], vcc_hi
+// CHECK: [0x02,0x6b,0x0a,0x94]
+
+s_bfe_i64 s[10:11], s[2:3], tba_lo
+// CHECK: [0x02,0x6c,0x0a,0x94]
+
+s_bfe_i64 s[10:11], s[2:3], tba_hi
+// CHECK: [0x02,0x6d,0x0a,0x94]
+
+s_bfe_i64 s[10:11], s[2:3], tma_lo
+// CHECK: [0x02,0x6e,0x0a,0x94]
+
+s_bfe_i64 s[10:11], s[2:3], tma_hi
+// CHECK: [0x02,0x6f,0x0a,0x94]
+
+s_bfe_i64 s[10:11], s[2:3], ttmp11
+// CHECK: [0x02,0x7b,0x0a,0x94]
+
+s_bfe_i64 s[10:11], s[2:3], m0
+// CHECK: [0x02,0x7c,0x0a,0x94]
+
+s_bfe_i64 s[10:11], s[2:3], exec_lo
+// CHECK: [0x02,0x7e,0x0a,0x94]
+
+s_bfe_i64 s[10:11], s[2:3], exec_hi
+// CHECK: [0x02,0x7f,0x0a,0x94]
+
+s_bfe_i64 s[10:11], s[2:3], 0
+// CHECK: [0x02,0x80,0x0a,0x94]
+
+s_bfe_i64 s[10:11], s[2:3], -1
+// CHECK: [0x02,0xc1,0x0a,0x94]
+
+s_bfe_i64 s[10:11], s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x0a,0x94]
+
+s_bfe_i64 s[10:11], s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x0a,0x94]
+
+s_bfe_i64 s[10:11], s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x0a,0x94,0x56,0x34,0x12,0xaf]
+
+s_bfe_i64 s[10:11], s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x0a,0x94,0x73,0x72,0x71,0x3f]
+
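+// s_cbranch_g_fork: conditional branch fork; src0 is the 64-bit compare mask
+// and src1 the 64-bit target address, so only 64-bit operands appear below.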
+s_cbranch_g_fork s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x80,0x94]
+
+s_cbranch_g_fork s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x80,0x94]
+
+s_cbranch_g_fork s[100:101], s[4:5]
+// CHECK: [0x64,0x04,0x80,0x94]
+
+s_cbranch_g_fork flat_scratch, s[4:5]
+// CHECK: [0x66,0x04,0x80,0x94]
+
+s_cbranch_g_fork vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x80,0x94]
+
+s_cbranch_g_fork tba, s[4:5]
+// CHECK: [0x6c,0x04,0x80,0x94]
+
+s_cbranch_g_fork tma, s[4:5]
+// CHECK: [0x6e,0x04,0x80,0x94]
+
+s_cbranch_g_fork ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x80,0x94]
+
+s_cbranch_g_fork exec, s[4:5]
+// CHECK: [0x7e,0x04,0x80,0x94]
+
+s_cbranch_g_fork s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x80,0x94]
+
+s_cbranch_g_fork s[2:3], s[100:101]
+// CHECK: [0x02,0x64,0x80,0x94]
+
+s_cbranch_g_fork s[2:3], flat_scratch
+// CHECK: [0x02,0x66,0x80,0x94]
+
+s_cbranch_g_fork s[2:3], vcc
+// CHECK: [0x02,0x6a,0x80,0x94]
+
+s_cbranch_g_fork s[2:3], tba
+// CHECK: [0x02,0x6c,0x80,0x94]
+
+s_cbranch_g_fork s[2:3], tma
+// CHECK: [0x02,0x6e,0x80,0x94]
+
+s_cbranch_g_fork s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x80,0x94]
+
+s_cbranch_g_fork s[2:3], exec
+// CHECK: [0x02,0x7e,0x80,0x94]
+
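+// s_absdiff_i32: dst = |src0 - src1|, with SCC set when the result is non-zero.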
+s_absdiff_i32 s5, s1, s2
+// CHECK: [0x01,0x02,0x05,0x95]
+
+s_absdiff_i32 s101, s1, s2
+// CHECK: [0x01,0x02,0x65,0x95]
+
+s_absdiff_i32 flat_scratch_lo, s1, s2
+// CHECK: [0x01,0x02,0x66,0x95]
+
+s_absdiff_i32 flat_scratch_hi, s1, s2
+// CHECK: [0x01,0x02,0x67,0x95]
+
+s_absdiff_i32 vcc_lo, s1, s2
+// CHECK: [0x01,0x02,0x6a,0x95]
+
+s_absdiff_i32 vcc_hi, s1, s2
+// CHECK: [0x01,0x02,0x6b,0x95]
+
+s_absdiff_i32 tba_lo, s1, s2
+// CHECK: [0x01,0x02,0x6c,0x95]
+
+s_absdiff_i32 tba_hi, s1, s2
+// CHECK: [0x01,0x02,0x6d,0x95]
+
+s_absdiff_i32 tma_lo, s1, s2
+// CHECK: [0x01,0x02,0x6e,0x95]
+
+s_absdiff_i32 tma_hi, s1, s2
+// CHECK: [0x01,0x02,0x6f,0x95]
+
+s_absdiff_i32 ttmp11, s1, s2
+// CHECK: [0x01,0x02,0x7b,0x95]
+
+s_absdiff_i32 m0, s1, s2
+// CHECK: [0x01,0x02,0x7c,0x95]
+
+s_absdiff_i32 exec_lo, s1, s2
+// CHECK: [0x01,0x02,0x7e,0x95]
+
+s_absdiff_i32 exec_hi, s1, s2
+// CHECK: [0x01,0x02,0x7f,0x95]
+
+s_absdiff_i32 s5, s101, s2
+// CHECK: [0x65,0x02,0x05,0x95]
+
+s_absdiff_i32 s5, flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0x95]
+
+s_absdiff_i32 s5, flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0x95]
+
+s_absdiff_i32 s5, vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0x95]
+
+s_absdiff_i32 s5, vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0x95]
+
+s_absdiff_i32 s5, tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0x95]
+
+s_absdiff_i32 s5, tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0x95]
+
+s_absdiff_i32 s5, tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0x95]
+
+s_absdiff_i32 s5, tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0x95]
+
+s_absdiff_i32 s5, ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0x95]
+
+s_absdiff_i32 s5, m0, s2
+// CHECK: [0x7c,0x02,0x05,0x95]
+
+s_absdiff_i32 s5, exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0x95]
+
+s_absdiff_i32 s5, exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0x95]
+
+s_absdiff_i32 s5, 0, s2
+// CHECK: [0x80,0x02,0x05,0x95]
+
+s_absdiff_i32 s5, -1, s2
+// CHECK: [0xc1,0x02,0x05,0x95]
+
+s_absdiff_i32 s5, 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0x95]
+
+s_absdiff_i32 s5, -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0x95]
+
+s_absdiff_i32 s5, 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0x95,0x56,0x34,0x12,0xaf]
+
+s_absdiff_i32 s5, 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0x95,0x73,0x72,0x71,0x3f]
+
+s_absdiff_i32 s5, s1, s101
+// CHECK: [0x01,0x65,0x05,0x95]
+
+s_absdiff_i32 s5, s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0x95]
+
+s_absdiff_i32 s5, s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0x95]
+
+s_absdiff_i32 s5, s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0x95]
+
+s_absdiff_i32 s5, s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0x95]
+
+s_absdiff_i32 s5, s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0x95]
+
+s_absdiff_i32 s5, s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0x95]
+
+s_absdiff_i32 s5, s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0x95]
+
+s_absdiff_i32 s5, s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0x95]
+
+s_absdiff_i32 s5, s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0x95]
+
+s_absdiff_i32 s5, s1, m0
+// CHECK: [0x01,0x7c,0x05,0x95]
+
+s_absdiff_i32 s5, s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0x95]
+
+s_absdiff_i32 s5, s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0x95]
+
+s_absdiff_i32 s5, s1, 0
+// CHECK: [0x01,0x80,0x05,0x95]
+
+s_absdiff_i32 s5, s1, -1
+// CHECK: [0x01,0xc1,0x05,0x95]
+
+s_absdiff_i32 s5, s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0x95]
+
+s_absdiff_i32 s5, s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0x95]
+
+s_absdiff_i32 s5, s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0x95,0x56,0x34,0x12,0xaf]
+
+s_absdiff_i32 s5, s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0x95,0x73,0x72,0x71,0x3f]
+
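+// The s_cmp_*, s_bitcmp*, s_setvskip and s_set_gpr_idx_on cases below use the
+// SOPC format: no destination field (compares write SCC), encoded as
+// [src0, src1, opcode, 0xbf]. In the source-operand bytes, 0x80 is the inline
+// constant 0, 0xc1 is -1, 0xf0 is 0.5, 0xf7 is -4.0, and 0xff marks a 32-bit
+// literal carried in the following four bytes.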
+s_cmp_eq_i32 s1, s2
+// CHECK: [0x01,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 s101, s2
+// CHECK: [0x65,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 m0, s2
+// CHECK: [0x7c,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 0, s2
+// CHECK: [0x80,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 -1, s2
+// CHECK: [0xc1,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 0.5, s2
+// CHECK: [0xf0,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 -4.0, s2
+// CHECK: [0xf7,0x02,0x00,0xbf]
+
+s_cmp_eq_i32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x00,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_eq_i32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x00,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_eq_i32 s1, s101
+// CHECK: [0x01,0x65,0x00,0xbf]
+
+s_cmp_eq_i32 s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x00,0xbf]
+
+s_cmp_eq_i32 s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x00,0xbf]
+
+s_cmp_eq_i32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x00,0xbf]
+
+s_cmp_eq_i32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x00,0xbf]
+
+s_cmp_eq_i32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x00,0xbf]
+
+s_cmp_eq_i32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x00,0xbf]
+
+s_cmp_eq_i32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x00,0xbf]
+
+s_cmp_eq_i32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x00,0xbf]
+
+s_cmp_eq_i32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x00,0xbf]
+
+s_cmp_eq_i32 s1, m0
+// CHECK: [0x01,0x7c,0x00,0xbf]
+
+s_cmp_eq_i32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x00,0xbf]
+
+s_cmp_eq_i32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x00,0xbf]
+
+s_cmp_eq_i32 s1, 0
+// CHECK: [0x01,0x80,0x00,0xbf]
+
+s_cmp_eq_i32 s1, -1
+// CHECK: [0x01,0xc1,0x00,0xbf]
+
+s_cmp_eq_i32 s1, 0.5
+// CHECK: [0x01,0xf0,0x00,0xbf]
+
+s_cmp_eq_i32 s1, -4.0
+// CHECK: [0x01,0xf7,0x00,0xbf]
+
+s_cmp_eq_i32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x00,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_eq_i32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x00,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_lg_i32 s1, s2
+// CHECK: [0x01,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 s101, s2
+// CHECK: [0x65,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 m0, s2
+// CHECK: [0x7c,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 0, s2
+// CHECK: [0x80,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 -1, s2
+// CHECK: [0xc1,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 0.5, s2
+// CHECK: [0xf0,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 -4.0, s2
+// CHECK: [0xf7,0x02,0x01,0xbf]
+
+s_cmp_lg_i32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x01,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_lg_i32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x01,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_lg_i32 s1, s101
+// CHECK: [0x01,0x65,0x01,0xbf]
+
+s_cmp_lg_i32 s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x01,0xbf]
+
+s_cmp_lg_i32 s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x01,0xbf]
+
+s_cmp_lg_i32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x01,0xbf]
+
+s_cmp_lg_i32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x01,0xbf]
+
+s_cmp_lg_i32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x01,0xbf]
+
+s_cmp_lg_i32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x01,0xbf]
+
+s_cmp_lg_i32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x01,0xbf]
+
+s_cmp_lg_i32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x01,0xbf]
+
+s_cmp_lg_i32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x01,0xbf]
+
+s_cmp_lg_i32 s1, m0
+// CHECK: [0x01,0x7c,0x01,0xbf]
+
+s_cmp_lg_i32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x01,0xbf]
+
+s_cmp_lg_i32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x01,0xbf]
+
+s_cmp_lg_i32 s1, 0
+// CHECK: [0x01,0x80,0x01,0xbf]
+
+s_cmp_lg_i32 s1, -1
+// CHECK: [0x01,0xc1,0x01,0xbf]
+
+s_cmp_lg_i32 s1, 0.5
+// CHECK: [0x01,0xf0,0x01,0xbf]
+
+s_cmp_lg_i32 s1, -4.0
+// CHECK: [0x01,0xf7,0x01,0xbf]
+
+s_cmp_lg_i32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x01,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_lg_i32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x01,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_gt_i32 s1, s2
+// CHECK: [0x01,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 s101, s2
+// CHECK: [0x65,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 m0, s2
+// CHECK: [0x7c,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 0, s2
+// CHECK: [0x80,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 -1, s2
+// CHECK: [0xc1,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 0.5, s2
+// CHECK: [0xf0,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 -4.0, s2
+// CHECK: [0xf7,0x02,0x02,0xbf]
+
+s_cmp_gt_i32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x02,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_gt_i32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x02,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_gt_i32 s1, s101
+// CHECK: [0x01,0x65,0x02,0xbf]
+
+s_cmp_gt_i32 s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x02,0xbf]
+
+s_cmp_gt_i32 s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x02,0xbf]
+
+s_cmp_gt_i32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x02,0xbf]
+
+s_cmp_gt_i32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x02,0xbf]
+
+s_cmp_gt_i32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x02,0xbf]
+
+s_cmp_gt_i32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x02,0xbf]
+
+s_cmp_gt_i32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x02,0xbf]
+
+s_cmp_gt_i32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x02,0xbf]
+
+s_cmp_gt_i32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x02,0xbf]
+
+s_cmp_gt_i32 s1, m0
+// CHECK: [0x01,0x7c,0x02,0xbf]
+
+s_cmp_gt_i32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x02,0xbf]
+
+s_cmp_gt_i32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x02,0xbf]
+
+s_cmp_gt_i32 s1, 0
+// CHECK: [0x01,0x80,0x02,0xbf]
+
+s_cmp_gt_i32 s1, -1
+// CHECK: [0x01,0xc1,0x02,0xbf]
+
+s_cmp_gt_i32 s1, 0.5
+// CHECK: [0x01,0xf0,0x02,0xbf]
+
+s_cmp_gt_i32 s1, -4.0
+// CHECK: [0x01,0xf7,0x02,0xbf]
+
+s_cmp_gt_i32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x02,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_gt_i32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x02,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_ge_i32 s1, s2
+// CHECK: [0x01,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 s101, s2
+// CHECK: [0x65,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 m0, s2
+// CHECK: [0x7c,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 0, s2
+// CHECK: [0x80,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 -1, s2
+// CHECK: [0xc1,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 0.5, s2
+// CHECK: [0xf0,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 -4.0, s2
+// CHECK: [0xf7,0x02,0x03,0xbf]
+
+s_cmp_ge_i32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x03,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_ge_i32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x03,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_ge_i32 s1, s101
+// CHECK: [0x01,0x65,0x03,0xbf]
+
+s_cmp_ge_i32 s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x03,0xbf]
+
+s_cmp_ge_i32 s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x03,0xbf]
+
+s_cmp_ge_i32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x03,0xbf]
+
+s_cmp_ge_i32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x03,0xbf]
+
+s_cmp_ge_i32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x03,0xbf]
+
+s_cmp_ge_i32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x03,0xbf]
+
+s_cmp_ge_i32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x03,0xbf]
+
+s_cmp_ge_i32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x03,0xbf]
+
+s_cmp_ge_i32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x03,0xbf]
+
+s_cmp_ge_i32 s1, m0
+// CHECK: [0x01,0x7c,0x03,0xbf]
+
+s_cmp_ge_i32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x03,0xbf]
+
+s_cmp_ge_i32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x03,0xbf]
+
+s_cmp_ge_i32 s1, 0
+// CHECK: [0x01,0x80,0x03,0xbf]
+
+s_cmp_ge_i32 s1, -1
+// CHECK: [0x01,0xc1,0x03,0xbf]
+
+s_cmp_ge_i32 s1, 0.5
+// CHECK: [0x01,0xf0,0x03,0xbf]
+
+s_cmp_ge_i32 s1, -4.0
+// CHECK: [0x01,0xf7,0x03,0xbf]
+
+s_cmp_ge_i32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x03,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_ge_i32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x03,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_lt_i32 s1, s2
+// CHECK: [0x01,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 s101, s2
+// CHECK: [0x65,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 m0, s2
+// CHECK: [0x7c,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 0, s2
+// CHECK: [0x80,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 -1, s2
+// CHECK: [0xc1,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 0.5, s2
+// CHECK: [0xf0,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 -4.0, s2
+// CHECK: [0xf7,0x02,0x04,0xbf]
+
+s_cmp_lt_i32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x04,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_lt_i32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x04,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_lt_i32 s1, s101
+// CHECK: [0x01,0x65,0x04,0xbf]
+
+s_cmp_lt_i32 s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x04,0xbf]
+
+s_cmp_lt_i32 s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x04,0xbf]
+
+s_cmp_lt_i32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x04,0xbf]
+
+s_cmp_lt_i32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x04,0xbf]
+
+s_cmp_lt_i32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x04,0xbf]
+
+s_cmp_lt_i32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x04,0xbf]
+
+s_cmp_lt_i32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x04,0xbf]
+
+s_cmp_lt_i32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x04,0xbf]
+
+s_cmp_lt_i32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x04,0xbf]
+
+s_cmp_lt_i32 s1, m0
+// CHECK: [0x01,0x7c,0x04,0xbf]
+
+s_cmp_lt_i32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x04,0xbf]
+
+s_cmp_lt_i32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x04,0xbf]
+
+s_cmp_lt_i32 s1, 0
+// CHECK: [0x01,0x80,0x04,0xbf]
+
+s_cmp_lt_i32 s1, -1
+// CHECK: [0x01,0xc1,0x04,0xbf]
+
+s_cmp_lt_i32 s1, 0.5
+// CHECK: [0x01,0xf0,0x04,0xbf]
+
+s_cmp_lt_i32 s1, -4.0
+// CHECK: [0x01,0xf7,0x04,0xbf]
+
+s_cmp_lt_i32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x04,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_lt_i32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x04,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_le_i32 s1, s2
+// CHECK: [0x01,0x02,0x05,0xbf]
+
+s_cmp_le_i32 s101, s2
+// CHECK: [0x65,0x02,0x05,0xbf]
+
+s_cmp_le_i32 flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x05,0xbf]
+
+s_cmp_le_i32 flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x05,0xbf]
+
+s_cmp_le_i32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x05,0xbf]
+
+s_cmp_le_i32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x05,0xbf]
+
+s_cmp_le_i32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x05,0xbf]
+
+s_cmp_le_i32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x05,0xbf]
+
+s_cmp_le_i32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x05,0xbf]
+
+s_cmp_le_i32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x05,0xbf]
+
+s_cmp_le_i32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x05,0xbf]
+
+s_cmp_le_i32 m0, s2
+// CHECK: [0x7c,0x02,0x05,0xbf]
+
+s_cmp_le_i32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x05,0xbf]
+
+s_cmp_le_i32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x05,0xbf]
+
+s_cmp_le_i32 0, s2
+// CHECK: [0x80,0x02,0x05,0xbf]
+
+s_cmp_le_i32 -1, s2
+// CHECK: [0xc1,0x02,0x05,0xbf]
+
+s_cmp_le_i32 0.5, s2
+// CHECK: [0xf0,0x02,0x05,0xbf]
+
+s_cmp_le_i32 -4.0, s2
+// CHECK: [0xf7,0x02,0x05,0xbf]
+
+s_cmp_le_i32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x05,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_le_i32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x05,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_le_i32 s1, s101
+// CHECK: [0x01,0x65,0x05,0xbf]
+
+s_cmp_le_i32 s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x05,0xbf]
+
+s_cmp_le_i32 s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x05,0xbf]
+
+s_cmp_le_i32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x05,0xbf]
+
+s_cmp_le_i32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x05,0xbf]
+
+s_cmp_le_i32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x05,0xbf]
+
+s_cmp_le_i32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x05,0xbf]
+
+s_cmp_le_i32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x05,0xbf]
+
+s_cmp_le_i32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x05,0xbf]
+
+s_cmp_le_i32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x05,0xbf]
+
+s_cmp_le_i32 s1, m0
+// CHECK: [0x01,0x7c,0x05,0xbf]
+
+s_cmp_le_i32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x05,0xbf]
+
+s_cmp_le_i32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x05,0xbf]
+
+s_cmp_le_i32 s1, 0
+// CHECK: [0x01,0x80,0x05,0xbf]
+
+s_cmp_le_i32 s1, -1
+// CHECK: [0x01,0xc1,0x05,0xbf]
+
+s_cmp_le_i32 s1, 0.5
+// CHECK: [0x01,0xf0,0x05,0xbf]
+
+s_cmp_le_i32 s1, -4.0
+// CHECK: [0x01,0xf7,0x05,0xbf]
+
+s_cmp_le_i32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x05,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_le_i32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x05,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_eq_u32 s1, s2
+// CHECK: [0x01,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 s101, s2
+// CHECK: [0x65,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 m0, s2
+// CHECK: [0x7c,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 0, s2
+// CHECK: [0x80,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 -1, s2
+// CHECK: [0xc1,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 0.5, s2
+// CHECK: [0xf0,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 -4.0, s2
+// CHECK: [0xf7,0x02,0x06,0xbf]
+
+s_cmp_eq_u32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x06,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_eq_u32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x06,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_eq_u32 s1, s101
+// CHECK: [0x01,0x65,0x06,0xbf]
+
+s_cmp_eq_u32 s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x06,0xbf]
+
+s_cmp_eq_u32 s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x06,0xbf]
+
+s_cmp_eq_u32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x06,0xbf]
+
+s_cmp_eq_u32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x06,0xbf]
+
+s_cmp_eq_u32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x06,0xbf]
+
+s_cmp_eq_u32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x06,0xbf]
+
+s_cmp_eq_u32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x06,0xbf]
+
+s_cmp_eq_u32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x06,0xbf]
+
+s_cmp_eq_u32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x06,0xbf]
+
+s_cmp_eq_u32 s1, m0
+// CHECK: [0x01,0x7c,0x06,0xbf]
+
+s_cmp_eq_u32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x06,0xbf]
+
+s_cmp_eq_u32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x06,0xbf]
+
+s_cmp_eq_u32 s1, 0
+// CHECK: [0x01,0x80,0x06,0xbf]
+
+s_cmp_eq_u32 s1, -1
+// CHECK: [0x01,0xc1,0x06,0xbf]
+
+s_cmp_eq_u32 s1, 0.5
+// CHECK: [0x01,0xf0,0x06,0xbf]
+
+s_cmp_eq_u32 s1, -4.0
+// CHECK: [0x01,0xf7,0x06,0xbf]
+
+s_cmp_eq_u32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x06,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_eq_u32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x06,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_lg_u32 s1, s2
+// CHECK: [0x01,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 s101, s2
+// CHECK: [0x65,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 m0, s2
+// CHECK: [0x7c,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 0, s2
+// CHECK: [0x80,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 -1, s2
+// CHECK: [0xc1,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 0.5, s2
+// CHECK: [0xf0,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 -4.0, s2
+// CHECK: [0xf7,0x02,0x07,0xbf]
+
+s_cmp_lg_u32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x07,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_lg_u32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x07,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_lg_u32 s1, s101
+// CHECK: [0x01,0x65,0x07,0xbf]
+
+s_cmp_lg_u32 s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x07,0xbf]
+
+s_cmp_lg_u32 s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x07,0xbf]
+
+s_cmp_lg_u32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x07,0xbf]
+
+s_cmp_lg_u32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x07,0xbf]
+
+s_cmp_lg_u32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x07,0xbf]
+
+s_cmp_lg_u32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x07,0xbf]
+
+s_cmp_lg_u32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x07,0xbf]
+
+s_cmp_lg_u32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x07,0xbf]
+
+s_cmp_lg_u32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x07,0xbf]
+
+s_cmp_lg_u32 s1, m0
+// CHECK: [0x01,0x7c,0x07,0xbf]
+
+s_cmp_lg_u32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x07,0xbf]
+
+s_cmp_lg_u32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x07,0xbf]
+
+s_cmp_lg_u32 s1, 0
+// CHECK: [0x01,0x80,0x07,0xbf]
+
+s_cmp_lg_u32 s1, -1
+// CHECK: [0x01,0xc1,0x07,0xbf]
+
+s_cmp_lg_u32 s1, 0.5
+// CHECK: [0x01,0xf0,0x07,0xbf]
+
+s_cmp_lg_u32 s1, -4.0
+// CHECK: [0x01,0xf7,0x07,0xbf]
+
+s_cmp_lg_u32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x07,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_lg_u32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x07,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_gt_u32 s1, s2
+// CHECK: [0x01,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 s101, s2
+// CHECK: [0x65,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 m0, s2
+// CHECK: [0x7c,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 0, s2
+// CHECK: [0x80,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 -1, s2
+// CHECK: [0xc1,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 0.5, s2
+// CHECK: [0xf0,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 -4.0, s2
+// CHECK: [0xf7,0x02,0x08,0xbf]
+
+s_cmp_gt_u32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x08,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_gt_u32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x08,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_gt_u32 s1, s101
+// CHECK: [0x01,0x65,0x08,0xbf]
+
+s_cmp_gt_u32 s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x08,0xbf]
+
+s_cmp_gt_u32 s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x08,0xbf]
+
+s_cmp_gt_u32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x08,0xbf]
+
+s_cmp_gt_u32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x08,0xbf]
+
+s_cmp_gt_u32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x08,0xbf]
+
+s_cmp_gt_u32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x08,0xbf]
+
+s_cmp_gt_u32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x08,0xbf]
+
+s_cmp_gt_u32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x08,0xbf]
+
+s_cmp_gt_u32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x08,0xbf]
+
+s_cmp_gt_u32 s1, m0
+// CHECK: [0x01,0x7c,0x08,0xbf]
+
+s_cmp_gt_u32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x08,0xbf]
+
+s_cmp_gt_u32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x08,0xbf]
+
+s_cmp_gt_u32 s1, 0
+// CHECK: [0x01,0x80,0x08,0xbf]
+
+s_cmp_gt_u32 s1, -1
+// CHECK: [0x01,0xc1,0x08,0xbf]
+
+s_cmp_gt_u32 s1, 0.5
+// CHECK: [0x01,0xf0,0x08,0xbf]
+
+s_cmp_gt_u32 s1, -4.0
+// CHECK: [0x01,0xf7,0x08,0xbf]
+
+s_cmp_gt_u32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x08,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_gt_u32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x08,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_ge_u32 s1, s2
+// CHECK: [0x01,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 s101, s2
+// CHECK: [0x65,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 m0, s2
+// CHECK: [0x7c,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 0, s2
+// CHECK: [0x80,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 -1, s2
+// CHECK: [0xc1,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 0.5, s2
+// CHECK: [0xf0,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 -4.0, s2
+// CHECK: [0xf7,0x02,0x09,0xbf]
+
+s_cmp_ge_u32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x09,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_ge_u32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x09,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_ge_u32 s1, s101
+// CHECK: [0x01,0x65,0x09,0xbf]
+
+s_cmp_ge_u32 s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x09,0xbf]
+
+s_cmp_ge_u32 s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x09,0xbf]
+
+s_cmp_ge_u32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x09,0xbf]
+
+s_cmp_ge_u32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x09,0xbf]
+
+s_cmp_ge_u32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x09,0xbf]
+
+s_cmp_ge_u32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x09,0xbf]
+
+s_cmp_ge_u32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x09,0xbf]
+
+s_cmp_ge_u32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x09,0xbf]
+
+s_cmp_ge_u32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x09,0xbf]
+
+s_cmp_ge_u32 s1, m0
+// CHECK: [0x01,0x7c,0x09,0xbf]
+
+s_cmp_ge_u32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x09,0xbf]
+
+s_cmp_ge_u32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x09,0xbf]
+
+s_cmp_ge_u32 s1, 0
+// CHECK: [0x01,0x80,0x09,0xbf]
+
+s_cmp_ge_u32 s1, -1
+// CHECK: [0x01,0xc1,0x09,0xbf]
+
+s_cmp_ge_u32 s1, 0.5
+// CHECK: [0x01,0xf0,0x09,0xbf]
+
+s_cmp_ge_u32 s1, -4.0
+// CHECK: [0x01,0xf7,0x09,0xbf]
+
+s_cmp_ge_u32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x09,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_ge_u32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x09,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_lt_u32 s1, s2
+// CHECK: [0x01,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 s101, s2
+// CHECK: [0x65,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 m0, s2
+// CHECK: [0x7c,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 0, s2
+// CHECK: [0x80,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 -1, s2
+// CHECK: [0xc1,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 0.5, s2
+// CHECK: [0xf0,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 -4.0, s2
+// CHECK: [0xf7,0x02,0x0a,0xbf]
+
+s_cmp_lt_u32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x0a,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_lt_u32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x0a,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_lt_u32 s1, s101
+// CHECK: [0x01,0x65,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, m0
+// CHECK: [0x01,0x7c,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, 0
+// CHECK: [0x01,0x80,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, -1
+// CHECK: [0x01,0xc1,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, 0.5
+// CHECK: [0x01,0xf0,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, -4.0
+// CHECK: [0x01,0xf7,0x0a,0xbf]
+
+s_cmp_lt_u32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x0a,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_lt_u32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x0a,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_le_u32 s1, s2
+// CHECK: [0x01,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 s101, s2
+// CHECK: [0x65,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 m0, s2
+// CHECK: [0x7c,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 0, s2
+// CHECK: [0x80,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 -1, s2
+// CHECK: [0xc1,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 0.5, s2
+// CHECK: [0xf0,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 -4.0, s2
+// CHECK: [0xf7,0x02,0x0b,0xbf]
+
+s_cmp_le_u32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x0b,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_le_u32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x0b,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_le_u32 s1, s101
+// CHECK: [0x01,0x65,0x0b,0xbf]
+
+s_cmp_le_u32 s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x0b,0xbf]
+
+s_cmp_le_u32 s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x0b,0xbf]
+
+s_cmp_le_u32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x0b,0xbf]
+
+s_cmp_le_u32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x0b,0xbf]
+
+s_cmp_le_u32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x0b,0xbf]
+
+s_cmp_le_u32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x0b,0xbf]
+
+s_cmp_le_u32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x0b,0xbf]
+
+s_cmp_le_u32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x0b,0xbf]
+
+s_cmp_le_u32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x0b,0xbf]
+
+s_cmp_le_u32 s1, m0
+// CHECK: [0x01,0x7c,0x0b,0xbf]
+
+s_cmp_le_u32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x0b,0xbf]
+
+s_cmp_le_u32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x0b,0xbf]
+
+s_cmp_le_u32 s1, 0
+// CHECK: [0x01,0x80,0x0b,0xbf]
+
+s_cmp_le_u32 s1, -1
+// CHECK: [0x01,0xc1,0x0b,0xbf]
+
+s_cmp_le_u32 s1, 0.5
+// CHECK: [0x01,0xf0,0x0b,0xbf]
+
+s_cmp_le_u32 s1, -4.0
+// CHECK: [0x01,0xf7,0x0b,0xbf]
+
+s_cmp_le_u32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x0b,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_le_u32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x0b,0xbf,0x73,0x72,0x71,0x3f]
+
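+// s_bitcmp0_b32: SCC = (src0[src1[4:0]] == 0); s_bitcmp1_b32 below tests for 1.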
+s_bitcmp0_b32 s1, s2
+// CHECK: [0x01,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 s101, s2
+// CHECK: [0x65,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 m0, s2
+// CHECK: [0x7c,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 0, s2
+// CHECK: [0x80,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 -1, s2
+// CHECK: [0xc1,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 0.5, s2
+// CHECK: [0xf0,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 -4.0, s2
+// CHECK: [0xf7,0x02,0x0c,0xbf]
+
+s_bitcmp0_b32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x0c,0xbf,0x56,0x34,0x12,0xaf]
+
+s_bitcmp0_b32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x0c,0xbf,0x73,0x72,0x71,0x3f]
+
+s_bitcmp0_b32 s1, s101
+// CHECK: [0x01,0x65,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, m0
+// CHECK: [0x01,0x7c,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, 0
+// CHECK: [0x01,0x80,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, -1
+// CHECK: [0x01,0xc1,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, 0.5
+// CHECK: [0x01,0xf0,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, -4.0
+// CHECK: [0x01,0xf7,0x0c,0xbf]
+
+s_bitcmp0_b32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x0c,0xbf,0x56,0x34,0x12,0xaf]
+
+s_bitcmp0_b32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x0c,0xbf,0x73,0x72,0x71,0x3f]
+
+s_bitcmp1_b32 s1, s2
+// CHECK: [0x01,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 s101, s2
+// CHECK: [0x65,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 vcc_lo, s2
+// CHECK: [0x6a,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 vcc_hi, s2
+// CHECK: [0x6b,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 tba_lo, s2
+// CHECK: [0x6c,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 tba_hi, s2
+// CHECK: [0x6d,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 tma_lo, s2
+// CHECK: [0x6e,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 tma_hi, s2
+// CHECK: [0x6f,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 ttmp11, s2
+// CHECK: [0x7b,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 m0, s2
+// CHECK: [0x7c,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 exec_lo, s2
+// CHECK: [0x7e,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 exec_hi, s2
+// CHECK: [0x7f,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 0, s2
+// CHECK: [0x80,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 -1, s2
+// CHECK: [0xc1,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 0.5, s2
+// CHECK: [0xf0,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 -4.0, s2
+// CHECK: [0xf7,0x02,0x0d,0xbf]
+
+s_bitcmp1_b32 0xaf123456, s2
+// CHECK: [0xff,0x02,0x0d,0xbf,0x56,0x34,0x12,0xaf]
+
+s_bitcmp1_b32 0x3f717273, s2
+// CHECK: [0xff,0x02,0x0d,0xbf,0x73,0x72,0x71,0x3f]
+
+s_bitcmp1_b32 s1, s101
+// CHECK: [0x01,0x65,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, vcc_lo
+// CHECK: [0x01,0x6a,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, vcc_hi
+// CHECK: [0x01,0x6b,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, tba_lo
+// CHECK: [0x01,0x6c,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, tba_hi
+// CHECK: [0x01,0x6d,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, tma_lo
+// CHECK: [0x01,0x6e,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, tma_hi
+// CHECK: [0x01,0x6f,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, ttmp11
+// CHECK: [0x01,0x7b,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, m0
+// CHECK: [0x01,0x7c,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, exec_lo
+// CHECK: [0x01,0x7e,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, exec_hi
+// CHECK: [0x01,0x7f,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, 0
+// CHECK: [0x01,0x80,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, -1
+// CHECK: [0x01,0xc1,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, 0.5
+// CHECK: [0x01,0xf0,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, -4.0
+// CHECK: [0x01,0xf7,0x0d,0xbf]
+
+s_bitcmp1_b32 s1, 0xaf123456
+// CHECK: [0x01,0xff,0x0d,0xbf,0x56,0x34,0x12,0xaf]
+
+s_bitcmp1_b32 s1, 0x3f717273
+// CHECK: [0x01,0xff,0x0d,0xbf,0x73,0x72,0x71,0x3f]
+
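+// s_bitcmp0_b64 / s_bitcmp1_b64: 64-bit source operand, with the bit index
+// taken from src1[5:0].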
+s_bitcmp0_b64 s[2:3], s2
+// CHECK: [0x02,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 s[4:5], s2
+// CHECK: [0x04,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 s[100:101], s2
+// CHECK: [0x64,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 flat_scratch, s2
+// CHECK: [0x66,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 vcc, s2
+// CHECK: [0x6a,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 tba, s2
+// CHECK: [0x6c,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 tma, s2
+// CHECK: [0x6e,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 ttmp[10:11], s2
+// CHECK: [0x7a,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 exec, s2
+// CHECK: [0x7e,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 0, s2
+// CHECK: [0x80,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 -1, s2
+// CHECK: [0xc1,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 0.5, s2
+// CHECK: [0xf0,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 -4.0, s2
+// CHECK: [0xf7,0x02,0x0e,0xbf]
+
+s_bitcmp0_b64 0xaf123456, s2
+// CHECK: [0xff,0x02,0x0e,0xbf,0x56,0x34,0x12,0xaf]
+
+s_bitcmp0_b64 0x3f717273, s2
+// CHECK: [0xff,0x02,0x0e,0xbf,0x73,0x72,0x71,0x3f]
+
+s_bitcmp0_b64 s[2:3], s101
+// CHECK: [0x02,0x65,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], flat_scratch_lo
+// CHECK: [0x02,0x66,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], flat_scratch_hi
+// CHECK: [0x02,0x67,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], vcc_lo
+// CHECK: [0x02,0x6a,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], vcc_hi
+// CHECK: [0x02,0x6b,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], tba_lo
+// CHECK: [0x02,0x6c,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], tba_hi
+// CHECK: [0x02,0x6d,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], tma_lo
+// CHECK: [0x02,0x6e,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], tma_hi
+// CHECK: [0x02,0x6f,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], ttmp11
+// CHECK: [0x02,0x7b,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], m0
+// CHECK: [0x02,0x7c,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], exec_lo
+// CHECK: [0x02,0x7e,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], exec_hi
+// CHECK: [0x02,0x7f,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], 0
+// CHECK: [0x02,0x80,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], -1
+// CHECK: [0x02,0xc1,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x0e,0xbf]
+
+s_bitcmp0_b64 s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x0e,0xbf,0x56,0x34,0x12,0xaf]
+
+s_bitcmp0_b64 s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x0e,0xbf,0x73,0x72,0x71,0x3f]
+
+s_bitcmp1_b64 s[2:3], s2
+// CHECK: [0x02,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 s[4:5], s2
+// CHECK: [0x04,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 s[100:101], s2
+// CHECK: [0x64,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 flat_scratch, s2
+// CHECK: [0x66,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 vcc, s2
+// CHECK: [0x6a,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 tba, s2
+// CHECK: [0x6c,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 tma, s2
+// CHECK: [0x6e,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 ttmp[10:11], s2
+// CHECK: [0x7a,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 exec, s2
+// CHECK: [0x7e,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 0, s2
+// CHECK: [0x80,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 -1, s2
+// CHECK: [0xc1,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 0.5, s2
+// CHECK: [0xf0,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 -4.0, s2
+// CHECK: [0xf7,0x02,0x0f,0xbf]
+
+s_bitcmp1_b64 0xaf123456, s2
+// CHECK: [0xff,0x02,0x0f,0xbf,0x56,0x34,0x12,0xaf]
+
+s_bitcmp1_b64 0x3f717273, s2
+// CHECK: [0xff,0x02,0x0f,0xbf,0x73,0x72,0x71,0x3f]
+
+s_bitcmp1_b64 s[2:3], s101
+// CHECK: [0x02,0x65,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], flat_scratch_lo
+// CHECK: [0x02,0x66,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], flat_scratch_hi
+// CHECK: [0x02,0x67,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], vcc_lo
+// CHECK: [0x02,0x6a,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], vcc_hi
+// CHECK: [0x02,0x6b,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], tba_lo
+// CHECK: [0x02,0x6c,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], tba_hi
+// CHECK: [0x02,0x6d,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], tma_lo
+// CHECK: [0x02,0x6e,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], tma_hi
+// CHECK: [0x02,0x6f,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], ttmp11
+// CHECK: [0x02,0x7b,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], m0
+// CHECK: [0x02,0x7c,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], exec_lo
+// CHECK: [0x02,0x7e,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], exec_hi
+// CHECK: [0x02,0x7f,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], 0
+// CHECK: [0x02,0x80,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], -1
+// CHECK: [0x02,0xc1,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x0f,0xbf]
+
+s_bitcmp1_b64 s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x0f,0xbf,0x56,0x34,0x12,0xaf]
+
+s_bitcmp1_b64 s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x0f,0xbf,0x73,0x72,0x71,0x3f]
+
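+// s_setvskip: loads the VSKIP mode bit from src0[src1[4:0]]; no SCC result.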
+s_setvskip s1, s2
+// CHECK: [0x01,0x02,0x10,0xbf]
+
+s_setvskip s101, s2
+// CHECK: [0x65,0x02,0x10,0xbf]
+
+s_setvskip flat_scratch_lo, s2
+// CHECK: [0x66,0x02,0x10,0xbf]
+
+s_setvskip flat_scratch_hi, s2
+// CHECK: [0x67,0x02,0x10,0xbf]
+
+s_setvskip vcc_lo, s2
+// CHECK: [0x6a,0x02,0x10,0xbf]
+
+s_setvskip vcc_hi, s2
+// CHECK: [0x6b,0x02,0x10,0xbf]
+
+s_setvskip tba_lo, s2
+// CHECK: [0x6c,0x02,0x10,0xbf]
+
+s_setvskip tba_hi, s2
+// CHECK: [0x6d,0x02,0x10,0xbf]
+
+s_setvskip tma_lo, s2
+// CHECK: [0x6e,0x02,0x10,0xbf]
+
+s_setvskip tma_hi, s2
+// CHECK: [0x6f,0x02,0x10,0xbf]
+
+s_setvskip ttmp11, s2
+// CHECK: [0x7b,0x02,0x10,0xbf]
+
+s_setvskip m0, s2
+// CHECK: [0x7c,0x02,0x10,0xbf]
+
+s_setvskip exec_lo, s2
+// CHECK: [0x7e,0x02,0x10,0xbf]
+
+s_setvskip exec_hi, s2
+// CHECK: [0x7f,0x02,0x10,0xbf]
+
+s_setvskip 0, s2
+// CHECK: [0x80,0x02,0x10,0xbf]
+
+s_setvskip -1, s2
+// CHECK: [0xc1,0x02,0x10,0xbf]
+
+s_setvskip 0.5, s2
+// CHECK: [0xf0,0x02,0x10,0xbf]
+
+s_setvskip -4.0, s2
+// CHECK: [0xf7,0x02,0x10,0xbf]
+
+s_setvskip 0xaf123456, s2
+// CHECK: [0xff,0x02,0x10,0xbf,0x56,0x34,0x12,0xaf]
+
+s_setvskip 0x3f717273, s2
+// CHECK: [0xff,0x02,0x10,0xbf,0x73,0x72,0x71,0x3f]
+
+s_setvskip s1, s101
+// CHECK: [0x01,0x65,0x10,0xbf]
+
+s_setvskip s1, flat_scratch_lo
+// CHECK: [0x01,0x66,0x10,0xbf]
+
+s_setvskip s1, flat_scratch_hi
+// CHECK: [0x01,0x67,0x10,0xbf]
+
+s_setvskip s1, vcc_lo
+// CHECK: [0x01,0x6a,0x10,0xbf]
+
+s_setvskip s1, vcc_hi
+// CHECK: [0x01,0x6b,0x10,0xbf]
+
+s_setvskip s1, tba_lo
+// CHECK: [0x01,0x6c,0x10,0xbf]
+
+s_setvskip s1, tba_hi
+// CHECK: [0x01,0x6d,0x10,0xbf]
+
+s_setvskip s1, tma_lo
+// CHECK: [0x01,0x6e,0x10,0xbf]
+
+s_setvskip s1, tma_hi
+// CHECK: [0x01,0x6f,0x10,0xbf]
+
+s_setvskip s1, ttmp11
+// CHECK: [0x01,0x7b,0x10,0xbf]
+
+s_setvskip s1, m0
+// CHECK: [0x01,0x7c,0x10,0xbf]
+
+s_setvskip s1, exec_lo
+// CHECK: [0x01,0x7e,0x10,0xbf]
+
+s_setvskip s1, exec_hi
+// CHECK: [0x01,0x7f,0x10,0xbf]
+
+s_setvskip s1, 0
+// CHECK: [0x01,0x80,0x10,0xbf]
+
+s_setvskip s1, -1
+// CHECK: [0x01,0xc1,0x10,0xbf]
+
+s_setvskip s1, 0.5
+// CHECK: [0x01,0xf0,0x10,0xbf]
+
+s_setvskip s1, -4.0
+// CHECK: [0x01,0xf7,0x10,0xbf]
+
+s_setvskip s1, 0xaf123456
+// CHECK: [0x01,0xff,0x10,0xbf,0x56,0x34,0x12,0xaf]
+
+s_setvskip s1, 0x3f717273
+// CHECK: [0x01,0xff,0x10,0xbf,0x73,0x72,0x71,0x3f]
+
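+// s_set_gpr_idx_on (GFX8 GPR indexing): src0 seeds the index in M0 and the
+// second operand is a 4-bit immediate mode mask, hence only the 0x0, 0x1 and
+// 0xF immediates are swept below rather than the full source-operand matrix.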
+s_set_gpr_idx_on s1, 0x0
+// CHECK: [0x01,0x00,0x11,0xbf]
+
+s_set_gpr_idx_on s101, 0x0
+// CHECK: [0x65,0x00,0x11,0xbf]
+
+s_set_gpr_idx_on flat_scratch_lo, 0x0
+// CHECK: [0x66,0x00,0x11,0xbf]
+
+s_set_gpr_idx_on flat_scratch_hi, 0x0
+// CHECK: [0x67,0x00,0x11,0xbf]
+
+s_set_gpr_idx_on vcc_lo, 0x0
+// CHECK: [0x6a,0x00,0x11,0xbf]
+
+s_set_gpr_idx_on vcc_hi, 0x0
+// CHECK: [0x6b,0x00,0x11,0xbf]
+
+s_set_gpr_idx_on tba_lo, 0x0
+// CHECK: [0x6c,0x00,0x11,0xbf]
+
+s_set_gpr_idx_on tba_hi, 0x0
+// CHECK: [0x6d,0x00,0x11,0xbf]
+
+s_set_gpr_idx_on tma_lo, 0x0
+// CHECK: [0x6e,0x00,0x11,0xbf]
+
+s_set_gpr_idx_on tma_hi, 0x0
+// CHECK: [0x6f,0x00,0x11,0xbf]
+
+s_set_gpr_idx_on ttmp11, 0x0
+// CHECK: [0x7b,0x00,0x11,0xbf]
+
+s_set_gpr_idx_on m0, 0x0
+// CHECK: [0x7c,0x00,0x11,0xbf]
+
+s_set_gpr_idx_on 0, 0x0
+// CHECK: [0x80,0x00,0x11,0xbf]
+
+s_set_gpr_idx_on -1, 0x0
+// CHECK: [0xc1,0x00,0x11,0xbf]
+
+s_set_gpr_idx_on 0.5, 0x0
+// CHECK: [0xf0,0x00,0x11,0xbf]
+
+s_set_gpr_idx_on -4.0, 0x0
+// CHECK: [0xf7,0x00,0x11,0xbf]
+
+s_set_gpr_idx_on 0xaf123456, 0x0
+// CHECK: [0xff,0x00,0x11,0xbf,0x56,0x34,0x12,0xaf]
+
+s_set_gpr_idx_on 0x3f717273, 0x0
+// CHECK: [0xff,0x00,0x11,0xbf,0x73,0x72,0x71,0x3f]
+
+s_set_gpr_idx_on s1, 0x1
+// CHECK: [0x01,0x01,0x11,0xbf]
+
+s_set_gpr_idx_on s1, 0xF
+// CHECK: [0x01,0x0f,0x11,0xbf]
+
+s_cmp_eq_u64 s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x12,0xbf]
+
+s_cmp_eq_u64 s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x12,0xbf]
+
+s_cmp_eq_u64 s[100:101], s[4:5]
+// CHECK: [0x64,0x04,0x12,0xbf]
+
+s_cmp_eq_u64 flat_scratch, s[4:5]
+// CHECK: [0x66,0x04,0x12,0xbf]
+
+s_cmp_eq_u64 vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x12,0xbf]
+
+s_cmp_eq_u64 tba, s[4:5]
+// CHECK: [0x6c,0x04,0x12,0xbf]
+
+s_cmp_eq_u64 tma, s[4:5]
+// CHECK: [0x6e,0x04,0x12,0xbf]
+
+s_cmp_eq_u64 ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x12,0xbf]
+
+s_cmp_eq_u64 exec, s[4:5]
+// CHECK: [0x7e,0x04,0x12,0xbf]
+
+s_cmp_eq_u64 0, s[4:5]
+// CHECK: [0x80,0x04,0x12,0xbf]
+
+s_cmp_eq_u64 -1, s[4:5]
+// CHECK: [0xc1,0x04,0x12,0xbf]
+
+s_cmp_eq_u64 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x12,0xbf]
+
+s_cmp_eq_u64 -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x12,0xbf]
+
+s_cmp_eq_u64 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x12,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_eq_u64 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x12,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_eq_u64 s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x12,0xbf]
+
+s_cmp_eq_u64 s[2:3], s[100:101]
+// CHECK: [0x02,0x64,0x12,0xbf]
+
+s_cmp_eq_u64 s[2:3], flat_scratch
+// CHECK: [0x02,0x66,0x12,0xbf]
+
+s_cmp_eq_u64 s[2:3], vcc
+// CHECK: [0x02,0x6a,0x12,0xbf]
+
+s_cmp_eq_u64 s[2:3], tba
+// CHECK: [0x02,0x6c,0x12,0xbf]
+
+s_cmp_eq_u64 s[2:3], tma
+// CHECK: [0x02,0x6e,0x12,0xbf]
+
+s_cmp_eq_u64 s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x12,0xbf]
+
+s_cmp_eq_u64 s[2:3], exec
+// CHECK: [0x02,0x7e,0x12,0xbf]
+
+s_cmp_eq_u64 s[2:3], 0
+// CHECK: [0x02,0x80,0x12,0xbf]
+
+s_cmp_eq_u64 s[2:3], -1
+// CHECK: [0x02,0xc1,0x12,0xbf]
+
+s_cmp_eq_u64 s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x12,0xbf]
+
+s_cmp_eq_u64 s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x12,0xbf]
+
+s_cmp_eq_u64 s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x12,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_eq_u64 s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x12,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_lg_u64 s[2:3], s[4:5]
+// CHECK: [0x02,0x04,0x13,0xbf]
+
+s_cmp_lg_u64 s[4:5], s[4:5]
+// CHECK: [0x04,0x04,0x13,0xbf]
+
+s_cmp_lg_u64 s[100:101], s[4:5]
+// CHECK: [0x64,0x04,0x13,0xbf]
+
+s_cmp_lg_u64 flat_scratch, s[4:5]
+// CHECK: [0x66,0x04,0x13,0xbf]
+
+s_cmp_lg_u64 vcc, s[4:5]
+// CHECK: [0x6a,0x04,0x13,0xbf]
+
+s_cmp_lg_u64 tba, s[4:5]
+// CHECK: [0x6c,0x04,0x13,0xbf]
+
+s_cmp_lg_u64 tma, s[4:5]
+// CHECK: [0x6e,0x04,0x13,0xbf]
+
+s_cmp_lg_u64 ttmp[10:11], s[4:5]
+// CHECK: [0x7a,0x04,0x13,0xbf]
+
+s_cmp_lg_u64 exec, s[4:5]
+// CHECK: [0x7e,0x04,0x13,0xbf]
+
+s_cmp_lg_u64 0, s[4:5]
+// CHECK: [0x80,0x04,0x13,0xbf]
+
+s_cmp_lg_u64 -1, s[4:5]
+// CHECK: [0xc1,0x04,0x13,0xbf]
+
+s_cmp_lg_u64 0.5, s[4:5]
+// CHECK: [0xf0,0x04,0x13,0xbf]
+
+s_cmp_lg_u64 -4.0, s[4:5]
+// CHECK: [0xf7,0x04,0x13,0xbf]
+
+s_cmp_lg_u64 0xaf123456, s[4:5]
+// CHECK: [0xff,0x04,0x13,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_lg_u64 0x3f717273, s[4:5]
+// CHECK: [0xff,0x04,0x13,0xbf,0x73,0x72,0x71,0x3f]
+
+s_cmp_lg_u64 s[2:3], s[6:7]
+// CHECK: [0x02,0x06,0x13,0xbf]
+
+s_cmp_lg_u64 s[2:3], s[100:101]
+// CHECK: [0x02,0x64,0x13,0xbf]
+
+s_cmp_lg_u64 s[2:3], flat_scratch
+// CHECK: [0x02,0x66,0x13,0xbf]
+
+s_cmp_lg_u64 s[2:3], vcc
+// CHECK: [0x02,0x6a,0x13,0xbf]
+
+s_cmp_lg_u64 s[2:3], tba
+// CHECK: [0x02,0x6c,0x13,0xbf]
+
+s_cmp_lg_u64 s[2:3], tma
+// CHECK: [0x02,0x6e,0x13,0xbf]
+
+s_cmp_lg_u64 s[2:3], ttmp[10:11]
+// CHECK: [0x02,0x7a,0x13,0xbf]
+
+s_cmp_lg_u64 s[2:3], exec
+// CHECK: [0x02,0x7e,0x13,0xbf]
+
+s_cmp_lg_u64 s[2:3], 0
+// CHECK: [0x02,0x80,0x13,0xbf]
+
+s_cmp_lg_u64 s[2:3], -1
+// CHECK: [0x02,0xc1,0x13,0xbf]
+
+s_cmp_lg_u64 s[2:3], 0.5
+// CHECK: [0x02,0xf0,0x13,0xbf]
+
+s_cmp_lg_u64 s[2:3], -4.0
+// CHECK: [0x02,0xf7,0x13,0xbf]
+
+s_cmp_lg_u64 s[2:3], 0xaf123456
+// CHECK: [0x02,0xff,0x13,0xbf,0x56,0x34,0x12,0xaf]
+
+s_cmp_lg_u64 s[2:3], 0x3f717273
+// CHECK: [0x02,0xff,0x13,0xbf,0x73,0x72,0x71,0x3f]
+
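+// SOPK forms below: the 16-bit immediate is packed into the low halfword of
+// the instruction word, so 0x3141 shows up as the leading bytes 0x41,0x31.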
+s_movk_i32 s5, 0x3141
+// CHECK: [0x41,0x31,0x05,0xb0]
+
+s_movk_i32 s101, 0x3141
+// CHECK: [0x41,0x31,0x65,0xb0]
+
+s_movk_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0x66,0xb0]
+
+s_movk_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0x67,0xb0]
+
+s_movk_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0x6a,0xb0]
+
+s_movk_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0x6b,0xb0]
+
+s_movk_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0x6c,0xb0]
+
+s_movk_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0x6d,0xb0]
+
+s_movk_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0x6e,0xb0]
+
+s_movk_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0x6f,0xb0]
+
+s_movk_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0x7b,0xb0]
+
+s_movk_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0x7c,0xb0]
+
+s_movk_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0x7e,0xb0]
+
+s_movk_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0x7f,0xb0]
+
+s_movk_i32 s5, 0xc1d1
+// CHECK: [0xd1,0xc1,0x05,0xb0]
+
+s_cmovk_i32 s5, 0x3141
+// CHECK: [0x41,0x31,0x85,0xb0]
+
+s_cmovk_i32 s101, 0x3141
+// CHECK: [0x41,0x31,0xe5,0xb0]
+
+s_cmovk_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0xe6,0xb0]
+
+s_cmovk_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0xe7,0xb0]
+
+s_cmovk_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0xea,0xb0]
+
+s_cmovk_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0xeb,0xb0]
+
+s_cmovk_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0xec,0xb0]
+
+s_cmovk_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0xed,0xb0]
+
+s_cmovk_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0xee,0xb0]
+
+s_cmovk_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0xef,0xb0]
+
+s_cmovk_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0xfb,0xb0]
+
+s_cmovk_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0xfc,0xb0]
+
+s_cmovk_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0xfe,0xb0]
+
+s_cmovk_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0xff,0xb0]
+
+s_cmovk_i32 s5, 0xc1d1
+// CHECK: [0xd1,0xc1,0x85,0xb0]
+
+s_cmpk_eq_i32 s1, 0x3141
+// CHECK: [0x41,0x31,0x01,0xb1]
+
+s_cmpk_eq_i32 s101, 0x3141
+// CHECK: [0x41,0x31,0x65,0xb1]
+
+s_cmpk_eq_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0x66,0xb1]
+
+s_cmpk_eq_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0x67,0xb1]
+
+s_cmpk_eq_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0x6a,0xb1]
+
+s_cmpk_eq_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0x6b,0xb1]
+
+s_cmpk_eq_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0x6c,0xb1]
+
+s_cmpk_eq_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0x6d,0xb1]
+
+s_cmpk_eq_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0x6e,0xb1]
+
+s_cmpk_eq_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0x6f,0xb1]
+
+s_cmpk_eq_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0x7b,0xb1]
+
+s_cmpk_eq_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0x7c,0xb1]
+
+s_cmpk_eq_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0x7e,0xb1]
+
+s_cmpk_eq_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0x7f,0xb1]
+
+s_cmpk_eq_i32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x01,0xb1]
+
+s_cmpk_lg_i32 s1, 0x3141
+// CHECK: [0x41,0x31,0x81,0xb1]
+
+s_cmpk_lg_i32 s101, 0x3141
+// CHECK: [0x41,0x31,0xe5,0xb1]
+
+s_cmpk_lg_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0xe6,0xb1]
+
+s_cmpk_lg_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0xe7,0xb1]
+
+s_cmpk_lg_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0xea,0xb1]
+
+s_cmpk_lg_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0xeb,0xb1]
+
+s_cmpk_lg_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0xec,0xb1]
+
+s_cmpk_lg_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0xed,0xb1]
+
+s_cmpk_lg_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0xee,0xb1]
+
+s_cmpk_lg_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0xef,0xb1]
+
+s_cmpk_lg_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0xfb,0xb1]
+
+s_cmpk_lg_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0xfc,0xb1]
+
+s_cmpk_lg_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0xfe,0xb1]
+
+s_cmpk_lg_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0xff,0xb1]
+
+s_cmpk_lg_i32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x81,0xb1]
+
+s_cmpk_gt_i32 s1, 0x3141
+// CHECK: [0x41,0x31,0x01,0xb2]
+
+s_cmpk_gt_i32 s101, 0x3141
+// CHECK: [0x41,0x31,0x65,0xb2]
+
+s_cmpk_gt_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0x66,0xb2]
+
+s_cmpk_gt_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0x67,0xb2]
+
+s_cmpk_gt_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0x6a,0xb2]
+
+s_cmpk_gt_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0x6b,0xb2]
+
+s_cmpk_gt_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0x6c,0xb2]
+
+s_cmpk_gt_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0x6d,0xb2]
+
+s_cmpk_gt_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0x6e,0xb2]
+
+s_cmpk_gt_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0x6f,0xb2]
+
+s_cmpk_gt_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0x7b,0xb2]
+
+s_cmpk_gt_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0x7c,0xb2]
+
+s_cmpk_gt_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0x7e,0xb2]
+
+s_cmpk_gt_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0x7f,0xb2]
+
+s_cmpk_gt_i32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x01,0xb2]
+
+s_cmpk_ge_i32 s1, 0x3141
+// CHECK: [0x41,0x31,0x81,0xb2]
+
+s_cmpk_ge_i32 s101, 0x3141
+// CHECK: [0x41,0x31,0xe5,0xb2]
+
+s_cmpk_ge_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0xe6,0xb2]
+
+s_cmpk_ge_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0xe7,0xb2]
+
+s_cmpk_ge_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0xea,0xb2]
+
+s_cmpk_ge_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0xeb,0xb2]
+
+s_cmpk_ge_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0xec,0xb2]
+
+s_cmpk_ge_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0xed,0xb2]
+
+s_cmpk_ge_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0xee,0xb2]
+
+s_cmpk_ge_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0xef,0xb2]
+
+s_cmpk_ge_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0xfb,0xb2]
+
+s_cmpk_ge_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0xfc,0xb2]
+
+s_cmpk_ge_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0xfe,0xb2]
+
+s_cmpk_ge_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0xff,0xb2]
+
+s_cmpk_ge_i32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x81,0xb2]
+
+s_cmpk_lt_i32 s1, 0x3141
+// CHECK: [0x41,0x31,0x01,0xb3]
+
+s_cmpk_lt_i32 s101, 0x3141
+// CHECK: [0x41,0x31,0x65,0xb3]
+
+s_cmpk_lt_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0x66,0xb3]
+
+s_cmpk_lt_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0x67,0xb3]
+
+s_cmpk_lt_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0x6a,0xb3]
+
+s_cmpk_lt_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0x6b,0xb3]
+
+s_cmpk_lt_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0x6c,0xb3]
+
+s_cmpk_lt_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0x6d,0xb3]
+
+s_cmpk_lt_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0x6e,0xb3]
+
+s_cmpk_lt_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0x6f,0xb3]
+
+s_cmpk_lt_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0x7b,0xb3]
+
+s_cmpk_lt_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0x7c,0xb3]
+
+s_cmpk_lt_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0x7e,0xb3]
+
+s_cmpk_lt_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0x7f,0xb3]
+
+s_cmpk_lt_i32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x01,0xb3]
+
+s_cmpk_le_i32 s1, 0x3141
+// CHECK: [0x41,0x31,0x81,0xb3]
+
+s_cmpk_le_i32 s101, 0x3141
+// CHECK: [0x41,0x31,0xe5,0xb3]
+
+s_cmpk_le_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0xe6,0xb3]
+
+s_cmpk_le_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0xe7,0xb3]
+
+s_cmpk_le_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0xea,0xb3]
+
+s_cmpk_le_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0xeb,0xb3]
+
+s_cmpk_le_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0xec,0xb3]
+
+s_cmpk_le_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0xed,0xb3]
+
+s_cmpk_le_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0xee,0xb3]
+
+s_cmpk_le_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0xef,0xb3]
+
+s_cmpk_le_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0xfb,0xb3]
+
+s_cmpk_le_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0xfc,0xb3]
+
+s_cmpk_le_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0xfe,0xb3]
+
+s_cmpk_le_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0xff,0xb3]
+
+s_cmpk_le_i32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x81,0xb3]
+
+s_cmpk_eq_u32 s1, 0x3141
+// CHECK: [0x41,0x31,0x01,0xb4]
+
+s_cmpk_eq_u32 s101, 0x3141
+// CHECK: [0x41,0x31,0x65,0xb4]
+
+s_cmpk_eq_u32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0x66,0xb4]
+
+s_cmpk_eq_u32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0x67,0xb4]
+
+s_cmpk_eq_u32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0x6a,0xb4]
+
+s_cmpk_eq_u32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0x6b,0xb4]
+
+s_cmpk_eq_u32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0x6c,0xb4]
+
+s_cmpk_eq_u32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0x6d,0xb4]
+
+s_cmpk_eq_u32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0x6e,0xb4]
+
+s_cmpk_eq_u32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0x6f,0xb4]
+
+s_cmpk_eq_u32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0x7b,0xb4]
+
+s_cmpk_eq_u32 m0, 0x3141
+// CHECK: [0x41,0x31,0x7c,0xb4]
+
+s_cmpk_eq_u32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0x7e,0xb4]
+
+s_cmpk_eq_u32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0x7f,0xb4]
+
+s_cmpk_eq_u32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x01,0xb4]
+
+s_cmpk_lg_u32 s1, 0x3141
+// CHECK: [0x41,0x31,0x81,0xb4]
+
+s_cmpk_lg_u32 s101, 0x3141
+// CHECK: [0x41,0x31,0xe5,0xb4]
+
+s_cmpk_lg_u32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0xe6,0xb4]
+
+s_cmpk_lg_u32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0xe7,0xb4]
+
+s_cmpk_lg_u32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0xea,0xb4]
+
+s_cmpk_lg_u32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0xeb,0xb4]
+
+s_cmpk_lg_u32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0xec,0xb4]
+
+s_cmpk_lg_u32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0xed,0xb4]
+
+s_cmpk_lg_u32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0xee,0xb4]
+
+s_cmpk_lg_u32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0xef,0xb4]
+
+s_cmpk_lg_u32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0xfb,0xb4]
+
+s_cmpk_lg_u32 m0, 0x3141
+// CHECK: [0x41,0x31,0xfc,0xb4]
+
+s_cmpk_lg_u32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0xfe,0xb4]
+
+s_cmpk_lg_u32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0xff,0xb4]
+
+s_cmpk_lg_u32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x81,0xb4]
+
+s_cmpk_gt_u32 s1, 0x3141
+// CHECK: [0x41,0x31,0x01,0xb5]
+
+s_cmpk_gt_u32 s101, 0x3141
+// CHECK: [0x41,0x31,0x65,0xb5]
+
+s_cmpk_gt_u32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0x66,0xb5]
+
+s_cmpk_gt_u32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0x67,0xb5]
+
+s_cmpk_gt_u32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0x6a,0xb5]
+
+s_cmpk_gt_u32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0x6b,0xb5]
+
+s_cmpk_gt_u32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0x6c,0xb5]
+
+s_cmpk_gt_u32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0x6d,0xb5]
+
+s_cmpk_gt_u32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0x6e,0xb5]
+
+s_cmpk_gt_u32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0x6f,0xb5]
+
+s_cmpk_gt_u32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0x7b,0xb5]
+
+s_cmpk_gt_u32 m0, 0x3141
+// CHECK: [0x41,0x31,0x7c,0xb5]
+
+s_cmpk_gt_u32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0x7e,0xb5]
+
+s_cmpk_gt_u32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0x7f,0xb5]
+
+s_cmpk_gt_u32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x01,0xb5]
+
+s_cmpk_ge_u32 s1, 0x3141
+// CHECK: [0x41,0x31,0x81,0xb5]
+
+s_cmpk_ge_u32 s101, 0x3141
+// CHECK: [0x41,0x31,0xe5,0xb5]
+
+s_cmpk_ge_u32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0xe6,0xb5]
+
+s_cmpk_ge_u32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0xe7,0xb5]
+
+s_cmpk_ge_u32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0xea,0xb5]
+
+s_cmpk_ge_u32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0xeb,0xb5]
+
+s_cmpk_ge_u32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0xec,0xb5]
+
+s_cmpk_ge_u32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0xed,0xb5]
+
+s_cmpk_ge_u32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0xee,0xb5]
+
+s_cmpk_ge_u32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0xef,0xb5]
+
+s_cmpk_ge_u32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0xfb,0xb5]
+
+s_cmpk_ge_u32 m0, 0x3141
+// CHECK: [0x41,0x31,0xfc,0xb5]
+
+s_cmpk_ge_u32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0xfe,0xb5]
+
+s_cmpk_ge_u32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0xff,0xb5]
+
+s_cmpk_ge_u32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x81,0xb5]
+
+s_cmpk_lt_u32 s1, 0x3141
+// CHECK: [0x41,0x31,0x01,0xb6]
+
+s_cmpk_lt_u32 s101, 0x3141
+// CHECK: [0x41,0x31,0x65,0xb6]
+
+s_cmpk_lt_u32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0x66,0xb6]
+
+s_cmpk_lt_u32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0x67,0xb6]
+
+s_cmpk_lt_u32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0x6a,0xb6]
+
+s_cmpk_lt_u32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0x6b,0xb6]
+
+s_cmpk_lt_u32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0x6c,0xb6]
+
+s_cmpk_lt_u32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0x6d,0xb6]
+
+s_cmpk_lt_u32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0x6e,0xb6]
+
+s_cmpk_lt_u32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0x6f,0xb6]
+
+s_cmpk_lt_u32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0x7b,0xb6]
+
+s_cmpk_lt_u32 m0, 0x3141
+// CHECK: [0x41,0x31,0x7c,0xb6]
+
+s_cmpk_lt_u32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0x7e,0xb6]
+
+s_cmpk_lt_u32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0x7f,0xb6]
+
+s_cmpk_lt_u32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x01,0xb6]
+
+s_cmpk_le_u32 s1, 0x3141
+// CHECK: [0x41,0x31,0x81,0xb6]
+
+s_cmpk_le_u32 s101, 0x3141
+// CHECK: [0x41,0x31,0xe5,0xb6]
+
+s_cmpk_le_u32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0xe6,0xb6]
+
+s_cmpk_le_u32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0xe7,0xb6]
+
+s_cmpk_le_u32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0xea,0xb6]
+
+s_cmpk_le_u32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0xeb,0xb6]
+
+s_cmpk_le_u32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0xec,0xb6]
+
+s_cmpk_le_u32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0xed,0xb6]
+
+s_cmpk_le_u32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0xee,0xb6]
+
+s_cmpk_le_u32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0xef,0xb6]
+
+s_cmpk_le_u32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0xfb,0xb6]
+
+s_cmpk_le_u32 m0, 0x3141
+// CHECK: [0x41,0x31,0xfc,0xb6]
+
+s_cmpk_le_u32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0xfe,0xb6]
+
+s_cmpk_le_u32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0xff,0xb6]
+
+s_cmpk_le_u32 s1, 0xc1d1
+// CHECK: [0xd1,0xc1,0x81,0xb6]
+
+s_addk_i32 s5, 0x3141
+// CHECK: [0x41,0x31,0x05,0xb7]
+
+s_addk_i32 s101, 0x3141
+// CHECK: [0x41,0x31,0x65,0xb7]
+
+s_addk_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0x66,0xb7]
+
+s_addk_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0x67,0xb7]
+
+s_addk_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0x6a,0xb7]
+
+s_addk_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0x6b,0xb7]
+
+s_addk_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0x6c,0xb7]
+
+s_addk_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0x6d,0xb7]
+
+s_addk_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0x6e,0xb7]
+
+s_addk_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0x6f,0xb7]
+
+s_addk_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0x7b,0xb7]
+
+s_addk_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0x7c,0xb7]
+
+s_addk_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0x7e,0xb7]
+
+s_addk_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0x7f,0xb7]
+
+s_addk_i32 s5, 0xc1d1
+// CHECK: [0xd1,0xc1,0x05,0xb7]
+
+s_mulk_i32 s5, 0x3141
+// CHECK: [0x41,0x31,0x85,0xb7]
+
+s_mulk_i32 s101, 0x3141
+// CHECK: [0x41,0x31,0xe5,0xb7]
+
+s_mulk_i32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0xe6,0xb7]
+
+s_mulk_i32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0xe7,0xb7]
+
+s_mulk_i32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0xea,0xb7]
+
+s_mulk_i32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0xeb,0xb7]
+
+s_mulk_i32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0xec,0xb7]
+
+s_mulk_i32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0xed,0xb7]
+
+s_mulk_i32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0xee,0xb7]
+
+s_mulk_i32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0xef,0xb7]
+
+s_mulk_i32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0xfb,0xb7]
+
+s_mulk_i32 m0, 0x3141
+// CHECK: [0x41,0x31,0xfc,0xb7]
+
+s_mulk_i32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0xfe,0xb7]
+
+s_mulk_i32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0xff,0xb7]
+
+s_mulk_i32 s5, 0xc1d1
+// CHECK: [0xd1,0xc1,0x85,0xb7]
+
+s_cbranch_i_fork s[2:3], 12609
+// CHECK: [0x41,0x31,0x02,0xb8]
+
+s_cbranch_i_fork s[4:5], 12609
+// CHECK: [0x41,0x31,0x04,0xb8]
+
+s_cbranch_i_fork s[100:101], 12609
+// CHECK: [0x41,0x31,0x64,0xb8]
+
+s_cbranch_i_fork flat_scratch, 12609
+// CHECK: [0x41,0x31,0x66,0xb8]
+
+s_cbranch_i_fork vcc, 12609
+// CHECK: [0x41,0x31,0x6a,0xb8]
+
+s_cbranch_i_fork tba, 12609
+// CHECK: [0x41,0x31,0x6c,0xb8]
+
+s_cbranch_i_fork tma, 12609
+// CHECK: [0x41,0x31,0x6e,0xb8]
+
+s_cbranch_i_fork ttmp[10:11], 12609
+// CHECK: [0x41,0x31,0x7a,0xb8]
+
+s_cbranch_i_fork exec, 12609
+// CHECK: [0x41,0x31,0x7e,0xb8]
+
+s_cbranch_i_fork s[2:3], 49617
+// CHECK: [0xd1,0xc1,0x02,0xb8]
+
+s_getreg_b32 s5, 0x3141
+// CHECK: [0x41,0x31,0x85,0xb8]
+
+s_getreg_b32 s101, 0x3141
+// CHECK: [0x41,0x31,0xe5,0xb8]
+
+s_getreg_b32 flat_scratch_lo, 0x3141
+// CHECK: [0x41,0x31,0xe6,0xb8]
+
+s_getreg_b32 flat_scratch_hi, 0x3141
+// CHECK: [0x41,0x31,0xe7,0xb8]
+
+s_getreg_b32 vcc_lo, 0x3141
+// CHECK: [0x41,0x31,0xea,0xb8]
+
+s_getreg_b32 vcc_hi, 0x3141
+// CHECK: [0x41,0x31,0xeb,0xb8]
+
+s_getreg_b32 tba_lo, 0x3141
+// CHECK: [0x41,0x31,0xec,0xb8]
+
+s_getreg_b32 tba_hi, 0x3141
+// CHECK: [0x41,0x31,0xed,0xb8]
+
+s_getreg_b32 tma_lo, 0x3141
+// CHECK: [0x41,0x31,0xee,0xb8]
+
+s_getreg_b32 tma_hi, 0x3141
+// CHECK: [0x41,0x31,0xef,0xb8]
+
+s_getreg_b32 ttmp11, 0x3141
+// CHECK: [0x41,0x31,0xfb,0xb8]
+
+s_getreg_b32 m0, 0x3141
+// CHECK: [0x41,0x31,0xfc,0xb8]
+
+s_getreg_b32 exec_lo, 0x3141
+// CHECK: [0x41,0x31,0xfe,0xb8]
+
+s_getreg_b32 exec_hi, 0x3141
+// CHECK: [0x41,0x31,0xff,0xb8]
+
+s_getreg_b32 s5, 0xc1d1
+// CHECK: [0xd1,0xc1,0x85,0xb8]
+
+s_setreg_b32 0x3141, s1
+// CHECK: [0x41,0x31,0x01,0xb9]
+
+s_setreg_b32 0xc1d1, s1
+// CHECK: [0xd1,0xc1,0x01,0xb9]
+
+s_setreg_b32 0x3141, s101
+// CHECK: [0x41,0x31,0x65,0xb9]
+
+s_setreg_b32 0x3141, flat_scratch_lo
+// CHECK: [0x41,0x31,0x66,0xb9]
+
+s_setreg_b32 0x3141, flat_scratch_hi
+// CHECK: [0x41,0x31,0x67,0xb9]
+
+s_setreg_b32 0x3141, vcc_lo
+// CHECK: [0x41,0x31,0x6a,0xb9]
+
+s_setreg_b32 0x3141, vcc_hi
+// CHECK: [0x41,0x31,0x6b,0xb9]
+
+s_setreg_b32 0x3141, tba_lo
+// CHECK: [0x41,0x31,0x6c,0xb9]
+
+s_setreg_b32 0x3141, tba_hi
+// CHECK: [0x41,0x31,0x6d,0xb9]
+
+s_setreg_b32 0x3141, tma_lo
+// CHECK: [0x41,0x31,0x6e,0xb9]
+
+s_setreg_b32 0x3141, tma_hi
+// CHECK: [0x41,0x31,0x6f,0xb9]
+
+s_setreg_b32 0x3141, ttmp11
+// CHECK: [0x41,0x31,0x7b,0xb9]
+
+s_setreg_b32 0x3141, m0
+// CHECK: [0x41,0x31,0x7c,0xb9]
+
+s_setreg_b32 0x3141, exec_lo
+// CHECK: [0x41,0x31,0x7e,0xb9]
+
+s_setreg_b32 0x3141, exec_hi
+// CHECK: [0x41,0x31,0x7f,0xb9]
+
+s_setreg_imm32_b32 0x3141, 0x11213141
+// CHECK: [0x41,0x31,0x00,0xba,0x41,0x31,0x21,0x11]
+
+s_setreg_imm32_b32 0xc1d1, 0x11213141
+// CHECK: [0xd1,0xc1,0x00,0xba,0x41,0x31,0x21,0x11]
+
+s_setreg_imm32_b32 0x3141, 0xa1b1c1d1
+// CHECK: [0x41,0x31,0x00,0xba,0xd1,0xc1,0xb1,0xa1]
+
+s_nop 0x3141
+// CHECK: [0x41,0x31,0x80,0xbf]
+
+s_nop 0xc1d1
+// CHECK: [0xd1,0xc1,0x80,0xbf]
+
+s_endpgm
+// CHECK: [0x00,0x00,0x81,0xbf]
+
+s_branch 12609
+// CHECK: [0x41,0x31,0x82,0xbf]
+
+s_branch 49617
+// CHECK: [0xd1,0xc1,0x82,0xbf]
+
+s_cbranch_scc0 12609
+// CHECK: [0x41,0x31,0x84,0xbf]
+
+s_cbranch_scc0 49617
+// CHECK: [0xd1,0xc1,0x84,0xbf]
+
+s_cbranch_scc1 12609
+// CHECK: [0x41,0x31,0x85,0xbf]
+
+s_cbranch_scc1 49617
+// CHECK: [0xd1,0xc1,0x85,0xbf]
+
+s_cbranch_vccz 12609
+// CHECK: [0x41,0x31,0x86,0xbf]
+
+s_cbranch_vccz 49617
+// CHECK: [0xd1,0xc1,0x86,0xbf]
+
+s_cbranch_vccnz 12609
+// CHECK: [0x41,0x31,0x87,0xbf]
+
+s_cbranch_vccnz 49617
+// CHECK: [0xd1,0xc1,0x87,0xbf]
+
+s_cbranch_execz 12609
+// CHECK: [0x41,0x31,0x88,0xbf]
+
+s_cbranch_execz 49617
+// CHECK: [0xd1,0xc1,0x88,0xbf]
+
+s_cbranch_execnz 12609
+// CHECK: [0x41,0x31,0x89,0xbf]
+
+s_cbranch_execnz 49617
+// CHECK: [0xd1,0xc1,0x89,0xbf]
+
+s_barrier
+// CHECK: [0x00,0x00,0x8a,0xbf]
+
+s_waitcnt 0x3141
+// CHECK: [0x41,0x31,0x8c,0xbf]
+
+s_waitcnt 0xc1d1
+// CHECK: [0xd1,0xc1,0x8c,0xbf]
+
+s_sethalt 0x3141
+// CHECK: [0x41,0x31,0x8d,0xbf]
+
+s_sethalt 0xc1d1
+// CHECK: [0xd1,0xc1,0x8d,0xbf]
+
+s_sleep 0x3141
+// CHECK: [0x41,0x31,0x8e,0xbf]
+
+s_sleep 0xc1d1
+// CHECK: [0xd1,0xc1,0x8e,0xbf]
+
+s_setprio 0x3141
+// CHECK: [0x41,0x31,0x8f,0xbf]
+
+s_setprio 0xc1d1
+// CHECK: [0xd1,0xc1,0x8f,0xbf]
+
+s_sendmsg 0x3141
+// CHECK: [0x41,0x31,0x90,0xbf]
+
+s_sendmsg 0xc1d1
+// CHECK: [0xd1,0xc1,0x90,0xbf]
+
+s_sendmsghalt 0x3141
+// CHECK: [0x41,0x31,0x91,0xbf]
+
+s_sendmsghalt 0xc1d1
+// CHECK: [0xd1,0xc1,0x91,0xbf]
+
+s_trap 0x3141
+// CHECK: [0x41,0x31,0x92,0xbf]
+
+s_trap 0xc1d1
+// CHECK: [0xd1,0xc1,0x92,0xbf]
+
+s_icache_inv
+// CHECK: [0x00,0x00,0x93,0xbf]
+
+s_incperflevel 0x3141
+// CHECK: [0x41,0x31,0x94,0xbf]
+
+s_incperflevel 0xc1d1
+// CHECK: [0xd1,0xc1,0x94,0xbf]
+
+s_decperflevel 0x3141
+// CHECK: [0x41,0x31,0x95,0xbf]
+
+s_decperflevel 0xc1d1
+// CHECK: [0xd1,0xc1,0x95,0xbf]
+
+s_ttracedata
+// CHECK: [0x00,0x00,0x96,0xbf]
+
+s_set_gpr_idx_off
+// CHECK: [0x00,0x00,0x9c,0xbf]
+
+s_set_gpr_idx_mode 0x0
+// CHECK: [0x00,0x00,0x9d,0xbf]
+
+s_set_gpr_idx_mode 0x1
+// CHECK: [0x01,0x00,0x9d,0xbf]
+
+s_set_gpr_idx_mode 0xF
+// CHECK: [0x0f,0x00,0x9d,0xbf]
+
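+// VINTRP forms: as the tests below show, the attribute/channel pair is
+// packed into the second byte as attr*4+chan (attr31.x -> 0x7c,
+// attr0.w -> 0x03), and v_interp_mov's parameter slots p10/p20/p0 encode
+// as source values 0/1/2.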
+v_interp_p1_f32 v5, v1, attr0.x
+// CHECK: [0x01,0x00,0x14,0xd4]
+
+v_interp_p1_f32 v255, v1, attr0.x
+// CHECK: [0x01,0x00,0xfc,0xd7]
+
+v_interp_p1_f32 v5, v255, attr0.x
+// CHECK: [0xff,0x00,0x14,0xd4]
+
+v_interp_p1_f32 v5, v1, attr1.x
+// CHECK: [0x01,0x04,0x14,0xd4]
+
+v_interp_p1_f32 v5, v1, attr31.x
+// CHECK: [0x01,0x7c,0x14,0xd4]
+
+v_interp_p1_f32 v5, v1, attr32.x
+// CHECK: [0x01,0x80,0x14,0xd4]
+
+v_interp_p1_f32 v5, v1, attr0.y
+// CHECK: [0x01,0x01,0x14,0xd4]
+
+v_interp_p1_f32 v5, v1, attr0.z
+// CHECK: [0x01,0x02,0x14,0xd4]
+
+v_interp_p1_f32 v5, v1, attr0.w
+// CHECK: [0x01,0x03,0x14,0xd4]
+
+v_interp_p2_f32 v5, v1, attr0.x
+// CHECK: [0x01,0x00,0x15,0xd4]
+
+v_interp_p2_f32 v255, v1, attr0.x
+// CHECK: [0x01,0x00,0xfd,0xd7]
+
+v_interp_p2_f32 v5, v255, attr0.x
+// CHECK: [0xff,0x00,0x15,0xd4]
+
+v_interp_p2_f32 v5, v1, attr1.x
+// CHECK: [0x01,0x04,0x15,0xd4]
+
+v_interp_p2_f32 v5, v1, attr31.x
+// CHECK: [0x01,0x7c,0x15,0xd4]
+
+v_interp_p2_f32 v5, v1, attr32.x
+// CHECK: [0x01,0x80,0x15,0xd4]
+
+v_interp_p2_f32 v5, v1, attr0.y
+// CHECK: [0x01,0x01,0x15,0xd4]
+
+v_interp_p2_f32 v5, v1, attr0.z
+// CHECK: [0x01,0x02,0x15,0xd4]
+
+v_interp_p2_f32 v5, v1, attr0.w
+// CHECK: [0x01,0x03,0x15,0xd4]
+
+v_interp_mov_f32 v5, p10, attr0.x
+// CHECK: [0x00,0x00,0x16,0xd4]
+
+v_interp_mov_f32 v255, p10, attr0.x
+// CHECK: [0x00,0x00,0xfe,0xd7]
+
+v_interp_mov_f32 v5, p20, attr0.x
+// CHECK: [0x01,0x00,0x16,0xd4]
+
+v_interp_mov_f32 v5, p0, attr0.x
+// CHECK: [0x02,0x00,0x16,0xd4]
+
+v_interp_mov_f32 v5, p10, attr1.x
+// CHECK: [0x00,0x04,0x16,0xd4]
+
+v_interp_mov_f32 v5, p10, attr31.x
+// CHECK: [0x00,0x7c,0x16,0xd4]
+
+v_interp_mov_f32 v5, p10, attr32.x
+// CHECK: [0x00,0x80,0x16,0xd4]
+
+v_interp_mov_f32 v5, p10, attr0.y
+// CHECK: [0x00,0x01,0x16,0xd4]
+
+v_interp_mov_f32 v5, p10, attr0.z
+// CHECK: [0x00,0x02,0x16,0xd4]
+
+v_interp_mov_f32 v5, p10, attr0.w
+// CHECK: [0x00,0x03,0x16,0xd4]
+
+v_nop
+// CHECK: [0x00,0x00,0x00,0x7e]
+
+v_nop_e64
+// CHECK: [0x00,0x00,0x40,0xd1,0x00,0x00,0x00,0x00]
+
+v_mov_b32 v5, s1
+// CHECK: [0x01,0x02,0x0a,0x7e]
+
+v_mov_b32 v255, s1
+// CHECK: [0x01,0x02,0xfe,0x7f]
+
+v_mov_b32 v5, s101
+// CHECK: [0x65,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, flat_scratch_lo
+// CHECK: [0x66,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, flat_scratch_hi
+// CHECK: [0x67,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, vcc_lo
+// CHECK: [0x6a,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, vcc_hi
+// CHECK: [0x6b,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, tba_lo
+// CHECK: [0x6c,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, tba_hi
+// CHECK: [0x6d,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, tma_lo
+// CHECK: [0x6e,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, tma_hi
+// CHECK: [0x6f,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, ttmp11
+// CHECK: [0x7b,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, m0
+// CHECK: [0x7c,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, exec_lo
+// CHECK: [0x7e,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, exec_hi
+// CHECK: [0x7f,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, 0
+// CHECK: [0x80,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, -1
+// CHECK: [0xc1,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, 0.5
+// CHECK: [0xf0,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, -4.0
+// CHECK: [0xf7,0x02,0x0a,0x7e]
+
+v_mov_b32 v5, 0xaf123456
+// CHECK: [0xff,0x02,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_mov_b32 v5, 0x3f717273
+// CHECK: [0xff,0x02,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_mov_b32 v5, v1
+// CHECK: [0x01,0x03,0x0a,0x7e]
+
+v_mov_b32 v5, v255
+// CHECK: [0xff,0x03,0x0a,0x7e]
+
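+// The _e64 suffix forces the 8-byte VOP3 encoding of the same opcode; only
+// this form carries the neg (-), abs (| |), clamp, and mul/div output
+// modifiers exercised in later tests.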
+v_mov_b32_e64 v5, s1
+// CHECK: [0x05,0x00,0x41,0xd1,0x01,0x00,0x00,0x00]
+
+v_mov_b32_e64 v255, s1
+// CHECK: [0xff,0x00,0x41,0xd1,0x01,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, s101
+// CHECK: [0x05,0x00,0x41,0xd1,0x65,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x41,0xd1,0x66,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x41,0xd1,0x67,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x41,0xd1,0x6a,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x41,0xd1,0x6b,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x41,0xd1,0x6c,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x41,0xd1,0x6d,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x41,0xd1,0x6e,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x41,0xd1,0x6f,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x41,0xd1,0x7b,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, m0
+// CHECK: [0x05,0x00,0x41,0xd1,0x7c,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x41,0xd1,0x7e,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x41,0xd1,0x7f,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, 0
+// CHECK: [0x05,0x00,0x41,0xd1,0x80,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, -1
+// CHECK: [0x05,0x00,0x41,0xd1,0xc1,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x41,0xd1,0xf0,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x41,0xd1,0xf7,0x00,0x00,0x00]
+
+v_mov_b32_e64 v5, v1
+// CHECK: [0x05,0x00,0x41,0xd1,0x01,0x01,0x00,0x00]
+
+v_mov_b32_e64 v5, v255
+// CHECK: [0x05,0x00,0x41,0xd1,0xff,0x01,0x00,0x00]
+
+v_readfirstlane_b32 s5, v1
+// CHECK: [0x01,0x05,0x0a,0x7e]
+
+v_readfirstlane_b32 s101, v1
+// CHECK: [0x01,0x05,0xca,0x7e]
+
+v_readfirstlane_b32 flat_scratch_lo, v1
+// CHECK: [0x01,0x05,0xcc,0x7e]
+
+v_readfirstlane_b32 flat_scratch_hi, v1
+// CHECK: [0x01,0x05,0xce,0x7e]
+
+v_readfirstlane_b32 tba_lo, v1
+// CHECK: [0x01,0x05,0xd8,0x7e]
+
+v_readfirstlane_b32 tba_hi, v1
+// CHECK: [0x01,0x05,0xda,0x7e]
+
+v_readfirstlane_b32 tma_lo, v1
+// CHECK: [0x01,0x05,0xdc,0x7e]
+
+v_readfirstlane_b32 tma_hi, v1
+// CHECK: [0x01,0x05,0xde,0x7e]
+
+v_readfirstlane_b32 ttmp11, v1
+// CHECK: [0x01,0x05,0xf6,0x7e]
+
+v_readfirstlane_b32 s5, v255
+// CHECK: [0xff,0x05,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, s[2:3]
+// CHECK: [0x02,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v255, s[2:3]
+// CHECK: [0x02,0x06,0xfe,0x7f]
+
+v_cvt_i32_f64 v5, s[4:5]
+// CHECK: [0x04,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, s[100:101]
+// CHECK: [0x64,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, flat_scratch
+// CHECK: [0x66,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, vcc
+// CHECK: [0x6a,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, tba
+// CHECK: [0x6c,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, tma
+// CHECK: [0x6e,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, ttmp[10:11]
+// CHECK: [0x7a,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, exec
+// CHECK: [0x7e,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, 0
+// CHECK: [0x80,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, -1
+// CHECK: [0xc1,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, 0.5
+// CHECK: [0xf0,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, -4.0
+// CHECK: [0xf7,0x06,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, 0xaf123456
+// CHECK: [0xff,0x06,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_i32_f64 v5, 0x3f717273
+// CHECK: [0xff,0x06,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_i32_f64 v5, v[1:2]
+// CHECK: [0x01,0x07,0x0a,0x7e]
+
+v_cvt_i32_f64 v5, v[254:255]
+// CHECK: [0xfe,0x07,0x0a,0x7e]
+
+v_cvt_i32_f64_e64 v5, s[2:3]
+// CHECK: [0x05,0x00,0x43,0xd1,0x02,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v255, s[2:3]
+// CHECK: [0xff,0x00,0x43,0xd1,0x02,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, s[4:5]
+// CHECK: [0x05,0x00,0x43,0xd1,0x04,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, s[100:101]
+// CHECK: [0x05,0x00,0x43,0xd1,0x64,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, flat_scratch
+// CHECK: [0x05,0x00,0x43,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, vcc
+// CHECK: [0x05,0x00,0x43,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, tba
+// CHECK: [0x05,0x00,0x43,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, tma
+// CHECK: [0x05,0x00,0x43,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, ttmp[10:11]
+// CHECK: [0x05,0x00,0x43,0xd1,0x7a,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, exec
+// CHECK: [0x05,0x00,0x43,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, scc
+// CHECK: [0x05,0x00,0x43,0xd1,0xfd,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, v[1:2]
+// CHECK: [0x05,0x00,0x43,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, v[254:255]
+// CHECK: [0x05,0x00,0x43,0xd1,0xfe,0x01,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, -s[2:3]
+// CHECK: [0x05,0x00,0x43,0xd1,0x02,0x00,0x00,0x20]
+
+v_cvt_i32_f64_e64 v5, |s[2:3]|
+// CHECK: [0x05,0x01,0x43,0xd1,0x02,0x00,0x00,0x00]
+
+v_cvt_i32_f64_e64 v5, s[2:3] clamp
+// CHECK: [0x05,0x80,0x43,0xd1,0x02,0x00,0x00,0x00]
+
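+// VOP3 modifier pattern (here and in later _e64 tests): source negation
+// sets 0x20 in the final byte, |abs| sets 0x01 and clamp sets 0x80 in the
+// second byte, and the output modifiers mul:2/mul:4/div:2 select
+// 0x08/0x10/0x18 in the final byte.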
+v_cvt_f64_i32 v[5:6], s1
+// CHECK: [0x01,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[254:255], s1
+// CHECK: [0x01,0x08,0xfc,0x7f]
+
+v_cvt_f64_i32 v[5:6], s101
+// CHECK: [0x65,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], flat_scratch_lo
+// CHECK: [0x66,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], flat_scratch_hi
+// CHECK: [0x67,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], vcc_lo
+// CHECK: [0x6a,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], vcc_hi
+// CHECK: [0x6b,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], tba_lo
+// CHECK: [0x6c,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], tba_hi
+// CHECK: [0x6d,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], tma_lo
+// CHECK: [0x6e,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], tma_hi
+// CHECK: [0x6f,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], ttmp11
+// CHECK: [0x7b,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], m0
+// CHECK: [0x7c,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], exec_lo
+// CHECK: [0x7e,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], exec_hi
+// CHECK: [0x7f,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], 0
+// CHECK: [0x80,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], -1
+// CHECK: [0xc1,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], 0.5
+// CHECK: [0xf0,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], -4.0
+// CHECK: [0xf7,0x08,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], 0xaf123456
+// CHECK: [0xff,0x08,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f64_i32 v[5:6], 0x3f717273
+// CHECK: [0xff,0x08,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f64_i32 v[5:6], v1
+// CHECK: [0x01,0x09,0x0a,0x7e]
+
+v_cvt_f64_i32 v[5:6], v255
+// CHECK: [0xff,0x09,0x0a,0x7e]
+
+v_cvt_f64_i32_e64 v[5:6], s1
+// CHECK: [0x05,0x00,0x44,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[254:255], s1
+// CHECK: [0xfe,0x00,0x44,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], s101
+// CHECK: [0x05,0x00,0x44,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], flat_scratch_lo
+// CHECK: [0x05,0x00,0x44,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], flat_scratch_hi
+// CHECK: [0x05,0x00,0x44,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], vcc_lo
+// CHECK: [0x05,0x00,0x44,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], vcc_hi
+// CHECK: [0x05,0x00,0x44,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], tba_lo
+// CHECK: [0x05,0x00,0x44,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], tba_hi
+// CHECK: [0x05,0x00,0x44,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], tma_lo
+// CHECK: [0x05,0x00,0x44,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], tma_hi
+// CHECK: [0x05,0x00,0x44,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], ttmp11
+// CHECK: [0x05,0x00,0x44,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], m0
+// CHECK: [0x05,0x00,0x44,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], exec_lo
+// CHECK: [0x05,0x00,0x44,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], exec_hi
+// CHECK: [0x05,0x00,0x44,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], 0
+// CHECK: [0x05,0x00,0x44,0xd1,0x80,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], -1
+// CHECK: [0x05,0x00,0x44,0xd1,0xc1,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], 0.5
+// CHECK: [0x05,0x00,0x44,0xd1,0xf0,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], -4.0
+// CHECK: [0x05,0x00,0x44,0xd1,0xf7,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], v1
+// CHECK: [0x05,0x00,0x44,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], v255
+// CHECK: [0x05,0x00,0x44,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_f32_i32 v5, s1
+// CHECK: [0x01,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v255, s1
+// CHECK: [0x01,0x0a,0xfe,0x7f]
+
+v_cvt_f32_i32 v5, s101
+// CHECK: [0x65,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, flat_scratch_lo
+// CHECK: [0x66,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, flat_scratch_hi
+// CHECK: [0x67,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, vcc_lo
+// CHECK: [0x6a,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, vcc_hi
+// CHECK: [0x6b,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, tba_lo
+// CHECK: [0x6c,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, tba_hi
+// CHECK: [0x6d,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, tma_lo
+// CHECK: [0x6e,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, tma_hi
+// CHECK: [0x6f,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, ttmp11
+// CHECK: [0x7b,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, m0
+// CHECK: [0x7c,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, exec_lo
+// CHECK: [0x7e,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, exec_hi
+// CHECK: [0x7f,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, 0
+// CHECK: [0x80,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, -1
+// CHECK: [0xc1,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, 0.5
+// CHECK: [0xf0,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, -4.0
+// CHECK: [0xf7,0x0a,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, 0xaf123456
+// CHECK: [0xff,0x0a,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f32_i32 v5, 0x3f717273
+// CHECK: [0xff,0x0a,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f32_i32 v5, v1
+// CHECK: [0x01,0x0b,0x0a,0x7e]
+
+v_cvt_f32_i32 v5, v255
+// CHECK: [0xff,0x0b,0x0a,0x7e]
+
+v_cvt_f32_i32_e64 v5, s1
+// CHECK: [0x05,0x00,0x45,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v255, s1
+// CHECK: [0xff,0x00,0x45,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, s101
+// CHECK: [0x05,0x00,0x45,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x45,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x45,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x45,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x45,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x45,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x45,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x45,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x45,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x45,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, m0
+// CHECK: [0x05,0x00,0x45,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x45,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x45,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, 0
+// CHECK: [0x05,0x00,0x45,0xd1,0x80,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, -1
+// CHECK: [0x05,0x00,0x45,0xd1,0xc1,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x45,0xd1,0xf0,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x45,0xd1,0xf7,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, v1
+// CHECK: [0x05,0x00,0x45,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, v255
+// CHECK: [0x05,0x00,0x45,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_f32_u32 v5, s1
+// CHECK: [0x01,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v255, s1
+// CHECK: [0x01,0x0c,0xfe,0x7f]
+
+v_cvt_f32_u32 v5, s101
+// CHECK: [0x65,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, flat_scratch_lo
+// CHECK: [0x66,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, flat_scratch_hi
+// CHECK: [0x67,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, vcc_lo
+// CHECK: [0x6a,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, vcc_hi
+// CHECK: [0x6b,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, tba_lo
+// CHECK: [0x6c,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, tba_hi
+// CHECK: [0x6d,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, tma_lo
+// CHECK: [0x6e,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, tma_hi
+// CHECK: [0x6f,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, ttmp11
+// CHECK: [0x7b,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, m0
+// CHECK: [0x7c,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, exec_lo
+// CHECK: [0x7e,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, exec_hi
+// CHECK: [0x7f,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, 0
+// CHECK: [0x80,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, -1
+// CHECK: [0xc1,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, 0.5
+// CHECK: [0xf0,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, -4.0
+// CHECK: [0xf7,0x0c,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, 0xaf123456
+// CHECK: [0xff,0x0c,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f32_u32 v5, 0x3f717273
+// CHECK: [0xff,0x0c,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f32_u32 v5, v1
+// CHECK: [0x01,0x0d,0x0a,0x7e]
+
+v_cvt_f32_u32 v5, v255
+// CHECK: [0xff,0x0d,0x0a,0x7e]
+
+v_cvt_f32_u32_e64 v5, s1
+// CHECK: [0x05,0x00,0x46,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v255, s1
+// CHECK: [0xff,0x00,0x46,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, s101
+// CHECK: [0x05,0x00,0x46,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x46,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x46,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x46,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x46,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x46,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x46,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x46,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x46,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x46,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, m0
+// CHECK: [0x05,0x00,0x46,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x46,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x46,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, 0
+// CHECK: [0x05,0x00,0x46,0xd1,0x80,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, -1
+// CHECK: [0x05,0x00,0x46,0xd1,0xc1,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x46,0xd1,0xf0,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x46,0xd1,0xf7,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, v1
+// CHECK: [0x05,0x00,0x46,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, v255
+// CHECK: [0x05,0x00,0x46,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_u32_f32 v5, s1
+// CHECK: [0x01,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v255, s1
+// CHECK: [0x01,0x0e,0xfe,0x7f]
+
+v_cvt_u32_f32 v5, s101
+// CHECK: [0x65,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, vcc_lo
+// CHECK: [0x6a,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, vcc_hi
+// CHECK: [0x6b,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, tba_lo
+// CHECK: [0x6c,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, tba_hi
+// CHECK: [0x6d,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, tma_lo
+// CHECK: [0x6e,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, tma_hi
+// CHECK: [0x6f,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, ttmp11
+// CHECK: [0x7b,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, m0
+// CHECK: [0x7c,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, exec_lo
+// CHECK: [0x7e,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, exec_hi
+// CHECK: [0x7f,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, 0
+// CHECK: [0x80,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, -1
+// CHECK: [0xc1,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, 0.5
+// CHECK: [0xf0,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, -4.0
+// CHECK: [0xf7,0x0e,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, 0xaf123456
+// CHECK: [0xff,0x0e,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_u32_f32 v5, 0x3f717273
+// CHECK: [0xff,0x0e,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_u32_f32 v5, v1
+// CHECK: [0x01,0x0f,0x0a,0x7e]
+
+v_cvt_u32_f32 v5, v255
+// CHECK: [0xff,0x0f,0x0a,0x7e]
+
+v_cvt_u32_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x47,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x47,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x47,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x47,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x47,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x47,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x47,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x47,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x47,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x47,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x47,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x47,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x47,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x47,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x47,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x47,0xd1,0xfd,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x47,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x47,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x47,0xd1,0x01,0x00,0x00,0x20]
+
+v_cvt_u32_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x47,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_u32_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x47,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_i32_f32 v5, s1
+// CHECK: [0x01,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v255, s1
+// CHECK: [0x01,0x10,0xfe,0x7f]
+
+v_cvt_i32_f32 v5, s101
+// CHECK: [0x65,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, vcc_lo
+// CHECK: [0x6a,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, vcc_hi
+// CHECK: [0x6b,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, tba_lo
+// CHECK: [0x6c,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, tba_hi
+// CHECK: [0x6d,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, tma_lo
+// CHECK: [0x6e,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, tma_hi
+// CHECK: [0x6f,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, ttmp11
+// CHECK: [0x7b,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, m0
+// CHECK: [0x7c,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, exec_lo
+// CHECK: [0x7e,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, exec_hi
+// CHECK: [0x7f,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, 0
+// CHECK: [0x80,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, -1
+// CHECK: [0xc1,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, 0.5
+// CHECK: [0xf0,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, -4.0
+// CHECK: [0xf7,0x10,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, 0xaf123456
+// CHECK: [0xff,0x10,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_i32_f32 v5, 0x3f717273
+// CHECK: [0xff,0x10,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_i32_f32 v5, v1
+// CHECK: [0x01,0x11,0x0a,0x7e]
+
+v_cvt_i32_f32 v5, v255
+// CHECK: [0xff,0x11,0x0a,0x7e]
+
+v_cvt_i32_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x48,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x48,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x48,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x48,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x48,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x48,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x48,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x48,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x48,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x48,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x48,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x48,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x48,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x48,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x48,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x48,0xd1,0xfd,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x48,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x48,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x48,0xd1,0x01,0x00,0x00,0x20]
+
+v_cvt_i32_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x48,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_i32_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x48,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f16_f32 v5, s1
+// CHECK: [0x01,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v255, s1
+// CHECK: [0x01,0x14,0xfe,0x7f]
+
+v_cvt_f16_f32 v5, s101
+// CHECK: [0x65,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, vcc_lo
+// CHECK: [0x6a,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, vcc_hi
+// CHECK: [0x6b,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, tba_lo
+// CHECK: [0x6c,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, tba_hi
+// CHECK: [0x6d,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, tma_lo
+// CHECK: [0x6e,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, tma_hi
+// CHECK: [0x6f,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, ttmp11
+// CHECK: [0x7b,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, m0
+// CHECK: [0x7c,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, exec_lo
+// CHECK: [0x7e,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, exec_hi
+// CHECK: [0x7f,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, 0
+// CHECK: [0x80,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, -1
+// CHECK: [0xc1,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, 0.5
+// CHECK: [0xf0,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, -4.0
+// CHECK: [0xf7,0x14,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, 0xaf123456
+// CHECK: [0xff,0x14,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f16_f32 v5, 0x3f717273
+// CHECK: [0xff,0x14,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f16_f32 v5, v1
+// CHECK: [0x01,0x15,0x0a,0x7e]
+
+v_cvt_f16_f32 v5, v255
+// CHECK: [0xff,0x15,0x0a,0x7e]
+
+v_cvt_f16_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x4a,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x4a,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x4a,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x4a,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x4a,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x4a,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x4a,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x4a,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x4a,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x4a,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x4a,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x4a,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x4a,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x4a,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x4a,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x4a,0xd1,0xfd,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x4a,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x4a,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x4a,0xd1,0x01,0x00,0x00,0x20]
+
+v_cvt_f16_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x4a,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x4a,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f16_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x4a,0xd1,0x01,0x00,0x00,0x08]
+
+v_cvt_f16_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x4a,0xd1,0x01,0x00,0x00,0x10]
+
+v_cvt_f16_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x4a,0xd1,0x01,0x00,0x00,0x18]
+
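+// For the f16 source operand below, literal constants occupy only the low
+// 16 bits of the trailing literal dword (0xfe0b -> 0x0b,0xfe,0x00,0x00).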
+v_cvt_f32_f16 v5, s1
+// CHECK: [0x01,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v255, s1
+// CHECK: [0x01,0x16,0xfe,0x7f]
+
+v_cvt_f32_f16 v5, s101
+// CHECK: [0x65,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, flat_scratch_lo
+// CHECK: [0x66,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, flat_scratch_hi
+// CHECK: [0x67,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, vcc_lo
+// CHECK: [0x6a,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, vcc_hi
+// CHECK: [0x6b,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, tba_lo
+// CHECK: [0x6c,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, tba_hi
+// CHECK: [0x6d,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, tma_lo
+// CHECK: [0x6e,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, tma_hi
+// CHECK: [0x6f,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, ttmp11
+// CHECK: [0x7b,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, m0
+// CHECK: [0x7c,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, exec_lo
+// CHECK: [0x7e,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, exec_hi
+// CHECK: [0x7f,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, 0
+// CHECK: [0x80,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, -1
+// CHECK: [0xc1,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, 0.5
+// CHECK: [0xf0,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, -4.0
+// CHECK: [0xf7,0x16,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, 0xfe0b
+// CHECK: [0xff,0x16,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_cvt_f32_f16 v5, 0x3456
+// CHECK: [0xff,0x16,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_cvt_f32_f16 v5, v1
+// CHECK: [0x01,0x17,0x0a,0x7e]
+
+v_cvt_f32_f16 v5, v255
+// CHECK: [0xff,0x17,0x0a,0x7e]
+
+v_cvt_f32_f16_e64 v5, s1
+// CHECK: [0x05,0x00,0x4b,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v255, s1
+// CHECK: [0xff,0x00,0x4b,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, s101
+// CHECK: [0x05,0x00,0x4b,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x4b,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x4b,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x4b,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x4b,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x4b,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x4b,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x4b,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x4b,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x4b,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, m0
+// CHECK: [0x05,0x00,0x4b,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x4b,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x4b,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, scc
+// CHECK: [0x05,0x00,0x4b,0xd1,0xfd,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, v1
+// CHECK: [0x05,0x00,0x4b,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, v255
+// CHECK: [0x05,0x00,0x4b,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, -s1
+// CHECK: [0x05,0x00,0x4b,0xd1,0x01,0x00,0x00,0x20]
+
+v_cvt_f32_f16_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x4b,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x4b,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_f16_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x4b,0xd1,0x01,0x00,0x00,0x08]
+
+v_cvt_f32_f16_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x4b,0xd1,0x01,0x00,0x00,0x10]
+
+v_cvt_f32_f16_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x4b,0xd1,0x01,0x00,0x00,0x18]
+
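+// v_cvt_rpi_i32_f32 (round-to-plus-infinity, nominally (int)floor(src + 0.5))
+// uses the same VOP1/VOP3 patterns as the conversions above.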
+v_cvt_rpi_i32_f32 v5, s1
+// CHECK: [0x01,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v255, s1
+// CHECK: [0x01,0x18,0xfe,0x7f]
+
+v_cvt_rpi_i32_f32 v5, s101
+// CHECK: [0x65,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, vcc_lo
+// CHECK: [0x6a,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, vcc_hi
+// CHECK: [0x6b,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, tba_lo
+// CHECK: [0x6c,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, tba_hi
+// CHECK: [0x6d,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, tma_lo
+// CHECK: [0x6e,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, tma_hi
+// CHECK: [0x6f,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, ttmp11
+// CHECK: [0x7b,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, m0
+// CHECK: [0x7c,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, exec_lo
+// CHECK: [0x7e,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, exec_hi
+// CHECK: [0x7f,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, 0
+// CHECK: [0x80,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, -1
+// CHECK: [0xc1,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, 0.5
+// CHECK: [0xf0,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, -4.0
+// CHECK: [0xf7,0x18,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, 0xaf123456
+// CHECK: [0xff,0x18,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_rpi_i32_f32 v5, 0x3f717273
+// CHECK: [0xff,0x18,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_rpi_i32_f32 v5, v1
+// CHECK: [0x01,0x19,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32 v5, v255
+// CHECK: [0xff,0x19,0x0a,0x7e]
+
+v_cvt_rpi_i32_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x4c,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x4c,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x4c,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x4c,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x4c,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x4c,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x4c,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x4c,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x4c,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x4c,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x4c,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x4c,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x4c,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x4c,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x4c,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x4c,0xd1,0xfd,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x4c,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x4c,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x4c,0xd1,0x01,0x00,0x00,0x20]
+
+v_cvt_rpi_i32_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x4c,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_rpi_i32_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x4c,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32 v5, s1
+// CHECK: [0x01,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v255, s1
+// CHECK: [0x01,0x1a,0xfe,0x7f]
+
+v_cvt_flr_i32_f32 v5, s101
+// CHECK: [0x65,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, vcc_lo
+// CHECK: [0x6a,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, vcc_hi
+// CHECK: [0x6b,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, tba_lo
+// CHECK: [0x6c,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, tba_hi
+// CHECK: [0x6d,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, tma_lo
+// CHECK: [0x6e,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, tma_hi
+// CHECK: [0x6f,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, ttmp11
+// CHECK: [0x7b,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, m0
+// CHECK: [0x7c,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, exec_lo
+// CHECK: [0x7e,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, exec_hi
+// CHECK: [0x7f,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, 0
+// CHECK: [0x80,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, -1
+// CHECK: [0xc1,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, 0.5
+// CHECK: [0xf0,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, -4.0
+// CHECK: [0xf7,0x1a,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, 0xaf123456
+// CHECK: [0xff,0x1a,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_flr_i32_f32 v5, 0x3f717273
+// CHECK: [0xff,0x1a,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_flr_i32_f32 v5, v1
+// CHECK: [0x01,0x1b,0x0a,0x7e]
+
+v_cvt_flr_i32_f32 v5, v255
+// CHECK: [0xff,0x1b,0x0a,0x7e]
+
+v_cvt_flr_i32_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x4d,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x4d,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x4d,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x4d,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x4d,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x4d,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x4d,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x4d,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x4d,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x4d,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x4d,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x4d,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x4d,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x4d,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x4d,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x4d,0xd1,0xfd,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x4d,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x4d,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x4d,0xd1,0x01,0x00,0x00,0x20]
+
+v_cvt_flr_i32_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x4d,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_flr_i32_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x4d,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4 v5, s1
+// CHECK: [0x01,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v255, s1
+// CHECK: [0x01,0x1c,0xfe,0x7f]
+
+v_cvt_off_f32_i4 v5, s101
+// CHECK: [0x65,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, flat_scratch_lo
+// CHECK: [0x66,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, flat_scratch_hi
+// CHECK: [0x67,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, vcc_lo
+// CHECK: [0x6a,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, vcc_hi
+// CHECK: [0x6b,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, tba_lo
+// CHECK: [0x6c,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, tba_hi
+// CHECK: [0x6d,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, tma_lo
+// CHECK: [0x6e,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, tma_hi
+// CHECK: [0x6f,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, ttmp11
+// CHECK: [0x7b,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, m0
+// CHECK: [0x7c,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, exec_lo
+// CHECK: [0x7e,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, exec_hi
+// CHECK: [0x7f,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, 0
+// CHECK: [0x80,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, -1
+// CHECK: [0xc1,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, 0.5
+// CHECK: [0xf0,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, -4.0
+// CHECK: [0xf7,0x1c,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, 0x4f
+// CHECK: [0xff,0x1c,0x0a,0x7e,0x4f,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4 v5, 0x41
+// CHECK: [0xff,0x1c,0x0a,0x7e,0x41,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4 v5, v1
+// CHECK: [0x01,0x1d,0x0a,0x7e]
+
+v_cvt_off_f32_i4 v5, v255
+// CHECK: [0xff,0x1d,0x0a,0x7e]
+
+v_cvt_off_f32_i4_e64 v5, s1
+// CHECK: [0x05,0x00,0x4e,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v255, s1
+// CHECK: [0xff,0x00,0x4e,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, s101
+// CHECK: [0x05,0x00,0x4e,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x4e,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x4e,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x4e,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x4e,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x4e,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x4e,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x4e,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x4e,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x4e,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, m0
+// CHECK: [0x05,0x00,0x4e,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x4e,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x4e,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, 0
+// CHECK: [0x05,0x00,0x4e,0xd1,0x80,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, -1
+// CHECK: [0x05,0x00,0x4e,0xd1,0xc1,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x4e,0xd1,0xf0,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x4e,0xd1,0xf7,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, v1
+// CHECK: [0x05,0x00,0x4e,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, v255
+// CHECK: [0x05,0x00,0x4e,0xd1,0xff,0x01,0x00,0x00]
+
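+// f64 sources occupy aligned register pairs (s[2:3], v[1:2], ttmp[10:11]);
+// the operand field stores only the even base register, so s[2:3] encodes
+// as 0x02 and v[1:2] as VGPR 1.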
+v_cvt_f32_f64 v5, s[2:3]
+// CHECK: [0x02,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v255, s[2:3]
+// CHECK: [0x02,0x1e,0xfe,0x7f]
+
+v_cvt_f32_f64 v5, s[4:5]
+// CHECK: [0x04,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, s[100:101]
+// CHECK: [0x64,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, flat_scratch
+// CHECK: [0x66,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, vcc
+// CHECK: [0x6a,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, tba
+// CHECK: [0x6c,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, tma
+// CHECK: [0x6e,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, ttmp[10:11]
+// CHECK: [0x7a,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, exec
+// CHECK: [0x7e,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, 0
+// CHECK: [0x80,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, -1
+// CHECK: [0xc1,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, 0.5
+// CHECK: [0xf0,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, -4.0
+// CHECK: [0xf7,0x1e,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, 0xaf123456
+// CHECK: [0xff,0x1e,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f32_f64 v5, 0x3f717273
+// CHECK: [0xff,0x1e,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f32_f64 v5, v[1:2]
+// CHECK: [0x01,0x1f,0x0a,0x7e]
+
+v_cvt_f32_f64 v5, v[254:255]
+// CHECK: [0xfe,0x1f,0x0a,0x7e]
+
+v_cvt_f32_f64_e64 v5, s[2:3]
+// CHECK: [0x05,0x00,0x4f,0xd1,0x02,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v255, s[2:3]
+// CHECK: [0xff,0x00,0x4f,0xd1,0x02,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, s[4:5]
+// CHECK: [0x05,0x00,0x4f,0xd1,0x04,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, s[100:101]
+// CHECK: [0x05,0x00,0x4f,0xd1,0x64,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, flat_scratch
+// CHECK: [0x05,0x00,0x4f,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, vcc
+// CHECK: [0x05,0x00,0x4f,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, tba
+// CHECK: [0x05,0x00,0x4f,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, tma
+// CHECK: [0x05,0x00,0x4f,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, ttmp[10:11]
+// CHECK: [0x05,0x00,0x4f,0xd1,0x7a,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, exec
+// CHECK: [0x05,0x00,0x4f,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, scc
+// CHECK: [0x05,0x00,0x4f,0xd1,0xfd,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, v[1:2]
+// CHECK: [0x05,0x00,0x4f,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, v[254:255]
+// CHECK: [0x05,0x00,0x4f,0xd1,0xfe,0x01,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, -s[2:3]
+// CHECK: [0x05,0x00,0x4f,0xd1,0x02,0x00,0x00,0x20]
+
+v_cvt_f32_f64_e64 v5, |s[2:3]|
+// CHECK: [0x05,0x01,0x4f,0xd1,0x02,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, s[2:3] clamp
+// CHECK: [0x05,0x80,0x4f,0xd1,0x02,0x00,0x00,0x00]
+
+v_cvt_f32_f64_e64 v5, s[2:3] mul:2
+// CHECK: [0x05,0x00,0x4f,0xd1,0x02,0x00,0x00,0x08]
+
+v_cvt_f32_f64_e64 v5, s[2:3] mul:4
+// CHECK: [0x05,0x00,0x4f,0xd1,0x02,0x00,0x00,0x10]
+
+v_cvt_f32_f64_e64 v5, s[2:3] div:2
+// CHECK: [0x05,0x00,0x4f,0xd1,0x02,0x00,0x00,0x18]
+
+v_cvt_f64_f32 v[5:6], s1
+// CHECK: [0x01,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[254:255], s1
+// CHECK: [0x01,0x20,0xfc,0x7f]
+
+v_cvt_f64_f32 v[5:6], s101
+// CHECK: [0x65,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], flat_scratch_lo
+// CHECK: [0x66,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], flat_scratch_hi
+// CHECK: [0x67,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], vcc_lo
+// CHECK: [0x6a,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], vcc_hi
+// CHECK: [0x6b,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], tba_lo
+// CHECK: [0x6c,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], tba_hi
+// CHECK: [0x6d,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], tma_lo
+// CHECK: [0x6e,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], tma_hi
+// CHECK: [0x6f,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], ttmp11
+// CHECK: [0x7b,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], m0
+// CHECK: [0x7c,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], exec_lo
+// CHECK: [0x7e,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], exec_hi
+// CHECK: [0x7f,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], 0
+// CHECK: [0x80,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], -1
+// CHECK: [0xc1,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], 0.5
+// CHECK: [0xf0,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], -4.0
+// CHECK: [0xf7,0x20,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], 0xaf123456
+// CHECK: [0xff,0x20,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f64_f32 v[5:6], 0x3f717273
+// CHECK: [0xff,0x20,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f64_f32 v[5:6], v1
+// CHECK: [0x01,0x21,0x0a,0x7e]
+
+v_cvt_f64_f32 v[5:6], v255
+// CHECK: [0xff,0x21,0x0a,0x7e]
+
+v_cvt_f64_f32_e64 v[5:6], s1
+// CHECK: [0x05,0x00,0x50,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[254:255], s1
+// CHECK: [0xfe,0x00,0x50,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], s101
+// CHECK: [0x05,0x00,0x50,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], flat_scratch_lo
+// CHECK: [0x05,0x00,0x50,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], flat_scratch_hi
+// CHECK: [0x05,0x00,0x50,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], vcc_lo
+// CHECK: [0x05,0x00,0x50,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], vcc_hi
+// CHECK: [0x05,0x00,0x50,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], tba_lo
+// CHECK: [0x05,0x00,0x50,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], tba_hi
+// CHECK: [0x05,0x00,0x50,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], tma_lo
+// CHECK: [0x05,0x00,0x50,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], tma_hi
+// CHECK: [0x05,0x00,0x50,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], ttmp11
+// CHECK: [0x05,0x00,0x50,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], m0
+// CHECK: [0x05,0x00,0x50,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], exec_lo
+// CHECK: [0x05,0x00,0x50,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], exec_hi
+// CHECK: [0x05,0x00,0x50,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x50,0xd1,0xfd,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], v1
+// CHECK: [0x05,0x00,0x50,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], v255
+// CHECK: [0x05,0x00,0x50,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], -s1
+// CHECK: [0x05,0x00,0x50,0xd1,0x01,0x00,0x00,0x20]
+
+v_cvt_f64_f32_e64 v[5:6], |s1|
+// CHECK: [0x05,0x01,0x50,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], s1 clamp
+// CHECK: [0x05,0x80,0x50,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f64_f32_e64 v[5:6], s1 mul:2
+// CHECK: [0x05,0x00,0x50,0xd1,0x01,0x00,0x00,0x08]
+
+v_cvt_f64_f32_e64 v[5:6], s1 mul:4
+// CHECK: [0x05,0x00,0x50,0xd1,0x01,0x00,0x00,0x10]
+
+v_cvt_f64_f32_e64 v[5:6], s1 div:2
+// CHECK: [0x05,0x00,0x50,0xd1,0x01,0x00,0x00,0x18]
+
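+// v_cvt_f32_ubyte0..3 convert the corresponding byte of the 32-bit source
+// to float; the four opcodes are consecutive, so only the opcode field
+// changes between these groups.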
+v_cvt_f32_ubyte0 v5, s1
+// CHECK: [0x01,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v255, s1
+// CHECK: [0x01,0x22,0xfe,0x7f]
+
+v_cvt_f32_ubyte0 v5, s101
+// CHECK: [0x65,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, flat_scratch_lo
+// CHECK: [0x66,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, flat_scratch_hi
+// CHECK: [0x67,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, vcc_lo
+// CHECK: [0x6a,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, vcc_hi
+// CHECK: [0x6b,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, tba_lo
+// CHECK: [0x6c,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, tba_hi
+// CHECK: [0x6d,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, tma_lo
+// CHECK: [0x6e,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, tma_hi
+// CHECK: [0x6f,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, ttmp11
+// CHECK: [0x7b,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, m0
+// CHECK: [0x7c,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, exec_lo
+// CHECK: [0x7e,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, exec_hi
+// CHECK: [0x7f,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, 0
+// CHECK: [0x80,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, -1
+// CHECK: [0xc1,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, 0.5
+// CHECK: [0xf0,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, -4.0
+// CHECK: [0xf7,0x22,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, 0xaf123456
+// CHECK: [0xff,0x22,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f32_ubyte0 v5, 0x3f717273
+// CHECK: [0xff,0x22,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f32_ubyte0 v5, v1
+// CHECK: [0x01,0x23,0x0a,0x7e]
+
+v_cvt_f32_ubyte0 v5, v255
+// CHECK: [0xff,0x23,0x0a,0x7e]
+
+v_cvt_f32_ubyte0_e64 v5, s1
+// CHECK: [0x05,0x00,0x51,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v255, s1
+// CHECK: [0xff,0x00,0x51,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, s101
+// CHECK: [0x05,0x00,0x51,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x51,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x51,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x51,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x51,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x51,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x51,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x51,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x51,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x51,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, m0
+// CHECK: [0x05,0x00,0x51,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x51,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x51,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, 0
+// CHECK: [0x05,0x00,0x51,0xd1,0x80,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, -1
+// CHECK: [0x05,0x00,0x51,0xd1,0xc1,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x51,0xd1,0xf0,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x51,0xd1,0xf7,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, v1
+// CHECK: [0x05,0x00,0x51,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, v255
+// CHECK: [0x05,0x00,0x51,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_f32_ubyte1 v5, s1
+// CHECK: [0x01,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v255, s1
+// CHECK: [0x01,0x24,0xfe,0x7f]
+
+v_cvt_f32_ubyte1 v5, s101
+// CHECK: [0x65,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, flat_scratch_lo
+// CHECK: [0x66,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, flat_scratch_hi
+// CHECK: [0x67,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, vcc_lo
+// CHECK: [0x6a,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, vcc_hi
+// CHECK: [0x6b,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, tba_lo
+// CHECK: [0x6c,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, tba_hi
+// CHECK: [0x6d,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, tma_lo
+// CHECK: [0x6e,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, tma_hi
+// CHECK: [0x6f,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, ttmp11
+// CHECK: [0x7b,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, m0
+// CHECK: [0x7c,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, exec_lo
+// CHECK: [0x7e,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, exec_hi
+// CHECK: [0x7f,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, 0
+// CHECK: [0x80,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, -1
+// CHECK: [0xc1,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, 0.5
+// CHECK: [0xf0,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, -4.0
+// CHECK: [0xf7,0x24,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, 0xaf123456
+// CHECK: [0xff,0x24,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f32_ubyte1 v5, 0x3f717273
+// CHECK: [0xff,0x24,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f32_ubyte1 v5, v1
+// CHECK: [0x01,0x25,0x0a,0x7e]
+
+v_cvt_f32_ubyte1 v5, v255
+// CHECK: [0xff,0x25,0x0a,0x7e]
+
+v_cvt_f32_ubyte1_e64 v5, s1
+// CHECK: [0x05,0x00,0x52,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v255, s1
+// CHECK: [0xff,0x00,0x52,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, s101
+// CHECK: [0x05,0x00,0x52,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x52,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x52,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x52,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x52,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x52,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x52,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x52,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x52,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x52,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, m0
+// CHECK: [0x05,0x00,0x52,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x52,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x52,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, 0
+// CHECK: [0x05,0x00,0x52,0xd1,0x80,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, -1
+// CHECK: [0x05,0x00,0x52,0xd1,0xc1,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x52,0xd1,0xf0,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x52,0xd1,0xf7,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, v1
+// CHECK: [0x05,0x00,0x52,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, v255
+// CHECK: [0x05,0x00,0x52,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_f32_ubyte2 v5, s1
+// CHECK: [0x01,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v255, s1
+// CHECK: [0x01,0x26,0xfe,0x7f]
+
+v_cvt_f32_ubyte2 v5, s101
+// CHECK: [0x65,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, flat_scratch_lo
+// CHECK: [0x66,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, flat_scratch_hi
+// CHECK: [0x67,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, vcc_lo
+// CHECK: [0x6a,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, vcc_hi
+// CHECK: [0x6b,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, tba_lo
+// CHECK: [0x6c,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, tba_hi
+// CHECK: [0x6d,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, tma_lo
+// CHECK: [0x6e,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, tma_hi
+// CHECK: [0x6f,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, ttmp11
+// CHECK: [0x7b,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, m0
+// CHECK: [0x7c,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, exec_lo
+// CHECK: [0x7e,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, exec_hi
+// CHECK: [0x7f,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, 0
+// CHECK: [0x80,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, -1
+// CHECK: [0xc1,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, 0.5
+// CHECK: [0xf0,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, -4.0
+// CHECK: [0xf7,0x26,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, 0xaf123456
+// CHECK: [0xff,0x26,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f32_ubyte2 v5, 0x3f717273
+// CHECK: [0xff,0x26,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f32_ubyte2 v5, v1
+// CHECK: [0x01,0x27,0x0a,0x7e]
+
+v_cvt_f32_ubyte2 v5, v255
+// CHECK: [0xff,0x27,0x0a,0x7e]
+
+v_cvt_f32_ubyte2_e64 v5, s1
+// CHECK: [0x05,0x00,0x53,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v255, s1
+// CHECK: [0xff,0x00,0x53,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, s101
+// CHECK: [0x05,0x00,0x53,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x53,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x53,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x53,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x53,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x53,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x53,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x53,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x53,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x53,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, m0
+// CHECK: [0x05,0x00,0x53,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x53,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x53,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, 0
+// CHECK: [0x05,0x00,0x53,0xd1,0x80,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, -1
+// CHECK: [0x05,0x00,0x53,0xd1,0xc1,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x53,0xd1,0xf0,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x53,0xd1,0xf7,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, v1
+// CHECK: [0x05,0x00,0x53,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, v255
+// CHECK: [0x05,0x00,0x53,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_f32_ubyte3 v5, s1
+// CHECK: [0x01,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v255, s1
+// CHECK: [0x01,0x28,0xfe,0x7f]
+
+v_cvt_f32_ubyte3 v5, s101
+// CHECK: [0x65,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, flat_scratch_lo
+// CHECK: [0x66,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, flat_scratch_hi
+// CHECK: [0x67,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, vcc_lo
+// CHECK: [0x6a,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, vcc_hi
+// CHECK: [0x6b,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, tba_lo
+// CHECK: [0x6c,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, tba_hi
+// CHECK: [0x6d,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, tma_lo
+// CHECK: [0x6e,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, tma_hi
+// CHECK: [0x6f,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, ttmp11
+// CHECK: [0x7b,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, m0
+// CHECK: [0x7c,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, exec_lo
+// CHECK: [0x7e,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, exec_hi
+// CHECK: [0x7f,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, 0
+// CHECK: [0x80,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, -1
+// CHECK: [0xc1,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, 0.5
+// CHECK: [0xf0,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, -4.0
+// CHECK: [0xf7,0x28,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, 0xaf123456
+// CHECK: [0xff,0x28,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f32_ubyte3 v5, 0x3f717273
+// CHECK: [0xff,0x28,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f32_ubyte3 v5, v1
+// CHECK: [0x01,0x29,0x0a,0x7e]
+
+v_cvt_f32_ubyte3 v5, v255
+// CHECK: [0xff,0x29,0x0a,0x7e]
+
+v_cvt_f32_ubyte3_e64 v5, s1
+// CHECK: [0x05,0x00,0x54,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v255, s1
+// CHECK: [0xff,0x00,0x54,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, s101
+// CHECK: [0x05,0x00,0x54,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x54,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x54,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x54,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x54,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x54,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x54,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x54,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x54,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x54,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, m0
+// CHECK: [0x05,0x00,0x54,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x54,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x54,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, 0
+// CHECK: [0x05,0x00,0x54,0xd1,0x80,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, -1
+// CHECK: [0x05,0x00,0x54,0xd1,0xc1,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x54,0xd1,0xf0,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x54,0xd1,0xf7,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, v1
+// CHECK: [0x05,0x00,0x54,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, v255
+// CHECK: [0x05,0x00,0x54,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_u32_f64 v5, s[2:3]
+// CHECK: [0x02,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v255, s[2:3]
+// CHECK: [0x02,0x2a,0xfe,0x7f]
+
+v_cvt_u32_f64 v5, s[4:5]
+// CHECK: [0x04,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, s[100:101]
+// CHECK: [0x64,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, flat_scratch
+// CHECK: [0x66,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, vcc
+// CHECK: [0x6a,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, tba
+// CHECK: [0x6c,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, tma
+// CHECK: [0x6e,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, ttmp[10:11]
+// CHECK: [0x7a,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, exec
+// CHECK: [0x7e,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, 0
+// CHECK: [0x80,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, -1
+// CHECK: [0xc1,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, 0.5
+// CHECK: [0xf0,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, -4.0
+// CHECK: [0xf7,0x2a,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, 0xaf123456
+// CHECK: [0xff,0x2a,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_u32_f64 v5, 0x3f717273
+// CHECK: [0xff,0x2a,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_u32_f64 v5, v[1:2]
+// CHECK: [0x01,0x2b,0x0a,0x7e]
+
+v_cvt_u32_f64 v5, v[254:255]
+// CHECK: [0xfe,0x2b,0x0a,0x7e]
+
+v_cvt_u32_f64_e64 v5, s[2:3]
+// CHECK: [0x05,0x00,0x55,0xd1,0x02,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v255, s[2:3]
+// CHECK: [0xff,0x00,0x55,0xd1,0x02,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, s[4:5]
+// CHECK: [0x05,0x00,0x55,0xd1,0x04,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, s[100:101]
+// CHECK: [0x05,0x00,0x55,0xd1,0x64,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, flat_scratch
+// CHECK: [0x05,0x00,0x55,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, vcc
+// CHECK: [0x05,0x00,0x55,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, tba
+// CHECK: [0x05,0x00,0x55,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, tma
+// CHECK: [0x05,0x00,0x55,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, ttmp[10:11]
+// CHECK: [0x05,0x00,0x55,0xd1,0x7a,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, exec
+// CHECK: [0x05,0x00,0x55,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, scc
+// CHECK: [0x05,0x00,0x55,0xd1,0xfd,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, v[1:2]
+// CHECK: [0x05,0x00,0x55,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, v[254:255]
+// CHECK: [0x05,0x00,0x55,0xd1,0xfe,0x01,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, -s[2:3]
+// CHECK: [0x05,0x00,0x55,0xd1,0x02,0x00,0x00,0x20]
+
+v_cvt_u32_f64_e64 v5, |s[2:3]|
+// CHECK: [0x05,0x01,0x55,0xd1,0x02,0x00,0x00,0x00]
+
+v_cvt_u32_f64_e64 v5, s[2:3] clamp
+// CHECK: [0x05,0x80,0x55,0xd1,0x02,0x00,0x00,0x00]
+
+v_cvt_f64_u32 v[5:6], s1
+// CHECK: [0x01,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[254:255], s1
+// CHECK: [0x01,0x2c,0xfc,0x7f]
+
+v_cvt_f64_u32 v[5:6], s101
+// CHECK: [0x65,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], flat_scratch_lo
+// CHECK: [0x66,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], flat_scratch_hi
+// CHECK: [0x67,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], vcc_lo
+// CHECK: [0x6a,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], vcc_hi
+// CHECK: [0x6b,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], tba_lo
+// CHECK: [0x6c,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], tba_hi
+// CHECK: [0x6d,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], tma_lo
+// CHECK: [0x6e,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], tma_hi
+// CHECK: [0x6f,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], ttmp11
+// CHECK: [0x7b,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], m0
+// CHECK: [0x7c,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], exec_lo
+// CHECK: [0x7e,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], exec_hi
+// CHECK: [0x7f,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], 0
+// CHECK: [0x80,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], -1
+// CHECK: [0xc1,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], 0.5
+// CHECK: [0xf0,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], -4.0
+// CHECK: [0xf7,0x2c,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], 0xaf123456
+// CHECK: [0xff,0x2c,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cvt_f64_u32 v[5:6], 0x3f717273
+// CHECK: [0xff,0x2c,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cvt_f64_u32 v[5:6], v1
+// CHECK: [0x01,0x2d,0x0a,0x7e]
+
+v_cvt_f64_u32 v[5:6], v255
+// CHECK: [0xff,0x2d,0x0a,0x7e]
+
+v_cvt_f64_u32_e64 v[5:6], s1
+// CHECK: [0x05,0x00,0x56,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[254:255], s1
+// CHECK: [0xfe,0x00,0x56,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], s101
+// CHECK: [0x05,0x00,0x56,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], flat_scratch_lo
+// CHECK: [0x05,0x00,0x56,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], flat_scratch_hi
+// CHECK: [0x05,0x00,0x56,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], vcc_lo
+// CHECK: [0x05,0x00,0x56,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], vcc_hi
+// CHECK: [0x05,0x00,0x56,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], tba_lo
+// CHECK: [0x05,0x00,0x56,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], tba_hi
+// CHECK: [0x05,0x00,0x56,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], tma_lo
+// CHECK: [0x05,0x00,0x56,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], tma_hi
+// CHECK: [0x05,0x00,0x56,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], ttmp11
+// CHECK: [0x05,0x00,0x56,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], m0
+// CHECK: [0x05,0x00,0x56,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], exec_lo
+// CHECK: [0x05,0x00,0x56,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], exec_hi
+// CHECK: [0x05,0x00,0x56,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], 0
+// CHECK: [0x05,0x00,0x56,0xd1,0x80,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], -1
+// CHECK: [0x05,0x00,0x56,0xd1,0xc1,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], 0.5
+// CHECK: [0x05,0x00,0x56,0xd1,0xf0,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], -4.0
+// CHECK: [0x05,0x00,0x56,0xd1,0xf7,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], v1
+// CHECK: [0x05,0x00,0x56,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], v255
+// CHECK: [0x05,0x00,0x56,0xd1,0xff,0x01,0x00,0x00]
+
+v_trunc_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x2e,0xfc,0x7f]
+
+v_trunc_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], s[100:101]
+// CHECK: [0x64,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], flat_scratch
+// CHECK: [0x66,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], vcc
+// CHECK: [0x6a,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], tba
+// CHECK: [0x6c,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], tma
+// CHECK: [0x6e,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], exec
+// CHECK: [0x7e,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], 0
+// CHECK: [0x80,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], -1
+// CHECK: [0xc1,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x2e,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x2e,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_trunc_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x2e,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_trunc_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x2f,0x0a,0x7e]
+
+v_trunc_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x2f,0x0a,0x7e]
+
+v_trunc_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x57,0xd1,0x02,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x57,0xd1,0x02,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x57,0xd1,0x04,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], s[100:101]
+// CHECK: [0x05,0x00,0x57,0xd1,0x64,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x57,0xd1,0x66,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x57,0xd1,0x6a,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x57,0xd1,0x6c,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x57,0xd1,0x6e,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x57,0xd1,0x7a,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x57,0xd1,0x7e,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x57,0xd1,0xfd,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x57,0xd1,0x01,0x01,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x57,0xd1,0xfe,0x01,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x57,0xd1,0x02,0x00,0x00,0x20]
+
+v_trunc_f64_e64 v[5:6], |s[2:3]|
+// CHECK: [0x05,0x01,0x57,0xd1,0x02,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x80,0x57,0xd1,0x02,0x00,0x00,0x00]
+
+v_trunc_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x57,0xd1,0x02,0x00,0x00,0x08]
+
+v_trunc_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x57,0xd1,0x02,0x00,0x00,0x10]
+
+v_trunc_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x57,0xd1,0x02,0x00,0x00,0x18]
+
+v_ceil_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x30,0xfc,0x7f]
+
+v_ceil_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], s[100:101]
+// CHECK: [0x64,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], flat_scratch
+// CHECK: [0x66,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], vcc
+// CHECK: [0x6a,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], tba
+// CHECK: [0x6c,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], tma
+// CHECK: [0x6e,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], exec
+// CHECK: [0x7e,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], 0
+// CHECK: [0x80,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], -1
+// CHECK: [0xc1,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x30,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x30,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_ceil_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x30,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_ceil_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x31,0x0a,0x7e]
+
+v_ceil_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x31,0x0a,0x7e]
+
+v_ceil_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x58,0xd1,0x02,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x58,0xd1,0x02,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x58,0xd1,0x04,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], s[100:101]
+// CHECK: [0x05,0x00,0x58,0xd1,0x64,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x58,0xd1,0x66,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x58,0xd1,0x6a,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x58,0xd1,0x6c,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x58,0xd1,0x6e,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x58,0xd1,0x7a,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x58,0xd1,0x7e,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x58,0xd1,0xfd,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x58,0xd1,0x01,0x01,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x58,0xd1,0xfe,0x01,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x58,0xd1,0x02,0x00,0x00,0x20]
+
+v_ceil_f64_e64 v[5:6], |s[2:3]|
+// CHECK: [0x05,0x01,0x58,0xd1,0x02,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x80,0x58,0xd1,0x02,0x00,0x00,0x00]
+
+v_ceil_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x58,0xd1,0x02,0x00,0x00,0x08]
+
+v_ceil_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x58,0xd1,0x02,0x00,0x00,0x10]
+
+v_ceil_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x58,0xd1,0x02,0x00,0x00,0x18]
+
+v_rndne_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x32,0xfc,0x7f]
+
+v_rndne_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], s[100:101]
+// CHECK: [0x64,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], flat_scratch
+// CHECK: [0x66,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], vcc
+// CHECK: [0x6a,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], tba
+// CHECK: [0x6c,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], tma
+// CHECK: [0x6e,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], exec
+// CHECK: [0x7e,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], 0
+// CHECK: [0x80,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], -1
+// CHECK: [0xc1,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x32,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x32,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rndne_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x32,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rndne_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x33,0x0a,0x7e]
+
+v_rndne_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x33,0x0a,0x7e]
+
+v_rndne_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x59,0xd1,0x02,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x59,0xd1,0x02,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x59,0xd1,0x04,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], s[100:101]
+// CHECK: [0x05,0x00,0x59,0xd1,0x64,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x59,0xd1,0x66,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x59,0xd1,0x6a,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x59,0xd1,0x6c,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x59,0xd1,0x6e,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x59,0xd1,0x7a,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x59,0xd1,0x7e,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], 0
+// CHECK: [0x05,0x00,0x59,0xd1,0x80,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], 0.5
+// CHECK: [0x05,0x00,0x59,0xd1,0xf0,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x59,0xd1,0xfd,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x59,0xd1,0x01,0x01,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x59,0xd1,0xfe,0x01,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x59,0xd1,0x02,0x00,0x00,0x20]
+
+v_rndne_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x80,0x59,0xd1,0x02,0x00,0x00,0x00]
+
+v_rndne_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x59,0xd1,0x02,0x00,0x00,0x08]
+
+v_rndne_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x59,0xd1,0x02,0x00,0x00,0x10]
+
+v_rndne_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x59,0xd1,0x02,0x00,0x00,0x18]
+
+v_floor_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x34,0x0a,0x7e]
+
+v_floor_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x34,0xfc,0x7f]
+
+v_floor_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], s[100:101]
+// CHECK: [0x64,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], flat_scratch
+// CHECK: [0x66,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], vcc
+// CHECK: [0x6a,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], tba
+// CHECK: [0x6c,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], tma
+// CHECK: [0x6e,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], exec
+// CHECK: [0x7e,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], 0
+// CHECK: [0x80,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], -1
+// CHECK: [0xc1,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x34,0x0a,0x7e]
+
+v_floor_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x34,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_floor_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x34,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_floor_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x35,0x0a,0x7e]
+
+v_floor_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x35,0x0a,0x7e]
+
+v_floor_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x5a,0xd1,0x02,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x5a,0xd1,0x02,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x5a,0xd1,0x04,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], s[100:101]
+// CHECK: [0x05,0x00,0x5a,0xd1,0x64,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x5a,0xd1,0x66,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x5a,0xd1,0x6a,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x5a,0xd1,0x6c,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x5a,0xd1,0x6e,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x5a,0xd1,0x7a,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x5a,0xd1,0x7e,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], 0
+// CHECK: [0x05,0x00,0x5a,0xd1,0x80,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], 0.5
+// CHECK: [0x05,0x00,0x5a,0xd1,0xf0,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x5a,0xd1,0xfd,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x5a,0xd1,0x01,0x01,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x5a,0xd1,0xfe,0x01,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x5a,0xd1,0x02,0x00,0x00,0x20]
+
+v_floor_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x80,0x5a,0xd1,0x02,0x00,0x00,0x00]
+
+v_floor_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x5a,0xd1,0x02,0x00,0x00,0x08]
+
+v_floor_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x5a,0xd1,0x02,0x00,0x00,0x10]
+
+v_floor_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x5a,0xd1,0x02,0x00,0x00,0x18]
+
+v_fract_f32 v5, s1
+// CHECK: [0x01,0x36,0x0a,0x7e]
+
+v_fract_f32 v255, s1
+// CHECK: [0x01,0x36,0xfe,0x7f]
+
+v_fract_f32 v5, s101
+// CHECK: [0x65,0x36,0x0a,0x7e]
+
+v_fract_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x36,0x0a,0x7e]
+
+v_fract_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x36,0x0a,0x7e]
+
+v_fract_f32 v5, vcc_lo
+// CHECK: [0x6a,0x36,0x0a,0x7e]
+
+v_fract_f32 v5, vcc_hi
+// CHECK: [0x6b,0x36,0x0a,0x7e]
+
+v_fract_f32 v5, tba_lo
+// CHECK: [0x6c,0x36,0x0a,0x7e]
+
+v_fract_f32 v5, tba_hi
+// CHECK: [0x6d,0x36,0x0a,0x7e]
+
+v_fract_f32 v5, tma_lo
+// CHECK: [0x6e,0x36,0x0a,0x7e]
+
+v_fract_f32 v5, tma_hi
+// CHECK: [0x6f,0x36,0x0a,0x7e]
+
+v_fract_f32 v5, ttmp11
+// CHECK: [0x7b,0x36,0x0a,0x7e]
+
+v_fract_f32 v5, m0
+// CHECK: [0x7c,0x36,0x0a,0x7e]
+
+v_fract_f32 v5, exec_lo
+// CHECK: [0x7e,0x36,0x0a,0x7e]
+
+v_fract_f32 v5, exec_hi
+// CHECK: [0x7f,0x36,0x0a,0x7e]
+
+v_fract_f32 v5, 0
+// CHECK: [0x80,0x36,0x0a,0x7e]
+
+v_fract_f32 v5, -1
+// CHECK: [0xc1,0x36,0x0a,0x7e]
+
+v_fract_f32 v5, 0.5
+// CHECK: [0xf0,0x36,0x0a,0x7e]
+
+v_fract_f32 v5, -4.0
+// CHECK: [0xf7,0x36,0x0a,0x7e]
+
+v_fract_f32 v5, 0xaf123456
+// CHECK: [0xff,0x36,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_fract_f32 v5, 0x3f717273
+// CHECK: [0xff,0x36,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_fract_f32 v5, v1
+// CHECK: [0x01,0x37,0x0a,0x7e]
+
+v_fract_f32 v5, v255
+// CHECK: [0xff,0x37,0x0a,0x7e]
+
+v_fract_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x5b,0xd1,0x01,0x00,0x00,0x00]
+
+v_fract_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x5b,0xd1,0x01,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x5b,0xd1,0x65,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x5b,0xd1,0x66,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x5b,0xd1,0x67,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x5b,0xd1,0x6a,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x5b,0xd1,0x6b,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x5b,0xd1,0x6c,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x5b,0xd1,0x6d,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x5b,0xd1,0x6e,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x5b,0xd1,0x6f,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x5b,0xd1,0x7b,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x5b,0xd1,0x7c,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x5b,0xd1,0x7e,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x5b,0xd1,0x7f,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, 0
+// CHECK: [0x05,0x00,0x5b,0xd1,0x80,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x5b,0xd1,0xf0,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x5b,0xd1,0xfd,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x5b,0xd1,0x01,0x01,0x00,0x00]
+
+v_fract_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x5b,0xd1,0xff,0x01,0x00,0x00]
+
+v_fract_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x5b,0xd1,0x01,0x00,0x00,0x20]
+
+v_fract_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x5b,0xd1,0x01,0x00,0x00,0x00]
+
+v_fract_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x5b,0xd1,0x01,0x00,0x00,0x08]
+
+v_fract_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x5b,0xd1,0x01,0x00,0x00,0x10]
+
+v_fract_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x5b,0xd1,0x01,0x00,0x00,0x18]
+
+v_trunc_f32 v5, s1
+// CHECK: [0x01,0x38,0x0a,0x7e]
+
+v_trunc_f32 v255, s1
+// CHECK: [0x01,0x38,0xfe,0x7f]
+
+v_trunc_f32 v5, s101
+// CHECK: [0x65,0x38,0x0a,0x7e]
+
+v_trunc_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x38,0x0a,0x7e]
+
+v_trunc_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x38,0x0a,0x7e]
+
+v_trunc_f32 v5, vcc_lo
+// CHECK: [0x6a,0x38,0x0a,0x7e]
+
+v_trunc_f32 v5, vcc_hi
+// CHECK: [0x6b,0x38,0x0a,0x7e]
+
+v_trunc_f32 v5, tba_lo
+// CHECK: [0x6c,0x38,0x0a,0x7e]
+
+v_trunc_f32 v5, tba_hi
+// CHECK: [0x6d,0x38,0x0a,0x7e]
+
+v_trunc_f32 v5, tma_lo
+// CHECK: [0x6e,0x38,0x0a,0x7e]
+
+v_trunc_f32 v5, tma_hi
+// CHECK: [0x6f,0x38,0x0a,0x7e]
+
+v_trunc_f32 v5, ttmp11
+// CHECK: [0x7b,0x38,0x0a,0x7e]
+
+v_trunc_f32 v5, m0
+// CHECK: [0x7c,0x38,0x0a,0x7e]
+
+v_trunc_f32 v5, exec_lo
+// CHECK: [0x7e,0x38,0x0a,0x7e]
+
+v_trunc_f32 v5, exec_hi
+// CHECK: [0x7f,0x38,0x0a,0x7e]
+
+v_trunc_f32 v5, 0
+// CHECK: [0x80,0x38,0x0a,0x7e]
+
+v_trunc_f32 v5, -1
+// CHECK: [0xc1,0x38,0x0a,0x7e]
+
+v_trunc_f32 v5, 0.5
+// CHECK: [0xf0,0x38,0x0a,0x7e]
+
+v_trunc_f32 v5, -4.0
+// CHECK: [0xf7,0x38,0x0a,0x7e]
+
+v_trunc_f32 v5, 0xaf123456
+// CHECK: [0xff,0x38,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_trunc_f32 v5, 0x3f717273
+// CHECK: [0xff,0x38,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_trunc_f32 v5, v1
+// CHECK: [0x01,0x39,0x0a,0x7e]
+
+v_trunc_f32 v5, v255
+// CHECK: [0xff,0x39,0x0a,0x7e]
+
+v_trunc_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x5c,0xd1,0x01,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x5c,0xd1,0x01,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x5c,0xd1,0x65,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x5c,0xd1,0x66,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x5c,0xd1,0x67,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x5c,0xd1,0x6a,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x5c,0xd1,0x6b,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x5c,0xd1,0x6c,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x5c,0xd1,0x6d,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x5c,0xd1,0x6e,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x5c,0xd1,0x6f,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x5c,0xd1,0x7b,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x5c,0xd1,0x7c,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x5c,0xd1,0x7e,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x5c,0xd1,0x7f,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, 0
+// CHECK: [0x05,0x00,0x5c,0xd1,0x80,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x5c,0xd1,0xf0,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x5c,0xd1,0xfd,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x5c,0xd1,0x01,0x01,0x00,0x00]
+
+v_trunc_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x5c,0xd1,0xff,0x01,0x00,0x00]
+
+v_trunc_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x5c,0xd1,0x01,0x00,0x00,0x20]
+
+v_trunc_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x5c,0xd1,0x01,0x00,0x00,0x00]
+
+v_trunc_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x5c,0xd1,0x01,0x00,0x00,0x08]
+
+v_trunc_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x5c,0xd1,0x01,0x00,0x00,0x10]
+
+v_trunc_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x5c,0xd1,0x01,0x00,0x00,0x18]
+
+v_ceil_f32 v5, s1
+// CHECK: [0x01,0x3a,0x0a,0x7e]
+
+v_ceil_f32 v255, s1
+// CHECK: [0x01,0x3a,0xfe,0x7f]
+
+v_ceil_f32 v5, s101
+// CHECK: [0x65,0x3a,0x0a,0x7e]
+
+v_ceil_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x3a,0x0a,0x7e]
+
+v_ceil_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x3a,0x0a,0x7e]
+
+v_ceil_f32 v5, vcc_lo
+// CHECK: [0x6a,0x3a,0x0a,0x7e]
+
+v_ceil_f32 v5, vcc_hi
+// CHECK: [0x6b,0x3a,0x0a,0x7e]
+
+v_ceil_f32 v5, tba_lo
+// CHECK: [0x6c,0x3a,0x0a,0x7e]
+
+v_ceil_f32 v5, tba_hi
+// CHECK: [0x6d,0x3a,0x0a,0x7e]
+
+v_ceil_f32 v5, tma_lo
+// CHECK: [0x6e,0x3a,0x0a,0x7e]
+
+v_ceil_f32 v5, tma_hi
+// CHECK: [0x6f,0x3a,0x0a,0x7e]
+
+v_ceil_f32 v5, ttmp11
+// CHECK: [0x7b,0x3a,0x0a,0x7e]
+
+v_ceil_f32 v5, m0
+// CHECK: [0x7c,0x3a,0x0a,0x7e]
+
+v_ceil_f32 v5, exec_lo
+// CHECK: [0x7e,0x3a,0x0a,0x7e]
+
+v_ceil_f32 v5, exec_hi
+// CHECK: [0x7f,0x3a,0x0a,0x7e]
+
+v_ceil_f32 v5, 0
+// CHECK: [0x80,0x3a,0x0a,0x7e]
+
+v_ceil_f32 v5, -1
+// CHECK: [0xc1,0x3a,0x0a,0x7e]
+
+v_ceil_f32 v5, 0.5
+// CHECK: [0xf0,0x3a,0x0a,0x7e]
+
+v_ceil_f32 v5, -4.0
+// CHECK: [0xf7,0x3a,0x0a,0x7e]
+
+v_ceil_f32 v5, 0xaf123456
+// CHECK: [0xff,0x3a,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_ceil_f32 v5, 0x3f717273
+// CHECK: [0xff,0x3a,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_ceil_f32 v5, v1
+// CHECK: [0x01,0x3b,0x0a,0x7e]
+
+v_ceil_f32 v5, v255
+// CHECK: [0xff,0x3b,0x0a,0x7e]
+
+v_ceil_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x5d,0xd1,0x01,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x5d,0xd1,0x01,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x5d,0xd1,0x65,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x5d,0xd1,0x66,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x5d,0xd1,0x67,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x5d,0xd1,0x6a,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x5d,0xd1,0x6b,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x5d,0xd1,0x6c,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x5d,0xd1,0x6d,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x5d,0xd1,0x6e,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x5d,0xd1,0x6f,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x5d,0xd1,0x7b,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x5d,0xd1,0x7c,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x5d,0xd1,0x7e,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x5d,0xd1,0x7f,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, 0
+// CHECK: [0x05,0x00,0x5d,0xd1,0x80,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x5d,0xd1,0xf0,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x5d,0xd1,0xfd,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x5d,0xd1,0x01,0x01,0x00,0x00]
+
+v_ceil_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x5d,0xd1,0xff,0x01,0x00,0x00]
+
+v_ceil_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x5d,0xd1,0x01,0x00,0x00,0x20]
+
+v_ceil_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x5d,0xd1,0x01,0x00,0x00,0x00]
+
+v_ceil_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x5d,0xd1,0x01,0x00,0x00,0x08]
+
+v_ceil_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x5d,0xd1,0x01,0x00,0x00,0x10]
+
+v_ceil_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x5d,0xd1,0x01,0x00,0x00,0x18]
+
+v_rndne_f32 v5, s1
+// CHECK: [0x01,0x3c,0x0a,0x7e]
+
+v_rndne_f32 v255, s1
+// CHECK: [0x01,0x3c,0xfe,0x7f]
+
+v_rndne_f32 v5, s101
+// CHECK: [0x65,0x3c,0x0a,0x7e]
+
+v_rndne_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x3c,0x0a,0x7e]
+
+v_rndne_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x3c,0x0a,0x7e]
+
+v_rndne_f32 v5, vcc_lo
+// CHECK: [0x6a,0x3c,0x0a,0x7e]
+
+v_rndne_f32 v5, vcc_hi
+// CHECK: [0x6b,0x3c,0x0a,0x7e]
+
+v_rndne_f32 v5, tba_lo
+// CHECK: [0x6c,0x3c,0x0a,0x7e]
+
+v_rndne_f32 v5, tba_hi
+// CHECK: [0x6d,0x3c,0x0a,0x7e]
+
+v_rndne_f32 v5, tma_lo
+// CHECK: [0x6e,0x3c,0x0a,0x7e]
+
+v_rndne_f32 v5, tma_hi
+// CHECK: [0x6f,0x3c,0x0a,0x7e]
+
+v_rndne_f32 v5, ttmp11
+// CHECK: [0x7b,0x3c,0x0a,0x7e]
+
+v_rndne_f32 v5, m0
+// CHECK: [0x7c,0x3c,0x0a,0x7e]
+
+v_rndne_f32 v5, exec_lo
+// CHECK: [0x7e,0x3c,0x0a,0x7e]
+
+v_rndne_f32 v5, exec_hi
+// CHECK: [0x7f,0x3c,0x0a,0x7e]
+
+v_rndne_f32 v5, 0
+// CHECK: [0x80,0x3c,0x0a,0x7e]
+
+v_rndne_f32 v5, -1
+// CHECK: [0xc1,0x3c,0x0a,0x7e]
+
+v_rndne_f32 v5, 0.5
+// CHECK: [0xf0,0x3c,0x0a,0x7e]
+
+v_rndne_f32 v5, -4.0
+// CHECK: [0xf7,0x3c,0x0a,0x7e]
+
+v_rndne_f32 v5, 0xaf123456
+// CHECK: [0xff,0x3c,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rndne_f32 v5, 0x3f717273
+// CHECK: [0xff,0x3c,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rndne_f32 v5, v1
+// CHECK: [0x01,0x3d,0x0a,0x7e]
+
+v_rndne_f32 v5, v255
+// CHECK: [0xff,0x3d,0x0a,0x7e]
+
+v_rndne_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x5e,0xd1,0x01,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x5e,0xd1,0x01,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x5e,0xd1,0x65,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x5e,0xd1,0x66,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x5e,0xd1,0x67,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x5e,0xd1,0x6a,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x5e,0xd1,0x6b,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x5e,0xd1,0x6c,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x5e,0xd1,0x6d,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x5e,0xd1,0x6e,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x5e,0xd1,0x6f,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x5e,0xd1,0x7b,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x5e,0xd1,0x7c,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x5e,0xd1,0x7e,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x5e,0xd1,0x7f,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, 0
+// CHECK: [0x05,0x00,0x5e,0xd1,0x80,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x5e,0xd1,0xf0,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x5e,0xd1,0xfd,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x5e,0xd1,0x01,0x01,0x00,0x00]
+
+v_rndne_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x5e,0xd1,0xff,0x01,0x00,0x00]
+
+v_rndne_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x5e,0xd1,0x01,0x00,0x00,0x20]
+
+v_rndne_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x5e,0xd1,0x01,0x00,0x00,0x00]
+
+v_rndne_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x5e,0xd1,0x01,0x00,0x00,0x08]
+
+v_rndne_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x5e,0xd1,0x01,0x00,0x00,0x10]
+
+v_rndne_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x5e,0xd1,0x01,0x00,0x00,0x18]
+
+v_floor_f32 v5, s1
+// CHECK: [0x01,0x3e,0x0a,0x7e]
+
+v_floor_f32 v255, s1
+// CHECK: [0x01,0x3e,0xfe,0x7f]
+
+v_floor_f32 v5, s101
+// CHECK: [0x65,0x3e,0x0a,0x7e]
+
+v_floor_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x3e,0x0a,0x7e]
+
+v_floor_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x3e,0x0a,0x7e]
+
+v_floor_f32 v5, vcc_lo
+// CHECK: [0x6a,0x3e,0x0a,0x7e]
+
+v_floor_f32 v5, vcc_hi
+// CHECK: [0x6b,0x3e,0x0a,0x7e]
+
+v_floor_f32 v5, tba_lo
+// CHECK: [0x6c,0x3e,0x0a,0x7e]
+
+v_floor_f32 v5, tba_hi
+// CHECK: [0x6d,0x3e,0x0a,0x7e]
+
+v_floor_f32 v5, tma_lo
+// CHECK: [0x6e,0x3e,0x0a,0x7e]
+
+v_floor_f32 v5, tma_hi
+// CHECK: [0x6f,0x3e,0x0a,0x7e]
+
+v_floor_f32 v5, ttmp11
+// CHECK: [0x7b,0x3e,0x0a,0x7e]
+
+v_floor_f32 v5, m0
+// CHECK: [0x7c,0x3e,0x0a,0x7e]
+
+v_floor_f32 v5, exec_lo
+// CHECK: [0x7e,0x3e,0x0a,0x7e]
+
+v_floor_f32 v5, exec_hi
+// CHECK: [0x7f,0x3e,0x0a,0x7e]
+
+v_floor_f32 v5, 0
+// CHECK: [0x80,0x3e,0x0a,0x7e]
+
+v_floor_f32 v5, -1
+// CHECK: [0xc1,0x3e,0x0a,0x7e]
+
+v_floor_f32 v5, 0.5
+// CHECK: [0xf0,0x3e,0x0a,0x7e]
+
+v_floor_f32 v5, -4.0
+// CHECK: [0xf7,0x3e,0x0a,0x7e]
+
+v_floor_f32 v5, 0xaf123456
+// CHECK: [0xff,0x3e,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_floor_f32 v5, 0x3f717273
+// CHECK: [0xff,0x3e,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_floor_f32 v5, v1
+// CHECK: [0x01,0x3f,0x0a,0x7e]
+
+v_floor_f32 v5, v255
+// CHECK: [0xff,0x3f,0x0a,0x7e]
+
+v_floor_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x5f,0xd1,0x01,0x00,0x00,0x00]
+
+v_floor_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x5f,0xd1,0x01,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x5f,0xd1,0x65,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x5f,0xd1,0x66,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x5f,0xd1,0x67,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x5f,0xd1,0x6a,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x5f,0xd1,0x6b,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x5f,0xd1,0x6c,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x5f,0xd1,0x6d,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x5f,0xd1,0x6e,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x5f,0xd1,0x6f,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x5f,0xd1,0x7b,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x5f,0xd1,0x7c,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x5f,0xd1,0x7e,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x5f,0xd1,0x7f,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x5f,0xd1,0xfd,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x5f,0xd1,0x01,0x01,0x00,0x00]
+
+v_floor_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x5f,0xd1,0xff,0x01,0x00,0x00]
+
+v_floor_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x5f,0xd1,0x01,0x00,0x00,0x20]
+
+v_floor_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x5f,0xd1,0x01,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x5f,0xd1,0x01,0x00,0x00,0x00]
+
+v_floor_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x5f,0xd1,0x01,0x00,0x00,0x08]
+
+v_floor_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x5f,0xd1,0x01,0x00,0x00,0x10]
+
+v_floor_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x5f,0xd1,0x01,0x00,0x00,0x18]
+
+v_exp_f32 v5, s1
+// CHECK: [0x01,0x40,0x0a,0x7e]
+
+v_exp_f32 v255, s1
+// CHECK: [0x01,0x40,0xfe,0x7f]
+
+v_exp_f32 v5, s101
+// CHECK: [0x65,0x40,0x0a,0x7e]
+
+v_exp_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x40,0x0a,0x7e]
+
+v_exp_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x40,0x0a,0x7e]
+
+v_exp_f32 v5, vcc_lo
+// CHECK: [0x6a,0x40,0x0a,0x7e]
+
+v_exp_f32 v5, vcc_hi
+// CHECK: [0x6b,0x40,0x0a,0x7e]
+
+v_exp_f32 v5, tba_lo
+// CHECK: [0x6c,0x40,0x0a,0x7e]
+
+v_exp_f32 v5, tba_hi
+// CHECK: [0x6d,0x40,0x0a,0x7e]
+
+v_exp_f32 v5, tma_lo
+// CHECK: [0x6e,0x40,0x0a,0x7e]
+
+v_exp_f32 v5, tma_hi
+// CHECK: [0x6f,0x40,0x0a,0x7e]
+
+v_exp_f32 v5, ttmp11
+// CHECK: [0x7b,0x40,0x0a,0x7e]
+
+v_exp_f32 v5, m0
+// CHECK: [0x7c,0x40,0x0a,0x7e]
+
+v_exp_f32 v5, exec_lo
+// CHECK: [0x7e,0x40,0x0a,0x7e]
+
+v_exp_f32 v5, exec_hi
+// CHECK: [0x7f,0x40,0x0a,0x7e]
+
+v_exp_f32 v5, 0
+// CHECK: [0x80,0x40,0x0a,0x7e]
+
+v_exp_f32 v5, -1
+// CHECK: [0xc1,0x40,0x0a,0x7e]
+
+v_exp_f32 v5, 0.5
+// CHECK: [0xf0,0x40,0x0a,0x7e]
+
+v_exp_f32 v5, -4.0
+// CHECK: [0xf7,0x40,0x0a,0x7e]
+
+v_exp_f32 v5, 0xaf123456
+// CHECK: [0xff,0x40,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_exp_f32 v5, 0x3f717273
+// CHECK: [0xff,0x40,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_exp_f32 v5, v1
+// CHECK: [0x01,0x41,0x0a,0x7e]
+
+v_exp_f32 v5, v255
+// CHECK: [0xff,0x41,0x0a,0x7e]
+
+v_exp_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x60,0xd1,0x01,0x00,0x00,0x00]
+
+v_exp_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x60,0xd1,0x01,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x60,0xd1,0x65,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x60,0xd1,0x66,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x60,0xd1,0x67,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x60,0xd1,0x6a,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x60,0xd1,0x6b,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x60,0xd1,0x6c,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x60,0xd1,0x6d,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x60,0xd1,0x6e,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x60,0xd1,0x6f,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x60,0xd1,0x7b,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x60,0xd1,0x7c,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x60,0xd1,0x7e,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x60,0xd1,0x7f,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x60,0xd1,0xfd,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x60,0xd1,0x01,0x01,0x00,0x00]
+
+v_exp_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x60,0xd1,0xff,0x01,0x00,0x00]
+
+v_exp_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x60,0xd1,0x01,0x00,0x00,0x20]
+
+v_exp_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x60,0xd1,0x01,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x60,0xd1,0x01,0x00,0x00,0x00]
+
+v_exp_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x60,0xd1,0x01,0x00,0x00,0x08]
+
+v_exp_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x60,0xd1,0x01,0x00,0x00,0x10]
+
+v_exp_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x60,0xd1,0x01,0x00,0x00,0x18]
+
+v_log_f32 v5, s1
+// CHECK: [0x01,0x42,0x0a,0x7e]
+
+v_log_f32 v255, s1
+// CHECK: [0x01,0x42,0xfe,0x7f]
+
+v_log_f32 v5, s101
+// CHECK: [0x65,0x42,0x0a,0x7e]
+
+v_log_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x42,0x0a,0x7e]
+
+v_log_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x42,0x0a,0x7e]
+
+v_log_f32 v5, vcc_lo
+// CHECK: [0x6a,0x42,0x0a,0x7e]
+
+v_log_f32 v5, vcc_hi
+// CHECK: [0x6b,0x42,0x0a,0x7e]
+
+v_log_f32 v5, tba_lo
+// CHECK: [0x6c,0x42,0x0a,0x7e]
+
+v_log_f32 v5, tba_hi
+// CHECK: [0x6d,0x42,0x0a,0x7e]
+
+v_log_f32 v5, tma_lo
+// CHECK: [0x6e,0x42,0x0a,0x7e]
+
+v_log_f32 v5, tma_hi
+// CHECK: [0x6f,0x42,0x0a,0x7e]
+
+v_log_f32 v5, ttmp11
+// CHECK: [0x7b,0x42,0x0a,0x7e]
+
+v_log_f32 v5, m0
+// CHECK: [0x7c,0x42,0x0a,0x7e]
+
+v_log_f32 v5, exec_lo
+// CHECK: [0x7e,0x42,0x0a,0x7e]
+
+v_log_f32 v5, exec_hi
+// CHECK: [0x7f,0x42,0x0a,0x7e]
+
+v_log_f32 v5, 0
+// CHECK: [0x80,0x42,0x0a,0x7e]
+
+v_log_f32 v5, -1
+// CHECK: [0xc1,0x42,0x0a,0x7e]
+
+v_log_f32 v5, 0.5
+// CHECK: [0xf0,0x42,0x0a,0x7e]
+
+v_log_f32 v5, -4.0
+// CHECK: [0xf7,0x42,0x0a,0x7e]
+
+v_log_f32 v5, 0xaf123456
+// CHECK: [0xff,0x42,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_log_f32 v5, 0x3f717273
+// CHECK: [0xff,0x42,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_log_f32 v5, v1
+// CHECK: [0x01,0x43,0x0a,0x7e]
+
+v_log_f32 v5, v255
+// CHECK: [0xff,0x43,0x0a,0x7e]
+
+v_log_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x61,0xd1,0x01,0x00,0x00,0x00]
+
+v_log_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x61,0xd1,0x01,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x61,0xd1,0x65,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x61,0xd1,0x66,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x61,0xd1,0x67,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x61,0xd1,0x6a,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x61,0xd1,0x6b,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x61,0xd1,0x6c,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x61,0xd1,0x6d,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x61,0xd1,0x6e,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x61,0xd1,0x6f,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x61,0xd1,0x7b,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x61,0xd1,0x7c,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x61,0xd1,0x7e,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x61,0xd1,0x7f,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x61,0xd1,0xfd,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x61,0xd1,0x01,0x01,0x00,0x00]
+
+v_log_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x61,0xd1,0xff,0x01,0x00,0x00]
+
+v_log_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x61,0xd1,0x01,0x00,0x00,0x20]
+
+v_log_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x61,0xd1,0x01,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x61,0xd1,0x01,0x00,0x00,0x00]
+
+v_log_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x61,0xd1,0x01,0x00,0x00,0x08]
+
+v_log_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x61,0xd1,0x01,0x00,0x00,0x10]
+
+v_log_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x61,0xd1,0x01,0x00,0x00,0x18]
+
+v_rcp_f32 v5, s1
+// CHECK: [0x01,0x44,0x0a,0x7e]
+
+v_rcp_f32 v255, s1
+// CHECK: [0x01,0x44,0xfe,0x7f]
+
+v_rcp_f32 v5, s101
+// CHECK: [0x65,0x44,0x0a,0x7e]
+
+v_rcp_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x44,0x0a,0x7e]
+
+v_rcp_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x44,0x0a,0x7e]
+
+v_rcp_f32 v5, vcc_lo
+// CHECK: [0x6a,0x44,0x0a,0x7e]
+
+v_rcp_f32 v5, vcc_hi
+// CHECK: [0x6b,0x44,0x0a,0x7e]
+
+v_rcp_f32 v5, tba_lo
+// CHECK: [0x6c,0x44,0x0a,0x7e]
+
+v_rcp_f32 v5, tba_hi
+// CHECK: [0x6d,0x44,0x0a,0x7e]
+
+v_rcp_f32 v5, tma_lo
+// CHECK: [0x6e,0x44,0x0a,0x7e]
+
+v_rcp_f32 v5, tma_hi
+// CHECK: [0x6f,0x44,0x0a,0x7e]
+
+v_rcp_f32 v5, ttmp11
+// CHECK: [0x7b,0x44,0x0a,0x7e]
+
+v_rcp_f32 v5, m0
+// CHECK: [0x7c,0x44,0x0a,0x7e]
+
+v_rcp_f32 v5, exec_lo
+// CHECK: [0x7e,0x44,0x0a,0x7e]
+
+v_rcp_f32 v5, exec_hi
+// CHECK: [0x7f,0x44,0x0a,0x7e]
+
+v_rcp_f32 v5, 0
+// CHECK: [0x80,0x44,0x0a,0x7e]
+
+v_rcp_f32 v5, -1
+// CHECK: [0xc1,0x44,0x0a,0x7e]
+
+v_rcp_f32 v5, 0.5
+// CHECK: [0xf0,0x44,0x0a,0x7e]
+
+v_rcp_f32 v5, -4.0
+// CHECK: [0xf7,0x44,0x0a,0x7e]
+
+v_rcp_f32 v5, 0xaf123456
+// CHECK: [0xff,0x44,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rcp_f32 v5, 0x3f717273
+// CHECK: [0xff,0x44,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rcp_f32 v5, v1
+// CHECK: [0x01,0x45,0x0a,0x7e]
+
+v_rcp_f32 v5, v255
+// CHECK: [0xff,0x45,0x0a,0x7e]
+
+v_rcp_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x62,0xd1,0x01,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x62,0xd1,0x01,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x62,0xd1,0x65,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x62,0xd1,0x66,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x62,0xd1,0x67,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x62,0xd1,0x6a,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x62,0xd1,0x6b,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x62,0xd1,0x6c,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x62,0xd1,0x6d,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x62,0xd1,0x6e,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x62,0xd1,0x6f,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x62,0xd1,0x7b,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x62,0xd1,0x7c,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x62,0xd1,0x7e,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x62,0xd1,0x7f,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x62,0xd1,0xfd,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x62,0xd1,0x01,0x01,0x00,0x00]
+
+v_rcp_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x62,0xd1,0xff,0x01,0x00,0x00]
+
+v_rcp_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x62,0xd1,0x01,0x00,0x00,0x20]
+
+v_rcp_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x62,0xd1,0x01,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x62,0xd1,0x01,0x00,0x00,0x00]
+
+v_rcp_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x62,0xd1,0x01,0x00,0x00,0x08]
+
+v_rcp_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x62,0xd1,0x01,0x00,0x00,0x10]
+
+v_rcp_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x62,0xd1,0x01,0x00,0x00,0x18]
+
+v_rcp_iflag_f32 v5, s1
+// CHECK: [0x01,0x46,0x0a,0x7e]
+
+v_rcp_iflag_f32 v255, s1
+// CHECK: [0x01,0x46,0xfe,0x7f]
+
+v_rcp_iflag_f32 v5, s101
+// CHECK: [0x65,0x46,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x46,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x46,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, vcc_lo
+// CHECK: [0x6a,0x46,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, vcc_hi
+// CHECK: [0x6b,0x46,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, tba_lo
+// CHECK: [0x6c,0x46,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, tba_hi
+// CHECK: [0x6d,0x46,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, tma_lo
+// CHECK: [0x6e,0x46,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, tma_hi
+// CHECK: [0x6f,0x46,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, ttmp11
+// CHECK: [0x7b,0x46,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, m0
+// CHECK: [0x7c,0x46,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, exec_lo
+// CHECK: [0x7e,0x46,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, exec_hi
+// CHECK: [0x7f,0x46,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, 0
+// CHECK: [0x80,0x46,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, -1
+// CHECK: [0xc1,0x46,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, 0.5
+// CHECK: [0xf0,0x46,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, -4.0
+// CHECK: [0xf7,0x46,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, 0xaf123456
+// CHECK: [0xff,0x46,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rcp_iflag_f32 v5, 0x3f717273
+// CHECK: [0xff,0x46,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rcp_iflag_f32 v5, v1
+// CHECK: [0x01,0x47,0x0a,0x7e]
+
+v_rcp_iflag_f32 v5, v255
+// CHECK: [0xff,0x47,0x0a,0x7e]
+
+v_rcp_iflag_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x63,0xd1,0x01,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x63,0xd1,0x01,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x63,0xd1,0x65,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x63,0xd1,0x66,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x63,0xd1,0x67,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x63,0xd1,0x6a,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x63,0xd1,0x6b,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x63,0xd1,0x6c,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x63,0xd1,0x6d,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x63,0xd1,0x6e,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x63,0xd1,0x6f,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x63,0xd1,0x7b,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x63,0xd1,0x7c,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x63,0xd1,0x7e,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x63,0xd1,0x7f,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x63,0xd1,0xfd,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x63,0xd1,0x01,0x01,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x63,0xd1,0xff,0x01,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x63,0xd1,0x01,0x00,0x00,0x20]
+
+v_rcp_iflag_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x63,0xd1,0x01,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x63,0xd1,0x01,0x00,0x00,0x00]
+
+v_rcp_iflag_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x63,0xd1,0x01,0x00,0x00,0x08]
+
+v_rcp_iflag_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x63,0xd1,0x01,0x00,0x00,0x10]
+
+v_rcp_iflag_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x63,0xd1,0x01,0x00,0x00,0x18]
+
+v_rsq_f32 v5, s1
+// CHECK: [0x01,0x48,0x0a,0x7e]
+
+v_rsq_f32 v255, s1
+// CHECK: [0x01,0x48,0xfe,0x7f]
+
+v_rsq_f32 v5, s101
+// CHECK: [0x65,0x48,0x0a,0x7e]
+
+v_rsq_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x48,0x0a,0x7e]
+
+v_rsq_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x48,0x0a,0x7e]
+
+v_rsq_f32 v5, vcc_lo
+// CHECK: [0x6a,0x48,0x0a,0x7e]
+
+v_rsq_f32 v5, vcc_hi
+// CHECK: [0x6b,0x48,0x0a,0x7e]
+
+v_rsq_f32 v5, tba_lo
+// CHECK: [0x6c,0x48,0x0a,0x7e]
+
+v_rsq_f32 v5, tba_hi
+// CHECK: [0x6d,0x48,0x0a,0x7e]
+
+v_rsq_f32 v5, tma_lo
+// CHECK: [0x6e,0x48,0x0a,0x7e]
+
+v_rsq_f32 v5, tma_hi
+// CHECK: [0x6f,0x48,0x0a,0x7e]
+
+v_rsq_f32 v5, ttmp11
+// CHECK: [0x7b,0x48,0x0a,0x7e]
+
+v_rsq_f32 v5, m0
+// CHECK: [0x7c,0x48,0x0a,0x7e]
+
+v_rsq_f32 v5, exec_lo
+// CHECK: [0x7e,0x48,0x0a,0x7e]
+
+v_rsq_f32 v5, exec_hi
+// CHECK: [0x7f,0x48,0x0a,0x7e]
+
+v_rsq_f32 v5, 0
+// CHECK: [0x80,0x48,0x0a,0x7e]
+
+v_rsq_f32 v5, -1
+// CHECK: [0xc1,0x48,0x0a,0x7e]
+
+v_rsq_f32 v5, 0.5
+// CHECK: [0xf0,0x48,0x0a,0x7e]
+
+v_rsq_f32 v5, -4.0
+// CHECK: [0xf7,0x48,0x0a,0x7e]
+
+v_rsq_f32 v5, 0xaf123456
+// CHECK: [0xff,0x48,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rsq_f32 v5, 0x3f717273
+// CHECK: [0xff,0x48,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rsq_f32 v5, v1
+// CHECK: [0x01,0x49,0x0a,0x7e]
+
+v_rsq_f32 v5, v255
+// CHECK: [0xff,0x49,0x0a,0x7e]
+
+v_rsq_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x64,0xd1,0x01,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x64,0xd1,0x01,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x64,0xd1,0x65,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x64,0xd1,0x66,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x64,0xd1,0x67,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x64,0xd1,0x6a,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x64,0xd1,0x6b,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x64,0xd1,0x6c,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x64,0xd1,0x6d,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x64,0xd1,0x6e,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x64,0xd1,0x6f,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x64,0xd1,0x7b,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x64,0xd1,0x7c,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x64,0xd1,0x7e,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x64,0xd1,0x7f,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x64,0xd1,0xfd,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x64,0xd1,0x01,0x01,0x00,0x00]
+
+v_rsq_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x64,0xd1,0xff,0x01,0x00,0x00]
+
+v_rsq_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x64,0xd1,0x01,0x00,0x00,0x20]
+
+v_rsq_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x64,0xd1,0x01,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x64,0xd1,0x01,0x00,0x00,0x00]
+
+v_rsq_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x64,0xd1,0x01,0x00,0x00,0x08]
+
+v_rsq_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x64,0xd1,0x01,0x00,0x00,0x10]
+
+v_rsq_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x64,0xd1,0x01,0x00,0x00,0x18]
+
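+// The f64 variants below operate on 64-bit register pairs -- v[5:6], s[2:3],
+// ttmp[10:11], and the full vcc/tba/tma/exec/flat_scratch pairs. Only the
+// even base register number is encoded, so the byte patterns match the
+// 32-bit forms apart from the opcode.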
+v_rcp_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x4a,0x0a,0x7e]
+
+v_rcp_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x4a,0xfc,0x7f]
+
+v_rcp_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x4a,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], s[100:101]
+// CHECK: [0x64,0x4a,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], flat_scratch
+// CHECK: [0x66,0x4a,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], vcc
+// CHECK: [0x6a,0x4a,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], tba
+// CHECK: [0x6c,0x4a,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], tma
+// CHECK: [0x6e,0x4a,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x4a,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], exec
+// CHECK: [0x7e,0x4a,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], 0
+// CHECK: [0x80,0x4a,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], -1
+// CHECK: [0xc1,0x4a,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x4a,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x4a,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x4a,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rcp_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x4a,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rcp_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x4b,0x0a,0x7e]
+
+v_rcp_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x4b,0x0a,0x7e]
+
+v_rcp_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x65,0xd1,0x02,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x65,0xd1,0x02,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x65,0xd1,0x04,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], s[100:101]
+// CHECK: [0x05,0x00,0x65,0xd1,0x64,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x65,0xd1,0x66,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x65,0xd1,0x6a,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x65,0xd1,0x6c,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x65,0xd1,0x6e,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x65,0xd1,0x7a,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x65,0xd1,0x7e,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x65,0xd1,0xfd,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x65,0xd1,0x01,0x01,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x65,0xd1,0xfe,0x01,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x65,0xd1,0x02,0x00,0x00,0x20]
+
+v_rcp_f64_e64 v[5:6], |s[2:3]|
+// CHECK: [0x05,0x01,0x65,0xd1,0x02,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x80,0x65,0xd1,0x02,0x00,0x00,0x00]
+
+v_rcp_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x65,0xd1,0x02,0x00,0x00,0x08]
+
+v_rcp_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x65,0xd1,0x02,0x00,0x00,0x10]
+
+v_rcp_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x65,0xd1,0x02,0x00,0x00,0x18]
+
+v_rsq_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x4c,0x0a,0x7e]
+
+v_rsq_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x4c,0xfc,0x7f]
+
+v_rsq_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x4c,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], s[100:101]
+// CHECK: [0x64,0x4c,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], flat_scratch
+// CHECK: [0x66,0x4c,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], vcc
+// CHECK: [0x6a,0x4c,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], tba
+// CHECK: [0x6c,0x4c,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], tma
+// CHECK: [0x6e,0x4c,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x4c,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], exec
+// CHECK: [0x7e,0x4c,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], 0
+// CHECK: [0x80,0x4c,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], -1
+// CHECK: [0xc1,0x4c,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x4c,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x4c,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x4c,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_rsq_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x4c,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_rsq_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x4d,0x0a,0x7e]
+
+v_rsq_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x4d,0x0a,0x7e]
+
+v_rsq_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x66,0xd1,0x02,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x66,0xd1,0x02,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x66,0xd1,0x04,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], s[100:101]
+// CHECK: [0x05,0x00,0x66,0xd1,0x64,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x66,0xd1,0x66,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x66,0xd1,0x6a,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x66,0xd1,0x6c,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x66,0xd1,0x6e,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x66,0xd1,0x7a,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x66,0xd1,0x7e,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x66,0xd1,0xfd,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x66,0xd1,0x01,0x01,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x66,0xd1,0xfe,0x01,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x66,0xd1,0x02,0x00,0x00,0x20]
+
+v_rsq_f64_e64 v[5:6], |s[2:3]|
+// CHECK: [0x05,0x01,0x66,0xd1,0x02,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x80,0x66,0xd1,0x02,0x00,0x00,0x00]
+
+v_rsq_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x66,0xd1,0x02,0x00,0x00,0x08]
+
+v_rsq_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x66,0xd1,0x02,0x00,0x00,0x10]
+
+v_rsq_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x66,0xd1,0x02,0x00,0x00,0x18]
+
+v_sqrt_f32 v5, s1
+// CHECK: [0x01,0x4e,0x0a,0x7e]
+
+v_sqrt_f32 v255, s1
+// CHECK: [0x01,0x4e,0xfe,0x7f]
+
+v_sqrt_f32 v5, s101
+// CHECK: [0x65,0x4e,0x0a,0x7e]
+
+v_sqrt_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x4e,0x0a,0x7e]
+
+v_sqrt_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x4e,0x0a,0x7e]
+
+v_sqrt_f32 v5, vcc_lo
+// CHECK: [0x6a,0x4e,0x0a,0x7e]
+
+v_sqrt_f32 v5, vcc_hi
+// CHECK: [0x6b,0x4e,0x0a,0x7e]
+
+v_sqrt_f32 v5, tba_lo
+// CHECK: [0x6c,0x4e,0x0a,0x7e]
+
+v_sqrt_f32 v5, tba_hi
+// CHECK: [0x6d,0x4e,0x0a,0x7e]
+
+v_sqrt_f32 v5, tma_lo
+// CHECK: [0x6e,0x4e,0x0a,0x7e]
+
+v_sqrt_f32 v5, tma_hi
+// CHECK: [0x6f,0x4e,0x0a,0x7e]
+
+v_sqrt_f32 v5, ttmp11
+// CHECK: [0x7b,0x4e,0x0a,0x7e]
+
+v_sqrt_f32 v5, m0
+// CHECK: [0x7c,0x4e,0x0a,0x7e]
+
+v_sqrt_f32 v5, exec_lo
+// CHECK: [0x7e,0x4e,0x0a,0x7e]
+
+v_sqrt_f32 v5, exec_hi
+// CHECK: [0x7f,0x4e,0x0a,0x7e]
+
+v_sqrt_f32 v5, 0
+// CHECK: [0x80,0x4e,0x0a,0x7e]
+
+v_sqrt_f32 v5, -1
+// CHECK: [0xc1,0x4e,0x0a,0x7e]
+
+v_sqrt_f32 v5, 0.5
+// CHECK: [0xf0,0x4e,0x0a,0x7e]
+
+v_sqrt_f32 v5, -4.0
+// CHECK: [0xf7,0x4e,0x0a,0x7e]
+
+v_sqrt_f32 v5, 0xaf123456
+// CHECK: [0xff,0x4e,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_sqrt_f32 v5, 0x3f717273
+// CHECK: [0xff,0x4e,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_sqrt_f32 v5, v1
+// CHECK: [0x01,0x4f,0x0a,0x7e]
+
+v_sqrt_f32 v5, v255
+// CHECK: [0xff,0x4f,0x0a,0x7e]
+
+v_sqrt_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x67,0xd1,0x01,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x67,0xd1,0x01,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x67,0xd1,0x65,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x67,0xd1,0x66,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x67,0xd1,0x67,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x67,0xd1,0x6a,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x67,0xd1,0x6b,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x67,0xd1,0x6c,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x67,0xd1,0x6d,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x67,0xd1,0x6e,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x67,0xd1,0x6f,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x67,0xd1,0x7b,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x67,0xd1,0x7c,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x67,0xd1,0x7e,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x67,0xd1,0x7f,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x67,0xd1,0xfd,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x67,0xd1,0x01,0x01,0x00,0x00]
+
+v_sqrt_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x67,0xd1,0xff,0x01,0x00,0x00]
+
+v_sqrt_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x67,0xd1,0x01,0x00,0x00,0x20]
+
+v_sqrt_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x67,0xd1,0x01,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x67,0xd1,0x01,0x00,0x00,0x00]
+
+v_sqrt_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x67,0xd1,0x01,0x00,0x00,0x08]
+
+v_sqrt_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x67,0xd1,0x01,0x00,0x00,0x10]
+
+v_sqrt_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x67,0xd1,0x01,0x00,0x00,0x18]
+
+v_sqrt_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x50,0x0a,0x7e]
+
+v_sqrt_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x50,0xfc,0x7f]
+
+v_sqrt_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x50,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], s[100:101]
+// CHECK: [0x64,0x50,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], flat_scratch
+// CHECK: [0x66,0x50,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], vcc
+// CHECK: [0x6a,0x50,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], tba
+// CHECK: [0x6c,0x50,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], tma
+// CHECK: [0x6e,0x50,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x50,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], exec
+// CHECK: [0x7e,0x50,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], 0
+// CHECK: [0x80,0x50,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], -1
+// CHECK: [0xc1,0x50,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x50,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x50,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x50,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_sqrt_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x50,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_sqrt_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x51,0x0a,0x7e]
+
+v_sqrt_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x51,0x0a,0x7e]
+
+v_sqrt_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x68,0xd1,0x02,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x68,0xd1,0x02,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x68,0xd1,0x04,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], s[100:101]
+// CHECK: [0x05,0x00,0x68,0xd1,0x64,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x68,0xd1,0x66,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x68,0xd1,0x6a,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x68,0xd1,0x6c,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x68,0xd1,0x6e,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x68,0xd1,0x7a,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x68,0xd1,0x7e,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x68,0xd1,0xfd,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x68,0xd1,0x01,0x01,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x68,0xd1,0xfe,0x01,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x68,0xd1,0x02,0x00,0x00,0x20]
+
+v_sqrt_f64_e64 v[5:6], |s[2:3]|
+// CHECK: [0x05,0x01,0x68,0xd1,0x02,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x80,0x68,0xd1,0x02,0x00,0x00,0x00]
+
+v_sqrt_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x68,0xd1,0x02,0x00,0x00,0x08]
+
+v_sqrt_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x68,0xd1,0x02,0x00,0x00,0x10]
+
+v_sqrt_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x68,0xd1,0x02,0x00,0x00,0x18]
+
+v_sin_f32 v5, s1
+// CHECK: [0x01,0x52,0x0a,0x7e]
+
+v_sin_f32 v255, s1
+// CHECK: [0x01,0x52,0xfe,0x7f]
+
+v_sin_f32 v5, s101
+// CHECK: [0x65,0x52,0x0a,0x7e]
+
+v_sin_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x52,0x0a,0x7e]
+
+v_sin_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x52,0x0a,0x7e]
+
+v_sin_f32 v5, vcc_lo
+// CHECK: [0x6a,0x52,0x0a,0x7e]
+
+v_sin_f32 v5, vcc_hi
+// CHECK: [0x6b,0x52,0x0a,0x7e]
+
+v_sin_f32 v5, tba_lo
+// CHECK: [0x6c,0x52,0x0a,0x7e]
+
+v_sin_f32 v5, tba_hi
+// CHECK: [0x6d,0x52,0x0a,0x7e]
+
+v_sin_f32 v5, tma_lo
+// CHECK: [0x6e,0x52,0x0a,0x7e]
+
+v_sin_f32 v5, tma_hi
+// CHECK: [0x6f,0x52,0x0a,0x7e]
+
+v_sin_f32 v5, ttmp11
+// CHECK: [0x7b,0x52,0x0a,0x7e]
+
+v_sin_f32 v5, m0
+// CHECK: [0x7c,0x52,0x0a,0x7e]
+
+v_sin_f32 v5, exec_lo
+// CHECK: [0x7e,0x52,0x0a,0x7e]
+
+v_sin_f32 v5, exec_hi
+// CHECK: [0x7f,0x52,0x0a,0x7e]
+
+v_sin_f32 v5, 0
+// CHECK: [0x80,0x52,0x0a,0x7e]
+
+v_sin_f32 v5, -1
+// CHECK: [0xc1,0x52,0x0a,0x7e]
+
+v_sin_f32 v5, 0.5
+// CHECK: [0xf0,0x52,0x0a,0x7e]
+
+v_sin_f32 v5, -4.0
+// CHECK: [0xf7,0x52,0x0a,0x7e]
+
+v_sin_f32 v5, 0xaf123456
+// CHECK: [0xff,0x52,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_sin_f32 v5, 0x3f717273
+// CHECK: [0xff,0x52,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_sin_f32 v5, v1
+// CHECK: [0x01,0x53,0x0a,0x7e]
+
+v_sin_f32 v5, v255
+// CHECK: [0xff,0x53,0x0a,0x7e]
+
+v_sin_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x69,0xd1,0x01,0x00,0x00,0x00]
+
+v_sin_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x69,0xd1,0x01,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x69,0xd1,0x65,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x69,0xd1,0x66,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x69,0xd1,0x67,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x69,0xd1,0x6a,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x69,0xd1,0x6b,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x69,0xd1,0x6c,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x69,0xd1,0x6d,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x69,0xd1,0x6e,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x69,0xd1,0x6f,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x69,0xd1,0x7b,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x69,0xd1,0x7c,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x69,0xd1,0x7e,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x69,0xd1,0x7f,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x69,0xd1,0xfd,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x69,0xd1,0x01,0x01,0x00,0x00]
+
+v_sin_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x69,0xd1,0xff,0x01,0x00,0x00]
+
+v_sin_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x69,0xd1,0x01,0x00,0x00,0x20]
+
+v_sin_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x69,0xd1,0x01,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x69,0xd1,0x01,0x00,0x00,0x00]
+
+v_sin_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x69,0xd1,0x01,0x00,0x00,0x08]
+
+v_sin_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x69,0xd1,0x01,0x00,0x00,0x10]
+
+v_sin_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x69,0xd1,0x01,0x00,0x00,0x18]
+
+v_cos_f32 v5, s1
+// CHECK: [0x01,0x54,0x0a,0x7e]
+
+v_cos_f32 v255, s1
+// CHECK: [0x01,0x54,0xfe,0x7f]
+
+v_cos_f32 v5, s101
+// CHECK: [0x65,0x54,0x0a,0x7e]
+
+v_cos_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x54,0x0a,0x7e]
+
+v_cos_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x54,0x0a,0x7e]
+
+v_cos_f32 v5, vcc_lo
+// CHECK: [0x6a,0x54,0x0a,0x7e]
+
+v_cos_f32 v5, vcc_hi
+// CHECK: [0x6b,0x54,0x0a,0x7e]
+
+v_cos_f32 v5, tba_lo
+// CHECK: [0x6c,0x54,0x0a,0x7e]
+
+v_cos_f32 v5, tba_hi
+// CHECK: [0x6d,0x54,0x0a,0x7e]
+
+v_cos_f32 v5, tma_lo
+// CHECK: [0x6e,0x54,0x0a,0x7e]
+
+v_cos_f32 v5, tma_hi
+// CHECK: [0x6f,0x54,0x0a,0x7e]
+
+v_cos_f32 v5, ttmp11
+// CHECK: [0x7b,0x54,0x0a,0x7e]
+
+v_cos_f32 v5, m0
+// CHECK: [0x7c,0x54,0x0a,0x7e]
+
+v_cos_f32 v5, exec_lo
+// CHECK: [0x7e,0x54,0x0a,0x7e]
+
+v_cos_f32 v5, exec_hi
+// CHECK: [0x7f,0x54,0x0a,0x7e]
+
+v_cos_f32 v5, 0
+// CHECK: [0x80,0x54,0x0a,0x7e]
+
+v_cos_f32 v5, -1
+// CHECK: [0xc1,0x54,0x0a,0x7e]
+
+v_cos_f32 v5, 0.5
+// CHECK: [0xf0,0x54,0x0a,0x7e]
+
+v_cos_f32 v5, -4.0
+// CHECK: [0xf7,0x54,0x0a,0x7e]
+
+v_cos_f32 v5, 0xaf123456
+// CHECK: [0xff,0x54,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_cos_f32 v5, 0x3f717273
+// CHECK: [0xff,0x54,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_cos_f32 v5, v1
+// CHECK: [0x01,0x55,0x0a,0x7e]
+
+v_cos_f32 v5, v255
+// CHECK: [0xff,0x55,0x0a,0x7e]
+
+v_cos_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x6a,0xd1,0x01,0x00,0x00,0x00]
+
+v_cos_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x6a,0xd1,0x01,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x6a,0xd1,0x65,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x6a,0xd1,0x66,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x6a,0xd1,0x67,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x6a,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x6a,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x6a,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x6a,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x6a,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x6a,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x6a,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x6a,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x6a,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x6a,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x6a,0xd1,0xfd,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x6a,0xd1,0x01,0x01,0x00,0x00]
+
+v_cos_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x6a,0xd1,0xff,0x01,0x00,0x00]
+
+v_cos_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x6a,0xd1,0x01,0x00,0x00,0x20]
+
+v_cos_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x6a,0xd1,0x01,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x6a,0xd1,0x01,0x00,0x00,0x00]
+
+v_cos_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x6a,0xd1,0x01,0x00,0x00,0x08]
+
+v_cos_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x6a,0xd1,0x01,0x00,0x00,0x10]
+
+v_cos_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x6a,0xd1,0x01,0x00,0x00,0x18]
+
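+// v_not_b32 and the bit-reverse/bit-scan ops below reuse the same source
+// encodings; 0.5 and -4.0 still assemble to the single-byte inline float
+// codes 0xf0/0xf7 even though the operation is integer. Their _e64 forms
+// accept no neg/abs/clamp/omod modifiers, which is presumably why only
+// plain source variants are tested.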
+v_not_b32 v5, s1
+// CHECK: [0x01,0x56,0x0a,0x7e]
+
+v_not_b32 v255, s1
+// CHECK: [0x01,0x56,0xfe,0x7f]
+
+v_not_b32 v5, s101
+// CHECK: [0x65,0x56,0x0a,0x7e]
+
+v_not_b32 v5, flat_scratch_lo
+// CHECK: [0x66,0x56,0x0a,0x7e]
+
+v_not_b32 v5, flat_scratch_hi
+// CHECK: [0x67,0x56,0x0a,0x7e]
+
+v_not_b32 v5, vcc_lo
+// CHECK: [0x6a,0x56,0x0a,0x7e]
+
+v_not_b32 v5, vcc_hi
+// CHECK: [0x6b,0x56,0x0a,0x7e]
+
+v_not_b32 v5, tba_lo
+// CHECK: [0x6c,0x56,0x0a,0x7e]
+
+v_not_b32 v5, tba_hi
+// CHECK: [0x6d,0x56,0x0a,0x7e]
+
+v_not_b32 v5, tma_lo
+// CHECK: [0x6e,0x56,0x0a,0x7e]
+
+v_not_b32 v5, tma_hi
+// CHECK: [0x6f,0x56,0x0a,0x7e]
+
+v_not_b32 v5, ttmp11
+// CHECK: [0x7b,0x56,0x0a,0x7e]
+
+v_not_b32 v5, m0
+// CHECK: [0x7c,0x56,0x0a,0x7e]
+
+v_not_b32 v5, exec_lo
+// CHECK: [0x7e,0x56,0x0a,0x7e]
+
+v_not_b32 v5, exec_hi
+// CHECK: [0x7f,0x56,0x0a,0x7e]
+
+v_not_b32 v5, 0
+// CHECK: [0x80,0x56,0x0a,0x7e]
+
+v_not_b32 v5, -1
+// CHECK: [0xc1,0x56,0x0a,0x7e]
+
+v_not_b32 v5, 0.5
+// CHECK: [0xf0,0x56,0x0a,0x7e]
+
+v_not_b32 v5, -4.0
+// CHECK: [0xf7,0x56,0x0a,0x7e]
+
+v_not_b32 v5, 0xaf123456
+// CHECK: [0xff,0x56,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_not_b32 v5, 0x3f717273
+// CHECK: [0xff,0x56,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_not_b32 v5, v1
+// CHECK: [0x01,0x57,0x0a,0x7e]
+
+v_not_b32 v5, v255
+// CHECK: [0xff,0x57,0x0a,0x7e]
+
+v_not_b32_e64 v5, s1
+// CHECK: [0x05,0x00,0x6b,0xd1,0x01,0x00,0x00,0x00]
+
+v_not_b32_e64 v255, s1
+// CHECK: [0xff,0x00,0x6b,0xd1,0x01,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, s101
+// CHECK: [0x05,0x00,0x6b,0xd1,0x65,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x6b,0xd1,0x66,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x6b,0xd1,0x67,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x6b,0xd1,0x6a,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x6b,0xd1,0x6b,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x6b,0xd1,0x6c,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x6b,0xd1,0x6d,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x6b,0xd1,0x6e,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x6b,0xd1,0x6f,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x6b,0xd1,0x7b,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, m0
+// CHECK: [0x05,0x00,0x6b,0xd1,0x7c,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x6b,0xd1,0x7e,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x6b,0xd1,0x7f,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, 0
+// CHECK: [0x05,0x00,0x6b,0xd1,0x80,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, -1
+// CHECK: [0x05,0x00,0x6b,0xd1,0xc1,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x6b,0xd1,0xf0,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x6b,0xd1,0xf7,0x00,0x00,0x00]
+
+v_not_b32_e64 v5, v1
+// CHECK: [0x05,0x00,0x6b,0xd1,0x01,0x01,0x00,0x00]
+
+v_not_b32_e64 v5, v255
+// CHECK: [0x05,0x00,0x6b,0xd1,0xff,0x01,0x00,0x00]
+
+v_bfrev_b32 v5, s1
+// CHECK: [0x01,0x58,0x0a,0x7e]
+
+v_bfrev_b32 v255, s1
+// CHECK: [0x01,0x58,0xfe,0x7f]
+
+v_bfrev_b32 v5, s101
+// CHECK: [0x65,0x58,0x0a,0x7e]
+
+v_bfrev_b32 v5, flat_scratch_lo
+// CHECK: [0x66,0x58,0x0a,0x7e]
+
+v_bfrev_b32 v5, flat_scratch_hi
+// CHECK: [0x67,0x58,0x0a,0x7e]
+
+v_bfrev_b32 v5, vcc_lo
+// CHECK: [0x6a,0x58,0x0a,0x7e]
+
+v_bfrev_b32 v5, vcc_hi
+// CHECK: [0x6b,0x58,0x0a,0x7e]
+
+v_bfrev_b32 v5, tba_lo
+// CHECK: [0x6c,0x58,0x0a,0x7e]
+
+v_bfrev_b32 v5, tba_hi
+// CHECK: [0x6d,0x58,0x0a,0x7e]
+
+v_bfrev_b32 v5, tma_lo
+// CHECK: [0x6e,0x58,0x0a,0x7e]
+
+v_bfrev_b32 v5, tma_hi
+// CHECK: [0x6f,0x58,0x0a,0x7e]
+
+v_bfrev_b32 v5, ttmp11
+// CHECK: [0x7b,0x58,0x0a,0x7e]
+
+v_bfrev_b32 v5, m0
+// CHECK: [0x7c,0x58,0x0a,0x7e]
+
+v_bfrev_b32 v5, exec_lo
+// CHECK: [0x7e,0x58,0x0a,0x7e]
+
+v_bfrev_b32 v5, exec_hi
+// CHECK: [0x7f,0x58,0x0a,0x7e]
+
+v_bfrev_b32 v5, 0
+// CHECK: [0x80,0x58,0x0a,0x7e]
+
+v_bfrev_b32 v5, -1
+// CHECK: [0xc1,0x58,0x0a,0x7e]
+
+v_bfrev_b32 v5, 0.5
+// CHECK: [0xf0,0x58,0x0a,0x7e]
+
+v_bfrev_b32 v5, -4.0
+// CHECK: [0xf7,0x58,0x0a,0x7e]
+
+v_bfrev_b32 v5, 0xaf123456
+// CHECK: [0xff,0x58,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_bfrev_b32 v5, 0x3f717273
+// CHECK: [0xff,0x58,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_bfrev_b32 v5, v1
+// CHECK: [0x01,0x59,0x0a,0x7e]
+
+v_bfrev_b32 v5, v255
+// CHECK: [0xff,0x59,0x0a,0x7e]
+
+v_bfrev_b32_e64 v5, s1
+// CHECK: [0x05,0x00,0x6c,0xd1,0x01,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v255, s1
+// CHECK: [0xff,0x00,0x6c,0xd1,0x01,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, s101
+// CHECK: [0x05,0x00,0x6c,0xd1,0x65,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x6c,0xd1,0x66,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x6c,0xd1,0x67,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x6c,0xd1,0x6a,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x6c,0xd1,0x6b,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x6c,0xd1,0x6c,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x6c,0xd1,0x6d,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x6c,0xd1,0x6e,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x6c,0xd1,0x6f,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x6c,0xd1,0x7b,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, m0
+// CHECK: [0x05,0x00,0x6c,0xd1,0x7c,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x6c,0xd1,0x7e,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x6c,0xd1,0x7f,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, 0
+// CHECK: [0x05,0x00,0x6c,0xd1,0x80,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, -1
+// CHECK: [0x05,0x00,0x6c,0xd1,0xc1,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x6c,0xd1,0xf0,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x6c,0xd1,0xf7,0x00,0x00,0x00]
+
+v_bfrev_b32_e64 v5, v1
+// CHECK: [0x05,0x00,0x6c,0xd1,0x01,0x01,0x00,0x00]
+
+v_bfrev_b32_e64 v5, v255
+// CHECK: [0x05,0x00,0x6c,0xd1,0xff,0x01,0x00,0x00]
+
+v_ffbh_u32 v5, s1
+// CHECK: [0x01,0x5a,0x0a,0x7e]
+
+v_ffbh_u32 v255, s1
+// CHECK: [0x01,0x5a,0xfe,0x7f]
+
+v_ffbh_u32 v5, s101
+// CHECK: [0x65,0x5a,0x0a,0x7e]
+
+v_ffbh_u32 v5, flat_scratch_lo
+// CHECK: [0x66,0x5a,0x0a,0x7e]
+
+v_ffbh_u32 v5, flat_scratch_hi
+// CHECK: [0x67,0x5a,0x0a,0x7e]
+
+v_ffbh_u32 v5, vcc_lo
+// CHECK: [0x6a,0x5a,0x0a,0x7e]
+
+v_ffbh_u32 v5, vcc_hi
+// CHECK: [0x6b,0x5a,0x0a,0x7e]
+
+v_ffbh_u32 v5, tba_lo
+// CHECK: [0x6c,0x5a,0x0a,0x7e]
+
+v_ffbh_u32 v5, tba_hi
+// CHECK: [0x6d,0x5a,0x0a,0x7e]
+
+v_ffbh_u32 v5, tma_lo
+// CHECK: [0x6e,0x5a,0x0a,0x7e]
+
+v_ffbh_u32 v5, tma_hi
+// CHECK: [0x6f,0x5a,0x0a,0x7e]
+
+v_ffbh_u32 v5, ttmp11
+// CHECK: [0x7b,0x5a,0x0a,0x7e]
+
+v_ffbh_u32 v5, m0
+// CHECK: [0x7c,0x5a,0x0a,0x7e]
+
+v_ffbh_u32 v5, exec_lo
+// CHECK: [0x7e,0x5a,0x0a,0x7e]
+
+v_ffbh_u32 v5, exec_hi
+// CHECK: [0x7f,0x5a,0x0a,0x7e]
+
+v_ffbh_u32 v5, 0
+// CHECK: [0x80,0x5a,0x0a,0x7e]
+
+v_ffbh_u32 v5, -1
+// CHECK: [0xc1,0x5a,0x0a,0x7e]
+
+v_ffbh_u32 v5, 0.5
+// CHECK: [0xf0,0x5a,0x0a,0x7e]
+
+v_ffbh_u32 v5, -4.0
+// CHECK: [0xf7,0x5a,0x0a,0x7e]
+
+v_ffbh_u32 v5, 0xaf123456
+// CHECK: [0xff,0x5a,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_ffbh_u32 v5, 0x3f717273
+// CHECK: [0xff,0x5a,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_ffbh_u32 v5, v1
+// CHECK: [0x01,0x5b,0x0a,0x7e]
+
+v_ffbh_u32 v5, v255
+// CHECK: [0xff,0x5b,0x0a,0x7e]
+
+v_ffbh_u32_e64 v5, s1
+// CHECK: [0x05,0x00,0x6d,0xd1,0x01,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v255, s1
+// CHECK: [0xff,0x00,0x6d,0xd1,0x01,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, s101
+// CHECK: [0x05,0x00,0x6d,0xd1,0x65,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x6d,0xd1,0x66,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x6d,0xd1,0x67,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x6d,0xd1,0x6a,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x6d,0xd1,0x6b,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x6d,0xd1,0x6c,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x6d,0xd1,0x6d,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x6d,0xd1,0x6e,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x6d,0xd1,0x6f,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x6d,0xd1,0x7b,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, m0
+// CHECK: [0x05,0x00,0x6d,0xd1,0x7c,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x6d,0xd1,0x7e,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x6d,0xd1,0x7f,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, 0
+// CHECK: [0x05,0x00,0x6d,0xd1,0x80,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, -1
+// CHECK: [0x05,0x00,0x6d,0xd1,0xc1,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x6d,0xd1,0xf0,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x6d,0xd1,0xf7,0x00,0x00,0x00]
+
+v_ffbh_u32_e64 v5, v1
+// CHECK: [0x05,0x00,0x6d,0xd1,0x01,0x01,0x00,0x00]
+
+v_ffbh_u32_e64 v5, v255
+// CHECK: [0x05,0x00,0x6d,0xd1,0xff,0x01,0x00,0x00]
+
+v_ffbl_b32 v5, s1
+// CHECK: [0x01,0x5c,0x0a,0x7e]
+
+v_ffbl_b32 v255, s1
+// CHECK: [0x01,0x5c,0xfe,0x7f]
+
+v_ffbl_b32 v5, s101
+// CHECK: [0x65,0x5c,0x0a,0x7e]
+
+v_ffbl_b32 v5, flat_scratch_lo
+// CHECK: [0x66,0x5c,0x0a,0x7e]
+
+v_ffbl_b32 v5, flat_scratch_hi
+// CHECK: [0x67,0x5c,0x0a,0x7e]
+
+v_ffbl_b32 v5, vcc_lo
+// CHECK: [0x6a,0x5c,0x0a,0x7e]
+
+v_ffbl_b32 v5, vcc_hi
+// CHECK: [0x6b,0x5c,0x0a,0x7e]
+
+v_ffbl_b32 v5, tba_lo
+// CHECK: [0x6c,0x5c,0x0a,0x7e]
+
+v_ffbl_b32 v5, tba_hi
+// CHECK: [0x6d,0x5c,0x0a,0x7e]
+
+v_ffbl_b32 v5, tma_lo
+// CHECK: [0x6e,0x5c,0x0a,0x7e]
+
+v_ffbl_b32 v5, tma_hi
+// CHECK: [0x6f,0x5c,0x0a,0x7e]
+
+v_ffbl_b32 v5, ttmp11
+// CHECK: [0x7b,0x5c,0x0a,0x7e]
+
+v_ffbl_b32 v5, m0
+// CHECK: [0x7c,0x5c,0x0a,0x7e]
+
+v_ffbl_b32 v5, exec_lo
+// CHECK: [0x7e,0x5c,0x0a,0x7e]
+
+v_ffbl_b32 v5, exec_hi
+// CHECK: [0x7f,0x5c,0x0a,0x7e]
+
+v_ffbl_b32 v5, 0
+// CHECK: [0x80,0x5c,0x0a,0x7e]
+
+v_ffbl_b32 v5, -1
+// CHECK: [0xc1,0x5c,0x0a,0x7e]
+
+v_ffbl_b32 v5, 0.5
+// CHECK: [0xf0,0x5c,0x0a,0x7e]
+
+v_ffbl_b32 v5, -4.0
+// CHECK: [0xf7,0x5c,0x0a,0x7e]
+
+v_ffbl_b32 v5, 0xaf123456
+// CHECK: [0xff,0x5c,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_ffbl_b32 v5, 0x3f717273
+// CHECK: [0xff,0x5c,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_ffbl_b32 v5, v1
+// CHECK: [0x01,0x5d,0x0a,0x7e]
+
+v_ffbl_b32 v5, v255
+// CHECK: [0xff,0x5d,0x0a,0x7e]
+
+v_ffbl_b32_e64 v5, s1
+// CHECK: [0x05,0x00,0x6e,0xd1,0x01,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v255, s1
+// CHECK: [0xff,0x00,0x6e,0xd1,0x01,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, s101
+// CHECK: [0x05,0x00,0x6e,0xd1,0x65,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x6e,0xd1,0x66,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x6e,0xd1,0x67,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x6e,0xd1,0x6a,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x6e,0xd1,0x6b,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x6e,0xd1,0x6c,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x6e,0xd1,0x6d,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x6e,0xd1,0x6e,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x6e,0xd1,0x6f,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x6e,0xd1,0x7b,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, m0
+// CHECK: [0x05,0x00,0x6e,0xd1,0x7c,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x6e,0xd1,0x7e,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x6e,0xd1,0x7f,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, 0
+// CHECK: [0x05,0x00,0x6e,0xd1,0x80,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, -1
+// CHECK: [0x05,0x00,0x6e,0xd1,0xc1,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x6e,0xd1,0xf0,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x6e,0xd1,0xf7,0x00,0x00,0x00]
+
+v_ffbl_b32_e64 v5, v1
+// CHECK: [0x05,0x00,0x6e,0xd1,0x01,0x01,0x00,0x00]
+
+v_ffbl_b32_e64 v5, v255
+// CHECK: [0x05,0x00,0x6e,0xd1,0xff,0x01,0x00,0x00]
+
+v_ffbh_i32 v5, s1
+// CHECK: [0x01,0x5e,0x0a,0x7e]
+
+v_ffbh_i32 v255, s1
+// CHECK: [0x01,0x5e,0xfe,0x7f]
+
+v_ffbh_i32 v5, s101
+// CHECK: [0x65,0x5e,0x0a,0x7e]
+
+v_ffbh_i32 v5, flat_scratch_lo
+// CHECK: [0x66,0x5e,0x0a,0x7e]
+
+v_ffbh_i32 v5, flat_scratch_hi
+// CHECK: [0x67,0x5e,0x0a,0x7e]
+
+v_ffbh_i32 v5, vcc_lo
+// CHECK: [0x6a,0x5e,0x0a,0x7e]
+
+v_ffbh_i32 v5, vcc_hi
+// CHECK: [0x6b,0x5e,0x0a,0x7e]
+
+v_ffbh_i32 v5, tba_lo
+// CHECK: [0x6c,0x5e,0x0a,0x7e]
+
+v_ffbh_i32 v5, tba_hi
+// CHECK: [0x6d,0x5e,0x0a,0x7e]
+
+v_ffbh_i32 v5, tma_lo
+// CHECK: [0x6e,0x5e,0x0a,0x7e]
+
+v_ffbh_i32 v5, tma_hi
+// CHECK: [0x6f,0x5e,0x0a,0x7e]
+
+v_ffbh_i32 v5, ttmp11
+// CHECK: [0x7b,0x5e,0x0a,0x7e]
+
+v_ffbh_i32 v5, m0
+// CHECK: [0x7c,0x5e,0x0a,0x7e]
+
+v_ffbh_i32 v5, exec_lo
+// CHECK: [0x7e,0x5e,0x0a,0x7e]
+
+v_ffbh_i32 v5, exec_hi
+// CHECK: [0x7f,0x5e,0x0a,0x7e]
+
+v_ffbh_i32 v5, 0
+// CHECK: [0x80,0x5e,0x0a,0x7e]
+
+v_ffbh_i32 v5, -1
+// CHECK: [0xc1,0x5e,0x0a,0x7e]
+
+v_ffbh_i32 v5, 0.5
+// CHECK: [0xf0,0x5e,0x0a,0x7e]
+
+v_ffbh_i32 v5, -4.0
+// CHECK: [0xf7,0x5e,0x0a,0x7e]
+
+v_ffbh_i32 v5, 0xaf123456
+// CHECK: [0xff,0x5e,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_ffbh_i32 v5, 0x3f717273
+// CHECK: [0xff,0x5e,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_ffbh_i32 v5, v1
+// CHECK: [0x01,0x5f,0x0a,0x7e]
+
+v_ffbh_i32 v5, v255
+// CHECK: [0xff,0x5f,0x0a,0x7e]
+
+v_ffbh_i32_e64 v5, s1
+// CHECK: [0x05,0x00,0x6f,0xd1,0x01,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v255, s1
+// CHECK: [0xff,0x00,0x6f,0xd1,0x01,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, s101
+// CHECK: [0x05,0x00,0x6f,0xd1,0x65,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x6f,0xd1,0x66,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x6f,0xd1,0x67,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x6f,0xd1,0x6a,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x6f,0xd1,0x6b,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x6f,0xd1,0x6c,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x6f,0xd1,0x6d,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x6f,0xd1,0x6e,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x6f,0xd1,0x6f,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x6f,0xd1,0x7b,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, m0
+// CHECK: [0x05,0x00,0x6f,0xd1,0x7c,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x6f,0xd1,0x7e,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x6f,0xd1,0x7f,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, 0
+// CHECK: [0x05,0x00,0x6f,0xd1,0x80,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, -1
+// CHECK: [0x05,0x00,0x6f,0xd1,0xc1,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x6f,0xd1,0xf0,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x6f,0xd1,0xf7,0x00,0x00,0x00]
+
+v_ffbh_i32_e64 v5, v1
+// CHECK: [0x05,0x00,0x6f,0xd1,0x01,0x01,0x00,0x00]
+
+v_ffbh_i32_e64 v5, v255
+// CHECK: [0x05,0x00,0x6f,0xd1,0xff,0x01,0x00,0x00]
+
+v_frexp_exp_i32_f64 v5, s[2:3]
+// CHECK: [0x02,0x60,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v255, s[2:3]
+// CHECK: [0x02,0x60,0xfe,0x7f]
+
+v_frexp_exp_i32_f64 v5, s[4:5]
+// CHECK: [0x04,0x60,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, s[100:101]
+// CHECK: [0x64,0x60,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, flat_scratch
+// CHECK: [0x66,0x60,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, vcc
+// CHECK: [0x6a,0x60,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, tba
+// CHECK: [0x6c,0x60,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, tma
+// CHECK: [0x6e,0x60,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, ttmp[10:11]
+// CHECK: [0x7a,0x60,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, exec
+// CHECK: [0x7e,0x60,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, 0
+// CHECK: [0x80,0x60,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, -1
+// CHECK: [0xc1,0x60,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, 0.5
+// CHECK: [0xf0,0x60,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, -4.0
+// CHECK: [0xf7,0x60,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, 0xaf123456
+// CHECK: [0xff,0x60,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_frexp_exp_i32_f64 v5, 0x3f717273
+// CHECK: [0xff,0x60,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_frexp_exp_i32_f64 v5, v[1:2]
+// CHECK: [0x01,0x61,0x0a,0x7e]
+
+v_frexp_exp_i32_f64 v5, v[254:255]
+// CHECK: [0xfe,0x61,0x0a,0x7e]
+
+v_frexp_exp_i32_f64_e64 v5, s[2:3]
+// CHECK: [0x05,0x00,0x70,0xd1,0x02,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v255, s[2:3]
+// CHECK: [0xff,0x00,0x70,0xd1,0x02,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, s[4:5]
+// CHECK: [0x05,0x00,0x70,0xd1,0x04,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, s[100:101]
+// CHECK: [0x05,0x00,0x70,0xd1,0x64,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, flat_scratch
+// CHECK: [0x05,0x00,0x70,0xd1,0x66,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, vcc
+// CHECK: [0x05,0x00,0x70,0xd1,0x6a,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, tba
+// CHECK: [0x05,0x00,0x70,0xd1,0x6c,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, tma
+// CHECK: [0x05,0x00,0x70,0xd1,0x6e,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, ttmp[10:11]
+// CHECK: [0x05,0x00,0x70,0xd1,0x7a,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, exec
+// CHECK: [0x05,0x00,0x70,0xd1,0x7e,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, scc
+// CHECK: [0x05,0x00,0x70,0xd1,0xfd,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, v[1:2]
+// CHECK: [0x05,0x00,0x70,0xd1,0x01,0x01,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, v[254:255]
+// CHECK: [0x05,0x00,0x70,0xd1,0xfe,0x01,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, -s[2:3]
+// CHECK: [0x05,0x00,0x70,0xd1,0x02,0x00,0x00,0x20]
+
+v_frexp_exp_i32_f64_e64 v5, |s[2:3]|
+// CHECK: [0x05,0x01,0x70,0xd1,0x02,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f64_e64 v5, s[2:3] clamp
+// CHECK: [0x05,0x80,0x70,0xd1,0x02,0x00,0x00,0x00]
+
+v_frexp_mant_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x62,0x0a,0x7e]
+
+v_frexp_mant_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x62,0xfc,0x7f]
+
+v_frexp_mant_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x62,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], s[100:101]
+// CHECK: [0x64,0x62,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], flat_scratch
+// CHECK: [0x66,0x62,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], vcc
+// CHECK: [0x6a,0x62,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], tba
+// CHECK: [0x6c,0x62,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], tma
+// CHECK: [0x6e,0x62,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x62,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], exec
+// CHECK: [0x7e,0x62,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], 0
+// CHECK: [0x80,0x62,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], -1
+// CHECK: [0xc1,0x62,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x62,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x62,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x62,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_frexp_mant_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x62,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_frexp_mant_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x63,0x0a,0x7e]
+
+v_frexp_mant_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x63,0x0a,0x7e]
+
+v_frexp_mant_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x71,0xd1,0x02,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x71,0xd1,0x02,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x71,0xd1,0x04,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], s[100:101]
+// CHECK: [0x05,0x00,0x71,0xd1,0x64,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x71,0xd1,0x66,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x71,0xd1,0x6a,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x71,0xd1,0x6c,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x71,0xd1,0x6e,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x71,0xd1,0x7a,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x71,0xd1,0x7e,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x71,0xd1,0xfd,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x71,0xd1,0x01,0x01,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x71,0xd1,0xfe,0x01,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x71,0xd1,0x02,0x00,0x00,0x20]
+
+v_frexp_mant_f64_e64 v[5:6], |s[2:3]|
+// CHECK: [0x05,0x01,0x71,0xd1,0x02,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x80,0x71,0xd1,0x02,0x00,0x00,0x00]
+
+v_frexp_mant_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x71,0xd1,0x02,0x00,0x00,0x08]
+
+v_frexp_mant_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x71,0xd1,0x02,0x00,0x00,0x10]
+
+v_frexp_mant_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x71,0xd1,0x02,0x00,0x00,0x18]
+
+v_fract_f64 v[5:6], s[2:3]
+// CHECK: [0x02,0x64,0x0a,0x7e]
+
+v_fract_f64 v[254:255], s[2:3]
+// CHECK: [0x02,0x64,0xfc,0x7f]
+
+v_fract_f64 v[5:6], s[4:5]
+// CHECK: [0x04,0x64,0x0a,0x7e]
+
+v_fract_f64 v[5:6], s[100:101]
+// CHECK: [0x64,0x64,0x0a,0x7e]
+
+v_fract_f64 v[5:6], flat_scratch
+// CHECK: [0x66,0x64,0x0a,0x7e]
+
+v_fract_f64 v[5:6], vcc
+// CHECK: [0x6a,0x64,0x0a,0x7e]
+
+v_fract_f64 v[5:6], tba
+// CHECK: [0x6c,0x64,0x0a,0x7e]
+
+v_fract_f64 v[5:6], tma
+// CHECK: [0x6e,0x64,0x0a,0x7e]
+
+v_fract_f64 v[5:6], ttmp[10:11]
+// CHECK: [0x7a,0x64,0x0a,0x7e]
+
+v_fract_f64 v[5:6], exec
+// CHECK: [0x7e,0x64,0x0a,0x7e]
+
+v_fract_f64 v[5:6], 0
+// CHECK: [0x80,0x64,0x0a,0x7e]
+
+v_fract_f64 v[5:6], -1
+// CHECK: [0xc1,0x64,0x0a,0x7e]
+
+v_fract_f64 v[5:6], 0.5
+// CHECK: [0xf0,0x64,0x0a,0x7e]
+
+v_fract_f64 v[5:6], -4.0
+// CHECK: [0xf7,0x64,0x0a,0x7e]
+
+v_fract_f64 v[5:6], 0xaf123456
+// CHECK: [0xff,0x64,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_fract_f64 v[5:6], 0x3f717273
+// CHECK: [0xff,0x64,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_fract_f64 v[5:6], v[1:2]
+// CHECK: [0x01,0x65,0x0a,0x7e]
+
+v_fract_f64 v[5:6], v[254:255]
+// CHECK: [0xfe,0x65,0x0a,0x7e]
+
+v_fract_f64_e64 v[5:6], s[2:3]
+// CHECK: [0x05,0x00,0x72,0xd1,0x02,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[254:255], s[2:3]
+// CHECK: [0xfe,0x00,0x72,0xd1,0x02,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], s[4:5]
+// CHECK: [0x05,0x00,0x72,0xd1,0x04,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], s[100:101]
+// CHECK: [0x05,0x00,0x72,0xd1,0x64,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], flat_scratch
+// CHECK: [0x05,0x00,0x72,0xd1,0x66,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], vcc
+// CHECK: [0x05,0x00,0x72,0xd1,0x6a,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], tba
+// CHECK: [0x05,0x00,0x72,0xd1,0x6c,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], tma
+// CHECK: [0x05,0x00,0x72,0xd1,0x6e,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], ttmp[10:11]
+// CHECK: [0x05,0x00,0x72,0xd1,0x7a,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], exec
+// CHECK: [0x05,0x00,0x72,0xd1,0x7e,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], scc
+// CHECK: [0x05,0x00,0x72,0xd1,0xfd,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], v[1:2]
+// CHECK: [0x05,0x00,0x72,0xd1,0x01,0x01,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], v[254:255]
+// CHECK: [0x05,0x00,0x72,0xd1,0xfe,0x01,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], -s[2:3]
+// CHECK: [0x05,0x00,0x72,0xd1,0x02,0x00,0x00,0x20]
+
+v_fract_f64_e64 v[5:6], |s[2:3]|
+// CHECK: [0x05,0x01,0x72,0xd1,0x02,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], s[2:3] clamp
+// CHECK: [0x05,0x80,0x72,0xd1,0x02,0x00,0x00,0x00]
+
+v_fract_f64_e64 v[5:6], s[2:3] mul:2
+// CHECK: [0x05,0x00,0x72,0xd1,0x02,0x00,0x00,0x08]
+
+v_fract_f64_e64 v[5:6], s[2:3] mul:4
+// CHECK: [0x05,0x00,0x72,0xd1,0x02,0x00,0x00,0x10]
+
+v_fract_f64_e64 v[5:6], s[2:3] div:2
+// CHECK: [0x05,0x00,0x72,0xd1,0x02,0x00,0x00,0x18]
+
+v_frexp_exp_i32_f32 v5, s1
+// CHECK: [0x01,0x66,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v255, s1
+// CHECK: [0x01,0x66,0xfe,0x7f]
+
+v_frexp_exp_i32_f32 v5, s101
+// CHECK: [0x65,0x66,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x66,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x66,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, vcc_lo
+// CHECK: [0x6a,0x66,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, vcc_hi
+// CHECK: [0x6b,0x66,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, tba_lo
+// CHECK: [0x6c,0x66,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, tba_hi
+// CHECK: [0x6d,0x66,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, tma_lo
+// CHECK: [0x6e,0x66,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, tma_hi
+// CHECK: [0x6f,0x66,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, ttmp11
+// CHECK: [0x7b,0x66,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, m0
+// CHECK: [0x7c,0x66,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, exec_lo
+// CHECK: [0x7e,0x66,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, exec_hi
+// CHECK: [0x7f,0x66,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, 0
+// CHECK: [0x80,0x66,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, -1
+// CHECK: [0xc1,0x66,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, 0.5
+// CHECK: [0xf0,0x66,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, -4.0
+// CHECK: [0xf7,0x66,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, 0xaf123456
+// CHECK: [0xff,0x66,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_frexp_exp_i32_f32 v5, 0x3f717273
+// CHECK: [0xff,0x66,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_frexp_exp_i32_f32 v5, v1
+// CHECK: [0x01,0x67,0x0a,0x7e]
+
+v_frexp_exp_i32_f32 v5, v255
+// CHECK: [0xff,0x67,0x0a,0x7e]
+
+v_frexp_exp_i32_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x73,0xd1,0x01,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x73,0xd1,0x01,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x73,0xd1,0x65,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x73,0xd1,0x66,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x73,0xd1,0x67,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x73,0xd1,0x6a,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x73,0xd1,0x6b,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x73,0xd1,0x6c,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x73,0xd1,0x6d,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x73,0xd1,0x6e,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x73,0xd1,0x6f,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x73,0xd1,0x7b,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x73,0xd1,0x7c,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x73,0xd1,0x7e,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x73,0xd1,0x7f,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x73,0xd1,0xfd,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x73,0xd1,0x01,0x01,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x73,0xd1,0xff,0x01,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x73,0xd1,0x01,0x00,0x00,0x20]
+
+v_frexp_exp_i32_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x73,0xd1,0x01,0x00,0x00,0x00]
+
+v_frexp_exp_i32_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x73,0xd1,0x01,0x00,0x00,0x00]
+
+v_frexp_mant_f32 v5, s1
+// CHECK: [0x01,0x68,0x0a,0x7e]
+
+v_frexp_mant_f32 v255, s1
+// CHECK: [0x01,0x68,0xfe,0x7f]
+
+v_frexp_mant_f32 v5, s101
+// CHECK: [0x65,0x68,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x68,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x68,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, vcc_lo
+// CHECK: [0x6a,0x68,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, vcc_hi
+// CHECK: [0x6b,0x68,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, tba_lo
+// CHECK: [0x6c,0x68,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, tba_hi
+// CHECK: [0x6d,0x68,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, tma_lo
+// CHECK: [0x6e,0x68,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, tma_hi
+// CHECK: [0x6f,0x68,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, ttmp11
+// CHECK: [0x7b,0x68,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, m0
+// CHECK: [0x7c,0x68,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, exec_lo
+// CHECK: [0x7e,0x68,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, exec_hi
+// CHECK: [0x7f,0x68,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, 0
+// CHECK: [0x80,0x68,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, -1
+// CHECK: [0xc1,0x68,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, 0.5
+// CHECK: [0xf0,0x68,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, -4.0
+// CHECK: [0xf7,0x68,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, 0xaf123456
+// CHECK: [0xff,0x68,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_frexp_mant_f32 v5, 0x3f717273
+// CHECK: [0xff,0x68,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_frexp_mant_f32 v5, v1
+// CHECK: [0x01,0x69,0x0a,0x7e]
+
+v_frexp_mant_f32 v5, v255
+// CHECK: [0xff,0x69,0x0a,0x7e]
+
+v_frexp_mant_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x74,0xd1,0x01,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x74,0xd1,0x01,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x74,0xd1,0x65,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x74,0xd1,0x66,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x74,0xd1,0x67,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x74,0xd1,0x6a,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x74,0xd1,0x6b,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x74,0xd1,0x6c,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x74,0xd1,0x6d,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x74,0xd1,0x6e,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x74,0xd1,0x6f,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x74,0xd1,0x7b,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x74,0xd1,0x7c,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x74,0xd1,0x7e,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x74,0xd1,0x7f,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x74,0xd1,0xfd,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x74,0xd1,0x01,0x01,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x74,0xd1,0xff,0x01,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x74,0xd1,0x01,0x00,0x00,0x20]
+
+v_frexp_mant_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x74,0xd1,0x01,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x74,0xd1,0x01,0x00,0x00,0x00]
+
+v_frexp_mant_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x74,0xd1,0x01,0x00,0x00,0x08]
+
+v_frexp_mant_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x74,0xd1,0x01,0x00,0x00,0x10]
+
+v_frexp_mant_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x74,0xd1,0x01,0x00,0x00,0x18]
+
+v_clrexcp
+// CHECK: [0x00,0x6a,0x00,0x7e]
+
+v_clrexcp_e64
+// CHECK: [0x00,0x00,0x75,0xd1,0x00,0x00,0x00,0x00]
+
+v_movreld_b32 v5, m0
+// CHECK: [0x7c,0x6c,0x0a,0x7e]
+
+v_movreld_b32 v255, m0
+// CHECK: [0x7c,0x6c,0xfe,0x7f]
+
+v_movreld_b32 v5, 0
+// CHECK: [0x80,0x6c,0x0a,0x7e]
+
+v_movreld_b32 v5, -1
+// CHECK: [0xc1,0x6c,0x0a,0x7e]
+
+v_movreld_b32 v5, 0.5
+// CHECK: [0xf0,0x6c,0x0a,0x7e]
+
+v_movreld_b32 v5, -4.0
+// CHECK: [0xf7,0x6c,0x0a,0x7e]
+
+v_movreld_b32 v5, v1
+// CHECK: [0x01,0x6d,0x0a,0x7e]
+
+v_movreld_b32 v5, v255
+// CHECK: [0xff,0x6d,0x0a,0x7e]
+
+v_movreld_b32_e64 v5, m0
+// CHECK: [0x05,0x00,0x76,0xd1,0x7c,0x00,0x00,0x00]
+
+v_movreld_b32_e64 v255, m0
+// CHECK: [0xff,0x00,0x76,0xd1,0x7c,0x00,0x00,0x00]
+
+v_movreld_b32_e64 v5, 0
+// CHECK: [0x05,0x00,0x76,0xd1,0x80,0x00,0x00,0x00]
+
+v_movreld_b32_e64 v5, -1
+// CHECK: [0x05,0x00,0x76,0xd1,0xc1,0x00,0x00,0x00]
+
+v_movreld_b32_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x76,0xd1,0xf0,0x00,0x00,0x00]
+
+v_movreld_b32_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x76,0xd1,0xf7,0x00,0x00,0x00]
+
+v_movreld_b32_e64 v5, v1
+// CHECK: [0x05,0x00,0x76,0xd1,0x01,0x01,0x00,0x00]
+
+v_movreld_b32_e64 v5, v255
+// CHECK: [0x05,0x00,0x76,0xd1,0xff,0x01,0x00,0x00]
+
+v_movrels_b32 v5, v1
+// CHECK: [0x01,0x6f,0x0a,0x7e]
+
+v_movrels_b32 v255, v1
+// CHECK: [0x01,0x6f,0xfe,0x7f]
+
+v_movrels_b32 v5, v255
+// CHECK: [0xff,0x6f,0x0a,0x7e]
+
+v_movrels_b32_e64 v5, v1
+// CHECK: [0x05,0x00,0x77,0xd1,0x01,0x01,0x00,0x00]
+
+v_movrels_b32_e64 v255, v1
+// CHECK: [0xff,0x00,0x77,0xd1,0x01,0x01,0x00,0x00]
+
+v_movrels_b32_e64 v5, v255
+// CHECK: [0x05,0x00,0x77,0xd1,0xff,0x01,0x00,0x00]
+
+v_movrelsd_b32 v5, v1
+// CHECK: [0x01,0x71,0x0a,0x7e]
+
+v_movrelsd_b32 v255, v1
+// CHECK: [0x01,0x71,0xfe,0x7f]
+
+v_movrelsd_b32 v5, v255
+// CHECK: [0xff,0x71,0x0a,0x7e]
+
+v_movrelsd_b32_e64 v5, v1
+// CHECK: [0x05,0x00,0x78,0xd1,0x01,0x01,0x00,0x00]
+
+v_movrelsd_b32_e64 v255, v1
+// CHECK: [0xff,0x00,0x78,0xd1,0x01,0x01,0x00,0x00]
+
+v_movrelsd_b32_e64 v5, v255
+// CHECK: [0x05,0x00,0x78,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_f16_u16 v5, s1
+// CHECK: [0x01,0x72,0x0a,0x7e]
+
+v_cvt_f16_u16 v255, s1
+// CHECK: [0x01,0x72,0xfe,0x7f]
+
+v_cvt_f16_u16 v5, s101
+// CHECK: [0x65,0x72,0x0a,0x7e]
+
+v_cvt_f16_u16 v5, flat_scratch_lo
+// CHECK: [0x66,0x72,0x0a,0x7e]
+
+v_cvt_f16_u16 v5, flat_scratch_hi
+// CHECK: [0x67,0x72,0x0a,0x7e]
+
+v_cvt_f16_u16 v5, vcc_lo
+// CHECK: [0x6a,0x72,0x0a,0x7e]
+
+v_cvt_f16_u16 v5, vcc_hi
+// CHECK: [0x6b,0x72,0x0a,0x7e]
+
+v_cvt_f16_u16 v5, tba_lo
+// CHECK: [0x6c,0x72,0x0a,0x7e]
+
+v_cvt_f16_u16 v5, tba_hi
+// CHECK: [0x6d,0x72,0x0a,0x7e]
+
+v_cvt_f16_u16 v5, tma_lo
+// CHECK: [0x6e,0x72,0x0a,0x7e]
+
+v_cvt_f16_u16 v5, tma_hi
+// CHECK: [0x6f,0x72,0x0a,0x7e]
+
+v_cvt_f16_u16 v5, ttmp11
+// CHECK: [0x7b,0x72,0x0a,0x7e]
+
+v_cvt_f16_u16 v5, m0
+// CHECK: [0x7c,0x72,0x0a,0x7e]
+
+v_cvt_f16_u16 v5, exec_lo
+// CHECK: [0x7e,0x72,0x0a,0x7e]
+
+v_cvt_f16_u16 v5, exec_hi
+// CHECK: [0x7f,0x72,0x0a,0x7e]
+
+v_cvt_f16_u16 v5, 0
+// CHECK: [0x80,0x72,0x0a,0x7e]
+
+v_cvt_f16_u16 v5, -1
+// CHECK: [0xc1,0x72,0x0a,0x7e]
+
+v_cvt_f16_u16 v5, 0.5
+// CHECK: [0xf0,0x72,0x0a,0x7e]
+
+v_cvt_f16_u16 v5, -4.0
+// CHECK: [0xf7,0x72,0x0a,0x7e]
+
+v_cvt_f16_u16 v5, 0xfe0b
+// CHECK: [0xff,0x72,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_cvt_f16_u16 v5, 0x3456
+// CHECK: [0xff,0x72,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_cvt_f16_u16 v5, v1
+// CHECK: [0x01,0x73,0x0a,0x7e]
+
+v_cvt_f16_u16 v5, v255
+// CHECK: [0xff,0x73,0x0a,0x7e]
+
+v_cvt_f16_u16_e64 v5, s1
+// CHECK: [0x05,0x00,0x79,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v255, s1
+// CHECK: [0xff,0x00,0x79,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, s101
+// CHECK: [0x05,0x00,0x79,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x79,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x79,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x79,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x79,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x79,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x79,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x79,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x79,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x79,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, m0
+// CHECK: [0x05,0x00,0x79,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x79,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x79,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, 0
+// CHECK: [0x05,0x00,0x79,0xd1,0x80,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, -1
+// CHECK: [0x05,0x00,0x79,0xd1,0xc1,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x79,0xd1,0xf0,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x79,0xd1,0xf7,0x00,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, v1
+// CHECK: [0x05,0x00,0x79,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_f16_u16_e64 v5, v255
+// CHECK: [0x05,0x00,0x79,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_f16_i16 v5, s1
+// CHECK: [0x01,0x74,0x0a,0x7e]
+
+v_cvt_f16_i16 v255, s1
+// CHECK: [0x01,0x74,0xfe,0x7f]
+
+v_cvt_f16_i16 v5, s101
+// CHECK: [0x65,0x74,0x0a,0x7e]
+
+v_cvt_f16_i16 v5, flat_scratch_lo
+// CHECK: [0x66,0x74,0x0a,0x7e]
+
+v_cvt_f16_i16 v5, flat_scratch_hi
+// CHECK: [0x67,0x74,0x0a,0x7e]
+
+v_cvt_f16_i16 v5, vcc_lo
+// CHECK: [0x6a,0x74,0x0a,0x7e]
+
+v_cvt_f16_i16 v5, vcc_hi
+// CHECK: [0x6b,0x74,0x0a,0x7e]
+
+v_cvt_f16_i16 v5, tba_lo
+// CHECK: [0x6c,0x74,0x0a,0x7e]
+
+v_cvt_f16_i16 v5, tba_hi
+// CHECK: [0x6d,0x74,0x0a,0x7e]
+
+v_cvt_f16_i16 v5, tma_lo
+// CHECK: [0x6e,0x74,0x0a,0x7e]
+
+v_cvt_f16_i16 v5, tma_hi
+// CHECK: [0x6f,0x74,0x0a,0x7e]
+
+v_cvt_f16_i16 v5, ttmp11
+// CHECK: [0x7b,0x74,0x0a,0x7e]
+
+v_cvt_f16_i16 v5, m0
+// CHECK: [0x7c,0x74,0x0a,0x7e]
+
+v_cvt_f16_i16 v5, exec_lo
+// CHECK: [0x7e,0x74,0x0a,0x7e]
+
+v_cvt_f16_i16 v5, exec_hi
+// CHECK: [0x7f,0x74,0x0a,0x7e]
+
+v_cvt_f16_i16 v5, 0
+// CHECK: [0x80,0x74,0x0a,0x7e]
+
+v_cvt_f16_i16 v5, -1
+// CHECK: [0xc1,0x74,0x0a,0x7e]
+
+v_cvt_f16_i16 v5, 0.5
+// CHECK: [0xf0,0x74,0x0a,0x7e]
+
+v_cvt_f16_i16 v5, -4.0
+// CHECK: [0xf7,0x74,0x0a,0x7e]
+
+v_cvt_f16_i16 v5, 0xfe0b
+// CHECK: [0xff,0x74,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_cvt_f16_i16 v5, 0x3456
+// CHECK: [0xff,0x74,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_cvt_f16_i16 v5, v1
+// CHECK: [0x01,0x75,0x0a,0x7e]
+
+v_cvt_f16_i16 v5, v255
+// CHECK: [0xff,0x75,0x0a,0x7e]
+
+v_cvt_f16_i16_e64 v5, s1
+// CHECK: [0x05,0x00,0x7a,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v255, s1
+// CHECK: [0xff,0x00,0x7a,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, s101
+// CHECK: [0x05,0x00,0x7a,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x7a,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x7a,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x7a,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x7a,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x7a,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x7a,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x7a,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x7a,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x7a,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, m0
+// CHECK: [0x05,0x00,0x7a,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x7a,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x7a,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, 0
+// CHECK: [0x05,0x00,0x7a,0xd1,0x80,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, -1
+// CHECK: [0x05,0x00,0x7a,0xd1,0xc1,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, 0.5
+// CHECK: [0x05,0x00,0x7a,0xd1,0xf0,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, -4.0
+// CHECK: [0x05,0x00,0x7a,0xd1,0xf7,0x00,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, v1
+// CHECK: [0x05,0x00,0x7a,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_f16_i16_e64 v5, v255
+// CHECK: [0x05,0x00,0x7a,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_u16_f16 v5, s1
+// CHECK: [0x01,0x76,0x0a,0x7e]
+
+v_cvt_u16_f16 v255, s1
+// CHECK: [0x01,0x76,0xfe,0x7f]
+
+v_cvt_u16_f16 v5, s101
+// CHECK: [0x65,0x76,0x0a,0x7e]
+
+v_cvt_u16_f16 v5, flat_scratch_lo
+// CHECK: [0x66,0x76,0x0a,0x7e]
+
+v_cvt_u16_f16 v5, flat_scratch_hi
+// CHECK: [0x67,0x76,0x0a,0x7e]
+
+v_cvt_u16_f16 v5, vcc_lo
+// CHECK: [0x6a,0x76,0x0a,0x7e]
+
+v_cvt_u16_f16 v5, vcc_hi
+// CHECK: [0x6b,0x76,0x0a,0x7e]
+
+v_cvt_u16_f16 v5, tba_lo
+// CHECK: [0x6c,0x76,0x0a,0x7e]
+
+v_cvt_u16_f16 v5, tba_hi
+// CHECK: [0x6d,0x76,0x0a,0x7e]
+
+v_cvt_u16_f16 v5, tma_lo
+// CHECK: [0x6e,0x76,0x0a,0x7e]
+
+v_cvt_u16_f16 v5, tma_hi
+// CHECK: [0x6f,0x76,0x0a,0x7e]
+
+v_cvt_u16_f16 v5, ttmp11
+// CHECK: [0x7b,0x76,0x0a,0x7e]
+
+v_cvt_u16_f16 v5, m0
+// CHECK: [0x7c,0x76,0x0a,0x7e]
+
+v_cvt_u16_f16 v5, exec_lo
+// CHECK: [0x7e,0x76,0x0a,0x7e]
+
+v_cvt_u16_f16 v5, exec_hi
+// CHECK: [0x7f,0x76,0x0a,0x7e]
+
+v_cvt_u16_f16 v5, 0
+// CHECK: [0x80,0x76,0x0a,0x7e]
+
+v_cvt_u16_f16 v5, -1
+// CHECK: [0xc1,0x76,0x0a,0x7e]
+
+v_cvt_u16_f16 v5, 0.5
+// CHECK: [0xf0,0x76,0x0a,0x7e]
+
+v_cvt_u16_f16 v5, -4.0
+// CHECK: [0xf7,0x76,0x0a,0x7e]
+
+v_cvt_u16_f16 v5, 0xfe0b
+// CHECK: [0xff,0x76,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_cvt_u16_f16 v5, 0x3456
+// CHECK: [0xff,0x76,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_cvt_u16_f16 v5, v1
+// CHECK: [0x01,0x77,0x0a,0x7e]
+
+v_cvt_u16_f16 v5, v255
+// CHECK: [0xff,0x77,0x0a,0x7e]
+
+v_cvt_u16_f16_e64 v5, s1
+// CHECK: [0x05,0x00,0x7b,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_u16_f16_e64 v255, s1
+// CHECK: [0xff,0x00,0x7b,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_u16_f16_e64 v5, s101
+// CHECK: [0x05,0x00,0x7b,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_u16_f16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x7b,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_u16_f16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x7b,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_u16_f16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x7b,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_u16_f16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x7b,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_u16_f16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x7b,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_u16_f16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x7b,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_u16_f16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x7b,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_u16_f16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x7b,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_u16_f16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x7b,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_u16_f16_e64 v5, m0
+// CHECK: [0x05,0x00,0x7b,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_u16_f16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x7b,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_u16_f16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x7b,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_u16_f16_e64 v5, scc
+// CHECK: [0x05,0x00,0x7b,0xd1,0xfd,0x00,0x00,0x00]
+
+v_cvt_u16_f16_e64 v5, v1
+// CHECK: [0x05,0x00,0x7b,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_u16_f16_e64 v5, v255
+// CHECK: [0x05,0x00,0x7b,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_u16_f16_e64 v5, -s1
+// CHECK: [0x05,0x00,0x7b,0xd1,0x01,0x00,0x00,0x20]
+
+v_cvt_u16_f16_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x7b,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_u16_f16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x7b,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_i16_f16 v5, s1
+// CHECK: [0x01,0x78,0x0a,0x7e]
+
+v_cvt_i16_f16 v255, s1
+// CHECK: [0x01,0x78,0xfe,0x7f]
+
+v_cvt_i16_f16 v5, s101
+// CHECK: [0x65,0x78,0x0a,0x7e]
+
+v_cvt_i16_f16 v5, flat_scratch_lo
+// CHECK: [0x66,0x78,0x0a,0x7e]
+
+v_cvt_i16_f16 v5, flat_scratch_hi
+// CHECK: [0x67,0x78,0x0a,0x7e]
+
+v_cvt_i16_f16 v5, vcc_lo
+// CHECK: [0x6a,0x78,0x0a,0x7e]
+
+v_cvt_i16_f16 v5, vcc_hi
+// CHECK: [0x6b,0x78,0x0a,0x7e]
+
+v_cvt_i16_f16 v5, tba_lo
+// CHECK: [0x6c,0x78,0x0a,0x7e]
+
+v_cvt_i16_f16 v5, tba_hi
+// CHECK: [0x6d,0x78,0x0a,0x7e]
+
+v_cvt_i16_f16 v5, tma_lo
+// CHECK: [0x6e,0x78,0x0a,0x7e]
+
+v_cvt_i16_f16 v5, tma_hi
+// CHECK: [0x6f,0x78,0x0a,0x7e]
+
+v_cvt_i16_f16 v5, ttmp11
+// CHECK: [0x7b,0x78,0x0a,0x7e]
+
+v_cvt_i16_f16 v5, m0
+// CHECK: [0x7c,0x78,0x0a,0x7e]
+
+v_cvt_i16_f16 v5, exec_lo
+// CHECK: [0x7e,0x78,0x0a,0x7e]
+
+v_cvt_i16_f16 v5, exec_hi
+// CHECK: [0x7f,0x78,0x0a,0x7e]
+
+v_cvt_i16_f16 v5, 0
+// CHECK: [0x80,0x78,0x0a,0x7e]
+
+v_cvt_i16_f16 v5, -1
+// CHECK: [0xc1,0x78,0x0a,0x7e]
+
+v_cvt_i16_f16 v5, 0.5
+// CHECK: [0xf0,0x78,0x0a,0x7e]
+
+v_cvt_i16_f16 v5, -4.0
+// CHECK: [0xf7,0x78,0x0a,0x7e]
+
+v_cvt_i16_f16 v5, 0xfe0b
+// CHECK: [0xff,0x78,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_cvt_i16_f16 v5, 0x3456
+// CHECK: [0xff,0x78,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_cvt_i16_f16 v5, v1
+// CHECK: [0x01,0x79,0x0a,0x7e]
+
+v_cvt_i16_f16 v5, v255
+// CHECK: [0xff,0x79,0x0a,0x7e]
+
+v_cvt_i16_f16_e64 v5, s1
+// CHECK: [0x05,0x00,0x7c,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_i16_f16_e64 v255, s1
+// CHECK: [0xff,0x00,0x7c,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_i16_f16_e64 v5, s101
+// CHECK: [0x05,0x00,0x7c,0xd1,0x65,0x00,0x00,0x00]
+
+v_cvt_i16_f16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x7c,0xd1,0x66,0x00,0x00,0x00]
+
+v_cvt_i16_f16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x7c,0xd1,0x67,0x00,0x00,0x00]
+
+v_cvt_i16_f16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x7c,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cvt_i16_f16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x7c,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cvt_i16_f16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x7c,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cvt_i16_f16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x7c,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cvt_i16_f16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x7c,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cvt_i16_f16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x7c,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cvt_i16_f16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x7c,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cvt_i16_f16_e64 v5, m0
+// CHECK: [0x05,0x00,0x7c,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cvt_i16_f16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x7c,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cvt_i16_f16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x7c,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cvt_i16_f16_e64 v5, scc
+// CHECK: [0x05,0x00,0x7c,0xd1,0xfd,0x00,0x00,0x00]
+
+v_cvt_i16_f16_e64 v5, v1
+// CHECK: [0x05,0x00,0x7c,0xd1,0x01,0x01,0x00,0x00]
+
+v_cvt_i16_f16_e64 v5, v255
+// CHECK: [0x05,0x00,0x7c,0xd1,0xff,0x01,0x00,0x00]
+
+v_cvt_i16_f16_e64 v5, -s1
+// CHECK: [0x05,0x00,0x7c,0xd1,0x01,0x00,0x00,0x20]
+
+v_cvt_i16_f16_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x7c,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_i16_f16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x7c,0xd1,0x01,0x00,0x00,0x00]
+
+v_rcp_f16 v5, s1
+// CHECK: [0x01,0x7a,0x0a,0x7e]
+
+v_rcp_f16 v255, s1
+// CHECK: [0x01,0x7a,0xfe,0x7f]
+
+v_rcp_f16 v5, s101
+// CHECK: [0x65,0x7a,0x0a,0x7e]
+
+v_rcp_f16 v5, flat_scratch_lo
+// CHECK: [0x66,0x7a,0x0a,0x7e]
+
+v_rcp_f16 v5, flat_scratch_hi
+// CHECK: [0x67,0x7a,0x0a,0x7e]
+
+v_rcp_f16 v5, vcc_lo
+// CHECK: [0x6a,0x7a,0x0a,0x7e]
+
+v_rcp_f16 v5, vcc_hi
+// CHECK: [0x6b,0x7a,0x0a,0x7e]
+
+v_rcp_f16 v5, tba_lo
+// CHECK: [0x6c,0x7a,0x0a,0x7e]
+
+v_rcp_f16 v5, tba_hi
+// CHECK: [0x6d,0x7a,0x0a,0x7e]
+
+v_rcp_f16 v5, tma_lo
+// CHECK: [0x6e,0x7a,0x0a,0x7e]
+
+v_rcp_f16 v5, tma_hi
+// CHECK: [0x6f,0x7a,0x0a,0x7e]
+
+v_rcp_f16 v5, ttmp11
+// CHECK: [0x7b,0x7a,0x0a,0x7e]
+
+v_rcp_f16 v5, m0
+// CHECK: [0x7c,0x7a,0x0a,0x7e]
+
+v_rcp_f16 v5, exec_lo
+// CHECK: [0x7e,0x7a,0x0a,0x7e]
+
+v_rcp_f16 v5, exec_hi
+// CHECK: [0x7f,0x7a,0x0a,0x7e]
+
+v_rcp_f16 v5, 0
+// CHECK: [0x80,0x7a,0x0a,0x7e]
+
+v_rcp_f16 v5, -1
+// CHECK: [0xc1,0x7a,0x0a,0x7e]
+
+v_rcp_f16 v5, 0.5
+// CHECK: [0xf0,0x7a,0x0a,0x7e]
+
+v_rcp_f16 v5, -4.0
+// CHECK: [0xf7,0x7a,0x0a,0x7e]
+
+v_rcp_f16 v5, 0xfe0b
+// CHECK: [0xff,0x7a,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_rcp_f16 v5, 0x3456
+// CHECK: [0xff,0x7a,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_rcp_f16 v5, v1
+// CHECK: [0x01,0x7b,0x0a,0x7e]
+
+v_rcp_f16 v5, v255
+// CHECK: [0xff,0x7b,0x0a,0x7e]
+
+v_rcp_f16_e64 v5, s1
+// CHECK: [0x05,0x00,0x7d,0xd1,0x01,0x00,0x00,0x00]
+
+v_rcp_f16_e64 v255, s1
+// CHECK: [0xff,0x00,0x7d,0xd1,0x01,0x00,0x00,0x00]
+
+v_rcp_f16_e64 v5, s101
+// CHECK: [0x05,0x00,0x7d,0xd1,0x65,0x00,0x00,0x00]
+
+v_rcp_f16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x7d,0xd1,0x66,0x00,0x00,0x00]
+
+v_rcp_f16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x7d,0xd1,0x67,0x00,0x00,0x00]
+
+v_rcp_f16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x7d,0xd1,0x6a,0x00,0x00,0x00]
+
+v_rcp_f16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x7d,0xd1,0x6b,0x00,0x00,0x00]
+
+v_rcp_f16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x7d,0xd1,0x6c,0x00,0x00,0x00]
+
+v_rcp_f16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x7d,0xd1,0x6d,0x00,0x00,0x00]
+
+v_rcp_f16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x7d,0xd1,0x6e,0x00,0x00,0x00]
+
+v_rcp_f16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x7d,0xd1,0x6f,0x00,0x00,0x00]
+
+v_rcp_f16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x7d,0xd1,0x7b,0x00,0x00,0x00]
+
+v_rcp_f16_e64 v5, m0
+// CHECK: [0x05,0x00,0x7d,0xd1,0x7c,0x00,0x00,0x00]
+
+v_rcp_f16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x7d,0xd1,0x7e,0x00,0x00,0x00]
+
+v_rcp_f16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x7d,0xd1,0x7f,0x00,0x00,0x00]
+
+v_rcp_f16_e64 v5, scc
+// CHECK: [0x05,0x00,0x7d,0xd1,0xfd,0x00,0x00,0x00]
+
+v_rcp_f16_e64 v5, v1
+// CHECK: [0x05,0x00,0x7d,0xd1,0x01,0x01,0x00,0x00]
+
+v_rcp_f16_e64 v5, v255
+// CHECK: [0x05,0x00,0x7d,0xd1,0xff,0x01,0x00,0x00]
+
+v_rcp_f16_e64 v5, -s1
+// CHECK: [0x05,0x00,0x7d,0xd1,0x01,0x00,0x00,0x20]
+
+v_rcp_f16_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x7d,0xd1,0x01,0x00,0x00,0x00]
+
+v_rcp_f16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x7d,0xd1,0x01,0x00,0x00,0x00]
+
+v_sqrt_f16 v5, s1
+// CHECK: [0x01,0x7c,0x0a,0x7e]
+
+v_sqrt_f16 v255, s1
+// CHECK: [0x01,0x7c,0xfe,0x7f]
+
+v_sqrt_f16 v5, s101
+// CHECK: [0x65,0x7c,0x0a,0x7e]
+
+v_sqrt_f16 v5, flat_scratch_lo
+// CHECK: [0x66,0x7c,0x0a,0x7e]
+
+v_sqrt_f16 v5, flat_scratch_hi
+// CHECK: [0x67,0x7c,0x0a,0x7e]
+
+v_sqrt_f16 v5, vcc_lo
+// CHECK: [0x6a,0x7c,0x0a,0x7e]
+
+v_sqrt_f16 v5, vcc_hi
+// CHECK: [0x6b,0x7c,0x0a,0x7e]
+
+v_sqrt_f16 v5, tba_lo
+// CHECK: [0x6c,0x7c,0x0a,0x7e]
+
+v_sqrt_f16 v5, tba_hi
+// CHECK: [0x6d,0x7c,0x0a,0x7e]
+
+v_sqrt_f16 v5, tma_lo
+// CHECK: [0x6e,0x7c,0x0a,0x7e]
+
+v_sqrt_f16 v5, tma_hi
+// CHECK: [0x6f,0x7c,0x0a,0x7e]
+
+v_sqrt_f16 v5, ttmp11
+// CHECK: [0x7b,0x7c,0x0a,0x7e]
+
+v_sqrt_f16 v5, m0
+// CHECK: [0x7c,0x7c,0x0a,0x7e]
+
+v_sqrt_f16 v5, exec_lo
+// CHECK: [0x7e,0x7c,0x0a,0x7e]
+
+v_sqrt_f16 v5, exec_hi
+// CHECK: [0x7f,0x7c,0x0a,0x7e]
+
+v_sqrt_f16 v5, 0
+// CHECK: [0x80,0x7c,0x0a,0x7e]
+
+v_sqrt_f16 v5, -1
+// CHECK: [0xc1,0x7c,0x0a,0x7e]
+
+v_sqrt_f16 v5, 0.5
+// CHECK: [0xf0,0x7c,0x0a,0x7e]
+
+v_sqrt_f16 v5, -4.0
+// CHECK: [0xf7,0x7c,0x0a,0x7e]
+
+v_sqrt_f16 v5, 0xfe0b
+// CHECK: [0xff,0x7c,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_sqrt_f16 v5, 0x3456
+// CHECK: [0xff,0x7c,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_sqrt_f16 v5, v1
+// CHECK: [0x01,0x7d,0x0a,0x7e]
+
+v_sqrt_f16 v5, v255
+// CHECK: [0xff,0x7d,0x0a,0x7e]
+
+v_sqrt_f16_e64 v5, s1
+// CHECK: [0x05,0x00,0x7e,0xd1,0x01,0x00,0x00,0x00]
+
+v_sqrt_f16_e64 v255, s1
+// CHECK: [0xff,0x00,0x7e,0xd1,0x01,0x00,0x00,0x00]
+
+v_sqrt_f16_e64 v5, s101
+// CHECK: [0x05,0x00,0x7e,0xd1,0x65,0x00,0x00,0x00]
+
+v_sqrt_f16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x7e,0xd1,0x66,0x00,0x00,0x00]
+
+v_sqrt_f16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x7e,0xd1,0x67,0x00,0x00,0x00]
+
+v_sqrt_f16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x7e,0xd1,0x6a,0x00,0x00,0x00]
+
+v_sqrt_f16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x7e,0xd1,0x6b,0x00,0x00,0x00]
+
+v_sqrt_f16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x7e,0xd1,0x6c,0x00,0x00,0x00]
+
+v_sqrt_f16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x7e,0xd1,0x6d,0x00,0x00,0x00]
+
+v_sqrt_f16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x7e,0xd1,0x6e,0x00,0x00,0x00]
+
+v_sqrt_f16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x7e,0xd1,0x6f,0x00,0x00,0x00]
+
+v_sqrt_f16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x7e,0xd1,0x7b,0x00,0x00,0x00]
+
+v_sqrt_f16_e64 v5, m0
+// CHECK: [0x05,0x00,0x7e,0xd1,0x7c,0x00,0x00,0x00]
+
+v_sqrt_f16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x7e,0xd1,0x7e,0x00,0x00,0x00]
+
+v_sqrt_f16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x7e,0xd1,0x7f,0x00,0x00,0x00]
+
+v_sqrt_f16_e64 v5, scc
+// CHECK: [0x05,0x00,0x7e,0xd1,0xfd,0x00,0x00,0x00]
+
+v_sqrt_f16_e64 v5, v1
+// CHECK: [0x05,0x00,0x7e,0xd1,0x01,0x01,0x00,0x00]
+
+v_sqrt_f16_e64 v5, v255
+// CHECK: [0x05,0x00,0x7e,0xd1,0xff,0x01,0x00,0x00]
+
+v_sqrt_f16_e64 v5, -s1
+// CHECK: [0x05,0x00,0x7e,0xd1,0x01,0x00,0x00,0x20]
+
+v_sqrt_f16_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x7e,0xd1,0x01,0x00,0x00,0x00]
+
+v_sqrt_f16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x7e,0xd1,0x01,0x00,0x00,0x00]
+
+v_rsq_f16 v5, s1
+// CHECK: [0x01,0x7e,0x0a,0x7e]
+
+v_rsq_f16 v255, s1
+// CHECK: [0x01,0x7e,0xfe,0x7f]
+
+v_rsq_f16 v5, s101
+// CHECK: [0x65,0x7e,0x0a,0x7e]
+
+v_rsq_f16 v5, flat_scratch_lo
+// CHECK: [0x66,0x7e,0x0a,0x7e]
+
+v_rsq_f16 v5, flat_scratch_hi
+// CHECK: [0x67,0x7e,0x0a,0x7e]
+
+v_rsq_f16 v5, vcc_lo
+// CHECK: [0x6a,0x7e,0x0a,0x7e]
+
+v_rsq_f16 v5, vcc_hi
+// CHECK: [0x6b,0x7e,0x0a,0x7e]
+
+v_rsq_f16 v5, tba_lo
+// CHECK: [0x6c,0x7e,0x0a,0x7e]
+
+v_rsq_f16 v5, tba_hi
+// CHECK: [0x6d,0x7e,0x0a,0x7e]
+
+v_rsq_f16 v5, tma_lo
+// CHECK: [0x6e,0x7e,0x0a,0x7e]
+
+v_rsq_f16 v5, tma_hi
+// CHECK: [0x6f,0x7e,0x0a,0x7e]
+
+v_rsq_f16 v5, ttmp11
+// CHECK: [0x7b,0x7e,0x0a,0x7e]
+
+v_rsq_f16 v5, m0
+// CHECK: [0x7c,0x7e,0x0a,0x7e]
+
+v_rsq_f16 v5, exec_lo
+// CHECK: [0x7e,0x7e,0x0a,0x7e]
+
+v_rsq_f16 v5, exec_hi
+// CHECK: [0x7f,0x7e,0x0a,0x7e]
+
+v_rsq_f16 v5, 0
+// CHECK: [0x80,0x7e,0x0a,0x7e]
+
+v_rsq_f16 v5, -1
+// CHECK: [0xc1,0x7e,0x0a,0x7e]
+
+v_rsq_f16 v5, 0.5
+// CHECK: [0xf0,0x7e,0x0a,0x7e]
+
+v_rsq_f16 v5, -4.0
+// CHECK: [0xf7,0x7e,0x0a,0x7e]
+
+v_rsq_f16 v5, 0xfe0b
+// CHECK: [0xff,0x7e,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_rsq_f16 v5, 0x3456
+// CHECK: [0xff,0x7e,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_rsq_f16 v5, v1
+// CHECK: [0x01,0x7f,0x0a,0x7e]
+
+v_rsq_f16 v5, v255
+// CHECK: [0xff,0x7f,0x0a,0x7e]
+
+v_rsq_f16_e64 v5, s1
+// CHECK: [0x05,0x00,0x7f,0xd1,0x01,0x00,0x00,0x00]
+
+v_rsq_f16_e64 v255, s1
+// CHECK: [0xff,0x00,0x7f,0xd1,0x01,0x00,0x00,0x00]
+
+v_rsq_f16_e64 v5, s101
+// CHECK: [0x05,0x00,0x7f,0xd1,0x65,0x00,0x00,0x00]
+
+v_rsq_f16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x7f,0xd1,0x66,0x00,0x00,0x00]
+
+v_rsq_f16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x7f,0xd1,0x67,0x00,0x00,0x00]
+
+v_rsq_f16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x7f,0xd1,0x6a,0x00,0x00,0x00]
+
+v_rsq_f16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x7f,0xd1,0x6b,0x00,0x00,0x00]
+
+v_rsq_f16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x7f,0xd1,0x6c,0x00,0x00,0x00]
+
+v_rsq_f16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x7f,0xd1,0x6d,0x00,0x00,0x00]
+
+v_rsq_f16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x7f,0xd1,0x6e,0x00,0x00,0x00]
+
+v_rsq_f16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x7f,0xd1,0x6f,0x00,0x00,0x00]
+
+v_rsq_f16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x7f,0xd1,0x7b,0x00,0x00,0x00]
+
+v_rsq_f16_e64 v5, m0
+// CHECK: [0x05,0x00,0x7f,0xd1,0x7c,0x00,0x00,0x00]
+
+v_rsq_f16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x7f,0xd1,0x7e,0x00,0x00,0x00]
+
+v_rsq_f16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x7f,0xd1,0x7f,0x00,0x00,0x00]
+
+v_rsq_f16_e64 v5, scc
+// CHECK: [0x05,0x00,0x7f,0xd1,0xfd,0x00,0x00,0x00]
+
+v_rsq_f16_e64 v5, v1
+// CHECK: [0x05,0x00,0x7f,0xd1,0x01,0x01,0x00,0x00]
+
+v_rsq_f16_e64 v5, v255
+// CHECK: [0x05,0x00,0x7f,0xd1,0xff,0x01,0x00,0x00]
+
+v_rsq_f16_e64 v5, -s1
+// CHECK: [0x05,0x00,0x7f,0xd1,0x01,0x00,0x00,0x20]
+
+v_rsq_f16_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x7f,0xd1,0x01,0x00,0x00,0x00]
+
+v_rsq_f16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x7f,0xd1,0x01,0x00,0x00,0x00]
+
+v_log_f16 v5, s1
+// CHECK: [0x01,0x80,0x0a,0x7e]
+
+v_log_f16 v255, s1
+// CHECK: [0x01,0x80,0xfe,0x7f]
+
+v_log_f16 v5, s101
+// CHECK: [0x65,0x80,0x0a,0x7e]
+
+v_log_f16 v5, flat_scratch_lo
+// CHECK: [0x66,0x80,0x0a,0x7e]
+
+v_log_f16 v5, flat_scratch_hi
+// CHECK: [0x67,0x80,0x0a,0x7e]
+
+v_log_f16 v5, vcc_lo
+// CHECK: [0x6a,0x80,0x0a,0x7e]
+
+v_log_f16 v5, vcc_hi
+// CHECK: [0x6b,0x80,0x0a,0x7e]
+
+v_log_f16 v5, tba_lo
+// CHECK: [0x6c,0x80,0x0a,0x7e]
+
+v_log_f16 v5, tba_hi
+// CHECK: [0x6d,0x80,0x0a,0x7e]
+
+v_log_f16 v5, tma_lo
+// CHECK: [0x6e,0x80,0x0a,0x7e]
+
+v_log_f16 v5, tma_hi
+// CHECK: [0x6f,0x80,0x0a,0x7e]
+
+v_log_f16 v5, ttmp11
+// CHECK: [0x7b,0x80,0x0a,0x7e]
+
+v_log_f16 v5, m0
+// CHECK: [0x7c,0x80,0x0a,0x7e]
+
+v_log_f16 v5, exec_lo
+// CHECK: [0x7e,0x80,0x0a,0x7e]
+
+v_log_f16 v5, exec_hi
+// CHECK: [0x7f,0x80,0x0a,0x7e]
+
+v_log_f16 v5, 0
+// CHECK: [0x80,0x80,0x0a,0x7e]
+
+v_log_f16 v5, -1
+// CHECK: [0xc1,0x80,0x0a,0x7e]
+
+v_log_f16 v5, 0.5
+// CHECK: [0xf0,0x80,0x0a,0x7e]
+
+v_log_f16 v5, -4.0
+// CHECK: [0xf7,0x80,0x0a,0x7e]
+
+v_log_f16 v5, 0xfe0b
+// CHECK: [0xff,0x80,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_log_f16 v5, 0x3456
+// CHECK: [0xff,0x80,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_log_f16 v5, v1
+// CHECK: [0x01,0x81,0x0a,0x7e]
+
+v_log_f16 v5, v255
+// CHECK: [0xff,0x81,0x0a,0x7e]
+
+v_log_f16_e64 v5, s1
+// CHECK: [0x05,0x00,0x80,0xd1,0x01,0x00,0x00,0x00]
+
+v_log_f16_e64 v255, s1
+// CHECK: [0xff,0x00,0x80,0xd1,0x01,0x00,0x00,0x00]
+
+v_log_f16_e64 v5, s101
+// CHECK: [0x05,0x00,0x80,0xd1,0x65,0x00,0x00,0x00]
+
+v_log_f16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x80,0xd1,0x66,0x00,0x00,0x00]
+
+v_log_f16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x80,0xd1,0x67,0x00,0x00,0x00]
+
+v_log_f16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x80,0xd1,0x6a,0x00,0x00,0x00]
+
+v_log_f16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x80,0xd1,0x6b,0x00,0x00,0x00]
+
+v_log_f16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x80,0xd1,0x6c,0x00,0x00,0x00]
+
+v_log_f16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x80,0xd1,0x6d,0x00,0x00,0x00]
+
+v_log_f16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x80,0xd1,0x6e,0x00,0x00,0x00]
+
+v_log_f16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x80,0xd1,0x6f,0x00,0x00,0x00]
+
+v_log_f16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x80,0xd1,0x7b,0x00,0x00,0x00]
+
+v_log_f16_e64 v5, m0
+// CHECK: [0x05,0x00,0x80,0xd1,0x7c,0x00,0x00,0x00]
+
+v_log_f16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x80,0xd1,0x7e,0x00,0x00,0x00]
+
+v_log_f16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x80,0xd1,0x7f,0x00,0x00,0x00]
+
+v_log_f16_e64 v5, scc
+// CHECK: [0x05,0x00,0x80,0xd1,0xfd,0x00,0x00,0x00]
+
+v_log_f16_e64 v5, v1
+// CHECK: [0x05,0x00,0x80,0xd1,0x01,0x01,0x00,0x00]
+
+v_log_f16_e64 v5, v255
+// CHECK: [0x05,0x00,0x80,0xd1,0xff,0x01,0x00,0x00]
+
+v_log_f16_e64 v5, -s1
+// CHECK: [0x05,0x00,0x80,0xd1,0x01,0x00,0x00,0x20]
+
+v_log_f16_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x80,0xd1,0x01,0x00,0x00,0x00]
+
+v_log_f16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x80,0xd1,0x01,0x00,0x00,0x00]
+
+v_exp_f16 v5, s1
+// CHECK: [0x01,0x82,0x0a,0x7e]
+
+v_exp_f16 v255, s1
+// CHECK: [0x01,0x82,0xfe,0x7f]
+
+v_exp_f16 v5, s101
+// CHECK: [0x65,0x82,0x0a,0x7e]
+
+v_exp_f16 v5, flat_scratch_lo
+// CHECK: [0x66,0x82,0x0a,0x7e]
+
+v_exp_f16 v5, flat_scratch_hi
+// CHECK: [0x67,0x82,0x0a,0x7e]
+
+v_exp_f16 v5, vcc_lo
+// CHECK: [0x6a,0x82,0x0a,0x7e]
+
+v_exp_f16 v5, vcc_hi
+// CHECK: [0x6b,0x82,0x0a,0x7e]
+
+v_exp_f16 v5, tba_lo
+// CHECK: [0x6c,0x82,0x0a,0x7e]
+
+v_exp_f16 v5, tba_hi
+// CHECK: [0x6d,0x82,0x0a,0x7e]
+
+v_exp_f16 v5, tma_lo
+// CHECK: [0x6e,0x82,0x0a,0x7e]
+
+v_exp_f16 v5, tma_hi
+// CHECK: [0x6f,0x82,0x0a,0x7e]
+
+v_exp_f16 v5, ttmp11
+// CHECK: [0x7b,0x82,0x0a,0x7e]
+
+v_exp_f16 v5, m0
+// CHECK: [0x7c,0x82,0x0a,0x7e]
+
+v_exp_f16 v5, exec_lo
+// CHECK: [0x7e,0x82,0x0a,0x7e]
+
+v_exp_f16 v5, exec_hi
+// CHECK: [0x7f,0x82,0x0a,0x7e]
+
+v_exp_f16 v5, 0
+// CHECK: [0x80,0x82,0x0a,0x7e]
+
+v_exp_f16 v5, -1
+// CHECK: [0xc1,0x82,0x0a,0x7e]
+
+v_exp_f16 v5, 0.5
+// CHECK: [0xf0,0x82,0x0a,0x7e]
+
+v_exp_f16 v5, -4.0
+// CHECK: [0xf7,0x82,0x0a,0x7e]
+
+v_exp_f16 v5, 0xfe0b
+// CHECK: [0xff,0x82,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_exp_f16 v5, 0x3456
+// CHECK: [0xff,0x82,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_exp_f16 v5, v1
+// CHECK: [0x01,0x83,0x0a,0x7e]
+
+v_exp_f16 v5, v255
+// CHECK: [0xff,0x83,0x0a,0x7e]
+
+v_exp_f16_e64 v5, s1
+// CHECK: [0x05,0x00,0x81,0xd1,0x01,0x00,0x00,0x00]
+
+v_exp_f16_e64 v255, s1
+// CHECK: [0xff,0x00,0x81,0xd1,0x01,0x00,0x00,0x00]
+
+v_exp_f16_e64 v5, s101
+// CHECK: [0x05,0x00,0x81,0xd1,0x65,0x00,0x00,0x00]
+
+v_exp_f16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x81,0xd1,0x66,0x00,0x00,0x00]
+
+v_exp_f16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x81,0xd1,0x67,0x00,0x00,0x00]
+
+v_exp_f16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x81,0xd1,0x6a,0x00,0x00,0x00]
+
+v_exp_f16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x81,0xd1,0x6b,0x00,0x00,0x00]
+
+v_exp_f16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x81,0xd1,0x6c,0x00,0x00,0x00]
+
+v_exp_f16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x81,0xd1,0x6d,0x00,0x00,0x00]
+
+v_exp_f16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x81,0xd1,0x6e,0x00,0x00,0x00]
+
+v_exp_f16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x81,0xd1,0x6f,0x00,0x00,0x00]
+
+v_exp_f16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x81,0xd1,0x7b,0x00,0x00,0x00]
+
+v_exp_f16_e64 v5, m0
+// CHECK: [0x05,0x00,0x81,0xd1,0x7c,0x00,0x00,0x00]
+
+v_exp_f16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x81,0xd1,0x7e,0x00,0x00,0x00]
+
+v_exp_f16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x81,0xd1,0x7f,0x00,0x00,0x00]
+
+v_exp_f16_e64 v5, scc
+// CHECK: [0x05,0x00,0x81,0xd1,0xfd,0x00,0x00,0x00]
+
+v_exp_f16_e64 v5, v1
+// CHECK: [0x05,0x00,0x81,0xd1,0x01,0x01,0x00,0x00]
+
+v_exp_f16_e64 v5, v255
+// CHECK: [0x05,0x00,0x81,0xd1,0xff,0x01,0x00,0x00]
+
+v_exp_f16_e64 v5, -s1
+// CHECK: [0x05,0x00,0x81,0xd1,0x01,0x00,0x00,0x20]
+
+v_exp_f16_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x81,0xd1,0x01,0x00,0x00,0x00]
+
+v_exp_f16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x81,0xd1,0x01,0x00,0x00,0x00]
+
+v_frexp_mant_f16 v5, s1
+// CHECK: [0x01,0x84,0x0a,0x7e]
+
+v_frexp_mant_f16 v255, s1
+// CHECK: [0x01,0x84,0xfe,0x7f]
+
+v_frexp_mant_f16 v5, s101
+// CHECK: [0x65,0x84,0x0a,0x7e]
+
+v_frexp_mant_f16 v5, flat_scratch_lo
+// CHECK: [0x66,0x84,0x0a,0x7e]
+
+v_frexp_mant_f16 v5, flat_scratch_hi
+// CHECK: [0x67,0x84,0x0a,0x7e]
+
+v_frexp_mant_f16 v5, vcc_lo
+// CHECK: [0x6a,0x84,0x0a,0x7e]
+
+v_frexp_mant_f16 v5, vcc_hi
+// CHECK: [0x6b,0x84,0x0a,0x7e]
+
+v_frexp_mant_f16 v5, tba_lo
+// CHECK: [0x6c,0x84,0x0a,0x7e]
+
+v_frexp_mant_f16 v5, tba_hi
+// CHECK: [0x6d,0x84,0x0a,0x7e]
+
+v_frexp_mant_f16 v5, tma_lo
+// CHECK: [0x6e,0x84,0x0a,0x7e]
+
+v_frexp_mant_f16 v5, tma_hi
+// CHECK: [0x6f,0x84,0x0a,0x7e]
+
+v_frexp_mant_f16 v5, ttmp11
+// CHECK: [0x7b,0x84,0x0a,0x7e]
+
+v_frexp_mant_f16 v5, m0
+// CHECK: [0x7c,0x84,0x0a,0x7e]
+
+v_frexp_mant_f16 v5, exec_lo
+// CHECK: [0x7e,0x84,0x0a,0x7e]
+
+v_frexp_mant_f16 v5, exec_hi
+// CHECK: [0x7f,0x84,0x0a,0x7e]
+
+v_frexp_mant_f16 v5, 0
+// CHECK: [0x80,0x84,0x0a,0x7e]
+
+v_frexp_mant_f16 v5, -1
+// CHECK: [0xc1,0x84,0x0a,0x7e]
+
+v_frexp_mant_f16 v5, 0.5
+// CHECK: [0xf0,0x84,0x0a,0x7e]
+
+v_frexp_mant_f16 v5, -4.0
+// CHECK: [0xf7,0x84,0x0a,0x7e]
+
+v_frexp_mant_f16 v5, 0xfe0b
+// CHECK: [0xff,0x84,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_frexp_mant_f16 v5, 0x3456
+// CHECK: [0xff,0x84,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_frexp_mant_f16 v5, v1
+// CHECK: [0x01,0x85,0x0a,0x7e]
+
+v_frexp_mant_f16 v5, v255
+// CHECK: [0xff,0x85,0x0a,0x7e]
+
+v_frexp_mant_f16_e64 v5, s1
+// CHECK: [0x05,0x00,0x82,0xd1,0x01,0x00,0x00,0x00]
+
+v_frexp_mant_f16_e64 v255, s1
+// CHECK: [0xff,0x00,0x82,0xd1,0x01,0x00,0x00,0x00]
+
+v_frexp_mant_f16_e64 v5, s101
+// CHECK: [0x05,0x00,0x82,0xd1,0x65,0x00,0x00,0x00]
+
+v_frexp_mant_f16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x82,0xd1,0x66,0x00,0x00,0x00]
+
+v_frexp_mant_f16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x82,0xd1,0x67,0x00,0x00,0x00]
+
+v_frexp_mant_f16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x82,0xd1,0x6a,0x00,0x00,0x00]
+
+v_frexp_mant_f16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x82,0xd1,0x6b,0x00,0x00,0x00]
+
+v_frexp_mant_f16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x82,0xd1,0x6c,0x00,0x00,0x00]
+
+v_frexp_mant_f16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x82,0xd1,0x6d,0x00,0x00,0x00]
+
+v_frexp_mant_f16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x82,0xd1,0x6e,0x00,0x00,0x00]
+
+v_frexp_mant_f16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x82,0xd1,0x6f,0x00,0x00,0x00]
+
+v_frexp_mant_f16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x82,0xd1,0x7b,0x00,0x00,0x00]
+
+v_frexp_mant_f16_e64 v5, m0
+// CHECK: [0x05,0x00,0x82,0xd1,0x7c,0x00,0x00,0x00]
+
+v_frexp_mant_f16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x82,0xd1,0x7e,0x00,0x00,0x00]
+
+v_frexp_mant_f16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x82,0xd1,0x7f,0x00,0x00,0x00]
+
+v_frexp_mant_f16_e64 v5, scc
+// CHECK: [0x05,0x00,0x82,0xd1,0xfd,0x00,0x00,0x00]
+
+v_frexp_mant_f16_e64 v5, v1
+// CHECK: [0x05,0x00,0x82,0xd1,0x01,0x01,0x00,0x00]
+
+v_frexp_mant_f16_e64 v5, v255
+// CHECK: [0x05,0x00,0x82,0xd1,0xff,0x01,0x00,0x00]
+
+v_frexp_mant_f16_e64 v5, -s1
+// CHECK: [0x05,0x00,0x82,0xd1,0x01,0x00,0x00,0x20]
+
+v_frexp_mant_f16_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x82,0xd1,0x01,0x00,0x00,0x00]
+
+v_frexp_mant_f16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x82,0xd1,0x01,0x00,0x00,0x00]
+
+v_frexp_exp_i16_f16 v5, s1
+// CHECK: [0x01,0x86,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v255, s1
+// CHECK: [0x01,0x86,0xfe,0x7f]
+
+v_frexp_exp_i16_f16 v5, s101
+// CHECK: [0x65,0x86,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v5, flat_scratch_lo
+// CHECK: [0x66,0x86,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v5, flat_scratch_hi
+// CHECK: [0x67,0x86,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v5, vcc_lo
+// CHECK: [0x6a,0x86,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v5, vcc_hi
+// CHECK: [0x6b,0x86,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v5, tba_lo
+// CHECK: [0x6c,0x86,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v5, tba_hi
+// CHECK: [0x6d,0x86,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v5, tma_lo
+// CHECK: [0x6e,0x86,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v5, tma_hi
+// CHECK: [0x6f,0x86,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v5, ttmp11
+// CHECK: [0x7b,0x86,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v5, m0
+// CHECK: [0x7c,0x86,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v5, exec_lo
+// CHECK: [0x7e,0x86,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v5, exec_hi
+// CHECK: [0x7f,0x86,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v5, 0
+// CHECK: [0x80,0x86,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v5, -1
+// CHECK: [0xc1,0x86,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v5, 0.5
+// CHECK: [0xf0,0x86,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v5, -4.0
+// CHECK: [0xf7,0x86,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v5, 0xfe0b
+// CHECK: [0xff,0x86,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_frexp_exp_i16_f16 v5, 0x3456
+// CHECK: [0xff,0x86,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_frexp_exp_i16_f16 v5, v1
+// CHECK: [0x01,0x87,0x0a,0x7e]
+
+v_frexp_exp_i16_f16 v5, v255
+// CHECK: [0xff,0x87,0x0a,0x7e]
+
+v_frexp_exp_i16_f16_e64 v5, s1
+// CHECK: [0x05,0x00,0x83,0xd1,0x01,0x00,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v255, s1
+// CHECK: [0xff,0x00,0x83,0xd1,0x01,0x00,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v5, s101
+// CHECK: [0x05,0x00,0x83,0xd1,0x65,0x00,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x83,0xd1,0x66,0x00,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x83,0xd1,0x67,0x00,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x83,0xd1,0x6a,0x00,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x83,0xd1,0x6b,0x00,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x83,0xd1,0x6c,0x00,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x83,0xd1,0x6d,0x00,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x83,0xd1,0x6e,0x00,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x83,0xd1,0x6f,0x00,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x83,0xd1,0x7b,0x00,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v5, m0
+// CHECK: [0x05,0x00,0x83,0xd1,0x7c,0x00,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x83,0xd1,0x7e,0x00,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x83,0xd1,0x7f,0x00,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v5, scc
+// CHECK: [0x05,0x00,0x83,0xd1,0xfd,0x00,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v5, v1
+// CHECK: [0x05,0x00,0x83,0xd1,0x01,0x01,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v5, v255
+// CHECK: [0x05,0x00,0x83,0xd1,0xff,0x01,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v5, -s1
+// CHECK: [0x05,0x00,0x83,0xd1,0x01,0x00,0x00,0x20]
+
+v_frexp_exp_i16_f16_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x83,0xd1,0x01,0x00,0x00,0x00]
+
+v_frexp_exp_i16_f16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x83,0xd1,0x01,0x00,0x00,0x00]
+
+v_floor_f16 v5, s1
+// CHECK: [0x01,0x88,0x0a,0x7e]
+
+v_floor_f16 v255, s1
+// CHECK: [0x01,0x88,0xfe,0x7f]
+
+v_floor_f16 v5, s101
+// CHECK: [0x65,0x88,0x0a,0x7e]
+
+v_floor_f16 v5, flat_scratch_lo
+// CHECK: [0x66,0x88,0x0a,0x7e]
+
+v_floor_f16 v5, flat_scratch_hi
+// CHECK: [0x67,0x88,0x0a,0x7e]
+
+v_floor_f16 v5, vcc_lo
+// CHECK: [0x6a,0x88,0x0a,0x7e]
+
+v_floor_f16 v5, vcc_hi
+// CHECK: [0x6b,0x88,0x0a,0x7e]
+
+v_floor_f16 v5, tba_lo
+// CHECK: [0x6c,0x88,0x0a,0x7e]
+
+v_floor_f16 v5, tba_hi
+// CHECK: [0x6d,0x88,0x0a,0x7e]
+
+v_floor_f16 v5, tma_lo
+// CHECK: [0x6e,0x88,0x0a,0x7e]
+
+v_floor_f16 v5, tma_hi
+// CHECK: [0x6f,0x88,0x0a,0x7e]
+
+v_floor_f16 v5, ttmp11
+// CHECK: [0x7b,0x88,0x0a,0x7e]
+
+v_floor_f16 v5, m0
+// CHECK: [0x7c,0x88,0x0a,0x7e]
+
+v_floor_f16 v5, exec_lo
+// CHECK: [0x7e,0x88,0x0a,0x7e]
+
+v_floor_f16 v5, exec_hi
+// CHECK: [0x7f,0x88,0x0a,0x7e]
+
+v_floor_f16 v5, 0
+// CHECK: [0x80,0x88,0x0a,0x7e]
+
+v_floor_f16 v5, -1
+// CHECK: [0xc1,0x88,0x0a,0x7e]
+
+v_floor_f16 v5, 0.5
+// CHECK: [0xf0,0x88,0x0a,0x7e]
+
+v_floor_f16 v5, -4.0
+// CHECK: [0xf7,0x88,0x0a,0x7e]
+
+v_floor_f16 v5, 0xfe0b
+// CHECK: [0xff,0x88,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_floor_f16 v5, 0x3456
+// CHECK: [0xff,0x88,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_floor_f16 v5, v1
+// CHECK: [0x01,0x89,0x0a,0x7e]
+
+v_floor_f16 v5, v255
+// CHECK: [0xff,0x89,0x0a,0x7e]
+
+v_floor_f16_e64 v5, s1
+// CHECK: [0x05,0x00,0x84,0xd1,0x01,0x00,0x00,0x00]
+
+v_floor_f16_e64 v255, s1
+// CHECK: [0xff,0x00,0x84,0xd1,0x01,0x00,0x00,0x00]
+
+v_floor_f16_e64 v5, s101
+// CHECK: [0x05,0x00,0x84,0xd1,0x65,0x00,0x00,0x00]
+
+v_floor_f16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x84,0xd1,0x66,0x00,0x00,0x00]
+
+v_floor_f16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x84,0xd1,0x67,0x00,0x00,0x00]
+
+v_floor_f16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x84,0xd1,0x6a,0x00,0x00,0x00]
+
+v_floor_f16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x84,0xd1,0x6b,0x00,0x00,0x00]
+
+v_floor_f16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x84,0xd1,0x6c,0x00,0x00,0x00]
+
+v_floor_f16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x84,0xd1,0x6d,0x00,0x00,0x00]
+
+v_floor_f16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x84,0xd1,0x6e,0x00,0x00,0x00]
+
+v_floor_f16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x84,0xd1,0x6f,0x00,0x00,0x00]
+
+v_floor_f16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x84,0xd1,0x7b,0x00,0x00,0x00]
+
+v_floor_f16_e64 v5, m0
+// CHECK: [0x05,0x00,0x84,0xd1,0x7c,0x00,0x00,0x00]
+
+v_floor_f16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x84,0xd1,0x7e,0x00,0x00,0x00]
+
+v_floor_f16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x84,0xd1,0x7f,0x00,0x00,0x00]
+
+v_floor_f16_e64 v5, scc
+// CHECK: [0x05,0x00,0x84,0xd1,0xfd,0x00,0x00,0x00]
+
+v_floor_f16_e64 v5, v1
+// CHECK: [0x05,0x00,0x84,0xd1,0x01,0x01,0x00,0x00]
+
+v_floor_f16_e64 v5, v255
+// CHECK: [0x05,0x00,0x84,0xd1,0xff,0x01,0x00,0x00]
+
+v_floor_f16_e64 v5, -s1
+// CHECK: [0x05,0x00,0x84,0xd1,0x01,0x00,0x00,0x20]
+
+v_floor_f16_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x84,0xd1,0x01,0x00,0x00,0x00]
+
+v_floor_f16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x84,0xd1,0x01,0x00,0x00,0x00]
+
+v_ceil_f16 v5, s1
+// CHECK: [0x01,0x8a,0x0a,0x7e]
+
+v_ceil_f16 v255, s1
+// CHECK: [0x01,0x8a,0xfe,0x7f]
+
+v_ceil_f16 v5, s101
+// CHECK: [0x65,0x8a,0x0a,0x7e]
+
+v_ceil_f16 v5, flat_scratch_lo
+// CHECK: [0x66,0x8a,0x0a,0x7e]
+
+v_ceil_f16 v5, flat_scratch_hi
+// CHECK: [0x67,0x8a,0x0a,0x7e]
+
+v_ceil_f16 v5, vcc_lo
+// CHECK: [0x6a,0x8a,0x0a,0x7e]
+
+v_ceil_f16 v5, vcc_hi
+// CHECK: [0x6b,0x8a,0x0a,0x7e]
+
+v_ceil_f16 v5, tba_lo
+// CHECK: [0x6c,0x8a,0x0a,0x7e]
+
+v_ceil_f16 v5, tba_hi
+// CHECK: [0x6d,0x8a,0x0a,0x7e]
+
+v_ceil_f16 v5, tma_lo
+// CHECK: [0x6e,0x8a,0x0a,0x7e]
+
+v_ceil_f16 v5, tma_hi
+// CHECK: [0x6f,0x8a,0x0a,0x7e]
+
+v_ceil_f16 v5, ttmp11
+// CHECK: [0x7b,0x8a,0x0a,0x7e]
+
+v_ceil_f16 v5, m0
+// CHECK: [0x7c,0x8a,0x0a,0x7e]
+
+v_ceil_f16 v5, exec_lo
+// CHECK: [0x7e,0x8a,0x0a,0x7e]
+
+v_ceil_f16 v5, exec_hi
+// CHECK: [0x7f,0x8a,0x0a,0x7e]
+
+v_ceil_f16 v5, 0
+// CHECK: [0x80,0x8a,0x0a,0x7e]
+
+v_ceil_f16 v5, -1
+// CHECK: [0xc1,0x8a,0x0a,0x7e]
+
+v_ceil_f16 v5, 0.5
+// CHECK: [0xf0,0x8a,0x0a,0x7e]
+
+v_ceil_f16 v5, -4.0
+// CHECK: [0xf7,0x8a,0x0a,0x7e]
+
+v_ceil_f16 v5, 0xfe0b
+// CHECK: [0xff,0x8a,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_ceil_f16 v5, 0x3456
+// CHECK: [0xff,0x8a,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_ceil_f16 v5, v1
+// CHECK: [0x01,0x8b,0x0a,0x7e]
+
+v_ceil_f16 v5, v255
+// CHECK: [0xff,0x8b,0x0a,0x7e]
+
+v_ceil_f16_e64 v5, s1
+// CHECK: [0x05,0x00,0x85,0xd1,0x01,0x00,0x00,0x00]
+
+v_ceil_f16_e64 v255, s1
+// CHECK: [0xff,0x00,0x85,0xd1,0x01,0x00,0x00,0x00]
+
+v_ceil_f16_e64 v5, s101
+// CHECK: [0x05,0x00,0x85,0xd1,0x65,0x00,0x00,0x00]
+
+v_ceil_f16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x85,0xd1,0x66,0x00,0x00,0x00]
+
+v_ceil_f16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x85,0xd1,0x67,0x00,0x00,0x00]
+
+v_ceil_f16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x85,0xd1,0x6a,0x00,0x00,0x00]
+
+v_ceil_f16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x85,0xd1,0x6b,0x00,0x00,0x00]
+
+v_ceil_f16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x85,0xd1,0x6c,0x00,0x00,0x00]
+
+v_ceil_f16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x85,0xd1,0x6d,0x00,0x00,0x00]
+
+v_ceil_f16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x85,0xd1,0x6e,0x00,0x00,0x00]
+
+v_ceil_f16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x85,0xd1,0x6f,0x00,0x00,0x00]
+
+v_ceil_f16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x85,0xd1,0x7b,0x00,0x00,0x00]
+
+v_ceil_f16_e64 v5, m0
+// CHECK: [0x05,0x00,0x85,0xd1,0x7c,0x00,0x00,0x00]
+
+v_ceil_f16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x85,0xd1,0x7e,0x00,0x00,0x00]
+
+v_ceil_f16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x85,0xd1,0x7f,0x00,0x00,0x00]
+
+v_ceil_f16_e64 v5, scc
+// CHECK: [0x05,0x00,0x85,0xd1,0xfd,0x00,0x00,0x00]
+
+v_ceil_f16_e64 v5, v1
+// CHECK: [0x05,0x00,0x85,0xd1,0x01,0x01,0x00,0x00]
+
+v_ceil_f16_e64 v5, v255
+// CHECK: [0x05,0x00,0x85,0xd1,0xff,0x01,0x00,0x00]
+
+v_ceil_f16_e64 v5, -s1
+// CHECK: [0x05,0x00,0x85,0xd1,0x01,0x00,0x00,0x20]
+
+v_ceil_f16_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x85,0xd1,0x01,0x00,0x00,0x00]
+
+v_ceil_f16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x85,0xd1,0x01,0x00,0x00,0x00]
+
+v_trunc_f16 v5, s1
+// CHECK: [0x01,0x8c,0x0a,0x7e]
+
+v_trunc_f16 v255, s1
+// CHECK: [0x01,0x8c,0xfe,0x7f]
+
+v_trunc_f16 v5, s101
+// CHECK: [0x65,0x8c,0x0a,0x7e]
+
+v_trunc_f16 v5, flat_scratch_lo
+// CHECK: [0x66,0x8c,0x0a,0x7e]
+
+v_trunc_f16 v5, flat_scratch_hi
+// CHECK: [0x67,0x8c,0x0a,0x7e]
+
+v_trunc_f16 v5, vcc_lo
+// CHECK: [0x6a,0x8c,0x0a,0x7e]
+
+v_trunc_f16 v5, vcc_hi
+// CHECK: [0x6b,0x8c,0x0a,0x7e]
+
+v_trunc_f16 v5, tba_lo
+// CHECK: [0x6c,0x8c,0x0a,0x7e]
+
+v_trunc_f16 v5, tba_hi
+// CHECK: [0x6d,0x8c,0x0a,0x7e]
+
+v_trunc_f16 v5, tma_lo
+// CHECK: [0x6e,0x8c,0x0a,0x7e]
+
+v_trunc_f16 v5, tma_hi
+// CHECK: [0x6f,0x8c,0x0a,0x7e]
+
+v_trunc_f16 v5, ttmp11
+// CHECK: [0x7b,0x8c,0x0a,0x7e]
+
+v_trunc_f16 v5, m0
+// CHECK: [0x7c,0x8c,0x0a,0x7e]
+
+v_trunc_f16 v5, exec_lo
+// CHECK: [0x7e,0x8c,0x0a,0x7e]
+
+v_trunc_f16 v5, exec_hi
+// CHECK: [0x7f,0x8c,0x0a,0x7e]
+
+v_trunc_f16 v5, 0
+// CHECK: [0x80,0x8c,0x0a,0x7e]
+
+v_trunc_f16 v5, -1
+// CHECK: [0xc1,0x8c,0x0a,0x7e]
+
+v_trunc_f16 v5, 0.5
+// CHECK: [0xf0,0x8c,0x0a,0x7e]
+
+v_trunc_f16 v5, -4.0
+// CHECK: [0xf7,0x8c,0x0a,0x7e]
+
+v_trunc_f16 v5, 0xfe0b
+// CHECK: [0xff,0x8c,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_trunc_f16 v5, 0x3456
+// CHECK: [0xff,0x8c,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_trunc_f16 v5, v1
+// CHECK: [0x01,0x8d,0x0a,0x7e]
+
+v_trunc_f16 v5, v255
+// CHECK: [0xff,0x8d,0x0a,0x7e]
+
+v_trunc_f16_e64 v5, s1
+// CHECK: [0x05,0x00,0x86,0xd1,0x01,0x00,0x00,0x00]
+
+v_trunc_f16_e64 v255, s1
+// CHECK: [0xff,0x00,0x86,0xd1,0x01,0x00,0x00,0x00]
+
+v_trunc_f16_e64 v5, s101
+// CHECK: [0x05,0x00,0x86,0xd1,0x65,0x00,0x00,0x00]
+
+v_trunc_f16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x86,0xd1,0x66,0x00,0x00,0x00]
+
+v_trunc_f16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x86,0xd1,0x67,0x00,0x00,0x00]
+
+v_trunc_f16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x86,0xd1,0x6a,0x00,0x00,0x00]
+
+v_trunc_f16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x86,0xd1,0x6b,0x00,0x00,0x00]
+
+v_trunc_f16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x86,0xd1,0x6c,0x00,0x00,0x00]
+
+v_trunc_f16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x86,0xd1,0x6d,0x00,0x00,0x00]
+
+v_trunc_f16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x86,0xd1,0x6e,0x00,0x00,0x00]
+
+v_trunc_f16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x86,0xd1,0x6f,0x00,0x00,0x00]
+
+v_trunc_f16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x86,0xd1,0x7b,0x00,0x00,0x00]
+
+v_trunc_f16_e64 v5, m0
+// CHECK: [0x05,0x00,0x86,0xd1,0x7c,0x00,0x00,0x00]
+
+v_trunc_f16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x86,0xd1,0x7e,0x00,0x00,0x00]
+
+v_trunc_f16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x86,0xd1,0x7f,0x00,0x00,0x00]
+
+v_trunc_f16_e64 v5, scc
+// CHECK: [0x05,0x00,0x86,0xd1,0xfd,0x00,0x00,0x00]
+
+v_trunc_f16_e64 v5, v1
+// CHECK: [0x05,0x00,0x86,0xd1,0x01,0x01,0x00,0x00]
+
+v_trunc_f16_e64 v5, v255
+// CHECK: [0x05,0x00,0x86,0xd1,0xff,0x01,0x00,0x00]
+
+v_trunc_f16_e64 v5, -s1
+// CHECK: [0x05,0x00,0x86,0xd1,0x01,0x00,0x00,0x20]
+
+v_trunc_f16_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x86,0xd1,0x01,0x00,0x00,0x00]
+
+v_trunc_f16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x86,0xd1,0x01,0x00,0x00,0x00]
+
+v_rndne_f16 v5, s1
+// CHECK: [0x01,0x8e,0x0a,0x7e]
+
+v_rndne_f16 v255, s1
+// CHECK: [0x01,0x8e,0xfe,0x7f]
+
+v_rndne_f16 v5, s101
+// CHECK: [0x65,0x8e,0x0a,0x7e]
+
+v_rndne_f16 v5, flat_scratch_lo
+// CHECK: [0x66,0x8e,0x0a,0x7e]
+
+v_rndne_f16 v5, flat_scratch_hi
+// CHECK: [0x67,0x8e,0x0a,0x7e]
+
+v_rndne_f16 v5, vcc_lo
+// CHECK: [0x6a,0x8e,0x0a,0x7e]
+
+v_rndne_f16 v5, vcc_hi
+// CHECK: [0x6b,0x8e,0x0a,0x7e]
+
+v_rndne_f16 v5, tba_lo
+// CHECK: [0x6c,0x8e,0x0a,0x7e]
+
+v_rndne_f16 v5, tba_hi
+// CHECK: [0x6d,0x8e,0x0a,0x7e]
+
+v_rndne_f16 v5, tma_lo
+// CHECK: [0x6e,0x8e,0x0a,0x7e]
+
+v_rndne_f16 v5, tma_hi
+// CHECK: [0x6f,0x8e,0x0a,0x7e]
+
+v_rndne_f16 v5, ttmp11
+// CHECK: [0x7b,0x8e,0x0a,0x7e]
+
+v_rndne_f16 v5, m0
+// CHECK: [0x7c,0x8e,0x0a,0x7e]
+
+v_rndne_f16 v5, exec_lo
+// CHECK: [0x7e,0x8e,0x0a,0x7e]
+
+v_rndne_f16 v5, exec_hi
+// CHECK: [0x7f,0x8e,0x0a,0x7e]
+
+v_rndne_f16 v5, 0
+// CHECK: [0x80,0x8e,0x0a,0x7e]
+
+v_rndne_f16 v5, -1
+// CHECK: [0xc1,0x8e,0x0a,0x7e]
+
+v_rndne_f16 v5, 0.5
+// CHECK: [0xf0,0x8e,0x0a,0x7e]
+
+v_rndne_f16 v5, -4.0
+// CHECK: [0xf7,0x8e,0x0a,0x7e]
+
+v_rndne_f16 v5, 0xfe0b
+// CHECK: [0xff,0x8e,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_rndne_f16 v5, 0x3456
+// CHECK: [0xff,0x8e,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_rndne_f16 v5, v1
+// CHECK: [0x01,0x8f,0x0a,0x7e]
+
+v_rndne_f16 v5, v255
+// CHECK: [0xff,0x8f,0x0a,0x7e]
+
+v_rndne_f16_e64 v5, s1
+// CHECK: [0x05,0x00,0x87,0xd1,0x01,0x00,0x00,0x00]
+
+v_rndne_f16_e64 v255, s1
+// CHECK: [0xff,0x00,0x87,0xd1,0x01,0x00,0x00,0x00]
+
+v_rndne_f16_e64 v5, s101
+// CHECK: [0x05,0x00,0x87,0xd1,0x65,0x00,0x00,0x00]
+
+v_rndne_f16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x87,0xd1,0x66,0x00,0x00,0x00]
+
+v_rndne_f16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x87,0xd1,0x67,0x00,0x00,0x00]
+
+v_rndne_f16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x87,0xd1,0x6a,0x00,0x00,0x00]
+
+v_rndne_f16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x87,0xd1,0x6b,0x00,0x00,0x00]
+
+v_rndne_f16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x87,0xd1,0x6c,0x00,0x00,0x00]
+
+v_rndne_f16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x87,0xd1,0x6d,0x00,0x00,0x00]
+
+v_rndne_f16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x87,0xd1,0x6e,0x00,0x00,0x00]
+
+v_rndne_f16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x87,0xd1,0x6f,0x00,0x00,0x00]
+
+v_rndne_f16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x87,0xd1,0x7b,0x00,0x00,0x00]
+
+v_rndne_f16_e64 v5, m0
+// CHECK: [0x05,0x00,0x87,0xd1,0x7c,0x00,0x00,0x00]
+
+v_rndne_f16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x87,0xd1,0x7e,0x00,0x00,0x00]
+
+v_rndne_f16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x87,0xd1,0x7f,0x00,0x00,0x00]
+
+v_rndne_f16_e64 v5, scc
+// CHECK: [0x05,0x00,0x87,0xd1,0xfd,0x00,0x00,0x00]
+
+v_rndne_f16_e64 v5, v1
+// CHECK: [0x05,0x00,0x87,0xd1,0x01,0x01,0x00,0x00]
+
+v_rndne_f16_e64 v5, v255
+// CHECK: [0x05,0x00,0x87,0xd1,0xff,0x01,0x00,0x00]
+
+v_rndne_f16_e64 v5, -s1
+// CHECK: [0x05,0x00,0x87,0xd1,0x01,0x00,0x00,0x20]
+
+v_rndne_f16_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x87,0xd1,0x01,0x00,0x00,0x00]
+
+v_rndne_f16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x87,0xd1,0x01,0x00,0x00,0x00]
+
+v_fract_f16 v5, s1
+// CHECK: [0x01,0x90,0x0a,0x7e]
+
+v_fract_f16 v255, s1
+// CHECK: [0x01,0x90,0xfe,0x7f]
+
+v_fract_f16 v5, s101
+// CHECK: [0x65,0x90,0x0a,0x7e]
+
+v_fract_f16 v5, flat_scratch_lo
+// CHECK: [0x66,0x90,0x0a,0x7e]
+
+v_fract_f16 v5, flat_scratch_hi
+// CHECK: [0x67,0x90,0x0a,0x7e]
+
+v_fract_f16 v5, vcc_lo
+// CHECK: [0x6a,0x90,0x0a,0x7e]
+
+v_fract_f16 v5, vcc_hi
+// CHECK: [0x6b,0x90,0x0a,0x7e]
+
+v_fract_f16 v5, tba_lo
+// CHECK: [0x6c,0x90,0x0a,0x7e]
+
+v_fract_f16 v5, tba_hi
+// CHECK: [0x6d,0x90,0x0a,0x7e]
+
+v_fract_f16 v5, tma_lo
+// CHECK: [0x6e,0x90,0x0a,0x7e]
+
+v_fract_f16 v5, tma_hi
+// CHECK: [0x6f,0x90,0x0a,0x7e]
+
+v_fract_f16 v5, ttmp11
+// CHECK: [0x7b,0x90,0x0a,0x7e]
+
+v_fract_f16 v5, m0
+// CHECK: [0x7c,0x90,0x0a,0x7e]
+
+v_fract_f16 v5, exec_lo
+// CHECK: [0x7e,0x90,0x0a,0x7e]
+
+v_fract_f16 v5, exec_hi
+// CHECK: [0x7f,0x90,0x0a,0x7e]
+
+v_fract_f16 v5, 0
+// CHECK: [0x80,0x90,0x0a,0x7e]
+
+v_fract_f16 v5, -1
+// CHECK: [0xc1,0x90,0x0a,0x7e]
+
+v_fract_f16 v5, 0.5
+// CHECK: [0xf0,0x90,0x0a,0x7e]
+
+v_fract_f16 v5, -4.0
+// CHECK: [0xf7,0x90,0x0a,0x7e]
+
+v_fract_f16 v5, 0xfe0b
+// CHECK: [0xff,0x90,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_fract_f16 v5, 0x3456
+// CHECK: [0xff,0x90,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_fract_f16 v5, v1
+// CHECK: [0x01,0x91,0x0a,0x7e]
+
+v_fract_f16 v5, v255
+// CHECK: [0xff,0x91,0x0a,0x7e]
+
+v_fract_f16_e64 v5, s1
+// CHECK: [0x05,0x00,0x88,0xd1,0x01,0x00,0x00,0x00]
+
+v_fract_f16_e64 v255, s1
+// CHECK: [0xff,0x00,0x88,0xd1,0x01,0x00,0x00,0x00]
+
+v_fract_f16_e64 v5, s101
+// CHECK: [0x05,0x00,0x88,0xd1,0x65,0x00,0x00,0x00]
+
+v_fract_f16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x88,0xd1,0x66,0x00,0x00,0x00]
+
+v_fract_f16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x88,0xd1,0x67,0x00,0x00,0x00]
+
+v_fract_f16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x88,0xd1,0x6a,0x00,0x00,0x00]
+
+v_fract_f16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x88,0xd1,0x6b,0x00,0x00,0x00]
+
+v_fract_f16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x88,0xd1,0x6c,0x00,0x00,0x00]
+
+v_fract_f16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x88,0xd1,0x6d,0x00,0x00,0x00]
+
+v_fract_f16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x88,0xd1,0x6e,0x00,0x00,0x00]
+
+v_fract_f16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x88,0xd1,0x6f,0x00,0x00,0x00]
+
+v_fract_f16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x88,0xd1,0x7b,0x00,0x00,0x00]
+
+v_fract_f16_e64 v5, m0
+// CHECK: [0x05,0x00,0x88,0xd1,0x7c,0x00,0x00,0x00]
+
+v_fract_f16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x88,0xd1,0x7e,0x00,0x00,0x00]
+
+v_fract_f16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x88,0xd1,0x7f,0x00,0x00,0x00]
+
+v_fract_f16_e64 v5, scc
+// CHECK: [0x05,0x00,0x88,0xd1,0xfd,0x00,0x00,0x00]
+
+v_fract_f16_e64 v5, v1
+// CHECK: [0x05,0x00,0x88,0xd1,0x01,0x01,0x00,0x00]
+
+v_fract_f16_e64 v5, v255
+// CHECK: [0x05,0x00,0x88,0xd1,0xff,0x01,0x00,0x00]
+
+v_fract_f16_e64 v5, -s1
+// CHECK: [0x05,0x00,0x88,0xd1,0x01,0x00,0x00,0x20]
+
+v_fract_f16_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x88,0xd1,0x01,0x00,0x00,0x00]
+
+v_fract_f16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x88,0xd1,0x01,0x00,0x00,0x00]
+
+v_sin_f16 v5, s1
+// CHECK: [0x01,0x92,0x0a,0x7e]
+
+v_sin_f16 v255, s1
+// CHECK: [0x01,0x92,0xfe,0x7f]
+
+v_sin_f16 v5, s101
+// CHECK: [0x65,0x92,0x0a,0x7e]
+
+v_sin_f16 v5, flat_scratch_lo
+// CHECK: [0x66,0x92,0x0a,0x7e]
+
+v_sin_f16 v5, flat_scratch_hi
+// CHECK: [0x67,0x92,0x0a,0x7e]
+
+v_sin_f16 v5, vcc_lo
+// CHECK: [0x6a,0x92,0x0a,0x7e]
+
+v_sin_f16 v5, vcc_hi
+// CHECK: [0x6b,0x92,0x0a,0x7e]
+
+v_sin_f16 v5, tba_lo
+// CHECK: [0x6c,0x92,0x0a,0x7e]
+
+v_sin_f16 v5, tba_hi
+// CHECK: [0x6d,0x92,0x0a,0x7e]
+
+v_sin_f16 v5, tma_lo
+// CHECK: [0x6e,0x92,0x0a,0x7e]
+
+v_sin_f16 v5, tma_hi
+// CHECK: [0x6f,0x92,0x0a,0x7e]
+
+v_sin_f16 v5, ttmp11
+// CHECK: [0x7b,0x92,0x0a,0x7e]
+
+v_sin_f16 v5, m0
+// CHECK: [0x7c,0x92,0x0a,0x7e]
+
+v_sin_f16 v5, exec_lo
+// CHECK: [0x7e,0x92,0x0a,0x7e]
+
+v_sin_f16 v5, exec_hi
+// CHECK: [0x7f,0x92,0x0a,0x7e]
+
+v_sin_f16 v5, 0
+// CHECK: [0x80,0x92,0x0a,0x7e]
+
+v_sin_f16 v5, -1
+// CHECK: [0xc1,0x92,0x0a,0x7e]
+
+v_sin_f16 v5, 0.5
+// CHECK: [0xf0,0x92,0x0a,0x7e]
+
+v_sin_f16 v5, -4.0
+// CHECK: [0xf7,0x92,0x0a,0x7e]
+
+v_sin_f16 v5, 0xfe0b
+// CHECK: [0xff,0x92,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_sin_f16 v5, 0x3456
+// CHECK: [0xff,0x92,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_sin_f16 v5, v1
+// CHECK: [0x01,0x93,0x0a,0x7e]
+
+v_sin_f16 v5, v255
+// CHECK: [0xff,0x93,0x0a,0x7e]
+
+v_sin_f16_e64 v5, s1
+// CHECK: [0x05,0x00,0x89,0xd1,0x01,0x00,0x00,0x00]
+
+v_sin_f16_e64 v255, s1
+// CHECK: [0xff,0x00,0x89,0xd1,0x01,0x00,0x00,0x00]
+
+v_sin_f16_e64 v5, s101
+// CHECK: [0x05,0x00,0x89,0xd1,0x65,0x00,0x00,0x00]
+
+v_sin_f16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x89,0xd1,0x66,0x00,0x00,0x00]
+
+v_sin_f16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x89,0xd1,0x67,0x00,0x00,0x00]
+
+v_sin_f16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x89,0xd1,0x6a,0x00,0x00,0x00]
+
+v_sin_f16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x89,0xd1,0x6b,0x00,0x00,0x00]
+
+v_sin_f16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x89,0xd1,0x6c,0x00,0x00,0x00]
+
+v_sin_f16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x89,0xd1,0x6d,0x00,0x00,0x00]
+
+v_sin_f16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x89,0xd1,0x6e,0x00,0x00,0x00]
+
+v_sin_f16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x89,0xd1,0x6f,0x00,0x00,0x00]
+
+v_sin_f16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x89,0xd1,0x7b,0x00,0x00,0x00]
+
+v_sin_f16_e64 v5, m0
+// CHECK: [0x05,0x00,0x89,0xd1,0x7c,0x00,0x00,0x00]
+
+v_sin_f16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x89,0xd1,0x7e,0x00,0x00,0x00]
+
+v_sin_f16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x89,0xd1,0x7f,0x00,0x00,0x00]
+
+v_sin_f16_e64 v5, scc
+// CHECK: [0x05,0x00,0x89,0xd1,0xfd,0x00,0x00,0x00]
+
+v_sin_f16_e64 v5, v1
+// CHECK: [0x05,0x00,0x89,0xd1,0x01,0x01,0x00,0x00]
+
+v_sin_f16_e64 v5, v255
+// CHECK: [0x05,0x00,0x89,0xd1,0xff,0x01,0x00,0x00]
+
+v_sin_f16_e64 v5, -s1
+// CHECK: [0x05,0x00,0x89,0xd1,0x01,0x00,0x00,0x20]
+
+v_sin_f16_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x89,0xd1,0x01,0x00,0x00,0x00]
+
+v_sin_f16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x89,0xd1,0x01,0x00,0x00,0x00]
+
+v_cos_f16 v5, s1
+// CHECK: [0x01,0x94,0x0a,0x7e]
+
+v_cos_f16 v255, s1
+// CHECK: [0x01,0x94,0xfe,0x7f]
+
+v_cos_f16 v5, s101
+// CHECK: [0x65,0x94,0x0a,0x7e]
+
+v_cos_f16 v5, flat_scratch_lo
+// CHECK: [0x66,0x94,0x0a,0x7e]
+
+v_cos_f16 v5, flat_scratch_hi
+// CHECK: [0x67,0x94,0x0a,0x7e]
+
+v_cos_f16 v5, vcc_lo
+// CHECK: [0x6a,0x94,0x0a,0x7e]
+
+v_cos_f16 v5, vcc_hi
+// CHECK: [0x6b,0x94,0x0a,0x7e]
+
+v_cos_f16 v5, tba_lo
+// CHECK: [0x6c,0x94,0x0a,0x7e]
+
+v_cos_f16 v5, tba_hi
+// CHECK: [0x6d,0x94,0x0a,0x7e]
+
+v_cos_f16 v5, tma_lo
+// CHECK: [0x6e,0x94,0x0a,0x7e]
+
+v_cos_f16 v5, tma_hi
+// CHECK: [0x6f,0x94,0x0a,0x7e]
+
+v_cos_f16 v5, ttmp11
+// CHECK: [0x7b,0x94,0x0a,0x7e]
+
+v_cos_f16 v5, m0
+// CHECK: [0x7c,0x94,0x0a,0x7e]
+
+v_cos_f16 v5, exec_lo
+// CHECK: [0x7e,0x94,0x0a,0x7e]
+
+v_cos_f16 v5, exec_hi
+// CHECK: [0x7f,0x94,0x0a,0x7e]
+
+v_cos_f16 v5, 0
+// CHECK: [0x80,0x94,0x0a,0x7e]
+
+v_cos_f16 v5, -1
+// CHECK: [0xc1,0x94,0x0a,0x7e]
+
+v_cos_f16 v5, 0.5
+// CHECK: [0xf0,0x94,0x0a,0x7e]
+
+v_cos_f16 v5, -4.0
+// CHECK: [0xf7,0x94,0x0a,0x7e]
+
+v_cos_f16 v5, 0xfe0b
+// CHECK: [0xff,0x94,0x0a,0x7e,0x0b,0xfe,0x00,0x00]
+
+v_cos_f16 v5, 0x3456
+// CHECK: [0xff,0x94,0x0a,0x7e,0x56,0x34,0x00,0x00]
+
+v_cos_f16 v5, v1
+// CHECK: [0x01,0x95,0x0a,0x7e]
+
+v_cos_f16 v5, v255
+// CHECK: [0xff,0x95,0x0a,0x7e]
+
+v_cos_f16_e64 v5, s1
+// CHECK: [0x05,0x00,0x8a,0xd1,0x01,0x00,0x00,0x00]
+
+v_cos_f16_e64 v255, s1
+// CHECK: [0xff,0x00,0x8a,0xd1,0x01,0x00,0x00,0x00]
+
+v_cos_f16_e64 v5, s101
+// CHECK: [0x05,0x00,0x8a,0xd1,0x65,0x00,0x00,0x00]
+
+v_cos_f16_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x8a,0xd1,0x66,0x00,0x00,0x00]
+
+v_cos_f16_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x8a,0xd1,0x67,0x00,0x00,0x00]
+
+v_cos_f16_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x8a,0xd1,0x6a,0x00,0x00,0x00]
+
+v_cos_f16_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x8a,0xd1,0x6b,0x00,0x00,0x00]
+
+v_cos_f16_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x8a,0xd1,0x6c,0x00,0x00,0x00]
+
+v_cos_f16_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x8a,0xd1,0x6d,0x00,0x00,0x00]
+
+v_cos_f16_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x8a,0xd1,0x6e,0x00,0x00,0x00]
+
+v_cos_f16_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x8a,0xd1,0x6f,0x00,0x00,0x00]
+
+v_cos_f16_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x8a,0xd1,0x7b,0x00,0x00,0x00]
+
+v_cos_f16_e64 v5, m0
+// CHECK: [0x05,0x00,0x8a,0xd1,0x7c,0x00,0x00,0x00]
+
+v_cos_f16_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x8a,0xd1,0x7e,0x00,0x00,0x00]
+
+v_cos_f16_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x8a,0xd1,0x7f,0x00,0x00,0x00]
+
+v_cos_f16_e64 v5, scc
+// CHECK: [0x05,0x00,0x8a,0xd1,0xfd,0x00,0x00,0x00]
+
+v_cos_f16_e64 v5, v1
+// CHECK: [0x05,0x00,0x8a,0xd1,0x01,0x01,0x00,0x00]
+
+v_cos_f16_e64 v5, v255
+// CHECK: [0x05,0x00,0x8a,0xd1,0xff,0x01,0x00,0x00]
+
+v_cos_f16_e64 v5, -s1
+// CHECK: [0x05,0x00,0x8a,0xd1,0x01,0x00,0x00,0x20]
+
+v_cos_f16_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x8a,0xd1,0x01,0x00,0x00,0x00]
+
+v_cos_f16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x8a,0xd1,0x01,0x00,0x00,0x00]
+
+v_exp_legacy_f32 v5, s1
+// CHECK: [0x01,0x96,0x0a,0x7e]
+
+v_exp_legacy_f32 v255, s1
+// CHECK: [0x01,0x96,0xfe,0x7f]
+
+v_exp_legacy_f32 v5, s101
+// CHECK: [0x65,0x96,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x96,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x96,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, vcc_lo
+// CHECK: [0x6a,0x96,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, vcc_hi
+// CHECK: [0x6b,0x96,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, tba_lo
+// CHECK: [0x6c,0x96,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, tba_hi
+// CHECK: [0x6d,0x96,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, tma_lo
+// CHECK: [0x6e,0x96,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, tma_hi
+// CHECK: [0x6f,0x96,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, ttmp11
+// CHECK: [0x7b,0x96,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, m0
+// CHECK: [0x7c,0x96,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, exec_lo
+// CHECK: [0x7e,0x96,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, exec_hi
+// CHECK: [0x7f,0x96,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, 0
+// CHECK: [0x80,0x96,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, -1
+// CHECK: [0xc1,0x96,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, 0.5
+// CHECK: [0xf0,0x96,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, -4.0
+// CHECK: [0xf7,0x96,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, 0xaf123456
+// CHECK: [0xff,0x96,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_exp_legacy_f32 v5, 0x3f717273
+// CHECK: [0xff,0x96,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_exp_legacy_f32 v5, v1
+// CHECK: [0x01,0x97,0x0a,0x7e]
+
+v_exp_legacy_f32 v5, v255
+// CHECK: [0xff,0x97,0x0a,0x7e]
+
+v_exp_legacy_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x8b,0xd1,0x01,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x8b,0xd1,0x01,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x8b,0xd1,0x65,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x8b,0xd1,0x66,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x8b,0xd1,0x67,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x8b,0xd1,0x6a,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x8b,0xd1,0x6b,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x8b,0xd1,0x6c,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x8b,0xd1,0x6d,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x8b,0xd1,0x6e,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x8b,0xd1,0x6f,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x8b,0xd1,0x7b,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x8b,0xd1,0x7c,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x8b,0xd1,0x7e,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x8b,0xd1,0x7f,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x8b,0xd1,0xfd,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x8b,0xd1,0x01,0x01,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x8b,0xd1,0xff,0x01,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x8b,0xd1,0x01,0x00,0x00,0x20]
+
+v_exp_legacy_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x8b,0xd1,0x01,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x8b,0xd1,0x01,0x00,0x00,0x00]
+
+v_exp_legacy_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x8b,0xd1,0x01,0x00,0x00,0x08]
+
+v_exp_legacy_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x8b,0xd1,0x01,0x00,0x00,0x10]
+
+v_exp_legacy_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x8b,0xd1,0x01,0x00,0x00,0x18]
+
+v_log_legacy_f32 v5, s1
+// CHECK: [0x01,0x98,0x0a,0x7e]
+
+v_log_legacy_f32 v255, s1
+// CHECK: [0x01,0x98,0xfe,0x7f]
+
+v_log_legacy_f32 v5, s101
+// CHECK: [0x65,0x98,0x0a,0x7e]
+
+v_log_legacy_f32 v5, flat_scratch_lo
+// CHECK: [0x66,0x98,0x0a,0x7e]
+
+v_log_legacy_f32 v5, flat_scratch_hi
+// CHECK: [0x67,0x98,0x0a,0x7e]
+
+v_log_legacy_f32 v5, vcc_lo
+// CHECK: [0x6a,0x98,0x0a,0x7e]
+
+v_log_legacy_f32 v5, vcc_hi
+// CHECK: [0x6b,0x98,0x0a,0x7e]
+
+v_log_legacy_f32 v5, tba_lo
+// CHECK: [0x6c,0x98,0x0a,0x7e]
+
+v_log_legacy_f32 v5, tba_hi
+// CHECK: [0x6d,0x98,0x0a,0x7e]
+
+v_log_legacy_f32 v5, tma_lo
+// CHECK: [0x6e,0x98,0x0a,0x7e]
+
+v_log_legacy_f32 v5, tma_hi
+// CHECK: [0x6f,0x98,0x0a,0x7e]
+
+v_log_legacy_f32 v5, ttmp11
+// CHECK: [0x7b,0x98,0x0a,0x7e]
+
+v_log_legacy_f32 v5, m0
+// CHECK: [0x7c,0x98,0x0a,0x7e]
+
+v_log_legacy_f32 v5, exec_lo
+// CHECK: [0x7e,0x98,0x0a,0x7e]
+
+v_log_legacy_f32 v5, exec_hi
+// CHECK: [0x7f,0x98,0x0a,0x7e]
+
+v_log_legacy_f32 v5, 0
+// CHECK: [0x80,0x98,0x0a,0x7e]
+
+v_log_legacy_f32 v5, -1
+// CHECK: [0xc1,0x98,0x0a,0x7e]
+
+v_log_legacy_f32 v5, 0.5
+// CHECK: [0xf0,0x98,0x0a,0x7e]
+
+v_log_legacy_f32 v5, -4.0
+// CHECK: [0xf7,0x98,0x0a,0x7e]
+
+v_log_legacy_f32 v5, 0xaf123456
+// CHECK: [0xff,0x98,0x0a,0x7e,0x56,0x34,0x12,0xaf]
+
+v_log_legacy_f32 v5, 0x3f717273
+// CHECK: [0xff,0x98,0x0a,0x7e,0x73,0x72,0x71,0x3f]
+
+v_log_legacy_f32 v5, v1
+// CHECK: [0x01,0x99,0x0a,0x7e]
+
+v_log_legacy_f32 v5, v255
+// CHECK: [0xff,0x99,0x0a,0x7e]
+
+v_log_legacy_f32_e64 v5, s1
+// CHECK: [0x05,0x00,0x8c,0xd1,0x01,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v255, s1
+// CHECK: [0xff,0x00,0x8c,0xd1,0x01,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, s101
+// CHECK: [0x05,0x00,0x8c,0xd1,0x65,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, flat_scratch_lo
+// CHECK: [0x05,0x00,0x8c,0xd1,0x66,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, flat_scratch_hi
+// CHECK: [0x05,0x00,0x8c,0xd1,0x67,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, vcc_lo
+// CHECK: [0x05,0x00,0x8c,0xd1,0x6a,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, vcc_hi
+// CHECK: [0x05,0x00,0x8c,0xd1,0x6b,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, tba_lo
+// CHECK: [0x05,0x00,0x8c,0xd1,0x6c,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, tba_hi
+// CHECK: [0x05,0x00,0x8c,0xd1,0x6d,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, tma_lo
+// CHECK: [0x05,0x00,0x8c,0xd1,0x6e,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, tma_hi
+// CHECK: [0x05,0x00,0x8c,0xd1,0x6f,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, ttmp11
+// CHECK: [0x05,0x00,0x8c,0xd1,0x7b,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, m0
+// CHECK: [0x05,0x00,0x8c,0xd1,0x7c,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, exec_lo
+// CHECK: [0x05,0x00,0x8c,0xd1,0x7e,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, exec_hi
+// CHECK: [0x05,0x00,0x8c,0xd1,0x7f,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, scc
+// CHECK: [0x05,0x00,0x8c,0xd1,0xfd,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, v1
+// CHECK: [0x05,0x00,0x8c,0xd1,0x01,0x01,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, v255
+// CHECK: [0x05,0x00,0x8c,0xd1,0xff,0x01,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, -s1
+// CHECK: [0x05,0x00,0x8c,0xd1,0x01,0x00,0x00,0x20]
+
+v_log_legacy_f32_e64 v5, |s1|
+// CHECK: [0x05,0x01,0x8c,0xd1,0x01,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x8c,0xd1,0x01,0x00,0x00,0x00]
+
+v_log_legacy_f32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x8c,0xd1,0x01,0x00,0x00,0x08]
+
+v_log_legacy_f32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x8c,0xd1,0x01,0x00,0x00,0x10]
+
+v_log_legacy_f32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x8c,0xd1,0x01,0x00,0x00,0x18]
+
+v_cndmask_b32 v5, 0, v2, vcc
+// CHECK: [0x80,0x04,0x0a,0x00]
+
+v_cndmask_b32 v255, 0, v2, vcc
+// CHECK: [0x80,0x04,0xfe,0x01]
+
+v_cndmask_b32 v5, -1, v2, vcc
+// CHECK: [0xc1,0x04,0x0a,0x00]
+
+v_cndmask_b32 v5, 0.5, v2, vcc
+// CHECK: [0xf0,0x04,0x0a,0x00]
+
+v_cndmask_b32 v5, -4.0, v2, vcc
+// CHECK: [0xf7,0x04,0x0a,0x00]
+
+v_cndmask_b32 v5, v1, v2, vcc
+// CHECK: [0x01,0x05,0x0a,0x00]
+
+v_cndmask_b32 v5, v255, v2, vcc
+// CHECK: [0xff,0x05,0x0a,0x00]
+
+v_cndmask_b32 v5, 0, v255, vcc
+// CHECK: [0x80,0xfe,0x0b,0x00]
+
+v_cndmask_b32_e64 v5, 0, 0, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd1,0x80,0x00,0x19,0x00]
+
+v_cndmask_b32_e64 v255, 0, 0, s[6:7]
+// CHECK: [0xff,0x00,0x00,0xd1,0x80,0x00,0x19,0x00]
+
+v_cndmask_b32_e64 v5, -1, 0, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd1,0xc1,0x00,0x19,0x00]
+
+v_cndmask_b32_e64 v5, 0.5, 0, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd1,0xf0,0x00,0x19,0x00]
+
+v_cndmask_b32_e64 v5, -4.0, 0, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd1,0xf7,0x00,0x19,0x00]
+
+v_cndmask_b32_e64 v5, v1, 0, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd1,0x01,0x01,0x19,0x00]
+
+v_cndmask_b32_e64 v5, v255, 0, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd1,0xff,0x01,0x19,0x00]
+
+v_cndmask_b32_e64 v5, 0, -1, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd1,0x80,0x82,0x19,0x00]
+
+v_cndmask_b32_e64 v5, 0, 0.5, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd1,0x80,0xe0,0x19,0x00]
+
+v_cndmask_b32_e64 v5, 0, -4.0, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd1,0x80,0xee,0x19,0x00]
+
+v_cndmask_b32_e64 v5, 0, v2, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd1,0x80,0x04,0x1a,0x00]
+
+v_cndmask_b32_e64 v5, 0, v255, s[6:7]
+// CHECK: [0x05,0x00,0x00,0xd1,0x80,0xfe,0x1b,0x00]
+
+v_cndmask_b32_e64 v5, 0, 0, s[8:9]
+// CHECK: [0x05,0x00,0x00,0xd1,0x80,0x00,0x21,0x00]
+
+v_cndmask_b32_e64 v5, 0, 0, s[100:101]
+// CHECK: [0x05,0x00,0x00,0xd1,0x80,0x00,0x91,0x01]
+
+v_cndmask_b32_e64 v5, 0, 0, flat_scratch
+// CHECK: [0x05,0x00,0x00,0xd1,0x80,0x00,0x99,0x01]
+
+v_cndmask_b32_e64 v5, 0, 0, vcc
+// CHECK: [0x05,0x00,0x00,0xd1,0x80,0x00,0xa9,0x01]
+
+v_cndmask_b32_e64 v5, 0, 0, tba
+// CHECK: [0x05,0x00,0x00,0xd1,0x80,0x00,0xb1,0x01]
+
+v_cndmask_b32_e64 v5, 0, 0, tma
+// CHECK: [0x05,0x00,0x00,0xd1,0x80,0x00,0xb9,0x01]
+
+v_cndmask_b32_e64 v5, 0, 0, ttmp[10:11]
+// CHECK: [0x05,0x00,0x00,0xd1,0x80,0x00,0xe9,0x01]
+
+v_add_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x02]
+
+v_add_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x03]
+
+v_add_f32 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x02]
+
+v_add_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x02]
+
+v_add_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x02]
+
+v_add_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x02]
+
+v_add_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x02]
+
+v_add_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x02]
+
+v_add_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x02]
+
+v_add_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x02]
+
+v_add_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x02]
+
+v_add_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x02]
+
+v_add_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x02]
+
+v_add_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x02]
+
+v_add_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x02]
+
+v_add_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x02]
+
+v_add_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x02]
+
+v_add_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x02]
+
+v_add_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x02]
+
+v_add_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x02,0x56,0x34,0x12,0xaf]
+
+v_add_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x02,0x73,0x72,0x71,0x3f]
+
+v_add_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x02]
+
+v_add_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x02]
+
+v_add_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x02]
+
+v_add_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0x05,0x00,0x00]
+
+v_add_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x01,0xd1,0x01,0x05,0x00,0x00]
+
+v_add_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x01,0xd1,0xff,0x05,0x00,0x00]
+
+v_add_f32_e64 v5, v1, s101
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xcb,0x00,0x00]
+
+v_add_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xcd,0x00,0x00]
+
+v_add_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xcf,0x00,0x00]
+
+v_add_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xd5,0x00,0x00]
+
+v_add_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xd7,0x00,0x00]
+
+v_add_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xd9,0x00,0x00]
+
+v_add_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xdb,0x00,0x00]
+
+v_add_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xdd,0x00,0x00]
+
+v_add_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xdf,0x00,0x00]
+
+v_add_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xf7,0x00,0x00]
+
+v_add_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xf9,0x00,0x00]
+
+v_add_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xfd,0x00,0x00]
+
+v_add_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xff,0x00,0x00]
+
+v_add_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xfb,0x01,0x00]
+
+v_add_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0x05,0x02,0x00]
+
+v_add_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0xff,0x03,0x00]
+
+v_add_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0x05,0x00,0x20]
+
+v_add_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0x05,0x00,0x40]
+
+v_add_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0x05,0x00,0x60]
+
+v_add_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x01,0xd1,0x01,0x05,0x00,0x00]
+
+v_add_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x01,0xd1,0x01,0x05,0x00,0x00]
+
+v_add_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x01,0xd1,0x01,0x05,0x00,0x00]
+
+v_add_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x01,0xd1,0x01,0x05,0x00,0x00]
+
+v_add_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0x05,0x00,0x08]
+
+v_add_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0x05,0x00,0x10]
+
+v_add_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x01,0xd1,0x01,0x05,0x00,0x18]
+
+v_sub_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x04]
+
+v_sub_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x05]
+
+v_sub_f32 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x04]
+
+v_sub_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x04]
+
+v_sub_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x04]
+
+v_sub_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x04]
+
+v_sub_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x04]
+
+v_sub_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x04]
+
+v_sub_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x04]
+
+v_sub_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x04]
+
+v_sub_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x04]
+
+v_sub_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x04]
+
+v_sub_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x04]
+
+v_sub_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x04]
+
+v_sub_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x04]
+
+v_sub_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x04]
+
+v_sub_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x04]
+
+v_sub_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x04]
+
+v_sub_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x04]
+
+v_sub_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x04,0x56,0x34,0x12,0xaf]
+
+v_sub_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x04,0x73,0x72,0x71,0x3f]
+
+v_sub_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x04]
+
+v_sub_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x04]
+
+v_sub_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x04]
+
+v_sub_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0x05,0x00,0x00]
+
+v_sub_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x02,0xd1,0x01,0x05,0x00,0x00]
+
+v_sub_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x02,0xd1,0xff,0x05,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, s101
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xcb,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xcd,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xcf,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xd5,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xd7,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xd9,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xdb,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xdd,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xdf,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xf7,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xf9,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xfd,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xff,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xfb,0x01,0x00]
+
+v_sub_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0x05,0x02,0x00]
+
+v_sub_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0xff,0x03,0x00]
+
+v_sub_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0x05,0x00,0x20]
+
+v_sub_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0x05,0x00,0x40]
+
+v_sub_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0x05,0x00,0x60]
+
+v_sub_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x02,0xd1,0x01,0x05,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x02,0xd1,0x01,0x05,0x00,0x00]
+
+v_sub_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x02,0xd1,0x01,0x05,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x02,0xd1,0x01,0x05,0x00,0x00]
+
+v_sub_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0x05,0x00,0x08]
+
+v_sub_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0x05,0x00,0x10]
+
+v_sub_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x02,0xd1,0x01,0x05,0x00,0x18]
+
+v_subrev_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x06]
+
+v_subrev_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x07]
+
+v_subrev_f32 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x06]
+
+v_subrev_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x06]
+
+v_subrev_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x06]
+
+v_subrev_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x06]
+
+v_subrev_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x06]
+
+v_subrev_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x06]
+
+v_subrev_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x06]
+
+v_subrev_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x06]
+
+v_subrev_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x06]
+
+v_subrev_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x06]
+
+v_subrev_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x06]
+
+v_subrev_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x06]
+
+v_subrev_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x06]
+
+v_subrev_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x06]
+
+v_subrev_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x06]
+
+v_subrev_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x06]
+
+v_subrev_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x06]
+
+v_subrev_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x06,0x56,0x34,0x12,0xaf]
+
+v_subrev_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x06,0x73,0x72,0x71,0x3f]
+
+v_subrev_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x06]
+
+v_subrev_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x06]
+
+v_subrev_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x06]
+
+v_subrev_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0x05,0x00,0x00]
+
+v_subrev_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x03,0xd1,0x01,0x05,0x00,0x00]
+
+v_subrev_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x03,0xd1,0xff,0x05,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, s101
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xcb,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xcd,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xcf,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xd5,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xd7,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xd9,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xdb,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xdd,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xdf,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xf7,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xf9,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xfd,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xff,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xfb,0x01,0x00]
+
+v_subrev_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0x05,0x02,0x00]
+
+v_subrev_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0xff,0x03,0x00]
+
+v_subrev_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0x05,0x00,0x20]
+
+v_subrev_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0x05,0x00,0x40]
+
+v_subrev_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0x05,0x00,0x60]
+
+v_subrev_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x03,0xd1,0x01,0x05,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x03,0xd1,0x01,0x05,0x00,0x00]
+
+v_subrev_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x03,0xd1,0x01,0x05,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x03,0xd1,0x01,0x05,0x00,0x00]
+
+v_subrev_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0x05,0x00,0x08]
+
+v_subrev_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0x05,0x00,0x10]
+
+v_subrev_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x03,0xd1,0x01,0x05,0x00,0x18]
+
+v_mul_legacy_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x08]
+
+v_mul_legacy_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x09]
+
+v_mul_legacy_f32 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x08]
+
+v_mul_legacy_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x08]
+
+v_mul_legacy_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x08]
+
+v_mul_legacy_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x08]
+
+v_mul_legacy_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x08]
+
+v_mul_legacy_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x08]
+
+v_mul_legacy_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x08]
+
+v_mul_legacy_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x08]
+
+v_mul_legacy_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x08]
+
+v_mul_legacy_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x08]
+
+v_mul_legacy_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x08]
+
+v_mul_legacy_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x08]
+
+v_mul_legacy_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x08]
+
+v_mul_legacy_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x08]
+
+v_mul_legacy_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x08]
+
+v_mul_legacy_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x08]
+
+v_mul_legacy_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x08]
+
+v_mul_legacy_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x08,0x56,0x34,0x12,0xaf]
+
+v_mul_legacy_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x08,0x73,0x72,0x71,0x3f]
+
+v_mul_legacy_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x08]
+
+v_mul_legacy_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x08]
+
+v_mul_legacy_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x08]
+
+v_mul_legacy_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_legacy_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x04,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x04,0xd1,0xff,0x05,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, s101
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xcb,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xcd,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xcf,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xd5,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xd7,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xd9,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xdb,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xdd,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xdf,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xf7,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xf9,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xfd,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xff,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xfb,0x01,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0x05,0x02,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0xff,0x03,0x00]
+
+v_mul_legacy_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0x05,0x00,0x20]
+
+v_mul_legacy_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0x05,0x00,0x40]
+
+v_mul_legacy_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0x05,0x00,0x60]
+
+v_mul_legacy_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x04,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x04,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x04,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x04,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_legacy_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0x05,0x00,0x08]
+
+v_mul_legacy_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0x05,0x00,0x10]
+
+v_mul_legacy_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x04,0xd1,0x01,0x05,0x00,0x18]
+
+v_mul_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x0a]
+
+v_mul_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x0b]
+
+v_mul_f32 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x0a]
+
+v_mul_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x0a]
+
+v_mul_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x0a]
+
+v_mul_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x0a]
+
+v_mul_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x0a]
+
+v_mul_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x0a]
+
+v_mul_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x0a]
+
+v_mul_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x0a]
+
+v_mul_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x0a]
+
+v_mul_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x0a]
+
+v_mul_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x0a]
+
+v_mul_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x0a]
+
+v_mul_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x0a]
+
+v_mul_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x0a]
+
+v_mul_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x0a]
+
+v_mul_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x0a]
+
+v_mul_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x0a]
+
+v_mul_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x0a,0x56,0x34,0x12,0xaf]
+
+v_mul_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x0a,0x73,0x72,0x71,0x3f]
+
+v_mul_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x0a]
+
+v_mul_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x0a]
+
+v_mul_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x0a]
+
+v_mul_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x05,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x05,0xd1,0xff,0x05,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, s101
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xcb,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xcd,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xcf,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xd5,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xd7,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xd9,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xdb,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xdd,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xdf,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xf7,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xf9,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xfd,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xff,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xfb,0x01,0x00]
+
+v_mul_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0x05,0x02,0x00]
+
+v_mul_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0xff,0x03,0x00]
+
+v_mul_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0x05,0x00,0x20]
+
+v_mul_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0x05,0x00,0x40]
+
+v_mul_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0x05,0x00,0x60]
+
+v_mul_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x05,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x05,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x05,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x05,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0x05,0x00,0x08]
+
+v_mul_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0x05,0x00,0x10]
+
+v_mul_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x05,0xd1,0x01,0x05,0x00,0x18]
+
+v_mul_i32_i24 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x0c]
+
+v_mul_i32_i24 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x0d]
+
+v_mul_i32_i24 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x0c]
+
+v_mul_i32_i24 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x0c]
+
+v_mul_i32_i24 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x0c]
+
+v_mul_i32_i24 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x0c]
+
+v_mul_i32_i24 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x0c]
+
+v_mul_i32_i24 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x0c]
+
+v_mul_i32_i24 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x0c]
+
+v_mul_i32_i24 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x0c]
+
+v_mul_i32_i24 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x0c]
+
+v_mul_i32_i24 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x0c]
+
+v_mul_i32_i24 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x0c]
+
+v_mul_i32_i24 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x0c]
+
+v_mul_i32_i24 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x0c]
+
+v_mul_i32_i24 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x0c]
+
+v_mul_i32_i24 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x0c]
+
+v_mul_i32_i24 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x0c]
+
+v_mul_i32_i24 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x0c]
+
+v_mul_i32_i24 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x0c,0x56,0x34,0x12,0xaf]
+
+v_mul_i32_i24 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x0c,0x73,0x72,0x71,0x3f]
+
+v_mul_i32_i24 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x0c]
+
+v_mul_i32_i24 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x0c]
+
+v_mul_i32_i24 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x0c]
+
+v_mul_i32_i24_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0x04,0x00,0x00]
+
+v_mul_i32_i24_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x06,0xd1,0x80,0x04,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x06,0xd1,0xc1,0x04,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x06,0xd1,0xf0,0x04,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x06,0xd1,0xf7,0x04,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x06,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x06,0xd1,0xff,0x05,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0xca,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0xcc,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0xce,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0xd4,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0xd6,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0xd8,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0xda,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0xdc,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0xde,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0xf6,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0xf8,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0xfc,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0xfe,0x00,0x00]
+
+v_mul_i32_i24_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0x00,0x01,0x00]
+
+v_mul_i32_i24_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0x82,0x01,0x00]
+
+v_mul_i32_i24_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0xe0,0x01,0x00]
+
+v_mul_i32_i24_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0xee,0x01,0x00]
+
+v_mul_i32_i24_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0x04,0x02,0x00]
+
+v_mul_i32_i24_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x06,0xd1,0x80,0xfe,0x03,0x00]
+
+v_mul_hi_i32_i24 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x0f]
+
+v_mul_hi_i32_i24 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x0e,0x56,0x34,0x12,0xaf]
+
+v_mul_hi_i32_i24 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x0e,0x73,0x72,0x71,0x3f]
+
+v_mul_hi_i32_i24 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x0e]
+
+v_mul_hi_i32_i24 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x0e]
+
+v_mul_hi_i32_i24_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0x04,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x07,0xd1,0x80,0x04,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x07,0xd1,0xc1,0x04,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x07,0xd1,0xf0,0x04,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x07,0xd1,0xf7,0x04,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x07,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x07,0xd1,0xff,0x05,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0xca,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0xcc,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0xce,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0xd4,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0xd6,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0xd8,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0xda,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0xdc,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0xde,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0xf6,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0xf8,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0xfc,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0xfe,0x00,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0x00,0x01,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0x82,0x01,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0xe0,0x01,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0xee,0x01,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0x04,0x02,0x00]
+
+v_mul_hi_i32_i24_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x07,0xd1,0x80,0xfe,0x03,0x00]
+
+v_mul_u32_u24 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x10]
+
+v_mul_u32_u24 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x11]
+
+v_mul_u32_u24 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x10]
+
+v_mul_u32_u24 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x10]
+
+v_mul_u32_u24 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x10]
+
+v_mul_u32_u24 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x10]
+
+v_mul_u32_u24 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x10]
+
+v_mul_u32_u24 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x10]
+
+v_mul_u32_u24 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x10]
+
+v_mul_u32_u24 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x10]
+
+v_mul_u32_u24 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x10]
+
+v_mul_u32_u24 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x10]
+
+v_mul_u32_u24 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x10]
+
+v_mul_u32_u24 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x10]
+
+v_mul_u32_u24 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x10]
+
+v_mul_u32_u24 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x10]
+
+v_mul_u32_u24 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x10]
+
+v_mul_u32_u24 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x10]
+
+v_mul_u32_u24 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x10]
+
+v_mul_u32_u24 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x10,0x56,0x34,0x12,0xaf]
+
+v_mul_u32_u24 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x10,0x73,0x72,0x71,0x3f]
+
+v_mul_u32_u24 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x10]
+
+v_mul_u32_u24 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x10]
+
+v_mul_u32_u24 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x10]
+
+v_mul_u32_u24_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0x04,0x00,0x00]
+
+v_mul_u32_u24_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x08,0xd1,0x80,0x04,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x08,0xd1,0xc1,0x04,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x08,0xd1,0xf0,0x04,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x08,0xd1,0xf7,0x04,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x08,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x08,0xd1,0xff,0x05,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0xca,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0xcc,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0xce,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0xd4,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0xd6,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0xd8,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0xda,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0xdc,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0xde,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0xf6,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0xf8,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0xfc,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0xfe,0x00,0x00]
+
+v_mul_u32_u24_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0x00,0x01,0x00]
+
+v_mul_u32_u24_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0x82,0x01,0x00]
+
+v_mul_u32_u24_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0xe0,0x01,0x00]
+
+v_mul_u32_u24_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0xee,0x01,0x00]
+
+v_mul_u32_u24_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0x04,0x02,0x00]
+
+v_mul_u32_u24_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x08,0xd1,0x80,0xfe,0x03,0x00]
+
+v_mul_hi_u32_u24 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x12]
+
+v_mul_hi_u32_u24 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x13]
+
+v_mul_hi_u32_u24 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x12,0x56,0x34,0x12,0xaf]
+
+v_mul_hi_u32_u24 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x12,0x73,0x72,0x71,0x3f]
+
+v_mul_hi_u32_u24 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x12]
+
+v_mul_hi_u32_u24 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x12]
+
+v_mul_hi_u32_u24_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0x04,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x09,0xd1,0x80,0x04,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x09,0xd1,0xc1,0x04,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x09,0xd1,0xf0,0x04,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x09,0xd1,0xf7,0x04,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x09,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x09,0xd1,0xff,0x05,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0xca,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0xcc,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0xce,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0xd4,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0xd6,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0xd8,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0xda,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0xdc,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0xde,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0xf6,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0xf8,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0xfc,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0xfe,0x00,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0x00,0x01,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0x82,0x01,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0xe0,0x01,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0xee,0x01,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0x04,0x02,0x00]
+
+v_mul_hi_u32_u24_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x09,0xd1,0x80,0xfe,0x03,0x00]
+
+v_min_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x14]
+
+v_min_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x15]
+
+v_min_f32 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x14]
+
+v_min_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x14]
+
+v_min_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x14]
+
+v_min_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x14]
+
+v_min_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x14]
+
+v_min_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x14]
+
+v_min_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x14]
+
+v_min_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x14]
+
+v_min_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x14]
+
+v_min_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x14]
+
+v_min_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x14]
+
+v_min_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x14]
+
+v_min_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x14]
+
+v_min_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x14]
+
+v_min_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x14]
+
+v_min_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x14]
+
+v_min_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x14]
+
+v_min_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x14,0x56,0x34,0x12,0xaf]
+
+v_min_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x14,0x73,0x72,0x71,0x3f]
+
+v_min_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x14]
+
+v_min_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x14]
+
+v_min_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x14]
+
+v_min_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0x05,0x00,0x00]
+
+v_min_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x0a,0xd1,0x01,0x05,0x00,0x00]
+
+v_min_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x0a,0xd1,0xff,0x05,0x00,0x00]
+
+v_min_f32_e64 v5, v1, s101
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xcb,0x00,0x00]
+
+v_min_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xcd,0x00,0x00]
+
+v_min_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xcf,0x00,0x00]
+
+v_min_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xd5,0x00,0x00]
+
+v_min_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xd7,0x00,0x00]
+
+v_min_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xd9,0x00,0x00]
+
+v_min_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xdb,0x00,0x00]
+
+v_min_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xdd,0x00,0x00]
+
+v_min_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xdf,0x00,0x00]
+
+v_min_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xf7,0x00,0x00]
+
+v_min_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xf9,0x00,0x00]
+
+v_min_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xfd,0x00,0x00]
+
+v_min_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xff,0x00,0x00]
+
+v_min_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xfb,0x01,0x00]
+
+v_min_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0x05,0x02,0x00]
+
+v_min_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0xff,0x03,0x00]
+
+v_min_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0x05,0x00,0x20]
+
+v_min_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0x05,0x00,0x40]
+
+v_min_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0x05,0x00,0x60]
+
+v_min_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x0a,0xd1,0x01,0x05,0x00,0x00]
+
+v_min_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x0a,0xd1,0x01,0x05,0x00,0x00]
+
+v_min_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x0a,0xd1,0x01,0x05,0x00,0x00]
+
+v_min_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x0a,0xd1,0x01,0x05,0x00,0x00]
+
+v_min_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0x05,0x00,0x08]
+
+v_min_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0x05,0x00,0x10]
+
+v_min_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x0a,0xd1,0x01,0x05,0x00,0x18]
+
+v_max_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x16]
+
+v_max_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x17]
+
+v_max_f32 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x16]
+
+v_max_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x16]
+
+v_max_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x16]
+
+v_max_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x16]
+
+v_max_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x16]
+
+v_max_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x16]
+
+v_max_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x16]
+
+v_max_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x16]
+
+v_max_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x16]
+
+v_max_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x16]
+
+v_max_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x16]
+
+v_max_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x16]
+
+v_max_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x16]
+
+v_max_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x16]
+
+v_max_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x16]
+
+v_max_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x16]
+
+v_max_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x16]
+
+v_max_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x16,0x56,0x34,0x12,0xaf]
+
+v_max_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x16,0x73,0x72,0x71,0x3f]
+
+v_max_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x16]
+
+v_max_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x16]
+
+v_max_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x16]
+
+v_max_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0x05,0x00,0x00]
+
+v_max_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x0b,0xd1,0x01,0x05,0x00,0x00]
+
+v_max_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x0b,0xd1,0xff,0x05,0x00,0x00]
+
+v_max_f32_e64 v5, v1, s101
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xcb,0x00,0x00]
+
+v_max_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xcd,0x00,0x00]
+
+v_max_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xcf,0x00,0x00]
+
+v_max_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xd5,0x00,0x00]
+
+v_max_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xd7,0x00,0x00]
+
+v_max_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xd9,0x00,0x00]
+
+v_max_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xdb,0x00,0x00]
+
+v_max_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xdd,0x00,0x00]
+
+v_max_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xdf,0x00,0x00]
+
+v_max_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xf7,0x00,0x00]
+
+v_max_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xf9,0x00,0x00]
+
+v_max_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xfd,0x00,0x00]
+
+v_max_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xff,0x00,0x00]
+
+v_max_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xfb,0x01,0x00]
+
+v_max_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0x05,0x02,0x00]
+
+v_max_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0xff,0x03,0x00]
+
+v_max_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0x05,0x00,0x20]
+
+v_max_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0x05,0x00,0x40]
+
+v_max_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0x05,0x00,0x60]
+
+v_max_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x0b,0xd1,0x01,0x05,0x00,0x00]
+
+v_max_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x0b,0xd1,0x01,0x05,0x00,0x00]
+
+v_max_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x0b,0xd1,0x01,0x05,0x00,0x00]
+
+v_max_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x0b,0xd1,0x01,0x05,0x00,0x00]
+
+v_max_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0x05,0x00,0x08]
+
+v_max_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0x05,0x00,0x10]
+
+v_max_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x0b,0xd1,0x01,0x05,0x00,0x18]
+
+v_min_i32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x18]
+
+v_min_i32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x19]
+
+v_min_i32 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x18]
+
+v_min_i32 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x18]
+
+v_min_i32 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x18]
+
+v_min_i32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x18]
+
+v_min_i32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x18]
+
+v_min_i32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x18]
+
+v_min_i32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x18]
+
+v_min_i32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x18]
+
+v_min_i32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x18]
+
+v_min_i32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x18]
+
+v_min_i32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x18]
+
+v_min_i32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x18]
+
+v_min_i32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x18]
+
+v_min_i32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x18]
+
+v_min_i32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x18]
+
+v_min_i32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x18]
+
+v_min_i32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x18]
+
+v_min_i32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x18,0x56,0x34,0x12,0xaf]
+
+v_min_i32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x18,0x73,0x72,0x71,0x3f]
+
+v_min_i32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x18]
+
+v_min_i32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x18]
+
+v_min_i32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x18]
+
+v_min_i32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0x04,0x00,0x00]
+
+v_min_i32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x0c,0xd1,0x80,0x04,0x00,0x00]
+
+v_min_i32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x0c,0xd1,0xc1,0x04,0x00,0x00]
+
+v_min_i32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x0c,0xd1,0xf0,0x04,0x00,0x00]
+
+v_min_i32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x0c,0xd1,0xf7,0x04,0x00,0x00]
+
+v_min_i32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x0c,0xd1,0x01,0x05,0x00,0x00]
+
+v_min_i32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x0c,0xd1,0xff,0x05,0x00,0x00]
+
+v_min_i32_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0xca,0x00,0x00]
+
+v_min_i32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0xcc,0x00,0x00]
+
+v_min_i32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0xce,0x00,0x00]
+
+v_min_i32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0xd4,0x00,0x00]
+
+v_min_i32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0xd6,0x00,0x00]
+
+v_min_i32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0xd8,0x00,0x00]
+
+v_min_i32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0xda,0x00,0x00]
+
+v_min_i32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0xdc,0x00,0x00]
+
+v_min_i32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0xde,0x00,0x00]
+
+v_min_i32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0xf6,0x00,0x00]
+
+v_min_i32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0xf8,0x00,0x00]
+
+v_min_i32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0xfc,0x00,0x00]
+
+v_min_i32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0xfe,0x00,0x00]
+
+v_min_i32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0x00,0x01,0x00]
+
+v_min_i32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0x82,0x01,0x00]
+
+v_min_i32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0xe0,0x01,0x00]
+
+v_min_i32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0xee,0x01,0x00]
+
+v_min_i32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0x04,0x02,0x00]
+
+v_min_i32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x0c,0xd1,0x80,0xfe,0x03,0x00]
+
+v_max_i32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x1a]
+
+v_max_i32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x1b]
+
+v_max_i32 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x1a]
+
+v_max_i32 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x1a]
+
+v_max_i32 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x1a]
+
+v_max_i32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x1a]
+
+v_max_i32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x1a]
+
+v_max_i32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x1a]
+
+v_max_i32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x1a]
+
+v_max_i32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x1a]
+
+v_max_i32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x1a]
+
+v_max_i32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x1a]
+
+v_max_i32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x1a]
+
+v_max_i32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x1a]
+
+v_max_i32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x1a]
+
+v_max_i32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x1a]
+
+v_max_i32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x1a]
+
+v_max_i32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x1a]
+
+v_max_i32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x1a]
+
+v_max_i32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x1a,0x56,0x34,0x12,0xaf]
+
+v_max_i32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x1a,0x73,0x72,0x71,0x3f]
+
+v_max_i32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x1a]
+
+v_max_i32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x1a]
+
+v_max_i32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x1a]
+
+v_max_i32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0x04,0x00,0x00]
+
+v_max_i32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x0d,0xd1,0x80,0x04,0x00,0x00]
+
+v_max_i32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x0d,0xd1,0xc1,0x04,0x00,0x00]
+
+v_max_i32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x0d,0xd1,0xf0,0x04,0x00,0x00]
+
+v_max_i32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x0d,0xd1,0xf7,0x04,0x00,0x00]
+
+v_max_i32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x0d,0xd1,0x01,0x05,0x00,0x00]
+
+v_max_i32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x0d,0xd1,0xff,0x05,0x00,0x00]
+
+v_max_i32_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0xca,0x00,0x00]
+
+v_max_i32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0xcc,0x00,0x00]
+
+v_max_i32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0xce,0x00,0x00]
+
+v_max_i32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0xd4,0x00,0x00]
+
+v_max_i32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0xd6,0x00,0x00]
+
+v_max_i32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0xd8,0x00,0x00]
+
+v_max_i32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0xda,0x00,0x00]
+
+v_max_i32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0xdc,0x00,0x00]
+
+v_max_i32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0xde,0x00,0x00]
+
+v_max_i32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0xf6,0x00,0x00]
+
+v_max_i32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0xf8,0x00,0x00]
+
+v_max_i32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0xfc,0x00,0x00]
+
+v_max_i32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0xfe,0x00,0x00]
+
+v_max_i32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0x00,0x01,0x00]
+
+v_max_i32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0x82,0x01,0x00]
+
+v_max_i32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0xe0,0x01,0x00]
+
+v_max_i32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0xee,0x01,0x00]
+
+v_max_i32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0x04,0x02,0x00]
+
+v_max_i32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x0d,0xd1,0x80,0xfe,0x03,0x00]
+
+v_min_u32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x1c]
+
+v_min_u32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x1d]
+
+v_min_u32 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x1c]
+
+v_min_u32 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x1c]
+
+v_min_u32 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x1c]
+
+v_min_u32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x1c]
+
+v_min_u32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x1c]
+
+v_min_u32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x1c]
+
+v_min_u32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x1c]
+
+v_min_u32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x1c]
+
+v_min_u32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x1c]
+
+v_min_u32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x1c]
+
+v_min_u32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x1c]
+
+v_min_u32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x1c]
+
+v_min_u32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x1c]
+
+v_min_u32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x1c]
+
+v_min_u32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x1c]
+
+v_min_u32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x1c]
+
+v_min_u32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x1c]
+
+v_min_u32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x1c,0x56,0x34,0x12,0xaf]
+
+v_min_u32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x1c,0x73,0x72,0x71,0x3f]
+
+v_min_u32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x1c]
+
+v_min_u32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x1c]
+
+v_min_u32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x1c]
+
+v_min_u32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0x04,0x00,0x00]
+
+v_min_u32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x0e,0xd1,0x80,0x04,0x00,0x00]
+
+v_min_u32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x0e,0xd1,0xc1,0x04,0x00,0x00]
+
+v_min_u32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x0e,0xd1,0xf0,0x04,0x00,0x00]
+
+v_min_u32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x0e,0xd1,0xf7,0x04,0x00,0x00]
+
+v_min_u32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x0e,0xd1,0x01,0x05,0x00,0x00]
+
+v_min_u32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x0e,0xd1,0xff,0x05,0x00,0x00]
+
+v_min_u32_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0xca,0x00,0x00]
+
+v_min_u32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0xcc,0x00,0x00]
+
+v_min_u32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0xce,0x00,0x00]
+
+v_min_u32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0xd4,0x00,0x00]
+
+v_min_u32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0xd6,0x00,0x00]
+
+v_min_u32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0xd8,0x00,0x00]
+
+v_min_u32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0xda,0x00,0x00]
+
+v_min_u32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0xdc,0x00,0x00]
+
+v_min_u32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0xde,0x00,0x00]
+
+v_min_u32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0xf6,0x00,0x00]
+
+v_min_u32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0xf8,0x00,0x00]
+
+v_min_u32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0xfc,0x00,0x00]
+
+v_min_u32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0xfe,0x00,0x00]
+
+v_min_u32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0x00,0x01,0x00]
+
+v_min_u32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0x82,0x01,0x00]
+
+v_min_u32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0xe0,0x01,0x00]
+
+v_min_u32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0xee,0x01,0x00]
+
+v_min_u32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0x04,0x02,0x00]
+
+v_min_u32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x0e,0xd1,0x80,0xfe,0x03,0x00]
+
+v_max_u32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x1e]
+
+v_max_u32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x1f]
+
+v_max_u32 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x1e]
+
+v_max_u32 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x1e]
+
+v_max_u32 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x1e]
+
+v_max_u32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x1e]
+
+v_max_u32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x1e]
+
+v_max_u32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x1e]
+
+v_max_u32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x1e]
+
+v_max_u32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x1e]
+
+v_max_u32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x1e]
+
+v_max_u32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x1e]
+
+v_max_u32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x1e]
+
+v_max_u32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x1e]
+
+v_max_u32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x1e]
+
+v_max_u32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x1e]
+
+v_max_u32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x1e]
+
+v_max_u32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x1e]
+
+v_max_u32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x1e]
+
+v_max_u32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x1e,0x56,0x34,0x12,0xaf]
+
+v_max_u32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x1e,0x73,0x72,0x71,0x3f]
+
+v_max_u32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x1e]
+
+v_max_u32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x1e]
+
+v_max_u32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x1e]
+
+v_max_u32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0x04,0x00,0x00]
+
+v_max_u32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x0f,0xd1,0x80,0x04,0x00,0x00]
+
+v_max_u32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x0f,0xd1,0xc1,0x04,0x00,0x00]
+
+v_max_u32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x0f,0xd1,0xf0,0x04,0x00,0x00]
+
+v_max_u32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x0f,0xd1,0xf7,0x04,0x00,0x00]
+
+v_max_u32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x0f,0xd1,0x01,0x05,0x00,0x00]
+
+v_max_u32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x0f,0xd1,0xff,0x05,0x00,0x00]
+
+v_max_u32_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0xca,0x00,0x00]
+
+v_max_u32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0xcc,0x00,0x00]
+
+v_max_u32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0xce,0x00,0x00]
+
+v_max_u32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0xd4,0x00,0x00]
+
+v_max_u32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0xd6,0x00,0x00]
+
+v_max_u32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0xd8,0x00,0x00]
+
+v_max_u32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0xda,0x00,0x00]
+
+v_max_u32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0xdc,0x00,0x00]
+
+v_max_u32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0xde,0x00,0x00]
+
+v_max_u32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0xf6,0x00,0x00]
+
+v_max_u32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0xf8,0x00,0x00]
+
+v_max_u32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0xfc,0x00,0x00]
+
+v_max_u32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0xfe,0x00,0x00]
+
+v_max_u32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0x00,0x01,0x00]
+
+v_max_u32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0x82,0x01,0x00]
+
+v_max_u32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0xe0,0x01,0x00]
+
+v_max_u32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0xee,0x01,0x00]
+
+v_max_u32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0x04,0x02,0x00]
+
+v_max_u32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x0f,0xd1,0x80,0xfe,0x03,0x00]
+
+v_lshrrev_b32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x20]
+
+v_lshrrev_b32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x21]
+
+v_lshrrev_b32 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x20]
+
+v_lshrrev_b32 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x20]
+
+v_lshrrev_b32 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x20]
+
+v_lshrrev_b32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x20]
+
+v_lshrrev_b32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x20]
+
+v_lshrrev_b32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x20]
+
+v_lshrrev_b32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x20]
+
+v_lshrrev_b32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x20]
+
+v_lshrrev_b32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x20]
+
+v_lshrrev_b32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x20]
+
+v_lshrrev_b32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x20]
+
+v_lshrrev_b32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x20]
+
+v_lshrrev_b32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x20]
+
+v_lshrrev_b32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x20]
+
+v_lshrrev_b32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x20]
+
+v_lshrrev_b32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x20]
+
+v_lshrrev_b32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x20]
+
+v_lshrrev_b32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x20,0x56,0x34,0x12,0xaf]
+
+v_lshrrev_b32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x20,0x73,0x72,0x71,0x3f]
+
+v_lshrrev_b32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x20]
+
+v_lshrrev_b32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x20]
+
+v_lshrrev_b32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x20]
+
+v_lshrrev_b32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0x04,0x00,0x00]
+
+v_lshrrev_b32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x10,0xd1,0x80,0x04,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x10,0xd1,0xc1,0x04,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x10,0xd1,0xf0,0x04,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x10,0xd1,0xf7,0x04,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x10,0xd1,0x01,0x05,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x10,0xd1,0xff,0x05,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0xca,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0xcc,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0xce,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0xd4,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0xd6,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0xd8,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0xda,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0xdc,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0xde,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0xf6,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0xf8,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0xfc,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0xfe,0x00,0x00]
+
+v_lshrrev_b32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0x00,0x01,0x00]
+
+v_lshrrev_b32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0x82,0x01,0x00]
+
+v_lshrrev_b32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0xe0,0x01,0x00]
+
+v_lshrrev_b32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0xee,0x01,0x00]
+
+v_lshrrev_b32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0x04,0x02,0x00]
+
+v_lshrrev_b32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x10,0xd1,0x80,0xfe,0x03,0x00]
+
+v_ashrrev_i32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x22]
+
+v_ashrrev_i32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x23]
+
+v_ashrrev_i32 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x22]
+
+v_ashrrev_i32 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x22]
+
+v_ashrrev_i32 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x22]
+
+v_ashrrev_i32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x22]
+
+v_ashrrev_i32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x22]
+
+v_ashrrev_i32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x22]
+
+v_ashrrev_i32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x22]
+
+v_ashrrev_i32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x22]
+
+v_ashrrev_i32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x22]
+
+v_ashrrev_i32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x22]
+
+v_ashrrev_i32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x22]
+
+v_ashrrev_i32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x22]
+
+v_ashrrev_i32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x22]
+
+v_ashrrev_i32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x22]
+
+v_ashrrev_i32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x22]
+
+v_ashrrev_i32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x22]
+
+v_ashrrev_i32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x22]
+
+v_ashrrev_i32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x22,0x56,0x34,0x12,0xaf]
+
+v_ashrrev_i32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x22,0x73,0x72,0x71,0x3f]
+
+v_ashrrev_i32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x22]
+
+v_ashrrev_i32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x22]
+
+v_ashrrev_i32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x22]
+
+v_ashrrev_i32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0x04,0x00,0x00]
+
+v_ashrrev_i32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x11,0xd1,0x80,0x04,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x11,0xd1,0xc1,0x04,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x11,0xd1,0xf0,0x04,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x11,0xd1,0xf7,0x04,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x11,0xd1,0x01,0x05,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x11,0xd1,0xff,0x05,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0xca,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0xcc,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0xce,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0xd4,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0xd6,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0xd8,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0xda,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0xdc,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0xde,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0xf6,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0xf8,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0xfc,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0xfe,0x00,0x00]
+
+v_ashrrev_i32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0x00,0x01,0x00]
+
+v_ashrrev_i32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0x82,0x01,0x00]
+
+v_ashrrev_i32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0xe0,0x01,0x00]
+
+v_ashrrev_i32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0xee,0x01,0x00]
+
+v_ashrrev_i32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0x04,0x02,0x00]
+
+v_ashrrev_i32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x11,0xd1,0x80,0xfe,0x03,0x00]
+
+v_lshlrev_b32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x24]
+
+v_lshlrev_b32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x25]
+
+v_lshlrev_b32 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x24]
+
+v_lshlrev_b32 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x24]
+
+v_lshlrev_b32 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x24]
+
+v_lshlrev_b32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x24]
+
+v_lshlrev_b32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x24]
+
+v_lshlrev_b32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x24]
+
+v_lshlrev_b32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x24]
+
+v_lshlrev_b32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x24]
+
+v_lshlrev_b32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x24]
+
+v_lshlrev_b32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x24]
+
+v_lshlrev_b32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x24]
+
+v_lshlrev_b32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x24]
+
+v_lshlrev_b32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x24]
+
+v_lshlrev_b32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x24]
+
+v_lshlrev_b32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x24]
+
+v_lshlrev_b32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x24]
+
+v_lshlrev_b32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x24]
+
+v_lshlrev_b32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x24,0x56,0x34,0x12,0xaf]
+
+v_lshlrev_b32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x24,0x73,0x72,0x71,0x3f]
+
+v_lshlrev_b32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x24]
+
+v_lshlrev_b32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x24]
+
+v_lshlrev_b32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x24]
+
+v_lshlrev_b32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0x04,0x00,0x00]
+
+v_lshlrev_b32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x12,0xd1,0x80,0x04,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x12,0xd1,0xc1,0x04,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x12,0xd1,0xf0,0x04,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x12,0xd1,0xf7,0x04,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x12,0xd1,0x01,0x05,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x12,0xd1,0xff,0x05,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0xca,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0xcc,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0xce,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0xd4,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0xd6,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0xd8,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0xda,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0xdc,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0xde,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0xf6,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0xf8,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0xfc,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0xfe,0x00,0x00]
+
+v_lshlrev_b32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0x00,0x01,0x00]
+
+v_lshlrev_b32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0x82,0x01,0x00]
+
+v_lshlrev_b32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0xe0,0x01,0x00]
+
+v_lshlrev_b32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0xee,0x01,0x00]
+
+v_lshlrev_b32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0x04,0x02,0x00]
+
+v_lshlrev_b32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x12,0xd1,0x80,0xfe,0x03,0x00]
+
+v_and_b32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x26]
+
+v_and_b32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x27]
+
+v_and_b32 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x26]
+
+v_and_b32 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x26]
+
+v_and_b32 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x26]
+
+v_and_b32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x26]
+
+v_and_b32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x26]
+
+v_and_b32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x26]
+
+v_and_b32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x26]
+
+v_and_b32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x26]
+
+v_and_b32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x26]
+
+v_and_b32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x26]
+
+v_and_b32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x26]
+
+v_and_b32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x26]
+
+v_and_b32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x26]
+
+v_and_b32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x26]
+
+v_and_b32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x26]
+
+v_and_b32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x26]
+
+v_and_b32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x26]
+
+v_and_b32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x26,0x56,0x34,0x12,0xaf]
+
+v_and_b32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x26,0x73,0x72,0x71,0x3f]
+
+v_and_b32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x26]
+
+v_and_b32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x26]
+
+v_and_b32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x26]
+
+v_and_b32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0x04,0x00,0x00]
+
+v_and_b32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x13,0xd1,0x80,0x04,0x00,0x00]
+
+v_and_b32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x13,0xd1,0xc1,0x04,0x00,0x00]
+
+v_and_b32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x13,0xd1,0xf0,0x04,0x00,0x00]
+
+v_and_b32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x13,0xd1,0xf7,0x04,0x00,0x00]
+
+v_and_b32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x13,0xd1,0x01,0x05,0x00,0x00]
+
+v_and_b32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x13,0xd1,0xff,0x05,0x00,0x00]
+
+v_and_b32_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0xca,0x00,0x00]
+
+v_and_b32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0xcc,0x00,0x00]
+
+v_and_b32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0xce,0x00,0x00]
+
+v_and_b32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0xd4,0x00,0x00]
+
+v_and_b32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0xd6,0x00,0x00]
+
+v_and_b32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0xd8,0x00,0x00]
+
+v_and_b32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0xda,0x00,0x00]
+
+v_and_b32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0xdc,0x00,0x00]
+
+v_and_b32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0xde,0x00,0x00]
+
+v_and_b32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0xf6,0x00,0x00]
+
+v_and_b32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0xf8,0x00,0x00]
+
+v_and_b32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0xfc,0x00,0x00]
+
+v_and_b32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0xfe,0x00,0x00]
+
+v_and_b32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0x00,0x01,0x00]
+
+v_and_b32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0x82,0x01,0x00]
+
+v_and_b32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0xe0,0x01,0x00]
+
+v_and_b32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0xee,0x01,0x00]
+
+v_and_b32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0x04,0x02,0x00]
+
+v_and_b32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x13,0xd1,0x80,0xfe,0x03,0x00]
+
+v_or_b32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x28]
+
+v_or_b32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x29]
+
+v_or_b32 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x28]
+
+v_or_b32 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x28]
+
+v_or_b32 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x28]
+
+v_or_b32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x28]
+
+v_or_b32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x28]
+
+v_or_b32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x28]
+
+v_or_b32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x28]
+
+v_or_b32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x28]
+
+v_or_b32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x28]
+
+v_or_b32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x28]
+
+v_or_b32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x28]
+
+v_or_b32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x28]
+
+v_or_b32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x28]
+
+v_or_b32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x28]
+
+v_or_b32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x28]
+
+v_or_b32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x28]
+
+v_or_b32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x28]
+
+v_or_b32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x28,0x56,0x34,0x12,0xaf]
+
+v_or_b32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x28,0x73,0x72,0x71,0x3f]
+
+v_or_b32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x28]
+
+v_or_b32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x28]
+
+v_or_b32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x28]
+
+v_or_b32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0x04,0x00,0x00]
+
+v_or_b32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x14,0xd1,0x80,0x04,0x00,0x00]
+
+v_or_b32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x14,0xd1,0xc1,0x04,0x00,0x00]
+
+v_or_b32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x14,0xd1,0xf0,0x04,0x00,0x00]
+
+v_or_b32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x14,0xd1,0xf7,0x04,0x00,0x00]
+
+v_or_b32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x14,0xd1,0x01,0x05,0x00,0x00]
+
+v_or_b32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x14,0xd1,0xff,0x05,0x00,0x00]
+
+v_or_b32_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0xca,0x00,0x00]
+
+v_or_b32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0xcc,0x00,0x00]
+
+v_or_b32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0xce,0x00,0x00]
+
+v_or_b32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0xd4,0x00,0x00]
+
+v_or_b32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0xd6,0x00,0x00]
+
+v_or_b32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0xd8,0x00,0x00]
+
+v_or_b32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0xda,0x00,0x00]
+
+v_or_b32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0xdc,0x00,0x00]
+
+v_or_b32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0xde,0x00,0x00]
+
+v_or_b32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0xf6,0x00,0x00]
+
+v_or_b32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0xf8,0x00,0x00]
+
+v_or_b32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0xfc,0x00,0x00]
+
+v_or_b32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0xfe,0x00,0x00]
+
+v_or_b32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0x00,0x01,0x00]
+
+v_or_b32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0x82,0x01,0x00]
+
+v_or_b32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0xe0,0x01,0x00]
+
+v_or_b32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0xee,0x01,0x00]
+
+v_or_b32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0x04,0x02,0x00]
+
+v_or_b32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x14,0xd1,0x80,0xfe,0x03,0x00]
+
+v_xor_b32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x2a]
+
+v_xor_b32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x2b]
+
+v_xor_b32 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x2a]
+
+v_xor_b32 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x2a]
+
+v_xor_b32 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x2a]
+
+v_xor_b32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x2a]
+
+v_xor_b32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x2a]
+
+v_xor_b32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x2a]
+
+v_xor_b32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x2a]
+
+v_xor_b32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x2a]
+
+v_xor_b32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x2a]
+
+v_xor_b32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x2a]
+
+v_xor_b32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x2a]
+
+v_xor_b32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x2a]
+
+v_xor_b32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x2a]
+
+v_xor_b32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x2a]
+
+v_xor_b32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x2a]
+
+v_xor_b32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x2a]
+
+v_xor_b32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x2a]
+
+v_xor_b32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x2a,0x56,0x34,0x12,0xaf]
+
+v_xor_b32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x2a,0x73,0x72,0x71,0x3f]
+
+v_xor_b32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x2a]
+
+v_xor_b32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x2a]
+
+v_xor_b32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x2a]
+
+v_xor_b32_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0x04,0x00,0x00]
+
+v_xor_b32_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x15,0xd1,0x80,0x04,0x00,0x00]
+
+v_xor_b32_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x15,0xd1,0xc1,0x04,0x00,0x00]
+
+v_xor_b32_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x15,0xd1,0xf0,0x04,0x00,0x00]
+
+v_xor_b32_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x15,0xd1,0xf7,0x04,0x00,0x00]
+
+v_xor_b32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x15,0xd1,0x01,0x05,0x00,0x00]
+
+v_xor_b32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x15,0xd1,0xff,0x05,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0xca,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0xcc,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0xce,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0xd4,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0xd6,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0xd8,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0xda,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0xdc,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0xde,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0xf6,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0xf8,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0xfc,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0xfe,0x00,0x00]
+
+v_xor_b32_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0x00,0x01,0x00]
+
+v_xor_b32_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0x82,0x01,0x00]
+
+v_xor_b32_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0xe0,0x01,0x00]
+
+v_xor_b32_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0xee,0x01,0x00]
+
+v_xor_b32_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0x04,0x02,0x00]
+
+v_xor_b32_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x15,0xd1,0x80,0xfe,0x03,0x00]
+
+v_mac_f32 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x2c]
+
+v_mac_f32 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x2d]
+
+v_mac_f32 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x2c]
+
+v_mac_f32 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x2c]
+
+v_mac_f32 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x2c]
+
+v_mac_f32 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x2c]
+
+v_mac_f32 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x2c]
+
+v_mac_f32 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x2c]
+
+v_mac_f32 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x2c]
+
+v_mac_f32 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x2c]
+
+v_mac_f32 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x2c]
+
+v_mac_f32 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x2c]
+
+v_mac_f32 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x2c]
+
+v_mac_f32 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x2c]
+
+v_mac_f32 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x2c]
+
+v_mac_f32 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x2c]
+
+v_mac_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x2c]
+
+v_mac_f32 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x2c]
+
+v_mac_f32 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x2c]
+
+v_mac_f32 v5, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x2c,0x56,0x34,0x12,0xaf]
+
+v_mac_f32 v5, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x2c,0x73,0x72,0x71,0x3f]
+
+v_mac_f32 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x2c]
+
+v_mac_f32 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x2c]
+
+v_mac_f32 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x2c]
+
+v_mac_f32_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0x05,0x00,0x00]
+
+v_mac_f32_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x16,0xd1,0x01,0x05,0x00,0x00]
+
+v_mac_f32_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x16,0xd1,0xff,0x05,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, s101
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xcb,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xcd,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xcf,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xd5,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xd7,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xd9,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xdb,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xdd,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xdf,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xf7,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xf9,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xfd,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xff,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xfb,0x01,0x00]
+
+v_mac_f32_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0x05,0x02,0x00]
+
+v_mac_f32_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0xff,0x03,0x00]
+
+v_mac_f32_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0x05,0x00,0x20]
+
+v_mac_f32_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0x05,0x00,0x40]
+
+v_mac_f32_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0x05,0x00,0x60]
+
+v_mac_f32_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x16,0xd1,0x01,0x05,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x16,0xd1,0x01,0x05,0x00,0x00]
+
+v_mac_f32_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x16,0xd1,0x01,0x05,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x16,0xd1,0x01,0x05,0x00,0x00]
+
+v_mac_f32_e64 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0x05,0x00,0x08]
+
+v_mac_f32_e64 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0x05,0x00,0x10]
+
+v_mac_f32_e64 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x16,0xd1,0x01,0x05,0x00,0x18]
+
+v_madmk_f32 v5, 0, 0x11213141, v3
+// CHECK: [0x80,0x06,0x0a,0x2e,0x41,0x31,0x21,0x11]
+
+v_madmk_f32 v255, 0, 0x11213141, v3
+// CHECK: [0x80,0x06,0xfe,0x2f,0x41,0x31,0x21,0x11]
+
+v_madmk_f32 v5, -1, 0x11213141, v3
+// CHECK: [0xc1,0x06,0x0a,0x2e,0x41,0x31,0x21,0x11]
+
+v_madmk_f32 v5, 0.5, 0x11213141, v3
+// CHECK: [0xf0,0x06,0x0a,0x2e,0x41,0x31,0x21,0x11]
+
+v_madmk_f32 v5, -4.0, 0x11213141, v3
+// CHECK: [0xf7,0x06,0x0a,0x2e,0x41,0x31,0x21,0x11]
+
+v_madmk_f32 v5, v1, 0x11213141, v3
+// CHECK: [0x01,0x07,0x0a,0x2e,0x41,0x31,0x21,0x11]
+
+v_madmk_f32 v5, v255, 0x11213141, v3
+// CHECK: [0xff,0x07,0x0a,0x2e,0x41,0x31,0x21,0x11]
+
+v_madmk_f32 v5, 0, 0xa1b1c1d1, v3
+// CHECK: [0x80,0x06,0x0a,0x2e,0xd1,0xc1,0xb1,0xa1]
+
+v_madmk_f32 v5, 0, 0x11213141, v255
+// CHECK: [0x80,0xfe,0x0b,0x2e,0x41,0x31,0x21,0x11]
+
+v_madak_f32 v5, 0, v2, 0x11213141
+// CHECK: [0x80,0x04,0x0a,0x30,0x41,0x31,0x21,0x11]
+
+v_madak_f32 v255, 0, v2, 0x11213141
+// CHECK: [0x80,0x04,0xfe,0x31,0x41,0x31,0x21,0x11]
+
+v_madak_f32 v5, -1, v2, 0x11213141
+// CHECK: [0xc1,0x04,0x0a,0x30,0x41,0x31,0x21,0x11]
+
+v_madak_f32 v5, 0.5, v2, 0x11213141
+// CHECK: [0xf0,0x04,0x0a,0x30,0x41,0x31,0x21,0x11]
+
+v_madak_f32 v5, -4.0, v2, 0x11213141
+// CHECK: [0xf7,0x04,0x0a,0x30,0x41,0x31,0x21,0x11]
+
+v_madak_f32 v5, v1, v2, 0x11213141
+// CHECK: [0x01,0x05,0x0a,0x30,0x41,0x31,0x21,0x11]
+
+v_madak_f32 v5, v255, v2, 0x11213141
+// CHECK: [0xff,0x05,0x0a,0x30,0x41,0x31,0x21,0x11]
+
+v_madak_f32 v5, 0, v255, 0x11213141
+// CHECK: [0x80,0xfe,0x0b,0x30,0x41,0x31,0x21,0x11]
+
+v_madak_f32 v5, 0, v2, 0xa1b1c1d1
+// CHECK: [0x80,0x04,0x0a,0x30,0xd1,0xc1,0xb1,0xa1]
+
+v_add_u32 v5, vcc, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x32]
+
+v_add_u32 v255, vcc, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x33]
+
+v_add_u32 v5, vcc, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x32]
+
+v_add_u32 v5, vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x32]
+
+v_add_u32 v5, vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x32]
+
+v_add_u32 v5, vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x32]
+
+v_add_u32 v5, vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x32]
+
+v_add_u32 v5, vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x32]
+
+v_add_u32 v5, vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x32]
+
+v_add_u32 v5, vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x32]
+
+v_add_u32 v5, vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x32]
+
+v_add_u32 v5, vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x32]
+
+v_add_u32 v5, vcc, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x32]
+
+v_add_u32 v5, vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x32]
+
+v_add_u32 v5, vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x32]
+
+v_add_u32 v5, vcc, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x32]
+
+v_add_u32 v5, vcc, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x32]
+
+v_add_u32 v5, vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x32]
+
+v_add_u32 v5, vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x32]
+
+v_add_u32 v5, vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x32,0x56,0x34,0x12,0xaf]
+
+v_add_u32 v5, vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x32,0x73,0x72,0x71,0x3f]
+
+v_add_u32 v5, vcc, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x32]
+
+v_add_u32 v5, vcc, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x32]
+
+v_add_u32 v5, vcc, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x32]
+
+v_add_u32_e64 v5, s[12:13], 0, s2
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0x04,0x00,0x00]
+
+v_add_u32_e64 v255, s[12:13], 0, s2
+// CHECK: [0xff,0x0c,0x19,0xd1,0x80,0x04,0x00,0x00]
+
+v_add_u32_e64 v5, s[14:15], 0, s2
+// CHECK: [0x05,0x0e,0x19,0xd1,0x80,0x04,0x00,0x00]
+
+v_add_u32_e64 v5, s[100:101], 0, s2
+// CHECK: [0x05,0x64,0x19,0xd1,0x80,0x04,0x00,0x00]
+
+v_add_u32_e64 v5, flat_scratch, 0, s2
+// CHECK: [0x05,0x66,0x19,0xd1,0x80,0x04,0x00,0x00]
+
+v_add_u32_e64 v5, vcc, 0, s2
+// CHECK: [0x05,0x6a,0x19,0xd1,0x80,0x04,0x00,0x00]
+
+v_add_u32_e64 v5, tba, 0, s2
+// CHECK: [0x05,0x6c,0x19,0xd1,0x80,0x04,0x00,0x00]
+
+v_add_u32_e64 v5, tma, 0, s2
+// CHECK: [0x05,0x6e,0x19,0xd1,0x80,0x04,0x00,0x00]
+
+v_add_u32_e64 v5, ttmp[10:11], 0, s2
+// CHECK: [0x05,0x7a,0x19,0xd1,0x80,0x04,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], -1, s2
+// CHECK: [0x05,0x0c,0x19,0xd1,0xc1,0x04,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0.5, s2
+// CHECK: [0x05,0x0c,0x19,0xd1,0xf0,0x04,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], -4.0, s2
+// CHECK: [0x05,0x0c,0x19,0xd1,0xf7,0x04,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], v1, s2
+// CHECK: [0x05,0x0c,0x19,0xd1,0x01,0x05,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], v255, s2
+// CHECK: [0x05,0x0c,0x19,0xd1,0xff,0x05,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, s101
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0xca,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, flat_scratch_lo
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0xcc,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, flat_scratch_hi
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0xce,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, vcc_lo
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0xd4,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, vcc_hi
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0xd6,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, tba_lo
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0xd8,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, tba_hi
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0xda,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, tma_lo
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0xdc,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, tma_hi
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0xde,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, ttmp11
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0xf6,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, m0
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0xf8,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, exec_lo
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0xfc,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, exec_hi
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0xfe,0x00,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, 0
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0x00,0x01,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, -1
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0x82,0x01,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, 0.5
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0xe0,0x01,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, -4.0
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0xee,0x01,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, v2
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0x04,0x02,0x00]
+
+v_add_u32_e64 v5, s[12:13], 0, v255
+// CHECK: [0x05,0x0c,0x19,0xd1,0x80,0xfe,0x03,0x00]
+
+v_sub_u32 v5, vcc, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x34]
+
+v_sub_u32 v255, vcc, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x35]
+
+v_sub_u32 v5, vcc, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x34]
+
+v_sub_u32 v5, vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x34]
+
+v_sub_u32 v5, vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x34]
+
+v_sub_u32 v5, vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x34]
+
+v_sub_u32 v5, vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x34]
+
+v_sub_u32 v5, vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x34]
+
+v_sub_u32 v5, vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x34]
+
+v_sub_u32 v5, vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x34]
+
+v_sub_u32 v5, vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x34]
+
+v_sub_u32 v5, vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x34]
+
+v_sub_u32 v5, vcc, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x34]
+
+v_sub_u32 v5, vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x34]
+
+v_sub_u32 v5, vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x34]
+
+v_sub_u32 v5, vcc, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x34]
+
+v_sub_u32 v5, vcc, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x34]
+
+v_sub_u32 v5, vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x34]
+
+v_sub_u32 v5, vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x34]
+
+v_sub_u32 v5, vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x34,0x56,0x34,0x12,0xaf]
+
+v_sub_u32 v5, vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x34,0x73,0x72,0x71,0x3f]
+
+v_sub_u32 v5, vcc, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x34]
+
+v_sub_u32 v5, vcc, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x34]
+
+v_sub_u32 v5, vcc, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x34]
+
+v_sub_u32_e64 v5, s[12:13], 0, s2
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0x04,0x00,0x00]
+
+v_sub_u32_e64 v255, s[12:13], 0, s2
+// CHECK: [0xff,0x0c,0x1a,0xd1,0x80,0x04,0x00,0x00]
+
+v_sub_u32_e64 v5, s[14:15], 0, s2
+// CHECK: [0x05,0x0e,0x1a,0xd1,0x80,0x04,0x00,0x00]
+
+v_sub_u32_e64 v5, s[100:101], 0, s2
+// CHECK: [0x05,0x64,0x1a,0xd1,0x80,0x04,0x00,0x00]
+
+v_sub_u32_e64 v5, flat_scratch, 0, s2
+// CHECK: [0x05,0x66,0x1a,0xd1,0x80,0x04,0x00,0x00]
+
+v_sub_u32_e64 v5, vcc, 0, s2
+// CHECK: [0x05,0x6a,0x1a,0xd1,0x80,0x04,0x00,0x00]
+
+v_sub_u32_e64 v5, tba, 0, s2
+// CHECK: [0x05,0x6c,0x1a,0xd1,0x80,0x04,0x00,0x00]
+
+v_sub_u32_e64 v5, tma, 0, s2
+// CHECK: [0x05,0x6e,0x1a,0xd1,0x80,0x04,0x00,0x00]
+
+v_sub_u32_e64 v5, ttmp[10:11], 0, s2
+// CHECK: [0x05,0x7a,0x1a,0xd1,0x80,0x04,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], -1, s2
+// CHECK: [0x05,0x0c,0x1a,0xd1,0xc1,0x04,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0.5, s2
+// CHECK: [0x05,0x0c,0x1a,0xd1,0xf0,0x04,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], -4.0, s2
+// CHECK: [0x05,0x0c,0x1a,0xd1,0xf7,0x04,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], v1, s2
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x01,0x05,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], v255, s2
+// CHECK: [0x05,0x0c,0x1a,0xd1,0xff,0x05,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, s101
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0xca,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, flat_scratch_lo
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0xcc,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, flat_scratch_hi
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0xce,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, vcc_lo
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0xd4,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, vcc_hi
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0xd6,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, tba_lo
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0xd8,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, tba_hi
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0xda,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, tma_lo
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0xdc,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, tma_hi
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0xde,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, ttmp11
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0xf6,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, m0
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0xf8,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, exec_lo
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0xfc,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, exec_hi
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0xfe,0x00,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, 0
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0x00,0x01,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, -1
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0x82,0x01,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, 0.5
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0xe0,0x01,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, -4.0
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0xee,0x01,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, v2
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0x04,0x02,0x00]
+
+v_sub_u32_e64 v5, s[12:13], 0, v255
+// CHECK: [0x05,0x0c,0x1a,0xd1,0x80,0xfe,0x03,0x00]
+
+v_subrev_u32 v5, vcc, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x36]
+
+v_subrev_u32 v255, vcc, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x37]
+
+v_subrev_u32 v5, vcc, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x0a,0x36,0x56,0x34,0x12,0xaf]
+
+v_subrev_u32 v5, vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x0a,0x36,0x73,0x72,0x71,0x3f]
+
+v_subrev_u32 v5, vcc, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x36]
+
+v_subrev_u32 v5, vcc, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x36]
+
+v_subrev_u32_e64 v5, s[12:13], 0, s2
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0x04,0x00,0x00]
+
+v_subrev_u32_e64 v255, s[12:13], 0, s2
+// CHECK: [0xff,0x0c,0x1b,0xd1,0x80,0x04,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[14:15], 0, s2
+// CHECK: [0x05,0x0e,0x1b,0xd1,0x80,0x04,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[100:101], 0, s2
+// CHECK: [0x05,0x64,0x1b,0xd1,0x80,0x04,0x00,0x00]
+
+v_subrev_u32_e64 v5, flat_scratch, 0, s2
+// CHECK: [0x05,0x66,0x1b,0xd1,0x80,0x04,0x00,0x00]
+
+v_subrev_u32_e64 v5, vcc, 0, s2
+// CHECK: [0x05,0x6a,0x1b,0xd1,0x80,0x04,0x00,0x00]
+
+v_subrev_u32_e64 v5, tba, 0, s2
+// CHECK: [0x05,0x6c,0x1b,0xd1,0x80,0x04,0x00,0x00]
+
+v_subrev_u32_e64 v5, tma, 0, s2
+// CHECK: [0x05,0x6e,0x1b,0xd1,0x80,0x04,0x00,0x00]
+
+v_subrev_u32_e64 v5, ttmp[10:11], 0, s2
+// CHECK: [0x05,0x7a,0x1b,0xd1,0x80,0x04,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], -1, s2
+// CHECK: [0x05,0x0c,0x1b,0xd1,0xc1,0x04,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0.5, s2
+// CHECK: [0x05,0x0c,0x1b,0xd1,0xf0,0x04,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], -4.0, s2
+// CHECK: [0x05,0x0c,0x1b,0xd1,0xf7,0x04,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], v1, s2
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x01,0x05,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], v255, s2
+// CHECK: [0x05,0x0c,0x1b,0xd1,0xff,0x05,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, s101
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0xca,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, flat_scratch_lo
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0xcc,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, flat_scratch_hi
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0xce,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, vcc_lo
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0xd4,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, vcc_hi
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0xd6,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, tba_lo
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0xd8,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, tba_hi
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0xda,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, tma_lo
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0xdc,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, tma_hi
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0xde,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, ttmp11
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0xf6,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, m0
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0xf8,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, exec_lo
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0xfc,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, exec_hi
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0xfe,0x00,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, 0
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0x00,0x01,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, -1
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0x82,0x01,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, 0.5
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0xe0,0x01,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, -4.0
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0xee,0x01,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, v2
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0x04,0x02,0x00]
+
+v_subrev_u32_e64 v5, s[12:13], 0, v255
+// CHECK: [0x05,0x0c,0x1b,0xd1,0x80,0xfe,0x03,0x00]
+
+v_addc_u32 v5, vcc, 0, v2, vcc
+// CHECK: [0x80,0x04,0x0a,0x38]
+
+v_addc_u32 v255, vcc, 0, v2, vcc
+// CHECK: [0x80,0x04,0xfe,0x39]
+
+v_addc_u32 v5, vcc, -1, v2, vcc
+// CHECK: [0xc1,0x04,0x0a,0x38]
+
+v_addc_u32 v5, vcc, 0.5, v2, vcc
+// CHECK: [0xf0,0x04,0x0a,0x38]
+
+v_addc_u32 v5, vcc, -4.0, v2, vcc
+// CHECK: [0xf7,0x04,0x0a,0x38]
+
+v_addc_u32 v5, vcc, v1, v2, vcc
+// CHECK: [0x01,0x05,0x0a,0x38]
+
+v_addc_u32 v5, vcc, v255, v2, vcc
+// CHECK: [0xff,0x05,0x0a,0x38]
+
+v_addc_u32 v5, vcc, 0, v255, vcc
+// CHECK: [0x80,0xfe,0x0b,0x38]
+
+v_addc_u32_e64 v5, s[12:13], 0, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x1c,0xd1,0x80,0x00,0x19,0x00]
+
+v_addc_u32_e64 v255, s[12:13], 0, 0, s[6:7]
+// CHECK: [0xff,0x0c,0x1c,0xd1,0x80,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, s[14:15], 0, 0, s[6:7]
+// CHECK: [0x05,0x0e,0x1c,0xd1,0x80,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, s[100:101], 0, 0, s[6:7]
+// CHECK: [0x05,0x64,0x1c,0xd1,0x80,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, flat_scratch, 0, 0, s[6:7]
+// CHECK: [0x05,0x66,0x1c,0xd1,0x80,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, vcc, 0, 0, s[6:7]
+// CHECK: [0x05,0x6a,0x1c,0xd1,0x80,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, tba, 0, 0, s[6:7]
+// CHECK: [0x05,0x6c,0x1c,0xd1,0x80,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, tma, 0, 0, s[6:7]
+// CHECK: [0x05,0x6e,0x1c,0xd1,0x80,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, ttmp[10:11], 0, 0, s[6:7]
+// CHECK: [0x05,0x7a,0x1c,0xd1,0x80,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, s[12:13], -1, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x1c,0xd1,0xc1,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, s[12:13], 0.5, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x1c,0xd1,0xf0,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, s[12:13], -4.0, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x1c,0xd1,0xf7,0x00,0x19,0x00]
+
+v_addc_u32_e64 v5, s[12:13], v1, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x1c,0xd1,0x01,0x01,0x19,0x00]
+
+v_addc_u32_e64 v5, s[12:13], v255, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x1c,0xd1,0xff,0x01,0x19,0x00]
+
+v_addc_u32_e64 v5, s[12:13], 0, -1, s[6:7]
+// CHECK: [0x05,0x0c,0x1c,0xd1,0x80,0x82,0x19,0x00]
+
+v_addc_u32_e64 v5, s[12:13], 0, 0.5, s[6:7]
+// CHECK: [0x05,0x0c,0x1c,0xd1,0x80,0xe0,0x19,0x00]
+
+v_addc_u32_e64 v5, s[12:13], 0, -4.0, s[6:7]
+// CHECK: [0x05,0x0c,0x1c,0xd1,0x80,0xee,0x19,0x00]
+
+v_addc_u32_e64 v5, s[12:13], 0, v2, s[6:7]
+// CHECK: [0x05,0x0c,0x1c,0xd1,0x80,0x04,0x1a,0x00]
+
+v_addc_u32_e64 v5, s[12:13], 0, v255, s[6:7]
+// CHECK: [0x05,0x0c,0x1c,0xd1,0x80,0xfe,0x1b,0x00]
+
+v_addc_u32_e64 v5, s[12:13], 0, 0, s[8:9]
+// CHECK: [0x05,0x0c,0x1c,0xd1,0x80,0x00,0x21,0x00]
+
+v_addc_u32_e64 v5, s[12:13], 0, 0, s[100:101]
+// CHECK: [0x05,0x0c,0x1c,0xd1,0x80,0x00,0x91,0x01]
+
+v_addc_u32_e64 v5, s[12:13], 0, 0, flat_scratch
+// CHECK: [0x05,0x0c,0x1c,0xd1,0x80,0x00,0x99,0x01]
+
+v_addc_u32_e64 v5, s[12:13], 0, 0, vcc
+// CHECK: [0x05,0x0c,0x1c,0xd1,0x80,0x00,0xa9,0x01]
+
+v_addc_u32_e64 v5, s[12:13], 0, 0, tba
+// CHECK: [0x05,0x0c,0x1c,0xd1,0x80,0x00,0xb1,0x01]
+
+v_addc_u32_e64 v5, s[12:13], 0, 0, tma
+// CHECK: [0x05,0x0c,0x1c,0xd1,0x80,0x00,0xb9,0x01]
+
+v_addc_u32_e64 v5, s[12:13], 0, 0, ttmp[10:11]
+// CHECK: [0x05,0x0c,0x1c,0xd1,0x80,0x00,0xe9,0x01]
+
+v_subb_u32 v5, vcc, 0, v2, vcc
+// CHECK: [0x80,0x04,0x0a,0x3a]
+
+v_subb_u32 v255, vcc, 0, v2, vcc
+// CHECK: [0x80,0x04,0xfe,0x3b]
+
+v_subb_u32 v5, vcc, -1, v2, vcc
+// CHECK: [0xc1,0x04,0x0a,0x3a]
+
+v_subb_u32 v5, vcc, 0.5, v2, vcc
+// CHECK: [0xf0,0x04,0x0a,0x3a]
+
+v_subb_u32 v5, vcc, -4.0, v2, vcc
+// CHECK: [0xf7,0x04,0x0a,0x3a]
+
+v_subb_u32 v5, vcc, v1, v2, vcc
+// CHECK: [0x01,0x05,0x0a,0x3a]
+
+v_subb_u32 v5, vcc, v255, v2, vcc
+// CHECK: [0xff,0x05,0x0a,0x3a]
+
+v_subb_u32 v5, vcc, 0, v255, vcc
+// CHECK: [0x80,0xfe,0x0b,0x3a]
+
+v_subb_u32_e64 v5, s[12:13], 0, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x1d,0xd1,0x80,0x00,0x19,0x00]
+
+v_subb_u32_e64 v255, s[12:13], 0, 0, s[6:7]
+// CHECK: [0xff,0x0c,0x1d,0xd1,0x80,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, s[14:15], 0, 0, s[6:7]
+// CHECK: [0x05,0x0e,0x1d,0xd1,0x80,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, s[100:101], 0, 0, s[6:7]
+// CHECK: [0x05,0x64,0x1d,0xd1,0x80,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, flat_scratch, 0, 0, s[6:7]
+// CHECK: [0x05,0x66,0x1d,0xd1,0x80,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, vcc, 0, 0, s[6:7]
+// CHECK: [0x05,0x6a,0x1d,0xd1,0x80,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, tba, 0, 0, s[6:7]
+// CHECK: [0x05,0x6c,0x1d,0xd1,0x80,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, tma, 0, 0, s[6:7]
+// CHECK: [0x05,0x6e,0x1d,0xd1,0x80,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, ttmp[10:11], 0, 0, s[6:7]
+// CHECK: [0x05,0x7a,0x1d,0xd1,0x80,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, s[12:13], -1, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x1d,0xd1,0xc1,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, s[12:13], 0.5, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x1d,0xd1,0xf0,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, s[12:13], -4.0, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x1d,0xd1,0xf7,0x00,0x19,0x00]
+
+v_subb_u32_e64 v5, s[12:13], v1, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x1d,0xd1,0x01,0x01,0x19,0x00]
+
+v_subb_u32_e64 v5, s[12:13], v255, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x1d,0xd1,0xff,0x01,0x19,0x00]
+
+v_subb_u32_e64 v5, s[12:13], 0, -1, s[6:7]
+// CHECK: [0x05,0x0c,0x1d,0xd1,0x80,0x82,0x19,0x00]
+
+v_subb_u32_e64 v5, s[12:13], 0, 0.5, s[6:7]
+// CHECK: [0x05,0x0c,0x1d,0xd1,0x80,0xe0,0x19,0x00]
+
+v_subb_u32_e64 v5, s[12:13], 0, -4.0, s[6:7]
+// CHECK: [0x05,0x0c,0x1d,0xd1,0x80,0xee,0x19,0x00]
+
+v_subb_u32_e64 v5, s[12:13], 0, v2, s[6:7]
+// CHECK: [0x05,0x0c,0x1d,0xd1,0x80,0x04,0x1a,0x00]
+
+v_subb_u32_e64 v5, s[12:13], 0, v255, s[6:7]
+// CHECK: [0x05,0x0c,0x1d,0xd1,0x80,0xfe,0x1b,0x00]
+
+v_subb_u32_e64 v5, s[12:13], 0, 0, s[8:9]
+// CHECK: [0x05,0x0c,0x1d,0xd1,0x80,0x00,0x21,0x00]
+
+v_subb_u32_e64 v5, s[12:13], 0, 0, s[100:101]
+// CHECK: [0x05,0x0c,0x1d,0xd1,0x80,0x00,0x91,0x01]
+
+v_subb_u32_e64 v5, s[12:13], 0, 0, flat_scratch
+// CHECK: [0x05,0x0c,0x1d,0xd1,0x80,0x00,0x99,0x01]
+
+v_subb_u32_e64 v5, s[12:13], 0, 0, vcc
+// CHECK: [0x05,0x0c,0x1d,0xd1,0x80,0x00,0xa9,0x01]
+
+v_subb_u32_e64 v5, s[12:13], 0, 0, tba
+// CHECK: [0x05,0x0c,0x1d,0xd1,0x80,0x00,0xb1,0x01]
+
+v_subb_u32_e64 v5, s[12:13], 0, 0, tma
+// CHECK: [0x05,0x0c,0x1d,0xd1,0x80,0x00,0xb9,0x01]
+
+v_subb_u32_e64 v5, s[12:13], 0, 0, ttmp[10:11]
+// CHECK: [0x05,0x0c,0x1d,0xd1,0x80,0x00,0xe9,0x01]
+
+v_subbrev_u32 v5, vcc, 0, v2, vcc
+// CHECK: [0x80,0x04,0x0a,0x3c]
+
+v_subbrev_u32 v255, vcc, 0, v2, vcc
+// CHECK: [0x80,0x04,0xfe,0x3d]
+
+v_subbrev_u32 v5, vcc, -1, v2, vcc
+// CHECK: [0xc1,0x04,0x0a,0x3c]
+
+v_subbrev_u32 v5, vcc, 0.5, v2, vcc
+// CHECK: [0xf0,0x04,0x0a,0x3c]
+
+v_subbrev_u32 v5, vcc, -4.0, v2, vcc
+// CHECK: [0xf7,0x04,0x0a,0x3c]
+
+v_subbrev_u32 v5, vcc, v1, v2, vcc
+// CHECK: [0x01,0x05,0x0a,0x3c]
+
+v_subbrev_u32 v5, vcc, v255, v2, vcc
+// CHECK: [0xff,0x05,0x0a,0x3c]
+
+v_subbrev_u32 v5, vcc, 0, v255, vcc
+// CHECK: [0x80,0xfe,0x0b,0x3c]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x1e,0xd1,0x80,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v255, s[12:13], 0, 0, s[6:7]
+// CHECK: [0xff,0x0c,0x1e,0xd1,0x80,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[14:15], 0, 0, s[6:7]
+// CHECK: [0x05,0x0e,0x1e,0xd1,0x80,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[100:101], 0, 0, s[6:7]
+// CHECK: [0x05,0x64,0x1e,0xd1,0x80,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, flat_scratch, 0, 0, s[6:7]
+// CHECK: [0x05,0x66,0x1e,0xd1,0x80,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, vcc, 0, 0, s[6:7]
+// CHECK: [0x05,0x6a,0x1e,0xd1,0x80,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, tba, 0, 0, s[6:7]
+// CHECK: [0x05,0x6c,0x1e,0xd1,0x80,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, tma, 0, 0, s[6:7]
+// CHECK: [0x05,0x6e,0x1e,0xd1,0x80,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, ttmp[10:11], 0, 0, s[6:7]
+// CHECK: [0x05,0x7a,0x1e,0xd1,0x80,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], -1, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x1e,0xd1,0xc1,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], 0.5, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x1e,0xd1,0xf0,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], -4.0, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x1e,0xd1,0xf7,0x00,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], v1, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x1e,0xd1,0x01,0x01,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], v255, 0, s[6:7]
+// CHECK: [0x05,0x0c,0x1e,0xd1,0xff,0x01,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, -1, s[6:7]
+// CHECK: [0x05,0x0c,0x1e,0xd1,0x80,0x82,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, 0.5, s[6:7]
+// CHECK: [0x05,0x0c,0x1e,0xd1,0x80,0xe0,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, -4.0, s[6:7]
+// CHECK: [0x05,0x0c,0x1e,0xd1,0x80,0xee,0x19,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, v2, s[6:7]
+// CHECK: [0x05,0x0c,0x1e,0xd1,0x80,0x04,0x1a,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, v255, s[6:7]
+// CHECK: [0x05,0x0c,0x1e,0xd1,0x80,0xfe,0x1b,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, 0, s[8:9]
+// CHECK: [0x05,0x0c,0x1e,0xd1,0x80,0x00,0x21,0x00]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, 0, s[100:101]
+// CHECK: [0x05,0x0c,0x1e,0xd1,0x80,0x00,0x91,0x01]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, 0, flat_scratch
+// CHECK: [0x05,0x0c,0x1e,0xd1,0x80,0x00,0x99,0x01]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, 0, vcc
+// CHECK: [0x05,0x0c,0x1e,0xd1,0x80,0x00,0xa9,0x01]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, 0, tba
+// CHECK: [0x05,0x0c,0x1e,0xd1,0x80,0x00,0xb1,0x01]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, 0, tma
+// CHECK: [0x05,0x0c,0x1e,0xd1,0x80,0x00,0xb9,0x01]
+
+v_subbrev_u32_e64 v5, s[12:13], 0, 0, ttmp[10:11]
+// CHECK: [0x05,0x0c,0x1e,0xd1,0x80,0x00,0xe9,0x01]
+
+v_add_f16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x3e]
+
+v_add_f16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x3f]
+
+v_add_f16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x3e]
+
+v_add_f16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x3e]
+
+v_add_f16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x3e]
+
+v_add_f16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x3e]
+
+v_add_f16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x3e]
+
+v_add_f16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x3e]
+
+v_add_f16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x3e]
+
+v_add_f16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x3e]
+
+v_add_f16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x3e]
+
+v_add_f16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x3e]
+
+v_add_f16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x3e]
+
+v_add_f16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x3e]
+
+v_add_f16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x3e]
+
+v_add_f16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x3e]
+
+v_add_f16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x3e]
+
+v_add_f16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x3e]
+
+v_add_f16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x3e]
+
+v_add_f16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x3e,0x0b,0xfe,0x00,0x00]
+
+v_add_f16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x3e,0x56,0x34,0x00,0x00]
+
+v_add_f16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x3e]
+
+v_add_f16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x3e]
+
+v_add_f16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x3e]
+
+v_add_f16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0x05,0x00,0x00]
+
+v_add_f16_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x1f,0xd1,0x01,0x05,0x00,0x00]
+
+v_add_f16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x1f,0xd1,0xff,0x05,0x00,0x00]
+
+v_add_f16_e64 v5, v1, s101
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xcb,0x00,0x00]
+
+v_add_f16_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xcd,0x00,0x00]
+
+v_add_f16_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xcf,0x00,0x00]
+
+v_add_f16_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xd5,0x00,0x00]
+
+v_add_f16_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xd7,0x00,0x00]
+
+v_add_f16_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xd9,0x00,0x00]
+
+v_add_f16_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xdb,0x00,0x00]
+
+v_add_f16_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xdd,0x00,0x00]
+
+v_add_f16_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xdf,0x00,0x00]
+
+v_add_f16_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xf7,0x00,0x00]
+
+v_add_f16_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xf9,0x00,0x00]
+
+v_add_f16_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xfd,0x00,0x00]
+
+v_add_f16_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xff,0x00,0x00]
+
+v_add_f16_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xfb,0x01,0x00]
+
+v_add_f16_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0x05,0x02,0x00]
+
+v_add_f16_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0xff,0x03,0x00]
+
+v_add_f16_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0x05,0x00,0x20]
+
+v_add_f16_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0x05,0x00,0x40]
+
+v_add_f16_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x1f,0xd1,0x01,0x05,0x00,0x60]
+
+v_add_f16_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x1f,0xd1,0x01,0x05,0x00,0x00]
+
+v_add_f16_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x1f,0xd1,0x01,0x05,0x00,0x00]
+
+v_add_f16_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x1f,0xd1,0x01,0x05,0x00,0x00]
+
+v_add_f16_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x1f,0xd1,0x01,0x05,0x00,0x00]
+
+v_sub_f16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x40]
+
+v_sub_f16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x41]
+
+v_sub_f16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x40]
+
+v_sub_f16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x40]
+
+v_sub_f16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x40]
+
+v_sub_f16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x40]
+
+v_sub_f16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x40]
+
+v_sub_f16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x40]
+
+v_sub_f16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x40]
+
+v_sub_f16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x40]
+
+v_sub_f16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x40]
+
+v_sub_f16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x40]
+
+v_sub_f16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x40]
+
+v_sub_f16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x40]
+
+v_sub_f16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x40]
+
+v_sub_f16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x40]
+
+v_sub_f16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x40]
+
+v_sub_f16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x40]
+
+v_sub_f16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x40]
+
+v_sub_f16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x40,0x0b,0xfe,0x00,0x00]
+
+v_sub_f16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x40,0x56,0x34,0x00,0x00]
+
+v_sub_f16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x40]
+
+v_sub_f16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x40]
+
+v_sub_f16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x40]
+
+v_sub_f16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0x05,0x00,0x00]
+
+v_sub_f16_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x20,0xd1,0x01,0x05,0x00,0x00]
+
+v_sub_f16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x20,0xd1,0xff,0x05,0x00,0x00]
+
+v_sub_f16_e64 v5, v1, s101
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xcb,0x00,0x00]
+
+v_sub_f16_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xcd,0x00,0x00]
+
+v_sub_f16_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xcf,0x00,0x00]
+
+v_sub_f16_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xd5,0x00,0x00]
+
+v_sub_f16_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xd7,0x00,0x00]
+
+v_sub_f16_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xd9,0x00,0x00]
+
+v_sub_f16_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xdb,0x00,0x00]
+
+v_sub_f16_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xdd,0x00,0x00]
+
+v_sub_f16_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xdf,0x00,0x00]
+
+v_sub_f16_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xf7,0x00,0x00]
+
+v_sub_f16_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xf9,0x00,0x00]
+
+v_sub_f16_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xfd,0x00,0x00]
+
+v_sub_f16_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xff,0x00,0x00]
+
+v_sub_f16_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xfb,0x01,0x00]
+
+v_sub_f16_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0x05,0x02,0x00]
+
+v_sub_f16_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0xff,0x03,0x00]
+
+v_sub_f16_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0x05,0x00,0x20]
+
+v_sub_f16_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0x05,0x00,0x40]
+
+v_sub_f16_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x20,0xd1,0x01,0x05,0x00,0x60]
+
+v_sub_f16_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x20,0xd1,0x01,0x05,0x00,0x00]
+
+v_sub_f16_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x20,0xd1,0x01,0x05,0x00,0x00]
+
+v_sub_f16_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x20,0xd1,0x01,0x05,0x00,0x00]
+
+v_sub_f16_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x20,0xd1,0x01,0x05,0x00,0x00]
+
+v_subrev_f16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x42]
+
+v_subrev_f16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x43]
+
+v_subrev_f16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x42]
+
+v_subrev_f16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x42]
+
+v_subrev_f16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x42]
+
+v_subrev_f16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x42]
+
+v_subrev_f16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x42]
+
+v_subrev_f16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x42]
+
+v_subrev_f16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x42]
+
+v_subrev_f16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x42]
+
+v_subrev_f16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x42]
+
+v_subrev_f16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x42]
+
+v_subrev_f16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x42]
+
+v_subrev_f16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x42]
+
+v_subrev_f16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x42]
+
+v_subrev_f16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x42]
+
+v_subrev_f16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x42]
+
+v_subrev_f16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x42]
+
+v_subrev_f16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x42]
+
+v_subrev_f16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x42,0x0b,0xfe,0x00,0x00]
+
+v_subrev_f16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x42,0x56,0x34,0x00,0x00]
+
+v_subrev_f16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x42]
+
+v_subrev_f16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x42]
+
+v_subrev_f16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x42]
+
+v_subrev_f16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0x05,0x00,0x00]
+
+v_subrev_f16_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x21,0xd1,0x01,0x05,0x00,0x00]
+
+v_subrev_f16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x21,0xd1,0xff,0x05,0x00,0x00]
+
+v_subrev_f16_e64 v5, v1, s101
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xcb,0x00,0x00]
+
+v_subrev_f16_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xcd,0x00,0x00]
+
+v_subrev_f16_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xcf,0x00,0x00]
+
+v_subrev_f16_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xd5,0x00,0x00]
+
+v_subrev_f16_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xd7,0x00,0x00]
+
+v_subrev_f16_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xd9,0x00,0x00]
+
+v_subrev_f16_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xdb,0x00,0x00]
+
+v_subrev_f16_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xdd,0x00,0x00]
+
+v_subrev_f16_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xdf,0x00,0x00]
+
+v_subrev_f16_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xf7,0x00,0x00]
+
+v_subrev_f16_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xf9,0x00,0x00]
+
+v_subrev_f16_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xfd,0x00,0x00]
+
+v_subrev_f16_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xff,0x00,0x00]
+
+v_subrev_f16_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xfb,0x01,0x00]
+
+v_subrev_f16_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0x05,0x02,0x00]
+
+v_subrev_f16_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0xff,0x03,0x00]
+
+v_subrev_f16_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0x05,0x00,0x20]
+
+v_subrev_f16_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0x05,0x00,0x40]
+
+v_subrev_f16_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x21,0xd1,0x01,0x05,0x00,0x60]
+
+v_subrev_f16_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x21,0xd1,0x01,0x05,0x00,0x00]
+
+v_subrev_f16_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x21,0xd1,0x01,0x05,0x00,0x00]
+
+v_subrev_f16_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x21,0xd1,0x01,0x05,0x00,0x00]
+
+v_subrev_f16_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x21,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_f16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x44]
+
+v_mul_f16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x45]
+
+v_mul_f16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x44]
+
+v_mul_f16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x44]
+
+v_mul_f16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x44]
+
+v_mul_f16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x44]
+
+v_mul_f16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x44]
+
+v_mul_f16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x44]
+
+v_mul_f16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x44]
+
+v_mul_f16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x44]
+
+v_mul_f16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x44]
+
+v_mul_f16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x44]
+
+v_mul_f16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x44]
+
+v_mul_f16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x44]
+
+v_mul_f16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x44]
+
+v_mul_f16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x44]
+
+v_mul_f16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x44]
+
+v_mul_f16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x44]
+
+v_mul_f16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x44]
+
+v_mul_f16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x44,0x0b,0xfe,0x00,0x00]
+
+v_mul_f16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x44,0x56,0x34,0x00,0x00]
+
+v_mul_f16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x44]
+
+v_mul_f16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x44]
+
+v_mul_f16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x44]
+
+v_mul_f16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_f16_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x22,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_f16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x22,0xd1,0xff,0x05,0x00,0x00]
+
+v_mul_f16_e64 v5, v1, s101
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xcb,0x00,0x00]
+
+v_mul_f16_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xcd,0x00,0x00]
+
+v_mul_f16_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xcf,0x00,0x00]
+
+v_mul_f16_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xd5,0x00,0x00]
+
+v_mul_f16_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xd7,0x00,0x00]
+
+v_mul_f16_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xd9,0x00,0x00]
+
+v_mul_f16_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xdb,0x00,0x00]
+
+v_mul_f16_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xdd,0x00,0x00]
+
+v_mul_f16_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xdf,0x00,0x00]
+
+v_mul_f16_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xf7,0x00,0x00]
+
+v_mul_f16_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xf9,0x00,0x00]
+
+v_mul_f16_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xfd,0x00,0x00]
+
+v_mul_f16_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xff,0x00,0x00]
+
+v_mul_f16_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xfb,0x01,0x00]
+
+v_mul_f16_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0x05,0x02,0x00]
+
+v_mul_f16_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0xff,0x03,0x00]
+
+v_mul_f16_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0x05,0x00,0x20]
+
+v_mul_f16_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0x05,0x00,0x40]
+
+v_mul_f16_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x22,0xd1,0x01,0x05,0x00,0x60]
+
+v_mul_f16_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x22,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_f16_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x22,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_f16_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x22,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_f16_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x22,0xd1,0x01,0x05,0x00,0x00]
+
+v_mac_f16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x46]
+
+v_mac_f16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x47]
+
+v_mac_f16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x46]
+
+v_mac_f16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x46]
+
+v_mac_f16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x46]
+
+v_mac_f16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x46]
+
+v_mac_f16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x46]
+
+v_mac_f16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x46]
+
+v_mac_f16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x46]
+
+v_mac_f16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x46]
+
+v_mac_f16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x46]
+
+v_mac_f16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x46]
+
+v_mac_f16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x46]
+
+v_mac_f16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x46]
+
+v_mac_f16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x46]
+
+v_mac_f16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x46]
+
+v_mac_f16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x46]
+
+v_mac_f16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x46]
+
+v_mac_f16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x46]
+
+v_mac_f16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x46,0x0b,0xfe,0x00,0x00]
+
+v_mac_f16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x46,0x56,0x34,0x00,0x00]
+
+v_mac_f16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x46]
+
+v_mac_f16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x46]
+
+v_mac_f16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x46]
+
+v_mac_f16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0x05,0x00,0x00]
+
+v_mac_f16_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x23,0xd1,0x01,0x05,0x00,0x00]
+
+v_mac_f16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x23,0xd1,0xff,0x05,0x00,0x00]
+
+v_mac_f16_e64 v5, v1, s101
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xcb,0x00,0x00]
+
+v_mac_f16_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xcd,0x00,0x00]
+
+v_mac_f16_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xcf,0x00,0x00]
+
+v_mac_f16_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xd5,0x00,0x00]
+
+v_mac_f16_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xd7,0x00,0x00]
+
+v_mac_f16_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xd9,0x00,0x00]
+
+v_mac_f16_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xdb,0x00,0x00]
+
+v_mac_f16_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xdd,0x00,0x00]
+
+v_mac_f16_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xdf,0x00,0x00]
+
+v_mac_f16_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xf7,0x00,0x00]
+
+v_mac_f16_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xf9,0x00,0x00]
+
+v_mac_f16_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xfd,0x00,0x00]
+
+v_mac_f16_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xff,0x00,0x00]
+
+v_mac_f16_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xfb,0x01,0x00]
+
+v_mac_f16_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0x05,0x02,0x00]
+
+v_mac_f16_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0xff,0x03,0x00]
+
+v_mac_f16_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0x05,0x00,0x20]
+
+v_mac_f16_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0x05,0x00,0x40]
+
+v_mac_f16_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x23,0xd1,0x01,0x05,0x00,0x60]
+
+v_mac_f16_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x23,0xd1,0x01,0x05,0x00,0x00]
+
+v_mac_f16_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x23,0xd1,0x01,0x05,0x00,0x00]
+
+v_mac_f16_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x23,0xd1,0x01,0x05,0x00,0x00]
+
+v_mac_f16_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x23,0xd1,0x01,0x05,0x00,0x00]
+
+v_madmk_f16 v5, 0, 0x1121, v3
+// CHECK: [0x80,0x06,0x0a,0x48,0x21,0x11,0x00,0x00]
+
+v_madmk_f16 v255, 0, 0x1121, v3
+// CHECK: [0x80,0x06,0xfe,0x49,0x21,0x11,0x00,0x00]
+
+v_madmk_f16 v5, -1, 0x1121, v3
+// CHECK: [0xc1,0x06,0x0a,0x48,0x21,0x11,0x00,0x00]
+
+v_madmk_f16 v5, 0.5, 0x1121, v3
+// CHECK: [0xf0,0x06,0x0a,0x48,0x21,0x11,0x00,0x00]
+
+v_madmk_f16 v5, -4.0, 0x1121, v3
+// CHECK: [0xf7,0x06,0x0a,0x48,0x21,0x11,0x00,0x00]
+
+v_madmk_f16 v5, v1, 0x1121, v3
+// CHECK: [0x01,0x07,0x0a,0x48,0x21,0x11,0x00,0x00]
+
+v_madmk_f16 v5, v255, 0x1121, v3
+// CHECK: [0xff,0x07,0x0a,0x48,0x21,0x11,0x00,0x00]
+
+v_madmk_f16 v5, 0, 0xa1b1, v3
+// CHECK: [0x80,0x06,0x0a,0x48,0xb1,0xa1,0x00,0x00]
+
+v_madmk_f16 v5, 0, 0x1121, v255
+// CHECK: [0x80,0xfe,0x0b,0x48,0x21,0x11,0x00,0x00]
+
+v_madak_f16 v5, 0, v2, 0x1121
+// CHECK: [0x80,0x04,0x0a,0x4a,0x21,0x11,0x00,0x00]
+
+v_madak_f16 v255, 0, v2, 0x1121
+// CHECK: [0x80,0x04,0xfe,0x4b,0x21,0x11,0x00,0x00]
+
+v_madak_f16 v5, -1, v2, 0x1121
+// CHECK: [0xc1,0x04,0x0a,0x4a,0x21,0x11,0x00,0x00]
+
+v_madak_f16 v5, 0.5, v2, 0x1121
+// CHECK: [0xf0,0x04,0x0a,0x4a,0x21,0x11,0x00,0x00]
+
+v_madak_f16 v5, -4.0, v2, 0x1121
+// CHECK: [0xf7,0x04,0x0a,0x4a,0x21,0x11,0x00,0x00]
+
+v_madak_f16 v5, v1, v2, 0x1121
+// CHECK: [0x01,0x05,0x0a,0x4a,0x21,0x11,0x00,0x00]
+
+v_madak_f16 v5, v255, v2, 0x1121
+// CHECK: [0xff,0x05,0x0a,0x4a,0x21,0x11,0x00,0x00]
+
+v_madak_f16 v5, 0, v255, 0x1121
+// CHECK: [0x80,0xfe,0x0b,0x4a,0x21,0x11,0x00,0x00]
+
+v_madak_f16 v5, 0, v2, 0xa1b1
+// CHECK: [0x80,0x04,0x0a,0x4a,0xb1,0xa1,0x00,0x00]
+
+v_add_u16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x4c]
+
+v_add_u16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x4d]
+
+v_add_u16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x4c]
+
+v_add_u16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x4c]
+
+v_add_u16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x4c]
+
+v_add_u16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x4c]
+
+v_add_u16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x4c]
+
+v_add_u16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x4c]
+
+v_add_u16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x4c]
+
+v_add_u16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x4c]
+
+v_add_u16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x4c]
+
+v_add_u16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x4c]
+
+v_add_u16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x4c]
+
+v_add_u16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x4c]
+
+v_add_u16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x4c]
+
+v_add_u16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x4c]
+
+v_add_u16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x4c]
+
+v_add_u16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x4c]
+
+v_add_u16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x4c]
+
+v_add_u16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x4c,0x0b,0xfe,0x00,0x00]
+
+v_add_u16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x4c,0x56,0x34,0x00,0x00]
+
+v_add_u16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x4c]
+
+v_add_u16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x4c]
+
+v_add_u16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x4c]
+
+v_add_u16_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0x04,0x00,0x00]
+
+v_add_u16_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x26,0xd1,0x80,0x04,0x00,0x00]
+
+v_add_u16_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x26,0xd1,0xc1,0x04,0x00,0x00]
+
+v_add_u16_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x26,0xd1,0xf0,0x04,0x00,0x00]
+
+v_add_u16_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x26,0xd1,0xf7,0x04,0x00,0x00]
+
+v_add_u16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x26,0xd1,0x01,0x05,0x00,0x00]
+
+v_add_u16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x26,0xd1,0xff,0x05,0x00,0x00]
+
+v_add_u16_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0xca,0x00,0x00]
+
+v_add_u16_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0xcc,0x00,0x00]
+
+v_add_u16_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0xce,0x00,0x00]
+
+v_add_u16_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0xd4,0x00,0x00]
+
+v_add_u16_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0xd6,0x00,0x00]
+
+v_add_u16_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0xd8,0x00,0x00]
+
+v_add_u16_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0xda,0x00,0x00]
+
+v_add_u16_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0xdc,0x00,0x00]
+
+v_add_u16_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0xde,0x00,0x00]
+
+v_add_u16_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0xf6,0x00,0x00]
+
+v_add_u16_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0xf8,0x00,0x00]
+
+v_add_u16_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0xfc,0x00,0x00]
+
+v_add_u16_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0xfe,0x00,0x00]
+
+v_add_u16_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0x00,0x01,0x00]
+
+v_add_u16_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0x82,0x01,0x00]
+
+v_add_u16_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0xe0,0x01,0x00]
+
+v_add_u16_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0xee,0x01,0x00]
+
+v_add_u16_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0x04,0x02,0x00]
+
+v_add_u16_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x26,0xd1,0x80,0xfe,0x03,0x00]
+
+v_sub_u16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x4e]
+
+v_sub_u16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x4f]
+
+v_sub_u16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x4e]
+
+v_sub_u16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x4e]
+
+v_sub_u16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x4e]
+
+v_sub_u16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x4e]
+
+v_sub_u16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x4e]
+
+v_sub_u16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x4e]
+
+v_sub_u16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x4e]
+
+v_sub_u16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x4e]
+
+v_sub_u16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x4e]
+
+v_sub_u16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x4e]
+
+v_sub_u16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x4e]
+
+v_sub_u16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x4e]
+
+v_sub_u16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x4e]
+
+v_sub_u16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x4e]
+
+v_sub_u16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x4e]
+
+v_sub_u16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x4e]
+
+v_sub_u16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x4e]
+
+v_sub_u16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x4e,0x0b,0xfe,0x00,0x00]
+
+v_sub_u16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x4e,0x56,0x34,0x00,0x00]
+
+v_sub_u16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x4e]
+
+v_sub_u16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x4e]
+
+v_sub_u16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x4e]
+
+v_sub_u16_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0x04,0x00,0x00]
+
+v_sub_u16_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x27,0xd1,0x80,0x04,0x00,0x00]
+
+v_sub_u16_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x27,0xd1,0xc1,0x04,0x00,0x00]
+
+v_sub_u16_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x27,0xd1,0xf0,0x04,0x00,0x00]
+
+v_sub_u16_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x27,0xd1,0xf7,0x04,0x00,0x00]
+
+v_sub_u16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x27,0xd1,0x01,0x05,0x00,0x00]
+
+v_sub_u16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x27,0xd1,0xff,0x05,0x00,0x00]
+
+v_sub_u16_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0xca,0x00,0x00]
+
+v_sub_u16_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0xcc,0x00,0x00]
+
+v_sub_u16_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0xce,0x00,0x00]
+
+v_sub_u16_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0xd4,0x00,0x00]
+
+v_sub_u16_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0xd6,0x00,0x00]
+
+v_sub_u16_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0xd8,0x00,0x00]
+
+v_sub_u16_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0xda,0x00,0x00]
+
+v_sub_u16_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0xdc,0x00,0x00]
+
+v_sub_u16_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0xde,0x00,0x00]
+
+v_sub_u16_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0xf6,0x00,0x00]
+
+v_sub_u16_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0xf8,0x00,0x00]
+
+v_sub_u16_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0xfc,0x00,0x00]
+
+v_sub_u16_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0xfe,0x00,0x00]
+
+v_sub_u16_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0x00,0x01,0x00]
+
+v_sub_u16_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0x82,0x01,0x00]
+
+v_sub_u16_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0xe0,0x01,0x00]
+
+v_sub_u16_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0xee,0x01,0x00]
+
+v_sub_u16_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0x04,0x02,0x00]
+
+v_sub_u16_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x27,0xd1,0x80,0xfe,0x03,0x00]
+
+v_subrev_u16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x50]
+
+v_subrev_u16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x51]
+
+v_subrev_u16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x50]
+
+v_subrev_u16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x50]
+
+v_subrev_u16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x50]
+
+v_subrev_u16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x50]
+
+v_subrev_u16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x50]
+
+v_subrev_u16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x50]
+
+v_subrev_u16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x50]
+
+v_subrev_u16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x50]
+
+v_subrev_u16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x50]
+
+v_subrev_u16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x50]
+
+v_subrev_u16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x50]
+
+v_subrev_u16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x50]
+
+v_subrev_u16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x50]
+
+v_subrev_u16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x50]
+
+v_subrev_u16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x50]
+
+v_subrev_u16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x50]
+
+v_subrev_u16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x50]
+
+v_subrev_u16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x50,0x0b,0xfe,0x00,0x00]
+
+v_subrev_u16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x50,0x56,0x34,0x00,0x00]
+
+v_subrev_u16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x50]
+
+v_subrev_u16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x50]
+
+v_subrev_u16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x50]
+
+v_subrev_u16_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0x04,0x00,0x00]
+
+v_subrev_u16_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x28,0xd1,0x80,0x04,0x00,0x00]
+
+v_subrev_u16_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x28,0xd1,0xc1,0x04,0x00,0x00]
+
+v_subrev_u16_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x28,0xd1,0xf0,0x04,0x00,0x00]
+
+v_subrev_u16_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x28,0xd1,0xf7,0x04,0x00,0x00]
+
+v_subrev_u16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x28,0xd1,0x01,0x05,0x00,0x00]
+
+v_subrev_u16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x28,0xd1,0xff,0x05,0x00,0x00]
+
+v_subrev_u16_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0xca,0x00,0x00]
+
+v_subrev_u16_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0xcc,0x00,0x00]
+
+v_subrev_u16_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0xce,0x00,0x00]
+
+v_subrev_u16_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0xd4,0x00,0x00]
+
+v_subrev_u16_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0xd6,0x00,0x00]
+
+v_subrev_u16_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0xd8,0x00,0x00]
+
+v_subrev_u16_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0xda,0x00,0x00]
+
+v_subrev_u16_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0xdc,0x00,0x00]
+
+v_subrev_u16_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0xde,0x00,0x00]
+
+v_subrev_u16_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0xf6,0x00,0x00]
+
+v_subrev_u16_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0xf8,0x00,0x00]
+
+v_subrev_u16_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0xfc,0x00,0x00]
+
+v_subrev_u16_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0xfe,0x00,0x00]
+
+v_subrev_u16_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0x00,0x01,0x00]
+
+v_subrev_u16_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0x82,0x01,0x00]
+
+v_subrev_u16_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0xe0,0x01,0x00]
+
+v_subrev_u16_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0xee,0x01,0x00]
+
+v_subrev_u16_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0x04,0x02,0x00]
+
+v_subrev_u16_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x28,0xd1,0x80,0xfe,0x03,0x00]
+
+v_mul_lo_u16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x52]
+
+v_mul_lo_u16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x53]
+
+v_mul_lo_u16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x52]
+
+v_mul_lo_u16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x52]
+
+v_mul_lo_u16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x52]
+
+v_mul_lo_u16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x52]
+
+v_mul_lo_u16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x52]
+
+v_mul_lo_u16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x52]
+
+v_mul_lo_u16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x52]
+
+v_mul_lo_u16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x52]
+
+v_mul_lo_u16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x52]
+
+v_mul_lo_u16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x52]
+
+v_mul_lo_u16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x52]
+
+v_mul_lo_u16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x52]
+
+v_mul_lo_u16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x52]
+
+v_mul_lo_u16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x52]
+
+v_mul_lo_u16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x52]
+
+v_mul_lo_u16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x52]
+
+v_mul_lo_u16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x52]
+
+v_mul_lo_u16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x52,0x0b,0xfe,0x00,0x00]
+
+v_mul_lo_u16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x52,0x56,0x34,0x00,0x00]
+
+v_mul_lo_u16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x52]
+
+v_mul_lo_u16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x52]
+
+v_mul_lo_u16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x52]
+
+v_mul_lo_u16_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0x04,0x00,0x00]
+
+v_mul_lo_u16_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x29,0xd1,0x80,0x04,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x29,0xd1,0xc1,0x04,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x29,0xd1,0xf0,0x04,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x29,0xd1,0xf7,0x04,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x29,0xd1,0x01,0x05,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x29,0xd1,0xff,0x05,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0xca,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0xcc,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0xce,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0xd4,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0xd6,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0xd8,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0xda,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0xdc,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0xde,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0xf6,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0xf8,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0xfc,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0xfe,0x00,0x00]
+
+v_mul_lo_u16_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0x00,0x01,0x00]
+
+v_mul_lo_u16_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0x82,0x01,0x00]
+
+v_mul_lo_u16_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0xe0,0x01,0x00]
+
+v_mul_lo_u16_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0xee,0x01,0x00]
+
+v_mul_lo_u16_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0x04,0x02,0x00]
+
+v_mul_lo_u16_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x29,0xd1,0x80,0xfe,0x03,0x00]
+
+v_lshlrev_b16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x54]
+
+v_lshlrev_b16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x55]
+
+v_lshlrev_b16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x54]
+
+v_lshlrev_b16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x54]
+
+v_lshlrev_b16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x54]
+
+v_lshlrev_b16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x54]
+
+v_lshlrev_b16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x54]
+
+v_lshlrev_b16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x54]
+
+v_lshlrev_b16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x54]
+
+v_lshlrev_b16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x54]
+
+v_lshlrev_b16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x54]
+
+v_lshlrev_b16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x54]
+
+v_lshlrev_b16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x54]
+
+v_lshlrev_b16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x54]
+
+v_lshlrev_b16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x54]
+
+v_lshlrev_b16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x54]
+
+v_lshlrev_b16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x54]
+
+v_lshlrev_b16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x54]
+
+v_lshlrev_b16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x54]
+
+v_lshlrev_b16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x54,0x0b,0xfe,0x00,0x00]
+
+v_lshlrev_b16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x54,0x56,0x34,0x00,0x00]
+
+v_lshlrev_b16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x54]
+
+v_lshlrev_b16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x54]
+
+v_lshlrev_b16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x54]
+
+v_lshlrev_b16_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0x04,0x00,0x00]
+
+v_lshlrev_b16_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x2a,0xd1,0x80,0x04,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x2a,0xd1,0xc1,0x04,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x2a,0xd1,0xf0,0x04,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x2a,0xd1,0xf7,0x04,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x2a,0xd1,0x01,0x05,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x2a,0xd1,0xff,0x05,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0xca,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0xcc,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0xce,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0xd4,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0xd6,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0xd8,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0xda,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0xdc,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0xde,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0xf6,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0xf8,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0xfc,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0xfe,0x00,0x00]
+
+v_lshlrev_b16_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0x00,0x01,0x00]
+
+v_lshlrev_b16_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0x82,0x01,0x00]
+
+v_lshlrev_b16_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0xe0,0x01,0x00]
+
+v_lshlrev_b16_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0xee,0x01,0x00]
+
+v_lshlrev_b16_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0x04,0x02,0x00]
+
+v_lshlrev_b16_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x2a,0xd1,0x80,0xfe,0x03,0x00]
+
+v_lshrrev_b16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x56]
+
+v_lshrrev_b16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x57]
+
+v_lshrrev_b16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x56]
+
+v_lshrrev_b16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x56]
+
+v_lshrrev_b16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x56]
+
+v_lshrrev_b16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x56]
+
+v_lshrrev_b16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x56]
+
+v_lshrrev_b16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x56]
+
+v_lshrrev_b16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x56]
+
+v_lshrrev_b16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x56]
+
+v_lshrrev_b16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x56]
+
+v_lshrrev_b16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x56]
+
+v_lshrrev_b16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x56]
+
+v_lshrrev_b16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x56]
+
+v_lshrrev_b16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x56]
+
+v_lshrrev_b16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x56]
+
+v_lshrrev_b16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x56]
+
+v_lshrrev_b16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x56]
+
+v_lshrrev_b16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x56]
+
+v_lshrrev_b16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x56,0x0b,0xfe,0x00,0x00]
+
+v_lshrrev_b16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x56,0x56,0x34,0x00,0x00]
+
+v_lshrrev_b16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x56]
+
+v_lshrrev_b16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x56]
+
+v_lshrrev_b16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x56]
+
+v_lshrrev_b16_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0x04,0x00,0x00]
+
+v_lshrrev_b16_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x2b,0xd1,0x80,0x04,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x2b,0xd1,0xc1,0x04,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x2b,0xd1,0xf0,0x04,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x2b,0xd1,0xf7,0x04,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x2b,0xd1,0x01,0x05,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x2b,0xd1,0xff,0x05,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0xca,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0xcc,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0xce,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0xd4,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0xd6,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0xd8,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0xda,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0xdc,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0xde,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0xf6,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0xf8,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0xfc,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0xfe,0x00,0x00]
+
+v_lshrrev_b16_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0x00,0x01,0x00]
+
+v_lshrrev_b16_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0x82,0x01,0x00]
+
+v_lshrrev_b16_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0xe0,0x01,0x00]
+
+v_lshrrev_b16_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0xee,0x01,0x00]
+
+v_lshrrev_b16_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0x04,0x02,0x00]
+
+v_lshrrev_b16_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x2b,0xd1,0x80,0xfe,0x03,0x00]
+
+v_ashrrev_i16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x58]
+
+v_ashrrev_i16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x59]
+
+v_ashrrev_i16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x58]
+
+v_ashrrev_i16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x58]
+
+v_ashrrev_i16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x58]
+
+v_ashrrev_i16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x58]
+
+v_ashrrev_i16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x58]
+
+v_ashrrev_i16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x58]
+
+v_ashrrev_i16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x58]
+
+v_ashrrev_i16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x58]
+
+v_ashrrev_i16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x58]
+
+v_ashrrev_i16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x58]
+
+v_ashrrev_i16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x58]
+
+v_ashrrev_i16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x58]
+
+v_ashrrev_i16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x58]
+
+v_ashrrev_i16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x58]
+
+v_ashrrev_i16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x58]
+
+v_ashrrev_i16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x58]
+
+v_ashrrev_i16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x58]
+
+v_ashrrev_i16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x58,0x0b,0xfe,0x00,0x00]
+
+v_ashrrev_i16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x58,0x56,0x34,0x00,0x00]
+
+v_ashrrev_i16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x58]
+
+v_ashrrev_i16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x58]
+
+v_ashrrev_i16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x58]
+
+v_ashrrev_i16_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0x04,0x00,0x00]
+
+v_ashrrev_i16_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x2c,0xd1,0x80,0x04,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x2c,0xd1,0xc1,0x04,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x2c,0xd1,0xf0,0x04,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x2c,0xd1,0xf7,0x04,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x2c,0xd1,0x01,0x05,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x2c,0xd1,0xff,0x05,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0xca,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0xcc,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0xce,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0xd4,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0xd6,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0xd8,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0xda,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0xdc,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0xde,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0xf6,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0xf8,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0xfc,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0xfe,0x00,0x00]
+
+v_ashrrev_i16_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0x00,0x01,0x00]
+
+v_ashrrev_i16_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0x82,0x01,0x00]
+
+v_ashrrev_i16_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0xe0,0x01,0x00]
+
+v_ashrrev_i16_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0xee,0x01,0x00]
+
+v_ashrrev_i16_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0x04,0x02,0x00]
+
+v_ashrrev_i16_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x2c,0xd1,0x80,0xfe,0x03,0x00]
+
+v_max_f16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x5a]
+
+v_max_f16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x5b]
+
+v_max_f16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x5a]
+
+v_max_f16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x5a]
+
+v_max_f16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x5a]
+
+v_max_f16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x5a]
+
+v_max_f16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x5a]
+
+v_max_f16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x5a]
+
+v_max_f16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x5a]
+
+v_max_f16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x5a]
+
+v_max_f16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x5a]
+
+v_max_f16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x5a]
+
+v_max_f16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x5a]
+
+v_max_f16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x5a]
+
+v_max_f16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x5a]
+
+v_max_f16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x5a]
+
+v_max_f16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x5a]
+
+v_max_f16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x5a]
+
+v_max_f16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x5a]
+
+v_max_f16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x5a,0x0b,0xfe,0x00,0x00]
+
+v_max_f16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x5a,0x56,0x34,0x00,0x00]
+
+v_max_f16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x5a]
+
+v_max_f16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x5a]
+
+v_max_f16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x5a]
+
+v_max_f16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0x05,0x00,0x00]
+
+v_max_f16_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x2d,0xd1,0x01,0x05,0x00,0x00]
+
+v_max_f16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x2d,0xd1,0xff,0x05,0x00,0x00]
+
+v_max_f16_e64 v5, v1, s101
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xcb,0x00,0x00]
+
+v_max_f16_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xcd,0x00,0x00]
+
+v_max_f16_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xcf,0x00,0x00]
+
+v_max_f16_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xd5,0x00,0x00]
+
+v_max_f16_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xd7,0x00,0x00]
+
+v_max_f16_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xd9,0x00,0x00]
+
+v_max_f16_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xdb,0x00,0x00]
+
+v_max_f16_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xdd,0x00,0x00]
+
+v_max_f16_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xdf,0x00,0x00]
+
+v_max_f16_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xf7,0x00,0x00]
+
+v_max_f16_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xf9,0x00,0x00]
+
+v_max_f16_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xfd,0x00,0x00]
+
+v_max_f16_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xff,0x00,0x00]
+
+v_max_f16_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xfb,0x01,0x00]
+
+v_max_f16_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0x05,0x02,0x00]
+
+v_max_f16_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0xff,0x03,0x00]
+
+v_max_f16_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0x05,0x00,0x20]
+
+v_max_f16_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0x05,0x00,0x40]
+
+v_max_f16_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x2d,0xd1,0x01,0x05,0x00,0x60]
+
+v_max_f16_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x2d,0xd1,0x01,0x05,0x00,0x00]
+
+v_max_f16_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x2d,0xd1,0x01,0x05,0x00,0x00]
+
+v_max_f16_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x2d,0xd1,0x01,0x05,0x00,0x00]
+
+v_max_f16_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x2d,0xd1,0x01,0x05,0x00,0x00]
+
+v_min_f16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x5c]
+
+v_min_f16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x5d]
+
+v_min_f16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x5c]
+
+v_min_f16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x5c]
+
+v_min_f16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x5c]
+
+v_min_f16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x5c]
+
+v_min_f16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x5c]
+
+v_min_f16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x5c]
+
+v_min_f16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x5c]
+
+v_min_f16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x5c]
+
+v_min_f16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x5c]
+
+v_min_f16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x5c]
+
+v_min_f16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x5c]
+
+v_min_f16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x5c]
+
+v_min_f16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x5c]
+
+v_min_f16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x5c]
+
+v_min_f16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x5c]
+
+v_min_f16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x5c]
+
+v_min_f16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x5c]
+
+v_min_f16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x5c,0x0b,0xfe,0x00,0x00]
+
+v_min_f16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x5c,0x56,0x34,0x00,0x00]
+
+v_min_f16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x5c]
+
+v_min_f16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x5c]
+
+v_min_f16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x5c]
+
+v_min_f16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0x05,0x00,0x00]
+
+v_min_f16_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x2e,0xd1,0x01,0x05,0x00,0x00]
+
+v_min_f16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x2e,0xd1,0xff,0x05,0x00,0x00]
+
+v_min_f16_e64 v5, v1, s101
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xcb,0x00,0x00]
+
+v_min_f16_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xcd,0x00,0x00]
+
+v_min_f16_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xcf,0x00,0x00]
+
+v_min_f16_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xd5,0x00,0x00]
+
+v_min_f16_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xd7,0x00,0x00]
+
+v_min_f16_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xd9,0x00,0x00]
+
+v_min_f16_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xdb,0x00,0x00]
+
+v_min_f16_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xdd,0x00,0x00]
+
+v_min_f16_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xdf,0x00,0x00]
+
+v_min_f16_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xf7,0x00,0x00]
+
+v_min_f16_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xf9,0x00,0x00]
+
+v_min_f16_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xfd,0x00,0x00]
+
+v_min_f16_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xff,0x00,0x00]
+
+v_min_f16_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xfb,0x01,0x00]
+
+v_min_f16_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0x05,0x02,0x00]
+
+v_min_f16_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0xff,0x03,0x00]
+
+v_min_f16_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0x05,0x00,0x20]
+
+v_min_f16_e64 v5, v1, -s2
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0x05,0x00,0x40]
+
+v_min_f16_e64 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x2e,0xd1,0x01,0x05,0x00,0x60]
+
+v_min_f16_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x2e,0xd1,0x01,0x05,0x00,0x00]
+
+v_min_f16_e64 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x2e,0xd1,0x01,0x05,0x00,0x00]
+
+v_min_f16_e64 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x2e,0xd1,0x01,0x05,0x00,0x00]
+
+v_min_f16_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x2e,0xd1,0x01,0x05,0x00,0x00]
+
+v_max_u16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x5e]
+
+v_max_u16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x5f]
+
+v_max_u16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x5e]
+
+v_max_u16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x5e]
+
+v_max_u16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x5e]
+
+v_max_u16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x5e]
+
+v_max_u16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x5e]
+
+v_max_u16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x5e]
+
+v_max_u16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x5e]
+
+v_max_u16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x5e]
+
+v_max_u16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x5e]
+
+v_max_u16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x5e]
+
+v_max_u16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x5e]
+
+v_max_u16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x5e]
+
+v_max_u16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x5e]
+
+v_max_u16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x5e]
+
+v_max_u16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x5e]
+
+v_max_u16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x5e]
+
+v_max_u16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x5e]
+
+v_max_u16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x5e,0x0b,0xfe,0x00,0x00]
+
+v_max_u16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x5e,0x56,0x34,0x00,0x00]
+
+v_max_u16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x5e]
+
+v_max_u16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x5e]
+
+v_max_u16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x5e]
+
+v_max_u16_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0x04,0x00,0x00]
+
+v_max_u16_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x2f,0xd1,0x80,0x04,0x00,0x00]
+
+v_max_u16_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x2f,0xd1,0xc1,0x04,0x00,0x00]
+
+v_max_u16_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x2f,0xd1,0xf0,0x04,0x00,0x00]
+
+v_max_u16_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x2f,0xd1,0xf7,0x04,0x00,0x00]
+
+v_max_u16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x2f,0xd1,0x01,0x05,0x00,0x00]
+
+v_max_u16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x2f,0xd1,0xff,0x05,0x00,0x00]
+
+v_max_u16_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0xca,0x00,0x00]
+
+v_max_u16_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0xcc,0x00,0x00]
+
+v_max_u16_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0xce,0x00,0x00]
+
+v_max_u16_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0xd4,0x00,0x00]
+
+v_max_u16_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0xd6,0x00,0x00]
+
+v_max_u16_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0xd8,0x00,0x00]
+
+v_max_u16_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0xda,0x00,0x00]
+
+v_max_u16_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0xdc,0x00,0x00]
+
+v_max_u16_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0xde,0x00,0x00]
+
+v_max_u16_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0xf6,0x00,0x00]
+
+v_max_u16_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0xf8,0x00,0x00]
+
+v_max_u16_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0xfc,0x00,0x00]
+
+v_max_u16_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0xfe,0x00,0x00]
+
+v_max_u16_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0x00,0x01,0x00]
+
+v_max_u16_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0x82,0x01,0x00]
+
+v_max_u16_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0xe0,0x01,0x00]
+
+v_max_u16_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0xee,0x01,0x00]
+
+v_max_u16_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0x04,0x02,0x00]
+
+v_max_u16_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x2f,0xd1,0x80,0xfe,0x03,0x00]
+
+v_max_i16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x60]
+
+v_max_i16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x61]
+
+v_max_i16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x60]
+
+v_max_i16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x60]
+
+v_max_i16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x60]
+
+v_max_i16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x60]
+
+v_max_i16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x60]
+
+v_max_i16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x60]
+
+v_max_i16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x60]
+
+v_max_i16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x60]
+
+v_max_i16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x60]
+
+v_max_i16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x60]
+
+v_max_i16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x60]
+
+v_max_i16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x60]
+
+v_max_i16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x60]
+
+v_max_i16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x60]
+
+v_max_i16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x60]
+
+v_max_i16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x60]
+
+v_max_i16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x60]
+
+v_max_i16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x60,0x0b,0xfe,0x00,0x00]
+
+v_max_i16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x60,0x56,0x34,0x00,0x00]
+
+v_max_i16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x60]
+
+v_max_i16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x60]
+
+v_max_i16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x60]
+
+v_max_i16_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0x04,0x00,0x00]
+
+v_max_i16_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x30,0xd1,0x80,0x04,0x00,0x00]
+
+v_max_i16_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x30,0xd1,0xc1,0x04,0x00,0x00]
+
+v_max_i16_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x30,0xd1,0xf0,0x04,0x00,0x00]
+
+v_max_i16_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x30,0xd1,0xf7,0x04,0x00,0x00]
+
+v_max_i16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x30,0xd1,0x01,0x05,0x00,0x00]
+
+v_max_i16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x30,0xd1,0xff,0x05,0x00,0x00]
+
+v_max_i16_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0xca,0x00,0x00]
+
+v_max_i16_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0xcc,0x00,0x00]
+
+v_max_i16_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0xce,0x00,0x00]
+
+v_max_i16_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0xd4,0x00,0x00]
+
+v_max_i16_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0xd6,0x00,0x00]
+
+v_max_i16_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0xd8,0x00,0x00]
+
+v_max_i16_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0xda,0x00,0x00]
+
+v_max_i16_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0xdc,0x00,0x00]
+
+v_max_i16_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0xde,0x00,0x00]
+
+v_max_i16_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0xf6,0x00,0x00]
+
+v_max_i16_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0xf8,0x00,0x00]
+
+v_max_i16_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0xfc,0x00,0x00]
+
+v_max_i16_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0xfe,0x00,0x00]
+
+v_max_i16_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0x00,0x01,0x00]
+
+v_max_i16_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0x82,0x01,0x00]
+
+v_max_i16_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0xe0,0x01,0x00]
+
+v_max_i16_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0xee,0x01,0x00]
+
+v_max_i16_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0x04,0x02,0x00]
+
+v_max_i16_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x30,0xd1,0x80,0xfe,0x03,0x00]
+
+v_min_u16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x62]
+
+v_min_u16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x63]
+
+v_min_u16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x62]
+
+v_min_u16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x62]
+
+v_min_u16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x62]
+
+v_min_u16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x62]
+
+v_min_u16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x62]
+
+v_min_u16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x62]
+
+v_min_u16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x62]
+
+v_min_u16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x62]
+
+v_min_u16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x62]
+
+v_min_u16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x62]
+
+v_min_u16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x62]
+
+v_min_u16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x62]
+
+v_min_u16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x62]
+
+v_min_u16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x62]
+
+v_min_u16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x62]
+
+v_min_u16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x62]
+
+v_min_u16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x62]
+
+v_min_u16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x62,0x0b,0xfe,0x00,0x00]
+
+v_min_u16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x62,0x56,0x34,0x00,0x00]
+
+v_min_u16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x62]
+
+v_min_u16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x62]
+
+v_min_u16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x62]
+
+v_min_u16_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0x04,0x00,0x00]
+
+v_min_u16_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x31,0xd1,0x80,0x04,0x00,0x00]
+
+v_min_u16_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x31,0xd1,0xc1,0x04,0x00,0x00]
+
+v_min_u16_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x31,0xd1,0xf0,0x04,0x00,0x00]
+
+v_min_u16_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x31,0xd1,0xf7,0x04,0x00,0x00]
+
+v_min_u16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x31,0xd1,0x01,0x05,0x00,0x00]
+
+v_min_u16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x31,0xd1,0xff,0x05,0x00,0x00]
+
+v_min_u16_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0xca,0x00,0x00]
+
+v_min_u16_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0xcc,0x00,0x00]
+
+v_min_u16_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0xce,0x00,0x00]
+
+v_min_u16_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0xd4,0x00,0x00]
+
+v_min_u16_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0xd6,0x00,0x00]
+
+v_min_u16_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0xd8,0x00,0x00]
+
+v_min_u16_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0xda,0x00,0x00]
+
+v_min_u16_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0xdc,0x00,0x00]
+
+v_min_u16_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0xde,0x00,0x00]
+
+v_min_u16_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0xf6,0x00,0x00]
+
+v_min_u16_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0xf8,0x00,0x00]
+
+v_min_u16_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0xfc,0x00,0x00]
+
+v_min_u16_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0xfe,0x00,0x00]
+
+v_min_u16_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0x00,0x01,0x00]
+
+v_min_u16_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0x82,0x01,0x00]
+
+v_min_u16_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0xe0,0x01,0x00]
+
+v_min_u16_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0xee,0x01,0x00]
+
+v_min_u16_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0x04,0x02,0x00]
+
+v_min_u16_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x31,0xd1,0x80,0xfe,0x03,0x00]
+
+v_min_i16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x64]
+
+v_min_i16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x65]
+
+v_min_i16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x64]
+
+v_min_i16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x64]
+
+v_min_i16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x64]
+
+v_min_i16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x64]
+
+v_min_i16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x64]
+
+v_min_i16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x64]
+
+v_min_i16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x64]
+
+v_min_i16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x64]
+
+v_min_i16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x64]
+
+v_min_i16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x64]
+
+v_min_i16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x64]
+
+v_min_i16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x64]
+
+v_min_i16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x64]
+
+v_min_i16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x64]
+
+v_min_i16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x64]
+
+v_min_i16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x64]
+
+v_min_i16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x64]
+
+v_min_i16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x64,0x0b,0xfe,0x00,0x00]
+
+v_min_i16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x64,0x56,0x34,0x00,0x00]
+
+v_min_i16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x64]
+
+v_min_i16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x64]
+
+v_min_i16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x64]
+
+v_min_i16_e64 v5, 0, s2
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0x04,0x00,0x00]
+
+v_min_i16_e64 v255, 0, s2
+// CHECK: [0xff,0x00,0x32,0xd1,0x80,0x04,0x00,0x00]
+
+v_min_i16_e64 v5, -1, s2
+// CHECK: [0x05,0x00,0x32,0xd1,0xc1,0x04,0x00,0x00]
+
+v_min_i16_e64 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x32,0xd1,0xf0,0x04,0x00,0x00]
+
+v_min_i16_e64 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x32,0xd1,0xf7,0x04,0x00,0x00]
+
+v_min_i16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x32,0xd1,0x01,0x05,0x00,0x00]
+
+v_min_i16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x32,0xd1,0xff,0x05,0x00,0x00]
+
+v_min_i16_e64 v5, 0, s101
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0xca,0x00,0x00]
+
+v_min_i16_e64 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0xcc,0x00,0x00]
+
+v_min_i16_e64 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0xce,0x00,0x00]
+
+v_min_i16_e64 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0xd4,0x00,0x00]
+
+v_min_i16_e64 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0xd6,0x00,0x00]
+
+v_min_i16_e64 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0xd8,0x00,0x00]
+
+v_min_i16_e64 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0xda,0x00,0x00]
+
+v_min_i16_e64 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0xdc,0x00,0x00]
+
+v_min_i16_e64 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0xde,0x00,0x00]
+
+v_min_i16_e64 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0xf6,0x00,0x00]
+
+v_min_i16_e64 v5, 0, m0
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0xf8,0x00,0x00]
+
+v_min_i16_e64 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0xfc,0x00,0x00]
+
+v_min_i16_e64 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0xfe,0x00,0x00]
+
+v_min_i16_e64 v5, 0, 0
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0x00,0x01,0x00]
+
+v_min_i16_e64 v5, 0, -1
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0x82,0x01,0x00]
+
+v_min_i16_e64 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0xe0,0x01,0x00]
+
+v_min_i16_e64 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0xee,0x01,0x00]
+
+v_min_i16_e64 v5, 0, v2
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0x04,0x02,0x00]
+
+v_min_i16_e64 v5, 0, v255
+// CHECK: [0x05,0x00,0x32,0xd1,0x80,0xfe,0x03,0x00]
+
+v_ldexp_f16 v5, s1, v2
+// CHECK: [0x01,0x04,0x0a,0x66]
+
+v_ldexp_f16 v255, s1, v2
+// CHECK: [0x01,0x04,0xfe,0x67]
+
+v_ldexp_f16 v5, s101, v2
+// CHECK: [0x65,0x04,0x0a,0x66]
+
+v_ldexp_f16 v5, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x0a,0x66]
+
+v_ldexp_f16 v5, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x0a,0x66]
+
+v_ldexp_f16 v5, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x0a,0x66]
+
+v_ldexp_f16 v5, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x0a,0x66]
+
+v_ldexp_f16 v5, tba_lo, v2
+// CHECK: [0x6c,0x04,0x0a,0x66]
+
+v_ldexp_f16 v5, tba_hi, v2
+// CHECK: [0x6d,0x04,0x0a,0x66]
+
+v_ldexp_f16 v5, tma_lo, v2
+// CHECK: [0x6e,0x04,0x0a,0x66]
+
+v_ldexp_f16 v5, tma_hi, v2
+// CHECK: [0x6f,0x04,0x0a,0x66]
+
+v_ldexp_f16 v5, ttmp11, v2
+// CHECK: [0x7b,0x04,0x0a,0x66]
+
+v_ldexp_f16 v5, m0, v2
+// CHECK: [0x7c,0x04,0x0a,0x66]
+
+v_ldexp_f16 v5, exec_lo, v2
+// CHECK: [0x7e,0x04,0x0a,0x66]
+
+v_ldexp_f16 v5, exec_hi, v2
+// CHECK: [0x7f,0x04,0x0a,0x66]
+
+v_ldexp_f16 v5, 0, v2
+// CHECK: [0x80,0x04,0x0a,0x66]
+
+v_ldexp_f16 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x66]
+
+v_ldexp_f16 v5, 0.5, v2
+// CHECK: [0xf0,0x04,0x0a,0x66]
+
+v_ldexp_f16 v5, -4.0, v2
+// CHECK: [0xf7,0x04,0x0a,0x66]
+
+v_ldexp_f16 v5, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x0a,0x66,0x0b,0xfe,0x00,0x00]
+
+v_ldexp_f16 v5, 0x3456, v2
+// CHECK: [0xff,0x04,0x0a,0x66,0x56,0x34,0x00,0x00]
+
+v_ldexp_f16 v5, v1, v2
+// CHECK: [0x01,0x05,0x0a,0x66]
+
+v_ldexp_f16 v5, v255, v2
+// CHECK: [0xff,0x05,0x0a,0x66]
+
+v_ldexp_f16 v5, s1, v255
+// CHECK: [0x01,0xfe,0x0b,0x66]
+
+v_ldexp_f16_e64 v5, v1, s2
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0x05,0x00,0x00]
+
+v_ldexp_f16_e64 v255, v1, s2
+// CHECK: [0xff,0x00,0x33,0xd1,0x01,0x05,0x00,0x00]
+
+v_ldexp_f16_e64 v5, v255, s2
+// CHECK: [0x05,0x00,0x33,0xd1,0xff,0x05,0x00,0x00]
+
+v_ldexp_f16_e64 v5, v1, s101
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xcb,0x00,0x00]
+
+v_ldexp_f16_e64 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xcd,0x00,0x00]
+
+v_ldexp_f16_e64 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xcf,0x00,0x00]
+
+v_ldexp_f16_e64 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xd5,0x00,0x00]
+
+v_ldexp_f16_e64 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xd7,0x00,0x00]
+
+v_ldexp_f16_e64 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xd9,0x00,0x00]
+
+v_ldexp_f16_e64 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xdb,0x00,0x00]
+
+v_ldexp_f16_e64 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xdd,0x00,0x00]
+
+v_ldexp_f16_e64 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xdf,0x00,0x00]
+
+v_ldexp_f16_e64 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xf7,0x00,0x00]
+
+v_ldexp_f16_e64 v5, v1, m0
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xf9,0x00,0x00]
+
+v_ldexp_f16_e64 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xfd,0x00,0x00]
+
+v_ldexp_f16_e64 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xff,0x00,0x00]
+
+v_ldexp_f16_e64 v5, v1, 0
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0x01,0x01,0x00]
+
+v_ldexp_f16_e64 v5, v1, -1
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0x83,0x01,0x00]
+
+v_ldexp_f16_e64 v5, v1, 0.5
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xe1,0x01,0x00]
+
+v_ldexp_f16_e64 v5, v1, -4.0
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xef,0x01,0x00]
+
+v_ldexp_f16_e64 v5, v1, scc
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xfb,0x01,0x00]
+
+v_ldexp_f16_e64 v5, v1, v2
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0x05,0x02,0x00]
+
+v_ldexp_f16_e64 v5, v1, v255
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0xff,0x03,0x00]
+
+v_ldexp_f16_e64 v5, -v1, s2
+// CHECK: [0x05,0x00,0x33,0xd1,0x01,0x05,0x00,0x20]
+
+v_ldexp_f16_e64 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x33,0xd1,0x01,0x05,0x00,0x00]
+
+v_ldexp_f16_e64 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x33,0xd1,0x01,0x05,0x00,0x00]
+
+v_mad_legacy_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xc0,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, s101, v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x65,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x66,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x67,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x6a,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x6b,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x6c,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x6d,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x6e,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x6f,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x7b,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x7c,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x7e,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x7f,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0xfd,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x05,0x0e,0x04]
+
+v_mad_legacy_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0xff,0x05,0x0e,0x04]
+
+v_mad_legacy_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0xfe,0x0f,0x04]
+
+v_mad_legacy_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0xfe,0x07]
+
+v_mad_legacy_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0x0e,0x24]
+
+v_mad_legacy_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0x0e,0x44]
+
+v_mad_legacy_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0x0e,0x84]
+
+v_mad_legacy_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0x0e,0xe4]
+
+v_mad_legacy_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xc0,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xc0,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xc0,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xc0,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x80,0xc0,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_legacy_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0x0e,0x0c]
+
+v_mad_legacy_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0x0e,0x14]
+
+v_mad_legacy_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0xc0,0xd1,0x01,0x04,0x0e,0x1c]
+
+v_mad_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xc1,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_f32 v5, s101, v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x65,0x04,0x0e,0x04]
+
+v_mad_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x66,0x04,0x0e,0x04]
+
+v_mad_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x67,0x04,0x0e,0x04]
+
+v_mad_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x6a,0x04,0x0e,0x04]
+
+v_mad_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x6b,0x04,0x0e,0x04]
+
+v_mad_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x6c,0x04,0x0e,0x04]
+
+v_mad_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x6d,0x04,0x0e,0x04]
+
+v_mad_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x6e,0x04,0x0e,0x04]
+
+v_mad_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x6f,0x04,0x0e,0x04]
+
+v_mad_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x7b,0x04,0x0e,0x04]
+
+v_mad_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x7c,0x04,0x0e,0x04]
+
+v_mad_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x7e,0x04,0x0e,0x04]
+
+v_mad_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x7f,0x04,0x0e,0x04]
+
+v_mad_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0xfd,0x04,0x0e,0x04]
+
+v_mad_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x05,0x0e,0x04]
+
+v_mad_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0xff,0x05,0x0e,0x04]
+
+v_mad_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0xfe,0x0f,0x04]
+
+v_mad_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0xfe,0x07]
+
+v_mad_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0x0e,0x24]
+
+v_mad_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0x0e,0x44]
+
+v_mad_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0x0e,0x84]
+
+v_mad_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0x0e,0xe4]
+
+v_mad_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xc1,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xc1,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xc1,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xc1,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x80,0xc1,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0x0e,0x0c]
+
+v_mad_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0x0e,0x14]
+
+v_mad_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0xc1,0xd1,0x01,0x04,0x0e,0x1c]
+
+v_mad_i32_i24 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x01,0x00,0x01,0x02]
+
+v_mad_i32_i24 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xc2,0xd1,0x01,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x65,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x66,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x67,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x6a,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x6b,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x6c,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x6d,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x6e,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x6f,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x7b,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x7c,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x7e,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x7f,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x80,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0xc1,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0xf0,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0xf7,0x00,0x01,0x02]
+
+v_mad_i32_i24 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x01,0x01,0x01,0x02]
+
+v_mad_i32_i24 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0xff,0x01,0x01,0x02]
+
+v_mad_i32_i24 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x01,0x82,0x01,0x02]
+
+v_mad_i32_i24 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x01,0xe0,0x01,0x02]
+
+v_mad_i32_i24 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x01,0xee,0x01,0x02]
+
+v_mad_i32_i24 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x01,0x04,0x02,0x02]
+
+v_mad_i32_i24 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x01,0xfe,0x03,0x02]
+
+v_mad_i32_i24 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xc2,0xd1,0x01,0x00,0x05,0x03]
+
+v_mad_i32_i24 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xc2,0xd1,0x01,0x00,0xc1,0x03]
+
+v_mad_i32_i24 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xc2,0xd1,0x01,0x00,0xdd,0x03]
+
+v_mad_i32_i24 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xc2,0xd1,0x01,0x00,0x0d,0x04]
+
+v_mad_i32_i24 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xc2,0xd1,0x01,0x00,0xfd,0x07]
+
+v_mad_u32_u24 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x01,0x00,0x01,0x02]
+
+v_mad_u32_u24 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xc3,0xd1,0x01,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x65,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x66,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x67,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x6a,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x6b,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x6c,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x6d,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x6e,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x6f,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x7b,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x7c,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x7e,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x7f,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x80,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0xc1,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0xf0,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0xf7,0x00,0x01,0x02]
+
+v_mad_u32_u24 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x01,0x01,0x01,0x02]
+
+v_mad_u32_u24 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0xff,0x01,0x01,0x02]
+
+v_mad_u32_u24 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x01,0x82,0x01,0x02]
+
+v_mad_u32_u24 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x01,0xe0,0x01,0x02]
+
+v_mad_u32_u24 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x01,0xee,0x01,0x02]
+
+v_mad_u32_u24 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x01,0x04,0x02,0x02]
+
+v_mad_u32_u24 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x01,0xfe,0x03,0x02]
+
+v_mad_u32_u24 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xc3,0xd1,0x01,0x00,0x05,0x03]
+
+v_mad_u32_u24 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xc3,0xd1,0x01,0x00,0xc1,0x03]
+
+v_mad_u32_u24 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xc3,0xd1,0x01,0x00,0xdd,0x03]
+
+v_mad_u32_u24 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xc3,0xd1,0x01,0x00,0x0d,0x04]
+
+v_mad_u32_u24 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xc3,0xd1,0x01,0x00,0xfd,0x07]
+
+v_cubeid_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubeid_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xc4,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, s101, v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x65,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x66,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x67,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x6a,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x6b,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x6c,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x6d,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x6e,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x6f,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x7b,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x7c,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x7e,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x7f,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0xfd,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x05,0x0e,0x04]
+
+v_cubeid_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0xff,0x05,0x0e,0x04]
+
+v_cubeid_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0xfe,0x0f,0x04]
+
+v_cubeid_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0xfe,0x07]
+
+v_cubeid_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0x0e,0x24]
+
+v_cubeid_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0x0e,0x44]
+
+v_cubeid_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0x0e,0x84]
+
+v_cubeid_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0x0e,0xe4]
+
+v_cubeid_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xc4,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xc4,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xc4,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xc4,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x80,0xc4,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubeid_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0x0e,0x0c]
+
+v_cubeid_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0x0e,0x14]
+
+v_cubeid_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0xc4,0xd1,0x01,0x04,0x0e,0x1c]
+
+v_cubesc_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubesc_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xc5,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, s101, v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x65,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x66,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x67,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x6a,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x6b,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x6c,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x6d,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x6e,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x6f,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x7b,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x7c,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x7e,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x7f,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0xfd,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x05,0x0e,0x04]
+
+v_cubesc_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0xff,0x05,0x0e,0x04]
+
+v_cubesc_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0xfe,0x0f,0x04]
+
+v_cubesc_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0xfe,0x07]
+
+v_cubesc_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0x0e,0x24]
+
+v_cubesc_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0x0e,0x44]
+
+v_cubesc_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0x0e,0x84]
+
+v_cubesc_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0x0e,0xe4]
+
+v_cubesc_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xc5,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xc5,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xc5,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xc5,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x80,0xc5,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubesc_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0x0e,0x0c]
+
+v_cubesc_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0x0e,0x14]
+
+v_cubesc_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0xc5,0xd1,0x01,0x04,0x0e,0x1c]
+
+v_cubetc_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubetc_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xc6,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, s101, v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x65,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x66,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x67,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x6a,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x6b,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x6c,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x6d,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x6e,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x6f,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x7b,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x7c,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x7e,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x7f,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0xfd,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x05,0x0e,0x04]
+
+v_cubetc_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0xff,0x05,0x0e,0x04]
+
+v_cubetc_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0xfe,0x0f,0x04]
+
+v_cubetc_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0xfe,0x07]
+
+v_cubetc_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0x0e,0x24]
+
+v_cubetc_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0x0e,0x44]
+
+v_cubetc_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0x0e,0x84]
+
+v_cubetc_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0x0e,0xe4]
+
+v_cubetc_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xc6,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xc6,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xc6,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xc6,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x80,0xc6,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubetc_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0x0e,0x0c]
+
+v_cubetc_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0x0e,0x14]
+
+v_cubetc_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0xc6,0xd1,0x01,0x04,0x0e,0x1c]
+
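+// v_cubema_f32: same operand, modifier, clamp, and omod coverage as above.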
+v_cubema_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubema_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xc7,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, s101, v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x65,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x66,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x67,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x6a,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x6b,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x6c,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x6d,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x6e,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x6f,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x7b,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x7c,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x7e,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x7f,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0xfd,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x05,0x0e,0x04]
+
+v_cubema_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0xff,0x05,0x0e,0x04]
+
+v_cubema_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0xfe,0x0f,0x04]
+
+v_cubema_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0xfe,0x07]
+
+v_cubema_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0x0e,0x24]
+
+v_cubema_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0x0e,0x44]
+
+v_cubema_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0x0e,0x84]
+
+v_cubema_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0x0e,0xe4]
+
+v_cubema_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xc7,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xc7,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xc7,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xc7,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x80,0xc7,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cubema_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0x0e,0x0c]
+
+v_cubema_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0x0e,0x14]
+
+v_cubema_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0xc7,0xd1,0x01,0x04,0x0e,0x1c]
+
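+// v_bfe_u32: src0 operand kinds, plus inline constants and VGPRs in each source slot.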
+v_bfe_u32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x01,0x00,0x01,0x02]
+
+v_bfe_u32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xc8,0xd1,0x01,0x00,0x01,0x02]
+
+v_bfe_u32 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x65,0x00,0x01,0x02]
+
+v_bfe_u32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x66,0x00,0x01,0x02]
+
+v_bfe_u32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x67,0x00,0x01,0x02]
+
+v_bfe_u32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x6a,0x00,0x01,0x02]
+
+v_bfe_u32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x6b,0x00,0x01,0x02]
+
+v_bfe_u32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x6c,0x00,0x01,0x02]
+
+v_bfe_u32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x6d,0x00,0x01,0x02]
+
+v_bfe_u32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x6e,0x00,0x01,0x02]
+
+v_bfe_u32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x6f,0x00,0x01,0x02]
+
+v_bfe_u32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x7b,0x00,0x01,0x02]
+
+v_bfe_u32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x7c,0x00,0x01,0x02]
+
+v_bfe_u32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x7e,0x00,0x01,0x02]
+
+v_bfe_u32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x7f,0x00,0x01,0x02]
+
+v_bfe_u32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x80,0x00,0x01,0x02]
+
+v_bfe_u32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0xc1,0x00,0x01,0x02]
+
+v_bfe_u32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0xf0,0x00,0x01,0x02]
+
+v_bfe_u32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0xf7,0x00,0x01,0x02]
+
+v_bfe_u32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x01,0x01,0x01,0x02]
+
+v_bfe_u32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0xff,0x01,0x01,0x02]
+
+v_bfe_u32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x01,0x82,0x01,0x02]
+
+v_bfe_u32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x01,0xe0,0x01,0x02]
+
+v_bfe_u32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x01,0xee,0x01,0x02]
+
+v_bfe_u32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x01,0x04,0x02,0x02]
+
+v_bfe_u32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x01,0xfe,0x03,0x02]
+
+v_bfe_u32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xc8,0xd1,0x01,0x00,0x05,0x03]
+
+v_bfe_u32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xc8,0xd1,0x01,0x00,0xc1,0x03]
+
+v_bfe_u32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xc8,0xd1,0x01,0x00,0xdd,0x03]
+
+v_bfe_u32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xc8,0xd1,0x01,0x00,0x0d,0x04]
+
+v_bfe_u32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xc8,0xd1,0x01,0x00,0xfd,0x07]
+
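+// v_bfe_i32: same operand coverage as v_bfe_u32.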
+v_bfe_i32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x01,0x00,0x01,0x02]
+
+v_bfe_i32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xc9,0xd1,0x01,0x00,0x01,0x02]
+
+v_bfe_i32 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x65,0x00,0x01,0x02]
+
+v_bfe_i32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x66,0x00,0x01,0x02]
+
+v_bfe_i32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x67,0x00,0x01,0x02]
+
+v_bfe_i32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x6a,0x00,0x01,0x02]
+
+v_bfe_i32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x6b,0x00,0x01,0x02]
+
+v_bfe_i32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x6c,0x00,0x01,0x02]
+
+v_bfe_i32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x6d,0x00,0x01,0x02]
+
+v_bfe_i32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x6e,0x00,0x01,0x02]
+
+v_bfe_i32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x6f,0x00,0x01,0x02]
+
+v_bfe_i32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x7b,0x00,0x01,0x02]
+
+v_bfe_i32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x7c,0x00,0x01,0x02]
+
+v_bfe_i32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x7e,0x00,0x01,0x02]
+
+v_bfe_i32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x7f,0x00,0x01,0x02]
+
+v_bfe_i32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x80,0x00,0x01,0x02]
+
+v_bfe_i32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0xc1,0x00,0x01,0x02]
+
+v_bfe_i32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0xf0,0x00,0x01,0x02]
+
+v_bfe_i32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0xf7,0x00,0x01,0x02]
+
+v_bfe_i32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x01,0x01,0x01,0x02]
+
+v_bfe_i32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0xff,0x01,0x01,0x02]
+
+v_bfe_i32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x01,0x82,0x01,0x02]
+
+v_bfe_i32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x01,0xe0,0x01,0x02]
+
+v_bfe_i32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x01,0xee,0x01,0x02]
+
+v_bfe_i32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x01,0x04,0x02,0x02]
+
+v_bfe_i32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x01,0xfe,0x03,0x02]
+
+v_bfe_i32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xc9,0xd1,0x01,0x00,0x05,0x03]
+
+v_bfe_i32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xc9,0xd1,0x01,0x00,0xc1,0x03]
+
+v_bfe_i32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xc9,0xd1,0x01,0x00,0xdd,0x03]
+
+v_bfe_i32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xc9,0xd1,0x01,0x00,0x0d,0x04]
+
+v_bfe_i32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xc9,0xd1,0x01,0x00,0xfd,0x07]
+
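+// v_bfi_b32: same operand coverage as v_bfe_u32.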
+v_bfi_b32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x01,0x00,0x01,0x02]
+
+v_bfi_b32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xca,0xd1,0x01,0x00,0x01,0x02]
+
+v_bfi_b32 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x65,0x00,0x01,0x02]
+
+v_bfi_b32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x66,0x00,0x01,0x02]
+
+v_bfi_b32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x67,0x00,0x01,0x02]
+
+v_bfi_b32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x6a,0x00,0x01,0x02]
+
+v_bfi_b32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x6b,0x00,0x01,0x02]
+
+v_bfi_b32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x6c,0x00,0x01,0x02]
+
+v_bfi_b32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x6d,0x00,0x01,0x02]
+
+v_bfi_b32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x6e,0x00,0x01,0x02]
+
+v_bfi_b32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x6f,0x00,0x01,0x02]
+
+v_bfi_b32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x7b,0x00,0x01,0x02]
+
+v_bfi_b32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x7c,0x00,0x01,0x02]
+
+v_bfi_b32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x7e,0x00,0x01,0x02]
+
+v_bfi_b32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x7f,0x00,0x01,0x02]
+
+v_bfi_b32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x80,0x00,0x01,0x02]
+
+v_bfi_b32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0xc1,0x00,0x01,0x02]
+
+v_bfi_b32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0xf0,0x00,0x01,0x02]
+
+v_bfi_b32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0xf7,0x00,0x01,0x02]
+
+v_bfi_b32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x01,0x01,0x01,0x02]
+
+v_bfi_b32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0xff,0x01,0x01,0x02]
+
+v_bfi_b32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x01,0x82,0x01,0x02]
+
+v_bfi_b32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x01,0xe0,0x01,0x02]
+
+v_bfi_b32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x01,0xee,0x01,0x02]
+
+v_bfi_b32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x01,0x04,0x02,0x02]
+
+v_bfi_b32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xca,0xd1,0x01,0xfe,0x03,0x02]
+
+v_bfi_b32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xca,0xd1,0x01,0x00,0x05,0x03]
+
+v_bfi_b32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xca,0xd1,0x01,0x00,0xc1,0x03]
+
+v_bfi_b32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xca,0xd1,0x01,0x00,0xdd,0x03]
+
+v_bfi_b32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xca,0xd1,0x01,0x00,0x0d,0x04]
+
+v_bfi_b32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xca,0xd1,0x01,0x00,0xfd,0x07]
+
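+// v_fma_f32: operand variants (dst, src0 kinds, VGPR src1/src2), per-source neg/abs, clamp, and omod.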
+v_fma_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0x0e,0x04]
+
+v_fma_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xcb,0xd1,0x01,0x04,0x0e,0x04]
+
+v_fma_f32 v5, s101, v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x65,0x04,0x0e,0x04]
+
+v_fma_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x66,0x04,0x0e,0x04]
+
+v_fma_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x67,0x04,0x0e,0x04]
+
+v_fma_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x6a,0x04,0x0e,0x04]
+
+v_fma_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x6b,0x04,0x0e,0x04]
+
+v_fma_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x6c,0x04,0x0e,0x04]
+
+v_fma_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x6d,0x04,0x0e,0x04]
+
+v_fma_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x6e,0x04,0x0e,0x04]
+
+v_fma_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x6f,0x04,0x0e,0x04]
+
+v_fma_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x7b,0x04,0x0e,0x04]
+
+v_fma_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x7c,0x04,0x0e,0x04]
+
+v_fma_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x7e,0x04,0x0e,0x04]
+
+v_fma_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x7f,0x04,0x0e,0x04]
+
+v_fma_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0xfd,0x04,0x0e,0x04]
+
+v_fma_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x05,0x0e,0x04]
+
+v_fma_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0xff,0x05,0x0e,0x04]
+
+v_fma_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0xfe,0x0f,0x04]
+
+v_fma_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0xfe,0x07]
+
+v_fma_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0x0e,0x24]
+
+v_fma_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0x0e,0x44]
+
+v_fma_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0x0e,0x84]
+
+v_fma_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0x0e,0xe4]
+
+v_fma_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xcb,0xd1,0x01,0x04,0x0e,0x04]
+
+v_fma_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xcb,0xd1,0x01,0x04,0x0e,0x04]
+
+v_fma_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xcb,0xd1,0x01,0x04,0x0e,0x04]
+
+v_fma_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xcb,0xd1,0x01,0x04,0x0e,0x04]
+
+v_fma_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x80,0xcb,0xd1,0x01,0x04,0x0e,0x04]
+
+v_fma_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0x0e,0x0c]
+
+v_fma_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0x0e,0x14]
+
+v_fma_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0xcb,0xd1,0x01,0x04,0x0e,0x1c]
+
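+// v_fma_f64: 64-bit operand pairs (SGPR, special-register, and VGPR pairs), per-source neg/abs, clamp, and omod.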
+v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x04,0x0e,0x04]
+
+v_fma_f64 v[254:255], s[2:3], v[2:3], v[3:4]
+// CHECK: [0xfe,0x00,0xcc,0xd1,0x02,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], s[4:5], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xcc,0xd1,0x04,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], s[100:101], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xcc,0xd1,0x64,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], flat_scratch, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xcc,0xd1,0x66,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], vcc, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xcc,0xd1,0x6a,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], tba, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xcc,0xd1,0x6c,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], tma, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xcc,0xd1,0x6e,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], ttmp[10:11], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xcc,0xd1,0x7a,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], exec, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xcc,0xd1,0x7e,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], scc, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xcc,0xd1,0xfd,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], v[1:2], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xcc,0xd1,0x01,0x05,0x0e,0x04]
+
+v_fma_f64 v[5:6], v[254:255], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xcc,0xd1,0xfe,0x05,0x0e,0x04]
+
+v_fma_f64 v[5:6], s[2:3], v[254:255], v[3:4]
+// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0xfc,0x0f,0x04]
+
+v_fma_f64 v[5:6], s[2:3], v[2:3], v[254:255]
+// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x04,0xfa,0x07]
+
+v_fma_f64 v[5:6], -s[2:3], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x04,0x0e,0x24]
+
+v_fma_f64 v[5:6], s[2:3], -v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x04,0x0e,0x44]
+
+v_fma_f64 v[5:6], s[2:3], v[2:3], -v[3:4]
+// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x04,0x0e,0x84]
+
+v_fma_f64 v[5:6], -s[2:3], -v[2:3], -v[3:4]
+// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x04,0x0e,0xe4]
+
+v_fma_f64 v[5:6], |s[2:3]|, v[2:3], v[3:4]
+// CHECK: [0x05,0x01,0xcc,0xd1,0x02,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], s[2:3], |v[2:3]|, v[3:4]
+// CHECK: [0x05,0x02,0xcc,0xd1,0x02,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], s[2:3], v[2:3], |v[3:4]|
+// CHECK: [0x05,0x04,0xcc,0xd1,0x02,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], |s[2:3]|, |v[2:3]|, |v[3:4]|
+// CHECK: [0x05,0x07,0xcc,0xd1,0x02,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4] clamp
+// CHECK: [0x05,0x80,0xcc,0xd1,0x02,0x04,0x0e,0x04]
+
+v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4] mul:2
+// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x04,0x0e,0x0c]
+
+v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4] mul:4
+// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x04,0x0e,0x14]
+
+v_fma_f64 v[5:6], s[2:3], v[2:3], v[3:4] div:2
+// CHECK: [0x05,0x00,0xcc,0xd1,0x02,0x04,0x0e,0x1c]
+
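+// v_lerp_u8: src0 operand kinds, plus inline constants and VGPRs in each source slot.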
+v_lerp_u8 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x01,0x00,0x01,0x02]
+
+v_lerp_u8 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xcd,0xd1,0x01,0x00,0x01,0x02]
+
+v_lerp_u8 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x65,0x00,0x01,0x02]
+
+v_lerp_u8 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x66,0x00,0x01,0x02]
+
+v_lerp_u8 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x67,0x00,0x01,0x02]
+
+v_lerp_u8 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x6a,0x00,0x01,0x02]
+
+v_lerp_u8 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x6b,0x00,0x01,0x02]
+
+v_lerp_u8 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x6c,0x00,0x01,0x02]
+
+v_lerp_u8 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x6d,0x00,0x01,0x02]
+
+v_lerp_u8 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x6e,0x00,0x01,0x02]
+
+v_lerp_u8 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x6f,0x00,0x01,0x02]
+
+v_lerp_u8 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x7b,0x00,0x01,0x02]
+
+v_lerp_u8 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x7c,0x00,0x01,0x02]
+
+v_lerp_u8 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x7e,0x00,0x01,0x02]
+
+v_lerp_u8 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x7f,0x00,0x01,0x02]
+
+v_lerp_u8 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x80,0x00,0x01,0x02]
+
+v_lerp_u8 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0xc1,0x00,0x01,0x02]
+
+v_lerp_u8 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0xf0,0x00,0x01,0x02]
+
+v_lerp_u8 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0xf7,0x00,0x01,0x02]
+
+v_lerp_u8 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x01,0x01,0x01,0x02]
+
+v_lerp_u8 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0xff,0x01,0x01,0x02]
+
+v_lerp_u8 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x01,0x82,0x01,0x02]
+
+v_lerp_u8 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x01,0xe0,0x01,0x02]
+
+v_lerp_u8 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x01,0xee,0x01,0x02]
+
+v_lerp_u8 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x01,0x04,0x02,0x02]
+
+v_lerp_u8 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x01,0xfe,0x03,0x02]
+
+v_lerp_u8 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xcd,0xd1,0x01,0x00,0x05,0x03]
+
+v_lerp_u8 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xcd,0xd1,0x01,0x00,0xc1,0x03]
+
+v_lerp_u8 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xcd,0xd1,0x01,0x00,0xdd,0x03]
+
+v_lerp_u8 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xcd,0xd1,0x01,0x00,0x0d,0x04]
+
+v_lerp_u8 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xcd,0xd1,0x01,0x00,0xfd,0x07]
+
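+// v_alignbit_b32: same operand coverage as v_lerp_u8.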
+v_alignbit_b32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x01,0x00,0x01,0x02]
+
+v_alignbit_b32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xce,0xd1,0x01,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x65,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x66,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x67,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x6a,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x6b,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x6c,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x6d,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x6e,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x6f,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x7b,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x7c,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x7e,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x7f,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x80,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0xc1,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0xf0,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0xf7,0x00,0x01,0x02]
+
+v_alignbit_b32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x01,0x01,0x01,0x02]
+
+v_alignbit_b32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0xff,0x01,0x01,0x02]
+
+v_alignbit_b32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x01,0x82,0x01,0x02]
+
+v_alignbit_b32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x01,0xe0,0x01,0x02]
+
+v_alignbit_b32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x01,0xee,0x01,0x02]
+
+v_alignbit_b32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x01,0x04,0x02,0x02]
+
+v_alignbit_b32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xce,0xd1,0x01,0xfe,0x03,0x02]
+
+v_alignbit_b32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xce,0xd1,0x01,0x00,0x05,0x03]
+
+v_alignbit_b32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xce,0xd1,0x01,0x00,0xc1,0x03]
+
+v_alignbit_b32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xce,0xd1,0x01,0x00,0xdd,0x03]
+
+v_alignbit_b32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xce,0xd1,0x01,0x00,0x0d,0x04]
+
+v_alignbit_b32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xce,0xd1,0x01,0x00,0xfd,0x07]
+
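+// v_alignbyte_b32: same operand coverage as v_lerp_u8.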
+v_alignbyte_b32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x01,0x00,0x01,0x02]
+
+v_alignbyte_b32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xcf,0xd1,0x01,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x65,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x66,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x67,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x6a,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x6b,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x6c,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x6d,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x6e,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x6f,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x7b,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x7c,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x7e,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x7f,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x80,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0xc1,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0xf0,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0xf7,0x00,0x01,0x02]
+
+v_alignbyte_b32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x01,0x01,0x01,0x02]
+
+v_alignbyte_b32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0xff,0x01,0x01,0x02]
+
+v_alignbyte_b32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x01,0x82,0x01,0x02]
+
+v_alignbyte_b32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x01,0xe0,0x01,0x02]
+
+v_alignbyte_b32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x01,0xee,0x01,0x02]
+
+v_alignbyte_b32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x01,0x04,0x02,0x02]
+
+v_alignbyte_b32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x01,0xfe,0x03,0x02]
+
+v_alignbyte_b32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xcf,0xd1,0x01,0x00,0x05,0x03]
+
+v_alignbyte_b32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xcf,0xd1,0x01,0x00,0xc1,0x03]
+
+v_alignbyte_b32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xcf,0xd1,0x01,0x00,0xdd,0x03]
+
+v_alignbyte_b32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xcf,0xd1,0x01,0x00,0x0d,0x04]
+
+v_alignbyte_b32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xcf,0xd1,0x01,0x00,0xfd,0x07]
+
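+// v_min3_f32: operand variants (dst, src0 kinds, VGPR src1/src2), per-source neg/abs, clamp, and omod.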
+v_min3_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0x0e,0x04]
+
+v_min3_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xd0,0xd1,0x01,0x04,0x0e,0x04]
+
+v_min3_f32 v5, s101, v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x65,0x04,0x0e,0x04]
+
+v_min3_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x66,0x04,0x0e,0x04]
+
+v_min3_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x67,0x04,0x0e,0x04]
+
+v_min3_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x6a,0x04,0x0e,0x04]
+
+v_min3_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x6b,0x04,0x0e,0x04]
+
+v_min3_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x6c,0x04,0x0e,0x04]
+
+v_min3_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x6d,0x04,0x0e,0x04]
+
+v_min3_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x6e,0x04,0x0e,0x04]
+
+v_min3_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x6f,0x04,0x0e,0x04]
+
+v_min3_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x7b,0x04,0x0e,0x04]
+
+v_min3_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x7c,0x04,0x0e,0x04]
+
+v_min3_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x7e,0x04,0x0e,0x04]
+
+v_min3_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x7f,0x04,0x0e,0x04]
+
+v_min3_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0xfd,0x04,0x0e,0x04]
+
+v_min3_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x05,0x0e,0x04]
+
+v_min3_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0xff,0x05,0x0e,0x04]
+
+v_min3_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0xfe,0x0f,0x04]
+
+v_min3_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0xfe,0x07]
+
+v_min3_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0x0e,0x24]
+
+v_min3_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0x0e,0x44]
+
+v_min3_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0x0e,0x84]
+
+v_min3_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0x0e,0xe4]
+
+v_min3_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xd0,0xd1,0x01,0x04,0x0e,0x04]
+
+v_min3_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xd0,0xd1,0x01,0x04,0x0e,0x04]
+
+v_min3_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xd0,0xd1,0x01,0x04,0x0e,0x04]
+
+v_min3_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xd0,0xd1,0x01,0x04,0x0e,0x04]
+
+v_min3_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x80,0xd0,0xd1,0x01,0x04,0x0e,0x04]
+
+v_min3_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0x0e,0x0c]
+
+v_min3_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0x0e,0x14]
+
+v_min3_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0xd0,0xd1,0x01,0x04,0x0e,0x1c]
+
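+// v_min3_i32: src0 operand kinds, plus inline constants and VGPRs in each source slot.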
+v_min3_i32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x01,0x00,0x01,0x02]
+
+v_min3_i32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xd1,0xd1,0x01,0x00,0x01,0x02]
+
+v_min3_i32 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x65,0x00,0x01,0x02]
+
+v_min3_i32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x66,0x00,0x01,0x02]
+
+v_min3_i32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x67,0x00,0x01,0x02]
+
+v_min3_i32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x6a,0x00,0x01,0x02]
+
+v_min3_i32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x6b,0x00,0x01,0x02]
+
+v_min3_i32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x6c,0x00,0x01,0x02]
+
+v_min3_i32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x6d,0x00,0x01,0x02]
+
+v_min3_i32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x6e,0x00,0x01,0x02]
+
+v_min3_i32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x6f,0x00,0x01,0x02]
+
+v_min3_i32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x7b,0x00,0x01,0x02]
+
+v_min3_i32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x7c,0x00,0x01,0x02]
+
+v_min3_i32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x7e,0x00,0x01,0x02]
+
+v_min3_i32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x7f,0x00,0x01,0x02]
+
+v_min3_i32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x80,0x00,0x01,0x02]
+
+v_min3_i32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0xc1,0x00,0x01,0x02]
+
+v_min3_i32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0xf0,0x00,0x01,0x02]
+
+v_min3_i32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0xf7,0x00,0x01,0x02]
+
+v_min3_i32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x01,0x01,0x01,0x02]
+
+v_min3_i32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0xff,0x01,0x01,0x02]
+
+v_min3_i32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x01,0x82,0x01,0x02]
+
+v_min3_i32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x01,0xe0,0x01,0x02]
+
+v_min3_i32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x01,0xee,0x01,0x02]
+
+v_min3_i32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x01,0x04,0x02,0x02]
+
+v_min3_i32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x01,0xfe,0x03,0x02]
+
+v_min3_i32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xd1,0xd1,0x01,0x00,0x05,0x03]
+
+v_min3_i32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xd1,0xd1,0x01,0x00,0xc1,0x03]
+
+v_min3_i32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xd1,0xd1,0x01,0x00,0xdd,0x03]
+
+v_min3_i32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xd1,0xd1,0x01,0x00,0x0d,0x04]
+
+v_min3_i32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xd1,0xd1,0x01,0x00,0xfd,0x07]
+
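+// v_min3_u32: same operand coverage as v_min3_i32.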
+v_min3_u32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x01,0x00,0x01,0x02]
+
+v_min3_u32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xd2,0xd1,0x01,0x00,0x01,0x02]
+
+v_min3_u32 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x65,0x00,0x01,0x02]
+
+v_min3_u32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x66,0x00,0x01,0x02]
+
+v_min3_u32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x67,0x00,0x01,0x02]
+
+v_min3_u32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x6a,0x00,0x01,0x02]
+
+v_min3_u32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x6b,0x00,0x01,0x02]
+
+v_min3_u32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x6c,0x00,0x01,0x02]
+
+v_min3_u32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x6d,0x00,0x01,0x02]
+
+v_min3_u32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x6e,0x00,0x01,0x02]
+
+v_min3_u32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x6f,0x00,0x01,0x02]
+
+v_min3_u32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x7b,0x00,0x01,0x02]
+
+v_min3_u32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x7c,0x00,0x01,0x02]
+
+v_min3_u32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x7e,0x00,0x01,0x02]
+
+v_min3_u32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x7f,0x00,0x01,0x02]
+
+v_min3_u32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x80,0x00,0x01,0x02]
+
+v_min3_u32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0xc1,0x00,0x01,0x02]
+
+v_min3_u32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0xf0,0x00,0x01,0x02]
+
+v_min3_u32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0xf7,0x00,0x01,0x02]
+
+v_min3_u32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x01,0x01,0x01,0x02]
+
+v_min3_u32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0xff,0x01,0x01,0x02]
+
+v_min3_u32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x01,0x82,0x01,0x02]
+
+v_min3_u32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x01,0xe0,0x01,0x02]
+
+v_min3_u32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x01,0xee,0x01,0x02]
+
+v_min3_u32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x01,0x04,0x02,0x02]
+
+v_min3_u32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x01,0xfe,0x03,0x02]
+
+v_min3_u32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xd2,0xd1,0x01,0x00,0x05,0x03]
+
+v_min3_u32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xd2,0xd1,0x01,0x00,0xc1,0x03]
+
+v_min3_u32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xd2,0xd1,0x01,0x00,0xdd,0x03]
+
+v_min3_u32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xd2,0xd1,0x01,0x00,0x0d,0x04]
+
+v_min3_u32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xd2,0xd1,0x01,0x00,0xfd,0x07]
+
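+// v_max3_f32: operand variants (dst, src0 kinds, VGPR src1/src2), per-source neg/abs, clamp, and omod.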
+v_max3_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0x0e,0x04]
+
+v_max3_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xd3,0xd1,0x01,0x04,0x0e,0x04]
+
+v_max3_f32 v5, s101, v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x65,0x04,0x0e,0x04]
+
+v_max3_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x66,0x04,0x0e,0x04]
+
+v_max3_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x67,0x04,0x0e,0x04]
+
+v_max3_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x6a,0x04,0x0e,0x04]
+
+v_max3_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x6b,0x04,0x0e,0x04]
+
+v_max3_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x6c,0x04,0x0e,0x04]
+
+v_max3_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x6d,0x04,0x0e,0x04]
+
+v_max3_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x6e,0x04,0x0e,0x04]
+
+v_max3_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x6f,0x04,0x0e,0x04]
+
+v_max3_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x7b,0x04,0x0e,0x04]
+
+v_max3_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x7c,0x04,0x0e,0x04]
+
+v_max3_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x7e,0x04,0x0e,0x04]
+
+v_max3_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x7f,0x04,0x0e,0x04]
+
+v_max3_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0xfd,0x04,0x0e,0x04]
+
+v_max3_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x05,0x0e,0x04]
+
+v_max3_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0xff,0x05,0x0e,0x04]
+
+v_max3_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0xfe,0x0f,0x04]
+
+v_max3_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0xfe,0x07]
+
+v_max3_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0x0e,0x24]
+
+v_max3_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0x0e,0x44]
+
+v_max3_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0x0e,0x84]
+
+v_max3_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0x0e,0xe4]
+
+v_max3_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xd3,0xd1,0x01,0x04,0x0e,0x04]
+
+v_max3_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xd3,0xd1,0x01,0x04,0x0e,0x04]
+
+v_max3_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xd3,0xd1,0x01,0x04,0x0e,0x04]
+
+v_max3_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xd3,0xd1,0x01,0x04,0x0e,0x04]
+
+v_max3_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x80,0xd3,0xd1,0x01,0x04,0x0e,0x04]
+
+v_max3_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0x0e,0x0c]
+
+v_max3_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0x0e,0x14]
+
+v_max3_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0xd3,0xd1,0x01,0x04,0x0e,0x1c]
+
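+// v_max3_i32: src0 operand kinds, plus inline constants and VGPRs in each source slot.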
+v_max3_i32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x01,0x00,0x01,0x02]
+
+v_max3_i32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xd4,0xd1,0x01,0x00,0x01,0x02]
+
+v_max3_i32 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x65,0x00,0x01,0x02]
+
+v_max3_i32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x66,0x00,0x01,0x02]
+
+v_max3_i32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x67,0x00,0x01,0x02]
+
+v_max3_i32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x6a,0x00,0x01,0x02]
+
+v_max3_i32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x6b,0x00,0x01,0x02]
+
+v_max3_i32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x6c,0x00,0x01,0x02]
+
+v_max3_i32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x6d,0x00,0x01,0x02]
+
+v_max3_i32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x6e,0x00,0x01,0x02]
+
+v_max3_i32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x6f,0x00,0x01,0x02]
+
+v_max3_i32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x7b,0x00,0x01,0x02]
+
+v_max3_i32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x7c,0x00,0x01,0x02]
+
+v_max3_i32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x7e,0x00,0x01,0x02]
+
+v_max3_i32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x7f,0x00,0x01,0x02]
+
+v_max3_i32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x80,0x00,0x01,0x02]
+
+v_max3_i32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0xc1,0x00,0x01,0x02]
+
+v_max3_i32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0xf0,0x00,0x01,0x02]
+
+v_max3_i32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0xf7,0x00,0x01,0x02]
+
+v_max3_i32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x01,0x01,0x01,0x02]
+
+v_max3_i32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0xff,0x01,0x01,0x02]
+
+v_max3_i32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x01,0x82,0x01,0x02]
+
+v_max3_i32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x01,0xe0,0x01,0x02]
+
+v_max3_i32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x01,0xee,0x01,0x02]
+
+v_max3_i32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x01,0x04,0x02,0x02]
+
+v_max3_i32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x01,0xfe,0x03,0x02]
+
+v_max3_i32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xd4,0xd1,0x01,0x00,0x05,0x03]
+
+v_max3_i32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xd4,0xd1,0x01,0x00,0xc1,0x03]
+
+v_max3_i32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xd4,0xd1,0x01,0x00,0xdd,0x03]
+
+v_max3_i32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xd4,0xd1,0x01,0x00,0x0d,0x04]
+
+v_max3_i32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xd4,0xd1,0x01,0x00,0xfd,0x07]
+
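+// v_max3_u32: same operand coverage as v_max3_i32.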
+v_max3_u32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x01,0x00,0x01,0x02]
+
+v_max3_u32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xd5,0xd1,0x01,0x00,0x01,0x02]
+
+v_max3_u32 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x65,0x00,0x01,0x02]
+
+v_max3_u32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x66,0x00,0x01,0x02]
+
+v_max3_u32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x67,0x00,0x01,0x02]
+
+v_max3_u32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x6a,0x00,0x01,0x02]
+
+v_max3_u32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x6b,0x00,0x01,0x02]
+
+v_max3_u32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x6c,0x00,0x01,0x02]
+
+v_max3_u32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x6d,0x00,0x01,0x02]
+
+v_max3_u32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x6e,0x00,0x01,0x02]
+
+v_max3_u32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x6f,0x00,0x01,0x02]
+
+v_max3_u32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x7b,0x00,0x01,0x02]
+
+v_max3_u32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x7c,0x00,0x01,0x02]
+
+v_max3_u32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x7e,0x00,0x01,0x02]
+
+v_max3_u32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x7f,0x00,0x01,0x02]
+
+v_max3_u32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x80,0x00,0x01,0x02]
+
+v_max3_u32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0xc1,0x00,0x01,0x02]
+
+v_max3_u32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0xf0,0x00,0x01,0x02]
+
+v_max3_u32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0xf7,0x00,0x01,0x02]
+
+v_max3_u32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x01,0x01,0x01,0x02]
+
+v_max3_u32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0xff,0x01,0x01,0x02]
+
+v_max3_u32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x01,0x82,0x01,0x02]
+
+v_max3_u32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x01,0xe0,0x01,0x02]
+
+v_max3_u32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x01,0xee,0x01,0x02]
+
+v_max3_u32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x01,0x04,0x02,0x02]
+
+v_max3_u32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x01,0xfe,0x03,0x02]
+
+v_max3_u32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xd5,0xd1,0x01,0x00,0x05,0x03]
+
+v_max3_u32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xd5,0xd1,0x01,0x00,0xc1,0x03]
+
+v_max3_u32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xd5,0xd1,0x01,0x00,0xdd,0x03]
+
+v_max3_u32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xd5,0xd1,0x01,0x00,0x0d,0x04]
+
+v_max3_u32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xd5,0xd1,0x01,0x00,0xfd,0x07]
+
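+// v_med3_f32: operand variants (dst, src0 kinds, VGPR src1/src2), per-source neg/abs, clamp, and omod.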
+v_med3_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0x0e,0x04]
+
+v_med3_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xd6,0xd1,0x01,0x04,0x0e,0x04]
+
+v_med3_f32 v5, s101, v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x65,0x04,0x0e,0x04]
+
+v_med3_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x66,0x04,0x0e,0x04]
+
+v_med3_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x67,0x04,0x0e,0x04]
+
+v_med3_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x6a,0x04,0x0e,0x04]
+
+v_med3_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x6b,0x04,0x0e,0x04]
+
+v_med3_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x6c,0x04,0x0e,0x04]
+
+v_med3_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x6d,0x04,0x0e,0x04]
+
+v_med3_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x6e,0x04,0x0e,0x04]
+
+v_med3_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x6f,0x04,0x0e,0x04]
+
+v_med3_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x7b,0x04,0x0e,0x04]
+
+v_med3_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x7c,0x04,0x0e,0x04]
+
+v_med3_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x7e,0x04,0x0e,0x04]
+
+v_med3_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x7f,0x04,0x0e,0x04]
+
+v_med3_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0xfd,0x04,0x0e,0x04]
+
+v_med3_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x05,0x0e,0x04]
+
+v_med3_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0xff,0x05,0x0e,0x04]
+
+v_med3_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0xfe,0x0f,0x04]
+
+v_med3_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0xfe,0x07]
+
+v_med3_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0x0e,0x24]
+
+v_med3_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0x0e,0x44]
+
+v_med3_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0x0e,0x84]
+
+v_med3_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0x0e,0xe4]
+
+v_med3_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xd6,0xd1,0x01,0x04,0x0e,0x04]
+
+v_med3_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xd6,0xd1,0x01,0x04,0x0e,0x04]
+
+v_med3_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xd6,0xd1,0x01,0x04,0x0e,0x04]
+
+v_med3_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xd6,0xd1,0x01,0x04,0x0e,0x04]
+
+v_med3_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x80,0xd6,0xd1,0x01,0x04,0x0e,0x04]
+
+v_med3_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0x0e,0x0c]
+
+v_med3_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0x0e,0x14]
+
+v_med3_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0xd6,0xd1,0x01,0x04,0x0e,0x1c]
+
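+// v_med3_i32: src0 operand kinds, plus inline constants and VGPRs in each source slot.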
+v_med3_i32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x01,0x00,0x01,0x02]
+
+v_med3_i32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xd7,0xd1,0x01,0x00,0x01,0x02]
+
+v_med3_i32 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x65,0x00,0x01,0x02]
+
+v_med3_i32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x66,0x00,0x01,0x02]
+
+v_med3_i32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x67,0x00,0x01,0x02]
+
+v_med3_i32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x6a,0x00,0x01,0x02]
+
+v_med3_i32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x6b,0x00,0x01,0x02]
+
+v_med3_i32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x6c,0x00,0x01,0x02]
+
+v_med3_i32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x6d,0x00,0x01,0x02]
+
+v_med3_i32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x6e,0x00,0x01,0x02]
+
+v_med3_i32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x6f,0x00,0x01,0x02]
+
+v_med3_i32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x7b,0x00,0x01,0x02]
+
+v_med3_i32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x7c,0x00,0x01,0x02]
+
+v_med3_i32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x7e,0x00,0x01,0x02]
+
+v_med3_i32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x7f,0x00,0x01,0x02]
+
+v_med3_i32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x80,0x00,0x01,0x02]
+
+v_med3_i32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0xc1,0x00,0x01,0x02]
+
+v_med3_i32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0xf0,0x00,0x01,0x02]
+
+v_med3_i32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0xf7,0x00,0x01,0x02]
+
+v_med3_i32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x01,0x01,0x01,0x02]
+
+v_med3_i32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0xff,0x01,0x01,0x02]
+
+v_med3_i32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x01,0x82,0x01,0x02]
+
+v_med3_i32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x01,0xe0,0x01,0x02]
+
+v_med3_i32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x01,0xee,0x01,0x02]
+
+v_med3_i32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x01,0x04,0x02,0x02]
+
+v_med3_i32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x01,0xfe,0x03,0x02]
+
+v_med3_i32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xd7,0xd1,0x01,0x00,0x05,0x03]
+
+v_med3_i32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xd7,0xd1,0x01,0x00,0xc1,0x03]
+
+v_med3_i32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xd7,0xd1,0x01,0x00,0xdd,0x03]
+
+v_med3_i32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xd7,0xd1,0x01,0x00,0x0d,0x04]
+
+v_med3_i32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xd7,0xd1,0x01,0x00,0xfd,0x07]
+
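+// v_med3_u32: same operand coverage as v_med3_i32.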
+v_med3_u32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x01,0x00,0x01,0x02]
+
+v_med3_u32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xd8,0xd1,0x01,0x00,0x01,0x02]
+
+v_med3_u32 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x65,0x00,0x01,0x02]
+
+v_med3_u32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x66,0x00,0x01,0x02]
+
+v_med3_u32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x67,0x00,0x01,0x02]
+
+v_med3_u32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x6a,0x00,0x01,0x02]
+
+v_med3_u32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x6b,0x00,0x01,0x02]
+
+v_med3_u32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x6c,0x00,0x01,0x02]
+
+v_med3_u32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x6d,0x00,0x01,0x02]
+
+v_med3_u32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x6e,0x00,0x01,0x02]
+
+v_med3_u32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x6f,0x00,0x01,0x02]
+
+v_med3_u32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x7b,0x00,0x01,0x02]
+
+v_med3_u32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x7c,0x00,0x01,0x02]
+
+v_med3_u32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x7e,0x00,0x01,0x02]
+
+v_med3_u32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x7f,0x00,0x01,0x02]
+
+v_med3_u32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x80,0x00,0x01,0x02]
+
+v_med3_u32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0xc1,0x00,0x01,0x02]
+
+v_med3_u32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0xf0,0x00,0x01,0x02]
+
+v_med3_u32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0xf7,0x00,0x01,0x02]
+
+v_med3_u32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x01,0x01,0x01,0x02]
+
+v_med3_u32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0xff,0x01,0x01,0x02]
+
+v_med3_u32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x01,0x82,0x01,0x02]
+
+v_med3_u32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x01,0xe0,0x01,0x02]
+
+v_med3_u32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x01,0xee,0x01,0x02]
+
+v_med3_u32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x01,0x04,0x02,0x02]
+
+v_med3_u32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x01,0xfe,0x03,0x02]
+
+v_med3_u32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xd8,0xd1,0x01,0x00,0x05,0x03]
+
+v_med3_u32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xd8,0xd1,0x01,0x00,0xc1,0x03]
+
+v_med3_u32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xd8,0xd1,0x01,0x00,0xdd,0x03]
+
+v_med3_u32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xd8,0xd1,0x01,0x00,0x0d,0x04]
+
+v_med3_u32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xd8,0xd1,0x01,0x00,0xfd,0x07]
+
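+// v_sad_u8: src0 operand kinds, plus inline constants and VGPRs in each source slot.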
+v_sad_u8 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x01,0x00,0x01,0x02]
+
+v_sad_u8 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xd9,0xd1,0x01,0x00,0x01,0x02]
+
+v_sad_u8 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x65,0x00,0x01,0x02]
+
+v_sad_u8 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x66,0x00,0x01,0x02]
+
+v_sad_u8 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x67,0x00,0x01,0x02]
+
+v_sad_u8 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x6a,0x00,0x01,0x02]
+
+v_sad_u8 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x6b,0x00,0x01,0x02]
+
+v_sad_u8 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x6c,0x00,0x01,0x02]
+
+v_sad_u8 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x6d,0x00,0x01,0x02]
+
+v_sad_u8 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x6e,0x00,0x01,0x02]
+
+v_sad_u8 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x6f,0x00,0x01,0x02]
+
+v_sad_u8 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x7b,0x00,0x01,0x02]
+
+v_sad_u8 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x7c,0x00,0x01,0x02]
+
+v_sad_u8 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x7e,0x00,0x01,0x02]
+
+v_sad_u8 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x7f,0x00,0x01,0x02]
+
+v_sad_u8 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x80,0x00,0x01,0x02]
+
+v_sad_u8 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0xc1,0x00,0x01,0x02]
+
+v_sad_u8 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0xf0,0x00,0x01,0x02]
+
+v_sad_u8 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0xf7,0x00,0x01,0x02]
+
+v_sad_u8 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x01,0x01,0x01,0x02]
+
+v_sad_u8 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0xff,0x01,0x01,0x02]
+
+v_sad_u8 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x01,0x82,0x01,0x02]
+
+v_sad_u8 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x01,0xe0,0x01,0x02]
+
+v_sad_u8 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x01,0xee,0x01,0x02]
+
+v_sad_u8 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x01,0x04,0x02,0x02]
+
+v_sad_u8 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x01,0xfe,0x03,0x02]
+
+v_sad_u8 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xd9,0xd1,0x01,0x00,0x05,0x03]
+
+v_sad_u8 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xd9,0xd1,0x01,0x00,0xc1,0x03]
+
+v_sad_u8 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xd9,0xd1,0x01,0x00,0xdd,0x03]
+
+v_sad_u8 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xd9,0xd1,0x01,0x00,0x0d,0x04]
+
+v_sad_u8 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xd9,0xd1,0x01,0x00,0xfd,0x07]
+
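+// v_sad_hi_u8: same operand coverage as v_sad_u8.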
+v_sad_hi_u8 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x01,0x00,0x01,0x02]
+
+v_sad_hi_u8 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xda,0xd1,0x01,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x65,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x66,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x67,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x6a,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x6b,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x6c,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x6d,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x6e,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x6f,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x7b,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x7c,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x7e,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x7f,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x80,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0xc1,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0xf0,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0xf7,0x00,0x01,0x02]
+
+v_sad_hi_u8 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x01,0x01,0x01,0x02]
+
+v_sad_hi_u8 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0xff,0x01,0x01,0x02]
+
+v_sad_hi_u8 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x01,0x82,0x01,0x02]
+
+v_sad_hi_u8 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x01,0xe0,0x01,0x02]
+
+v_sad_hi_u8 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x01,0xee,0x01,0x02]
+
+v_sad_hi_u8 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x01,0x04,0x02,0x02]
+
+v_sad_hi_u8 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xda,0xd1,0x01,0xfe,0x03,0x02]
+
+v_sad_hi_u8 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xda,0xd1,0x01,0x00,0x05,0x03]
+
+v_sad_hi_u8 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xda,0xd1,0x01,0x00,0xc1,0x03]
+
+v_sad_hi_u8 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xda,0xd1,0x01,0x00,0xdd,0x03]
+
+v_sad_hi_u8 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xda,0xd1,0x01,0x00,0x0d,0x04]
+
+v_sad_hi_u8 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xda,0xd1,0x01,0x00,0xfd,0x07]
+
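+// v_sad_u16: same operand coverage as v_sad_u8.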
+v_sad_u16 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x01,0x00,0x01,0x02]
+
+v_sad_u16 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xdb,0xd1,0x01,0x00,0x01,0x02]
+
+v_sad_u16 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x65,0x00,0x01,0x02]
+
+v_sad_u16 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x66,0x00,0x01,0x02]
+
+v_sad_u16 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x67,0x00,0x01,0x02]
+
+v_sad_u16 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x6a,0x00,0x01,0x02]
+
+v_sad_u16 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x6b,0x00,0x01,0x02]
+
+v_sad_u16 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x6c,0x00,0x01,0x02]
+
+v_sad_u16 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x6d,0x00,0x01,0x02]
+
+v_sad_u16 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x6e,0x00,0x01,0x02]
+
+v_sad_u16 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x6f,0x00,0x01,0x02]
+
+v_sad_u16 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x7b,0x00,0x01,0x02]
+
+v_sad_u16 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x7c,0x00,0x01,0x02]
+
+v_sad_u16 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x7e,0x00,0x01,0x02]
+
+v_sad_u16 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x7f,0x00,0x01,0x02]
+
+v_sad_u16 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x80,0x00,0x01,0x02]
+
+v_sad_u16 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0xc1,0x00,0x01,0x02]
+
+v_sad_u16 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0xf0,0x00,0x01,0x02]
+
+v_sad_u16 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0xf7,0x00,0x01,0x02]
+
+v_sad_u16 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x01,0x01,0x01,0x02]
+
+v_sad_u16 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0xff,0x01,0x01,0x02]
+
+v_sad_u16 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x01,0x82,0x01,0x02]
+
+v_sad_u16 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x01,0xe0,0x01,0x02]
+
+v_sad_u16 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x01,0xee,0x01,0x02]
+
+v_sad_u16 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x01,0x04,0x02,0x02]
+
+v_sad_u16 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x01,0xfe,0x03,0x02]
+
+v_sad_u16 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xdb,0xd1,0x01,0x00,0x05,0x03]
+
+v_sad_u16 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xdb,0xd1,0x01,0x00,0xc1,0x03]
+
+v_sad_u16 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xdb,0xd1,0x01,0x00,0xdd,0x03]
+
+v_sad_u16 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xdb,0xd1,0x01,0x00,0x0d,0x04]
+
+v_sad_u16 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xdb,0xd1,0x01,0x00,0xfd,0x07]
+
+v_sad_u32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x01,0x00,0x01,0x02]
+
+v_sad_u32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xdc,0xd1,0x01,0x00,0x01,0x02]
+
+v_sad_u32 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x65,0x00,0x01,0x02]
+
+v_sad_u32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x66,0x00,0x01,0x02]
+
+v_sad_u32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x67,0x00,0x01,0x02]
+
+v_sad_u32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x6a,0x00,0x01,0x02]
+
+v_sad_u32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x6b,0x00,0x01,0x02]
+
+v_sad_u32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x6c,0x00,0x01,0x02]
+
+v_sad_u32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x6d,0x00,0x01,0x02]
+
+v_sad_u32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x6e,0x00,0x01,0x02]
+
+v_sad_u32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x6f,0x00,0x01,0x02]
+
+v_sad_u32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x7b,0x00,0x01,0x02]
+
+v_sad_u32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x7c,0x00,0x01,0x02]
+
+v_sad_u32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x7e,0x00,0x01,0x02]
+
+v_sad_u32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x7f,0x00,0x01,0x02]
+
+v_sad_u32 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x80,0x00,0x01,0x02]
+
+v_sad_u32 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0xc1,0x00,0x01,0x02]
+
+v_sad_u32 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0xf0,0x00,0x01,0x02]
+
+v_sad_u32 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0xf7,0x00,0x01,0x02]
+
+v_sad_u32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x01,0x01,0x01,0x02]
+
+v_sad_u32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0xff,0x01,0x01,0x02]
+
+v_sad_u32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x01,0x82,0x01,0x02]
+
+v_sad_u32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x01,0xe0,0x01,0x02]
+
+v_sad_u32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x01,0xee,0x01,0x02]
+
+v_sad_u32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x01,0x04,0x02,0x02]
+
+v_sad_u32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x01,0xfe,0x03,0x02]
+
+v_sad_u32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xdc,0xd1,0x01,0x00,0x05,0x03]
+
+v_sad_u32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xdc,0xd1,0x01,0x00,0xc1,0x03]
+
+v_sad_u32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xdc,0xd1,0x01,0x00,0xdd,0x03]
+
+v_sad_u32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xdc,0xd1,0x01,0x00,0x0d,0x04]
+
+v_sad_u32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xdc,0xd1,0x01,0x00,0xfd,0x07]
+
+v_cvt_pk_u8_f32 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x01,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xdd,0xd1,0x01,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x65,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x66,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x67,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x6a,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x6b,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x6c,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x6d,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x6e,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x6f,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x7b,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x7c,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x7e,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x7f,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, scc, 0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0xfd,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x01,0x01,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0xff,0x01,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x01,0x82,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x01,0xe0,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x01,0xee,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x01,0x04,0x02,0x02]
+
+v_cvt_pk_u8_f32 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x01,0xfe,0x03,0x02]
+
+v_cvt_pk_u8_f32 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xdd,0xd1,0x01,0x00,0x05,0x03]
+
+v_cvt_pk_u8_f32 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xdd,0xd1,0x01,0x00,0xc1,0x03]
+
+v_cvt_pk_u8_f32 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x01,0x00,0xdd,0x03]
+
+v_cvt_pk_u8_f32 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xdd,0xd1,0x01,0x00,0x0d,0x04]
+
+v_cvt_pk_u8_f32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xdd,0xd1,0x01,0x00,0xfd,0x07]
+
+v_cvt_pk_u8_f32 v5, -s1, 0, 0
+// CHECK: [0x05,0x00,0xdd,0xd1,0x01,0x00,0x01,0x22]
+
+v_cvt_pk_u8_f32 v5, |s1|, 0, 0
+// CHECK: [0x05,0x01,0xdd,0xd1,0x01,0x00,0x01,0x02]
+
+v_cvt_pk_u8_f32 v5, s1, 0, 0 clamp
+// CHECK: [0x05,0x80,0xdd,0xd1,0x01,0x00,0x01,0x02]
+
+v_div_fixup_f32 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xde,0xd1,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, s101, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x65,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x66,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x67,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x6a,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x6b,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x6c,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x6d,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x6e,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x6f,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x7b,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x7c,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x7e,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x7f,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0xfd,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x05,0x0e,0x04]
+
+v_div_fixup_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0xff,0x05,0x0e,0x04]
+
+v_div_fixup_f32 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x01,0xfe,0x0f,0x04]
+
+v_div_fixup_f32 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x04,0xfe,0x07]
+
+v_div_fixup_f32 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x04,0x0e,0x24]
+
+v_div_fixup_f32 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x04,0x0e,0x44]
+
+v_div_fixup_f32 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x04,0x0e,0x84]
+
+v_div_fixup_f32 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x04,0x0e,0xe4]
+
+v_div_fixup_f32 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xde,0xd1,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xde,0xd1,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xde,0xd1,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xde,0xd1,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x80,0xde,0xd1,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f32 v5, s1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x04,0x0e,0x0c]
+
+v_div_fixup_f32 v5, s1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x04,0x0e,0x14]
+
+v_div_fixup_f32 v5, s1, v2, v3 div:2
+// CHECK: [0x05,0x00,0xde,0xd1,0x01,0x04,0x0e,0x1c]
+
+v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[254:255], s[2:3], v[2:3], v[3:4]
+// CHECK: [0xfe,0x00,0xdf,0xd1,0x02,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], s[4:5], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xdf,0xd1,0x04,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], s[100:101], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xdf,0xd1,0x64,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], flat_scratch, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xdf,0xd1,0x66,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], vcc, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xdf,0xd1,0x6a,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], tba, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xdf,0xd1,0x6c,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], tma, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xdf,0xd1,0x6e,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], ttmp[10:11], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xdf,0xd1,0x7a,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], exec, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xdf,0xd1,0x7e,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], scc, v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xdf,0xd1,0xfd,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], v[1:2], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xdf,0xd1,0x01,0x05,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], v[254:255], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xdf,0xd1,0xfe,0x05,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], s[2:3], v[254:255], v[3:4]
+// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0xfc,0x0f,0x04]
+
+v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[254:255]
+// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0xfa,0x07]
+
+v_div_fixup_f64 v[5:6], -s[2:3], v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0x0e,0x24]
+
+v_div_fixup_f64 v[5:6], s[2:3], -v[2:3], v[3:4]
+// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0x0e,0x44]
+
+v_div_fixup_f64 v[5:6], s[2:3], v[2:3], -v[3:4]
+// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0x0e,0x84]
+
+v_div_fixup_f64 v[5:6], -s[2:3], -v[2:3], -v[3:4]
+// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0x0e,0xe4]
+
+v_div_fixup_f64 v[5:6], |s[2:3]|, v[2:3], v[3:4]
+// CHECK: [0x05,0x01,0xdf,0xd1,0x02,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], s[2:3], |v[2:3]|, v[3:4]
+// CHECK: [0x05,0x02,0xdf,0xd1,0x02,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], s[2:3], v[2:3], |v[3:4]|
+// CHECK: [0x05,0x04,0xdf,0xd1,0x02,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], |s[2:3]|, |v[2:3]|, |v[3:4]|
+// CHECK: [0x05,0x07,0xdf,0xd1,0x02,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4] clamp
+// CHECK: [0x05,0x80,0xdf,0xd1,0x02,0x04,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4] mul:2
+// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0x0e,0x0c]
+
+v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4] mul:4
+// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0x0e,0x14]
+
+v_div_fixup_f64 v[5:6], s[2:3], v[2:3], v[3:4] div:2
+// CHECK: [0x05,0x00,0xdf,0xd1,0x02,0x04,0x0e,0x1c]
+
+v_div_scale_f32 v5, vcc, s1, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x01,0x00,0x01,0x02]
+
+v_div_scale_f32 v255, vcc, s1, 0, 0
+// CHECK: [0xff,0x6a,0xe0,0xd1,0x01,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, s101, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x65,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x66,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x67,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, vcc_lo, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x6a,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, vcc_hi, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x6b,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, tba_lo, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x6c,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, tba_hi, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x6d,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, tma_lo, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x6e,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, tma_hi, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x6f,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, ttmp11, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x7b,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, m0, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x7c,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, exec_lo, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x7e,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, exec_hi, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x7f,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, 0, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x80,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, -1, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0xc1,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, 0.5, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0xf0,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, -4.0, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0xf7,0x00,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, v1, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x01,0x01,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, v255, 0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0xff,0x01,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, s1, -1, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x01,0x82,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, s1, 0.5, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x01,0xe0,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, s1, -4.0, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x01,0xee,0x01,0x02]
+
+v_div_scale_f32 v5, vcc, s1, v2, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x01,0x04,0x02,0x02]
+
+v_div_scale_f32 v5, vcc, s1, v255, 0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x01,0xfe,0x03,0x02]
+
+v_div_scale_f32 v5, vcc, s1, 0, -1
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x01,0x00,0x05,0x03]
+
+v_div_scale_f32 v5, vcc, s1, 0, 0.5
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x01,0x00,0xc1,0x03]
+
+v_div_scale_f32 v5, vcc, s1, 0, -4.0
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x01,0x00,0xdd,0x03]
+
+v_div_scale_f32 v5, vcc, s1, 0, v3
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x01,0x00,0x0d,0x04]
+
+v_div_scale_f32 v5, vcc, s1, 0, v255
+// CHECK: [0x05,0x6a,0xe0,0xd1,0x01,0x00,0xfd,0x07]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], 0, 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x02,0x00,0x01,0x02]
+
+v_div_scale_f64 v[254:255], vcc, s[2:3], 0, 0
+// CHECK: [0xfe,0x6a,0xe1,0xd1,0x02,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, s[4:5], 0, 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x04,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, s[100:101], 0, 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x64,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, flat_scratch, 0, 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x66,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, vcc, 0, 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x6a,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, tba, 0, 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x6c,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, tma, 0, 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x6e,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, ttmp[10:11], 0, 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x7a,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, exec, 0, 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x7e,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, 0, 0, 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x80,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, -1, 0, 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0xc1,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, 0.5, 0, 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0xf0,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, -4.0, 0, 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0xf7,0x00,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, v[1:2], 0, 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x01,0x01,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, v[254:255], 0, 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0xfe,0x01,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], -1, 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x02,0x82,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], 0.5, 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x02,0xe0,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], -4.0, 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x02,0xee,0x01,0x02]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], v[2:3], 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x02,0x04,0x02,0x02]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], v[254:255], 0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x02,0xfc,0x03,0x02]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], 0, -1
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x02,0x00,0x05,0x03]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], 0, 0.5
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x02,0x00,0xc1,0x03]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], 0, -4.0
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x02,0x00,0xdd,0x03]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], 0, v[3:4]
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x02,0x00,0x0d,0x04]
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], 0, v[254:255]
+// CHECK: [0x05,0x6a,0xe1,0xd1,0x02,0x00,0xf9,0x07]
+
+v_div_fmas_f32 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0x05,0x0e,0x04]
+
+v_div_fmas_f32 v255, v1, v2, v3
+// CHECK: [0xff,0x00,0xe2,0xd1,0x01,0x05,0x0e,0x04]
+
+v_div_fmas_f32 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xe2,0xd1,0xff,0x05,0x0e,0x04]
+
+v_div_fmas_f32 v5, v1, v255, v3
+// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0xff,0x0f,0x04]
+
+v_div_fmas_f32 v5, v1, v2, v255
+// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0x05,0xfe,0x07]
+
+v_div_fmas_f32 v5, -v1, v2, v3
+// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0x05,0x0e,0x24]
+
+v_div_fmas_f32 v5, v1, -v2, v3
+// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0x05,0x0e,0x44]
+
+v_div_fmas_f32 v5, v1, v2, -v3
+// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0x05,0x0e,0x84]
+
+v_div_fmas_f32 v5, -v1, -v2, -v3
+// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0x05,0x0e,0xe4]
+
+v_div_fmas_f32 v5, |v1|, v2, v3
+// CHECK: [0x05,0x01,0xe2,0xd1,0x01,0x05,0x0e,0x04]
+
+v_div_fmas_f32 v5, v1, |v2|, v3
+// CHECK: [0x05,0x02,0xe2,0xd1,0x01,0x05,0x0e,0x04]
+
+v_div_fmas_f32 v5, v1, v2, |v3|
+// CHECK: [0x05,0x04,0xe2,0xd1,0x01,0x05,0x0e,0x04]
+
+v_div_fmas_f32 v5, |v1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xe2,0xd1,0x01,0x05,0x0e,0x04]
+
+v_div_fmas_f32 v5, v1, v2, v3 clamp
+// CHECK: [0x05,0x80,0xe2,0xd1,0x01,0x05,0x0e,0x04]
+
+v_div_fmas_f32 v5, v1, v2, v3 mul:2
+// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0x05,0x0e,0x0c]
+
+v_div_fmas_f32 v5, v1, v2, v3 mul:4
+// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0x05,0x0e,0x14]
+
+v_div_fmas_f32 v5, v1, v2, v3 div:2
+// CHECK: [0x05,0x00,0xe2,0xd1,0x01,0x05,0x0e,0x1c]
+
+v_div_fmas_f64 v[5:6], vcc, vcc, vcc
+// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xd4,0xa8,0x01]
+
+v_div_fmas_f64 v[254:255], vcc, vcc, vcc
+// CHECK: [0xfe,0x00,0xe3,0xd1,0x6a,0xd4,0xa8,0x01]
+
+v_div_fmas_f64 v[5:6], v[1:2], vcc, vcc
+// CHECK: [0x05,0x00,0xe3,0xd1,0x01,0xd5,0xa8,0x01]
+
+v_div_fmas_f64 v[5:6], v[254:255], vcc, vcc
+// CHECK: [0x05,0x00,0xe3,0xd1,0xfe,0xd5,0xa8,0x01]
+
+v_div_fmas_f64 v[5:6], vcc, v[2:3], vcc
+// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0x04,0xaa,0x01]
+
+v_div_fmas_f64 v[5:6], vcc, v[254:255], vcc
+// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xfc,0xab,0x01]
+
+v_div_fmas_f64 v[5:6], vcc, vcc, v[3:4]
+// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xd4,0x0c,0x04]
+
+v_div_fmas_f64 v[5:6], vcc, vcc, v[254:255]
+// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xd4,0xf8,0x07]
+
+v_div_fmas_f64 v[5:6], -vcc, vcc, vcc
+// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xd4,0xa8,0x21]
+
+v_div_fmas_f64 v[5:6], vcc, -vcc, vcc
+// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xd4,0xa8,0x41]
+
+v_div_fmas_f64 v[5:6], vcc, vcc, -vcc
+// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xd4,0xa8,0x81]
+
+v_div_fmas_f64 v[5:6], -vcc, -vcc, -vcc
+// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xd4,0xa8,0xe1]
+
+v_div_fmas_f64 v[5:6], |vcc|, vcc, vcc
+// CHECK: [0x05,0x01,0xe3,0xd1,0x6a,0xd4,0xa8,0x01]
+
+v_div_fmas_f64 v[5:6], vcc, |vcc|, vcc
+// CHECK: [0x05,0x02,0xe3,0xd1,0x6a,0xd4,0xa8,0x01]
+
+v_div_fmas_f64 v[5:6], vcc, vcc, |vcc|
+// CHECK: [0x05,0x04,0xe3,0xd1,0x6a,0xd4,0xa8,0x01]
+
+v_div_fmas_f64 v[5:6], |vcc|, |vcc|, |vcc|
+// CHECK: [0x05,0x07,0xe3,0xd1,0x6a,0xd4,0xa8,0x01]
+
+v_div_fmas_f64 v[5:6], vcc, vcc, vcc clamp
+// CHECK: [0x05,0x80,0xe3,0xd1,0x6a,0xd4,0xa8,0x01]
+
+v_div_fmas_f64 v[5:6], vcc, vcc, vcc mul:2
+// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xd4,0xa8,0x09]
+
+v_div_fmas_f64 v[5:6], vcc, vcc, vcc mul:4
+// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xd4,0xa8,0x11]
+
+v_div_fmas_f64 v[5:6], vcc, vcc, vcc div:2
+// CHECK: [0x05,0x00,0xe3,0xd1,0x6a,0xd4,0xa8,0x19]
+
+v_msad_u8 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x01,0x00,0x01,0x02]
+
+v_msad_u8 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xe4,0xd1,0x01,0x00,0x01,0x02]
+
+v_msad_u8 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x65,0x00,0x01,0x02]
+
+v_msad_u8 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x66,0x00,0x01,0x02]
+
+v_msad_u8 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x67,0x00,0x01,0x02]
+
+v_msad_u8 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x6a,0x00,0x01,0x02]
+
+v_msad_u8 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x6b,0x00,0x01,0x02]
+
+v_msad_u8 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x6c,0x00,0x01,0x02]
+
+v_msad_u8 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x6d,0x00,0x01,0x02]
+
+v_msad_u8 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x6e,0x00,0x01,0x02]
+
+v_msad_u8 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x6f,0x00,0x01,0x02]
+
+v_msad_u8 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x7b,0x00,0x01,0x02]
+
+v_msad_u8 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x7c,0x00,0x01,0x02]
+
+v_msad_u8 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x7e,0x00,0x01,0x02]
+
+v_msad_u8 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x7f,0x00,0x01,0x02]
+
+v_msad_u8 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x80,0x00,0x01,0x02]
+
+v_msad_u8 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0xc1,0x00,0x01,0x02]
+
+v_msad_u8 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0xf0,0x00,0x01,0x02]
+
+v_msad_u8 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0xf7,0x00,0x01,0x02]
+
+v_msad_u8 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x01,0x01,0x01,0x02]
+
+v_msad_u8 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0xff,0x01,0x01,0x02]
+
+v_msad_u8 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x01,0x82,0x01,0x02]
+
+v_msad_u8 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x01,0xe0,0x01,0x02]
+
+v_msad_u8 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x01,0xee,0x01,0x02]
+
+v_msad_u8 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x01,0x04,0x02,0x02]
+
+v_msad_u8 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x01,0xfe,0x03,0x02]
+
+v_msad_u8 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xe4,0xd1,0x01,0x00,0x05,0x03]
+
+v_msad_u8 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xe4,0xd1,0x01,0x00,0xc1,0x03]
+
+v_msad_u8 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xe4,0xd1,0x01,0x00,0xdd,0x03]
+
+v_msad_u8 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xe4,0xd1,0x01,0x00,0x0d,0x04]
+
+v_msad_u8 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xe4,0xd1,0x01,0x00,0xfd,0x07]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], 0, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0x02,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[254:255], s[2:3], 0, 0
+// CHECK: [0xfe,0x00,0xe5,0xd1,0x02,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], s[4:5], 0, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0x04,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], s[100:101], 0, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0x64,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], flat_scratch, 0, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0x66,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], vcc, 0, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0x6a,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], tba, 0, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0x6c,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], tma, 0, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0x6e,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], ttmp[10:11], 0, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0x7a,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], exec, 0, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0x7e,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], 0, 0, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0x80,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], -1, 0, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0xc1,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], 0.5, 0, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0xf0,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], -4.0, 0, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0xf7,0x00,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], v[1:2], 0, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0x01,0x01,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], v[254:255], 0, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0xfe,0x01,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], -1, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0x02,0x82,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], 0.5, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0x02,0xe0,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], -4.0, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0x02,0xee,0x01,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], v2, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0x02,0x04,0x02,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], v255, 0
+// CHECK: [0x05,0x00,0xe5,0xd1,0x02,0xfe,0x03,0x02]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], 0, -1
+// CHECK: [0x05,0x00,0xe5,0xd1,0x02,0x00,0x05,0x03]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], 0, 0.5
+// CHECK: [0x05,0x00,0xe5,0xd1,0x02,0x00,0xc1,0x03]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], 0, -4.0
+// CHECK: [0x05,0x00,0xe5,0xd1,0x02,0x00,0xdd,0x03]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], 0, v[3:4]
+// CHECK: [0x05,0x00,0xe5,0xd1,0x02,0x00,0x0d,0x04]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], 0, v[254:255]
+// CHECK: [0x05,0x00,0xe5,0xd1,0x02,0x00,0xf9,0x07]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0x02,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[254:255], s[2:3], 0, 0
+// CHECK: [0xfe,0x00,0xe6,0xd1,0x02,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], s[4:5], 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0x04,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], s[100:101], 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0x64,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], flat_scratch, 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0x66,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], vcc, 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0x6a,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], tba, 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0x6c,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], tma, 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0x6e,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], ttmp[10:11], 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0x7a,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], exec, 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0x7e,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], 0, 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0x80,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], -1, 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0xc1,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], 0.5, 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0xf0,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], -4.0, 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0xf7,0x00,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], v[1:2], 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0x01,0x01,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], v[254:255], 0, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0xfe,0x01,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], -1, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0x02,0x82,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], 0.5, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0x02,0xe0,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], -4.0, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0x02,0xee,0x01,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], v2, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0x02,0x04,0x02,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], v255, 0
+// CHECK: [0x05,0x00,0xe6,0xd1,0x02,0xfe,0x03,0x02]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], 0, -1
+// CHECK: [0x05,0x00,0xe6,0xd1,0x02,0x00,0x05,0x03]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], 0, 0.5
+// CHECK: [0x05,0x00,0xe6,0xd1,0x02,0x00,0xc1,0x03]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], 0, -4.0
+// CHECK: [0x05,0x00,0xe6,0xd1,0x02,0x00,0xdd,0x03]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], 0, v[3:4]
+// CHECK: [0x05,0x00,0xe6,0xd1,0x02,0x00,0x0d,0x04]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], 0, v[254:255]
+// CHECK: [0x05,0x00,0xe6,0xd1,0x02,0x00,0xf9,0x07]
+
+v_mqsad_u32_u8 v[5:8], 0, s2, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0x04,0x0c,0x04]
+
+v_mqsad_u32_u8 v[252:255], 0, s2, v[3:6]
+// CHECK: [0xfc,0x00,0xe7,0xd1,0x80,0x04,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], -1, s2, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0xc1,0x04,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0.5, s2, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0xf0,0x04,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], -4.0, s2, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0xf7,0x04,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], v[1:2], s2, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x01,0x05,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], v[254:255], s2, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0xfe,0x05,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, s101, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0xca,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, flat_scratch_lo, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0xcc,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, flat_scratch_hi, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0xce,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, vcc_lo, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0xd4,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, vcc_hi, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0xd6,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, tba_lo, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0xd8,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, tba_hi, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0xda,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, tma_lo, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0xdc,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, tma_hi, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0xde,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, ttmp11, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0xf6,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, m0, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0xf8,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, exec_lo, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0xfc,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, exec_hi, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0xfe,0x0c,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, 0, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0x00,0x0d,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, -1, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0x82,0x0d,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, 0.5, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0xe0,0x0d,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, -4.0, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0xee,0x0d,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, v2, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0x04,0x0e,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, v255, v[3:6]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0xfe,0x0f,0x04]
+
+v_mqsad_u32_u8 v[5:8], 0, s2, v[252:255]
+// CHECK: [0x05,0x00,0xe7,0xd1,0x80,0x04,0xf0,0x07]
+
+v_mad_f16 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_f16 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xea,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_f16 v5, s101, v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x65,0x04,0x0e,0x04]
+
+v_mad_f16 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x66,0x04,0x0e,0x04]
+
+v_mad_f16 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x67,0x04,0x0e,0x04]
+
+v_mad_f16 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x6a,0x04,0x0e,0x04]
+
+v_mad_f16 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x6b,0x04,0x0e,0x04]
+
+v_mad_f16 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x6c,0x04,0x0e,0x04]
+
+v_mad_f16 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x6d,0x04,0x0e,0x04]
+
+v_mad_f16 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x6e,0x04,0x0e,0x04]
+
+v_mad_f16 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x6f,0x04,0x0e,0x04]
+
+v_mad_f16 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x7b,0x04,0x0e,0x04]
+
+v_mad_f16 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x7c,0x04,0x0e,0x04]
+
+v_mad_f16 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x7e,0x04,0x0e,0x04]
+
+v_mad_f16 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x7f,0x04,0x0e,0x04]
+
+v_mad_f16 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0xfd,0x04,0x0e,0x04]
+
+v_mad_f16 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x05,0x0e,0x04]
+
+v_mad_f16 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0xff,0x05,0x0e,0x04]
+
+v_mad_f16 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x01,0xfe,0x0f,0x04]
+
+v_mad_f16 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x04,0xfe,0x07]
+
+v_mad_f16 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x04,0x0e,0x24]
+
+v_mad_f16 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x04,0x0e,0x44]
+
+v_mad_f16 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x04,0x0e,0x84]
+
+v_mad_f16 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xea,0xd1,0x01,0x04,0x0e,0xe4]
+
+v_mad_f16 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xea,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_f16 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xea,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_f16 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xea,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_f16 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xea,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_f16 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x80,0xea,0xd1,0x01,0x04,0x0e,0x04]
+
+v_mad_u16 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x01,0x00,0x01,0x02]
+
+v_mad_u16 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xeb,0xd1,0x01,0x00,0x01,0x02]
+
+v_mad_u16 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x65,0x00,0x01,0x02]
+
+v_mad_u16 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x66,0x00,0x01,0x02]
+
+v_mad_u16 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x67,0x00,0x01,0x02]
+
+v_mad_u16 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x6a,0x00,0x01,0x02]
+
+v_mad_u16 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x6b,0x00,0x01,0x02]
+
+v_mad_u16 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x6c,0x00,0x01,0x02]
+
+v_mad_u16 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x6d,0x00,0x01,0x02]
+
+v_mad_u16 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x6e,0x00,0x01,0x02]
+
+v_mad_u16 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x6f,0x00,0x01,0x02]
+
+v_mad_u16 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x7b,0x00,0x01,0x02]
+
+v_mad_u16 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x7c,0x00,0x01,0x02]
+
+v_mad_u16 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x7e,0x00,0x01,0x02]
+
+v_mad_u16 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x7f,0x00,0x01,0x02]
+
+v_mad_u16 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x80,0x00,0x01,0x02]
+
+v_mad_u16 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0xc1,0x00,0x01,0x02]
+
+v_mad_u16 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0xf0,0x00,0x01,0x02]
+
+v_mad_u16 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0xf7,0x00,0x01,0x02]
+
+v_mad_u16 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x01,0x01,0x01,0x02]
+
+v_mad_u16 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0xff,0x01,0x01,0x02]
+
+v_mad_u16 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x01,0x82,0x01,0x02]
+
+v_mad_u16 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x01,0xe0,0x01,0x02]
+
+v_mad_u16 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x01,0xee,0x01,0x02]
+
+v_mad_u16 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x01,0x04,0x02,0x02]
+
+v_mad_u16 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x01,0xfe,0x03,0x02]
+
+v_mad_u16 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xeb,0xd1,0x01,0x00,0x05,0x03]
+
+v_mad_u16 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xeb,0xd1,0x01,0x00,0xc1,0x03]
+
+v_mad_u16 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xeb,0xd1,0x01,0x00,0xdd,0x03]
+
+v_mad_u16 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xeb,0xd1,0x01,0x00,0x0d,0x04]
+
+v_mad_u16 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xeb,0xd1,0x01,0x00,0xfd,0x07]
+
+v_mad_i16 v5, s1, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x01,0x00,0x01,0x02]
+
+v_mad_i16 v255, s1, 0, 0
+// CHECK: [0xff,0x00,0xec,0xd1,0x01,0x00,0x01,0x02]
+
+v_mad_i16 v5, s101, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x65,0x00,0x01,0x02]
+
+v_mad_i16 v5, flat_scratch_lo, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x66,0x00,0x01,0x02]
+
+v_mad_i16 v5, flat_scratch_hi, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x67,0x00,0x01,0x02]
+
+v_mad_i16 v5, vcc_lo, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x6a,0x00,0x01,0x02]
+
+v_mad_i16 v5, vcc_hi, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x6b,0x00,0x01,0x02]
+
+v_mad_i16 v5, tba_lo, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x6c,0x00,0x01,0x02]
+
+v_mad_i16 v5, tba_hi, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x6d,0x00,0x01,0x02]
+
+v_mad_i16 v5, tma_lo, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x6e,0x00,0x01,0x02]
+
+v_mad_i16 v5, tma_hi, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x6f,0x00,0x01,0x02]
+
+v_mad_i16 v5, ttmp11, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x7b,0x00,0x01,0x02]
+
+v_mad_i16 v5, m0, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x7c,0x00,0x01,0x02]
+
+v_mad_i16 v5, exec_lo, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x7e,0x00,0x01,0x02]
+
+v_mad_i16 v5, exec_hi, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x7f,0x00,0x01,0x02]
+
+v_mad_i16 v5, 0, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x80,0x00,0x01,0x02]
+
+v_mad_i16 v5, -1, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0xc1,0x00,0x01,0x02]
+
+v_mad_i16 v5, 0.5, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0xf0,0x00,0x01,0x02]
+
+v_mad_i16 v5, -4.0, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0xf7,0x00,0x01,0x02]
+
+v_mad_i16 v5, v1, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x01,0x01,0x01,0x02]
+
+v_mad_i16 v5, v255, 0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0xff,0x01,0x01,0x02]
+
+v_mad_i16 v5, s1, -1, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x01,0x82,0x01,0x02]
+
+v_mad_i16 v5, s1, 0.5, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x01,0xe0,0x01,0x02]
+
+v_mad_i16 v5, s1, -4.0, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x01,0xee,0x01,0x02]
+
+v_mad_i16 v5, s1, v2, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x01,0x04,0x02,0x02]
+
+v_mad_i16 v5, s1, v255, 0
+// CHECK: [0x05,0x00,0xec,0xd1,0x01,0xfe,0x03,0x02]
+
+v_mad_i16 v5, s1, 0, -1
+// CHECK: [0x05,0x00,0xec,0xd1,0x01,0x00,0x05,0x03]
+
+v_mad_i16 v5, s1, 0, 0.5
+// CHECK: [0x05,0x00,0xec,0xd1,0x01,0x00,0xc1,0x03]
+
+v_mad_i16 v5, s1, 0, -4.0
+// CHECK: [0x05,0x00,0xec,0xd1,0x01,0x00,0xdd,0x03]
+
+v_mad_i16 v5, s1, 0, v3
+// CHECK: [0x05,0x00,0xec,0xd1,0x01,0x00,0x0d,0x04]
+
+v_mad_i16 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xec,0xd1,0x01,0x00,0xfd,0x07]
+
+v_fma_f16 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x04,0x0e,0x04]
+
+v_fma_f16 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xee,0xd1,0x01,0x04,0x0e,0x04]
+
+v_fma_f16 v5, s101, v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x65,0x04,0x0e,0x04]
+
+v_fma_f16 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x66,0x04,0x0e,0x04]
+
+v_fma_f16 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x67,0x04,0x0e,0x04]
+
+v_fma_f16 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x6a,0x04,0x0e,0x04]
+
+v_fma_f16 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x6b,0x04,0x0e,0x04]
+
+v_fma_f16 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x6c,0x04,0x0e,0x04]
+
+v_fma_f16 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x6d,0x04,0x0e,0x04]
+
+v_fma_f16 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x6e,0x04,0x0e,0x04]
+
+v_fma_f16 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x6f,0x04,0x0e,0x04]
+
+v_fma_f16 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x7b,0x04,0x0e,0x04]
+
+v_fma_f16 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x7c,0x04,0x0e,0x04]
+
+v_fma_f16 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x7e,0x04,0x0e,0x04]
+
+v_fma_f16 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x7f,0x04,0x0e,0x04]
+
+v_fma_f16 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0xfd,0x04,0x0e,0x04]
+
+v_fma_f16 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x05,0x0e,0x04]
+
+v_fma_f16 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0xff,0x05,0x0e,0x04]
+
+v_fma_f16 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x01,0xfe,0x0f,0x04]
+
+v_fma_f16 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x04,0xfe,0x07]
+
+v_fma_f16 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x04,0x0e,0x24]
+
+v_fma_f16 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x04,0x0e,0x44]
+
+v_fma_f16 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x04,0x0e,0x84]
+
+v_fma_f16 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xee,0xd1,0x01,0x04,0x0e,0xe4]
+
+v_fma_f16 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xee,0xd1,0x01,0x04,0x0e,0x04]
+
+v_fma_f16 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xee,0xd1,0x01,0x04,0x0e,0x04]
+
+v_fma_f16 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xee,0xd1,0x01,0x04,0x0e,0x04]
+
+v_fma_f16 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xee,0xd1,0x01,0x04,0x0e,0x04]
+
+v_fma_f16 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x80,0xee,0xd1,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, s1, v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v255, s1, v2, v3
+// CHECK: [0xff,0x00,0xef,0xd1,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, s101, v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x65,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, flat_scratch_lo, v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x66,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, flat_scratch_hi, v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x67,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, vcc_lo, v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x6a,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, vcc_hi, v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x6b,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, tba_lo, v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x6c,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, tba_hi, v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x6d,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, tma_lo, v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x6e,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, tma_hi, v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x6f,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, ttmp11, v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x7b,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, m0, v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x7c,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, exec_lo, v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x7e,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, exec_hi, v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x7f,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, scc, v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0xfd,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, v1, v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x05,0x0e,0x04]
+
+v_div_fixup_f16 v5, v255, v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0xff,0x05,0x0e,0x04]
+
+v_div_fixup_f16 v5, s1, v255, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x01,0xfe,0x0f,0x04]
+
+v_div_fixup_f16 v5, s1, v2, v255
+// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x04,0xfe,0x07]
+
+v_div_fixup_f16 v5, -s1, v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x04,0x0e,0x24]
+
+v_div_fixup_f16 v5, s1, -v2, v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x04,0x0e,0x44]
+
+v_div_fixup_f16 v5, s1, v2, -v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x04,0x0e,0x84]
+
+v_div_fixup_f16 v5, -s1, -v2, -v3
+// CHECK: [0x05,0x00,0xef,0xd1,0x01,0x04,0x0e,0xe4]
+
+v_div_fixup_f16 v5, |s1|, v2, v3
+// CHECK: [0x05,0x01,0xef,0xd1,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, s1, |v2|, v3
+// CHECK: [0x05,0x02,0xef,0xd1,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, s1, v2, |v3|
+// CHECK: [0x05,0x04,0xef,0xd1,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, |s1|, |v2|, |v3|
+// CHECK: [0x05,0x07,0xef,0xd1,0x01,0x04,0x0e,0x04]
+
+v_div_fixup_f16 v5, s1, v2, v3 clamp
+// CHECK: [0x05,0x80,0xef,0xd1,0x01,0x04,0x0e,0x04]
+
+v_cvt_pkaccum_u8_f32 v5, v1, s2
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0x05,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32 v255, v1, s2
+// CHECK: [0xff,0x00,0xf0,0xd1,0x01,0x05,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v255, s2
+// CHECK: [0x05,0x00,0xf0,0xd1,0xff,0x05,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, s101
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xcb,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xcd,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xcf,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xd5,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xd7,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xd9,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xdb,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xdd,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xdf,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xf7,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, m0
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xf9,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xfd,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xff,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, 0
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0x01,0x01,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, -1
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0x83,0x01,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, 0.5
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xe1,0x01,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, -4.0
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xef,0x01,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, scc
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xfb,0x01,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, v2
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0x05,0x02,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, v255
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0xff,0x03,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, -v1, s2
+// CHECK: [0x05,0x00,0xf0,0xd1,0x01,0x05,0x00,0x20]
+
+v_cvt_pkaccum_u8_f32 v5, |v1|, s2
+// CHECK: [0x05,0x01,0xf0,0xd1,0x01,0x05,0x00,0x00]
+
+v_cvt_pkaccum_u8_f32 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0xf0,0xd1,0x01,0x05,0x00,0x00]
+
+v_add_f64 v[5:6], s[4:5], s[4:5]
+// CHECK: [0x05,0x00,0x80,0xd2,0x04,0x08,0x00,0x00]
+
+v_add_f64 v[254:255], s[4:5], s[4:5]
+// CHECK: [0xfe,0x00,0x80,0xd2,0x04,0x08,0x00,0x00]
+
+v_add_f64 v[5:6], v[1:2], s[4:5]
+// CHECK: [0x05,0x00,0x80,0xd2,0x01,0x09,0x00,0x00]
+
+v_add_f64 v[5:6], v[254:255], s[4:5]
+// CHECK: [0x05,0x00,0x80,0xd2,0xfe,0x09,0x00,0x00]
+
+v_add_f64 v[5:6], s[4:5], v[2:3]
+// CHECK: [0x05,0x00,0x80,0xd2,0x04,0x04,0x02,0x00]
+
+v_add_f64 v[5:6], s[4:5], v[254:255]
+// CHECK: [0x05,0x00,0x80,0xd2,0x04,0xfc,0x03,0x00]
+
+v_add_f64 v[5:6], -s[4:5], s[4:5]
+// CHECK: [0x05,0x00,0x80,0xd2,0x04,0x08,0x00,0x20]
+
+v_add_f64 v[5:6], s[4:5], -s[4:5]
+// CHECK: [0x05,0x00,0x80,0xd2,0x04,0x08,0x00,0x40]
+
+v_add_f64 v[5:6], -s[4:5], -s[4:5]
+// CHECK: [0x05,0x00,0x80,0xd2,0x04,0x08,0x00,0x60]
+
+v_add_f64 v[5:6], |s[4:5]|, s[4:5]
+// CHECK: [0x05,0x01,0x80,0xd2,0x04,0x08,0x00,0x00]
+
+v_add_f64 v[5:6], s[4:5], |s[4:5]|
+// CHECK: [0x05,0x02,0x80,0xd2,0x04,0x08,0x00,0x00]
+
+v_add_f64 v[5:6], |s[4:5]|, |s[4:5]|
+// CHECK: [0x05,0x03,0x80,0xd2,0x04,0x08,0x00,0x00]
+
+v_add_f64 v[5:6], s[4:5], s[4:5] clamp
+// CHECK: [0x05,0x80,0x80,0xd2,0x04,0x08,0x00,0x00]
+
+v_add_f64 v[5:6], s[4:5], s[4:5] mul:2
+// CHECK: [0x05,0x00,0x80,0xd2,0x04,0x08,0x00,0x08]
+
+v_add_f64 v[5:6], s[4:5], s[4:5] mul:4
+// CHECK: [0x05,0x00,0x80,0xd2,0x04,0x08,0x00,0x10]
+
+v_add_f64 v[5:6], s[4:5], s[4:5] div:2
+// CHECK: [0x05,0x00,0x80,0xd2,0x04,0x08,0x00,0x18]
+
+v_mul_f64 v[5:6], s[4:5], s[4:5]
+// CHECK: [0x05,0x00,0x81,0xd2,0x04,0x08,0x00,0x00]
+
+v_mul_f64 v[254:255], s[4:5], s[4:5]
+// CHECK: [0xfe,0x00,0x81,0xd2,0x04,0x08,0x00,0x00]
+
+v_mul_f64 v[5:6], v[1:2], s[4:5]
+// CHECK: [0x05,0x00,0x81,0xd2,0x01,0x09,0x00,0x00]
+
+v_mul_f64 v[5:6], v[254:255], s[4:5]
+// CHECK: [0x05,0x00,0x81,0xd2,0xfe,0x09,0x00,0x00]
+
+v_mul_f64 v[5:6], s[4:5], v[2:3]
+// CHECK: [0x05,0x00,0x81,0xd2,0x04,0x04,0x02,0x00]
+
+v_mul_f64 v[5:6], s[4:5], v[254:255]
+// CHECK: [0x05,0x00,0x81,0xd2,0x04,0xfc,0x03,0x00]
+
+v_mul_f64 v[5:6], -s[4:5], s[4:5]
+// CHECK: [0x05,0x00,0x81,0xd2,0x04,0x08,0x00,0x20]
+
+v_mul_f64 v[5:6], s[4:5], -s[4:5]
+// CHECK: [0x05,0x00,0x81,0xd2,0x04,0x08,0x00,0x40]
+
+v_mul_f64 v[5:6], -s[4:5], -s[4:5]
+// CHECK: [0x05,0x00,0x81,0xd2,0x04,0x08,0x00,0x60]
+
+v_mul_f64 v[5:6], |s[4:5]|, s[4:5]
+// CHECK: [0x05,0x01,0x81,0xd2,0x04,0x08,0x00,0x00]
+
+v_mul_f64 v[5:6], s[4:5], |s[4:5]|
+// CHECK: [0x05,0x02,0x81,0xd2,0x04,0x08,0x00,0x00]
+
+v_mul_f64 v[5:6], |s[4:5]|, |s[4:5]|
+// CHECK: [0x05,0x03,0x81,0xd2,0x04,0x08,0x00,0x00]
+
+v_mul_f64 v[5:6], s[4:5], s[4:5] clamp
+// CHECK: [0x05,0x80,0x81,0xd2,0x04,0x08,0x00,0x00]
+
+v_mul_f64 v[5:6], s[4:5], s[4:5] mul:2
+// CHECK: [0x05,0x00,0x81,0xd2,0x04,0x08,0x00,0x08]
+
+v_mul_f64 v[5:6], s[4:5], s[4:5] mul:4
+// CHECK: [0x05,0x00,0x81,0xd2,0x04,0x08,0x00,0x10]
+
+v_mul_f64 v[5:6], s[4:5], s[4:5] div:2
+// CHECK: [0x05,0x00,0x81,0xd2,0x04,0x08,0x00,0x18]
+
+v_min_f64 v[5:6], s[4:5], s[4:5]
+// CHECK: [0x05,0x00,0x82,0xd2,0x04,0x08,0x00,0x00]
+
+v_min_f64 v[254:255], s[4:5], s[4:5]
+// CHECK: [0xfe,0x00,0x82,0xd2,0x04,0x08,0x00,0x00]
+
+v_min_f64 v[5:6], v[1:2], s[4:5]
+// CHECK: [0x05,0x00,0x82,0xd2,0x01,0x09,0x00,0x00]
+
+v_min_f64 v[5:6], v[254:255], s[4:5]
+// CHECK: [0x05,0x00,0x82,0xd2,0xfe,0x09,0x00,0x00]
+
+v_min_f64 v[5:6], s[4:5], v[2:3]
+// CHECK: [0x05,0x00,0x82,0xd2,0x04,0x04,0x02,0x00]
+
+v_min_f64 v[5:6], s[4:5], v[254:255]
+// CHECK: [0x05,0x00,0x82,0xd2,0x04,0xfc,0x03,0x00]
+
+v_min_f64 v[5:6], -s[4:5], s[4:5]
+// CHECK: [0x05,0x00,0x82,0xd2,0x04,0x08,0x00,0x20]
+
+v_min_f64 v[5:6], s[4:5], -s[4:5]
+// CHECK: [0x05,0x00,0x82,0xd2,0x04,0x08,0x00,0x40]
+
+v_min_f64 v[5:6], -s[4:5], -s[4:5]
+// CHECK: [0x05,0x00,0x82,0xd2,0x04,0x08,0x00,0x60]
+
+v_min_f64 v[5:6], |s[4:5]|, s[4:5]
+// CHECK: [0x05,0x01,0x82,0xd2,0x04,0x08,0x00,0x00]
+
+v_min_f64 v[5:6], s[4:5], |s[4:5]|
+// CHECK: [0x05,0x02,0x82,0xd2,0x04,0x08,0x00,0x00]
+
+v_min_f64 v[5:6], |s[4:5]|, |s[4:5]|
+// CHECK: [0x05,0x03,0x82,0xd2,0x04,0x08,0x00,0x00]
+
+v_min_f64 v[5:6], s[4:5], s[4:5] clamp
+// CHECK: [0x05,0x80,0x82,0xd2,0x04,0x08,0x00,0x00]
+
+v_min_f64 v[5:6], s[4:5], s[4:5] mul:2
+// CHECK: [0x05,0x00,0x82,0xd2,0x04,0x08,0x00,0x08]
+
+v_min_f64 v[5:6], s[4:5], s[4:5] mul:4
+// CHECK: [0x05,0x00,0x82,0xd2,0x04,0x08,0x00,0x10]
+
+v_min_f64 v[5:6], s[4:5], s[4:5] div:2
+// CHECK: [0x05,0x00,0x82,0xd2,0x04,0x08,0x00,0x18]
+
+v_max_f64 v[5:6], s[4:5], s[4:5]
+// CHECK: [0x05,0x00,0x83,0xd2,0x04,0x08,0x00,0x00]
+
+v_max_f64 v[254:255], s[4:5], s[4:5]
+// CHECK: [0xfe,0x00,0x83,0xd2,0x04,0x08,0x00,0x00]
+
+v_max_f64 v[5:6], v[1:2], s[4:5]
+// CHECK: [0x05,0x00,0x83,0xd2,0x01,0x09,0x00,0x00]
+
+v_max_f64 v[5:6], v[254:255], s[4:5]
+// CHECK: [0x05,0x00,0x83,0xd2,0xfe,0x09,0x00,0x00]
+
+v_max_f64 v[5:6], s[4:5], v[2:3]
+// CHECK: [0x05,0x00,0x83,0xd2,0x04,0x04,0x02,0x00]
+
+v_max_f64 v[5:6], s[4:5], v[254:255]
+// CHECK: [0x05,0x00,0x83,0xd2,0x04,0xfc,0x03,0x00]
+
+v_max_f64 v[5:6], -s[4:5], s[4:5]
+// CHECK: [0x05,0x00,0x83,0xd2,0x04,0x08,0x00,0x20]
+
+v_max_f64 v[5:6], s[4:5], -s[4:5]
+// CHECK: [0x05,0x00,0x83,0xd2,0x04,0x08,0x00,0x40]
+
+v_max_f64 v[5:6], -s[4:5], -s[4:5]
+// CHECK: [0x05,0x00,0x83,0xd2,0x04,0x08,0x00,0x60]
+
+v_max_f64 v[5:6], |s[4:5]|, s[4:5]
+// CHECK: [0x05,0x01,0x83,0xd2,0x04,0x08,0x00,0x00]
+
+v_max_f64 v[5:6], s[4:5], |s[4:5]|
+// CHECK: [0x05,0x02,0x83,0xd2,0x04,0x08,0x00,0x00]
+
+v_max_f64 v[5:6], |s[4:5]|, |s[4:5]|
+// CHECK: [0x05,0x03,0x83,0xd2,0x04,0x08,0x00,0x00]
+
+v_max_f64 v[5:6], s[4:5], s[4:5] clamp
+// CHECK: [0x05,0x80,0x83,0xd2,0x04,0x08,0x00,0x00]
+
+v_max_f64 v[5:6], s[4:5], s[4:5] mul:2
+// CHECK: [0x05,0x00,0x83,0xd2,0x04,0x08,0x00,0x08]
+
+v_max_f64 v[5:6], s[4:5], s[4:5] mul:4
+// CHECK: [0x05,0x00,0x83,0xd2,0x04,0x08,0x00,0x10]
+
+v_max_f64 v[5:6], s[4:5], s[4:5] div:2
+// CHECK: [0x05,0x00,0x83,0xd2,0x04,0x08,0x00,0x18]
+
+v_ldexp_f64 v[5:6], 0, s2
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0x04,0x00,0x00]
+
+v_ldexp_f64 v[254:255], 0, s2
+// CHECK: [0xfe,0x00,0x84,0xd2,0x80,0x04,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0.5, s2
+// CHECK: [0x05,0x00,0x84,0xd2,0xf0,0x04,0x00,0x00]
+
+v_ldexp_f64 v[5:6], v[1:2], s2
+// CHECK: [0x05,0x00,0x84,0xd2,0x01,0x05,0x00,0x00]
+
+v_ldexp_f64 v[5:6], v[254:255], s2
+// CHECK: [0x05,0x00,0x84,0xd2,0xfe,0x05,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, s101
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0xca,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0xcc,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0xce,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, vcc_lo
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0xd4,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, vcc_hi
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0xd6,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, tba_lo
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0xd8,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, tba_hi
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0xda,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, tma_lo
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0xdc,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, tma_hi
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0xde,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, ttmp11
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0xf6,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, m0
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0xf8,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, exec_lo
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0xfc,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, exec_hi
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0xfe,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, 0
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0x00,0x01,0x00]
+
+v_ldexp_f64 v[5:6], 0, -1
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0x82,0x01,0x00]
+
+v_ldexp_f64 v[5:6], 0, 0.5
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0xe0,0x01,0x00]
+
+v_ldexp_f64 v[5:6], 0, -4.0
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0xee,0x01,0x00]
+
+v_ldexp_f64 v[5:6], 0, scc
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0xfa,0x01,0x00]
+
+v_ldexp_f64 v[5:6], 0, v2
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0x04,0x02,0x00]
+
+v_ldexp_f64 v[5:6], 0, v255
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0xfe,0x03,0x00]
+
+v_ldexp_f64 v[5:6], 0, s2 clamp
+// CHECK: [0x05,0x80,0x84,0xd2,0x80,0x04,0x00,0x00]
+
+v_ldexp_f64 v[5:6], 0, s2 mul:2
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0x04,0x00,0x08]
+
+v_ldexp_f64 v[5:6], 0, s2 mul:4
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0x04,0x00,0x10]
+
+v_ldexp_f64 v[5:6], 0, s2 div:2
+// CHECK: [0x05,0x00,0x84,0xd2,0x80,0x04,0x00,0x18]
+
+v_mul_lo_u32 v5, 0, s2
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_lo_u32 v255, 0, s2
+// CHECK: [0xff,0x00,0x85,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_lo_u32 v5, -1, s2
+// CHECK: [0x05,0x00,0x85,0xd2,0xc1,0x04,0x00,0x00]
+
+v_mul_lo_u32 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x85,0xd2,0xf0,0x04,0x00,0x00]
+
+v_mul_lo_u32 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x85,0xd2,0xf7,0x04,0x00,0x00]
+
+v_mul_lo_u32 v5, v1, s2
+// CHECK: [0x05,0x00,0x85,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_lo_u32 v5, v255, s2
+// CHECK: [0x05,0x00,0x85,0xd2,0xff,0x05,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, s101
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0xca,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0xcc,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0xce,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0xd4,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0xd6,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0xd8,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0xda,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0xdc,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0xde,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0xf6,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, m0
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0xf8,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0xfc,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0xfe,0x00,0x00]
+
+v_mul_lo_u32 v5, 0, 0
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0x00,0x01,0x00]
+
+v_mul_lo_u32 v5, 0, -1
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0x82,0x01,0x00]
+
+v_mul_lo_u32 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0xe0,0x01,0x00]
+
+v_mul_lo_u32 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0xee,0x01,0x00]
+
+v_mul_lo_u32 v5, 0, v2
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0x04,0x02,0x00]
+
+v_mul_lo_u32 v5, 0, v255
+// CHECK: [0x05,0x00,0x85,0xd2,0x80,0xfe,0x03,0x00]
+
+v_mul_hi_u32 v5, 0, s2
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_hi_u32 v255, 0, s2
+// CHECK: [0xff,0x00,0x86,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_hi_u32 v5, -1, s2
+// CHECK: [0x05,0x00,0x86,0xd2,0xc1,0x04,0x00,0x00]
+
+v_mul_hi_u32 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x86,0xd2,0xf0,0x04,0x00,0x00]
+
+v_mul_hi_u32 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x86,0xd2,0xf7,0x04,0x00,0x00]
+
+v_mul_hi_u32 v5, v1, s2
+// CHECK: [0x05,0x00,0x86,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_hi_u32 v5, v255, s2
+// CHECK: [0x05,0x00,0x86,0xd2,0xff,0x05,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, s101
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0xca,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0xcc,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0xce,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0xd4,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0xd6,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0xd8,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0xda,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0xdc,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0xde,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0xf6,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, m0
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0xf8,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0xfc,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0xfe,0x00,0x00]
+
+v_mul_hi_u32 v5, 0, 0
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0x00,0x01,0x00]
+
+v_mul_hi_u32 v5, 0, -1
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0x82,0x01,0x00]
+
+v_mul_hi_u32 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0xe0,0x01,0x00]
+
+v_mul_hi_u32 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0xee,0x01,0x00]
+
+v_mul_hi_u32 v5, 0, v2
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0x04,0x02,0x00]
+
+v_mul_hi_u32 v5, 0, v255
+// CHECK: [0x05,0x00,0x86,0xd2,0x80,0xfe,0x03,0x00]
+
+v_mul_hi_i32 v5, 0, s2
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_hi_i32 v255, 0, s2
+// CHECK: [0xff,0x00,0x87,0xd2,0x80,0x04,0x00,0x00]
+
+v_mul_hi_i32 v5, -1, s2
+// CHECK: [0x05,0x00,0x87,0xd2,0xc1,0x04,0x00,0x00]
+
+v_mul_hi_i32 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x87,0xd2,0xf0,0x04,0x00,0x00]
+
+v_mul_hi_i32 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x87,0xd2,0xf7,0x04,0x00,0x00]
+
+v_mul_hi_i32 v5, v1, s2
+// CHECK: [0x05,0x00,0x87,0xd2,0x01,0x05,0x00,0x00]
+
+v_mul_hi_i32 v5, v255, s2
+// CHECK: [0x05,0x00,0x87,0xd2,0xff,0x05,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, s101
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0xca,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0xcc,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0xce,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0xd4,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0xd6,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0xd8,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0xda,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0xdc,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0xde,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0xf6,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, m0
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0xf8,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0xfc,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0xfe,0x00,0x00]
+
+v_mul_hi_i32 v5, 0, 0
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0x00,0x01,0x00]
+
+v_mul_hi_i32 v5, 0, -1
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0x82,0x01,0x00]
+
+v_mul_hi_i32 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0xe0,0x01,0x00]
+
+v_mul_hi_i32 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0xee,0x01,0x00]
+
+v_mul_hi_i32 v5, 0, v2
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0x04,0x02,0x00]
+
+v_mul_hi_i32 v5, 0, v255
+// CHECK: [0x05,0x00,0x87,0xd2,0x80,0xfe,0x03,0x00]
+
+v_ldexp_f32 v5, v1, s2
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x05,0x00,0x00]
+
+v_ldexp_f32 v255, v1, s2
+// CHECK: [0xff,0x00,0x88,0xd2,0x01,0x05,0x00,0x00]
+
+v_ldexp_f32 v5, v255, s2
+// CHECK: [0x05,0x00,0x88,0xd2,0xff,0x05,0x00,0x00]
+
+v_ldexp_f32 v5, v1, s101
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xcb,0x00,0x00]
+
+v_ldexp_f32 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xcd,0x00,0x00]
+
+v_ldexp_f32 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xcf,0x00,0x00]
+
+v_ldexp_f32 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xd5,0x00,0x00]
+
+v_ldexp_f32 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xd7,0x00,0x00]
+
+v_ldexp_f32 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xd9,0x00,0x00]
+
+v_ldexp_f32 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xdb,0x00,0x00]
+
+v_ldexp_f32 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xdd,0x00,0x00]
+
+v_ldexp_f32 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xdf,0x00,0x00]
+
+v_ldexp_f32 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xf7,0x00,0x00]
+
+v_ldexp_f32 v5, v1, m0
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xf9,0x00,0x00]
+
+v_ldexp_f32 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xfd,0x00,0x00]
+
+v_ldexp_f32 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xff,0x00,0x00]
+
+v_ldexp_f32 v5, v1, 0
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x01,0x01,0x00]
+
+v_ldexp_f32 v5, v1, -1
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x83,0x01,0x00]
+
+v_ldexp_f32 v5, v1, 0.5
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xe1,0x01,0x00]
+
+v_ldexp_f32 v5, v1, -4.0
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xef,0x01,0x00]
+
+v_ldexp_f32 v5, v1, scc
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xfb,0x01,0x00]
+
+v_ldexp_f32 v5, v1, v2
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x05,0x02,0x00]
+
+v_ldexp_f32 v5, v1, v255
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0xff,0x03,0x00]
+
+v_ldexp_f32 v5, -v1, s2
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x05,0x00,0x20]
+
+v_ldexp_f32 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x88,0xd2,0x01,0x05,0x00,0x00]
+
+v_ldexp_f32 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x88,0xd2,0x01,0x05,0x00,0x00]
+
+v_ldexp_f32 v5, v1, s2 mul:2
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x05,0x00,0x08]
+
+v_ldexp_f32 v5, v1, s2 mul:4
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x05,0x00,0x10]
+
+v_ldexp_f32 v5, v1, s2 div:2
+// CHECK: [0x05,0x00,0x88,0xd2,0x01,0x05,0x00,0x18]
+
+v_readlane_b32 s5, v1, s2
+// CHECK: [0x05,0x00,0x89,0xd2,0x01,0x05,0x00,0x00]
+
+v_readlane_b32 s101, v1, s2
+// CHECK: [0x65,0x00,0x89,0xd2,0x01,0x05,0x00,0x00]
+
+v_readlane_b32 flat_scratch_lo, v1, s2
+// CHECK: [0x66,0x00,0x89,0xd2,0x01,0x05,0x00,0x00]
+
+v_readlane_b32 flat_scratch_hi, v1, s2
+// CHECK: [0x67,0x00,0x89,0xd2,0x01,0x05,0x00,0x00]
+
+v_readlane_b32 tba_lo, v1, s2
+// CHECK: [0x6c,0x00,0x89,0xd2,0x01,0x05,0x00,0x00]
+
+v_readlane_b32 tba_hi, v1, s2
+// CHECK: [0x6d,0x00,0x89,0xd2,0x01,0x05,0x00,0x00]
+
+v_readlane_b32 tma_lo, v1, s2
+// CHECK: [0x6e,0x00,0x89,0xd2,0x01,0x05,0x00,0x00]
+
+v_readlane_b32 tma_hi, v1, s2
+// CHECK: [0x6f,0x00,0x89,0xd2,0x01,0x05,0x00,0x00]
+
+v_readlane_b32 ttmp11, v1, s2
+// CHECK: [0x7b,0x00,0x89,0xd2,0x01,0x05,0x00,0x00]
+
+v_readlane_b32 s5, v255, s2
+// CHECK: [0x05,0x00,0x89,0xd2,0xff,0x05,0x00,0x00]
+
+v_readlane_b32 s5, v1, s101
+// CHECK: [0x05,0x00,0x89,0xd2,0x01,0xcb,0x00,0x00]
+
+v_readlane_b32 s5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x89,0xd2,0x01,0xcd,0x00,0x00]
+
+v_readlane_b32 s5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x89,0xd2,0x01,0xcf,0x00,0x00]
+
+v_readlane_b32 s5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x89,0xd2,0x01,0xd5,0x00,0x00]
+
+v_readlane_b32 s5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x89,0xd2,0x01,0xd7,0x00,0x00]
+
+v_readlane_b32 s5, v1, tba_lo
+// CHECK: [0x05,0x00,0x89,0xd2,0x01,0xd9,0x00,0x00]
+
+v_readlane_b32 s5, v1, tba_hi
+// CHECK: [0x05,0x00,0x89,0xd2,0x01,0xdb,0x00,0x00]
+
+v_readlane_b32 s5, v1, tma_lo
+// CHECK: [0x05,0x00,0x89,0xd2,0x01,0xdd,0x00,0x00]
+
+v_readlane_b32 s5, v1, tma_hi
+// CHECK: [0x05,0x00,0x89,0xd2,0x01,0xdf,0x00,0x00]
+
+v_readlane_b32 s5, v1, ttmp11
+// CHECK: [0x05,0x00,0x89,0xd2,0x01,0xf7,0x00,0x00]
+
+v_readlane_b32 s5, v1, m0
+// CHECK: [0x05,0x00,0x89,0xd2,0x01,0xf9,0x00,0x00]
+
+v_readlane_b32 s5, v1, 0
+// CHECK: [0x05,0x00,0x89,0xd2,0x01,0x01,0x01,0x00]
+
+v_writelane_b32 v5, s1, 0
+// CHECK: [0x05,0x00,0x8a,0xd2,0x01,0x00,0x01,0x00]
+
+v_writelane_b32 v255, s1, 0
+// CHECK: [0xff,0x00,0x8a,0xd2,0x01,0x00,0x01,0x00]
+
+v_writelane_b32 v5, s101, 0
+// CHECK: [0x05,0x00,0x8a,0xd2,0x65,0x00,0x01,0x00]
+
+v_writelane_b32 v5, flat_scratch_lo, 0
+// CHECK: [0x05,0x00,0x8a,0xd2,0x66,0x00,0x01,0x00]
+
+v_writelane_b32 v5, flat_scratch_hi, 0
+// CHECK: [0x05,0x00,0x8a,0xd2,0x67,0x00,0x01,0x00]
+
+v_writelane_b32 v5, vcc_lo, 0
+// CHECK: [0x05,0x00,0x8a,0xd2,0x6a,0x00,0x01,0x00]
+
+v_writelane_b32 v5, vcc_hi, 0
+// CHECK: [0x05,0x00,0x8a,0xd2,0x6b,0x00,0x01,0x00]
+
+v_writelane_b32 v5, tba_lo, 0
+// CHECK: [0x05,0x00,0x8a,0xd2,0x6c,0x00,0x01,0x00]
+
+v_writelane_b32 v5, tba_hi, 0
+// CHECK: [0x05,0x00,0x8a,0xd2,0x6d,0x00,0x01,0x00]
+
+v_writelane_b32 v5, tma_lo, 0
+// CHECK: [0x05,0x00,0x8a,0xd2,0x6e,0x00,0x01,0x00]
+
+v_writelane_b32 v5, tma_hi, 0
+// CHECK: [0x05,0x00,0x8a,0xd2,0x6f,0x00,0x01,0x00]
+
+v_writelane_b32 v5, ttmp11, 0
+// CHECK: [0x05,0x00,0x8a,0xd2,0x7b,0x00,0x01,0x00]
+
+v_writelane_b32 v5, m0, 0
+// CHECK: [0x05,0x00,0x8a,0xd2,0x7c,0x00,0x01,0x00]
+
+v_writelane_b32 v5, exec_lo, 0
+// CHECK: [0x05,0x00,0x8a,0xd2,0x7e,0x00,0x01,0x00]
+
+v_writelane_b32 v5, exec_hi, 0
+// CHECK: [0x05,0x00,0x8a,0xd2,0x7f,0x00,0x01,0x00]
+
+v_bcnt_u32_b32 v5, 0, s2
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0x04,0x00,0x00]
+
+v_bcnt_u32_b32 v255, 0, s2
+// CHECK: [0xff,0x00,0x8b,0xd2,0x80,0x04,0x00,0x00]
+
+v_bcnt_u32_b32 v5, -1, s2
+// CHECK: [0x05,0x00,0x8b,0xd2,0xc1,0x04,0x00,0x00]
+
+v_bcnt_u32_b32 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x8b,0xd2,0xf0,0x04,0x00,0x00]
+
+v_bcnt_u32_b32 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x8b,0xd2,0xf7,0x04,0x00,0x00]
+
+v_bcnt_u32_b32 v5, v1, s2
+// CHECK: [0x05,0x00,0x8b,0xd2,0x01,0x05,0x00,0x00]
+
+v_bcnt_u32_b32 v5, v255, s2
+// CHECK: [0x05,0x00,0x8b,0xd2,0xff,0x05,0x00,0x00]
+
+v_bcnt_u32_b32 v5, 0, s101
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0xca,0x00,0x00]
+
+v_bcnt_u32_b32 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0xcc,0x00,0x00]
+
+v_bcnt_u32_b32 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0xce,0x00,0x00]
+
+v_bcnt_u32_b32 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0xd4,0x00,0x00]
+
+v_bcnt_u32_b32 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0xd6,0x00,0x00]
+
+v_bcnt_u32_b32 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0xd8,0x00,0x00]
+
+v_bcnt_u32_b32 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0xda,0x00,0x00]
+
+v_bcnt_u32_b32 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0xdc,0x00,0x00]
+
+v_bcnt_u32_b32 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0xde,0x00,0x00]
+
+v_bcnt_u32_b32 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0xf6,0x00,0x00]
+
+v_bcnt_u32_b32 v5, 0, m0
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0xf8,0x00,0x00]
+
+v_bcnt_u32_b32 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0xfc,0x00,0x00]
+
+v_bcnt_u32_b32 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0xfe,0x00,0x00]
+
+v_bcnt_u32_b32 v5, 0, 0
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0x00,0x01,0x00]
+
+v_bcnt_u32_b32 v5, 0, -1
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0x82,0x01,0x00]
+
+v_bcnt_u32_b32 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0xe0,0x01,0x00]
+
+v_bcnt_u32_b32 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0xee,0x01,0x00]
+
+v_bcnt_u32_b32 v5, 0, v2
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0x04,0x02,0x00]
+
+v_bcnt_u32_b32 v5, 0, v255
+// CHECK: [0x05,0x00,0x8b,0xd2,0x80,0xfe,0x03,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, s2
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0x04,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v255, 0, s2
+// CHECK: [0xff,0x00,0x8c,0xd2,0x80,0x04,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, -1, s2
+// CHECK: [0x05,0x00,0x8c,0xd2,0xc1,0x04,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x8c,0xd2,0xf0,0x04,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x8c,0xd2,0xf7,0x04,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, v1, s2
+// CHECK: [0x05,0x00,0x8c,0xd2,0x01,0x05,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, v255, s2
+// CHECK: [0x05,0x00,0x8c,0xd2,0xff,0x05,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, s101
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0xca,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0xcc,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0xce,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0xd4,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0xd6,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0xd8,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0xda,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0xdc,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0xde,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0xf6,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, m0
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0xf8,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0xfc,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0xfe,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, 0
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0x00,0x01,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, -1
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0x82,0x01,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0xe0,0x01,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0xee,0x01,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, v2
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0x04,0x02,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0, v255
+// CHECK: [0x05,0x00,0x8c,0xd2,0x80,0xfe,0x03,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, s2
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0x04,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v255, 0, s2
+// CHECK: [0xff,0x00,0x8d,0xd2,0x80,0x04,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, -1, s2
+// CHECK: [0x05,0x00,0x8d,0xd2,0xc1,0x04,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x8d,0xd2,0xf0,0x04,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x8d,0xd2,0xf7,0x04,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, v1, s2
+// CHECK: [0x05,0x00,0x8d,0xd2,0x01,0x05,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, v255, s2
+// CHECK: [0x05,0x00,0x8d,0xd2,0xff,0x05,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, s101
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0xca,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0xcc,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0xce,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0xd4,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0xd6,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0xd8,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0xda,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0xdc,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0xde,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0xf6,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, m0
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0xf8,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0xfc,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0xfe,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, 0
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0x00,0x01,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, -1
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0x82,0x01,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0xe0,0x01,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0xee,0x01,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, v2
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0x04,0x02,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0, v255
+// CHECK: [0x05,0x00,0x8d,0xd2,0x80,0xfe,0x03,0x00]
+
+v_lshlrev_b64 v[5:6], 0, s[4:5]
+// CHECK: [0x05,0x00,0x8f,0xd2,0x80,0x08,0x00,0x00]
+
+v_lshlrev_b64 v[254:255], 0, s[4:5]
+// CHECK: [0xfe,0x00,0x8f,0xd2,0x80,0x08,0x00,0x00]
+
+v_lshlrev_b64 v[5:6], -1, s[4:5]
+// CHECK: [0x05,0x00,0x8f,0xd2,0xc1,0x08,0x00,0x00]
+
+v_lshlrev_b64 v[5:6], 0.5, s[4:5]
+// CHECK: [0x05,0x00,0x8f,0xd2,0xf0,0x08,0x00,0x00]
+
+v_lshlrev_b64 v[5:6], -4.0, s[4:5]
+// CHECK: [0x05,0x00,0x8f,0xd2,0xf7,0x08,0x00,0x00]
+
+v_lshlrev_b64 v[5:6], v1, s[4:5]
+// CHECK: [0x05,0x00,0x8f,0xd2,0x01,0x09,0x00,0x00]
+
+v_lshlrev_b64 v[5:6], v255, s[4:5]
+// CHECK: [0x05,0x00,0x8f,0xd2,0xff,0x09,0x00,0x00]
+
+v_lshlrev_b64 v[5:6], 0, s[6:7]
+// CHECK: [0x05,0x00,0x8f,0xd2,0x80,0x0c,0x00,0x00]
+
+v_lshlrev_b64 v[5:6], 0, s[100:101]
+// CHECK: [0x05,0x00,0x8f,0xd2,0x80,0xc8,0x00,0x00]
+
+v_lshlrev_b64 v[5:6], 0, flat_scratch
+// CHECK: [0x05,0x00,0x8f,0xd2,0x80,0xcc,0x00,0x00]
+
+v_lshlrev_b64 v[5:6], 0, vcc
+// CHECK: [0x05,0x00,0x8f,0xd2,0x80,0xd4,0x00,0x00]
+
+v_lshlrev_b64 v[5:6], 0, tba
+// CHECK: [0x05,0x00,0x8f,0xd2,0x80,0xd8,0x00,0x00]
+
+v_lshlrev_b64 v[5:6], 0, tma
+// CHECK: [0x05,0x00,0x8f,0xd2,0x80,0xdc,0x00,0x00]
+
+v_lshlrev_b64 v[5:6], 0, ttmp[10:11]
+// CHECK: [0x05,0x00,0x8f,0xd2,0x80,0xf4,0x00,0x00]
+
+v_lshlrev_b64 v[5:6], 0, exec
+// CHECK: [0x05,0x00,0x8f,0xd2,0x80,0xfc,0x00,0x00]
+
+v_lshlrev_b64 v[5:6], 0, 0
+// CHECK: [0x05,0x00,0x8f,0xd2,0x80,0x00,0x01,0x00]
+
+v_lshlrev_b64 v[5:6], 0, -1
+// CHECK: [0x05,0x00,0x8f,0xd2,0x80,0x82,0x01,0x00]
+
+v_lshlrev_b64 v[5:6], 0, 0.5
+// CHECK: [0x05,0x00,0x8f,0xd2,0x80,0xe0,0x01,0x00]
+
+v_lshlrev_b64 v[5:6], 0, -4.0
+// CHECK: [0x05,0x00,0x8f,0xd2,0x80,0xee,0x01,0x00]
+
+v_lshlrev_b64 v[5:6], 0, v[2:3]
+// CHECK: [0x05,0x00,0x8f,0xd2,0x80,0x04,0x02,0x00]
+
+v_lshlrev_b64 v[5:6], 0, v[254:255]
+// CHECK: [0x05,0x00,0x8f,0xd2,0x80,0xfc,0x03,0x00]
+
+v_lshrrev_b64 v[5:6], 0, s[4:5]
+// CHECK: [0x05,0x00,0x90,0xd2,0x80,0x08,0x00,0x00]
+
+v_lshrrev_b64 v[254:255], 0, s[4:5]
+// CHECK: [0xfe,0x00,0x90,0xd2,0x80,0x08,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], -1, s[4:5]
+// CHECK: [0x05,0x00,0x90,0xd2,0xc1,0x08,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], 0.5, s[4:5]
+// CHECK: [0x05,0x00,0x90,0xd2,0xf0,0x08,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], -4.0, s[4:5]
+// CHECK: [0x05,0x00,0x90,0xd2,0xf7,0x08,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], v1, s[4:5]
+// CHECK: [0x05,0x00,0x90,0xd2,0x01,0x09,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], v255, s[4:5]
+// CHECK: [0x05,0x00,0x90,0xd2,0xff,0x09,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], 0, s[6:7]
+// CHECK: [0x05,0x00,0x90,0xd2,0x80,0x0c,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], 0, s[100:101]
+// CHECK: [0x05,0x00,0x90,0xd2,0x80,0xc8,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], 0, flat_scratch
+// CHECK: [0x05,0x00,0x90,0xd2,0x80,0xcc,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], 0, vcc
+// CHECK: [0x05,0x00,0x90,0xd2,0x80,0xd4,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], 0, tba
+// CHECK: [0x05,0x00,0x90,0xd2,0x80,0xd8,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], 0, tma
+// CHECK: [0x05,0x00,0x90,0xd2,0x80,0xdc,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], 0, ttmp[10:11]
+// CHECK: [0x05,0x00,0x90,0xd2,0x80,0xf4,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], 0, exec
+// CHECK: [0x05,0x00,0x90,0xd2,0x80,0xfc,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], 0, 0
+// CHECK: [0x05,0x00,0x90,0xd2,0x80,0x00,0x01,0x00]
+
+v_lshrrev_b64 v[5:6], 0, -1
+// CHECK: [0x05,0x00,0x90,0xd2,0x80,0x82,0x01,0x00]
+
+v_lshrrev_b64 v[5:6], 0, 0.5
+// CHECK: [0x05,0x00,0x90,0xd2,0x80,0xe0,0x01,0x00]
+
+v_lshrrev_b64 v[5:6], 0, -4.0
+// CHECK: [0x05,0x00,0x90,0xd2,0x80,0xee,0x01,0x00]
+
+v_lshrrev_b64 v[5:6], 0, v[2:3]
+// CHECK: [0x05,0x00,0x90,0xd2,0x80,0x04,0x02,0x00]
+
+v_lshrrev_b64 v[5:6], 0, v[254:255]
+// CHECK: [0x05,0x00,0x90,0xd2,0x80,0xfc,0x03,0x00]
+
+v_ashrrev_i64 v[5:6], 0, s[4:5]
+// CHECK: [0x05,0x00,0x91,0xd2,0x80,0x08,0x00,0x00]
+
+v_ashrrev_i64 v[254:255], 0, s[4:5]
+// CHECK: [0xfe,0x00,0x91,0xd2,0x80,0x08,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], -1, s[4:5]
+// CHECK: [0x05,0x00,0x91,0xd2,0xc1,0x08,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], 0.5, s[4:5]
+// CHECK: [0x05,0x00,0x91,0xd2,0xf0,0x08,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], -4.0, s[4:5]
+// CHECK: [0x05,0x00,0x91,0xd2,0xf7,0x08,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], v1, s[4:5]
+// CHECK: [0x05,0x00,0x91,0xd2,0x01,0x09,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], v255, s[4:5]
+// CHECK: [0x05,0x00,0x91,0xd2,0xff,0x09,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], 0, s[6:7]
+// CHECK: [0x05,0x00,0x91,0xd2,0x80,0x0c,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], 0, s[100:101]
+// CHECK: [0x05,0x00,0x91,0xd2,0x80,0xc8,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], 0, flat_scratch
+// CHECK: [0x05,0x00,0x91,0xd2,0x80,0xcc,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], 0, vcc
+// CHECK: [0x05,0x00,0x91,0xd2,0x80,0xd4,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], 0, tba
+// CHECK: [0x05,0x00,0x91,0xd2,0x80,0xd8,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], 0, tma
+// CHECK: [0x05,0x00,0x91,0xd2,0x80,0xdc,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], 0, ttmp[10:11]
+// CHECK: [0x05,0x00,0x91,0xd2,0x80,0xf4,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], 0, exec
+// CHECK: [0x05,0x00,0x91,0xd2,0x80,0xfc,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], 0, 0
+// CHECK: [0x05,0x00,0x91,0xd2,0x80,0x00,0x01,0x00]
+
+v_ashrrev_i64 v[5:6], 0, -1
+// CHECK: [0x05,0x00,0x91,0xd2,0x80,0x82,0x01,0x00]
+
+v_ashrrev_i64 v[5:6], 0, 0.5
+// CHECK: [0x05,0x00,0x91,0xd2,0x80,0xe0,0x01,0x00]
+
+v_ashrrev_i64 v[5:6], 0, -4.0
+// CHECK: [0x05,0x00,0x91,0xd2,0x80,0xee,0x01,0x00]
+
+v_ashrrev_i64 v[5:6], 0, v[2:3]
+// CHECK: [0x05,0x00,0x91,0xd2,0x80,0x04,0x02,0x00]
+
+v_ashrrev_i64 v[5:6], 0, v[254:255]
+// CHECK: [0x05,0x00,0x91,0xd2,0x80,0xfc,0x03,0x00]
+
+v_trig_preop_f64 v[5:6], 0, s2
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0x04,0x00,0x00]
+
+v_trig_preop_f64 v[254:255], 0, s2
+// CHECK: [0xfe,0x00,0x92,0xd2,0x80,0x04,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0.5, s2
+// CHECK: [0x05,0x00,0x92,0xd2,0xf0,0x04,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], v[1:2], s2
+// CHECK: [0x05,0x00,0x92,0xd2,0x01,0x05,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], v[254:255], s2
+// CHECK: [0x05,0x00,0x92,0xd2,0xfe,0x05,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, s101
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0xca,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0xcc,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0xce,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, vcc_lo
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0xd4,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, vcc_hi
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0xd6,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, tba_lo
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0xd8,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, tba_hi
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0xda,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, tma_lo
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0xdc,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, tma_hi
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0xde,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, ttmp11
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0xf6,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, m0
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0xf8,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, exec_lo
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0xfc,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, exec_hi
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0xfe,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, 0
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0x00,0x01,0x00]
+
+v_trig_preop_f64 v[5:6], 0, -1
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0x82,0x01,0x00]
+
+v_trig_preop_f64 v[5:6], 0, 0.5
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0xe0,0x01,0x00]
+
+v_trig_preop_f64 v[5:6], 0, -4.0
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0xee,0x01,0x00]
+
+v_trig_preop_f64 v[5:6], 0, scc
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0xfa,0x01,0x00]
+
+v_trig_preop_f64 v[5:6], 0, v2
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0x04,0x02,0x00]
+
+v_trig_preop_f64 v[5:6], 0, v255
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0xfe,0x03,0x00]
+
+v_trig_preop_f64 v[5:6], 0, s2 clamp
+// CHECK: [0x05,0x80,0x92,0xd2,0x80,0x04,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], 0, s2 mul:2
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0x04,0x00,0x08]
+
+v_trig_preop_f64 v[5:6], 0, s2 mul:4
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0x04,0x00,0x10]
+
+v_trig_preop_f64 v[5:6], 0, s2 div:2
+// CHECK: [0x05,0x00,0x92,0xd2,0x80,0x04,0x00,0x18]
+
+v_bfm_b32 v5, 0, s2
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0x04,0x00,0x00]
+
+v_bfm_b32 v255, 0, s2
+// CHECK: [0xff,0x00,0x93,0xd2,0x80,0x04,0x00,0x00]
+
+v_bfm_b32 v5, -1, s2
+// CHECK: [0x05,0x00,0x93,0xd2,0xc1,0x04,0x00,0x00]
+
+v_bfm_b32 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x93,0xd2,0xf0,0x04,0x00,0x00]
+
+v_bfm_b32 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x93,0xd2,0xf7,0x04,0x00,0x00]
+
+v_bfm_b32 v5, v1, s2
+// CHECK: [0x05,0x00,0x93,0xd2,0x01,0x05,0x00,0x00]
+
+v_bfm_b32 v5, v255, s2
+// CHECK: [0x05,0x00,0x93,0xd2,0xff,0x05,0x00,0x00]
+
+v_bfm_b32 v5, 0, s101
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0xca,0x00,0x00]
+
+v_bfm_b32 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0xcc,0x00,0x00]
+
+v_bfm_b32 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0xce,0x00,0x00]
+
+v_bfm_b32 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0xd4,0x00,0x00]
+
+v_bfm_b32 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0xd6,0x00,0x00]
+
+v_bfm_b32 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0xd8,0x00,0x00]
+
+v_bfm_b32 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0xda,0x00,0x00]
+
+v_bfm_b32 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0xdc,0x00,0x00]
+
+v_bfm_b32 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0xde,0x00,0x00]
+
+v_bfm_b32 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0xf6,0x00,0x00]
+
+v_bfm_b32 v5, 0, m0
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0xf8,0x00,0x00]
+
+v_bfm_b32 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0xfc,0x00,0x00]
+
+v_bfm_b32 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0xfe,0x00,0x00]
+
+v_bfm_b32 v5, 0, 0
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0x00,0x01,0x00]
+
+v_bfm_b32 v5, 0, -1
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0x82,0x01,0x00]
+
+v_bfm_b32 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0xe0,0x01,0x00]
+
+v_bfm_b32 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0xee,0x01,0x00]
+
+v_bfm_b32 v5, 0, v2
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0x04,0x02,0x00]
+
+v_bfm_b32 v5, 0, v255
+// CHECK: [0x05,0x00,0x93,0xd2,0x80,0xfe,0x03,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, s2
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v255, v1, s2
+// CHECK: [0xff,0x00,0x94,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v255, s2
+// CHECK: [0x05,0x00,0x94,0xd2,0xff,0x05,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, s101
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xcb,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xcd,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xcf,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xd5,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xd7,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xd9,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xdb,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xdd,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xdf,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xf7,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, m0
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xf9,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xfd,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xff,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, scc
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xfb,0x01,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, v2
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0x05,0x02,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, v255
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0xff,0x03,0x00]
+
+v_cvt_pknorm_i16_f32 v5, -v1, s2
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0x05,0x00,0x20]
+
+v_cvt_pknorm_i16_f32 v5, v1, -s2
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0x05,0x00,0x40]
+
+v_cvt_pknorm_i16_f32 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x94,0xd2,0x01,0x05,0x00,0x60]
+
+v_cvt_pknorm_i16_f32 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x94,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x94,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x94,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x94,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, s2
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v255, v1, s2
+// CHECK: [0xff,0x00,0x95,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v255, s2
+// CHECK: [0x05,0x00,0x95,0xd2,0xff,0x05,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, s101
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xcb,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xcd,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xcf,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xd5,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xd7,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xd9,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xdb,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xdd,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xdf,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xf7,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, m0
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xf9,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xfd,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xff,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, scc
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xfb,0x01,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, v2
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0x05,0x02,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, v255
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0xff,0x03,0x00]
+
+v_cvt_pknorm_u16_f32 v5, -v1, s2
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0x05,0x00,0x20]
+
+v_cvt_pknorm_u16_f32 v5, v1, -s2
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0x05,0x00,0x40]
+
+v_cvt_pknorm_u16_f32 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x95,0xd2,0x01,0x05,0x00,0x60]
+
+v_cvt_pknorm_u16_f32 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x95,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x95,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x95,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x95,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, s2
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v255, v1, s2
+// CHECK: [0xff,0x00,0x96,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v255, s2
+// CHECK: [0x05,0x00,0x96,0xd2,0xff,0x05,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, s101
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xcb,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, flat_scratch_lo
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xcd,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, flat_scratch_hi
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xcf,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, vcc_lo
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xd5,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, vcc_hi
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xd7,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, tba_lo
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xd9,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, tba_hi
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xdb,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, tma_lo
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xdd,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, tma_hi
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xdf,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, ttmp11
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xf7,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, m0
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xf9,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, exec_lo
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xfd,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, exec_hi
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xff,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, scc
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xfb,0x01,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, v2
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x05,0x02,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, v255
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0xff,0x03,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, -v1, s2
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x05,0x00,0x20]
+
+v_cvt_pkrtz_f16_f32 v5, v1, -s2
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x05,0x00,0x40]
+
+v_cvt_pkrtz_f16_f32 v5, -v1, -s2
+// CHECK: [0x05,0x00,0x96,0xd2,0x01,0x05,0x00,0x60]
+
+v_cvt_pkrtz_f16_f32 v5, |v1|, s2
+// CHECK: [0x05,0x01,0x96,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, |s2|
+// CHECK: [0x05,0x02,0x96,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, |v1|, |s2|
+// CHECK: [0x05,0x03,0x96,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pkrtz_f16_f32 v5, v1, s2 clamp
+// CHECK: [0x05,0x80,0x96,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, s2
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0x04,0x00,0x00]
+
+v_cvt_pk_u16_u32 v255, 0, s2
+// CHECK: [0xff,0x00,0x97,0xd2,0x80,0x04,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, -1, s2
+// CHECK: [0x05,0x00,0x97,0xd2,0xc1,0x04,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x97,0xd2,0xf0,0x04,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x97,0xd2,0xf7,0x04,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, v1, s2
+// CHECK: [0x05,0x00,0x97,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, v255, s2
+// CHECK: [0x05,0x00,0x97,0xd2,0xff,0x05,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, s101
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0xca,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0xcc,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0xce,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0xd4,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0xd6,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0xd8,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0xda,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0xdc,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0xde,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0xf6,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, m0
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0xf8,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0xfc,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0xfe,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, 0
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0x00,0x01,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, -1
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0x82,0x01,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0xe0,0x01,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0xee,0x01,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, v2
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0x04,0x02,0x00]
+
+v_cvt_pk_u16_u32 v5, 0, v255
+// CHECK: [0x05,0x00,0x97,0xd2,0x80,0xfe,0x03,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, s2
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0x04,0x00,0x00]
+
+v_cvt_pk_i16_i32 v255, 0, s2
+// CHECK: [0xff,0x00,0x98,0xd2,0x80,0x04,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, -1, s2
+// CHECK: [0x05,0x00,0x98,0xd2,0xc1,0x04,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, 0.5, s2
+// CHECK: [0x05,0x00,0x98,0xd2,0xf0,0x04,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, -4.0, s2
+// CHECK: [0x05,0x00,0x98,0xd2,0xf7,0x04,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, v1, s2
+// CHECK: [0x05,0x00,0x98,0xd2,0x01,0x05,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, v255, s2
+// CHECK: [0x05,0x00,0x98,0xd2,0xff,0x05,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, s101
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0xca,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, flat_scratch_lo
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0xcc,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, flat_scratch_hi
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0xce,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, vcc_lo
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0xd4,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, vcc_hi
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0xd6,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, tba_lo
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0xd8,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, tba_hi
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0xda,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, tma_lo
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0xdc,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, tma_hi
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0xde,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, ttmp11
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0xf6,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, m0
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0xf8,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, exec_lo
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0xfc,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, exec_hi
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0xfe,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, 0
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0x00,0x01,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, -1
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0x82,0x01,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, 0.5
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0xe0,0x01,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, -4.0
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0xee,0x01,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, v2
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0x04,0x02,0x00]
+
+v_cvt_pk_i16_i32 v5, 0, v255
+// CHECK: [0x05,0x00,0x98,0xd2,0x80,0xfe,0x03,0x00]
+
+v_cmp_class_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x20,0x7c]
+
+v_cmp_class_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x20,0x7c]
+
+v_cmp_class_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x20,0x7c]
+
+v_cmp_class_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x20,0x7c]
+
+v_cmp_class_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x20,0x7c]
+
+v_cmp_class_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x20,0x7c]
+
+v_cmp_class_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x20,0x7c]
+
+v_cmp_class_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x20,0x7c]
+
+v_cmp_class_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x20,0x7c]
+
+v_cmp_class_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x20,0x7c]
+
+v_cmp_class_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x20,0x7c]
+
+v_cmp_class_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x20,0x7c]
+
+v_cmp_class_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x20,0x7c]
+
+v_cmp_class_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x20,0x7c]
+
+v_cmp_class_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x20,0x7c]
+
+v_cmp_class_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x20,0x7c]
+
+v_cmp_class_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x20,0x7c]
+
+v_cmp_class_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x20,0x7c]
+
+v_cmp_class_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x20,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_class_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x20,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_class_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x20,0x7c]
+
+v_cmp_class_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x20,0x7c]
+
+v_cmp_class_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x21,0x7c]
+
+v_cmpx_class_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x22,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_class_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x22,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_class_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x22,0x7c]
+
+v_cmpx_class_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x23,0x7c]
+
+v_cmp_class_f64 vcc, s[2:3], v2
+// CHECK: [0x02,0x04,0x24,0x7c]
+
+v_cmp_class_f64 vcc, s[4:5], v2
+// CHECK: [0x04,0x04,0x24,0x7c]
+
+v_cmp_class_f64 vcc, s[100:101], v2
+// CHECK: [0x64,0x04,0x24,0x7c]
+
+v_cmp_class_f64 vcc, flat_scratch, v2
+// CHECK: [0x66,0x04,0x24,0x7c]
+
+v_cmp_class_f64 vcc, vcc, v2
+// CHECK: [0x6a,0x04,0x24,0x7c]
+
+v_cmp_class_f64 vcc, tba, v2
+// CHECK: [0x6c,0x04,0x24,0x7c]
+
+v_cmp_class_f64 vcc, tma, v2
+// CHECK: [0x6e,0x04,0x24,0x7c]
+
+v_cmp_class_f64 vcc, ttmp[10:11], v2
+// CHECK: [0x7a,0x04,0x24,0x7c]
+
+v_cmp_class_f64 vcc, exec, v2
+// CHECK: [0x7e,0x04,0x24,0x7c]
+
+v_cmp_class_f64 vcc, 0, v2
+// CHECK: [0x80,0x04,0x24,0x7c]
+
+v_cmp_class_f64 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x24,0x7c]
+
+v_cmp_class_f64 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x24,0x7c]
+
+v_cmp_class_f64 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x24,0x7c]
+
+v_cmp_class_f64 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x24,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_class_f64 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x24,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_class_f64 vcc, v[1:2], v2
+// CHECK: [0x01,0x05,0x24,0x7c]
+
+v_cmp_class_f64 vcc, v[254:255], v2
+// CHECK: [0xfe,0x05,0x24,0x7c]
+
+v_cmp_class_f64 vcc, s[2:3], v255
+// CHECK: [0x02,0xfe,0x25,0x7c]
+
+v_cmpx_class_f64 vcc, s[2:3], v2
+// CHECK: [0x02,0x04,0x26,0x7c]
+
+v_cmpx_class_f64 vcc, s[4:5], v2
+// CHECK: [0x04,0x04,0x26,0x7c]
+
+v_cmpx_class_f64 vcc, s[100:101], v2
+// CHECK: [0x64,0x04,0x26,0x7c]
+
+v_cmpx_class_f64 vcc, flat_scratch, v2
+// CHECK: [0x66,0x04,0x26,0x7c]
+
+v_cmpx_class_f64 vcc, vcc, v2
+// CHECK: [0x6a,0x04,0x26,0x7c]
+
+v_cmpx_class_f64 vcc, tba, v2
+// CHECK: [0x6c,0x04,0x26,0x7c]
+
+v_cmpx_class_f64 vcc, tma, v2
+// CHECK: [0x6e,0x04,0x26,0x7c]
+
+v_cmpx_class_f64 vcc, ttmp[10:11], v2
+// CHECK: [0x7a,0x04,0x26,0x7c]
+
+v_cmpx_class_f64 vcc, exec, v2
+// CHECK: [0x7e,0x04,0x26,0x7c]
+
+v_cmpx_class_f64 vcc, 0, v2
+// CHECK: [0x80,0x04,0x26,0x7c]
+
+v_cmpx_class_f64 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x26,0x7c]
+
+v_cmpx_class_f64 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x26,0x7c]
+
+v_cmpx_class_f64 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x26,0x7c]
+
+v_cmpx_class_f64 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x26,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_class_f64 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x26,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_class_f64 vcc, v[1:2], v2
+// CHECK: [0x01,0x05,0x26,0x7c]
+
+v_cmpx_class_f64 vcc, v[254:255], v2
+// CHECK: [0xfe,0x05,0x26,0x7c]
+
+v_cmpx_class_f64 vcc, s[2:3], v255
+// CHECK: [0x02,0xfe,0x27,0x7c]
+
+v_cmp_class_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x28,0x7c]
+
+v_cmp_class_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x28,0x7c]
+
+v_cmp_class_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x28,0x7c]
+
+v_cmp_class_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x28,0x7c]
+
+v_cmp_class_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x28,0x7c]
+
+v_cmp_class_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x28,0x7c]
+
+v_cmp_class_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x28,0x7c]
+
+v_cmp_class_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x28,0x7c]
+
+v_cmp_class_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x28,0x7c]
+
+v_cmp_class_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x28,0x7c]
+
+v_cmp_class_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x28,0x7c]
+
+v_cmp_class_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x28,0x7c]
+
+v_cmp_class_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x28,0x7c]
+
+v_cmp_class_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x28,0x7c]
+
+v_cmp_class_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x28,0x7c]
+
+v_cmp_class_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x28,0x7c]
+
+v_cmp_class_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x28,0x7c]
+
+v_cmp_class_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x28,0x7c]
+
+v_cmp_class_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x28,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmp_class_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x28,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmp_class_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x28,0x7c]
+
+v_cmp_class_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x28,0x7c]
+
+v_cmp_class_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x29,0x7c]
+
+v_cmpx_class_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x2a,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_class_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x2a,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmpx_class_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x2a,0x7c]
+
+v_cmpx_class_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x2b,0x7c]
+
+v_cmp_f_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x40,0x7c]
+
+v_cmp_f_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x40,0x7c]
+
+v_cmp_f_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x40,0x7c]
+
+v_cmp_f_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x40,0x7c]
+
+v_cmp_f_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x40,0x7c]
+
+v_cmp_f_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x40,0x7c]
+
+v_cmp_f_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x40,0x7c]
+
+v_cmp_f_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x40,0x7c]
+
+v_cmp_f_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x40,0x7c]
+
+v_cmp_f_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x40,0x7c]
+
+v_cmp_f_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x40,0x7c]
+
+v_cmp_f_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x40,0x7c]
+
+v_cmp_f_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x40,0x7c]
+
+v_cmp_f_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x40,0x7c]
+
+v_cmp_f_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x40,0x7c]
+
+v_cmp_f_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x40,0x7c]
+
+v_cmp_f_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x40,0x7c]
+
+v_cmp_f_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x40,0x7c]
+
+v_cmp_f_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x40,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmp_f_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x40,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmp_f_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x40,0x7c]
+
+v_cmp_f_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x40,0x7c]
+
+v_cmp_f_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x41,0x7c]
+
+v_cmp_f_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x20,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x20,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x20,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x20,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x20,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x20,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x20,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x20,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_f_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x20,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_f_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x20,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_f_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x20,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_f_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x20,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x42,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmp_lt_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x42,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmp_lt_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x42,0x7c]
+
+v_cmp_lt_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x43,0x7c]
+
+v_cmp_lt_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x21,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x21,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x21,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x21,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x21,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x21,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x21,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x21,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x21,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x21,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_lt_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x21,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_lt_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x21,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x44,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmp_eq_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x44,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmp_eq_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x44,0x7c]
+
+v_cmp_eq_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x45,0x7c]
+
+v_cmp_eq_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x22,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x22,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x22,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x22,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x22,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x22,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x22,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x22,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x22,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x22,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_eq_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x22,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_eq_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x22,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x46,0x7c]
+
+v_cmp_le_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x46,0x7c]
+
+v_cmp_le_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x46,0x7c]
+
+v_cmp_le_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x46,0x7c]
+
+v_cmp_le_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x46,0x7c]
+
+v_cmp_le_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x46,0x7c]
+
+v_cmp_le_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x46,0x7c]
+
+v_cmp_le_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x46,0x7c]
+
+v_cmp_le_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x46,0x7c]
+
+v_cmp_le_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x46,0x7c]
+
+v_cmp_le_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x46,0x7c]
+
+v_cmp_le_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x46,0x7c]
+
+v_cmp_le_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x46,0x7c]
+
+v_cmp_le_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x46,0x7c]
+
+v_cmp_le_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x46,0x7c]
+
+v_cmp_le_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x46,0x7c]
+
+v_cmp_le_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x46,0x7c]
+
+v_cmp_le_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x46,0x7c]
+
+v_cmp_le_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x46,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmp_le_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x46,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmp_le_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x46,0x7c]
+
+v_cmp_le_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x46,0x7c]
+
+v_cmp_le_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x47,0x7c]
+
+v_cmp_le_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x23,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x23,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x23,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x23,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x23,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x23,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x23,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x23,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_le_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x23,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_le_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x23,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_le_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x23,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_le_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x23,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x48,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmp_gt_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x48,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmp_gt_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x48,0x7c]
+
+v_cmp_gt_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x49,0x7c]
+
+v_cmp_gt_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x24,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x24,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x24,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x24,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x24,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x24,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x24,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x24,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x24,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x24,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_gt_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x24,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_gt_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x24,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x4a,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmp_lg_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x4a,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmp_lg_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x4a,0x7c]
+
+v_cmp_lg_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x4b,0x7c]
+
+v_cmp_lg_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x25,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x25,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x25,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x25,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x25,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x25,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x25,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x25,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x25,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x25,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_lg_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x25,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_lg_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x25,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x4c,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmp_ge_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x4c,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmp_ge_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x4c,0x7c]
+
+v_cmp_ge_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x4d,0x7c]
+
+v_cmp_ge_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x26,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x26,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x26,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x26,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x26,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x26,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x26,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x26,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x26,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x26,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_ge_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x26,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_ge_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x26,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x4e,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmp_o_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x4e,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmp_o_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x4e,0x7c]
+
+v_cmp_o_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x4f,0x7c]
+
+v_cmp_o_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x27,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x27,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x27,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x27,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x27,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x27,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x27,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x27,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_o_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x27,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_o_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x27,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_o_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x27,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_o_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x27,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x50,0x7c]
+
+v_cmp_u_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x50,0x7c]
+
+v_cmp_u_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x50,0x7c]
+
+v_cmp_u_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x50,0x7c]
+
+v_cmp_u_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x50,0x7c]
+
+v_cmp_u_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x50,0x7c]
+
+v_cmp_u_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x50,0x7c]
+
+v_cmp_u_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x50,0x7c]
+
+v_cmp_u_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x50,0x7c]
+
+v_cmp_u_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x50,0x7c]
+
+v_cmp_u_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x50,0x7c]
+
+v_cmp_u_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x50,0x7c]
+
+v_cmp_u_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x50,0x7c]
+
+v_cmp_u_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x50,0x7c]
+
+v_cmp_u_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x50,0x7c]
+
+v_cmp_u_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x50,0x7c]
+
+v_cmp_u_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x50,0x7c]
+
+v_cmp_u_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x50,0x7c]
+
+v_cmp_u_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x50,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmp_u_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x50,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmp_u_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x50,0x7c]
+
+v_cmp_u_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x50,0x7c]
+
+v_cmp_u_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x51,0x7c]
+
+v_cmp_u_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x28,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x28,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x28,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x28,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x28,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x28,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x28,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x28,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_u_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x28,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_u_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x28,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_u_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x28,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_u_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x28,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x52,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmp_nge_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x52,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmp_nge_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x52,0x7c]
+
+v_cmp_nge_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x53,0x7c]
+
+v_cmp_nge_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x29,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x29,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x29,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x29,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x29,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x29,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x29,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x29,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x29,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x29,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_nge_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x29,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_nge_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x29,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x54,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmp_nlg_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x54,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmp_nlg_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x54,0x7c]
+
+v_cmp_nlg_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x55,0x7c]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x2a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x2a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x2a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x2a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x2a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x2a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x2a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x2a,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x2a,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x2a,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_nlg_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x2a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x56,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmp_ngt_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x56,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmp_ngt_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x56,0x7c]
+
+v_cmp_ngt_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x57,0x7c]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x2b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x2b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x2b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x2b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x2b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x2b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x2b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x2b,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x2b,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x2b,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_ngt_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x2b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x58,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmp_nle_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x58,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmp_nle_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x58,0x7c]
+
+v_cmp_nle_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x59,0x7c]
+
+v_cmp_nle_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x2c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x2c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x2c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x2c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x2c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x2c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x2c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x2c,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x2c,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_nle_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x2c,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_nle_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x2c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x5a,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmp_neq_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x5a,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmp_neq_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x5a,0x7c]
+
+v_cmp_neq_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x5b,0x7c]
+
+v_cmp_neq_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x2d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x2d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x2d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x2d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x2d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x2d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x2d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x2d,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x2d,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_neq_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x2d,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_neq_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x2d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x5c,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmp_nlt_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x5c,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmp_nlt_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x5c,0x7c]
+
+v_cmp_nlt_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x5d,0x7c]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x2e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x2e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x2e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x2e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x2e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x2e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x2e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x2e,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x2e,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x2e,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_nlt_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x2e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x5e,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmp_tru_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x5e,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmp_tru_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x5e,0x7c]
+
+v_cmp_tru_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x5f,0x7c]
+
+v_cmp_tru_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x2f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x2f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x2f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x2f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x2f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x2f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x2f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x2f,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x2f,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_tru_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x2f,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_tru_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x2f,0xd0,0x80,0x04,0x00,0x00]
+
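+// A rough sketch of the two encodings these pairs exercise (field layout
+// assumed from the GCN3 ISA manual, not stated by this patch): the e32 VOPC
+// form is a single dword, [31:25]=0x3e, op[24:17], vsrc1[16:9], src0[8:0].
+// E.g. for "v_cmp_tru_f16 vcc, s1, v2" (op 0x2f, src0=1, vsrc1=2) that gives
+// dword 0x7c5e0401, i.e. the checked little-endian bytes [0x01,0x04,0x5e,0x7c].
+// The _e64 (VOP3a) form adds a second dword carrying src1 plus the clamp/neg
+// modifier bits, which is why the "clamp" and "-s2" variants appear only in
+// the _e64 tests above and below.
+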
+v_cmpx_f_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x60,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_f_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x60,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmpx_f_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x60,0x7c]
+
+v_cmpx_f_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x61,0x7c]
+
+v_cmpx_f_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x30,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x30,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x30,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x30,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x30,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x30,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x30,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x30,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x30,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x30,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_f_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x30,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_f_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x30,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x62,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lt_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x62,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmpx_lt_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x62,0x7c]
+
+v_cmpx_lt_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x63,0x7c]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x31,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x31,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x31,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x31,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x31,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x31,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x31,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x31,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x31,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x31,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x31,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_lt_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x31,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x64,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_eq_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x64,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmpx_eq_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x64,0x7c]
+
+v_cmpx_eq_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x65,0x7c]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x32,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x32,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x32,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x32,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x32,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x32,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x32,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x32,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x32,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x32,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x32,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_eq_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x32,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x66,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_le_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x66,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmpx_le_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x66,0x7c]
+
+v_cmpx_le_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x67,0x7c]
+
+v_cmpx_le_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x33,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x33,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x33,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x33,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x33,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x33,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x33,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x33,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x33,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x33,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_le_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x33,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_le_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x33,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x68,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_gt_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x68,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmpx_gt_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x68,0x7c]
+
+v_cmpx_gt_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x69,0x7c]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x34,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x34,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x34,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x34,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x34,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x34,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x34,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x34,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x34,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x34,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x34,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_gt_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x34,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x6a,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lg_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x6a,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmpx_lg_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x6a,0x7c]
+
+v_cmpx_lg_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x6b,0x7c]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x35,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x35,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x35,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x35,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x35,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x35,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x35,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x35,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x35,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x35,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x35,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_lg_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x35,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x6c,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ge_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x6c,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmpx_ge_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x6c,0x7c]
+
+v_cmpx_ge_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x6d,0x7c]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x36,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x36,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x36,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x36,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x36,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x36,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x36,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x36,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x36,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x36,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x36,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_ge_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x36,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x6e,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_o_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x6e,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmpx_o_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x6e,0x7c]
+
+v_cmpx_o_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x6f,0x7c]
+
+v_cmpx_o_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x37,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x37,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x37,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x37,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x37,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x37,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x37,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x37,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x37,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x37,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_o_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x37,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_o_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x37,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x70,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_u_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x70,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmpx_u_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x70,0x7c]
+
+v_cmpx_u_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x71,0x7c]
+
+v_cmpx_u_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x38,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x38,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x38,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x38,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x38,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x38,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x38,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x38,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x38,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x38,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_u_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x38,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_u_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x38,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x72,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nge_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x72,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmpx_nge_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x72,0x7c]
+
+v_cmpx_nge_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x73,0x7c]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x39,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x39,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x39,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x39,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x39,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x39,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x39,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x39,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x39,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x39,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x39,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_nge_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x39,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x74,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nlg_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x74,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmpx_nlg_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x74,0x7c]
+
+v_cmpx_nlg_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x75,0x7c]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x3a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x3a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x3a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x3a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x3a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x3a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x3a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x3a,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x3a,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x3a,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_nlg_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x3a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x76,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ngt_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x76,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmpx_ngt_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x76,0x7c]
+
+v_cmpx_ngt_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x77,0x7c]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x3b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x3b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x3b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x3b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x3b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x3b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x3b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x3b,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x3b,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x3b,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_ngt_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x3b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x78,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nle_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x78,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmpx_nle_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x78,0x7c]
+
+v_cmpx_nle_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x79,0x7c]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x3c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x3c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x3c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x3c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x3c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x3c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x3c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x3c,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x3c,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x3c,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_nle_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x3c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x7a,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_neq_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x7a,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmpx_neq_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x7a,0x7c]
+
+v_cmpx_neq_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x7b,0x7c]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x3d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x3d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x3d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x3d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x3d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x3d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x3d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x3d,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x3d,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x3d,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_neq_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x3d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x7c,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nlt_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x7c,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmpx_nlt_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x7c,0x7c]
+
+v_cmpx_nlt_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x7d,0x7c]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x3e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x3e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x3e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x3e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x3e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x3e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x3e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x3e,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x3e,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x3e,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_nlt_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x3e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x7e,0x7c,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_tru_f16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x7e,0x7c,0x56,0x34,0x00,0x00]
+
+v_cmpx_tru_f16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x7e,0x7c]
+
+v_cmpx_tru_f16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x7f,0x7c]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x3f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x3f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x3f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x3f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x3f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x3f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x3f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x3f,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x3f,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x3f,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_tru_f16_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x3f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x80,0x7c]
+
+v_cmp_f_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x80,0x7c]
+
+v_cmp_f_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x80,0x7c]
+
+v_cmp_f_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x80,0x7c]
+
+v_cmp_f_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x80,0x7c]
+
+v_cmp_f_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x80,0x7c]
+
+v_cmp_f_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x80,0x7c]
+
+v_cmp_f_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x80,0x7c]
+
+v_cmp_f_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x80,0x7c]
+
+v_cmp_f_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x80,0x7c]
+
+v_cmp_f_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x80,0x7c]
+
+v_cmp_f_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x80,0x7c]
+
+v_cmp_f_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x80,0x7c]
+
+v_cmp_f_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x80,0x7c]
+
+v_cmp_f_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x80,0x7c]
+
+v_cmp_f_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x80,0x7c]
+
+v_cmp_f_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x80,0x7c]
+
+v_cmp_f_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x80,0x7c]
+
+v_cmp_f_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x80,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_f_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x80,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_f_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x80,0x7c]
+
+v_cmp_f_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x80,0x7c]
+
+v_cmp_f_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x81,0x7c]
+
+v_cmp_f_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x40,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x40,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x40,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x40,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x40,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x40,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x40,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x40,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x40,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x40,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_f_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x40,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_f_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x40,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x82,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_lt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x82,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_lt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x82,0x7c]
+
+v_cmp_lt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x83,0x7c]
+
+v_cmp_lt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x41,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x41,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x41,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x41,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x41,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x41,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x41,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x41,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x41,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x41,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_lt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x41,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_lt_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x41,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x84,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_eq_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x84,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_eq_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x84,0x7c]
+
+v_cmp_eq_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x85,0x7c]
+
+v_cmp_eq_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x42,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x42,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x42,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x42,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x42,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x42,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x42,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x42,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x42,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x42,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_eq_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x42,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_eq_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x42,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x86,0x7c]
+
+v_cmp_le_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x86,0x7c]
+
+v_cmp_le_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x86,0x7c]
+
+v_cmp_le_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x86,0x7c]
+
+v_cmp_le_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x86,0x7c]
+
+v_cmp_le_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x86,0x7c]
+
+v_cmp_le_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x86,0x7c]
+
+v_cmp_le_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x86,0x7c]
+
+v_cmp_le_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x86,0x7c]
+
+v_cmp_le_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x86,0x7c]
+
+v_cmp_le_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x86,0x7c]
+
+v_cmp_le_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x86,0x7c]
+
+v_cmp_le_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x86,0x7c]
+
+v_cmp_le_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x86,0x7c]
+
+v_cmp_le_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x86,0x7c]
+
+v_cmp_le_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x86,0x7c]
+
+v_cmp_le_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x86,0x7c]
+
+v_cmp_le_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x86,0x7c]
+
+v_cmp_le_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x86,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_le_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x86,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_le_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x86,0x7c]
+
+v_cmp_le_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x86,0x7c]
+
+v_cmp_le_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x87,0x7c]
+
+v_cmp_le_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x43,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x43,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x43,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x43,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x43,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x43,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x43,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x43,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x43,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x43,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_le_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x43,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_le_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x43,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x88,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_gt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x88,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_gt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x88,0x7c]
+
+v_cmp_gt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x89,0x7c]
+
+v_cmp_gt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x44,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x44,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x44,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x44,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x44,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x44,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x44,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x44,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x44,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x44,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_gt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x44,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_gt_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x44,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x8a,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_lg_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x8a,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_lg_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x8a,0x7c]
+
+v_cmp_lg_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x8b,0x7c]
+
+v_cmp_lg_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x45,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x45,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x45,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x45,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x45,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x45,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x45,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x45,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x45,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x45,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_lg_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x45,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_lg_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x45,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x8c,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_ge_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x8c,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_ge_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x8c,0x7c]
+
+v_cmp_ge_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x8d,0x7c]
+
+v_cmp_ge_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x46,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x46,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x46,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x46,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x46,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x46,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x46,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x46,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x46,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x46,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_ge_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x46,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_ge_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x46,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x8e,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_o_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x8e,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_o_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x8e,0x7c]
+
+v_cmp_o_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x8f,0x7c]
+
+v_cmp_o_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x47,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x47,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x47,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x47,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x47,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x47,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x47,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x47,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x47,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x47,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_o_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x47,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_o_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x47,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x90,0x7c]
+
+v_cmp_u_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x90,0x7c]
+
+v_cmp_u_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x90,0x7c]
+
+v_cmp_u_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x90,0x7c]
+
+v_cmp_u_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x90,0x7c]
+
+v_cmp_u_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x90,0x7c]
+
+v_cmp_u_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x90,0x7c]
+
+v_cmp_u_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x90,0x7c]
+
+v_cmp_u_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x90,0x7c]
+
+v_cmp_u_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x90,0x7c]
+
+v_cmp_u_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x90,0x7c]
+
+v_cmp_u_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x90,0x7c]
+
+v_cmp_u_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x90,0x7c]
+
+v_cmp_u_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x90,0x7c]
+
+v_cmp_u_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x90,0x7c]
+
+v_cmp_u_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x90,0x7c]
+
+v_cmp_u_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x90,0x7c]
+
+v_cmp_u_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x90,0x7c]
+
+v_cmp_u_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x90,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_u_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x90,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_u_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x90,0x7c]
+
+v_cmp_u_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x90,0x7c]
+
+v_cmp_u_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x91,0x7c]
+
+v_cmp_u_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x48,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x48,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x48,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x48,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x48,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x48,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x48,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x48,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x48,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x48,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_u_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x48,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_u_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x48,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x92,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_nge_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x92,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_nge_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x92,0x7c]
+
+v_cmp_nge_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x93,0x7c]
+
+v_cmp_nge_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x49,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x49,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x49,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x49,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x49,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x49,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x49,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x49,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x49,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x49,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_nge_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x49,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_nge_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x49,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x94,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_nlg_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x94,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_nlg_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x94,0x7c]
+
+v_cmp_nlg_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x95,0x7c]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x4a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x4a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x4a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x4a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x4a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x4a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x4a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x4a,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x4a,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x4a,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_nlg_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x4a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x96,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_ngt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x96,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_ngt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x96,0x7c]
+
+v_cmp_ngt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x97,0x7c]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x4b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x4b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x4b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x4b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x4b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x4b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x4b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x4b,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x4b,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x4b,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_ngt_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x4b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x98,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_nle_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x98,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_nle_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x98,0x7c]
+
+v_cmp_nle_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x99,0x7c]
+
+v_cmp_nle_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x4c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x4c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x4c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x4c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x4c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x4c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x4c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x4c,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x4c,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_nle_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x4c,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_nle_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x4c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x9a,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_neq_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x9a,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_neq_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x9a,0x7c]
+
+v_cmp_neq_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x9b,0x7c]
+
+v_cmp_neq_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x4d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x4d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x4d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x4d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x4d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x4d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x4d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x4d,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x4d,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_neq_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x4d,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_neq_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x4d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x9c,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_nlt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x9c,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_nlt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x9c,0x7c]
+
+v_cmp_nlt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x9d,0x7c]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x4e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x4e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x4e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x4e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x4e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x4e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x4e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x4e,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x4e,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x4e,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_nlt_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x4e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x9e,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_tru_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x9e,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_tru_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x9e,0x7c]
+
+v_cmp_tru_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x9f,0x7c]
+
+v_cmp_tru_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x4f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x4f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x4f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x4f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x4f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x4f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x4f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x4f,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x4f,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_tru_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x4f,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmp_tru_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x4f,0xd0,0x80,0x04,0x00,0x00]
+
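+// The v_cmpx_* opcodes below match their v_cmp_* counterparts but additionally
+// write the comparison result to the EXEC mask; their VOP3 opcodes continue
+// the sequence at 0x50 (v_cmpx_f_f32) through 0x56 (v_cmpx_ge_f32).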
+v_cmpx_f_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa0,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_f_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa0,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_f_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa0,0x7c]
+
+v_cmpx_f_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa1,0x7c]
+
+v_cmpx_f_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x50,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x50,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x50,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x50,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x50,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x50,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x50,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x50,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x50,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x50,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_f_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x50,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_f_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x50,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa2,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa2,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_lt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa2,0x7c]
+
+v_cmpx_lt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa3,0x7c]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x51,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x51,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x51,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x51,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x51,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x51,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x51,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x51,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x51,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x51,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x51,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_lt_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x51,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa4,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa4,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_eq_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa4,0x7c]
+
+v_cmpx_eq_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa5,0x7c]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x52,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x52,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x52,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x52,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x52,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x52,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x52,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x52,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x52,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x52,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x52,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_eq_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x52,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa6,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa6,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_le_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa6,0x7c]
+
+v_cmpx_le_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa7,0x7c]
+
+v_cmpx_le_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x53,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x53,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x53,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x53,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x53,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x53,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x53,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x53,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x53,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x53,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_le_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x53,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_le_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x53,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa8,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa8,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_gt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa8,0x7c]
+
+v_cmpx_gt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa9,0x7c]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x54,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x54,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x54,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x54,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x54,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x54,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x54,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x54,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x54,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x54,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x54,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_gt_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x54,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xaa,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lg_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xaa,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_lg_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xaa,0x7c]
+
+v_cmpx_lg_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xab,0x7c]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x55,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x55,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x55,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x55,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x55,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x55,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x55,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x55,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x55,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x55,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x55,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_lg_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x55,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xac,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xac,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ge_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xac,0x7c]
+
+v_cmpx_ge_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xad,0x7c]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x56,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x56,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x56,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x56,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x56,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x56,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x56,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x56,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x56,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x56,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x56,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_ge_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x56,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xae,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_o_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xae,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_o_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xae,0x7c]
+
+v_cmpx_o_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xaf,0x7c]
+
+v_cmpx_o_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x57,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x57,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x57,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x57,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x57,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x57,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x57,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x57,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x57,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x57,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_o_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x57,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_o_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x57,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xb0,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_u_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xb0,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_u_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xb0,0x7c]
+
+v_cmpx_u_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xb1,0x7c]
+
+v_cmpx_u_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x58,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x58,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x58,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x58,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x58,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x58,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x58,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x58,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x58,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x58,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_u_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x58,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_u_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x58,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xb2,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nge_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xb2,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_nge_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xb2,0x7c]
+
+v_cmpx_nge_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xb3,0x7c]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x59,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x59,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x59,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x59,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x59,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x59,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x59,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x59,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x59,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x59,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x59,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_nge_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x59,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xb4,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlg_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xb4,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_nlg_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xb4,0x7c]
+
+v_cmpx_nlg_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xb5,0x7c]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x5a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x5a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x5a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x5a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x5a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x5a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x5a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x5a,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x5a,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x5a,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_nlg_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x5a,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xb6,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ngt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xb6,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ngt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xb6,0x7c]
+
+v_cmpx_ngt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xb7,0x7c]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x5b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x5b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x5b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x5b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x5b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x5b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x5b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x5b,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x5b,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x5b,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_ngt_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x5b,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xb8,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nle_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xb8,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_nle_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xb8,0x7c]
+
+v_cmpx_nle_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xb9,0x7c]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x5c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x5c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x5c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x5c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x5c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x5c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x5c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x5c,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x5c,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x5c,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_nle_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x5c,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xba,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_neq_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xba,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_neq_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xba,0x7c]
+
+v_cmpx_neq_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xbb,0x7c]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x5d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x5d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x5d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x5d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x5d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x5d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x5d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x5d,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x5d,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x5d,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_neq_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x5d,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xbc,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlt_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xbc,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_nlt_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xbc,0x7c]
+
+v_cmpx_nlt_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xbd,0x7c]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x5e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x5e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x5e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x5e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x5e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x5e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x5e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x5e,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x5e,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x5e,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_nlt_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x5e,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xbe,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_tru_f32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xbe,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_tru_f32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xbe,0x7c]
+
+v_cmpx_tru_f32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xbf,0x7c]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0x5f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0x5f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0x5f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0x5f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0x5f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0x5f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0x5f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0x5f,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0x5f,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, scc
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xfa,0x01,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, -s2
+// CHECK: [0x0a,0x00,0x5f,0xd0,0x80,0x04,0x00,0x40]
+
+v_cmpx_tru_f32_e64 s[10:11], 0, s2 clamp
+// CHECK: [0x0a,0x80,0x5f,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc0,0x7c]
+
+v_cmp_f_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc0,0x7c]
+
+v_cmp_f_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xc0,0x7c]
+
+v_cmp_f_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xc0,0x7c]
+
+v_cmp_f_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc0,0x7c]
+
+v_cmp_f_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc0,0x7c]
+
+v_cmp_f_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc0,0x7c]
+
+v_cmp_f_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc0,0x7c]
+
+v_cmp_f_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc0,0x7c]
+
+v_cmp_f_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc0,0x7c]
+
+v_cmp_f_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc0,0x7c]
+
+v_cmp_f_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc0,0x7c]
+
+v_cmp_f_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc0,0x7c]
+
+v_cmp_f_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc0,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_f_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc0,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_f_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc0,0x7c]
+
+v_cmp_f_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc0,0x7c]
+
+v_cmp_f_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc1,0x7c]
+
+v_cmp_f_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x60,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x60,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x60,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x60,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x60,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x60,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x60,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_f_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_f_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_f_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_f_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_f_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_f_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_f_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_f_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_f_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x60,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_f_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x60,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc2,0x7c]
+
+v_cmp_lt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc2,0x7c]
+
+v_cmp_lt_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xc2,0x7c]
+
+v_cmp_lt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xc2,0x7c]
+
+v_cmp_lt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc2,0x7c]
+
+v_cmp_lt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc2,0x7c]
+
+v_cmp_lt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc2,0x7c]
+
+v_cmp_lt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc2,0x7c]
+
+v_cmp_lt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc2,0x7c]
+
+v_cmp_lt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc2,0x7c]
+
+v_cmp_lt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc2,0x7c]
+
+v_cmp_lt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc2,0x7c]
+
+v_cmp_lt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc2,0x7c]
+
+v_cmp_lt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc2,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_lt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc2,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_lt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc2,0x7c]
+
+v_cmp_lt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc2,0x7c]
+
+v_cmp_lt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc3,0x7c]
+
+v_cmp_lt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x61,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x61,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x61,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x61,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x61,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x61,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x61,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x61,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x61,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x61,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_lt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x61,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_lt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x61,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_lt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x61,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_lt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x61,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_lt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x61,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_lt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x61,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_lt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x61,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_lt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x61,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_lt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x61,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_lt_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x61,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc4,0x7c]
+
+v_cmp_eq_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc4,0x7c]
+
+v_cmp_eq_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xc4,0x7c]
+
+v_cmp_eq_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xc4,0x7c]
+
+v_cmp_eq_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc4,0x7c]
+
+v_cmp_eq_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc4,0x7c]
+
+v_cmp_eq_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc4,0x7c]
+
+v_cmp_eq_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc4,0x7c]
+
+v_cmp_eq_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc4,0x7c]
+
+v_cmp_eq_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc4,0x7c]
+
+v_cmp_eq_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc4,0x7c]
+
+v_cmp_eq_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc4,0x7c]
+
+v_cmp_eq_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc4,0x7c]
+
+v_cmp_eq_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc4,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_eq_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc4,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_eq_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc4,0x7c]
+
+v_cmp_eq_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc4,0x7c]
+
+v_cmp_eq_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc5,0x7c]
+
+v_cmp_eq_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x62,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x62,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x62,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x62,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x62,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x62,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x62,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_eq_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_eq_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_eq_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_eq_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_eq_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_eq_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_eq_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_eq_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_eq_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x62,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_eq_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x62,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc6,0x7c]
+
+v_cmp_le_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc6,0x7c]
+
+v_cmp_le_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xc6,0x7c]
+
+v_cmp_le_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xc6,0x7c]
+
+v_cmp_le_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc6,0x7c]
+
+v_cmp_le_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc6,0x7c]
+
+v_cmp_le_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc6,0x7c]
+
+v_cmp_le_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc6,0x7c]
+
+v_cmp_le_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc6,0x7c]
+
+v_cmp_le_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc6,0x7c]
+
+v_cmp_le_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc6,0x7c]
+
+v_cmp_le_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc6,0x7c]
+
+v_cmp_le_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc6,0x7c]
+
+v_cmp_le_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc6,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_le_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc6,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_le_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc6,0x7c]
+
+v_cmp_le_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc6,0x7c]
+
+v_cmp_le_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc7,0x7c]
+
+v_cmp_le_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x63,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x63,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x63,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x63,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x63,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x63,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x63,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x63,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x63,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x63,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_le_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x63,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_le_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x63,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_le_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x63,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_le_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x63,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_le_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x63,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_le_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x63,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_le_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x63,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_le_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x63,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_le_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x63,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_le_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x63,0xd0,0x04,0x08,0x00,0x00]
+
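+// Likewise for the VOP3 (_e64) checks: byte 0 carries the SGPR-pair
+// destination, bit 7 of byte 1 is the clamp flag (0x80), src0 sits in
+// byte 4, src1 is packed across bytes 5-6, and the neg source modifiers
+// land in the final byte: 0x20 negates src0, 0x40 negates src1, 0x60 both.
+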
+v_cmp_gt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc8,0x7c]
+
+v_cmp_gt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc8,0x7c]
+
+v_cmp_gt_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xc8,0x7c]
+
+v_cmp_gt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xc8,0x7c]
+
+v_cmp_gt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc8,0x7c]
+
+v_cmp_gt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc8,0x7c]
+
+v_cmp_gt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc8,0x7c]
+
+v_cmp_gt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc8,0x7c]
+
+v_cmp_gt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc8,0x7c]
+
+v_cmp_gt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc8,0x7c]
+
+v_cmp_gt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc8,0x7c]
+
+v_cmp_gt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc8,0x7c]
+
+v_cmp_gt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc8,0x7c]
+
+v_cmp_gt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc8,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_gt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc8,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_gt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc8,0x7c]
+
+v_cmp_gt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc8,0x7c]
+
+v_cmp_gt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc9,0x7c]
+
+v_cmp_gt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x64,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x64,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x64,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x64,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x64,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x64,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x64,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_gt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_gt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_gt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_gt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_gt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_gt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_gt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_gt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_gt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x64,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_gt_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x64,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lg_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xca,0x7c]
+
+v_cmp_lg_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xca,0x7c]
+
+v_cmp_lg_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xca,0x7c]
+
+v_cmp_lg_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xca,0x7c]
+
+v_cmp_lg_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xca,0x7c]
+
+v_cmp_lg_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xca,0x7c]
+
+v_cmp_lg_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xca,0x7c]
+
+v_cmp_lg_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xca,0x7c]
+
+v_cmp_lg_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xca,0x7c]
+
+v_cmp_lg_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xca,0x7c]
+
+v_cmp_lg_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xca,0x7c]
+
+v_cmp_lg_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xca,0x7c]
+
+v_cmp_lg_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xca,0x7c]
+
+v_cmp_lg_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xca,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_lg_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xca,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_lg_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xca,0x7c]
+
+v_cmp_lg_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xca,0x7c]
+
+v_cmp_lg_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xcb,0x7c]
+
+v_cmp_lg_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x65,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x65,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x65,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x65,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x65,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x65,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x65,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x65,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x65,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x65,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_lg_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x65,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_lg_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x65,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_lg_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x65,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_lg_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x65,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_lg_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x65,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_lg_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x65,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_lg_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x65,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_lg_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x65,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_lg_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x65,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_lg_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x65,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xcc,0x7c]
+
+v_cmp_ge_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xcc,0x7c]
+
+v_cmp_ge_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xcc,0x7c]
+
+v_cmp_ge_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xcc,0x7c]
+
+v_cmp_ge_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xcc,0x7c]
+
+v_cmp_ge_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xcc,0x7c]
+
+v_cmp_ge_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xcc,0x7c]
+
+v_cmp_ge_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xcc,0x7c]
+
+v_cmp_ge_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xcc,0x7c]
+
+v_cmp_ge_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xcc,0x7c]
+
+v_cmp_ge_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xcc,0x7c]
+
+v_cmp_ge_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xcc,0x7c]
+
+v_cmp_ge_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xcc,0x7c]
+
+v_cmp_ge_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xcc,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_ge_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xcc,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_ge_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xcc,0x7c]
+
+v_cmp_ge_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xcc,0x7c]
+
+v_cmp_ge_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xcd,0x7c]
+
+v_cmp_ge_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x66,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x66,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x66,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x66,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x66,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x66,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x66,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_ge_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_ge_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_ge_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_ge_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_ge_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_ge_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_ge_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_ge_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_ge_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x66,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_ge_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x66,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_o_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xce,0x7c]
+
+v_cmp_o_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xce,0x7c]
+
+v_cmp_o_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xce,0x7c]
+
+v_cmp_o_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xce,0x7c]
+
+v_cmp_o_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xce,0x7c]
+
+v_cmp_o_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xce,0x7c]
+
+v_cmp_o_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xce,0x7c]
+
+v_cmp_o_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xce,0x7c]
+
+v_cmp_o_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xce,0x7c]
+
+v_cmp_o_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xce,0x7c]
+
+v_cmp_o_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xce,0x7c]
+
+v_cmp_o_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xce,0x7c]
+
+v_cmp_o_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xce,0x7c]
+
+v_cmp_o_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xce,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_o_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xce,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_o_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xce,0x7c]
+
+v_cmp_o_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xce,0x7c]
+
+v_cmp_o_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xcf,0x7c]
+
+v_cmp_o_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x67,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x67,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x67,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x67,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x67,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x67,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x67,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x67,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x67,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x67,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_o_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x67,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_o_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x67,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_o_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x67,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_o_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x67,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_o_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x67,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_o_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x67,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_o_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x67,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_o_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x67,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_o_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x67,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_o_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x67,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_u_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xd0,0x7c]
+
+v_cmp_u_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xd0,0x7c]
+
+v_cmp_u_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xd0,0x7c]
+
+v_cmp_u_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xd0,0x7c]
+
+v_cmp_u_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xd0,0x7c]
+
+v_cmp_u_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xd0,0x7c]
+
+v_cmp_u_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xd0,0x7c]
+
+v_cmp_u_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xd0,0x7c]
+
+v_cmp_u_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xd0,0x7c]
+
+v_cmp_u_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xd0,0x7c]
+
+v_cmp_u_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xd0,0x7c]
+
+v_cmp_u_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xd0,0x7c]
+
+v_cmp_u_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xd0,0x7c]
+
+v_cmp_u_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xd0,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_u_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xd0,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_u_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xd0,0x7c]
+
+v_cmp_u_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xd0,0x7c]
+
+v_cmp_u_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xd1,0x7c]
+
+v_cmp_u_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x68,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x68,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x68,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x68,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x68,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x68,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x68,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_u_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_u_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_u_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_u_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_u_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_u_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_u_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_u_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_u_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x68,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_u_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x68,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nge_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xd2,0x7c]
+
+v_cmp_nge_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xd2,0x7c]
+
+v_cmp_nge_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xd2,0x7c]
+
+v_cmp_nge_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xd2,0x7c]
+
+v_cmp_nge_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xd2,0x7c]
+
+v_cmp_nge_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xd2,0x7c]
+
+v_cmp_nge_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xd2,0x7c]
+
+v_cmp_nge_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xd2,0x7c]
+
+v_cmp_nge_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xd2,0x7c]
+
+v_cmp_nge_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xd2,0x7c]
+
+v_cmp_nge_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xd2,0x7c]
+
+v_cmp_nge_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xd2,0x7c]
+
+v_cmp_nge_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xd2,0x7c]
+
+v_cmp_nge_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xd2,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_nge_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xd2,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_nge_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xd2,0x7c]
+
+v_cmp_nge_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xd2,0x7c]
+
+v_cmp_nge_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xd3,0x7c]
+
+v_cmp_nge_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x69,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x69,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x69,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x69,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x69,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x69,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x69,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x69,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x69,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x69,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_nge_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x69,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_nge_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x69,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_nge_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x69,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_nge_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x69,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_nge_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x69,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_nge_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x69,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_nge_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x69,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_nge_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x69,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_nge_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x69,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_nge_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x69,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlg_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xd4,0x7c]
+
+v_cmp_nlg_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xd4,0x7c]
+
+v_cmp_nlg_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xd4,0x7c]
+
+v_cmp_nlg_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xd4,0x7c]
+
+v_cmp_nlg_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xd4,0x7c]
+
+v_cmp_nlg_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xd4,0x7c]
+
+v_cmp_nlg_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xd4,0x7c]
+
+v_cmp_nlg_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xd4,0x7c]
+
+v_cmp_nlg_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xd4,0x7c]
+
+v_cmp_nlg_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xd4,0x7c]
+
+v_cmp_nlg_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xd4,0x7c]
+
+v_cmp_nlg_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xd4,0x7c]
+
+v_cmp_nlg_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xd4,0x7c]
+
+v_cmp_nlg_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xd4,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_nlg_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xd4,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_nlg_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xd4,0x7c]
+
+v_cmp_nlg_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xd4,0x7c]
+
+v_cmp_nlg_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xd5,0x7c]
+
+v_cmp_nlg_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x6a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x6a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x6a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x6a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x6a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x6a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x6a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_nlg_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_nlg_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_nlg_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_nlg_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_nlg_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_nlg_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_nlg_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_nlg_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_nlg_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x6a,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_nlg_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x6a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ngt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xd6,0x7c]
+
+v_cmp_ngt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xd6,0x7c]
+
+v_cmp_ngt_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xd6,0x7c]
+
+v_cmp_ngt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xd6,0x7c]
+
+v_cmp_ngt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xd6,0x7c]
+
+v_cmp_ngt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xd6,0x7c]
+
+v_cmp_ngt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xd6,0x7c]
+
+v_cmp_ngt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xd6,0x7c]
+
+v_cmp_ngt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xd6,0x7c]
+
+v_cmp_ngt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xd6,0x7c]
+
+v_cmp_ngt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xd6,0x7c]
+
+v_cmp_ngt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xd6,0x7c]
+
+v_cmp_ngt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xd6,0x7c]
+
+v_cmp_ngt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xd6,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_ngt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xd6,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_ngt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xd6,0x7c]
+
+v_cmp_ngt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xd6,0x7c]
+
+v_cmp_ngt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xd7,0x7c]
+
+v_cmp_ngt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6b,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x6b,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x6b,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x6b,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x6b,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x6b,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x6b,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x6b,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x6b,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x6b,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_ngt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x6b,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_ngt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x6b,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_ngt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x6b,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_ngt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x6b,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_ngt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x6b,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_ngt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x6b,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_ngt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6b,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_ngt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x6b,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_ngt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x6b,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_ngt_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x6b,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nle_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xd8,0x7c]
+
+v_cmp_nle_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xd8,0x7c]
+
+v_cmp_nle_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xd8,0x7c]
+
+v_cmp_nle_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xd8,0x7c]
+
+v_cmp_nle_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xd8,0x7c]
+
+v_cmp_nle_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xd8,0x7c]
+
+v_cmp_nle_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xd8,0x7c]
+
+v_cmp_nle_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xd8,0x7c]
+
+v_cmp_nle_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xd8,0x7c]
+
+v_cmp_nle_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xd8,0x7c]
+
+v_cmp_nle_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xd8,0x7c]
+
+v_cmp_nle_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xd8,0x7c]
+
+v_cmp_nle_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xd8,0x7c]
+
+v_cmp_nle_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xd8,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_nle_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xd8,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_nle_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xd8,0x7c]
+
+v_cmp_nle_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xd8,0x7c]
+
+v_cmp_nle_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xd9,0x7c]
+
+v_cmp_nle_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x6c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x6c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x6c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x6c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x6c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x6c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x6c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_nle_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_nle_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_nle_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_nle_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_nle_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_nle_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_nle_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_nle_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_nle_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x6c,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_nle_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x6c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_neq_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xda,0x7c]
+
+v_cmp_neq_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xda,0x7c]
+
+v_cmp_neq_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xda,0x7c]
+
+v_cmp_neq_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xda,0x7c]
+
+v_cmp_neq_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xda,0x7c]
+
+v_cmp_neq_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xda,0x7c]
+
+v_cmp_neq_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xda,0x7c]
+
+v_cmp_neq_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xda,0x7c]
+
+v_cmp_neq_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xda,0x7c]
+
+v_cmp_neq_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xda,0x7c]
+
+v_cmp_neq_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xda,0x7c]
+
+v_cmp_neq_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xda,0x7c]
+
+v_cmp_neq_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xda,0x7c]
+
+v_cmp_neq_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xda,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_neq_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xda,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_neq_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xda,0x7c]
+
+v_cmp_neq_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xda,0x7c]
+
+v_cmp_neq_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xdb,0x7c]
+
+v_cmp_neq_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6d,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x6d,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x6d,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x6d,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x6d,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x6d,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x6d,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x6d,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x6d,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x6d,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_neq_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x6d,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_neq_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x6d,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_neq_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x6d,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_neq_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x6d,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_neq_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x6d,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_neq_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x6d,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_neq_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6d,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_neq_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x6d,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_neq_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x6d,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_neq_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x6d,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xdc,0x7c]
+
+v_cmp_nlt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xdc,0x7c]
+
+v_cmp_nlt_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xdc,0x7c]
+
+v_cmp_nlt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xdc,0x7c]
+
+v_cmp_nlt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xdc,0x7c]
+
+v_cmp_nlt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xdc,0x7c]
+
+v_cmp_nlt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xdc,0x7c]
+
+v_cmp_nlt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xdc,0x7c]
+
+v_cmp_nlt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xdc,0x7c]
+
+v_cmp_nlt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xdc,0x7c]
+
+v_cmp_nlt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xdc,0x7c]
+
+v_cmp_nlt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xdc,0x7c]
+
+v_cmp_nlt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xdc,0x7c]
+
+v_cmp_nlt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xdc,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_nlt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xdc,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_nlt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xdc,0x7c]
+
+v_cmp_nlt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xdc,0x7c]
+
+v_cmp_nlt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xdd,0x7c]
+
+v_cmp_nlt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x6e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x6e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x6e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x6e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x6e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x6e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x6e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_nlt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_nlt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_nlt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_nlt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_nlt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_nlt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_nlt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_nlt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_nlt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x6e,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_nlt_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x6e,0xd0,0x04,0x08,0x00,0x00]
+
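+// v_cmp_tru_f64 is the always-true compare, closing out the 16 f64 compare
+// ops (opcodes 0x60-0x6f in the _e64 form); it is encoded like any other.
+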
+v_cmp_tru_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xde,0x7c]
+
+v_cmp_tru_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xde,0x7c]
+
+v_cmp_tru_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xde,0x7c]
+
+v_cmp_tru_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xde,0x7c]
+
+v_cmp_tru_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xde,0x7c]
+
+v_cmp_tru_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xde,0x7c]
+
+v_cmp_tru_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xde,0x7c]
+
+v_cmp_tru_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xde,0x7c]
+
+v_cmp_tru_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xde,0x7c]
+
+v_cmp_tru_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xde,0x7c]
+
+v_cmp_tru_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xde,0x7c]
+
+v_cmp_tru_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xde,0x7c]
+
+v_cmp_tru_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xde,0x7c]
+
+v_cmp_tru_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xde,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmp_tru_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xde,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmp_tru_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xde,0x7c]
+
+v_cmp_tru_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xde,0x7c]
+
+v_cmp_tru_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xdf,0x7c]
+
+v_cmp_tru_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6f,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x6f,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x6f,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x6f,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x6f,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x6f,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x6f,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x6f,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x6f,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x6f,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_tru_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x6f,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_tru_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x6f,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_tru_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x6f,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_tru_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x6f,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_tru_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x6f,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_tru_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x6f,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_tru_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x6f,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmp_tru_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x6f,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmp_tru_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x6f,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmp_tru_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x6f,0xd0,0x04,0x08,0x00,0x00]
+
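+// The v_cmpx_* variants below reuse the same operand encodings but also
+// write the compare result to the EXEC mask; their opcodes continue the
+// sequence (v_cmpx_f_f64_e64 starts at 0x70).
+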
+v_cmpx_f_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe0,0x7c]
+
+v_cmpx_f_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe0,0x7c]
+
+v_cmpx_f_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xe0,0x7c]
+
+v_cmpx_f_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xe0,0x7c]
+
+v_cmpx_f_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe0,0x7c]
+
+v_cmpx_f_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe0,0x7c]
+
+v_cmpx_f_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe0,0x7c]
+
+v_cmpx_f_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe0,0x7c]
+
+v_cmpx_f_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe0,0x7c]
+
+v_cmpx_f_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe0,0x7c]
+
+v_cmpx_f_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe0,0x7c]
+
+v_cmpx_f_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe0,0x7c]
+
+v_cmpx_f_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe0,0x7c]
+
+v_cmpx_f_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe0,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_f_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe0,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_f_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe0,0x7c]
+
+v_cmpx_f_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe0,0x7c]
+
+v_cmpx_f_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe1,0x7c]
+
+v_cmpx_f_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x70,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x70,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x70,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x70,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x70,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x70,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x70,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x70,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x70,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_f_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x70,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_f_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x70,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_f_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_f_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_f_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_f_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_f_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_f_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_f_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x70,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_f_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x70,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe2,0x7c]
+
+v_cmpx_lt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe2,0x7c]
+
+v_cmpx_lt_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xe2,0x7c]
+
+v_cmpx_lt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xe2,0x7c]
+
+v_cmpx_lt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe2,0x7c]
+
+v_cmpx_lt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe2,0x7c]
+
+v_cmpx_lt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe2,0x7c]
+
+v_cmpx_lt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe2,0x7c]
+
+v_cmpx_lt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe2,0x7c]
+
+v_cmpx_lt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe2,0x7c]
+
+v_cmpx_lt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe2,0x7c]
+
+v_cmpx_lt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe2,0x7c]
+
+v_cmpx_lt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe2,0x7c]
+
+v_cmpx_lt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe2,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe2,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_lt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe2,0x7c]
+
+v_cmpx_lt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe2,0x7c]
+
+v_cmpx_lt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe3,0x7c]
+
+v_cmpx_lt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x71,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x71,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x71,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x71,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x71,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x71,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x71,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x71,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x71,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x71,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x71,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_lt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x71,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_lt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x71,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_lt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x71,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_lt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x71,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_lt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x71,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_lt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x71,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_lt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x71,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_lt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x71,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_lt_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x71,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe4,0x7c]
+
+v_cmpx_eq_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe4,0x7c]
+
+v_cmpx_eq_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xe4,0x7c]
+
+v_cmpx_eq_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xe4,0x7c]
+
+v_cmpx_eq_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe4,0x7c]
+
+v_cmpx_eq_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe4,0x7c]
+
+v_cmpx_eq_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe4,0x7c]
+
+v_cmpx_eq_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe4,0x7c]
+
+v_cmpx_eq_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe4,0x7c]
+
+v_cmpx_eq_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe4,0x7c]
+
+v_cmpx_eq_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe4,0x7c]
+
+v_cmpx_eq_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe4,0x7c]
+
+v_cmpx_eq_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe4,0x7c]
+
+v_cmpx_eq_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe4,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe4,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_eq_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe4,0x7c]
+
+v_cmpx_eq_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe4,0x7c]
+
+v_cmpx_eq_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe5,0x7c]
+
+v_cmpx_eq_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x72,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x72,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x72,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x72,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x72,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x72,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x72,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x72,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x72,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x72,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_eq_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x72,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_eq_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_eq_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_eq_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_eq_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_eq_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_eq_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_eq_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x72,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_eq_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x72,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe6,0x7c]
+
+v_cmpx_le_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe6,0x7c]
+
+v_cmpx_le_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xe6,0x7c]
+
+v_cmpx_le_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xe6,0x7c]
+
+v_cmpx_le_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe6,0x7c]
+
+v_cmpx_le_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe6,0x7c]
+
+v_cmpx_le_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe6,0x7c]
+
+v_cmpx_le_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe6,0x7c]
+
+v_cmpx_le_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe6,0x7c]
+
+v_cmpx_le_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe6,0x7c]
+
+v_cmpx_le_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe6,0x7c]
+
+v_cmpx_le_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe6,0x7c]
+
+v_cmpx_le_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe6,0x7c]
+
+v_cmpx_le_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe6,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe6,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_le_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe6,0x7c]
+
+v_cmpx_le_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe6,0x7c]
+
+v_cmpx_le_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe7,0x7c]
+
+v_cmpx_le_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x73,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x73,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x73,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x73,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x73,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x73,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x73,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x73,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x73,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x73,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x73,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_le_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x73,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_le_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x73,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_le_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x73,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_le_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x73,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_le_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x73,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_le_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x73,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_le_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x73,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_le_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x73,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_le_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x73,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe8,0x7c]
+
+v_cmpx_gt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe8,0x7c]
+
+v_cmpx_gt_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xe8,0x7c]
+
+v_cmpx_gt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xe8,0x7c]
+
+v_cmpx_gt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe8,0x7c]
+
+v_cmpx_gt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe8,0x7c]
+
+v_cmpx_gt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe8,0x7c]
+
+v_cmpx_gt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe8,0x7c]
+
+v_cmpx_gt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe8,0x7c]
+
+v_cmpx_gt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe8,0x7c]
+
+v_cmpx_gt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe8,0x7c]
+
+v_cmpx_gt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe8,0x7c]
+
+v_cmpx_gt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe8,0x7c]
+
+v_cmpx_gt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe8,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe8,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_gt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe8,0x7c]
+
+v_cmpx_gt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe8,0x7c]
+
+v_cmpx_gt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe9,0x7c]
+
+v_cmpx_gt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x74,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x74,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x74,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x74,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x74,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x74,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x74,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x74,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x74,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x74,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_gt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x74,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_gt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_gt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_gt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_gt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_gt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_gt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_gt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x74,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_gt_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x74,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lg_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xea,0x7c]
+
+v_cmpx_lg_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xea,0x7c]
+
+v_cmpx_lg_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xea,0x7c]
+
+v_cmpx_lg_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xea,0x7c]
+
+v_cmpx_lg_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xea,0x7c]
+
+v_cmpx_lg_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xea,0x7c]
+
+v_cmpx_lg_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xea,0x7c]
+
+v_cmpx_lg_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xea,0x7c]
+
+v_cmpx_lg_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xea,0x7c]
+
+v_cmpx_lg_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xea,0x7c]
+
+v_cmpx_lg_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xea,0x7c]
+
+v_cmpx_lg_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xea,0x7c]
+
+v_cmpx_lg_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xea,0x7c]
+
+v_cmpx_lg_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xea,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lg_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xea,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_lg_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xea,0x7c]
+
+v_cmpx_lg_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xea,0x7c]
+
+v_cmpx_lg_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xeb,0x7c]
+
+v_cmpx_lg_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x75,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x75,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x75,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x75,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x75,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x75,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x75,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x75,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x75,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x75,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x75,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_lg_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x75,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_lg_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x75,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_lg_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x75,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_lg_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x75,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_lg_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x75,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_lg_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x75,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_lg_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x75,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_lg_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x75,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_lg_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x75,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xec,0x7c]
+
+v_cmpx_ge_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xec,0x7c]
+
+v_cmpx_ge_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xec,0x7c]
+
+v_cmpx_ge_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xec,0x7c]
+
+v_cmpx_ge_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xec,0x7c]
+
+v_cmpx_ge_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xec,0x7c]
+
+v_cmpx_ge_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xec,0x7c]
+
+v_cmpx_ge_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xec,0x7c]
+
+v_cmpx_ge_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xec,0x7c]
+
+v_cmpx_ge_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xec,0x7c]
+
+v_cmpx_ge_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xec,0x7c]
+
+v_cmpx_ge_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xec,0x7c]
+
+v_cmpx_ge_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xec,0x7c]
+
+v_cmpx_ge_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xec,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xec,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ge_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xec,0x7c]
+
+v_cmpx_ge_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xec,0x7c]
+
+v_cmpx_ge_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xed,0x7c]
+
+v_cmpx_ge_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x76,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x76,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x76,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x76,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x76,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x76,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x76,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x76,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x76,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x76,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_ge_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x76,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_ge_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_ge_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_ge_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_ge_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_ge_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_ge_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_ge_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x76,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_ge_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x76,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_o_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xee,0x7c]
+
+v_cmpx_o_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xee,0x7c]
+
+v_cmpx_o_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xee,0x7c]
+
+v_cmpx_o_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xee,0x7c]
+
+v_cmpx_o_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xee,0x7c]
+
+v_cmpx_o_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xee,0x7c]
+
+v_cmpx_o_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xee,0x7c]
+
+v_cmpx_o_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xee,0x7c]
+
+v_cmpx_o_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xee,0x7c]
+
+v_cmpx_o_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xee,0x7c]
+
+v_cmpx_o_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xee,0x7c]
+
+v_cmpx_o_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xee,0x7c]
+
+v_cmpx_o_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xee,0x7c]
+
+v_cmpx_o_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xee,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_o_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xee,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_o_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xee,0x7c]
+
+v_cmpx_o_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xee,0x7c]
+
+v_cmpx_o_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xef,0x7c]
+
+v_cmpx_o_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x77,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x77,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x77,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x77,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x77,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x77,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x77,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x77,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x77,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x77,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x77,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_o_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x77,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_o_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x77,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_o_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x77,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_o_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x77,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_o_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x77,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_o_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x77,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_o_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x77,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_o_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x77,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_o_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x77,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_u_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xf0,0x7c]
+
+v_cmpx_u_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xf0,0x7c]
+
+v_cmpx_u_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xf0,0x7c]
+
+v_cmpx_u_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xf0,0x7c]
+
+v_cmpx_u_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xf0,0x7c]
+
+v_cmpx_u_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xf0,0x7c]
+
+v_cmpx_u_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xf0,0x7c]
+
+v_cmpx_u_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xf0,0x7c]
+
+v_cmpx_u_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xf0,0x7c]
+
+v_cmpx_u_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xf0,0x7c]
+
+v_cmpx_u_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xf0,0x7c]
+
+v_cmpx_u_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xf0,0x7c]
+
+v_cmpx_u_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xf0,0x7c]
+
+v_cmpx_u_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xf0,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_u_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xf0,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_u_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xf0,0x7c]
+
+v_cmpx_u_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xf0,0x7c]
+
+v_cmpx_u_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xf1,0x7c]
+
+v_cmpx_u_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x78,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x78,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x78,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x78,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x78,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x78,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x78,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x78,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x78,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x78,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_u_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x78,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_u_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_u_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_u_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_u_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_u_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_u_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_u_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x78,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_u_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x78,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nge_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xf2,0x7c]
+
+v_cmpx_nge_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xf2,0x7c]
+
+v_cmpx_nge_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xf2,0x7c]
+
+v_cmpx_nge_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xf2,0x7c]
+
+v_cmpx_nge_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xf2,0x7c]
+
+v_cmpx_nge_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xf2,0x7c]
+
+v_cmpx_nge_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xf2,0x7c]
+
+v_cmpx_nge_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xf2,0x7c]
+
+v_cmpx_nge_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xf2,0x7c]
+
+v_cmpx_nge_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xf2,0x7c]
+
+v_cmpx_nge_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xf2,0x7c]
+
+v_cmpx_nge_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xf2,0x7c]
+
+v_cmpx_nge_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xf2,0x7c]
+
+v_cmpx_nge_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xf2,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nge_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xf2,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_nge_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xf2,0x7c]
+
+v_cmpx_nge_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xf2,0x7c]
+
+v_cmpx_nge_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xf3,0x7c]
+
+v_cmpx_nge_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x79,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x79,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x79,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x79,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x79,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x79,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x79,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x79,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x79,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x79,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x79,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_nge_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x79,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_nge_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x79,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_nge_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x79,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_nge_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x79,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_nge_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x79,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_nge_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x79,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_nge_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x79,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_nge_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x79,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_nge_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x79,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xf4,0x7c]
+
+v_cmpx_nlg_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xf4,0x7c]
+
+v_cmpx_nlg_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xf4,0x7c]
+
+v_cmpx_nlg_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xf4,0x7c]
+
+v_cmpx_nlg_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xf4,0x7c]
+
+v_cmpx_nlg_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xf4,0x7c]
+
+v_cmpx_nlg_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xf4,0x7c]
+
+v_cmpx_nlg_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xf4,0x7c]
+
+v_cmpx_nlg_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xf4,0x7c]
+
+v_cmpx_nlg_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xf4,0x7c]
+
+v_cmpx_nlg_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xf4,0x7c]
+
+v_cmpx_nlg_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xf4,0x7c]
+
+v_cmpx_nlg_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xf4,0x7c]
+
+v_cmpx_nlg_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xf4,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlg_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xf4,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_nlg_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xf4,0x7c]
+
+v_cmpx_nlg_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xf4,0x7c]
+
+v_cmpx_nlg_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xf5,0x7c]
+
+v_cmpx_nlg_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x7a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x7a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x7a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x7a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x7a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x7a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x7a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_nlg_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_nlg_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_nlg_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_nlg_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_nlg_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_nlg_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x7a,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_nlg_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x7a,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xf6,0x7c]
+
+v_cmpx_ngt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xf6,0x7c]
+
+v_cmpx_ngt_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xf6,0x7c]
+
+v_cmpx_ngt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xf6,0x7c]
+
+v_cmpx_ngt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xf6,0x7c]
+
+v_cmpx_ngt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xf6,0x7c]
+
+v_cmpx_ngt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xf6,0x7c]
+
+v_cmpx_ngt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xf6,0x7c]
+
+v_cmpx_ngt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xf6,0x7c]
+
+v_cmpx_ngt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xf6,0x7c]
+
+v_cmpx_ngt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xf6,0x7c]
+
+v_cmpx_ngt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xf6,0x7c]
+
+v_cmpx_ngt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xf6,0x7c]
+
+v_cmpx_ngt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xf6,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ngt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xf6,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ngt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xf6,0x7c]
+
+v_cmpx_ngt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xf6,0x7c]
+
+v_cmpx_ngt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xf7,0x7c]
+
+v_cmpx_ngt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x7b,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x7b,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x7b,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x7b,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x7b,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x7b,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x7b,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x7b,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x7b,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x7b,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x7b,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x7b,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x7b,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_ngt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x7b,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_ngt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x7b,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_ngt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x7b,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_ngt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x7b,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_ngt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x7b,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_ngt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x7b,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_ngt_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x7b,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nle_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xf8,0x7c]
+
+v_cmpx_nle_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xf8,0x7c]
+
+v_cmpx_nle_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xf8,0x7c]
+
+v_cmpx_nle_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xf8,0x7c]
+
+v_cmpx_nle_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xf8,0x7c]
+
+v_cmpx_nle_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xf8,0x7c]
+
+v_cmpx_nle_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xf8,0x7c]
+
+v_cmpx_nle_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xf8,0x7c]
+
+v_cmpx_nle_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xf8,0x7c]
+
+v_cmpx_nle_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xf8,0x7c]
+
+v_cmpx_nle_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xf8,0x7c]
+
+v_cmpx_nle_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xf8,0x7c]
+
+v_cmpx_nle_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xf8,0x7c]
+
+v_cmpx_nle_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xf8,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nle_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xf8,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_nle_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xf8,0x7c]
+
+v_cmpx_nle_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xf8,0x7c]
+
+v_cmpx_nle_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xf9,0x7c]
+
+v_cmpx_nle_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x7c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x7c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x7c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x7c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x7c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x7c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x7c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_nle_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_nle_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_nle_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_nle_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_nle_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_nle_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_nle_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_nle_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x7c,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_nle_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x7c,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_neq_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xfa,0x7c]
+
+v_cmpx_neq_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xfa,0x7c]
+
+v_cmpx_neq_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xfa,0x7c]
+
+v_cmpx_neq_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xfa,0x7c]
+
+v_cmpx_neq_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xfa,0x7c]
+
+v_cmpx_neq_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xfa,0x7c]
+
+v_cmpx_neq_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xfa,0x7c]
+
+v_cmpx_neq_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xfa,0x7c]
+
+v_cmpx_neq_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xfa,0x7c]
+
+v_cmpx_neq_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xfa,0x7c]
+
+v_cmpx_neq_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xfa,0x7c]
+
+v_cmpx_neq_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xfa,0x7c]
+
+v_cmpx_neq_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xfa,0x7c]
+
+v_cmpx_neq_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xfa,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_neq_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xfa,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_neq_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xfa,0x7c]
+
+v_cmpx_neq_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xfa,0x7c]
+
+v_cmpx_neq_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xfb,0x7c]
+
+v_cmpx_neq_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x7d,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x7d,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x7d,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x7d,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x7d,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x7d,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x7d,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x7d,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x7d,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x7d,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x7d,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_neq_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x7d,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_neq_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x7d,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_neq_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x7d,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_neq_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x7d,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_neq_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x7d,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_neq_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x7d,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_neq_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x7d,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_neq_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x7d,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_neq_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x7d,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xfc,0x7c]
+
+v_cmpx_nlt_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xfc,0x7c]
+
+v_cmpx_nlt_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xfc,0x7c]
+
+v_cmpx_nlt_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xfc,0x7c]
+
+v_cmpx_nlt_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xfc,0x7c]
+
+v_cmpx_nlt_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xfc,0x7c]
+
+v_cmpx_nlt_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xfc,0x7c]
+
+v_cmpx_nlt_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xfc,0x7c]
+
+v_cmpx_nlt_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xfc,0x7c]
+
+v_cmpx_nlt_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xfc,0x7c]
+
+v_cmpx_nlt_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xfc,0x7c]
+
+v_cmpx_nlt_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xfc,0x7c]
+
+v_cmpx_nlt_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xfc,0x7c]
+
+v_cmpx_nlt_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xfc,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlt_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xfc,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_nlt_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xfc,0x7c]
+
+v_cmpx_nlt_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xfc,0x7c]
+
+v_cmpx_nlt_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xfd,0x7c]
+
+v_cmpx_nlt_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x7e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x7e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x7e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x7e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x7e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x7e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x7e,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_nlt_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_nlt_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_nlt_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_nlt_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_nlt_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_nlt_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x7e,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_nlt_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x7e,0xd0,0x04,0x08,0x00,0x00]
+
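+// v_cmpx_tru_f64 (always-true compare) closes out the f64 v_cmpx family.
+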
+v_cmpx_tru_f64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xfe,0x7c]
+
+v_cmpx_tru_f64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xfe,0x7c]
+
+v_cmpx_tru_f64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xfe,0x7c]
+
+v_cmpx_tru_f64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xfe,0x7c]
+
+v_cmpx_tru_f64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xfe,0x7c]
+
+v_cmpx_tru_f64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xfe,0x7c]
+
+v_cmpx_tru_f64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xfe,0x7c]
+
+v_cmpx_tru_f64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xfe,0x7c]
+
+v_cmpx_tru_f64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xfe,0x7c]
+
+v_cmpx_tru_f64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xfe,0x7c]
+
+v_cmpx_tru_f64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xfe,0x7c]
+
+v_cmpx_tru_f64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xfe,0x7c]
+
+v_cmpx_tru_f64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xfe,0x7c]
+
+v_cmpx_tru_f64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xfe,0x7c,0x56,0x34,0x12,0xaf]
+
+v_cmpx_tru_f64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xfe,0x7c,0x73,0x72,0x71,0x3f]
+
+v_cmpx_tru_f64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xfe,0x7c]
+
+v_cmpx_tru_f64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xfe,0x7c]
+
+v_cmpx_tru_f64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xff,0x7c]
+
+v_cmpx_tru_f64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x7f,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0x7f,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0x7f,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0x7f,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0x7f,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0x7f,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0x7f,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0x7f,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0x7f,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0x7f,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_tru_f64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0x7f,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_tru_f64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0x7f,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_tru_f64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0x7f,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_tru_f64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0x7f,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_tru_f64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0x7f,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_tru_f64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0x7f,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_tru_f64_e64 s[10:11], -s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0x7f,0xd0,0x04,0x08,0x00,0x20]
+
+v_cmpx_tru_f64_e64 s[10:11], s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x7f,0xd0,0x04,0x08,0x00,0x40]
+
+v_cmpx_tru_f64_e64 s[10:11], -s[4:5], -s[4:5]
+// CHECK: [0x0a,0x00,0x7f,0xd0,0x04,0x08,0x00,0x60]
+
+v_cmpx_tru_f64_e64 s[10:11], s[4:5], s[4:5] clamp
+// CHECK: [0x0a,0x80,0x7f,0xd0,0x04,0x08,0x00,0x00]
+
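+// 16-bit integer compares (v_cmp_*_i16) with the same operand sweep: literals
+// such as 0xfe0b occupy the low 16 bits of the trailing literal dword, and the
+// float inline constants (0.5, -4.0) are accepted as source operands as well.
+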
+v_cmp_f_i16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x40,0x7d]
+
+v_cmp_f_i16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x40,0x7d]
+
+v_cmp_f_i16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x40,0x7d]
+
+v_cmp_f_i16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x40,0x7d]
+
+v_cmp_f_i16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x40,0x7d]
+
+v_cmp_f_i16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x40,0x7d]
+
+v_cmp_f_i16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x40,0x7d]
+
+v_cmp_f_i16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x40,0x7d]
+
+v_cmp_f_i16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x40,0x7d]
+
+v_cmp_f_i16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x40,0x7d]
+
+v_cmp_f_i16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x40,0x7d]
+
+v_cmp_f_i16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x40,0x7d]
+
+v_cmp_f_i16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x40,0x7d]
+
+v_cmp_f_i16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x40,0x7d]
+
+v_cmp_f_i16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x40,0x7d]
+
+v_cmp_f_i16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x40,0x7d]
+
+v_cmp_f_i16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x40,0x7d]
+
+v_cmp_f_i16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x40,0x7d]
+
+v_cmp_f_i16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x40,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmp_f_i16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x40,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmp_f_i16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x40,0x7d]
+
+v_cmp_f_i16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x40,0x7d]
+
+v_cmp_f_i16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x41,0x7d]
+
+v_cmp_f_i16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xa0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xa0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xa0,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa0,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xa0,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa0,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_f_i16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa0,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_lt_i16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x42,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmp_lt_i16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x42,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmp_lt_i16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x42,0x7d]
+
+v_cmp_lt_i16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x43,0x7d]
+
+v_cmp_lt_i16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xa1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xa1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xa1,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa1,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xa1,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa1,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_lt_i16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa1,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_eq_i16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x44,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmp_eq_i16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x44,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmp_eq_i16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x44,0x7d]
+
+v_cmp_eq_i16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x45,0x7d]
+
+v_cmp_eq_i16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xa2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xa2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xa2,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa2,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xa2,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa2,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_eq_i16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa2,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_le_i16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x46,0x7d]
+
+v_cmp_le_i16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x46,0x7d]
+
+v_cmp_le_i16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x46,0x7d]
+
+v_cmp_le_i16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x46,0x7d]
+
+v_cmp_le_i16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x46,0x7d]
+
+v_cmp_le_i16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x46,0x7d]
+
+v_cmp_le_i16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x46,0x7d]
+
+v_cmp_le_i16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x46,0x7d]
+
+v_cmp_le_i16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x46,0x7d]
+
+v_cmp_le_i16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x46,0x7d]
+
+v_cmp_le_i16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x46,0x7d]
+
+v_cmp_le_i16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x46,0x7d]
+
+v_cmp_le_i16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x46,0x7d]
+
+v_cmp_le_i16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x46,0x7d]
+
+v_cmp_le_i16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x46,0x7d]
+
+v_cmp_le_i16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x46,0x7d]
+
+v_cmp_le_i16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x46,0x7d]
+
+v_cmp_le_i16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x46,0x7d]
+
+v_cmp_le_i16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x46,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmp_le_i16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x46,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmp_le_i16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x46,0x7d]
+
+v_cmp_le_i16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x46,0x7d]
+
+v_cmp_le_i16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x47,0x7d]
+
+v_cmp_le_i16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xa3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xa3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xa3,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa3,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xa3,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa3,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_le_i16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa3,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_gt_i16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x48,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmp_gt_i16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x48,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmp_gt_i16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x48,0x7d]
+
+v_cmp_gt_i16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x49,0x7d]
+
+v_cmp_gt_i16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xa4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xa4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xa4,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa4,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xa4,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa4,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_gt_i16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa4,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_ne_i16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x4a,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmp_ne_i16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x4a,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmp_ne_i16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x4a,0x7d]
+
+v_cmp_ne_i16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x4b,0x7d]
+
+v_cmp_ne_i16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xa5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xa5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xa5,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa5,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xa5,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa5,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_ne_i16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa5,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_ge_i16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x4c,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmp_ge_i16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x4c,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmp_ge_i16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x4c,0x7d]
+
+v_cmp_ge_i16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x4d,0x7d]
+
+v_cmp_ge_i16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xa6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xa6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xa6,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa6,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xa6,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa6,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_ge_i16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa6,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_t_i16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x4e,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmp_t_i16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x4e,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmp_t_i16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x4e,0x7d]
+
+v_cmp_t_i16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x4f,0x7d]
+
+v_cmp_t_i16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xa7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xa7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xa7,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa7,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xa7,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa7,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_t_i16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa7,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_f_u16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x50,0x7d]
+
+v_cmp_f_u16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x50,0x7d]
+
+v_cmp_f_u16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x50,0x7d]
+
+v_cmp_f_u16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x50,0x7d]
+
+v_cmp_f_u16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x50,0x7d]
+
+v_cmp_f_u16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x50,0x7d]
+
+v_cmp_f_u16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x50,0x7d]
+
+v_cmp_f_u16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x50,0x7d]
+
+v_cmp_f_u16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x50,0x7d]
+
+v_cmp_f_u16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x50,0x7d]
+
+v_cmp_f_u16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x50,0x7d]
+
+v_cmp_f_u16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x50,0x7d]
+
+v_cmp_f_u16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x50,0x7d]
+
+v_cmp_f_u16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x50,0x7d]
+
+v_cmp_f_u16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x50,0x7d]
+
+v_cmp_f_u16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x50,0x7d]
+
+v_cmp_f_u16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x50,0x7d]
+
+v_cmp_f_u16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x50,0x7d]
+
+v_cmp_f_u16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x50,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmp_f_u16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x50,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmp_f_u16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x50,0x7d]
+
+v_cmp_f_u16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x50,0x7d]
+
+v_cmp_f_u16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x51,0x7d]
+
+v_cmp_f_u16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xa8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xa8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xa8,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa8,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xa8,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa8,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_f_u16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa8,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_lt_u16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x52,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmp_lt_u16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x52,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmp_lt_u16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x52,0x7d]
+
+v_cmp_lt_u16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x53,0x7d]
+
+v_cmp_lt_u16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xa9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xa9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xa9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xa9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xa9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xa9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xa9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xa9,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xa9,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xa9,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xa9,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_lt_u16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xa9,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_eq_u16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x54,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmp_eq_u16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x54,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmp_eq_u16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x54,0x7d]
+
+v_cmp_eq_u16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x55,0x7d]
+
+v_cmp_eq_u16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xaa,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xaa,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xaa,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xaa,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xaa,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xaa,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xaa,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xaa,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xaa,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xaa,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xaa,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_eq_u16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xaa,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_le_u16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x56,0x7d]
+
+v_cmp_le_u16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x56,0x7d]
+
+v_cmp_le_u16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x56,0x7d]
+
+v_cmp_le_u16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x56,0x7d]
+
+v_cmp_le_u16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x56,0x7d]
+
+v_cmp_le_u16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x56,0x7d]
+
+v_cmp_le_u16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x56,0x7d]
+
+v_cmp_le_u16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x56,0x7d]
+
+v_cmp_le_u16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x56,0x7d]
+
+v_cmp_le_u16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x56,0x7d]
+
+v_cmp_le_u16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x56,0x7d]
+
+v_cmp_le_u16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x56,0x7d]
+
+v_cmp_le_u16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x56,0x7d]
+
+v_cmp_le_u16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x56,0x7d]
+
+v_cmp_le_u16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x56,0x7d]
+
+v_cmp_le_u16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x56,0x7d]
+
+v_cmp_le_u16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x56,0x7d]
+
+v_cmp_le_u16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x56,0x7d]
+
+v_cmp_le_u16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x56,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmp_le_u16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x56,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmp_le_u16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x56,0x7d]
+
+v_cmp_le_u16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x56,0x7d]
+
+v_cmp_le_u16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x57,0x7d]
+
+v_cmp_le_u16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xab,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xab,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xab,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xab,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xab,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xab,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xab,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xab,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xab,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xab,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xab,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xab,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_le_u16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xab,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_gt_u16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x58,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmp_gt_u16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x58,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmp_gt_u16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x58,0x7d]
+
+v_cmp_gt_u16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x59,0x7d]
+
+v_cmp_gt_u16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xac,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xac,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xac,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xac,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xac,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xac,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xac,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xac,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xac,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xac,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xac,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xac,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_gt_u16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xac,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_ne_u16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x5a,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmp_ne_u16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x5a,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmp_ne_u16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x5a,0x7d]
+
+v_cmp_ne_u16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x5b,0x7d]
+
+v_cmp_ne_u16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xad,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xad,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xad,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xad,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xad,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xad,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xad,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xad,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xad,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xad,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xad,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xad,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_ne_u16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xad,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_ge_u16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x5c,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmp_ge_u16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x5c,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmp_ge_u16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x5c,0x7d]
+
+v_cmp_ge_u16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x5d,0x7d]
+
+v_cmp_ge_u16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xae,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xae,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xae,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xae,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xae,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xae,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xae,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xae,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xae,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xae,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xae,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xae,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_ge_u16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xae,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_t_u16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x5e,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmp_t_u16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x5e,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmp_t_u16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x5e,0x7d]
+
+v_cmp_t_u16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x5f,0x7d]
+
+v_cmp_t_u16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xaf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xaf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xaf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xaf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xaf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xaf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xaf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xaf,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xaf,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xaf,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xaf,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_t_u16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xaf,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_f_i16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x60,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_f_i16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x60,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmpx_f_i16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x60,0x7d]
+
+v_cmpx_f_i16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x61,0x7d]
+
+v_cmpx_f_i16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xb0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xb0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xb0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xb0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xb0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xb0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xb0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xb0,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xb0,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xb0,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xb0,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_f_i16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xb0,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_lt_i16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x62,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lt_i16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x62,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmpx_lt_i16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x62,0x7d]
+
+v_cmpx_lt_i16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x63,0x7d]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xb1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xb1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xb1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xb1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xb1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xb1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xb1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xb1,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xb1,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xb1,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xb1,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_lt_i16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xb1,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_eq_i16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x64,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_eq_i16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x64,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmpx_eq_i16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x64,0x7d]
+
+v_cmpx_eq_i16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x65,0x7d]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xb2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xb2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xb2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xb2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xb2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xb2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xb2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xb2,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xb2,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xb2,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xb2,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_eq_i16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xb2,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_le_i16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x66,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_le_i16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x66,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmpx_le_i16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x66,0x7d]
+
+v_cmpx_le_i16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x67,0x7d]
+
+v_cmpx_le_i16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xb3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xb3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xb3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xb3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xb3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xb3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xb3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xb3,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xb3,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xb3,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xb3,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_le_i16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xb3,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_gt_i16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x68,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_gt_i16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x68,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmpx_gt_i16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x68,0x7d]
+
+v_cmpx_gt_i16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x69,0x7d]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xb4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xb4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xb4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xb4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xb4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xb4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xb4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xb4,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xb4,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xb4,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xb4,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_gt_i16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xb4,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_ne_i16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x6a,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ne_i16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x6a,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmpx_ne_i16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x6a,0x7d]
+
+v_cmpx_ne_i16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x6b,0x7d]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xb5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xb5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xb5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xb5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xb5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xb5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xb5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xb5,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xb5,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xb5,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xb5,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_ne_i16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xb5,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_ge_i16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x6c,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ge_i16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x6c,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmpx_ge_i16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x6c,0x7d]
+
+v_cmpx_ge_i16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x6d,0x7d]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xb6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xb6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xb6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xb6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xb6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xb6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xb6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xb6,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xb6,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xb6,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xb6,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_ge_i16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xb6,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_t_i16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x6e,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_t_i16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x6e,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmpx_t_i16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x6e,0x7d]
+
+v_cmpx_t_i16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x6f,0x7d]
+
+v_cmpx_t_i16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xb7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xb7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xb7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xb7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xb7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xb7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xb7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xb7,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xb7,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xb7,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xb7,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_t_i16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xb7,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_f_u16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x70,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_f_u16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x70,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmpx_f_u16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x70,0x7d]
+
+v_cmpx_f_u16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x71,0x7d]
+
+v_cmpx_f_u16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xb8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xb8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xb8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xb8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xb8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xb8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xb8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xb8,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xb8,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xb8,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xb8,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_f_u16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xb8,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_lt_u16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x72,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lt_u16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x72,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmpx_lt_u16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x72,0x7d]
+
+v_cmpx_lt_u16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x73,0x7d]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xb9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xb9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xb9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xb9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xb9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xb9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xb9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xb9,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xb9,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xb9,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xb9,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_lt_u16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xb9,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_eq_u16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x74,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_eq_u16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x74,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmpx_eq_u16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x74,0x7d]
+
+v_cmpx_eq_u16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x75,0x7d]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xba,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xba,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xba,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xba,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xba,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xba,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xba,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xba,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xba,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xba,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xba,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xba,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_eq_u16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xba,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_le_u16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x76,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_le_u16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x76,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmpx_le_u16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x76,0x7d]
+
+v_cmpx_le_u16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x77,0x7d]
+
+v_cmpx_le_u16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xbb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xbb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xbb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xbb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xbb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xbb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xbb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xbb,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xbb,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xbb,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xbb,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_le_u16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xbb,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_gt_u16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x78,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_gt_u16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x78,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmpx_gt_u16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x78,0x7d]
+
+v_cmpx_gt_u16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x79,0x7d]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xbc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xbc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xbc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xbc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xbc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xbc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xbc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xbc,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xbc,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xbc,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xbc,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_gt_u16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xbc,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_ne_u16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x7a,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ne_u16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x7a,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmpx_ne_u16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x7a,0x7d]
+
+v_cmpx_ne_u16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x7b,0x7d]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xbd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xbd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xbd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xbd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xbd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xbd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xbd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xbd,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xbd,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xbd,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xbd,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_ne_u16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xbd,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_ge_u16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x7c,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ge_u16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x7c,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmpx_ge_u16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x7c,0x7d]
+
+v_cmpx_ge_u16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x7d,0x7d]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xbe,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xbe,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xbe,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xbe,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xbe,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xbe,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xbe,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xbe,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xbe,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xbe,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xbe,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_ge_u16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xbe,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_t_u16 vcc, s1, v2
+// CHECK: [0x01,0x04,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, s101, v2
+// CHECK: [0x65,0x04,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, 0, v2
+// CHECK: [0x80,0x04,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, 0xfe0b, v2
+// CHECK: [0xff,0x04,0x7e,0x7d,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_t_u16 vcc, 0x3456, v2
+// CHECK: [0xff,0x04,0x7e,0x7d,0x56,0x34,0x00,0x00]
+
+v_cmpx_t_u16 vcc, v1, v2
+// CHECK: [0x01,0x05,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, v255, v2
+// CHECK: [0xff,0x05,0x7e,0x7d]
+
+v_cmpx_t_u16 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x7f,0x7d]
+
+v_cmpx_t_u16_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xbf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xbf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u16_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xbf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u16_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xbf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u16_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xbf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u16_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xbf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u16_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xbf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xbf,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xbf,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xbf,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xbf,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_t_u16_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xbf,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_f_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x80,0x7d]
+
+v_cmp_f_i32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x80,0x7d]
+
+v_cmp_f_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x80,0x7d]
+
+v_cmp_f_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x80,0x7d]
+
+v_cmp_f_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x80,0x7d]
+
+v_cmp_f_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x80,0x7d]
+
+v_cmp_f_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x80,0x7d]
+
+v_cmp_f_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x80,0x7d]
+
+v_cmp_f_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x80,0x7d]
+
+v_cmp_f_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x80,0x7d]
+
+v_cmp_f_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x80,0x7d]
+
+v_cmp_f_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x80,0x7d]
+
+v_cmp_f_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x80,0x7d]
+
+v_cmp_f_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x80,0x7d]
+
+v_cmp_f_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x80,0x7d]
+
+v_cmp_f_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x80,0x7d]
+
+v_cmp_f_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x80,0x7d]
+
+v_cmp_f_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x80,0x7d]
+
+v_cmp_f_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x80,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_f_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x80,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_f_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x80,0x7d]
+
+v_cmp_f_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x80,0x7d]
+
+v_cmp_f_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x81,0x7d]
+
+v_cmp_f_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xc0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xc0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xc0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xc0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xc0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xc0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xc0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xc0,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xc0,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xc0,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xc0,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_f_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xc0,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_lt_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x82,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_lt_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x82,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_lt_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x82,0x7d]
+
+v_cmp_lt_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x83,0x7d]
+
+v_cmp_lt_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xc1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xc1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xc1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xc1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xc1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xc1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xc1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xc1,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xc1,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xc1,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xc1,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_lt_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xc1,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_eq_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x84,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_eq_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x84,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_eq_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x84,0x7d]
+
+v_cmp_eq_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x85,0x7d]
+
+v_cmp_eq_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xc2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xc2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xc2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xc2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xc2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xc2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xc2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xc2,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xc2,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xc2,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xc2,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_eq_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xc2,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_le_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x86,0x7d]
+
+v_cmp_le_i32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x86,0x7d]
+
+v_cmp_le_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x86,0x7d]
+
+v_cmp_le_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x86,0x7d]
+
+v_cmp_le_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x86,0x7d]
+
+v_cmp_le_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x86,0x7d]
+
+v_cmp_le_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x86,0x7d]
+
+v_cmp_le_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x86,0x7d]
+
+v_cmp_le_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x86,0x7d]
+
+v_cmp_le_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x86,0x7d]
+
+v_cmp_le_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x86,0x7d]
+
+v_cmp_le_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x86,0x7d]
+
+v_cmp_le_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x86,0x7d]
+
+v_cmp_le_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x86,0x7d]
+
+v_cmp_le_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x86,0x7d]
+
+v_cmp_le_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x86,0x7d]
+
+v_cmp_le_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x86,0x7d]
+
+v_cmp_le_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x86,0x7d]
+
+v_cmp_le_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x86,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_le_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x86,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_le_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x86,0x7d]
+
+v_cmp_le_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x86,0x7d]
+
+v_cmp_le_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x87,0x7d]
+
+v_cmp_le_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xc3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xc3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xc3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xc3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xc3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xc3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xc3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xc3,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xc3,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xc3,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xc3,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_le_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xc3,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_gt_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x88,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_gt_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x88,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_gt_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x88,0x7d]
+
+v_cmp_gt_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x89,0x7d]
+
+v_cmp_gt_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xc4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xc4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xc4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xc4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xc4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xc4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xc4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xc4,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xc4,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xc4,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xc4,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_gt_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xc4,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_ne_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x8a,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_ne_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x8a,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_ne_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x8a,0x7d]
+
+v_cmp_ne_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x8b,0x7d]
+
+v_cmp_ne_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xc5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xc5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xc5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xc5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xc5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xc5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xc5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xc5,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xc5,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xc5,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xc5,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_ne_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xc5,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_ge_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x8c,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_ge_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x8c,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_ge_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x8c,0x7d]
+
+v_cmp_ge_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x8d,0x7d]
+
+v_cmp_ge_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xc6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xc6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xc6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xc6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xc6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xc6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xc6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xc6,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xc6,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xc6,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xc6,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_ge_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xc6,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_t_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x8e,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_t_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x8e,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_t_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x8e,0x7d]
+
+v_cmp_t_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x8f,0x7d]
+
+v_cmp_t_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xc7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xc7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xc7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xc7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xc7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xc7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xc7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xc7,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xc7,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xc7,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xc7,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_t_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xc7,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_f_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x90,0x7d]
+
+v_cmp_f_u32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x90,0x7d]
+
+v_cmp_f_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x90,0x7d]
+
+v_cmp_f_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x90,0x7d]
+
+v_cmp_f_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x90,0x7d]
+
+v_cmp_f_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x90,0x7d]
+
+v_cmp_f_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x90,0x7d]
+
+v_cmp_f_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x90,0x7d]
+
+v_cmp_f_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x90,0x7d]
+
+v_cmp_f_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x90,0x7d]
+
+v_cmp_f_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x90,0x7d]
+
+v_cmp_f_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x90,0x7d]
+
+v_cmp_f_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x90,0x7d]
+
+v_cmp_f_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x90,0x7d]
+
+v_cmp_f_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x90,0x7d]
+
+v_cmp_f_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x90,0x7d]
+
+v_cmp_f_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x90,0x7d]
+
+v_cmp_f_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x90,0x7d]
+
+v_cmp_f_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x90,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_f_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x90,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_f_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x90,0x7d]
+
+v_cmp_f_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x90,0x7d]
+
+v_cmp_f_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x91,0x7d]
+
+v_cmp_f_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xc8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xc8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xc8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xc8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xc8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xc8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xc8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xc8,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xc8,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xc8,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xc8,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_f_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xc8,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_lt_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x92,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_lt_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x92,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_lt_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x92,0x7d]
+
+v_cmp_lt_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x93,0x7d]
+
+v_cmp_lt_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xc9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xc9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xc9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xc9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xc9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xc9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xc9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xc9,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xc9,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xc9,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xc9,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_lt_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xc9,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_eq_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x94,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_eq_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x94,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_eq_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x94,0x7d]
+
+v_cmp_eq_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x95,0x7d]
+
+v_cmp_eq_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xca,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xca,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xca,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xca,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xca,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xca,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xca,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xca,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xca,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xca,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xca,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xca,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_eq_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xca,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_le_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x96,0x7d]
+
+v_cmp_le_u32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x96,0x7d]
+
+v_cmp_le_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x96,0x7d]
+
+v_cmp_le_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x96,0x7d]
+
+v_cmp_le_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x96,0x7d]
+
+v_cmp_le_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x96,0x7d]
+
+v_cmp_le_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x96,0x7d]
+
+v_cmp_le_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x96,0x7d]
+
+v_cmp_le_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x96,0x7d]
+
+v_cmp_le_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x96,0x7d]
+
+v_cmp_le_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x96,0x7d]
+
+v_cmp_le_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x96,0x7d]
+
+v_cmp_le_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x96,0x7d]
+
+v_cmp_le_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x96,0x7d]
+
+v_cmp_le_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x96,0x7d]
+
+v_cmp_le_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x96,0x7d]
+
+v_cmp_le_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x96,0x7d]
+
+v_cmp_le_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x96,0x7d]
+
+v_cmp_le_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x96,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_le_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x96,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_le_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x96,0x7d]
+
+v_cmp_le_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x96,0x7d]
+
+v_cmp_le_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x97,0x7d]
+
+v_cmp_le_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xcb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xcb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xcb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xcb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xcb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xcb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xcb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xcb,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xcb,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xcb,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xcb,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_le_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xcb,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_gt_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x98,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_gt_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x98,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_gt_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x98,0x7d]
+
+v_cmp_gt_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x99,0x7d]
+
+v_cmp_gt_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xcc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xcc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xcc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xcc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xcc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xcc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xcc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xcc,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xcc,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xcc,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xcc,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_gt_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xcc,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_ne_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x9a,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_ne_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x9a,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_ne_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x9a,0x7d]
+
+v_cmp_ne_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x9b,0x7d]
+
+v_cmp_ne_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xcd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xcd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xcd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xcd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xcd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xcd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xcd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xcd,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xcd,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xcd,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xcd,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_ne_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xcd,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_ge_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x9c,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_ge_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x9c,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_ge_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x9c,0x7d]
+
+v_cmp_ge_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x9d,0x7d]
+
+v_cmp_ge_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xce,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xce,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xce,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xce,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xce,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xce,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xce,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xce,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xce,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xce,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xce,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xce,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_ge_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xce,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmp_t_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, s101, v2
+// CHECK: [0x65,0x04,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0x9e,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_t_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0x9e,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_t_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0x9e,0x7d]
+
+v_cmp_t_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0x9f,0x7d]
+
+v_cmp_t_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xcf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xcf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xcf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xcf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xcf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xcf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xcf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xcf,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xcf,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xcf,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xcf,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmp_t_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xcf,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_f_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa0,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_f_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa0,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_f_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa0,0x7d]
+
+v_cmpx_f_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa1,0x7d]
+
+v_cmpx_f_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xd0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xd0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xd0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xd0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xd0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xd0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xd0,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xd0,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xd0,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xd0,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xd0,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_f_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xd0,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_lt_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa2,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa2,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_lt_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa2,0x7d]
+
+v_cmpx_lt_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa3,0x7d]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xd1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xd1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xd1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xd1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xd1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xd1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xd1,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xd1,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xd1,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xd1,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xd1,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_lt_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xd1,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_eq_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa4,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa4,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_eq_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa4,0x7d]
+
+v_cmpx_eq_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa5,0x7d]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xd2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xd2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xd2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xd2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xd2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xd2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xd2,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xd2,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xd2,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xd2,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xd2,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_eq_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xd2,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_le_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa6,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa6,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_le_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa6,0x7d]
+
+v_cmpx_le_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa7,0x7d]
+
+v_cmpx_le_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xd3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xd3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xd3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xd3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xd3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xd3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xd3,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xd3,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xd3,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xd3,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xd3,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_le_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xd3,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_gt_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xa8,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xa8,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_gt_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xa8,0x7d]
+
+v_cmpx_gt_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xa9,0x7d]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xd4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xd4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xd4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xd4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xd4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xd4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xd4,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xd4,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xd4,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xd4,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xd4,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_gt_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xd4,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_ne_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xaa,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ne_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xaa,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ne_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xaa,0x7d]
+
+v_cmpx_ne_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xab,0x7d]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xd5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xd5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xd5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xd5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xd5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xd5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xd5,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xd5,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xd5,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xd5,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xd5,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_ne_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xd5,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_ge_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xac,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xac,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ge_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xac,0x7d]
+
+v_cmpx_ge_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xad,0x7d]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xd6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xd6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xd6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xd6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xd6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xd6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xd6,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xd6,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xd6,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xd6,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xd6,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_ge_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xd6,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_t_i32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xae,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_t_i32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xae,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_t_i32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xae,0x7d]
+
+v_cmpx_t_i32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xaf,0x7d]
+
+v_cmpx_t_i32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xd7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xd7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xd7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xd7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xd7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xd7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xd7,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xd7,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xd7,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xd7,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xd7,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_t_i32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xd7,0xd0,0x80,0xfe,0x03,0x00]
+
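+// Unsigned 32-bit comparisons (v_cmpx_*_u32). The layout mirrors the
+// signed forms above: the e32 encodings implicitly write VCC and the
+// EXEC mask, while the _e64 (VOP3) encodings name an explicit SGPR pair
+// (or vcc/tba/tma/ttmp/flat_scratch) as the mask destination.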
+v_cmpx_f_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xb0,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_f_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xb0,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_f_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xb0,0x7d]
+
+v_cmpx_f_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xb1,0x7d]
+
+v_cmpx_f_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xd8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xd8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xd8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xd8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xd8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xd8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xd8,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xd8,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xd8,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xd8,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xd8,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_f_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xd8,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_lt_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xb2,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xb2,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_lt_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xb2,0x7d]
+
+v_cmpx_lt_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xb3,0x7d]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xd9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xd9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xd9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xd9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xd9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xd9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xd9,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xd9,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xd9,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xd9,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xd9,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_lt_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xd9,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_eq_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xb4,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xb4,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_eq_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xb4,0x7d]
+
+v_cmpx_eq_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xb5,0x7d]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xda,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xda,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xda,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xda,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xda,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xda,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xda,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xda,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xda,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xda,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xda,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xda,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_eq_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xda,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_le_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xb6,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xb6,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_le_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xb6,0x7d]
+
+v_cmpx_le_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xb7,0x7d]
+
+v_cmpx_le_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xdb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xdb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xdb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xdb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xdb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xdb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xdb,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xdb,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xdb,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xdb,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xdb,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_le_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xdb,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_gt_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xb8,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xb8,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_gt_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xb8,0x7d]
+
+v_cmpx_gt_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xb9,0x7d]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xdc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xdc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xdc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xdc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xdc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xdc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xdc,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xdc,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xdc,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xdc,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xdc,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_gt_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xdc,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_ne_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xba,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ne_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xba,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ne_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xba,0x7d]
+
+v_cmpx_ne_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xbb,0x7d]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xdd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xdd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xdd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xdd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xdd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xdd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xdd,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xdd,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xdd,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xdd,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xdd,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_ne_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xdd,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_ge_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xbc,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xbc,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ge_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xbc,0x7d]
+
+v_cmpx_ge_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xbd,0x7d]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xde,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xde,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xde,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xde,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xde,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xde,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xde,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xde,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xde,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xde,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xde,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xde,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_ge_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xde,0xd0,0x80,0xfe,0x03,0x00]
+
+v_cmpx_t_u32 vcc, s1, v2
+// CHECK: [0x01,0x04,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, s101, v2
+// CHECK: [0x65,0x04,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, flat_scratch_lo, v2
+// CHECK: [0x66,0x04,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, flat_scratch_hi, v2
+// CHECK: [0x67,0x04,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, vcc_lo, v2
+// CHECK: [0x6a,0x04,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, vcc_hi, v2
+// CHECK: [0x6b,0x04,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, tba_lo, v2
+// CHECK: [0x6c,0x04,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, tba_hi, v2
+// CHECK: [0x6d,0x04,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, tma_lo, v2
+// CHECK: [0x6e,0x04,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, tma_hi, v2
+// CHECK: [0x6f,0x04,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, ttmp11, v2
+// CHECK: [0x7b,0x04,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, m0, v2
+// CHECK: [0x7c,0x04,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, exec_lo, v2
+// CHECK: [0x7e,0x04,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, exec_hi, v2
+// CHECK: [0x7f,0x04,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, 0, v2
+// CHECK: [0x80,0x04,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, -1, v2
+// CHECK: [0xc1,0x04,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, 0.5, v2
+// CHECK: [0xf0,0x04,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, -4.0, v2
+// CHECK: [0xf7,0x04,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, 0xaf123456, v2
+// CHECK: [0xff,0x04,0xbe,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_t_u32 vcc, 0x3f717273, v2
+// CHECK: [0xff,0x04,0xbe,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_t_u32 vcc, v1, v2
+// CHECK: [0x01,0x05,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, v255, v2
+// CHECK: [0xff,0x05,0xbe,0x7d]
+
+v_cmpx_t_u32 vcc, s1, v255
+// CHECK: [0x01,0xfe,0xbf,0x7d]
+
+v_cmpx_t_u32_e64 s[10:11], 0, s2
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[12:13], 0, s2
+// CHECK: [0x0c,0x00,0xdf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[100:101], 0, s2
+// CHECK: [0x64,0x00,0xdf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 flat_scratch, 0, s2
+// CHECK: [0x66,0x00,0xdf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 vcc, 0, s2
+// CHECK: [0x6a,0x00,0xdf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 tba, 0, s2
+// CHECK: [0x6c,0x00,0xdf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 tma, 0, s2
+// CHECK: [0x6e,0x00,0xdf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 ttmp[10:11], 0, s2
+// CHECK: [0x7a,0x00,0xdf,0xd0,0x80,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], -1, s2
+// CHECK: [0x0a,0x00,0xdf,0xd0,0xc1,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0.5, s2
+// CHECK: [0x0a,0x00,0xdf,0xd0,0xf0,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], -4.0, s2
+// CHECK: [0x0a,0x00,0xdf,0xd0,0xf7,0x04,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], v1, s2
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], v255, s2
+// CHECK: [0x0a,0x00,0xdf,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, s101
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, flat_scratch_lo
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0xcc,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, flat_scratch_hi
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0xce,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, vcc_lo
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0xd4,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, vcc_hi
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0xd6,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, tba_lo
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0xd8,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, tba_hi
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0xda,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, tma_lo
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0xdc,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, tma_hi
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0xde,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, ttmp11
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0xf6,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, m0
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0xf8,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, exec_lo
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0xfc,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, exec_hi
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0xfe,0x00,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, 0
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0x00,0x01,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, -1
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0x82,0x01,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, 0.5
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0xe0,0x01,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, -4.0
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0xee,0x01,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, v2
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0x04,0x02,0x00]
+
+v_cmpx_t_u32_e64 s[10:11], 0, v255
+// CHECK: [0x0a,0x00,0xdf,0xd0,0x80,0xfe,0x03,0x00]
+
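+// 64-bit integer comparisons (v_cmp_*_i64): scalar sources are SGPR
+// pairs (s[2:3], flat_scratch, exec) and vector sources are VGPR pairs
+// (v[2:3]). A literal src0 still occupies one extra 32-bit dword,
+// signalled by 0xff in the src0 field of the leading encoding word.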
+v_cmp_f_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc0,0x7d]
+
+v_cmp_f_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc0,0x7d]
+
+v_cmp_f_i64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xc0,0x7d]
+
+v_cmp_f_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xc0,0x7d]
+
+v_cmp_f_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc0,0x7d]
+
+v_cmp_f_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc0,0x7d]
+
+v_cmp_f_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc0,0x7d]
+
+v_cmp_f_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc0,0x7d]
+
+v_cmp_f_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc0,0x7d]
+
+v_cmp_f_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc0,0x7d]
+
+v_cmp_f_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc0,0x7d]
+
+v_cmp_f_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc0,0x7d]
+
+v_cmp_f_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc0,0x7d]
+
+v_cmp_f_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc0,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_f_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc0,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_f_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc0,0x7d]
+
+v_cmp_f_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc0,0x7d]
+
+v_cmp_f_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc1,0x7d]
+
+v_cmp_f_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xe0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmp_f_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_f_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_f_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_f_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmp_f_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_f_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmp_f_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_f_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe0,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_lt_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc2,0x7d]
+
+v_cmp_lt_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc2,0x7d]
+
+v_cmp_lt_i64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xc2,0x7d]
+
+v_cmp_lt_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xc2,0x7d]
+
+v_cmp_lt_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc2,0x7d]
+
+v_cmp_lt_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc2,0x7d]
+
+v_cmp_lt_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc2,0x7d]
+
+v_cmp_lt_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc2,0x7d]
+
+v_cmp_lt_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc2,0x7d]
+
+v_cmp_lt_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc2,0x7d]
+
+v_cmp_lt_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc2,0x7d]
+
+v_cmp_lt_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc2,0x7d]
+
+v_cmp_lt_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc2,0x7d]
+
+v_cmp_lt_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc2,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_lt_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc2,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_lt_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc2,0x7d]
+
+v_cmp_lt_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc2,0x7d]
+
+v_cmp_lt_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc3,0x7d]
+
+v_cmp_lt_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe1,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe1,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xe1,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe1,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe1,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe1,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe1,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe1,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe1,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xe1,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe1,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xe1,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe1,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe1,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe1,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xe1,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe1,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xe1,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe1,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_lt_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe1,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_eq_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc4,0x7d]
+
+v_cmp_eq_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc4,0x7d]
+
+v_cmp_eq_i64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xc4,0x7d]
+
+v_cmp_eq_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xc4,0x7d]
+
+v_cmp_eq_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc4,0x7d]
+
+v_cmp_eq_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc4,0x7d]
+
+v_cmp_eq_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc4,0x7d]
+
+v_cmp_eq_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc4,0x7d]
+
+v_cmp_eq_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc4,0x7d]
+
+v_cmp_eq_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc4,0x7d]
+
+v_cmp_eq_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc4,0x7d]
+
+v_cmp_eq_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc4,0x7d]
+
+v_cmp_eq_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc4,0x7d]
+
+v_cmp_eq_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc4,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_eq_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc4,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_eq_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc4,0x7d]
+
+v_cmp_eq_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc4,0x7d]
+
+v_cmp_eq_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc5,0x7d]
+
+v_cmp_eq_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xe2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_eq_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe2,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_le_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc6,0x7d]
+
+v_cmp_le_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc6,0x7d]
+
+v_cmp_le_i64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xc6,0x7d]
+
+v_cmp_le_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xc6,0x7d]
+
+v_cmp_le_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc6,0x7d]
+
+v_cmp_le_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc6,0x7d]
+
+v_cmp_le_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc6,0x7d]
+
+v_cmp_le_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc6,0x7d]
+
+v_cmp_le_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc6,0x7d]
+
+v_cmp_le_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc6,0x7d]
+
+v_cmp_le_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc6,0x7d]
+
+v_cmp_le_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc6,0x7d]
+
+v_cmp_le_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc6,0x7d]
+
+v_cmp_le_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc6,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_le_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc6,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_le_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc6,0x7d]
+
+v_cmp_le_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc6,0x7d]
+
+v_cmp_le_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc7,0x7d]
+
+v_cmp_le_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe3,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe3,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xe3,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe3,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe3,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe3,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe3,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe3,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe3,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xe3,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe3,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xe3,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmp_le_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe3,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_le_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe3,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_le_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe3,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_le_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xe3,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmp_le_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe3,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_le_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xe3,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmp_le_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe3,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_le_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe3,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_gt_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xc8,0x7d]
+
+v_cmp_gt_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xc8,0x7d]
+
+v_cmp_gt_i64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xc8,0x7d]
+
+v_cmp_gt_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xc8,0x7d]
+
+v_cmp_gt_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xc8,0x7d]
+
+v_cmp_gt_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xc8,0x7d]
+
+v_cmp_gt_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xc8,0x7d]
+
+v_cmp_gt_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xc8,0x7d]
+
+v_cmp_gt_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xc8,0x7d]
+
+v_cmp_gt_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xc8,0x7d]
+
+v_cmp_gt_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xc8,0x7d]
+
+v_cmp_gt_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xc8,0x7d]
+
+v_cmp_gt_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xc8,0x7d]
+
+v_cmp_gt_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xc8,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_gt_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xc8,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_gt_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xc8,0x7d]
+
+v_cmp_gt_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xc8,0x7d]
+
+v_cmp_gt_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xc9,0x7d]
+
+v_cmp_gt_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xe4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_gt_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe4,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_ne_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xca,0x7d]
+
+v_cmp_ne_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xca,0x7d]
+
+v_cmp_ne_i64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xca,0x7d]
+
+v_cmp_ne_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xca,0x7d]
+
+v_cmp_ne_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xca,0x7d]
+
+v_cmp_ne_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xca,0x7d]
+
+v_cmp_ne_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xca,0x7d]
+
+v_cmp_ne_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xca,0x7d]
+
+v_cmp_ne_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xca,0x7d]
+
+v_cmp_ne_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xca,0x7d]
+
+v_cmp_ne_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xca,0x7d]
+
+v_cmp_ne_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xca,0x7d]
+
+v_cmp_ne_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xca,0x7d]
+
+v_cmp_ne_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xca,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_ne_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xca,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_ne_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xca,0x7d]
+
+v_cmp_ne_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xca,0x7d]
+
+v_cmp_ne_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xcb,0x7d]
+
+v_cmp_ne_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe5,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe5,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xe5,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe5,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe5,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe5,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe5,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe5,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe5,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xe5,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe5,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xe5,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe5,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe5,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe5,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xe5,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe5,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xe5,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe5,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_ne_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe5,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_ge_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xcc,0x7d]
+
+v_cmp_ge_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xcc,0x7d]
+
+v_cmp_ge_i64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xcc,0x7d]
+
+v_cmp_ge_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xcc,0x7d]
+
+v_cmp_ge_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xcc,0x7d]
+
+v_cmp_ge_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xcc,0x7d]
+
+v_cmp_ge_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xcc,0x7d]
+
+v_cmp_ge_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xcc,0x7d]
+
+v_cmp_ge_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xcc,0x7d]
+
+v_cmp_ge_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xcc,0x7d]
+
+v_cmp_ge_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xcc,0x7d]
+
+v_cmp_ge_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xcc,0x7d]
+
+v_cmp_ge_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xcc,0x7d]
+
+v_cmp_ge_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xcc,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_ge_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xcc,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_ge_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xcc,0x7d]
+
+v_cmp_ge_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xcc,0x7d]
+
+v_cmp_ge_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xcd,0x7d]
+
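+// The 8-byte _e64 (VOP3b) CHECK patterns decode as two little-endian dwords
+// (again a sketch, assuming GFX8): dword0 holds the scalar destination in its
+// low byte, the opcode in bits [25:16], and 0b110100 in bits [31:26]; dword1
+// holds SRC0 in bits [8:0] and SRC1 in bits [17:9]. The next check,
+// [0x0a,0x00,0xe6,0xd0,0x04,0x08,0x00,0x00], is 0xd0e6000a 0x00000804:
+// sdst = 0x0a (s[10:11]), opcode = 0x0e6, SRC0 = SRC1 = 0x004 (s[4:5]).
+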
+v_cmp_ge_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xe6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_ge_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe6,0xd0,0x04,0xfc,0x03,0x00]
+
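+// Scalar operand codes used throughout (GFX8 assumption): 0x00-0x65 are
+// SGPRs (0x64 = s100), 0x66 = flat_scratch, 0x6a = vcc, 0x6c = tba,
+// 0x6e = tma, 0x7a = ttmp10, 0x7e = exec. VGPR N encodes as 0x100 + N, so a
+// VGPR in SRC0 sets bit 8, i.e. the low bit of the second byte:
+// [0x01,0x05,...] for v[1:2] versus [0x02,0x04,...] for s[2:3].
+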
+v_cmp_t_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xce,0x7d]
+
+v_cmp_t_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xce,0x7d]
+
+v_cmp_t_i64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xce,0x7d]
+
+v_cmp_t_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xce,0x7d]
+
+v_cmp_t_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xce,0x7d]
+
+v_cmp_t_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xce,0x7d]
+
+v_cmp_t_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xce,0x7d]
+
+v_cmp_t_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xce,0x7d]
+
+v_cmp_t_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xce,0x7d]
+
+v_cmp_t_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xce,0x7d]
+
+v_cmp_t_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xce,0x7d]
+
+v_cmp_t_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xce,0x7d]
+
+v_cmp_t_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xce,0x7d]
+
+v_cmp_t_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xce,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_t_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xce,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_t_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xce,0x7d]
+
+v_cmp_t_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xce,0x7d]
+
+v_cmp_t_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xcf,0x7d]
+
+v_cmp_t_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe7,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe7,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xe7,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe7,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe7,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe7,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe7,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe7,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe7,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xe7,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe7,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xe7,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmp_t_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe7,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_t_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe7,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_t_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe7,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_t_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xe7,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmp_t_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe7,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_t_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xe7,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmp_t_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe7,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_t_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe7,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_f_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xd0,0x7d]
+
+v_cmp_f_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xd0,0x7d]
+
+v_cmp_f_u64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xd0,0x7d]
+
+v_cmp_f_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xd0,0x7d]
+
+v_cmp_f_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xd0,0x7d]
+
+v_cmp_f_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xd0,0x7d]
+
+v_cmp_f_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xd0,0x7d]
+
+v_cmp_f_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xd0,0x7d]
+
+v_cmp_f_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xd0,0x7d]
+
+v_cmp_f_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xd0,0x7d]
+
+v_cmp_f_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xd0,0x7d]
+
+v_cmp_f_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xd0,0x7d]
+
+v_cmp_f_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xd0,0x7d]
+
+v_cmp_f_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xd0,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_f_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xd0,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_f_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xd0,0x7d]
+
+v_cmp_f_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xd0,0x7d]
+
+v_cmp_f_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xd1,0x7d]
+
+v_cmp_f_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xe8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmp_f_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_f_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_f_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_f_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmp_f_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_f_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmp_f_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_f_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe8,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_lt_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xd2,0x7d]
+
+v_cmp_lt_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xd2,0x7d]
+
+v_cmp_lt_u64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xd2,0x7d]
+
+v_cmp_lt_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xd2,0x7d]
+
+v_cmp_lt_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xd2,0x7d]
+
+v_cmp_lt_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xd2,0x7d]
+
+v_cmp_lt_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xd2,0x7d]
+
+v_cmp_lt_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xd2,0x7d]
+
+v_cmp_lt_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xd2,0x7d]
+
+v_cmp_lt_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xd2,0x7d]
+
+v_cmp_lt_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xd2,0x7d]
+
+v_cmp_lt_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xd2,0x7d]
+
+v_cmp_lt_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xd2,0x7d]
+
+v_cmp_lt_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xd2,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_lt_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xd2,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_lt_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xd2,0x7d]
+
+v_cmp_lt_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xd2,0x7d]
+
+v_cmp_lt_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xd3,0x7d]
+
+v_cmp_lt_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xe9,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xe9,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xe9,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xe9,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xe9,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xe9,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xe9,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xe9,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xe9,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xe9,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xe9,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xe9,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xe9,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xe9,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xe9,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xe9,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xe9,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xe9,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xe9,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_lt_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xe9,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_eq_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xd4,0x7d]
+
+v_cmp_eq_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xd4,0x7d]
+
+v_cmp_eq_u64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xd4,0x7d]
+
+v_cmp_eq_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xd4,0x7d]
+
+v_cmp_eq_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xd4,0x7d]
+
+v_cmp_eq_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xd4,0x7d]
+
+v_cmp_eq_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xd4,0x7d]
+
+v_cmp_eq_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xd4,0x7d]
+
+v_cmp_eq_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xd4,0x7d]
+
+v_cmp_eq_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xd4,0x7d]
+
+v_cmp_eq_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xd4,0x7d]
+
+v_cmp_eq_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xd4,0x7d]
+
+v_cmp_eq_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xd4,0x7d]
+
+v_cmp_eq_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xd4,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_eq_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xd4,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_eq_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xd4,0x7d]
+
+v_cmp_eq_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xd4,0x7d]
+
+v_cmp_eq_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xd5,0x7d]
+
+v_cmp_eq_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xea,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xea,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xea,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xea,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xea,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xea,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xea,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xea,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xea,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xea,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xea,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xea,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xea,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_eq_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xea,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_le_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xd6,0x7d]
+
+v_cmp_le_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xd6,0x7d]
+
+v_cmp_le_u64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xd6,0x7d]
+
+v_cmp_le_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xd6,0x7d]
+
+v_cmp_le_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xd6,0x7d]
+
+v_cmp_le_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xd6,0x7d]
+
+v_cmp_le_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xd6,0x7d]
+
+v_cmp_le_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xd6,0x7d]
+
+v_cmp_le_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xd6,0x7d]
+
+v_cmp_le_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xd6,0x7d]
+
+v_cmp_le_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xd6,0x7d]
+
+v_cmp_le_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xd6,0x7d]
+
+v_cmp_le_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xd6,0x7d]
+
+v_cmp_le_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xd6,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_le_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xd6,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_le_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xd6,0x7d]
+
+v_cmp_le_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xd6,0x7d]
+
+v_cmp_le_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xd7,0x7d]
+
+v_cmp_le_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xeb,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xeb,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xeb,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xeb,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xeb,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xeb,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xeb,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xeb,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xeb,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xeb,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xeb,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xeb,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmp_le_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xeb,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_le_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xeb,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_le_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xeb,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_le_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xeb,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmp_le_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xeb,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_le_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xeb,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmp_le_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xeb,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_le_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xeb,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_gt_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xd8,0x7d]
+
+v_cmp_gt_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xd8,0x7d]
+
+v_cmp_gt_u64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xd8,0x7d]
+
+v_cmp_gt_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xd8,0x7d]
+
+v_cmp_gt_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xd8,0x7d]
+
+v_cmp_gt_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xd8,0x7d]
+
+v_cmp_gt_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xd8,0x7d]
+
+v_cmp_gt_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xd8,0x7d]
+
+v_cmp_gt_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xd8,0x7d]
+
+v_cmp_gt_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xd8,0x7d]
+
+v_cmp_gt_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xd8,0x7d]
+
+v_cmp_gt_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xd8,0x7d]
+
+v_cmp_gt_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xd8,0x7d]
+
+v_cmp_gt_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xd8,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_gt_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xd8,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_gt_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xd8,0x7d]
+
+v_cmp_gt_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xd8,0x7d]
+
+v_cmp_gt_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xd9,0x7d]
+
+v_cmp_gt_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xec,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xec,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xec,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xec,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xec,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xec,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xec,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xec,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xec,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xec,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xec,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xec,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xec,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_gt_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xec,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_ne_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xda,0x7d]
+
+v_cmp_ne_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xda,0x7d]
+
+v_cmp_ne_u64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xda,0x7d]
+
+v_cmp_ne_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xda,0x7d]
+
+v_cmp_ne_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xda,0x7d]
+
+v_cmp_ne_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xda,0x7d]
+
+v_cmp_ne_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xda,0x7d]
+
+v_cmp_ne_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xda,0x7d]
+
+v_cmp_ne_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xda,0x7d]
+
+v_cmp_ne_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xda,0x7d]
+
+v_cmp_ne_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xda,0x7d]
+
+v_cmp_ne_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xda,0x7d]
+
+v_cmp_ne_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xda,0x7d]
+
+v_cmp_ne_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xda,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_ne_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xda,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_ne_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xda,0x7d]
+
+v_cmp_ne_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xda,0x7d]
+
+v_cmp_ne_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xdb,0x7d]
+
+v_cmp_ne_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xed,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xed,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xed,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xed,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xed,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xed,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xed,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xed,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xed,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xed,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xed,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xed,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xed,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xed,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xed,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xed,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xed,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xed,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xed,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_ne_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xed,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_ge_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xdc,0x7d]
+
+v_cmp_ge_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xdc,0x7d]
+
+v_cmp_ge_u64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xdc,0x7d]
+
+v_cmp_ge_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xdc,0x7d]
+
+v_cmp_ge_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xdc,0x7d]
+
+v_cmp_ge_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xdc,0x7d]
+
+v_cmp_ge_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xdc,0x7d]
+
+v_cmp_ge_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xdc,0x7d]
+
+v_cmp_ge_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xdc,0x7d]
+
+v_cmp_ge_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xdc,0x7d]
+
+v_cmp_ge_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xdc,0x7d]
+
+v_cmp_ge_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xdc,0x7d]
+
+v_cmp_ge_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xdc,0x7d]
+
+v_cmp_ge_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xdc,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_ge_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xdc,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_ge_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xdc,0x7d]
+
+v_cmp_ge_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xdc,0x7d]
+
+v_cmp_ge_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xdd,0x7d]
+
+v_cmp_ge_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xee,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xee,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xee,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xee,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xee,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xee,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xee,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xee,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xee,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xee,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xee,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xee,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xee,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_ge_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xee,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmp_t_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xde,0x7d]
+
+v_cmp_t_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xde,0x7d]
+
+v_cmp_t_u64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xde,0x7d]
+
+v_cmp_t_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xde,0x7d]
+
+v_cmp_t_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xde,0x7d]
+
+v_cmp_t_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xde,0x7d]
+
+v_cmp_t_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xde,0x7d]
+
+v_cmp_t_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xde,0x7d]
+
+v_cmp_t_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xde,0x7d]
+
+v_cmp_t_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xde,0x7d]
+
+v_cmp_t_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xde,0x7d]
+
+v_cmp_t_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xde,0x7d]
+
+v_cmp_t_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xde,0x7d]
+
+v_cmp_t_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xde,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmp_t_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xde,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmp_t_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xde,0x7d]
+
+v_cmp_t_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xde,0x7d]
+
+v_cmp_t_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xdf,0x7d]
+
+v_cmp_t_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xef,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xef,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xef,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xef,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xef,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xef,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xef,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xef,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xef,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xef,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xef,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xef,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmp_t_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xef,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmp_t_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xef,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmp_t_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xef,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmp_t_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xef,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmp_t_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xef,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmp_t_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xef,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmp_t_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xef,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmp_t_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xef,0xd0,0x04,0xfc,0x03,0x00]
+
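+// The v_cmpx_* checks below reuse the same operand encodings; they differ
+// from v_cmp_* only in opcode, plus the architectural side effect that cmpx
+// also writes the comparison result to the EXEC mask.
+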
+v_cmpx_f_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe0,0x7d]
+
+v_cmpx_f_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe0,0x7d]
+
+v_cmpx_f_i64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xe0,0x7d]
+
+v_cmpx_f_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xe0,0x7d]
+
+v_cmpx_f_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe0,0x7d]
+
+v_cmpx_f_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe0,0x7d]
+
+v_cmpx_f_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe0,0x7d]
+
+v_cmpx_f_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe0,0x7d]
+
+v_cmpx_f_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe0,0x7d]
+
+v_cmpx_f_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe0,0x7d]
+
+v_cmpx_f_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe0,0x7d]
+
+v_cmpx_f_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe0,0x7d]
+
+v_cmpx_f_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe0,0x7d]
+
+v_cmpx_f_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe0,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_f_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe0,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_f_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe0,0x7d]
+
+v_cmpx_f_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe0,0x7d]
+
+v_cmpx_f_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe1,0x7d]
+
+v_cmpx_f_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xf0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xf0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xf0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xf0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xf0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xf0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xf0,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_f_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xf0,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_lt_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe2,0x7d]
+
+v_cmpx_lt_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe2,0x7d]
+
+v_cmpx_lt_i64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xe2,0x7d]
+
+v_cmpx_lt_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xe2,0x7d]
+
+v_cmpx_lt_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe2,0x7d]
+
+v_cmpx_lt_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe2,0x7d]
+
+v_cmpx_lt_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe2,0x7d]
+
+v_cmpx_lt_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe2,0x7d]
+
+v_cmpx_lt_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe2,0x7d]
+
+v_cmpx_lt_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe2,0x7d]
+
+v_cmpx_lt_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe2,0x7d]
+
+v_cmpx_lt_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe2,0x7d]
+
+v_cmpx_lt_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe2,0x7d]
+
+v_cmpx_lt_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe2,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe2,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_lt_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe2,0x7d]
+
+v_cmpx_lt_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe2,0x7d]
+
+v_cmpx_lt_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe3,0x7d]
+
+v_cmpx_lt_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf1,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xf1,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xf1,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xf1,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xf1,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xf1,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xf1,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xf1,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xf1,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xf1,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xf1,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xf1,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xf1,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xf1,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xf1,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xf1,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xf1,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xf1,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xf1,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_lt_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xf1,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_eq_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe4,0x7d]
+
+v_cmpx_eq_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe4,0x7d]
+
+v_cmpx_eq_i64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xe4,0x7d]
+
+v_cmpx_eq_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xe4,0x7d]
+
+v_cmpx_eq_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe4,0x7d]
+
+v_cmpx_eq_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe4,0x7d]
+
+v_cmpx_eq_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe4,0x7d]
+
+v_cmpx_eq_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe4,0x7d]
+
+v_cmpx_eq_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe4,0x7d]
+
+v_cmpx_eq_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe4,0x7d]
+
+v_cmpx_eq_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe4,0x7d]
+
+v_cmpx_eq_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe4,0x7d]
+
+v_cmpx_eq_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe4,0x7d]
+
+v_cmpx_eq_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe4,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe4,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_eq_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe4,0x7d]
+
+v_cmpx_eq_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe4,0x7d]
+
+v_cmpx_eq_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe5,0x7d]
+
+v_cmpx_eq_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xf2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xf2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xf2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xf2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xf2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xf2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xf2,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_eq_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xf2,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_le_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe6,0x7d]
+
+v_cmpx_le_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe6,0x7d]
+
+v_cmpx_le_i64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xe6,0x7d]
+
+v_cmpx_le_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xe6,0x7d]
+
+v_cmpx_le_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe6,0x7d]
+
+v_cmpx_le_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe6,0x7d]
+
+v_cmpx_le_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe6,0x7d]
+
+v_cmpx_le_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe6,0x7d]
+
+v_cmpx_le_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe6,0x7d]
+
+v_cmpx_le_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe6,0x7d]
+
+v_cmpx_le_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe6,0x7d]
+
+v_cmpx_le_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe6,0x7d]
+
+v_cmpx_le_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe6,0x7d]
+
+v_cmpx_le_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe6,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe6,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_le_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe6,0x7d]
+
+v_cmpx_le_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe6,0x7d]
+
+v_cmpx_le_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe7,0x7d]
+
+v_cmpx_le_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf3,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xf3,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xf3,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xf3,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xf3,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xf3,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xf3,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xf3,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xf3,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xf3,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xf3,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xf3,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xf3,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xf3,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xf3,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xf3,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xf3,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xf3,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xf3,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_le_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xf3,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_gt_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xe8,0x7d]
+
+v_cmpx_gt_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xe8,0x7d]
+
+v_cmpx_gt_i64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xe8,0x7d]
+
+v_cmpx_gt_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xe8,0x7d]
+
+v_cmpx_gt_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xe8,0x7d]
+
+v_cmpx_gt_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xe8,0x7d]
+
+v_cmpx_gt_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xe8,0x7d]
+
+v_cmpx_gt_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xe8,0x7d]
+
+v_cmpx_gt_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xe8,0x7d]
+
+v_cmpx_gt_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xe8,0x7d]
+
+v_cmpx_gt_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xe8,0x7d]
+
+v_cmpx_gt_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xe8,0x7d]
+
+v_cmpx_gt_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xe8,0x7d]
+
+v_cmpx_gt_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xe8,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xe8,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_gt_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xe8,0x7d]
+
+v_cmpx_gt_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xe8,0x7d]
+
+v_cmpx_gt_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xe9,0x7d]
+
+v_cmpx_gt_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xf4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xf4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xf4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xf4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xf4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xf4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xf4,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_gt_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xf4,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_ne_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xea,0x7d]
+
+v_cmpx_ne_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xea,0x7d]
+
+v_cmpx_ne_i64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xea,0x7d]
+
+v_cmpx_ne_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xea,0x7d]
+
+v_cmpx_ne_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xea,0x7d]
+
+v_cmpx_ne_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xea,0x7d]
+
+v_cmpx_ne_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xea,0x7d]
+
+v_cmpx_ne_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xea,0x7d]
+
+v_cmpx_ne_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xea,0x7d]
+
+v_cmpx_ne_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xea,0x7d]
+
+v_cmpx_ne_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xea,0x7d]
+
+v_cmpx_ne_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xea,0x7d]
+
+v_cmpx_ne_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xea,0x7d]
+
+v_cmpx_ne_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xea,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ne_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xea,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ne_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xea,0x7d]
+
+v_cmpx_ne_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xea,0x7d]
+
+v_cmpx_ne_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xeb,0x7d]
+
+v_cmpx_ne_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf5,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xf5,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xf5,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xf5,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xf5,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xf5,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xf5,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xf5,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xf5,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xf5,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xf5,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xf5,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xf5,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xf5,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xf5,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xf5,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xf5,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xf5,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xf5,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_ne_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xf5,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_ge_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xec,0x7d]
+
+v_cmpx_ge_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xec,0x7d]
+
+v_cmpx_ge_i64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xec,0x7d]
+
+v_cmpx_ge_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xec,0x7d]
+
+v_cmpx_ge_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xec,0x7d]
+
+v_cmpx_ge_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xec,0x7d]
+
+v_cmpx_ge_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xec,0x7d]
+
+v_cmpx_ge_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xec,0x7d]
+
+v_cmpx_ge_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xec,0x7d]
+
+v_cmpx_ge_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xec,0x7d]
+
+v_cmpx_ge_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xec,0x7d]
+
+v_cmpx_ge_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xec,0x7d]
+
+v_cmpx_ge_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xec,0x7d]
+
+v_cmpx_ge_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xec,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xec,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ge_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xec,0x7d]
+
+v_cmpx_ge_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xec,0x7d]
+
+v_cmpx_ge_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xed,0x7d]
+
+v_cmpx_ge_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xf6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xf6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xf6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xf6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xf6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xf6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xf6,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_ge_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xf6,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_t_i64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xee,0x7d]
+
+v_cmpx_t_i64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xee,0x7d]
+
+v_cmpx_t_i64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xee,0x7d]
+
+v_cmpx_t_i64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xee,0x7d]
+
+v_cmpx_t_i64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xee,0x7d]
+
+v_cmpx_t_i64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xee,0x7d]
+
+v_cmpx_t_i64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xee,0x7d]
+
+v_cmpx_t_i64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xee,0x7d]
+
+v_cmpx_t_i64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xee,0x7d]
+
+v_cmpx_t_i64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xee,0x7d]
+
+v_cmpx_t_i64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xee,0x7d]
+
+v_cmpx_t_i64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xee,0x7d]
+
+v_cmpx_t_i64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xee,0x7d]
+
+v_cmpx_t_i64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xee,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_t_i64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xee,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_t_i64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xee,0x7d]
+
+v_cmpx_t_i64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xee,0x7d]
+
+v_cmpx_t_i64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xef,0x7d]
+
+v_cmpx_t_i64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf7,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xf7,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xf7,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xf7,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xf7,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xf7,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xf7,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xf7,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xf7,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xf7,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xf7,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xf7,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xf7,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xf7,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xf7,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xf7,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xf7,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xf7,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xf7,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_t_i64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xf7,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_f_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xf0,0x7d]
+
+v_cmpx_f_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xf0,0x7d]
+
+v_cmpx_f_u64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xf0,0x7d]
+
+v_cmpx_f_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xf0,0x7d]
+
+v_cmpx_f_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xf0,0x7d]
+
+v_cmpx_f_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xf0,0x7d]
+
+v_cmpx_f_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xf0,0x7d]
+
+v_cmpx_f_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xf0,0x7d]
+
+v_cmpx_f_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xf0,0x7d]
+
+v_cmpx_f_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xf0,0x7d]
+
+v_cmpx_f_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xf0,0x7d]
+
+v_cmpx_f_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xf0,0x7d]
+
+v_cmpx_f_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xf0,0x7d]
+
+v_cmpx_f_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xf0,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_f_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xf0,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_f_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xf0,0x7d]
+
+v_cmpx_f_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xf0,0x7d]
+
+v_cmpx_f_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xf1,0x7d]
+
+v_cmpx_f_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xf8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xf8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xf8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xf8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xf8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xf8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xf8,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_f_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xf8,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_lt_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xf2,0x7d]
+
+v_cmpx_lt_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xf2,0x7d]
+
+v_cmpx_lt_u64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xf2,0x7d]
+
+v_cmpx_lt_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xf2,0x7d]
+
+v_cmpx_lt_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xf2,0x7d]
+
+v_cmpx_lt_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xf2,0x7d]
+
+v_cmpx_lt_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xf2,0x7d]
+
+v_cmpx_lt_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xf2,0x7d]
+
+v_cmpx_lt_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xf2,0x7d]
+
+v_cmpx_lt_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xf2,0x7d]
+
+v_cmpx_lt_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xf2,0x7d]
+
+v_cmpx_lt_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xf2,0x7d]
+
+v_cmpx_lt_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xf2,0x7d]
+
+v_cmpx_lt_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xf2,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xf2,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_lt_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xf2,0x7d]
+
+v_cmpx_lt_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xf2,0x7d]
+
+v_cmpx_lt_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xf3,0x7d]
+
+v_cmpx_lt_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xf9,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xf9,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xf9,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xf9,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xf9,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xf9,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xf9,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xf9,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xf9,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xf9,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xf9,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xf9,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xf9,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xf9,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xf9,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xf9,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xf9,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xf9,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xf9,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_lt_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xf9,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_eq_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xf4,0x7d]
+
+v_cmpx_eq_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xf4,0x7d]
+
+v_cmpx_eq_u64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xf4,0x7d]
+
+v_cmpx_eq_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xf4,0x7d]
+
+v_cmpx_eq_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xf4,0x7d]
+
+v_cmpx_eq_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xf4,0x7d]
+
+v_cmpx_eq_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xf4,0x7d]
+
+v_cmpx_eq_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xf4,0x7d]
+
+v_cmpx_eq_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xf4,0x7d]
+
+v_cmpx_eq_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xf4,0x7d]
+
+v_cmpx_eq_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xf4,0x7d]
+
+v_cmpx_eq_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xf4,0x7d]
+
+v_cmpx_eq_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xf4,0x7d]
+
+v_cmpx_eq_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xf4,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xf4,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_eq_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xf4,0x7d]
+
+v_cmpx_eq_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xf4,0x7d]
+
+v_cmpx_eq_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xf5,0x7d]
+
+v_cmpx_eq_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xfa,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xfa,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xfa,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xfa,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xfa,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xfa,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xfa,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_eq_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xfa,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_le_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xf6,0x7d]
+
+v_cmpx_le_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xf6,0x7d]
+
+v_cmpx_le_u64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xf6,0x7d]
+
+v_cmpx_le_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xf6,0x7d]
+
+v_cmpx_le_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xf6,0x7d]
+
+v_cmpx_le_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xf6,0x7d]
+
+v_cmpx_le_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xf6,0x7d]
+
+v_cmpx_le_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xf6,0x7d]
+
+v_cmpx_le_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xf6,0x7d]
+
+v_cmpx_le_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xf6,0x7d]
+
+v_cmpx_le_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xf6,0x7d]
+
+v_cmpx_le_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xf6,0x7d]
+
+v_cmpx_le_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xf6,0x7d]
+
+v_cmpx_le_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xf6,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xf6,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_le_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xf6,0x7d]
+
+v_cmpx_le_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xf6,0x7d]
+
+v_cmpx_le_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xf7,0x7d]
+
+v_cmpx_le_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xfb,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xfb,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xfb,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xfb,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xfb,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xfb,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xfb,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xfb,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xfb,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xfb,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xfb,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xfb,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xfb,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xfb,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xfb,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xfb,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xfb,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xfb,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xfb,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_le_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xfb,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_gt_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xf8,0x7d]
+
+v_cmpx_gt_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xf8,0x7d]
+
+v_cmpx_gt_u64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xf8,0x7d]
+
+v_cmpx_gt_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xf8,0x7d]
+
+v_cmpx_gt_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xf8,0x7d]
+
+v_cmpx_gt_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xf8,0x7d]
+
+v_cmpx_gt_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xf8,0x7d]
+
+v_cmpx_gt_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xf8,0x7d]
+
+v_cmpx_gt_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xf8,0x7d]
+
+v_cmpx_gt_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xf8,0x7d]
+
+v_cmpx_gt_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xf8,0x7d]
+
+v_cmpx_gt_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xf8,0x7d]
+
+v_cmpx_gt_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xf8,0x7d]
+
+v_cmpx_gt_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xf8,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xf8,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_gt_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xf8,0x7d]
+
+v_cmpx_gt_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xf8,0x7d]
+
+v_cmpx_gt_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xf9,0x7d]
+
+v_cmpx_gt_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xfc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xfc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xfc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xfc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xfc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xfc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xfc,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_gt_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xfc,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_ne_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xfa,0x7d]
+
+v_cmpx_ne_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xfa,0x7d]
+
+v_cmpx_ne_u64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xfa,0x7d]
+
+v_cmpx_ne_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xfa,0x7d]
+
+v_cmpx_ne_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xfa,0x7d]
+
+v_cmpx_ne_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xfa,0x7d]
+
+v_cmpx_ne_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xfa,0x7d]
+
+v_cmpx_ne_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xfa,0x7d]
+
+v_cmpx_ne_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xfa,0x7d]
+
+v_cmpx_ne_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xfa,0x7d]
+
+v_cmpx_ne_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xfa,0x7d]
+
+v_cmpx_ne_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xfa,0x7d]
+
+v_cmpx_ne_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xfa,0x7d]
+
+v_cmpx_ne_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xfa,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ne_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xfa,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ne_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xfa,0x7d]
+
+v_cmpx_ne_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xfa,0x7d]
+
+v_cmpx_ne_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xfb,0x7d]
+
+v_cmpx_ne_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xfd,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xfd,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xfd,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xfd,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xfd,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xfd,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xfd,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xfd,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xfd,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xfd,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xfd,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xfd,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xfd,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xfd,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xfd,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xfd,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xfd,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xfd,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xfd,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_ne_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xfd,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_ge_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xfc,0x7d]
+
+v_cmpx_ge_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xfc,0x7d]
+
+v_cmpx_ge_u64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xfc,0x7d]
+
+v_cmpx_ge_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xfc,0x7d]
+
+v_cmpx_ge_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xfc,0x7d]
+
+v_cmpx_ge_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xfc,0x7d]
+
+v_cmpx_ge_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xfc,0x7d]
+
+v_cmpx_ge_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xfc,0x7d]
+
+v_cmpx_ge_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xfc,0x7d]
+
+v_cmpx_ge_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xfc,0x7d]
+
+v_cmpx_ge_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xfc,0x7d]
+
+v_cmpx_ge_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xfc,0x7d]
+
+v_cmpx_ge_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xfc,0x7d]
+
+v_cmpx_ge_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xfc,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xfc,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_ge_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xfc,0x7d]
+
+v_cmpx_ge_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xfc,0x7d]
+
+v_cmpx_ge_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xfd,0x7d]
+
+v_cmpx_ge_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xfe,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xfe,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xfe,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xfe,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xfe,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xfe,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xfe,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_ge_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xfe,0xd0,0x04,0xfc,0x03,0x00]
+
+v_cmpx_t_u64 vcc, s[2:3], v[2:3]
+// CHECK: [0x02,0x04,0xfe,0x7d]
+
+v_cmpx_t_u64 vcc, s[4:5], v[2:3]
+// CHECK: [0x04,0x04,0xfe,0x7d]
+
+v_cmpx_t_u64 vcc, s[100:101], v[2:3]
+// CHECK: [0x64,0x04,0xfe,0x7d]
+
+v_cmpx_t_u64 vcc, flat_scratch, v[2:3]
+// CHECK: [0x66,0x04,0xfe,0x7d]
+
+v_cmpx_t_u64 vcc, vcc, v[2:3]
+// CHECK: [0x6a,0x04,0xfe,0x7d]
+
+v_cmpx_t_u64 vcc, tba, v[2:3]
+// CHECK: [0x6c,0x04,0xfe,0x7d]
+
+v_cmpx_t_u64 vcc, tma, v[2:3]
+// CHECK: [0x6e,0x04,0xfe,0x7d]
+
+v_cmpx_t_u64 vcc, ttmp[10:11], v[2:3]
+// CHECK: [0x7a,0x04,0xfe,0x7d]
+
+v_cmpx_t_u64 vcc, exec, v[2:3]
+// CHECK: [0x7e,0x04,0xfe,0x7d]
+
+v_cmpx_t_u64 vcc, 0, v[2:3]
+// CHECK: [0x80,0x04,0xfe,0x7d]
+
+v_cmpx_t_u64 vcc, -1, v[2:3]
+// CHECK: [0xc1,0x04,0xfe,0x7d]
+
+v_cmpx_t_u64 vcc, 0.5, v[2:3]
+// CHECK: [0xf0,0x04,0xfe,0x7d]
+
+v_cmpx_t_u64 vcc, -4.0, v[2:3]
+// CHECK: [0xf7,0x04,0xfe,0x7d]
+
+v_cmpx_t_u64 vcc, 0xaf123456, v[2:3]
+// CHECK: [0xff,0x04,0xfe,0x7d,0x56,0x34,0x12,0xaf]
+
+v_cmpx_t_u64 vcc, 0x3f717273, v[2:3]
+// CHECK: [0xff,0x04,0xfe,0x7d,0x73,0x72,0x71,0x3f]
+
+v_cmpx_t_u64 vcc, v[1:2], v[2:3]
+// CHECK: [0x01,0x05,0xfe,0x7d]
+
+v_cmpx_t_u64 vcc, v[254:255], v[2:3]
+// CHECK: [0xfe,0x05,0xfe,0x7d]
+
+v_cmpx_t_u64 vcc, s[2:3], v[254:255]
+// CHECK: [0x02,0xfc,0xff,0x7d]
+
+v_cmpx_t_u64_e64 s[10:11], s[4:5], s[4:5]
+// CHECK: [0x0a,0x00,0xff,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 s[12:13], s[4:5], s[4:5]
+// CHECK: [0x0c,0x00,0xff,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 s[100:101], s[4:5], s[4:5]
+// CHECK: [0x64,0x00,0xff,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 flat_scratch, s[4:5], s[4:5]
+// CHECK: [0x66,0x00,0xff,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 vcc, s[4:5], s[4:5]
+// CHECK: [0x6a,0x00,0xff,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 tba, s[4:5], s[4:5]
+// CHECK: [0x6c,0x00,0xff,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 tma, s[4:5], s[4:5]
+// CHECK: [0x6e,0x00,0xff,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 ttmp[10:11], s[4:5], s[4:5]
+// CHECK: [0x7a,0x00,0xff,0xd0,0x04,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], 0, s[4:5]
+// CHECK: [0x0a,0x00,0xff,0xd0,0x80,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], -1, s[4:5]
+// CHECK: [0x0a,0x00,0xff,0xd0,0xc1,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], 0.5, s[4:5]
+// CHECK: [0x0a,0x00,0xff,0xd0,0xf0,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], -4.0, s[4:5]
+// CHECK: [0x0a,0x00,0xff,0xd0,0xf7,0x08,0x00,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], v[1:2], s[4:5]
+// CHECK: [0x0a,0x00,0xff,0xd0,0x01,0x09,0x00,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], v[254:255], s[4:5]
+// CHECK: [0x0a,0x00,0xff,0xd0,0xfe,0x09,0x00,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], s[4:5], 0
+// CHECK: [0x0a,0x00,0xff,0xd0,0x04,0x00,0x01,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], s[4:5], -1
+// CHECK: [0x0a,0x00,0xff,0xd0,0x04,0x82,0x01,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], s[4:5], 0.5
+// CHECK: [0x0a,0x00,0xff,0xd0,0x04,0xe0,0x01,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], s[4:5], -4.0
+// CHECK: [0x0a,0x00,0xff,0xd0,0x04,0xee,0x01,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], s[4:5], v[2:3]
+// CHECK: [0x0a,0x00,0xff,0xd0,0x04,0x04,0x02,0x00]
+
+v_cmpx_t_u64_e64 s[10:11], s[4:5], v[254:255]
+// CHECK: [0x0a,0x00,0xff,0xd0,0x04,0xfc,0x03,0x00]
+
+v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_mov_b32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x02,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_mov_b32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x02,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_mov_b32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_mov_b32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_mov_b32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_mov_b32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_mov_b32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_mov_b32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_mov_b32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_mov_b32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_mov_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_mov_b32_sdwa v5, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x02,0x0a,0x7e,0x01,0x06,0x0e,0x06]
+
+v_mov_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_mov_b32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x02,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_mov_b32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_mov_b32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_mov_b32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_mov_b32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_mov_b32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_mov_b32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_mov_b32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_mov_b32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_mov_b32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_mov_b32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_mov_b32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_mov_b32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_mov_b32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_mov_b32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_mov_b32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_mov_b32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_mov_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_mov_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_mov_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_mov_b32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_mov_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_mov_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_mov_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_mov_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_mov_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x02,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_cvt_f32_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_i32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0a,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_i32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cvt_f32_i32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cvt_f32_i32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_i32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cvt_f32_i32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cvt_f32_i32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cvt_f32_i32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cvt_f32_i32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cvt_f32_i32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cvt_f32_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cvt_f32_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f32_i32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f32_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cvt_f32_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cvt_f32_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cvt_f32_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cvt_f32_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cvt_f32_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cvt_f32_i32_sdwa v5, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0a,0x0a,0x7e,0x01,0x06,0x0e,0x06]
+
+v_cvt_f32_i32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cvt_f32_i32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cvt_f32_i32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cvt_f32_i32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cvt_f32_i32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cvt_f32_i32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cvt_f32_i32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cvt_f32_i32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cvt_f32_i32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cvt_f32_i32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cvt_f32_i32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cvt_f32_i32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cvt_f32_i32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cvt_f32_i32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cvt_f32_i32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cvt_f32_i32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cvt_f32_i32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cvt_f32_i32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cvt_f32_i32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cvt_f32_i32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cvt_f32_i32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f32_i32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f32_i32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cvt_f32_i32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cvt_f32_i32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f32_i32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f32_i32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x0a,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_cvt_f32_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_u32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0c,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_u32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cvt_f32_u32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cvt_f32_u32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_u32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cvt_f32_u32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cvt_f32_u32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cvt_f32_u32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cvt_f32_u32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cvt_f32_u32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cvt_f32_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cvt_f32_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f32_u32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f32_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cvt_f32_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cvt_f32_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cvt_f32_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cvt_f32_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cvt_f32_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cvt_f32_u32_sdwa v5, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0c,0x0a,0x7e,0x01,0x06,0x0e,0x06]
+
+v_cvt_f32_u32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cvt_f32_u32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cvt_f32_u32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cvt_f32_u32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cvt_f32_u32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cvt_f32_u32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cvt_f32_u32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cvt_f32_u32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cvt_f32_u32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cvt_f32_u32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cvt_f32_u32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cvt_f32_u32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cvt_f32_u32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cvt_f32_u32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cvt_f32_u32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cvt_f32_u32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cvt_f32_u32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cvt_f32_u32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cvt_f32_u32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cvt_f32_u32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cvt_f32_u32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f32_u32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f32_u32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cvt_f32_u32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cvt_f32_u32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f32_u32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f32_u32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x0c,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_cvt_u32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_u32_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0e,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cvt_u32_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cvt_u32_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cvt_u32_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_u32_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cvt_u32_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cvt_u32_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cvt_u32_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cvt_u32_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cvt_u32_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cvt_u32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cvt_u32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_u32_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_u32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_u32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cvt_u32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cvt_u32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cvt_u32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cvt_u32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cvt_u32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cvt_u32_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_cvt_u32_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x0e,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_cvt_u32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cvt_u32_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cvt_u32_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cvt_u32_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cvt_u32_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cvt_u32_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cvt_u32_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cvt_u32_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cvt_u32_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cvt_u32_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cvt_u32_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cvt_u32_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cvt_u32_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cvt_u32_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cvt_u32_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cvt_u32_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cvt_u32_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cvt_u32_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cvt_u32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cvt_u32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cvt_u32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_u32_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_u32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cvt_u32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cvt_u32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_u32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_u32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_cvt_u32_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_cvt_u32_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x0e,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_cvt_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_i32_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x10,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cvt_i32_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x10,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cvt_i32_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cvt_i32_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_i32_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cvt_i32_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cvt_i32_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cvt_i32_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cvt_i32_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cvt_i32_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cvt_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cvt_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_i32_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cvt_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cvt_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cvt_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cvt_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cvt_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cvt_i32_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_cvt_i32_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x10,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_cvt_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cvt_i32_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cvt_i32_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cvt_i32_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cvt_i32_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cvt_i32_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cvt_i32_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cvt_i32_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cvt_i32_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cvt_i32_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cvt_i32_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cvt_i32_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cvt_i32_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cvt_i32_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cvt_i32_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cvt_i32_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cvt_i32_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cvt_i32_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cvt_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cvt_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cvt_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cvt_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cvt_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_cvt_i32_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_cvt_i32_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x10,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_cvt_f16_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f16_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x14,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cvt_f16_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x14,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cvt_f16_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cvt_f16_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f16_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cvt_f16_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cvt_f16_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cvt_f16_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cvt_f16_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cvt_f16_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cvt_f16_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cvt_f16_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f16_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f16_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f16_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cvt_f16_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cvt_f16_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cvt_f16_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cvt_f16_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cvt_f16_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cvt_f16_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_cvt_f16_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x14,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_cvt_f16_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cvt_f16_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cvt_f16_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cvt_f16_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cvt_f16_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cvt_f16_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cvt_f16_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cvt_f16_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cvt_f16_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cvt_f16_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cvt_f16_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cvt_f16_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cvt_f16_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cvt_f16_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cvt_f16_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cvt_f16_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cvt_f16_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cvt_f16_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cvt_f16_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cvt_f16_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cvt_f16_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f16_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f16_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cvt_f16_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cvt_f16_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f16_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f16_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_cvt_f16_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_cvt_f16_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x14,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_cvt_f32_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_f16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x16,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_f16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x16,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cvt_f32_f16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cvt_f32_f16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_f16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cvt_f32_f16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cvt_f32_f16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cvt_f32_f16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cvt_f32_f16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cvt_f32_f16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cvt_f32_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cvt_f32_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f32_f16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f32_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cvt_f32_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cvt_f32_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cvt_f32_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cvt_f32_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cvt_f32_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cvt_f32_f16_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_cvt_f32_f16_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x16,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_cvt_f32_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cvt_f32_f16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cvt_f32_f16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cvt_f32_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cvt_f32_f16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cvt_f32_f16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cvt_f32_f16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cvt_f32_f16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cvt_f32_f16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cvt_f32_f16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cvt_f32_f16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cvt_f32_f16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cvt_f32_f16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cvt_f32_f16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cvt_f32_f16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cvt_f32_f16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cvt_f32_f16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cvt_f32_f16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cvt_f32_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cvt_f32_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cvt_f32_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f32_f16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f32_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cvt_f32_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cvt_f32_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f32_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f32_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_cvt_f32_f16_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_cvt_f32_f16_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x16,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x18,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x18,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_cvt_rpi_i32_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x18,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cvt_rpi_i32_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_rpi_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_cvt_rpi_i32_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x18,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_flr_i32_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1a,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_cvt_flr_i32_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1a,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_cvt_flr_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cvt_flr_i32_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cvt_flr_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cvt_flr_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_flr_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_flr_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cvt_flr_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cvt_flr_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_flr_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_flr_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_cvt_flr_i32_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1a,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_cvt_off_f32_i4_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_off_f32_i4_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1c,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cvt_off_f32_i4_sdwa v5, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x1c,0x0a,0x7e,0x01,0x06,0x0e,0x06]
+
+v_cvt_off_f32_i4_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cvt_off_f32_i4_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cvt_off_f32_i4_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cvt_off_f32_i4_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cvt_off_f32_i4_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cvt_off_f32_i4_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cvt_off_f32_i4_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cvt_off_f32_i4_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cvt_off_f32_i4_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cvt_off_f32_i4_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cvt_off_f32_i4_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cvt_off_f32_i4_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cvt_off_f32_i4_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cvt_off_f32_i4_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cvt_off_f32_i4_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cvt_off_f32_i4_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cvt_off_f32_i4_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cvt_off_f32_i4_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cvt_off_f32_i4_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cvt_off_f32_i4_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cvt_off_f32_i4_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_off_f32_i4_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_off_f32_i4_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cvt_off_f32_i4_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cvt_off_f32_i4_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_off_f32_i4_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_off_f32_i4_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x1c,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte0_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x22,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x22,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cvt_f32_ubyte0_sdwa v5, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x22,0x0a,0x7e,0x01,0x06,0x0e,0x06]
+
+v_cvt_f32_ubyte0_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cvt_f32_ubyte0_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x22,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cvt_f32_ubyte0_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cvt_f32_ubyte0_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cvt_f32_ubyte0_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cvt_f32_ubyte0_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cvt_f32_ubyte0_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cvt_f32_ubyte0_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cvt_f32_ubyte0_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cvt_f32_ubyte0_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cvt_f32_ubyte0_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cvt_f32_ubyte0_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cvt_f32_ubyte0_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cvt_f32_ubyte0_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cvt_f32_ubyte0_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cvt_f32_ubyte0_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cvt_f32_ubyte0_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cvt_f32_ubyte0_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cvt_f32_ubyte0_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cvt_f32_ubyte0_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cvt_f32_ubyte0_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f32_ubyte0_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f32_ubyte0_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cvt_f32_ubyte0_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cvt_f32_ubyte0_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f32_ubyte0_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f32_ubyte0_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x22,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte1_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x24,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x24,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cvt_f32_ubyte1_sdwa v5, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x24,0x0a,0x7e,0x01,0x06,0x0e,0x06]
+
+v_cvt_f32_ubyte1_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cvt_f32_ubyte1_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x24,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cvt_f32_ubyte1_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cvt_f32_ubyte1_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cvt_f32_ubyte1_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cvt_f32_ubyte1_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cvt_f32_ubyte1_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cvt_f32_ubyte1_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cvt_f32_ubyte1_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cvt_f32_ubyte1_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cvt_f32_ubyte1_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cvt_f32_ubyte1_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cvt_f32_ubyte1_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cvt_f32_ubyte1_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cvt_f32_ubyte1_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cvt_f32_ubyte1_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cvt_f32_ubyte1_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cvt_f32_ubyte1_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cvt_f32_ubyte1_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cvt_f32_ubyte1_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cvt_f32_ubyte1_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f32_ubyte1_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f32_ubyte1_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cvt_f32_ubyte1_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cvt_f32_ubyte1_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f32_ubyte1_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f32_ubyte1_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x24,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte2_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x26,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x26,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cvt_f32_ubyte2_sdwa v5, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x26,0x0a,0x7e,0x01,0x06,0x0e,0x06]
+
+v_cvt_f32_ubyte2_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cvt_f32_ubyte2_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x26,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cvt_f32_ubyte2_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cvt_f32_ubyte2_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cvt_f32_ubyte2_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cvt_f32_ubyte2_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cvt_f32_ubyte2_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cvt_f32_ubyte2_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cvt_f32_ubyte2_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cvt_f32_ubyte2_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cvt_f32_ubyte2_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cvt_f32_ubyte2_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cvt_f32_ubyte2_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cvt_f32_ubyte2_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cvt_f32_ubyte2_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cvt_f32_ubyte2_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cvt_f32_ubyte2_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cvt_f32_ubyte2_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cvt_f32_ubyte2_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cvt_f32_ubyte2_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cvt_f32_ubyte2_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f32_ubyte2_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f32_ubyte2_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cvt_f32_ubyte2_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cvt_f32_ubyte2_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f32_ubyte2_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f32_ubyte2_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x26,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte3_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x28,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x28,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cvt_f32_ubyte3_sdwa v5, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x28,0x0a,0x7e,0x01,0x06,0x0e,0x06]
+
+v_cvt_f32_ubyte3_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cvt_f32_ubyte3_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x28,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cvt_f32_ubyte3_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cvt_f32_ubyte3_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cvt_f32_ubyte3_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cvt_f32_ubyte3_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cvt_f32_ubyte3_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cvt_f32_ubyte3_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cvt_f32_ubyte3_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cvt_f32_ubyte3_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cvt_f32_ubyte3_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cvt_f32_ubyte3_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cvt_f32_ubyte3_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cvt_f32_ubyte3_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cvt_f32_ubyte3_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cvt_f32_ubyte3_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cvt_f32_ubyte3_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cvt_f32_ubyte3_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cvt_f32_ubyte3_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cvt_f32_ubyte3_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cvt_f32_ubyte3_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f32_ubyte3_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f32_ubyte3_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cvt_f32_ubyte3_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cvt_f32_ubyte3_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f32_ubyte3_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f32_ubyte3_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x28,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
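+// v_fract_f32 SDWA and DPP variants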
+v_fract_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_fract_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x36,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_fract_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x36,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_fract_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_fract_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_fract_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_fract_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_fract_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_fract_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_fract_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_fract_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_fract_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_fract_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_fract_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_fract_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_fract_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_fract_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_fract_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_fract_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_fract_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_fract_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_fract_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_fract_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x36,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_fract_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_fract_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_fract_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_fract_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_fract_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_fract_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_fract_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_fract_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_fract_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_fract_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_fract_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_fract_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_fract_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_fract_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_fract_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_fract_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_fract_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_fract_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_fract_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_fract_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_fract_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_fract_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_fract_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_fract_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_fract_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_fract_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_fract_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_fract_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_fract_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x36,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
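+// v_trunc_f32 SDWA and DPP variants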
+v_trunc_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_trunc_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x38,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_trunc_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x38,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_trunc_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_trunc_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_trunc_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_trunc_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_trunc_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_trunc_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_trunc_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_trunc_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_trunc_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_trunc_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_trunc_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_trunc_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_trunc_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_trunc_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_trunc_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_trunc_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_trunc_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_trunc_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_trunc_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_trunc_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x38,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_trunc_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_trunc_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_trunc_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_trunc_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_trunc_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_trunc_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_trunc_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_trunc_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_trunc_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_trunc_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_trunc_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_trunc_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_trunc_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_trunc_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_trunc_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_trunc_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_trunc_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_trunc_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_trunc_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_trunc_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_trunc_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_trunc_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_trunc_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_trunc_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_trunc_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_trunc_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_trunc_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_trunc_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_trunc_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x38,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
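+// v_ceil_f32 SDWA and DPP variants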
+v_ceil_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_ceil_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3a,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_ceil_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_ceil_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_ceil_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_ceil_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_ceil_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_ceil_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_ceil_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_ceil_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_ceil_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_ceil_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_ceil_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_ceil_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_ceil_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_ceil_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_ceil_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_ceil_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_ceil_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_ceil_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_ceil_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_ceil_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_ceil_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3a,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_ceil_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_ceil_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_ceil_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_ceil_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_ceil_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_ceil_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_ceil_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_ceil_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_ceil_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_ceil_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_ceil_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_ceil_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_ceil_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_ceil_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_ceil_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_ceil_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_ceil_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_ceil_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_ceil_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_ceil_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_ceil_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_ceil_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_ceil_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_ceil_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_ceil_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_ceil_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_ceil_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_ceil_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_ceil_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3a,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
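+// v_rndne_f32 SDWA and DPP variants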
+v_rndne_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rndne_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3c,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_rndne_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_rndne_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_rndne_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rndne_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_rndne_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_rndne_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_rndne_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_rndne_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_rndne_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_rndne_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_rndne_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_rndne_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_rndne_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rndne_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_rndne_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_rndne_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_rndne_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_rndne_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_rndne_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_rndne_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_rndne_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3c,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_rndne_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_rndne_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_rndne_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_rndne_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_rndne_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_rndne_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_rndne_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_rndne_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_rndne_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_rndne_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_rndne_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_rndne_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_rndne_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_rndne_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_rndne_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_rndne_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_rndne_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_rndne_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_rndne_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_rndne_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_rndne_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_rndne_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_rndne_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_rndne_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_rndne_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_rndne_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_rndne_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_rndne_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_rndne_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3c,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
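+// v_floor_f32 SDWA and DPP variants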
+v_floor_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_floor_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3e,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_floor_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_floor_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_floor_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_floor_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_floor_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_floor_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_floor_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_floor_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_floor_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_floor_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_floor_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_floor_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_floor_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_floor_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_floor_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_floor_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_floor_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_floor_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_floor_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_floor_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_floor_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x3e,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_floor_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_floor_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_floor_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_floor_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_floor_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_floor_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_floor_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_floor_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_floor_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_floor_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_floor_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_floor_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_floor_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_floor_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_floor_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_floor_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_floor_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_floor_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_floor_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_floor_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_floor_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_floor_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_floor_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_floor_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_floor_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_floor_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_floor_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_floor_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_floor_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x3e,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
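+// v_exp_f32 SDWA and DPP variants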
+v_exp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_exp_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x40,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_exp_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x40,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_exp_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_exp_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_exp_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_exp_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_exp_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_exp_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_exp_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_exp_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_exp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_exp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_exp_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_exp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_exp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_exp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_exp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_exp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_exp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_exp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_exp_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_exp_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x40,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_exp_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_exp_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_exp_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_exp_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_exp_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_exp_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_exp_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_exp_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_exp_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_exp_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_exp_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_exp_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_exp_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_exp_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_exp_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_exp_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_exp_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_exp_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_exp_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_exp_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_exp_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_exp_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_exp_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_exp_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_exp_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_exp_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_exp_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_exp_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_exp_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x40,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
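+// v_log_f32 SDWA and DPP variants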
+v_log_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_log_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x42,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_log_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x42,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_log_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_log_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_log_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_log_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_log_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_log_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_log_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_log_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_log_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_log_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_log_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_log_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_log_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_log_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_log_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_log_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_log_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_log_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_log_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_log_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x42,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_log_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_log_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_log_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_log_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_log_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_log_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_log_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_log_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_log_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_log_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_log_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_log_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_log_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_log_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_log_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_log_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_log_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_log_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_log_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_log_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_log_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_log_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_log_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_log_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_log_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_log_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_log_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_log_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_log_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x42,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
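+// v_rcp_f32 SDWA and DPP variants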
+v_rcp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rcp_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x44,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_rcp_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x44,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_rcp_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_rcp_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rcp_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_rcp_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_rcp_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_rcp_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_rcp_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_rcp_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_rcp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_rcp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_rcp_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_rcp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rcp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_rcp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_rcp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_rcp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_rcp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_rcp_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_rcp_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_rcp_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x44,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_rcp_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_rcp_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_rcp_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_rcp_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_rcp_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_rcp_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_rcp_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_rcp_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_rcp_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_rcp_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_rcp_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_rcp_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_rcp_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_rcp_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_rcp_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_rcp_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_rcp_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_rcp_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_rcp_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_rcp_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_rcp_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_rcp_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_rcp_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_rcp_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_rcp_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_rcp_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_rcp_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_rcp_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_rcp_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x44,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
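+// v_rcp_iflag_f32 SDWA and DPP variants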
+v_rcp_iflag_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rcp_iflag_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x46,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x46,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_rcp_iflag_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_rcp_iflag_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_rcp_iflag_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x46,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
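+// Layout common to the DPP tests below: byte 0 is the 0xfa DPP marker, byte 4
+// is the src0 VGPR, byte 5 plus bit 0 of byte 6 form the 9-bit dpp_ctrl
+// (quad_perm:[0,1,2,3] = 0x0e4, row_mirror = 0x140, row_shl:1 = 0x101),
+// byte 6 also carries bound_ctrl (bit 3) and src0 neg/abs (bits 4-5), and
+// byte 7 packs bank_mask in its low nibble and row_mask in its high nibble,
+// each defaulting to 0xf when omitted.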
+v_rcp_iflag_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_rcp_iflag_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_rcp_iflag_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_rcp_iflag_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_rcp_iflag_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_rcp_iflag_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_rcp_iflag_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_rcp_iflag_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_rcp_iflag_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_rcp_iflag_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_rcp_iflag_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_rcp_iflag_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_rcp_iflag_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_rcp_iflag_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_rcp_iflag_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_rcp_iflag_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_rcp_iflag_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_rcp_iflag_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_rcp_iflag_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_rcp_iflag_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_rcp_iflag_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_rcp_iflag_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_rcp_iflag_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_rcp_iflag_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_rcp_iflag_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_rcp_iflag_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_rcp_iflag_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_rcp_iflag_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_rcp_iflag_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x46,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_rsq_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rsq_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x48,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_rsq_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x48,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_rsq_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_rsq_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rsq_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_rsq_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_rsq_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_rsq_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_rsq_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_rsq_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_rsq_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_rsq_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_rsq_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_rsq_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rsq_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_rsq_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_rsq_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_rsq_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_rsq_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_rsq_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_rsq_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_rsq_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x48,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_rsq_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_rsq_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_rsq_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_rsq_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_rsq_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_rsq_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_rsq_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_rsq_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_rsq_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_rsq_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_rsq_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_rsq_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_rsq_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_rsq_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_rsq_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_rsq_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_rsq_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_rsq_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_rsq_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_rsq_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_rsq_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_rsq_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_rsq_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_rsq_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_rsq_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_rsq_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_rsq_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_rsq_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_rsq_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x48,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_sqrt_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_sqrt_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x4e,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_sqrt_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_sqrt_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_sqrt_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_sqrt_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_sqrt_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_sqrt_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_sqrt_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_sqrt_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_sqrt_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_sqrt_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_sqrt_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_sqrt_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_sqrt_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_sqrt_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_sqrt_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_sqrt_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_sqrt_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_sqrt_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_sqrt_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_sqrt_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_sqrt_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x4e,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_sqrt_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_sqrt_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_sqrt_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_sqrt_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_sqrt_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_sqrt_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_sqrt_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_sqrt_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_sqrt_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_sqrt_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_sqrt_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_sqrt_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_sqrt_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_sqrt_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_sqrt_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_sqrt_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_sqrt_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_sqrt_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_sqrt_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_sqrt_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_sqrt_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_sqrt_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_sqrt_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_sqrt_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_sqrt_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_sqrt_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_sqrt_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_sqrt_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_sqrt_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x4e,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_sin_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_sin_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x52,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_sin_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x52,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_sin_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_sin_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_sin_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_sin_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_sin_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_sin_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_sin_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_sin_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_sin_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_sin_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_sin_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_sin_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_sin_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_sin_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_sin_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_sin_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_sin_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_sin_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_sin_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_sin_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x52,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_sin_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_sin_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_sin_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_sin_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_sin_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_sin_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_sin_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_sin_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_sin_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_sin_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_sin_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_sin_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_sin_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_sin_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_sin_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_sin_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_sin_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_sin_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_sin_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_sin_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_sin_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_sin_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_sin_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_sin_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_sin_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_sin_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_sin_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_sin_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_sin_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x52,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_cos_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cos_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x54,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cos_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x54,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cos_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cos_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cos_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cos_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cos_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cos_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cos_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cos_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cos_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cos_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cos_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cos_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cos_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cos_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cos_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cos_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cos_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cos_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cos_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_cos_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x54,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_cos_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cos_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cos_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cos_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cos_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cos_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cos_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cos_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cos_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cos_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cos_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cos_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cos_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cos_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cos_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cos_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cos_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cos_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cos_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cos_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cos_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cos_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cos_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cos_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cos_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cos_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cos_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_cos_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_cos_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x54,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
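+// v_not_b32 and the bit-manipulation opcodes below take integer sources, so
+// the float-only -v1 and |v1| modifier tests drop out here and the SDWA tests
+// gain sext(v1) instead, which sets bit 3 (0x08) of the src0_sel byte.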
+v_not_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_not_b32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x56,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_not_b32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x56,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_not_b32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_not_b32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_not_b32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_not_b32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_not_b32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_not_b32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_not_b32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_not_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_not_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_not_b32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_not_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_not_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_not_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_not_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_not_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_not_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_not_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_not_b32_sdwa v5, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x56,0x0a,0x7e,0x01,0x06,0x0e,0x06]
+
+v_not_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_not_b32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x56,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_not_b32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_not_b32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_not_b32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_not_b32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_not_b32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_not_b32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_not_b32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_not_b32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_not_b32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_not_b32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_not_b32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_not_b32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_not_b32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_not_b32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_not_b32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_not_b32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_not_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_not_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_not_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_not_b32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_not_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_not_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_not_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_not_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_not_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x56,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_bfrev_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_bfrev_b32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x58,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_bfrev_b32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x58,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_bfrev_b32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_bfrev_b32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_bfrev_b32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_bfrev_b32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_bfrev_b32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_bfrev_b32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_bfrev_b32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_bfrev_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_bfrev_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_bfrev_b32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_bfrev_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_bfrev_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_bfrev_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_bfrev_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_bfrev_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_bfrev_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_bfrev_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_bfrev_b32_sdwa v5, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x58,0x0a,0x7e,0x01,0x06,0x0e,0x06]
+
+v_bfrev_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_bfrev_b32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x58,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_bfrev_b32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_bfrev_b32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_bfrev_b32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_bfrev_b32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_bfrev_b32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_bfrev_b32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_bfrev_b32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_bfrev_b32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_bfrev_b32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_bfrev_b32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_bfrev_b32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_bfrev_b32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_bfrev_b32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_bfrev_b32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_bfrev_b32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_bfrev_b32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_bfrev_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_bfrev_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_bfrev_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_bfrev_b32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_bfrev_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_bfrev_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_bfrev_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_bfrev_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_bfrev_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x58,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_ffbh_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_ffbh_u32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5a,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_ffbh_u32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_ffbh_u32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_ffbh_u32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_ffbh_u32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_ffbh_u32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_ffbh_u32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_ffbh_u32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_ffbh_u32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_ffbh_u32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_ffbh_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_ffbh_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_ffbh_u32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_ffbh_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_ffbh_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_ffbh_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_ffbh_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_ffbh_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_ffbh_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_ffbh_u32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_ffbh_u32_sdwa v5, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5a,0x0a,0x7e,0x01,0x06,0x0e,0x06]
+
+v_ffbh_u32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_ffbh_u32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_ffbh_u32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_ffbh_u32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_ffbh_u32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_ffbh_u32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_ffbh_u32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_ffbh_u32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_ffbh_u32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_ffbh_u32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_ffbh_u32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_ffbh_u32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_ffbh_u32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_ffbh_u32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_ffbh_u32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_ffbh_u32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_ffbh_u32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_ffbh_u32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_ffbh_u32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_ffbh_u32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_ffbh_u32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_ffbh_u32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_ffbh_u32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_ffbh_u32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_ffbh_u32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_ffbh_u32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_ffbh_u32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x5a,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_ffbl_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_ffbl_b32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5c,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_ffbl_b32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_ffbl_b32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_ffbl_b32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_ffbl_b32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_ffbl_b32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_ffbl_b32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_ffbl_b32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_ffbl_b32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_ffbl_b32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_ffbl_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_ffbl_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_ffbl_b32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_ffbl_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_ffbl_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_ffbl_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_ffbl_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_ffbl_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_ffbl_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_ffbl_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_ffbl_b32_sdwa v5, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5c,0x0a,0x7e,0x01,0x06,0x0e,0x06]
+
+v_ffbl_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_ffbl_b32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_ffbl_b32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_ffbl_b32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_ffbl_b32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_ffbl_b32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_ffbl_b32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_ffbl_b32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_ffbl_b32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_ffbl_b32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_ffbl_b32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_ffbl_b32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_ffbl_b32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_ffbl_b32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_ffbl_b32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_ffbl_b32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_ffbl_b32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_ffbl_b32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_ffbl_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_ffbl_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_ffbl_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_ffbl_b32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_ffbl_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_ffbl_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_ffbl_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_ffbl_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_ffbl_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x5c,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_ffbh_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_ffbh_i32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5e,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_ffbh_i32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_ffbh_i32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_ffbh_i32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_ffbh_i32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_ffbh_i32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_ffbh_i32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_ffbh_i32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_ffbh_i32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_ffbh_i32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_ffbh_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_ffbh_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_ffbh_i32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_ffbh_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_ffbh_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_ffbh_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_ffbh_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_ffbh_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_ffbh_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_ffbh_i32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_ffbh_i32_sdwa v5, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x5e,0x0a,0x7e,0x01,0x06,0x0e,0x06]
+
+v_ffbh_i32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_ffbh_i32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_ffbh_i32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_ffbh_i32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_ffbh_i32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_ffbh_i32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_ffbh_i32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_ffbh_i32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_ffbh_i32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_ffbh_i32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_ffbh_i32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_ffbh_i32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_ffbh_i32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_ffbh_i32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_ffbh_i32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_ffbh_i32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_ffbh_i32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_ffbh_i32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_ffbh_i32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_ffbh_i32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_ffbh_i32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_ffbh_i32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_ffbh_i32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_ffbh_i32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_ffbh_i32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_ffbh_i32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_ffbh_i32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x5e,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
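+// v_frexp_exp_i32_f32: SDWA and DPP variants, including neg (-v1) and abs (|v1|) source modifiers.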
+v_frexp_exp_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_frexp_exp_i32_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x66,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x66,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_frexp_exp_i32_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x66,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_frexp_exp_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_frexp_exp_i32_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_frexp_exp_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_frexp_exp_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_frexp_exp_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_frexp_exp_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_frexp_exp_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_frexp_exp_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_frexp_exp_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_frexp_exp_i32_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_frexp_exp_i32_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x66,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
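+// v_frexp_mant_f32: SDWA and DPP variants with neg/abs source modifiers.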
+v_frexp_mant_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_frexp_mant_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x68,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_frexp_mant_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x68,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_frexp_mant_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_frexp_mant_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_frexp_mant_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_frexp_mant_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_frexp_mant_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_frexp_mant_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_frexp_mant_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_frexp_mant_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_frexp_mant_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_frexp_mant_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_frexp_mant_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_frexp_mant_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_frexp_mant_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_frexp_mant_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_frexp_mant_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_frexp_mant_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_frexp_mant_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_frexp_mant_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_frexp_mant_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_frexp_mant_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x68,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_frexp_mant_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_frexp_mant_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_frexp_mant_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_frexp_mant_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_frexp_mant_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_frexp_mant_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_frexp_mant_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_frexp_mant_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_frexp_mant_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_frexp_mant_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_frexp_mant_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_frexp_mant_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_frexp_mant_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_frexp_mant_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_frexp_mant_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_frexp_mant_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_frexp_mant_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_frexp_mant_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_frexp_mant_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_frexp_mant_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_frexp_mant_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_frexp_mant_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_frexp_mant_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_frexp_mant_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_frexp_mant_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_frexp_mant_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_frexp_mant_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_frexp_mant_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_frexp_mant_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x68,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
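+// v_cvt_f16_u16: SDWA variants with sext(), then DPP variants.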
+v_cvt_f16_u16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f16_u16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x72,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cvt_f16_u16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x72,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cvt_f16_u16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cvt_f16_u16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f16_u16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cvt_f16_u16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cvt_f16_u16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cvt_f16_u16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cvt_f16_u16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cvt_f16_u16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cvt_f16_u16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cvt_f16_u16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f16_u16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f16_u16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f16_u16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cvt_f16_u16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cvt_f16_u16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cvt_f16_u16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cvt_f16_u16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cvt_f16_u16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cvt_f16_u16_sdwa v5, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x72,0x0a,0x7e,0x01,0x06,0x0e,0x06]
+
+v_cvt_f16_u16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cvt_f16_u16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x72,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cvt_f16_u16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cvt_f16_u16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cvt_f16_u16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cvt_f16_u16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cvt_f16_u16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cvt_f16_u16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cvt_f16_u16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cvt_f16_u16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cvt_f16_u16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cvt_f16_u16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cvt_f16_u16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cvt_f16_u16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cvt_f16_u16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cvt_f16_u16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cvt_f16_u16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cvt_f16_u16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cvt_f16_u16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cvt_f16_u16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cvt_f16_u16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f16_u16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f16_u16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cvt_f16_u16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cvt_f16_u16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f16_u16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f16_u16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x72,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
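+// v_cvt_f16_i16: SDWA variants with sext(), then DPP variants.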
+v_cvt_f16_i16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f16_i16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x74,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cvt_f16_i16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x74,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cvt_f16_i16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cvt_f16_i16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f16_i16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cvt_f16_i16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cvt_f16_i16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cvt_f16_i16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cvt_f16_i16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cvt_f16_i16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cvt_f16_i16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cvt_f16_i16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f16_i16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_f16_i16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_f16_i16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cvt_f16_i16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cvt_f16_i16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cvt_f16_i16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cvt_f16_i16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cvt_f16_i16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cvt_f16_i16_sdwa v5, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x74,0x0a,0x7e,0x01,0x06,0x0e,0x06]
+
+v_cvt_f16_i16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cvt_f16_i16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x74,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cvt_f16_i16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cvt_f16_i16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cvt_f16_i16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cvt_f16_i16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cvt_f16_i16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cvt_f16_i16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cvt_f16_i16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cvt_f16_i16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cvt_f16_i16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cvt_f16_i16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cvt_f16_i16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cvt_f16_i16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cvt_f16_i16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cvt_f16_i16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cvt_f16_i16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cvt_f16_i16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cvt_f16_i16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cvt_f16_i16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cvt_f16_i16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f16_i16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_f16_i16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cvt_f16_i16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cvt_f16_i16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f16_i16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_f16_i16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x74,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
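+// v_cvt_u16_f16: SDWA and DPP variants with neg/abs source modifiers.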
+v_cvt_u16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_u16_f16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x76,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cvt_u16_f16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x76,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cvt_u16_f16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cvt_u16_f16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_u16_f16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cvt_u16_f16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cvt_u16_f16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cvt_u16_f16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cvt_u16_f16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cvt_u16_f16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cvt_u16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cvt_u16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_u16_f16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_u16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_u16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cvt_u16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cvt_u16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cvt_u16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cvt_u16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cvt_u16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cvt_u16_f16_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_cvt_u16_f16_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x76,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_cvt_u16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cvt_u16_f16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cvt_u16_f16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cvt_u16_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cvt_u16_f16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cvt_u16_f16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cvt_u16_f16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cvt_u16_f16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cvt_u16_f16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cvt_u16_f16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cvt_u16_f16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cvt_u16_f16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cvt_u16_f16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cvt_u16_f16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cvt_u16_f16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cvt_u16_f16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cvt_u16_f16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cvt_u16_f16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cvt_u16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cvt_u16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cvt_u16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_u16_f16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_u16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cvt_u16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cvt_u16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_u16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_u16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_cvt_u16_f16_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_cvt_u16_f16_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x76,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
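+// v_cvt_i16_f16: SDWA and DPP variants with neg/abs source modifiers.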
+v_cvt_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_i16_f16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x78,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cvt_i16_f16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x78,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cvt_i16_f16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cvt_i16_f16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_i16_f16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cvt_i16_f16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cvt_i16_f16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cvt_i16_f16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cvt_i16_f16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cvt_i16_f16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cvt_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cvt_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_i16_f16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cvt_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cvt_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cvt_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cvt_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cvt_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cvt_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cvt_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cvt_i16_f16_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_cvt_i16_f16_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x78,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_cvt_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cvt_i16_f16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cvt_i16_f16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cvt_i16_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cvt_i16_f16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cvt_i16_f16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cvt_i16_f16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cvt_i16_f16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cvt_i16_f16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cvt_i16_f16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cvt_i16_f16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cvt_i16_f16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cvt_i16_f16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cvt_i16_f16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cvt_i16_f16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cvt_i16_f16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cvt_i16_f16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cvt_i16_f16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cvt_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cvt_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cvt_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cvt_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cvt_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cvt_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cvt_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_cvt_i16_f16_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_cvt_i16_f16_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x78,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
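+// v_rcp_f16: SDWA and DPP variants with neg/abs source modifiers.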
+v_rcp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rcp_f16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7a,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_rcp_f16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_rcp_f16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_rcp_f16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rcp_f16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_rcp_f16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_rcp_f16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_rcp_f16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_rcp_f16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_rcp_f16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_rcp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_rcp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_rcp_f16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_rcp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rcp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_rcp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_rcp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_rcp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_rcp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_rcp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_rcp_f16_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_rcp_f16_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7a,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_rcp_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_rcp_f16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_rcp_f16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_rcp_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_rcp_f16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_rcp_f16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_rcp_f16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_rcp_f16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_rcp_f16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_rcp_f16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_rcp_f16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_rcp_f16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_rcp_f16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_rcp_f16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_rcp_f16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_rcp_f16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_rcp_f16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_rcp_f16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_rcp_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_rcp_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_rcp_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_rcp_f16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_rcp_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_rcp_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_rcp_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_rcp_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_rcp_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_rcp_f16_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_rcp_f16_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7a,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
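+// v_sqrt_f16: SDWA and DPP variants with neg/abs source modifiers.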
+v_sqrt_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_sqrt_f16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7c,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_sqrt_f16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_sqrt_f16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_sqrt_f16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_sqrt_f16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_sqrt_f16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_sqrt_f16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_sqrt_f16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_sqrt_f16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_sqrt_f16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_sqrt_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_sqrt_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_sqrt_f16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_sqrt_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_sqrt_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_sqrt_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_sqrt_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_sqrt_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_sqrt_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_sqrt_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_sqrt_f16_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_sqrt_f16_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7c,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_sqrt_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_sqrt_f16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_sqrt_f16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_sqrt_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_sqrt_f16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_sqrt_f16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_sqrt_f16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_sqrt_f16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_sqrt_f16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_sqrt_f16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_sqrt_f16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_sqrt_f16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_sqrt_f16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_sqrt_f16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_sqrt_f16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_sqrt_f16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_sqrt_f16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_sqrt_f16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_sqrt_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_sqrt_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_sqrt_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_sqrt_f16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_sqrt_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_sqrt_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_sqrt_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_sqrt_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_sqrt_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_sqrt_f16_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_sqrt_f16_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7c,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
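+// v_rsq_f16: SDWA and DPP variants.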
+v_rsq_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rsq_f16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7e,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_rsq_f16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_rsq_f16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_rsq_f16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rsq_f16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_rsq_f16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_rsq_f16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_rsq_f16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_rsq_f16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_rsq_f16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_rsq_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_rsq_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_rsq_f16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_rsq_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rsq_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_rsq_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_rsq_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_rsq_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_rsq_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_rsq_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_rsq_f16_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_rsq_f16_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x7e,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_rsq_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_rsq_f16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_rsq_f16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_rsq_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_rsq_f16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_rsq_f16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_rsq_f16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_rsq_f16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_rsq_f16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_rsq_f16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_rsq_f16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_rsq_f16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_rsq_f16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_rsq_f16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_rsq_f16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_rsq_f16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_rsq_f16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_rsq_f16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_rsq_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_rsq_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_rsq_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_rsq_f16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_rsq_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_rsq_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_rsq_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_rsq_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_rsq_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_rsq_f16_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_rsq_f16_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x7e,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_log_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_log_f16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x80,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_log_f16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x80,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_log_f16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_log_f16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_log_f16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_log_f16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_log_f16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_log_f16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_log_f16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_log_f16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_log_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_log_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_log_f16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_log_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_log_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_log_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_log_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_log_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_log_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_log_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_log_f16_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_log_f16_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x80,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_log_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_log_f16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_log_f16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_log_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_log_f16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_log_f16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_log_f16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_log_f16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_log_f16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_log_f16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_log_f16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_log_f16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_log_f16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_log_f16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_log_f16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_log_f16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_log_f16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_log_f16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_log_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_log_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_log_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_log_f16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_log_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_log_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_log_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_log_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_log_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_log_f16_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_log_f16_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x80,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_exp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_exp_f16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x82,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_exp_f16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x82,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_exp_f16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_exp_f16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_exp_f16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_exp_f16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_exp_f16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_exp_f16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_exp_f16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_exp_f16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_exp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_exp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_exp_f16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_exp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_exp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_exp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_exp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_exp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_exp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_exp_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_exp_f16_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_exp_f16_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x82,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_exp_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_exp_f16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_exp_f16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_exp_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_exp_f16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_exp_f16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_exp_f16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_exp_f16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_exp_f16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_exp_f16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_exp_f16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_exp_f16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_exp_f16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_exp_f16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_exp_f16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_exp_f16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_exp_f16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_exp_f16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_exp_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_exp_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_exp_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_exp_f16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_exp_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_exp_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_exp_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_exp_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_exp_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_exp_f16_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_exp_f16_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x82,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_frexp_mant_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_frexp_mant_f16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x84,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_frexp_mant_f16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x84,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_frexp_mant_f16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_frexp_mant_f16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_frexp_mant_f16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_frexp_mant_f16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_frexp_mant_f16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_frexp_mant_f16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_frexp_mant_f16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_frexp_mant_f16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_frexp_mant_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_frexp_mant_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_frexp_mant_f16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_frexp_mant_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_frexp_mant_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_frexp_mant_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_frexp_mant_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_frexp_mant_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_frexp_mant_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_frexp_mant_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_frexp_mant_f16_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_frexp_mant_f16_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x84,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_frexp_mant_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_frexp_mant_f16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_frexp_mant_f16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_frexp_mant_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_frexp_mant_f16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_frexp_mant_f16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_frexp_mant_f16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_frexp_mant_f16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_frexp_mant_f16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_frexp_mant_f16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_frexp_mant_f16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_frexp_mant_f16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_frexp_mant_f16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_frexp_mant_f16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_frexp_mant_f16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_frexp_mant_f16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_frexp_mant_f16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_frexp_mant_f16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_frexp_mant_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_frexp_mant_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_frexp_mant_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_frexp_mant_f16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_frexp_mant_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_frexp_mant_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_frexp_mant_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_frexp_mant_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_frexp_mant_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_frexp_mant_f16_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_frexp_mant_f16_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x84,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_frexp_exp_i16_f16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x86,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x86,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_frexp_exp_i16_f16_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x86,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_frexp_exp_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_frexp_exp_i16_f16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_frexp_exp_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_frexp_exp_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_frexp_exp_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_frexp_exp_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_frexp_exp_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_frexp_exp_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_frexp_exp_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_frexp_exp_i16_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_frexp_exp_i16_f16_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x86,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_floor_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_floor_f16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x88,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_floor_f16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x88,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_floor_f16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_floor_f16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_floor_f16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_floor_f16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_floor_f16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_floor_f16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_floor_f16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_floor_f16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_floor_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_floor_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_floor_f16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_floor_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_floor_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_floor_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_floor_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_floor_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_floor_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_floor_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_floor_f16_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_floor_f16_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x88,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_floor_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_floor_f16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_floor_f16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_floor_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_floor_f16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_floor_f16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_floor_f16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_floor_f16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_floor_f16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_floor_f16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_floor_f16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_floor_f16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_floor_f16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_floor_f16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_floor_f16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_floor_f16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_floor_f16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_floor_f16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_floor_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_floor_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_floor_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_floor_f16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_floor_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_floor_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_floor_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_floor_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_floor_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_floor_f16_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_floor_f16_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x88,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_ceil_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_ceil_f16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8a,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_ceil_f16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_ceil_f16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_ceil_f16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_ceil_f16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_ceil_f16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_ceil_f16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_ceil_f16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_ceil_f16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_ceil_f16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_ceil_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_ceil_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_ceil_f16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_ceil_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_ceil_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_ceil_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_ceil_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_ceil_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_ceil_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_ceil_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_ceil_f16_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_ceil_f16_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8a,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_ceil_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_ceil_f16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_ceil_f16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_ceil_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_ceil_f16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_ceil_f16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_ceil_f16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_ceil_f16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_ceil_f16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_ceil_f16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_ceil_f16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_ceil_f16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_ceil_f16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_ceil_f16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_ceil_f16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_ceil_f16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_ceil_f16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_ceil_f16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_ceil_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_ceil_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_ceil_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_ceil_f16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_ceil_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_ceil_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_ceil_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_ceil_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_ceil_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_ceil_f16_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_ceil_f16_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8a,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_trunc_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_trunc_f16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8c,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_trunc_f16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_trunc_f16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_trunc_f16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_trunc_f16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_trunc_f16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_trunc_f16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_trunc_f16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_trunc_f16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_trunc_f16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_trunc_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_trunc_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_trunc_f16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_trunc_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_trunc_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_trunc_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_trunc_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_trunc_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_trunc_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_trunc_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_trunc_f16_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_trunc_f16_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8c,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_trunc_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_trunc_f16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_trunc_f16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_trunc_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_trunc_f16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_trunc_f16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_trunc_f16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_trunc_f16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_trunc_f16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_trunc_f16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_trunc_f16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_trunc_f16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_trunc_f16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_trunc_f16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_trunc_f16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_trunc_f16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_trunc_f16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_trunc_f16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_trunc_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_trunc_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_trunc_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_trunc_f16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_trunc_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_trunc_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_trunc_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_trunc_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_trunc_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_trunc_f16_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_trunc_f16_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8c,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_rndne_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rndne_f16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8e,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_rndne_f16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_rndne_f16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_rndne_f16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rndne_f16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_rndne_f16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_rndne_f16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_rndne_f16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_rndne_f16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_rndne_f16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_rndne_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_rndne_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_rndne_f16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_rndne_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_rndne_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_rndne_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_rndne_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_rndne_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_rndne_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_rndne_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_rndne_f16_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_rndne_f16_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x8e,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_rndne_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_rndne_f16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_rndne_f16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_rndne_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_rndne_f16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_rndne_f16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_rndne_f16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_rndne_f16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_rndne_f16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_rndne_f16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_rndne_f16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_rndne_f16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_rndne_f16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_rndne_f16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_rndne_f16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_rndne_f16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_rndne_f16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_rndne_f16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_rndne_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_rndne_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_rndne_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_rndne_f16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_rndne_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_rndne_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_rndne_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_rndne_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_rndne_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_rndne_f16_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_rndne_f16_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x8e,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_fract_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_fract_f16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x90,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_fract_f16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x90,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_fract_f16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_fract_f16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_fract_f16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_fract_f16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_fract_f16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_fract_f16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_fract_f16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_fract_f16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_fract_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_fract_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_fract_f16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_fract_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_fract_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_fract_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_fract_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_fract_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_fract_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_fract_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_fract_f16_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_fract_f16_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x90,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_fract_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_fract_f16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_fract_f16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_fract_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_fract_f16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_fract_f16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_fract_f16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_fract_f16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_fract_f16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_fract_f16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_fract_f16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_fract_f16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_fract_f16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_fract_f16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_fract_f16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_fract_f16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_fract_f16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_fract_f16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_fract_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_fract_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_fract_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_fract_f16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_fract_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_fract_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_fract_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_fract_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_fract_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_fract_f16_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_fract_f16_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x90,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_sin_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_sin_f16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x92,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_sin_f16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x92,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_sin_f16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_sin_f16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_sin_f16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_sin_f16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_sin_f16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_sin_f16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_sin_f16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_sin_f16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_sin_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_sin_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_sin_f16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_sin_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_sin_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_sin_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_sin_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_sin_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_sin_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_sin_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_sin_f16_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_sin_f16_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x92,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_sin_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_sin_f16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_sin_f16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_sin_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_sin_f16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_sin_f16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_sin_f16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_sin_f16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_sin_f16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_sin_f16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_sin_f16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_sin_f16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_sin_f16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_sin_f16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_sin_f16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_sin_f16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_sin_f16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_sin_f16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_sin_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_sin_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_sin_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_sin_f16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_sin_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_sin_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_sin_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_sin_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_sin_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_sin_f16_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_sin_f16_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x92,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_cos_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cos_f16_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x94,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_cos_f16_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x94,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_cos_f16_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_cos_f16_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cos_f16_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_cos_f16_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_cos_f16_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_cos_f16_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_cos_f16_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_cos_f16_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_cos_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_cos_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cos_f16_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_cos_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_cos_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_cos_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_cos_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_cos_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_cos_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_cos_f16_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_cos_f16_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_cos_f16_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x94,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_cos_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_cos_f16_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_cos_f16_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_cos_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_cos_f16_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_cos_f16_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_cos_f16_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_cos_f16_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_cos_f16_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_cos_f16_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_cos_f16_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_cos_f16_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_cos_f16_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_cos_f16_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_cos_f16_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_cos_f16_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_cos_f16_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_cos_f16_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_cos_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_cos_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_cos_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cos_f16_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_cos_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_cos_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_cos_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cos_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_cos_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_cos_f16_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_cos_f16_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x94,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_exp_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_exp_legacy_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x96,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_exp_legacy_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x96,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_exp_legacy_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_exp_legacy_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_exp_legacy_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_exp_legacy_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_exp_legacy_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_exp_legacy_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_exp_legacy_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_exp_legacy_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_exp_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_exp_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_exp_legacy_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_exp_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_exp_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_exp_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_exp_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_exp_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_exp_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_exp_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_exp_legacy_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_exp_legacy_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x96,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_exp_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_exp_legacy_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_exp_legacy_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_exp_legacy_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_exp_legacy_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_exp_legacy_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_exp_legacy_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_exp_legacy_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_exp_legacy_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_exp_legacy_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_exp_legacy_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_exp_legacy_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_exp_legacy_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_exp_legacy_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_exp_legacy_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_exp_legacy_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_exp_legacy_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_exp_legacy_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_exp_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_exp_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_exp_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_exp_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_exp_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_exp_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_exp_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_exp_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_exp_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_exp_legacy_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_exp_legacy_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x96,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_log_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_log_legacy_f32_sdwa v255, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x98,0xfe,0x7f,0x01,0x06,0x06,0x06]
+
+v_log_legacy_f32_sdwa v5, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x98,0x0a,0x7e,0xff,0x06,0x06,0x06]
+
+v_log_legacy_f32_sdwa v5, v1 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x26,0x06,0x06]
+
+v_log_legacy_f32_sdwa v5, v1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_log_legacy_f32_sdwa v5, v1 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x00,0x06,0x06]
+
+v_log_legacy_f32_sdwa v5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x01,0x06,0x06]
+
+v_log_legacy_f32_sdwa v5, v1 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x02,0x06,0x06]
+
+v_log_legacy_f32_sdwa v5, v1 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x03,0x06,0x06]
+
+v_log_legacy_f32_sdwa v5, v1 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x04,0x06,0x06]
+
+v_log_legacy_f32_sdwa v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x05,0x06,0x06]
+
+v_log_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x0e,0x06,0x06]
+
+v_log_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_log_legacy_f32_sdwa v5, v1 dst_sel:DWORD src0_sel:DWORD
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x16,0x06,0x06]
+
+v_log_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_log_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x06,0x00,0x06]
+
+v_log_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x06,0x01,0x06]
+
+v_log_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x06,0x02,0x06]
+
+v_log_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x06,0x03,0x06]
+
+v_log_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x06,0x04,0x06]
+
+v_log_legacy_f32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x06,0x05,0x06]
+
+v_log_legacy_f32_sdwa v5, -v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x06,0x16,0x06]
+
+v_log_legacy_f32_sdwa v5, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x98,0x0a,0x7e,0x01,0x06,0x26,0x06]
+
+v_log_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_log_legacy_f32_dpp v255, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0xfe,0x7f,0x01,0xe4,0x00,0x00]
+
+v_log_legacy_f32_dpp v5, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0xff,0xe4,0x00,0x00]
+
+v_log_legacy_f32_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0x1b,0x00,0x00]
+
+v_log_legacy_f32_dpp v5, v1 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0x40,0x01,0x00]
+
+v_log_legacy_f32_dpp v5, v1 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0x41,0x01,0x00]
+
+v_log_legacy_f32_dpp v5, v1 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0x42,0x01,0x00]
+
+v_log_legacy_f32_dpp v5, v1 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0x43,0x01,0x00]
+
+v_log_legacy_f32_dpp v5, v1 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0x30,0x01,0x00]
+
+v_log_legacy_f32_dpp v5, v1 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0x34,0x01,0x00]
+
+v_log_legacy_f32_dpp v5, v1 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0x38,0x01,0x00]
+
+v_log_legacy_f32_dpp v5, v1 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0x3c,0x01,0x00]
+
+v_log_legacy_f32_dpp v5, v1 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0x01,0x01,0x00]
+
+v_log_legacy_f32_dpp v5, v1 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0x0f,0x01,0x00]
+
+v_log_legacy_f32_dpp v5, v1 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0x11,0x01,0x00]
+
+v_log_legacy_f32_dpp v5, v1 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0x1f,0x01,0x00]
+
+v_log_legacy_f32_dpp v5, v1 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0x21,0x01,0x00]
+
+v_log_legacy_f32_dpp v5, v1 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0x2f,0x01,0x00]
+
+v_log_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0xe4,0x00,0x10]
+
+v_log_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0xe4,0x00,0x30]
+
+v_log_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_log_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0xe4,0x00,0xf0]
+
+v_log_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0xe4,0x00,0x01]
+
+v_log_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0xe4,0x00,0x03]
+
+v_log_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_log_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0xe4,0x00,0x0f]
+
+v_log_legacy_f32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0xe4,0x08,0x00]
+
+v_log_legacy_f32_dpp v5, -v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0xe4,0x10,0x00]
+
+v_log_legacy_f32_dpp v5, |v1| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x98,0x0a,0x7e,0x01,0xe4,0x20,0x00]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x06,0x06]
+
+v_add_f32_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x03,0x01,0x06,0x06,0x06]
+
+v_add_f32_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0xff,0x06,0x06,0x06]
+
+v_add_f32_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x02,0x01,0x06,0x06,0x06]
+
+v_add_f32_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x26,0x06,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x06,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x00,0x06,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x01,0x06,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x02,0x06,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x03,0x06,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x04,0x06,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x05,0x06,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x0e,0x06,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x16,0x06,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x16,0x06,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x06,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x00,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x01,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x02,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x03,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x04,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x05,0x06]
+
+v_add_f32_sdwa v5, -v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x16,0x06]
+
+v_add_f32_sdwa v5, |v1|, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x26,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x06,0x06]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x06,0x00]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x06,0x01]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x06,0x02]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x06,0x03]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x06,0x04]
+
+v_add_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x06,0x05]
+
+v_add_f32_sdwa v5, v1, -v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x06,0x16]
+
+v_add_f32_sdwa v5, v1, |v2| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x02,0x01,0x06,0x06,0x26]
+
+v_add_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0xe4,0x00,0x00]
+
+v_add_f32_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x03,0x01,0xe4,0x00,0x00]
+
+v_add_f32_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0xff,0xe4,0x00,0x00]
+
+v_add_f32_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x02,0x01,0xe4,0x00,0x00]
+
+v_add_f32_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0x1b,0x00,0x00]
+
+v_add_f32_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0x40,0x01,0x00]
+
+v_add_f32_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0x41,0x01,0x00]
+
+v_add_f32_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0x42,0x01,0x00]
+
+v_add_f32_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0x43,0x01,0x00]
+
+v_add_f32_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0x30,0x01,0x00]
+
+v_add_f32_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0x34,0x01,0x00]
+
+v_add_f32_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0x38,0x01,0x00]
+
+v_add_f32_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0x3c,0x01,0x00]
+
+v_add_f32_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0x01,0x01,0x00]
+
+v_add_f32_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0x0f,0x01,0x00]
+
+v_add_f32_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0x11,0x01,0x00]
+
+v_add_f32_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0x1f,0x01,0x00]
+
+v_add_f32_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0x21,0x01,0x00]
+
+v_add_f32_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0x2f,0x01,0x00]
+
+v_add_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0xe4,0x00,0x10]
+
+v_add_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0xe4,0x00,0x30]
+
+v_add_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0xe4,0x00,0xf0]
+
+v_add_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0xe4,0x00,0xf0]
+
+v_add_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0xe4,0x00,0x01]
+
+v_add_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0xe4,0x00,0x03]
+
+v_add_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0xe4,0x00,0x0f]
+
+v_add_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0xe4,0x00,0x0f]
+
+v_add_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0xe4,0x08,0x00]
+
+v_add_f32_dpp v5, -v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0xe4,0x10,0x00]
+
+v_add_f32_dpp v5, |v1|, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0xe4,0x20,0x00]
+
+v_add_f32_dpp v5, v1, -v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0xe4,0x40,0x00]
+
+v_add_f32_dpp v5, v1, |v2| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x02,0x01,0xe4,0x80,0x00]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x06,0x06]
+
+v_sub_f32_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x05,0x01,0x06,0x06,0x06]
+
+v_sub_f32_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0xff,0x06,0x06,0x06]
+
+v_sub_f32_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x04,0x01,0x06,0x06,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x26,0x06,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x06,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x00,0x06,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x01,0x06,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x02,0x06,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x03,0x06,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x04,0x06,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x05,0x06,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x0e,0x06,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x16,0x06,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x16,0x06,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x06,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x00,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x01,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x02,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x03,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x04,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x05,0x06]
+
+v_sub_f32_sdwa v5, -v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x16,0x06]
+
+v_sub_f32_sdwa v5, |v1|, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x26,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x06,0x06]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x06,0x00]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x06,0x01]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x06,0x02]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x06,0x03]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x06,0x04]
+
+v_sub_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x06,0x05]
+
+v_sub_f32_sdwa v5, v1, -v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x06,0x16]
+
+v_sub_f32_sdwa v5, v1, |v2| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x04,0x01,0x06,0x06,0x26]
+
+v_sub_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0xe4,0x00,0x00]
+
+v_sub_f32_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x05,0x01,0xe4,0x00,0x00]
+
+v_sub_f32_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0xff,0xe4,0x00,0x00]
+
+v_sub_f32_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x04,0x01,0xe4,0x00,0x00]
+
+v_sub_f32_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0x1b,0x00,0x00]
+
+v_sub_f32_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0x40,0x01,0x00]
+
+v_sub_f32_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0x41,0x01,0x00]
+
+v_sub_f32_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0x42,0x01,0x00]
+
+v_sub_f32_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0x43,0x01,0x00]
+
+v_sub_f32_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0x30,0x01,0x00]
+
+v_sub_f32_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0x34,0x01,0x00]
+
+v_sub_f32_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0x38,0x01,0x00]
+
+v_sub_f32_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0x3c,0x01,0x00]
+
+v_sub_f32_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0x01,0x01,0x00]
+
+v_sub_f32_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0x0f,0x01,0x00]
+
+v_sub_f32_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0x11,0x01,0x00]
+
+v_sub_f32_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0x1f,0x01,0x00]
+
+v_sub_f32_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0x21,0x01,0x00]
+
+v_sub_f32_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0x2f,0x01,0x00]
+
+v_sub_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0xe4,0x00,0x10]
+
+v_sub_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0xe4,0x00,0x30]
+
+v_sub_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0xe4,0x00,0xf0]
+
+v_sub_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0xe4,0x00,0xf0]
+
+v_sub_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0xe4,0x00,0x01]
+
+v_sub_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0xe4,0x00,0x03]
+
+v_sub_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0xe4,0x00,0x0f]
+
+v_sub_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0xe4,0x00,0x0f]
+
+v_sub_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0xe4,0x08,0x00]
+
+v_sub_f32_dpp v5, -v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0xe4,0x10,0x00]
+
+v_sub_f32_dpp v5, |v1|, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0xe4,0x20,0x00]
+
+v_sub_f32_dpp v5, v1, -v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0xe4,0x40,0x00]
+
+v_sub_f32_dpp v5, v1, |v2| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x04,0x01,0xe4,0x80,0x00]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x06,0x06]
+
+v_subrev_f32_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x07,0x01,0x06,0x06,0x06]
+
+v_subrev_f32_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0xff,0x06,0x06,0x06]
+
+v_subrev_f32_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x06,0x01,0x06,0x06,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x26,0x06,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x06,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x00,0x06,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x01,0x06,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x02,0x06,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x03,0x06,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x04,0x06,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x05,0x06,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x0e,0x06,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x16,0x06,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x16,0x06,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x06,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x00,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x01,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x02,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x03,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x04,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x05,0x06]
+
+v_subrev_f32_sdwa v5, -v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x16,0x06]
+
+v_subrev_f32_sdwa v5, |v1|, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x26,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x06,0x06]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x06,0x00]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x06,0x01]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x06,0x02]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x06,0x03]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x06,0x04]
+
+v_subrev_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x06,0x05]
+
+v_subrev_f32_sdwa v5, v1, -v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x06,0x16]
+
+v_subrev_f32_sdwa v5, v1, |v2| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x06,0x01,0x06,0x06,0x26]
+
+v_subrev_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0xe4,0x00,0x00]
+
+v_subrev_f32_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0x00]
+
+v_subrev_f32_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0xff,0xe4,0x00,0x00]
+
+v_subrev_f32_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x06,0x01,0xe4,0x00,0x00]
+
+v_subrev_f32_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0x1b,0x00,0x00]
+
+v_subrev_f32_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0x40,0x01,0x00]
+
+v_subrev_f32_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0x41,0x01,0x00]
+
+v_subrev_f32_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0x42,0x01,0x00]
+
+v_subrev_f32_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0x43,0x01,0x00]
+
+v_subrev_f32_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0x30,0x01,0x00]
+
+v_subrev_f32_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0x34,0x01,0x00]
+
+v_subrev_f32_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0x38,0x01,0x00]
+
+v_subrev_f32_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0x3c,0x01,0x00]
+
+v_subrev_f32_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0x01,0x01,0x00]
+
+v_subrev_f32_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0x0f,0x01,0x00]
+
+v_subrev_f32_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0x11,0x01,0x00]
+
+v_subrev_f32_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0x1f,0x01,0x00]
+
+v_subrev_f32_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0x21,0x01,0x00]
+
+v_subrev_f32_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0x2f,0x01,0x00]
+
+v_subrev_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0xe4,0x00,0x10]
+
+v_subrev_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0xe4,0x00,0x30]
+
+v_subrev_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0xe4,0x00,0xf0]
+
+v_subrev_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0xe4,0x00,0xf0]
+
+v_subrev_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0xe4,0x00,0x01]
+
+v_subrev_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0xe4,0x00,0x03]
+
+v_subrev_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0xe4,0x00,0x0f]
+
+v_subrev_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0xe4,0x00,0x0f]
+
+v_subrev_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0xe4,0x08,0x00]
+
+v_subrev_f32_dpp v5, -v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0xe4,0x10,0x00]
+
+v_subrev_f32_dpp v5, |v1|, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0xe4,0x20,0x00]
+
+v_subrev_f32_dpp v5, v1, -v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0xe4,0x40,0x00]
+
+v_subrev_f32_dpp v5, v1, |v2| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x06,0x01,0xe4,0x80,0x00]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x06,0x06]
+
+v_mul_legacy_f32_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x09,0x01,0x06,0x06,0x06]
+
+v_mul_legacy_f32_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0xff,0x06,0x06,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x08,0x01,0x06,0x06,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x26,0x06,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x06,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x00,0x06,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x01,0x06,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x02,0x06,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x03,0x06,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x04,0x06,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x05,0x06,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x0e,0x06,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x16,0x06,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x16,0x06,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x06,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x00,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x01,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x02,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x03,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x04,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x05,0x06]
+
+v_mul_legacy_f32_sdwa v5, -v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x16,0x06]
+
+v_mul_legacy_f32_sdwa v5, |v1|, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x26,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x06,0x06]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x06,0x00]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x06,0x01]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x06,0x02]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x06,0x03]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x06,0x04]
+
+v_mul_legacy_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x06,0x05]
+
+v_mul_legacy_f32_sdwa v5, v1, -v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x06,0x16]
+
+v_mul_legacy_f32_sdwa v5, v1, |v2| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x08,0x01,0x06,0x06,0x26]
+
+v_mul_legacy_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0xe4,0x00,0x00]
+
+v_mul_legacy_f32_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x09,0x01,0xe4,0x00,0x00]
+
+v_mul_legacy_f32_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0xff,0xe4,0x00,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x08,0x01,0xe4,0x00,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0x1b,0x00,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0x40,0x01,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0x41,0x01,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0x42,0x01,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0x43,0x01,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0x30,0x01,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0x34,0x01,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0x38,0x01,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0x3c,0x01,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0x01,0x01,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0x0f,0x01,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0x11,0x01,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0x1f,0x01,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0x21,0x01,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0x2f,0x01,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0xe4,0x00,0x10]
+
+v_mul_legacy_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0xe4,0x00,0x30]
+
+v_mul_legacy_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0xe4,0x00,0xf0]
+
+v_mul_legacy_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0xe4,0x00,0xf0]
+
+v_mul_legacy_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0xe4,0x00,0x01]
+
+v_mul_legacy_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0xe4,0x00,0x03]
+
+v_mul_legacy_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0xe4,0x00,0x0f]
+
+v_mul_legacy_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0xe4,0x00,0x0f]
+
+v_mul_legacy_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0xe4,0x08,0x00]
+
+v_mul_legacy_f32_dpp v5, -v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0xe4,0x10,0x00]
+
+v_mul_legacy_f32_dpp v5, |v1|, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0xe4,0x20,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, -v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0xe4,0x40,0x00]
+
+v_mul_legacy_f32_dpp v5, v1, |v2| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x08,0x01,0xe4,0x80,0x00]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x06,0x06]
+
+v_mul_f32_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x0b,0x01,0x06,0x06,0x06]
+
+v_mul_f32_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0xff,0x06,0x06,0x06]
+
+v_mul_f32_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x0a,0x01,0x06,0x06,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x26,0x06,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x06,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x00,0x06,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x01,0x06,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x02,0x06,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x03,0x06,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x04,0x06,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x05,0x06,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x0e,0x06,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x16,0x06,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x16,0x06,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x06,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x00,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x01,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x02,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x03,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x04,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x05,0x06]
+
+v_mul_f32_sdwa v5, -v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x16,0x06]
+
+v_mul_f32_sdwa v5, |v1|, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x26,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x06,0x06]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x06,0x00]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x06,0x01]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x06,0x02]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x06,0x03]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x06,0x04]
+
+v_mul_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x06,0x05]
+
+v_mul_f32_sdwa v5, v1, -v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x06,0x16]
+
+v_mul_f32_sdwa v5, v1, |v2| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0a,0x01,0x06,0x06,0x26]
+
+v_mul_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0xe4,0x00,0x00]
+
+v_mul_f32_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x0b,0x01,0xe4,0x00,0x00]
+
+v_mul_f32_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0xff,0xe4,0x00,0x00]
+
+v_mul_f32_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x0a,0x01,0xe4,0x00,0x00]
+
+v_mul_f32_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0x1b,0x00,0x00]
+
+v_mul_f32_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0x40,0x01,0x00]
+
+v_mul_f32_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0x41,0x01,0x00]
+
+v_mul_f32_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0x42,0x01,0x00]
+
+v_mul_f32_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0x43,0x01,0x00]
+
+v_mul_f32_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0x30,0x01,0x00]
+
+v_mul_f32_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0x34,0x01,0x00]
+
+v_mul_f32_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0x38,0x01,0x00]
+
+v_mul_f32_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0x3c,0x01,0x00]
+
+v_mul_f32_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0x01,0x01,0x00]
+
+v_mul_f32_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0x0f,0x01,0x00]
+
+v_mul_f32_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0x11,0x01,0x00]
+
+v_mul_f32_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0x1f,0x01,0x00]
+
+v_mul_f32_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0x21,0x01,0x00]
+
+v_mul_f32_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0x2f,0x01,0x00]
+
+v_mul_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0xe4,0x00,0x10]
+
+v_mul_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0xe4,0x00,0x30]
+
+v_mul_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0xe4,0x00,0xf0]
+
+v_mul_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0xe4,0x00,0xf0]
+
+v_mul_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0xe4,0x00,0x01]
+
+v_mul_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0xe4,0x00,0x03]
+
+v_mul_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0xe4,0x00,0x0f]
+
+v_mul_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0xe4,0x00,0x0f]
+
+v_mul_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0xe4,0x08,0x00]
+
+v_mul_f32_dpp v5, -v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0xe4,0x10,0x00]
+
+v_mul_f32_dpp v5, |v1|, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0xe4,0x20,0x00]
+
+v_mul_f32_dpp v5, v1, -v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0xe4,0x40,0x00]
+
+v_mul_f32_dpp v5, v1, |v2| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0a,0x01,0xe4,0x80,0x00]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x06,0x06,0x06]
+
+v_mul_i32_i24_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x0d,0x01,0x06,0x06,0x06]
+
+v_mul_i32_i24_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0xff,0x06,0x06,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x0c,0x01,0x06,0x06,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x26,0x06,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x06,0x06,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x00,0x06,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x01,0x06,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x02,0x06,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x03,0x06,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x04,0x06,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x05,0x06,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x0e,0x06,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x16,0x06,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x16,0x06,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x06,0x06,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x06,0x00,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x06,0x01,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x06,0x02,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x06,0x03,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x06,0x04,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x06,0x05,0x06]
+
+v_mul_i32_i24_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x06,0x0e,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x06,0x06,0x06]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x06,0x06,0x00]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x06,0x06,0x01]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x06,0x06,0x02]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x06,0x06,0x03]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x06,0x06,0x04]
+
+v_mul_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x06,0x06,0x05]
+
+v_mul_i32_i24_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0c,0x01,0x06,0x06,0x0e]
+
+v_mul_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0xe4,0x00,0x00]
+
+v_mul_i32_i24_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x0d,0x01,0xe4,0x00,0x00]
+
+v_mul_i32_i24_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0xff,0xe4,0x00,0x00]
+
+v_mul_i32_i24_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x0c,0x01,0xe4,0x00,0x00]
+
+v_mul_i32_i24_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0x1b,0x00,0x00]
+
+v_mul_i32_i24_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0x40,0x01,0x00]
+
+v_mul_i32_i24_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0x41,0x01,0x00]
+
+v_mul_i32_i24_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0x42,0x01,0x00]
+
+v_mul_i32_i24_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0x43,0x01,0x00]
+
+v_mul_i32_i24_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0x30,0x01,0x00]
+
+v_mul_i32_i24_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0x34,0x01,0x00]
+
+v_mul_i32_i24_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0x38,0x01,0x00]
+
+v_mul_i32_i24_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0x3c,0x01,0x00]
+
+v_mul_i32_i24_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0x01,0x01,0x00]
+
+v_mul_i32_i24_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0x0f,0x01,0x00]
+
+v_mul_i32_i24_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0x11,0x01,0x00]
+
+v_mul_i32_i24_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0x1f,0x01,0x00]
+
+v_mul_i32_i24_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0x21,0x01,0x00]
+
+v_mul_i32_i24_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0x2f,0x01,0x00]
+
+v_mul_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0xe4,0x00,0x10]
+
+v_mul_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0xe4,0x00,0x30]
+
+v_mul_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0xe4,0x00,0xf0]
+
+v_mul_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0xe4,0x00,0xf0]
+
+v_mul_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0xe4,0x00,0x01]
+
+v_mul_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0xe4,0x00,0x03]
+
+v_mul_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0xe4,0x00,0x0f]
+
+v_mul_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0xe4,0x00,0x0f]
+
+v_mul_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x0c,0x01,0xe4,0x08,0x00]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x06,0x06,0x06]
+
+v_mul_hi_i32_i24_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x0f,0x01,0x06,0x06,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0xff,0x06,0x06,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x0e,0x01,0x06,0x06,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x26,0x06,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x06,0x06,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x00,0x06,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x01,0x06,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x02,0x06,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x03,0x06,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x04,0x06,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x05,0x06,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x0e,0x06,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x16,0x06,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x16,0x06,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x06,0x06,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x06,0x00,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x06,0x01,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x06,0x02,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x06,0x03,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x06,0x04,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x06,0x05,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x06,0x0e,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x06,0x06,0x06]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x06,0x06,0x00]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x06,0x06,0x01]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x06,0x06,0x02]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x06,0x06,0x03]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x06,0x06,0x04]
+
+v_mul_hi_i32_i24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x06,0x06,0x05]
+
+v_mul_hi_i32_i24_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x0e,0x01,0x06,0x06,0x0e]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0xe4,0x00,0x00]
+
+v_mul_hi_i32_i24_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x0f,0x01,0xe4,0x00,0x00]
+
+v_mul_hi_i32_i24_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0xff,0xe4,0x00,0x00]
+
+v_mul_hi_i32_i24_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x0e,0x01,0xe4,0x00,0x00]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0x1b,0x00,0x00]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0x40,0x01,0x00]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0x41,0x01,0x00]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0x42,0x01,0x00]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0x43,0x01,0x00]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0x30,0x01,0x00]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0x34,0x01,0x00]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0x38,0x01,0x00]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0x3c,0x01,0x00]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0x01,0x01,0x00]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0x0f,0x01,0x00]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0x11,0x01,0x00]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0x1f,0x01,0x00]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0x21,0x01,0x00]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0x2f,0x01,0x00]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0xe4,0x00,0x10]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0xe4,0x00,0x30]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0xe4,0x00,0xf0]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0xe4,0x00,0xf0]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0xe4,0x00,0x01]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0xe4,0x00,0x03]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0xe4,0x00,0x0f]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0xe4,0x00,0x0f]
+
+v_mul_hi_i32_i24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x0e,0x01,0xe4,0x08,0x00]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x06,0x06,0x06]
+
+v_mul_u32_u24_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x11,0x01,0x06,0x06,0x06]
+
+v_mul_u32_u24_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0xff,0x06,0x06,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x10,0x01,0x06,0x06,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x26,0x06,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x06,0x06,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x00,0x06,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x01,0x06,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x02,0x06,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x03,0x06,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x04,0x06,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x05,0x06,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x0e,0x06,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x16,0x06,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x16,0x06,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x06,0x06,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x06,0x00,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x06,0x01,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x06,0x02,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x06,0x03,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x06,0x04,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x06,0x05,0x06]
+
+v_mul_u32_u24_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x06,0x0e,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x06,0x06,0x06]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x06,0x06,0x00]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x06,0x06,0x01]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x06,0x06,0x02]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x06,0x06,0x03]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x06,0x06,0x04]
+
+v_mul_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x06,0x06,0x05]
+
+v_mul_u32_u24_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x10,0x01,0x06,0x06,0x0e]
+
+v_mul_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0xe4,0x00,0x00]
+
+v_mul_u32_u24_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x11,0x01,0xe4,0x00,0x00]
+
+v_mul_u32_u24_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0xff,0xe4,0x00,0x00]
+
+v_mul_u32_u24_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x10,0x01,0xe4,0x00,0x00]
+
+v_mul_u32_u24_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0x1b,0x00,0x00]
+
+v_mul_u32_u24_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0x40,0x01,0x00]
+
+v_mul_u32_u24_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0x41,0x01,0x00]
+
+v_mul_u32_u24_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0x42,0x01,0x00]
+
+v_mul_u32_u24_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0x43,0x01,0x00]
+
+v_mul_u32_u24_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0x30,0x01,0x00]
+
+v_mul_u32_u24_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0x34,0x01,0x00]
+
+v_mul_u32_u24_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0x38,0x01,0x00]
+
+v_mul_u32_u24_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0x3c,0x01,0x00]
+
+v_mul_u32_u24_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0x01,0x01,0x00]
+
+v_mul_u32_u24_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0x0f,0x01,0x00]
+
+v_mul_u32_u24_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0x11,0x01,0x00]
+
+v_mul_u32_u24_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0x1f,0x01,0x00]
+
+v_mul_u32_u24_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0x21,0x01,0x00]
+
+v_mul_u32_u24_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0x2f,0x01,0x00]
+
+v_mul_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0xe4,0x00,0x10]
+
+v_mul_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0xe4,0x00,0x30]
+
+v_mul_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0xe4,0x00,0xf0]
+
+v_mul_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0xe4,0x00,0xf0]
+
+v_mul_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0xe4,0x00,0x01]
+
+v_mul_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0xe4,0x00,0x03]
+
+v_mul_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0xe4,0x00,0x0f]
+
+v_mul_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0xe4,0x00,0x0f]
+
+v_mul_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x10,0x01,0xe4,0x08,0x00]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x06,0x06,0x06]
+
+v_mul_hi_u32_u24_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x13,0x01,0x06,0x06,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0xff,0x06,0x06,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x12,0x01,0x06,0x06,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x26,0x06,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x06,0x06,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x00,0x06,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x01,0x06,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x02,0x06,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x03,0x06,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x04,0x06,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x05,0x06,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x0e,0x06,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x16,0x06,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x16,0x06,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x06,0x06,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x06,0x00,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x06,0x01,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x06,0x02,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x06,0x03,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x06,0x04,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x06,0x05,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x06,0x0e,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x06,0x06,0x06]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x06,0x06,0x00]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x06,0x06,0x01]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x06,0x06,0x02]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x06,0x06,0x03]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x06,0x06,0x04]
+
+v_mul_hi_u32_u24_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x06,0x06,0x05]
+
+v_mul_hi_u32_u24_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x12,0x01,0x06,0x06,0x0e]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0xe4,0x00,0x00]
+
+v_mul_hi_u32_u24_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x13,0x01,0xe4,0x00,0x00]
+
+v_mul_hi_u32_u24_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0xff,0xe4,0x00,0x00]
+
+v_mul_hi_u32_u24_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x12,0x01,0xe4,0x00,0x00]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0x1b,0x00,0x00]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0x40,0x01,0x00]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0x41,0x01,0x00]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0x42,0x01,0x00]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0x43,0x01,0x00]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0x30,0x01,0x00]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0x34,0x01,0x00]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0x38,0x01,0x00]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0x3c,0x01,0x00]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0x01,0x01,0x00]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0x0f,0x01,0x00]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0x11,0x01,0x00]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0x1f,0x01,0x00]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0x21,0x01,0x00]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0x2f,0x01,0x00]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0xe4,0x00,0x10]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0xe4,0x00,0x30]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0xe4,0x00,0xf0]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0xe4,0x00,0xf0]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0xe4,0x00,0x01]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0xe4,0x00,0x03]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0xe4,0x00,0x0f]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0xe4,0x00,0x0f]
+
+v_mul_hi_u32_u24_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x12,0x01,0xe4,0x08,0x00]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x06,0x06]
+
+v_min_f32_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x15,0x01,0x06,0x06,0x06]
+
+v_min_f32_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0xff,0x06,0x06,0x06]
+
+v_min_f32_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x14,0x01,0x06,0x06,0x06]
+
+v_min_f32_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x26,0x06,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x06,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x00,0x06,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x01,0x06,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x02,0x06,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x03,0x06,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x04,0x06,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x05,0x06,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x0e,0x06,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x16,0x06,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x16,0x06,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x06,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x00,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x01,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x02,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x03,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x04,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x05,0x06]
+
+v_min_f32_sdwa v5, -v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x16,0x06]
+
+v_min_f32_sdwa v5, |v1|, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x26,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x06,0x06]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x06,0x00]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x06,0x01]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x06,0x02]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x06,0x03]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x06,0x04]
+
+v_min_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x06,0x05]
+
+v_min_f32_sdwa v5, v1, -v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x06,0x16]
+
+v_min_f32_sdwa v5, v1, |v2| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x14,0x01,0x06,0x06,0x26]
+
+v_min_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0xe4,0x00,0x00]
+
+v_min_f32_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x15,0x01,0xe4,0x00,0x00]
+
+v_min_f32_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0xff,0xe4,0x00,0x00]
+
+v_min_f32_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x14,0x01,0xe4,0x00,0x00]
+
+v_min_f32_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0x1b,0x00,0x00]
+
+v_min_f32_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0x40,0x01,0x00]
+
+v_min_f32_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0x41,0x01,0x00]
+
+v_min_f32_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0x42,0x01,0x00]
+
+v_min_f32_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0x43,0x01,0x00]
+
+v_min_f32_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0x30,0x01,0x00]
+
+v_min_f32_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0x34,0x01,0x00]
+
+v_min_f32_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0x38,0x01,0x00]
+
+v_min_f32_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0x3c,0x01,0x00]
+
+v_min_f32_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0x01,0x01,0x00]
+
+v_min_f32_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0x0f,0x01,0x00]
+
+v_min_f32_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0x11,0x01,0x00]
+
+v_min_f32_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0x1f,0x01,0x00]
+
+v_min_f32_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0x21,0x01,0x00]
+
+v_min_f32_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0x2f,0x01,0x00]
+
+v_min_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0xe4,0x00,0x10]
+
+v_min_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0xe4,0x00,0x30]
+
+v_min_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0xe4,0x00,0xf0]
+
+v_min_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0xe4,0x00,0xf0]
+
+v_min_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0xe4,0x00,0x01]
+
+v_min_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0xe4,0x00,0x03]
+
+v_min_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0xe4,0x00,0x0f]
+
+v_min_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0xe4,0x00,0x0f]
+
+v_min_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0xe4,0x08,0x00]
+
+v_min_f32_dpp v5, -v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0xe4,0x10,0x00]
+
+v_min_f32_dpp v5, |v1|, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0xe4,0x20,0x00]
+
+v_min_f32_dpp v5, v1, -v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0xe4,0x40,0x00]
+
+v_min_f32_dpp v5, v1, |v2| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x14,0x01,0xe4,0x80,0x00]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x06,0x06]
+
+v_max_f32_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x17,0x01,0x06,0x06,0x06]
+
+v_max_f32_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0xff,0x06,0x06,0x06]
+
+v_max_f32_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x16,0x01,0x06,0x06,0x06]
+
+v_max_f32_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x26,0x06,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x06,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x00,0x06,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x01,0x06,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x02,0x06,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x03,0x06,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x04,0x06,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x05,0x06,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x0e,0x06,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x16,0x06,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x16,0x06,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x06,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x00,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x01,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x02,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x03,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x04,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x05,0x06]
+
+v_max_f32_sdwa v5, -v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x16,0x06]
+
+v_max_f32_sdwa v5, |v1|, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x26,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x06,0x06]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x06,0x00]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x06,0x01]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x06,0x02]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x06,0x03]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x06,0x04]
+
+v_max_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x06,0x05]
+
+v_max_f32_sdwa v5, v1, -v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x06,0x16]
+
+v_max_f32_sdwa v5, v1, |v2| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x16,0x01,0x06,0x06,0x26]
+
+v_max_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0xe4,0x00,0x00]
+
+v_max_f32_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x17,0x01,0xe4,0x00,0x00]
+
+v_max_f32_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0xff,0xe4,0x00,0x00]
+
+v_max_f32_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x16,0x01,0xe4,0x00,0x00]
+
+v_max_f32_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0x1b,0x00,0x00]
+
+v_max_f32_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0x40,0x01,0x00]
+
+v_max_f32_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0x41,0x01,0x00]
+
+v_max_f32_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0x42,0x01,0x00]
+
+v_max_f32_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0x43,0x01,0x00]
+
+v_max_f32_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0x30,0x01,0x00]
+
+v_max_f32_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0x34,0x01,0x00]
+
+v_max_f32_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0x38,0x01,0x00]
+
+v_max_f32_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0x3c,0x01,0x00]
+
+v_max_f32_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0x01,0x01,0x00]
+
+v_max_f32_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0x0f,0x01,0x00]
+
+v_max_f32_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0x11,0x01,0x00]
+
+v_max_f32_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0x1f,0x01,0x00]
+
+v_max_f32_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0x21,0x01,0x00]
+
+v_max_f32_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0x2f,0x01,0x00]
+
+v_max_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0xe4,0x00,0x10]
+
+v_max_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0xe4,0x00,0x30]
+
+v_max_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0xe4,0x00,0xf0]
+
+v_max_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0xe4,0x00,0xf0]
+
+v_max_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0xe4,0x00,0x01]
+
+v_max_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0xe4,0x00,0x03]
+
+v_max_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0xe4,0x00,0x0f]
+
+v_max_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0xe4,0x00,0x0f]
+
+v_max_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0xe4,0x08,0x00]
+
+v_max_f32_dpp v5, -v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0xe4,0x10,0x00]
+
+v_max_f32_dpp v5, |v1|, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0xe4,0x20,0x00]
+
+v_max_f32_dpp v5, v1, -v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0xe4,0x40,0x00]
+
+v_max_f32_dpp v5, v1, |v2| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x16,0x01,0xe4,0x80,0x00]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x06,0x06,0x06]
+
+v_min_i32_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x19,0x01,0x06,0x06,0x06]
+
+v_min_i32_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0xff,0x06,0x06,0x06]
+
+v_min_i32_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x18,0x01,0x06,0x06,0x06]
+
+v_min_i32_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x26,0x06,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x06,0x06,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x00,0x06,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x01,0x06,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x02,0x06,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x03,0x06,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x04,0x06,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x05,0x06,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x0e,0x06,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x16,0x06,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x16,0x06,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x06,0x06,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x06,0x00,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x06,0x01,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x06,0x02,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x06,0x03,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x06,0x04,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x06,0x05,0x06]
+
+v_min_i32_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x06,0x0e,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x06,0x06,0x06]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x06,0x06,0x00]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x06,0x06,0x01]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x06,0x06,0x02]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x06,0x06,0x03]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x06,0x06,0x04]
+
+v_min_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x06,0x06,0x05]
+
+v_min_i32_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x18,0x01,0x06,0x06,0x0e]
+
+v_min_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0xe4,0x00,0x00]
+
+v_min_i32_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x19,0x01,0xe4,0x00,0x00]
+
+v_min_i32_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0xff,0xe4,0x00,0x00]
+
+v_min_i32_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x18,0x01,0xe4,0x00,0x00]
+
+v_min_i32_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0x1b,0x00,0x00]
+
+v_min_i32_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0x40,0x01,0x00]
+
+v_min_i32_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0x41,0x01,0x00]
+
+v_min_i32_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0x42,0x01,0x00]
+
+v_min_i32_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0x43,0x01,0x00]
+
+v_min_i32_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0x30,0x01,0x00]
+
+v_min_i32_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0x34,0x01,0x00]
+
+v_min_i32_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0x38,0x01,0x00]
+
+v_min_i32_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0x3c,0x01,0x00]
+
+v_min_i32_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0x01,0x01,0x00]
+
+v_min_i32_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0x0f,0x01,0x00]
+
+v_min_i32_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0x11,0x01,0x00]
+
+v_min_i32_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0x1f,0x01,0x00]
+
+v_min_i32_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0x21,0x01,0x00]
+
+v_min_i32_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0x2f,0x01,0x00]
+
+v_min_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0xe4,0x00,0x10]
+
+v_min_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0xe4,0x00,0x30]
+
+v_min_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0xe4,0x00,0xf0]
+
+v_min_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0xe4,0x00,0xf0]
+
+v_min_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0xe4,0x00,0x01]
+
+v_min_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0xe4,0x00,0x03]
+
+v_min_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0xe4,0x00,0x0f]
+
+v_min_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0xe4,0x00,0x0f]
+
+v_min_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x18,0x01,0xe4,0x08,0x00]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x06,0x06,0x06]
+
+v_max_i32_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x1b,0x01,0x06,0x06,0x06]
+
+v_max_i32_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0xff,0x06,0x06,0x06]
+
+v_max_i32_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x1a,0x01,0x06,0x06,0x06]
+
+v_max_i32_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x26,0x06,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x06,0x06,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x00,0x06,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x01,0x06,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x02,0x06,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x03,0x06,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x04,0x06,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x05,0x06,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x0e,0x06,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x16,0x06,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x16,0x06,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x06,0x06,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x06,0x00,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x06,0x01,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x06,0x02,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x06,0x03,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x06,0x04,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x06,0x05,0x06]
+
+v_max_i32_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x06,0x0e,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x06,0x06,0x06]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x06,0x06,0x00]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x06,0x06,0x01]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x06,0x06,0x02]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x06,0x06,0x03]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x06,0x06,0x04]
+
+v_max_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x06,0x06,0x05]
+
+v_max_i32_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1a,0x01,0x06,0x06,0x0e]
+
+v_max_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0xe4,0x00,0x00]
+
+v_max_i32_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x1b,0x01,0xe4,0x00,0x00]
+
+v_max_i32_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0xff,0xe4,0x00,0x00]
+
+v_max_i32_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x1a,0x01,0xe4,0x00,0x00]
+
+v_max_i32_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0x1b,0x00,0x00]
+
+v_max_i32_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0x40,0x01,0x00]
+
+v_max_i32_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0x41,0x01,0x00]
+
+v_max_i32_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0x42,0x01,0x00]
+
+v_max_i32_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0x43,0x01,0x00]
+
+v_max_i32_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0x30,0x01,0x00]
+
+v_max_i32_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0x34,0x01,0x00]
+
+v_max_i32_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0x38,0x01,0x00]
+
+v_max_i32_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0x3c,0x01,0x00]
+
+v_max_i32_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0x01,0x01,0x00]
+
+v_max_i32_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0x0f,0x01,0x00]
+
+v_max_i32_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0x11,0x01,0x00]
+
+v_max_i32_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0x1f,0x01,0x00]
+
+v_max_i32_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0x21,0x01,0x00]
+
+v_max_i32_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0x2f,0x01,0x00]
+
+v_max_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0xe4,0x00,0x10]
+
+v_max_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0xe4,0x00,0x30]
+
+v_max_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0xe4,0x00,0xf0]
+
+v_max_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0xe4,0x00,0xf0]
+
+v_max_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0xe4,0x00,0x01]
+
+v_max_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0xe4,0x00,0x03]
+
+v_max_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0xe4,0x00,0x0f]
+
+v_max_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0xe4,0x00,0x0f]
+
+v_max_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x1a,0x01,0xe4,0x08,0x00]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x06,0x06,0x06]
+
+v_min_u32_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x1d,0x01,0x06,0x06,0x06]
+
+v_min_u32_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0xff,0x06,0x06,0x06]
+
+v_min_u32_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x1c,0x01,0x06,0x06,0x06]
+
+v_min_u32_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x26,0x06,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x06,0x06,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x00,0x06,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x01,0x06,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x02,0x06,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x03,0x06,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x04,0x06,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x05,0x06,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x0e,0x06,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x16,0x06,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x16,0x06,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x06,0x06,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x06,0x00,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x06,0x01,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x06,0x02,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x06,0x03,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x06,0x04,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x06,0x05,0x06]
+
+v_min_u32_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x06,0x0e,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x06,0x06,0x06]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x06,0x06,0x00]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x06,0x06,0x01]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x06,0x06,0x02]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x06,0x06,0x03]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x06,0x06,0x04]
+
+v_min_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x06,0x06,0x05]
+
+v_min_u32_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1c,0x01,0x06,0x06,0x0e]
+
+v_min_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0xe4,0x00,0x00]
+
+v_min_u32_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x1d,0x01,0xe4,0x00,0x00]
+
+v_min_u32_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0xff,0xe4,0x00,0x00]
+
+v_min_u32_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x1c,0x01,0xe4,0x00,0x00]
+
+v_min_u32_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0x1b,0x00,0x00]
+
+v_min_u32_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0x40,0x01,0x00]
+
+v_min_u32_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0x41,0x01,0x00]
+
+v_min_u32_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0x42,0x01,0x00]
+
+v_min_u32_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0x43,0x01,0x00]
+
+v_min_u32_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0x30,0x01,0x00]
+
+v_min_u32_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0x34,0x01,0x00]
+
+v_min_u32_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0x38,0x01,0x00]
+
+v_min_u32_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0x3c,0x01,0x00]
+
+v_min_u32_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0x01,0x01,0x00]
+
+v_min_u32_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0x0f,0x01,0x00]
+
+v_min_u32_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0x11,0x01,0x00]
+
+v_min_u32_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0x1f,0x01,0x00]
+
+v_min_u32_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0x21,0x01,0x00]
+
+v_min_u32_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0x2f,0x01,0x00]
+
+v_min_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0xe4,0x00,0x10]
+
+v_min_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0xe4,0x00,0x30]
+
+v_min_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0xe4,0x00,0xf0]
+
+v_min_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0xe4,0x00,0xf0]
+
+v_min_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0xe4,0x00,0x01]
+
+v_min_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0xe4,0x00,0x03]
+
+v_min_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0xe4,0x00,0x0f]
+
+v_min_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0xe4,0x00,0x0f]
+
+v_min_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x1c,0x01,0xe4,0x08,0x00]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x06,0x06,0x06]
+
+v_max_u32_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x1f,0x01,0x06,0x06,0x06]
+
+v_max_u32_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0xff,0x06,0x06,0x06]
+
+v_max_u32_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x1e,0x01,0x06,0x06,0x06]
+
+v_max_u32_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x26,0x06,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x06,0x06,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x00,0x06,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x01,0x06,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x02,0x06,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x03,0x06,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x04,0x06,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x05,0x06,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x0e,0x06,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x16,0x06,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x16,0x06,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x06,0x06,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x06,0x00,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x06,0x01,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x06,0x02,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x06,0x03,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x06,0x04,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x06,0x05,0x06]
+
+v_max_u32_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x06,0x0e,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x06,0x06,0x06]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x06,0x06,0x00]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x06,0x06,0x01]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x06,0x06,0x02]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x06,0x06,0x03]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x06,0x06,0x04]
+
+v_max_u32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x06,0x06,0x05]
+
+v_max_u32_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x1e,0x01,0x06,0x06,0x0e]
+
+v_max_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0xe4,0x00,0x00]
+
+v_max_u32_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x1f,0x01,0xe4,0x00,0x00]
+
+v_max_u32_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0xff,0xe4,0x00,0x00]
+
+v_max_u32_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x1e,0x01,0xe4,0x00,0x00]
+
+v_max_u32_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0x1b,0x00,0x00]
+
+v_max_u32_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0x40,0x01,0x00]
+
+v_max_u32_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0x41,0x01,0x00]
+
+v_max_u32_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0x42,0x01,0x00]
+
+v_max_u32_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0x43,0x01,0x00]
+
+v_max_u32_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0x30,0x01,0x00]
+
+v_max_u32_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0x34,0x01,0x00]
+
+v_max_u32_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0x38,0x01,0x00]
+
+v_max_u32_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0x3c,0x01,0x00]
+
+v_max_u32_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0x01,0x01,0x00]
+
+v_max_u32_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0x0f,0x01,0x00]
+
+v_max_u32_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0x11,0x01,0x00]
+
+v_max_u32_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0x1f,0x01,0x00]
+
+v_max_u32_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0x21,0x01,0x00]
+
+v_max_u32_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0x2f,0x01,0x00]
+
+v_max_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0xe4,0x00,0x10]
+
+v_max_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0xe4,0x00,0x30]
+
+v_max_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0xe4,0x00,0xf0]
+
+v_max_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0xe4,0x00,0xf0]
+
+v_max_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0xe4,0x00,0x01]
+
+v_max_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0xe4,0x00,0x03]
+
+v_max_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0xe4,0x00,0x0f]
+
+v_max_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0xe4,0x00,0x0f]
+
+v_max_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x1e,0x01,0xe4,0x08,0x00]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x06,0x06,0x06]
+
+v_lshrrev_b32_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x21,0x01,0x06,0x06,0x06]
+
+v_lshrrev_b32_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0xff,0x06,0x06,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x20,0x01,0x06,0x06,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x26,0x06,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x06,0x06,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x00,0x06,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x01,0x06,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x02,0x06,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x03,0x06,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x04,0x06,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x05,0x06,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x0e,0x06,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x16,0x06,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x16,0x06,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x06,0x06,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x06,0x00,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x06,0x01,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x06,0x02,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x06,0x03,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x06,0x04,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x06,0x05,0x06]
+
+v_lshrrev_b32_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x06,0x0e,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x06,0x06,0x06]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x06,0x06,0x00]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x06,0x06,0x01]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x06,0x06,0x02]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x06,0x06,0x03]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x06,0x06,0x04]
+
+v_lshrrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x06,0x06,0x05]
+
+v_lshrrev_b32_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x20,0x01,0x06,0x06,0x0e]
+
+v_lshrrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0xe4,0x00,0x00]
+
+v_lshrrev_b32_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x21,0x01,0xe4,0x00,0x00]
+
+v_lshrrev_b32_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0xff,0xe4,0x00,0x00]
+
+v_lshrrev_b32_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x20,0x01,0xe4,0x00,0x00]
+
+v_lshrrev_b32_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0x1b,0x00,0x00]
+
+v_lshrrev_b32_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0x40,0x01,0x00]
+
+v_lshrrev_b32_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0x41,0x01,0x00]
+
+v_lshrrev_b32_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0x42,0x01,0x00]
+
+v_lshrrev_b32_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0x43,0x01,0x00]
+
+v_lshrrev_b32_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0x30,0x01,0x00]
+
+v_lshrrev_b32_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0x34,0x01,0x00]
+
+v_lshrrev_b32_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0x38,0x01,0x00]
+
+v_lshrrev_b32_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0x3c,0x01,0x00]
+
+v_lshrrev_b32_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0x01,0x01,0x00]
+
+v_lshrrev_b32_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0x0f,0x01,0x00]
+
+v_lshrrev_b32_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0x11,0x01,0x00]
+
+v_lshrrev_b32_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0x1f,0x01,0x00]
+
+v_lshrrev_b32_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0x21,0x01,0x00]
+
+v_lshrrev_b32_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0x2f,0x01,0x00]
+
+v_lshrrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0xe4,0x00,0x10]
+
+v_lshrrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0xe4,0x00,0x30]
+
+v_lshrrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0xe4,0x00,0xf0]
+
+v_lshrrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0xe4,0x00,0xf0]
+
+v_lshrrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0xe4,0x00,0x01]
+
+v_lshrrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0xe4,0x00,0x03]
+
+v_lshrrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0xe4,0x00,0x0f]
+
+v_lshrrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0xe4,0x00,0x0f]
+
+v_lshrrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x20,0x01,0xe4,0x08,0x00]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x06,0x06,0x06]
+
+v_ashrrev_i32_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x23,0x01,0x06,0x06,0x06]
+
+v_ashrrev_i32_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0xff,0x06,0x06,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x22,0x01,0x06,0x06,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x26,0x06,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x06,0x06,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x00,0x06,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x01,0x06,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x02,0x06,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x03,0x06,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x04,0x06,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x05,0x06,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x0e,0x06,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x16,0x06,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x16,0x06,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x06,0x06,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x06,0x00,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x06,0x01,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x06,0x02,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x06,0x03,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x06,0x04,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x06,0x05,0x06]
+
+v_ashrrev_i32_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x06,0x0e,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x06,0x06,0x06]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x06,0x06,0x00]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x06,0x06,0x01]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x06,0x06,0x02]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x06,0x06,0x03]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x06,0x06,0x04]
+
+v_ashrrev_i32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x06,0x06,0x05]
+
+v_ashrrev_i32_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x22,0x01,0x06,0x06,0x0e]
+
+v_ashrrev_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0xe4,0x00,0x00]
+
+v_ashrrev_i32_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x23,0x01,0xe4,0x00,0x00]
+
+v_ashrrev_i32_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0xff,0xe4,0x00,0x00]
+
+v_ashrrev_i32_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x22,0x01,0xe4,0x00,0x00]
+
+v_ashrrev_i32_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0x1b,0x00,0x00]
+
+v_ashrrev_i32_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0x40,0x01,0x00]
+
+v_ashrrev_i32_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0x41,0x01,0x00]
+
+v_ashrrev_i32_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0x42,0x01,0x00]
+
+v_ashrrev_i32_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0x43,0x01,0x00]
+
+v_ashrrev_i32_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0x30,0x01,0x00]
+
+v_ashrrev_i32_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0x34,0x01,0x00]
+
+v_ashrrev_i32_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0x38,0x01,0x00]
+
+v_ashrrev_i32_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0x3c,0x01,0x00]
+
+v_ashrrev_i32_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0x01,0x01,0x00]
+
+v_ashrrev_i32_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0x0f,0x01,0x00]
+
+v_ashrrev_i32_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0x11,0x01,0x00]
+
+v_ashrrev_i32_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0x1f,0x01,0x00]
+
+v_ashrrev_i32_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0x21,0x01,0x00]
+
+v_ashrrev_i32_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0x2f,0x01,0x00]
+
+v_ashrrev_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0xe4,0x00,0x10]
+
+v_ashrrev_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0xe4,0x00,0x30]
+
+v_ashrrev_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0xe4,0x00,0xf0]
+
+v_ashrrev_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0xe4,0x00,0xf0]
+
+v_ashrrev_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0xe4,0x00,0x01]
+
+v_ashrrev_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0xe4,0x00,0x03]
+
+v_ashrrev_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0xe4,0x00,0x0f]
+
+v_ashrrev_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0xe4,0x00,0x0f]
+
+v_ashrrev_i32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x22,0x01,0xe4,0x08,0x00]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x06,0x06,0x06]
+
+v_lshlrev_b32_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x25,0x01,0x06,0x06,0x06]
+
+v_lshlrev_b32_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0xff,0x06,0x06,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x24,0x01,0x06,0x06,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x26,0x06,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x06,0x06,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x00,0x06,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x01,0x06,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x02,0x06,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x03,0x06,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x04,0x06,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x05,0x06,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x0e,0x06,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x16,0x06,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x16,0x06,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x06,0x06,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x06,0x00,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x06,0x01,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x06,0x02,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x06,0x03,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x06,0x04,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x06,0x05,0x06]
+
+v_lshlrev_b32_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x06,0x0e,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x06,0x06,0x06]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x06,0x06,0x00]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x06,0x06,0x01]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x06,0x06,0x02]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x06,0x06,0x03]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x06,0x06,0x04]
+
+v_lshlrev_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x06,0x06,0x05]
+
+v_lshlrev_b32_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x24,0x01,0x06,0x06,0x0e]
+
+v_lshlrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0xe4,0x00,0x00]
+
+v_lshlrev_b32_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x25,0x01,0xe4,0x00,0x00]
+
+v_lshlrev_b32_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0xff,0xe4,0x00,0x00]
+
+v_lshlrev_b32_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x24,0x01,0xe4,0x00,0x00]
+
+v_lshlrev_b32_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0x1b,0x00,0x00]
+
+v_lshlrev_b32_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0x40,0x01,0x00]
+
+v_lshlrev_b32_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0x41,0x01,0x00]
+
+v_lshlrev_b32_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0x42,0x01,0x00]
+
+v_lshlrev_b32_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0x43,0x01,0x00]
+
+v_lshlrev_b32_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0x30,0x01,0x00]
+
+v_lshlrev_b32_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0x34,0x01,0x00]
+
+v_lshlrev_b32_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0x38,0x01,0x00]
+
+v_lshlrev_b32_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0x3c,0x01,0x00]
+
+v_lshlrev_b32_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0x01,0x01,0x00]
+
+v_lshlrev_b32_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0x0f,0x01,0x00]
+
+v_lshlrev_b32_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0x11,0x01,0x00]
+
+v_lshlrev_b32_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0x1f,0x01,0x00]
+
+v_lshlrev_b32_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0x21,0x01,0x00]
+
+v_lshlrev_b32_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0x2f,0x01,0x00]
+
+v_lshlrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0xe4,0x00,0x10]
+
+v_lshlrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0xe4,0x00,0x30]
+
+v_lshlrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0xe4,0x00,0xf0]
+
+v_lshlrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0xe4,0x00,0xf0]
+
+v_lshlrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0xe4,0x00,0x01]
+
+v_lshlrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0xe4,0x00,0x03]
+
+v_lshlrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0xe4,0x00,0x0f]
+
+v_lshlrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0xe4,0x00,0x0f]
+
+v_lshlrev_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x24,0x01,0xe4,0x08,0x00]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x06,0x06,0x06]
+
+v_and_b32_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x27,0x01,0x06,0x06,0x06]
+
+v_and_b32_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0xff,0x06,0x06,0x06]
+
+v_and_b32_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x26,0x01,0x06,0x06,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x06,0x06,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x00,0x06,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x01,0x06,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x02,0x06,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x03,0x06,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x04,0x06,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x05,0x06,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x0e,0x06,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x16,0x06,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x16,0x06,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x06,0x06,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x06,0x00,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x06,0x01,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x06,0x02,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x06,0x03,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x06,0x04,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x06,0x05,0x06]
+
+v_and_b32_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x06,0x0e,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x06,0x06,0x06]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x06,0x06,0x00]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x06,0x06,0x01]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x06,0x06,0x02]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x06,0x06,0x03]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x06,0x06,0x04]
+
+v_and_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x06,0x06,0x05]
+
+v_and_b32_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x26,0x01,0x06,0x06,0x0e]
+
+v_and_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0xe4,0x00,0x00]
+
+v_and_b32_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x27,0x01,0xe4,0x00,0x00]
+
+v_and_b32_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0xff,0xe4,0x00,0x00]
+
+v_and_b32_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x26,0x01,0xe4,0x00,0x00]
+
+v_and_b32_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0x1b,0x00,0x00]
+
+v_and_b32_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0x40,0x01,0x00]
+
+v_and_b32_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0x41,0x01,0x00]
+
+v_and_b32_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0x42,0x01,0x00]
+
+v_and_b32_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0x43,0x01,0x00]
+
+v_and_b32_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0x30,0x01,0x00]
+
+v_and_b32_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0x34,0x01,0x00]
+
+v_and_b32_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0x38,0x01,0x00]
+
+v_and_b32_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0x3c,0x01,0x00]
+
+v_and_b32_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0x01,0x01,0x00]
+
+v_and_b32_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0x0f,0x01,0x00]
+
+v_and_b32_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0x11,0x01,0x00]
+
+v_and_b32_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0x1f,0x01,0x00]
+
+v_and_b32_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0x21,0x01,0x00]
+
+v_and_b32_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0x2f,0x01,0x00]
+
+v_and_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0xe4,0x00,0x10]
+
+v_and_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0xe4,0x00,0x30]
+
+v_and_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0xe4,0x00,0xf0]
+
+v_and_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0xe4,0x00,0xf0]
+
+v_and_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0xe4,0x00,0x01]
+
+v_and_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0xe4,0x00,0x03]
+
+v_and_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0xe4,0x00,0x0f]
+
+v_and_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0xe4,0x00,0x0f]
+
+v_and_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x26,0x01,0xe4,0x08,0x00]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x06,0x06,0x06]
+
+v_or_b32_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x29,0x01,0x06,0x06,0x06]
+
+v_or_b32_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0xff,0x06,0x06,0x06]
+
+v_or_b32_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x28,0x01,0x06,0x06,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x06,0x06,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x00,0x06,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x01,0x06,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x02,0x06,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x03,0x06,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x04,0x06,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x05,0x06,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x0e,0x06,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x16,0x06,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x16,0x06,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x06,0x06,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x06,0x00,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x06,0x01,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x06,0x02,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x06,0x03,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x06,0x04,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x06,0x05,0x06]
+
+v_or_b32_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x06,0x0e,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x06,0x06,0x06]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x06,0x06,0x00]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x06,0x06,0x01]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x06,0x06,0x02]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x06,0x06,0x03]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x06,0x06,0x04]
+
+v_or_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x06,0x06,0x05]
+
+v_or_b32_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x28,0x01,0x06,0x06,0x0e]
+
+v_or_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0xe4,0x00,0x00]
+
+v_or_b32_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x29,0x01,0xe4,0x00,0x00]
+
+v_or_b32_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0xff,0xe4,0x00,0x00]
+
+v_or_b32_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x28,0x01,0xe4,0x00,0x00]
+
+v_or_b32_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0x1b,0x00,0x00]
+
+v_or_b32_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0x40,0x01,0x00]
+
+v_or_b32_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0x41,0x01,0x00]
+
+v_or_b32_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0x42,0x01,0x00]
+
+v_or_b32_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0x43,0x01,0x00]
+
+v_or_b32_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0x30,0x01,0x00]
+
+v_or_b32_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0x34,0x01,0x00]
+
+v_or_b32_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0x38,0x01,0x00]
+
+v_or_b32_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0x3c,0x01,0x00]
+
+v_or_b32_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0x01,0x01,0x00]
+
+v_or_b32_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0x0f,0x01,0x00]
+
+v_or_b32_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0x11,0x01,0x00]
+
+v_or_b32_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0x1f,0x01,0x00]
+
+v_or_b32_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0x21,0x01,0x00]
+
+v_or_b32_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0x2f,0x01,0x00]
+
+v_or_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0xe4,0x00,0x10]
+
+v_or_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0xe4,0x00,0x30]
+
+v_or_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0xe4,0x00,0xf0]
+
+v_or_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0xe4,0x00,0xf0]
+
+v_or_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0xe4,0x00,0x01]
+
+v_or_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0xe4,0x00,0x03]
+
+v_or_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0xe4,0x00,0x0f]
+
+v_or_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0xe4,0x00,0x0f]
+
+v_or_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x28,0x01,0xe4,0x08,0x00]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x06,0x06,0x06]
+
+v_xor_b32_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x2b,0x01,0x06,0x06,0x06]
+
+v_xor_b32_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0xff,0x06,0x06,0x06]
+
+v_xor_b32_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x2a,0x01,0x06,0x06,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x06,0x06,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x00,0x06,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x01,0x06,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x02,0x06,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x03,0x06,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x04,0x06,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x05,0x06,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x0e,0x06,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x16,0x06,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x16,0x06,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x06,0x06,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x06,0x00,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x06,0x01,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x06,0x02,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x06,0x03,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x06,0x04,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x06,0x05,0x06]
+
+v_xor_b32_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x06,0x0e,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x06,0x06,0x06]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x06,0x06,0x00]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x06,0x06,0x01]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x06,0x06,0x02]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x06,0x06,0x03]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x06,0x06,0x04]
+
+v_xor_b32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x06,0x06,0x05]
+
+v_xor_b32_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2a,0x01,0x06,0x06,0x0e]
+
+v_xor_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0xe4,0x00,0x00]
+
+v_xor_b32_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x2b,0x01,0xe4,0x00,0x00]
+
+v_xor_b32_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0xff,0xe4,0x00,0x00]
+
+v_xor_b32_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x2a,0x01,0xe4,0x00,0x00]
+
+v_xor_b32_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0x1b,0x00,0x00]
+
+v_xor_b32_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0x40,0x01,0x00]
+
+v_xor_b32_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0x41,0x01,0x00]
+
+v_xor_b32_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0x42,0x01,0x00]
+
+v_xor_b32_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0x43,0x01,0x00]
+
+v_xor_b32_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0x30,0x01,0x00]
+
+v_xor_b32_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0x34,0x01,0x00]
+
+v_xor_b32_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0x38,0x01,0x00]
+
+v_xor_b32_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0x3c,0x01,0x00]
+
+v_xor_b32_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0x01,0x01,0x00]
+
+v_xor_b32_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0x0f,0x01,0x00]
+
+v_xor_b32_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0x11,0x01,0x00]
+
+v_xor_b32_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0x1f,0x01,0x00]
+
+v_xor_b32_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0x21,0x01,0x00]
+
+v_xor_b32_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0x2f,0x01,0x00]
+
+v_xor_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0xe4,0x00,0x10]
+
+v_xor_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0xe4,0x00,0x30]
+
+v_xor_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0xe4,0x00,0xf0]
+
+v_xor_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0xe4,0x00,0xf0]
+
+v_xor_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0xe4,0x00,0x01]
+
+v_xor_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0xe4,0x00,0x03]
+
+v_xor_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0xe4,0x00,0x0f]
+
+v_xor_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0xe4,0x00,0x0f]
+
+v_xor_b32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x2a,0x01,0xe4,0x08,0x00]
+
+v_mac_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x06,0x06]
+
+v_mac_f32_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x2d,0x01,0x06,0x06,0x06]
+
+v_mac_f32_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0xff,0x06,0x06,0x06]
+
+v_mac_f32_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x2c,0x01,0x06,0x06,0x06]
+
+v_mac_f32_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x26,0x06,0x06]
+
+v_mac_f32_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x06,0x06]
+
+v_mac_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x0e,0x06,0x06]
+
+v_mac_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x16,0x06,0x06]
+
+v_mac_f32_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x16,0x06,0x06]
+
+v_mac_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x06,0x06]
+
+v_mac_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x00,0x06]
+
+v_mac_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x01,0x06]
+
+v_mac_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x02,0x06]
+
+v_mac_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x03,0x06]
+
+v_mac_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x04,0x06]
+
+v_mac_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x05,0x06]
+
+v_mac_f32_sdwa v5, -v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x16,0x06]
+
+v_mac_f32_sdwa v5, |v1|, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x26,0x06]
+
+v_mac_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x06,0x06]
+
+v_mac_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x06,0x00]
+
+v_mac_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x06,0x01]
+
+v_mac_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x06,0x02]
+
+v_mac_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x06,0x03]
+
+v_mac_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x06,0x04]
+
+v_mac_f32_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x06,0x05]
+
+v_mac_f32_sdwa v5, v1, -v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x06,0x16]
+
+v_mac_f32_sdwa v5, v1, |v2| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x2c,0x01,0x06,0x06,0x26]
+
+v_mac_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0xe4,0x00,0x00]
+
+v_mac_f32_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x2d,0x01,0xe4,0x00,0x00]
+
+v_mac_f32_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0xff,0xe4,0x00,0x00]
+
+v_mac_f32_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x2c,0x01,0xe4,0x00,0x00]
+
+v_mac_f32_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0x1b,0x00,0x00]
+
+v_mac_f32_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0x40,0x01,0x00]
+
+v_mac_f32_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0x41,0x01,0x00]
+
+v_mac_f32_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0x42,0x01,0x00]
+
+v_mac_f32_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0x43,0x01,0x00]
+
+v_mac_f32_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0x30,0x01,0x00]
+
+v_mac_f32_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0x34,0x01,0x00]
+
+v_mac_f32_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0x38,0x01,0x00]
+
+v_mac_f32_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0x3c,0x01,0x00]
+
+v_mac_f32_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0x01,0x01,0x00]
+
+v_mac_f32_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0x0f,0x01,0x00]
+
+v_mac_f32_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0x11,0x01,0x00]
+
+v_mac_f32_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0x1f,0x01,0x00]
+
+v_mac_f32_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0x21,0x01,0x00]
+
+v_mac_f32_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0x2f,0x01,0x00]
+
+v_mac_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0xe4,0x00,0x10]
+
+v_mac_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0xe4,0x00,0x30]
+
+v_mac_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0xe4,0x00,0xf0]
+
+v_mac_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0xe4,0x00,0xf0]
+
+v_mac_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0xe4,0x00,0x01]
+
+v_mac_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0xe4,0x00,0x03]
+
+v_mac_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0xe4,0x00,0x0f]
+
+v_mac_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0xe4,0x00,0x0f]
+
+v_mac_f32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0xe4,0x08,0x00]
+
+v_mac_f32_dpp v5, -v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0xe4,0x10,0x00]
+
+v_mac_f32_dpp v5, |v1|, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0xe4,0x20,0x00]
+
+v_mac_f32_dpp v5, v1, -v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0xe4,0x40,0x00]
+
+v_mac_f32_dpp v5, v1, |v2| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x2c,0x01,0xe4,0x80,0x00]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x06,0x06,0x06]
+
+v_addc_u32_sdwa v255, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x39,0x01,0x06,0x06,0x06]
+
+v_addc_u32_sdwa v5, vcc, v255, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0xff,0x06,0x06,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v255, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x38,0x01,0x06,0x06,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x26,0x06,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x06,0x06,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x00,0x06,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x01,0x06,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x02,0x06,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x03,0x06,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x04,0x06,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x05,0x06,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x0e,0x06,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x16,0x06,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x16,0x06,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x06,0x06,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x06,0x00,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x06,0x01,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x06,0x02,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x06,0x03,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x06,0x04,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x06,0x05,0x06]
+
+v_addc_u32_sdwa v5, vcc, sext(v1), v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x06,0x0e,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x06,0x06,0x06]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x06,0x06,0x00]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x06,0x06,0x01]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x06,0x06,0x02]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x06,0x06,0x03]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x06,0x06,0x04]
+
+v_addc_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x06,0x06,0x05]
+
+v_addc_u32_sdwa v5, vcc, v1, sext(v2), vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x38,0x01,0x06,0x06,0x0e]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0xe4,0x00,0x00]
+
+v_addc_u32_dpp v255, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x39,0x01,0xe4,0x00,0x00]
+
+v_addc_u32_dpp v5, vcc, v255, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0xff,0xe4,0x00,0x00]
+
+v_addc_u32_dpp v5, vcc, v1, v255, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x38,0x01,0xe4,0x00,0x00]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0x1b,0x00,0x00]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0x40,0x01,0x00]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0x41,0x01,0x00]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0x42,0x01,0x00]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0x43,0x01,0x00]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0x30,0x01,0x00]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0x34,0x01,0x00]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0x38,0x01,0x00]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0x3c,0x01,0x00]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0x01,0x01,0x00]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0x0f,0x01,0x00]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0x11,0x01,0x00]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0x1f,0x01,0x00]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0x21,0x01,0x00]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0x2f,0x01,0x00]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0xe4,0x00,0x10]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0xe4,0x00,0x30]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0xe4,0x00,0xf0]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0xe4,0x00,0xf0]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0xe4,0x00,0x01]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0xe4,0x00,0x03]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0xe4,0x00,0x0f]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0xe4,0x00,0x0f]
+
+v_addc_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x38,0x01,0xe4,0x08,0x00]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x06,0x06,0x06]
+
+v_subb_u32_sdwa v255, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x3b,0x01,0x06,0x06,0x06]
+
+v_subb_u32_sdwa v5, vcc, v255, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0xff,0x06,0x06,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v255, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x3a,0x01,0x06,0x06,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x26,0x06,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x06,0x06,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x00,0x06,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x01,0x06,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x02,0x06,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x03,0x06,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x04,0x06,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x05,0x06,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x0e,0x06,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x16,0x06,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x16,0x06,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x06,0x06,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x06,0x00,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x06,0x01,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x06,0x02,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x06,0x03,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x06,0x04,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x06,0x05,0x06]
+
+v_subb_u32_sdwa v5, vcc, sext(v1), v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x06,0x0e,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x06,0x06,0x06]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x06,0x06,0x00]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x06,0x06,0x01]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x06,0x06,0x02]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x06,0x06,0x03]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x06,0x06,0x04]
+
+v_subb_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x06,0x06,0x05]
+
+v_subb_u32_sdwa v5, vcc, v1, sext(v2), vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3a,0x01,0x06,0x06,0x0e]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0xe4,0x00,0x00]
+
+v_subb_u32_dpp v255, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x3b,0x01,0xe4,0x00,0x00]
+
+v_subb_u32_dpp v5, vcc, v255, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0xff,0xe4,0x00,0x00]
+
+v_subb_u32_dpp v5, vcc, v1, v255, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x3a,0x01,0xe4,0x00,0x00]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0x1b,0x00,0x00]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0x40,0x01,0x00]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0x41,0x01,0x00]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0x42,0x01,0x00]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0x43,0x01,0x00]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0x30,0x01,0x00]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0x34,0x01,0x00]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0x38,0x01,0x00]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0x3c,0x01,0x00]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0x01,0x01,0x00]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0x0f,0x01,0x00]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0x11,0x01,0x00]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0x1f,0x01,0x00]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0x21,0x01,0x00]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0x2f,0x01,0x00]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0xe4,0x00,0x10]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0xe4,0x00,0x30]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0xe4,0x00,0xf0]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0xe4,0x00,0xf0]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0xe4,0x00,0x01]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0xe4,0x00,0x03]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0xe4,0x00,0x0f]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0xe4,0x00,0x0f]
+
+v_subb_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x3a,0x01,0xe4,0x08,0x00]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x06,0x06,0x06]
+
+v_subbrev_u32_sdwa v255, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x3d,0x01,0x06,0x06,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v255, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0xff,0x06,0x06,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v255, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x3c,0x01,0x06,0x06,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x26,0x06,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x06,0x06,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x00,0x06,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x01,0x06,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x02,0x06,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x03,0x06,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x04,0x06,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x05,0x06,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x0e,0x06,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x16,0x06,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x16,0x06,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x06,0x06,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x06,0x00,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x06,0x01,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x06,0x02,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x06,0x03,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x06,0x04,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x06,0x05,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, sext(v1), v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x06,0x0e,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x06,0x06,0x06]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x06,0x06,0x00]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x06,0x06,0x01]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x06,0x06,0x02]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x06,0x06,0x03]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x06,0x06,0x04]
+
+v_subbrev_u32_sdwa v5, vcc, v1, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x06,0x06,0x05]
+
+v_subbrev_u32_sdwa v5, vcc, v1, sext(v2), vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3c,0x01,0x06,0x06,0x0e]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0xe4,0x00,0x00]
+
+v_subbrev_u32_dpp v255, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x3d,0x01,0xe4,0x00,0x00]
+
+v_subbrev_u32_dpp v5, vcc, v255, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0xff,0xe4,0x00,0x00]
+
+v_subbrev_u32_dpp v5, vcc, v1, v255, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x3c,0x01,0xe4,0x00,0x00]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0x1b,0x00,0x00]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0x40,0x01,0x00]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0x41,0x01,0x00]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0x42,0x01,0x00]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0x43,0x01,0x00]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0x30,0x01,0x00]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0x34,0x01,0x00]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0x38,0x01,0x00]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0x3c,0x01,0x00]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0x01,0x01,0x00]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0x0f,0x01,0x00]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0x11,0x01,0x00]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0x1f,0x01,0x00]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0x21,0x01,0x00]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0x2f,0x01,0x00]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0xe4,0x00,0x10]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0xe4,0x00,0x30]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0xe4,0x00,0xf0]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0xe4,0x00,0xf0]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0xe4,0x00,0x01]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0xe4,0x00,0x03]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0xe4,0x00,0x0f]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0xe4,0x00,0x0f]
+
+v_subbrev_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x3c,0x01,0xe4,0x08,0x00]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x06,0x06]
+
+v_add_f16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x3f,0x01,0x06,0x06,0x06]
+
+v_add_f16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0xff,0x06,0x06,0x06]
+
+v_add_f16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x3e,0x01,0x06,0x06,0x06]
+
+v_add_f16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x26,0x06,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x06,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x00,0x06,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x01,0x06,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x02,0x06,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x03,0x06,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x04,0x06,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x05,0x06,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x0e,0x06,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x16,0x06,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x16,0x06,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x06,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x00,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x01,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x02,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x03,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x04,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x05,0x06]
+
+v_add_f16_sdwa v5, -v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x16,0x06]
+
+v_add_f16_sdwa v5, |v1|, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x26,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x06,0x06]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x06,0x00]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x06,0x01]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x06,0x02]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x06,0x03]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x06,0x04]
+
+v_add_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x06,0x05]
+
+v_add_f16_sdwa v5, v1, -v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x06,0x16]
+
+v_add_f16_sdwa v5, v1, |v2| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x3e,0x01,0x06,0x06,0x26]
+
+v_add_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0xe4,0x00,0x00]
+
+v_add_f16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x3f,0x01,0xe4,0x00,0x00]
+
+v_add_f16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0xff,0xe4,0x00,0x00]
+
+v_add_f16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x3e,0x01,0xe4,0x00,0x00]
+
+v_add_f16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0x1b,0x00,0x00]
+
+v_add_f16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0x40,0x01,0x00]
+
+v_add_f16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0x41,0x01,0x00]
+
+v_add_f16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0x42,0x01,0x00]
+
+v_add_f16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0x43,0x01,0x00]
+
+v_add_f16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0x30,0x01,0x00]
+
+v_add_f16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0x34,0x01,0x00]
+
+v_add_f16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0x38,0x01,0x00]
+
+v_add_f16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0x3c,0x01,0x00]
+
+v_add_f16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0x01,0x01,0x00]
+
+v_add_f16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0x0f,0x01,0x00]
+
+v_add_f16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0x11,0x01,0x00]
+
+v_add_f16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0x1f,0x01,0x00]
+
+v_add_f16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0x21,0x01,0x00]
+
+v_add_f16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0x2f,0x01,0x00]
+
+v_add_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0xe4,0x00,0x10]
+
+v_add_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0xe4,0x00,0x30]
+
+v_add_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0xe4,0x00,0xf0]
+
+v_add_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0xe4,0x00,0xf0]
+
+v_add_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0xe4,0x00,0x01]
+
+v_add_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0xe4,0x00,0x03]
+
+v_add_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0xe4,0x00,0x0f]
+
+v_add_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0xe4,0x00,0x0f]
+
+v_add_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0xe4,0x08,0x00]
+
+v_add_f16_dpp v5, -v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0xe4,0x10,0x00]
+
+v_add_f16_dpp v5, |v1|, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0xe4,0x20,0x00]
+
+v_add_f16_dpp v5, v1, -v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0xe4,0x40,0x00]
+
+v_add_f16_dpp v5, v1, |v2| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x3e,0x01,0xe4,0x80,0x00]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x06,0x06]
+
+v_sub_f16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x41,0x01,0x06,0x06,0x06]
+
+v_sub_f16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0xff,0x06,0x06,0x06]
+
+v_sub_f16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x40,0x01,0x06,0x06,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x26,0x06,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x06,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x00,0x06,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x01,0x06,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x02,0x06,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x03,0x06,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x04,0x06,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x05,0x06,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x0e,0x06,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x16,0x06,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x16,0x06,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x06,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x00,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x01,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x02,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x03,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x04,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x05,0x06]
+
+v_sub_f16_sdwa v5, -v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x16,0x06]
+
+v_sub_f16_sdwa v5, |v1|, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x26,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x06,0x06]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x06,0x00]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x06,0x01]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x06,0x02]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x06,0x03]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x06,0x04]
+
+v_sub_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x06,0x05]
+
+v_sub_f16_sdwa v5, v1, -v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x06,0x16]
+
+v_sub_f16_sdwa v5, v1, |v2| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x40,0x01,0x06,0x06,0x26]
+
+v_sub_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0xe4,0x00,0x00]
+
+v_sub_f16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x41,0x01,0xe4,0x00,0x00]
+
+v_sub_f16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0xff,0xe4,0x00,0x00]
+
+v_sub_f16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x40,0x01,0xe4,0x00,0x00]
+
+v_sub_f16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0x1b,0x00,0x00]
+
+v_sub_f16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0x40,0x01,0x00]
+
+v_sub_f16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0x41,0x01,0x00]
+
+v_sub_f16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0x42,0x01,0x00]
+
+v_sub_f16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0x43,0x01,0x00]
+
+v_sub_f16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0x30,0x01,0x00]
+
+v_sub_f16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0x34,0x01,0x00]
+
+v_sub_f16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0x38,0x01,0x00]
+
+v_sub_f16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0x3c,0x01,0x00]
+
+v_sub_f16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0x01,0x01,0x00]
+
+v_sub_f16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0x0f,0x01,0x00]
+
+v_sub_f16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0x11,0x01,0x00]
+
+v_sub_f16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0x1f,0x01,0x00]
+
+v_sub_f16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0x21,0x01,0x00]
+
+v_sub_f16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0x2f,0x01,0x00]
+
+v_sub_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0xe4,0x00,0x10]
+
+v_sub_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0xe4,0x00,0x30]
+
+v_sub_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0xe4,0x00,0xf0]
+
+v_sub_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0xe4,0x00,0xf0]
+
+v_sub_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0xe4,0x00,0x01]
+
+v_sub_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0xe4,0x00,0x03]
+
+v_sub_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0xe4,0x00,0x0f]
+
+v_sub_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0xe4,0x00,0x0f]
+
+v_sub_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0xe4,0x08,0x00]
+
+v_sub_f16_dpp v5, -v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0xe4,0x10,0x00]
+
+v_sub_f16_dpp v5, |v1|, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0xe4,0x20,0x00]
+
+v_sub_f16_dpp v5, v1, -v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0xe4,0x40,0x00]
+
+v_sub_f16_dpp v5, v1, |v2| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x40,0x01,0xe4,0x80,0x00]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x06,0x06]
+
+v_subrev_f16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x43,0x01,0x06,0x06,0x06]
+
+v_subrev_f16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0xff,0x06,0x06,0x06]
+
+v_subrev_f16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x42,0x01,0x06,0x06,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x26,0x06,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x06,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x00,0x06,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x01,0x06,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x02,0x06,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x03,0x06,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x04,0x06,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x05,0x06,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x0e,0x06,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x16,0x06,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x16,0x06,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x06,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x00,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x01,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x02,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x03,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x04,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x05,0x06]
+
+v_subrev_f16_sdwa v5, -v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x16,0x06]
+
+v_subrev_f16_sdwa v5, |v1|, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x26,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x06,0x06]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x06,0x00]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x06,0x01]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x06,0x02]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x06,0x03]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x06,0x04]
+
+v_subrev_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x06,0x05]
+
+v_subrev_f16_sdwa v5, v1, -v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x06,0x16]
+
+v_subrev_f16_sdwa v5, v1, |v2| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x42,0x01,0x06,0x06,0x26]
+
+v_subrev_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0xe4,0x00,0x00]
+
+v_subrev_f16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x43,0x01,0xe4,0x00,0x00]
+
+v_subrev_f16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0xff,0xe4,0x00,0x00]
+
+v_subrev_f16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x42,0x01,0xe4,0x00,0x00]
+
+v_subrev_f16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0x1b,0x00,0x00]
+
+v_subrev_f16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0x40,0x01,0x00]
+
+v_subrev_f16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0x41,0x01,0x00]
+
+v_subrev_f16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0x42,0x01,0x00]
+
+v_subrev_f16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0x43,0x01,0x00]
+
+v_subrev_f16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0x30,0x01,0x00]
+
+v_subrev_f16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0x34,0x01,0x00]
+
+v_subrev_f16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0x38,0x01,0x00]
+
+v_subrev_f16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0x3c,0x01,0x00]
+
+v_subrev_f16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0x01,0x01,0x00]
+
+v_subrev_f16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0x0f,0x01,0x00]
+
+v_subrev_f16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0x11,0x01,0x00]
+
+v_subrev_f16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0x1f,0x01,0x00]
+
+v_subrev_f16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0x21,0x01,0x00]
+
+v_subrev_f16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0x2f,0x01,0x00]
+
+v_subrev_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0xe4,0x00,0x10]
+
+v_subrev_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0xe4,0x00,0x30]
+
+v_subrev_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0xe4,0x00,0xf0]
+
+v_subrev_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0xe4,0x00,0xf0]
+
+v_subrev_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0xe4,0x00,0x01]
+
+v_subrev_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0xe4,0x00,0x03]
+
+v_subrev_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0xe4,0x00,0x0f]
+
+v_subrev_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0xe4,0x00,0x0f]
+
+v_subrev_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0xe4,0x08,0x00]
+
+v_subrev_f16_dpp v5, -v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0xe4,0x10,0x00]
+
+v_subrev_f16_dpp v5, |v1|, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0xe4,0x20,0x00]
+
+v_subrev_f16_dpp v5, v1, -v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0xe4,0x40,0x00]
+
+v_subrev_f16_dpp v5, v1, |v2| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x42,0x01,0xe4,0x80,0x00]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x06,0x06]
+
+v_mul_f16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x45,0x01,0x06,0x06,0x06]
+
+v_mul_f16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0xff,0x06,0x06,0x06]
+
+v_mul_f16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x44,0x01,0x06,0x06,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x26,0x06,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x06,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x00,0x06,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x01,0x06,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x02,0x06,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x03,0x06,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x04,0x06,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x05,0x06,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x0e,0x06,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x16,0x06,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x16,0x06,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x06,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x00,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x01,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x02,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x03,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x04,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x05,0x06]
+
+v_mul_f16_sdwa v5, -v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x16,0x06]
+
+v_mul_f16_sdwa v5, |v1|, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x26,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x06,0x06]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x06,0x00]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x06,0x01]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x06,0x02]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x06,0x03]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x06,0x04]
+
+v_mul_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x06,0x05]
+
+v_mul_f16_sdwa v5, v1, -v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x06,0x16]
+
+v_mul_f16_sdwa v5, v1, |v2| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x44,0x01,0x06,0x06,0x26]
+
+v_mul_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0xe4,0x00,0x00]
+
+v_mul_f16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x45,0x01,0xe4,0x00,0x00]
+
+v_mul_f16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0xff,0xe4,0x00,0x00]
+
+v_mul_f16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x44,0x01,0xe4,0x00,0x00]
+
+v_mul_f16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0x1b,0x00,0x00]
+
+v_mul_f16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0x40,0x01,0x00]
+
+v_mul_f16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0x41,0x01,0x00]
+
+v_mul_f16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0x42,0x01,0x00]
+
+v_mul_f16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0x43,0x01,0x00]
+
+v_mul_f16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0x30,0x01,0x00]
+
+v_mul_f16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0x34,0x01,0x00]
+
+v_mul_f16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0x38,0x01,0x00]
+
+v_mul_f16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0x3c,0x01,0x00]
+
+v_mul_f16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0x01,0x01,0x00]
+
+v_mul_f16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0x0f,0x01,0x00]
+
+v_mul_f16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0x11,0x01,0x00]
+
+v_mul_f16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0x1f,0x01,0x00]
+
+v_mul_f16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0x21,0x01,0x00]
+
+v_mul_f16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0x2f,0x01,0x00]
+
+v_mul_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0xe4,0x00,0x10]
+
+v_mul_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0xe4,0x00,0x30]
+
+v_mul_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0xe4,0x00,0xf0]
+
+v_mul_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0xe4,0x00,0xf0]
+
+v_mul_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0xe4,0x00,0x01]
+
+v_mul_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0xe4,0x00,0x03]
+
+v_mul_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0xe4,0x00,0x0f]
+
+v_mul_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0xe4,0x00,0x0f]
+
+v_mul_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0xe4,0x08,0x00]
+
+v_mul_f16_dpp v5, -v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0xe4,0x10,0x00]
+
+v_mul_f16_dpp v5, |v1|, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0xe4,0x20,0x00]
+
+v_mul_f16_dpp v5, v1, -v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0xe4,0x40,0x00]
+
+v_mul_f16_dpp v5, v1, |v2| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x44,0x01,0xe4,0x80,0x00]
+
+v_mac_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x06,0x06]
+
+v_mac_f16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x47,0x01,0x06,0x06,0x06]
+
+v_mac_f16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0xff,0x06,0x06,0x06]
+
+v_mac_f16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x46,0x01,0x06,0x06,0x06]
+
+v_mac_f16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x26,0x06,0x06]
+
+v_mac_f16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x06,0x06]
+
+v_mac_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x0e,0x06,0x06]
+
+v_mac_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x16,0x06,0x06]
+
+v_mac_f16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x16,0x06,0x06]
+
+v_mac_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x06,0x06]
+
+v_mac_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x00,0x06]
+
+v_mac_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x01,0x06]
+
+v_mac_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x02,0x06]
+
+v_mac_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x03,0x06]
+
+v_mac_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x04,0x06]
+
+v_mac_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x05,0x06]
+
+v_mac_f16_sdwa v5, -v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x16,0x06]
+
+v_mac_f16_sdwa v5, |v1|, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x26,0x06]
+
+v_mac_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x06,0x06]
+
+v_mac_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x06,0x00]
+
+v_mac_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x06,0x01]
+
+v_mac_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x06,0x02]
+
+v_mac_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x06,0x03]
+
+v_mac_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x06,0x04]
+
+v_mac_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x06,0x05]
+
+v_mac_f16_sdwa v5, v1, -v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x06,0x16]
+
+v_mac_f16_sdwa v5, v1, |v2| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x46,0x01,0x06,0x06,0x26]
+
+v_mac_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0xe4,0x00,0x00]
+
+v_mac_f16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x47,0x01,0xe4,0x00,0x00]
+
+v_mac_f16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0xff,0xe4,0x00,0x00]
+
+v_mac_f16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x46,0x01,0xe4,0x00,0x00]
+
+v_mac_f16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0x1b,0x00,0x00]
+
+v_mac_f16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0x40,0x01,0x00]
+
+v_mac_f16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0x41,0x01,0x00]
+
+v_mac_f16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0x42,0x01,0x00]
+
+v_mac_f16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0x43,0x01,0x00]
+
+v_mac_f16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0x30,0x01,0x00]
+
+v_mac_f16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0x34,0x01,0x00]
+
+v_mac_f16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0x38,0x01,0x00]
+
+v_mac_f16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0x3c,0x01,0x00]
+
+v_mac_f16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0x01,0x01,0x00]
+
+v_mac_f16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0x0f,0x01,0x00]
+
+v_mac_f16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0x11,0x01,0x00]
+
+v_mac_f16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0x1f,0x01,0x00]
+
+v_mac_f16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0x21,0x01,0x00]
+
+v_mac_f16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0x2f,0x01,0x00]
+
+v_mac_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0xe4,0x00,0x10]
+
+v_mac_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0xe4,0x00,0x30]
+
+v_mac_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0xe4,0x00,0xf0]
+
+v_mac_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0xe4,0x00,0xf0]
+
+v_mac_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0xe4,0x00,0x01]
+
+v_mac_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0xe4,0x00,0x03]
+
+v_mac_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0xe4,0x00,0x0f]
+
+v_mac_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0xe4,0x00,0x0f]
+
+v_mac_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0xe4,0x08,0x00]
+
+v_mac_f16_dpp v5, -v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0xe4,0x10,0x00]
+
+v_mac_f16_dpp v5, |v1|, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0xe4,0x20,0x00]
+
+v_mac_f16_dpp v5, v1, -v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0xe4,0x40,0x00]
+
+v_mac_f16_dpp v5, v1, |v2| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x46,0x01,0xe4,0x80,0x00]
+
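+// v_add_u16 (VOP2), SDWA form. A rough reading of the SDWA encodings in this
+// file, inferred from the byte patterns verified below rather than from ISA
+// documentation: byte 0 is the 0xf9 SDWA marker, bytes 1-3 carry the VOP2
+// src1/vdst/opcode fields, byte 4 holds the real src0 register, byte 5 packs
+// dst_sel | dst_unused | clamp, and bytes 6-7 pack src0_sel and src1_sel
+// together with their sext/neg/abs modifier bits.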
+v_add_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x06,0x06,0x06]
+
+v_add_u16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x4d,0x01,0x06,0x06,0x06]
+
+v_add_u16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0xff,0x06,0x06,0x06]
+
+v_add_u16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x4c,0x01,0x06,0x06,0x06]
+
+v_add_u16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x26,0x06,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x06,0x06,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x00,0x06,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x01,0x06,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x02,0x06,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x03,0x06,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x04,0x06,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x05,0x06,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x0e,0x06,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x16,0x06,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x16,0x06,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x06,0x06,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x06,0x00,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x06,0x01,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x06,0x02,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x06,0x03,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x06,0x04,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x06,0x05,0x06]
+
+v_add_u16_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x06,0x0e,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x06,0x06,0x06]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x06,0x06,0x00]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x06,0x06,0x01]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x06,0x06,0x02]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x06,0x06,0x03]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x06,0x06,0x04]
+
+v_add_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x06,0x06,0x05]
+
+v_add_u16_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4c,0x01,0x06,0x06,0x0e]
+
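+// v_add_u16 (VOP2), DPP form. Similarly inferred from the patterns below:
+// byte 0 is the 0xfa DPP marker, byte 4 holds the real src0 register, bytes
+// 5-6 carry the 9-bit dpp_ctrl value plus the bound_ctrl and neg/abs bits,
+// and byte 7 packs row_mask in the high nibble and bank_mask in the low
+// nibble, each defaulting to 0xf when omitted.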
+v_add_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0xe4,0x00,0x00]
+
+v_add_u16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x4d,0x01,0xe4,0x00,0x00]
+
+v_add_u16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0xff,0xe4,0x00,0x00]
+
+v_add_u16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x4c,0x01,0xe4,0x00,0x00]
+
+v_add_u16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0x1b,0x00,0x00]
+
+v_add_u16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0x40,0x01,0x00]
+
+v_add_u16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0x41,0x01,0x00]
+
+v_add_u16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0x42,0x01,0x00]
+
+v_add_u16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0x43,0x01,0x00]
+
+v_add_u16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0x30,0x01,0x00]
+
+v_add_u16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0x34,0x01,0x00]
+
+v_add_u16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0x38,0x01,0x00]
+
+v_add_u16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0x3c,0x01,0x00]
+
+v_add_u16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0x01,0x01,0x00]
+
+v_add_u16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0x0f,0x01,0x00]
+
+v_add_u16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0x11,0x01,0x00]
+
+v_add_u16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0x1f,0x01,0x00]
+
+v_add_u16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0x21,0x01,0x00]
+
+v_add_u16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0x2f,0x01,0x00]
+
+v_add_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0xe4,0x00,0x10]
+
+v_add_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0xe4,0x00,0x30]
+
+v_add_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0xe4,0x00,0xf0]
+
+v_add_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0xe4,0x00,0xf0]
+
+v_add_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0xe4,0x00,0x01]
+
+v_add_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0xe4,0x00,0x03]
+
+v_add_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0xe4,0x00,0x0f]
+
+v_add_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0xe4,0x00,0x0f]
+
+v_add_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x4c,0x01,0xe4,0x08,0x00]
+
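+// v_sub_u16 (VOP2): SDWA and DPP variants.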
+v_sub_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x06,0x06,0x06]
+
+v_sub_u16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x4f,0x01,0x06,0x06,0x06]
+
+v_sub_u16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0xff,0x06,0x06,0x06]
+
+v_sub_u16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x4e,0x01,0x06,0x06,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x26,0x06,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x06,0x06,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x00,0x06,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x01,0x06,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x02,0x06,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x03,0x06,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x04,0x06,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x05,0x06,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x0e,0x06,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x16,0x06,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x16,0x06,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x06,0x06,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x06,0x00,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x06,0x01,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x06,0x02,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x06,0x03,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x06,0x04,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x06,0x05,0x06]
+
+v_sub_u16_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x06,0x0e,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x06,0x06,0x06]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x06,0x06,0x00]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x06,0x06,0x01]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x06,0x06,0x02]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x06,0x06,0x03]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x06,0x06,0x04]
+
+v_sub_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x06,0x06,0x05]
+
+v_sub_u16_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x4e,0x01,0x06,0x06,0x0e]
+
+v_sub_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0xe4,0x00,0x00]
+
+v_sub_u16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x4f,0x01,0xe4,0x00,0x00]
+
+v_sub_u16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0xff,0xe4,0x00,0x00]
+
+v_sub_u16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x4e,0x01,0xe4,0x00,0x00]
+
+v_sub_u16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0x1b,0x00,0x00]
+
+v_sub_u16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0x40,0x01,0x00]
+
+v_sub_u16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0x41,0x01,0x00]
+
+v_sub_u16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0x42,0x01,0x00]
+
+v_sub_u16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0x43,0x01,0x00]
+
+v_sub_u16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0x30,0x01,0x00]
+
+v_sub_u16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0x34,0x01,0x00]
+
+v_sub_u16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0x38,0x01,0x00]
+
+v_sub_u16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0x3c,0x01,0x00]
+
+v_sub_u16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0x01,0x01,0x00]
+
+v_sub_u16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0x0f,0x01,0x00]
+
+v_sub_u16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0x11,0x01,0x00]
+
+v_sub_u16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0x1f,0x01,0x00]
+
+v_sub_u16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0x21,0x01,0x00]
+
+v_sub_u16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0x2f,0x01,0x00]
+
+v_sub_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0xe4,0x00,0x10]
+
+v_sub_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0xe4,0x00,0x30]
+
+v_sub_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0xe4,0x00,0xf0]
+
+v_sub_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0xe4,0x00,0xf0]
+
+v_sub_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0xe4,0x00,0x01]
+
+v_sub_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0xe4,0x00,0x03]
+
+v_sub_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0xe4,0x00,0x0f]
+
+v_sub_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0xe4,0x00,0x0f]
+
+v_sub_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x4e,0x01,0xe4,0x08,0x00]
+
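+// v_subrev_u16 (VOP2): SDWA and DPP variants.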
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x06,0x06,0x06]
+
+v_subrev_u16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x51,0x01,0x06,0x06,0x06]
+
+v_subrev_u16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0xff,0x06,0x06,0x06]
+
+v_subrev_u16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x50,0x01,0x06,0x06,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x26,0x06,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x06,0x06,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x00,0x06,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x01,0x06,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x02,0x06,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x03,0x06,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x04,0x06,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x05,0x06,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x0e,0x06,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x16,0x06,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x16,0x06,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x06,0x06,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x06,0x00,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x06,0x01,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x06,0x02,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x06,0x03,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x06,0x04,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x06,0x05,0x06]
+
+v_subrev_u16_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x06,0x0e,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x06,0x06,0x06]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x06,0x06,0x00]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x06,0x06,0x01]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x06,0x06,0x02]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x06,0x06,0x03]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x06,0x06,0x04]
+
+v_subrev_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x06,0x06,0x05]
+
+v_subrev_u16_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x50,0x01,0x06,0x06,0x0e]
+
+v_subrev_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x00]
+
+v_subrev_u16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x51,0x01,0xe4,0x00,0x00]
+
+v_subrev_u16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0xff,0xe4,0x00,0x00]
+
+v_subrev_u16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x50,0x01,0xe4,0x00,0x00]
+
+v_subrev_u16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0x1b,0x00,0x00]
+
+v_subrev_u16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0x40,0x01,0x00]
+
+v_subrev_u16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0x41,0x01,0x00]
+
+v_subrev_u16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0x42,0x01,0x00]
+
+v_subrev_u16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0x43,0x01,0x00]
+
+v_subrev_u16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0x30,0x01,0x00]
+
+v_subrev_u16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0x34,0x01,0x00]
+
+v_subrev_u16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0x38,0x01,0x00]
+
+v_subrev_u16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0x3c,0x01,0x00]
+
+v_subrev_u16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0x01,0x01,0x00]
+
+v_subrev_u16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0x0f,0x01,0x00]
+
+v_subrev_u16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0x11,0x01,0x00]
+
+v_subrev_u16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0x1f,0x01,0x00]
+
+v_subrev_u16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0x21,0x01,0x00]
+
+v_subrev_u16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0x2f,0x01,0x00]
+
+v_subrev_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x10]
+
+v_subrev_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x30]
+
+v_subrev_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0xf0]
+
+v_subrev_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0xf0]
+
+v_subrev_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x01]
+
+v_subrev_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x03]
+
+v_subrev_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x0f]
+
+v_subrev_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x0f]
+
+v_subrev_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x08,0x00]
+
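+// v_mul_lo_u16 (VOP2): SDWA and DPP variants.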
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x06,0x06,0x06]
+
+v_mul_lo_u16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x53,0x01,0x06,0x06,0x06]
+
+v_mul_lo_u16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0xff,0x06,0x06,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x52,0x01,0x06,0x06,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x26,0x06,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x06,0x06,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x00,0x06,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x01,0x06,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x02,0x06,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x03,0x06,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x04,0x06,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x05,0x06,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x0e,0x06,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x16,0x06,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x16,0x06,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x06,0x06,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x06,0x00,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x06,0x01,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x06,0x02,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x06,0x03,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x06,0x04,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x06,0x05,0x06]
+
+v_mul_lo_u16_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x06,0x0e,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x06,0x06,0x06]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x06,0x06,0x00]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x06,0x06,0x01]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x06,0x06,0x02]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x06,0x06,0x03]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x06,0x06,0x04]
+
+v_mul_lo_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x06,0x06,0x05]
+
+v_mul_lo_u16_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x52,0x01,0x06,0x06,0x0e]
+
+v_mul_lo_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0xe4,0x00,0x00]
+
+v_mul_lo_u16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x53,0x01,0xe4,0x00,0x00]
+
+v_mul_lo_u16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0xff,0xe4,0x00,0x00]
+
+v_mul_lo_u16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x52,0x01,0xe4,0x00,0x00]
+
+v_mul_lo_u16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0x1b,0x00,0x00]
+
+v_mul_lo_u16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0x40,0x01,0x00]
+
+v_mul_lo_u16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0x41,0x01,0x00]
+
+v_mul_lo_u16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0x42,0x01,0x00]
+
+v_mul_lo_u16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0x43,0x01,0x00]
+
+v_mul_lo_u16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0x30,0x01,0x00]
+
+v_mul_lo_u16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0x34,0x01,0x00]
+
+v_mul_lo_u16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0x38,0x01,0x00]
+
+v_mul_lo_u16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0x3c,0x01,0x00]
+
+v_mul_lo_u16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0x01,0x01,0x00]
+
+v_mul_lo_u16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0x0f,0x01,0x00]
+
+v_mul_lo_u16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0x11,0x01,0x00]
+
+v_mul_lo_u16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0x1f,0x01,0x00]
+
+v_mul_lo_u16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0x21,0x01,0x00]
+
+v_mul_lo_u16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0x2f,0x01,0x00]
+
+v_mul_lo_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0xe4,0x00,0x10]
+
+v_mul_lo_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0xe4,0x00,0x30]
+
+v_mul_lo_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0xe4,0x00,0xf0]
+
+v_mul_lo_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0xe4,0x00,0xf0]
+
+v_mul_lo_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0xe4,0x00,0x01]
+
+v_mul_lo_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0xe4,0x00,0x03]
+
+v_mul_lo_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0xe4,0x00,0x0f]
+
+v_mul_lo_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0xe4,0x00,0x0f]
+
+v_mul_lo_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x52,0x01,0xe4,0x08,0x00]
+
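+// v_lshlrev_b16 (VOP2): SDWA and DPP variants.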
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x06,0x06,0x06]
+
+v_lshlrev_b16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x55,0x01,0x06,0x06,0x06]
+
+v_lshlrev_b16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0xff,0x06,0x06,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x54,0x01,0x06,0x06,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x26,0x06,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x06,0x06,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x00,0x06,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x01,0x06,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x02,0x06,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x03,0x06,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x04,0x06,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x05,0x06,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x0e,0x06,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x16,0x06,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x16,0x06,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x06,0x06,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x06,0x00,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x06,0x01,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x06,0x02,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x06,0x03,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x06,0x04,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x06,0x05,0x06]
+
+v_lshlrev_b16_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x06,0x0e,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x06,0x06,0x06]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x06,0x06,0x00]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x06,0x06,0x01]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x06,0x06,0x02]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x06,0x06,0x03]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x06,0x06,0x04]
+
+v_lshlrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x06,0x06,0x05]
+
+v_lshlrev_b16_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x54,0x01,0x06,0x06,0x0e]
+
+v_lshlrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0x00]
+
+v_lshlrev_b16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x55,0x01,0xe4,0x00,0x00]
+
+v_lshlrev_b16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0xff,0xe4,0x00,0x00]
+
+v_lshlrev_b16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x54,0x01,0xe4,0x00,0x00]
+
+v_lshlrev_b16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0x1b,0x00,0x00]
+
+v_lshlrev_b16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0x40,0x01,0x00]
+
+v_lshlrev_b16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0x41,0x01,0x00]
+
+v_lshlrev_b16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0x42,0x01,0x00]
+
+v_lshlrev_b16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0x43,0x01,0x00]
+
+v_lshlrev_b16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0x30,0x01,0x00]
+
+v_lshlrev_b16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0x34,0x01,0x00]
+
+v_lshlrev_b16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0x38,0x01,0x00]
+
+v_lshlrev_b16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0x3c,0x01,0x00]
+
+v_lshlrev_b16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0x01,0x01,0x00]
+
+v_lshlrev_b16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0x0f,0x01,0x00]
+
+v_lshlrev_b16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0x11,0x01,0x00]
+
+v_lshlrev_b16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0x1f,0x01,0x00]
+
+v_lshlrev_b16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0x21,0x01,0x00]
+
+v_lshlrev_b16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0x2f,0x01,0x00]
+
+v_lshlrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0x10]
+
+v_lshlrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0x30]
+
+v_lshlrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0xf0]
+
+v_lshlrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0xf0]
+
+v_lshlrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0x01]
+
+v_lshlrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0x03]
+
+v_lshlrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0x0f]
+
+v_lshlrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0x0f]
+
+v_lshlrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x54,0x01,0xe4,0x08,0x00]
+
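+// v_lshrrev_b16 (VOP2): SDWA and DPP variants.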
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x06,0x06,0x06]
+
+v_lshrrev_b16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x57,0x01,0x06,0x06,0x06]
+
+v_lshrrev_b16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0xff,0x06,0x06,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x56,0x01,0x06,0x06,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x26,0x06,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x06,0x06,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x00,0x06,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x01,0x06,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x02,0x06,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x03,0x06,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x04,0x06,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x05,0x06,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x0e,0x06,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x16,0x06,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x16,0x06,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x06,0x06,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x06,0x00,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x06,0x01,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x06,0x02,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x06,0x03,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x06,0x04,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x06,0x05,0x06]
+
+v_lshrrev_b16_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x06,0x0e,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x06,0x06,0x06]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x06,0x06,0x00]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x06,0x06,0x01]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x06,0x06,0x02]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x06,0x06,0x03]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x06,0x06,0x04]
+
+v_lshrrev_b16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x06,0x06,0x05]
+
+v_lshrrev_b16_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x56,0x01,0x06,0x06,0x0e]
+
+v_lshrrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0xe4,0x00,0x00]
+
+v_lshrrev_b16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x57,0x01,0xe4,0x00,0x00]
+
+v_lshrrev_b16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0xff,0xe4,0x00,0x00]
+
+v_lshrrev_b16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x56,0x01,0xe4,0x00,0x00]
+
+v_lshrrev_b16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0x1b,0x00,0x00]
+
+v_lshrrev_b16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0x40,0x01,0x00]
+
+v_lshrrev_b16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0x41,0x01,0x00]
+
+v_lshrrev_b16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0x42,0x01,0x00]
+
+v_lshrrev_b16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0x43,0x01,0x00]
+
+v_lshrrev_b16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0x30,0x01,0x00]
+
+v_lshrrev_b16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0x34,0x01,0x00]
+
+v_lshrrev_b16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0x38,0x01,0x00]
+
+v_lshrrev_b16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0x3c,0x01,0x00]
+
+v_lshrrev_b16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0x01,0x01,0x00]
+
+v_lshrrev_b16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0x0f,0x01,0x00]
+
+v_lshrrev_b16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0x11,0x01,0x00]
+
+v_lshrrev_b16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0x1f,0x01,0x00]
+
+v_lshrrev_b16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0x21,0x01,0x00]
+
+v_lshrrev_b16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0x2f,0x01,0x00]
+
+v_lshrrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0xe4,0x00,0x10]
+
+v_lshrrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0xe4,0x00,0x30]
+
+v_lshrrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0xe4,0x00,0xf0]
+
+v_lshrrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0xe4,0x00,0xf0]
+
+v_lshrrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0xe4,0x00,0x01]
+
+v_lshrrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0xe4,0x00,0x03]
+
+v_lshrrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0xe4,0x00,0x0f]
+
+v_lshrrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0xe4,0x00,0x0f]
+
+v_lshrrev_b16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x56,0x01,0xe4,0x08,0x00]
+
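+// v_ashrrev_i16 (VOP2): SDWA and DPP variants.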
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x06,0x06,0x06]
+
+v_ashrrev_i16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x59,0x01,0x06,0x06,0x06]
+
+v_ashrrev_i16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0xff,0x06,0x06,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x58,0x01,0x06,0x06,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x26,0x06,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x06,0x06,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x00,0x06,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x01,0x06,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x02,0x06,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x03,0x06,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x04,0x06,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x05,0x06,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x0e,0x06,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x16,0x06,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x16,0x06,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x06,0x06,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x06,0x00,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x06,0x01,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x06,0x02,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x06,0x03,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x06,0x04,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x06,0x05,0x06]
+
+v_ashrrev_i16_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x06,0x0e,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x06,0x06,0x06]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x06,0x06,0x00]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x06,0x06,0x01]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x06,0x06,0x02]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x06,0x06,0x03]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x06,0x06,0x04]
+
+v_ashrrev_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x06,0x06,0x05]
+
+v_ashrrev_i16_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x58,0x01,0x06,0x06,0x0e]
+
+v_ashrrev_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0xe4,0x00,0x00]
+
+v_ashrrev_i16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x59,0x01,0xe4,0x00,0x00]
+
+v_ashrrev_i16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0xff,0xe4,0x00,0x00]
+
+v_ashrrev_i16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x58,0x01,0xe4,0x00,0x00]
+
+v_ashrrev_i16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0x1b,0x00,0x00]
+
+v_ashrrev_i16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0x40,0x01,0x00]
+
+v_ashrrev_i16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0x41,0x01,0x00]
+
+v_ashrrev_i16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0x42,0x01,0x00]
+
+v_ashrrev_i16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0x43,0x01,0x00]
+
+v_ashrrev_i16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0x30,0x01,0x00]
+
+v_ashrrev_i16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0x34,0x01,0x00]
+
+v_ashrrev_i16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0x38,0x01,0x00]
+
+v_ashrrev_i16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0x3c,0x01,0x00]
+
+v_ashrrev_i16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0x01,0x01,0x00]
+
+v_ashrrev_i16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0x0f,0x01,0x00]
+
+v_ashrrev_i16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0x11,0x01,0x00]
+
+v_ashrrev_i16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0x1f,0x01,0x00]
+
+v_ashrrev_i16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0x21,0x01,0x00]
+
+v_ashrrev_i16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0x2f,0x01,0x00]
+
+v_ashrrev_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0xe4,0x00,0x10]
+
+v_ashrrev_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0xe4,0x00,0x30]
+
+v_ashrrev_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0xe4,0x00,0xf0]
+
+v_ashrrev_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0xe4,0x00,0xf0]
+
+v_ashrrev_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0xe4,0x00,0x01]
+
+v_ashrrev_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0xe4,0x00,0x03]
+
+v_ashrrev_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0xe4,0x00,0x0f]
+
+v_ashrrev_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0xe4,0x00,0x0f]
+
+v_ashrrev_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x58,0x01,0xe4,0x08,0x00]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x06,0x06]
+
+v_max_f16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x5b,0x01,0x06,0x06,0x06]
+
+v_max_f16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0xff,0x06,0x06,0x06]
+
+v_max_f16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x5a,0x01,0x06,0x06,0x06]
+
+v_max_f16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x26,0x06,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x06,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x00,0x06,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x01,0x06,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x02,0x06,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x03,0x06,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x04,0x06,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x05,0x06,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x0e,0x06,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x16,0x06,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x16,0x06,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x06,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x00,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x01,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x02,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x03,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x04,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x05,0x06]
+
+v_max_f16_sdwa v5, -v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x16,0x06]
+
+v_max_f16_sdwa v5, |v1|, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x26,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x06,0x06]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x06,0x00]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x06,0x01]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x06,0x02]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x06,0x03]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x06,0x04]
+
+v_max_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x06,0x05]
+
+v_max_f16_sdwa v5, v1, -v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x06,0x16]
+
+v_max_f16_sdwa v5, v1, |v2| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5a,0x01,0x06,0x06,0x26]
+
+v_max_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0xe4,0x00,0x00]
+
+v_max_f16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x5b,0x01,0xe4,0x00,0x00]
+
+v_max_f16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0xff,0xe4,0x00,0x00]
+
+v_max_f16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x5a,0x01,0xe4,0x00,0x00]
+
+v_max_f16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0x1b,0x00,0x00]
+
+v_max_f16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0x40,0x01,0x00]
+
+v_max_f16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0x41,0x01,0x00]
+
+v_max_f16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0x42,0x01,0x00]
+
+v_max_f16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0x43,0x01,0x00]
+
+v_max_f16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0x30,0x01,0x00]
+
+v_max_f16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0x34,0x01,0x00]
+
+v_max_f16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0x38,0x01,0x00]
+
+v_max_f16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0x3c,0x01,0x00]
+
+v_max_f16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0x01,0x01,0x00]
+
+v_max_f16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0x0f,0x01,0x00]
+
+v_max_f16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0x11,0x01,0x00]
+
+v_max_f16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0x1f,0x01,0x00]
+
+v_max_f16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0x21,0x01,0x00]
+
+v_max_f16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0x2f,0x01,0x00]
+
+v_max_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0xe4,0x00,0x10]
+
+v_max_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0xe4,0x00,0x30]
+
+v_max_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0xe4,0x00,0xf0]
+
+v_max_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0xe4,0x00,0xf0]
+
+v_max_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0xe4,0x00,0x01]
+
+v_max_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0xe4,0x00,0x03]
+
+v_max_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0xe4,0x00,0x0f]
+
+v_max_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0xe4,0x00,0x0f]
+
+v_max_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0xe4,0x08,0x00]
+
+v_max_f16_dpp v5, -v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0xe4,0x10,0x00]
+
+v_max_f16_dpp v5, |v1|, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0xe4,0x20,0x00]
+
+v_max_f16_dpp v5, v1, -v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0xe4,0x40,0x00]
+
+v_max_f16_dpp v5, v1, |v2| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5a,0x01,0xe4,0x80,0x00]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x06,0x06]
+
+v_min_f16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x5d,0x01,0x06,0x06,0x06]
+
+v_min_f16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0xff,0x06,0x06,0x06]
+
+v_min_f16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x5c,0x01,0x06,0x06,0x06]
+
+v_min_f16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x26,0x06,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x06,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x00,0x06,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x01,0x06,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x02,0x06,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x03,0x06,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x04,0x06,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x05,0x06,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x0e,0x06,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x16,0x06,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x16,0x06,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x06,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x00,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x01,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x02,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x03,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x04,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x05,0x06]
+
+v_min_f16_sdwa v5, -v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x16,0x06]
+
+v_min_f16_sdwa v5, |v1|, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x26,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x06,0x06]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x06,0x00]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x06,0x01]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x06,0x02]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x06,0x03]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x06,0x04]
+
+v_min_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x06,0x05]
+
+v_min_f16_sdwa v5, v1, -v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x06,0x16]
+
+v_min_f16_sdwa v5, v1, |v2| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5c,0x01,0x06,0x06,0x26]
+
+v_min_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0xe4,0x00,0x00]
+
+v_min_f16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x5d,0x01,0xe4,0x00,0x00]
+
+v_min_f16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0xff,0xe4,0x00,0x00]
+
+v_min_f16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x5c,0x01,0xe4,0x00,0x00]
+
+v_min_f16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0x1b,0x00,0x00]
+
+v_min_f16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0x40,0x01,0x00]
+
+v_min_f16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0x41,0x01,0x00]
+
+v_min_f16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0x42,0x01,0x00]
+
+v_min_f16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0x43,0x01,0x00]
+
+v_min_f16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0x30,0x01,0x00]
+
+v_min_f16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0x34,0x01,0x00]
+
+v_min_f16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0x38,0x01,0x00]
+
+v_min_f16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0x3c,0x01,0x00]
+
+v_min_f16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0x01,0x01,0x00]
+
+v_min_f16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0x0f,0x01,0x00]
+
+v_min_f16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0x11,0x01,0x00]
+
+v_min_f16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0x1f,0x01,0x00]
+
+v_min_f16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0x21,0x01,0x00]
+
+v_min_f16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0x2f,0x01,0x00]
+
+v_min_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0xe4,0x00,0x10]
+
+v_min_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0xe4,0x00,0x30]
+
+v_min_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0xe4,0x00,0xf0]
+
+v_min_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0xe4,0x00,0xf0]
+
+v_min_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0xe4,0x00,0x01]
+
+v_min_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0xe4,0x00,0x03]
+
+v_min_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0xe4,0x00,0x0f]
+
+v_min_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0xe4,0x00,0x0f]
+
+v_min_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0xe4,0x08,0x00]
+
+v_min_f16_dpp v5, -v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0xe4,0x10,0x00]
+
+v_min_f16_dpp v5, |v1|, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0xe4,0x20,0x00]
+
+v_min_f16_dpp v5, v1, -v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0xe4,0x40,0x00]
+
+v_min_f16_dpp v5, v1, |v2| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5c,0x01,0xe4,0x80,0x00]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x06,0x06,0x06]
+
+v_max_u16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x5f,0x01,0x06,0x06,0x06]
+
+v_max_u16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0xff,0x06,0x06,0x06]
+
+v_max_u16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x5e,0x01,0x06,0x06,0x06]
+
+v_max_u16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x26,0x06,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x06,0x06,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x00,0x06,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x01,0x06,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x02,0x06,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x03,0x06,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x04,0x06,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x05,0x06,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x0e,0x06,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x16,0x06,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x16,0x06,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x06,0x06,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x06,0x00,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x06,0x01,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x06,0x02,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x06,0x03,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x06,0x04,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x06,0x05,0x06]
+
+v_max_u16_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x06,0x0e,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x06,0x06,0x06]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x06,0x06,0x00]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x06,0x06,0x01]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x06,0x06,0x02]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x06,0x06,0x03]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x06,0x06,0x04]
+
+v_max_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x06,0x06,0x05]
+
+v_max_u16_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x5e,0x01,0x06,0x06,0x0e]
+
+v_max_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0xe4,0x00,0x00]
+
+v_max_u16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x5f,0x01,0xe4,0x00,0x00]
+
+v_max_u16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0xff,0xe4,0x00,0x00]
+
+v_max_u16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x5e,0x01,0xe4,0x00,0x00]
+
+v_max_u16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0x1b,0x00,0x00]
+
+v_max_u16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0x40,0x01,0x00]
+
+v_max_u16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0x41,0x01,0x00]
+
+v_max_u16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0x42,0x01,0x00]
+
+v_max_u16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0x43,0x01,0x00]
+
+v_max_u16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0x30,0x01,0x00]
+
+v_max_u16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0x34,0x01,0x00]
+
+v_max_u16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0x38,0x01,0x00]
+
+v_max_u16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0x3c,0x01,0x00]
+
+v_max_u16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0x01,0x01,0x00]
+
+v_max_u16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0x0f,0x01,0x00]
+
+v_max_u16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0x11,0x01,0x00]
+
+v_max_u16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0x1f,0x01,0x00]
+
+v_max_u16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0x21,0x01,0x00]
+
+v_max_u16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0x2f,0x01,0x00]
+
+v_max_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0xe4,0x00,0x10]
+
+v_max_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0xe4,0x00,0x30]
+
+v_max_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0xe4,0x00,0xf0]
+
+v_max_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0xe4,0x00,0xf0]
+
+v_max_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0xe4,0x00,0x01]
+
+v_max_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0xe4,0x00,0x03]
+
+v_max_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0xe4,0x00,0x0f]
+
+v_max_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0xe4,0x00,0x0f]
+
+v_max_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x5e,0x01,0xe4,0x08,0x00]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x06,0x06,0x06]
+
+v_max_i16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x61,0x01,0x06,0x06,0x06]
+
+v_max_i16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0xff,0x06,0x06,0x06]
+
+v_max_i16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x60,0x01,0x06,0x06,0x06]
+
+v_max_i16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x26,0x06,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x06,0x06,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x00,0x06,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x01,0x06,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x02,0x06,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x03,0x06,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x04,0x06,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x05,0x06,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x0e,0x06,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x16,0x06,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x16,0x06,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x06,0x06,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x06,0x00,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x06,0x01,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x06,0x02,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x06,0x03,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x06,0x04,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x06,0x05,0x06]
+
+v_max_i16_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x06,0x0e,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x06,0x06,0x06]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x06,0x06,0x00]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x06,0x06,0x01]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x06,0x06,0x02]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x06,0x06,0x03]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x06,0x06,0x04]
+
+v_max_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x06,0x06,0x05]
+
+v_max_i16_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x60,0x01,0x06,0x06,0x0e]
+
+v_max_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0xe4,0x00,0x00]
+
+v_max_i16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x61,0x01,0xe4,0x00,0x00]
+
+v_max_i16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0xff,0xe4,0x00,0x00]
+
+v_max_i16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x60,0x01,0xe4,0x00,0x00]
+
+v_max_i16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0x1b,0x00,0x00]
+
+v_max_i16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0x40,0x01,0x00]
+
+v_max_i16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0x41,0x01,0x00]
+
+v_max_i16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0x42,0x01,0x00]
+
+v_max_i16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0x43,0x01,0x00]
+
+v_max_i16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0x30,0x01,0x00]
+
+v_max_i16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0x34,0x01,0x00]
+
+v_max_i16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0x38,0x01,0x00]
+
+v_max_i16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0x3c,0x01,0x00]
+
+v_max_i16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0x01,0x01,0x00]
+
+v_max_i16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0x0f,0x01,0x00]
+
+v_max_i16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0x11,0x01,0x00]
+
+v_max_i16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0x1f,0x01,0x00]
+
+v_max_i16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0x21,0x01,0x00]
+
+v_max_i16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0x2f,0x01,0x00]
+
+v_max_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0xe4,0x00,0x10]
+
+v_max_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0xe4,0x00,0x30]
+
+v_max_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0xe4,0x00,0xf0]
+
+v_max_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0xe4,0x00,0xf0]
+
+v_max_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0xe4,0x00,0x01]
+
+v_max_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0xe4,0x00,0x03]
+
+v_max_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0xe4,0x00,0x0f]
+
+v_max_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0xe4,0x00,0x0f]
+
+v_max_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x60,0x01,0xe4,0x08,0x00]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x06,0x06,0x06]
+
+v_min_u16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x63,0x01,0x06,0x06,0x06]
+
+v_min_u16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0xff,0x06,0x06,0x06]
+
+v_min_u16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x62,0x01,0x06,0x06,0x06]
+
+v_min_u16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x26,0x06,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x06,0x06,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x00,0x06,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x01,0x06,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x02,0x06,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x03,0x06,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x04,0x06,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x05,0x06,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x0e,0x06,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x16,0x06,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x16,0x06,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x06,0x06,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x06,0x00,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x06,0x01,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x06,0x02,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x06,0x03,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x06,0x04,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x06,0x05,0x06]
+
+v_min_u16_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x06,0x0e,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x06,0x06,0x06]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x06,0x06,0x00]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x06,0x06,0x01]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x06,0x06,0x02]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x06,0x06,0x03]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x06,0x06,0x04]
+
+v_min_u16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x06,0x06,0x05]
+
+v_min_u16_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x62,0x01,0x06,0x06,0x0e]
+
+v_min_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0xe4,0x00,0x00]
+
+v_min_u16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x63,0x01,0xe4,0x00,0x00]
+
+v_min_u16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0xff,0xe4,0x00,0x00]
+
+v_min_u16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x62,0x01,0xe4,0x00,0x00]
+
+v_min_u16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0x1b,0x00,0x00]
+
+v_min_u16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0x40,0x01,0x00]
+
+v_min_u16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0x41,0x01,0x00]
+
+v_min_u16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0x42,0x01,0x00]
+
+v_min_u16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0x43,0x01,0x00]
+
+v_min_u16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0x30,0x01,0x00]
+
+v_min_u16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0x34,0x01,0x00]
+
+v_min_u16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0x38,0x01,0x00]
+
+v_min_u16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0x3c,0x01,0x00]
+
+v_min_u16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0x01,0x01,0x00]
+
+v_min_u16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0x0f,0x01,0x00]
+
+v_min_u16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0x11,0x01,0x00]
+
+v_min_u16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0x1f,0x01,0x00]
+
+v_min_u16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0x21,0x01,0x00]
+
+v_min_u16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0x2f,0x01,0x00]
+
+v_min_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0xe4,0x00,0x10]
+
+v_min_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0xe4,0x00,0x30]
+
+v_min_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0xe4,0x00,0xf0]
+
+v_min_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0xe4,0x00,0xf0]
+
+v_min_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0xe4,0x00,0x01]
+
+v_min_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0xe4,0x00,0x03]
+
+v_min_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0xe4,0x00,0x0f]
+
+v_min_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0xe4,0x00,0x0f]
+
+v_min_u16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x62,0x01,0xe4,0x08,0x00]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x06,0x06,0x06]
+
+v_min_i16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x65,0x01,0x06,0x06,0x06]
+
+v_min_i16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0xff,0x06,0x06,0x06]
+
+v_min_i16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x64,0x01,0x06,0x06,0x06]
+
+v_min_i16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x26,0x06,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x06,0x06,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x00,0x06,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x01,0x06,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x02,0x06,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x03,0x06,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x04,0x06,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x05,0x06,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x0e,0x06,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x16,0x06,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x16,0x06,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x06,0x06,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x06,0x00,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x06,0x01,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x06,0x02,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x06,0x03,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x06,0x04,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x06,0x05,0x06]
+
+v_min_i16_sdwa v5, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x06,0x0e,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x06,0x06,0x06]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x06,0x06,0x00]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x06,0x06,0x01]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x06,0x06,0x02]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x06,0x06,0x03]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x06,0x06,0x04]
+
+v_min_i16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x06,0x06,0x05]
+
+v_min_i16_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x64,0x01,0x06,0x06,0x0e]
+
+v_min_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0xe4,0x00,0x00]
+
+v_min_i16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x65,0x01,0xe4,0x00,0x00]
+
+v_min_i16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0xff,0xe4,0x00,0x00]
+
+v_min_i16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x64,0x01,0xe4,0x00,0x00]
+
+v_min_i16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0x1b,0x00,0x00]
+
+v_min_i16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0x40,0x01,0x00]
+
+v_min_i16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0x41,0x01,0x00]
+
+v_min_i16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0x42,0x01,0x00]
+
+v_min_i16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0x43,0x01,0x00]
+
+v_min_i16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0x30,0x01,0x00]
+
+v_min_i16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0x34,0x01,0x00]
+
+v_min_i16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0x38,0x01,0x00]
+
+v_min_i16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0x3c,0x01,0x00]
+
+v_min_i16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0x01,0x01,0x00]
+
+v_min_i16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0x0f,0x01,0x00]
+
+v_min_i16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0x11,0x01,0x00]
+
+v_min_i16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0x1f,0x01,0x00]
+
+v_min_i16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0x21,0x01,0x00]
+
+v_min_i16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0x2f,0x01,0x00]
+
+v_min_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0xe4,0x00,0x10]
+
+v_min_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0xe4,0x00,0x30]
+
+v_min_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0xe4,0x00,0xf0]
+
+v_min_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0xe4,0x00,0xf0]
+
+v_min_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0xe4,0x00,0x01]
+
+v_min_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0xe4,0x00,0x03]
+
+v_min_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0xe4,0x00,0x0f]
+
+v_min_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0xe4,0x00,0x0f]
+
+v_min_i16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x64,0x01,0xe4,0x08,0x00]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x06,0x06]
+
+v_ldexp_f16_sdwa v255, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xfe,0x67,0x01,0x06,0x06,0x06]
+
+v_ldexp_f16_sdwa v5, v255, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0xff,0x06,0x06,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x0b,0x66,0x01,0x06,0x06,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x26,0x06,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x06,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x00,0x06,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x01,0x06,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x02,0x06,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x03,0x06,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x04,0x06,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x05,0x06,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x0e,0x06,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x16,0x06,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:DWORD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x16,0x06,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x06,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x00,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x01,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x02,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x03,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x04,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x05,0x06]
+
+v_ldexp_f16_sdwa v5, -v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x16,0x06]
+
+v_ldexp_f16_sdwa v5, |v1|, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x26,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x06,0x06]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x06,0x00]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x06,0x01]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x06,0x02]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x06,0x03]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x06,0x04]
+
+v_ldexp_f16_sdwa v5, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x06,0x05]
+
+v_ldexp_f16_sdwa v5, v1, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x0a,0x66,0x01,0x06,0x06,0x0e]
+
+v_ldexp_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0xe4,0x00,0x00]
+
+v_ldexp_f16_dpp v255, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0xfe,0x67,0x01,0xe4,0x00,0x00]
+
+v_ldexp_f16_dpp v5, v255, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0xff,0xe4,0x00,0x00]
+
+v_ldexp_f16_dpp v5, v1, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0xfe,0x0b,0x66,0x01,0xe4,0x00,0x00]
+
+v_ldexp_f16_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0x1b,0x00,0x00]
+
+v_ldexp_f16_dpp v5, v1, v2 row_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0x40,0x01,0x00]
+
+v_ldexp_f16_dpp v5, v1, v2 row_half_mirror row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0x41,0x01,0x00]
+
+v_ldexp_f16_dpp v5, v1, v2 row_bcast:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0x42,0x01,0x00]
+
+v_ldexp_f16_dpp v5, v1, v2 row_bcast:31 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0x43,0x01,0x00]
+
+v_ldexp_f16_dpp v5, v1, v2 wave_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0x30,0x01,0x00]
+
+v_ldexp_f16_dpp v5, v1, v2 wave_rol:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0x34,0x01,0x00]
+
+v_ldexp_f16_dpp v5, v1, v2 wave_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0x38,0x01,0x00]
+
+v_ldexp_f16_dpp v5, v1, v2 wave_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0x3c,0x01,0x00]
+
+v_ldexp_f16_dpp v5, v1, v2 row_shl:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0x01,0x01,0x00]
+
+v_ldexp_f16_dpp v5, v1, v2 row_shl:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0x0f,0x01,0x00]
+
+v_ldexp_f16_dpp v5, v1, v2 row_shr:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0x11,0x01,0x00]
+
+v_ldexp_f16_dpp v5, v1, v2 row_shr:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0x1f,0x01,0x00]
+
+v_ldexp_f16_dpp v5, v1, v2 row_ror:1 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0x21,0x01,0x00]
+
+v_ldexp_f16_dpp v5, v1, v2 row_ror:15 row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0x2f,0x01,0x00]
+
+v_ldexp_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0xe4,0x00,0x10]
+
+v_ldexp_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0xe4,0x00,0x30]
+
+v_ldexp_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0xe4,0x00,0xf0]
+
+v_ldexp_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0xe4,0x00,0xf0]
+
+v_ldexp_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0xe4,0x00,0x01]
+
+v_ldexp_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0xe4,0x00,0x03]
+
+v_ldexp_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0xe4,0x00,0x0f]
+
+v_ldexp_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0xe4,0x00,0x0f]
+
+v_ldexp_f16_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0xe4,0x08,0x00]
+
+v_ldexp_f16_dpp v5, -v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0xe4,0x10,0x00]
+
+v_ldexp_f16_dpp v5, |v1|, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x04,0x0a,0x66,0x01,0xe4,0x20,0x00]
+
+v_cmp_class_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_class_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x20,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_class_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x21,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_class_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_class_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_class_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_class_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_class_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_class_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_class_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_class_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_class_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_class_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_class_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_class_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_class_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_class_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_class_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_class_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_class_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_class_f32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x20,0x7c,0x01,0x16,0x06,0x0e]
+
+v_cmpx_class_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_class_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x22,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_class_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x23,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_class_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_class_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_class_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_class_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_class_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_class_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_class_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_class_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_class_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_class_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_class_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_class_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_class_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_class_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_class_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_class_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_class_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_class_f32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x06,0x0e]
+
+v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_class_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x28,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_class_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x29,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_class_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_class_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_class_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_class_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_class_f16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x28,0x7c,0x01,0x16,0x06,0x0e]
+
+v_cmpx_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_class_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x2a,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_class_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x2b,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_class_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_class_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_class_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_class_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_class_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_class_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_class_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_class_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_class_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_class_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_class_f16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x2a,0x7c,0x01,0x16,0x06,0x0e]
+
+v_cmp_f_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_f_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_f_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x41,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_f_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_f_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_f_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_f_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_f_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_f_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_f_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_f_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_f_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_f_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_f_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_f_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_f_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_f_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_f_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_f_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_f_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_f_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_f_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_lt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_lt_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x43,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_lt_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_lt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_lt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_lt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_lt_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_lt_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_lt_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_lt_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_lt_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_lt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_lt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_lt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_lt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_lt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_lt_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_lt_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_eq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_eq_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x45,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_eq_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_eq_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_eq_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_eq_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_eq_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_eq_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_eq_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_eq_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_eq_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_eq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_eq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_eq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_eq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_eq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_eq_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_eq_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_le_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_le_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_le_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x47,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_le_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_le_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_le_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_le_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_le_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_le_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_le_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_le_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_le_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_le_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_le_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_le_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_le_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_le_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_le_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_le_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_le_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_le_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_le_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_gt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_gt_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x49,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_gt_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_gt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_gt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_gt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_gt_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_gt_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_gt_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_gt_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_gt_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_gt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_gt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_gt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_gt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_gt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_gt_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_gt_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_lg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_lg_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_lg_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x4b,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_lg_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_lg_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_lg_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_lg_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_lg_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_lg_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_lg_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_lg_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_lg_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_lg_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_lg_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_lg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_lg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_lg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_lg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_lg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_lg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_lg_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_lg_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_ge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_ge_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x4d,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_ge_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_ge_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_ge_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_ge_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_ge_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_ge_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_ge_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_ge_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_ge_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_ge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_ge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_ge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_ge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_ge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_ge_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_ge_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_o_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_o_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_o_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x4f,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_o_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_o_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_o_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_o_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_o_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_o_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_o_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_o_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_o_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_o_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_o_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_o_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_o_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_o_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_o_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_o_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_o_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_o_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_o_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_u_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_u_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_u_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x51,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_u_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_u_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_u_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_u_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_u_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_u_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_u_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_u_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_u_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_u_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_u_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_u_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_u_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_u_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_u_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_u_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_u_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_u_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_u_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_nge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nge_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_nge_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x53,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nge_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_nge_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nge_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_nge_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_nge_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_nge_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_nge_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_nge_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_nge_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_nge_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_nge_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_nge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_nge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_nge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_nge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_nge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_nge_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_nge_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_nlg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nlg_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_nlg_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x55,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nlg_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_nlg_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nlg_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_nlg_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_nlg_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_nlg_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_nlg_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_nlg_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_nlg_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_nlg_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_nlg_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nlg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_nlg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_nlg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_nlg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_nlg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_nlg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_nlg_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_nlg_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_ngt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_ngt_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_ngt_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x57,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_ngt_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_ngt_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_ngt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_ngt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_ngt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_ngt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_ngt_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_ngt_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_ngt_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_ngt_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_ngt_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_ngt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_ngt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_ngt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_ngt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_ngt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_ngt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_ngt_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_ngt_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_nle_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nle_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_nle_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x59,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nle_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_nle_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nle_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_nle_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_nle_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_nle_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_nle_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_nle_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_nle_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_nle_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_nle_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nle_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_nle_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_nle_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_nle_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_nle_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_nle_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_nle_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_nle_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_neq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_neq_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_neq_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x5b,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_neq_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_neq_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_neq_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_neq_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_neq_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_neq_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_neq_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_neq_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_neq_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_neq_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_neq_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_neq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_neq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_neq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_neq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_neq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_neq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_neq_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_neq_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_nlt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nlt_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_nlt_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x5d,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nlt_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_nlt_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nlt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_nlt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_nlt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_nlt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_nlt_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_nlt_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_nlt_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_nlt_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_nlt_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nlt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_nlt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_nlt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_nlt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_nlt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_nlt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_nlt_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_nlt_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_tru_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_tru_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_tru_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x5f,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_tru_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_tru_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_tru_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_tru_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_tru_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_tru_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_tru_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_tru_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_tru_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_tru_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_tru_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_tru_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_tru_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_tru_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_tru_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_tru_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_tru_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_tru_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_tru_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_f_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_f_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x61,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_f_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_f_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_f_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_f_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_f_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_f_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_f_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_f_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_f_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_f_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_f_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_f_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_f_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_f_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_f_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_f_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_lt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_lt_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x63,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_lt_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_lt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_lt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_lt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_lt_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_lt_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_lt_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_lt_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_lt_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_lt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_lt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_lt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_lt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_lt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_lt_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_lt_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_eq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_eq_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x65,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_eq_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_eq_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_eq_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_eq_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_eq_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_eq_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_eq_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_eq_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_eq_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_eq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_eq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_eq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_eq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_eq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_eq_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_eq_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_le_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_le_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x67,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_le_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_le_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_le_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_le_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_le_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_le_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_le_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_le_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_le_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_le_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_le_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_le_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_le_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_le_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_le_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_le_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_gt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_gt_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x69,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_gt_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_gt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_gt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_gt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_gt_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_gt_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_gt_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_gt_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_gt_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_gt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_gt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_gt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_gt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_gt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_gt_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_gt_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_lg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_lg_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_lg_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x6b,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_lg_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_lg_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_lg_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_lg_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_lg_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_lg_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_lg_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_lg_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_lg_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_lg_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_lg_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_lg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_lg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_lg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_lg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_lg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_lg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_lg_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_lg_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_ge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_ge_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x6d,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_ge_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_ge_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_ge_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_ge_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_ge_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_ge_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_ge_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_ge_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_ge_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_ge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_ge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_ge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_ge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_ge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_ge_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_ge_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_o_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_o_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_o_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x6f,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_o_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_o_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_o_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_o_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_o_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_o_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_o_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_o_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_o_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_o_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_o_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_o_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_o_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_o_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_o_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_o_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_o_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_o_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_o_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_u_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_u_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_u_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x71,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_u_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_u_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_u_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_u_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_u_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_u_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_u_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_u_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_u_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_u_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_u_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_u_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_u_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_u_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_u_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_u_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_u_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_u_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_u_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_nge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nge_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_nge_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x73,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nge_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_nge_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nge_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_nge_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_nge_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_nge_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_nge_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_nge_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_nge_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_nge_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_nge_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_nge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_nge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_nge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_nge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_nge_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_nge_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_nge_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nlg_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x75,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_nlg_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_nlg_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_nlg_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_ngt_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x77,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_ngt_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_ngt_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_ngt_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_nle_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nle_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_nle_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x79,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nle_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_nle_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nle_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_nle_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_nle_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_nle_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_nle_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_nle_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_nle_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_nle_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_nle_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nle_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_nle_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_nle_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_nle_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_nle_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_nle_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_nle_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_nle_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_neq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_neq_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_neq_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x7b,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_neq_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_neq_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_neq_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_neq_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_neq_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_neq_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_neq_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_neq_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_neq_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_neq_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_neq_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_neq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_neq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_neq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_neq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_neq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_neq_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_neq_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_neq_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nlt_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x7d,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_nlt_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_nlt_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_nlt_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_tru_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_tru_f16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_tru_f16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x7f,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_tru_f16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_tru_f16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_tru_f16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_tru_f16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_tru_f16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_tru_f16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_tru_f16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_tru_f16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_tru_f16_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_tru_f16_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_tru_f16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_tru_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_tru_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_tru_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_tru_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_tru_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_tru_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_tru_f16_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_tru_f16_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_f_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_f_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_f_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x81,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_f_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_f_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_f_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_f_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_f_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_f_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_f_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_f_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_f_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_f_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_f_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_f_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_f_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_f_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_f_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_f_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_f_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_f_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_f_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_lt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_lt_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x83,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_lt_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_lt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_lt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_lt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_lt_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_lt_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_lt_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_lt_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_lt_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_lt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_lt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_lt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_lt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_lt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_lt_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_lt_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_eq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_eq_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x85,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_eq_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_eq_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_eq_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_eq_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_eq_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_eq_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_eq_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_eq_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_eq_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_eq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_eq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_eq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_eq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_eq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_eq_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_eq_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_le_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_le_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_le_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x87,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_le_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_le_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_le_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_le_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_le_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_le_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_le_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_le_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_le_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_le_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_le_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_le_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_le_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_le_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_le_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_le_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_le_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_le_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_le_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_gt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_gt_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x89,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_gt_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_gt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_gt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_gt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_gt_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_gt_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_gt_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_gt_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_gt_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_gt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_gt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_gt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_gt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_gt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_gt_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_gt_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_lg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_lg_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_lg_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x8b,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_lg_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_lg_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_lg_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_lg_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_lg_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_lg_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_lg_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_lg_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_lg_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_lg_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_lg_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_lg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_lg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_lg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_lg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_lg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_lg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_lg_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_lg_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_ge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_ge_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x8d,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_ge_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_ge_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_ge_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_ge_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_ge_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_ge_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_ge_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_ge_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_ge_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_ge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_ge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_ge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_ge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_ge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_ge_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_ge_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_o_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_o_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_o_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x8f,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_o_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_o_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_o_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_o_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_o_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_o_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_o_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_o_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_o_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_o_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_o_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_o_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_o_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_o_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_o_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_o_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_o_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_o_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_o_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_u_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_u_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_u_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x91,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_u_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_u_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_u_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_u_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_u_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_u_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_u_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_u_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_u_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_u_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_u_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_u_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_u_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_u_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_u_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_u_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_u_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_u_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_u_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_nge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nge_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_nge_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x93,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nge_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_nge_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nge_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_nge_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_nge_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_nge_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_nge_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_nge_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_nge_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_nge_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_nge_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_nge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_nge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_nge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_nge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_nge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_nge_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_nge_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_nlg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nlg_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_nlg_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x95,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nlg_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_nlg_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nlg_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_nlg_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_nlg_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_nlg_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_nlg_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_nlg_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_nlg_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_nlg_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_nlg_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nlg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_nlg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_nlg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_nlg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_nlg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_nlg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_nlg_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_nlg_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_ngt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_ngt_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_ngt_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x97,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_ngt_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_ngt_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_ngt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_ngt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_ngt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_ngt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_ngt_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_ngt_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_ngt_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_ngt_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_ngt_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_ngt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_ngt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_ngt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_ngt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_ngt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_ngt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_ngt_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_ngt_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_nle_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nle_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_nle_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x99,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nle_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_nle_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nle_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_nle_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_nle_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_nle_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_nle_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_nle_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_nle_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_nle_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_nle_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nle_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_nle_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_nle_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_nle_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_nle_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_nle_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_nle_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_nle_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_neq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_neq_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_neq_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x9b,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_neq_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_neq_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_neq_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_neq_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_neq_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_neq_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_neq_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_neq_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_neq_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_neq_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_neq_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_neq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_neq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_neq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_neq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_neq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_neq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_neq_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_neq_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_nlt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nlt_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_nlt_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x9d,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nlt_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_nlt_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nlt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_nlt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_nlt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_nlt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_nlt_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_nlt_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_nlt_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_nlt_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_nlt_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_nlt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_nlt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_nlt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_nlt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_nlt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_nlt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_nlt_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_nlt_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_tru_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_tru_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmp_tru_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x9f,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_tru_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmp_tru_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_tru_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmp_tru_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmp_tru_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmp_tru_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmp_tru_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmp_tru_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmp_tru_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmp_tru_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmp_tru_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmp_tru_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmp_tru_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmp_tru_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmp_tru_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmp_tru_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmp_tru_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmp_tru_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmp_tru_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_f_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_f_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xa1,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_f_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_f_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_f_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_f_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_f_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_f_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_f_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_f_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_f_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_f_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_f_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_f_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_f_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_f_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_f_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_f_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_lt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_lt_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xa3,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_lt_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_lt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_lt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_lt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_lt_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_lt_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_lt_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_lt_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_lt_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_lt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_lt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_lt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_lt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_lt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_lt_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_lt_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_eq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_eq_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xa5,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_eq_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_eq_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_eq_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_eq_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_eq_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_eq_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_eq_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_eq_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_eq_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_eq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_eq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_eq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_eq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_eq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_eq_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_eq_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_le_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_le_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xa7,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_le_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_le_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_le_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_le_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_le_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_le_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_le_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_le_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_le_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_le_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_le_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_le_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_le_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_le_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_le_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_le_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_gt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_gt_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xa9,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_gt_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_gt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_gt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_gt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_gt_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_gt_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_gt_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_gt_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_gt_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_gt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_gt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_gt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_gt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_gt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_gt_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_gt_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_lg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_lg_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_lg_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xab,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_lg_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_lg_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_lg_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_lg_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_lg_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_lg_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_lg_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_lg_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_lg_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_lg_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_lg_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_lg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_lg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_lg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_lg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_lg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_lg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_lg_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_lg_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_ge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_ge_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xad,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_ge_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_ge_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_ge_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_ge_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_ge_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_ge_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_ge_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_ge_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_ge_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_ge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_ge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_ge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_ge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_ge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_ge_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_ge_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_o_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_o_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_o_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xaf,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_o_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_o_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_o_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_o_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_o_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_o_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_o_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_o_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_o_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_o_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_o_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_o_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_o_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_o_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_o_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_o_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_o_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_o_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_o_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_u_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_u_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_u_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xb1,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_u_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_u_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_u_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_u_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_u_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_u_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_u_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_u_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_u_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_u_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_u_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_u_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_u_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_u_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_u_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_u_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_u_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_u_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_u_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_nge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nge_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_nge_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xb3,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nge_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_nge_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nge_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_nge_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_nge_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_nge_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_nge_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_nge_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_nge_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_nge_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_nge_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_nge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_nge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_nge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_nge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_nge_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_nge_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_nge_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nlg_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xb5,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_nlg_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_nlg_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_nlg_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_ngt_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xb7,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_ngt_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_ngt_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_ngt_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_nle_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nle_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_nle_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xb9,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nle_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_nle_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nle_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_nle_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_nle_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_nle_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_nle_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_nle_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_nle_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_nle_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_nle_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nle_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_nle_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_nle_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_nle_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_nle_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_nle_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_nle_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_nle_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_neq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_neq_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_neq_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xbb,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_neq_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_neq_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_neq_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_neq_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_neq_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_neq_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_neq_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_neq_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_neq_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_neq_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_neq_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_neq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_neq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_neq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_neq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_neq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_neq_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_neq_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_neq_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nlt_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xbd,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_nlt_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_nlt_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_nlt_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmpx_tru_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_tru_f32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7c,0xff,0x16,0x06,0x06]
+
+v_cmpx_tru_f32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xbf,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_tru_f32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x36,0x06,0x06]
+
+v_cmpx_tru_f32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_tru_f32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x00,0x06]
+
+v_cmpx_tru_f32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x01,0x06]
+
+v_cmpx_tru_f32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x02,0x06]
+
+v_cmpx_tru_f32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x03,0x06]
+
+v_cmpx_tru_f32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x04,0x06]
+
+v_cmpx_tru_f32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x05,0x06]
+
+v_cmpx_tru_f32_sdwa vcc, -v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x16,0x06]
+
+v_cmpx_tru_f32_sdwa vcc, |v1|, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x26,0x06]
+
+v_cmpx_tru_f32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x06,0x06]
+
+v_cmpx_tru_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x06,0x00]
+
+v_cmpx_tru_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x06,0x01]
+
+v_cmpx_tru_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x06,0x02]
+
+v_cmpx_tru_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x06,0x03]
+
+v_cmpx_tru_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x06,0x04]
+
+v_cmpx_tru_f32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x06,0x05]
+
+v_cmpx_tru_f32_sdwa vcc, v1, -v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x06,0x16]
+
+v_cmpx_tru_f32_sdwa vcc, v1, |v2| src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7c,0x01,0x16,0x06,0x26]
+
+v_cmp_f_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_f_i16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_f_i16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x41,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_f_i16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_f_i16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_f_i16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_f_i16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_f_i16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_f_i16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_f_i16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_f_i16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_f_i16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_f_i16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_f_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x40,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_f_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x40,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_f_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x40,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_f_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x40,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_f_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x40,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_f_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x40,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_f_i16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x40,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_lt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_i16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_lt_i16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x43,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_i16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_lt_i16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_i16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_lt_i16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_lt_i16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_lt_i16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_lt_i16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_lt_i16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_lt_i16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_lt_i16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x42,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_lt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x42,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_lt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x42,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_lt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x42,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_lt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x42,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_lt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x42,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_lt_i16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x42,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_eq_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_i16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_eq_i16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x45,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_i16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_eq_i16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_i16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_eq_i16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_eq_i16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_eq_i16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_eq_i16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_eq_i16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_eq_i16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_eq_i16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x44,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_eq_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x44,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_eq_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x44,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_eq_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x44,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_eq_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x44,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_eq_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x44,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_eq_i16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x44,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_le_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_le_i16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_le_i16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x47,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_le_i16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_le_i16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_le_i16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_le_i16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_le_i16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_le_i16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_le_i16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_le_i16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_le_i16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_le_i16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_le_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x46,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_le_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x46,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_le_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x46,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_le_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x46,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_le_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x46,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_le_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x46,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_le_i16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x46,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_gt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_i16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_gt_i16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x49,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_i16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_gt_i16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_i16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_gt_i16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_gt_i16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_gt_i16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_gt_i16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_gt_i16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_gt_i16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_gt_i16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x48,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_gt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x48,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_gt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x48,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_gt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x48,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_gt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x48,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_gt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x48,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_gt_i16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x48,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_ne_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ne_i16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_ne_i16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x4b,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ne_i16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_ne_i16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ne_i16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_ne_i16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_ne_i16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_ne_i16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_ne_i16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_ne_i16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_ne_i16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_ne_i16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ne_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x4a,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_ne_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x4a,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_ne_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x4a,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_ne_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x4a,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_ne_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x4a,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_ne_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x4a,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_ne_i16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4a,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_ge_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_i16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_ge_i16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x4d,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_i16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_ge_i16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_i16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_ge_i16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_ge_i16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_ge_i16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_ge_i16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_ge_i16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_ge_i16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_ge_i16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x4c,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_ge_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x4c,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_ge_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x4c,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_ge_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x4c,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_ge_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x4c,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_ge_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x4c,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_ge_i16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4c,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_t_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_t_i16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_t_i16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x4f,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_t_i16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_t_i16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_t_i16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_t_i16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_t_i16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_t_i16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_t_i16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_t_i16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_t_i16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_t_i16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_t_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x4e,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_t_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x4e,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_t_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x4e,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_t_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x4e,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_t_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x4e,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_t_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x4e,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_t_i16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x4e,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_f_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_f_u16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_f_u16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x51,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_f_u16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_f_u16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_f_u16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_f_u16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_f_u16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_f_u16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_f_u16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_f_u16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_f_u16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_f_u16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_f_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x50,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_f_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x50,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_f_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x50,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_f_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x50,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_f_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x50,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_f_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x50,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_f_u16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x50,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_lt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_u16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_lt_u16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x53,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_u16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_lt_u16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_u16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_lt_u16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_lt_u16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_lt_u16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_lt_u16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_lt_u16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_lt_u16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_lt_u16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x52,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_lt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x52,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_lt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x52,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_lt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x52,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_lt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x52,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_lt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x52,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_lt_u16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x52,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_eq_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_u16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_eq_u16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x55,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_u16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_eq_u16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_u16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_eq_u16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_eq_u16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_eq_u16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_eq_u16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_eq_u16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_eq_u16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_eq_u16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x54,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_eq_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x54,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_eq_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x54,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_eq_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x54,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_eq_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x54,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_eq_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x54,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_eq_u16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x54,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_le_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_le_u16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_le_u16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x57,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_le_u16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_le_u16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_le_u16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_le_u16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_le_u16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_le_u16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_le_u16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_le_u16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_le_u16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_le_u16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_le_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x56,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_le_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x56,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_le_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x56,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_le_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x56,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_le_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x56,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_le_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x56,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_le_u16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x56,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_gt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_u16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_gt_u16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x59,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_u16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_gt_u16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_u16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_gt_u16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_gt_u16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_gt_u16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_gt_u16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_gt_u16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_gt_u16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_gt_u16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x58,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_gt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x58,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_gt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x58,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_gt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x58,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_gt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x58,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_gt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x58,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_gt_u16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x58,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_ne_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ne_u16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_ne_u16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x5b,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ne_u16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_ne_u16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ne_u16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_ne_u16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_ne_u16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_ne_u16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_ne_u16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_ne_u16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_ne_u16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_ne_u16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ne_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x5a,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_ne_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x5a,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_ne_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x5a,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_ne_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x5a,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_ne_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x5a,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_ne_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x5a,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_ne_u16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5a,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_ge_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_u16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_ge_u16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x5d,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_u16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_ge_u16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_u16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_ge_u16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_ge_u16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_ge_u16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_ge_u16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_ge_u16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_ge_u16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_ge_u16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x5c,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_ge_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x5c,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_ge_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x5c,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_ge_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x5c,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_ge_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x5c,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_ge_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x5c,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_ge_u16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5c,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_t_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_t_u16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_t_u16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x5f,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_t_u16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_t_u16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_t_u16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_t_u16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_t_u16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_t_u16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_t_u16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_t_u16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_t_u16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_t_u16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_t_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x5e,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_t_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x5e,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_t_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x5e,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_t_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x5e,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_t_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x5e,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_t_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x5e,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_t_u16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x5e,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_f_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_i16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_f_i16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x61,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_i16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_f_i16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_i16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_f_i16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_f_i16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_f_i16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_f_i16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_f_i16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_f_i16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_f_i16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x60,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_f_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x60,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_f_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x60,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_f_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x60,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_f_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x60,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_f_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x60,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_f_i16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x60,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_lt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_i16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_lt_i16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x63,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_i16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_lt_i16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_i16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_lt_i16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_lt_i16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_lt_i16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_lt_i16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_lt_i16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_lt_i16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_lt_i16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x62,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_lt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x62,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_lt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x62,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_lt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x62,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_lt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x62,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_lt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x62,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_lt_i16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x62,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_eq_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_i16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_eq_i16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x65,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_i16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_eq_i16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_i16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_eq_i16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_eq_i16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_eq_i16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_eq_i16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_eq_i16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_eq_i16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_eq_i16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x64,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_eq_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x64,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_eq_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x64,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_eq_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x64,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_eq_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x64,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_eq_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x64,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_eq_i16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x64,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_le_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_i16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_le_i16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x67,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_i16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_le_i16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_i16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_le_i16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_le_i16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_le_i16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_le_i16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_le_i16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_le_i16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_le_i16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x66,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_le_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x66,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_le_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x66,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_le_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x66,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_le_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x66,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_le_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x66,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_le_i16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x66,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_gt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_i16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_gt_i16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x69,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_i16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_gt_i16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_i16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_gt_i16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_gt_i16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_gt_i16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_gt_i16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_gt_i16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_gt_i16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_gt_i16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x68,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_gt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x68,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_gt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x68,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_gt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x68,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_gt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x68,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_gt_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x68,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_gt_i16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x68,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_ne_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ne_i16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_ne_i16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x6b,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ne_i16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_ne_i16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ne_i16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_ne_i16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_ne_i16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_ne_i16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_ne_i16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_ne_i16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_ne_i16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_ne_i16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ne_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x6a,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_ne_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x6a,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_ne_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x6a,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_ne_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x6a,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_ne_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x6a,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_ne_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x6a,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_ne_i16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6a,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_ge_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_i16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_ge_i16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x6d,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_i16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_ge_i16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_i16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_ge_i16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_ge_i16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_ge_i16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_ge_i16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_ge_i16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_ge_i16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_ge_i16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x6c,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_ge_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x6c,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_ge_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x6c,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_ge_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x6c,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_ge_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x6c,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_ge_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x6c,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_ge_i16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6c,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_t_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_t_i16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_t_i16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x6f,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_t_i16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_t_i16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_t_i16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_t_i16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_t_i16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_t_i16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_t_i16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_t_i16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_t_i16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_t_i16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_t_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x6e,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_t_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x6e,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_t_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x6e,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_t_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x6e,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_t_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x6e,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_t_i16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x6e,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_t_i16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x6e,0x7d,0x01,0x16,0x06,0x0e]
+
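+// Note on the encodings checked here (a sketch, assuming the GFX8/VI SDWA
+// layout; the exact bit positions are not spelled out by this test): byte 0
+// is 0xf9, the SDWA marker in the VOPC src0 field; bytes 1-3 carry vsrc1 and
+// the opcode (hence byte 1 flips to 0xfe and byte 2 gains a carry bit for
+// v255); byte 4 is the SDWA src0 VGPR; byte 5 holds dst_sel/clamp (0x16
+// plain, 0x36 with clamp); bytes 6-7 hold src0_sel and src1_sel, where
+// 0x00-0x05 select BYTE_0-WORD_1, 0x06 selects DWORD, and 0x0e is DWORD
+// with sext().
+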
+v_cmpx_f_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_u16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_f_u16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x71,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_u16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_f_u16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_u16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_f_u16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_f_u16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_f_u16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_f_u16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_f_u16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_f_u16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_f_u16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x70,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_f_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x70,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_f_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x70,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_f_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x70,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_f_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x70,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_f_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x70,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_f_u16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x70,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_lt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_u16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_lt_u16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x73,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_u16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_lt_u16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_u16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_lt_u16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_lt_u16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_lt_u16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_lt_u16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_lt_u16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_lt_u16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_lt_u16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x72,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_lt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x72,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_lt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x72,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_lt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x72,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_lt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x72,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_lt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x72,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_lt_u16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x72,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_eq_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_u16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_eq_u16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x75,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_u16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_eq_u16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_u16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_eq_u16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_eq_u16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_eq_u16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_eq_u16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_eq_u16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_eq_u16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_eq_u16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x74,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_eq_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x74,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_eq_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x74,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_eq_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x74,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_eq_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x74,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_eq_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x74,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_eq_u16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x74,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_le_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_u16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_le_u16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x77,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_u16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_le_u16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_u16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_le_u16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_le_u16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_le_u16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_le_u16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_le_u16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_le_u16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_le_u16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x76,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_le_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x76,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_le_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x76,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_le_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x76,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_le_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x76,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_le_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x76,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_le_u16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x76,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_gt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_u16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_gt_u16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x79,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_u16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_gt_u16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_u16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_gt_u16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_gt_u16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_gt_u16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_gt_u16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_gt_u16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_gt_u16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_gt_u16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x78,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_gt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x78,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_gt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x78,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_gt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x78,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_gt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x78,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_gt_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x78,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_gt_u16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x78,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_ne_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ne_u16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_ne_u16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x7b,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ne_u16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_ne_u16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ne_u16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_ne_u16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_ne_u16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_ne_u16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_ne_u16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_ne_u16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_ne_u16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_ne_u16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ne_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x7a,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_ne_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x7a,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_ne_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x7a,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_ne_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x7a,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_ne_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x7a,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_ne_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x7a,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_ne_u16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7a,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_ge_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_u16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_ge_u16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x7d,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_u16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_ge_u16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_u16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_ge_u16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_ge_u16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_ge_u16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_ge_u16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_ge_u16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_ge_u16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_ge_u16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x7c,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_ge_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x7c,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_ge_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x7c,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_ge_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x7c,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_ge_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x7c,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_ge_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x7c,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_ge_u16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7c,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_t_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_t_u16_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_t_u16_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x7f,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_t_u16_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_t_u16_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_t_u16_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_t_u16_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_t_u16_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_t_u16_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_t_u16_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_t_u16_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_t_u16_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_t_u16_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_t_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x7e,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_t_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x7e,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_t_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x7e,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_t_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x7e,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_t_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x7e,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_t_u16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x7e,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_t_u16_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x7e,0x7d,0x01,0x16,0x06,0x0e]
+
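+// From here the tests switch from v_cmpx_* (which also updates EXEC) to the
+// plain v_cmp_* comparisons; the SDWA operand-selection matrix being checked
+// is the same, and the opcode byte still steps by two per condition code
+// (f, lt, eq, le, gt, ne, ge, t).
+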
+v_cmp_f_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_f_i32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_f_i32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x81,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_f_i32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_f_i32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_f_i32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_f_i32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_f_i32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_f_i32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_f_i32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_f_i32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_f_i32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_f_i32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_f_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x80,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_f_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x80,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_f_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x80,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_f_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x80,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_f_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x80,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_f_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x80,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_f_i32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x80,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_lt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_i32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_lt_i32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x83,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_i32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_lt_i32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_i32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_lt_i32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_lt_i32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_lt_i32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_lt_i32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_lt_i32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_lt_i32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_lt_i32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x82,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_lt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x82,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_lt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x82,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_lt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x82,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_lt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x82,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_lt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x82,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_lt_i32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x82,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_eq_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_i32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_eq_i32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x85,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_i32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_eq_i32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_i32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_eq_i32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_eq_i32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_eq_i32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_eq_i32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_eq_i32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_eq_i32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_eq_i32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x84,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_eq_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x84,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_eq_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x84,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_eq_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x84,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_eq_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x84,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_eq_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x84,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_eq_i32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x84,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_le_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_le_i32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_le_i32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x87,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_le_i32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_le_i32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_le_i32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_le_i32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_le_i32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_le_i32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_le_i32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_le_i32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_le_i32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_le_i32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_le_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x86,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_le_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x86,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_le_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x86,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_le_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x86,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_le_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x86,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_le_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x86,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_le_i32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x86,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_gt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_i32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_gt_i32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x89,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_i32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_gt_i32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_i32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_gt_i32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_gt_i32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_gt_i32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_gt_i32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_gt_i32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_gt_i32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_gt_i32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x88,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_gt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x88,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_gt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x88,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_gt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x88,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_gt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x88,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_gt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x88,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_gt_i32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x88,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_ne_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ne_i32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_ne_i32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x8b,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ne_i32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_ne_i32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ne_i32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_ne_i32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_ne_i32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_ne_i32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_ne_i32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_ne_i32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_ne_i32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_ne_i32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ne_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x8a,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_ne_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x8a,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_ne_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x8a,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_ne_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x8a,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_ne_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x8a,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_ne_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x8a,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_ne_i32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8a,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_ge_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_i32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_ge_i32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x8d,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_i32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_ge_i32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_i32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_ge_i32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_ge_i32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_ge_i32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_ge_i32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_ge_i32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_ge_i32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_ge_i32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x8c,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_ge_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x8c,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_ge_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x8c,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_ge_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x8c,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_ge_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x8c,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_ge_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x8c,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_ge_i32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8c,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_t_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_t_i32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_t_i32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x8f,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_t_i32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_t_i32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_t_i32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_t_i32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_t_i32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_t_i32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_t_i32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_t_i32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_t_i32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_t_i32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_t_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x8e,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_t_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x8e,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_t_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x8e,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_t_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x8e,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_t_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x8e,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_t_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x8e,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_t_i32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x8e,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_f_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_f_u32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_f_u32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x91,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_f_u32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_f_u32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_f_u32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_f_u32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_f_u32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_f_u32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_f_u32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_f_u32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_f_u32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_f_u32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_f_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x90,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_f_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x90,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_f_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x90,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_f_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x90,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_f_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x90,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_f_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x90,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_f_u32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x90,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_lt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_u32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_lt_u32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x93,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_u32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_lt_u32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_u32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_lt_u32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_lt_u32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_lt_u32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_lt_u32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_lt_u32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_lt_u32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_lt_u32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_lt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x92,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_lt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x92,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_lt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x92,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_lt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x92,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_lt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x92,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_lt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x92,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_lt_u32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x92,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_eq_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_u32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_eq_u32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x95,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_u32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_eq_u32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_u32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_eq_u32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_eq_u32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_eq_u32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_eq_u32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_eq_u32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_eq_u32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_eq_u32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_eq_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x94,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_eq_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x94,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_eq_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x94,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_eq_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x94,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_eq_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x94,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_eq_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x94,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_eq_u32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x94,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_le_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_le_u32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_le_u32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x97,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_le_u32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_le_u32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_le_u32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_le_u32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_le_u32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_le_u32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_le_u32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_le_u32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_le_u32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_le_u32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_le_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x96,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_le_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x96,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_le_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x96,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_le_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x96,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_le_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x96,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_le_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x96,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_le_u32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x96,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_gt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_u32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_gt_u32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x99,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_u32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_gt_u32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_u32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_gt_u32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_gt_u32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_gt_u32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_gt_u32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_gt_u32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_gt_u32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_gt_u32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_gt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x98,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_gt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x98,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_gt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x98,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_gt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x98,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_gt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x98,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_gt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x98,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_gt_u32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x98,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_ne_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ne_u32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_ne_u32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x9b,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ne_u32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_ne_u32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ne_u32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_ne_u32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_ne_u32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_ne_u32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_ne_u32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_ne_u32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_ne_u32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_ne_u32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ne_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x9a,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_ne_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x9a,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_ne_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x9a,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_ne_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x9a,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_ne_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x9a,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_ne_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x9a,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_ne_u32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9a,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_ge_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_u32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_ge_u32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x9d,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_u32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_ge_u32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_u32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_ge_u32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_ge_u32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_ge_u32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_ge_u32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_ge_u32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_ge_u32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_ge_u32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_ge_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x9c,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_ge_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x9c,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_ge_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x9c,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_ge_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x9c,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_ge_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x9c,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_ge_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x9c,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_ge_u32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9c,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmp_t_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_t_u32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmp_t_u32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0x9f,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_t_u32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmp_t_u32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_t_u32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmp_t_u32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmp_t_u32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmp_t_u32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmp_t_u32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmp_t_u32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmp_t_u32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmp_t_u32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmp_t_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0x9e,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmp_t_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0x9e,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmp_t_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0x9e,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmp_t_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0x9e,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmp_t_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0x9e,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmp_t_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0x9e,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmp_t_u32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0x9e,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_f_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_i32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_f_i32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xa1,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_i32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_f_i32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_i32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_f_i32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_f_i32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_f_i32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_f_i32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_f_i32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_f_i32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_f_i32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xa0,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_f_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xa0,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_f_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xa0,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_f_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xa0,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_f_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xa0,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_f_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xa0,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_f_i32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa0,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_lt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_i32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_lt_i32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xa3,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_i32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_lt_i32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_i32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_lt_i32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_lt_i32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_lt_i32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_lt_i32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_lt_i32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_lt_i32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_lt_i32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xa2,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_lt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xa2,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_lt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xa2,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_lt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xa2,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_lt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xa2,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_lt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xa2,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_lt_i32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa2,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_eq_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_i32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_eq_i32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xa5,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_i32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_eq_i32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_i32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_eq_i32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_eq_i32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_eq_i32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_eq_i32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_eq_i32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_eq_i32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_eq_i32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xa4,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_eq_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xa4,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_eq_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xa4,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_eq_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xa4,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_eq_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xa4,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_eq_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xa4,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_eq_i32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa4,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_le_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_i32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_le_i32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xa7,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_i32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_le_i32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_i32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_le_i32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_le_i32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_le_i32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_le_i32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_le_i32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_le_i32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_le_i32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xa6,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_le_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xa6,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_le_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xa6,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_le_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xa6,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_le_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xa6,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_le_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xa6,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_le_i32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa6,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_gt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_i32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_gt_i32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xa9,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_i32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_gt_i32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_i32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_gt_i32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_gt_i32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_gt_i32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_gt_i32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_gt_i32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_gt_i32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_gt_i32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xa8,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_gt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xa8,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_gt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xa8,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_gt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xa8,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_gt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xa8,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_gt_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xa8,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_gt_i32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xa8,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_ne_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ne_i32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_ne_i32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xab,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ne_i32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_ne_i32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ne_i32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_ne_i32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_ne_i32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_ne_i32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_ne_i32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_ne_i32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_ne_i32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_ne_i32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ne_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xaa,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_ne_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xaa,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_ne_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xaa,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_ne_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xaa,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_ne_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xaa,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_ne_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xaa,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_ne_i32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xaa,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_ge_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_i32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_ge_i32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xad,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_i32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_ge_i32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_i32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_ge_i32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_ge_i32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_ge_i32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_ge_i32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_ge_i32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_ge_i32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_ge_i32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xac,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_ge_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xac,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_ge_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xac,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_ge_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xac,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_ge_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xac,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_ge_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xac,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_ge_i32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xac,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_t_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_t_i32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_t_i32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xaf,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_t_i32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_t_i32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_t_i32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_t_i32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_t_i32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_t_i32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_t_i32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_t_i32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_t_i32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_t_i32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_t_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xae,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_t_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xae,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_t_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xae,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_t_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xae,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_t_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xae,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_t_i32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xae,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_t_i32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xae,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_f_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_u32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_f_u32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xb1,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_u32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_f_u32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_u32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_f_u32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_f_u32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_f_u32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_f_u32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_f_u32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_f_u32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_f_u32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_f_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xb0,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_f_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xb0,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_f_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xb0,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_f_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xb0,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_f_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xb0,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_f_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xb0,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_f_u32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb0,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_lt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_u32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_lt_u32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xb3,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_u32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_lt_u32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_u32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_lt_u32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_lt_u32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_lt_u32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_lt_u32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_lt_u32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_lt_u32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_lt_u32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_lt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xb2,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_lt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xb2,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_lt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xb2,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_lt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xb2,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_lt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xb2,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_lt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xb2,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_lt_u32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb2,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_eq_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_u32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_eq_u32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xb5,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_u32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_eq_u32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_u32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_eq_u32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_eq_u32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_eq_u32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_eq_u32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_eq_u32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_eq_u32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_eq_u32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_eq_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xb4,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_eq_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xb4,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_eq_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xb4,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_eq_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xb4,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_eq_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xb4,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_eq_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xb4,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_eq_u32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb4,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_le_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_u32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_le_u32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xb7,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_u32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_le_u32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_u32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_le_u32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_le_u32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_le_u32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_le_u32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_le_u32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_le_u32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_le_u32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_le_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xb6,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_le_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xb6,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_le_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xb6,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_le_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xb6,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_le_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xb6,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_le_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xb6,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_le_u32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb6,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_gt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_u32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_gt_u32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xb9,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_u32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_gt_u32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_u32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_gt_u32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_gt_u32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_gt_u32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_gt_u32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_gt_u32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_gt_u32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_gt_u32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_gt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xb8,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_gt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xb8,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_gt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xb8,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_gt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xb8,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_gt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xb8,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_gt_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xb8,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_gt_u32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xb8,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_ne_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ne_u32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_ne_u32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xbb,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ne_u32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_ne_u32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ne_u32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_ne_u32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_ne_u32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_ne_u32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_ne_u32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_ne_u32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_ne_u32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_ne_u32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ne_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xba,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_ne_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xba,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_ne_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xba,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_ne_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xba,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_ne_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xba,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_ne_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xba,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_ne_u32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xba,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_ge_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_u32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_ge_u32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xbd,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_u32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_ge_u32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_u32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_ge_u32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_ge_u32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_ge_u32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_ge_u32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_ge_u32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_ge_u32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_ge_u32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_ge_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xbc,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_ge_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xbc,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_ge_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xbc,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_ge_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xbc,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_ge_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xbc,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_ge_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xbc,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_ge_u32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbc,0x7d,0x01,0x16,0x06,0x0e]
+
+v_cmpx_t_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_t_u32_sdwa vcc, v255, v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7d,0xff,0x16,0x06,0x06]
+
+v_cmpx_t_u32_sdwa vcc, v1, v255 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0xfe,0xbf,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_t_u32_sdwa vcc, v1, v2 clamp src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x36,0x06,0x06]
+
+v_cmpx_t_u32_sdwa vcc, v1, v2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_t_u32_sdwa vcc, v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x16,0x00,0x06]
+
+v_cmpx_t_u32_sdwa vcc, v1, v2 src0_sel:BYTE_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x16,0x01,0x06]
+
+v_cmpx_t_u32_sdwa vcc, v1, v2 src0_sel:BYTE_2 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x16,0x02,0x06]
+
+v_cmpx_t_u32_sdwa vcc, v1, v2 src0_sel:BYTE_3 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x16,0x03,0x06]
+
+v_cmpx_t_u32_sdwa vcc, v1, v2 src0_sel:WORD_0 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x16,0x04,0x06]
+
+v_cmpx_t_u32_sdwa vcc, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x16,0x05,0x06]
+
+v_cmpx_t_u32_sdwa vcc, sext(v1), v2 src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x16,0x0e,0x06]
+
+v_cmpx_t_u32_sdwa vcc, v1, v2 src0_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x16,0x06,0x06]
+
+v_cmpx_t_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_0
+// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x16,0x06,0x00]
+
+v_cmpx_t_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_1
+// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x16,0x06,0x01]
+
+v_cmpx_t_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_2
+// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x16,0x06,0x02]
+
+v_cmpx_t_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:BYTE_3
+// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x16,0x06,0x03]
+
+v_cmpx_t_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_0
+// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x16,0x06,0x04]
+
+v_cmpx_t_u32_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:WORD_1
+// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x16,0x06,0x05]
+
+v_cmpx_t_u32_sdwa vcc, v1, sext(v2) src0_sel:DWORD src1_sel:DWORD
+// CHECK: [0xf9,0x04,0xbe,0x7d,0x01,0x16,0x06,0x0e]
+
+s_rfe_restore_b64 s[4:5], s2
+// CHECK: [0x04,0x02,0x80,0x95]
+
+v_mov_fed_b32_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// CHECK: [0xfa,0x12,0x0a,0x7e,0x01,0xe4,0x00,0x00]
+
+v_mov_fed_b32_e64 v5, s1
+// CHECK: [0x05,0x00,0x49,0xd1,0x01,0x00,0x00,0x00]
+
+v_mov_fed_b32_sdwa v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD
+// CHECK: [0xf9,0x12,0x0a,0x7e,0x01,0x06,0x06,0x06]
+
+v_perm_b32 v5, s1, 0, v255
+// CHECK: [0x05,0x00,0xed,0xd1,0x01,0x00,0xfd,0x07]
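The trailing two bytes of each SDWA CHECK pattern above are the src0_sel and src1_sel fields. A minimal Python sketch of how those bytes appear to be formed, with field values inferred purely from the encodings in this hunk (an illustration, not LLVM's encoder; the helper name is invented):

SEL = {"BYTE_0": 0x0, "BYTE_1": 0x1, "BYTE_2": 0x2, "BYTE_3": 0x3,
       "WORD_0": 0x4, "WORD_1": 0x5, "DWORD": 0x6}

def sdwa_sel_byte(sel="DWORD", sext=False):
    # sext() appears to OR in bit 3: DWORD (0x06) becomes 0x0e in the
    # sext(...) lines.  The clamp flag lives in the byte just before the
    # sel bytes (0x16 -> 0x36, i.e. bit 5), not here.
    return SEL[sel] | (0x08 if sext else 0)

assert sdwa_sel_byte("WORD_1") == 0x05            # src?_sel:WORD_1 lines
assert sdwa_sel_byte("DWORD", sext=True) == 0x0e  # sext(v1)/sext(v2) lines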
diff --git a/test/MC/AMDGPU/hsa-exp.s b/test/MC/AMDGPU/hsa-exp.s
index 488afc5b400f..0323056b7bb2 100644
--- a/test/MC/AMDGPU/hsa-exp.s
+++ b/test/MC/AMDGPU/hsa-exp.s
@@ -65,7 +65,7 @@ amd_kernel_code_t_minimal:
// ASM-LABEL: {{^}}amd_kernel_code_t_minimal:
// ASM: .amd_kernel_code_t
// ASM: amd_code_version_major = 7
-// ASM: amd_code_version_minor = 0
+// ASM: amd_code_version_minor = 1
// ASM: amd_machine_kind = 1
// ASM: amd_machine_version_major = 7
// ASM: amd_machine_version_minor = 0
@@ -124,6 +124,6 @@ amd_kernel_code_t_minimal:
// ASM: group_segment_alignment = 4
// ASM: private_segment_alignment = 4
// ASM: wavefront_size = 6
-// ASM: call_convention = 0
+// ASM: call_convention = -1
// ASM: runtime_loader_kernel_symbol = 0
// ASM: .end_amd_kernel_code_t
diff --git a/test/MC/AMDGPU/hsa.s b/test/MC/AMDGPU/hsa.s
index b428c817ea67..5f1297e0f376 100644
--- a/test/MC/AMDGPU/hsa.s
+++ b/test/MC/AMDGPU/hsa.s
@@ -37,25 +37,19 @@
.hsa_code_object_isa 7,0,0,"AMD","AMDGPU"
// ASM: .hsa_code_object_isa 7,0,0,"AMD","AMDGPU"
-.amdgpu_runtime_metadata
- {
- amd.MDVersion: [ 2, 0 ]
- amd.Kernels: [
- { amd.KernelName: amd_kernel_code_t_test_all },
- { amd.KernelName: amd_kernel_code_t_minimal }
- ]
- }
-.end_amdgpu_runtime_metadata
+.amdgpu_code_object_metadata
+ Version: [ 3, 0 ]
+ Kernels:
+ - Name: amd_kernel_code_t_test_all
+ - Name: amd_kernel_code_t_minimal
+.end_amdgpu_code_object_metadata
-// ASM: .amdgpu_runtime_metadata
-// ASM: {
-// ASM: amd.MDVersion: [ 2, 0 ]
-// ASM: amd.Kernels: [
-// ASM: { amd.KernelName: amd_kernel_code_t_test_all },
-// ASM: { amd.KernelName: amd_kernel_code_t_minimal }
-// ASM: ]
-// ASM: }
-// ASM: .end_amdgpu_runtime_metadata
+// ASM: .amdgpu_code_object_metadata
+// ASM: Version: [ 3, 0 ]
+// ASM: Kernels:
+// ASM: - Name: amd_kernel_code_t_test_all
+// ASM: - Name: amd_kernel_code_t_minimal
+// ASM: .end_amdgpu_code_object_metadata
.amdgpu_hsa_kernel amd_kernel_code_t_test_all
.amdgpu_hsa_kernel amd_kernel_code_t_minimal
@@ -214,7 +208,7 @@ amd_kernel_code_t_minimal:
// ASM-LABEL: {{^}}amd_kernel_code_t_minimal:
// ASM: .amd_kernel_code_t
// ASM: amd_code_version_major = 1
-// ASM: amd_code_version_minor = 0
+// ASM: amd_code_version_minor = 1
// ASM: amd_machine_kind = 1
// ASM: amd_machine_version_major = 7
// ASM: amd_machine_version_minor = 0
@@ -273,6 +267,6 @@ amd_kernel_code_t_minimal:
// ASM: group_segment_alignment = 4
// ASM: private_segment_alignment = 4
// ASM: wavefront_size = 6
-// ASM: call_convention = 0
+// ASM: call_convention = -1
// ASM: runtime_loader_kernel_symbol = 0
// ASM: .end_amd_kernel_code_t
diff --git a/test/MC/AMDGPU/literals.s b/test/MC/AMDGPU/literals.s
index af3c47b7ce50..c18da5dd8ffe 100644
--- a/test/MC/AMDGPU/literals.s
+++ b/test/MC/AMDGPU/literals.s
@@ -248,12 +248,12 @@ v_trunc_f32_e32 v0, -13
// VI: v_fract_f64_e32 v[0:1], -13 ; encoding: [0xcd,0x64,0x00,0x7e]
v_fract_f64_e32 v[0:1], -13
-// SICI: v_trunc_f32_e64 v0, -13 ; encoding: [0x00,0x00,0x42,0xd3,0x8d,0x00,0x00,0x20]
-// VI: v_trunc_f32_e64 v0, -13 ; encoding: [0x00,0x00,0x5c,0xd1,0x8d,0x00,0x00,0x20]
+// SICI: v_trunc_f32_e64 v0, -13 ; encoding: [0x00,0x00,0x42,0xd3,0xcd,0x00,0x00,0x00]
+// VI: v_trunc_f32_e64 v0, -13 ; encoding: [0x00,0x00,0x5c,0xd1,0xcd,0x00,0x00,0x00]
v_trunc_f32_e64 v0, -13
-// SICI: v_fract_f64_e64 v[0:1], -13 ; encoding: [0x00,0x00,0x7c,0xd3,0x8d,0x00,0x00,0x20]
-// VI: v_fract_f64_e64 v[0:1], -13 ; encoding: [0x00,0x00,0x72,0xd1,0x8d,0x00,0x00,0x20]
+// SICI: v_fract_f64_e64 v[0:1], -13 ; encoding: [0x00,0x00,0x7c,0xd3,0xcd,0x00,0x00,0x00]
+// VI: v_fract_f64_e64 v[0:1], -13 ; encoding: [0x00,0x00,0x72,0xd1,0xcd,0x00,0x00,0x00]
v_fract_f64_e64 v[0:1], -13
// SICI: v_trunc_f32_e32 v0, 35 ; encoding: [0xa3,0x42,0x00,0x7e]
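The updated encodings above reflect -13 being emitted directly as the inline constant 0xcd instead of inline +13 (0x8d) combined with a source-negation modifier. A small sketch of the apparent inline-integer mapping, checked against the bytes above (illustrative helper only):

def inline_int_operand(v):
    # Inline-constant operand codes, read off these encodings:
    # 0..64 map to 0x80..0xc0 and -1..-16 map to 0xc1..0xd0.
    if 0 <= v <= 64:
        return 0x80 + v
    if -16 <= v <= -1:
        return 0xc0 - v
    raise ValueError("outside the inline range; a 32-bit literal is needed")

assert inline_int_operand(35) == 0xa3    # v_trunc_f32_e32 v0, 35
assert inline_int_operand(-13) == 0xcd   # the updated -13 encodings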
diff --git a/test/MC/AMDGPU/literalv216-err.s b/test/MC/AMDGPU/literalv216-err.s
new file mode 100644
index 000000000000..09739024e9e8
--- /dev/null
+++ b/test/MC/AMDGPU/literalv216-err.s
@@ -0,0 +1,22 @@
+// RUN: not llvm-mc -arch=amdgcn -mcpu=gfx901 -show-encoding %s 2>&1 | FileCheck -check-prefix=GFX9 %s
+
+v_pk_add_f16 v1, -17, v2
+// GFX9: :19: error: invalid operand for instruction
+
+v_pk_add_f16 v1, 65, v2
+// GFX9: :18: error: invalid operand for instruction
+
+v_pk_add_f16 v1, 64.0, v2
+// GFX9: :18: error: invalid operand for instruction
+
+v_pk_add_f16 v1, -0.15915494, v2
+// GFX9: :19: error: invalid operand for instruction
+
+v_pk_add_f16 v1, -0.0, v2
+// GFX9: :19: error: invalid operand for instruction
+
+v_pk_add_f16 v1, -32768, v2
+// GFX9: :19: error: invalid operand for instruction
+
+v_pk_add_f16 v1, 32767, v2
+// GFX9: :18: error: invalid operand for instruction
diff --git a/test/MC/AMDGPU/literalv216.s b/test/MC/AMDGPU/literalv216.s
new file mode 100644
index 000000000000..1ea05d55d754
--- /dev/null
+++ b/test/MC/AMDGPU/literalv216.s
@@ -0,0 +1,112 @@
+// RUN: llvm-mc -arch=amdgcn -mcpu=gfx901 -show-encoding %s | FileCheck -check-prefix=GFX9 %s
+
+v_pk_add_f16 v1, 0, v2
+// GFX9: v_pk_add_f16 v1, 0, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0x80,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 0.0, v2
+// GFX9: v_pk_add_f16 v1, 0, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0x80,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, v2, 0
+// GFX9: v_pk_add_f16 v1, v2, 0 ; encoding: [0x01,0x00,0x8f,0xd3,0x02,0x01,0x01,0x18]
+
+v_pk_add_f16 v1, v2, 0.0
+// GFX9: v_pk_add_f16 v1, v2, 0 ; encoding: [0x01,0x00,0x8f,0xd3,0x02,0x01,0x01,0x18]
+
+v_pk_add_f16 v1, 1.0, v2
+// GFX9: v_pk_add_f16 v1, 1.0, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xf2,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, -1.0, v2
+// GFX9: v_pk_add_f16 v1, -1.0, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xf3,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, -0.5, v2
+// GFX9: v_pk_add_f16 v1, -0.5, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xf1,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 0.5, v2
+// GFX9: v_pk_add_f16 v1, 0.5, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xf0,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 2.0, v2
+// GFX9: v_pk_add_f16 v1, 2.0, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xf4,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, -2.0, v2
+// GFX9: v_pk_add_f16 v1, -2.0, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xf5,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 4.0, v2
+// GFX9: v_pk_add_f16 v1, 4.0, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xf6,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, -4.0, v2
+// GFX9: v_pk_add_f16 v1, -4.0, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xf7,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 0.15915494, v2
+// GFX9: v_pk_add_f16 v1, 0.15915494, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xf8,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, -1, v2
+// GFX9: v_pk_add_f16 v1, -1, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xc1,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, -2, v2
+// GFX9: v_pk_add_f16 v1, -2, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xc2,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, -3, v2
+// GFX9: v_pk_add_f16 v1, -3, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xc3,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, -16, v2
+// GFX9: v_pk_add_f16 v1, -16, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xd0,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 1, v2
+// GFX9: v_pk_add_f16 v1, 1, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0x81,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 2, v2
+// GFX9: v_pk_add_f16 v1, 2, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0x82,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 3, v2
+// GFX9: v_pk_add_f16 v1, 3, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0x83,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 4, v2
+// GFX9: v_pk_add_f16 v1, 4, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0x84,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 15, v2
+// GFX9: v_pk_add_f16 v1, 15, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0x8f,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 16, v2
+// GFX9: v_pk_add_f16 v1, 16, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0x90,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 63, v2
+// GFX9: v_pk_add_f16 v1, 63, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xbf,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 64, v2
+// GFX9: v_pk_add_f16 v1, 64, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xc0,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 0x0001, v2
+// GFX9: v_pk_add_f16 v1, 1, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0x81,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 0xffff, v2
+// GFX9: v_pk_add_f16 v1, -1, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xc1,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 0x3c00, v2
+// GFX9: v_pk_add_f16 v1, 1.0, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xf2,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 0xbc00, v2
+// GFX9: v_pk_add_f16 v1, -1.0, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xf3,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 0x3800, v2
+// GFX9: v_pk_add_f16 v1, 0.5, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xf0,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 0xb800, v2
+// GFX9: v_pk_add_f16 v1, -0.5, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xf1,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 0x4000, v2
+// GFX9: v_pk_add_f16 v1, 2.0, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xf4,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 0xc000, v2
+// GFX9: v_pk_add_f16 v1, -2.0, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xf5,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 0x4400, v2
+// GFX9: v_pk_add_f16 v1, 4.0, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xf6,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 0xc400, v2
+// GFX9: v_pk_add_f16 v1, -4.0, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xf7,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 0x3118, v2
+// GFX9: v_pk_add_f16 v1, 0.15915494, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xf8,0x04,0x02,0x18]
+
+v_pk_add_f16 v1, 65535, v2
+// GFX9: v_pk_add_f16 v1, -1, v2 ; encoding: [0x01,0x00,0x8f,0xd3,0xc1,0x04,0x02,0x18]
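Read together with literalv216-err.s above, these tests suggest gfx9 v_pk_add_f16 only accepts operands that fold to an inline constant: small integers plus a fixed set of fp16 values, with raw bit patterns such as 0x3c00 folded to the same operand codes as their decimal spellings. A lookup reconstructed from the CHECK lines (hypothetical helper, not the assembler's code):

FP16_INLINE = {
    0x3800: 0xf0,  # 0.5
    0xb800: 0xf1,  # -0.5
    0x3c00: 0xf2,  # 1.0
    0xbc00: 0xf3,  # -1.0
    0x4000: 0xf4,  # 2.0
    0xc000: 0xf5,  # -2.0
    0x4400: 0xf6,  # 4.0
    0xc400: 0xf7,  # -4.0
    0x3118: 0xf8,  # 0.15915494 (1/(2*pi))
}

def pk_f16_operand(bits):
    # Fold a raw fp16 bit pattern to its inline operand code; integers in
    # -16..64 (e.g. 0xffff == -1) seem to use the plain integer table.
    if bits in FP16_INLINE:
        return FP16_INLINE[bits]
    if bits >= 0xfff0 or bits <= 64:
        v = bits - 0x10000 if bits >= 0x8000 else bits
        return 0x80 + v if v >= 0 else 0xc0 - v
    raise ValueError("no inline encoding; rejected (see literalv216-err.s)")

assert pk_f16_operand(0x3c00) == 0xf2  # same code as writing 1.0
assert pk_f16_operand(0xffff) == 0xc1  # same code as writing -1 or 65535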
diff --git a/test/MC/AMDGPU/metadata.s b/test/MC/AMDGPU/metadata.s
deleted file mode 100644
index 3c009ff590d3..000000000000
--- a/test/MC/AMDGPU/metadata.s
+++ /dev/null
@@ -1,35 +0,0 @@
-// RUN: llvm-mc -triple amdgcn--amdhsa -mcpu=kaveri -show-encoding %s | FileCheck %s --check-prefix=ASM
-
-.amdgpu_runtime_metadata
- { amd.MDVersion: [ 2, 0 ], amd.PrintfInfo: [ '1:1:4:%d\n', '2:1:8:%g\n' ], amd.Kernels:
-
- - { amd.KernelName: test_char, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
- - { amd.ArgSize: 1, amd.ArgAlign: 1, amd.ArgKind: 0, amd.ArgValueType: 1, amd.ArgTypeName: char, amd.ArgAccQual: 0 }
- - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
- - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
- - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
- - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-
- - { amd.KernelName: test_ushort2, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
- - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgKind: 0, amd.ArgValueType: 4, amd.ArgTypeName: ushort2, amd.ArgAccQual: 0 }
- - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
- - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
- - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
- - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
- }
-.end_amdgpu_runtime_metadata
-
-// ASM: { amd.MDVersion: [ 2, 0 ], amd.PrintfInfo: [ '1:1:4:%d\n', '2:1:8:%g\n' ], amd.Kernels:
-// ASM: - { amd.KernelName: test_char, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-// ASM: - { amd.ArgSize: 1, amd.ArgAlign: 1, amd.ArgKind: 0, amd.ArgValueType: 1, amd.ArgTypeName: char, amd.ArgAccQual: 0 }
-// ASM: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-// ASM: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-// ASM: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-// ASM: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-// ASM: - { amd.KernelName: test_ushort2, amd.Language: OpenCL C, amd.LanguageVersion: [ 2, 0 ], amd.Args:
-// ASM: - { amd.ArgSize: 4, amd.ArgAlign: 4, amd.ArgKind: 0, amd.ArgValueType: 4, amd.ArgTypeName: ushort2, amd.ArgAccQual: 0 }
-// ASM: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 7, amd.ArgValueType: 9 }
-// ASM: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 8, amd.ArgValueType: 9 }
-// ASM: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 9, amd.ArgValueType: 9 }
-// ASM: - { amd.ArgSize: 8, amd.ArgAlign: 8, amd.ArgKind: 11, amd.ArgValueType: 1, amd.ArgAddrQual: 1 } }
-// ASM: }
diff --git a/test/MC/AMDGPU/regression/bug28168.s b/test/MC/AMDGPU/regression/bug28168.s
index 86f818937efb..e836a3f96a90 100644
--- a/test/MC/AMDGPU/regression/bug28168.s
+++ b/test/MC/AMDGPU/regression/bug28168.s
@@ -1,10 +1,10 @@
// RUN: llvm-mc -arch=amdgcn -mcpu=bonaire -show-encoding %s | FileCheck %s --check-prefix=GCN --check-prefix=SICI --check-prefix=CI
// RUN: llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s | FileCheck %s --check-prefix=GCN --check-prefix=CIVI --check-prefix=VI
-v_mqsad_pk_u16_u8 v[0:1], s[0:1], s0, v[254:255]
-// CI: [0x00,0x00,0xe6,0xd2,0x00,0x00,0xf8,0x07]
-// VI: [0x00,0x00,0xe6,0xd1,0x00,0x00,0xf8,0x07]
+v_mqsad_pk_u16_u8 v[0:1], s[0:1], 1, v[254:255]
+// CI: [0x00,0x00,0xe6,0xd2,0x00,0x02,0xf9,0x07]
+// VI: [0x00,0x00,0xe6,0xd1,0x00,0x02,0xf9,0x07]
-v_qsad_pk_u16_u8 v[0:1], v[0:1], s0, s[0:1]
-// CI: [0x00,0x00,0xe4,0xd2,0x00,0x01,0x00,0x00]
-// VI: [0x00,0x00,0xe5,0xd1,0x00,0x01,0x00,0x00]
+v_qsad_pk_u16_u8 v[0:1], v[0:1], 1, s[0:1]
+// CI: [0x00,0x00,0xe4,0xd2,0x00,0x03,0x01,0x00]
+// VI: [0x00,0x00,0xe5,0xd1,0x00,0x03,0x01,0x00]
diff --git a/test/MC/AMDGPU/sop1.s b/test/MC/AMDGPU/sop1.s
index f611b022fb4b..c1fe7d013e6c 100644
--- a/test/MC/AMDGPU/sop1.s
+++ b/test/MC/AMDGPU/sop1.s
@@ -232,9 +232,17 @@ s_movreld_b64 s[2:3], s[4:5]
// SICI: s_movreld_b64 s[2:3], s[4:5] ; encoding: [0x04,0x31,0x82,0xbe]
// VI: s_movreld_b64 s[2:3], s[4:5] ; encoding: [0x04,0x2d,0x82,0xbe]
-s_cbranch_join s[4:5]
-// SICI: s_cbranch_join s[4:5] ; encoding: [0x04,0x32,0x80,0xbe]
-// VI: s_cbranch_join s[4:5] ; encoding: [0x04,0x2e,0x80,0xbe]
+s_cbranch_join s4
+// SICI: s_cbranch_join s4 ; encoding: [0x04,0x32,0x80,0xbe]
+// VI: s_cbranch_join s4 ; encoding: [0x04,0x2e,0x80,0xbe]
+
+s_cbranch_join 1
+// NOSICI: error: invalid operand for instruction
+// NOVI: error: invalid operand for instruction
+
+s_cbranch_join 100
+// NOSICI: error: invalid operand for instruction
+// NOVI: error: invalid operand for instruction
s_abs_i32 s1, s2
// SICI: s_abs_i32 s1, s2 ; encoding: [0x02,0x34,0x81,0xbe]
diff --git a/test/MC/AMDGPU/sop2-err.s b/test/MC/AMDGPU/sop2-err.s
new file mode 100644
index 000000000000..5115489a7f27
--- /dev/null
+++ b/test/MC/AMDGPU/sop2-err.s
@@ -0,0 +1,7 @@
+// RUN: not llvm-mc -arch=amdgcn %s 2>&1 | FileCheck -check-prefix=GCN %s
+
+s_cbranch_g_fork 100, s[6:7]
+// GCN: error: invalid operand for instruction
+
+s_cbranch_g_fork s[6:7], 100
+// GCN: error: invalid operand for instruction
diff --git a/test/MC/AMDGPU/sop2.s b/test/MC/AMDGPU/sop2.s
index 805710d9b974..6f1d083e302b 100644
--- a/test/MC/AMDGPU/sop2.s
+++ b/test/MC/AMDGPU/sop2.s
@@ -160,6 +160,14 @@ s_cbranch_g_fork s[4:5], s[6:7]
// SICI: s_cbranch_g_fork s[4:5], s[6:7] ; encoding: [0x04,0x06,0x80,0x95]
// VI: s_cbranch_g_fork s[4:5], s[6:7] ; encoding: [0x04,0x06,0x80,0x94]
+s_cbranch_g_fork 1, s[6:7]
+// SICI: s_cbranch_g_fork 1, s[6:7] ; encoding: [0x81,0x06,0x80,0x95]
+// VI: s_cbranch_g_fork 1, s[6:7] ; encoding: [0x81,0x06,0x80,0x94]
+
+s_cbranch_g_fork s[6:7], 2
+// SICI: s_cbranch_g_fork s[6:7], 2 ; encoding: [0x06,0x82,0x80,0x95]
+// VI: s_cbranch_g_fork s[6:7], 2 ; encoding: [0x06,0x82,0x80,0x94]
+
s_absdiff_i32 s2, s4, s6
// SICI: s_absdiff_i32 s2, s4, s6 ; encoding: [0x04,0x06,0x02,0x96]
// VI: s_absdiff_i32 s2, s4, s6 ; encoding: [0x04,0x06,0x02,0x95]
diff --git a/test/MC/AMDGPU/sopp-gfx9.s b/test/MC/AMDGPU/sopp-gfx9.s
new file mode 100644
index 000000000000..237bceb287f2
--- /dev/null
+++ b/test/MC/AMDGPU/sopp-gfx9.s
@@ -0,0 +1,71 @@
+// RUN: llvm-mc -arch=amdgcn -mcpu=gfx900 -show-encoding %s | FileCheck --check-prefix=GFX9 %s
+
+//===----------------------------------------------------------------------===//
+// s_waitcnt
+//===----------------------------------------------------------------------===//
+
+s_waitcnt 0
+// GFX9: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; encoding: [0x00,0x00,0x8c,0xbf]
+
+s_waitcnt vmcnt(0) & expcnt(0) & lgkmcnt(0)
+// GFX9: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; encoding: [0x00,0x00,0x8c,0xbf]
+
+s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+// GFX9: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; encoding: [0x00,0x00,0x8c,0xbf]
+
+s_waitcnt vmcnt(0), expcnt(0), lgkmcnt(0)
+// GFX9: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; encoding: [0x00,0x00,0x8c,0xbf]
+
+s_waitcnt vmcnt(1)
+// GFX9: s_waitcnt vmcnt(1) ; encoding: [0x71,0x0f,0x8c,0xbf]
+
+s_waitcnt vmcnt(9)
+// GFX9: s_waitcnt vmcnt(9) ; encoding: [0x79,0x0f,0x8c,0xbf]
+
+s_waitcnt expcnt(2)
+// GFX9: s_waitcnt expcnt(2) ; encoding: [0x2f,0xcf,0x8c,0xbf]
+
+s_waitcnt lgkmcnt(3)
+// GFX9: s_waitcnt lgkmcnt(3) ; encoding: [0x7f,0xc3,0x8c,0xbf]
+
+s_waitcnt lgkmcnt(9)
+// GFX9: s_waitcnt lgkmcnt(9) ; encoding: [0x7f,0xc9,0x8c,0xbf]
+
+s_waitcnt vmcnt(0), expcnt(0)
+// GFX9: s_waitcnt vmcnt(0) expcnt(0) ; encoding: [0x00,0x0f,0x8c,0xbf]
+
+s_waitcnt vmcnt(15)
+// GFX9: s_waitcnt vmcnt(15) ; encoding: [0x7f,0x0f,0x8c,0xbf]
+
+s_waitcnt vmcnt(15) expcnt(6)
+// GFX9: s_waitcnt vmcnt(15) expcnt(6) ; encoding: [0x6f,0x0f,0x8c,0xbf]
+
+s_waitcnt vmcnt(15) lgkmcnt(14)
+// GFX9: s_waitcnt vmcnt(15) lgkmcnt(14) ; encoding: [0x7f,0x0e,0x8c,0xbf]
+
+s_waitcnt vmcnt(15) expcnt(6) lgkmcnt(14)
+// GFX9: s_waitcnt vmcnt(15) expcnt(6) lgkmcnt(14) ; encoding: [0x6f,0x0e,0x8c,0xbf]
+
+s_waitcnt vmcnt(31)
+// GFX9: s_waitcnt vmcnt(31) ; encoding: [0x7f,0x4f,0x8c,0xbf]
+
+s_waitcnt vmcnt(31) expcnt(6)
+// GFX9: s_waitcnt vmcnt(31) expcnt(6) ; encoding: [0x6f,0x4f,0x8c,0xbf]
+
+s_waitcnt vmcnt(31) lgkmcnt(14)
+// GFX9: s_waitcnt vmcnt(31) lgkmcnt(14) ; encoding: [0x7f,0x4e,0x8c,0xbf]
+
+s_waitcnt vmcnt(31) expcnt(6) lgkmcnt(14)
+// GFX9: s_waitcnt vmcnt(31) expcnt(6) lgkmcnt(14) ; encoding: [0x6f,0x4e,0x8c,0xbf]
+
+s_waitcnt vmcnt(62)
+// GFX9: s_waitcnt vmcnt(62) ; encoding: [0x7e,0xcf,0x8c,0xbf]
+
+s_waitcnt vmcnt(62) expcnt(6)
+// GFX9: s_waitcnt vmcnt(62) expcnt(6) ; encoding: [0x6e,0xcf,0x8c,0xbf]
+
+s_waitcnt vmcnt(62) lgkmcnt(14)
+// GFX9: s_waitcnt vmcnt(62) lgkmcnt(14) ; encoding: [0x7e,0xce,0x8c,0xbf]
+
+s_waitcnt vmcnt(62) expcnt(6) lgkmcnt(14)
+// GFX9: s_waitcnt vmcnt(62) expcnt(6) lgkmcnt(14) ; encoding: [0x6e,0xce,0x8c,0xbf]
diff --git a/test/MC/AMDGPU/sopp.s b/test/MC/AMDGPU/sopp.s
index b073c8dfc635..140e26a9f600 100644
--- a/test/MC/AMDGPU/sopp.s
+++ b/test/MC/AMDGPU/sopp.s
@@ -43,6 +43,18 @@ s_cbranch_execz 7
s_cbranch_execnz 8
// GCN: s_cbranch_execnz 8 ; encoding: [0x08,0x00,0x89,0xbf]
+s_cbranch_cdbgsys 9
+// GCN: s_cbranch_cdbgsys 9 ; encoding: [0x09,0x00,0x97,0xbf]
+
+s_cbranch_cdbgsys_and_user 10
+// GCN: s_cbranch_cdbgsys_and_user 10 ; encoding: [0x0a,0x00,0x9a,0xbf]
+
+s_cbranch_cdbgsys_or_user 11
+// GCN: s_cbranch_cdbgsys_or_user 11 ; encoding: [0x0b,0x00,0x99,0xbf]
+
+s_cbranch_cdbguser 12
+// GCN: s_cbranch_cdbguser 12 ; encoding: [0x0c,0x00,0x98,0xbf]
+
s_barrier
// GCN: s_barrier ; encoding: [0x00,0x00,0x8a,0xbf]
@@ -84,6 +96,9 @@ s_waitcnt vmcnt(0), expcnt(0)
s_sethalt 9
// GCN: s_sethalt 9 ; encoding: [0x09,0x00,0x8d,0xbf]
+s_setkill 7
+// GCN: s_setkill 7 ; encoding: [0x07,0x00,0x8b,0xbf]
+
s_sleep 10
// GCN: s_sleep 10 ; encoding: [0x0a,0x00,0x8e,0xbf]
@@ -188,3 +203,11 @@ s_set_gpr_idx_mode 0
s_set_gpr_idx_mode 15
// VI: s_set_gpr_idx_mode dst src0 src1 src2 ; encoding: [0x0f,0x00,0x9d,0xbf]
// NOSICI: error: instruction not supported on this GPU
+
+s_endpgm_saved
+// VI: s_endpgm_saved ; encoding: [0x00,0x00,0x9b,0xbf]
+// NOSICI: error: instruction not supported on this GPU
+
+s_wakeup
+// VI: s_wakeup ; encoding: [0x00,0x00,0x83,0xbf]
+// NOSICI: error: instruction not supported on this GPU
diff --git a/test/MC/AMDGPU/vop-err.s b/test/MC/AMDGPU/vop-err.s
new file mode 100644
index 000000000000..13388263b20e
--- /dev/null
+++ b/test/MC/AMDGPU/vop-err.s
@@ -0,0 +1,290 @@
+// RUN: not llvm-mc -arch=amdgcn -mcpu=bonaire -show-encoding %s 2>&1 | FileCheck %s
+// RUN: not llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s 2>&1 | FileCheck %s
+
+// GENERIC LIMITATIONS ON VOP FORMATS: CONSTANT BUS RESTRICTIONS
+
+//=====================================================
+// v_movreld_b32: implicitly reads m0 (VOP1/VOP3)
+
+v_movreld_b32 v0, s1
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_movreld_b32 v0, flat_scratch_lo
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_movreld_b32 v0, flat_scratch_hi
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_movreld_b32 v0, vcc_lo
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_movreld_b32 v0, vcc_hi
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_movreld_b32 v0, exec_lo
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_movreld_b32 v0, exec_hi
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_movreld_b32 v0, ttmp0
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_movreld_b32 v0, ttmp1
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_movreld_b32 v0, 123
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_movreld_b32_e64 v0, s1
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_movreld_b32_e64 v0, flat_scratch_lo
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_movreld_b32_e64 v0, flat_scratch_hi
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+//=====================================================
+// v_div_fmas: implicitly reads VCC (VOP3)
+
+v_div_fmas_f32 v0, s1, s1, s1
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_div_fmas_f32 v0, v2, v3, -s1
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_div_fmas_f32 v0, v1, s2, |v3|
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_div_fmas_f32 v0, v1, -v2, -s3
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_div_fmas_f32 v0, v1, flat_scratch_lo, v3
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_div_fmas_f32 v0, v1, v2, flat_scratch_hi
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_div_fmas_f32 v0, v1, v2, m0
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_div_fmas_f32 v0, v1, ttmp2, v2
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_div_fmas_f64 v[0:1], s[2:3], v[4:5], v[6:7]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_div_fmas_f64 v[0:1], v[2:3], s[4:5], v[6:7]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_div_fmas_f64 v[0:1], v[2:3], v[4:5], s[6:7]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_div_fmas_f64 v[0:1], v[2:3], v[4:5], ttmp[2:3]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_div_fmas_f64 v[0:1], v[2:3], v[4:5], flat_scratch
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_div_fmas_f64 v[0:1], v[2:3], v[4:5], exec
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+//=====================================================
+// v_cndmask_b32: implicitly reads VCC (VOP2)
+
+v_cndmask_b32 v0, s1, v2, vcc
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cndmask_b32 v0, flat_scratch_lo, v2, vcc
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cndmask_b32 v0, flat_scratch_hi, v2, vcc
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cndmask_b32 v0, exec_lo, v2, vcc
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cndmask_b32 v0, exec_hi, v2, vcc
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+//=====================================================
+// v_cndmask_b32_e64: VOP3, no implicit reads
+
+v_cndmask_b32_e64 v0, s1, v2, vcc
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cndmask_b32_e64 v0, flat_scratch_lo, v2, vcc
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cndmask_b32_e64 v0, flat_scratch_hi, v2, vcc
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cndmask_b32_e64 v0, s1, v2, flat_scratch
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cndmask_b32_e64 v0, s0, v2, s[0:1]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cndmask_b32_e64 v0, v2, s0, s[0:1]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cndmask_b32_e64 v0, s0, s0, s[0:1]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cndmask_b32_e64 v0, s1, v2, s[0:1]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cndmask_b32_e64 v0, v2, s1, s[0:1]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cndmask_b32_e64 v0, s1, s1, s[0:1]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cndmask_b32_e64 v0, s1, v2, s[2:3]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cndmask_b32_e64 v0, v2, s1, s[2:3]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cndmask_b32_e64 v0, s1, s1, s[2:3]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+//=====================================================
+// v_addc_u32: implicitly reads VCC (VOP2 only!)
+
+v_addc_u32 v0, vcc, s0, v0, vcc
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_addc_u32 v0, vcc, flat_scratch_lo, v0, vcc
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_addc_u32 v0, vcc, flat_scratch_hi, v0, vcc
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_addc_u32 v0, vcc, exec_lo, v0, vcc
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_addc_u32 v0, vcc, exec_hi, v0, vcc
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+//=====================================================
+// v_addc_u32_e64: no implicit read in VOP3
+
+v_addc_u32_e64 v0, s[0:1], s2, v2, vcc
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_addc_u32_e64 v0, s[0:1], v2, s2, vcc
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_addc_u32_e64 v0, s[0:1], s2, s2, vcc
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_addc_u32_e64 v0, s[0:1], s0, v2, s[0:1]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_addc_u32_e64 v0, s[0:1], v2, s0, s[0:1]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_addc_u32_e64 v0, s[0:1], s0, s0, s[0:1]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_addc_u32_e64 v0, s[0:1], s2, v2, s[0:1]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_addc_u32_e64 v0, s[0:1], v2, s2, s[0:1]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_addc_u32_e64 v0, s[0:1], s2, s2, s[0:1]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+//=====================================================
+// VOP1 instructions w/o implicit reads have no negative test cases on constant bus use
+// VOPC has no negative test cases on constant bus use
+
+//=====================================================
+// madak/madmk: a special case for VOP2 w/o implicit reads
+
+v_madak_f32 v0, s0, v0, 0x11213141
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_madak_f32 v0, flat_scratch_lo, v0, 0x11213141
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_madak_f32 v0, flat_scratch_hi, v0, 0x11213141
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_madak_f32 v0, exec_lo, v0, 0x11213141
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_madak_f32 v0, exec_hi, v0, 0x11213141
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_madak_f32 v0, vcc_lo, v0, 0x11213141
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_madak_f32 v0, vcc_hi, v0, 0x11213141
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+//=====================================================
+// VOP3 w/o implicit reads
+
+v_mad_f32 v0, s0, s1, s0
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_mad_f32 v0, s1, s0, s0
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_mad_f32 v0, s0, s0, s1
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_mad_f32 v0, s0, s0, flat_scratch_lo
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+//=====================================================
+// VOP2_e64:
+
+v_add_f32_e64 v0, s0, s1
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_add_f32_e64 v0, s0, flat_scratch_lo
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_add_f32_e64 v0, flat_scratch_hi, s1
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_add_f32_e64 v0, flat_scratch_hi, m0
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_add_f64 v[0:1], s[0:1], s[2:3]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_add_f64 v[0:1], s[0:1], flat_scratch
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_add_f64 v[0:1], vcc, s[2:3]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+//=====================================================
+// VOPC_e64:
+
+v_cmp_eq_f32_e64 s[0:1], s0, s1
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cmp_eq_f32_e64 s[0:1], s0, flat_scratch_lo
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cmp_eq_f32_e64 s[0:1], flat_scratch_hi, s1
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cmp_eq_f32_e64 s[0:1], s0, m0
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cmp_eq_f64_e64 s[0:1], s[0:1], s[2:3]
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cmp_eq_f64_e64 s[0:1], s[0:1], flat_scratch
+// CHECK: error: invalid operand (violates constant bus restrictions)
+
+v_cmp_eq_f64_e64 s[0:1], vcc, s[2:3]
+// CHECK: error: invalid operand (violates constant bus restrictions)
diff --git a/test/MC/AMDGPU/vop1-gfx9-err.s b/test/MC/AMDGPU/vop1-gfx9-err.s
new file mode 100644
index 000000000000..87251e6243cc
--- /dev/null
+++ b/test/MC/AMDGPU/vop1-gfx9-err.s
@@ -0,0 +1,25 @@
+// RUN: not llvm-mc -arch=amdgcn -mcpu=gfx901 -show-encoding %s 2>&1 | FileCheck -check-prefix=GCN %s
+// RUN: not llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s 2>&1 | FileCheck -check-prefix=GCN %s
+// RUN: not llvm-mc -arch=amdgcn -mcpu=hawaii -show-encoding %s 2>&1 | FileCheck -check-prefix=GCN %s
+
+v_swap_b32 v1, 1
+// GCN: :16: error: invalid operand for instruction
+
+v_swap_b32 v1, s0
+// GCN: :16: error: invalid operand for instruction
+
+// FIXME: Better error for it requiring VOP1 encoding
+v_swap_b32_e64 v1, v2
+// GCN: :1: error: unrecognized instruction mnemonic
+
+v_swap_b32 v1, v2, v1
+// GCN: :20: error: invalid operand for instruction
+
+v_swap_b32 v1, v2, v2
+// GCN: :20: error: invalid operand for instruction
+
+v_swap_b32 v1, v2, v2, v2
+// GCN: :20: error: invalid operand for instruction
+
+v_swap_codegen_pseudo_b32 v1, v2
+// GCN: :1: error: unrecognized instruction mnemonic
diff --git a/test/MC/AMDGPU/vop1-gfx9.s b/test/MC/AMDGPU/vop1-gfx9.s
new file mode 100644
index 000000000000..8706190aa142
--- /dev/null
+++ b/test/MC/AMDGPU/vop1-gfx9.s
@@ -0,0 +1,13 @@
+// RUN: llvm-mc -arch=amdgcn -mcpu=gfx901 -show-encoding %s | FileCheck -check-prefix=GFX9 %s
+// RUN: not llvm-mc -arch=amdgcn -mcpu=tahiti -show-encoding %s 2>&1 | FileCheck -check-prefix=NOVI %s
+// RUN: not llvm-mc -arch=amdgcn -mcpu=hawaii -show-encoding %s 2>&1 | FileCheck -check-prefix=NOVI %s
+// RUN: not llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s 2>&1 | FileCheck -check-prefix=NOVI %s
+
+v_swap_b32 v1, v2
+// GFX9: v_swap_b32 v1, v2 ; encoding: [0x02,0xa3,0x02,0x7e]
+// NOVI: :1: error: instruction not supported on this GPU
+
+// FIXME: Error for it requiring VOP1 encoding
+v_swap_b32_e32 v1, v2
+// GFX9: v_swap_b32 v1, v2 ; encoding: [0x02,0xa3,0x02,0x7e]
+// NOVI: :1: error: instruction not supported on this GPU
diff --git a/test/MC/AMDGPU/vop1.s b/test/MC/AMDGPU/vop1.s
index e55c05e5c539..03abd6107f3d 100644
--- a/test/MC/AMDGPU/vop1.s
+++ b/test/MC/AMDGPU/vop1.s
@@ -56,7 +56,7 @@ v_cvt_u32_f32_e32 v1, v2
v_cvt_i32_f32_e32 v1, v2
// SICI: v_mov_fed_b32_e32 v1, v2 ; encoding: [0x02,0x13,0x02,0x7e]
-// NOVI: error: instruction not supported on this GPU
+// VI: v_mov_fed_b32_e32 v1, v2 ; encoding: [0x02,0x13,0x02,0x7e]
v_mov_fed_b32_e32 v1, v2
// GCN: v_cvt_f16_f32_e32 v1, v2 ; encoding: [0x02,0x15,0x02,0x7e]
diff --git a/test/MC/AMDGPU/vop2.s b/test/MC/AMDGPU/vop2.s
index 43b5c5de3eec..078b68638008 100644
--- a/test/MC/AMDGPU/vop2.s
+++ b/test/MC/AMDGPU/vop2.s
@@ -116,9 +116,17 @@ v_cndmask_b32_e32 v1, v2, v3, vcc
// VI: v_readlane_b32 s1, v2, s3 ; encoding: [0x01,0x00,0x89,0xd2,0x02,0x07,0x00,0x00]
v_readlane_b32 s1, v2, s3
-// SICI: v_writelane_b32 v1, s2, s3 ; encoding: [0x02,0x06,0x02,0x04]
-// VI: v_writelane_b32 v1, s2, s3 ; encoding: [0x01,0x00,0x8a,0xd2,0x02,0x06,0x00,0x00]
-v_writelane_b32 v1, s2, s3
+// SICI: v_writelane_b32 v1, s2, 4 ; encoding: [0x02,0x08,0x03,0x04]
+// VI: v_writelane_b32 v1, s2, 4 ; encoding: [0x01,0x00,0x8a,0xd2,0x02,0x08,0x01,0x00]
+v_writelane_b32 v1, s2, 4
+
+// SICI: v_writelane_b32 v2, 1, s4 ; encoding: [0x81,0x08,0x04,0x04]
+// VI: v_writelane_b32 v2, 1, s4 ; encoding: [0x02,0x00,0x8a,0xd2,0x81,0x08,0x00,0x00]
+v_writelane_b32 v2, 1, s4
+
+// SICI: v_writelane_b32 v255, 0xaf123456, 2 ; encoding: [0xff,0x04,0xff,0x05,0x56,0x34,0x12,0xaf]
+// NOVI: error: instruction not supported on this GPU
+v_writelane_b32 v255, 0xaf123456, 2
// SICI: v_add_f32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x06]
// VI: v_add_f32_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x02]
diff --git a/test/MC/AMDGPU/vop3-gfx9.s b/test/MC/AMDGPU/vop3-gfx9.s
new file mode 100644
index 000000000000..22a0cddceab4
--- /dev/null
+++ b/test/MC/AMDGPU/vop3-gfx9.s
@@ -0,0 +1,48 @@
+// RUN: llvm-mc -arch=amdgcn -mcpu=gfx901 -show-encoding %s | FileCheck -check-prefix=GFX9 %s
+// RUN: not llvm-mc -arch=amdgcn -mcpu=tahiti -show-encoding %s 2>&1 | FileCheck -check-prefix=NOVI %s
+// RUN: not llvm-mc -arch=amdgcn -mcpu=hawaii -show-encoding %s 2>&1 | FileCheck -check-prefix=NOVI %s
+// RUN: not llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s 2>&1 | FileCheck -check-prefix=NOVI %s
+
+v_lshl_add_u32 v1, v2, v3, v4
+// GFX9: v_lshl_add_u32 v1, v2, v3, v4 ; encoding: [0x01,0x00,0xfd,0xd1,0x02,0x07,0x12,0x04]
+// NOVI: :1: error: instruction not supported on this GPU
+
+v_add_lshl_u32 v1, v2, v3, v4
+// GFX9: v_add_lshl_u32 v1, v2, v3, v4 ; encoding: [0x01,0x00,0xfe,0xd1,0x02,0x07,0x12,0x04]
+// NOVI: :1: error: instruction not supported on this GPU
+
+v_add3_u32 v1, v2, v3, v4
+// GFX9: v_add3_u32 v1, v2, v3, v4 ; encoding: [0x01,0x00,0xff,0xd1,0x02,0x07,0x12,0x04]
+// NOVI: :1: error: instruction not supported on this GPU
+
+v_lshl_or_b32 v1, v2, v3, v4
+// GFX9: v_lshl_or_b32 v1, v2, v3, v4 ; encoding: [0x01,0x00,0x00,0xd2,0x02,0x07,0x12,0x04]
+// NOVI: :1: error: instruction not supported on this GPU
+
+v_and_or_b32 v1, v2, v3, v4
+// GFX9: v_and_or_b32 v1, v2, v3, v4 ; encoding: [0x01,0x00,0x01,0xd2,0x02,0x07,0x12,0x04]
+// NOVI: :1: error: instruction not supported on this GPU
+
+v_or3_b32 v1, v2, v3, v4
+// GFX9: v_or3_b32 v1, v2, v3, v4 ; encoding: [0x01,0x00,0x02,0xd2,0x02,0x07,0x12,0x04]
+// NOVI: :1: error: instruction not supported on this GPU
+
+v_pack_b32_f16 v1, v2, v3
+// GFX9: v_pack_b32_f16 v1, v2, v3 ; encoding: [0x01,0x00,0xa0,0xd2,0x02,0x07,0x02,0x00]
+// NOVI: :1: error: instruction not supported on this GPU
+
+v_xad_u32 v1, v2, v3, v4
+// GFX9: v_xad_u32 v1, v2, v3, v4 ; encoding: [0x01,0x00,0xf3,0xd1,0x02,0x07,0x12,0x04]
+// NOVI: :1: error: instruction not supported on this GPU
+
+v_med3_f16 v1, v2, v3, v4
+// GFX9: v_med3_f16 v1, v2, v3, v4 ; encoding: [0x01,0x00,0xfa,0xd1,0x02,0x07,0x12,0x04]
+// NOVI: :1: error: instruction not supported on this GPU
+
+v_med3_i16 v1, v2, v3, v4
+// GFX9: v_med3_i16 v1, v2, v3, v4 ; encoding: [0x01,0x00,0xfb,0xd1,0x02,0x07,0x12,0x04]
+// NOVI: :1: error: instruction not supported on this GPU
+
+v_med3_u16 v1, v2, v3, v4
+// GFX9: v_med3_u16 v1, v2, v3, v4 ; encoding: [0x01,0x00,0xfc,0xd1,0x02,0x07,0x12,0x04]
+// NOVI: :1: error: instruction not supported on this GPU
diff --git a/test/MC/AMDGPU/vop3-modifiers-err.s b/test/MC/AMDGPU/vop3-modifiers-err.s
new file mode 100644
index 000000000000..bd08ee2d10aa
--- /dev/null
+++ b/test/MC/AMDGPU/vop3-modifiers-err.s
@@ -0,0 +1,15 @@
+// RUN: not llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s 2>&1 | FileCheck %s
+
+//---------------------------------------------------------------------------//
+// VOP3 Modifiers
+//---------------------------------------------------------------------------//
+
+// 'neg(1)' cannot be encoded as a 32-bit literal while preserving e64 semantics
+v_ceil_f64_e32 v[0:1], neg(1)
+// CHECK: error: invalid operand for instruction
+
+v_ceil_f32 v0, --1
+// CHECK: error: invalid syntax, expected 'neg' modifier
+
+v_ceil_f16 v0, abs(neg(1))
+// CHECK: error: not a valid operand
\ No newline at end of file
diff --git a/test/MC/AMDGPU/vop3-modifiers.s b/test/MC/AMDGPU/vop3-modifiers.s
new file mode 100644
index 000000000000..f18a38caac38
--- /dev/null
+++ b/test/MC/AMDGPU/vop3-modifiers.s
@@ -0,0 +1,388 @@
+// RUN: llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s | FileCheck %s
+
+//---------------------------------------------------------------------------//
+// VOP1/VOP3 F16
+//---------------------------------------------------------------------------//
+
+v_ceil_f16 v0, -1
+// CHECK: [0xc1,0x8a,0x00,0x7e]
+
+v_ceil_f16 v0, -2
+// CHECK: [0xc2,0x8a,0x00,0x7e]
+
+v_ceil_f16 v0, -16
+// CHECK: [0xd0,0x8a,0x00,0x7e]
+
+v_ceil_f16 v0, -0.5
+// CHECK: [0xf1,0x8a,0x00,0x7e]
+
+v_ceil_f16 v0, -1.0
+// CHECK: [0xf3,0x8a,0x00,0x7e]
+
+v_ceil_f16 v0, -2.0
+// CHECK: [0xf5,0x8a,0x00,0x7e]
+
+v_ceil_f16 v0, -4.0
+// CHECK: [0xf7,0x8a,0x00,0x7e]
+
+// Arbitrary f16 literal in hex
+v_ceil_f16 v0, 0xabcd
+// CHECK: [0xff,0x8a,0x00,0x7e,0xcd,0xab,0x00,0x00]
+
+// '-' is part of the hex literal (not a 'neg' modifier)
+v_ceil_f16 v0, -0x5433
+// CHECK: [0xff,0x8a,0x00,0x7e,0xcd,0xab,0x00,0x00]
+
+v_ceil_f16 v0, abs(0xabcd)
+// CHECK: [0xff,0x8a,0x00,0x7e,0xcd,0x2b,0x00,0x00]
+
+v_ceil_f16 v0, neg(0xabcd)
+// CHECK: [0xff,0x8a,0x00,0x7e,0xcd,0x2b,0x00,0x00]
+
+v_ceil_f16 v0, neg(abs(0xabcd))
+// CHECK: [0xff,0x8a,0x00,0x7e,0xcd,0xab,0x00,0x00]
+
+v_ceil_f16 v0, -abs(0xabcd)
+// CHECK: [0xff,0x8a,0x00,0x7e,0xcd,0xab,0x00,0x00]
+
+// 1/(2*pi) encoded as inline constant in VOP1
+v_ceil_f16 v0, 0x3118
+// CHECK: [0xf8,0x8a,0x00,0x7e]
+
+// 1/(2*pi) encoded as inline constant in VOP3
+v_ceil_f16_e64 v0, 0x3118
+// CHECK: [0x00,0x00,0x85,0xd1,0xf8,0x00,0x00,0x00]
+
+// neg(-1/(2*pi)) = 1/(2*pi)
+v_ceil_f16 v0, neg(0xb118)
+// CHECK: [0xf8,0x8a,0x00,0x7e]
+
+// -1/(2*pi) cannot be encoded as inline constant in VOP1
+v_ceil_f16 v0, 0xb118
+// CHECK: [0xff,0x8a,0x00,0x7e,0x18,0xb1,0x00,0x00]
+
+// -1/(2*pi) cannot be encoded as inline constant in VOP1
+v_ceil_f16 v0, neg(0x3118)
+// CHECK: [0xff,0x8a,0x00,0x7e,0x18,0xb1,0x00,0x00]
+
+// -1/(2*pi) can be encoded as inline constant w/ modifiers in VOP3
+v_ceil_f16_e64 v0, neg(0x3118)
+// CHECK: [0x00,0x00,0x85,0xd1,0xf8,0x00,0x00,0x20]
+
+v_ceil_f16_e64 v0, abs(0x3118)
+// CHECK: [0x00,0x01,0x85,0xd1,0xf8,0x00,0x00,0x00]
+
+v_ceil_f16_e64 v0, neg(abs(0x3118))
+// CHECK: [0x00,0x01,0x85,0xd1,0xf8,0x00,0x00,0x20]
+
+v_ceil_f16_e64 v0, neg(|v1|)
+// CHECK: [0x00,0x01,0x85,0xd1,0x01,0x01,0x00,0x20]
+
+v_ceil_f16_e64 v0, -|v1|
+// CHECK: [0x00,0x01,0x85,0xd1,0x01,0x01,0x00,0x20]
+
+//---------------------------------------------------------------------------//
+// VOP1/VOP3 F64
+//---------------------------------------------------------------------------//
+
+// Encoded as inline constant 1 with 'neg' modifier
+v_ceil_f64 v[0:1], neg(1)
+// CHECK: [0x00,0x00,0x58,0xd1,0x81,0x00,0x00,0x20]
+
+// Encoded as inline constant -1 with 'neg' modifier
+v_ceil_f64 v[0:1], neg(-1)
+// CHECK: [0x00,0x00,0x58,0xd1,0xc1,0x00,0x00,0x20]
+
+v_ceil_f64_e32 v[0:1], 1.0
+// CHECK: [0xf2,0x30,0x00,0x7e]
+
+// abs(1.0) = 1.0
+v_ceil_f64_e32 v[0:1], abs(1.0)
+// CHECK: [0xf2,0x30,0x00,0x7e]
+
+// neg(1.0) = -1.0
+v_ceil_f64_e32 v[0:1], neg(1.0)
+// CHECK: [0xf3,0x30,0x00,0x7e]
+
+// 1/(2*pi) encoded as inline constant in VOP1
+v_ceil_f64 v[0:1], 0x3fc45f306dc9c882
+// CHECK: [0xf8,0x30,0x00,0x7e]
+
+// 1/(2*pi) encoded as inline constant in VOP3
+v_ceil_f64_e64 v[0:1], 0x3fc45f306dc9c882
+// CHECK: [0x00,0x00,0x58,0xd1,0xf8,0x00,0x00,0x00]
+
+// -1/(2*pi) cannot be encoded as inline constant in VOP1.
+// It cannot be encoded as literal either due to int literal rules.
+// So it is encoded as VOP3
+v_ceil_f64 v[0:1], abs(0x3fc45f306dc9c882)
+// CHECK: [0x00,0x01,0x58,0xd1,0xf8,0x00,0x00,0x00]
+
+v_ceil_f64 v[0:1], neg(abs(0x3fc45f306dc9c882))
+// CHECK: [0x00,0x01,0x58,0xd1,0xf8,0x00,0x00,0x20]
+
+
+//---------------------------------------------------------------------------//
+// VOP2/VOP3 F32
+//---------------------------------------------------------------------------//
+
+v_add_f32 v5, -1, v2
+// CHECK: [0xc1,0x04,0x0a,0x02]
+
+v_add_f32 v5, -16, v2
+// CHECK: [0xd0,0x04,0x0a,0x02]
+
+v_add_f32 v5, 0x3e22f983, v2
+// CHECK: [0xf8,0x04,0x0a,0x02]
+
+// abs(1/(2*pi)) = 1/(2*pi)
+v_add_f32 v5, abs(0x3e22f983), v2
+// CHECK: [0xf8,0x04,0x0a,0x02]
+
+// neg(-1/(2*pi)) = 1/(2*pi)
+v_add_f32 v5, neg(0xbe22f983), v2
+// CHECK: [0xf8,0x04,0x0a,0x02]
+
+// -1/(2*pi) cannot be encoded as inline constant in VOP2
+v_add_f32 v5, neg(0x3e22f983), v2
+// CHECK: [0xff,0x04,0x0a,0x02,0x83,0xf9,0x22,0xbe]
+
+
+v_add_f32_e64 v0, -2, s0
+// CHECK: [0x00,0x00,0x01,0xd1,0xc2,0x00,0x00,0x00]
+
+v_add_f32_e64 v0, -16, s0
+// CHECK: [0x00,0x00,0x01,0xd1,0xd0,0x00,0x00,0x00]
+
+v_add_f32_e64 v0, -0.5, s0
+// CHECK: [0x00,0x00,0x01,0xd1,0xf1,0x00,0x00,0x00]
+
+v_add_f32_e64 v0, -1.0, s0
+// CHECK: [0x00,0x00,0x01,0xd1,0xf3,0x00,0x00,0x00]
+
+v_add_f32_e64 v0, -2.0, s0
+// CHECK: [0x00,0x00,0x01,0xd1,0xf5,0x00,0x00,0x00]
+
+v_add_f32_e64 v0, -4.0, s0
+// CHECK: [0x00,0x00,0x01,0xd1,0xf7,0x00,0x00,0x00]
+
+v_add_f32_e64 v0, 0x3e22f983, s0
+// CHECK: [0x00,0x00,0x01,0xd1,0xf8,0x00,0x00,0x00]
+
+v_add_f32_e64 v0, neg(0x3e22f983), s0
+// CHECK: [0x00,0x00,0x01,0xd1,0xf8,0x00,0x00,0x20]
+
+//---------------------------------------------------------------------------//
+// VOPC/VOP3
+//---------------------------------------------------------------------------//
+
+v_cmp_eq_f16 vcc, -1, v0
+// CHECK: [0xc1,0x00,0x44,0x7c]
+
+v_cmp_eq_f16_e64 s[0:1], s0, -1
+// CHECK: [0x00,0x00,0x22,0xd0,0x00,0x82,0x01,0x00]
+
+v_cmp_eq_f16_e64 s[0:1], s0, 0x3118
+// CHECK: [0x00,0x00,0x22,0xd0,0x00,0xf0,0x01,0x00]
+
+v_cmp_eq_f16_e64 s[0:1], s0, neg(0x3118)
+// CHECK: [0x00,0x00,0x22,0xd0,0x00,0xf0,0x01,0x40]
+
+v_cmp_eq_f32 vcc, -4.0, v0
+// CHECK: [0xf7,0x00,0x84,0x7c]
+
+// 1/(2*pi) can be encoded as inline constant
+v_cmp_eq_f32 vcc, 0x3e22f983, v0
+// CHECK: [0xf8,0x00,0x84,0x7c]
+
+// -1/(2*pi) cannot be encoded as inline constant in VOPC
+v_cmp_eq_f32 vcc, neg(0x3e22f983), v0
+// CHECK: [0xff,0x00,0x84,0x7c,0x83,0xf9,0x22,0xbe]
+
+// abs(1/(2*pi)) = 1/(2*pi)
+v_cmp_eq_f32 vcc, abs(0x3e22f983), v0
+// CHECK: [0xf8,0x00,0x84,0x7c]
+
+// -1/(2*pi) can be encoded as inline constant w/ modifiers in VOP3
+v_cmp_eq_f32_e64 vcc, neg(0x3e22f983), v0
+// CHECK: [0x6a,0x00,0x42,0xd0,0xf8,0x00,0x02,0x20]
+
+v_cmp_eq_f32_e64 vcc, v0, abs(0x3e22f983)
+// CHECK: [0x6a,0x02,0x42,0xd0,0x00,0xf1,0x01,0x00]
+
+v_cmp_eq_f32_e64 vcc, v0, -abs(0x3e22f983)
+// CHECK: [0x6a,0x02,0x42,0xd0,0x00,0xf1,0x01,0x40]
+
+//---------------------------------------------------------------------------//
+// VOP3
+//---------------------------------------------------------------------------//
+
+v_add_f64 v[0:1], s[0:1], -1
+// CHECK: [0x00,0x00,0x80,0xd2,0x00,0x82,0x01,0x00]
+
+v_add_f64 v[0:1], s[0:1], -16
+// CHECK: [0x00,0x00,0x80,0xd2,0x00,0xa0,0x01,0x00]
+
+v_add_f64 v[0:1], s[0:1], -0.5
+// CHECK: [0x00,0x00,0x80,0xd2,0x00,0xe2,0x01,0x00]
+
+v_add_f64 v[0:1], s[0:1], -1.0
+// CHECK: [0x00,0x00,0x80,0xd2,0x00,0xe6,0x01,0x00]
+
+v_add_f64 v[0:1], s[0:1], -2.0
+// CHECK: [0x00,0x00,0x80,0xd2,0x00,0xea,0x01,0x00]
+
+v_add_f64 v[0:1], s[0:1], -4.0
+// CHECK: [0x00,0x00,0x80,0xd2,0x00,0xee,0x01,0x00]
+
+v_add_f64 v[4:5], s[0:1], 0x3fc45f306dc9c882
+// CHECK: [0x04,0x00,0x80,0xd2,0x00,0xf0,0x01,0x00]
+
+v_add_f64 v[4:5], s[0:1], neg(0x3fc45f306dc9c882)
+// CHECK: [0x04,0x00,0x80,0xd2,0x00,0xf0,0x01,0x40]
+
+
+v_cubeid_f32 v0, s0, s0, -1
+// CHECK: [0x00,0x00,0xc4,0xd1,0x00,0x00,0x04,0x03]
+
+v_cubeid_f32 v0, s0, s0, -4.0
+// CHECK: [0x00,0x00,0xc4,0xd1,0x00,0x00,0xdc,0x03]
+
+v_cubeid_f32 v0, s0, s0, 0x3e22f983
+// CHECK: [0x00,0x00,0xc4,0xd1,0x00,0x00,0xe0,0x03]
+
+v_cubeid_f32 v0, s0, s0, neg(0x3e22f983)
+// CHECK: [0x00,0x00,0xc4,0xd1,0x00,0x00,0xe0,0x83]
+
+v_cubeid_f32 v0, s0, s0, abs(0x3e22f983)
+// CHECK: [0x00,0x04,0xc4,0xd1,0x00,0x00,0xe0,0x03]
+
+
+//---------------------------------------------------------------------------//
+// VOP3 Instructions without Input Modifiers but with Output Modifiers
+//---------------------------------------------------------------------------//
+
+v_cvt_f64_i32_e64 v[5:6], s1 clamp
+// CHECK: [0x05,0x80,0x44,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f64_i32_e64 v[5:6], s1 mul:2
+// CHECK: [0x05,0x00,0x44,0xd1,0x01,0x00,0x00,0x08]
+
+v_cvt_f64_i32_e64 v[5:6], s1 mul:4
+// CHECK: [0x05,0x00,0x44,0xd1,0x01,0x00,0x00,0x10]
+
+v_cvt_f64_i32_e64 v[5:6], s1 div:2
+// CHECK: [0x05,0x00,0x44,0xd1,0x01,0x00,0x00,0x18]
+
+
+v_cvt_f64_u32_e64 v[5:6], s1 clamp
+// CHECK: [0x05,0x80,0x56,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f64_u32_e64 v[5:6], s1 mul:2
+// CHECK: [0x05,0x00,0x56,0xd1,0x01,0x00,0x00,0x08]
+
+v_cvt_f64_u32_e64 v[5:6], s1 mul:4
+// CHECK: [0x05,0x00,0x56,0xd1,0x01,0x00,0x00,0x10]
+
+v_cvt_f64_u32_e64 v[5:6], s1 div:2
+// CHECK: [0x05,0x00,0x56,0xd1,0x01,0x00,0x00,0x18]
+
+
+v_cvt_f32_i32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x45,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_i32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x45,0xd1,0x01,0x00,0x00,0x08]
+
+v_cvt_f32_i32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x45,0xd1,0x01,0x00,0x00,0x10]
+
+v_cvt_f32_i32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x45,0xd1,0x01,0x00,0x00,0x18]
+
+
+v_cvt_f32_u32_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x46,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_u32_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x46,0xd1,0x01,0x00,0x00,0x08]
+
+v_cvt_f32_u32_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x46,0xd1,0x01,0x00,0x00,0x10]
+
+v_cvt_f32_u32_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x46,0xd1,0x01,0x00,0x00,0x18]
+
+
+v_cvt_off_f32_i4_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x4e,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_off_f32_i4_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x4e,0xd1,0x01,0x00,0x00,0x08]
+
+v_cvt_off_f32_i4_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x4e,0xd1,0x01,0x00,0x00,0x10]
+
+v_cvt_off_f32_i4_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x4e,0xd1,0x01,0x00,0x00,0x18]
+
+
+v_cvt_f32_ubyte0_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x51,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte0_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x51,0xd1,0x01,0x00,0x00,0x08]
+
+v_cvt_f32_ubyte0_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x51,0xd1,0x01,0x00,0x00,0x10]
+
+v_cvt_f32_ubyte0_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x51,0xd1,0x01,0x00,0x00,0x18]
+
+
+v_cvt_f32_ubyte1_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x52,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte1_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x52,0xd1,0x01,0x00,0x00,0x08]
+
+v_cvt_f32_ubyte1_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x52,0xd1,0x01,0x00,0x00,0x10]
+
+v_cvt_f32_ubyte1_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x52,0xd1,0x01,0x00,0x00,0x18]
+
+
+v_cvt_f32_ubyte2_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x53,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte2_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x53,0xd1,0x01,0x00,0x00,0x08]
+
+v_cvt_f32_ubyte2_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x53,0xd1,0x01,0x00,0x00,0x10]
+
+v_cvt_f32_ubyte2_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x53,0xd1,0x01,0x00,0x00,0x18]
+
+
+v_cvt_f32_ubyte3_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x54,0xd1,0x01,0x00,0x00,0x00]
+
+v_cvt_f32_ubyte3_e64 v5, s1 mul:2
+// CHECK: [0x05,0x00,0x54,0xd1,0x01,0x00,0x00,0x08]
+
+v_cvt_f32_ubyte3_e64 v5, s1 mul:4
+// CHECK: [0x05,0x00,0x54,0xd1,0x01,0x00,0x00,0x10]
+
+v_cvt_f32_ubyte3_e64 v5, s1 div:2
+// CHECK: [0x05,0x00,0x54,0xd1,0x01,0x00,0x00,0x18]
+
+
+// NB: output modifiers are not supported for f16
+v_cvt_f16_i16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x7a,0xd1,0x01,0x00,0x00,0x00]
+
+// NB: output modifiers are not supported for f16
+v_cvt_f16_u16_e64 v5, s1 clamp
+// CHECK: [0x05,0x80,0x79,0xd1,0x01,0x00,0x00,0x00]
diff --git a/test/MC/AMDGPU/vop3.s b/test/MC/AMDGPU/vop3.s
index 21fbc644bb5c..98cc9cc35bc7 100644
--- a/test/MC/AMDGPU/vop3.s
+++ b/test/MC/AMDGPU/vop3.s
@@ -1,9 +1,10 @@
// RUN: not llvm-mc -arch=amdgcn -show-encoding %s | FileCheck %s --check-prefix=SICI
-// RUN: llvm-mc -arch=amdgcn -mcpu=hawaii -show-encoding %s | FileCheck %s --check-prefix=CI
+// RUN: not llvm-mc -arch=amdgcn -mcpu=hawaii -show-encoding %s | FileCheck %s --check-prefix=CI
// RUN: not llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s | FileCheck %s --check-prefix=VI
-// RUN: not llvm-mc -arch=amdgcn -show-encoding %s 2>&1 | FileCheck %s --check-prefix=NOSI
-// RUN: not llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s 2>&1 | FileCheck %s -check-prefix=NOVI
+// RUN: not llvm-mc -arch=amdgcn -show-encoding %s 2>&1 | FileCheck %s --check-prefix=NOSI --check-prefix=NOSICI
+// RUN: not llvm-mc -arch=amdgcn -mcpu=hawaii -show-encoding %s 2>&1 | FileCheck %s --check-prefix=NOSICI
+// RUN: not llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s 2>&1 | FileCheck %s --check-prefix=NOVI
//===----------------------------------------------------------------------===//
@@ -258,6 +259,18 @@ v_mac_f32_e64 v0, -v1, |v2|
// SICI: v_mac_f32_e64 v0, -v1, |v2| ; encoding: [0x00,0x02,0x3e,0xd2,0x01,0x05,0x02,0x20]
// VI: v_mac_f32_e64 v0, -v1, |v2| ; encoding: [0x00,0x02,0x16,0xd1,0x01,0x05,0x02,0x20]
+v_mac_f16_e64 v0, 0.5, flat_scratch_lo
+// NOSICI: error:
+// VI: v_mac_f16_e64 v0, 0.5, flat_scratch_lo ; encoding: [0x00,0x00,0x23,0xd1,0xf0,0xcc,0x00,0x00]
+
+v_mac_f16_e64 v0, -4.0, flat_scratch_lo
+// NOSICI: error:
+// VI: v_mac_f16_e64 v0, -4.0, flat_scratch_lo ; encoding: [0x00,0x00,0x23,0xd1,0xf7,0xcc,0x00,0x00]
+
+v_mac_f16_e64 v0, flat_scratch_lo, -4.0
+// NOSICI: error:
+// VI: v_mac_f16_e64 v0, flat_scratch_lo, -4.0 ; encoding: [0x00,0x00,0x23,0xd1,0x66,0xee,0x01,0x00]
+
///===---------------------------------------------------------------------===//
// VOP3 Instructions
///===---------------------------------------------------------------------===//
@@ -373,6 +386,52 @@ v_mad_f32 v9, 0.5, v5, -v8
// VI: v_mad_f32 v9, 0.5, v5, -v8 ; encoding: [0x09,0x00,0xc1,0xd1,0xf0,0x0a,0x22,0x84]
v_mqsad_u32_u8 v[0:3], s[2:3], v4, v[0:3]
-// CI: v_mqsad_u32_u8 v[0:3], s[2:3], v4, v[0:3] ; encoding: [0x00,0x00,0xe8,0xd2,0x02,0x08,0x02,0x04]
+// CI: v_mqsad_u32_u8 v[0:3], s[2:3], v4, v[0:3] ; encoding: [0x00,0x00,0xea,0xd2,0x02,0x08,0x02,0x04]
// VI: v_mqsad_u32_u8 v[0:3], s[2:3], v4, v[0:3] ; encoding: [0x00,0x00,0xe7,0xd1,0x02,0x08,0x02,0x04]
// NOSI: error: instruction not supported on this GPU
+
+v_mad_u64_u32 v[5:6], s[12:13], s1, 0, 0
+// CI: v_mad_u64_u32 v[5:6], s[12:13], s1, 0, 0 ; encoding: [0x05,0x0c,0xec,0xd2,0x01,0x00,0x01,0x02]
+// VI: v_mad_u64_u32 v[5:6], s[12:13], s1, 0, 0 ; encoding: [0x05,0x0c,0xe8,0xd1,0x01,0x00,0x01,0x02]
+// NOSI: error: instruction not supported on this GPU
+
+v_mad_i64_i32 v[5:6], s[12:13], s1, 0, v[254:255]
+// CI: v_mad_i64_i32 v[5:6], s[12:13], s1, 0, v[254:255] ; encoding: [0x05,0x0c,0xee,0xd2,0x01,0x00,0xf9,0x07]
+// VI: v_mad_i64_i32 v[5:6], s[12:13], s1, 0, v[254:255] ; encoding: [0x05,0x0c,0xe9,0xd1,0x01,0x00,0xf9,0x07]
+// NOSI: error: instruction not supported on this GPU
+
+v_cmp_class_f16_e64 s[10:11], v1, s2
+// NOSICI: error: instruction not supported on this GPU
+// VI: v_cmp_class_f16_e64 s[10:11], v1, s2 ; encoding: [0x0a,0x00,0x14,0xd0,0x01,0x05,0x00,0x00]
+
+v_cmp_class_f32_e64 s[10:11], -v1, s2
+// SICI: v_cmp_class_f32_e64 s[10:11], -v1, s2 ; encoding: [0x0a,0x00,0x10,0xd1,0x01,0x05,0x00,0x20]
+// VI: v_cmp_class_f32_e64 s[10:11], -v1, s2 ; encoding: [0x0a,0x00,0x10,0xd0,0x01,0x05,0x00,0x20]
+
+v_cmp_class_f64_e64 s[10:11], -v[254:255], s2
+// SICI: v_cmp_class_f64_e64 s[10:11], -v[254:255], s2 ; encoding: [0x0a,0x00,0x50,0xd1,0xfe,0x05,0x00,0x20]
+// VI: v_cmp_class_f64_e64 s[10:11], -v[254:255], s2 ; encoding: [0x0a,0x00,0x12,0xd0,0xfe,0x05,0x00,0x20]
+
+v_cmpx_class_f16_e64 s[10:11], v255, s2
+// NOSICI: error: instruction not supported on this GPU
+// VI: v_cmpx_class_f16_e64 s[10:11], v255, s2 ; encoding: [0x0a,0x00,0x15,0xd0,0xff,0x05,0x00,0x00]
+
+v_cmpx_class_f32_e64 s[10:11], 0, s101
+// SICI: v_cmpx_class_f32_e64 s[10:11], 0, s101 ; encoding: [0x0a,0x00,0x30,0xd1,0x80,0xca,0x00,0x00]
+// VI: v_cmpx_class_f32_e64 s[10:11], 0, s101 ; encoding: [0x0a,0x00,0x11,0xd0,0x80,0xca,0x00,0x00]
+
+v_cmpx_class_f64_e64 s[10:11], -v[1:2], s2
+// SICI: v_cmpx_class_f64_e64 s[10:11], -v[1:2], s2 ; encoding: [0x0a,0x00,0x70,0xd1,0x01,0x05,0x00,0x20]
+// VI: v_cmpx_class_f64_e64 s[10:11], -v[1:2], s2 ; encoding: [0x0a,0x00,0x13,0xd0,0x01,0x05,0x00,0x20]
+
+//
+// Modifier tests:
+//
+
+v_mul_f64 v[0:1], |0|, |0|
+// SICI: v_mul_f64 v[0:1], |0|, |0| ; encoding: [0x00,0x03,0xca,0xd2,0x80,0x00,0x01,0x00]
+// VI: v_mul_f64 v[0:1], |0|, |0| ; encoding: [0x00,0x03,0x81,0xd2,0x80,0x00,0x01,0x00]
+
+v_cubeid_f32 v0, |-1|, |-1.0|, |1.0|
+// SICI: v_cubeid_f32 v0, |-1|, |-1.0|, |1.0| ; encoding: [0x00,0x07,0x88,0xd2,0xc1,0xe6,0xc9,0x03]
+// VI: v_cubeid_f32 v0, |-1|, |-1.0|, |1.0| ; encoding: [0x00,0x07,0xc4,0xd1,0xc1,0xe6,0xc9,0x03]
diff --git a/test/MC/AMDGPU/vop3p-err.s b/test/MC/AMDGPU/vop3p-err.s
new file mode 100644
index 000000000000..f4b1a3da714f
--- /dev/null
+++ b/test/MC/AMDGPU/vop3p-err.s
@@ -0,0 +1,120 @@
+// RUN: not llvm-mc -arch=amdgcn -mcpu=gfx901 -show-encoding %s 2>&1 | FileCheck -check-prefix=GFX9 %s
+
+// GFX9: 31: error: failed parsing operand.
+v_pk_add_u16 v1, v2, v3 op_sel
+
+// GFX9: 32: error: failed parsing operand.
+v_pk_add_u16 v1, v2, v3 op_sel:
+
+// GFX9: 33: error: failed parsing operand.
+v_pk_add_u16 v1, v2, v3 op_sel:[
+
+// GFX9: 33: error: failed parsing operand.
+v_pk_add_u16 v1, v2, v3 op_sel:[]
+
+// GFX9: 34: error: failed parsing operand.
+v_pk_add_u16 v1, v2, v3 op_sel:[,]
+
+// XXGFX9: 34: error: failed parsing operand.
+// v_pk_add_u16 v1, v2, v3 op_sel:[0]
+
+// GFX9: 35: error: failed parsing operand.
+v_pk_add_u16 v1, v2, v3 op_sel:[0,]
+
+// XXGFX9: 36: error: failed parsing operand.
+// v_pk_add_u16 v1, v2, v3 op_sel:[,0]
+
+// GFX9: 36: error: failed parsing operand.
+v_pk_add_u16 v1, v2, v3 op_sel:[0,2]
+
+// GFX9: 35: error: failed parsing operand.
+v_pk_add_u16 v1, v2, v3 op_sel:[2,0]
+
+// GFX9: 33: error: failed parsing operand.
+v_pk_add_u16 v1, v2, v3 op_sel:[-1,0]
+
+// GFX9: 35: error: failed parsing operand.
+v_pk_add_u16 v1, v2, v3 op_sel:[0,-1]
+
+// GFX9: 40: error: not a valid operand.
+v_pk_add_u16 v1, v2, v3 op_sel:[0,0,0,0]
+
+// XXGFX9: invalid operand for instruction
+v_pk_add_u16 v1, v2, v3 neg_lo:[0,0]
+
+//
+// Regular modifiers on packed instructions
+//
+
+// FIXME: should be invalid operand for instruction
+// GFX9: :18: error: not a valid operand.
+v_pk_add_f16 v1, |v2|, v3
+
+// GFX9: :21: error: not a valid operand.
+v_pk_add_f16 v1, abs(v2), v3
+
+// GFX9: :22: error: not a valid operand.
+v_pk_add_f16 v1, v2, |v3|
+
+// GFX9: :25: error: not a valid operand.
+v_pk_add_f16 v1, v2, abs(v3)
+
+// GFX9: :19: error: invalid operand for instruction
+v_pk_add_f16 v1, -v2, v3
+
+// GFX9: :23: error: invalid operand for instruction
+v_pk_add_f16 v1, v2, -v3
+
+// GFX9: :21: error: not a valid operand.
+v_pk_add_u16 v1, abs(v2), v3
+
+// GFX9: :19: error: invalid operand for instruction
+v_pk_add_u16 v1, -v2, v3
+
+
+//
+// Packed operands on the non-packed VOP3P instructions
+//
+
+// GFX9: invalid operand for instruction
+v_mad_mix_f32 v1, v2, v3, v4 op_sel:[0,0,0]
+
+// GFX9: invalid operand for instruction
+v_mad_mix_f32 v1, v2, v3, v4 op_sel_hi:[0,0,0]
+
+// GFX9: invalid operand for instruction
+v_mad_mix_f32 v1, v2, v3, v4 neg_lo:[0,0,0]
+
+// GFX9: invalid operand for instruction
+v_mad_mix_f32 v1, v2, v3, v4 neg_hi:[0,0,0]
+
+// GFX9: invalid operand for instruction
+v_mad_mixlo_f16 v1, v2, v3, v4 op_sel:[0,0,0]
+
+// GFX9: invalid operand for instruction
+v_mad_mixlo_f16 v1, v2, v3, v4 op_sel_hi:[0,0,0]
+
+// GFX9: invalid operand for instruction
+v_mad_mixlo_f16 v1, v2, v3, v4 neg_lo:[0,0,0]
+
+// GFX9: invalid operand for instruction
+v_mad_mixlo_f16 v1, v2, v3, v4 neg_hi:[0,0,0]
+
+// GFX9: invalid operand for instruction
+v_mad_mixhi_f16 v1, v2, v3, v4 op_sel:[0,0,0]
+
+// GFX9: invalid operand for instruction
+v_mad_mixhi_f16 v1, v2, v3, v4 op_sel_hi:[0,0,0]
+
+// GFX9: invalid operand for instruction
+v_mad_mixhi_f16 v1, v2, v3, v4 neg_lo:[0,0,0]
+
+// GFX9: invalid operand for instruction
+v_mad_mixhi_f16 v1, v2, v3, v4 neg_hi:[0,0,0]
+
+//
+// Constant bus restrictions
+//
+
+// GFX9: invalid operand (violates constant bus restrictions)
+v_pk_add_f16 v255, s1, s2
diff --git a/test/MC/AMDGPU/vop3p.s b/test/MC/AMDGPU/vop3p.s
new file mode 100644
index 000000000000..c9eda69e13d2
--- /dev/null
+++ b/test/MC/AMDGPU/vop3p.s
@@ -0,0 +1,216 @@
+// RUN: llvm-mc -arch=amdgcn -mcpu=gfx901 -show-encoding %s | FileCheck -check-prefix=GFX9 %s
+
+//
+// Test op_sel/op_sel_hi
+//
+
+v_pk_add_u16 v1, v2, v3
+// GFX9: v_pk_add_u16 v1, v2, v3 ; encoding: [0x01,0x00,0x8a,0xd3,0x02,0x07,0x02,0x18]
+
+v_pk_add_u16 v1, v2, v3 op_sel:[0,0]
+// GFX9: v_pk_add_u16 v1, v2, v3 ; encoding: [0x01,0x00,0x8a,0xd3,0x02,0x07,0x02,0x18]
+
+v_pk_add_u16 v1, v2, v3 op_sel_hi:[1,1]
+// GFX9: v_pk_add_u16 v1, v2, v3 ; encoding: [0x01,0x00,0x8a,0xd3,0x02,0x07,0x02,0x18]
+
+v_pk_add_u16 v1, v2, v3 op_sel:[0,0] op_sel_hi:[1,1]
+// GFX9: v_pk_add_u16 v1, v2, v3 ; encoding: [0x01,0x00,0x8a,0xd3,0x02,0x07,0x02,0x18]
+
+v_pk_add_u16 v1, v2, v3 op_sel_hi:[0,0]
+// GFX9: v_pk_add_u16 v1, v2, v3 op_sel_hi:[0,0] ; encoding: [0x01,0x00,0x8a,0xd3,0x02,0x07,0x02,0x00]
+
+v_pk_add_u16 v1, v2, v3 op_sel:[0,0] op_sel_hi:[0,0]
+// GFX9: v_pk_add_u16 v1, v2, v3 op_sel_hi:[0,0] ; encoding: [0x01,0x00,0x8a,0xd3,0x02,0x07,0x02,0x00]
+
+v_pk_add_u16 v1, v2, v3 op_sel:[1,0]
+// GFX9: v_pk_add_u16 v1, v2, v3 op_sel:[1,0] ; encoding: [0x01,0x08,0x8a,0xd3,0x02,0x07,0x02,0x18]
+
+v_pk_add_u16 v1, v2, v3 op_sel:[0,1]
+// GFX9: v_pk_add_u16 v1, v2, v3 op_sel:[0,1] ; encoding: [0x01,0x10,0x8a,0xd3,0x02,0x07,0x02,0x18]
+
+v_pk_add_u16 v1, v2, v3 op_sel:[1,1]
+// GFX9: v_pk_add_u16 v1, v2, v3 op_sel:[1,1] ; encoding: [0x01,0x18,0x8a,0xd3,0x02,0x07,0x02,0x18]
+
+v_pk_add_u16 v1, v2, v3 op_sel_hi:[0,1]
+// GFX9: v_pk_add_u16 v1, v2, v3 op_sel_hi:[0,1] ; encoding: [0x01,0x00,0x8a,0xd3,0x02,0x07,0x02,0x10]
+
+v_pk_add_u16 v1, v2, v3 op_sel_hi:[1,0]
+// GFX9: v_pk_add_u16 v1, v2, v3 op_sel_hi:[1,0] ; encoding: [0x01,0x00,0x8a,0xd3,0x02,0x07,0x02,0x08]
+
+v_pk_add_u16 v1, v2, v3 op_sel:[1,1] op_sel_hi:[1,1]
+// GFX9: v_pk_add_u16 v1, v2, v3 op_sel:[1,1] ; encoding: [0x01,0x18,0x8a,0xd3,0x02,0x07,0x02,0x18]
+
+v_pk_add_u16 v1, v2, v3 op_sel:[1,0] op_sel_hi:[1,0]
+// GFX9: v_pk_add_u16 v1, v2, v3 op_sel:[1,0] op_sel_hi:[1,0] ; encoding: [0x01,0x08,0x8a,0xd3,0x02,0x07,0x02,0x08]
+
+v_pk_add_u16 v1, v2, v3 op_sel:[0,1] op_sel_hi:[0,1]
+// GFX9: v_pk_add_u16 v1, v2, v3 op_sel:[0,1] op_sel_hi:[0,1] ; encoding: [0x01,0x10,0x8a,0xd3,0x02,0x07,0x02,0x10]
+
+v_pk_add_u16 v1, v2, v3 op_sel:[1,0] op_sel_hi:[0,1]
+// GFX9: v_pk_add_u16 v1, v2, v3 op_sel:[1,0] op_sel_hi:[0,1] ; encoding: [0x01,0x08,0x8a,0xd3,0x02,0x07,0x02,0x10]
+
+v_pk_add_u16 v1, v2, v3 op_sel:[0,1] op_sel_hi:[1,0]
+// GFX9: v_pk_add_u16 v1, v2, v3 op_sel:[0,1] op_sel_hi:[1,0] ; encoding: [0x01,0x10,0x8a,0xd3,0x02,0x07,0x02,0x08]
+
+//
+// Test src2 op_sel/op_sel_hi
+//
+
+v_pk_fma_f16 v8, v0, s0, v1
+// GFX9: v_pk_fma_f16 v8, v0, s0, v1 ; encoding: [0x08,0x40,0x8e,0xd3,0x00,0x01,0x04,0x1c]
+
+v_pk_fma_f16 v8, v0, s0, v1 neg_lo:[0,0,0] neg_hi:[0,0,0]
+// GFX9: v_pk_fma_f16 v8, v0, s0, v1 ; encoding: [0x08,0x40,0x8e,0xd3,0x00,0x01,0x04,0x1c]
+
+v_pk_fma_f16 v8, v0, s0, v1 op_sel:[0,0,0] op_sel_hi:[1,1,1] neg_lo:[0,0,0] neg_hi:[0,0,0]
+// GFX9: v_pk_fma_f16 v8, v0, s0, v1 ; encoding: [0x08,0x40,0x8e,0xd3,0x00,0x01,0x04,0x1c]
+
+v_pk_fma_f16 v8, v0, s0, v1 op_sel:[0,0,0] op_sel_hi:[1,1,1]
+// GFX9: v_pk_fma_f16 v8, v0, s0, v1 ; encoding: [0x08,0x40,0x8e,0xd3,0x00,0x01,0x04,0x1c]
+
+v_pk_fma_f16 v8, v0, s0, v1 op_sel:[0,0,0] op_sel_hi:[0,0,0]
+// GFX9: v_pk_fma_f16 v8, v0, s0, v1 op_sel_hi:[0,0,0] ; encoding: [0x08,0x00,0x8e,0xd3,0x00,0x01,0x04,0x04]
+
+v_pk_fma_f16 v8, v0, s0, v1 op_sel:[0,0,1] op_sel_hi:[0,0,1]
+// GFX9: v_pk_fma_f16 v8, v0, s0, v1 op_sel:[0,0,1] op_sel_hi:[0,0,1] ; encoding: [0x08,0x60,0x8e,0xd3,0x00,0x01,0x04,0x04]
+
+//
+// Test neg_lo/neg_hi
+//
+
+v_pk_fma_f16 v8, v0, s0, v1 neg_lo:[1,1,1]
+// GFX9: v_pk_fma_f16 v8, v0, s0, v1 neg_lo:[1,1,1] ; encoding: [0x08,0x40,0x8e,0xd3,0x00,0x01,0x04,0xfc]
+
+v_pk_fma_f16 v8, v0, s0, v1 neg_hi:[1,1,1]
+// GFX9: v_pk_fma_f16 v8, v0, s0, v1 neg_hi:[1,1,1] ; encoding: [0x08,0x47,0x8e,0xd3,0x00,0x01,0x04,0x1c]
+
+v_pk_fma_f16 v8, v0, s0, v1 neg_lo:[1,1,1] neg_hi:[1,1,1]
+// GFX9: v_pk_fma_f16 v8, v0, s0, v1 neg_lo:[1,1,1] neg_hi:[1,1,1] ; encoding: [0x08,0x47,0x8e,0xd3,0x00,0x01,0x04,0xfc]
+
+v_pk_fma_f16 v8, v0, s0, v1 neg_lo:[1,0,0]
+// GFX9: v_pk_fma_f16 v8, v0, s0, v1 neg_lo:[1,0,0] ; encoding: [0x08,0x40,0x8e,0xd3,0x00,0x01,0x04,0x3c]
+
+v_pk_fma_f16 v8, v0, s0, v1 neg_lo:[0,1,0]
+// GFX9: v_pk_fma_f16 v8, v0, s0, v1 neg_lo:[0,1,0] ; encoding: [0x08,0x40,0x8e,0xd3,0x00,0x01,0x04,0x5c]
+
+v_pk_fma_f16 v8, v0, s0, v1 neg_lo:[0,0,1]
+// GFX9: v_pk_fma_f16 v8, v0, s0, v1 neg_lo:[0,0,1] ; encoding: [0x08,0x40,0x8e,0xd3,0x00,0x01,0x04,0x9c]
+
+v_pk_fma_f16 v8, v0, s0, v1 neg_hi:[1,0,0]
+// GFX9: v_pk_fma_f16 v8, v0, s0, v1 neg_hi:[1,0,0] ; encoding: [0x08,0x41,0x8e,0xd3,0x00,0x01,0x04,0x1c]
+
+v_pk_fma_f16 v8, v0, s0, v1 neg_hi:[0,1,0]
+// GFX9: v_pk_fma_f16 v8, v0, s0, v1 neg_hi:[0,1,0] ; encoding: [0x08,0x42,0x8e,0xd3,0x00,0x01,0x04,0x1c]
+
+v_pk_fma_f16 v8, v0, s0, v1 neg_hi:[0,0,1]
+// GFX9: v_pk_fma_f16 v8, v0, s0, v1 neg_hi:[0,0,1] ; encoding: [0x08,0x44,0x8e,0xd3,0x00,0x01,0x04,0x1c]
+
+
+// Test clamp
+v_pk_fma_f16 v8, v0, s0, v1 clamp
+// GFX9: v_pk_fma_f16 v8, v0, s0, v1 clamp ; encoding: [0x08,0xc0,0x8e,0xd3,0x00,0x01,0x04,0x1c]
+
+v_pk_add_u16 v1, v2, v3 clamp
+// GFX9: v_pk_add_u16 v1, v2, v3 clamp ; encoding: [0x01,0x80,0x8a,0xd3,0x02,0x07,0x02,0x18]
+
+v_pk_min_i16 v0, v1, v2 clamp
+// GFX9: v_pk_min_i16 v0, v1, v2 clamp ; encoding: [0x00,0x80,0x88,0xd3,0x01,0x05,0x02,0x18]
+
+//
+// Instruction tests:
+//
+
+v_pk_mul_lo_u16 v0, v1, v2
+// GFX9: v_pk_mul_lo_u16 v0, v1, v2 ; encoding: [0x00,0x00,0x81,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_add_i16 v0, v1, v2
+// GFX9: v_pk_add_i16 v0, v1, v2 ; encoding: [0x00,0x00,0x82,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_sub_i16 v0, v1, v2
+// GFX9: v_pk_sub_i16 v0, v1, v2 ; encoding: [0x00,0x00,0x83,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_lshlrev_b16 v0, v1, v2
+// GFX9: v_pk_lshlrev_b16 v0, v1, v2 ; encoding: [0x00,0x00,0x84,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_lshrrev_b16 v0, v1, v2
+// GFX9: v_pk_lshrrev_b16 v0, v1, v2 ; encoding: [0x00,0x00,0x85,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_ashrrev_i16 v0, v1, v2
+// GFX9: v_pk_ashrrev_i16 v0, v1, v2 ; encoding: [0x00,0x00,0x86,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_max_i16 v0, v1, v2
+// GFX9: v_pk_max_i16 v0, v1, v2 ; encoding: [0x00,0x00,0x87,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_min_i16 v0, v1, v2
+// GFX9: v_pk_min_i16 v0, v1, v2 ; encoding: [0x00,0x00,0x88,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_add_u16 v0, v1, v2
+// GFX9: v_pk_add_u16 v0, v1, v2 ; encoding: [0x00,0x00,0x8a,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_max_u16 v0, v1, v2
+// GFX9: v_pk_max_u16 v0, v1, v2 ; encoding: [0x00,0x00,0x8c,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_min_u16 v0, v1, v2
+// GFX9: v_pk_min_u16 v0, v1, v2 ; encoding: [0x00,0x00,0x8d,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_fma_f16 v0, v1, v2, v3
+// GFX9: v_pk_fma_f16 v0, v1, v2, v3 ; encoding: [0x00,0x40,0x8e,0xd3,0x01,0x05,0x0e,0x1c]
+
+v_pk_add_f16 v0, v1, v2
+// GFX9: v_pk_add_f16 v0, v1, v2 ; encoding: [0x00,0x00,0x8f,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_mul_f16 v0, v1, v2
+// GFX9: v_pk_mul_f16 v0, v1, v2 ; encoding: [0x00,0x00,0x90,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_min_f16 v0, v1, v2
+// GFX9: v_pk_min_f16 v0, v1, v2 ; encoding: [0x00,0x00,0x91,0xd3,0x01,0x05,0x02,0x18]
+
+v_pk_max_f16 v0, v1, v2
+// GFX9: v_pk_max_f16 v0, v1, v2 ; encoding: [0x00,0x00,0x92,0xd3,0x01,0x05,0x02,0x18]
+
+v_mad_mix_f32 v0, v1, v2, v3
+// GFX9: v_mad_mix_f32 v0, v1, v2, v3 ; encoding: [0x00,0x00,0xa0,0xd3,0x01,0x05,0x0e,0x04]
+
+v_mad_mixlo_f16 v0, v1, v2, v3
+// GFX9: v_mad_mixlo_f16 v0, v1, v2, v3 ; encoding: [0x00,0x00,0xa1,0xd3,0x01,0x05,0x0e,0x04]
+
+v_mad_mixhi_f16 v0, v1, v2, v3
+// GFX9: v_mad_mixhi_f16 v0, v1, v2, v3 ; encoding: [0x00,0x00,0xa2,0xd3,0x01,0x05,0x0e,0x04]
+
+
+//
+// Regular source modifiers on non-packed instructions
+//
+
+v_mad_mix_f32 v0, abs(v1), v2, v3
+// GFX9: v_mad_mix_f32 v0, |v1|, v2, v3 ; encoding: [0x00,0x01,0xa0,0xd3,0x01,0x05,0x0e,0x04]
+
+v_mad_mix_f32 v0, v1, abs(v2), v3
+// GFX9: v_mad_mix_f32 v0, v1, |v2|, v3 ; encoding: [0x00,0x02,0xa0,0xd3,0x01,0x05,0x0e,0x04]
+
+v_mad_mix_f32 v0, v1, v2, abs(v3)
+// GFX9: v_mad_mix_f32 v0, v1, v2, |v3| ; encoding: [0x00,0x04,0xa0,0xd3,0x01,0x05,0x0e,0x04]
+
+v_mad_mix_f32 v0, -v1, v2, v3
+// GFX9: v_mad_mix_f32 v0, -v1, v2, v3 ; encoding: [0x00,0x00,0xa0,0xd3,0x01,0x05,0x0e,0x24]
+
+v_mad_mix_f32 v0, v1, -v2, v3
+// GFX9: v_mad_mix_f32 v0, v1, -v2, v3 ; encoding: [0x00,0x00,0xa0,0xd3,0x01,0x05,0x0e,0x44]
+
+v_mad_mix_f32 v0, v1, v2, -v3
+// GFX9: v_mad_mix_f32 v0, v1, v2, -v3 ; encoding: [0x00,0x00,0xa0,0xd3,0x01,0x05,0x0e,0x84]
+
+v_mad_mix_f32 v0, -abs(v1), v2, v3
+// GFX9: v_mad_mix_f32 v0, -|v1|, v2, v3 ; encoding: [0x00,0x01,0xa0,0xd3,0x01,0x05,0x0e,0x24]
+
+v_mad_mix_f32 v0, v1, -abs(v2), v3
+// GFX9: v_mad_mix_f32 v0, v1, -|v2|, v3 ; encoding: [0x00,0x02,0xa0,0xd3,0x01,0x05,0x0e,0x44]
+
+v_mad_mix_f32 v0, v1, v2, -abs(v3)
+// GFX9: v_mad_mix_f32 v0, v1, v2, -|v3| ; encoding: [0x00,0x04,0xa0,0xd3,0x01,0x05,0x0e,0x84]
+
+v_mad_mixlo_f16 v0, abs(v1), -v2, abs(v3)
+// GFX9: v_mad_mixlo_f16 v0, |v1|, -v2, |v3| ; encoding: [0x00,0x05,0xa1,0xd3,0x01,0x05,0x0e,0x44]
+
+v_mad_mixhi_f16 v0, -v1, abs(v2), -abs(v3)
+// GFX9: v_mad_mixhi_f16 v0, -v1, |v2|, -|v3| ; encoding: [0x00,0x06,0xa2,0xd3,0x01,0x05,0x0e,0xa4]
diff --git a/test/MC/ARM/Inputs/1.s b/test/MC/ARM/Inputs/1.s
new file mode 100644
index 000000000000..0afcc633f641
--- /dev/null
+++ b/test/MC/ARM/Inputs/1.s
@@ -0,0 +1,3 @@
+ .section .foobar,"ax",%progbits
+ nop
+ .word 32
diff --git a/test/MC/ARM/Inputs/2.s b/test/MC/ARM/Inputs/2.s
new file mode 100644
index 000000000000..0ecdb294ab86
--- /dev/null
+++ b/test/MC/ARM/Inputs/2.s
@@ -0,0 +1,3 @@
+ .section .foobar,"",%progbits
+ nop
+ .word 32
diff --git a/test/MC/ARM/Inputs/3.s b/test/MC/ARM/Inputs/3.s
new file mode 100644
index 000000000000..09392623fc10
--- /dev/null
+++ b/test/MC/ARM/Inputs/3.s
@@ -0,0 +1,3 @@
+ .section .foobar,"aw",%progbits
+ nop
+ .word 32
diff --git a/test/MC/ARM/Inputs/4.s b/test/MC/ARM/Inputs/4.s
new file mode 100644
index 000000000000..28d8244bb417
--- /dev/null
+++ b/test/MC/ARM/Inputs/4.s
@@ -0,0 +1,2 @@
+ .section .foobar,"",%progbits
+ .word 32
diff --git a/test/MC/ARM/Inputs/5.s b/test/MC/ARM/Inputs/5.s
new file mode 100644
index 000000000000..1faef539b135
--- /dev/null
+++ b/test/MC/ARM/Inputs/5.s
@@ -0,0 +1,2 @@
+ .section .foobar,"aw",%progbits
+ .word 32
diff --git a/test/MC/ARM/Inputs/6.s b/test/MC/ARM/Inputs/6.s
new file mode 100644
index 000000000000..0fdb9daaf295
--- /dev/null
+++ b/test/MC/ARM/Inputs/6.s
@@ -0,0 +1,12 @@
+ .section .foo
+ .word 30
+ .word 31
+ .word 32
+ .word 33
+ nop
+ .word 34
+ .word 35
+ .word 36
+ .word 37
+ .word 38
+ nop
diff --git a/test/MC/ARM/Inputs/7.s b/test/MC/ARM/Inputs/7.s
new file mode 100644
index 000000000000..b92a61ec971f
--- /dev/null
+++ b/test/MC/ARM/Inputs/7.s
@@ -0,0 +1,3 @@
+ .section .foobar,"aw",%progbits
+ .word 32
+ nop
diff --git a/test/MC/ARM/Inputs/attr.s b/test/MC/ARM/Inputs/attr.s
new file mode 100644
index 000000000000..412cad768425
--- /dev/null
+++ b/test/MC/ARM/Inputs/attr.s
@@ -0,0 +1,5 @@
+ .text
+ .syntax unified
+ .eabi_attribute 67, "2.09" @ Tag_conformance
+ .cpu arm7tdmi
+ .eabi_attribute 6, 2 @ Tag_CPU_arch
diff --git a/test/MC/ARM/Inputs/ident.s b/test/MC/ARM/Inputs/ident.s
new file mode 100644
index 000000000000..19d65fcc7e07
--- /dev/null
+++ b/test/MC/ARM/Inputs/ident.s
@@ -0,0 +1 @@
+ .ident "LLVM ARM Compiler"
diff --git a/test/MC/ARM/arm-thumb-trustzone.s b/test/MC/ARM/arm-thumb-trustzone.s
index 4fec4b7e982c..de38c7f15e09 100644
--- a/test/MC/ARM/arm-thumb-trustzone.s
+++ b/test/MC/ARM/arm-thumb-trustzone.s
@@ -16,11 +16,11 @@ _func:
@ SMC
@------------------------------------------------------------------------------
smc #0xf
- ite eq
+ it eq
smceq #0
@ NOTZ-NOT: smc #15
@ NOTZ-NOT: smceq #0
@ TZ: smc #15 @ encoding: [0xff,0xf7,0x00,0x80]
-@ TZ: ite eq @ encoding: [0x0c,0xbf]
+@ TZ: it eq @ encoding: [0x08,0xbf]
@ TZ: smceq #0 @ encoding: [0xf0,0xf7,0x00,0x80]
diff --git a/test/MC/ARM/basic-arm-instructions-v8.1a.s b/test/MC/ARM/basic-arm-instructions-v8.1a.s
index 9b764c18448a..6193796ffba3 100644
--- a/test/MC/ARM/basic-arm-instructions-v8.1a.s
+++ b/test/MC/ARM/basic-arm-instructions-v8.1a.s
@@ -192,10 +192,10 @@
//CHECK-ERROR: error: too few operands for instruction
//CHECK-ERROR: setpan
//CHECK-ERROR: ^
-//CHECK-ERROR: error: invalid operand for instruction
+//CHECK-ERROR: error: immediate operand must be in the range [0,1]
//CHECK-ERROR: setpan #-1
//CHECK-ERROR: ^
-//CHECK-ERROR: error: invalid operand for instruction
+//CHECK-ERROR: error: immediate operand must be in the range [0,1]
//CHECK-ERROR: setpan #2
//CHECK-ERROR: ^
diff --git a/test/MC/ARM/basic-thumb2-instructions.s b/test/MC/ARM/basic-thumb2-instructions.s
index f0319717b995..af1b6289755e 100644
--- a/test/MC/ARM/basic-thumb2-instructions.s
+++ b/test/MC/ARM/basic-thumb2-instructions.s
@@ -268,6 +268,11 @@ _func:
asrs.w r7, #5
asr.w r12, #21
+ asrs r1, r2, #1
+ itt eq
+ asrseq r1, r2, #1
+ asreq r1, r2, #1
+
@ CHECK: asr.w r2, r3, #12 @ encoding: [0x4f,0xea,0x23,0x32]
@ CHECK: asrs.w r8, r3, #32 @ encoding: [0x5f,0xea,0x23,0x08]
@ CHECK: asrs.w r2, r3, #1 @ encoding: [0x5f,0xea,0x63,0x02]
@@ -279,6 +284,10 @@ _func:
@ CHECK: asrs.w r7, r7, #5 @ encoding: [0x5f,0xea,0x67,0x17]
@ CHECK: asr.w r12, r12, #21 @ encoding: [0x4f,0xea,0x6c,0x5c]
+@ CHECK: asrs r1, r2, #1 @ encoding: [0x51,0x10]
+@ CHECK: itt eq @ encoding: [0x04,0xbf]
+@ CHECK: asrseq.w r1, r2, #1 @ encoding: [0x5f,0xea,0x62,0x01]
+@ CHECK: asreq r1, r2, #1 @ encoding: [0x51,0x10]
@------------------------------------------------------------------------------
@ ASR (register)
@@ -1314,6 +1323,11 @@ _func:
lsls.w r7, #5
lsl.w r12, #21
+ lsls r1, r2, #1
+ itt eq
+ lslseq r1, r2, #1
+ lsleq r1, r2, #1
+
@ CHECK: lsl.w r2, r3, #12 @ encoding: [0x4f,0xea,0x03,0x32]
@ CHECK: lsls.w r8, r3, #31 @ encoding: [0x5f,0xea,0xc3,0x78]
@ CHECK: lsls.w r2, r3, #1 @ encoding: [0x5f,0xea,0x43,0x02]
@@ -1325,6 +1339,10 @@ _func:
@ CHECK: lsls.w r7, r7, #5 @ encoding: [0x5f,0xea,0x47,0x17]
@ CHECK: lsl.w r12, r12, #21 @ encoding: [0x4f,0xea,0x4c,0x5c]
+@ CHECK: lsls r1, r2, #1 @ encoding: [0x51,0x00]
+@ CHECK: itt eq @ encoding: [0x04,0xbf]
+@ CHECK: lslseq.w r1, r2, #1 @ encoding: [0x5f,0xea,0x42,0x01]
+@ CHECK: lsleq r1, r2, #1 @ encoding: [0x51,0x00]
@------------------------------------------------------------------------------
@ LSL (register)
@@ -1352,6 +1370,11 @@ _func:
lsrs.w r7, #5
lsr.w r12, #21
+ lsrs r1, r2, #1
+ itt eq
+ lsrseq r1, r2, #1
+ lsreq r1, r2, #1
+
@ CHECK: lsr.w r2, r3, #12 @ encoding: [0x4f,0xea,0x13,0x32]
@ CHECK: lsrs.w r8, r3, #32 @ encoding: [0x5f,0xea,0x13,0x08]
@ CHECK: lsrs.w r2, r3, #1 @ encoding: [0x5f,0xea,0x53,0x02]
@@ -1363,6 +1386,10 @@ _func:
@ CHECK: lsrs.w r7, r7, #5 @ encoding: [0x5f,0xea,0x57,0x17]
@ CHECK: lsr.w r12, r12, #21 @ encoding: [0x4f,0xea,0x5c,0x5c]
+@ CHECK: lsrs r1, r2, #1 @ encoding: [0x51,0x08]
+@ CHECK: itt eq @ encoding: [0x04,0xbf]
+@ CHECK: lsrseq.w r1, r2, #1 @ encoding: [0x5f,0xea,0x52,0x01]
+@ CHECK: lsreq r1, r2, #1 @ encoding: [0x51,0x08]
@------------------------------------------------------------------------------
@ LSR (register)
@@ -3066,13 +3093,15 @@ _func:
@ SVC
@------------------------------------------------------------------------------
svc #0
- ite eq
+ it eq
svceq #255
+ it ne
swine #33
@ CHECK: svc #0 @ encoding: [0x00,0xdf]
-@ CHECK: ite eq @ encoding: [0x0c,0xbf]
+@ CHECK: it eq @ encoding: [0x08,0xbf]
@ CHECK: svceq #255 @ encoding: [0xff,0xdf]
+@ CHECK: it ne @ encoding: [0x18,0xbf]
@ CHECK: svcne #33 @ encoding: [0x21,0xdf]
diff --git a/test/MC/ARM/branch-disassemble.s b/test/MC/ARM/branch-disassemble.s
new file mode 100644
index 000000000000..4df40e05e8c9
--- /dev/null
+++ b/test/MC/ARM/branch-disassemble.s
@@ -0,0 +1,15 @@
+@ RUN: llvm-mc -mcpu=cortex-a9 -triple armv7-arm-none-eabi -filetype obj -o - %s \
+@ RUN: | llvm-objdump -mcpu=cortex-a9 -triple armv7-arm-none-eabi -d - \
+@ RUN: | FileCheck %s -check-prefix CHECK-ARM
+
+@ RUN: llvm-mc -mcpu=cortex-m3 -triple thumbv7m-arm-none-eabi -filetype obj -o - %s \
+@ RUN: | llvm-objdump -mcpu=cortex-m3 -triple thumbv7m-arm-none-eabi -d - \
+@ RUN: | FileCheck %s -check-prefix CHECK-THUMB
+
+b.w .Lbranch
+@ CHECK-ARM: b #4 <$a.0+0xC>
+@ CHECK-THUMB: b.w #8 <$t.0+0xC>
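+@ The two immediates differ because the printed offset is PC-relative: the PC
+@ reads 8 bytes ahead of the branch in ARM state but only 4 in Thumb, so both
+@ branches resolve to the same target at +0xC.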
+adds r0, r1, #42
+adds r1, r2, #42
+.Lbranch:
+movs r2, r3
diff --git a/test/MC/ARM/coff-relocations.s b/test/MC/ARM/coff-relocations.s
index fa2d407bb8f3..c15b99f17f78 100644
--- a/test/MC/ARM/coff-relocations.s
+++ b/test/MC/ARM/coff-relocations.s
@@ -14,21 +14,21 @@
branch24t:
b target
-@ CHECK-ENCODING-LABEL: branch24t
+@ CHECK-ENCODING-LABEL: branch24t:
@ CHECK-ENCODING-NEXT: b.w #0
.thumb_func
branch20t:
bcc target
-@ CHECK-ENCODING-LABEL: branch20t
+@ CHECK-ENCODING-LABEL: branch20t:
@ CHECK-ENCODING-NEXT: blo.w #0
.thumb_func
blx23t:
bl target
-@ CHECK-ENCODING-LABEL: blx23t
+@ CHECK-ENCODING-LABEL: blx23t:
@ CHECK-ENCODING-NEXT: bl #0
.thumb_func
@@ -37,7 +37,7 @@ mov32t:
movt r0, :upper16:target
blx r0
-@ CHECK-ENCODING-LABEL: mov32t
+@ CHECK-ENCODING-LABEL: mov32t:
@ CHECK-ENCODING-NEXT: movw r0, #0
@ CHECK-ENCODING-NEXT: movt r0, #0
@ CHECK-ENCODING-NEXT: blx r0
@@ -50,7 +50,7 @@ addr32:
.Laddr32:
.long target
-@ CHECK-ENCODING-LABEL: addr32
+@ CHECK-ENCODING-LABEL: addr32:
@ CHECK-ENCODING-NEXT: ldr r0, [pc, #4]
@ CHECK-ENCODING-NEXT: bx r0
@ CHECK-ENCODING-NEXT: trap
@@ -65,7 +65,7 @@ addr32nb:
.Laddr32nb:
.long target(imgrel)
-@ CHECK-ENCODING-LABEL: addr32nb
+@ CHECK-ENCODING-LABEL: addr32nb:
@ CHECK-ENCODING-NEXT: ldr.w r0, [pc, #4]
@ CHECK-ENCODING-NEXT: bx r0
@ CHECK-ENCODING-NEXT: trap
@@ -80,7 +80,7 @@ secrel:
.Lsecrel:
.long target(secrel32)
-@ CHECK-ENCODING-LABEL: secrel
+@ CHECK-ENCODING-LABEL: secrel:
@ CHECK-ENCODING-NEXT: ldr.w r0, [pc, #4]
@ CHECK-ENCODING-NEXT: bx r0
@ CHECK-ENCODING-NEXT: trap
diff --git a/test/MC/ARM/data-in-code.ll b/test/MC/ARM/data-in-code.ll
index c2194e9179c8..e579146acfb3 100644
--- a/test/MC/ARM/data-in-code.ll
+++ b/test/MC/ARM/data-in-code.ll
@@ -60,23 +60,6 @@ exit:
;; ARM-NEXT: Other:
;; ARM-NEXT: Section: [[MIXED_SECT]]
-;; ARM: Symbol {
-;; ARM: Name: $d
-;; ARM-NEXT: Value: 0x0
-;; ARM-NEXT: Size: 0
-;; ARM-NEXT: Binding: Local (0x0)
-;; ARM-NEXT: Type: None (0x0)
-;; ARM-NEXT: Other: 0
-;; ARM-NEXT: Section: .ARM.exidx
-;; ARM-NEXT: }
-
-;; ARM: Symbol {
-;; ARM: Name: $d
-;; ARM-NEXT: Value: 0
-;; ARM-NEXT: Size: 0
-;; ARM-NEXT: Binding: Local
-;; ARM-NEXT: Type: None
-
;; ARM-NOT: ${{[atd]}}
;; TMB: Symbol {
diff --git a/test/MC/ARM/diagnostics.s b/test/MC/ARM/diagnostics.s
index a1dd95f7d7fc..49299380d062 100644
--- a/test/MC/ARM/diagnostics.s
+++ b/test/MC/ARM/diagnostics.s
@@ -93,17 +93,19 @@
@ Out of range 16-bit immediate on BKPT
bkpt #65536
-@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,65535]
+@ CHECK-ERRORS: bkpt #65536
+@ CHECK-ERRORS: ^
@ Out of range immediates for v8 HLT instruction.
hlt #65536
hlt #-1
-@CHECK-ERRORS: error: invalid operand for instruction
+@CHECK-ERRORS: error: immediate operand must be in the range [0,65535]
@CHECK-ERRORS: hlt #65536
-@CHECK-ERRORS: ^
-@CHECK-ERRORS: error: invalid operand for instruction
+@CHECK-ERRORS: ^
+@CHECK-ERRORS: error: immediate operand must be in the range [0,65535]
@CHECK-ERRORS: hlt #-1
-@CHECK-ERRORS: ^
+@CHECK-ERRORS: ^
@ Illegal condition code for v8 HLT instruction.
hlteq #2
@@ -123,10 +125,14 @@
cdp2 p7, #2, c1, c1, c1, #8
cdp2 p7, #1, c1, c1, c1, #8
-@ CHECK-ERRORS: error: invalid operand for instruction
-@ CHECK-ERRORS: error: invalid operand for instruction
-@ CHECK-ERRORS: error: invalid operand for instruction
-@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS-V7: error: immediate operand must be in the range [0,7]
+@ CHECK-ERRORS-V7: error: immediate operand must be in the range [0,7]
+@ CHECK-ERRORS-V7: error: immediate operand must be in the range [0,7]
+@ CHECK-ERRORS-V7: error: immediate operand must be in the range [0,7]
+@ CHECK-ERRORS-V8: error: invalid operand for instruction
+@ CHECK-ERRORS-V8: error: invalid operand for instruction
+@ CHECK-ERRORS-V8: error: invalid operand for instruction
+@ CHECK-ERRORS-V8: error: invalid operand for instruction
@ Out of range immediates for DBG
dbg #-1
@@ -136,6 +142,7 @@
@ CHECK-ERRORS: error: immediate operand must be in the range [0,15]
@ Double-check that we're synced up with the right diagnostics.
@ CHECK-ERRORS: dbg #16
+@ CHECK-ERRORS: ^
@ Out of range immediate for MCR/MCR2/MCRR/MCRR2
mcr p7, #8, r5, c1, c1, #4
@@ -144,10 +151,10 @@
mcr2 p7, #1, r5, c1, c1, #8
mcrr p7, #16, r5, r4, c1
mcrr2 p7, #16, r5, r4, c1
-@ CHECK-ERRORS: error: invalid operand for instruction
-@ CHECK-ERRORS: error: invalid operand for instruction
-@ CHECK-ERRORS: error: invalid operand for instruction
-@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,7]
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,7]
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,7]
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,7]
@ CHECK-ERRORS: error: immediate operand must be in the range [0,15]
@ CHECK-ERRORS-V7: error: immediate operand must be in the range [0,15]
@ CHECK-ERRORS-V8: error: invalid operand for instruction
@@ -161,16 +168,20 @@
@ Out of range immediate for MOV
movw r9, 0x10000
@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: movw r9, 0x10000
+@ CHECK-ERRORS: ^
@ Invalid 's' bit usage for MOVW
movs r6, #0xffff
movwseq r9, #0xffff
-@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,255]
@ CHECK-ERRORS: error: instruction 'movw' can not set flags, but 's' suffix specified
@ Out of range immediate for MOVT
movt r9, 0x10000
@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: movt r9, 0x10000
+@ CHECK-ERRORS: ^
@ Out of range immediates for MRC/MRC2/MRRC/MRRC2
mrc p14, #8, r1, c1, c2, #4
@@ -179,10 +190,10 @@
mrc2 p14, #0, r1, c1, c2, #9
mrrc p7, #16, r5, r4, c1
mrrc2 p7, #17, r5, r4, c1
-@ CHECK-ERRORS: error: invalid operand for instruction
-@ CHECK-ERRORS: error: invalid operand for instruction
-@ CHECK-ERRORS: error: invalid operand for instruction
-@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,7]
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,7]
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,7]
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,7]
@ CHECK-ERRORS: error: immediate operand must be in the range [0,15]
@ CHECK-ERRORS-V7: error: immediate operand must be in the range [0,15]
@ CHECK-ERRORS-V8: error: invalid operand for instruction
@@ -242,10 +253,10 @@
ssat r8, #1, r10, lsl fred
ssat r8, #1, r10, lsl #fred
-@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: error: immediate operand must be in the range [1,32]
@ CHECK-ERRORS: ssat r8, #0, r10, lsl #8
@ CHECK-ERRORS: ^
-@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: error: immediate operand must be in the range [1,32]
@ CHECK-ERRORS: ssat r8, #33, r10, lsl #8
@ CHECK-ERRORS: ^
@ CHECK-ERRORS: error: 'lsr' shift amount must be in range [0,31]
@@ -274,10 +285,10 @@
ssat16 r2, #0, r7
ssat16 r3, #17, r5
-@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: error: immediate operand must be in the range [1,16]
@ CHECK-ERRORS: ssat16 r2, #0, r7
@ CHECK-ERRORS: ^
-@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: error: immediate operand must be in the range [1,16]
@ CHECK-ERRORS: ssat16 r3, #17, r5
@ CHECK-ERRORS: ^
@@ -292,7 +303,7 @@
@ Out of range immediate on SVC
svc #0x1000000
-@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,0xffffff]
@ CHECK-ERRORS: svc #0x1000000
@ CHECK-ERRORS: ^
@@ -407,7 +418,7 @@
@ Bad CPS instruction format.
cps f,#1
-@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,31]
@ CHECK-ERRORS: cps f,#1
@ CHECK-ERRORS: ^
@@ -491,10 +502,13 @@
foo2:
mov r0, foo2
movw r0, foo2
+ movt r0, foo2
@ CHECK-ERRORS: error: immediate expression for mov requires :lower16: or :upper16
@ CHECK-ERRORS: ^
@ CHECK-ERRORS: error: immediate expression for mov requires :lower16: or :upper16
@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: immediate expression for mov requires :lower16: or :upper16
+@ CHECK-ERRORS: ^
str r0, [r0, #4]!
str r0, [r0, r1]!
diff --git a/test/MC/ARM/dwarf-asm-multiple-sections.s b/test/MC/ARM/dwarf-asm-multiple-sections.s
index 49550559e956..619f4e4c3bff 100644
--- a/test/MC/ARM/dwarf-asm-multiple-sections.s
+++ b/test/MC/ARM/dwarf-asm-multiple-sections.s
@@ -1,11 +1,14 @@
+// RUN: llvm-mc < %s -triple=armv7-linux-gnueabi -filetype=obj -o %t -g -dwarf-version 5 -fdebug-compilation-dir=/tmp
+// RUN: llvm-dwarfdump %t | FileCheck -check-prefix DWARF -check-prefix DWARF45 %s
+// RUN: llvm-objdump -r %t | FileCheck -check-prefix RELOC -check-prefix RELOC5 %s
// RUN: llvm-mc < %s -triple=armv7-linux-gnueabi -filetype=obj -o %t -g -fdebug-compilation-dir=/tmp
-// RUN: llvm-dwarfdump %t | FileCheck -check-prefix DWARF -check-prefix DWARF4 %s
-// RUN: llvm-objdump -r %t | FileCheck -check-prefix RELOC %s
+// RUN: llvm-dwarfdump %t | FileCheck -check-prefix DWARF -check-prefix DWARF45 %s
+// RUN: llvm-objdump -r %t | FileCheck -check-prefix RELOC -check-prefix RELOC4 %s
// RUN: llvm-mc < %s -triple=armv7-linux-gnueabi -filetype=obj -o %t -g -dwarf-version 3 -fdebug-compilation-dir=/tmp
// RUN: llvm-dwarfdump %t | FileCheck -check-prefix DWARF -check-prefix DWARF3 %s
// RUN: llvm-mc < %s -triple=armv7-linux-gnueabi -filetype=obj -o %t -g -dwarf-version 2 2>&1 | FileCheck -check-prefix VERSION %s
// RUN: not llvm-mc < %s -triple=armv7-linux-gnueabi -filetype=obj -o %t -g -dwarf-version 1 2>&1 | FileCheck -check-prefix DWARF1 %s
-// RUN: not llvm-mc < %s -triple=armv7-linux-gnueabi -filetype=obj -o %t -g -dwarf-version 5 2>&1 | FileCheck -check-prefix DWARF5 %s
+// RUN: not llvm-mc < %s -triple=armv7-linux-gnueabi -filetype=obj -o %t -g -dwarf-version 6 2>&1 | FileCheck -check-prefix DWARF6 %s
.section .text, "ax"
a:
mov r0, r0
@@ -18,9 +21,9 @@ b:
// DWARF: Abbrev table for offset: 0x00000000
// DWARF: [1] DW_TAG_compile_unit DW_CHILDREN_yes
// DWARF3: DW_AT_stmt_list DW_FORM_data4
-// DWARF4: DW_AT_stmt_list DW_FORM_sec_offset
+// DWARF45: DW_AT_stmt_list DW_FORM_sec_offset
// DWARF3: DW_AT_ranges DW_FORM_data4
-// DWARF4: DW_AT_ranges DW_FORM_sec_offset
+// DWARF45: DW_AT_ranges DW_FORM_sec_offset
// DWARF: DW_AT_name DW_FORM_string
// DWARF: DW_AT_comp_dir DW_FORM_string
// DWARF: DW_AT_producer DW_FORM_string
@@ -29,8 +32,8 @@ b:
// DWARF: .debug_info contents:
// DWARF: 0x{{[0-9a-f]+}}: DW_TAG_compile_unit [1]
// DWARF-NOT: DW_TAG_
-// DWARF3: DW_AT_ranges [DW_FORM_data4] (0x00000000
-// DWARF4: DW_AT_ranges [DW_FORM_sec_offset] (0x00000000
+// DWARF3: DW_AT_ranges [DW_FORM_data4] (0x00000000
+// DWARF45: DW_AT_ranges [DW_FORM_sec_offset] (0x00000000
// DWARF: 0x{{[0-9a-f]+}}: DW_TAG_label [2] *
// DWARF-NEXT: DW_AT_name [DW_FORM_string] ("a")
@@ -46,10 +49,10 @@ b:
// DWARF: .debug_line contents:
-// DWARF: 0x0000000000000000 11 0 1 0 0 is_stmt
-// DWARF-NEXT: 0x0000000000000004 11 0 1 0 0 is_stmt end_sequence
-// DWARF-NEXT: 0x0000000000000000 15 0 1 0 0 is_stmt
-// DWARF-NEXT: 0x0000000000000004 15 0 1 0 0 is_stmt end_sequence
+// DWARF: 0x0000000000000000 14 0 1 0 0 is_stmt
+// DWARF-NEXT: 0x0000000000000004 14 0 1 0 0 is_stmt end_sequence
+// DWARF-NEXT: 0x0000000000000000 18 0 1 0 0 is_stmt
+// DWARF-NEXT: 0x0000000000000004 18 0 1 0 0 is_stmt end_sequence
// DWARF: .debug_ranges contents:
@@ -61,10 +64,14 @@ b:
+// Offsets are different in DWARF v5 due to different header layout.
// RELOC: RELOCATION RECORDS FOR [.rel.debug_info]:
-// RELOC-NEXT: 00000006 R_ARM_ABS32 .debug_abbrev
-// RELOC-NEXT: 0000000c R_ARM_ABS32 .debug_line
-// RELOC-NEXT: 00000010 R_ARM_ABS32 .debug_ranges
+// RELOC4-NEXT: 00000006 R_ARM_ABS32 .debug_abbrev
+// RELOC4-NEXT: 0000000c R_ARM_ABS32 .debug_line
+// RELOC4-NEXT: 00000010 R_ARM_ABS32 .debug_ranges
+// RELOC5-NEXT: 00000008 R_ARM_ABS32 .debug_abbrev
+// RELOC5-NEXT: 0000000d R_ARM_ABS32 .debug_line
+// RELOC5-NEXT: 00000011 R_ARM_ABS32 .debug_ranges
// RELOC-NEXT: R_ARM_ABS32 .text
// RELOC-NEXT: R_ARM_ABS32 foo
@@ -81,4 +88,4 @@ b:
// VERSION: {{.*}} warning: DWARF2 only supports one section per compilation unit
// DWARF1: Dwarf version 1 is not supported.
-// DWARF5: Dwarf version 5 is not supported.
+// DWARF6: Dwarf version 6 is not supported.
diff --git a/test/MC/ARM/elf-thumbfunc.s b/test/MC/ARM/elf-thumbfunc.s
index af061b50bc3d..b6b0b03059c0 100644
--- a/test/MC/ARM/elf-thumbfunc.s
+++ b/test/MC/ARM/elf-thumbfunc.s
@@ -14,6 +14,9 @@ foo:
.global bar
bar = foo
+ .global baz
+baz = foo + 2
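+@@ the Thumb bit (bit 0) of foo carries over into baz, so its st_value is 0x2 | 1 = 0x3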
+
@@ make sure foo, bar and baz are thumb functions: bit 0 = 1 (st_value)
@CHECK: Symbol {
@CHECK: Name: bar
@@ -23,6 +26,13 @@ bar = foo
@CHECK-NEXT: Type: Function
@CHECK: Symbol {
+@CHECK: Name: baz
+@CHECK-NEXT: Value: 0x3
+@CHECK-NEXT: Size: 0
+@CHECK-NEXT: Binding: Global
+@CHECK-NEXT: Type: Function
+
+@CHECK: Symbol {
@CHECK: Name: foo
@CHECK-NEXT: Value: 0x1
@CHECK-NEXT: Size: 0
diff --git a/test/MC/ARM/error-location-post-layout.s b/test/MC/ARM/error-location-post-layout.s
index d6a59fd1738d..dea929e4352e 100644
--- a/test/MC/ARM/error-location-post-layout.s
+++ b/test/MC/ARM/error-location-post-layout.s
@@ -1,14 +1,14 @@
@ RUN: not llvm-mc -triple armv7a--none-eabi -filetype obj < %s -o /dev/null 2>&1 | FileCheck %s
-@ Note: These errors are not always emitted in the order in which the relevant
-@ source appears, this file is carefully ordered so that that is the case.
-
-@ CHECK: <unknown>:0: error: expression could not be evaluated
.set v1, -undef
+@ CHECK: 3:12: error: expression could not be evaluated
.comm common, 4
-@ CHECK: <unknown>:0: error: Common symbol 'common' cannot be used in assignment expr
.set v3, common
+@ CHECK: 7:12: error: Common symbol 'common' cannot be used in assignment expr
-@ CHECK: <unknown>:0: error: symbol 'undef' could not be evaluated in a subtraction expression
.set v2, a-undef
+@ CHECK-DAG: 10:13: error: symbol 'undef' could not be evaluated in a subtraction expression
+
+ .equ STACK_START, (a + undef)
+@ CHECK-DAG: 13:24: error: expression could not be evaluated
diff --git a/test/MC/ARM/inline-asm-diags.ll b/test/MC/ARM/inline-asm-diags.ll
new file mode 100644
index 000000000000..f71338215548
--- /dev/null
+++ b/test/MC/ARM/inline-asm-diags.ll
@@ -0,0 +1,9 @@
+; RUN: not llc -mtriple=armv7-linux -filetype=obj < %s 2>&1 -o /dev/null | FileCheck %s
+
+module asm ".word 0x10"
+module asm ".word -bar"
+
+; CHECK: <inline asm>:2:{{[0-9]+}}: error: expected relocatable expression
+
+module asm ".word -foo"
+; CHECK: <inline asm>:3:{{[0-9]+}}: error: expected relocatable expression
diff --git a/test/MC/ARM/inline-asm-srcloc.ll b/test/MC/ARM/inline-asm-srcloc.ll
new file mode 100644
index 000000000000..9fb9c5b4ef9d
--- /dev/null
+++ b/test/MC/ARM/inline-asm-srcloc.ll
@@ -0,0 +1,37 @@
+; RUN: not llc -filetype=obj 2>&1 -o /dev/null < %s | FileCheck %s
+
+; ModuleID = '/scratch/llvm/master/tools/clang/test/Misc/inline-asm-diags.c'
+source_filename = "/scratch/llvm/master/tools/clang/test/Misc/inline-asm-diags.c"
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "armv7-arm-none-eabi"
+
+; Function Attrs: noinline nounwind
+define void @foo2() #0 {
+entry:
+ call void asm sideeffect " wibble", ""() #1, !srcloc !3
+; CHECK: note: !srcloc = 107
+ ret void
+}
+
+; Function Attrs: noinline nounwind
+define void @foo() #0 {
+entry:
+ call void asm sideeffect " .word -bar", ""() #1, !srcloc !4
+; CHECK: note: !srcloc = 181
+ call void asm sideeffect " .word -foo", ""() #1, !srcloc !5
+; CHECK: note: !srcloc = 257
+ ret void
+}
+
+attributes #0 = { noinline nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a8" "target-features"="+dsp,+neon,+strict-align,+vfp3" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind }
+
+!llvm.module.flags = !{!0, !1}
+!llvm.ident = !{!2}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 1, !"min_enum_size", i32 4}
+!2 = !{!"clang version 5.0.0 "}
+!3 = !{i32 107}
+!4 = !{i32 181}
+!5 = !{i32 257}
diff --git a/test/MC/ARM/invalid-special-reg.s b/test/MC/ARM/invalid-special-reg.s
new file mode 100644
index 000000000000..2e39fe6e250a
--- /dev/null
+++ b/test/MC/ARM/invalid-special-reg.s
@@ -0,0 +1,11 @@
+@ RUN: not llvm-mc -triple armv7a--none-eabi < %s 2>&1 | FileCheck %s
+@ RUN: not llvm-mc -triple thumbv7a--none-eabi < %s 2>&1 | FileCheck %s
+
+ msr apsr_c, r0
+@ CHECK: invalid operand for instruction
+ msr cpsr_w
+@ CHECK: invalid operand for instruction
+ msr cpsr_cc
+@ CHECK: invalid operand for instruction
+ msr xpsr_c
+@ CHECK: invalid operand for instruction
diff --git a/test/MC/ARM/ldr-pseudo-cond-darwin.s b/test/MC/ARM/ldr-pseudo-cond-darwin.s
index 542b060d1e23..915b883bc755 100644
--- a/test/MC/ARM/ldr-pseudo-cond-darwin.s
+++ b/test/MC/ARM/ldr-pseudo-cond-darwin.s
@@ -37,7 +37,7 @@ f2:
@ CHECK-ARM moveq r2, #520093696
@ CHECK-THUMB2 moveq.w r2, #520093696
ldrne r3, = 0x00001234
-@ CHECK movwne r2, #4660
+@ CHECK: movwne r3, #4660
@
@ Constant Pools
diff --git a/test/MC/ARM/ldr-pseudo-cond.s b/test/MC/ARM/ldr-pseudo-cond.s
index f8d17f6c46bf..fa78311965c5 100644
--- a/test/MC/ARM/ldr-pseudo-cond.s
+++ b/test/MC/ARM/ldr-pseudo-cond.s
@@ -37,7 +37,7 @@ f2:
@ CHECK-ARM moveq r2, #520093696
@ CHECK-THUMB2 moveq.w r2, #520093696
ldrne r3, = 0x00001234
-@ CHECK movwne r2, #4660
+@ CHECK: movwne r3, #4660
@
@ Constant Pools
diff --git a/test/MC/ARM/lsl-zero-errors.s b/test/MC/ARM/lsl-zero-errors.s
new file mode 100644
index 000000000000..845507c069ad
--- /dev/null
+++ b/test/MC/ARM/lsl-zero-errors.s
@@ -0,0 +1,103 @@
+// RUN: not llvm-mc -triple=thumbv7 -show-encoding < %s 2>&1 | FileCheck --check-prefix=CHECK --check-prefix=CHECK-NONARM --check-prefix=CHECK-THUMBV7 %s
+// RUN: not llvm-mc -triple=thumbv8 -show-encoding < %s 2>&1 | FileCheck --check-prefix=CHECK --check-prefix=CHECK-NONARM --check-prefix=CHECK-THUMBV8 %s
+// RUN: llvm-mc -triple=armv7 -show-encoding < %s 2>&1 | FileCheck --check-prefix=CHECK --check-prefix=CHECK-ARM %s
+
+ // lsl #0 is actually mov, so here we check that it behaves the same as
+ // mov with regard to the permitted registers
+
+ // Using PC is invalid in thumb
+ lsl pc, r0, #0
+ lsl r0, pc, #0
+ lsl pc, pc, #0
+ lsls pc, r0, #0
+ lsls r0, pc, #0
+ lsls pc, pc, #0
+
+// CHECK-NONARM: error: instruction requires: arm-mode
+// CHECK-NONARM-NEXT: lsl pc, r0, #0
+// CHECK-NONARM: error: instruction requires: arm-mode
+// CHECK-NONARM-NEXT: lsl r0, pc, #0
+// CHECK-NONARM: error: instruction requires: arm-mode
+// CHECK-NONARM-NEXT: lsl pc, pc, #0
+// CHECK-NONARM: error: instruction requires: arm-mode
+// CHECK-NONARM-NEXT: lsls pc, r0, #0
+// CHECK-NONARM: error: instruction requires: arm-mode
+// CHECK-NONARM-NEXT: lsls r0, pc, #0
+// CHECK-NONARM: error: instruction requires: arm-mode
+// CHECK-NONARM-NEXT: lsls pc, pc, #0
+
+// CHECK-ARM: mov pc, r0 @ encoding: [0x00,0xf0,0xa0,0xe1]
+// CHECK-ARM: mov r0, pc @ encoding: [0x0f,0x00,0xa0,0xe1]
+// CHECK-ARM: mov pc, pc @ encoding: [0x0f,0xf0,0xa0,0xe1]
+// CHECK-ARM: movs pc, r0 @ encoding: [0x00,0xf0,0xb0,0xe1]
+// CHECK-ARM: movs r0, pc @ encoding: [0x0f,0x00,0xb0,0xe1]
+// CHECK-ARM: movs pc, pc @ encoding: [0x0f,0xf0,0xb0,0xe1]
+
+ mov pc, r0, lsl #0
+ mov r0, pc, lsl #0
+ mov pc, pc, lsl #0
+ movs pc, r0, lsl #0
+ movs r0, pc, lsl #0
+ movs pc, pc, lsl #0
+
+// FIXME: Really the error we should be giving is "requires: arm-mode"
+// CHECK-NONARM: error: invalid operand for instruction
+// CHECK-NONARM-NEXT: mov pc, r0, lsl #0
+// CHECK-NONARM: error: invalid operand for instruction
+// CHECK-NONARM-NEXT: mov r0, pc, lsl #0
+// CHECK-NONARM: error: invalid operand for instruction
+// CHECK-NONARM-NEXT: mov pc, pc, lsl #0
+// CHECK-NONARM: error: invalid operand for instruction
+// CHECK-NONARM-NEXT: movs pc, r0, lsl #0
+// CHECK-NONARM: error: invalid operand for instruction
+// CHECK-NONARM-NEXT: movs r0, pc, lsl #0
+// CHECK-NONARM: error: invalid operand for instruction
+// CHECK-NONARM-NEXT: movs pc, pc, lsl #0
+
+// CHECK-ARM: mov pc, r0 @ encoding: [0x00,0xf0,0xa0,0xe1]
+// CHECK-ARM: mov r0, pc @ encoding: [0x0f,0x00,0xa0,0xe1]
+// CHECK-ARM: mov pc, pc @ encoding: [0x0f,0xf0,0xa0,0xe1]
+// CHECK-ARM: movs pc, r0 @ encoding: [0x00,0xf0,0xb0,0xe1]
+// CHECK-ARM: movs r0, pc @ encoding: [0x0f,0x00,0xb0,0xe1]
+// CHECK-ARM: movs pc, pc @ encoding: [0x0f,0xf0,0xb0,0xe1]
+
+ // Using SP is invalid in thumb before ARMv8 unless the instruction is
+ // non-flags-setting and at least one of the source and destination is not SP
+ lsl sp, sp, #0
+ lsls sp, sp, #0
+ lsls r0, sp, #0
+ lsls sp, r0, #0
+
+// CHECK-THUMBV7: error: instruction variant requires ARMv8 or later
+// CHECK-THUMBV7-NEXT: lsl sp, sp, #0
+// CHECK-THUMBV7: error: instruction variant requires ARMv8 or later
+// CHECK-THUMBV7-NEXT: lsls sp, sp, #0
+// CHECK-THUMBV7: error: instruction variant requires ARMv8 or later
+// CHECK-THUMBV7-NEXT: lsls r0, sp, #0
+// CHECK-THUMBV7: error: instruction variant requires ARMv8 or later
+// CHECK-THUMBV7-NEXT: lsls sp, r0, #0
+
+// CHECK-ARM: mov sp, sp @ encoding: [0x0d,0xd0,0xa0,0xe1]
+// CHECK-ARM: movs sp, sp @ encoding: [0x0d,0xd0,0xb0,0xe1]
+// CHECK-ARM: movs r0, sp @ encoding: [0x0d,0x00,0xb0,0xe1]
+// CHECK-ARM: movs sp, r0 @ encoding: [0x00,0xd0,0xb0,0xe1]
+
+ mov sp, sp, lsl #0
+ movs sp, sp, lsl #0
+ movs r0, sp, lsl #0
+ movs sp, r0, lsl #0
+
+// FIXME: We should consistently have the "requires ARMv8" error here
+// CHECK-THUMBV7: error: invalid operand for instruction
+// CHECK-THUMBV7-NEXT: mov sp, sp, lsl #0
+// CHECK-THUMBV7: error: invalid operand for instruction
+// CHECK-THUMBV7-NEXT: movs sp, sp, lsl #0
+// CHECK-THUMBV7: error: instruction variant requires ARMv8 or later
+// CHECK-THUMBV7-NEXT: movs r0, sp, lsl #0
+// CHECK-THUMBV7: error: invalid operand for instruction
+// CHECK-THUMBV7-NEXT: movs sp, r0, lsl #0
+
+// CHECK-ARM: mov sp, sp @ encoding: [0x0d,0xd0,0xa0,0xe1]
+// CHECK-ARM: movs sp, sp @ encoding: [0x0d,0xd0,0xb0,0xe1]
+// CHECK-ARM: movs r0, sp @ encoding: [0x0d,0x00,0xb0,0xe1]
+// CHECK-ARM: movs sp, r0 @ encoding: [0x00,0xd0,0xb0,0xe1]
diff --git a/test/MC/ARM/lsl-zero.s b/test/MC/ARM/lsl-zero.s
new file mode 100644
index 000000000000..5d097115448f
--- /dev/null
+++ b/test/MC/ARM/lsl-zero.s
@@ -0,0 +1,57 @@
+// RUN: llvm-mc -triple=thumbv7 -show-encoding < %s 2>&1 | FileCheck --check-prefix=CHECK --check-prefix=CHECK-NONARM --check-prefix=CHECK-THUMBV7 %s
+// RUN: llvm-mc -triple=thumbv8 -show-encoding < %s 2>&1 | FileCheck --check-prefix=CHECK --check-prefix=CHECK-NONARM --check-prefix=CHECK-THUMBV8 %s
+// RUN: llvm-mc -triple=armv7 -show-encoding < %s 2>&1 | FileCheck --check-prefix=CHECK --check-prefix=CHECK-ARM %s
+
+ // lsl #0 is actually mov, so here we check that it behaves the same as
+ // mov with regard to the permitted registers and how it behaves in an
+ // IT block.
+
+ // Non-flags-setting with only one of source and destination SP should
+ // be OK
+ lsl sp, r0, #0
+ lsl r0, sp, #0
+
+// CHECK-NONARM: mov.w sp, r0 @ encoding: [0x4f,0xea,0x00,0x0d]
+// CHECK-NONARM: mov.w r0, sp @ encoding: [0x4f,0xea,0x0d,0x00]
+
+// CHECK-ARM: mov sp, r0 @ encoding: [0x00,0xd0,0xa0,0xe1]
+// CHECK-ARM: mov r0, sp @ encoding: [0x0d,0x00,0xa0,0xe1]
+
+ // FIXME: pre-ARMv8 we give an error for these instructions
+ //mov sp, r0, lsl #0
+ //mov r0, sp, lsl #0
+
+ // LSL #0 in IT block should select the 32-bit encoding
+ itt eq
+ lsleq r0, r1, #0
+ lslseq r0, r1, #0
+ itt gt
+ lslgt r0, r1, #0
+ lslsgt r0, r1, #0
+
+// CHECK-NONARM: moveq.w r0, r1 @ encoding: [0x4f,0xea,0x01,0x00]
+// CHECK-NONARM: movseq.w r0, r1 @ encoding: [0x5f,0xea,0x01,0x00]
+// CHECK-NONARM: movgt.w r0, r1 @ encoding: [0x4f,0xea,0x01,0x00]
+// CHECK-NONARM: movsgt.w r0, r1 @ encoding: [0x5f,0xea,0x01,0x00]
+
+// CHECK-ARM: moveq r0, r1 @ encoding: [0x01,0x00,0xa0,0x01]
+// CHECK-ARM: movseq r0, r1 @ encoding: [0x01,0x00,0xb0,0x01]
+// CHECK-ARM: movgt r0, r1 @ encoding: [0x01,0x00,0xa0,0xc1]
+// CHECK-ARM: movsgt r0, r1 @ encoding: [0x01,0x00,0xb0,0xc1]
+
+ itt eq
+ moveq r0, r1, lsl #0
+ movseq r0, r1, lsl #0
+ itt gt
+ movgt r0, r1, lsl #0
+ movsgt r0, r1, lsl #0
+
+// CHECK-NONARM: moveq.w r0, r1 @ encoding: [0x4f,0xea,0x01,0x00]
+// CHECK-NONARM: movseq.w r0, r1 @ encoding: [0x5f,0xea,0x01,0x00]
+// CHECK-NONARM: movgt.w r0, r1 @ encoding: [0x4f,0xea,0x01,0x00]
+// CHECK-NONARM: movsgt.w r0, r1 @ encoding: [0x5f,0xea,0x01,0x00]
+
+// CHECK-ARM: moveq r0, r1 @ encoding: [0x01,0x00,0xa0,0x01]
+// CHECK-ARM: movseq r0, r1 @ encoding: [0x01,0x00,0xb0,0x01]
+// CHECK-ARM: movgt r0, r1 @ encoding: [0x01,0x00,0xa0,0xc1]
+// CHECK-ARM: movsgt r0, r1 @ encoding: [0x01,0x00,0xb0,0xc1]
diff --git a/test/MC/ARM/mappingsymbols.s b/test/MC/ARM/mappingsymbols.s
new file mode 100644
index 000000000000..fff8e1047810
--- /dev/null
+++ b/test/MC/ARM/mappingsymbols.s
@@ -0,0 +1,48 @@
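+# $a, $t and $d are the mapping symbols defined by the ARM ELF ABI; they mark
+# the start of runs of ARM code, Thumb code and data respectively.
+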
+# Check section containing code and data with executable permissions for the section.
+@ RUN: llvm-mc -triple armv7-none-linux -filetype=obj -o %t.o %p/Inputs/1.s
+@ RUN: llvm-readobj -elf-output-style=GNU -symbols %t.o | FileCheck %s
+
+# Check section containing code and data with no permissions for the section.
+@ RUN: llvm-mc -triple armv7-none-linux -filetype=obj -o %t.o %p/Inputs/2.s
+@ RUN: llvm-readobj -elf-output-style=GNU -symbols %t.o | FileCheck %s
+
+# Check section containing code and data with read/write permissions for the section.
+@ RUN: llvm-mc -triple armv7-none-linux -filetype=obj -o %t.o %p/Inputs/3.s
+@ RUN: llvm-readobj -elf-output-style=GNU -symbols %t.o | FileCheck %s
+
+# Check section containing data with no permissions for the section.
+@ RUN: llvm-mc -triple armv7-none-linux -filetype=obj -o %t.o %p/Inputs/4.s
+@ RUN: llvm-readobj -elf-output-style=GNU -symbols %t.o | FileCheck %s -check-prefix=MAPPINGSYMBOLS
+
+# Check section containing only data with read/write permissions for the section.
+@ RUN: llvm-mc -triple armv7-none-linux -filetype=obj -o %t.o %p/Inputs/5.s
+@ RUN: llvm-readobj -elf-output-style=GNU -symbols %t.o | FileCheck %s -check-prefix=MAPPINGSYMBOLS
+
+# Check section containing the ident string with no permissions for the section.
+@ RUN: llvm-mc -triple armv7-none-linux -filetype=obj -o %t.o %p/Inputs/ident.s
+@ RUN: llvm-readobj -elf-output-style=GNU -symbols %t.o | FileCheck %s -check-prefix=MAPPINGSYMBOLS
+
+# Check section containing the attributes with no permissions for the section.
+@ RUN: llvm-mc -triple armv7-none-linux -filetype=obj -o %t.o %p/Inputs/attr.s
+@ RUN: llvm-readobj -elf-output-style=GNU -symbols %t.o | FileCheck %s -check-prefix=MAPPINGSYMBOLS
+
+# Check section containing code and data with no permissions for the section.
+# data comes before code.
+@ RUN: llvm-mc -triple armv7-none-linux -filetype=obj -o %t.o %p/Inputs/6.s
+@ RUN: llvm-readobj -elf-output-style=GNU -symbols %t.o | FileCheck %s -check-prefix=MIX
+
+# Check section containing code and data with read/write permissions for the section.
+# data comes before code.
+@ RUN: llvm-mc -triple armv7-none-linux -filetype=obj -o %t.o %p/Inputs/7.s
+@ RUN: llvm-readobj -elf-output-style=GNU -symbols %t.o | FileCheck %s
+
+#CHECK: $a
+#CHECK: $d
+
+#MIX: $a
+#MIX: $a
+#MIX: $d
+#MIX: $d
+
+#MAPPINGSYMBOLS-NOT: $a
+#MAPPINGSYMBOLS-NOT: $d
diff --git a/test/MC/ARM/multi-section-mapping.s b/test/MC/ARM/multi-section-mapping.s
index 2b1b0efab53c..e4b7146e4b0f 100644
--- a/test/MC/ARM/multi-section-mapping.s
+++ b/test/MC/ARM/multi-section-mapping.s
@@ -29,7 +29,6 @@
@ CHECK: 00000000 .text 00000000 $a
@ CHECK-NEXT: 00000000 .wibble 00000000 $a
-@ CHECK-NEXT: 00000000 .starts_data 00000000 $d
@ CHECK-NEXT: 00000000 .starts_thumb 00000000 $t
@ CHECK-NOT: ${{[adt]}}
diff --git a/test/MC/ARM/negative-immediates-fail.s b/test/MC/ARM/negative-immediates-fail.s
new file mode 100644
index 000000000000..dd45e4316389
--- /dev/null
+++ b/test/MC/ARM/negative-immediates-fail.s
@@ -0,0 +1,13 @@
+# RUN: not llvm-mc -triple armv7 %s 2>&1| FileCheck %s
+
+.arm
+
+ADC r0, r1, #0xFFFFFEEE
+# CHECK: error: invalid operand for instruction
+ADC r0, r1, #0xABFEABFF
+# CHECK: error: invalid operand for instruction
+ADC r0, r1, #0xFFFFFE02
+# CHECK: error: invalid operand for instruction
+
+ADD.W r0, r0, #0xFF01FF01
+# CHECK: error: immediate operand must be in the range [0,7]
diff --git a/test/MC/ARM/negative-immediates-thumb1-fail.s b/test/MC/ARM/negative-immediates-thumb1-fail.s
new file mode 100644
index 000000000000..0e8525ede903
--- /dev/null
+++ b/test/MC/ARM/negative-immediates-thumb1-fail.s
@@ -0,0 +1,15 @@
+# RUN: not llvm-mc -triple thumbv7 -mcpu=cortex-m0 %s 2>&1 | FileCheck %s
+
+.thumb
+
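+# Neither negated immediate (11 or 257) fits the 3-bit or 8-bit immediate
+# field of the 16-bit adds/subs encodings, so both forms are rejected.
+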
+ADDs r1, r0, #0xFFFFFFF5
+# CHECK: error: instruction requires: arm-mode
+
+ADDs r0, #0xFFFFFEFF
+# CHECK: error: immediate operand must be in the range [0,255]
+
+SUBs r1, r0, #0xFFFFFFF5
+# CHECK: error: instruction requires: arm-mode
+
+SUBs r0, #0xFFFFFEFF
+# CHECK: error: immediate operand must be in the range [0,255]
diff --git a/test/MC/ARM/negative-immediates-thumb1.s b/test/MC/ARM/negative-immediates-thumb1.s
new file mode 100644
index 000000000000..7b6f57b3aae1
--- /dev/null
+++ b/test/MC/ARM/negative-immediates-thumb1.s
@@ -0,0 +1,19 @@
+# RUN: llvm-mc -triple thumbv7 -mcpu=cortex-m0 %s -show-encoding | FileCheck %s
+# RUN: not llvm-mc -triple thumbv7 -mcpu=cortex-m0 %s -show-encoding -mattr=+no-neg-immediates 2>&1 | FileCheck %s -check-prefix=CHECK-DISABLED
+
+.thumb
+
+ ADDs r1, r0, #0xFFFFFFF9
+# CHECK: subs r1, r0, #7
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+ ADDs r0, #0xFFFFFF01
+# CHECK: subs r0, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+
+ SUBs r0, #0xFFFFFF01
+# CHECK: adds r0, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+
+ SUBs r1, r0, #0xFFFFFFF9
+# CHECK: adds r1, r0, #7
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
diff --git a/test/MC/ARM/negative-immediates.s b/test/MC/ARM/negative-immediates.s
new file mode 100644
index 000000000000..aa3998163d88
--- /dev/null
+++ b/test/MC/ARM/negative-immediates.s
@@ -0,0 +1,128 @@
+# RUN: llvm-mc -triple armv7 %s -show-encoding | FileCheck %s
+# RUN: not llvm-mc -triple armv7 %s -show-encoding -mattr=+no-neg-immediates 2>&1 | FileCheck %s -check-prefix=CHECK-DISABLED
+
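+# The add/sub and cmp/cmn pairs rewrite the immediate as its two's complement,
+# while the adc/sbc, and/bic and mov/mvn pairs use its ones' complement, which
+# is why #0xFFFFFF00 and #0xFFFFFF01 below both assemble as #255.
+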
+.arm
+
+ ADC r0, r1, #0xFFFFFF00
+# CHECK: sbc r0, r1, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: ADC
+ ADC r0, r1, #0xFFFFFE03
+# CHECK: sbc r0, r1, #508
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: ADC
+ ADD r0, r1, #0xFFFFFF01
+# CHECK: sub r0, r1, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: ADD
+ AND r0, r1, #0xFFFFFF00
+# CHECK: bic r0, r1, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: AND
+ BIC r0, r1, #0xFFFFFF00
+# CHECK: and r0, r1, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: BIC
+ CMP r0, #0xFFFFFF01
+# CHECK: cmn r0, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: CMP
+ CMN r0, #0xFFFFFF01
+# CHECK: cmp r0, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: CMN
+ MOV r0, #0xFFFFFF00
+# CHECK: mvn r0, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: MOV
+ MVN r0, #0xFFFFFF00
+# CHECK: mov r0, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: MVN
+ SBC r0, r1, #0xFFFFFF00
+# CHECK: adc r0, r1, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: SBC
+ SUB r0, r1, #0xFFFFFF01
+# CHECK: add r0, r1, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: SUB
+
+.thumb
+
+ ADC r0, r1, #0xFFFFFF00
+# CHECK: sbc r0, r1, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: ADC
+ ADC r0, r1, #0xFFFF00FF
+# CHECK: sbc r0, r1, #65280
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: ADC
+ ADC r0, r1, #0xFFFEFFFE
+# CHECK: sbc r0, r1, #65537 @ encoding: [0x61,0xf1,0x01,0x10]
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: ADC
+ ADC r0, r1, #0xFEFFFEFF
+# CHECK: sbc r0, r1, #16777472 @ encoding: [0x61,0xf1,0x01,0x20]
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: ADC
+ ADD.W r0, r0, #0xFFFFFF01
+# CHECK: sub.w r0, r0, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: ADD.W
+ ADD.W r0, r0, #0xFF01FF02
+# CHECK: sub.w r0, r0, #16646398 @ encoding: [0xa0,0xf1,0xfe,0x10]
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: ADD.W
+ ADDW r0, r1, #0xFFFFFF01
+# CHECK: subw r0, r1, #255 @ encoding: [0xa1,0xf2,0xff,0x00]
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: ADDW
+ ADD.W r0, r1, #0xFFFFFF01
+# CHECK: sub.w r0, r1, #255 @ encoding: [0xa1,0xf1,0xff,0x00]
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: ADD.W
+ AND r0, r1, #0xFFFFFF00
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: AND
+# CHECK: bic r0, r1, #255
+ AND r0, r1, #0xFEFFFEFF
+# CHECK: bic r0, r1, #16777472 @ encoding: [0x21,0xf0,0x01,0x20]
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: AND
+ BIC r0, r1, #0xFFFFFF00
+# CHECK: and r0, r1, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: BIC
+ BIC r0, r1, #0xFEFFFEFF
+# CHECK: and r0, r1, #16777472 @ encoding: [0x01,0xf0,0x01,0x20]
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: BIC
+ CMP r0, #0xFFFFFF01
+# CHECK: cmn.w r0, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: CMP
+ CMN r0, #0xFFFFFF01
+# CHECK: cmp.w r0, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: CMN
+ MOV r0, #0xFFFFFF00
+# CHECK: mvn r0, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: MOV
+ MVN r0, #0xFFFFFF00
+# CHECK: mov.w r0, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: MVN
+ SBC r0, r1, #0xFFFFFF00
+# CHECK: adc r0, r1, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: SBC
+ SUBW r0, r1, #0xFFFFFF01
+# CHECK: addw r0, r1, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: SUBW
+ SUB.W r0, r1, #0xFFFFFF01
+# CHECK: add.w r0, r1, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: SUB.W
diff --git a/test/MC/ARM/quad-relocation.s b/test/MC/ARM/quad-relocation.s
new file mode 100644
index 000000000000..34de182924e2
--- /dev/null
+++ b/test/MC/ARM/quad-relocation.s
@@ -0,0 +1,9 @@
+@ RUN: not llvm-mc -triple arm-arm-none-eabi -filetype obj < %s -o /dev/null 2>&1 | FileCheck %s
+
+ .align 3
+symbol:
+ .quad(symbol)
+
+@ CHECK: error: unsupported relocation on symbol
+@ CHECK-NEXT: .quad(symbol)
+@ CHECK-NEXT: ^
diff --git a/test/MC/ARM/simple-fp-encoding.s b/test/MC/ARM/simple-fp-encoding.s
index 539dd2c4d976..74babf9a699a 100644
--- a/test/MC/ARM/simple-fp-encoding.s
+++ b/test/MC/ARM/simple-fp-encoding.s
@@ -38,6 +38,18 @@
@ CHECK: vnmul.f64 d16, d17, d16 @ encoding: [0xe0,0x0b,0x61,0xee]
@ CHECK: vnmul.f32 s0, s1, s0 @ encoding: [0xc0,0x0a,0x20,0xee]
+ vcmp.f64 d17, d16
+ vcmp.f32 s1, s0
+
+@ CHECK: vcmp.f64 d17, d16 @ encoding: [0x60,0x1b,0xf4,0xee]
+@ CHECK: vcmp.f32 s1, s0 @ encoding: [0x40,0x0a,0xf4,0xee]
+
+ vcmp.f64 d17, #0.0
+ vcmp.f32 s1, #0.0
+
+@ CHECK: vcmp.f64 d17, #0 @ encoding: [0x40,0x1b,0xf5,0xee]
+@ CHECK: vcmp.f32 s1, #0 @ encoding: [0x40,0x0a,0xf5,0xee]
+
vcmpe.f64 d17, d16
vcmpe.f32 s1, s0
diff --git a/test/MC/ARM/thumb-diagnostics.s b/test/MC/ARM/thumb-diagnostics.s
index ab7c92cf3b90..f0a94aa81055 100644
--- a/test/MC/ARM/thumb-diagnostics.s
+++ b/test/MC/ARM/thumb-diagnostics.s
@@ -28,7 +28,7 @@
@ Out of range immediates for ASR instruction.
asrs r2, r3, #33
-@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,32]
@ CHECK-ERRORS: asrs r2, r3, #33
@ CHECK-ERRORS: ^
@@ -51,7 +51,7 @@ error: invalid operand for instruction
@CHECK-ERRORS-V8: error: instruction requires: arm-mode
@CHECK-ERRORS-V8: hlt #64
@CHECK-ERRORS-V8: ^
-@CHECK-ERRORS: error: invalid operand for instruction
+@CHECK-ERRORS: error: immediate operand must be in the range [0,65535]
@CHECK-ERRORS: hlt #-1
@CHECK-ERRORS: ^
@@ -153,10 +153,10 @@ error: invalid operand for instruction
@ Out of range immediates for LSL instruction.
lsls r4, r5, #-1
lsls r4, r5, #32
-@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,31]
@ CHECK-ERRORS: lsls r4, r5, #-1
@ CHECK-ERRORS: ^
-@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,31]
@ CHECK-ERRORS: lsls r4, r5, #32
@ CHECK-ERRORS: ^
@@ -184,7 +184,7 @@ error: invalid operand for instruction
@ Out of range immediate for SVC instruction.
svc #-1
svc #256
-@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,0xffffff]
@ CHECK-ERRORS: svc #-1
@ CHECK-ERRORS: ^
@ CHECK-ERRORS: error: instruction requires: arm-mode
diff --git a/test/MC/ARM/thumb-mov.s b/test/MC/ARM/thumb-mov.s
new file mode 100644
index 000000000000..0a644ea00bfa
--- /dev/null
+++ b/test/MC/ARM/thumb-mov.s
@@ -0,0 +1,100 @@
+// RUN: not llvm-mc -triple=thumbv7 -show-encoding < %s 2>&1 | FileCheck --check-prefix=CHECK --check-prefix=CHECK-V7 %s
+// RUN: not llvm-mc -triple=thumbv8 -show-encoding < %s 2>&1 | FileCheck --check-prefix=CHECK --check-prefix=CHECK-V8 %s
+
+ // Tests to check handling of sp and pc in thumb mov instructions. We
+ // have to be careful about the order of things, as stdout/stderr
+ // buffering means the errors appear before the non-error output, so
+ // we have to put all the error checks at the top.
+
+ // First check instructions that are never valid. These are thumb2
+ // instructions that use pc
+
+ // t2MOVr is selected because there is no thumb1 movs encoding that can access the high registers
+ movs pc, r0
+ movs r0, pc
+ movs pc, pc
+// CHECK: error: invalid operand for instruction
+// CHECK-NEXT: movs pc, r0
+// CHECK: error: invalid operand for instruction
+// CHECK-NEXT: movs r0, pc
+// CHECK: error: invalid operand for instruction
+// CHECK-NEXT: movs pc, pc
+
+ // mov.w selects t2MOVr
+ mov.w pc, r0
+ mov.w r0, pc
+ mov.w pc, pc
+// CHECK: error: invalid operand for instruction
+// CHECK-NEXT: mov.w pc, r0
+// CHECK: error: invalid operand for instruction
+// CHECK-NEXT: mov.w r0, pc
+// CHECK: error: invalid operand for instruction
+// CHECK-NEXT: mov.w pc, pc
+
+ // movs.w selects t2MOVr
+ movs.w pc, r0
+ movs.w r0, pc
+ movs.w pc, pc
+// CHECK: error: invalid operand for instruction
+// CHECK-NEXT: movs.w pc, r0
+// CHECK: error: invalid operand for instruction
+// CHECK-NEXT: movs.w r0, pc
+// CHECK: error: invalid operand for instruction
+// CHECK-NEXT: movs.w pc, pc
+
+
+ // Now check instructions that are invalid before ARMv8 due to SP usage
+
+ movs sp, r0
+ movs r0, sp
+ movs sp, sp
+// CHECK-V7: error: instruction variant requires ARMv8 or later
+// CHECK-V7-NEXT: movs sp, r0
+// CHECK-V7: error: instruction variant requires ARMv8 or later
+// CHECK-V7-NEXT: movs r0, sp
+// CHECK-V7: error: instruction variant requires ARMv8 or later
+// CHECK-V7-NEXT: movs sp, sp
+// CHECK-V8: movs.w sp, r0 @ encoding: [0x5f,0xea,0x00,0x0d]
+// CHECK-V8: movs.w r0, sp @ encoding: [0x5f,0xea,0x0d,0x00]
+// CHECK-V8: movs.w sp, sp @ encoding: [0x5f,0xea,0x0d,0x0d]
+
+ mov.w sp, sp
+// CHECK-V7: error: instruction variant requires ARMv8 or later
+// CHECK-V7-NEXT: mov.w sp, sp
+// CHECK-V8: mov.w sp, sp @ encoding: [0x4f,0xea,0x0d,0x0d]
+
+ movs.w sp, r0
+ movs.w r0, sp
+ movs.w sp, sp
+// CHECK-V7: error: instruction variant requires ARMv8 or later
+// CHECK-V7-NEXT: movs.w sp, r0
+// CHECK-V7: error: instruction variant requires ARMv8 or later
+// CHECK-V7-NEXT: movs.w r0, sp
+// CHECK-V7: error: instruction variant requires ARMv8 or later
+// CHECK-V7-NEXT: movs.w sp, sp
+// CHECK-V8: movs.w sp, r0 @ encoding: [0x5f,0xea,0x00,0x0d]
+// CHECK-V8: movs.w r0, sp @ encoding: [0x5f,0xea,0x0d,0x00]
+// CHECK-V8: movs.w sp, sp @ encoding: [0x5f,0xea,0x0d,0x0d]
+
+
+ // Now instructions that are always valid
+
+ // mov selects tMOVr, where sp and pc are allowed
+ mov sp, r0
+ mov r0, sp
+ mov sp, sp
+ mov pc, r0
+ mov r0, pc
+ mov pc, pc
+// CHECK: mov sp, r0 @ encoding: [0x85,0x46]
+// CHECK: mov r0, sp @ encoding: [0x68,0x46]
+// CHECK: mov sp, sp @ encoding: [0xed,0x46]
+// CHECK: mov pc, r0 @ encoding: [0x87,0x46]
+// CHECK: mov r0, pc @ encoding: [0x78,0x46]
+// CHECK: mov pc, pc @ encoding: [0xff,0x46]
+
+ // sp allowed in non-flags-setting t2MOVr
+ mov.w sp, r0
+ mov.w r0, sp
+// CHECK: mov.w sp, r0 @ encoding: [0x4f,0xea,0x00,0x0d]
+// CHECK: mov.w r0, sp @ encoding: [0x4f,0xea,0x0d,0x00]
diff --git a/test/MC/ARM/thumb-not-mclass.s b/test/MC/ARM/thumb-not-mclass.s
index fec545e64b06..a90dc7eefe31 100644
--- a/test/MC/ARM/thumb-not-mclass.s
+++ b/test/MC/ARM/thumb-not-mclass.s
@@ -22,5 +22,5 @@
setend be
setend le
-@ CHECK: error: invalid operand for instruction
-@ CHECK: error: invalid operand for instruction
+@ CHECK: error: immediate operand must be in the range [0,1]
+@ CHECK: error: immediate operand must be in the range [0,1]
diff --git a/test/MC/ARM/thumb2-diagnostics.s b/test/MC/ARM/thumb2-diagnostics.s
index 38cc74dee565..76b4cf12626b 100644
--- a/test/MC/ARM/thumb2-diagnostics.s
+++ b/test/MC/ARM/thumb2-diagnostics.s
@@ -39,10 +39,10 @@
mrc2 p14, #0, r1, c1, c2, #9
mrrc p7, #16, r5, r4, c1
mrrc2 p7, #17, r5, r4, c1
-@ CHECK-ERRORS: error: invalid operand for instruction
-@ CHECK-ERRORS: error: invalid operand for instruction
-@ CHECK-ERRORS: error: invalid operand for instruction
-@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,7]
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,7]
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,7]
+@ CHECK-ERRORS: error: immediate operand must be in the range [0,7]
@ CHECK-ERRORS: error: immediate operand must be in the range [0,15]
@ CHECK-ERRORS-V7: error: immediate operand must be in the range [0,15]
@ CHECK-ERRORS-V8: error: invalid operand for instruction
@@ -79,8 +79,7 @@ foo2:
mov r0, foo2
movw r0, foo2
movt r0, foo2
-@ CHECK-ERRORS: error: immediate expression for mov requires :lower16: or :upper16
-@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: instruction requires: arm-mode
@ CHECK-ERRORS: error: immediate expression for mov requires :lower16: or :upper16
@ CHECK-ERRORS: ^
@ CHECK-ERRORS: error: immediate expression for mov requires :lower16: or :upper16
@@ -118,3 +117,9 @@ foo2:
@ CHECK-ERRORS: error: instruction requires: arm-mode
@ CHECK-ERRORS: error: immediate value expected for vector index
@ CHECK-ERRORS: error: instruction requires: arm-mode
+
+ @ SWP(B) is an ARM-only instruction
+ swp r0, r1, [r2]
+ swpb r3, r4, [r5]
+@ CHECK-ERRORS: error: instruction requires: arm-mode
+@ CHECK-ERRORS: error: instruction requires: arm-mode
diff --git a/test/MC/ARM/thumbv8m.s b/test/MC/ARM/thumbv8m.s
index a0830a227a15..9af32ddd4ea1 100644
--- a/test/MC/ARM/thumbv8m.s
+++ b/test/MC/ARM/thumbv8m.s
@@ -4,7 +4,7 @@
// RUN: not llvm-mc -triple=thumbv8m.main -show-encoding < %s 2>%t \
// RUN: | FileCheck --check-prefix=CHECK-MAINLINE --check-prefix=CHECK %s
// RUN: FileCheck --check-prefix=UNDEF-MAINLINE --check-prefix=UNDEF < %t %s
-// RUN: not llvm-mc -triple=thumbv8m.main -mattr=+dsp,+t2xtpk -show-encoding < %s 2>%t \
+// RUN: not llvm-mc -triple=thumbv8m.main -mattr=+dsp -show-encoding < %s 2>%t \
// RUN: | FileCheck --check-prefix=CHECK-MAINLINE_DSP --check-prefix=CHECK %s
// RUN: FileCheck --check-prefix=UNDEF-MAINLINE_DSP --check-prefix=UNDEF < %t %s
@@ -18,7 +18,7 @@ mov.w r0, r0
// UNDEF: target does not support ARM mode
.arm
-// And only +dsp,+t2xtpk has DSP and t2xtpk instructions
+// And only +dsp has DSP instructions
// UNDEF-BASELINE: error: instruction requires: arm-mode
// UNDEF-MAINLINE: error: instruction requires: arm-mode
// UNDEF-MAINLINE_DSP-NOT: error: instruction requires:
diff --git a/test/MC/ARM/udf-arm-diagnostics.s b/test/MC/ARM/udf-arm-diagnostics.s
index 9ec9bf2124f0..71a1e387eebb 100644
--- a/test/MC/ARM/udf-arm-diagnostics.s
+++ b/test/MC/ARM/udf-arm-diagnostics.s
@@ -13,7 +13,7 @@ undefined:
udf #65536
-@ CHECK: error: invalid operand for instruction
+@ CHECK: error: immediate operand must be in the range [0,65535]
@ CHECK: udf #65536
@ CHECK: ^
diff --git a/test/MC/ARM/udf-thumb-2-diagnostics.s b/test/MC/ARM/udf-thumb-2-diagnostics.s
index f8375601a031..f1916446d65d 100644
--- a/test/MC/ARM/udf-thumb-2-diagnostics.s
+++ b/test/MC/ARM/udf-thumb-2-diagnostics.s
@@ -19,7 +19,7 @@ undefined:
udf.w #65536
-@ CHECK: error: invalid operand for instruction
+@ CHECK: error: immediate operand must be in the range [0,65535]
@ CHECK: udf.w #65536
@ CHECK: ^
diff --git a/test/MC/ARM/unpred-control-flow-in-it-block.s b/test/MC/ARM/unpred-control-flow-in-it-block.s
new file mode 100644
index 000000000000..885d158d83dd
--- /dev/null
+++ b/test/MC/ARM/unpred-control-flow-in-it-block.s
@@ -0,0 +1,57 @@
+@ RUN: not llvm-mc -triple=thumbv7m--none-eabi < %s 2>&1 | FileCheck %s
+
+@ These instructions all write to the PC, so are UNPREDICTABLE if they are in
+@ an IT block, but not the last instruction in the block.
+
+ itttt eq
+ addeq pc, r0
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ addeq pc, sp, pc
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ beq.n #.+0x20
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ nopeq
+ itttt eq
+ beq.w #.+0x20
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ bleq sym
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ blxeq r0
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ nopeq
+ itttt eq
+ bxeq r0
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ ldmeq r0, {r8, pc}
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ ldmdbeq r0, {r8, pc}
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ nopeq
+ itttt eq
+ ldreq pc, [r0, #4]
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ ldreq pc, [r0, #-4]
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ ldreq pc, [pc, #4]
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ nopeq
+ itttt eq
+ ldreq pc, [r0, r1, LSL #1]
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ moveq pc, r0
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ popeq {r0, pc}
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ nopeq
+ itttt eq
+ popeq {r8, pc}
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ popeq {pc}
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ tbbeq [r0, r1]
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ nopeq
+ itt eq
+ tbheq [r0, r1, LSL #1]
+@ CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction must be outside of IT block or the last instruction in an IT block
+ nopeq
diff --git a/test/MC/AsmParser/macro-duplicate-params-names-err.s b/test/MC/AsmParser/macro-duplicate-params-names-err.s
new file mode 100644
index 000000000000..618cce02abdf
--- /dev/null
+++ b/test/MC/AsmParser/macro-duplicate-params-names-err.s
@@ -0,0 +1,7 @@
+// RUN: not llvm-mc %s 2> %t
+// RUN: FileCheck < %t %s
+
+.macro M a a
+.endm
+
+// CHECK: macro 'M' has multiple parameters named 'a'
diff --git a/test/MC/AsmParser/section_names.s b/test/MC/AsmParser/section_names.s
index 3883e15880a5..38a5310099d9 100644
--- a/test/MC/AsmParser/section_names.s
+++ b/test/MC/AsmParser/section_names.s
@@ -8,6 +8,8 @@
.byte 1
.section .init_array
.byte 1
+.section .init_array.42
+.byte 1
.section .init_array2
.byte 1
.section .init_arrayfoo
@@ -30,6 +32,14 @@
.byte 1
.section .notefoo
.byte 1
+.section .bss
+.space 1
+.section .bss.foo
+.space 1
+.section .tbss
+.space 1
+.section .tbss.foo
+.space 1
# CHECK: Name: .nobits
# CHECK-NEXT: Type: SHT_PROGBITS
# CHECK: Name: .nobits2
@@ -38,6 +48,8 @@
# CHECK-NEXT: Type: SHT_PROGBITS
# CHECK: Name: .init_array
# CHECK-NEXT: Type: SHT_INIT_ARRAY
+# CHECK: Name: .init_array.42
+# CHECK-NEXT: Type: SHT_INIT_ARRAY
# CHECK: Name: .init_array2
# CHECK-NEXT: Type: SHT_PROGBITS
# CHECK: Name: .init_arrayfoo
@@ -60,3 +72,11 @@
# CHECK-NEXT: Type: SHT_NOTE
# CHECK: Name: .notefoo
# CHECK-NEXT: Type: SHT_NOTE
+# CHECK: Name: .bss
+# CHECK-NEXT: Type: SHT_NOBITS
+# CHECK: Name: .bss.foo
+# CHECK-NEXT: Type: SHT_NOBITS
+# CHECK: Name: .tbss
+# CHECK-NEXT: Type: SHT_NOBITS
+# CHECK: Name: .tbss.foo
+# CHECK-NEXT: Type: SHT_NOBITS
diff --git a/test/MC/COFF/section-comdat.s b/test/MC/COFF/section-comdat.s
index e7052d8f5ae3..7669ffbadc3a 100644
--- a/test/MC/COFF/section-comdat.s
+++ b/test/MC/COFF/section-comdat.s
@@ -161,7 +161,7 @@ Symbol8:
// CHECK: }
// CHECK: Symbol {
// CHECK: Name: SecName
-// CHECK: Section: SecName (9)
+// CHECK: Section: SecName (11)
// CHECK: AuxSectionDef {
// CHECK: Selection: Associative
// CHECK: AssocSection: assocSec (4)
@@ -169,25 +169,25 @@ Symbol8:
// CHECK: }
// CHECK: Symbol {
// CHECK: Name: SecName
-// CHECK: Section: SecName (10)
+// CHECK: Section: SecName (9)
// CHECK: AuxSectionDef {
// CHECK: Selection: Largest
// CHECK: }
// CHECK: }
// CHECK: Symbol {
// CHECK: Name: Symbol6
-// CHECK: Section: SecName (10)
+// CHECK: Section: SecName (9)
// CHECK: }
// CHECK: Symbol {
// CHECK: Name: SecName
-// CHECK: Section: SecName (11)
+// CHECK: Section: SecName (10)
// CHECK: AuxSectionDef {
// CHECK: Selection: Newest (0x7)
// CHECK: }
// CHECK: }
// CHECK: Symbol {
// CHECK: Name: Symbol7
-// CHECK: Section: SecName (11)
+// CHECK: Section: SecName (10)
// CHECK: }
// CHECK: Symbol {
// CHECK: Name: assocSec
@@ -199,7 +199,7 @@ Symbol8:
// CHECK: }
// CHECK: Symbol {
// CHECK: Name: Symbol5
-// CHECK: Section: SecName (9)
+// CHECK: Section: SecName (11)
// CHECK: }
// CHECK: Symbol {
// CHECK: Name: Symbol8
diff --git a/test/MC/Disassembler/AMDGPU/aperture-regs.ll b/test/MC/Disassembler/AMDGPU/aperture-regs.ll
new file mode 100644
index 000000000000..5fec281145b3
--- /dev/null
+++ b/test/MC/Disassembler/AMDGPU/aperture-regs.ll
@@ -0,0 +1,13 @@
+# RUN: llvm-mc -arch=amdgcn -mcpu=gfx900 -disassemble -show-encoding < %s | FileCheck -check-prefix=GFX9 %s
+
+# GFX9: v_mov_b32_e32 v1, src_shared_base ; encoding: [0xeb,0x02,0x02,0x7e]
+0xeb 0x02 0x02 0x7e
+
+# GFX9: v_mov_b32_e32 v1, src_shared_limit ; encoding: [0xec,0x02,0x02,0x7e]
+0xec 0x02 0x02 0x7e
+
+# GFX9: v_mov_b32_e32 v1, src_private_base ; encoding: [0xed,0x02,0x02,0x7e]
+0xed 0x02 0x02 0x7e
+
+# GFX9: v_mov_b32_e32 v1, src_private_limit ; encoding: [0xee,0x02,0x02,0x7e]
+0xee 0x02 0x02 0x7e
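
All four additions decode to the same v_mov_b32_e32 shape; the only thing each check pins down is the src0 operand value in the low nine bits of the VOP1 word. A tiny Python sketch with the operand numbers read straight off the encodings above (the dict and helper are hypothetical names, not LLVM's):

    # src0 values for the GFX9 aperture registers, as encoded in this test.
    APERTURE_SRC_OPERANDS = {
        0xEB: "src_shared_base",
        0xEC: "src_shared_limit",
        0xED: "src_private_base",
        0xEE: "src_private_limit",
    }

    def aperture_name(word: int) -> str:
        src0 = word & 0x1FF  # VOP1 src0 occupies bits [8:0]
        return APERTURE_SRC_OPERANDS.get(src0, "<not an aperture register>")

    # 0x7e0202eb is the little-endian word for [0xeb,0x02,0x02,0x7e].
    assert aperture_name(0x7E0202EB) == "src_shared_base"
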
diff --git a/test/MC/Disassembler/AMDGPU/ds_vi.txt b/test/MC/Disassembler/AMDGPU/ds_vi.txt
index 84d55cd7e63d..6d910ea5bb58 100644
--- a/test/MC/Disassembler/AMDGPU/ds_vi.txt
+++ b/test/MC/Disassembler/AMDGPU/ds_vi.txt
@@ -81,20 +81,26 @@
# VI: ds_max_f32 v2, v4 ; encoding: [0x00,0x00,0x26,0xd8,0x02,0x04,0x00,0x00]
0x00 0x00 0x26 0xd8 0x02 0x04 0x00 0x00

-# VI: ds_gws_init v2 gds ; encoding: [0x00,0x00,0x33,0xd8,0x02,0x00,0x00,0x00]
-0x00 0x00 0x33 0xd8 0x02 0x00 0x00 0x00
+# VI: ds_gws_init v2 gds ; encoding: [0x00,0x00,0x33,0xd9,0x00,0x02,0x00,0x00]
+0x00 0x00 0x33 0xd9 0x00 0x02 0x00 0x00

-# VI: ds_gws_sema_v v2 gds ; encoding: [0x00,0x00,0x35,0xd8,0x02,0x00,0x00,0x00]
-0x00 0x00 0x35 0xd8 0x02 0x00 0x00 0x00
+# VI: ds_gws_init v3 offset:12345 gds ; encoding: [0x39,0x30,0x33,0xd9,0x00,0x03,0x00,0x00]
+0x39 0x30 0x33 0xd9 0x00 0x03 0x00 0x00

-# VI: ds_gws_sema_br v2 gds ; encoding: [0x00,0x00,0x37,0xd8,0x02,0x00,0x00,0x00]
-0x00 0x00 0x37 0xd8 0x02 0x00 0x00 0x00
+# VI: ds_gws_sema_v gds ; encoding: [0x00,0x00,0x35,0xd9,0x00,0x00,0x00,0x00]
+0x00 0x00 0x35 0xd9 0x00 0x00 0x00 0x00

-# VI: ds_gws_sema_p v2 gds ; encoding: [0x00,0x00,0x39,0xd8,0x02,0x00,0x00,0x00]
-0x00 0x00 0x39 0xd8 0x02 0x00 0x00 0x00
+# VI: ds_gws_sema_v offset:257 gds ; encoding: [0x01,0x01,0x35,0xd9,0x00,0x00,0x00,0x00]
+0x01 0x01 0x35 0xd9 0x00 0x00 0x00 0x00

-# VI: ds_gws_barrier v2 gds ; encoding: [0x00,0x00,0x3b,0xd8,0x02,0x00,0x00,0x00]
-0x00 0x00 0x3b 0xd8 0x02 0x00 0x00 0x00
+# VI: ds_gws_sema_br v2 gds ; encoding: [0x00,0x00,0x37,0xd9,0x00,0x02,0x00,0x00]
+0x00 0x00 0x37 0xd9 0x00 0x02 0x00 0x00
+
+# VI: ds_gws_sema_p gds ; encoding: [0x00,0x00,0x39,0xd9,0x00,0x00,0x00,0x00]
+0x00 0x00 0x39 0xd9 0x00 0x00 0x00 0x00
+
+# VI: ds_gws_barrier v2 gds ; encoding: [0x00,0x00,0x3b,0xd9,0x00,0x02,0x00,0x00]
+0x00 0x00 0x3b 0xd9 0x00 0x02 0x00 0x00

# VI: ds_write_b8 v2, v4 ; encoding: [0x00,0x00,0x3c,0xd8,0x02,0x04,0x00,0x00]
0x00 0x00 0x3c 0xd8 0x02 0x04 0x00 0x00
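
Both this fix and the gfx8_dasm_all.txt file that follows exercise the same 64-bit DS encoding, so it is worth spelling out how the CHECK bytes map to fields. In the layout read off the byte patterns in these tests, the offset sits in bits [15:0], gds in bit 16, and the opcode in bits [24:17] of the first word; the second word's four bytes are addr, data0, data1 and vdst. On that reading, the gws fix above moves the operand from the addr byte to the data0 byte, and the fourth byte changing from 0xd8 to 0xd9 corresponds to the opcode's high bit. A small Python decoding sketch under those assumptions (field names are descriptive, not LLVM's own):

    import struct

    def decode_ds(b: bytes) -> dict:
        """Split an 8-byte VI DS instruction into its fields.

        Field layout inferred from the encodings in ds_vi.txt and
        gfx8_dasm_all.txt; names are descriptive, not LLVM's own.
        """
        word0, = struct.unpack("<I", b[:4])
        return {
            "offset": word0 & 0xFFFF,        # bits [15:0]
            "gds": (word0 >> 16) & 1,        # bit 16
            "opcode": (word0 >> 17) & 0xFF,  # bits [24:17]
            "addr": b[4],
            "data0": b[5],
            "data1": b[6],
            "vdst": b[7],
        }

    # "ds_add_u32 v0, v0 offset:65535" from gfx8_dasm_all.txt below:
    f = decode_ds(bytes([0xFF, 0xFF, 0x00, 0xD8, 0x00, 0x00, 0x00, 0x00]))
    assert f["offset"] == 0xFFFF and f["opcode"] == 0 and f["gds"] == 0

    # The corrected "ds_gws_init v2 gds" above: operand now in data0, not addr.
    g = decode_ds(bytes([0x00, 0x00, 0x33, 0xD9, 0x00, 0x02, 0x00, 0x00]))
    assert g["gds"] == 1 and g["data0"] == 2 and g["addr"] == 0
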
diff --git a/test/MC/Disassembler/AMDGPU/gfx8_dasm_all.txt b/test/MC/Disassembler/AMDGPU/gfx8_dasm_all.txt
new file mode 100644
index 000000000000..21d1d5a5dab1
--- /dev/null
+++ b/test/MC/Disassembler/AMDGPU/gfx8_dasm_all.txt
@@ -0,0 +1,87676 @@
+# RUN: llvm-mc -arch=amdgcn -mcpu=tonga -disassemble -show-encoding < %s | FileCheck %s
+
+# CHECK: ds_add_u32 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x00,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x00,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_u32 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x00,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x00,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_add_u32 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x00,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x00,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_add_u32 v0, v0 ; encoding: [0x00,0x00,0x00,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x00,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_u32 v0, v0 offset:4 ; encoding: [0x04,0x00,0x00,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x00,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_u32 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x01,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x01,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_u32 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x02,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x02,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_u32 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x02,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x02,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_sub_u32 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x02,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x02,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_sub_u32 v0, v0 ; encoding: [0x00,0x00,0x02,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x02,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_u32 v0, v0 offset:4 ; encoding: [0x04,0x00,0x02,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x02,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_u32 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x03,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x03,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_u32 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x04,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x04,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_u32 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x04,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x04,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_rsub_u32 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x04,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x04,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_rsub_u32 v0, v0 ; encoding: [0x00,0x00,0x04,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x04,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_u32 v0, v0 offset:4 ; encoding: [0x04,0x00,0x04,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x04,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_u32 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x05,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x05,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_u32 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x06,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x06,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_u32 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x06,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x06,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_inc_u32 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x06,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x06,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_inc_u32 v0, v0 ; encoding: [0x00,0x00,0x06,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x06,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_u32 v0, v0 offset:4 ; encoding: [0x04,0x00,0x06,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x06,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_u32 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x07,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x07,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_u32 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x08,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x08,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_u32 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x08,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x08,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_dec_u32 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x08,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x08,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_dec_u32 v0, v0 ; encoding: [0x00,0x00,0x08,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x08,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_u32 v0, v0 offset:4 ; encoding: [0x04,0x00,0x08,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x08,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_u32 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x09,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x09,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_i32 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x0a,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x0a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_i32 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x0a,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x0a,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_min_i32 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x0a,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x0a,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_min_i32 v0, v0 ; encoding: [0x00,0x00,0x0a,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_i32 v0, v0 offset:4 ; encoding: [0x04,0x00,0x0a,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x0a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_i32 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x0b,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x0b,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_i32 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x0c,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x0c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_i32 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x0c,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x0c,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_max_i32 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x0c,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x0c,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_max_i32 v0, v0 ; encoding: [0x00,0x00,0x0c,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_i32 v0, v0 offset:4 ; encoding: [0x04,0x00,0x0c,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x0c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_i32 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x0d,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x0d,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_u32 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x0e,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x0e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_u32 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x0e,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x0e,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_min_u32 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x0e,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x0e,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_min_u32 v0, v0 ; encoding: [0x00,0x00,0x0e,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_u32 v0, v0 offset:4 ; encoding: [0x04,0x00,0x0e,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x0e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_u32 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x0f,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x0f,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_u32 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x10,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x10,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_u32 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x10,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x10,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_max_u32 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x10,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x10,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_max_u32 v0, v0 ; encoding: [0x00,0x00,0x10,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x10,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_u32 v0, v0 offset:4 ; encoding: [0x04,0x00,0x10,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x10,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_u32 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x11,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x11,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_b32 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x12,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x12,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_b32 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x12,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x12,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_and_b32 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x12,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x12,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_and_b32 v0, v0 ; encoding: [0x00,0x00,0x12,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x12,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_b32 v0, v0 offset:4 ; encoding: [0x04,0x00,0x12,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x12,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_b32 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x13,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x13,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_b32 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x14,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x14,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_b32 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x14,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x14,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_or_b32 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x14,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x14,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_or_b32 v0, v0 ; encoding: [0x00,0x00,0x14,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x14,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_b32 v0, v0 offset:4 ; encoding: [0x04,0x00,0x14,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x14,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_b32 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x15,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x15,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_b32 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x16,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x16,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_b32 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x16,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x16,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_xor_b32 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x16,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x16,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_xor_b32 v0, v0 ; encoding: [0x00,0x00,0x16,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x16,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_b32 v0, v0 offset:4 ; encoding: [0x04,0x00,0x16,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x16,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_b32 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x17,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x17,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_mskor_b32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x18,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x18,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_mskor_b32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x18,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x18,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_mskor_b32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x18,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x18,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_mskor_b32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x18,0xd8,0x00,0x00,0xff,0x00]
+0xff,0xff,0x18,0xd8,0x00,0x00,0xff,0x00
+
+# CHECK: ds_mskor_b32 v0, v0, v0 ; encoding: [0x00,0x00,0x18,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x18,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_mskor_b32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x18,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x18,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_mskor_b32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x19,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x19,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write_b32 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x1a,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x1a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write_b32 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x1a,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x1a,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_write_b32 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x1a,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x1a,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_write_b32 v0, v0 ; encoding: [0x00,0x00,0x1a,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x1a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write_b32 v0, v0 offset:4 ; encoding: [0x04,0x00,0x1a,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x1a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write_b32 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x1b,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x1b,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2_b32 v0, v0, v0 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x1c,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0xff,0x1c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2_b32 v255, v0, v0 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x1c,0xd8,0xff,0x00,0x00,0x00]
+0x7f,0xff,0x1c,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_write2_b32 v0, v255, v0 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x1c,0xd8,0x00,0xff,0x00,0x00]
+0x7f,0xff,0x1c,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_write2_b32 v0, v0, v255 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x1c,0xd8,0x00,0x00,0xff,0x00]
+0x7f,0xff,0x1c,0xd8,0x00,0x00,0xff,0x00
+
+# CHECK: ds_write2_b32 v0, v0, v0 offset1:255 ; encoding: [0x00,0xff,0x1c,0xd8,0x00,0x00,0x00,0x00]
+0x00,0xff,0x1c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2_b32 v0, v0, v0 offset0:16 offset1:255 ; encoding: [0x10,0xff,0x1c,0xd8,0x00,0x00,0x00,0x00]
+0x10,0xff,0x1c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2_b32 v0, v0, v0 offset0:127 ; encoding: [0x7f,0x00,0x1c,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0x00,0x1c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2_b32 v0, v0, v0 offset0:127 offset1:1 ; encoding: [0x7f,0x01,0x1c,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0x01,0x1c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2_b32 v0, v0, v0 offset0:127 offset1:255 gds ; encoding: [0x7f,0xff,0x1d,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0xff,0x1d,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2st64_b32 v0, v0, v0 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x1e,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0xff,0x1e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2st64_b32 v255, v0, v0 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x1e,0xd8,0xff,0x00,0x00,0x00]
+0x7f,0xff,0x1e,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_write2st64_b32 v0, v255, v0 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x1e,0xd8,0x00,0xff,0x00,0x00]
+0x7f,0xff,0x1e,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_write2st64_b32 v0, v0, v255 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x1e,0xd8,0x00,0x00,0xff,0x00]
+0x7f,0xff,0x1e,0xd8,0x00,0x00,0xff,0x00
+
+# CHECK: ds_write2st64_b32 v0, v0, v0 offset1:255 ; encoding: [0x00,0xff,0x1e,0xd8,0x00,0x00,0x00,0x00]
+0x00,0xff,0x1e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2st64_b32 v0, v0, v0 offset0:16 offset1:255 ; encoding: [0x10,0xff,0x1e,0xd8,0x00,0x00,0x00,0x00]
+0x10,0xff,0x1e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2st64_b32 v0, v0, v0 offset0:127 ; encoding: [0x7f,0x00,0x1e,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0x00,0x1e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2st64_b32 v0, v0, v0 offset0:127 offset1:1 ; encoding: [0x7f,0x01,0x1e,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0x01,0x1e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2st64_b32 v0, v0, v0 offset0:127 offset1:255 gds ; encoding: [0x7f,0xff,0x1f,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0xff,0x1f,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_b32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x20,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x20,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_b32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x20,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x20,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_b32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x20,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x20,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_cmpst_b32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x20,0xd8,0x00,0x00,0xff,0x00]
+0xff,0xff,0x20,0xd8,0x00,0x00,0xff,0x00
+
+# CHECK: ds_cmpst_b32 v0, v0, v0 ; encoding: [0x00,0x00,0x20,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x20,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_b32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x20,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x20,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_b32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x21,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x21,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_f32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x22,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x22,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_f32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x22,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x22,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_f32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x22,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x22,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_cmpst_f32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x22,0xd8,0x00,0x00,0xff,0x00]
+0xff,0xff,0x22,0xd8,0x00,0x00,0xff,0x00
+
+# CHECK: ds_cmpst_f32 v0, v0, v0 ; encoding: [0x00,0x00,0x22,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x22,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_f32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x22,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x22,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_f32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x23,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x23,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_f32 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x24,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x24,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_f32 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x24,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x24,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_min_f32 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x24,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x24,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_min_f32 v0, v0 ; encoding: [0x00,0x00,0x24,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x24,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_f32 v0, v0 offset:4 ; encoding: [0x04,0x00,0x24,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x24,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_f32 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x25,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x25,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_f32 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x26,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x26,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_f32 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x26,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x26,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_max_f32 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x26,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x26,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_max_f32 v0, v0 ; encoding: [0x00,0x00,0x26,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x26,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_f32 v0, v0 offset:4 ; encoding: [0x04,0x00,0x26,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x26,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_f32 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x27,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x27,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_f32 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x2a,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x2a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_f32 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x2a,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x2a,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_add_f32 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x2a,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x2a,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_add_f32 v0, v0 ; encoding: [0x00,0x00,0x2a,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_f32 v0, v0 offset:4 ; encoding: [0x04,0x00,0x2a,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x2a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_f32 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x2b,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x2b,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write_b8 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x3c,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x3c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write_b8 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x3c,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x3c,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_write_b8 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x3c,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x3c,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_write_b8 v0, v0 ; encoding: [0x00,0x00,0x3c,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x3c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write_b8 v0, v0 offset:4 ; encoding: [0x04,0x00,0x3c,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x3c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write_b8 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x3d,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x3d,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write_b16 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x3e,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x3e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write_b16 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x3e,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x3e,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_write_b16 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x3e,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x3e,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_write_b16 v0, v0 ; encoding: [0x00,0x00,0x3e,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x3e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write_b16 v0, v0 offset:4 ; encoding: [0x04,0x00,0x3e,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x3e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write_b16 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x3f,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x3f,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_rtn_u32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x40,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x40,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_rtn_u32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x40,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x40,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_add_rtn_u32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x40,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x40,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_add_rtn_u32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x40,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x40,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_add_rtn_u32 v0, v0, v0 ; encoding: [0x00,0x00,0x40,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x40,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_rtn_u32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x40,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x40,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_rtn_u32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x41,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x41,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_rtn_u32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x42,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x42,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_rtn_u32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x42,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x42,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_sub_rtn_u32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x42,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x42,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_sub_rtn_u32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x42,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x42,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_sub_rtn_u32 v0, v0, v0 ; encoding: [0x00,0x00,0x42,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x42,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_rtn_u32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x42,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x42,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_rtn_u32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x43,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x43,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_rtn_u32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x44,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x44,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_rtn_u32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x44,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x44,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_rsub_rtn_u32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x44,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x44,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_rsub_rtn_u32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x44,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x44,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_rsub_rtn_u32 v0, v0, v0 ; encoding: [0x00,0x00,0x44,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_rtn_u32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x44,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x44,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_rtn_u32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x45,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x45,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_rtn_u32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x46,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x46,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_rtn_u32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x46,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x46,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_inc_rtn_u32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x46,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x46,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_inc_rtn_u32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x46,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x46,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_inc_rtn_u32 v0, v0, v0 ; encoding: [0x00,0x00,0x46,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_rtn_u32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x46,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x46,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_rtn_u32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x47,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x47,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_rtn_u32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x48,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x48,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_rtn_u32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x48,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x48,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_dec_rtn_u32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x48,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x48,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_dec_rtn_u32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x48,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x48,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_dec_rtn_u32 v0, v0, v0 ; encoding: [0x00,0x00,0x48,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_rtn_u32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x48,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x48,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_rtn_u32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x49,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x49,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_i32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x4a,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x4a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_i32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x4a,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x4a,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_min_rtn_i32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x4a,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x4a,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_i32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x4a,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x4a,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_min_rtn_i32 v0, v0, v0 ; encoding: [0x00,0x00,0x4a,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_i32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x4a,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x4a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_i32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x4b,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x4b,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_i32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x4c,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x4c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_i32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x4c,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x4c,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_max_rtn_i32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x4c,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x4c,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_i32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x4c,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x4c,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_max_rtn_i32 v0, v0, v0 ; encoding: [0x00,0x00,0x4c,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_i32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x4c,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x4c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_i32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x4d,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x4d,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_u32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x4e,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x4e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_u32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x4e,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x4e,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_min_rtn_u32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x4e,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x4e,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_u32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x4e,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x4e,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_min_rtn_u32 v0, v0, v0 ; encoding: [0x00,0x00,0x4e,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_u32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x4e,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x4e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_u32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x4f,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x4f,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_u32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x50,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x50,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_u32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x50,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x50,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_max_rtn_u32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x50,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x50,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_u32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x50,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x50,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_max_rtn_u32 v0, v0, v0 ; encoding: [0x00,0x00,0x50,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_u32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x50,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x50,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_u32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x51,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x51,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_rtn_b32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x52,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x52,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_rtn_b32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x52,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x52,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_and_rtn_b32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x52,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x52,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_and_rtn_b32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x52,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x52,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_and_rtn_b32 v0, v0, v0 ; encoding: [0x00,0x00,0x52,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_rtn_b32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x52,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x52,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_rtn_b32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x53,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x53,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_rtn_b32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x54,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x54,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_rtn_b32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x54,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x54,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_or_rtn_b32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x54,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x54,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_or_rtn_b32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x54,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x54,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_or_rtn_b32 v0, v0, v0 ; encoding: [0x00,0x00,0x54,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_rtn_b32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x54,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x54,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_rtn_b32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x55,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x55,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_rtn_b32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x56,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x56,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_rtn_b32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x56,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x56,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_xor_rtn_b32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x56,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x56,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_xor_rtn_b32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x56,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x56,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_xor_rtn_b32 v0, v0, v0 ; encoding: [0x00,0x00,0x56,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_rtn_b32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x56,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x56,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_rtn_b32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x57,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x57,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_mskor_rtn_b32 v0, v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x58,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x58,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_mskor_rtn_b32 v255, v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x58,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x58,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_mskor_rtn_b32 v0, v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x58,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x58,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_mskor_rtn_b32 v0, v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x58,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x58,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_mskor_rtn_b32 v0, v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x58,0xd8,0x00,0x00,0xff,0x00]
+0xff,0xff,0x58,0xd8,0x00,0x00,0xff,0x00
+
+# CHECK: ds_mskor_rtn_b32 v0, v0, v0, v0 ; encoding: [0x00,0x00,0x58,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_mskor_rtn_b32 v0, v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x58,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x58,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_mskor_rtn_b32 v0, v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x59,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x59,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_wrxchg_rtn_b32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x5a,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x5a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_wrxchg_rtn_b32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x5a,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x5a,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_wrxchg_rtn_b32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x5a,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x5a,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_wrxchg_rtn_b32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x5a,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x5a,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_wrxchg_rtn_b32 v0, v0, v0 ; encoding: [0x00,0x00,0x5a,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_wrxchg_rtn_b32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x5a,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x5a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_wrxchg_rtn_b32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x5b,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x5b,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_b32 v0, v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x60,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x60,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_b32 v255, v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x60,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x60,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_cmpst_rtn_b32 v0, v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x60,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x60,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_b32 v0, v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x60,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x60,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_b32 v0, v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x60,0xd8,0x00,0x00,0xff,0x00]
+0xff,0xff,0x60,0xd8,0x00,0x00,0xff,0x00
+
+# CHECK: ds_cmpst_rtn_b32 v0, v0, v0, v0 ; encoding: [0x00,0x00,0x60,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_b32 v0, v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x60,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x60,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_b32 v0, v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x61,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x61,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_f32 v0, v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x62,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x62,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_f32 v255, v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x62,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x62,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_cmpst_rtn_f32 v0, v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x62,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x62,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_f32 v0, v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x62,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x62,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_f32 v0, v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x62,0xd8,0x00,0x00,0xff,0x00]
+0xff,0xff,0x62,0xd8,0x00,0x00,0xff,0x00
+
+# CHECK: ds_cmpst_rtn_f32 v0, v0, v0, v0 ; encoding: [0x00,0x00,0x62,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_f32 v0, v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x62,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x62,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_f32 v0, v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x63,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x63,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_f32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x64,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x64,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_f32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x64,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x64,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_min_rtn_f32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x64,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x64,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_f32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x64,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x64,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_min_rtn_f32 v0, v0, v0 ; encoding: [0x00,0x00,0x64,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_f32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x64,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x64,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_f32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x65,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x65,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_f32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x66,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x66,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_f32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x66,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x66,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_max_rtn_f32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x66,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x66,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_f32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x66,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x66,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_max_rtn_f32 v0, v0, v0 ; encoding: [0x00,0x00,0x66,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_f32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x66,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x66,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_f32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x67,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x67,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_rtn_f32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x6a,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x6a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_rtn_f32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x6a,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x6a,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_add_rtn_f32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x6a,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x6a,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_add_rtn_f32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x6a,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x6a,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_add_rtn_f32 v0, v0, v0 ; encoding: [0x00,0x00,0x6a,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_rtn_f32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x6a,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x6a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_rtn_f32 v0, v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x6b,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x6b,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_b32 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x6c,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x6c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_b32 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x6c,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x6c,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_read_b32 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x6c,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x6c,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_read_b32 v0, v0 ; encoding: [0x00,0x00,0x6c,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_b32 v0, v0 offset:4 ; encoding: [0x04,0x00,0x6c,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x6c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_b32 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x6d,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x6d,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2_b32 v[0:1], v0 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x6e,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0xff,0x6e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2_b32 v[254:255], v0 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x6e,0xd8,0x00,0x00,0x00,0xfe]
+0x7f,0xff,0x6e,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_read2_b32 v[0:1], v255 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x6e,0xd8,0xff,0x00,0x00,0x00]
+0x7f,0xff,0x6e,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_read2_b32 v[0:1], v0 offset1:255 ; encoding: [0x00,0xff,0x6e,0xd8,0x00,0x00,0x00,0x00]
+0x00,0xff,0x6e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2_b32 v[0:1], v0 offset0:16 offset1:255 ; encoding: [0x10,0xff,0x6e,0xd8,0x00,0x00,0x00,0x00]
+0x10,0xff,0x6e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2_b32 v[0:1], v0 offset0:127 ; encoding: [0x7f,0x00,0x6e,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0x00,0x6e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2_b32 v[0:1], v0 offset0:127 offset1:1 ; encoding: [0x7f,0x01,0x6e,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0x01,0x6e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2_b32 v[0:1], v0 offset0:127 offset1:255 gds ; encoding: [0x7f,0xff,0x6f,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0xff,0x6f,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2st64_b32 v[0:1], v0 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x70,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0xff,0x70,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2st64_b32 v[254:255], v0 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x70,0xd8,0x00,0x00,0x00,0xfe]
+0x7f,0xff,0x70,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_read2st64_b32 v[0:1], v255 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x70,0xd8,0xff,0x00,0x00,0x00]
+0x7f,0xff,0x70,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_read2st64_b32 v[0:1], v0 offset1:255 ; encoding: [0x00,0xff,0x70,0xd8,0x00,0x00,0x00,0x00]
+0x00,0xff,0x70,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2st64_b32 v[0:1], v0 offset0:16 offset1:255 ; encoding: [0x10,0xff,0x70,0xd8,0x00,0x00,0x00,0x00]
+0x10,0xff,0x70,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2st64_b32 v[0:1], v0 offset0:127 ; encoding: [0x7f,0x00,0x70,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0x00,0x70,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2st64_b32 v[0:1], v0 offset0:127 offset1:1 ; encoding: [0x7f,0x01,0x70,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0x01,0x70,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2st64_b32 v[0:1], v0 offset0:127 offset1:255 gds ; encoding: [0x7f,0xff,0x71,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0xff,0x71,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_i8 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x72,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x72,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_i8 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x72,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x72,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_read_i8 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x72,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x72,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_read_i8 v0, v0 ; encoding: [0x00,0x00,0x72,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x72,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_i8 v0, v0 offset:4 ; encoding: [0x04,0x00,0x72,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x72,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_i8 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x73,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x73,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_u8 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x74,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x74,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_u8 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x74,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x74,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_read_u8 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x74,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x74,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_read_u8 v0, v0 ; encoding: [0x00,0x00,0x74,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_u8 v0, v0 offset:4 ; encoding: [0x04,0x00,0x74,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x74,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_u8 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x75,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x75,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_i16 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x76,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x76,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_i16 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x76,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x76,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_read_i16 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x76,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x76,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_read_i16 v0, v0 ; encoding: [0x00,0x00,0x76,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x76,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_i16 v0, v0 offset:4 ; encoding: [0x04,0x00,0x76,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x76,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_i16 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x77,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x77,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_u16 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x78,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x78,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_u16 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x78,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x78,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_read_u16 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x78,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x78,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_read_u16 v0, v0 ; encoding: [0x00,0x00,0x78,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x78,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_u16 v0, v0 offset:4 ; encoding: [0x04,0x00,0x78,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x78,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_u16 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x79,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x79,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_swizzle_b32 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x7a,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x7a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_swizzle_b32 v255, v0 offset:65535 ; encoding: [0xff,0xff,0x7a,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x7a,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_swizzle_b32 v0, v255 offset:65535 ; encoding: [0xff,0xff,0x7a,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x7a,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_swizzle_b32 v0, v0 ; encoding: [0x00,0x00,0x7a,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_swizzle_b32 v0, v0 offset:4 ; encoding: [0x04,0x00,0x7a,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x7a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_swizzle_b32 v0, v0 offset:65535 gds ; encoding: [0xff,0xff,0x7b,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x7b,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_permute_b32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x7c,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x7c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_permute_b32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x7c,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x7c,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_permute_b32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x7c,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x7c,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_permute_b32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x7c,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x7c,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_permute_b32 v0, v0, v0 ; encoding: [0x00,0x00,0x7c,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_permute_b32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x7c,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x7c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_bpermute_b32 v0, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x7e,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x7e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_bpermute_b32 v255, v0, v0 offset:65535 ; encoding: [0xff,0xff,0x7e,0xd8,0x00,0x00,0x00,0xff]
+0xff,0xff,0x7e,0xd8,0x00,0x00,0x00,0xff
+
+# CHECK: ds_bpermute_b32 v0, v255, v0 offset:65535 ; encoding: [0xff,0xff,0x7e,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x7e,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_bpermute_b32 v0, v0, v255 offset:65535 ; encoding: [0xff,0xff,0x7e,0xd8,0x00,0xff,0x00,0x00]
+0xff,0xff,0x7e,0xd8,0x00,0xff,0x00,0x00
+
+# CHECK: ds_bpermute_b32 v0, v0, v0 ; encoding: [0x00,0x00,0x7e,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_bpermute_b32 v0, v0, v0 offset:4 ; encoding: [0x04,0x00,0x7e,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x7e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_u64 v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x80,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x80,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_u64 v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x80,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x80,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_add_u64 v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0x80,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0x80,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_add_u64 v0, v[0:1] ; encoding: [0x00,0x00,0x80,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_u64 v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0x80,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x80,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_u64 v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0x81,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x81,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_u64 v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x82,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x82,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_u64 v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x82,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x82,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_sub_u64 v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0x82,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0x82,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_sub_u64 v0, v[0:1] ; encoding: [0x00,0x00,0x82,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_u64 v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0x82,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x82,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_u64 v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0x83,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x83,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_u64 v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x84,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x84,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_u64 v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x84,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x84,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_rsub_u64 v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0x84,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0x84,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_rsub_u64 v0, v[0:1] ; encoding: [0x00,0x00,0x84,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_u64 v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0x84,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x84,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_u64 v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0x85,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x85,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_u64 v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x86,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x86,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_u64 v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x86,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x86,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_inc_u64 v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0x86,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0x86,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_inc_u64 v0, v[0:1] ; encoding: [0x00,0x00,0x86,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_u64 v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0x86,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x86,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_u64 v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0x87,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x87,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_u64 v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x88,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x88,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_u64 v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x88,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x88,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_dec_u64 v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0x88,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0x88,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_dec_u64 v0, v[0:1] ; encoding: [0x00,0x00,0x88,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_u64 v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0x88,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x88,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_u64 v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0x89,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x89,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_i64 v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x8a,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x8a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_i64 v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x8a,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x8a,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_min_i64 v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0x8a,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0x8a,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_min_i64 v0, v[0:1] ; encoding: [0x00,0x00,0x8a,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_i64 v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0x8a,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x8a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_i64 v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0x8b,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x8b,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_i64 v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x8c,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x8c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_i64 v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x8c,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x8c,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_max_i64 v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0x8c,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0x8c,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_max_i64 v0, v[0:1] ; encoding: [0x00,0x00,0x8c,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_i64 v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0x8c,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x8c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_i64 v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0x8d,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x8d,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_u64 v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x8e,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x8e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_u64 v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x8e,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x8e,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_min_u64 v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0x8e,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0x8e,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_min_u64 v0, v[0:1] ; encoding: [0x00,0x00,0x8e,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x8e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_u64 v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0x8e,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x8e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_u64 v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0x8f,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x8f,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_u64 v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x90,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x90,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_u64 v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x90,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x90,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_max_u64 v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0x90,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0x90,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_max_u64 v0, v[0:1] ; encoding: [0x00,0x00,0x90,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x90,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_u64 v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0x90,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x90,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_u64 v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0x91,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x91,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_b64 v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x92,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x92,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_b64 v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x92,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x92,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_and_b64 v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0x92,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0x92,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_and_b64 v0, v[0:1] ; encoding: [0x00,0x00,0x92,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x92,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_b64 v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0x92,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x92,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_b64 v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0x93,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x93,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_b64 v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x94,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x94,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_b64 v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x94,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x94,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_or_b64 v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0x94,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0x94,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_or_b64 v0, v[0:1] ; encoding: [0x00,0x00,0x94,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x94,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_b64 v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0x94,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x94,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_b64 v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0x95,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x95,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_b64 v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x96,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x96,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_b64 v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x96,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x96,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_xor_b64 v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0x96,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0x96,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_xor_b64 v0, v[0:1] ; encoding: [0x00,0x00,0x96,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x96,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_b64 v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0x96,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x96,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_b64 v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0x97,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x97,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_mskor_b64 v0, v[0:1], v[0:1] offset:65535 ; encoding: [0xff,0xff,0x98,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x98,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_mskor_b64 v255, v[0:1], v[0:1] offset:65535 ; encoding: [0xff,0xff,0x98,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x98,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_mskor_b64 v0, v[254:255], v[0:1] offset:65535 ; encoding: [0xff,0xff,0x98,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0x98,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_mskor_b64 v0, v[0:1], v[254:255] offset:65535 ; encoding: [0xff,0xff,0x98,0xd8,0x00,0x00,0xfe,0x00]
+0xff,0xff,0x98,0xd8,0x00,0x00,0xfe,0x00
+
+# CHECK: ds_mskor_b64 v0, v[0:1], v[0:1] ; encoding: [0x00,0x00,0x98,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x98,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_mskor_b64 v0, v[0:1], v[0:1] offset:4 ; encoding: [0x04,0x00,0x98,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x98,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_mskor_b64 v0, v[0:1], v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0x99,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x99,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write_b64 v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x9a,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x9a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write_b64 v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0x9a,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0x9a,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_write_b64 v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0x9a,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0x9a,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_write_b64 v0, v[0:1] ; encoding: [0x00,0x00,0x9a,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x9a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write_b64 v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0x9a,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0x9a,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write_b64 v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0x9b,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0x9b,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2_b64 v0, v[0:1], v[0:1] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x9c,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0xff,0x9c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2_b64 v255, v[0:1], v[0:1] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x9c,0xd8,0xff,0x00,0x00,0x00]
+0x7f,0xff,0x9c,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_write2_b64 v0, v[254:255], v[0:1] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x9c,0xd8,0x00,0xfe,0x00,0x00]
+0x7f,0xff,0x9c,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_write2_b64 v0, v[0:1], v[254:255] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x9c,0xd8,0x00,0x00,0xfe,0x00]
+0x7f,0xff,0x9c,0xd8,0x00,0x00,0xfe,0x00
+
+# CHECK: ds_write2_b64 v0, v[0:1], v[0:1] offset1:255 ; encoding: [0x00,0xff,0x9c,0xd8,0x00,0x00,0x00,0x00]
+0x00,0xff,0x9c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2_b64 v0, v[0:1], v[0:1] offset0:16 offset1:255 ; encoding: [0x10,0xff,0x9c,0xd8,0x00,0x00,0x00,0x00]
+0x10,0xff,0x9c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2_b64 v0, v[0:1], v[0:1] offset0:127 ; encoding: [0x7f,0x00,0x9c,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0x00,0x9c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2_b64 v0, v[0:1], v[0:1] offset0:127 offset1:1 ; encoding: [0x7f,0x01,0x9c,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0x01,0x9c,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2_b64 v0, v[0:1], v[0:1] offset0:127 offset1:255 gds ; encoding: [0x7f,0xff,0x9d,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0xff,0x9d,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2st64_b64 v0, v[0:1], v[0:1] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x9e,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0xff,0x9e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2st64_b64 v255, v[0:1], v[0:1] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x9e,0xd8,0xff,0x00,0x00,0x00]
+0x7f,0xff,0x9e,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_write2st64_b64 v0, v[254:255], v[0:1] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x9e,0xd8,0x00,0xfe,0x00,0x00]
+0x7f,0xff,0x9e,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_write2st64_b64 v0, v[0:1], v[254:255] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x9e,0xd8,0x00,0x00,0xfe,0x00]
+0x7f,0xff,0x9e,0xd8,0x00,0x00,0xfe,0x00
+
+# CHECK: ds_write2st64_b64 v0, v[0:1], v[0:1] offset1:255 ; encoding: [0x00,0xff,0x9e,0xd8,0x00,0x00,0x00,0x00]
+0x00,0xff,0x9e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2st64_b64 v0, v[0:1], v[0:1] offset0:16 offset1:255 ; encoding: [0x10,0xff,0x9e,0xd8,0x00,0x00,0x00,0x00]
+0x10,0xff,0x9e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2st64_b64 v0, v[0:1], v[0:1] offset0:127 ; encoding: [0x7f,0x00,0x9e,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0x00,0x9e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2st64_b64 v0, v[0:1], v[0:1] offset0:127 offset1:1 ; encoding: [0x7f,0x01,0x9e,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0x01,0x9e,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write2st64_b64 v0, v[0:1], v[0:1] offset0:127 offset1:255 gds ; encoding: [0x7f,0xff,0x9f,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0xff,0x9f,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_b64 v0, v[0:1], v[0:1] offset:65535 ; encoding: [0xff,0xff,0xa0,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xa0,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_b64 v255, v[0:1], v[0:1] offset:65535 ; encoding: [0xff,0xff,0xa0,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xa0,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_b64 v0, v[254:255], v[0:1] offset:65535 ; encoding: [0xff,0xff,0xa0,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xa0,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_cmpst_b64 v0, v[0:1], v[254:255] offset:65535 ; encoding: [0xff,0xff,0xa0,0xd8,0x00,0x00,0xfe,0x00]
+0xff,0xff,0xa0,0xd8,0x00,0x00,0xfe,0x00
+
+# CHECK: ds_cmpst_b64 v0, v[0:1], v[0:1] ; encoding: [0x00,0x00,0xa0,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa0,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_b64 v0, v[0:1], v[0:1] offset:4 ; encoding: [0x04,0x00,0xa0,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xa0,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_b64 v0, v[0:1], v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xa1,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xa1,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_f64 v0, v[0:1], v[0:1] offset:65535 ; encoding: [0xff,0xff,0xa2,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xa2,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_f64 v255, v[0:1], v[0:1] offset:65535 ; encoding: [0xff,0xff,0xa2,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xa2,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_f64 v0, v[254:255], v[0:1] offset:65535 ; encoding: [0xff,0xff,0xa2,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xa2,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_cmpst_f64 v0, v[0:1], v[254:255] offset:65535 ; encoding: [0xff,0xff,0xa2,0xd8,0x00,0x00,0xfe,0x00]
+0xff,0xff,0xa2,0xd8,0x00,0x00,0xfe,0x00
+
+# CHECK: ds_cmpst_f64 v0, v[0:1], v[0:1] ; encoding: [0x00,0x00,0xa2,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa2,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_f64 v0, v[0:1], v[0:1] offset:4 ; encoding: [0x04,0x00,0xa2,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xa2,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_f64 v0, v[0:1], v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xa3,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xa3,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_f64 v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xa4,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xa4,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_f64 v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xa4,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xa4,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_min_f64 v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xa4,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xa4,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_min_f64 v0, v[0:1] ; encoding: [0x00,0x00,0xa4,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa4,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_f64 v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0xa4,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xa4,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_f64 v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xa5,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xa5,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_f64 v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xa6,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xa6,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_f64 v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xa6,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xa6,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_max_f64 v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xa6,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xa6,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_max_f64 v0, v[0:1] ; encoding: [0x00,0x00,0xa6,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa6,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_f64 v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0xa6,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xa6,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_f64 v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xa7,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xa7,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_rtn_u64 v[0:1], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xc0,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xc0,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_rtn_u64 v[254:255], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xc0,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xc0,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_add_rtn_u64 v[0:1], v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xc0,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xc0,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_add_rtn_u64 v[0:1], v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xc0,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xc0,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_add_rtn_u64 v[0:1], v0, v[0:1] ; encoding: [0x00,0x00,0xc0,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc0,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_rtn_u64 v[0:1], v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0xc0,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xc0,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_rtn_u64 v[0:1], v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xc1,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xc1,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_rtn_u64 v[0:1], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xc2,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xc2,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_rtn_u64 v[254:255], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xc2,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xc2,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_sub_rtn_u64 v[0:1], v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xc2,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xc2,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_sub_rtn_u64 v[0:1], v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xc2,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xc2,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_sub_rtn_u64 v[0:1], v0, v[0:1] ; encoding: [0x00,0x00,0xc2,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc2,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_rtn_u64 v[0:1], v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0xc2,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xc2,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_rtn_u64 v[0:1], v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xc3,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xc3,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_rtn_u64 v[0:1], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xc4,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xc4,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_rtn_u64 v[254:255], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xc4,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xc4,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_rsub_rtn_u64 v[0:1], v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xc4,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xc4,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_rsub_rtn_u64 v[0:1], v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xc4,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xc4,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_rsub_rtn_u64 v[0:1], v0, v[0:1] ; encoding: [0x00,0x00,0xc4,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc4,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_rtn_u64 v[0:1], v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0xc4,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xc4,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_rtn_u64 v[0:1], v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xc5,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xc5,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_rtn_u64 v[0:1], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xc6,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xc6,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_rtn_u64 v[254:255], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xc6,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xc6,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_inc_rtn_u64 v[0:1], v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xc6,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xc6,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_inc_rtn_u64 v[0:1], v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xc6,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xc6,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_inc_rtn_u64 v[0:1], v0, v[0:1] ; encoding: [0x00,0x00,0xc6,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc6,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_rtn_u64 v[0:1], v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0xc6,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xc6,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_rtn_u64 v[0:1], v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xc7,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xc7,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_rtn_u64 v[0:1], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xc8,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xc8,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_rtn_u64 v[254:255], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xc8,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xc8,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_dec_rtn_u64 v[0:1], v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xc8,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xc8,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_dec_rtn_u64 v[0:1], v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xc8,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xc8,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_dec_rtn_u64 v[0:1], v0, v[0:1] ; encoding: [0x00,0x00,0xc8,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc8,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_rtn_u64 v[0:1], v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0xc8,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xc8,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_rtn_u64 v[0:1], v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xc9,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xc9,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_i64 v[0:1], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xca,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xca,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_i64 v[254:255], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xca,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xca,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_min_rtn_i64 v[0:1], v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xca,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xca,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_i64 v[0:1], v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xca,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xca,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_min_rtn_i64 v[0:1], v0, v[0:1] ; encoding: [0x00,0x00,0xca,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xca,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_i64 v[0:1], v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0xca,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xca,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_i64 v[0:1], v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xcb,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xcb,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_i64 v[0:1], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xcc,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xcc,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_i64 v[254:255], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xcc,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xcc,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_max_rtn_i64 v[0:1], v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xcc,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xcc,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_i64 v[0:1], v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xcc,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xcc,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_max_rtn_i64 v[0:1], v0, v[0:1] ; encoding: [0x00,0x00,0xcc,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xcc,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_i64 v[0:1], v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0xcc,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xcc,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_i64 v[0:1], v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xcd,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xcd,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_u64 v[0:1], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xce,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xce,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_u64 v[254:255], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xce,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xce,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_min_rtn_u64 v[0:1], v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xce,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xce,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_u64 v[0:1], v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xce,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xce,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_min_rtn_u64 v[0:1], v0, v[0:1] ; encoding: [0x00,0x00,0xce,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xce,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_u64 v[0:1], v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0xce,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xce,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_u64 v[0:1], v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xcf,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xcf,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_u64 v[0:1], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xd0,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xd0,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_u64 v[254:255], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xd0,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xd0,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_max_rtn_u64 v[0:1], v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xd0,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xd0,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_u64 v[0:1], v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xd0,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xd0,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_max_rtn_u64 v[0:1], v0, v[0:1] ; encoding: [0x00,0x00,0xd0,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd0,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_u64 v[0:1], v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0xd0,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xd0,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_u64 v[0:1], v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xd1,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xd1,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_rtn_b64 v[0:1], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xd2,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xd2,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_rtn_b64 v[254:255], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xd2,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xd2,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_and_rtn_b64 v[0:1], v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xd2,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xd2,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_and_rtn_b64 v[0:1], v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xd2,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xd2,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_and_rtn_b64 v[0:1], v0, v[0:1] ; encoding: [0x00,0x00,0xd2,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd2,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_rtn_b64 v[0:1], v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0xd2,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xd2,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_rtn_b64 v[0:1], v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xd3,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xd3,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_rtn_b64 v[0:1], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xd4,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xd4,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_rtn_b64 v[254:255], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xd4,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xd4,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_or_rtn_b64 v[0:1], v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xd4,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xd4,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_or_rtn_b64 v[0:1], v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xd4,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xd4,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_or_rtn_b64 v[0:1], v0, v[0:1] ; encoding: [0x00,0x00,0xd4,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd4,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_rtn_b64 v[0:1], v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0xd4,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xd4,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_rtn_b64 v[0:1], v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xd5,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xd5,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_rtn_b64 v[0:1], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xd6,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xd6,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_rtn_b64 v[254:255], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xd6,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xd6,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_xor_rtn_b64 v[0:1], v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xd6,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xd6,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_xor_rtn_b64 v[0:1], v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xd6,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xd6,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_xor_rtn_b64 v[0:1], v0, v[0:1] ; encoding: [0x00,0x00,0xd6,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd6,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_rtn_b64 v[0:1], v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0xd6,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xd6,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_rtn_b64 v[0:1], v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xd7,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xd7,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_mskor_rtn_b64 v[0:1], v0, v[0:1], v[0:1] offset:65535 ; encoding: [0xff,0xff,0xd8,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xd8,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_mskor_rtn_b64 v[254:255], v0, v[0:1], v[0:1] offset:65535 ; encoding: [0xff,0xff,0xd8,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xd8,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_mskor_rtn_b64 v[0:1], v255, v[0:1], v[0:1] offset:65535 ; encoding: [0xff,0xff,0xd8,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xd8,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_mskor_rtn_b64 v[0:1], v0, v[254:255], v[0:1] offset:65535 ; encoding: [0xff,0xff,0xd8,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xd8,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_mskor_rtn_b64 v[0:1], v0, v[0:1], v[254:255] offset:65535 ; encoding: [0xff,0xff,0xd8,0xd8,0x00,0x00,0xfe,0x00]
+0xff,0xff,0xd8,0xd8,0x00,0x00,0xfe,0x00
+
+# CHECK: ds_mskor_rtn_b64 v[0:1], v0, v[0:1], v[0:1] ; encoding: [0x00,0x00,0xd8,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd8,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_mskor_rtn_b64 v[0:1], v0, v[0:1], v[0:1] offset:4 ; encoding: [0x04,0x00,0xd8,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xd8,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_mskor_rtn_b64 v[0:1], v0, v[0:1], v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xd9,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xd9,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_wrxchg_rtn_b64 v[0:1], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xda,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xda,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_wrxchg_rtn_b64 v[254:255], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xda,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xda,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_wrxchg_rtn_b64 v[0:1], v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xda,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xda,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_wrxchg_rtn_b64 v[0:1], v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xda,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xda,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_wrxchg_rtn_b64 v[0:1], v0, v[0:1] ; encoding: [0x00,0x00,0xda,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xda,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_wrxchg_rtn_b64 v[0:1], v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0xda,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xda,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_wrxchg_rtn_b64 v[0:1], v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xdb,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xdb,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_b64 v[0:1], v0, v[0:1], v[0:1] offset:65535 ; encoding: [0xff,0xff,0xe0,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xe0,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_b64 v[254:255], v0, v[0:1], v[0:1] offset:65535 ; encoding: [0xff,0xff,0xe0,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xe0,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_cmpst_rtn_b64 v[0:1], v255, v[0:1], v[0:1] offset:65535 ; encoding: [0xff,0xff,0xe0,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xe0,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_b64 v[0:1], v0, v[254:255], v[0:1] offset:65535 ; encoding: [0xff,0xff,0xe0,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xe0,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_b64 v[0:1], v0, v[0:1], v[254:255] offset:65535 ; encoding: [0xff,0xff,0xe0,0xd8,0x00,0x00,0xfe,0x00]
+0xff,0xff,0xe0,0xd8,0x00,0x00,0xfe,0x00
+
+# CHECK: ds_cmpst_rtn_b64 v[0:1], v0, v[0:1], v[0:1] ; encoding: [0x00,0x00,0xe0,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe0,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_b64 v[0:1], v0, v[0:1], v[0:1] offset:4 ; encoding: [0x04,0x00,0xe0,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xe0,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_b64 v[0:1], v0, v[0:1], v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xe1,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xe1,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_f64 v[0:1], v0, v[0:1], v[0:1] offset:65535 ; encoding: [0xff,0xff,0xe2,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xe2,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_f64 v[254:255], v0, v[0:1], v[0:1] offset:65535 ; encoding: [0xff,0xff,0xe2,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xe2,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_cmpst_rtn_f64 v[0:1], v255, v[0:1], v[0:1] offset:65535 ; encoding: [0xff,0xff,0xe2,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xe2,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_f64 v[0:1], v0, v[254:255], v[0:1] offset:65535 ; encoding: [0xff,0xff,0xe2,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xe2,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_f64 v[0:1], v0, v[0:1], v[254:255] offset:65535 ; encoding: [0xff,0xff,0xe2,0xd8,0x00,0x00,0xfe,0x00]
+0xff,0xff,0xe2,0xd8,0x00,0x00,0xfe,0x00
+
+# CHECK: ds_cmpst_rtn_f64 v[0:1], v0, v[0:1], v[0:1] ; encoding: [0x00,0x00,0xe2,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe2,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_f64 v[0:1], v0, v[0:1], v[0:1] offset:4 ; encoding: [0x04,0x00,0xe2,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xe2,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_cmpst_rtn_f64 v[0:1], v0, v[0:1], v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xe3,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xe3,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_f64 v[0:1], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xe4,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xe4,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_f64 v[254:255], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xe4,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xe4,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_min_rtn_f64 v[0:1], v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xe4,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xe4,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_f64 v[0:1], v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xe4,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xe4,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_min_rtn_f64 v[0:1], v0, v[0:1] ; encoding: [0x00,0x00,0xe4,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe4,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_f64 v[0:1], v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0xe4,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xe4,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_rtn_f64 v[0:1], v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xe5,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xe5,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_f64 v[0:1], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xe6,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xe6,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_f64 v[254:255], v0, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xe6,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xe6,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_max_rtn_f64 v[0:1], v255, v[0:1] offset:65535 ; encoding: [0xff,0xff,0xe6,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xe6,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_f64 v[0:1], v0, v[254:255] offset:65535 ; encoding: [0xff,0xff,0xe6,0xd8,0x00,0xfe,0x00,0x00]
+0xff,0xff,0xe6,0xd8,0x00,0xfe,0x00,0x00
+
+# CHECK: ds_max_rtn_f64 v[0:1], v0, v[0:1] ; encoding: [0x00,0x00,0xe6,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe6,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_f64 v[0:1], v0, v[0:1] offset:4 ; encoding: [0x04,0x00,0xe6,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xe6,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_rtn_f64 v[0:1], v0, v[0:1] offset:65535 gds ; encoding: [0xff,0xff,0xe7,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xe7,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_b64 v[0:1], v0 offset:65535 ; encoding: [0xff,0xff,0xec,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xec,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_b64 v[254:255], v0 offset:65535 ; encoding: [0xff,0xff,0xec,0xd8,0x00,0x00,0x00,0xfe]
+0xff,0xff,0xec,0xd8,0x00,0x00,0x00,0xfe
+
+# CHECK: ds_read_b64 v[0:1], v255 offset:65535 ; encoding: [0xff,0xff,0xec,0xd8,0xff,0x00,0x00,0x00]
+0xff,0xff,0xec,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_read_b64 v[0:1], v0 ; encoding: [0x00,0x00,0xec,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0xec,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_b64 v[0:1], v0 offset:4 ; encoding: [0x04,0x00,0xec,0xd8,0x00,0x00,0x00,0x00]
+0x04,0x00,0xec,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read_b64 v[0:1], v0 offset:65535 gds ; encoding: [0xff,0xff,0xed,0xd8,0x00,0x00,0x00,0x00]
+0xff,0xff,0xed,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2_b64 v[0:3], v0 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xee,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0xff,0xee,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2_b64 v[252:255], v0 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xee,0xd8,0x00,0x00,0x00,0xfc]
+0x7f,0xff,0xee,0xd8,0x00,0x00,0x00,0xfc
+
+# CHECK: ds_read2_b64 v[0:3], v255 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xee,0xd8,0xff,0x00,0x00,0x00]
+0x7f,0xff,0xee,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_read2_b64 v[0:3], v0 offset1:255 ; encoding: [0x00,0xff,0xee,0xd8,0x00,0x00,0x00,0x00]
+0x00,0xff,0xee,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2_b64 v[0:3], v0 offset0:16 offset1:255 ; encoding: [0x10,0xff,0xee,0xd8,0x00,0x00,0x00,0x00]
+0x10,0xff,0xee,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2_b64 v[0:3], v0 offset0:127 ; encoding: [0x7f,0x00,0xee,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0x00,0xee,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2_b64 v[0:3], v0 offset0:127 offset1:1 ; encoding: [0x7f,0x01,0xee,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0x01,0xee,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2_b64 v[0:3], v0 offset0:127 offset1:255 gds ; encoding: [0x7f,0xff,0xef,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0xff,0xef,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2st64_b64 v[0:3], v0 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xf0,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0xff,0xf0,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2st64_b64 v[252:255], v0 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xf0,0xd8,0x00,0x00,0x00,0xfc]
+0x7f,0xff,0xf0,0xd8,0x00,0x00,0x00,0xfc
+
+# CHECK: ds_read2st64_b64 v[0:3], v255 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xf0,0xd8,0xff,0x00,0x00,0x00]
+0x7f,0xff,0xf0,0xd8,0xff,0x00,0x00,0x00
+
+# CHECK: ds_read2st64_b64 v[0:3], v0 offset1:255 ; encoding: [0x00,0xff,0xf0,0xd8,0x00,0x00,0x00,0x00]
+0x00,0xff,0xf0,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2st64_b64 v[0:3], v0 offset0:16 offset1:255 ; encoding: [0x10,0xff,0xf0,0xd8,0x00,0x00,0x00,0x00]
+0x10,0xff,0xf0,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2st64_b64 v[0:3], v0 offset0:127 ; encoding: [0x7f,0x00,0xf0,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0x00,0xf0,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2st64_b64 v[0:3], v0 offset0:127 offset1:1 ; encoding: [0x7f,0x01,0xf0,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0x01,0xf0,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_read2st64_b64 v[0:3], v0 offset0:127 offset1:255 gds ; encoding: [0x7f,0xff,0xf1,0xd8,0x00,0x00,0x00,0x00]
+0x7f,0xff,0xf1,0xd8,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_src2_u32 v0 offset:65535 ; encoding: [0xff,0xff,0x00,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x00,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_src2_u32 v255 offset:65535 ; encoding: [0xff,0xff,0x00,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x00,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_add_src2_u32 v0 ; encoding: [0x00,0x00,0x00,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x00,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_src2_u32 v0 offset:4 ; encoding: [0x04,0x00,0x00,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x00,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_src2_u32 v0 offset:65535 gds ; encoding: [0xff,0xff,0x01,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x01,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_src2_u32 v0 offset:65535 ; encoding: [0xff,0xff,0x02,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x02,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_src2_u32 v255 offset:65535 ; encoding: [0xff,0xff,0x02,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x02,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_sub_src2_u32 v0 ; encoding: [0x00,0x00,0x02,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x02,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_src2_u32 v0 offset:4 ; encoding: [0x04,0x00,0x02,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x02,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_src2_u32 v0 offset:65535 gds ; encoding: [0xff,0xff,0x03,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x03,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_src2_u32 v0 offset:65535 ; encoding: [0xff,0xff,0x04,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x04,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_src2_u32 v255 offset:65535 ; encoding: [0xff,0xff,0x04,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x04,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_rsub_src2_u32 v0 ; encoding: [0x00,0x00,0x04,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x04,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_src2_u32 v0 offset:4 ; encoding: [0x04,0x00,0x04,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x04,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_src2_u32 v0 offset:65535 gds ; encoding: [0xff,0xff,0x05,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x05,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_src2_u32 v0 offset:65535 ; encoding: [0xff,0xff,0x06,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x06,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_src2_u32 v255 offset:65535 ; encoding: [0xff,0xff,0x06,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x06,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_inc_src2_u32 v0 ; encoding: [0x00,0x00,0x06,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x06,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_src2_u32 v0 offset:4 ; encoding: [0x04,0x00,0x06,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x06,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_src2_u32 v0 offset:65535 gds ; encoding: [0xff,0xff,0x07,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x07,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_src2_u32 v0 offset:65535 ; encoding: [0xff,0xff,0x08,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x08,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_src2_u32 v255 offset:65535 ; encoding: [0xff,0xff,0x08,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x08,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_dec_src2_u32 v0 ; encoding: [0x00,0x00,0x08,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x08,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_src2_u32 v0 offset:4 ; encoding: [0x04,0x00,0x08,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x08,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_src2_u32 v0 offset:65535 gds ; encoding: [0xff,0xff,0x09,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x09,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_i32 v0 offset:65535 ; encoding: [0xff,0xff,0x0a,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x0a,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_i32 v255 offset:65535 ; encoding: [0xff,0xff,0x0a,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x0a,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_i32 v0 ; encoding: [0x00,0x00,0x0a,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0a,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_i32 v0 offset:4 ; encoding: [0x04,0x00,0x0a,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x0a,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_i32 v0 offset:65535 gds ; encoding: [0xff,0xff,0x0b,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x0b,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_i32 v0 offset:65535 ; encoding: [0xff,0xff,0x0c,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x0c,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_i32 v255 offset:65535 ; encoding: [0xff,0xff,0x0c,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x0c,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_i32 v0 ; encoding: [0x00,0x00,0x0c,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_i32 v0 offset:4 ; encoding: [0x04,0x00,0x0c,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x0c,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_i32 v0 offset:65535 gds ; encoding: [0xff,0xff,0x0d,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x0d,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_u32 v0 offset:65535 ; encoding: [0xff,0xff,0x0e,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x0e,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_u32 v255 offset:65535 ; encoding: [0xff,0xff,0x0e,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x0e,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_u32 v0 ; encoding: [0x00,0x00,0x0e,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0e,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_u32 v0 offset:4 ; encoding: [0x04,0x00,0x0e,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x0e,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_u32 v0 offset:65535 gds ; encoding: [0xff,0xff,0x0f,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x0f,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_u32 v0 offset:65535 ; encoding: [0xff,0xff,0x10,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x10,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_u32 v255 offset:65535 ; encoding: [0xff,0xff,0x10,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x10,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_u32 v0 ; encoding: [0x00,0x00,0x10,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x10,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_u32 v0 offset:4 ; encoding: [0x04,0x00,0x10,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x10,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_u32 v0 offset:65535 gds ; encoding: [0xff,0xff,0x11,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x11,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_src2_b32 v0 offset:65535 ; encoding: [0xff,0xff,0x14,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x14,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_src2_b32 v255 offset:65535 ; encoding: [0xff,0xff,0x14,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x14,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_or_src2_b32 v0 ; encoding: [0x00,0x00,0x14,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x14,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_src2_b32 v0 offset:4 ; encoding: [0x04,0x00,0x14,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x14,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_src2_b32 v0 offset:65535 gds ; encoding: [0xff,0xff,0x15,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x15,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_src2_b32 v0 offset:65535 ; encoding: [0xff,0xff,0x16,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x16,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_src2_b32 v255 offset:65535 ; encoding: [0xff,0xff,0x16,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x16,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_xor_src2_b32 v0 ; encoding: [0x00,0x00,0x16,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x16,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_src2_b32 v0 offset:4 ; encoding: [0x04,0x00,0x16,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x16,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_src2_b32 v0 offset:65535 gds ; encoding: [0xff,0xff,0x17,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x17,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write_src2_b32 v0 ; encoding: [0x00,0x00,0x1a,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x1a,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_f32 v0 offset:65535 ; encoding: [0xff,0xff,0x24,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x24,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_f32 v255 offset:65535 ; encoding: [0xff,0xff,0x24,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x24,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_f32 v0 ; encoding: [0x00,0x00,0x24,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x24,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_f32 v0 offset:4 ; encoding: [0x04,0x00,0x24,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x24,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_f32 v0 offset:65535 gds ; encoding: [0xff,0xff,0x25,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x25,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_f32 v0 offset:65535 ; encoding: [0xff,0xff,0x26,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x26,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_f32 v255 offset:65535 ; encoding: [0xff,0xff,0x26,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x26,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_f32 v0 ; encoding: [0x00,0x00,0x26,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x26,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_f32 v0 offset:4 ; encoding: [0x04,0x00,0x26,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x26,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_f32 v0 offset:65535 gds ; encoding: [0xff,0xff,0x27,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x27,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_src2_u64 v0 offset:65535 ; encoding: [0xff,0xff,0x80,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x80,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_src2_u64 v255 offset:65535 ; encoding: [0xff,0xff,0x80,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x80,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_add_src2_u64 v0 ; encoding: [0x00,0x00,0x80,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_src2_u64 v0 offset:4 ; encoding: [0x04,0x00,0x80,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x80,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_add_src2_u64 v0 offset:65535 gds ; encoding: [0xff,0xff,0x81,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x81,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_src2_u64 v0 offset:65535 ; encoding: [0xff,0xff,0x82,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x82,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_src2_u64 v255 offset:65535 ; encoding: [0xff,0xff,0x82,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x82,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_sub_src2_u64 v0 ; encoding: [0x00,0x00,0x82,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_src2_u64 v0 offset:4 ; encoding: [0x04,0x00,0x82,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x82,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_sub_src2_u64 v0 offset:65535 gds ; encoding: [0xff,0xff,0x83,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x83,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_src2_u64 v0 offset:65535 ; encoding: [0xff,0xff,0x84,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x84,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_src2_u64 v255 offset:65535 ; encoding: [0xff,0xff,0x84,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x84,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_rsub_src2_u64 v0 ; encoding: [0x00,0x00,0x84,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_src2_u64 v0 offset:4 ; encoding: [0x04,0x00,0x84,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x84,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_rsub_src2_u64 v0 offset:65535 gds ; encoding: [0xff,0xff,0x85,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x85,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_src2_u64 v0 offset:65535 ; encoding: [0xff,0xff,0x86,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x86,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_src2_u64 v255 offset:65535 ; encoding: [0xff,0xff,0x86,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x86,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_inc_src2_u64 v0 ; encoding: [0x00,0x00,0x86,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_src2_u64 v0 offset:4 ; encoding: [0x04,0x00,0x86,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x86,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_inc_src2_u64 v0 offset:65535 gds ; encoding: [0xff,0xff,0x87,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x87,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_src2_u64 v0 offset:65535 ; encoding: [0xff,0xff,0x88,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x88,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_src2_u64 v255 offset:65535 ; encoding: [0xff,0xff,0x88,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x88,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_dec_src2_u64 v0 ; encoding: [0x00,0x00,0x88,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_src2_u64 v0 offset:4 ; encoding: [0x04,0x00,0x88,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x88,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_dec_src2_u64 v0 offset:65535 gds ; encoding: [0xff,0xff,0x89,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x89,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_i64 v0 offset:65535 ; encoding: [0xff,0xff,0x8a,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x8a,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_i64 v255 offset:65535 ; encoding: [0xff,0xff,0x8a,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x8a,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_i64 v0 ; encoding: [0x00,0x00,0x8a,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_i64 v0 offset:4 ; encoding: [0x04,0x00,0x8a,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x8a,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_i64 v0 offset:65535 gds ; encoding: [0xff,0xff,0x8b,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x8b,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_i64 v0 offset:65535 ; encoding: [0xff,0xff,0x8c,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x8c,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_i64 v255 offset:65535 ; encoding: [0xff,0xff,0x8c,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x8c,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_i64 v0 ; encoding: [0x00,0x00,0x8c,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_i64 v0 offset:4 ; encoding: [0x04,0x00,0x8c,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x8c,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_i64 v0 offset:65535 gds ; encoding: [0xff,0xff,0x8d,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x8d,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_u64 v0 offset:65535 ; encoding: [0xff,0xff,0x8e,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x8e,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_u64 v255 offset:65535 ; encoding: [0xff,0xff,0x8e,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x8e,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_u64 v0 ; encoding: [0x00,0x00,0x8e,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x8e,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_u64 v0 offset:4 ; encoding: [0x04,0x00,0x8e,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x8e,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_u64 v0 offset:65535 gds ; encoding: [0xff,0xff,0x8f,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x8f,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_u64 v0 offset:65535 ; encoding: [0xff,0xff,0x90,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x90,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_u64 v255 offset:65535 ; encoding: [0xff,0xff,0x90,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x90,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_u64 v0 ; encoding: [0x00,0x00,0x90,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x90,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_u64 v0 offset:4 ; encoding: [0x04,0x00,0x90,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x90,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_u64 v0 offset:65535 gds ; encoding: [0xff,0xff,0x91,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x91,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_src2_b64 v0 offset:65535 ; encoding: [0xff,0xff,0x92,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x92,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_src2_b64 v255 offset:65535 ; encoding: [0xff,0xff,0x92,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x92,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_and_src2_b64 v0 ; encoding: [0x00,0x00,0x92,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x92,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_src2_b64 v0 offset:4 ; encoding: [0x04,0x00,0x92,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x92,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_and_src2_b64 v0 offset:65535 gds ; encoding: [0xff,0xff,0x93,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x93,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_src2_b64 v0 offset:65535 ; encoding: [0xff,0xff,0x94,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x94,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_src2_b64 v255 offset:65535 ; encoding: [0xff,0xff,0x94,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x94,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_or_src2_b64 v0 ; encoding: [0x00,0x00,0x94,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x94,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_src2_b64 v0 offset:4 ; encoding: [0x04,0x00,0x94,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x94,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_or_src2_b64 v0 offset:65535 gds ; encoding: [0xff,0xff,0x95,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x95,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_src2_b64 v0 offset:65535 ; encoding: [0xff,0xff,0x96,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x96,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_src2_b64 v255 offset:65535 ; encoding: [0xff,0xff,0x96,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0x96,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_xor_src2_b64 v0 ; encoding: [0x00,0x00,0x96,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x96,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_src2_b64 v0 offset:4 ; encoding: [0x04,0x00,0x96,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0x96,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_xor_src2_b64 v0 offset:65535 gds ; encoding: [0xff,0xff,0x97,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0x97,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_write_src2_b64 v0 ; encoding: [0x00,0x00,0x9a,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0x9a,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_f64 v0 offset:65535 ; encoding: [0xff,0xff,0xa4,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0xa4,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_f64 v255 offset:65535 ; encoding: [0xff,0xff,0xa4,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0xa4,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_f64 v0 ; encoding: [0x00,0x00,0xa4,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa4,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_f64 v0 offset:4 ; encoding: [0x04,0x00,0xa4,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0xa4,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_min_src2_f64 v0 offset:65535 gds ; encoding: [0xff,0xff,0xa5,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0xa5,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_f64 v0 offset:65535 ; encoding: [0xff,0xff,0xa6,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0xa6,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_f64 v255 offset:65535 ; encoding: [0xff,0xff,0xa6,0xd9,0xff,0x00,0x00,0x00]
+0xff,0xff,0xa6,0xd9,0xff,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_f64 v0 ; encoding: [0x00,0x00,0xa6,0xd9,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa6,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_f64 v0 offset:4 ; encoding: [0x04,0x00,0xa6,0xd9,0x00,0x00,0x00,0x00]
+0x04,0x00,0xa6,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: ds_max_src2_f64 v0 offset:65535 gds ; encoding: [0xff,0xff,0xa7,0xd9,0x00,0x00,0x00,0x00]
+0xff,0xff,0xa7,0xd9,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_ubyte v0, v[0:1] ; encoding: [0x00,0x00,0x40,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x40,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_ubyte v255, v[0:1] ; encoding: [0x00,0x00,0x40,0xdc,0x00,0x00,0x00,0xff]
+0x00,0x00,0x40,0xdc,0x00,0x00,0x00,0xff
+
+# CHECK: flat_load_ubyte v0, v[254:255] ; encoding: [0x00,0x00,0x40,0xdc,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x40,0xdc,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_load_ubyte v0, v[0:1] glc ; encoding: [0x00,0x00,0x41,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x41,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_ubyte v0, v[0:1] slc ; encoding: [0x00,0x00,0x42,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x42,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_sbyte v0, v[0:1] ; encoding: [0x00,0x00,0x44,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x44,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_sbyte v255, v[0:1] ; encoding: [0x00,0x00,0x44,0xdc,0x00,0x00,0x00,0xff]
+0x00,0x00,0x44,0xdc,0x00,0x00,0x00,0xff
+
+# CHECK: flat_load_sbyte v0, v[254:255] ; encoding: [0x00,0x00,0x44,0xdc,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x44,0xdc,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_load_sbyte v0, v[0:1] glc ; encoding: [0x00,0x00,0x45,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x45,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_sbyte v0, v[0:1] slc ; encoding: [0x00,0x00,0x46,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x46,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_ushort v0, v[0:1] ; encoding: [0x00,0x00,0x48,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x48,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_ushort v255, v[0:1] ; encoding: [0x00,0x00,0x48,0xdc,0x00,0x00,0x00,0xff]
+0x00,0x00,0x48,0xdc,0x00,0x00,0x00,0xff
+
+# CHECK: flat_load_ushort v0, v[254:255] ; encoding: [0x00,0x00,0x48,0xdc,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x48,0xdc,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_load_ushort v0, v[0:1] glc ; encoding: [0x00,0x00,0x49,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x49,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_ushort v0, v[0:1] slc ; encoding: [0x00,0x00,0x4a,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_sshort v0, v[0:1] ; encoding: [0x00,0x00,0x4c,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_sshort v255, v[0:1] ; encoding: [0x00,0x00,0x4c,0xdc,0x00,0x00,0x00,0xff]
+0x00,0x00,0x4c,0xdc,0x00,0x00,0x00,0xff
+
+# CHECK: flat_load_sshort v0, v[254:255] ; encoding: [0x00,0x00,0x4c,0xdc,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xdc,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_load_sshort v0, v[0:1] glc ; encoding: [0x00,0x00,0x4d,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_sshort v0, v[0:1] slc ; encoding: [0x00,0x00,0x4e,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_dword v0, v[0:1] ; encoding: [0x00,0x00,0x50,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x50,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_dword v255, v[0:1] ; encoding: [0x00,0x00,0x50,0xdc,0x00,0x00,0x00,0xff]
+0x00,0x00,0x50,0xdc,0x00,0x00,0x00,0xff
+
+# CHECK: flat_load_dword v0, v[254:255] ; encoding: [0x00,0x00,0x50,0xdc,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x50,0xdc,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_load_dword v0, v[0:1] glc ; encoding: [0x00,0x00,0x51,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x51,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_dword v0, v[0:1] slc ; encoding: [0x00,0x00,0x52,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x52,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_dwordx2 v[0:1], v[0:1] ; encoding: [0x00,0x00,0x54,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x54,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_dwordx2 v[254:255], v[0:1] ; encoding: [0x00,0x00,0x54,0xdc,0x00,0x00,0x00,0xfe]
+0x00,0x00,0x54,0xdc,0x00,0x00,0x00,0xfe
+
+# CHECK: flat_load_dwordx2 v[0:1], v[254:255] ; encoding: [0x00,0x00,0x54,0xdc,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x54,0xdc,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_load_dwordx2 v[0:1], v[0:1] glc ; encoding: [0x00,0x00,0x55,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x55,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_dwordx2 v[0:1], v[0:1] slc ; encoding: [0x00,0x00,0x56,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x56,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_dwordx3 v[0:2], v[0:1] ; encoding: [0x00,0x00,0x58,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x58,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_dwordx3 v[253:255], v[0:1] ; encoding: [0x00,0x00,0x58,0xdc,0x00,0x00,0x00,0xfd]
+0x00,0x00,0x58,0xdc,0x00,0x00,0x00,0xfd
+
+# CHECK: flat_load_dwordx3 v[0:2], v[254:255] ; encoding: [0x00,0x00,0x58,0xdc,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x58,0xdc,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_load_dwordx3 v[0:2], v[0:1] glc ; encoding: [0x00,0x00,0x59,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x59,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_dwordx3 v[0:2], v[0:1] slc ; encoding: [0x00,0x00,0x5a,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_dwordx4 v[0:3], v[0:1] ; encoding: [0x00,0x00,0x5c,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_dwordx4 v[252:255], v[0:1] ; encoding: [0x00,0x00,0x5c,0xdc,0x00,0x00,0x00,0xfc]
+0x00,0x00,0x5c,0xdc,0x00,0x00,0x00,0xfc
+
+# CHECK: flat_load_dwordx4 v[0:3], v[254:255] ; encoding: [0x00,0x00,0x5c,0xdc,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xdc,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_load_dwordx4 v[0:3], v[0:1] glc ; encoding: [0x00,0x00,0x5d,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_load_dwordx4 v[0:3], v[0:1] slc ; encoding: [0x00,0x00,0x5e,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xdc,0x00,0x00,0x00,0x00
+
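+# FLAT store instructions (flat_store_byte through flat_store_dwordx4)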
+# CHECK: flat_store_byte v[0:1], v0 ; encoding: [0x00,0x00,0x60,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x60,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_store_byte v[254:255], v0 ; encoding: [0x00,0x00,0x60,0xdc,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x60,0xdc,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_store_byte v[0:1], v255 ; encoding: [0x00,0x00,0x60,0xdc,0x00,0xff,0x00,0x00]
+0x00,0x00,0x60,0xdc,0x00,0xff,0x00,0x00
+
+# CHECK: flat_store_byte v[0:1], v0 glc ; encoding: [0x00,0x00,0x61,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x61,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_store_byte v[0:1], v0 slc ; encoding: [0x00,0x00,0x62,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x62,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_store_short v[0:1], v0 ; encoding: [0x00,0x00,0x68,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x68,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_store_short v[254:255], v0 ; encoding: [0x00,0x00,0x68,0xdc,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x68,0xdc,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_store_short v[0:1], v255 ; encoding: [0x00,0x00,0x68,0xdc,0x00,0xff,0x00,0x00]
+0x00,0x00,0x68,0xdc,0x00,0xff,0x00,0x00
+
+# CHECK: flat_store_short v[0:1], v0 glc ; encoding: [0x00,0x00,0x69,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x69,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_store_short v[0:1], v0 slc ; encoding: [0x00,0x00,0x6a,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_store_dword v[0:1], v0 ; encoding: [0x00,0x00,0x70,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x70,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_store_dword v[254:255], v0 ; encoding: [0x00,0x00,0x70,0xdc,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x70,0xdc,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_store_dword v[0:1], v255 ; encoding: [0x00,0x00,0x70,0xdc,0x00,0xff,0x00,0x00]
+0x00,0x00,0x70,0xdc,0x00,0xff,0x00,0x00
+
+# CHECK: flat_store_dword v[0:1], v0 glc ; encoding: [0x00,0x00,0x71,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x71,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_store_dword v[0:1], v0 slc ; encoding: [0x00,0x00,0x72,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x72,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_store_dwordx2 v[0:1], v[0:1] ; encoding: [0x00,0x00,0x74,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x74,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_store_dwordx2 v[254:255], v[0:1] ; encoding: [0x00,0x00,0x74,0xdc,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x74,0xdc,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_store_dwordx2 v[0:1], v[254:255] ; encoding: [0x00,0x00,0x74,0xdc,0x00,0xfe,0x00,0x00]
+0x00,0x00,0x74,0xdc,0x00,0xfe,0x00,0x00
+
+# CHECK: flat_store_dwordx2 v[0:1], v[0:1] glc ; encoding: [0x00,0x00,0x75,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x75,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_store_dwordx2 v[0:1], v[0:1] slc ; encoding: [0x00,0x00,0x76,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x76,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_store_dwordx3 v[0:1], v[0:2] ; encoding: [0x00,0x00,0x78,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x78,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_store_dwordx3 v[254:255], v[0:2] ; encoding: [0x00,0x00,0x78,0xdc,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x78,0xdc,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_store_dwordx3 v[0:1], v[253:255] ; encoding: [0x00,0x00,0x78,0xdc,0x00,0xfd,0x00,0x00]
+0x00,0x00,0x78,0xdc,0x00,0xfd,0x00,0x00
+
+# CHECK: flat_store_dwordx3 v[0:1], v[0:2] glc ; encoding: [0x00,0x00,0x79,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x79,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_store_dwordx3 v[0:1], v[0:2] slc ; encoding: [0x00,0x00,0x7a,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_store_dwordx4 v[0:1], v[0:3] ; encoding: [0x00,0x00,0x7c,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_store_dwordx4 v[254:255], v[0:3] ; encoding: [0x00,0x00,0x7c,0xdc,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xdc,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_store_dwordx4 v[0:1], v[252:255] ; encoding: [0x00,0x00,0x7c,0xdc,0x00,0xfc,0x00,0x00]
+0x00,0x00,0x7c,0xdc,0x00,0xfc,0x00,0x00
+
+# CHECK: flat_store_dwordx4 v[0:1], v[0:3] glc ; encoding: [0x00,0x00,0x7d,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xdc,0x00,0x00,0x00,0x00
+
+# CHECK: flat_store_dwordx4 v[0:1], v[0:3] slc ; encoding: [0x00,0x00,0x7e,0xdc,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xdc,0x00,0x00,0x00,0x00
+
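+# FLAT atomic instructions, 32-bit data; with glc set, the pre-op value is returned in an extra vdst operand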
+# CHECK: flat_atomic_swap v[0:1], v0 ; encoding: [0x00,0x00,0x00,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x00,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_swap v[254:255], v0 ; encoding: [0x00,0x00,0x00,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x00,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_swap v[0:1], v255 ; encoding: [0x00,0x00,0x00,0xdd,0x00,0xff,0x00,0x00]
+0x00,0x00,0x00,0xdd,0x00,0xff,0x00,0x00
+
+# CHECK: flat_atomic_swap v0, v[0:1], v0 glc ; encoding: [0x00,0x00,0x01,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x01,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_swap v[0:1], v0 slc ; encoding: [0x00,0x00,0x02,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x02,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_cmpswap v[0:1], v[0:1] ; encoding: [0x00,0x00,0x04,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x04,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_cmpswap v[254:255], v[0:1] ; encoding: [0x00,0x00,0x04,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x04,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_cmpswap v[0:1], v[254:255] ; encoding: [0x00,0x00,0x04,0xdd,0x00,0xfe,0x00,0x00]
+0x00,0x00,0x04,0xdd,0x00,0xfe,0x00,0x00
+
+# CHECK: flat_atomic_cmpswap v0, v[0:1], v[0:1] glc ; encoding: [0x00,0x00,0x05,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x05,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_cmpswap v[0:1], v[0:1] slc ; encoding: [0x00,0x00,0x06,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x06,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_add v[0:1], v0 ; encoding: [0x00,0x00,0x08,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x08,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_add v[254:255], v0 ; encoding: [0x00,0x00,0x08,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x08,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_add v[0:1], v255 ; encoding: [0x00,0x00,0x08,0xdd,0x00,0xff,0x00,0x00]
+0x00,0x00,0x08,0xdd,0x00,0xff,0x00,0x00
+
+# CHECK: flat_atomic_add v0, v[0:1], v0 glc ; encoding: [0x00,0x00,0x09,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x09,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_add v[0:1], v0 slc ; encoding: [0x00,0x00,0x0a,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0a,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_sub v[0:1], v0 ; encoding: [0x00,0x00,0x0c,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_sub v[254:255], v0 ; encoding: [0x00,0x00,0x0c,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_sub v[0:1], v255 ; encoding: [0x00,0x00,0x0c,0xdd,0x00,0xff,0x00,0x00]
+0x00,0x00,0x0c,0xdd,0x00,0xff,0x00,0x00
+
+# CHECK: flat_atomic_sub v0, v[0:1], v0 glc ; encoding: [0x00,0x00,0x0d,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0d,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_sub v[0:1], v0 slc ; encoding: [0x00,0x00,0x0e,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0e,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_smin v[0:1], v0 ; encoding: [0x00,0x00,0x10,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x10,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_smin v[254:255], v0 ; encoding: [0x00,0x00,0x10,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x10,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_smin v[0:1], v255 ; encoding: [0x00,0x00,0x10,0xdd,0x00,0xff,0x00,0x00]
+0x00,0x00,0x10,0xdd,0x00,0xff,0x00,0x00
+
+# CHECK: flat_atomic_smin v0, v[0:1], v0 glc ; encoding: [0x00,0x00,0x11,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x11,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_smin v[0:1], v0 slc ; encoding: [0x00,0x00,0x12,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x12,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_umin v[0:1], v0 ; encoding: [0x00,0x00,0x14,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x14,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_umin v[254:255], v0 ; encoding: [0x00,0x00,0x14,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x14,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_umin v[0:1], v255 ; encoding: [0x00,0x00,0x14,0xdd,0x00,0xff,0x00,0x00]
+0x00,0x00,0x14,0xdd,0x00,0xff,0x00,0x00
+
+# CHECK: flat_atomic_umin v0, v[0:1], v0 glc ; encoding: [0x00,0x00,0x15,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x15,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_umin v[0:1], v0 slc ; encoding: [0x00,0x00,0x16,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x16,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_smax v[0:1], v0 ; encoding: [0x00,0x00,0x18,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x18,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_smax v[254:255], v0 ; encoding: [0x00,0x00,0x18,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x18,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_smax v[0:1], v255 ; encoding: [0x00,0x00,0x18,0xdd,0x00,0xff,0x00,0x00]
+0x00,0x00,0x18,0xdd,0x00,0xff,0x00,0x00
+
+# CHECK: flat_atomic_smax v0, v[0:1], v0 glc ; encoding: [0x00,0x00,0x19,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x19,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_smax v[0:1], v0 slc ; encoding: [0x00,0x00,0x1a,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x1a,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_umax v[0:1], v0 ; encoding: [0x00,0x00,0x1c,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x1c,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_umax v[254:255], v0 ; encoding: [0x00,0x00,0x1c,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x1c,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_umax v[0:1], v255 ; encoding: [0x00,0x00,0x1c,0xdd,0x00,0xff,0x00,0x00]
+0x00,0x00,0x1c,0xdd,0x00,0xff,0x00,0x00
+
+# CHECK: flat_atomic_umax v0, v[0:1], v0 glc ; encoding: [0x00,0x00,0x1d,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x1d,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_umax v[0:1], v0 slc ; encoding: [0x00,0x00,0x1e,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x1e,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_and v[0:1], v0 ; encoding: [0x00,0x00,0x20,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x20,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_and v[254:255], v0 ; encoding: [0x00,0x00,0x20,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x20,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_and v[0:1], v255 ; encoding: [0x00,0x00,0x20,0xdd,0x00,0xff,0x00,0x00]
+0x00,0x00,0x20,0xdd,0x00,0xff,0x00,0x00
+
+# CHECK: flat_atomic_and v0, v[0:1], v0 glc ; encoding: [0x00,0x00,0x21,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x21,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_and v[0:1], v0 slc ; encoding: [0x00,0x00,0x22,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x22,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_or v[0:1], v0 ; encoding: [0x00,0x00,0x24,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x24,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_or v[254:255], v0 ; encoding: [0x00,0x00,0x24,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x24,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_or v[0:1], v255 ; encoding: [0x00,0x00,0x24,0xdd,0x00,0xff,0x00,0x00]
+0x00,0x00,0x24,0xdd,0x00,0xff,0x00,0x00
+
+# CHECK: flat_atomic_or v0, v[0:1], v0 glc ; encoding: [0x00,0x00,0x25,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x25,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_or v[0:1], v0 slc ; encoding: [0x00,0x00,0x26,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x26,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_xor v[0:1], v0 ; encoding: [0x00,0x00,0x28,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x28,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_xor v[254:255], v0 ; encoding: [0x00,0x00,0x28,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x28,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_xor v[0:1], v255 ; encoding: [0x00,0x00,0x28,0xdd,0x00,0xff,0x00,0x00]
+0x00,0x00,0x28,0xdd,0x00,0xff,0x00,0x00
+
+# CHECK: flat_atomic_xor v0, v[0:1], v0 glc ; encoding: [0x00,0x00,0x29,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x29,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_xor v[0:1], v0 slc ; encoding: [0x00,0x00,0x2a,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2a,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_inc v[0:1], v0 ; encoding: [0x00,0x00,0x2c,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_inc v[254:255], v0 ; encoding: [0x00,0x00,0x2c,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_inc v[0:1], v255 ; encoding: [0x00,0x00,0x2c,0xdd,0x00,0xff,0x00,0x00]
+0x00,0x00,0x2c,0xdd,0x00,0xff,0x00,0x00
+
+# CHECK: flat_atomic_inc v0, v[0:1], v0 glc ; encoding: [0x00,0x00,0x2d,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2d,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_inc v[0:1], v0 slc ; encoding: [0x00,0x00,0x2e,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2e,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_dec v[0:1], v0 ; encoding: [0x00,0x00,0x30,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x30,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_dec v[254:255], v0 ; encoding: [0x00,0x00,0x30,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x30,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_dec v[0:1], v255 ; encoding: [0x00,0x00,0x30,0xdd,0x00,0xff,0x00,0x00]
+0x00,0x00,0x30,0xdd,0x00,0xff,0x00,0x00
+
+# CHECK: flat_atomic_dec v0, v[0:1], v0 glc ; encoding: [0x00,0x00,0x31,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x31,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_dec v[0:1], v0 slc ; encoding: [0x00,0x00,0x32,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x32,0xdd,0x00,0x00,0x00,0x00
+
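+# FLAT atomic instructions, 64-bit (_x2) variants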
+# CHECK: flat_atomic_swap_x2 v[0:1], v[0:1] ; encoding: [0x00,0x00,0x80,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x80,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_swap_x2 v[254:255], v[0:1] ; encoding: [0x00,0x00,0x80,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x80,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_swap_x2 v[0:1], v[254:255] ; encoding: [0x00,0x00,0x80,0xdd,0x00,0xfe,0x00,0x00]
+0x00,0x00,0x80,0xdd,0x00,0xfe,0x00,0x00
+
+# CHECK: flat_atomic_swap_x2 v[0:1], v[0:1], v[0:1] glc ; encoding: [0x00,0x00,0x81,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x81,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_swap_x2 v[0:1], v[0:1] slc ; encoding: [0x00,0x00,0x82,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x82,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_cmpswap_x2 v[0:1], v[0:3] ; encoding: [0x00,0x00,0x84,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x84,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_cmpswap_x2 v[254:255], v[0:3] ; encoding: [0x00,0x00,0x84,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x84,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_cmpswap_x2 v[0:1], v[252:255] ; encoding: [0x00,0x00,0x84,0xdd,0x00,0xfc,0x00,0x00]
+0x00,0x00,0x84,0xdd,0x00,0xfc,0x00,0x00
+
+# CHECK: flat_atomic_cmpswap_x2 v[0:1], v[0:1], v[0:3] glc ; encoding: [0x00,0x00,0x85,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x85,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_cmpswap_x2 v[0:1], v[0:3] slc ; encoding: [0x00,0x00,0x86,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x86,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_add_x2 v[0:1], v[0:1] ; encoding: [0x00,0x00,0x88,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x88,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_add_x2 v[254:255], v[0:1] ; encoding: [0x00,0x00,0x88,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x88,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_add_x2 v[0:1], v[254:255] ; encoding: [0x00,0x00,0x88,0xdd,0x00,0xfe,0x00,0x00]
+0x00,0x00,0x88,0xdd,0x00,0xfe,0x00,0x00
+
+# CHECK: flat_atomic_add_x2 v[0:1], v[0:1], v[0:1] glc ; encoding: [0x00,0x00,0x89,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x89,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_add_x2 v[0:1], v[0:1] slc ; encoding: [0x00,0x00,0x8a,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_sub_x2 v[0:1], v[0:1] ; encoding: [0x00,0x00,0x8c,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_sub_x2 v[254:255], v[0:1] ; encoding: [0x00,0x00,0x8c,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_sub_x2 v[0:1], v[254:255] ; encoding: [0x00,0x00,0x8c,0xdd,0x00,0xfe,0x00,0x00]
+0x00,0x00,0x8c,0xdd,0x00,0xfe,0x00,0x00
+
+# CHECK: flat_atomic_sub_x2 v[0:1], v[0:1], v[0:1] glc ; encoding: [0x00,0x00,0x8d,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x8d,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_sub_x2 v[0:1], v[0:1] slc ; encoding: [0x00,0x00,0x8e,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x8e,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_smin_x2 v[0:1], v[0:1] ; encoding: [0x00,0x00,0x90,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x90,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_smin_x2 v[254:255], v[0:1] ; encoding: [0x00,0x00,0x90,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x90,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_smin_x2 v[0:1], v[254:255] ; encoding: [0x00,0x00,0x90,0xdd,0x00,0xfe,0x00,0x00]
+0x00,0x00,0x90,0xdd,0x00,0xfe,0x00,0x00
+
+# CHECK: flat_atomic_smin_x2 v[0:1], v[0:1], v[0:1] glc ; encoding: [0x00,0x00,0x91,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x91,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_smin_x2 v[0:1], v[0:1] slc ; encoding: [0x00,0x00,0x92,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x92,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_umin_x2 v[0:1], v[0:1] ; encoding: [0x00,0x00,0x94,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x94,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_umin_x2 v[254:255], v[0:1] ; encoding: [0x00,0x00,0x94,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x94,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_umin_x2 v[0:1], v[254:255] ; encoding: [0x00,0x00,0x94,0xdd,0x00,0xfe,0x00,0x00]
+0x00,0x00,0x94,0xdd,0x00,0xfe,0x00,0x00
+
+# CHECK: flat_atomic_umin_x2 v[0:1], v[0:1], v[0:1] glc ; encoding: [0x00,0x00,0x95,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x95,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_umin_x2 v[0:1], v[0:1] slc ; encoding: [0x00,0x00,0x96,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x96,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_smax_x2 v[0:1], v[0:1] ; encoding: [0x00,0x00,0x98,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x98,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_smax_x2 v[254:255], v[0:1] ; encoding: [0x00,0x00,0x98,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x98,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_smax_x2 v[0:1], v[254:255] ; encoding: [0x00,0x00,0x98,0xdd,0x00,0xfe,0x00,0x00]
+0x00,0x00,0x98,0xdd,0x00,0xfe,0x00,0x00
+
+# CHECK: flat_atomic_smax_x2 v[0:1], v[0:1], v[0:1] glc ; encoding: [0x00,0x00,0x99,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x99,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_smax_x2 v[0:1], v[0:1] slc ; encoding: [0x00,0x00,0x9a,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x9a,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_umax_x2 v[0:1], v[0:1] ; encoding: [0x00,0x00,0x9c,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x9c,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_umax_x2 v[254:255], v[0:1] ; encoding: [0x00,0x00,0x9c,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0x9c,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_umax_x2 v[0:1], v[254:255] ; encoding: [0x00,0x00,0x9c,0xdd,0x00,0xfe,0x00,0x00]
+0x00,0x00,0x9c,0xdd,0x00,0xfe,0x00,0x00
+
+# CHECK: flat_atomic_umax_x2 v[0:1], v[0:1], v[0:1] glc ; encoding: [0x00,0x00,0x9d,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x9d,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_umax_x2 v[0:1], v[0:1] slc ; encoding: [0x00,0x00,0x9e,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0x9e,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_and_x2 v[0:1], v[0:1] ; encoding: [0x00,0x00,0xa0,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa0,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_and_x2 v[254:255], v[0:1] ; encoding: [0x00,0x00,0xa0,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0xa0,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_and_x2 v[0:1], v[254:255] ; encoding: [0x00,0x00,0xa0,0xdd,0x00,0xfe,0x00,0x00]
+0x00,0x00,0xa0,0xdd,0x00,0xfe,0x00,0x00
+
+# CHECK: flat_atomic_and_x2 v[0:1], v[0:1], v[0:1] glc ; encoding: [0x00,0x00,0xa1,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa1,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_and_x2 v[0:1], v[0:1] slc ; encoding: [0x00,0x00,0xa2,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa2,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_or_x2 v[0:1], v[0:1] ; encoding: [0x00,0x00,0xa4,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa4,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_or_x2 v[254:255], v[0:1] ; encoding: [0x00,0x00,0xa4,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0xa4,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_or_x2 v[0:1], v[254:255] ; encoding: [0x00,0x00,0xa4,0xdd,0x00,0xfe,0x00,0x00]
+0x00,0x00,0xa4,0xdd,0x00,0xfe,0x00,0x00
+
+# CHECK: flat_atomic_or_x2 v[0:1], v[0:1], v[0:1] glc ; encoding: [0x00,0x00,0xa5,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa5,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_or_x2 v[0:1], v[0:1] slc ; encoding: [0x00,0x00,0xa6,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa6,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_xor_x2 v[0:1], v[0:1] ; encoding: [0x00,0x00,0xa8,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa8,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_xor_x2 v[254:255], v[0:1] ; encoding: [0x00,0x00,0xa8,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0xa8,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_xor_x2 v[0:1], v[254:255] ; encoding: [0x00,0x00,0xa8,0xdd,0x00,0xfe,0x00,0x00]
+0x00,0x00,0xa8,0xdd,0x00,0xfe,0x00,0x00
+
+# CHECK: flat_atomic_xor_x2 v[0:1], v[0:1], v[0:1] glc ; encoding: [0x00,0x00,0xa9,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa9,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_xor_x2 v[0:1], v[0:1] slc ; encoding: [0x00,0x00,0xaa,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0xaa,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_inc_x2 v[0:1], v[0:1] ; encoding: [0x00,0x00,0xac,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0xac,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_inc_x2 v[254:255], v[0:1] ; encoding: [0x00,0x00,0xac,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0xac,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_inc_x2 v[0:1], v[254:255] ; encoding: [0x00,0x00,0xac,0xdd,0x00,0xfe,0x00,0x00]
+0x00,0x00,0xac,0xdd,0x00,0xfe,0x00,0x00
+
+# CHECK: flat_atomic_inc_x2 v[0:1], v[0:1], v[0:1] glc ; encoding: [0x00,0x00,0xad,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0xad,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_inc_x2 v[0:1], v[0:1] slc ; encoding: [0x00,0x00,0xae,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0xae,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_dec_x2 v[0:1], v[0:1] ; encoding: [0x00,0x00,0xb0,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0xb0,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_dec_x2 v[254:255], v[0:1] ; encoding: [0x00,0x00,0xb0,0xdd,0xfe,0x00,0x00,0x00]
+0x00,0x00,0xb0,0xdd,0xfe,0x00,0x00,0x00
+
+# CHECK: flat_atomic_dec_x2 v[0:1], v[254:255] ; encoding: [0x00,0x00,0xb0,0xdd,0x00,0xfe,0x00,0x00]
+0x00,0x00,0xb0,0xdd,0x00,0xfe,0x00,0x00
+
+# CHECK: flat_atomic_dec_x2 v[0:1], v[0:1], v[0:1] glc ; encoding: [0x00,0x00,0xb1,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0xb1,0xdd,0x00,0x00,0x00,0x00
+
+# CHECK: flat_atomic_dec_x2 v[0:1], v[0:1] slc ; encoding: [0x00,0x00,0xb2,0xdd,0x00,0x00,0x00,0x00]
+0x00,0x00,0xb2,0xdd,0x00,0x00,0x00,0x00
+
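+# MIMG instructions (image_get_lod with dmask, unorm, glc, slc, lwe and da modifiers)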
+# CHECK: image_get_lod v0, v0, s[0:7], s[0:3] dmask:0x1 ; encoding: [0x00,0x01,0x80,0xf1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x80,0xf1,0x00,0x00,0x00,0x00
+
+# CHECK: image_get_lod v252, v0, s[0:7], s[0:3] dmask:0x1 ; encoding: [0x00,0x01,0x80,0xf1,0x00,0xfc,0x00,0x00]
+0x00,0x01,0x80,0xf1,0x00,0xfc,0x00,0x00
+
+# CHECK: image_get_lod v0, v255, s[0:7], s[0:3] dmask:0x1 ; encoding: [0x00,0x01,0x80,0xf1,0xff,0x00,0x00,0x00]
+0x00,0x01,0x80,0xf1,0xff,0x00,0x00,0x00
+
+# CHECK: image_get_lod v0, v0, s[4:11], s[0:3] dmask:0x1 ; encoding: [0x00,0x01,0x80,0xf1,0x00,0x00,0x01,0x00]
+0x00,0x01,0x80,0xf1,0x00,0x00,0x01,0x00
+
+# CHECK: image_get_lod v0, v0, s[92:99], s[0:3] dmask:0x1 ; encoding: [0x00,0x01,0x80,0xf1,0x00,0x00,0x17,0x00]
+0x00,0x01,0x80,0xf1,0x00,0x00,0x17,0x00
+
+# CHECK: image_get_lod v0, v0, s[0:7], s[4:7] dmask:0x1 ; encoding: [0x00,0x01,0x80,0xf1,0x00,0x00,0x20,0x00]
+0x00,0x01,0x80,0xf1,0x00,0x00,0x20,0x00
+
+# CHECK: image_get_lod v0, v0, s[0:7], s[96:99] dmask:0x1 ; encoding: [0x00,0x01,0x80,0xf1,0x00,0x00,0x00,0x03]
+0x00,0x01,0x80,0xf1,0x00,0x00,0x00,0x03
+
+# CHECK: image_get_lod v0, v0, s[0:7], ttmp[8:11] dmask:0x1 ; encoding: [0x00,0x01,0x80,0xf1,0x00,0x00,0xc0,0x03]
+0x00,0x01,0x80,0xf1,0x00,0x00,0xc0,0x03
+
+# CHECK: image_get_lod v0, v0, s[0:7], s[0:3] dmask:0x2 ; encoding: [0x00,0x02,0x80,0xf1,0x00,0x00,0x00,0x00]
+0x00,0x02,0x80,0xf1,0x00,0x00,0x00,0x00
+
+# CHECK: image_get_lod v0, v0, s[0:7], s[0:3] dmask:0x4 ; encoding: [0x00,0x04,0x80,0xf1,0x00,0x00,0x00,0x00]
+0x00,0x04,0x80,0xf1,0x00,0x00,0x00,0x00
+
+# CHECK: image_get_lod v0, v0, s[0:7], s[0:3] dmask:0x8 ; encoding: [0x00,0x08,0x80,0xf1,0x00,0x00,0x00,0x00]
+0x00,0x08,0x80,0xf1,0x00,0x00,0x00,0x00
+
+# CHECK: image_get_lod v0, v0, s[0:7], s[0:3] dmask:0x1 unorm ; encoding: [0x00,0x11,0x80,0xf1,0x00,0x00,0x00,0x00]
+0x00,0x11,0x80,0xf1,0x00,0x00,0x00,0x00
+
+# CHECK: image_get_lod v0, v0, s[0:7], s[0:3] dmask:0x1 glc ; encoding: [0x00,0x21,0x80,0xf1,0x00,0x00,0x00,0x00]
+0x00,0x21,0x80,0xf1,0x00,0x00,0x00,0x00
+
+# CHECK: image_get_lod v0, v0, s[0:7], s[0:3] dmask:0x1 slc ; encoding: [0x00,0x01,0x80,0xf3,0x00,0x00,0x00,0x00]
+0x00,0x01,0x80,0xf3,0x00,0x00,0x00,0x00
+
+# CHECK: image_get_lod v0, v0, s[0:7], s[0:3] dmask:0x1 lwe ; encoding: [0x00,0x01,0x82,0xf1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x82,0xf1,0x00,0x00,0x00,0x00
+
+# CHECK: image_get_lod v0, v0, s[0:7], s[0:3] dmask:0x1 da ; encoding: [0x00,0x41,0x80,0xf1,0x00,0x00,0x00,0x00]
+0x00,0x41,0x80,0xf1,0x00,0x00,0x00,0x00
+
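+# MUBUF format load instructions (buffer_load_format_x through _xyzw); each opcode is tested with varying vdata, srsrc and soffset operands, offsets, and the idxen/offen/glc/slc bits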
+# CHECK: buffer_load_format_x v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x00,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_x v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe0,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x00,0xe0,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_load_format_x v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x00,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_load_format_x v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x00,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_load_format_x v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x00,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_load_format_x v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x00,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_load_format_x v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x00,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_load_format_x v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x00,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_load_format_x v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x00,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_load_format_x v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x00,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_load_format_x v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x00,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_load_format_x v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x00,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x00,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_x v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x00,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x00,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_x v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x00,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x00,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_x v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x00,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x00,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_x v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x00,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x00,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_x v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x02,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x02,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xy v[0:1], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x04,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xy v[254:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe0,0x00,0xfe,0x00,0x00]
+0xff,0x0f,0x04,0xe0,0x00,0xfe,0x00,0x00
+
+# CHECK: buffer_load_format_xy v[0:1], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x04,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_load_format_xy v[0:1], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x04,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_load_format_xy v[0:1], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x04,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_load_format_xy v[0:1], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x04,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_load_format_xy v[0:1], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x04,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_load_format_xy v[0:1], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x04,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_load_format_xy v[0:1], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x04,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_load_format_xy v[0:1], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x04,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_load_format_xy v[0:1], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x04,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_load_format_xy v[0:1], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x04,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x04,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xy v[0:1], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x04,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x04,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xy v[0:1], off, s[0:3], s0 ; encoding: [0x00,0x00,0x04,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x04,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xy v[0:1], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x04,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x04,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xy v[0:1], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x04,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x04,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xy v[0:1], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x06,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x06,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xyz v[0:2], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x08,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xyz v[253:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe0,0x00,0xfd,0x00,0x00]
+0xff,0x0f,0x08,0xe0,0x00,0xfd,0x00,0x00
+
+# CHECK: buffer_load_format_xyz v[0:2], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x08,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_load_format_xyz v[0:2], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x08,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_load_format_xyz v[0:2], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x08,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_load_format_xyz v[0:2], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x08,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_load_format_xyz v[0:2], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x08,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_load_format_xyz v[0:2], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x08,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_load_format_xyz v[0:2], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x08,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_load_format_xyz v[0:2], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x08,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_load_format_xyz v[0:2], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x08,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_load_format_xyz v[0:2], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x08,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x08,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xyz v[0:2], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x08,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x08,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xyz v[0:2], off, s[0:3], s0 ; encoding: [0x00,0x00,0x08,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x08,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xyz v[0:2], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x08,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x08,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xyz v[0:2], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x08,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x08,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xyz v[0:2], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x0a,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x0a,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xyzw v[0:3], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x0c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xyzw v[252:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe0,0x00,0xfc,0x00,0x00]
+0xff,0x0f,0x0c,0xe0,0x00,0xfc,0x00,0x00
+
+# CHECK: buffer_load_format_xyzw v[0:3], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x0c,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_load_format_xyzw v[0:3], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x0c,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_load_format_xyzw v[0:3], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x0c,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_load_format_xyzw v[0:3], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x0c,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_load_format_xyzw v[0:3], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x0c,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_load_format_xyzw v[0:3], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x0c,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_load_format_xyzw v[0:3], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x0c,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_load_format_xyzw v[0:3], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x0c,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_load_format_xyzw v[0:3], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x0c,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_load_format_xyzw v[0:3], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x0c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x0c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xyzw v[0:3], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x0c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x0c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xyzw v[0:3], off, s[0:3], s0 ; encoding: [0x00,0x00,0x0c,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xyzw v[0:3], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x0c,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x0c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xyzw v[0:3], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x0c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x0c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_format_xyzw v[0:3], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x0e,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x0e,0xe0,0x00,0x00,0x00,0x00
+
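+# MUBUF format store instructions (buffer_store_format_x through _xyzw)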
+# CHECK: buffer_store_format_x v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x10,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_x v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe0,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x10,0xe0,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_store_format_x v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x10,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_store_format_x v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x10,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_store_format_x v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x10,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_store_format_x v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x10,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_store_format_x v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x10,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_store_format_x v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x10,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_store_format_x v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x10,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_store_format_x v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x10,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_store_format_x v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x10,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_store_format_x v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x10,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x10,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_x v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x10,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x10,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_x v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x10,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x10,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_x v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x10,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x10,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_x v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x10,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x10,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_x v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x12,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x12,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xy v[0:1], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x14,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xy v[254:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe0,0x00,0xfe,0x00,0x00]
+0xff,0x0f,0x14,0xe0,0x00,0xfe,0x00,0x00
+
+# CHECK: buffer_store_format_xy v[0:1], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x14,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_store_format_xy v[0:1], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x14,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_store_format_xy v[0:1], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x14,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_store_format_xy v[0:1], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x14,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_store_format_xy v[0:1], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x14,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_store_format_xy v[0:1], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x14,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_store_format_xy v[0:1], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x14,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_store_format_xy v[0:1], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x14,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_store_format_xy v[0:1], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x14,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_store_format_xy v[0:1], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x14,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x14,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xy v[0:1], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x14,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x14,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xy v[0:1], off, s[0:3], s0 ; encoding: [0x00,0x00,0x14,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x14,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xy v[0:1], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x14,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x14,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xy v[0:1], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x14,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x14,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xy v[0:1], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x16,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x16,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xyz v[0:2], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x18,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xyz v[253:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe0,0x00,0xfd,0x00,0x00]
+0xff,0x0f,0x18,0xe0,0x00,0xfd,0x00,0x00
+
+# CHECK: buffer_store_format_xyz v[0:2], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x18,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_store_format_xyz v[0:2], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x18,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_store_format_xyz v[0:2], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x18,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_store_format_xyz v[0:2], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x18,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_store_format_xyz v[0:2], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x18,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_store_format_xyz v[0:2], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x18,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_store_format_xyz v[0:2], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x18,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_store_format_xyz v[0:2], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x18,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_store_format_xyz v[0:2], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x18,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_store_format_xyz v[0:2], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x18,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x18,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xyz v[0:2], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x18,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x18,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xyz v[0:2], off, s[0:3], s0 ; encoding: [0x00,0x00,0x18,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x18,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xyz v[0:2], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x18,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x18,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xyz v[0:2], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x18,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x18,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xyz v[0:2], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x1a,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x1a,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xyzw v[0:3], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x1c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xyzw v[252:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe0,0x00,0xfc,0x00,0x00]
+0xff,0x0f,0x1c,0xe0,0x00,0xfc,0x00,0x00
+
+# CHECK: buffer_store_format_xyzw v[0:3], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x1c,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_store_format_xyzw v[0:3], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x1c,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_store_format_xyzw v[0:3], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x1c,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_store_format_xyzw v[0:3], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x1c,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_store_format_xyzw v[0:3], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x1c,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_store_format_xyzw v[0:3], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x1c,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_store_format_xyzw v[0:3], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x1c,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_store_format_xyzw v[0:3], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x1c,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_store_format_xyzw v[0:3], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x1c,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_store_format_xyzw v[0:3], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x1c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x1c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xyzw v[0:3], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x1c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x1c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xyzw v[0:3], off, s[0:3], s0 ; encoding: [0x00,0x00,0x1c,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x1c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xyzw v[0:3], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x1c,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x1c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xyzw v[0:3], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x1c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x1c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_format_xyzw v[0:3], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x1e,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x1e,0xe0,0x00,0x00,0x00,0x00
+
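+# MUBUF untyped load instructions (buffer_load_ubyte, buffer_load_sbyte, buffer_load_ushort, ...)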
+# CHECK: buffer_load_ubyte v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x40,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x40,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_ubyte v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x40,0xe0,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x40,0xe0,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_load_ubyte v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x40,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x40,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_load_ubyte v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x40,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x40,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_load_ubyte v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x40,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x40,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_load_ubyte v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x40,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x40,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_load_ubyte v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x40,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x40,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_load_ubyte v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x40,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x40,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_load_ubyte v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x40,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x40,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_load_ubyte v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x40,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x40,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_load_ubyte v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x40,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x40,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_load_ubyte v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x40,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x40,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_ubyte v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x40,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x40,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_ubyte v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x40,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x40,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_ubyte v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x40,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x40,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_ubyte v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x40,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x40,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_ubyte v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x42,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x42,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_sbyte v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x44,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x44,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_sbyte v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x44,0xe0,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x44,0xe0,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_load_sbyte v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x44,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x44,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_load_sbyte v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x44,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x44,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_load_sbyte v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x44,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x44,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_load_sbyte v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x44,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x44,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_load_sbyte v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x44,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x44,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_load_sbyte v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x44,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x44,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_load_sbyte v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x44,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x44,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_load_sbyte v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x44,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x44,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_load_sbyte v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x44,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x44,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_load_sbyte v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x44,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x44,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_sbyte v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x44,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x44,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_sbyte v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x44,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x44,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_sbyte v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x44,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x44,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_sbyte v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x44,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x44,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_sbyte v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x46,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x46,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_ushort v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x48,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x48,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_ushort v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x48,0xe0,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x48,0xe0,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_load_ushort v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x48,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x48,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_load_ushort v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x48,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x48,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_load_ushort v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x48,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x48,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_load_ushort v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x48,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x48,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_load_ushort v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x48,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x48,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_load_ushort v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x48,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x48,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_load_ushort v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x48,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x48,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_load_ushort v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x48,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x48,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_load_ushort v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x48,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x48,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_load_ushort v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x48,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x48,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_ushort v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x48,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x48,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_ushort v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x48,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x48,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_ushort v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x48,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x48,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_ushort v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x48,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x48,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_ushort v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x4a,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x4a,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_sshort v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x4c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x4c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_sshort v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x4c,0xe0,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x4c,0xe0,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_load_sshort v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x4c,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x4c,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_load_sshort v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x4c,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x4c,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_load_sshort v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x4c,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x4c,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_load_sshort v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x4c,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x4c,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_load_sshort v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x4c,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x4c,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_load_sshort v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x4c,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x4c,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_load_sshort v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x4c,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x4c,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_load_sshort v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x4c,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x4c,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_load_sshort v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x4c,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x4c,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_load_sshort v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x4c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x4c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_sshort v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x4c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x4c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_sshort v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x4c,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_sshort v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x4c,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x4c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_sshort v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x4c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x4c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_sshort v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x4e,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x4e,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dword v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x50,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x50,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dword v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x50,0xe0,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x50,0xe0,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_load_dword v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x50,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x50,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_load_dword v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x50,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x50,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_load_dword v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x50,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x50,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_load_dword v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x50,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x50,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_load_dword v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x50,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x50,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_load_dword v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x50,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x50,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_load_dword v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x50,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x50,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_load_dword v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x50,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x50,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_load_dword v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x50,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x50,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_load_dword v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x50,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x50,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dword v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x50,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x50,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dword v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x50,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x50,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dword v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x50,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x50,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dword v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x50,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x50,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dword v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x52,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x52,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx2 v[0:1], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x54,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x54,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx2 v[254:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x54,0xe0,0x00,0xfe,0x00,0x00]
+0xff,0x0f,0x54,0xe0,0x00,0xfe,0x00,0x00
+
+# CHECK: buffer_load_dwordx2 v[0:1], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x54,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x54,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_load_dwordx2 v[0:1], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x54,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x54,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_load_dwordx2 v[0:1], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x54,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x54,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_load_dwordx2 v[0:1], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x54,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x54,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_load_dwordx2 v[0:1], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x54,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x54,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_load_dwordx2 v[0:1], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x54,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x54,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_load_dwordx2 v[0:1], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x54,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x54,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_load_dwordx2 v[0:1], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x54,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x54,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_load_dwordx2 v[0:1], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x54,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x54,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_load_dwordx2 v[0:1], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x54,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x54,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx2 v[0:1], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x54,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x54,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx2 v[0:1], off, s[0:3], s0 ; encoding: [0x00,0x00,0x54,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x54,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx2 v[0:1], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x54,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x54,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx2 v[0:1], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x54,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x54,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx2 v[0:1], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x56,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x56,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx3 v[0:2], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x58,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x58,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx3 v[253:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x58,0xe0,0x00,0xfd,0x00,0x00]
+0xff,0x0f,0x58,0xe0,0x00,0xfd,0x00,0x00
+
+# CHECK: buffer_load_dwordx3 v[0:2], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x58,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x58,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_load_dwordx3 v[0:2], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x58,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x58,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_load_dwordx3 v[0:2], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x58,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x58,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_load_dwordx3 v[0:2], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x58,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x58,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_load_dwordx3 v[0:2], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x58,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x58,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_load_dwordx3 v[0:2], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x58,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x58,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_load_dwordx3 v[0:2], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x58,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x58,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_load_dwordx3 v[0:2], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x58,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x58,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_load_dwordx3 v[0:2], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x58,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x58,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_load_dwordx3 v[0:2], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x58,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x58,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx3 v[0:2], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x58,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x58,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx3 v[0:2], off, s[0:3], s0 ; encoding: [0x00,0x00,0x58,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x58,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx3 v[0:2], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x58,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x58,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx3 v[0:2], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x58,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x58,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx3 v[0:2], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x5a,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x5a,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx4 v[0:3], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x5c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x5c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx4 v[252:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x5c,0xe0,0x00,0xfc,0x00,0x00]
+0xff,0x0f,0x5c,0xe0,0x00,0xfc,0x00,0x00
+
+# CHECK: buffer_load_dwordx4 v[0:3], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x5c,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x5c,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_load_dwordx4 v[0:3], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x5c,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x5c,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_load_dwordx4 v[0:3], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x5c,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x5c,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_load_dwordx4 v[0:3], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x5c,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x5c,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_load_dwordx4 v[0:3], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x5c,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x5c,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_load_dwordx4 v[0:3], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x5c,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x5c,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_load_dwordx4 v[0:3], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x5c,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x5c,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_load_dwordx4 v[0:3], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x5c,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x5c,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_load_dwordx4 v[0:3], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x5c,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x5c,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_load_dwordx4 v[0:3], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x5c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x5c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx4 v[0:3], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x5c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x5c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx4 v[0:3], off, s[0:3], s0 ; encoding: [0x00,0x00,0x5c,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx4 v[0:3], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x5c,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x5c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx4 v[0:3], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x5c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x5c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_load_dwordx4 v[0:3], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x5e,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x5e,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_byte v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x60,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x60,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_byte v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x60,0xe0,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x60,0xe0,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_store_byte v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x60,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x60,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_store_byte v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x60,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x60,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_store_byte v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x60,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x60,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_store_byte v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x60,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x60,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_store_byte v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x60,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x60,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_store_byte v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x60,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x60,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_store_byte v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x60,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x60,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_store_byte v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x60,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x60,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_store_byte v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x60,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x60,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_store_byte v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x60,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x60,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_byte v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x60,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x60,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_byte v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x60,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x60,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_byte v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x60,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x60,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_byte v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x60,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x60,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_byte v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x62,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x62,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_short v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x68,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x68,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_short v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x68,0xe0,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x68,0xe0,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_store_short v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x68,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x68,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_store_short v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x68,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x68,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_store_short v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x68,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x68,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_store_short v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x68,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x68,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_store_short v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x68,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x68,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_store_short v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x68,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x68,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_store_short v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x68,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x68,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_store_short v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x68,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x68,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_store_short v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x68,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x68,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_store_short v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x68,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x68,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_short v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x68,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x68,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_short v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x68,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x68,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_short v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x68,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x68,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_short v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x68,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x68,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_short v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x6a,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x6a,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dword v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x70,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x70,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dword v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x70,0xe0,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x70,0xe0,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_store_dword v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x70,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x70,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_store_dword v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x70,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x70,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_store_dword v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x70,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x70,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_store_dword v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x70,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x70,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_store_dword v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x70,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x70,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_store_dword v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x70,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x70,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_store_dword v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x70,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x70,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_store_dword v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x70,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x70,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_store_dword v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x70,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x70,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_store_dword v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x70,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x70,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dword v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x70,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x70,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dword v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x70,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x70,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dword v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x70,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x70,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dword v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x70,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x70,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dword v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x72,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x72,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx2 v[0:1], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x74,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x74,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx2 v[254:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x74,0xe0,0x00,0xfe,0x00,0x00]
+0xff,0x0f,0x74,0xe0,0x00,0xfe,0x00,0x00
+
+# CHECK: buffer_store_dwordx2 v[0:1], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x74,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x74,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_store_dwordx2 v[0:1], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x74,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x74,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_store_dwordx2 v[0:1], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x74,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x74,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_store_dwordx2 v[0:1], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x74,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x74,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_store_dwordx2 v[0:1], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x74,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x74,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x74,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x74,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_store_dwordx2 v[0:1], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x74,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x74,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_store_dwordx2 v[0:1], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x74,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x74,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_store_dwordx2 v[0:1], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x74,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x74,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_store_dwordx2 v[0:1], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x74,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x74,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx2 v[0:1], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x74,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x74,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx2 v[0:1], off, s[0:3], s0 ; encoding: [0x00,0x00,0x74,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x74,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx2 v[0:1], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x74,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x74,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx2 v[0:1], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x74,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x74,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx2 v[0:1], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x76,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x76,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx3 v[0:2], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x78,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x78,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx3 v[253:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x78,0xe0,0x00,0xfd,0x00,0x00]
+0xff,0x0f,0x78,0xe0,0x00,0xfd,0x00,0x00
+
+# CHECK: buffer_store_dwordx3 v[0:2], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x78,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x78,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_store_dwordx3 v[0:2], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x78,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x78,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_store_dwordx3 v[0:2], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x78,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x78,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_store_dwordx3 v[0:2], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x78,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x78,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_store_dwordx3 v[0:2], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x78,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x78,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_store_dwordx3 v[0:2], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x78,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x78,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_store_dwordx3 v[0:2], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x78,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x78,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_store_dwordx3 v[0:2], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x78,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x78,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_store_dwordx3 v[0:2], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x78,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x78,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_store_dwordx3 v[0:2], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x78,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x78,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx3 v[0:2], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x78,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x78,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx3 v[0:2], off, s[0:3], s0 ; encoding: [0x00,0x00,0x78,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x78,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx3 v[0:2], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x78,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x78,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx3 v[0:2], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x78,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x78,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx3 v[0:2], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x7a,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x7a,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx4 v[0:3], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x7c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x7c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx4 v[252:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x7c,0xe0,0x00,0xfc,0x00,0x00]
+0xff,0x0f,0x7c,0xe0,0x00,0xfc,0x00,0x00
+
+# CHECK: buffer_store_dwordx4 v[0:3], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x7c,0xe0,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x7c,0xe0,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_store_dwordx4 v[0:3], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x7c,0xe0,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x7c,0xe0,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_store_dwordx4 v[0:3], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x7c,0xe0,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x7c,0xe0,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_store_dwordx4 v[0:3], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x7c,0xe0,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x7c,0xe0,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_store_dwordx4 v[0:3], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x7c,0xe0,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x7c,0xe0,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x7c,0xe0,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x7c,0xe0,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_store_dwordx4 v[0:3], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x7c,0xe0,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x7c,0xe0,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_store_dwordx4 v[0:3], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x7c,0xe0,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x7c,0xe0,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_store_dwordx4 v[0:3], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x7c,0xe0,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x7c,0xe0,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_store_dwordx4 v[0:3], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x7c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x7c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx4 v[0:3], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x7c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x7c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx4 v[0:3], off, s[0:3], s0 ; encoding: [0x00,0x00,0x7c,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx4 v[0:3], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x7c,0xe0,0x00,0x00,0x00,0x00]
+0x07,0x00,0x7c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx4 v[0:3], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x7c,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x7c,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_store_dwordx4 v[0:3], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x7e,0xe0,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x7e,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_wbinvl1 ; encoding: [0x00,0x00,0xf8,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xf8,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_wbinvl1_vol ; encoding: [0x00,0x00,0xfc,0xe0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xfc,0xe0,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_swap v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x00,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_swap v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe1,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x00,0xe1,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_atomic_swap v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x00,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_swap v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x00,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_swap v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x00,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_swap v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x00,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_swap v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x00,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_swap v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x00,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_swap v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x00,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_swap v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x00,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_swap v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x00,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x00,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_swap v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x00,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x00,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_swap v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x00,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x00,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_swap v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x00,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x00,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_swap v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x00,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x00,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_swap v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x00,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x00,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_swap v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x02,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x02,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_cmpswap v[0:1], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x04,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_cmpswap v[254:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe1,0x00,0xfe,0x00,0x00]
+0xff,0x0f,0x04,0xe1,0x00,0xfe,0x00,0x00
+
+# CHECK: buffer_atomic_cmpswap v[0:1], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x04,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_cmpswap v[0:1], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x04,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_cmpswap v[0:1], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x04,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_cmpswap v[0:1], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x04,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_cmpswap v[0:1], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x04,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_cmpswap v[0:1], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x04,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_cmpswap v[0:1], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x04,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_cmpswap v[0:1], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x04,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_cmpswap v[0:1], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x04,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x04,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_cmpswap v[0:1], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x04,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x04,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_cmpswap v[0:1], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x04,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x04,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_cmpswap v[0:1], off, s[0:3], s0 ; encoding: [0x00,0x00,0x04,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x04,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_cmpswap v[0:1], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x04,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x04,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_cmpswap v[0:1], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x04,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x04,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_cmpswap v[0:1], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x06,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x06,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_add v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x08,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_add v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe1,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x08,0xe1,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_atomic_add v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x08,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_add v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x08,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_add v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x08,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_add v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x08,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_add v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x08,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_add v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x08,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_add v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x08,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_add v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x08,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_add v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x08,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x08,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_add v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x08,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x08,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_add v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x08,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x08,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_add v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x08,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x08,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_add v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x08,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x08,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_add v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x08,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x08,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_add v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x0a,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x0a,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_sub v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x0c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_sub v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe1,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x0c,0xe1,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_atomic_sub v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x0c,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_sub v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x0c,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_sub v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x0c,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_sub v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x0c,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_sub v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x0c,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_sub v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x0c,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_sub v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x0c,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_sub v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x0c,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_sub v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x0c,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x0c,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_sub v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x0c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x0c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_sub v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x0c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x0c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_sub v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x0c,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_sub v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x0c,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x0c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_sub v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x0c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x0c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_sub v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x0e,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x0e,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smin v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x10,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smin v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe1,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x10,0xe1,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_atomic_smin v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x10,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_smin v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x10,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_smin v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x10,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_smin v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x10,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_smin v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x10,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_smin v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x10,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_smin v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x10,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_smin v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x10,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_smin v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x10,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x10,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_smin v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x10,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x10,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smin v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x10,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x10,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smin v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x10,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x10,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smin v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x10,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x10,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smin v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x10,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x10,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smin v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x12,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x12,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umin v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x14,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umin v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe1,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x14,0xe1,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_atomic_umin v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x14,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_umin v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x14,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_umin v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x14,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_umin v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x14,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_umin v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x14,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_umin v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x14,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_umin v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x14,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_umin v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x14,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_umin v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x14,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x14,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_umin v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x14,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x14,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umin v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x14,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x14,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umin v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x14,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x14,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umin v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x14,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x14,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umin v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x14,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x14,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umin v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x16,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x16,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smax v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x18,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smax v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe1,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x18,0xe1,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_atomic_smax v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x18,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_smax v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x18,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_smax v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x18,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_smax v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x18,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_smax v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x18,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_smax v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x18,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_smax v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x18,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_smax v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x18,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_smax v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x18,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x18,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_smax v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x18,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x18,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smax v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x18,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x18,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smax v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x18,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x18,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smax v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x18,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x18,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smax v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x18,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x18,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smax v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x1a,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x1a,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umax v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x1c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umax v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe1,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x1c,0xe1,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_atomic_umax v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x1c,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_umax v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x1c,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_umax v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x1c,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_umax v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x1c,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_umax v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x1c,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_umax v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x1c,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_umax v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x1c,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_umax v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x1c,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_umax v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x1c,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x1c,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_umax v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x1c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x1c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umax v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x1c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x1c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umax v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x1c,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x1c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umax v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x1c,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x1c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umax v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x1c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x1c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umax v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x1e,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x1e,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_and v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x20,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x20,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_and v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x20,0xe1,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x20,0xe1,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_atomic_and v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x20,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x20,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_and v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x20,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x20,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_and v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x20,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x20,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_and v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x20,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x20,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_and v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x20,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x20,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_and v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x20,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x20,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_and v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x20,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x20,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_and v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x20,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x20,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_and v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x20,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x20,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_and v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x20,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x20,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_and v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x20,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x20,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_and v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x20,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x20,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_and v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x20,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x20,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_and v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x20,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x20,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_and v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x22,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x22,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_or v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x24,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x24,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_or v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x24,0xe1,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x24,0xe1,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_atomic_or v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x24,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x24,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_or v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x24,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x24,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_or v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x24,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x24,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_or v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x24,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x24,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_or v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x24,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x24,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_or v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x24,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x24,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_or v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x24,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x24,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_or v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x24,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x24,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_or v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x24,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x24,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_or v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x24,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x24,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_or v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x24,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x24,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_or v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x24,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x24,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_or v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x24,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x24,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_or v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x24,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x24,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_or v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x26,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x26,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_xor v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x28,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x28,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_xor v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x28,0xe1,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x28,0xe1,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_atomic_xor v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x28,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x28,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_xor v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x28,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x28,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_xor v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x28,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x28,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_xor v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x28,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x28,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_xor v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x28,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x28,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_xor v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x28,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x28,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_xor v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x28,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x28,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_xor v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x28,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x28,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_xor v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x28,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x28,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_xor v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x28,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x28,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_xor v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x28,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x28,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_xor v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x28,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x28,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_xor v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x28,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x28,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_xor v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x28,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x28,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_xor v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x2a,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x2a,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_inc v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x2c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x2c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_inc v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x2c,0xe1,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x2c,0xe1,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_atomic_inc v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x2c,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x2c,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_inc v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x2c,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x2c,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_inc v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x2c,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x2c,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_inc v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x2c,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x2c,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_inc v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x2c,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x2c,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_inc v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x2c,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x2c,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_inc v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x2c,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x2c,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_inc v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x2c,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x2c,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_inc v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x2c,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x2c,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_inc v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x2c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x2c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_inc v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x2c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x2c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_inc v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x2c,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_inc v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x2c,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x2c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_inc v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x2c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x2c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_inc v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x2e,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x2e,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_dec v0, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x30,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x30,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_dec v255, off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x30,0xe1,0x00,0xff,0x00,0x00]
+0xff,0x0f,0x30,0xe1,0x00,0xff,0x00,0x00
+
+# CHECK: buffer_atomic_dec v0, off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x30,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x30,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_dec v0, off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x30,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x30,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_dec v0, off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x30,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x30,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_dec v0, off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x30,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x30,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_dec v0, off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x30,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x30,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_dec v0, off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x30,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x30,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_dec v0, off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x30,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x30,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_dec v0, off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x30,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x30,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_dec v0, off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x30,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x30,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_dec v0, v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x30,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x30,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_dec v0, v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x30,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x30,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_dec v0, off, s[0:3], s0 ; encoding: [0x00,0x00,0x30,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x30,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_dec v0, off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x30,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x30,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_dec v0, off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x30,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x30,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_dec v0, off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x32,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x32,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_swap_x2 v[0:1], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x80,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x80,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_swap_x2 v[254:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x80,0xe1,0x00,0xfe,0x00,0x00]
+0xff,0x0f,0x80,0xe1,0x00,0xfe,0x00,0x00
+
+# CHECK: buffer_atomic_swap_x2 v[0:1], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x80,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x80,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_swap_x2 v[0:1], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x80,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x80,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_swap_x2 v[0:1], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x80,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x80,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_swap_x2 v[0:1], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x80,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x80,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_swap_x2 v[0:1], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x80,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x80,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_swap_x2 v[0:1], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x80,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x80,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_swap_x2 v[0:1], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x80,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x80,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_swap_x2 v[0:1], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x80,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x80,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_swap_x2 v[0:1], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x80,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x80,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_swap_x2 v[0:1], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x80,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x80,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_swap_x2 v[0:1], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x80,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x80,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_swap_x2 v[0:1], off, s[0:3], s0 ; encoding: [0x00,0x00,0x80,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x80,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_swap_x2 v[0:1], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x80,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x80,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_swap_x2 v[0:1], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x80,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x80,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_swap_x2 v[0:1], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x82,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x82,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_cmpswap_x2 v[0:3], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x84,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x84,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_cmpswap_x2 v[252:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x84,0xe1,0x00,0xfc,0x00,0x00]
+0xff,0x0f,0x84,0xe1,0x00,0xfc,0x00,0x00
+
+# CHECK: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x84,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x84,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_cmpswap_x2 v[0:3], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x84,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x84,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_cmpswap_x2 v[0:3], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x84,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x84,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_cmpswap_x2 v[0:3], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x84,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x84,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_cmpswap_x2 v[0:3], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x84,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x84,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_cmpswap_x2 v[0:3], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x84,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x84,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_cmpswap_x2 v[0:3], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x84,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x84,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_cmpswap_x2 v[0:3], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x84,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x84,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_cmpswap_x2 v[0:3], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x84,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x84,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_cmpswap_x2 v[0:3], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x84,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x84,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_cmpswap_x2 v[0:3], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x84,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x84,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_cmpswap_x2 v[0:3], off, s[0:3], s0 ; encoding: [0x00,0x00,0x84,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x84,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_cmpswap_x2 v[0:3], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x84,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x84,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_cmpswap_x2 v[0:3], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x84,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x84,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_cmpswap_x2 v[0:3], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x86,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x86,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_add_x2 v[0:1], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x88,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x88,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_add_x2 v[254:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x88,0xe1,0x00,0xfe,0x00,0x00]
+0xff,0x0f,0x88,0xe1,0x00,0xfe,0x00,0x00
+
+# CHECK: buffer_atomic_add_x2 v[0:1], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x88,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x88,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_add_x2 v[0:1], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x88,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x88,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_add_x2 v[0:1], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x88,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x88,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_add_x2 v[0:1], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x88,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x88,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_add_x2 v[0:1], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x88,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x88,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_add_x2 v[0:1], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x88,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x88,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_add_x2 v[0:1], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x88,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x88,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_add_x2 v[0:1], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x88,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x88,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_add_x2 v[0:1], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x88,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x88,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_add_x2 v[0:1], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x88,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x88,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_add_x2 v[0:1], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x88,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x88,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_add_x2 v[0:1], off, s[0:3], s0 ; encoding: [0x00,0x00,0x88,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x88,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_add_x2 v[0:1], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x88,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x88,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_add_x2 v[0:1], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x88,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x88,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_add_x2 v[0:1], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x8a,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x8a,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_sub_x2 v[0:1], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x8c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x8c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_sub_x2 v[254:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x8c,0xe1,0x00,0xfe,0x00,0x00]
+0xff,0x0f,0x8c,0xe1,0x00,0xfe,0x00,0x00
+
+# CHECK: buffer_atomic_sub_x2 v[0:1], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x8c,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x8c,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_sub_x2 v[0:1], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x8c,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x8c,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_sub_x2 v[0:1], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x8c,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x8c,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_sub_x2 v[0:1], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x8c,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x8c,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_sub_x2 v[0:1], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x8c,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x8c,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_sub_x2 v[0:1], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x8c,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x8c,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_sub_x2 v[0:1], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x8c,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x8c,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_sub_x2 v[0:1], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x8c,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x8c,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_sub_x2 v[0:1], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x8c,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x8c,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_sub_x2 v[0:1], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x8c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x8c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_sub_x2 v[0:1], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x8c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x8c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_sub_x2 v[0:1], off, s[0:3], s0 ; encoding: [0x00,0x00,0x8c,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_sub_x2 v[0:1], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x8c,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x8c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_sub_x2 v[0:1], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x8c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x8c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_sub_x2 v[0:1], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x8e,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x8e,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smin_x2 v[0:1], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x90,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x90,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smin_x2 v[254:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x90,0xe1,0x00,0xfe,0x00,0x00]
+0xff,0x0f,0x90,0xe1,0x00,0xfe,0x00,0x00
+
+# CHECK: buffer_atomic_smin_x2 v[0:1], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x90,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x90,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_smin_x2 v[0:1], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x90,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x90,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_smin_x2 v[0:1], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x90,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x90,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_smin_x2 v[0:1], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x90,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x90,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_smin_x2 v[0:1], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x90,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x90,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_smin_x2 v[0:1], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x90,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x90,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_smin_x2 v[0:1], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x90,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x90,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_smin_x2 v[0:1], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x90,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x90,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_smin_x2 v[0:1], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x90,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x90,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_smin_x2 v[0:1], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x90,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x90,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smin_x2 v[0:1], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x90,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x90,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smin_x2 v[0:1], off, s[0:3], s0 ; encoding: [0x00,0x00,0x90,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x90,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smin_x2 v[0:1], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x90,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x90,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smin_x2 v[0:1], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x90,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x90,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smin_x2 v[0:1], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x92,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x92,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umin_x2 v[0:1], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x94,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x94,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umin_x2 v[254:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x94,0xe1,0x00,0xfe,0x00,0x00]
+0xff,0x0f,0x94,0xe1,0x00,0xfe,0x00,0x00
+
+# CHECK: buffer_atomic_umin_x2 v[0:1], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x94,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x94,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_umin_x2 v[0:1], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x94,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x94,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_umin_x2 v[0:1], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x94,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x94,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_umin_x2 v[0:1], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x94,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x94,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_umin_x2 v[0:1], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x94,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x94,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_umin_x2 v[0:1], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x94,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x94,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_umin_x2 v[0:1], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x94,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x94,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_umin_x2 v[0:1], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x94,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x94,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_umin_x2 v[0:1], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x94,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x94,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_umin_x2 v[0:1], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x94,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x94,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umin_x2 v[0:1], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x94,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x94,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umin_x2 v[0:1], off, s[0:3], s0 ; encoding: [0x00,0x00,0x94,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x94,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umin_x2 v[0:1], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x94,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x94,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umin_x2 v[0:1], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x94,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x94,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umin_x2 v[0:1], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x96,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x96,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smax_x2 v[0:1], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x98,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x98,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smax_x2 v[254:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x98,0xe1,0x00,0xfe,0x00,0x00]
+0xff,0x0f,0x98,0xe1,0x00,0xfe,0x00,0x00
+
+# CHECK: buffer_atomic_smax_x2 v[0:1], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x98,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x98,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_smax_x2 v[0:1], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x98,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x98,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_smax_x2 v[0:1], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x98,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x98,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_smax_x2 v[0:1], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x98,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x98,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_smax_x2 v[0:1], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x98,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x98,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_smax_x2 v[0:1], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x98,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x98,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_smax_x2 v[0:1], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x98,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x98,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_smax_x2 v[0:1], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x98,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x98,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_smax_x2 v[0:1], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x98,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x98,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_smax_x2 v[0:1], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x98,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x98,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smax_x2 v[0:1], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x98,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x98,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smax_x2 v[0:1], off, s[0:3], s0 ; encoding: [0x00,0x00,0x98,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x98,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smax_x2 v[0:1], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x98,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x98,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smax_x2 v[0:1], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x98,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x98,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_smax_x2 v[0:1], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x9a,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x9a,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umax_x2 v[0:1], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x9c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x9c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umax_x2 v[254:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0x9c,0xe1,0x00,0xfe,0x00,0x00]
+0xff,0x0f,0x9c,0xe1,0x00,0xfe,0x00,0x00
+
+# CHECK: buffer_atomic_umax_x2 v[0:1], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0x9c,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0x9c,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_umax_x2 v[0:1], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0x9c,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0x9c,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_umax_x2 v[0:1], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0x9c,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0x9c,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_umax_x2 v[0:1], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0x9c,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0x9c,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_umax_x2 v[0:1], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0x9c,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0x9c,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_umax_x2 v[0:1], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0x9c,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0x9c,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_umax_x2 v[0:1], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0x9c,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0x9c,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_umax_x2 v[0:1], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0x9c,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0x9c,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_umax_x2 v[0:1], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0x9c,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0x9c,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_umax_x2 v[0:1], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0x9c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0x9c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umax_x2 v[0:1], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0x9c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0x9c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umax_x2 v[0:1], off, s[0:3], s0 ; encoding: [0x00,0x00,0x9c,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x9c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umax_x2 v[0:1], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0x9c,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0x9c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umax_x2 v[0:1], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0x9c,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0x9c,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_umax_x2 v[0:1], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0x9e,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0x9e,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_and_x2 v[0:1], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0xa0,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0xa0,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_and_x2 v[254:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0xa0,0xe1,0x00,0xfe,0x00,0x00]
+0xff,0x0f,0xa0,0xe1,0x00,0xfe,0x00,0x00
+
+# CHECK: buffer_atomic_and_x2 v[0:1], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0xa0,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0xa0,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_and_x2 v[0:1], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0xa0,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0xa0,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_and_x2 v[0:1], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0xa0,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0xa0,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_and_x2 v[0:1], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0xa0,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0xa0,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_and_x2 v[0:1], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0xa0,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0xa0,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_and_x2 v[0:1], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0xa0,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0xa0,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_and_x2 v[0:1], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0xa0,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0xa0,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_and_x2 v[0:1], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0xa0,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0xa0,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_and_x2 v[0:1], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0xa0,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0xa0,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_and_x2 v[0:1], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0xa0,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0xa0,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_and_x2 v[0:1], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0xa0,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0xa0,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_and_x2 v[0:1], off, s[0:3], s0 ; encoding: [0x00,0x00,0xa0,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa0,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_and_x2 v[0:1], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0xa0,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0xa0,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_and_x2 v[0:1], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0xa0,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0xa0,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_and_x2 v[0:1], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0xa2,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0xa2,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_or_x2 v[0:1], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0xa4,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0xa4,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_or_x2 v[254:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0xa4,0xe1,0x00,0xfe,0x00,0x00]
+0xff,0x0f,0xa4,0xe1,0x00,0xfe,0x00,0x00
+
+# CHECK: buffer_atomic_or_x2 v[0:1], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0xa4,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0xa4,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_or_x2 v[0:1], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0xa4,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0xa4,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_or_x2 v[0:1], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0xa4,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0xa4,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_or_x2 v[0:1], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0xa4,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0xa4,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_or_x2 v[0:1], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0xa4,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0xa4,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_or_x2 v[0:1], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0xa4,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0xa4,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_or_x2 v[0:1], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0xa4,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0xa4,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_or_x2 v[0:1], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0xa4,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0xa4,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_or_x2 v[0:1], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0xa4,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0xa4,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_or_x2 v[0:1], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0xa4,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0xa4,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_or_x2 v[0:1], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0xa4,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0xa4,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_or_x2 v[0:1], off, s[0:3], s0 ; encoding: [0x00,0x00,0xa4,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa4,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_or_x2 v[0:1], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0xa4,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0xa4,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_or_x2 v[0:1], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0xa4,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0xa4,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_or_x2 v[0:1], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0xa6,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0xa6,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_xor_x2 v[0:1], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0xa8,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0xa8,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_xor_x2 v[254:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0xa8,0xe1,0x00,0xfe,0x00,0x00]
+0xff,0x0f,0xa8,0xe1,0x00,0xfe,0x00,0x00
+
+# CHECK: buffer_atomic_xor_x2 v[0:1], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0xa8,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0xa8,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_xor_x2 v[0:1], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0xa8,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0xa8,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_xor_x2 v[0:1], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0xa8,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0xa8,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_xor_x2 v[0:1], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0xa8,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0xa8,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_xor_x2 v[0:1], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0xa8,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0xa8,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_xor_x2 v[0:1], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0xa8,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0xa8,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_xor_x2 v[0:1], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0xa8,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0xa8,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_xor_x2 v[0:1], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0xa8,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0xa8,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_xor_x2 v[0:1], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0xa8,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0xa8,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_xor_x2 v[0:1], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0xa8,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0xa8,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_xor_x2 v[0:1], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0xa8,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0xa8,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_xor_x2 v[0:1], off, s[0:3], s0 ; encoding: [0x00,0x00,0xa8,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa8,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_xor_x2 v[0:1], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0xa8,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0xa8,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_xor_x2 v[0:1], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0xa8,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0xa8,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_xor_x2 v[0:1], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0xaa,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0xaa,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_inc_x2 v[0:1], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0xac,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0xac,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_inc_x2 v[254:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0xac,0xe1,0x00,0xfe,0x00,0x00]
+0xff,0x0f,0xac,0xe1,0x00,0xfe,0x00,0x00
+
+# CHECK: buffer_atomic_inc_x2 v[0:1], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0xac,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0xac,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_inc_x2 v[0:1], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0xac,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0xac,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_inc_x2 v[0:1], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0xac,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0xac,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_inc_x2 v[0:1], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0xac,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0xac,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_inc_x2 v[0:1], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0xac,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0xac,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_inc_x2 v[0:1], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0xac,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0xac,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_inc_x2 v[0:1], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0xac,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0xac,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_inc_x2 v[0:1], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0xac,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0xac,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_inc_x2 v[0:1], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0xac,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0xac,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_inc_x2 v[0:1], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0xac,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0xac,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_inc_x2 v[0:1], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0xac,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0xac,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_inc_x2 v[0:1], off, s[0:3], s0 ; encoding: [0x00,0x00,0xac,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xac,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_inc_x2 v[0:1], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0xac,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0xac,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_inc_x2 v[0:1], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0xac,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0xac,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_inc_x2 v[0:1], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0xae,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0xae,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_dec_x2 v[0:1], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0xb0,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0xb0,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_dec_x2 v[254:255], off, s[0:3], s0 offset:4095 ; encoding: [0xff,0x0f,0xb0,0xe1,0x00,0xfe,0x00,0x00]
+0xff,0x0f,0xb0,0xe1,0x00,0xfe,0x00,0x00
+
+# CHECK: buffer_atomic_dec_x2 v[0:1], off, s[4:7], s0 offset:4095 ; encoding: [0xff,0x0f,0xb0,0xe1,0x00,0x00,0x01,0x00]
+0xff,0x0f,0xb0,0xe1,0x00,0x00,0x01,0x00
+
+# CHECK: buffer_atomic_dec_x2 v[0:1], off, s[96:99], s0 offset:4095 ; encoding: [0xff,0x0f,0xb0,0xe1,0x00,0x00,0x18,0x00]
+0xff,0x0f,0xb0,0xe1,0x00,0x00,0x18,0x00
+
+# CHECK: buffer_atomic_dec_x2 v[0:1], off, ttmp[8:11], s0 offset:4095 ; encoding: [0xff,0x0f,0xb0,0xe1,0x00,0x00,0x1e,0x00]
+0xff,0x0f,0xb0,0xe1,0x00,0x00,0x1e,0x00
+
+# CHECK: buffer_atomic_dec_x2 v[0:1], off, s[0:3], s101 offset:4095 ; encoding: [0xff,0x0f,0xb0,0xe1,0x00,0x00,0x00,0x65]
+0xff,0x0f,0xb0,0xe1,0x00,0x00,0x00,0x65
+
+# CHECK: buffer_atomic_dec_x2 v[0:1], off, s[0:3], m0 offset:4095 ; encoding: [0xff,0x0f,0xb0,0xe1,0x00,0x00,0x00,0x7c]
+0xff,0x0f,0xb0,0xe1,0x00,0x00,0x00,0x7c
+
+# CHECK: buffer_atomic_dec_x2 v[0:1], off, s[0:3], 0 offset:4095 ; encoding: [0xff,0x0f,0xb0,0xe1,0x00,0x00,0x00,0x80]
+0xff,0x0f,0xb0,0xe1,0x00,0x00,0x00,0x80
+
+# CHECK: buffer_atomic_dec_x2 v[0:1], off, s[0:3], -1 offset:4095 ; encoding: [0xff,0x0f,0xb0,0xe1,0x00,0x00,0x00,0xc1]
+0xff,0x0f,0xb0,0xe1,0x00,0x00,0x00,0xc1
+
+# CHECK: buffer_atomic_dec_x2 v[0:1], off, s[0:3], 0.5 offset:4095 ; encoding: [0xff,0x0f,0xb0,0xe1,0x00,0x00,0x00,0xf0]
+0xff,0x0f,0xb0,0xe1,0x00,0x00,0x00,0xf0
+
+# CHECK: buffer_atomic_dec_x2 v[0:1], off, s[0:3], -4.0 offset:4095 ; encoding: [0xff,0x0f,0xb0,0xe1,0x00,0x00,0x00,0xf7]
+0xff,0x0f,0xb0,0xe1,0x00,0x00,0x00,0xf7
+
+# CHECK: buffer_atomic_dec_x2 v[0:1], v0, s[0:3], s0 idxen offset:4095 ; encoding: [0xff,0x2f,0xb0,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x2f,0xb0,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_dec_x2 v[0:1], v0, s[0:3], s0 offen offset:4095 ; encoding: [0xff,0x1f,0xb0,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x1f,0xb0,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_dec_x2 v[0:1], off, s[0:3], s0 ; encoding: [0x00,0x00,0xb0,0xe1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xb0,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_dec_x2 v[0:1], off, s[0:3], s0 offset:7 ; encoding: [0x07,0x00,0xb0,0xe1,0x00,0x00,0x00,0x00]
+0x07,0x00,0xb0,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_dec_x2 v[0:1], off, s[0:3], s0 offset:4095 glc ; encoding: [0xff,0x4f,0xb0,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x4f,0xb0,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: buffer_atomic_dec_x2 v[0:1], off, s[0:3], s0 offset:4095 slc ; encoding: [0xff,0x0f,0xb2,0xe1,0x00,0x00,0x00,0x00]
+0xff,0x0f,0xb2,0xe1,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, s[0:1], s0 ; encoding: [0x00,0x00,0x00,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x00,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword s101, s[0:1], s0 ; encoding: [0x40,0x19,0x00,0xc0,0x00,0x00,0x00,0x00]
+0x40,0x19,0x00,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword flat_scratch_lo, s[0:1], s0 ; encoding: [0x80,0x19,0x00,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x19,0x00,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword flat_scratch_hi, s[0:1], s0 ; encoding: [0xc0,0x19,0x00,0xc0,0x00,0x00,0x00,0x00]
+0xc0,0x19,0x00,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword vcc_lo, s[0:1], s0 ; encoding: [0x80,0x1a,0x00,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x1a,0x00,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword vcc_hi, s[0:1], s0 ; encoding: [0xc0,0x1a,0x00,0xc0,0x00,0x00,0x00,0x00]
+0xc0,0x1a,0x00,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword tba_lo, s[0:1], s0 ; encoding: [0x00,0x1b,0x00,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x1b,0x00,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword tba_hi, s[0:1], s0 ; encoding: [0x40,0x1b,0x00,0xc0,0x00,0x00,0x00,0x00]
+0x40,0x1b,0x00,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword tma_lo, s[0:1], s0 ; encoding: [0x80,0x1b,0x00,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x1b,0x00,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword tma_hi, s[0:1], s0 ; encoding: [0xc0,0x1b,0x00,0xc0,0x00,0x00,0x00,0x00]
+0xc0,0x1b,0x00,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword ttmp11, s[0:1], s0 ; encoding: [0xc0,0x1e,0x00,0xc0,0x00,0x00,0x00,0x00]
+0xc0,0x1e,0x00,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, s[2:3], s0 ; encoding: [0x01,0x00,0x00,0xc0,0x00,0x00,0x00,0x00]
+0x01,0x00,0x00,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, s[100:101], s0 ; encoding: [0x32,0x00,0x00,0xc0,0x00,0x00,0x00,0x00]
+0x32,0x00,0x00,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, flat_scratch, s0 ; encoding: [0x33,0x00,0x00,0xc0,0x00,0x00,0x00,0x00]
+0x33,0x00,0x00,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, vcc, s0 ; encoding: [0x35,0x00,0x00,0xc0,0x00,0x00,0x00,0x00]
+0x35,0x00,0x00,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, tba, s0 ; encoding: [0x36,0x00,0x00,0xc0,0x00,0x00,0x00,0x00]
+0x36,0x00,0x00,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, tma, s0 ; encoding: [0x37,0x00,0x00,0xc0,0x00,0x00,0x00,0x00]
+0x37,0x00,0x00,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, ttmp[10:11], s0 ; encoding: [0x3d,0x00,0x00,0xc0,0x00,0x00,0x00,0x00]
+0x3d,0x00,0x00,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, s[0:1], s101 ; encoding: [0x00,0x00,0x00,0xc0,0x65,0x00,0x00,0x00]
+0x00,0x00,0x00,0xc0,0x65,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, s[0:1], flat_scratch_lo ; encoding: [0x00,0x00,0x00,0xc0,0x66,0x00,0x00,0x00]
+0x00,0x00,0x00,0xc0,0x66,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, s[0:1], flat_scratch_hi ; encoding: [0x00,0x00,0x00,0xc0,0x67,0x00,0x00,0x00]
+0x00,0x00,0x00,0xc0,0x67,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, s[0:1], vcc_lo ; encoding: [0x00,0x00,0x00,0xc0,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x00,0xc0,0x6a,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, s[0:1], vcc_hi ; encoding: [0x00,0x00,0x00,0xc0,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x00,0xc0,0x6b,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, s[0:1], tba_lo ; encoding: [0x00,0x00,0x00,0xc0,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x00,0xc0,0x6c,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, s[0:1], tba_hi ; encoding: [0x00,0x00,0x00,0xc0,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x00,0xc0,0x6d,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, s[0:1], tma_lo ; encoding: [0x00,0x00,0x00,0xc0,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x00,0xc0,0x6e,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, s[0:1], tma_hi ; encoding: [0x00,0x00,0x00,0xc0,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x00,0xc0,0x6f,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, s[0:1], ttmp11 ; encoding: [0x00,0x00,0x00,0xc0,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x00,0xc0,0x7b,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, s[0:1], m0 ; encoding: [0x00,0x00,0x00,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x00,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_load_dword s0, s[0:1], 0x7ffff ; encoding: [0x00,0x00,0x02,0xc0,0xff,0xff,0x07,0x00]
+0x00,0x00,0x02,0xc0,0xff,0xff,0x07,0x00
+
+# CHECK: s_load_dword s0, s[0:1], s0 glc ; encoding: [0x00,0x00,0x01,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x01,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], s[0:1], s0 ; encoding: [0x00,0x00,0x04,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x04,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[2:3], s[0:1], s0 ; encoding: [0x80,0x00,0x04,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x00,0x04,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[100:101], s[0:1], s0 ; encoding: [0x00,0x19,0x04,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x19,0x04,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 flat_scratch, s[0:1], s0 ; encoding: [0x80,0x19,0x04,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x19,0x04,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 vcc, s[0:1], s0 ; encoding: [0x80,0x1a,0x04,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x1a,0x04,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 tba, s[0:1], s0 ; encoding: [0x00,0x1b,0x04,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x1b,0x04,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 tma, s[0:1], s0 ; encoding: [0x80,0x1b,0x04,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x1b,0x04,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 ttmp[10:11], s[0:1], s0 ; encoding: [0x80,0x1e,0x04,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x1e,0x04,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], s[2:3], s0 ; encoding: [0x01,0x00,0x04,0xc0,0x00,0x00,0x00,0x00]
+0x01,0x00,0x04,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], s[100:101], s0 ; encoding: [0x32,0x00,0x04,0xc0,0x00,0x00,0x00,0x00]
+0x32,0x00,0x04,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], flat_scratch, s0 ; encoding: [0x33,0x00,0x04,0xc0,0x00,0x00,0x00,0x00]
+0x33,0x00,0x04,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], vcc, s0 ; encoding: [0x35,0x00,0x04,0xc0,0x00,0x00,0x00,0x00]
+0x35,0x00,0x04,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], tba, s0 ; encoding: [0x36,0x00,0x04,0xc0,0x00,0x00,0x00,0x00]
+0x36,0x00,0x04,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], tma, s0 ; encoding: [0x37,0x00,0x04,0xc0,0x00,0x00,0x00,0x00]
+0x37,0x00,0x04,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], ttmp[10:11], s0 ; encoding: [0x3d,0x00,0x04,0xc0,0x00,0x00,0x00,0x00]
+0x3d,0x00,0x04,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], s[0:1], s101 ; encoding: [0x00,0x00,0x04,0xc0,0x65,0x00,0x00,0x00]
+0x00,0x00,0x04,0xc0,0x65,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], s[0:1], flat_scratch_lo ; encoding: [0x00,0x00,0x04,0xc0,0x66,0x00,0x00,0x00]
+0x00,0x00,0x04,0xc0,0x66,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], s[0:1], flat_scratch_hi ; encoding: [0x00,0x00,0x04,0xc0,0x67,0x00,0x00,0x00]
+0x00,0x00,0x04,0xc0,0x67,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], s[0:1], vcc_lo ; encoding: [0x00,0x00,0x04,0xc0,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x04,0xc0,0x6a,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], s[0:1], vcc_hi ; encoding: [0x00,0x00,0x04,0xc0,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x04,0xc0,0x6b,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], s[0:1], tba_lo ; encoding: [0x00,0x00,0x04,0xc0,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x04,0xc0,0x6c,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], s[0:1], tba_hi ; encoding: [0x00,0x00,0x04,0xc0,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x04,0xc0,0x6d,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], s[0:1], tma_lo ; encoding: [0x00,0x00,0x04,0xc0,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x04,0xc0,0x6e,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], s[0:1], tma_hi ; encoding: [0x00,0x00,0x04,0xc0,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x04,0xc0,0x6f,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], s[0:1], ttmp11 ; encoding: [0x00,0x00,0x04,0xc0,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x04,0xc0,0x7b,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], s[0:1], m0 ; encoding: [0x00,0x00,0x04,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x04,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], s[0:1], 0x7ffff ; encoding: [0x00,0x00,0x06,0xc0,0xff,0xff,0x07,0x00]
+0x00,0x00,0x06,0xc0,0xff,0xff,0x07,0x00
+
+# CHECK: s_load_dwordx2 s[0:1], s[0:1], s0 glc ; encoding: [0x00,0x00,0x05,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x05,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], s[0:1], s0 ; encoding: [0x00,0x00,0x08,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x08,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[4:7], s[0:1], s0 ; encoding: [0x00,0x01,0x08,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x01,0x08,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[96:99], s[0:1], s0 ; encoding: [0x00,0x18,0x08,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x18,0x08,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 ttmp[8:11], s[0:1], s0 ; encoding: [0x00,0x1e,0x08,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x1e,0x08,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], s[2:3], s0 ; encoding: [0x01,0x00,0x08,0xc0,0x00,0x00,0x00,0x00]
+0x01,0x00,0x08,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], s[100:101], s0 ; encoding: [0x32,0x00,0x08,0xc0,0x00,0x00,0x00,0x00]
+0x32,0x00,0x08,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], flat_scratch, s0 ; encoding: [0x33,0x00,0x08,0xc0,0x00,0x00,0x00,0x00]
+0x33,0x00,0x08,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], vcc, s0 ; encoding: [0x35,0x00,0x08,0xc0,0x00,0x00,0x00,0x00]
+0x35,0x00,0x08,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], tba, s0 ; encoding: [0x36,0x00,0x08,0xc0,0x00,0x00,0x00,0x00]
+0x36,0x00,0x08,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], tma, s0 ; encoding: [0x37,0x00,0x08,0xc0,0x00,0x00,0x00,0x00]
+0x37,0x00,0x08,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], ttmp[10:11], s0 ; encoding: [0x3d,0x00,0x08,0xc0,0x00,0x00,0x00,0x00]
+0x3d,0x00,0x08,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], s[0:1], s101 ; encoding: [0x00,0x00,0x08,0xc0,0x65,0x00,0x00,0x00]
+0x00,0x00,0x08,0xc0,0x65,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], s[0:1], flat_scratch_lo ; encoding: [0x00,0x00,0x08,0xc0,0x66,0x00,0x00,0x00]
+0x00,0x00,0x08,0xc0,0x66,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], s[0:1], flat_scratch_hi ; encoding: [0x00,0x00,0x08,0xc0,0x67,0x00,0x00,0x00]
+0x00,0x00,0x08,0xc0,0x67,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], s[0:1], vcc_lo ; encoding: [0x00,0x00,0x08,0xc0,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x08,0xc0,0x6a,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], s[0:1], vcc_hi ; encoding: [0x00,0x00,0x08,0xc0,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x08,0xc0,0x6b,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], s[0:1], tba_lo ; encoding: [0x00,0x00,0x08,0xc0,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x08,0xc0,0x6c,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], s[0:1], tba_hi ; encoding: [0x00,0x00,0x08,0xc0,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x08,0xc0,0x6d,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], s[0:1], tma_lo ; encoding: [0x00,0x00,0x08,0xc0,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x08,0xc0,0x6e,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], s[0:1], tma_hi ; encoding: [0x00,0x00,0x08,0xc0,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x08,0xc0,0x6f,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], s[0:1], ttmp11 ; encoding: [0x00,0x00,0x08,0xc0,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x08,0xc0,0x7b,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], s[0:1], m0 ; encoding: [0x00,0x00,0x08,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x08,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], s[0:1], 0x7ffff ; encoding: [0x00,0x00,0x0a,0xc0,0xff,0xff,0x07,0x00]
+0x00,0x00,0x0a,0xc0,0xff,0xff,0x07,0x00
+
+# CHECK: s_load_dwordx4 s[0:3], s[0:1], s0 glc ; encoding: [0x00,0x00,0x09,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x09,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], s[0:1], s0 ; encoding: [0x00,0x00,0x0c,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[4:11], s[0:1], s0 ; encoding: [0x00,0x01,0x0c,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x01,0x0c,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[92:99], s[0:1], s0 ; encoding: [0x00,0x17,0x0c,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x17,0x0c,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], s[2:3], s0 ; encoding: [0x01,0x00,0x0c,0xc0,0x00,0x00,0x00,0x00]
+0x01,0x00,0x0c,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], s[100:101], s0 ; encoding: [0x32,0x00,0x0c,0xc0,0x00,0x00,0x00,0x00]
+0x32,0x00,0x0c,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], flat_scratch, s0 ; encoding: [0x33,0x00,0x0c,0xc0,0x00,0x00,0x00,0x00]
+0x33,0x00,0x0c,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], vcc, s0 ; encoding: [0x35,0x00,0x0c,0xc0,0x00,0x00,0x00,0x00]
+0x35,0x00,0x0c,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], tba, s0 ; encoding: [0x36,0x00,0x0c,0xc0,0x00,0x00,0x00,0x00]
+0x36,0x00,0x0c,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], tma, s0 ; encoding: [0x37,0x00,0x0c,0xc0,0x00,0x00,0x00,0x00]
+0x37,0x00,0x0c,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], ttmp[10:11], s0 ; encoding: [0x3d,0x00,0x0c,0xc0,0x00,0x00,0x00,0x00]
+0x3d,0x00,0x0c,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], s[0:1], s101 ; encoding: [0x00,0x00,0x0c,0xc0,0x65,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xc0,0x65,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], s[0:1], flat_scratch_lo ; encoding: [0x00,0x00,0x0c,0xc0,0x66,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xc0,0x66,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], s[0:1], flat_scratch_hi ; encoding: [0x00,0x00,0x0c,0xc0,0x67,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xc0,0x67,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], s[0:1], vcc_lo ; encoding: [0x00,0x00,0x0c,0xc0,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xc0,0x6a,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], s[0:1], vcc_hi ; encoding: [0x00,0x00,0x0c,0xc0,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xc0,0x6b,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], s[0:1], tba_lo ; encoding: [0x00,0x00,0x0c,0xc0,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xc0,0x6c,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], s[0:1], tba_hi ; encoding: [0x00,0x00,0x0c,0xc0,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xc0,0x6d,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], s[0:1], tma_lo ; encoding: [0x00,0x00,0x0c,0xc0,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xc0,0x6e,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], s[0:1], tma_hi ; encoding: [0x00,0x00,0x0c,0xc0,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xc0,0x6f,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], s[0:1], ttmp11 ; encoding: [0x00,0x00,0x0c,0xc0,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xc0,0x7b,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], s[0:1], m0 ; encoding: [0x00,0x00,0x0c,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], s[0:1], 0x7ffff ; encoding: [0x00,0x00,0x0e,0xc0,0xff,0xff,0x07,0x00]
+0x00,0x00,0x0e,0xc0,0xff,0xff,0x07,0x00
+
+# CHECK: s_load_dwordx8 s[0:7], s[0:1], s0 glc ; encoding: [0x00,0x00,0x0d,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0d,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], s[0:1], s0 ; encoding: [0x00,0x00,0x10,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x10,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[4:19], s[0:1], s0 ; encoding: [0x00,0x01,0x10,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x01,0x10,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[84:99], s[0:1], s0 ; encoding: [0x00,0x15,0x10,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x15,0x10,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], s[2:3], s0 ; encoding: [0x01,0x00,0x10,0xc0,0x00,0x00,0x00,0x00]
+0x01,0x00,0x10,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], s[100:101], s0 ; encoding: [0x32,0x00,0x10,0xc0,0x00,0x00,0x00,0x00]
+0x32,0x00,0x10,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], flat_scratch, s0 ; encoding: [0x33,0x00,0x10,0xc0,0x00,0x00,0x00,0x00]
+0x33,0x00,0x10,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], vcc, s0 ; encoding: [0x35,0x00,0x10,0xc0,0x00,0x00,0x00,0x00]
+0x35,0x00,0x10,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], tba, s0 ; encoding: [0x36,0x00,0x10,0xc0,0x00,0x00,0x00,0x00]
+0x36,0x00,0x10,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], tma, s0 ; encoding: [0x37,0x00,0x10,0xc0,0x00,0x00,0x00,0x00]
+0x37,0x00,0x10,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], ttmp[10:11], s0 ; encoding: [0x3d,0x00,0x10,0xc0,0x00,0x00,0x00,0x00]
+0x3d,0x00,0x10,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], s[0:1], s101 ; encoding: [0x00,0x00,0x10,0xc0,0x65,0x00,0x00,0x00]
+0x00,0x00,0x10,0xc0,0x65,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], s[0:1], flat_scratch_lo ; encoding: [0x00,0x00,0x10,0xc0,0x66,0x00,0x00,0x00]
+0x00,0x00,0x10,0xc0,0x66,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], s[0:1], flat_scratch_hi ; encoding: [0x00,0x00,0x10,0xc0,0x67,0x00,0x00,0x00]
+0x00,0x00,0x10,0xc0,0x67,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], s[0:1], vcc_lo ; encoding: [0x00,0x00,0x10,0xc0,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x10,0xc0,0x6a,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], s[0:1], vcc_hi ; encoding: [0x00,0x00,0x10,0xc0,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x10,0xc0,0x6b,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], s[0:1], tba_lo ; encoding: [0x00,0x00,0x10,0xc0,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x10,0xc0,0x6c,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], s[0:1], tba_hi ; encoding: [0x00,0x00,0x10,0xc0,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x10,0xc0,0x6d,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], s[0:1], tma_lo ; encoding: [0x00,0x00,0x10,0xc0,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x10,0xc0,0x6e,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], s[0:1], tma_hi ; encoding: [0x00,0x00,0x10,0xc0,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x10,0xc0,0x6f,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], s[0:1], ttmp11 ; encoding: [0x00,0x00,0x10,0xc0,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x10,0xc0,0x7b,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], s[0:1], m0 ; encoding: [0x00,0x00,0x10,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x10,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], s[0:1], 0x7ffff ; encoding: [0x00,0x00,0x12,0xc0,0xff,0xff,0x07,0x00]
+0x00,0x00,0x12,0xc0,0xff,0xff,0x07,0x00
+
+# CHECK: s_load_dwordx16 s[0:15], s[0:1], s0 glc ; encoding: [0x00,0x00,0x11,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x11,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword s0, s[0:3], s0 ; encoding: [0x00,0x00,0x20,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x20,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword s101, s[0:3], s0 ; encoding: [0x40,0x19,0x20,0xc0,0x00,0x00,0x00,0x00]
+0x40,0x19,0x20,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword flat_scratch_lo, s[0:3], s0 ; encoding: [0x80,0x19,0x20,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x19,0x20,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword flat_scratch_hi, s[0:3], s0 ; encoding: [0xc0,0x19,0x20,0xc0,0x00,0x00,0x00,0x00]
+0xc0,0x19,0x20,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword vcc_lo, s[0:3], s0 ; encoding: [0x80,0x1a,0x20,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x1a,0x20,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword vcc_hi, s[0:3], s0 ; encoding: [0xc0,0x1a,0x20,0xc0,0x00,0x00,0x00,0x00]
+0xc0,0x1a,0x20,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword tba_lo, s[0:3], s0 ; encoding: [0x00,0x1b,0x20,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x1b,0x20,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword tba_hi, s[0:3], s0 ; encoding: [0x40,0x1b,0x20,0xc0,0x00,0x00,0x00,0x00]
+0x40,0x1b,0x20,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword tma_lo, s[0:3], s0 ; encoding: [0x80,0x1b,0x20,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x1b,0x20,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword tma_hi, s[0:3], s0 ; encoding: [0xc0,0x1b,0x20,0xc0,0x00,0x00,0x00,0x00]
+0xc0,0x1b,0x20,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword ttmp11, s[0:3], s0 ; encoding: [0xc0,0x1e,0x20,0xc0,0x00,0x00,0x00,0x00]
+0xc0,0x1e,0x20,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword s0, s[4:7], s0 ; encoding: [0x02,0x00,0x20,0xc0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x20,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword s0, s[96:99], s0 ; encoding: [0x30,0x00,0x20,0xc0,0x00,0x00,0x00,0x00]
+0x30,0x00,0x20,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword s0, ttmp[8:11], s0 ; encoding: [0x3c,0x00,0x20,0xc0,0x00,0x00,0x00,0x00]
+0x3c,0x00,0x20,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword s0, s[0:3], s101 ; encoding: [0x00,0x00,0x20,0xc0,0x65,0x00,0x00,0x00]
+0x00,0x00,0x20,0xc0,0x65,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword s0, s[0:3], flat_scratch_lo ; encoding: [0x00,0x00,0x20,0xc0,0x66,0x00,0x00,0x00]
+0x00,0x00,0x20,0xc0,0x66,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword s0, s[0:3], flat_scratch_hi ; encoding: [0x00,0x00,0x20,0xc0,0x67,0x00,0x00,0x00]
+0x00,0x00,0x20,0xc0,0x67,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword s0, s[0:3], vcc_lo ; encoding: [0x00,0x00,0x20,0xc0,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x20,0xc0,0x6a,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword s0, s[0:3], vcc_hi ; encoding: [0x00,0x00,0x20,0xc0,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x20,0xc0,0x6b,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword s0, s[0:3], tba_lo ; encoding: [0x00,0x00,0x20,0xc0,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x20,0xc0,0x6c,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword s0, s[0:3], tba_hi ; encoding: [0x00,0x00,0x20,0xc0,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x20,0xc0,0x6d,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword s0, s[0:3], tma_lo ; encoding: [0x00,0x00,0x20,0xc0,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x20,0xc0,0x6e,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword s0, s[0:3], tma_hi ; encoding: [0x00,0x00,0x20,0xc0,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x20,0xc0,0x6f,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword s0, s[0:3], ttmp11 ; encoding: [0x00,0x00,0x20,0xc0,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x20,0xc0,0x7b,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword s0, s[0:3], m0 ; encoding: [0x00,0x00,0x20,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x20,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dword s0, s[0:3], 0x7ffff ; encoding: [0x00,0x00,0x22,0xc0,0xff,0xff,0x07,0x00]
+0x00,0x00,0x22,0xc0,0xff,0xff,0x07,0x00
+
+# CHECK: s_buffer_load_dword s0, s[0:3], s0 glc ; encoding: [0x00,0x00,0x21,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x21,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[0:1], s[0:3], s0 ; encoding: [0x00,0x00,0x24,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x24,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[2:3], s[0:3], s0 ; encoding: [0x80,0x00,0x24,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x00,0x24,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[100:101], s[0:3], s0 ; encoding: [0x00,0x19,0x24,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x19,0x24,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 flat_scratch, s[0:3], s0 ; encoding: [0x80,0x19,0x24,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x19,0x24,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 vcc, s[0:3], s0 ; encoding: [0x80,0x1a,0x24,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x1a,0x24,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 tba, s[0:3], s0 ; encoding: [0x00,0x1b,0x24,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x1b,0x24,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 tma, s[0:3], s0 ; encoding: [0x80,0x1b,0x24,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x1b,0x24,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 ttmp[10:11], s[0:3], s0 ; encoding: [0x80,0x1e,0x24,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x1e,0x24,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[0:1], s[4:7], s0 ; encoding: [0x02,0x00,0x24,0xc0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x24,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[0:1], s[96:99], s0 ; encoding: [0x30,0x00,0x24,0xc0,0x00,0x00,0x00,0x00]
+0x30,0x00,0x24,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[0:1], ttmp[8:11], s0 ; encoding: [0x3c,0x00,0x24,0xc0,0x00,0x00,0x00,0x00]
+0x3c,0x00,0x24,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[0:1], s[0:3], s101 ; encoding: [0x00,0x00,0x24,0xc0,0x65,0x00,0x00,0x00]
+0x00,0x00,0x24,0xc0,0x65,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[0:1], s[0:3], flat_scratch_lo ; encoding: [0x00,0x00,0x24,0xc0,0x66,0x00,0x00,0x00]
+0x00,0x00,0x24,0xc0,0x66,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[0:1], s[0:3], flat_scratch_hi ; encoding: [0x00,0x00,0x24,0xc0,0x67,0x00,0x00,0x00]
+0x00,0x00,0x24,0xc0,0x67,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[0:1], s[0:3], vcc_lo ; encoding: [0x00,0x00,0x24,0xc0,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x24,0xc0,0x6a,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[0:1], s[0:3], vcc_hi ; encoding: [0x00,0x00,0x24,0xc0,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x24,0xc0,0x6b,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[0:1], s[0:3], tba_lo ; encoding: [0x00,0x00,0x24,0xc0,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x24,0xc0,0x6c,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[0:1], s[0:3], tba_hi ; encoding: [0x00,0x00,0x24,0xc0,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x24,0xc0,0x6d,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[0:1], s[0:3], tma_lo ; encoding: [0x00,0x00,0x24,0xc0,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x24,0xc0,0x6e,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[0:1], s[0:3], tma_hi ; encoding: [0x00,0x00,0x24,0xc0,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x24,0xc0,0x6f,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[0:1], s[0:3], ttmp11 ; encoding: [0x00,0x00,0x24,0xc0,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x24,0xc0,0x7b,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[0:1], s[0:3], m0 ; encoding: [0x00,0x00,0x24,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x24,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[0:1], s[0:3], 0x7ffff ; encoding: [0x00,0x00,0x26,0xc0,0xff,0xff,0x07,0x00]
+0x00,0x00,0x26,0xc0,0xff,0xff,0x07,0x00
+
+# CHECK: s_buffer_load_dwordx2 s[0:1], s[0:3], s0 glc ; encoding: [0x00,0x00,0x25,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x25,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[0:3], s[0:3], s0 ; encoding: [0x00,0x00,0x28,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x28,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[4:7], s[0:3], s0 ; encoding: [0x00,0x01,0x28,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x01,0x28,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[96:99], s[0:3], s0 ; encoding: [0x00,0x18,0x28,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x18,0x28,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 ttmp[8:11], s[0:3], s0 ; encoding: [0x00,0x1e,0x28,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x1e,0x28,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[0:3], s[4:7], s0 ; encoding: [0x02,0x00,0x28,0xc0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x28,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[0:3], s[96:99], s0 ; encoding: [0x30,0x00,0x28,0xc0,0x00,0x00,0x00,0x00]
+0x30,0x00,0x28,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[0:3], ttmp[8:11], s0 ; encoding: [0x3c,0x00,0x28,0xc0,0x00,0x00,0x00,0x00]
+0x3c,0x00,0x28,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[0:3], s[0:3], s101 ; encoding: [0x00,0x00,0x28,0xc0,0x65,0x00,0x00,0x00]
+0x00,0x00,0x28,0xc0,0x65,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[0:3], s[0:3], flat_scratch_lo ; encoding: [0x00,0x00,0x28,0xc0,0x66,0x00,0x00,0x00]
+0x00,0x00,0x28,0xc0,0x66,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[0:3], s[0:3], flat_scratch_hi ; encoding: [0x00,0x00,0x28,0xc0,0x67,0x00,0x00,0x00]
+0x00,0x00,0x28,0xc0,0x67,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[0:3], s[0:3], vcc_lo ; encoding: [0x00,0x00,0x28,0xc0,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x28,0xc0,0x6a,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[0:3], s[0:3], vcc_hi ; encoding: [0x00,0x00,0x28,0xc0,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x28,0xc0,0x6b,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[0:3], s[0:3], tba_lo ; encoding: [0x00,0x00,0x28,0xc0,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x28,0xc0,0x6c,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[0:3], s[0:3], tba_hi ; encoding: [0x00,0x00,0x28,0xc0,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x28,0xc0,0x6d,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[0:3], s[0:3], tma_lo ; encoding: [0x00,0x00,0x28,0xc0,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x28,0xc0,0x6e,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[0:3], s[0:3], tma_hi ; encoding: [0x00,0x00,0x28,0xc0,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x28,0xc0,0x6f,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[0:3], s[0:3], ttmp11 ; encoding: [0x00,0x00,0x28,0xc0,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x28,0xc0,0x7b,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[0:3], s[0:3], m0 ; encoding: [0x00,0x00,0x28,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x28,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[0:3], s[0:3], 0x7ffff ; encoding: [0x00,0x00,0x2a,0xc0,0xff,0xff,0x07,0x00]
+0x00,0x00,0x2a,0xc0,0xff,0xff,0x07,0x00
+
+# CHECK: s_buffer_load_dwordx4 s[0:3], s[0:3], s0 glc ; encoding: [0x00,0x00,0x29,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x29,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[0:7], s[0:3], s0 ; encoding: [0x00,0x00,0x2c,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[4:11], s[0:3], s0 ; encoding: [0x00,0x01,0x2c,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x01,0x2c,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[92:99], s[0:3], s0 ; encoding: [0x00,0x17,0x2c,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x17,0x2c,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[0:7], s[4:7], s0 ; encoding: [0x02,0x00,0x2c,0xc0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x2c,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[0:7], s[96:99], s0 ; encoding: [0x30,0x00,0x2c,0xc0,0x00,0x00,0x00,0x00]
+0x30,0x00,0x2c,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[0:7], ttmp[8:11], s0 ; encoding: [0x3c,0x00,0x2c,0xc0,0x00,0x00,0x00,0x00]
+0x3c,0x00,0x2c,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[0:7], s[0:3], s101 ; encoding: [0x00,0x00,0x2c,0xc0,0x65,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xc0,0x65,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[0:7], s[0:3], flat_scratch_lo ; encoding: [0x00,0x00,0x2c,0xc0,0x66,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xc0,0x66,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[0:7], s[0:3], flat_scratch_hi ; encoding: [0x00,0x00,0x2c,0xc0,0x67,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xc0,0x67,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[0:7], s[0:3], vcc_lo ; encoding: [0x00,0x00,0x2c,0xc0,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xc0,0x6a,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[0:7], s[0:3], vcc_hi ; encoding: [0x00,0x00,0x2c,0xc0,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xc0,0x6b,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[0:7], s[0:3], tba_lo ; encoding: [0x00,0x00,0x2c,0xc0,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xc0,0x6c,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[0:7], s[0:3], tba_hi ; encoding: [0x00,0x00,0x2c,0xc0,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xc0,0x6d,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[0:7], s[0:3], tma_lo ; encoding: [0x00,0x00,0x2c,0xc0,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xc0,0x6e,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[0:7], s[0:3], tma_hi ; encoding: [0x00,0x00,0x2c,0xc0,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xc0,0x6f,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[0:7], s[0:3], ttmp11 ; encoding: [0x00,0x00,0x2c,0xc0,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xc0,0x7b,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[0:7], s[0:3], m0 ; encoding: [0x00,0x00,0x2c,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[0:7], s[0:3], 0x7ffff ; encoding: [0x00,0x00,0x2e,0xc0,0xff,0xff,0x07,0x00]
+0x00,0x00,0x2e,0xc0,0xff,0xff,0x07,0x00
+
+# CHECK: s_buffer_load_dwordx8 s[0:7], s[0:3], s0 glc ; encoding: [0x00,0x00,0x2d,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2d,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[0:15], s[0:3], s0 ; encoding: [0x00,0x00,0x30,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x30,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[4:19], s[0:3], s0 ; encoding: [0x00,0x01,0x30,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x01,0x30,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[84:99], s[0:3], s0 ; encoding: [0x00,0x15,0x30,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x15,0x30,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[0:15], s[4:7], s0 ; encoding: [0x02,0x00,0x30,0xc0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x30,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[0:15], s[96:99], s0 ; encoding: [0x30,0x00,0x30,0xc0,0x00,0x00,0x00,0x00]
+0x30,0x00,0x30,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[0:15], ttmp[8:11], s0 ; encoding: [0x3c,0x00,0x30,0xc0,0x00,0x00,0x00,0x00]
+0x3c,0x00,0x30,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[0:15], s[0:3], s101 ; encoding: [0x00,0x00,0x30,0xc0,0x65,0x00,0x00,0x00]
+0x00,0x00,0x30,0xc0,0x65,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[0:15], s[0:3], flat_scratch_lo ; encoding: [0x00,0x00,0x30,0xc0,0x66,0x00,0x00,0x00]
+0x00,0x00,0x30,0xc0,0x66,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[0:15], s[0:3], flat_scratch_hi ; encoding: [0x00,0x00,0x30,0xc0,0x67,0x00,0x00,0x00]
+0x00,0x00,0x30,0xc0,0x67,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[0:15], s[0:3], vcc_lo ; encoding: [0x00,0x00,0x30,0xc0,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x30,0xc0,0x6a,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[0:15], s[0:3], vcc_hi ; encoding: [0x00,0x00,0x30,0xc0,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x30,0xc0,0x6b,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[0:15], s[0:3], tba_lo ; encoding: [0x00,0x00,0x30,0xc0,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x30,0xc0,0x6c,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[0:15], s[0:3], tba_hi ; encoding: [0x00,0x00,0x30,0xc0,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x30,0xc0,0x6d,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[0:15], s[0:3], tma_lo ; encoding: [0x00,0x00,0x30,0xc0,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x30,0xc0,0x6e,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[0:15], s[0:3], tma_hi ; encoding: [0x00,0x00,0x30,0xc0,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x30,0xc0,0x6f,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[0:15], s[0:3], ttmp11 ; encoding: [0x00,0x00,0x30,0xc0,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x30,0xc0,0x7b,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[0:15], s[0:3], m0 ; encoding: [0x00,0x00,0x30,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x30,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[0:15], s[0:3], 0x7ffff ; encoding: [0x00,0x00,0x32,0xc0,0xff,0xff,0x07,0x00]
+0x00,0x00,0x32,0xc0,0xff,0xff,0x07,0x00
+
+# CHECK: s_buffer_load_dwordx16 s[0:15], s[0:3], s0 glc ; encoding: [0x00,0x00,0x31,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x31,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_store_dword s0, s[0:1], m0 ; encoding: [0x00,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dword s101, s[0:1], m0 ; encoding: [0x40,0x19,0x40,0xc0,0x7c,0x00,0x00,0x00]
+0x40,0x19,0x40,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dword flat_scratch_lo, s[0:1], m0 ; encoding: [0x80,0x19,0x40,0xc0,0x7c,0x00,0x00,0x00]
+0x80,0x19,0x40,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dword flat_scratch_hi, s[0:1], m0 ; encoding: [0xc0,0x19,0x40,0xc0,0x7c,0x00,0x00,0x00]
+0xc0,0x19,0x40,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dword vcc_lo, s[0:1], m0 ; encoding: [0x80,0x1a,0x40,0xc0,0x7c,0x00,0x00,0x00]
+0x80,0x1a,0x40,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dword vcc_hi, s[0:1], m0 ; encoding: [0xc0,0x1a,0x40,0xc0,0x7c,0x00,0x00,0x00]
+0xc0,0x1a,0x40,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dword tba_lo, s[0:1], m0 ; encoding: [0x00,0x1b,0x40,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x1b,0x40,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dword tba_hi, s[0:1], m0 ; encoding: [0x40,0x1b,0x40,0xc0,0x7c,0x00,0x00,0x00]
+0x40,0x1b,0x40,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dword tma_lo, s[0:1], m0 ; encoding: [0x80,0x1b,0x40,0xc0,0x7c,0x00,0x00,0x00]
+0x80,0x1b,0x40,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dword tma_hi, s[0:1], m0 ; encoding: [0xc0,0x1b,0x40,0xc0,0x7c,0x00,0x00,0x00]
+0xc0,0x1b,0x40,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dword ttmp11, s[0:1], m0 ; encoding: [0xc0,0x1e,0x40,0xc0,0x7c,0x00,0x00,0x00]
+0xc0,0x1e,0x40,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dword s0, s[2:3], m0 ; encoding: [0x01,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00]
+0x01,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dword s0, s[100:101], m0 ; encoding: [0x32,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00]
+0x32,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dword s0, flat_scratch, m0 ; encoding: [0x33,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00]
+0x33,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dword s0, vcc, m0 ; encoding: [0x35,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00]
+0x35,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dword s0, tba, m0 ; encoding: [0x36,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00]
+0x36,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dword s0, tma, m0 ; encoding: [0x37,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00]
+0x37,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dword s0, ttmp[10:11], m0 ; encoding: [0x3d,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00]
+0x3d,0x00,0x40,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dword s0, s[0:1], 0x7ffff ; encoding: [0x00,0x00,0x42,0xc0,0xff,0xff,0x07,0x00]
+0x00,0x00,0x42,0xc0,0xff,0xff,0x07,0x00
+
+# CHECK: s_store_dword s0, s[0:1], m0 glc ; encoding: [0x00,0x00,0x41,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x41,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx2 s[0:1], s[0:1], m0 ; encoding: [0x00,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx2 s[2:3], s[0:1], m0 ; encoding: [0x80,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00]
+0x80,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx2 s[100:101], s[0:1], m0 ; encoding: [0x00,0x19,0x44,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x19,0x44,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx2 flat_scratch, s[0:1], m0 ; encoding: [0x80,0x19,0x44,0xc0,0x7c,0x00,0x00,0x00]
+0x80,0x19,0x44,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx2 vcc, s[0:1], m0 ; encoding: [0x80,0x1a,0x44,0xc0,0x7c,0x00,0x00,0x00]
+0x80,0x1a,0x44,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx2 tba, s[0:1], m0 ; encoding: [0x00,0x1b,0x44,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x1b,0x44,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx2 tma, s[0:1], m0 ; encoding: [0x80,0x1b,0x44,0xc0,0x7c,0x00,0x00,0x00]
+0x80,0x1b,0x44,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx2 ttmp[10:11], s[0:1], m0 ; encoding: [0x80,0x1e,0x44,0xc0,0x7c,0x00,0x00,0x00]
+0x80,0x1e,0x44,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx2 s[0:1], s[2:3], m0 ; encoding: [0x01,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00]
+0x01,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx2 s[0:1], s[100:101], m0 ; encoding: [0x32,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00]
+0x32,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx2 s[0:1], flat_scratch, m0 ; encoding: [0x33,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00]
+0x33,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx2 s[0:1], vcc, m0 ; encoding: [0x35,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00]
+0x35,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx2 s[0:1], tba, m0 ; encoding: [0x36,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00]
+0x36,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx2 s[0:1], tma, m0 ; encoding: [0x37,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00]
+0x37,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx2 s[0:1], ttmp[10:11], m0 ; encoding: [0x3d,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00]
+0x3d,0x00,0x44,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx2 s[0:1], s[0:1], 0x7ffff ; encoding: [0x00,0x00,0x46,0xc0,0xff,0xff,0x07,0x00]
+0x00,0x00,0x46,0xc0,0xff,0xff,0x07,0x00
+
+# CHECK: s_store_dwordx2 s[0:1], s[0:1], m0 glc ; encoding: [0x00,0x00,0x45,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x45,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx4 s[0:3], s[0:1], m0 ; encoding: [0x00,0x00,0x48,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x48,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx4 s[4:7], s[0:1], m0 ; encoding: [0x00,0x01,0x48,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x01,0x48,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx4 s[96:99], s[0:1], m0 ; encoding: [0x00,0x18,0x48,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x18,0x48,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx4 ttmp[8:11], s[0:1], m0 ; encoding: [0x00,0x1e,0x48,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x1e,0x48,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx4 s[0:3], s[2:3], m0 ; encoding: [0x01,0x00,0x48,0xc0,0x7c,0x00,0x00,0x00]
+0x01,0x00,0x48,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx4 s[0:3], s[100:101], m0 ; encoding: [0x32,0x00,0x48,0xc0,0x7c,0x00,0x00,0x00]
+0x32,0x00,0x48,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx4 s[0:3], flat_scratch, m0 ; encoding: [0x33,0x00,0x48,0xc0,0x7c,0x00,0x00,0x00]
+0x33,0x00,0x48,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx4 s[0:3], vcc, m0 ; encoding: [0x35,0x00,0x48,0xc0,0x7c,0x00,0x00,0x00]
+0x35,0x00,0x48,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx4 s[0:3], tba, m0 ; encoding: [0x36,0x00,0x48,0xc0,0x7c,0x00,0x00,0x00]
+0x36,0x00,0x48,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx4 s[0:3], tma, m0 ; encoding: [0x37,0x00,0x48,0xc0,0x7c,0x00,0x00,0x00]
+0x37,0x00,0x48,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx4 s[0:3], ttmp[10:11], m0 ; encoding: [0x3d,0x00,0x48,0xc0,0x7c,0x00,0x00,0x00]
+0x3d,0x00,0x48,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_store_dwordx4 s[0:3], s[0:1], 0x7ffff ; encoding: [0x00,0x00,0x4a,0xc0,0xff,0xff,0x07,0x00]
+0x00,0x00,0x4a,0xc0,0xff,0xff,0x07,0x00
+
+# CHECK: s_store_dwordx4 s[0:3], s[0:1], m0 glc ; encoding: [0x00,0x00,0x49,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x49,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dword s0, s[0:3], m0 ; encoding: [0x00,0x00,0x60,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x60,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dword s101, s[0:3], m0 ; encoding: [0x40,0x19,0x60,0xc0,0x7c,0x00,0x00,0x00]
+0x40,0x19,0x60,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dword flat_scratch_lo, s[0:3], m0 ; encoding: [0x80,0x19,0x60,0xc0,0x7c,0x00,0x00,0x00]
+0x80,0x19,0x60,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dword flat_scratch_hi, s[0:3], m0 ; encoding: [0xc0,0x19,0x60,0xc0,0x7c,0x00,0x00,0x00]
+0xc0,0x19,0x60,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dword vcc_lo, s[0:3], m0 ; encoding: [0x80,0x1a,0x60,0xc0,0x7c,0x00,0x00,0x00]
+0x80,0x1a,0x60,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dword vcc_hi, s[0:3], m0 ; encoding: [0xc0,0x1a,0x60,0xc0,0x7c,0x00,0x00,0x00]
+0xc0,0x1a,0x60,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dword tba_lo, s[0:3], m0 ; encoding: [0x00,0x1b,0x60,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x1b,0x60,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dword tba_hi, s[0:3], m0 ; encoding: [0x40,0x1b,0x60,0xc0,0x7c,0x00,0x00,0x00]
+0x40,0x1b,0x60,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dword tma_lo, s[0:3], m0 ; encoding: [0x80,0x1b,0x60,0xc0,0x7c,0x00,0x00,0x00]
+0x80,0x1b,0x60,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dword tma_hi, s[0:3], m0 ; encoding: [0xc0,0x1b,0x60,0xc0,0x7c,0x00,0x00,0x00]
+0xc0,0x1b,0x60,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dword ttmp11, s[0:3], m0 ; encoding: [0xc0,0x1e,0x60,0xc0,0x7c,0x00,0x00,0x00]
+0xc0,0x1e,0x60,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dword s0, s[4:7], m0 ; encoding: [0x02,0x00,0x60,0xc0,0x7c,0x00,0x00,0x00]
+0x02,0x00,0x60,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dword s0, s[96:99], m0 ; encoding: [0x30,0x00,0x60,0xc0,0x7c,0x00,0x00,0x00]
+0x30,0x00,0x60,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dword s0, ttmp[8:11], m0 ; encoding: [0x3c,0x00,0x60,0xc0,0x7c,0x00,0x00,0x00]
+0x3c,0x00,0x60,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dword s0, s[0:3], 0x7ffff ; encoding: [0x00,0x00,0x62,0xc0,0xff,0xff,0x07,0x00]
+0x00,0x00,0x62,0xc0,0xff,0xff,0x07,0x00
+
+# CHECK: s_buffer_store_dword s0, s[0:3], m0 glc ; encoding: [0x00,0x00,0x61,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x61,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx2 s[0:1], s[0:3], m0 ; encoding: [0x00,0x00,0x64,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x64,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx2 s[2:3], s[0:3], m0 ; encoding: [0x80,0x00,0x64,0xc0,0x7c,0x00,0x00,0x00]
+0x80,0x00,0x64,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx2 s[100:101], s[0:3], m0 ; encoding: [0x00,0x19,0x64,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x19,0x64,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx2 flat_scratch, s[0:3], m0 ; encoding: [0x80,0x19,0x64,0xc0,0x7c,0x00,0x00,0x00]
+0x80,0x19,0x64,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx2 vcc, s[0:3], m0 ; encoding: [0x80,0x1a,0x64,0xc0,0x7c,0x00,0x00,0x00]
+0x80,0x1a,0x64,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx2 tba, s[0:3], m0 ; encoding: [0x00,0x1b,0x64,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x1b,0x64,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx2 tma, s[0:3], m0 ; encoding: [0x80,0x1b,0x64,0xc0,0x7c,0x00,0x00,0x00]
+0x80,0x1b,0x64,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx2 ttmp[10:11], s[0:3], m0 ; encoding: [0x80,0x1e,0x64,0xc0,0x7c,0x00,0x00,0x00]
+0x80,0x1e,0x64,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx2 s[0:1], s[4:7], m0 ; encoding: [0x02,0x00,0x64,0xc0,0x7c,0x00,0x00,0x00]
+0x02,0x00,0x64,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx2 s[0:1], s[96:99], m0 ; encoding: [0x30,0x00,0x64,0xc0,0x7c,0x00,0x00,0x00]
+0x30,0x00,0x64,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx2 s[0:1], ttmp[8:11], m0 ; encoding: [0x3c,0x00,0x64,0xc0,0x7c,0x00,0x00,0x00]
+0x3c,0x00,0x64,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx2 s[0:1], s[0:3], 0x7ffff ; encoding: [0x00,0x00,0x66,0xc0,0xff,0xff,0x07,0x00]
+0x00,0x00,0x66,0xc0,0xff,0xff,0x07,0x00
+
+# CHECK: s_buffer_store_dwordx2 s[0:1], s[0:3], m0 glc ; encoding: [0x00,0x00,0x65,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x65,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx4 s[0:3], s[0:3], m0 ; encoding: [0x00,0x00,0x68,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x68,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx4 s[4:7], s[0:3], m0 ; encoding: [0x00,0x01,0x68,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x01,0x68,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx4 s[96:99], s[0:3], m0 ; encoding: [0x00,0x18,0x68,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x18,0x68,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx4 ttmp[8:11], s[0:3], m0 ; encoding: [0x00,0x1e,0x68,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x1e,0x68,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx4 s[0:3], s[4:7], m0 ; encoding: [0x02,0x00,0x68,0xc0,0x7c,0x00,0x00,0x00]
+0x02,0x00,0x68,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx4 s[0:3], s[96:99], m0 ; encoding: [0x30,0x00,0x68,0xc0,0x7c,0x00,0x00,0x00]
+0x30,0x00,0x68,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx4 s[0:3], ttmp[8:11], m0 ; encoding: [0x3c,0x00,0x68,0xc0,0x7c,0x00,0x00,0x00]
+0x3c,0x00,0x68,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_buffer_store_dwordx4 s[0:3], s[0:3], 0x7ffff ; encoding: [0x00,0x00,0x6a,0xc0,0xff,0xff,0x07,0x00]
+0x00,0x00,0x6a,0xc0,0xff,0xff,0x07,0x00
+
+# CHECK: s_buffer_store_dwordx4 s[0:3], s[0:3], m0 glc ; encoding: [0x00,0x00,0x69,0xc0,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x69,0xc0,0x7c,0x00,0x00,0x00
+
+# CHECK: s_dcache_inv ; encoding: [0x00,0x00,0x80,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x80,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_dcache_wb ; encoding: [0x00,0x00,0x84,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x84,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_dcache_inv_vol ; encoding: [0x00,0x00,0x88,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x88,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_dcache_wb_vol ; encoding: [0x00,0x00,0x8c,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_memtime s[0:1] ; encoding: [0x00,0x00,0x90,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x90,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_memtime s[2:3] ; encoding: [0x80,0x00,0x90,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x00,0x90,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_memtime s[100:101] ; encoding: [0x00,0x19,0x90,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x19,0x90,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_memtime flat_scratch ; encoding: [0x80,0x19,0x90,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x19,0x90,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_memtime vcc ; encoding: [0x80,0x1a,0x90,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x1a,0x90,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_memtime tba ; encoding: [0x00,0x1b,0x90,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x1b,0x90,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_memtime tma ; encoding: [0x80,0x1b,0x90,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x1b,0x90,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_memtime ttmp[10:11] ; encoding: [0x80,0x1e,0x90,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x1e,0x90,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_memrealtime s[0:1] ; encoding: [0x00,0x00,0x94,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x94,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_memrealtime s[2:3] ; encoding: [0x80,0x00,0x94,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x00,0x94,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_memrealtime s[100:101] ; encoding: [0x00,0x19,0x94,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x19,0x94,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_memrealtime flat_scratch ; encoding: [0x80,0x19,0x94,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x19,0x94,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_memrealtime vcc ; encoding: [0x80,0x1a,0x94,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x1a,0x94,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_memrealtime tba ; encoding: [0x00,0x1b,0x94,0xc0,0x00,0x00,0x00,0x00]
+0x00,0x1b,0x94,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_memrealtime tma ; encoding: [0x80,0x1b,0x94,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x1b,0x94,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_memrealtime ttmp[10:11] ; encoding: [0x80,0x1e,0x94,0xc0,0x00,0x00,0x00,0x00]
+0x80,0x1e,0x94,0xc0,0x00,0x00,0x00,0x00
+
+# CHECK: s_mov_b32 s0, s0 ; encoding: [0x00,0x00,0x80,0xbe]
+0x00,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s101, s0 ; encoding: [0x00,0x00,0xe5,0xbe]
+0x00,0x00,0xe5,0xbe
+
+# CHECK: s_mov_b32 flat_scratch_lo, s0 ; encoding: [0x00,0x00,0xe6,0xbe]
+0x00,0x00,0xe6,0xbe
+
+# CHECK: s_mov_b32 flat_scratch_hi, s0 ; encoding: [0x00,0x00,0xe7,0xbe]
+0x00,0x00,0xe7,0xbe
+
+# CHECK: s_mov_b32 vcc_lo, s0 ; encoding: [0x00,0x00,0xea,0xbe]
+0x00,0x00,0xea,0xbe
+
+# CHECK: s_mov_b32 vcc_hi, s0 ; encoding: [0x00,0x00,0xeb,0xbe]
+0x00,0x00,0xeb,0xbe
+
+# CHECK: s_mov_b32 tba_lo, s0 ; encoding: [0x00,0x00,0xec,0xbe]
+0x00,0x00,0xec,0xbe
+
+# CHECK: s_mov_b32 tba_hi, s0 ; encoding: [0x00,0x00,0xed,0xbe]
+0x00,0x00,0xed,0xbe
+
+# CHECK: s_mov_b32 tma_lo, s0 ; encoding: [0x00,0x00,0xee,0xbe]
+0x00,0x00,0xee,0xbe
+
+# CHECK: s_mov_b32 tma_hi, s0 ; encoding: [0x00,0x00,0xef,0xbe]
+0x00,0x00,0xef,0xbe
+
+# CHECK: s_mov_b32 ttmp11, s0 ; encoding: [0x00,0x00,0xfb,0xbe]
+0x00,0x00,0xfb,0xbe
+
+# CHECK: s_mov_b32 m0, s0 ; encoding: [0x00,0x00,0xfc,0xbe]
+0x00,0x00,0xfc,0xbe
+
+# CHECK: s_mov_b32 exec_lo, s0 ; encoding: [0x00,0x00,0xfe,0xbe]
+0x00,0x00,0xfe,0xbe
+
+# CHECK: s_mov_b32 exec_hi, s0 ; encoding: [0x00,0x00,0xff,0xbe]
+0x00,0x00,0xff,0xbe
+
+# CHECK: s_mov_b32 s0, s101 ; encoding: [0x65,0x00,0x80,0xbe]
+0x65,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s0, flat_scratch_lo ; encoding: [0x66,0x00,0x80,0xbe]
+0x66,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s0, flat_scratch_hi ; encoding: [0x67,0x00,0x80,0xbe]
+0x67,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s0, vcc_lo ; encoding: [0x6a,0x00,0x80,0xbe]
+0x6a,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s0, vcc_hi ; encoding: [0x6b,0x00,0x80,0xbe]
+0x6b,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s0, tba_lo ; encoding: [0x6c,0x00,0x80,0xbe]
+0x6c,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s0, tba_hi ; encoding: [0x6d,0x00,0x80,0xbe]
+0x6d,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s0, tma_lo ; encoding: [0x6e,0x00,0x80,0xbe]
+0x6e,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s0, tma_hi ; encoding: [0x6f,0x00,0x80,0xbe]
+0x6f,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s0, ttmp11 ; encoding: [0x7b,0x00,0x80,0xbe]
+0x7b,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s0, m0 ; encoding: [0x7c,0x00,0x80,0xbe]
+0x7c,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s0, exec_lo ; encoding: [0x7e,0x00,0x80,0xbe]
+0x7e,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s0, exec_hi ; encoding: [0x7f,0x00,0x80,0xbe]
+0x7f,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s0, 0 ; encoding: [0x80,0x00,0x80,0xbe]
+0x80,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s0, -1 ; encoding: [0xc1,0x00,0x80,0xbe]
+0xc1,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s0, 0.5 ; encoding: [0xf0,0x00,0x80,0xbe]
+0xf0,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s0, -4.0 ; encoding: [0xf7,0x00,0x80,0xbe]
+0xf7,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s0, scc ; encoding: [0xfd,0x00,0x80,0xbe]
+0xfd,0x00,0x80,0xbe
+
+# CHECK: s_mov_b32 s0, 0xaf123456 ; encoding: [0xff,0x00,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_mov_b32 s0, 0x3f717273 ; encoding: [0xff,0x00,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_mov_b64 s[0:1], s[0:1] ; encoding: [0x00,0x01,0x80,0xbe]
+0x00,0x01,0x80,0xbe
+
+# CHECK: s_mov_b64 s[2:3], s[0:1] ; encoding: [0x00,0x01,0x82,0xbe]
+0x00,0x01,0x82,0xbe
+
+# CHECK: s_mov_b64 s[100:101], s[0:1] ; encoding: [0x00,0x01,0xe4,0xbe]
+0x00,0x01,0xe4,0xbe
+
+# CHECK: s_mov_b64 flat_scratch, s[0:1] ; encoding: [0x00,0x01,0xe6,0xbe]
+0x00,0x01,0xe6,0xbe
+
+# CHECK: s_mov_b64 vcc, s[0:1] ; encoding: [0x00,0x01,0xea,0xbe]
+0x00,0x01,0xea,0xbe
+
+# CHECK: s_mov_b64 tba, s[0:1] ; encoding: [0x00,0x01,0xec,0xbe]
+0x00,0x01,0xec,0xbe
+
+# CHECK: s_mov_b64 tma, s[0:1] ; encoding: [0x00,0x01,0xee,0xbe]
+0x00,0x01,0xee,0xbe
+
+# CHECK: s_mov_b64 ttmp[10:11], s[0:1] ; encoding: [0x00,0x01,0xfa,0xbe]
+0x00,0x01,0xfa,0xbe
+
+# CHECK: s_mov_b64 exec, s[0:1] ; encoding: [0x00,0x01,0xfe,0xbe]
+0x00,0x01,0xfe,0xbe
+
+# CHECK: s_mov_b64 s[0:1], s[2:3] ; encoding: [0x02,0x01,0x80,0xbe]
+0x02,0x01,0x80,0xbe
+
+# CHECK: s_mov_b64 s[0:1], s[100:101] ; encoding: [0x64,0x01,0x80,0xbe]
+0x64,0x01,0x80,0xbe
+
+# CHECK: s_mov_b64 s[0:1], flat_scratch ; encoding: [0x66,0x01,0x80,0xbe]
+0x66,0x01,0x80,0xbe
+
+# CHECK: s_mov_b64 s[0:1], vcc ; encoding: [0x6a,0x01,0x80,0xbe]
+0x6a,0x01,0x80,0xbe
+
+# CHECK: s_mov_b64 s[0:1], tba ; encoding: [0x6c,0x01,0x80,0xbe]
+0x6c,0x01,0x80,0xbe
+
+# CHECK: s_mov_b64 s[0:1], tma ; encoding: [0x6e,0x01,0x80,0xbe]
+0x6e,0x01,0x80,0xbe
+
+# CHECK: s_mov_b64 s[0:1], ttmp[10:11] ; encoding: [0x7a,0x01,0x80,0xbe]
+0x7a,0x01,0x80,0xbe
+
+# CHECK: s_mov_b64 s[0:1], exec ; encoding: [0x7e,0x01,0x80,0xbe]
+0x7e,0x01,0x80,0xbe
+
+# CHECK: s_mov_b64 s[0:1], 0 ; encoding: [0x80,0x01,0x80,0xbe]
+0x80,0x01,0x80,0xbe
+
+# CHECK: s_mov_b64 s[0:1], -1 ; encoding: [0xc1,0x01,0x80,0xbe]
+0xc1,0x01,0x80,0xbe
+
+# CHECK: s_mov_b64 s[0:1], 0.5 ; encoding: [0xf0,0x01,0x80,0xbe]
+0xf0,0x01,0x80,0xbe
+
+# CHECK: s_mov_b64 s[0:1], -4.0 ; encoding: [0xf7,0x01,0x80,0xbe]
+0xf7,0x01,0x80,0xbe
+
+# CHECK: s_mov_b64 s[0:1], 0xaf123456 ; encoding: [0xff,0x01,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x01,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_mov_b64 s[0:1], 0x3f717273 ; encoding: [0xff,0x01,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x01,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_cmov_b32 s0, s0 ; encoding: [0x00,0x02,0x80,0xbe]
+0x00,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s101, s0 ; encoding: [0x00,0x02,0xe5,0xbe]
+0x00,0x02,0xe5,0xbe
+
+# CHECK: s_cmov_b32 flat_scratch_lo, s0 ; encoding: [0x00,0x02,0xe6,0xbe]
+0x00,0x02,0xe6,0xbe
+
+# CHECK: s_cmov_b32 flat_scratch_hi, s0 ; encoding: [0x00,0x02,0xe7,0xbe]
+0x00,0x02,0xe7,0xbe
+
+# CHECK: s_cmov_b32 vcc_lo, s0 ; encoding: [0x00,0x02,0xea,0xbe]
+0x00,0x02,0xea,0xbe
+
+# CHECK: s_cmov_b32 vcc_hi, s0 ; encoding: [0x00,0x02,0xeb,0xbe]
+0x00,0x02,0xeb,0xbe
+
+# CHECK: s_cmov_b32 tba_lo, s0 ; encoding: [0x00,0x02,0xec,0xbe]
+0x00,0x02,0xec,0xbe
+
+# CHECK: s_cmov_b32 tba_hi, s0 ; encoding: [0x00,0x02,0xed,0xbe]
+0x00,0x02,0xed,0xbe
+
+# CHECK: s_cmov_b32 tma_lo, s0 ; encoding: [0x00,0x02,0xee,0xbe]
+0x00,0x02,0xee,0xbe
+
+# CHECK: s_cmov_b32 tma_hi, s0 ; encoding: [0x00,0x02,0xef,0xbe]
+0x00,0x02,0xef,0xbe
+
+# CHECK: s_cmov_b32 ttmp11, s0 ; encoding: [0x00,0x02,0xfb,0xbe]
+0x00,0x02,0xfb,0xbe
+
+# CHECK: s_cmov_b32 m0, s0 ; encoding: [0x00,0x02,0xfc,0xbe]
+0x00,0x02,0xfc,0xbe
+
+# CHECK: s_cmov_b32 exec_lo, s0 ; encoding: [0x00,0x02,0xfe,0xbe]
+0x00,0x02,0xfe,0xbe
+
+# CHECK: s_cmov_b32 exec_hi, s0 ; encoding: [0x00,0x02,0xff,0xbe]
+0x00,0x02,0xff,0xbe
+
+# CHECK: s_cmov_b32 s0, s101 ; encoding: [0x65,0x02,0x80,0xbe]
+0x65,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s0, flat_scratch_lo ; encoding: [0x66,0x02,0x80,0xbe]
+0x66,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s0, flat_scratch_hi ; encoding: [0x67,0x02,0x80,0xbe]
+0x67,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s0, vcc_lo ; encoding: [0x6a,0x02,0x80,0xbe]
+0x6a,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s0, vcc_hi ; encoding: [0x6b,0x02,0x80,0xbe]
+0x6b,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s0, tba_lo ; encoding: [0x6c,0x02,0x80,0xbe]
+0x6c,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s0, tba_hi ; encoding: [0x6d,0x02,0x80,0xbe]
+0x6d,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s0, tma_lo ; encoding: [0x6e,0x02,0x80,0xbe]
+0x6e,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s0, tma_hi ; encoding: [0x6f,0x02,0x80,0xbe]
+0x6f,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s0, ttmp11 ; encoding: [0x7b,0x02,0x80,0xbe]
+0x7b,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s0, m0 ; encoding: [0x7c,0x02,0x80,0xbe]
+0x7c,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s0, exec_lo ; encoding: [0x7e,0x02,0x80,0xbe]
+0x7e,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s0, exec_hi ; encoding: [0x7f,0x02,0x80,0xbe]
+0x7f,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s0, 0 ; encoding: [0x80,0x02,0x80,0xbe]
+0x80,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s0, -1 ; encoding: [0xc1,0x02,0x80,0xbe]
+0xc1,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s0, 0.5 ; encoding: [0xf0,0x02,0x80,0xbe]
+0xf0,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s0, -4.0 ; encoding: [0xf7,0x02,0x80,0xbe]
+0xf7,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s0, scc ; encoding: [0xfd,0x02,0x80,0xbe]
+0xfd,0x02,0x80,0xbe
+
+# CHECK: s_cmov_b32 s0, 0xaf123456 ; encoding: [0xff,0x02,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x02,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_cmov_b32 s0, 0x3f717273 ; encoding: [0xff,0x02,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x02,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_cmov_b64 s[0:1], s[0:1] ; encoding: [0x00,0x03,0x80,0xbe]
+0x00,0x03,0x80,0xbe
+
+# CHECK: s_cmov_b64 s[2:3], s[0:1] ; encoding: [0x00,0x03,0x82,0xbe]
+0x00,0x03,0x82,0xbe
+
+# CHECK: s_cmov_b64 s[100:101], s[0:1] ; encoding: [0x00,0x03,0xe4,0xbe]
+0x00,0x03,0xe4,0xbe
+
+# CHECK: s_cmov_b64 flat_scratch, s[0:1] ; encoding: [0x00,0x03,0xe6,0xbe]
+0x00,0x03,0xe6,0xbe
+
+# CHECK: s_cmov_b64 vcc, s[0:1] ; encoding: [0x00,0x03,0xea,0xbe]
+0x00,0x03,0xea,0xbe
+
+# CHECK: s_cmov_b64 tba, s[0:1] ; encoding: [0x00,0x03,0xec,0xbe]
+0x00,0x03,0xec,0xbe
+
+# CHECK: s_cmov_b64 tma, s[0:1] ; encoding: [0x00,0x03,0xee,0xbe]
+0x00,0x03,0xee,0xbe
+
+# CHECK: s_cmov_b64 ttmp[10:11], s[0:1] ; encoding: [0x00,0x03,0xfa,0xbe]
+0x00,0x03,0xfa,0xbe
+
+# CHECK: s_cmov_b64 exec, s[0:1] ; encoding: [0x00,0x03,0xfe,0xbe]
+0x00,0x03,0xfe,0xbe
+
+# CHECK: s_cmov_b64 s[0:1], s[2:3] ; encoding: [0x02,0x03,0x80,0xbe]
+0x02,0x03,0x80,0xbe
+
+# CHECK: s_cmov_b64 s[0:1], s[100:101] ; encoding: [0x64,0x03,0x80,0xbe]
+0x64,0x03,0x80,0xbe
+
+# CHECK: s_cmov_b64 s[0:1], flat_scratch ; encoding: [0x66,0x03,0x80,0xbe]
+0x66,0x03,0x80,0xbe
+
+# CHECK: s_cmov_b64 s[0:1], vcc ; encoding: [0x6a,0x03,0x80,0xbe]
+0x6a,0x03,0x80,0xbe
+
+# CHECK: s_cmov_b64 s[0:1], tba ; encoding: [0x6c,0x03,0x80,0xbe]
+0x6c,0x03,0x80,0xbe
+
+# CHECK: s_cmov_b64 s[0:1], tma ; encoding: [0x6e,0x03,0x80,0xbe]
+0x6e,0x03,0x80,0xbe
+
+# CHECK: s_cmov_b64 s[0:1], ttmp[10:11] ; encoding: [0x7a,0x03,0x80,0xbe]
+0x7a,0x03,0x80,0xbe
+
+# CHECK: s_cmov_b64 s[0:1], exec ; encoding: [0x7e,0x03,0x80,0xbe]
+0x7e,0x03,0x80,0xbe
+
+# CHECK: s_cmov_b64 s[0:1], 0 ; encoding: [0x80,0x03,0x80,0xbe]
+0x80,0x03,0x80,0xbe
+
+# CHECK: s_cmov_b64 s[0:1], -1 ; encoding: [0xc1,0x03,0x80,0xbe]
+0xc1,0x03,0x80,0xbe
+
+# CHECK: s_cmov_b64 s[0:1], 0.5 ; encoding: [0xf0,0x03,0x80,0xbe]
+0xf0,0x03,0x80,0xbe
+
+# CHECK: s_cmov_b64 s[0:1], -4.0 ; encoding: [0xf7,0x03,0x80,0xbe]
+0xf7,0x03,0x80,0xbe
+
+# CHECK: s_cmov_b64 s[0:1], 0xaf123456 ; encoding: [0xff,0x03,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x03,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_cmov_b64 s[0:1], 0x3f717273 ; encoding: [0xff,0x03,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x03,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_not_b32 s0, s0 ; encoding: [0x00,0x04,0x80,0xbe]
+0x00,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s101, s0 ; encoding: [0x00,0x04,0xe5,0xbe]
+0x00,0x04,0xe5,0xbe
+
+# CHECK: s_not_b32 flat_scratch_lo, s0 ; encoding: [0x00,0x04,0xe6,0xbe]
+0x00,0x04,0xe6,0xbe
+
+# CHECK: s_not_b32 flat_scratch_hi, s0 ; encoding: [0x00,0x04,0xe7,0xbe]
+0x00,0x04,0xe7,0xbe
+
+# CHECK: s_not_b32 vcc_lo, s0 ; encoding: [0x00,0x04,0xea,0xbe]
+0x00,0x04,0xea,0xbe
+
+# CHECK: s_not_b32 vcc_hi, s0 ; encoding: [0x00,0x04,0xeb,0xbe]
+0x00,0x04,0xeb,0xbe
+
+# CHECK: s_not_b32 tba_lo, s0 ; encoding: [0x00,0x04,0xec,0xbe]
+0x00,0x04,0xec,0xbe
+
+# CHECK: s_not_b32 tba_hi, s0 ; encoding: [0x00,0x04,0xed,0xbe]
+0x00,0x04,0xed,0xbe
+
+# CHECK: s_not_b32 tma_lo, s0 ; encoding: [0x00,0x04,0xee,0xbe]
+0x00,0x04,0xee,0xbe
+
+# CHECK: s_not_b32 tma_hi, s0 ; encoding: [0x00,0x04,0xef,0xbe]
+0x00,0x04,0xef,0xbe
+
+# CHECK: s_not_b32 ttmp11, s0 ; encoding: [0x00,0x04,0xfb,0xbe]
+0x00,0x04,0xfb,0xbe
+
+# CHECK: s_not_b32 m0, s0 ; encoding: [0x00,0x04,0xfc,0xbe]
+0x00,0x04,0xfc,0xbe
+
+# CHECK: s_not_b32 exec_lo, s0 ; encoding: [0x00,0x04,0xfe,0xbe]
+0x00,0x04,0xfe,0xbe
+
+# CHECK: s_not_b32 exec_hi, s0 ; encoding: [0x00,0x04,0xff,0xbe]
+0x00,0x04,0xff,0xbe
+
+# CHECK: s_not_b32 s0, s101 ; encoding: [0x65,0x04,0x80,0xbe]
+0x65,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s0, flat_scratch_lo ; encoding: [0x66,0x04,0x80,0xbe]
+0x66,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s0, flat_scratch_hi ; encoding: [0x67,0x04,0x80,0xbe]
+0x67,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s0, vcc_lo ; encoding: [0x6a,0x04,0x80,0xbe]
+0x6a,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s0, vcc_hi ; encoding: [0x6b,0x04,0x80,0xbe]
+0x6b,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s0, tba_lo ; encoding: [0x6c,0x04,0x80,0xbe]
+0x6c,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s0, tba_hi ; encoding: [0x6d,0x04,0x80,0xbe]
+0x6d,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s0, tma_lo ; encoding: [0x6e,0x04,0x80,0xbe]
+0x6e,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s0, tma_hi ; encoding: [0x6f,0x04,0x80,0xbe]
+0x6f,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s0, ttmp11 ; encoding: [0x7b,0x04,0x80,0xbe]
+0x7b,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s0, m0 ; encoding: [0x7c,0x04,0x80,0xbe]
+0x7c,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s0, exec_lo ; encoding: [0x7e,0x04,0x80,0xbe]
+0x7e,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s0, exec_hi ; encoding: [0x7f,0x04,0x80,0xbe]
+0x7f,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s0, 0 ; encoding: [0x80,0x04,0x80,0xbe]
+0x80,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s0, -1 ; encoding: [0xc1,0x04,0x80,0xbe]
+0xc1,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s0, 0.5 ; encoding: [0xf0,0x04,0x80,0xbe]
+0xf0,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s0, -4.0 ; encoding: [0xf7,0x04,0x80,0xbe]
+0xf7,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s0, scc ; encoding: [0xfd,0x04,0x80,0xbe]
+0xfd,0x04,0x80,0xbe
+
+# CHECK: s_not_b32 s0, 0xaf123456 ; encoding: [0xff,0x04,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x04,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_not_b32 s0, 0x3f717273 ; encoding: [0xff,0x04,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x04,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_not_b64 s[0:1], s[0:1] ; encoding: [0x00,0x05,0x80,0xbe]
+0x00,0x05,0x80,0xbe
+
+# CHECK: s_not_b64 s[2:3], s[0:1] ; encoding: [0x00,0x05,0x82,0xbe]
+0x00,0x05,0x82,0xbe
+
+# CHECK: s_not_b64 s[100:101], s[0:1] ; encoding: [0x00,0x05,0xe4,0xbe]
+0x00,0x05,0xe4,0xbe
+
+# CHECK: s_not_b64 flat_scratch, s[0:1] ; encoding: [0x00,0x05,0xe6,0xbe]
+0x00,0x05,0xe6,0xbe
+
+# CHECK: s_not_b64 vcc, s[0:1] ; encoding: [0x00,0x05,0xea,0xbe]
+0x00,0x05,0xea,0xbe
+
+# CHECK: s_not_b64 tba, s[0:1] ; encoding: [0x00,0x05,0xec,0xbe]
+0x00,0x05,0xec,0xbe
+
+# CHECK: s_not_b64 tma, s[0:1] ; encoding: [0x00,0x05,0xee,0xbe]
+0x00,0x05,0xee,0xbe
+
+# CHECK: s_not_b64 ttmp[10:11], s[0:1] ; encoding: [0x00,0x05,0xfa,0xbe]
+0x00,0x05,0xfa,0xbe
+
+# CHECK: s_not_b64 exec, s[0:1] ; encoding: [0x00,0x05,0xfe,0xbe]
+0x00,0x05,0xfe,0xbe
+
+# CHECK: s_not_b64 s[0:1], s[2:3] ; encoding: [0x02,0x05,0x80,0xbe]
+0x02,0x05,0x80,0xbe
+
+# CHECK: s_not_b64 s[0:1], s[100:101] ; encoding: [0x64,0x05,0x80,0xbe]
+0x64,0x05,0x80,0xbe
+
+# CHECK: s_not_b64 s[0:1], flat_scratch ; encoding: [0x66,0x05,0x80,0xbe]
+0x66,0x05,0x80,0xbe
+
+# CHECK: s_not_b64 s[0:1], vcc ; encoding: [0x6a,0x05,0x80,0xbe]
+0x6a,0x05,0x80,0xbe
+
+# CHECK: s_not_b64 s[0:1], tba ; encoding: [0x6c,0x05,0x80,0xbe]
+0x6c,0x05,0x80,0xbe
+
+# CHECK: s_not_b64 s[0:1], tma ; encoding: [0x6e,0x05,0x80,0xbe]
+0x6e,0x05,0x80,0xbe
+
+# CHECK: s_not_b64 s[0:1], ttmp[10:11] ; encoding: [0x7a,0x05,0x80,0xbe]
+0x7a,0x05,0x80,0xbe
+
+# CHECK: s_not_b64 s[0:1], exec ; encoding: [0x7e,0x05,0x80,0xbe]
+0x7e,0x05,0x80,0xbe
+
+# CHECK: s_not_b64 s[0:1], 0 ; encoding: [0x80,0x05,0x80,0xbe]
+0x80,0x05,0x80,0xbe
+
+# CHECK: s_not_b64 s[0:1], -1 ; encoding: [0xc1,0x05,0x80,0xbe]
+0xc1,0x05,0x80,0xbe
+
+# CHECK: s_not_b64 s[0:1], 0.5 ; encoding: [0xf0,0x05,0x80,0xbe]
+0xf0,0x05,0x80,0xbe
+
+# CHECK: s_not_b64 s[0:1], -4.0 ; encoding: [0xf7,0x05,0x80,0xbe]
+0xf7,0x05,0x80,0xbe
+
+# CHECK: s_not_b64 s[0:1], 0xaf123456 ; encoding: [0xff,0x05,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x05,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_not_b64 s[0:1], 0x3f717273 ; encoding: [0xff,0x05,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x05,0x80,0xbe,0x73,0x72,0x71,0x3f
+
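+# s_wqm_b32/_b64 compute a whole-quad-mode mask: each group of four bits in
+# the source becomes 0xf in the destination if any bit of that group was set.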
+# CHECK: s_wqm_b32 s0, s0 ; encoding: [0x00,0x06,0x80,0xbe]
+0x00,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s101, s0 ; encoding: [0x00,0x06,0xe5,0xbe]
+0x00,0x06,0xe5,0xbe
+
+# CHECK: s_wqm_b32 flat_scratch_lo, s0 ; encoding: [0x00,0x06,0xe6,0xbe]
+0x00,0x06,0xe6,0xbe
+
+# CHECK: s_wqm_b32 flat_scratch_hi, s0 ; encoding: [0x00,0x06,0xe7,0xbe]
+0x00,0x06,0xe7,0xbe
+
+# CHECK: s_wqm_b32 vcc_lo, s0 ; encoding: [0x00,0x06,0xea,0xbe]
+0x00,0x06,0xea,0xbe
+
+# CHECK: s_wqm_b32 vcc_hi, s0 ; encoding: [0x00,0x06,0xeb,0xbe]
+0x00,0x06,0xeb,0xbe
+
+# CHECK: s_wqm_b32 tba_lo, s0 ; encoding: [0x00,0x06,0xec,0xbe]
+0x00,0x06,0xec,0xbe
+
+# CHECK: s_wqm_b32 tba_hi, s0 ; encoding: [0x00,0x06,0xed,0xbe]
+0x00,0x06,0xed,0xbe
+
+# CHECK: s_wqm_b32 tma_lo, s0 ; encoding: [0x00,0x06,0xee,0xbe]
+0x00,0x06,0xee,0xbe
+
+# CHECK: s_wqm_b32 tma_hi, s0 ; encoding: [0x00,0x06,0xef,0xbe]
+0x00,0x06,0xef,0xbe
+
+# CHECK: s_wqm_b32 ttmp11, s0 ; encoding: [0x00,0x06,0xfb,0xbe]
+0x00,0x06,0xfb,0xbe
+
+# CHECK: s_wqm_b32 m0, s0 ; encoding: [0x00,0x06,0xfc,0xbe]
+0x00,0x06,0xfc,0xbe
+
+# CHECK: s_wqm_b32 exec_lo, s0 ; encoding: [0x00,0x06,0xfe,0xbe]
+0x00,0x06,0xfe,0xbe
+
+# CHECK: s_wqm_b32 exec_hi, s0 ; encoding: [0x00,0x06,0xff,0xbe]
+0x00,0x06,0xff,0xbe
+
+# CHECK: s_wqm_b32 s0, s101 ; encoding: [0x65,0x06,0x80,0xbe]
+0x65,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s0, flat_scratch_lo ; encoding: [0x66,0x06,0x80,0xbe]
+0x66,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s0, flat_scratch_hi ; encoding: [0x67,0x06,0x80,0xbe]
+0x67,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s0, vcc_lo ; encoding: [0x6a,0x06,0x80,0xbe]
+0x6a,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s0, vcc_hi ; encoding: [0x6b,0x06,0x80,0xbe]
+0x6b,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s0, tba_lo ; encoding: [0x6c,0x06,0x80,0xbe]
+0x6c,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s0, tba_hi ; encoding: [0x6d,0x06,0x80,0xbe]
+0x6d,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s0, tma_lo ; encoding: [0x6e,0x06,0x80,0xbe]
+0x6e,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s0, tma_hi ; encoding: [0x6f,0x06,0x80,0xbe]
+0x6f,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s0, ttmp11 ; encoding: [0x7b,0x06,0x80,0xbe]
+0x7b,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s0, m0 ; encoding: [0x7c,0x06,0x80,0xbe]
+0x7c,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s0, exec_lo ; encoding: [0x7e,0x06,0x80,0xbe]
+0x7e,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s0, exec_hi ; encoding: [0x7f,0x06,0x80,0xbe]
+0x7f,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s0, 0 ; encoding: [0x80,0x06,0x80,0xbe]
+0x80,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s0, -1 ; encoding: [0xc1,0x06,0x80,0xbe]
+0xc1,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s0, 0.5 ; encoding: [0xf0,0x06,0x80,0xbe]
+0xf0,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s0, -4.0 ; encoding: [0xf7,0x06,0x80,0xbe]
+0xf7,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s0, scc ; encoding: [0xfd,0x06,0x80,0xbe]
+0xfd,0x06,0x80,0xbe
+
+# CHECK: s_wqm_b32 s0, 0xaf123456 ; encoding: [0xff,0x06,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x06,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_wqm_b32 s0, 0x3f717273 ; encoding: [0xff,0x06,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x06,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_wqm_b64 s[0:1], s[0:1] ; encoding: [0x00,0x07,0x80,0xbe]
+0x00,0x07,0x80,0xbe
+
+# CHECK: s_wqm_b64 s[2:3], s[0:1] ; encoding: [0x00,0x07,0x82,0xbe]
+0x00,0x07,0x82,0xbe
+
+# CHECK: s_wqm_b64 s[100:101], s[0:1] ; encoding: [0x00,0x07,0xe4,0xbe]
+0x00,0x07,0xe4,0xbe
+
+# CHECK: s_wqm_b64 flat_scratch, s[0:1] ; encoding: [0x00,0x07,0xe6,0xbe]
+0x00,0x07,0xe6,0xbe
+
+# CHECK: s_wqm_b64 vcc, s[0:1] ; encoding: [0x00,0x07,0xea,0xbe]
+0x00,0x07,0xea,0xbe
+
+# CHECK: s_wqm_b64 tba, s[0:1] ; encoding: [0x00,0x07,0xec,0xbe]
+0x00,0x07,0xec,0xbe
+
+# CHECK: s_wqm_b64 tma, s[0:1] ; encoding: [0x00,0x07,0xee,0xbe]
+0x00,0x07,0xee,0xbe
+
+# CHECK: s_wqm_b64 ttmp[10:11], s[0:1] ; encoding: [0x00,0x07,0xfa,0xbe]
+0x00,0x07,0xfa,0xbe
+
+# CHECK: s_wqm_b64 exec, s[0:1] ; encoding: [0x00,0x07,0xfe,0xbe]
+0x00,0x07,0xfe,0xbe
+
+# CHECK: s_wqm_b64 s[0:1], s[2:3] ; encoding: [0x02,0x07,0x80,0xbe]
+0x02,0x07,0x80,0xbe
+
+# CHECK: s_wqm_b64 s[0:1], s[100:101] ; encoding: [0x64,0x07,0x80,0xbe]
+0x64,0x07,0x80,0xbe
+
+# CHECK: s_wqm_b64 s[0:1], flat_scratch ; encoding: [0x66,0x07,0x80,0xbe]
+0x66,0x07,0x80,0xbe
+
+# CHECK: s_wqm_b64 s[0:1], vcc ; encoding: [0x6a,0x07,0x80,0xbe]
+0x6a,0x07,0x80,0xbe
+
+# CHECK: s_wqm_b64 s[0:1], tba ; encoding: [0x6c,0x07,0x80,0xbe]
+0x6c,0x07,0x80,0xbe
+
+# CHECK: s_wqm_b64 s[0:1], tma ; encoding: [0x6e,0x07,0x80,0xbe]
+0x6e,0x07,0x80,0xbe
+
+# CHECK: s_wqm_b64 s[0:1], ttmp[10:11] ; encoding: [0x7a,0x07,0x80,0xbe]
+0x7a,0x07,0x80,0xbe
+
+# CHECK: s_wqm_b64 s[0:1], exec ; encoding: [0x7e,0x07,0x80,0xbe]
+0x7e,0x07,0x80,0xbe
+
+# CHECK: s_wqm_b64 s[0:1], 0 ; encoding: [0x80,0x07,0x80,0xbe]
+0x80,0x07,0x80,0xbe
+
+# CHECK: s_wqm_b64 s[0:1], -1 ; encoding: [0xc1,0x07,0x80,0xbe]
+0xc1,0x07,0x80,0xbe
+
+# CHECK: s_wqm_b64 s[0:1], 0.5 ; encoding: [0xf0,0x07,0x80,0xbe]
+0xf0,0x07,0x80,0xbe
+
+# CHECK: s_wqm_b64 s[0:1], -4.0 ; encoding: [0xf7,0x07,0x80,0xbe]
+0xf7,0x07,0x80,0xbe
+
+# CHECK: s_wqm_b64 s[0:1], 0xaf123456 ; encoding: [0xff,0x07,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x07,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_wqm_b64 s[0:1], 0x3f717273 ; encoding: [0xff,0x07,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x07,0x80,0xbe,0x73,0x72,0x71,0x3f
+
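+# s_brev_b32/_b64 reverse the bit order of the source operand.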
+# CHECK: s_brev_b32 s0, s0 ; encoding: [0x00,0x08,0x80,0xbe]
+0x00,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s101, s0 ; encoding: [0x00,0x08,0xe5,0xbe]
+0x00,0x08,0xe5,0xbe
+
+# CHECK: s_brev_b32 flat_scratch_lo, s0 ; encoding: [0x00,0x08,0xe6,0xbe]
+0x00,0x08,0xe6,0xbe
+
+# CHECK: s_brev_b32 flat_scratch_hi, s0 ; encoding: [0x00,0x08,0xe7,0xbe]
+0x00,0x08,0xe7,0xbe
+
+# CHECK: s_brev_b32 vcc_lo, s0 ; encoding: [0x00,0x08,0xea,0xbe]
+0x00,0x08,0xea,0xbe
+
+# CHECK: s_brev_b32 vcc_hi, s0 ; encoding: [0x00,0x08,0xeb,0xbe]
+0x00,0x08,0xeb,0xbe
+
+# CHECK: s_brev_b32 tba_lo, s0 ; encoding: [0x00,0x08,0xec,0xbe]
+0x00,0x08,0xec,0xbe
+
+# CHECK: s_brev_b32 tba_hi, s0 ; encoding: [0x00,0x08,0xed,0xbe]
+0x00,0x08,0xed,0xbe
+
+# CHECK: s_brev_b32 tma_lo, s0 ; encoding: [0x00,0x08,0xee,0xbe]
+0x00,0x08,0xee,0xbe
+
+# CHECK: s_brev_b32 tma_hi, s0 ; encoding: [0x00,0x08,0xef,0xbe]
+0x00,0x08,0xef,0xbe
+
+# CHECK: s_brev_b32 ttmp11, s0 ; encoding: [0x00,0x08,0xfb,0xbe]
+0x00,0x08,0xfb,0xbe
+
+# CHECK: s_brev_b32 m0, s0 ; encoding: [0x00,0x08,0xfc,0xbe]
+0x00,0x08,0xfc,0xbe
+
+# CHECK: s_brev_b32 exec_lo, s0 ; encoding: [0x00,0x08,0xfe,0xbe]
+0x00,0x08,0xfe,0xbe
+
+# CHECK: s_brev_b32 exec_hi, s0 ; encoding: [0x00,0x08,0xff,0xbe]
+0x00,0x08,0xff,0xbe
+
+# CHECK: s_brev_b32 s0, s101 ; encoding: [0x65,0x08,0x80,0xbe]
+0x65,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s0, flat_scratch_lo ; encoding: [0x66,0x08,0x80,0xbe]
+0x66,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s0, flat_scratch_hi ; encoding: [0x67,0x08,0x80,0xbe]
+0x67,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s0, vcc_lo ; encoding: [0x6a,0x08,0x80,0xbe]
+0x6a,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s0, vcc_hi ; encoding: [0x6b,0x08,0x80,0xbe]
+0x6b,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s0, tba_lo ; encoding: [0x6c,0x08,0x80,0xbe]
+0x6c,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s0, tba_hi ; encoding: [0x6d,0x08,0x80,0xbe]
+0x6d,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s0, tma_lo ; encoding: [0x6e,0x08,0x80,0xbe]
+0x6e,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s0, tma_hi ; encoding: [0x6f,0x08,0x80,0xbe]
+0x6f,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s0, ttmp11 ; encoding: [0x7b,0x08,0x80,0xbe]
+0x7b,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s0, m0 ; encoding: [0x7c,0x08,0x80,0xbe]
+0x7c,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s0, exec_lo ; encoding: [0x7e,0x08,0x80,0xbe]
+0x7e,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s0, exec_hi ; encoding: [0x7f,0x08,0x80,0xbe]
+0x7f,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s0, 0 ; encoding: [0x80,0x08,0x80,0xbe]
+0x80,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s0, -1 ; encoding: [0xc1,0x08,0x80,0xbe]
+0xc1,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s0, 0.5 ; encoding: [0xf0,0x08,0x80,0xbe]
+0xf0,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s0, -4.0 ; encoding: [0xf7,0x08,0x80,0xbe]
+0xf7,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s0, scc ; encoding: [0xfd,0x08,0x80,0xbe]
+0xfd,0x08,0x80,0xbe
+
+# CHECK: s_brev_b32 s0, 0xaf123456 ; encoding: [0xff,0x08,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x08,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_brev_b32 s0, 0x3f717273 ; encoding: [0xff,0x08,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x08,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_brev_b64 s[0:1], s[0:1] ; encoding: [0x00,0x09,0x80,0xbe]
+0x00,0x09,0x80,0xbe
+
+# CHECK: s_brev_b64 s[2:3], s[0:1] ; encoding: [0x00,0x09,0x82,0xbe]
+0x00,0x09,0x82,0xbe
+
+# CHECK: s_brev_b64 s[100:101], s[0:1] ; encoding: [0x00,0x09,0xe4,0xbe]
+0x00,0x09,0xe4,0xbe
+
+# CHECK: s_brev_b64 flat_scratch, s[0:1] ; encoding: [0x00,0x09,0xe6,0xbe]
+0x00,0x09,0xe6,0xbe
+
+# CHECK: s_brev_b64 vcc, s[0:1] ; encoding: [0x00,0x09,0xea,0xbe]
+0x00,0x09,0xea,0xbe
+
+# CHECK: s_brev_b64 tba, s[0:1] ; encoding: [0x00,0x09,0xec,0xbe]
+0x00,0x09,0xec,0xbe
+
+# CHECK: s_brev_b64 tma, s[0:1] ; encoding: [0x00,0x09,0xee,0xbe]
+0x00,0x09,0xee,0xbe
+
+# CHECK: s_brev_b64 ttmp[10:11], s[0:1] ; encoding: [0x00,0x09,0xfa,0xbe]
+0x00,0x09,0xfa,0xbe
+
+# CHECK: s_brev_b64 exec, s[0:1] ; encoding: [0x00,0x09,0xfe,0xbe]
+0x00,0x09,0xfe,0xbe
+
+# CHECK: s_brev_b64 s[0:1], s[2:3] ; encoding: [0x02,0x09,0x80,0xbe]
+0x02,0x09,0x80,0xbe
+
+# CHECK: s_brev_b64 s[0:1], s[100:101] ; encoding: [0x64,0x09,0x80,0xbe]
+0x64,0x09,0x80,0xbe
+
+# CHECK: s_brev_b64 s[0:1], flat_scratch ; encoding: [0x66,0x09,0x80,0xbe]
+0x66,0x09,0x80,0xbe
+
+# CHECK: s_brev_b64 s[0:1], vcc ; encoding: [0x6a,0x09,0x80,0xbe]
+0x6a,0x09,0x80,0xbe
+
+# CHECK: s_brev_b64 s[0:1], tba ; encoding: [0x6c,0x09,0x80,0xbe]
+0x6c,0x09,0x80,0xbe
+
+# CHECK: s_brev_b64 s[0:1], tma ; encoding: [0x6e,0x09,0x80,0xbe]
+0x6e,0x09,0x80,0xbe
+
+# CHECK: s_brev_b64 s[0:1], ttmp[10:11] ; encoding: [0x7a,0x09,0x80,0xbe]
+0x7a,0x09,0x80,0xbe
+
+# CHECK: s_brev_b64 s[0:1], exec ; encoding: [0x7e,0x09,0x80,0xbe]
+0x7e,0x09,0x80,0xbe
+
+# CHECK: s_brev_b64 s[0:1], 0 ; encoding: [0x80,0x09,0x80,0xbe]
+0x80,0x09,0x80,0xbe
+
+# CHECK: s_brev_b64 s[0:1], -1 ; encoding: [0xc1,0x09,0x80,0xbe]
+0xc1,0x09,0x80,0xbe
+
+# CHECK: s_brev_b64 s[0:1], 0.5 ; encoding: [0xf0,0x09,0x80,0xbe]
+0xf0,0x09,0x80,0xbe
+
+# CHECK: s_brev_b64 s[0:1], -4.0 ; encoding: [0xf7,0x09,0x80,0xbe]
+0xf7,0x09,0x80,0xbe
+
+# CHECK: s_brev_b64 s[0:1], 0xaf123456 ; encoding: [0xff,0x09,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x09,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_brev_b64 s[0:1], 0x3f717273 ; encoding: [0xff,0x09,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x09,0x80,0xbe,0x73,0x72,0x71,0x3f
+
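+# s_bcnt0_i32_* count the zero bits of the source; the s_bcnt1_i32_* group
+# further down counts the set bits.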
+# CHECK: s_bcnt0_i32_b32 s0, s0 ; encoding: [0x00,0x0a,0x80,0xbe]
+0x00,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s101, s0 ; encoding: [0x00,0x0a,0xe5,0xbe]
+0x00,0x0a,0xe5,0xbe
+
+# CHECK: s_bcnt0_i32_b32 flat_scratch_lo, s0 ; encoding: [0x00,0x0a,0xe6,0xbe]
+0x00,0x0a,0xe6,0xbe
+
+# CHECK: s_bcnt0_i32_b32 flat_scratch_hi, s0 ; encoding: [0x00,0x0a,0xe7,0xbe]
+0x00,0x0a,0xe7,0xbe
+
+# CHECK: s_bcnt0_i32_b32 vcc_lo, s0 ; encoding: [0x00,0x0a,0xea,0xbe]
+0x00,0x0a,0xea,0xbe
+
+# CHECK: s_bcnt0_i32_b32 vcc_hi, s0 ; encoding: [0x00,0x0a,0xeb,0xbe]
+0x00,0x0a,0xeb,0xbe
+
+# CHECK: s_bcnt0_i32_b32 tba_lo, s0 ; encoding: [0x00,0x0a,0xec,0xbe]
+0x00,0x0a,0xec,0xbe
+
+# CHECK: s_bcnt0_i32_b32 tba_hi, s0 ; encoding: [0x00,0x0a,0xed,0xbe]
+0x00,0x0a,0xed,0xbe
+
+# CHECK: s_bcnt0_i32_b32 tma_lo, s0 ; encoding: [0x00,0x0a,0xee,0xbe]
+0x00,0x0a,0xee,0xbe
+
+# CHECK: s_bcnt0_i32_b32 tma_hi, s0 ; encoding: [0x00,0x0a,0xef,0xbe]
+0x00,0x0a,0xef,0xbe
+
+# CHECK: s_bcnt0_i32_b32 ttmp11, s0 ; encoding: [0x00,0x0a,0xfb,0xbe]
+0x00,0x0a,0xfb,0xbe
+
+# CHECK: s_bcnt0_i32_b32 m0, s0 ; encoding: [0x00,0x0a,0xfc,0xbe]
+0x00,0x0a,0xfc,0xbe
+
+# CHECK: s_bcnt0_i32_b32 exec_lo, s0 ; encoding: [0x00,0x0a,0xfe,0xbe]
+0x00,0x0a,0xfe,0xbe
+
+# CHECK: s_bcnt0_i32_b32 exec_hi, s0 ; encoding: [0x00,0x0a,0xff,0xbe]
+0x00,0x0a,0xff,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, s101 ; encoding: [0x65,0x0a,0x80,0xbe]
+0x65,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, flat_scratch_lo ; encoding: [0x66,0x0a,0x80,0xbe]
+0x66,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, flat_scratch_hi ; encoding: [0x67,0x0a,0x80,0xbe]
+0x67,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, vcc_lo ; encoding: [0x6a,0x0a,0x80,0xbe]
+0x6a,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, vcc_hi ; encoding: [0x6b,0x0a,0x80,0xbe]
+0x6b,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, tba_lo ; encoding: [0x6c,0x0a,0x80,0xbe]
+0x6c,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, tba_hi ; encoding: [0x6d,0x0a,0x80,0xbe]
+0x6d,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, tma_lo ; encoding: [0x6e,0x0a,0x80,0xbe]
+0x6e,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, tma_hi ; encoding: [0x6f,0x0a,0x80,0xbe]
+0x6f,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, ttmp11 ; encoding: [0x7b,0x0a,0x80,0xbe]
+0x7b,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, m0 ; encoding: [0x7c,0x0a,0x80,0xbe]
+0x7c,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, exec_lo ; encoding: [0x7e,0x0a,0x80,0xbe]
+0x7e,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, exec_hi ; encoding: [0x7f,0x0a,0x80,0xbe]
+0x7f,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, 0 ; encoding: [0x80,0x0a,0x80,0xbe]
+0x80,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, -1 ; encoding: [0xc1,0x0a,0x80,0xbe]
+0xc1,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, 0.5 ; encoding: [0xf0,0x0a,0x80,0xbe]
+0xf0,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, -4.0 ; encoding: [0xf7,0x0a,0x80,0xbe]
+0xf7,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, scc ; encoding: [0xfd,0x0a,0x80,0xbe]
+0xfd,0x0a,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b32 s0, 0xaf123456 ; encoding: [0xff,0x0a,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x0a,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_bcnt0_i32_b32 s0, 0x3f717273 ; encoding: [0xff,0x0a,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x0a,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_bcnt0_i32_b64 s0, s[0:1] ; encoding: [0x00,0x0b,0x80,0xbe]
+0x00,0x0b,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b64 s101, s[0:1] ; encoding: [0x00,0x0b,0xe5,0xbe]
+0x00,0x0b,0xe5,0xbe
+
+# CHECK: s_bcnt0_i32_b64 flat_scratch_lo, s[0:1] ; encoding: [0x00,0x0b,0xe6,0xbe]
+0x00,0x0b,0xe6,0xbe
+
+# CHECK: s_bcnt0_i32_b64 flat_scratch_hi, s[0:1] ; encoding: [0x00,0x0b,0xe7,0xbe]
+0x00,0x0b,0xe7,0xbe
+
+# CHECK: s_bcnt0_i32_b64 vcc_lo, s[0:1] ; encoding: [0x00,0x0b,0xea,0xbe]
+0x00,0x0b,0xea,0xbe
+
+# CHECK: s_bcnt0_i32_b64 vcc_hi, s[0:1] ; encoding: [0x00,0x0b,0xeb,0xbe]
+0x00,0x0b,0xeb,0xbe
+
+# CHECK: s_bcnt0_i32_b64 tba_lo, s[0:1] ; encoding: [0x00,0x0b,0xec,0xbe]
+0x00,0x0b,0xec,0xbe
+
+# CHECK: s_bcnt0_i32_b64 tba_hi, s[0:1] ; encoding: [0x00,0x0b,0xed,0xbe]
+0x00,0x0b,0xed,0xbe
+
+# CHECK: s_bcnt0_i32_b64 tma_lo, s[0:1] ; encoding: [0x00,0x0b,0xee,0xbe]
+0x00,0x0b,0xee,0xbe
+
+# CHECK: s_bcnt0_i32_b64 tma_hi, s[0:1] ; encoding: [0x00,0x0b,0xef,0xbe]
+0x00,0x0b,0xef,0xbe
+
+# CHECK: s_bcnt0_i32_b64 ttmp11, s[0:1] ; encoding: [0x00,0x0b,0xfb,0xbe]
+0x00,0x0b,0xfb,0xbe
+
+# CHECK: s_bcnt0_i32_b64 m0, s[0:1] ; encoding: [0x00,0x0b,0xfc,0xbe]
+0x00,0x0b,0xfc,0xbe
+
+# CHECK: s_bcnt0_i32_b64 exec_lo, s[0:1] ; encoding: [0x00,0x0b,0xfe,0xbe]
+0x00,0x0b,0xfe,0xbe
+
+# CHECK: s_bcnt0_i32_b64 exec_hi, s[0:1] ; encoding: [0x00,0x0b,0xff,0xbe]
+0x00,0x0b,0xff,0xbe
+
+# CHECK: s_bcnt0_i32_b64 s0, s[2:3] ; encoding: [0x02,0x0b,0x80,0xbe]
+0x02,0x0b,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b64 s0, s[100:101] ; encoding: [0x64,0x0b,0x80,0xbe]
+0x64,0x0b,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b64 s0, flat_scratch ; encoding: [0x66,0x0b,0x80,0xbe]
+0x66,0x0b,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b64 s0, vcc ; encoding: [0x6a,0x0b,0x80,0xbe]
+0x6a,0x0b,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b64 s0, tba ; encoding: [0x6c,0x0b,0x80,0xbe]
+0x6c,0x0b,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b64 s0, tma ; encoding: [0x6e,0x0b,0x80,0xbe]
+0x6e,0x0b,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b64 s0, ttmp[10:11] ; encoding: [0x7a,0x0b,0x80,0xbe]
+0x7a,0x0b,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b64 s0, exec ; encoding: [0x7e,0x0b,0x80,0xbe]
+0x7e,0x0b,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b64 s0, 0 ; encoding: [0x80,0x0b,0x80,0xbe]
+0x80,0x0b,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b64 s0, -1 ; encoding: [0xc1,0x0b,0x80,0xbe]
+0xc1,0x0b,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b64 s0, 0.5 ; encoding: [0xf0,0x0b,0x80,0xbe]
+0xf0,0x0b,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b64 s0, -4.0 ; encoding: [0xf7,0x0b,0x80,0xbe]
+0xf7,0x0b,0x80,0xbe
+
+# CHECK: s_bcnt0_i32_b64 s0, 0xaf123456 ; encoding: [0xff,0x0b,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x0b,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_bcnt0_i32_b64 s0, 0x3f717273 ; encoding: [0xff,0x0b,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x0b,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_bcnt1_i32_b32 s0, s0 ; encoding: [0x00,0x0c,0x80,0xbe]
+0x00,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s101, s0 ; encoding: [0x00,0x0c,0xe5,0xbe]
+0x00,0x0c,0xe5,0xbe
+
+# CHECK: s_bcnt1_i32_b32 flat_scratch_lo, s0 ; encoding: [0x00,0x0c,0xe6,0xbe]
+0x00,0x0c,0xe6,0xbe
+
+# CHECK: s_bcnt1_i32_b32 flat_scratch_hi, s0 ; encoding: [0x00,0x0c,0xe7,0xbe]
+0x00,0x0c,0xe7,0xbe
+
+# CHECK: s_bcnt1_i32_b32 vcc_lo, s0 ; encoding: [0x00,0x0c,0xea,0xbe]
+0x00,0x0c,0xea,0xbe
+
+# CHECK: s_bcnt1_i32_b32 vcc_hi, s0 ; encoding: [0x00,0x0c,0xeb,0xbe]
+0x00,0x0c,0xeb,0xbe
+
+# CHECK: s_bcnt1_i32_b32 tba_lo, s0 ; encoding: [0x00,0x0c,0xec,0xbe]
+0x00,0x0c,0xec,0xbe
+
+# CHECK: s_bcnt1_i32_b32 tba_hi, s0 ; encoding: [0x00,0x0c,0xed,0xbe]
+0x00,0x0c,0xed,0xbe
+
+# CHECK: s_bcnt1_i32_b32 tma_lo, s0 ; encoding: [0x00,0x0c,0xee,0xbe]
+0x00,0x0c,0xee,0xbe
+
+# CHECK: s_bcnt1_i32_b32 tma_hi, s0 ; encoding: [0x00,0x0c,0xef,0xbe]
+0x00,0x0c,0xef,0xbe
+
+# CHECK: s_bcnt1_i32_b32 ttmp11, s0 ; encoding: [0x00,0x0c,0xfb,0xbe]
+0x00,0x0c,0xfb,0xbe
+
+# CHECK: s_bcnt1_i32_b32 m0, s0 ; encoding: [0x00,0x0c,0xfc,0xbe]
+0x00,0x0c,0xfc,0xbe
+
+# CHECK: s_bcnt1_i32_b32 exec_lo, s0 ; encoding: [0x00,0x0c,0xfe,0xbe]
+0x00,0x0c,0xfe,0xbe
+
+# CHECK: s_bcnt1_i32_b32 exec_hi, s0 ; encoding: [0x00,0x0c,0xff,0xbe]
+0x00,0x0c,0xff,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, s101 ; encoding: [0x65,0x0c,0x80,0xbe]
+0x65,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, flat_scratch_lo ; encoding: [0x66,0x0c,0x80,0xbe]
+0x66,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, flat_scratch_hi ; encoding: [0x67,0x0c,0x80,0xbe]
+0x67,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, vcc_lo ; encoding: [0x6a,0x0c,0x80,0xbe]
+0x6a,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, vcc_hi ; encoding: [0x6b,0x0c,0x80,0xbe]
+0x6b,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, tba_lo ; encoding: [0x6c,0x0c,0x80,0xbe]
+0x6c,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, tba_hi ; encoding: [0x6d,0x0c,0x80,0xbe]
+0x6d,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, tma_lo ; encoding: [0x6e,0x0c,0x80,0xbe]
+0x6e,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, tma_hi ; encoding: [0x6f,0x0c,0x80,0xbe]
+0x6f,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, ttmp11 ; encoding: [0x7b,0x0c,0x80,0xbe]
+0x7b,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, m0 ; encoding: [0x7c,0x0c,0x80,0xbe]
+0x7c,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, exec_lo ; encoding: [0x7e,0x0c,0x80,0xbe]
+0x7e,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, exec_hi ; encoding: [0x7f,0x0c,0x80,0xbe]
+0x7f,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, 0 ; encoding: [0x80,0x0c,0x80,0xbe]
+0x80,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, -1 ; encoding: [0xc1,0x0c,0x80,0xbe]
+0xc1,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, 0.5 ; encoding: [0xf0,0x0c,0x80,0xbe]
+0xf0,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, -4.0 ; encoding: [0xf7,0x0c,0x80,0xbe]
+0xf7,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, scc ; encoding: [0xfd,0x0c,0x80,0xbe]
+0xfd,0x0c,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b32 s0, 0xaf123456 ; encoding: [0xff,0x0c,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x0c,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_bcnt1_i32_b32 s0, 0x3f717273 ; encoding: [0xff,0x0c,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x0c,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_bcnt1_i32_b64 s0, s[0:1] ; encoding: [0x00,0x0d,0x80,0xbe]
+0x00,0x0d,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b64 s101, s[0:1] ; encoding: [0x00,0x0d,0xe5,0xbe]
+0x00,0x0d,0xe5,0xbe
+
+# CHECK: s_bcnt1_i32_b64 flat_scratch_lo, s[0:1] ; encoding: [0x00,0x0d,0xe6,0xbe]
+0x00,0x0d,0xe6,0xbe
+
+# CHECK: s_bcnt1_i32_b64 flat_scratch_hi, s[0:1] ; encoding: [0x00,0x0d,0xe7,0xbe]
+0x00,0x0d,0xe7,0xbe
+
+# CHECK: s_bcnt1_i32_b64 vcc_lo, s[0:1] ; encoding: [0x00,0x0d,0xea,0xbe]
+0x00,0x0d,0xea,0xbe
+
+# CHECK: s_bcnt1_i32_b64 vcc_hi, s[0:1] ; encoding: [0x00,0x0d,0xeb,0xbe]
+0x00,0x0d,0xeb,0xbe
+
+# CHECK: s_bcnt1_i32_b64 tba_lo, s[0:1] ; encoding: [0x00,0x0d,0xec,0xbe]
+0x00,0x0d,0xec,0xbe
+
+# CHECK: s_bcnt1_i32_b64 tba_hi, s[0:1] ; encoding: [0x00,0x0d,0xed,0xbe]
+0x00,0x0d,0xed,0xbe
+
+# CHECK: s_bcnt1_i32_b64 tma_lo, s[0:1] ; encoding: [0x00,0x0d,0xee,0xbe]
+0x00,0x0d,0xee,0xbe
+
+# CHECK: s_bcnt1_i32_b64 tma_hi, s[0:1] ; encoding: [0x00,0x0d,0xef,0xbe]
+0x00,0x0d,0xef,0xbe
+
+# CHECK: s_bcnt1_i32_b64 ttmp11, s[0:1] ; encoding: [0x00,0x0d,0xfb,0xbe]
+0x00,0x0d,0xfb,0xbe
+
+# CHECK: s_bcnt1_i32_b64 m0, s[0:1] ; encoding: [0x00,0x0d,0xfc,0xbe]
+0x00,0x0d,0xfc,0xbe
+
+# CHECK: s_bcnt1_i32_b64 exec_lo, s[0:1] ; encoding: [0x00,0x0d,0xfe,0xbe]
+0x00,0x0d,0xfe,0xbe
+
+# CHECK: s_bcnt1_i32_b64 exec_hi, s[0:1] ; encoding: [0x00,0x0d,0xff,0xbe]
+0x00,0x0d,0xff,0xbe
+
+# CHECK: s_bcnt1_i32_b64 s0, s[2:3] ; encoding: [0x02,0x0d,0x80,0xbe]
+0x02,0x0d,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b64 s0, s[100:101] ; encoding: [0x64,0x0d,0x80,0xbe]
+0x64,0x0d,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b64 s0, flat_scratch ; encoding: [0x66,0x0d,0x80,0xbe]
+0x66,0x0d,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b64 s0, vcc ; encoding: [0x6a,0x0d,0x80,0xbe]
+0x6a,0x0d,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b64 s0, tba ; encoding: [0x6c,0x0d,0x80,0xbe]
+0x6c,0x0d,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b64 s0, tma ; encoding: [0x6e,0x0d,0x80,0xbe]
+0x6e,0x0d,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b64 s0, ttmp[10:11] ; encoding: [0x7a,0x0d,0x80,0xbe]
+0x7a,0x0d,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b64 s0, exec ; encoding: [0x7e,0x0d,0x80,0xbe]
+0x7e,0x0d,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b64 s0, 0 ; encoding: [0x80,0x0d,0x80,0xbe]
+0x80,0x0d,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b64 s0, -1 ; encoding: [0xc1,0x0d,0x80,0xbe]
+0xc1,0x0d,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b64 s0, 0.5 ; encoding: [0xf0,0x0d,0x80,0xbe]
+0xf0,0x0d,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b64 s0, -4.0 ; encoding: [0xf7,0x0d,0x80,0xbe]
+0xf7,0x0d,0x80,0xbe
+
+# CHECK: s_bcnt1_i32_b64 s0, 0xaf123456 ; encoding: [0xff,0x0d,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x0d,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_bcnt1_i32_b64 s0, 0x3f717273 ; encoding: [0xff,0x0d,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x0d,0x80,0xbe,0x73,0x72,0x71,0x3f
+
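+# s_ff0_i32_* return the position of the first zero bit, scanning from the
+# LSB (s_ff1_i32_* below does the same for the first set bit).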
+# CHECK: s_ff0_i32_b32 s0, s0 ; encoding: [0x00,0x0e,0x80,0xbe]
+0x00,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s101, s0 ; encoding: [0x00,0x0e,0xe5,0xbe]
+0x00,0x0e,0xe5,0xbe
+
+# CHECK: s_ff0_i32_b32 flat_scratch_lo, s0 ; encoding: [0x00,0x0e,0xe6,0xbe]
+0x00,0x0e,0xe6,0xbe
+
+# CHECK: s_ff0_i32_b32 flat_scratch_hi, s0 ; encoding: [0x00,0x0e,0xe7,0xbe]
+0x00,0x0e,0xe7,0xbe
+
+# CHECK: s_ff0_i32_b32 vcc_lo, s0 ; encoding: [0x00,0x0e,0xea,0xbe]
+0x00,0x0e,0xea,0xbe
+
+# CHECK: s_ff0_i32_b32 vcc_hi, s0 ; encoding: [0x00,0x0e,0xeb,0xbe]
+0x00,0x0e,0xeb,0xbe
+
+# CHECK: s_ff0_i32_b32 tba_lo, s0 ; encoding: [0x00,0x0e,0xec,0xbe]
+0x00,0x0e,0xec,0xbe
+
+# CHECK: s_ff0_i32_b32 tba_hi, s0 ; encoding: [0x00,0x0e,0xed,0xbe]
+0x00,0x0e,0xed,0xbe
+
+# CHECK: s_ff0_i32_b32 tma_lo, s0 ; encoding: [0x00,0x0e,0xee,0xbe]
+0x00,0x0e,0xee,0xbe
+
+# CHECK: s_ff0_i32_b32 tma_hi, s0 ; encoding: [0x00,0x0e,0xef,0xbe]
+0x00,0x0e,0xef,0xbe
+
+# CHECK: s_ff0_i32_b32 ttmp11, s0 ; encoding: [0x00,0x0e,0xfb,0xbe]
+0x00,0x0e,0xfb,0xbe
+
+# CHECK: s_ff0_i32_b32 m0, s0 ; encoding: [0x00,0x0e,0xfc,0xbe]
+0x00,0x0e,0xfc,0xbe
+
+# CHECK: s_ff0_i32_b32 exec_lo, s0 ; encoding: [0x00,0x0e,0xfe,0xbe]
+0x00,0x0e,0xfe,0xbe
+
+# CHECK: s_ff0_i32_b32 exec_hi, s0 ; encoding: [0x00,0x0e,0xff,0xbe]
+0x00,0x0e,0xff,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, s101 ; encoding: [0x65,0x0e,0x80,0xbe]
+0x65,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, flat_scratch_lo ; encoding: [0x66,0x0e,0x80,0xbe]
+0x66,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, flat_scratch_hi ; encoding: [0x67,0x0e,0x80,0xbe]
+0x67,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, vcc_lo ; encoding: [0x6a,0x0e,0x80,0xbe]
+0x6a,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, vcc_hi ; encoding: [0x6b,0x0e,0x80,0xbe]
+0x6b,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, tba_lo ; encoding: [0x6c,0x0e,0x80,0xbe]
+0x6c,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, tba_hi ; encoding: [0x6d,0x0e,0x80,0xbe]
+0x6d,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, tma_lo ; encoding: [0x6e,0x0e,0x80,0xbe]
+0x6e,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, tma_hi ; encoding: [0x6f,0x0e,0x80,0xbe]
+0x6f,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, ttmp11 ; encoding: [0x7b,0x0e,0x80,0xbe]
+0x7b,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, m0 ; encoding: [0x7c,0x0e,0x80,0xbe]
+0x7c,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, exec_lo ; encoding: [0x7e,0x0e,0x80,0xbe]
+0x7e,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, exec_hi ; encoding: [0x7f,0x0e,0x80,0xbe]
+0x7f,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, 0 ; encoding: [0x80,0x0e,0x80,0xbe]
+0x80,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, -1 ; encoding: [0xc1,0x0e,0x80,0xbe]
+0xc1,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, 0.5 ; encoding: [0xf0,0x0e,0x80,0xbe]
+0xf0,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, -4.0 ; encoding: [0xf7,0x0e,0x80,0xbe]
+0xf7,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, scc ; encoding: [0xfd,0x0e,0x80,0xbe]
+0xfd,0x0e,0x80,0xbe
+
+# CHECK: s_ff0_i32_b32 s0, 0xaf123456 ; encoding: [0xff,0x0e,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x0e,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_ff0_i32_b32 s0, 0x3f717273 ; encoding: [0xff,0x0e,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x0e,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_ff0_i32_b64 s0, s[0:1] ; encoding: [0x00,0x0f,0x80,0xbe]
+0x00,0x0f,0x80,0xbe
+
+# CHECK: s_ff0_i32_b64 s101, s[0:1] ; encoding: [0x00,0x0f,0xe5,0xbe]
+0x00,0x0f,0xe5,0xbe
+
+# CHECK: s_ff0_i32_b64 flat_scratch_lo, s[0:1] ; encoding: [0x00,0x0f,0xe6,0xbe]
+0x00,0x0f,0xe6,0xbe
+
+# CHECK: s_ff0_i32_b64 flat_scratch_hi, s[0:1] ; encoding: [0x00,0x0f,0xe7,0xbe]
+0x00,0x0f,0xe7,0xbe
+
+# CHECK: s_ff0_i32_b64 vcc_lo, s[0:1] ; encoding: [0x00,0x0f,0xea,0xbe]
+0x00,0x0f,0xea,0xbe
+
+# CHECK: s_ff0_i32_b64 vcc_hi, s[0:1] ; encoding: [0x00,0x0f,0xeb,0xbe]
+0x00,0x0f,0xeb,0xbe
+
+# CHECK: s_ff0_i32_b64 tba_lo, s[0:1] ; encoding: [0x00,0x0f,0xec,0xbe]
+0x00,0x0f,0xec,0xbe
+
+# CHECK: s_ff0_i32_b64 tba_hi, s[0:1] ; encoding: [0x00,0x0f,0xed,0xbe]
+0x00,0x0f,0xed,0xbe
+
+# CHECK: s_ff0_i32_b64 tma_lo, s[0:1] ; encoding: [0x00,0x0f,0xee,0xbe]
+0x00,0x0f,0xee,0xbe
+
+# CHECK: s_ff0_i32_b64 tma_hi, s[0:1] ; encoding: [0x00,0x0f,0xef,0xbe]
+0x00,0x0f,0xef,0xbe
+
+# CHECK: s_ff0_i32_b64 ttmp11, s[0:1] ; encoding: [0x00,0x0f,0xfb,0xbe]
+0x00,0x0f,0xfb,0xbe
+
+# CHECK: s_ff0_i32_b64 m0, s[0:1] ; encoding: [0x00,0x0f,0xfc,0xbe]
+0x00,0x0f,0xfc,0xbe
+
+# CHECK: s_ff0_i32_b64 exec_lo, s[0:1] ; encoding: [0x00,0x0f,0xfe,0xbe]
+0x00,0x0f,0xfe,0xbe
+
+# CHECK: s_ff0_i32_b64 exec_hi, s[0:1] ; encoding: [0x00,0x0f,0xff,0xbe]
+0x00,0x0f,0xff,0xbe
+
+# CHECK: s_ff0_i32_b64 s0, s[2:3] ; encoding: [0x02,0x0f,0x80,0xbe]
+0x02,0x0f,0x80,0xbe
+
+# CHECK: s_ff0_i32_b64 s0, s[100:101] ; encoding: [0x64,0x0f,0x80,0xbe]
+0x64,0x0f,0x80,0xbe
+
+# CHECK: s_ff0_i32_b64 s0, flat_scratch ; encoding: [0x66,0x0f,0x80,0xbe]
+0x66,0x0f,0x80,0xbe
+
+# CHECK: s_ff0_i32_b64 s0, vcc ; encoding: [0x6a,0x0f,0x80,0xbe]
+0x6a,0x0f,0x80,0xbe
+
+# CHECK: s_ff0_i32_b64 s0, tba ; encoding: [0x6c,0x0f,0x80,0xbe]
+0x6c,0x0f,0x80,0xbe
+
+# CHECK: s_ff0_i32_b64 s0, tma ; encoding: [0x6e,0x0f,0x80,0xbe]
+0x6e,0x0f,0x80,0xbe
+
+# CHECK: s_ff0_i32_b64 s0, ttmp[10:11] ; encoding: [0x7a,0x0f,0x80,0xbe]
+0x7a,0x0f,0x80,0xbe
+
+# CHECK: s_ff0_i32_b64 s0, exec ; encoding: [0x7e,0x0f,0x80,0xbe]
+0x7e,0x0f,0x80,0xbe
+
+# CHECK: s_ff0_i32_b64 s0, 0 ; encoding: [0x80,0x0f,0x80,0xbe]
+0x80,0x0f,0x80,0xbe
+
+# CHECK: s_ff0_i32_b64 s0, -1 ; encoding: [0xc1,0x0f,0x80,0xbe]
+0xc1,0x0f,0x80,0xbe
+
+# CHECK: s_ff0_i32_b64 s0, 0.5 ; encoding: [0xf0,0x0f,0x80,0xbe]
+0xf0,0x0f,0x80,0xbe
+
+# CHECK: s_ff0_i32_b64 s0, -4.0 ; encoding: [0xf7,0x0f,0x80,0xbe]
+0xf7,0x0f,0x80,0xbe
+
+# CHECK: s_ff0_i32_b64 s0, 0xaf123456 ; encoding: [0xff,0x0f,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x0f,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_ff0_i32_b64 s0, 0x3f717273 ; encoding: [0xff,0x0f,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x0f,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_ff1_i32_b32 s0, s0 ; encoding: [0x00,0x10,0x80,0xbe]
+0x00,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s101, s0 ; encoding: [0x00,0x10,0xe5,0xbe]
+0x00,0x10,0xe5,0xbe
+
+# CHECK: s_ff1_i32_b32 flat_scratch_lo, s0 ; encoding: [0x00,0x10,0xe6,0xbe]
+0x00,0x10,0xe6,0xbe
+
+# CHECK: s_ff1_i32_b32 flat_scratch_hi, s0 ; encoding: [0x00,0x10,0xe7,0xbe]
+0x00,0x10,0xe7,0xbe
+
+# CHECK: s_ff1_i32_b32 vcc_lo, s0 ; encoding: [0x00,0x10,0xea,0xbe]
+0x00,0x10,0xea,0xbe
+
+# CHECK: s_ff1_i32_b32 vcc_hi, s0 ; encoding: [0x00,0x10,0xeb,0xbe]
+0x00,0x10,0xeb,0xbe
+
+# CHECK: s_ff1_i32_b32 tba_lo, s0 ; encoding: [0x00,0x10,0xec,0xbe]
+0x00,0x10,0xec,0xbe
+
+# CHECK: s_ff1_i32_b32 tba_hi, s0 ; encoding: [0x00,0x10,0xed,0xbe]
+0x00,0x10,0xed,0xbe
+
+# CHECK: s_ff1_i32_b32 tma_lo, s0 ; encoding: [0x00,0x10,0xee,0xbe]
+0x00,0x10,0xee,0xbe
+
+# CHECK: s_ff1_i32_b32 tma_hi, s0 ; encoding: [0x00,0x10,0xef,0xbe]
+0x00,0x10,0xef,0xbe
+
+# CHECK: s_ff1_i32_b32 ttmp11, s0 ; encoding: [0x00,0x10,0xfb,0xbe]
+0x00,0x10,0xfb,0xbe
+
+# CHECK: s_ff1_i32_b32 m0, s0 ; encoding: [0x00,0x10,0xfc,0xbe]
+0x00,0x10,0xfc,0xbe
+
+# CHECK: s_ff1_i32_b32 exec_lo, s0 ; encoding: [0x00,0x10,0xfe,0xbe]
+0x00,0x10,0xfe,0xbe
+
+# CHECK: s_ff1_i32_b32 exec_hi, s0 ; encoding: [0x00,0x10,0xff,0xbe]
+0x00,0x10,0xff,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, s101 ; encoding: [0x65,0x10,0x80,0xbe]
+0x65,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, flat_scratch_lo ; encoding: [0x66,0x10,0x80,0xbe]
+0x66,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, flat_scratch_hi ; encoding: [0x67,0x10,0x80,0xbe]
+0x67,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, vcc_lo ; encoding: [0x6a,0x10,0x80,0xbe]
+0x6a,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, vcc_hi ; encoding: [0x6b,0x10,0x80,0xbe]
+0x6b,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, tba_lo ; encoding: [0x6c,0x10,0x80,0xbe]
+0x6c,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, tba_hi ; encoding: [0x6d,0x10,0x80,0xbe]
+0x6d,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, tma_lo ; encoding: [0x6e,0x10,0x80,0xbe]
+0x6e,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, tma_hi ; encoding: [0x6f,0x10,0x80,0xbe]
+0x6f,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, ttmp11 ; encoding: [0x7b,0x10,0x80,0xbe]
+0x7b,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, m0 ; encoding: [0x7c,0x10,0x80,0xbe]
+0x7c,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, exec_lo ; encoding: [0x7e,0x10,0x80,0xbe]
+0x7e,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, exec_hi ; encoding: [0x7f,0x10,0x80,0xbe]
+0x7f,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, 0 ; encoding: [0x80,0x10,0x80,0xbe]
+0x80,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, -1 ; encoding: [0xc1,0x10,0x80,0xbe]
+0xc1,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, 0.5 ; encoding: [0xf0,0x10,0x80,0xbe]
+0xf0,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, -4.0 ; encoding: [0xf7,0x10,0x80,0xbe]
+0xf7,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, scc ; encoding: [0xfd,0x10,0x80,0xbe]
+0xfd,0x10,0x80,0xbe
+
+# CHECK: s_ff1_i32_b32 s0, 0xaf123456 ; encoding: [0xff,0x10,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x10,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_ff1_i32_b32 s0, 0x3f717273 ; encoding: [0xff,0x10,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x10,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_ff1_i32_b64 s0, s[0:1] ; encoding: [0x00,0x11,0x80,0xbe]
+0x00,0x11,0x80,0xbe
+
+# CHECK: s_ff1_i32_b64 s101, s[0:1] ; encoding: [0x00,0x11,0xe5,0xbe]
+0x00,0x11,0xe5,0xbe
+
+# CHECK: s_ff1_i32_b64 flat_scratch_lo, s[0:1] ; encoding: [0x00,0x11,0xe6,0xbe]
+0x00,0x11,0xe6,0xbe
+
+# CHECK: s_ff1_i32_b64 flat_scratch_hi, s[0:1] ; encoding: [0x00,0x11,0xe7,0xbe]
+0x00,0x11,0xe7,0xbe
+
+# CHECK: s_ff1_i32_b64 vcc_lo, s[0:1] ; encoding: [0x00,0x11,0xea,0xbe]
+0x00,0x11,0xea,0xbe
+
+# CHECK: s_ff1_i32_b64 vcc_hi, s[0:1] ; encoding: [0x00,0x11,0xeb,0xbe]
+0x00,0x11,0xeb,0xbe
+
+# CHECK: s_ff1_i32_b64 tba_lo, s[0:1] ; encoding: [0x00,0x11,0xec,0xbe]
+0x00,0x11,0xec,0xbe
+
+# CHECK: s_ff1_i32_b64 tba_hi, s[0:1] ; encoding: [0x00,0x11,0xed,0xbe]
+0x00,0x11,0xed,0xbe
+
+# CHECK: s_ff1_i32_b64 tma_lo, s[0:1] ; encoding: [0x00,0x11,0xee,0xbe]
+0x00,0x11,0xee,0xbe
+
+# CHECK: s_ff1_i32_b64 tma_hi, s[0:1] ; encoding: [0x00,0x11,0xef,0xbe]
+0x00,0x11,0xef,0xbe
+
+# CHECK: s_ff1_i32_b64 ttmp11, s[0:1] ; encoding: [0x00,0x11,0xfb,0xbe]
+0x00,0x11,0xfb,0xbe
+
+# CHECK: s_ff1_i32_b64 m0, s[0:1] ; encoding: [0x00,0x11,0xfc,0xbe]
+0x00,0x11,0xfc,0xbe
+
+# CHECK: s_ff1_i32_b64 exec_lo, s[0:1] ; encoding: [0x00,0x11,0xfe,0xbe]
+0x00,0x11,0xfe,0xbe
+
+# CHECK: s_ff1_i32_b64 exec_hi, s[0:1] ; encoding: [0x00,0x11,0xff,0xbe]
+0x00,0x11,0xff,0xbe
+
+# CHECK: s_ff1_i32_b64 s0, s[2:3] ; encoding: [0x02,0x11,0x80,0xbe]
+0x02,0x11,0x80,0xbe
+
+# CHECK: s_ff1_i32_b64 s0, s[100:101] ; encoding: [0x64,0x11,0x80,0xbe]
+0x64,0x11,0x80,0xbe
+
+# CHECK: s_ff1_i32_b64 s0, flat_scratch ; encoding: [0x66,0x11,0x80,0xbe]
+0x66,0x11,0x80,0xbe
+
+# CHECK: s_ff1_i32_b64 s0, vcc ; encoding: [0x6a,0x11,0x80,0xbe]
+0x6a,0x11,0x80,0xbe
+
+# CHECK: s_ff1_i32_b64 s0, tba ; encoding: [0x6c,0x11,0x80,0xbe]
+0x6c,0x11,0x80,0xbe
+
+# CHECK: s_ff1_i32_b64 s0, tma ; encoding: [0x6e,0x11,0x80,0xbe]
+0x6e,0x11,0x80,0xbe
+
+# CHECK: s_ff1_i32_b64 s0, ttmp[10:11] ; encoding: [0x7a,0x11,0x80,0xbe]
+0x7a,0x11,0x80,0xbe
+
+# CHECK: s_ff1_i32_b64 s0, exec ; encoding: [0x7e,0x11,0x80,0xbe]
+0x7e,0x11,0x80,0xbe
+
+# CHECK: s_ff1_i32_b64 s0, 0 ; encoding: [0x80,0x11,0x80,0xbe]
+0x80,0x11,0x80,0xbe
+
+# CHECK: s_ff1_i32_b64 s0, -1 ; encoding: [0xc1,0x11,0x80,0xbe]
+0xc1,0x11,0x80,0xbe
+
+# CHECK: s_ff1_i32_b64 s0, 0.5 ; encoding: [0xf0,0x11,0x80,0xbe]
+0xf0,0x11,0x80,0xbe
+
+# CHECK: s_ff1_i32_b64 s0, -4.0 ; encoding: [0xf7,0x11,0x80,0xbe]
+0xf7,0x11,0x80,0xbe
+
+# CHECK: s_ff1_i32_b64 s0, 0xaf123456 ; encoding: [0xff,0x11,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x11,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_ff1_i32_b64 s0, 0x3f717273 ; encoding: [0xff,0x11,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x11,0x80,0xbe,0x73,0x72,0x71,0x3f
+
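+# s_flbit_i32_b32/_b64 return the position of the first set bit scanning from
+# the MSB, i.e. a count of leading zeros.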
+# CHECK: s_flbit_i32_b32 s0, s0 ; encoding: [0x00,0x12,0x80,0xbe]
+0x00,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s101, s0 ; encoding: [0x00,0x12,0xe5,0xbe]
+0x00,0x12,0xe5,0xbe
+
+# CHECK: s_flbit_i32_b32 flat_scratch_lo, s0 ; encoding: [0x00,0x12,0xe6,0xbe]
+0x00,0x12,0xe6,0xbe
+
+# CHECK: s_flbit_i32_b32 flat_scratch_hi, s0 ; encoding: [0x00,0x12,0xe7,0xbe]
+0x00,0x12,0xe7,0xbe
+
+# CHECK: s_flbit_i32_b32 vcc_lo, s0 ; encoding: [0x00,0x12,0xea,0xbe]
+0x00,0x12,0xea,0xbe
+
+# CHECK: s_flbit_i32_b32 vcc_hi, s0 ; encoding: [0x00,0x12,0xeb,0xbe]
+0x00,0x12,0xeb,0xbe
+
+# CHECK: s_flbit_i32_b32 tba_lo, s0 ; encoding: [0x00,0x12,0xec,0xbe]
+0x00,0x12,0xec,0xbe
+
+# CHECK: s_flbit_i32_b32 tba_hi, s0 ; encoding: [0x00,0x12,0xed,0xbe]
+0x00,0x12,0xed,0xbe
+
+# CHECK: s_flbit_i32_b32 tma_lo, s0 ; encoding: [0x00,0x12,0xee,0xbe]
+0x00,0x12,0xee,0xbe
+
+# CHECK: s_flbit_i32_b32 tma_hi, s0 ; encoding: [0x00,0x12,0xef,0xbe]
+0x00,0x12,0xef,0xbe
+
+# CHECK: s_flbit_i32_b32 ttmp11, s0 ; encoding: [0x00,0x12,0xfb,0xbe]
+0x00,0x12,0xfb,0xbe
+
+# CHECK: s_flbit_i32_b32 m0, s0 ; encoding: [0x00,0x12,0xfc,0xbe]
+0x00,0x12,0xfc,0xbe
+
+# CHECK: s_flbit_i32_b32 exec_lo, s0 ; encoding: [0x00,0x12,0xfe,0xbe]
+0x00,0x12,0xfe,0xbe
+
+# CHECK: s_flbit_i32_b32 exec_hi, s0 ; encoding: [0x00,0x12,0xff,0xbe]
+0x00,0x12,0xff,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, s101 ; encoding: [0x65,0x12,0x80,0xbe]
+0x65,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, flat_scratch_lo ; encoding: [0x66,0x12,0x80,0xbe]
+0x66,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, flat_scratch_hi ; encoding: [0x67,0x12,0x80,0xbe]
+0x67,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, vcc_lo ; encoding: [0x6a,0x12,0x80,0xbe]
+0x6a,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, vcc_hi ; encoding: [0x6b,0x12,0x80,0xbe]
+0x6b,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, tba_lo ; encoding: [0x6c,0x12,0x80,0xbe]
+0x6c,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, tba_hi ; encoding: [0x6d,0x12,0x80,0xbe]
+0x6d,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, tma_lo ; encoding: [0x6e,0x12,0x80,0xbe]
+0x6e,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, tma_hi ; encoding: [0x6f,0x12,0x80,0xbe]
+0x6f,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, ttmp11 ; encoding: [0x7b,0x12,0x80,0xbe]
+0x7b,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, m0 ; encoding: [0x7c,0x12,0x80,0xbe]
+0x7c,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, exec_lo ; encoding: [0x7e,0x12,0x80,0xbe]
+0x7e,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, exec_hi ; encoding: [0x7f,0x12,0x80,0xbe]
+0x7f,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, 0 ; encoding: [0x80,0x12,0x80,0xbe]
+0x80,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, -1 ; encoding: [0xc1,0x12,0x80,0xbe]
+0xc1,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, 0.5 ; encoding: [0xf0,0x12,0x80,0xbe]
+0xf0,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, -4.0 ; encoding: [0xf7,0x12,0x80,0xbe]
+0xf7,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, scc ; encoding: [0xfd,0x12,0x80,0xbe]
+0xfd,0x12,0x80,0xbe
+
+# CHECK: s_flbit_i32_b32 s0, 0xaf123456 ; encoding: [0xff,0x12,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x12,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_flbit_i32_b32 s0, 0x3f717273 ; encoding: [0xff,0x12,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x12,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_flbit_i32_b64 s0, s[0:1] ; encoding: [0x00,0x13,0x80,0xbe]
+0x00,0x13,0x80,0xbe
+
+# CHECK: s_flbit_i32_b64 s101, s[0:1] ; encoding: [0x00,0x13,0xe5,0xbe]
+0x00,0x13,0xe5,0xbe
+
+# CHECK: s_flbit_i32_b64 flat_scratch_lo, s[0:1] ; encoding: [0x00,0x13,0xe6,0xbe]
+0x00,0x13,0xe6,0xbe
+
+# CHECK: s_flbit_i32_b64 flat_scratch_hi, s[0:1] ; encoding: [0x00,0x13,0xe7,0xbe]
+0x00,0x13,0xe7,0xbe
+
+# CHECK: s_flbit_i32_b64 vcc_lo, s[0:1] ; encoding: [0x00,0x13,0xea,0xbe]
+0x00,0x13,0xea,0xbe
+
+# CHECK: s_flbit_i32_b64 vcc_hi, s[0:1] ; encoding: [0x00,0x13,0xeb,0xbe]
+0x00,0x13,0xeb,0xbe
+
+# CHECK: s_flbit_i32_b64 tba_lo, s[0:1] ; encoding: [0x00,0x13,0xec,0xbe]
+0x00,0x13,0xec,0xbe
+
+# CHECK: s_flbit_i32_b64 tba_hi, s[0:1] ; encoding: [0x00,0x13,0xed,0xbe]
+0x00,0x13,0xed,0xbe
+
+# CHECK: s_flbit_i32_b64 tma_lo, s[0:1] ; encoding: [0x00,0x13,0xee,0xbe]
+0x00,0x13,0xee,0xbe
+
+# CHECK: s_flbit_i32_b64 tma_hi, s[0:1] ; encoding: [0x00,0x13,0xef,0xbe]
+0x00,0x13,0xef,0xbe
+
+# CHECK: s_flbit_i32_b64 ttmp11, s[0:1] ; encoding: [0x00,0x13,0xfb,0xbe]
+0x00,0x13,0xfb,0xbe
+
+# CHECK: s_flbit_i32_b64 m0, s[0:1] ; encoding: [0x00,0x13,0xfc,0xbe]
+0x00,0x13,0xfc,0xbe
+
+# CHECK: s_flbit_i32_b64 exec_lo, s[0:1] ; encoding: [0x00,0x13,0xfe,0xbe]
+0x00,0x13,0xfe,0xbe
+
+# CHECK: s_flbit_i32_b64 exec_hi, s[0:1] ; encoding: [0x00,0x13,0xff,0xbe]
+0x00,0x13,0xff,0xbe
+
+# CHECK: s_flbit_i32_b64 s0, s[2:3] ; encoding: [0x02,0x13,0x80,0xbe]
+0x02,0x13,0x80,0xbe
+
+# CHECK: s_flbit_i32_b64 s0, s[100:101] ; encoding: [0x64,0x13,0x80,0xbe]
+0x64,0x13,0x80,0xbe
+
+# CHECK: s_flbit_i32_b64 s0, flat_scratch ; encoding: [0x66,0x13,0x80,0xbe]
+0x66,0x13,0x80,0xbe
+
+# CHECK: s_flbit_i32_b64 s0, vcc ; encoding: [0x6a,0x13,0x80,0xbe]
+0x6a,0x13,0x80,0xbe
+
+# CHECK: s_flbit_i32_b64 s0, tba ; encoding: [0x6c,0x13,0x80,0xbe]
+0x6c,0x13,0x80,0xbe
+
+# CHECK: s_flbit_i32_b64 s0, tma ; encoding: [0x6e,0x13,0x80,0xbe]
+0x6e,0x13,0x80,0xbe
+
+# CHECK: s_flbit_i32_b64 s0, ttmp[10:11] ; encoding: [0x7a,0x13,0x80,0xbe]
+0x7a,0x13,0x80,0xbe
+
+# CHECK: s_flbit_i32_b64 s0, exec ; encoding: [0x7e,0x13,0x80,0xbe]
+0x7e,0x13,0x80,0xbe
+
+# CHECK: s_flbit_i32_b64 s0, 0 ; encoding: [0x80,0x13,0x80,0xbe]
+0x80,0x13,0x80,0xbe
+
+# CHECK: s_flbit_i32_b64 s0, -1 ; encoding: [0xc1,0x13,0x80,0xbe]
+0xc1,0x13,0x80,0xbe
+
+# CHECK: s_flbit_i32_b64 s0, 0.5 ; encoding: [0xf0,0x13,0x80,0xbe]
+0xf0,0x13,0x80,0xbe
+
+# CHECK: s_flbit_i32_b64 s0, -4.0 ; encoding: [0xf7,0x13,0x80,0xbe]
+0xf7,0x13,0x80,0xbe
+
+# CHECK: s_flbit_i32_b64 s0, 0xaf123456 ; encoding: [0xff,0x13,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x13,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_flbit_i32_b64 s0, 0x3f717273 ; encoding: [0xff,0x13,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x13,0x80,0xbe,0x73,0x72,0x71,0x3f
+
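+# s_flbit_i32 and s_flbit_i32_i64 appear to be the signed variants: they scan
+# from the MSB for the first bit that differs from the sign bit.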
+# CHECK: s_flbit_i32 s0, s0 ; encoding: [0x00,0x14,0x80,0xbe]
+0x00,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s101, s0 ; encoding: [0x00,0x14,0xe5,0xbe]
+0x00,0x14,0xe5,0xbe
+
+# CHECK: s_flbit_i32 flat_scratch_lo, s0 ; encoding: [0x00,0x14,0xe6,0xbe]
+0x00,0x14,0xe6,0xbe
+
+# CHECK: s_flbit_i32 flat_scratch_hi, s0 ; encoding: [0x00,0x14,0xe7,0xbe]
+0x00,0x14,0xe7,0xbe
+
+# CHECK: s_flbit_i32 vcc_lo, s0 ; encoding: [0x00,0x14,0xea,0xbe]
+0x00,0x14,0xea,0xbe
+
+# CHECK: s_flbit_i32 vcc_hi, s0 ; encoding: [0x00,0x14,0xeb,0xbe]
+0x00,0x14,0xeb,0xbe
+
+# CHECK: s_flbit_i32 tba_lo, s0 ; encoding: [0x00,0x14,0xec,0xbe]
+0x00,0x14,0xec,0xbe
+
+# CHECK: s_flbit_i32 tba_hi, s0 ; encoding: [0x00,0x14,0xed,0xbe]
+0x00,0x14,0xed,0xbe
+
+# CHECK: s_flbit_i32 tma_lo, s0 ; encoding: [0x00,0x14,0xee,0xbe]
+0x00,0x14,0xee,0xbe
+
+# CHECK: s_flbit_i32 tma_hi, s0 ; encoding: [0x00,0x14,0xef,0xbe]
+0x00,0x14,0xef,0xbe
+
+# CHECK: s_flbit_i32 ttmp11, s0 ; encoding: [0x00,0x14,0xfb,0xbe]
+0x00,0x14,0xfb,0xbe
+
+# CHECK: s_flbit_i32 m0, s0 ; encoding: [0x00,0x14,0xfc,0xbe]
+0x00,0x14,0xfc,0xbe
+
+# CHECK: s_flbit_i32 exec_lo, s0 ; encoding: [0x00,0x14,0xfe,0xbe]
+0x00,0x14,0xfe,0xbe
+
+# CHECK: s_flbit_i32 exec_hi, s0 ; encoding: [0x00,0x14,0xff,0xbe]
+0x00,0x14,0xff,0xbe
+
+# CHECK: s_flbit_i32 s0, s101 ; encoding: [0x65,0x14,0x80,0xbe]
+0x65,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s0, flat_scratch_lo ; encoding: [0x66,0x14,0x80,0xbe]
+0x66,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s0, flat_scratch_hi ; encoding: [0x67,0x14,0x80,0xbe]
+0x67,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s0, vcc_lo ; encoding: [0x6a,0x14,0x80,0xbe]
+0x6a,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s0, vcc_hi ; encoding: [0x6b,0x14,0x80,0xbe]
+0x6b,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s0, tba_lo ; encoding: [0x6c,0x14,0x80,0xbe]
+0x6c,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s0, tba_hi ; encoding: [0x6d,0x14,0x80,0xbe]
+0x6d,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s0, tma_lo ; encoding: [0x6e,0x14,0x80,0xbe]
+0x6e,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s0, tma_hi ; encoding: [0x6f,0x14,0x80,0xbe]
+0x6f,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s0, ttmp11 ; encoding: [0x7b,0x14,0x80,0xbe]
+0x7b,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s0, m0 ; encoding: [0x7c,0x14,0x80,0xbe]
+0x7c,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s0, exec_lo ; encoding: [0x7e,0x14,0x80,0xbe]
+0x7e,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s0, exec_hi ; encoding: [0x7f,0x14,0x80,0xbe]
+0x7f,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s0, 0 ; encoding: [0x80,0x14,0x80,0xbe]
+0x80,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s0, -1 ; encoding: [0xc1,0x14,0x80,0xbe]
+0xc1,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s0, 0.5 ; encoding: [0xf0,0x14,0x80,0xbe]
+0xf0,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s0, -4.0 ; encoding: [0xf7,0x14,0x80,0xbe]
+0xf7,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s0, scc ; encoding: [0xfd,0x14,0x80,0xbe]
+0xfd,0x14,0x80,0xbe
+
+# CHECK: s_flbit_i32 s0, 0xaf123456 ; encoding: [0xff,0x14,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x14,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_flbit_i32 s0, 0x3f717273 ; encoding: [0xff,0x14,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x14,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_flbit_i32_i64 s0, s[0:1] ; encoding: [0x00,0x15,0x80,0xbe]
+0x00,0x15,0x80,0xbe
+
+# CHECK: s_flbit_i32_i64 s101, s[0:1] ; encoding: [0x00,0x15,0xe5,0xbe]
+0x00,0x15,0xe5,0xbe
+
+# CHECK: s_flbit_i32_i64 flat_scratch_lo, s[0:1] ; encoding: [0x00,0x15,0xe6,0xbe]
+0x00,0x15,0xe6,0xbe
+
+# CHECK: s_flbit_i32_i64 flat_scratch_hi, s[0:1] ; encoding: [0x00,0x15,0xe7,0xbe]
+0x00,0x15,0xe7,0xbe
+
+# CHECK: s_flbit_i32_i64 vcc_lo, s[0:1] ; encoding: [0x00,0x15,0xea,0xbe]
+0x00,0x15,0xea,0xbe
+
+# CHECK: s_flbit_i32_i64 vcc_hi, s[0:1] ; encoding: [0x00,0x15,0xeb,0xbe]
+0x00,0x15,0xeb,0xbe
+
+# CHECK: s_flbit_i32_i64 tba_lo, s[0:1] ; encoding: [0x00,0x15,0xec,0xbe]
+0x00,0x15,0xec,0xbe
+
+# CHECK: s_flbit_i32_i64 tba_hi, s[0:1] ; encoding: [0x00,0x15,0xed,0xbe]
+0x00,0x15,0xed,0xbe
+
+# CHECK: s_flbit_i32_i64 tma_lo, s[0:1] ; encoding: [0x00,0x15,0xee,0xbe]
+0x00,0x15,0xee,0xbe
+
+# CHECK: s_flbit_i32_i64 tma_hi, s[0:1] ; encoding: [0x00,0x15,0xef,0xbe]
+0x00,0x15,0xef,0xbe
+
+# CHECK: s_flbit_i32_i64 ttmp11, s[0:1] ; encoding: [0x00,0x15,0xfb,0xbe]
+0x00,0x15,0xfb,0xbe
+
+# CHECK: s_flbit_i32_i64 m0, s[0:1] ; encoding: [0x00,0x15,0xfc,0xbe]
+0x00,0x15,0xfc,0xbe
+
+# CHECK: s_flbit_i32_i64 exec_lo, s[0:1] ; encoding: [0x00,0x15,0xfe,0xbe]
+0x00,0x15,0xfe,0xbe
+
+# CHECK: s_flbit_i32_i64 exec_hi, s[0:1] ; encoding: [0x00,0x15,0xff,0xbe]
+0x00,0x15,0xff,0xbe
+
+# CHECK: s_flbit_i32_i64 s0, s[2:3] ; encoding: [0x02,0x15,0x80,0xbe]
+0x02,0x15,0x80,0xbe
+
+# CHECK: s_flbit_i32_i64 s0, s[100:101] ; encoding: [0x64,0x15,0x80,0xbe]
+0x64,0x15,0x80,0xbe
+
+# CHECK: s_flbit_i32_i64 s0, flat_scratch ; encoding: [0x66,0x15,0x80,0xbe]
+0x66,0x15,0x80,0xbe
+
+# CHECK: s_flbit_i32_i64 s0, vcc ; encoding: [0x6a,0x15,0x80,0xbe]
+0x6a,0x15,0x80,0xbe
+
+# CHECK: s_flbit_i32_i64 s0, tba ; encoding: [0x6c,0x15,0x80,0xbe]
+0x6c,0x15,0x80,0xbe
+
+# CHECK: s_flbit_i32_i64 s0, tma ; encoding: [0x6e,0x15,0x80,0xbe]
+0x6e,0x15,0x80,0xbe
+
+# CHECK: s_flbit_i32_i64 s0, ttmp[10:11] ; encoding: [0x7a,0x15,0x80,0xbe]
+0x7a,0x15,0x80,0xbe
+
+# CHECK: s_flbit_i32_i64 s0, exec ; encoding: [0x7e,0x15,0x80,0xbe]
+0x7e,0x15,0x80,0xbe
+
+# CHECK: s_flbit_i32_i64 s0, 0 ; encoding: [0x80,0x15,0x80,0xbe]
+0x80,0x15,0x80,0xbe
+
+# CHECK: s_flbit_i32_i64 s0, -1 ; encoding: [0xc1,0x15,0x80,0xbe]
+0xc1,0x15,0x80,0xbe
+
+# CHECK: s_flbit_i32_i64 s0, 0.5 ; encoding: [0xf0,0x15,0x80,0xbe]
+0xf0,0x15,0x80,0xbe
+
+# CHECK: s_flbit_i32_i64 s0, -4.0 ; encoding: [0xf7,0x15,0x80,0xbe]
+0xf7,0x15,0x80,0xbe
+
+# CHECK: s_flbit_i32_i64 s0, 0xaf123456 ; encoding: [0xff,0x15,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x15,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_flbit_i32_i64 s0, 0x3f717273 ; encoding: [0xff,0x15,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x15,0x80,0xbe,0x73,0x72,0x71,0x3f
+
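+# s_sext_i32_i8 sign-extends the low byte of the source into the 32-bit
+# destination (s_sext_i32_i16 below does the same for the low 16 bits).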
+# CHECK: s_sext_i32_i8 s0, s0 ; encoding: [0x00,0x16,0x80,0xbe]
+0x00,0x16,0x80,0xbe
+
+# CHECK: s_sext_i32_i8 s101, s0 ; encoding: [0x00,0x16,0xe5,0xbe]
+0x00,0x16,0xe5,0xbe
+
+# CHECK: s_sext_i32_i8 flat_scratch_lo, s0 ; encoding: [0x00,0x16,0xe6,0xbe]
+0x00,0x16,0xe6,0xbe
+
+# CHECK: s_sext_i32_i8 flat_scratch_hi, s0 ; encoding: [0x00,0x16,0xe7,0xbe]
+0x00,0x16,0xe7,0xbe
+
+# CHECK: s_sext_i32_i8 vcc_lo, s0 ; encoding: [0x00,0x16,0xea,0xbe]
+0x00,0x16,0xea,0xbe
+
+# CHECK: s_sext_i32_i8 vcc_hi, s0 ; encoding: [0x00,0x16,0xeb,0xbe]
+0x00,0x16,0xeb,0xbe
+
+# CHECK: s_sext_i32_i8 tba_lo, s0 ; encoding: [0x00,0x16,0xec,0xbe]
+0x00,0x16,0xec,0xbe
+
+# CHECK: s_sext_i32_i8 tba_hi, s0 ; encoding: [0x00,0x16,0xed,0xbe]
+0x00,0x16,0xed,0xbe
+
+# CHECK: s_sext_i32_i8 tma_lo, s0 ; encoding: [0x00,0x16,0xee,0xbe]
+0x00,0x16,0xee,0xbe
+
+# CHECK: s_sext_i32_i8 tma_hi, s0 ; encoding: [0x00,0x16,0xef,0xbe]
+0x00,0x16,0xef,0xbe
+
+# CHECK: s_sext_i32_i8 ttmp11, s0 ; encoding: [0x00,0x16,0xfb,0xbe]
+0x00,0x16,0xfb,0xbe
+
+# CHECK: s_sext_i32_i8 m0, s0 ; encoding: [0x00,0x16,0xfc,0xbe]
+0x00,0x16,0xfc,0xbe
+
+# CHECK: s_sext_i32_i8 exec_lo, s0 ; encoding: [0x00,0x16,0xfe,0xbe]
+0x00,0x16,0xfe,0xbe
+
+# CHECK: s_sext_i32_i8 exec_hi, s0 ; encoding: [0x00,0x16,0xff,0xbe]
+0x00,0x16,0xff,0xbe
+
+# CHECK: s_sext_i32_i8 s0, s101 ; encoding: [0x65,0x16,0x80,0xbe]
+0x65,0x16,0x80,0xbe
+
+# CHECK: s_sext_i32_i8 s0, flat_scratch_lo ; encoding: [0x66,0x16,0x80,0xbe]
+0x66,0x16,0x80,0xbe
+
+# CHECK: s_sext_i32_i8 s0, flat_scratch_hi ; encoding: [0x67,0x16,0x80,0xbe]
+0x67,0x16,0x80,0xbe
+
+# CHECK: s_sext_i32_i8 s0, vcc_lo ; encoding: [0x6a,0x16,0x80,0xbe]
+0x6a,0x16,0x80,0xbe
+
+# CHECK: s_sext_i32_i8 s0, vcc_hi ; encoding: [0x6b,0x16,0x80,0xbe]
+0x6b,0x16,0x80,0xbe
+
+# CHECK: s_sext_i32_i8 s0, tba_lo ; encoding: [0x6c,0x16,0x80,0xbe]
+0x6c,0x16,0x80,0xbe
+
+# CHECK: s_sext_i32_i8 s0, tba_hi ; encoding: [0x6d,0x16,0x80,0xbe]
+0x6d,0x16,0x80,0xbe
+
+# CHECK: s_sext_i32_i8 s0, tma_lo ; encoding: [0x6e,0x16,0x80,0xbe]
+0x6e,0x16,0x80,0xbe
+
+# CHECK: s_sext_i32_i8 s0, tma_hi ; encoding: [0x6f,0x16,0x80,0xbe]
+0x6f,0x16,0x80,0xbe
+
+# CHECK: s_sext_i32_i8 s0, ttmp11 ; encoding: [0x7b,0x16,0x80,0xbe]
+0x7b,0x16,0x80,0xbe
+
+# CHECK: s_sext_i32_i8 s0, m0 ; encoding: [0x7c,0x16,0x80,0xbe]
+0x7c,0x16,0x80,0xbe
+
+# CHECK: s_sext_i32_i8 s0, exec_lo ; encoding: [0x7e,0x16,0x80,0xbe]
+0x7e,0x16,0x80,0xbe
+
+# CHECK: s_sext_i32_i8 s0, exec_hi ; encoding: [0x7f,0x16,0x80,0xbe]
+0x7f,0x16,0x80,0xbe
+
+# CHECK: s_sext_i32_i8 s0, 0 ; encoding: [0x80,0x16,0x80,0xbe]
+0x80,0x16,0x80,0xbe
+
+# CHECK: s_sext_i32_i8 s0, -1 ; encoding: [0xc1,0x16,0x80,0xbe]
+0xc1,0x16,0x80,0xbe
+
+# CHECK: s_sext_i32_i8 s0, 0.5 ; encoding: [0xf0,0x16,0x80,0xbe]
+0xf0,0x16,0x80,0xbe
+
+# CHECK: s_sext_i32_i8 s0, -4.0 ; encoding: [0xf7,0x16,0x80,0xbe]
+0xf7,0x16,0x80,0xbe
+
+# CHECK: s_sext_i32_i8 s0, scc ; encoding: [0xfd,0x16,0x80,0xbe]
+0xfd,0x16,0x80,0xbe
+
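+# Unlike the other groups, the literal cases here use byte-sized values
+# (0x71, 0xf0); the encoded literal is still a full 32-bit word.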
+# CHECK: s_sext_i32_i8 s0, 0x71 ; encoding: [0xff,0x16,0x80,0xbe,0x71,0x00,0x00,0x00]
+0xff,0x16,0x80,0xbe,0x71,0x00,0x00,0x00
+
+# CHECK: s_sext_i32_i8 s0, 0xf0 ; encoding: [0xff,0x16,0x80,0xbe,0xf0,0x00,0x00,0x00]
+0xff,0x16,0x80,0xbe,0xf0,0x00,0x00,0x00
+
+# CHECK: s_sext_i32_i16 s0, s0 ; encoding: [0x00,0x17,0x80,0xbe]
+0x00,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s101, s0 ; encoding: [0x00,0x17,0xe5,0xbe]
+0x00,0x17,0xe5,0xbe
+
+# CHECK: s_sext_i32_i16 flat_scratch_lo, s0 ; encoding: [0x00,0x17,0xe6,0xbe]
+0x00,0x17,0xe6,0xbe
+
+# CHECK: s_sext_i32_i16 flat_scratch_hi, s0 ; encoding: [0x00,0x17,0xe7,0xbe]
+0x00,0x17,0xe7,0xbe
+
+# CHECK: s_sext_i32_i16 vcc_lo, s0 ; encoding: [0x00,0x17,0xea,0xbe]
+0x00,0x17,0xea,0xbe
+
+# CHECK: s_sext_i32_i16 vcc_hi, s0 ; encoding: [0x00,0x17,0xeb,0xbe]
+0x00,0x17,0xeb,0xbe
+
+# CHECK: s_sext_i32_i16 tba_lo, s0 ; encoding: [0x00,0x17,0xec,0xbe]
+0x00,0x17,0xec,0xbe
+
+# CHECK: s_sext_i32_i16 tba_hi, s0 ; encoding: [0x00,0x17,0xed,0xbe]
+0x00,0x17,0xed,0xbe
+
+# CHECK: s_sext_i32_i16 tma_lo, s0 ; encoding: [0x00,0x17,0xee,0xbe]
+0x00,0x17,0xee,0xbe
+
+# CHECK: s_sext_i32_i16 tma_hi, s0 ; encoding: [0x00,0x17,0xef,0xbe]
+0x00,0x17,0xef,0xbe
+
+# CHECK: s_sext_i32_i16 ttmp11, s0 ; encoding: [0x00,0x17,0xfb,0xbe]
+0x00,0x17,0xfb,0xbe
+
+# CHECK: s_sext_i32_i16 m0, s0 ; encoding: [0x00,0x17,0xfc,0xbe]
+0x00,0x17,0xfc,0xbe
+
+# CHECK: s_sext_i32_i16 exec_lo, s0 ; encoding: [0x00,0x17,0xfe,0xbe]
+0x00,0x17,0xfe,0xbe
+
+# CHECK: s_sext_i32_i16 exec_hi, s0 ; encoding: [0x00,0x17,0xff,0xbe]
+0x00,0x17,0xff,0xbe
+
+# CHECK: s_sext_i32_i16 s0, s101 ; encoding: [0x65,0x17,0x80,0xbe]
+0x65,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s0, flat_scratch_lo ; encoding: [0x66,0x17,0x80,0xbe]
+0x66,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s0, flat_scratch_hi ; encoding: [0x67,0x17,0x80,0xbe]
+0x67,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s0, vcc_lo ; encoding: [0x6a,0x17,0x80,0xbe]
+0x6a,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s0, vcc_hi ; encoding: [0x6b,0x17,0x80,0xbe]
+0x6b,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s0, tba_lo ; encoding: [0x6c,0x17,0x80,0xbe]
+0x6c,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s0, tba_hi ; encoding: [0x6d,0x17,0x80,0xbe]
+0x6d,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s0, tma_lo ; encoding: [0x6e,0x17,0x80,0xbe]
+0x6e,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s0, tma_hi ; encoding: [0x6f,0x17,0x80,0xbe]
+0x6f,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s0, ttmp11 ; encoding: [0x7b,0x17,0x80,0xbe]
+0x7b,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s0, m0 ; encoding: [0x7c,0x17,0x80,0xbe]
+0x7c,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s0, exec_lo ; encoding: [0x7e,0x17,0x80,0xbe]
+0x7e,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s0, exec_hi ; encoding: [0x7f,0x17,0x80,0xbe]
+0x7f,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s0, 0 ; encoding: [0x80,0x17,0x80,0xbe]
+0x80,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s0, -1 ; encoding: [0xc1,0x17,0x80,0xbe]
+0xc1,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s0, 0.5 ; encoding: [0xf0,0x17,0x80,0xbe]
+0xf0,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s0, -4.0 ; encoding: [0xf7,0x17,0x80,0xbe]
+0xf7,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s0, scc ; encoding: [0xfd,0x17,0x80,0xbe]
+0xfd,0x17,0x80,0xbe
+
+# CHECK: s_sext_i32_i16 s0, 0xaf123456 ; encoding: [0xff,0x17,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x17,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_sext_i32_i16 s0, 0x3f717273 ; encoding: [0xff,0x17,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x17,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_bitset0_b32 s0, s0 ; encoding: [0x00,0x18,0x80,0xbe]
+0x00,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s101, s0 ; encoding: [0x00,0x18,0xe5,0xbe]
+0x00,0x18,0xe5,0xbe
+
+# CHECK: s_bitset0_b32 flat_scratch_lo, s0 ; encoding: [0x00,0x18,0xe6,0xbe]
+0x00,0x18,0xe6,0xbe
+
+# CHECK: s_bitset0_b32 flat_scratch_hi, s0 ; encoding: [0x00,0x18,0xe7,0xbe]
+0x00,0x18,0xe7,0xbe
+
+# CHECK: s_bitset0_b32 vcc_lo, s0 ; encoding: [0x00,0x18,0xea,0xbe]
+0x00,0x18,0xea,0xbe
+
+# CHECK: s_bitset0_b32 vcc_hi, s0 ; encoding: [0x00,0x18,0xeb,0xbe]
+0x00,0x18,0xeb,0xbe
+
+# CHECK: s_bitset0_b32 tba_lo, s0 ; encoding: [0x00,0x18,0xec,0xbe]
+0x00,0x18,0xec,0xbe
+
+# CHECK: s_bitset0_b32 tba_hi, s0 ; encoding: [0x00,0x18,0xed,0xbe]
+0x00,0x18,0xed,0xbe
+
+# CHECK: s_bitset0_b32 tma_lo, s0 ; encoding: [0x00,0x18,0xee,0xbe]
+0x00,0x18,0xee,0xbe
+
+# CHECK: s_bitset0_b32 tma_hi, s0 ; encoding: [0x00,0x18,0xef,0xbe]
+0x00,0x18,0xef,0xbe
+
+# CHECK: s_bitset0_b32 ttmp11, s0 ; encoding: [0x00,0x18,0xfb,0xbe]
+0x00,0x18,0xfb,0xbe
+
+# CHECK: s_bitset0_b32 m0, s0 ; encoding: [0x00,0x18,0xfc,0xbe]
+0x00,0x18,0xfc,0xbe
+
+# CHECK: s_bitset0_b32 exec_lo, s0 ; encoding: [0x00,0x18,0xfe,0xbe]
+0x00,0x18,0xfe,0xbe
+
+# CHECK: s_bitset0_b32 exec_hi, s0 ; encoding: [0x00,0x18,0xff,0xbe]
+0x00,0x18,0xff,0xbe
+
+# CHECK: s_bitset0_b32 s0, s101 ; encoding: [0x65,0x18,0x80,0xbe]
+0x65,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s0, flat_scratch_lo ; encoding: [0x66,0x18,0x80,0xbe]
+0x66,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s0, flat_scratch_hi ; encoding: [0x67,0x18,0x80,0xbe]
+0x67,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s0, vcc_lo ; encoding: [0x6a,0x18,0x80,0xbe]
+0x6a,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s0, vcc_hi ; encoding: [0x6b,0x18,0x80,0xbe]
+0x6b,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s0, tba_lo ; encoding: [0x6c,0x18,0x80,0xbe]
+0x6c,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s0, tba_hi ; encoding: [0x6d,0x18,0x80,0xbe]
+0x6d,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s0, tma_lo ; encoding: [0x6e,0x18,0x80,0xbe]
+0x6e,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s0, tma_hi ; encoding: [0x6f,0x18,0x80,0xbe]
+0x6f,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s0, ttmp11 ; encoding: [0x7b,0x18,0x80,0xbe]
+0x7b,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s0, m0 ; encoding: [0x7c,0x18,0x80,0xbe]
+0x7c,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s0, exec_lo ; encoding: [0x7e,0x18,0x80,0xbe]
+0x7e,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s0, exec_hi ; encoding: [0x7f,0x18,0x80,0xbe]
+0x7f,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s0, 0 ; encoding: [0x80,0x18,0x80,0xbe]
+0x80,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s0, -1 ; encoding: [0xc1,0x18,0x80,0xbe]
+0xc1,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s0, 0.5 ; encoding: [0xf0,0x18,0x80,0xbe]
+0xf0,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s0, -4.0 ; encoding: [0xf7,0x18,0x80,0xbe]
+0xf7,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s0, scc ; encoding: [0xfd,0x18,0x80,0xbe]
+0xfd,0x18,0x80,0xbe
+
+# CHECK: s_bitset0_b32 s0, 0xaf123456 ; encoding: [0xff,0x18,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x18,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_bitset0_b32 s0, 0x3f717273 ; encoding: [0xff,0x18,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x18,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_bitset0_b64 s[0:1], s0 ; encoding: [0x00,0x19,0x80,0xbe]
+0x00,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[2:3], s0 ; encoding: [0x00,0x19,0x82,0xbe]
+0x00,0x19,0x82,0xbe
+
+# CHECK: s_bitset0_b64 s[100:101], s0 ; encoding: [0x00,0x19,0xe4,0xbe]
+0x00,0x19,0xe4,0xbe
+
+# CHECK: s_bitset0_b64 flat_scratch, s0 ; encoding: [0x00,0x19,0xe6,0xbe]
+0x00,0x19,0xe6,0xbe
+
+# CHECK: s_bitset0_b64 vcc, s0 ; encoding: [0x00,0x19,0xea,0xbe]
+0x00,0x19,0xea,0xbe
+
+# CHECK: s_bitset0_b64 tba, s0 ; encoding: [0x00,0x19,0xec,0xbe]
+0x00,0x19,0xec,0xbe
+
+# CHECK: s_bitset0_b64 tma, s0 ; encoding: [0x00,0x19,0xee,0xbe]
+0x00,0x19,0xee,0xbe
+
+# CHECK: s_bitset0_b64 ttmp[10:11], s0 ; encoding: [0x00,0x19,0xfa,0xbe]
+0x00,0x19,0xfa,0xbe
+
+# CHECK: s_bitset0_b64 exec, s0 ; encoding: [0x00,0x19,0xfe,0xbe]
+0x00,0x19,0xfe,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], s101 ; encoding: [0x65,0x19,0x80,0xbe]
+0x65,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], flat_scratch_lo ; encoding: [0x66,0x19,0x80,0xbe]
+0x66,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], flat_scratch_hi ; encoding: [0x67,0x19,0x80,0xbe]
+0x67,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], vcc_lo ; encoding: [0x6a,0x19,0x80,0xbe]
+0x6a,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], vcc_hi ; encoding: [0x6b,0x19,0x80,0xbe]
+0x6b,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], tba_lo ; encoding: [0x6c,0x19,0x80,0xbe]
+0x6c,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], tba_hi ; encoding: [0x6d,0x19,0x80,0xbe]
+0x6d,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], tma_lo ; encoding: [0x6e,0x19,0x80,0xbe]
+0x6e,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], tma_hi ; encoding: [0x6f,0x19,0x80,0xbe]
+0x6f,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], ttmp11 ; encoding: [0x7b,0x19,0x80,0xbe]
+0x7b,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], m0 ; encoding: [0x7c,0x19,0x80,0xbe]
+0x7c,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], exec_lo ; encoding: [0x7e,0x19,0x80,0xbe]
+0x7e,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], exec_hi ; encoding: [0x7f,0x19,0x80,0xbe]
+0x7f,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], 0 ; encoding: [0x80,0x19,0x80,0xbe]
+0x80,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], -1 ; encoding: [0xc1,0x19,0x80,0xbe]
+0xc1,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], 0.5 ; encoding: [0xf0,0x19,0x80,0xbe]
+0xf0,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], -4.0 ; encoding: [0xf7,0x19,0x80,0xbe]
+0xf7,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], scc ; encoding: [0xfd,0x19,0x80,0xbe]
+0xfd,0x19,0x80,0xbe
+
+# CHECK: s_bitset0_b64 s[0:1], 0xaf123456 ; encoding: [0xff,0x19,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x19,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_bitset0_b64 s[0:1], 0x3f717273 ; encoding: [0xff,0x19,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x19,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_bitset1_b32 s0, s0 ; encoding: [0x00,0x1a,0x80,0xbe]
+0x00,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s101, s0 ; encoding: [0x00,0x1a,0xe5,0xbe]
+0x00,0x1a,0xe5,0xbe
+
+# CHECK: s_bitset1_b32 flat_scratch_lo, s0 ; encoding: [0x00,0x1a,0xe6,0xbe]
+0x00,0x1a,0xe6,0xbe
+
+# CHECK: s_bitset1_b32 flat_scratch_hi, s0 ; encoding: [0x00,0x1a,0xe7,0xbe]
+0x00,0x1a,0xe7,0xbe
+
+# CHECK: s_bitset1_b32 vcc_lo, s0 ; encoding: [0x00,0x1a,0xea,0xbe]
+0x00,0x1a,0xea,0xbe
+
+# CHECK: s_bitset1_b32 vcc_hi, s0 ; encoding: [0x00,0x1a,0xeb,0xbe]
+0x00,0x1a,0xeb,0xbe
+
+# CHECK: s_bitset1_b32 tba_lo, s0 ; encoding: [0x00,0x1a,0xec,0xbe]
+0x00,0x1a,0xec,0xbe
+
+# CHECK: s_bitset1_b32 tba_hi, s0 ; encoding: [0x00,0x1a,0xed,0xbe]
+0x00,0x1a,0xed,0xbe
+
+# CHECK: s_bitset1_b32 tma_lo, s0 ; encoding: [0x00,0x1a,0xee,0xbe]
+0x00,0x1a,0xee,0xbe
+
+# CHECK: s_bitset1_b32 tma_hi, s0 ; encoding: [0x00,0x1a,0xef,0xbe]
+0x00,0x1a,0xef,0xbe
+
+# CHECK: s_bitset1_b32 ttmp11, s0 ; encoding: [0x00,0x1a,0xfb,0xbe]
+0x00,0x1a,0xfb,0xbe
+
+# CHECK: s_bitset1_b32 m0, s0 ; encoding: [0x00,0x1a,0xfc,0xbe]
+0x00,0x1a,0xfc,0xbe
+
+# CHECK: s_bitset1_b32 exec_lo, s0 ; encoding: [0x00,0x1a,0xfe,0xbe]
+0x00,0x1a,0xfe,0xbe
+
+# CHECK: s_bitset1_b32 exec_hi, s0 ; encoding: [0x00,0x1a,0xff,0xbe]
+0x00,0x1a,0xff,0xbe
+
+# CHECK: s_bitset1_b32 s0, s101 ; encoding: [0x65,0x1a,0x80,0xbe]
+0x65,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s0, flat_scratch_lo ; encoding: [0x66,0x1a,0x80,0xbe]
+0x66,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s0, flat_scratch_hi ; encoding: [0x67,0x1a,0x80,0xbe]
+0x67,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s0, vcc_lo ; encoding: [0x6a,0x1a,0x80,0xbe]
+0x6a,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s0, vcc_hi ; encoding: [0x6b,0x1a,0x80,0xbe]
+0x6b,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s0, tba_lo ; encoding: [0x6c,0x1a,0x80,0xbe]
+0x6c,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s0, tba_hi ; encoding: [0x6d,0x1a,0x80,0xbe]
+0x6d,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s0, tma_lo ; encoding: [0x6e,0x1a,0x80,0xbe]
+0x6e,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s0, tma_hi ; encoding: [0x6f,0x1a,0x80,0xbe]
+0x6f,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s0, ttmp11 ; encoding: [0x7b,0x1a,0x80,0xbe]
+0x7b,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s0, m0 ; encoding: [0x7c,0x1a,0x80,0xbe]
+0x7c,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s0, exec_lo ; encoding: [0x7e,0x1a,0x80,0xbe]
+0x7e,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s0, exec_hi ; encoding: [0x7f,0x1a,0x80,0xbe]
+0x7f,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s0, 0 ; encoding: [0x80,0x1a,0x80,0xbe]
+0x80,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s0, -1 ; encoding: [0xc1,0x1a,0x80,0xbe]
+0xc1,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s0, 0.5 ; encoding: [0xf0,0x1a,0x80,0xbe]
+0xf0,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s0, -4.0 ; encoding: [0xf7,0x1a,0x80,0xbe]
+0xf7,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s0, scc ; encoding: [0xfd,0x1a,0x80,0xbe]
+0xfd,0x1a,0x80,0xbe
+
+# CHECK: s_bitset1_b32 s0, 0xaf123456 ; encoding: [0xff,0x1a,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x1a,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_bitset1_b32 s0, 0x3f717273 ; encoding: [0xff,0x1a,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x1a,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_bitset1_b64 s[0:1], s0 ; encoding: [0x00,0x1b,0x80,0xbe]
+0x00,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[2:3], s0 ; encoding: [0x00,0x1b,0x82,0xbe]
+0x00,0x1b,0x82,0xbe
+
+# CHECK: s_bitset1_b64 s[100:101], s0 ; encoding: [0x00,0x1b,0xe4,0xbe]
+0x00,0x1b,0xe4,0xbe
+
+# CHECK: s_bitset1_b64 flat_scratch, s0 ; encoding: [0x00,0x1b,0xe6,0xbe]
+0x00,0x1b,0xe6,0xbe
+
+# CHECK: s_bitset1_b64 vcc, s0 ; encoding: [0x00,0x1b,0xea,0xbe]
+0x00,0x1b,0xea,0xbe
+
+# CHECK: s_bitset1_b64 tba, s0 ; encoding: [0x00,0x1b,0xec,0xbe]
+0x00,0x1b,0xec,0xbe
+
+# CHECK: s_bitset1_b64 tma, s0 ; encoding: [0x00,0x1b,0xee,0xbe]
+0x00,0x1b,0xee,0xbe
+
+# CHECK: s_bitset1_b64 ttmp[10:11], s0 ; encoding: [0x00,0x1b,0xfa,0xbe]
+0x00,0x1b,0xfa,0xbe
+
+# CHECK: s_bitset1_b64 exec, s0 ; encoding: [0x00,0x1b,0xfe,0xbe]
+0x00,0x1b,0xfe,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], s101 ; encoding: [0x65,0x1b,0x80,0xbe]
+0x65,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], flat_scratch_lo ; encoding: [0x66,0x1b,0x80,0xbe]
+0x66,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], flat_scratch_hi ; encoding: [0x67,0x1b,0x80,0xbe]
+0x67,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], vcc_lo ; encoding: [0x6a,0x1b,0x80,0xbe]
+0x6a,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], vcc_hi ; encoding: [0x6b,0x1b,0x80,0xbe]
+0x6b,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], tba_lo ; encoding: [0x6c,0x1b,0x80,0xbe]
+0x6c,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], tba_hi ; encoding: [0x6d,0x1b,0x80,0xbe]
+0x6d,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], tma_lo ; encoding: [0x6e,0x1b,0x80,0xbe]
+0x6e,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], tma_hi ; encoding: [0x6f,0x1b,0x80,0xbe]
+0x6f,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], ttmp11 ; encoding: [0x7b,0x1b,0x80,0xbe]
+0x7b,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], m0 ; encoding: [0x7c,0x1b,0x80,0xbe]
+0x7c,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], exec_lo ; encoding: [0x7e,0x1b,0x80,0xbe]
+0x7e,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], exec_hi ; encoding: [0x7f,0x1b,0x80,0xbe]
+0x7f,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], 0 ; encoding: [0x80,0x1b,0x80,0xbe]
+0x80,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], -1 ; encoding: [0xc1,0x1b,0x80,0xbe]
+0xc1,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], 0.5 ; encoding: [0xf0,0x1b,0x80,0xbe]
+0xf0,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], -4.0 ; encoding: [0xf7,0x1b,0x80,0xbe]
+0xf7,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], scc ; encoding: [0xfd,0x1b,0x80,0xbe]
+0xfd,0x1b,0x80,0xbe
+
+# CHECK: s_bitset1_b64 s[0:1], 0xaf123456 ; encoding: [0xff,0x1b,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x1b,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_bitset1_b64 s[0:1], 0x3f717273 ; encoding: [0xff,0x1b,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x1b,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_getpc_b64 s[0:1] ; encoding: [0x00,0x1c,0x80,0xbe]
+0x00,0x1c,0x80,0xbe
+
+# CHECK: s_getpc_b64 s[2:3] ; encoding: [0x00,0x1c,0x82,0xbe]
+0x00,0x1c,0x82,0xbe
+
+# CHECK: s_getpc_b64 s[100:101] ; encoding: [0x00,0x1c,0xe4,0xbe]
+0x00,0x1c,0xe4,0xbe
+
+# CHECK: s_getpc_b64 flat_scratch ; encoding: [0x00,0x1c,0xe6,0xbe]
+0x00,0x1c,0xe6,0xbe
+
+# CHECK: s_getpc_b64 vcc ; encoding: [0x00,0x1c,0xea,0xbe]
+0x00,0x1c,0xea,0xbe
+
+# CHECK: s_getpc_b64 tba ; encoding: [0x00,0x1c,0xec,0xbe]
+0x00,0x1c,0xec,0xbe
+
+# CHECK: s_getpc_b64 tma ; encoding: [0x00,0x1c,0xee,0xbe]
+0x00,0x1c,0xee,0xbe
+
+# CHECK: s_getpc_b64 ttmp[10:11] ; encoding: [0x00,0x1c,0xfa,0xbe]
+0x00,0x1c,0xfa,0xbe
+
+# CHECK: s_getpc_b64 exec ; encoding: [0x00,0x1c,0xfe,0xbe]
+0x00,0x1c,0xfe,0xbe
+
+# CHECK: s_setpc_b64 s[0:1] ; encoding: [0x00,0x1d,0x80,0xbe]
+0x00,0x1d,0x80,0xbe
+
+# CHECK: s_setpc_b64 s[2:3] ; encoding: [0x02,0x1d,0x80,0xbe]
+0x02,0x1d,0x80,0xbe
+
+# CHECK: s_setpc_b64 s[100:101] ; encoding: [0x64,0x1d,0x80,0xbe]
+0x64,0x1d,0x80,0xbe
+
+# CHECK: s_setpc_b64 flat_scratch ; encoding: [0x66,0x1d,0x80,0xbe]
+0x66,0x1d,0x80,0xbe
+
+# CHECK: s_setpc_b64 vcc ; encoding: [0x6a,0x1d,0x80,0xbe]
+0x6a,0x1d,0x80,0xbe
+
+# CHECK: s_setpc_b64 tba ; encoding: [0x6c,0x1d,0x80,0xbe]
+0x6c,0x1d,0x80,0xbe
+
+# CHECK: s_setpc_b64 tma ; encoding: [0x6e,0x1d,0x80,0xbe]
+0x6e,0x1d,0x80,0xbe
+
+# CHECK: s_setpc_b64 ttmp[10:11] ; encoding: [0x7a,0x1d,0x80,0xbe]
+0x7a,0x1d,0x80,0xbe
+
+# CHECK: s_swappc_b64 s[0:1], s[0:1] ; encoding: [0x00,0x1e,0x80,0xbe]
+0x00,0x1e,0x80,0xbe
+
+# CHECK: s_swappc_b64 s[2:3], s[0:1] ; encoding: [0x00,0x1e,0x82,0xbe]
+0x00,0x1e,0x82,0xbe
+
+# CHECK: s_swappc_b64 s[100:101], s[0:1] ; encoding: [0x00,0x1e,0xe4,0xbe]
+0x00,0x1e,0xe4,0xbe
+
+# CHECK: s_swappc_b64 flat_scratch, s[0:1] ; encoding: [0x00,0x1e,0xe6,0xbe]
+0x00,0x1e,0xe6,0xbe
+
+# CHECK: s_swappc_b64 vcc, s[0:1] ; encoding: [0x00,0x1e,0xea,0xbe]
+0x00,0x1e,0xea,0xbe
+
+# CHECK: s_swappc_b64 tba, s[0:1] ; encoding: [0x00,0x1e,0xec,0xbe]
+0x00,0x1e,0xec,0xbe
+
+# CHECK: s_swappc_b64 tma, s[0:1] ; encoding: [0x00,0x1e,0xee,0xbe]
+0x00,0x1e,0xee,0xbe
+
+# CHECK: s_swappc_b64 ttmp[10:11], s[0:1] ; encoding: [0x00,0x1e,0xfa,0xbe]
+0x00,0x1e,0xfa,0xbe
+
+# CHECK: s_swappc_b64 exec, s[0:1] ; encoding: [0x00,0x1e,0xfe,0xbe]
+0x00,0x1e,0xfe,0xbe
+
+# CHECK: s_swappc_b64 s[0:1], s[2:3] ; encoding: [0x02,0x1e,0x80,0xbe]
+0x02,0x1e,0x80,0xbe
+
+# CHECK: s_swappc_b64 s[0:1], s[100:101] ; encoding: [0x64,0x1e,0x80,0xbe]
+0x64,0x1e,0x80,0xbe
+
+# CHECK: s_swappc_b64 s[0:1], flat_scratch ; encoding: [0x66,0x1e,0x80,0xbe]
+0x66,0x1e,0x80,0xbe
+
+# CHECK: s_swappc_b64 s[0:1], vcc ; encoding: [0x6a,0x1e,0x80,0xbe]
+0x6a,0x1e,0x80,0xbe
+
+# CHECK: s_swappc_b64 s[0:1], tba ; encoding: [0x6c,0x1e,0x80,0xbe]
+0x6c,0x1e,0x80,0xbe
+
+# CHECK: s_swappc_b64 s[0:1], tma ; encoding: [0x6e,0x1e,0x80,0xbe]
+0x6e,0x1e,0x80,0xbe
+
+# CHECK: s_swappc_b64 s[0:1], ttmp[10:11] ; encoding: [0x7a,0x1e,0x80,0xbe]
+0x7a,0x1e,0x80,0xbe
+
+# CHECK: s_rfe_b64 s[0:1] ; encoding: [0x00,0x1f,0x80,0xbe]
+0x00,0x1f,0x80,0xbe
+
+# CHECK: s_rfe_b64 s[2:3] ; encoding: [0x02,0x1f,0x80,0xbe]
+0x02,0x1f,0x80,0xbe
+
+# CHECK: s_rfe_b64 s[100:101] ; encoding: [0x64,0x1f,0x80,0xbe]
+0x64,0x1f,0x80,0xbe
+
+# CHECK: s_rfe_b64 flat_scratch ; encoding: [0x66,0x1f,0x80,0xbe]
+0x66,0x1f,0x80,0xbe
+
+# CHECK: s_rfe_b64 vcc ; encoding: [0x6a,0x1f,0x80,0xbe]
+0x6a,0x1f,0x80,0xbe
+
+# CHECK: s_rfe_b64 tba ; encoding: [0x6c,0x1f,0x80,0xbe]
+0x6c,0x1f,0x80,0xbe
+
+# CHECK: s_rfe_b64 tma ; encoding: [0x6e,0x1f,0x80,0xbe]
+0x6e,0x1f,0x80,0xbe
+
+# CHECK: s_rfe_b64 ttmp[10:11] ; encoding: [0x7a,0x1f,0x80,0xbe]
+0x7a,0x1f,0x80,0xbe
+
+# CHECK: s_and_saveexec_b64 s[0:1], s[0:1] ; encoding: [0x00,0x20,0x80,0xbe]
+0x00,0x20,0x80,0xbe
+
+# CHECK: s_and_saveexec_b64 s[2:3], s[0:1] ; encoding: [0x00,0x20,0x82,0xbe]
+0x00,0x20,0x82,0xbe
+
+# CHECK: s_and_saveexec_b64 s[100:101], s[0:1] ; encoding: [0x00,0x20,0xe4,0xbe]
+0x00,0x20,0xe4,0xbe
+
+# CHECK: s_and_saveexec_b64 flat_scratch, s[0:1] ; encoding: [0x00,0x20,0xe6,0xbe]
+0x00,0x20,0xe6,0xbe
+
+# CHECK: s_and_saveexec_b64 vcc, s[0:1] ; encoding: [0x00,0x20,0xea,0xbe]
+0x00,0x20,0xea,0xbe
+
+# CHECK: s_and_saveexec_b64 tba, s[0:1] ; encoding: [0x00,0x20,0xec,0xbe]
+0x00,0x20,0xec,0xbe
+
+# CHECK: s_and_saveexec_b64 tma, s[0:1] ; encoding: [0x00,0x20,0xee,0xbe]
+0x00,0x20,0xee,0xbe
+
+# CHECK: s_and_saveexec_b64 ttmp[10:11], s[0:1] ; encoding: [0x00,0x20,0xfa,0xbe]
+0x00,0x20,0xfa,0xbe
+
+# CHECK: s_and_saveexec_b64 s[0:1], s[2:3] ; encoding: [0x02,0x20,0x80,0xbe]
+0x02,0x20,0x80,0xbe
+
+# CHECK: s_and_saveexec_b64 s[0:1], s[100:101] ; encoding: [0x64,0x20,0x80,0xbe]
+0x64,0x20,0x80,0xbe
+
+# CHECK: s_and_saveexec_b64 s[0:1], flat_scratch ; encoding: [0x66,0x20,0x80,0xbe]
+0x66,0x20,0x80,0xbe
+
+# CHECK: s_and_saveexec_b64 s[0:1], vcc ; encoding: [0x6a,0x20,0x80,0xbe]
+0x6a,0x20,0x80,0xbe
+
+# CHECK: s_and_saveexec_b64 s[0:1], tba ; encoding: [0x6c,0x20,0x80,0xbe]
+0x6c,0x20,0x80,0xbe
+
+# CHECK: s_and_saveexec_b64 s[0:1], tma ; encoding: [0x6e,0x20,0x80,0xbe]
+0x6e,0x20,0x80,0xbe
+
+# CHECK: s_and_saveexec_b64 s[0:1], ttmp[10:11] ; encoding: [0x7a,0x20,0x80,0xbe]
+0x7a,0x20,0x80,0xbe
+
+# CHECK: s_and_saveexec_b64 s[0:1], exec ; encoding: [0x7e,0x20,0x80,0xbe]
+0x7e,0x20,0x80,0xbe
+
+# CHECK: s_and_saveexec_b64 s[0:1], 0 ; encoding: [0x80,0x20,0x80,0xbe]
+0x80,0x20,0x80,0xbe
+
+# CHECK: s_and_saveexec_b64 s[0:1], -1 ; encoding: [0xc1,0x20,0x80,0xbe]
+0xc1,0x20,0x80,0xbe
+
+# CHECK: s_and_saveexec_b64 s[0:1], 0.5 ; encoding: [0xf0,0x20,0x80,0xbe]
+0xf0,0x20,0x80,0xbe
+
+# CHECK: s_and_saveexec_b64 s[0:1], -4.0 ; encoding: [0xf7,0x20,0x80,0xbe]
+0xf7,0x20,0x80,0xbe
+
+# CHECK: s_and_saveexec_b64 s[0:1], 0xaf123456 ; encoding: [0xff,0x20,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x20,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_and_saveexec_b64 s[0:1], 0x3f717273 ; encoding: [0xff,0x20,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x20,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_or_saveexec_b64 s[0:1], s[0:1] ; encoding: [0x00,0x21,0x80,0xbe]
+0x00,0x21,0x80,0xbe
+
+# CHECK: s_or_saveexec_b64 s[2:3], s[0:1] ; encoding: [0x00,0x21,0x82,0xbe]
+0x00,0x21,0x82,0xbe
+
+# CHECK: s_or_saveexec_b64 s[100:101], s[0:1] ; encoding: [0x00,0x21,0xe4,0xbe]
+0x00,0x21,0xe4,0xbe
+
+# CHECK: s_or_saveexec_b64 flat_scratch, s[0:1] ; encoding: [0x00,0x21,0xe6,0xbe]
+0x00,0x21,0xe6,0xbe
+
+# CHECK: s_or_saveexec_b64 vcc, s[0:1] ; encoding: [0x00,0x21,0xea,0xbe]
+0x00,0x21,0xea,0xbe
+
+# CHECK: s_or_saveexec_b64 tba, s[0:1] ; encoding: [0x00,0x21,0xec,0xbe]
+0x00,0x21,0xec,0xbe
+
+# CHECK: s_or_saveexec_b64 tma, s[0:1] ; encoding: [0x00,0x21,0xee,0xbe]
+0x00,0x21,0xee,0xbe
+
+# CHECK: s_or_saveexec_b64 ttmp[10:11], s[0:1] ; encoding: [0x00,0x21,0xfa,0xbe]
+0x00,0x21,0xfa,0xbe
+
+# CHECK: s_or_saveexec_b64 s[0:1], s[2:3] ; encoding: [0x02,0x21,0x80,0xbe]
+0x02,0x21,0x80,0xbe
+
+# CHECK: s_or_saveexec_b64 s[0:1], s[100:101] ; encoding: [0x64,0x21,0x80,0xbe]
+0x64,0x21,0x80,0xbe
+
+# CHECK: s_or_saveexec_b64 s[0:1], flat_scratch ; encoding: [0x66,0x21,0x80,0xbe]
+0x66,0x21,0x80,0xbe
+
+# CHECK: s_or_saveexec_b64 s[0:1], vcc ; encoding: [0x6a,0x21,0x80,0xbe]
+0x6a,0x21,0x80,0xbe
+
+# CHECK: s_or_saveexec_b64 s[0:1], tba ; encoding: [0x6c,0x21,0x80,0xbe]
+0x6c,0x21,0x80,0xbe
+
+# CHECK: s_or_saveexec_b64 s[0:1], tma ; encoding: [0x6e,0x21,0x80,0xbe]
+0x6e,0x21,0x80,0xbe
+
+# CHECK: s_or_saveexec_b64 s[0:1], ttmp[10:11] ; encoding: [0x7a,0x21,0x80,0xbe]
+0x7a,0x21,0x80,0xbe
+
+# CHECK: s_or_saveexec_b64 s[0:1], exec ; encoding: [0x7e,0x21,0x80,0xbe]
+0x7e,0x21,0x80,0xbe
+
+# CHECK: s_or_saveexec_b64 s[0:1], 0 ; encoding: [0x80,0x21,0x80,0xbe]
+0x80,0x21,0x80,0xbe
+
+# CHECK: s_or_saveexec_b64 s[0:1], -1 ; encoding: [0xc1,0x21,0x80,0xbe]
+0xc1,0x21,0x80,0xbe
+
+# CHECK: s_or_saveexec_b64 s[0:1], 0.5 ; encoding: [0xf0,0x21,0x80,0xbe]
+0xf0,0x21,0x80,0xbe
+
+# CHECK: s_or_saveexec_b64 s[0:1], -4.0 ; encoding: [0xf7,0x21,0x80,0xbe]
+0xf7,0x21,0x80,0xbe
+
+# CHECK: s_or_saveexec_b64 s[0:1], 0xaf123456 ; encoding: [0xff,0x21,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x21,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_or_saveexec_b64 s[0:1], 0x3f717273 ; encoding: [0xff,0x21,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x21,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_xor_saveexec_b64 s[0:1], s[0:1] ; encoding: [0x00,0x22,0x80,0xbe]
+0x00,0x22,0x80,0xbe
+
+# CHECK: s_xor_saveexec_b64 s[2:3], s[0:1] ; encoding: [0x00,0x22,0x82,0xbe]
+0x00,0x22,0x82,0xbe
+
+# CHECK: s_xor_saveexec_b64 s[100:101], s[0:1] ; encoding: [0x00,0x22,0xe4,0xbe]
+0x00,0x22,0xe4,0xbe
+
+# CHECK: s_xor_saveexec_b64 flat_scratch, s[0:1] ; encoding: [0x00,0x22,0xe6,0xbe]
+0x00,0x22,0xe6,0xbe
+
+# CHECK: s_xor_saveexec_b64 vcc, s[0:1] ; encoding: [0x00,0x22,0xea,0xbe]
+0x00,0x22,0xea,0xbe
+
+# CHECK: s_xor_saveexec_b64 tba, s[0:1] ; encoding: [0x00,0x22,0xec,0xbe]
+0x00,0x22,0xec,0xbe
+
+# CHECK: s_xor_saveexec_b64 tma, s[0:1] ; encoding: [0x00,0x22,0xee,0xbe]
+0x00,0x22,0xee,0xbe
+
+# CHECK: s_xor_saveexec_b64 ttmp[10:11], s[0:1] ; encoding: [0x00,0x22,0xfa,0xbe]
+0x00,0x22,0xfa,0xbe
+
+# CHECK: s_xor_saveexec_b64 s[0:1], s[2:3] ; encoding: [0x02,0x22,0x80,0xbe]
+0x02,0x22,0x80,0xbe
+
+# CHECK: s_xor_saveexec_b64 s[0:1], s[100:101] ; encoding: [0x64,0x22,0x80,0xbe]
+0x64,0x22,0x80,0xbe
+
+# CHECK: s_xor_saveexec_b64 s[0:1], flat_scratch ; encoding: [0x66,0x22,0x80,0xbe]
+0x66,0x22,0x80,0xbe
+
+# CHECK: s_xor_saveexec_b64 s[0:1], vcc ; encoding: [0x6a,0x22,0x80,0xbe]
+0x6a,0x22,0x80,0xbe
+
+# CHECK: s_xor_saveexec_b64 s[0:1], tba ; encoding: [0x6c,0x22,0x80,0xbe]
+0x6c,0x22,0x80,0xbe
+
+# CHECK: s_xor_saveexec_b64 s[0:1], tma ; encoding: [0x6e,0x22,0x80,0xbe]
+0x6e,0x22,0x80,0xbe
+
+# CHECK: s_xor_saveexec_b64 s[0:1], ttmp[10:11] ; encoding: [0x7a,0x22,0x80,0xbe]
+0x7a,0x22,0x80,0xbe
+
+# CHECK: s_xor_saveexec_b64 s[0:1], exec ; encoding: [0x7e,0x22,0x80,0xbe]
+0x7e,0x22,0x80,0xbe
+
+# CHECK: s_xor_saveexec_b64 s[0:1], 0 ; encoding: [0x80,0x22,0x80,0xbe]
+0x80,0x22,0x80,0xbe
+
+# CHECK: s_xor_saveexec_b64 s[0:1], -1 ; encoding: [0xc1,0x22,0x80,0xbe]
+0xc1,0x22,0x80,0xbe
+
+# CHECK: s_xor_saveexec_b64 s[0:1], 0.5 ; encoding: [0xf0,0x22,0x80,0xbe]
+0xf0,0x22,0x80,0xbe
+
+# CHECK: s_xor_saveexec_b64 s[0:1], -4.0 ; encoding: [0xf7,0x22,0x80,0xbe]
+0xf7,0x22,0x80,0xbe
+
+# CHECK: s_xor_saveexec_b64 s[0:1], 0xaf123456 ; encoding: [0xff,0x22,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x22,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_xor_saveexec_b64 s[0:1], 0x3f717273 ; encoding: [0xff,0x22,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x22,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_andn2_saveexec_b64 s[0:1], s[0:1] ; encoding: [0x00,0x23,0x80,0xbe]
+0x00,0x23,0x80,0xbe
+
+# CHECK: s_andn2_saveexec_b64 s[2:3], s[0:1] ; encoding: [0x00,0x23,0x82,0xbe]
+0x00,0x23,0x82,0xbe
+
+# CHECK: s_andn2_saveexec_b64 s[100:101], s[0:1] ; encoding: [0x00,0x23,0xe4,0xbe]
+0x00,0x23,0xe4,0xbe
+
+# CHECK: s_andn2_saveexec_b64 flat_scratch, s[0:1] ; encoding: [0x00,0x23,0xe6,0xbe]
+0x00,0x23,0xe6,0xbe
+
+# CHECK: s_andn2_saveexec_b64 vcc, s[0:1] ; encoding: [0x00,0x23,0xea,0xbe]
+0x00,0x23,0xea,0xbe
+
+# CHECK: s_andn2_saveexec_b64 tba, s[0:1] ; encoding: [0x00,0x23,0xec,0xbe]
+0x00,0x23,0xec,0xbe
+
+# CHECK: s_andn2_saveexec_b64 tma, s[0:1] ; encoding: [0x00,0x23,0xee,0xbe]
+0x00,0x23,0xee,0xbe
+
+# CHECK: s_andn2_saveexec_b64 ttmp[10:11], s[0:1] ; encoding: [0x00,0x23,0xfa,0xbe]
+0x00,0x23,0xfa,0xbe
+
+# CHECK: s_andn2_saveexec_b64 s[0:1], s[2:3] ; encoding: [0x02,0x23,0x80,0xbe]
+0x02,0x23,0x80,0xbe
+
+# CHECK: s_andn2_saveexec_b64 s[0:1], s[100:101] ; encoding: [0x64,0x23,0x80,0xbe]
+0x64,0x23,0x80,0xbe
+
+# CHECK: s_andn2_saveexec_b64 s[0:1], flat_scratch ; encoding: [0x66,0x23,0x80,0xbe]
+0x66,0x23,0x80,0xbe
+
+# CHECK: s_andn2_saveexec_b64 s[0:1], vcc ; encoding: [0x6a,0x23,0x80,0xbe]
+0x6a,0x23,0x80,0xbe
+
+# CHECK: s_andn2_saveexec_b64 s[0:1], tba ; encoding: [0x6c,0x23,0x80,0xbe]
+0x6c,0x23,0x80,0xbe
+
+# CHECK: s_andn2_saveexec_b64 s[0:1], tma ; encoding: [0x6e,0x23,0x80,0xbe]
+0x6e,0x23,0x80,0xbe
+
+# CHECK: s_andn2_saveexec_b64 s[0:1], ttmp[10:11] ; encoding: [0x7a,0x23,0x80,0xbe]
+0x7a,0x23,0x80,0xbe
+
+# CHECK: s_andn2_saveexec_b64 s[0:1], exec ; encoding: [0x7e,0x23,0x80,0xbe]
+0x7e,0x23,0x80,0xbe
+
+# CHECK: s_andn2_saveexec_b64 s[0:1], 0 ; encoding: [0x80,0x23,0x80,0xbe]
+0x80,0x23,0x80,0xbe
+
+# CHECK: s_andn2_saveexec_b64 s[0:1], -1 ; encoding: [0xc1,0x23,0x80,0xbe]
+0xc1,0x23,0x80,0xbe
+
+# CHECK: s_andn2_saveexec_b64 s[0:1], 0.5 ; encoding: [0xf0,0x23,0x80,0xbe]
+0xf0,0x23,0x80,0xbe
+
+# CHECK: s_andn2_saveexec_b64 s[0:1], -4.0 ; encoding: [0xf7,0x23,0x80,0xbe]
+0xf7,0x23,0x80,0xbe
+
+# CHECK: s_andn2_saveexec_b64 s[0:1], 0xaf123456 ; encoding: [0xff,0x23,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x23,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_andn2_saveexec_b64 s[0:1], 0x3f717273 ; encoding: [0xff,0x23,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x23,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_orn2_saveexec_b64 s[0:1], s[0:1] ; encoding: [0x00,0x24,0x80,0xbe]
+0x00,0x24,0x80,0xbe
+
+# CHECK: s_orn2_saveexec_b64 s[2:3], s[0:1] ; encoding: [0x00,0x24,0x82,0xbe]
+0x00,0x24,0x82,0xbe
+
+# CHECK: s_orn2_saveexec_b64 s[100:101], s[0:1] ; encoding: [0x00,0x24,0xe4,0xbe]
+0x00,0x24,0xe4,0xbe
+
+# CHECK: s_orn2_saveexec_b64 flat_scratch, s[0:1] ; encoding: [0x00,0x24,0xe6,0xbe]
+0x00,0x24,0xe6,0xbe
+
+# CHECK: s_orn2_saveexec_b64 vcc, s[0:1] ; encoding: [0x00,0x24,0xea,0xbe]
+0x00,0x24,0xea,0xbe
+
+# CHECK: s_orn2_saveexec_b64 tba, s[0:1] ; encoding: [0x00,0x24,0xec,0xbe]
+0x00,0x24,0xec,0xbe
+
+# CHECK: s_orn2_saveexec_b64 tma, s[0:1] ; encoding: [0x00,0x24,0xee,0xbe]
+0x00,0x24,0xee,0xbe
+
+# CHECK: s_orn2_saveexec_b64 ttmp[10:11], s[0:1] ; encoding: [0x00,0x24,0xfa,0xbe]
+0x00,0x24,0xfa,0xbe
+
+# CHECK: s_orn2_saveexec_b64 s[0:1], s[2:3] ; encoding: [0x02,0x24,0x80,0xbe]
+0x02,0x24,0x80,0xbe
+
+# CHECK: s_orn2_saveexec_b64 s[0:1], s[100:101] ; encoding: [0x64,0x24,0x80,0xbe]
+0x64,0x24,0x80,0xbe
+
+# CHECK: s_orn2_saveexec_b64 s[0:1], flat_scratch ; encoding: [0x66,0x24,0x80,0xbe]
+0x66,0x24,0x80,0xbe
+
+# CHECK: s_orn2_saveexec_b64 s[0:1], vcc ; encoding: [0x6a,0x24,0x80,0xbe]
+0x6a,0x24,0x80,0xbe
+
+# CHECK: s_orn2_saveexec_b64 s[0:1], tba ; encoding: [0x6c,0x24,0x80,0xbe]
+0x6c,0x24,0x80,0xbe
+
+# CHECK: s_orn2_saveexec_b64 s[0:1], tma ; encoding: [0x6e,0x24,0x80,0xbe]
+0x6e,0x24,0x80,0xbe
+
+# CHECK: s_orn2_saveexec_b64 s[0:1], ttmp[10:11] ; encoding: [0x7a,0x24,0x80,0xbe]
+0x7a,0x24,0x80,0xbe
+
+# CHECK: s_orn2_saveexec_b64 s[0:1], exec ; encoding: [0x7e,0x24,0x80,0xbe]
+0x7e,0x24,0x80,0xbe
+
+# CHECK: s_orn2_saveexec_b64 s[0:1], 0 ; encoding: [0x80,0x24,0x80,0xbe]
+0x80,0x24,0x80,0xbe
+
+# CHECK: s_orn2_saveexec_b64 s[0:1], -1 ; encoding: [0xc1,0x24,0x80,0xbe]
+0xc1,0x24,0x80,0xbe
+
+# CHECK: s_orn2_saveexec_b64 s[0:1], 0.5 ; encoding: [0xf0,0x24,0x80,0xbe]
+0xf0,0x24,0x80,0xbe
+
+# CHECK: s_orn2_saveexec_b64 s[0:1], -4.0 ; encoding: [0xf7,0x24,0x80,0xbe]
+0xf7,0x24,0x80,0xbe
+
+# CHECK: s_orn2_saveexec_b64 s[0:1], 0xaf123456 ; encoding: [0xff,0x24,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x24,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_orn2_saveexec_b64 s[0:1], 0x3f717273 ; encoding: [0xff,0x24,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x24,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_nand_saveexec_b64 s[0:1], s[0:1] ; encoding: [0x00,0x25,0x80,0xbe]
+0x00,0x25,0x80,0xbe
+
+# CHECK: s_nand_saveexec_b64 s[2:3], s[0:1] ; encoding: [0x00,0x25,0x82,0xbe]
+0x00,0x25,0x82,0xbe
+
+# CHECK: s_nand_saveexec_b64 s[100:101], s[0:1] ; encoding: [0x00,0x25,0xe4,0xbe]
+0x00,0x25,0xe4,0xbe
+
+# CHECK: s_nand_saveexec_b64 flat_scratch, s[0:1] ; encoding: [0x00,0x25,0xe6,0xbe]
+0x00,0x25,0xe6,0xbe
+
+# CHECK: s_nand_saveexec_b64 vcc, s[0:1] ; encoding: [0x00,0x25,0xea,0xbe]
+0x00,0x25,0xea,0xbe
+
+# CHECK: s_nand_saveexec_b64 tba, s[0:1] ; encoding: [0x00,0x25,0xec,0xbe]
+0x00,0x25,0xec,0xbe
+
+# CHECK: s_nand_saveexec_b64 tma, s[0:1] ; encoding: [0x00,0x25,0xee,0xbe]
+0x00,0x25,0xee,0xbe
+
+# CHECK: s_nand_saveexec_b64 ttmp[10:11], s[0:1] ; encoding: [0x00,0x25,0xfa,0xbe]
+0x00,0x25,0xfa,0xbe
+
+# CHECK: s_nand_saveexec_b64 s[0:1], s[2:3] ; encoding: [0x02,0x25,0x80,0xbe]
+0x02,0x25,0x80,0xbe
+
+# CHECK: s_nand_saveexec_b64 s[0:1], s[100:101] ; encoding: [0x64,0x25,0x80,0xbe]
+0x64,0x25,0x80,0xbe
+
+# CHECK: s_nand_saveexec_b64 s[0:1], flat_scratch ; encoding: [0x66,0x25,0x80,0xbe]
+0x66,0x25,0x80,0xbe
+
+# CHECK: s_nand_saveexec_b64 s[0:1], vcc ; encoding: [0x6a,0x25,0x80,0xbe]
+0x6a,0x25,0x80,0xbe
+
+# CHECK: s_nand_saveexec_b64 s[0:1], tba ; encoding: [0x6c,0x25,0x80,0xbe]
+0x6c,0x25,0x80,0xbe
+
+# CHECK: s_nand_saveexec_b64 s[0:1], tma ; encoding: [0x6e,0x25,0x80,0xbe]
+0x6e,0x25,0x80,0xbe
+
+# CHECK: s_nand_saveexec_b64 s[0:1], ttmp[10:11] ; encoding: [0x7a,0x25,0x80,0xbe]
+0x7a,0x25,0x80,0xbe
+
+# CHECK: s_nand_saveexec_b64 s[0:1], exec ; encoding: [0x7e,0x25,0x80,0xbe]
+0x7e,0x25,0x80,0xbe
+
+# CHECK: s_nand_saveexec_b64 s[0:1], 0 ; encoding: [0x80,0x25,0x80,0xbe]
+0x80,0x25,0x80,0xbe
+
+# CHECK: s_nand_saveexec_b64 s[0:1], -1 ; encoding: [0xc1,0x25,0x80,0xbe]
+0xc1,0x25,0x80,0xbe
+
+# CHECK: s_nand_saveexec_b64 s[0:1], 0.5 ; encoding: [0xf0,0x25,0x80,0xbe]
+0xf0,0x25,0x80,0xbe
+
+# CHECK: s_nand_saveexec_b64 s[0:1], -4.0 ; encoding: [0xf7,0x25,0x80,0xbe]
+0xf7,0x25,0x80,0xbe
+
+# CHECK: s_nand_saveexec_b64 s[0:1], 0xaf123456 ; encoding: [0xff,0x25,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x25,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_nand_saveexec_b64 s[0:1], 0x3f717273 ; encoding: [0xff,0x25,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x25,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_nor_saveexec_b64 s[0:1], s[0:1] ; encoding: [0x00,0x26,0x80,0xbe]
+0x00,0x26,0x80,0xbe
+
+# CHECK: s_nor_saveexec_b64 s[2:3], s[0:1] ; encoding: [0x00,0x26,0x82,0xbe]
+0x00,0x26,0x82,0xbe
+
+# CHECK: s_nor_saveexec_b64 s[100:101], s[0:1] ; encoding: [0x00,0x26,0xe4,0xbe]
+0x00,0x26,0xe4,0xbe
+
+# CHECK: s_nor_saveexec_b64 flat_scratch, s[0:1] ; encoding: [0x00,0x26,0xe6,0xbe]
+0x00,0x26,0xe6,0xbe
+
+# CHECK: s_nor_saveexec_b64 vcc, s[0:1] ; encoding: [0x00,0x26,0xea,0xbe]
+0x00,0x26,0xea,0xbe
+
+# CHECK: s_nor_saveexec_b64 tba, s[0:1] ; encoding: [0x00,0x26,0xec,0xbe]
+0x00,0x26,0xec,0xbe
+
+# CHECK: s_nor_saveexec_b64 tma, s[0:1] ; encoding: [0x00,0x26,0xee,0xbe]
+0x00,0x26,0xee,0xbe
+
+# CHECK: s_nor_saveexec_b64 ttmp[10:11], s[0:1] ; encoding: [0x00,0x26,0xfa,0xbe]
+0x00,0x26,0xfa,0xbe
+
+# CHECK: s_nor_saveexec_b64 s[0:1], s[2:3] ; encoding: [0x02,0x26,0x80,0xbe]
+0x02,0x26,0x80,0xbe
+
+# CHECK: s_nor_saveexec_b64 s[0:1], s[100:101] ; encoding: [0x64,0x26,0x80,0xbe]
+0x64,0x26,0x80,0xbe
+
+# CHECK: s_nor_saveexec_b64 s[0:1], flat_scratch ; encoding: [0x66,0x26,0x80,0xbe]
+0x66,0x26,0x80,0xbe
+
+# CHECK: s_nor_saveexec_b64 s[0:1], vcc ; encoding: [0x6a,0x26,0x80,0xbe]
+0x6a,0x26,0x80,0xbe
+
+# CHECK: s_nor_saveexec_b64 s[0:1], tba ; encoding: [0x6c,0x26,0x80,0xbe]
+0x6c,0x26,0x80,0xbe
+
+# CHECK: s_nor_saveexec_b64 s[0:1], tma ; encoding: [0x6e,0x26,0x80,0xbe]
+0x6e,0x26,0x80,0xbe
+
+# CHECK: s_nor_saveexec_b64 s[0:1], ttmp[10:11] ; encoding: [0x7a,0x26,0x80,0xbe]
+0x7a,0x26,0x80,0xbe
+
+# CHECK: s_nor_saveexec_b64 s[0:1], exec ; encoding: [0x7e,0x26,0x80,0xbe]
+0x7e,0x26,0x80,0xbe
+
+# CHECK: s_nor_saveexec_b64 s[0:1], 0 ; encoding: [0x80,0x26,0x80,0xbe]
+0x80,0x26,0x80,0xbe
+
+# CHECK: s_nor_saveexec_b64 s[0:1], -1 ; encoding: [0xc1,0x26,0x80,0xbe]
+0xc1,0x26,0x80,0xbe
+
+# CHECK: s_nor_saveexec_b64 s[0:1], 0.5 ; encoding: [0xf0,0x26,0x80,0xbe]
+0xf0,0x26,0x80,0xbe
+
+# CHECK: s_nor_saveexec_b64 s[0:1], -4.0 ; encoding: [0xf7,0x26,0x80,0xbe]
+0xf7,0x26,0x80,0xbe
+
+# CHECK: s_nor_saveexec_b64 s[0:1], 0xaf123456 ; encoding: [0xff,0x26,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x26,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_nor_saveexec_b64 s[0:1], 0x3f717273 ; encoding: [0xff,0x26,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x26,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_xnor_saveexec_b64 s[0:1], s[0:1] ; encoding: [0x00,0x27,0x80,0xbe]
+0x00,0x27,0x80,0xbe
+
+# CHECK: s_xnor_saveexec_b64 s[2:3], s[0:1] ; encoding: [0x00,0x27,0x82,0xbe]
+0x00,0x27,0x82,0xbe
+
+# CHECK: s_xnor_saveexec_b64 s[100:101], s[0:1] ; encoding: [0x00,0x27,0xe4,0xbe]
+0x00,0x27,0xe4,0xbe
+
+# CHECK: s_xnor_saveexec_b64 flat_scratch, s[0:1] ; encoding: [0x00,0x27,0xe6,0xbe]
+0x00,0x27,0xe6,0xbe
+
+# CHECK: s_xnor_saveexec_b64 vcc, s[0:1] ; encoding: [0x00,0x27,0xea,0xbe]
+0x00,0x27,0xea,0xbe
+
+# CHECK: s_xnor_saveexec_b64 tba, s[0:1] ; encoding: [0x00,0x27,0xec,0xbe]
+0x00,0x27,0xec,0xbe
+
+# CHECK: s_xnor_saveexec_b64 tma, s[0:1] ; encoding: [0x00,0x27,0xee,0xbe]
+0x00,0x27,0xee,0xbe
+
+# CHECK: s_xnor_saveexec_b64 ttmp[10:11], s[0:1] ; encoding: [0x00,0x27,0xfa,0xbe]
+0x00,0x27,0xfa,0xbe
+
+# CHECK: s_xnor_saveexec_b64 s[0:1], s[2:3] ; encoding: [0x02,0x27,0x80,0xbe]
+0x02,0x27,0x80,0xbe
+
+# CHECK: s_xnor_saveexec_b64 s[0:1], s[100:101] ; encoding: [0x64,0x27,0x80,0xbe]
+0x64,0x27,0x80,0xbe
+
+# CHECK: s_xnor_saveexec_b64 s[0:1], flat_scratch ; encoding: [0x66,0x27,0x80,0xbe]
+0x66,0x27,0x80,0xbe
+
+# CHECK: s_xnor_saveexec_b64 s[0:1], vcc ; encoding: [0x6a,0x27,0x80,0xbe]
+0x6a,0x27,0x80,0xbe
+
+# CHECK: s_xnor_saveexec_b64 s[0:1], tba ; encoding: [0x6c,0x27,0x80,0xbe]
+0x6c,0x27,0x80,0xbe
+
+# CHECK: s_xnor_saveexec_b64 s[0:1], tma ; encoding: [0x6e,0x27,0x80,0xbe]
+0x6e,0x27,0x80,0xbe
+
+# CHECK: s_xnor_saveexec_b64 s[0:1], ttmp[10:11] ; encoding: [0x7a,0x27,0x80,0xbe]
+0x7a,0x27,0x80,0xbe
+
+# CHECK: s_xnor_saveexec_b64 s[0:1], exec ; encoding: [0x7e,0x27,0x80,0xbe]
+0x7e,0x27,0x80,0xbe
+
+# CHECK: s_xnor_saveexec_b64 s[0:1], 0 ; encoding: [0x80,0x27,0x80,0xbe]
+0x80,0x27,0x80,0xbe
+
+# CHECK: s_xnor_saveexec_b64 s[0:1], -1 ; encoding: [0xc1,0x27,0x80,0xbe]
+0xc1,0x27,0x80,0xbe
+
+# CHECK: s_xnor_saveexec_b64 s[0:1], 0.5 ; encoding: [0xf0,0x27,0x80,0xbe]
+0xf0,0x27,0x80,0xbe
+
+# CHECK: s_xnor_saveexec_b64 s[0:1], -4.0 ; encoding: [0xf7,0x27,0x80,0xbe]
+0xf7,0x27,0x80,0xbe
+
+# CHECK: s_xnor_saveexec_b64 s[0:1], 0xaf123456 ; encoding: [0xff,0x27,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x27,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_xnor_saveexec_b64 s[0:1], 0x3f717273 ; encoding: [0xff,0x27,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x27,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_quadmask_b32 s0, s0 ; encoding: [0x00,0x28,0x80,0xbe]
+0x00,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s101, s0 ; encoding: [0x00,0x28,0xe5,0xbe]
+0x00,0x28,0xe5,0xbe
+
+# CHECK: s_quadmask_b32 flat_scratch_lo, s0 ; encoding: [0x00,0x28,0xe6,0xbe]
+0x00,0x28,0xe6,0xbe
+
+# CHECK: s_quadmask_b32 flat_scratch_hi, s0 ; encoding: [0x00,0x28,0xe7,0xbe]
+0x00,0x28,0xe7,0xbe
+
+# CHECK: s_quadmask_b32 vcc_lo, s0 ; encoding: [0x00,0x28,0xea,0xbe]
+0x00,0x28,0xea,0xbe
+
+# CHECK: s_quadmask_b32 vcc_hi, s0 ; encoding: [0x00,0x28,0xeb,0xbe]
+0x00,0x28,0xeb,0xbe
+
+# CHECK: s_quadmask_b32 tba_lo, s0 ; encoding: [0x00,0x28,0xec,0xbe]
+0x00,0x28,0xec,0xbe
+
+# CHECK: s_quadmask_b32 tba_hi, s0 ; encoding: [0x00,0x28,0xed,0xbe]
+0x00,0x28,0xed,0xbe
+
+# CHECK: s_quadmask_b32 tma_lo, s0 ; encoding: [0x00,0x28,0xee,0xbe]
+0x00,0x28,0xee,0xbe
+
+# CHECK: s_quadmask_b32 tma_hi, s0 ; encoding: [0x00,0x28,0xef,0xbe]
+0x00,0x28,0xef,0xbe
+
+# CHECK: s_quadmask_b32 ttmp11, s0 ; encoding: [0x00,0x28,0xfb,0xbe]
+0x00,0x28,0xfb,0xbe
+
+# CHECK: s_quadmask_b32 m0, s0 ; encoding: [0x00,0x28,0xfc,0xbe]
+0x00,0x28,0xfc,0xbe
+
+# CHECK: s_quadmask_b32 exec_lo, s0 ; encoding: [0x00,0x28,0xfe,0xbe]
+0x00,0x28,0xfe,0xbe
+
+# CHECK: s_quadmask_b32 exec_hi, s0 ; encoding: [0x00,0x28,0xff,0xbe]
+0x00,0x28,0xff,0xbe
+
+# CHECK: s_quadmask_b32 s0, s101 ; encoding: [0x65,0x28,0x80,0xbe]
+0x65,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s0, flat_scratch_lo ; encoding: [0x66,0x28,0x80,0xbe]
+0x66,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s0, flat_scratch_hi ; encoding: [0x67,0x28,0x80,0xbe]
+0x67,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s0, vcc_lo ; encoding: [0x6a,0x28,0x80,0xbe]
+0x6a,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s0, vcc_hi ; encoding: [0x6b,0x28,0x80,0xbe]
+0x6b,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s0, tba_lo ; encoding: [0x6c,0x28,0x80,0xbe]
+0x6c,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s0, tba_hi ; encoding: [0x6d,0x28,0x80,0xbe]
+0x6d,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s0, tma_lo ; encoding: [0x6e,0x28,0x80,0xbe]
+0x6e,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s0, tma_hi ; encoding: [0x6f,0x28,0x80,0xbe]
+0x6f,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s0, ttmp11 ; encoding: [0x7b,0x28,0x80,0xbe]
+0x7b,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s0, m0 ; encoding: [0x7c,0x28,0x80,0xbe]
+0x7c,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s0, exec_lo ; encoding: [0x7e,0x28,0x80,0xbe]
+0x7e,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s0, exec_hi ; encoding: [0x7f,0x28,0x80,0xbe]
+0x7f,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s0, 0 ; encoding: [0x80,0x28,0x80,0xbe]
+0x80,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s0, -1 ; encoding: [0xc1,0x28,0x80,0xbe]
+0xc1,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s0, 0.5 ; encoding: [0xf0,0x28,0x80,0xbe]
+0xf0,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s0, -4.0 ; encoding: [0xf7,0x28,0x80,0xbe]
+0xf7,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s0, scc ; encoding: [0xfd,0x28,0x80,0xbe]
+0xfd,0x28,0x80,0xbe
+
+# CHECK: s_quadmask_b32 s0, 0xaf123456 ; encoding: [0xff,0x28,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x28,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_quadmask_b32 s0, 0x3f717273 ; encoding: [0xff,0x28,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x28,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_quadmask_b64 s[0:1], s[0:1] ; encoding: [0x00,0x29,0x80,0xbe]
+0x00,0x29,0x80,0xbe
+
+# CHECK: s_quadmask_b64 s[2:3], s[0:1] ; encoding: [0x00,0x29,0x82,0xbe]
+0x00,0x29,0x82,0xbe
+
+# CHECK: s_quadmask_b64 s[100:101], s[0:1] ; encoding: [0x00,0x29,0xe4,0xbe]
+0x00,0x29,0xe4,0xbe
+
+# CHECK: s_quadmask_b64 flat_scratch, s[0:1] ; encoding: [0x00,0x29,0xe6,0xbe]
+0x00,0x29,0xe6,0xbe
+
+# CHECK: s_quadmask_b64 vcc, s[0:1] ; encoding: [0x00,0x29,0xea,0xbe]
+0x00,0x29,0xea,0xbe
+
+# CHECK: s_quadmask_b64 tba, s[0:1] ; encoding: [0x00,0x29,0xec,0xbe]
+0x00,0x29,0xec,0xbe
+
+# CHECK: s_quadmask_b64 tma, s[0:1] ; encoding: [0x00,0x29,0xee,0xbe]
+0x00,0x29,0xee,0xbe
+
+# CHECK: s_quadmask_b64 ttmp[10:11], s[0:1] ; encoding: [0x00,0x29,0xfa,0xbe]
+0x00,0x29,0xfa,0xbe
+
+# CHECK: s_quadmask_b64 exec, s[0:1] ; encoding: [0x00,0x29,0xfe,0xbe]
+0x00,0x29,0xfe,0xbe
+
+# CHECK: s_quadmask_b64 s[0:1], s[2:3] ; encoding: [0x02,0x29,0x80,0xbe]
+0x02,0x29,0x80,0xbe
+
+# CHECK: s_quadmask_b64 s[0:1], s[100:101] ; encoding: [0x64,0x29,0x80,0xbe]
+0x64,0x29,0x80,0xbe
+
+# CHECK: s_quadmask_b64 s[0:1], flat_scratch ; encoding: [0x66,0x29,0x80,0xbe]
+0x66,0x29,0x80,0xbe
+
+# CHECK: s_quadmask_b64 s[0:1], vcc ; encoding: [0x6a,0x29,0x80,0xbe]
+0x6a,0x29,0x80,0xbe
+
+# CHECK: s_quadmask_b64 s[0:1], tba ; encoding: [0x6c,0x29,0x80,0xbe]
+0x6c,0x29,0x80,0xbe
+
+# CHECK: s_quadmask_b64 s[0:1], tma ; encoding: [0x6e,0x29,0x80,0xbe]
+0x6e,0x29,0x80,0xbe
+
+# CHECK: s_quadmask_b64 s[0:1], ttmp[10:11] ; encoding: [0x7a,0x29,0x80,0xbe]
+0x7a,0x29,0x80,0xbe
+
+# CHECK: s_quadmask_b64 s[0:1], exec ; encoding: [0x7e,0x29,0x80,0xbe]
+0x7e,0x29,0x80,0xbe
+
+# CHECK: s_quadmask_b64 s[0:1], 0 ; encoding: [0x80,0x29,0x80,0xbe]
+0x80,0x29,0x80,0xbe
+
+# CHECK: s_quadmask_b64 s[0:1], -1 ; encoding: [0xc1,0x29,0x80,0xbe]
+0xc1,0x29,0x80,0xbe
+
+# CHECK: s_quadmask_b64 s[0:1], 0.5 ; encoding: [0xf0,0x29,0x80,0xbe]
+0xf0,0x29,0x80,0xbe
+
+# CHECK: s_quadmask_b64 s[0:1], -4.0 ; encoding: [0xf7,0x29,0x80,0xbe]
+0xf7,0x29,0x80,0xbe
+
+# CHECK: s_quadmask_b64 s[0:1], 0xaf123456 ; encoding: [0xff,0x29,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x29,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_quadmask_b64 s[0:1], 0x3f717273 ; encoding: [0xff,0x29,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x29,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_movrels_b32 s0, s0 ; encoding: [0x00,0x2a,0x80,0xbe]
+0x00,0x2a,0x80,0xbe
+
+# CHECK: s_movrels_b32 s101, s0 ; encoding: [0x00,0x2a,0xe5,0xbe]
+0x00,0x2a,0xe5,0xbe
+
+# CHECK: s_movrels_b32 flat_scratch_lo, s0 ; encoding: [0x00,0x2a,0xe6,0xbe]
+0x00,0x2a,0xe6,0xbe
+
+# CHECK: s_movrels_b32 flat_scratch_hi, s0 ; encoding: [0x00,0x2a,0xe7,0xbe]
+0x00,0x2a,0xe7,0xbe
+
+# CHECK: s_movrels_b32 vcc_lo, s0 ; encoding: [0x00,0x2a,0xea,0xbe]
+0x00,0x2a,0xea,0xbe
+
+# CHECK: s_movrels_b32 vcc_hi, s0 ; encoding: [0x00,0x2a,0xeb,0xbe]
+0x00,0x2a,0xeb,0xbe
+
+# CHECK: s_movrels_b32 tba_lo, s0 ; encoding: [0x00,0x2a,0xec,0xbe]
+0x00,0x2a,0xec,0xbe
+
+# CHECK: s_movrels_b32 tba_hi, s0 ; encoding: [0x00,0x2a,0xed,0xbe]
+0x00,0x2a,0xed,0xbe
+
+# CHECK: s_movrels_b32 tma_lo, s0 ; encoding: [0x00,0x2a,0xee,0xbe]
+0x00,0x2a,0xee,0xbe
+
+# CHECK: s_movrels_b32 tma_hi, s0 ; encoding: [0x00,0x2a,0xef,0xbe]
+0x00,0x2a,0xef,0xbe
+
+# CHECK: s_movrels_b32 ttmp11, s0 ; encoding: [0x00,0x2a,0xfb,0xbe]
+0x00,0x2a,0xfb,0xbe
+
+# CHECK: s_movrels_b32 m0, s0 ; encoding: [0x00,0x2a,0xfc,0xbe]
+0x00,0x2a,0xfc,0xbe
+
+# CHECK: s_movrels_b32 exec_lo, s0 ; encoding: [0x00,0x2a,0xfe,0xbe]
+0x00,0x2a,0xfe,0xbe
+
+# CHECK: s_movrels_b32 exec_hi, s0 ; encoding: [0x00,0x2a,0xff,0xbe]
+0x00,0x2a,0xff,0xbe
+
+# CHECK: s_movrels_b32 s0, s101 ; encoding: [0x65,0x2a,0x80,0xbe]
+0x65,0x2a,0x80,0xbe
+
+# CHECK: s_movrels_b32 s0, flat_scratch_lo ; encoding: [0x66,0x2a,0x80,0xbe]
+0x66,0x2a,0x80,0xbe
+
+# CHECK: s_movrels_b32 s0, flat_scratch_hi ; encoding: [0x67,0x2a,0x80,0xbe]
+0x67,0x2a,0x80,0xbe
+
+# CHECK: s_movrels_b32 s0, vcc_lo ; encoding: [0x6a,0x2a,0x80,0xbe]
+0x6a,0x2a,0x80,0xbe
+
+# CHECK: s_movrels_b32 s0, vcc_hi ; encoding: [0x6b,0x2a,0x80,0xbe]
+0x6b,0x2a,0x80,0xbe
+
+# CHECK: s_movrels_b32 s0, tba_lo ; encoding: [0x6c,0x2a,0x80,0xbe]
+0x6c,0x2a,0x80,0xbe
+
+# CHECK: s_movrels_b32 s0, tba_hi ; encoding: [0x6d,0x2a,0x80,0xbe]
+0x6d,0x2a,0x80,0xbe
+
+# CHECK: s_movrels_b32 s0, tma_lo ; encoding: [0x6e,0x2a,0x80,0xbe]
+0x6e,0x2a,0x80,0xbe
+
+# CHECK: s_movrels_b32 s0, tma_hi ; encoding: [0x6f,0x2a,0x80,0xbe]
+0x6f,0x2a,0x80,0xbe
+
+# CHECK: s_movrels_b32 s0, ttmp11 ; encoding: [0x7b,0x2a,0x80,0xbe]
+0x7b,0x2a,0x80,0xbe
+
+# CHECK: s_movrels_b64 s[0:1], s[0:1] ; encoding: [0x00,0x2b,0x80,0xbe]
+0x00,0x2b,0x80,0xbe
+
+# CHECK: s_movrels_b64 s[2:3], s[0:1] ; encoding: [0x00,0x2b,0x82,0xbe]
+0x00,0x2b,0x82,0xbe
+
+# CHECK: s_movrels_b64 s[100:101], s[0:1] ; encoding: [0x00,0x2b,0xe4,0xbe]
+0x00,0x2b,0xe4,0xbe
+
+# CHECK: s_movrels_b64 flat_scratch, s[0:1] ; encoding: [0x00,0x2b,0xe6,0xbe]
+0x00,0x2b,0xe6,0xbe
+
+# CHECK: s_movrels_b64 vcc, s[0:1] ; encoding: [0x00,0x2b,0xea,0xbe]
+0x00,0x2b,0xea,0xbe
+
+# CHECK: s_movrels_b64 tba, s[0:1] ; encoding: [0x00,0x2b,0xec,0xbe]
+0x00,0x2b,0xec,0xbe
+
+# CHECK: s_movrels_b64 tma, s[0:1] ; encoding: [0x00,0x2b,0xee,0xbe]
+0x00,0x2b,0xee,0xbe
+
+# CHECK: s_movrels_b64 ttmp[10:11], s[0:1] ; encoding: [0x00,0x2b,0xfa,0xbe]
+0x00,0x2b,0xfa,0xbe
+
+# CHECK: s_movrels_b64 exec, s[0:1] ; encoding: [0x00,0x2b,0xfe,0xbe]
+0x00,0x2b,0xfe,0xbe
+
+# CHECK: s_movrels_b64 s[0:1], s[2:3] ; encoding: [0x02,0x2b,0x80,0xbe]
+0x02,0x2b,0x80,0xbe
+
+# CHECK: s_movrels_b64 s[0:1], s[100:101] ; encoding: [0x64,0x2b,0x80,0xbe]
+0x64,0x2b,0x80,0xbe
+
+# CHECK: s_movrels_b64 s[0:1], flat_scratch ; encoding: [0x66,0x2b,0x80,0xbe]
+0x66,0x2b,0x80,0xbe
+
+# CHECK: s_movrels_b64 s[0:1], vcc ; encoding: [0x6a,0x2b,0x80,0xbe]
+0x6a,0x2b,0x80,0xbe
+
+# CHECK: s_movrels_b64 s[0:1], tba ; encoding: [0x6c,0x2b,0x80,0xbe]
+0x6c,0x2b,0x80,0xbe
+
+# CHECK: s_movrels_b64 s[0:1], tma ; encoding: [0x6e,0x2b,0x80,0xbe]
+0x6e,0x2b,0x80,0xbe
+
+# CHECK: s_movrels_b64 s[0:1], ttmp[10:11] ; encoding: [0x7a,0x2b,0x80,0xbe]
+0x7a,0x2b,0x80,0xbe
+
+# CHECK: s_movreld_b32 s0, s0 ; encoding: [0x00,0x2c,0x80,0xbe]
+0x00,0x2c,0x80,0xbe
+
+# CHECK: s_movreld_b32 s101, s0 ; encoding: [0x00,0x2c,0xe5,0xbe]
+0x00,0x2c,0xe5,0xbe
+
+# CHECK: s_movreld_b32 flat_scratch_lo, s0 ; encoding: [0x00,0x2c,0xe6,0xbe]
+0x00,0x2c,0xe6,0xbe
+
+# CHECK: s_movreld_b32 flat_scratch_hi, s0 ; encoding: [0x00,0x2c,0xe7,0xbe]
+0x00,0x2c,0xe7,0xbe
+
+# CHECK: s_movreld_b32 vcc_lo, s0 ; encoding: [0x00,0x2c,0xea,0xbe]
+0x00,0x2c,0xea,0xbe
+
+# CHECK: s_movreld_b32 vcc_hi, s0 ; encoding: [0x00,0x2c,0xeb,0xbe]
+0x00,0x2c,0xeb,0xbe
+
+# CHECK: s_movreld_b32 tba_lo, s0 ; encoding: [0x00,0x2c,0xec,0xbe]
+0x00,0x2c,0xec,0xbe
+
+# CHECK: s_movreld_b32 tba_hi, s0 ; encoding: [0x00,0x2c,0xed,0xbe]
+0x00,0x2c,0xed,0xbe
+
+# CHECK: s_movreld_b32 tma_lo, s0 ; encoding: [0x00,0x2c,0xee,0xbe]
+0x00,0x2c,0xee,0xbe
+
+# CHECK: s_movreld_b32 tma_hi, s0 ; encoding: [0x00,0x2c,0xef,0xbe]
+0x00,0x2c,0xef,0xbe
+
+# CHECK: s_movreld_b32 ttmp11, s0 ; encoding: [0x00,0x2c,0xfb,0xbe]
+0x00,0x2c,0xfb,0xbe
+
+# CHECK: s_movreld_b32 s0, s101 ; encoding: [0x65,0x2c,0x80,0xbe]
+0x65,0x2c,0x80,0xbe
+
+# CHECK: s_movreld_b32 s0, flat_scratch_lo ; encoding: [0x66,0x2c,0x80,0xbe]
+0x66,0x2c,0x80,0xbe
+
+# CHECK: s_movreld_b32 s0, flat_scratch_hi ; encoding: [0x67,0x2c,0x80,0xbe]
+0x67,0x2c,0x80,0xbe
+
+# CHECK: s_movreld_b32 s0, vcc_lo ; encoding: [0x6a,0x2c,0x80,0xbe]
+0x6a,0x2c,0x80,0xbe
+
+# CHECK: s_movreld_b32 s0, vcc_hi ; encoding: [0x6b,0x2c,0x80,0xbe]
+0x6b,0x2c,0x80,0xbe
+
+# CHECK: s_movreld_b32 s0, tba_lo ; encoding: [0x6c,0x2c,0x80,0xbe]
+0x6c,0x2c,0x80,0xbe
+
+# CHECK: s_movreld_b32 s0, tba_hi ; encoding: [0x6d,0x2c,0x80,0xbe]
+0x6d,0x2c,0x80,0xbe
+
+# CHECK: s_movreld_b32 s0, tma_lo ; encoding: [0x6e,0x2c,0x80,0xbe]
+0x6e,0x2c,0x80,0xbe
+
+# CHECK: s_movreld_b32 s0, tma_hi ; encoding: [0x6f,0x2c,0x80,0xbe]
+0x6f,0x2c,0x80,0xbe
+
+# CHECK: s_movreld_b32 s0, ttmp11 ; encoding: [0x7b,0x2c,0x80,0xbe]
+0x7b,0x2c,0x80,0xbe
+
+# CHECK: s_movreld_b32 s0, m0 ; encoding: [0x7c,0x2c,0x80,0xbe]
+0x7c,0x2c,0x80,0xbe
+
+# CHECK: s_movreld_b32 s0, 0 ; encoding: [0x80,0x2c,0x80,0xbe]
+0x80,0x2c,0x80,0xbe
+
+# CHECK: s_movreld_b32 s0, -1 ; encoding: [0xc1,0x2c,0x80,0xbe]
+0xc1,0x2c,0x80,0xbe
+
+# CHECK: s_movreld_b32 s0, 0.5 ; encoding: [0xf0,0x2c,0x80,0xbe]
+0xf0,0x2c,0x80,0xbe
+
+# CHECK: s_movreld_b32 s0, -4.0 ; encoding: [0xf7,0x2c,0x80,0xbe]
+0xf7,0x2c,0x80,0xbe
+
+# CHECK: s_movreld_b32 s0, scc ; encoding: [0xfd,0x2c,0x80,0xbe]
+0xfd,0x2c,0x80,0xbe
+
+# CHECK: s_movreld_b32 s0, 0xaf123456 ; encoding: [0xff,0x2c,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x2c,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_movreld_b32 s0, 0x3f717273 ; encoding: [0xff,0x2c,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x2c,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_movreld_b64 s[0:1], s[0:1] ; encoding: [0x00,0x2d,0x80,0xbe]
+0x00,0x2d,0x80,0xbe
+
+# CHECK: s_movreld_b64 s[2:3], s[0:1] ; encoding: [0x00,0x2d,0x82,0xbe]
+0x00,0x2d,0x82,0xbe
+
+# CHECK: s_movreld_b64 s[100:101], s[0:1] ; encoding: [0x00,0x2d,0xe4,0xbe]
+0x00,0x2d,0xe4,0xbe
+
+# CHECK: s_movreld_b64 flat_scratch, s[0:1] ; encoding: [0x00,0x2d,0xe6,0xbe]
+0x00,0x2d,0xe6,0xbe
+
+# CHECK: s_movreld_b64 vcc, s[0:1] ; encoding: [0x00,0x2d,0xea,0xbe]
+0x00,0x2d,0xea,0xbe
+
+# CHECK: s_movreld_b64 tba, s[0:1] ; encoding: [0x00,0x2d,0xec,0xbe]
+0x00,0x2d,0xec,0xbe
+
+# CHECK: s_movreld_b64 tma, s[0:1] ; encoding: [0x00,0x2d,0xee,0xbe]
+0x00,0x2d,0xee,0xbe
+
+# CHECK: s_movreld_b64 ttmp[10:11], s[0:1] ; encoding: [0x00,0x2d,0xfa,0xbe]
+0x00,0x2d,0xfa,0xbe
+
+# CHECK: s_movreld_b64 s[0:1], s[2:3] ; encoding: [0x02,0x2d,0x80,0xbe]
+0x02,0x2d,0x80,0xbe
+
+# CHECK: s_movreld_b64 s[0:1], s[100:101] ; encoding: [0x64,0x2d,0x80,0xbe]
+0x64,0x2d,0x80,0xbe
+
+# CHECK: s_movreld_b64 s[0:1], flat_scratch ; encoding: [0x66,0x2d,0x80,0xbe]
+0x66,0x2d,0x80,0xbe
+
+# CHECK: s_movreld_b64 s[0:1], vcc ; encoding: [0x6a,0x2d,0x80,0xbe]
+0x6a,0x2d,0x80,0xbe
+
+# CHECK: s_movreld_b64 s[0:1], tba ; encoding: [0x6c,0x2d,0x80,0xbe]
+0x6c,0x2d,0x80,0xbe
+
+# CHECK: s_movreld_b64 s[0:1], tma ; encoding: [0x6e,0x2d,0x80,0xbe]
+0x6e,0x2d,0x80,0xbe
+
+# CHECK: s_movreld_b64 s[0:1], ttmp[10:11] ; encoding: [0x7a,0x2d,0x80,0xbe]
+0x7a,0x2d,0x80,0xbe
+
+# CHECK: s_movreld_b64 s[0:1], 0 ; encoding: [0x80,0x2d,0x80,0xbe]
+0x80,0x2d,0x80,0xbe
+
+# CHECK: s_movreld_b64 s[0:1], -1 ; encoding: [0xc1,0x2d,0x80,0xbe]
+0xc1,0x2d,0x80,0xbe
+
+# CHECK: s_movreld_b64 s[0:1], 0.5 ; encoding: [0xf0,0x2d,0x80,0xbe]
+0xf0,0x2d,0x80,0xbe
+
+# CHECK: s_movreld_b64 s[0:1], -4.0 ; encoding: [0xf7,0x2d,0x80,0xbe]
+0xf7,0x2d,0x80,0xbe
+
+# CHECK: s_movreld_b64 s[0:1], 0xaf123456 ; encoding: [0xff,0x2d,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x2d,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_movreld_b64 s[0:1], 0x3f717273 ; encoding: [0xff,0x2d,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x2d,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_abs_i32 s0, s0 ; encoding: [0x00,0x30,0x80,0xbe]
+0x00,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s101, s0 ; encoding: [0x00,0x30,0xe5,0xbe]
+0x00,0x30,0xe5,0xbe
+
+# CHECK: s_abs_i32 flat_scratch_lo, s0 ; encoding: [0x00,0x30,0xe6,0xbe]
+0x00,0x30,0xe6,0xbe
+
+# CHECK: s_abs_i32 flat_scratch_hi, s0 ; encoding: [0x00,0x30,0xe7,0xbe]
+0x00,0x30,0xe7,0xbe
+
+# CHECK: s_abs_i32 vcc_lo, s0 ; encoding: [0x00,0x30,0xea,0xbe]
+0x00,0x30,0xea,0xbe
+
+# CHECK: s_abs_i32 vcc_hi, s0 ; encoding: [0x00,0x30,0xeb,0xbe]
+0x00,0x30,0xeb,0xbe
+
+# CHECK: s_abs_i32 tba_lo, s0 ; encoding: [0x00,0x30,0xec,0xbe]
+0x00,0x30,0xec,0xbe
+
+# CHECK: s_abs_i32 tba_hi, s0 ; encoding: [0x00,0x30,0xed,0xbe]
+0x00,0x30,0xed,0xbe
+
+# CHECK: s_abs_i32 tma_lo, s0 ; encoding: [0x00,0x30,0xee,0xbe]
+0x00,0x30,0xee,0xbe
+
+# CHECK: s_abs_i32 tma_hi, s0 ; encoding: [0x00,0x30,0xef,0xbe]
+0x00,0x30,0xef,0xbe
+
+# CHECK: s_abs_i32 ttmp11, s0 ; encoding: [0x00,0x30,0xfb,0xbe]
+0x00,0x30,0xfb,0xbe
+
+# CHECK: s_abs_i32 m0, s0 ; encoding: [0x00,0x30,0xfc,0xbe]
+0x00,0x30,0xfc,0xbe
+
+# CHECK: s_abs_i32 exec_lo, s0 ; encoding: [0x00,0x30,0xfe,0xbe]
+0x00,0x30,0xfe,0xbe
+
+# CHECK: s_abs_i32 exec_hi, s0 ; encoding: [0x00,0x30,0xff,0xbe]
+0x00,0x30,0xff,0xbe
+
+# CHECK: s_abs_i32 s0, s101 ; encoding: [0x65,0x30,0x80,0xbe]
+0x65,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s0, flat_scratch_lo ; encoding: [0x66,0x30,0x80,0xbe]
+0x66,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s0, flat_scratch_hi ; encoding: [0x67,0x30,0x80,0xbe]
+0x67,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s0, vcc_lo ; encoding: [0x6a,0x30,0x80,0xbe]
+0x6a,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s0, vcc_hi ; encoding: [0x6b,0x30,0x80,0xbe]
+0x6b,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s0, tba_lo ; encoding: [0x6c,0x30,0x80,0xbe]
+0x6c,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s0, tba_hi ; encoding: [0x6d,0x30,0x80,0xbe]
+0x6d,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s0, tma_lo ; encoding: [0x6e,0x30,0x80,0xbe]
+0x6e,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s0, tma_hi ; encoding: [0x6f,0x30,0x80,0xbe]
+0x6f,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s0, ttmp11 ; encoding: [0x7b,0x30,0x80,0xbe]
+0x7b,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s0, m0 ; encoding: [0x7c,0x30,0x80,0xbe]
+0x7c,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s0, exec_lo ; encoding: [0x7e,0x30,0x80,0xbe]
+0x7e,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s0, exec_hi ; encoding: [0x7f,0x30,0x80,0xbe]
+0x7f,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s0, 0 ; encoding: [0x80,0x30,0x80,0xbe]
+0x80,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s0, -1 ; encoding: [0xc1,0x30,0x80,0xbe]
+0xc1,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s0, 0.5 ; encoding: [0xf0,0x30,0x80,0xbe]
+0xf0,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s0, -4.0 ; encoding: [0xf7,0x30,0x80,0xbe]
+0xf7,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s0, scc ; encoding: [0xfd,0x30,0x80,0xbe]
+0xfd,0x30,0x80,0xbe
+
+# CHECK: s_abs_i32 s0, 0xaf123456 ; encoding: [0xff,0x30,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x30,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_abs_i32 s0, 0x3f717273 ; encoding: [0xff,0x30,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x30,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_mov_fed_b32 s0, s0 ; encoding: [0x00,0x31,0x80,0xbe]
+0x00,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s101, s0 ; encoding: [0x00,0x31,0xe5,0xbe]
+0x00,0x31,0xe5,0xbe
+
+# CHECK: s_mov_fed_b32 flat_scratch_lo, s0 ; encoding: [0x00,0x31,0xe6,0xbe]
+0x00,0x31,0xe6,0xbe
+
+# CHECK: s_mov_fed_b32 flat_scratch_hi, s0 ; encoding: [0x00,0x31,0xe7,0xbe]
+0x00,0x31,0xe7,0xbe
+
+# CHECK: s_mov_fed_b32 vcc_lo, s0 ; encoding: [0x00,0x31,0xea,0xbe]
+0x00,0x31,0xea,0xbe
+
+# CHECK: s_mov_fed_b32 vcc_hi, s0 ; encoding: [0x00,0x31,0xeb,0xbe]
+0x00,0x31,0xeb,0xbe
+
+# CHECK: s_mov_fed_b32 tba_lo, s0 ; encoding: [0x00,0x31,0xec,0xbe]
+0x00,0x31,0xec,0xbe
+
+# CHECK: s_mov_fed_b32 tba_hi, s0 ; encoding: [0x00,0x31,0xed,0xbe]
+0x00,0x31,0xed,0xbe
+
+# CHECK: s_mov_fed_b32 tma_lo, s0 ; encoding: [0x00,0x31,0xee,0xbe]
+0x00,0x31,0xee,0xbe
+
+# CHECK: s_mov_fed_b32 tma_hi, s0 ; encoding: [0x00,0x31,0xef,0xbe]
+0x00,0x31,0xef,0xbe
+
+# CHECK: s_mov_fed_b32 ttmp11, s0 ; encoding: [0x00,0x31,0xfb,0xbe]
+0x00,0x31,0xfb,0xbe
+
+# CHECK: s_mov_fed_b32 m0, s0 ; encoding: [0x00,0x31,0xfc,0xbe]
+0x00,0x31,0xfc,0xbe
+
+# CHECK: s_mov_fed_b32 exec_lo, s0 ; encoding: [0x00,0x31,0xfe,0xbe]
+0x00,0x31,0xfe,0xbe
+
+# CHECK: s_mov_fed_b32 exec_hi, s0 ; encoding: [0x00,0x31,0xff,0xbe]
+0x00,0x31,0xff,0xbe
+
+# CHECK: s_mov_fed_b32 s0, s101 ; encoding: [0x65,0x31,0x80,0xbe]
+0x65,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s0, flat_scratch_lo ; encoding: [0x66,0x31,0x80,0xbe]
+0x66,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s0, flat_scratch_hi ; encoding: [0x67,0x31,0x80,0xbe]
+0x67,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s0, vcc_lo ; encoding: [0x6a,0x31,0x80,0xbe]
+0x6a,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s0, vcc_hi ; encoding: [0x6b,0x31,0x80,0xbe]
+0x6b,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s0, tba_lo ; encoding: [0x6c,0x31,0x80,0xbe]
+0x6c,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s0, tba_hi ; encoding: [0x6d,0x31,0x80,0xbe]
+0x6d,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s0, tma_lo ; encoding: [0x6e,0x31,0x80,0xbe]
+0x6e,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s0, tma_hi ; encoding: [0x6f,0x31,0x80,0xbe]
+0x6f,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s0, ttmp11 ; encoding: [0x7b,0x31,0x80,0xbe]
+0x7b,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s0, m0 ; encoding: [0x7c,0x31,0x80,0xbe]
+0x7c,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s0, exec_lo ; encoding: [0x7e,0x31,0x80,0xbe]
+0x7e,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s0, exec_hi ; encoding: [0x7f,0x31,0x80,0xbe]
+0x7f,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s0, 0 ; encoding: [0x80,0x31,0x80,0xbe]
+0x80,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s0, -1 ; encoding: [0xc1,0x31,0x80,0xbe]
+0xc1,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s0, 0.5 ; encoding: [0xf0,0x31,0x80,0xbe]
+0xf0,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s0, -4.0 ; encoding: [0xf7,0x31,0x80,0xbe]
+0xf7,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s0, scc ; encoding: [0xfd,0x31,0x80,0xbe]
+0xfd,0x31,0x80,0xbe
+
+# CHECK: s_mov_fed_b32 s0, 0xaf123456 ; encoding: [0xff,0x31,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x31,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_mov_fed_b32 s0, 0x3f717273 ; encoding: [0xff,0x31,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x31,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_set_gpr_idx_idx s0 ; encoding: [0x00,0x32,0x80,0xbe]
+0x00,0x32,0x80,0xbe
+
+# CHECK: s_set_gpr_idx_idx s101 ; encoding: [0x65,0x32,0x80,0xbe]
+0x65,0x32,0x80,0xbe
+
+# CHECK: s_set_gpr_idx_idx flat_scratch_lo ; encoding: [0x66,0x32,0x80,0xbe]
+0x66,0x32,0x80,0xbe
+
+# CHECK: s_set_gpr_idx_idx flat_scratch_hi ; encoding: [0x67,0x32,0x80,0xbe]
+0x67,0x32,0x80,0xbe
+
+# CHECK: s_set_gpr_idx_idx vcc_lo ; encoding: [0x6a,0x32,0x80,0xbe]
+0x6a,0x32,0x80,0xbe
+
+# CHECK: s_set_gpr_idx_idx vcc_hi ; encoding: [0x6b,0x32,0x80,0xbe]
+0x6b,0x32,0x80,0xbe
+
+# CHECK: s_set_gpr_idx_idx tba_lo ; encoding: [0x6c,0x32,0x80,0xbe]
+0x6c,0x32,0x80,0xbe
+
+# CHECK: s_set_gpr_idx_idx tba_hi ; encoding: [0x6d,0x32,0x80,0xbe]
+0x6d,0x32,0x80,0xbe
+
+# CHECK: s_set_gpr_idx_idx tma_lo ; encoding: [0x6e,0x32,0x80,0xbe]
+0x6e,0x32,0x80,0xbe
+
+# CHECK: s_set_gpr_idx_idx tma_hi ; encoding: [0x6f,0x32,0x80,0xbe]
+0x6f,0x32,0x80,0xbe
+
+# CHECK: s_set_gpr_idx_idx ttmp11 ; encoding: [0x7b,0x32,0x80,0xbe]
+0x7b,0x32,0x80,0xbe
+
+# CHECK: s_set_gpr_idx_idx m0 ; encoding: [0x7c,0x32,0x80,0xbe]
+0x7c,0x32,0x80,0xbe
+
+# CHECK: s_set_gpr_idx_idx 0 ; encoding: [0x80,0x32,0x80,0xbe]
+0x80,0x32,0x80,0xbe
+
+# CHECK: s_set_gpr_idx_idx -1 ; encoding: [0xc1,0x32,0x80,0xbe]
+0xc1,0x32,0x80,0xbe
+
+# CHECK: s_set_gpr_idx_idx 0.5 ; encoding: [0xf0,0x32,0x80,0xbe]
+0xf0,0x32,0x80,0xbe
+
+# CHECK: s_set_gpr_idx_idx -4.0 ; encoding: [0xf7,0x32,0x80,0xbe]
+0xf7,0x32,0x80,0xbe
+
+# CHECK: s_set_gpr_idx_idx scc ; encoding: [0xfd,0x32,0x80,0xbe]
+0xfd,0x32,0x80,0xbe
+
+# CHECK: s_set_gpr_idx_idx 0xaf123456 ; encoding: [0xff,0x32,0x80,0xbe,0x56,0x34,0x12,0xaf]
+0xff,0x32,0x80,0xbe,0x56,0x34,0x12,0xaf
+
+# CHECK: s_set_gpr_idx_idx 0x3f717273 ; encoding: [0xff,0x32,0x80,0xbe,0x73,0x72,0x71,0x3f]
+0xff,0x32,0x80,0xbe,0x73,0x72,0x71,0x3f
+
+# CHECK: s_add_u32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x80]
+0x00,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x80]
+0x00,0x00,0x65,0x80
+
+# CHECK: s_add_u32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x80]
+0x00,0x00,0x66,0x80
+
+# CHECK: s_add_u32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x80]
+0x00,0x00,0x67,0x80
+
+# CHECK: s_add_u32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x80]
+0x00,0x00,0x6a,0x80
+
+# CHECK: s_add_u32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x80]
+0x00,0x00,0x6b,0x80
+
+# CHECK: s_add_u32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x80]
+0x00,0x00,0x6c,0x80
+
+# CHECK: s_add_u32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x80]
+0x00,0x00,0x6d,0x80
+
+# CHECK: s_add_u32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x80]
+0x00,0x00,0x6e,0x80
+
+# CHECK: s_add_u32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x80]
+0x00,0x00,0x6f,0x80
+
+# CHECK: s_add_u32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x80]
+0x00,0x00,0x7b,0x80
+
+# CHECK: s_add_u32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x80]
+0x00,0x00,0x7c,0x80
+
+# CHECK: s_add_u32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x80]
+0x00,0x00,0x7e,0x80
+
+# CHECK: s_add_u32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x80]
+0x00,0x00,0x7f,0x80
+
+# CHECK: s_add_u32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x80]
+0x65,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x80]
+0x66,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x80]
+0x67,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x80]
+0x6a,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x80]
+0x6b,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x80]
+0x6c,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x80]
+0x6d,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x80]
+0x6e,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x80]
+0x6f,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x80]
+0x7b,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x80]
+0x7c,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x80]
+0x7e,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x80]
+0x7f,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x80]
+0x80,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x80]
+0xc1,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x80]
+0xf0,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x80]
+0xf7,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x80]
+0xfd,0x00,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x80]
+0x00,0x65,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x80]
+0x00,0x66,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x80]
+0x00,0x67,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x80]
+0x00,0x6a,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x80]
+0x00,0x6b,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x80]
+0x00,0x6c,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x80]
+0x00,0x6d,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x80]
+0x00,0x6e,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x80]
+0x00,0x6f,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x80]
+0x00,0x7b,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x80]
+0x00,0x7c,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x80]
+0x00,0x7e,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x80]
+0x00,0x7f,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x80]
+0x00,0x80,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x80]
+0x00,0xc1,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x80]
+0x00,0xf0,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x80]
+0x00,0xf7,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x80]
+0x00,0xfd,0x00,0x80
+
+# CHECK: s_add_u32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x80,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x80,0x56,0x34,0x12,0xaf
+
+# CHECK: s_add_u32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x80,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x80,0x73,0x72,0x71,0x3f
+
+# CHECK: s_sub_u32 s0, s0, s0 ; encoding: [0x00,0x00,0x80,0x80]
+0x00,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s101, s0, s0 ; encoding: [0x00,0x00,0xe5,0x80]
+0x00,0x00,0xe5,0x80
+
+# CHECK: s_sub_u32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0xe6,0x80]
+0x00,0x00,0xe6,0x80
+
+# CHECK: s_sub_u32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0xe7,0x80]
+0x00,0x00,0xe7,0x80
+
+# CHECK: s_sub_u32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0xea,0x80]
+0x00,0x00,0xea,0x80
+
+# CHECK: s_sub_u32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0xeb,0x80]
+0x00,0x00,0xeb,0x80
+
+# CHECK: s_sub_u32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0xec,0x80]
+0x00,0x00,0xec,0x80
+
+# CHECK: s_sub_u32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0xed,0x80]
+0x00,0x00,0xed,0x80
+
+# CHECK: s_sub_u32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0xee,0x80]
+0x00,0x00,0xee,0x80
+
+# CHECK: s_sub_u32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0xef,0x80]
+0x00,0x00,0xef,0x80
+
+# CHECK: s_sub_u32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0xfb,0x80]
+0x00,0x00,0xfb,0x80
+
+# CHECK: s_sub_u32 m0, s0, s0 ; encoding: [0x00,0x00,0xfc,0x80]
+0x00,0x00,0xfc,0x80
+
+# CHECK: s_sub_u32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0xfe,0x80]
+0x00,0x00,0xfe,0x80
+
+# CHECK: s_sub_u32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0xff,0x80]
+0x00,0x00,0xff,0x80
+
+# CHECK: s_sub_u32 s0, s101, s0 ; encoding: [0x65,0x00,0x80,0x80]
+0x65,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x80,0x80]
+0x66,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x80,0x80]
+0x67,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x80,0x80]
+0x6a,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x80,0x80]
+0x6b,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x80,0x80]
+0x6c,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x80,0x80]
+0x6d,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x80,0x80]
+0x6e,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x80,0x80]
+0x6f,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x80,0x80]
+0x7b,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s0, m0, s0 ; encoding: [0x7c,0x00,0x80,0x80]
+0x7c,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x80,0x80]
+0x7e,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x80,0x80]
+0x7f,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s0, 0, s0 ; encoding: [0x80,0x00,0x80,0x80]
+0x80,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s0, -1, s0 ; encoding: [0xc1,0x00,0x80,0x80]
+0xc1,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x80,0x80]
+0xf0,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x80,0x80]
+0xf7,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s0, scc, s0 ; encoding: [0xfd,0x00,0x80,0x80]
+0xfd,0x00,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, s101 ; encoding: [0x00,0x65,0x80,0x80]
+0x00,0x65,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x80,0x80]
+0x00,0x66,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x80,0x80]
+0x00,0x67,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x80,0x80]
+0x00,0x6a,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x80,0x80]
+0x00,0x6b,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x80,0x80]
+0x00,0x6c,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x80,0x80]
+0x00,0x6d,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x80,0x80]
+0x00,0x6e,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x80,0x80]
+0x00,0x6f,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x80,0x80]
+0x00,0x7b,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, m0 ; encoding: [0x00,0x7c,0x80,0x80]
+0x00,0x7c,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x80,0x80]
+0x00,0x7e,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x80,0x80]
+0x00,0x7f,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, 0 ; encoding: [0x00,0x80,0x80,0x80]
+0x00,0x80,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, -1 ; encoding: [0x00,0xc1,0x80,0x80]
+0x00,0xc1,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x80,0x80]
+0x00,0xf0,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x80,0x80]
+0x00,0xf7,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, scc ; encoding: [0x00,0xfd,0x80,0x80]
+0x00,0xfd,0x80,0x80
+
+# CHECK: s_sub_u32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x80,0x80,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x80,0x80,0x56,0x34,0x12,0xaf
+
+# CHECK: s_sub_u32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x80,0x80,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x80,0x80,0x73,0x72,0x71,0x3f
+
+# CHECK: s_add_i32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x81]
+0x00,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x81]
+0x00,0x00,0x65,0x81
+
+# CHECK: s_add_i32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x81]
+0x00,0x00,0x66,0x81
+
+# CHECK: s_add_i32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x81]
+0x00,0x00,0x67,0x81
+
+# CHECK: s_add_i32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x81]
+0x00,0x00,0x6a,0x81
+
+# CHECK: s_add_i32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x81]
+0x00,0x00,0x6b,0x81
+
+# CHECK: s_add_i32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x81]
+0x00,0x00,0x6c,0x81
+
+# CHECK: s_add_i32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x81]
+0x00,0x00,0x6d,0x81
+
+# CHECK: s_add_i32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x81]
+0x00,0x00,0x6e,0x81
+
+# CHECK: s_add_i32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x81]
+0x00,0x00,0x6f,0x81
+
+# CHECK: s_add_i32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x81]
+0x00,0x00,0x7b,0x81
+
+# CHECK: s_add_i32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x81]
+0x00,0x00,0x7c,0x81
+
+# CHECK: s_add_i32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x81]
+0x00,0x00,0x7e,0x81
+
+# CHECK: s_add_i32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x81]
+0x00,0x00,0x7f,0x81
+
+# CHECK: s_add_i32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x81]
+0x65,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x81]
+0x66,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x81]
+0x67,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x81]
+0x6a,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x81]
+0x6b,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x81]
+0x6c,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x81]
+0x6d,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x81]
+0x6e,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x81]
+0x6f,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x81]
+0x7b,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x81]
+0x7c,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x81]
+0x7e,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x81]
+0x7f,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x81]
+0x80,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x81]
+0xc1,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x81]
+0xf0,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x81]
+0xf7,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x81]
+0xfd,0x00,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x81]
+0x00,0x65,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x81]
+0x00,0x66,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x81]
+0x00,0x67,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x81]
+0x00,0x6a,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x81]
+0x00,0x6b,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x81]
+0x00,0x6c,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x81]
+0x00,0x6d,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x81]
+0x00,0x6e,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x81]
+0x00,0x6f,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x81]
+0x00,0x7b,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x81]
+0x00,0x7c,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x81]
+0x00,0x7e,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x81]
+0x00,0x7f,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x81]
+0x00,0x80,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x81]
+0x00,0xc1,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x81]
+0x00,0xf0,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x81]
+0x00,0xf7,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x81]
+0x00,0xfd,0x00,0x81
+
+# CHECK: s_add_i32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x81,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x81,0x56,0x34,0x12,0xaf
+
+# CHECK: s_add_i32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x81,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x81,0x73,0x72,0x71,0x3f
+
+# CHECK: s_sub_i32 s0, s0, s0 ; encoding: [0x00,0x00,0x80,0x81]
+0x00,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s101, s0, s0 ; encoding: [0x00,0x00,0xe5,0x81]
+0x00,0x00,0xe5,0x81
+
+# CHECK: s_sub_i32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0xe6,0x81]
+0x00,0x00,0xe6,0x81
+
+# CHECK: s_sub_i32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0xe7,0x81]
+0x00,0x00,0xe7,0x81
+
+# CHECK: s_sub_i32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0xea,0x81]
+0x00,0x00,0xea,0x81
+
+# CHECK: s_sub_i32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0xeb,0x81]
+0x00,0x00,0xeb,0x81
+
+# CHECK: s_sub_i32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0xec,0x81]
+0x00,0x00,0xec,0x81
+
+# CHECK: s_sub_i32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0xed,0x81]
+0x00,0x00,0xed,0x81
+
+# CHECK: s_sub_i32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0xee,0x81]
+0x00,0x00,0xee,0x81
+
+# CHECK: s_sub_i32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0xef,0x81]
+0x00,0x00,0xef,0x81
+
+# CHECK: s_sub_i32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0xfb,0x81]
+0x00,0x00,0xfb,0x81
+
+# CHECK: s_sub_i32 m0, s0, s0 ; encoding: [0x00,0x00,0xfc,0x81]
+0x00,0x00,0xfc,0x81
+
+# CHECK: s_sub_i32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0xfe,0x81]
+0x00,0x00,0xfe,0x81
+
+# CHECK: s_sub_i32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0xff,0x81]
+0x00,0x00,0xff,0x81
+
+# CHECK: s_sub_i32 s0, s101, s0 ; encoding: [0x65,0x00,0x80,0x81]
+0x65,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x80,0x81]
+0x66,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x80,0x81]
+0x67,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x80,0x81]
+0x6a,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x80,0x81]
+0x6b,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x80,0x81]
+0x6c,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x80,0x81]
+0x6d,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x80,0x81]
+0x6e,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x80,0x81]
+0x6f,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x80,0x81]
+0x7b,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s0, m0, s0 ; encoding: [0x7c,0x00,0x80,0x81]
+0x7c,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x80,0x81]
+0x7e,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x80,0x81]
+0x7f,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s0, 0, s0 ; encoding: [0x80,0x00,0x80,0x81]
+0x80,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s0, -1, s0 ; encoding: [0xc1,0x00,0x80,0x81]
+0xc1,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x80,0x81]
+0xf0,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x80,0x81]
+0xf7,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s0, scc, s0 ; encoding: [0xfd,0x00,0x80,0x81]
+0xfd,0x00,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, s101 ; encoding: [0x00,0x65,0x80,0x81]
+0x00,0x65,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x80,0x81]
+0x00,0x66,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x80,0x81]
+0x00,0x67,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x80,0x81]
+0x00,0x6a,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x80,0x81]
+0x00,0x6b,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x80,0x81]
+0x00,0x6c,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x80,0x81]
+0x00,0x6d,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x80,0x81]
+0x00,0x6e,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x80,0x81]
+0x00,0x6f,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x80,0x81]
+0x00,0x7b,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, m0 ; encoding: [0x00,0x7c,0x80,0x81]
+0x00,0x7c,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x80,0x81]
+0x00,0x7e,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x80,0x81]
+0x00,0x7f,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, 0 ; encoding: [0x00,0x80,0x80,0x81]
+0x00,0x80,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, -1 ; encoding: [0x00,0xc1,0x80,0x81]
+0x00,0xc1,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x80,0x81]
+0x00,0xf0,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x80,0x81]
+0x00,0xf7,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, scc ; encoding: [0x00,0xfd,0x80,0x81]
+0x00,0xfd,0x80,0x81
+
+# CHECK: s_sub_i32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x80,0x81,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x80,0x81,0x56,0x34,0x12,0xaf
+
+# CHECK: s_sub_i32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x80,0x81,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x80,0x81,0x73,0x72,0x71,0x3f
+
+# CHECK: s_addc_u32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x82]
+0x00,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x82]
+0x00,0x00,0x65,0x82
+
+# CHECK: s_addc_u32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x82]
+0x00,0x00,0x66,0x82
+
+# CHECK: s_addc_u32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x82]
+0x00,0x00,0x67,0x82
+
+# CHECK: s_addc_u32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x82]
+0x00,0x00,0x6a,0x82
+
+# CHECK: s_addc_u32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x82]
+0x00,0x00,0x6b,0x82
+
+# CHECK: s_addc_u32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x82]
+0x00,0x00,0x6c,0x82
+
+# CHECK: s_addc_u32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x82]
+0x00,0x00,0x6d,0x82
+
+# CHECK: s_addc_u32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x82]
+0x00,0x00,0x6e,0x82
+
+# CHECK: s_addc_u32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x82]
+0x00,0x00,0x6f,0x82
+
+# CHECK: s_addc_u32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x82]
+0x00,0x00,0x7b,0x82
+
+# CHECK: s_addc_u32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x82]
+0x00,0x00,0x7c,0x82
+
+# CHECK: s_addc_u32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x82]
+0x00,0x00,0x7e,0x82
+
+# CHECK: s_addc_u32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x82]
+0x00,0x00,0x7f,0x82
+
+# CHECK: s_addc_u32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x82]
+0x65,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x82]
+0x66,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x82]
+0x67,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x82]
+0x6a,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x82]
+0x6b,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x82]
+0x6c,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x82]
+0x6d,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x82]
+0x6e,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x82]
+0x6f,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x82]
+0x7b,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x82]
+0x7c,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x82]
+0x7e,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x82]
+0x7f,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x82]
+0x80,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x82]
+0xc1,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x82]
+0xf0,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x82]
+0xf7,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x82]
+0xfd,0x00,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x82]
+0x00,0x65,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x82]
+0x00,0x66,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x82]
+0x00,0x67,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x82]
+0x00,0x6a,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x82]
+0x00,0x6b,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x82]
+0x00,0x6c,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x82]
+0x00,0x6d,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x82]
+0x00,0x6e,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x82]
+0x00,0x6f,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x82]
+0x00,0x7b,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x82]
+0x00,0x7c,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x82]
+0x00,0x7e,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x82]
+0x00,0x7f,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x82]
+0x00,0x80,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x82]
+0x00,0xc1,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x82]
+0x00,0xf0,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x82]
+0x00,0xf7,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x82]
+0x00,0xfd,0x00,0x82
+
+# CHECK: s_addc_u32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x82,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x82,0x56,0x34,0x12,0xaf
+
+# CHECK: s_addc_u32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x82,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x82,0x73,0x72,0x71,0x3f
+
+# CHECK: s_subb_u32 s0, s0, s0 ; encoding: [0x00,0x00,0x80,0x82]
+0x00,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s101, s0, s0 ; encoding: [0x00,0x00,0xe5,0x82]
+0x00,0x00,0xe5,0x82
+
+# CHECK: s_subb_u32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0xe6,0x82]
+0x00,0x00,0xe6,0x82
+
+# CHECK: s_subb_u32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0xe7,0x82]
+0x00,0x00,0xe7,0x82
+
+# CHECK: s_subb_u32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0xea,0x82]
+0x00,0x00,0xea,0x82
+
+# CHECK: s_subb_u32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0xeb,0x82]
+0x00,0x00,0xeb,0x82
+
+# CHECK: s_subb_u32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0xec,0x82]
+0x00,0x00,0xec,0x82
+
+# CHECK: s_subb_u32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0xed,0x82]
+0x00,0x00,0xed,0x82
+
+# CHECK: s_subb_u32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0xee,0x82]
+0x00,0x00,0xee,0x82
+
+# CHECK: s_subb_u32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0xef,0x82]
+0x00,0x00,0xef,0x82
+
+# CHECK: s_subb_u32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0xfb,0x82]
+0x00,0x00,0xfb,0x82
+
+# CHECK: s_subb_u32 m0, s0, s0 ; encoding: [0x00,0x00,0xfc,0x82]
+0x00,0x00,0xfc,0x82
+
+# CHECK: s_subb_u32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0xfe,0x82]
+0x00,0x00,0xfe,0x82
+
+# CHECK: s_subb_u32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0xff,0x82]
+0x00,0x00,0xff,0x82
+
+# CHECK: s_subb_u32 s0, s101, s0 ; encoding: [0x65,0x00,0x80,0x82]
+0x65,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x80,0x82]
+0x66,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x80,0x82]
+0x67,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x80,0x82]
+0x6a,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x80,0x82]
+0x6b,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x80,0x82]
+0x6c,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x80,0x82]
+0x6d,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x80,0x82]
+0x6e,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x80,0x82]
+0x6f,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x80,0x82]
+0x7b,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s0, m0, s0 ; encoding: [0x7c,0x00,0x80,0x82]
+0x7c,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x80,0x82]
+0x7e,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x80,0x82]
+0x7f,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s0, 0, s0 ; encoding: [0x80,0x00,0x80,0x82]
+0x80,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s0, -1, s0 ; encoding: [0xc1,0x00,0x80,0x82]
+0xc1,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x80,0x82]
+0xf0,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x80,0x82]
+0xf7,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s0, scc, s0 ; encoding: [0xfd,0x00,0x80,0x82]
+0xfd,0x00,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, s101 ; encoding: [0x00,0x65,0x80,0x82]
+0x00,0x65,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x80,0x82]
+0x00,0x66,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x80,0x82]
+0x00,0x67,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x80,0x82]
+0x00,0x6a,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x80,0x82]
+0x00,0x6b,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x80,0x82]
+0x00,0x6c,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x80,0x82]
+0x00,0x6d,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x80,0x82]
+0x00,0x6e,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x80,0x82]
+0x00,0x6f,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x80,0x82]
+0x00,0x7b,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, m0 ; encoding: [0x00,0x7c,0x80,0x82]
+0x00,0x7c,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x80,0x82]
+0x00,0x7e,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x80,0x82]
+0x00,0x7f,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, 0 ; encoding: [0x00,0x80,0x80,0x82]
+0x00,0x80,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, -1 ; encoding: [0x00,0xc1,0x80,0x82]
+0x00,0xc1,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x80,0x82]
+0x00,0xf0,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x80,0x82]
+0x00,0xf7,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, scc ; encoding: [0x00,0xfd,0x80,0x82]
+0x00,0xfd,0x80,0x82
+
+# CHECK: s_subb_u32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x80,0x82,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x80,0x82,0x56,0x34,0x12,0xaf
+
+# CHECK: s_subb_u32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x80,0x82,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x80,0x82,0x73,0x72,0x71,0x3f
+
+# CHECK: s_min_i32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x83]
+0x00,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x83]
+0x00,0x00,0x65,0x83
+
+# CHECK: s_min_i32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x83]
+0x00,0x00,0x66,0x83
+
+# CHECK: s_min_i32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x83]
+0x00,0x00,0x67,0x83
+
+# CHECK: s_min_i32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x83]
+0x00,0x00,0x6a,0x83
+
+# CHECK: s_min_i32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x83]
+0x00,0x00,0x6b,0x83
+
+# CHECK: s_min_i32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x83]
+0x00,0x00,0x6c,0x83
+
+# CHECK: s_min_i32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x83]
+0x00,0x00,0x6d,0x83
+
+# CHECK: s_min_i32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x83]
+0x00,0x00,0x6e,0x83
+
+# CHECK: s_min_i32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x83]
+0x00,0x00,0x6f,0x83
+
+# CHECK: s_min_i32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x83]
+0x00,0x00,0x7b,0x83
+
+# CHECK: s_min_i32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x83]
+0x00,0x00,0x7c,0x83
+
+# CHECK: s_min_i32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x83]
+0x00,0x00,0x7e,0x83
+
+# CHECK: s_min_i32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x83]
+0x00,0x00,0x7f,0x83
+
+# CHECK: s_min_i32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x83]
+0x65,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x83]
+0x66,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x83]
+0x67,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x83]
+0x6a,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x83]
+0x6b,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x83]
+0x6c,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x83]
+0x6d,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x83]
+0x6e,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x83]
+0x6f,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x83]
+0x7b,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x83]
+0x7c,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x83]
+0x7e,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x83]
+0x7f,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x83]
+0x80,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x83]
+0xc1,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x83]
+0xf0,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x83]
+0xf7,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x83]
+0xfd,0x00,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x83]
+0x00,0x65,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x83]
+0x00,0x66,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x83]
+0x00,0x67,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x83]
+0x00,0x6a,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x83]
+0x00,0x6b,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x83]
+0x00,0x6c,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x83]
+0x00,0x6d,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x83]
+0x00,0x6e,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x83]
+0x00,0x6f,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x83]
+0x00,0x7b,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x83]
+0x00,0x7c,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x83]
+0x00,0x7e,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x83]
+0x00,0x7f,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x83]
+0x00,0x80,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x83]
+0x00,0xc1,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x83]
+0x00,0xf0,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x83]
+0x00,0xf7,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x83]
+0x00,0xfd,0x00,0x83
+
+# CHECK: s_min_i32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x83,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x83,0x56,0x34,0x12,0xaf
+
+# CHECK: s_min_i32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x83,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x83,0x73,0x72,0x71,0x3f
+
+# CHECK: s_min_u32 s0, s0, s0 ; encoding: [0x00,0x00,0x80,0x83]
+0x00,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s101, s0, s0 ; encoding: [0x00,0x00,0xe5,0x83]
+0x00,0x00,0xe5,0x83
+
+# CHECK: s_min_u32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0xe6,0x83]
+0x00,0x00,0xe6,0x83
+
+# CHECK: s_min_u32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0xe7,0x83]
+0x00,0x00,0xe7,0x83
+
+# CHECK: s_min_u32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0xea,0x83]
+0x00,0x00,0xea,0x83
+
+# CHECK: s_min_u32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0xeb,0x83]
+0x00,0x00,0xeb,0x83
+
+# CHECK: s_min_u32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0xec,0x83]
+0x00,0x00,0xec,0x83
+
+# CHECK: s_min_u32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0xed,0x83]
+0x00,0x00,0xed,0x83
+
+# CHECK: s_min_u32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0xee,0x83]
+0x00,0x00,0xee,0x83
+
+# CHECK: s_min_u32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0xef,0x83]
+0x00,0x00,0xef,0x83
+
+# CHECK: s_min_u32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0xfb,0x83]
+0x00,0x00,0xfb,0x83
+
+# CHECK: s_min_u32 m0, s0, s0 ; encoding: [0x00,0x00,0xfc,0x83]
+0x00,0x00,0xfc,0x83
+
+# CHECK: s_min_u32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0xfe,0x83]
+0x00,0x00,0xfe,0x83
+
+# CHECK: s_min_u32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0xff,0x83]
+0x00,0x00,0xff,0x83
+
+# CHECK: s_min_u32 s0, s101, s0 ; encoding: [0x65,0x00,0x80,0x83]
+0x65,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x80,0x83]
+0x66,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x80,0x83]
+0x67,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x80,0x83]
+0x6a,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x80,0x83]
+0x6b,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x80,0x83]
+0x6c,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x80,0x83]
+0x6d,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x80,0x83]
+0x6e,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x80,0x83]
+0x6f,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x80,0x83]
+0x7b,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s0, m0, s0 ; encoding: [0x7c,0x00,0x80,0x83]
+0x7c,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x80,0x83]
+0x7e,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x80,0x83]
+0x7f,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s0, 0, s0 ; encoding: [0x80,0x00,0x80,0x83]
+0x80,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s0, -1, s0 ; encoding: [0xc1,0x00,0x80,0x83]
+0xc1,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x80,0x83]
+0xf0,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x80,0x83]
+0xf7,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s0, scc, s0 ; encoding: [0xfd,0x00,0x80,0x83]
+0xfd,0x00,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, s101 ; encoding: [0x00,0x65,0x80,0x83]
+0x00,0x65,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x80,0x83]
+0x00,0x66,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x80,0x83]
+0x00,0x67,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x80,0x83]
+0x00,0x6a,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x80,0x83]
+0x00,0x6b,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x80,0x83]
+0x00,0x6c,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x80,0x83]
+0x00,0x6d,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x80,0x83]
+0x00,0x6e,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x80,0x83]
+0x00,0x6f,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x80,0x83]
+0x00,0x7b,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, m0 ; encoding: [0x00,0x7c,0x80,0x83]
+0x00,0x7c,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x80,0x83]
+0x00,0x7e,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x80,0x83]
+0x00,0x7f,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, 0 ; encoding: [0x00,0x80,0x80,0x83]
+0x00,0x80,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, -1 ; encoding: [0x00,0xc1,0x80,0x83]
+0x00,0xc1,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x80,0x83]
+0x00,0xf0,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x80,0x83]
+0x00,0xf7,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, scc ; encoding: [0x00,0xfd,0x80,0x83]
+0x00,0xfd,0x80,0x83
+
+# CHECK: s_min_u32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x80,0x83,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x80,0x83,0x56,0x34,0x12,0xaf
+
+# CHECK: s_min_u32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x80,0x83,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x80,0x83,0x73,0x72,0x71,0x3f
+
+# CHECK: s_max_i32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x84]
+0x00,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x84]
+0x00,0x00,0x65,0x84
+
+# CHECK: s_max_i32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x84]
+0x00,0x00,0x66,0x84
+
+# CHECK: s_max_i32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x84]
+0x00,0x00,0x67,0x84
+
+# CHECK: s_max_i32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x84]
+0x00,0x00,0x6a,0x84
+
+# CHECK: s_max_i32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x84]
+0x00,0x00,0x6b,0x84
+
+# CHECK: s_max_i32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x84]
+0x00,0x00,0x6c,0x84
+
+# CHECK: s_max_i32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x84]
+0x00,0x00,0x6d,0x84
+
+# CHECK: s_max_i32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x84]
+0x00,0x00,0x6e,0x84
+
+# CHECK: s_max_i32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x84]
+0x00,0x00,0x6f,0x84
+
+# CHECK: s_max_i32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x84]
+0x00,0x00,0x7b,0x84
+
+# CHECK: s_max_i32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x84]
+0x00,0x00,0x7c,0x84
+
+# CHECK: s_max_i32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x84]
+0x00,0x00,0x7e,0x84
+
+# CHECK: s_max_i32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x84]
+0x00,0x00,0x7f,0x84
+
+# CHECK: s_max_i32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x84]
+0x65,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x84]
+0x66,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x84]
+0x67,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x84]
+0x6a,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x84]
+0x6b,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x84]
+0x6c,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x84]
+0x6d,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x84]
+0x6e,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x84]
+0x6f,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x84]
+0x7b,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x84]
+0x7c,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x84]
+0x7e,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x84]
+0x7f,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x84]
+0x80,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x84]
+0xc1,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x84]
+0xf0,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x84]
+0xf7,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x84]
+0xfd,0x00,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x84]
+0x00,0x65,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x84]
+0x00,0x66,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x84]
+0x00,0x67,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x84]
+0x00,0x6a,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x84]
+0x00,0x6b,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x84]
+0x00,0x6c,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x84]
+0x00,0x6d,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x84]
+0x00,0x6e,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x84]
+0x00,0x6f,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x84]
+0x00,0x7b,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x84]
+0x00,0x7c,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x84]
+0x00,0x7e,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x84]
+0x00,0x7f,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x84]
+0x00,0x80,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x84]
+0x00,0xc1,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x84]
+0x00,0xf0,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x84]
+0x00,0xf7,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x84]
+0x00,0xfd,0x00,0x84
+
+# CHECK: s_max_i32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x84,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x84,0x56,0x34,0x12,0xaf
+
+# CHECK: s_max_i32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x84,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x84,0x73,0x72,0x71,0x3f
+
+# CHECK: s_max_u32 s0, s0, s0 ; encoding: [0x00,0x00,0x80,0x84]
+0x00,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s101, s0, s0 ; encoding: [0x00,0x00,0xe5,0x84]
+0x00,0x00,0xe5,0x84
+
+# CHECK: s_max_u32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0xe6,0x84]
+0x00,0x00,0xe6,0x84
+
+# CHECK: s_max_u32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0xe7,0x84]
+0x00,0x00,0xe7,0x84
+
+# CHECK: s_max_u32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0xea,0x84]
+0x00,0x00,0xea,0x84
+
+# CHECK: s_max_u32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0xeb,0x84]
+0x00,0x00,0xeb,0x84
+
+# CHECK: s_max_u32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0xec,0x84]
+0x00,0x00,0xec,0x84
+
+# CHECK: s_max_u32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0xed,0x84]
+0x00,0x00,0xed,0x84
+
+# CHECK: s_max_u32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0xee,0x84]
+0x00,0x00,0xee,0x84
+
+# CHECK: s_max_u32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0xef,0x84]
+0x00,0x00,0xef,0x84
+
+# CHECK: s_max_u32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0xfb,0x84]
+0x00,0x00,0xfb,0x84
+
+# CHECK: s_max_u32 m0, s0, s0 ; encoding: [0x00,0x00,0xfc,0x84]
+0x00,0x00,0xfc,0x84
+
+# CHECK: s_max_u32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0xfe,0x84]
+0x00,0x00,0xfe,0x84
+
+# CHECK: s_max_u32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0xff,0x84]
+0x00,0x00,0xff,0x84
+
+# CHECK: s_max_u32 s0, s101, s0 ; encoding: [0x65,0x00,0x80,0x84]
+0x65,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x80,0x84]
+0x66,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x80,0x84]
+0x67,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x80,0x84]
+0x6a,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x80,0x84]
+0x6b,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x80,0x84]
+0x6c,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x80,0x84]
+0x6d,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x80,0x84]
+0x6e,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x80,0x84]
+0x6f,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x80,0x84]
+0x7b,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s0, m0, s0 ; encoding: [0x7c,0x00,0x80,0x84]
+0x7c,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x80,0x84]
+0x7e,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x80,0x84]
+0x7f,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s0, 0, s0 ; encoding: [0x80,0x00,0x80,0x84]
+0x80,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s0, -1, s0 ; encoding: [0xc1,0x00,0x80,0x84]
+0xc1,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x80,0x84]
+0xf0,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x80,0x84]
+0xf7,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s0, scc, s0 ; encoding: [0xfd,0x00,0x80,0x84]
+0xfd,0x00,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, s101 ; encoding: [0x00,0x65,0x80,0x84]
+0x00,0x65,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x80,0x84]
+0x00,0x66,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x80,0x84]
+0x00,0x67,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x80,0x84]
+0x00,0x6a,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x80,0x84]
+0x00,0x6b,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x80,0x84]
+0x00,0x6c,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x80,0x84]
+0x00,0x6d,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x80,0x84]
+0x00,0x6e,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x80,0x84]
+0x00,0x6f,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x80,0x84]
+0x00,0x7b,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, m0 ; encoding: [0x00,0x7c,0x80,0x84]
+0x00,0x7c,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x80,0x84]
+0x00,0x7e,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x80,0x84]
+0x00,0x7f,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, 0 ; encoding: [0x00,0x80,0x80,0x84]
+0x00,0x80,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, -1 ; encoding: [0x00,0xc1,0x80,0x84]
+0x00,0xc1,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x80,0x84]
+0x00,0xf0,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x80,0x84]
+0x00,0xf7,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, scc ; encoding: [0x00,0xfd,0x80,0x84]
+0x00,0xfd,0x80,0x84
+
+# CHECK: s_max_u32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x80,0x84,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x80,0x84,0x56,0x34,0x12,0xaf
+
+# CHECK: s_max_u32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x80,0x84,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x80,0x84,0x73,0x72,0x71,0x3f
+
+# CHECK: s_cselect_b32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x85]
+0x00,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x85]
+0x00,0x00,0x65,0x85
+
+# CHECK: s_cselect_b32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x85]
+0x00,0x00,0x66,0x85
+
+# CHECK: s_cselect_b32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x85]
+0x00,0x00,0x67,0x85
+
+# CHECK: s_cselect_b32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x85]
+0x00,0x00,0x6a,0x85
+
+# CHECK: s_cselect_b32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x85]
+0x00,0x00,0x6b,0x85
+
+# CHECK: s_cselect_b32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x85]
+0x00,0x00,0x6c,0x85
+
+# CHECK: s_cselect_b32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x85]
+0x00,0x00,0x6d,0x85
+
+# CHECK: s_cselect_b32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x85]
+0x00,0x00,0x6e,0x85
+
+# CHECK: s_cselect_b32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x85]
+0x00,0x00,0x6f,0x85
+
+# CHECK: s_cselect_b32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x85]
+0x00,0x00,0x7b,0x85
+
+# CHECK: s_cselect_b32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x85]
+0x00,0x00,0x7c,0x85
+
+# CHECK: s_cselect_b32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x85]
+0x00,0x00,0x7e,0x85
+
+# CHECK: s_cselect_b32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x85]
+0x00,0x00,0x7f,0x85
+
+# CHECK: s_cselect_b32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x85]
+0x65,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x85]
+0x66,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x85]
+0x67,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x85]
+0x6a,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x85]
+0x6b,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x85]
+0x6c,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x85]
+0x6d,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x85]
+0x6e,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x85]
+0x6f,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x85]
+0x7b,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x85]
+0x7c,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x85]
+0x7e,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x85]
+0x7f,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x85]
+0x80,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x85]
+0xc1,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x85]
+0xf0,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x85]
+0xf7,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x85]
+0xfd,0x00,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x85]
+0x00,0x65,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x85]
+0x00,0x66,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x85]
+0x00,0x67,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x85]
+0x00,0x6a,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x85]
+0x00,0x6b,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x85]
+0x00,0x6c,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x85]
+0x00,0x6d,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x85]
+0x00,0x6e,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x85]
+0x00,0x6f,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x85]
+0x00,0x7b,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x85]
+0x00,0x7c,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x85]
+0x00,0x7e,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x85]
+0x00,0x7f,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x85]
+0x00,0x80,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x85]
+0x00,0xc1,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x85]
+0x00,0xf0,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x85]
+0x00,0xf7,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x85]
+0x00,0xfd,0x00,0x85
+
+# CHECK: s_cselect_b32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x85,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x85,0x56,0x34,0x12,0xaf
+
+# CHECK: s_cselect_b32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x85,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x85,0x73,0x72,0x71,0x3f
+
+# CHECK: s_cselect_b64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x80,0x85]
+0x00,0x00,0x80,0x85
+
+# CHECK: s_cselect_b64 s[2:3], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x82,0x85]
+0x00,0x00,0x82,0x85
+
+# CHECK: s_cselect_b64 s[100:101], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe4,0x85]
+0x00,0x00,0xe4,0x85
+
+# CHECK: s_cselect_b64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe6,0x85]
+0x00,0x00,0xe6,0x85
+
+# CHECK: s_cselect_b64 vcc, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xea,0x85]
+0x00,0x00,0xea,0x85
+
+# CHECK: s_cselect_b64 tba, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xec,0x85]
+0x00,0x00,0xec,0x85
+
+# CHECK: s_cselect_b64 tma, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xee,0x85]
+0x00,0x00,0xee,0x85
+
+# CHECK: s_cselect_b64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfa,0x85]
+0x00,0x00,0xfa,0x85
+
+# CHECK: s_cselect_b64 exec, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfe,0x85]
+0x00,0x00,0xfe,0x85
+
+# CHECK: s_cselect_b64 s[0:1], s[2:3], s[0:1] ; encoding: [0x02,0x00,0x80,0x85]
+0x02,0x00,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], s[100:101], s[0:1] ; encoding: [0x64,0x00,0x80,0x85]
+0x64,0x00,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], flat_scratch, s[0:1] ; encoding: [0x66,0x00,0x80,0x85]
+0x66,0x00,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], vcc, s[0:1] ; encoding: [0x6a,0x00,0x80,0x85]
+0x6a,0x00,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], tba, s[0:1] ; encoding: [0x6c,0x00,0x80,0x85]
+0x6c,0x00,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], tma, s[0:1] ; encoding: [0x6e,0x00,0x80,0x85]
+0x6e,0x00,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], ttmp[10:11], s[0:1] ; encoding: [0x7a,0x00,0x80,0x85]
+0x7a,0x00,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], exec, s[0:1] ; encoding: [0x7e,0x00,0x80,0x85]
+0x7e,0x00,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], 0, s[0:1] ; encoding: [0x80,0x00,0x80,0x85]
+0x80,0x00,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], -1, s[0:1] ; encoding: [0xc1,0x00,0x80,0x85]
+0xc1,0x00,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], 0.5, s[0:1] ; encoding: [0xf0,0x00,0x80,0x85]
+0xf0,0x00,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], -4.0, s[0:1] ; encoding: [0xf7,0x00,0x80,0x85]
+0xf7,0x00,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], s[0:1], s[2:3] ; encoding: [0x00,0x02,0x80,0x85]
+0x00,0x02,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], s[0:1], s[100:101] ; encoding: [0x00,0x64,0x80,0x85]
+0x00,0x64,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], s[0:1], flat_scratch ; encoding: [0x00,0x66,0x80,0x85]
+0x00,0x66,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], s[0:1], vcc ; encoding: [0x00,0x6a,0x80,0x85]
+0x00,0x6a,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], s[0:1], tba ; encoding: [0x00,0x6c,0x80,0x85]
+0x00,0x6c,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], s[0:1], tma ; encoding: [0x00,0x6e,0x80,0x85]
+0x00,0x6e,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], s[0:1], ttmp[10:11] ; encoding: [0x00,0x7a,0x80,0x85]
+0x00,0x7a,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], s[0:1], exec ; encoding: [0x00,0x7e,0x80,0x85]
+0x00,0x7e,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x80,0x80,0x85]
+0x00,0x80,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], s[0:1], -1 ; encoding: [0x00,0xc1,0x80,0x85]
+0x00,0xc1,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0xf0,0x80,0x85]
+0x00,0xf0,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0xf7,0x80,0x85]
+0x00,0xf7,0x80,0x85
+
+# CHECK: s_cselect_b64 s[0:1], s[0:1], 0xaf123456 ; encoding: [0x00,0xff,0x80,0x85,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x80,0x85,0x56,0x34,0x12,0xaf
+
+# CHECK: s_cselect_b64 s[0:1], s[0:1], 0x3f717273 ; encoding: [0x00,0xff,0x80,0x85,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x80,0x85,0x73,0x72,0x71,0x3f
+
+# CHECK: s_and_b32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x86]
+0x00,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x86]
+0x00,0x00,0x65,0x86
+
+# CHECK: s_and_b32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x86]
+0x00,0x00,0x66,0x86
+
+# CHECK: s_and_b32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x86]
+0x00,0x00,0x67,0x86
+
+# CHECK: s_and_b32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x86]
+0x00,0x00,0x6a,0x86
+
+# CHECK: s_and_b32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x86]
+0x00,0x00,0x6b,0x86
+
+# CHECK: s_and_b32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x86]
+0x00,0x00,0x6c,0x86
+
+# CHECK: s_and_b32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x86]
+0x00,0x00,0x6d,0x86
+
+# CHECK: s_and_b32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x86]
+0x00,0x00,0x6e,0x86
+
+# CHECK: s_and_b32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x86]
+0x00,0x00,0x6f,0x86
+
+# CHECK: s_and_b32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x86]
+0x00,0x00,0x7b,0x86
+
+# CHECK: s_and_b32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x86]
+0x00,0x00,0x7c,0x86
+
+# CHECK: s_and_b32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x86]
+0x00,0x00,0x7e,0x86
+
+# CHECK: s_and_b32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x86]
+0x00,0x00,0x7f,0x86
+
+# CHECK: s_and_b32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x86]
+0x65,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x86]
+0x66,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x86]
+0x67,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x86]
+0x6a,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x86]
+0x6b,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x86]
+0x6c,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x86]
+0x6d,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x86]
+0x6e,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x86]
+0x6f,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x86]
+0x7b,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x86]
+0x7c,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x86]
+0x7e,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x86]
+0x7f,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x86]
+0x80,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x86]
+0xc1,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x86]
+0xf0,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x86]
+0xf7,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x86]
+0xfd,0x00,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x86]
+0x00,0x65,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x86]
+0x00,0x66,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x86]
+0x00,0x67,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x86]
+0x00,0x6a,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x86]
+0x00,0x6b,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x86]
+0x00,0x6c,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x86]
+0x00,0x6d,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x86]
+0x00,0x6e,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x86]
+0x00,0x6f,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x86]
+0x00,0x7b,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x86]
+0x00,0x7c,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x86]
+0x00,0x7e,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x86]
+0x00,0x7f,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x86]
+0x00,0x80,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x86]
+0x00,0xc1,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x86]
+0x00,0xf0,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x86]
+0x00,0xf7,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x86]
+0x00,0xfd,0x00,0x86
+
+# CHECK: s_and_b32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x86,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x86,0x56,0x34,0x12,0xaf
+
+# CHECK: s_and_b32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x86,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x86,0x73,0x72,0x71,0x3f
+
+# CHECK: s_and_b64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x80,0x86]
+0x00,0x00,0x80,0x86
+
+# CHECK: s_and_b64 s[2:3], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x82,0x86]
+0x00,0x00,0x82,0x86
+
+# CHECK: s_and_b64 s[100:101], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe4,0x86]
+0x00,0x00,0xe4,0x86
+
+# CHECK: s_and_b64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe6,0x86]
+0x00,0x00,0xe6,0x86
+
+# CHECK: s_and_b64 vcc, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xea,0x86]
+0x00,0x00,0xea,0x86
+
+# CHECK: s_and_b64 tba, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xec,0x86]
+0x00,0x00,0xec,0x86
+
+# CHECK: s_and_b64 tma, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xee,0x86]
+0x00,0x00,0xee,0x86
+
+# CHECK: s_and_b64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfa,0x86]
+0x00,0x00,0xfa,0x86
+
+# CHECK: s_and_b64 exec, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfe,0x86]
+0x00,0x00,0xfe,0x86
+
+# CHECK: s_and_b64 s[0:1], s[2:3], s[0:1] ; encoding: [0x02,0x00,0x80,0x86]
+0x02,0x00,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], s[100:101], s[0:1] ; encoding: [0x64,0x00,0x80,0x86]
+0x64,0x00,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], flat_scratch, s[0:1] ; encoding: [0x66,0x00,0x80,0x86]
+0x66,0x00,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], vcc, s[0:1] ; encoding: [0x6a,0x00,0x80,0x86]
+0x6a,0x00,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], tba, s[0:1] ; encoding: [0x6c,0x00,0x80,0x86]
+0x6c,0x00,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], tma, s[0:1] ; encoding: [0x6e,0x00,0x80,0x86]
+0x6e,0x00,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], ttmp[10:11], s[0:1] ; encoding: [0x7a,0x00,0x80,0x86]
+0x7a,0x00,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], exec, s[0:1] ; encoding: [0x7e,0x00,0x80,0x86]
+0x7e,0x00,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], 0, s[0:1] ; encoding: [0x80,0x00,0x80,0x86]
+0x80,0x00,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], -1, s[0:1] ; encoding: [0xc1,0x00,0x80,0x86]
+0xc1,0x00,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], 0.5, s[0:1] ; encoding: [0xf0,0x00,0x80,0x86]
+0xf0,0x00,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], -4.0, s[0:1] ; encoding: [0xf7,0x00,0x80,0x86]
+0xf7,0x00,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], s[0:1], s[2:3] ; encoding: [0x00,0x02,0x80,0x86]
+0x00,0x02,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], s[0:1], s[100:101] ; encoding: [0x00,0x64,0x80,0x86]
+0x00,0x64,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], s[0:1], flat_scratch ; encoding: [0x00,0x66,0x80,0x86]
+0x00,0x66,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], s[0:1], vcc ; encoding: [0x00,0x6a,0x80,0x86]
+0x00,0x6a,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], s[0:1], tba ; encoding: [0x00,0x6c,0x80,0x86]
+0x00,0x6c,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], s[0:1], tma ; encoding: [0x00,0x6e,0x80,0x86]
+0x00,0x6e,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], s[0:1], ttmp[10:11] ; encoding: [0x00,0x7a,0x80,0x86]
+0x00,0x7a,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], s[0:1], exec ; encoding: [0x00,0x7e,0x80,0x86]
+0x00,0x7e,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x80,0x80,0x86]
+0x00,0x80,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], s[0:1], -1 ; encoding: [0x00,0xc1,0x80,0x86]
+0x00,0xc1,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0xf0,0x80,0x86]
+0x00,0xf0,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0xf7,0x80,0x86]
+0x00,0xf7,0x80,0x86
+
+# CHECK: s_and_b64 s[0:1], s[0:1], 0xaf123456 ; encoding: [0x00,0xff,0x80,0x86,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x80,0x86,0x56,0x34,0x12,0xaf
+
+# CHECK: s_and_b64 s[0:1], s[0:1], 0x3f717273 ; encoding: [0x00,0xff,0x80,0x86,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x80,0x86,0x73,0x72,0x71,0x3f
+
+# CHECK: s_or_b32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x87]
+0x00,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x87]
+0x00,0x00,0x65,0x87
+
+# CHECK: s_or_b32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x87]
+0x00,0x00,0x66,0x87
+
+# CHECK: s_or_b32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x87]
+0x00,0x00,0x67,0x87
+
+# CHECK: s_or_b32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x87]
+0x00,0x00,0x6a,0x87
+
+# CHECK: s_or_b32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x87]
+0x00,0x00,0x6b,0x87
+
+# CHECK: s_or_b32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x87]
+0x00,0x00,0x6c,0x87
+
+# CHECK: s_or_b32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x87]
+0x00,0x00,0x6d,0x87
+
+# CHECK: s_or_b32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x87]
+0x00,0x00,0x6e,0x87
+
+# CHECK: s_or_b32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x87]
+0x00,0x00,0x6f,0x87
+
+# CHECK: s_or_b32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x87]
+0x00,0x00,0x7b,0x87
+
+# CHECK: s_or_b32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x87]
+0x00,0x00,0x7c,0x87
+
+# CHECK: s_or_b32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x87]
+0x00,0x00,0x7e,0x87
+
+# CHECK: s_or_b32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x87]
+0x00,0x00,0x7f,0x87
+
+# CHECK: s_or_b32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x87]
+0x65,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x87]
+0x66,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x87]
+0x67,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x87]
+0x6a,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x87]
+0x6b,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x87]
+0x6c,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x87]
+0x6d,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x87]
+0x6e,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x87]
+0x6f,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x87]
+0x7b,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x87]
+0x7c,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x87]
+0x7e,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x87]
+0x7f,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x87]
+0x80,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x87]
+0xc1,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x87]
+0xf0,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x87]
+0xf7,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x87]
+0xfd,0x00,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x87]
+0x00,0x65,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x87]
+0x00,0x66,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x87]
+0x00,0x67,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x87]
+0x00,0x6a,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x87]
+0x00,0x6b,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x87]
+0x00,0x6c,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x87]
+0x00,0x6d,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x87]
+0x00,0x6e,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x87]
+0x00,0x6f,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x87]
+0x00,0x7b,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x87]
+0x00,0x7c,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x87]
+0x00,0x7e,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x87]
+0x00,0x7f,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x87]
+0x00,0x80,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x87]
+0x00,0xc1,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x87]
+0x00,0xf0,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x87]
+0x00,0xf7,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x87]
+0x00,0xfd,0x00,0x87
+
+# CHECK: s_or_b32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x87,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x87,0x56,0x34,0x12,0xaf
+
+# CHECK: s_or_b32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x87,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x87,0x73,0x72,0x71,0x3f
+
+# CHECK: s_or_b64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x80,0x87]
+0x00,0x00,0x80,0x87
+
+# CHECK: s_or_b64 s[2:3], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x82,0x87]
+0x00,0x00,0x82,0x87
+
+# CHECK: s_or_b64 s[100:101], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe4,0x87]
+0x00,0x00,0xe4,0x87
+
+# CHECK: s_or_b64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe6,0x87]
+0x00,0x00,0xe6,0x87
+
+# CHECK: s_or_b64 vcc, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xea,0x87]
+0x00,0x00,0xea,0x87
+
+# CHECK: s_or_b64 tba, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xec,0x87]
+0x00,0x00,0xec,0x87
+
+# CHECK: s_or_b64 tma, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xee,0x87]
+0x00,0x00,0xee,0x87
+
+# CHECK: s_or_b64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfa,0x87]
+0x00,0x00,0xfa,0x87
+
+# CHECK: s_or_b64 exec, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfe,0x87]
+0x00,0x00,0xfe,0x87
+
+# CHECK: s_or_b64 s[0:1], s[2:3], s[0:1] ; encoding: [0x02,0x00,0x80,0x87]
+0x02,0x00,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], s[100:101], s[0:1] ; encoding: [0x64,0x00,0x80,0x87]
+0x64,0x00,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], flat_scratch, s[0:1] ; encoding: [0x66,0x00,0x80,0x87]
+0x66,0x00,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], vcc, s[0:1] ; encoding: [0x6a,0x00,0x80,0x87]
+0x6a,0x00,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], tba, s[0:1] ; encoding: [0x6c,0x00,0x80,0x87]
+0x6c,0x00,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], tma, s[0:1] ; encoding: [0x6e,0x00,0x80,0x87]
+0x6e,0x00,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], ttmp[10:11], s[0:1] ; encoding: [0x7a,0x00,0x80,0x87]
+0x7a,0x00,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], exec, s[0:1] ; encoding: [0x7e,0x00,0x80,0x87]
+0x7e,0x00,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], 0, s[0:1] ; encoding: [0x80,0x00,0x80,0x87]
+0x80,0x00,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], -1, s[0:1] ; encoding: [0xc1,0x00,0x80,0x87]
+0xc1,0x00,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], 0.5, s[0:1] ; encoding: [0xf0,0x00,0x80,0x87]
+0xf0,0x00,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], -4.0, s[0:1] ; encoding: [0xf7,0x00,0x80,0x87]
+0xf7,0x00,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], s[0:1], s[2:3] ; encoding: [0x00,0x02,0x80,0x87]
+0x00,0x02,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], s[0:1], s[100:101] ; encoding: [0x00,0x64,0x80,0x87]
+0x00,0x64,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], s[0:1], flat_scratch ; encoding: [0x00,0x66,0x80,0x87]
+0x00,0x66,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], s[0:1], vcc ; encoding: [0x00,0x6a,0x80,0x87]
+0x00,0x6a,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], s[0:1], tba ; encoding: [0x00,0x6c,0x80,0x87]
+0x00,0x6c,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], s[0:1], tma ; encoding: [0x00,0x6e,0x80,0x87]
+0x00,0x6e,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], s[0:1], ttmp[10:11] ; encoding: [0x00,0x7a,0x80,0x87]
+0x00,0x7a,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], s[0:1], exec ; encoding: [0x00,0x7e,0x80,0x87]
+0x00,0x7e,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x80,0x80,0x87]
+0x00,0x80,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], s[0:1], -1 ; encoding: [0x00,0xc1,0x80,0x87]
+0x00,0xc1,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0xf0,0x80,0x87]
+0x00,0xf0,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0xf7,0x80,0x87]
+0x00,0xf7,0x80,0x87
+
+# CHECK: s_or_b64 s[0:1], s[0:1], 0xaf123456 ; encoding: [0x00,0xff,0x80,0x87,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x80,0x87,0x56,0x34,0x12,0xaf
+
+# CHECK: s_or_b64 s[0:1], s[0:1], 0x3f717273 ; encoding: [0x00,0xff,0x80,0x87,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x80,0x87,0x73,0x72,0x71,0x3f
+
+# CHECK: s_xor_b32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x88]
+0x00,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x88]
+0x00,0x00,0x65,0x88
+
+# CHECK: s_xor_b32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x88]
+0x00,0x00,0x66,0x88
+
+# CHECK: s_xor_b32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x88]
+0x00,0x00,0x67,0x88
+
+# CHECK: s_xor_b32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x88]
+0x00,0x00,0x6a,0x88
+
+# CHECK: s_xor_b32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x88]
+0x00,0x00,0x6b,0x88
+
+# CHECK: s_xor_b32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x88]
+0x00,0x00,0x6c,0x88
+
+# CHECK: s_xor_b32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x88]
+0x00,0x00,0x6d,0x88
+
+# CHECK: s_xor_b32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x88]
+0x00,0x00,0x6e,0x88
+
+# CHECK: s_xor_b32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x88]
+0x00,0x00,0x6f,0x88
+
+# CHECK: s_xor_b32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x88]
+0x00,0x00,0x7b,0x88
+
+# CHECK: s_xor_b32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x88]
+0x00,0x00,0x7c,0x88
+
+# CHECK: s_xor_b32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x88]
+0x00,0x00,0x7e,0x88
+
+# CHECK: s_xor_b32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x88]
+0x00,0x00,0x7f,0x88
+
+# CHECK: s_xor_b32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x88]
+0x65,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x88]
+0x66,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x88]
+0x67,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x88]
+0x6a,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x88]
+0x6b,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x88]
+0x6c,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x88]
+0x6d,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x88]
+0x6e,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x88]
+0x6f,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x88]
+0x7b,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x88]
+0x7c,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x88]
+0x7e,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x88]
+0x7f,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x88]
+0x80,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x88]
+0xc1,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x88]
+0xf0,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x88]
+0xf7,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x88]
+0xfd,0x00,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x88]
+0x00,0x65,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x88]
+0x00,0x66,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x88]
+0x00,0x67,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x88]
+0x00,0x6a,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x88]
+0x00,0x6b,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x88]
+0x00,0x6c,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x88]
+0x00,0x6d,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x88]
+0x00,0x6e,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x88]
+0x00,0x6f,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x88]
+0x00,0x7b,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x88]
+0x00,0x7c,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x88]
+0x00,0x7e,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x88]
+0x00,0x7f,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x88]
+0x00,0x80,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x88]
+0x00,0xc1,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x88]
+0x00,0xf0,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x88]
+0x00,0xf7,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x88]
+0x00,0xfd,0x00,0x88
+
+# CHECK: s_xor_b32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x88,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x88,0x56,0x34,0x12,0xaf
+
+# CHECK: s_xor_b32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x88,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x88,0x73,0x72,0x71,0x3f
+
+# CHECK: s_xor_b64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x80,0x88]
+0x00,0x00,0x80,0x88
+
+# CHECK: s_xor_b64 s[2:3], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x82,0x88]
+0x00,0x00,0x82,0x88
+
+# CHECK: s_xor_b64 s[100:101], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe4,0x88]
+0x00,0x00,0xe4,0x88
+
+# CHECK: s_xor_b64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe6,0x88]
+0x00,0x00,0xe6,0x88
+
+# CHECK: s_xor_b64 vcc, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xea,0x88]
+0x00,0x00,0xea,0x88
+
+# CHECK: s_xor_b64 tba, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xec,0x88]
+0x00,0x00,0xec,0x88
+
+# CHECK: s_xor_b64 tma, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xee,0x88]
+0x00,0x00,0xee,0x88
+
+# CHECK: s_xor_b64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfa,0x88]
+0x00,0x00,0xfa,0x88
+
+# CHECK: s_xor_b64 exec, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfe,0x88]
+0x00,0x00,0xfe,0x88
+
+# CHECK: s_xor_b64 s[0:1], s[2:3], s[0:1] ; encoding: [0x02,0x00,0x80,0x88]
+0x02,0x00,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], s[100:101], s[0:1] ; encoding: [0x64,0x00,0x80,0x88]
+0x64,0x00,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], flat_scratch, s[0:1] ; encoding: [0x66,0x00,0x80,0x88]
+0x66,0x00,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], vcc, s[0:1] ; encoding: [0x6a,0x00,0x80,0x88]
+0x6a,0x00,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], tba, s[0:1] ; encoding: [0x6c,0x00,0x80,0x88]
+0x6c,0x00,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], tma, s[0:1] ; encoding: [0x6e,0x00,0x80,0x88]
+0x6e,0x00,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], ttmp[10:11], s[0:1] ; encoding: [0x7a,0x00,0x80,0x88]
+0x7a,0x00,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], exec, s[0:1] ; encoding: [0x7e,0x00,0x80,0x88]
+0x7e,0x00,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], 0, s[0:1] ; encoding: [0x80,0x00,0x80,0x88]
+0x80,0x00,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], -1, s[0:1] ; encoding: [0xc1,0x00,0x80,0x88]
+0xc1,0x00,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], 0.5, s[0:1] ; encoding: [0xf0,0x00,0x80,0x88]
+0xf0,0x00,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], -4.0, s[0:1] ; encoding: [0xf7,0x00,0x80,0x88]
+0xf7,0x00,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], s[0:1], s[2:3] ; encoding: [0x00,0x02,0x80,0x88]
+0x00,0x02,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], s[0:1], s[100:101] ; encoding: [0x00,0x64,0x80,0x88]
+0x00,0x64,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], s[0:1], flat_scratch ; encoding: [0x00,0x66,0x80,0x88]
+0x00,0x66,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], s[0:1], vcc ; encoding: [0x00,0x6a,0x80,0x88]
+0x00,0x6a,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], s[0:1], tba ; encoding: [0x00,0x6c,0x80,0x88]
+0x00,0x6c,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], s[0:1], tma ; encoding: [0x00,0x6e,0x80,0x88]
+0x00,0x6e,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], s[0:1], ttmp[10:11] ; encoding: [0x00,0x7a,0x80,0x88]
+0x00,0x7a,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], s[0:1], exec ; encoding: [0x00,0x7e,0x80,0x88]
+0x00,0x7e,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x80,0x80,0x88]
+0x00,0x80,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], s[0:1], -1 ; encoding: [0x00,0xc1,0x80,0x88]
+0x00,0xc1,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0xf0,0x80,0x88]
+0x00,0xf0,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0xf7,0x80,0x88]
+0x00,0xf7,0x80,0x88
+
+# CHECK: s_xor_b64 s[0:1], s[0:1], 0xaf123456 ; encoding: [0x00,0xff,0x80,0x88,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x80,0x88,0x56,0x34,0x12,0xaf
+
+# CHECK: s_xor_b64 s[0:1], s[0:1], 0x3f717273 ; encoding: [0x00,0xff,0x80,0x88,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x80,0x88,0x73,0x72,0x71,0x3f
+
+# CHECK: s_andn2_b32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x89]
+0x00,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x89]
+0x00,0x00,0x65,0x89
+
+# CHECK: s_andn2_b32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x89]
+0x00,0x00,0x66,0x89
+
+# CHECK: s_andn2_b32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x89]
+0x00,0x00,0x67,0x89
+
+# CHECK: s_andn2_b32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x89]
+0x00,0x00,0x6a,0x89
+
+# CHECK: s_andn2_b32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x89]
+0x00,0x00,0x6b,0x89
+
+# CHECK: s_andn2_b32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x89]
+0x00,0x00,0x6c,0x89
+
+# CHECK: s_andn2_b32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x89]
+0x00,0x00,0x6d,0x89
+
+# CHECK: s_andn2_b32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x89]
+0x00,0x00,0x6e,0x89
+
+# CHECK: s_andn2_b32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x89]
+0x00,0x00,0x6f,0x89
+
+# CHECK: s_andn2_b32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x89]
+0x00,0x00,0x7b,0x89
+
+# CHECK: s_andn2_b32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x89]
+0x00,0x00,0x7c,0x89
+
+# CHECK: s_andn2_b32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x89]
+0x00,0x00,0x7e,0x89
+
+# CHECK: s_andn2_b32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x89]
+0x00,0x00,0x7f,0x89
+
+# CHECK: s_andn2_b32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x89]
+0x65,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x89]
+0x66,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x89]
+0x67,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x89]
+0x6a,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x89]
+0x6b,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x89]
+0x6c,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x89]
+0x6d,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x89]
+0x6e,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x89]
+0x6f,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x89]
+0x7b,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x89]
+0x7c,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x89]
+0x7e,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x89]
+0x7f,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x89]
+0x80,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x89]
+0xc1,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x89]
+0xf0,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x89]
+0xf7,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x89]
+0xfd,0x00,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x89]
+0x00,0x65,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x89]
+0x00,0x66,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x89]
+0x00,0x67,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x89]
+0x00,0x6a,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x89]
+0x00,0x6b,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x89]
+0x00,0x6c,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x89]
+0x00,0x6d,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x89]
+0x00,0x6e,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x89]
+0x00,0x6f,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x89]
+0x00,0x7b,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x89]
+0x00,0x7c,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x89]
+0x00,0x7e,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x89]
+0x00,0x7f,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x89]
+0x00,0x80,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x89]
+0x00,0xc1,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x89]
+0x00,0xf0,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x89]
+0x00,0xf7,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x89]
+0x00,0xfd,0x00,0x89
+
+# CHECK: s_andn2_b32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x89,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x89,0x56,0x34,0x12,0xaf
+
+# CHECK: s_andn2_b32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x89,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x89,0x73,0x72,0x71,0x3f
+
+# CHECK: s_andn2_b64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x80,0x89]
+0x00,0x00,0x80,0x89
+
+# CHECK: s_andn2_b64 s[2:3], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x82,0x89]
+0x00,0x00,0x82,0x89
+
+# CHECK: s_andn2_b64 s[100:101], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe4,0x89]
+0x00,0x00,0xe4,0x89
+
+# CHECK: s_andn2_b64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe6,0x89]
+0x00,0x00,0xe6,0x89
+
+# CHECK: s_andn2_b64 vcc, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xea,0x89]
+0x00,0x00,0xea,0x89
+
+# CHECK: s_andn2_b64 tba, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xec,0x89]
+0x00,0x00,0xec,0x89
+
+# CHECK: s_andn2_b64 tma, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xee,0x89]
+0x00,0x00,0xee,0x89
+
+# CHECK: s_andn2_b64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfa,0x89]
+0x00,0x00,0xfa,0x89
+
+# CHECK: s_andn2_b64 exec, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfe,0x89]
+0x00,0x00,0xfe,0x89
+
+# CHECK: s_andn2_b64 s[0:1], s[2:3], s[0:1] ; encoding: [0x02,0x00,0x80,0x89]
+0x02,0x00,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], s[100:101], s[0:1] ; encoding: [0x64,0x00,0x80,0x89]
+0x64,0x00,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], flat_scratch, s[0:1] ; encoding: [0x66,0x00,0x80,0x89]
+0x66,0x00,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], vcc, s[0:1] ; encoding: [0x6a,0x00,0x80,0x89]
+0x6a,0x00,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], tba, s[0:1] ; encoding: [0x6c,0x00,0x80,0x89]
+0x6c,0x00,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], tma, s[0:1] ; encoding: [0x6e,0x00,0x80,0x89]
+0x6e,0x00,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], ttmp[10:11], s[0:1] ; encoding: [0x7a,0x00,0x80,0x89]
+0x7a,0x00,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], exec, s[0:1] ; encoding: [0x7e,0x00,0x80,0x89]
+0x7e,0x00,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], 0, s[0:1] ; encoding: [0x80,0x00,0x80,0x89]
+0x80,0x00,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], -1, s[0:1] ; encoding: [0xc1,0x00,0x80,0x89]
+0xc1,0x00,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], 0.5, s[0:1] ; encoding: [0xf0,0x00,0x80,0x89]
+0xf0,0x00,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], -4.0, s[0:1] ; encoding: [0xf7,0x00,0x80,0x89]
+0xf7,0x00,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], s[0:1], s[2:3] ; encoding: [0x00,0x02,0x80,0x89]
+0x00,0x02,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], s[0:1], s[100:101] ; encoding: [0x00,0x64,0x80,0x89]
+0x00,0x64,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], s[0:1], flat_scratch ; encoding: [0x00,0x66,0x80,0x89]
+0x00,0x66,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], s[0:1], vcc ; encoding: [0x00,0x6a,0x80,0x89]
+0x00,0x6a,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], s[0:1], tba ; encoding: [0x00,0x6c,0x80,0x89]
+0x00,0x6c,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], s[0:1], tma ; encoding: [0x00,0x6e,0x80,0x89]
+0x00,0x6e,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], s[0:1], ttmp[10:11] ; encoding: [0x00,0x7a,0x80,0x89]
+0x00,0x7a,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], s[0:1], exec ; encoding: [0x00,0x7e,0x80,0x89]
+0x00,0x7e,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x80,0x80,0x89]
+0x00,0x80,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], s[0:1], -1 ; encoding: [0x00,0xc1,0x80,0x89]
+0x00,0xc1,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0xf0,0x80,0x89]
+0x00,0xf0,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0xf7,0x80,0x89]
+0x00,0xf7,0x80,0x89
+
+# CHECK: s_andn2_b64 s[0:1], s[0:1], 0xaf123456 ; encoding: [0x00,0xff,0x80,0x89,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x80,0x89,0x56,0x34,0x12,0xaf
+
+# CHECK: s_andn2_b64 s[0:1], s[0:1], 0x3f717273 ; encoding: [0x00,0xff,0x80,0x89,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x80,0x89,0x73,0x72,0x71,0x3f
+
+# CHECK: s_orn2_b32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x8a]
+0x00,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x8a]
+0x00,0x00,0x65,0x8a
+
+# CHECK: s_orn2_b32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x8a]
+0x00,0x00,0x66,0x8a
+
+# CHECK: s_orn2_b32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x8a]
+0x00,0x00,0x67,0x8a
+
+# CHECK: s_orn2_b32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x8a]
+0x00,0x00,0x6a,0x8a
+
+# CHECK: s_orn2_b32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x8a]
+0x00,0x00,0x6b,0x8a
+
+# CHECK: s_orn2_b32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x8a]
+0x00,0x00,0x6c,0x8a
+
+# CHECK: s_orn2_b32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x8a]
+0x00,0x00,0x6d,0x8a
+
+# CHECK: s_orn2_b32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x8a]
+0x00,0x00,0x6e,0x8a
+
+# CHECK: s_orn2_b32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x8a]
+0x00,0x00,0x6f,0x8a
+
+# CHECK: s_orn2_b32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x8a]
+0x00,0x00,0x7b,0x8a
+
+# CHECK: s_orn2_b32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x8a]
+0x00,0x00,0x7c,0x8a
+
+# CHECK: s_orn2_b32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x8a]
+0x00,0x00,0x7e,0x8a
+
+# CHECK: s_orn2_b32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x8a]
+0x00,0x00,0x7f,0x8a
+
+# CHECK: s_orn2_b32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x8a]
+0x65,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x8a]
+0x66,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x8a]
+0x67,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x8a]
+0x6a,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x8a]
+0x6b,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x8a]
+0x6c,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x8a]
+0x6d,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x8a]
+0x6e,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x8a]
+0x6f,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x8a]
+0x7b,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x8a]
+0x7c,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x8a]
+0x7e,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x8a]
+0x7f,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x8a]
+0x80,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x8a]
+0xc1,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x8a]
+0xf0,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x8a]
+0xf7,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x8a]
+0xfd,0x00,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x8a]
+0x00,0x65,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x8a]
+0x00,0x66,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x8a]
+0x00,0x67,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x8a]
+0x00,0x6a,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x8a]
+0x00,0x6b,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x8a]
+0x00,0x6c,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x8a]
+0x00,0x6d,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x8a]
+0x00,0x6e,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x8a]
+0x00,0x6f,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x8a]
+0x00,0x7b,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x8a]
+0x00,0x7c,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x8a]
+0x00,0x7e,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x8a]
+0x00,0x7f,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x8a]
+0x00,0x80,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x8a]
+0x00,0xc1,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x8a]
+0x00,0xf0,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x8a]
+0x00,0xf7,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x8a]
+0x00,0xfd,0x00,0x8a
+
+# CHECK: s_orn2_b32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x8a,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x8a,0x56,0x34,0x12,0xaf
+
+# CHECK: s_orn2_b32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x8a,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x8a,0x73,0x72,0x71,0x3f
+
+# CHECK: s_orn2_b64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x80,0x8a]
+0x00,0x00,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[2:3], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x82,0x8a]
+0x00,0x00,0x82,0x8a
+
+# CHECK: s_orn2_b64 s[100:101], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe4,0x8a]
+0x00,0x00,0xe4,0x8a
+
+# CHECK: s_orn2_b64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe6,0x8a]
+0x00,0x00,0xe6,0x8a
+
+# CHECK: s_orn2_b64 vcc, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xea,0x8a]
+0x00,0x00,0xea,0x8a
+
+# CHECK: s_orn2_b64 tba, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xec,0x8a]
+0x00,0x00,0xec,0x8a
+
+# CHECK: s_orn2_b64 tma, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xee,0x8a]
+0x00,0x00,0xee,0x8a
+
+# CHECK: s_orn2_b64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfa,0x8a]
+0x00,0x00,0xfa,0x8a
+
+# CHECK: s_orn2_b64 exec, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfe,0x8a]
+0x00,0x00,0xfe,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], s[2:3], s[0:1] ; encoding: [0x02,0x00,0x80,0x8a]
+0x02,0x00,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], s[100:101], s[0:1] ; encoding: [0x64,0x00,0x80,0x8a]
+0x64,0x00,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], flat_scratch, s[0:1] ; encoding: [0x66,0x00,0x80,0x8a]
+0x66,0x00,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], vcc, s[0:1] ; encoding: [0x6a,0x00,0x80,0x8a]
+0x6a,0x00,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], tba, s[0:1] ; encoding: [0x6c,0x00,0x80,0x8a]
+0x6c,0x00,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], tma, s[0:1] ; encoding: [0x6e,0x00,0x80,0x8a]
+0x6e,0x00,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], ttmp[10:11], s[0:1] ; encoding: [0x7a,0x00,0x80,0x8a]
+0x7a,0x00,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], exec, s[0:1] ; encoding: [0x7e,0x00,0x80,0x8a]
+0x7e,0x00,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], 0, s[0:1] ; encoding: [0x80,0x00,0x80,0x8a]
+0x80,0x00,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], -1, s[0:1] ; encoding: [0xc1,0x00,0x80,0x8a]
+0xc1,0x00,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], 0.5, s[0:1] ; encoding: [0xf0,0x00,0x80,0x8a]
+0xf0,0x00,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], -4.0, s[0:1] ; encoding: [0xf7,0x00,0x80,0x8a]
+0xf7,0x00,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], s[0:1], s[2:3] ; encoding: [0x00,0x02,0x80,0x8a]
+0x00,0x02,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], s[0:1], s[100:101] ; encoding: [0x00,0x64,0x80,0x8a]
+0x00,0x64,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], s[0:1], flat_scratch ; encoding: [0x00,0x66,0x80,0x8a]
+0x00,0x66,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], s[0:1], vcc ; encoding: [0x00,0x6a,0x80,0x8a]
+0x00,0x6a,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], s[0:1], tba ; encoding: [0x00,0x6c,0x80,0x8a]
+0x00,0x6c,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], s[0:1], tma ; encoding: [0x00,0x6e,0x80,0x8a]
+0x00,0x6e,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], s[0:1], ttmp[10:11] ; encoding: [0x00,0x7a,0x80,0x8a]
+0x00,0x7a,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], s[0:1], exec ; encoding: [0x00,0x7e,0x80,0x8a]
+0x00,0x7e,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x80,0x80,0x8a]
+0x00,0x80,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], s[0:1], -1 ; encoding: [0x00,0xc1,0x80,0x8a]
+0x00,0xc1,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0xf0,0x80,0x8a]
+0x00,0xf0,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0xf7,0x80,0x8a]
+0x00,0xf7,0x80,0x8a
+
+# CHECK: s_orn2_b64 s[0:1], s[0:1], 0xaf123456 ; encoding: [0x00,0xff,0x80,0x8a,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x80,0x8a,0x56,0x34,0x12,0xaf
+
+# CHECK: s_orn2_b64 s[0:1], s[0:1], 0x3f717273 ; encoding: [0x00,0xff,0x80,0x8a,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x80,0x8a,0x73,0x72,0x71,0x3f
+
+# CHECK: s_nand_b32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x8b]
+0x00,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x8b]
+0x00,0x00,0x65,0x8b
+
+# CHECK: s_nand_b32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x8b]
+0x00,0x00,0x66,0x8b
+
+# CHECK: s_nand_b32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x8b]
+0x00,0x00,0x67,0x8b
+
+# CHECK: s_nand_b32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x8b]
+0x00,0x00,0x6a,0x8b
+
+# CHECK: s_nand_b32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x8b]
+0x00,0x00,0x6b,0x8b
+
+# CHECK: s_nand_b32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x8b]
+0x00,0x00,0x6c,0x8b
+
+# CHECK: s_nand_b32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x8b]
+0x00,0x00,0x6d,0x8b
+
+# CHECK: s_nand_b32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x8b]
+0x00,0x00,0x6e,0x8b
+
+# CHECK: s_nand_b32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x8b]
+0x00,0x00,0x6f,0x8b
+
+# CHECK: s_nand_b32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x8b]
+0x00,0x00,0x7b,0x8b
+
+# CHECK: s_nand_b32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x8b]
+0x00,0x00,0x7c,0x8b
+
+# CHECK: s_nand_b32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x8b]
+0x00,0x00,0x7e,0x8b
+
+# CHECK: s_nand_b32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x8b]
+0x00,0x00,0x7f,0x8b
+
+# CHECK: s_nand_b32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x8b]
+0x65,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x8b]
+0x66,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x8b]
+0x67,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x8b]
+0x6a,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x8b]
+0x6b,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x8b]
+0x6c,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x8b]
+0x6d,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x8b]
+0x6e,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x8b]
+0x6f,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x8b]
+0x7b,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x8b]
+0x7c,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x8b]
+0x7e,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x8b]
+0x7f,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x8b]
+0x80,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x8b]
+0xc1,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x8b]
+0xf0,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x8b]
+0xf7,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x8b]
+0xfd,0x00,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x8b]
+0x00,0x65,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x8b]
+0x00,0x66,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x8b]
+0x00,0x67,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x8b]
+0x00,0x6a,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x8b]
+0x00,0x6b,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x8b]
+0x00,0x6c,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x8b]
+0x00,0x6d,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x8b]
+0x00,0x6e,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x8b]
+0x00,0x6f,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x8b]
+0x00,0x7b,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x8b]
+0x00,0x7c,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x8b]
+0x00,0x7e,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x8b]
+0x00,0x7f,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x8b]
+0x00,0x80,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x8b]
+0x00,0xc1,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x8b]
+0x00,0xf0,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x8b]
+0x00,0xf7,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x8b]
+0x00,0xfd,0x00,0x8b
+
+# CHECK: s_nand_b32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x8b,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x8b,0x56,0x34,0x12,0xaf
+
+# CHECK: s_nand_b32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x8b,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x8b,0x73,0x72,0x71,0x3f
+
+# CHECK: s_nand_b64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x80,0x8b]
+0x00,0x00,0x80,0x8b
+
+# CHECK: s_nand_b64 s[2:3], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x82,0x8b]
+0x00,0x00,0x82,0x8b
+
+# CHECK: s_nand_b64 s[100:101], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe4,0x8b]
+0x00,0x00,0xe4,0x8b
+
+# CHECK: s_nand_b64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe6,0x8b]
+0x00,0x00,0xe6,0x8b
+
+# CHECK: s_nand_b64 vcc, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xea,0x8b]
+0x00,0x00,0xea,0x8b
+
+# CHECK: s_nand_b64 tba, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xec,0x8b]
+0x00,0x00,0xec,0x8b
+
+# CHECK: s_nand_b64 tma, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xee,0x8b]
+0x00,0x00,0xee,0x8b
+
+# CHECK: s_nand_b64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfa,0x8b]
+0x00,0x00,0xfa,0x8b
+
+# CHECK: s_nand_b64 exec, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfe,0x8b]
+0x00,0x00,0xfe,0x8b
+
+# CHECK: s_nand_b64 s[0:1], s[2:3], s[0:1] ; encoding: [0x02,0x00,0x80,0x8b]
+0x02,0x00,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], s[100:101], s[0:1] ; encoding: [0x64,0x00,0x80,0x8b]
+0x64,0x00,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], flat_scratch, s[0:1] ; encoding: [0x66,0x00,0x80,0x8b]
+0x66,0x00,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], vcc, s[0:1] ; encoding: [0x6a,0x00,0x80,0x8b]
+0x6a,0x00,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], tba, s[0:1] ; encoding: [0x6c,0x00,0x80,0x8b]
+0x6c,0x00,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], tma, s[0:1] ; encoding: [0x6e,0x00,0x80,0x8b]
+0x6e,0x00,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], ttmp[10:11], s[0:1] ; encoding: [0x7a,0x00,0x80,0x8b]
+0x7a,0x00,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], exec, s[0:1] ; encoding: [0x7e,0x00,0x80,0x8b]
+0x7e,0x00,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], 0, s[0:1] ; encoding: [0x80,0x00,0x80,0x8b]
+0x80,0x00,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], -1, s[0:1] ; encoding: [0xc1,0x00,0x80,0x8b]
+0xc1,0x00,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], 0.5, s[0:1] ; encoding: [0xf0,0x00,0x80,0x8b]
+0xf0,0x00,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], -4.0, s[0:1] ; encoding: [0xf7,0x00,0x80,0x8b]
+0xf7,0x00,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], s[0:1], s[2:3] ; encoding: [0x00,0x02,0x80,0x8b]
+0x00,0x02,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], s[0:1], s[100:101] ; encoding: [0x00,0x64,0x80,0x8b]
+0x00,0x64,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], s[0:1], flat_scratch ; encoding: [0x00,0x66,0x80,0x8b]
+0x00,0x66,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], s[0:1], vcc ; encoding: [0x00,0x6a,0x80,0x8b]
+0x00,0x6a,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], s[0:1], tba ; encoding: [0x00,0x6c,0x80,0x8b]
+0x00,0x6c,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], s[0:1], tma ; encoding: [0x00,0x6e,0x80,0x8b]
+0x00,0x6e,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], s[0:1], ttmp[10:11] ; encoding: [0x00,0x7a,0x80,0x8b]
+0x00,0x7a,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], s[0:1], exec ; encoding: [0x00,0x7e,0x80,0x8b]
+0x00,0x7e,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x80,0x80,0x8b]
+0x00,0x80,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], s[0:1], -1 ; encoding: [0x00,0xc1,0x80,0x8b]
+0x00,0xc1,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0xf0,0x80,0x8b]
+0x00,0xf0,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0xf7,0x80,0x8b]
+0x00,0xf7,0x80,0x8b
+
+# CHECK: s_nand_b64 s[0:1], s[0:1], 0xaf123456 ; encoding: [0x00,0xff,0x80,0x8b,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x80,0x8b,0x56,0x34,0x12,0xaf
+
+# CHECK: s_nand_b64 s[0:1], s[0:1], 0x3f717273 ; encoding: [0x00,0xff,0x80,0x8b,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x80,0x8b,0x73,0x72,0x71,0x3f
+
+# CHECK: s_nor_b32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x8c]
+0x00,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x8c]
+0x00,0x00,0x65,0x8c
+
+# CHECK: s_nor_b32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x8c]
+0x00,0x00,0x66,0x8c
+
+# CHECK: s_nor_b32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x8c]
+0x00,0x00,0x67,0x8c
+
+# CHECK: s_nor_b32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x8c]
+0x00,0x00,0x6a,0x8c
+
+# CHECK: s_nor_b32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x8c]
+0x00,0x00,0x6b,0x8c
+
+# CHECK: s_nor_b32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x8c]
+0x00,0x00,0x6c,0x8c
+
+# CHECK: s_nor_b32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x8c]
+0x00,0x00,0x6d,0x8c
+
+# CHECK: s_nor_b32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x8c]
+0x00,0x00,0x6e,0x8c
+
+# CHECK: s_nor_b32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x8c]
+0x00,0x00,0x6f,0x8c
+
+# CHECK: s_nor_b32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x8c]
+0x00,0x00,0x7b,0x8c
+
+# CHECK: s_nor_b32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x8c]
+0x00,0x00,0x7c,0x8c
+
+# CHECK: s_nor_b32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x8c]
+0x00,0x00,0x7e,0x8c
+
+# CHECK: s_nor_b32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x8c]
+0x00,0x00,0x7f,0x8c
+
+# CHECK: s_nor_b32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x8c]
+0x65,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x8c]
+0x66,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x8c]
+0x67,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x8c]
+0x6a,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x8c]
+0x6b,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x8c]
+0x6c,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x8c]
+0x6d,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x8c]
+0x6e,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x8c]
+0x6f,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x8c]
+0x7b,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x8c]
+0x7c,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x8c]
+0x7e,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x8c]
+0x7f,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x8c]
+0x80,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x8c]
+0xc1,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x8c]
+0xf0,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x8c]
+0xf7,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x8c]
+0xfd,0x00,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x8c]
+0x00,0x65,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x8c]
+0x00,0x66,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x8c]
+0x00,0x67,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x8c]
+0x00,0x6a,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x8c]
+0x00,0x6b,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x8c]
+0x00,0x6c,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x8c]
+0x00,0x6d,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x8c]
+0x00,0x6e,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x8c]
+0x00,0x6f,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x8c]
+0x00,0x7b,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x8c]
+0x00,0x7c,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x8c]
+0x00,0x7e,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x8c]
+0x00,0x7f,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x8c]
+0x00,0x80,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x8c]
+0x00,0xc1,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x8c]
+0x00,0xf0,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x8c]
+0x00,0xf7,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x8c]
+0x00,0xfd,0x00,0x8c
+
+# CHECK: s_nor_b32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x8c,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x8c,0x56,0x34,0x12,0xaf
+
+# CHECK: s_nor_b32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x8c,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x8c,0x73,0x72,0x71,0x3f
+
+# CHECK: s_nor_b64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x80,0x8c]
+0x00,0x00,0x80,0x8c
+
+# CHECK: s_nor_b64 s[2:3], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x82,0x8c]
+0x00,0x00,0x82,0x8c
+
+# CHECK: s_nor_b64 s[100:101], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe4,0x8c]
+0x00,0x00,0xe4,0x8c
+
+# CHECK: s_nor_b64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe6,0x8c]
+0x00,0x00,0xe6,0x8c
+
+# CHECK: s_nor_b64 vcc, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xea,0x8c]
+0x00,0x00,0xea,0x8c
+
+# CHECK: s_nor_b64 tba, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xec,0x8c]
+0x00,0x00,0xec,0x8c
+
+# CHECK: s_nor_b64 tma, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xee,0x8c]
+0x00,0x00,0xee,0x8c
+
+# CHECK: s_nor_b64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfa,0x8c]
+0x00,0x00,0xfa,0x8c
+
+# CHECK: s_nor_b64 exec, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfe,0x8c]
+0x00,0x00,0xfe,0x8c
+
+# CHECK: s_nor_b64 s[0:1], s[2:3], s[0:1] ; encoding: [0x02,0x00,0x80,0x8c]
+0x02,0x00,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], s[100:101], s[0:1] ; encoding: [0x64,0x00,0x80,0x8c]
+0x64,0x00,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], flat_scratch, s[0:1] ; encoding: [0x66,0x00,0x80,0x8c]
+0x66,0x00,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], vcc, s[0:1] ; encoding: [0x6a,0x00,0x80,0x8c]
+0x6a,0x00,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], tba, s[0:1] ; encoding: [0x6c,0x00,0x80,0x8c]
+0x6c,0x00,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], tma, s[0:1] ; encoding: [0x6e,0x00,0x80,0x8c]
+0x6e,0x00,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], ttmp[10:11], s[0:1] ; encoding: [0x7a,0x00,0x80,0x8c]
+0x7a,0x00,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], exec, s[0:1] ; encoding: [0x7e,0x00,0x80,0x8c]
+0x7e,0x00,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], 0, s[0:1] ; encoding: [0x80,0x00,0x80,0x8c]
+0x80,0x00,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], -1, s[0:1] ; encoding: [0xc1,0x00,0x80,0x8c]
+0xc1,0x00,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], 0.5, s[0:1] ; encoding: [0xf0,0x00,0x80,0x8c]
+0xf0,0x00,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], -4.0, s[0:1] ; encoding: [0xf7,0x00,0x80,0x8c]
+0xf7,0x00,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], s[0:1], s[2:3] ; encoding: [0x00,0x02,0x80,0x8c]
+0x00,0x02,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], s[0:1], s[100:101] ; encoding: [0x00,0x64,0x80,0x8c]
+0x00,0x64,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], s[0:1], flat_scratch ; encoding: [0x00,0x66,0x80,0x8c]
+0x00,0x66,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], s[0:1], vcc ; encoding: [0x00,0x6a,0x80,0x8c]
+0x00,0x6a,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], s[0:1], tba ; encoding: [0x00,0x6c,0x80,0x8c]
+0x00,0x6c,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], s[0:1], tma ; encoding: [0x00,0x6e,0x80,0x8c]
+0x00,0x6e,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], s[0:1], ttmp[10:11] ; encoding: [0x00,0x7a,0x80,0x8c]
+0x00,0x7a,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], s[0:1], exec ; encoding: [0x00,0x7e,0x80,0x8c]
+0x00,0x7e,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x80,0x80,0x8c]
+0x00,0x80,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], s[0:1], -1 ; encoding: [0x00,0xc1,0x80,0x8c]
+0x00,0xc1,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0xf0,0x80,0x8c]
+0x00,0xf0,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0xf7,0x80,0x8c]
+0x00,0xf7,0x80,0x8c
+
+# CHECK: s_nor_b64 s[0:1], s[0:1], 0xaf123456 ; encoding: [0x00,0xff,0x80,0x8c,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x80,0x8c,0x56,0x34,0x12,0xaf
+
+# CHECK: s_nor_b64 s[0:1], s[0:1], 0x3f717273 ; encoding: [0x00,0xff,0x80,0x8c,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x80,0x8c,0x73,0x72,0x71,0x3f
+
+# CHECK: s_xnor_b32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x8d]
+0x00,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x8d]
+0x00,0x00,0x65,0x8d
+
+# CHECK: s_xnor_b32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x8d]
+0x00,0x00,0x66,0x8d
+
+# CHECK: s_xnor_b32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x8d]
+0x00,0x00,0x67,0x8d
+
+# CHECK: s_xnor_b32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x8d]
+0x00,0x00,0x6a,0x8d
+
+# CHECK: s_xnor_b32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x8d]
+0x00,0x00,0x6b,0x8d
+
+# CHECK: s_xnor_b32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x8d]
+0x00,0x00,0x6c,0x8d
+
+# CHECK: s_xnor_b32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x8d]
+0x00,0x00,0x6d,0x8d
+
+# CHECK: s_xnor_b32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x8d]
+0x00,0x00,0x6e,0x8d
+
+# CHECK: s_xnor_b32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x8d]
+0x00,0x00,0x6f,0x8d
+
+# CHECK: s_xnor_b32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x8d]
+0x00,0x00,0x7b,0x8d
+
+# CHECK: s_xnor_b32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x8d]
+0x00,0x00,0x7c,0x8d
+
+# CHECK: s_xnor_b32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x8d]
+0x00,0x00,0x7e,0x8d
+
+# CHECK: s_xnor_b32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x8d]
+0x00,0x00,0x7f,0x8d
+
+# CHECK: s_xnor_b32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x8d]
+0x65,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x8d]
+0x66,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x8d]
+0x67,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x8d]
+0x6a,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x8d]
+0x6b,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x8d]
+0x6c,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x8d]
+0x6d,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x8d]
+0x6e,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x8d]
+0x6f,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x8d]
+0x7b,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x8d]
+0x7c,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x8d]
+0x7e,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x8d]
+0x7f,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x8d]
+0x80,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x8d]
+0xc1,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x8d]
+0xf0,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x8d]
+0xf7,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x8d]
+0xfd,0x00,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x8d]
+0x00,0x65,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x8d]
+0x00,0x66,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x8d]
+0x00,0x67,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x8d]
+0x00,0x6a,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x8d]
+0x00,0x6b,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x8d]
+0x00,0x6c,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x8d]
+0x00,0x6d,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x8d]
+0x00,0x6e,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x8d]
+0x00,0x6f,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x8d]
+0x00,0x7b,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x8d]
+0x00,0x7c,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x8d]
+0x00,0x7e,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x8d]
+0x00,0x7f,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x8d]
+0x00,0x80,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x8d]
+0x00,0xc1,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x8d]
+0x00,0xf0,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x8d]
+0x00,0xf7,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x8d]
+0x00,0xfd,0x00,0x8d
+
+# CHECK: s_xnor_b32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x8d,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x8d,0x56,0x34,0x12,0xaf
+
+# CHECK: s_xnor_b32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x8d,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x8d,0x73,0x72,0x71,0x3f
+
+# CHECK: s_xnor_b64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x80,0x8d]
+0x00,0x00,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[2:3], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x82,0x8d]
+0x00,0x00,0x82,0x8d
+
+# CHECK: s_xnor_b64 s[100:101], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe4,0x8d]
+0x00,0x00,0xe4,0x8d
+
+# CHECK: s_xnor_b64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe6,0x8d]
+0x00,0x00,0xe6,0x8d
+
+# CHECK: s_xnor_b64 vcc, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xea,0x8d]
+0x00,0x00,0xea,0x8d
+
+# CHECK: s_xnor_b64 tba, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xec,0x8d]
+0x00,0x00,0xec,0x8d
+
+# CHECK: s_xnor_b64 tma, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xee,0x8d]
+0x00,0x00,0xee,0x8d
+
+# CHECK: s_xnor_b64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfa,0x8d]
+0x00,0x00,0xfa,0x8d
+
+# CHECK: s_xnor_b64 exec, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfe,0x8d]
+0x00,0x00,0xfe,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], s[2:3], s[0:1] ; encoding: [0x02,0x00,0x80,0x8d]
+0x02,0x00,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], s[100:101], s[0:1] ; encoding: [0x64,0x00,0x80,0x8d]
+0x64,0x00,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], flat_scratch, s[0:1] ; encoding: [0x66,0x00,0x80,0x8d]
+0x66,0x00,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], vcc, s[0:1] ; encoding: [0x6a,0x00,0x80,0x8d]
+0x6a,0x00,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], tba, s[0:1] ; encoding: [0x6c,0x00,0x80,0x8d]
+0x6c,0x00,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], tma, s[0:1] ; encoding: [0x6e,0x00,0x80,0x8d]
+0x6e,0x00,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], ttmp[10:11], s[0:1] ; encoding: [0x7a,0x00,0x80,0x8d]
+0x7a,0x00,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], exec, s[0:1] ; encoding: [0x7e,0x00,0x80,0x8d]
+0x7e,0x00,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], 0, s[0:1] ; encoding: [0x80,0x00,0x80,0x8d]
+0x80,0x00,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], -1, s[0:1] ; encoding: [0xc1,0x00,0x80,0x8d]
+0xc1,0x00,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], 0.5, s[0:1] ; encoding: [0xf0,0x00,0x80,0x8d]
+0xf0,0x00,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], -4.0, s[0:1] ; encoding: [0xf7,0x00,0x80,0x8d]
+0xf7,0x00,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], s[0:1], s[2:3] ; encoding: [0x00,0x02,0x80,0x8d]
+0x00,0x02,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], s[0:1], s[100:101] ; encoding: [0x00,0x64,0x80,0x8d]
+0x00,0x64,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], s[0:1], flat_scratch ; encoding: [0x00,0x66,0x80,0x8d]
+0x00,0x66,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], s[0:1], vcc ; encoding: [0x00,0x6a,0x80,0x8d]
+0x00,0x6a,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], s[0:1], tba ; encoding: [0x00,0x6c,0x80,0x8d]
+0x00,0x6c,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], s[0:1], tma ; encoding: [0x00,0x6e,0x80,0x8d]
+0x00,0x6e,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], s[0:1], ttmp[10:11] ; encoding: [0x00,0x7a,0x80,0x8d]
+0x00,0x7a,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], s[0:1], exec ; encoding: [0x00,0x7e,0x80,0x8d]
+0x00,0x7e,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x80,0x80,0x8d]
+0x00,0x80,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], s[0:1], -1 ; encoding: [0x00,0xc1,0x80,0x8d]
+0x00,0xc1,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0xf0,0x80,0x8d]
+0x00,0xf0,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0xf7,0x80,0x8d]
+0x00,0xf7,0x80,0x8d
+
+# CHECK: s_xnor_b64 s[0:1], s[0:1], 0xaf123456 ; encoding: [0x00,0xff,0x80,0x8d,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x80,0x8d,0x56,0x34,0x12,0xaf
+
+# CHECK: s_xnor_b64 s[0:1], s[0:1], 0x3f717273 ; encoding: [0x00,0xff,0x80,0x8d,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x80,0x8d,0x73,0x72,0x71,0x3f
+
+# CHECK: s_lshl_b32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x8e]
+0x00,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x8e]
+0x00,0x00,0x65,0x8e
+
+# CHECK: s_lshl_b32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x8e]
+0x00,0x00,0x66,0x8e
+
+# CHECK: s_lshl_b32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x8e]
+0x00,0x00,0x67,0x8e
+
+# CHECK: s_lshl_b32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x8e]
+0x00,0x00,0x6a,0x8e
+
+# CHECK: s_lshl_b32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x8e]
+0x00,0x00,0x6b,0x8e
+
+# CHECK: s_lshl_b32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x8e]
+0x00,0x00,0x6c,0x8e
+
+# CHECK: s_lshl_b32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x8e]
+0x00,0x00,0x6d,0x8e
+
+# CHECK: s_lshl_b32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x8e]
+0x00,0x00,0x6e,0x8e
+
+# CHECK: s_lshl_b32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x8e]
+0x00,0x00,0x6f,0x8e
+
+# CHECK: s_lshl_b32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x8e]
+0x00,0x00,0x7b,0x8e
+
+# CHECK: s_lshl_b32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x8e]
+0x00,0x00,0x7c,0x8e
+
+# CHECK: s_lshl_b32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x8e]
+0x00,0x00,0x7e,0x8e
+
+# CHECK: s_lshl_b32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x8e]
+0x00,0x00,0x7f,0x8e
+
+# CHECK: s_lshl_b32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x8e]
+0x65,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x8e]
+0x66,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x8e]
+0x67,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x8e]
+0x6a,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x8e]
+0x6b,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x8e]
+0x6c,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x8e]
+0x6d,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x8e]
+0x6e,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x8e]
+0x6f,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x8e]
+0x7b,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x8e]
+0x7c,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x8e]
+0x7e,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x8e]
+0x7f,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x8e]
+0x80,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x8e]
+0xc1,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x8e]
+0xf0,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x8e]
+0xf7,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x8e]
+0xfd,0x00,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x8e]
+0x00,0x65,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x8e]
+0x00,0x66,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x8e]
+0x00,0x67,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x8e]
+0x00,0x6a,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x8e]
+0x00,0x6b,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x8e]
+0x00,0x6c,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x8e]
+0x00,0x6d,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x8e]
+0x00,0x6e,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x8e]
+0x00,0x6f,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x8e]
+0x00,0x7b,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x8e]
+0x00,0x7c,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x8e]
+0x00,0x7e,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x8e]
+0x00,0x7f,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x8e]
+0x00,0x80,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x8e]
+0x00,0xc1,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x8e]
+0x00,0xf0,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x8e]
+0x00,0xf7,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x8e]
+0x00,0xfd,0x00,0x8e
+
+# CHECK: s_lshl_b32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x8e,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x8e,0x56,0x34,0x12,0xaf
+
+# CHECK: s_lshl_b32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x8e,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x8e,0x73,0x72,0x71,0x3f
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], s0 ; encoding: [0x00,0x00,0x80,0x8e]
+0x00,0x00,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[2:3], s[0:1], s0 ; encoding: [0x00,0x00,0x82,0x8e]
+0x00,0x00,0x82,0x8e
+
+# CHECK: s_lshl_b64 s[100:101], s[0:1], s0 ; encoding: [0x00,0x00,0xe4,0x8e]
+0x00,0x00,0xe4,0x8e
+
+# CHECK: s_lshl_b64 flat_scratch, s[0:1], s0 ; encoding: [0x00,0x00,0xe6,0x8e]
+0x00,0x00,0xe6,0x8e
+
+# CHECK: s_lshl_b64 vcc, s[0:1], s0 ; encoding: [0x00,0x00,0xea,0x8e]
+0x00,0x00,0xea,0x8e
+
+# CHECK: s_lshl_b64 tba, s[0:1], s0 ; encoding: [0x00,0x00,0xec,0x8e]
+0x00,0x00,0xec,0x8e
+
+# CHECK: s_lshl_b64 tma, s[0:1], s0 ; encoding: [0x00,0x00,0xee,0x8e]
+0x00,0x00,0xee,0x8e
+
+# CHECK: s_lshl_b64 ttmp[10:11], s[0:1], s0 ; encoding: [0x00,0x00,0xfa,0x8e]
+0x00,0x00,0xfa,0x8e
+
+# CHECK: s_lshl_b64 exec, s[0:1], s0 ; encoding: [0x00,0x00,0xfe,0x8e]
+0x00,0x00,0xfe,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[2:3], s0 ; encoding: [0x02,0x00,0x80,0x8e]
+0x02,0x00,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[100:101], s0 ; encoding: [0x64,0x00,0x80,0x8e]
+0x64,0x00,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], flat_scratch, s0 ; encoding: [0x66,0x00,0x80,0x8e]
+0x66,0x00,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], vcc, s0 ; encoding: [0x6a,0x00,0x80,0x8e]
+0x6a,0x00,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], tba, s0 ; encoding: [0x6c,0x00,0x80,0x8e]
+0x6c,0x00,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], tma, s0 ; encoding: [0x6e,0x00,0x80,0x8e]
+0x6e,0x00,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], ttmp[10:11], s0 ; encoding: [0x7a,0x00,0x80,0x8e]
+0x7a,0x00,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], exec, s0 ; encoding: [0x7e,0x00,0x80,0x8e]
+0x7e,0x00,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], 0, s0 ; encoding: [0x80,0x00,0x80,0x8e]
+0x80,0x00,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], -1, s0 ; encoding: [0xc1,0x00,0x80,0x8e]
+0xc1,0x00,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], 0.5, s0 ; encoding: [0xf0,0x00,0x80,0x8e]
+0xf0,0x00,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], -4.0, s0 ; encoding: [0xf7,0x00,0x80,0x8e]
+0xf7,0x00,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], 0xaf123456, s0 ; encoding: [0xff,0x00,0x80,0x8e,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x80,0x8e,0x56,0x34,0x12,0xaf
+
+# CHECK: s_lshl_b64 s[0:1], 0x3f717273, s0 ; encoding: [0xff,0x00,0x80,0x8e,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x80,0x8e,0x73,0x72,0x71,0x3f
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], s101 ; encoding: [0x00,0x65,0x80,0x8e]
+0x00,0x65,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], flat_scratch_lo ; encoding: [0x00,0x66,0x80,0x8e]
+0x00,0x66,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], flat_scratch_hi ; encoding: [0x00,0x67,0x80,0x8e]
+0x00,0x67,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], vcc_lo ; encoding: [0x00,0x6a,0x80,0x8e]
+0x00,0x6a,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], vcc_hi ; encoding: [0x00,0x6b,0x80,0x8e]
+0x00,0x6b,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], tba_lo ; encoding: [0x00,0x6c,0x80,0x8e]
+0x00,0x6c,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], tba_hi ; encoding: [0x00,0x6d,0x80,0x8e]
+0x00,0x6d,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], tma_lo ; encoding: [0x00,0x6e,0x80,0x8e]
+0x00,0x6e,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], tma_hi ; encoding: [0x00,0x6f,0x80,0x8e]
+0x00,0x6f,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], ttmp11 ; encoding: [0x00,0x7b,0x80,0x8e]
+0x00,0x7b,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], m0 ; encoding: [0x00,0x7c,0x80,0x8e]
+0x00,0x7c,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], exec_lo ; encoding: [0x00,0x7e,0x80,0x8e]
+0x00,0x7e,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], exec_hi ; encoding: [0x00,0x7f,0x80,0x8e]
+0x00,0x7f,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x80,0x80,0x8e]
+0x00,0x80,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], -1 ; encoding: [0x00,0xc1,0x80,0x8e]
+0x00,0xc1,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0xf0,0x80,0x8e]
+0x00,0xf0,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0xf7,0x80,0x8e]
+0x00,0xf7,0x80,0x8e
+
+# CHECK: s_lshl_b64 s[0:1], s[0:1], scc ; encoding: [0x00,0xfd,0x80,0x8e]
+0x00,0xfd,0x80,0x8e
+
+# CHECK: s_lshr_b32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x8f]
+0x00,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x8f]
+0x00,0x00,0x65,0x8f
+
+# CHECK: s_lshr_b32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x8f]
+0x00,0x00,0x66,0x8f
+
+# CHECK: s_lshr_b32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x8f]
+0x00,0x00,0x67,0x8f
+
+# CHECK: s_lshr_b32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x8f]
+0x00,0x00,0x6a,0x8f
+
+# CHECK: s_lshr_b32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x8f]
+0x00,0x00,0x6b,0x8f
+
+# CHECK: s_lshr_b32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x8f]
+0x00,0x00,0x6c,0x8f
+
+# CHECK: s_lshr_b32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x8f]
+0x00,0x00,0x6d,0x8f
+
+# CHECK: s_lshr_b32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x8f]
+0x00,0x00,0x6e,0x8f
+
+# CHECK: s_lshr_b32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x8f]
+0x00,0x00,0x6f,0x8f
+
+# CHECK: s_lshr_b32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x8f]
+0x00,0x00,0x7b,0x8f
+
+# CHECK: s_lshr_b32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x8f]
+0x00,0x00,0x7c,0x8f
+
+# CHECK: s_lshr_b32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x8f]
+0x00,0x00,0x7e,0x8f
+
+# CHECK: s_lshr_b32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x8f]
+0x00,0x00,0x7f,0x8f
+
+# CHECK: s_lshr_b32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x8f]
+0x65,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x8f]
+0x66,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x8f]
+0x67,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x8f]
+0x6a,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x8f]
+0x6b,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x8f]
+0x6c,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x8f]
+0x6d,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x8f]
+0x6e,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x8f]
+0x6f,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x8f]
+0x7b,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x8f]
+0x7c,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x8f]
+0x7e,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x8f]
+0x7f,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x8f]
+0x80,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x8f]
+0xc1,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x8f]
+0xf0,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x8f]
+0xf7,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x8f]
+0xfd,0x00,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x8f]
+0x00,0x65,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x8f]
+0x00,0x66,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x8f]
+0x00,0x67,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x8f]
+0x00,0x6a,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x8f]
+0x00,0x6b,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x8f]
+0x00,0x6c,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x8f]
+0x00,0x6d,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x8f]
+0x00,0x6e,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x8f]
+0x00,0x6f,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x8f]
+0x00,0x7b,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x8f]
+0x00,0x7c,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x8f]
+0x00,0x7e,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x8f]
+0x00,0x7f,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x8f]
+0x00,0x80,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x8f]
+0x00,0xc1,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x8f]
+0x00,0xf0,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x8f]
+0x00,0xf7,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x8f]
+0x00,0xfd,0x00,0x8f
+
+# CHECK: s_lshr_b32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x8f,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x8f,0x56,0x34,0x12,0xaf
+
+# CHECK: s_lshr_b32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x8f,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x8f,0x73,0x72,0x71,0x3f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], s0 ; encoding: [0x00,0x00,0x80,0x8f]
+0x00,0x00,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[2:3], s[0:1], s0 ; encoding: [0x00,0x00,0x82,0x8f]
+0x00,0x00,0x82,0x8f
+
+# CHECK: s_lshr_b64 s[100:101], s[0:1], s0 ; encoding: [0x00,0x00,0xe4,0x8f]
+0x00,0x00,0xe4,0x8f
+
+# CHECK: s_lshr_b64 flat_scratch, s[0:1], s0 ; encoding: [0x00,0x00,0xe6,0x8f]
+0x00,0x00,0xe6,0x8f
+
+# CHECK: s_lshr_b64 vcc, s[0:1], s0 ; encoding: [0x00,0x00,0xea,0x8f]
+0x00,0x00,0xea,0x8f
+
+# CHECK: s_lshr_b64 tba, s[0:1], s0 ; encoding: [0x00,0x00,0xec,0x8f]
+0x00,0x00,0xec,0x8f
+
+# CHECK: s_lshr_b64 tma, s[0:1], s0 ; encoding: [0x00,0x00,0xee,0x8f]
+0x00,0x00,0xee,0x8f
+
+# CHECK: s_lshr_b64 ttmp[10:11], s[0:1], s0 ; encoding: [0x00,0x00,0xfa,0x8f]
+0x00,0x00,0xfa,0x8f
+
+# CHECK: s_lshr_b64 exec, s[0:1], s0 ; encoding: [0x00,0x00,0xfe,0x8f]
+0x00,0x00,0xfe,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[2:3], s0 ; encoding: [0x02,0x00,0x80,0x8f]
+0x02,0x00,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[100:101], s0 ; encoding: [0x64,0x00,0x80,0x8f]
+0x64,0x00,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], flat_scratch, s0 ; encoding: [0x66,0x00,0x80,0x8f]
+0x66,0x00,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], vcc, s0 ; encoding: [0x6a,0x00,0x80,0x8f]
+0x6a,0x00,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], tba, s0 ; encoding: [0x6c,0x00,0x80,0x8f]
+0x6c,0x00,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], tma, s0 ; encoding: [0x6e,0x00,0x80,0x8f]
+0x6e,0x00,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], ttmp[10:11], s0 ; encoding: [0x7a,0x00,0x80,0x8f]
+0x7a,0x00,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], exec, s0 ; encoding: [0x7e,0x00,0x80,0x8f]
+0x7e,0x00,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], 0, s0 ; encoding: [0x80,0x00,0x80,0x8f]
+0x80,0x00,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], -1, s0 ; encoding: [0xc1,0x00,0x80,0x8f]
+0xc1,0x00,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], 0.5, s0 ; encoding: [0xf0,0x00,0x80,0x8f]
+0xf0,0x00,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], -4.0, s0 ; encoding: [0xf7,0x00,0x80,0x8f]
+0xf7,0x00,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], 0xaf123456, s0 ; encoding: [0xff,0x00,0x80,0x8f,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x80,0x8f,0x56,0x34,0x12,0xaf
+
+# CHECK: s_lshr_b64 s[0:1], 0x3f717273, s0 ; encoding: [0xff,0x00,0x80,0x8f,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x80,0x8f,0x73,0x72,0x71,0x3f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], s101 ; encoding: [0x00,0x65,0x80,0x8f]
+0x00,0x65,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], flat_scratch_lo ; encoding: [0x00,0x66,0x80,0x8f]
+0x00,0x66,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], flat_scratch_hi ; encoding: [0x00,0x67,0x80,0x8f]
+0x00,0x67,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], vcc_lo ; encoding: [0x00,0x6a,0x80,0x8f]
+0x00,0x6a,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], vcc_hi ; encoding: [0x00,0x6b,0x80,0x8f]
+0x00,0x6b,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], tba_lo ; encoding: [0x00,0x6c,0x80,0x8f]
+0x00,0x6c,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], tba_hi ; encoding: [0x00,0x6d,0x80,0x8f]
+0x00,0x6d,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], tma_lo ; encoding: [0x00,0x6e,0x80,0x8f]
+0x00,0x6e,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], tma_hi ; encoding: [0x00,0x6f,0x80,0x8f]
+0x00,0x6f,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], ttmp11 ; encoding: [0x00,0x7b,0x80,0x8f]
+0x00,0x7b,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], m0 ; encoding: [0x00,0x7c,0x80,0x8f]
+0x00,0x7c,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], exec_lo ; encoding: [0x00,0x7e,0x80,0x8f]
+0x00,0x7e,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], exec_hi ; encoding: [0x00,0x7f,0x80,0x8f]
+0x00,0x7f,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x80,0x80,0x8f]
+0x00,0x80,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], -1 ; encoding: [0x00,0xc1,0x80,0x8f]
+0x00,0xc1,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0xf0,0x80,0x8f]
+0x00,0xf0,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0xf7,0x80,0x8f]
+0x00,0xf7,0x80,0x8f
+
+# CHECK: s_lshr_b64 s[0:1], s[0:1], scc ; encoding: [0x00,0xfd,0x80,0x8f]
+0x00,0xfd,0x80,0x8f
+
+# CHECK: s_ashr_i32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x90]
+0x00,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x90]
+0x00,0x00,0x65,0x90
+
+# CHECK: s_ashr_i32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x90]
+0x00,0x00,0x66,0x90
+
+# CHECK: s_ashr_i32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x90]
+0x00,0x00,0x67,0x90
+
+# CHECK: s_ashr_i32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x90]
+0x00,0x00,0x6a,0x90
+
+# CHECK: s_ashr_i32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x90]
+0x00,0x00,0x6b,0x90
+
+# CHECK: s_ashr_i32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x90]
+0x00,0x00,0x6c,0x90
+
+# CHECK: s_ashr_i32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x90]
+0x00,0x00,0x6d,0x90
+
+# CHECK: s_ashr_i32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x90]
+0x00,0x00,0x6e,0x90
+
+# CHECK: s_ashr_i32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x90]
+0x00,0x00,0x6f,0x90
+
+# CHECK: s_ashr_i32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x90]
+0x00,0x00,0x7b,0x90
+
+# CHECK: s_ashr_i32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x90]
+0x00,0x00,0x7c,0x90
+
+# CHECK: s_ashr_i32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x90]
+0x00,0x00,0x7e,0x90
+
+# CHECK: s_ashr_i32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x90]
+0x00,0x00,0x7f,0x90
+
+# CHECK: s_ashr_i32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x90]
+0x65,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x90]
+0x66,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x90]
+0x67,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x90]
+0x6a,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x90]
+0x6b,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x90]
+0x6c,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x90]
+0x6d,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x90]
+0x6e,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x90]
+0x6f,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x90]
+0x7b,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x90]
+0x7c,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x90]
+0x7e,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x90]
+0x7f,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x90]
+0x80,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x90]
+0xc1,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x90]
+0xf0,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x90]
+0xf7,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x90]
+0xfd,0x00,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x90]
+0x00,0x65,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x90]
+0x00,0x66,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x90]
+0x00,0x67,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x90]
+0x00,0x6a,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x90]
+0x00,0x6b,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x90]
+0x00,0x6c,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x90]
+0x00,0x6d,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x90]
+0x00,0x6e,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x90]
+0x00,0x6f,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x90]
+0x00,0x7b,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x90]
+0x00,0x7c,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x90]
+0x00,0x7e,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x90]
+0x00,0x7f,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x90]
+0x00,0x80,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x90]
+0x00,0xc1,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x90]
+0x00,0xf0,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x90]
+0x00,0xf7,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x90]
+0x00,0xfd,0x00,0x90
+
+# CHECK: s_ashr_i32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x90,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x90,0x56,0x34,0x12,0xaf
+
+# CHECK: s_ashr_i32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x90,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x90,0x73,0x72,0x71,0x3f
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], s0 ; encoding: [0x00,0x00,0x80,0x90]
+0x00,0x00,0x80,0x90
+
+# CHECK: s_ashr_i64 s[2:3], s[0:1], s0 ; encoding: [0x00,0x00,0x82,0x90]
+0x00,0x00,0x82,0x90
+
+# CHECK: s_ashr_i64 s[100:101], s[0:1], s0 ; encoding: [0x00,0x00,0xe4,0x90]
+0x00,0x00,0xe4,0x90
+
+# CHECK: s_ashr_i64 flat_scratch, s[0:1], s0 ; encoding: [0x00,0x00,0xe6,0x90]
+0x00,0x00,0xe6,0x90
+
+# CHECK: s_ashr_i64 vcc, s[0:1], s0 ; encoding: [0x00,0x00,0xea,0x90]
+0x00,0x00,0xea,0x90
+
+# CHECK: s_ashr_i64 tba, s[0:1], s0 ; encoding: [0x00,0x00,0xec,0x90]
+0x00,0x00,0xec,0x90
+
+# CHECK: s_ashr_i64 tma, s[0:1], s0 ; encoding: [0x00,0x00,0xee,0x90]
+0x00,0x00,0xee,0x90
+
+# CHECK: s_ashr_i64 ttmp[10:11], s[0:1], s0 ; encoding: [0x00,0x00,0xfa,0x90]
+0x00,0x00,0xfa,0x90
+
+# CHECK: s_ashr_i64 exec, s[0:1], s0 ; encoding: [0x00,0x00,0xfe,0x90]
+0x00,0x00,0xfe,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[2:3], s0 ; encoding: [0x02,0x00,0x80,0x90]
+0x02,0x00,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[100:101], s0 ; encoding: [0x64,0x00,0x80,0x90]
+0x64,0x00,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], flat_scratch, s0 ; encoding: [0x66,0x00,0x80,0x90]
+0x66,0x00,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], vcc, s0 ; encoding: [0x6a,0x00,0x80,0x90]
+0x6a,0x00,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], tba, s0 ; encoding: [0x6c,0x00,0x80,0x90]
+0x6c,0x00,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], tma, s0 ; encoding: [0x6e,0x00,0x80,0x90]
+0x6e,0x00,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], ttmp[10:11], s0 ; encoding: [0x7a,0x00,0x80,0x90]
+0x7a,0x00,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], exec, s0 ; encoding: [0x7e,0x00,0x80,0x90]
+0x7e,0x00,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], 0, s0 ; encoding: [0x80,0x00,0x80,0x90]
+0x80,0x00,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], -1, s0 ; encoding: [0xc1,0x00,0x80,0x90]
+0xc1,0x00,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], 0.5, s0 ; encoding: [0xf0,0x00,0x80,0x90]
+0xf0,0x00,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], -4.0, s0 ; encoding: [0xf7,0x00,0x80,0x90]
+0xf7,0x00,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], 0xaf123456, s0 ; encoding: [0xff,0x00,0x80,0x90,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x80,0x90,0x56,0x34,0x12,0xaf
+
+# CHECK: s_ashr_i64 s[0:1], 0x3f717273, s0 ; encoding: [0xff,0x00,0x80,0x90,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x80,0x90,0x73,0x72,0x71,0x3f
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], s101 ; encoding: [0x00,0x65,0x80,0x90]
+0x00,0x65,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], flat_scratch_lo ; encoding: [0x00,0x66,0x80,0x90]
+0x00,0x66,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], flat_scratch_hi ; encoding: [0x00,0x67,0x80,0x90]
+0x00,0x67,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], vcc_lo ; encoding: [0x00,0x6a,0x80,0x90]
+0x00,0x6a,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], vcc_hi ; encoding: [0x00,0x6b,0x80,0x90]
+0x00,0x6b,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], tba_lo ; encoding: [0x00,0x6c,0x80,0x90]
+0x00,0x6c,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], tba_hi ; encoding: [0x00,0x6d,0x80,0x90]
+0x00,0x6d,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], tma_lo ; encoding: [0x00,0x6e,0x80,0x90]
+0x00,0x6e,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], tma_hi ; encoding: [0x00,0x6f,0x80,0x90]
+0x00,0x6f,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], ttmp11 ; encoding: [0x00,0x7b,0x80,0x90]
+0x00,0x7b,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], m0 ; encoding: [0x00,0x7c,0x80,0x90]
+0x00,0x7c,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], exec_lo ; encoding: [0x00,0x7e,0x80,0x90]
+0x00,0x7e,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], exec_hi ; encoding: [0x00,0x7f,0x80,0x90]
+0x00,0x7f,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x80,0x80,0x90]
+0x00,0x80,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], -1 ; encoding: [0x00,0xc1,0x80,0x90]
+0x00,0xc1,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0xf0,0x80,0x90]
+0x00,0xf0,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0xf7,0x80,0x90]
+0x00,0xf7,0x80,0x90
+
+# CHECK: s_ashr_i64 s[0:1], s[0:1], scc ; encoding: [0x00,0xfd,0x80,0x90]
+0x00,0xfd,0x80,0x90
+
+# CHECK: s_bfm_b32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x91]
+0x00,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x91]
+0x00,0x00,0x65,0x91
+
+# CHECK: s_bfm_b32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x91]
+0x00,0x00,0x66,0x91
+
+# CHECK: s_bfm_b32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x91]
+0x00,0x00,0x67,0x91
+
+# CHECK: s_bfm_b32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x91]
+0x00,0x00,0x6a,0x91
+
+# CHECK: s_bfm_b32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x91]
+0x00,0x00,0x6b,0x91
+
+# CHECK: s_bfm_b32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x91]
+0x00,0x00,0x6c,0x91
+
+# CHECK: s_bfm_b32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x91]
+0x00,0x00,0x6d,0x91
+
+# CHECK: s_bfm_b32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x91]
+0x00,0x00,0x6e,0x91
+
+# CHECK: s_bfm_b32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x91]
+0x00,0x00,0x6f,0x91
+
+# CHECK: s_bfm_b32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x91]
+0x00,0x00,0x7b,0x91
+
+# CHECK: s_bfm_b32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x91]
+0x00,0x00,0x7c,0x91
+
+# CHECK: s_bfm_b32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x91]
+0x00,0x00,0x7e,0x91
+
+# CHECK: s_bfm_b32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x91]
+0x00,0x00,0x7f,0x91
+
+# CHECK: s_bfm_b32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x91]
+0x65,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x91]
+0x66,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x91]
+0x67,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x91]
+0x6a,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x91]
+0x6b,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x91]
+0x6c,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x91]
+0x6d,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x91]
+0x6e,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x91]
+0x6f,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x91]
+0x7b,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x91]
+0x7c,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x91]
+0x7e,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x91]
+0x7f,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x91]
+0x80,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x91]
+0xc1,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x91]
+0xf0,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x91]
+0xf7,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x91]
+0xfd,0x00,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x91]
+0x00,0x65,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x91]
+0x00,0x66,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x91]
+0x00,0x67,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x91]
+0x00,0x6a,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x91]
+0x00,0x6b,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x91]
+0x00,0x6c,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x91]
+0x00,0x6d,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x91]
+0x00,0x6e,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x91]
+0x00,0x6f,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x91]
+0x00,0x7b,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x91]
+0x00,0x7c,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x91]
+0x00,0x7e,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x91]
+0x00,0x7f,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x91]
+0x00,0x80,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x91]
+0x00,0xc1,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x91]
+0x00,0xf0,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x91]
+0x00,0xf7,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x91]
+0x00,0xfd,0x00,0x91
+
+# CHECK: s_bfm_b32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x91,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x91,0x56,0x34,0x12,0xaf
+
+# CHECK: s_bfm_b32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x91,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x91,0x73,0x72,0x71,0x3f
+
+# CHECK: s_bfm_b64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x80,0x91]
+0x00,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[2:3], s0, s0 ; encoding: [0x00,0x00,0x82,0x91]
+0x00,0x00,0x82,0x91
+
+# CHECK: s_bfm_b64 s[100:101], s0, s0 ; encoding: [0x00,0x00,0xe4,0x91]
+0x00,0x00,0xe4,0x91
+
+# CHECK: s_bfm_b64 flat_scratch, s0, s0 ; encoding: [0x00,0x00,0xe6,0x91]
+0x00,0x00,0xe6,0x91
+
+# CHECK: s_bfm_b64 vcc, s0, s0 ; encoding: [0x00,0x00,0xea,0x91]
+0x00,0x00,0xea,0x91
+
+# CHECK: s_bfm_b64 tba, s0, s0 ; encoding: [0x00,0x00,0xec,0x91]
+0x00,0x00,0xec,0x91
+
+# CHECK: s_bfm_b64 tma, s0, s0 ; encoding: [0x00,0x00,0xee,0x91]
+0x00,0x00,0xee,0x91
+
+# CHECK: s_bfm_b64 ttmp[10:11], s0, s0 ; encoding: [0x00,0x00,0xfa,0x91]
+0x00,0x00,0xfa,0x91
+
+# CHECK: s_bfm_b64 exec, s0, s0 ; encoding: [0x00,0x00,0xfe,0x91]
+0x00,0x00,0xfe,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s101, s0 ; encoding: [0x65,0x00,0x80,0x91]
+0x65,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x80,0x91]
+0x66,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x80,0x91]
+0x67,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], vcc_lo, s0 ; encoding: [0x6a,0x00,0x80,0x91]
+0x6a,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], vcc_hi, s0 ; encoding: [0x6b,0x00,0x80,0x91]
+0x6b,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], tba_lo, s0 ; encoding: [0x6c,0x00,0x80,0x91]
+0x6c,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], tba_hi, s0 ; encoding: [0x6d,0x00,0x80,0x91]
+0x6d,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], tma_lo, s0 ; encoding: [0x6e,0x00,0x80,0x91]
+0x6e,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], tma_hi, s0 ; encoding: [0x6f,0x00,0x80,0x91]
+0x6f,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], ttmp11, s0 ; encoding: [0x7b,0x00,0x80,0x91]
+0x7b,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], m0, s0 ; encoding: [0x7c,0x00,0x80,0x91]
+0x7c,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], exec_lo, s0 ; encoding: [0x7e,0x00,0x80,0x91]
+0x7e,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], exec_hi, s0 ; encoding: [0x7f,0x00,0x80,0x91]
+0x7f,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], 0, s0 ; encoding: [0x80,0x00,0x80,0x91]
+0x80,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], -1, s0 ; encoding: [0xc1,0x00,0x80,0x91]
+0xc1,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], 0.5, s0 ; encoding: [0xf0,0x00,0x80,0x91]
+0xf0,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], -4.0, s0 ; encoding: [0xf7,0x00,0x80,0x91]
+0xf7,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], scc, s0 ; encoding: [0xfd,0x00,0x80,0x91]
+0xfd,0x00,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, s101 ; encoding: [0x00,0x65,0x80,0x91]
+0x00,0x65,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, flat_scratch_lo ; encoding: [0x00,0x66,0x80,0x91]
+0x00,0x66,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, flat_scratch_hi ; encoding: [0x00,0x67,0x80,0x91]
+0x00,0x67,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, vcc_lo ; encoding: [0x00,0x6a,0x80,0x91]
+0x00,0x6a,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, vcc_hi ; encoding: [0x00,0x6b,0x80,0x91]
+0x00,0x6b,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, tba_lo ; encoding: [0x00,0x6c,0x80,0x91]
+0x00,0x6c,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, tba_hi ; encoding: [0x00,0x6d,0x80,0x91]
+0x00,0x6d,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, tma_lo ; encoding: [0x00,0x6e,0x80,0x91]
+0x00,0x6e,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, tma_hi ; encoding: [0x00,0x6f,0x80,0x91]
+0x00,0x6f,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, ttmp11 ; encoding: [0x00,0x7b,0x80,0x91]
+0x00,0x7b,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, m0 ; encoding: [0x00,0x7c,0x80,0x91]
+0x00,0x7c,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, exec_lo ; encoding: [0x00,0x7e,0x80,0x91]
+0x00,0x7e,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, exec_hi ; encoding: [0x00,0x7f,0x80,0x91]
+0x00,0x7f,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, 0 ; encoding: [0x00,0x80,0x80,0x91]
+0x00,0x80,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, -1 ; encoding: [0x00,0xc1,0x80,0x91]
+0x00,0xc1,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, 0.5 ; encoding: [0x00,0xf0,0x80,0x91]
+0x00,0xf0,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, -4.0 ; encoding: [0x00,0xf7,0x80,0x91]
+0x00,0xf7,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, scc ; encoding: [0x00,0xfd,0x80,0x91]
+0x00,0xfd,0x80,0x91
+
+# CHECK: s_bfm_b64 s[0:1], s0, 0xaf123456 ; encoding: [0x00,0xff,0x80,0x91,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x80,0x91,0x56,0x34,0x12,0xaf
+
+# CHECK: s_bfm_b64 s[0:1], s0, 0x3f717273 ; encoding: [0x00,0xff,0x80,0x91,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x80,0x91,0x73,0x72,0x71,0x3f
+
+# CHECK: s_mul_i32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x92]
+0x00,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x92]
+0x00,0x00,0x65,0x92
+
+# CHECK: s_mul_i32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x92]
+0x00,0x00,0x66,0x92
+
+# CHECK: s_mul_i32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x92]
+0x00,0x00,0x67,0x92
+
+# CHECK: s_mul_i32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x92]
+0x00,0x00,0x6a,0x92
+
+# CHECK: s_mul_i32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x92]
+0x00,0x00,0x6b,0x92
+
+# CHECK: s_mul_i32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x92]
+0x00,0x00,0x6c,0x92
+
+# CHECK: s_mul_i32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x92]
+0x00,0x00,0x6d,0x92
+
+# CHECK: s_mul_i32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x92]
+0x00,0x00,0x6e,0x92
+
+# CHECK: s_mul_i32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x92]
+0x00,0x00,0x6f,0x92
+
+# CHECK: s_mul_i32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x92]
+0x00,0x00,0x7b,0x92
+
+# CHECK: s_mul_i32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x92]
+0x00,0x00,0x7c,0x92
+
+# CHECK: s_mul_i32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x92]
+0x00,0x00,0x7e,0x92
+
+# CHECK: s_mul_i32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x92]
+0x00,0x00,0x7f,0x92
+
+# CHECK: s_mul_i32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x92]
+0x65,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x92]
+0x66,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x92]
+0x67,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x92]
+0x6a,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x92]
+0x6b,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x92]
+0x6c,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x92]
+0x6d,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x92]
+0x6e,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x92]
+0x6f,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x92]
+0x7b,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x92]
+0x7c,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x92]
+0x7e,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x92]
+0x7f,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x92]
+0x80,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x92]
+0xc1,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x92]
+0xf0,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x92]
+0xf7,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x92]
+0xfd,0x00,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x92]
+0x00,0x65,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x92]
+0x00,0x66,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x92]
+0x00,0x67,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x92]
+0x00,0x6a,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x92]
+0x00,0x6b,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x92]
+0x00,0x6c,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x92]
+0x00,0x6d,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x92]
+0x00,0x6e,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x92]
+0x00,0x6f,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x92]
+0x00,0x7b,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x92]
+0x00,0x7c,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x92]
+0x00,0x7e,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x92]
+0x00,0x7f,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x92]
+0x00,0x80,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x92]
+0x00,0xc1,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x92]
+0x00,0xf0,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x92]
+0x00,0xf7,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x92]
+0x00,0xfd,0x00,0x92
+
+# CHECK: s_mul_i32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x92,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x92,0x56,0x34,0x12,0xaf
+
+# CHECK: s_mul_i32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x92,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x92,0x73,0x72,0x71,0x3f
+
+# CHECK: s_bfe_u32 s0, s0, s0 ; encoding: [0x00,0x00,0x80,0x92]
+0x00,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s101, s0, s0 ; encoding: [0x00,0x00,0xe5,0x92]
+0x00,0x00,0xe5,0x92
+
+# CHECK: s_bfe_u32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0xe6,0x92]
+0x00,0x00,0xe6,0x92
+
+# CHECK: s_bfe_u32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0xe7,0x92]
+0x00,0x00,0xe7,0x92
+
+# CHECK: s_bfe_u32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0xea,0x92]
+0x00,0x00,0xea,0x92
+
+# CHECK: s_bfe_u32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0xeb,0x92]
+0x00,0x00,0xeb,0x92
+
+# CHECK: s_bfe_u32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0xec,0x92]
+0x00,0x00,0xec,0x92
+
+# CHECK: s_bfe_u32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0xed,0x92]
+0x00,0x00,0xed,0x92
+
+# CHECK: s_bfe_u32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0xee,0x92]
+0x00,0x00,0xee,0x92
+
+# CHECK: s_bfe_u32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0xef,0x92]
+0x00,0x00,0xef,0x92
+
+# CHECK: s_bfe_u32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0xfb,0x92]
+0x00,0x00,0xfb,0x92
+
+# CHECK: s_bfe_u32 m0, s0, s0 ; encoding: [0x00,0x00,0xfc,0x92]
+0x00,0x00,0xfc,0x92
+
+# CHECK: s_bfe_u32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0xfe,0x92]
+0x00,0x00,0xfe,0x92
+
+# CHECK: s_bfe_u32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0xff,0x92]
+0x00,0x00,0xff,0x92
+
+# CHECK: s_bfe_u32 s0, s101, s0 ; encoding: [0x65,0x00,0x80,0x92]
+0x65,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x80,0x92]
+0x66,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x80,0x92]
+0x67,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x80,0x92]
+0x6a,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x80,0x92]
+0x6b,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x80,0x92]
+0x6c,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x80,0x92]
+0x6d,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x80,0x92]
+0x6e,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x80,0x92]
+0x6f,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x80,0x92]
+0x7b,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, m0, s0 ; encoding: [0x7c,0x00,0x80,0x92]
+0x7c,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x80,0x92]
+0x7e,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x80,0x92]
+0x7f,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, 0, s0 ; encoding: [0x80,0x00,0x80,0x92]
+0x80,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, -1, s0 ; encoding: [0xc1,0x00,0x80,0x92]
+0xc1,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x80,0x92]
+0xf0,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x80,0x92]
+0xf7,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, scc, s0 ; encoding: [0xfd,0x00,0x80,0x92]
+0xfd,0x00,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, s101 ; encoding: [0x00,0x65,0x80,0x92]
+0x00,0x65,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x80,0x92]
+0x00,0x66,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x80,0x92]
+0x00,0x67,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x80,0x92]
+0x00,0x6a,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x80,0x92]
+0x00,0x6b,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x80,0x92]
+0x00,0x6c,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x80,0x92]
+0x00,0x6d,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x80,0x92]
+0x00,0x6e,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x80,0x92]
+0x00,0x6f,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x80,0x92]
+0x00,0x7b,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, m0 ; encoding: [0x00,0x7c,0x80,0x92]
+0x00,0x7c,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x80,0x92]
+0x00,0x7e,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x80,0x92]
+0x00,0x7f,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, 0 ; encoding: [0x00,0x80,0x80,0x92]
+0x00,0x80,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, -1 ; encoding: [0x00,0xc1,0x80,0x92]
+0x00,0xc1,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x80,0x92]
+0x00,0xf0,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x80,0x92]
+0x00,0xf7,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, scc ; encoding: [0x00,0xfd,0x80,0x92]
+0x00,0xfd,0x80,0x92
+
+# CHECK: s_bfe_u32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x80,0x92,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x80,0x92,0x56,0x34,0x12,0xaf
+
+# CHECK: s_bfe_u32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x80,0x92,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x80,0x92,0x73,0x72,0x71,0x3f
+
+# CHECK: s_bfe_i32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x93]
+0x00,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x93]
+0x00,0x00,0x65,0x93
+
+# CHECK: s_bfe_i32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x93]
+0x00,0x00,0x66,0x93
+
+# CHECK: s_bfe_i32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x93]
+0x00,0x00,0x67,0x93
+
+# CHECK: s_bfe_i32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x93]
+0x00,0x00,0x6a,0x93
+
+# CHECK: s_bfe_i32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x93]
+0x00,0x00,0x6b,0x93
+
+# CHECK: s_bfe_i32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x93]
+0x00,0x00,0x6c,0x93
+
+# CHECK: s_bfe_i32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x93]
+0x00,0x00,0x6d,0x93
+
+# CHECK: s_bfe_i32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x93]
+0x00,0x00,0x6e,0x93
+
+# CHECK: s_bfe_i32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x93]
+0x00,0x00,0x6f,0x93
+
+# CHECK: s_bfe_i32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x93]
+0x00,0x00,0x7b,0x93
+
+# CHECK: s_bfe_i32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x93]
+0x00,0x00,0x7c,0x93
+
+# CHECK: s_bfe_i32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x93]
+0x00,0x00,0x7e,0x93
+
+# CHECK: s_bfe_i32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x93]
+0x00,0x00,0x7f,0x93
+
+# CHECK: s_bfe_i32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x93]
+0x65,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x93]
+0x66,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x93]
+0x67,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x93]
+0x6a,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x93]
+0x6b,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x93]
+0x6c,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x93]
+0x6d,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x93]
+0x6e,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x93]
+0x6f,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x93]
+0x7b,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x93]
+0x7c,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x93]
+0x7e,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x93]
+0x7f,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x93]
+0x80,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x93]
+0xc1,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x93]
+0xf0,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x93]
+0xf7,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x93]
+0xfd,0x00,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x93]
+0x00,0x65,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x93]
+0x00,0x66,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x93]
+0x00,0x67,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x93]
+0x00,0x6a,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x93]
+0x00,0x6b,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x93]
+0x00,0x6c,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x93]
+0x00,0x6d,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x93]
+0x00,0x6e,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x93]
+0x00,0x6f,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x93]
+0x00,0x7b,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x93]
+0x00,0x7c,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x93]
+0x00,0x7e,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x93]
+0x00,0x7f,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x93]
+0x00,0x80,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x93]
+0x00,0xc1,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x93]
+0x00,0xf0,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x93]
+0x00,0xf7,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x93]
+0x00,0xfd,0x00,0x93
+
+# CHECK: s_bfe_i32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x93,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x93,0x56,0x34,0x12,0xaf
+
+# CHECK: s_bfe_i32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x93,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x93,0x73,0x72,0x71,0x3f
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], s0 ; encoding: [0x00,0x00,0x80,0x93]
+0x00,0x00,0x80,0x93
+
+# CHECK: s_bfe_u64 s[2:3], s[0:1], s0 ; encoding: [0x00,0x00,0x82,0x93]
+0x00,0x00,0x82,0x93
+
+# CHECK: s_bfe_u64 s[100:101], s[0:1], s0 ; encoding: [0x00,0x00,0xe4,0x93]
+0x00,0x00,0xe4,0x93
+
+# CHECK: s_bfe_u64 flat_scratch, s[0:1], s0 ; encoding: [0x00,0x00,0xe6,0x93]
+0x00,0x00,0xe6,0x93
+
+# CHECK: s_bfe_u64 vcc, s[0:1], s0 ; encoding: [0x00,0x00,0xea,0x93]
+0x00,0x00,0xea,0x93
+
+# CHECK: s_bfe_u64 tba, s[0:1], s0 ; encoding: [0x00,0x00,0xec,0x93]
+0x00,0x00,0xec,0x93
+
+# CHECK: s_bfe_u64 tma, s[0:1], s0 ; encoding: [0x00,0x00,0xee,0x93]
+0x00,0x00,0xee,0x93
+
+# CHECK: s_bfe_u64 ttmp[10:11], s[0:1], s0 ; encoding: [0x00,0x00,0xfa,0x93]
+0x00,0x00,0xfa,0x93
+
+# CHECK: s_bfe_u64 exec, s[0:1], s0 ; encoding: [0x00,0x00,0xfe,0x93]
+0x00,0x00,0xfe,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[2:3], s0 ; encoding: [0x02,0x00,0x80,0x93]
+0x02,0x00,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[100:101], s0 ; encoding: [0x64,0x00,0x80,0x93]
+0x64,0x00,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], flat_scratch, s0 ; encoding: [0x66,0x00,0x80,0x93]
+0x66,0x00,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], vcc, s0 ; encoding: [0x6a,0x00,0x80,0x93]
+0x6a,0x00,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], tba, s0 ; encoding: [0x6c,0x00,0x80,0x93]
+0x6c,0x00,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], tma, s0 ; encoding: [0x6e,0x00,0x80,0x93]
+0x6e,0x00,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], ttmp[10:11], s0 ; encoding: [0x7a,0x00,0x80,0x93]
+0x7a,0x00,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], exec, s0 ; encoding: [0x7e,0x00,0x80,0x93]
+0x7e,0x00,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], 0, s0 ; encoding: [0x80,0x00,0x80,0x93]
+0x80,0x00,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], -1, s0 ; encoding: [0xc1,0x00,0x80,0x93]
+0xc1,0x00,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], 0.5, s0 ; encoding: [0xf0,0x00,0x80,0x93]
+0xf0,0x00,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], -4.0, s0 ; encoding: [0xf7,0x00,0x80,0x93]
+0xf7,0x00,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], 0xaf123456, s0 ; encoding: [0xff,0x00,0x80,0x93,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x80,0x93,0x56,0x34,0x12,0xaf
+
+# CHECK: s_bfe_u64 s[0:1], 0x3f717273, s0 ; encoding: [0xff,0x00,0x80,0x93,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x80,0x93,0x73,0x72,0x71,0x3f
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], s101 ; encoding: [0x00,0x65,0x80,0x93]
+0x00,0x65,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], flat_scratch_lo ; encoding: [0x00,0x66,0x80,0x93]
+0x00,0x66,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], flat_scratch_hi ; encoding: [0x00,0x67,0x80,0x93]
+0x00,0x67,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], vcc_lo ; encoding: [0x00,0x6a,0x80,0x93]
+0x00,0x6a,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], vcc_hi ; encoding: [0x00,0x6b,0x80,0x93]
+0x00,0x6b,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], tba_lo ; encoding: [0x00,0x6c,0x80,0x93]
+0x00,0x6c,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], tba_hi ; encoding: [0x00,0x6d,0x80,0x93]
+0x00,0x6d,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], tma_lo ; encoding: [0x00,0x6e,0x80,0x93]
+0x00,0x6e,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], tma_hi ; encoding: [0x00,0x6f,0x80,0x93]
+0x00,0x6f,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], ttmp11 ; encoding: [0x00,0x7b,0x80,0x93]
+0x00,0x7b,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], m0 ; encoding: [0x00,0x7c,0x80,0x93]
+0x00,0x7c,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], exec_lo ; encoding: [0x00,0x7e,0x80,0x93]
+0x00,0x7e,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], exec_hi ; encoding: [0x00,0x7f,0x80,0x93]
+0x00,0x7f,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x80,0x80,0x93]
+0x00,0x80,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], -1 ; encoding: [0x00,0xc1,0x80,0x93]
+0x00,0xc1,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0xf0,0x80,0x93]
+0x00,0xf0,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0xf7,0x80,0x93]
+0x00,0xf7,0x80,0x93
+
+# CHECK: s_bfe_u64 s[0:1], s[0:1], scc ; encoding: [0x00,0xfd,0x80,0x93]
+0x00,0xfd,0x80,0x93
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], s0 ; encoding: [0x00,0x00,0x00,0x94]
+0x00,0x00,0x00,0x94
+
+# CHECK: s_bfe_i64 s[2:3], s[0:1], s0 ; encoding: [0x00,0x00,0x02,0x94]
+0x00,0x00,0x02,0x94
+
+# CHECK: s_bfe_i64 s[100:101], s[0:1], s0 ; encoding: [0x00,0x00,0x64,0x94]
+0x00,0x00,0x64,0x94
+
+# CHECK: s_bfe_i64 flat_scratch, s[0:1], s0 ; encoding: [0x00,0x00,0x66,0x94]
+0x00,0x00,0x66,0x94
+
+# CHECK: s_bfe_i64 vcc, s[0:1], s0 ; encoding: [0x00,0x00,0x6a,0x94]
+0x00,0x00,0x6a,0x94
+
+# CHECK: s_bfe_i64 tba, s[0:1], s0 ; encoding: [0x00,0x00,0x6c,0x94]
+0x00,0x00,0x6c,0x94
+
+# CHECK: s_bfe_i64 tma, s[0:1], s0 ; encoding: [0x00,0x00,0x6e,0x94]
+0x00,0x00,0x6e,0x94
+
+# CHECK: s_bfe_i64 ttmp[10:11], s[0:1], s0 ; encoding: [0x00,0x00,0x7a,0x94]
+0x00,0x00,0x7a,0x94
+
+# CHECK: s_bfe_i64 exec, s[0:1], s0 ; encoding: [0x00,0x00,0x7e,0x94]
+0x00,0x00,0x7e,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[2:3], s0 ; encoding: [0x02,0x00,0x00,0x94]
+0x02,0x00,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[100:101], s0 ; encoding: [0x64,0x00,0x00,0x94]
+0x64,0x00,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], flat_scratch, s0 ; encoding: [0x66,0x00,0x00,0x94]
+0x66,0x00,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], vcc, s0 ; encoding: [0x6a,0x00,0x00,0x94]
+0x6a,0x00,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], tba, s0 ; encoding: [0x6c,0x00,0x00,0x94]
+0x6c,0x00,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], tma, s0 ; encoding: [0x6e,0x00,0x00,0x94]
+0x6e,0x00,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], ttmp[10:11], s0 ; encoding: [0x7a,0x00,0x00,0x94]
+0x7a,0x00,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], exec, s0 ; encoding: [0x7e,0x00,0x00,0x94]
+0x7e,0x00,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], 0, s0 ; encoding: [0x80,0x00,0x00,0x94]
+0x80,0x00,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], -1, s0 ; encoding: [0xc1,0x00,0x00,0x94]
+0xc1,0x00,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x94]
+0xf0,0x00,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x94]
+0xf7,0x00,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], 0xaf123456, s0 ; encoding: [0xff,0x00,0x00,0x94,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x94,0x56,0x34,0x12,0xaf
+
+# CHECK: s_bfe_i64 s[0:1], 0x3f717273, s0 ; encoding: [0xff,0x00,0x00,0x94,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x94,0x73,0x72,0x71,0x3f
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], s101 ; encoding: [0x00,0x65,0x00,0x94]
+0x00,0x65,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x94]
+0x00,0x66,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x94]
+0x00,0x67,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], vcc_lo ; encoding: [0x00,0x6a,0x00,0x94]
+0x00,0x6a,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], vcc_hi ; encoding: [0x00,0x6b,0x00,0x94]
+0x00,0x6b,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], tba_lo ; encoding: [0x00,0x6c,0x00,0x94]
+0x00,0x6c,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], tba_hi ; encoding: [0x00,0x6d,0x00,0x94]
+0x00,0x6d,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], tma_lo ; encoding: [0x00,0x6e,0x00,0x94]
+0x00,0x6e,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], tma_hi ; encoding: [0x00,0x6f,0x00,0x94]
+0x00,0x6f,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], ttmp11 ; encoding: [0x00,0x7b,0x00,0x94]
+0x00,0x7b,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], m0 ; encoding: [0x00,0x7c,0x00,0x94]
+0x00,0x7c,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], exec_lo ; encoding: [0x00,0x7e,0x00,0x94]
+0x00,0x7e,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], exec_hi ; encoding: [0x00,0x7f,0x00,0x94]
+0x00,0x7f,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x80,0x00,0x94]
+0x00,0x80,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], -1 ; encoding: [0x00,0xc1,0x00,0x94]
+0x00,0xc1,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0xf0,0x00,0x94]
+0x00,0xf0,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0xf7,0x00,0x94]
+0x00,0xf7,0x00,0x94
+
+# CHECK: s_bfe_i64 s[0:1], s[0:1], scc ; encoding: [0x00,0xfd,0x00,0x94]
+0x00,0xfd,0x00,0x94
+
+# CHECK: s_cbranch_g_fork s[0:1], s[0:1] ; encoding: [0x00,0x00,0x80,0x94]
+0x00,0x00,0x80,0x94
+
+# CHECK: s_cbranch_g_fork s[2:3], s[0:1] ; encoding: [0x02,0x00,0x80,0x94]
+0x02,0x00,0x80,0x94
+
+# CHECK: s_cbranch_g_fork s[100:101], s[0:1] ; encoding: [0x64,0x00,0x80,0x94]
+0x64,0x00,0x80,0x94
+
+# CHECK: s_cbranch_g_fork flat_scratch, s[0:1] ; encoding: [0x66,0x00,0x80,0x94]
+0x66,0x00,0x80,0x94
+
+# CHECK: s_cbranch_g_fork vcc, s[0:1] ; encoding: [0x6a,0x00,0x80,0x94]
+0x6a,0x00,0x80,0x94
+
+# CHECK: s_cbranch_g_fork tba, s[0:1] ; encoding: [0x6c,0x00,0x80,0x94]
+0x6c,0x00,0x80,0x94
+
+# CHECK: s_cbranch_g_fork tma, s[0:1] ; encoding: [0x6e,0x00,0x80,0x94]
+0x6e,0x00,0x80,0x94
+
+# CHECK: s_cbranch_g_fork ttmp[10:11], s[0:1] ; encoding: [0x7a,0x00,0x80,0x94]
+0x7a,0x00,0x80,0x94
+
+# CHECK: s_cbranch_g_fork exec, s[0:1] ; encoding: [0x7e,0x00,0x80,0x94]
+0x7e,0x00,0x80,0x94
+
+# CHECK: s_cbranch_g_fork s[0:1], s[2:3] ; encoding: [0x00,0x02,0x80,0x94]
+0x00,0x02,0x80,0x94
+
+# CHECK: s_cbranch_g_fork s[0:1], s[100:101] ; encoding: [0x00,0x64,0x80,0x94]
+0x00,0x64,0x80,0x94
+
+# CHECK: s_cbranch_g_fork s[0:1], flat_scratch ; encoding: [0x00,0x66,0x80,0x94]
+0x00,0x66,0x80,0x94
+
+# CHECK: s_cbranch_g_fork s[0:1], vcc ; encoding: [0x00,0x6a,0x80,0x94]
+0x00,0x6a,0x80,0x94
+
+# CHECK: s_cbranch_g_fork s[0:1], tba ; encoding: [0x00,0x6c,0x80,0x94]
+0x00,0x6c,0x80,0x94
+
+# CHECK: s_cbranch_g_fork s[0:1], tma ; encoding: [0x00,0x6e,0x80,0x94]
+0x00,0x6e,0x80,0x94
+
+# CHECK: s_cbranch_g_fork s[0:1], ttmp[10:11] ; encoding: [0x00,0x7a,0x80,0x94]
+0x00,0x7a,0x80,0x94
+
+# CHECK: s_cbranch_g_fork s[0:1], exec ; encoding: [0x00,0x7e,0x80,0x94]
+0x00,0x7e,0x80,0x94
+
+# CHECK: s_absdiff_i32 s0, s0, s0 ; encoding: [0x00,0x00,0x00,0x95]
+0x00,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s101, s0, s0 ; encoding: [0x00,0x00,0x65,0x95]
+0x00,0x00,0x65,0x95
+
+# CHECK: s_absdiff_i32 flat_scratch_lo, s0, s0 ; encoding: [0x00,0x00,0x66,0x95]
+0x00,0x00,0x66,0x95
+
+# CHECK: s_absdiff_i32 flat_scratch_hi, s0, s0 ; encoding: [0x00,0x00,0x67,0x95]
+0x00,0x00,0x67,0x95
+
+# CHECK: s_absdiff_i32 vcc_lo, s0, s0 ; encoding: [0x00,0x00,0x6a,0x95]
+0x00,0x00,0x6a,0x95
+
+# CHECK: s_absdiff_i32 vcc_hi, s0, s0 ; encoding: [0x00,0x00,0x6b,0x95]
+0x00,0x00,0x6b,0x95
+
+# CHECK: s_absdiff_i32 tba_lo, s0, s0 ; encoding: [0x00,0x00,0x6c,0x95]
+0x00,0x00,0x6c,0x95
+
+# CHECK: s_absdiff_i32 tba_hi, s0, s0 ; encoding: [0x00,0x00,0x6d,0x95]
+0x00,0x00,0x6d,0x95
+
+# CHECK: s_absdiff_i32 tma_lo, s0, s0 ; encoding: [0x00,0x00,0x6e,0x95]
+0x00,0x00,0x6e,0x95
+
+# CHECK: s_absdiff_i32 tma_hi, s0, s0 ; encoding: [0x00,0x00,0x6f,0x95]
+0x00,0x00,0x6f,0x95
+
+# CHECK: s_absdiff_i32 ttmp11, s0, s0 ; encoding: [0x00,0x00,0x7b,0x95]
+0x00,0x00,0x7b,0x95
+
+# CHECK: s_absdiff_i32 m0, s0, s0 ; encoding: [0x00,0x00,0x7c,0x95]
+0x00,0x00,0x7c,0x95
+
+# CHECK: s_absdiff_i32 exec_lo, s0, s0 ; encoding: [0x00,0x00,0x7e,0x95]
+0x00,0x00,0x7e,0x95
+
+# CHECK: s_absdiff_i32 exec_hi, s0, s0 ; encoding: [0x00,0x00,0x7f,0x95]
+0x00,0x00,0x7f,0x95
+
+# CHECK: s_absdiff_i32 s0, s101, s0 ; encoding: [0x65,0x00,0x00,0x95]
+0x65,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0x95]
+0x66,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0x95]
+0x67,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0x95]
+0x6a,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0x95]
+0x6b,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0x95]
+0x6c,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0x95]
+0x6d,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0x95]
+0x6e,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0x95]
+0x6f,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0x95]
+0x7b,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, m0, s0 ; encoding: [0x7c,0x00,0x00,0x95]
+0x7c,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0x95]
+0x7e,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0x95]
+0x7f,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, 0, s0 ; encoding: [0x80,0x00,0x00,0x95]
+0x80,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, -1, s0 ; encoding: [0xc1,0x00,0x00,0x95]
+0xc1,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, 0.5, s0 ; encoding: [0xf0,0x00,0x00,0x95]
+0xf0,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, -4.0, s0 ; encoding: [0xf7,0x00,0x00,0x95]
+0xf7,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, scc, s0 ; encoding: [0xfd,0x00,0x00,0x95]
+0xfd,0x00,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, s101 ; encoding: [0x00,0x65,0x00,0x95]
+0x00,0x65,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0x95]
+0x00,0x66,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0x95]
+0x00,0x67,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0x95]
+0x00,0x6a,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0x95]
+0x00,0x6b,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, tba_lo ; encoding: [0x00,0x6c,0x00,0x95]
+0x00,0x6c,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, tba_hi ; encoding: [0x00,0x6d,0x00,0x95]
+0x00,0x6d,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, tma_lo ; encoding: [0x00,0x6e,0x00,0x95]
+0x00,0x6e,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, tma_hi ; encoding: [0x00,0x6f,0x00,0x95]
+0x00,0x6f,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0x95]
+0x00,0x7b,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, m0 ; encoding: [0x00,0x7c,0x00,0x95]
+0x00,0x7c,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, exec_lo ; encoding: [0x00,0x7e,0x00,0x95]
+0x00,0x7e,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, exec_hi ; encoding: [0x00,0x7f,0x00,0x95]
+0x00,0x7f,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, 0 ; encoding: [0x00,0x80,0x00,0x95]
+0x00,0x80,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, -1 ; encoding: [0x00,0xc1,0x00,0x95]
+0x00,0xc1,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, 0.5 ; encoding: [0x00,0xf0,0x00,0x95]
+0x00,0xf0,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, -4.0 ; encoding: [0x00,0xf7,0x00,0x95]
+0x00,0xf7,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, scc ; encoding: [0x00,0xfd,0x00,0x95]
+0x00,0xfd,0x00,0x95
+
+# CHECK: s_absdiff_i32 s0, s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0x95,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0x95,0x56,0x34,0x12,0xaf
+
+# CHECK: s_absdiff_i32 s0, s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0x95,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0x95,0x73,0x72,0x71,0x3f
+
+# CHECK: s_cmp_eq_i32 s0, s0 ; encoding: [0x00,0x00,0x00,0xbf]
+0x00,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s101, s0 ; encoding: [0x65,0x00,0x00,0xbf]
+0x65,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x00,0xbf]
+0x66,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x00,0xbf]
+0x67,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 vcc_lo, s0 ; encoding: [0x6a,0x00,0x00,0xbf]
+0x6a,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 vcc_hi, s0 ; encoding: [0x6b,0x00,0x00,0xbf]
+0x6b,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 tba_lo, s0 ; encoding: [0x6c,0x00,0x00,0xbf]
+0x6c,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 tba_hi, s0 ; encoding: [0x6d,0x00,0x00,0xbf]
+0x6d,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 tma_lo, s0 ; encoding: [0x6e,0x00,0x00,0xbf]
+0x6e,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 tma_hi, s0 ; encoding: [0x6f,0x00,0x00,0xbf]
+0x6f,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 ttmp11, s0 ; encoding: [0x7b,0x00,0x00,0xbf]
+0x7b,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 m0, s0 ; encoding: [0x7c,0x00,0x00,0xbf]
+0x7c,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 exec_lo, s0 ; encoding: [0x7e,0x00,0x00,0xbf]
+0x7e,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 exec_hi, s0 ; encoding: [0x7f,0x00,0x00,0xbf]
+0x7f,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 0, s0 ; encoding: [0x80,0x00,0x00,0xbf]
+0x80,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 -1, s0 ; encoding: [0xc1,0x00,0x00,0xbf]
+0xc1,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 0.5, s0 ; encoding: [0xf0,0x00,0x00,0xbf]
+0xf0,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 -4.0, s0 ; encoding: [0xf7,0x00,0x00,0xbf]
+0xf7,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 scc, s0 ; encoding: [0xfd,0x00,0x00,0xbf]
+0xfd,0x00,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, s101 ; encoding: [0x00,0x65,0x00,0xbf]
+0x00,0x65,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, flat_scratch_lo ; encoding: [0x00,0x66,0x00,0xbf]
+0x00,0x66,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, flat_scratch_hi ; encoding: [0x00,0x67,0x00,0xbf]
+0x00,0x67,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, vcc_lo ; encoding: [0x00,0x6a,0x00,0xbf]
+0x00,0x6a,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, vcc_hi ; encoding: [0x00,0x6b,0x00,0xbf]
+0x00,0x6b,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, tba_lo ; encoding: [0x00,0x6c,0x00,0xbf]
+0x00,0x6c,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, tba_hi ; encoding: [0x00,0x6d,0x00,0xbf]
+0x00,0x6d,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, tma_lo ; encoding: [0x00,0x6e,0x00,0xbf]
+0x00,0x6e,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, tma_hi ; encoding: [0x00,0x6f,0x00,0xbf]
+0x00,0x6f,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, ttmp11 ; encoding: [0x00,0x7b,0x00,0xbf]
+0x00,0x7b,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, m0 ; encoding: [0x00,0x7c,0x00,0xbf]
+0x00,0x7c,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, exec_lo ; encoding: [0x00,0x7e,0x00,0xbf]
+0x00,0x7e,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, exec_hi ; encoding: [0x00,0x7f,0x00,0xbf]
+0x00,0x7f,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, 0 ; encoding: [0x00,0x80,0x00,0xbf]
+0x00,0x80,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, -1 ; encoding: [0x00,0xc1,0x00,0xbf]
+0x00,0xc1,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, 0.5 ; encoding: [0x00,0xf0,0x00,0xbf]
+0x00,0xf0,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, -4.0 ; encoding: [0x00,0xf7,0x00,0xbf]
+0x00,0xf7,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, scc ; encoding: [0x00,0xfd,0x00,0xbf]
+0x00,0xfd,0x00,0xbf
+
+# CHECK: s_cmp_eq_i32 s0, 0xaf123456 ; encoding: [0x00,0xff,0x00,0xbf,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x00,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_cmp_eq_i32 s0, 0x3f717273 ; encoding: [0x00,0xff,0x00,0xbf,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x00,0xbf,0x73,0x72,0x71,0x3f
+
+# CHECK: s_cmp_lg_i32 s0, s0 ; encoding: [0x00,0x00,0x01,0xbf]
+0x00,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s101, s0 ; encoding: [0x65,0x00,0x01,0xbf]
+0x65,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x01,0xbf]
+0x66,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x01,0xbf]
+0x67,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 vcc_lo, s0 ; encoding: [0x6a,0x00,0x01,0xbf]
+0x6a,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 vcc_hi, s0 ; encoding: [0x6b,0x00,0x01,0xbf]
+0x6b,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 tba_lo, s0 ; encoding: [0x6c,0x00,0x01,0xbf]
+0x6c,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 tba_hi, s0 ; encoding: [0x6d,0x00,0x01,0xbf]
+0x6d,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 tma_lo, s0 ; encoding: [0x6e,0x00,0x01,0xbf]
+0x6e,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 tma_hi, s0 ; encoding: [0x6f,0x00,0x01,0xbf]
+0x6f,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 ttmp11, s0 ; encoding: [0x7b,0x00,0x01,0xbf]
+0x7b,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 m0, s0 ; encoding: [0x7c,0x00,0x01,0xbf]
+0x7c,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 exec_lo, s0 ; encoding: [0x7e,0x00,0x01,0xbf]
+0x7e,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 exec_hi, s0 ; encoding: [0x7f,0x00,0x01,0xbf]
+0x7f,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 0, s0 ; encoding: [0x80,0x00,0x01,0xbf]
+0x80,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 -1, s0 ; encoding: [0xc1,0x00,0x01,0xbf]
+0xc1,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 0.5, s0 ; encoding: [0xf0,0x00,0x01,0xbf]
+0xf0,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 -4.0, s0 ; encoding: [0xf7,0x00,0x01,0xbf]
+0xf7,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 scc, s0 ; encoding: [0xfd,0x00,0x01,0xbf]
+0xfd,0x00,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, s101 ; encoding: [0x00,0x65,0x01,0xbf]
+0x00,0x65,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, flat_scratch_lo ; encoding: [0x00,0x66,0x01,0xbf]
+0x00,0x66,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, flat_scratch_hi ; encoding: [0x00,0x67,0x01,0xbf]
+0x00,0x67,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, vcc_lo ; encoding: [0x00,0x6a,0x01,0xbf]
+0x00,0x6a,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, vcc_hi ; encoding: [0x00,0x6b,0x01,0xbf]
+0x00,0x6b,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, tba_lo ; encoding: [0x00,0x6c,0x01,0xbf]
+0x00,0x6c,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, tba_hi ; encoding: [0x00,0x6d,0x01,0xbf]
+0x00,0x6d,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, tma_lo ; encoding: [0x00,0x6e,0x01,0xbf]
+0x00,0x6e,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, tma_hi ; encoding: [0x00,0x6f,0x01,0xbf]
+0x00,0x6f,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, ttmp11 ; encoding: [0x00,0x7b,0x01,0xbf]
+0x00,0x7b,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, m0 ; encoding: [0x00,0x7c,0x01,0xbf]
+0x00,0x7c,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, exec_lo ; encoding: [0x00,0x7e,0x01,0xbf]
+0x00,0x7e,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, exec_hi ; encoding: [0x00,0x7f,0x01,0xbf]
+0x00,0x7f,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, 0 ; encoding: [0x00,0x80,0x01,0xbf]
+0x00,0x80,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, -1 ; encoding: [0x00,0xc1,0x01,0xbf]
+0x00,0xc1,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, 0.5 ; encoding: [0x00,0xf0,0x01,0xbf]
+0x00,0xf0,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, -4.0 ; encoding: [0x00,0xf7,0x01,0xbf]
+0x00,0xf7,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, scc ; encoding: [0x00,0xfd,0x01,0xbf]
+0x00,0xfd,0x01,0xbf
+
+# CHECK: s_cmp_lg_i32 s0, 0xaf123456 ; encoding: [0x00,0xff,0x01,0xbf,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x01,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_cmp_lg_i32 s0, 0x3f717273 ; encoding: [0x00,0xff,0x01,0xbf,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x01,0xbf,0x73,0x72,0x71,0x3f
+
+# CHECK: s_cmp_gt_i32 s0, s0 ; encoding: [0x00,0x00,0x02,0xbf]
+0x00,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s101, s0 ; encoding: [0x65,0x00,0x02,0xbf]
+0x65,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x02,0xbf]
+0x66,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x02,0xbf]
+0x67,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 vcc_lo, s0 ; encoding: [0x6a,0x00,0x02,0xbf]
+0x6a,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 vcc_hi, s0 ; encoding: [0x6b,0x00,0x02,0xbf]
+0x6b,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 tba_lo, s0 ; encoding: [0x6c,0x00,0x02,0xbf]
+0x6c,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 tba_hi, s0 ; encoding: [0x6d,0x00,0x02,0xbf]
+0x6d,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 tma_lo, s0 ; encoding: [0x6e,0x00,0x02,0xbf]
+0x6e,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 tma_hi, s0 ; encoding: [0x6f,0x00,0x02,0xbf]
+0x6f,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 ttmp11, s0 ; encoding: [0x7b,0x00,0x02,0xbf]
+0x7b,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 m0, s0 ; encoding: [0x7c,0x00,0x02,0xbf]
+0x7c,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 exec_lo, s0 ; encoding: [0x7e,0x00,0x02,0xbf]
+0x7e,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 exec_hi, s0 ; encoding: [0x7f,0x00,0x02,0xbf]
+0x7f,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 0, s0 ; encoding: [0x80,0x00,0x02,0xbf]
+0x80,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 -1, s0 ; encoding: [0xc1,0x00,0x02,0xbf]
+0xc1,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 0.5, s0 ; encoding: [0xf0,0x00,0x02,0xbf]
+0xf0,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 -4.0, s0 ; encoding: [0xf7,0x00,0x02,0xbf]
+0xf7,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 scc, s0 ; encoding: [0xfd,0x00,0x02,0xbf]
+0xfd,0x00,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, s101 ; encoding: [0x00,0x65,0x02,0xbf]
+0x00,0x65,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, flat_scratch_lo ; encoding: [0x00,0x66,0x02,0xbf]
+0x00,0x66,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, flat_scratch_hi ; encoding: [0x00,0x67,0x02,0xbf]
+0x00,0x67,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, vcc_lo ; encoding: [0x00,0x6a,0x02,0xbf]
+0x00,0x6a,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, vcc_hi ; encoding: [0x00,0x6b,0x02,0xbf]
+0x00,0x6b,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, tba_lo ; encoding: [0x00,0x6c,0x02,0xbf]
+0x00,0x6c,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, tba_hi ; encoding: [0x00,0x6d,0x02,0xbf]
+0x00,0x6d,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, tma_lo ; encoding: [0x00,0x6e,0x02,0xbf]
+0x00,0x6e,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, tma_hi ; encoding: [0x00,0x6f,0x02,0xbf]
+0x00,0x6f,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, ttmp11 ; encoding: [0x00,0x7b,0x02,0xbf]
+0x00,0x7b,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, m0 ; encoding: [0x00,0x7c,0x02,0xbf]
+0x00,0x7c,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, exec_lo ; encoding: [0x00,0x7e,0x02,0xbf]
+0x00,0x7e,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, exec_hi ; encoding: [0x00,0x7f,0x02,0xbf]
+0x00,0x7f,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, 0 ; encoding: [0x00,0x80,0x02,0xbf]
+0x00,0x80,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, -1 ; encoding: [0x00,0xc1,0x02,0xbf]
+0x00,0xc1,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, 0.5 ; encoding: [0x00,0xf0,0x02,0xbf]
+0x00,0xf0,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, -4.0 ; encoding: [0x00,0xf7,0x02,0xbf]
+0x00,0xf7,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, scc ; encoding: [0x00,0xfd,0x02,0xbf]
+0x00,0xfd,0x02,0xbf
+
+# CHECK: s_cmp_gt_i32 s0, 0xaf123456 ; encoding: [0x00,0xff,0x02,0xbf,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x02,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_cmp_gt_i32 s0, 0x3f717273 ; encoding: [0x00,0xff,0x02,0xbf,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x02,0xbf,0x73,0x72,0x71,0x3f
+
+# CHECK: s_cmp_ge_i32 s0, s0 ; encoding: [0x00,0x00,0x03,0xbf]
+0x00,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s101, s0 ; encoding: [0x65,0x00,0x03,0xbf]
+0x65,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x03,0xbf]
+0x66,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x03,0xbf]
+0x67,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 vcc_lo, s0 ; encoding: [0x6a,0x00,0x03,0xbf]
+0x6a,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 vcc_hi, s0 ; encoding: [0x6b,0x00,0x03,0xbf]
+0x6b,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 tba_lo, s0 ; encoding: [0x6c,0x00,0x03,0xbf]
+0x6c,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 tba_hi, s0 ; encoding: [0x6d,0x00,0x03,0xbf]
+0x6d,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 tma_lo, s0 ; encoding: [0x6e,0x00,0x03,0xbf]
+0x6e,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 tma_hi, s0 ; encoding: [0x6f,0x00,0x03,0xbf]
+0x6f,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 ttmp11, s0 ; encoding: [0x7b,0x00,0x03,0xbf]
+0x7b,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 m0, s0 ; encoding: [0x7c,0x00,0x03,0xbf]
+0x7c,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 exec_lo, s0 ; encoding: [0x7e,0x00,0x03,0xbf]
+0x7e,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 exec_hi, s0 ; encoding: [0x7f,0x00,0x03,0xbf]
+0x7f,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 0, s0 ; encoding: [0x80,0x00,0x03,0xbf]
+0x80,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 -1, s0 ; encoding: [0xc1,0x00,0x03,0xbf]
+0xc1,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 0.5, s0 ; encoding: [0xf0,0x00,0x03,0xbf]
+0xf0,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 -4.0, s0 ; encoding: [0xf7,0x00,0x03,0xbf]
+0xf7,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 scc, s0 ; encoding: [0xfd,0x00,0x03,0xbf]
+0xfd,0x00,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, s101 ; encoding: [0x00,0x65,0x03,0xbf]
+0x00,0x65,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, flat_scratch_lo ; encoding: [0x00,0x66,0x03,0xbf]
+0x00,0x66,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, flat_scratch_hi ; encoding: [0x00,0x67,0x03,0xbf]
+0x00,0x67,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, vcc_lo ; encoding: [0x00,0x6a,0x03,0xbf]
+0x00,0x6a,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, vcc_hi ; encoding: [0x00,0x6b,0x03,0xbf]
+0x00,0x6b,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, tba_lo ; encoding: [0x00,0x6c,0x03,0xbf]
+0x00,0x6c,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, tba_hi ; encoding: [0x00,0x6d,0x03,0xbf]
+0x00,0x6d,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, tma_lo ; encoding: [0x00,0x6e,0x03,0xbf]
+0x00,0x6e,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, tma_hi ; encoding: [0x00,0x6f,0x03,0xbf]
+0x00,0x6f,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, ttmp11 ; encoding: [0x00,0x7b,0x03,0xbf]
+0x00,0x7b,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, m0 ; encoding: [0x00,0x7c,0x03,0xbf]
+0x00,0x7c,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, exec_lo ; encoding: [0x00,0x7e,0x03,0xbf]
+0x00,0x7e,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, exec_hi ; encoding: [0x00,0x7f,0x03,0xbf]
+0x00,0x7f,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, 0 ; encoding: [0x00,0x80,0x03,0xbf]
+0x00,0x80,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, -1 ; encoding: [0x00,0xc1,0x03,0xbf]
+0x00,0xc1,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, 0.5 ; encoding: [0x00,0xf0,0x03,0xbf]
+0x00,0xf0,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, -4.0 ; encoding: [0x00,0xf7,0x03,0xbf]
+0x00,0xf7,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, scc ; encoding: [0x00,0xfd,0x03,0xbf]
+0x00,0xfd,0x03,0xbf
+
+# CHECK: s_cmp_ge_i32 s0, 0xaf123456 ; encoding: [0x00,0xff,0x03,0xbf,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x03,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_cmp_ge_i32 s0, 0x3f717273 ; encoding: [0x00,0xff,0x03,0xbf,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x03,0xbf,0x73,0x72,0x71,0x3f
+
+# CHECK: s_cmp_lt_i32 s0, s0 ; encoding: [0x00,0x00,0x04,0xbf]
+0x00,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s101, s0 ; encoding: [0x65,0x00,0x04,0xbf]
+0x65,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x04,0xbf]
+0x66,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x04,0xbf]
+0x67,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 vcc_lo, s0 ; encoding: [0x6a,0x00,0x04,0xbf]
+0x6a,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 vcc_hi, s0 ; encoding: [0x6b,0x00,0x04,0xbf]
+0x6b,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 tba_lo, s0 ; encoding: [0x6c,0x00,0x04,0xbf]
+0x6c,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 tba_hi, s0 ; encoding: [0x6d,0x00,0x04,0xbf]
+0x6d,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 tma_lo, s0 ; encoding: [0x6e,0x00,0x04,0xbf]
+0x6e,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 tma_hi, s0 ; encoding: [0x6f,0x00,0x04,0xbf]
+0x6f,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 ttmp11, s0 ; encoding: [0x7b,0x00,0x04,0xbf]
+0x7b,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 m0, s0 ; encoding: [0x7c,0x00,0x04,0xbf]
+0x7c,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 exec_lo, s0 ; encoding: [0x7e,0x00,0x04,0xbf]
+0x7e,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 exec_hi, s0 ; encoding: [0x7f,0x00,0x04,0xbf]
+0x7f,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 0, s0 ; encoding: [0x80,0x00,0x04,0xbf]
+0x80,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 -1, s0 ; encoding: [0xc1,0x00,0x04,0xbf]
+0xc1,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 0.5, s0 ; encoding: [0xf0,0x00,0x04,0xbf]
+0xf0,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 -4.0, s0 ; encoding: [0xf7,0x00,0x04,0xbf]
+0xf7,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 scc, s0 ; encoding: [0xfd,0x00,0x04,0xbf]
+0xfd,0x00,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, s101 ; encoding: [0x00,0x65,0x04,0xbf]
+0x00,0x65,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, flat_scratch_lo ; encoding: [0x00,0x66,0x04,0xbf]
+0x00,0x66,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, flat_scratch_hi ; encoding: [0x00,0x67,0x04,0xbf]
+0x00,0x67,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, vcc_lo ; encoding: [0x00,0x6a,0x04,0xbf]
+0x00,0x6a,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, vcc_hi ; encoding: [0x00,0x6b,0x04,0xbf]
+0x00,0x6b,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, tba_lo ; encoding: [0x00,0x6c,0x04,0xbf]
+0x00,0x6c,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, tba_hi ; encoding: [0x00,0x6d,0x04,0xbf]
+0x00,0x6d,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, tma_lo ; encoding: [0x00,0x6e,0x04,0xbf]
+0x00,0x6e,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, tma_hi ; encoding: [0x00,0x6f,0x04,0xbf]
+0x00,0x6f,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, ttmp11 ; encoding: [0x00,0x7b,0x04,0xbf]
+0x00,0x7b,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, m0 ; encoding: [0x00,0x7c,0x04,0xbf]
+0x00,0x7c,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, exec_lo ; encoding: [0x00,0x7e,0x04,0xbf]
+0x00,0x7e,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, exec_hi ; encoding: [0x00,0x7f,0x04,0xbf]
+0x00,0x7f,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, 0 ; encoding: [0x00,0x80,0x04,0xbf]
+0x00,0x80,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, -1 ; encoding: [0x00,0xc1,0x04,0xbf]
+0x00,0xc1,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, 0.5 ; encoding: [0x00,0xf0,0x04,0xbf]
+0x00,0xf0,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, -4.0 ; encoding: [0x00,0xf7,0x04,0xbf]
+0x00,0xf7,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, scc ; encoding: [0x00,0xfd,0x04,0xbf]
+0x00,0xfd,0x04,0xbf
+
+# CHECK: s_cmp_lt_i32 s0, 0xaf123456 ; encoding: [0x00,0xff,0x04,0xbf,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x04,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_cmp_lt_i32 s0, 0x3f717273 ; encoding: [0x00,0xff,0x04,0xbf,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x04,0xbf,0x73,0x72,0x71,0x3f
+
+# CHECK: s_cmp_le_i32 s0, s0 ; encoding: [0x00,0x00,0x05,0xbf]
+0x00,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s101, s0 ; encoding: [0x65,0x00,0x05,0xbf]
+0x65,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x05,0xbf]
+0x66,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x05,0xbf]
+0x67,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 vcc_lo, s0 ; encoding: [0x6a,0x00,0x05,0xbf]
+0x6a,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 vcc_hi, s0 ; encoding: [0x6b,0x00,0x05,0xbf]
+0x6b,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 tba_lo, s0 ; encoding: [0x6c,0x00,0x05,0xbf]
+0x6c,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 tba_hi, s0 ; encoding: [0x6d,0x00,0x05,0xbf]
+0x6d,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 tma_lo, s0 ; encoding: [0x6e,0x00,0x05,0xbf]
+0x6e,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 tma_hi, s0 ; encoding: [0x6f,0x00,0x05,0xbf]
+0x6f,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 ttmp11, s0 ; encoding: [0x7b,0x00,0x05,0xbf]
+0x7b,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 m0, s0 ; encoding: [0x7c,0x00,0x05,0xbf]
+0x7c,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 exec_lo, s0 ; encoding: [0x7e,0x00,0x05,0xbf]
+0x7e,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 exec_hi, s0 ; encoding: [0x7f,0x00,0x05,0xbf]
+0x7f,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 0, s0 ; encoding: [0x80,0x00,0x05,0xbf]
+0x80,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 -1, s0 ; encoding: [0xc1,0x00,0x05,0xbf]
+0xc1,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 0.5, s0 ; encoding: [0xf0,0x00,0x05,0xbf]
+0xf0,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 -4.0, s0 ; encoding: [0xf7,0x00,0x05,0xbf]
+0xf7,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 scc, s0 ; encoding: [0xfd,0x00,0x05,0xbf]
+0xfd,0x00,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, s101 ; encoding: [0x00,0x65,0x05,0xbf]
+0x00,0x65,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, flat_scratch_lo ; encoding: [0x00,0x66,0x05,0xbf]
+0x00,0x66,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, flat_scratch_hi ; encoding: [0x00,0x67,0x05,0xbf]
+0x00,0x67,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, vcc_lo ; encoding: [0x00,0x6a,0x05,0xbf]
+0x00,0x6a,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, vcc_hi ; encoding: [0x00,0x6b,0x05,0xbf]
+0x00,0x6b,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, tba_lo ; encoding: [0x00,0x6c,0x05,0xbf]
+0x00,0x6c,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, tba_hi ; encoding: [0x00,0x6d,0x05,0xbf]
+0x00,0x6d,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, tma_lo ; encoding: [0x00,0x6e,0x05,0xbf]
+0x00,0x6e,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, tma_hi ; encoding: [0x00,0x6f,0x05,0xbf]
+0x00,0x6f,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, ttmp11 ; encoding: [0x00,0x7b,0x05,0xbf]
+0x00,0x7b,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, m0 ; encoding: [0x00,0x7c,0x05,0xbf]
+0x00,0x7c,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, exec_lo ; encoding: [0x00,0x7e,0x05,0xbf]
+0x00,0x7e,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, exec_hi ; encoding: [0x00,0x7f,0x05,0xbf]
+0x00,0x7f,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, 0 ; encoding: [0x00,0x80,0x05,0xbf]
+0x00,0x80,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, -1 ; encoding: [0x00,0xc1,0x05,0xbf]
+0x00,0xc1,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, 0.5 ; encoding: [0x00,0xf0,0x05,0xbf]
+0x00,0xf0,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, -4.0 ; encoding: [0x00,0xf7,0x05,0xbf]
+0x00,0xf7,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, scc ; encoding: [0x00,0xfd,0x05,0xbf]
+0x00,0xfd,0x05,0xbf
+
+# CHECK: s_cmp_le_i32 s0, 0xaf123456 ; encoding: [0x00,0xff,0x05,0xbf,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x05,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_cmp_le_i32 s0, 0x3f717273 ; encoding: [0x00,0xff,0x05,0xbf,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x05,0xbf,0x73,0x72,0x71,0x3f
+
+# CHECK: s_cmp_eq_u32 s0, s0 ; encoding: [0x00,0x00,0x06,0xbf]
+0x00,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s101, s0 ; encoding: [0x65,0x00,0x06,0xbf]
+0x65,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x06,0xbf]
+0x66,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x06,0xbf]
+0x67,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 vcc_lo, s0 ; encoding: [0x6a,0x00,0x06,0xbf]
+0x6a,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 vcc_hi, s0 ; encoding: [0x6b,0x00,0x06,0xbf]
+0x6b,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 tba_lo, s0 ; encoding: [0x6c,0x00,0x06,0xbf]
+0x6c,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 tba_hi, s0 ; encoding: [0x6d,0x00,0x06,0xbf]
+0x6d,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 tma_lo, s0 ; encoding: [0x6e,0x00,0x06,0xbf]
+0x6e,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 tma_hi, s0 ; encoding: [0x6f,0x00,0x06,0xbf]
+0x6f,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 ttmp11, s0 ; encoding: [0x7b,0x00,0x06,0xbf]
+0x7b,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 m0, s0 ; encoding: [0x7c,0x00,0x06,0xbf]
+0x7c,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 exec_lo, s0 ; encoding: [0x7e,0x00,0x06,0xbf]
+0x7e,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 exec_hi, s0 ; encoding: [0x7f,0x00,0x06,0xbf]
+0x7f,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 0, s0 ; encoding: [0x80,0x00,0x06,0xbf]
+0x80,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 -1, s0 ; encoding: [0xc1,0x00,0x06,0xbf]
+0xc1,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 0.5, s0 ; encoding: [0xf0,0x00,0x06,0xbf]
+0xf0,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 -4.0, s0 ; encoding: [0xf7,0x00,0x06,0xbf]
+0xf7,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 scc, s0 ; encoding: [0xfd,0x00,0x06,0xbf]
+0xfd,0x00,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, s101 ; encoding: [0x00,0x65,0x06,0xbf]
+0x00,0x65,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, flat_scratch_lo ; encoding: [0x00,0x66,0x06,0xbf]
+0x00,0x66,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, flat_scratch_hi ; encoding: [0x00,0x67,0x06,0xbf]
+0x00,0x67,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, vcc_lo ; encoding: [0x00,0x6a,0x06,0xbf]
+0x00,0x6a,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, vcc_hi ; encoding: [0x00,0x6b,0x06,0xbf]
+0x00,0x6b,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, tba_lo ; encoding: [0x00,0x6c,0x06,0xbf]
+0x00,0x6c,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, tba_hi ; encoding: [0x00,0x6d,0x06,0xbf]
+0x00,0x6d,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, tma_lo ; encoding: [0x00,0x6e,0x06,0xbf]
+0x00,0x6e,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, tma_hi ; encoding: [0x00,0x6f,0x06,0xbf]
+0x00,0x6f,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, ttmp11 ; encoding: [0x00,0x7b,0x06,0xbf]
+0x00,0x7b,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, m0 ; encoding: [0x00,0x7c,0x06,0xbf]
+0x00,0x7c,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, exec_lo ; encoding: [0x00,0x7e,0x06,0xbf]
+0x00,0x7e,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, exec_hi ; encoding: [0x00,0x7f,0x06,0xbf]
+0x00,0x7f,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, 0 ; encoding: [0x00,0x80,0x06,0xbf]
+0x00,0x80,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, -1 ; encoding: [0x00,0xc1,0x06,0xbf]
+0x00,0xc1,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, 0.5 ; encoding: [0x00,0xf0,0x06,0xbf]
+0x00,0xf0,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, -4.0 ; encoding: [0x00,0xf7,0x06,0xbf]
+0x00,0xf7,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, scc ; encoding: [0x00,0xfd,0x06,0xbf]
+0x00,0xfd,0x06,0xbf
+
+# CHECK: s_cmp_eq_u32 s0, 0xaf123456 ; encoding: [0x00,0xff,0x06,0xbf,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x06,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_cmp_eq_u32 s0, 0x3f717273 ; encoding: [0x00,0xff,0x06,0xbf,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x06,0xbf,0x73,0x72,0x71,0x3f
+
+# CHECK: s_cmp_lg_u32 s0, s0 ; encoding: [0x00,0x00,0x07,0xbf]
+0x00,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s101, s0 ; encoding: [0x65,0x00,0x07,0xbf]
+0x65,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x07,0xbf]
+0x66,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x07,0xbf]
+0x67,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 vcc_lo, s0 ; encoding: [0x6a,0x00,0x07,0xbf]
+0x6a,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 vcc_hi, s0 ; encoding: [0x6b,0x00,0x07,0xbf]
+0x6b,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 tba_lo, s0 ; encoding: [0x6c,0x00,0x07,0xbf]
+0x6c,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 tba_hi, s0 ; encoding: [0x6d,0x00,0x07,0xbf]
+0x6d,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 tma_lo, s0 ; encoding: [0x6e,0x00,0x07,0xbf]
+0x6e,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 tma_hi, s0 ; encoding: [0x6f,0x00,0x07,0xbf]
+0x6f,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 ttmp11, s0 ; encoding: [0x7b,0x00,0x07,0xbf]
+0x7b,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 m0, s0 ; encoding: [0x7c,0x00,0x07,0xbf]
+0x7c,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 exec_lo, s0 ; encoding: [0x7e,0x00,0x07,0xbf]
+0x7e,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 exec_hi, s0 ; encoding: [0x7f,0x00,0x07,0xbf]
+0x7f,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 0, s0 ; encoding: [0x80,0x00,0x07,0xbf]
+0x80,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 -1, s0 ; encoding: [0xc1,0x00,0x07,0xbf]
+0xc1,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 0.5, s0 ; encoding: [0xf0,0x00,0x07,0xbf]
+0xf0,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 -4.0, s0 ; encoding: [0xf7,0x00,0x07,0xbf]
+0xf7,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 scc, s0 ; encoding: [0xfd,0x00,0x07,0xbf]
+0xfd,0x00,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, s101 ; encoding: [0x00,0x65,0x07,0xbf]
+0x00,0x65,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, flat_scratch_lo ; encoding: [0x00,0x66,0x07,0xbf]
+0x00,0x66,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, flat_scratch_hi ; encoding: [0x00,0x67,0x07,0xbf]
+0x00,0x67,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, vcc_lo ; encoding: [0x00,0x6a,0x07,0xbf]
+0x00,0x6a,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, vcc_hi ; encoding: [0x00,0x6b,0x07,0xbf]
+0x00,0x6b,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, tba_lo ; encoding: [0x00,0x6c,0x07,0xbf]
+0x00,0x6c,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, tba_hi ; encoding: [0x00,0x6d,0x07,0xbf]
+0x00,0x6d,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, tma_lo ; encoding: [0x00,0x6e,0x07,0xbf]
+0x00,0x6e,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, tma_hi ; encoding: [0x00,0x6f,0x07,0xbf]
+0x00,0x6f,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, ttmp11 ; encoding: [0x00,0x7b,0x07,0xbf]
+0x00,0x7b,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, m0 ; encoding: [0x00,0x7c,0x07,0xbf]
+0x00,0x7c,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, exec_lo ; encoding: [0x00,0x7e,0x07,0xbf]
+0x00,0x7e,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, exec_hi ; encoding: [0x00,0x7f,0x07,0xbf]
+0x00,0x7f,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, 0 ; encoding: [0x00,0x80,0x07,0xbf]
+0x00,0x80,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, -1 ; encoding: [0x00,0xc1,0x07,0xbf]
+0x00,0xc1,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, 0.5 ; encoding: [0x00,0xf0,0x07,0xbf]
+0x00,0xf0,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, -4.0 ; encoding: [0x00,0xf7,0x07,0xbf]
+0x00,0xf7,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, scc ; encoding: [0x00,0xfd,0x07,0xbf]
+0x00,0xfd,0x07,0xbf
+
+# CHECK: s_cmp_lg_u32 s0, 0xaf123456 ; encoding: [0x00,0xff,0x07,0xbf,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x07,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_cmp_lg_u32 s0, 0x3f717273 ; encoding: [0x00,0xff,0x07,0xbf,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x07,0xbf,0x73,0x72,0x71,0x3f
+
+# CHECK: s_cmp_gt_u32 s0, s0 ; encoding: [0x00,0x00,0x08,0xbf]
+0x00,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s101, s0 ; encoding: [0x65,0x00,0x08,0xbf]
+0x65,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x08,0xbf]
+0x66,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x08,0xbf]
+0x67,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 vcc_lo, s0 ; encoding: [0x6a,0x00,0x08,0xbf]
+0x6a,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 vcc_hi, s0 ; encoding: [0x6b,0x00,0x08,0xbf]
+0x6b,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 tba_lo, s0 ; encoding: [0x6c,0x00,0x08,0xbf]
+0x6c,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 tba_hi, s0 ; encoding: [0x6d,0x00,0x08,0xbf]
+0x6d,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 tma_lo, s0 ; encoding: [0x6e,0x00,0x08,0xbf]
+0x6e,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 tma_hi, s0 ; encoding: [0x6f,0x00,0x08,0xbf]
+0x6f,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 ttmp11, s0 ; encoding: [0x7b,0x00,0x08,0xbf]
+0x7b,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 m0, s0 ; encoding: [0x7c,0x00,0x08,0xbf]
+0x7c,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 exec_lo, s0 ; encoding: [0x7e,0x00,0x08,0xbf]
+0x7e,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 exec_hi, s0 ; encoding: [0x7f,0x00,0x08,0xbf]
+0x7f,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 0, s0 ; encoding: [0x80,0x00,0x08,0xbf]
+0x80,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 -1, s0 ; encoding: [0xc1,0x00,0x08,0xbf]
+0xc1,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 0.5, s0 ; encoding: [0xf0,0x00,0x08,0xbf]
+0xf0,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 -4.0, s0 ; encoding: [0xf7,0x00,0x08,0xbf]
+0xf7,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 scc, s0 ; encoding: [0xfd,0x00,0x08,0xbf]
+0xfd,0x00,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, s101 ; encoding: [0x00,0x65,0x08,0xbf]
+0x00,0x65,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, flat_scratch_lo ; encoding: [0x00,0x66,0x08,0xbf]
+0x00,0x66,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, flat_scratch_hi ; encoding: [0x00,0x67,0x08,0xbf]
+0x00,0x67,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, vcc_lo ; encoding: [0x00,0x6a,0x08,0xbf]
+0x00,0x6a,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, vcc_hi ; encoding: [0x00,0x6b,0x08,0xbf]
+0x00,0x6b,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, tba_lo ; encoding: [0x00,0x6c,0x08,0xbf]
+0x00,0x6c,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, tba_hi ; encoding: [0x00,0x6d,0x08,0xbf]
+0x00,0x6d,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, tma_lo ; encoding: [0x00,0x6e,0x08,0xbf]
+0x00,0x6e,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, tma_hi ; encoding: [0x00,0x6f,0x08,0xbf]
+0x00,0x6f,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, ttmp11 ; encoding: [0x00,0x7b,0x08,0xbf]
+0x00,0x7b,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, m0 ; encoding: [0x00,0x7c,0x08,0xbf]
+0x00,0x7c,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, exec_lo ; encoding: [0x00,0x7e,0x08,0xbf]
+0x00,0x7e,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, exec_hi ; encoding: [0x00,0x7f,0x08,0xbf]
+0x00,0x7f,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, 0 ; encoding: [0x00,0x80,0x08,0xbf]
+0x00,0x80,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, -1 ; encoding: [0x00,0xc1,0x08,0xbf]
+0x00,0xc1,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, 0.5 ; encoding: [0x00,0xf0,0x08,0xbf]
+0x00,0xf0,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, -4.0 ; encoding: [0x00,0xf7,0x08,0xbf]
+0x00,0xf7,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, scc ; encoding: [0x00,0xfd,0x08,0xbf]
+0x00,0xfd,0x08,0xbf
+
+# CHECK: s_cmp_gt_u32 s0, 0xaf123456 ; encoding: [0x00,0xff,0x08,0xbf,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x08,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_cmp_gt_u32 s0, 0x3f717273 ; encoding: [0x00,0xff,0x08,0xbf,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x08,0xbf,0x73,0x72,0x71,0x3f
+
+# CHECK: s_cmp_ge_u32 s0, s0 ; encoding: [0x00,0x00,0x09,0xbf]
+0x00,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s101, s0 ; encoding: [0x65,0x00,0x09,0xbf]
+0x65,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x09,0xbf]
+0x66,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x09,0xbf]
+0x67,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 vcc_lo, s0 ; encoding: [0x6a,0x00,0x09,0xbf]
+0x6a,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 vcc_hi, s0 ; encoding: [0x6b,0x00,0x09,0xbf]
+0x6b,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 tba_lo, s0 ; encoding: [0x6c,0x00,0x09,0xbf]
+0x6c,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 tba_hi, s0 ; encoding: [0x6d,0x00,0x09,0xbf]
+0x6d,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 tma_lo, s0 ; encoding: [0x6e,0x00,0x09,0xbf]
+0x6e,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 tma_hi, s0 ; encoding: [0x6f,0x00,0x09,0xbf]
+0x6f,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 ttmp11, s0 ; encoding: [0x7b,0x00,0x09,0xbf]
+0x7b,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 m0, s0 ; encoding: [0x7c,0x00,0x09,0xbf]
+0x7c,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 exec_lo, s0 ; encoding: [0x7e,0x00,0x09,0xbf]
+0x7e,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 exec_hi, s0 ; encoding: [0x7f,0x00,0x09,0xbf]
+0x7f,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 0, s0 ; encoding: [0x80,0x00,0x09,0xbf]
+0x80,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 -1, s0 ; encoding: [0xc1,0x00,0x09,0xbf]
+0xc1,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 0.5, s0 ; encoding: [0xf0,0x00,0x09,0xbf]
+0xf0,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 -4.0, s0 ; encoding: [0xf7,0x00,0x09,0xbf]
+0xf7,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 scc, s0 ; encoding: [0xfd,0x00,0x09,0xbf]
+0xfd,0x00,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, s101 ; encoding: [0x00,0x65,0x09,0xbf]
+0x00,0x65,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, flat_scratch_lo ; encoding: [0x00,0x66,0x09,0xbf]
+0x00,0x66,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, flat_scratch_hi ; encoding: [0x00,0x67,0x09,0xbf]
+0x00,0x67,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, vcc_lo ; encoding: [0x00,0x6a,0x09,0xbf]
+0x00,0x6a,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, vcc_hi ; encoding: [0x00,0x6b,0x09,0xbf]
+0x00,0x6b,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, tba_lo ; encoding: [0x00,0x6c,0x09,0xbf]
+0x00,0x6c,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, tba_hi ; encoding: [0x00,0x6d,0x09,0xbf]
+0x00,0x6d,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, tma_lo ; encoding: [0x00,0x6e,0x09,0xbf]
+0x00,0x6e,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, tma_hi ; encoding: [0x00,0x6f,0x09,0xbf]
+0x00,0x6f,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, ttmp11 ; encoding: [0x00,0x7b,0x09,0xbf]
+0x00,0x7b,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, m0 ; encoding: [0x00,0x7c,0x09,0xbf]
+0x00,0x7c,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, exec_lo ; encoding: [0x00,0x7e,0x09,0xbf]
+0x00,0x7e,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, exec_hi ; encoding: [0x00,0x7f,0x09,0xbf]
+0x00,0x7f,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, 0 ; encoding: [0x00,0x80,0x09,0xbf]
+0x00,0x80,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, -1 ; encoding: [0x00,0xc1,0x09,0xbf]
+0x00,0xc1,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, 0.5 ; encoding: [0x00,0xf0,0x09,0xbf]
+0x00,0xf0,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, -4.0 ; encoding: [0x00,0xf7,0x09,0xbf]
+0x00,0xf7,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, scc ; encoding: [0x00,0xfd,0x09,0xbf]
+0x00,0xfd,0x09,0xbf
+
+# CHECK: s_cmp_ge_u32 s0, 0xaf123456 ; encoding: [0x00,0xff,0x09,0xbf,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x09,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_cmp_ge_u32 s0, 0x3f717273 ; encoding: [0x00,0xff,0x09,0xbf,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x09,0xbf,0x73,0x72,0x71,0x3f
+
+# CHECK: s_cmp_lt_u32 s0, s0 ; encoding: [0x00,0x00,0x0a,0xbf]
+0x00,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s101, s0 ; encoding: [0x65,0x00,0x0a,0xbf]
+0x65,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x0a,0xbf]
+0x66,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x0a,0xbf]
+0x67,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 vcc_lo, s0 ; encoding: [0x6a,0x00,0x0a,0xbf]
+0x6a,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 vcc_hi, s0 ; encoding: [0x6b,0x00,0x0a,0xbf]
+0x6b,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 tba_lo, s0 ; encoding: [0x6c,0x00,0x0a,0xbf]
+0x6c,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 tba_hi, s0 ; encoding: [0x6d,0x00,0x0a,0xbf]
+0x6d,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 tma_lo, s0 ; encoding: [0x6e,0x00,0x0a,0xbf]
+0x6e,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 tma_hi, s0 ; encoding: [0x6f,0x00,0x0a,0xbf]
+0x6f,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 ttmp11, s0 ; encoding: [0x7b,0x00,0x0a,0xbf]
+0x7b,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 m0, s0 ; encoding: [0x7c,0x00,0x0a,0xbf]
+0x7c,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 exec_lo, s0 ; encoding: [0x7e,0x00,0x0a,0xbf]
+0x7e,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 exec_hi, s0 ; encoding: [0x7f,0x00,0x0a,0xbf]
+0x7f,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 0, s0 ; encoding: [0x80,0x00,0x0a,0xbf]
+0x80,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 -1, s0 ; encoding: [0xc1,0x00,0x0a,0xbf]
+0xc1,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 0.5, s0 ; encoding: [0xf0,0x00,0x0a,0xbf]
+0xf0,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 -4.0, s0 ; encoding: [0xf7,0x00,0x0a,0xbf]
+0xf7,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 scc, s0 ; encoding: [0xfd,0x00,0x0a,0xbf]
+0xfd,0x00,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, s101 ; encoding: [0x00,0x65,0x0a,0xbf]
+0x00,0x65,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, flat_scratch_lo ; encoding: [0x00,0x66,0x0a,0xbf]
+0x00,0x66,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, flat_scratch_hi ; encoding: [0x00,0x67,0x0a,0xbf]
+0x00,0x67,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, vcc_lo ; encoding: [0x00,0x6a,0x0a,0xbf]
+0x00,0x6a,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, vcc_hi ; encoding: [0x00,0x6b,0x0a,0xbf]
+0x00,0x6b,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, tba_lo ; encoding: [0x00,0x6c,0x0a,0xbf]
+0x00,0x6c,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, tba_hi ; encoding: [0x00,0x6d,0x0a,0xbf]
+0x00,0x6d,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, tma_lo ; encoding: [0x00,0x6e,0x0a,0xbf]
+0x00,0x6e,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, tma_hi ; encoding: [0x00,0x6f,0x0a,0xbf]
+0x00,0x6f,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, ttmp11 ; encoding: [0x00,0x7b,0x0a,0xbf]
+0x00,0x7b,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, m0 ; encoding: [0x00,0x7c,0x0a,0xbf]
+0x00,0x7c,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, exec_lo ; encoding: [0x00,0x7e,0x0a,0xbf]
+0x00,0x7e,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, exec_hi ; encoding: [0x00,0x7f,0x0a,0xbf]
+0x00,0x7f,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, 0 ; encoding: [0x00,0x80,0x0a,0xbf]
+0x00,0x80,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, -1 ; encoding: [0x00,0xc1,0x0a,0xbf]
+0x00,0xc1,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, 0.5 ; encoding: [0x00,0xf0,0x0a,0xbf]
+0x00,0xf0,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, -4.0 ; encoding: [0x00,0xf7,0x0a,0xbf]
+0x00,0xf7,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, scc ; encoding: [0x00,0xfd,0x0a,0xbf]
+0x00,0xfd,0x0a,0xbf
+
+# CHECK: s_cmp_lt_u32 s0, 0xaf123456 ; encoding: [0x00,0xff,0x0a,0xbf,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x0a,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_cmp_lt_u32 s0, 0x3f717273 ; encoding: [0x00,0xff,0x0a,0xbf,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x0a,0xbf,0x73,0x72,0x71,0x3f
+
+# CHECK: s_cmp_le_u32 s0, s0 ; encoding: [0x00,0x00,0x0b,0xbf]
+0x00,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s101, s0 ; encoding: [0x65,0x00,0x0b,0xbf]
+0x65,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x0b,0xbf]
+0x66,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x0b,0xbf]
+0x67,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 vcc_lo, s0 ; encoding: [0x6a,0x00,0x0b,0xbf]
+0x6a,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 vcc_hi, s0 ; encoding: [0x6b,0x00,0x0b,0xbf]
+0x6b,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 tba_lo, s0 ; encoding: [0x6c,0x00,0x0b,0xbf]
+0x6c,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 tba_hi, s0 ; encoding: [0x6d,0x00,0x0b,0xbf]
+0x6d,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 tma_lo, s0 ; encoding: [0x6e,0x00,0x0b,0xbf]
+0x6e,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 tma_hi, s0 ; encoding: [0x6f,0x00,0x0b,0xbf]
+0x6f,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 ttmp11, s0 ; encoding: [0x7b,0x00,0x0b,0xbf]
+0x7b,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 m0, s0 ; encoding: [0x7c,0x00,0x0b,0xbf]
+0x7c,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 exec_lo, s0 ; encoding: [0x7e,0x00,0x0b,0xbf]
+0x7e,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 exec_hi, s0 ; encoding: [0x7f,0x00,0x0b,0xbf]
+0x7f,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 0, s0 ; encoding: [0x80,0x00,0x0b,0xbf]
+0x80,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 -1, s0 ; encoding: [0xc1,0x00,0x0b,0xbf]
+0xc1,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 0.5, s0 ; encoding: [0xf0,0x00,0x0b,0xbf]
+0xf0,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 -4.0, s0 ; encoding: [0xf7,0x00,0x0b,0xbf]
+0xf7,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 scc, s0 ; encoding: [0xfd,0x00,0x0b,0xbf]
+0xfd,0x00,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, s101 ; encoding: [0x00,0x65,0x0b,0xbf]
+0x00,0x65,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, flat_scratch_lo ; encoding: [0x00,0x66,0x0b,0xbf]
+0x00,0x66,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, flat_scratch_hi ; encoding: [0x00,0x67,0x0b,0xbf]
+0x00,0x67,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, vcc_lo ; encoding: [0x00,0x6a,0x0b,0xbf]
+0x00,0x6a,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, vcc_hi ; encoding: [0x00,0x6b,0x0b,0xbf]
+0x00,0x6b,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, tba_lo ; encoding: [0x00,0x6c,0x0b,0xbf]
+0x00,0x6c,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, tba_hi ; encoding: [0x00,0x6d,0x0b,0xbf]
+0x00,0x6d,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, tma_lo ; encoding: [0x00,0x6e,0x0b,0xbf]
+0x00,0x6e,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, tma_hi ; encoding: [0x00,0x6f,0x0b,0xbf]
+0x00,0x6f,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, ttmp11 ; encoding: [0x00,0x7b,0x0b,0xbf]
+0x00,0x7b,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, m0 ; encoding: [0x00,0x7c,0x0b,0xbf]
+0x00,0x7c,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, exec_lo ; encoding: [0x00,0x7e,0x0b,0xbf]
+0x00,0x7e,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, exec_hi ; encoding: [0x00,0x7f,0x0b,0xbf]
+0x00,0x7f,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, 0 ; encoding: [0x00,0x80,0x0b,0xbf]
+0x00,0x80,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, -1 ; encoding: [0x00,0xc1,0x0b,0xbf]
+0x00,0xc1,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, 0.5 ; encoding: [0x00,0xf0,0x0b,0xbf]
+0x00,0xf0,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, -4.0 ; encoding: [0x00,0xf7,0x0b,0xbf]
+0x00,0xf7,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, scc ; encoding: [0x00,0xfd,0x0b,0xbf]
+0x00,0xfd,0x0b,0xbf
+
+# CHECK: s_cmp_le_u32 s0, 0xaf123456 ; encoding: [0x00,0xff,0x0b,0xbf,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x0b,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_cmp_le_u32 s0, 0x3f717273 ; encoding: [0x00,0xff,0x0b,0xbf,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x0b,0xbf,0x73,0x72,0x71,0x3f
+
+# CHECK: s_bitcmp0_b32 s0, s0 ; encoding: [0x00,0x00,0x0c,0xbf]
+0x00,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s101, s0 ; encoding: [0x65,0x00,0x0c,0xbf]
+0x65,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x0c,0xbf]
+0x66,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x0c,0xbf]
+0x67,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 vcc_lo, s0 ; encoding: [0x6a,0x00,0x0c,0xbf]
+0x6a,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 vcc_hi, s0 ; encoding: [0x6b,0x00,0x0c,0xbf]
+0x6b,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 tba_lo, s0 ; encoding: [0x6c,0x00,0x0c,0xbf]
+0x6c,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 tba_hi, s0 ; encoding: [0x6d,0x00,0x0c,0xbf]
+0x6d,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 tma_lo, s0 ; encoding: [0x6e,0x00,0x0c,0xbf]
+0x6e,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 tma_hi, s0 ; encoding: [0x6f,0x00,0x0c,0xbf]
+0x6f,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 ttmp11, s0 ; encoding: [0x7b,0x00,0x0c,0xbf]
+0x7b,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 m0, s0 ; encoding: [0x7c,0x00,0x0c,0xbf]
+0x7c,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 exec_lo, s0 ; encoding: [0x7e,0x00,0x0c,0xbf]
+0x7e,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 exec_hi, s0 ; encoding: [0x7f,0x00,0x0c,0xbf]
+0x7f,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 0, s0 ; encoding: [0x80,0x00,0x0c,0xbf]
+0x80,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 -1, s0 ; encoding: [0xc1,0x00,0x0c,0xbf]
+0xc1,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 0.5, s0 ; encoding: [0xf0,0x00,0x0c,0xbf]
+0xf0,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 -4.0, s0 ; encoding: [0xf7,0x00,0x0c,0xbf]
+0xf7,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 scc, s0 ; encoding: [0xfd,0x00,0x0c,0xbf]
+0xfd,0x00,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, s101 ; encoding: [0x00,0x65,0x0c,0xbf]
+0x00,0x65,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, flat_scratch_lo ; encoding: [0x00,0x66,0x0c,0xbf]
+0x00,0x66,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, flat_scratch_hi ; encoding: [0x00,0x67,0x0c,0xbf]
+0x00,0x67,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, vcc_lo ; encoding: [0x00,0x6a,0x0c,0xbf]
+0x00,0x6a,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, vcc_hi ; encoding: [0x00,0x6b,0x0c,0xbf]
+0x00,0x6b,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, tba_lo ; encoding: [0x00,0x6c,0x0c,0xbf]
+0x00,0x6c,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, tba_hi ; encoding: [0x00,0x6d,0x0c,0xbf]
+0x00,0x6d,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, tma_lo ; encoding: [0x00,0x6e,0x0c,0xbf]
+0x00,0x6e,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, tma_hi ; encoding: [0x00,0x6f,0x0c,0xbf]
+0x00,0x6f,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, ttmp11 ; encoding: [0x00,0x7b,0x0c,0xbf]
+0x00,0x7b,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, m0 ; encoding: [0x00,0x7c,0x0c,0xbf]
+0x00,0x7c,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, exec_lo ; encoding: [0x00,0x7e,0x0c,0xbf]
+0x00,0x7e,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, exec_hi ; encoding: [0x00,0x7f,0x0c,0xbf]
+0x00,0x7f,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, 0 ; encoding: [0x00,0x80,0x0c,0xbf]
+0x00,0x80,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, -1 ; encoding: [0x00,0xc1,0x0c,0xbf]
+0x00,0xc1,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, 0.5 ; encoding: [0x00,0xf0,0x0c,0xbf]
+0x00,0xf0,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, -4.0 ; encoding: [0x00,0xf7,0x0c,0xbf]
+0x00,0xf7,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, scc ; encoding: [0x00,0xfd,0x0c,0xbf]
+0x00,0xfd,0x0c,0xbf
+
+# CHECK: s_bitcmp0_b32 s0, 0xaf123456 ; encoding: [0x00,0xff,0x0c,0xbf,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x0c,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_bitcmp0_b32 s0, 0x3f717273 ; encoding: [0x00,0xff,0x0c,0xbf,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x0c,0xbf,0x73,0x72,0x71,0x3f
+
+# CHECK: s_bitcmp1_b32 s0, s0 ; encoding: [0x00,0x00,0x0d,0xbf]
+0x00,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s101, s0 ; encoding: [0x65,0x00,0x0d,0xbf]
+0x65,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x0d,0xbf]
+0x66,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x0d,0xbf]
+0x67,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 vcc_lo, s0 ; encoding: [0x6a,0x00,0x0d,0xbf]
+0x6a,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 vcc_hi, s0 ; encoding: [0x6b,0x00,0x0d,0xbf]
+0x6b,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 tba_lo, s0 ; encoding: [0x6c,0x00,0x0d,0xbf]
+0x6c,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 tba_hi, s0 ; encoding: [0x6d,0x00,0x0d,0xbf]
+0x6d,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 tma_lo, s0 ; encoding: [0x6e,0x00,0x0d,0xbf]
+0x6e,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 tma_hi, s0 ; encoding: [0x6f,0x00,0x0d,0xbf]
+0x6f,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 ttmp11, s0 ; encoding: [0x7b,0x00,0x0d,0xbf]
+0x7b,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 m0, s0 ; encoding: [0x7c,0x00,0x0d,0xbf]
+0x7c,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 exec_lo, s0 ; encoding: [0x7e,0x00,0x0d,0xbf]
+0x7e,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 exec_hi, s0 ; encoding: [0x7f,0x00,0x0d,0xbf]
+0x7f,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 0, s0 ; encoding: [0x80,0x00,0x0d,0xbf]
+0x80,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 -1, s0 ; encoding: [0xc1,0x00,0x0d,0xbf]
+0xc1,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 0.5, s0 ; encoding: [0xf0,0x00,0x0d,0xbf]
+0xf0,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 -4.0, s0 ; encoding: [0xf7,0x00,0x0d,0xbf]
+0xf7,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 scc, s0 ; encoding: [0xfd,0x00,0x0d,0xbf]
+0xfd,0x00,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, s101 ; encoding: [0x00,0x65,0x0d,0xbf]
+0x00,0x65,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, flat_scratch_lo ; encoding: [0x00,0x66,0x0d,0xbf]
+0x00,0x66,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, flat_scratch_hi ; encoding: [0x00,0x67,0x0d,0xbf]
+0x00,0x67,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, vcc_lo ; encoding: [0x00,0x6a,0x0d,0xbf]
+0x00,0x6a,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, vcc_hi ; encoding: [0x00,0x6b,0x0d,0xbf]
+0x00,0x6b,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, tba_lo ; encoding: [0x00,0x6c,0x0d,0xbf]
+0x00,0x6c,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, tba_hi ; encoding: [0x00,0x6d,0x0d,0xbf]
+0x00,0x6d,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, tma_lo ; encoding: [0x00,0x6e,0x0d,0xbf]
+0x00,0x6e,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, tma_hi ; encoding: [0x00,0x6f,0x0d,0xbf]
+0x00,0x6f,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, ttmp11 ; encoding: [0x00,0x7b,0x0d,0xbf]
+0x00,0x7b,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, m0 ; encoding: [0x00,0x7c,0x0d,0xbf]
+0x00,0x7c,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, exec_lo ; encoding: [0x00,0x7e,0x0d,0xbf]
+0x00,0x7e,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, exec_hi ; encoding: [0x00,0x7f,0x0d,0xbf]
+0x00,0x7f,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, 0 ; encoding: [0x00,0x80,0x0d,0xbf]
+0x00,0x80,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, -1 ; encoding: [0x00,0xc1,0x0d,0xbf]
+0x00,0xc1,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, 0.5 ; encoding: [0x00,0xf0,0x0d,0xbf]
+0x00,0xf0,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, -4.0 ; encoding: [0x00,0xf7,0x0d,0xbf]
+0x00,0xf7,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, scc ; encoding: [0x00,0xfd,0x0d,0xbf]
+0x00,0xfd,0x0d,0xbf
+
+# CHECK: s_bitcmp1_b32 s0, 0xaf123456 ; encoding: [0x00,0xff,0x0d,0xbf,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x0d,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_bitcmp1_b32 s0, 0x3f717273 ; encoding: [0x00,0xff,0x0d,0xbf,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x0d,0xbf,0x73,0x72,0x71,0x3f
+
+# CHECK: s_bitcmp0_b64 s[0:1], s0 ; encoding: [0x00,0x00,0x0e,0xbf]
+0x00,0x00,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[2:3], s0 ; encoding: [0x02,0x00,0x0e,0xbf]
+0x02,0x00,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[100:101], s0 ; encoding: [0x64,0x00,0x0e,0xbf]
+0x64,0x00,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 flat_scratch, s0 ; encoding: [0x66,0x00,0x0e,0xbf]
+0x66,0x00,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 vcc, s0 ; encoding: [0x6a,0x00,0x0e,0xbf]
+0x6a,0x00,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 tba, s0 ; encoding: [0x6c,0x00,0x0e,0xbf]
+0x6c,0x00,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 tma, s0 ; encoding: [0x6e,0x00,0x0e,0xbf]
+0x6e,0x00,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 ttmp[10:11], s0 ; encoding: [0x7a,0x00,0x0e,0xbf]
+0x7a,0x00,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 exec, s0 ; encoding: [0x7e,0x00,0x0e,0xbf]
+0x7e,0x00,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 0, s0 ; encoding: [0x80,0x00,0x0e,0xbf]
+0x80,0x00,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 -1, s0 ; encoding: [0xc1,0x00,0x0e,0xbf]
+0xc1,0x00,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 0.5, s0 ; encoding: [0xf0,0x00,0x0e,0xbf]
+0xf0,0x00,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 -4.0, s0 ; encoding: [0xf7,0x00,0x0e,0xbf]
+0xf7,0x00,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 0xaf123456, s0 ; encoding: [0xff,0x00,0x0e,0xbf,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x0e,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_bitcmp0_b64 0x3f717273, s0 ; encoding: [0xff,0x00,0x0e,0xbf,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x0e,0xbf,0x73,0x72,0x71,0x3f
+
+# CHECK: s_bitcmp0_b64 s[0:1], s101 ; encoding: [0x00,0x65,0x0e,0xbf]
+0x00,0x65,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[0:1], flat_scratch_lo ; encoding: [0x00,0x66,0x0e,0xbf]
+0x00,0x66,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[0:1], flat_scratch_hi ; encoding: [0x00,0x67,0x0e,0xbf]
+0x00,0x67,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[0:1], vcc_lo ; encoding: [0x00,0x6a,0x0e,0xbf]
+0x00,0x6a,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[0:1], vcc_hi ; encoding: [0x00,0x6b,0x0e,0xbf]
+0x00,0x6b,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[0:1], tba_lo ; encoding: [0x00,0x6c,0x0e,0xbf]
+0x00,0x6c,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[0:1], tba_hi ; encoding: [0x00,0x6d,0x0e,0xbf]
+0x00,0x6d,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[0:1], tma_lo ; encoding: [0x00,0x6e,0x0e,0xbf]
+0x00,0x6e,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[0:1], tma_hi ; encoding: [0x00,0x6f,0x0e,0xbf]
+0x00,0x6f,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[0:1], ttmp11 ; encoding: [0x00,0x7b,0x0e,0xbf]
+0x00,0x7b,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[0:1], m0 ; encoding: [0x00,0x7c,0x0e,0xbf]
+0x00,0x7c,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[0:1], exec_lo ; encoding: [0x00,0x7e,0x0e,0xbf]
+0x00,0x7e,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[0:1], exec_hi ; encoding: [0x00,0x7f,0x0e,0xbf]
+0x00,0x7f,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[0:1], 0 ; encoding: [0x00,0x80,0x0e,0xbf]
+0x00,0x80,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[0:1], -1 ; encoding: [0x00,0xc1,0x0e,0xbf]
+0x00,0xc1,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[0:1], 0.5 ; encoding: [0x00,0xf0,0x0e,0xbf]
+0x00,0xf0,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[0:1], -4.0 ; encoding: [0x00,0xf7,0x0e,0xbf]
+0x00,0xf7,0x0e,0xbf
+
+# CHECK: s_bitcmp0_b64 s[0:1], scc ; encoding: [0x00,0xfd,0x0e,0xbf]
+0x00,0xfd,0x0e,0xbf
+
+# CHECK: s_bitcmp1_b64 s[0:1], s0 ; encoding: [0x00,0x00,0x0f,0xbf]
+0x00,0x00,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[2:3], s0 ; encoding: [0x02,0x00,0x0f,0xbf]
+0x02,0x00,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[100:101], s0 ; encoding: [0x64,0x00,0x0f,0xbf]
+0x64,0x00,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 flat_scratch, s0 ; encoding: [0x66,0x00,0x0f,0xbf]
+0x66,0x00,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 vcc, s0 ; encoding: [0x6a,0x00,0x0f,0xbf]
+0x6a,0x00,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 tba, s0 ; encoding: [0x6c,0x00,0x0f,0xbf]
+0x6c,0x00,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 tma, s0 ; encoding: [0x6e,0x00,0x0f,0xbf]
+0x6e,0x00,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 ttmp[10:11], s0 ; encoding: [0x7a,0x00,0x0f,0xbf]
+0x7a,0x00,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 exec, s0 ; encoding: [0x7e,0x00,0x0f,0xbf]
+0x7e,0x00,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 0, s0 ; encoding: [0x80,0x00,0x0f,0xbf]
+0x80,0x00,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 -1, s0 ; encoding: [0xc1,0x00,0x0f,0xbf]
+0xc1,0x00,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 0.5, s0 ; encoding: [0xf0,0x00,0x0f,0xbf]
+0xf0,0x00,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 -4.0, s0 ; encoding: [0xf7,0x00,0x0f,0xbf]
+0xf7,0x00,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 0xaf123456, s0 ; encoding: [0xff,0x00,0x0f,0xbf,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x0f,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_bitcmp1_b64 0x3f717273, s0 ; encoding: [0xff,0x00,0x0f,0xbf,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x0f,0xbf,0x73,0x72,0x71,0x3f
+
+# CHECK: s_bitcmp1_b64 s[0:1], s101 ; encoding: [0x00,0x65,0x0f,0xbf]
+0x00,0x65,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[0:1], flat_scratch_lo ; encoding: [0x00,0x66,0x0f,0xbf]
+0x00,0x66,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[0:1], flat_scratch_hi ; encoding: [0x00,0x67,0x0f,0xbf]
+0x00,0x67,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[0:1], vcc_lo ; encoding: [0x00,0x6a,0x0f,0xbf]
+0x00,0x6a,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[0:1], vcc_hi ; encoding: [0x00,0x6b,0x0f,0xbf]
+0x00,0x6b,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[0:1], tba_lo ; encoding: [0x00,0x6c,0x0f,0xbf]
+0x00,0x6c,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[0:1], tba_hi ; encoding: [0x00,0x6d,0x0f,0xbf]
+0x00,0x6d,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[0:1], tma_lo ; encoding: [0x00,0x6e,0x0f,0xbf]
+0x00,0x6e,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[0:1], tma_hi ; encoding: [0x00,0x6f,0x0f,0xbf]
+0x00,0x6f,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[0:1], ttmp11 ; encoding: [0x00,0x7b,0x0f,0xbf]
+0x00,0x7b,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[0:1], m0 ; encoding: [0x00,0x7c,0x0f,0xbf]
+0x00,0x7c,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[0:1], exec_lo ; encoding: [0x00,0x7e,0x0f,0xbf]
+0x00,0x7e,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[0:1], exec_hi ; encoding: [0x00,0x7f,0x0f,0xbf]
+0x00,0x7f,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[0:1], 0 ; encoding: [0x00,0x80,0x0f,0xbf]
+0x00,0x80,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[0:1], -1 ; encoding: [0x00,0xc1,0x0f,0xbf]
+0x00,0xc1,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[0:1], 0.5 ; encoding: [0x00,0xf0,0x0f,0xbf]
+0x00,0xf0,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[0:1], -4.0 ; encoding: [0x00,0xf7,0x0f,0xbf]
+0x00,0xf7,0x0f,0xbf
+
+# CHECK: s_bitcmp1_b64 s[0:1], scc ; encoding: [0x00,0xfd,0x0f,0xbf]
+0x00,0xfd,0x0f,0xbf
+
+# CHECK: s_setvskip s0, s0 ; encoding: [0x00,0x00,0x10,0xbf]
+0x00,0x00,0x10,0xbf
+
+# CHECK: s_setvskip s101, s0 ; encoding: [0x65,0x00,0x10,0xbf]
+0x65,0x00,0x10,0xbf
+
+# CHECK: s_setvskip flat_scratch_lo, s0 ; encoding: [0x66,0x00,0x10,0xbf]
+0x66,0x00,0x10,0xbf
+
+# CHECK: s_setvskip flat_scratch_hi, s0 ; encoding: [0x67,0x00,0x10,0xbf]
+0x67,0x00,0x10,0xbf
+
+# CHECK: s_setvskip vcc_lo, s0 ; encoding: [0x6a,0x00,0x10,0xbf]
+0x6a,0x00,0x10,0xbf
+
+# CHECK: s_setvskip vcc_hi, s0 ; encoding: [0x6b,0x00,0x10,0xbf]
+0x6b,0x00,0x10,0xbf
+
+# CHECK: s_setvskip tba_lo, s0 ; encoding: [0x6c,0x00,0x10,0xbf]
+0x6c,0x00,0x10,0xbf
+
+# CHECK: s_setvskip tba_hi, s0 ; encoding: [0x6d,0x00,0x10,0xbf]
+0x6d,0x00,0x10,0xbf
+
+# CHECK: s_setvskip tma_lo, s0 ; encoding: [0x6e,0x00,0x10,0xbf]
+0x6e,0x00,0x10,0xbf
+
+# CHECK: s_setvskip tma_hi, s0 ; encoding: [0x6f,0x00,0x10,0xbf]
+0x6f,0x00,0x10,0xbf
+
+# CHECK: s_setvskip ttmp11, s0 ; encoding: [0x7b,0x00,0x10,0xbf]
+0x7b,0x00,0x10,0xbf
+
+# CHECK: s_setvskip m0, s0 ; encoding: [0x7c,0x00,0x10,0xbf]
+0x7c,0x00,0x10,0xbf
+
+# CHECK: s_setvskip exec_lo, s0 ; encoding: [0x7e,0x00,0x10,0xbf]
+0x7e,0x00,0x10,0xbf
+
+# CHECK: s_setvskip exec_hi, s0 ; encoding: [0x7f,0x00,0x10,0xbf]
+0x7f,0x00,0x10,0xbf
+
+# CHECK: s_setvskip 0, s0 ; encoding: [0x80,0x00,0x10,0xbf]
+0x80,0x00,0x10,0xbf
+
+# CHECK: s_setvskip -1, s0 ; encoding: [0xc1,0x00,0x10,0xbf]
+0xc1,0x00,0x10,0xbf
+
+# CHECK: s_setvskip 0.5, s0 ; encoding: [0xf0,0x00,0x10,0xbf]
+0xf0,0x00,0x10,0xbf
+
+# CHECK: s_setvskip -4.0, s0 ; encoding: [0xf7,0x00,0x10,0xbf]
+0xf7,0x00,0x10,0xbf
+
+# CHECK: s_setvskip scc, s0 ; encoding: [0xfd,0x00,0x10,0xbf]
+0xfd,0x00,0x10,0xbf
+
+# CHECK: s_setvskip s0, s101 ; encoding: [0x00,0x65,0x10,0xbf]
+0x00,0x65,0x10,0xbf
+
+# CHECK: s_setvskip s0, flat_scratch_lo ; encoding: [0x00,0x66,0x10,0xbf]
+0x00,0x66,0x10,0xbf
+
+# CHECK: s_setvskip s0, flat_scratch_hi ; encoding: [0x00,0x67,0x10,0xbf]
+0x00,0x67,0x10,0xbf
+
+# CHECK: s_setvskip s0, vcc_lo ; encoding: [0x00,0x6a,0x10,0xbf]
+0x00,0x6a,0x10,0xbf
+
+# CHECK: s_setvskip s0, vcc_hi ; encoding: [0x00,0x6b,0x10,0xbf]
+0x00,0x6b,0x10,0xbf
+
+# CHECK: s_setvskip s0, tba_lo ; encoding: [0x00,0x6c,0x10,0xbf]
+0x00,0x6c,0x10,0xbf
+
+# CHECK: s_setvskip s0, tba_hi ; encoding: [0x00,0x6d,0x10,0xbf]
+0x00,0x6d,0x10,0xbf
+
+# CHECK: s_setvskip s0, tma_lo ; encoding: [0x00,0x6e,0x10,0xbf]
+0x00,0x6e,0x10,0xbf
+
+# CHECK: s_setvskip s0, tma_hi ; encoding: [0x00,0x6f,0x10,0xbf]
+0x00,0x6f,0x10,0xbf
+
+# CHECK: s_setvskip s0, ttmp11 ; encoding: [0x00,0x7b,0x10,0xbf]
+0x00,0x7b,0x10,0xbf
+
+# CHECK: s_setvskip s0, m0 ; encoding: [0x00,0x7c,0x10,0xbf]
+0x00,0x7c,0x10,0xbf
+
+# CHECK: s_setvskip s0, exec_lo ; encoding: [0x00,0x7e,0x10,0xbf]
+0x00,0x7e,0x10,0xbf
+
+# CHECK: s_setvskip s0, exec_hi ; encoding: [0x00,0x7f,0x10,0xbf]
+0x00,0x7f,0x10,0xbf
+
+# CHECK: s_setvskip s0, 0 ; encoding: [0x00,0x80,0x10,0xbf]
+0x00,0x80,0x10,0xbf
+
+# CHECK: s_setvskip s0, -1 ; encoding: [0x00,0xc1,0x10,0xbf]
+0x00,0xc1,0x10,0xbf
+
+# CHECK: s_setvskip s0, 0.5 ; encoding: [0x00,0xf0,0x10,0xbf]
+0x00,0xf0,0x10,0xbf
+
+# CHECK: s_setvskip s0, -4.0 ; encoding: [0x00,0xf7,0x10,0xbf]
+0x00,0xf7,0x10,0xbf
+
+# CHECK: s_setvskip s0, scc ; encoding: [0x00,0xfd,0x10,0xbf]
+0x00,0xfd,0x10,0xbf
+
+# CHECK: s_setvskip s0, 0xaf123456 ; encoding: [0x00,0xff,0x10,0xbf,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x10,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_setvskip s0, 0x3f717273 ; encoding: [0x00,0xff,0x10,0xbf,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x10,0xbf,0x73,0x72,0x71,0x3f
+
+# CHECK: s_cmp_eq_u64 s[0:1], s[0:1] ; encoding: [0x00,0x00,0x12,0xbf]
+0x00,0x00,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 s[2:3], s[0:1] ; encoding: [0x02,0x00,0x12,0xbf]
+0x02,0x00,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 s[100:101], s[0:1] ; encoding: [0x64,0x00,0x12,0xbf]
+0x64,0x00,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 flat_scratch, s[0:1] ; encoding: [0x66,0x00,0x12,0xbf]
+0x66,0x00,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 vcc, s[0:1] ; encoding: [0x6a,0x00,0x12,0xbf]
+0x6a,0x00,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 tba, s[0:1] ; encoding: [0x6c,0x00,0x12,0xbf]
+0x6c,0x00,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 tma, s[0:1] ; encoding: [0x6e,0x00,0x12,0xbf]
+0x6e,0x00,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 ttmp[10:11], s[0:1] ; encoding: [0x7a,0x00,0x12,0xbf]
+0x7a,0x00,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 exec, s[0:1] ; encoding: [0x7e,0x00,0x12,0xbf]
+0x7e,0x00,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 0, s[0:1] ; encoding: [0x80,0x00,0x12,0xbf]
+0x80,0x00,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 -1, s[0:1] ; encoding: [0xc1,0x00,0x12,0xbf]
+0xc1,0x00,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 0.5, s[0:1] ; encoding: [0xf0,0x00,0x12,0xbf]
+0xf0,0x00,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 -4.0, s[0:1] ; encoding: [0xf7,0x00,0x12,0xbf]
+0xf7,0x00,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 s[0:1], s[2:3] ; encoding: [0x00,0x02,0x12,0xbf]
+0x00,0x02,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 s[0:1], s[100:101] ; encoding: [0x00,0x64,0x12,0xbf]
+0x00,0x64,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 s[0:1], flat_scratch ; encoding: [0x00,0x66,0x12,0xbf]
+0x00,0x66,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 s[0:1], vcc ; encoding: [0x00,0x6a,0x12,0xbf]
+0x00,0x6a,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 s[0:1], tba ; encoding: [0x00,0x6c,0x12,0xbf]
+0x00,0x6c,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 s[0:1], tma ; encoding: [0x00,0x6e,0x12,0xbf]
+0x00,0x6e,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 s[0:1], ttmp[10:11] ; encoding: [0x00,0x7a,0x12,0xbf]
+0x00,0x7a,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 s[0:1], exec ; encoding: [0x00,0x7e,0x12,0xbf]
+0x00,0x7e,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 s[0:1], 0 ; encoding: [0x00,0x80,0x12,0xbf]
+0x00,0x80,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 s[0:1], -1 ; encoding: [0x00,0xc1,0x12,0xbf]
+0x00,0xc1,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 s[0:1], 0.5 ; encoding: [0x00,0xf0,0x12,0xbf]
+0x00,0xf0,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 s[0:1], -4.0 ; encoding: [0x00,0xf7,0x12,0xbf]
+0x00,0xf7,0x12,0xbf
+
+# CHECK: s_cmp_eq_u64 s[0:1], 0xaf123456 ; encoding: [0x00,0xff,0x12,0xbf,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x12,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_cmp_eq_u64 s[0:1], 0x3f717273 ; encoding: [0x00,0xff,0x12,0xbf,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x12,0xbf,0x73,0x72,0x71,0x3f
+
+# CHECK: s_cmp_lg_u64 s[0:1], s[0:1] ; encoding: [0x00,0x00,0x13,0xbf]
+0x00,0x00,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 s[2:3], s[0:1] ; encoding: [0x02,0x00,0x13,0xbf]
+0x02,0x00,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 s[100:101], s[0:1] ; encoding: [0x64,0x00,0x13,0xbf]
+0x64,0x00,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 flat_scratch, s[0:1] ; encoding: [0x66,0x00,0x13,0xbf]
+0x66,0x00,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 vcc, s[0:1] ; encoding: [0x6a,0x00,0x13,0xbf]
+0x6a,0x00,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 tba, s[0:1] ; encoding: [0x6c,0x00,0x13,0xbf]
+0x6c,0x00,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 tma, s[0:1] ; encoding: [0x6e,0x00,0x13,0xbf]
+0x6e,0x00,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 ttmp[10:11], s[0:1] ; encoding: [0x7a,0x00,0x13,0xbf]
+0x7a,0x00,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 exec, s[0:1] ; encoding: [0x7e,0x00,0x13,0xbf]
+0x7e,0x00,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 0, s[0:1] ; encoding: [0x80,0x00,0x13,0xbf]
+0x80,0x00,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 -1, s[0:1] ; encoding: [0xc1,0x00,0x13,0xbf]
+0xc1,0x00,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 0.5, s[0:1] ; encoding: [0xf0,0x00,0x13,0xbf]
+0xf0,0x00,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 -4.0, s[0:1] ; encoding: [0xf7,0x00,0x13,0xbf]
+0xf7,0x00,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 s[0:1], s[2:3] ; encoding: [0x00,0x02,0x13,0xbf]
+0x00,0x02,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 s[0:1], s[100:101] ; encoding: [0x00,0x64,0x13,0xbf]
+0x00,0x64,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 s[0:1], flat_scratch ; encoding: [0x00,0x66,0x13,0xbf]
+0x00,0x66,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 s[0:1], vcc ; encoding: [0x00,0x6a,0x13,0xbf]
+0x00,0x6a,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 s[0:1], tba ; encoding: [0x00,0x6c,0x13,0xbf]
+0x00,0x6c,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 s[0:1], tma ; encoding: [0x00,0x6e,0x13,0xbf]
+0x00,0x6e,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 s[0:1], ttmp[10:11] ; encoding: [0x00,0x7a,0x13,0xbf]
+0x00,0x7a,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 s[0:1], exec ; encoding: [0x00,0x7e,0x13,0xbf]
+0x00,0x7e,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 s[0:1], 0 ; encoding: [0x00,0x80,0x13,0xbf]
+0x00,0x80,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 s[0:1], -1 ; encoding: [0x00,0xc1,0x13,0xbf]
+0x00,0xc1,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 s[0:1], 0.5 ; encoding: [0x00,0xf0,0x13,0xbf]
+0x00,0xf0,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 s[0:1], -4.0 ; encoding: [0x00,0xf7,0x13,0xbf]
+0x00,0xf7,0x13,0xbf
+
+# CHECK: s_cmp_lg_u64 s[0:1], 0xaf123456 ; encoding: [0x00,0xff,0x13,0xbf,0x56,0x34,0x12,0xaf]
+0x00,0xff,0x13,0xbf,0x56,0x34,0x12,0xaf
+
+# CHECK: s_cmp_lg_u64 s[0:1], 0x3f717273 ; encoding: [0x00,0xff,0x13,0xbf,0x73,0x72,0x71,0x3f]
+0x00,0xff,0x13,0xbf,0x73,0x72,0x71,0x3f
+
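+# The s_movk/s_cmovk/s_cmpk/s_addk/s_mulk pairs below use the SOPK layout
+# (a sketch, per the GFX8/VI docs): [31:28]=0b1011, op[27:23], sdst[22:16],
+# simm16[15:0]. Worked example from the first pair: 0xb0003141 -> op 0
+# (s_movk_i32), sdst 0 (s0), simm16 0x3141.
+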
+# CHECK: s_movk_i32 s0, 0x3141 ; encoding: [0x41,0x31,0x00,0xb0]
+0x41,0x31,0x00,0xb0
+
+# CHECK: s_movk_i32 s101, 0x3141 ; encoding: [0x41,0x31,0x65,0xb0]
+0x41,0x31,0x65,0xb0
+
+# CHECK: s_movk_i32 flat_scratch_lo, 0x3141 ; encoding: [0x41,0x31,0x66,0xb0]
+0x41,0x31,0x66,0xb0
+
+# CHECK: s_movk_i32 flat_scratch_hi, 0x3141 ; encoding: [0x41,0x31,0x67,0xb0]
+0x41,0x31,0x67,0xb0
+
+# CHECK: s_movk_i32 vcc_lo, 0x3141 ; encoding: [0x41,0x31,0x6a,0xb0]
+0x41,0x31,0x6a,0xb0
+
+# CHECK: s_movk_i32 vcc_hi, 0x3141 ; encoding: [0x41,0x31,0x6b,0xb0]
+0x41,0x31,0x6b,0xb0
+
+# CHECK: s_movk_i32 tba_lo, 0x3141 ; encoding: [0x41,0x31,0x6c,0xb0]
+0x41,0x31,0x6c,0xb0
+
+# CHECK: s_movk_i32 tba_hi, 0x3141 ; encoding: [0x41,0x31,0x6d,0xb0]
+0x41,0x31,0x6d,0xb0
+
+# CHECK: s_movk_i32 tma_lo, 0x3141 ; encoding: [0x41,0x31,0x6e,0xb0]
+0x41,0x31,0x6e,0xb0
+
+# CHECK: s_movk_i32 tma_hi, 0x3141 ; encoding: [0x41,0x31,0x6f,0xb0]
+0x41,0x31,0x6f,0xb0
+
+# CHECK: s_movk_i32 ttmp11, 0x3141 ; encoding: [0x41,0x31,0x7b,0xb0]
+0x41,0x31,0x7b,0xb0
+
+# CHECK: s_movk_i32 m0, 0x3141 ; encoding: [0x41,0x31,0x7c,0xb0]
+0x41,0x31,0x7c,0xb0
+
+# CHECK: s_movk_i32 exec_lo, 0x3141 ; encoding: [0x41,0x31,0x7e,0xb0]
+0x41,0x31,0x7e,0xb0
+
+# CHECK: s_movk_i32 exec_hi, 0x3141 ; encoding: [0x41,0x31,0x7f,0xb0]
+0x41,0x31,0x7f,0xb0
+
+# CHECK: s_movk_i32 s0, 0xc1d1 ; encoding: [0xd1,0xc1,0x00,0xb0]
+0xd1,0xc1,0x00,0xb0
+
+# CHECK: s_cmovk_i32 s0, 0x3141 ; encoding: [0x41,0x31,0x80,0xb0]
+0x41,0x31,0x80,0xb0
+
+# CHECK: s_cmovk_i32 s101, 0x3141 ; encoding: [0x41,0x31,0xe5,0xb0]
+0x41,0x31,0xe5,0xb0
+
+# CHECK: s_cmovk_i32 flat_scratch_lo, 0x3141 ; encoding: [0x41,0x31,0xe6,0xb0]
+0x41,0x31,0xe6,0xb0
+
+# CHECK: s_cmovk_i32 flat_scratch_hi, 0x3141 ; encoding: [0x41,0x31,0xe7,0xb0]
+0x41,0x31,0xe7,0xb0
+
+# CHECK: s_cmovk_i32 vcc_lo, 0x3141 ; encoding: [0x41,0x31,0xea,0xb0]
+0x41,0x31,0xea,0xb0
+
+# CHECK: s_cmovk_i32 vcc_hi, 0x3141 ; encoding: [0x41,0x31,0xeb,0xb0]
+0x41,0x31,0xeb,0xb0
+
+# CHECK: s_cmovk_i32 tba_lo, 0x3141 ; encoding: [0x41,0x31,0xec,0xb0]
+0x41,0x31,0xec,0xb0
+
+# CHECK: s_cmovk_i32 tba_hi, 0x3141 ; encoding: [0x41,0x31,0xed,0xb0]
+0x41,0x31,0xed,0xb0
+
+# CHECK: s_cmovk_i32 tma_lo, 0x3141 ; encoding: [0x41,0x31,0xee,0xb0]
+0x41,0x31,0xee,0xb0
+
+# CHECK: s_cmovk_i32 tma_hi, 0x3141 ; encoding: [0x41,0x31,0xef,0xb0]
+0x41,0x31,0xef,0xb0
+
+# CHECK: s_cmovk_i32 ttmp11, 0x3141 ; encoding: [0x41,0x31,0xfb,0xb0]
+0x41,0x31,0xfb,0xb0
+
+# CHECK: s_cmovk_i32 m0, 0x3141 ; encoding: [0x41,0x31,0xfc,0xb0]
+0x41,0x31,0xfc,0xb0
+
+# CHECK: s_cmovk_i32 exec_lo, 0x3141 ; encoding: [0x41,0x31,0xfe,0xb0]
+0x41,0x31,0xfe,0xb0
+
+# CHECK: s_cmovk_i32 exec_hi, 0x3141 ; encoding: [0x41,0x31,0xff,0xb0]
+0x41,0x31,0xff,0xb0
+
+# CHECK: s_cmovk_i32 s0, 0xc1d1 ; encoding: [0xd1,0xc1,0x80,0xb0]
+0xd1,0xc1,0x80,0xb0
+
+# CHECK: s_cmpk_eq_i32 s0, 0x3141 ; encoding: [0x41,0x31,0x00,0xb1]
+0x41,0x31,0x00,0xb1
+
+# CHECK: s_cmpk_eq_i32 s101, 0x3141 ; encoding: [0x41,0x31,0x65,0xb1]
+0x41,0x31,0x65,0xb1
+
+# CHECK: s_cmpk_eq_i32 flat_scratch_lo, 0x3141 ; encoding: [0x41,0x31,0x66,0xb1]
+0x41,0x31,0x66,0xb1
+
+# CHECK: s_cmpk_eq_i32 flat_scratch_hi, 0x3141 ; encoding: [0x41,0x31,0x67,0xb1]
+0x41,0x31,0x67,0xb1
+
+# CHECK: s_cmpk_eq_i32 vcc_lo, 0x3141 ; encoding: [0x41,0x31,0x6a,0xb1]
+0x41,0x31,0x6a,0xb1
+
+# CHECK: s_cmpk_eq_i32 vcc_hi, 0x3141 ; encoding: [0x41,0x31,0x6b,0xb1]
+0x41,0x31,0x6b,0xb1
+
+# CHECK: s_cmpk_eq_i32 tba_lo, 0x3141 ; encoding: [0x41,0x31,0x6c,0xb1]
+0x41,0x31,0x6c,0xb1
+
+# CHECK: s_cmpk_eq_i32 tba_hi, 0x3141 ; encoding: [0x41,0x31,0x6d,0xb1]
+0x41,0x31,0x6d,0xb1
+
+# CHECK: s_cmpk_eq_i32 tma_lo, 0x3141 ; encoding: [0x41,0x31,0x6e,0xb1]
+0x41,0x31,0x6e,0xb1
+
+# CHECK: s_cmpk_eq_i32 tma_hi, 0x3141 ; encoding: [0x41,0x31,0x6f,0xb1]
+0x41,0x31,0x6f,0xb1
+
+# CHECK: s_cmpk_eq_i32 ttmp11, 0x3141 ; encoding: [0x41,0x31,0x7b,0xb1]
+0x41,0x31,0x7b,0xb1
+
+# CHECK: s_cmpk_eq_i32 m0, 0x3141 ; encoding: [0x41,0x31,0x7c,0xb1]
+0x41,0x31,0x7c,0xb1
+
+# CHECK: s_cmpk_eq_i32 exec_lo, 0x3141 ; encoding: [0x41,0x31,0x7e,0xb1]
+0x41,0x31,0x7e,0xb1
+
+# CHECK: s_cmpk_eq_i32 exec_hi, 0x3141 ; encoding: [0x41,0x31,0x7f,0xb1]
+0x41,0x31,0x7f,0xb1
+
+# CHECK: s_cmpk_eq_i32 s0, 0xc1d1 ; encoding: [0xd1,0xc1,0x00,0xb1]
+0xd1,0xc1,0x00,0xb1
+
+# CHECK: s_cmpk_lg_i32 s0, 0x3141 ; encoding: [0x41,0x31,0x80,0xb1]
+0x41,0x31,0x80,0xb1
+
+# CHECK: s_cmpk_lg_i32 s101, 0x3141 ; encoding: [0x41,0x31,0xe5,0xb1]
+0x41,0x31,0xe5,0xb1
+
+# CHECK: s_cmpk_lg_i32 flat_scratch_lo, 0x3141 ; encoding: [0x41,0x31,0xe6,0xb1]
+0x41,0x31,0xe6,0xb1
+
+# CHECK: s_cmpk_lg_i32 flat_scratch_hi, 0x3141 ; encoding: [0x41,0x31,0xe7,0xb1]
+0x41,0x31,0xe7,0xb1
+
+# CHECK: s_cmpk_lg_i32 vcc_lo, 0x3141 ; encoding: [0x41,0x31,0xea,0xb1]
+0x41,0x31,0xea,0xb1
+
+# CHECK: s_cmpk_lg_i32 vcc_hi, 0x3141 ; encoding: [0x41,0x31,0xeb,0xb1]
+0x41,0x31,0xeb,0xb1
+
+# CHECK: s_cmpk_lg_i32 tba_lo, 0x3141 ; encoding: [0x41,0x31,0xec,0xb1]
+0x41,0x31,0xec,0xb1
+
+# CHECK: s_cmpk_lg_i32 tba_hi, 0x3141 ; encoding: [0x41,0x31,0xed,0xb1]
+0x41,0x31,0xed,0xb1
+
+# CHECK: s_cmpk_lg_i32 tma_lo, 0x3141 ; encoding: [0x41,0x31,0xee,0xb1]
+0x41,0x31,0xee,0xb1
+
+# CHECK: s_cmpk_lg_i32 tma_hi, 0x3141 ; encoding: [0x41,0x31,0xef,0xb1]
+0x41,0x31,0xef,0xb1
+
+# CHECK: s_cmpk_lg_i32 ttmp11, 0x3141 ; encoding: [0x41,0x31,0xfb,0xb1]
+0x41,0x31,0xfb,0xb1
+
+# CHECK: s_cmpk_lg_i32 m0, 0x3141 ; encoding: [0x41,0x31,0xfc,0xb1]
+0x41,0x31,0xfc,0xb1
+
+# CHECK: s_cmpk_lg_i32 exec_lo, 0x3141 ; encoding: [0x41,0x31,0xfe,0xb1]
+0x41,0x31,0xfe,0xb1
+
+# CHECK: s_cmpk_lg_i32 exec_hi, 0x3141 ; encoding: [0x41,0x31,0xff,0xb1]
+0x41,0x31,0xff,0xb1
+
+# CHECK: s_cmpk_lg_i32 s0, 0xc1d1 ; encoding: [0xd1,0xc1,0x80,0xb1]
+0xd1,0xc1,0x80,0xb1
+
+# CHECK: s_cmpk_gt_i32 s0, 0x3141 ; encoding: [0x41,0x31,0x00,0xb2]
+0x41,0x31,0x00,0xb2
+
+# CHECK: s_cmpk_gt_i32 s101, 0x3141 ; encoding: [0x41,0x31,0x65,0xb2]
+0x41,0x31,0x65,0xb2
+
+# CHECK: s_cmpk_gt_i32 flat_scratch_lo, 0x3141 ; encoding: [0x41,0x31,0x66,0xb2]
+0x41,0x31,0x66,0xb2
+
+# CHECK: s_cmpk_gt_i32 flat_scratch_hi, 0x3141 ; encoding: [0x41,0x31,0x67,0xb2]
+0x41,0x31,0x67,0xb2
+
+# CHECK: s_cmpk_gt_i32 vcc_lo, 0x3141 ; encoding: [0x41,0x31,0x6a,0xb2]
+0x41,0x31,0x6a,0xb2
+
+# CHECK: s_cmpk_gt_i32 vcc_hi, 0x3141 ; encoding: [0x41,0x31,0x6b,0xb2]
+0x41,0x31,0x6b,0xb2
+
+# CHECK: s_cmpk_gt_i32 tba_lo, 0x3141 ; encoding: [0x41,0x31,0x6c,0xb2]
+0x41,0x31,0x6c,0xb2
+
+# CHECK: s_cmpk_gt_i32 tba_hi, 0x3141 ; encoding: [0x41,0x31,0x6d,0xb2]
+0x41,0x31,0x6d,0xb2
+
+# CHECK: s_cmpk_gt_i32 tma_lo, 0x3141 ; encoding: [0x41,0x31,0x6e,0xb2]
+0x41,0x31,0x6e,0xb2
+
+# CHECK: s_cmpk_gt_i32 tma_hi, 0x3141 ; encoding: [0x41,0x31,0x6f,0xb2]
+0x41,0x31,0x6f,0xb2
+
+# CHECK: s_cmpk_gt_i32 ttmp11, 0x3141 ; encoding: [0x41,0x31,0x7b,0xb2]
+0x41,0x31,0x7b,0xb2
+
+# CHECK: s_cmpk_gt_i32 m0, 0x3141 ; encoding: [0x41,0x31,0x7c,0xb2]
+0x41,0x31,0x7c,0xb2
+
+# CHECK: s_cmpk_gt_i32 exec_lo, 0x3141 ; encoding: [0x41,0x31,0x7e,0xb2]
+0x41,0x31,0x7e,0xb2
+
+# CHECK: s_cmpk_gt_i32 exec_hi, 0x3141 ; encoding: [0x41,0x31,0x7f,0xb2]
+0x41,0x31,0x7f,0xb2
+
+# CHECK: s_cmpk_gt_i32 s0, 0xc1d1 ; encoding: [0xd1,0xc1,0x00,0xb2]
+0xd1,0xc1,0x00,0xb2
+
+# CHECK: s_cmpk_ge_i32 s0, 0x3141 ; encoding: [0x41,0x31,0x80,0xb2]
+0x41,0x31,0x80,0xb2
+
+# CHECK: s_cmpk_ge_i32 s101, 0x3141 ; encoding: [0x41,0x31,0xe5,0xb2]
+0x41,0x31,0xe5,0xb2
+
+# CHECK: s_cmpk_ge_i32 flat_scratch_lo, 0x3141 ; encoding: [0x41,0x31,0xe6,0xb2]
+0x41,0x31,0xe6,0xb2
+
+# CHECK: s_cmpk_ge_i32 flat_scratch_hi, 0x3141 ; encoding: [0x41,0x31,0xe7,0xb2]
+0x41,0x31,0xe7,0xb2
+
+# CHECK: s_cmpk_ge_i32 vcc_lo, 0x3141 ; encoding: [0x41,0x31,0xea,0xb2]
+0x41,0x31,0xea,0xb2
+
+# CHECK: s_cmpk_ge_i32 vcc_hi, 0x3141 ; encoding: [0x41,0x31,0xeb,0xb2]
+0x41,0x31,0xeb,0xb2
+
+# CHECK: s_cmpk_ge_i32 tba_lo, 0x3141 ; encoding: [0x41,0x31,0xec,0xb2]
+0x41,0x31,0xec,0xb2
+
+# CHECK: s_cmpk_ge_i32 tba_hi, 0x3141 ; encoding: [0x41,0x31,0xed,0xb2]
+0x41,0x31,0xed,0xb2
+
+# CHECK: s_cmpk_ge_i32 tma_lo, 0x3141 ; encoding: [0x41,0x31,0xee,0xb2]
+0x41,0x31,0xee,0xb2
+
+# CHECK: s_cmpk_ge_i32 tma_hi, 0x3141 ; encoding: [0x41,0x31,0xef,0xb2]
+0x41,0x31,0xef,0xb2
+
+# CHECK: s_cmpk_ge_i32 ttmp11, 0x3141 ; encoding: [0x41,0x31,0xfb,0xb2]
+0x41,0x31,0xfb,0xb2
+
+# CHECK: s_cmpk_ge_i32 m0, 0x3141 ; encoding: [0x41,0x31,0xfc,0xb2]
+0x41,0x31,0xfc,0xb2
+
+# CHECK: s_cmpk_ge_i32 exec_lo, 0x3141 ; encoding: [0x41,0x31,0xfe,0xb2]
+0x41,0x31,0xfe,0xb2
+
+# CHECK: s_cmpk_ge_i32 exec_hi, 0x3141 ; encoding: [0x41,0x31,0xff,0xb2]
+0x41,0x31,0xff,0xb2
+
+# CHECK: s_cmpk_ge_i32 s0, 0xc1d1 ; encoding: [0xd1,0xc1,0x80,0xb2]
+0xd1,0xc1,0x80,0xb2
+
+# CHECK: s_cmpk_lt_i32 s0, 0x3141 ; encoding: [0x41,0x31,0x00,0xb3]
+0x41,0x31,0x00,0xb3
+
+# CHECK: s_cmpk_lt_i32 s101, 0x3141 ; encoding: [0x41,0x31,0x65,0xb3]
+0x41,0x31,0x65,0xb3
+
+# CHECK: s_cmpk_lt_i32 flat_scratch_lo, 0x3141 ; encoding: [0x41,0x31,0x66,0xb3]
+0x41,0x31,0x66,0xb3
+
+# CHECK: s_cmpk_lt_i32 flat_scratch_hi, 0x3141 ; encoding: [0x41,0x31,0x67,0xb3]
+0x41,0x31,0x67,0xb3
+
+# CHECK: s_cmpk_lt_i32 vcc_lo, 0x3141 ; encoding: [0x41,0x31,0x6a,0xb3]
+0x41,0x31,0x6a,0xb3
+
+# CHECK: s_cmpk_lt_i32 vcc_hi, 0x3141 ; encoding: [0x41,0x31,0x6b,0xb3]
+0x41,0x31,0x6b,0xb3
+
+# CHECK: s_cmpk_lt_i32 tba_lo, 0x3141 ; encoding: [0x41,0x31,0x6c,0xb3]
+0x41,0x31,0x6c,0xb3
+
+# CHECK: s_cmpk_lt_i32 tba_hi, 0x3141 ; encoding: [0x41,0x31,0x6d,0xb3]
+0x41,0x31,0x6d,0xb3
+
+# CHECK: s_cmpk_lt_i32 tma_lo, 0x3141 ; encoding: [0x41,0x31,0x6e,0xb3]
+0x41,0x31,0x6e,0xb3
+
+# CHECK: s_cmpk_lt_i32 tma_hi, 0x3141 ; encoding: [0x41,0x31,0x6f,0xb3]
+0x41,0x31,0x6f,0xb3
+
+# CHECK: s_cmpk_lt_i32 ttmp11, 0x3141 ; encoding: [0x41,0x31,0x7b,0xb3]
+0x41,0x31,0x7b,0xb3
+
+# CHECK: s_cmpk_lt_i32 m0, 0x3141 ; encoding: [0x41,0x31,0x7c,0xb3]
+0x41,0x31,0x7c,0xb3
+
+# CHECK: s_cmpk_lt_i32 exec_lo, 0x3141 ; encoding: [0x41,0x31,0x7e,0xb3]
+0x41,0x31,0x7e,0xb3
+
+# CHECK: s_cmpk_lt_i32 exec_hi, 0x3141 ; encoding: [0x41,0x31,0x7f,0xb3]
+0x41,0x31,0x7f,0xb3
+
+# CHECK: s_cmpk_lt_i32 s0, 0xc1d1 ; encoding: [0xd1,0xc1,0x00,0xb3]
+0xd1,0xc1,0x00,0xb3
+
+# CHECK: s_cmpk_le_i32 s0, 0x3141 ; encoding: [0x41,0x31,0x80,0xb3]
+0x41,0x31,0x80,0xb3
+
+# CHECK: s_cmpk_le_i32 s101, 0x3141 ; encoding: [0x41,0x31,0xe5,0xb3]
+0x41,0x31,0xe5,0xb3
+
+# CHECK: s_cmpk_le_i32 flat_scratch_lo, 0x3141 ; encoding: [0x41,0x31,0xe6,0xb3]
+0x41,0x31,0xe6,0xb3
+
+# CHECK: s_cmpk_le_i32 flat_scratch_hi, 0x3141 ; encoding: [0x41,0x31,0xe7,0xb3]
+0x41,0x31,0xe7,0xb3
+
+# CHECK: s_cmpk_le_i32 vcc_lo, 0x3141 ; encoding: [0x41,0x31,0xea,0xb3]
+0x41,0x31,0xea,0xb3
+
+# CHECK: s_cmpk_le_i32 vcc_hi, 0x3141 ; encoding: [0x41,0x31,0xeb,0xb3]
+0x41,0x31,0xeb,0xb3
+
+# CHECK: s_cmpk_le_i32 tba_lo, 0x3141 ; encoding: [0x41,0x31,0xec,0xb3]
+0x41,0x31,0xec,0xb3
+
+# CHECK: s_cmpk_le_i32 tba_hi, 0x3141 ; encoding: [0x41,0x31,0xed,0xb3]
+0x41,0x31,0xed,0xb3
+
+# CHECK: s_cmpk_le_i32 tma_lo, 0x3141 ; encoding: [0x41,0x31,0xee,0xb3]
+0x41,0x31,0xee,0xb3
+
+# CHECK: s_cmpk_le_i32 tma_hi, 0x3141 ; encoding: [0x41,0x31,0xef,0xb3]
+0x41,0x31,0xef,0xb3
+
+# CHECK: s_cmpk_le_i32 ttmp11, 0x3141 ; encoding: [0x41,0x31,0xfb,0xb3]
+0x41,0x31,0xfb,0xb3
+
+# CHECK: s_cmpk_le_i32 m0, 0x3141 ; encoding: [0x41,0x31,0xfc,0xb3]
+0x41,0x31,0xfc,0xb3
+
+# CHECK: s_cmpk_le_i32 exec_lo, 0x3141 ; encoding: [0x41,0x31,0xfe,0xb3]
+0x41,0x31,0xfe,0xb3
+
+# CHECK: s_cmpk_le_i32 exec_hi, 0x3141 ; encoding: [0x41,0x31,0xff,0xb3]
+0x41,0x31,0xff,0xb3
+
+# CHECK: s_cmpk_le_i32 s0, 0xc1d1 ; encoding: [0xd1,0xc1,0x80,0xb3]
+0xd1,0xc1,0x80,0xb3
+
+# CHECK: s_cmpk_eq_u32 s0, 0x3141 ; encoding: [0x41,0x31,0x00,0xb4]
+0x41,0x31,0x00,0xb4
+
+# CHECK: s_cmpk_eq_u32 s101, 0x3141 ; encoding: [0x41,0x31,0x65,0xb4]
+0x41,0x31,0x65,0xb4
+
+# CHECK: s_cmpk_eq_u32 flat_scratch_lo, 0x3141 ; encoding: [0x41,0x31,0x66,0xb4]
+0x41,0x31,0x66,0xb4
+
+# CHECK: s_cmpk_eq_u32 flat_scratch_hi, 0x3141 ; encoding: [0x41,0x31,0x67,0xb4]
+0x41,0x31,0x67,0xb4
+
+# CHECK: s_cmpk_eq_u32 vcc_lo, 0x3141 ; encoding: [0x41,0x31,0x6a,0xb4]
+0x41,0x31,0x6a,0xb4
+
+# CHECK: s_cmpk_eq_u32 vcc_hi, 0x3141 ; encoding: [0x41,0x31,0x6b,0xb4]
+0x41,0x31,0x6b,0xb4
+
+# CHECK: s_cmpk_eq_u32 tba_lo, 0x3141 ; encoding: [0x41,0x31,0x6c,0xb4]
+0x41,0x31,0x6c,0xb4
+
+# CHECK: s_cmpk_eq_u32 tba_hi, 0x3141 ; encoding: [0x41,0x31,0x6d,0xb4]
+0x41,0x31,0x6d,0xb4
+
+# CHECK: s_cmpk_eq_u32 tma_lo, 0x3141 ; encoding: [0x41,0x31,0x6e,0xb4]
+0x41,0x31,0x6e,0xb4
+
+# CHECK: s_cmpk_eq_u32 tma_hi, 0x3141 ; encoding: [0x41,0x31,0x6f,0xb4]
+0x41,0x31,0x6f,0xb4
+
+# CHECK: s_cmpk_eq_u32 ttmp11, 0x3141 ; encoding: [0x41,0x31,0x7b,0xb4]
+0x41,0x31,0x7b,0xb4
+
+# CHECK: s_cmpk_eq_u32 m0, 0x3141 ; encoding: [0x41,0x31,0x7c,0xb4]
+0x41,0x31,0x7c,0xb4
+
+# CHECK: s_cmpk_eq_u32 exec_lo, 0x3141 ; encoding: [0x41,0x31,0x7e,0xb4]
+0x41,0x31,0x7e,0xb4
+
+# CHECK: s_cmpk_eq_u32 exec_hi, 0x3141 ; encoding: [0x41,0x31,0x7f,0xb4]
+0x41,0x31,0x7f,0xb4
+
+# CHECK: s_cmpk_eq_u32 s0, 0xc1d1 ; encoding: [0xd1,0xc1,0x00,0xb4]
+0xd1,0xc1,0x00,0xb4
+
+# CHECK: s_cmpk_lg_u32 s0, 0x3141 ; encoding: [0x41,0x31,0x80,0xb4]
+0x41,0x31,0x80,0xb4
+
+# CHECK: s_cmpk_lg_u32 s101, 0x3141 ; encoding: [0x41,0x31,0xe5,0xb4]
+0x41,0x31,0xe5,0xb4
+
+# CHECK: s_cmpk_lg_u32 flat_scratch_lo, 0x3141 ; encoding: [0x41,0x31,0xe6,0xb4]
+0x41,0x31,0xe6,0xb4
+
+# CHECK: s_cmpk_lg_u32 flat_scratch_hi, 0x3141 ; encoding: [0x41,0x31,0xe7,0xb4]
+0x41,0x31,0xe7,0xb4
+
+# CHECK: s_cmpk_lg_u32 vcc_lo, 0x3141 ; encoding: [0x41,0x31,0xea,0xb4]
+0x41,0x31,0xea,0xb4
+
+# CHECK: s_cmpk_lg_u32 vcc_hi, 0x3141 ; encoding: [0x41,0x31,0xeb,0xb4]
+0x41,0x31,0xeb,0xb4
+
+# CHECK: s_cmpk_lg_u32 tba_lo, 0x3141 ; encoding: [0x41,0x31,0xec,0xb4]
+0x41,0x31,0xec,0xb4
+
+# CHECK: s_cmpk_lg_u32 tba_hi, 0x3141 ; encoding: [0x41,0x31,0xed,0xb4]
+0x41,0x31,0xed,0xb4
+
+# CHECK: s_cmpk_lg_u32 tma_lo, 0x3141 ; encoding: [0x41,0x31,0xee,0xb4]
+0x41,0x31,0xee,0xb4
+
+# CHECK: s_cmpk_lg_u32 tma_hi, 0x3141 ; encoding: [0x41,0x31,0xef,0xb4]
+0x41,0x31,0xef,0xb4
+
+# CHECK: s_cmpk_lg_u32 ttmp11, 0x3141 ; encoding: [0x41,0x31,0xfb,0xb4]
+0x41,0x31,0xfb,0xb4
+
+# CHECK: s_cmpk_lg_u32 m0, 0x3141 ; encoding: [0x41,0x31,0xfc,0xb4]
+0x41,0x31,0xfc,0xb4
+
+# CHECK: s_cmpk_lg_u32 exec_lo, 0x3141 ; encoding: [0x41,0x31,0xfe,0xb4]
+0x41,0x31,0xfe,0xb4
+
+# CHECK: s_cmpk_lg_u32 exec_hi, 0x3141 ; encoding: [0x41,0x31,0xff,0xb4]
+0x41,0x31,0xff,0xb4
+
+# CHECK: s_cmpk_lg_u32 s0, 0xc1d1 ; encoding: [0xd1,0xc1,0x80,0xb4]
+0xd1,0xc1,0x80,0xb4
+
+# CHECK: s_cmpk_gt_u32 s0, 0x3141 ; encoding: [0x41,0x31,0x00,0xb5]
+0x41,0x31,0x00,0xb5
+
+# CHECK: s_cmpk_gt_u32 s101, 0x3141 ; encoding: [0x41,0x31,0x65,0xb5]
+0x41,0x31,0x65,0xb5
+
+# CHECK: s_cmpk_gt_u32 flat_scratch_lo, 0x3141 ; encoding: [0x41,0x31,0x66,0xb5]
+0x41,0x31,0x66,0xb5
+
+# CHECK: s_cmpk_gt_u32 flat_scratch_hi, 0x3141 ; encoding: [0x41,0x31,0x67,0xb5]
+0x41,0x31,0x67,0xb5
+
+# CHECK: s_cmpk_gt_u32 vcc_lo, 0x3141 ; encoding: [0x41,0x31,0x6a,0xb5]
+0x41,0x31,0x6a,0xb5
+
+# CHECK: s_cmpk_gt_u32 vcc_hi, 0x3141 ; encoding: [0x41,0x31,0x6b,0xb5]
+0x41,0x31,0x6b,0xb5
+
+# CHECK: s_cmpk_gt_u32 tba_lo, 0x3141 ; encoding: [0x41,0x31,0x6c,0xb5]
+0x41,0x31,0x6c,0xb5
+
+# CHECK: s_cmpk_gt_u32 tba_hi, 0x3141 ; encoding: [0x41,0x31,0x6d,0xb5]
+0x41,0x31,0x6d,0xb5
+
+# CHECK: s_cmpk_gt_u32 tma_lo, 0x3141 ; encoding: [0x41,0x31,0x6e,0xb5]
+0x41,0x31,0x6e,0xb5
+
+# CHECK: s_cmpk_gt_u32 tma_hi, 0x3141 ; encoding: [0x41,0x31,0x6f,0xb5]
+0x41,0x31,0x6f,0xb5
+
+# CHECK: s_cmpk_gt_u32 ttmp11, 0x3141 ; encoding: [0x41,0x31,0x7b,0xb5]
+0x41,0x31,0x7b,0xb5
+
+# CHECK: s_cmpk_gt_u32 m0, 0x3141 ; encoding: [0x41,0x31,0x7c,0xb5]
+0x41,0x31,0x7c,0xb5
+
+# CHECK: s_cmpk_gt_u32 exec_lo, 0x3141 ; encoding: [0x41,0x31,0x7e,0xb5]
+0x41,0x31,0x7e,0xb5
+
+# CHECK: s_cmpk_gt_u32 exec_hi, 0x3141 ; encoding: [0x41,0x31,0x7f,0xb5]
+0x41,0x31,0x7f,0xb5
+
+# CHECK: s_cmpk_gt_u32 s0, 0xc1d1 ; encoding: [0xd1,0xc1,0x00,0xb5]
+0xd1,0xc1,0x00,0xb5
+
+# CHECK: s_cmpk_ge_u32 s0, 0x3141 ; encoding: [0x41,0x31,0x80,0xb5]
+0x41,0x31,0x80,0xb5
+
+# CHECK: s_cmpk_ge_u32 s101, 0x3141 ; encoding: [0x41,0x31,0xe5,0xb5]
+0x41,0x31,0xe5,0xb5
+
+# CHECK: s_cmpk_ge_u32 flat_scratch_lo, 0x3141 ; encoding: [0x41,0x31,0xe6,0xb5]
+0x41,0x31,0xe6,0xb5
+
+# CHECK: s_cmpk_ge_u32 flat_scratch_hi, 0x3141 ; encoding: [0x41,0x31,0xe7,0xb5]
+0x41,0x31,0xe7,0xb5
+
+# CHECK: s_cmpk_ge_u32 vcc_lo, 0x3141 ; encoding: [0x41,0x31,0xea,0xb5]
+0x41,0x31,0xea,0xb5
+
+# CHECK: s_cmpk_ge_u32 vcc_hi, 0x3141 ; encoding: [0x41,0x31,0xeb,0xb5]
+0x41,0x31,0xeb,0xb5
+
+# CHECK: s_cmpk_ge_u32 tba_lo, 0x3141 ; encoding: [0x41,0x31,0xec,0xb5]
+0x41,0x31,0xec,0xb5
+
+# CHECK: s_cmpk_ge_u32 tba_hi, 0x3141 ; encoding: [0x41,0x31,0xed,0xb5]
+0x41,0x31,0xed,0xb5
+
+# CHECK: s_cmpk_ge_u32 tma_lo, 0x3141 ; encoding: [0x41,0x31,0xee,0xb5]
+0x41,0x31,0xee,0xb5
+
+# CHECK: s_cmpk_ge_u32 tma_hi, 0x3141 ; encoding: [0x41,0x31,0xef,0xb5]
+0x41,0x31,0xef,0xb5
+
+# CHECK: s_cmpk_ge_u32 ttmp11, 0x3141 ; encoding: [0x41,0x31,0xfb,0xb5]
+0x41,0x31,0xfb,0xb5
+
+# CHECK: s_cmpk_ge_u32 m0, 0x3141 ; encoding: [0x41,0x31,0xfc,0xb5]
+0x41,0x31,0xfc,0xb5
+
+# CHECK: s_cmpk_ge_u32 exec_lo, 0x3141 ; encoding: [0x41,0x31,0xfe,0xb5]
+0x41,0x31,0xfe,0xb5
+
+# CHECK: s_cmpk_ge_u32 exec_hi, 0x3141 ; encoding: [0x41,0x31,0xff,0xb5]
+0x41,0x31,0xff,0xb5
+
+# CHECK: s_cmpk_ge_u32 s0, 0xc1d1 ; encoding: [0xd1,0xc1,0x80,0xb5]
+0xd1,0xc1,0x80,0xb5
+
+# CHECK: s_cmpk_lt_u32 s0, 0x3141 ; encoding: [0x41,0x31,0x00,0xb6]
+0x41,0x31,0x00,0xb6
+
+# CHECK: s_cmpk_lt_u32 s101, 0x3141 ; encoding: [0x41,0x31,0x65,0xb6]
+0x41,0x31,0x65,0xb6
+
+# CHECK: s_cmpk_lt_u32 flat_scratch_lo, 0x3141 ; encoding: [0x41,0x31,0x66,0xb6]
+0x41,0x31,0x66,0xb6
+
+# CHECK: s_cmpk_lt_u32 flat_scratch_hi, 0x3141 ; encoding: [0x41,0x31,0x67,0xb6]
+0x41,0x31,0x67,0xb6
+
+# CHECK: s_cmpk_lt_u32 vcc_lo, 0x3141 ; encoding: [0x41,0x31,0x6a,0xb6]
+0x41,0x31,0x6a,0xb6
+
+# CHECK: s_cmpk_lt_u32 vcc_hi, 0x3141 ; encoding: [0x41,0x31,0x6b,0xb6]
+0x41,0x31,0x6b,0xb6
+
+# CHECK: s_cmpk_lt_u32 tba_lo, 0x3141 ; encoding: [0x41,0x31,0x6c,0xb6]
+0x41,0x31,0x6c,0xb6
+
+# CHECK: s_cmpk_lt_u32 tba_hi, 0x3141 ; encoding: [0x41,0x31,0x6d,0xb6]
+0x41,0x31,0x6d,0xb6
+
+# CHECK: s_cmpk_lt_u32 tma_lo, 0x3141 ; encoding: [0x41,0x31,0x6e,0xb6]
+0x41,0x31,0x6e,0xb6
+
+# CHECK: s_cmpk_lt_u32 tma_hi, 0x3141 ; encoding: [0x41,0x31,0x6f,0xb6]
+0x41,0x31,0x6f,0xb6
+
+# CHECK: s_cmpk_lt_u32 ttmp11, 0x3141 ; encoding: [0x41,0x31,0x7b,0xb6]
+0x41,0x31,0x7b,0xb6
+
+# CHECK: s_cmpk_lt_u32 m0, 0x3141 ; encoding: [0x41,0x31,0x7c,0xb6]
+0x41,0x31,0x7c,0xb6
+
+# CHECK: s_cmpk_lt_u32 exec_lo, 0x3141 ; encoding: [0x41,0x31,0x7e,0xb6]
+0x41,0x31,0x7e,0xb6
+
+# CHECK: s_cmpk_lt_u32 exec_hi, 0x3141 ; encoding: [0x41,0x31,0x7f,0xb6]
+0x41,0x31,0x7f,0xb6
+
+# CHECK: s_cmpk_lt_u32 s0, 0xc1d1 ; encoding: [0xd1,0xc1,0x00,0xb6]
+0xd1,0xc1,0x00,0xb6
+
+# CHECK: s_cmpk_le_u32 s0, 0x3141 ; encoding: [0x41,0x31,0x80,0xb6]
+0x41,0x31,0x80,0xb6
+
+# CHECK: s_cmpk_le_u32 s101, 0x3141 ; encoding: [0x41,0x31,0xe5,0xb6]
+0x41,0x31,0xe5,0xb6
+
+# CHECK: s_cmpk_le_u32 flat_scratch_lo, 0x3141 ; encoding: [0x41,0x31,0xe6,0xb6]
+0x41,0x31,0xe6,0xb6
+
+# CHECK: s_cmpk_le_u32 flat_scratch_hi, 0x3141 ; encoding: [0x41,0x31,0xe7,0xb6]
+0x41,0x31,0xe7,0xb6
+
+# CHECK: s_cmpk_le_u32 vcc_lo, 0x3141 ; encoding: [0x41,0x31,0xea,0xb6]
+0x41,0x31,0xea,0xb6
+
+# CHECK: s_cmpk_le_u32 vcc_hi, 0x3141 ; encoding: [0x41,0x31,0xeb,0xb6]
+0x41,0x31,0xeb,0xb6
+
+# CHECK: s_cmpk_le_u32 tba_lo, 0x3141 ; encoding: [0x41,0x31,0xec,0xb6]
+0x41,0x31,0xec,0xb6
+
+# CHECK: s_cmpk_le_u32 tba_hi, 0x3141 ; encoding: [0x41,0x31,0xed,0xb6]
+0x41,0x31,0xed,0xb6
+
+# CHECK: s_cmpk_le_u32 tma_lo, 0x3141 ; encoding: [0x41,0x31,0xee,0xb6]
+0x41,0x31,0xee,0xb6
+
+# CHECK: s_cmpk_le_u32 tma_hi, 0x3141 ; encoding: [0x41,0x31,0xef,0xb6]
+0x41,0x31,0xef,0xb6
+
+# CHECK: s_cmpk_le_u32 ttmp11, 0x3141 ; encoding: [0x41,0x31,0xfb,0xb6]
+0x41,0x31,0xfb,0xb6
+
+# CHECK: s_cmpk_le_u32 m0, 0x3141 ; encoding: [0x41,0x31,0xfc,0xb6]
+0x41,0x31,0xfc,0xb6
+
+# CHECK: s_cmpk_le_u32 exec_lo, 0x3141 ; encoding: [0x41,0x31,0xfe,0xb6]
+0x41,0x31,0xfe,0xb6
+
+# CHECK: s_cmpk_le_u32 exec_hi, 0x3141 ; encoding: [0x41,0x31,0xff,0xb6]
+0x41,0x31,0xff,0xb6
+
+# CHECK: s_cmpk_le_u32 s0, 0xc1d1 ; encoding: [0xd1,0xc1,0x80,0xb6]
+0xd1,0xc1,0x80,0xb6
+
+# CHECK: s_addk_i32 s0, 0x3141 ; encoding: [0x41,0x31,0x00,0xb7]
+0x41,0x31,0x00,0xb7
+
+# CHECK: s_addk_i32 s101, 0x3141 ; encoding: [0x41,0x31,0x65,0xb7]
+0x41,0x31,0x65,0xb7
+
+# CHECK: s_addk_i32 flat_scratch_lo, 0x3141 ; encoding: [0x41,0x31,0x66,0xb7]
+0x41,0x31,0x66,0xb7
+
+# CHECK: s_addk_i32 flat_scratch_hi, 0x3141 ; encoding: [0x41,0x31,0x67,0xb7]
+0x41,0x31,0x67,0xb7
+
+# CHECK: s_addk_i32 vcc_lo, 0x3141 ; encoding: [0x41,0x31,0x6a,0xb7]
+0x41,0x31,0x6a,0xb7
+
+# CHECK: s_addk_i32 vcc_hi, 0x3141 ; encoding: [0x41,0x31,0x6b,0xb7]
+0x41,0x31,0x6b,0xb7
+
+# CHECK: s_addk_i32 tba_lo, 0x3141 ; encoding: [0x41,0x31,0x6c,0xb7]
+0x41,0x31,0x6c,0xb7
+
+# CHECK: s_addk_i32 tba_hi, 0x3141 ; encoding: [0x41,0x31,0x6d,0xb7]
+0x41,0x31,0x6d,0xb7
+
+# CHECK: s_addk_i32 tma_lo, 0x3141 ; encoding: [0x41,0x31,0x6e,0xb7]
+0x41,0x31,0x6e,0xb7
+
+# CHECK: s_addk_i32 tma_hi, 0x3141 ; encoding: [0x41,0x31,0x6f,0xb7]
+0x41,0x31,0x6f,0xb7
+
+# CHECK: s_addk_i32 ttmp11, 0x3141 ; encoding: [0x41,0x31,0x7b,0xb7]
+0x41,0x31,0x7b,0xb7
+
+# CHECK: s_addk_i32 m0, 0x3141 ; encoding: [0x41,0x31,0x7c,0xb7]
+0x41,0x31,0x7c,0xb7
+
+# CHECK: s_addk_i32 exec_lo, 0x3141 ; encoding: [0x41,0x31,0x7e,0xb7]
+0x41,0x31,0x7e,0xb7
+
+# CHECK: s_addk_i32 exec_hi, 0x3141 ; encoding: [0x41,0x31,0x7f,0xb7]
+0x41,0x31,0x7f,0xb7
+
+# CHECK: s_addk_i32 s0, 0xc1d1 ; encoding: [0xd1,0xc1,0x00,0xb7]
+0xd1,0xc1,0x00,0xb7
+
+# CHECK: s_mulk_i32 s0, 0x3141 ; encoding: [0x41,0x31,0x80,0xb7]
+0x41,0x31,0x80,0xb7
+
+# CHECK: s_mulk_i32 s101, 0x3141 ; encoding: [0x41,0x31,0xe5,0xb7]
+0x41,0x31,0xe5,0xb7
+
+# CHECK: s_mulk_i32 flat_scratch_lo, 0x3141 ; encoding: [0x41,0x31,0xe6,0xb7]
+0x41,0x31,0xe6,0xb7
+
+# CHECK: s_mulk_i32 flat_scratch_hi, 0x3141 ; encoding: [0x41,0x31,0xe7,0xb7]
+0x41,0x31,0xe7,0xb7
+
+# CHECK: s_mulk_i32 vcc_lo, 0x3141 ; encoding: [0x41,0x31,0xea,0xb7]
+0x41,0x31,0xea,0xb7
+
+# CHECK: s_mulk_i32 vcc_hi, 0x3141 ; encoding: [0x41,0x31,0xeb,0xb7]
+0x41,0x31,0xeb,0xb7
+
+# CHECK: s_mulk_i32 tba_lo, 0x3141 ; encoding: [0x41,0x31,0xec,0xb7]
+0x41,0x31,0xec,0xb7
+
+# CHECK: s_mulk_i32 tba_hi, 0x3141 ; encoding: [0x41,0x31,0xed,0xb7]
+0x41,0x31,0xed,0xb7
+
+# CHECK: s_mulk_i32 tma_lo, 0x3141 ; encoding: [0x41,0x31,0xee,0xb7]
+0x41,0x31,0xee,0xb7
+
+# CHECK: s_mulk_i32 tma_hi, 0x3141 ; encoding: [0x41,0x31,0xef,0xb7]
+0x41,0x31,0xef,0xb7
+
+# CHECK: s_mulk_i32 ttmp11, 0x3141 ; encoding: [0x41,0x31,0xfb,0xb7]
+0x41,0x31,0xfb,0xb7
+
+# CHECK: s_mulk_i32 m0, 0x3141 ; encoding: [0x41,0x31,0xfc,0xb7]
+0x41,0x31,0xfc,0xb7
+
+# CHECK: s_mulk_i32 exec_lo, 0x3141 ; encoding: [0x41,0x31,0xfe,0xb7]
+0x41,0x31,0xfe,0xb7
+
+# CHECK: s_mulk_i32 exec_hi, 0x3141 ; encoding: [0x41,0x31,0xff,0xb7]
+0x41,0x31,0xff,0xb7
+
+# CHECK: s_mulk_i32 s0, 0xc1d1 ; encoding: [0xd1,0xc1,0x80,0xb7]
+0xd1,0xc1,0x80,0xb7
+
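+# The remaining scalar pairs use the SOPP layout (sketch):
+# [31:23]=0b1011_1111_1, op[22:16], simm16[15:0], e.g. 0xbf803141 -> op 0
+# (s_nop), simm16 0x3141. For the s_branch/s_cbranch_* cases the hardware
+# interprets simm16 as a signed dword offset from the next instruction; the
+# CHECK lines print the raw immediate (12609 == 0x3141, 49617 == 0xc1d1).
+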
+# CHECK: s_nop 0x3141 ; encoding: [0x41,0x31,0x80,0xbf]
+0x41,0x31,0x80,0xbf
+
+# CHECK: s_nop 0xc1d1 ; encoding: [0xd1,0xc1,0x80,0xbf]
+0xd1,0xc1,0x80,0xbf
+
+# CHECK: s_endpgm ; encoding: [0x00,0x00,0x81,0xbf]
+0x00,0x00,0x81,0xbf
+
+# CHECK: s_branch 12609 ; encoding: [0x41,0x31,0x82,0xbf]
+0x41,0x31,0x82,0xbf
+
+# CHECK: s_branch 49617 ; encoding: [0xd1,0xc1,0x82,0xbf]
+0xd1,0xc1,0x82,0xbf
+
+# CHECK: s_cbranch_scc0 12609 ; encoding: [0x41,0x31,0x84,0xbf]
+0x41,0x31,0x84,0xbf
+
+# CHECK: s_cbranch_scc0 49617 ; encoding: [0xd1,0xc1,0x84,0xbf]
+0xd1,0xc1,0x84,0xbf
+
+# CHECK: s_cbranch_scc1 12609 ; encoding: [0x41,0x31,0x85,0xbf]
+0x41,0x31,0x85,0xbf
+
+# CHECK: s_cbranch_scc1 49617 ; encoding: [0xd1,0xc1,0x85,0xbf]
+0xd1,0xc1,0x85,0xbf
+
+# CHECK: s_cbranch_vccz 12609 ; encoding: [0x41,0x31,0x86,0xbf]
+0x41,0x31,0x86,0xbf
+
+# CHECK: s_cbranch_vccz 49617 ; encoding: [0xd1,0xc1,0x86,0xbf]
+0xd1,0xc1,0x86,0xbf
+
+# CHECK: s_cbranch_vccnz 12609 ; encoding: [0x41,0x31,0x87,0xbf]
+0x41,0x31,0x87,0xbf
+
+# CHECK: s_cbranch_vccnz 49617 ; encoding: [0xd1,0xc1,0x87,0xbf]
+0xd1,0xc1,0x87,0xbf
+
+# CHECK: s_cbranch_execz 12609 ; encoding: [0x41,0x31,0x88,0xbf]
+0x41,0x31,0x88,0xbf
+
+# CHECK: s_cbranch_execz 49617 ; encoding: [0xd1,0xc1,0x88,0xbf]
+0xd1,0xc1,0x88,0xbf
+
+# CHECK: s_cbranch_execnz 12609 ; encoding: [0x41,0x31,0x89,0xbf]
+0x41,0x31,0x89,0xbf
+
+# CHECK: s_cbranch_execnz 49617 ; encoding: [0xd1,0xc1,0x89,0xbf]
+0xd1,0xc1,0x89,0xbf
+
+# CHECK: s_barrier ; encoding: [0x00,0x00,0x8a,0xbf]
+0x00,0x00,0x8a,0xbf
+
+# CHECK: s_sethalt 0x3141 ; encoding: [0x41,0x31,0x8d,0xbf]
+0x41,0x31,0x8d,0xbf
+
+# CHECK: s_sethalt 0xc1d1 ; encoding: [0xd1,0xc1,0x8d,0xbf]
+0xd1,0xc1,0x8d,0xbf
+
+# CHECK: s_sleep 0x3141 ; encoding: [0x41,0x31,0x8e,0xbf]
+0x41,0x31,0x8e,0xbf
+
+# CHECK: s_sleep 0xc1d1 ; encoding: [0xd1,0xc1,0x8e,0xbf]
+0xd1,0xc1,0x8e,0xbf
+
+# CHECK: s_setprio 0x3141 ; encoding: [0x41,0x31,0x8f,0xbf]
+0x41,0x31,0x8f,0xbf
+
+# CHECK: s_setprio 0xc1d1 ; encoding: [0xd1,0xc1,0x8f,0xbf]
+0xd1,0xc1,0x8f,0xbf
+
+# CHECK: s_trap 0x3141 ; encoding: [0x41,0x31,0x92,0xbf]
+0x41,0x31,0x92,0xbf
+
+# CHECK: s_trap 0xc1d1 ; encoding: [0xd1,0xc1,0x92,0xbf]
+0xd1,0xc1,0x92,0xbf
+
+# CHECK: s_icache_inv ; encoding: [0x00,0x00,0x93,0xbf]
+0x00,0x00,0x93,0xbf
+
+# CHECK: s_incperflevel 0x3141 ; encoding: [0x41,0x31,0x94,0xbf]
+0x41,0x31,0x94,0xbf
+
+# CHECK: s_incperflevel 0xc1d1 ; encoding: [0xd1,0xc1,0x94,0xbf]
+0xd1,0xc1,0x94,0xbf
+
+# CHECK: s_decperflevel 0x3141 ; encoding: [0x41,0x31,0x95,0xbf]
+0x41,0x31,0x95,0xbf
+
+# CHECK: s_decperflevel 0xc1d1 ; encoding: [0xd1,0xc1,0x95,0xbf]
+0xd1,0xc1,0x95,0xbf
+
+# CHECK: s_ttracedata ; encoding: [0x00,0x00,0x96,0xbf]
+0x00,0x00,0x96,0xbf
+
+# CHECK: s_set_gpr_idx_off ; encoding: [0x00,0x00,0x9c,0xbf]
+0x00,0x00,0x9c,0xbf
+
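+# VOP1 layout for the v_*_e32 pairs below (sketch): [31:25]=0b0111111,
+# vdst[24:17], op[16:9], src0[8:0]. src0 values 0x100-0x1ff name v0-v255 and
+# 0xff takes a trailing 32-bit literal. Worked example: 0x7ffe0200 ->
+# vdst 0xff (v255), op 1 (v_mov_b32), src0 0 (s0).
+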
+# CHECK: v_mov_b32_e32 v0, s0 ; encoding: [0x00,0x02,0x00,0x7e]
+0x00,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v255, s0 ; encoding: [0x00,0x02,0xfe,0x7f]
+0x00,0x02,0xfe,0x7f
+
+# CHECK: v_mov_b32_e32 v0, s101 ; encoding: [0x65,0x02,0x00,0x7e]
+0x65,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x02,0x00,0x7e]
+0x66,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x02,0x00,0x7e]
+0x67,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, vcc_lo ; encoding: [0x6a,0x02,0x00,0x7e]
+0x6a,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, vcc_hi ; encoding: [0x6b,0x02,0x00,0x7e]
+0x6b,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, tba_lo ; encoding: [0x6c,0x02,0x00,0x7e]
+0x6c,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, tba_hi ; encoding: [0x6d,0x02,0x00,0x7e]
+0x6d,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, tma_lo ; encoding: [0x6e,0x02,0x00,0x7e]
+0x6e,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, tma_hi ; encoding: [0x6f,0x02,0x00,0x7e]
+0x6f,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, ttmp11 ; encoding: [0x7b,0x02,0x00,0x7e]
+0x7b,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, m0 ; encoding: [0x7c,0x02,0x00,0x7e]
+0x7c,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, exec_lo ; encoding: [0x7e,0x02,0x00,0x7e]
+0x7e,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, exec_hi ; encoding: [0x7f,0x02,0x00,0x7e]
+0x7f,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, 0 ; encoding: [0x80,0x02,0x00,0x7e]
+0x80,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, -1 ; encoding: [0xc1,0x02,0x00,0x7e]
+0xc1,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, 0.5 ; encoding: [0xf0,0x02,0x00,0x7e]
+0xf0,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, -4.0 ; encoding: [0xf7,0x02,0x00,0x7e]
+0xf7,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, scc ; encoding: [0xfd,0x02,0x00,0x7e]
+0xfd,0x02,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, 0xaf123456 ; encoding: [0xff,0x02,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x02,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_mov_b32_e32 v0, 0x3f717273 ; encoding: [0xff,0x02,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x02,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_mov_b32_e32 v0, v0 ; encoding: [0x00,0x03,0x00,0x7e]
+0x00,0x03,0x00,0x7e
+
+# CHECK: v_mov_b32_e32 v0, v255 ; encoding: [0xff,0x03,0x00,0x7e]
+0xff,0x03,0x00,0x7e
+
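+# The _e64 forms below are the same operations re-encoded as 64-bit VOP3
+# (sketch): first word [31:26]=0b110100, op[25:16] (the VOP1 opcode plus
+# 0x140), vdst[7:0]; the second word carries src0[8:0] along with the unused
+# src1/src2/modifier fields. Worked example: 0xd1410000,0x00000000 ->
+# v_mov_b32_e64 v0, s0.
+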
+# CHECK: v_mov_b32_e64 v0, s0 ; encoding: [0x00,0x00,0x41,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v255, s0 ; encoding: [0xff,0x00,0x41,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x41,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, s101 ; encoding: [0x00,0x00,0x41,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x41,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x41,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x41,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x41,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x41,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x41,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x41,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x41,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x41,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, m0 ; encoding: [0x00,0x00,0x41,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x41,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x41,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, 0 ; encoding: [0x00,0x00,0x41,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, -1 ; encoding: [0x00,0x00,0x41,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x41,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x41,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, scc ; encoding: [0x00,0x00,0x41,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, v0 ; encoding: [0x00,0x00,0x41,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x41,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_mov_b32_e64 v0, v255 ; encoding: [0x00,0x00,0x41,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x41,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_readfirstlane_b32 s0, v0 ; encoding: [0x00,0x05,0x00,0x7e]
+0x00,0x05,0x00,0x7e
+
+# CHECK: v_readfirstlane_b32 s101, v0 ; encoding: [0x00,0x05,0xca,0x7e]
+0x00,0x05,0xca,0x7e
+
+# CHECK: v_readfirstlane_b32 flat_scratch_lo, v0 ; encoding: [0x00,0x05,0xcc,0x7e]
+0x00,0x05,0xcc,0x7e
+
+# CHECK: v_readfirstlane_b32 flat_scratch_hi, v0 ; encoding: [0x00,0x05,0xce,0x7e]
+0x00,0x05,0xce,0x7e
+
+# CHECK: v_readfirstlane_b32 tba_lo, v0 ; encoding: [0x00,0x05,0xd8,0x7e]
+0x00,0x05,0xd8,0x7e
+
+# CHECK: v_readfirstlane_b32 tba_hi, v0 ; encoding: [0x00,0x05,0xda,0x7e]
+0x00,0x05,0xda,0x7e
+
+# CHECK: v_readfirstlane_b32 tma_lo, v0 ; encoding: [0x00,0x05,0xdc,0x7e]
+0x00,0x05,0xdc,0x7e
+
+# CHECK: v_readfirstlane_b32 tma_hi, v0 ; encoding: [0x00,0x05,0xde,0x7e]
+0x00,0x05,0xde,0x7e
+
+# CHECK: v_readfirstlane_b32 ttmp11, v0 ; encoding: [0x00,0x05,0xf6,0x7e]
+0x00,0x05,0xf6,0x7e
+
+# CHECK: v_readfirstlane_b32 s0, v255 ; encoding: [0xff,0x05,0x00,0x7e]
+0xff,0x05,0x00,0x7e
+
+# CHECK: v_cvt_i32_f64_e32 v0, s[0:1] ; encoding: [0x00,0x06,0x00,0x7e]
+0x00,0x06,0x00,0x7e
+
+# CHECK: v_cvt_i32_f64_e32 v255, s[0:1] ; encoding: [0x00,0x06,0xfe,0x7f]
+0x00,0x06,0xfe,0x7f
+
+# CHECK: v_cvt_i32_f64_e32 v0, s[2:3] ; encoding: [0x02,0x06,0x00,0x7e]
+0x02,0x06,0x00,0x7e
+
+# CHECK: v_cvt_i32_f64_e32 v0, s[100:101] ; encoding: [0x64,0x06,0x00,0x7e]
+0x64,0x06,0x00,0x7e
+
+# CHECK: v_cvt_i32_f64_e32 v0, flat_scratch ; encoding: [0x66,0x06,0x00,0x7e]
+0x66,0x06,0x00,0x7e
+
+# CHECK: v_cvt_i32_f64_e32 v0, vcc ; encoding: [0x6a,0x06,0x00,0x7e]
+0x6a,0x06,0x00,0x7e
+
+# CHECK: v_cvt_i32_f64_e32 v0, tba ; encoding: [0x6c,0x06,0x00,0x7e]
+0x6c,0x06,0x00,0x7e
+
+# CHECK: v_cvt_i32_f64_e32 v0, tma ; encoding: [0x6e,0x06,0x00,0x7e]
+0x6e,0x06,0x00,0x7e
+
+# CHECK: v_cvt_i32_f64_e32 v0, ttmp[10:11] ; encoding: [0x7a,0x06,0x00,0x7e]
+0x7a,0x06,0x00,0x7e
+
+# CHECK: v_cvt_i32_f64_e32 v0, exec ; encoding: [0x7e,0x06,0x00,0x7e]
+0x7e,0x06,0x00,0x7e
+
+# CHECK: v_cvt_i32_f64_e32 v0, 0 ; encoding: [0x80,0x06,0x00,0x7e]
+0x80,0x06,0x00,0x7e
+
+# CHECK: v_cvt_i32_f64_e32 v0, -1 ; encoding: [0xc1,0x06,0x00,0x7e]
+0xc1,0x06,0x00,0x7e
+
+# CHECK: v_cvt_i32_f64_e32 v0, 0.5 ; encoding: [0xf0,0x06,0x00,0x7e]
+0xf0,0x06,0x00,0x7e
+
+# CHECK: v_cvt_i32_f64_e32 v0, -4.0 ; encoding: [0xf7,0x06,0x00,0x7e]
+0xf7,0x06,0x00,0x7e
+
+# CHECK: v_cvt_i32_f64_e32 v0, 0xaf123456 ; encoding: [0xff,0x06,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x06,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cvt_i32_f64_e32 v0, 0x3f717273 ; encoding: [0xff,0x06,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x06,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cvt_i32_f64_e32 v0, v[0:1] ; encoding: [0x00,0x07,0x00,0x7e]
+0x00,0x07,0x00,0x7e
+
+# CHECK: v_cvt_i32_f64_e32 v0, v[254:255] ; encoding: [0xfe,0x07,0x00,0x7e]
+0xfe,0x07,0x00,0x7e
+
+# CHECK: v_cvt_i32_f64_e64 v0, s[0:1] ; encoding: [0x00,0x00,0x43,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f64_e64 v255, s[0:1] ; encoding: [0xff,0x00,0x43,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x43,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f64_e64 v0, s[2:3] ; encoding: [0x00,0x00,0x43,0xd1,0x02,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd1,0x02,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f64_e64 v0, s[100:101] ; encoding: [0x00,0x00,0x43,0xd1,0x64,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd1,0x64,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f64_e64 v0, flat_scratch ; encoding: [0x00,0x00,0x43,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f64_e64 v0, vcc ; encoding: [0x00,0x00,0x43,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f64_e64 v0, tba ; encoding: [0x00,0x00,0x43,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f64_e64 v0, tma ; encoding: [0x00,0x00,0x43,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f64_e64 v0, ttmp[10:11] ; encoding: [0x00,0x00,0x43,0xd1,0x7a,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd1,0x7a,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f64_e64 v0, exec ; encoding: [0x00,0x00,0x43,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f64_e64 v0, 0 ; encoding: [0x00,0x00,0x43,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f64_e64 v0, -1 ; encoding: [0x00,0x00,0x43,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f64_e64 v0, 0.5 ; encoding: [0x00,0x00,0x43,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f64_e64 v0, -4.0 ; encoding: [0x00,0x00,0x43,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f64_e64 v0, v[0:1] ; encoding: [0x00,0x00,0x43,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x43,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_i32_f64_e64 v0, v[254:255] ; encoding: [0x00,0x00,0x43,0xd1,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x43,0xd1,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cvt_i32_f64_e64 v0, -s[0:1] ; encoding: [0x00,0x00,0x43,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x43,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_cvt_i32_f64_e64 v0, |s[0:1]| ; encoding: [0x00,0x01,0x43,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x43,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f64_e64 v0, s[0:1] clamp ; encoding: [0x00,0x80,0x43,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x43,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], s0 ; encoding: [0x00,0x08,0x00,0x7e]
+0x00,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[254:255], s0 ; encoding: [0x00,0x08,0xfc,0x7f]
+0x00,0x08,0xfc,0x7f
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], s101 ; encoding: [0x65,0x08,0x00,0x7e]
+0x65,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], flat_scratch_lo ; encoding: [0x66,0x08,0x00,0x7e]
+0x66,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], flat_scratch_hi ; encoding: [0x67,0x08,0x00,0x7e]
+0x67,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], vcc_lo ; encoding: [0x6a,0x08,0x00,0x7e]
+0x6a,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], vcc_hi ; encoding: [0x6b,0x08,0x00,0x7e]
+0x6b,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], tba_lo ; encoding: [0x6c,0x08,0x00,0x7e]
+0x6c,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], tba_hi ; encoding: [0x6d,0x08,0x00,0x7e]
+0x6d,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], tma_lo ; encoding: [0x6e,0x08,0x00,0x7e]
+0x6e,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], tma_hi ; encoding: [0x6f,0x08,0x00,0x7e]
+0x6f,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], ttmp11 ; encoding: [0x7b,0x08,0x00,0x7e]
+0x7b,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], m0 ; encoding: [0x7c,0x08,0x00,0x7e]
+0x7c,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], exec_lo ; encoding: [0x7e,0x08,0x00,0x7e]
+0x7e,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], exec_hi ; encoding: [0x7f,0x08,0x00,0x7e]
+0x7f,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], 0 ; encoding: [0x80,0x08,0x00,0x7e]
+0x80,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], -1 ; encoding: [0xc1,0x08,0x00,0x7e]
+0xc1,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], 0.5 ; encoding: [0xf0,0x08,0x00,0x7e]
+0xf0,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], -4.0 ; encoding: [0xf7,0x08,0x00,0x7e]
+0xf7,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], scc ; encoding: [0xfd,0x08,0x00,0x7e]
+0xfd,0x08,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], 0xaf123456 ; encoding: [0xff,0x08,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x08,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], 0x3f717273 ; encoding: [0xff,0x08,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x08,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], v0 ; encoding: [0x00,0x09,0x00,0x7e]
+0x00,0x09,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e32 v[0:1], v255 ; encoding: [0xff,0x09,0x00,0x7e]
+0xff,0x09,0x00,0x7e
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], s0 ; encoding: [0x00,0x00,0x44,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[254:255], s0 ; encoding: [0xfe,0x00,0x44,0xd1,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x44,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], s101 ; encoding: [0x00,0x00,0x44,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], flat_scratch_lo ; encoding: [0x00,0x00,0x44,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], flat_scratch_hi ; encoding: [0x00,0x00,0x44,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], vcc_lo ; encoding: [0x00,0x00,0x44,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], vcc_hi ; encoding: [0x00,0x00,0x44,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], tba_lo ; encoding: [0x00,0x00,0x44,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], tba_hi ; encoding: [0x00,0x00,0x44,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], tma_lo ; encoding: [0x00,0x00,0x44,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], tma_hi ; encoding: [0x00,0x00,0x44,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], ttmp11 ; encoding: [0x00,0x00,0x44,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], m0 ; encoding: [0x00,0x00,0x44,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], exec_lo ; encoding: [0x00,0x00,0x44,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], exec_hi ; encoding: [0x00,0x00,0x44,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], 0 ; encoding: [0x00,0x00,0x44,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], -1 ; encoding: [0x00,0x00,0x44,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], 0.5 ; encoding: [0x00,0x00,0x44,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], -4.0 ; encoding: [0x00,0x00,0x44,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], scc ; encoding: [0x00,0x00,0x44,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], v0 ; encoding: [0x00,0x00,0x44,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x44,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_f64_i32_e64 v[0:1], v255 ; encoding: [0x00,0x00,0x44,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x44,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e32 v0, s0 ; encoding: [0x00,0x0a,0x00,0x7e]
+0x00,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v255, s0 ; encoding: [0x00,0x0a,0xfe,0x7f]
+0x00,0x0a,0xfe,0x7f
+
+# CHECK: v_cvt_f32_i32_e32 v0, s101 ; encoding: [0x65,0x0a,0x00,0x7e]
+0x65,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x0a,0x00,0x7e]
+0x66,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x0a,0x00,0x7e]
+0x67,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, vcc_lo ; encoding: [0x6a,0x0a,0x00,0x7e]
+0x6a,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, vcc_hi ; encoding: [0x6b,0x0a,0x00,0x7e]
+0x6b,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, tba_lo ; encoding: [0x6c,0x0a,0x00,0x7e]
+0x6c,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, tba_hi ; encoding: [0x6d,0x0a,0x00,0x7e]
+0x6d,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, tma_lo ; encoding: [0x6e,0x0a,0x00,0x7e]
+0x6e,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, tma_hi ; encoding: [0x6f,0x0a,0x00,0x7e]
+0x6f,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, ttmp11 ; encoding: [0x7b,0x0a,0x00,0x7e]
+0x7b,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, m0 ; encoding: [0x7c,0x0a,0x00,0x7e]
+0x7c,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, exec_lo ; encoding: [0x7e,0x0a,0x00,0x7e]
+0x7e,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, exec_hi ; encoding: [0x7f,0x0a,0x00,0x7e]
+0x7f,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, 0 ; encoding: [0x80,0x0a,0x00,0x7e]
+0x80,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, -1 ; encoding: [0xc1,0x0a,0x00,0x7e]
+0xc1,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, 0.5 ; encoding: [0xf0,0x0a,0x00,0x7e]
+0xf0,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, -4.0 ; encoding: [0xf7,0x0a,0x00,0x7e]
+0xf7,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, scc ; encoding: [0xfd,0x0a,0x00,0x7e]
+0xfd,0x0a,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, 0xaf123456 ; encoding: [0xff,0x0a,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x0a,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cvt_f32_i32_e32 v0, 0x3f717273 ; encoding: [0xff,0x0a,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x0a,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cvt_f32_i32_e32 v0, v0 ; encoding: [0x00,0x0b,0x00,0x7e]
+0x00,0x0b,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e32 v0, v255 ; encoding: [0xff,0x0b,0x00,0x7e]
+0xff,0x0b,0x00,0x7e
+
+# CHECK: v_cvt_f32_i32_e64 v0, s0 ; encoding: [0x00,0x00,0x45,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v255, s0 ; encoding: [0xff,0x00,0x45,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x45,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, s101 ; encoding: [0x00,0x00,0x45,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x45,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x45,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x45,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x45,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x45,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x45,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x45,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x45,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x45,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, m0 ; encoding: [0x00,0x00,0x45,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x45,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x45,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, 0 ; encoding: [0x00,0x00,0x45,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, -1 ; encoding: [0x00,0x00,0x45,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x45,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x45,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, scc ; encoding: [0x00,0x00,0x45,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, v0 ; encoding: [0x00,0x00,0x45,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x45,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_e64 v0, v255 ; encoding: [0x00,0x00,0x45,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x45,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e32 v0, s0 ; encoding: [0x00,0x0c,0x00,0x7e]
+0x00,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v255, s0 ; encoding: [0x00,0x0c,0xfe,0x7f]
+0x00,0x0c,0xfe,0x7f
+
+# CHECK: v_cvt_f32_u32_e32 v0, s101 ; encoding: [0x65,0x0c,0x00,0x7e]
+0x65,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x0c,0x00,0x7e]
+0x66,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x0c,0x00,0x7e]
+0x67,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, vcc_lo ; encoding: [0x6a,0x0c,0x00,0x7e]
+0x6a,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, vcc_hi ; encoding: [0x6b,0x0c,0x00,0x7e]
+0x6b,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, tba_lo ; encoding: [0x6c,0x0c,0x00,0x7e]
+0x6c,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, tba_hi ; encoding: [0x6d,0x0c,0x00,0x7e]
+0x6d,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, tma_lo ; encoding: [0x6e,0x0c,0x00,0x7e]
+0x6e,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, tma_hi ; encoding: [0x6f,0x0c,0x00,0x7e]
+0x6f,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, ttmp11 ; encoding: [0x7b,0x0c,0x00,0x7e]
+0x7b,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, m0 ; encoding: [0x7c,0x0c,0x00,0x7e]
+0x7c,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, exec_lo ; encoding: [0x7e,0x0c,0x00,0x7e]
+0x7e,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, exec_hi ; encoding: [0x7f,0x0c,0x00,0x7e]
+0x7f,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, 0 ; encoding: [0x80,0x0c,0x00,0x7e]
+0x80,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, -1 ; encoding: [0xc1,0x0c,0x00,0x7e]
+0xc1,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, 0.5 ; encoding: [0xf0,0x0c,0x00,0x7e]
+0xf0,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, -4.0 ; encoding: [0xf7,0x0c,0x00,0x7e]
+0xf7,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, scc ; encoding: [0xfd,0x0c,0x00,0x7e]
+0xfd,0x0c,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, 0xaf123456 ; encoding: [0xff,0x0c,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x0c,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cvt_f32_u32_e32 v0, 0x3f717273 ; encoding: [0xff,0x0c,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x0c,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cvt_f32_u32_e32 v0, v0 ; encoding: [0x00,0x0d,0x00,0x7e]
+0x00,0x0d,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e32 v0, v255 ; encoding: [0xff,0x0d,0x00,0x7e]
+0xff,0x0d,0x00,0x7e
+
+# CHECK: v_cvt_f32_u32_e64 v0, s0 ; encoding: [0x00,0x00,0x46,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v255, s0 ; encoding: [0xff,0x00,0x46,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x46,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, s101 ; encoding: [0x00,0x00,0x46,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x46,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x46,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x46,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x46,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x46,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x46,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x46,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x46,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x46,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, m0 ; encoding: [0x00,0x00,0x46,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x46,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x46,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, 0 ; encoding: [0x00,0x00,0x46,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, -1 ; encoding: [0x00,0x00,0x46,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x46,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x46,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, scc ; encoding: [0x00,0x00,0x46,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, v0 ; encoding: [0x00,0x00,0x46,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x46,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_e64 v0, v255 ; encoding: [0x00,0x00,0x46,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x46,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e32 v0, s0 ; encoding: [0x00,0x0e,0x00,0x7e]
+0x00,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v255, s0 ; encoding: [0x00,0x0e,0xfe,0x7f]
+0x00,0x0e,0xfe,0x7f
+
+# CHECK: v_cvt_u32_f32_e32 v0, s101 ; encoding: [0x65,0x0e,0x00,0x7e]
+0x65,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x0e,0x00,0x7e]
+0x66,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x0e,0x00,0x7e]
+0x67,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x0e,0x00,0x7e]
+0x6a,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x0e,0x00,0x7e]
+0x6b,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, tba_lo ; encoding: [0x6c,0x0e,0x00,0x7e]
+0x6c,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, tba_hi ; encoding: [0x6d,0x0e,0x00,0x7e]
+0x6d,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, tma_lo ; encoding: [0x6e,0x0e,0x00,0x7e]
+0x6e,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, tma_hi ; encoding: [0x6f,0x0e,0x00,0x7e]
+0x6f,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x0e,0x00,0x7e]
+0x7b,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, m0 ; encoding: [0x7c,0x0e,0x00,0x7e]
+0x7c,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, exec_lo ; encoding: [0x7e,0x0e,0x00,0x7e]
+0x7e,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, exec_hi ; encoding: [0x7f,0x0e,0x00,0x7e]
+0x7f,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, 0 ; encoding: [0x80,0x0e,0x00,0x7e]
+0x80,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, -1 ; encoding: [0xc1,0x0e,0x00,0x7e]
+0xc1,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, 0.5 ; encoding: [0xf0,0x0e,0x00,0x7e]
+0xf0,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, -4.0 ; encoding: [0xf7,0x0e,0x00,0x7e]
+0xf7,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, scc ; encoding: [0xfd,0x0e,0x00,0x7e]
+0xfd,0x0e,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x0e,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x0e,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cvt_u32_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x0e,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x0e,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cvt_u32_f32_e32 v0, v0 ; encoding: [0x00,0x0f,0x00,0x7e]
+0x00,0x0f,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e32 v0, v255 ; encoding: [0xff,0x0f,0x00,0x7e]
+0xff,0x0f,0x00,0x7e
+
+# CHECK: v_cvt_u32_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x47,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x47,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x47,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x47,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x47,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x47,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x47,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x47,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x47,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x47,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x47,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x47,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x47,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x47,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x47,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x47,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x47,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x47,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x47,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x47,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, scc ; encoding: [0x00,0x00,0x47,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x47,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x47,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x47,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x47,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x47,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x47,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_cvt_u32_f32_e64 v0, |s0| ; encoding: [0x00,0x01,0x47,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x47,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x47,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x47,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e32 v0, s0 ; encoding: [0x00,0x10,0x00,0x7e]
+0x00,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v255, s0 ; encoding: [0x00,0x10,0xfe,0x7f]
+0x00,0x10,0xfe,0x7f
+
+# CHECK: v_cvt_i32_f32_e32 v0, s101 ; encoding: [0x65,0x10,0x00,0x7e]
+0x65,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x10,0x00,0x7e]
+0x66,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x10,0x00,0x7e]
+0x67,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x10,0x00,0x7e]
+0x6a,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x10,0x00,0x7e]
+0x6b,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, tba_lo ; encoding: [0x6c,0x10,0x00,0x7e]
+0x6c,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, tba_hi ; encoding: [0x6d,0x10,0x00,0x7e]
+0x6d,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, tma_lo ; encoding: [0x6e,0x10,0x00,0x7e]
+0x6e,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, tma_hi ; encoding: [0x6f,0x10,0x00,0x7e]
+0x6f,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x10,0x00,0x7e]
+0x7b,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, m0 ; encoding: [0x7c,0x10,0x00,0x7e]
+0x7c,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, exec_lo ; encoding: [0x7e,0x10,0x00,0x7e]
+0x7e,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, exec_hi ; encoding: [0x7f,0x10,0x00,0x7e]
+0x7f,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, 0 ; encoding: [0x80,0x10,0x00,0x7e]
+0x80,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, -1 ; encoding: [0xc1,0x10,0x00,0x7e]
+0xc1,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, 0.5 ; encoding: [0xf0,0x10,0x00,0x7e]
+0xf0,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, -4.0 ; encoding: [0xf7,0x10,0x00,0x7e]
+0xf7,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, scc ; encoding: [0xfd,0x10,0x00,0x7e]
+0xfd,0x10,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x10,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x10,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cvt_i32_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x10,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x10,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cvt_i32_f32_e32 v0, v0 ; encoding: [0x00,0x11,0x00,0x7e]
+0x00,0x11,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e32 v0, v255 ; encoding: [0xff,0x11,0x00,0x7e]
+0xff,0x11,0x00,0x7e
+
+# CHECK: v_cvt_i32_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x48,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x48,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x48,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x48,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x48,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x48,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x48,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x48,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x48,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x48,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x48,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x48,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x48,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x48,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x48,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x48,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x48,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x48,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x48,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x48,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, scc ; encoding: [0x00,0x00,0x48,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x48,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x48,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x48,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x48,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x48,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x48,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_cvt_i32_f32_e64 v0, |s0| ; encoding: [0x00,0x01,0x48,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x48,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x48,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x48,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e32 v0, s0 ; encoding: [0x00,0x14,0x00,0x7e]
+0x00,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v255, s0 ; encoding: [0x00,0x14,0xfe,0x7f]
+0x00,0x14,0xfe,0x7f
+
+# CHECK: v_cvt_f16_f32_e32 v0, s101 ; encoding: [0x65,0x14,0x00,0x7e]
+0x65,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x14,0x00,0x7e]
+0x66,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x14,0x00,0x7e]
+0x67,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x14,0x00,0x7e]
+0x6a,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x14,0x00,0x7e]
+0x6b,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, tba_lo ; encoding: [0x6c,0x14,0x00,0x7e]
+0x6c,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, tba_hi ; encoding: [0x6d,0x14,0x00,0x7e]
+0x6d,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, tma_lo ; encoding: [0x6e,0x14,0x00,0x7e]
+0x6e,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, tma_hi ; encoding: [0x6f,0x14,0x00,0x7e]
+0x6f,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x14,0x00,0x7e]
+0x7b,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, m0 ; encoding: [0x7c,0x14,0x00,0x7e]
+0x7c,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, exec_lo ; encoding: [0x7e,0x14,0x00,0x7e]
+0x7e,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, exec_hi ; encoding: [0x7f,0x14,0x00,0x7e]
+0x7f,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, 0 ; encoding: [0x80,0x14,0x00,0x7e]
+0x80,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, -1 ; encoding: [0xc1,0x14,0x00,0x7e]
+0xc1,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, 0.5 ; encoding: [0xf0,0x14,0x00,0x7e]
+0xf0,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, -4.0 ; encoding: [0xf7,0x14,0x00,0x7e]
+0xf7,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, scc ; encoding: [0xfd,0x14,0x00,0x7e]
+0xfd,0x14,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x14,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x14,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cvt_f16_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x14,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x14,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cvt_f16_f32_e32 v0, v0 ; encoding: [0x00,0x15,0x00,0x7e]
+0x00,0x15,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e32 v0, v255 ; encoding: [0xff,0x15,0x00,0x7e]
+0xff,0x15,0x00,0x7e
+
+# CHECK: v_cvt_f16_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x4a,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x4a,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x4a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x4a,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x4a,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x4a,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x4a,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x4a,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x4a,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x4a,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x4a,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x4a,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x4a,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x4a,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x4a,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x4a,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x4a,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x4a,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x4a,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x4a,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, scc ; encoding: [0x00,0x00,0x4a,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x4a,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x4a,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x4a,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x4a,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x4a,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_cvt_f16_f32_e64 v0, |s0| ; encoding: [0x00,0x01,0x4a,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x4a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x4a,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x4a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_e64 v0, s0 mul:2 ; encoding: [0x00,0x00,0x4a,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x4a,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_cvt_f16_f32_e64 v0, s0 mul:4 ; encoding: [0x00,0x00,0x4a,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x4a,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_cvt_f16_f32_e64 v0, s0 div:2 ; encoding: [0x00,0x00,0x4a,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x4a,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_cvt_f32_f16_e32 v0, s0 ; encoding: [0x00,0x16,0x00,0x7e]
+0x00,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v255, s0 ; encoding: [0x00,0x16,0xfe,0x7f]
+0x00,0x16,0xfe,0x7f
+
+# CHECK: v_cvt_f32_f16_e32 v0, s101 ; encoding: [0x65,0x16,0x00,0x7e]
+0x65,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x16,0x00,0x7e]
+0x66,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x16,0x00,0x7e]
+0x67,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, vcc_lo ; encoding: [0x6a,0x16,0x00,0x7e]
+0x6a,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, vcc_hi ; encoding: [0x6b,0x16,0x00,0x7e]
+0x6b,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, tba_lo ; encoding: [0x6c,0x16,0x00,0x7e]
+0x6c,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, tba_hi ; encoding: [0x6d,0x16,0x00,0x7e]
+0x6d,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, tma_lo ; encoding: [0x6e,0x16,0x00,0x7e]
+0x6e,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, tma_hi ; encoding: [0x6f,0x16,0x00,0x7e]
+0x6f,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, ttmp11 ; encoding: [0x7b,0x16,0x00,0x7e]
+0x7b,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, m0 ; encoding: [0x7c,0x16,0x00,0x7e]
+0x7c,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, exec_lo ; encoding: [0x7e,0x16,0x00,0x7e]
+0x7e,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, exec_hi ; encoding: [0x7f,0x16,0x00,0x7e]
+0x7f,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, 0 ; encoding: [0x80,0x16,0x00,0x7e]
+0x80,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, -1 ; encoding: [0xc1,0x16,0x00,0x7e]
+0xc1,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, 0.5 ; encoding: [0xf0,0x16,0x00,0x7e]
+0xf0,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, -4.0 ; encoding: [0xf7,0x16,0x00,0x7e]
+0xf7,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, scc ; encoding: [0xfd,0x16,0x00,0x7e]
+0xfd,0x16,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, 0xfe0b ; encoding: [0xff,0x16,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x16,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e32 v0, 0x3456 ; encoding: [0xff,0x16,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x16,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e32 v0, v0 ; encoding: [0x00,0x17,0x00,0x7e]
+0x00,0x17,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e32 v0, v255 ; encoding: [0xff,0x17,0x00,0x7e]
+0xff,0x17,0x00,0x7e
+
+# CHECK: v_cvt_f32_f16_e64 v0, s0 ; encoding: [0x00,0x00,0x4b,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v255, s0 ; encoding: [0xff,0x00,0x4b,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x4b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, s101 ; encoding: [0x00,0x00,0x4b,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x4b,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x4b,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x4b,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x4b,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x4b,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x4b,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x4b,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x4b,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x4b,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, m0 ; encoding: [0x00,0x00,0x4b,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x4b,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x4b,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, 0 ; encoding: [0x00,0x00,0x4b,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, -1 ; encoding: [0x00,0x00,0x4b,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x4b,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x4b,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, scc ; encoding: [0x00,0x00,0x4b,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, v0 ; encoding: [0x00,0x00,0x4b,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_e64 v0, v255 ; encoding: [0x00,0x00,0x4b,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x4b,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, s0 ; encoding: [0x00,0x18,0x00,0x7e]
+0x00,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v255, s0 ; encoding: [0x00,0x18,0xfe,0x7f]
+0x00,0x18,0xfe,0x7f
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, s101 ; encoding: [0x65,0x18,0x00,0x7e]
+0x65,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x18,0x00,0x7e]
+0x66,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x18,0x00,0x7e]
+0x67,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x18,0x00,0x7e]
+0x6a,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x18,0x00,0x7e]
+0x6b,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, tba_lo ; encoding: [0x6c,0x18,0x00,0x7e]
+0x6c,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, tba_hi ; encoding: [0x6d,0x18,0x00,0x7e]
+0x6d,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, tma_lo ; encoding: [0x6e,0x18,0x00,0x7e]
+0x6e,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, tma_hi ; encoding: [0x6f,0x18,0x00,0x7e]
+0x6f,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x18,0x00,0x7e]
+0x7b,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, m0 ; encoding: [0x7c,0x18,0x00,0x7e]
+0x7c,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, exec_lo ; encoding: [0x7e,0x18,0x00,0x7e]
+0x7e,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, exec_hi ; encoding: [0x7f,0x18,0x00,0x7e]
+0x7f,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, 0 ; encoding: [0x80,0x18,0x00,0x7e]
+0x80,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, -1 ; encoding: [0xc1,0x18,0x00,0x7e]
+0xc1,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, 0.5 ; encoding: [0xf0,0x18,0x00,0x7e]
+0xf0,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, -4.0 ; encoding: [0xf7,0x18,0x00,0x7e]
+0xf7,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, scc ; encoding: [0xfd,0x18,0x00,0x7e]
+0xfd,0x18,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x18,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x18,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x18,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x18,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, v0 ; encoding: [0x00,0x19,0x00,0x7e]
+0x00,0x19,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e32 v0, v255 ; encoding: [0xff,0x19,0x00,0x7e]
+0xff,0x19,0x00,0x7e
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x4c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x4c,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x4c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x4c,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x4c,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x4c,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x4c,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x4c,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x4c,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x4c,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x4c,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x4c,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x4c,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x4c,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x4c,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x4c,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x4c,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x4c,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x4c,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x4c,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, scc ; encoding: [0x00,0x00,0x4c,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x4c,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x4c,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x4c,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x4c,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x4c,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, |s0| ; encoding: [0x00,0x01,0x4c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x4c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x4c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x4c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, s0 ; encoding: [0x00,0x1a,0x00,0x7e]
+0x00,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v255, s0 ; encoding: [0x00,0x1a,0xfe,0x7f]
+0x00,0x1a,0xfe,0x7f
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, s101 ; encoding: [0x65,0x1a,0x00,0x7e]
+0x65,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x1a,0x00,0x7e]
+0x66,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x1a,0x00,0x7e]
+0x67,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x1a,0x00,0x7e]
+0x6a,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x1a,0x00,0x7e]
+0x6b,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, tba_lo ; encoding: [0x6c,0x1a,0x00,0x7e]
+0x6c,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, tba_hi ; encoding: [0x6d,0x1a,0x00,0x7e]
+0x6d,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, tma_lo ; encoding: [0x6e,0x1a,0x00,0x7e]
+0x6e,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, tma_hi ; encoding: [0x6f,0x1a,0x00,0x7e]
+0x6f,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x1a,0x00,0x7e]
+0x7b,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, m0 ; encoding: [0x7c,0x1a,0x00,0x7e]
+0x7c,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, exec_lo ; encoding: [0x7e,0x1a,0x00,0x7e]
+0x7e,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, exec_hi ; encoding: [0x7f,0x1a,0x00,0x7e]
+0x7f,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, 0 ; encoding: [0x80,0x1a,0x00,0x7e]
+0x80,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, -1 ; encoding: [0xc1,0x1a,0x00,0x7e]
+0xc1,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, 0.5 ; encoding: [0xf0,0x1a,0x00,0x7e]
+0xf0,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, -4.0 ; encoding: [0xf7,0x1a,0x00,0x7e]
+0xf7,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, scc ; encoding: [0xfd,0x1a,0x00,0x7e]
+0xfd,0x1a,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x1a,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x1a,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x1a,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x1a,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, v0 ; encoding: [0x00,0x1b,0x00,0x7e]
+0x00,0x1b,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e32 v0, v255 ; encoding: [0xff,0x1b,0x00,0x7e]
+0xff,0x1b,0x00,0x7e
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x4d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x4d,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x4d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x4d,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x4d,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x4d,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x4d,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x4d,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x4d,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x4d,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x4d,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x4d,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x4d,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x4d,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x4d,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x4d,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x4d,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x4d,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x4d,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x4d,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, scc ; encoding: [0x00,0x00,0x4d,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x4d,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x4d,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x4d,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x4d,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x4d,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, |s0| ; encoding: [0x00,0x01,0x4d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x4d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x4d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x4d,0xd1,0x00,0x00,0x00,0x00
+
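+# v_cvt_off_f32_i4 reads a 4-bit signed source, which is presumably why its
+# literal tests below use small integer immediates (0x4f, 0x41) instead of
+# the 32-bit float bit patterns exercised elsewhere in this file.
+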
+# CHECK: v_cvt_off_f32_i4_e32 v0, s0 ; encoding: [0x00,0x1c,0x00,0x7e]
+0x00,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v255, s0 ; encoding: [0x00,0x1c,0xfe,0x7f]
+0x00,0x1c,0xfe,0x7f
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, s101 ; encoding: [0x65,0x1c,0x00,0x7e]
+0x65,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, flat_scratch_lo ; encoding: [0x66,0x1c,0x00,0x7e]
+0x66,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, flat_scratch_hi ; encoding: [0x67,0x1c,0x00,0x7e]
+0x67,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, vcc_lo ; encoding: [0x6a,0x1c,0x00,0x7e]
+0x6a,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, vcc_hi ; encoding: [0x6b,0x1c,0x00,0x7e]
+0x6b,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, tba_lo ; encoding: [0x6c,0x1c,0x00,0x7e]
+0x6c,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, tba_hi ; encoding: [0x6d,0x1c,0x00,0x7e]
+0x6d,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, tma_lo ; encoding: [0x6e,0x1c,0x00,0x7e]
+0x6e,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, tma_hi ; encoding: [0x6f,0x1c,0x00,0x7e]
+0x6f,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, ttmp11 ; encoding: [0x7b,0x1c,0x00,0x7e]
+0x7b,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, m0 ; encoding: [0x7c,0x1c,0x00,0x7e]
+0x7c,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, exec_lo ; encoding: [0x7e,0x1c,0x00,0x7e]
+0x7e,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, exec_hi ; encoding: [0x7f,0x1c,0x00,0x7e]
+0x7f,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, 0 ; encoding: [0x80,0x1c,0x00,0x7e]
+0x80,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, -1 ; encoding: [0xc1,0x1c,0x00,0x7e]
+0xc1,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, 0.5 ; encoding: [0xf0,0x1c,0x00,0x7e]
+0xf0,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, -4.0 ; encoding: [0xf7,0x1c,0x00,0x7e]
+0xf7,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, scc ; encoding: [0xfd,0x1c,0x00,0x7e]
+0xfd,0x1c,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, 0x4f ; encoding: [0xff,0x1c,0x00,0x7e,0x4f,0x00,0x00,0x00]
+0xff,0x1c,0x00,0x7e,0x4f,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, 0x41 ; encoding: [0xff,0x1c,0x00,0x7e,0x41,0x00,0x00,0x00]
+0xff,0x1c,0x00,0x7e,0x41,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, v0 ; encoding: [0x00,0x1d,0x00,0x7e]
+0x00,0x1d,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e32 v0, v255 ; encoding: [0xff,0x1d,0x00,0x7e]
+0xff,0x1d,0x00,0x7e
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, s0 ; encoding: [0x00,0x00,0x4e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v255, s0 ; encoding: [0xff,0x00,0x4e,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x4e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, s101 ; encoding: [0x00,0x00,0x4e,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x4e,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x4e,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x4e,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x4e,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, tba_lo ; encoding: [0x00,0x00,0x4e,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, tba_hi ; encoding: [0x00,0x00,0x4e,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, tma_lo ; encoding: [0x00,0x00,0x4e,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, tma_hi ; encoding: [0x00,0x00,0x4e,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x4e,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, m0 ; encoding: [0x00,0x00,0x4e,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, exec_lo ; encoding: [0x00,0x00,0x4e,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, exec_hi ; encoding: [0x00,0x00,0x4e,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, 0 ; encoding: [0x00,0x00,0x4e,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, -1 ; encoding: [0x00,0x00,0x4e,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, 0.5 ; encoding: [0x00,0x00,0x4e,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, -4.0 ; encoding: [0x00,0x00,0x4e,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, scc ; encoding: [0x00,0x00,0x4e,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, v0 ; encoding: [0x00,0x00,0x4e,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_e64 v0, v255 ; encoding: [0x00,0x00,0x4e,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x4e,0xd1,0xff,0x01,0x00,0x00
+
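+# With an f64 source the operand becomes a 64-bit register pair, so the
+# coverage switches to aligned SGPR/VGPR pairs (s[0:1], v[0:1]) and the
+# 64-bit special registers (vcc, exec, tba, tma, ttmp[10:11]); m0, scc and
+# the _lo/_hi halves do not appear for these operands.
+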
+# CHECK: v_cvt_f32_f64_e32 v0, s[0:1] ; encoding: [0x00,0x1e,0x00,0x7e]
+0x00,0x1e,0x00,0x7e
+
+# CHECK: v_cvt_f32_f64_e32 v255, s[0:1] ; encoding: [0x00,0x1e,0xfe,0x7f]
+0x00,0x1e,0xfe,0x7f
+
+# CHECK: v_cvt_f32_f64_e32 v0, s[2:3] ; encoding: [0x02,0x1e,0x00,0x7e]
+0x02,0x1e,0x00,0x7e
+
+# CHECK: v_cvt_f32_f64_e32 v0, s[100:101] ; encoding: [0x64,0x1e,0x00,0x7e]
+0x64,0x1e,0x00,0x7e
+
+# CHECK: v_cvt_f32_f64_e32 v0, flat_scratch ; encoding: [0x66,0x1e,0x00,0x7e]
+0x66,0x1e,0x00,0x7e
+
+# CHECK: v_cvt_f32_f64_e32 v0, vcc ; encoding: [0x6a,0x1e,0x00,0x7e]
+0x6a,0x1e,0x00,0x7e
+
+# CHECK: v_cvt_f32_f64_e32 v0, tba ; encoding: [0x6c,0x1e,0x00,0x7e]
+0x6c,0x1e,0x00,0x7e
+
+# CHECK: v_cvt_f32_f64_e32 v0, tma ; encoding: [0x6e,0x1e,0x00,0x7e]
+0x6e,0x1e,0x00,0x7e
+
+# CHECK: v_cvt_f32_f64_e32 v0, ttmp[10:11] ; encoding: [0x7a,0x1e,0x00,0x7e]
+0x7a,0x1e,0x00,0x7e
+
+# CHECK: v_cvt_f32_f64_e32 v0, exec ; encoding: [0x7e,0x1e,0x00,0x7e]
+0x7e,0x1e,0x00,0x7e
+
+# CHECK: v_cvt_f32_f64_e32 v0, 0 ; encoding: [0x80,0x1e,0x00,0x7e]
+0x80,0x1e,0x00,0x7e
+
+# CHECK: v_cvt_f32_f64_e32 v0, -1 ; encoding: [0xc1,0x1e,0x00,0x7e]
+0xc1,0x1e,0x00,0x7e
+
+# CHECK: v_cvt_f32_f64_e32 v0, 0.5 ; encoding: [0xf0,0x1e,0x00,0x7e]
+0xf0,0x1e,0x00,0x7e
+
+# CHECK: v_cvt_f32_f64_e32 v0, -4.0 ; encoding: [0xf7,0x1e,0x00,0x7e]
+0xf7,0x1e,0x00,0x7e
+
+# CHECK: v_cvt_f32_f64_e32 v0, 0xaf123456 ; encoding: [0xff,0x1e,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x1e,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cvt_f32_f64_e32 v0, 0x3f717273 ; encoding: [0xff,0x1e,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x1e,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cvt_f32_f64_e32 v0, v[0:1] ; encoding: [0x00,0x1f,0x00,0x7e]
+0x00,0x1f,0x00,0x7e
+
+# CHECK: v_cvt_f32_f64_e32 v0, v[254:255] ; encoding: [0xfe,0x1f,0x00,0x7e]
+0xfe,0x1f,0x00,0x7e
+
+# CHECK: v_cvt_f32_f64_e64 v0, s[0:1] ; encoding: [0x00,0x00,0x4f,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f64_e64 v255, s[0:1] ; encoding: [0xff,0x00,0x4f,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x4f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f64_e64 v0, s[2:3] ; encoding: [0x00,0x00,0x4f,0xd1,0x02,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd1,0x02,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f64_e64 v0, s[100:101] ; encoding: [0x00,0x00,0x4f,0xd1,0x64,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd1,0x64,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f64_e64 v0, flat_scratch ; encoding: [0x00,0x00,0x4f,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f64_e64 v0, vcc ; encoding: [0x00,0x00,0x4f,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f64_e64 v0, tba ; encoding: [0x00,0x00,0x4f,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f64_e64 v0, tma ; encoding: [0x00,0x00,0x4f,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f64_e64 v0, ttmp[10:11] ; encoding: [0x00,0x00,0x4f,0xd1,0x7a,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd1,0x7a,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f64_e64 v0, exec ; encoding: [0x00,0x00,0x4f,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f64_e64 v0, 0 ; encoding: [0x00,0x00,0x4f,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f64_e64 v0, -1 ; encoding: [0x00,0x00,0x4f,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f64_e64 v0, 0.5 ; encoding: [0x00,0x00,0x4f,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f64_e64 v0, -4.0 ; encoding: [0x00,0x00,0x4f,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f64_e64 v0, v[0:1] ; encoding: [0x00,0x00,0x4f,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x4f,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_f32_f64_e64 v0, v[254:255] ; encoding: [0x00,0x00,0x4f,0xd1,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x4f,0xd1,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cvt_f32_f64_e64 v0, -s[0:1] ; encoding: [0x00,0x00,0x4f,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x4f,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_cvt_f32_f64_e64 v0, |s[0:1]| ; encoding: [0x00,0x01,0x4f,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x4f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f64_e64 v0, s[0:1] clamp ; encoding: [0x00,0x80,0x4f,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x4f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_f64_e64 v0, s[0:1] mul:2 ; encoding: [0x00,0x00,0x4f,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x4f,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_cvt_f32_f64_e64 v0, s[0:1] mul:4 ; encoding: [0x00,0x00,0x4f,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x4f,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_cvt_f32_f64_e64 v0, s[0:1] div:2 ; encoding: [0x00,0x00,0x4f,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x4f,0xd1,0x00,0x00,0x00,0x18
+
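+# The omod tests above encode mul:2, mul:4 and div:2 as 0x08, 0x10 and 0x18
+# in the final VOP3 byte (omod field values 1-3). The v_cvt_f64_f32 group
+# below writes a 64-bit result, so the vdst field names the even register of
+# the destination pair (v[254:255] encodes as 0xfe).
+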
+# CHECK: v_cvt_f64_f32_e32 v[0:1], s0 ; encoding: [0x00,0x20,0x00,0x7e]
+0x00,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[254:255], s0 ; encoding: [0x00,0x20,0xfc,0x7f]
+0x00,0x20,0xfc,0x7f
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], s101 ; encoding: [0x65,0x20,0x00,0x7e]
+0x65,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], flat_scratch_lo ; encoding: [0x66,0x20,0x00,0x7e]
+0x66,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], flat_scratch_hi ; encoding: [0x67,0x20,0x00,0x7e]
+0x67,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], vcc_lo ; encoding: [0x6a,0x20,0x00,0x7e]
+0x6a,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], vcc_hi ; encoding: [0x6b,0x20,0x00,0x7e]
+0x6b,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], tba_lo ; encoding: [0x6c,0x20,0x00,0x7e]
+0x6c,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], tba_hi ; encoding: [0x6d,0x20,0x00,0x7e]
+0x6d,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], tma_lo ; encoding: [0x6e,0x20,0x00,0x7e]
+0x6e,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], tma_hi ; encoding: [0x6f,0x20,0x00,0x7e]
+0x6f,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], ttmp11 ; encoding: [0x7b,0x20,0x00,0x7e]
+0x7b,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], m0 ; encoding: [0x7c,0x20,0x00,0x7e]
+0x7c,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], exec_lo ; encoding: [0x7e,0x20,0x00,0x7e]
+0x7e,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], exec_hi ; encoding: [0x7f,0x20,0x00,0x7e]
+0x7f,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], 0 ; encoding: [0x80,0x20,0x00,0x7e]
+0x80,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], -1 ; encoding: [0xc1,0x20,0x00,0x7e]
+0xc1,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], 0.5 ; encoding: [0xf0,0x20,0x00,0x7e]
+0xf0,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], -4.0 ; encoding: [0xf7,0x20,0x00,0x7e]
+0xf7,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], scc ; encoding: [0xfd,0x20,0x00,0x7e]
+0xfd,0x20,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], 0xaf123456 ; encoding: [0xff,0x20,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x20,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], 0x3f717273 ; encoding: [0xff,0x20,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x20,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], v0 ; encoding: [0x00,0x21,0x00,0x7e]
+0x00,0x21,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e32 v[0:1], v255 ; encoding: [0xff,0x21,0x00,0x7e]
+0xff,0x21,0x00,0x7e
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], s0 ; encoding: [0x00,0x00,0x50,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[254:255], s0 ; encoding: [0xfe,0x00,0x50,0xd1,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x50,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], s101 ; encoding: [0x00,0x00,0x50,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], flat_scratch_lo ; encoding: [0x00,0x00,0x50,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], flat_scratch_hi ; encoding: [0x00,0x00,0x50,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], vcc_lo ; encoding: [0x00,0x00,0x50,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], vcc_hi ; encoding: [0x00,0x00,0x50,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], tba_lo ; encoding: [0x00,0x00,0x50,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], tba_hi ; encoding: [0x00,0x00,0x50,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], tma_lo ; encoding: [0x00,0x00,0x50,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], tma_hi ; encoding: [0x00,0x00,0x50,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], ttmp11 ; encoding: [0x00,0x00,0x50,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], m0 ; encoding: [0x00,0x00,0x50,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], exec_lo ; encoding: [0x00,0x00,0x50,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], exec_hi ; encoding: [0x00,0x00,0x50,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], 0 ; encoding: [0x00,0x00,0x50,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], -1 ; encoding: [0x00,0x00,0x50,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], 0.5 ; encoding: [0x00,0x00,0x50,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], -4.0 ; encoding: [0x00,0x00,0x50,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], scc ; encoding: [0x00,0x00,0x50,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], v0 ; encoding: [0x00,0x00,0x50,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x50,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], v255 ; encoding: [0x00,0x00,0x50,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x50,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], -s0 ; encoding: [0x00,0x00,0x50,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x50,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], |s0| ; encoding: [0x00,0x01,0x50,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x50,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], s0 clamp ; encoding: [0x00,0x80,0x50,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x50,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], s0 mul:2 ; encoding: [0x00,0x00,0x50,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x50,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], s0 mul:4 ; encoding: [0x00,0x00,0x50,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x50,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_cvt_f64_f32_e64 v[0:1], s0 div:2 ; encoding: [0x00,0x00,0x50,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x50,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, s0 ; encoding: [0x00,0x22,0x00,0x7e]
+0x00,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v255, s0 ; encoding: [0x00,0x22,0xfe,0x7f]
+0x00,0x22,0xfe,0x7f
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, s101 ; encoding: [0x65,0x22,0x00,0x7e]
+0x65,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, flat_scratch_lo ; encoding: [0x66,0x22,0x00,0x7e]
+0x66,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, flat_scratch_hi ; encoding: [0x67,0x22,0x00,0x7e]
+0x67,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, vcc_lo ; encoding: [0x6a,0x22,0x00,0x7e]
+0x6a,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, vcc_hi ; encoding: [0x6b,0x22,0x00,0x7e]
+0x6b,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, tba_lo ; encoding: [0x6c,0x22,0x00,0x7e]
+0x6c,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, tba_hi ; encoding: [0x6d,0x22,0x00,0x7e]
+0x6d,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, tma_lo ; encoding: [0x6e,0x22,0x00,0x7e]
+0x6e,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, tma_hi ; encoding: [0x6f,0x22,0x00,0x7e]
+0x6f,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, ttmp11 ; encoding: [0x7b,0x22,0x00,0x7e]
+0x7b,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, m0 ; encoding: [0x7c,0x22,0x00,0x7e]
+0x7c,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, exec_lo ; encoding: [0x7e,0x22,0x00,0x7e]
+0x7e,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, exec_hi ; encoding: [0x7f,0x22,0x00,0x7e]
+0x7f,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, 0 ; encoding: [0x80,0x22,0x00,0x7e]
+0x80,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, -1 ; encoding: [0xc1,0x22,0x00,0x7e]
+0xc1,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, 0.5 ; encoding: [0xf0,0x22,0x00,0x7e]
+0xf0,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, -4.0 ; encoding: [0xf7,0x22,0x00,0x7e]
+0xf7,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, scc ; encoding: [0xfd,0x22,0x00,0x7e]
+0xfd,0x22,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, 0xaf123456 ; encoding: [0xff,0x22,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x22,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, 0x3f717273 ; encoding: [0xff,0x22,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x22,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, v0 ; encoding: [0x00,0x23,0x00,0x7e]
+0x00,0x23,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e32 v0, v255 ; encoding: [0xff,0x23,0x00,0x7e]
+0xff,0x23,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, s0 ; encoding: [0x00,0x00,0x51,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v255, s0 ; encoding: [0xff,0x00,0x51,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x51,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, s101 ; encoding: [0x00,0x00,0x51,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x51,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x51,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x51,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x51,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, tba_lo ; encoding: [0x00,0x00,0x51,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, tba_hi ; encoding: [0x00,0x00,0x51,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, tma_lo ; encoding: [0x00,0x00,0x51,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, tma_hi ; encoding: [0x00,0x00,0x51,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x51,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, m0 ; encoding: [0x00,0x00,0x51,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, exec_lo ; encoding: [0x00,0x00,0x51,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, exec_hi ; encoding: [0x00,0x00,0x51,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, 0 ; encoding: [0x00,0x00,0x51,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, -1 ; encoding: [0x00,0x00,0x51,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, 0.5 ; encoding: [0x00,0x00,0x51,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, -4.0 ; encoding: [0x00,0x00,0x51,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, scc ; encoding: [0x00,0x00,0x51,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, v0 ; encoding: [0x00,0x00,0x51,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x51,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_e64 v0, v255 ; encoding: [0x00,0x00,0x51,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x51,0xd1,0xff,0x01,0x00,0x00
+
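+# v_cvt_f32_ubyte1 through v_cvt_f32_ubyte3 convert the second, third and
+# fourth byte of the 32-bit source; each group repeats the ubyte0 operand
+# coverage with the opcode stepped by one (second VOP1 byte 0x24/0x26/0x28,
+# VOP3 opcode bytes 0x52/0x53/0x54).
+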
+# CHECK: v_cvt_f32_ubyte1_e32 v0, s0 ; encoding: [0x00,0x24,0x00,0x7e]
+0x00,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v255, s0 ; encoding: [0x00,0x24,0xfe,0x7f]
+0x00,0x24,0xfe,0x7f
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, s101 ; encoding: [0x65,0x24,0x00,0x7e]
+0x65,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, flat_scratch_lo ; encoding: [0x66,0x24,0x00,0x7e]
+0x66,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, flat_scratch_hi ; encoding: [0x67,0x24,0x00,0x7e]
+0x67,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, vcc_lo ; encoding: [0x6a,0x24,0x00,0x7e]
+0x6a,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, vcc_hi ; encoding: [0x6b,0x24,0x00,0x7e]
+0x6b,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, tba_lo ; encoding: [0x6c,0x24,0x00,0x7e]
+0x6c,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, tba_hi ; encoding: [0x6d,0x24,0x00,0x7e]
+0x6d,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, tma_lo ; encoding: [0x6e,0x24,0x00,0x7e]
+0x6e,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, tma_hi ; encoding: [0x6f,0x24,0x00,0x7e]
+0x6f,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, ttmp11 ; encoding: [0x7b,0x24,0x00,0x7e]
+0x7b,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, m0 ; encoding: [0x7c,0x24,0x00,0x7e]
+0x7c,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, exec_lo ; encoding: [0x7e,0x24,0x00,0x7e]
+0x7e,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, exec_hi ; encoding: [0x7f,0x24,0x00,0x7e]
+0x7f,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, 0 ; encoding: [0x80,0x24,0x00,0x7e]
+0x80,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, -1 ; encoding: [0xc1,0x24,0x00,0x7e]
+0xc1,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, 0.5 ; encoding: [0xf0,0x24,0x00,0x7e]
+0xf0,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, -4.0 ; encoding: [0xf7,0x24,0x00,0x7e]
+0xf7,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, scc ; encoding: [0xfd,0x24,0x00,0x7e]
+0xfd,0x24,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, 0xaf123456 ; encoding: [0xff,0x24,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x24,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, 0x3f717273 ; encoding: [0xff,0x24,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x24,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, v0 ; encoding: [0x00,0x25,0x00,0x7e]
+0x00,0x25,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e32 v0, v255 ; encoding: [0xff,0x25,0x00,0x7e]
+0xff,0x25,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, s0 ; encoding: [0x00,0x00,0x52,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v255, s0 ; encoding: [0xff,0x00,0x52,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x52,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, s101 ; encoding: [0x00,0x00,0x52,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x52,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x52,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x52,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x52,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, tba_lo ; encoding: [0x00,0x00,0x52,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, tba_hi ; encoding: [0x00,0x00,0x52,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, tma_lo ; encoding: [0x00,0x00,0x52,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, tma_hi ; encoding: [0x00,0x00,0x52,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x52,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, m0 ; encoding: [0x00,0x00,0x52,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, exec_lo ; encoding: [0x00,0x00,0x52,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, exec_hi ; encoding: [0x00,0x00,0x52,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, 0 ; encoding: [0x00,0x00,0x52,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, -1 ; encoding: [0x00,0x00,0x52,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, 0.5 ; encoding: [0x00,0x00,0x52,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, -4.0 ; encoding: [0x00,0x00,0x52,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, scc ; encoding: [0x00,0x00,0x52,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, v0 ; encoding: [0x00,0x00,0x52,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x52,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_e64 v0, v255 ; encoding: [0x00,0x00,0x52,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x52,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, s0 ; encoding: [0x00,0x26,0x00,0x7e]
+0x00,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v255, s0 ; encoding: [0x00,0x26,0xfe,0x7f]
+0x00,0x26,0xfe,0x7f
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, s101 ; encoding: [0x65,0x26,0x00,0x7e]
+0x65,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, flat_scratch_lo ; encoding: [0x66,0x26,0x00,0x7e]
+0x66,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, flat_scratch_hi ; encoding: [0x67,0x26,0x00,0x7e]
+0x67,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, vcc_lo ; encoding: [0x6a,0x26,0x00,0x7e]
+0x6a,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, vcc_hi ; encoding: [0x6b,0x26,0x00,0x7e]
+0x6b,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, tba_lo ; encoding: [0x6c,0x26,0x00,0x7e]
+0x6c,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, tba_hi ; encoding: [0x6d,0x26,0x00,0x7e]
+0x6d,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, tma_lo ; encoding: [0x6e,0x26,0x00,0x7e]
+0x6e,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, tma_hi ; encoding: [0x6f,0x26,0x00,0x7e]
+0x6f,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, ttmp11 ; encoding: [0x7b,0x26,0x00,0x7e]
+0x7b,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, m0 ; encoding: [0x7c,0x26,0x00,0x7e]
+0x7c,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, exec_lo ; encoding: [0x7e,0x26,0x00,0x7e]
+0x7e,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, exec_hi ; encoding: [0x7f,0x26,0x00,0x7e]
+0x7f,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, 0 ; encoding: [0x80,0x26,0x00,0x7e]
+0x80,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, -1 ; encoding: [0xc1,0x26,0x00,0x7e]
+0xc1,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, 0.5 ; encoding: [0xf0,0x26,0x00,0x7e]
+0xf0,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, -4.0 ; encoding: [0xf7,0x26,0x00,0x7e]
+0xf7,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, scc ; encoding: [0xfd,0x26,0x00,0x7e]
+0xfd,0x26,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, 0xaf123456 ; encoding: [0xff,0x26,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x26,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, 0x3f717273 ; encoding: [0xff,0x26,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x26,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, v0 ; encoding: [0x00,0x27,0x00,0x7e]
+0x00,0x27,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e32 v0, v255 ; encoding: [0xff,0x27,0x00,0x7e]
+0xff,0x27,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, s0 ; encoding: [0x00,0x00,0x53,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v255, s0 ; encoding: [0xff,0x00,0x53,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x53,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, s101 ; encoding: [0x00,0x00,0x53,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x53,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x53,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x53,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x53,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, tba_lo ; encoding: [0x00,0x00,0x53,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, tba_hi ; encoding: [0x00,0x00,0x53,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, tma_lo ; encoding: [0x00,0x00,0x53,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, tma_hi ; encoding: [0x00,0x00,0x53,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x53,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, m0 ; encoding: [0x00,0x00,0x53,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, exec_lo ; encoding: [0x00,0x00,0x53,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, exec_hi ; encoding: [0x00,0x00,0x53,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, 0 ; encoding: [0x00,0x00,0x53,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, -1 ; encoding: [0x00,0x00,0x53,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, 0.5 ; encoding: [0x00,0x00,0x53,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, -4.0 ; encoding: [0x00,0x00,0x53,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, scc ; encoding: [0x00,0x00,0x53,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, v0 ; encoding: [0x00,0x00,0x53,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x53,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_e64 v0, v255 ; encoding: [0x00,0x00,0x53,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x53,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, s0 ; encoding: [0x00,0x28,0x00,0x7e]
+0x00,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v255, s0 ; encoding: [0x00,0x28,0xfe,0x7f]
+0x00,0x28,0xfe,0x7f
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, s101 ; encoding: [0x65,0x28,0x00,0x7e]
+0x65,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, flat_scratch_lo ; encoding: [0x66,0x28,0x00,0x7e]
+0x66,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, flat_scratch_hi ; encoding: [0x67,0x28,0x00,0x7e]
+0x67,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, vcc_lo ; encoding: [0x6a,0x28,0x00,0x7e]
+0x6a,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, vcc_hi ; encoding: [0x6b,0x28,0x00,0x7e]
+0x6b,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, tba_lo ; encoding: [0x6c,0x28,0x00,0x7e]
+0x6c,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, tba_hi ; encoding: [0x6d,0x28,0x00,0x7e]
+0x6d,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, tma_lo ; encoding: [0x6e,0x28,0x00,0x7e]
+0x6e,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, tma_hi ; encoding: [0x6f,0x28,0x00,0x7e]
+0x6f,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, ttmp11 ; encoding: [0x7b,0x28,0x00,0x7e]
+0x7b,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, m0 ; encoding: [0x7c,0x28,0x00,0x7e]
+0x7c,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, exec_lo ; encoding: [0x7e,0x28,0x00,0x7e]
+0x7e,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, exec_hi ; encoding: [0x7f,0x28,0x00,0x7e]
+0x7f,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, 0 ; encoding: [0x80,0x28,0x00,0x7e]
+0x80,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, -1 ; encoding: [0xc1,0x28,0x00,0x7e]
+0xc1,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, 0.5 ; encoding: [0xf0,0x28,0x00,0x7e]
+0xf0,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, -4.0 ; encoding: [0xf7,0x28,0x00,0x7e]
+0xf7,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, scc ; encoding: [0xfd,0x28,0x00,0x7e]
+0xfd,0x28,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, 0xaf123456 ; encoding: [0xff,0x28,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x28,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, 0x3f717273 ; encoding: [0xff,0x28,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x28,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, v0 ; encoding: [0x00,0x29,0x00,0x7e]
+0x00,0x29,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e32 v0, v255 ; encoding: [0xff,0x29,0x00,0x7e]
+0xff,0x29,0x00,0x7e
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, s0 ; encoding: [0x00,0x00,0x54,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v255, s0 ; encoding: [0xff,0x00,0x54,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x54,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, s101 ; encoding: [0x00,0x00,0x54,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x54,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x54,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x54,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x54,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, tba_lo ; encoding: [0x00,0x00,0x54,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, tba_hi ; encoding: [0x00,0x00,0x54,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, tma_lo ; encoding: [0x00,0x00,0x54,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, tma_hi ; encoding: [0x00,0x00,0x54,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x54,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, m0 ; encoding: [0x00,0x00,0x54,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, exec_lo ; encoding: [0x00,0x00,0x54,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, exec_hi ; encoding: [0x00,0x00,0x54,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, 0 ; encoding: [0x00,0x00,0x54,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, -1 ; encoding: [0x00,0x00,0x54,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, 0.5 ; encoding: [0x00,0x00,0x54,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, -4.0 ; encoding: [0x00,0x00,0x54,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, scc ; encoding: [0x00,0x00,0x54,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, v0 ; encoding: [0x00,0x00,0x54,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x54,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_e64 v0, v255 ; encoding: [0x00,0x00,0x54,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x54,0xd1,0xff,0x01,0x00,0x00
+
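+# v_cvt_u32_f64 produces an integer result, so its _e64 coverage ends at the
+# clamp bit: the float-only output modifiers (mul:, div:) are not exercised
+# for this opcode.
+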
+# CHECK: v_cvt_u32_f64_e32 v0, s[0:1] ; encoding: [0x00,0x2a,0x00,0x7e]
+0x00,0x2a,0x00,0x7e
+
+# CHECK: v_cvt_u32_f64_e32 v255, s[0:1] ; encoding: [0x00,0x2a,0xfe,0x7f]
+0x00,0x2a,0xfe,0x7f
+
+# CHECK: v_cvt_u32_f64_e32 v0, s[2:3] ; encoding: [0x02,0x2a,0x00,0x7e]
+0x02,0x2a,0x00,0x7e
+
+# CHECK: v_cvt_u32_f64_e32 v0, s[100:101] ; encoding: [0x64,0x2a,0x00,0x7e]
+0x64,0x2a,0x00,0x7e
+
+# CHECK: v_cvt_u32_f64_e32 v0, flat_scratch ; encoding: [0x66,0x2a,0x00,0x7e]
+0x66,0x2a,0x00,0x7e
+
+# CHECK: v_cvt_u32_f64_e32 v0, vcc ; encoding: [0x6a,0x2a,0x00,0x7e]
+0x6a,0x2a,0x00,0x7e
+
+# CHECK: v_cvt_u32_f64_e32 v0, tba ; encoding: [0x6c,0x2a,0x00,0x7e]
+0x6c,0x2a,0x00,0x7e
+
+# CHECK: v_cvt_u32_f64_e32 v0, tma ; encoding: [0x6e,0x2a,0x00,0x7e]
+0x6e,0x2a,0x00,0x7e
+
+# CHECK: v_cvt_u32_f64_e32 v0, ttmp[10:11] ; encoding: [0x7a,0x2a,0x00,0x7e]
+0x7a,0x2a,0x00,0x7e
+
+# CHECK: v_cvt_u32_f64_e32 v0, exec ; encoding: [0x7e,0x2a,0x00,0x7e]
+0x7e,0x2a,0x00,0x7e
+
+# CHECK: v_cvt_u32_f64_e32 v0, 0 ; encoding: [0x80,0x2a,0x00,0x7e]
+0x80,0x2a,0x00,0x7e
+
+# CHECK: v_cvt_u32_f64_e32 v0, -1 ; encoding: [0xc1,0x2a,0x00,0x7e]
+0xc1,0x2a,0x00,0x7e
+
+# CHECK: v_cvt_u32_f64_e32 v0, 0.5 ; encoding: [0xf0,0x2a,0x00,0x7e]
+0xf0,0x2a,0x00,0x7e
+
+# CHECK: v_cvt_u32_f64_e32 v0, -4.0 ; encoding: [0xf7,0x2a,0x00,0x7e]
+0xf7,0x2a,0x00,0x7e
+
+# CHECK: v_cvt_u32_f64_e32 v0, 0xaf123456 ; encoding: [0xff,0x2a,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x2a,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cvt_u32_f64_e32 v0, 0x3f717273 ; encoding: [0xff,0x2a,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x2a,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cvt_u32_f64_e32 v0, v[0:1] ; encoding: [0x00,0x2b,0x00,0x7e]
+0x00,0x2b,0x00,0x7e
+
+# CHECK: v_cvt_u32_f64_e32 v0, v[254:255] ; encoding: [0xfe,0x2b,0x00,0x7e]
+0xfe,0x2b,0x00,0x7e
+
+# CHECK: v_cvt_u32_f64_e64 v0, s[0:1] ; encoding: [0x00,0x00,0x55,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f64_e64 v255, s[0:1] ; encoding: [0xff,0x00,0x55,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x55,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f64_e64 v0, s[2:3] ; encoding: [0x00,0x00,0x55,0xd1,0x02,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd1,0x02,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f64_e64 v0, s[100:101] ; encoding: [0x00,0x00,0x55,0xd1,0x64,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd1,0x64,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f64_e64 v0, flat_scratch ; encoding: [0x00,0x00,0x55,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f64_e64 v0, vcc ; encoding: [0x00,0x00,0x55,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f64_e64 v0, tba ; encoding: [0x00,0x00,0x55,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f64_e64 v0, tma ; encoding: [0x00,0x00,0x55,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f64_e64 v0, ttmp[10:11] ; encoding: [0x00,0x00,0x55,0xd1,0x7a,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd1,0x7a,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f64_e64 v0, exec ; encoding: [0x00,0x00,0x55,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f64_e64 v0, 0 ; encoding: [0x00,0x00,0x55,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f64_e64 v0, -1 ; encoding: [0x00,0x00,0x55,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f64_e64 v0, 0.5 ; encoding: [0x00,0x00,0x55,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f64_e64 v0, -4.0 ; encoding: [0x00,0x00,0x55,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f64_e64 v0, v[0:1] ; encoding: [0x00,0x00,0x55,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x55,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_u32_f64_e64 v0, v[254:255] ; encoding: [0x00,0x00,0x55,0xd1,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x55,0xd1,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cvt_u32_f64_e64 v0, -s[0:1] ; encoding: [0x00,0x00,0x55,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x55,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_cvt_u32_f64_e64 v0, |s[0:1]| ; encoding: [0x00,0x01,0x55,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x55,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_u32_f64_e64 v0, s[0:1] clamp ; encoding: [0x00,0x80,0x55,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x55,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], s0 ; encoding: [0x00,0x2c,0x00,0x7e]
+0x00,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[254:255], s0 ; encoding: [0x00,0x2c,0xfc,0x7f]
+0x00,0x2c,0xfc,0x7f
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], s101 ; encoding: [0x65,0x2c,0x00,0x7e]
+0x65,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], flat_scratch_lo ; encoding: [0x66,0x2c,0x00,0x7e]
+0x66,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], flat_scratch_hi ; encoding: [0x67,0x2c,0x00,0x7e]
+0x67,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], vcc_lo ; encoding: [0x6a,0x2c,0x00,0x7e]
+0x6a,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], vcc_hi ; encoding: [0x6b,0x2c,0x00,0x7e]
+0x6b,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], tba_lo ; encoding: [0x6c,0x2c,0x00,0x7e]
+0x6c,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], tba_hi ; encoding: [0x6d,0x2c,0x00,0x7e]
+0x6d,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], tma_lo ; encoding: [0x6e,0x2c,0x00,0x7e]
+0x6e,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], tma_hi ; encoding: [0x6f,0x2c,0x00,0x7e]
+0x6f,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], ttmp11 ; encoding: [0x7b,0x2c,0x00,0x7e]
+0x7b,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], m0 ; encoding: [0x7c,0x2c,0x00,0x7e]
+0x7c,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], exec_lo ; encoding: [0x7e,0x2c,0x00,0x7e]
+0x7e,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], exec_hi ; encoding: [0x7f,0x2c,0x00,0x7e]
+0x7f,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], 0 ; encoding: [0x80,0x2c,0x00,0x7e]
+0x80,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], -1 ; encoding: [0xc1,0x2c,0x00,0x7e]
+0xc1,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], 0.5 ; encoding: [0xf0,0x2c,0x00,0x7e]
+0xf0,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], -4.0 ; encoding: [0xf7,0x2c,0x00,0x7e]
+0xf7,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], scc ; encoding: [0xfd,0x2c,0x00,0x7e]
+0xfd,0x2c,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], 0xaf123456 ; encoding: [0xff,0x2c,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x2c,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], 0x3f717273 ; encoding: [0xff,0x2c,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x2c,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], v0 ; encoding: [0x00,0x2d,0x00,0x7e]
+0x00,0x2d,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e32 v[0:1], v255 ; encoding: [0xff,0x2d,0x00,0x7e]
+0xff,0x2d,0x00,0x7e
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], s0 ; encoding: [0x00,0x00,0x56,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[254:255], s0 ; encoding: [0xfe,0x00,0x56,0xd1,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x56,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], s101 ; encoding: [0x00,0x00,0x56,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], flat_scratch_lo ; encoding: [0x00,0x00,0x56,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], flat_scratch_hi ; encoding: [0x00,0x00,0x56,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], vcc_lo ; encoding: [0x00,0x00,0x56,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], vcc_hi ; encoding: [0x00,0x00,0x56,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], tba_lo ; encoding: [0x00,0x00,0x56,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], tba_hi ; encoding: [0x00,0x00,0x56,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], tma_lo ; encoding: [0x00,0x00,0x56,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], tma_hi ; encoding: [0x00,0x00,0x56,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], ttmp11 ; encoding: [0x00,0x00,0x56,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], m0 ; encoding: [0x00,0x00,0x56,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], exec_lo ; encoding: [0x00,0x00,0x56,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], exec_hi ; encoding: [0x00,0x00,0x56,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], 0 ; encoding: [0x00,0x00,0x56,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], -1 ; encoding: [0x00,0x00,0x56,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], 0.5 ; encoding: [0x00,0x00,0x56,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], -4.0 ; encoding: [0x00,0x00,0x56,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], scc ; encoding: [0x00,0x00,0x56,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], v0 ; encoding: [0x00,0x00,0x56,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x56,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_f64_u32_e64 v[0:1], v255 ; encoding: [0x00,0x00,0x56,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x56,0xd1,0xff,0x01,0x00,0x00
+
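+# The remaining f64 opcodes here (v_trunc_f64, v_ceil_f64, v_rndne_f64) keep
+# 64-bit register pairs on both the source and destination side and follow
+# the same operand pattern as the groups above.
+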
+# CHECK: v_trunc_f64_e32 v[0:1], s[0:1] ; encoding: [0x00,0x2e,0x00,0x7e]
+0x00,0x2e,0x00,0x7e
+
+# CHECK: v_trunc_f64_e32 v[254:255], s[0:1] ; encoding: [0x00,0x2e,0xfc,0x7f]
+0x00,0x2e,0xfc,0x7f
+
+# CHECK: v_trunc_f64_e32 v[0:1], s[2:3] ; encoding: [0x02,0x2e,0x00,0x7e]
+0x02,0x2e,0x00,0x7e
+
+# CHECK: v_trunc_f64_e32 v[0:1], s[100:101] ; encoding: [0x64,0x2e,0x00,0x7e]
+0x64,0x2e,0x00,0x7e
+
+# CHECK: v_trunc_f64_e32 v[0:1], flat_scratch ; encoding: [0x66,0x2e,0x00,0x7e]
+0x66,0x2e,0x00,0x7e
+
+# CHECK: v_trunc_f64_e32 v[0:1], vcc ; encoding: [0x6a,0x2e,0x00,0x7e]
+0x6a,0x2e,0x00,0x7e
+
+# CHECK: v_trunc_f64_e32 v[0:1], tba ; encoding: [0x6c,0x2e,0x00,0x7e]
+0x6c,0x2e,0x00,0x7e
+
+# CHECK: v_trunc_f64_e32 v[0:1], tma ; encoding: [0x6e,0x2e,0x00,0x7e]
+0x6e,0x2e,0x00,0x7e
+
+# CHECK: v_trunc_f64_e32 v[0:1], ttmp[10:11] ; encoding: [0x7a,0x2e,0x00,0x7e]
+0x7a,0x2e,0x00,0x7e
+
+# CHECK: v_trunc_f64_e32 v[0:1], exec ; encoding: [0x7e,0x2e,0x00,0x7e]
+0x7e,0x2e,0x00,0x7e
+
+# CHECK: v_trunc_f64_e32 v[0:1], 0 ; encoding: [0x80,0x2e,0x00,0x7e]
+0x80,0x2e,0x00,0x7e
+
+# CHECK: v_trunc_f64_e32 v[0:1], -1 ; encoding: [0xc1,0x2e,0x00,0x7e]
+0xc1,0x2e,0x00,0x7e
+
+# CHECK: v_trunc_f64_e32 v[0:1], 0.5 ; encoding: [0xf0,0x2e,0x00,0x7e]
+0xf0,0x2e,0x00,0x7e
+
+# CHECK: v_trunc_f64_e32 v[0:1], -4.0 ; encoding: [0xf7,0x2e,0x00,0x7e]
+0xf7,0x2e,0x00,0x7e
+
+# CHECK: v_trunc_f64_e32 v[0:1], 0xaf123456 ; encoding: [0xff,0x2e,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x2e,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_trunc_f64_e32 v[0:1], 0x3f717273 ; encoding: [0xff,0x2e,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x2e,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_trunc_f64_e32 v[0:1], v[0:1] ; encoding: [0x00,0x2f,0x00,0x7e]
+0x00,0x2f,0x00,0x7e
+
+# CHECK: v_trunc_f64_e32 v[0:1], v[254:255] ; encoding: [0xfe,0x2f,0x00,0x7e]
+0xfe,0x2f,0x00,0x7e
+
+# CHECK: v_trunc_f64_e64 v[0:1], s[0:1] ; encoding: [0x00,0x00,0x57,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_trunc_f64_e64 v[254:255], s[0:1] ; encoding: [0xfe,0x00,0x57,0xd1,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x57,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_trunc_f64_e64 v[0:1], s[2:3] ; encoding: [0x00,0x00,0x57,0xd1,0x02,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd1,0x02,0x00,0x00,0x00
+
+# CHECK: v_trunc_f64_e64 v[0:1], s[100:101] ; encoding: [0x00,0x00,0x57,0xd1,0x64,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd1,0x64,0x00,0x00,0x00
+
+# CHECK: v_trunc_f64_e64 v[0:1], flat_scratch ; encoding: [0x00,0x00,0x57,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_trunc_f64_e64 v[0:1], vcc ; encoding: [0x00,0x00,0x57,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_trunc_f64_e64 v[0:1], tba ; encoding: [0x00,0x00,0x57,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_trunc_f64_e64 v[0:1], tma ; encoding: [0x00,0x00,0x57,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_trunc_f64_e64 v[0:1], ttmp[10:11] ; encoding: [0x00,0x00,0x57,0xd1,0x7a,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd1,0x7a,0x00,0x00,0x00
+
+# CHECK: v_trunc_f64_e64 v[0:1], exec ; encoding: [0x00,0x00,0x57,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_trunc_f64_e64 v[0:1], 0 ; encoding: [0x00,0x00,0x57,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_trunc_f64_e64 v[0:1], -1 ; encoding: [0x00,0x00,0x57,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_trunc_f64_e64 v[0:1], 0.5 ; encoding: [0x00,0x00,0x57,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_trunc_f64_e64 v[0:1], -4.0 ; encoding: [0x00,0x00,0x57,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_trunc_f64_e64 v[0:1], v[0:1] ; encoding: [0x00,0x00,0x57,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x57,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_trunc_f64_e64 v[0:1], v[254:255] ; encoding: [0x00,0x00,0x57,0xd1,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x57,0xd1,0xfe,0x01,0x00,0x00
+
+# CHECK: v_trunc_f64_e64 v[0:1], -s[0:1] ; encoding: [0x00,0x00,0x57,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x57,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_trunc_f64_e64 v[0:1], |s[0:1]| ; encoding: [0x00,0x01,0x57,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x57,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_trunc_f64_e64 v[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x57,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x57,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_trunc_f64_e64 v[0:1], s[0:1] mul:2 ; encoding: [0x00,0x00,0x57,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x57,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_trunc_f64_e64 v[0:1], s[0:1] mul:4 ; encoding: [0x00,0x00,0x57,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x57,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_trunc_f64_e64 v[0:1], s[0:1] div:2 ; encoding: [0x00,0x00,0x57,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x57,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_ceil_f64_e32 v[0:1], s[0:1] ; encoding: [0x00,0x30,0x00,0x7e]
+0x00,0x30,0x00,0x7e
+
+# CHECK: v_ceil_f64_e32 v[254:255], s[0:1] ; encoding: [0x00,0x30,0xfc,0x7f]
+0x00,0x30,0xfc,0x7f
+
+# CHECK: v_ceil_f64_e32 v[0:1], s[2:3] ; encoding: [0x02,0x30,0x00,0x7e]
+0x02,0x30,0x00,0x7e
+
+# CHECK: v_ceil_f64_e32 v[0:1], s[100:101] ; encoding: [0x64,0x30,0x00,0x7e]
+0x64,0x30,0x00,0x7e
+
+# CHECK: v_ceil_f64_e32 v[0:1], flat_scratch ; encoding: [0x66,0x30,0x00,0x7e]
+0x66,0x30,0x00,0x7e
+
+# CHECK: v_ceil_f64_e32 v[0:1], vcc ; encoding: [0x6a,0x30,0x00,0x7e]
+0x6a,0x30,0x00,0x7e
+
+# CHECK: v_ceil_f64_e32 v[0:1], tba ; encoding: [0x6c,0x30,0x00,0x7e]
+0x6c,0x30,0x00,0x7e
+
+# CHECK: v_ceil_f64_e32 v[0:1], tma ; encoding: [0x6e,0x30,0x00,0x7e]
+0x6e,0x30,0x00,0x7e
+
+# CHECK: v_ceil_f64_e32 v[0:1], ttmp[10:11] ; encoding: [0x7a,0x30,0x00,0x7e]
+0x7a,0x30,0x00,0x7e
+
+# CHECK: v_ceil_f64_e32 v[0:1], exec ; encoding: [0x7e,0x30,0x00,0x7e]
+0x7e,0x30,0x00,0x7e
+
+# CHECK: v_ceil_f64_e32 v[0:1], 0 ; encoding: [0x80,0x30,0x00,0x7e]
+0x80,0x30,0x00,0x7e
+
+# CHECK: v_ceil_f64_e32 v[0:1], -1 ; encoding: [0xc1,0x30,0x00,0x7e]
+0xc1,0x30,0x00,0x7e
+
+# CHECK: v_ceil_f64_e32 v[0:1], 0.5 ; encoding: [0xf0,0x30,0x00,0x7e]
+0xf0,0x30,0x00,0x7e
+
+# CHECK: v_ceil_f64_e32 v[0:1], -4.0 ; encoding: [0xf7,0x30,0x00,0x7e]
+0xf7,0x30,0x00,0x7e
+
+# CHECK: v_ceil_f64_e32 v[0:1], 0xaf123456 ; encoding: [0xff,0x30,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x30,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_ceil_f64_e32 v[0:1], 0x3f717273 ; encoding: [0xff,0x30,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x30,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_ceil_f64_e32 v[0:1], v[0:1] ; encoding: [0x00,0x31,0x00,0x7e]
+0x00,0x31,0x00,0x7e
+
+# CHECK: v_ceil_f64_e32 v[0:1], v[254:255] ; encoding: [0xfe,0x31,0x00,0x7e]
+0xfe,0x31,0x00,0x7e
+
+# CHECK: v_ceil_f64_e64 v[0:1], s[0:1] ; encoding: [0x00,0x00,0x58,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ceil_f64_e64 v[254:255], s[0:1] ; encoding: [0xfe,0x00,0x58,0xd1,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x58,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ceil_f64_e64 v[0:1], s[2:3] ; encoding: [0x00,0x00,0x58,0xd1,0x02,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd1,0x02,0x00,0x00,0x00
+
+# CHECK: v_ceil_f64_e64 v[0:1], s[100:101] ; encoding: [0x00,0x00,0x58,0xd1,0x64,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd1,0x64,0x00,0x00,0x00
+
+# CHECK: v_ceil_f64_e64 v[0:1], flat_scratch ; encoding: [0x00,0x00,0x58,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_ceil_f64_e64 v[0:1], vcc ; encoding: [0x00,0x00,0x58,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_ceil_f64_e64 v[0:1], tba ; encoding: [0x00,0x00,0x58,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_ceil_f64_e64 v[0:1], tma ; encoding: [0x00,0x00,0x58,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_ceil_f64_e64 v[0:1], ttmp[10:11] ; encoding: [0x00,0x00,0x58,0xd1,0x7a,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd1,0x7a,0x00,0x00,0x00
+
+# CHECK: v_ceil_f64_e64 v[0:1], exec ; encoding: [0x00,0x00,0x58,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_ceil_f64_e64 v[0:1], 0 ; encoding: [0x00,0x00,0x58,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_ceil_f64_e64 v[0:1], -1 ; encoding: [0x00,0x00,0x58,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_ceil_f64_e64 v[0:1], 0.5 ; encoding: [0x00,0x00,0x58,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_ceil_f64_e64 v[0:1], -4.0 ; encoding: [0x00,0x00,0x58,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_ceil_f64_e64 v[0:1], v[0:1] ; encoding: [0x00,0x00,0x58,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x58,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_ceil_f64_e64 v[0:1], v[254:255] ; encoding: [0x00,0x00,0x58,0xd1,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x58,0xd1,0xfe,0x01,0x00,0x00
+
+# CHECK: v_ceil_f64_e64 v[0:1], -s[0:1] ; encoding: [0x00,0x00,0x58,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x58,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_ceil_f64_e64 v[0:1], |s[0:1]| ; encoding: [0x00,0x01,0x58,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x58,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ceil_f64_e64 v[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x58,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x58,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ceil_f64_e64 v[0:1], s[0:1] mul:2 ; encoding: [0x00,0x00,0x58,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x58,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_ceil_f64_e64 v[0:1], s[0:1] mul:4 ; encoding: [0x00,0x00,0x58,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x58,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_ceil_f64_e64 v[0:1], s[0:1] div:2 ; encoding: [0x00,0x00,0x58,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x58,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_rndne_f64_e32 v[0:1], s[0:1] ; encoding: [0x00,0x32,0x00,0x7e]
+0x00,0x32,0x00,0x7e
+
+# CHECK: v_rndne_f64_e32 v[254:255], s[0:1] ; encoding: [0x00,0x32,0xfc,0x7f]
+0x00,0x32,0xfc,0x7f
+
+# CHECK: v_rndne_f64_e32 v[0:1], s[2:3] ; encoding: [0x02,0x32,0x00,0x7e]
+0x02,0x32,0x00,0x7e
+
+# CHECK: v_rndne_f64_e32 v[0:1], s[100:101] ; encoding: [0x64,0x32,0x00,0x7e]
+0x64,0x32,0x00,0x7e
+
+# CHECK: v_rndne_f64_e32 v[0:1], flat_scratch ; encoding: [0x66,0x32,0x00,0x7e]
+0x66,0x32,0x00,0x7e
+
+# CHECK: v_rndne_f64_e32 v[0:1], vcc ; encoding: [0x6a,0x32,0x00,0x7e]
+0x6a,0x32,0x00,0x7e
+
+# CHECK: v_rndne_f64_e32 v[0:1], tba ; encoding: [0x6c,0x32,0x00,0x7e]
+0x6c,0x32,0x00,0x7e
+
+# CHECK: v_rndne_f64_e32 v[0:1], tma ; encoding: [0x6e,0x32,0x00,0x7e]
+0x6e,0x32,0x00,0x7e
+
+# CHECK: v_rndne_f64_e32 v[0:1], ttmp[10:11] ; encoding: [0x7a,0x32,0x00,0x7e]
+0x7a,0x32,0x00,0x7e
+
+# CHECK: v_rndne_f64_e32 v[0:1], exec ; encoding: [0x7e,0x32,0x00,0x7e]
+0x7e,0x32,0x00,0x7e
+
+# CHECK: v_rndne_f64_e32 v[0:1], 0 ; encoding: [0x80,0x32,0x00,0x7e]
+0x80,0x32,0x00,0x7e
+
+# CHECK: v_rndne_f64_e32 v[0:1], -1 ; encoding: [0xc1,0x32,0x00,0x7e]
+0xc1,0x32,0x00,0x7e
+
+# CHECK: v_rndne_f64_e32 v[0:1], 0.5 ; encoding: [0xf0,0x32,0x00,0x7e]
+0xf0,0x32,0x00,0x7e
+
+# CHECK: v_rndne_f64_e32 v[0:1], -4.0 ; encoding: [0xf7,0x32,0x00,0x7e]
+0xf7,0x32,0x00,0x7e
+
+# CHECK: v_rndne_f64_e32 v[0:1], 0xaf123456 ; encoding: [0xff,0x32,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x32,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_rndne_f64_e32 v[0:1], 0x3f717273 ; encoding: [0xff,0x32,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x32,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_rndne_f64_e32 v[0:1], v[0:1] ; encoding: [0x00,0x33,0x00,0x7e]
+0x00,0x33,0x00,0x7e
+
+# CHECK: v_rndne_f64_e32 v[0:1], v[254:255] ; encoding: [0xfe,0x33,0x00,0x7e]
+0xfe,0x33,0x00,0x7e
+
+# CHECK: v_rndne_f64_e64 v[0:1], s[0:1] ; encoding: [0x00,0x00,0x59,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rndne_f64_e64 v[254:255], s[0:1] ; encoding: [0xfe,0x00,0x59,0xd1,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x59,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rndne_f64_e64 v[0:1], s[2:3] ; encoding: [0x00,0x00,0x59,0xd1,0x02,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd1,0x02,0x00,0x00,0x00
+
+# CHECK: v_rndne_f64_e64 v[0:1], s[100:101] ; encoding: [0x00,0x00,0x59,0xd1,0x64,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd1,0x64,0x00,0x00,0x00
+
+# CHECK: v_rndne_f64_e64 v[0:1], flat_scratch ; encoding: [0x00,0x00,0x59,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_rndne_f64_e64 v[0:1], vcc ; encoding: [0x00,0x00,0x59,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_rndne_f64_e64 v[0:1], tba ; encoding: [0x00,0x00,0x59,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_rndne_f64_e64 v[0:1], tma ; encoding: [0x00,0x00,0x59,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_rndne_f64_e64 v[0:1], ttmp[10:11] ; encoding: [0x00,0x00,0x59,0xd1,0x7a,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd1,0x7a,0x00,0x00,0x00
+
+# CHECK: v_rndne_f64_e64 v[0:1], exec ; encoding: [0x00,0x00,0x59,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_rndne_f64_e64 v[0:1], 0 ; encoding: [0x00,0x00,0x59,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_rndne_f64_e64 v[0:1], -1 ; encoding: [0x00,0x00,0x59,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_rndne_f64_e64 v[0:1], 0.5 ; encoding: [0x00,0x00,0x59,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_rndne_f64_e64 v[0:1], -4.0 ; encoding: [0x00,0x00,0x59,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_rndne_f64_e64 v[0:1], v[0:1] ; encoding: [0x00,0x00,0x59,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x59,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_rndne_f64_e64 v[0:1], v[254:255] ; encoding: [0x00,0x00,0x59,0xd1,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x59,0xd1,0xfe,0x01,0x00,0x00
+
+# CHECK: v_rndne_f64_e64 v[0:1], -s[0:1] ; encoding: [0x00,0x00,0x59,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x59,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_rndne_f64_e64 v[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x59,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x59,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rndne_f64_e64 v[0:1], s[0:1] mul:2 ; encoding: [0x00,0x00,0x59,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x59,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_rndne_f64_e64 v[0:1], s[0:1] mul:4 ; encoding: [0x00,0x00,0x59,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x59,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_rndne_f64_e64 v[0:1], s[0:1] div:2 ; encoding: [0x00,0x00,0x59,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x59,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_floor_f64_e32 v[0:1], s[0:1] ; encoding: [0x00,0x34,0x00,0x7e]
+0x00,0x34,0x00,0x7e
+
+# CHECK: v_floor_f64_e32 v[254:255], s[0:1] ; encoding: [0x00,0x34,0xfc,0x7f]
+0x00,0x34,0xfc,0x7f
+
+# CHECK: v_floor_f64_e32 v[0:1], s[2:3] ; encoding: [0x02,0x34,0x00,0x7e]
+0x02,0x34,0x00,0x7e
+
+# CHECK: v_floor_f64_e32 v[0:1], s[100:101] ; encoding: [0x64,0x34,0x00,0x7e]
+0x64,0x34,0x00,0x7e
+
+# CHECK: v_floor_f64_e32 v[0:1], flat_scratch ; encoding: [0x66,0x34,0x00,0x7e]
+0x66,0x34,0x00,0x7e
+
+# CHECK: v_floor_f64_e32 v[0:1], vcc ; encoding: [0x6a,0x34,0x00,0x7e]
+0x6a,0x34,0x00,0x7e
+
+# CHECK: v_floor_f64_e32 v[0:1], tba ; encoding: [0x6c,0x34,0x00,0x7e]
+0x6c,0x34,0x00,0x7e
+
+# CHECK: v_floor_f64_e32 v[0:1], tma ; encoding: [0x6e,0x34,0x00,0x7e]
+0x6e,0x34,0x00,0x7e
+
+# CHECK: v_floor_f64_e32 v[0:1], ttmp[10:11] ; encoding: [0x7a,0x34,0x00,0x7e]
+0x7a,0x34,0x00,0x7e
+
+# CHECK: v_floor_f64_e32 v[0:1], exec ; encoding: [0x7e,0x34,0x00,0x7e]
+0x7e,0x34,0x00,0x7e
+
+# CHECK: v_floor_f64_e32 v[0:1], 0 ; encoding: [0x80,0x34,0x00,0x7e]
+0x80,0x34,0x00,0x7e
+
+# CHECK: v_floor_f64_e32 v[0:1], -1 ; encoding: [0xc1,0x34,0x00,0x7e]
+0xc1,0x34,0x00,0x7e
+
+# CHECK: v_floor_f64_e32 v[0:1], 0.5 ; encoding: [0xf0,0x34,0x00,0x7e]
+0xf0,0x34,0x00,0x7e
+
+# CHECK: v_floor_f64_e32 v[0:1], -4.0 ; encoding: [0xf7,0x34,0x00,0x7e]
+0xf7,0x34,0x00,0x7e
+
+# CHECK: v_floor_f64_e32 v[0:1], 0xaf123456 ; encoding: [0xff,0x34,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x34,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_floor_f64_e32 v[0:1], 0x3f717273 ; encoding: [0xff,0x34,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x34,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_floor_f64_e32 v[0:1], v[0:1] ; encoding: [0x00,0x35,0x00,0x7e]
+0x00,0x35,0x00,0x7e
+
+# CHECK: v_floor_f64_e32 v[0:1], v[254:255] ; encoding: [0xfe,0x35,0x00,0x7e]
+0xfe,0x35,0x00,0x7e
+
+# CHECK: v_floor_f64_e64 v[0:1], s[0:1] ; encoding: [0x00,0x00,0x5a,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_floor_f64_e64 v[254:255], s[0:1] ; encoding: [0xfe,0x00,0x5a,0xd1,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x5a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_floor_f64_e64 v[0:1], s[2:3] ; encoding: [0x00,0x00,0x5a,0xd1,0x02,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd1,0x02,0x00,0x00,0x00
+
+# CHECK: v_floor_f64_e64 v[0:1], s[100:101] ; encoding: [0x00,0x00,0x5a,0xd1,0x64,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd1,0x64,0x00,0x00,0x00
+
+# CHECK: v_floor_f64_e64 v[0:1], flat_scratch ; encoding: [0x00,0x00,0x5a,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_floor_f64_e64 v[0:1], vcc ; encoding: [0x00,0x00,0x5a,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_floor_f64_e64 v[0:1], tba ; encoding: [0x00,0x00,0x5a,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_floor_f64_e64 v[0:1], tma ; encoding: [0x00,0x00,0x5a,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_floor_f64_e64 v[0:1], ttmp[10:11] ; encoding: [0x00,0x00,0x5a,0xd1,0x7a,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd1,0x7a,0x00,0x00,0x00
+
+# CHECK: v_floor_f64_e64 v[0:1], exec ; encoding: [0x00,0x00,0x5a,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_floor_f64_e64 v[0:1], 0 ; encoding: [0x00,0x00,0x5a,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_floor_f64_e64 v[0:1], -1 ; encoding: [0x00,0x00,0x5a,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_floor_f64_e64 v[0:1], 0.5 ; encoding: [0x00,0x00,0x5a,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_floor_f64_e64 v[0:1], -4.0 ; encoding: [0x00,0x00,0x5a,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_floor_f64_e64 v[0:1], v[0:1] ; encoding: [0x00,0x00,0x5a,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x5a,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_floor_f64_e64 v[0:1], v[254:255] ; encoding: [0x00,0x00,0x5a,0xd1,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x5a,0xd1,0xfe,0x01,0x00,0x00
+
+# CHECK: v_floor_f64_e64 v[0:1], -s[0:1] ; encoding: [0x00,0x00,0x5a,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x5a,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_floor_f64_e64 v[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x5a,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x5a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_floor_f64_e64 v[0:1], s[0:1] mul:2 ; encoding: [0x00,0x00,0x5a,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x5a,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_floor_f64_e64 v[0:1], s[0:1] mul:4 ; encoding: [0x00,0x00,0x5a,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x5a,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_floor_f64_e64 v[0:1], s[0:1] div:2 ; encoding: [0x00,0x00,0x5a,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x5a,0xd1,0x00,0x00,0x00,0x18
+
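+# From v_fract_f32 on, the checked ops operate on 32-bit values, so sources
+# are single SGPRs/VGPRs and the 32-bit-only operands (flat_scratch_lo/_hi,
+# vcc_lo/_hi, tba_lo/_hi, tma_lo/_hi, ttmp11, m0 and scc) appear, whereas the
+# 64-bit checks above used register pairs and omitted them.
+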
+# CHECK: v_fract_f32_e32 v0, s0 ; encoding: [0x00,0x36,0x00,0x7e]
+0x00,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v255, s0 ; encoding: [0x00,0x36,0xfe,0x7f]
+0x00,0x36,0xfe,0x7f
+
+# CHECK: v_fract_f32_e32 v0, s101 ; encoding: [0x65,0x36,0x00,0x7e]
+0x65,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x36,0x00,0x7e]
+0x66,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x36,0x00,0x7e]
+0x67,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x36,0x00,0x7e]
+0x6a,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x36,0x00,0x7e]
+0x6b,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, tba_lo ; encoding: [0x6c,0x36,0x00,0x7e]
+0x6c,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, tba_hi ; encoding: [0x6d,0x36,0x00,0x7e]
+0x6d,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, tma_lo ; encoding: [0x6e,0x36,0x00,0x7e]
+0x6e,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, tma_hi ; encoding: [0x6f,0x36,0x00,0x7e]
+0x6f,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x36,0x00,0x7e]
+0x7b,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, m0 ; encoding: [0x7c,0x36,0x00,0x7e]
+0x7c,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, exec_lo ; encoding: [0x7e,0x36,0x00,0x7e]
+0x7e,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, exec_hi ; encoding: [0x7f,0x36,0x00,0x7e]
+0x7f,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, 0 ; encoding: [0x80,0x36,0x00,0x7e]
+0x80,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, -1 ; encoding: [0xc1,0x36,0x00,0x7e]
+0xc1,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, 0.5 ; encoding: [0xf0,0x36,0x00,0x7e]
+0xf0,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, -4.0 ; encoding: [0xf7,0x36,0x00,0x7e]
+0xf7,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, scc ; encoding: [0xfd,0x36,0x00,0x7e]
+0xfd,0x36,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x36,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x36,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_fract_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x36,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x36,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_fract_f32_e32 v0, v0 ; encoding: [0x00,0x37,0x00,0x7e]
+0x00,0x37,0x00,0x7e
+
+# CHECK: v_fract_f32_e32 v0, v255 ; encoding: [0xff,0x37,0x00,0x7e]
+0xff,0x37,0x00,0x7e
+
+# CHECK: v_fract_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x5b,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x5b,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x5b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x5b,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x5b,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x5b,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x5b,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x5b,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x5b,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x5b,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x5b,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x5b,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x5b,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x5b,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x5b,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x5b,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x5b,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x5b,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x5b,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x5b,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, scc ; encoding: [0x00,0x00,0x5b,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x5b,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x5b,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x5b,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x5b,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x5b,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_fract_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x5b,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x5b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fract_f32_e64 v0, s0 mul:2 ; encoding: [0x00,0x00,0x5b,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x5b,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_fract_f32_e64 v0, s0 mul:4 ; encoding: [0x00,0x00,0x5b,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x5b,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_fract_f32_e64 v0, s0 div:2 ; encoding: [0x00,0x00,0x5b,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x5b,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_trunc_f32_e32 v0, s0 ; encoding: [0x00,0x38,0x00,0x7e]
+0x00,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v255, s0 ; encoding: [0x00,0x38,0xfe,0x7f]
+0x00,0x38,0xfe,0x7f
+
+# CHECK: v_trunc_f32_e32 v0, s101 ; encoding: [0x65,0x38,0x00,0x7e]
+0x65,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x38,0x00,0x7e]
+0x66,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x38,0x00,0x7e]
+0x67,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x38,0x00,0x7e]
+0x6a,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x38,0x00,0x7e]
+0x6b,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, tba_lo ; encoding: [0x6c,0x38,0x00,0x7e]
+0x6c,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, tba_hi ; encoding: [0x6d,0x38,0x00,0x7e]
+0x6d,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, tma_lo ; encoding: [0x6e,0x38,0x00,0x7e]
+0x6e,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, tma_hi ; encoding: [0x6f,0x38,0x00,0x7e]
+0x6f,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x38,0x00,0x7e]
+0x7b,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, m0 ; encoding: [0x7c,0x38,0x00,0x7e]
+0x7c,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, exec_lo ; encoding: [0x7e,0x38,0x00,0x7e]
+0x7e,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, exec_hi ; encoding: [0x7f,0x38,0x00,0x7e]
+0x7f,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, 0 ; encoding: [0x80,0x38,0x00,0x7e]
+0x80,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, -1 ; encoding: [0xc1,0x38,0x00,0x7e]
+0xc1,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, 0.5 ; encoding: [0xf0,0x38,0x00,0x7e]
+0xf0,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, -4.0 ; encoding: [0xf7,0x38,0x00,0x7e]
+0xf7,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, scc ; encoding: [0xfd,0x38,0x00,0x7e]
+0xfd,0x38,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x38,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x38,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_trunc_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x38,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x38,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_trunc_f32_e32 v0, v0 ; encoding: [0x00,0x39,0x00,0x7e]
+0x00,0x39,0x00,0x7e
+
+# CHECK: v_trunc_f32_e32 v0, v255 ; encoding: [0xff,0x39,0x00,0x7e]
+0xff,0x39,0x00,0x7e
+
+# CHECK: v_trunc_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x5c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x5c,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x5c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x5c,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x5c,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x5c,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x5c,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x5c,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x5c,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x5c,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x5c,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x5c,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x5c,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x5c,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x5c,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x5c,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x5c,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x5c,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x5c,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x5c,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, scc ; encoding: [0x00,0x00,0x5c,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x5c,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x5c,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x5c,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x5c,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x5c,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_trunc_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x5c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x5c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_trunc_f32_e64 v0, s0 mul:2 ; encoding: [0x00,0x00,0x5c,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x5c,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_trunc_f32_e64 v0, s0 mul:4 ; encoding: [0x00,0x00,0x5c,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x5c,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_trunc_f32_e64 v0, s0 div:2 ; encoding: [0x00,0x00,0x5c,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x5c,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_ceil_f32_e32 v0, s0 ; encoding: [0x00,0x3a,0x00,0x7e]
+0x00,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v255, s0 ; encoding: [0x00,0x3a,0xfe,0x7f]
+0x00,0x3a,0xfe,0x7f
+
+# CHECK: v_ceil_f32_e32 v0, s101 ; encoding: [0x65,0x3a,0x00,0x7e]
+0x65,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x3a,0x00,0x7e]
+0x66,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x3a,0x00,0x7e]
+0x67,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x3a,0x00,0x7e]
+0x6a,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x3a,0x00,0x7e]
+0x6b,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, tba_lo ; encoding: [0x6c,0x3a,0x00,0x7e]
+0x6c,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, tba_hi ; encoding: [0x6d,0x3a,0x00,0x7e]
+0x6d,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, tma_lo ; encoding: [0x6e,0x3a,0x00,0x7e]
+0x6e,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, tma_hi ; encoding: [0x6f,0x3a,0x00,0x7e]
+0x6f,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x3a,0x00,0x7e]
+0x7b,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, m0 ; encoding: [0x7c,0x3a,0x00,0x7e]
+0x7c,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, exec_lo ; encoding: [0x7e,0x3a,0x00,0x7e]
+0x7e,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, exec_hi ; encoding: [0x7f,0x3a,0x00,0x7e]
+0x7f,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, 0 ; encoding: [0x80,0x3a,0x00,0x7e]
+0x80,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, -1 ; encoding: [0xc1,0x3a,0x00,0x7e]
+0xc1,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, 0.5 ; encoding: [0xf0,0x3a,0x00,0x7e]
+0xf0,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, -4.0 ; encoding: [0xf7,0x3a,0x00,0x7e]
+0xf7,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, scc ; encoding: [0xfd,0x3a,0x00,0x7e]
+0xfd,0x3a,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x3a,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x3a,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_ceil_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x3a,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x3a,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_ceil_f32_e32 v0, v0 ; encoding: [0x00,0x3b,0x00,0x7e]
+0x00,0x3b,0x00,0x7e
+
+# CHECK: v_ceil_f32_e32 v0, v255 ; encoding: [0xff,0x3b,0x00,0x7e]
+0xff,0x3b,0x00,0x7e
+
+# CHECK: v_ceil_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x5d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x5d,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x5d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x5d,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x5d,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x5d,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x5d,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x5d,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x5d,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x5d,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x5d,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x5d,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x5d,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x5d,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x5d,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x5d,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x5d,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x5d,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x5d,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x5d,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, scc ; encoding: [0x00,0x00,0x5d,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x5d,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x5d,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x5d,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x5d,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x5d,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_ceil_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x5d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x5d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ceil_f32_e64 v0, s0 mul:2 ; encoding: [0x00,0x00,0x5d,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x5d,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_ceil_f32_e64 v0, s0 mul:4 ; encoding: [0x00,0x00,0x5d,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x5d,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_ceil_f32_e64 v0, s0 div:2 ; encoding: [0x00,0x00,0x5d,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x5d,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_rndne_f32_e32 v0, s0 ; encoding: [0x00,0x3c,0x00,0x7e]
+0x00,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v255, s0 ; encoding: [0x00,0x3c,0xfe,0x7f]
+0x00,0x3c,0xfe,0x7f
+
+# CHECK: v_rndne_f32_e32 v0, s101 ; encoding: [0x65,0x3c,0x00,0x7e]
+0x65,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x3c,0x00,0x7e]
+0x66,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x3c,0x00,0x7e]
+0x67,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x3c,0x00,0x7e]
+0x6a,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x3c,0x00,0x7e]
+0x6b,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, tba_lo ; encoding: [0x6c,0x3c,0x00,0x7e]
+0x6c,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, tba_hi ; encoding: [0x6d,0x3c,0x00,0x7e]
+0x6d,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, tma_lo ; encoding: [0x6e,0x3c,0x00,0x7e]
+0x6e,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, tma_hi ; encoding: [0x6f,0x3c,0x00,0x7e]
+0x6f,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x3c,0x00,0x7e]
+0x7b,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, m0 ; encoding: [0x7c,0x3c,0x00,0x7e]
+0x7c,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, exec_lo ; encoding: [0x7e,0x3c,0x00,0x7e]
+0x7e,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, exec_hi ; encoding: [0x7f,0x3c,0x00,0x7e]
+0x7f,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, 0 ; encoding: [0x80,0x3c,0x00,0x7e]
+0x80,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, -1 ; encoding: [0xc1,0x3c,0x00,0x7e]
+0xc1,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, 0.5 ; encoding: [0xf0,0x3c,0x00,0x7e]
+0xf0,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, -4.0 ; encoding: [0xf7,0x3c,0x00,0x7e]
+0xf7,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, scc ; encoding: [0xfd,0x3c,0x00,0x7e]
+0xfd,0x3c,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x3c,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x3c,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_rndne_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x3c,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x3c,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_rndne_f32_e32 v0, v0 ; encoding: [0x00,0x3d,0x00,0x7e]
+0x00,0x3d,0x00,0x7e
+
+# CHECK: v_rndne_f32_e32 v0, v255 ; encoding: [0xff,0x3d,0x00,0x7e]
+0xff,0x3d,0x00,0x7e
+
+# CHECK: v_rndne_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x5e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x5e,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x5e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x5e,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x5e,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x5e,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x5e,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x5e,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x5e,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x5e,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x5e,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x5e,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x5e,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x5e,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x5e,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x5e,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x5e,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x5e,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x5e,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x5e,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, scc ; encoding: [0x00,0x00,0x5e,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x5e,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x5e,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x5e,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x5e,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x5e,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_rndne_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x5e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x5e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rndne_f32_e64 v0, s0 mul:2 ; encoding: [0x00,0x00,0x5e,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x5e,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_rndne_f32_e64 v0, s0 mul:4 ; encoding: [0x00,0x00,0x5e,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x5e,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_rndne_f32_e64 v0, s0 div:2 ; encoding: [0x00,0x00,0x5e,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x5e,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_floor_f32_e32 v0, s0 ; encoding: [0x00,0x3e,0x00,0x7e]
+0x00,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v255, s0 ; encoding: [0x00,0x3e,0xfe,0x7f]
+0x00,0x3e,0xfe,0x7f
+
+# CHECK: v_floor_f32_e32 v0, s101 ; encoding: [0x65,0x3e,0x00,0x7e]
+0x65,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x3e,0x00,0x7e]
+0x66,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x3e,0x00,0x7e]
+0x67,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x3e,0x00,0x7e]
+0x6a,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x3e,0x00,0x7e]
+0x6b,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, tba_lo ; encoding: [0x6c,0x3e,0x00,0x7e]
+0x6c,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, tba_hi ; encoding: [0x6d,0x3e,0x00,0x7e]
+0x6d,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, tma_lo ; encoding: [0x6e,0x3e,0x00,0x7e]
+0x6e,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, tma_hi ; encoding: [0x6f,0x3e,0x00,0x7e]
+0x6f,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x3e,0x00,0x7e]
+0x7b,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, m0 ; encoding: [0x7c,0x3e,0x00,0x7e]
+0x7c,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, exec_lo ; encoding: [0x7e,0x3e,0x00,0x7e]
+0x7e,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, exec_hi ; encoding: [0x7f,0x3e,0x00,0x7e]
+0x7f,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, 0 ; encoding: [0x80,0x3e,0x00,0x7e]
+0x80,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, -1 ; encoding: [0xc1,0x3e,0x00,0x7e]
+0xc1,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, 0.5 ; encoding: [0xf0,0x3e,0x00,0x7e]
+0xf0,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, -4.0 ; encoding: [0xf7,0x3e,0x00,0x7e]
+0xf7,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, scc ; encoding: [0xfd,0x3e,0x00,0x7e]
+0xfd,0x3e,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x3e,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x3e,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_floor_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x3e,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x3e,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_floor_f32_e32 v0, v0 ; encoding: [0x00,0x3f,0x00,0x7e]
+0x00,0x3f,0x00,0x7e
+
+# CHECK: v_floor_f32_e32 v0, v255 ; encoding: [0xff,0x3f,0x00,0x7e]
+0xff,0x3f,0x00,0x7e
+
+# CHECK: v_floor_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x5f,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x5f,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x5f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x5f,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x5f,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x5f,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x5f,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x5f,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x5f,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x5f,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x5f,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x5f,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x5f,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x5f,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x5f,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x5f,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x5f,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x5f,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x5f,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x5f,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, scc ; encoding: [0x00,0x00,0x5f,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x5f,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x5f,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x5f,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x5f,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x5f,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_floor_f32_e64 v0, |s0| ; encoding: [0x00,0x01,0x5f,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x5f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x5f,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x5f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_floor_f32_e64 v0, s0 mul:2 ; encoding: [0x00,0x00,0x5f,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x5f,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_floor_f32_e64 v0, s0 mul:4 ; encoding: [0x00,0x00,0x5f,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x5f,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_floor_f32_e64 v0, s0 div:2 ; encoding: [0x00,0x00,0x5f,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x5f,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_exp_f32_e32 v0, s0 ; encoding: [0x00,0x40,0x00,0x7e]
+0x00,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v255, s0 ; encoding: [0x00,0x40,0xfe,0x7f]
+0x00,0x40,0xfe,0x7f
+
+# CHECK: v_exp_f32_e32 v0, s101 ; encoding: [0x65,0x40,0x00,0x7e]
+0x65,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x40,0x00,0x7e]
+0x66,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x40,0x00,0x7e]
+0x67,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x40,0x00,0x7e]
+0x6a,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x40,0x00,0x7e]
+0x6b,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, tba_lo ; encoding: [0x6c,0x40,0x00,0x7e]
+0x6c,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, tba_hi ; encoding: [0x6d,0x40,0x00,0x7e]
+0x6d,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, tma_lo ; encoding: [0x6e,0x40,0x00,0x7e]
+0x6e,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, tma_hi ; encoding: [0x6f,0x40,0x00,0x7e]
+0x6f,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x40,0x00,0x7e]
+0x7b,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, m0 ; encoding: [0x7c,0x40,0x00,0x7e]
+0x7c,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, exec_lo ; encoding: [0x7e,0x40,0x00,0x7e]
+0x7e,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, exec_hi ; encoding: [0x7f,0x40,0x00,0x7e]
+0x7f,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, 0 ; encoding: [0x80,0x40,0x00,0x7e]
+0x80,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, -1 ; encoding: [0xc1,0x40,0x00,0x7e]
+0xc1,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, 0.5 ; encoding: [0xf0,0x40,0x00,0x7e]
+0xf0,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, -4.0 ; encoding: [0xf7,0x40,0x00,0x7e]
+0xf7,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, scc ; encoding: [0xfd,0x40,0x00,0x7e]
+0xfd,0x40,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x40,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x40,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_exp_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x40,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x40,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_exp_f32_e32 v0, v0 ; encoding: [0x00,0x41,0x00,0x7e]
+0x00,0x41,0x00,0x7e
+
+# CHECK: v_exp_f32_e32 v0, v255 ; encoding: [0xff,0x41,0x00,0x7e]
+0xff,0x41,0x00,0x7e
+
+# CHECK: v_exp_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x60,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x60,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x60,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x60,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x60,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x60,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x60,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x60,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x60,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x60,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x60,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x60,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x60,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x60,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x60,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x60,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x60,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x60,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x60,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x60,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, scc ; encoding: [0x00,0x00,0x60,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x60,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x60,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x60,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x60,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x60,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x60,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_exp_f32_e64 v0, |s0| ; encoding: [0x00,0x01,0x60,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x60,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x60,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x60,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_exp_f32_e64 v0, s0 mul:2 ; encoding: [0x00,0x00,0x60,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x60,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_exp_f32_e64 v0, s0 mul:4 ; encoding: [0x00,0x00,0x60,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x60,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_exp_f32_e64 v0, s0 div:2 ; encoding: [0x00,0x00,0x60,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x60,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_log_f32_e32 v0, s0 ; encoding: [0x00,0x42,0x00,0x7e]
+0x00,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v255, s0 ; encoding: [0x00,0x42,0xfe,0x7f]
+0x00,0x42,0xfe,0x7f
+
+# CHECK: v_log_f32_e32 v0, s101 ; encoding: [0x65,0x42,0x00,0x7e]
+0x65,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x42,0x00,0x7e]
+0x66,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x42,0x00,0x7e]
+0x67,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x42,0x00,0x7e]
+0x6a,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x42,0x00,0x7e]
+0x6b,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, tba_lo ; encoding: [0x6c,0x42,0x00,0x7e]
+0x6c,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, tba_hi ; encoding: [0x6d,0x42,0x00,0x7e]
+0x6d,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, tma_lo ; encoding: [0x6e,0x42,0x00,0x7e]
+0x6e,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, tma_hi ; encoding: [0x6f,0x42,0x00,0x7e]
+0x6f,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x42,0x00,0x7e]
+0x7b,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, m0 ; encoding: [0x7c,0x42,0x00,0x7e]
+0x7c,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, exec_lo ; encoding: [0x7e,0x42,0x00,0x7e]
+0x7e,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, exec_hi ; encoding: [0x7f,0x42,0x00,0x7e]
+0x7f,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, 0 ; encoding: [0x80,0x42,0x00,0x7e]
+0x80,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, -1 ; encoding: [0xc1,0x42,0x00,0x7e]
+0xc1,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, 0.5 ; encoding: [0xf0,0x42,0x00,0x7e]
+0xf0,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, -4.0 ; encoding: [0xf7,0x42,0x00,0x7e]
+0xf7,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, scc ; encoding: [0xfd,0x42,0x00,0x7e]
+0xfd,0x42,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x42,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x42,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_log_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x42,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x42,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_log_f32_e32 v0, v0 ; encoding: [0x00,0x43,0x00,0x7e]
+0x00,0x43,0x00,0x7e
+
+# CHECK: v_log_f32_e32 v0, v255 ; encoding: [0xff,0x43,0x00,0x7e]
+0xff,0x43,0x00,0x7e
+
+# CHECK: v_log_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x61,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x61,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x61,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x61,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x61,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x61,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x61,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x61,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x61,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x61,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x61,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x61,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x61,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x61,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x61,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x61,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x61,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x61,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x61,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x61,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, scc ; encoding: [0x00,0x00,0x61,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x61,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x61,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x61,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x61,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x61,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x61,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_log_f32_e64 v0, |s0| ; encoding: [0x00,0x01,0x61,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x61,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x61,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x61,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_log_f32_e64 v0, s0 mul:2 ; encoding: [0x00,0x00,0x61,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x61,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_log_f32_e64 v0, s0 mul:4 ; encoding: [0x00,0x00,0x61,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x61,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_log_f32_e64 v0, s0 div:2 ; encoding: [0x00,0x00,0x61,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x61,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_rcp_f32_e32 v0, s0 ; encoding: [0x00,0x44,0x00,0x7e]
+0x00,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v255, s0 ; encoding: [0x00,0x44,0xfe,0x7f]
+0x00,0x44,0xfe,0x7f
+
+# CHECK: v_rcp_f32_e32 v0, s101 ; encoding: [0x65,0x44,0x00,0x7e]
+0x65,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x44,0x00,0x7e]
+0x66,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x44,0x00,0x7e]
+0x67,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x44,0x00,0x7e]
+0x6a,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x44,0x00,0x7e]
+0x6b,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, tba_lo ; encoding: [0x6c,0x44,0x00,0x7e]
+0x6c,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, tba_hi ; encoding: [0x6d,0x44,0x00,0x7e]
+0x6d,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, tma_lo ; encoding: [0x6e,0x44,0x00,0x7e]
+0x6e,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, tma_hi ; encoding: [0x6f,0x44,0x00,0x7e]
+0x6f,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x44,0x00,0x7e]
+0x7b,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, m0 ; encoding: [0x7c,0x44,0x00,0x7e]
+0x7c,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, exec_lo ; encoding: [0x7e,0x44,0x00,0x7e]
+0x7e,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, exec_hi ; encoding: [0x7f,0x44,0x00,0x7e]
+0x7f,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, 0 ; encoding: [0x80,0x44,0x00,0x7e]
+0x80,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, -1 ; encoding: [0xc1,0x44,0x00,0x7e]
+0xc1,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, 0.5 ; encoding: [0xf0,0x44,0x00,0x7e]
+0xf0,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, -4.0 ; encoding: [0xf7,0x44,0x00,0x7e]
+0xf7,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, scc ; encoding: [0xfd,0x44,0x00,0x7e]
+0xfd,0x44,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x44,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x44,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_rcp_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x44,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x44,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_rcp_f32_e32 v0, v0 ; encoding: [0x00,0x45,0x00,0x7e]
+0x00,0x45,0x00,0x7e
+
+# CHECK: v_rcp_f32_e32 v0, v255 ; encoding: [0xff,0x45,0x00,0x7e]
+0xff,0x45,0x00,0x7e
+
+# CHECK: v_rcp_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x62,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x62,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x62,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x62,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x62,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x62,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x62,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x62,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x62,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x62,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x62,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x62,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x62,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x62,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x62,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x62,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x62,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x62,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x62,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x62,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, scc ; encoding: [0x00,0x00,0x62,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x62,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x62,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x62,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x62,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x62,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x62,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_rcp_f32_e64 v0, |s0| ; encoding: [0x00,0x01,0x62,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x62,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x62,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x62,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rcp_f32_e64 v0, s0 mul:2 ; encoding: [0x00,0x00,0x62,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x62,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_rcp_f32_e64 v0, s0 mul:4 ; encoding: [0x00,0x00,0x62,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x62,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_rcp_f32_e64 v0, s0 div:2 ; encoding: [0x00,0x00,0x62,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x62,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_rcp_iflag_f32_e32 v0, s0 ; encoding: [0x00,0x46,0x00,0x7e]
+0x00,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v255, s0 ; encoding: [0x00,0x46,0xfe,0x7f]
+0x00,0x46,0xfe,0x7f
+
+# CHECK: v_rcp_iflag_f32_e32 v0, s101 ; encoding: [0x65,0x46,0x00,0x7e]
+0x65,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x46,0x00,0x7e]
+0x66,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x46,0x00,0x7e]
+0x67,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x46,0x00,0x7e]
+0x6a,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x46,0x00,0x7e]
+0x6b,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, tba_lo ; encoding: [0x6c,0x46,0x00,0x7e]
+0x6c,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, tba_hi ; encoding: [0x6d,0x46,0x00,0x7e]
+0x6d,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, tma_lo ; encoding: [0x6e,0x46,0x00,0x7e]
+0x6e,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, tma_hi ; encoding: [0x6f,0x46,0x00,0x7e]
+0x6f,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x46,0x00,0x7e]
+0x7b,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, m0 ; encoding: [0x7c,0x46,0x00,0x7e]
+0x7c,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, exec_lo ; encoding: [0x7e,0x46,0x00,0x7e]
+0x7e,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, exec_hi ; encoding: [0x7f,0x46,0x00,0x7e]
+0x7f,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, 0 ; encoding: [0x80,0x46,0x00,0x7e]
+0x80,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, -1 ; encoding: [0xc1,0x46,0x00,0x7e]
+0xc1,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, 0.5 ; encoding: [0xf0,0x46,0x00,0x7e]
+0xf0,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, -4.0 ; encoding: [0xf7,0x46,0x00,0x7e]
+0xf7,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, scc ; encoding: [0xfd,0x46,0x00,0x7e]
+0xfd,0x46,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x46,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x46,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_rcp_iflag_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x46,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x46,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_rcp_iflag_f32_e32 v0, v0 ; encoding: [0x00,0x47,0x00,0x7e]
+0x00,0x47,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e32 v0, v255 ; encoding: [0xff,0x47,0x00,0x7e]
+0xff,0x47,0x00,0x7e
+
+# CHECK: v_rcp_iflag_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x63,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x63,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x63,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x63,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x63,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x63,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x63,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x63,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x63,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x63,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x63,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x63,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x63,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x63,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x63,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x63,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x63,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x63,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x63,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x63,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, scc ; encoding: [0x00,0x00,0x63,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x63,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x63,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x63,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x63,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x63,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x63,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_rcp_iflag_f32_e64 v0, |s0| ; encoding: [0x00,0x01,0x63,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x63,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x63,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x63,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_e64 v0, s0 mul:2 ; encoding: [0x00,0x00,0x63,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x63,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_rcp_iflag_f32_e64 v0, s0 mul:4 ; encoding: [0x00,0x00,0x63,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x63,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_rcp_iflag_f32_e64 v0, s0 div:2 ; encoding: [0x00,0x00,0x63,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x63,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_rsq_f32_e32 v0, s0 ; encoding: [0x00,0x48,0x00,0x7e]
+0x00,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v255, s0 ; encoding: [0x00,0x48,0xfe,0x7f]
+0x00,0x48,0xfe,0x7f
+
+# CHECK: v_rsq_f32_e32 v0, s101 ; encoding: [0x65,0x48,0x00,0x7e]
+0x65,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x48,0x00,0x7e]
+0x66,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x48,0x00,0x7e]
+0x67,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x48,0x00,0x7e]
+0x6a,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x48,0x00,0x7e]
+0x6b,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, tba_lo ; encoding: [0x6c,0x48,0x00,0x7e]
+0x6c,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, tba_hi ; encoding: [0x6d,0x48,0x00,0x7e]
+0x6d,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, tma_lo ; encoding: [0x6e,0x48,0x00,0x7e]
+0x6e,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, tma_hi ; encoding: [0x6f,0x48,0x00,0x7e]
+0x6f,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x48,0x00,0x7e]
+0x7b,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, m0 ; encoding: [0x7c,0x48,0x00,0x7e]
+0x7c,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, exec_lo ; encoding: [0x7e,0x48,0x00,0x7e]
+0x7e,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, exec_hi ; encoding: [0x7f,0x48,0x00,0x7e]
+0x7f,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, 0 ; encoding: [0x80,0x48,0x00,0x7e]
+0x80,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, -1 ; encoding: [0xc1,0x48,0x00,0x7e]
+0xc1,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, 0.5 ; encoding: [0xf0,0x48,0x00,0x7e]
+0xf0,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, -4.0 ; encoding: [0xf7,0x48,0x00,0x7e]
+0xf7,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, scc ; encoding: [0xfd,0x48,0x00,0x7e]
+0xfd,0x48,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x48,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x48,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_rsq_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x48,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x48,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_rsq_f32_e32 v0, v0 ; encoding: [0x00,0x49,0x00,0x7e]
+0x00,0x49,0x00,0x7e
+
+# CHECK: v_rsq_f32_e32 v0, v255 ; encoding: [0xff,0x49,0x00,0x7e]
+0xff,0x49,0x00,0x7e
+
+# CHECK: v_rsq_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x64,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x64,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x64,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x64,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x64,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x64,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x64,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x64,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x64,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x64,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x64,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x64,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x64,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x64,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x64,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x64,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x64,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x64,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x64,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x64,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, scc ; encoding: [0x00,0x00,0x64,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x64,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x64,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x64,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x64,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x64,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x64,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_rsq_f32_e64 v0, |s0| ; encoding: [0x00,0x01,0x64,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x64,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x64,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x64,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rsq_f32_e64 v0, s0 mul:2 ; encoding: [0x00,0x00,0x64,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x64,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_rsq_f32_e64 v0, s0 mul:4 ; encoding: [0x00,0x00,0x64,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x64,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_rsq_f32_e64 v0, s0 div:2 ; encoding: [0x00,0x00,0x64,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x64,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_rcp_f64_e32 v[0:1], s[0:1] ; encoding: [0x00,0x4a,0x00,0x7e]
+0x00,0x4a,0x00,0x7e
+
+# CHECK: v_rcp_f64_e32 v[254:255], s[0:1] ; encoding: [0x00,0x4a,0xfc,0x7f]
+0x00,0x4a,0xfc,0x7f
+
+# CHECK: v_rcp_f64_e32 v[0:1], s[2:3] ; encoding: [0x02,0x4a,0x00,0x7e]
+0x02,0x4a,0x00,0x7e
+
+# CHECK: v_rcp_f64_e32 v[0:1], s[100:101] ; encoding: [0x64,0x4a,0x00,0x7e]
+0x64,0x4a,0x00,0x7e
+
+# CHECK: v_rcp_f64_e32 v[0:1], flat_scratch ; encoding: [0x66,0x4a,0x00,0x7e]
+0x66,0x4a,0x00,0x7e
+
+# CHECK: v_rcp_f64_e32 v[0:1], vcc ; encoding: [0x6a,0x4a,0x00,0x7e]
+0x6a,0x4a,0x00,0x7e
+
+# CHECK: v_rcp_f64_e32 v[0:1], tba ; encoding: [0x6c,0x4a,0x00,0x7e]
+0x6c,0x4a,0x00,0x7e
+
+# CHECK: v_rcp_f64_e32 v[0:1], tma ; encoding: [0x6e,0x4a,0x00,0x7e]
+0x6e,0x4a,0x00,0x7e
+
+# CHECK: v_rcp_f64_e32 v[0:1], ttmp[10:11] ; encoding: [0x7a,0x4a,0x00,0x7e]
+0x7a,0x4a,0x00,0x7e
+
+# CHECK: v_rcp_f64_e32 v[0:1], exec ; encoding: [0x7e,0x4a,0x00,0x7e]
+0x7e,0x4a,0x00,0x7e
+
+# CHECK: v_rcp_f64_e32 v[0:1], 0 ; encoding: [0x80,0x4a,0x00,0x7e]
+0x80,0x4a,0x00,0x7e
+
+# CHECK: v_rcp_f64_e32 v[0:1], -1 ; encoding: [0xc1,0x4a,0x00,0x7e]
+0xc1,0x4a,0x00,0x7e
+
+# CHECK: v_rcp_f64_e32 v[0:1], 0.5 ; encoding: [0xf0,0x4a,0x00,0x7e]
+0xf0,0x4a,0x00,0x7e
+
+# CHECK: v_rcp_f64_e32 v[0:1], -4.0 ; encoding: [0xf7,0x4a,0x00,0x7e]
+0xf7,0x4a,0x00,0x7e
+
+# CHECK: v_rcp_f64_e32 v[0:1], 0xaf123456 ; encoding: [0xff,0x4a,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x4a,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_rcp_f64_e32 v[0:1], 0x3f717273 ; encoding: [0xff,0x4a,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x4a,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_rcp_f64_e32 v[0:1], v[0:1] ; encoding: [0x00,0x4b,0x00,0x7e]
+0x00,0x4b,0x00,0x7e
+
+# CHECK: v_rcp_f64_e32 v[0:1], v[254:255] ; encoding: [0xfe,0x4b,0x00,0x7e]
+0xfe,0x4b,0x00,0x7e
+
+# CHECK: v_rcp_f64_e64 v[0:1], s[0:1] ; encoding: [0x00,0x00,0x65,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x65,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rcp_f64_e64 v[254:255], s[0:1] ; encoding: [0xfe,0x00,0x65,0xd1,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x65,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rcp_f64_e64 v[0:1], s[2:3] ; encoding: [0x00,0x00,0x65,0xd1,0x02,0x00,0x00,0x00]
+0x00,0x00,0x65,0xd1,0x02,0x00,0x00,0x00
+
+# CHECK: v_rcp_f64_e64 v[0:1], s[100:101] ; encoding: [0x00,0x00,0x65,0xd1,0x64,0x00,0x00,0x00]
+0x00,0x00,0x65,0xd1,0x64,0x00,0x00,0x00
+
+# CHECK: v_rcp_f64_e64 v[0:1], flat_scratch ; encoding: [0x00,0x00,0x65,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x65,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_rcp_f64_e64 v[0:1], vcc ; encoding: [0x00,0x00,0x65,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x65,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_rcp_f64_e64 v[0:1], tba ; encoding: [0x00,0x00,0x65,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x65,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_rcp_f64_e64 v[0:1], tma ; encoding: [0x00,0x00,0x65,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x65,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_rcp_f64_e64 v[0:1], ttmp[10:11] ; encoding: [0x00,0x00,0x65,0xd1,0x7a,0x00,0x00,0x00]
+0x00,0x00,0x65,0xd1,0x7a,0x00,0x00,0x00
+
+# CHECK: v_rcp_f64_e64 v[0:1], exec ; encoding: [0x00,0x00,0x65,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x65,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_rcp_f64_e64 v[0:1], 0 ; encoding: [0x00,0x00,0x65,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x65,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_rcp_f64_e64 v[0:1], -1 ; encoding: [0x00,0x00,0x65,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x65,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_rcp_f64_e64 v[0:1], 0.5 ; encoding: [0x00,0x00,0x65,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x65,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_rcp_f64_e64 v[0:1], -4.0 ; encoding: [0x00,0x00,0x65,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x65,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_rcp_f64_e64 v[0:1], v[0:1] ; encoding: [0x00,0x00,0x65,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x65,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_rcp_f64_e64 v[0:1], v[254:255] ; encoding: [0x00,0x00,0x65,0xd1,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x65,0xd1,0xfe,0x01,0x00,0x00
+
+# CHECK: v_rcp_f64_e64 v[0:1], -s[0:1] ; encoding: [0x00,0x00,0x65,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x65,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_rcp_f64_e64 v[0:1], |s[0:1]| ; encoding: [0x00,0x01,0x65,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x65,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rcp_f64_e64 v[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x65,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x65,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rcp_f64_e64 v[0:1], s[0:1] mul:2 ; encoding: [0x00,0x00,0x65,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x65,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_rcp_f64_e64 v[0:1], s[0:1] mul:4 ; encoding: [0x00,0x00,0x65,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x65,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_rcp_f64_e64 v[0:1], s[0:1] div:2 ; encoding: [0x00,0x00,0x65,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x65,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_rsq_f64_e32 v[0:1], s[0:1] ; encoding: [0x00,0x4c,0x00,0x7e]
+0x00,0x4c,0x00,0x7e
+
+# CHECK: v_rsq_f64_e32 v[254:255], s[0:1] ; encoding: [0x00,0x4c,0xfc,0x7f]
+0x00,0x4c,0xfc,0x7f
+
+# CHECK: v_rsq_f64_e32 v[0:1], s[2:3] ; encoding: [0x02,0x4c,0x00,0x7e]
+0x02,0x4c,0x00,0x7e
+
+# CHECK: v_rsq_f64_e32 v[0:1], s[100:101] ; encoding: [0x64,0x4c,0x00,0x7e]
+0x64,0x4c,0x00,0x7e
+
+# CHECK: v_rsq_f64_e32 v[0:1], flat_scratch ; encoding: [0x66,0x4c,0x00,0x7e]
+0x66,0x4c,0x00,0x7e
+
+# CHECK: v_rsq_f64_e32 v[0:1], vcc ; encoding: [0x6a,0x4c,0x00,0x7e]
+0x6a,0x4c,0x00,0x7e
+
+# CHECK: v_rsq_f64_e32 v[0:1], tba ; encoding: [0x6c,0x4c,0x00,0x7e]
+0x6c,0x4c,0x00,0x7e
+
+# CHECK: v_rsq_f64_e32 v[0:1], tma ; encoding: [0x6e,0x4c,0x00,0x7e]
+0x6e,0x4c,0x00,0x7e
+
+# CHECK: v_rsq_f64_e32 v[0:1], ttmp[10:11] ; encoding: [0x7a,0x4c,0x00,0x7e]
+0x7a,0x4c,0x00,0x7e
+
+# CHECK: v_rsq_f64_e32 v[0:1], exec ; encoding: [0x7e,0x4c,0x00,0x7e]
+0x7e,0x4c,0x00,0x7e
+
+# CHECK: v_rsq_f64_e32 v[0:1], 0 ; encoding: [0x80,0x4c,0x00,0x7e]
+0x80,0x4c,0x00,0x7e
+
+# CHECK: v_rsq_f64_e32 v[0:1], -1 ; encoding: [0xc1,0x4c,0x00,0x7e]
+0xc1,0x4c,0x00,0x7e
+
+# CHECK: v_rsq_f64_e32 v[0:1], 0.5 ; encoding: [0xf0,0x4c,0x00,0x7e]
+0xf0,0x4c,0x00,0x7e
+
+# CHECK: v_rsq_f64_e32 v[0:1], -4.0 ; encoding: [0xf7,0x4c,0x00,0x7e]
+0xf7,0x4c,0x00,0x7e
+
+# CHECK: v_rsq_f64_e32 v[0:1], 0xaf123456 ; encoding: [0xff,0x4c,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x4c,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_rsq_f64_e32 v[0:1], 0x3f717273 ; encoding: [0xff,0x4c,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x4c,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_rsq_f64_e32 v[0:1], v[0:1] ; encoding: [0x00,0x4d,0x00,0x7e]
+0x00,0x4d,0x00,0x7e
+
+# CHECK: v_rsq_f64_e32 v[0:1], v[254:255] ; encoding: [0xfe,0x4d,0x00,0x7e]
+0xfe,0x4d,0x00,0x7e
+
+# CHECK: v_rsq_f64_e64 v[0:1], s[0:1] ; encoding: [0x00,0x00,0x66,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rsq_f64_e64 v[254:255], s[0:1] ; encoding: [0xfe,0x00,0x66,0xd1,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x66,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rsq_f64_e64 v[0:1], s[2:3] ; encoding: [0x00,0x00,0x66,0xd1,0x02,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd1,0x02,0x00,0x00,0x00
+
+# CHECK: v_rsq_f64_e64 v[0:1], s[100:101] ; encoding: [0x00,0x00,0x66,0xd1,0x64,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd1,0x64,0x00,0x00,0x00
+
+# CHECK: v_rsq_f64_e64 v[0:1], flat_scratch ; encoding: [0x00,0x00,0x66,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_rsq_f64_e64 v[0:1], vcc ; encoding: [0x00,0x00,0x66,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_rsq_f64_e64 v[0:1], tba ; encoding: [0x00,0x00,0x66,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_rsq_f64_e64 v[0:1], tma ; encoding: [0x00,0x00,0x66,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_rsq_f64_e64 v[0:1], ttmp[10:11] ; encoding: [0x00,0x00,0x66,0xd1,0x7a,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd1,0x7a,0x00,0x00,0x00
+
+# CHECK: v_rsq_f64_e64 v[0:1], exec ; encoding: [0x00,0x00,0x66,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_rsq_f64_e64 v[0:1], 0 ; encoding: [0x00,0x00,0x66,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_rsq_f64_e64 v[0:1], -1 ; encoding: [0x00,0x00,0x66,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_rsq_f64_e64 v[0:1], 0.5 ; encoding: [0x00,0x00,0x66,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_rsq_f64_e64 v[0:1], -4.0 ; encoding: [0x00,0x00,0x66,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_rsq_f64_e64 v[0:1], v[0:1] ; encoding: [0x00,0x00,0x66,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x66,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_rsq_f64_e64 v[0:1], v[254:255] ; encoding: [0x00,0x00,0x66,0xd1,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x66,0xd1,0xfe,0x01,0x00,0x00
+
+# CHECK: v_rsq_f64_e64 v[0:1], -s[0:1] ; encoding: [0x00,0x00,0x66,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x66,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_rsq_f64_e64 v[0:1], |s[0:1]| ; encoding: [0x00,0x01,0x66,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x66,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rsq_f64_e64 v[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x66,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x66,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rsq_f64_e64 v[0:1], s[0:1] mul:2 ; encoding: [0x00,0x00,0x66,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x66,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_rsq_f64_e64 v[0:1], s[0:1] mul:4 ; encoding: [0x00,0x00,0x66,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x66,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_rsq_f64_e64 v[0:1], s[0:1] div:2 ; encoding: [0x00,0x00,0x66,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x66,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_sqrt_f32_e32 v0, s0 ; encoding: [0x00,0x4e,0x00,0x7e]
+0x00,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v255, s0 ; encoding: [0x00,0x4e,0xfe,0x7f]
+0x00,0x4e,0xfe,0x7f
+
+# CHECK: v_sqrt_f32_e32 v0, s101 ; encoding: [0x65,0x4e,0x00,0x7e]
+0x65,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x4e,0x00,0x7e]
+0x66,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x4e,0x00,0x7e]
+0x67,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x4e,0x00,0x7e]
+0x6a,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x4e,0x00,0x7e]
+0x6b,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, tba_lo ; encoding: [0x6c,0x4e,0x00,0x7e]
+0x6c,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, tba_hi ; encoding: [0x6d,0x4e,0x00,0x7e]
+0x6d,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, tma_lo ; encoding: [0x6e,0x4e,0x00,0x7e]
+0x6e,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, tma_hi ; encoding: [0x6f,0x4e,0x00,0x7e]
+0x6f,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x4e,0x00,0x7e]
+0x7b,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, m0 ; encoding: [0x7c,0x4e,0x00,0x7e]
+0x7c,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, exec_lo ; encoding: [0x7e,0x4e,0x00,0x7e]
+0x7e,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, exec_hi ; encoding: [0x7f,0x4e,0x00,0x7e]
+0x7f,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, 0 ; encoding: [0x80,0x4e,0x00,0x7e]
+0x80,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, -1 ; encoding: [0xc1,0x4e,0x00,0x7e]
+0xc1,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, 0.5 ; encoding: [0xf0,0x4e,0x00,0x7e]
+0xf0,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, -4.0 ; encoding: [0xf7,0x4e,0x00,0x7e]
+0xf7,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, scc ; encoding: [0xfd,0x4e,0x00,0x7e]
+0xfd,0x4e,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x4e,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x4e,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_sqrt_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x4e,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x4e,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_sqrt_f32_e32 v0, v0 ; encoding: [0x00,0x4f,0x00,0x7e]
+0x00,0x4f,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e32 v0, v255 ; encoding: [0xff,0x4f,0x00,0x7e]
+0xff,0x4f,0x00,0x7e
+
+# CHECK: v_sqrt_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x67,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x67,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x67,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x67,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x67,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x67,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x67,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x67,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x67,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x67,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x67,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x67,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x67,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x67,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x67,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x67,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x67,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x67,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x67,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x67,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, scc ; encoding: [0x00,0x00,0x67,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x67,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x67,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x67,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x67,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x67,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x67,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_sqrt_f32_e64 v0, |s0| ; encoding: [0x00,0x01,0x67,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x67,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x67,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x67,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f32_e64 v0, s0 mul:2 ; encoding: [0x00,0x00,0x67,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x67,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_sqrt_f32_e64 v0, s0 mul:4 ; encoding: [0x00,0x00,0x67,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x67,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_sqrt_f32_e64 v0, s0 div:2 ; encoding: [0x00,0x00,0x67,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x67,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_sqrt_f64_e32 v[0:1], s[0:1] ; encoding: [0x00,0x50,0x00,0x7e]
+0x00,0x50,0x00,0x7e
+
+# CHECK: v_sqrt_f64_e32 v[254:255], s[0:1] ; encoding: [0x00,0x50,0xfc,0x7f]
+0x00,0x50,0xfc,0x7f
+
+# CHECK: v_sqrt_f64_e32 v[0:1], s[2:3] ; encoding: [0x02,0x50,0x00,0x7e]
+0x02,0x50,0x00,0x7e
+
+# CHECK: v_sqrt_f64_e32 v[0:1], s[100:101] ; encoding: [0x64,0x50,0x00,0x7e]
+0x64,0x50,0x00,0x7e
+
+# CHECK: v_sqrt_f64_e32 v[0:1], flat_scratch ; encoding: [0x66,0x50,0x00,0x7e]
+0x66,0x50,0x00,0x7e
+
+# CHECK: v_sqrt_f64_e32 v[0:1], vcc ; encoding: [0x6a,0x50,0x00,0x7e]
+0x6a,0x50,0x00,0x7e
+
+# CHECK: v_sqrt_f64_e32 v[0:1], tba ; encoding: [0x6c,0x50,0x00,0x7e]
+0x6c,0x50,0x00,0x7e
+
+# CHECK: v_sqrt_f64_e32 v[0:1], tma ; encoding: [0x6e,0x50,0x00,0x7e]
+0x6e,0x50,0x00,0x7e
+
+# CHECK: v_sqrt_f64_e32 v[0:1], ttmp[10:11] ; encoding: [0x7a,0x50,0x00,0x7e]
+0x7a,0x50,0x00,0x7e
+
+# CHECK: v_sqrt_f64_e32 v[0:1], exec ; encoding: [0x7e,0x50,0x00,0x7e]
+0x7e,0x50,0x00,0x7e
+
+# CHECK: v_sqrt_f64_e32 v[0:1], 0 ; encoding: [0x80,0x50,0x00,0x7e]
+0x80,0x50,0x00,0x7e
+
+# CHECK: v_sqrt_f64_e32 v[0:1], -1 ; encoding: [0xc1,0x50,0x00,0x7e]
+0xc1,0x50,0x00,0x7e
+
+# CHECK: v_sqrt_f64_e32 v[0:1], 0.5 ; encoding: [0xf0,0x50,0x00,0x7e]
+0xf0,0x50,0x00,0x7e
+
+# CHECK: v_sqrt_f64_e32 v[0:1], -4.0 ; encoding: [0xf7,0x50,0x00,0x7e]
+0xf7,0x50,0x00,0x7e
+
+# CHECK: v_sqrt_f64_e32 v[0:1], 0xaf123456 ; encoding: [0xff,0x50,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x50,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_sqrt_f64_e32 v[0:1], 0x3f717273 ; encoding: [0xff,0x50,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x50,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_sqrt_f64_e32 v[0:1], v[0:1] ; encoding: [0x00,0x51,0x00,0x7e]
+0x00,0x51,0x00,0x7e
+
+# CHECK: v_sqrt_f64_e32 v[0:1], v[254:255] ; encoding: [0xfe,0x51,0x00,0x7e]
+0xfe,0x51,0x00,0x7e
+
+# CHECK: v_sqrt_f64_e64 v[0:1], s[0:1] ; encoding: [0x00,0x00,0x68,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x68,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f64_e64 v[254:255], s[0:1] ; encoding: [0xfe,0x00,0x68,0xd1,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x68,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f64_e64 v[0:1], s[2:3] ; encoding: [0x00,0x00,0x68,0xd1,0x02,0x00,0x00,0x00]
+0x00,0x00,0x68,0xd1,0x02,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f64_e64 v[0:1], s[100:101] ; encoding: [0x00,0x00,0x68,0xd1,0x64,0x00,0x00,0x00]
+0x00,0x00,0x68,0xd1,0x64,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f64_e64 v[0:1], flat_scratch ; encoding: [0x00,0x00,0x68,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x68,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f64_e64 v[0:1], vcc ; encoding: [0x00,0x00,0x68,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x68,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f64_e64 v[0:1], tba ; encoding: [0x00,0x00,0x68,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x68,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f64_e64 v[0:1], tma ; encoding: [0x00,0x00,0x68,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x68,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f64_e64 v[0:1], ttmp[10:11] ; encoding: [0x00,0x00,0x68,0xd1,0x7a,0x00,0x00,0x00]
+0x00,0x00,0x68,0xd1,0x7a,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f64_e64 v[0:1], exec ; encoding: [0x00,0x00,0x68,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x68,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f64_e64 v[0:1], 0 ; encoding: [0x00,0x00,0x68,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x68,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f64_e64 v[0:1], -1 ; encoding: [0x00,0x00,0x68,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x68,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f64_e64 v[0:1], 0.5 ; encoding: [0x00,0x00,0x68,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x68,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f64_e64 v[0:1], -4.0 ; encoding: [0x00,0x00,0x68,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x68,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f64_e64 v[0:1], v[0:1] ; encoding: [0x00,0x00,0x68,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x68,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_sqrt_f64_e64 v[0:1], v[254:255] ; encoding: [0x00,0x00,0x68,0xd1,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x68,0xd1,0xfe,0x01,0x00,0x00
+
+# CHECK: v_sqrt_f64_e64 v[0:1], -s[0:1] ; encoding: [0x00,0x00,0x68,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x68,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_sqrt_f64_e64 v[0:1], |s[0:1]| ; encoding: [0x00,0x01,0x68,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x68,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f64_e64 v[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x68,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x68,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f64_e64 v[0:1], s[0:1] mul:2 ; encoding: [0x00,0x00,0x68,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x68,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_sqrt_f64_e64 v[0:1], s[0:1] mul:4 ; encoding: [0x00,0x00,0x68,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x68,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_sqrt_f64_e64 v[0:1], s[0:1] div:2 ; encoding: [0x00,0x00,0x68,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x68,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_sin_f32_e32 v0, s0 ; encoding: [0x00,0x52,0x00,0x7e]
+0x00,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v255, s0 ; encoding: [0x00,0x52,0xfe,0x7f]
+0x00,0x52,0xfe,0x7f
+
+# CHECK: v_sin_f32_e32 v0, s101 ; encoding: [0x65,0x52,0x00,0x7e]
+0x65,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x52,0x00,0x7e]
+0x66,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x52,0x00,0x7e]
+0x67,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x52,0x00,0x7e]
+0x6a,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x52,0x00,0x7e]
+0x6b,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, tba_lo ; encoding: [0x6c,0x52,0x00,0x7e]
+0x6c,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, tba_hi ; encoding: [0x6d,0x52,0x00,0x7e]
+0x6d,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, tma_lo ; encoding: [0x6e,0x52,0x00,0x7e]
+0x6e,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, tma_hi ; encoding: [0x6f,0x52,0x00,0x7e]
+0x6f,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x52,0x00,0x7e]
+0x7b,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, m0 ; encoding: [0x7c,0x52,0x00,0x7e]
+0x7c,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, exec_lo ; encoding: [0x7e,0x52,0x00,0x7e]
+0x7e,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, exec_hi ; encoding: [0x7f,0x52,0x00,0x7e]
+0x7f,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, 0 ; encoding: [0x80,0x52,0x00,0x7e]
+0x80,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, -1 ; encoding: [0xc1,0x52,0x00,0x7e]
+0xc1,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, 0.5 ; encoding: [0xf0,0x52,0x00,0x7e]
+0xf0,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, -4.0 ; encoding: [0xf7,0x52,0x00,0x7e]
+0xf7,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, scc ; encoding: [0xfd,0x52,0x00,0x7e]
+0xfd,0x52,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x52,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x52,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_sin_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x52,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x52,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_sin_f32_e32 v0, v0 ; encoding: [0x00,0x53,0x00,0x7e]
+0x00,0x53,0x00,0x7e
+
+# CHECK: v_sin_f32_e32 v0, v255 ; encoding: [0xff,0x53,0x00,0x7e]
+0xff,0x53,0x00,0x7e
+
+# CHECK: v_sin_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x69,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x69,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x69,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x69,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x69,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x69,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x69,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x69,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x69,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x69,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x69,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x69,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x69,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x69,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x69,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x69,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x69,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x69,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x69,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x69,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, scc ; encoding: [0x00,0x00,0x69,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x69,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x69,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x69,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x69,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x69,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x69,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_sin_f32_e64 v0, |s0| ; encoding: [0x00,0x01,0x69,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x69,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x69,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x69,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sin_f32_e64 v0, s0 mul:2 ; encoding: [0x00,0x00,0x69,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x69,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_sin_f32_e64 v0, s0 mul:4 ; encoding: [0x00,0x00,0x69,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x69,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_sin_f32_e64 v0, s0 div:2 ; encoding: [0x00,0x00,0x69,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x69,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_cos_f32_e32 v0, s0 ; encoding: [0x00,0x54,0x00,0x7e]
+0x00,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v255, s0 ; encoding: [0x00,0x54,0xfe,0x7f]
+0x00,0x54,0xfe,0x7f
+
+# CHECK: v_cos_f32_e32 v0, s101 ; encoding: [0x65,0x54,0x00,0x7e]
+0x65,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x54,0x00,0x7e]
+0x66,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x54,0x00,0x7e]
+0x67,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x54,0x00,0x7e]
+0x6a,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x54,0x00,0x7e]
+0x6b,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, tba_lo ; encoding: [0x6c,0x54,0x00,0x7e]
+0x6c,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, tba_hi ; encoding: [0x6d,0x54,0x00,0x7e]
+0x6d,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, tma_lo ; encoding: [0x6e,0x54,0x00,0x7e]
+0x6e,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, tma_hi ; encoding: [0x6f,0x54,0x00,0x7e]
+0x6f,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x54,0x00,0x7e]
+0x7b,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, m0 ; encoding: [0x7c,0x54,0x00,0x7e]
+0x7c,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, exec_lo ; encoding: [0x7e,0x54,0x00,0x7e]
+0x7e,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, exec_hi ; encoding: [0x7f,0x54,0x00,0x7e]
+0x7f,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, 0 ; encoding: [0x80,0x54,0x00,0x7e]
+0x80,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, -1 ; encoding: [0xc1,0x54,0x00,0x7e]
+0xc1,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, 0.5 ; encoding: [0xf0,0x54,0x00,0x7e]
+0xf0,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, -4.0 ; encoding: [0xf7,0x54,0x00,0x7e]
+0xf7,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, scc ; encoding: [0xfd,0x54,0x00,0x7e]
+0xfd,0x54,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x54,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x54,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cos_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x54,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x54,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cos_f32_e32 v0, v0 ; encoding: [0x00,0x55,0x00,0x7e]
+0x00,0x55,0x00,0x7e
+
+# CHECK: v_cos_f32_e32 v0, v255 ; encoding: [0xff,0x55,0x00,0x7e]
+0xff,0x55,0x00,0x7e
+
+# CHECK: v_cos_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x6a,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x6a,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x6a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x6a,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x6a,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x6a,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x6a,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x6a,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x6a,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x6a,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x6a,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x6a,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x6a,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x6a,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x6a,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x6a,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x6a,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x6a,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x6a,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x6a,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, scc ; encoding: [0x00,0x00,0x6a,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x6a,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x6a,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x6a,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x6a,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x6a,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_cos_f32_e64 v0, |s0| ; encoding: [0x00,0x01,0x6a,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x6a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x6a,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x6a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cos_f32_e64 v0, s0 mul:2 ; encoding: [0x00,0x00,0x6a,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x6a,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_cos_f32_e64 v0, s0 mul:4 ; encoding: [0x00,0x00,0x6a,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x6a,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_cos_f32_e64 v0, s0 div:2 ; encoding: [0x00,0x00,0x6a,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x6a,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_not_b32_e32 v0, s0 ; encoding: [0x00,0x56,0x00,0x7e]
+0x00,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v255, s0 ; encoding: [0x00,0x56,0xfe,0x7f]
+0x00,0x56,0xfe,0x7f
+
+# CHECK: v_not_b32_e32 v0, s101 ; encoding: [0x65,0x56,0x00,0x7e]
+0x65,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x56,0x00,0x7e]
+0x66,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x56,0x00,0x7e]
+0x67,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, vcc_lo ; encoding: [0x6a,0x56,0x00,0x7e]
+0x6a,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, vcc_hi ; encoding: [0x6b,0x56,0x00,0x7e]
+0x6b,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, tba_lo ; encoding: [0x6c,0x56,0x00,0x7e]
+0x6c,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, tba_hi ; encoding: [0x6d,0x56,0x00,0x7e]
+0x6d,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, tma_lo ; encoding: [0x6e,0x56,0x00,0x7e]
+0x6e,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, tma_hi ; encoding: [0x6f,0x56,0x00,0x7e]
+0x6f,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, ttmp11 ; encoding: [0x7b,0x56,0x00,0x7e]
+0x7b,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, m0 ; encoding: [0x7c,0x56,0x00,0x7e]
+0x7c,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, exec_lo ; encoding: [0x7e,0x56,0x00,0x7e]
+0x7e,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, exec_hi ; encoding: [0x7f,0x56,0x00,0x7e]
+0x7f,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, 0 ; encoding: [0x80,0x56,0x00,0x7e]
+0x80,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, -1 ; encoding: [0xc1,0x56,0x00,0x7e]
+0xc1,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, 0.5 ; encoding: [0xf0,0x56,0x00,0x7e]
+0xf0,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, -4.0 ; encoding: [0xf7,0x56,0x00,0x7e]
+0xf7,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, scc ; encoding: [0xfd,0x56,0x00,0x7e]
+0xfd,0x56,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, 0xaf123456 ; encoding: [0xff,0x56,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x56,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_not_b32_e32 v0, 0x3f717273 ; encoding: [0xff,0x56,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x56,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_not_b32_e32 v0, v0 ; encoding: [0x00,0x57,0x00,0x7e]
+0x00,0x57,0x00,0x7e
+
+# CHECK: v_not_b32_e32 v0, v255 ; encoding: [0xff,0x57,0x00,0x7e]
+0xff,0x57,0x00,0x7e
+
+# CHECK: v_not_b32_e64 v0, s0 ; encoding: [0x00,0x00,0x6b,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v255, s0 ; encoding: [0xff,0x00,0x6b,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x6b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, s101 ; encoding: [0x00,0x00,0x6b,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x6b,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x6b,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x6b,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x6b,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x6b,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x6b,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x6b,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x6b,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x6b,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, m0 ; encoding: [0x00,0x00,0x6b,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x6b,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x6b,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, 0 ; encoding: [0x00,0x00,0x6b,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, -1 ; encoding: [0x00,0x00,0x6b,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x6b,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x6b,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, scc ; encoding: [0x00,0x00,0x6b,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, v0 ; encoding: [0x00,0x00,0x6b,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_not_b32_e64 v0, v255 ; encoding: [0x00,0x00,0x6b,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x6b,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_bfrev_b32_e32 v0, s0 ; encoding: [0x00,0x58,0x00,0x7e]
+0x00,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v255, s0 ; encoding: [0x00,0x58,0xfe,0x7f]
+0x00,0x58,0xfe,0x7f
+
+# CHECK: v_bfrev_b32_e32 v0, s101 ; encoding: [0x65,0x58,0x00,0x7e]
+0x65,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x58,0x00,0x7e]
+0x66,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x58,0x00,0x7e]
+0x67,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, vcc_lo ; encoding: [0x6a,0x58,0x00,0x7e]
+0x6a,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, vcc_hi ; encoding: [0x6b,0x58,0x00,0x7e]
+0x6b,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, tba_lo ; encoding: [0x6c,0x58,0x00,0x7e]
+0x6c,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, tba_hi ; encoding: [0x6d,0x58,0x00,0x7e]
+0x6d,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, tma_lo ; encoding: [0x6e,0x58,0x00,0x7e]
+0x6e,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, tma_hi ; encoding: [0x6f,0x58,0x00,0x7e]
+0x6f,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, ttmp11 ; encoding: [0x7b,0x58,0x00,0x7e]
+0x7b,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, m0 ; encoding: [0x7c,0x58,0x00,0x7e]
+0x7c,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, exec_lo ; encoding: [0x7e,0x58,0x00,0x7e]
+0x7e,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, exec_hi ; encoding: [0x7f,0x58,0x00,0x7e]
+0x7f,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, 0 ; encoding: [0x80,0x58,0x00,0x7e]
+0x80,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, -1 ; encoding: [0xc1,0x58,0x00,0x7e]
+0xc1,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, 0.5 ; encoding: [0xf0,0x58,0x00,0x7e]
+0xf0,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, -4.0 ; encoding: [0xf7,0x58,0x00,0x7e]
+0xf7,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, scc ; encoding: [0xfd,0x58,0x00,0x7e]
+0xfd,0x58,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, 0xaf123456 ; encoding: [0xff,0x58,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x58,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_bfrev_b32_e32 v0, 0x3f717273 ; encoding: [0xff,0x58,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x58,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_bfrev_b32_e32 v0, v0 ; encoding: [0x00,0x59,0x00,0x7e]
+0x00,0x59,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e32 v0, v255 ; encoding: [0xff,0x59,0x00,0x7e]
+0xff,0x59,0x00,0x7e
+
+# CHECK: v_bfrev_b32_e64 v0, s0 ; encoding: [0x00,0x00,0x6c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v255, s0 ; encoding: [0xff,0x00,0x6c,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x6c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, s101 ; encoding: [0x00,0x00,0x6c,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x6c,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x6c,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x6c,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x6c,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x6c,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x6c,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x6c,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x6c,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x6c,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, m0 ; encoding: [0x00,0x00,0x6c,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x6c,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x6c,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, 0 ; encoding: [0x00,0x00,0x6c,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, -1 ; encoding: [0x00,0x00,0x6c,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x6c,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x6c,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, scc ; encoding: [0x00,0x00,0x6c,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, v0 ; encoding: [0x00,0x00,0x6c,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_bfrev_b32_e64 v0, v255 ; encoding: [0x00,0x00,0x6c,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x6c,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_ffbh_u32_e32 v0, s0 ; encoding: [0x00,0x5a,0x00,0x7e]
+0x00,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v255, s0 ; encoding: [0x00,0x5a,0xfe,0x7f]
+0x00,0x5a,0xfe,0x7f
+
+# CHECK: v_ffbh_u32_e32 v0, s101 ; encoding: [0x65,0x5a,0x00,0x7e]
+0x65,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x5a,0x00,0x7e]
+0x66,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x5a,0x00,0x7e]
+0x67,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, vcc_lo ; encoding: [0x6a,0x5a,0x00,0x7e]
+0x6a,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, vcc_hi ; encoding: [0x6b,0x5a,0x00,0x7e]
+0x6b,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, tba_lo ; encoding: [0x6c,0x5a,0x00,0x7e]
+0x6c,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, tba_hi ; encoding: [0x6d,0x5a,0x00,0x7e]
+0x6d,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, tma_lo ; encoding: [0x6e,0x5a,0x00,0x7e]
+0x6e,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, tma_hi ; encoding: [0x6f,0x5a,0x00,0x7e]
+0x6f,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, ttmp11 ; encoding: [0x7b,0x5a,0x00,0x7e]
+0x7b,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, m0 ; encoding: [0x7c,0x5a,0x00,0x7e]
+0x7c,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, exec_lo ; encoding: [0x7e,0x5a,0x00,0x7e]
+0x7e,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, exec_hi ; encoding: [0x7f,0x5a,0x00,0x7e]
+0x7f,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, 0 ; encoding: [0x80,0x5a,0x00,0x7e]
+0x80,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, -1 ; encoding: [0xc1,0x5a,0x00,0x7e]
+0xc1,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, 0.5 ; encoding: [0xf0,0x5a,0x00,0x7e]
+0xf0,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, -4.0 ; encoding: [0xf7,0x5a,0x00,0x7e]
+0xf7,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, scc ; encoding: [0xfd,0x5a,0x00,0x7e]
+0xfd,0x5a,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, 0xaf123456 ; encoding: [0xff,0x5a,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x5a,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_ffbh_u32_e32 v0, 0x3f717273 ; encoding: [0xff,0x5a,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x5a,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_ffbh_u32_e32 v0, v0 ; encoding: [0x00,0x5b,0x00,0x7e]
+0x00,0x5b,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e32 v0, v255 ; encoding: [0xff,0x5b,0x00,0x7e]
+0xff,0x5b,0x00,0x7e
+
+# CHECK: v_ffbh_u32_e64 v0, s0 ; encoding: [0x00,0x00,0x6d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v255, s0 ; encoding: [0xff,0x00,0x6d,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x6d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, s101 ; encoding: [0x00,0x00,0x6d,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x6d,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x6d,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x6d,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x6d,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x6d,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x6d,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x6d,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x6d,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x6d,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, m0 ; encoding: [0x00,0x00,0x6d,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x6d,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x6d,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, 0 ; encoding: [0x00,0x00,0x6d,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, -1 ; encoding: [0x00,0x00,0x6d,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x6d,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x6d,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, scc ; encoding: [0x00,0x00,0x6d,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, v0 ; encoding: [0x00,0x00,0x6d,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_ffbh_u32_e64 v0, v255 ; encoding: [0x00,0x00,0x6d,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x6d,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_ffbl_b32_e32 v0, s0 ; encoding: [0x00,0x5c,0x00,0x7e]
+0x00,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v255, s0 ; encoding: [0x00,0x5c,0xfe,0x7f]
+0x00,0x5c,0xfe,0x7f
+
+# CHECK: v_ffbl_b32_e32 v0, s101 ; encoding: [0x65,0x5c,0x00,0x7e]
+0x65,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x5c,0x00,0x7e]
+0x66,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x5c,0x00,0x7e]
+0x67,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, vcc_lo ; encoding: [0x6a,0x5c,0x00,0x7e]
+0x6a,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, vcc_hi ; encoding: [0x6b,0x5c,0x00,0x7e]
+0x6b,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, tba_lo ; encoding: [0x6c,0x5c,0x00,0x7e]
+0x6c,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, tba_hi ; encoding: [0x6d,0x5c,0x00,0x7e]
+0x6d,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, tma_lo ; encoding: [0x6e,0x5c,0x00,0x7e]
+0x6e,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, tma_hi ; encoding: [0x6f,0x5c,0x00,0x7e]
+0x6f,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, ttmp11 ; encoding: [0x7b,0x5c,0x00,0x7e]
+0x7b,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, m0 ; encoding: [0x7c,0x5c,0x00,0x7e]
+0x7c,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, exec_lo ; encoding: [0x7e,0x5c,0x00,0x7e]
+0x7e,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, exec_hi ; encoding: [0x7f,0x5c,0x00,0x7e]
+0x7f,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, 0 ; encoding: [0x80,0x5c,0x00,0x7e]
+0x80,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, -1 ; encoding: [0xc1,0x5c,0x00,0x7e]
+0xc1,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, 0.5 ; encoding: [0xf0,0x5c,0x00,0x7e]
+0xf0,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, -4.0 ; encoding: [0xf7,0x5c,0x00,0x7e]
+0xf7,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, scc ; encoding: [0xfd,0x5c,0x00,0x7e]
+0xfd,0x5c,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, 0xaf123456 ; encoding: [0xff,0x5c,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x5c,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_ffbl_b32_e32 v0, 0x3f717273 ; encoding: [0xff,0x5c,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x5c,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_ffbl_b32_e32 v0, v0 ; encoding: [0x00,0x5d,0x00,0x7e]
+0x00,0x5d,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e32 v0, v255 ; encoding: [0xff,0x5d,0x00,0x7e]
+0xff,0x5d,0x00,0x7e
+
+# CHECK: v_ffbl_b32_e64 v0, s0 ; encoding: [0x00,0x00,0x6e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v255, s0 ; encoding: [0xff,0x00,0x6e,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x6e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, s101 ; encoding: [0x00,0x00,0x6e,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x6e,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x6e,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x6e,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x6e,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x6e,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x6e,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x6e,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x6e,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x6e,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, m0 ; encoding: [0x00,0x00,0x6e,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x6e,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x6e,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, 0 ; encoding: [0x00,0x00,0x6e,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, -1 ; encoding: [0x00,0x00,0x6e,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x6e,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x6e,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, scc ; encoding: [0x00,0x00,0x6e,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, v0 ; encoding: [0x00,0x00,0x6e,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_ffbl_b32_e64 v0, v255 ; encoding: [0x00,0x00,0x6e,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x6e,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_ffbh_i32_e32 v0, s0 ; encoding: [0x00,0x5e,0x00,0x7e]
+0x00,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v255, s0 ; encoding: [0x00,0x5e,0xfe,0x7f]
+0x00,0x5e,0xfe,0x7f
+
+# CHECK: v_ffbh_i32_e32 v0, s101 ; encoding: [0x65,0x5e,0x00,0x7e]
+0x65,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x5e,0x00,0x7e]
+0x66,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x5e,0x00,0x7e]
+0x67,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, vcc_lo ; encoding: [0x6a,0x5e,0x00,0x7e]
+0x6a,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, vcc_hi ; encoding: [0x6b,0x5e,0x00,0x7e]
+0x6b,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, tba_lo ; encoding: [0x6c,0x5e,0x00,0x7e]
+0x6c,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, tba_hi ; encoding: [0x6d,0x5e,0x00,0x7e]
+0x6d,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, tma_lo ; encoding: [0x6e,0x5e,0x00,0x7e]
+0x6e,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, tma_hi ; encoding: [0x6f,0x5e,0x00,0x7e]
+0x6f,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, ttmp11 ; encoding: [0x7b,0x5e,0x00,0x7e]
+0x7b,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, m0 ; encoding: [0x7c,0x5e,0x00,0x7e]
+0x7c,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, exec_lo ; encoding: [0x7e,0x5e,0x00,0x7e]
+0x7e,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, exec_hi ; encoding: [0x7f,0x5e,0x00,0x7e]
+0x7f,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, 0 ; encoding: [0x80,0x5e,0x00,0x7e]
+0x80,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, -1 ; encoding: [0xc1,0x5e,0x00,0x7e]
+0xc1,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, 0.5 ; encoding: [0xf0,0x5e,0x00,0x7e]
+0xf0,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, -4.0 ; encoding: [0xf7,0x5e,0x00,0x7e]
+0xf7,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, scc ; encoding: [0xfd,0x5e,0x00,0x7e]
+0xfd,0x5e,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, 0xaf123456 ; encoding: [0xff,0x5e,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x5e,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_ffbh_i32_e32 v0, 0x3f717273 ; encoding: [0xff,0x5e,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x5e,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_ffbh_i32_e32 v0, v0 ; encoding: [0x00,0x5f,0x00,0x7e]
+0x00,0x5f,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e32 v0, v255 ; encoding: [0xff,0x5f,0x00,0x7e]
+0xff,0x5f,0x00,0x7e
+
+# CHECK: v_ffbh_i32_e64 v0, s0 ; encoding: [0x00,0x00,0x6f,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v255, s0 ; encoding: [0xff,0x00,0x6f,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x6f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, s101 ; encoding: [0x00,0x00,0x6f,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x6f,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x6f,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x6f,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x6f,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x6f,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x6f,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x6f,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x6f,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x6f,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, m0 ; encoding: [0x00,0x00,0x6f,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x6f,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x6f,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, 0 ; encoding: [0x00,0x00,0x6f,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, -1 ; encoding: [0x00,0x00,0x6f,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x6f,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x6f,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, scc ; encoding: [0x00,0x00,0x6f,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, v0 ; encoding: [0x00,0x00,0x6f,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_ffbh_i32_e64 v0, v255 ; encoding: [0x00,0x00,0x6f,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x6f,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f64_e32 v0, s[0:1] ; encoding: [0x00,0x60,0x00,0x7e]
+0x00,0x60,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f64_e32 v255, s[0:1] ; encoding: [0x00,0x60,0xfe,0x7f]
+0x00,0x60,0xfe,0x7f
+
+# CHECK: v_frexp_exp_i32_f64_e32 v0, s[2:3] ; encoding: [0x02,0x60,0x00,0x7e]
+0x02,0x60,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f64_e32 v0, s[100:101] ; encoding: [0x64,0x60,0x00,0x7e]
+0x64,0x60,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f64_e32 v0, flat_scratch ; encoding: [0x66,0x60,0x00,0x7e]
+0x66,0x60,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f64_e32 v0, vcc ; encoding: [0x6a,0x60,0x00,0x7e]
+0x6a,0x60,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f64_e32 v0, tba ; encoding: [0x6c,0x60,0x00,0x7e]
+0x6c,0x60,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f64_e32 v0, tma ; encoding: [0x6e,0x60,0x00,0x7e]
+0x6e,0x60,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f64_e32 v0, ttmp[10:11] ; encoding: [0x7a,0x60,0x00,0x7e]
+0x7a,0x60,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f64_e32 v0, exec ; encoding: [0x7e,0x60,0x00,0x7e]
+0x7e,0x60,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f64_e32 v0, 0 ; encoding: [0x80,0x60,0x00,0x7e]
+0x80,0x60,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f64_e32 v0, -1 ; encoding: [0xc1,0x60,0x00,0x7e]
+0xc1,0x60,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f64_e32 v0, 0.5 ; encoding: [0xf0,0x60,0x00,0x7e]
+0xf0,0x60,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f64_e32 v0, -4.0 ; encoding: [0xf7,0x60,0x00,0x7e]
+0xf7,0x60,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f64_e32 v0, 0xaf123456 ; encoding: [0xff,0x60,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x60,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_frexp_exp_i32_f64_e32 v0, 0x3f717273 ; encoding: [0xff,0x60,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x60,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_frexp_exp_i32_f64_e32 v0, v[0:1] ; encoding: [0x00,0x61,0x00,0x7e]
+0x00,0x61,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f64_e32 v0, v[254:255] ; encoding: [0xfe,0x61,0x00,0x7e]
+0xfe,0x61,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f64_e64 v0, s[0:1] ; encoding: [0x00,0x00,0x70,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x70,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f64_e64 v255, s[0:1] ; encoding: [0xff,0x00,0x70,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x70,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f64_e64 v0, s[2:3] ; encoding: [0x00,0x00,0x70,0xd1,0x02,0x00,0x00,0x00]
+0x00,0x00,0x70,0xd1,0x02,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f64_e64 v0, s[100:101] ; encoding: [0x00,0x00,0x70,0xd1,0x64,0x00,0x00,0x00]
+0x00,0x00,0x70,0xd1,0x64,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f64_e64 v0, flat_scratch ; encoding: [0x00,0x00,0x70,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x70,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f64_e64 v0, vcc ; encoding: [0x00,0x00,0x70,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x70,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f64_e64 v0, tba ; encoding: [0x00,0x00,0x70,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x70,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f64_e64 v0, tma ; encoding: [0x00,0x00,0x70,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x70,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f64_e64 v0, ttmp[10:11] ; encoding: [0x00,0x00,0x70,0xd1,0x7a,0x00,0x00,0x00]
+0x00,0x00,0x70,0xd1,0x7a,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f64_e64 v0, exec ; encoding: [0x00,0x00,0x70,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x70,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f64_e64 v0, 0 ; encoding: [0x00,0x00,0x70,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x70,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f64_e64 v0, -1 ; encoding: [0x00,0x00,0x70,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x70,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f64_e64 v0, 0.5 ; encoding: [0x00,0x00,0x70,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x70,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f64_e64 v0, -4.0 ; encoding: [0x00,0x00,0x70,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x70,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f64_e64 v0, v[0:1] ; encoding: [0x00,0x00,0x70,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x70,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f64_e64 v0, v[254:255] ; encoding: [0x00,0x00,0x70,0xd1,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x70,0xd1,0xfe,0x01,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f64_e64 v0, -s[0:1] ; encoding: [0x00,0x00,0x70,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x70,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_frexp_exp_i32_f64_e64 v0, |s[0:1]| ; encoding: [0x00,0x01,0x70,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x70,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f64_e64 v0, s[0:1] clamp ; encoding: [0x00,0x80,0x70,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x70,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e32 v[0:1], s[0:1] ; encoding: [0x00,0x62,0x00,0x7e]
+0x00,0x62,0x00,0x7e
+
+# CHECK: v_frexp_mant_f64_e32 v[254:255], s[0:1] ; encoding: [0x00,0x62,0xfc,0x7f]
+0x00,0x62,0xfc,0x7f
+
+# CHECK: v_frexp_mant_f64_e32 v[0:1], s[2:3] ; encoding: [0x02,0x62,0x00,0x7e]
+0x02,0x62,0x00,0x7e
+
+# CHECK: v_frexp_mant_f64_e32 v[0:1], s[100:101] ; encoding: [0x64,0x62,0x00,0x7e]
+0x64,0x62,0x00,0x7e
+
+# CHECK: v_frexp_mant_f64_e32 v[0:1], flat_scratch ; encoding: [0x66,0x62,0x00,0x7e]
+0x66,0x62,0x00,0x7e
+
+# CHECK: v_frexp_mant_f64_e32 v[0:1], vcc ; encoding: [0x6a,0x62,0x00,0x7e]
+0x6a,0x62,0x00,0x7e
+
+# CHECK: v_frexp_mant_f64_e32 v[0:1], tba ; encoding: [0x6c,0x62,0x00,0x7e]
+0x6c,0x62,0x00,0x7e
+
+# CHECK: v_frexp_mant_f64_e32 v[0:1], tma ; encoding: [0x6e,0x62,0x00,0x7e]
+0x6e,0x62,0x00,0x7e
+
+# CHECK: v_frexp_mant_f64_e32 v[0:1], ttmp[10:11] ; encoding: [0x7a,0x62,0x00,0x7e]
+0x7a,0x62,0x00,0x7e
+
+# CHECK: v_frexp_mant_f64_e32 v[0:1], exec ; encoding: [0x7e,0x62,0x00,0x7e]
+0x7e,0x62,0x00,0x7e
+
+# CHECK: v_frexp_mant_f64_e32 v[0:1], 0 ; encoding: [0x80,0x62,0x00,0x7e]
+0x80,0x62,0x00,0x7e
+
+# CHECK: v_frexp_mant_f64_e32 v[0:1], -1 ; encoding: [0xc1,0x62,0x00,0x7e]
+0xc1,0x62,0x00,0x7e
+
+# CHECK: v_frexp_mant_f64_e32 v[0:1], 0.5 ; encoding: [0xf0,0x62,0x00,0x7e]
+0xf0,0x62,0x00,0x7e
+
+# CHECK: v_frexp_mant_f64_e32 v[0:1], -4.0 ; encoding: [0xf7,0x62,0x00,0x7e]
+0xf7,0x62,0x00,0x7e
+
+# CHECK: v_frexp_mant_f64_e32 v[0:1], 0xaf123456 ; encoding: [0xff,0x62,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x62,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_frexp_mant_f64_e32 v[0:1], 0x3f717273 ; encoding: [0xff,0x62,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x62,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_frexp_mant_f64_e32 v[0:1], v[0:1] ; encoding: [0x00,0x63,0x00,0x7e]
+0x00,0x63,0x00,0x7e
+
+# CHECK: v_frexp_mant_f64_e32 v[0:1], v[254:255] ; encoding: [0xfe,0x63,0x00,0x7e]
+0xfe,0x63,0x00,0x7e
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], s[0:1] ; encoding: [0x00,0x00,0x71,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x71,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e64 v[254:255], s[0:1] ; encoding: [0xfe,0x00,0x71,0xd1,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x71,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], s[2:3] ; encoding: [0x00,0x00,0x71,0xd1,0x02,0x00,0x00,0x00]
+0x00,0x00,0x71,0xd1,0x02,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], s[100:101] ; encoding: [0x00,0x00,0x71,0xd1,0x64,0x00,0x00,0x00]
+0x00,0x00,0x71,0xd1,0x64,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], flat_scratch ; encoding: [0x00,0x00,0x71,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x71,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], vcc ; encoding: [0x00,0x00,0x71,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x71,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], tba ; encoding: [0x00,0x00,0x71,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x71,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], tma ; encoding: [0x00,0x00,0x71,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x71,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], ttmp[10:11] ; encoding: [0x00,0x00,0x71,0xd1,0x7a,0x00,0x00,0x00]
+0x00,0x00,0x71,0xd1,0x7a,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], exec ; encoding: [0x00,0x00,0x71,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x71,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], 0 ; encoding: [0x00,0x00,0x71,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x71,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], -1 ; encoding: [0x00,0x00,0x71,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x71,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], 0.5 ; encoding: [0x00,0x00,0x71,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x71,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], -4.0 ; encoding: [0x00,0x00,0x71,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x71,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], v[0:1] ; encoding: [0x00,0x00,0x71,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x71,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], v[254:255] ; encoding: [0x00,0x00,0x71,0xd1,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x71,0xd1,0xfe,0x01,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], -s[0:1] ; encoding: [0x00,0x00,0x71,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x71,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], |s[0:1]| ; encoding: [0x00,0x01,0x71,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x71,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x71,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x71,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], s[0:1] mul:2 ; encoding: [0x00,0x00,0x71,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x71,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], s[0:1] mul:4 ; encoding: [0x00,0x00,0x71,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x71,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_frexp_mant_f64_e64 v[0:1], s[0:1] div:2 ; encoding: [0x00,0x00,0x71,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x71,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_fract_f64_e32 v[0:1], s[0:1] ; encoding: [0x00,0x64,0x00,0x7e]
+0x00,0x64,0x00,0x7e
+
+# CHECK: v_fract_f64_e32 v[254:255], s[0:1] ; encoding: [0x00,0x64,0xfc,0x7f]
+0x00,0x64,0xfc,0x7f
+
+# CHECK: v_fract_f64_e32 v[0:1], s[2:3] ; encoding: [0x02,0x64,0x00,0x7e]
+0x02,0x64,0x00,0x7e
+
+# CHECK: v_fract_f64_e32 v[0:1], s[100:101] ; encoding: [0x64,0x64,0x00,0x7e]
+0x64,0x64,0x00,0x7e
+
+# CHECK: v_fract_f64_e32 v[0:1], flat_scratch ; encoding: [0x66,0x64,0x00,0x7e]
+0x66,0x64,0x00,0x7e
+
+# CHECK: v_fract_f64_e32 v[0:1], vcc ; encoding: [0x6a,0x64,0x00,0x7e]
+0x6a,0x64,0x00,0x7e
+
+# CHECK: v_fract_f64_e32 v[0:1], tba ; encoding: [0x6c,0x64,0x00,0x7e]
+0x6c,0x64,0x00,0x7e
+
+# CHECK: v_fract_f64_e32 v[0:1], tma ; encoding: [0x6e,0x64,0x00,0x7e]
+0x6e,0x64,0x00,0x7e
+
+# CHECK: v_fract_f64_e32 v[0:1], ttmp[10:11] ; encoding: [0x7a,0x64,0x00,0x7e]
+0x7a,0x64,0x00,0x7e
+
+# CHECK: v_fract_f64_e32 v[0:1], exec ; encoding: [0x7e,0x64,0x00,0x7e]
+0x7e,0x64,0x00,0x7e
+
+# CHECK: v_fract_f64_e32 v[0:1], 0 ; encoding: [0x80,0x64,0x00,0x7e]
+0x80,0x64,0x00,0x7e
+
+# CHECK: v_fract_f64_e32 v[0:1], -1 ; encoding: [0xc1,0x64,0x00,0x7e]
+0xc1,0x64,0x00,0x7e
+
+# CHECK: v_fract_f64_e32 v[0:1], 0.5 ; encoding: [0xf0,0x64,0x00,0x7e]
+0xf0,0x64,0x00,0x7e
+
+# CHECK: v_fract_f64_e32 v[0:1], -4.0 ; encoding: [0xf7,0x64,0x00,0x7e]
+0xf7,0x64,0x00,0x7e
+
+# CHECK: v_fract_f64_e32 v[0:1], 0xaf123456 ; encoding: [0xff,0x64,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x64,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_fract_f64_e32 v[0:1], 0x3f717273 ; encoding: [0xff,0x64,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x64,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_fract_f64_e32 v[0:1], v[0:1] ; encoding: [0x00,0x65,0x00,0x7e]
+0x00,0x65,0x00,0x7e
+
+# CHECK: v_fract_f64_e32 v[0:1], v[254:255] ; encoding: [0xfe,0x65,0x00,0x7e]
+0xfe,0x65,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, s0 ; encoding: [0x00,0x66,0x00,0x7e]
+0x00,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v255, s0 ; encoding: [0x00,0x66,0xfe,0x7f]
+0x00,0x66,0xfe,0x7f
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, s101 ; encoding: [0x65,0x66,0x00,0x7e]
+0x65,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x66,0x00,0x7e]
+0x66,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x66,0x00,0x7e]
+0x67,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x66,0x00,0x7e]
+0x6a,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x66,0x00,0x7e]
+0x6b,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, tba_lo ; encoding: [0x6c,0x66,0x00,0x7e]
+0x6c,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, tba_hi ; encoding: [0x6d,0x66,0x00,0x7e]
+0x6d,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, tma_lo ; encoding: [0x6e,0x66,0x00,0x7e]
+0x6e,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, tma_hi ; encoding: [0x6f,0x66,0x00,0x7e]
+0x6f,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x66,0x00,0x7e]
+0x7b,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, m0 ; encoding: [0x7c,0x66,0x00,0x7e]
+0x7c,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, exec_lo ; encoding: [0x7e,0x66,0x00,0x7e]
+0x7e,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, exec_hi ; encoding: [0x7f,0x66,0x00,0x7e]
+0x7f,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, 0 ; encoding: [0x80,0x66,0x00,0x7e]
+0x80,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, -1 ; encoding: [0xc1,0x66,0x00,0x7e]
+0xc1,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, 0.5 ; encoding: [0xf0,0x66,0x00,0x7e]
+0xf0,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, -4.0 ; encoding: [0xf7,0x66,0x00,0x7e]
+0xf7,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, scc ; encoding: [0xfd,0x66,0x00,0x7e]
+0xfd,0x66,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x66,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x66,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x66,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x66,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, v0 ; encoding: [0x00,0x67,0x00,0x7e]
+0x00,0x67,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e32 v0, v255 ; encoding: [0xff,0x67,0x00,0x7e]
+0xff,0x67,0x00,0x7e
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x73,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x73,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x73,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x73,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x73,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x73,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x73,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x73,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x73,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x73,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x73,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x73,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x73,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x73,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x73,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x73,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x73,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x73,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x73,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x73,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, scc ; encoding: [0x00,0x00,0x73,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x73,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x73,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x73,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x73,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x73,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x73,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, |s0| ; encoding: [0x00,0x01,0x73,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x73,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x73,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x73,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e32 v0, s0 ; encoding: [0x00,0x68,0x00,0x7e]
+0x00,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v255, s0 ; encoding: [0x00,0x68,0xfe,0x7f]
+0x00,0x68,0xfe,0x7f
+
+# CHECK: v_frexp_mant_f32_e32 v0, s101 ; encoding: [0x65,0x68,0x00,0x7e]
+0x65,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x68,0x00,0x7e]
+0x66,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x68,0x00,0x7e]
+0x67,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x68,0x00,0x7e]
+0x6a,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x68,0x00,0x7e]
+0x6b,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, tba_lo ; encoding: [0x6c,0x68,0x00,0x7e]
+0x6c,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, tba_hi ; encoding: [0x6d,0x68,0x00,0x7e]
+0x6d,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, tma_lo ; encoding: [0x6e,0x68,0x00,0x7e]
+0x6e,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, tma_hi ; encoding: [0x6f,0x68,0x00,0x7e]
+0x6f,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x68,0x00,0x7e]
+0x7b,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, m0 ; encoding: [0x7c,0x68,0x00,0x7e]
+0x7c,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, exec_lo ; encoding: [0x7e,0x68,0x00,0x7e]
+0x7e,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, exec_hi ; encoding: [0x7f,0x68,0x00,0x7e]
+0x7f,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, 0 ; encoding: [0x80,0x68,0x00,0x7e]
+0x80,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, -1 ; encoding: [0xc1,0x68,0x00,0x7e]
+0xc1,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, 0.5 ; encoding: [0xf0,0x68,0x00,0x7e]
+0xf0,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, -4.0 ; encoding: [0xf7,0x68,0x00,0x7e]
+0xf7,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, scc ; encoding: [0xfd,0x68,0x00,0x7e]
+0xfd,0x68,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x68,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x68,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_frexp_mant_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x68,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x68,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_frexp_mant_f32_e32 v0, v0 ; encoding: [0x00,0x69,0x00,0x7e]
+0x00,0x69,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e32 v0, v255 ; encoding: [0xff,0x69,0x00,0x7e]
+0xff,0x69,0x00,0x7e
+
+# CHECK: v_frexp_mant_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x74,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x74,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x74,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x74,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x74,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x74,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x74,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x74,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x74,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x74,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x74,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x74,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x74,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x74,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x74,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x74,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x74,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x74,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x74,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x74,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, scc ; encoding: [0x00,0x00,0x74,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x74,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x74,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x74,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x74,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x74,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x74,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_frexp_mant_f32_e64 v0, |s0| ; encoding: [0x00,0x01,0x74,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x74,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x74,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x74,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_e64 v0, s0 mul:2 ; encoding: [0x00,0x00,0x74,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x74,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_frexp_mant_f32_e64 v0, s0 mul:4 ; encoding: [0x00,0x00,0x74,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x74,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_frexp_mant_f32_e64 v0, s0 div:2 ; encoding: [0x00,0x00,0x74,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x74,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_movreld_b32_e32 v0, m0 ; encoding: [0x7c,0x6c,0x00,0x7e]
+0x7c,0x6c,0x00,0x7e
+
+# CHECK: v_movreld_b32_e32 v255, m0 ; encoding: [0x7c,0x6c,0xfe,0x7f]
+0x7c,0x6c,0xfe,0x7f
+
+# CHECK: v_movreld_b32_e32 v0, 0 ; encoding: [0x80,0x6c,0x00,0x7e]
+0x80,0x6c,0x00,0x7e
+
+# CHECK: v_movreld_b32_e32 v0, -1 ; encoding: [0xc1,0x6c,0x00,0x7e]
+0xc1,0x6c,0x00,0x7e
+
+# CHECK: v_movreld_b32_e32 v0, 0.5 ; encoding: [0xf0,0x6c,0x00,0x7e]
+0xf0,0x6c,0x00,0x7e
+
+# CHECK: v_movreld_b32_e32 v0, -4.0 ; encoding: [0xf7,0x6c,0x00,0x7e]
+0xf7,0x6c,0x00,0x7e
+
+# CHECK: v_movreld_b32_e32 v0, scc ; encoding: [0xfd,0x6c,0x00,0x7e]
+0xfd,0x6c,0x00,0x7e
+
+# CHECK: v_movreld_b32_e32 v0, v0 ; encoding: [0x00,0x6d,0x00,0x7e]
+0x00,0x6d,0x00,0x7e
+
+# CHECK: v_movreld_b32_e32 v0, v255 ; encoding: [0xff,0x6d,0x00,0x7e]
+0xff,0x6d,0x00,0x7e
+
+# CHECK: v_movreld_b32_e64 v0, m0 ; encoding: [0x00,0x00,0x76,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x76,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_movreld_b32_e64 v255, m0 ; encoding: [0xff,0x00,0x76,0xd1,0x7c,0x00,0x00,0x00]
+0xff,0x00,0x76,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_movreld_b32_e64 v0, 0 ; encoding: [0x00,0x00,0x76,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x76,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_movreld_b32_e64 v0, -1 ; encoding: [0x00,0x00,0x76,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x76,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_movreld_b32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x76,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x76,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_movreld_b32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x76,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x76,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_movreld_b32_e64 v0, scc ; encoding: [0x00,0x00,0x76,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x76,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_movreld_b32_e64 v0, v0 ; encoding: [0x00,0x00,0x76,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x76,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_movreld_b32_e64 v0, v255 ; encoding: [0x00,0x00,0x76,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x76,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_movrels_b32_e32 v0, v0 ; encoding: [0x00,0x6f,0x00,0x7e]
+0x00,0x6f,0x00,0x7e
+
+# CHECK: v_movrels_b32_e32 v255, v0 ; encoding: [0x00,0x6f,0xfe,0x7f]
+0x00,0x6f,0xfe,0x7f
+
+# CHECK: v_movrels_b32_e32 v0, v255 ; encoding: [0xff,0x6f,0x00,0x7e]
+0xff,0x6f,0x00,0x7e
+
+# CHECK: v_movrels_b32_e64 v0, v0 ; encoding: [0x00,0x00,0x77,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x77,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_movrels_b32_e64 v255, v0 ; encoding: [0xff,0x00,0x77,0xd1,0x00,0x01,0x00,0x00]
+0xff,0x00,0x77,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_movrels_b32_e64 v0, v255 ; encoding: [0x00,0x00,0x77,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x77,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_movrelsd_b32_e32 v0, v0 ; encoding: [0x00,0x71,0x00,0x7e]
+0x00,0x71,0x00,0x7e
+
+# CHECK: v_movrelsd_b32_e32 v255, v0 ; encoding: [0x00,0x71,0xfe,0x7f]
+0x00,0x71,0xfe,0x7f
+
+# CHECK: v_movrelsd_b32_e32 v0, v255 ; encoding: [0xff,0x71,0x00,0x7e]
+0xff,0x71,0x00,0x7e
+
+# CHECK: v_movrelsd_b32_e64 v0, v0 ; encoding: [0x00,0x00,0x78,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x78,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_movrelsd_b32_e64 v255, v0 ; encoding: [0xff,0x00,0x78,0xd1,0x00,0x01,0x00,0x00]
+0xff,0x00,0x78,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_movrelsd_b32_e64 v0, v255 ; encoding: [0x00,0x00,0x78,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x78,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e32 v0, s0 ; encoding: [0x00,0x72,0x00,0x7e]
+0x00,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v255, s0 ; encoding: [0x00,0x72,0xfe,0x7f]
+0x00,0x72,0xfe,0x7f
+
+# CHECK: v_cvt_f16_u16_e32 v0, s101 ; encoding: [0x65,0x72,0x00,0x7e]
+0x65,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x72,0x00,0x7e]
+0x66,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x72,0x00,0x7e]
+0x67,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, vcc_lo ; encoding: [0x6a,0x72,0x00,0x7e]
+0x6a,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, vcc_hi ; encoding: [0x6b,0x72,0x00,0x7e]
+0x6b,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, tba_lo ; encoding: [0x6c,0x72,0x00,0x7e]
+0x6c,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, tba_hi ; encoding: [0x6d,0x72,0x00,0x7e]
+0x6d,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, tma_lo ; encoding: [0x6e,0x72,0x00,0x7e]
+0x6e,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, tma_hi ; encoding: [0x6f,0x72,0x00,0x7e]
+0x6f,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, ttmp11 ; encoding: [0x7b,0x72,0x00,0x7e]
+0x7b,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, m0 ; encoding: [0x7c,0x72,0x00,0x7e]
+0x7c,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, exec_lo ; encoding: [0x7e,0x72,0x00,0x7e]
+0x7e,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, exec_hi ; encoding: [0x7f,0x72,0x00,0x7e]
+0x7f,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, 0 ; encoding: [0x80,0x72,0x00,0x7e]
+0x80,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, -1 ; encoding: [0xc1,0x72,0x00,0x7e]
+0xc1,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, 0.5 ; encoding: [0xf0,0x72,0x00,0x7e]
+0xf0,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, -4.0 ; encoding: [0xf7,0x72,0x00,0x7e]
+0xf7,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, scc ; encoding: [0xfd,0x72,0x00,0x7e]
+0xfd,0x72,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, 0xfe0b ; encoding: [0xff,0x72,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x72,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e32 v0, 0x3456 ; encoding: [0xff,0x72,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x72,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e32 v0, v0 ; encoding: [0x00,0x73,0x00,0x7e]
+0x00,0x73,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e32 v0, v255 ; encoding: [0xff,0x73,0x00,0x7e]
+0xff,0x73,0x00,0x7e
+
+# CHECK: v_cvt_f16_u16_e64 v0, s0 ; encoding: [0x00,0x00,0x79,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v255, s0 ; encoding: [0xff,0x00,0x79,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x79,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, s101 ; encoding: [0x00,0x00,0x79,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x79,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x79,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x79,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x79,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x79,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x79,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x79,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x79,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x79,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, m0 ; encoding: [0x00,0x00,0x79,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x79,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x79,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, 0 ; encoding: [0x00,0x00,0x79,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, -1 ; encoding: [0x00,0x00,0x79,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x79,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x79,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, scc ; encoding: [0x00,0x00,0x79,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, v0 ; encoding: [0x00,0x00,0x79,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x79,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_e64 v0, v255 ; encoding: [0x00,0x00,0x79,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x79,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e32 v0, s0 ; encoding: [0x00,0x74,0x00,0x7e]
+0x00,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v255, s0 ; encoding: [0x00,0x74,0xfe,0x7f]
+0x00,0x74,0xfe,0x7f
+
+# CHECK: v_cvt_f16_i16_e32 v0, s101 ; encoding: [0x65,0x74,0x00,0x7e]
+0x65,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x74,0x00,0x7e]
+0x66,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x74,0x00,0x7e]
+0x67,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, vcc_lo ; encoding: [0x6a,0x74,0x00,0x7e]
+0x6a,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, vcc_hi ; encoding: [0x6b,0x74,0x00,0x7e]
+0x6b,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, tba_lo ; encoding: [0x6c,0x74,0x00,0x7e]
+0x6c,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, tba_hi ; encoding: [0x6d,0x74,0x00,0x7e]
+0x6d,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, tma_lo ; encoding: [0x6e,0x74,0x00,0x7e]
+0x6e,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, tma_hi ; encoding: [0x6f,0x74,0x00,0x7e]
+0x6f,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, ttmp11 ; encoding: [0x7b,0x74,0x00,0x7e]
+0x7b,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, m0 ; encoding: [0x7c,0x74,0x00,0x7e]
+0x7c,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, exec_lo ; encoding: [0x7e,0x74,0x00,0x7e]
+0x7e,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, exec_hi ; encoding: [0x7f,0x74,0x00,0x7e]
+0x7f,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, 0 ; encoding: [0x80,0x74,0x00,0x7e]
+0x80,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, -1 ; encoding: [0xc1,0x74,0x00,0x7e]
+0xc1,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, 0.5 ; encoding: [0xf0,0x74,0x00,0x7e]
+0xf0,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, -4.0 ; encoding: [0xf7,0x74,0x00,0x7e]
+0xf7,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, scc ; encoding: [0xfd,0x74,0x00,0x7e]
+0xfd,0x74,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, 0xfe0b ; encoding: [0xff,0x74,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x74,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e32 v0, 0x3456 ; encoding: [0xff,0x74,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x74,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e32 v0, v0 ; encoding: [0x00,0x75,0x00,0x7e]
+0x00,0x75,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e32 v0, v255 ; encoding: [0xff,0x75,0x00,0x7e]
+0xff,0x75,0x00,0x7e
+
+# CHECK: v_cvt_f16_i16_e64 v0, s0 ; encoding: [0x00,0x00,0x7a,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v255, s0 ; encoding: [0xff,0x00,0x7a,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x7a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, s101 ; encoding: [0x00,0x00,0x7a,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x7a,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x7a,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x7a,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x7a,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x7a,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x7a,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x7a,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x7a,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x7a,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, m0 ; encoding: [0x00,0x00,0x7a,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x7a,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x7a,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, 0 ; encoding: [0x00,0x00,0x7a,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, -1 ; encoding: [0x00,0x00,0x7a,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x7a,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x7a,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, scc ; encoding: [0x00,0x00,0x7a,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, v0 ; encoding: [0x00,0x00,0x7a,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_e64 v0, v255 ; encoding: [0x00,0x00,0x7a,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x7a,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e32 v0, s0 ; encoding: [0x00,0x76,0x00,0x7e]
+0x00,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v255, s0 ; encoding: [0x00,0x76,0xfe,0x7f]
+0x00,0x76,0xfe,0x7f
+
+# CHECK: v_cvt_u16_f16_e32 v0, s101 ; encoding: [0x65,0x76,0x00,0x7e]
+0x65,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x76,0x00,0x7e]
+0x66,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x76,0x00,0x7e]
+0x67,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, vcc_lo ; encoding: [0x6a,0x76,0x00,0x7e]
+0x6a,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, vcc_hi ; encoding: [0x6b,0x76,0x00,0x7e]
+0x6b,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, tba_lo ; encoding: [0x6c,0x76,0x00,0x7e]
+0x6c,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, tba_hi ; encoding: [0x6d,0x76,0x00,0x7e]
+0x6d,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, tma_lo ; encoding: [0x6e,0x76,0x00,0x7e]
+0x6e,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, tma_hi ; encoding: [0x6f,0x76,0x00,0x7e]
+0x6f,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, ttmp11 ; encoding: [0x7b,0x76,0x00,0x7e]
+0x7b,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, m0 ; encoding: [0x7c,0x76,0x00,0x7e]
+0x7c,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, exec_lo ; encoding: [0x7e,0x76,0x00,0x7e]
+0x7e,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, exec_hi ; encoding: [0x7f,0x76,0x00,0x7e]
+0x7f,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, 0 ; encoding: [0x80,0x76,0x00,0x7e]
+0x80,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, -1 ; encoding: [0xc1,0x76,0x00,0x7e]
+0xc1,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, 0.5 ; encoding: [0xf0,0x76,0x00,0x7e]
+0xf0,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, -4.0 ; encoding: [0xf7,0x76,0x00,0x7e]
+0xf7,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, scc ; encoding: [0xfd,0x76,0x00,0x7e]
+0xfd,0x76,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, 0xfe0b ; encoding: [0xff,0x76,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x76,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e32 v0, 0x3456 ; encoding: [0xff,0x76,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x76,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e32 v0, v0 ; encoding: [0x00,0x77,0x00,0x7e]
+0x00,0x77,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e32 v0, v255 ; encoding: [0xff,0x77,0x00,0x7e]
+0xff,0x77,0x00,0x7e
+
+# CHECK: v_cvt_u16_f16_e64 v0, s0 ; encoding: [0x00,0x00,0x7b,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v255, s0 ; encoding: [0xff,0x00,0x7b,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x7b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, s101 ; encoding: [0x00,0x00,0x7b,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x7b,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x7b,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x7b,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x7b,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x7b,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x7b,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x7b,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x7b,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x7b,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, m0 ; encoding: [0x00,0x00,0x7b,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x7b,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x7b,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, 0 ; encoding: [0x00,0x00,0x7b,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, -1 ; encoding: [0x00,0x00,0x7b,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x7b,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x7b,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, scc ; encoding: [0x00,0x00,0x7b,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, v0 ; encoding: [0x00,0x00,0x7b,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, v255 ; encoding: [0x00,0x00,0x7b,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x7b,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, -s0 ; encoding: [0x00,0x00,0x7b,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x7b,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_cvt_u16_f16_e64 v0, |s0| ; encoding: [0x00,0x01,0x7b,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x7b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x7b,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x7b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e32 v0, s0 ; encoding: [0x00,0x78,0x00,0x7e]
+0x00,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v255, s0 ; encoding: [0x00,0x78,0xfe,0x7f]
+0x00,0x78,0xfe,0x7f
+
+# CHECK: v_cvt_i16_f16_e32 v0, s101 ; encoding: [0x65,0x78,0x00,0x7e]
+0x65,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x78,0x00,0x7e]
+0x66,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x78,0x00,0x7e]
+0x67,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, vcc_lo ; encoding: [0x6a,0x78,0x00,0x7e]
+0x6a,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, vcc_hi ; encoding: [0x6b,0x78,0x00,0x7e]
+0x6b,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, tba_lo ; encoding: [0x6c,0x78,0x00,0x7e]
+0x6c,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, tba_hi ; encoding: [0x6d,0x78,0x00,0x7e]
+0x6d,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, tma_lo ; encoding: [0x6e,0x78,0x00,0x7e]
+0x6e,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, tma_hi ; encoding: [0x6f,0x78,0x00,0x7e]
+0x6f,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, ttmp11 ; encoding: [0x7b,0x78,0x00,0x7e]
+0x7b,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, m0 ; encoding: [0x7c,0x78,0x00,0x7e]
+0x7c,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, exec_lo ; encoding: [0x7e,0x78,0x00,0x7e]
+0x7e,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, exec_hi ; encoding: [0x7f,0x78,0x00,0x7e]
+0x7f,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, 0 ; encoding: [0x80,0x78,0x00,0x7e]
+0x80,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, -1 ; encoding: [0xc1,0x78,0x00,0x7e]
+0xc1,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, 0.5 ; encoding: [0xf0,0x78,0x00,0x7e]
+0xf0,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, -4.0 ; encoding: [0xf7,0x78,0x00,0x7e]
+0xf7,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, scc ; encoding: [0xfd,0x78,0x00,0x7e]
+0xfd,0x78,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, 0xfe0b ; encoding: [0xff,0x78,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x78,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e32 v0, 0x3456 ; encoding: [0xff,0x78,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x78,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e32 v0, v0 ; encoding: [0x00,0x79,0x00,0x7e]
+0x00,0x79,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e32 v0, v255 ; encoding: [0xff,0x79,0x00,0x7e]
+0xff,0x79,0x00,0x7e
+
+# CHECK: v_cvt_i16_f16_e64 v0, s0 ; encoding: [0x00,0x00,0x7c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v255, s0 ; encoding: [0xff,0x00,0x7c,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x7c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, s101 ; encoding: [0x00,0x00,0x7c,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x7c,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x7c,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x7c,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x7c,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x7c,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x7c,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x7c,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x7c,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x7c,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, m0 ; encoding: [0x00,0x00,0x7c,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x7c,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x7c,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, 0 ; encoding: [0x00,0x00,0x7c,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, -1 ; encoding: [0x00,0x00,0x7c,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x7c,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x7c,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, scc ; encoding: [0x00,0x00,0x7c,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, v0 ; encoding: [0x00,0x00,0x7c,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, v255 ; encoding: [0x00,0x00,0x7c,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x7c,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, -s0 ; encoding: [0x00,0x00,0x7c,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x7c,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_cvt_i16_f16_e64 v0, |s0| ; encoding: [0x00,0x01,0x7c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x7c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x7c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x7c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e32 v0, s0 ; encoding: [0x00,0x7a,0x00,0x7e]
+0x00,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v255, s0 ; encoding: [0x00,0x7a,0xfe,0x7f]
+0x00,0x7a,0xfe,0x7f
+
+# CHECK: v_rcp_f16_e32 v0, s101 ; encoding: [0x65,0x7a,0x00,0x7e]
+0x65,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x7a,0x00,0x7e]
+0x66,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x7a,0x00,0x7e]
+0x67,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, vcc_lo ; encoding: [0x6a,0x7a,0x00,0x7e]
+0x6a,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, vcc_hi ; encoding: [0x6b,0x7a,0x00,0x7e]
+0x6b,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, tba_lo ; encoding: [0x6c,0x7a,0x00,0x7e]
+0x6c,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, tba_hi ; encoding: [0x6d,0x7a,0x00,0x7e]
+0x6d,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, tma_lo ; encoding: [0x6e,0x7a,0x00,0x7e]
+0x6e,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, tma_hi ; encoding: [0x6f,0x7a,0x00,0x7e]
+0x6f,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, ttmp11 ; encoding: [0x7b,0x7a,0x00,0x7e]
+0x7b,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, m0 ; encoding: [0x7c,0x7a,0x00,0x7e]
+0x7c,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, exec_lo ; encoding: [0x7e,0x7a,0x00,0x7e]
+0x7e,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, exec_hi ; encoding: [0x7f,0x7a,0x00,0x7e]
+0x7f,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, 0 ; encoding: [0x80,0x7a,0x00,0x7e]
+0x80,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, -1 ; encoding: [0xc1,0x7a,0x00,0x7e]
+0xc1,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, 0.5 ; encoding: [0xf0,0x7a,0x00,0x7e]
+0xf0,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, -4.0 ; encoding: [0xf7,0x7a,0x00,0x7e]
+0xf7,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, scc ; encoding: [0xfd,0x7a,0x00,0x7e]
+0xfd,0x7a,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, 0xfe0b ; encoding: [0xff,0x7a,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x7a,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_rcp_f16_e32 v0, 0x3456 ; encoding: [0xff,0x7a,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x7a,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_rcp_f16_e32 v0, v0 ; encoding: [0x00,0x7b,0x00,0x7e]
+0x00,0x7b,0x00,0x7e
+
+# CHECK: v_rcp_f16_e32 v0, v255 ; encoding: [0xff,0x7b,0x00,0x7e]
+0xff,0x7b,0x00,0x7e
+
+# CHECK: v_rcp_f16_e64 v0, s0 ; encoding: [0x00,0x00,0x7d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v255, s0 ; encoding: [0xff,0x00,0x7d,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x7d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, s101 ; encoding: [0x00,0x00,0x7d,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x7d,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x7d,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x7d,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x7d,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x7d,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x7d,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x7d,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x7d,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x7d,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, m0 ; encoding: [0x00,0x00,0x7d,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x7d,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x7d,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, 0 ; encoding: [0x00,0x00,0x7d,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, -1 ; encoding: [0x00,0x00,0x7d,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x7d,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x7d,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, scc ; encoding: [0x00,0x00,0x7d,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, v0 ; encoding: [0x00,0x00,0x7d,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, v255 ; encoding: [0x00,0x00,0x7d,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x7d,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, -s0 ; encoding: [0x00,0x00,0x7d,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x7d,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_rcp_f16_e64 v0, |s0| ; encoding: [0x00,0x01,0x7d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x7d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rcp_f16_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x7d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x7d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e32 v0, s0 ; encoding: [0x00,0x7c,0x00,0x7e]
+0x00,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v255, s0 ; encoding: [0x00,0x7c,0xfe,0x7f]
+0x00,0x7c,0xfe,0x7f
+
+# CHECK: v_sqrt_f16_e32 v0, s101 ; encoding: [0x65,0x7c,0x00,0x7e]
+0x65,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x7c,0x00,0x7e]
+0x66,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x7c,0x00,0x7e]
+0x67,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, vcc_lo ; encoding: [0x6a,0x7c,0x00,0x7e]
+0x6a,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, vcc_hi ; encoding: [0x6b,0x7c,0x00,0x7e]
+0x6b,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, tba_lo ; encoding: [0x6c,0x7c,0x00,0x7e]
+0x6c,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, tba_hi ; encoding: [0x6d,0x7c,0x00,0x7e]
+0x6d,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, tma_lo ; encoding: [0x6e,0x7c,0x00,0x7e]
+0x6e,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, tma_hi ; encoding: [0x6f,0x7c,0x00,0x7e]
+0x6f,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, ttmp11 ; encoding: [0x7b,0x7c,0x00,0x7e]
+0x7b,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, m0 ; encoding: [0x7c,0x7c,0x00,0x7e]
+0x7c,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, exec_lo ; encoding: [0x7e,0x7c,0x00,0x7e]
+0x7e,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, exec_hi ; encoding: [0x7f,0x7c,0x00,0x7e]
+0x7f,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, 0 ; encoding: [0x80,0x7c,0x00,0x7e]
+0x80,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, -1 ; encoding: [0xc1,0x7c,0x00,0x7e]
+0xc1,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, 0.5 ; encoding: [0xf0,0x7c,0x00,0x7e]
+0xf0,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, -4.0 ; encoding: [0xf7,0x7c,0x00,0x7e]
+0xf7,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, scc ; encoding: [0xfd,0x7c,0x00,0x7e]
+0xfd,0x7c,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, 0xfe0b ; encoding: [0xff,0x7c,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x7c,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_sqrt_f16_e32 v0, 0x3456 ; encoding: [0xff,0x7c,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x7c,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_sqrt_f16_e32 v0, v0 ; encoding: [0x00,0x7d,0x00,0x7e]
+0x00,0x7d,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e32 v0, v255 ; encoding: [0xff,0x7d,0x00,0x7e]
+0xff,0x7d,0x00,0x7e
+
+# CHECK: v_sqrt_f16_e64 v0, s0 ; encoding: [0x00,0x00,0x7e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v255, s0 ; encoding: [0xff,0x00,0x7e,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x7e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, s101 ; encoding: [0x00,0x00,0x7e,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x7e,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x7e,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x7e,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x7e,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x7e,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x7e,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x7e,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x7e,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x7e,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, m0 ; encoding: [0x00,0x00,0x7e,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x7e,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x7e,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, 0 ; encoding: [0x00,0x00,0x7e,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, -1 ; encoding: [0x00,0x00,0x7e,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x7e,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x7e,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, scc ; encoding: [0x00,0x00,0x7e,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, v0 ; encoding: [0x00,0x00,0x7e,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, v255 ; encoding: [0x00,0x00,0x7e,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x7e,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, -s0 ; encoding: [0x00,0x00,0x7e,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x7e,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_sqrt_f16_e64 v0, |s0| ; encoding: [0x00,0x01,0x7e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x7e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sqrt_f16_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x7e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x7e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e32 v0, s0 ; encoding: [0x00,0x7e,0x00,0x7e]
+0x00,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v255, s0 ; encoding: [0x00,0x7e,0xfe,0x7f]
+0x00,0x7e,0xfe,0x7f
+
+# CHECK: v_rsq_f16_e32 v0, s101 ; encoding: [0x65,0x7e,0x00,0x7e]
+0x65,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x7e,0x00,0x7e]
+0x66,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x7e,0x00,0x7e]
+0x67,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, vcc_lo ; encoding: [0x6a,0x7e,0x00,0x7e]
+0x6a,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, vcc_hi ; encoding: [0x6b,0x7e,0x00,0x7e]
+0x6b,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, tba_lo ; encoding: [0x6c,0x7e,0x00,0x7e]
+0x6c,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, tba_hi ; encoding: [0x6d,0x7e,0x00,0x7e]
+0x6d,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, tma_lo ; encoding: [0x6e,0x7e,0x00,0x7e]
+0x6e,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, tma_hi ; encoding: [0x6f,0x7e,0x00,0x7e]
+0x6f,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, ttmp11 ; encoding: [0x7b,0x7e,0x00,0x7e]
+0x7b,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, m0 ; encoding: [0x7c,0x7e,0x00,0x7e]
+0x7c,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, exec_lo ; encoding: [0x7e,0x7e,0x00,0x7e]
+0x7e,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, exec_hi ; encoding: [0x7f,0x7e,0x00,0x7e]
+0x7f,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, 0 ; encoding: [0x80,0x7e,0x00,0x7e]
+0x80,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, -1 ; encoding: [0xc1,0x7e,0x00,0x7e]
+0xc1,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, 0.5 ; encoding: [0xf0,0x7e,0x00,0x7e]
+0xf0,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, -4.0 ; encoding: [0xf7,0x7e,0x00,0x7e]
+0xf7,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, scc ; encoding: [0xfd,0x7e,0x00,0x7e]
+0xfd,0x7e,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, 0xfe0b ; encoding: [0xff,0x7e,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x7e,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_rsq_f16_e32 v0, 0x3456 ; encoding: [0xff,0x7e,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x7e,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_rsq_f16_e32 v0, v0 ; encoding: [0x00,0x7f,0x00,0x7e]
+0x00,0x7f,0x00,0x7e
+
+# CHECK: v_rsq_f16_e32 v0, v255 ; encoding: [0xff,0x7f,0x00,0x7e]
+0xff,0x7f,0x00,0x7e
+
+# CHECK: v_rsq_f16_e64 v0, s0 ; encoding: [0x00,0x00,0x7f,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v255, s0 ; encoding: [0xff,0x00,0x7f,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x7f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, s101 ; encoding: [0x00,0x00,0x7f,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x7f,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x7f,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x7f,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x7f,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x7f,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x7f,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x7f,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x7f,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x7f,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, m0 ; encoding: [0x00,0x00,0x7f,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x7f,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x7f,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, 0 ; encoding: [0x00,0x00,0x7f,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, -1 ; encoding: [0x00,0x00,0x7f,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x7f,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x7f,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, scc ; encoding: [0x00,0x00,0x7f,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, v0 ; encoding: [0x00,0x00,0x7f,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, v255 ; encoding: [0x00,0x00,0x7f,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x7f,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, -s0 ; encoding: [0x00,0x00,0x7f,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x7f,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_rsq_f16_e64 v0, |s0| ; encoding: [0x00,0x01,0x7f,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x7f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rsq_f16_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x7f,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x7f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e32 v0, s0 ; encoding: [0x00,0x80,0x00,0x7e]
+0x00,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v255, s0 ; encoding: [0x00,0x80,0xfe,0x7f]
+0x00,0x80,0xfe,0x7f
+
+# CHECK: v_log_f16_e32 v0, s101 ; encoding: [0x65,0x80,0x00,0x7e]
+0x65,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x80,0x00,0x7e]
+0x66,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x80,0x00,0x7e]
+0x67,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, vcc_lo ; encoding: [0x6a,0x80,0x00,0x7e]
+0x6a,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, vcc_hi ; encoding: [0x6b,0x80,0x00,0x7e]
+0x6b,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, tba_lo ; encoding: [0x6c,0x80,0x00,0x7e]
+0x6c,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, tba_hi ; encoding: [0x6d,0x80,0x00,0x7e]
+0x6d,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, tma_lo ; encoding: [0x6e,0x80,0x00,0x7e]
+0x6e,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, tma_hi ; encoding: [0x6f,0x80,0x00,0x7e]
+0x6f,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, ttmp11 ; encoding: [0x7b,0x80,0x00,0x7e]
+0x7b,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, m0 ; encoding: [0x7c,0x80,0x00,0x7e]
+0x7c,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, exec_lo ; encoding: [0x7e,0x80,0x00,0x7e]
+0x7e,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, exec_hi ; encoding: [0x7f,0x80,0x00,0x7e]
+0x7f,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, 0 ; encoding: [0x80,0x80,0x00,0x7e]
+0x80,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, -1 ; encoding: [0xc1,0x80,0x00,0x7e]
+0xc1,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, 0.5 ; encoding: [0xf0,0x80,0x00,0x7e]
+0xf0,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, -4.0 ; encoding: [0xf7,0x80,0x00,0x7e]
+0xf7,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, scc ; encoding: [0xfd,0x80,0x00,0x7e]
+0xfd,0x80,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, 0xfe0b ; encoding: [0xff,0x80,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x80,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_log_f16_e32 v0, 0x3456 ; encoding: [0xff,0x80,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x80,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_log_f16_e32 v0, v0 ; encoding: [0x00,0x81,0x00,0x7e]
+0x00,0x81,0x00,0x7e
+
+# CHECK: v_log_f16_e32 v0, v255 ; encoding: [0xff,0x81,0x00,0x7e]
+0xff,0x81,0x00,0x7e
+
+# CHECK: v_log_f16_e64 v0, s0 ; encoding: [0x00,0x00,0x80,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v255, s0 ; encoding: [0xff,0x00,0x80,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x80,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, s101 ; encoding: [0x00,0x00,0x80,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x80,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x80,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x80,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x80,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x80,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x80,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x80,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x80,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x80,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, m0 ; encoding: [0x00,0x00,0x80,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x80,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x80,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, 0 ; encoding: [0x00,0x00,0x80,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, -1 ; encoding: [0x00,0x00,0x80,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x80,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x80,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, scc ; encoding: [0x00,0x00,0x80,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, v0 ; encoding: [0x00,0x00,0x80,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x80,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, v255 ; encoding: [0x00,0x00,0x80,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x80,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, -s0 ; encoding: [0x00,0x00,0x80,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x80,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_log_f16_e64 v0, |s0| ; encoding: [0x00,0x01,0x80,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x80,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_log_f16_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x80,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x80,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e32 v0, s0 ; encoding: [0x00,0x82,0x00,0x7e]
+0x00,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v255, s0 ; encoding: [0x00,0x82,0xfe,0x7f]
+0x00,0x82,0xfe,0x7f
+
+# CHECK: v_exp_f16_e32 v0, s101 ; encoding: [0x65,0x82,0x00,0x7e]
+0x65,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x82,0x00,0x7e]
+0x66,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x82,0x00,0x7e]
+0x67,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, vcc_lo ; encoding: [0x6a,0x82,0x00,0x7e]
+0x6a,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, vcc_hi ; encoding: [0x6b,0x82,0x00,0x7e]
+0x6b,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, tba_lo ; encoding: [0x6c,0x82,0x00,0x7e]
+0x6c,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, tba_hi ; encoding: [0x6d,0x82,0x00,0x7e]
+0x6d,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, tma_lo ; encoding: [0x6e,0x82,0x00,0x7e]
+0x6e,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, tma_hi ; encoding: [0x6f,0x82,0x00,0x7e]
+0x6f,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, ttmp11 ; encoding: [0x7b,0x82,0x00,0x7e]
+0x7b,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, m0 ; encoding: [0x7c,0x82,0x00,0x7e]
+0x7c,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, exec_lo ; encoding: [0x7e,0x82,0x00,0x7e]
+0x7e,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, exec_hi ; encoding: [0x7f,0x82,0x00,0x7e]
+0x7f,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, 0 ; encoding: [0x80,0x82,0x00,0x7e]
+0x80,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, -1 ; encoding: [0xc1,0x82,0x00,0x7e]
+0xc1,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, 0.5 ; encoding: [0xf0,0x82,0x00,0x7e]
+0xf0,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, -4.0 ; encoding: [0xf7,0x82,0x00,0x7e]
+0xf7,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, scc ; encoding: [0xfd,0x82,0x00,0x7e]
+0xfd,0x82,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, 0xfe0b ; encoding: [0xff,0x82,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x82,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_exp_f16_e32 v0, 0x3456 ; encoding: [0xff,0x82,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x82,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_exp_f16_e32 v0, v0 ; encoding: [0x00,0x83,0x00,0x7e]
+0x00,0x83,0x00,0x7e
+
+# CHECK: v_exp_f16_e32 v0, v255 ; encoding: [0xff,0x83,0x00,0x7e]
+0xff,0x83,0x00,0x7e
+
+# CHECK: v_exp_f16_e64 v0, s0 ; encoding: [0x00,0x00,0x81,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v255, s0 ; encoding: [0xff,0x00,0x81,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x81,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, s101 ; encoding: [0x00,0x00,0x81,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x81,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x81,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x81,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x81,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x81,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x81,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x81,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x81,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x81,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, m0 ; encoding: [0x00,0x00,0x81,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x81,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x81,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, 0 ; encoding: [0x00,0x00,0x81,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, -1 ; encoding: [0x00,0x00,0x81,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x81,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x81,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, scc ; encoding: [0x00,0x00,0x81,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, v0 ; encoding: [0x00,0x00,0x81,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x81,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, v255 ; encoding: [0x00,0x00,0x81,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x81,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, -s0 ; encoding: [0x00,0x00,0x81,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x81,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_exp_f16_e64 v0, |s0| ; encoding: [0x00,0x01,0x81,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x81,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_exp_f16_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x81,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x81,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e32 v0, s0 ; encoding: [0x00,0x84,0x00,0x7e]
+0x00,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v255, s0 ; encoding: [0x00,0x84,0xfe,0x7f]
+0x00,0x84,0xfe,0x7f
+
+# CHECK: v_frexp_mant_f16_e32 v0, s101 ; encoding: [0x65,0x84,0x00,0x7e]
+0x65,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x84,0x00,0x7e]
+0x66,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x84,0x00,0x7e]
+0x67,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, vcc_lo ; encoding: [0x6a,0x84,0x00,0x7e]
+0x6a,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, vcc_hi ; encoding: [0x6b,0x84,0x00,0x7e]
+0x6b,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, tba_lo ; encoding: [0x6c,0x84,0x00,0x7e]
+0x6c,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, tba_hi ; encoding: [0x6d,0x84,0x00,0x7e]
+0x6d,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, tma_lo ; encoding: [0x6e,0x84,0x00,0x7e]
+0x6e,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, tma_hi ; encoding: [0x6f,0x84,0x00,0x7e]
+0x6f,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, ttmp11 ; encoding: [0x7b,0x84,0x00,0x7e]
+0x7b,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, m0 ; encoding: [0x7c,0x84,0x00,0x7e]
+0x7c,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, exec_lo ; encoding: [0x7e,0x84,0x00,0x7e]
+0x7e,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, exec_hi ; encoding: [0x7f,0x84,0x00,0x7e]
+0x7f,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, 0 ; encoding: [0x80,0x84,0x00,0x7e]
+0x80,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, -1 ; encoding: [0xc1,0x84,0x00,0x7e]
+0xc1,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, 0.5 ; encoding: [0xf0,0x84,0x00,0x7e]
+0xf0,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, -4.0 ; encoding: [0xf7,0x84,0x00,0x7e]
+0xf7,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, scc ; encoding: [0xfd,0x84,0x00,0x7e]
+0xfd,0x84,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, 0xfe0b ; encoding: [0xff,0x84,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x84,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e32 v0, 0x3456 ; encoding: [0xff,0x84,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x84,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e32 v0, v0 ; encoding: [0x00,0x85,0x00,0x7e]
+0x00,0x85,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e32 v0, v255 ; encoding: [0xff,0x85,0x00,0x7e]
+0xff,0x85,0x00,0x7e
+
+# CHECK: v_frexp_mant_f16_e64 v0, s0 ; encoding: [0x00,0x00,0x82,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v255, s0 ; encoding: [0xff,0x00,0x82,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x82,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, s101 ; encoding: [0x00,0x00,0x82,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x82,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x82,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x82,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x82,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x82,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x82,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x82,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x82,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x82,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, m0 ; encoding: [0x00,0x00,0x82,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x82,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x82,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, 0 ; encoding: [0x00,0x00,0x82,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, -1 ; encoding: [0x00,0x00,0x82,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x82,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x82,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, scc ; encoding: [0x00,0x00,0x82,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, v0 ; encoding: [0x00,0x00,0x82,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x82,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, v255 ; encoding: [0x00,0x00,0x82,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x82,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, -s0 ; encoding: [0x00,0x00,0x82,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x82,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_frexp_mant_f16_e64 v0, |s0| ; encoding: [0x00,0x01,0x82,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x82,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x82,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x82,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, s0 ; encoding: [0x00,0x86,0x00,0x7e]
+0x00,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v255, s0 ; encoding: [0x00,0x86,0xfe,0x7f]
+0x00,0x86,0xfe,0x7f
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, s101 ; encoding: [0x65,0x86,0x00,0x7e]
+0x65,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x86,0x00,0x7e]
+0x66,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x86,0x00,0x7e]
+0x67,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, vcc_lo ; encoding: [0x6a,0x86,0x00,0x7e]
+0x6a,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, vcc_hi ; encoding: [0x6b,0x86,0x00,0x7e]
+0x6b,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, tba_lo ; encoding: [0x6c,0x86,0x00,0x7e]
+0x6c,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, tba_hi ; encoding: [0x6d,0x86,0x00,0x7e]
+0x6d,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, tma_lo ; encoding: [0x6e,0x86,0x00,0x7e]
+0x6e,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, tma_hi ; encoding: [0x6f,0x86,0x00,0x7e]
+0x6f,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, ttmp11 ; encoding: [0x7b,0x86,0x00,0x7e]
+0x7b,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, m0 ; encoding: [0x7c,0x86,0x00,0x7e]
+0x7c,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, exec_lo ; encoding: [0x7e,0x86,0x00,0x7e]
+0x7e,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, exec_hi ; encoding: [0x7f,0x86,0x00,0x7e]
+0x7f,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, 0 ; encoding: [0x80,0x86,0x00,0x7e]
+0x80,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, -1 ; encoding: [0xc1,0x86,0x00,0x7e]
+0xc1,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, 0.5 ; encoding: [0xf0,0x86,0x00,0x7e]
+0xf0,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, -4.0 ; encoding: [0xf7,0x86,0x00,0x7e]
+0xf7,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, scc ; encoding: [0xfd,0x86,0x00,0x7e]
+0xfd,0x86,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, 0xfe0b ; encoding: [0xff,0x86,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x86,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, 0x3456 ; encoding: [0xff,0x86,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x86,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, v0 ; encoding: [0x00,0x87,0x00,0x7e]
+0x00,0x87,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e32 v0, v255 ; encoding: [0xff,0x87,0x00,0x7e]
+0xff,0x87,0x00,0x7e
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, s0 ; encoding: [0x00,0x00,0x83,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v255, s0 ; encoding: [0xff,0x00,0x83,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x83,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, s101 ; encoding: [0x00,0x00,0x83,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x83,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x83,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x83,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x83,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x83,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x83,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x83,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x83,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x83,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, m0 ; encoding: [0x00,0x00,0x83,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x83,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x83,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, 0 ; encoding: [0x00,0x00,0x83,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, -1 ; encoding: [0x00,0x00,0x83,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x83,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x83,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, scc ; encoding: [0x00,0x00,0x83,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, v0 ; encoding: [0x00,0x00,0x83,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x83,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, v255 ; encoding: [0x00,0x00,0x83,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x83,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, -s0 ; encoding: [0x00,0x00,0x83,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x83,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, |s0| ; encoding: [0x00,0x01,0x83,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x83,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x83,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x83,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e32 v0, s0 ; encoding: [0x00,0x88,0x00,0x7e]
+0x00,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v255, s0 ; encoding: [0x00,0x88,0xfe,0x7f]
+0x00,0x88,0xfe,0x7f
+
+# CHECK: v_floor_f16_e32 v0, s101 ; encoding: [0x65,0x88,0x00,0x7e]
+0x65,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x88,0x00,0x7e]
+0x66,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x88,0x00,0x7e]
+0x67,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, vcc_lo ; encoding: [0x6a,0x88,0x00,0x7e]
+0x6a,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, vcc_hi ; encoding: [0x6b,0x88,0x00,0x7e]
+0x6b,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, tba_lo ; encoding: [0x6c,0x88,0x00,0x7e]
+0x6c,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, tba_hi ; encoding: [0x6d,0x88,0x00,0x7e]
+0x6d,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, tma_lo ; encoding: [0x6e,0x88,0x00,0x7e]
+0x6e,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, tma_hi ; encoding: [0x6f,0x88,0x00,0x7e]
+0x6f,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, ttmp11 ; encoding: [0x7b,0x88,0x00,0x7e]
+0x7b,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, m0 ; encoding: [0x7c,0x88,0x00,0x7e]
+0x7c,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, exec_lo ; encoding: [0x7e,0x88,0x00,0x7e]
+0x7e,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, exec_hi ; encoding: [0x7f,0x88,0x00,0x7e]
+0x7f,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, 0 ; encoding: [0x80,0x88,0x00,0x7e]
+0x80,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, -1 ; encoding: [0xc1,0x88,0x00,0x7e]
+0xc1,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, 0.5 ; encoding: [0xf0,0x88,0x00,0x7e]
+0xf0,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, -4.0 ; encoding: [0xf7,0x88,0x00,0x7e]
+0xf7,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, scc ; encoding: [0xfd,0x88,0x00,0x7e]
+0xfd,0x88,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, 0xfe0b ; encoding: [0xff,0x88,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x88,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_floor_f16_e32 v0, 0x3456 ; encoding: [0xff,0x88,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x88,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_floor_f16_e32 v0, v0 ; encoding: [0x00,0x89,0x00,0x7e]
+0x00,0x89,0x00,0x7e
+
+# CHECK: v_floor_f16_e32 v0, v255 ; encoding: [0xff,0x89,0x00,0x7e]
+0xff,0x89,0x00,0x7e
+
+# CHECK: v_floor_f16_e64 v0, s0 ; encoding: [0x00,0x00,0x84,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v255, s0 ; encoding: [0xff,0x00,0x84,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x84,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, s101 ; encoding: [0x00,0x00,0x84,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x84,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x84,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x84,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x84,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x84,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x84,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x84,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x84,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x84,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, m0 ; encoding: [0x00,0x00,0x84,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x84,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x84,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, 0 ; encoding: [0x00,0x00,0x84,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, -1 ; encoding: [0x00,0x00,0x84,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x84,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x84,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, scc ; encoding: [0x00,0x00,0x84,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, v0 ; encoding: [0x00,0x00,0x84,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x84,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, v255 ; encoding: [0x00,0x00,0x84,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x84,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, -s0 ; encoding: [0x00,0x00,0x84,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x84,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_floor_f16_e64 v0, |s0| ; encoding: [0x00,0x01,0x84,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x84,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_floor_f16_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x84,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x84,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e32 v0, s0 ; encoding: [0x00,0x8a,0x00,0x7e]
+0x00,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v255, s0 ; encoding: [0x00,0x8a,0xfe,0x7f]
+0x00,0x8a,0xfe,0x7f
+
+# CHECK: v_ceil_f16_e32 v0, s101 ; encoding: [0x65,0x8a,0x00,0x7e]
+0x65,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x8a,0x00,0x7e]
+0x66,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x8a,0x00,0x7e]
+0x67,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, vcc_lo ; encoding: [0x6a,0x8a,0x00,0x7e]
+0x6a,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, vcc_hi ; encoding: [0x6b,0x8a,0x00,0x7e]
+0x6b,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, tba_lo ; encoding: [0x6c,0x8a,0x00,0x7e]
+0x6c,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, tba_hi ; encoding: [0x6d,0x8a,0x00,0x7e]
+0x6d,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, tma_lo ; encoding: [0x6e,0x8a,0x00,0x7e]
+0x6e,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, tma_hi ; encoding: [0x6f,0x8a,0x00,0x7e]
+0x6f,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, ttmp11 ; encoding: [0x7b,0x8a,0x00,0x7e]
+0x7b,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, m0 ; encoding: [0x7c,0x8a,0x00,0x7e]
+0x7c,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, exec_lo ; encoding: [0x7e,0x8a,0x00,0x7e]
+0x7e,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, exec_hi ; encoding: [0x7f,0x8a,0x00,0x7e]
+0x7f,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, 0 ; encoding: [0x80,0x8a,0x00,0x7e]
+0x80,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, -1 ; encoding: [0xc1,0x8a,0x00,0x7e]
+0xc1,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, 0.5 ; encoding: [0xf0,0x8a,0x00,0x7e]
+0xf0,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, -4.0 ; encoding: [0xf7,0x8a,0x00,0x7e]
+0xf7,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, scc ; encoding: [0xfd,0x8a,0x00,0x7e]
+0xfd,0x8a,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, 0xfe0b ; encoding: [0xff,0x8a,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x8a,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_ceil_f16_e32 v0, 0x3456 ; encoding: [0xff,0x8a,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x8a,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_ceil_f16_e32 v0, v0 ; encoding: [0x00,0x8b,0x00,0x7e]
+0x00,0x8b,0x00,0x7e
+
+# CHECK: v_ceil_f16_e32 v0, v255 ; encoding: [0xff,0x8b,0x00,0x7e]
+0xff,0x8b,0x00,0x7e
+
+# CHECK: v_ceil_f16_e64 v0, s0 ; encoding: [0x00,0x00,0x85,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v255, s0 ; encoding: [0xff,0x00,0x85,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x85,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, s101 ; encoding: [0x00,0x00,0x85,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x85,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x85,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x85,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x85,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x85,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x85,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x85,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x85,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x85,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, m0 ; encoding: [0x00,0x00,0x85,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x85,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x85,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, 0 ; encoding: [0x00,0x00,0x85,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, -1 ; encoding: [0x00,0x00,0x85,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x85,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x85,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, scc ; encoding: [0x00,0x00,0x85,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, v0 ; encoding: [0x00,0x00,0x85,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x85,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, v255 ; encoding: [0x00,0x00,0x85,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x85,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, -s0 ; encoding: [0x00,0x00,0x85,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x85,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_ceil_f16_e64 v0, |s0| ; encoding: [0x00,0x01,0x85,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x85,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ceil_f16_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x85,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x85,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e32 v0, s0 ; encoding: [0x00,0x8c,0x00,0x7e]
+0x00,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v255, s0 ; encoding: [0x00,0x8c,0xfe,0x7f]
+0x00,0x8c,0xfe,0x7f
+
+# CHECK: v_trunc_f16_e32 v0, s101 ; encoding: [0x65,0x8c,0x00,0x7e]
+0x65,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x8c,0x00,0x7e]
+0x66,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x8c,0x00,0x7e]
+0x67,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, vcc_lo ; encoding: [0x6a,0x8c,0x00,0x7e]
+0x6a,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, vcc_hi ; encoding: [0x6b,0x8c,0x00,0x7e]
+0x6b,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, tba_lo ; encoding: [0x6c,0x8c,0x00,0x7e]
+0x6c,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, tba_hi ; encoding: [0x6d,0x8c,0x00,0x7e]
+0x6d,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, tma_lo ; encoding: [0x6e,0x8c,0x00,0x7e]
+0x6e,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, tma_hi ; encoding: [0x6f,0x8c,0x00,0x7e]
+0x6f,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, ttmp11 ; encoding: [0x7b,0x8c,0x00,0x7e]
+0x7b,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, m0 ; encoding: [0x7c,0x8c,0x00,0x7e]
+0x7c,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, exec_lo ; encoding: [0x7e,0x8c,0x00,0x7e]
+0x7e,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, exec_hi ; encoding: [0x7f,0x8c,0x00,0x7e]
+0x7f,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, 0 ; encoding: [0x80,0x8c,0x00,0x7e]
+0x80,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, -1 ; encoding: [0xc1,0x8c,0x00,0x7e]
+0xc1,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, 0.5 ; encoding: [0xf0,0x8c,0x00,0x7e]
+0xf0,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, -4.0 ; encoding: [0xf7,0x8c,0x00,0x7e]
+0xf7,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, scc ; encoding: [0xfd,0x8c,0x00,0x7e]
+0xfd,0x8c,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, 0xfe0b ; encoding: [0xff,0x8c,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x8c,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_trunc_f16_e32 v0, 0x3456 ; encoding: [0xff,0x8c,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x8c,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_trunc_f16_e32 v0, v0 ; encoding: [0x00,0x8d,0x00,0x7e]
+0x00,0x8d,0x00,0x7e
+
+# CHECK: v_trunc_f16_e32 v0, v255 ; encoding: [0xff,0x8d,0x00,0x7e]
+0xff,0x8d,0x00,0x7e
+
+# CHECK: v_trunc_f16_e64 v0, s0 ; encoding: [0x00,0x00,0x86,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v255, s0 ; encoding: [0xff,0x00,0x86,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x86,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, s101 ; encoding: [0x00,0x00,0x86,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x86,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x86,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x86,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x86,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x86,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x86,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x86,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x86,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x86,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, m0 ; encoding: [0x00,0x00,0x86,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x86,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x86,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, 0 ; encoding: [0x00,0x00,0x86,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, -1 ; encoding: [0x00,0x00,0x86,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x86,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x86,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, scc ; encoding: [0x00,0x00,0x86,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, v0 ; encoding: [0x00,0x00,0x86,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x86,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, v255 ; encoding: [0x00,0x00,0x86,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x86,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, -s0 ; encoding: [0x00,0x00,0x86,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x86,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_trunc_f16_e64 v0, |s0| ; encoding: [0x00,0x01,0x86,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x86,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_trunc_f16_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x86,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x86,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e32 v0, s0 ; encoding: [0x00,0x8e,0x00,0x7e]
+0x00,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v255, s0 ; encoding: [0x00,0x8e,0xfe,0x7f]
+0x00,0x8e,0xfe,0x7f
+
+# CHECK: v_rndne_f16_e32 v0, s101 ; encoding: [0x65,0x8e,0x00,0x7e]
+0x65,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x8e,0x00,0x7e]
+0x66,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x8e,0x00,0x7e]
+0x67,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, vcc_lo ; encoding: [0x6a,0x8e,0x00,0x7e]
+0x6a,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, vcc_hi ; encoding: [0x6b,0x8e,0x00,0x7e]
+0x6b,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, tba_lo ; encoding: [0x6c,0x8e,0x00,0x7e]
+0x6c,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, tba_hi ; encoding: [0x6d,0x8e,0x00,0x7e]
+0x6d,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, tma_lo ; encoding: [0x6e,0x8e,0x00,0x7e]
+0x6e,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, tma_hi ; encoding: [0x6f,0x8e,0x00,0x7e]
+0x6f,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, ttmp11 ; encoding: [0x7b,0x8e,0x00,0x7e]
+0x7b,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, m0 ; encoding: [0x7c,0x8e,0x00,0x7e]
+0x7c,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, exec_lo ; encoding: [0x7e,0x8e,0x00,0x7e]
+0x7e,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, exec_hi ; encoding: [0x7f,0x8e,0x00,0x7e]
+0x7f,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, 0 ; encoding: [0x80,0x8e,0x00,0x7e]
+0x80,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, -1 ; encoding: [0xc1,0x8e,0x00,0x7e]
+0xc1,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, 0.5 ; encoding: [0xf0,0x8e,0x00,0x7e]
+0xf0,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, -4.0 ; encoding: [0xf7,0x8e,0x00,0x7e]
+0xf7,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, scc ; encoding: [0xfd,0x8e,0x00,0x7e]
+0xfd,0x8e,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, 0xfe0b ; encoding: [0xff,0x8e,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x8e,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_rndne_f16_e32 v0, 0x3456 ; encoding: [0xff,0x8e,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x8e,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_rndne_f16_e32 v0, v0 ; encoding: [0x00,0x8f,0x00,0x7e]
+0x00,0x8f,0x00,0x7e
+
+# CHECK: v_rndne_f16_e32 v0, v255 ; encoding: [0xff,0x8f,0x00,0x7e]
+0xff,0x8f,0x00,0x7e
+
+# CHECK: v_rndne_f16_e64 v0, s0 ; encoding: [0x00,0x00,0x87,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v255, s0 ; encoding: [0xff,0x00,0x87,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x87,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, s101 ; encoding: [0x00,0x00,0x87,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x87,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x87,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x87,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x87,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x87,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x87,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x87,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x87,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x87,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, m0 ; encoding: [0x00,0x00,0x87,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x87,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x87,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, 0 ; encoding: [0x00,0x00,0x87,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, -1 ; encoding: [0x00,0x00,0x87,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x87,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x87,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, scc ; encoding: [0x00,0x00,0x87,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, v0 ; encoding: [0x00,0x00,0x87,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x87,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, v255 ; encoding: [0x00,0x00,0x87,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x87,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, -s0 ; encoding: [0x00,0x00,0x87,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x87,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_rndne_f16_e64 v0, |s0| ; encoding: [0x00,0x01,0x87,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x87,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_rndne_f16_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x87,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x87,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e32 v0, s0 ; encoding: [0x00,0x90,0x00,0x7e]
+0x00,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v255, s0 ; encoding: [0x00,0x90,0xfe,0x7f]
+0x00,0x90,0xfe,0x7f
+
+# CHECK: v_fract_f16_e32 v0, s101 ; encoding: [0x65,0x90,0x00,0x7e]
+0x65,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x90,0x00,0x7e]
+0x66,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x90,0x00,0x7e]
+0x67,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, vcc_lo ; encoding: [0x6a,0x90,0x00,0x7e]
+0x6a,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, vcc_hi ; encoding: [0x6b,0x90,0x00,0x7e]
+0x6b,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, tba_lo ; encoding: [0x6c,0x90,0x00,0x7e]
+0x6c,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, tba_hi ; encoding: [0x6d,0x90,0x00,0x7e]
+0x6d,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, tma_lo ; encoding: [0x6e,0x90,0x00,0x7e]
+0x6e,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, tma_hi ; encoding: [0x6f,0x90,0x00,0x7e]
+0x6f,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, ttmp11 ; encoding: [0x7b,0x90,0x00,0x7e]
+0x7b,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, m0 ; encoding: [0x7c,0x90,0x00,0x7e]
+0x7c,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, exec_lo ; encoding: [0x7e,0x90,0x00,0x7e]
+0x7e,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, exec_hi ; encoding: [0x7f,0x90,0x00,0x7e]
+0x7f,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, 0 ; encoding: [0x80,0x90,0x00,0x7e]
+0x80,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, -1 ; encoding: [0xc1,0x90,0x00,0x7e]
+0xc1,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, 0.5 ; encoding: [0xf0,0x90,0x00,0x7e]
+0xf0,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, -4.0 ; encoding: [0xf7,0x90,0x00,0x7e]
+0xf7,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, scc ; encoding: [0xfd,0x90,0x00,0x7e]
+0xfd,0x90,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, 0xfe0b ; encoding: [0xff,0x90,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x90,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_fract_f16_e32 v0, 0x3456 ; encoding: [0xff,0x90,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x90,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_fract_f16_e32 v0, v0 ; encoding: [0x00,0x91,0x00,0x7e]
+0x00,0x91,0x00,0x7e
+
+# CHECK: v_fract_f16_e32 v0, v255 ; encoding: [0xff,0x91,0x00,0x7e]
+0xff,0x91,0x00,0x7e
+
+# CHECK: v_fract_f16_e64 v0, s0 ; encoding: [0x00,0x00,0x88,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v255, s0 ; encoding: [0xff,0x00,0x88,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x88,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, s101 ; encoding: [0x00,0x00,0x88,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x88,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x88,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x88,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x88,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x88,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x88,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x88,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x88,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x88,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, m0 ; encoding: [0x00,0x00,0x88,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x88,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x88,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, 0 ; encoding: [0x00,0x00,0x88,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, -1 ; encoding: [0x00,0x00,0x88,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x88,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x88,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, scc ; encoding: [0x00,0x00,0x88,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x88,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, v0 ; encoding: [0x00,0x00,0x88,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x88,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, v255 ; encoding: [0x00,0x00,0x88,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x88,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, -s0 ; encoding: [0x00,0x00,0x88,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x88,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_fract_f16_e64 v0, |s0| ; encoding: [0x00,0x01,0x88,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x88,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fract_f16_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x88,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x88,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e32 v0, s0 ; encoding: [0x00,0x92,0x00,0x7e]
+0x00,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v255, s0 ; encoding: [0x00,0x92,0xfe,0x7f]
+0x00,0x92,0xfe,0x7f
+
+# CHECK: v_sin_f16_e32 v0, s101 ; encoding: [0x65,0x92,0x00,0x7e]
+0x65,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x92,0x00,0x7e]
+0x66,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x92,0x00,0x7e]
+0x67,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, vcc_lo ; encoding: [0x6a,0x92,0x00,0x7e]
+0x6a,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, vcc_hi ; encoding: [0x6b,0x92,0x00,0x7e]
+0x6b,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, tba_lo ; encoding: [0x6c,0x92,0x00,0x7e]
+0x6c,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, tba_hi ; encoding: [0x6d,0x92,0x00,0x7e]
+0x6d,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, tma_lo ; encoding: [0x6e,0x92,0x00,0x7e]
+0x6e,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, tma_hi ; encoding: [0x6f,0x92,0x00,0x7e]
+0x6f,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, ttmp11 ; encoding: [0x7b,0x92,0x00,0x7e]
+0x7b,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, m0 ; encoding: [0x7c,0x92,0x00,0x7e]
+0x7c,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, exec_lo ; encoding: [0x7e,0x92,0x00,0x7e]
+0x7e,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, exec_hi ; encoding: [0x7f,0x92,0x00,0x7e]
+0x7f,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, 0 ; encoding: [0x80,0x92,0x00,0x7e]
+0x80,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, -1 ; encoding: [0xc1,0x92,0x00,0x7e]
+0xc1,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, 0.5 ; encoding: [0xf0,0x92,0x00,0x7e]
+0xf0,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, -4.0 ; encoding: [0xf7,0x92,0x00,0x7e]
+0xf7,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, scc ; encoding: [0xfd,0x92,0x00,0x7e]
+0xfd,0x92,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, 0xfe0b ; encoding: [0xff,0x92,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x92,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_sin_f16_e32 v0, 0x3456 ; encoding: [0xff,0x92,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x92,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_sin_f16_e32 v0, v0 ; encoding: [0x00,0x93,0x00,0x7e]
+0x00,0x93,0x00,0x7e
+
+# CHECK: v_sin_f16_e32 v0, v255 ; encoding: [0xff,0x93,0x00,0x7e]
+0xff,0x93,0x00,0x7e
+
+# CHECK: v_sin_f16_e64 v0, s0 ; encoding: [0x00,0x00,0x89,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v255, s0 ; encoding: [0xff,0x00,0x89,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x89,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, s101 ; encoding: [0x00,0x00,0x89,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x89,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x89,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x89,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x89,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x89,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x89,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x89,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x89,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x89,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, m0 ; encoding: [0x00,0x00,0x89,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x89,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x89,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, 0 ; encoding: [0x00,0x00,0x89,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, -1 ; encoding: [0x00,0x00,0x89,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x89,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x89,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, scc ; encoding: [0x00,0x00,0x89,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x89,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, v0 ; encoding: [0x00,0x00,0x89,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x89,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, v255 ; encoding: [0x00,0x00,0x89,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x89,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, -s0 ; encoding: [0x00,0x00,0x89,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x89,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_sin_f16_e64 v0, |s0| ; encoding: [0x00,0x01,0x89,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x89,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sin_f16_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x89,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x89,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e32 v0, s0 ; encoding: [0x00,0x94,0x00,0x7e]
+0x00,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v255, s0 ; encoding: [0x00,0x94,0xfe,0x7f]
+0x00,0x94,0xfe,0x7f
+
+# CHECK: v_cos_f16_e32 v0, s101 ; encoding: [0x65,0x94,0x00,0x7e]
+0x65,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, flat_scratch_lo ; encoding: [0x66,0x94,0x00,0x7e]
+0x66,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, flat_scratch_hi ; encoding: [0x67,0x94,0x00,0x7e]
+0x67,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, vcc_lo ; encoding: [0x6a,0x94,0x00,0x7e]
+0x6a,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, vcc_hi ; encoding: [0x6b,0x94,0x00,0x7e]
+0x6b,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, tba_lo ; encoding: [0x6c,0x94,0x00,0x7e]
+0x6c,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, tba_hi ; encoding: [0x6d,0x94,0x00,0x7e]
+0x6d,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, tma_lo ; encoding: [0x6e,0x94,0x00,0x7e]
+0x6e,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, tma_hi ; encoding: [0x6f,0x94,0x00,0x7e]
+0x6f,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, ttmp11 ; encoding: [0x7b,0x94,0x00,0x7e]
+0x7b,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, m0 ; encoding: [0x7c,0x94,0x00,0x7e]
+0x7c,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, exec_lo ; encoding: [0x7e,0x94,0x00,0x7e]
+0x7e,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, exec_hi ; encoding: [0x7f,0x94,0x00,0x7e]
+0x7f,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, 0 ; encoding: [0x80,0x94,0x00,0x7e]
+0x80,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, -1 ; encoding: [0xc1,0x94,0x00,0x7e]
+0xc1,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, 0.5 ; encoding: [0xf0,0x94,0x00,0x7e]
+0xf0,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, -4.0 ; encoding: [0xf7,0x94,0x00,0x7e]
+0xf7,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, scc ; encoding: [0xfd,0x94,0x00,0x7e]
+0xfd,0x94,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, 0xfe0b ; encoding: [0xff,0x94,0x00,0x7e,0x0b,0xfe,0x00,0x00]
+0xff,0x94,0x00,0x7e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cos_f16_e32 v0, 0x3456 ; encoding: [0xff,0x94,0x00,0x7e,0x56,0x34,0x00,0x00]
+0xff,0x94,0x00,0x7e,0x56,0x34,0x00,0x00
+
+# CHECK: v_cos_f16_e32 v0, v0 ; encoding: [0x00,0x95,0x00,0x7e]
+0x00,0x95,0x00,0x7e
+
+# CHECK: v_cos_f16_e32 v0, v255 ; encoding: [0xff,0x95,0x00,0x7e]
+0xff,0x95,0x00,0x7e
+
+# CHECK: v_cos_f16_e64 v0, s0 ; encoding: [0x00,0x00,0x8a,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v255, s0 ; encoding: [0xff,0x00,0x8a,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x8a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, s101 ; encoding: [0x00,0x00,0x8a,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x8a,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x8a,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x8a,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x8a,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, tba_lo ; encoding: [0x00,0x00,0x8a,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, tba_hi ; encoding: [0x00,0x00,0x8a,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, tma_lo ; encoding: [0x00,0x00,0x8a,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, tma_hi ; encoding: [0x00,0x00,0x8a,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x8a,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, m0 ; encoding: [0x00,0x00,0x8a,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, exec_lo ; encoding: [0x00,0x00,0x8a,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, exec_hi ; encoding: [0x00,0x00,0x8a,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, 0 ; encoding: [0x00,0x00,0x8a,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, -1 ; encoding: [0x00,0x00,0x8a,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, 0.5 ; encoding: [0x00,0x00,0x8a,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, -4.0 ; encoding: [0x00,0x00,0x8a,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, scc ; encoding: [0x00,0x00,0x8a,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, v0 ; encoding: [0x00,0x00,0x8a,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, v255 ; encoding: [0x00,0x00,0x8a,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x8a,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, -s0 ; encoding: [0x00,0x00,0x8a,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x8a,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_cos_f16_e64 v0, |s0| ; encoding: [0x00,0x01,0x8a,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x8a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cos_f16_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x8a,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x8a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e32 v0, s0 ; encoding: [0x00,0x96,0x00,0x7e]
+0x00,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v255, s0 ; encoding: [0x00,0x96,0xfe,0x7f]
+0x00,0x96,0xfe,0x7f
+
+# CHECK: v_exp_legacy_f32_e32 v0, s101 ; encoding: [0x65,0x96,0x00,0x7e]
+0x65,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x96,0x00,0x7e]
+0x66,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x96,0x00,0x7e]
+0x67,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x96,0x00,0x7e]
+0x6a,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x96,0x00,0x7e]
+0x6b,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, tba_lo ; encoding: [0x6c,0x96,0x00,0x7e]
+0x6c,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, tba_hi ; encoding: [0x6d,0x96,0x00,0x7e]
+0x6d,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, tma_lo ; encoding: [0x6e,0x96,0x00,0x7e]
+0x6e,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, tma_hi ; encoding: [0x6f,0x96,0x00,0x7e]
+0x6f,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x96,0x00,0x7e]
+0x7b,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, m0 ; encoding: [0x7c,0x96,0x00,0x7e]
+0x7c,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, exec_lo ; encoding: [0x7e,0x96,0x00,0x7e]
+0x7e,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, exec_hi ; encoding: [0x7f,0x96,0x00,0x7e]
+0x7f,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, 0 ; encoding: [0x80,0x96,0x00,0x7e]
+0x80,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, -1 ; encoding: [0xc1,0x96,0x00,0x7e]
+0xc1,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, 0.5 ; encoding: [0xf0,0x96,0x00,0x7e]
+0xf0,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, -4.0 ; encoding: [0xf7,0x96,0x00,0x7e]
+0xf7,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, scc ; encoding: [0xfd,0x96,0x00,0x7e]
+0xfd,0x96,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x96,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x96,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_exp_legacy_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x96,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x96,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_exp_legacy_f32_e32 v0, v0 ; encoding: [0x00,0x97,0x00,0x7e]
+0x00,0x97,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e32 v0, v255 ; encoding: [0xff,0x97,0x00,0x7e]
+0xff,0x97,0x00,0x7e
+
+# CHECK: v_exp_legacy_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x8b,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x8b,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x8b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x8b,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x8b,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x8b,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x8b,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x8b,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x8b,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x8b,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x8b,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x8b,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x8b,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x8b,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x8b,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x8b,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x8b,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x8b,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x8b,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x8b,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, scc ; encoding: [0x00,0x00,0x8b,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x8b,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x8b,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x8b,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x8b,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x8b,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_exp_legacy_f32_e64 v0, |s0| ; encoding: [0x00,0x01,0x8b,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x8b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x8b,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x8b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_e64 v0, s0 mul:2 ; encoding: [0x00,0x00,0x8b,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x8b,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_exp_legacy_f32_e64 v0, s0 mul:4 ; encoding: [0x00,0x00,0x8b,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x8b,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_exp_legacy_f32_e64 v0, s0 div:2 ; encoding: [0x00,0x00,0x8b,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x8b,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_log_legacy_f32_e32 v0, s0 ; encoding: [0x00,0x98,0x00,0x7e]
+0x00,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v255, s0 ; encoding: [0x00,0x98,0xfe,0x7f]
+0x00,0x98,0xfe,0x7f
+
+# CHECK: v_log_legacy_f32_e32 v0, s101 ; encoding: [0x65,0x98,0x00,0x7e]
+0x65,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, flat_scratch_lo ; encoding: [0x66,0x98,0x00,0x7e]
+0x66,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, flat_scratch_hi ; encoding: [0x67,0x98,0x00,0x7e]
+0x67,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, vcc_lo ; encoding: [0x6a,0x98,0x00,0x7e]
+0x6a,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, vcc_hi ; encoding: [0x6b,0x98,0x00,0x7e]
+0x6b,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, tba_lo ; encoding: [0x6c,0x98,0x00,0x7e]
+0x6c,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, tba_hi ; encoding: [0x6d,0x98,0x00,0x7e]
+0x6d,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, tma_lo ; encoding: [0x6e,0x98,0x00,0x7e]
+0x6e,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, tma_hi ; encoding: [0x6f,0x98,0x00,0x7e]
+0x6f,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, ttmp11 ; encoding: [0x7b,0x98,0x00,0x7e]
+0x7b,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, m0 ; encoding: [0x7c,0x98,0x00,0x7e]
+0x7c,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, exec_lo ; encoding: [0x7e,0x98,0x00,0x7e]
+0x7e,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, exec_hi ; encoding: [0x7f,0x98,0x00,0x7e]
+0x7f,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, 0 ; encoding: [0x80,0x98,0x00,0x7e]
+0x80,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, -1 ; encoding: [0xc1,0x98,0x00,0x7e]
+0xc1,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, 0.5 ; encoding: [0xf0,0x98,0x00,0x7e]
+0xf0,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, -4.0 ; encoding: [0xf7,0x98,0x00,0x7e]
+0xf7,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, scc ; encoding: [0xfd,0x98,0x00,0x7e]
+0xfd,0x98,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, 0xaf123456 ; encoding: [0xff,0x98,0x00,0x7e,0x56,0x34,0x12,0xaf]
+0xff,0x98,0x00,0x7e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_log_legacy_f32_e32 v0, 0x3f717273 ; encoding: [0xff,0x98,0x00,0x7e,0x73,0x72,0x71,0x3f]
+0xff,0x98,0x00,0x7e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_log_legacy_f32_e32 v0, v0 ; encoding: [0x00,0x99,0x00,0x7e]
+0x00,0x99,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e32 v0, v255 ; encoding: [0xff,0x99,0x00,0x7e]
+0xff,0x99,0x00,0x7e
+
+# CHECK: v_log_legacy_f32_e64 v0, s0 ; encoding: [0x00,0x00,0x8c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v255, s0 ; encoding: [0xff,0x00,0x8c,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x8c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, s101 ; encoding: [0x00,0x00,0x8c,0xd1,0x65,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0x65,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, flat_scratch_lo ; encoding: [0x00,0x00,0x8c,0xd1,0x66,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0x66,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, flat_scratch_hi ; encoding: [0x00,0x00,0x8c,0xd1,0x67,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0x67,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, vcc_lo ; encoding: [0x00,0x00,0x8c,0xd1,0x6a,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0x6a,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, vcc_hi ; encoding: [0x00,0x00,0x8c,0xd1,0x6b,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0x6b,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, tba_lo ; encoding: [0x00,0x00,0x8c,0xd1,0x6c,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0x6c,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, tba_hi ; encoding: [0x00,0x00,0x8c,0xd1,0x6d,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0x6d,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, tma_lo ; encoding: [0x00,0x00,0x8c,0xd1,0x6e,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0x6e,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, tma_hi ; encoding: [0x00,0x00,0x8c,0xd1,0x6f,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0x6f,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, ttmp11 ; encoding: [0x00,0x00,0x8c,0xd1,0x7b,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0x7b,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, m0 ; encoding: [0x00,0x00,0x8c,0xd1,0x7c,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0x7c,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, exec_lo ; encoding: [0x00,0x00,0x8c,0xd1,0x7e,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0x7e,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, exec_hi ; encoding: [0x00,0x00,0x8c,0xd1,0x7f,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0x7f,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x8c,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, -1 ; encoding: [0x00,0x00,0x8c,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, 0.5 ; encoding: [0x00,0x00,0x8c,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, -4.0 ; encoding: [0x00,0x00,0x8c,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, scc ; encoding: [0x00,0x00,0x8c,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, v0 ; encoding: [0x00,0x00,0x8c,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, v255 ; encoding: [0x00,0x00,0x8c,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x8c,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, -s0 ; encoding: [0x00,0x00,0x8c,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x8c,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_log_legacy_f32_e64 v0, |s0| ; encoding: [0x00,0x01,0x8c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x8c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, s0 clamp ; encoding: [0x00,0x80,0x8c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x8c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_log_legacy_f32_e64 v0, s0 mul:2 ; encoding: [0x00,0x00,0x8c,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x8c,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_log_legacy_f32_e64 v0, s0 mul:4 ; encoding: [0x00,0x00,0x8c,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x8c,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_log_legacy_f32_e64 v0, s0 div:2 ; encoding: [0x00,0x00,0x8c,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x8c,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_cndmask_b32_e32 v0, vcc_lo, v0, vcc ; encoding: [0x6a,0x00,0x00,0x00]
+0x6a,0x00,0x00,0x00
+
+# CHECK: v_cndmask_b32_e32 v255, vcc_lo, v0, vcc ; encoding: [0x6a,0x00,0xfe,0x01]
+0x6a,0x00,0xfe,0x01
+
+# CHECK: v_cndmask_b32_e32 v0, vcc_hi, v0, vcc ; encoding: [0x6b,0x00,0x00,0x00]
+0x6b,0x00,0x00,0x00
+
+# CHECK: v_cndmask_b32_e32 v0, 0, v0, vcc ; encoding: [0x80,0x00,0x00,0x00]
+0x80,0x00,0x00,0x00
+
+# CHECK: v_cndmask_b32_e32 v0, -1, v0, vcc ; encoding: [0xc1,0x00,0x00,0x00]
+0xc1,0x00,0x00,0x00
+
+# CHECK: v_cndmask_b32_e32 v0, 0.5, v0, vcc ; encoding: [0xf0,0x00,0x00,0x00]
+0xf0,0x00,0x00,0x00
+
+# CHECK: v_cndmask_b32_e32 v0, -4.0, v0, vcc ; encoding: [0xf7,0x00,0x00,0x00]
+0xf7,0x00,0x00,0x00
+
+# CHECK: v_cndmask_b32_e32 v0, scc, v0, vcc ; encoding: [0xfd,0x00,0x00,0x00]
+0xfd,0x00,0x00,0x00
+
+# CHECK: v_cndmask_b32_e32 v0, v0, v0, vcc ; encoding: [0x00,0x01,0x00,0x00]
+0x00,0x01,0x00,0x00
+
+# CHECK: v_cndmask_b32_e32 v0, v255, v0, vcc ; encoding: [0xff,0x01,0x00,0x00]
+0xff,0x01,0x00,0x00
+
+# CHECK: v_cndmask_b32_e32 v0, vcc_lo, v255, vcc ; encoding: [0x6a,0xfe,0x01,0x00]
+0x6a,0xfe,0x01,0x00
+
+# CHECK: v_cndmask_b32_e64 v0, s0, s0, s[0:1] ; encoding: [0x00,0x00,0x00,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x00,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cndmask_b32_e64 v255, s0, s0, s[0:1] ; encoding: [0xff,0x00,0x00,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x00,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cndmask_b32_e64 v0, 0, s0, s[0:1] ; encoding: [0x00,0x00,0x00,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x00,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cndmask_b32_e64 v0, -1, s0, s[0:1] ; encoding: [0x00,0x00,0x00,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x00,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cndmask_b32_e64 v0, 0.5, s0, s[0:1] ; encoding: [0x00,0x00,0x00,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x00,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cndmask_b32_e64 v0, -4.0, s0, s[0:1] ; encoding: [0x00,0x00,0x00,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x00,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cndmask_b32_e64 v0, scc, s0, s[0:1] ; encoding: [0x00,0x00,0x00,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x00,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cndmask_b32_e64 v0, v0, s0, s[0:1] ; encoding: [0x00,0x00,0x00,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x00,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cndmask_b32_e64 v0, v255, s0, s[0:1] ; encoding: [0x00,0x00,0x00,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x00,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cndmask_b32_e64 v0, s0, 0, s[0:1] ; encoding: [0x00,0x00,0x00,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x00,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_cndmask_b32_e64 v0, s0, -1, s[0:1] ; encoding: [0x00,0x00,0x00,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x00,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_cndmask_b32_e64 v0, s0, 0.5, s[0:1] ; encoding: [0x00,0x00,0x00,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x00,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cndmask_b32_e64 v0, s0, -4.0, s[0:1] ; encoding: [0x00,0x00,0x00,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x00,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_cndmask_b32_e64 v0, s0, scc, s[0:1] ; encoding: [0x00,0x00,0x00,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x00,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cndmask_b32_e64 v0, s0, v0, s[0:1] ; encoding: [0x00,0x00,0x00,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x00,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_cndmask_b32_e64 v0, s0, v255, s[0:1] ; encoding: [0x00,0x00,0x00,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x00,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_add_f32_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x02]
+0x00,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x03]
+0x00,0x00,0xfe,0x03
+
+# CHECK: v_add_f32_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x02]
+0x65,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x02]
+0x66,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x02]
+0x67,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x02]
+0x6a,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x02]
+0x6b,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x02]
+0x6c,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x02]
+0x6d,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x02]
+0x6e,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x02]
+0x6f,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x02]
+0x7b,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x02]
+0x7c,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x02]
+0x7e,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x02]
+0x7f,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x02]
+0x80,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x02]
+0xc1,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x02]
+0xf0,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x02]
+0xf7,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x02]
+0xfd,0x00,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x02,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x02,0x56,0x34,0x12,0xaf
+
+# CHECK: v_add_f32_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x02,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x02,0x73,0x72,0x71,0x3f
+
+# CHECK: v_add_f32_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x02]
+0x00,0x01,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x02]
+0xff,0x01,0x00,0x02
+
+# CHECK: v_add_f32_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x02]
+0x00,0xfe,0x01,0x02
+
+# CHECK: v_add_f32_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x01,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x01,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_f32_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x01,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x01,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_f32_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x01,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x01,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_add_f32_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x01,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x01,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_add_f32_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x01,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x01,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_add_f32_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x01,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x01,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_add_f32_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x01,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x01,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_add_f32_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x01,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x01,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_add_f32_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x01,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x01,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_add_f32_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x01,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x01,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_add_f32_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x01,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x01,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_add_f32_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x01,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x01,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_add_f32_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x01,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x01,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_add_f32_e64 v0, s0, scc ; encoding: [0x00,0x00,0x01,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x01,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_add_f32_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x01,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x01,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_add_f32_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x01,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x01,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_add_f32_e64 v0, -s0, s0 ; encoding: [0x00,0x00,0x01,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x01,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_add_f32_e64 v0, s0, -s0 ; encoding: [0x00,0x00,0x01,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0x01,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_add_f32_e64 v0, -s0, -s0 ; encoding: [0x00,0x00,0x01,0xd1,0x00,0x00,0x00,0x60]
+0x00,0x00,0x01,0xd1,0x00,0x00,0x00,0x60
+
+# CHECK: v_add_f32_e64 v0, |s0|, s0 ; encoding: [0x00,0x01,0x01,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x01,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_f32_e64 v0, s0, |s0| ; encoding: [0x00,0x02,0x01,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0x01,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_f32_e64 v0, |s0|, |s0| ; encoding: [0x00,0x03,0x01,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x03,0x01,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_f32_e64 v0, s0, s0 clamp ; encoding: [0x00,0x80,0x01,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x01,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_f32_e64 v0, s0, s0 mul:2 ; encoding: [0x00,0x00,0x01,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x01,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_add_f32_e64 v0, s0, s0 mul:4 ; encoding: [0x00,0x00,0x01,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x01,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_add_f32_e64 v0, s0, s0 div:2 ; encoding: [0x00,0x00,0x01,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x01,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_sub_f32_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x04]
+0x00,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x05]
+0x00,0x00,0xfe,0x05
+
+# CHECK: v_sub_f32_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x04]
+0x65,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x04]
+0x66,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x04]
+0x67,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x04]
+0x6a,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x04]
+0x6b,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x04]
+0x6c,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x04]
+0x6d,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x04]
+0x6e,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x04]
+0x6f,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x04]
+0x7b,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x04]
+0x7c,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x04]
+0x7e,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x04]
+0x7f,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x04]
+0x80,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x04]
+0xc1,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x04]
+0xf0,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x04]
+0xf7,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x04]
+0xfd,0x00,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x04,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x04,0x56,0x34,0x12,0xaf
+
+# CHECK: v_sub_f32_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x04,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x04,0x73,0x72,0x71,0x3f
+
+# CHECK: v_sub_f32_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x04]
+0x00,0x01,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x04]
+0xff,0x01,0x00,0x04
+
+# CHECK: v_sub_f32_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x04]
+0x00,0xfe,0x01,0x04
+
+# CHECK: v_sub_f32_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x02,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x02,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sub_f32_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x02,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x02,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sub_f32_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x02,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x02,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_sub_f32_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x02,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x02,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_sub_f32_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x02,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x02,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_sub_f32_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x02,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x02,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_sub_f32_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x02,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x02,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_sub_f32_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x02,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x02,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_sub_f32_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x02,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x02,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_sub_f32_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x02,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x02,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_sub_f32_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x02,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x02,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_sub_f32_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x02,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x02,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_sub_f32_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x02,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x02,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_sub_f32_e64 v0, s0, scc ; encoding: [0x00,0x00,0x02,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x02,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_sub_f32_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x02,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x02,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_sub_f32_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x02,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x02,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_sub_f32_e64 v0, -s0, s0 ; encoding: [0x00,0x00,0x02,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x02,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_sub_f32_e64 v0, s0, -s0 ; encoding: [0x00,0x00,0x02,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0x02,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_sub_f32_e64 v0, -s0, -s0 ; encoding: [0x00,0x00,0x02,0xd1,0x00,0x00,0x00,0x60]
+0x00,0x00,0x02,0xd1,0x00,0x00,0x00,0x60
+
+# CHECK: v_sub_f32_e64 v0, |s0|, s0 ; encoding: [0x00,0x01,0x02,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x02,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sub_f32_e64 v0, s0, |s0| ; encoding: [0x00,0x02,0x02,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0x02,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sub_f32_e64 v0, |s0|, |s0| ; encoding: [0x00,0x03,0x02,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x03,0x02,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sub_f32_e64 v0, s0, s0 clamp ; encoding: [0x00,0x80,0x02,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x02,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sub_f32_e64 v0, s0, s0 mul:2 ; encoding: [0x00,0x00,0x02,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x02,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_sub_f32_e64 v0, s0, s0 mul:4 ; encoding: [0x00,0x00,0x02,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x02,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_sub_f32_e64 v0, s0, s0 div:2 ; encoding: [0x00,0x00,0x02,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x02,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_subrev_f32_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x06]
+0x00,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x07]
+0x00,0x00,0xfe,0x07
+
+# CHECK: v_subrev_f32_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x06]
+0x65,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x06]
+0x66,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x06]
+0x67,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x06]
+0x6a,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x06]
+0x6b,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x06]
+0x6c,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x06]
+0x6d,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x06]
+0x6e,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x06]
+0x6f,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x06]
+0x7b,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x06]
+0x7c,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x06]
+0x7e,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x06]
+0x7f,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x06]
+0x80,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x06]
+0xc1,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x06]
+0xf0,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x06]
+0xf7,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x06]
+0xfd,0x00,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x06,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x06,0x56,0x34,0x12,0xaf
+
+# CHECK: v_subrev_f32_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x06,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x06,0x73,0x72,0x71,0x3f
+
+# CHECK: v_subrev_f32_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x06]
+0x00,0x01,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x06]
+0xff,0x01,0x00,0x06
+
+# CHECK: v_subrev_f32_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x06]
+0x00,0xfe,0x01,0x06
+
+# CHECK: v_subrev_f32_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x03,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x03,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subrev_f32_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x03,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x03,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subrev_f32_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x03,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x03,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_subrev_f32_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x03,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x03,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_subrev_f32_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x03,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x03,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_subrev_f32_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x03,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x03,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_subrev_f32_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x03,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x03,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_subrev_f32_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x03,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x03,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_subrev_f32_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x03,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x03,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_subrev_f32_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x03,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x03,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_subrev_f32_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x03,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x03,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_subrev_f32_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x03,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x03,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_subrev_f32_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x03,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x03,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_subrev_f32_e64 v0, s0, scc ; encoding: [0x00,0x00,0x03,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x03,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_subrev_f32_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x03,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x03,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_subrev_f32_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x03,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x03,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_subrev_f32_e64 v0, -s0, s0 ; encoding: [0x00,0x00,0x03,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x03,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_subrev_f32_e64 v0, s0, -s0 ; encoding: [0x00,0x00,0x03,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0x03,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_subrev_f32_e64 v0, -s0, -s0 ; encoding: [0x00,0x00,0x03,0xd1,0x00,0x00,0x00,0x60]
+0x00,0x00,0x03,0xd1,0x00,0x00,0x00,0x60
+
+# CHECK: v_subrev_f32_e64 v0, |s0|, s0 ; encoding: [0x00,0x01,0x03,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x03,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subrev_f32_e64 v0, s0, |s0| ; encoding: [0x00,0x02,0x03,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0x03,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subrev_f32_e64 v0, |s0|, |s0| ; encoding: [0x00,0x03,0x03,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x03,0x03,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subrev_f32_e64 v0, s0, s0 clamp ; encoding: [0x00,0x80,0x03,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x03,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subrev_f32_e64 v0, s0, s0 mul:2 ; encoding: [0x00,0x00,0x03,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x03,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_subrev_f32_e64 v0, s0, s0 mul:4 ; encoding: [0x00,0x00,0x03,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x03,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_subrev_f32_e64 v0, s0, s0 div:2 ; encoding: [0x00,0x00,0x03,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x03,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_mul_legacy_f32_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x08]
+0x00,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x09]
+0x00,0x00,0xfe,0x09
+
+# CHECK: v_mul_legacy_f32_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x08]
+0x65,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x08]
+0x66,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x08]
+0x67,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x08]
+0x6a,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x08]
+0x6b,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x08]
+0x6c,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x08]
+0x6d,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x08]
+0x6e,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x08]
+0x6f,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x08]
+0x7b,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x08]
+0x7c,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x08]
+0x7e,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x08]
+0x7f,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x08]
+0x80,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x08]
+0xc1,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x08]
+0xf0,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x08]
+0xf7,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x08]
+0xfd,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x08,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x08,0x56,0x34,0x12,0xaf
+
+# CHECK: v_mul_legacy_f32_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x08,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x08,0x73,0x72,0x71,0x3f
+
+# CHECK: v_mul_legacy_f32_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x08]
+0x00,0x01,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x08]
+0xff,0x01,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x08]
+0x00,0xfe,0x01,0x08
+
+# CHECK: v_mul_legacy_f32_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x04,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x04,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x04,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x04,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x04,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x04,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x04,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x04,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x04,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x04,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x04,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x04,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x04,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x04,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x04,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x04,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x04,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x04,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x04,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x04,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x04,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x04,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x04,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x04,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x04,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x04,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, s0, scc ; encoding: [0x00,0x00,0x04,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x04,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x04,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x04,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x04,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x04,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, -s0, s0 ; encoding: [0x00,0x00,0x04,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x04,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_mul_legacy_f32_e64 v0, s0, -s0 ; encoding: [0x00,0x00,0x04,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0x04,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_mul_legacy_f32_e64 v0, -s0, -s0 ; encoding: [0x00,0x00,0x04,0xd1,0x00,0x00,0x00,0x60]
+0x00,0x00,0x04,0xd1,0x00,0x00,0x00,0x60
+
+# CHECK: v_mul_legacy_f32_e64 v0, |s0|, s0 ; encoding: [0x00,0x01,0x04,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x04,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, s0, |s0| ; encoding: [0x00,0x02,0x04,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0x04,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, |s0|, |s0| ; encoding: [0x00,0x03,0x04,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x03,0x04,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, s0, s0 clamp ; encoding: [0x00,0x80,0x04,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x04,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_legacy_f32_e64 v0, s0, s0 mul:2 ; encoding: [0x00,0x00,0x04,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x04,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_mul_legacy_f32_e64 v0, s0, s0 mul:4 ; encoding: [0x00,0x00,0x04,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x04,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_mul_legacy_f32_e64 v0, s0, s0 div:2 ; encoding: [0x00,0x00,0x04,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x04,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_mul_f32_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x0a]
+0x00,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x0b]
+0x00,0x00,0xfe,0x0b
+
+# CHECK: v_mul_f32_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x0a]
+0x65,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x0a]
+0x66,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x0a]
+0x67,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x0a]
+0x6a,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x0a]
+0x6b,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x0a]
+0x6c,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x0a]
+0x6d,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x0a]
+0x6e,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x0a]
+0x6f,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x0a]
+0x7b,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x0a]
+0x7c,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x0a]
+0x7e,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x0a]
+0x7f,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x0a]
+0x80,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x0a]
+0xc1,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x0a]
+0xf0,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x0a]
+0xf7,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x0a]
+0xfd,0x00,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x0a,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x0a,0x56,0x34,0x12,0xaf
+
+# CHECK: v_mul_f32_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x0a,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x0a,0x73,0x72,0x71,0x3f
+
+# CHECK: v_mul_f32_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x0a]
+0x00,0x01,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x0a]
+0xff,0x01,0x00,0x0a
+
+# CHECK: v_mul_f32_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x0a]
+0x00,0xfe,0x01,0x0a
+
+# CHECK: v_mul_f32_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x05,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x05,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_f32_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x05,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x05,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_f32_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x05,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x05,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_mul_f32_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x05,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x05,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mul_f32_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x05,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x05,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mul_f32_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x05,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x05,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mul_f32_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x05,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x05,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mul_f32_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x05,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x05,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_mul_f32_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x05,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x05,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_mul_f32_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x05,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x05,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_mul_f32_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x05,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x05,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_mul_f32_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x05,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x05,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mul_f32_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x05,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x05,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_mul_f32_e64 v0, s0, scc ; encoding: [0x00,0x00,0x05,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x05,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mul_f32_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x05,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x05,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_mul_f32_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x05,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x05,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_mul_f32_e64 v0, -s0, s0 ; encoding: [0x00,0x00,0x05,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x05,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_mul_f32_e64 v0, s0, -s0 ; encoding: [0x00,0x00,0x05,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0x05,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_mul_f32_e64 v0, -s0, -s0 ; encoding: [0x00,0x00,0x05,0xd1,0x00,0x00,0x00,0x60]
+0x00,0x00,0x05,0xd1,0x00,0x00,0x00,0x60
+
+# CHECK: v_mul_f32_e64 v0, |s0|, s0 ; encoding: [0x00,0x01,0x05,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x05,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_f32_e64 v0, s0, |s0| ; encoding: [0x00,0x02,0x05,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0x05,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_f32_e64 v0, |s0|, |s0| ; encoding: [0x00,0x03,0x05,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x03,0x05,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_f32_e64 v0, s0, s0 clamp ; encoding: [0x00,0x80,0x05,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x05,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_f32_e64 v0, s0, s0 mul:2 ; encoding: [0x00,0x00,0x05,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x05,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_mul_f32_e64 v0, s0, s0 mul:4 ; encoding: [0x00,0x00,0x05,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x05,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_mul_f32_e64 v0, s0, s0 div:2 ; encoding: [0x00,0x00,0x05,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x05,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_mul_i32_i24_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x0c]
+0x00,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x0d]
+0x00,0x00,0xfe,0x0d
+
+# CHECK: v_mul_i32_i24_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x0c]
+0x65,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x0c]
+0x66,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x0c]
+0x67,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x0c]
+0x6a,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x0c]
+0x6b,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x0c]
+0x6c,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x0c]
+0x6d,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x0c]
+0x6e,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x0c]
+0x6f,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x0c]
+0x7b,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x0c]
+0x7c,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x0c]
+0x7e,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x0c]
+0x7f,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x0c]
+0x80,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x0c]
+0xc1,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x0c]
+0xf0,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x0c]
+0xf7,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x0c]
+0xfd,0x00,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x0c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x0c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_mul_i32_i24_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x0c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x0c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_mul_i32_i24_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x0c]
+0x00,0x01,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x0c]
+0xff,0x01,0x00,0x0c
+
+# CHECK: v_mul_i32_i24_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x0c]
+0x00,0xfe,0x01,0x0c
+
+# CHECK: v_mul_i32_i24_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x06,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x06,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_i32_i24_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x06,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x06,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_i32_i24_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x06,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x06,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_mul_i32_i24_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x06,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x06,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mul_i32_i24_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x06,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x06,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mul_i32_i24_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x06,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x06,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mul_i32_i24_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x06,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x06,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mul_i32_i24_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x06,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x06,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_mul_i32_i24_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x06,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x06,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_mul_i32_i24_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x06,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x06,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_mul_i32_i24_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x06,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x06,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_mul_i32_i24_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x06,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x06,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mul_i32_i24_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x06,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x06,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_mul_i32_i24_e64 v0, s0, scc ; encoding: [0x00,0x00,0x06,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x06,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mul_i32_i24_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x06,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x06,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_mul_i32_i24_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x06,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x06,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x0e]
+0x00,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x0f]
+0x00,0x00,0xfe,0x0f
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x0e]
+0x65,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x0e]
+0x66,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x0e]
+0x67,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x0e]
+0x6a,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x0e]
+0x6b,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x0e]
+0x6c,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x0e]
+0x6d,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x0e]
+0x6e,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x0e]
+0x6f,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x0e]
+0x7b,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x0e]
+0x7c,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x0e]
+0x7e,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x0e]
+0x7f,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x0e]
+0x80,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x0e]
+0xc1,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x0e]
+0xf0,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x0e]
+0xf7,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x0e]
+0xfd,0x00,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x0e,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x0e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x0e,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x0e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x0e]
+0x00,0x01,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x0e]
+0xff,0x01,0x00,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x0e]
+0x00,0xfe,0x01,0x0e
+
+# CHECK: v_mul_hi_i32_i24_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x07,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x07,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_i32_i24_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x07,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x07,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_i32_i24_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x07,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x07,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_i32_i24_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x07,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x07,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_i32_i24_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x07,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x07,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_i32_i24_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x07,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x07,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_i32_i24_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x07,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x07,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_i32_i24_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x07,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x07,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_mul_hi_i32_i24_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x07,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x07,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_mul_hi_i32_i24_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x07,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x07,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x07,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x07,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x07,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x07,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x07,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x07,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_e64 v0, s0, scc ; encoding: [0x00,0x00,0x07,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x07,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x07,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x07,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_mul_hi_i32_i24_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x07,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x07,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_mul_u32_u24_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x10]
+0x00,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x11]
+0x00,0x00,0xfe,0x11
+
+# CHECK: v_mul_u32_u24_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x10]
+0x65,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x10]
+0x66,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x10]
+0x67,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x10]
+0x6a,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x10]
+0x6b,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x10]
+0x6c,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x10]
+0x6d,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x10]
+0x6e,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x10]
+0x6f,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x10]
+0x7b,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x10]
+0x7c,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x10]
+0x7e,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x10]
+0x7f,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x10]
+0x80,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x10]
+0xc1,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x10]
+0xf0,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x10]
+0xf7,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x10]
+0xfd,0x00,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x10,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x10,0x56,0x34,0x12,0xaf
+
+# CHECK: v_mul_u32_u24_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x10,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x10,0x73,0x72,0x71,0x3f
+
+# CHECK: v_mul_u32_u24_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x10]
+0x00,0x01,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x10]
+0xff,0x01,0x00,0x10
+
+# CHECK: v_mul_u32_u24_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x10]
+0x00,0xfe,0x01,0x10
+
+# CHECK: v_mul_u32_u24_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x08,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x08,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_u32_u24_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x08,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x08,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_u32_u24_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x08,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x08,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_mul_u32_u24_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x08,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x08,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mul_u32_u24_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x08,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x08,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mul_u32_u24_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x08,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x08,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mul_u32_u24_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x08,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x08,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mul_u32_u24_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x08,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x08,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_mul_u32_u24_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x08,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x08,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_mul_u32_u24_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x08,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x08,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_mul_u32_u24_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x08,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x08,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_mul_u32_u24_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x08,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x08,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mul_u32_u24_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x08,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x08,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_mul_u32_u24_e64 v0, s0, scc ; encoding: [0x00,0x00,0x08,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x08,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mul_u32_u24_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x08,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x08,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_mul_u32_u24_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x08,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x08,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x12]
+0x00,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x13]
+0x00,0x00,0xfe,0x13
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x12]
+0x65,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x12]
+0x66,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x12]
+0x67,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x12]
+0x6a,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x12]
+0x6b,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x12]
+0x6c,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x12]
+0x6d,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x12]
+0x6e,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x12]
+0x6f,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x12]
+0x7b,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x12]
+0x7c,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x12]
+0x7e,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x12]
+0x7f,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x12]
+0x80,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x12]
+0xc1,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x12]
+0xf0,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x12]
+0xf7,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x12]
+0xfd,0x00,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x12,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x12,0x56,0x34,0x12,0xaf
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x12,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x12,0x73,0x72,0x71,0x3f
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x12]
+0x00,0x01,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x12]
+0xff,0x01,0x00,0x12
+
+# CHECK: v_mul_hi_u32_u24_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x12]
+0x00,0xfe,0x01,0x12
+
+# CHECK: v_mul_hi_u32_u24_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x09,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x09,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_u32_u24_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x09,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x09,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_u32_u24_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x09,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x09,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_u32_u24_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x09,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x09,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_u32_u24_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x09,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x09,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_u32_u24_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x09,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x09,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_u32_u24_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x09,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x09,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_u32_u24_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x09,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x09,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_mul_hi_u32_u24_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x09,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x09,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_mul_hi_u32_u24_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x09,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x09,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x09,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x09,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x09,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x09,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x09,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x09,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_e64 v0, s0, scc ; encoding: [0x00,0x00,0x09,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x09,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x09,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x09,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_mul_hi_u32_u24_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x09,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x09,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_min_f32_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x14]
+0x00,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x15]
+0x00,0x00,0xfe,0x15
+
+# CHECK: v_min_f32_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x14]
+0x65,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x14]
+0x66,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x14]
+0x67,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x14]
+0x6a,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x14]
+0x6b,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x14]
+0x6c,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x14]
+0x6d,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x14]
+0x6e,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x14]
+0x6f,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x14]
+0x7b,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x14]
+0x7c,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x14]
+0x7e,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x14]
+0x7f,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x14]
+0x80,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x14]
+0xc1,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x14]
+0xf0,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x14]
+0xf7,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x14]
+0xfd,0x00,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x14,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x14,0x56,0x34,0x12,0xaf
+
+# CHECK: v_min_f32_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x14,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x14,0x73,0x72,0x71,0x3f
+
+# CHECK: v_min_f32_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x14]
+0x00,0x01,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x14]
+0xff,0x01,0x00,0x14
+
+# CHECK: v_min_f32_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x14]
+0x00,0xfe,0x01,0x14
+
+# CHECK: v_min_f32_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x0a,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_f32_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x0a,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x0a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_f32_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x0a,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x0a,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_min_f32_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x0a,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x0a,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_min_f32_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x0a,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x0a,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_min_f32_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x0a,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x0a,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_min_f32_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x0a,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x0a,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_min_f32_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x0a,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x0a,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_min_f32_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x0a,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x0a,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_min_f32_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x0a,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x0a,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_min_f32_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x0a,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x0a,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_min_f32_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x0a,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x0a,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_min_f32_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x0a,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x0a,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_min_f32_e64 v0, s0, scc ; encoding: [0x00,0x00,0x0a,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x0a,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_min_f32_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x0a,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x0a,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_min_f32_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x0a,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x0a,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_min_f32_e64 v0, -s0, s0 ; encoding: [0x00,0x00,0x0a,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x0a,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_min_f32_e64 v0, s0, -s0 ; encoding: [0x00,0x00,0x0a,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0x0a,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_min_f32_e64 v0, -s0, -s0 ; encoding: [0x00,0x00,0x0a,0xd1,0x00,0x00,0x00,0x60]
+0x00,0x00,0x0a,0xd1,0x00,0x00,0x00,0x60
+
+# CHECK: v_min_f32_e64 v0, |s0|, s0 ; encoding: [0x00,0x01,0x0a,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x0a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_f32_e64 v0, s0, |s0| ; encoding: [0x00,0x02,0x0a,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0x0a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_f32_e64 v0, |s0|, |s0| ; encoding: [0x00,0x03,0x0a,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x03,0x0a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_f32_e64 v0, s0, s0 clamp ; encoding: [0x00,0x80,0x0a,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x0a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_f32_e64 v0, s0, s0 mul:2 ; encoding: [0x00,0x00,0x0a,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x0a,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_min_f32_e64 v0, s0, s0 mul:4 ; encoding: [0x00,0x00,0x0a,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x0a,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_min_f32_e64 v0, s0, s0 div:2 ; encoding: [0x00,0x00,0x0a,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x0a,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_max_f32_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x16]
+0x00,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x17]
+0x00,0x00,0xfe,0x17
+
+# CHECK: v_max_f32_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x16]
+0x65,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x16]
+0x66,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x16]
+0x67,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x16]
+0x6a,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x16]
+0x6b,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x16]
+0x6c,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x16]
+0x6d,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x16]
+0x6e,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x16]
+0x6f,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x16]
+0x7b,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x16]
+0x7c,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x16]
+0x7e,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x16]
+0x7f,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x16]
+0x80,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x16]
+0xc1,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x16]
+0xf0,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x16]
+0xf7,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x16]
+0xfd,0x00,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x16,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x16,0x56,0x34,0x12,0xaf
+
+# CHECK: v_max_f32_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x16,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x16,0x73,0x72,0x71,0x3f
+
+# CHECK: v_max_f32_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x16]
+0x00,0x01,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x16]
+0xff,0x01,0x00,0x16
+
+# CHECK: v_max_f32_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x16]
+0x00,0xfe,0x01,0x16
+
+# CHECK: v_max_f32_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x0b,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_f32_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x0b,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x0b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_f32_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x0b,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x0b,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_max_f32_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x0b,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x0b,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_max_f32_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x0b,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x0b,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_max_f32_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x0b,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x0b,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_max_f32_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x0b,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x0b,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_max_f32_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x0b,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x0b,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_max_f32_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x0b,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x0b,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_max_f32_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x0b,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x0b,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_max_f32_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x0b,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x0b,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_max_f32_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x0b,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x0b,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_max_f32_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x0b,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x0b,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_max_f32_e64 v0, s0, scc ; encoding: [0x00,0x00,0x0b,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x0b,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_max_f32_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x0b,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x0b,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_max_f32_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x0b,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x0b,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_max_f32_e64 v0, -s0, s0 ; encoding: [0x00,0x00,0x0b,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x0b,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_max_f32_e64 v0, s0, -s0 ; encoding: [0x00,0x00,0x0b,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0x0b,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_max_f32_e64 v0, -s0, -s0 ; encoding: [0x00,0x00,0x0b,0xd1,0x00,0x00,0x00,0x60]
+0x00,0x00,0x0b,0xd1,0x00,0x00,0x00,0x60
+
+# CHECK: v_max_f32_e64 v0, |s0|, s0 ; encoding: [0x00,0x01,0x0b,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x0b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_f32_e64 v0, s0, |s0| ; encoding: [0x00,0x02,0x0b,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0x0b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_f32_e64 v0, |s0|, |s0| ; encoding: [0x00,0x03,0x0b,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x03,0x0b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_f32_e64 v0, s0, s0 clamp ; encoding: [0x00,0x80,0x0b,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x0b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_f32_e64 v0, s0, s0 mul:2 ; encoding: [0x00,0x00,0x0b,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0x0b,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_max_f32_e64 v0, s0, s0 mul:4 ; encoding: [0x00,0x00,0x0b,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0x0b,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_max_f32_e64 v0, s0, s0 div:2 ; encoding: [0x00,0x00,0x0b,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0x0b,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x18]
+0x00,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x19]
+0x00,0x00,0xfe,0x19
+
+# CHECK: v_min_i32_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x18]
+0x65,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x18]
+0x66,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x18]
+0x67,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x18]
+0x6a,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x18]
+0x6b,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x18]
+0x6c,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x18]
+0x6d,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x18]
+0x6e,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x18]
+0x6f,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x18]
+0x7b,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x18]
+0x7c,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x18]
+0x7e,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x18]
+0x7f,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x18]
+0x80,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x18]
+0xc1,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x18]
+0xf0,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x18]
+0xf7,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x18]
+0xfd,0x00,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf
+
+# CHECK: v_min_i32_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x18,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x18,0x73,0x72,0x71,0x3f
+
+# CHECK: v_min_i32_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x18]
+0x00,0x01,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x18]
+0xff,0x01,0x00,0x18
+
+# CHECK: v_min_i32_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x18]
+0x00,0xfe,0x01,0x18
+
+# CHECK: v_min_i32_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x0c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_i32_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x0c,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x0c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_i32_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x0c,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_min_i32_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x0c,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_min_i32_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x0c,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_min_i32_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x0c,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_min_i32_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x0c,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x0c,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_min_i32_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x0c,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x0c,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_min_i32_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x0c,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x0c,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_min_i32_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x0c,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x0c,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_min_i32_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x0c,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x0c,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_min_i32_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x0c,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x0c,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_min_i32_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x0c,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x0c,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_min_i32_e64 v0, s0, scc ; encoding: [0x00,0x00,0x0c,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x0c,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_min_i32_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x0c,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x0c,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_min_i32_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x0c,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x0c,0xd1,0x00,0xfe,0x03,0x00
+
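+# NOTE: the pairs in this file follow the VI (GCN3) encoding layout. A VOP2
+# ("_e32") instruction is one little-endian dword: src0 in bits [8:0] (SGPRs,
+# special registers, inline constants 0x80-0xfd, or 0xff for a trailing
+# 32-bit literal), vsrc1 in [16:9], vdst in [24:17], opcode in [30:25] --
+# e.g. v_min_i32 is opcode 0x0c, so "v_min_i32_e32 v0, s0, v0" is dword
+# 0x18000000. The "_e64" (VOP3) form of the same op carries opcode + 0x100
+# in bits [25:16] of its first dword, hence the 0xd10c words above.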
+# CHECK: v_max_i32_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x1a]
+0x00,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x1b]
+0x00,0x00,0xfe,0x1b
+
+# CHECK: v_max_i32_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x1a]
+0x65,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x1a]
+0x66,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x1a]
+0x67,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x1a]
+0x6a,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x1a]
+0x6b,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x1a]
+0x6c,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x1a]
+0x6d,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x1a]
+0x6e,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x1a]
+0x6f,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x1a]
+0x7b,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x1a]
+0x7c,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x1a]
+0x7e,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x1a]
+0x7f,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x1a]
+0x80,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x1a]
+0xc1,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x1a]
+0xf0,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x1a]
+0xf7,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x1a]
+0xfd,0x00,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x1a,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x1a,0x56,0x34,0x12,0xaf
+
+# CHECK: v_max_i32_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x1a,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x1a,0x73,0x72,0x71,0x3f
+
+# CHECK: v_max_i32_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x1a]
+0x00,0x01,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x1a]
+0xff,0x01,0x00,0x1a
+
+# CHECK: v_max_i32_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x1a]
+0x00,0xfe,0x01,0x1a
+
+# CHECK: v_max_i32_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x0d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_i32_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x0d,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x0d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_i32_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x0d,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x0d,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_max_i32_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x0d,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x0d,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_max_i32_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x0d,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x0d,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_max_i32_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x0d,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x0d,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_max_i32_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x0d,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x0d,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_max_i32_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x0d,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x0d,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_max_i32_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x0d,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x0d,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_max_i32_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x0d,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x0d,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_max_i32_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x0d,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x0d,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_max_i32_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x0d,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x0d,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_max_i32_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x0d,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x0d,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_max_i32_e64 v0, s0, scc ; encoding: [0x00,0x00,0x0d,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x0d,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_max_i32_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x0d,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x0d,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_max_i32_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x0d,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x0d,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_min_u32_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x1c]
+0x00,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x1d]
+0x00,0x00,0xfe,0x1d
+
+# CHECK: v_min_u32_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x1c]
+0x65,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x1c]
+0x66,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x1c]
+0x67,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x1c]
+0x6a,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x1c]
+0x6b,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x1c]
+0x6c,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x1c]
+0x6d,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x1c]
+0x6e,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x1c]
+0x6f,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x1c]
+0x7b,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x1c]
+0x7c,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x1c]
+0x7e,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x1c]
+0x7f,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x1c]
+0x80,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x1c]
+0xc1,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x1c]
+0xf0,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x1c]
+0xf7,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x1c]
+0xfd,0x00,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x1c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x1c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_min_u32_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x1c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x1c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_min_u32_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x1c]
+0x00,0x01,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x1c]
+0xff,0x01,0x00,0x1c
+
+# CHECK: v_min_u32_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x1c]
+0x00,0xfe,0x01,0x1c
+
+# CHECK: v_min_u32_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x0e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_u32_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x0e,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x0e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_u32_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x0e,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x0e,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_min_u32_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x0e,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x0e,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_min_u32_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x0e,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x0e,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_min_u32_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x0e,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x0e,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_min_u32_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x0e,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x0e,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_min_u32_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x0e,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x0e,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_min_u32_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x0e,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x0e,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_min_u32_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x0e,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x0e,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_min_u32_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x0e,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x0e,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_min_u32_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x0e,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x0e,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_min_u32_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x0e,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x0e,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_min_u32_e64 v0, s0, scc ; encoding: [0x00,0x00,0x0e,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x0e,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_min_u32_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x0e,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x0e,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_min_u32_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x0e,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x0e,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_max_u32_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x1e]
+0x00,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x1f]
+0x00,0x00,0xfe,0x1f
+
+# CHECK: v_max_u32_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x1e]
+0x65,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x1e]
+0x66,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x1e]
+0x67,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x1e]
+0x6a,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x1e]
+0x6b,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x1e]
+0x6c,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x1e]
+0x6d,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x1e]
+0x6e,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x1e]
+0x6f,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x1e]
+0x7b,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x1e]
+0x7c,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x1e]
+0x7e,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x1e]
+0x7f,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x1e]
+0x80,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x1e]
+0xc1,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x1e]
+0xf0,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x1e]
+0xf7,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x1e]
+0xfd,0x00,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x1e,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x1e,0x56,0x34,0x12,0xaf
+
+# CHECK: v_max_u32_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x1e,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x1e,0x73,0x72,0x71,0x3f
+
+# CHECK: v_max_u32_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x1e]
+0x00,0x01,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x1e]
+0xff,0x01,0x00,0x1e
+
+# CHECK: v_max_u32_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x1e]
+0x00,0xfe,0x01,0x1e
+
+# CHECK: v_max_u32_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x0f,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x0f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_u32_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x0f,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x0f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_u32_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x0f,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x0f,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_max_u32_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x0f,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x0f,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_max_u32_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x0f,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x0f,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_max_u32_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x0f,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x0f,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_max_u32_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x0f,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x0f,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_max_u32_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x0f,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x0f,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_max_u32_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x0f,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x0f,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_max_u32_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x0f,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x0f,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_max_u32_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x0f,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x0f,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_max_u32_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x0f,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x0f,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_max_u32_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x0f,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x0f,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_max_u32_e64 v0, s0, scc ; encoding: [0x00,0x00,0x0f,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x0f,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_max_u32_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x0f,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x0f,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_max_u32_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x0f,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x0f,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_lshrrev_b32_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x20]
+0x00,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x21]
+0x00,0x00,0xfe,0x21
+
+# CHECK: v_lshrrev_b32_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x20]
+0x65,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x20]
+0x66,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x20]
+0x67,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x20]
+0x6a,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x20]
+0x6b,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x20]
+0x6c,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x20]
+0x6d,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x20]
+0x6e,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x20]
+0x6f,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x20]
+0x7b,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x20]
+0x7c,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x20]
+0x7e,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x20]
+0x7f,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x20]
+0x80,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x20]
+0xc1,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x20]
+0xf0,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x20]
+0xf7,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x20]
+0xfd,0x00,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x20,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x20,0x56,0x34,0x12,0xaf
+
+# CHECK: v_lshrrev_b32_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x20,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x20,0x73,0x72,0x71,0x3f
+
+# CHECK: v_lshrrev_b32_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x20]
+0x00,0x01,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x20]
+0xff,0x01,0x00,0x20
+
+# CHECK: v_lshrrev_b32_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x20]
+0x00,0xfe,0x01,0x20
+
+# CHECK: v_lshrrev_b32_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x10,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x10,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b32_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x10,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x10,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b32_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x10,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x10,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b32_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x10,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x10,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b32_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x10,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x10,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b32_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x10,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x10,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b32_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x10,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x10,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b32_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x10,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x10,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_lshrrev_b32_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x10,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x10,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_lshrrev_b32_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x10,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x10,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_lshrrev_b32_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x10,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x10,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_lshrrev_b32_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x10,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x10,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_lshrrev_b32_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x10,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x10,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_lshrrev_b32_e64 v0, s0, scc ; encoding: [0x00,0x00,0x10,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x10,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_lshrrev_b32_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x10,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x10,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_lshrrev_b32_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x10,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x10,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_ashrrev_i32_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x22]
+0x00,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x23]
+0x00,0x00,0xfe,0x23
+
+# CHECK: v_ashrrev_i32_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x22]
+0x65,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x22]
+0x66,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x22]
+0x67,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x22]
+0x6a,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x22]
+0x6b,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x22]
+0x6c,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x22]
+0x6d,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x22]
+0x6e,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x22]
+0x6f,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x22]
+0x7b,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x22]
+0x7c,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x22]
+0x7e,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x22]
+0x7f,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x22]
+0x80,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x22]
+0xc1,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x22]
+0xf0,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x22]
+0xf7,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x22]
+0xfd,0x00,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x22,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x22,0x56,0x34,0x12,0xaf
+
+# CHECK: v_ashrrev_i32_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x22,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x22,0x73,0x72,0x71,0x3f
+
+# CHECK: v_ashrrev_i32_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x22]
+0x00,0x01,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x22]
+0xff,0x01,0x00,0x22
+
+# CHECK: v_ashrrev_i32_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x22]
+0x00,0xfe,0x01,0x22
+
+# CHECK: v_ashrrev_i32_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x11,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x11,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i32_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x11,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x11,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i32_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x11,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x11,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i32_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x11,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x11,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i32_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x11,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x11,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i32_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x11,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x11,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i32_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x11,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x11,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i32_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x11,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x11,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_ashrrev_i32_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x11,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x11,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_ashrrev_i32_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x11,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x11,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_ashrrev_i32_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x11,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x11,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_ashrrev_i32_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x11,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x11,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_ashrrev_i32_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x11,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x11,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_ashrrev_i32_e64 v0, s0, scc ; encoding: [0x00,0x00,0x11,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x11,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_ashrrev_i32_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x11,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x11,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_ashrrev_i32_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x11,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x11,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_lshlrev_b32_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x24]
+0x00,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x25]
+0x00,0x00,0xfe,0x25
+
+# CHECK: v_lshlrev_b32_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x24]
+0x65,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x24]
+0x66,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x24]
+0x67,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x24]
+0x6a,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x24]
+0x6b,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x24]
+0x6c,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x24]
+0x6d,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x24]
+0x6e,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x24]
+0x6f,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x24]
+0x7b,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x24]
+0x7c,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x24]
+0x7e,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x24]
+0x7f,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x24]
+0x80,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x24]
+0xc1,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x24]
+0xf0,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x24]
+0xf7,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x24]
+0xfd,0x00,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x24,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x24,0x56,0x34,0x12,0xaf
+
+# CHECK: v_lshlrev_b32_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x24,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x24,0x73,0x72,0x71,0x3f
+
+# CHECK: v_lshlrev_b32_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x24]
+0x00,0x01,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x24]
+0xff,0x01,0x00,0x24
+
+# CHECK: v_lshlrev_b32_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x24]
+0x00,0xfe,0x01,0x24
+
+# CHECK: v_lshlrev_b32_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x12,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x12,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b32_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x12,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x12,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b32_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x12,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x12,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b32_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x12,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x12,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b32_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x12,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x12,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b32_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x12,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x12,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b32_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x12,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x12,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b32_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x12,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x12,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_lshlrev_b32_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x12,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x12,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_lshlrev_b32_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x12,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x12,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_lshlrev_b32_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x12,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x12,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_lshlrev_b32_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x12,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x12,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_lshlrev_b32_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x12,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x12,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_lshlrev_b32_e64 v0, s0, scc ; encoding: [0x00,0x00,0x12,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x12,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_lshlrev_b32_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x12,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x12,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_lshlrev_b32_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x12,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x12,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_and_b32_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x26]
+0x00,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x27]
+0x00,0x00,0xfe,0x27
+
+# CHECK: v_and_b32_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x26]
+0x65,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x26]
+0x66,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x26]
+0x67,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x26]
+0x6a,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x26]
+0x6b,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x26]
+0x6c,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x26]
+0x6d,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x26]
+0x6e,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x26]
+0x6f,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x26]
+0x7b,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x26]
+0x7c,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x26]
+0x7e,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x26]
+0x7f,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x26]
+0x80,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x26]
+0xc1,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x26]
+0xf0,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x26]
+0xf7,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x26]
+0xfd,0x00,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x26,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x26,0x56,0x34,0x12,0xaf
+
+# CHECK: v_and_b32_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x26,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x26,0x73,0x72,0x71,0x3f
+
+# CHECK: v_and_b32_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x26]
+0x00,0x01,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x26]
+0xff,0x01,0x00,0x26
+
+# CHECK: v_and_b32_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x26]
+0x00,0xfe,0x01,0x26
+
+# CHECK: v_and_b32_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x13,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x13,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_and_b32_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x13,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x13,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_and_b32_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x13,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x13,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_and_b32_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x13,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x13,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_and_b32_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x13,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x13,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_and_b32_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x13,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x13,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_and_b32_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x13,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x13,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_and_b32_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x13,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x13,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_and_b32_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x13,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x13,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_and_b32_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x13,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x13,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_and_b32_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x13,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x13,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_and_b32_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x13,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x13,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_and_b32_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x13,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x13,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_and_b32_e64 v0, s0, scc ; encoding: [0x00,0x00,0x13,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x13,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_and_b32_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x13,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x13,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_and_b32_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x13,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x13,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_or_b32_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x28]
+0x00,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x29]
+0x00,0x00,0xfe,0x29
+
+# CHECK: v_or_b32_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x28]
+0x65,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x28]
+0x66,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x28]
+0x67,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x28]
+0x6a,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x28]
+0x6b,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x28]
+0x6c,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x28]
+0x6d,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x28]
+0x6e,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x28]
+0x6f,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x28]
+0x7b,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x28]
+0x7c,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x28]
+0x7e,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x28]
+0x7f,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x28]
+0x80,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x28]
+0xc1,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x28]
+0xf0,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x28]
+0xf7,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x28]
+0xfd,0x00,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x28,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x28,0x56,0x34,0x12,0xaf
+
+# CHECK: v_or_b32_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x28,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x28,0x73,0x72,0x71,0x3f
+
+# CHECK: v_or_b32_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x28]
+0x00,0x01,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x28]
+0xff,0x01,0x00,0x28
+
+# CHECK: v_or_b32_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x28]
+0x00,0xfe,0x01,0x28
+
+# CHECK: v_or_b32_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x14,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x14,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_or_b32_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x14,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x14,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_or_b32_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x14,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x14,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_or_b32_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x14,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x14,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_or_b32_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x14,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x14,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_or_b32_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x14,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x14,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_or_b32_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x14,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x14,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_or_b32_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x14,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x14,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_or_b32_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x14,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x14,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_or_b32_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x14,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x14,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_or_b32_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x14,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x14,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_or_b32_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x14,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x14,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_or_b32_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x14,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x14,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_or_b32_e64 v0, s0, scc ; encoding: [0x00,0x00,0x14,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x14,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_or_b32_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x14,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x14,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_or_b32_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x14,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x14,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_xor_b32_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x2a]
+0x00,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x2b]
+0x00,0x00,0xfe,0x2b
+
+# CHECK: v_xor_b32_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x2a]
+0x65,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x2a]
+0x66,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x2a]
+0x67,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x2a]
+0x6a,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x2a]
+0x6b,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x2a]
+0x6c,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x2a]
+0x6d,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x2a]
+0x6e,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x2a]
+0x6f,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x2a]
+0x7b,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x2a]
+0x7c,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x2a]
+0x7e,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x2a]
+0x7f,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x2a]
+0x80,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x2a]
+0xc1,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x2a]
+0xf0,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x2a]
+0xf7,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x2a]
+0xfd,0x00,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x2a,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x2a,0x56,0x34,0x12,0xaf
+
+# CHECK: v_xor_b32_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x2a,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x2a,0x73,0x72,0x71,0x3f
+
+# CHECK: v_xor_b32_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x2a]
+0x00,0x01,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x2a]
+0xff,0x01,0x00,0x2a
+
+# CHECK: v_xor_b32_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x2a]
+0x00,0xfe,0x01,0x2a
+
+# CHECK: v_xor_b32_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x15,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x15,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_xor_b32_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x15,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x15,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_xor_b32_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x15,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x15,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_xor_b32_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x15,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x15,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_xor_b32_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x15,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x15,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_xor_b32_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x15,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x15,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_xor_b32_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x15,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x15,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_xor_b32_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x15,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x15,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_xor_b32_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x15,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x15,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_xor_b32_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x15,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x15,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_xor_b32_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x15,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x15,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_xor_b32_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x15,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x15,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_xor_b32_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x15,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x15,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_xor_b32_e64 v0, s0, scc ; encoding: [0x00,0x00,0x15,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x15,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_xor_b32_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x15,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x15,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_xor_b32_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x15,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x15,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_mac_f32_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x2c]
+0x00,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x2d]
+0x00,0x00,0xfe,0x2d
+
+# CHECK: v_mac_f32_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x2c]
+0x65,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x2c]
+0x66,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x2c]
+0x67,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x2c]
+0x6a,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x2c]
+0x6b,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x2c]
+0x6c,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x2c]
+0x6d,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x2c]
+0x6e,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x2c]
+0x6f,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x2c]
+0x7b,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x2c]
+0x7c,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x2c]
+0x7e,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x2c]
+0x7f,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x2c]
+0x80,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x2c]
+0xc1,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x2c]
+0xf0,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x2c]
+0xf7,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x2c]
+0xfd,0x00,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, 0xaf123456, v0 ; encoding: [0xff,0x00,0x00,0x2c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x00,0x2c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_mac_f32_e32 v0, 0x3f717273, v0 ; encoding: [0xff,0x00,0x00,0x2c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x00,0x2c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_mac_f32_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x2c]
+0x00,0x01,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x2c]
+0xff,0x01,0x00,0x2c
+
+# CHECK: v_mac_f32_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x2c]
+0x00,0xfe,0x01,0x2c
+
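+# NOTE: the carry ops below use the VOP3b variant for their "_e64" form: the
+# scalar carry-out destination sits in bits [14:8] of the first dword (second
+# byte: 0x02 = s[2:3], 0x6a = vcc, 0x7a = ttmp[10:11]), while the "_e32" form
+# implicitly reads and writes vcc.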
+# CHECK: v_addc_u32_e32 v0, vcc, vcc_lo, v0, vcc ; encoding: [0x6a,0x00,0x00,0x38]
+0x6a,0x00,0x00,0x38
+
+# CHECK: v_addc_u32_e32 v255, vcc, vcc_lo, v0, vcc ; encoding: [0x6a,0x00,0xfe,0x39]
+0x6a,0x00,0xfe,0x39
+
+# CHECK: v_addc_u32_e32 v0, vcc, vcc_hi, v0, vcc ; encoding: [0x6b,0x00,0x00,0x38]
+0x6b,0x00,0x00,0x38
+
+# CHECK: v_addc_u32_e32 v0, vcc, 0, v0, vcc ; encoding: [0x80,0x00,0x00,0x38]
+0x80,0x00,0x00,0x38
+
+# CHECK: v_addc_u32_e32 v0, vcc, -1, v0, vcc ; encoding: [0xc1,0x00,0x00,0x38]
+0xc1,0x00,0x00,0x38
+
+# CHECK: v_addc_u32_e32 v0, vcc, 0.5, v0, vcc ; encoding: [0xf0,0x00,0x00,0x38]
+0xf0,0x00,0x00,0x38
+
+# CHECK: v_addc_u32_e32 v0, vcc, -4.0, v0, vcc ; encoding: [0xf7,0x00,0x00,0x38]
+0xf7,0x00,0x00,0x38
+
+# CHECK: v_addc_u32_e32 v0, vcc, scc, v0, vcc ; encoding: [0xfd,0x00,0x00,0x38]
+0xfd,0x00,0x00,0x38
+
+# CHECK: v_addc_u32_e32 v0, vcc, v0, v0, vcc ; encoding: [0x00,0x01,0x00,0x38]
+0x00,0x01,0x00,0x38
+
+# CHECK: v_addc_u32_e32 v0, vcc, v255, v0, vcc ; encoding: [0xff,0x01,0x00,0x38]
+0xff,0x01,0x00,0x38
+
+# CHECK: v_addc_u32_e32 v0, vcc, vcc_lo, v255, vcc ; encoding: [0x6a,0xfe,0x01,0x38]
+0x6a,0xfe,0x01,0x38
+
+# CHECK: v_addc_u32_e64 v0, s[0:1], s0, s0, s[0:1] ; encoding: [0x00,0x00,0x1c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x1c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_addc_u32_e64 v255, s[0:1], s0, s0, s[0:1] ; encoding: [0xff,0x00,0x1c,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x1c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_addc_u32_e64 v0, s[2:3], s0, s0, s[0:1] ; encoding: [0x00,0x02,0x1c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0x1c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_addc_u32_e64 v0, s[100:101], s0, s0, s[0:1] ; encoding: [0x00,0x64,0x1c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x64,0x1c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_addc_u32_e64 v0, flat_scratch, s0, s0, s[0:1] ; encoding: [0x00,0x66,0x1c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x66,0x1c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_addc_u32_e64 v0, vcc, s0, s0, s[0:1] ; encoding: [0x00,0x6a,0x1c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x6a,0x1c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_addc_u32_e64 v0, tba, s0, s0, s[0:1] ; encoding: [0x00,0x6c,0x1c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x6c,0x1c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_addc_u32_e64 v0, tma, s0, s0, s[0:1] ; encoding: [0x00,0x6e,0x1c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x6e,0x1c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_addc_u32_e64 v0, ttmp[10:11], s0, s0, s[0:1] ; encoding: [0x00,0x7a,0x1c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x7a,0x1c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_addc_u32_e64 v0, s[0:1], 0, s0, s[0:1] ; encoding: [0x00,0x00,0x1c,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x1c,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_addc_u32_e64 v0, s[0:1], -1, s0, s[0:1] ; encoding: [0x00,0x00,0x1c,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x1c,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_addc_u32_e64 v0, s[0:1], 0.5, s0, s[0:1] ; encoding: [0x00,0x00,0x1c,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x1c,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_addc_u32_e64 v0, s[0:1], -4.0, s0, s[0:1] ; encoding: [0x00,0x00,0x1c,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x1c,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_addc_u32_e64 v0, s[0:1], scc, s0, s[0:1] ; encoding: [0x00,0x00,0x1c,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x1c,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_addc_u32_e64 v0, s[0:1], v0, s0, s[0:1] ; encoding: [0x00,0x00,0x1c,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x1c,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_addc_u32_e64 v0, s[0:1], v255, s0, s[0:1] ; encoding: [0x00,0x00,0x1c,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x1c,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_addc_u32_e64 v0, s[0:1], s0, 0, s[0:1] ; encoding: [0x00,0x00,0x1c,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x1c,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_addc_u32_e64 v0, s[0:1], s0, -1, s[0:1] ; encoding: [0x00,0x00,0x1c,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x1c,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_addc_u32_e64 v0, s[0:1], s0, 0.5, s[0:1] ; encoding: [0x00,0x00,0x1c,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x1c,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_addc_u32_e64 v0, s[0:1], s0, -4.0, s[0:1] ; encoding: [0x00,0x00,0x1c,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x1c,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_addc_u32_e64 v0, s[0:1], s0, scc, s[0:1] ; encoding: [0x00,0x00,0x1c,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x1c,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_addc_u32_e64 v0, s[0:1], s0, v0, s[0:1] ; encoding: [0x00,0x00,0x1c,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x1c,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_addc_u32_e64 v0, s[0:1], s0, v255, s[0:1] ; encoding: [0x00,0x00,0x1c,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x1c,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_subb_u32_e32 v0, vcc, vcc_lo, v0, vcc ; encoding: [0x6a,0x00,0x00,0x3a]
+0x6a,0x00,0x00,0x3a
+
+# CHECK: v_subb_u32_e32 v255, vcc, vcc_lo, v0, vcc ; encoding: [0x6a,0x00,0xfe,0x3b]
+0x6a,0x00,0xfe,0x3b
+
+# CHECK: v_subb_u32_e32 v0, vcc, vcc_hi, v0, vcc ; encoding: [0x6b,0x00,0x00,0x3a]
+0x6b,0x00,0x00,0x3a
+
+# CHECK: v_subb_u32_e32 v0, vcc, 0, v0, vcc ; encoding: [0x80,0x00,0x00,0x3a]
+0x80,0x00,0x00,0x3a
+
+# CHECK: v_subb_u32_e32 v0, vcc, -1, v0, vcc ; encoding: [0xc1,0x00,0x00,0x3a]
+0xc1,0x00,0x00,0x3a
+
+# CHECK: v_subb_u32_e32 v0, vcc, 0.5, v0, vcc ; encoding: [0xf0,0x00,0x00,0x3a]
+0xf0,0x00,0x00,0x3a
+
+# CHECK: v_subb_u32_e32 v0, vcc, -4.0, v0, vcc ; encoding: [0xf7,0x00,0x00,0x3a]
+0xf7,0x00,0x00,0x3a
+
+# CHECK: v_subb_u32_e32 v0, vcc, scc, v0, vcc ; encoding: [0xfd,0x00,0x00,0x3a]
+0xfd,0x00,0x00,0x3a
+
+# CHECK: v_subb_u32_e32 v0, vcc, v0, v0, vcc ; encoding: [0x00,0x01,0x00,0x3a]
+0x00,0x01,0x00,0x3a
+
+# CHECK: v_subb_u32_e32 v0, vcc, v255, v0, vcc ; encoding: [0xff,0x01,0x00,0x3a]
+0xff,0x01,0x00,0x3a
+
+# CHECK: v_subb_u32_e32 v0, vcc, vcc_lo, v255, vcc ; encoding: [0x6a,0xfe,0x01,0x3a]
+0x6a,0xfe,0x01,0x3a
+
+# CHECK: v_subb_u32_e64 v0, s[0:1], s0, s0, s[0:1] ; encoding: [0x00,0x00,0x1d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x1d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subb_u32_e64 v255, s[0:1], s0, s0, s[0:1] ; encoding: [0xff,0x00,0x1d,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x1d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subb_u32_e64 v0, s[2:3], s0, s0, s[0:1] ; encoding: [0x00,0x02,0x1d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0x1d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subb_u32_e64 v0, s[100:101], s0, s0, s[0:1] ; encoding: [0x00,0x64,0x1d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x64,0x1d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subb_u32_e64 v0, flat_scratch, s0, s0, s[0:1] ; encoding: [0x00,0x66,0x1d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x66,0x1d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subb_u32_e64 v0, vcc, s0, s0, s[0:1] ; encoding: [0x00,0x6a,0x1d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x6a,0x1d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subb_u32_e64 v0, tba, s0, s0, s[0:1] ; encoding: [0x00,0x6c,0x1d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x6c,0x1d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subb_u32_e64 v0, tma, s0, s0, s[0:1] ; encoding: [0x00,0x6e,0x1d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x6e,0x1d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subb_u32_e64 v0, ttmp[10:11], s0, s0, s[0:1] ; encoding: [0x00,0x7a,0x1d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x7a,0x1d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subb_u32_e64 v0, s[0:1], 0, s0, s[0:1] ; encoding: [0x00,0x00,0x1d,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x1d,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_subb_u32_e64 v0, s[0:1], -1, s0, s[0:1] ; encoding: [0x00,0x00,0x1d,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x1d,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_subb_u32_e64 v0, s[0:1], 0.5, s0, s[0:1] ; encoding: [0x00,0x00,0x1d,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x1d,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_subb_u32_e64 v0, s[0:1], -4.0, s0, s[0:1] ; encoding: [0x00,0x00,0x1d,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x1d,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_subb_u32_e64 v0, s[0:1], scc, s0, s[0:1] ; encoding: [0x00,0x00,0x1d,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x1d,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_subb_u32_e64 v0, s[0:1], v0, s0, s[0:1] ; encoding: [0x00,0x00,0x1d,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x1d,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_subb_u32_e64 v0, s[0:1], v255, s0, s[0:1] ; encoding: [0x00,0x00,0x1d,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x1d,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_subb_u32_e64 v0, s[0:1], s0, 0, s[0:1] ; encoding: [0x00,0x00,0x1d,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x1d,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_subb_u32_e64 v0, s[0:1], s0, -1, s[0:1] ; encoding: [0x00,0x00,0x1d,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x1d,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_subb_u32_e64 v0, s[0:1], s0, 0.5, s[0:1] ; encoding: [0x00,0x00,0x1d,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x1d,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_subb_u32_e64 v0, s[0:1], s0, -4.0, s[0:1] ; encoding: [0x00,0x00,0x1d,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x1d,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_subb_u32_e64 v0, s[0:1], s0, scc, s[0:1] ; encoding: [0x00,0x00,0x1d,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x1d,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_subb_u32_e64 v0, s[0:1], s0, v0, s[0:1] ; encoding: [0x00,0x00,0x1d,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x1d,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_subb_u32_e64 v0, s[0:1], s0, v255, s[0:1] ; encoding: [0x00,0x00,0x1d,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x1d,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_subbrev_u32_e32 v0, vcc, vcc_lo, v0, vcc ; encoding: [0x6a,0x00,0x00,0x3c]
+0x6a,0x00,0x00,0x3c
+
+# CHECK: v_subbrev_u32_e32 v255, vcc, vcc_lo, v0, vcc ; encoding: [0x6a,0x00,0xfe,0x3d]
+0x6a,0x00,0xfe,0x3d
+
+# CHECK: v_subbrev_u32_e32 v0, vcc, vcc_hi, v0, vcc ; encoding: [0x6b,0x00,0x00,0x3c]
+0x6b,0x00,0x00,0x3c
+
+# CHECK: v_subbrev_u32_e32 v0, vcc, 0, v0, vcc ; encoding: [0x80,0x00,0x00,0x3c]
+0x80,0x00,0x00,0x3c
+
+# CHECK: v_subbrev_u32_e32 v0, vcc, -1, v0, vcc ; encoding: [0xc1,0x00,0x00,0x3c]
+0xc1,0x00,0x00,0x3c
+
+# CHECK: v_subbrev_u32_e32 v0, vcc, 0.5, v0, vcc ; encoding: [0xf0,0x00,0x00,0x3c]
+0xf0,0x00,0x00,0x3c
+
+# CHECK: v_subbrev_u32_e32 v0, vcc, -4.0, v0, vcc ; encoding: [0xf7,0x00,0x00,0x3c]
+0xf7,0x00,0x00,0x3c
+
+# CHECK: v_subbrev_u32_e32 v0, vcc, scc, v0, vcc ; encoding: [0xfd,0x00,0x00,0x3c]
+0xfd,0x00,0x00,0x3c
+
+# CHECK: v_subbrev_u32_e32 v0, vcc, v0, v0, vcc ; encoding: [0x00,0x01,0x00,0x3c]
+0x00,0x01,0x00,0x3c
+
+# CHECK: v_subbrev_u32_e32 v0, vcc, v255, v0, vcc ; encoding: [0xff,0x01,0x00,0x3c]
+0xff,0x01,0x00,0x3c
+
+# CHECK: v_subbrev_u32_e32 v0, vcc, vcc_lo, v255, vcc ; encoding: [0x6a,0xfe,0x01,0x3c]
+0x6a,0xfe,0x01,0x3c
+
+# CHECK: v_subbrev_u32_e64 v0, s[0:1], s0, s0, s[0:1] ; encoding: [0x00,0x00,0x1e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x1e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subbrev_u32_e64 v255, s[0:1], s0, s0, s[0:1] ; encoding: [0xff,0x00,0x1e,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x1e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, s[2:3], s0, s0, s[0:1] ; encoding: [0x00,0x02,0x1e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0x1e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, s[100:101], s0, s0, s[0:1] ; encoding: [0x00,0x64,0x1e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x64,0x1e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, flat_scratch, s0, s0, s[0:1] ; encoding: [0x00,0x66,0x1e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x66,0x1e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, vcc, s0, s0, s[0:1] ; encoding: [0x00,0x6a,0x1e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x6a,0x1e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, tba, s0, s0, s[0:1] ; encoding: [0x00,0x6c,0x1e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x6c,0x1e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, tma, s0, s0, s[0:1] ; encoding: [0x00,0x6e,0x1e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x6e,0x1e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, ttmp[10:11], s0, s0, s[0:1] ; encoding: [0x00,0x7a,0x1e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x7a,0x1e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, s[0:1], 0, s0, s[0:1] ; encoding: [0x00,0x00,0x1e,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x1e,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, s[0:1], -1, s0, s[0:1] ; encoding: [0x00,0x00,0x1e,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x1e,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, s[0:1], 0.5, s0, s[0:1] ; encoding: [0x00,0x00,0x1e,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x1e,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, s[0:1], -4.0, s0, s[0:1] ; encoding: [0x00,0x00,0x1e,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x1e,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, s[0:1], scc, s0, s[0:1] ; encoding: [0x00,0x00,0x1e,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x1e,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, s[0:1], v0, s0, s[0:1] ; encoding: [0x00,0x00,0x1e,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x1e,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, s[0:1], v255, s0, s[0:1] ; encoding: [0x00,0x00,0x1e,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x1e,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, s[0:1], s0, 0, s[0:1] ; encoding: [0x00,0x00,0x1e,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x1e,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, s[0:1], s0, -1, s[0:1] ; encoding: [0x00,0x00,0x1e,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x1e,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, s[0:1], s0, 0.5, s[0:1] ; encoding: [0x00,0x00,0x1e,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x1e,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, s[0:1], s0, -4.0, s[0:1] ; encoding: [0x00,0x00,0x1e,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x1e,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, s[0:1], s0, scc, s[0:1] ; encoding: [0x00,0x00,0x1e,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x1e,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, s[0:1], s0, v0, s[0:1] ; encoding: [0x00,0x00,0x1e,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x1e,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_subbrev_u32_e64 v0, s[0:1], s0, v255, s[0:1] ; encoding: [0x00,0x00,0x1e,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x1e,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_add_f16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x3e]
+0x00,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x3f]
+0x00,0x00,0xfe,0x3f
+
+# CHECK: v_add_f16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x3e]
+0x65,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x3e]
+0x66,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x3e]
+0x67,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x3e]
+0x6a,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x3e]
+0x6b,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x3e]
+0x6c,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x3e]
+0x6d,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x3e]
+0x6e,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x3e]
+0x6f,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x3e]
+0x7b,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x3e]
+0x7c,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x3e]
+0x7e,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x3e]
+0x7f,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x3e]
+0x80,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x3e]
+0xc1,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x3e]
+0xf0,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x3e]
+0xf7,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x3e]
+0xfd,0x00,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x3e,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x3e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_add_f16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x3e,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x3e,0x56,0x34,0x00,0x00
+
+# CHECK: v_add_f16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x3e]
+0x00,0x01,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x3e]
+0xff,0x01,0x00,0x3e
+
+# CHECK: v_add_f16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x3e]
+0x00,0xfe,0x01,0x3e
+
+# CHECK: v_add_f16_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x1f,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x1f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_f16_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x1f,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x1f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_f16_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x1f,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x1f,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_add_f16_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x1f,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x1f,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_add_f16_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x1f,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x1f,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_add_f16_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x1f,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x1f,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_add_f16_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x1f,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x1f,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_add_f16_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x1f,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x1f,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_add_f16_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x1f,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x1f,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_add_f16_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x1f,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x1f,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_add_f16_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x1f,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x1f,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_add_f16_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x1f,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x1f,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_add_f16_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x1f,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x1f,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_add_f16_e64 v0, s0, scc ; encoding: [0x00,0x00,0x1f,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x1f,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_add_f16_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x1f,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x1f,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_add_f16_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x1f,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x1f,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_add_f16_e64 v0, -s0, s0 ; encoding: [0x00,0x00,0x1f,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x1f,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_add_f16_e64 v0, s0, -s0 ; encoding: [0x00,0x00,0x1f,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0x1f,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_add_f16_e64 v0, -s0, -s0 ; encoding: [0x00,0x00,0x1f,0xd1,0x00,0x00,0x00,0x60]
+0x00,0x00,0x1f,0xd1,0x00,0x00,0x00,0x60
+
+# CHECK: v_add_f16_e64 v0, |s0|, s0 ; encoding: [0x00,0x01,0x1f,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x1f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_f16_e64 v0, s0, |s0| ; encoding: [0x00,0x02,0x1f,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0x1f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_f16_e64 v0, |s0|, |s0| ; encoding: [0x00,0x03,0x1f,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x03,0x1f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_f16_e64 v0, s0, s0 clamp ; encoding: [0x00,0x80,0x1f,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x1f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sub_f16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x40]
+0x00,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x41]
+0x00,0x00,0xfe,0x41
+
+# CHECK: v_sub_f16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x40]
+0x65,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x40]
+0x66,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x40]
+0x67,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x40]
+0x6a,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x40]
+0x6b,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x40]
+0x6c,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x40]
+0x6d,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x40]
+0x6e,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x40]
+0x6f,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x40]
+0x7b,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x40]
+0x7c,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x40]
+0x7e,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x40]
+0x7f,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x40]
+0x80,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x40]
+0xc1,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x40]
+0xf0,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x40]
+0xf7,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x40]
+0xfd,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x40,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x40,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_sub_f16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x40,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x40,0x56,0x34,0x00,0x00
+
+# CHECK: v_sub_f16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x40]
+0x00,0x01,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x40]
+0xff,0x01,0x00,0x40
+
+# CHECK: v_sub_f16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x40]
+0x00,0xfe,0x01,0x40
+
+# CHECK: v_sub_f16_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x20,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x20,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sub_f16_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x20,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x20,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sub_f16_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x20,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x20,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_sub_f16_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x20,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x20,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_sub_f16_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x20,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x20,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_sub_f16_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x20,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x20,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_sub_f16_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x20,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x20,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_sub_f16_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x20,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x20,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_sub_f16_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x20,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x20,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_sub_f16_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x20,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x20,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_sub_f16_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x20,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x20,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_sub_f16_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x20,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x20,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_sub_f16_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x20,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x20,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_sub_f16_e64 v0, s0, scc ; encoding: [0x00,0x00,0x20,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x20,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_sub_f16_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x20,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x20,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_sub_f16_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x20,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x20,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_sub_f16_e64 v0, -s0, s0 ; encoding: [0x00,0x00,0x20,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x20,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_sub_f16_e64 v0, s0, -s0 ; encoding: [0x00,0x00,0x20,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0x20,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_sub_f16_e64 v0, -s0, -s0 ; encoding: [0x00,0x00,0x20,0xd1,0x00,0x00,0x00,0x60]
+0x00,0x00,0x20,0xd1,0x00,0x00,0x00,0x60
+
+# CHECK: v_sub_f16_e64 v0, |s0|, s0 ; encoding: [0x00,0x01,0x20,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x20,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sub_f16_e64 v0, s0, |s0| ; encoding: [0x00,0x02,0x20,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0x20,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sub_f16_e64 v0, |s0|, |s0| ; encoding: [0x00,0x03,0x20,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x03,0x20,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sub_f16_e64 v0, s0, s0 clamp ; encoding: [0x00,0x80,0x20,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x20,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subrev_f16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x42]
+0x00,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x43]
+0x00,0x00,0xfe,0x43
+
+# CHECK: v_subrev_f16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x42]
+0x65,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x42]
+0x66,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x42]
+0x67,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x42]
+0x6a,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x42]
+0x6b,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x42]
+0x6c,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x42]
+0x6d,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x42]
+0x6e,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x42]
+0x6f,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x42]
+0x7b,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x42]
+0x7c,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x42]
+0x7e,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x42]
+0x7f,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x42]
+0x80,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x42]
+0xc1,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x42]
+0xf0,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x42]
+0xf7,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x42]
+0xfd,0x00,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x42,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x42,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_subrev_f16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x42,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x42,0x56,0x34,0x00,0x00
+
+# CHECK: v_subrev_f16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x42]
+0x00,0x01,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x42]
+0xff,0x01,0x00,0x42
+
+# CHECK: v_subrev_f16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x42]
+0x00,0xfe,0x01,0x42
+
+# CHECK: v_subrev_f16_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x21,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x21,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subrev_f16_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x21,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x21,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subrev_f16_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x21,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x21,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_subrev_f16_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x21,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x21,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_subrev_f16_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x21,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x21,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_subrev_f16_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x21,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x21,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_subrev_f16_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x21,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x21,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_subrev_f16_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x21,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x21,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_subrev_f16_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x21,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x21,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_subrev_f16_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x21,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x21,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_subrev_f16_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x21,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x21,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_subrev_f16_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x21,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x21,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_subrev_f16_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x21,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x21,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_subrev_f16_e64 v0, s0, scc ; encoding: [0x00,0x00,0x21,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x21,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_subrev_f16_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x21,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x21,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_subrev_f16_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x21,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x21,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_subrev_f16_e64 v0, -s0, s0 ; encoding: [0x00,0x00,0x21,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x21,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_subrev_f16_e64 v0, s0, -s0 ; encoding: [0x00,0x00,0x21,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0x21,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_subrev_f16_e64 v0, -s0, -s0 ; encoding: [0x00,0x00,0x21,0xd1,0x00,0x00,0x00,0x60]
+0x00,0x00,0x21,0xd1,0x00,0x00,0x00,0x60
+
+# CHECK: v_subrev_f16_e64 v0, |s0|, s0 ; encoding: [0x00,0x01,0x21,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x21,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subrev_f16_e64 v0, s0, |s0| ; encoding: [0x00,0x02,0x21,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0x21,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subrev_f16_e64 v0, |s0|, |s0| ; encoding: [0x00,0x03,0x21,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x03,0x21,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subrev_f16_e64 v0, s0, s0 clamp ; encoding: [0x00,0x80,0x21,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x21,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_f16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x44]
+0x00,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x45]
+0x00,0x00,0xfe,0x45
+
+# CHECK: v_mul_f16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x44]
+0x65,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x44]
+0x66,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x44]
+0x67,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x44]
+0x6a,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x44]
+0x6b,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x44]
+0x6c,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x44]
+0x6d,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x44]
+0x6e,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x44]
+0x6f,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x44]
+0x7b,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x44]
+0x7c,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x44]
+0x7e,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x44]
+0x7f,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x44]
+0x80,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x44]
+0xc1,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x44]
+0xf0,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x44]
+0xf7,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x44]
+0xfd,0x00,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x44,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x44,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_mul_f16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x44,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x44,0x56,0x34,0x00,0x00
+
+# CHECK: v_mul_f16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x44]
+0x00,0x01,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x44]
+0xff,0x01,0x00,0x44
+
+# CHECK: v_mul_f16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x44]
+0x00,0xfe,0x01,0x44
+
+# CHECK: v_mul_f16_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x22,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x22,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_f16_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x22,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x22,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_f16_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x22,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x22,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_mul_f16_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x22,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x22,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mul_f16_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x22,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x22,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mul_f16_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x22,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x22,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mul_f16_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x22,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x22,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mul_f16_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x22,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x22,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_mul_f16_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x22,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x22,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_mul_f16_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x22,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x22,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_mul_f16_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x22,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x22,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_mul_f16_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x22,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x22,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mul_f16_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x22,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x22,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_mul_f16_e64 v0, s0, scc ; encoding: [0x00,0x00,0x22,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x22,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mul_f16_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x22,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x22,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_mul_f16_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x22,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x22,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_mul_f16_e64 v0, -s0, s0 ; encoding: [0x00,0x00,0x22,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x22,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_mul_f16_e64 v0, s0, -s0 ; encoding: [0x00,0x00,0x22,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0x22,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_mul_f16_e64 v0, -s0, -s0 ; encoding: [0x00,0x00,0x22,0xd1,0x00,0x00,0x00,0x60]
+0x00,0x00,0x22,0xd1,0x00,0x00,0x00,0x60
+
+# CHECK: v_mul_f16_e64 v0, |s0|, s0 ; encoding: [0x00,0x01,0x22,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x22,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_f16_e64 v0, s0, |s0| ; encoding: [0x00,0x02,0x22,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0x22,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_f16_e64 v0, |s0|, |s0| ; encoding: [0x00,0x03,0x22,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x03,0x22,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_f16_e64 v0, s0, s0 clamp ; encoding: [0x00,0x80,0x22,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x22,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mac_f16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x46]
+0x00,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x47]
+0x00,0x00,0xfe,0x47
+
+# CHECK: v_mac_f16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x46]
+0x65,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x46]
+0x66,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x46]
+0x67,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x46]
+0x6a,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x46]
+0x6b,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x46]
+0x6c,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x46]
+0x6d,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x46]
+0x6e,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x46]
+0x6f,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x46]
+0x7b,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x46]
+0x7c,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x46]
+0x7e,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x46]
+0x7f,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x46]
+0x80,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x46]
+0xc1,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x46]
+0xf0,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x46]
+0xf7,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x46]
+0xfd,0x00,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x46,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x46,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_mac_f16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x46,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x46,0x56,0x34,0x00,0x00
+
+# CHECK: v_mac_f16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x46]
+0x00,0x01,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x46]
+0xff,0x01,0x00,0x46
+
+# CHECK: v_mac_f16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x46]
+0x00,0xfe,0x01,0x46
+
+# CHECK: v_add_u16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x4c]
+0x00,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x4d]
+0x00,0x00,0xfe,0x4d
+
+# CHECK: v_add_u16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x4c]
+0x65,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x4c]
+0x66,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x4c]
+0x67,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x4c]
+0x6a,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x4c]
+0x6b,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x4c]
+0x6c,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x4c]
+0x6d,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x4c]
+0x6e,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x4c]
+0x6f,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x4c]
+0x7b,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x4c]
+0x7c,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x4c]
+0x7e,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x4c]
+0x7f,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x4c]
+0x80,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x4c]
+0xc1,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x4c]
+0xf0,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x4c]
+0xf7,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x4c]
+0xfd,0x00,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x4c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x4c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_add_u16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x4c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x4c,0x56,0x34,0x00,0x00
+
+# CHECK: v_add_u16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x4c]
+0x00,0x01,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x4c]
+0xff,0x01,0x00,0x4c
+
+# CHECK: v_add_u16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x4c]
+0x00,0xfe,0x01,0x4c
+
+# CHECK: v_add_u16_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x26,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x26,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_u16_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x26,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x26,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_u16_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x26,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x26,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_add_u16_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x26,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x26,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_add_u16_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x26,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x26,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_add_u16_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x26,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x26,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_add_u16_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x26,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x26,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_add_u16_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x26,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x26,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_add_u16_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x26,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x26,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_add_u16_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x26,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x26,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_add_u16_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x26,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x26,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_add_u16_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x26,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x26,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_add_u16_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x26,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x26,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_add_u16_e64 v0, s0, scc ; encoding: [0x00,0x00,0x26,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x26,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_add_u16_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x26,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x26,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_add_u16_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x26,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x26,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_sub_u16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x4e]
+0x00,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x4f]
+0x00,0x00,0xfe,0x4f
+
+# CHECK: v_sub_u16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x4e]
+0x65,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x4e]
+0x66,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x4e]
+0x67,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x4e]
+0x6a,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x4e]
+0x6b,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x4e]
+0x6c,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x4e]
+0x6d,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x4e]
+0x6e,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x4e]
+0x6f,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x4e]
+0x7b,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x4e]
+0x7c,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x4e]
+0x7e,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x4e]
+0x7f,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x4e]
+0x80,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x4e]
+0xc1,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x4e]
+0xf0,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x4e]
+0xf7,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x4e]
+0xfd,0x00,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x4e,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x4e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_sub_u16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x4e,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x4e,0x56,0x34,0x00,0x00
+
+# CHECK: v_sub_u16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x4e]
+0x00,0x01,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x4e]
+0xff,0x01,0x00,0x4e
+
+# CHECK: v_sub_u16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x4e]
+0x00,0xfe,0x01,0x4e
+
+# CHECK: v_sub_u16_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x27,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x27,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sub_u16_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x27,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x27,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sub_u16_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x27,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x27,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_sub_u16_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x27,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x27,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_sub_u16_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x27,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x27,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_sub_u16_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x27,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x27,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_sub_u16_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x27,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x27,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_sub_u16_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x27,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x27,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_sub_u16_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x27,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x27,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_sub_u16_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x27,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x27,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_sub_u16_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x27,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x27,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_sub_u16_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x27,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x27,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_sub_u16_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x27,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x27,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_sub_u16_e64 v0, s0, scc ; encoding: [0x00,0x00,0x27,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x27,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_sub_u16_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x27,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x27,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_sub_u16_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x27,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x27,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_subrev_u16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x50]
+0x00,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x51]
+0x00,0x00,0xfe,0x51
+
+# CHECK: v_subrev_u16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x50]
+0x65,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x50]
+0x66,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x50]
+0x67,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x50]
+0x6a,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x50]
+0x6b,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x50]
+0x6c,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x50]
+0x6d,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x50]
+0x6e,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x50]
+0x6f,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x50]
+0x7b,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x50]
+0x7c,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x50]
+0x7e,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x50]
+0x7f,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x50]
+0x80,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x50]
+0xc1,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x50]
+0xf0,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x50]
+0xf7,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x50]
+0xfd,0x00,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x50,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x50,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_subrev_u16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x50,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x50,0x56,0x34,0x00,0x00
+
+# CHECK: v_subrev_u16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x50]
+0x00,0x01,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x50]
+0xff,0x01,0x00,0x50
+
+# CHECK: v_subrev_u16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x50]
+0x00,0xfe,0x01,0x50
+
+# CHECK: v_subrev_u16_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x28,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x28,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subrev_u16_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x28,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x28,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_subrev_u16_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x28,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x28,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_subrev_u16_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x28,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x28,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_subrev_u16_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x28,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x28,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_subrev_u16_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x28,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x28,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_subrev_u16_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x28,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x28,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_subrev_u16_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x28,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x28,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_subrev_u16_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x28,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x28,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_subrev_u16_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x28,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x28,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_subrev_u16_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x28,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x28,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_subrev_u16_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x28,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x28,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_subrev_u16_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x28,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x28,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_subrev_u16_e64 v0, s0, scc ; encoding: [0x00,0x00,0x28,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x28,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_subrev_u16_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x28,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x28,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_subrev_u16_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x28,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x28,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_mul_lo_u16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x52]
+0x00,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x53]
+0x00,0x00,0xfe,0x53
+
+# CHECK: v_mul_lo_u16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x52]
+0x65,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x52]
+0x66,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x52]
+0x67,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x52]
+0x6a,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x52]
+0x6b,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x52]
+0x6c,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x52]
+0x6d,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x52]
+0x6e,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x52]
+0x6f,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x52]
+0x7b,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x52]
+0x7c,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x52]
+0x7e,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x52]
+0x7f,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x52]
+0x80,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x52]
+0xc1,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x52]
+0xf0,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x52]
+0xf7,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x52]
+0xfd,0x00,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x52,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x52,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_mul_lo_u16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x52,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x52,0x56,0x34,0x00,0x00
+
+# CHECK: v_mul_lo_u16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x52]
+0x00,0x01,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x52]
+0xff,0x01,0x00,0x52
+
+# CHECK: v_mul_lo_u16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x52]
+0x00,0xfe,0x01,0x52
+
+# CHECK: v_mul_lo_u16_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x29,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x29,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_lo_u16_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x29,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x29,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_lo_u16_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x29,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x29,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_mul_lo_u16_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x29,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x29,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mul_lo_u16_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x29,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x29,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mul_lo_u16_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x29,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x29,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mul_lo_u16_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x29,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x29,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mul_lo_u16_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x29,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x29,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_mul_lo_u16_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x29,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x29,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_mul_lo_u16_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x29,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x29,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_mul_lo_u16_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x29,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x29,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_mul_lo_u16_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x29,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x29,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mul_lo_u16_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x29,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x29,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_mul_lo_u16_e64 v0, s0, scc ; encoding: [0x00,0x00,0x29,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x29,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mul_lo_u16_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x29,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x29,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_mul_lo_u16_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x29,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x29,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_lshlrev_b16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x54]
+0x00,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x55]
+0x00,0x00,0xfe,0x55
+
+# CHECK: v_lshlrev_b16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x54]
+0x65,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x54]
+0x66,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x54]
+0x67,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x54]
+0x6a,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x54]
+0x6b,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x54]
+0x6c,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x54]
+0x6d,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x54]
+0x6e,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x54]
+0x6f,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x54]
+0x7b,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x54]
+0x7c,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x54]
+0x7e,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x54]
+0x7f,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x54]
+0x80,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x54]
+0xc1,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x54]
+0xf0,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x54]
+0xf7,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x54]
+0xfd,0x00,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x54,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x54,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_lshlrev_b16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x54,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x54,0x56,0x34,0x00,0x00
+
+# CHECK: v_lshlrev_b16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x54]
+0x00,0x01,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x54]
+0xff,0x01,0x00,0x54
+
+# CHECK: v_lshlrev_b16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x54]
+0x00,0xfe,0x01,0x54
+
+# CHECK: v_lshlrev_b16_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x2a,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b16_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x2a,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x2a,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b16_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x2a,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x2a,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b16_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x2a,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x2a,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b16_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x2a,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x2a,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b16_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x2a,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x2a,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b16_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x2a,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x2a,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b16_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x2a,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x2a,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_lshlrev_b16_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x2a,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x2a,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_lshlrev_b16_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x2a,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x2a,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_lshlrev_b16_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x2a,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x2a,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_lshlrev_b16_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x2a,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x2a,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_lshlrev_b16_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x2a,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x2a,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_lshlrev_b16_e64 v0, s0, scc ; encoding: [0x00,0x00,0x2a,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x2a,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_lshlrev_b16_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x2a,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x2a,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_lshlrev_b16_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x2a,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x2a,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_lshrrev_b16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x56]
+0x00,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x57]
+0x00,0x00,0xfe,0x57
+
+# CHECK: v_lshrrev_b16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x56]
+0x65,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x56]
+0x66,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x56]
+0x67,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x56]
+0x6a,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x56]
+0x6b,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x56]
+0x6c,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x56]
+0x6d,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x56]
+0x6e,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x56]
+0x6f,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x56]
+0x7b,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x56]
+0x7c,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x56]
+0x7e,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x56]
+0x7f,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x56]
+0x80,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x56]
+0xc1,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x56]
+0xf0,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x56]
+0xf7,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x56]
+0xfd,0x00,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x56,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x56,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_lshrrev_b16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x56,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x56,0x56,0x34,0x00,0x00
+
+# CHECK: v_lshrrev_b16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x56]
+0x00,0x01,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x56]
+0xff,0x01,0x00,0x56
+
+# CHECK: v_lshrrev_b16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x56]
+0x00,0xfe,0x01,0x56
+
+# CHECK: v_lshrrev_b16_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x2b,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b16_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x2b,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x2b,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b16_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x2b,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x2b,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b16_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x2b,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x2b,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b16_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x2b,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x2b,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b16_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x2b,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x2b,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b16_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x2b,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x2b,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b16_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x2b,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x2b,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_lshrrev_b16_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x2b,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x2b,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_lshrrev_b16_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x2b,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x2b,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_lshrrev_b16_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x2b,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x2b,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_lshrrev_b16_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x2b,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x2b,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_lshrrev_b16_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x2b,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x2b,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_lshrrev_b16_e64 v0, s0, scc ; encoding: [0x00,0x00,0x2b,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x2b,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_lshrrev_b16_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x2b,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x2b,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_lshrrev_b16_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x2b,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x2b,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_ashrrev_i16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x58]
+0x00,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x59]
+0x00,0x00,0xfe,0x59
+
+# CHECK: v_ashrrev_i16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x58]
+0x65,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x58]
+0x66,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x58]
+0x67,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x58]
+0x6a,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x58]
+0x6b,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x58]
+0x6c,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x58]
+0x6d,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x58]
+0x6e,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x58]
+0x6f,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x58]
+0x7b,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x58]
+0x7c,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x58]
+0x7e,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x58]
+0x7f,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x58]
+0x80,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x58]
+0xc1,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x58]
+0xf0,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x58]
+0xf7,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x58]
+0xfd,0x00,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x58,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x58,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_ashrrev_i16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x58,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x58,0x56,0x34,0x00,0x00
+
+# CHECK: v_ashrrev_i16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x58]
+0x00,0x01,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x58]
+0xff,0x01,0x00,0x58
+
+# CHECK: v_ashrrev_i16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x58]
+0x00,0xfe,0x01,0x58
+
+# CHECK: v_ashrrev_i16_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x2c,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i16_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x2c,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x2c,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i16_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x2c,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i16_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x2c,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i16_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x2c,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i16_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x2c,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i16_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x2c,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i16_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x2c,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x2c,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_ashrrev_i16_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x2c,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x2c,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_ashrrev_i16_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x2c,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x2c,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_ashrrev_i16_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x2c,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x2c,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_ashrrev_i16_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x2c,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x2c,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_ashrrev_i16_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x2c,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x2c,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_ashrrev_i16_e64 v0, s0, scc ; encoding: [0x00,0x00,0x2c,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x2c,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_ashrrev_i16_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x2c,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x2c,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_ashrrev_i16_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x2c,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x2c,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_max_f16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x5a]
+0x00,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x5b]
+0x00,0x00,0xfe,0x5b
+
+# CHECK: v_max_f16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x5a]
+0x65,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x5a]
+0x66,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x5a]
+0x67,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x5a]
+0x6a,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x5a]
+0x6b,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x5a]
+0x6c,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x5a]
+0x6d,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x5a]
+0x6e,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x5a]
+0x6f,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x5a]
+0x7b,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x5a]
+0x7c,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x5a]
+0x7e,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x5a]
+0x7f,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x5a]
+0x80,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x5a]
+0xc1,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x5a]
+0xf0,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x5a]
+0xf7,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x5a]
+0xfd,0x00,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x5a,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x5a,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_max_f16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x5a,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x5a,0x56,0x34,0x00,0x00
+
+# CHECK: v_max_f16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x5a]
+0x00,0x01,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x5a]
+0xff,0x01,0x00,0x5a
+
+# CHECK: v_max_f16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x5a]
+0x00,0xfe,0x01,0x5a
+
+# CHECK: v_max_f16_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x2d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_f16_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x2d,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x2d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_f16_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x2d,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x2d,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_max_f16_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x2d,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x2d,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_max_f16_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x2d,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x2d,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_max_f16_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x2d,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x2d,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_max_f16_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x2d,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x2d,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_max_f16_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x2d,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x2d,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_max_f16_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x2d,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x2d,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_max_f16_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x2d,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x2d,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_max_f16_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x2d,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x2d,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_max_f16_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x2d,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x2d,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_max_f16_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x2d,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x2d,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_max_f16_e64 v0, s0, scc ; encoding: [0x00,0x00,0x2d,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x2d,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_max_f16_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x2d,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x2d,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_max_f16_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x2d,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x2d,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_max_f16_e64 v0, -s0, s0 ; encoding: [0x00,0x00,0x2d,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x2d,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_max_f16_e64 v0, s0, -s0 ; encoding: [0x00,0x00,0x2d,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0x2d,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_max_f16_e64 v0, -s0, -s0 ; encoding: [0x00,0x00,0x2d,0xd1,0x00,0x00,0x00,0x60]
+0x00,0x00,0x2d,0xd1,0x00,0x00,0x00,0x60
+
+# CHECK: v_max_f16_e64 v0, |s0|, s0 ; encoding: [0x00,0x01,0x2d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x2d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_f16_e64 v0, s0, |s0| ; encoding: [0x00,0x02,0x2d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0x2d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_f16_e64 v0, |s0|, |s0| ; encoding: [0x00,0x03,0x2d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x03,0x2d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_f16_e64 v0, s0, s0 clamp ; encoding: [0x00,0x80,0x2d,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x2d,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_f16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x5c]
+0x00,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x5d]
+0x00,0x00,0xfe,0x5d
+
+# CHECK: v_min_f16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x5c]
+0x65,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x5c]
+0x66,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x5c]
+0x67,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x5c]
+0x6a,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x5c]
+0x6b,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x5c]
+0x6c,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x5c]
+0x6d,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x5c]
+0x6e,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x5c]
+0x6f,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x5c]
+0x7b,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x5c]
+0x7c,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x5c]
+0x7e,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x5c]
+0x7f,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x5c]
+0x80,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x5c]
+0xc1,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x5c]
+0xf0,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x5c]
+0xf7,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x5c]
+0xfd,0x00,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x5c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x5c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_min_f16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x5c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x5c,0x56,0x34,0x00,0x00
+
+# CHECK: v_min_f16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x5c]
+0x00,0x01,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x5c]
+0xff,0x01,0x00,0x5c
+
+# CHECK: v_min_f16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x5c]
+0x00,0xfe,0x01,0x5c
+
+# CHECK: v_min_f16_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x2e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_f16_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x2e,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x2e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_f16_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x2e,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x2e,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_min_f16_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x2e,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x2e,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_min_f16_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x2e,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x2e,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_min_f16_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x2e,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x2e,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_min_f16_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x2e,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x2e,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_min_f16_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x2e,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x2e,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_min_f16_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x2e,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x2e,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_min_f16_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x2e,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x2e,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_min_f16_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x2e,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x2e,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_min_f16_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x2e,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x2e,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_min_f16_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x2e,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x2e,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_min_f16_e64 v0, s0, scc ; encoding: [0x00,0x00,0x2e,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x2e,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_min_f16_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x2e,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x2e,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_min_f16_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x2e,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x2e,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_min_f16_e64 v0, -s0, s0 ; encoding: [0x00,0x00,0x2e,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x2e,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_min_f16_e64 v0, s0, -s0 ; encoding: [0x00,0x00,0x2e,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0x2e,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_min_f16_e64 v0, -s0, -s0 ; encoding: [0x00,0x00,0x2e,0xd1,0x00,0x00,0x00,0x60]
+0x00,0x00,0x2e,0xd1,0x00,0x00,0x00,0x60
+
+# CHECK: v_min_f16_e64 v0, |s0|, s0 ; encoding: [0x00,0x01,0x2e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x2e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_f16_e64 v0, s0, |s0| ; encoding: [0x00,0x02,0x2e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0x2e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_f16_e64 v0, |s0|, |s0| ; encoding: [0x00,0x03,0x2e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x03,0x2e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_f16_e64 v0, s0, s0 clamp ; encoding: [0x00,0x80,0x2e,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x2e,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_u16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x5e]
+0x00,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x5f]
+0x00,0x00,0xfe,0x5f
+
+# CHECK: v_max_u16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x5e]
+0x65,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x5e]
+0x66,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x5e]
+0x67,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x5e]
+0x6a,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x5e]
+0x6b,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x5e]
+0x6c,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x5e]
+0x6d,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x5e]
+0x6e,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x5e]
+0x6f,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x5e]
+0x7b,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x5e]
+0x7c,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x5e]
+0x7e,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x5e]
+0x7f,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x5e]
+0x80,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x5e]
+0xc1,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x5e]
+0xf0,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x5e]
+0xf7,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x5e]
+0xfd,0x00,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x5e,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x5e,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_max_u16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x5e,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x5e,0x56,0x34,0x00,0x00
+
+# CHECK: v_max_u16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x5e]
+0x00,0x01,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x5e]
+0xff,0x01,0x00,0x5e
+
+# CHECK: v_max_u16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x5e]
+0x00,0xfe,0x01,0x5e
+
+# CHECK: v_max_u16_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x2f,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_u16_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x2f,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x2f,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_u16_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x2f,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x2f,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_max_u16_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x2f,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x2f,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_max_u16_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x2f,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x2f,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_max_u16_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x2f,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x2f,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_max_u16_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x2f,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x2f,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_max_u16_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x2f,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x2f,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_max_u16_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x2f,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x2f,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_max_u16_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x2f,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x2f,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_max_u16_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x2f,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x2f,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_max_u16_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x2f,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x2f,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_max_u16_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x2f,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x2f,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_max_u16_e64 v0, s0, scc ; encoding: [0x00,0x00,0x2f,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x2f,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_max_u16_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x2f,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x2f,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_max_u16_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x2f,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x2f,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_max_i16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x60]
+0x00,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x61]
+0x00,0x00,0xfe,0x61
+
+# CHECK: v_max_i16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x60]
+0x65,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x60]
+0x66,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x60]
+0x67,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x60]
+0x6a,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x60]
+0x6b,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x60]
+0x6c,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x60]
+0x6d,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x60]
+0x6e,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x60]
+0x6f,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x60]
+0x7b,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x60]
+0x7c,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x60]
+0x7e,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x60]
+0x7f,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x60]
+0x80,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x60]
+0xc1,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x60]
+0xf0,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x60]
+0xf7,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x60]
+0xfd,0x00,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x60,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x60,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_max_i16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x60,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x60,0x56,0x34,0x00,0x00
+
+# CHECK: v_max_i16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x60]
+0x00,0x01,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x60]
+0xff,0x01,0x00,0x60
+
+# CHECK: v_max_i16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x60]
+0x00,0xfe,0x01,0x60
+
+# CHECK: v_max_i16_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x30,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x30,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_i16_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x30,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x30,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_i16_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x30,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x30,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_max_i16_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x30,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x30,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_max_i16_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x30,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x30,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_max_i16_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x30,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x30,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_max_i16_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x30,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x30,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_max_i16_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x30,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x30,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_max_i16_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x30,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x30,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_max_i16_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x30,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x30,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_max_i16_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x30,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x30,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_max_i16_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x30,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x30,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_max_i16_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x30,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x30,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_max_i16_e64 v0, s0, scc ; encoding: [0x00,0x00,0x30,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x30,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_max_i16_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x30,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x30,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_max_i16_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x30,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x30,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_min_u16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x62]
+0x00,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x63]
+0x00,0x00,0xfe,0x63
+
+# CHECK: v_min_u16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x62]
+0x65,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x62]
+0x66,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x62]
+0x67,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x62]
+0x6a,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x62]
+0x6b,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x62]
+0x6c,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x62]
+0x6d,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x62]
+0x6e,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x62]
+0x6f,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x62]
+0x7b,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x62]
+0x7c,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x62]
+0x7e,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x62]
+0x7f,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x62]
+0x80,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x62]
+0xc1,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x62]
+0xf0,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x62]
+0xf7,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x62]
+0xfd,0x00,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x62,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x62,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_min_u16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x62,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x62,0x56,0x34,0x00,0x00
+
+# CHECK: v_min_u16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x62]
+0x00,0x01,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x62]
+0xff,0x01,0x00,0x62
+
+# CHECK: v_min_u16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x62]
+0x00,0xfe,0x01,0x62
+
+# CHECK: v_min_u16_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x31,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x31,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_u16_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x31,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x31,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_u16_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x31,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x31,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_min_u16_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x31,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x31,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_min_u16_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x31,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x31,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_min_u16_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x31,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x31,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_min_u16_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x31,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x31,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_min_u16_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x31,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x31,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_min_u16_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x31,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x31,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_min_u16_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x31,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x31,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_min_u16_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x31,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x31,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_min_u16_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x31,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x31,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_min_u16_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x31,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x31,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_min_u16_e64 v0, s0, scc ; encoding: [0x00,0x00,0x31,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x31,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_min_u16_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x31,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x31,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_min_u16_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x31,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x31,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_min_i16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x64]
+0x00,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x65]
+0x00,0x00,0xfe,0x65
+
+# CHECK: v_min_i16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x64]
+0x65,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x64]
+0x66,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x64]
+0x67,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x64]
+0x6a,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x64]
+0x6b,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x64]
+0x6c,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x64]
+0x6d,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x64]
+0x6e,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x64]
+0x6f,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x64]
+0x7b,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x64]
+0x7c,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x64]
+0x7e,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x64]
+0x7f,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x64]
+0x80,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x64]
+0xc1,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x64]
+0xf0,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x64]
+0xf7,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x64]
+0xfd,0x00,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x64,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x64,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_min_i16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x64,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x64,0x56,0x34,0x00,0x00
+
+# CHECK: v_min_i16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x64]
+0x00,0x01,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x64]
+0xff,0x01,0x00,0x64
+
+# CHECK: v_min_i16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x64]
+0x00,0xfe,0x01,0x64
+
+# CHECK: v_min_i16_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x32,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x32,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_i16_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x32,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x32,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_i16_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x32,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x32,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_min_i16_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x32,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x32,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_min_i16_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x32,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x32,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_min_i16_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x32,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x32,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_min_i16_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x32,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x32,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_min_i16_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x32,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x32,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_min_i16_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x32,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x32,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_min_i16_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x32,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x32,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_min_i16_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x32,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x32,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_min_i16_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x32,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x32,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_min_i16_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x32,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x32,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_min_i16_e64 v0, s0, scc ; encoding: [0x00,0x00,0x32,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x32,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_min_i16_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x32,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x32,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_min_i16_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x32,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x32,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_ldexp_f16_e32 v0, s0, v0 ; encoding: [0x00,0x00,0x00,0x66]
+0x00,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v255, s0, v0 ; encoding: [0x00,0x00,0xfe,0x67]
+0x00,0x00,0xfe,0x67
+
+# CHECK: v_ldexp_f16_e32 v0, s101, v0 ; encoding: [0x65,0x00,0x00,0x66]
+0x65,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x00,0x66]
+0x66,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x00,0x66]
+0x67,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, vcc_lo, v0 ; encoding: [0x6a,0x00,0x00,0x66]
+0x6a,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, vcc_hi, v0 ; encoding: [0x6b,0x00,0x00,0x66]
+0x6b,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, tba_lo, v0 ; encoding: [0x6c,0x00,0x00,0x66]
+0x6c,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, tba_hi, v0 ; encoding: [0x6d,0x00,0x00,0x66]
+0x6d,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, tma_lo, v0 ; encoding: [0x6e,0x00,0x00,0x66]
+0x6e,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, tma_hi, v0 ; encoding: [0x6f,0x00,0x00,0x66]
+0x6f,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, ttmp11, v0 ; encoding: [0x7b,0x00,0x00,0x66]
+0x7b,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, m0, v0 ; encoding: [0x7c,0x00,0x00,0x66]
+0x7c,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, exec_lo, v0 ; encoding: [0x7e,0x00,0x00,0x66]
+0x7e,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, exec_hi, v0 ; encoding: [0x7f,0x00,0x00,0x66]
+0x7f,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, 0, v0 ; encoding: [0x80,0x00,0x00,0x66]
+0x80,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, -1, v0 ; encoding: [0xc1,0x00,0x00,0x66]
+0xc1,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, 0.5, v0 ; encoding: [0xf0,0x00,0x00,0x66]
+0xf0,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, -4.0, v0 ; encoding: [0xf7,0x00,0x00,0x66]
+0xf7,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, scc, v0 ; encoding: [0xfd,0x00,0x00,0x66]
+0xfd,0x00,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, 0xfe0b, v0 ; encoding: [0xff,0x00,0x00,0x66,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x00,0x66,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_ldexp_f16_e32 v0, 0x3456, v0 ; encoding: [0xff,0x00,0x00,0x66,0x56,0x34,0x00,0x00]
+0xff,0x00,0x00,0x66,0x56,0x34,0x00,0x00
+
+# CHECK: v_ldexp_f16_e32 v0, v0, v0 ; encoding: [0x00,0x01,0x00,0x66]
+0x00,0x01,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, v255, v0 ; encoding: [0xff,0x01,0x00,0x66]
+0xff,0x01,0x00,0x66
+
+# CHECK: v_ldexp_f16_e32 v0, s0, v255 ; encoding: [0x00,0xfe,0x01,0x66]
+0x00,0xfe,0x01,0x66
+
+# CHECK: v_ldexp_f16_e64 v0, s0, s0 ; encoding: [0x00,0x00,0x33,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0x33,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ldexp_f16_e64 v255, s0, s0 ; encoding: [0xff,0x00,0x33,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0x33,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ldexp_f16_e64 v0, 0, s0 ; encoding: [0x00,0x00,0x33,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0x33,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_ldexp_f16_e64 v0, -1, s0 ; encoding: [0x00,0x00,0x33,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x33,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_ldexp_f16_e64 v0, 0.5, s0 ; encoding: [0x00,0x00,0x33,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x33,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_ldexp_f16_e64 v0, -4.0, s0 ; encoding: [0x00,0x00,0x33,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x33,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_ldexp_f16_e64 v0, scc, s0 ; encoding: [0x00,0x00,0x33,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x33,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_ldexp_f16_e64 v0, v0, s0 ; encoding: [0x00,0x00,0x33,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0x33,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_ldexp_f16_e64 v0, v255, s0 ; encoding: [0x00,0x00,0x33,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0x33,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_ldexp_f16_e64 v0, s0, 0 ; encoding: [0x00,0x00,0x33,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0x33,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_ldexp_f16_e64 v0, s0, -1 ; encoding: [0x00,0x00,0x33,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0x33,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_ldexp_f16_e64 v0, s0, 0.5 ; encoding: [0x00,0x00,0x33,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x33,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_ldexp_f16_e64 v0, s0, -4.0 ; encoding: [0x00,0x00,0x33,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0x33,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_ldexp_f16_e64 v0, s0, scc ; encoding: [0x00,0x00,0x33,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x33,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_ldexp_f16_e64 v0, s0, v0 ; encoding: [0x00,0x00,0x33,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0x33,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_ldexp_f16_e64 v0, s0, v255 ; encoding: [0x00,0x00,0x33,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x33,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_ldexp_f16_e64 v0, -s0, s0 ; encoding: [0x00,0x00,0x33,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0x33,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_ldexp_f16_e64 v0, |s0|, s0 ; encoding: [0x00,0x01,0x33,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0x33,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_ldexp_f16_e64 v0, s0, s0 clamp ; encoding: [0x00,0x80,0x33,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0x33,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_legacy_f32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_legacy_f32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xc0,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xc0,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_legacy_f32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xc0,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc0,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_mad_legacy_f32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xc0,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc0,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mad_legacy_f32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xc0,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc0,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mad_legacy_f32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xc0,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc0,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mad_legacy_f32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xc0,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc0,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mad_legacy_f32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc0,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_mad_legacy_f32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xc0,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc0,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_mad_legacy_f32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc0,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_mad_legacy_f32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc0,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_mad_legacy_f32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc0,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mad_legacy_f32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc0,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_mad_legacy_f32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc0,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mad_legacy_f32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc0,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_mad_legacy_f32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc0,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_mad_legacy_f32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_mad_legacy_f32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xc0,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_mad_legacy_f32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xc0,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_mad_legacy_f32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xc0,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_mad_legacy_f32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xc0,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_mad_legacy_f32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_mad_legacy_f32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xc0,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_mad_legacy_f32 v0, -s0, s0, s0 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_mad_legacy_f32 v0, s0, -s0, s0 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_mad_legacy_f32 v0, s0, s0, -s0 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0x80]
+0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0x80
+
+# CHECK: v_mad_legacy_f32 v0, -s0, -s0, -s0 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0xe0]
+0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0xe0
+
+# CHECK: v_mad_legacy_f32 v0, |s0|, s0, s0 ; encoding: [0x00,0x01,0xc0,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xc0,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_legacy_f32 v0, s0, |s0|, s0 ; encoding: [0x00,0x02,0xc0,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0xc0,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_legacy_f32 v0, s0, s0, |s0| ; encoding: [0x00,0x04,0xc0,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x04,0xc0,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_legacy_f32 v0, |s0|, |s0|, |s0| ; encoding: [0x00,0x07,0xc0,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x07,0xc0,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_legacy_f32 v0, s0, s0, s0 clamp ; encoding: [0x00,0x80,0xc0,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xc0,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_legacy_f32 v0, s0, s0, s0 mul:2 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_mad_legacy_f32 v0, s0, s0, s0 mul:4 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_mad_legacy_f32 v0, s0, s0, s0 div:2 ; encoding: [0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0xc0,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_mad_f32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_f32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xc1,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xc1,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_f32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xc1,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc1,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_mad_f32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xc1,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc1,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mad_f32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xc1,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc1,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mad_f32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xc1,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc1,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mad_f32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xc1,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc1,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mad_f32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc1,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_mad_f32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xc1,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc1,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_mad_f32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc1,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_mad_f32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc1,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_mad_f32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc1,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mad_f32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc1,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_mad_f32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc1,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mad_f32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc1,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_mad_f32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc1,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_mad_f32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_mad_f32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xc1,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_mad_f32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xc1,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_mad_f32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xc1,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_mad_f32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xc1,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_mad_f32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_mad_f32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xc1,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_mad_f32 v0, -s0, s0, s0 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_mad_f32 v0, s0, -s0, s0 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_mad_f32 v0, s0, s0, -s0 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0x80]
+0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0x80
+
+# CHECK: v_mad_f32 v0, -s0, -s0, -s0 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0xe0]
+0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0xe0
+
+# CHECK: v_mad_f32 v0, |s0|, s0, s0 ; encoding: [0x00,0x01,0xc1,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xc1,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_f32 v0, s0, |s0|, s0 ; encoding: [0x00,0x02,0xc1,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0xc1,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_f32 v0, s0, s0, |s0| ; encoding: [0x00,0x04,0xc1,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x04,0xc1,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_f32 v0, |s0|, |s0|, |s0| ; encoding: [0x00,0x07,0xc1,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x07,0xc1,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_f32 v0, s0, s0, s0 clamp ; encoding: [0x00,0x80,0xc1,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xc1,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_f32 v0, s0, s0, s0 mul:2 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_mad_f32 v0, s0, s0, s0 mul:4 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_mad_f32 v0, s0, s0, s0 div:2 ; encoding: [0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0xc1,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_mad_i32_i24 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xc2,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc2,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_i32_i24 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xc2,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xc2,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_i32_i24 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xc2,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc2,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_mad_i32_i24 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xc2,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc2,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mad_i32_i24 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xc2,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc2,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mad_i32_i24 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xc2,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc2,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mad_i32_i24 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xc2,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc2,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mad_i32_i24 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xc2,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc2,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_mad_i32_i24 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xc2,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc2,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_mad_i32_i24 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xc2,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc2,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_mad_i32_i24 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xc2,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc2,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_mad_i32_i24 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xc2,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc2,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mad_i32_i24 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xc2,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc2,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_mad_i32_i24 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xc2,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc2,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mad_i32_i24 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xc2,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc2,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_mad_i32_i24 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xc2,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc2,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_mad_i32_i24 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xc2,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xc2,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_mad_i32_i24 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xc2,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xc2,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_mad_i32_i24 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xc2,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xc2,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_mad_i32_i24 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xc2,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xc2,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_mad_i32_i24 v0, s0, s0, scc ; encoding: [0x00,0x00,0xc2,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xc2,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_mad_i32_i24 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xc2,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xc2,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_mad_i32_i24 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xc2,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xc2,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_mad_u32_u24 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xc3,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc3,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_u32_u24 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xc3,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xc3,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_u32_u24 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xc3,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc3,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_mad_u32_u24 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xc3,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc3,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mad_u32_u24 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xc3,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc3,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mad_u32_u24 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xc3,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc3,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mad_u32_u24 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xc3,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc3,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mad_u32_u24 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xc3,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc3,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_mad_u32_u24 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xc3,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc3,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_mad_u32_u24 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xc3,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc3,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_mad_u32_u24 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xc3,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc3,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_mad_u32_u24 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xc3,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc3,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mad_u32_u24 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xc3,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc3,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_mad_u32_u24 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xc3,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc3,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mad_u32_u24 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xc3,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc3,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_mad_u32_u24 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xc3,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc3,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_mad_u32_u24 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xc3,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xc3,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_mad_u32_u24 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xc3,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xc3,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_mad_u32_u24 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xc3,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xc3,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_mad_u32_u24 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xc3,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xc3,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_mad_u32_u24 v0, s0, s0, scc ; encoding: [0x00,0x00,0xc3,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xc3,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_mad_u32_u24 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xc3,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xc3,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_mad_u32_u24 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xc3,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xc3,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_cubeid_f32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubeid_f32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xc4,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xc4,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubeid_f32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xc4,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc4,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cubeid_f32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xc4,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc4,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cubeid_f32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xc4,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc4,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cubeid_f32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xc4,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc4,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cubeid_f32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xc4,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc4,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cubeid_f32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc4,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cubeid_f32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xc4,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc4,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cubeid_f32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc4,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_cubeid_f32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc4,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_cubeid_f32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc4,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cubeid_f32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc4,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_cubeid_f32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc4,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cubeid_f32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc4,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_cubeid_f32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc4,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cubeid_f32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_cubeid_f32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xc4,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_cubeid_f32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xc4,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_cubeid_f32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xc4,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_cubeid_f32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xc4,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_cubeid_f32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_cubeid_f32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xc4,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_cubeid_f32 v0, -s0, s0, s0 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_cubeid_f32 v0, s0, -s0, s0 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_cubeid_f32 v0, s0, s0, -s0 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0x80]
+0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0x80
+
+# CHECK: v_cubeid_f32 v0, -s0, -s0, -s0 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0xe0]
+0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0xe0
+
+# CHECK: v_cubeid_f32 v0, |s0|, s0, s0 ; encoding: [0x00,0x01,0xc4,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xc4,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubeid_f32 v0, s0, |s0|, s0 ; encoding: [0x00,0x02,0xc4,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0xc4,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubeid_f32 v0, s0, s0, |s0| ; encoding: [0x00,0x04,0xc4,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x04,0xc4,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubeid_f32 v0, |s0|, |s0|, |s0| ; encoding: [0x00,0x07,0xc4,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x07,0xc4,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubeid_f32 v0, s0, s0, s0 clamp ; encoding: [0x00,0x80,0xc4,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xc4,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubeid_f32 v0, s0, s0, s0 mul:2 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_cubeid_f32 v0, s0, s0, s0 mul:4 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_cubeid_f32 v0, s0, s0, s0 div:2 ; encoding: [0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0xc4,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_cubesc_f32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubesc_f32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xc5,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xc5,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubesc_f32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xc5,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc5,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cubesc_f32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xc5,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc5,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cubesc_f32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xc5,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc5,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cubesc_f32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xc5,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc5,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cubesc_f32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xc5,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc5,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cubesc_f32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc5,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cubesc_f32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xc5,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc5,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cubesc_f32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc5,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_cubesc_f32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc5,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_cubesc_f32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc5,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cubesc_f32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc5,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_cubesc_f32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc5,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cubesc_f32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc5,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_cubesc_f32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc5,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cubesc_f32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_cubesc_f32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xc5,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_cubesc_f32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xc5,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_cubesc_f32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xc5,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_cubesc_f32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xc5,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_cubesc_f32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_cubesc_f32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xc5,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_cubesc_f32 v0, -s0, s0, s0 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_cubesc_f32 v0, s0, -s0, s0 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_cubesc_f32 v0, s0, s0, -s0 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0x80]
+0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0x80
+
+# CHECK: v_cubesc_f32 v0, -s0, -s0, -s0 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0xe0]
+0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0xe0
+
+# CHECK: v_cubesc_f32 v0, |s0|, s0, s0 ; encoding: [0x00,0x01,0xc5,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xc5,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubesc_f32 v0, s0, |s0|, s0 ; encoding: [0x00,0x02,0xc5,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0xc5,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubesc_f32 v0, s0, s0, |s0| ; encoding: [0x00,0x04,0xc5,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x04,0xc5,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubesc_f32 v0, |s0|, |s0|, |s0| ; encoding: [0x00,0x07,0xc5,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x07,0xc5,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubesc_f32 v0, s0, s0, s0 clamp ; encoding: [0x00,0x80,0xc5,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xc5,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubesc_f32 v0, s0, s0, s0 mul:2 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_cubesc_f32 v0, s0, s0, s0 mul:4 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_cubesc_f32 v0, s0, s0, s0 div:2 ; encoding: [0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0xc5,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_cubetc_f32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubetc_f32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xc6,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xc6,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubetc_f32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xc6,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc6,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cubetc_f32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xc6,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc6,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cubetc_f32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xc6,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc6,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cubetc_f32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xc6,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc6,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cubetc_f32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xc6,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc6,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cubetc_f32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc6,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cubetc_f32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xc6,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc6,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cubetc_f32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc6,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_cubetc_f32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc6,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_cubetc_f32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc6,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cubetc_f32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc6,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_cubetc_f32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc6,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cubetc_f32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc6,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_cubetc_f32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc6,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cubetc_f32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_cubetc_f32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xc6,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_cubetc_f32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xc6,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_cubetc_f32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xc6,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_cubetc_f32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xc6,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_cubetc_f32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_cubetc_f32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xc6,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_cubetc_f32 v0, -s0, s0, s0 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_cubetc_f32 v0, s0, -s0, s0 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_cubetc_f32 v0, s0, s0, -s0 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0x80]
+0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0x80
+
+# CHECK: v_cubetc_f32 v0, -s0, -s0, -s0 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0xe0]
+0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0xe0
+
+# CHECK: v_cubetc_f32 v0, |s0|, s0, s0 ; encoding: [0x00,0x01,0xc6,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xc6,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubetc_f32 v0, s0, |s0|, s0 ; encoding: [0x00,0x02,0xc6,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0xc6,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubetc_f32 v0, s0, s0, |s0| ; encoding: [0x00,0x04,0xc6,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x04,0xc6,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubetc_f32 v0, |s0|, |s0|, |s0| ; encoding: [0x00,0x07,0xc6,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x07,0xc6,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubetc_f32 v0, s0, s0, s0 clamp ; encoding: [0x00,0x80,0xc6,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xc6,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubetc_f32 v0, s0, s0, s0 mul:2 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_cubetc_f32 v0, s0, s0, s0 mul:4 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_cubetc_f32 v0, s0, s0, s0 div:2 ; encoding: [0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0xc6,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_cubema_f32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubema_f32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xc7,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xc7,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubema_f32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xc7,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc7,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cubema_f32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xc7,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc7,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cubema_f32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xc7,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc7,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cubema_f32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xc7,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc7,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cubema_f32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xc7,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc7,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cubema_f32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc7,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cubema_f32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xc7,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc7,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cubema_f32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc7,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_cubema_f32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc7,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_cubema_f32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc7,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cubema_f32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc7,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_cubema_f32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc7,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cubema_f32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc7,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_cubema_f32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc7,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cubema_f32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_cubema_f32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xc7,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_cubema_f32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xc7,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_cubema_f32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xc7,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_cubema_f32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xc7,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_cubema_f32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_cubema_f32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xc7,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_cubema_f32 v0, -s0, s0, s0 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_cubema_f32 v0, s0, -s0, s0 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_cubema_f32 v0, s0, s0, -s0 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0x80]
+0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0x80
+
+# CHECK: v_cubema_f32 v0, -s0, -s0, -s0 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0xe0]
+0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0xe0
+
+# CHECK: v_cubema_f32 v0, |s0|, s0, s0 ; encoding: [0x00,0x01,0xc7,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xc7,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubema_f32 v0, s0, |s0|, s0 ; encoding: [0x00,0x02,0xc7,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0xc7,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubema_f32 v0, s0, s0, |s0| ; encoding: [0x00,0x04,0xc7,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x04,0xc7,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubema_f32 v0, |s0|, |s0|, |s0| ; encoding: [0x00,0x07,0xc7,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x07,0xc7,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubema_f32 v0, s0, s0, s0 clamp ; encoding: [0x00,0x80,0xc7,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xc7,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cubema_f32 v0, s0, s0, s0 mul:2 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_cubema_f32 v0, s0, s0, s0 mul:4 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_cubema_f32 v0, s0, s0, s0 div:2 ; encoding: [0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0xc7,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_bfe_u32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xc8,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc8,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_bfe_u32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xc8,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xc8,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_bfe_u32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xc8,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc8,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_bfe_u32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xc8,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc8,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_bfe_u32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xc8,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc8,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_bfe_u32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xc8,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc8,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_bfe_u32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xc8,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc8,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_bfe_u32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xc8,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc8,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_bfe_u32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xc8,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc8,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_bfe_u32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xc8,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc8,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_bfe_u32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xc8,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc8,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_bfe_u32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xc8,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc8,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_bfe_u32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xc8,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc8,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_bfe_u32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xc8,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc8,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_bfe_u32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xc8,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc8,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_bfe_u32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xc8,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc8,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_bfe_u32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xc8,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xc8,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_bfe_u32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xc8,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xc8,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_bfe_u32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xc8,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xc8,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_bfe_u32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xc8,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xc8,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_bfe_u32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xc8,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xc8,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_bfe_u32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xc8,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xc8,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_bfe_u32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xc8,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xc8,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_bfe_i32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xc9,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc9,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_bfe_i32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xc9,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xc9,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_bfe_i32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xc9,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc9,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_bfe_i32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xc9,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc9,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_bfe_i32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xc9,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc9,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_bfe_i32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xc9,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc9,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_bfe_i32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xc9,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc9,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_bfe_i32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xc9,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc9,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_bfe_i32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xc9,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc9,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_bfe_i32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xc9,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc9,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_bfe_i32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xc9,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc9,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_bfe_i32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xc9,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc9,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_bfe_i32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xc9,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc9,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_bfe_i32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xc9,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc9,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_bfe_i32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xc9,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc9,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_bfe_i32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xc9,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc9,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_bfe_i32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xc9,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xc9,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_bfe_i32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xc9,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xc9,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_bfe_i32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xc9,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xc9,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_bfe_i32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xc9,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xc9,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_bfe_i32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xc9,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xc9,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_bfe_i32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xc9,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xc9,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_bfe_i32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xc9,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xc9,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_bfi_b32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xca,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xca,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_bfi_b32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xca,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xca,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_bfi_b32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xca,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xca,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_bfi_b32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xca,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xca,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_bfi_b32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xca,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xca,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_bfi_b32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xca,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xca,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_bfi_b32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xca,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xca,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_bfi_b32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xca,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xca,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_bfi_b32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xca,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xca,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_bfi_b32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xca,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xca,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_bfi_b32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xca,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xca,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_bfi_b32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xca,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xca,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_bfi_b32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xca,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xca,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_bfi_b32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xca,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xca,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_bfi_b32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xca,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xca,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_bfi_b32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xca,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xca,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_bfi_b32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xca,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xca,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_bfi_b32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xca,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xca,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_bfi_b32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xca,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xca,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_bfi_b32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xca,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xca,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_bfi_b32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xca,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xca,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_bfi_b32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xca,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xca,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_bfi_b32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xca,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xca,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_fma_f32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xcb,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xcb,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xcb,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xcb,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_fma_f32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xcb,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xcb,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_fma_f32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xcb,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xcb,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_fma_f32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xcb,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xcb,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_fma_f32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xcb,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xcb,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_fma_f32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xcb,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_fma_f32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xcb,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xcb,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_fma_f32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xcb,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_fma_f32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xcb,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_fma_f32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xcb,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_fma_f32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xcb,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_fma_f32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xcb,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_fma_f32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xcb,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_fma_f32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xcb,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_fma_f32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_fma_f32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xcb,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_fma_f32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xcb,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_fma_f32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xcb,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_fma_f32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xcb,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_fma_f32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_fma_f32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xcb,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_fma_f32 v0, -s0, s0, s0 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_fma_f32 v0, s0, -s0, s0 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_fma_f32 v0, s0, s0, -s0 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0x80]
+0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0x80
+
+# CHECK: v_fma_f32 v0, -s0, -s0, -s0 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0xe0]
+0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0xe0
+
+# CHECK: v_fma_f32 v0, |s0|, s0, s0 ; encoding: [0x00,0x01,0xcb,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xcb,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f32 v0, s0, |s0|, s0 ; encoding: [0x00,0x02,0xcb,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0xcb,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f32 v0, s0, s0, |s0| ; encoding: [0x00,0x04,0xcb,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x04,0xcb,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f32 v0, |s0|, |s0|, |s0| ; encoding: [0x00,0x07,0xcb,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x07,0xcb,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f32 v0, s0, s0, s0 clamp ; encoding: [0x00,0x80,0xcb,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xcb,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f32 v0, s0, s0, s0 mul:2 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_fma_f32 v0, s0, s0, s0 mul:4 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_fma_f32 v0, s0, s0, s0 div:2 ; encoding: [0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0xcb,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f64 v[254:255], s[0:1], s[0:1], s[0:1] ; encoding: [0xfe,0x00,0xcc,0xd1,0x00,0x00,0x00,0x00]
+0xfe,0x00,0xcc,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f64 v[0:1], 0, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xcc,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xcc,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_fma_f64 v[0:1], -1, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xcc,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xcc,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_fma_f64 v[0:1], 0.5, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xcc,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xcc,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_fma_f64 v[0:1], -4.0, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xcc,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xcc,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_fma_f64 v[0:1], v[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xcc,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_fma_f64 v[0:1], v[254:255], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xcc,0xd1,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xcc,0xd1,0xfe,0x01,0x00,0x00
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xcc,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xcc,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xcc,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xcc,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xcc,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xcc,0xd1,0x00,0xfc,0x03,0x00
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xcc,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xcc,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xcc,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0x00,0xf8,0x07]
+0x00,0x00,0xcc,0xd1,0x00,0x00,0xf8,0x07
+
+# CHECK: v_fma_f64 v[0:1], -s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0x80]
+0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0x80
+
+# CHECK: v_fma_f64 v[0:1], -s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0xe0]
+0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0xe0
+
+# CHECK: v_fma_f64 v[0:1], |s[0:1]|, s[0:1], s[0:1] ; encoding: [0x00,0x01,0xcc,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xcc,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], |s[0:1]|, s[0:1] ; encoding: [0x00,0x02,0xcc,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0xcc,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], s[0:1], |s[0:1]| ; encoding: [0x00,0x04,0xcc,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x04,0xcc,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f64 v[0:1], |s[0:1]|, |s[0:1]|, |s[0:1]| ; encoding: [0x00,0x07,0xcc,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x07,0xcc,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0xcc,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xcc,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], s[0:1], s[0:1] mul:2 ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], s[0:1], s[0:1] mul:4 ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_fma_f64 v[0:1], s[0:1], s[0:1], s[0:1] div:2 ; encoding: [0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0xcc,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_lerp_u8 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xcd,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xcd,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_lerp_u8 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xcd,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xcd,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_lerp_u8 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xcd,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xcd,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_lerp_u8 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xcd,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xcd,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_lerp_u8 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xcd,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xcd,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_lerp_u8 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xcd,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xcd,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_lerp_u8 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xcd,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xcd,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_lerp_u8 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xcd,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xcd,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_lerp_u8 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xcd,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xcd,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_lerp_u8 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xcd,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xcd,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_lerp_u8 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xcd,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xcd,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_lerp_u8 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xcd,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xcd,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_lerp_u8 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xcd,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xcd,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_lerp_u8 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xcd,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xcd,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_lerp_u8 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xcd,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xcd,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_lerp_u8 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xcd,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xcd,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_lerp_u8 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xcd,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xcd,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_lerp_u8 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xcd,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xcd,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_lerp_u8 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xcd,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xcd,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_lerp_u8 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xcd,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xcd,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_lerp_u8 v0, s0, s0, scc ; encoding: [0x00,0x00,0xcd,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xcd,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_lerp_u8 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xcd,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xcd,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_lerp_u8 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xcd,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xcd,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_alignbit_b32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xce,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xce,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_alignbit_b32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xce,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xce,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_alignbit_b32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xce,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xce,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_alignbit_b32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xce,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xce,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_alignbit_b32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xce,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xce,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_alignbit_b32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xce,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xce,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_alignbit_b32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xce,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xce,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_alignbit_b32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xce,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xce,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_alignbit_b32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xce,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xce,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_alignbit_b32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xce,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xce,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_alignbit_b32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xce,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xce,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_alignbit_b32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xce,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xce,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_alignbit_b32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xce,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xce,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_alignbit_b32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xce,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xce,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_alignbit_b32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xce,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xce,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_alignbit_b32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xce,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xce,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_alignbit_b32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xce,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xce,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_alignbit_b32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xce,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xce,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_alignbit_b32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xce,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xce,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_alignbit_b32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xce,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xce,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_alignbit_b32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xce,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xce,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_alignbit_b32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xce,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xce,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_alignbit_b32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xce,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xce,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_alignbyte_b32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xcf,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xcf,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_alignbyte_b32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xcf,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xcf,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_alignbyte_b32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xcf,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xcf,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_alignbyte_b32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xcf,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xcf,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_alignbyte_b32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xcf,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xcf,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_alignbyte_b32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xcf,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xcf,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_alignbyte_b32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xcf,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xcf,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_alignbyte_b32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xcf,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xcf,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_alignbyte_b32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xcf,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xcf,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_alignbyte_b32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xcf,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xcf,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_alignbyte_b32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xcf,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xcf,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_alignbyte_b32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xcf,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xcf,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_alignbyte_b32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xcf,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xcf,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_alignbyte_b32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xcf,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xcf,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_alignbyte_b32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xcf,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xcf,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_alignbyte_b32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xcf,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xcf,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_alignbyte_b32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xcf,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xcf,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_alignbyte_b32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xcf,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xcf,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_alignbyte_b32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xcf,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xcf,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_alignbyte_b32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xcf,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xcf,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_alignbyte_b32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xcf,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xcf,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_alignbyte_b32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xcf,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xcf,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_alignbyte_b32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xcf,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xcf,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_min3_f32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min3_f32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xd0,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xd0,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min3_f32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xd0,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd0,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_min3_f32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xd0,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd0,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_min3_f32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xd0,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd0,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_min3_f32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xd0,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd0,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_min3_f32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xd0,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd0,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_min3_f32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd0,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_min3_f32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xd0,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd0,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_min3_f32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd0,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_min3_f32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd0,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_min3_f32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd0,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_min3_f32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd0,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_min3_f32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd0,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_min3_f32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd0,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_min3_f32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd0,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_min3_f32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_min3_f32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xd0,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_min3_f32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xd0,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_min3_f32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xd0,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_min3_f32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xd0,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_min3_f32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_min3_f32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xd0,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_min3_f32 v0, -s0, s0, s0 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_min3_f32 v0, s0, -s0, s0 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_min3_f32 v0, s0, s0, -s0 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0x80]
+0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0x80
+
+# CHECK: v_min3_f32 v0, -s0, -s0, -s0 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0xe0]
+0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0xe0
+
+# CHECK: v_min3_f32 v0, |s0|, s0, s0 ; encoding: [0x00,0x01,0xd0,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xd0,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min3_f32 v0, s0, |s0|, s0 ; encoding: [0x00,0x02,0xd0,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0xd0,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min3_f32 v0, s0, s0, |s0| ; encoding: [0x00,0x04,0xd0,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x04,0xd0,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min3_f32 v0, |s0|, |s0|, |s0| ; encoding: [0x00,0x07,0xd0,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x07,0xd0,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min3_f32 v0, s0, s0, s0 clamp ; encoding: [0x00,0x80,0xd0,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xd0,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min3_f32 v0, s0, s0, s0 mul:2 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_min3_f32 v0, s0, s0, s0 mul:4 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_min3_f32 v0, s0, s0, s0 div:2 ; encoding: [0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0xd0,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_min3_i32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xd1,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd1,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min3_i32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xd1,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xd1,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min3_i32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xd1,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd1,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_min3_i32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xd1,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd1,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_min3_i32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xd1,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd1,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_min3_i32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xd1,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd1,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_min3_i32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xd1,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd1,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_min3_i32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xd1,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd1,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_min3_i32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xd1,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd1,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_min3_i32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xd1,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd1,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_min3_i32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xd1,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd1,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_min3_i32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xd1,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd1,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_min3_i32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xd1,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd1,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_min3_i32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xd1,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd1,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_min3_i32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xd1,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd1,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_min3_i32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xd1,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd1,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_min3_i32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xd1,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xd1,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_min3_i32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xd1,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xd1,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_min3_i32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xd1,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xd1,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_min3_i32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xd1,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xd1,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_min3_i32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xd1,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xd1,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_min3_i32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xd1,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xd1,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_min3_i32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xd1,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xd1,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_min3_u32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xd2,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd2,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min3_u32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xd2,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xd2,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_min3_u32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xd2,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd2,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_min3_u32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xd2,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd2,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_min3_u32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xd2,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd2,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_min3_u32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xd2,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd2,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_min3_u32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xd2,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd2,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_min3_u32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xd2,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd2,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_min3_u32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xd2,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd2,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_min3_u32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xd2,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd2,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_min3_u32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xd2,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd2,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_min3_u32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xd2,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd2,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_min3_u32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xd2,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd2,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_min3_u32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xd2,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd2,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_min3_u32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xd2,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd2,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_min3_u32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xd2,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd2,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_min3_u32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xd2,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xd2,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_min3_u32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xd2,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xd2,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_min3_u32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xd2,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xd2,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_min3_u32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xd2,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xd2,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_min3_u32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xd2,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xd2,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_min3_u32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xd2,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xd2,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_min3_u32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xd2,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xd2,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_max3_f32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max3_f32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xd3,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xd3,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max3_f32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xd3,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd3,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_max3_f32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xd3,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd3,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_max3_f32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xd3,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd3,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_max3_f32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xd3,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd3,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_max3_f32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xd3,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd3,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_max3_f32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd3,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_max3_f32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xd3,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd3,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_max3_f32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd3,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_max3_f32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd3,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_max3_f32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd3,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_max3_f32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd3,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_max3_f32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd3,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_max3_f32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd3,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_max3_f32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd3,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_max3_f32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_max3_f32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xd3,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_max3_f32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xd3,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_max3_f32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xd3,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_max3_f32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xd3,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_max3_f32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_max3_f32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xd3,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_max3_f32 v0, -s0, s0, s0 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_max3_f32 v0, s0, -s0, s0 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_max3_f32 v0, s0, s0, -s0 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0x80]
+0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0x80
+
+# CHECK: v_max3_f32 v0, -s0, -s0, -s0 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0xe0]
+0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0xe0
+
+# CHECK: v_max3_f32 v0, |s0|, s0, s0 ; encoding: [0x00,0x01,0xd3,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xd3,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max3_f32 v0, s0, |s0|, s0 ; encoding: [0x00,0x02,0xd3,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0xd3,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max3_f32 v0, s0, s0, |s0| ; encoding: [0x00,0x04,0xd3,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x04,0xd3,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max3_f32 v0, |s0|, |s0|, |s0| ; encoding: [0x00,0x07,0xd3,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x07,0xd3,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max3_f32 v0, s0, s0, s0 clamp ; encoding: [0x00,0x80,0xd3,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xd3,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max3_f32 v0, s0, s0, s0 mul:2 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_max3_f32 v0, s0, s0, s0 mul:4 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_max3_f32 v0, s0, s0, s0 div:2 ; encoding: [0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0xd3,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_max3_i32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xd4,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd4,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max3_i32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xd4,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xd4,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max3_i32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xd4,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd4,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_max3_i32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xd4,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd4,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_max3_i32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xd4,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd4,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_max3_i32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xd4,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd4,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_max3_i32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xd4,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd4,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_max3_i32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xd4,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd4,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_max3_i32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xd4,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd4,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_max3_i32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xd4,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd4,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_max3_i32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xd4,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd4,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_max3_i32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xd4,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd4,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_max3_i32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xd4,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd4,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_max3_i32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xd4,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd4,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_max3_i32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xd4,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd4,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_max3_i32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xd4,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd4,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_max3_i32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xd4,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xd4,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_max3_i32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xd4,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xd4,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_max3_i32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xd4,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xd4,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_max3_i32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xd4,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xd4,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_max3_i32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xd4,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xd4,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_max3_i32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xd4,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xd4,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_max3_i32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xd4,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xd4,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_max3_u32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xd5,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd5,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max3_u32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xd5,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xd5,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_max3_u32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xd5,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd5,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_max3_u32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xd5,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd5,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_max3_u32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xd5,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd5,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_max3_u32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xd5,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd5,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_max3_u32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xd5,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd5,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_max3_u32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xd5,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd5,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_max3_u32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xd5,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd5,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_max3_u32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xd5,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd5,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_max3_u32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xd5,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd5,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_max3_u32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xd5,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd5,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_max3_u32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xd5,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd5,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_max3_u32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xd5,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd5,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_max3_u32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xd5,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd5,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_max3_u32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xd5,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd5,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_max3_u32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xd5,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xd5,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_max3_u32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xd5,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xd5,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_max3_u32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xd5,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xd5,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_max3_u32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xd5,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xd5,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_max3_u32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xd5,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xd5,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_max3_u32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xd5,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xd5,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_max3_u32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xd5,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xd5,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_med3_f32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_med3_f32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xd6,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xd6,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_med3_f32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xd6,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd6,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_med3_f32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xd6,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd6,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_med3_f32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xd6,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd6,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_med3_f32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xd6,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd6,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_med3_f32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xd6,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd6,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_med3_f32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd6,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_med3_f32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xd6,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd6,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_med3_f32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd6,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_med3_f32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd6,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_med3_f32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd6,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_med3_f32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd6,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_med3_f32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd6,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_med3_f32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd6,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_med3_f32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd6,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_med3_f32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_med3_f32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xd6,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_med3_f32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xd6,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_med3_f32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xd6,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_med3_f32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xd6,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_med3_f32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_med3_f32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xd6,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_med3_f32 v0, -s0, s0, s0 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_med3_f32 v0, s0, -s0, s0 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_med3_f32 v0, s0, s0, -s0 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0x80]
+0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0x80
+
+# CHECK: v_med3_f32 v0, -s0, -s0, -s0 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0xe0]
+0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0xe0
+
+# CHECK: v_med3_f32 v0, |s0|, s0, s0 ; encoding: [0x00,0x01,0xd6,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xd6,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_med3_f32 v0, s0, |s0|, s0 ; encoding: [0x00,0x02,0xd6,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0xd6,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_med3_f32 v0, s0, s0, |s0| ; encoding: [0x00,0x04,0xd6,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x04,0xd6,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_med3_f32 v0, |s0|, |s0|, |s0| ; encoding: [0x00,0x07,0xd6,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x07,0xd6,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_med3_f32 v0, s0, s0, s0 clamp ; encoding: [0x00,0x80,0xd6,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xd6,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_med3_f32 v0, s0, s0, s0 mul:2 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_med3_f32 v0, s0, s0, s0 mul:4 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_med3_f32 v0, s0, s0, s0 div:2 ; encoding: [0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0xd6,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_med3_i32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xd7,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd7,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_med3_i32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xd7,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xd7,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_med3_i32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xd7,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd7,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_med3_i32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xd7,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd7,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_med3_i32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xd7,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd7,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_med3_i32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xd7,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd7,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_med3_i32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xd7,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd7,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_med3_i32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xd7,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd7,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_med3_i32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xd7,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd7,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_med3_i32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xd7,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd7,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_med3_i32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xd7,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd7,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_med3_i32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xd7,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd7,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_med3_i32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xd7,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd7,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_med3_i32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xd7,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd7,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_med3_i32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xd7,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd7,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_med3_i32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xd7,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd7,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_med3_i32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xd7,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xd7,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_med3_i32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xd7,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xd7,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_med3_i32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xd7,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xd7,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_med3_i32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xd7,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xd7,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_med3_i32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xd7,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xd7,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_med3_i32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xd7,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xd7,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_med3_i32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xd7,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xd7,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_med3_u32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xd8,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd8,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_med3_u32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xd8,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xd8,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_med3_u32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xd8,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd8,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_med3_u32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xd8,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd8,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_med3_u32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xd8,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd8,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_med3_u32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xd8,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd8,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_med3_u32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xd8,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd8,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_med3_u32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xd8,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd8,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_med3_u32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xd8,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd8,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_med3_u32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xd8,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd8,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_med3_u32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xd8,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd8,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_med3_u32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xd8,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd8,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_med3_u32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xd8,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd8,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_med3_u32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xd8,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd8,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_med3_u32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xd8,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd8,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_med3_u32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xd8,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd8,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_med3_u32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xd8,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xd8,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_med3_u32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xd8,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xd8,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_med3_u32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xd8,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xd8,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_med3_u32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xd8,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xd8,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_med3_u32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xd8,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xd8,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_med3_u32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xd8,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xd8,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_med3_u32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xd8,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xd8,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_sad_u8 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xd9,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd9,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sad_u8 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xd9,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xd9,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sad_u8 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xd9,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd9,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_sad_u8 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xd9,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd9,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_sad_u8 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xd9,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd9,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_sad_u8 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xd9,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd9,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_sad_u8 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xd9,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd9,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_sad_u8 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xd9,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd9,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_sad_u8 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xd9,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd9,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_sad_u8 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xd9,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd9,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_sad_u8 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xd9,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd9,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_sad_u8 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xd9,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd9,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_sad_u8 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xd9,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd9,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_sad_u8 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xd9,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd9,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_sad_u8 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xd9,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd9,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_sad_u8 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xd9,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd9,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_sad_u8 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xd9,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xd9,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_sad_u8 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xd9,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xd9,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_sad_u8 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xd9,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xd9,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_sad_u8 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xd9,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xd9,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_sad_u8 v0, s0, s0, scc ; encoding: [0x00,0x00,0xd9,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xd9,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_sad_u8 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xd9,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xd9,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_sad_u8 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xd9,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xd9,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_sad_hi_u8 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xda,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xda,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sad_hi_u8 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xda,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xda,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sad_hi_u8 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xda,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xda,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_sad_hi_u8 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xda,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xda,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_sad_hi_u8 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xda,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xda,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_sad_hi_u8 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xda,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xda,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_sad_hi_u8 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xda,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xda,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_sad_hi_u8 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xda,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xda,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_sad_hi_u8 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xda,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xda,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_sad_hi_u8 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xda,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xda,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_sad_hi_u8 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xda,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xda,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_sad_hi_u8 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xda,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xda,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_sad_hi_u8 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xda,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xda,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_sad_hi_u8 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xda,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xda,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_sad_hi_u8 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xda,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xda,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_sad_hi_u8 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xda,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xda,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_sad_hi_u8 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xda,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xda,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_sad_hi_u8 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xda,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xda,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_sad_hi_u8 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xda,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xda,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_sad_hi_u8 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xda,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xda,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_sad_hi_u8 v0, s0, s0, scc ; encoding: [0x00,0x00,0xda,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xda,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_sad_hi_u8 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xda,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xda,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_sad_hi_u8 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xda,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xda,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_sad_u16 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xdb,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xdb,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sad_u16 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xdb,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xdb,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sad_u16 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xdb,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xdb,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_sad_u16 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xdb,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xdb,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_sad_u16 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xdb,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xdb,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_sad_u16 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xdb,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xdb,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_sad_u16 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xdb,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xdb,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_sad_u16 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xdb,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xdb,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_sad_u16 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xdb,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xdb,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_sad_u16 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xdb,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xdb,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_sad_u16 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xdb,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xdb,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_sad_u16 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xdb,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xdb,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_sad_u16 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xdb,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xdb,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_sad_u16 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xdb,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xdb,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_sad_u16 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xdb,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xdb,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_sad_u16 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xdb,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xdb,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_sad_u16 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xdb,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xdb,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_sad_u16 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xdb,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xdb,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_sad_u16 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xdb,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xdb,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_sad_u16 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xdb,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xdb,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_sad_u16 v0, s0, s0, scc ; encoding: [0x00,0x00,0xdb,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xdb,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_sad_u16 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xdb,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xdb,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_sad_u16 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xdb,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xdb,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_sad_u32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xdc,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xdc,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sad_u32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xdc,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xdc,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_sad_u32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xdc,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xdc,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_sad_u32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xdc,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xdc,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_sad_u32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xdc,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xdc,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_sad_u32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xdc,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xdc,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_sad_u32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xdc,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xdc,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_sad_u32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xdc,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xdc,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_sad_u32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xdc,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xdc,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_sad_u32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xdc,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xdc,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_sad_u32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xdc,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xdc,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_sad_u32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xdc,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xdc,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_sad_u32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xdc,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xdc,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_sad_u32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xdc,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xdc,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_sad_u32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xdc,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xdc,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_sad_u32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xdc,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xdc,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_sad_u32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xdc,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xdc,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_sad_u32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xdc,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xdc,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_sad_u32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xdc,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xdc,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_sad_u32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xdc,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xdc,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_sad_u32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xdc,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xdc,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_sad_u32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xdc,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xdc,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_sad_u32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xdc,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xdc,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_cvt_pk_u8_f32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xdd,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xdd,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_pk_u8_f32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xdd,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xdd,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_pk_u8_f32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xdd,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xdd,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_cvt_pk_u8_f32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xdd,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xdd,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cvt_pk_u8_f32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xdd,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xdd,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cvt_pk_u8_f32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xdd,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xdd,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cvt_pk_u8_f32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xdd,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xdd,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cvt_pk_u8_f32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xdd,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xdd,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_cvt_pk_u8_f32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xdd,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xdd,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_cvt_pk_u8_f32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xdd,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xdd,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_cvt_pk_u8_f32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xdd,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xdd,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_cvt_pk_u8_f32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xdd,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xdd,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cvt_pk_u8_f32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xdd,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xdd,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_cvt_pk_u8_f32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xdd,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xdd,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cvt_pk_u8_f32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xdd,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xdd,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_cvt_pk_u8_f32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xdd,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xdd,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cvt_pk_u8_f32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xdd,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xdd,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_cvt_pk_u8_f32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xdd,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xdd,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_cvt_pk_u8_f32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xdd,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xdd,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_cvt_pk_u8_f32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xdd,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xdd,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_cvt_pk_u8_f32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xdd,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xdd,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_cvt_pk_u8_f32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xdd,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xdd,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_cvt_pk_u8_f32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xdd,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xdd,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_cvt_pk_u8_f32 v0, -s0, s0, s0 ; encoding: [0x00,0x00,0xdd,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xdd,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_cvt_pk_u8_f32 v0, |s0|, s0, s0 ; encoding: [0x00,0x01,0xdd,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xdd,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_cvt_pk_u8_f32 v0, s0, s0, s0 clamp ; encoding: [0x00,0x80,0xdd,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xdd,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xde,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xde,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xde,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xde,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xde,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xde,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xde,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xde,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xde,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xde,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xde,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xde,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xde,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_div_fixup_f32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xde,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xde,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_div_fixup_f32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xde,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_div_fixup_f32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xde,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_div_fixup_f32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xde,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_div_fixup_f32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xde,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_div_fixup_f32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xde,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_div_fixup_f32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xde,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_div_fixup_f32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xde,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_div_fixup_f32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_div_fixup_f32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xde,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_div_fixup_f32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xde,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_div_fixup_f32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xde,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_div_fixup_f32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xde,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_div_fixup_f32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_div_fixup_f32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xde,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_div_fixup_f32 v0, -s0, s0, s0 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_div_fixup_f32 v0, s0, -s0, s0 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_div_fixup_f32 v0, s0, s0, -s0 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0x80]
+0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0x80
+
+# CHECK: v_div_fixup_f32 v0, -s0, -s0, -s0 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0xe0]
+0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0xe0
+
+# CHECK: v_div_fixup_f32 v0, |s0|, s0, s0 ; encoding: [0x00,0x01,0xde,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xde,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f32 v0, s0, |s0|, s0 ; encoding: [0x00,0x02,0xde,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0xde,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f32 v0, s0, s0, |s0| ; encoding: [0x00,0x04,0xde,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x04,0xde,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f32 v0, |s0|, |s0|, |s0| ; encoding: [0x00,0x07,0xde,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x07,0xde,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f32 v0, s0, s0, s0 clamp ; encoding: [0x00,0x80,0xde,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xde,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f32 v0, s0, s0, s0 mul:2 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_div_fixup_f32 v0, s0, s0, s0 mul:4 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_div_fixup_f32 v0, s0, s0, s0 div:2 ; encoding: [0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0xde,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f64 v[254:255], s[0:1], s[0:1], s[0:1] ; encoding: [0xfe,0x00,0xdf,0xd1,0x00,0x00,0x00,0x00]
+0xfe,0x00,0xdf,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f64 v[0:1], 0, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xdf,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xdf,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f64 v[0:1], -1, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xdf,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xdf,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f64 v[0:1], 0.5, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xdf,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xdf,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f64 v[0:1], -4.0, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xdf,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xdf,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f64 v[0:1], v[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xdf,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_div_fixup_f64 v[0:1], v[254:255], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xdf,0xd1,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xdf,0xd1,0xfe,0x01,0x00,0x00
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xdf,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xdf,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xdf,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xdf,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xdf,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xdf,0xd1,0x00,0xfc,0x03,0x00
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xdf,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xdf,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xdf,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0x00,0xf8,0x07]
+0x00,0x00,0xdf,0xd1,0x00,0x00,0xf8,0x07
+
+# CHECK: v_div_fixup_f64 v[0:1], -s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0x80]
+0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0x80
+
+# CHECK: v_div_fixup_f64 v[0:1], -s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0xe0]
+0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0xe0
+
+# CHECK: v_div_fixup_f64 v[0:1], |s[0:1]|, s[0:1], s[0:1] ; encoding: [0x00,0x01,0xdf,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xdf,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], |s[0:1]|, s[0:1] ; encoding: [0x00,0x02,0xdf,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0xdf,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], s[0:1], |s[0:1]| ; encoding: [0x00,0x04,0xdf,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x04,0xdf,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f64 v[0:1], |s[0:1]|, |s[0:1]|, |s[0:1]| ; encoding: [0x00,0x07,0xdf,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x07,0xdf,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0xdf,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xdf,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], s[0:1], s[0:1] mul:2 ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], s[0:1], s[0:1] mul:4 ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_div_fixup_f64 v[0:1], s[0:1], s[0:1], s[0:1] div:2 ; encoding: [0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0xdf,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_div_scale_f32 v0, vcc, s0, s0, s0 ; encoding: [0x00,0x6a,0xe0,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x6a,0xe0,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_scale_f32 v255, vcc, s0, s0, s0 ; encoding: [0xff,0x6a,0xe0,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x6a,0xe0,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_scale_f32 v0, vcc, 0, s0, s0 ; encoding: [0x00,0x6a,0xe0,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x6a,0xe0,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_div_scale_f32 v0, vcc, -1, s0, s0 ; encoding: [0x00,0x6a,0xe0,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x6a,0xe0,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_div_scale_f32 v0, vcc, 0.5, s0, s0 ; encoding: [0x00,0x6a,0xe0,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x6a,0xe0,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_div_scale_f32 v0, vcc, -4.0, s0, s0 ; encoding: [0x00,0x6a,0xe0,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x6a,0xe0,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_div_scale_f32 v0, vcc, scc, s0, s0 ; encoding: [0x00,0x6a,0xe0,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x6a,0xe0,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_div_scale_f32 v0, vcc, v0, s0, s0 ; encoding: [0x00,0x6a,0xe0,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x6a,0xe0,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_div_scale_f32 v0, vcc, v255, s0, s0 ; encoding: [0x00,0x6a,0xe0,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x6a,0xe0,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_div_scale_f32 v0, vcc, s0, 0, s0 ; encoding: [0x00,0x6a,0xe0,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x6a,0xe0,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_div_scale_f32 v0, vcc, s0, -1, s0 ; encoding: [0x00,0x6a,0xe0,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x6a,0xe0,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_div_scale_f32 v0, vcc, s0, 0.5, s0 ; encoding: [0x00,0x6a,0xe0,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x6a,0xe0,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_div_scale_f32 v0, vcc, s0, -4.0, s0 ; encoding: [0x00,0x6a,0xe0,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x6a,0xe0,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_div_scale_f32 v0, vcc, s0, scc, s0 ; encoding: [0x00,0x6a,0xe0,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x6a,0xe0,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_div_scale_f32 v0, vcc, s0, v0, s0 ; encoding: [0x00,0x6a,0xe0,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x6a,0xe0,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_div_scale_f32 v0, vcc, s0, v255, s0 ; encoding: [0x00,0x6a,0xe0,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x6a,0xe0,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_div_scale_f32 v0, vcc, s0, s0, 0 ; encoding: [0x00,0x6a,0xe0,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x6a,0xe0,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_div_scale_f32 v0, vcc, s0, s0, -1 ; encoding: [0x00,0x6a,0xe0,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x6a,0xe0,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_div_scale_f32 v0, vcc, s0, s0, 0.5 ; encoding: [0x00,0x6a,0xe0,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x6a,0xe0,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_div_scale_f32 v0, vcc, s0, s0, -4.0 ; encoding: [0x00,0x6a,0xe0,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x6a,0xe0,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_div_scale_f32 v0, vcc, s0, s0, scc ; encoding: [0x00,0x6a,0xe0,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x6a,0xe0,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_div_scale_f32 v0, vcc, s0, s0, v0 ; encoding: [0x00,0x6a,0xe0,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x6a,0xe0,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_div_scale_f32 v0, vcc, s0, s0, v255 ; encoding: [0x00,0x6a,0xe0,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x6a,0xe0,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x6a,0xe1,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x6a,0xe1,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_scale_f64 v[254:255], vcc, s[0:1], s[0:1], s[0:1] ; encoding: [0xfe,0x6a,0xe1,0xd1,0x00,0x00,0x00,0x00]
+0xfe,0x6a,0xe1,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, 0, s[0:1], s[0:1] ; encoding: [0x00,0x6a,0xe1,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x6a,0xe1,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, -1, s[0:1], s[0:1] ; encoding: [0x00,0x6a,0xe1,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x6a,0xe1,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, 0.5, s[0:1], s[0:1] ; encoding: [0x00,0x6a,0xe1,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x6a,0xe1,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, -4.0, s[0:1], s[0:1] ; encoding: [0x00,0x6a,0xe1,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x6a,0xe1,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, v[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x6a,0xe1,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x6a,0xe1,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, v[254:255], s[0:1], s[0:1] ; encoding: [0x00,0x6a,0xe1,0xd1,0xfe,0x01,0x00,0x00]
+0x00,0x6a,0xe1,0xd1,0xfe,0x01,0x00,0x00
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, s[0:1], 0, s[0:1] ; encoding: [0x00,0x6a,0xe1,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x6a,0xe1,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, s[0:1], -1, s[0:1] ; encoding: [0x00,0x6a,0xe1,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x6a,0xe1,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x6a,0xe1,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x6a,0xe1,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x6a,0xe1,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x6a,0xe1,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x6a,0xe1,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x6a,0xe1,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x6a,0xe1,0xd1,0x00,0xfc,0x03,0x00]
+0x00,0x6a,0xe1,0xd1,0x00,0xfc,0x03,0x00
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, s[0:1], s[0:1], 0 ; encoding: [0x00,0x6a,0xe1,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x6a,0xe1,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, s[0:1], s[0:1], -1 ; encoding: [0x00,0x6a,0xe1,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x6a,0xe1,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x6a,0xe1,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x6a,0xe1,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x6a,0xe1,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x6a,0xe1,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x6a,0xe1,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x6a,0xe1,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_div_scale_f64 v[0:1], vcc, s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x6a,0xe1,0xd1,0x00,0x00,0xf8,0x07]
+0x00,0x6a,0xe1,0xd1,0x00,0x00,0xf8,0x07
+
+# CHECK: v_div_fmas_f32 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f32 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xe2,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xe2,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f32 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xe2,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xe2,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f32 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xe2,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xe2,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f32 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xe2,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xe2,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f32 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xe2,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xe2,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f32 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xe2,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xe2,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f32 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xe2,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_div_fmas_f32 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xe2,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xe2,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_div_fmas_f32 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xe2,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_div_fmas_f32 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xe2,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_div_fmas_f32 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xe2,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_div_fmas_f32 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xe2,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_div_fmas_f32 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xe2,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_div_fmas_f32 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xe2,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_div_fmas_f32 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xe2,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_div_fmas_f32 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_div_fmas_f32 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xe2,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_div_fmas_f32 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xe2,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_div_fmas_f32 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xe2,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_div_fmas_f32 v0, s0, s0, scc ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xe2,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_div_fmas_f32 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_div_fmas_f32 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xe2,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_div_fmas_f32 v0, -s0, s0, s0 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_div_fmas_f32 v0, s0, -s0, s0 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_div_fmas_f32 v0, s0, s0, -s0 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0x80]
+0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0x80
+
+# CHECK: v_div_fmas_f32 v0, -s0, -s0, -s0 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0xe0]
+0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0xe0
+
+# CHECK: v_div_fmas_f32 v0, |s0|, s0, s0 ; encoding: [0x00,0x01,0xe2,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xe2,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f32 v0, s0, |s0|, s0 ; encoding: [0x00,0x02,0xe2,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0xe2,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f32 v0, s0, s0, |s0| ; encoding: [0x00,0x04,0xe2,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x04,0xe2,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f32 v0, |s0|, |s0|, |s0| ; encoding: [0x00,0x07,0xe2,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x07,0xe2,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f32 v0, s0, s0, s0 clamp ; encoding: [0x00,0x80,0xe2,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xe2,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f32 v0, s0, s0, s0 mul:2 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_div_fmas_f32 v0, s0, s0, s0 mul:4 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_div_fmas_f32 v0, s0, s0, s0 div:2 ; encoding: [0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0xe2,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f64 v[254:255], s[0:1], s[0:1], s[0:1] ; encoding: [0xfe,0x00,0xe3,0xd1,0x00,0x00,0x00,0x00]
+0xfe,0x00,0xe3,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f64 v[0:1], 0, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe3,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xe3,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f64 v[0:1], -1, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe3,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xe3,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f64 v[0:1], 0.5, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe3,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xe3,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f64 v[0:1], -4.0, s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe3,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xe3,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f64 v[0:1], v[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xe3,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_div_fmas_f64 v[0:1], v[254:255], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe3,0xd1,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xe3,0xd1,0xfe,0x01,0x00,0x00
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xe3,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xe3,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xe3,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xe3,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xe3,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xe3,0xd1,0x00,0xfc,0x03,0x00
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xe3,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xe3,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xe3,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0x00,0xf8,0x07]
+0x00,0x00,0xe3,0xd1,0x00,0x00,0xf8,0x07
+
+# CHECK: v_div_fmas_f64 v[0:1], -s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0x80]
+0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0x80
+
+# CHECK: v_div_fmas_f64 v[0:1], -s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0xe0]
+0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0xe0
+
+# CHECK: v_div_fmas_f64 v[0:1], |s[0:1]|, s[0:1], s[0:1] ; encoding: [0x00,0x01,0xe3,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xe3,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], |s[0:1]|, s[0:1] ; encoding: [0x00,0x02,0xe3,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0xe3,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], s[0:1], |s[0:1]| ; encoding: [0x00,0x04,0xe3,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x04,0xe3,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f64 v[0:1], |s[0:1]|, |s[0:1]|, |s[0:1]| ; encoding: [0x00,0x07,0xe3,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x07,0xe3,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0xe3,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xe3,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], s[0:1], s[0:1] mul:2 ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0x08]
+0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0x08
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], s[0:1], s[0:1] mul:4 ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0x10]
+0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0x10
+
+# CHECK: v_div_fmas_f64 v[0:1], s[0:1], s[0:1], s[0:1] div:2 ; encoding: [0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0x18]
+0x00,0x00,0xe3,0xd1,0x00,0x00,0x00,0x18
+
+# CHECK: v_msad_u8 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xe4,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe4,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_msad_u8 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xe4,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xe4,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_msad_u8 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xe4,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xe4,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_msad_u8 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xe4,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xe4,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_msad_u8 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xe4,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xe4,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_msad_u8 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xe4,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xe4,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_msad_u8 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xe4,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xe4,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_msad_u8 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xe4,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xe4,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_msad_u8 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xe4,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xe4,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_msad_u8 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xe4,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xe4,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_msad_u8 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xe4,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xe4,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_msad_u8 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xe4,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xe4,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_msad_u8 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xe4,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xe4,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_msad_u8 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xe4,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xe4,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_msad_u8 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xe4,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xe4,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_msad_u8 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xe4,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xe4,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_msad_u8 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xe4,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xe4,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_msad_u8 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xe4,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xe4,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_msad_u8 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xe4,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xe4,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_msad_u8 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xe4,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xe4,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_msad_u8 v0, s0, s0, scc ; encoding: [0x00,0x00,0xe4,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xe4,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_msad_u8 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xe4,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xe4,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_msad_u8 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xe4,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xe4,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], s[0:1], s0, s[0:1] ; encoding: [0x00,0x00,0xe5,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe5,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_qsad_pk_u16_u8 v[254:255], s[0:1], s0, s[0:1] ; encoding: [0xfe,0x00,0xe5,0xd1,0x00,0x00,0x00,0x00]
+0xfe,0x00,0xe5,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], 0, s0, s[0:1] ; encoding: [0x00,0x00,0xe5,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xe5,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], -1, s0, s[0:1] ; encoding: [0x00,0x00,0xe5,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xe5,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], 0.5, s0, s[0:1] ; encoding: [0x00,0x00,0xe5,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xe5,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], -4.0, s0, s[0:1] ; encoding: [0x00,0x00,0xe5,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xe5,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], v[0:1], s0, s[0:1] ; encoding: [0x00,0x00,0xe5,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xe5,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], v[254:255], s0, s[0:1] ; encoding: [0x00,0x00,0xe5,0xd1,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xe5,0xd1,0xfe,0x01,0x00,0x00
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xe5,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xe5,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xe5,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xe5,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xe5,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xe5,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xe5,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xe5,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], s[0:1], scc, s[0:1] ; encoding: [0x00,0x00,0xe5,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xe5,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], s[0:1], v0, s[0:1] ; encoding: [0x00,0x00,0xe5,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xe5,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], s[0:1], v255, s[0:1] ; encoding: [0x00,0x00,0xe5,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xe5,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], s[0:1], s0, 0 ; encoding: [0x00,0x00,0xe5,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xe5,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], s[0:1], s0, -1 ; encoding: [0x00,0x00,0xe5,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xe5,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xe5,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xe5,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xe5,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xe5,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], s[0:1], s0, v[0:1] ; encoding: [0x00,0x00,0xe5,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xe5,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_qsad_pk_u16_u8 v[0:1], s[0:1], s0, v[254:255] ; encoding: [0x00,0x00,0xe5,0xd1,0x00,0x00,0xf8,0x07]
+0x00,0x00,0xe5,0xd1,0x00,0x00,0xf8,0x07
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], s[0:1], s0, s[0:1] ; encoding: [0x00,0x00,0xe6,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe6,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mqsad_pk_u16_u8 v[254:255], s[0:1], s0, s[0:1] ; encoding: [0xfe,0x00,0xe6,0xd1,0x00,0x00,0x00,0x00]
+0xfe,0x00,0xe6,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], 0, s0, s[0:1] ; encoding: [0x00,0x00,0xe6,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xe6,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], -1, s0, s[0:1] ; encoding: [0x00,0x00,0xe6,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xe6,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], 0.5, s0, s[0:1] ; encoding: [0x00,0x00,0xe6,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xe6,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], -4.0, s0, s[0:1] ; encoding: [0x00,0x00,0xe6,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xe6,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], v[0:1], s0, s[0:1] ; encoding: [0x00,0x00,0xe6,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xe6,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], v[254:255], s0, s[0:1] ; encoding: [0x00,0x00,0xe6,0xd1,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xe6,0xd1,0xfe,0x01,0x00,0x00
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xe6,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xe6,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xe6,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xe6,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xe6,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xe6,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xe6,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xe6,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], s[0:1], scc, s[0:1] ; encoding: [0x00,0x00,0xe6,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xe6,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], s[0:1], v0, s[0:1] ; encoding: [0x00,0x00,0xe6,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xe6,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], s[0:1], v255, s[0:1] ; encoding: [0x00,0x00,0xe6,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xe6,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], s[0:1], s0, 0 ; encoding: [0x00,0x00,0xe6,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xe6,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], s[0:1], s0, -1 ; encoding: [0x00,0x00,0xe6,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xe6,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xe6,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xe6,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xe6,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xe6,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], s[0:1], s0, v[0:1] ; encoding: [0x00,0x00,0xe6,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xe6,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_mqsad_pk_u16_u8 v[0:1], s[0:1], s0, v[254:255] ; encoding: [0x00,0x00,0xe6,0xd1,0x00,0x00,0xf8,0x07]
+0x00,0x00,0xe6,0xd1,0x00,0x00,0xf8,0x07
+
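+# The 16-bit groups that follow differ from the 32/64-bit ones above: the f16
+# opcodes (v_mad_f16, v_fma_f16, v_div_fixup_f16) exercise neg, abs and clamp
+# but include no omod (mul:/div:) cases, and the integer v_mad_u16/v_mad_i16
+# groups carry no float modifiers at all.
+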
+# CHECK: v_mad_f16 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xea,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_f16 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xea,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xea,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_f16 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xea,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xea,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_mad_f16 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xea,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xea,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mad_f16 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xea,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xea,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mad_f16 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xea,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xea,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mad_f16 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xea,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xea,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mad_f16 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xea,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_mad_f16 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xea,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xea,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_mad_f16 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xea,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_mad_f16 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xea,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_mad_f16 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xea,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mad_f16 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xea,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_mad_f16 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xea,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mad_f16 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xea,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_mad_f16 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xea,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_mad_f16 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xea,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_mad_f16 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xea,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_mad_f16 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xea,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_mad_f16 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xea,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_mad_f16 v0, s0, s0, scc ; encoding: [0x00,0x00,0xea,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xea,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_mad_f16 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xea,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_mad_f16 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xea,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_mad_f16 v0, -s0, s0, s0 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xea,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_mad_f16 v0, s0, -s0, s0 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0xea,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_mad_f16 v0, s0, s0, -s0 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0x00,0x00,0x80]
+0x00,0x00,0xea,0xd1,0x00,0x00,0x00,0x80
+
+# CHECK: v_mad_f16 v0, -s0, -s0, -s0 ; encoding: [0x00,0x00,0xea,0xd1,0x00,0x00,0x00,0xe0]
+0x00,0x00,0xea,0xd1,0x00,0x00,0x00,0xe0
+
+# CHECK: v_mad_f16 v0, |s0|, s0, s0 ; encoding: [0x00,0x01,0xea,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xea,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_f16 v0, s0, |s0|, s0 ; encoding: [0x00,0x02,0xea,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0xea,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_f16 v0, s0, s0, |s0| ; encoding: [0x00,0x04,0xea,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x04,0xea,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_f16 v0, |s0|, |s0|, |s0| ; encoding: [0x00,0x07,0xea,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x07,0xea,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_f16 v0, s0, s0, s0 clamp ; encoding: [0x00,0x80,0xea,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xea,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_u16 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xeb,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xeb,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_u16 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xeb,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xeb,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_u16 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xeb,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xeb,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_mad_u16 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xeb,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xeb,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mad_u16 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xeb,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xeb,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mad_u16 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xeb,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xeb,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mad_u16 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xeb,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xeb,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mad_u16 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xeb,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xeb,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_mad_u16 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xeb,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xeb,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_mad_u16 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xeb,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xeb,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_mad_u16 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xeb,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xeb,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_mad_u16 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xeb,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xeb,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mad_u16 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xeb,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xeb,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_mad_u16 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xeb,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xeb,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mad_u16 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xeb,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xeb,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_mad_u16 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xeb,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xeb,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_mad_u16 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xeb,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xeb,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_mad_u16 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xeb,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xeb,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_mad_u16 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xeb,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xeb,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_mad_u16 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xeb,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xeb,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_mad_u16 v0, s0, s0, scc ; encoding: [0x00,0x00,0xeb,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xeb,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_mad_u16 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xeb,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xeb,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_mad_u16 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xeb,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xeb,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_mad_i16 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xec,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xec,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_i16 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xec,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xec,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_mad_i16 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xec,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xec,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_mad_i16 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xec,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xec,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mad_i16 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xec,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xec,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mad_i16 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xec,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xec,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mad_i16 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xec,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xec,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mad_i16 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xec,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xec,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_mad_i16 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xec,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xec,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_mad_i16 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xec,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xec,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_mad_i16 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xec,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xec,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_mad_i16 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xec,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xec,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mad_i16 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xec,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xec,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_mad_i16 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xec,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xec,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mad_i16 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xec,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xec,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_mad_i16 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xec,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xec,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_mad_i16 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xec,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xec,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_mad_i16 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xec,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xec,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_mad_i16 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xec,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xec,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_mad_i16 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xec,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xec,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_mad_i16 v0, s0, s0, scc ; encoding: [0x00,0x00,0xec,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xec,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_mad_i16 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xec,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xec,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_mad_i16 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xec,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xec,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_fma_f16 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xee,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f16 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xee,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xee,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f16 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xee,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xee,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_fma_f16 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xee,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xee,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_fma_f16 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xee,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xee,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_fma_f16 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xee,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xee,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_fma_f16 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xee,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xee,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_fma_f16 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xee,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_fma_f16 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xee,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xee,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_fma_f16 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xee,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_fma_f16 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xee,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_fma_f16 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xee,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_fma_f16 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xee,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_fma_f16 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xee,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_fma_f16 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xee,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_fma_f16 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xee,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_fma_f16 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xee,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_fma_f16 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xee,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_fma_f16 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xee,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_fma_f16 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xee,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_fma_f16 v0, s0, s0, scc ; encoding: [0x00,0x00,0xee,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xee,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_fma_f16 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xee,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_fma_f16 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xee,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_fma_f16 v0, -s0, s0, s0 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xee,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_fma_f16 v0, s0, -s0, s0 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0xee,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_fma_f16 v0, s0, s0, -s0 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0x00,0x00,0x80]
+0x00,0x00,0xee,0xd1,0x00,0x00,0x00,0x80
+
+# CHECK: v_fma_f16 v0, -s0, -s0, -s0 ; encoding: [0x00,0x00,0xee,0xd1,0x00,0x00,0x00,0xe0]
+0x00,0x00,0xee,0xd1,0x00,0x00,0x00,0xe0
+
+# CHECK: v_fma_f16 v0, |s0|, s0, s0 ; encoding: [0x00,0x01,0xee,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xee,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f16 v0, s0, |s0|, s0 ; encoding: [0x00,0x02,0xee,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0xee,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f16 v0, s0, s0, |s0| ; encoding: [0x00,0x04,0xee,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x04,0xee,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f16 v0, |s0|, |s0|, |s0| ; encoding: [0x00,0x07,0xee,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x07,0xee,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_fma_f16 v0, s0, s0, s0 clamp ; encoding: [0x00,0x80,0xee,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xee,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f16 v0, s0, s0, s0 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x00,0xef,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f16 v255, s0, s0, s0 ; encoding: [0xff,0x00,0xef,0xd1,0x00,0x00,0x00,0x00]
+0xff,0x00,0xef,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f16 v0, 0, s0, s0 ; encoding: [0x00,0x00,0xef,0xd1,0x80,0x00,0x00,0x00]
+0x00,0x00,0xef,0xd1,0x80,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f16 v0, -1, s0, s0 ; encoding: [0x00,0x00,0xef,0xd1,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xef,0xd1,0xc1,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f16 v0, 0.5, s0, s0 ; encoding: [0x00,0x00,0xef,0xd1,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xef,0xd1,0xf0,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f16 v0, -4.0, s0, s0 ; encoding: [0x00,0x00,0xef,0xd1,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xef,0xd1,0xf7,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f16 v0, scc, s0, s0 ; encoding: [0x00,0x00,0xef,0xd1,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xef,0xd1,0xfd,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f16 v0, v0, s0, s0 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0x01,0x00,0x00]
+0x00,0x00,0xef,0xd1,0x00,0x01,0x00,0x00
+
+# CHECK: v_div_fixup_f16 v0, v255, s0, s0 ; encoding: [0x00,0x00,0xef,0xd1,0xff,0x01,0x00,0x00]
+0x00,0x00,0xef,0xd1,0xff,0x01,0x00,0x00
+
+# CHECK: v_div_fixup_f16 v0, s0, 0, s0 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0x00,0x01,0x00]
+0x00,0x00,0xef,0xd1,0x00,0x00,0x01,0x00
+
+# CHECK: v_div_fixup_f16 v0, s0, -1, s0 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0x82,0x01,0x00]
+0x00,0x00,0xef,0xd1,0x00,0x82,0x01,0x00
+
+# CHECK: v_div_fixup_f16 v0, s0, 0.5, s0 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xef,0xd1,0x00,0xe0,0x01,0x00
+
+# CHECK: v_div_fixup_f16 v0, s0, -4.0, s0 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0xee,0x01,0x00]
+0x00,0x00,0xef,0xd1,0x00,0xee,0x01,0x00
+
+# CHECK: v_div_fixup_f16 v0, s0, scc, s0 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xef,0xd1,0x00,0xfa,0x01,0x00
+
+# CHECK: v_div_fixup_f16 v0, s0, v0, s0 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0x00,0x02,0x00]
+0x00,0x00,0xef,0xd1,0x00,0x00,0x02,0x00
+
+# CHECK: v_div_fixup_f16 v0, s0, v255, s0 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xef,0xd1,0x00,0xfe,0x03,0x00
+
+# CHECK: v_div_fixup_f16 v0, s0, s0, 0 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0x00,0x00,0x02]
+0x00,0x00,0xef,0xd1,0x00,0x00,0x00,0x02
+
+# CHECK: v_div_fixup_f16 v0, s0, s0, -1 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0x00,0x04,0x03]
+0x00,0x00,0xef,0xd1,0x00,0x00,0x04,0x03
+
+# CHECK: v_div_fixup_f16 v0, s0, s0, 0.5 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0x00,0xc0,0x03]
+0x00,0x00,0xef,0xd1,0x00,0x00,0xc0,0x03
+
+# CHECK: v_div_fixup_f16 v0, s0, s0, -4.0 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0x00,0xdc,0x03]
+0x00,0x00,0xef,0xd1,0x00,0x00,0xdc,0x03
+
+# CHECK: v_div_fixup_f16 v0, s0, s0, scc ; encoding: [0x00,0x00,0xef,0xd1,0x00,0x00,0xf4,0x03]
+0x00,0x00,0xef,0xd1,0x00,0x00,0xf4,0x03
+
+# CHECK: v_div_fixup_f16 v0, s0, s0, v0 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0x00,0x00,0x04]
+0x00,0x00,0xef,0xd1,0x00,0x00,0x00,0x04
+
+# CHECK: v_div_fixup_f16 v0, s0, s0, v255 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0x00,0xfc,0x07]
+0x00,0x00,0xef,0xd1,0x00,0x00,0xfc,0x07
+
+# CHECK: v_div_fixup_f16 v0, -s0, s0, s0 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0x00,0x00,0x20]
+0x00,0x00,0xef,0xd1,0x00,0x00,0x00,0x20
+
+# CHECK: v_div_fixup_f16 v0, s0, -s0, s0 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0x00,0x00,0x40]
+0x00,0x00,0xef,0xd1,0x00,0x00,0x00,0x40
+
+# CHECK: v_div_fixup_f16 v0, s0, s0, -s0 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0x00,0x00,0x80]
+0x00,0x00,0xef,0xd1,0x00,0x00,0x00,0x80
+
+# CHECK: v_div_fixup_f16 v0, -s0, -s0, -s0 ; encoding: [0x00,0x00,0xef,0xd1,0x00,0x00,0x00,0xe0]
+0x00,0x00,0xef,0xd1,0x00,0x00,0x00,0xe0
+
+# CHECK: v_div_fixup_f16 v0, |s0|, s0, s0 ; encoding: [0x00,0x01,0xef,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x01,0xef,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f16 v0, s0, |s0|, s0 ; encoding: [0x00,0x02,0xef,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x02,0xef,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f16 v0, s0, s0, |s0| ; encoding: [0x00,0x04,0xef,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x04,0xef,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f16 v0, |s0|, |s0|, |s0| ; encoding: [0x00,0x07,0xef,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x07,0xef,0xd1,0x00,0x00,0x00,0x00
+
+# CHECK: v_div_fixup_f16 v0, s0, s0, s0 clamp ; encoding: [0x00,0x80,0xef,0xd1,0x00,0x00,0x00,0x00]
+0x00,0x80,0xef,0xd1,0x00,0x00,0x00,0x00
+
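+# From here the opcode pair moves to the 0xd2 page, the two-source VOP3 range:
+# 0x80,0xd2 decodes to opcode 0x280 (v_add_f64). With only two sources encoded,
+# the combined neg case is 0x60 (0x20|0x40) and the combined abs case is 0x03.
+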
+# CHECK: v_add_f64 v[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x80,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_f64 v[254:255], s[0:1], s[0:1] ; encoding: [0xfe,0x00,0x80,0xd2,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x80,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_f64 v[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x80,0xd2,0x80,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd2,0x80,0x00,0x00,0x00
+
+# CHECK: v_add_f64 v[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x80,0xd2,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd2,0xc1,0x00,0x00,0x00
+
+# CHECK: v_add_f64 v[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x80,0xd2,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd2,0xf0,0x00,0x00,0x00
+
+# CHECK: v_add_f64 v[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x80,0xd2,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x80,0xd2,0xf7,0x00,0x00,0x00
+
+# CHECK: v_add_f64 v[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x80,0xd2,0x00,0x01,0x00,0x00]
+0x00,0x00,0x80,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_add_f64 v[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x80,0xd2,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x80,0xd2,0xfe,0x01,0x00,0x00
+
+# CHECK: v_add_f64 v[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x80,0xd2,0x00,0x00,0x01,0x00]
+0x00,0x00,0x80,0xd2,0x00,0x00,0x01,0x00
+
+# CHECK: v_add_f64 v[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x80,0xd2,0x00,0x82,0x01,0x00]
+0x00,0x00,0x80,0xd2,0x00,0x82,0x01,0x00
+
+# CHECK: v_add_f64 v[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x80,0xd2,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x80,0xd2,0x00,0xe0,0x01,0x00
+
+# CHECK: v_add_f64 v[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x80,0xd2,0x00,0xee,0x01,0x00]
+0x00,0x00,0x80,0xd2,0x00,0xee,0x01,0x00
+
+# CHECK: v_add_f64 v[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x80,0xd2,0x00,0x00,0x02,0x00]
+0x00,0x00,0x80,0xd2,0x00,0x00,0x02,0x00
+
+# CHECK: v_add_f64 v[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x80,0xd2,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x80,0xd2,0x00,0xfc,0x03,0x00
+
+# CHECK: v_add_f64 v[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x80,0xd2,0x00,0x00,0x00,0x20]
+0x00,0x00,0x80,0xd2,0x00,0x00,0x00,0x20
+
+# CHECK: v_add_f64 v[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x80,0xd2,0x00,0x00,0x00,0x40]
+0x00,0x00,0x80,0xd2,0x00,0x00,0x00,0x40
+
+# CHECK: v_add_f64 v[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x80,0xd2,0x00,0x00,0x00,0x60]
+0x00,0x00,0x80,0xd2,0x00,0x00,0x00,0x60
+
+# CHECK: v_add_f64 v[0:1], |s[0:1]|, s[0:1] ; encoding: [0x00,0x01,0x80,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x01,0x80,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_f64 v[0:1], s[0:1], |s[0:1]| ; encoding: [0x00,0x02,0x80,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x02,0x80,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_f64 v[0:1], |s[0:1]|, |s[0:1]| ; encoding: [0x00,0x03,0x80,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x03,0x80,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_f64 v[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x80,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x80,0x80,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_add_f64 v[0:1], s[0:1], s[0:1] mul:2 ; encoding: [0x00,0x00,0x80,0xd2,0x00,0x00,0x00,0x08]
+0x00,0x00,0x80,0xd2,0x00,0x00,0x00,0x08
+
+# CHECK: v_add_f64 v[0:1], s[0:1], s[0:1] mul:4 ; encoding: [0x00,0x00,0x80,0xd2,0x00,0x00,0x00,0x10]
+0x00,0x00,0x80,0xd2,0x00,0x00,0x00,0x10
+
+# CHECK: v_add_f64 v[0:1], s[0:1], s[0:1] div:2 ; encoding: [0x00,0x00,0x80,0xd2,0x00,0x00,0x00,0x18]
+0x00,0x00,0x80,0xd2,0x00,0x00,0x00,0x18
+
+# CHECK: v_mul_f64 v[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x81,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_f64 v[254:255], s[0:1], s[0:1] ; encoding: [0xfe,0x00,0x81,0xd2,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x81,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_f64 v[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x81,0xd2,0x80,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd2,0x80,0x00,0x00,0x00
+
+# CHECK: v_mul_f64 v[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x81,0xd2,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd2,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mul_f64 v[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x81,0xd2,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd2,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mul_f64 v[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x81,0xd2,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x81,0xd2,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mul_f64 v[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x81,0xd2,0x00,0x01,0x00,0x00]
+0x00,0x00,0x81,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_mul_f64 v[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x81,0xd2,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x81,0xd2,0xfe,0x01,0x00,0x00
+
+# CHECK: v_mul_f64 v[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x81,0xd2,0x00,0x00,0x01,0x00]
+0x00,0x00,0x81,0xd2,0x00,0x00,0x01,0x00
+
+# CHECK: v_mul_f64 v[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x81,0xd2,0x00,0x82,0x01,0x00]
+0x00,0x00,0x81,0xd2,0x00,0x82,0x01,0x00
+
+# CHECK: v_mul_f64 v[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x81,0xd2,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x81,0xd2,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mul_f64 v[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x81,0xd2,0x00,0xee,0x01,0x00]
+0x00,0x00,0x81,0xd2,0x00,0xee,0x01,0x00
+
+# CHECK: v_mul_f64 v[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x81,0xd2,0x00,0x00,0x02,0x00]
+0x00,0x00,0x81,0xd2,0x00,0x00,0x02,0x00
+
+# CHECK: v_mul_f64 v[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x81,0xd2,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x81,0xd2,0x00,0xfc,0x03,0x00
+
+# CHECK: v_mul_f64 v[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x81,0xd2,0x00,0x00,0x00,0x20]
+0x00,0x00,0x81,0xd2,0x00,0x00,0x00,0x20
+
+# CHECK: v_mul_f64 v[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x81,0xd2,0x00,0x00,0x00,0x40]
+0x00,0x00,0x81,0xd2,0x00,0x00,0x00,0x40
+
+# CHECK: v_mul_f64 v[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x81,0xd2,0x00,0x00,0x00,0x60]
+0x00,0x00,0x81,0xd2,0x00,0x00,0x00,0x60
+
+# CHECK: v_mul_f64 v[0:1], |s[0:1]|, s[0:1] ; encoding: [0x00,0x01,0x81,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x01,0x81,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_f64 v[0:1], s[0:1], |s[0:1]| ; encoding: [0x00,0x02,0x81,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x02,0x81,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_f64 v[0:1], |s[0:1]|, |s[0:1]| ; encoding: [0x00,0x03,0x81,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x03,0x81,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_f64 v[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x81,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x80,0x81,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_f64 v[0:1], s[0:1], s[0:1] mul:2 ; encoding: [0x00,0x00,0x81,0xd2,0x00,0x00,0x00,0x08]
+0x00,0x00,0x81,0xd2,0x00,0x00,0x00,0x08
+
+# CHECK: v_mul_f64 v[0:1], s[0:1], s[0:1] mul:4 ; encoding: [0x00,0x00,0x81,0xd2,0x00,0x00,0x00,0x10]
+0x00,0x00,0x81,0xd2,0x00,0x00,0x00,0x10
+
+# CHECK: v_mul_f64 v[0:1], s[0:1], s[0:1] div:2 ; encoding: [0x00,0x00,0x81,0xd2,0x00,0x00,0x00,0x18]
+0x00,0x00,0x81,0xd2,0x00,0x00,0x00,0x18
+
+# CHECK: v_min_f64 v[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x82,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_f64 v[254:255], s[0:1], s[0:1] ; encoding: [0xfe,0x00,0x82,0xd2,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x82,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_f64 v[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x82,0xd2,0x80,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd2,0x80,0x00,0x00,0x00
+
+# CHECK: v_min_f64 v[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x82,0xd2,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd2,0xc1,0x00,0x00,0x00
+
+# CHECK: v_min_f64 v[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x82,0xd2,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd2,0xf0,0x00,0x00,0x00
+
+# CHECK: v_min_f64 v[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x82,0xd2,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x82,0xd2,0xf7,0x00,0x00,0x00
+
+# CHECK: v_min_f64 v[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x82,0xd2,0x00,0x01,0x00,0x00]
+0x00,0x00,0x82,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_min_f64 v[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x82,0xd2,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x82,0xd2,0xfe,0x01,0x00,0x00
+
+# CHECK: v_min_f64 v[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x82,0xd2,0x00,0x00,0x01,0x00]
+0x00,0x00,0x82,0xd2,0x00,0x00,0x01,0x00
+
+# CHECK: v_min_f64 v[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x82,0xd2,0x00,0x82,0x01,0x00]
+0x00,0x00,0x82,0xd2,0x00,0x82,0x01,0x00
+
+# CHECK: v_min_f64 v[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x82,0xd2,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x82,0xd2,0x00,0xe0,0x01,0x00
+
+# CHECK: v_min_f64 v[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x82,0xd2,0x00,0xee,0x01,0x00]
+0x00,0x00,0x82,0xd2,0x00,0xee,0x01,0x00
+
+# CHECK: v_min_f64 v[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x82,0xd2,0x00,0x00,0x02,0x00]
+0x00,0x00,0x82,0xd2,0x00,0x00,0x02,0x00
+
+# CHECK: v_min_f64 v[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x82,0xd2,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x82,0xd2,0x00,0xfc,0x03,0x00
+
+# CHECK: v_min_f64 v[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x82,0xd2,0x00,0x00,0x00,0x20]
+0x00,0x00,0x82,0xd2,0x00,0x00,0x00,0x20
+
+# CHECK: v_min_f64 v[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x82,0xd2,0x00,0x00,0x00,0x40]
+0x00,0x00,0x82,0xd2,0x00,0x00,0x00,0x40
+
+# CHECK: v_min_f64 v[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x82,0xd2,0x00,0x00,0x00,0x60]
+0x00,0x00,0x82,0xd2,0x00,0x00,0x00,0x60
+
+# CHECK: v_min_f64 v[0:1], |s[0:1]|, s[0:1] ; encoding: [0x00,0x01,0x82,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x01,0x82,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_f64 v[0:1], s[0:1], |s[0:1]| ; encoding: [0x00,0x02,0x82,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x02,0x82,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_f64 v[0:1], |s[0:1]|, |s[0:1]| ; encoding: [0x00,0x03,0x82,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x03,0x82,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_f64 v[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x82,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x80,0x82,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_min_f64 v[0:1], s[0:1], s[0:1] mul:2 ; encoding: [0x00,0x00,0x82,0xd2,0x00,0x00,0x00,0x08]
+0x00,0x00,0x82,0xd2,0x00,0x00,0x00,0x08
+
+# CHECK: v_min_f64 v[0:1], s[0:1], s[0:1] mul:4 ; encoding: [0x00,0x00,0x82,0xd2,0x00,0x00,0x00,0x10]
+0x00,0x00,0x82,0xd2,0x00,0x00,0x00,0x10
+
+# CHECK: v_min_f64 v[0:1], s[0:1], s[0:1] div:2 ; encoding: [0x00,0x00,0x82,0xd2,0x00,0x00,0x00,0x18]
+0x00,0x00,0x82,0xd2,0x00,0x00,0x00,0x18
+
+# CHECK: v_max_f64 v[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x83,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_f64 v[254:255], s[0:1], s[0:1] ; encoding: [0xfe,0x00,0x83,0xd2,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x83,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_f64 v[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x83,0xd2,0x80,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd2,0x80,0x00,0x00,0x00
+
+# CHECK: v_max_f64 v[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x83,0xd2,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd2,0xc1,0x00,0x00,0x00
+
+# CHECK: v_max_f64 v[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x83,0xd2,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd2,0xf0,0x00,0x00,0x00
+
+# CHECK: v_max_f64 v[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x83,0xd2,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x83,0xd2,0xf7,0x00,0x00,0x00
+
+# CHECK: v_max_f64 v[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x83,0xd2,0x00,0x01,0x00,0x00]
+0x00,0x00,0x83,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_max_f64 v[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x83,0xd2,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x83,0xd2,0xfe,0x01,0x00,0x00
+
+# CHECK: v_max_f64 v[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x83,0xd2,0x00,0x00,0x01,0x00]
+0x00,0x00,0x83,0xd2,0x00,0x00,0x01,0x00
+
+# CHECK: v_max_f64 v[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x83,0xd2,0x00,0x82,0x01,0x00]
+0x00,0x00,0x83,0xd2,0x00,0x82,0x01,0x00
+
+# CHECK: v_max_f64 v[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x83,0xd2,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x83,0xd2,0x00,0xe0,0x01,0x00
+
+# CHECK: v_max_f64 v[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x83,0xd2,0x00,0xee,0x01,0x00]
+0x00,0x00,0x83,0xd2,0x00,0xee,0x01,0x00
+
+# CHECK: v_max_f64 v[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x83,0xd2,0x00,0x00,0x02,0x00]
+0x00,0x00,0x83,0xd2,0x00,0x00,0x02,0x00
+
+# CHECK: v_max_f64 v[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x83,0xd2,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x83,0xd2,0x00,0xfc,0x03,0x00
+
+# CHECK: v_max_f64 v[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x83,0xd2,0x00,0x00,0x00,0x20]
+0x00,0x00,0x83,0xd2,0x00,0x00,0x00,0x20
+
+# CHECK: v_max_f64 v[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x83,0xd2,0x00,0x00,0x00,0x40]
+0x00,0x00,0x83,0xd2,0x00,0x00,0x00,0x40
+
+# CHECK: v_max_f64 v[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x83,0xd2,0x00,0x00,0x00,0x60]
+0x00,0x00,0x83,0xd2,0x00,0x00,0x00,0x60
+
+# CHECK: v_max_f64 v[0:1], |s[0:1]|, s[0:1] ; encoding: [0x00,0x01,0x83,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x01,0x83,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_f64 v[0:1], s[0:1], |s[0:1]| ; encoding: [0x00,0x02,0x83,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x02,0x83,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_f64 v[0:1], |s[0:1]|, |s[0:1]| ; encoding: [0x00,0x03,0x83,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x03,0x83,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_f64 v[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x83,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x80,0x83,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_max_f64 v[0:1], s[0:1], s[0:1] mul:2 ; encoding: [0x00,0x00,0x83,0xd2,0x00,0x00,0x00,0x08]
+0x00,0x00,0x83,0xd2,0x00,0x00,0x00,0x08
+
+# CHECK: v_max_f64 v[0:1], s[0:1], s[0:1] mul:4 ; encoding: [0x00,0x00,0x83,0xd2,0x00,0x00,0x00,0x10]
+0x00,0x00,0x83,0xd2,0x00,0x00,0x00,0x10
+
+# CHECK: v_max_f64 v[0:1], s[0:1], s[0:1] div:2 ; encoding: [0x00,0x00,0x83,0xd2,0x00,0x00,0x00,0x18]
+0x00,0x00,0x83,0xd2,0x00,0x00,0x00,0x18
+
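+# v_ldexp_f64 multiplies a double src0 by 2^src1, where src1 is an integer
+# exponent, so the modifier cases below apply neg/abs to src0 only.
+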
+# CHECK: v_ldexp_f64 v[0:1], s[0:1], s0 ; encoding: [0x00,0x00,0x84,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_ldexp_f64 v[254:255], s[0:1], s0 ; encoding: [0xfe,0x00,0x84,0xd2,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x84,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_ldexp_f64 v[0:1], 0, s0 ; encoding: [0x00,0x00,0x84,0xd2,0x80,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd2,0x80,0x00,0x00,0x00
+
+# CHECK: v_ldexp_f64 v[0:1], -1, s0 ; encoding: [0x00,0x00,0x84,0xd2,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd2,0xc1,0x00,0x00,0x00
+
+# CHECK: v_ldexp_f64 v[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x84,0xd2,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd2,0xf0,0x00,0x00,0x00
+
+# CHECK: v_ldexp_f64 v[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x84,0xd2,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x84,0xd2,0xf7,0x00,0x00,0x00
+
+# CHECK: v_ldexp_f64 v[0:1], v[0:1], s0 ; encoding: [0x00,0x00,0x84,0xd2,0x00,0x01,0x00,0x00]
+0x00,0x00,0x84,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_ldexp_f64 v[0:1], v[254:255], s0 ; encoding: [0x00,0x00,0x84,0xd2,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x84,0xd2,0xfe,0x01,0x00,0x00
+
+# CHECK: v_ldexp_f64 v[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x84,0xd2,0x00,0x00,0x01,0x00]
+0x00,0x00,0x84,0xd2,0x00,0x00,0x01,0x00
+
+# CHECK: v_ldexp_f64 v[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x84,0xd2,0x00,0x82,0x01,0x00]
+0x00,0x00,0x84,0xd2,0x00,0x82,0x01,0x00
+
+# CHECK: v_ldexp_f64 v[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x84,0xd2,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x84,0xd2,0x00,0xe0,0x01,0x00
+
+# CHECK: v_ldexp_f64 v[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x84,0xd2,0x00,0xee,0x01,0x00]
+0x00,0x00,0x84,0xd2,0x00,0xee,0x01,0x00
+
+# CHECK: v_ldexp_f64 v[0:1], s[0:1], scc ; encoding: [0x00,0x00,0x84,0xd2,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x84,0xd2,0x00,0xfa,0x01,0x00
+
+# CHECK: v_ldexp_f64 v[0:1], s[0:1], v0 ; encoding: [0x00,0x00,0x84,0xd2,0x00,0x00,0x02,0x00]
+0x00,0x00,0x84,0xd2,0x00,0x00,0x02,0x00
+
+# CHECK: v_ldexp_f64 v[0:1], s[0:1], v255 ; encoding: [0x00,0x00,0x84,0xd2,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x84,0xd2,0x00,0xfe,0x03,0x00
+
+# CHECK: v_ldexp_f64 v[0:1], -s[0:1], s0 ; encoding: [0x00,0x00,0x84,0xd2,0x00,0x00,0x00,0x20]
+0x00,0x00,0x84,0xd2,0x00,0x00,0x00,0x20
+
+# CHECK: v_ldexp_f64 v[0:1], |s[0:1]|, s0 ; encoding: [0x00,0x01,0x84,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x01,0x84,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_ldexp_f64 v[0:1], s[0:1], s0 clamp ; encoding: [0x00,0x80,0x84,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x80,0x84,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_ldexp_f64 v[0:1], s[0:1], s0 mul:2 ; encoding: [0x00,0x00,0x84,0xd2,0x00,0x00,0x00,0x08]
+0x00,0x00,0x84,0xd2,0x00,0x00,0x00,0x08
+
+# CHECK: v_ldexp_f64 v[0:1], s[0:1], s0 mul:4 ; encoding: [0x00,0x00,0x84,0xd2,0x00,0x00,0x00,0x10]
+0x00,0x00,0x84,0xd2,0x00,0x00,0x00,0x10
+
+# CHECK: v_ldexp_f64 v[0:1], s[0:1], s0 div:2 ; encoding: [0x00,0x00,0x84,0xd2,0x00,0x00,0x00,0x18]
+0x00,0x00,0x84,0xd2,0x00,0x00,0x00,0x18
+
+# CHECK: v_mul_lo_u32 v0, s0, s0 ; encoding: [0x00,0x00,0x85,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_lo_u32 v255, s0, s0 ; encoding: [0xff,0x00,0x85,0xd2,0x00,0x00,0x00,0x00]
+0xff,0x00,0x85,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_lo_u32 v0, 0, s0 ; encoding: [0x00,0x00,0x85,0xd2,0x80,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd2,0x80,0x00,0x00,0x00
+
+# CHECK: v_mul_lo_u32 v0, -1, s0 ; encoding: [0x00,0x00,0x85,0xd2,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd2,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mul_lo_u32 v0, 0.5, s0 ; encoding: [0x00,0x00,0x85,0xd2,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd2,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mul_lo_u32 v0, -4.0, s0 ; encoding: [0x00,0x00,0x85,0xd2,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd2,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mul_lo_u32 v0, scc, s0 ; encoding: [0x00,0x00,0x85,0xd2,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x85,0xd2,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mul_lo_u32 v0, v0, s0 ; encoding: [0x00,0x00,0x85,0xd2,0x00,0x01,0x00,0x00]
+0x00,0x00,0x85,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_mul_lo_u32 v0, v255, s0 ; encoding: [0x00,0x00,0x85,0xd2,0xff,0x01,0x00,0x00]
+0x00,0x00,0x85,0xd2,0xff,0x01,0x00,0x00
+
+# CHECK: v_mul_lo_u32 v0, s0, 0 ; encoding: [0x00,0x00,0x85,0xd2,0x00,0x00,0x01,0x00]
+0x00,0x00,0x85,0xd2,0x00,0x00,0x01,0x00
+
+# CHECK: v_mul_lo_u32 v0, s0, -1 ; encoding: [0x00,0x00,0x85,0xd2,0x00,0x82,0x01,0x00]
+0x00,0x00,0x85,0xd2,0x00,0x82,0x01,0x00
+
+# CHECK: v_mul_lo_u32 v0, s0, 0.5 ; encoding: [0x00,0x00,0x85,0xd2,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x85,0xd2,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mul_lo_u32 v0, s0, -4.0 ; encoding: [0x00,0x00,0x85,0xd2,0x00,0xee,0x01,0x00]
+0x00,0x00,0x85,0xd2,0x00,0xee,0x01,0x00
+
+# CHECK: v_mul_lo_u32 v0, s0, scc ; encoding: [0x00,0x00,0x85,0xd2,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x85,0xd2,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mul_lo_u32 v0, s0, v0 ; encoding: [0x00,0x00,0x85,0xd2,0x00,0x00,0x02,0x00]
+0x00,0x00,0x85,0xd2,0x00,0x00,0x02,0x00
+
+# CHECK: v_mul_lo_u32 v0, s0, v255 ; encoding: [0x00,0x00,0x85,0xd2,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x85,0xd2,0x00,0xfe,0x03,0x00
+
+# CHECK: v_mul_hi_u32 v0, s0, s0 ; encoding: [0x00,0x00,0x86,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_u32 v255, s0, s0 ; encoding: [0xff,0x00,0x86,0xd2,0x00,0x00,0x00,0x00]
+0xff,0x00,0x86,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_u32 v0, 0, s0 ; encoding: [0x00,0x00,0x86,0xd2,0x80,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd2,0x80,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_u32 v0, -1, s0 ; encoding: [0x00,0x00,0x86,0xd2,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd2,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_u32 v0, 0.5, s0 ; encoding: [0x00,0x00,0x86,0xd2,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd2,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_u32 v0, -4.0, s0 ; encoding: [0x00,0x00,0x86,0xd2,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd2,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_u32 v0, scc, s0 ; encoding: [0x00,0x00,0x86,0xd2,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x86,0xd2,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_u32 v0, v0, s0 ; encoding: [0x00,0x00,0x86,0xd2,0x00,0x01,0x00,0x00]
+0x00,0x00,0x86,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_mul_hi_u32 v0, v255, s0 ; encoding: [0x00,0x00,0x86,0xd2,0xff,0x01,0x00,0x00]
+0x00,0x00,0x86,0xd2,0xff,0x01,0x00,0x00
+
+# CHECK: v_mul_hi_u32 v0, s0, 0 ; encoding: [0x00,0x00,0x86,0xd2,0x00,0x00,0x01,0x00]
+0x00,0x00,0x86,0xd2,0x00,0x00,0x01,0x00
+
+# CHECK: v_mul_hi_u32 v0, s0, -1 ; encoding: [0x00,0x00,0x86,0xd2,0x00,0x82,0x01,0x00]
+0x00,0x00,0x86,0xd2,0x00,0x82,0x01,0x00
+
+# CHECK: v_mul_hi_u32 v0, s0, 0.5 ; encoding: [0x00,0x00,0x86,0xd2,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x86,0xd2,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mul_hi_u32 v0, s0, -4.0 ; encoding: [0x00,0x00,0x86,0xd2,0x00,0xee,0x01,0x00]
+0x00,0x00,0x86,0xd2,0x00,0xee,0x01,0x00
+
+# CHECK: v_mul_hi_u32 v0, s0, scc ; encoding: [0x00,0x00,0x86,0xd2,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x86,0xd2,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mul_hi_u32 v0, s0, v0 ; encoding: [0x00,0x00,0x86,0xd2,0x00,0x00,0x02,0x00]
+0x00,0x00,0x86,0xd2,0x00,0x00,0x02,0x00
+
+# CHECK: v_mul_hi_u32 v0, s0, v255 ; encoding: [0x00,0x00,0x86,0xd2,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x86,0xd2,0x00,0xfe,0x03,0x00
+
+# CHECK: v_mul_hi_i32 v0, s0, s0 ; encoding: [0x00,0x00,0x87,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_i32 v255, s0, s0 ; encoding: [0xff,0x00,0x87,0xd2,0x00,0x00,0x00,0x00]
+0xff,0x00,0x87,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_i32 v0, 0, s0 ; encoding: [0x00,0x00,0x87,0xd2,0x80,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd2,0x80,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_i32 v0, -1, s0 ; encoding: [0x00,0x00,0x87,0xd2,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd2,0xc1,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_i32 v0, 0.5, s0 ; encoding: [0x00,0x00,0x87,0xd2,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd2,0xf0,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_i32 v0, -4.0, s0 ; encoding: [0x00,0x00,0x87,0xd2,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd2,0xf7,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_i32 v0, scc, s0 ; encoding: [0x00,0x00,0x87,0xd2,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x87,0xd2,0xfd,0x00,0x00,0x00
+
+# CHECK: v_mul_hi_i32 v0, v0, s0 ; encoding: [0x00,0x00,0x87,0xd2,0x00,0x01,0x00,0x00]
+0x00,0x00,0x87,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_mul_hi_i32 v0, v255, s0 ; encoding: [0x00,0x00,0x87,0xd2,0xff,0x01,0x00,0x00]
+0x00,0x00,0x87,0xd2,0xff,0x01,0x00,0x00
+
+# CHECK: v_mul_hi_i32 v0, s0, 0 ; encoding: [0x00,0x00,0x87,0xd2,0x00,0x00,0x01,0x00]
+0x00,0x00,0x87,0xd2,0x00,0x00,0x01,0x00
+
+# CHECK: v_mul_hi_i32 v0, s0, -1 ; encoding: [0x00,0x00,0x87,0xd2,0x00,0x82,0x01,0x00]
+0x00,0x00,0x87,0xd2,0x00,0x82,0x01,0x00
+
+# CHECK: v_mul_hi_i32 v0, s0, 0.5 ; encoding: [0x00,0x00,0x87,0xd2,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x87,0xd2,0x00,0xe0,0x01,0x00
+
+# CHECK: v_mul_hi_i32 v0, s0, -4.0 ; encoding: [0x00,0x00,0x87,0xd2,0x00,0xee,0x01,0x00]
+0x00,0x00,0x87,0xd2,0x00,0xee,0x01,0x00
+
+# CHECK: v_mul_hi_i32 v0, s0, scc ; encoding: [0x00,0x00,0x87,0xd2,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x87,0xd2,0x00,0xfa,0x01,0x00
+
+# CHECK: v_mul_hi_i32 v0, s0, v0 ; encoding: [0x00,0x00,0x87,0xd2,0x00,0x00,0x02,0x00]
+0x00,0x00,0x87,0xd2,0x00,0x00,0x02,0x00
+
+# CHECK: v_mul_hi_i32 v0, s0, v255 ; encoding: [0x00,0x00,0x87,0xd2,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x87,0xd2,0x00,0xfe,0x03,0x00
+
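+# v_readlane_b32 writes an SGPR, so byte 0 now encodes an SGPR-style operand:
+# 0x65 = s101, 0x66/0x67 = flat_scratch_lo/hi, 0x6c-0x6f = tba/tma lo/hi,
+# 0x7b = ttmp11. The v_writelane_b32 group below is smaller, covering only
+# SGPR, scc and inline-constant first sources.
+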
+# CHECK: v_readlane_b32 s0, v0, s0 ; encoding: [0x00,0x00,0x89,0xd2,0x00,0x01,0x00,0x00]
+0x00,0x00,0x89,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_readlane_b32 s101, v0, s0 ; encoding: [0x65,0x00,0x89,0xd2,0x00,0x01,0x00,0x00]
+0x65,0x00,0x89,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_readlane_b32 flat_scratch_lo, v0, s0 ; encoding: [0x66,0x00,0x89,0xd2,0x00,0x01,0x00,0x00]
+0x66,0x00,0x89,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_readlane_b32 flat_scratch_hi, v0, s0 ; encoding: [0x67,0x00,0x89,0xd2,0x00,0x01,0x00,0x00]
+0x67,0x00,0x89,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_readlane_b32 tba_lo, v0, s0 ; encoding: [0x6c,0x00,0x89,0xd2,0x00,0x01,0x00,0x00]
+0x6c,0x00,0x89,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_readlane_b32 tba_hi, v0, s0 ; encoding: [0x6d,0x00,0x89,0xd2,0x00,0x01,0x00,0x00]
+0x6d,0x00,0x89,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_readlane_b32 tma_lo, v0, s0 ; encoding: [0x6e,0x00,0x89,0xd2,0x00,0x01,0x00,0x00]
+0x6e,0x00,0x89,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_readlane_b32 tma_hi, v0, s0 ; encoding: [0x6f,0x00,0x89,0xd2,0x00,0x01,0x00,0x00]
+0x6f,0x00,0x89,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_readlane_b32 ttmp11, v0, s0 ; encoding: [0x7b,0x00,0x89,0xd2,0x00,0x01,0x00,0x00]
+0x7b,0x00,0x89,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_readlane_b32 s0, v255, s0 ; encoding: [0x00,0x00,0x89,0xd2,0xff,0x01,0x00,0x00]
+0x00,0x00,0x89,0xd2,0xff,0x01,0x00,0x00
+
+# CHECK: v_readlane_b32 s0, v0, s101 ; encoding: [0x00,0x00,0x89,0xd2,0x00,0xcb,0x00,0x00]
+0x00,0x00,0x89,0xd2,0x00,0xcb,0x00,0x00
+
+# CHECK: v_readlane_b32 s0, v0, flat_scratch_lo ; encoding: [0x00,0x00,0x89,0xd2,0x00,0xcd,0x00,0x00]
+0x00,0x00,0x89,0xd2,0x00,0xcd,0x00,0x00
+
+# CHECK: v_readlane_b32 s0, v0, flat_scratch_hi ; encoding: [0x00,0x00,0x89,0xd2,0x00,0xcf,0x00,0x00]
+0x00,0x00,0x89,0xd2,0x00,0xcf,0x00,0x00
+
+# CHECK: v_readlane_b32 s0, v0, vcc_lo ; encoding: [0x00,0x00,0x89,0xd2,0x00,0xd5,0x00,0x00]
+0x00,0x00,0x89,0xd2,0x00,0xd5,0x00,0x00
+
+# CHECK: v_readlane_b32 s0, v0, vcc_hi ; encoding: [0x00,0x00,0x89,0xd2,0x00,0xd7,0x00,0x00]
+0x00,0x00,0x89,0xd2,0x00,0xd7,0x00,0x00
+
+# CHECK: v_readlane_b32 s0, v0, tba_lo ; encoding: [0x00,0x00,0x89,0xd2,0x00,0xd9,0x00,0x00]
+0x00,0x00,0x89,0xd2,0x00,0xd9,0x00,0x00
+
+# CHECK: v_readlane_b32 s0, v0, tba_hi ; encoding: [0x00,0x00,0x89,0xd2,0x00,0xdb,0x00,0x00]
+0x00,0x00,0x89,0xd2,0x00,0xdb,0x00,0x00
+
+# CHECK: v_readlane_b32 s0, v0, tma_lo ; encoding: [0x00,0x00,0x89,0xd2,0x00,0xdd,0x00,0x00]
+0x00,0x00,0x89,0xd2,0x00,0xdd,0x00,0x00
+
+# CHECK: v_readlane_b32 s0, v0, tma_hi ; encoding: [0x00,0x00,0x89,0xd2,0x00,0xdf,0x00,0x00]
+0x00,0x00,0x89,0xd2,0x00,0xdf,0x00,0x00
+
+# CHECK: v_readlane_b32 s0, v0, ttmp11 ; encoding: [0x00,0x00,0x89,0xd2,0x00,0xf7,0x00,0x00]
+0x00,0x00,0x89,0xd2,0x00,0xf7,0x00,0x00
+
+# CHECK: v_readlane_b32 s0, v0, m0 ; encoding: [0x00,0x00,0x89,0xd2,0x00,0xf9,0x00,0x00]
+0x00,0x00,0x89,0xd2,0x00,0xf9,0x00,0x00
+
+# CHECK: v_readlane_b32 s0, v0, 0 ; encoding: [0x00,0x00,0x89,0xd2,0x00,0x01,0x01,0x00]
+0x00,0x00,0x89,0xd2,0x00,0x01,0x01,0x00
+
+# CHECK: v_writelane_b32 v0, s0, s0 ; encoding: [0x00,0x00,0x8a,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_writelane_b32 v255, s0, s0 ; encoding: [0xff,0x00,0x8a,0xd2,0x00,0x00,0x00,0x00]
+0xff,0x00,0x8a,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_writelane_b32 v0, scc, s0 ; encoding: [0x00,0x00,0x8a,0xd2,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x8a,0xd2,0xfd,0x00,0x00,0x00
+
+# CHECK: v_writelane_b32 v0, s0, 0 ; encoding: [0x00,0x00,0x8a,0xd2,0x00,0x00,0x01,0x00]
+0x00,0x00,0x8a,0xd2,0x00,0x00,0x01,0x00
+
+# CHECK: v_lshlrev_b64 v[0:1], s0, s[0:1] ; encoding: [0x00,0x00,0x8f,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x00,0x8f,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b64 v[254:255], s0, s[0:1] ; encoding: [0xfe,0x00,0x8f,0xd2,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x8f,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b64 v[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x8f,0xd2,0x80,0x00,0x00,0x00]
+0x00,0x00,0x8f,0xd2,0x80,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b64 v[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x8f,0xd2,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x8f,0xd2,0xc1,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b64 v[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x8f,0xd2,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x8f,0xd2,0xf0,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b64 v[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x8f,0xd2,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x8f,0xd2,0xf7,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b64 v[0:1], scc, s[0:1] ; encoding: [0x00,0x00,0x8f,0xd2,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x8f,0xd2,0xfd,0x00,0x00,0x00
+
+# CHECK: v_lshlrev_b64 v[0:1], v0, s[0:1] ; encoding: [0x00,0x00,0x8f,0xd2,0x00,0x01,0x00,0x00]
+0x00,0x00,0x8f,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_lshlrev_b64 v[0:1], v255, s[0:1] ; encoding: [0x00,0x00,0x8f,0xd2,0xff,0x01,0x00,0x00]
+0x00,0x00,0x8f,0xd2,0xff,0x01,0x00,0x00
+
+# CHECK: v_lshlrev_b64 v[0:1], s0, 0 ; encoding: [0x00,0x00,0x8f,0xd2,0x00,0x00,0x01,0x00]
+0x00,0x00,0x8f,0xd2,0x00,0x00,0x01,0x00
+
+# CHECK: v_lshlrev_b64 v[0:1], s0, -1 ; encoding: [0x00,0x00,0x8f,0xd2,0x00,0x82,0x01,0x00]
+0x00,0x00,0x8f,0xd2,0x00,0x82,0x01,0x00
+
+# CHECK: v_lshlrev_b64 v[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x8f,0xd2,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x8f,0xd2,0x00,0xe0,0x01,0x00
+
+# CHECK: v_lshlrev_b64 v[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x8f,0xd2,0x00,0xee,0x01,0x00]
+0x00,0x00,0x8f,0xd2,0x00,0xee,0x01,0x00
+
+# CHECK: v_lshlrev_b64 v[0:1], s0, v[0:1] ; encoding: [0x00,0x00,0x8f,0xd2,0x00,0x00,0x02,0x00]
+0x00,0x00,0x8f,0xd2,0x00,0x00,0x02,0x00
+
+# CHECK: v_lshlrev_b64 v[0:1], s0, v[254:255] ; encoding: [0x00,0x00,0x8f,0xd2,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x8f,0xd2,0x00,0xfc,0x03,0x00
+
+# CHECK: v_lshrrev_b64 v[0:1], s0, s[0:1] ; encoding: [0x00,0x00,0x90,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x00,0x90,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b64 v[254:255], s0, s[0:1] ; encoding: [0xfe,0x00,0x90,0xd2,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x90,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b64 v[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x90,0xd2,0x80,0x00,0x00,0x00]
+0x00,0x00,0x90,0xd2,0x80,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b64 v[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x90,0xd2,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x90,0xd2,0xc1,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b64 v[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x90,0xd2,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x90,0xd2,0xf0,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b64 v[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x90,0xd2,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x90,0xd2,0xf7,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b64 v[0:1], scc, s[0:1] ; encoding: [0x00,0x00,0x90,0xd2,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x90,0xd2,0xfd,0x00,0x00,0x00
+
+# CHECK: v_lshrrev_b64 v[0:1], v0, s[0:1] ; encoding: [0x00,0x00,0x90,0xd2,0x00,0x01,0x00,0x00]
+0x00,0x00,0x90,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_lshrrev_b64 v[0:1], v255, s[0:1] ; encoding: [0x00,0x00,0x90,0xd2,0xff,0x01,0x00,0x00]
+0x00,0x00,0x90,0xd2,0xff,0x01,0x00,0x00
+
+# CHECK: v_lshrrev_b64 v[0:1], s0, 0 ; encoding: [0x00,0x00,0x90,0xd2,0x00,0x00,0x01,0x00]
+0x00,0x00,0x90,0xd2,0x00,0x00,0x01,0x00
+
+# CHECK: v_lshrrev_b64 v[0:1], s0, -1 ; encoding: [0x00,0x00,0x90,0xd2,0x00,0x82,0x01,0x00]
+0x00,0x00,0x90,0xd2,0x00,0x82,0x01,0x00
+
+# CHECK: v_lshrrev_b64 v[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x90,0xd2,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x90,0xd2,0x00,0xe0,0x01,0x00
+
+# CHECK: v_lshrrev_b64 v[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x90,0xd2,0x00,0xee,0x01,0x00]
+0x00,0x00,0x90,0xd2,0x00,0xee,0x01,0x00
+
+# CHECK: v_lshrrev_b64 v[0:1], s0, v[0:1] ; encoding: [0x00,0x00,0x90,0xd2,0x00,0x00,0x02,0x00]
+0x00,0x00,0x90,0xd2,0x00,0x00,0x02,0x00
+
+# CHECK: v_lshrrev_b64 v[0:1], s0, v[254:255] ; encoding: [0x00,0x00,0x90,0xd2,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x90,0xd2,0x00,0xfc,0x03,0x00
+
+# CHECK: v_ashrrev_i64 v[0:1], s0, s[0:1] ; encoding: [0x00,0x00,0x91,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x00,0x91,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i64 v[254:255], s0, s[0:1] ; encoding: [0xfe,0x00,0x91,0xd2,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x91,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i64 v[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x91,0xd2,0x80,0x00,0x00,0x00]
+0x00,0x00,0x91,0xd2,0x80,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i64 v[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x91,0xd2,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x91,0xd2,0xc1,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i64 v[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x91,0xd2,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x91,0xd2,0xf0,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i64 v[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x91,0xd2,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x91,0xd2,0xf7,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i64 v[0:1], scc, s[0:1] ; encoding: [0x00,0x00,0x91,0xd2,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x91,0xd2,0xfd,0x00,0x00,0x00
+
+# CHECK: v_ashrrev_i64 v[0:1], v0, s[0:1] ; encoding: [0x00,0x00,0x91,0xd2,0x00,0x01,0x00,0x00]
+0x00,0x00,0x91,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_ashrrev_i64 v[0:1], v255, s[0:1] ; encoding: [0x00,0x00,0x91,0xd2,0xff,0x01,0x00,0x00]
+0x00,0x00,0x91,0xd2,0xff,0x01,0x00,0x00
+
+# CHECK: v_ashrrev_i64 v[0:1], s0, 0 ; encoding: [0x00,0x00,0x91,0xd2,0x00,0x00,0x01,0x00]
+0x00,0x00,0x91,0xd2,0x00,0x00,0x01,0x00
+
+# CHECK: v_ashrrev_i64 v[0:1], s0, -1 ; encoding: [0x00,0x00,0x91,0xd2,0x00,0x82,0x01,0x00]
+0x00,0x00,0x91,0xd2,0x00,0x82,0x01,0x00
+
+# CHECK: v_ashrrev_i64 v[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x91,0xd2,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x91,0xd2,0x00,0xe0,0x01,0x00
+
+# CHECK: v_ashrrev_i64 v[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x91,0xd2,0x00,0xee,0x01,0x00]
+0x00,0x00,0x91,0xd2,0x00,0xee,0x01,0x00
+
+# CHECK: v_ashrrev_i64 v[0:1], s0, v[0:1] ; encoding: [0x00,0x00,0x91,0xd2,0x00,0x00,0x02,0x00]
+0x00,0x00,0x91,0xd2,0x00,0x00,0x02,0x00
+
+# CHECK: v_ashrrev_i64 v[0:1], s0, v[254:255] ; encoding: [0x00,0x00,0x91,0xd2,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x91,0xd2,0x00,0xfc,0x03,0x00
+
+# CHECK: v_trig_preop_f64 v[0:1], s[0:1], s0 ; encoding: [0x00,0x00,0x92,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x00,0x92,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_trig_preop_f64 v[254:255], s[0:1], s0 ; encoding: [0xfe,0x00,0x92,0xd2,0x00,0x00,0x00,0x00]
+0xfe,0x00,0x92,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_trig_preop_f64 v[0:1], 0, s0 ; encoding: [0x00,0x00,0x92,0xd2,0x80,0x00,0x00,0x00]
+0x00,0x00,0x92,0xd2,0x80,0x00,0x00,0x00
+
+# CHECK: v_trig_preop_f64 v[0:1], -1, s0 ; encoding: [0x00,0x00,0x92,0xd2,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x92,0xd2,0xc1,0x00,0x00,0x00
+
+# CHECK: v_trig_preop_f64 v[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x92,0xd2,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x92,0xd2,0xf0,0x00,0x00,0x00
+
+# CHECK: v_trig_preop_f64 v[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x92,0xd2,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x92,0xd2,0xf7,0x00,0x00,0x00
+
+# CHECK: v_trig_preop_f64 v[0:1], v[0:1], s0 ; encoding: [0x00,0x00,0x92,0xd2,0x00,0x01,0x00,0x00]
+0x00,0x00,0x92,0xd2,0x00,0x01,0x00,0x00
+
+# CHECK: v_trig_preop_f64 v[0:1], v[254:255], s0 ; encoding: [0x00,0x00,0x92,0xd2,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x92,0xd2,0xfe,0x01,0x00,0x00
+
+# CHECK: v_trig_preop_f64 v[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x92,0xd2,0x00,0x00,0x01,0x00]
+0x00,0x00,0x92,0xd2,0x00,0x00,0x01,0x00
+
+# CHECK: v_trig_preop_f64 v[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x92,0xd2,0x00,0x82,0x01,0x00]
+0x00,0x00,0x92,0xd2,0x00,0x82,0x01,0x00
+
+# CHECK: v_trig_preop_f64 v[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x92,0xd2,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x92,0xd2,0x00,0xe0,0x01,0x00
+
+# CHECK: v_trig_preop_f64 v[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x92,0xd2,0x00,0xee,0x01,0x00]
+0x00,0x00,0x92,0xd2,0x00,0xee,0x01,0x00
+
+# CHECK: v_trig_preop_f64 v[0:1], s[0:1], scc ; encoding: [0x00,0x00,0x92,0xd2,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x92,0xd2,0x00,0xfa,0x01,0x00
+
+# CHECK: v_trig_preop_f64 v[0:1], s[0:1], v0 ; encoding: [0x00,0x00,0x92,0xd2,0x00,0x00,0x02,0x00]
+0x00,0x00,0x92,0xd2,0x00,0x00,0x02,0x00
+
+# CHECK: v_trig_preop_f64 v[0:1], s[0:1], v255 ; encoding: [0x00,0x00,0x92,0xd2,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x92,0xd2,0x00,0xfe,0x03,0x00
+
+# CHECK: v_trig_preop_f64 v[0:1], -s[0:1], s0 ; encoding: [0x00,0x00,0x92,0xd2,0x00,0x00,0x00,0x20]
+0x00,0x00,0x92,0xd2,0x00,0x00,0x00,0x20
+
+# CHECK: v_trig_preop_f64 v[0:1], |s[0:1]|, s0 ; encoding: [0x00,0x01,0x92,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x01,0x92,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_trig_preop_f64 v[0:1], s[0:1], s0 clamp ; encoding: [0x00,0x80,0x92,0xd2,0x00,0x00,0x00,0x00]
+0x00,0x80,0x92,0xd2,0x00,0x00,0x00,0x00
+
+# CHECK: v_trig_preop_f64 v[0:1], s[0:1], s0 mul:2 ; encoding: [0x00,0x00,0x92,0xd2,0x00,0x00,0x00,0x08]
+0x00,0x00,0x92,0xd2,0x00,0x00,0x00,0x08
+
+# CHECK: v_trig_preop_f64 v[0:1], s[0:1], s0 mul:4 ; encoding: [0x00,0x00,0x92,0xd2,0x00,0x00,0x00,0x10]
+0x00,0x00,0x92,0xd2,0x00,0x00,0x00,0x10
+
+# CHECK: v_trig_preop_f64 v[0:1], s[0:1], s0 div:2 ; encoding: [0x00,0x00,0x92,0xd2,0x00,0x00,0x00,0x18]
+0x00,0x00,0x92,0xd2,0x00,0x00,0x00,0x18
+
+# CHECK: v_cmp_class_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x20,0x7c]
+0x00,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x20,0x7c]
+0x65,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x20,0x7c]
+0x66,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x20,0x7c]
+0x67,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x20,0x7c]
+0x6a,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x20,0x7c]
+0x6b,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x20,0x7c]
+0x6c,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x20,0x7c]
+0x6d,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x20,0x7c]
+0x6e,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x20,0x7c]
+0x6f,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x20,0x7c]
+0x7b,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x20,0x7c]
+0x7c,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x20,0x7c]
+0x7e,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x20,0x7c]
+0x7f,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x20,0x7c]
+0x80,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x20,0x7c]
+0xc1,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x20,0x7c]
+0xf0,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x20,0x7c]
+0xf7,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x20,0x7c]
+0xfd,0x00,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x20,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x20,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_class_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x20,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x20,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_class_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x20,0x7c]
+0x00,0x01,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x20,0x7c]
+0xff,0x01,0x20,0x7c
+
+# CHECK: v_cmp_class_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x21,0x7c]
+0x00,0xfe,0x21,0x7c
+
+# CHECK: v_cmp_class_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x10,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x10,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x10,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x10,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x10,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x10,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x10,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x10,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x10,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x10,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x10,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x10,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x10,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x10,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x10,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x10,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x10,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x10,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x10,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x10,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x10,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x10,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x10,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x10,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x10,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x10,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x10,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x10,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_class_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x10,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x10,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_class_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x10,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x10,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_class_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x10,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x10,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_class_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x10,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x10,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_class_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x10,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x10,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_class_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x10,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x10,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_class_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x10,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x10,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_class_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x10,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x10,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_class_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x10,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x10,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_class_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x22,0x7c]
+0x00,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x22,0x7c]
+0x65,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x22,0x7c]
+0x66,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x22,0x7c]
+0x67,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x22,0x7c]
+0x6a,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x22,0x7c]
+0x6b,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x22,0x7c]
+0x6c,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x22,0x7c]
+0x6d,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x22,0x7c]
+0x6e,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x22,0x7c]
+0x6f,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x22,0x7c]
+0x7b,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x22,0x7c]
+0x7c,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x22,0x7c]
+0x7e,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x22,0x7c]
+0x7f,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x22,0x7c]
+0x80,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x22,0x7c]
+0xc1,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x22,0x7c]
+0xf0,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x22,0x7c]
+0xf7,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x22,0x7c]
+0xfd,0x00,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x22,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x22,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_class_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x22,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x22,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_class_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x22,0x7c]
+0x00,0x01,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x22,0x7c]
+0xff,0x01,0x22,0x7c
+
+# CHECK: v_cmpx_class_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x23,0x7c]
+0x00,0xfe,0x23,0x7c
+
+# CHECK: v_cmpx_class_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x11,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x11,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x11,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x11,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x11,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x11,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x11,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x11,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x11,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x11,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x11,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x11,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x11,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x11,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x11,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x11,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x11,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x11,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x11,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x11,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x11,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x11,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x11,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x11,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x11,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x11,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x11,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x11,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_class_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x11,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x11,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_class_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x11,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x11,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_class_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x11,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x11,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_class_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x11,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x11,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_class_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x11,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x11,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_class_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x11,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x11,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_class_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x11,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x11,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_class_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x11,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x11,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_class_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x11,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x11,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_class_f64_e32 vcc, s[0:1], v0 ; encoding: [0x00,0x00,0x24,0x7c]
+0x00,0x00,0x24,0x7c
+
+# CHECK: v_cmp_class_f64_e32 vcc, s[2:3], v0 ; encoding: [0x02,0x00,0x24,0x7c]
+0x02,0x00,0x24,0x7c
+
+# CHECK: v_cmp_class_f64_e32 vcc, s[100:101], v0 ; encoding: [0x64,0x00,0x24,0x7c]
+0x64,0x00,0x24,0x7c
+
+# CHECK: v_cmp_class_f64_e32 vcc, flat_scratch, v0 ; encoding: [0x66,0x00,0x24,0x7c]
+0x66,0x00,0x24,0x7c
+
+# CHECK: v_cmp_class_f64_e32 vcc, vcc, v0 ; encoding: [0x6a,0x00,0x24,0x7c]
+0x6a,0x00,0x24,0x7c
+
+# CHECK: v_cmp_class_f64_e32 vcc, tba, v0 ; encoding: [0x6c,0x00,0x24,0x7c]
+0x6c,0x00,0x24,0x7c
+
+# CHECK: v_cmp_class_f64_e32 vcc, tma, v0 ; encoding: [0x6e,0x00,0x24,0x7c]
+0x6e,0x00,0x24,0x7c
+
+# CHECK: v_cmp_class_f64_e32 vcc, ttmp[10:11], v0 ; encoding: [0x7a,0x00,0x24,0x7c]
+0x7a,0x00,0x24,0x7c
+
+# CHECK: v_cmp_class_f64_e32 vcc, exec, v0 ; encoding: [0x7e,0x00,0x24,0x7c]
+0x7e,0x00,0x24,0x7c
+
+# CHECK: v_cmp_class_f64_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x24,0x7c]
+0x80,0x00,0x24,0x7c
+
+# CHECK: v_cmp_class_f64_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x24,0x7c]
+0xc1,0x00,0x24,0x7c
+
+# CHECK: v_cmp_class_f64_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x24,0x7c]
+0xf0,0x00,0x24,0x7c
+
+# CHECK: v_cmp_class_f64_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x24,0x7c]
+0xf7,0x00,0x24,0x7c
+
+# CHECK: v_cmp_class_f64_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x24,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x24,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_class_f64_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x24,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x24,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_class_f64_e32 vcc, v[0:1], v0 ; encoding: [0x00,0x01,0x24,0x7c]
+0x00,0x01,0x24,0x7c
+
+# CHECK: v_cmp_class_f64_e32 vcc, v[254:255], v0 ; encoding: [0xfe,0x01,0x24,0x7c]
+0xfe,0x01,0x24,0x7c
+
+# CHECK: v_cmp_class_f64_e32 vcc, s[0:1], v255 ; encoding: [0x00,0xfe,0x25,0x7c]
+0x00,0xfe,0x25,0x7c
+
+# CHECK: v_cmp_class_f64_e64 s[0:1], s[0:1], s0 ; encoding: [0x00,0x00,0x12,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x12,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f64_e64 s[2:3], s[0:1], s0 ; encoding: [0x02,0x00,0x12,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x12,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f64_e64 s[100:101], s[0:1], s0 ; encoding: [0x64,0x00,0x12,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x12,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f64_e64 flat_scratch, s[0:1], s0 ; encoding: [0x66,0x00,0x12,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x12,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f64_e64 vcc, s[0:1], s0 ; encoding: [0x6a,0x00,0x12,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x12,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f64_e64 tba, s[0:1], s0 ; encoding: [0x6c,0x00,0x12,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x12,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f64_e64 tma, s[0:1], s0 ; encoding: [0x6e,0x00,0x12,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x12,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f64_e64 ttmp[10:11], s[0:1], s0 ; encoding: [0x7a,0x00,0x12,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x12,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f64_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x12,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x12,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f64_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x12,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x12,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f64_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x12,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x12,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f64_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x12,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x12,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f64_e64 s[0:1], v[0:1], s0 ; encoding: [0x00,0x00,0x12,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x12,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_class_f64_e64 s[0:1], v[254:255], s0 ; encoding: [0x00,0x00,0x12,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x12,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_class_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x12,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x12,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_class_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x12,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x12,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_class_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x12,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x12,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_class_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x12,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x12,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_class_f64_e64 s[0:1], s[0:1], scc ; encoding: [0x00,0x00,0x12,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x12,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_class_f64_e64 s[0:1], s[0:1], v0 ; encoding: [0x00,0x00,0x12,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x12,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_class_f64_e64 s[0:1], s[0:1], v255 ; encoding: [0x00,0x00,0x12,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x12,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_class_f64_e64 s[0:1], -s[0:1], s0 ; encoding: [0x00,0x00,0x12,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x12,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_class_f64_e32 vcc, s[0:1], v0 ; encoding: [0x00,0x00,0x26,0x7c]
+0x00,0x00,0x26,0x7c
+
+# CHECK: v_cmpx_class_f64_e32 vcc, s[2:3], v0 ; encoding: [0x02,0x00,0x26,0x7c]
+0x02,0x00,0x26,0x7c
+
+# CHECK: v_cmpx_class_f64_e32 vcc, s[100:101], v0 ; encoding: [0x64,0x00,0x26,0x7c]
+0x64,0x00,0x26,0x7c
+
+# CHECK: v_cmpx_class_f64_e32 vcc, flat_scratch, v0 ; encoding: [0x66,0x00,0x26,0x7c]
+0x66,0x00,0x26,0x7c
+
+# CHECK: v_cmpx_class_f64_e32 vcc, vcc, v0 ; encoding: [0x6a,0x00,0x26,0x7c]
+0x6a,0x00,0x26,0x7c
+
+# CHECK: v_cmpx_class_f64_e32 vcc, tba, v0 ; encoding: [0x6c,0x00,0x26,0x7c]
+0x6c,0x00,0x26,0x7c
+
+# CHECK: v_cmpx_class_f64_e32 vcc, tma, v0 ; encoding: [0x6e,0x00,0x26,0x7c]
+0x6e,0x00,0x26,0x7c
+
+# CHECK: v_cmpx_class_f64_e32 vcc, ttmp[10:11], v0 ; encoding: [0x7a,0x00,0x26,0x7c]
+0x7a,0x00,0x26,0x7c
+
+# CHECK: v_cmpx_class_f64_e32 vcc, exec, v0 ; encoding: [0x7e,0x00,0x26,0x7c]
+0x7e,0x00,0x26,0x7c
+
+# CHECK: v_cmpx_class_f64_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x26,0x7c]
+0x80,0x00,0x26,0x7c
+
+# CHECK: v_cmpx_class_f64_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x26,0x7c]
+0xc1,0x00,0x26,0x7c
+
+# CHECK: v_cmpx_class_f64_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x26,0x7c]
+0xf0,0x00,0x26,0x7c
+
+# CHECK: v_cmpx_class_f64_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x26,0x7c]
+0xf7,0x00,0x26,0x7c
+
+# CHECK: v_cmpx_class_f64_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x26,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x26,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_class_f64_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x26,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x26,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_class_f64_e32 vcc, v[0:1], v0 ; encoding: [0x00,0x01,0x26,0x7c]
+0x00,0x01,0x26,0x7c
+
+# CHECK: v_cmpx_class_f64_e32 vcc, v[254:255], v0 ; encoding: [0xfe,0x01,0x26,0x7c]
+0xfe,0x01,0x26,0x7c
+
+# CHECK: v_cmpx_class_f64_e32 vcc, s[0:1], v255 ; encoding: [0x00,0xfe,0x27,0x7c]
+0x00,0xfe,0x27,0x7c
+
+# CHECK: v_cmpx_class_f64_e64 s[0:1], s[0:1], s0 ; encoding: [0x00,0x00,0x13,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x13,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f64_e64 s[2:3], s[0:1], s0 ; encoding: [0x02,0x00,0x13,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x13,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f64_e64 s[100:101], s[0:1], s0 ; encoding: [0x64,0x00,0x13,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x13,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f64_e64 flat_scratch, s[0:1], s0 ; encoding: [0x66,0x00,0x13,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x13,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f64_e64 vcc, s[0:1], s0 ; encoding: [0x6a,0x00,0x13,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x13,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f64_e64 tba, s[0:1], s0 ; encoding: [0x6c,0x00,0x13,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x13,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f64_e64 tma, s[0:1], s0 ; encoding: [0x6e,0x00,0x13,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x13,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f64_e64 ttmp[10:11], s[0:1], s0 ; encoding: [0x7a,0x00,0x13,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x13,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f64_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x13,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x13,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f64_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x13,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x13,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f64_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x13,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x13,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f64_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x13,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x13,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f64_e64 s[0:1], v[0:1], s0 ; encoding: [0x00,0x00,0x13,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x13,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_class_f64_e64 s[0:1], v[254:255], s0 ; encoding: [0x00,0x00,0x13,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x13,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_class_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x13,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x13,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_class_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x13,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x13,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_class_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x13,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x13,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_class_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x13,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x13,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_class_f64_e64 s[0:1], s[0:1], scc ; encoding: [0x00,0x00,0x13,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x13,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_class_f64_e64 s[0:1], s[0:1], v0 ; encoding: [0x00,0x00,0x13,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x13,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_class_f64_e64 s[0:1], s[0:1], v255 ; encoding: [0x00,0x00,0x13,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x13,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_class_f64_e64 s[0:1], -s[0:1], s0 ; encoding: [0x00,0x00,0x13,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x13,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_class_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x28,0x7c]
+0x00,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x28,0x7c]
+0x65,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x28,0x7c]
+0x66,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x28,0x7c]
+0x67,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x28,0x7c]
+0x6a,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x28,0x7c]
+0x6b,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x28,0x7c]
+0x6c,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x28,0x7c]
+0x6d,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x28,0x7c]
+0x6e,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x28,0x7c]
+0x6f,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x28,0x7c]
+0x7b,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x28,0x7c]
+0x7c,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x28,0x7c]
+0x7e,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x28,0x7c]
+0x7f,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x28,0x7c]
+0x80,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x28,0x7c]
+0xc1,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x28,0x7c]
+0xf0,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x28,0x7c]
+0xf7,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x28,0x7c]
+0xfd,0x00,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x28,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x28,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_class_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x28,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x28,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_class_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x28,0x7c]
+0x00,0x01,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x28,0x7c]
+0xff,0x01,0x28,0x7c
+
+# CHECK: v_cmp_class_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x29,0x7c]
+0x00,0xfe,0x29,0x7c
+
+# CHECK: v_cmp_class_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x14,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x14,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x14,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x14,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x14,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x14,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x14,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x14,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x14,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x14,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x14,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x14,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x14,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x14,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x14,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x14,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x14,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x14,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x14,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x14,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x14,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x14,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x14,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x14,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x14,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x14,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_class_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x14,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x14,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_class_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x14,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x14,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_class_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x14,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x14,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_class_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x14,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x14,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_class_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x14,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x14,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_class_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x14,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x14,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_class_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x14,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x14,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_class_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x14,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x14,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_class_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x14,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x14,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_class_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x14,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x14,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_class_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x2a,0x7c]
+0x00,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x2a,0x7c]
+0x65,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x2a,0x7c]
+0x66,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x2a,0x7c]
+0x67,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x2a,0x7c]
+0x6a,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x2a,0x7c]
+0x6b,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x2a,0x7c]
+0x6c,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x2a,0x7c]
+0x6d,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x2a,0x7c]
+0x6e,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x2a,0x7c]
+0x6f,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x2a,0x7c]
+0x7b,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x2a,0x7c]
+0x7c,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x2a,0x7c]
+0x7e,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x2a,0x7c]
+0x7f,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x2a,0x7c]
+0x80,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x2a,0x7c]
+0xc1,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x2a,0x7c]
+0xf0,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x2a,0x7c]
+0xf7,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x2a,0x7c]
+0xfd,0x00,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x2a,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x2a,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_class_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x2a,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x2a,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_class_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x2a,0x7c]
+0x00,0x01,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x2a,0x7c]
+0xff,0x01,0x2a,0x7c
+
+# CHECK: v_cmpx_class_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x2b,0x7c]
+0x00,0xfe,0x2b,0x7c
+
+# CHECK: v_cmpx_class_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x15,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x15,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x15,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x15,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x15,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x15,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x15,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x15,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x15,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x15,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x15,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x15,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x15,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x15,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x15,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x15,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x15,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x15,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x15,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x15,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x15,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x15,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x15,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x15,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x15,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x15,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_class_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x15,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x15,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_class_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x15,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x15,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_class_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x15,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x15,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_class_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x15,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x15,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_class_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x15,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x15,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_class_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x15,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x15,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_class_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x15,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x15,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_class_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x15,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x15,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_class_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x15,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x15,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_class_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x15,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x15,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_f_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x40,0x7c]
+0x00,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x40,0x7c]
+0x65,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x40,0x7c]
+0x66,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x40,0x7c]
+0x67,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x40,0x7c]
+0x6a,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x40,0x7c]
+0x6b,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x40,0x7c]
+0x6c,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x40,0x7c]
+0x6d,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x40,0x7c]
+0x6e,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x40,0x7c]
+0x6f,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x40,0x7c]
+0x7b,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x40,0x7c]
+0x7c,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x40,0x7c]
+0x7e,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x40,0x7c]
+0x7f,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x40,0x7c]
+0x80,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x40,0x7c]
+0xc1,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x40,0x7c]
+0xf0,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x40,0x7c]
+0xf7,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x40,0x7c]
+0xfd,0x00,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x40,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x40,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_f_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x40,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x40,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_f_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x40,0x7c]
+0x00,0x01,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x40,0x7c]
+0xff,0x01,0x40,0x7c
+
+# CHECK: v_cmp_f_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x41,0x7c]
+0x00,0xfe,0x41,0x7c
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x20,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x20,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x20,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x20,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x20,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x20,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x20,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x20,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x20,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x20,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x20,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x20,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x20,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x20,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x20,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x20,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x20,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x20,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x20,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x20,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x20,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x20,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x20,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x20,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x20,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x20,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x20,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x20,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x20,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x20,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x20,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x20,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x20,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x20,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x20,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x20,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x20,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x20,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x20,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x20,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x20,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x20,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x20,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x20,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x20,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x20,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x20,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x20,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x20,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x20,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_f_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x20,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x20,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x42,0x7c]
+0x00,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x42,0x7c]
+0x65,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x42,0x7c]
+0x66,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x42,0x7c]
+0x67,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x42,0x7c]
+0x6a,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x42,0x7c]
+0x6b,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x42,0x7c]
+0x6c,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x42,0x7c]
+0x6d,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x42,0x7c]
+0x6e,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x42,0x7c]
+0x6f,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x42,0x7c]
+0x7b,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x42,0x7c]
+0x7c,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x42,0x7c]
+0x7e,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x42,0x7c]
+0x7f,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x42,0x7c]
+0x80,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x42,0x7c]
+0xc1,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x42,0x7c]
+0xf0,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x42,0x7c]
+0xf7,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x42,0x7c]
+0xfd,0x00,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x42,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x42,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_lt_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x42,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x42,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_lt_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x42,0x7c]
+0x00,0x01,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x42,0x7c]
+0xff,0x01,0x42,0x7c
+
+# CHECK: v_cmp_lt_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x43,0x7c]
+0x00,0xfe,0x43,0x7c
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x21,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x21,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x21,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x21,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x21,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x21,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x21,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x21,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x21,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x21,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x21,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x21,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x21,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x21,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x21,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x21,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x21,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x21,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x21,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x21,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x21,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x21,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x21,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x21,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x21,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x21,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x21,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x21,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x21,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x21,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x21,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x21,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x21,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x21,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x21,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x21,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x21,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x21,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x21,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x21,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x21,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x21,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x21,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x21,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x21,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x21,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x21,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x21,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x21,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x21,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_lt_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x21,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x21,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x44,0x7c]
+0x00,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x44,0x7c]
+0x65,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x44,0x7c]
+0x66,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x44,0x7c]
+0x67,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x44,0x7c]
+0x6a,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x44,0x7c]
+0x6b,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x44,0x7c]
+0x6c,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x44,0x7c]
+0x6d,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x44,0x7c]
+0x6e,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x44,0x7c]
+0x6f,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x44,0x7c]
+0x7b,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x44,0x7c]
+0x7c,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x44,0x7c]
+0x7e,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x44,0x7c]
+0x7f,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x44,0x7c]
+0x80,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x44,0x7c]
+0xc1,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x44,0x7c]
+0xf0,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x44,0x7c]
+0xf7,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x44,0x7c]
+0xfd,0x00,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x44,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x44,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_eq_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x44,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x44,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_eq_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x44,0x7c]
+0x00,0x01,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x44,0x7c]
+0xff,0x01,0x44,0x7c
+
+# CHECK: v_cmp_eq_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x45,0x7c]
+0x00,0xfe,0x45,0x7c
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x22,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x22,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x22,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x22,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x22,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x22,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x22,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x22,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x22,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x22,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x22,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x22,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x22,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x22,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x22,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x22,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x22,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x22,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x22,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x22,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x22,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x22,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x22,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x22,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x22,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x22,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x22,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x22,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x22,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x22,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x22,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x22,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x22,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x22,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x22,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x22,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x22,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x22,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x22,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x22,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x22,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x22,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x22,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x22,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x22,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x22,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x22,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x22,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x22,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x22,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_eq_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x22,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x22,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x46,0x7c]
+0x00,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x46,0x7c]
+0x65,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x46,0x7c]
+0x66,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x46,0x7c]
+0x67,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x46,0x7c]
+0x6a,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x46,0x7c]
+0x6b,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x46,0x7c]
+0x6c,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x46,0x7c]
+0x6d,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x46,0x7c]
+0x6e,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x46,0x7c]
+0x6f,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x46,0x7c]
+0x7b,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x46,0x7c]
+0x7c,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x46,0x7c]
+0x7e,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x46,0x7c]
+0x7f,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x46,0x7c]
+0x80,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x46,0x7c]
+0xc1,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x46,0x7c]
+0xf0,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x46,0x7c]
+0xf7,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x46,0x7c]
+0xfd,0x00,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x46,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x46,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_le_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x46,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x46,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_le_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x46,0x7c]
+0x00,0x01,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x46,0x7c]
+0xff,0x01,0x46,0x7c
+
+# CHECK: v_cmp_le_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x47,0x7c]
+0x00,0xfe,0x47,0x7c
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x23,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x23,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x23,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x23,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x23,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x23,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x23,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x23,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x23,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x23,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x23,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x23,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x23,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x23,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x23,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x23,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x23,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x23,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x23,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x23,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x23,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x23,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x23,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x23,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x23,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x23,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x23,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x23,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x23,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x23,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x23,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x23,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x23,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x23,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x23,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x23,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x23,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x23,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x23,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x23,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x23,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x23,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x23,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x23,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x23,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x23,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x23,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x23,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x23,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x23,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_le_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x23,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x23,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x48,0x7c]
+0x00,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x48,0x7c]
+0x65,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x48,0x7c]
+0x66,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x48,0x7c]
+0x67,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x48,0x7c]
+0x6a,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x48,0x7c]
+0x6b,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x48,0x7c]
+0x6c,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x48,0x7c]
+0x6d,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x48,0x7c]
+0x6e,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x48,0x7c]
+0x6f,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x48,0x7c]
+0x7b,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x48,0x7c]
+0x7c,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x48,0x7c]
+0x7e,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x48,0x7c]
+0x7f,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x48,0x7c]
+0x80,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x48,0x7c]
+0xc1,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x48,0x7c]
+0xf0,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x48,0x7c]
+0xf7,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x48,0x7c]
+0xfd,0x00,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x48,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x48,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_gt_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x48,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x48,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_gt_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x48,0x7c]
+0x00,0x01,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x48,0x7c]
+0xff,0x01,0x48,0x7c
+
+# CHECK: v_cmp_gt_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x49,0x7c]
+0x00,0xfe,0x49,0x7c
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x24,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x24,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x24,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x24,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x24,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x24,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x24,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x24,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x24,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x24,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x24,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x24,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x24,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x24,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x24,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x24,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x24,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x24,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x24,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x24,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x24,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x24,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x24,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x24,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x24,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x24,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x24,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x24,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x24,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x24,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x24,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x24,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x24,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x24,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x24,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x24,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x24,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x24,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x24,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x24,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x24,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x24,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x24,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x24,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x24,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x24,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x24,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x24,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x24,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x24,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_gt_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x24,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x24,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x4a,0x7c]
+0x00,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x4a,0x7c]
+0x65,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x4a,0x7c]
+0x66,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x4a,0x7c]
+0x67,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x4a,0x7c]
+0x6a,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x4a,0x7c]
+0x6b,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x4a,0x7c]
+0x6c,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x4a,0x7c]
+0x6d,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x4a,0x7c]
+0x6e,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x4a,0x7c]
+0x6f,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x4a,0x7c]
+0x7b,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x4a,0x7c]
+0x7c,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x4a,0x7c]
+0x7e,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x4a,0x7c]
+0x7f,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x4a,0x7c]
+0x80,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x4a,0x7c]
+0xc1,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x4a,0x7c]
+0xf0,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x4a,0x7c]
+0xf7,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x4a,0x7c]
+0xfd,0x00,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x4a,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x4a,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_lg_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x4a,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x4a,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_lg_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x4a,0x7c]
+0x00,0x01,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x4a,0x7c]
+0xff,0x01,0x4a,0x7c
+
+# CHECK: v_cmp_lg_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x4b,0x7c]
+0x00,0xfe,0x4b,0x7c
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x25,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x25,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x25,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x25,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x25,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x25,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x25,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x25,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x25,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x25,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x25,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x25,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x25,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x25,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x25,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x25,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x25,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x25,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x25,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x25,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x25,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x25,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x25,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x25,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x25,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x25,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x25,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x25,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x25,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x25,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x25,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x25,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x25,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x25,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x25,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x25,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x25,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x25,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x25,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x25,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x25,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x25,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x25,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x25,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x25,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x25,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x25,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x25,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x25,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x25,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_lg_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x25,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x25,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x4c,0x7c]
+0x00,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x4c,0x7c]
+0x65,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x4c,0x7c]
+0x66,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x4c,0x7c]
+0x67,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x4c,0x7c]
+0x6a,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x4c,0x7c]
+0x6b,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x4c,0x7c]
+0x6c,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x4c,0x7c]
+0x6d,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x4c,0x7c]
+0x6e,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x4c,0x7c]
+0x6f,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x4c,0x7c]
+0x7b,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x4c,0x7c]
+0x7c,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x4c,0x7c]
+0x7e,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x4c,0x7c]
+0x7f,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x4c,0x7c]
+0x80,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x4c,0x7c]
+0xc1,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x4c,0x7c]
+0xf0,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x4c,0x7c]
+0xf7,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x4c,0x7c]
+0xfd,0x00,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x4c,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x4c,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_ge_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x4c,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x4c,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_ge_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x4c,0x7c]
+0x00,0x01,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x4c,0x7c]
+0xff,0x01,0x4c,0x7c
+
+# CHECK: v_cmp_ge_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x4d,0x7c]
+0x00,0xfe,0x4d,0x7c
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x26,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x26,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x26,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x26,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x26,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x26,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x26,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x26,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x26,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x26,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x26,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x26,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x26,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x26,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x26,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x26,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x26,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x26,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x26,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x26,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x26,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x26,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x26,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x26,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x26,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x26,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x26,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x26,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x26,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x26,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x26,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x26,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x26,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x26,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x26,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x26,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x26,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x26,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x26,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x26,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x26,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x26,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x26,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x26,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x26,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x26,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x26,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x26,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x26,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x26,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_ge_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x26,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x26,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x4e,0x7c]
+0x00,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x4e,0x7c]
+0x65,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x4e,0x7c]
+0x66,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x4e,0x7c]
+0x67,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x4e,0x7c]
+0x6a,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x4e,0x7c]
+0x6b,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x4e,0x7c]
+0x6c,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x4e,0x7c]
+0x6d,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x4e,0x7c]
+0x6e,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x4e,0x7c]
+0x6f,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x4e,0x7c]
+0x7b,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x4e,0x7c]
+0x7c,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x4e,0x7c]
+0x7e,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x4e,0x7c]
+0x7f,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x4e,0x7c]
+0x80,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x4e,0x7c]
+0xc1,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x4e,0x7c]
+0xf0,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x4e,0x7c]
+0xf7,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x4e,0x7c]
+0xfd,0x00,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x4e,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x4e,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_o_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x4e,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x4e,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_o_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x4e,0x7c]
+0x00,0x01,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x4e,0x7c]
+0xff,0x01,0x4e,0x7c
+
+# CHECK: v_cmp_o_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x4f,0x7c]
+0x00,0xfe,0x4f,0x7c
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x27,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x27,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x27,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x27,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x27,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x27,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x27,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x27,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x27,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x27,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x27,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x27,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x27,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x27,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x27,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x27,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x27,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x27,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x27,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x27,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x27,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x27,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x27,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x27,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x27,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x27,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x27,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x27,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x27,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x27,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x27,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x27,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x27,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x27,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x27,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x27,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x27,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x27,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x27,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x27,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x27,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x27,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x27,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x27,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x27,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x27,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x27,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x27,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x27,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x27,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_o_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x27,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x27,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x50,0x7c]
+0x00,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x50,0x7c]
+0x65,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x50,0x7c]
+0x66,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x50,0x7c]
+0x67,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x50,0x7c]
+0x6a,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x50,0x7c]
+0x6b,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x50,0x7c]
+0x6c,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x50,0x7c]
+0x6d,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x50,0x7c]
+0x6e,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x50,0x7c]
+0x6f,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x50,0x7c]
+0x7b,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x50,0x7c]
+0x7c,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x50,0x7c]
+0x7e,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x50,0x7c]
+0x7f,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x50,0x7c]
+0x80,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x50,0x7c]
+0xc1,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x50,0x7c]
+0xf0,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x50,0x7c]
+0xf7,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x50,0x7c]
+0xfd,0x00,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x50,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x50,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_u_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x50,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x50,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_u_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x50,0x7c]
+0x00,0x01,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x50,0x7c]
+0xff,0x01,0x50,0x7c
+
+# CHECK: v_cmp_u_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x51,0x7c]
+0x00,0xfe,0x51,0x7c
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x28,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x28,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x28,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x28,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x28,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x28,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x28,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x28,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x28,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x28,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x28,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x28,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x28,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x28,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x28,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x28,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x28,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x28,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x28,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x28,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x28,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x28,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x28,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x28,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x28,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x28,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x28,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x28,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x28,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x28,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x28,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x28,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x28,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x28,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x28,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x28,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x28,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x28,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x28,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x28,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x28,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x28,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x28,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x28,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x28,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x28,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x28,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x28,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x28,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x28,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_u_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x28,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x28,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x52,0x7c]
+0x00,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x52,0x7c]
+0x65,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x52,0x7c]
+0x66,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x52,0x7c]
+0x67,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x52,0x7c]
+0x6a,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x52,0x7c]
+0x6b,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x52,0x7c]
+0x6c,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x52,0x7c]
+0x6d,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x52,0x7c]
+0x6e,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x52,0x7c]
+0x6f,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x52,0x7c]
+0x7b,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x52,0x7c]
+0x7c,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x52,0x7c]
+0x7e,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x52,0x7c]
+0x7f,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x52,0x7c]
+0x80,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x52,0x7c]
+0xc1,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x52,0x7c]
+0xf0,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x52,0x7c]
+0xf7,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x52,0x7c]
+0xfd,0x00,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x52,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x52,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_nge_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x52,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x52,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_nge_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x52,0x7c]
+0x00,0x01,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x52,0x7c]
+0xff,0x01,0x52,0x7c
+
+# CHECK: v_cmp_nge_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x53,0x7c]
+0x00,0xfe,0x53,0x7c
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x29,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x29,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x29,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x29,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x29,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x29,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x29,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x29,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x29,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x29,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x29,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x29,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x29,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x29,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x29,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x29,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x29,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x29,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x29,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x29,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x29,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x29,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x29,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x29,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x29,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x29,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x29,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x29,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x29,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x29,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x29,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x29,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x29,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x29,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x29,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x29,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x29,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x29,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x29,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x29,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x29,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x29,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x29,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x29,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x29,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x29,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x29,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x29,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x29,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x29,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_nge_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x29,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x29,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x54,0x7c]
+0x00,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x54,0x7c]
+0x65,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x54,0x7c]
+0x66,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x54,0x7c]
+0x67,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x54,0x7c]
+0x6a,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x54,0x7c]
+0x6b,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x54,0x7c]
+0x6c,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x54,0x7c]
+0x6d,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x54,0x7c]
+0x6e,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x54,0x7c]
+0x6f,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x54,0x7c]
+0x7b,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x54,0x7c]
+0x7c,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x54,0x7c]
+0x7e,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x54,0x7c]
+0x7f,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x54,0x7c]
+0x80,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x54,0x7c]
+0xc1,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x54,0x7c]
+0xf0,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x54,0x7c]
+0xf7,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x54,0x7c]
+0xfd,0x00,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x54,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x54,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x54,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x54,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x54,0x7c]
+0x00,0x01,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x54,0x7c]
+0xff,0x01,0x54,0x7c
+
+# CHECK: v_cmp_nlg_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x55,0x7c]
+0x00,0xfe,0x55,0x7c
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x2a,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x2a,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x2a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x2a,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x2a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x2a,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x2a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x2a,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x2a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x2a,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x2a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x2a,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x2a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x2a,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x2a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x2a,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x2a,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x2a,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x2a,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x2a,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x2a,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x2a,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x2a,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x2a,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x2a,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x2a,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x2a,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x2a,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x2a,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x2a,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x2a,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x2a,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x2a,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x2a,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x2a,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x2a,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x2a,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x2a,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x2a,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x2a,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x2a,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x2a,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x2a,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x2a,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x2a,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x2a,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x2a,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x2a,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x2a,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_nlg_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x2a,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x2a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x56,0x7c]
+0x00,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x56,0x7c]
+0x65,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x56,0x7c]
+0x66,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x56,0x7c]
+0x67,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x56,0x7c]
+0x6a,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x56,0x7c]
+0x6b,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x56,0x7c]
+0x6c,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x56,0x7c]
+0x6d,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x56,0x7c]
+0x6e,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x56,0x7c]
+0x6f,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x56,0x7c]
+0x7b,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x56,0x7c]
+0x7c,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x56,0x7c]
+0x7e,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x56,0x7c]
+0x7f,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x56,0x7c]
+0x80,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x56,0x7c]
+0xc1,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x56,0x7c]
+0xf0,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x56,0x7c]
+0xf7,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x56,0x7c]
+0xfd,0x00,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x56,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x56,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x56,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x56,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x56,0x7c]
+0x00,0x01,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x56,0x7c]
+0xff,0x01,0x56,0x7c
+
+# CHECK: v_cmp_ngt_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x57,0x7c]
+0x00,0xfe,0x57,0x7c
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x2b,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x2b,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x2b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x2b,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x2b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x2b,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x2b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x2b,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x2b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x2b,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x2b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x2b,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x2b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x2b,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x2b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x2b,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x2b,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x2b,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x2b,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x2b,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x2b,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x2b,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x2b,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x2b,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x2b,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x2b,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x2b,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x2b,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x2b,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x2b,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x2b,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x2b,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x2b,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x2b,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x2b,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x2b,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x2b,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x2b,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x2b,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x2b,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x2b,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x2b,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x2b,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x2b,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x2b,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x2b,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x2b,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x2b,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x2b,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_ngt_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x2b,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x2b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x58,0x7c]
+0x00,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x58,0x7c]
+0x65,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x58,0x7c]
+0x66,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x58,0x7c]
+0x67,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x58,0x7c]
+0x6a,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x58,0x7c]
+0x6b,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x58,0x7c]
+0x6c,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x58,0x7c]
+0x6d,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x58,0x7c]
+0x6e,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x58,0x7c]
+0x6f,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x58,0x7c]
+0x7b,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x58,0x7c]
+0x7c,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x58,0x7c]
+0x7e,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x58,0x7c]
+0x7f,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x58,0x7c]
+0x80,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x58,0x7c]
+0xc1,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x58,0x7c]
+0xf0,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x58,0x7c]
+0xf7,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x58,0x7c]
+0xfd,0x00,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x58,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x58,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_nle_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x58,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x58,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_nle_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x58,0x7c]
+0x00,0x01,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x58,0x7c]
+0xff,0x01,0x58,0x7c
+
+# CHECK: v_cmp_nle_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x59,0x7c]
+0x00,0xfe,0x59,0x7c
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x2c,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x2c,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x2c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x2c,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x2c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x2c,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x2c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x2c,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x2c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x2c,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x2c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x2c,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x2c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x2c,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x2c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x2c,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x2c,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x2c,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x2c,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x2c,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x2c,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x2c,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x2c,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x2c,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x2c,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x2c,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x2c,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x2c,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x2c,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x2c,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x2c,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x2c,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x2c,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x2c,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x2c,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x2c,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x2c,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x2c,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x2c,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x2c,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x2c,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x2c,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x2c,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x2c,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x2c,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_nle_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x2c,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x2c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x5a,0x7c]
+0x00,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x5a,0x7c]
+0x65,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x5a,0x7c]
+0x66,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x5a,0x7c]
+0x67,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x5a,0x7c]
+0x6a,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x5a,0x7c]
+0x6b,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x5a,0x7c]
+0x6c,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x5a,0x7c]
+0x6d,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x5a,0x7c]
+0x6e,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x5a,0x7c]
+0x6f,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x5a,0x7c]
+0x7b,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x5a,0x7c]
+0x7c,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x5a,0x7c]
+0x7e,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x5a,0x7c]
+0x7f,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x5a,0x7c]
+0x80,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x5a,0x7c]
+0xc1,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x5a,0x7c]
+0xf0,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x5a,0x7c]
+0xf7,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x5a,0x7c]
+0xfd,0x00,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x5a,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x5a,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_neq_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x5a,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x5a,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_neq_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x5a,0x7c]
+0x00,0x01,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x5a,0x7c]
+0xff,0x01,0x5a,0x7c
+
+# CHECK: v_cmp_neq_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x5b,0x7c]
+0x00,0xfe,0x5b,0x7c
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x2d,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x2d,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x2d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x2d,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x2d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x2d,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x2d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x2d,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x2d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x2d,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x2d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x2d,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x2d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x2d,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x2d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x2d,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x2d,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x2d,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x2d,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x2d,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x2d,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x2d,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x2d,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x2d,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x2d,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x2d,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x2d,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x2d,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x2d,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x2d,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x2d,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x2d,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x2d,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x2d,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x2d,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x2d,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x2d,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x2d,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x2d,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x2d,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x2d,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x2d,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x2d,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x2d,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x2d,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x2d,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x2d,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x2d,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x2d,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_neq_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x2d,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x2d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x5c,0x7c]
+0x00,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x5c,0x7c]
+0x65,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x5c,0x7c]
+0x66,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x5c,0x7c]
+0x67,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x5c,0x7c]
+0x6a,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x5c,0x7c]
+0x6b,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x5c,0x7c]
+0x6c,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x5c,0x7c]
+0x6d,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x5c,0x7c]
+0x6e,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x5c,0x7c]
+0x6f,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x5c,0x7c]
+0x7b,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x5c,0x7c]
+0x7c,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x5c,0x7c]
+0x7e,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x5c,0x7c]
+0x7f,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x5c,0x7c]
+0x80,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x5c,0x7c]
+0xc1,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x5c,0x7c]
+0xf0,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x5c,0x7c]
+0xf7,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x5c,0x7c]
+0xfd,0x00,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x5c,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x5c,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x5c,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x5c,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x5c,0x7c]
+0x00,0x01,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x5c,0x7c]
+0xff,0x01,0x5c,0x7c
+
+# CHECK: v_cmp_nlt_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x5d,0x7c]
+0x00,0xfe,0x5d,0x7c
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x2e,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x2e,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x2e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x2e,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x2e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x2e,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x2e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x2e,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x2e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x2e,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x2e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x2e,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x2e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x2e,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x2e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x2e,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x2e,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x2e,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x2e,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x2e,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x2e,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x2e,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x2e,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x2e,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x2e,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x2e,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x2e,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x2e,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x2e,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x2e,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x2e,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x2e,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x2e,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x2e,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x2e,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x2e,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x2e,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x2e,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x2e,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x2e,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x2e,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x2e,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x2e,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x2e,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x2e,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x2e,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x2e,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x2e,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x2e,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_nlt_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x2e,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x2e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x5e,0x7c]
+0x00,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x5e,0x7c]
+0x65,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x5e,0x7c]
+0x66,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x5e,0x7c]
+0x67,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x5e,0x7c]
+0x6a,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x5e,0x7c]
+0x6b,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x5e,0x7c]
+0x6c,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x5e,0x7c]
+0x6d,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x5e,0x7c]
+0x6e,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x5e,0x7c]
+0x6f,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x5e,0x7c]
+0x7b,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x5e,0x7c]
+0x7c,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x5e,0x7c]
+0x7e,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x5e,0x7c]
+0x7f,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x5e,0x7c]
+0x80,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x5e,0x7c]
+0xc1,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x5e,0x7c]
+0xf0,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x5e,0x7c]
+0xf7,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x5e,0x7c]
+0xfd,0x00,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x5e,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x5e,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_tru_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x5e,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x5e,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_tru_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x5e,0x7c]
+0x00,0x01,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x5e,0x7c]
+0xff,0x01,0x5e,0x7c
+
+# CHECK: v_cmp_tru_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x5f,0x7c]
+0x00,0xfe,0x5f,0x7c
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x2f,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x2f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x2f,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x2f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x2f,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x2f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x2f,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x2f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x2f,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x2f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x2f,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x2f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x2f,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x2f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x2f,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x2f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x2f,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x2f,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x2f,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x2f,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x2f,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x2f,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x2f,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x2f,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x2f,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x2f,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x2f,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x2f,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x2f,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x2f,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x2f,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x2f,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x2f,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x2f,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x2f,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x2f,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x2f,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x2f,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x2f,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x2f,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x2f,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x2f,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x2f,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x2f,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x2f,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x2f,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x2f,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x2f,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x2f,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x2f,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_tru_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x2f,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x2f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x60,0x7c]
+0x00,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x60,0x7c]
+0x65,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x60,0x7c]
+0x66,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x60,0x7c]
+0x67,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x60,0x7c]
+0x6a,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x60,0x7c]
+0x6b,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x60,0x7c]
+0x6c,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x60,0x7c]
+0x6d,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x60,0x7c]
+0x6e,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x60,0x7c]
+0x6f,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x60,0x7c]
+0x7b,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x60,0x7c]
+0x7c,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x60,0x7c]
+0x7e,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x60,0x7c]
+0x7f,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x60,0x7c]
+0x80,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x60,0x7c]
+0xc1,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x60,0x7c]
+0xf0,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x60,0x7c]
+0xf7,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x60,0x7c]
+0xfd,0x00,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x60,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x60,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_f_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x60,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x60,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_f_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x60,0x7c]
+0x00,0x01,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x60,0x7c]
+0xff,0x01,0x60,0x7c
+
+# CHECK: v_cmpx_f_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x61,0x7c]
+0x00,0xfe,0x61,0x7c
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x30,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x30,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x30,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x30,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x30,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x30,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x30,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x30,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x30,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x30,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x30,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x30,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x30,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x30,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x30,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x30,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x30,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x30,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x30,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x30,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x30,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x30,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x30,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x30,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x30,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x30,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x30,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x30,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x30,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x30,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x30,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x30,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x30,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x30,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x30,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x30,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x30,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x30,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x30,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x30,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x30,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x30,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x30,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x30,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x30,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x30,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x30,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x30,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x30,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x30,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_f_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x30,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x30,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x62,0x7c]
+0x00,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x62,0x7c]
+0x65,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x62,0x7c]
+0x66,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x62,0x7c]
+0x67,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x62,0x7c]
+0x6a,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x62,0x7c]
+0x6b,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x62,0x7c]
+0x6c,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x62,0x7c]
+0x6d,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x62,0x7c]
+0x6e,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x62,0x7c]
+0x6f,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x62,0x7c]
+0x7b,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x62,0x7c]
+0x7c,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x62,0x7c]
+0x7e,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x62,0x7c]
+0x7f,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x62,0x7c]
+0x80,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x62,0x7c]
+0xc1,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x62,0x7c]
+0xf0,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x62,0x7c]
+0xf7,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x62,0x7c]
+0xfd,0x00,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x62,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x62,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x62,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x62,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x62,0x7c]
+0x00,0x01,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x62,0x7c]
+0xff,0x01,0x62,0x7c
+
+# CHECK: v_cmpx_lt_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x63,0x7c]
+0x00,0xfe,0x63,0x7c
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x31,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x31,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x31,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x31,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x31,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x31,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x31,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x31,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x31,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x31,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x31,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x31,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x31,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x31,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x31,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x31,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x31,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x31,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x31,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x31,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x31,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x31,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x31,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x31,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x31,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x31,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x31,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x31,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x31,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x31,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x31,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x31,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x31,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x31,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x31,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x31,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x31,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x31,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x31,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x31,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x31,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x31,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x31,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x31,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x31,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x31,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x31,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x31,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x31,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x31,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_lt_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x31,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x31,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x64,0x7c]
+0x00,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x64,0x7c]
+0x65,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x64,0x7c]
+0x66,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x64,0x7c]
+0x67,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x64,0x7c]
+0x6a,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x64,0x7c]
+0x6b,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x64,0x7c]
+0x6c,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x64,0x7c]
+0x6d,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x64,0x7c]
+0x6e,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x64,0x7c]
+0x6f,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x64,0x7c]
+0x7b,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x64,0x7c]
+0x7c,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x64,0x7c]
+0x7e,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x64,0x7c]
+0x7f,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x64,0x7c]
+0x80,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x64,0x7c]
+0xc1,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x64,0x7c]
+0xf0,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x64,0x7c]
+0xf7,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x64,0x7c]
+0xfd,0x00,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x64,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x64,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x64,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x64,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x64,0x7c]
+0x00,0x01,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x64,0x7c]
+0xff,0x01,0x64,0x7c
+
+# CHECK: v_cmpx_eq_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x65,0x7c]
+0x00,0xfe,0x65,0x7c
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x32,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x32,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x32,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x32,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x32,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x32,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x32,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x32,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x32,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x32,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x32,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x32,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x32,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x32,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x32,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x32,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x32,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x32,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x32,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x32,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x32,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x32,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x32,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x32,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x32,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x32,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x32,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x32,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x32,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x32,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x32,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x32,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x32,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x32,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x32,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x32,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x32,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x32,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x32,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x32,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x32,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x32,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x32,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x32,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x32,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x32,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x32,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x32,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x32,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x32,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_eq_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x32,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x32,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x66,0x7c]
+0x00,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x66,0x7c]
+0x65,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x66,0x7c]
+0x66,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x66,0x7c]
+0x67,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x66,0x7c]
+0x6a,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x66,0x7c]
+0x6b,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x66,0x7c]
+0x6c,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x66,0x7c]
+0x6d,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x66,0x7c]
+0x6e,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x66,0x7c]
+0x6f,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x66,0x7c]
+0x7b,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x66,0x7c]
+0x7c,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x66,0x7c]
+0x7e,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x66,0x7c]
+0x7f,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x66,0x7c]
+0x80,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x66,0x7c]
+0xc1,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x66,0x7c]
+0xf0,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x66,0x7c]
+0xf7,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x66,0x7c]
+0xfd,0x00,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x66,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x66,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_le_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x66,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x66,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_le_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x66,0x7c]
+0x00,0x01,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x66,0x7c]
+0xff,0x01,0x66,0x7c
+
+# CHECK: v_cmpx_le_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x67,0x7c]
+0x00,0xfe,0x67,0x7c
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x33,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x33,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x33,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x33,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x33,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x33,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x33,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x33,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x33,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x33,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x33,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x33,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x33,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x33,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x33,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x33,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x33,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x33,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x33,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x33,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x33,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x33,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x33,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x33,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x33,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x33,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x33,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x33,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x33,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x33,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x33,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x33,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x33,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x33,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x33,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x33,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x33,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x33,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x33,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x33,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x33,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x33,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x33,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x33,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x33,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x33,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x33,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x33,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x33,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x33,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_le_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x33,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x33,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x68,0x7c]
+0x00,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x68,0x7c]
+0x65,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x68,0x7c]
+0x66,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x68,0x7c]
+0x67,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x68,0x7c]
+0x6a,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x68,0x7c]
+0x6b,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x68,0x7c]
+0x6c,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x68,0x7c]
+0x6d,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x68,0x7c]
+0x6e,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x68,0x7c]
+0x6f,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x68,0x7c]
+0x7b,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x68,0x7c]
+0x7c,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x68,0x7c]
+0x7e,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x68,0x7c]
+0x7f,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x68,0x7c]
+0x80,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x68,0x7c]
+0xc1,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x68,0x7c]
+0xf0,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x68,0x7c]
+0xf7,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x68,0x7c]
+0xfd,0x00,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x68,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x68,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x68,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x68,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x68,0x7c]
+0x00,0x01,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x68,0x7c]
+0xff,0x01,0x68,0x7c
+
+# CHECK: v_cmpx_gt_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x69,0x7c]
+0x00,0xfe,0x69,0x7c
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x34,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x34,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x34,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x34,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x34,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x34,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x34,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x34,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x34,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x34,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x34,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x34,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x34,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x34,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x34,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x34,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x34,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x34,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x34,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x34,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x34,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x34,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x34,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x34,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x34,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x34,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x34,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x34,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x34,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x34,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x34,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x34,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x34,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x34,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x34,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x34,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x34,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x34,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x34,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x34,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x34,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x34,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x34,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x34,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x34,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x34,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x34,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x34,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x34,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x34,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_gt_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x34,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x34,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x6a,0x7c]
+0x00,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x6a,0x7c]
+0x65,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x6a,0x7c]
+0x66,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x6a,0x7c]
+0x67,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x6a,0x7c]
+0x6a,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x6a,0x7c]
+0x6b,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x6a,0x7c]
+0x6c,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x6a,0x7c]
+0x6d,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x6a,0x7c]
+0x6e,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x6a,0x7c]
+0x6f,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x6a,0x7c]
+0x7b,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x6a,0x7c]
+0x7c,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x6a,0x7c]
+0x7e,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x6a,0x7c]
+0x7f,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x6a,0x7c]
+0x80,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x6a,0x7c]
+0xc1,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x6a,0x7c]
+0xf0,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x6a,0x7c]
+0xf7,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x6a,0x7c]
+0xfd,0x00,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x6a,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x6a,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x6a,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x6a,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x6a,0x7c]
+0x00,0x01,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x6a,0x7c]
+0xff,0x01,0x6a,0x7c
+
+# CHECK: v_cmpx_lg_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x6b,0x7c]
+0x00,0xfe,0x6b,0x7c
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x35,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x35,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x35,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x35,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x35,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x35,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x35,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x35,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x35,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x35,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x35,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x35,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x35,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x35,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x35,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x35,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x35,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x35,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x35,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x35,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x35,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x35,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x35,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x35,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x35,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x35,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x35,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x35,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x35,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x35,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x35,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x35,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x35,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x35,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x35,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x35,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x35,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x35,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x35,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x35,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x35,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x35,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x35,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x35,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x35,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x35,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x35,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x35,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x35,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x35,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_lg_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x35,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x35,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x6c,0x7c]
+0x00,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x6c,0x7c]
+0x65,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x6c,0x7c]
+0x66,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x6c,0x7c]
+0x67,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x6c,0x7c]
+0x6a,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x6c,0x7c]
+0x6b,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x6c,0x7c]
+0x6c,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x6c,0x7c]
+0x6d,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x6c,0x7c]
+0x6e,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x6c,0x7c]
+0x6f,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x6c,0x7c]
+0x7b,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x6c,0x7c]
+0x7c,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x6c,0x7c]
+0x7e,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x6c,0x7c]
+0x7f,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x6c,0x7c]
+0x80,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x6c,0x7c]
+0xc1,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x6c,0x7c]
+0xf0,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x6c,0x7c]
+0xf7,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x6c,0x7c]
+0xfd,0x00,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x6c,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x6c,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x6c,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x6c,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x6c,0x7c]
+0x00,0x01,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x6c,0x7c]
+0xff,0x01,0x6c,0x7c
+
+# CHECK: v_cmpx_ge_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x6d,0x7c]
+0x00,0xfe,0x6d,0x7c
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x36,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x36,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x36,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x36,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x36,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x36,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x36,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x36,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x36,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x36,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x36,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x36,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x36,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x36,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x36,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x36,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x36,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x36,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x36,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x36,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x36,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x36,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x36,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x36,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x36,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x36,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x36,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x36,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x36,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x36,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x36,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x36,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x36,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x36,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x36,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x36,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x36,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x36,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x36,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x36,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x36,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x36,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x36,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x36,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x36,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x36,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x36,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x36,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x36,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x36,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_ge_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x36,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x36,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x6e,0x7c]
+0x00,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x6e,0x7c]
+0x65,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x6e,0x7c]
+0x66,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x6e,0x7c]
+0x67,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x6e,0x7c]
+0x6a,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x6e,0x7c]
+0x6b,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x6e,0x7c]
+0x6c,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x6e,0x7c]
+0x6d,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x6e,0x7c]
+0x6e,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x6e,0x7c]
+0x6f,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x6e,0x7c]
+0x7b,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x6e,0x7c]
+0x7c,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x6e,0x7c]
+0x7e,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x6e,0x7c]
+0x7f,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x6e,0x7c]
+0x80,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x6e,0x7c]
+0xc1,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x6e,0x7c]
+0xf0,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x6e,0x7c]
+0xf7,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x6e,0x7c]
+0xfd,0x00,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x6e,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x6e,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_o_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x6e,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x6e,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_o_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x6e,0x7c]
+0x00,0x01,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x6e,0x7c]
+0xff,0x01,0x6e,0x7c
+
+# CHECK: v_cmpx_o_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x6f,0x7c]
+0x00,0xfe,0x6f,0x7c
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x37,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x37,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x37,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x37,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x37,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x37,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x37,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x37,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x37,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x37,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x37,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x37,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x37,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x37,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x37,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x37,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x37,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x37,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x37,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x37,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x37,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x37,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x37,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x37,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x37,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x37,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x37,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x37,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x37,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x37,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x37,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x37,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x37,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x37,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x37,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x37,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x37,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x37,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x37,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x37,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x37,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x37,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x37,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x37,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x37,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x37,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x37,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x37,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x37,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x37,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_o_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x37,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x37,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x70,0x7c]
+0x00,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x70,0x7c]
+0x65,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x70,0x7c]
+0x66,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x70,0x7c]
+0x67,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x70,0x7c]
+0x6a,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x70,0x7c]
+0x6b,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x70,0x7c]
+0x6c,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x70,0x7c]
+0x6d,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x70,0x7c]
+0x6e,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x70,0x7c]
+0x6f,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x70,0x7c]
+0x7b,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x70,0x7c]
+0x7c,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x70,0x7c]
+0x7e,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x70,0x7c]
+0x7f,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x70,0x7c]
+0x80,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x70,0x7c]
+0xc1,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x70,0x7c]
+0xf0,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x70,0x7c]
+0xf7,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x70,0x7c]
+0xfd,0x00,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x70,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x70,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_u_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x70,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x70,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_u_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x70,0x7c]
+0x00,0x01,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x70,0x7c]
+0xff,0x01,0x70,0x7c
+
+# CHECK: v_cmpx_u_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x71,0x7c]
+0x00,0xfe,0x71,0x7c
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x38,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x38,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x38,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x38,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x38,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x38,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x38,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x38,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x38,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x38,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x38,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x38,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x38,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x38,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x38,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x38,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x38,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x38,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x38,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x38,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x38,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x38,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x38,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x38,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x38,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x38,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x38,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x38,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x38,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x38,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x38,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x38,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x38,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x38,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x38,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x38,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x38,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x38,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x38,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x38,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x38,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x38,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x38,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x38,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x38,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x38,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x38,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x38,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x38,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x38,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_u_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x38,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x38,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x72,0x7c]
+0x00,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x72,0x7c]
+0x65,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x72,0x7c]
+0x66,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x72,0x7c]
+0x67,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x72,0x7c]
+0x6a,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x72,0x7c]
+0x6b,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x72,0x7c]
+0x6c,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x72,0x7c]
+0x6d,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x72,0x7c]
+0x6e,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x72,0x7c]
+0x6f,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x72,0x7c]
+0x7b,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x72,0x7c]
+0x7c,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x72,0x7c]
+0x7e,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x72,0x7c]
+0x7f,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x72,0x7c]
+0x80,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x72,0x7c]
+0xc1,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x72,0x7c]
+0xf0,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x72,0x7c]
+0xf7,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x72,0x7c]
+0xfd,0x00,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x72,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x72,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x72,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x72,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x72,0x7c]
+0x00,0x01,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x72,0x7c]
+0xff,0x01,0x72,0x7c
+
+# CHECK: v_cmpx_nge_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x73,0x7c]
+0x00,0xfe,0x73,0x7c
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x39,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x39,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x39,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x39,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x39,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x39,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x39,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x39,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x39,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x39,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x39,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x39,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x39,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x39,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x39,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x39,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x39,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x39,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x39,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x39,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x39,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x39,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x39,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x39,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x39,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x39,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x39,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x39,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x39,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x39,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x39,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x39,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x39,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x39,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x39,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x39,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x39,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x39,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x39,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x39,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x39,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x39,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x39,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x39,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x39,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x39,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x39,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x39,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x39,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x39,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_nge_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x39,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x39,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x74,0x7c]
+0x00,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x74,0x7c]
+0x65,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x74,0x7c]
+0x66,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x74,0x7c]
+0x67,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x74,0x7c]
+0x6a,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x74,0x7c]
+0x6b,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x74,0x7c]
+0x6c,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x74,0x7c]
+0x6d,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x74,0x7c]
+0x6e,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x74,0x7c]
+0x6f,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x74,0x7c]
+0x7b,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x74,0x7c]
+0x7c,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x74,0x7c]
+0x7e,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x74,0x7c]
+0x7f,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x74,0x7c]
+0x80,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x74,0x7c]
+0xc1,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x74,0x7c]
+0xf0,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x74,0x7c]
+0xf7,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x74,0x7c]
+0xfd,0x00,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x74,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x74,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x74,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x74,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x74,0x7c]
+0x00,0x01,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x74,0x7c]
+0xff,0x01,0x74,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x75,0x7c]
+0x00,0xfe,0x75,0x7c
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x3a,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x3a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x3a,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x3a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x3a,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x3a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x3a,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x3a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x3a,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x3a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x3a,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x3a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x3a,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x3a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x3a,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x3a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x3a,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x3a,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x3a,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x3a,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x3a,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x3a,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x3a,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x3a,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x3a,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x3a,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x3a,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x3a,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x3a,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x3a,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x3a,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x3a,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x3a,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x3a,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x3a,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x3a,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x3a,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x3a,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x3a,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x3a,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x3a,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x3a,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x3a,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x3a,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x3a,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x3a,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x3a,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x3a,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x3a,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x3a,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_nlg_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x3a,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x3a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x76,0x7c]
+0x00,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x76,0x7c]
+0x65,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x76,0x7c]
+0x66,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x76,0x7c]
+0x67,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x76,0x7c]
+0x6a,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x76,0x7c]
+0x6b,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x76,0x7c]
+0x6c,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x76,0x7c]
+0x6d,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x76,0x7c]
+0x6e,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x76,0x7c]
+0x6f,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x76,0x7c]
+0x7b,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x76,0x7c]
+0x7c,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x76,0x7c]
+0x7e,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x76,0x7c]
+0x7f,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x76,0x7c]
+0x80,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x76,0x7c]
+0xc1,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x76,0x7c]
+0xf0,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x76,0x7c]
+0xf7,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x76,0x7c]
+0xfd,0x00,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x76,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x76,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x76,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x76,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x76,0x7c]
+0x00,0x01,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x76,0x7c]
+0xff,0x01,0x76,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x77,0x7c]
+0x00,0xfe,0x77,0x7c
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x3b,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x3b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x3b,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x3b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x3b,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x3b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x3b,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x3b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x3b,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x3b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x3b,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x3b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x3b,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x3b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x3b,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x3b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x3b,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x3b,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x3b,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x3b,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x3b,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x3b,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x3b,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x3b,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x3b,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x3b,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x3b,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x3b,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x3b,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x3b,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x3b,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x3b,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x3b,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x3b,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x3b,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x3b,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x3b,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x3b,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x3b,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x3b,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x3b,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x3b,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x3b,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x3b,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x3b,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x3b,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x3b,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x3b,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x3b,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x3b,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_ngt_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x3b,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x3b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x78,0x7c]
+0x00,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x78,0x7c]
+0x65,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x78,0x7c]
+0x66,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x78,0x7c]
+0x67,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x78,0x7c]
+0x6a,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x78,0x7c]
+0x6b,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x78,0x7c]
+0x6c,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x78,0x7c]
+0x6d,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x78,0x7c]
+0x6e,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x78,0x7c]
+0x6f,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x78,0x7c]
+0x7b,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x78,0x7c]
+0x7c,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x78,0x7c]
+0x7e,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x78,0x7c]
+0x7f,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x78,0x7c]
+0x80,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x78,0x7c]
+0xc1,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x78,0x7c]
+0xf0,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x78,0x7c]
+0xf7,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x78,0x7c]
+0xfd,0x00,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x78,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x78,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x78,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x78,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x78,0x7c]
+0x00,0x01,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x78,0x7c]
+0xff,0x01,0x78,0x7c
+
+# CHECK: v_cmpx_nle_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x79,0x7c]
+0x00,0xfe,0x79,0x7c
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x3c,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x3c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x3c,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x3c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x3c,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x3c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x3c,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x3c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x3c,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x3c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x3c,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x3c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x3c,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x3c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x3c,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x3c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x3c,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x3c,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x3c,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x3c,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x3c,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x3c,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x3c,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x3c,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x3c,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x3c,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x3c,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x3c,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x3c,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x3c,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x3c,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x3c,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x3c,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x3c,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x3c,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x3c,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x3c,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x3c,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x3c,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x3c,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x3c,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x3c,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x3c,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x3c,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x3c,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x3c,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x3c,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x3c,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x3c,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x3c,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_nle_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x3c,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x3c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x7a,0x7c]
+0x00,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x7a,0x7c]
+0x65,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x7a,0x7c]
+0x66,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x7a,0x7c]
+0x67,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x7a,0x7c]
+0x6a,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x7a,0x7c]
+0x6b,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x7a,0x7c]
+0x6c,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x7a,0x7c]
+0x6d,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x7a,0x7c]
+0x6e,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x7a,0x7c]
+0x6f,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x7a,0x7c]
+0x7b,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x7a,0x7c]
+0x7c,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x7a,0x7c]
+0x7e,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x7a,0x7c]
+0x7f,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x7a,0x7c]
+0x80,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x7a,0x7c]
+0xc1,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x7a,0x7c]
+0xf0,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x7a,0x7c]
+0xf7,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x7a,0x7c]
+0xfd,0x00,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x7a,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x7a,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x7a,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x7a,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x7a,0x7c]
+0x00,0x01,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x7a,0x7c]
+0xff,0x01,0x7a,0x7c
+
+# CHECK: v_cmpx_neq_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x7b,0x7c]
+0x00,0xfe,0x7b,0x7c
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x3d,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x3d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x3d,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x3d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x3d,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x3d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x3d,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x3d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x3d,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x3d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x3d,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x3d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x3d,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x3d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x3d,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x3d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x3d,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x3d,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x3d,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x3d,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x3d,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x3d,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x3d,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x3d,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x3d,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x3d,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x3d,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x3d,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x3d,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x3d,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x3d,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x3d,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x3d,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x3d,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x3d,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x3d,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x3d,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x3d,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x3d,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x3d,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x3d,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x3d,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x3d,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x3d,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x3d,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x3d,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x3d,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x3d,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x3d,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x3d,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_neq_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x3d,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x3d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x7c,0x7c]
+0x00,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x7c,0x7c]
+0x65,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x7c,0x7c]
+0x66,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x7c,0x7c]
+0x67,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x7c,0x7c]
+0x6a,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x7c,0x7c]
+0x6b,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x7c,0x7c]
+0x6c,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x7c,0x7c]
+0x6d,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x7c,0x7c]
+0x6e,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x7c,0x7c]
+0x6f,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x7c,0x7c]
+0x7b,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x7c,0x7c]
+0x7c,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x7c,0x7c]
+0x7e,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x7c,0x7c]
+0x7f,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x7c,0x7c]
+0x80,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x7c,0x7c]
+0xc1,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x7c,0x7c]
+0xf0,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x7c,0x7c]
+0xf7,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x7c,0x7c]
+0xfd,0x00,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x7c,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x7c,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x7c,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x7c,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x7c,0x7c]
+0x00,0x01,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x7c,0x7c]
+0xff,0x01,0x7c,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x7d,0x7c]
+0x00,0xfe,0x7d,0x7c
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x3e,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x3e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x3e,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x3e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x3e,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x3e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x3e,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x3e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x3e,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x3e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x3e,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x3e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x3e,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x3e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x3e,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x3e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x3e,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x3e,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x3e,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x3e,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x3e,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x3e,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x3e,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x3e,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x3e,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x3e,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x3e,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x3e,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x3e,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x3e,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x3e,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x3e,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x3e,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x3e,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x3e,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x3e,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x3e,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x3e,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x3e,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x3e,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x3e,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x3e,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x3e,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x3e,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x3e,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x3e,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x3e,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x3e,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x3e,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x3e,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_nlt_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x3e,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x3e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x7e,0x7c]
+0x00,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x7e,0x7c]
+0x65,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x7e,0x7c]
+0x66,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x7e,0x7c]
+0x67,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x7e,0x7c]
+0x6a,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x7e,0x7c]
+0x6b,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x7e,0x7c]
+0x6c,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x7e,0x7c]
+0x6d,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x7e,0x7c]
+0x6e,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x7e,0x7c]
+0x6f,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x7e,0x7c]
+0x7b,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x7e,0x7c]
+0x7c,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x7e,0x7c]
+0x7e,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x7e,0x7c]
+0x7f,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x7e,0x7c]
+0x80,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x7e,0x7c]
+0xc1,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x7e,0x7c]
+0xf0,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x7e,0x7c]
+0xf7,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x7e,0x7c]
+0xfd,0x00,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x7e,0x7c,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x7e,0x7c,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x7e,0x7c,0x56,0x34,0x00,0x00]
+0xff,0x00,0x7e,0x7c,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x7e,0x7c]
+0x00,0x01,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x7e,0x7c]
+0xff,0x01,0x7e,0x7c
+
+# CHECK: v_cmpx_tru_f16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x7f,0x7c]
+0x00,0xfe,0x7f,0x7c
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x3f,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x3f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x3f,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x3f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x3f,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x3f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x3f,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x3f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x3f,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x3f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x3f,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x3f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x3f,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x3f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x3f,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x3f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x3f,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x3f,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x3f,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x3f,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x3f,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x3f,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x3f,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x3f,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x3f,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x3f,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x3f,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x3f,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x3f,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x3f,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x3f,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x3f,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x3f,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x3f,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x3f,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x3f,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x3f,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x3f,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x3f,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x3f,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x3f,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x3f,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x3f,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x3f,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x3f,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x3f,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x3f,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x3f,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x3f,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x3f,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_tru_f16_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x3f,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x3f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x80,0x7c]
+0x00,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x80,0x7c]
+0x65,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x80,0x7c]
+0x66,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x80,0x7c]
+0x67,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x80,0x7c]
+0x6a,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x80,0x7c]
+0x6b,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x80,0x7c]
+0x6c,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x80,0x7c]
+0x6d,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x80,0x7c]
+0x6e,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x80,0x7c]
+0x6f,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x80,0x7c]
+0x7b,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x80,0x7c]
+0x7c,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x80,0x7c]
+0x7e,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x80,0x7c]
+0x7f,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x80,0x7c]
+0x80,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x80,0x7c]
+0xc1,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x80,0x7c]
+0xf0,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x80,0x7c]
+0xf7,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x80,0x7c]
+0xfd,0x00,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x80,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x80,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_f_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x80,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x80,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_f_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x80,0x7c]
+0x00,0x01,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x80,0x7c]
+0xff,0x01,0x80,0x7c
+
+# CHECK: v_cmp_f_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x81,0x7c]
+0x00,0xfe,0x81,0x7c
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x40,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x40,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x40,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x40,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x40,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x40,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x40,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x40,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x40,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x40,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x40,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x40,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x40,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x40,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x40,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x40,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x40,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x40,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x40,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x40,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x40,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x40,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x40,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x40,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x40,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x40,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x40,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x40,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x40,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x40,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x40,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x40,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x40,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x40,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x40,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x40,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x40,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x40,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x40,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x40,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x40,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x40,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x40,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x40,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x40,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x40,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x40,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x40,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x40,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x40,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_f_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x40,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x40,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x82,0x7c]
+0x00,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x82,0x7c]
+0x65,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x82,0x7c]
+0x66,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x82,0x7c]
+0x67,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x82,0x7c]
+0x6a,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x82,0x7c]
+0x6b,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x82,0x7c]
+0x6c,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x82,0x7c]
+0x6d,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x82,0x7c]
+0x6e,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x82,0x7c]
+0x6f,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x82,0x7c]
+0x7b,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x82,0x7c]
+0x7c,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x82,0x7c]
+0x7e,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x82,0x7c]
+0x7f,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x82,0x7c]
+0x80,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x82,0x7c]
+0xc1,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x82,0x7c]
+0xf0,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x82,0x7c]
+0xf7,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x82,0x7c]
+0xfd,0x00,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x82,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x82,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_lt_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x82,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x82,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_lt_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x82,0x7c]
+0x00,0x01,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x82,0x7c]
+0xff,0x01,0x82,0x7c
+
+# CHECK: v_cmp_lt_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x83,0x7c]
+0x00,0xfe,0x83,0x7c
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x41,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x41,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x41,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x41,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x41,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x41,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x41,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x41,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x41,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x41,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x41,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x41,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x41,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x41,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x41,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x41,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x41,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x41,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x41,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x41,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x41,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x41,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x41,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x41,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x41,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x41,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x41,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x41,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x41,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x41,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x41,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x41,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x41,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x41,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x41,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x41,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x41,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x41,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x41,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x41,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x41,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x41,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x41,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x41,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x41,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_lt_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x41,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x41,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x84,0x7c]
+0x00,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x84,0x7c]
+0x65,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x84,0x7c]
+0x66,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x84,0x7c]
+0x67,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x84,0x7c]
+0x6a,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x84,0x7c]
+0x6b,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x84,0x7c]
+0x6c,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x84,0x7c]
+0x6d,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x84,0x7c]
+0x6e,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x84,0x7c]
+0x6f,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x84,0x7c]
+0x7b,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x84,0x7c]
+0x7c,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x84,0x7c]
+0x7e,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x84,0x7c]
+0x7f,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x84,0x7c]
+0x80,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x84,0x7c]
+0xc1,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x84,0x7c]
+0xf0,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x84,0x7c]
+0xf7,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x84,0x7c]
+0xfd,0x00,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x84,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x84,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_eq_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x84,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x84,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_eq_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x84,0x7c]
+0x00,0x01,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x84,0x7c]
+0xff,0x01,0x84,0x7c
+
+# CHECK: v_cmp_eq_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x85,0x7c]
+0x00,0xfe,0x85,0x7c
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x42,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x42,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x42,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x42,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x42,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x42,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x42,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x42,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x42,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x42,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x42,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x42,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x42,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x42,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x42,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x42,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x42,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x42,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x42,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x42,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x42,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x42,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x42,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x42,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x42,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x42,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x42,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x42,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x42,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x42,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x42,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x42,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x42,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x42,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x42,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x42,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x42,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x42,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x42,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x42,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x42,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x42,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x42,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x42,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x42,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x42,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x42,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x42,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x42,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x42,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_eq_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x42,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x42,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x86,0x7c]
+0x00,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x86,0x7c]
+0x65,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x86,0x7c]
+0x66,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x86,0x7c]
+0x67,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x86,0x7c]
+0x6a,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x86,0x7c]
+0x6b,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x86,0x7c]
+0x6c,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x86,0x7c]
+0x6d,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x86,0x7c]
+0x6e,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x86,0x7c]
+0x6f,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x86,0x7c]
+0x7b,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x86,0x7c]
+0x7c,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x86,0x7c]
+0x7e,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x86,0x7c]
+0x7f,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x86,0x7c]
+0x80,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x86,0x7c]
+0xc1,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x86,0x7c]
+0xf0,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x86,0x7c]
+0xf7,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x86,0x7c]
+0xfd,0x00,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x86,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x86,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_le_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x86,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x86,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_le_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x86,0x7c]
+0x00,0x01,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x86,0x7c]
+0xff,0x01,0x86,0x7c
+
+# CHECK: v_cmp_le_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x87,0x7c]
+0x00,0xfe,0x87,0x7c
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x43,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x43,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x43,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x43,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x43,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x43,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x43,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x43,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x43,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x43,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x43,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x43,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x43,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x43,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x43,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x43,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x43,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x43,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x43,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x43,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x43,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x43,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x43,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x43,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x43,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x43,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x43,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x43,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x43,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x43,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x43,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x43,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x43,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x43,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x43,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x43,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x43,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x43,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x43,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x43,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x43,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x43,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x43,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x43,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x43,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_le_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x43,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x43,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x88,0x7c]
+0x00,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x88,0x7c]
+0x65,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x88,0x7c]
+0x66,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x88,0x7c]
+0x67,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x88,0x7c]
+0x6a,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x88,0x7c]
+0x6b,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x88,0x7c]
+0x6c,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x88,0x7c]
+0x6d,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x88,0x7c]
+0x6e,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x88,0x7c]
+0x6f,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x88,0x7c]
+0x7b,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x88,0x7c]
+0x7c,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x88,0x7c]
+0x7e,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x88,0x7c]
+0x7f,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x88,0x7c]
+0x80,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x88,0x7c]
+0xc1,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x88,0x7c]
+0xf0,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x88,0x7c]
+0xf7,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x88,0x7c]
+0xfd,0x00,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x88,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x88,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_gt_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x88,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x88,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_gt_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x88,0x7c]
+0x00,0x01,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x88,0x7c]
+0xff,0x01,0x88,0x7c
+
+# CHECK: v_cmp_gt_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x89,0x7c]
+0x00,0xfe,0x89,0x7c
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x44,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x44,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x44,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x44,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x44,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x44,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x44,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x44,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x44,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x44,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x44,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x44,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x44,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x44,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x44,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x44,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x44,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x44,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x44,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x44,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x44,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x44,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x44,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x44,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x44,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x44,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x44,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x44,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x44,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x44,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x44,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x44,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x44,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x44,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x44,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x44,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x44,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x44,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x44,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x44,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x44,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x44,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x44,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x44,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x44,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_gt_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x44,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x44,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x8a,0x7c]
+0x00,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x8a,0x7c]
+0x65,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x8a,0x7c]
+0x66,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x8a,0x7c]
+0x67,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x8a,0x7c]
+0x6a,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x8a,0x7c]
+0x6b,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x8a,0x7c]
+0x6c,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x8a,0x7c]
+0x6d,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x8a,0x7c]
+0x6e,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x8a,0x7c]
+0x6f,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x8a,0x7c]
+0x7b,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x8a,0x7c]
+0x7c,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x8a,0x7c]
+0x7e,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x8a,0x7c]
+0x7f,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x8a,0x7c]
+0x80,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x8a,0x7c]
+0xc1,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x8a,0x7c]
+0xf0,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x8a,0x7c]
+0xf7,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x8a,0x7c]
+0xfd,0x00,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x8a,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x8a,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_lg_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x8a,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x8a,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_lg_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x8a,0x7c]
+0x00,0x01,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x8a,0x7c]
+0xff,0x01,0x8a,0x7c
+
+# CHECK: v_cmp_lg_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x8b,0x7c]
+0x00,0xfe,0x8b,0x7c
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x45,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x45,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x45,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x45,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x45,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x45,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x45,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x45,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x45,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x45,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x45,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x45,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x45,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x45,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x45,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x45,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x45,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x45,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x45,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x45,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x45,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x45,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x45,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x45,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x45,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x45,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x45,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x45,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x45,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x45,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x45,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x45,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x45,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x45,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x45,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x45,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x45,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x45,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x45,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x45,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x45,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x45,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x45,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x45,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x45,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_lg_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x45,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x45,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x8c,0x7c]
+0x00,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x8c,0x7c]
+0x65,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x8c,0x7c]
+0x66,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x8c,0x7c]
+0x67,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x8c,0x7c]
+0x6a,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x8c,0x7c]
+0x6b,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x8c,0x7c]
+0x6c,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x8c,0x7c]
+0x6d,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x8c,0x7c]
+0x6e,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x8c,0x7c]
+0x6f,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x8c,0x7c]
+0x7b,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x8c,0x7c]
+0x7c,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x8c,0x7c]
+0x7e,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x8c,0x7c]
+0x7f,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x8c,0x7c]
+0x80,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x8c,0x7c]
+0xc1,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x8c,0x7c]
+0xf0,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x8c,0x7c]
+0xf7,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x8c,0x7c]
+0xfd,0x00,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x8c,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x8c,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_ge_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x8c,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x8c,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_ge_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x8c,0x7c]
+0x00,0x01,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x8c,0x7c]
+0xff,0x01,0x8c,0x7c
+
+# CHECK: v_cmp_ge_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x8d,0x7c]
+0x00,0xfe,0x8d,0x7c
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x46,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x46,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x46,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x46,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x46,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x46,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x46,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x46,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x46,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x46,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x46,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x46,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x46,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x46,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x46,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x46,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x46,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x46,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x46,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x46,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x46,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x46,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x46,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x46,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x46,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x46,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x46,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x46,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x46,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x46,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x46,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x46,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x46,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x46,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x46,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x46,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x46,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x46,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x46,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x46,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x46,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x46,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x46,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x46,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x46,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_ge_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x46,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x46,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x8e,0x7c]
+0x00,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x8e,0x7c]
+0x65,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x8e,0x7c]
+0x66,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x8e,0x7c]
+0x67,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x8e,0x7c]
+0x6a,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x8e,0x7c]
+0x6b,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x8e,0x7c]
+0x6c,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x8e,0x7c]
+0x6d,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x8e,0x7c]
+0x6e,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x8e,0x7c]
+0x6f,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x8e,0x7c]
+0x7b,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x8e,0x7c]
+0x7c,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x8e,0x7c]
+0x7e,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x8e,0x7c]
+0x7f,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x8e,0x7c]
+0x80,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x8e,0x7c]
+0xc1,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x8e,0x7c]
+0xf0,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x8e,0x7c]
+0xf7,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x8e,0x7c]
+0xfd,0x00,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x8e,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x8e,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_o_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x8e,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x8e,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_o_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x8e,0x7c]
+0x00,0x01,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x8e,0x7c]
+0xff,0x01,0x8e,0x7c
+
+# CHECK: v_cmp_o_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x8f,0x7c]
+0x00,0xfe,0x8f,0x7c
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x47,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x47,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x47,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x47,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x47,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x47,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x47,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x47,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x47,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x47,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x47,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x47,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x47,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x47,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x47,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x47,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x47,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x47,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x47,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x47,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x47,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x47,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x47,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x47,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x47,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x47,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x47,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x47,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x47,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x47,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x47,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x47,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x47,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x47,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x47,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x47,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x47,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x47,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x47,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x47,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x47,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x47,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x47,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x47,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x47,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_o_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x47,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x47,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x90,0x7c]
+0x00,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x90,0x7c]
+0x65,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x90,0x7c]
+0x66,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x90,0x7c]
+0x67,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x90,0x7c]
+0x6a,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x90,0x7c]
+0x6b,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x90,0x7c]
+0x6c,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x90,0x7c]
+0x6d,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x90,0x7c]
+0x6e,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x90,0x7c]
+0x6f,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x90,0x7c]
+0x7b,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x90,0x7c]
+0x7c,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x90,0x7c]
+0x7e,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x90,0x7c]
+0x7f,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x90,0x7c]
+0x80,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x90,0x7c]
+0xc1,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x90,0x7c]
+0xf0,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x90,0x7c]
+0xf7,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x90,0x7c]
+0xfd,0x00,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x90,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x90,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_u_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x90,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x90,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_u_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x90,0x7c]
+0x00,0x01,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x90,0x7c]
+0xff,0x01,0x90,0x7c
+
+# CHECK: v_cmp_u_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x91,0x7c]
+0x00,0xfe,0x91,0x7c
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x48,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x48,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x48,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x48,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x48,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x48,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x48,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x48,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x48,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x48,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x48,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x48,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x48,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x48,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x48,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x48,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x48,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x48,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x48,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x48,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x48,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x48,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x48,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x48,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x48,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x48,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x48,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x48,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x48,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x48,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x48,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x48,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x48,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x48,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x48,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x48,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x48,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x48,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x48,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x48,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x48,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x48,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x48,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x48,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x48,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_u_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x48,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x48,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x92,0x7c]
+0x00,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x92,0x7c]
+0x65,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x92,0x7c]
+0x66,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x92,0x7c]
+0x67,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x92,0x7c]
+0x6a,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x92,0x7c]
+0x6b,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x92,0x7c]
+0x6c,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x92,0x7c]
+0x6d,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x92,0x7c]
+0x6e,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x92,0x7c]
+0x6f,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x92,0x7c]
+0x7b,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x92,0x7c]
+0x7c,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x92,0x7c]
+0x7e,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x92,0x7c]
+0x7f,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x92,0x7c]
+0x80,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x92,0x7c]
+0xc1,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x92,0x7c]
+0xf0,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x92,0x7c]
+0xf7,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x92,0x7c]
+0xfd,0x00,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x92,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x92,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_nge_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x92,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x92,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_nge_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x92,0x7c]
+0x00,0x01,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x92,0x7c]
+0xff,0x01,0x92,0x7c
+
+# CHECK: v_cmp_nge_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x93,0x7c]
+0x00,0xfe,0x93,0x7c
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x49,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x49,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x49,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x49,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x49,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x49,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x49,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x49,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x49,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x49,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x49,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x49,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x49,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x49,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x49,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x49,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x49,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x49,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x49,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x49,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x49,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x49,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x49,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x49,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x49,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x49,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x49,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x49,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x49,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x49,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x49,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x49,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x49,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x49,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x49,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x49,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x49,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x49,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x49,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x49,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x49,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x49,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x49,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x49,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x49,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x49,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x49,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x49,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x49,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x49,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_nge_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x49,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x49,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x94,0x7c]
+0x00,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x94,0x7c]
+0x65,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x94,0x7c]
+0x66,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x94,0x7c]
+0x67,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x94,0x7c]
+0x6a,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x94,0x7c]
+0x6b,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x94,0x7c]
+0x6c,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x94,0x7c]
+0x6d,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x94,0x7c]
+0x6e,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x94,0x7c]
+0x6f,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x94,0x7c]
+0x7b,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x94,0x7c]
+0x7c,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x94,0x7c]
+0x7e,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x94,0x7c]
+0x7f,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x94,0x7c]
+0x80,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x94,0x7c]
+0xc1,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x94,0x7c]
+0xf0,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x94,0x7c]
+0xf7,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x94,0x7c]
+0xfd,0x00,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x94,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x94,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x94,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x94,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x94,0x7c]
+0x00,0x01,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x94,0x7c]
+0xff,0x01,0x94,0x7c
+
+# CHECK: v_cmp_nlg_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x95,0x7c]
+0x00,0xfe,0x95,0x7c
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x4a,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x4a,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x4a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x4a,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x4a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x4a,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x4a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x4a,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x4a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x4a,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x4a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x4a,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x4a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x4a,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x4a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x4a,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x4a,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x4a,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x4a,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x4a,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x4a,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x4a,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x4a,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x4a,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x4a,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x4a,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x4a,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x4a,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x4a,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x4a,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x4a,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x4a,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x4a,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x4a,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x4a,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x4a,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x4a,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x4a,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x4a,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x4a,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x4a,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x4a,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x4a,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x4a,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x4a,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_nlg_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x4a,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x4a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x96,0x7c]
+0x00,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x96,0x7c]
+0x65,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x96,0x7c]
+0x66,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x96,0x7c]
+0x67,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x96,0x7c]
+0x6a,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x96,0x7c]
+0x6b,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x96,0x7c]
+0x6c,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x96,0x7c]
+0x6d,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x96,0x7c]
+0x6e,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x96,0x7c]
+0x6f,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x96,0x7c]
+0x7b,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x96,0x7c]
+0x7c,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x96,0x7c]
+0x7e,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x96,0x7c]
+0x7f,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x96,0x7c]
+0x80,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x96,0x7c]
+0xc1,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x96,0x7c]
+0xf0,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x96,0x7c]
+0xf7,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x96,0x7c]
+0xfd,0x00,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x96,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x96,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x96,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x96,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x96,0x7c]
+0x00,0x01,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x96,0x7c]
+0xff,0x01,0x96,0x7c
+
+# CHECK: v_cmp_ngt_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x97,0x7c]
+0x00,0xfe,0x97,0x7c
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x4b,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x4b,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x4b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x4b,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x4b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x4b,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x4b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x4b,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x4b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x4b,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x4b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x4b,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x4b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x4b,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x4b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x4b,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x4b,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x4b,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x4b,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x4b,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x4b,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x4b,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x4b,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x4b,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x4b,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x4b,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x4b,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x4b,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x4b,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x4b,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x4b,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x4b,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x4b,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x4b,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x4b,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x4b,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x4b,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x4b,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x4b,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x4b,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x4b,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x4b,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x4b,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x4b,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x4b,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_ngt_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x4b,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x4b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x98,0x7c]
+0x00,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x98,0x7c]
+0x65,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x98,0x7c]
+0x66,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x98,0x7c]
+0x67,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x98,0x7c]
+0x6a,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x98,0x7c]
+0x6b,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x98,0x7c]
+0x6c,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x98,0x7c]
+0x6d,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x98,0x7c]
+0x6e,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x98,0x7c]
+0x6f,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x98,0x7c]
+0x7b,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x98,0x7c]
+0x7c,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x98,0x7c]
+0x7e,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x98,0x7c]
+0x7f,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x98,0x7c]
+0x80,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x98,0x7c]
+0xc1,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x98,0x7c]
+0xf0,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x98,0x7c]
+0xf7,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x98,0x7c]
+0xfd,0x00,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x98,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x98,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_nle_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x98,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x98,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_nle_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x98,0x7c]
+0x00,0x01,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x98,0x7c]
+0xff,0x01,0x98,0x7c
+
+# CHECK: v_cmp_nle_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x99,0x7c]
+0x00,0xfe,0x99,0x7c
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x4c,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x4c,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x4c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x4c,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x4c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x4c,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x4c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x4c,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x4c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x4c,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x4c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x4c,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x4c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x4c,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x4c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x4c,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x4c,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x4c,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x4c,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x4c,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x4c,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x4c,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x4c,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x4c,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x4c,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x4c,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x4c,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x4c,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x4c,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x4c,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x4c,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x4c,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x4c,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x4c,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x4c,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x4c,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x4c,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x4c,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x4c,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x4c,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x4c,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x4c,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x4c,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x4c,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x4c,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_nle_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x4c,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x4c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x9a,0x7c]
+0x00,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x9a,0x7c]
+0x65,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x9a,0x7c]
+0x66,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x9a,0x7c]
+0x67,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x9a,0x7c]
+0x6a,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x9a,0x7c]
+0x6b,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x9a,0x7c]
+0x6c,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x9a,0x7c]
+0x6d,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x9a,0x7c]
+0x6e,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x9a,0x7c]
+0x6f,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x9a,0x7c]
+0x7b,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x9a,0x7c]
+0x7c,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x9a,0x7c]
+0x7e,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x9a,0x7c]
+0x7f,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x9a,0x7c]
+0x80,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x9a,0x7c]
+0xc1,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x9a,0x7c]
+0xf0,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x9a,0x7c]
+0xf7,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x9a,0x7c]
+0xfd,0x00,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x9a,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x9a,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_neq_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x9a,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x9a,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_neq_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x9a,0x7c]
+0x00,0x01,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x9a,0x7c]
+0xff,0x01,0x9a,0x7c
+
+# CHECK: v_cmp_neq_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x9b,0x7c]
+0x00,0xfe,0x9b,0x7c
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x4d,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x4d,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x4d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x4d,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x4d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x4d,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x4d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x4d,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x4d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x4d,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x4d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x4d,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x4d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x4d,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x4d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x4d,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x4d,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x4d,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x4d,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x4d,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x4d,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x4d,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x4d,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x4d,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x4d,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x4d,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x4d,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x4d,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x4d,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x4d,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x4d,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x4d,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x4d,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x4d,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x4d,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x4d,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x4d,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x4d,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x4d,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x4d,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x4d,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x4d,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x4d,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x4d,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x4d,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_neq_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x4d,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x4d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x9c,0x7c]
+0x00,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x9c,0x7c]
+0x65,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x9c,0x7c]
+0x66,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x9c,0x7c]
+0x67,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x9c,0x7c]
+0x6a,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x9c,0x7c]
+0x6b,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x9c,0x7c]
+0x6c,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x9c,0x7c]
+0x6d,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x9c,0x7c]
+0x6e,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x9c,0x7c]
+0x6f,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x9c,0x7c]
+0x7b,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x9c,0x7c]
+0x7c,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x9c,0x7c]
+0x7e,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x9c,0x7c]
+0x7f,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x9c,0x7c]
+0x80,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x9c,0x7c]
+0xc1,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x9c,0x7c]
+0xf0,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x9c,0x7c]
+0xf7,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x9c,0x7c]
+0xfd,0x00,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x9c,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x9c,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x9c,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x9c,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x9c,0x7c]
+0x00,0x01,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x9c,0x7c]
+0xff,0x01,0x9c,0x7c
+
+# CHECK: v_cmp_nlt_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x9d,0x7c]
+0x00,0xfe,0x9d,0x7c
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x4e,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x4e,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x4e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x4e,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x4e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x4e,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x4e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x4e,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x4e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x4e,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x4e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x4e,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x4e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x4e,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x4e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x4e,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x4e,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x4e,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x4e,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x4e,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x4e,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x4e,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x4e,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x4e,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x4e,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x4e,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x4e,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x4e,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x4e,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x4e,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x4e,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x4e,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x4e,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x4e,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x4e,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x4e,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x4e,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x4e,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x4e,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x4e,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x4e,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x4e,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x4e,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x4e,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x4e,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_nlt_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x4e,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x4e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x9e,0x7c]
+0x00,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x9e,0x7c]
+0x65,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x9e,0x7c]
+0x66,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x9e,0x7c]
+0x67,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x9e,0x7c]
+0x6a,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x9e,0x7c]
+0x6b,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x9e,0x7c]
+0x6c,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x9e,0x7c]
+0x6d,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x9e,0x7c]
+0x6e,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x9e,0x7c]
+0x6f,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x9e,0x7c]
+0x7b,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x9e,0x7c]
+0x7c,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x9e,0x7c]
+0x7e,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x9e,0x7c]
+0x7f,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x9e,0x7c]
+0x80,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x9e,0x7c]
+0xc1,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x9e,0x7c]
+0xf0,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x9e,0x7c]
+0xf7,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x9e,0x7c]
+0xfd,0x00,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x9e,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x9e,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_tru_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x9e,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x9e,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_tru_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x9e,0x7c]
+0x00,0x01,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x9e,0x7c]
+0xff,0x01,0x9e,0x7c
+
+# CHECK: v_cmp_tru_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x9f,0x7c]
+0x00,0xfe,0x9f,0x7c
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x4f,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x4f,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x4f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x4f,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x4f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x4f,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x4f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x4f,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x4f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x4f,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x4f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x4f,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x4f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x4f,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x4f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x4f,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x4f,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x4f,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x4f,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x4f,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x4f,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x4f,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x4f,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x4f,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x4f,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x4f,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x4f,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x4f,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x4f,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x4f,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x4f,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x4f,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x4f,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x4f,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x4f,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x4f,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x4f,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x4f,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x4f,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x4f,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x4f,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x4f,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x4f,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x4f,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x4f,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_tru_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x4f,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x4f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xa0,0x7c]
+0x00,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xa0,0x7c]
+0x65,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xa0,0x7c]
+0x66,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xa0,0x7c]
+0x67,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xa0,0x7c]
+0x6a,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xa0,0x7c]
+0x6b,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xa0,0x7c]
+0x6c,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xa0,0x7c]
+0x6d,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xa0,0x7c]
+0x6e,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xa0,0x7c]
+0x6f,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xa0,0x7c]
+0x7b,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xa0,0x7c]
+0x7c,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xa0,0x7c]
+0x7e,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xa0,0x7c]
+0x7f,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xa0,0x7c]
+0x80,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xa0,0x7c]
+0xc1,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xa0,0x7c]
+0xf0,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xa0,0x7c]
+0xf7,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xa0,0x7c]
+0xfd,0x00,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xa0,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xa0,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_f_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xa0,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xa0,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_f_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xa0,0x7c]
+0x00,0x01,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xa0,0x7c]
+0xff,0x01,0xa0,0x7c
+
+# CHECK: v_cmpx_f_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xa1,0x7c]
+0x00,0xfe,0xa1,0x7c
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x50,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x50,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x50,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x50,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x50,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x50,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x50,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x50,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x50,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x50,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x50,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x50,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x50,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x50,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x50,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x50,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x50,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x50,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x50,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x50,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x50,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x50,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x50,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x50,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x50,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x50,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x50,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x50,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x50,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x50,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x50,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x50,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x50,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x50,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x50,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x50,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x50,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x50,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x50,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x50,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x50,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x50,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x50,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x50,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_f_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x50,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x50,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xa2,0x7c]
+0x00,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xa2,0x7c]
+0x65,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xa2,0x7c]
+0x66,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xa2,0x7c]
+0x67,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xa2,0x7c]
+0x6a,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xa2,0x7c]
+0x6b,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xa2,0x7c]
+0x6c,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xa2,0x7c]
+0x6d,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xa2,0x7c]
+0x6e,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xa2,0x7c]
+0x6f,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xa2,0x7c]
+0x7b,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xa2,0x7c]
+0x7c,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xa2,0x7c]
+0x7e,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xa2,0x7c]
+0x7f,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xa2,0x7c]
+0x80,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xa2,0x7c]
+0xc1,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xa2,0x7c]
+0xf0,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xa2,0x7c]
+0xf7,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xa2,0x7c]
+0xfd,0x00,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xa2,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xa2,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xa2,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xa2,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xa2,0x7c]
+0x00,0x01,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xa2,0x7c]
+0xff,0x01,0xa2,0x7c
+
+# CHECK: v_cmpx_lt_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xa3,0x7c]
+0x00,0xfe,0xa3,0x7c
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x51,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x51,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x51,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x51,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x51,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x51,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x51,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x51,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x51,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x51,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x51,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x51,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x51,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x51,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x51,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x51,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x51,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x51,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x51,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x51,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x51,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x51,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x51,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x51,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x51,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x51,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x51,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x51,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x51,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x51,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x51,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x51,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x51,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x51,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x51,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x51,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x51,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x51,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x51,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x51,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x51,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x51,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x51,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x51,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x51,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_lt_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x51,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x51,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xa4,0x7c]
+0x00,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xa4,0x7c]
+0x65,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xa4,0x7c]
+0x66,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xa4,0x7c]
+0x67,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xa4,0x7c]
+0x6a,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xa4,0x7c]
+0x6b,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xa4,0x7c]
+0x6c,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xa4,0x7c]
+0x6d,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xa4,0x7c]
+0x6e,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xa4,0x7c]
+0x6f,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xa4,0x7c]
+0x7b,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xa4,0x7c]
+0x7c,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xa4,0x7c]
+0x7e,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xa4,0x7c]
+0x7f,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xa4,0x7c]
+0x80,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xa4,0x7c]
+0xc1,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xa4,0x7c]
+0xf0,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xa4,0x7c]
+0xf7,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xa4,0x7c]
+0xfd,0x00,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xa4,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xa4,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xa4,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xa4,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xa4,0x7c]
+0x00,0x01,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xa4,0x7c]
+0xff,0x01,0xa4,0x7c
+
+# CHECK: v_cmpx_eq_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xa5,0x7c]
+0x00,0xfe,0xa5,0x7c
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x52,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x52,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x52,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x52,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x52,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x52,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x52,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x52,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x52,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x52,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x52,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x52,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x52,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x52,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x52,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x52,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x52,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x52,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x52,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x52,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x52,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x52,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x52,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x52,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x52,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x52,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x52,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x52,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x52,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x52,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x52,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x52,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x52,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x52,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x52,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x52,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x52,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x52,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x52,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x52,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x52,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x52,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x52,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x52,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x52,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_eq_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x52,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x52,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xa6,0x7c]
+0x00,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xa6,0x7c]
+0x65,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xa6,0x7c]
+0x66,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xa6,0x7c]
+0x67,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xa6,0x7c]
+0x6a,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xa6,0x7c]
+0x6b,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xa6,0x7c]
+0x6c,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xa6,0x7c]
+0x6d,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xa6,0x7c]
+0x6e,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xa6,0x7c]
+0x6f,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xa6,0x7c]
+0x7b,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xa6,0x7c]
+0x7c,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xa6,0x7c]
+0x7e,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xa6,0x7c]
+0x7f,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xa6,0x7c]
+0x80,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xa6,0x7c]
+0xc1,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xa6,0x7c]
+0xf0,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xa6,0x7c]
+0xf7,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xa6,0x7c]
+0xfd,0x00,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xa6,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xa6,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_le_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xa6,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xa6,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_le_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xa6,0x7c]
+0x00,0x01,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xa6,0x7c]
+0xff,0x01,0xa6,0x7c
+
+# CHECK: v_cmpx_le_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xa7,0x7c]
+0x00,0xfe,0xa7,0x7c
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x53,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x53,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x53,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x53,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x53,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x53,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x53,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x53,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x53,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x53,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x53,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x53,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x53,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x53,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x53,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x53,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x53,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x53,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x53,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x53,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x53,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x53,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x53,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x53,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x53,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x53,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x53,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x53,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x53,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x53,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x53,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x53,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x53,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x53,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x53,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x53,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x53,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x53,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x53,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x53,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x53,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x53,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x53,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x53,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x53,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_le_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x53,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x53,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xa8,0x7c]
+0x00,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xa8,0x7c]
+0x65,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xa8,0x7c]
+0x66,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xa8,0x7c]
+0x67,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xa8,0x7c]
+0x6a,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xa8,0x7c]
+0x6b,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xa8,0x7c]
+0x6c,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xa8,0x7c]
+0x6d,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xa8,0x7c]
+0x6e,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xa8,0x7c]
+0x6f,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xa8,0x7c]
+0x7b,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xa8,0x7c]
+0x7c,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xa8,0x7c]
+0x7e,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xa8,0x7c]
+0x7f,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xa8,0x7c]
+0x80,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xa8,0x7c]
+0xc1,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xa8,0x7c]
+0xf0,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xa8,0x7c]
+0xf7,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xa8,0x7c]
+0xfd,0x00,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xa8,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xa8,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xa8,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xa8,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xa8,0x7c]
+0x00,0x01,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xa8,0x7c]
+0xff,0x01,0xa8,0x7c
+
+# CHECK: v_cmpx_gt_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xa9,0x7c]
+0x00,0xfe,0xa9,0x7c
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x54,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x54,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x54,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x54,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x54,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x54,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x54,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x54,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x54,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x54,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x54,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x54,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x54,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x54,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x54,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x54,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x54,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x54,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x54,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x54,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x54,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x54,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x54,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x54,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x54,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x54,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x54,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x54,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x54,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x54,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x54,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x54,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x54,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x54,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x54,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x54,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x54,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x54,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x54,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x54,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x54,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x54,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x54,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x54,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x54,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_gt_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x54,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x54,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xaa,0x7c]
+0x00,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xaa,0x7c]
+0x65,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xaa,0x7c]
+0x66,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xaa,0x7c]
+0x67,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xaa,0x7c]
+0x6a,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xaa,0x7c]
+0x6b,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xaa,0x7c]
+0x6c,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xaa,0x7c]
+0x6d,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xaa,0x7c]
+0x6e,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xaa,0x7c]
+0x6f,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xaa,0x7c]
+0x7b,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xaa,0x7c]
+0x7c,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xaa,0x7c]
+0x7e,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xaa,0x7c]
+0x7f,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xaa,0x7c]
+0x80,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xaa,0x7c]
+0xc1,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xaa,0x7c]
+0xf0,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xaa,0x7c]
+0xf7,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xaa,0x7c]
+0xfd,0x00,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xaa,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xaa,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xaa,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xaa,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xaa,0x7c]
+0x00,0x01,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xaa,0x7c]
+0xff,0x01,0xaa,0x7c
+
+# CHECK: v_cmpx_lg_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xab,0x7c]
+0x00,0xfe,0xab,0x7c
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x55,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x55,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x55,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x55,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x55,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x55,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x55,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x55,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x55,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x55,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x55,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x55,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x55,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x55,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x55,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x55,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x55,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x55,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x55,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x55,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x55,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x55,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x55,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x55,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x55,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x55,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x55,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x55,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x55,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x55,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x55,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x55,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x55,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x55,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x55,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x55,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x55,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x55,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x55,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x55,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x55,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x55,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x55,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x55,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x55,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_lg_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x55,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x55,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xac,0x7c]
+0x00,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xac,0x7c]
+0x65,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xac,0x7c]
+0x66,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xac,0x7c]
+0x67,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xac,0x7c]
+0x6a,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xac,0x7c]
+0x6b,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xac,0x7c]
+0x6c,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xac,0x7c]
+0x6d,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xac,0x7c]
+0x6e,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xac,0x7c]
+0x6f,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xac,0x7c]
+0x7b,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xac,0x7c]
+0x7c,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xac,0x7c]
+0x7e,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xac,0x7c]
+0x7f,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xac,0x7c]
+0x80,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xac,0x7c]
+0xc1,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xac,0x7c]
+0xf0,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xac,0x7c]
+0xf7,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xac,0x7c]
+0xfd,0x00,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xac,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xac,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xac,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xac,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xac,0x7c]
+0x00,0x01,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xac,0x7c]
+0xff,0x01,0xac,0x7c
+
+# CHECK: v_cmpx_ge_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xad,0x7c]
+0x00,0xfe,0xad,0x7c
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x56,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x56,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x56,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x56,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x56,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x56,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x56,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x56,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x56,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x56,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x56,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x56,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x56,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x56,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x56,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x56,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x56,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x56,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x56,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x56,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x56,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x56,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x56,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x56,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x56,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x56,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x56,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x56,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x56,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x56,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x56,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x56,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x56,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x56,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x56,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x56,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x56,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x56,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x56,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x56,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x56,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x56,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x56,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x56,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x56,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_ge_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x56,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x56,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xae,0x7c]
+0x00,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xae,0x7c]
+0x65,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xae,0x7c]
+0x66,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xae,0x7c]
+0x67,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xae,0x7c]
+0x6a,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xae,0x7c]
+0x6b,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xae,0x7c]
+0x6c,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xae,0x7c]
+0x6d,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xae,0x7c]
+0x6e,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xae,0x7c]
+0x6f,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xae,0x7c]
+0x7b,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xae,0x7c]
+0x7c,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xae,0x7c]
+0x7e,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xae,0x7c]
+0x7f,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xae,0x7c]
+0x80,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xae,0x7c]
+0xc1,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xae,0x7c]
+0xf0,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xae,0x7c]
+0xf7,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xae,0x7c]
+0xfd,0x00,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xae,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xae,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_o_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xae,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xae,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_o_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xae,0x7c]
+0x00,0x01,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xae,0x7c]
+0xff,0x01,0xae,0x7c
+
+# CHECK: v_cmpx_o_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xaf,0x7c]
+0x00,0xfe,0xaf,0x7c
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x57,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x57,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x57,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x57,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x57,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x57,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x57,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x57,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x57,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x57,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x57,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x57,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x57,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x57,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x57,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x57,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x57,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x57,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x57,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x57,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x57,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x57,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x57,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x57,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x57,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x57,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x57,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x57,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x57,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x57,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x57,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x57,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x57,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x57,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x57,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x57,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x57,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x57,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x57,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x57,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x57,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x57,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x57,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x57,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x57,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_o_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x57,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x57,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xb0,0x7c]
+0x00,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xb0,0x7c]
+0x65,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xb0,0x7c]
+0x66,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xb0,0x7c]
+0x67,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xb0,0x7c]
+0x6a,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xb0,0x7c]
+0x6b,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xb0,0x7c]
+0x6c,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xb0,0x7c]
+0x6d,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xb0,0x7c]
+0x6e,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xb0,0x7c]
+0x6f,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xb0,0x7c]
+0x7b,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xb0,0x7c]
+0x7c,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xb0,0x7c]
+0x7e,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xb0,0x7c]
+0x7f,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xb0,0x7c]
+0x80,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xb0,0x7c]
+0xc1,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xb0,0x7c]
+0xf0,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xb0,0x7c]
+0xf7,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xb0,0x7c]
+0xfd,0x00,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xb0,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xb0,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_u_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xb0,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xb0,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_u_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xb0,0x7c]
+0x00,0x01,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xb0,0x7c]
+0xff,0x01,0xb0,0x7c
+
+# CHECK: v_cmpx_u_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xb1,0x7c]
+0x00,0xfe,0xb1,0x7c
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x58,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x58,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x58,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x58,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x58,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x58,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x58,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x58,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x58,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x58,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x58,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x58,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x58,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x58,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x58,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x58,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x58,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x58,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x58,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x58,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x58,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x58,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x58,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x58,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x58,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x58,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x58,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x58,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x58,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x58,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x58,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x58,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x58,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x58,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x58,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x58,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x58,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x58,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x58,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x58,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x58,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x58,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x58,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x58,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x58,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_u_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x58,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x58,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xb2,0x7c]
+0x00,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xb2,0x7c]
+0x65,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xb2,0x7c]
+0x66,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xb2,0x7c]
+0x67,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xb2,0x7c]
+0x6a,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xb2,0x7c]
+0x6b,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xb2,0x7c]
+0x6c,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xb2,0x7c]
+0x6d,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xb2,0x7c]
+0x6e,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xb2,0x7c]
+0x6f,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xb2,0x7c]
+0x7b,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xb2,0x7c]
+0x7c,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xb2,0x7c]
+0x7e,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xb2,0x7c]
+0x7f,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xb2,0x7c]
+0x80,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xb2,0x7c]
+0xc1,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xb2,0x7c]
+0xf0,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xb2,0x7c]
+0xf7,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xb2,0x7c]
+0xfd,0x00,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xb2,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xb2,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xb2,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xb2,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xb2,0x7c]
+0x00,0x01,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xb2,0x7c]
+0xff,0x01,0xb2,0x7c
+
+# CHECK: v_cmpx_nge_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xb3,0x7c]
+0x00,0xfe,0xb3,0x7c
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x59,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x59,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x59,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x59,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x59,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x59,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x59,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x59,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x59,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x59,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x59,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x59,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x59,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x59,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x59,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x59,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x59,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x59,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x59,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x59,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x59,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x59,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x59,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x59,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x59,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x59,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x59,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x59,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x59,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x59,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x59,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x59,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x59,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x59,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x59,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x59,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x59,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x59,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x59,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x59,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x59,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x59,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x59,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x59,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x59,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_nge_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x59,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x59,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xb4,0x7c]
+0x00,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xb4,0x7c]
+0x65,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xb4,0x7c]
+0x66,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xb4,0x7c]
+0x67,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xb4,0x7c]
+0x6a,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xb4,0x7c]
+0x6b,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xb4,0x7c]
+0x6c,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xb4,0x7c]
+0x6d,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xb4,0x7c]
+0x6e,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xb4,0x7c]
+0x6f,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xb4,0x7c]
+0x7b,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xb4,0x7c]
+0x7c,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xb4,0x7c]
+0x7e,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xb4,0x7c]
+0x7f,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xb4,0x7c]
+0x80,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xb4,0x7c]
+0xc1,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xb4,0x7c]
+0xf0,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xb4,0x7c]
+0xf7,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xb4,0x7c]
+0xfd,0x00,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xb4,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xb4,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xb4,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xb4,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xb4,0x7c]
+0x00,0x01,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xb4,0x7c]
+0xff,0x01,0xb4,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xb5,0x7c]
+0x00,0xfe,0xb5,0x7c
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x5a,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x5a,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x5a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x5a,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x5a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x5a,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x5a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x5a,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x5a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x5a,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x5a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x5a,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x5a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x5a,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x5a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x5a,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x5a,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x5a,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x5a,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x5a,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x5a,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x5a,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x5a,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x5a,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x5a,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x5a,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x5a,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x5a,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x5a,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x5a,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x5a,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x5a,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x5a,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x5a,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x5a,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x5a,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x5a,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x5a,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x5a,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x5a,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x5a,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x5a,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x5a,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x5a,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x5a,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_nlg_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x5a,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x5a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xb6,0x7c]
+0x00,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xb6,0x7c]
+0x65,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xb6,0x7c]
+0x66,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xb6,0x7c]
+0x67,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xb6,0x7c]
+0x6a,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xb6,0x7c]
+0x6b,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xb6,0x7c]
+0x6c,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xb6,0x7c]
+0x6d,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xb6,0x7c]
+0x6e,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xb6,0x7c]
+0x6f,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xb6,0x7c]
+0x7b,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xb6,0x7c]
+0x7c,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xb6,0x7c]
+0x7e,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xb6,0x7c]
+0x7f,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xb6,0x7c]
+0x80,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xb6,0x7c]
+0xc1,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xb6,0x7c]
+0xf0,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xb6,0x7c]
+0xf7,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xb6,0x7c]
+0xfd,0x00,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xb6,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xb6,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xb6,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xb6,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xb6,0x7c]
+0x00,0x01,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xb6,0x7c]
+0xff,0x01,0xb6,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xb7,0x7c]
+0x00,0xfe,0xb7,0x7c
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x5b,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x5b,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x5b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x5b,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x5b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x5b,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x5b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x5b,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x5b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x5b,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x5b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x5b,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x5b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x5b,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x5b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x5b,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x5b,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x5b,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x5b,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x5b,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x5b,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x5b,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x5b,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x5b,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x5b,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x5b,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x5b,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x5b,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x5b,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x5b,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x5b,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x5b,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x5b,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x5b,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x5b,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x5b,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x5b,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x5b,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x5b,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x5b,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x5b,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x5b,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x5b,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x5b,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x5b,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_ngt_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x5b,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x5b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xb8,0x7c]
+0x00,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xb8,0x7c]
+0x65,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xb8,0x7c]
+0x66,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xb8,0x7c]
+0x67,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xb8,0x7c]
+0x6a,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xb8,0x7c]
+0x6b,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xb8,0x7c]
+0x6c,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xb8,0x7c]
+0x6d,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xb8,0x7c]
+0x6e,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xb8,0x7c]
+0x6f,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xb8,0x7c]
+0x7b,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xb8,0x7c]
+0x7c,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xb8,0x7c]
+0x7e,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xb8,0x7c]
+0x7f,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xb8,0x7c]
+0x80,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xb8,0x7c]
+0xc1,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xb8,0x7c]
+0xf0,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xb8,0x7c]
+0xf7,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xb8,0x7c]
+0xfd,0x00,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xb8,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xb8,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xb8,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xb8,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xb8,0x7c]
+0x00,0x01,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xb8,0x7c]
+0xff,0x01,0xb8,0x7c
+
+# CHECK: v_cmpx_nle_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xb9,0x7c]
+0x00,0xfe,0xb9,0x7c
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x5c,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x5c,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x5c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x5c,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x5c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x5c,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x5c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x5c,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x5c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x5c,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x5c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x5c,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x5c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x5c,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x5c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x5c,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x5c,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x5c,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x5c,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x5c,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x5c,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x5c,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x5c,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x5c,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x5c,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x5c,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x5c,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x5c,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x5c,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x5c,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x5c,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x5c,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x5c,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x5c,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x5c,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x5c,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x5c,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x5c,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x5c,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x5c,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x5c,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x5c,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x5c,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x5c,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x5c,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_nle_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x5c,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x5c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xba,0x7c]
+0x00,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xba,0x7c]
+0x65,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xba,0x7c]
+0x66,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xba,0x7c]
+0x67,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xba,0x7c]
+0x6a,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xba,0x7c]
+0x6b,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xba,0x7c]
+0x6c,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xba,0x7c]
+0x6d,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xba,0x7c]
+0x6e,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xba,0x7c]
+0x6f,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xba,0x7c]
+0x7b,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xba,0x7c]
+0x7c,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xba,0x7c]
+0x7e,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xba,0x7c]
+0x7f,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xba,0x7c]
+0x80,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xba,0x7c]
+0xc1,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xba,0x7c]
+0xf0,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xba,0x7c]
+0xf7,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xba,0x7c]
+0xfd,0x00,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xba,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xba,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xba,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xba,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xba,0x7c]
+0x00,0x01,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xba,0x7c]
+0xff,0x01,0xba,0x7c
+
+# CHECK: v_cmpx_neq_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xbb,0x7c]
+0x00,0xfe,0xbb,0x7c
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x5d,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x5d,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x5d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x5d,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x5d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x5d,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x5d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x5d,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x5d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x5d,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x5d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x5d,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x5d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x5d,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x5d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x5d,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x5d,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x5d,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x5d,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x5d,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x5d,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x5d,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x5d,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x5d,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x5d,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x5d,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x5d,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x5d,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x5d,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x5d,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x5d,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x5d,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x5d,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x5d,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x5d,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x5d,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x5d,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x5d,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x5d,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x5d,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x5d,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x5d,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x5d,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x5d,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x5d,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_neq_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x5d,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x5d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xbc,0x7c]
+0x00,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xbc,0x7c]
+0x65,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xbc,0x7c]
+0x66,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xbc,0x7c]
+0x67,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xbc,0x7c]
+0x6a,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xbc,0x7c]
+0x6b,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xbc,0x7c]
+0x6c,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xbc,0x7c]
+0x6d,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xbc,0x7c]
+0x6e,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xbc,0x7c]
+0x6f,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xbc,0x7c]
+0x7b,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xbc,0x7c]
+0x7c,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xbc,0x7c]
+0x7e,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xbc,0x7c]
+0x7f,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xbc,0x7c]
+0x80,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xbc,0x7c]
+0xc1,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xbc,0x7c]
+0xf0,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xbc,0x7c]
+0xf7,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xbc,0x7c]
+0xfd,0x00,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xbc,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xbc,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xbc,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xbc,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xbc,0x7c]
+0x00,0x01,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xbc,0x7c]
+0xff,0x01,0xbc,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xbd,0x7c]
+0x00,0xfe,0xbd,0x7c
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x5e,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x5e,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x5e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x5e,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x5e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x5e,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x5e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x5e,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x5e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x5e,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x5e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x5e,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x5e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x5e,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x5e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x5e,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x5e,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x5e,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x5e,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x5e,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x5e,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x5e,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x5e,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x5e,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x5e,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x5e,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x5e,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x5e,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x5e,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x5e,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x5e,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x5e,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x5e,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x5e,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x5e,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x5e,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x5e,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x5e,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x5e,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x5e,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x5e,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x5e,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x5e,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x5e,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x5e,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_nlt_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x5e,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x5e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xbe,0x7c]
+0x00,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xbe,0x7c]
+0x65,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xbe,0x7c]
+0x66,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xbe,0x7c]
+0x67,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xbe,0x7c]
+0x6a,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xbe,0x7c]
+0x6b,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xbe,0x7c]
+0x6c,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xbe,0x7c]
+0x6d,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xbe,0x7c]
+0x6e,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xbe,0x7c]
+0x6f,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xbe,0x7c]
+0x7b,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xbe,0x7c]
+0x7c,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xbe,0x7c]
+0x7e,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xbe,0x7c]
+0x7f,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xbe,0x7c]
+0x80,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xbe,0x7c]
+0xc1,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xbe,0x7c]
+0xf0,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xbe,0x7c]
+0xf7,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xbe,0x7c]
+0xfd,0x00,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xbe,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xbe,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xbe,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xbe,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xbe,0x7c]
+0x00,0x01,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xbe,0x7c]
+0xff,0x01,0xbe,0x7c
+
+# CHECK: v_cmpx_tru_f32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xbf,0x7c]
+0x00,0xfe,0xbf,0x7c
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0x5f,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0x5f,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x5f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0x5f,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x5f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0x5f,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x5f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0x5f,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x5f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0x5f,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x5f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0x5f,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x5f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0x5f,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x5f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0x5f,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0x5f,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0x5f,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0x5f,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0x5f,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0x5f,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0x5f,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x5f,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0x5f,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0x5f,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0x5f,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x5f,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0x5f,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x5f,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0x5f,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x5f,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0x5f,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x5f,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0x5f,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0x5f,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0x5f,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x5f,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0x5f,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0x5f,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], -s0, s0 ; encoding: [0x00,0x00,0x5f,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x5f,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], s0, -s0 ; encoding: [0x00,0x00,0x5f,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x5f,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], -s0, -s0 ; encoding: [0x00,0x00,0x5f,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x5f,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_tru_f32_e64 s[0:1], s0, s0 clamp ; encoding: [0x00,0x80,0x5f,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x5f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xc0,0x7c]
+0x00,0x00,0xc0,0x7c
+
+# CHECK: v_cmp_f_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xc0,0x7c]
+0x02,0x00,0xc0,0x7c
+
+# CHECK: v_cmp_f_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xc0,0x7c]
+0x64,0x00,0xc0,0x7c
+
+# CHECK: v_cmp_f_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xc0,0x7c]
+0x66,0x00,0xc0,0x7c
+
+# CHECK: v_cmp_f_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xc0,0x7c]
+0x6a,0x00,0xc0,0x7c
+
+# CHECK: v_cmp_f_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xc0,0x7c]
+0x6c,0x00,0xc0,0x7c
+
+# CHECK: v_cmp_f_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xc0,0x7c]
+0x6e,0x00,0xc0,0x7c
+
+# CHECK: v_cmp_f_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xc0,0x7c]
+0x7a,0x00,0xc0,0x7c
+
+# CHECK: v_cmp_f_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xc0,0x7c]
+0x7e,0x00,0xc0,0x7c
+
+# CHECK: v_cmp_f_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xc0,0x7c]
+0x80,0x00,0xc0,0x7c
+
+# CHECK: v_cmp_f_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xc0,0x7c]
+0xc1,0x00,0xc0,0x7c
+
+# CHECK: v_cmp_f_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xc0,0x7c]
+0xf0,0x00,0xc0,0x7c
+
+# CHECK: v_cmp_f_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xc0,0x7c]
+0xf7,0x00,0xc0,0x7c
+
+# CHECK: v_cmp_f_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xc0,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xc0,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_f_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xc0,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xc0,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_f_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xc0,0x7c]
+0x00,0x01,0xc0,0x7c
+
+# CHECK: v_cmp_f_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xc0,0x7c]
+0xfe,0x01,0xc0,0x7c
+
+# CHECK: v_cmp_f_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xc1,0x7c]
+0x00,0xfc,0xc1,0x7c
+
+# CHECK: v_cmp_f_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x60,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x60,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x60,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x60,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x60,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x60,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x60,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x60,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x60,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x60,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x60,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x60,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x60,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x60,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x60,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x60,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x60,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x60,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x60,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x60,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x60,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x60,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_f_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x60,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x60,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_f_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x60,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x60,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_f_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x60,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x60,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_f_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x60,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x60,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_f_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x60,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x60,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_f_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x60,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x60,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_f_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x60,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x60,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_f_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x60,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x60,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_f_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x60,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x60,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_f_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x60,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x60,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_f_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x60,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x60,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xc2,0x7c]
+0x00,0x00,0xc2,0x7c
+
+# CHECK: v_cmp_lt_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xc2,0x7c]
+0x02,0x00,0xc2,0x7c
+
+# CHECK: v_cmp_lt_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xc2,0x7c]
+0x64,0x00,0xc2,0x7c
+
+# CHECK: v_cmp_lt_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xc2,0x7c]
+0x66,0x00,0xc2,0x7c
+
+# CHECK: v_cmp_lt_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xc2,0x7c]
+0x6a,0x00,0xc2,0x7c
+
+# CHECK: v_cmp_lt_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xc2,0x7c]
+0x6c,0x00,0xc2,0x7c
+
+# CHECK: v_cmp_lt_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xc2,0x7c]
+0x6e,0x00,0xc2,0x7c
+
+# CHECK: v_cmp_lt_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xc2,0x7c]
+0x7a,0x00,0xc2,0x7c
+
+# CHECK: v_cmp_lt_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xc2,0x7c]
+0x7e,0x00,0xc2,0x7c
+
+# CHECK: v_cmp_lt_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xc2,0x7c]
+0x80,0x00,0xc2,0x7c
+
+# CHECK: v_cmp_lt_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xc2,0x7c]
+0xc1,0x00,0xc2,0x7c
+
+# CHECK: v_cmp_lt_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xc2,0x7c]
+0xf0,0x00,0xc2,0x7c
+
+# CHECK: v_cmp_lt_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xc2,0x7c]
+0xf7,0x00,0xc2,0x7c
+
+# CHECK: v_cmp_lt_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xc2,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xc2,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_lt_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xc2,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xc2,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_lt_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xc2,0x7c]
+0x00,0x01,0xc2,0x7c
+
+# CHECK: v_cmp_lt_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xc2,0x7c]
+0xfe,0x01,0xc2,0x7c
+
+# CHECK: v_cmp_lt_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xc3,0x7c]
+0x00,0xfc,0xc3,0x7c
+
+# CHECK: v_cmp_lt_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x61,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x61,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x61,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x61,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x61,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x61,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x61,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x61,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x61,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x61,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x61,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x61,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x61,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x61,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x61,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x61,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x61,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x61,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x61,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x61,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x61,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x61,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_lt_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x61,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x61,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_lt_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x61,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x61,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_lt_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x61,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x61,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_lt_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x61,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x61,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_lt_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x61,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x61,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_lt_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x61,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x61,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_lt_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x61,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x61,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_lt_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x61,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x61,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_lt_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x61,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x61,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_lt_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x61,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x61,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_lt_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x61,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x61,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xc4,0x7c]
+0x00,0x00,0xc4,0x7c
+
+# CHECK: v_cmp_eq_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xc4,0x7c]
+0x02,0x00,0xc4,0x7c
+
+# CHECK: v_cmp_eq_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xc4,0x7c]
+0x64,0x00,0xc4,0x7c
+
+# CHECK: v_cmp_eq_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xc4,0x7c]
+0x66,0x00,0xc4,0x7c
+
+# CHECK: v_cmp_eq_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xc4,0x7c]
+0x6a,0x00,0xc4,0x7c
+
+# CHECK: v_cmp_eq_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xc4,0x7c]
+0x6c,0x00,0xc4,0x7c
+
+# CHECK: v_cmp_eq_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xc4,0x7c]
+0x6e,0x00,0xc4,0x7c
+
+# CHECK: v_cmp_eq_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xc4,0x7c]
+0x7a,0x00,0xc4,0x7c
+
+# CHECK: v_cmp_eq_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xc4,0x7c]
+0x7e,0x00,0xc4,0x7c
+
+# CHECK: v_cmp_eq_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xc4,0x7c]
+0x80,0x00,0xc4,0x7c
+
+# CHECK: v_cmp_eq_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xc4,0x7c]
+0xc1,0x00,0xc4,0x7c
+
+# CHECK: v_cmp_eq_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xc4,0x7c]
+0xf0,0x00,0xc4,0x7c
+
+# CHECK: v_cmp_eq_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xc4,0x7c]
+0xf7,0x00,0xc4,0x7c
+
+# CHECK: v_cmp_eq_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xc4,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xc4,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_eq_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xc4,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xc4,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_eq_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xc4,0x7c]
+0x00,0x01,0xc4,0x7c
+
+# CHECK: v_cmp_eq_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xc4,0x7c]
+0xfe,0x01,0xc4,0x7c
+
+# CHECK: v_cmp_eq_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xc5,0x7c]
+0x00,0xfc,0xc5,0x7c
+
+# CHECK: v_cmp_eq_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x62,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x62,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x62,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x62,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x62,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x62,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x62,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x62,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x62,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x62,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x62,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x62,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x62,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x62,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x62,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x62,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x62,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x62,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x62,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x62,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x62,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x62,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_eq_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x62,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x62,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_eq_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x62,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x62,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_eq_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x62,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x62,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_eq_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x62,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x62,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_eq_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x62,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x62,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_eq_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x62,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x62,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_eq_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x62,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x62,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_eq_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x62,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x62,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_eq_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x62,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x62,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_eq_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x62,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x62,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_eq_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x62,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x62,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xc6,0x7c]
+0x00,0x00,0xc6,0x7c
+
+# CHECK: v_cmp_le_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xc6,0x7c]
+0x02,0x00,0xc6,0x7c
+
+# CHECK: v_cmp_le_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xc6,0x7c]
+0x64,0x00,0xc6,0x7c
+
+# CHECK: v_cmp_le_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xc6,0x7c]
+0x66,0x00,0xc6,0x7c
+
+# CHECK: v_cmp_le_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xc6,0x7c]
+0x6a,0x00,0xc6,0x7c
+
+# CHECK: v_cmp_le_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xc6,0x7c]
+0x6c,0x00,0xc6,0x7c
+
+# CHECK: v_cmp_le_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xc6,0x7c]
+0x6e,0x00,0xc6,0x7c
+
+# CHECK: v_cmp_le_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xc6,0x7c]
+0x7a,0x00,0xc6,0x7c
+
+# CHECK: v_cmp_le_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xc6,0x7c]
+0x7e,0x00,0xc6,0x7c
+
+# CHECK: v_cmp_le_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xc6,0x7c]
+0x80,0x00,0xc6,0x7c
+
+# CHECK: v_cmp_le_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xc6,0x7c]
+0xc1,0x00,0xc6,0x7c
+
+# CHECK: v_cmp_le_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xc6,0x7c]
+0xf0,0x00,0xc6,0x7c
+
+# CHECK: v_cmp_le_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xc6,0x7c]
+0xf7,0x00,0xc6,0x7c
+
+# CHECK: v_cmp_le_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xc6,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xc6,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_le_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xc6,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xc6,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_le_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xc6,0x7c]
+0x00,0x01,0xc6,0x7c
+
+# CHECK: v_cmp_le_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xc6,0x7c]
+0xfe,0x01,0xc6,0x7c
+
+# CHECK: v_cmp_le_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xc7,0x7c]
+0x00,0xfc,0xc7,0x7c
+
+# CHECK: v_cmp_le_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x63,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x63,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x63,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x63,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x63,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x63,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x63,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x63,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x63,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x63,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x63,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x63,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x63,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x63,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x63,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x63,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x63,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x63,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x63,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x63,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x63,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x63,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_le_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x63,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x63,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_le_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x63,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x63,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_le_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x63,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x63,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_le_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x63,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x63,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_le_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x63,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x63,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_le_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x63,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x63,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_le_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x63,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x63,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_le_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x63,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x63,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_le_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x63,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x63,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_le_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x63,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x63,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_le_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x63,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x63,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xc8,0x7c]
+0x00,0x00,0xc8,0x7c
+
+# CHECK: v_cmp_gt_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xc8,0x7c]
+0x02,0x00,0xc8,0x7c
+
+# CHECK: v_cmp_gt_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xc8,0x7c]
+0x64,0x00,0xc8,0x7c
+
+# CHECK: v_cmp_gt_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xc8,0x7c]
+0x66,0x00,0xc8,0x7c
+
+# CHECK: v_cmp_gt_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xc8,0x7c]
+0x6a,0x00,0xc8,0x7c
+
+# CHECK: v_cmp_gt_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xc8,0x7c]
+0x6c,0x00,0xc8,0x7c
+
+# CHECK: v_cmp_gt_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xc8,0x7c]
+0x6e,0x00,0xc8,0x7c
+
+# CHECK: v_cmp_gt_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xc8,0x7c]
+0x7a,0x00,0xc8,0x7c
+
+# CHECK: v_cmp_gt_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xc8,0x7c]
+0x7e,0x00,0xc8,0x7c
+
+# CHECK: v_cmp_gt_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xc8,0x7c]
+0x80,0x00,0xc8,0x7c
+
+# CHECK: v_cmp_gt_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xc8,0x7c]
+0xc1,0x00,0xc8,0x7c
+
+# CHECK: v_cmp_gt_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xc8,0x7c]
+0xf0,0x00,0xc8,0x7c
+
+# CHECK: v_cmp_gt_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xc8,0x7c]
+0xf7,0x00,0xc8,0x7c
+
+# CHECK: v_cmp_gt_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xc8,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xc8,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_gt_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xc8,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xc8,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_gt_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xc8,0x7c]
+0x00,0x01,0xc8,0x7c
+
+# CHECK: v_cmp_gt_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xc8,0x7c]
+0xfe,0x01,0xc8,0x7c
+
+# CHECK: v_cmp_gt_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xc9,0x7c]
+0x00,0xfc,0xc9,0x7c
+
+# CHECK: v_cmp_gt_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x64,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x64,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x64,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x64,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x64,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x64,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x64,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x64,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x64,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x64,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x64,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x64,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x64,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x64,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x64,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x64,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x64,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x64,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x64,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x64,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x64,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x64,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_gt_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x64,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x64,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_gt_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x64,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x64,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_gt_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x64,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x64,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_gt_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x64,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x64,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_gt_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x64,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x64,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_gt_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x64,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x64,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_gt_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x64,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x64,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_gt_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x64,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x64,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_gt_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x64,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x64,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_gt_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x64,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x64,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_gt_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x64,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x64,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xca,0x7c]
+0x00,0x00,0xca,0x7c
+
+# CHECK: v_cmp_lg_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xca,0x7c]
+0x02,0x00,0xca,0x7c
+
+# CHECK: v_cmp_lg_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xca,0x7c]
+0x64,0x00,0xca,0x7c
+
+# CHECK: v_cmp_lg_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xca,0x7c]
+0x66,0x00,0xca,0x7c
+
+# CHECK: v_cmp_lg_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xca,0x7c]
+0x6a,0x00,0xca,0x7c
+
+# CHECK: v_cmp_lg_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xca,0x7c]
+0x6c,0x00,0xca,0x7c
+
+# CHECK: v_cmp_lg_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xca,0x7c]
+0x6e,0x00,0xca,0x7c
+
+# CHECK: v_cmp_lg_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xca,0x7c]
+0x7a,0x00,0xca,0x7c
+
+# CHECK: v_cmp_lg_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xca,0x7c]
+0x7e,0x00,0xca,0x7c
+
+# CHECK: v_cmp_lg_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xca,0x7c]
+0x80,0x00,0xca,0x7c
+
+# CHECK: v_cmp_lg_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xca,0x7c]
+0xc1,0x00,0xca,0x7c
+
+# CHECK: v_cmp_lg_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xca,0x7c]
+0xf0,0x00,0xca,0x7c
+
+# CHECK: v_cmp_lg_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xca,0x7c]
+0xf7,0x00,0xca,0x7c
+
+# CHECK: v_cmp_lg_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xca,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xca,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_lg_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xca,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xca,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_lg_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xca,0x7c]
+0x00,0x01,0xca,0x7c
+
+# CHECK: v_cmp_lg_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xca,0x7c]
+0xfe,0x01,0xca,0x7c
+
+# CHECK: v_cmp_lg_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xcb,0x7c]
+0x00,0xfc,0xcb,0x7c
+
+# CHECK: v_cmp_lg_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x65,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x65,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x65,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x65,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x65,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x65,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x65,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x65,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x65,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x65,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x65,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x65,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x65,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x65,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x65,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x65,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x65,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x65,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x65,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x65,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x65,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x65,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x65,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x65,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_lg_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x65,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x65,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_lg_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x65,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x65,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_lg_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x65,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x65,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_lg_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x65,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x65,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_lg_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x65,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x65,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_lg_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x65,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x65,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_lg_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x65,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x65,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_lg_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x65,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x65,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_lg_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x65,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x65,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_lg_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x65,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x65,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_lg_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x65,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x65,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_lg_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x65,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x65,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xcc,0x7c]
+0x00,0x00,0xcc,0x7c
+
+# CHECK: v_cmp_ge_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xcc,0x7c]
+0x02,0x00,0xcc,0x7c
+
+# CHECK: v_cmp_ge_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xcc,0x7c]
+0x64,0x00,0xcc,0x7c
+
+# CHECK: v_cmp_ge_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xcc,0x7c]
+0x66,0x00,0xcc,0x7c
+
+# CHECK: v_cmp_ge_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xcc,0x7c]
+0x6a,0x00,0xcc,0x7c
+
+# CHECK: v_cmp_ge_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xcc,0x7c]
+0x6c,0x00,0xcc,0x7c
+
+# CHECK: v_cmp_ge_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xcc,0x7c]
+0x6e,0x00,0xcc,0x7c
+
+# CHECK: v_cmp_ge_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xcc,0x7c]
+0x7a,0x00,0xcc,0x7c
+
+# CHECK: v_cmp_ge_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xcc,0x7c]
+0x7e,0x00,0xcc,0x7c
+
+# CHECK: v_cmp_ge_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xcc,0x7c]
+0x80,0x00,0xcc,0x7c
+
+# CHECK: v_cmp_ge_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xcc,0x7c]
+0xc1,0x00,0xcc,0x7c
+
+# CHECK: v_cmp_ge_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xcc,0x7c]
+0xf0,0x00,0xcc,0x7c
+
+# CHECK: v_cmp_ge_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xcc,0x7c]
+0xf7,0x00,0xcc,0x7c
+
+# CHECK: v_cmp_ge_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xcc,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xcc,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_ge_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xcc,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xcc,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_ge_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xcc,0x7c]
+0x00,0x01,0xcc,0x7c
+
+# CHECK: v_cmp_ge_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xcc,0x7c]
+0xfe,0x01,0xcc,0x7c
+
+# CHECK: v_cmp_ge_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xcd,0x7c]
+0x00,0xfc,0xcd,0x7c
+
+# CHECK: v_cmp_ge_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x66,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x66,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x66,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x66,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x66,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x66,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x66,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x66,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x66,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x66,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x66,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x66,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x66,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x66,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x66,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x66,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x66,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x66,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x66,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x66,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x66,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x66,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_ge_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x66,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x66,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_ge_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x66,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x66,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_ge_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x66,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x66,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_ge_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x66,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x66,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_ge_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x66,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x66,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_ge_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x66,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x66,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_ge_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x66,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x66,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_ge_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x66,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x66,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_ge_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x66,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x66,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_ge_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x66,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x66,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_ge_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x66,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x66,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xce,0x7c]
+0x00,0x00,0xce,0x7c
+
+# CHECK: v_cmp_o_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xce,0x7c]
+0x02,0x00,0xce,0x7c
+
+# CHECK: v_cmp_o_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xce,0x7c]
+0x64,0x00,0xce,0x7c
+
+# CHECK: v_cmp_o_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xce,0x7c]
+0x66,0x00,0xce,0x7c
+
+# CHECK: v_cmp_o_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xce,0x7c]
+0x6a,0x00,0xce,0x7c
+
+# CHECK: v_cmp_o_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xce,0x7c]
+0x6c,0x00,0xce,0x7c
+
+# CHECK: v_cmp_o_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xce,0x7c]
+0x6e,0x00,0xce,0x7c
+
+# CHECK: v_cmp_o_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xce,0x7c]
+0x7a,0x00,0xce,0x7c
+
+# CHECK: v_cmp_o_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xce,0x7c]
+0x7e,0x00,0xce,0x7c
+
+# CHECK: v_cmp_o_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xce,0x7c]
+0x80,0x00,0xce,0x7c
+
+# CHECK: v_cmp_o_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xce,0x7c]
+0xc1,0x00,0xce,0x7c
+
+# CHECK: v_cmp_o_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xce,0x7c]
+0xf0,0x00,0xce,0x7c
+
+# CHECK: v_cmp_o_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xce,0x7c]
+0xf7,0x00,0xce,0x7c
+
+# CHECK: v_cmp_o_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xce,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xce,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_o_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xce,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xce,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_o_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xce,0x7c]
+0x00,0x01,0xce,0x7c
+
+# CHECK: v_cmp_o_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xce,0x7c]
+0xfe,0x01,0xce,0x7c
+
+# CHECK: v_cmp_o_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xcf,0x7c]
+0x00,0xfc,0xcf,0x7c
+
+# CHECK: v_cmp_o_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x67,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x67,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x67,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x67,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x67,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x67,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x67,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x67,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x67,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x67,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x67,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x67,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x67,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x67,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x67,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x67,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x67,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x67,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x67,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x67,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_o_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x67,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x67,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_o_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x67,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x67,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_o_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x67,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x67,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_o_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x67,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x67,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_o_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x67,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x67,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_o_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x67,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x67,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_o_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x67,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x67,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_o_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x67,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x67,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_o_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x67,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x67,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_o_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x67,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x67,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_o_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x67,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x67,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_o_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x67,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x67,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xd0,0x7c]
+0x00,0x00,0xd0,0x7c
+
+# CHECK: v_cmp_u_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xd0,0x7c]
+0x02,0x00,0xd0,0x7c
+
+# CHECK: v_cmp_u_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xd0,0x7c]
+0x64,0x00,0xd0,0x7c
+
+# CHECK: v_cmp_u_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xd0,0x7c]
+0x66,0x00,0xd0,0x7c
+
+# CHECK: v_cmp_u_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xd0,0x7c]
+0x6a,0x00,0xd0,0x7c
+
+# CHECK: v_cmp_u_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xd0,0x7c]
+0x6c,0x00,0xd0,0x7c
+
+# CHECK: v_cmp_u_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xd0,0x7c]
+0x6e,0x00,0xd0,0x7c
+
+# CHECK: v_cmp_u_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xd0,0x7c]
+0x7a,0x00,0xd0,0x7c
+
+# CHECK: v_cmp_u_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xd0,0x7c]
+0x7e,0x00,0xd0,0x7c
+
+# CHECK: v_cmp_u_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xd0,0x7c]
+0x80,0x00,0xd0,0x7c
+
+# CHECK: v_cmp_u_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xd0,0x7c]
+0xc1,0x00,0xd0,0x7c
+
+# CHECK: v_cmp_u_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xd0,0x7c]
+0xf0,0x00,0xd0,0x7c
+
+# CHECK: v_cmp_u_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xd0,0x7c]
+0xf7,0x00,0xd0,0x7c
+
+# CHECK: v_cmp_u_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xd0,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xd0,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_u_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xd0,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xd0,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xd0,0x7c]
+0x00,0x01,0xd0,0x7c
+
+# CHECK: v_cmp_u_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xd0,0x7c]
+0xfe,0x01,0xd0,0x7c
+
+# CHECK: v_cmp_u_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xd1,0x7c]
+0x00,0xfc,0xd1,0x7c
+
+# CHECK: v_cmp_u_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x68,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x68,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x68,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x68,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x68,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x68,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x68,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x68,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x68,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x68,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x68,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x68,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x68,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x68,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x68,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x68,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x68,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x68,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x68,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x68,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x68,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x68,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x68,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x68,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_u_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x68,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x68,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_u_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x68,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x68,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_u_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x68,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x68,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_u_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x68,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x68,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_u_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x68,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x68,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_u_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x68,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x68,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_u_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x68,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x68,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_u_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x68,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x68,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_u_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x68,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x68,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_u_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x68,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x68,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_u_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x68,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x68,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_u_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x68,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x68,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xd2,0x7c]
+0x00,0x00,0xd2,0x7c
+
+# CHECK: v_cmp_nge_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xd2,0x7c]
+0x02,0x00,0xd2,0x7c
+
+# CHECK: v_cmp_nge_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xd2,0x7c]
+0x64,0x00,0xd2,0x7c
+
+# CHECK: v_cmp_nge_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xd2,0x7c]
+0x66,0x00,0xd2,0x7c
+
+# CHECK: v_cmp_nge_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xd2,0x7c]
+0x6a,0x00,0xd2,0x7c
+
+# CHECK: v_cmp_nge_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xd2,0x7c]
+0x6c,0x00,0xd2,0x7c
+
+# CHECK: v_cmp_nge_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xd2,0x7c]
+0x6e,0x00,0xd2,0x7c
+
+# CHECK: v_cmp_nge_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xd2,0x7c]
+0x7a,0x00,0xd2,0x7c
+
+# CHECK: v_cmp_nge_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xd2,0x7c]
+0x7e,0x00,0xd2,0x7c
+
+# CHECK: v_cmp_nge_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xd2,0x7c]
+0x80,0x00,0xd2,0x7c
+
+# CHECK: v_cmp_nge_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xd2,0x7c]
+0xc1,0x00,0xd2,0x7c
+
+# CHECK: v_cmp_nge_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xd2,0x7c]
+0xf0,0x00,0xd2,0x7c
+
+# CHECK: v_cmp_nge_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xd2,0x7c]
+0xf7,0x00,0xd2,0x7c
+
+# CHECK: v_cmp_nge_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xd2,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xd2,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_nge_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xd2,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xd2,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_nge_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xd2,0x7c]
+0x00,0x01,0xd2,0x7c
+
+# CHECK: v_cmp_nge_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xd2,0x7c]
+0xfe,0x01,0xd2,0x7c
+
+# CHECK: v_cmp_nge_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xd3,0x7c]
+0x00,0xfc,0xd3,0x7c
+
+# CHECK: v_cmp_nge_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x69,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x69,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x69,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x69,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x69,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x69,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x69,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x69,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x69,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x69,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x69,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x69,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x69,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x69,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x69,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x69,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x69,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x69,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x69,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x69,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_nge_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x69,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x69,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_nge_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x69,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x69,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_nge_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x69,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x69,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_nge_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x69,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x69,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_nge_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x69,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x69,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_nge_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x69,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x69,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_nge_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x69,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x69,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_nge_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x69,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x69,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_nge_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x69,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x69,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_nge_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x69,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x69,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_nge_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x69,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x69,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_nge_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x69,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x69,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xd4,0x7c]
+0x00,0x00,0xd4,0x7c
+
+# CHECK: v_cmp_nlg_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xd4,0x7c]
+0x02,0x00,0xd4,0x7c
+
+# CHECK: v_cmp_nlg_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xd4,0x7c]
+0x64,0x00,0xd4,0x7c
+
+# CHECK: v_cmp_nlg_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xd4,0x7c]
+0x66,0x00,0xd4,0x7c
+
+# CHECK: v_cmp_nlg_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xd4,0x7c]
+0x6a,0x00,0xd4,0x7c
+
+# CHECK: v_cmp_nlg_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xd4,0x7c]
+0x6c,0x00,0xd4,0x7c
+
+# CHECK: v_cmp_nlg_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xd4,0x7c]
+0x6e,0x00,0xd4,0x7c
+
+# CHECK: v_cmp_nlg_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xd4,0x7c]
+0x7a,0x00,0xd4,0x7c
+
+# CHECK: v_cmp_nlg_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xd4,0x7c]
+0x7e,0x00,0xd4,0x7c
+
+# CHECK: v_cmp_nlg_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xd4,0x7c]
+0x80,0x00,0xd4,0x7c
+
+# CHECK: v_cmp_nlg_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xd4,0x7c]
+0xc1,0x00,0xd4,0x7c
+
+# CHECK: v_cmp_nlg_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xd4,0x7c]
+0xf0,0x00,0xd4,0x7c
+
+# CHECK: v_cmp_nlg_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xd4,0x7c]
+0xf7,0x00,0xd4,0x7c
+
+# CHECK: v_cmp_nlg_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xd4,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xd4,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_nlg_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xd4,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xd4,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_nlg_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xd4,0x7c]
+0x00,0x01,0xd4,0x7c
+
+# CHECK: v_cmp_nlg_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xd4,0x7c]
+0xfe,0x01,0xd4,0x7c
+
+# CHECK: v_cmp_nlg_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xd5,0x7c]
+0x00,0xfc,0xd5,0x7c
+
+# CHECK: v_cmp_nlg_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x6a,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x6a,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x6a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x6a,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x6a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x6a,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x6a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x6a,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x6a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x6a,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x6a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x6a,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x6a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x6a,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x6a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x6a,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x6a,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x6a,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x6a,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x6a,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x6a,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x6a,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x6a,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x6a,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x6a,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x6a,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x6a,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x6a,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x6a,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x6a,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x6a,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x6a,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x6a,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x6a,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x6a,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x6a,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_nlg_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x6a,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x6a,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_nlg_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x6a,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x6a,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_nlg_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x6a,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x6a,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_nlg_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x6a,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x6a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xd6,0x7c]
+0x00,0x00,0xd6,0x7c
+
+# CHECK: v_cmp_ngt_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xd6,0x7c]
+0x02,0x00,0xd6,0x7c
+
+# CHECK: v_cmp_ngt_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xd6,0x7c]
+0x64,0x00,0xd6,0x7c
+
+# CHECK: v_cmp_ngt_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xd6,0x7c]
+0x66,0x00,0xd6,0x7c
+
+# CHECK: v_cmp_ngt_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xd6,0x7c]
+0x6a,0x00,0xd6,0x7c
+
+# CHECK: v_cmp_ngt_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xd6,0x7c]
+0x6c,0x00,0xd6,0x7c
+
+# CHECK: v_cmp_ngt_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xd6,0x7c]
+0x6e,0x00,0xd6,0x7c
+
+# CHECK: v_cmp_ngt_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xd6,0x7c]
+0x7a,0x00,0xd6,0x7c
+
+# CHECK: v_cmp_ngt_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xd6,0x7c]
+0x7e,0x00,0xd6,0x7c
+
+# CHECK: v_cmp_ngt_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xd6,0x7c]
+0x80,0x00,0xd6,0x7c
+
+# CHECK: v_cmp_ngt_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xd6,0x7c]
+0xc1,0x00,0xd6,0x7c
+
+# CHECK: v_cmp_ngt_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xd6,0x7c]
+0xf0,0x00,0xd6,0x7c
+
+# CHECK: v_cmp_ngt_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xd6,0x7c]
+0xf7,0x00,0xd6,0x7c
+
+# CHECK: v_cmp_ngt_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xd6,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xd6,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_ngt_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xd6,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xd6,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_ngt_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xd6,0x7c]
+0x00,0x01,0xd6,0x7c
+
+# CHECK: v_cmp_ngt_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xd6,0x7c]
+0xfe,0x01,0xd6,0x7c
+
+# CHECK: v_cmp_ngt_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xd7,0x7c]
+0x00,0xfc,0xd7,0x7c
+
+# CHECK: v_cmp_ngt_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x6b,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x6b,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x6b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x6b,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x6b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x6b,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x6b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x6b,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x6b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x6b,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x6b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x6b,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x6b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x6b,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x6b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x6b,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x6b,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x6b,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x6b,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x6b,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x6b,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x6b,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x6b,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x6b,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x6b,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x6b,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x6b,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x6b,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x6b,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x6b,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x6b,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x6b,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x6b,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x6b,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x6b,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x6b,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_ngt_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x6b,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x6b,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_ngt_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x6b,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x6b,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_ngt_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x6b,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x6b,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_ngt_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x6b,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x6b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xd8,0x7c]
+0x00,0x00,0xd8,0x7c
+
+# CHECK: v_cmp_nle_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xd8,0x7c]
+0x02,0x00,0xd8,0x7c
+
+# CHECK: v_cmp_nle_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xd8,0x7c]
+0x64,0x00,0xd8,0x7c
+
+# CHECK: v_cmp_nle_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xd8,0x7c]
+0x66,0x00,0xd8,0x7c
+
+# CHECK: v_cmp_nle_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xd8,0x7c]
+0x6a,0x00,0xd8,0x7c
+
+# CHECK: v_cmp_nle_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xd8,0x7c]
+0x6c,0x00,0xd8,0x7c
+
+# CHECK: v_cmp_nle_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xd8,0x7c]
+0x6e,0x00,0xd8,0x7c
+
+# CHECK: v_cmp_nle_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xd8,0x7c]
+0x7a,0x00,0xd8,0x7c
+
+# CHECK: v_cmp_nle_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xd8,0x7c]
+0x7e,0x00,0xd8,0x7c
+
+# CHECK: v_cmp_nle_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xd8,0x7c]
+0x80,0x00,0xd8,0x7c
+
+# CHECK: v_cmp_nle_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xd8,0x7c]
+0xc1,0x00,0xd8,0x7c
+
+# CHECK: v_cmp_nle_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xd8,0x7c]
+0xf0,0x00,0xd8,0x7c
+
+# CHECK: v_cmp_nle_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xd8,0x7c]
+0xf7,0x00,0xd8,0x7c
+
+# CHECK: v_cmp_nle_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xd8,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xd8,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_nle_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xd8,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xd8,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_nle_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xd8,0x7c]
+0x00,0x01,0xd8,0x7c
+
+# CHECK: v_cmp_nle_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xd8,0x7c]
+0xfe,0x01,0xd8,0x7c
+
+# CHECK: v_cmp_nle_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xd9,0x7c]
+0x00,0xfc,0xd9,0x7c
+
+# CHECK: v_cmp_nle_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x6c,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x6c,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x6c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x6c,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x6c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x6c,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x6c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x6c,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x6c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x6c,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x6c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x6c,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x6c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x6c,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x6c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x6c,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x6c,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x6c,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x6c,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_nle_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x6c,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x6c,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_nle_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x6c,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x6c,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_nle_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x6c,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x6c,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_nle_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x6c,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x6c,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_nle_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x6c,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x6c,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_nle_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x6c,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x6c,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_nle_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x6c,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x6c,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_nle_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x6c,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x6c,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_nle_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x6c,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x6c,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_nle_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x6c,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x6c,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_nle_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x6c,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x6c,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_nle_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x6c,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x6c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xda,0x7c]
+0x00,0x00,0xda,0x7c
+
+# CHECK: v_cmp_neq_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xda,0x7c]
+0x02,0x00,0xda,0x7c
+
+# CHECK: v_cmp_neq_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xda,0x7c]
+0x64,0x00,0xda,0x7c
+
+# CHECK: v_cmp_neq_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xda,0x7c]
+0x66,0x00,0xda,0x7c
+
+# CHECK: v_cmp_neq_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xda,0x7c]
+0x6a,0x00,0xda,0x7c
+
+# CHECK: v_cmp_neq_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xda,0x7c]
+0x6c,0x00,0xda,0x7c
+
+# CHECK: v_cmp_neq_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xda,0x7c]
+0x6e,0x00,0xda,0x7c
+
+# CHECK: v_cmp_neq_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xda,0x7c]
+0x7a,0x00,0xda,0x7c
+
+# CHECK: v_cmp_neq_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xda,0x7c]
+0x7e,0x00,0xda,0x7c
+
+# CHECK: v_cmp_neq_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xda,0x7c]
+0x80,0x00,0xda,0x7c
+
+# CHECK: v_cmp_neq_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xda,0x7c]
+0xc1,0x00,0xda,0x7c
+
+# CHECK: v_cmp_neq_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xda,0x7c]
+0xf0,0x00,0xda,0x7c
+
+# CHECK: v_cmp_neq_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xda,0x7c]
+0xf7,0x00,0xda,0x7c
+
+# CHECK: v_cmp_neq_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xda,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xda,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_neq_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xda,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xda,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_neq_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xda,0x7c]
+0x00,0x01,0xda,0x7c
+
+# CHECK: v_cmp_neq_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xda,0x7c]
+0xfe,0x01,0xda,0x7c
+
+# CHECK: v_cmp_neq_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xdb,0x7c]
+0x00,0xfc,0xdb,0x7c
+
+# CHECK: v_cmp_neq_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x6d,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x6d,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x6d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x6d,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x6d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x6d,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x6d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x6d,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x6d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x6d,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x6d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x6d,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x6d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x6d,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x6d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x6d,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x6d,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x6d,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x6d,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x6d,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_neq_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x6d,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x6d,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_neq_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x6d,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x6d,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_neq_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x6d,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x6d,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_neq_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x6d,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x6d,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_neq_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x6d,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x6d,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_neq_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x6d,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x6d,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_neq_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x6d,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x6d,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_neq_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x6d,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x6d,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_neq_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x6d,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x6d,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_neq_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x6d,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x6d,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_neq_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x6d,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x6d,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_neq_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x6d,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x6d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xdc,0x7c]
+0x00,0x00,0xdc,0x7c
+
+# CHECK: v_cmp_nlt_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xdc,0x7c]
+0x02,0x00,0xdc,0x7c
+
+# CHECK: v_cmp_nlt_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xdc,0x7c]
+0x64,0x00,0xdc,0x7c
+
+# CHECK: v_cmp_nlt_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xdc,0x7c]
+0x66,0x00,0xdc,0x7c
+
+# CHECK: v_cmp_nlt_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xdc,0x7c]
+0x6a,0x00,0xdc,0x7c
+
+# CHECK: v_cmp_nlt_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xdc,0x7c]
+0x6c,0x00,0xdc,0x7c
+
+# CHECK: v_cmp_nlt_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xdc,0x7c]
+0x6e,0x00,0xdc,0x7c
+
+# CHECK: v_cmp_nlt_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xdc,0x7c]
+0x7a,0x00,0xdc,0x7c
+
+# CHECK: v_cmp_nlt_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xdc,0x7c]
+0x7e,0x00,0xdc,0x7c
+
+# CHECK: v_cmp_nlt_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xdc,0x7c]
+0x80,0x00,0xdc,0x7c
+
+# CHECK: v_cmp_nlt_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xdc,0x7c]
+0xc1,0x00,0xdc,0x7c
+
+# CHECK: v_cmp_nlt_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xdc,0x7c]
+0xf0,0x00,0xdc,0x7c
+
+# CHECK: v_cmp_nlt_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xdc,0x7c]
+0xf7,0x00,0xdc,0x7c
+
+# CHECK: v_cmp_nlt_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xdc,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xdc,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_nlt_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xdc,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xdc,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_nlt_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xdc,0x7c]
+0x00,0x01,0xdc,0x7c
+
+# CHECK: v_cmp_nlt_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xdc,0x7c]
+0xfe,0x01,0xdc,0x7c
+
+# CHECK: v_cmp_nlt_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xdd,0x7c]
+0x00,0xfc,0xdd,0x7c
+
+# CHECK: v_cmp_nlt_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x6e,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x6e,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x6e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x6e,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x6e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x6e,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x6e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x6e,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x6e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x6e,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x6e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x6e,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x6e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x6e,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x6e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x6e,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x6e,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x6e,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x6e,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x6e,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x6e,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x6e,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x6e,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x6e,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x6e,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x6e,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x6e,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x6e,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x6e,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x6e,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x6e,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x6e,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x6e,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x6e,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x6e,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x6e,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_nlt_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x6e,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x6e,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_nlt_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x6e,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x6e,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_nlt_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x6e,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x6e,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_nlt_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x6e,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x6e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xde,0x7c]
+0x00,0x00,0xde,0x7c
+
+# CHECK: v_cmp_tru_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xde,0x7c]
+0x02,0x00,0xde,0x7c
+
+# CHECK: v_cmp_tru_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xde,0x7c]
+0x64,0x00,0xde,0x7c
+
+# CHECK: v_cmp_tru_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xde,0x7c]
+0x66,0x00,0xde,0x7c
+
+# CHECK: v_cmp_tru_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xde,0x7c]
+0x6a,0x00,0xde,0x7c
+
+# CHECK: v_cmp_tru_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xde,0x7c]
+0x6c,0x00,0xde,0x7c
+
+# CHECK: v_cmp_tru_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xde,0x7c]
+0x6e,0x00,0xde,0x7c
+
+# CHECK: v_cmp_tru_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xde,0x7c]
+0x7a,0x00,0xde,0x7c
+
+# CHECK: v_cmp_tru_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xde,0x7c]
+0x7e,0x00,0xde,0x7c
+
+# CHECK: v_cmp_tru_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xde,0x7c]
+0x80,0x00,0xde,0x7c
+
+# CHECK: v_cmp_tru_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xde,0x7c]
+0xc1,0x00,0xde,0x7c
+
+# CHECK: v_cmp_tru_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xde,0x7c]
+0xf0,0x00,0xde,0x7c
+
+# CHECK: v_cmp_tru_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xde,0x7c]
+0xf7,0x00,0xde,0x7c
+
+# CHECK: v_cmp_tru_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xde,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xde,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_tru_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xde,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xde,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_tru_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xde,0x7c]
+0x00,0x01,0xde,0x7c
+
+# CHECK: v_cmp_tru_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xde,0x7c]
+0xfe,0x01,0xde,0x7c
+
+# CHECK: v_cmp_tru_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xdf,0x7c]
+0x00,0xfc,0xdf,0x7c
+
+# CHECK: v_cmp_tru_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x6f,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x6f,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x6f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x6f,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x6f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x6f,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x6f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x6f,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x6f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x6f,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x6f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x6f,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x6f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x6f,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x6f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x6f,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x6f,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x6f,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x6f,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x6f,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_tru_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x6f,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x6f,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_tru_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x6f,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x6f,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_tru_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x6f,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x6f,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_tru_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x6f,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x6f,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_tru_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x6f,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x6f,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_tru_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x6f,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x6f,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_tru_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x6f,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x6f,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_tru_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x6f,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x6f,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_tru_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x6f,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x6f,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmp_tru_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x6f,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x6f,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmp_tru_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x6f,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x6f,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmp_tru_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x6f,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x6f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe0,0x7c]
+0x00,0x00,0xe0,0x7c
+
+# CHECK: v_cmpx_f_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xe0,0x7c]
+0x02,0x00,0xe0,0x7c
+
+# CHECK: v_cmpx_f_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xe0,0x7c]
+0x64,0x00,0xe0,0x7c
+
+# CHECK: v_cmpx_f_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xe0,0x7c]
+0x66,0x00,0xe0,0x7c
+
+# CHECK: v_cmpx_f_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xe0,0x7c]
+0x6a,0x00,0xe0,0x7c
+
+# CHECK: v_cmpx_f_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xe0,0x7c]
+0x6c,0x00,0xe0,0x7c
+
+# CHECK: v_cmpx_f_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xe0,0x7c]
+0x6e,0x00,0xe0,0x7c
+
+# CHECK: v_cmpx_f_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xe0,0x7c]
+0x7a,0x00,0xe0,0x7c
+
+# CHECK: v_cmpx_f_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xe0,0x7c]
+0x7e,0x00,0xe0,0x7c
+
+# CHECK: v_cmpx_f_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xe0,0x7c]
+0x80,0x00,0xe0,0x7c
+
+# CHECK: v_cmpx_f_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xe0,0x7c]
+0xc1,0x00,0xe0,0x7c
+
+# CHECK: v_cmpx_f_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xe0,0x7c]
+0xf0,0x00,0xe0,0x7c
+
+# CHECK: v_cmpx_f_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xe0,0x7c]
+0xf7,0x00,0xe0,0x7c
+
+# CHECK: v_cmpx_f_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xe0,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xe0,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_f_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xe0,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xe0,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_f_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xe0,0x7c]
+0x00,0x01,0xe0,0x7c
+
+# CHECK: v_cmpx_f_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xe0,0x7c]
+0xfe,0x01,0xe0,0x7c
+
+# CHECK: v_cmpx_f_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xe1,0x7c]
+0x00,0xfc,0xe1,0x7c
+
+# CHECK: v_cmpx_f_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x70,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x70,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x70,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x70,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x70,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x70,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x70,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x70,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x70,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x70,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x70,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x70,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x70,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x70,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x70,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x70,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x70,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x70,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x70,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x70,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x70,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x70,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x70,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x70,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x70,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x70,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_f_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x70,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x70,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_f_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x70,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x70,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_f_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x70,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x70,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_f_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x70,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x70,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_f_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x70,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x70,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_f_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x70,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x70,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_f_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x70,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x70,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_f_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x70,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x70,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_f_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x70,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x70,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_f_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x70,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x70,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_f_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x70,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x70,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe2,0x7c]
+0x00,0x00,0xe2,0x7c
+
+# CHECK: v_cmpx_lt_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xe2,0x7c]
+0x02,0x00,0xe2,0x7c
+
+# CHECK: v_cmpx_lt_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xe2,0x7c]
+0x64,0x00,0xe2,0x7c
+
+# CHECK: v_cmpx_lt_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xe2,0x7c]
+0x66,0x00,0xe2,0x7c
+
+# CHECK: v_cmpx_lt_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xe2,0x7c]
+0x6a,0x00,0xe2,0x7c
+
+# CHECK: v_cmpx_lt_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xe2,0x7c]
+0x6c,0x00,0xe2,0x7c
+
+# CHECK: v_cmpx_lt_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xe2,0x7c]
+0x6e,0x00,0xe2,0x7c
+
+# CHECK: v_cmpx_lt_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xe2,0x7c]
+0x7a,0x00,0xe2,0x7c
+
+# CHECK: v_cmpx_lt_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xe2,0x7c]
+0x7e,0x00,0xe2,0x7c
+
+# CHECK: v_cmpx_lt_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xe2,0x7c]
+0x80,0x00,0xe2,0x7c
+
+# CHECK: v_cmpx_lt_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xe2,0x7c]
+0xc1,0x00,0xe2,0x7c
+
+# CHECK: v_cmpx_lt_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xe2,0x7c]
+0xf0,0x00,0xe2,0x7c
+
+# CHECK: v_cmpx_lt_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xe2,0x7c]
+0xf7,0x00,0xe2,0x7c
+
+# CHECK: v_cmpx_lt_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xe2,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xe2,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_lt_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xe2,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xe2,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_lt_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xe2,0x7c]
+0x00,0x01,0xe2,0x7c
+
+# CHECK: v_cmpx_lt_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xe2,0x7c]
+0xfe,0x01,0xe2,0x7c
+
+# CHECK: v_cmpx_lt_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xe3,0x7c]
+0x00,0xfc,0xe3,0x7c
+
+# CHECK: v_cmpx_lt_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x71,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x71,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x71,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x71,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x71,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x71,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x71,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x71,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x71,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x71,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x71,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x71,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x71,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x71,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x71,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x71,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x71,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x71,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x71,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x71,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x71,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x71,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x71,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x71,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x71,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x71,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x71,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x71,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x71,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x71,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x71,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x71,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x71,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x71,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x71,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x71,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x71,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x71,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x71,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x71,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_lt_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x71,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x71,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_lt_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x71,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x71,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_lt_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x71,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x71,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_lt_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x71,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x71,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe4,0x7c]
+0x00,0x00,0xe4,0x7c
+
+# CHECK: v_cmpx_eq_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xe4,0x7c]
+0x02,0x00,0xe4,0x7c
+
+# CHECK: v_cmpx_eq_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xe4,0x7c]
+0x64,0x00,0xe4,0x7c
+
+# CHECK: v_cmpx_eq_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xe4,0x7c]
+0x66,0x00,0xe4,0x7c
+
+# CHECK: v_cmpx_eq_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xe4,0x7c]
+0x6a,0x00,0xe4,0x7c
+
+# CHECK: v_cmpx_eq_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xe4,0x7c]
+0x6c,0x00,0xe4,0x7c
+
+# CHECK: v_cmpx_eq_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xe4,0x7c]
+0x6e,0x00,0xe4,0x7c
+
+# CHECK: v_cmpx_eq_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xe4,0x7c]
+0x7a,0x00,0xe4,0x7c
+
+# CHECK: v_cmpx_eq_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xe4,0x7c]
+0x7e,0x00,0xe4,0x7c
+
+# CHECK: v_cmpx_eq_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xe4,0x7c]
+0x80,0x00,0xe4,0x7c
+
+# CHECK: v_cmpx_eq_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xe4,0x7c]
+0xc1,0x00,0xe4,0x7c
+
+# CHECK: v_cmpx_eq_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xe4,0x7c]
+0xf0,0x00,0xe4,0x7c
+
+# CHECK: v_cmpx_eq_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xe4,0x7c]
+0xf7,0x00,0xe4,0x7c
+
+# CHECK: v_cmpx_eq_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xe4,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xe4,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_eq_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xe4,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xe4,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_eq_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xe4,0x7c]
+0x00,0x01,0xe4,0x7c
+
+# CHECK: v_cmpx_eq_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xe4,0x7c]
+0xfe,0x01,0xe4,0x7c
+
+# CHECK: v_cmpx_eq_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xe5,0x7c]
+0x00,0xfc,0xe5,0x7c
+
+# CHECK: v_cmpx_eq_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x72,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x72,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x72,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x72,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x72,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x72,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x72,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x72,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x72,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x72,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x72,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x72,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x72,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x72,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x72,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x72,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x72,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x72,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x72,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x72,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x72,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x72,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x72,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x72,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x72,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x72,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x72,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x72,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x72,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x72,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x72,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x72,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x72,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x72,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x72,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x72,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x72,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x72,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x72,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x72,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_eq_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x72,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x72,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_eq_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x72,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x72,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_eq_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x72,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x72,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_eq_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x72,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x72,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe6,0x7c]
+0x00,0x00,0xe6,0x7c
+
+# CHECK: v_cmpx_le_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xe6,0x7c]
+0x02,0x00,0xe6,0x7c
+
+# CHECK: v_cmpx_le_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xe6,0x7c]
+0x64,0x00,0xe6,0x7c
+
+# CHECK: v_cmpx_le_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xe6,0x7c]
+0x66,0x00,0xe6,0x7c
+
+# CHECK: v_cmpx_le_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xe6,0x7c]
+0x6a,0x00,0xe6,0x7c
+
+# CHECK: v_cmpx_le_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xe6,0x7c]
+0x6c,0x00,0xe6,0x7c
+
+# CHECK: v_cmpx_le_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xe6,0x7c]
+0x6e,0x00,0xe6,0x7c
+
+# CHECK: v_cmpx_le_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xe6,0x7c]
+0x7a,0x00,0xe6,0x7c
+
+# CHECK: v_cmpx_le_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xe6,0x7c]
+0x7e,0x00,0xe6,0x7c
+
+# CHECK: v_cmpx_le_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xe6,0x7c]
+0x80,0x00,0xe6,0x7c
+
+# CHECK: v_cmpx_le_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xe6,0x7c]
+0xc1,0x00,0xe6,0x7c
+
+# CHECK: v_cmpx_le_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xe6,0x7c]
+0xf0,0x00,0xe6,0x7c
+
+# CHECK: v_cmpx_le_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xe6,0x7c]
+0xf7,0x00,0xe6,0x7c
+
+# CHECK: v_cmpx_le_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xe6,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xe6,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_le_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xe6,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xe6,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_le_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xe6,0x7c]
+0x00,0x01,0xe6,0x7c
+
+# CHECK: v_cmpx_le_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xe6,0x7c]
+0xfe,0x01,0xe6,0x7c
+
+# CHECK: v_cmpx_le_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xe7,0x7c]
+0x00,0xfc,0xe7,0x7c
+
+# CHECK: v_cmpx_le_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x73,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x73,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x73,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x73,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x73,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x73,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x73,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x73,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x73,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x73,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x73,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x73,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x73,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x73,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x73,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x73,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x73,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x73,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x73,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x73,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x73,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x73,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_le_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x73,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x73,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_le_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x73,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x73,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_le_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x73,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x73,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_le_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x73,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x73,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_le_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x73,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x73,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_le_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x73,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x73,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_le_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x73,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x73,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_le_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x73,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x73,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_le_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x73,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x73,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_le_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x73,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x73,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_le_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x73,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x73,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe8,0x7c]
+0x00,0x00,0xe8,0x7c
+
+# CHECK: v_cmpx_gt_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xe8,0x7c]
+0x02,0x00,0xe8,0x7c
+
+# CHECK: v_cmpx_gt_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xe8,0x7c]
+0x64,0x00,0xe8,0x7c
+
+# CHECK: v_cmpx_gt_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xe8,0x7c]
+0x66,0x00,0xe8,0x7c
+
+# CHECK: v_cmpx_gt_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xe8,0x7c]
+0x6a,0x00,0xe8,0x7c
+
+# CHECK: v_cmpx_gt_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xe8,0x7c]
+0x6c,0x00,0xe8,0x7c
+
+# CHECK: v_cmpx_gt_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xe8,0x7c]
+0x6e,0x00,0xe8,0x7c
+
+# CHECK: v_cmpx_gt_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xe8,0x7c]
+0x7a,0x00,0xe8,0x7c
+
+# CHECK: v_cmpx_gt_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xe8,0x7c]
+0x7e,0x00,0xe8,0x7c
+
+# CHECK: v_cmpx_gt_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xe8,0x7c]
+0x80,0x00,0xe8,0x7c
+
+# CHECK: v_cmpx_gt_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xe8,0x7c]
+0xc1,0x00,0xe8,0x7c
+
+# CHECK: v_cmpx_gt_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xe8,0x7c]
+0xf0,0x00,0xe8,0x7c
+
+# CHECK: v_cmpx_gt_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xe8,0x7c]
+0xf7,0x00,0xe8,0x7c
+
+# CHECK: v_cmpx_gt_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xe8,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xe8,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_gt_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xe8,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xe8,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_gt_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xe8,0x7c]
+0x00,0x01,0xe8,0x7c
+
+# CHECK: v_cmpx_gt_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xe8,0x7c]
+0xfe,0x01,0xe8,0x7c
+
+# CHECK: v_cmpx_gt_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xe9,0x7c]
+0x00,0xfc,0xe9,0x7c
+
+# CHECK: v_cmpx_gt_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x74,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x74,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x74,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x74,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x74,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x74,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x74,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x74,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x74,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x74,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x74,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x74,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x74,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x74,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x74,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x74,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x74,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x74,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x74,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x74,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x74,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x74,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x74,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x74,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x74,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x74,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x74,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x74,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x74,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x74,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x74,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x74,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x74,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x74,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x74,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x74,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_gt_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x74,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x74,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_gt_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x74,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x74,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_gt_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x74,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x74,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_gt_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x74,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x74,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xea,0x7c]
+0x00,0x00,0xea,0x7c
+
+# CHECK: v_cmpx_lg_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xea,0x7c]
+0x02,0x00,0xea,0x7c
+
+# CHECK: v_cmpx_lg_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xea,0x7c]
+0x64,0x00,0xea,0x7c
+
+# CHECK: v_cmpx_lg_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xea,0x7c]
+0x66,0x00,0xea,0x7c
+
+# CHECK: v_cmpx_lg_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xea,0x7c]
+0x6a,0x00,0xea,0x7c
+
+# CHECK: v_cmpx_lg_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xea,0x7c]
+0x6c,0x00,0xea,0x7c
+
+# CHECK: v_cmpx_lg_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xea,0x7c]
+0x6e,0x00,0xea,0x7c
+
+# CHECK: v_cmpx_lg_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xea,0x7c]
+0x7a,0x00,0xea,0x7c
+
+# CHECK: v_cmpx_lg_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xea,0x7c]
+0x7e,0x00,0xea,0x7c
+
+# CHECK: v_cmpx_lg_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xea,0x7c]
+0x80,0x00,0xea,0x7c
+
+# CHECK: v_cmpx_lg_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xea,0x7c]
+0xc1,0x00,0xea,0x7c
+
+# CHECK: v_cmpx_lg_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xea,0x7c]
+0xf0,0x00,0xea,0x7c
+
+# CHECK: v_cmpx_lg_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xea,0x7c]
+0xf7,0x00,0xea,0x7c
+
+# CHECK: v_cmpx_lg_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xea,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xea,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_lg_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xea,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xea,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_lg_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xea,0x7c]
+0x00,0x01,0xea,0x7c
+
+# CHECK: v_cmpx_lg_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xea,0x7c]
+0xfe,0x01,0xea,0x7c
+
+# CHECK: v_cmpx_lg_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xeb,0x7c]
+0x00,0xfc,0xeb,0x7c
+
+# CHECK: v_cmpx_lg_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x75,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x75,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x75,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x75,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x75,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x75,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x75,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x75,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x75,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x75,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x75,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x75,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x75,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x75,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x75,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x75,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x75,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x75,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x75,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x75,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x75,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x75,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x75,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x75,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x75,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x75,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x75,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x75,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x75,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x75,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x75,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x75,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x75,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x75,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x75,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x75,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x75,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x75,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x75,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x75,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_lg_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x75,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x75,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_lg_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x75,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x75,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_lg_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x75,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x75,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_lg_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x75,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x75,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xec,0x7c]
+0x00,0x00,0xec,0x7c
+
+# CHECK: v_cmpx_ge_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xec,0x7c]
+0x02,0x00,0xec,0x7c
+
+# CHECK: v_cmpx_ge_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xec,0x7c]
+0x64,0x00,0xec,0x7c
+
+# CHECK: v_cmpx_ge_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xec,0x7c]
+0x66,0x00,0xec,0x7c
+
+# CHECK: v_cmpx_ge_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xec,0x7c]
+0x6a,0x00,0xec,0x7c
+
+# CHECK: v_cmpx_ge_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xec,0x7c]
+0x6c,0x00,0xec,0x7c
+
+# CHECK: v_cmpx_ge_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xec,0x7c]
+0x6e,0x00,0xec,0x7c
+
+# CHECK: v_cmpx_ge_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xec,0x7c]
+0x7a,0x00,0xec,0x7c
+
+# CHECK: v_cmpx_ge_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xec,0x7c]
+0x7e,0x00,0xec,0x7c
+
+# CHECK: v_cmpx_ge_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xec,0x7c]
+0x80,0x00,0xec,0x7c
+
+# CHECK: v_cmpx_ge_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xec,0x7c]
+0xc1,0x00,0xec,0x7c
+
+# CHECK: v_cmpx_ge_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xec,0x7c]
+0xf0,0x00,0xec,0x7c
+
+# CHECK: v_cmpx_ge_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xec,0x7c]
+0xf7,0x00,0xec,0x7c
+
+# CHECK: v_cmpx_ge_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xec,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xec,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_ge_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xec,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xec,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_ge_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xec,0x7c]
+0x00,0x01,0xec,0x7c
+
+# CHECK: v_cmpx_ge_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xec,0x7c]
+0xfe,0x01,0xec,0x7c
+
+# CHECK: v_cmpx_ge_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xed,0x7c]
+0x00,0xfc,0xed,0x7c
+
+# CHECK: v_cmpx_ge_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x76,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x76,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x76,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x76,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x76,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x76,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x76,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x76,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x76,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x76,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x76,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x76,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x76,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x76,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x76,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x76,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x76,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x76,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x76,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x76,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x76,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x76,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x76,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x76,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x76,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x76,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x76,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x76,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x76,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x76,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x76,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x76,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x76,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x76,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x76,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x76,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x76,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x76,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x76,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x76,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_ge_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x76,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x76,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_ge_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x76,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x76,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_ge_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x76,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x76,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_ge_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x76,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x76,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xee,0x7c]
+0x00,0x00,0xee,0x7c
+
+# CHECK: v_cmpx_o_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xee,0x7c]
+0x02,0x00,0xee,0x7c
+
+# CHECK: v_cmpx_o_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xee,0x7c]
+0x64,0x00,0xee,0x7c
+
+# CHECK: v_cmpx_o_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xee,0x7c]
+0x66,0x00,0xee,0x7c
+
+# CHECK: v_cmpx_o_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xee,0x7c]
+0x6a,0x00,0xee,0x7c
+
+# CHECK: v_cmpx_o_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xee,0x7c]
+0x6c,0x00,0xee,0x7c
+
+# CHECK: v_cmpx_o_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xee,0x7c]
+0x6e,0x00,0xee,0x7c
+
+# CHECK: v_cmpx_o_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xee,0x7c]
+0x7a,0x00,0xee,0x7c
+
+# CHECK: v_cmpx_o_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xee,0x7c]
+0x7e,0x00,0xee,0x7c
+
+# CHECK: v_cmpx_o_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xee,0x7c]
+0x80,0x00,0xee,0x7c
+
+# CHECK: v_cmpx_o_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xee,0x7c]
+0xc1,0x00,0xee,0x7c
+
+# CHECK: v_cmpx_o_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xee,0x7c]
+0xf0,0x00,0xee,0x7c
+
+# CHECK: v_cmpx_o_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xee,0x7c]
+0xf7,0x00,0xee,0x7c
+
+# CHECK: v_cmpx_o_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xee,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xee,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_o_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xee,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xee,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_o_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xee,0x7c]
+0x00,0x01,0xee,0x7c
+
+# CHECK: v_cmpx_o_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xee,0x7c]
+0xfe,0x01,0xee,0x7c
+
+# CHECK: v_cmpx_o_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xef,0x7c]
+0x00,0xfc,0xef,0x7c
+
+# CHECK: v_cmpx_o_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x77,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x77,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x77,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x77,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x77,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x77,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x77,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x77,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x77,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x77,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x77,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x77,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x77,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x77,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x77,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x77,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x77,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x77,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x77,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x77,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x77,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x77,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x77,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x77,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_o_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x77,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x77,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_o_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x77,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x77,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_o_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x77,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x77,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_o_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x77,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x77,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_o_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x77,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x77,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_o_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x77,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x77,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_o_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x77,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x77,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_o_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x77,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x77,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_o_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x77,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x77,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_o_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x77,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x77,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_o_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x77,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x77,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_o_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x77,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x77,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf0,0x7c]
+0x00,0x00,0xf0,0x7c
+
+# CHECK: v_cmpx_u_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xf0,0x7c]
+0x02,0x00,0xf0,0x7c
+
+# CHECK: v_cmpx_u_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xf0,0x7c]
+0x64,0x00,0xf0,0x7c
+
+# CHECK: v_cmpx_u_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xf0,0x7c]
+0x66,0x00,0xf0,0x7c
+
+# CHECK: v_cmpx_u_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xf0,0x7c]
+0x6a,0x00,0xf0,0x7c
+
+# CHECK: v_cmpx_u_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xf0,0x7c]
+0x6c,0x00,0xf0,0x7c
+
+# CHECK: v_cmpx_u_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xf0,0x7c]
+0x6e,0x00,0xf0,0x7c
+
+# CHECK: v_cmpx_u_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xf0,0x7c]
+0x7a,0x00,0xf0,0x7c
+
+# CHECK: v_cmpx_u_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xf0,0x7c]
+0x7e,0x00,0xf0,0x7c
+
+# CHECK: v_cmpx_u_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xf0,0x7c]
+0x80,0x00,0xf0,0x7c
+
+# CHECK: v_cmpx_u_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xf0,0x7c]
+0xc1,0x00,0xf0,0x7c
+
+# CHECK: v_cmpx_u_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xf0,0x7c]
+0xf0,0x00,0xf0,0x7c
+
+# CHECK: v_cmpx_u_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xf0,0x7c]
+0xf7,0x00,0xf0,0x7c
+
+# CHECK: v_cmpx_u_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xf0,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xf0,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_u_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xf0,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xf0,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_u_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xf0,0x7c]
+0x00,0x01,0xf0,0x7c
+
+# CHECK: v_cmpx_u_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xf0,0x7c]
+0xfe,0x01,0xf0,0x7c
+
+# CHECK: v_cmpx_u_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xf1,0x7c]
+0x00,0xfc,0xf1,0x7c
+
+# CHECK: v_cmpx_u_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x78,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x78,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x78,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x78,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x78,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x78,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x78,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x78,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x78,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x78,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x78,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x78,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x78,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x78,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x78,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x78,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x78,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x78,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x78,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x78,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x78,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x78,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x78,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x78,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_u_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x78,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x78,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_u_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x78,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x78,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_u_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x78,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x78,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_u_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x78,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x78,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_u_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x78,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x78,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_u_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x78,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x78,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_u_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x78,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x78,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_u_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x78,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x78,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_u_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x78,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x78,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_u_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x78,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x78,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_u_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x78,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x78,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_u_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x78,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x78,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf2,0x7c]
+0x00,0x00,0xf2,0x7c
+
+# CHECK: v_cmpx_nge_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xf2,0x7c]
+0x02,0x00,0xf2,0x7c
+
+# CHECK: v_cmpx_nge_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xf2,0x7c]
+0x64,0x00,0xf2,0x7c
+
+# CHECK: v_cmpx_nge_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xf2,0x7c]
+0x66,0x00,0xf2,0x7c
+
+# CHECK: v_cmpx_nge_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xf2,0x7c]
+0x6a,0x00,0xf2,0x7c
+
+# CHECK: v_cmpx_nge_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xf2,0x7c]
+0x6c,0x00,0xf2,0x7c
+
+# CHECK: v_cmpx_nge_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xf2,0x7c]
+0x6e,0x00,0xf2,0x7c
+
+# CHECK: v_cmpx_nge_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xf2,0x7c]
+0x7a,0x00,0xf2,0x7c
+
+# CHECK: v_cmpx_nge_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xf2,0x7c]
+0x7e,0x00,0xf2,0x7c
+
+# CHECK: v_cmpx_nge_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xf2,0x7c]
+0x80,0x00,0xf2,0x7c
+
+# CHECK: v_cmpx_nge_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xf2,0x7c]
+0xc1,0x00,0xf2,0x7c
+
+# CHECK: v_cmpx_nge_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xf2,0x7c]
+0xf0,0x00,0xf2,0x7c
+
+# CHECK: v_cmpx_nge_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xf2,0x7c]
+0xf7,0x00,0xf2,0x7c
+
+# CHECK: v_cmpx_nge_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xf2,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xf2,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_nge_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xf2,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xf2,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_nge_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xf2,0x7c]
+0x00,0x01,0xf2,0x7c
+
+# CHECK: v_cmpx_nge_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xf2,0x7c]
+0xfe,0x01,0xf2,0x7c
+
+# CHECK: v_cmpx_nge_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xf3,0x7c]
+0x00,0xfc,0xf3,0x7c
+
+# CHECK: v_cmpx_nge_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x79,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x79,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x79,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x79,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x79,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x79,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x79,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x79,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x79,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x79,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x79,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x79,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x79,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x79,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x79,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x79,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x79,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x79,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x79,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x79,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x79,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x79,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x79,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x79,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x79,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x79,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x79,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x79,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x79,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x79,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x79,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x79,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x79,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x79,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x79,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x79,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_nge_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x79,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x79,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_nge_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x79,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x79,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_nge_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x79,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x79,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_nge_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x79,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x79,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf4,0x7c]
+0x00,0x00,0xf4,0x7c
+
+# CHECK: v_cmpx_nlg_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xf4,0x7c]
+0x02,0x00,0xf4,0x7c
+
+# CHECK: v_cmpx_nlg_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xf4,0x7c]
+0x64,0x00,0xf4,0x7c
+
+# CHECK: v_cmpx_nlg_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xf4,0x7c]
+0x66,0x00,0xf4,0x7c
+
+# CHECK: v_cmpx_nlg_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xf4,0x7c]
+0x6a,0x00,0xf4,0x7c
+
+# CHECK: v_cmpx_nlg_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xf4,0x7c]
+0x6c,0x00,0xf4,0x7c
+
+# CHECK: v_cmpx_nlg_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xf4,0x7c]
+0x6e,0x00,0xf4,0x7c
+
+# CHECK: v_cmpx_nlg_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xf4,0x7c]
+0x7a,0x00,0xf4,0x7c
+
+# CHECK: v_cmpx_nlg_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xf4,0x7c]
+0x7e,0x00,0xf4,0x7c
+
+# CHECK: v_cmpx_nlg_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xf4,0x7c]
+0x80,0x00,0xf4,0x7c
+
+# CHECK: v_cmpx_nlg_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xf4,0x7c]
+0xc1,0x00,0xf4,0x7c
+
+# CHECK: v_cmpx_nlg_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xf4,0x7c]
+0xf0,0x00,0xf4,0x7c
+
+# CHECK: v_cmpx_nlg_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xf4,0x7c]
+0xf7,0x00,0xf4,0x7c
+
+# CHECK: v_cmpx_nlg_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xf4,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xf4,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_nlg_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xf4,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xf4,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_nlg_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xf4,0x7c]
+0x00,0x01,0xf4,0x7c
+
+# CHECK: v_cmpx_nlg_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xf4,0x7c]
+0xfe,0x01,0xf4,0x7c
+
+# CHECK: v_cmpx_nlg_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xf5,0x7c]
+0x00,0xfc,0xf5,0x7c
+
+# CHECK: v_cmpx_nlg_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x7a,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x7a,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x7a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x7a,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x7a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x7a,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x7a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x7a,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x7a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x7a,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x7a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x7a,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x7a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x7a,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x7a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x7a,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x7a,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x7a,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x7a,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x7a,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x7a,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x7a,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x7a,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x7a,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x7a,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x7a,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x7a,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x7a,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x7a,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x7a,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x7a,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x7a,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x7a,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x7a,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x7a,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x7a,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_nlg_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x7a,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x7a,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_nlg_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x7a,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x7a,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_nlg_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x7a,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x7a,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_nlg_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x7a,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x7a,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf6,0x7c]
+0x00,0x00,0xf6,0x7c
+
+# CHECK: v_cmpx_ngt_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xf6,0x7c]
+0x02,0x00,0xf6,0x7c
+
+# CHECK: v_cmpx_ngt_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xf6,0x7c]
+0x64,0x00,0xf6,0x7c
+
+# CHECK: v_cmpx_ngt_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xf6,0x7c]
+0x66,0x00,0xf6,0x7c
+
+# CHECK: v_cmpx_ngt_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xf6,0x7c]
+0x6a,0x00,0xf6,0x7c
+
+# CHECK: v_cmpx_ngt_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xf6,0x7c]
+0x6c,0x00,0xf6,0x7c
+
+# CHECK: v_cmpx_ngt_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xf6,0x7c]
+0x6e,0x00,0xf6,0x7c
+
+# CHECK: v_cmpx_ngt_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xf6,0x7c]
+0x7a,0x00,0xf6,0x7c
+
+# CHECK: v_cmpx_ngt_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xf6,0x7c]
+0x7e,0x00,0xf6,0x7c
+
+# CHECK: v_cmpx_ngt_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xf6,0x7c]
+0x80,0x00,0xf6,0x7c
+
+# CHECK: v_cmpx_ngt_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xf6,0x7c]
+0xc1,0x00,0xf6,0x7c
+
+# CHECK: v_cmpx_ngt_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xf6,0x7c]
+0xf0,0x00,0xf6,0x7c
+
+# CHECK: v_cmpx_ngt_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xf6,0x7c]
+0xf7,0x00,0xf6,0x7c
+
+# CHECK: v_cmpx_ngt_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xf6,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xf6,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_ngt_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xf6,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xf6,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_ngt_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xf6,0x7c]
+0x00,0x01,0xf6,0x7c
+
+# CHECK: v_cmpx_ngt_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xf6,0x7c]
+0xfe,0x01,0xf6,0x7c
+
+# CHECK: v_cmpx_ngt_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xf7,0x7c]
+0x00,0xfc,0xf7,0x7c
+
+# CHECK: v_cmpx_ngt_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x7b,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x7b,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x7b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x7b,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x7b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x7b,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x7b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x7b,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x7b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x7b,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x7b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x7b,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x7b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x7b,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x7b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x7b,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x7b,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x7b,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x7b,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x7b,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x7b,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x7b,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x7b,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x7b,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x7b,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x7b,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x7b,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x7b,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x7b,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x7b,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x7b,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x7b,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x7b,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x7b,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x7b,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x7b,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_ngt_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x7b,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x7b,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_ngt_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x7b,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x7b,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_ngt_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x7b,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x7b,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_ngt_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x7b,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x7b,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf8,0x7c]
+0x00,0x00,0xf8,0x7c
+
+# CHECK: v_cmpx_nle_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xf8,0x7c]
+0x02,0x00,0xf8,0x7c
+
+# CHECK: v_cmpx_nle_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xf8,0x7c]
+0x64,0x00,0xf8,0x7c
+
+# CHECK: v_cmpx_nle_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xf8,0x7c]
+0x66,0x00,0xf8,0x7c
+
+# CHECK: v_cmpx_nle_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xf8,0x7c]
+0x6a,0x00,0xf8,0x7c
+
+# CHECK: v_cmpx_nle_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xf8,0x7c]
+0x6c,0x00,0xf8,0x7c
+
+# CHECK: v_cmpx_nle_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xf8,0x7c]
+0x6e,0x00,0xf8,0x7c
+
+# CHECK: v_cmpx_nle_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xf8,0x7c]
+0x7a,0x00,0xf8,0x7c
+
+# CHECK: v_cmpx_nle_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xf8,0x7c]
+0x7e,0x00,0xf8,0x7c
+
+# CHECK: v_cmpx_nle_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xf8,0x7c]
+0x80,0x00,0xf8,0x7c
+
+# CHECK: v_cmpx_nle_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xf8,0x7c]
+0xc1,0x00,0xf8,0x7c
+
+# CHECK: v_cmpx_nle_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xf8,0x7c]
+0xf0,0x00,0xf8,0x7c
+
+# CHECK: v_cmpx_nle_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xf8,0x7c]
+0xf7,0x00,0xf8,0x7c
+
+# CHECK: v_cmpx_nle_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xf8,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xf8,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_nle_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xf8,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xf8,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_nle_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xf8,0x7c]
+0x00,0x01,0xf8,0x7c
+
+# CHECK: v_cmpx_nle_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xf8,0x7c]
+0xfe,0x01,0xf8,0x7c
+
+# CHECK: v_cmpx_nle_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xf9,0x7c]
+0x00,0xfc,0xf9,0x7c
+
+# CHECK: v_cmpx_nle_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x7c,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x7c,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x7c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x7c,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x7c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x7c,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x7c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x7c,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x7c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x7c,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x7c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x7c,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x7c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x7c,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x7c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x7c,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x7c,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x7c,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x7c,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x7c,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x7c,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x7c,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x7c,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x7c,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x7c,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x7c,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x7c,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x7c,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x7c,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x7c,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x7c,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x7c,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x7c,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x7c,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x7c,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x7c,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_nle_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x7c,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x7c,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_nle_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x7c,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x7c,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_nle_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x7c,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x7c,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_nle_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x7c,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x7c,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xfa,0x7c]
+0x00,0x00,0xfa,0x7c
+
+# CHECK: v_cmpx_neq_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xfa,0x7c]
+0x02,0x00,0xfa,0x7c
+
+# CHECK: v_cmpx_neq_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xfa,0x7c]
+0x64,0x00,0xfa,0x7c
+
+# CHECK: v_cmpx_neq_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xfa,0x7c]
+0x66,0x00,0xfa,0x7c
+
+# CHECK: v_cmpx_neq_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xfa,0x7c]
+0x6a,0x00,0xfa,0x7c
+
+# CHECK: v_cmpx_neq_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xfa,0x7c]
+0x6c,0x00,0xfa,0x7c
+
+# CHECK: v_cmpx_neq_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xfa,0x7c]
+0x6e,0x00,0xfa,0x7c
+
+# CHECK: v_cmpx_neq_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xfa,0x7c]
+0x7a,0x00,0xfa,0x7c
+
+# CHECK: v_cmpx_neq_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xfa,0x7c]
+0x7e,0x00,0xfa,0x7c
+
+# CHECK: v_cmpx_neq_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xfa,0x7c]
+0x80,0x00,0xfa,0x7c
+
+# CHECK: v_cmpx_neq_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xfa,0x7c]
+0xc1,0x00,0xfa,0x7c
+
+# CHECK: v_cmpx_neq_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xfa,0x7c]
+0xf0,0x00,0xfa,0x7c
+
+# CHECK: v_cmpx_neq_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xfa,0x7c]
+0xf7,0x00,0xfa,0x7c
+
+# CHECK: v_cmpx_neq_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xfa,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xfa,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_neq_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xfa,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xfa,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_neq_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xfa,0x7c]
+0x00,0x01,0xfa,0x7c
+
+# CHECK: v_cmpx_neq_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xfa,0x7c]
+0xfe,0x01,0xfa,0x7c
+
+# CHECK: v_cmpx_neq_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xfb,0x7c]
+0x00,0xfc,0xfb,0x7c
+
+# CHECK: v_cmpx_neq_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x7d,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x7d,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x7d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x7d,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x7d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x7d,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x7d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x7d,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x7d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x7d,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x7d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x7d,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x7d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x7d,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x7d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x7d,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x7d,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x7d,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x7d,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x7d,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x7d,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x7d,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x7d,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x7d,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x7d,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x7d,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x7d,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x7d,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x7d,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x7d,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x7d,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x7d,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x7d,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x7d,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x7d,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x7d,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_neq_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x7d,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x7d,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_neq_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x7d,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x7d,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_neq_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x7d,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x7d,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_neq_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x7d,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x7d,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xfc,0x7c]
+0x00,0x00,0xfc,0x7c
+
+# CHECK: v_cmpx_nlt_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xfc,0x7c]
+0x02,0x00,0xfc,0x7c
+
+# CHECK: v_cmpx_nlt_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xfc,0x7c]
+0x64,0x00,0xfc,0x7c
+
+# CHECK: v_cmpx_nlt_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xfc,0x7c]
+0x66,0x00,0xfc,0x7c
+
+# CHECK: v_cmpx_nlt_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xfc,0x7c]
+0x6a,0x00,0xfc,0x7c
+
+# CHECK: v_cmpx_nlt_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xfc,0x7c]
+0x6c,0x00,0xfc,0x7c
+
+# CHECK: v_cmpx_nlt_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xfc,0x7c]
+0x6e,0x00,0xfc,0x7c
+
+# CHECK: v_cmpx_nlt_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xfc,0x7c]
+0x7a,0x00,0xfc,0x7c
+
+# CHECK: v_cmpx_nlt_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xfc,0x7c]
+0x7e,0x00,0xfc,0x7c
+
+# CHECK: v_cmpx_nlt_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xfc,0x7c]
+0x80,0x00,0xfc,0x7c
+
+# CHECK: v_cmpx_nlt_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xfc,0x7c]
+0xc1,0x00,0xfc,0x7c
+
+# CHECK: v_cmpx_nlt_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xfc,0x7c]
+0xf0,0x00,0xfc,0x7c
+
+# CHECK: v_cmpx_nlt_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xfc,0x7c]
+0xf7,0x00,0xfc,0x7c
+
+# CHECK: v_cmpx_nlt_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xfc,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xfc,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_nlt_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xfc,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xfc,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_nlt_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xfc,0x7c]
+0x00,0x01,0xfc,0x7c
+
+# CHECK: v_cmpx_nlt_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xfc,0x7c]
+0xfe,0x01,0xfc,0x7c
+
+# CHECK: v_cmpx_nlt_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xfd,0x7c]
+0x00,0xfc,0xfd,0x7c
+
+# CHECK: v_cmpx_nlt_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x7e,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x7e,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x7e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x7e,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x7e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x7e,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x7e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x7e,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x7e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x7e,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x7e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x7e,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x7e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x7e,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x7e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x7e,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x7e,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x7e,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x7e,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x7e,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x7e,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x7e,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x7e,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x7e,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x7e,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x7e,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x7e,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x7e,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x7e,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x7e,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x7e,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x7e,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x7e,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x7e,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x7e,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x7e,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_nlt_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x7e,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x7e,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_nlt_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x7e,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x7e,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_nlt_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x7e,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x7e,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_nlt_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x7e,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x7e,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xfe,0x7c]
+0x00,0x00,0xfe,0x7c
+
+# CHECK: v_cmpx_tru_f64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xfe,0x7c]
+0x02,0x00,0xfe,0x7c
+
+# CHECK: v_cmpx_tru_f64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xfe,0x7c]
+0x64,0x00,0xfe,0x7c
+
+# CHECK: v_cmpx_tru_f64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xfe,0x7c]
+0x66,0x00,0xfe,0x7c
+
+# CHECK: v_cmpx_tru_f64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xfe,0x7c]
+0x6a,0x00,0xfe,0x7c
+
+# CHECK: v_cmpx_tru_f64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xfe,0x7c]
+0x6c,0x00,0xfe,0x7c
+
+# CHECK: v_cmpx_tru_f64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xfe,0x7c]
+0x6e,0x00,0xfe,0x7c
+
+# CHECK: v_cmpx_tru_f64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xfe,0x7c]
+0x7a,0x00,0xfe,0x7c
+
+# CHECK: v_cmpx_tru_f64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xfe,0x7c]
+0x7e,0x00,0xfe,0x7c
+
+# CHECK: v_cmpx_tru_f64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xfe,0x7c]
+0x80,0x00,0xfe,0x7c
+
+# CHECK: v_cmpx_tru_f64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xfe,0x7c]
+0xc1,0x00,0xfe,0x7c
+
+# CHECK: v_cmpx_tru_f64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xfe,0x7c]
+0xf0,0x00,0xfe,0x7c
+
+# CHECK: v_cmpx_tru_f64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xfe,0x7c]
+0xf7,0x00,0xfe,0x7c
+
+# CHECK: v_cmpx_tru_f64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xfe,0x7c,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xfe,0x7c,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_tru_f64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xfe,0x7c,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xfe,0x7c,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_tru_f64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xfe,0x7c]
+0x00,0x01,0xfe,0x7c
+
+# CHECK: v_cmpx_tru_f64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xfe,0x7c]
+0xfe,0x01,0xfe,0x7c
+
+# CHECK: v_cmpx_tru_f64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xff,0x7c]
+0x00,0xfc,0xff,0x7c
+
+# CHECK: v_cmpx_tru_f64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x7f,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0x7f,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0x7f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0x7f,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0x7f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0x7f,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0x7f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0x7f,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0x7f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0x7f,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0x7f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0x7f,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0x7f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0x7f,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0x7f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0x7f,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0x7f,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0x7f,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0x7f,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0x7f,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0x7f,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0x7f,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0x7f,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0x7f,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0x7f,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0x7f,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0x7f,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0x7f,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0x7f,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0x7f,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0x7f,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0x7f,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0x7f,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0x7f,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0x7f,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0x7f,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_tru_f64_e64 s[0:1], -s[0:1], s[0:1] ; encoding: [0x00,0x00,0x7f,0xd0,0x00,0x00,0x00,0x20]
+0x00,0x00,0x7f,0xd0,0x00,0x00,0x00,0x20
+
+# CHECK: v_cmpx_tru_f64_e64 s[0:1], s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x7f,0xd0,0x00,0x00,0x00,0x40]
+0x00,0x00,0x7f,0xd0,0x00,0x00,0x00,0x40
+
+# CHECK: v_cmpx_tru_f64_e64 s[0:1], -s[0:1], -s[0:1] ; encoding: [0x00,0x00,0x7f,0xd0,0x00,0x00,0x00,0x60]
+0x00,0x00,0x7f,0xd0,0x00,0x00,0x00,0x60
+
+# CHECK: v_cmpx_tru_f64_e64 s[0:1], s[0:1], s[0:1] clamp ; encoding: [0x00,0x80,0x7f,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x80,0x7f,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x40,0x7d]
+0x00,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x40,0x7d]
+0x65,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x40,0x7d]
+0x66,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x40,0x7d]
+0x67,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x40,0x7d]
+0x6a,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x40,0x7d]
+0x6b,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x40,0x7d]
+0x6c,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x40,0x7d]
+0x6d,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x40,0x7d]
+0x6e,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x40,0x7d]
+0x6f,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x40,0x7d]
+0x7b,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x40,0x7d]
+0x7c,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x40,0x7d]
+0x7e,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x40,0x7d]
+0x7f,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x40,0x7d]
+0x80,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x40,0x7d]
+0xc1,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x40,0x7d]
+0xf0,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x40,0x7d]
+0xf7,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x40,0x7d]
+0xfd,0x00,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x40,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x40,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_f_i16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x40,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x40,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_f_i16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x40,0x7d]
+0x00,0x01,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x40,0x7d]
+0xff,0x01,0x40,0x7d
+
+# CHECK: v_cmp_f_i16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x41,0x7d]
+0x00,0xfe,0x41,0x7d
+
+# CHECK: v_cmp_f_i16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xa0,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xa0,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xa0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xa0,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xa0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xa0,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xa0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xa0,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xa0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xa0,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xa0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xa0,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xa0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xa0,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xa0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xa0,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xa0,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xa0,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xa0,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xa0,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xa0,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xa0,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xa0,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xa0,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xa0,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xa0,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xa0,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_f_i16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xa0,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xa0,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_f_i16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xa0,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xa0,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_f_i16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xa0,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xa0,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_f_i16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xa0,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xa0,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_f_i16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xa0,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xa0,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_f_i16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xa0,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xa0,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_f_i16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xa0,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xa0,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_f_i16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xa0,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xa0,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_lt_i16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x42,0x7d]
+0x00,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x42,0x7d]
+0x65,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x42,0x7d]
+0x66,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x42,0x7d]
+0x67,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x42,0x7d]
+0x6a,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x42,0x7d]
+0x6b,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x42,0x7d]
+0x6c,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x42,0x7d]
+0x6d,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x42,0x7d]
+0x6e,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x42,0x7d]
+0x6f,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x42,0x7d]
+0x7b,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x42,0x7d]
+0x7c,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x42,0x7d]
+0x7e,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x42,0x7d]
+0x7f,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x42,0x7d]
+0x80,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x42,0x7d]
+0xc1,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x42,0x7d]
+0xf0,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x42,0x7d]
+0xf7,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x42,0x7d]
+0xfd,0x00,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x42,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x42,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_lt_i16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x42,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x42,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_lt_i16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x42,0x7d]
+0x00,0x01,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x42,0x7d]
+0xff,0x01,0x42,0x7d
+
+# CHECK: v_cmp_lt_i16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x43,0x7d]
+0x00,0xfe,0x43,0x7d
+
+# CHECK: v_cmp_lt_i16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xa1,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xa1,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xa1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xa1,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xa1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xa1,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xa1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xa1,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xa1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xa1,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xa1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xa1,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xa1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xa1,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xa1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xa1,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xa1,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xa1,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xa1,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xa1,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xa1,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xa1,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xa1,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xa1,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xa1,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xa1,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xa1,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_lt_i16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xa1,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xa1,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_lt_i16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xa1,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xa1,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_lt_i16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xa1,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xa1,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_lt_i16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xa1,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xa1,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_lt_i16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xa1,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xa1,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_lt_i16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xa1,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xa1,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_lt_i16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xa1,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xa1,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_lt_i16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xa1,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xa1,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_eq_i16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x44,0x7d]
+0x00,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x44,0x7d]
+0x65,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x44,0x7d]
+0x66,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x44,0x7d]
+0x67,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x44,0x7d]
+0x6a,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x44,0x7d]
+0x6b,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x44,0x7d]
+0x6c,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x44,0x7d]
+0x6d,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x44,0x7d]
+0x6e,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x44,0x7d]
+0x6f,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x44,0x7d]
+0x7b,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x44,0x7d]
+0x7c,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x44,0x7d]
+0x7e,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x44,0x7d]
+0x7f,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x44,0x7d]
+0x80,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x44,0x7d]
+0xc1,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x44,0x7d]
+0xf0,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x44,0x7d]
+0xf7,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x44,0x7d]
+0xfd,0x00,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x44,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x44,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_eq_i16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x44,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x44,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_eq_i16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x44,0x7d]
+0x00,0x01,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x44,0x7d]
+0xff,0x01,0x44,0x7d
+
+# CHECK: v_cmp_eq_i16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x45,0x7d]
+0x00,0xfe,0x45,0x7d
+
+# CHECK: v_cmp_eq_i16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xa2,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xa2,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xa2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xa2,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xa2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xa2,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xa2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xa2,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xa2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xa2,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xa2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xa2,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xa2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xa2,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xa2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xa2,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xa2,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xa2,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xa2,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xa2,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xa2,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xa2,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xa2,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xa2,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xa2,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xa2,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xa2,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_eq_i16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xa2,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xa2,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_eq_i16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xa2,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xa2,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_eq_i16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xa2,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xa2,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_eq_i16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xa2,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xa2,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_eq_i16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xa2,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xa2,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_eq_i16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xa2,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xa2,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_eq_i16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xa2,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xa2,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_eq_i16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xa2,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xa2,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_le_i16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x46,0x7d]
+0x00,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x46,0x7d]
+0x65,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x46,0x7d]
+0x66,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x46,0x7d]
+0x67,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x46,0x7d]
+0x6a,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x46,0x7d]
+0x6b,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x46,0x7d]
+0x6c,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x46,0x7d]
+0x6d,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x46,0x7d]
+0x6e,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x46,0x7d]
+0x6f,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x46,0x7d]
+0x7b,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x46,0x7d]
+0x7c,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x46,0x7d]
+0x7e,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x46,0x7d]
+0x7f,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x46,0x7d]
+0x80,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x46,0x7d]
+0xc1,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x46,0x7d]
+0xf0,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x46,0x7d]
+0xf7,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x46,0x7d]
+0xfd,0x00,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x46,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x46,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_le_i16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x46,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x46,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_le_i16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x46,0x7d]
+0x00,0x01,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x46,0x7d]
+0xff,0x01,0x46,0x7d
+
+# CHECK: v_cmp_le_i16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x47,0x7d]
+0x00,0xfe,0x47,0x7d
+
+# CHECK: v_cmp_le_i16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xa3,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xa3,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xa3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xa3,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xa3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xa3,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xa3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xa3,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xa3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xa3,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xa3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xa3,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xa3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xa3,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xa3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xa3,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xa3,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xa3,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xa3,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xa3,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xa3,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xa3,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xa3,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xa3,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xa3,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xa3,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xa3,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_le_i16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xa3,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xa3,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_le_i16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xa3,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xa3,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_le_i16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xa3,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xa3,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_le_i16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xa3,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xa3,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_le_i16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xa3,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xa3,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_le_i16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xa3,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xa3,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_le_i16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xa3,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xa3,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_le_i16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xa3,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xa3,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_gt_i16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x48,0x7d]
+0x00,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x48,0x7d]
+0x65,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x48,0x7d]
+0x66,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x48,0x7d]
+0x67,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x48,0x7d]
+0x6a,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x48,0x7d]
+0x6b,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x48,0x7d]
+0x6c,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x48,0x7d]
+0x6d,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x48,0x7d]
+0x6e,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x48,0x7d]
+0x6f,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x48,0x7d]
+0x7b,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x48,0x7d]
+0x7c,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x48,0x7d]
+0x7e,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x48,0x7d]
+0x7f,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x48,0x7d]
+0x80,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x48,0x7d]
+0xc1,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x48,0x7d]
+0xf0,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x48,0x7d]
+0xf7,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x48,0x7d]
+0xfd,0x00,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x48,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x48,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_gt_i16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x48,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x48,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_gt_i16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x48,0x7d]
+0x00,0x01,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x48,0x7d]
+0xff,0x01,0x48,0x7d
+
+# CHECK: v_cmp_gt_i16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x49,0x7d]
+0x00,0xfe,0x49,0x7d
+
+# CHECK: v_cmp_gt_i16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xa4,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xa4,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xa4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xa4,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xa4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xa4,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xa4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xa4,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xa4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xa4,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xa4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xa4,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xa4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xa4,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xa4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xa4,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xa4,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xa4,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xa4,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xa4,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xa4,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xa4,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xa4,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xa4,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xa4,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xa4,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xa4,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_gt_i16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xa4,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xa4,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_gt_i16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xa4,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xa4,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_gt_i16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xa4,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xa4,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_gt_i16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xa4,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xa4,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_gt_i16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xa4,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xa4,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_gt_i16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xa4,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xa4,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_gt_i16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xa4,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xa4,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_gt_i16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xa4,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xa4,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_ne_i16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x4a,0x7d]
+0x00,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x4a,0x7d]
+0x65,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x4a,0x7d]
+0x66,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x4a,0x7d]
+0x67,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x4a,0x7d]
+0x6a,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x4a,0x7d]
+0x6b,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x4a,0x7d]
+0x6c,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x4a,0x7d]
+0x6d,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x4a,0x7d]
+0x6e,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x4a,0x7d]
+0x6f,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x4a,0x7d]
+0x7b,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x4a,0x7d]
+0x7c,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x4a,0x7d]
+0x7e,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x4a,0x7d]
+0x7f,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x4a,0x7d]
+0x80,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x4a,0x7d]
+0xc1,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x4a,0x7d]
+0xf0,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x4a,0x7d]
+0xf7,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x4a,0x7d]
+0xfd,0x00,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x4a,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x4a,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_ne_i16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x4a,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x4a,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_ne_i16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x4a,0x7d]
+0x00,0x01,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x4a,0x7d]
+0xff,0x01,0x4a,0x7d
+
+# CHECK: v_cmp_ne_i16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x4b,0x7d]
+0x00,0xfe,0x4b,0x7d
+
+# CHECK: v_cmp_ne_i16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xa5,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xa5,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xa5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xa5,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xa5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xa5,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xa5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xa5,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xa5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xa5,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xa5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xa5,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xa5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xa5,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xa5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xa5,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xa5,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xa5,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xa5,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xa5,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xa5,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xa5,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xa5,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xa5,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xa5,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xa5,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xa5,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_ne_i16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xa5,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xa5,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_ne_i16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xa5,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xa5,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_ne_i16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xa5,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xa5,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_ne_i16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xa5,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xa5,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_ne_i16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xa5,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xa5,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_ne_i16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xa5,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xa5,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_ne_i16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xa5,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xa5,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_ne_i16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xa5,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xa5,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_ge_i16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x4c,0x7d]
+0x00,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x4c,0x7d]
+0x65,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x4c,0x7d]
+0x66,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x4c,0x7d]
+0x67,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x4c,0x7d]
+0x6a,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x4c,0x7d]
+0x6b,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x4c,0x7d]
+0x6c,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x4c,0x7d]
+0x6d,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x4c,0x7d]
+0x6e,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x4c,0x7d]
+0x6f,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x4c,0x7d]
+0x7b,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x4c,0x7d]
+0x7c,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x4c,0x7d]
+0x7e,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x4c,0x7d]
+0x7f,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x4c,0x7d]
+0x80,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x4c,0x7d]
+0xc1,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x4c,0x7d]
+0xf0,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x4c,0x7d]
+0xf7,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x4c,0x7d]
+0xfd,0x00,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x4c,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x4c,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_ge_i16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x4c,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x4c,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_ge_i16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x4c,0x7d]
+0x00,0x01,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x4c,0x7d]
+0xff,0x01,0x4c,0x7d
+
+# CHECK: v_cmp_ge_i16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x4d,0x7d]
+0x00,0xfe,0x4d,0x7d
+
+# CHECK: v_cmp_ge_i16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xa6,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xa6,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xa6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xa6,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xa6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xa6,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xa6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xa6,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xa6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xa6,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xa6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xa6,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xa6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xa6,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xa6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xa6,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xa6,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xa6,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xa6,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xa6,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xa6,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xa6,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xa6,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xa6,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xa6,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xa6,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xa6,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_ge_i16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xa6,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xa6,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_ge_i16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xa6,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xa6,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_ge_i16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xa6,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xa6,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_ge_i16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xa6,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xa6,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_ge_i16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xa6,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xa6,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_ge_i16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xa6,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xa6,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_ge_i16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xa6,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xa6,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_ge_i16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xa6,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xa6,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_t_i16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x4e,0x7d]
+0x00,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x4e,0x7d]
+0x65,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x4e,0x7d]
+0x66,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x4e,0x7d]
+0x67,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x4e,0x7d]
+0x6a,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x4e,0x7d]
+0x6b,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x4e,0x7d]
+0x6c,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x4e,0x7d]
+0x6d,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x4e,0x7d]
+0x6e,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x4e,0x7d]
+0x6f,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x4e,0x7d]
+0x7b,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x4e,0x7d]
+0x7c,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x4e,0x7d]
+0x7e,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x4e,0x7d]
+0x7f,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x4e,0x7d]
+0x80,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x4e,0x7d]
+0xc1,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x4e,0x7d]
+0xf0,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x4e,0x7d]
+0xf7,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x4e,0x7d]
+0xfd,0x00,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x4e,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x4e,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_t_i16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x4e,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x4e,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_t_i16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x4e,0x7d]
+0x00,0x01,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x4e,0x7d]
+0xff,0x01,0x4e,0x7d
+
+# CHECK: v_cmp_t_i16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x4f,0x7d]
+0x00,0xfe,0x4f,0x7d
+
+# CHECK: v_cmp_t_i16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xa7,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xa7,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xa7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xa7,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xa7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xa7,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xa7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xa7,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xa7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xa7,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xa7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xa7,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xa7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xa7,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xa7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xa7,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xa7,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xa7,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xa7,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xa7,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xa7,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xa7,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xa7,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xa7,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xa7,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xa7,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xa7,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_t_i16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xa7,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xa7,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_t_i16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xa7,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xa7,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_t_i16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xa7,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xa7,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_t_i16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xa7,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xa7,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_t_i16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xa7,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xa7,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_t_i16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xa7,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xa7,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_t_i16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xa7,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xa7,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_t_i16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xa7,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xa7,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_f_u16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x50,0x7d]
+0x00,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x50,0x7d]
+0x65,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x50,0x7d]
+0x66,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x50,0x7d]
+0x67,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x50,0x7d]
+0x6a,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x50,0x7d]
+0x6b,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x50,0x7d]
+0x6c,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x50,0x7d]
+0x6d,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x50,0x7d]
+0x6e,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x50,0x7d]
+0x6f,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x50,0x7d]
+0x7b,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x50,0x7d]
+0x7c,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x50,0x7d]
+0x7e,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x50,0x7d]
+0x7f,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x50,0x7d]
+0x80,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x50,0x7d]
+0xc1,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x50,0x7d]
+0xf0,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x50,0x7d]
+0xf7,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x50,0x7d]
+0xfd,0x00,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x50,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x50,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_f_u16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x50,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x50,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_f_u16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x50,0x7d]
+0x00,0x01,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x50,0x7d]
+0xff,0x01,0x50,0x7d
+
+# CHECK: v_cmp_f_u16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x51,0x7d]
+0x00,0xfe,0x51,0x7d
+
+# CHECK: v_cmp_f_u16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xa8,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xa8,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xa8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xa8,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xa8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xa8,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xa8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xa8,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xa8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xa8,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xa8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xa8,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xa8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xa8,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xa8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xa8,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xa8,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xa8,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xa8,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xa8,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xa8,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xa8,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xa8,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xa8,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xa8,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xa8,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xa8,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_f_u16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xa8,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xa8,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_f_u16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xa8,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xa8,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_f_u16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xa8,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xa8,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_f_u16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xa8,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xa8,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_f_u16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xa8,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xa8,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_f_u16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xa8,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xa8,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_f_u16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xa8,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xa8,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_f_u16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xa8,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xa8,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_lt_u16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x52,0x7d]
+0x00,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x52,0x7d]
+0x65,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x52,0x7d]
+0x66,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x52,0x7d]
+0x67,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x52,0x7d]
+0x6a,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x52,0x7d]
+0x6b,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x52,0x7d]
+0x6c,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x52,0x7d]
+0x6d,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x52,0x7d]
+0x6e,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x52,0x7d]
+0x6f,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x52,0x7d]
+0x7b,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x52,0x7d]
+0x7c,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x52,0x7d]
+0x7e,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x52,0x7d]
+0x7f,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x52,0x7d]
+0x80,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x52,0x7d]
+0xc1,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x52,0x7d]
+0xf0,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x52,0x7d]
+0xf7,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x52,0x7d]
+0xfd,0x00,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x52,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x52,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_lt_u16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x52,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x52,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_lt_u16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x52,0x7d]
+0x00,0x01,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x52,0x7d]
+0xff,0x01,0x52,0x7d
+
+# CHECK: v_cmp_lt_u16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x53,0x7d]
+0x00,0xfe,0x53,0x7d
+
+# CHECK: v_cmp_lt_u16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xa9,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xa9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xa9,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xa9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xa9,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xa9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xa9,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xa9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xa9,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xa9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xa9,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xa9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xa9,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xa9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xa9,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xa9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xa9,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xa9,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xa9,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xa9,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xa9,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xa9,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xa9,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xa9,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xa9,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xa9,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xa9,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xa9,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_lt_u16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xa9,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xa9,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_lt_u16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xa9,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xa9,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_lt_u16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xa9,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xa9,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_lt_u16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xa9,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xa9,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_lt_u16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xa9,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xa9,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_lt_u16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xa9,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xa9,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_lt_u16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xa9,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xa9,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_lt_u16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xa9,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xa9,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_eq_u16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x54,0x7d]
+0x00,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x54,0x7d]
+0x65,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x54,0x7d]
+0x66,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x54,0x7d]
+0x67,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x54,0x7d]
+0x6a,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x54,0x7d]
+0x6b,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x54,0x7d]
+0x6c,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x54,0x7d]
+0x6d,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x54,0x7d]
+0x6e,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x54,0x7d]
+0x6f,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x54,0x7d]
+0x7b,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x54,0x7d]
+0x7c,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x54,0x7d]
+0x7e,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x54,0x7d]
+0x7f,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x54,0x7d]
+0x80,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x54,0x7d]
+0xc1,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x54,0x7d]
+0xf0,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x54,0x7d]
+0xf7,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x54,0x7d]
+0xfd,0x00,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x54,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x54,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_eq_u16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x54,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x54,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_eq_u16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x54,0x7d]
+0x00,0x01,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x54,0x7d]
+0xff,0x01,0x54,0x7d
+
+# CHECK: v_cmp_eq_u16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x55,0x7d]
+0x00,0xfe,0x55,0x7d
+
+# CHECK: v_cmp_eq_u16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xaa,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xaa,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xaa,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xaa,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xaa,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xaa,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xaa,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xaa,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xaa,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xaa,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xaa,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xaa,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xaa,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xaa,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xaa,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xaa,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xaa,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xaa,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xaa,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xaa,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xaa,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xaa,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xaa,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xaa,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xaa,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xaa,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xaa,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xaa,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_eq_u16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xaa,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xaa,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_eq_u16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xaa,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xaa,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_eq_u16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xaa,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xaa,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_eq_u16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xaa,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xaa,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_eq_u16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xaa,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xaa,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_eq_u16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xaa,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xaa,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_eq_u16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xaa,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xaa,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_eq_u16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xaa,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xaa,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_le_u16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x56,0x7d]
+0x00,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x56,0x7d]
+0x65,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x56,0x7d]
+0x66,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x56,0x7d]
+0x67,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x56,0x7d]
+0x6a,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x56,0x7d]
+0x6b,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x56,0x7d]
+0x6c,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x56,0x7d]
+0x6d,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x56,0x7d]
+0x6e,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x56,0x7d]
+0x6f,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x56,0x7d]
+0x7b,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x56,0x7d]
+0x7c,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x56,0x7d]
+0x7e,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x56,0x7d]
+0x7f,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x56,0x7d]
+0x80,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x56,0x7d]
+0xc1,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x56,0x7d]
+0xf0,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x56,0x7d]
+0xf7,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x56,0x7d]
+0xfd,0x00,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x56,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x56,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_le_u16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x56,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x56,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_le_u16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x56,0x7d]
+0x00,0x01,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x56,0x7d]
+0xff,0x01,0x56,0x7d
+
+# CHECK: v_cmp_le_u16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x57,0x7d]
+0x00,0xfe,0x57,0x7d
+
+# CHECK: v_cmp_le_u16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xab,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xab,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xab,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xab,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xab,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xab,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xab,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xab,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xab,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xab,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xab,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xab,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xab,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xab,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xab,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xab,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xab,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xab,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xab,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xab,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xab,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xab,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xab,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xab,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xab,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xab,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xab,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xab,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_le_u16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xab,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xab,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_le_u16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xab,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xab,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_le_u16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xab,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xab,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_le_u16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xab,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xab,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_le_u16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xab,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xab,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_le_u16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xab,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xab,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_le_u16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xab,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xab,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_le_u16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xab,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xab,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_gt_u16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x58,0x7d]
+0x00,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x58,0x7d]
+0x65,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x58,0x7d]
+0x66,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x58,0x7d]
+0x67,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x58,0x7d]
+0x6a,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x58,0x7d]
+0x6b,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x58,0x7d]
+0x6c,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x58,0x7d]
+0x6d,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x58,0x7d]
+0x6e,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x58,0x7d]
+0x6f,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x58,0x7d]
+0x7b,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x58,0x7d]
+0x7c,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x58,0x7d]
+0x7e,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x58,0x7d]
+0x7f,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x58,0x7d]
+0x80,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x58,0x7d]
+0xc1,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x58,0x7d]
+0xf0,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x58,0x7d]
+0xf7,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x58,0x7d]
+0xfd,0x00,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x58,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x58,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_gt_u16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x58,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x58,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_gt_u16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x58,0x7d]
+0x00,0x01,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x58,0x7d]
+0xff,0x01,0x58,0x7d
+
+# CHECK: v_cmp_gt_u16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x59,0x7d]
+0x00,0xfe,0x59,0x7d
+
+# CHECK: v_cmp_gt_u16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xac,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xac,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xac,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xac,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xac,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xac,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xac,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xac,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xac,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xac,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xac,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xac,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xac,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xac,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xac,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xac,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xac,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xac,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xac,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xac,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xac,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xac,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xac,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xac,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xac,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xac,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xac,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xac,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_gt_u16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xac,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xac,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_gt_u16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xac,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xac,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_gt_u16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xac,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xac,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_gt_u16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xac,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xac,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_gt_u16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xac,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xac,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_gt_u16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xac,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xac,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_gt_u16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xac,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xac,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_gt_u16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xac,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xac,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_ne_u16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x5a,0x7d]
+0x00,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x5a,0x7d]
+0x65,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x5a,0x7d]
+0x66,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x5a,0x7d]
+0x67,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x5a,0x7d]
+0x6a,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x5a,0x7d]
+0x6b,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x5a,0x7d]
+0x6c,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x5a,0x7d]
+0x6d,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x5a,0x7d]
+0x6e,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x5a,0x7d]
+0x6f,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x5a,0x7d]
+0x7b,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x5a,0x7d]
+0x7c,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x5a,0x7d]
+0x7e,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x5a,0x7d]
+0x7f,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x5a,0x7d]
+0x80,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x5a,0x7d]
+0xc1,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x5a,0x7d]
+0xf0,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x5a,0x7d]
+0xf7,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x5a,0x7d]
+0xfd,0x00,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x5a,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x5a,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_ne_u16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x5a,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x5a,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_ne_u16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x5a,0x7d]
+0x00,0x01,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x5a,0x7d]
+0xff,0x01,0x5a,0x7d
+
+# CHECK: v_cmp_ne_u16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x5b,0x7d]
+0x00,0xfe,0x5b,0x7d
+
+# CHECK: v_cmp_ne_u16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xad,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xad,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xad,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xad,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xad,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xad,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xad,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xad,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xad,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xad,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xad,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xad,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xad,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xad,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xad,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xad,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xad,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xad,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xad,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xad,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xad,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xad,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xad,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xad,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xad,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xad,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xad,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xad,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_ne_u16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xad,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xad,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_ne_u16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xad,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xad,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_ne_u16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xad,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xad,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_ne_u16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xad,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xad,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_ne_u16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xad,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xad,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_ne_u16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xad,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xad,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_ne_u16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xad,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xad,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_ne_u16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xad,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xad,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_ge_u16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x5c,0x7d]
+0x00,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x5c,0x7d]
+0x65,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x5c,0x7d]
+0x66,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x5c,0x7d]
+0x67,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x5c,0x7d]
+0x6a,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x5c,0x7d]
+0x6b,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x5c,0x7d]
+0x6c,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x5c,0x7d]
+0x6d,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x5c,0x7d]
+0x6e,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x5c,0x7d]
+0x6f,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x5c,0x7d]
+0x7b,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x5c,0x7d]
+0x7c,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x5c,0x7d]
+0x7e,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x5c,0x7d]
+0x7f,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x5c,0x7d]
+0x80,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x5c,0x7d]
+0xc1,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x5c,0x7d]
+0xf0,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x5c,0x7d]
+0xf7,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x5c,0x7d]
+0xfd,0x00,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x5c,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x5c,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_ge_u16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x5c,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x5c,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_ge_u16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x5c,0x7d]
+0x00,0x01,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x5c,0x7d]
+0xff,0x01,0x5c,0x7d
+
+# CHECK: v_cmp_ge_u16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x5d,0x7d]
+0x00,0xfe,0x5d,0x7d
+
+# CHECK: v_cmp_ge_u16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xae,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xae,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xae,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xae,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xae,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xae,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xae,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xae,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xae,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xae,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xae,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xae,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xae,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xae,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xae,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xae,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xae,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xae,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xae,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xae,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xae,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xae,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xae,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xae,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xae,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xae,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xae,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xae,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_ge_u16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xae,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xae,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_ge_u16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xae,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xae,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_ge_u16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xae,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xae,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_ge_u16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xae,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xae,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_ge_u16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xae,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xae,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_ge_u16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xae,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xae,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_ge_u16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xae,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xae,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_ge_u16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xae,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xae,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_t_u16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x5e,0x7d]
+0x00,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x5e,0x7d]
+0x65,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x5e,0x7d]
+0x66,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x5e,0x7d]
+0x67,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x5e,0x7d]
+0x6a,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x5e,0x7d]
+0x6b,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x5e,0x7d]
+0x6c,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x5e,0x7d]
+0x6d,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x5e,0x7d]
+0x6e,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x5e,0x7d]
+0x6f,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x5e,0x7d]
+0x7b,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x5e,0x7d]
+0x7c,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x5e,0x7d]
+0x7e,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x5e,0x7d]
+0x7f,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x5e,0x7d]
+0x80,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x5e,0x7d]
+0xc1,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x5e,0x7d]
+0xf0,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x5e,0x7d]
+0xf7,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x5e,0x7d]
+0xfd,0x00,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x5e,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x5e,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmp_t_u16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x5e,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x5e,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmp_t_u16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x5e,0x7d]
+0x00,0x01,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x5e,0x7d]
+0xff,0x01,0x5e,0x7d
+
+# CHECK: v_cmp_t_u16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x5f,0x7d]
+0x00,0xfe,0x5f,0x7d
+
+# CHECK: v_cmp_t_u16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xaf,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xaf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xaf,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xaf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xaf,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xaf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xaf,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xaf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xaf,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xaf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xaf,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xaf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xaf,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xaf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xaf,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xaf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xaf,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xaf,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xaf,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xaf,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xaf,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xaf,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xaf,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xaf,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xaf,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xaf,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xaf,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xaf,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_t_u16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xaf,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xaf,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_t_u16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xaf,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xaf,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_t_u16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xaf,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xaf,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_t_u16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xaf,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xaf,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_t_u16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xaf,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xaf,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_t_u16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xaf,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xaf,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_t_u16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xaf,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xaf,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_t_u16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xaf,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xaf,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_f_i16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x60,0x7d]
+0x00,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x60,0x7d]
+0x65,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x60,0x7d]
+0x66,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x60,0x7d]
+0x67,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x60,0x7d]
+0x6a,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x60,0x7d]
+0x6b,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x60,0x7d]
+0x6c,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x60,0x7d]
+0x6d,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x60,0x7d]
+0x6e,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x60,0x7d]
+0x6f,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x60,0x7d]
+0x7b,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x60,0x7d]
+0x7c,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x60,0x7d]
+0x7e,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x60,0x7d]
+0x7f,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x60,0x7d]
+0x80,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x60,0x7d]
+0xc1,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x60,0x7d]
+0xf0,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x60,0x7d]
+0xf7,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x60,0x7d]
+0xfd,0x00,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x60,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x60,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_f_i16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x60,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x60,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_f_i16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x60,0x7d]
+0x00,0x01,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x60,0x7d]
+0xff,0x01,0x60,0x7d
+
+# CHECK: v_cmpx_f_i16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x61,0x7d]
+0x00,0xfe,0x61,0x7d
+
+# CHECK: v_cmpx_f_i16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xb0,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xb0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xb0,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xb0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xb0,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xb0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xb0,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xb0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xb0,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xb0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xb0,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xb0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xb0,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xb0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xb0,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xb0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xb0,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xb0,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xb0,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xb0,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xb0,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xb0,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xb0,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xb0,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xb0,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xb0,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xb0,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xb0,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_f_i16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xb0,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xb0,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_f_i16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xb0,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xb0,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_f_i16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xb0,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xb0,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_f_i16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xb0,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xb0,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_f_i16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xb0,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xb0,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_f_i16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xb0,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xb0,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_f_i16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xb0,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xb0,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_f_i16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xb0,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xb0,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x62,0x7d]
+0x00,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x62,0x7d]
+0x65,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x62,0x7d]
+0x66,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x62,0x7d]
+0x67,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x62,0x7d]
+0x6a,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x62,0x7d]
+0x6b,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x62,0x7d]
+0x6c,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x62,0x7d]
+0x6d,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x62,0x7d]
+0x6e,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x62,0x7d]
+0x6f,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x62,0x7d]
+0x7b,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x62,0x7d]
+0x7c,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x62,0x7d]
+0x7e,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x62,0x7d]
+0x7f,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x62,0x7d]
+0x80,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x62,0x7d]
+0xc1,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x62,0x7d]
+0xf0,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x62,0x7d]
+0xf7,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x62,0x7d]
+0xfd,0x00,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x62,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x62,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x62,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x62,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x62,0x7d]
+0x00,0x01,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x62,0x7d]
+0xff,0x01,0x62,0x7d
+
+# CHECK: v_cmpx_lt_i16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x63,0x7d]
+0x00,0xfe,0x63,0x7d
+
+# CHECK: v_cmpx_lt_i16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xb1,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xb1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xb1,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xb1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xb1,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xb1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xb1,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xb1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xb1,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xb1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xb1,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xb1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xb1,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xb1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xb1,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xb1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xb1,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xb1,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xb1,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xb1,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xb1,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xb1,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xb1,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xb1,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xb1,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xb1,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xb1,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xb1,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xb1,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xb1,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xb1,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xb1,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xb1,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xb1,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xb1,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xb1,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xb1,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xb1,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xb1,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xb1,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xb1,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xb1,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_lt_i16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xb1,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xb1,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x64,0x7d]
+0x00,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x64,0x7d]
+0x65,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x64,0x7d]
+0x66,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x64,0x7d]
+0x67,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x64,0x7d]
+0x6a,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x64,0x7d]
+0x6b,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x64,0x7d]
+0x6c,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x64,0x7d]
+0x6d,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x64,0x7d]
+0x6e,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x64,0x7d]
+0x6f,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x64,0x7d]
+0x7b,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x64,0x7d]
+0x7c,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x64,0x7d]
+0x7e,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x64,0x7d]
+0x7f,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x64,0x7d]
+0x80,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x64,0x7d]
+0xc1,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x64,0x7d]
+0xf0,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x64,0x7d]
+0xf7,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x64,0x7d]
+0xfd,0x00,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x64,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x64,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x64,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x64,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x64,0x7d]
+0x00,0x01,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x64,0x7d]
+0xff,0x01,0x64,0x7d
+
+# CHECK: v_cmpx_eq_i16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x65,0x7d]
+0x00,0xfe,0x65,0x7d
+
+# CHECK: v_cmpx_eq_i16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xb2,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xb2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xb2,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xb2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xb2,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xb2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xb2,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xb2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xb2,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xb2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xb2,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xb2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xb2,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xb2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xb2,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xb2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xb2,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xb2,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xb2,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xb2,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xb2,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xb2,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xb2,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xb2,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xb2,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xb2,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xb2,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xb2,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xb2,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xb2,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xb2,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xb2,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xb2,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xb2,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xb2,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xb2,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xb2,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xb2,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xb2,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xb2,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xb2,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xb2,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_eq_i16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xb2,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xb2,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_le_i16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x66,0x7d]
+0x00,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x66,0x7d]
+0x65,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x66,0x7d]
+0x66,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x66,0x7d]
+0x67,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x66,0x7d]
+0x6a,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x66,0x7d]
+0x6b,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x66,0x7d]
+0x6c,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x66,0x7d]
+0x6d,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x66,0x7d]
+0x6e,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x66,0x7d]
+0x6f,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x66,0x7d]
+0x7b,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x66,0x7d]
+0x7c,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x66,0x7d]
+0x7e,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x66,0x7d]
+0x7f,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x66,0x7d]
+0x80,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x66,0x7d]
+0xc1,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x66,0x7d]
+0xf0,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x66,0x7d]
+0xf7,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x66,0x7d]
+0xfd,0x00,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x66,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x66,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_le_i16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x66,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x66,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_le_i16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x66,0x7d]
+0x00,0x01,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x66,0x7d]
+0xff,0x01,0x66,0x7d
+
+# CHECK: v_cmpx_le_i16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x67,0x7d]
+0x00,0xfe,0x67,0x7d
+
+# CHECK: v_cmpx_le_i16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xb3,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xb3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xb3,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xb3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xb3,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xb3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xb3,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xb3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xb3,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xb3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xb3,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xb3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xb3,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xb3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xb3,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xb3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xb3,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xb3,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xb3,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xb3,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xb3,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xb3,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xb3,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xb3,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xb3,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xb3,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xb3,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xb3,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_le_i16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xb3,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xb3,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_le_i16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xb3,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xb3,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_le_i16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xb3,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xb3,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_le_i16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xb3,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xb3,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_le_i16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xb3,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xb3,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_le_i16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xb3,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xb3,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_le_i16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xb3,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xb3,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_le_i16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xb3,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xb3,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x68,0x7d]
+0x00,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x68,0x7d]
+0x65,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x68,0x7d]
+0x66,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x68,0x7d]
+0x67,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x68,0x7d]
+0x6a,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x68,0x7d]
+0x6b,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x68,0x7d]
+0x6c,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x68,0x7d]
+0x6d,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x68,0x7d]
+0x6e,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x68,0x7d]
+0x6f,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x68,0x7d]
+0x7b,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x68,0x7d]
+0x7c,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x68,0x7d]
+0x7e,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x68,0x7d]
+0x7f,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x68,0x7d]
+0x80,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x68,0x7d]
+0xc1,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x68,0x7d]
+0xf0,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x68,0x7d]
+0xf7,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x68,0x7d]
+0xfd,0x00,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x68,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x68,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x68,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x68,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x68,0x7d]
+0x00,0x01,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x68,0x7d]
+0xff,0x01,0x68,0x7d
+
+# CHECK: v_cmpx_gt_i16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x69,0x7d]
+0x00,0xfe,0x69,0x7d
+
+# CHECK: v_cmpx_gt_i16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xb4,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xb4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xb4,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xb4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xb4,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xb4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xb4,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xb4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xb4,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xb4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xb4,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xb4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xb4,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xb4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xb4,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xb4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xb4,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xb4,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xb4,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xb4,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xb4,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xb4,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xb4,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xb4,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xb4,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xb4,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xb4,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xb4,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xb4,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xb4,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xb4,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xb4,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xb4,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xb4,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xb4,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xb4,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xb4,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xb4,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xb4,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xb4,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xb4,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xb4,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_gt_i16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xb4,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xb4,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x6a,0x7d]
+0x00,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x6a,0x7d]
+0x65,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x6a,0x7d]
+0x66,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x6a,0x7d]
+0x67,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x6a,0x7d]
+0x6a,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x6a,0x7d]
+0x6b,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x6a,0x7d]
+0x6c,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x6a,0x7d]
+0x6d,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x6a,0x7d]
+0x6e,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x6a,0x7d]
+0x6f,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x6a,0x7d]
+0x7b,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x6a,0x7d]
+0x7c,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x6a,0x7d]
+0x7e,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x6a,0x7d]
+0x7f,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x6a,0x7d]
+0x80,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x6a,0x7d]
+0xc1,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x6a,0x7d]
+0xf0,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x6a,0x7d]
+0xf7,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x6a,0x7d]
+0xfd,0x00,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x6a,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x6a,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x6a,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x6a,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x6a,0x7d]
+0x00,0x01,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x6a,0x7d]
+0xff,0x01,0x6a,0x7d
+
+# CHECK: v_cmpx_ne_i16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x6b,0x7d]
+0x00,0xfe,0x6b,0x7d
+
+# CHECK: v_cmpx_ne_i16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xb5,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xb5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xb5,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xb5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xb5,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xb5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xb5,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xb5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xb5,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xb5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xb5,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xb5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xb5,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xb5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xb5,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xb5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xb5,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xb5,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xb5,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xb5,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xb5,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xb5,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xb5,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xb5,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xb5,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xb5,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xb5,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xb5,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xb5,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xb5,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xb5,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xb5,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xb5,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xb5,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xb5,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xb5,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xb5,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xb5,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xb5,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xb5,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xb5,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xb5,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_ne_i16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xb5,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xb5,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x6c,0x7d]
+0x00,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x6c,0x7d]
+0x65,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x6c,0x7d]
+0x66,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x6c,0x7d]
+0x67,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x6c,0x7d]
+0x6a,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x6c,0x7d]
+0x6b,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x6c,0x7d]
+0x6c,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x6c,0x7d]
+0x6d,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x6c,0x7d]
+0x6e,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x6c,0x7d]
+0x6f,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x6c,0x7d]
+0x7b,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x6c,0x7d]
+0x7c,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x6c,0x7d]
+0x7e,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x6c,0x7d]
+0x7f,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x6c,0x7d]
+0x80,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x6c,0x7d]
+0xc1,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x6c,0x7d]
+0xf0,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x6c,0x7d]
+0xf7,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x6c,0x7d]
+0xfd,0x00,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x6c,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x6c,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x6c,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x6c,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x6c,0x7d]
+0x00,0x01,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x6c,0x7d]
+0xff,0x01,0x6c,0x7d
+
+# CHECK: v_cmpx_ge_i16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x6d,0x7d]
+0x00,0xfe,0x6d,0x7d
+
+# CHECK: v_cmpx_ge_i16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xb6,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xb6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xb6,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xb6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xb6,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xb6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xb6,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xb6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xb6,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xb6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xb6,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xb6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xb6,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xb6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xb6,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xb6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xb6,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xb6,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xb6,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xb6,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xb6,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xb6,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xb6,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xb6,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xb6,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xb6,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xb6,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xb6,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xb6,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xb6,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xb6,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xb6,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xb6,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xb6,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xb6,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xb6,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xb6,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xb6,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xb6,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xb6,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xb6,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xb6,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_ge_i16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xb6,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xb6,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_t_i16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x6e,0x7d]
+0x00,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x6e,0x7d]
+0x65,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x6e,0x7d]
+0x66,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x6e,0x7d]
+0x67,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x6e,0x7d]
+0x6a,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x6e,0x7d]
+0x6b,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x6e,0x7d]
+0x6c,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x6e,0x7d]
+0x6d,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x6e,0x7d]
+0x6e,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x6e,0x7d]
+0x6f,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x6e,0x7d]
+0x7b,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x6e,0x7d]
+0x7c,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x6e,0x7d]
+0x7e,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x6e,0x7d]
+0x7f,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x6e,0x7d]
+0x80,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x6e,0x7d]
+0xc1,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x6e,0x7d]
+0xf0,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x6e,0x7d]
+0xf7,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x6e,0x7d]
+0xfd,0x00,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x6e,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x6e,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_t_i16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x6e,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x6e,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_t_i16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x6e,0x7d]
+0x00,0x01,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x6e,0x7d]
+0xff,0x01,0x6e,0x7d
+
+# CHECK: v_cmpx_t_i16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x6f,0x7d]
+0x00,0xfe,0x6f,0x7d
+
+# CHECK: v_cmpx_t_i16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xb7,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xb7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xb7,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xb7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xb7,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xb7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xb7,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xb7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xb7,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xb7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xb7,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xb7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xb7,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xb7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xb7,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xb7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xb7,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xb7,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xb7,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xb7,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xb7,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xb7,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xb7,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xb7,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xb7,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xb7,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xb7,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xb7,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_t_i16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xb7,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xb7,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_t_i16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xb7,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xb7,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_t_i16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xb7,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xb7,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_t_i16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xb7,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xb7,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_t_i16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xb7,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xb7,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_t_i16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xb7,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xb7,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_t_i16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xb7,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xb7,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_t_i16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xb7,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xb7,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_f_u16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x70,0x7d]
+0x00,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x70,0x7d]
+0x65,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x70,0x7d]
+0x66,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x70,0x7d]
+0x67,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x70,0x7d]
+0x6a,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x70,0x7d]
+0x6b,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x70,0x7d]
+0x6c,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x70,0x7d]
+0x6d,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x70,0x7d]
+0x6e,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x70,0x7d]
+0x6f,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x70,0x7d]
+0x7b,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x70,0x7d]
+0x7c,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x70,0x7d]
+0x7e,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x70,0x7d]
+0x7f,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x70,0x7d]
+0x80,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x70,0x7d]
+0xc1,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x70,0x7d]
+0xf0,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x70,0x7d]
+0xf7,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x70,0x7d]
+0xfd,0x00,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x70,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x70,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_f_u16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x70,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x70,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_f_u16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x70,0x7d]
+0x00,0x01,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x70,0x7d]
+0xff,0x01,0x70,0x7d
+
+# CHECK: v_cmpx_f_u16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x71,0x7d]
+0x00,0xfe,0x71,0x7d
+
+# CHECK: v_cmpx_f_u16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xb8,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xb8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xb8,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xb8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xb8,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xb8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xb8,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xb8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xb8,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xb8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xb8,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xb8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xb8,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xb8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xb8,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xb8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xb8,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xb8,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xb8,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xb8,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xb8,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xb8,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xb8,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xb8,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xb8,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xb8,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xb8,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xb8,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_f_u16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xb8,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xb8,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_f_u16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xb8,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xb8,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_f_u16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xb8,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xb8,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_f_u16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xb8,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xb8,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_f_u16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xb8,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xb8,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_f_u16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xb8,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xb8,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_f_u16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xb8,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xb8,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_f_u16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xb8,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xb8,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x72,0x7d]
+0x00,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x72,0x7d]
+0x65,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x72,0x7d]
+0x66,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x72,0x7d]
+0x67,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x72,0x7d]
+0x6a,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x72,0x7d]
+0x6b,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x72,0x7d]
+0x6c,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x72,0x7d]
+0x6d,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x72,0x7d]
+0x6e,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x72,0x7d]
+0x6f,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x72,0x7d]
+0x7b,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x72,0x7d]
+0x7c,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x72,0x7d]
+0x7e,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x72,0x7d]
+0x7f,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x72,0x7d]
+0x80,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x72,0x7d]
+0xc1,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x72,0x7d]
+0xf0,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x72,0x7d]
+0xf7,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x72,0x7d]
+0xfd,0x00,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x72,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x72,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x72,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x72,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x72,0x7d]
+0x00,0x01,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x72,0x7d]
+0xff,0x01,0x72,0x7d
+
+# CHECK: v_cmpx_lt_u16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x73,0x7d]
+0x00,0xfe,0x73,0x7d
+
+# CHECK: v_cmpx_lt_u16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xb9,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xb9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xb9,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xb9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xb9,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xb9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xb9,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xb9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xb9,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xb9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xb9,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xb9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xb9,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xb9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xb9,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xb9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xb9,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xb9,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xb9,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xb9,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xb9,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xb9,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xb9,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xb9,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xb9,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xb9,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xb9,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xb9,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xb9,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xb9,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xb9,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xb9,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xb9,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xb9,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xb9,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xb9,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xb9,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xb9,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xb9,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xb9,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xb9,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xb9,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_lt_u16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xb9,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xb9,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x74,0x7d]
+0x00,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x74,0x7d]
+0x65,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x74,0x7d]
+0x66,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x74,0x7d]
+0x67,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x74,0x7d]
+0x6a,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x74,0x7d]
+0x6b,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x74,0x7d]
+0x6c,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x74,0x7d]
+0x6d,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x74,0x7d]
+0x6e,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x74,0x7d]
+0x6f,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x74,0x7d]
+0x7b,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x74,0x7d]
+0x7c,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x74,0x7d]
+0x7e,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x74,0x7d]
+0x7f,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x74,0x7d]
+0x80,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x74,0x7d]
+0xc1,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x74,0x7d]
+0xf0,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x74,0x7d]
+0xf7,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x74,0x7d]
+0xfd,0x00,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x74,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x74,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x74,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x74,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x74,0x7d]
+0x00,0x01,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x74,0x7d]
+0xff,0x01,0x74,0x7d
+
+# CHECK: v_cmpx_eq_u16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x75,0x7d]
+0x00,0xfe,0x75,0x7d
+
+# CHECK: v_cmpx_eq_u16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xba,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xba,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xba,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xba,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xba,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xba,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xba,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xba,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xba,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xba,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xba,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xba,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xba,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xba,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xba,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xba,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xba,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xba,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xba,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xba,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xba,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xba,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xba,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xba,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xba,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xba,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xba,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xba,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xba,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xba,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xba,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xba,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xba,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xba,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xba,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xba,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xba,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xba,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xba,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xba,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xba,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xba,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_eq_u16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xba,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xba,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_le_u16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x76,0x7d]
+0x00,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x76,0x7d]
+0x65,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x76,0x7d]
+0x66,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x76,0x7d]
+0x67,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x76,0x7d]
+0x6a,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x76,0x7d]
+0x6b,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x76,0x7d]
+0x6c,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x76,0x7d]
+0x6d,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x76,0x7d]
+0x6e,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x76,0x7d]
+0x6f,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x76,0x7d]
+0x7b,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x76,0x7d]
+0x7c,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x76,0x7d]
+0x7e,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x76,0x7d]
+0x7f,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x76,0x7d]
+0x80,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x76,0x7d]
+0xc1,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x76,0x7d]
+0xf0,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x76,0x7d]
+0xf7,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x76,0x7d]
+0xfd,0x00,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x76,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x76,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_le_u16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x76,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x76,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_le_u16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x76,0x7d]
+0x00,0x01,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x76,0x7d]
+0xff,0x01,0x76,0x7d
+
+# CHECK: v_cmpx_le_u16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x77,0x7d]
+0x00,0xfe,0x77,0x7d
+
+# CHECK: v_cmpx_le_u16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xbb,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xbb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xbb,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xbb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xbb,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xbb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xbb,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xbb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xbb,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xbb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xbb,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xbb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xbb,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xbb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xbb,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xbb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xbb,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xbb,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xbb,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xbb,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xbb,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xbb,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xbb,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xbb,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xbb,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xbb,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xbb,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xbb,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_le_u16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xbb,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xbb,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_le_u16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xbb,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xbb,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_le_u16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xbb,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xbb,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_le_u16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xbb,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xbb,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_le_u16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xbb,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xbb,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_le_u16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xbb,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xbb,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_le_u16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xbb,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xbb,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_le_u16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xbb,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xbb,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x78,0x7d]
+0x00,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x78,0x7d]
+0x65,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x78,0x7d]
+0x66,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x78,0x7d]
+0x67,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x78,0x7d]
+0x6a,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x78,0x7d]
+0x6b,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x78,0x7d]
+0x6c,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x78,0x7d]
+0x6d,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x78,0x7d]
+0x6e,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x78,0x7d]
+0x6f,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x78,0x7d]
+0x7b,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x78,0x7d]
+0x7c,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x78,0x7d]
+0x7e,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x78,0x7d]
+0x7f,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x78,0x7d]
+0x80,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x78,0x7d]
+0xc1,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x78,0x7d]
+0xf0,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x78,0x7d]
+0xf7,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x78,0x7d]
+0xfd,0x00,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x78,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x78,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x78,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x78,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x78,0x7d]
+0x00,0x01,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x78,0x7d]
+0xff,0x01,0x78,0x7d
+
+# CHECK: v_cmpx_gt_u16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x79,0x7d]
+0x00,0xfe,0x79,0x7d
+
+# CHECK: v_cmpx_gt_u16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xbc,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xbc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xbc,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xbc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xbc,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xbc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xbc,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xbc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xbc,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xbc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xbc,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xbc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xbc,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xbc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xbc,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xbc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xbc,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xbc,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xbc,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xbc,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xbc,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xbc,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xbc,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xbc,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xbc,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xbc,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xbc,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xbc,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xbc,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xbc,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xbc,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xbc,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xbc,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xbc,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xbc,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xbc,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xbc,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xbc,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xbc,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xbc,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xbc,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xbc,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_gt_u16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xbc,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xbc,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x7a,0x7d]
+0x00,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x7a,0x7d]
+0x65,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x7a,0x7d]
+0x66,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x7a,0x7d]
+0x67,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x7a,0x7d]
+0x6a,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x7a,0x7d]
+0x6b,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x7a,0x7d]
+0x6c,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x7a,0x7d]
+0x6d,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x7a,0x7d]
+0x6e,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x7a,0x7d]
+0x6f,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x7a,0x7d]
+0x7b,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x7a,0x7d]
+0x7c,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x7a,0x7d]
+0x7e,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x7a,0x7d]
+0x7f,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x7a,0x7d]
+0x80,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x7a,0x7d]
+0xc1,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x7a,0x7d]
+0xf0,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x7a,0x7d]
+0xf7,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x7a,0x7d]
+0xfd,0x00,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x7a,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x7a,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x7a,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x7a,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x7a,0x7d]
+0x00,0x01,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x7a,0x7d]
+0xff,0x01,0x7a,0x7d
+
+# CHECK: v_cmpx_ne_u16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x7b,0x7d]
+0x00,0xfe,0x7b,0x7d
+
+# CHECK: v_cmpx_ne_u16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xbd,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xbd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xbd,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xbd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xbd,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xbd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xbd,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xbd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xbd,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xbd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xbd,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xbd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xbd,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xbd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xbd,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xbd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xbd,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xbd,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xbd,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xbd,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xbd,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xbd,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xbd,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xbd,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xbd,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xbd,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xbd,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xbd,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xbd,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xbd,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xbd,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xbd,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xbd,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xbd,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xbd,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xbd,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xbd,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xbd,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xbd,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xbd,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xbd,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xbd,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_ne_u16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xbd,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xbd,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x7c,0x7d]
+0x00,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x7c,0x7d]
+0x65,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x7c,0x7d]
+0x66,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x7c,0x7d]
+0x67,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x7c,0x7d]
+0x6a,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x7c,0x7d]
+0x6b,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x7c,0x7d]
+0x6c,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x7c,0x7d]
+0x6d,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x7c,0x7d]
+0x6e,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x7c,0x7d]
+0x6f,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x7c,0x7d]
+0x7b,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x7c,0x7d]
+0x7c,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x7c,0x7d]
+0x7e,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x7c,0x7d]
+0x7f,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x7c,0x7d]
+0x80,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x7c,0x7d]
+0xc1,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x7c,0x7d]
+0xf0,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x7c,0x7d]
+0xf7,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x7c,0x7d]
+0xfd,0x00,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x7c,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x7c,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x7c,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x7c,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x7c,0x7d]
+0x00,0x01,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x7c,0x7d]
+0xff,0x01,0x7c,0x7d
+
+# CHECK: v_cmpx_ge_u16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x7d,0x7d]
+0x00,0xfe,0x7d,0x7d
+
+# CHECK: v_cmpx_ge_u16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xbe,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xbe,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xbe,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xbe,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xbe,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xbe,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xbe,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xbe,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xbe,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xbe,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xbe,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xbe,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xbe,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xbe,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xbe,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xbe,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xbe,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xbe,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xbe,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xbe,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xbe,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xbe,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xbe,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xbe,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xbe,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xbe,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xbe,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xbe,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xbe,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xbe,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xbe,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xbe,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xbe,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xbe,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xbe,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xbe,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xbe,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xbe,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xbe,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xbe,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xbe,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xbe,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_ge_u16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xbe,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xbe,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_t_u16_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x7e,0x7d]
+0x00,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x7e,0x7d]
+0x65,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x7e,0x7d]
+0x66,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x7e,0x7d]
+0x67,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x7e,0x7d]
+0x6a,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x7e,0x7d]
+0x6b,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x7e,0x7d]
+0x6c,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x7e,0x7d]
+0x6d,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x7e,0x7d]
+0x6e,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x7e,0x7d]
+0x6f,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x7e,0x7d]
+0x7b,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x7e,0x7d]
+0x7c,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x7e,0x7d]
+0x7e,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x7e,0x7d]
+0x7f,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x7e,0x7d]
+0x80,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x7e,0x7d]
+0xc1,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x7e,0x7d]
+0xf0,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x7e,0x7d]
+0xf7,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x7e,0x7d]
+0xfd,0x00,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, 0xfe0b, v0 ; encoding: [0xff,0x00,0x7e,0x7d,0x0b,0xfe,0x00,0x00]
+0xff,0x00,0x7e,0x7d,0x0b,0xfe,0x00,0x00
+
+# CHECK: v_cmpx_t_u16_e32 vcc, 0x3456, v0 ; encoding: [0xff,0x00,0x7e,0x7d,0x56,0x34,0x00,0x00]
+0xff,0x00,0x7e,0x7d,0x56,0x34,0x00,0x00
+
+# CHECK: v_cmpx_t_u16_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x7e,0x7d]
+0x00,0x01,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x7e,0x7d]
+0xff,0x01,0x7e,0x7d
+
+# CHECK: v_cmpx_t_u16_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x7f,0x7d]
+0x00,0xfe,0x7f,0x7d
+
+# CHECK: v_cmpx_t_u16_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xbf,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xbf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u16_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xbf,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xbf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u16_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xbf,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xbf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u16_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xbf,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xbf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u16_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xbf,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xbf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u16_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xbf,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xbf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u16_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xbf,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xbf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u16_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xbf,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xbf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u16_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xbf,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xbf,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u16_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xbf,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xbf,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u16_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xbf,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xbf,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u16_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xbf,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xbf,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u16_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xbf,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xbf,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u16_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xbf,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xbf,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_t_u16_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xbf,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xbf,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_t_u16_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xbf,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xbf,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_t_u16_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xbf,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xbf,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_t_u16_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xbf,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xbf,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_t_u16_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xbf,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xbf,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_t_u16_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xbf,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xbf,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_t_u16_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xbf,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xbf,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_t_u16_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xbf,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xbf,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_f_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x80,0x7d]
+0x00,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x80,0x7d]
+0x65,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x80,0x7d]
+0x66,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x80,0x7d]
+0x67,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x80,0x7d]
+0x6a,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x80,0x7d]
+0x6b,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x80,0x7d]
+0x6c,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x80,0x7d]
+0x6d,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x80,0x7d]
+0x6e,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x80,0x7d]
+0x6f,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x80,0x7d]
+0x7b,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x80,0x7d]
+0x7c,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x80,0x7d]
+0x7e,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x80,0x7d]
+0x7f,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x80,0x7d]
+0x80,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x80,0x7d]
+0xc1,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x80,0x7d]
+0xf0,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x80,0x7d]
+0xf7,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x80,0x7d]
+0xfd,0x00,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x80,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x80,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_f_i32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x80,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x80,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_f_i32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x80,0x7d]
+0x00,0x01,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x80,0x7d]
+0xff,0x01,0x80,0x7d
+
+# CHECK: v_cmp_f_i32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x81,0x7d]
+0x00,0xfe,0x81,0x7d
+
+# CHECK: v_cmp_f_i32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xc0,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xc0,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xc0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xc0,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xc0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xc0,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xc0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xc0,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xc0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xc0,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xc0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xc0,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xc0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xc0,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xc0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xc0,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc0,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xc0,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc0,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xc0,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc0,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xc0,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc0,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xc0,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc0,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xc0,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc0,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_f_i32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xc0,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc0,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_f_i32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xc0,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc0,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_f_i32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xc0,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc0,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_f_i32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xc0,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc0,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_f_i32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xc0,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc0,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_f_i32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xc0,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc0,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_f_i32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xc0,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc0,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_f_i32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xc0,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc0,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_lt_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x82,0x7d]
+0x00,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x82,0x7d]
+0x65,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x82,0x7d]
+0x66,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x82,0x7d]
+0x67,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x82,0x7d]
+0x6a,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x82,0x7d]
+0x6b,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x82,0x7d]
+0x6c,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x82,0x7d]
+0x6d,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x82,0x7d]
+0x6e,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x82,0x7d]
+0x6f,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x82,0x7d]
+0x7b,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x82,0x7d]
+0x7c,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x82,0x7d]
+0x7e,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x82,0x7d]
+0x7f,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x82,0x7d]
+0x80,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x82,0x7d]
+0xc1,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x82,0x7d]
+0xf0,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x82,0x7d]
+0xf7,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x82,0x7d]
+0xfd,0x00,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x82,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x82,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_lt_i32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x82,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x82,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_lt_i32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x82,0x7d]
+0x00,0x01,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x82,0x7d]
+0xff,0x01,0x82,0x7d
+
+# CHECK: v_cmp_lt_i32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x83,0x7d]
+0x00,0xfe,0x83,0x7d
+
+# CHECK: v_cmp_lt_i32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xc1,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xc1,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xc1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xc1,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xc1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xc1,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xc1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xc1,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xc1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xc1,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xc1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xc1,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xc1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xc1,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xc1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xc1,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc1,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xc1,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc1,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xc1,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc1,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xc1,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc1,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xc1,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc1,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xc1,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc1,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_lt_i32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xc1,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc1,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_lt_i32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xc1,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc1,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_lt_i32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xc1,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc1,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_lt_i32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xc1,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc1,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_lt_i32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xc1,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc1,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_lt_i32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xc1,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc1,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_lt_i32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xc1,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc1,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_lt_i32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xc1,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc1,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_eq_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x84,0x7d]
+0x00,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x84,0x7d]
+0x65,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x84,0x7d]
+0x66,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x84,0x7d]
+0x67,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x84,0x7d]
+0x6a,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x84,0x7d]
+0x6b,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x84,0x7d]
+0x6c,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x84,0x7d]
+0x6d,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x84,0x7d]
+0x6e,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x84,0x7d]
+0x6f,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x84,0x7d]
+0x7b,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x84,0x7d]
+0x7c,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x84,0x7d]
+0x7e,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x84,0x7d]
+0x7f,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x84,0x7d]
+0x80,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x84,0x7d]
+0xc1,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x84,0x7d]
+0xf0,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x84,0x7d]
+0xf7,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x84,0x7d]
+0xfd,0x00,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x84,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x84,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_eq_i32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x84,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x84,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_eq_i32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x84,0x7d]
+0x00,0x01,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x84,0x7d]
+0xff,0x01,0x84,0x7d
+
+# CHECK: v_cmp_eq_i32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x85,0x7d]
+0x00,0xfe,0x85,0x7d
+
+# CHECK: v_cmp_eq_i32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xc2,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xc2,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xc2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xc2,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xc2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xc2,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xc2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xc2,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xc2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xc2,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xc2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xc2,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xc2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xc2,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xc2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xc2,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc2,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xc2,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc2,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xc2,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc2,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xc2,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc2,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xc2,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc2,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xc2,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc2,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_eq_i32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xc2,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc2,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_eq_i32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xc2,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc2,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_eq_i32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xc2,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc2,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_eq_i32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xc2,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc2,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_eq_i32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xc2,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc2,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_eq_i32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xc2,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc2,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_eq_i32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xc2,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc2,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_eq_i32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xc2,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc2,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_le_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x86,0x7d]
+0x00,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x86,0x7d]
+0x65,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x86,0x7d]
+0x66,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x86,0x7d]
+0x67,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x86,0x7d]
+0x6a,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x86,0x7d]
+0x6b,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x86,0x7d]
+0x6c,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x86,0x7d]
+0x6d,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x86,0x7d]
+0x6e,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x86,0x7d]
+0x6f,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x86,0x7d]
+0x7b,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x86,0x7d]
+0x7c,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x86,0x7d]
+0x7e,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x86,0x7d]
+0x7f,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x86,0x7d]
+0x80,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x86,0x7d]
+0xc1,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x86,0x7d]
+0xf0,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x86,0x7d]
+0xf7,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x86,0x7d]
+0xfd,0x00,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x86,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x86,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_le_i32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x86,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x86,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_le_i32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x86,0x7d]
+0x00,0x01,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x86,0x7d]
+0xff,0x01,0x86,0x7d
+
+# CHECK: v_cmp_le_i32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x87,0x7d]
+0x00,0xfe,0x87,0x7d
+
+# CHECK: v_cmp_le_i32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xc3,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xc3,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xc3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xc3,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xc3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xc3,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xc3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xc3,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xc3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xc3,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xc3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xc3,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xc3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xc3,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xc3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xc3,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc3,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xc3,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc3,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xc3,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc3,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xc3,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc3,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xc3,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc3,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xc3,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc3,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_le_i32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xc3,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc3,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_le_i32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xc3,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc3,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_le_i32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xc3,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc3,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_le_i32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xc3,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc3,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_le_i32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xc3,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc3,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_le_i32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xc3,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc3,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_le_i32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xc3,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc3,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_le_i32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xc3,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc3,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_gt_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x88,0x7d]
+0x00,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x88,0x7d]
+0x65,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x88,0x7d]
+0x66,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x88,0x7d]
+0x67,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x88,0x7d]
+0x6a,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x88,0x7d]
+0x6b,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x88,0x7d]
+0x6c,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x88,0x7d]
+0x6d,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x88,0x7d]
+0x6e,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x88,0x7d]
+0x6f,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x88,0x7d]
+0x7b,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x88,0x7d]
+0x7c,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x88,0x7d]
+0x7e,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x88,0x7d]
+0x7f,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x88,0x7d]
+0x80,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x88,0x7d]
+0xc1,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x88,0x7d]
+0xf0,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x88,0x7d]
+0xf7,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x88,0x7d]
+0xfd,0x00,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x88,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x88,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_gt_i32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x88,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x88,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_gt_i32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x88,0x7d]
+0x00,0x01,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x88,0x7d]
+0xff,0x01,0x88,0x7d
+
+# CHECK: v_cmp_gt_i32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x89,0x7d]
+0x00,0xfe,0x89,0x7d
+
+# CHECK: v_cmp_gt_i32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xc4,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xc4,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xc4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xc4,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xc4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xc4,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xc4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xc4,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xc4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xc4,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xc4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xc4,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xc4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xc4,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xc4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xc4,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc4,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xc4,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc4,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xc4,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc4,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xc4,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc4,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xc4,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc4,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xc4,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc4,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_gt_i32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xc4,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc4,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_gt_i32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xc4,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc4,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_gt_i32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xc4,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc4,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_gt_i32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xc4,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc4,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_gt_i32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xc4,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc4,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_gt_i32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xc4,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc4,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_gt_i32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xc4,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc4,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_gt_i32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xc4,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc4,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_ne_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x8a,0x7d]
+0x00,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x8a,0x7d]
+0x65,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x8a,0x7d]
+0x66,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x8a,0x7d]
+0x67,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x8a,0x7d]
+0x6a,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x8a,0x7d]
+0x6b,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x8a,0x7d]
+0x6c,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x8a,0x7d]
+0x6d,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x8a,0x7d]
+0x6e,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x8a,0x7d]
+0x6f,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x8a,0x7d]
+0x7b,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x8a,0x7d]
+0x7c,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x8a,0x7d]
+0x7e,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x8a,0x7d]
+0x7f,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x8a,0x7d]
+0x80,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x8a,0x7d]
+0xc1,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x8a,0x7d]
+0xf0,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x8a,0x7d]
+0xf7,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x8a,0x7d]
+0xfd,0x00,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x8a,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x8a,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_ne_i32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x8a,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x8a,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_ne_i32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x8a,0x7d]
+0x00,0x01,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x8a,0x7d]
+0xff,0x01,0x8a,0x7d
+
+# CHECK: v_cmp_ne_i32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x8b,0x7d]
+0x00,0xfe,0x8b,0x7d
+
+# CHECK: v_cmp_ne_i32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xc5,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xc5,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xc5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xc5,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xc5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xc5,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xc5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xc5,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xc5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xc5,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xc5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xc5,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xc5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xc5,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xc5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xc5,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc5,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xc5,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc5,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xc5,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc5,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xc5,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc5,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xc5,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc5,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xc5,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc5,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_ne_i32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xc5,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc5,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_ne_i32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xc5,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc5,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_ne_i32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xc5,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc5,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_ne_i32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xc5,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc5,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_ne_i32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xc5,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc5,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_ne_i32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xc5,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc5,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_ne_i32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xc5,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc5,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_ne_i32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xc5,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc5,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_ge_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x8c,0x7d]
+0x00,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x8c,0x7d]
+0x65,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x8c,0x7d]
+0x66,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x8c,0x7d]
+0x67,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x8c,0x7d]
+0x6a,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x8c,0x7d]
+0x6b,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x8c,0x7d]
+0x6c,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x8c,0x7d]
+0x6d,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x8c,0x7d]
+0x6e,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x8c,0x7d]
+0x6f,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x8c,0x7d]
+0x7b,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x8c,0x7d]
+0x7c,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x8c,0x7d]
+0x7e,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x8c,0x7d]
+0x7f,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x8c,0x7d]
+0x80,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x8c,0x7d]
+0xc1,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x8c,0x7d]
+0xf0,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x8c,0x7d]
+0xf7,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x8c,0x7d]
+0xfd,0x00,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x8c,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x8c,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_ge_i32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x8c,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x8c,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_ge_i32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x8c,0x7d]
+0x00,0x01,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x8c,0x7d]
+0xff,0x01,0x8c,0x7d
+
+# CHECK: v_cmp_ge_i32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x8d,0x7d]
+0x00,0xfe,0x8d,0x7d
+
+# CHECK: v_cmp_ge_i32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xc6,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xc6,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xc6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xc6,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xc6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xc6,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xc6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xc6,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xc6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xc6,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xc6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xc6,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xc6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xc6,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xc6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xc6,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc6,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xc6,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc6,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xc6,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc6,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xc6,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc6,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xc6,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc6,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xc6,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc6,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_ge_i32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xc6,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc6,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_ge_i32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xc6,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc6,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_ge_i32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xc6,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc6,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_ge_i32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xc6,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc6,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_ge_i32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xc6,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc6,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_ge_i32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xc6,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc6,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_ge_i32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xc6,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc6,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_ge_i32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xc6,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc6,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_t_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x8e,0x7d]
+0x00,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x8e,0x7d]
+0x65,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x8e,0x7d]
+0x66,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x8e,0x7d]
+0x67,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x8e,0x7d]
+0x6a,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x8e,0x7d]
+0x6b,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x8e,0x7d]
+0x6c,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x8e,0x7d]
+0x6d,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x8e,0x7d]
+0x6e,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x8e,0x7d]
+0x6f,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x8e,0x7d]
+0x7b,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x8e,0x7d]
+0x7c,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x8e,0x7d]
+0x7e,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x8e,0x7d]
+0x7f,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x8e,0x7d]
+0x80,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x8e,0x7d]
+0xc1,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x8e,0x7d]
+0xf0,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x8e,0x7d]
+0xf7,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x8e,0x7d]
+0xfd,0x00,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x8e,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x8e,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_t_i32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x8e,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x8e,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_t_i32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x8e,0x7d]
+0x00,0x01,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x8e,0x7d]
+0xff,0x01,0x8e,0x7d
+
+# CHECK: v_cmp_t_i32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x8f,0x7d]
+0x00,0xfe,0x8f,0x7d
+
+# CHECK: v_cmp_t_i32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xc7,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xc7,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xc7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xc7,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xc7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xc7,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xc7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xc7,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xc7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xc7,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xc7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xc7,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xc7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xc7,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xc7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xc7,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc7,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xc7,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc7,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xc7,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc7,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xc7,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc7,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xc7,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc7,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xc7,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc7,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_t_i32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xc7,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc7,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_t_i32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xc7,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc7,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_t_i32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xc7,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc7,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_t_i32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xc7,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc7,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_t_i32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xc7,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc7,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_t_i32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xc7,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc7,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_t_i32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xc7,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc7,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_t_i32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xc7,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc7,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_f_u32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x90,0x7d]
+0x00,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x90,0x7d]
+0x65,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x90,0x7d]
+0x66,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x90,0x7d]
+0x67,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x90,0x7d]
+0x6a,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x90,0x7d]
+0x6b,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x90,0x7d]
+0x6c,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x90,0x7d]
+0x6d,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x90,0x7d]
+0x6e,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x90,0x7d]
+0x6f,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x90,0x7d]
+0x7b,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x90,0x7d]
+0x7c,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x90,0x7d]
+0x7e,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x90,0x7d]
+0x7f,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x90,0x7d]
+0x80,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x90,0x7d]
+0xc1,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x90,0x7d]
+0xf0,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x90,0x7d]
+0xf7,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x90,0x7d]
+0xfd,0x00,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x90,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x90,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_f_u32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x90,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x90,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_f_u32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x90,0x7d]
+0x00,0x01,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x90,0x7d]
+0xff,0x01,0x90,0x7d
+
+# CHECK: v_cmp_f_u32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x91,0x7d]
+0x00,0xfe,0x91,0x7d
+
+# CHECK: v_cmp_f_u32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xc8,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xc8,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xc8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xc8,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xc8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xc8,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xc8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xc8,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xc8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xc8,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xc8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xc8,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xc8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xc8,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xc8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xc8,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc8,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xc8,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc8,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xc8,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc8,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xc8,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc8,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xc8,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc8,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xc8,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc8,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_f_u32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xc8,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc8,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_f_u32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xc8,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc8,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_f_u32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xc8,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc8,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_f_u32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xc8,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc8,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_f_u32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xc8,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc8,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_f_u32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xc8,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc8,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_f_u32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xc8,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc8,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_f_u32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xc8,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc8,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_lt_u32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x92,0x7d]
+0x00,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x92,0x7d]
+0x65,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x92,0x7d]
+0x66,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x92,0x7d]
+0x67,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x92,0x7d]
+0x6a,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x92,0x7d]
+0x6b,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x92,0x7d]
+0x6c,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x92,0x7d]
+0x6d,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x92,0x7d]
+0x6e,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x92,0x7d]
+0x6f,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x92,0x7d]
+0x7b,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x92,0x7d]
+0x7c,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x92,0x7d]
+0x7e,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x92,0x7d]
+0x7f,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x92,0x7d]
+0x80,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x92,0x7d]
+0xc1,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x92,0x7d]
+0xf0,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x92,0x7d]
+0xf7,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x92,0x7d]
+0xfd,0x00,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x92,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x92,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_lt_u32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x92,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x92,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_lt_u32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x92,0x7d]
+0x00,0x01,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x92,0x7d]
+0xff,0x01,0x92,0x7d
+
+# CHECK: v_cmp_lt_u32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x93,0x7d]
+0x00,0xfe,0x93,0x7d
+
+# CHECK: v_cmp_lt_u32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xc9,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xc9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xc9,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xc9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xc9,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xc9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xc9,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xc9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xc9,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xc9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xc9,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xc9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xc9,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xc9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xc9,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xc9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xc9,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xc9,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xc9,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xc9,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xc9,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xc9,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xc9,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xc9,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xc9,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xc9,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xc9,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc9,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_lt_u32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xc9,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xc9,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_lt_u32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xc9,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xc9,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_lt_u32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xc9,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xc9,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_lt_u32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xc9,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xc9,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_lt_u32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xc9,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xc9,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_lt_u32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xc9,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xc9,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_lt_u32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xc9,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xc9,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_lt_u32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xc9,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xc9,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_eq_u32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x94,0x7d]
+0x00,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x94,0x7d]
+0x65,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x94,0x7d]
+0x66,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x94,0x7d]
+0x67,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x94,0x7d]
+0x6a,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x94,0x7d]
+0x6b,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x94,0x7d]
+0x6c,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x94,0x7d]
+0x6d,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x94,0x7d]
+0x6e,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x94,0x7d]
+0x6f,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x94,0x7d]
+0x7b,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x94,0x7d]
+0x7c,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x94,0x7d]
+0x7e,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x94,0x7d]
+0x7f,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x94,0x7d]
+0x80,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x94,0x7d]
+0xc1,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x94,0x7d]
+0xf0,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x94,0x7d]
+0xf7,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x94,0x7d]
+0xfd,0x00,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x94,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x94,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_eq_u32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x94,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x94,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_eq_u32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x94,0x7d]
+0x00,0x01,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x94,0x7d]
+0xff,0x01,0x94,0x7d
+
+# CHECK: v_cmp_eq_u32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x95,0x7d]
+0x00,0xfe,0x95,0x7d
+
+# CHECK: v_cmp_eq_u32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xca,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xca,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xca,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xca,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xca,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xca,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xca,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xca,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xca,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xca,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xca,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xca,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xca,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xca,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xca,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xca,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xca,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xca,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xca,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xca,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xca,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xca,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xca,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xca,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xca,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xca,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xca,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xca,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_eq_u32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xca,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xca,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_eq_u32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xca,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xca,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_eq_u32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xca,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xca,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_eq_u32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xca,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xca,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_eq_u32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xca,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xca,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_eq_u32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xca,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xca,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_eq_u32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xca,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xca,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_eq_u32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xca,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xca,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_le_u32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x96,0x7d]
+0x00,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x96,0x7d]
+0x65,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x96,0x7d]
+0x66,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x96,0x7d]
+0x67,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x96,0x7d]
+0x6a,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x96,0x7d]
+0x6b,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x96,0x7d]
+0x6c,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x96,0x7d]
+0x6d,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x96,0x7d]
+0x6e,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x96,0x7d]
+0x6f,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x96,0x7d]
+0x7b,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x96,0x7d]
+0x7c,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x96,0x7d]
+0x7e,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x96,0x7d]
+0x7f,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x96,0x7d]
+0x80,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x96,0x7d]
+0xc1,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x96,0x7d]
+0xf0,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x96,0x7d]
+0xf7,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x96,0x7d]
+0xfd,0x00,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x96,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x96,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_le_u32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x96,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x96,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_le_u32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x96,0x7d]
+0x00,0x01,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x96,0x7d]
+0xff,0x01,0x96,0x7d
+
+# CHECK: v_cmp_le_u32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x97,0x7d]
+0x00,0xfe,0x97,0x7d
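+
+# The e32 pairs above all follow one 32-bit layout. A minimal Python sketch,
+# assuming the VI/GFX8 VOPC field layout (src0 in bits [8:0], vsrc1 in bits
+# [16:9], opcode in bits [24:17], fixed pattern 0b0111110 in bits [31:25]);
+# a src0 code of 0xff, as in the 0xaf123456 lines above, pulls a 32-bit
+# literal from a second dword:
+
+import struct
+
+def decode_vopc_e32(raw: bytes):
+    """Split a little-endian VOPC e32 encoding into its fields."""
+    (word,) = struct.unpack("<I", raw[:4])
+    assert word >> 25 == 0b0111110, "not a VOPC e32 word"
+    src0 = word & 0x1FF               # 9-bit source operand code
+    vsrc1 = (word >> 9) & 0xFF        # 8-bit VGPR number
+    opcode = (word >> 17) & 0xFF
+    literal = None
+    if src0 == 0xFF:                  # literal constant follows the word
+        (literal,) = struct.unpack("<I", raw[4:8])
+    return opcode, src0, vsrc1, literal
+
+# The "v_cmp_le_u32_e32 vcc, s0, v255" pair directly above decodes as:
+print(decode_vopc_e32(bytes([0x00, 0xFE, 0x97, 0x7D])))
+# -> (203, 0, 255, None): opcode 0xcb, src0 s0, vsrc1 v255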
+
+# CHECK: v_cmp_le_u32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xcb,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xcb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xcb,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xcb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xcb,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xcb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xcb,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xcb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xcb,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xcb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xcb,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xcb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xcb,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xcb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xcb,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xcb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xcb,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xcb,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xcb,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xcb,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xcb,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xcb,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xcb,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xcb,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xcb,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xcb,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xcb,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xcb,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_le_u32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xcb,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xcb,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_le_u32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xcb,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xcb,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_le_u32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xcb,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xcb,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_le_u32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xcb,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xcb,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_le_u32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xcb,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xcb,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_le_u32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xcb,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xcb,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_le_u32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xcb,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xcb,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_le_u32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xcb,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xcb,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_gt_u32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x98,0x7d]
+0x00,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x98,0x7d]
+0x65,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x98,0x7d]
+0x66,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x98,0x7d]
+0x67,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x98,0x7d]
+0x6a,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x98,0x7d]
+0x6b,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x98,0x7d]
+0x6c,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x98,0x7d]
+0x6d,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x98,0x7d]
+0x6e,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x98,0x7d]
+0x6f,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x98,0x7d]
+0x7b,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x98,0x7d]
+0x7c,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x98,0x7d]
+0x7e,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x98,0x7d]
+0x7f,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x98,0x7d]
+0x80,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x98,0x7d]
+0xc1,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x98,0x7d]
+0xf0,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x98,0x7d]
+0xf7,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x98,0x7d]
+0xfd,0x00,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x98,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x98,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_gt_u32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x98,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x98,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_gt_u32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x98,0x7d]
+0x00,0x01,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x98,0x7d]
+0xff,0x01,0x98,0x7d
+
+# CHECK: v_cmp_gt_u32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x99,0x7d]
+0x00,0xfe,0x99,0x7d
+
+# CHECK: v_cmp_gt_u32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xcc,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xcc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xcc,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xcc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xcc,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xcc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xcc,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xcc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xcc,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xcc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xcc,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xcc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xcc,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xcc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xcc,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xcc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xcc,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xcc,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xcc,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xcc,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xcc,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xcc,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xcc,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xcc,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xcc,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xcc,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xcc,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xcc,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_gt_u32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xcc,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xcc,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_gt_u32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xcc,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xcc,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_gt_u32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xcc,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xcc,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_gt_u32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xcc,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xcc,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_gt_u32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xcc,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xcc,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_gt_u32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xcc,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xcc,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_gt_u32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xcc,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xcc,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_gt_u32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xcc,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xcc,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_ne_u32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x9a,0x7d]
+0x00,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x9a,0x7d]
+0x65,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x9a,0x7d]
+0x66,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x9a,0x7d]
+0x67,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x9a,0x7d]
+0x6a,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x9a,0x7d]
+0x6b,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x9a,0x7d]
+0x6c,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x9a,0x7d]
+0x6d,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x9a,0x7d]
+0x6e,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x9a,0x7d]
+0x6f,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x9a,0x7d]
+0x7b,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x9a,0x7d]
+0x7c,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x9a,0x7d]
+0x7e,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x9a,0x7d]
+0x7f,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x9a,0x7d]
+0x80,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x9a,0x7d]
+0xc1,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x9a,0x7d]
+0xf0,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x9a,0x7d]
+0xf7,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x9a,0x7d]
+0xfd,0x00,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x9a,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x9a,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_ne_u32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x9a,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x9a,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_ne_u32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x9a,0x7d]
+0x00,0x01,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x9a,0x7d]
+0xff,0x01,0x9a,0x7d
+
+# CHECK: v_cmp_ne_u32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x9b,0x7d]
+0x00,0xfe,0x9b,0x7d
+
+# CHECK: v_cmp_ne_u32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xcd,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xcd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xcd,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xcd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xcd,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xcd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xcd,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xcd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xcd,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xcd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xcd,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xcd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xcd,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xcd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xcd,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xcd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xcd,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xcd,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xcd,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xcd,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xcd,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xcd,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xcd,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xcd,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xcd,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xcd,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xcd,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xcd,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_ne_u32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xcd,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xcd,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_ne_u32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xcd,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xcd,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_ne_u32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xcd,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xcd,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_ne_u32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xcd,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xcd,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_ne_u32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xcd,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xcd,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_ne_u32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xcd,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xcd,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_ne_u32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xcd,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xcd,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_ne_u32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xcd,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xcd,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_ge_u32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x9c,0x7d]
+0x00,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x9c,0x7d]
+0x65,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x9c,0x7d]
+0x66,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x9c,0x7d]
+0x67,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x9c,0x7d]
+0x6a,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x9c,0x7d]
+0x6b,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x9c,0x7d]
+0x6c,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x9c,0x7d]
+0x6d,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x9c,0x7d]
+0x6e,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x9c,0x7d]
+0x6f,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x9c,0x7d]
+0x7b,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x9c,0x7d]
+0x7c,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x9c,0x7d]
+0x7e,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x9c,0x7d]
+0x7f,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x9c,0x7d]
+0x80,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x9c,0x7d]
+0xc1,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x9c,0x7d]
+0xf0,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x9c,0x7d]
+0xf7,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x9c,0x7d]
+0xfd,0x00,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x9c,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x9c,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_ge_u32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x9c,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x9c,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_ge_u32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x9c,0x7d]
+0x00,0x01,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x9c,0x7d]
+0xff,0x01,0x9c,0x7d
+
+# CHECK: v_cmp_ge_u32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x9d,0x7d]
+0x00,0xfe,0x9d,0x7d
+
+# CHECK: v_cmp_ge_u32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xce,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xce,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xce,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xce,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xce,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xce,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xce,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xce,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xce,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xce,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xce,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xce,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xce,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xce,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xce,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xce,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xce,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xce,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xce,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xce,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xce,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xce,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xce,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xce,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xce,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xce,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xce,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xce,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_ge_u32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xce,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xce,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_ge_u32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xce,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xce,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_ge_u32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xce,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xce,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_ge_u32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xce,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xce,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_ge_u32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xce,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xce,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_ge_u32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xce,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xce,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_ge_u32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xce,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xce,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_ge_u32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xce,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xce,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_t_u32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x9e,0x7d]
+0x00,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0x9e,0x7d]
+0x65,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0x9e,0x7d]
+0x66,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0x9e,0x7d]
+0x67,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0x9e,0x7d]
+0x6a,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0x9e,0x7d]
+0x6b,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0x9e,0x7d]
+0x6c,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0x9e,0x7d]
+0x6d,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0x9e,0x7d]
+0x6e,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0x9e,0x7d]
+0x6f,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0x9e,0x7d]
+0x7b,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0x9e,0x7d]
+0x7c,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0x9e,0x7d]
+0x7e,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0x9e,0x7d]
+0x7f,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0x9e,0x7d]
+0x80,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0x9e,0x7d]
+0xc1,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0x9e,0x7d]
+0xf0,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0x9e,0x7d]
+0xf7,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0x9e,0x7d]
+0xfd,0x00,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0x9e,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0x9e,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_t_u32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0x9e,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0x9e,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_t_u32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0x9e,0x7d]
+0x00,0x01,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0x9e,0x7d]
+0xff,0x01,0x9e,0x7d
+
+# CHECK: v_cmp_t_u32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0x9f,0x7d]
+0x00,0xfe,0x9f,0x7d
+
+# CHECK: v_cmp_t_u32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xcf,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xcf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xcf,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xcf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xcf,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xcf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xcf,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xcf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xcf,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xcf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xcf,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xcf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xcf,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xcf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xcf,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xcf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xcf,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xcf,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xcf,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xcf,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xcf,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xcf,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xcf,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xcf,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xcf,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xcf,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xcf,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xcf,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_t_u32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xcf,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xcf,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmp_t_u32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xcf,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xcf,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_t_u32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xcf,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xcf,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_t_u32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xcf,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xcf,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_t_u32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xcf,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xcf,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_t_u32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xcf,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xcf,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmp_t_u32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xcf,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xcf,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_t_u32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xcf,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xcf,0xd0,0x00,0xfe,0x03,0x00
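+
+# The _e64 forms above use the two-dword VOP3 layout instead. A companion
+# sketch, assuming the VI VOP3a encoding (sdst in bits [7:0] and a 10-bit
+# opcode in bits [25:16] of the first dword, under the fixed 0b110100
+# pattern in bits [31:26]; src0 in bits [8:0] and src1 in bits [17:9] of the
+# second dword, where codes 256+n select VGPR n). The v_cmpx_* groups that
+# follow reuse both layouts; the _x variants also write their result to exec.
+
+import struct
+
+def decode_vop3_cmp(raw: bytes):
+    """Split a little-endian two-dword VOP3 compare into its fields."""
+    w0, w1 = struct.unpack("<II", raw[:8])
+    assert w0 >> 26 == 0b110100, "not a VOP3 encoding"
+    sdst = w0 & 0xFF                  # SGPR pair holding the result mask
+    opcode = (w0 >> 16) & 0x3FF
+    src0 = w1 & 0x1FF
+    src1 = (w1 >> 9) & 0x1FF          # 0x100 | n means VGPR n
+    return opcode, sdst, src0, src1
+
+# "v_cmp_t_u32_e64 ttmp[10:11], s0, s0" above: sdst code 0x7a is ttmp10.
+print(decode_vop3_cmp(bytes([0x7A, 0x00, 0xCF, 0xD0, 0, 0, 0, 0])))
+# -> (207, 122, 0, 0)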
+
+# CHECK: v_cmpx_f_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xa0,0x7d]
+0x00,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xa0,0x7d]
+0x65,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xa0,0x7d]
+0x66,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xa0,0x7d]
+0x67,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xa0,0x7d]
+0x6a,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xa0,0x7d]
+0x6b,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xa0,0x7d]
+0x6c,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xa0,0x7d]
+0x6d,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xa0,0x7d]
+0x6e,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xa0,0x7d]
+0x6f,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xa0,0x7d]
+0x7b,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xa0,0x7d]
+0x7c,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xa0,0x7d]
+0x7e,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xa0,0x7d]
+0x7f,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xa0,0x7d]
+0x80,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xa0,0x7d]
+0xc1,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xa0,0x7d]
+0xf0,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xa0,0x7d]
+0xf7,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xa0,0x7d]
+0xfd,0x00,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xa0,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xa0,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_f_i32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xa0,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xa0,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_f_i32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xa0,0x7d]
+0x00,0x01,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xa0,0x7d]
+0xff,0x01,0xa0,0x7d
+
+# CHECK: v_cmpx_f_i32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xa1,0x7d]
+0x00,0xfe,0xa1,0x7d
+
+# CHECK: v_cmpx_f_i32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xd0,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xd0,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xd0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xd0,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xd0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xd0,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xd0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xd0,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xd0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xd0,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xd0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xd0,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xd0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xd0,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xd0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xd0,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd0,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xd0,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd0,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xd0,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd0,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xd0,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd0,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xd0,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd0,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xd0,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd0,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_f_i32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xd0,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd0,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_f_i32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xd0,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd0,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_f_i32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xd0,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd0,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_f_i32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xd0,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd0,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_f_i32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xd0,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd0,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_f_i32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xd0,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd0,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_f_i32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xd0,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd0,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_f_i32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xd0,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd0,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xa2,0x7d]
+0x00,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xa2,0x7d]
+0x65,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xa2,0x7d]
+0x66,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xa2,0x7d]
+0x67,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xa2,0x7d]
+0x6a,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xa2,0x7d]
+0x6b,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xa2,0x7d]
+0x6c,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xa2,0x7d]
+0x6d,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xa2,0x7d]
+0x6e,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xa2,0x7d]
+0x6f,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xa2,0x7d]
+0x7b,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xa2,0x7d]
+0x7c,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xa2,0x7d]
+0x7e,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xa2,0x7d]
+0x7f,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xa2,0x7d]
+0x80,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xa2,0x7d]
+0xc1,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xa2,0x7d]
+0xf0,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xa2,0x7d]
+0xf7,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xa2,0x7d]
+0xfd,0x00,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xa2,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xa2,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xa2,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xa2,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xa2,0x7d]
+0x00,0x01,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xa2,0x7d]
+0xff,0x01,0xa2,0x7d
+
+# CHECK: v_cmpx_lt_i32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xa3,0x7d]
+0x00,0xfe,0xa3,0x7d
+
+# CHECK: v_cmpx_lt_i32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xd1,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xd1,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xd1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xd1,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xd1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xd1,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xd1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xd1,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xd1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xd1,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xd1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xd1,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xd1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xd1,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xd1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xd1,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd1,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xd1,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd1,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xd1,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd1,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xd1,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd1,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xd1,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd1,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xd1,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd1,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xd1,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd1,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xd1,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd1,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xd1,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd1,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xd1,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd1,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xd1,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd1,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xd1,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd1,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xd1,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd1,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_lt_i32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xd1,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd1,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xa4,0x7d]
+0x00,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xa4,0x7d]
+0x65,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xa4,0x7d]
+0x66,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xa4,0x7d]
+0x67,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xa4,0x7d]
+0x6a,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xa4,0x7d]
+0x6b,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xa4,0x7d]
+0x6c,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xa4,0x7d]
+0x6d,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xa4,0x7d]
+0x6e,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xa4,0x7d]
+0x6f,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xa4,0x7d]
+0x7b,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xa4,0x7d]
+0x7c,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xa4,0x7d]
+0x7e,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xa4,0x7d]
+0x7f,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xa4,0x7d]
+0x80,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xa4,0x7d]
+0xc1,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xa4,0x7d]
+0xf0,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xa4,0x7d]
+0xf7,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xa4,0x7d]
+0xfd,0x00,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xa4,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xa4,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xa4,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xa4,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xa4,0x7d]
+0x00,0x01,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xa4,0x7d]
+0xff,0x01,0xa4,0x7d
+
+# CHECK: v_cmpx_eq_i32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xa5,0x7d]
+0x00,0xfe,0xa5,0x7d
+
+# CHECK: v_cmpx_eq_i32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xd2,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xd2,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xd2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xd2,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xd2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xd2,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xd2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xd2,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xd2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xd2,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xd2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xd2,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xd2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xd2,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xd2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xd2,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd2,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xd2,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd2,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xd2,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd2,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xd2,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd2,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xd2,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd2,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xd2,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd2,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xd2,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd2,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xd2,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd2,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xd2,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd2,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xd2,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd2,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xd2,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd2,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xd2,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd2,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xd2,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd2,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_eq_i32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xd2,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd2,0xd0,0x00,0xfe,0x03,0x00
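+
+# The same fifteen source operands cycle through every group above. A lookup
+# sketch for the 9-bit source codes, with the table read off the CHECK lines
+# themselves (codes not exercised in this file are left out):
+
+def src_name(code: int) -> str:
+    special = {102: "flat_scratch_lo", 103: "flat_scratch_hi",
+               106: "vcc_lo", 107: "vcc_hi", 108: "tba_lo", 109: "tba_hi",
+               110: "tma_lo", 111: "tma_hi", 124: "m0",
+               126: "exec_lo", 127: "exec_hi",
+               240: "0.5", 247: "-4.0", 253: "scc", 255: "<literal>"}
+    if code <= 101:
+        return "s%d" % code           # scalar GPRs s0..s101
+    if 112 <= code <= 123:
+        return "ttmp%d" % (code - 112)
+    if 128 <= code <= 192:
+        return str(code - 128)        # inline integers 0..64
+    if 193 <= code <= 208:
+        return str(192 - code)        # inline integers -1..-16
+    if code >= 256:
+        return "v%d" % (code - 256)   # VGPRs, in VOP3 source slots
+    return special[code]
+
+print(src_name(0x6C), src_name(0xC1), src_name(0xFD))  # tba_lo -1 scc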
+
+# CHECK: v_cmpx_le_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xa6,0x7d]
+0x00,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xa6,0x7d]
+0x65,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xa6,0x7d]
+0x66,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xa6,0x7d]
+0x67,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xa6,0x7d]
+0x6a,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xa6,0x7d]
+0x6b,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xa6,0x7d]
+0x6c,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xa6,0x7d]
+0x6d,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xa6,0x7d]
+0x6e,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xa6,0x7d]
+0x6f,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xa6,0x7d]
+0x7b,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xa6,0x7d]
+0x7c,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xa6,0x7d]
+0x7e,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xa6,0x7d]
+0x7f,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xa6,0x7d]
+0x80,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xa6,0x7d]
+0xc1,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xa6,0x7d]
+0xf0,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xa6,0x7d]
+0xf7,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xa6,0x7d]
+0xfd,0x00,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xa6,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xa6,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_le_i32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xa6,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xa6,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_le_i32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xa6,0x7d]
+0x00,0x01,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xa6,0x7d]
+0xff,0x01,0xa6,0x7d
+
+# CHECK: v_cmpx_le_i32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xa7,0x7d]
+0x00,0xfe,0xa7,0x7d
+
+# CHECK: v_cmpx_le_i32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xd3,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xd3,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xd3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xd3,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xd3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xd3,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xd3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xd3,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xd3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xd3,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xd3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xd3,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xd3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xd3,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xd3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xd3,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd3,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xd3,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd3,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xd3,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd3,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xd3,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd3,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xd3,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd3,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xd3,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd3,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_le_i32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xd3,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd3,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_le_i32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xd3,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd3,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_le_i32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xd3,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd3,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_le_i32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xd3,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd3,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_le_i32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xd3,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd3,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_le_i32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xd3,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd3,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_le_i32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xd3,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd3,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_le_i32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xd3,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd3,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xa8,0x7d]
+0x00,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xa8,0x7d]
+0x65,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xa8,0x7d]
+0x66,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xa8,0x7d]
+0x67,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xa8,0x7d]
+0x6a,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xa8,0x7d]
+0x6b,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xa8,0x7d]
+0x6c,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xa8,0x7d]
+0x6d,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xa8,0x7d]
+0x6e,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xa8,0x7d]
+0x6f,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xa8,0x7d]
+0x7b,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xa8,0x7d]
+0x7c,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xa8,0x7d]
+0x7e,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xa8,0x7d]
+0x7f,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xa8,0x7d]
+0x80,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xa8,0x7d]
+0xc1,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xa8,0x7d]
+0xf0,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xa8,0x7d]
+0xf7,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xa8,0x7d]
+0xfd,0x00,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xa8,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xa8,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xa8,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xa8,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xa8,0x7d]
+0x00,0x01,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xa8,0x7d]
+0xff,0x01,0xa8,0x7d
+
+# CHECK: v_cmpx_gt_i32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xa9,0x7d]
+0x00,0xfe,0xa9,0x7d
+
+# CHECK: v_cmpx_gt_i32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xd4,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xd4,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xd4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xd4,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xd4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xd4,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xd4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xd4,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xd4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xd4,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xd4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xd4,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xd4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xd4,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xd4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xd4,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd4,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xd4,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd4,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xd4,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd4,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xd4,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd4,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xd4,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd4,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xd4,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd4,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xd4,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd4,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xd4,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd4,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xd4,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd4,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xd4,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd4,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xd4,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd4,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xd4,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd4,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xd4,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd4,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_gt_i32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xd4,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd4,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xaa,0x7d]
+0x00,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xaa,0x7d]
+0x65,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xaa,0x7d]
+0x66,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xaa,0x7d]
+0x67,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xaa,0x7d]
+0x6a,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xaa,0x7d]
+0x6b,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xaa,0x7d]
+0x6c,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xaa,0x7d]
+0x6d,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xaa,0x7d]
+0x6e,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xaa,0x7d]
+0x6f,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xaa,0x7d]
+0x7b,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xaa,0x7d]
+0x7c,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xaa,0x7d]
+0x7e,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xaa,0x7d]
+0x7f,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xaa,0x7d]
+0x80,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xaa,0x7d]
+0xc1,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xaa,0x7d]
+0xf0,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xaa,0x7d]
+0xf7,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xaa,0x7d]
+0xfd,0x00,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xaa,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xaa,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xaa,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xaa,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xaa,0x7d]
+0x00,0x01,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xaa,0x7d]
+0xff,0x01,0xaa,0x7d
+
+# CHECK: v_cmpx_ne_i32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xab,0x7d]
+0x00,0xfe,0xab,0x7d
+
+# CHECK: v_cmpx_ne_i32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xd5,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xd5,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xd5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xd5,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xd5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xd5,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xd5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xd5,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xd5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xd5,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xd5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xd5,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xd5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xd5,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xd5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xd5,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd5,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xd5,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd5,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xd5,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd5,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xd5,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd5,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xd5,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd5,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xd5,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd5,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xd5,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd5,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xd5,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd5,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xd5,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd5,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xd5,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd5,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xd5,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd5,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xd5,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd5,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xd5,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd5,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_ne_i32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xd5,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd5,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xac,0x7d]
+0x00,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xac,0x7d]
+0x65,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xac,0x7d]
+0x66,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xac,0x7d]
+0x67,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xac,0x7d]
+0x6a,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xac,0x7d]
+0x6b,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xac,0x7d]
+0x6c,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xac,0x7d]
+0x6d,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xac,0x7d]
+0x6e,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xac,0x7d]
+0x6f,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xac,0x7d]
+0x7b,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xac,0x7d]
+0x7c,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xac,0x7d]
+0x7e,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xac,0x7d]
+0x7f,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xac,0x7d]
+0x80,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xac,0x7d]
+0xc1,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xac,0x7d]
+0xf0,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xac,0x7d]
+0xf7,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xac,0x7d]
+0xfd,0x00,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xac,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xac,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xac,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xac,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xac,0x7d]
+0x00,0x01,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xac,0x7d]
+0xff,0x01,0xac,0x7d
+
+# CHECK: v_cmpx_ge_i32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xad,0x7d]
+0x00,0xfe,0xad,0x7d
+
+# CHECK: v_cmpx_ge_i32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xd6,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xd6,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xd6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xd6,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xd6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xd6,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xd6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xd6,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xd6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xd6,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xd6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xd6,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xd6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xd6,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xd6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xd6,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd6,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xd6,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd6,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xd6,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd6,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xd6,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd6,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xd6,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd6,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xd6,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd6,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xd6,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd6,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xd6,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd6,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xd6,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd6,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xd6,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd6,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xd6,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd6,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xd6,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd6,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xd6,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd6,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_ge_i32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xd6,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd6,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_t_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xae,0x7d]
+0x00,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xae,0x7d]
+0x65,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xae,0x7d]
+0x66,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xae,0x7d]
+0x67,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xae,0x7d]
+0x6a,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xae,0x7d]
+0x6b,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xae,0x7d]
+0x6c,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xae,0x7d]
+0x6d,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xae,0x7d]
+0x6e,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xae,0x7d]
+0x6f,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xae,0x7d]
+0x7b,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xae,0x7d]
+0x7c,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xae,0x7d]
+0x7e,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xae,0x7d]
+0x7f,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xae,0x7d]
+0x80,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xae,0x7d]
+0xc1,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xae,0x7d]
+0xf0,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xae,0x7d]
+0xf7,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xae,0x7d]
+0xfd,0x00,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xae,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xae,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_t_i32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xae,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xae,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_t_i32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xae,0x7d]
+0x00,0x01,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xae,0x7d]
+0xff,0x01,0xae,0x7d
+
+# CHECK: v_cmpx_t_i32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xaf,0x7d]
+0x00,0xfe,0xaf,0x7d
+
+# CHECK: v_cmpx_t_i32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xd7,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xd7,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xd7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xd7,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xd7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xd7,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xd7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xd7,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xd7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xd7,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xd7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xd7,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xd7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xd7,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xd7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xd7,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd7,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xd7,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd7,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xd7,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd7,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xd7,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd7,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xd7,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd7,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xd7,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd7,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_t_i32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xd7,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd7,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_t_i32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xd7,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd7,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_t_i32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xd7,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd7,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_t_i32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xd7,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd7,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_t_i32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xd7,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd7,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_t_i32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xd7,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd7,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_t_i32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xd7,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd7,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_t_i32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xd7,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd7,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_f_u32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xb0,0x7d]
+0x00,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xb0,0x7d]
+0x65,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xb0,0x7d]
+0x66,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xb0,0x7d]
+0x67,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xb0,0x7d]
+0x6a,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xb0,0x7d]
+0x6b,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xb0,0x7d]
+0x6c,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xb0,0x7d]
+0x6d,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xb0,0x7d]
+0x6e,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xb0,0x7d]
+0x6f,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xb0,0x7d]
+0x7b,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xb0,0x7d]
+0x7c,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xb0,0x7d]
+0x7e,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xb0,0x7d]
+0x7f,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xb0,0x7d]
+0x80,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xb0,0x7d]
+0xc1,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xb0,0x7d]
+0xf0,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xb0,0x7d]
+0xf7,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xb0,0x7d]
+0xfd,0x00,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xb0,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xb0,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_f_u32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xb0,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xb0,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_f_u32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xb0,0x7d]
+0x00,0x01,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xb0,0x7d]
+0xff,0x01,0xb0,0x7d
+
+# CHECK: v_cmpx_f_u32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xb1,0x7d]
+0x00,0xfe,0xb1,0x7d
+
+# CHECK: v_cmpx_f_u32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xd8,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xd8,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xd8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xd8,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xd8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xd8,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xd8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xd8,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xd8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xd8,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xd8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xd8,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xd8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xd8,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xd8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xd8,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd8,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xd8,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd8,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xd8,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd8,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xd8,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd8,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xd8,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd8,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xd8,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd8,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_f_u32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xd8,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd8,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_f_u32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xd8,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd8,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_f_u32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xd8,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd8,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_f_u32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xd8,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd8,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_f_u32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xd8,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd8,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_f_u32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xd8,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd8,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_f_u32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xd8,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd8,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_f_u32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xd8,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd8,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xb2,0x7d]
+0x00,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xb2,0x7d]
+0x65,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xb2,0x7d]
+0x66,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xb2,0x7d]
+0x67,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xb2,0x7d]
+0x6a,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xb2,0x7d]
+0x6b,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xb2,0x7d]
+0x6c,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xb2,0x7d]
+0x6d,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xb2,0x7d]
+0x6e,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xb2,0x7d]
+0x6f,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xb2,0x7d]
+0x7b,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xb2,0x7d]
+0x7c,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xb2,0x7d]
+0x7e,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xb2,0x7d]
+0x7f,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xb2,0x7d]
+0x80,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xb2,0x7d]
+0xc1,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xb2,0x7d]
+0xf0,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xb2,0x7d]
+0xf7,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xb2,0x7d]
+0xfd,0x00,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xb2,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xb2,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xb2,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xb2,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xb2,0x7d]
+0x00,0x01,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xb2,0x7d]
+0xff,0x01,0xb2,0x7d
+
+# CHECK: v_cmpx_lt_u32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xb3,0x7d]
+0x00,0xfe,0xb3,0x7d
+
+# CHECK: v_cmpx_lt_u32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xd9,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xd9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xd9,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xd9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xd9,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xd9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xd9,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xd9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xd9,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xd9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xd9,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xd9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xd9,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xd9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xd9,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xd9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xd9,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xd9,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xd9,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xd9,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xd9,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xd9,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xd9,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xd9,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xd9,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xd9,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xd9,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xd9,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xd9,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xd9,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xd9,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xd9,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xd9,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xd9,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xd9,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xd9,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xd9,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xd9,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xd9,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xd9,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xd9,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xd9,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_lt_u32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xd9,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xd9,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xb4,0x7d]
+0x00,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xb4,0x7d]
+0x65,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xb4,0x7d]
+0x66,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xb4,0x7d]
+0x67,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xb4,0x7d]
+0x6a,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xb4,0x7d]
+0x6b,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xb4,0x7d]
+0x6c,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xb4,0x7d]
+0x6d,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xb4,0x7d]
+0x6e,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xb4,0x7d]
+0x6f,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xb4,0x7d]
+0x7b,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xb4,0x7d]
+0x7c,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xb4,0x7d]
+0x7e,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xb4,0x7d]
+0x7f,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xb4,0x7d]
+0x80,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xb4,0x7d]
+0xc1,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xb4,0x7d]
+0xf0,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xb4,0x7d]
+0xf7,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xb4,0x7d]
+0xfd,0x00,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xb4,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xb4,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xb4,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xb4,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xb4,0x7d]
+0x00,0x01,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xb4,0x7d]
+0xff,0x01,0xb4,0x7d
+
+# CHECK: v_cmpx_eq_u32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xb5,0x7d]
+0x00,0xfe,0xb5,0x7d
+
+# CHECK: v_cmpx_eq_u32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xda,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xda,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xda,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xda,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xda,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xda,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xda,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xda,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xda,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xda,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xda,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xda,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xda,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xda,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xda,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xda,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xda,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xda,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xda,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xda,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xda,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xda,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xda,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xda,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xda,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xda,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xda,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xda,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xda,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xda,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xda,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xda,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xda,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xda,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xda,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xda,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xda,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xda,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xda,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xda,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xda,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xda,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_eq_u32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xda,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xda,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_le_u32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xb6,0x7d]
+0x00,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xb6,0x7d]
+0x65,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xb6,0x7d]
+0x66,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xb6,0x7d]
+0x67,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xb6,0x7d]
+0x6a,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xb6,0x7d]
+0x6b,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xb6,0x7d]
+0x6c,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xb6,0x7d]
+0x6d,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xb6,0x7d]
+0x6e,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xb6,0x7d]
+0x6f,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xb6,0x7d]
+0x7b,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xb6,0x7d]
+0x7c,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xb6,0x7d]
+0x7e,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xb6,0x7d]
+0x7f,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xb6,0x7d]
+0x80,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xb6,0x7d]
+0xc1,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xb6,0x7d]
+0xf0,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xb6,0x7d]
+0xf7,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xb6,0x7d]
+0xfd,0x00,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xb6,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xb6,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_le_u32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xb6,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xb6,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_le_u32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xb6,0x7d]
+0x00,0x01,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xb6,0x7d]
+0xff,0x01,0xb6,0x7d
+
+# CHECK: v_cmpx_le_u32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xb7,0x7d]
+0x00,0xfe,0xb7,0x7d
+
+# CHECK: v_cmpx_le_u32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xdb,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xdb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xdb,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xdb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xdb,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xdb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xdb,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xdb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xdb,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xdb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xdb,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xdb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xdb,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xdb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xdb,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xdb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xdb,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xdb,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xdb,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xdb,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xdb,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xdb,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xdb,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xdb,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xdb,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xdb,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xdb,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xdb,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_le_u32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xdb,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xdb,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_le_u32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xdb,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xdb,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_le_u32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xdb,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xdb,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_le_u32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xdb,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xdb,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_le_u32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xdb,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xdb,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_le_u32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xdb,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xdb,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_le_u32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xdb,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xdb,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_le_u32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xdb,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xdb,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xb8,0x7d]
+0x00,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xb8,0x7d]
+0x65,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xb8,0x7d]
+0x66,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xb8,0x7d]
+0x67,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xb8,0x7d]
+0x6a,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xb8,0x7d]
+0x6b,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xb8,0x7d]
+0x6c,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xb8,0x7d]
+0x6d,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xb8,0x7d]
+0x6e,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xb8,0x7d]
+0x6f,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xb8,0x7d]
+0x7b,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xb8,0x7d]
+0x7c,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xb8,0x7d]
+0x7e,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xb8,0x7d]
+0x7f,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xb8,0x7d]
+0x80,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xb8,0x7d]
+0xc1,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xb8,0x7d]
+0xf0,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xb8,0x7d]
+0xf7,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xb8,0x7d]
+0xfd,0x00,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xb8,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xb8,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xb8,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xb8,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xb8,0x7d]
+0x00,0x01,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xb8,0x7d]
+0xff,0x01,0xb8,0x7d
+
+# CHECK: v_cmpx_gt_u32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xb9,0x7d]
+0x00,0xfe,0xb9,0x7d
+
+# CHECK: v_cmpx_gt_u32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xdc,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xdc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xdc,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xdc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xdc,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xdc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xdc,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xdc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xdc,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xdc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xdc,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xdc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xdc,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xdc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xdc,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xdc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xdc,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xdc,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xdc,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xdc,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xdc,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xdc,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xdc,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xdc,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xdc,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xdc,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xdc,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xdc,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xdc,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xdc,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xdc,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xdc,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xdc,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xdc,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xdc,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xdc,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xdc,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xdc,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xdc,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xdc,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xdc,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xdc,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_gt_u32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xdc,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xdc,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xba,0x7d]
+0x00,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xba,0x7d]
+0x65,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xba,0x7d]
+0x66,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xba,0x7d]
+0x67,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xba,0x7d]
+0x6a,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xba,0x7d]
+0x6b,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xba,0x7d]
+0x6c,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xba,0x7d]
+0x6d,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xba,0x7d]
+0x6e,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xba,0x7d]
+0x6f,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xba,0x7d]
+0x7b,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xba,0x7d]
+0x7c,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xba,0x7d]
+0x7e,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xba,0x7d]
+0x7f,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xba,0x7d]
+0x80,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xba,0x7d]
+0xc1,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xba,0x7d]
+0xf0,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xba,0x7d]
+0xf7,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xba,0x7d]
+0xfd,0x00,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xba,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xba,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xba,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xba,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xba,0x7d]
+0x00,0x01,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xba,0x7d]
+0xff,0x01,0xba,0x7d
+
+# CHECK: v_cmpx_ne_u32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xbb,0x7d]
+0x00,0xfe,0xbb,0x7d
+
+# CHECK: v_cmpx_ne_u32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xdd,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xdd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xdd,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xdd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xdd,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xdd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xdd,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xdd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xdd,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xdd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xdd,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xdd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xdd,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xdd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xdd,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xdd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xdd,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xdd,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xdd,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xdd,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xdd,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xdd,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xdd,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xdd,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xdd,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xdd,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xdd,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xdd,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xdd,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xdd,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xdd,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xdd,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xdd,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xdd,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xdd,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xdd,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xdd,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xdd,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xdd,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xdd,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xdd,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xdd,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_ne_u32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xdd,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xdd,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xbc,0x7d]
+0x00,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xbc,0x7d]
+0x65,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xbc,0x7d]
+0x66,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xbc,0x7d]
+0x67,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xbc,0x7d]
+0x6a,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xbc,0x7d]
+0x6b,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xbc,0x7d]
+0x6c,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xbc,0x7d]
+0x6d,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xbc,0x7d]
+0x6e,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xbc,0x7d]
+0x6f,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xbc,0x7d]
+0x7b,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xbc,0x7d]
+0x7c,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xbc,0x7d]
+0x7e,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xbc,0x7d]
+0x7f,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xbc,0x7d]
+0x80,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xbc,0x7d]
+0xc1,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xbc,0x7d]
+0xf0,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xbc,0x7d]
+0xf7,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xbc,0x7d]
+0xfd,0x00,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xbc,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xbc,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xbc,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xbc,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xbc,0x7d]
+0x00,0x01,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xbc,0x7d]
+0xff,0x01,0xbc,0x7d
+
+# CHECK: v_cmpx_ge_u32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xbd,0x7d]
+0x00,0xfe,0xbd,0x7d
+
+# CHECK: v_cmpx_ge_u32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xde,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xde,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xde,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xde,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xde,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xde,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xde,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xde,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xde,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xde,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xde,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xde,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xde,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xde,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xde,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xde,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xde,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xde,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xde,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xde,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xde,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xde,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xde,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xde,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xde,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xde,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xde,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xde,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xde,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xde,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xde,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xde,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xde,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xde,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xde,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xde,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xde,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xde,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xde,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xde,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xde,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xde,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_ge_u32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xde,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xde,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmpx_t_u32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0xbe,0x7d]
+0x00,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, s101, v0 ; encoding: [0x65,0x00,0xbe,0x7d]
+0x65,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, flat_scratch_lo, v0 ; encoding: [0x66,0x00,0xbe,0x7d]
+0x66,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, flat_scratch_hi, v0 ; encoding: [0x67,0x00,0xbe,0x7d]
+0x67,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, vcc_lo, v0 ; encoding: [0x6a,0x00,0xbe,0x7d]
+0x6a,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, vcc_hi, v0 ; encoding: [0x6b,0x00,0xbe,0x7d]
+0x6b,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, tba_lo, v0 ; encoding: [0x6c,0x00,0xbe,0x7d]
+0x6c,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, tba_hi, v0 ; encoding: [0x6d,0x00,0xbe,0x7d]
+0x6d,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, tma_lo, v0 ; encoding: [0x6e,0x00,0xbe,0x7d]
+0x6e,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, tma_hi, v0 ; encoding: [0x6f,0x00,0xbe,0x7d]
+0x6f,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, ttmp11, v0 ; encoding: [0x7b,0x00,0xbe,0x7d]
+0x7b,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, m0, v0 ; encoding: [0x7c,0x00,0xbe,0x7d]
+0x7c,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, exec_lo, v0 ; encoding: [0x7e,0x00,0xbe,0x7d]
+0x7e,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, exec_hi, v0 ; encoding: [0x7f,0x00,0xbe,0x7d]
+0x7f,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, 0, v0 ; encoding: [0x80,0x00,0xbe,0x7d]
+0x80,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, -1, v0 ; encoding: [0xc1,0x00,0xbe,0x7d]
+0xc1,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, 0.5, v0 ; encoding: [0xf0,0x00,0xbe,0x7d]
+0xf0,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, -4.0, v0 ; encoding: [0xf7,0x00,0xbe,0x7d]
+0xf7,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, scc, v0 ; encoding: [0xfd,0x00,0xbe,0x7d]
+0xfd,0x00,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, 0xaf123456, v0 ; encoding: [0xff,0x00,0xbe,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xbe,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_t_u32_e32 vcc, 0x3f717273, v0 ; encoding: [0xff,0x00,0xbe,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xbe,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_t_u32_e32 vcc, v0, v0 ; encoding: [0x00,0x01,0xbe,0x7d]
+0x00,0x01,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, v255, v0 ; encoding: [0xff,0x01,0xbe,0x7d]
+0xff,0x01,0xbe,0x7d
+
+# CHECK: v_cmpx_t_u32_e32 vcc, s0, v255 ; encoding: [0x00,0xfe,0xbf,0x7d]
+0x00,0xfe,0xbf,0x7d
+
+# CHECK: v_cmpx_t_u32_e64 s[0:1], s0, s0 ; encoding: [0x00,0x00,0xdf,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xdf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u32_e64 s[2:3], s0, s0 ; encoding: [0x02,0x00,0xdf,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xdf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u32_e64 s[100:101], s0, s0 ; encoding: [0x64,0x00,0xdf,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xdf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u32_e64 flat_scratch, s0, s0 ; encoding: [0x66,0x00,0xdf,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xdf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u32_e64 vcc, s0, s0 ; encoding: [0x6a,0x00,0xdf,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xdf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u32_e64 tba, s0, s0 ; encoding: [0x6c,0x00,0xdf,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xdf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u32_e64 tma, s0, s0 ; encoding: [0x6e,0x00,0xdf,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xdf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u32_e64 ttmp[10:11], s0, s0 ; encoding: [0x7a,0x00,0xdf,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xdf,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u32_e64 s[0:1], 0, s0 ; encoding: [0x00,0x00,0xdf,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xdf,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u32_e64 s[0:1], -1, s0 ; encoding: [0x00,0x00,0xdf,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xdf,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u32_e64 s[0:1], 0.5, s0 ; encoding: [0x00,0x00,0xdf,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xdf,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u32_e64 s[0:1], -4.0, s0 ; encoding: [0x00,0x00,0xdf,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xdf,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u32_e64 s[0:1], scc, s0 ; encoding: [0x00,0x00,0xdf,0xd0,0xfd,0x00,0x00,0x00]
+0x00,0x00,0xdf,0xd0,0xfd,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u32_e64 s[0:1], v0, s0 ; encoding: [0x00,0x00,0xdf,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xdf,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_t_u32_e64 s[0:1], v255, s0 ; encoding: [0x00,0x00,0xdf,0xd0,0xff,0x01,0x00,0x00]
+0x00,0x00,0xdf,0xd0,0xff,0x01,0x00,0x00
+
+# CHECK: v_cmpx_t_u32_e64 s[0:1], s0, 0 ; encoding: [0x00,0x00,0xdf,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xdf,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_t_u32_e64 s[0:1], s0, -1 ; encoding: [0x00,0x00,0xdf,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xdf,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_t_u32_e64 s[0:1], s0, 0.5 ; encoding: [0x00,0x00,0xdf,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xdf,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_t_u32_e64 s[0:1], s0, -4.0 ; encoding: [0x00,0x00,0xdf,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xdf,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_t_u32_e64 s[0:1], s0, scc ; encoding: [0x00,0x00,0xdf,0xd0,0x00,0xfa,0x01,0x00]
+0x00,0x00,0xdf,0xd0,0x00,0xfa,0x01,0x00
+
+# CHECK: v_cmpx_t_u32_e64 s[0:1], s0, v0 ; encoding: [0x00,0x00,0xdf,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xdf,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_t_u32_e64 s[0:1], s0, v255 ; encoding: [0x00,0x00,0xdf,0xd0,0x00,0xfe,0x03,0x00]
+0x00,0x00,0xdf,0xd0,0x00,0xfe,0x03,0x00
+
+# CHECK: v_cmp_f_i64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xc0,0x7d]
+0x00,0x00,0xc0,0x7d
+
+# CHECK: v_cmp_f_i64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xc0,0x7d]
+0x02,0x00,0xc0,0x7d
+
+# CHECK: v_cmp_f_i64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xc0,0x7d]
+0x64,0x00,0xc0,0x7d
+
+# CHECK: v_cmp_f_i64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xc0,0x7d]
+0x66,0x00,0xc0,0x7d
+
+# CHECK: v_cmp_f_i64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xc0,0x7d]
+0x6a,0x00,0xc0,0x7d
+
+# CHECK: v_cmp_f_i64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xc0,0x7d]
+0x6c,0x00,0xc0,0x7d
+
+# CHECK: v_cmp_f_i64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xc0,0x7d]
+0x6e,0x00,0xc0,0x7d
+
+# CHECK: v_cmp_f_i64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xc0,0x7d]
+0x7a,0x00,0xc0,0x7d
+
+# CHECK: v_cmp_f_i64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xc0,0x7d]
+0x7e,0x00,0xc0,0x7d
+
+# CHECK: v_cmp_f_i64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xc0,0x7d]
+0x80,0x00,0xc0,0x7d
+
+# CHECK: v_cmp_f_i64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xc0,0x7d]
+0xc1,0x00,0xc0,0x7d
+
+# CHECK: v_cmp_f_i64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xc0,0x7d]
+0xf0,0x00,0xc0,0x7d
+
+# CHECK: v_cmp_f_i64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xc0,0x7d]
+0xf7,0x00,0xc0,0x7d
+
+# CHECK: v_cmp_f_i64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xc0,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xc0,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_f_i64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xc0,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xc0,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_f_i64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xc0,0x7d]
+0x00,0x01,0xc0,0x7d
+
+# CHECK: v_cmp_f_i64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xc0,0x7d]
+0xfe,0x01,0xc0,0x7d
+
+# CHECK: v_cmp_f_i64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xc1,0x7d]
+0x00,0xfc,0xc1,0x7d
+
+# CHECK: v_cmp_f_i64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe0,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xe0,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xe0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xe0,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xe0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xe0,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xe0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xe0,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xe0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xe0,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xe0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xe0,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xe0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xe0,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xe0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xe0,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xe0,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xe0,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xe0,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xe0,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xe0,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xe0,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xe0,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_i64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xe0,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xe0,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_f_i64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xe0,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xe0,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_f_i64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xe0,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xe0,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_f_i64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xe0,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xe0,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_f_i64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xe0,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xe0,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_f_i64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xe0,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xe0,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_f_i64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe0,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xe0,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_f_i64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xe0,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xe0,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xc2,0x7d]
+0x00,0x00,0xc2,0x7d
+
+# CHECK: v_cmp_lt_i64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xc2,0x7d]
+0x02,0x00,0xc2,0x7d
+
+# CHECK: v_cmp_lt_i64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xc2,0x7d]
+0x64,0x00,0xc2,0x7d
+
+# CHECK: v_cmp_lt_i64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xc2,0x7d]
+0x66,0x00,0xc2,0x7d
+
+# CHECK: v_cmp_lt_i64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xc2,0x7d]
+0x6a,0x00,0xc2,0x7d
+
+# CHECK: v_cmp_lt_i64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xc2,0x7d]
+0x6c,0x00,0xc2,0x7d
+
+# CHECK: v_cmp_lt_i64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xc2,0x7d]
+0x6e,0x00,0xc2,0x7d
+
+# CHECK: v_cmp_lt_i64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xc2,0x7d]
+0x7a,0x00,0xc2,0x7d
+
+# CHECK: v_cmp_lt_i64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xc2,0x7d]
+0x7e,0x00,0xc2,0x7d
+
+# CHECK: v_cmp_lt_i64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xc2,0x7d]
+0x80,0x00,0xc2,0x7d
+
+# CHECK: v_cmp_lt_i64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xc2,0x7d]
+0xc1,0x00,0xc2,0x7d
+
+# CHECK: v_cmp_lt_i64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xc2,0x7d]
+0xf0,0x00,0xc2,0x7d
+
+# CHECK: v_cmp_lt_i64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xc2,0x7d]
+0xf7,0x00,0xc2,0x7d
+
+# CHECK: v_cmp_lt_i64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xc2,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xc2,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_lt_i64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xc2,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xc2,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_lt_i64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xc2,0x7d]
+0x00,0x01,0xc2,0x7d
+
+# CHECK: v_cmp_lt_i64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xc2,0x7d]
+0xfe,0x01,0xc2,0x7d
+
+# CHECK: v_cmp_lt_i64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xc3,0x7d]
+0x00,0xfc,0xc3,0x7d
+
+# CHECK: v_cmp_lt_i64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe1,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xe1,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xe1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xe1,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xe1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xe1,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xe1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xe1,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xe1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xe1,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xe1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xe1,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xe1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xe1,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xe1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xe1,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xe1,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xe1,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xe1,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xe1,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xe1,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xe1,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xe1,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_i64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xe1,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xe1,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_lt_i64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xe1,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xe1,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_lt_i64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xe1,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xe1,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_lt_i64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xe1,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xe1,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_lt_i64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xe1,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xe1,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_lt_i64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xe1,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xe1,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_lt_i64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe1,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xe1,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_lt_i64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xe1,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xe1,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_eq_i64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xc4,0x7d]
+0x00,0x00,0xc4,0x7d
+
+# CHECK: v_cmp_eq_i64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xc4,0x7d]
+0x02,0x00,0xc4,0x7d
+
+# CHECK: v_cmp_eq_i64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xc4,0x7d]
+0x64,0x00,0xc4,0x7d
+
+# CHECK: v_cmp_eq_i64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xc4,0x7d]
+0x66,0x00,0xc4,0x7d
+
+# CHECK: v_cmp_eq_i64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xc4,0x7d]
+0x6a,0x00,0xc4,0x7d
+
+# CHECK: v_cmp_eq_i64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xc4,0x7d]
+0x6c,0x00,0xc4,0x7d
+
+# CHECK: v_cmp_eq_i64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xc4,0x7d]
+0x6e,0x00,0xc4,0x7d
+
+# CHECK: v_cmp_eq_i64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xc4,0x7d]
+0x7a,0x00,0xc4,0x7d
+
+# CHECK: v_cmp_eq_i64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xc4,0x7d]
+0x7e,0x00,0xc4,0x7d
+
+# CHECK: v_cmp_eq_i64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xc4,0x7d]
+0x80,0x00,0xc4,0x7d
+
+# CHECK: v_cmp_eq_i64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xc4,0x7d]
+0xc1,0x00,0xc4,0x7d
+
+# CHECK: v_cmp_eq_i64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xc4,0x7d]
+0xf0,0x00,0xc4,0x7d
+
+# CHECK: v_cmp_eq_i64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xc4,0x7d]
+0xf7,0x00,0xc4,0x7d
+
+# CHECK: v_cmp_eq_i64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xc4,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xc4,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_eq_i64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xc4,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xc4,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_eq_i64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xc4,0x7d]
+0x00,0x01,0xc4,0x7d
+
+# CHECK: v_cmp_eq_i64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xc4,0x7d]
+0xfe,0x01,0xc4,0x7d
+
+# CHECK: v_cmp_eq_i64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xc5,0x7d]
+0x00,0xfc,0xc5,0x7d
+
+# CHECK: v_cmp_eq_i64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe2,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xe2,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xe2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xe2,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xe2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xe2,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xe2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xe2,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xe2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xe2,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xe2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xe2,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xe2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xe2,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xe2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xe2,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xe2,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xe2,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xe2,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xe2,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xe2,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xe2,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xe2,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_i64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xe2,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xe2,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_eq_i64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xe2,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xe2,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_eq_i64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xe2,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xe2,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_eq_i64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xe2,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xe2,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_eq_i64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xe2,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xe2,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_eq_i64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xe2,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xe2,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_eq_i64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe2,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xe2,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_eq_i64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xe2,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xe2,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_le_i64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xc6,0x7d]
+0x00,0x00,0xc6,0x7d
+
+# CHECK: v_cmp_le_i64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xc6,0x7d]
+0x02,0x00,0xc6,0x7d
+
+# CHECK: v_cmp_le_i64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xc6,0x7d]
+0x64,0x00,0xc6,0x7d
+
+# CHECK: v_cmp_le_i64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xc6,0x7d]
+0x66,0x00,0xc6,0x7d
+
+# CHECK: v_cmp_le_i64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xc6,0x7d]
+0x6a,0x00,0xc6,0x7d
+
+# CHECK: v_cmp_le_i64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xc6,0x7d]
+0x6c,0x00,0xc6,0x7d
+
+# CHECK: v_cmp_le_i64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xc6,0x7d]
+0x6e,0x00,0xc6,0x7d
+
+# CHECK: v_cmp_le_i64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xc6,0x7d]
+0x7a,0x00,0xc6,0x7d
+
+# CHECK: v_cmp_le_i64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xc6,0x7d]
+0x7e,0x00,0xc6,0x7d
+
+# CHECK: v_cmp_le_i64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xc6,0x7d]
+0x80,0x00,0xc6,0x7d
+
+# CHECK: v_cmp_le_i64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xc6,0x7d]
+0xc1,0x00,0xc6,0x7d
+
+# CHECK: v_cmp_le_i64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xc6,0x7d]
+0xf0,0x00,0xc6,0x7d
+
+# CHECK: v_cmp_le_i64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xc6,0x7d]
+0xf7,0x00,0xc6,0x7d
+
+# CHECK: v_cmp_le_i64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xc6,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xc6,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_le_i64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xc6,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xc6,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_le_i64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xc6,0x7d]
+0x00,0x01,0xc6,0x7d
+
+# CHECK: v_cmp_le_i64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xc6,0x7d]
+0xfe,0x01,0xc6,0x7d
+
+# CHECK: v_cmp_le_i64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xc7,0x7d]
+0x00,0xfc,0xc7,0x7d
+
+# CHECK: v_cmp_le_i64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe3,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xe3,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xe3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xe3,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xe3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xe3,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xe3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xe3,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xe3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xe3,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xe3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xe3,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xe3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xe3,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xe3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xe3,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xe3,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xe3,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xe3,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xe3,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xe3,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xe3,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xe3,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_i64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xe3,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xe3,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_le_i64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xe3,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xe3,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_le_i64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xe3,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xe3,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_le_i64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xe3,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xe3,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_le_i64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xe3,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xe3,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_le_i64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xe3,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xe3,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_le_i64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe3,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xe3,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_le_i64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xe3,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xe3,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_gt_i64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xc8,0x7d]
+0x00,0x00,0xc8,0x7d
+
+# CHECK: v_cmp_gt_i64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xc8,0x7d]
+0x02,0x00,0xc8,0x7d
+
+# CHECK: v_cmp_gt_i64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xc8,0x7d]
+0x64,0x00,0xc8,0x7d
+
+# CHECK: v_cmp_gt_i64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xc8,0x7d]
+0x66,0x00,0xc8,0x7d
+
+# CHECK: v_cmp_gt_i64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xc8,0x7d]
+0x6a,0x00,0xc8,0x7d
+
+# CHECK: v_cmp_gt_i64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xc8,0x7d]
+0x6c,0x00,0xc8,0x7d
+
+# CHECK: v_cmp_gt_i64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xc8,0x7d]
+0x6e,0x00,0xc8,0x7d
+
+# CHECK: v_cmp_gt_i64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xc8,0x7d]
+0x7a,0x00,0xc8,0x7d
+
+# CHECK: v_cmp_gt_i64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xc8,0x7d]
+0x7e,0x00,0xc8,0x7d
+
+# CHECK: v_cmp_gt_i64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xc8,0x7d]
+0x80,0x00,0xc8,0x7d
+
+# CHECK: v_cmp_gt_i64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xc8,0x7d]
+0xc1,0x00,0xc8,0x7d
+
+# CHECK: v_cmp_gt_i64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xc8,0x7d]
+0xf0,0x00,0xc8,0x7d
+
+# CHECK: v_cmp_gt_i64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xc8,0x7d]
+0xf7,0x00,0xc8,0x7d
+
+# CHECK: v_cmp_gt_i64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xc8,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xc8,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_gt_i64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xc8,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xc8,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_gt_i64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xc8,0x7d]
+0x00,0x01,0xc8,0x7d
+
+# CHECK: v_cmp_gt_i64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xc8,0x7d]
+0xfe,0x01,0xc8,0x7d
+
+# CHECK: v_cmp_gt_i64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xc9,0x7d]
+0x00,0xfc,0xc9,0x7d
+
+# CHECK: v_cmp_gt_i64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe4,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xe4,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xe4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xe4,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xe4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xe4,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xe4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xe4,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xe4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xe4,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xe4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xe4,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xe4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xe4,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xe4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xe4,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xe4,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xe4,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xe4,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xe4,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xe4,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xe4,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xe4,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_i64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xe4,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xe4,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_gt_i64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xe4,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xe4,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_gt_i64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xe4,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xe4,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_gt_i64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xe4,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xe4,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_gt_i64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xe4,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xe4,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_gt_i64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xe4,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xe4,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_gt_i64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe4,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xe4,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_gt_i64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xe4,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xe4,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_ne_i64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xca,0x7d]
+0x00,0x00,0xca,0x7d
+
+# CHECK: v_cmp_ne_i64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xca,0x7d]
+0x02,0x00,0xca,0x7d
+
+# CHECK: v_cmp_ne_i64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xca,0x7d]
+0x64,0x00,0xca,0x7d
+
+# CHECK: v_cmp_ne_i64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xca,0x7d]
+0x66,0x00,0xca,0x7d
+
+# CHECK: v_cmp_ne_i64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xca,0x7d]
+0x6a,0x00,0xca,0x7d
+
+# CHECK: v_cmp_ne_i64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xca,0x7d]
+0x6c,0x00,0xca,0x7d
+
+# CHECK: v_cmp_ne_i64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xca,0x7d]
+0x6e,0x00,0xca,0x7d
+
+# CHECK: v_cmp_ne_i64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xca,0x7d]
+0x7a,0x00,0xca,0x7d
+
+# CHECK: v_cmp_ne_i64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xca,0x7d]
+0x7e,0x00,0xca,0x7d
+
+# CHECK: v_cmp_ne_i64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xca,0x7d]
+0x80,0x00,0xca,0x7d
+
+# CHECK: v_cmp_ne_i64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xca,0x7d]
+0xc1,0x00,0xca,0x7d
+
+# CHECK: v_cmp_ne_i64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xca,0x7d]
+0xf0,0x00,0xca,0x7d
+
+# CHECK: v_cmp_ne_i64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xca,0x7d]
+0xf7,0x00,0xca,0x7d
+
+# CHECK: v_cmp_ne_i64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xca,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xca,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_ne_i64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xca,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xca,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_ne_i64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xca,0x7d]
+0x00,0x01,0xca,0x7d
+
+# CHECK: v_cmp_ne_i64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xca,0x7d]
+0xfe,0x01,0xca,0x7d
+
+# CHECK: v_cmp_ne_i64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xcb,0x7d]
+0x00,0xfc,0xcb,0x7d
+
+# CHECK: v_cmp_ne_i64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe5,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xe5,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xe5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xe5,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xe5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xe5,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xe5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xe5,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xe5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xe5,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xe5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xe5,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xe5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xe5,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xe5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xe5,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xe5,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xe5,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xe5,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xe5,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xe5,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xe5,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xe5,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_i64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xe5,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xe5,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_ne_i64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xe5,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xe5,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_ne_i64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xe5,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xe5,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_ne_i64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xe5,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xe5,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_ne_i64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xe5,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xe5,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_ne_i64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xe5,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xe5,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_ne_i64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe5,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xe5,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_ne_i64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xe5,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xe5,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_ge_i64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xcc,0x7d]
+0x00,0x00,0xcc,0x7d
+
+# CHECK: v_cmp_ge_i64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xcc,0x7d]
+0x02,0x00,0xcc,0x7d
+
+# CHECK: v_cmp_ge_i64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xcc,0x7d]
+0x64,0x00,0xcc,0x7d
+
+# CHECK: v_cmp_ge_i64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xcc,0x7d]
+0x66,0x00,0xcc,0x7d
+
+# CHECK: v_cmp_ge_i64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xcc,0x7d]
+0x6a,0x00,0xcc,0x7d
+
+# CHECK: v_cmp_ge_i64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xcc,0x7d]
+0x6c,0x00,0xcc,0x7d
+
+# CHECK: v_cmp_ge_i64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xcc,0x7d]
+0x6e,0x00,0xcc,0x7d
+
+# CHECK: v_cmp_ge_i64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xcc,0x7d]
+0x7a,0x00,0xcc,0x7d
+
+# CHECK: v_cmp_ge_i64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xcc,0x7d]
+0x7e,0x00,0xcc,0x7d
+
+# CHECK: v_cmp_ge_i64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xcc,0x7d]
+0x80,0x00,0xcc,0x7d
+
+# CHECK: v_cmp_ge_i64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xcc,0x7d]
+0xc1,0x00,0xcc,0x7d
+
+# CHECK: v_cmp_ge_i64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xcc,0x7d]
+0xf0,0x00,0xcc,0x7d
+
+# CHECK: v_cmp_ge_i64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xcc,0x7d]
+0xf7,0x00,0xcc,0x7d
+
+# CHECK: v_cmp_ge_i64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xcc,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xcc,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_ge_i64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xcc,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xcc,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_ge_i64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xcc,0x7d]
+0x00,0x01,0xcc,0x7d
+
+# CHECK: v_cmp_ge_i64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xcc,0x7d]
+0xfe,0x01,0xcc,0x7d
+
+# CHECK: v_cmp_ge_i64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xcd,0x7d]
+0x00,0xfc,0xcd,0x7d
+
+# CHECK: v_cmp_ge_i64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe6,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xe6,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xe6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xe6,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xe6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xe6,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xe6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xe6,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xe6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xe6,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xe6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xe6,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xe6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xe6,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xe6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xe6,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xe6,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xe6,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xe6,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xe6,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xe6,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xe6,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xe6,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_i64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xe6,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xe6,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_ge_i64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xe6,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xe6,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_ge_i64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xe6,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xe6,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_ge_i64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xe6,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xe6,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_ge_i64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xe6,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xe6,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_ge_i64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xe6,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xe6,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_ge_i64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe6,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xe6,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_ge_i64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xe6,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xe6,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_t_i64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xce,0x7d]
+0x00,0x00,0xce,0x7d
+
+# CHECK: v_cmp_t_i64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xce,0x7d]
+0x02,0x00,0xce,0x7d
+
+# CHECK: v_cmp_t_i64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xce,0x7d]
+0x64,0x00,0xce,0x7d
+
+# CHECK: v_cmp_t_i64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xce,0x7d]
+0x66,0x00,0xce,0x7d
+
+# CHECK: v_cmp_t_i64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xce,0x7d]
+0x6a,0x00,0xce,0x7d
+
+# CHECK: v_cmp_t_i64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xce,0x7d]
+0x6c,0x00,0xce,0x7d
+
+# CHECK: v_cmp_t_i64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xce,0x7d]
+0x6e,0x00,0xce,0x7d
+
+# CHECK: v_cmp_t_i64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xce,0x7d]
+0x7a,0x00,0xce,0x7d
+
+# CHECK: v_cmp_t_i64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xce,0x7d]
+0x7e,0x00,0xce,0x7d
+
+# CHECK: v_cmp_t_i64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xce,0x7d]
+0x80,0x00,0xce,0x7d
+
+# CHECK: v_cmp_t_i64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xce,0x7d]
+0xc1,0x00,0xce,0x7d
+
+# CHECK: v_cmp_t_i64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xce,0x7d]
+0xf0,0x00,0xce,0x7d
+
+# CHECK: v_cmp_t_i64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xce,0x7d]
+0xf7,0x00,0xce,0x7d
+
+# CHECK: v_cmp_t_i64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xce,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xce,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_t_i64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xce,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xce,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_t_i64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xce,0x7d]
+0x00,0x01,0xce,0x7d
+
+# CHECK: v_cmp_t_i64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xce,0x7d]
+0xfe,0x01,0xce,0x7d
+
+# CHECK: v_cmp_t_i64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xcf,0x7d]
+0x00,0xfc,0xcf,0x7d
+
+# CHECK: v_cmp_t_i64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe7,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xe7,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xe7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xe7,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xe7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xe7,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xe7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xe7,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xe7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xe7,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xe7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xe7,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xe7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xe7,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xe7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xe7,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xe7,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xe7,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xe7,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xe7,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xe7,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xe7,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xe7,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_i64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xe7,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xe7,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_t_i64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xe7,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xe7,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_t_i64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xe7,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xe7,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_t_i64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xe7,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xe7,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_t_i64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xe7,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xe7,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_t_i64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xe7,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xe7,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_t_i64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe7,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xe7,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_t_i64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xe7,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xe7,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_f_u64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xd0,0x7d]
+0x00,0x00,0xd0,0x7d
+
+# CHECK: v_cmp_f_u64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xd0,0x7d]
+0x02,0x00,0xd0,0x7d
+
+# CHECK: v_cmp_f_u64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xd0,0x7d]
+0x64,0x00,0xd0,0x7d
+
+# CHECK: v_cmp_f_u64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xd0,0x7d]
+0x66,0x00,0xd0,0x7d
+
+# CHECK: v_cmp_f_u64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xd0,0x7d]
+0x6a,0x00,0xd0,0x7d
+
+# CHECK: v_cmp_f_u64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xd0,0x7d]
+0x6c,0x00,0xd0,0x7d
+
+# CHECK: v_cmp_f_u64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xd0,0x7d]
+0x6e,0x00,0xd0,0x7d
+
+# CHECK: v_cmp_f_u64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xd0,0x7d]
+0x7a,0x00,0xd0,0x7d
+
+# CHECK: v_cmp_f_u64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xd0,0x7d]
+0x7e,0x00,0xd0,0x7d
+
+# CHECK: v_cmp_f_u64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xd0,0x7d]
+0x80,0x00,0xd0,0x7d
+
+# CHECK: v_cmp_f_u64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xd0,0x7d]
+0xc1,0x00,0xd0,0x7d
+
+# CHECK: v_cmp_f_u64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xd0,0x7d]
+0xf0,0x00,0xd0,0x7d
+
+# CHECK: v_cmp_f_u64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xd0,0x7d]
+0xf7,0x00,0xd0,0x7d
+
+# CHECK: v_cmp_f_u64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xd0,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xd0,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_f_u64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xd0,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xd0,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_f_u64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xd0,0x7d]
+0x00,0x01,0xd0,0x7d
+
+# CHECK: v_cmp_f_u64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xd0,0x7d]
+0xfe,0x01,0xd0,0x7d
+
+# CHECK: v_cmp_f_u64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xd1,0x7d]
+0x00,0xfc,0xd1,0x7d
+
+# CHECK: v_cmp_f_u64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe8,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xe8,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xe8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xe8,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xe8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xe8,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xe8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xe8,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xe8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xe8,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xe8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xe8,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xe8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xe8,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xe8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xe8,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xe8,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xe8,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xe8,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xe8,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xe8,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xe8,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xe8,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_f_u64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xe8,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xe8,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_f_u64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xe8,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xe8,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_f_u64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xe8,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xe8,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_f_u64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xe8,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xe8,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_f_u64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xe8,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xe8,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_f_u64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xe8,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xe8,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_f_u64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe8,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xe8,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_f_u64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xe8,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xe8,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xd2,0x7d]
+0x00,0x00,0xd2,0x7d
+
+# CHECK: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xd2,0x7d]
+0x02,0x00,0xd2,0x7d
+
+# CHECK: v_cmp_lt_u64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xd2,0x7d]
+0x64,0x00,0xd2,0x7d
+
+# CHECK: v_cmp_lt_u64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xd2,0x7d]
+0x66,0x00,0xd2,0x7d
+
+# CHECK: v_cmp_lt_u64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xd2,0x7d]
+0x6a,0x00,0xd2,0x7d
+
+# CHECK: v_cmp_lt_u64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xd2,0x7d]
+0x6c,0x00,0xd2,0x7d
+
+# CHECK: v_cmp_lt_u64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xd2,0x7d]
+0x6e,0x00,0xd2,0x7d
+
+# CHECK: v_cmp_lt_u64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xd2,0x7d]
+0x7a,0x00,0xd2,0x7d
+
+# CHECK: v_cmp_lt_u64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xd2,0x7d]
+0x7e,0x00,0xd2,0x7d
+
+# CHECK: v_cmp_lt_u64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xd2,0x7d]
+0x80,0x00,0xd2,0x7d
+
+# CHECK: v_cmp_lt_u64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xd2,0x7d]
+0xc1,0x00,0xd2,0x7d
+
+# CHECK: v_cmp_lt_u64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xd2,0x7d]
+0xf0,0x00,0xd2,0x7d
+
+# CHECK: v_cmp_lt_u64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xd2,0x7d]
+0xf7,0x00,0xd2,0x7d
+
+# CHECK: v_cmp_lt_u64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xd2,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xd2,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_lt_u64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xd2,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xd2,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_lt_u64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xd2,0x7d]
+0x00,0x01,0xd2,0x7d
+
+# CHECK: v_cmp_lt_u64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xd2,0x7d]
+0xfe,0x01,0xd2,0x7d
+
+# CHECK: v_cmp_lt_u64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xd3,0x7d]
+0x00,0xfc,0xd3,0x7d
+
+# CHECK: v_cmp_lt_u64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xe9,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xe9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xe9,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xe9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xe9,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xe9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xe9,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xe9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xe9,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xe9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xe9,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xe9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xe9,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xe9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xe9,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xe9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xe9,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xe9,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xe9,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xe9,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xe9,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xe9,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xe9,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xe9,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_lt_u64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xe9,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xe9,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_lt_u64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xe9,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xe9,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_lt_u64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xe9,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xe9,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_lt_u64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xe9,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xe9,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_lt_u64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xe9,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xe9,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_lt_u64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xe9,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xe9,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_lt_u64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe9,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xe9,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_lt_u64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xe9,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xe9,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_eq_u64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xd4,0x7d]
+0x00,0x00,0xd4,0x7d
+
+# CHECK: v_cmp_eq_u64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xd4,0x7d]
+0x02,0x00,0xd4,0x7d
+
+# CHECK: v_cmp_eq_u64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xd4,0x7d]
+0x64,0x00,0xd4,0x7d
+
+# CHECK: v_cmp_eq_u64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xd4,0x7d]
+0x66,0x00,0xd4,0x7d
+
+# CHECK: v_cmp_eq_u64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xd4,0x7d]
+0x6a,0x00,0xd4,0x7d
+
+# CHECK: v_cmp_eq_u64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xd4,0x7d]
+0x6c,0x00,0xd4,0x7d
+
+# CHECK: v_cmp_eq_u64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xd4,0x7d]
+0x6e,0x00,0xd4,0x7d
+
+# CHECK: v_cmp_eq_u64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xd4,0x7d]
+0x7a,0x00,0xd4,0x7d
+
+# CHECK: v_cmp_eq_u64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xd4,0x7d]
+0x7e,0x00,0xd4,0x7d
+
+# CHECK: v_cmp_eq_u64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xd4,0x7d]
+0x80,0x00,0xd4,0x7d
+
+# CHECK: v_cmp_eq_u64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xd4,0x7d]
+0xc1,0x00,0xd4,0x7d
+
+# CHECK: v_cmp_eq_u64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xd4,0x7d]
+0xf0,0x00,0xd4,0x7d
+
+# CHECK: v_cmp_eq_u64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xd4,0x7d]
+0xf7,0x00,0xd4,0x7d
+
+# CHECK: v_cmp_eq_u64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xd4,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xd4,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_eq_u64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xd4,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xd4,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_eq_u64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xd4,0x7d]
+0x00,0x01,0xd4,0x7d
+
+# CHECK: v_cmp_eq_u64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xd4,0x7d]
+0xfe,0x01,0xd4,0x7d
+
+# CHECK: v_cmp_eq_u64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xd5,0x7d]
+0x00,0xfc,0xd5,0x7d
+
+# CHECK: v_cmp_eq_u64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xea,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xea,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xea,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xea,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xea,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xea,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xea,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xea,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xea,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xea,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xea,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xea,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xea,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xea,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xea,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xea,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xea,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xea,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xea,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xea,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xea,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xea,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xea,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xea,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_eq_u64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xea,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xea,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_eq_u64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xea,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xea,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_eq_u64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xea,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xea,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_eq_u64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xea,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xea,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_eq_u64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xea,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xea,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_eq_u64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xea,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xea,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_eq_u64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xea,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xea,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_eq_u64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xea,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xea,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_le_u64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xd6,0x7d]
+0x00,0x00,0xd6,0x7d
+
+# CHECK: v_cmp_le_u64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xd6,0x7d]
+0x02,0x00,0xd6,0x7d
+
+# CHECK: v_cmp_le_u64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xd6,0x7d]
+0x64,0x00,0xd6,0x7d
+
+# CHECK: v_cmp_le_u64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xd6,0x7d]
+0x66,0x00,0xd6,0x7d
+
+# CHECK: v_cmp_le_u64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xd6,0x7d]
+0x6a,0x00,0xd6,0x7d
+
+# CHECK: v_cmp_le_u64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xd6,0x7d]
+0x6c,0x00,0xd6,0x7d
+
+# CHECK: v_cmp_le_u64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xd6,0x7d]
+0x6e,0x00,0xd6,0x7d
+
+# CHECK: v_cmp_le_u64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xd6,0x7d]
+0x7a,0x00,0xd6,0x7d
+
+# CHECK: v_cmp_le_u64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xd6,0x7d]
+0x7e,0x00,0xd6,0x7d
+
+# CHECK: v_cmp_le_u64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xd6,0x7d]
+0x80,0x00,0xd6,0x7d
+
+# CHECK: v_cmp_le_u64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xd6,0x7d]
+0xc1,0x00,0xd6,0x7d
+
+# CHECK: v_cmp_le_u64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xd6,0x7d]
+0xf0,0x00,0xd6,0x7d
+
+# CHECK: v_cmp_le_u64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xd6,0x7d]
+0xf7,0x00,0xd6,0x7d
+
+# CHECK: v_cmp_le_u64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xd6,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xd6,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_le_u64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xd6,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xd6,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_le_u64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xd6,0x7d]
+0x00,0x01,0xd6,0x7d
+
+# CHECK: v_cmp_le_u64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xd6,0x7d]
+0xfe,0x01,0xd6,0x7d
+
+# CHECK: v_cmp_le_u64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xd7,0x7d]
+0x00,0xfc,0xd7,0x7d
+
+# CHECK: v_cmp_le_u64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xeb,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xeb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xeb,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xeb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xeb,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xeb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xeb,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xeb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xeb,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xeb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xeb,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xeb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xeb,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xeb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xeb,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xeb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xeb,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xeb,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xeb,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xeb,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xeb,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xeb,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xeb,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xeb,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_le_u64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xeb,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xeb,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_le_u64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xeb,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xeb,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_le_u64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xeb,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xeb,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_le_u64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xeb,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xeb,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_le_u64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xeb,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xeb,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_le_u64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xeb,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xeb,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_le_u64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xeb,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xeb,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_le_u64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xeb,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xeb,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xd8,0x7d]
+0x00,0x00,0xd8,0x7d
+
+# CHECK: v_cmp_gt_u64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xd8,0x7d]
+0x02,0x00,0xd8,0x7d
+
+# CHECK: v_cmp_gt_u64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xd8,0x7d]
+0x64,0x00,0xd8,0x7d
+
+# CHECK: v_cmp_gt_u64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xd8,0x7d]
+0x66,0x00,0xd8,0x7d
+
+# CHECK: v_cmp_gt_u64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xd8,0x7d]
+0x6a,0x00,0xd8,0x7d
+
+# CHECK: v_cmp_gt_u64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xd8,0x7d]
+0x6c,0x00,0xd8,0x7d
+
+# CHECK: v_cmp_gt_u64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xd8,0x7d]
+0x6e,0x00,0xd8,0x7d
+
+# CHECK: v_cmp_gt_u64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xd8,0x7d]
+0x7a,0x00,0xd8,0x7d
+
+# CHECK: v_cmp_gt_u64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xd8,0x7d]
+0x7e,0x00,0xd8,0x7d
+
+# CHECK: v_cmp_gt_u64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xd8,0x7d]
+0x80,0x00,0xd8,0x7d
+
+# CHECK: v_cmp_gt_u64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xd8,0x7d]
+0xc1,0x00,0xd8,0x7d
+
+# CHECK: v_cmp_gt_u64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xd8,0x7d]
+0xf0,0x00,0xd8,0x7d
+
+# CHECK: v_cmp_gt_u64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xd8,0x7d]
+0xf7,0x00,0xd8,0x7d
+
+# CHECK: v_cmp_gt_u64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xd8,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xd8,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_gt_u64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xd8,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xd8,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_gt_u64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xd8,0x7d]
+0x00,0x01,0xd8,0x7d
+
+# CHECK: v_cmp_gt_u64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xd8,0x7d]
+0xfe,0x01,0xd8,0x7d
+
+# CHECK: v_cmp_gt_u64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xd9,0x7d]
+0x00,0xfc,0xd9,0x7d
+
+# CHECK: v_cmp_gt_u64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xec,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xec,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xec,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xec,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xec,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xec,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xec,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xec,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xec,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xec,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xec,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xec,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xec,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xec,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xec,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xec,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xec,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xec,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xec,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xec,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xec,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xec,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xec,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xec,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_gt_u64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xec,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xec,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_gt_u64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xec,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xec,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_gt_u64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xec,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xec,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_gt_u64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xec,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xec,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_gt_u64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xec,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xec,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_gt_u64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xec,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xec,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_gt_u64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xec,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xec,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_gt_u64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xec,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xec,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_ne_u64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xda,0x7d]
+0x00,0x00,0xda,0x7d
+
+# CHECK: v_cmp_ne_u64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xda,0x7d]
+0x02,0x00,0xda,0x7d
+
+# CHECK: v_cmp_ne_u64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xda,0x7d]
+0x64,0x00,0xda,0x7d
+
+# CHECK: v_cmp_ne_u64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xda,0x7d]
+0x66,0x00,0xda,0x7d
+
+# CHECK: v_cmp_ne_u64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xda,0x7d]
+0x6a,0x00,0xda,0x7d
+
+# CHECK: v_cmp_ne_u64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xda,0x7d]
+0x6c,0x00,0xda,0x7d
+
+# CHECK: v_cmp_ne_u64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xda,0x7d]
+0x6e,0x00,0xda,0x7d
+
+# CHECK: v_cmp_ne_u64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xda,0x7d]
+0x7a,0x00,0xda,0x7d
+
+# CHECK: v_cmp_ne_u64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xda,0x7d]
+0x7e,0x00,0xda,0x7d
+
+# CHECK: v_cmp_ne_u64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xda,0x7d]
+0x80,0x00,0xda,0x7d
+
+# CHECK: v_cmp_ne_u64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xda,0x7d]
+0xc1,0x00,0xda,0x7d
+
+# CHECK: v_cmp_ne_u64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xda,0x7d]
+0xf0,0x00,0xda,0x7d
+
+# CHECK: v_cmp_ne_u64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xda,0x7d]
+0xf7,0x00,0xda,0x7d
+
+# CHECK: v_cmp_ne_u64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xda,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xda,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_ne_u64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xda,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xda,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_ne_u64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xda,0x7d]
+0x00,0x01,0xda,0x7d
+
+# CHECK: v_cmp_ne_u64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xda,0x7d]
+0xfe,0x01,0xda,0x7d
+
+# CHECK: v_cmp_ne_u64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xdb,0x7d]
+0x00,0xfc,0xdb,0x7d
+
+# CHECK: v_cmp_ne_u64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xed,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xed,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xed,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xed,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xed,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xed,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xed,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xed,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xed,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xed,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xed,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xed,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xed,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xed,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xed,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xed,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xed,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xed,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xed,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xed,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xed,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xed,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xed,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xed,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_ne_u64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xed,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xed,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_ne_u64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xed,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xed,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_ne_u64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xed,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xed,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_ne_u64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xed,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xed,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_ne_u64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xed,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xed,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_ne_u64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xed,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xed,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_ne_u64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xed,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xed,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_ne_u64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xed,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xed,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_ge_u64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xdc,0x7d]
+0x00,0x00,0xdc,0x7d
+
+# CHECK: v_cmp_ge_u64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xdc,0x7d]
+0x02,0x00,0xdc,0x7d
+
+# CHECK: v_cmp_ge_u64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xdc,0x7d]
+0x64,0x00,0xdc,0x7d
+
+# CHECK: v_cmp_ge_u64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xdc,0x7d]
+0x66,0x00,0xdc,0x7d
+
+# CHECK: v_cmp_ge_u64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xdc,0x7d]
+0x6a,0x00,0xdc,0x7d
+
+# CHECK: v_cmp_ge_u64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xdc,0x7d]
+0x6c,0x00,0xdc,0x7d
+
+# CHECK: v_cmp_ge_u64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xdc,0x7d]
+0x6e,0x00,0xdc,0x7d
+
+# CHECK: v_cmp_ge_u64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xdc,0x7d]
+0x7a,0x00,0xdc,0x7d
+
+# CHECK: v_cmp_ge_u64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xdc,0x7d]
+0x7e,0x00,0xdc,0x7d
+
+# CHECK: v_cmp_ge_u64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xdc,0x7d]
+0x80,0x00,0xdc,0x7d
+
+# CHECK: v_cmp_ge_u64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xdc,0x7d]
+0xc1,0x00,0xdc,0x7d
+
+# CHECK: v_cmp_ge_u64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xdc,0x7d]
+0xf0,0x00,0xdc,0x7d
+
+# CHECK: v_cmp_ge_u64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xdc,0x7d]
+0xf7,0x00,0xdc,0x7d
+
+# CHECK: v_cmp_ge_u64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xdc,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xdc,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_ge_u64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xdc,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xdc,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_ge_u64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xdc,0x7d]
+0x00,0x01,0xdc,0x7d
+
+# CHECK: v_cmp_ge_u64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xdc,0x7d]
+0xfe,0x01,0xdc,0x7d
+
+# CHECK: v_cmp_ge_u64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xdd,0x7d]
+0x00,0xfc,0xdd,0x7d
+
+# CHECK: v_cmp_ge_u64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xee,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xee,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xee,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xee,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xee,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xee,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xee,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xee,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xee,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xee,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xee,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xee,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xee,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xee,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xee,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xee,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xee,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xee,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xee,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xee,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xee,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xee,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xee,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xee,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_ge_u64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xee,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xee,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_ge_u64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xee,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xee,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_ge_u64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xee,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xee,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_ge_u64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xee,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xee,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_ge_u64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xee,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xee,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_ge_u64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xee,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xee,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_ge_u64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xee,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xee,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_ge_u64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xee,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xee,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmp_t_u64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xde,0x7d]
+0x00,0x00,0xde,0x7d
+
+# CHECK: v_cmp_t_u64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xde,0x7d]
+0x02,0x00,0xde,0x7d
+
+# CHECK: v_cmp_t_u64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xde,0x7d]
+0x64,0x00,0xde,0x7d
+
+# CHECK: v_cmp_t_u64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xde,0x7d]
+0x66,0x00,0xde,0x7d
+
+# CHECK: v_cmp_t_u64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xde,0x7d]
+0x6a,0x00,0xde,0x7d
+
+# CHECK: v_cmp_t_u64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xde,0x7d]
+0x6c,0x00,0xde,0x7d
+
+# CHECK: v_cmp_t_u64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xde,0x7d]
+0x6e,0x00,0xde,0x7d
+
+# CHECK: v_cmp_t_u64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xde,0x7d]
+0x7a,0x00,0xde,0x7d
+
+# CHECK: v_cmp_t_u64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xde,0x7d]
+0x7e,0x00,0xde,0x7d
+
+# CHECK: v_cmp_t_u64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xde,0x7d]
+0x80,0x00,0xde,0x7d
+
+# CHECK: v_cmp_t_u64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xde,0x7d]
+0xc1,0x00,0xde,0x7d
+
+# CHECK: v_cmp_t_u64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xde,0x7d]
+0xf0,0x00,0xde,0x7d
+
+# CHECK: v_cmp_t_u64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xde,0x7d]
+0xf7,0x00,0xde,0x7d
+
+# CHECK: v_cmp_t_u64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xde,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xde,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmp_t_u64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xde,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xde,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmp_t_u64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xde,0x7d]
+0x00,0x01,0xde,0x7d
+
+# CHECK: v_cmp_t_u64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xde,0x7d]
+0xfe,0x01,0xde,0x7d
+
+# CHECK: v_cmp_t_u64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xdf,0x7d]
+0x00,0xfc,0xdf,0x7d
+
+# CHECK: v_cmp_t_u64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xef,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xef,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xef,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xef,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xef,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xef,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xef,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xef,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xef,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xef,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xef,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xef,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xef,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xef,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xef,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xef,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xef,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xef,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xef,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xef,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xef,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xef,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xef,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xef,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmp_t_u64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xef,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xef,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmp_t_u64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xef,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xef,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmp_t_u64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xef,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xef,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmp_t_u64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xef,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xef,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmp_t_u64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xef,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xef,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmp_t_u64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xef,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xef,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmp_t_u64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xef,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xef,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmp_t_u64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xef,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xef,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_f_i64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe0,0x7d]
+0x00,0x00,0xe0,0x7d
+
+# CHECK: v_cmpx_f_i64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xe0,0x7d]
+0x02,0x00,0xe0,0x7d
+
+# CHECK: v_cmpx_f_i64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xe0,0x7d]
+0x64,0x00,0xe0,0x7d
+
+# CHECK: v_cmpx_f_i64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xe0,0x7d]
+0x66,0x00,0xe0,0x7d
+
+# CHECK: v_cmpx_f_i64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xe0,0x7d]
+0x6a,0x00,0xe0,0x7d
+
+# CHECK: v_cmpx_f_i64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xe0,0x7d]
+0x6c,0x00,0xe0,0x7d
+
+# CHECK: v_cmpx_f_i64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xe0,0x7d]
+0x6e,0x00,0xe0,0x7d
+
+# CHECK: v_cmpx_f_i64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xe0,0x7d]
+0x7a,0x00,0xe0,0x7d
+
+# CHECK: v_cmpx_f_i64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xe0,0x7d]
+0x7e,0x00,0xe0,0x7d
+
+# CHECK: v_cmpx_f_i64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xe0,0x7d]
+0x80,0x00,0xe0,0x7d
+
+# CHECK: v_cmpx_f_i64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xe0,0x7d]
+0xc1,0x00,0xe0,0x7d
+
+# CHECK: v_cmpx_f_i64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xe0,0x7d]
+0xf0,0x00,0xe0,0x7d
+
+# CHECK: v_cmpx_f_i64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xe0,0x7d]
+0xf7,0x00,0xe0,0x7d
+
+# CHECK: v_cmpx_f_i64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xe0,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xe0,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_f_i64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xe0,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xe0,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_f_i64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xe0,0x7d]
+0x00,0x01,0xe0,0x7d
+
+# CHECK: v_cmpx_f_i64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xe0,0x7d]
+0xfe,0x01,0xe0,0x7d
+
+# CHECK: v_cmpx_f_i64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xe1,0x7d]
+0x00,0xfc,0xe1,0x7d
+
+# CHECK: v_cmpx_f_i64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xf0,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xf0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xf0,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xf0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xf0,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xf0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xf0,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xf0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xf0,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xf0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xf0,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xf0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xf0,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xf0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xf0,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xf0,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xf0,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xf0,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xf0,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xf0,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xf0,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xf0,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xf0,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xf0,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_i64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xf0,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xf0,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_f_i64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xf0,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xf0,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_f_i64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xf0,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xf0,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_f_i64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xf0,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xf0,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_f_i64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xf0,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xf0,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_f_i64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xf0,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xf0,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_f_i64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf0,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xf0,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_f_i64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xf0,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xf0,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_lt_i64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe2,0x7d]
+0x00,0x00,0xe2,0x7d
+
+# CHECK: v_cmpx_lt_i64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xe2,0x7d]
+0x02,0x00,0xe2,0x7d
+
+# CHECK: v_cmpx_lt_i64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xe2,0x7d]
+0x64,0x00,0xe2,0x7d
+
+# CHECK: v_cmpx_lt_i64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xe2,0x7d]
+0x66,0x00,0xe2,0x7d
+
+# CHECK: v_cmpx_lt_i64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xe2,0x7d]
+0x6a,0x00,0xe2,0x7d
+
+# CHECK: v_cmpx_lt_i64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xe2,0x7d]
+0x6c,0x00,0xe2,0x7d
+
+# CHECK: v_cmpx_lt_i64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xe2,0x7d]
+0x6e,0x00,0xe2,0x7d
+
+# CHECK: v_cmpx_lt_i64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xe2,0x7d]
+0x7a,0x00,0xe2,0x7d
+
+# CHECK: v_cmpx_lt_i64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xe2,0x7d]
+0x7e,0x00,0xe2,0x7d
+
+# CHECK: v_cmpx_lt_i64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xe2,0x7d]
+0x80,0x00,0xe2,0x7d
+
+# CHECK: v_cmpx_lt_i64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xe2,0x7d]
+0xc1,0x00,0xe2,0x7d
+
+# CHECK: v_cmpx_lt_i64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xe2,0x7d]
+0xf0,0x00,0xe2,0x7d
+
+# CHECK: v_cmpx_lt_i64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xe2,0x7d]
+0xf7,0x00,0xe2,0x7d
+
+# CHECK: v_cmpx_lt_i64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xe2,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xe2,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_lt_i64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xe2,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xe2,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_lt_i64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xe2,0x7d]
+0x00,0x01,0xe2,0x7d
+
+# CHECK: v_cmpx_lt_i64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xe2,0x7d]
+0xfe,0x01,0xe2,0x7d
+
+# CHECK: v_cmpx_lt_i64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xe3,0x7d]
+0x00,0xfc,0xe3,0x7d
+
+# CHECK: v_cmpx_lt_i64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xf1,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xf1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xf1,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xf1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xf1,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xf1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xf1,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xf1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xf1,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xf1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xf1,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xf1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xf1,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xf1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xf1,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xf1,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xf1,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xf1,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xf1,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xf1,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xf1,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xf1,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xf1,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xf1,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xf1,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xf1,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xf1,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xf1,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xf1,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xf1,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xf1,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xf1,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xf1,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xf1,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xf1,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xf1,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf1,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xf1,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_lt_i64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xf1,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xf1,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_eq_i64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe4,0x7d]
+0x00,0x00,0xe4,0x7d
+
+# CHECK: v_cmpx_eq_i64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xe4,0x7d]
+0x02,0x00,0xe4,0x7d
+
+# CHECK: v_cmpx_eq_i64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xe4,0x7d]
+0x64,0x00,0xe4,0x7d
+
+# CHECK: v_cmpx_eq_i64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xe4,0x7d]
+0x66,0x00,0xe4,0x7d
+
+# CHECK: v_cmpx_eq_i64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xe4,0x7d]
+0x6a,0x00,0xe4,0x7d
+
+# CHECK: v_cmpx_eq_i64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xe4,0x7d]
+0x6c,0x00,0xe4,0x7d
+
+# CHECK: v_cmpx_eq_i64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xe4,0x7d]
+0x6e,0x00,0xe4,0x7d
+
+# CHECK: v_cmpx_eq_i64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xe4,0x7d]
+0x7a,0x00,0xe4,0x7d
+
+# CHECK: v_cmpx_eq_i64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xe4,0x7d]
+0x7e,0x00,0xe4,0x7d
+
+# CHECK: v_cmpx_eq_i64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xe4,0x7d]
+0x80,0x00,0xe4,0x7d
+
+# CHECK: v_cmpx_eq_i64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xe4,0x7d]
+0xc1,0x00,0xe4,0x7d
+
+# CHECK: v_cmpx_eq_i64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xe4,0x7d]
+0xf0,0x00,0xe4,0x7d
+
+# CHECK: v_cmpx_eq_i64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xe4,0x7d]
+0xf7,0x00,0xe4,0x7d
+
+# CHECK: v_cmpx_eq_i64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xe4,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xe4,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_eq_i64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xe4,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xe4,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_eq_i64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xe4,0x7d]
+0x00,0x01,0xe4,0x7d
+
+# CHECK: v_cmpx_eq_i64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xe4,0x7d]
+0xfe,0x01,0xe4,0x7d
+
+# CHECK: v_cmpx_eq_i64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xe5,0x7d]
+0x00,0xfc,0xe5,0x7d
+
+# CHECK: v_cmpx_eq_i64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xf2,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xf2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xf2,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xf2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xf2,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xf2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xf2,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xf2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xf2,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xf2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xf2,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xf2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xf2,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xf2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xf2,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xf2,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xf2,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xf2,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xf2,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xf2,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xf2,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xf2,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xf2,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xf2,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xf2,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xf2,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xf2,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xf2,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xf2,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xf2,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xf2,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xf2,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xf2,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xf2,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xf2,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xf2,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf2,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xf2,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_eq_i64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xf2,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xf2,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_le_i64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe6,0x7d]
+0x00,0x00,0xe6,0x7d
+
+# CHECK: v_cmpx_le_i64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xe6,0x7d]
+0x02,0x00,0xe6,0x7d
+
+# CHECK: v_cmpx_le_i64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xe6,0x7d]
+0x64,0x00,0xe6,0x7d
+
+# CHECK: v_cmpx_le_i64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xe6,0x7d]
+0x66,0x00,0xe6,0x7d
+
+# CHECK: v_cmpx_le_i64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xe6,0x7d]
+0x6a,0x00,0xe6,0x7d
+
+# CHECK: v_cmpx_le_i64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xe6,0x7d]
+0x6c,0x00,0xe6,0x7d
+
+# CHECK: v_cmpx_le_i64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xe6,0x7d]
+0x6e,0x00,0xe6,0x7d
+
+# CHECK: v_cmpx_le_i64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xe6,0x7d]
+0x7a,0x00,0xe6,0x7d
+
+# CHECK: v_cmpx_le_i64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xe6,0x7d]
+0x7e,0x00,0xe6,0x7d
+
+# CHECK: v_cmpx_le_i64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xe6,0x7d]
+0x80,0x00,0xe6,0x7d
+
+# CHECK: v_cmpx_le_i64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xe6,0x7d]
+0xc1,0x00,0xe6,0x7d
+
+# CHECK: v_cmpx_le_i64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xe6,0x7d]
+0xf0,0x00,0xe6,0x7d
+
+# CHECK: v_cmpx_le_i64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xe6,0x7d]
+0xf7,0x00,0xe6,0x7d
+
+# CHECK: v_cmpx_le_i64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xe6,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xe6,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_le_i64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xe6,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xe6,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_le_i64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xe6,0x7d]
+0x00,0x01,0xe6,0x7d
+
+# CHECK: v_cmpx_le_i64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xe6,0x7d]
+0xfe,0x01,0xe6,0x7d
+
+# CHECK: v_cmpx_le_i64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xe7,0x7d]
+0x00,0xfc,0xe7,0x7d
+
+# CHECK: v_cmpx_le_i64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xf3,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xf3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xf3,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xf3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xf3,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xf3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xf3,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xf3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xf3,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xf3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xf3,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xf3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xf3,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xf3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xf3,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xf3,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xf3,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xf3,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xf3,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xf3,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xf3,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xf3,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xf3,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xf3,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_i64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xf3,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xf3,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_le_i64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xf3,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xf3,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_le_i64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xf3,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xf3,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_le_i64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xf3,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xf3,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_le_i64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xf3,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xf3,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_le_i64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xf3,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xf3,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_le_i64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf3,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xf3,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_le_i64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xf3,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xf3,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_gt_i64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xe8,0x7d]
+0x00,0x00,0xe8,0x7d
+
+# CHECK: v_cmpx_gt_i64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xe8,0x7d]
+0x02,0x00,0xe8,0x7d
+
+# CHECK: v_cmpx_gt_i64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xe8,0x7d]
+0x64,0x00,0xe8,0x7d
+
+# CHECK: v_cmpx_gt_i64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xe8,0x7d]
+0x66,0x00,0xe8,0x7d
+
+# CHECK: v_cmpx_gt_i64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xe8,0x7d]
+0x6a,0x00,0xe8,0x7d
+
+# CHECK: v_cmpx_gt_i64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xe8,0x7d]
+0x6c,0x00,0xe8,0x7d
+
+# CHECK: v_cmpx_gt_i64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xe8,0x7d]
+0x6e,0x00,0xe8,0x7d
+
+# CHECK: v_cmpx_gt_i64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xe8,0x7d]
+0x7a,0x00,0xe8,0x7d
+
+# CHECK: v_cmpx_gt_i64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xe8,0x7d]
+0x7e,0x00,0xe8,0x7d
+
+# CHECK: v_cmpx_gt_i64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xe8,0x7d]
+0x80,0x00,0xe8,0x7d
+
+# CHECK: v_cmpx_gt_i64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xe8,0x7d]
+0xc1,0x00,0xe8,0x7d
+
+# CHECK: v_cmpx_gt_i64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xe8,0x7d]
+0xf0,0x00,0xe8,0x7d
+
+# CHECK: v_cmpx_gt_i64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xe8,0x7d]
+0xf7,0x00,0xe8,0x7d
+
+# CHECK: v_cmpx_gt_i64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xe8,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xe8,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_gt_i64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xe8,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xe8,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_gt_i64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xe8,0x7d]
+0x00,0x01,0xe8,0x7d
+
+# CHECK: v_cmpx_gt_i64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xe8,0x7d]
+0xfe,0x01,0xe8,0x7d
+
+# CHECK: v_cmpx_gt_i64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xe9,0x7d]
+0x00,0xfc,0xe9,0x7d
+
+# CHECK: v_cmpx_gt_i64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xf4,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xf4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xf4,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xf4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xf4,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xf4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xf4,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xf4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xf4,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xf4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xf4,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xf4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xf4,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xf4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xf4,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xf4,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xf4,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xf4,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xf4,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xf4,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xf4,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xf4,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xf4,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xf4,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xf4,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xf4,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xf4,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xf4,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xf4,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xf4,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xf4,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xf4,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xf4,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xf4,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xf4,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xf4,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf4,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xf4,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_gt_i64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xf4,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xf4,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_ne_i64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xea,0x7d]
+0x00,0x00,0xea,0x7d
+
+# CHECK: v_cmpx_ne_i64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xea,0x7d]
+0x02,0x00,0xea,0x7d
+
+# CHECK: v_cmpx_ne_i64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xea,0x7d]
+0x64,0x00,0xea,0x7d
+
+# CHECK: v_cmpx_ne_i64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xea,0x7d]
+0x66,0x00,0xea,0x7d
+
+# CHECK: v_cmpx_ne_i64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xea,0x7d]
+0x6a,0x00,0xea,0x7d
+
+# CHECK: v_cmpx_ne_i64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xea,0x7d]
+0x6c,0x00,0xea,0x7d
+
+# CHECK: v_cmpx_ne_i64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xea,0x7d]
+0x6e,0x00,0xea,0x7d
+
+# CHECK: v_cmpx_ne_i64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xea,0x7d]
+0x7a,0x00,0xea,0x7d
+
+# CHECK: v_cmpx_ne_i64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xea,0x7d]
+0x7e,0x00,0xea,0x7d
+
+# CHECK: v_cmpx_ne_i64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xea,0x7d]
+0x80,0x00,0xea,0x7d
+
+# CHECK: v_cmpx_ne_i64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xea,0x7d]
+0xc1,0x00,0xea,0x7d
+
+# CHECK: v_cmpx_ne_i64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xea,0x7d]
+0xf0,0x00,0xea,0x7d
+
+# CHECK: v_cmpx_ne_i64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xea,0x7d]
+0xf7,0x00,0xea,0x7d
+
+# CHECK: v_cmpx_ne_i64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xea,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xea,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_ne_i64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xea,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xea,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_ne_i64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xea,0x7d]
+0x00,0x01,0xea,0x7d
+
+# CHECK: v_cmpx_ne_i64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xea,0x7d]
+0xfe,0x01,0xea,0x7d
+
+# CHECK: v_cmpx_ne_i64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xeb,0x7d]
+0x00,0xfc,0xeb,0x7d
+
+# CHECK: v_cmpx_ne_i64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xf5,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xf5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xf5,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xf5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xf5,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xf5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xf5,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xf5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xf5,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xf5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xf5,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xf5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xf5,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xf5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xf5,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xf5,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xf5,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xf5,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xf5,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xf5,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xf5,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xf5,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xf5,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xf5,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xf5,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xf5,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xf5,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xf5,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xf5,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xf5,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xf5,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xf5,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xf5,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xf5,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xf5,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xf5,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf5,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xf5,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_ne_i64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xf5,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xf5,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_ge_i64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xec,0x7d]
+0x00,0x00,0xec,0x7d
+
+# CHECK: v_cmpx_ge_i64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xec,0x7d]
+0x02,0x00,0xec,0x7d
+
+# CHECK: v_cmpx_ge_i64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xec,0x7d]
+0x64,0x00,0xec,0x7d
+
+# CHECK: v_cmpx_ge_i64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xec,0x7d]
+0x66,0x00,0xec,0x7d
+
+# CHECK: v_cmpx_ge_i64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xec,0x7d]
+0x6a,0x00,0xec,0x7d
+
+# CHECK: v_cmpx_ge_i64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xec,0x7d]
+0x6c,0x00,0xec,0x7d
+
+# CHECK: v_cmpx_ge_i64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xec,0x7d]
+0x6e,0x00,0xec,0x7d
+
+# CHECK: v_cmpx_ge_i64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xec,0x7d]
+0x7a,0x00,0xec,0x7d
+
+# CHECK: v_cmpx_ge_i64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xec,0x7d]
+0x7e,0x00,0xec,0x7d
+
+# CHECK: v_cmpx_ge_i64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xec,0x7d]
+0x80,0x00,0xec,0x7d
+
+# CHECK: v_cmpx_ge_i64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xec,0x7d]
+0xc1,0x00,0xec,0x7d
+
+# CHECK: v_cmpx_ge_i64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xec,0x7d]
+0xf0,0x00,0xec,0x7d
+
+# CHECK: v_cmpx_ge_i64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xec,0x7d]
+0xf7,0x00,0xec,0x7d
+
+# CHECK: v_cmpx_ge_i64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xec,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xec,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_ge_i64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xec,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xec,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_ge_i64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xec,0x7d]
+0x00,0x01,0xec,0x7d
+
+# CHECK: v_cmpx_ge_i64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xec,0x7d]
+0xfe,0x01,0xec,0x7d
+
+# CHECK: v_cmpx_ge_i64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xed,0x7d]
+0x00,0xfc,0xed,0x7d
+
+# CHECK: v_cmpx_ge_i64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xf6,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xf6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xf6,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xf6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xf6,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xf6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xf6,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xf6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xf6,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xf6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xf6,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xf6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xf6,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xf6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xf6,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xf6,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xf6,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xf6,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xf6,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xf6,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xf6,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xf6,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xf6,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xf6,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xf6,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xf6,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xf6,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xf6,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xf6,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xf6,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xf6,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xf6,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xf6,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xf6,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xf6,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xf6,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf6,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xf6,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_ge_i64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xf6,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xf6,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_t_i64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xee,0x7d]
+0x00,0x00,0xee,0x7d
+
+# CHECK: v_cmpx_t_i64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xee,0x7d]
+0x02,0x00,0xee,0x7d
+
+# CHECK: v_cmpx_t_i64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xee,0x7d]
+0x64,0x00,0xee,0x7d
+
+# CHECK: v_cmpx_t_i64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xee,0x7d]
+0x66,0x00,0xee,0x7d
+
+# CHECK: v_cmpx_t_i64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xee,0x7d]
+0x6a,0x00,0xee,0x7d
+
+# CHECK: v_cmpx_t_i64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xee,0x7d]
+0x6c,0x00,0xee,0x7d
+
+# CHECK: v_cmpx_t_i64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xee,0x7d]
+0x6e,0x00,0xee,0x7d
+
+# CHECK: v_cmpx_t_i64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xee,0x7d]
+0x7a,0x00,0xee,0x7d
+
+# CHECK: v_cmpx_t_i64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xee,0x7d]
+0x7e,0x00,0xee,0x7d
+
+# CHECK: v_cmpx_t_i64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xee,0x7d]
+0x80,0x00,0xee,0x7d
+
+# CHECK: v_cmpx_t_i64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xee,0x7d]
+0xc1,0x00,0xee,0x7d
+
+# CHECK: v_cmpx_t_i64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xee,0x7d]
+0xf0,0x00,0xee,0x7d
+
+# CHECK: v_cmpx_t_i64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xee,0x7d]
+0xf7,0x00,0xee,0x7d
+
+# CHECK: v_cmpx_t_i64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xee,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xee,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_t_i64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xee,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xee,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_t_i64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xee,0x7d]
+0x00,0x01,0xee,0x7d
+
+# CHECK: v_cmpx_t_i64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xee,0x7d]
+0xfe,0x01,0xee,0x7d
+
+# CHECK: v_cmpx_t_i64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xef,0x7d]
+0x00,0xfc,0xef,0x7d
+
+# CHECK: v_cmpx_t_i64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xf7,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xf7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xf7,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xf7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xf7,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xf7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xf7,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xf7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xf7,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xf7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xf7,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xf7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xf7,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xf7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xf7,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xf7,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xf7,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xf7,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xf7,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xf7,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xf7,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xf7,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xf7,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xf7,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_i64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xf7,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xf7,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_t_i64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xf7,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xf7,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_t_i64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xf7,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xf7,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_t_i64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xf7,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xf7,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_t_i64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xf7,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xf7,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_t_i64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xf7,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xf7,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_t_i64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf7,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xf7,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_t_i64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xf7,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xf7,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_f_u64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf0,0x7d]
+0x00,0x00,0xf0,0x7d
+
+# CHECK: v_cmpx_f_u64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xf0,0x7d]
+0x02,0x00,0xf0,0x7d
+
+# CHECK: v_cmpx_f_u64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xf0,0x7d]
+0x64,0x00,0xf0,0x7d
+
+# CHECK: v_cmpx_f_u64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xf0,0x7d]
+0x66,0x00,0xf0,0x7d
+
+# CHECK: v_cmpx_f_u64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xf0,0x7d]
+0x6a,0x00,0xf0,0x7d
+
+# CHECK: v_cmpx_f_u64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xf0,0x7d]
+0x6c,0x00,0xf0,0x7d
+
+# CHECK: v_cmpx_f_u64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xf0,0x7d]
+0x6e,0x00,0xf0,0x7d
+
+# CHECK: v_cmpx_f_u64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xf0,0x7d]
+0x7a,0x00,0xf0,0x7d
+
+# CHECK: v_cmpx_f_u64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xf0,0x7d]
+0x7e,0x00,0xf0,0x7d
+
+# CHECK: v_cmpx_f_u64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xf0,0x7d]
+0x80,0x00,0xf0,0x7d
+
+# CHECK: v_cmpx_f_u64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xf0,0x7d]
+0xc1,0x00,0xf0,0x7d
+
+# CHECK: v_cmpx_f_u64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xf0,0x7d]
+0xf0,0x00,0xf0,0x7d
+
+# CHECK: v_cmpx_f_u64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xf0,0x7d]
+0xf7,0x00,0xf0,0x7d
+
+# CHECK: v_cmpx_f_u64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xf0,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xf0,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_f_u64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xf0,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xf0,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_f_u64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xf0,0x7d]
+0x00,0x01,0xf0,0x7d
+
+# CHECK: v_cmpx_f_u64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xf0,0x7d]
+0xfe,0x01,0xf0,0x7d
+
+# CHECK: v_cmpx_f_u64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xf1,0x7d]
+0x00,0xfc,0xf1,0x7d
+
+# CHECK: v_cmpx_f_u64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xf8,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xf8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xf8,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xf8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xf8,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xf8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xf8,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xf8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xf8,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xf8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xf8,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xf8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xf8,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xf8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xf8,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xf8,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xf8,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xf8,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xf8,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xf8,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xf8,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xf8,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xf8,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xf8,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_f_u64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xf8,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xf8,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_f_u64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xf8,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xf8,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_f_u64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xf8,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xf8,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_f_u64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xf8,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xf8,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_f_u64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xf8,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xf8,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_f_u64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xf8,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xf8,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_f_u64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf8,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xf8,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_f_u64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xf8,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xf8,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_lt_u64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf2,0x7d]
+0x00,0x00,0xf2,0x7d
+
+# CHECK: v_cmpx_lt_u64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xf2,0x7d]
+0x02,0x00,0xf2,0x7d
+
+# CHECK: v_cmpx_lt_u64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xf2,0x7d]
+0x64,0x00,0xf2,0x7d
+
+# CHECK: v_cmpx_lt_u64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xf2,0x7d]
+0x66,0x00,0xf2,0x7d
+
+# CHECK: v_cmpx_lt_u64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xf2,0x7d]
+0x6a,0x00,0xf2,0x7d
+
+# CHECK: v_cmpx_lt_u64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xf2,0x7d]
+0x6c,0x00,0xf2,0x7d
+
+# CHECK: v_cmpx_lt_u64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xf2,0x7d]
+0x6e,0x00,0xf2,0x7d
+
+# CHECK: v_cmpx_lt_u64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xf2,0x7d]
+0x7a,0x00,0xf2,0x7d
+
+# CHECK: v_cmpx_lt_u64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xf2,0x7d]
+0x7e,0x00,0xf2,0x7d
+
+# CHECK: v_cmpx_lt_u64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xf2,0x7d]
+0x80,0x00,0xf2,0x7d
+
+# CHECK: v_cmpx_lt_u64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xf2,0x7d]
+0xc1,0x00,0xf2,0x7d
+
+# CHECK: v_cmpx_lt_u64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xf2,0x7d]
+0xf0,0x00,0xf2,0x7d
+
+# CHECK: v_cmpx_lt_u64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xf2,0x7d]
+0xf7,0x00,0xf2,0x7d
+
+# CHECK: v_cmpx_lt_u64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xf2,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xf2,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_lt_u64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xf2,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xf2,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_lt_u64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xf2,0x7d]
+0x00,0x01,0xf2,0x7d
+
+# CHECK: v_cmpx_lt_u64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xf2,0x7d]
+0xfe,0x01,0xf2,0x7d
+
+# CHECK: v_cmpx_lt_u64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xf3,0x7d]
+0x00,0xfc,0xf3,0x7d
+
+# CHECK: v_cmpx_lt_u64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xf9,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xf9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xf9,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xf9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xf9,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xf9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xf9,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xf9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xf9,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xf9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xf9,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xf9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xf9,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xf9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xf9,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xf9,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xf9,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xf9,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xf9,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xf9,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xf9,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xf9,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xf9,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xf9,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xf9,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xf9,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xf9,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xf9,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xf9,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xf9,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xf9,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xf9,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xf9,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xf9,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xf9,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xf9,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf9,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xf9,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_lt_u64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xf9,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xf9,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_eq_u64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf4,0x7d]
+0x00,0x00,0xf4,0x7d
+
+# CHECK: v_cmpx_eq_u64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xf4,0x7d]
+0x02,0x00,0xf4,0x7d
+
+# CHECK: v_cmpx_eq_u64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xf4,0x7d]
+0x64,0x00,0xf4,0x7d
+
+# CHECK: v_cmpx_eq_u64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xf4,0x7d]
+0x66,0x00,0xf4,0x7d
+
+# CHECK: v_cmpx_eq_u64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xf4,0x7d]
+0x6a,0x00,0xf4,0x7d
+
+# CHECK: v_cmpx_eq_u64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xf4,0x7d]
+0x6c,0x00,0xf4,0x7d
+
+# CHECK: v_cmpx_eq_u64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xf4,0x7d]
+0x6e,0x00,0xf4,0x7d
+
+# CHECK: v_cmpx_eq_u64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xf4,0x7d]
+0x7a,0x00,0xf4,0x7d
+
+# CHECK: v_cmpx_eq_u64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xf4,0x7d]
+0x7e,0x00,0xf4,0x7d
+
+# CHECK: v_cmpx_eq_u64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xf4,0x7d]
+0x80,0x00,0xf4,0x7d
+
+# CHECK: v_cmpx_eq_u64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xf4,0x7d]
+0xc1,0x00,0xf4,0x7d
+
+# CHECK: v_cmpx_eq_u64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xf4,0x7d]
+0xf0,0x00,0xf4,0x7d
+
+# CHECK: v_cmpx_eq_u64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xf4,0x7d]
+0xf7,0x00,0xf4,0x7d
+
+# CHECK: v_cmpx_eq_u64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xf4,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xf4,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_eq_u64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xf4,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xf4,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_eq_u64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xf4,0x7d]
+0x00,0x01,0xf4,0x7d
+
+# CHECK: v_cmpx_eq_u64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xf4,0x7d]
+0xfe,0x01,0xf4,0x7d
+
+# CHECK: v_cmpx_eq_u64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xf5,0x7d]
+0x00,0xfc,0xf5,0x7d
+
+# CHECK: v_cmpx_eq_u64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfa,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xfa,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xfa,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xfa,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xfa,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xfa,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xfa,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xfa,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xfa,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xfa,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xfa,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xfa,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xfa,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xfa,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xfa,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xfa,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xfa,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xfa,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xfa,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xfa,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xfa,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xfa,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xfa,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xfa,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xfa,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xfa,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xfa,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xfa,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xfa,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xfa,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xfa,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xfa,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xfa,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xfa,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xfa,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xfa,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xfa,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xfa,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_eq_u64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xfa,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xfa,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_le_u64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf6,0x7d]
+0x00,0x00,0xf6,0x7d
+
+# CHECK: v_cmpx_le_u64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xf6,0x7d]
+0x02,0x00,0xf6,0x7d
+
+# CHECK: v_cmpx_le_u64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xf6,0x7d]
+0x64,0x00,0xf6,0x7d
+
+# CHECK: v_cmpx_le_u64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xf6,0x7d]
+0x66,0x00,0xf6,0x7d
+
+# CHECK: v_cmpx_le_u64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xf6,0x7d]
+0x6a,0x00,0xf6,0x7d
+
+# CHECK: v_cmpx_le_u64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xf6,0x7d]
+0x6c,0x00,0xf6,0x7d
+
+# CHECK: v_cmpx_le_u64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xf6,0x7d]
+0x6e,0x00,0xf6,0x7d
+
+# CHECK: v_cmpx_le_u64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xf6,0x7d]
+0x7a,0x00,0xf6,0x7d
+
+# CHECK: v_cmpx_le_u64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xf6,0x7d]
+0x7e,0x00,0xf6,0x7d
+
+# CHECK: v_cmpx_le_u64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xf6,0x7d]
+0x80,0x00,0xf6,0x7d
+
+# CHECK: v_cmpx_le_u64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xf6,0x7d]
+0xc1,0x00,0xf6,0x7d
+
+# CHECK: v_cmpx_le_u64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xf6,0x7d]
+0xf0,0x00,0xf6,0x7d
+
+# CHECK: v_cmpx_le_u64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xf6,0x7d]
+0xf7,0x00,0xf6,0x7d
+
+# CHECK: v_cmpx_le_u64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xf6,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xf6,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_le_u64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xf6,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xf6,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_le_u64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xf6,0x7d]
+0x00,0x01,0xf6,0x7d
+
+# CHECK: v_cmpx_le_u64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xf6,0x7d]
+0xfe,0x01,0xf6,0x7d
+
+# CHECK: v_cmpx_le_u64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xf7,0x7d]
+0x00,0xfc,0xf7,0x7d
+
+# CHECK: v_cmpx_le_u64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfb,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xfb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xfb,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xfb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xfb,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xfb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xfb,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xfb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xfb,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xfb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xfb,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xfb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xfb,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xfb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xfb,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xfb,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xfb,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xfb,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xfb,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xfb,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xfb,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xfb,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xfb,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xfb,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_le_u64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xfb,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xfb,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_le_u64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xfb,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xfb,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_le_u64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xfb,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xfb,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_le_u64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xfb,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xfb,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_le_u64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xfb,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xfb,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_le_u64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xfb,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xfb,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_le_u64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xfb,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xfb,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_le_u64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xfb,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xfb,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_gt_u64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xf8,0x7d]
+0x00,0x00,0xf8,0x7d
+
+# CHECK: v_cmpx_gt_u64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xf8,0x7d]
+0x02,0x00,0xf8,0x7d
+
+# CHECK: v_cmpx_gt_u64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xf8,0x7d]
+0x64,0x00,0xf8,0x7d
+
+# CHECK: v_cmpx_gt_u64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xf8,0x7d]
+0x66,0x00,0xf8,0x7d
+
+# CHECK: v_cmpx_gt_u64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xf8,0x7d]
+0x6a,0x00,0xf8,0x7d
+
+# CHECK: v_cmpx_gt_u64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xf8,0x7d]
+0x6c,0x00,0xf8,0x7d
+
+# CHECK: v_cmpx_gt_u64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xf8,0x7d]
+0x6e,0x00,0xf8,0x7d
+
+# CHECK: v_cmpx_gt_u64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xf8,0x7d]
+0x7a,0x00,0xf8,0x7d
+
+# CHECK: v_cmpx_gt_u64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xf8,0x7d]
+0x7e,0x00,0xf8,0x7d
+
+# CHECK: v_cmpx_gt_u64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xf8,0x7d]
+0x80,0x00,0xf8,0x7d
+
+# CHECK: v_cmpx_gt_u64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xf8,0x7d]
+0xc1,0x00,0xf8,0x7d
+
+# CHECK: v_cmpx_gt_u64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xf8,0x7d]
+0xf0,0x00,0xf8,0x7d
+
+# CHECK: v_cmpx_gt_u64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xf8,0x7d]
+0xf7,0x00,0xf8,0x7d
+
+# CHECK: v_cmpx_gt_u64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xf8,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xf8,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_gt_u64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xf8,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xf8,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_gt_u64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xf8,0x7d]
+0x00,0x01,0xf8,0x7d
+
+# CHECK: v_cmpx_gt_u64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xf8,0x7d]
+0xfe,0x01,0xf8,0x7d
+
+# CHECK: v_cmpx_gt_u64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xf9,0x7d]
+0x00,0xfc,0xf9,0x7d
+
+# CHECK: v_cmpx_gt_u64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfc,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xfc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xfc,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xfc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xfc,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xfc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xfc,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xfc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xfc,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xfc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xfc,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xfc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xfc,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xfc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xfc,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xfc,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xfc,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xfc,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xfc,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xfc,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xfc,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xfc,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xfc,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xfc,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xfc,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xfc,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xfc,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xfc,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xfc,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xfc,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xfc,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xfc,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xfc,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xfc,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xfc,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xfc,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xfc,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xfc,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_gt_u64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xfc,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xfc,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_ne_u64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xfa,0x7d]
+0x00,0x00,0xfa,0x7d
+
+# CHECK: v_cmpx_ne_u64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xfa,0x7d]
+0x02,0x00,0xfa,0x7d
+
+# CHECK: v_cmpx_ne_u64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xfa,0x7d]
+0x64,0x00,0xfa,0x7d
+
+# CHECK: v_cmpx_ne_u64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xfa,0x7d]
+0x66,0x00,0xfa,0x7d
+
+# CHECK: v_cmpx_ne_u64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xfa,0x7d]
+0x6a,0x00,0xfa,0x7d
+
+# CHECK: v_cmpx_ne_u64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xfa,0x7d]
+0x6c,0x00,0xfa,0x7d
+
+# CHECK: v_cmpx_ne_u64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xfa,0x7d]
+0x6e,0x00,0xfa,0x7d
+
+# CHECK: v_cmpx_ne_u64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xfa,0x7d]
+0x7a,0x00,0xfa,0x7d
+
+# CHECK: v_cmpx_ne_u64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xfa,0x7d]
+0x7e,0x00,0xfa,0x7d
+
+# CHECK: v_cmpx_ne_u64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xfa,0x7d]
+0x80,0x00,0xfa,0x7d
+
+# CHECK: v_cmpx_ne_u64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xfa,0x7d]
+0xc1,0x00,0xfa,0x7d
+
+# CHECK: v_cmpx_ne_u64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xfa,0x7d]
+0xf0,0x00,0xfa,0x7d
+
+# CHECK: v_cmpx_ne_u64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xfa,0x7d]
+0xf7,0x00,0xfa,0x7d
+
+# CHECK: v_cmpx_ne_u64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xfa,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xfa,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_ne_u64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xfa,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xfa,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_ne_u64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xfa,0x7d]
+0x00,0x01,0xfa,0x7d
+
+# CHECK: v_cmpx_ne_u64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xfa,0x7d]
+0xfe,0x01,0xfa,0x7d
+
+# CHECK: v_cmpx_ne_u64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xfb,0x7d]
+0x00,0xfc,0xfb,0x7d
+
+# CHECK: v_cmpx_ne_u64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfd,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xfd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xfd,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xfd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xfd,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xfd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xfd,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xfd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xfd,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xfd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xfd,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xfd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xfd,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xfd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xfd,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xfd,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xfd,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xfd,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xfd,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xfd,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xfd,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xfd,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xfd,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xfd,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xfd,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xfd,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xfd,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xfd,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xfd,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xfd,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xfd,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xfd,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xfd,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xfd,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xfd,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xfd,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xfd,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xfd,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_ne_u64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xfd,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xfd,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_ge_u64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xfc,0x7d]
+0x00,0x00,0xfc,0x7d
+
+# CHECK: v_cmpx_ge_u64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xfc,0x7d]
+0x02,0x00,0xfc,0x7d
+
+# CHECK: v_cmpx_ge_u64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xfc,0x7d]
+0x64,0x00,0xfc,0x7d
+
+# CHECK: v_cmpx_ge_u64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xfc,0x7d]
+0x66,0x00,0xfc,0x7d
+
+# CHECK: v_cmpx_ge_u64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xfc,0x7d]
+0x6a,0x00,0xfc,0x7d
+
+# CHECK: v_cmpx_ge_u64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xfc,0x7d]
+0x6c,0x00,0xfc,0x7d
+
+# CHECK: v_cmpx_ge_u64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xfc,0x7d]
+0x6e,0x00,0xfc,0x7d
+
+# CHECK: v_cmpx_ge_u64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xfc,0x7d]
+0x7a,0x00,0xfc,0x7d
+
+# CHECK: v_cmpx_ge_u64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xfc,0x7d]
+0x7e,0x00,0xfc,0x7d
+
+# CHECK: v_cmpx_ge_u64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xfc,0x7d]
+0x80,0x00,0xfc,0x7d
+
+# CHECK: v_cmpx_ge_u64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xfc,0x7d]
+0xc1,0x00,0xfc,0x7d
+
+# CHECK: v_cmpx_ge_u64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xfc,0x7d]
+0xf0,0x00,0xfc,0x7d
+
+# CHECK: v_cmpx_ge_u64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xfc,0x7d]
+0xf7,0x00,0xfc,0x7d
+
+# CHECK: v_cmpx_ge_u64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xfc,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xfc,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_ge_u64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xfc,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xfc,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_ge_u64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xfc,0x7d]
+0x00,0x01,0xfc,0x7d
+
+# CHECK: v_cmpx_ge_u64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xfc,0x7d]
+0xfe,0x01,0xfc,0x7d
+
+# CHECK: v_cmpx_ge_u64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xfd,0x7d]
+0x00,0xfc,0xfd,0x7d
+
+# CHECK: v_cmpx_ge_u64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xfe,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xfe,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xfe,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xfe,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xfe,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xfe,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xfe,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xfe,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xfe,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xfe,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xfe,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xfe,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xfe,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xfe,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xfe,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xfe,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xfe,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xfe,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xfe,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xfe,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xfe,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xfe,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xfe,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xfe,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xfe,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xfe,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xfe,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xfe,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xfe,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xfe,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xfe,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xfe,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xfe,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xfe,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xfe,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xfe,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xfe,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xfe,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_ge_u64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xfe,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xfe,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_cmpx_t_u64_e32 vcc, s[0:1], v[0:1] ; encoding: [0x00,0x00,0xfe,0x7d]
+0x00,0x00,0xfe,0x7d
+
+# CHECK: v_cmpx_t_u64_e32 vcc, s[2:3], v[0:1] ; encoding: [0x02,0x00,0xfe,0x7d]
+0x02,0x00,0xfe,0x7d
+
+# CHECK: v_cmpx_t_u64_e32 vcc, s[100:101], v[0:1] ; encoding: [0x64,0x00,0xfe,0x7d]
+0x64,0x00,0xfe,0x7d
+
+# CHECK: v_cmpx_t_u64_e32 vcc, flat_scratch, v[0:1] ; encoding: [0x66,0x00,0xfe,0x7d]
+0x66,0x00,0xfe,0x7d
+
+# CHECK: v_cmpx_t_u64_e32 vcc, vcc, v[0:1] ; encoding: [0x6a,0x00,0xfe,0x7d]
+0x6a,0x00,0xfe,0x7d
+
+# CHECK: v_cmpx_t_u64_e32 vcc, tba, v[0:1] ; encoding: [0x6c,0x00,0xfe,0x7d]
+0x6c,0x00,0xfe,0x7d
+
+# CHECK: v_cmpx_t_u64_e32 vcc, tma, v[0:1] ; encoding: [0x6e,0x00,0xfe,0x7d]
+0x6e,0x00,0xfe,0x7d
+
+# CHECK: v_cmpx_t_u64_e32 vcc, ttmp[10:11], v[0:1] ; encoding: [0x7a,0x00,0xfe,0x7d]
+0x7a,0x00,0xfe,0x7d
+
+# CHECK: v_cmpx_t_u64_e32 vcc, exec, v[0:1] ; encoding: [0x7e,0x00,0xfe,0x7d]
+0x7e,0x00,0xfe,0x7d
+
+# CHECK: v_cmpx_t_u64_e32 vcc, 0, v[0:1] ; encoding: [0x80,0x00,0xfe,0x7d]
+0x80,0x00,0xfe,0x7d
+
+# CHECK: v_cmpx_t_u64_e32 vcc, -1, v[0:1] ; encoding: [0xc1,0x00,0xfe,0x7d]
+0xc1,0x00,0xfe,0x7d
+
+# CHECK: v_cmpx_t_u64_e32 vcc, 0.5, v[0:1] ; encoding: [0xf0,0x00,0xfe,0x7d]
+0xf0,0x00,0xfe,0x7d
+
+# CHECK: v_cmpx_t_u64_e32 vcc, -4.0, v[0:1] ; encoding: [0xf7,0x00,0xfe,0x7d]
+0xf7,0x00,0xfe,0x7d
+
+# CHECK: v_cmpx_t_u64_e32 vcc, 0xaf123456, v[0:1] ; encoding: [0xff,0x00,0xfe,0x7d,0x56,0x34,0x12,0xaf]
+0xff,0x00,0xfe,0x7d,0x56,0x34,0x12,0xaf
+
+# CHECK: v_cmpx_t_u64_e32 vcc, 0x3f717273, v[0:1] ; encoding: [0xff,0x00,0xfe,0x7d,0x73,0x72,0x71,0x3f]
+0xff,0x00,0xfe,0x7d,0x73,0x72,0x71,0x3f
+
+# CHECK: v_cmpx_t_u64_e32 vcc, v[0:1], v[0:1] ; encoding: [0x00,0x01,0xfe,0x7d]
+0x00,0x01,0xfe,0x7d
+
+# CHECK: v_cmpx_t_u64_e32 vcc, v[254:255], v[0:1] ; encoding: [0xfe,0x01,0xfe,0x7d]
+0xfe,0x01,0xfe,0x7d
+
+# CHECK: v_cmpx_t_u64_e32 vcc, s[0:1], v[254:255] ; encoding: [0x00,0xfc,0xff,0x7d]
+0x00,0xfc,0xff,0x7d
+
+# CHECK: v_cmpx_t_u64_e64 s[0:1], s[0:1], s[0:1] ; encoding: [0x00,0x00,0xff,0xd0,0x00,0x00,0x00,0x00]
+0x00,0x00,0xff,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u64_e64 s[2:3], s[0:1], s[0:1] ; encoding: [0x02,0x00,0xff,0xd0,0x00,0x00,0x00,0x00]
+0x02,0x00,0xff,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u64_e64 s[100:101], s[0:1], s[0:1] ; encoding: [0x64,0x00,0xff,0xd0,0x00,0x00,0x00,0x00]
+0x64,0x00,0xff,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u64_e64 flat_scratch, s[0:1], s[0:1] ; encoding: [0x66,0x00,0xff,0xd0,0x00,0x00,0x00,0x00]
+0x66,0x00,0xff,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u64_e64 vcc, s[0:1], s[0:1] ; encoding: [0x6a,0x00,0xff,0xd0,0x00,0x00,0x00,0x00]
+0x6a,0x00,0xff,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u64_e64 tba, s[0:1], s[0:1] ; encoding: [0x6c,0x00,0xff,0xd0,0x00,0x00,0x00,0x00]
+0x6c,0x00,0xff,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u64_e64 tma, s[0:1], s[0:1] ; encoding: [0x6e,0x00,0xff,0xd0,0x00,0x00,0x00,0x00]
+0x6e,0x00,0xff,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u64_e64 ttmp[10:11], s[0:1], s[0:1] ; encoding: [0x7a,0x00,0xff,0xd0,0x00,0x00,0x00,0x00]
+0x7a,0x00,0xff,0xd0,0x00,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u64_e64 s[0:1], 0, s[0:1] ; encoding: [0x00,0x00,0xff,0xd0,0x80,0x00,0x00,0x00]
+0x00,0x00,0xff,0xd0,0x80,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u64_e64 s[0:1], -1, s[0:1] ; encoding: [0x00,0x00,0xff,0xd0,0xc1,0x00,0x00,0x00]
+0x00,0x00,0xff,0xd0,0xc1,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u64_e64 s[0:1], 0.5, s[0:1] ; encoding: [0x00,0x00,0xff,0xd0,0xf0,0x00,0x00,0x00]
+0x00,0x00,0xff,0xd0,0xf0,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u64_e64 s[0:1], -4.0, s[0:1] ; encoding: [0x00,0x00,0xff,0xd0,0xf7,0x00,0x00,0x00]
+0x00,0x00,0xff,0xd0,0xf7,0x00,0x00,0x00
+
+# CHECK: v_cmpx_t_u64_e64 s[0:1], v[0:1], s[0:1] ; encoding: [0x00,0x00,0xff,0xd0,0x00,0x01,0x00,0x00]
+0x00,0x00,0xff,0xd0,0x00,0x01,0x00,0x00
+
+# CHECK: v_cmpx_t_u64_e64 s[0:1], v[254:255], s[0:1] ; encoding: [0x00,0x00,0xff,0xd0,0xfe,0x01,0x00,0x00]
+0x00,0x00,0xff,0xd0,0xfe,0x01,0x00,0x00
+
+# CHECK: v_cmpx_t_u64_e64 s[0:1], s[0:1], 0 ; encoding: [0x00,0x00,0xff,0xd0,0x00,0x00,0x01,0x00]
+0x00,0x00,0xff,0xd0,0x00,0x00,0x01,0x00
+
+# CHECK: v_cmpx_t_u64_e64 s[0:1], s[0:1], -1 ; encoding: [0x00,0x00,0xff,0xd0,0x00,0x82,0x01,0x00]
+0x00,0x00,0xff,0xd0,0x00,0x82,0x01,0x00
+
+# CHECK: v_cmpx_t_u64_e64 s[0:1], s[0:1], 0.5 ; encoding: [0x00,0x00,0xff,0xd0,0x00,0xe0,0x01,0x00]
+0x00,0x00,0xff,0xd0,0x00,0xe0,0x01,0x00
+
+# CHECK: v_cmpx_t_u64_e64 s[0:1], s[0:1], -4.0 ; encoding: [0x00,0x00,0xff,0xd0,0x00,0xee,0x01,0x00]
+0x00,0x00,0xff,0xd0,0x00,0xee,0x01,0x00
+
+# CHECK: v_cmpx_t_u64_e64 s[0:1], s[0:1], v[0:1] ; encoding: [0x00,0x00,0xff,0xd0,0x00,0x00,0x02,0x00]
+0x00,0x00,0xff,0xd0,0x00,0x00,0x02,0x00
+
+# CHECK: v_cmpx_t_u64_e64 s[0:1], s[0:1], v[254:255] ; encoding: [0x00,0x00,0xff,0xd0,0x00,0xfc,0x03,0x00]
+0x00,0x00,0xff,0xd0,0x00,0xfc,0x03,0x00
+
+# CHECK: v_mov_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x02,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x02,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_mov_b32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x02,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x02,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_mov_b32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x02,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x02,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_mov_b32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x02,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x02,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_mov_b32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x02,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x02,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_mov_b32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x02,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x02,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_mov_b32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x02,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x02,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_mov_b32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x02,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x02,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_mov_b32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x02,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x02,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_mov_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x02,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x02,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_mov_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x02,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x02,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_mov_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x02,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x02,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_mov_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x02,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x02,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_mov_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x02,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x02,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_mov_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x02,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x02,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_mov_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x02,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x02,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_mov_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x02,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x02,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_mov_b32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x02,0x00,0x7e,0x00,0x06,0x0e,0x06]
+0xf9,0x02,0x00,0x7e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x02,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mov_b32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x02,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x02,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mov_b32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x02,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_mov_b32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x02,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_mov_b32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x02,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_mov_b32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x02,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_mov_b32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x02,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_mov_b32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x02,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_mov_b32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x02,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_mov_b32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x02,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_mov_b32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x02,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_mov_b32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x02,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_mov_b32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x02,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_mov_b32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x02,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_mov_b32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x02,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_mov_b32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x02,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_mov_b32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x02,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_mov_b32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x02,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x02,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x02,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x02,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x02,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x02,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x02,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x02,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cvt_f32_i32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0a,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x0a,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_i32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0a,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x0a,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_i32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0a,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x0a,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_i32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0a,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x0a,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cvt_f32_i32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0a,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x0a,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cvt_f32_i32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0a,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x0a,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cvt_f32_i32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0a,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x0a,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cvt_f32_i32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0a,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x0a,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cvt_f32_i32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0a,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x0a,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cvt_f32_i32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0a,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x0a,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cvt_f32_i32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x0a,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x0a,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cvt_f32_i32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x0a,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x0a,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cvt_f32_i32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x0a,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x0a,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cvt_f32_i32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x0a,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x0a,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cvt_f32_i32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x0a,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x0a,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cvt_f32_i32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x0a,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x0a,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cvt_f32_i32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x0a,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x0a,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cvt_f32_i32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x0a,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x0a,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0a,0x00,0x7e,0x00,0x06,0x0e,0x06]
+0xf9,0x0a,0x00,0x7e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x0a,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0a,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x0a,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x0a,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x0a,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x0a,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x0a,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x0a,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x0a,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x0a,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x0a,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x0a,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x0a,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x0a,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x0a,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x0a,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x0a,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x0a,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x0a,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x0a,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x0a,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x0a,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x0a,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x0a,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x0a,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cvt_f32_i32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x0a,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x0a,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0c,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x0c,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_u32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0c,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x0c,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_u32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0c,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x0c,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_u32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0c,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x0c,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cvt_f32_u32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0c,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x0c,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cvt_f32_u32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0c,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x0c,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cvt_f32_u32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0c,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x0c,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cvt_f32_u32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0c,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x0c,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cvt_f32_u32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0c,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x0c,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cvt_f32_u32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0c,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x0c,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x0c,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x0c,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x0c,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x0c,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x0c,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x0c,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x0c,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x0c,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x0c,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x0c,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x0c,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x0c,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x0c,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x0c,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x0c,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x0c,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cvt_f32_u32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0c,0x00,0x7e,0x00,0x06,0x0e,0x06]
+0xf9,0x0c,0x00,0x7e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x0c,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0c,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x0c,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x0c,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x0c,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x0c,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x0c,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x0c,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x0c,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x0c,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x0c,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x0c,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x0c,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x0c,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x0c,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x0c,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x0c,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x0c,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x0c,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x0c,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x0c,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x0c,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x0c,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x0c,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x0c,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cvt_f32_u32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x0c,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x0c,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cvt_u32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0e,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x0e,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0e,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x0e,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0e,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x0e,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0e,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x0e,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0e,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x0e,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0e,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x0e,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0e,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x0e,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0e,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x0e,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0e,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x0e,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0e,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x0e,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x0e,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x0e,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x0e,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x0e,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x0e,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x0e,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x0e,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x0e,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x0e,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x0e,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x0e,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x0e,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x0e,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x0e,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x0e,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x0e,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0e,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x0e,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_cvt_u32_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x0e,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x0e,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x0e,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x0e,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cvt_u32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_cvt_u32_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x0e,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_cvt_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x10,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x10,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x10,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x10,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x10,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x10,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x10,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x10,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x10,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x10,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x10,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x10,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x10,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x10,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x10,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x10,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x10,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x10,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x10,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x10,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x10,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x10,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x10,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x10,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x10,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x10,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x10,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x10,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x10,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x10,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x10,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x10,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x10,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x10,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x10,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x10,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x10,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x10,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_cvt_i32_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x10,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x10,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x10,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x10,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x10,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x10,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x10,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x10,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x10,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x10,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cvt_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_cvt_i32_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x10,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x10,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_cvt_f16_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x14,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x14,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x14,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x14,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x14,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x14,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x14,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x14,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x14,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x14,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x14,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x14,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x14,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x14,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x14,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x14,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x14,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x14,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x14,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x14,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x14,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x14,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x14,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x14,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x14,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x14,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x14,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x14,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x14,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x14,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x14,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x14,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x14,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x14,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x14,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x14,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x14,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x14,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_cvt_f16_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x14,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x14,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x14,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x14,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x14,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x14,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x14,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x14,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x14,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x14,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cvt_f16_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_cvt_f16_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x14,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x14,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_cvt_f32_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x16,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x16,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_f16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x16,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x16,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_f16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x16,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x16,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_f16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x16,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x16,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cvt_f32_f16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x16,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x16,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cvt_f32_f16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x16,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x16,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cvt_f32_f16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x16,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x16,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cvt_f32_f16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x16,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x16,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cvt_f32_f16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x16,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x16,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cvt_f32_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x16,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x16,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cvt_f32_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x16,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x16,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cvt_f32_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x16,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x16,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cvt_f32_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x16,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x16,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cvt_f32_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x16,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x16,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cvt_f32_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x16,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x16,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cvt_f32_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x16,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x16,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cvt_f32_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x16,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x16,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cvt_f32_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x16,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x16,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x16,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x16,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x16,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x16,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x16,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x16,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x16,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x16,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x16,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x16,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x16,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x16,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x16,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x16,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x16,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x16,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x16,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x16,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x16,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x16,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x16,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x16,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x16,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x16,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x16,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cvt_f32_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x16,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x16,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x18,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x18,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x18,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x18,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x18,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x18,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x18,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x18,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x18,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x18,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x18,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x18,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x18,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x18,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x18,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x18,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x18,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x18,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x18,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x18,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x18,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x18,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x18,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x18,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x18,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x18,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x18,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x18,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x18,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x18,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x18,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x18,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x18,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x18,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x18,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x18,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x18,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x18,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x18,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x18,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x18,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x18,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x18,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x18,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x18,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x18,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x18,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x18,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_cvt_rpi_i32_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x18,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x18,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1a,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x1a,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1a,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x1a,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1a,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x1a,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1a,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x1a,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1a,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x1a,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1a,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x1a,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1a,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x1a,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1a,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x1a,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1a,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x1a,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1a,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x1a,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x1a,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x1a,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x1a,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x1a,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x1a,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x1a,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x1a,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x1a,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x1a,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x1a,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x1a,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x1a,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x1a,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x1a,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x1a,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x1a,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1a,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x1a,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_cvt_flr_i32_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1a,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x1a,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x1a,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x1a,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_cvt_flr_i32_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x1a,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_cvt_off_f32_i4_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1c,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x1c,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_off_f32_i4_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1c,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x1c,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_off_f32_i4_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1c,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x1c,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cvt_off_f32_i4_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1c,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x1c,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cvt_off_f32_i4_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1c,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x1c,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cvt_off_f32_i4_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1c,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x1c,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cvt_off_f32_i4_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1c,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x1c,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cvt_off_f32_i4_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1c,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x1c,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cvt_off_f32_i4_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1c,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x1c,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cvt_off_f32_i4_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1c,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x1c,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cvt_off_f32_i4_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x1c,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x1c,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cvt_off_f32_i4_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x1c,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x1c,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cvt_off_f32_i4_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x1c,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x1c,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cvt_off_f32_i4_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x1c,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x1c,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cvt_off_f32_i4_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x1c,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x1c,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cvt_off_f32_i4_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x1c,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x1c,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cvt_off_f32_i4_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x1c,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x1c,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cvt_off_f32_i4_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x1c,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x1c,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cvt_off_f32_i4_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x1c,0x00,0x7e,0x00,0x06,0x0e,0x06]
+0xf9,0x1c,0x00,0x7e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x1c,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1c,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x1c,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x1c,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x1c,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x1c,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x1c,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x1c,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x1c,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x1c,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x1c,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x1c,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x1c,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x1c,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x1c,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x1c,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x1c,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x1c,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x1c,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x1c,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x1c,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x1c,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x1c,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x1c,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x1c,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cvt_off_f32_i4_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x1c,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x1c,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x22,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x22,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x22,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x22,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x22,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x22,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x22,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x22,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x22,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x22,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x22,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x22,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x22,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x22,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x22,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x22,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x22,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x22,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x22,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x22,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x22,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x22,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x22,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x22,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x22,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x22,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x22,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x22,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x22,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x22,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x22,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x22,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x22,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x22,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x22,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x22,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cvt_f32_ubyte0_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x22,0x00,0x7e,0x00,0x06,0x0e,0x06]
+0xf9,0x22,0x00,0x7e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x22,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x22,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x22,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x22,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x22,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x22,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x22,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x22,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x22,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x22,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x22,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x22,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x22,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x22,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x22,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x22,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x22,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x22,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x22,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x22,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x22,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x22,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x22,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x22,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x22,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cvt_f32_ubyte0_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x22,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x22,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x24,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x24,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x24,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x24,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x24,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x24,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x24,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x24,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x24,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x24,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x24,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x24,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x24,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x24,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x24,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x24,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x24,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x24,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x24,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x24,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x24,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x24,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x24,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x24,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x24,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x24,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x24,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x24,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x24,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x24,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x24,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x24,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x24,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x24,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x24,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x24,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cvt_f32_ubyte1_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x24,0x00,0x7e,0x00,0x06,0x0e,0x06]
+0xf9,0x24,0x00,0x7e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x24,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x24,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x24,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x24,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x24,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x24,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x24,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x24,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x24,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x24,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x24,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x24,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x24,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x24,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x24,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x24,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x24,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x24,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x24,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x24,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x24,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x24,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x24,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x24,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x24,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cvt_f32_ubyte1_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x24,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x24,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x26,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x26,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x26,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x26,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x26,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x26,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x26,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x26,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x26,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x26,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x26,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x26,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x26,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x26,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x26,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x26,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x26,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x26,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x26,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x26,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x26,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x26,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x26,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x26,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x26,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x26,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x26,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x26,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x26,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x26,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x26,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x26,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x26,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x26,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x26,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x26,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cvt_f32_ubyte2_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x26,0x00,0x7e,0x00,0x06,0x0e,0x06]
+0xf9,0x26,0x00,0x7e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x26,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x26,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x26,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x26,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x26,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x26,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x26,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x26,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x26,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x26,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x26,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x26,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x26,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x26,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x26,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x26,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x26,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x26,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x26,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x26,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x26,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x26,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x26,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x26,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x26,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cvt_f32_ubyte2_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x26,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x26,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x28,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x28,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x28,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x28,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x28,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x28,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x28,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x28,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x28,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x28,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x28,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x28,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x28,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x28,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x28,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x28,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x28,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x28,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x28,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x28,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x28,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x28,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x28,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x28,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x28,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x28,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x28,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x28,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x28,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x28,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x28,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x28,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x28,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x28,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x28,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x28,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cvt_f32_ubyte3_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x28,0x00,0x7e,0x00,0x06,0x0e,0x06]
+0xf9,0x28,0x00,0x7e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x28,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x28,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x28,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x28,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x28,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x28,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x28,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x28,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x28,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x28,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x28,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x28,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x28,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x28,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x28,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x28,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x28,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x28,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x28,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x28,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x28,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x28,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x28,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x28,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x28,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cvt_f32_ubyte3_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x28,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x28,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_fract_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x36,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x36,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_fract_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x36,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x36,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_fract_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x36,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x36,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_fract_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x36,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x36,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_fract_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x36,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x36,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_fract_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x36,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x36,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_fract_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x36,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x36,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_fract_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x36,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x36,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_fract_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x36,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x36,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_fract_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x36,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x36,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_fract_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x36,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x36,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_fract_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x36,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x36,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_fract_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x36,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x36,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_fract_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x36,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x36,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_fract_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x36,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x36,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_fract_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x36,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x36,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_fract_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x36,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x36,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_fract_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x36,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x36,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_fract_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x36,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x36,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_fract_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x36,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x36,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_fract_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_fract_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x36,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_fract_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x36,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_fract_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_fract_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_fract_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_fract_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_fract_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_fract_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_fract_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_fract_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_fract_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_fract_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_fract_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_fract_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_fract_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_fract_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_fract_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_fract_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x36,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_fract_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x36,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_fract_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x36,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_fract_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x36,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_fract_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x36,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_fract_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x36,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_fract_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_fract_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_fract_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x36,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x36,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_trunc_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x38,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x38,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_trunc_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x38,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x38,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_trunc_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x38,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x38,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_trunc_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x38,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x38,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_trunc_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x38,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x38,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_trunc_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x38,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x38,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_trunc_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x38,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x38,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_trunc_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x38,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x38,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_trunc_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x38,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x38,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_trunc_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x38,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x38,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_trunc_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x38,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x38,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_trunc_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x38,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x38,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_trunc_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x38,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x38,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_trunc_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x38,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x38,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_trunc_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x38,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x38,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_trunc_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x38,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x38,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_trunc_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x38,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x38,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_trunc_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x38,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x38,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_trunc_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x38,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x38,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_trunc_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x38,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x38,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_trunc_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_trunc_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x38,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_trunc_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x38,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_trunc_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_trunc_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_trunc_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_trunc_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_trunc_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_trunc_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_trunc_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_trunc_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_trunc_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_trunc_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_trunc_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_trunc_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_trunc_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_trunc_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_trunc_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_trunc_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x38,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_trunc_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x38,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_trunc_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x38,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_trunc_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x38,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_trunc_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x38,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_trunc_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x38,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_trunc_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_trunc_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_trunc_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x38,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x38,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_ceil_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3a,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x3a,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_ceil_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3a,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x3a,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_ceil_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3a,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x3a,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_ceil_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3a,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x3a,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_ceil_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3a,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x3a,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_ceil_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3a,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x3a,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_ceil_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3a,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x3a,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_ceil_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3a,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x3a,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_ceil_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3a,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x3a,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_ceil_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3a,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x3a,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_ceil_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x3a,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x3a,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_ceil_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x3a,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x3a,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_ceil_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x3a,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x3a,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_ceil_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x3a,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x3a,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_ceil_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x3a,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x3a,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_ceil_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x3a,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x3a,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_ceil_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x3a,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x3a,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_ceil_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x3a,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x3a,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_ceil_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3a,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x3a,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_ceil_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3a,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x3a,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_ceil_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ceil_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x3a,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ceil_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x3a,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_ceil_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_ceil_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_ceil_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_ceil_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_ceil_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_ceil_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_ceil_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_ceil_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_ceil_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_ceil_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_ceil_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_ceil_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_ceil_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_ceil_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_ceil_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_ceil_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_ceil_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_ceil_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_ceil_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_ceil_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_ceil_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_ceil_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_ceil_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_ceil_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x3a,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_rndne_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3c,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x3c,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_rndne_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3c,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x3c,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_rndne_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3c,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x3c,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_rndne_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3c,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x3c,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_rndne_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3c,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x3c,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_rndne_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3c,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x3c,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_rndne_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3c,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x3c,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_rndne_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3c,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x3c,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_rndne_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3c,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x3c,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_rndne_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3c,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x3c,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_rndne_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x3c,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x3c,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_rndne_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x3c,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x3c,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_rndne_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x3c,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x3c,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_rndne_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x3c,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x3c,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_rndne_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x3c,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x3c,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_rndne_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x3c,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x3c,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_rndne_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x3c,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x3c,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_rndne_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x3c,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x3c,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_rndne_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3c,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x3c,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_rndne_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3c,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x3c,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_rndne_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_rndne_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x3c,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_rndne_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x3c,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_rndne_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_rndne_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_rndne_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_rndne_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_rndne_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_rndne_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_rndne_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_rndne_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_rndne_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_rndne_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_rndne_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_rndne_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_rndne_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_rndne_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_rndne_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_rndne_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_rndne_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_rndne_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_rndne_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_rndne_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_rndne_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_rndne_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_rndne_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_rndne_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x3c,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_floor_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3e,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x3e,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_floor_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3e,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x3e,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_floor_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3e,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x3e,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_floor_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3e,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x3e,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_floor_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3e,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x3e,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_floor_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3e,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x3e,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_floor_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3e,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x3e,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_floor_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3e,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x3e,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_floor_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3e,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x3e,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_floor_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3e,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x3e,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_floor_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x3e,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x3e,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_floor_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x3e,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x3e,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_floor_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x3e,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x3e,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_floor_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x3e,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x3e,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_floor_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x3e,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x3e,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_floor_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x3e,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x3e,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_floor_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x3e,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x3e,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_floor_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x3e,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x3e,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_floor_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3e,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x3e,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_floor_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x3e,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x3e,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_floor_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_floor_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x3e,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_floor_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x3e,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_floor_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_floor_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_floor_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_floor_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_floor_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_floor_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_floor_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_floor_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_floor_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_floor_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_floor_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_floor_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_floor_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_floor_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_floor_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_floor_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_floor_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_floor_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_floor_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_floor_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_floor_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_floor_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_floor_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_floor_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x3e,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_exp_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x40,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x40,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_exp_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x40,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x40,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_exp_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x40,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x40,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_exp_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x40,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x40,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_exp_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x40,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x40,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_exp_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x40,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x40,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_exp_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x40,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x40,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_exp_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x40,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x40,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_exp_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x40,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x40,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_exp_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x40,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x40,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_exp_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x40,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x40,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_exp_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x40,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x40,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_exp_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x40,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x40,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_exp_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x40,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x40,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_exp_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x40,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x40,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_exp_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x40,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x40,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_exp_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x40,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x40,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_exp_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x40,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x40,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_exp_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x40,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x40,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_exp_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x40,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x40,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_exp_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_exp_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x40,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_exp_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x40,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_exp_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_exp_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_exp_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_exp_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_exp_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_exp_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_exp_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_exp_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_exp_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_exp_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_exp_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_exp_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_exp_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_exp_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_exp_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_exp_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x40,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_exp_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x40,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_exp_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x40,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_exp_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x40,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_exp_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x40,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_exp_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x40,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_exp_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_exp_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_exp_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x40,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x40,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_log_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x42,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x42,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_log_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x42,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x42,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_log_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x42,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x42,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_log_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x42,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x42,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_log_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x42,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x42,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_log_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x42,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x42,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_log_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x42,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x42,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_log_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x42,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x42,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_log_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x42,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x42,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_log_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x42,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x42,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_log_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x42,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x42,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_log_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x42,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x42,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_log_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x42,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x42,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_log_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x42,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x42,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_log_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x42,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x42,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_log_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x42,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x42,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_log_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x42,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x42,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_log_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x42,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x42,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_log_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x42,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x42,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_log_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x42,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x42,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_log_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_log_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x42,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_log_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x42,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_log_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_log_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_log_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_log_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_log_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_log_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_log_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_log_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_log_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_log_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_log_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_log_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_log_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_log_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_log_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_log_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x42,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_log_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x42,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_log_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x42,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_log_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x42,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_log_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x42,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_log_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x42,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_log_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_log_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_log_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x42,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x42,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_rcp_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x44,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x44,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_rcp_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x44,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x44,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_rcp_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x44,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x44,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_rcp_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x44,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x44,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_rcp_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x44,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x44,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_rcp_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x44,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x44,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_rcp_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x44,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x44,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_rcp_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x44,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x44,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_rcp_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x44,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x44,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_rcp_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x44,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x44,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_rcp_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x44,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x44,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_rcp_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x44,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x44,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_rcp_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x44,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x44,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_rcp_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x44,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x44,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_rcp_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x44,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x44,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_rcp_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x44,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x44,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_rcp_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x44,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x44,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_rcp_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x44,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x44,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_rcp_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x44,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x44,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_rcp_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x44,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x44,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_rcp_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_rcp_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x44,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_rcp_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x44,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_rcp_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_rcp_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_rcp_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_rcp_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_rcp_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_rcp_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_rcp_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_rcp_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_rcp_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_rcp_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_rcp_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_rcp_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_rcp_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_rcp_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_rcp_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_rcp_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x44,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_rcp_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x44,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_rcp_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x44,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_rcp_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x44,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_rcp_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x44,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_rcp_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x44,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_rcp_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_rcp_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_rcp_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x44,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x44,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x46,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x46,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x46,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x46,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x46,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x46,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x46,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x46,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x46,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x46,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x46,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x46,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x46,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x46,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x46,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x46,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x46,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x46,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x46,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x46,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x46,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x46,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x46,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x46,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x46,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x46,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x46,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x46,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x46,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x46,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x46,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x46,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x46,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x46,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x46,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x46,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x46,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x46,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_rcp_iflag_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x46,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x46,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x46,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x46,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x46,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x46,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x46,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x46,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x46,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x46,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_rcp_iflag_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_rcp_iflag_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x46,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x46,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_rsq_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x48,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x48,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_rsq_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x48,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x48,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_rsq_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x48,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x48,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_rsq_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x48,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x48,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_rsq_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x48,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x48,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_rsq_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x48,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x48,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_rsq_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x48,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x48,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_rsq_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x48,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x48,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_rsq_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x48,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x48,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_rsq_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x48,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x48,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_rsq_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x48,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x48,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_rsq_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x48,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x48,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_rsq_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x48,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x48,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_rsq_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x48,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x48,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_rsq_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x48,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x48,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_rsq_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x48,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x48,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_rsq_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x48,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x48,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_rsq_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x48,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x48,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_rsq_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x48,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x48,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_rsq_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x48,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x48,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_rsq_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_rsq_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x48,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_rsq_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x48,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_rsq_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_rsq_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_rsq_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_rsq_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_rsq_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_rsq_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_rsq_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_rsq_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_rsq_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_rsq_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_rsq_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_rsq_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_rsq_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_rsq_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_rsq_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_rsq_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x48,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_rsq_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x48,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_rsq_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x48,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_rsq_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x48,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_rsq_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x48,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_rsq_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x48,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_rsq_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_rsq_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_rsq_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x48,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x48,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_sqrt_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x4e,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x4e,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_sqrt_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x4e,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x4e,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_sqrt_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x4e,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x4e,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_sqrt_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x4e,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x4e,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_sqrt_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x4e,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x4e,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_sqrt_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x4e,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x4e,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_sqrt_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x4e,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x4e,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_sqrt_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x4e,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x4e,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_sqrt_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x4e,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x4e,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_sqrt_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x4e,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x4e,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_sqrt_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x4e,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x4e,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_sqrt_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x4e,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x4e,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_sqrt_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x4e,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x4e,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_sqrt_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x4e,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x4e,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_sqrt_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x4e,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x4e,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_sqrt_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x4e,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x4e,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_sqrt_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x4e,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x4e,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_sqrt_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x4e,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x4e,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_sqrt_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x4e,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x4e,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_sqrt_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x4e,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x4e,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_sqrt_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_sqrt_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x4e,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x4e,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_sqrt_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_sqrt_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_sqrt_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_sqrt_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_sqrt_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_sqrt_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_sqrt_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x4e,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_sin_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x52,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x52,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_sin_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x52,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x52,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_sin_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x52,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x52,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_sin_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x52,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x52,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_sin_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x52,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x52,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_sin_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x52,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x52,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_sin_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x52,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x52,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_sin_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x52,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x52,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_sin_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x52,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x52,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_sin_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x52,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x52,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_sin_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x52,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x52,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_sin_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x52,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x52,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_sin_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x52,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x52,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_sin_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x52,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x52,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_sin_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x52,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x52,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_sin_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x52,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x52,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_sin_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x52,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x52,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_sin_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x52,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x52,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_sin_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x52,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x52,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_sin_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x52,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x52,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_sin_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_sin_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x52,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_sin_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x52,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_sin_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_sin_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_sin_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_sin_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_sin_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_sin_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_sin_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_sin_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_sin_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_sin_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_sin_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_sin_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_sin_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_sin_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_sin_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_sin_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x52,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_sin_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x52,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_sin_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x52,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_sin_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x52,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_sin_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x52,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_sin_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x52,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_sin_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_sin_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_sin_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x52,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x52,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_cos_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x54,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x54,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cos_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x54,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x54,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cos_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x54,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x54,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cos_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x54,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x54,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cos_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x54,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x54,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cos_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x54,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x54,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cos_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x54,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x54,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cos_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x54,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x54,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cos_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x54,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x54,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cos_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x54,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x54,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cos_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x54,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x54,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cos_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x54,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x54,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cos_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x54,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x54,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cos_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x54,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x54,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cos_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x54,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x54,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cos_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x54,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x54,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cos_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x54,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x54,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cos_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x54,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x54,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cos_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x54,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x54,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_cos_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x54,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x54,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_cos_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cos_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x54,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cos_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x54,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cos_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cos_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cos_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cos_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cos_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cos_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cos_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cos_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cos_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cos_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cos_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cos_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cos_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cos_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cos_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cos_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x54,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cos_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x54,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cos_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x54,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cos_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x54,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cos_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x54,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cos_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x54,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cos_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cos_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_cos_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x54,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x54,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_not_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x56,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x56,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_not_b32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x56,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x56,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_not_b32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x56,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x56,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_not_b32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x56,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x56,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_not_b32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x56,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x56,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_not_b32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x56,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x56,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_not_b32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x56,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x56,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_not_b32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x56,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x56,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_not_b32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x56,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x56,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_not_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x56,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x56,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_not_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x56,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x56,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_not_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x56,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x56,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_not_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x56,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x56,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_not_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x56,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x56,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_not_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x56,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x56,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_not_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x56,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x56,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_not_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x56,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x56,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_not_b32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x56,0x00,0x7e,0x00,0x06,0x0e,0x06]
+0xf9,0x56,0x00,0x7e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_not_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x56,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_not_b32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x56,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x56,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_not_b32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x56,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_not_b32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x56,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_not_b32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x56,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_not_b32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x56,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_not_b32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x56,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_not_b32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x56,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_not_b32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x56,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_not_b32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x56,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_not_b32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x56,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_not_b32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x56,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_not_b32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x56,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_not_b32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x56,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_not_b32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x56,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_not_b32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x56,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_not_b32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x56,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_not_b32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x56,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_not_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x56,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_not_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x56,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_not_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x56,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_not_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x56,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_not_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x56,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_not_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x56,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_not_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x56,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x56,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_bfrev_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x58,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x58,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_bfrev_b32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x58,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x58,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_bfrev_b32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x58,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x58,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_bfrev_b32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x58,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x58,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_bfrev_b32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x58,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x58,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_bfrev_b32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x58,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x58,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_bfrev_b32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x58,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x58,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_bfrev_b32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x58,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x58,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_bfrev_b32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x58,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x58,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_bfrev_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x58,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x58,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_bfrev_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x58,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x58,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_bfrev_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x58,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x58,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_bfrev_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x58,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x58,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_bfrev_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x58,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x58,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_bfrev_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x58,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x58,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_bfrev_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x58,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x58,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_bfrev_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x58,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x58,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_bfrev_b32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x58,0x00,0x7e,0x00,0x06,0x0e,0x06]
+0xf9,0x58,0x00,0x7e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_bfrev_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x58,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_bfrev_b32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x58,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x58,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_bfrev_b32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x58,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_bfrev_b32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x58,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_bfrev_b32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x58,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_bfrev_b32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x58,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_bfrev_b32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x58,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_bfrev_b32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x58,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_bfrev_b32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x58,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_bfrev_b32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x58,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_bfrev_b32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x58,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_bfrev_b32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x58,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_bfrev_b32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x58,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_bfrev_b32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x58,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_bfrev_b32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x58,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_bfrev_b32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x58,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_bfrev_b32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x58,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_bfrev_b32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x58,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_bfrev_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x58,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_bfrev_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x58,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_bfrev_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x58,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_bfrev_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x58,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_bfrev_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x58,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_bfrev_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x58,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_bfrev_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x58,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x58,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_ffbh_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5a,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x5a,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_ffbh_u32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5a,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x5a,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_ffbh_u32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5a,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x5a,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_ffbh_u32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5a,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x5a,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_ffbh_u32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5a,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x5a,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_ffbh_u32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5a,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x5a,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_ffbh_u32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5a,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x5a,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_ffbh_u32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5a,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x5a,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_ffbh_u32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5a,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x5a,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_ffbh_u32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5a,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x5a,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_ffbh_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x5a,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x5a,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_ffbh_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x5a,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x5a,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_ffbh_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x5a,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x5a,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_ffbh_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x5a,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x5a,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_ffbh_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x5a,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x5a,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_ffbh_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x5a,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x5a,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_ffbh_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x5a,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x5a,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_ffbh_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x5a,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x5a,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_ffbh_u32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5a,0x00,0x7e,0x00,0x06,0x0e,0x06]
+0xf9,0x5a,0x00,0x7e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_ffbh_u32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x5a,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ffbh_u32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5a,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x5a,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ffbh_u32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x5a,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_ffbh_u32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x5a,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_ffbh_u32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x5a,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_ffbh_u32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x5a,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_ffbh_u32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x5a,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_ffbh_u32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x5a,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_ffbh_u32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x5a,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_ffbh_u32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x5a,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_ffbh_u32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x5a,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_ffbh_u32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x5a,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_ffbh_u32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x5a,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_ffbh_u32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x5a,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_ffbh_u32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x5a,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_ffbh_u32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x5a,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_ffbh_u32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x5a,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_ffbh_u32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x5a,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_ffbh_u32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x5a,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_ffbh_u32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x5a,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_ffbh_u32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x5a,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_ffbh_u32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x5a,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_ffbh_u32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x5a,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_ffbh_u32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x5a,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_ffbh_u32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x5a,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x5a,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_ffbl_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5c,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x5c,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_ffbl_b32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5c,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x5c,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_ffbl_b32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5c,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x5c,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_ffbl_b32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5c,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x5c,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_ffbl_b32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5c,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x5c,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_ffbl_b32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5c,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x5c,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_ffbl_b32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5c,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x5c,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_ffbl_b32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5c,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x5c,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_ffbl_b32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5c,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x5c,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_ffbl_b32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5c,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x5c,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_ffbl_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x5c,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x5c,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_ffbl_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x5c,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x5c,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_ffbl_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x5c,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x5c,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_ffbl_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x5c,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x5c,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_ffbl_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x5c,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x5c,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_ffbl_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x5c,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x5c,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_ffbl_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x5c,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x5c,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_ffbl_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x5c,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x5c,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_ffbl_b32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5c,0x00,0x7e,0x00,0x06,0x0e,0x06]
+0xf9,0x5c,0x00,0x7e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_ffbl_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x5c,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ffbl_b32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5c,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x5c,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ffbl_b32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x5c,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_ffbl_b32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x5c,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_ffbl_b32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x5c,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_ffbl_b32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x5c,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_ffbl_b32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x5c,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_ffbl_b32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x5c,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_ffbl_b32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x5c,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_ffbl_b32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x5c,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_ffbl_b32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x5c,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_ffbl_b32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x5c,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_ffbl_b32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x5c,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_ffbl_b32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x5c,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_ffbl_b32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x5c,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_ffbl_b32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x5c,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_ffbl_b32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x5c,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_ffbl_b32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x5c,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_ffbl_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x5c,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_ffbl_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x5c,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_ffbl_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x5c,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_ffbl_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x5c,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_ffbl_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x5c,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_ffbl_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x5c,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_ffbl_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x5c,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x5c,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_ffbh_i32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5e,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x5e,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_ffbh_i32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5e,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x5e,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_ffbh_i32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5e,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x5e,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_ffbh_i32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5e,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x5e,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_ffbh_i32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5e,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x5e,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_ffbh_i32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5e,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x5e,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_ffbh_i32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5e,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x5e,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_ffbh_i32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5e,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x5e,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_ffbh_i32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5e,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x5e,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_ffbh_i32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5e,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x5e,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_ffbh_i32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x5e,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x5e,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_ffbh_i32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x5e,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x5e,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_ffbh_i32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x5e,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x5e,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_ffbh_i32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x5e,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x5e,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_ffbh_i32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x5e,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x5e,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_ffbh_i32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x5e,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x5e,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_ffbh_i32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x5e,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x5e,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_ffbh_i32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x5e,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x5e,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_ffbh_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x5e,0x00,0x7e,0x00,0x06,0x0e,0x06]
+0xf9,0x5e,0x00,0x7e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_ffbh_i32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x5e,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ffbh_i32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5e,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x5e,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ffbh_i32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x5e,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_ffbh_i32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x5e,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_ffbh_i32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x5e,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_ffbh_i32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x5e,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_ffbh_i32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x5e,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_ffbh_i32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x5e,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_ffbh_i32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x5e,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_ffbh_i32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x5e,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_ffbh_i32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x5e,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_ffbh_i32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x5e,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_ffbh_i32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x5e,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_ffbh_i32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x5e,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_ffbh_i32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x5e,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_ffbh_i32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x5e,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_ffbh_i32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x5e,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_ffbh_i32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x5e,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_ffbh_i32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x5e,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_ffbh_i32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x5e,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_ffbh_i32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x5e,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_ffbh_i32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x5e,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_ffbh_i32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x5e,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_ffbh_i32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x5e,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_ffbh_i32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x5e,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x5e,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x66,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x66,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x66,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x66,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x66,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x66,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x66,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x66,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x66,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x66,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x66,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x66,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x66,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x66,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x66,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x66,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x66,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x66,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x66,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x66,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x66,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x66,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x66,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x66,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x66,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x66,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x66,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x66,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x66,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x66,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x66,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x66,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x66,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x66,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x66,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x66,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x66,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x66,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_frexp_exp_i32_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x66,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x66,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x66,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x66,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x66,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x66,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x66,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x66,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x66,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x66,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_frexp_exp_i32_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x66,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x66,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_frexp_mant_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x68,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x68,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x68,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x68,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x68,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x68,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x68,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x68,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x68,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x68,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x68,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x68,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x68,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x68,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x68,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x68,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x68,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x68,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x68,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x68,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x68,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x68,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x68,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x68,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x68,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x68,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x68,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x68,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x68,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x68,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x68,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x68,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x68,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x68,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x68,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x68,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x68,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x68,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_frexp_mant_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x68,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x68,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x68,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x68,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x68,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x68,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x68,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x68,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x68,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x68,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_frexp_mant_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_frexp_mant_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x68,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x68,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_cvt_f16_u16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x72,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x72,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f16_u16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x72,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x72,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f16_u16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x72,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x72,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cvt_f16_u16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x72,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x72,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cvt_f16_u16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x72,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x72,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cvt_f16_u16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x72,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x72,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cvt_f16_u16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x72,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x72,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cvt_f16_u16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x72,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x72,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cvt_f16_u16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x72,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x72,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cvt_f16_u16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x72,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x72,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cvt_f16_u16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x72,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x72,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cvt_f16_u16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x72,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x72,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cvt_f16_u16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x72,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x72,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cvt_f16_u16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x72,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x72,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cvt_f16_u16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x72,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x72,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cvt_f16_u16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x72,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x72,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cvt_f16_u16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x72,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x72,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cvt_f16_u16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x72,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x72,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cvt_f16_u16_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x72,0x00,0x7e,0x00,0x06,0x0e,0x06]
+0xf9,0x72,0x00,0x7e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x72,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x72,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x72,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x72,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x72,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x72,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x72,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x72,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x72,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x72,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x72,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x72,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x72,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x72,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x72,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x72,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x72,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x72,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x72,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x72,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x72,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x72,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x72,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x72,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x72,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cvt_f16_u16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x72,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x72,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cvt_f16_i16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x74,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x74,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f16_i16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x74,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x74,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_f16_i16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x74,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x74,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cvt_f16_i16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x74,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x74,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cvt_f16_i16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x74,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x74,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cvt_f16_i16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x74,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x74,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cvt_f16_i16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x74,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x74,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cvt_f16_i16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x74,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x74,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cvt_f16_i16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x74,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x74,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cvt_f16_i16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x74,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x74,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cvt_f16_i16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x74,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x74,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cvt_f16_i16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x74,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x74,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cvt_f16_i16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x74,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x74,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cvt_f16_i16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x74,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x74,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cvt_f16_i16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x74,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x74,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cvt_f16_i16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x74,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x74,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cvt_f16_i16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x74,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x74,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cvt_f16_i16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x74,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x74,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cvt_f16_i16_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x74,0x00,0x7e,0x00,0x06,0x0e,0x06]
+0xf9,0x74,0x00,0x7e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x74,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x74,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x74,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x74,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x74,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x74,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x74,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x74,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x74,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x74,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x74,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x74,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x74,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x74,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x74,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x74,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x74,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x74,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x74,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x74,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x74,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x74,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x74,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x74,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x74,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cvt_f16_i16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x74,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x74,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cvt_u16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x76,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x76,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x76,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x76,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x76,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x76,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x76,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x76,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x76,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x76,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x76,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x76,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x76,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x76,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x76,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x76,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x76,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x76,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x76,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x76,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x76,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x76,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x76,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x76,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x76,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x76,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x76,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x76,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x76,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x76,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x76,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x76,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x76,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x76,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x76,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x76,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x76,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x76,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_cvt_u16_f16_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x76,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x76,0x00,0x7e,0x00,0x06,0x26,0x06
+
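+# v_cvt_u16_f16 DPP: quad_perm, row/wave shifts and rotates, row/bank masks, bound_ctrl, and src0 neg/abs.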
+# CHECK: v_cvt_u16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x76,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x76,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x76,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x76,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x76,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x76,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x76,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x76,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cvt_u16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_cvt_u16_f16_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x76,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x76,0x00,0x7e,0x00,0xe4,0x20,0x00
+
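+# v_cvt_i16_f16 SDWA variants.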
+# CHECK: v_cvt_i16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x78,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x78,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x78,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x78,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x78,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x78,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x78,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x78,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x78,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x78,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x78,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x78,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x78,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x78,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x78,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x78,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x78,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x78,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x78,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x78,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x78,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x78,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x78,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x78,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x78,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x78,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x78,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x78,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x78,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x78,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x78,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x78,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x78,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x78,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x78,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x78,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x78,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x78,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_cvt_i16_f16_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x78,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x78,0x00,0x7e,0x00,0x06,0x26,0x06
+
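+# v_cvt_i16_f16 DPP variants.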
+# CHECK: v_cvt_i16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x78,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x78,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x78,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x78,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x78,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x78,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x78,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x78,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cvt_i16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_cvt_i16_f16_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x78,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x78,0x00,0x7e,0x00,0xe4,0x20,0x00
+
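+# v_rcp_f16 SDWA variants.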
+# CHECK: v_rcp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7a,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x7a,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_rcp_f16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7a,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x7a,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_rcp_f16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7a,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x7a,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_rcp_f16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7a,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x7a,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_rcp_f16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7a,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x7a,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_rcp_f16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7a,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x7a,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_rcp_f16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7a,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x7a,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_rcp_f16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7a,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x7a,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_rcp_f16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7a,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x7a,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_rcp_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7a,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x7a,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_rcp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x7a,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x7a,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_rcp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x7a,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x7a,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_rcp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x7a,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x7a,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_rcp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x7a,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x7a,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_rcp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x7a,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x7a,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_rcp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x7a,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x7a,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_rcp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x7a,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x7a,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_rcp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x7a,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x7a,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_rcp_f16_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7a,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x7a,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_rcp_f16_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7a,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x7a,0x00,0x7e,0x00,0x06,0x26,0x06
+
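+# v_rcp_f16 DPP variants.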
+# CHECK: v_rcp_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_rcp_f16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x7a,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_rcp_f16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x7a,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_rcp_f16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_rcp_f16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_rcp_f16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_rcp_f16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_rcp_f16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_rcp_f16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_rcp_f16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_rcp_f16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_rcp_f16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_rcp_f16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_rcp_f16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_rcp_f16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_rcp_f16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_rcp_f16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_rcp_f16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_rcp_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_rcp_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_rcp_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_rcp_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_rcp_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_rcp_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_rcp_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_rcp_f16_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_rcp_f16_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x7a,0x00,0x7e,0x00,0xe4,0x20,0x00
+
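+# v_sqrt_f16 SDWA variants.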
+# CHECK: v_sqrt_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7c,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x7c,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_sqrt_f16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7c,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x7c,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_sqrt_f16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7c,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x7c,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_sqrt_f16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7c,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x7c,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_sqrt_f16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7c,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x7c,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_sqrt_f16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7c,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x7c,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_sqrt_f16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7c,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x7c,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_sqrt_f16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7c,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x7c,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_sqrt_f16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7c,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x7c,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_sqrt_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7c,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x7c,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_sqrt_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x7c,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x7c,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_sqrt_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x7c,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x7c,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_sqrt_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x7c,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x7c,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_sqrt_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x7c,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x7c,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_sqrt_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x7c,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x7c,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_sqrt_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x7c,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x7c,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_sqrt_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x7c,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x7c,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_sqrt_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x7c,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x7c,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_sqrt_f16_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7c,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x7c,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_sqrt_f16_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7c,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x7c,0x00,0x7e,0x00,0x06,0x26,0x06
+
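+# v_sqrt_f16 DPP variants.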
+# CHECK: v_sqrt_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_sqrt_f16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x7c,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x7c,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_sqrt_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_sqrt_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_sqrt_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_sqrt_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_sqrt_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_sqrt_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_sqrt_f16_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x7c,0x00,0x7e,0x00,0xe4,0x20,0x00
+
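+# v_rsq_f16 SDWA variants.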
+# CHECK: v_rsq_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7e,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x7e,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_rsq_f16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7e,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x7e,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_rsq_f16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7e,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x7e,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_rsq_f16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7e,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x7e,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_rsq_f16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7e,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x7e,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_rsq_f16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7e,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x7e,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_rsq_f16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7e,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x7e,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_rsq_f16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7e,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x7e,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_rsq_f16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7e,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x7e,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_rsq_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7e,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x7e,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_rsq_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x7e,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x7e,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_rsq_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x7e,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x7e,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_rsq_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x7e,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x7e,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_rsq_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x7e,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x7e,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_rsq_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x7e,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x7e,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_rsq_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x7e,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x7e,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_rsq_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x7e,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x7e,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_rsq_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x7e,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x7e,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_rsq_f16_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7e,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x7e,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_rsq_f16_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x7e,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x7e,0x00,0x7e,0x00,0x06,0x26,0x06
+
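+# v_rsq_f16 DPP variants.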
+# CHECK: v_rsq_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_rsq_f16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x7e,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_rsq_f16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x7e,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_rsq_f16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_rsq_f16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_rsq_f16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_rsq_f16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_rsq_f16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_rsq_f16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_rsq_f16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_rsq_f16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_rsq_f16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_rsq_f16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_rsq_f16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_rsq_f16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_rsq_f16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_rsq_f16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_rsq_f16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_rsq_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_rsq_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_rsq_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_rsq_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_rsq_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_rsq_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_rsq_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_rsq_f16_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_rsq_f16_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x7e,0x00,0x7e,0x00,0xe4,0x20,0x00
+
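+# v_log_f16 SDWA variants.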
+# CHECK: v_log_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x80,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x80,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_log_f16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x80,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x80,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_log_f16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x80,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x80,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_log_f16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x80,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x80,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_log_f16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x80,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x80,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_log_f16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x80,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x80,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_log_f16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x80,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x80,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_log_f16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x80,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x80,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_log_f16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x80,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x80,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_log_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x80,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x80,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_log_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x80,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x80,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_log_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x80,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x80,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_log_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x80,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x80,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_log_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x80,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x80,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_log_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x80,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x80,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_log_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x80,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x80,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_log_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x80,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x80,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_log_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x80,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x80,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_log_f16_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x80,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x80,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_log_f16_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x80,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x80,0x00,0x7e,0x00,0x06,0x26,0x06
+
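+# v_log_f16 DPP variants.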
+# CHECK: v_log_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_log_f16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x80,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_log_f16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x80,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_log_f16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_log_f16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_log_f16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_log_f16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_log_f16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_log_f16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_log_f16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_log_f16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_log_f16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_log_f16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_log_f16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_log_f16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_log_f16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_log_f16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_log_f16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_log_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x80,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_log_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x80,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_log_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x80,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_log_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x80,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_log_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x80,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_log_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x80,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_log_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_log_f16_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_log_f16_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x80,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x80,0x00,0x7e,0x00,0xe4,0x20,0x00
+
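+# v_exp_f16 SDWA variants.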
+# CHECK: v_exp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x82,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x82,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_exp_f16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x82,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x82,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_exp_f16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x82,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x82,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_exp_f16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x82,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x82,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_exp_f16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x82,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x82,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_exp_f16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x82,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x82,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_exp_f16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x82,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x82,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_exp_f16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x82,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x82,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_exp_f16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x82,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x82,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_exp_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x82,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x82,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_exp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x82,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x82,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_exp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x82,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x82,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_exp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x82,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x82,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_exp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x82,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x82,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_exp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x82,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x82,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_exp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x82,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x82,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_exp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x82,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x82,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_exp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x82,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x82,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_exp_f16_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x82,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x82,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_exp_f16_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x82,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x82,0x00,0x7e,0x00,0x06,0x26,0x06
+
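+# v_exp_f16 DPP variants.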
+# CHECK: v_exp_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_exp_f16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x82,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_exp_f16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x82,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_exp_f16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_exp_f16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_exp_f16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_exp_f16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_exp_f16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_exp_f16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_exp_f16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_exp_f16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_exp_f16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_exp_f16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_exp_f16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_exp_f16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_exp_f16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_exp_f16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_exp_f16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_exp_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x82,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_exp_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x82,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_exp_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x82,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_exp_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x82,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_exp_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x82,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_exp_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x82,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_exp_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_exp_f16_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_exp_f16_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x82,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x82,0x00,0x7e,0x00,0xe4,0x20,0x00
+
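+# v_frexp_mant_f16 SDWA variants.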
+# CHECK: v_frexp_mant_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x84,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x84,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x84,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x84,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x84,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x84,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x84,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x84,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x84,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x84,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x84,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x84,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x84,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x84,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x84,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x84,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x84,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x84,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x84,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x84,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x84,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x84,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x84,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x84,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x84,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x84,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x84,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x84,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x84,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x84,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x84,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x84,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x84,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x84,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x84,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x84,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x84,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x84,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_frexp_mant_f16_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x84,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x84,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x84,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x84,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x84,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x84,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x84,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x84,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x84,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x84,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_frexp_mant_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_frexp_mant_f16_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x84,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x84,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x86,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x86,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x86,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x86,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x86,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x86,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x86,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x86,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x86,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x86,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x86,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x86,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x86,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x86,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x86,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x86,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x86,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x86,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x86,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x86,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x86,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x86,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x86,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x86,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x86,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x86,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x86,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x86,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x86,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x86,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x86,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x86,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x86,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x86,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x86,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x86,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x86,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x86,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_frexp_exp_i16_f16_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x86,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x86,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x86,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x86,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x86,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x86,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x86,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x86,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x86,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x86,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_frexp_exp_i16_f16_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x86,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x86,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_floor_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x88,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x88,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_floor_f16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x88,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x88,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_floor_f16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x88,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x88,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_floor_f16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x88,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x88,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_floor_f16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x88,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x88,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_floor_f16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x88,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x88,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_floor_f16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x88,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x88,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_floor_f16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x88,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x88,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_floor_f16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x88,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x88,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_floor_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x88,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x88,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_floor_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x88,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x88,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_floor_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x88,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x88,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_floor_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x88,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x88,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_floor_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x88,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x88,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_floor_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x88,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x88,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_floor_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x88,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x88,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_floor_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x88,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x88,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_floor_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x88,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x88,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_floor_f16_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x88,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x88,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_floor_f16_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x88,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x88,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_floor_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_floor_f16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x88,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_floor_f16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x88,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_floor_f16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_floor_f16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_floor_f16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_floor_f16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_floor_f16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_floor_f16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_floor_f16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_floor_f16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_floor_f16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_floor_f16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_floor_f16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_floor_f16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_floor_f16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_floor_f16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_floor_f16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_floor_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x88,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_floor_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x88,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_floor_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x88,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_floor_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x88,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_floor_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x88,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_floor_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x88,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_floor_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_floor_f16_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_floor_f16_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x88,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x88,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_ceil_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8a,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x8a,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_ceil_f16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8a,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x8a,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_ceil_f16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8a,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x8a,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_ceil_f16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8a,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x8a,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_ceil_f16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8a,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x8a,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_ceil_f16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8a,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x8a,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_ceil_f16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8a,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x8a,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_ceil_f16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8a,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x8a,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_ceil_f16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8a,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x8a,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_ceil_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8a,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x8a,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_ceil_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x8a,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x8a,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_ceil_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x8a,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x8a,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_ceil_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x8a,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x8a,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_ceil_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x8a,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x8a,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_ceil_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x8a,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x8a,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_ceil_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x8a,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x8a,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_ceil_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x8a,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x8a,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_ceil_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x8a,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x8a,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_ceil_f16_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8a,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x8a,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_ceil_f16_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8a,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x8a,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_ceil_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ceil_f16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x8a,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ceil_f16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x8a,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_ceil_f16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_ceil_f16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_ceil_f16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_ceil_f16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_ceil_f16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_ceil_f16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_ceil_f16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_ceil_f16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_ceil_f16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_ceil_f16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_ceil_f16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_ceil_f16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_ceil_f16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_ceil_f16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_ceil_f16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_ceil_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_ceil_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_ceil_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_ceil_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_ceil_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_ceil_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_ceil_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_ceil_f16_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_ceil_f16_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x8a,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_trunc_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8c,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x8c,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_trunc_f16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8c,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x8c,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_trunc_f16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8c,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x8c,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_trunc_f16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8c,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x8c,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_trunc_f16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8c,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x8c,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_trunc_f16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8c,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x8c,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_trunc_f16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8c,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x8c,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_trunc_f16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8c,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x8c,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_trunc_f16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8c,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x8c,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_trunc_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8c,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x8c,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_trunc_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x8c,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x8c,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_trunc_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x8c,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x8c,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_trunc_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x8c,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x8c,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_trunc_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x8c,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x8c,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_trunc_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x8c,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x8c,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_trunc_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x8c,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x8c,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_trunc_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x8c,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x8c,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_trunc_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x8c,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x8c,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_trunc_f16_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8c,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x8c,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_trunc_f16_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8c,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x8c,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_trunc_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_trunc_f16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x8c,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_trunc_f16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x8c,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_trunc_f16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_trunc_f16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_trunc_f16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_trunc_f16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_trunc_f16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_trunc_f16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_trunc_f16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_trunc_f16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_trunc_f16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_trunc_f16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_trunc_f16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_trunc_f16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_trunc_f16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_trunc_f16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_trunc_f16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_trunc_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_trunc_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_trunc_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_trunc_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_trunc_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_trunc_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_trunc_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_trunc_f16_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_trunc_f16_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x8c,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_rndne_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8e,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x8e,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_rndne_f16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8e,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x8e,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_rndne_f16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8e,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x8e,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_rndne_f16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8e,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x8e,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_rndne_f16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8e,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x8e,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_rndne_f16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8e,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x8e,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_rndne_f16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8e,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x8e,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_rndne_f16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8e,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x8e,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_rndne_f16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8e,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x8e,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_rndne_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8e,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x8e,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_rndne_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x8e,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x8e,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_rndne_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x8e,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x8e,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_rndne_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x8e,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x8e,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_rndne_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x8e,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x8e,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_rndne_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x8e,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x8e,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_rndne_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x8e,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x8e,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_rndne_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x8e,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x8e,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_rndne_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x8e,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x8e,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_rndne_f16_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8e,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x8e,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_rndne_f16_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x8e,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x8e,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_rndne_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_rndne_f16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x8e,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_rndne_f16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x8e,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_rndne_f16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_rndne_f16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_rndne_f16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_rndne_f16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_rndne_f16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_rndne_f16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_rndne_f16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_rndne_f16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_rndne_f16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_rndne_f16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_rndne_f16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_rndne_f16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_rndne_f16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_rndne_f16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_rndne_f16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_rndne_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_rndne_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_rndne_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_rndne_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_rndne_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_rndne_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_rndne_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_rndne_f16_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_rndne_f16_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x8e,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_fract_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x90,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x90,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_fract_f16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x90,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x90,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_fract_f16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x90,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x90,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_fract_f16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x90,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x90,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_fract_f16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x90,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x90,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_fract_f16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x90,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x90,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_fract_f16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x90,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x90,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_fract_f16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x90,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x90,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_fract_f16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x90,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x90,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_fract_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x90,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x90,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_fract_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x90,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x90,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_fract_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x90,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x90,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_fract_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x90,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x90,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_fract_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x90,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x90,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_fract_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x90,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x90,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_fract_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x90,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x90,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_fract_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x90,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x90,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_fract_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x90,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x90,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_fract_f16_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x90,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x90,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_fract_f16_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x90,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x90,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_fract_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_fract_f16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x90,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_fract_f16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x90,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_fract_f16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_fract_f16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_fract_f16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_fract_f16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_fract_f16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_fract_f16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_fract_f16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_fract_f16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_fract_f16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_fract_f16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_fract_f16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_fract_f16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_fract_f16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_fract_f16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_fract_f16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_fract_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x90,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_fract_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x90,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_fract_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x90,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_fract_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x90,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_fract_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x90,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_fract_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x90,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_fract_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_fract_f16_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_fract_f16_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x90,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x90,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_sin_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x92,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x92,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_sin_f16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x92,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x92,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_sin_f16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x92,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x92,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_sin_f16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x92,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x92,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_sin_f16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x92,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x92,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_sin_f16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x92,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x92,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_sin_f16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x92,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x92,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_sin_f16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x92,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x92,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_sin_f16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x92,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x92,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_sin_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x92,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x92,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_sin_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x92,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x92,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_sin_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x92,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x92,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_sin_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x92,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x92,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_sin_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x92,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x92,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_sin_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x92,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x92,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_sin_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x92,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x92,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_sin_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x92,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x92,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_sin_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x92,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x92,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_sin_f16_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x92,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x92,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_sin_f16_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x92,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x92,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_sin_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_sin_f16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x92,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_sin_f16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x92,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_sin_f16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_sin_f16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_sin_f16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_sin_f16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_sin_f16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_sin_f16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_sin_f16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_sin_f16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_sin_f16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_sin_f16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_sin_f16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_sin_f16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_sin_f16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_sin_f16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_sin_f16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_sin_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x92,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_sin_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x92,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_sin_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x92,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_sin_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x92,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_sin_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x92,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_sin_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x92,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_sin_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_sin_f16_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_sin_f16_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x92,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x92,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_cos_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x94,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x94,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_cos_f16_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x94,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x94,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_cos_f16_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x94,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x94,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_cos_f16_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x94,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x94,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_cos_f16_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x94,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x94,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_cos_f16_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x94,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x94,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_cos_f16_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x94,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x94,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_cos_f16_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x94,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x94,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_cos_f16_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x94,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x94,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_cos_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x94,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x94,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_cos_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x94,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x94,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_cos_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x94,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x94,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_cos_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x94,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x94,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_cos_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x94,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x94,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_cos_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x94,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x94,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_cos_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x94,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x94,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_cos_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x94,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x94,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_cos_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x94,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x94,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_cos_f16_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x94,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x94,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_cos_f16_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x94,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x94,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_cos_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cos_f16_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x94,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_cos_f16_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x94,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_cos_f16_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_cos_f16_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_cos_f16_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_cos_f16_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_cos_f16_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_cos_f16_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_cos_f16_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_cos_f16_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_cos_f16_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_cos_f16_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_cos_f16_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_cos_f16_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_cos_f16_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_cos_f16_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_cos_f16_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_cos_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x94,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_cos_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x94,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_cos_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x94,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_cos_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x94,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_cos_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x94,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_cos_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x94,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_cos_f16_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_cos_f16_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_cos_f16_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x94,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x94,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_exp_legacy_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x96,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x96,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x96,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x96,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x96,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x96,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x96,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x96,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x96,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x96,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x96,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x96,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x96,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x96,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x96,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x96,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x96,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x96,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x96,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x96,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x96,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x96,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x96,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x96,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x96,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x96,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x96,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x96,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x96,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x96,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x96,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x96,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x96,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x96,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x96,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x96,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x96,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x96,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_exp_legacy_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x96,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x96,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x96,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x96,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x96,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x96,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x96,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x96,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x96,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x96,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_exp_legacy_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_exp_legacy_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x96,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x96,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_log_legacy_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x98,0x00,0x7e,0x00,0x06,0x06,0x06]
+0xf9,0x98,0x00,0x7e,0x00,0x06,0x06,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x98,0xfe,0x7f,0x00,0x06,0x06,0x06]
+0xf9,0x98,0xfe,0x7f,0x00,0x06,0x06,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x98,0x00,0x7e,0xff,0x06,0x06,0x06]
+0xf9,0x98,0x00,0x7e,0xff,0x06,0x06,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x98,0x00,0x7e,0x00,0x26,0x06,0x06]
+0xf9,0x98,0x00,0x7e,0x00,0x26,0x06,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x98,0x00,0x7e,0x00,0x00,0x06,0x06]
+0xf9,0x98,0x00,0x7e,0x00,0x00,0x06,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x98,0x00,0x7e,0x00,0x01,0x06,0x06]
+0xf9,0x98,0x00,0x7e,0x00,0x01,0x06,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x98,0x00,0x7e,0x00,0x02,0x06,0x06]
+0xf9,0x98,0x00,0x7e,0x00,0x02,0x06,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x98,0x00,0x7e,0x00,0x03,0x06,0x06]
+0xf9,0x98,0x00,0x7e,0x00,0x03,0x06,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x98,0x00,0x7e,0x00,0x04,0x06,0x06]
+0xf9,0x98,0x00,0x7e,0x00,0x04,0x06,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x98,0x00,0x7e,0x00,0x05,0x06,0x06]
+0xf9,0x98,0x00,0x7e,0x00,0x05,0x06,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD ; encoding: [0xf9,0x98,0x00,0x7e,0x00,0x0e,0x06,0x06]
+0xf9,0x98,0x00,0x7e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x98,0x00,0x7e,0x00,0x16,0x06,0x06]
+0xf9,0x98,0x00,0x7e,0x00,0x16,0x06,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 ; encoding: [0xf9,0x98,0x00,0x7e,0x00,0x06,0x00,0x06]
+0xf9,0x98,0x00,0x7e,0x00,0x06,0x00,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; encoding: [0xf9,0x98,0x00,0x7e,0x00,0x06,0x01,0x06]
+0xf9,0x98,0x00,0x7e,0x00,0x06,0x01,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; encoding: [0xf9,0x98,0x00,0x7e,0x00,0x06,0x02,0x06]
+0xf9,0x98,0x00,0x7e,0x00,0x06,0x02,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 ; encoding: [0xf9,0x98,0x00,0x7e,0x00,0x06,0x03,0x06]
+0xf9,0x98,0x00,0x7e,0x00,0x06,0x03,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 ; encoding: [0xf9,0x98,0x00,0x7e,0x00,0x06,0x04,0x06]
+0xf9,0x98,0x00,0x7e,0x00,0x06,0x04,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; encoding: [0xf9,0x98,0x00,0x7e,0x00,0x06,0x05,0x06]
+0xf9,0x98,0x00,0x7e,0x00,0x06,0x05,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x98,0x00,0x7e,0x00,0x06,0x16,0x06]
+0xf9,0x98,0x00,0x7e,0x00,0x06,0x16,0x06
+
+# CHECK: v_log_legacy_f32_sdwa v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD ; encoding: [0xf9,0x98,0x00,0x7e,0x00,0x06,0x26,0x06]
+0xf9,0x98,0x00,0x7e,0x00,0x06,0x26,0x06
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0xe4,0x00,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_log_legacy_f32_dpp v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0xfe,0x7f,0x00,0xe4,0x00,0x00]
+0xfa,0x98,0xfe,0x7f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0xff,0xe4,0x00,0x00]
+0xfa,0x98,0x00,0x7e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0x1b,0x00,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0x40,0x01,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0x40,0x01,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0x41,0x01,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0x41,0x01,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0x42,0x01,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0x42,0x01,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0x43,0x01,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0x43,0x01,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0x30,0x01,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0x30,0x01,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0x34,0x01,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0x34,0x01,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0x38,0x01,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0x38,0x01,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0x3c,0x01,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0x01,0x01,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0x01,0x01,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0x0f,0x01,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0x11,0x01,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0x11,0x01,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0x1f,0x01,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0x21,0x01,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0x21,0x01,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0x2f,0x01,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0xe4,0x00,0x10]
+0xfa,0x98,0x00,0x7e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0xe4,0x00,0x30]
+0xfa,0x98,0x00,0x7e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0xe4,0x00,0xf0]
+0xfa,0x98,0x00,0x7e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0xe4,0x00,0x01]
+0xfa,0x98,0x00,0x7e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0xe4,0x00,0x03]
+0xfa,0x98,0x00,0x7e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0xe4,0x00,0x0f]
+0xfa,0x98,0x00,0x7e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_log_legacy_f32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0xe4,0x08,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0xe4,0x10,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_log_legacy_f32_dpp v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x98,0x00,0x7e,0x00,0xe4,0x20,0x00]
+0xfa,0x98,0x00,0x7e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x02,0x00,0x06,0x06,0x06
+
+# CHECK: v_add_f32_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x03,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x03,0x00,0x06,0x06,0x06
+
+# CHECK: v_add_f32_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x02,0xff,0x06,0x06,0x06
+
+# CHECK: v_add_f32_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x02,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x02,0x00,0x06,0x06,0x06
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x02,0x00,0x26,0x06,0x06
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x02,0x00,0x00,0x06,0x06
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x02,0x00,0x01,0x06,0x06
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x02,0x00,0x02,0x06,0x06
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x02,0x00,0x03,0x06,0x06
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x02,0x00,0x04,0x06,0x06
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x02,0x00,0x05,0x06,0x06
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x02,0x00,0x0e,0x06,0x06
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x02,0x00,0x16,0x06,0x06
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x02,0x00,0x06,0x00,0x06
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x02,0x00,0x06,0x01,0x06
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x02,0x00,0x06,0x02,0x06
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x02,0x00,0x06,0x03,0x06
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x02,0x00,0x06,0x04,0x06
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x02,0x00,0x06,0x05,0x06
+
+# CHECK: v_add_f32_sdwa v0, -v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x06,0x16,0x06]
+0xf9,0x00,0x00,0x02,0x00,0x06,0x16,0x06
+
+# CHECK: v_add_f32_sdwa v0, |v0|, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x06,0x26,0x06]
+0xf9,0x00,0x00,0x02,0x00,0x06,0x26,0x06
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x02,0x00,0x06,0x06,0x00
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x02,0x00,0x06,0x06,0x01
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x02,0x00,0x06,0x06,0x02
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x02,0x00,0x06,0x06,0x03
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x02,0x00,0x06,0x06,0x04
+
+# CHECK: v_add_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x02,0x00,0x06,0x06,0x05
+
+# CHECK: v_add_f32_sdwa v0, v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x06,0x06,0x16]
+0xf9,0x00,0x00,0x02,0x00,0x06,0x06,0x16
+
+# CHECK: v_add_f32_sdwa v0, v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x02,0x00,0x06,0x06,0x26]
+0xf9,0x00,0x00,0x02,0x00,0x06,0x06,0x26
+
+# CHECK: v_add_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x02,0x00,0xe4,0x00,0x00
+
+# CHECK: v_add_f32_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x03,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x03,0x00,0xe4,0x00,0x00
+
+# CHECK: v_add_f32_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x02,0xff,0xe4,0x00,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x02,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x02,0x00,0xe4,0x00,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x02,0x00,0x1b,0x00,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x02,0x00,0x40,0x01,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x02,0x00,0x41,0x01,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x02,0x00,0x42,0x01,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x02,0x00,0x43,0x01,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x02,0x00,0x30,0x01,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x02,0x00,0x34,0x01,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x02,0x00,0x38,0x01,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x02,0x00,0x3c,0x01,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x02,0x00,0x01,0x01,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x02,0x00,0x0f,0x01,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x02,0x00,0x11,0x01,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x02,0x00,0x1f,0x01,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x02,0x00,0x21,0x01,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x02,0x00,0x2f,0x01,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x02,0x00,0xe4,0x00,0x10
+
+# CHECK: v_add_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x02,0x00,0xe4,0x00,0x30
+
+# CHECK: v_add_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x02,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_add_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x02,0x00,0xe4,0x00,0x01
+
+# CHECK: v_add_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x02,0x00,0xe4,0x00,0x03
+
+# CHECK: v_add_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x02,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x02,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_add_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x02,0x00,0xe4,0x08,0x00
+
+# CHECK: v_add_f32_dpp v0, -v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0xe4,0x10,0x00]
+0xfa,0x00,0x00,0x02,0x00,0xe4,0x10,0x00
+
+# CHECK: v_add_f32_dpp v0, |v0|, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0xe4,0x20,0x00]
+0xfa,0x00,0x00,0x02,0x00,0xe4,0x20,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0xe4,0x40,0x00]
+0xfa,0x00,0x00,0x02,0x00,0xe4,0x40,0x00
+
+# CHECK: v_add_f32_dpp v0, v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x02,0x00,0xe4,0x80,0x00]
+0xfa,0x00,0x00,0x02,0x00,0xe4,0x80,0x00
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x04,0x00,0x06,0x06,0x06
+
+# CHECK: v_sub_f32_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x05,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x05,0x00,0x06,0x06,0x06
+
+# CHECK: v_sub_f32_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x04,0xff,0x06,0x06,0x06
+
+# CHECK: v_sub_f32_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x04,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x04,0x00,0x06,0x06,0x06
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x04,0x00,0x26,0x06,0x06
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x04,0x00,0x00,0x06,0x06
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x04,0x00,0x01,0x06,0x06
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x04,0x00,0x02,0x06,0x06
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x04,0x00,0x03,0x06,0x06
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x04,0x00,0x04,0x06,0x06
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x04,0x00,0x05,0x06,0x06
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x04,0x00,0x0e,0x06,0x06
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x04,0x00,0x16,0x06,0x06
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x04,0x00,0x06,0x00,0x06
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x04,0x00,0x06,0x01,0x06
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x04,0x00,0x06,0x02,0x06
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x04,0x00,0x06,0x03,0x06
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x04,0x00,0x06,0x04,0x06
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x04,0x00,0x06,0x05,0x06
+
+# CHECK: v_sub_f32_sdwa v0, -v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x06,0x16,0x06]
+0xf9,0x00,0x00,0x04,0x00,0x06,0x16,0x06
+
+# CHECK: v_sub_f32_sdwa v0, |v0|, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x06,0x26,0x06]
+0xf9,0x00,0x00,0x04,0x00,0x06,0x26,0x06
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x04,0x00,0x06,0x06,0x00
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x04,0x00,0x06,0x06,0x01
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x04,0x00,0x06,0x06,0x02
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x04,0x00,0x06,0x06,0x03
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x04,0x00,0x06,0x06,0x04
+
+# CHECK: v_sub_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x04,0x00,0x06,0x06,0x05
+
+# CHECK: v_sub_f32_sdwa v0, v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x06,0x06,0x16]
+0xf9,0x00,0x00,0x04,0x00,0x06,0x06,0x16
+
+# CHECK: v_sub_f32_sdwa v0, v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x04,0x00,0x06,0x06,0x26]
+0xf9,0x00,0x00,0x04,0x00,0x06,0x06,0x26
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x04,0x00,0xe4,0x00,0x00
+
+# CHECK: v_sub_f32_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x05,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x05,0x00,0xe4,0x00,0x00
+
+# CHECK: v_sub_f32_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x04,0xff,0xe4,0x00,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x04,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x04,0x00,0xe4,0x00,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x04,0x00,0x1b,0x00,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x04,0x00,0x40,0x01,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x04,0x00,0x41,0x01,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x04,0x00,0x42,0x01,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x04,0x00,0x43,0x01,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x04,0x00,0x30,0x01,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x04,0x00,0x34,0x01,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x04,0x00,0x38,0x01,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x04,0x00,0x3c,0x01,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x04,0x00,0x01,0x01,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x04,0x00,0x0f,0x01,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x04,0x00,0x11,0x01,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x04,0x00,0x1f,0x01,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x04,0x00,0x21,0x01,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x04,0x00,0x2f,0x01,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x04,0x00,0xe4,0x00,0x10
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x04,0x00,0xe4,0x00,0x30
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x04,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x04,0x00,0xe4,0x00,0x01
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x04,0x00,0xe4,0x00,0x03
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x04,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x04,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_sub_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x04,0x00,0xe4,0x08,0x00
+
+# CHECK: v_sub_f32_dpp v0, -v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0xe4,0x10,0x00]
+0xfa,0x00,0x00,0x04,0x00,0xe4,0x10,0x00
+
+# CHECK: v_sub_f32_dpp v0, |v0|, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0xe4,0x20,0x00]
+0xfa,0x00,0x00,0x04,0x00,0xe4,0x20,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0xe4,0x40,0x00]
+0xfa,0x00,0x00,0x04,0x00,0xe4,0x40,0x00
+
+# CHECK: v_sub_f32_dpp v0, v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x04,0x00,0xe4,0x80,0x00]
+0xfa,0x00,0x00,0x04,0x00,0xe4,0x80,0x00
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x06,0x00,0x06,0x06,0x06
+
+# CHECK: v_subrev_f32_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x07,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x07,0x00,0x06,0x06,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x06,0xff,0x06,0x06,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x06,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x06,0x00,0x06,0x06,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x06,0x00,0x26,0x06,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x06,0x00,0x00,0x06,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x06,0x00,0x01,0x06,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x06,0x00,0x02,0x06,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x06,0x00,0x03,0x06,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x06,0x00,0x04,0x06,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x06,0x00,0x05,0x06,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x06,0x00,0x0e,0x06,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x06,0x00,0x16,0x06,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x06,0x00,0x06,0x00,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x06,0x00,0x06,0x01,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x06,0x00,0x06,0x02,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x06,0x00,0x06,0x03,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x06,0x00,0x06,0x04,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x06,0x00,0x06,0x05,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, -v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x06,0x16,0x06]
+0xf9,0x00,0x00,0x06,0x00,0x06,0x16,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, |v0|, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x06,0x26,0x06]
+0xf9,0x00,0x00,0x06,0x00,0x06,0x26,0x06
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x06,0x00,0x06,0x06,0x00
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x06,0x00,0x06,0x06,0x01
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x06,0x00,0x06,0x06,0x02
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x06,0x00,0x06,0x06,0x03
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x06,0x00,0x06,0x06,0x04
+
+# CHECK: v_subrev_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x06,0x00,0x06,0x06,0x05
+
+# CHECK: v_subrev_f32_sdwa v0, v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x06,0x06,0x16]
+0xf9,0x00,0x00,0x06,0x00,0x06,0x06,0x16
+
+# CHECK: v_subrev_f32_sdwa v0, v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x06,0x00,0x06,0x06,0x26]
+0xf9,0x00,0x00,0x06,0x00,0x06,0x06,0x26
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x06,0x00,0xe4,0x00,0x00
+
+# CHECK: v_subrev_f32_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x07,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x07,0x00,0xe4,0x00,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x06,0xff,0xe4,0x00,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x06,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x06,0x00,0xe4,0x00,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x06,0x00,0x1b,0x00,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x06,0x00,0x40,0x01,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x06,0x00,0x41,0x01,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x06,0x00,0x42,0x01,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x06,0x00,0x43,0x01,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x06,0x00,0x30,0x01,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x06,0x00,0x34,0x01,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x06,0x00,0x38,0x01,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x06,0x00,0x3c,0x01,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x06,0x00,0x01,0x01,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x06,0x00,0x0f,0x01,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x06,0x00,0x11,0x01,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x06,0x00,0x1f,0x01,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x06,0x00,0x21,0x01,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x06,0x00,0x2f,0x01,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x06,0x00,0xe4,0x00,0x10
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x06,0x00,0xe4,0x00,0x30
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x06,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x06,0x00,0xe4,0x00,0x01
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x06,0x00,0xe4,0x00,0x03
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x06,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x06,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_subrev_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x06,0x00,0xe4,0x08,0x00
+
+# CHECK: v_subrev_f32_dpp v0, -v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0xe4,0x10,0x00]
+0xfa,0x00,0x00,0x06,0x00,0xe4,0x10,0x00
+
+# CHECK: v_subrev_f32_dpp v0, |v0|, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0xe4,0x20,0x00]
+0xfa,0x00,0x00,0x06,0x00,0xe4,0x20,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0xe4,0x40,0x00]
+0xfa,0x00,0x00,0x06,0x00,0xe4,0x40,0x00
+
+# CHECK: v_subrev_f32_dpp v0, v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x06,0x00,0xe4,0x80,0x00]
+0xfa,0x00,0x00,0x06,0x00,0xe4,0x80,0x00
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x08,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x09,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x09,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x08,0xff,0x06,0x06,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x08,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x08,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x08,0x00,0x26,0x06,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x08,0x00,0x00,0x06,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x08,0x00,0x01,0x06,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x08,0x00,0x02,0x06,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x08,0x00,0x03,0x06,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x08,0x00,0x04,0x06,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x08,0x00,0x05,0x06,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x08,0x00,0x0e,0x06,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x08,0x00,0x16,0x06,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x08,0x00,0x06,0x00,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x08,0x00,0x06,0x01,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x08,0x00,0x06,0x02,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x08,0x00,0x06,0x03,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x08,0x00,0x06,0x04,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x08,0x00,0x06,0x05,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, -v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x06,0x16,0x06]
+0xf9,0x00,0x00,0x08,0x00,0x06,0x16,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, |v0|, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x06,0x26,0x06]
+0xf9,0x00,0x00,0x08,0x00,0x06,0x26,0x06
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x08,0x00,0x06,0x06,0x00
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x08,0x00,0x06,0x06,0x01
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x08,0x00,0x06,0x06,0x02
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x08,0x00,0x06,0x06,0x03
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x08,0x00,0x06,0x06,0x04
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x08,0x00,0x06,0x06,0x05
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x06,0x06,0x16]
+0xf9,0x00,0x00,0x08,0x00,0x06,0x06,0x16
+
+# CHECK: v_mul_legacy_f32_sdwa v0, v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x08,0x00,0x06,0x06,0x26]
+0xf9,0x00,0x00,0x08,0x00,0x06,0x06,0x26
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x08,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x09,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x09,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x08,0xff,0xe4,0x00,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x08,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x08,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x08,0x00,0x1b,0x00,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x08,0x00,0x40,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x08,0x00,0x41,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x08,0x00,0x42,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x08,0x00,0x43,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x08,0x00,0x30,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x08,0x00,0x34,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x08,0x00,0x38,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x08,0x00,0x3c,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x08,0x00,0x01,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x08,0x00,0x0f,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x08,0x00,0x11,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x08,0x00,0x1f,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x08,0x00,0x21,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x08,0x00,0x2f,0x01,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x08,0x00,0xe4,0x00,0x10
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x08,0x00,0xe4,0x00,0x30
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x08,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x08,0x00,0xe4,0x00,0x01
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x08,0x00,0xe4,0x00,0x03
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x08,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x08,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x08,0x00,0xe4,0x08,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, -v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0xe4,0x10,0x00]
+0xfa,0x00,0x00,0x08,0x00,0xe4,0x10,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, |v0|, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0xe4,0x20,0x00]
+0xfa,0x00,0x00,0x08,0x00,0xe4,0x20,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0xe4,0x40,0x00]
+0xfa,0x00,0x00,0x08,0x00,0xe4,0x40,0x00
+
+# CHECK: v_mul_legacy_f32_dpp v0, v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x08,0x00,0xe4,0x80,0x00]
+0xfa,0x00,0x00,0x08,0x00,0xe4,0x80,0x00
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x0a,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_f32_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x0b,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x0b,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_f32_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x0a,0xff,0x06,0x06,0x06
+
+# CHECK: v_mul_f32_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x0a,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x0a,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x0a,0x00,0x26,0x06,0x06
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x0a,0x00,0x00,0x06,0x06
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x0a,0x00,0x01,0x06,0x06
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x0a,0x00,0x02,0x06,0x06
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x0a,0x00,0x03,0x06,0x06
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x0a,0x00,0x04,0x06,0x06
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x0a,0x00,0x05,0x06,0x06
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x0a,0x00,0x0e,0x06,0x06
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x0a,0x00,0x16,0x06,0x06
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x0a,0x00,0x06,0x00,0x06
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x0a,0x00,0x06,0x01,0x06
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x0a,0x00,0x06,0x02,0x06
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x0a,0x00,0x06,0x03,0x06
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x0a,0x00,0x06,0x04,0x06
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x0a,0x00,0x06,0x05,0x06
+
+# CHECK: v_mul_f32_sdwa v0, -v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x06,0x16,0x06]
+0xf9,0x00,0x00,0x0a,0x00,0x06,0x16,0x06
+
+# CHECK: v_mul_f32_sdwa v0, |v0|, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x06,0x26,0x06]
+0xf9,0x00,0x00,0x0a,0x00,0x06,0x26,0x06
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x0a,0x00,0x06,0x06,0x00
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x0a,0x00,0x06,0x06,0x01
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x0a,0x00,0x06,0x06,0x02
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x0a,0x00,0x06,0x06,0x03
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x0a,0x00,0x06,0x06,0x04
+
+# CHECK: v_mul_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x0a,0x00,0x06,0x06,0x05
+
+# CHECK: v_mul_f32_sdwa v0, v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x06,0x06,0x16]
+0xf9,0x00,0x00,0x0a,0x00,0x06,0x06,0x16
+
+# CHECK: v_mul_f32_sdwa v0, v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0a,0x00,0x06,0x06,0x26]
+0xf9,0x00,0x00,0x0a,0x00,0x06,0x06,0x26
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_f32_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x0b,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x0b,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_f32_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x0a,0xff,0xe4,0x00,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x0a,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x0a,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0x1b,0x00,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0x40,0x01,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0x41,0x01,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0x42,0x01,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0x43,0x01,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0x30,0x01,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0x34,0x01,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0x38,0x01,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0x3c,0x01,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0x01,0x01,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0x0f,0x01,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0x11,0x01,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0x1f,0x01,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0x21,0x01,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0x2f,0x01,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x0a,0x00,0xe4,0x00,0x10
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x0a,0x00,0xe4,0x00,0x30
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x0a,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x0a,0x00,0xe4,0x00,0x01
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x0a,0x00,0xe4,0x00,0x03
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x0a,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_mul_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0xe4,0x08,0x00
+
+# CHECK: v_mul_f32_dpp v0, -v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0xe4,0x10,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0xe4,0x10,0x00
+
+# CHECK: v_mul_f32_dpp v0, |v0|, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0xe4,0x20,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0xe4,0x20,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0xe4,0x40,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0xe4,0x40,0x00
+
+# CHECK: v_mul_f32_dpp v0, v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0a,0x00,0xe4,0x80,0x00]
+0xfa,0x00,0x00,0x0a,0x00,0xe4,0x80,0x00
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x0c,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x0d,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x0d,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x0c,0xff,0x06,0x06,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x0c,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x0c,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x0c,0x00,0x26,0x06,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x0c,0x00,0x00,0x06,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x0c,0x00,0x01,0x06,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x0c,0x00,0x02,0x06,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x0c,0x00,0x03,0x06,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x0c,0x00,0x04,0x06,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x0c,0x00,0x05,0x06,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x0c,0x00,0x0e,0x06,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x0c,0x00,0x16,0x06,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x0c,0x00,0x06,0x00,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x0c,0x00,0x06,0x01,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x0c,0x00,0x06,0x02,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x0c,0x00,0x06,0x03,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x0c,0x00,0x06,0x04,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x0c,0x00,0x06,0x05,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x0c,0x00,0x06,0x0e,0x06
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x0c,0x00,0x06,0x06,0x00
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x0c,0x00,0x06,0x06,0x01
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x0c,0x00,0x06,0x06,0x02
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x0c,0x00,0x06,0x06,0x03
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x0c,0x00,0x06,0x06,0x04
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x0c,0x00,0x06,0x06,0x05
+
+# CHECK: v_mul_i32_i24_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0c,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x0c,0x00,0x06,0x06,0x0e
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x0c,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_i32_i24_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x0d,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x0d,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_i32_i24_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x0c,0xff,0xe4,0x00,0x00
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x0c,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x0c,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x0c,0x00,0x1b,0x00,0x00
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x0c,0x00,0x40,0x01,0x00
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x0c,0x00,0x41,0x01,0x00
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x0c,0x00,0x42,0x01,0x00
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x0c,0x00,0x43,0x01,0x00
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x0c,0x00,0x30,0x01,0x00
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x0c,0x00,0x34,0x01,0x00
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x0c,0x00,0x38,0x01,0x00
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x0c,0x00,0x3c,0x01,0x00
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x0c,0x00,0x01,0x01,0x00
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x0c,0x00,0x0f,0x01,0x00
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x0c,0x00,0x11,0x01,0x00
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x0c,0x00,0x1f,0x01,0x00
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x0c,0x00,0x21,0x01,0x00
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x0c,0x00,0x2f,0x01,0x00
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x0c,0x00,0xe4,0x00,0x10
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x0c,0x00,0xe4,0x00,0x30
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x0c,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x0c,0x00,0xe4,0x00,0x01
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x0c,0x00,0xe4,0x00,0x03
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x0c,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_mul_i32_i24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x0c,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x0c,0x00,0xe4,0x08,0x00
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x0e,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x0f,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x0f,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x0e,0xff,0x06,0x06,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x0e,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x0e,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x0e,0x00,0x26,0x06,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x0e,0x00,0x00,0x06,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x0e,0x00,0x01,0x06,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x0e,0x00,0x02,0x06,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x0e,0x00,0x03,0x06,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x0e,0x00,0x04,0x06,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x0e,0x00,0x05,0x06,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x0e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x0e,0x00,0x16,0x06,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x0e,0x00,0x06,0x00,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x0e,0x00,0x06,0x01,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x0e,0x00,0x06,0x02,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x0e,0x00,0x06,0x03,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x0e,0x00,0x06,0x04,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x0e,0x00,0x06,0x05,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x0e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x0e,0x00,0x06,0x06,0x00
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x0e,0x00,0x06,0x06,0x01
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x0e,0x00,0x06,0x06,0x02
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x0e,0x00,0x06,0x06,0x03
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x0e,0x00,0x06,0x06,0x04
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x0e,0x00,0x06,0x06,0x05
+
+# CHECK: v_mul_hi_i32_i24_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x0e,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x0e,0x00,0x06,0x06,0x0e
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x0e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x0f,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x0f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x0e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x0e,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x0e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x0e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x0e,0x00,0x40,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x0e,0x00,0x41,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x0e,0x00,0x42,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x0e,0x00,0x43,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x0e,0x00,0x30,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x0e,0x00,0x34,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x0e,0x00,0x38,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x0e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x0e,0x00,0x01,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x0e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x0e,0x00,0x11,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x0e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x0e,0x00,0x21,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x0e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x0e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x0e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x0e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x0e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x0e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x0e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_mul_hi_i32_i24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x0e,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x0e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x10,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x11,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x11,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x10,0xff,0x06,0x06,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x10,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x10,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x10,0x00,0x26,0x06,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x10,0x00,0x00,0x06,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x10,0x00,0x01,0x06,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x10,0x00,0x02,0x06,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x10,0x00,0x03,0x06,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x10,0x00,0x04,0x06,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x10,0x00,0x05,0x06,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x10,0x00,0x0e,0x06,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x10,0x00,0x16,0x06,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x10,0x00,0x06,0x00,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x10,0x00,0x06,0x01,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x10,0x00,0x06,0x02,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x10,0x00,0x06,0x03,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x10,0x00,0x06,0x04,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x10,0x00,0x06,0x05,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x10,0x00,0x06,0x0e,0x06
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x10,0x00,0x06,0x06,0x00
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x10,0x00,0x06,0x06,0x01
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x10,0x00,0x06,0x06,0x02
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x10,0x00,0x06,0x06,0x03
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x10,0x00,0x06,0x06,0x04
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x10,0x00,0x06,0x06,0x05
+
+# CHECK: v_mul_u32_u24_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x10,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x10,0x00,0x06,0x06,0x0e
+
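+# Note: a sketch of the SDWA layout these cases exercise (per the GCN3/VI
+# ISA): 0xf9 in the VOP2 src0 field selects SDWA, and a trailing dword
+# carries the real operands: byte 0 = src0 VGPR; byte 1 = dst_sel[2:0],
+# dst_unused[4:3], clamp[5]; byte 2 = src0_sel[2:0] with sext[3]/neg[4]/
+# abs[5]; byte 3 = the same selector and modifier fields for src1.
+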
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x10,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_u32_u24_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x11,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x11,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_u32_u24_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x10,0xff,0xe4,0x00,0x00
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x10,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x10,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x10,0x00,0x1b,0x00,0x00
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x10,0x00,0x40,0x01,0x00
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x10,0x00,0x41,0x01,0x00
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x10,0x00,0x42,0x01,0x00
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x10,0x00,0x43,0x01,0x00
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x10,0x00,0x30,0x01,0x00
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x10,0x00,0x34,0x01,0x00
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x10,0x00,0x38,0x01,0x00
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x10,0x00,0x3c,0x01,0x00
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x10,0x00,0x01,0x01,0x00
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x10,0x00,0x0f,0x01,0x00
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x10,0x00,0x11,0x01,0x00
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x10,0x00,0x1f,0x01,0x00
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x10,0x00,0x21,0x01,0x00
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x10,0x00,0x2f,0x01,0x00
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x10,0x00,0xe4,0x00,0x10
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x10,0x00,0xe4,0x00,0x30
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x10,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x10,0x00,0xe4,0x00,0x01
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x10,0x00,0xe4,0x00,0x03
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x10,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x10,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_mul_u32_u24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x10,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x10,0x00,0xe4,0x08,0x00
+
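+# Note: the DPP (0xfa) cases above follow the same pattern, with the
+# trailing dword holding byte 0 = src0 VGPR, dpp_ctrl[8:0] in bits 8-16,
+# bound_ctrl in bit 19, src0/src1 neg and abs modifiers in bits 20-23, and
+# bank_mask[3:0] plus row_mask[7:4] in byte 3.
+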
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x12,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x13,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x13,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x12,0xff,0x06,0x06,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x12,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x12,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x12,0x00,0x26,0x06,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x12,0x00,0x00,0x06,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x12,0x00,0x01,0x06,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x12,0x00,0x02,0x06,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x12,0x00,0x03,0x06,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x12,0x00,0x04,0x06,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x12,0x00,0x05,0x06,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x12,0x00,0x0e,0x06,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x12,0x00,0x16,0x06,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x12,0x00,0x06,0x00,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x12,0x00,0x06,0x01,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x12,0x00,0x06,0x02,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x12,0x00,0x06,0x03,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x12,0x00,0x06,0x04,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x12,0x00,0x06,0x05,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x12,0x00,0x06,0x0e,0x06
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x12,0x00,0x06,0x06,0x00
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x12,0x00,0x06,0x06,0x01
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x12,0x00,0x06,0x06,0x02
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x12,0x00,0x06,0x06,0x03
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x12,0x00,0x06,0x06,0x04
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x12,0x00,0x06,0x06,0x05
+
+# CHECK: v_mul_hi_u32_u24_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x12,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x12,0x00,0x06,0x06,0x0e
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x12,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x13,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x13,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x12,0xff,0xe4,0x00,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x12,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x12,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x12,0x00,0x1b,0x00,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x12,0x00,0x40,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x12,0x00,0x41,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x12,0x00,0x42,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x12,0x00,0x43,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x12,0x00,0x30,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x12,0x00,0x34,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x12,0x00,0x38,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x12,0x00,0x3c,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x12,0x00,0x01,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x12,0x00,0x0f,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x12,0x00,0x11,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x12,0x00,0x1f,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x12,0x00,0x21,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x12,0x00,0x2f,0x01,0x00
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x12,0x00,0xe4,0x00,0x10
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x12,0x00,0xe4,0x00,0x30
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x12,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x12,0x00,0xe4,0x00,0x01
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x12,0x00,0xe4,0x00,0x03
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x12,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x12,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_mul_hi_u32_u24_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x12,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x12,0x00,0xe4,0x08,0x00
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x14,0x00,0x06,0x06,0x06
+
+# CHECK: v_min_f32_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x15,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x15,0x00,0x06,0x06,0x06
+
+# CHECK: v_min_f32_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x14,0xff,0x06,0x06,0x06
+
+# CHECK: v_min_f32_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x14,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x14,0x00,0x06,0x06,0x06
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x14,0x00,0x26,0x06,0x06
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x14,0x00,0x00,0x06,0x06
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x14,0x00,0x01,0x06,0x06
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x14,0x00,0x02,0x06,0x06
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x14,0x00,0x03,0x06,0x06
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x14,0x00,0x04,0x06,0x06
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x14,0x00,0x05,0x06,0x06
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x14,0x00,0x0e,0x06,0x06
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x14,0x00,0x16,0x06,0x06
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x14,0x00,0x06,0x00,0x06
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x14,0x00,0x06,0x01,0x06
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x14,0x00,0x06,0x02,0x06
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x14,0x00,0x06,0x03,0x06
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x14,0x00,0x06,0x04,0x06
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x14,0x00,0x06,0x05,0x06
+
+# CHECK: v_min_f32_sdwa v0, -v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x06,0x16,0x06]
+0xf9,0x00,0x00,0x14,0x00,0x06,0x16,0x06
+
+# CHECK: v_min_f32_sdwa v0, |v0|, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x06,0x26,0x06]
+0xf9,0x00,0x00,0x14,0x00,0x06,0x26,0x06
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x14,0x00,0x06,0x06,0x00
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x14,0x00,0x06,0x06,0x01
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x14,0x00,0x06,0x06,0x02
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x14,0x00,0x06,0x06,0x03
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x14,0x00,0x06,0x06,0x04
+
+# CHECK: v_min_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x14,0x00,0x06,0x06,0x05
+
+# CHECK: v_min_f32_sdwa v0, v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x06,0x06,0x16]
+0xf9,0x00,0x00,0x14,0x00,0x06,0x06,0x16
+
+# CHECK: v_min_f32_sdwa v0, v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x14,0x00,0x06,0x06,0x26]
+0xf9,0x00,0x00,0x14,0x00,0x06,0x06,0x26
+
+# CHECK: v_min_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x14,0x00,0xe4,0x00,0x00
+
+# CHECK: v_min_f32_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x15,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x15,0x00,0xe4,0x00,0x00
+
+# CHECK: v_min_f32_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x14,0xff,0xe4,0x00,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x14,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x14,0x00,0xe4,0x00,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x14,0x00,0x1b,0x00,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x14,0x00,0x40,0x01,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x14,0x00,0x41,0x01,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x14,0x00,0x42,0x01,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x14,0x00,0x43,0x01,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x14,0x00,0x30,0x01,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x14,0x00,0x34,0x01,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x14,0x00,0x38,0x01,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x14,0x00,0x3c,0x01,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x14,0x00,0x01,0x01,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x14,0x00,0x0f,0x01,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x14,0x00,0x11,0x01,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x14,0x00,0x1f,0x01,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x14,0x00,0x21,0x01,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x14,0x00,0x2f,0x01,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x14,0x00,0xe4,0x00,0x10
+
+# CHECK: v_min_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x14,0x00,0xe4,0x00,0x30
+
+# CHECK: v_min_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x14,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_min_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x14,0x00,0xe4,0x00,0x01
+
+# CHECK: v_min_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x14,0x00,0xe4,0x00,0x03
+
+# CHECK: v_min_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x14,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x14,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_min_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x14,0x00,0xe4,0x08,0x00
+
+# CHECK: v_min_f32_dpp v0, -v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0xe4,0x10,0x00]
+0xfa,0x00,0x00,0x14,0x00,0xe4,0x10,0x00
+
+# CHECK: v_min_f32_dpp v0, |v0|, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0xe4,0x20,0x00]
+0xfa,0x00,0x00,0x14,0x00,0xe4,0x20,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0xe4,0x40,0x00]
+0xfa,0x00,0x00,0x14,0x00,0xe4,0x40,0x00
+
+# CHECK: v_min_f32_dpp v0, v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x14,0x00,0xe4,0x80,0x00]
+0xfa,0x00,0x00,0x14,0x00,0xe4,0x80,0x00
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x16,0x00,0x06,0x06,0x06
+
+# CHECK: v_max_f32_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x17,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x17,0x00,0x06,0x06,0x06
+
+# CHECK: v_max_f32_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x16,0xff,0x06,0x06,0x06
+
+# CHECK: v_max_f32_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x16,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x16,0x00,0x06,0x06,0x06
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x16,0x00,0x26,0x06,0x06
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x16,0x00,0x00,0x06,0x06
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x16,0x00,0x01,0x06,0x06
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x16,0x00,0x02,0x06,0x06
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x16,0x00,0x03,0x06,0x06
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x16,0x00,0x04,0x06,0x06
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x16,0x00,0x05,0x06,0x06
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x16,0x00,0x0e,0x06,0x06
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x16,0x00,0x16,0x06,0x06
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x16,0x00,0x06,0x00,0x06
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x16,0x00,0x06,0x01,0x06
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x16,0x00,0x06,0x02,0x06
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x16,0x00,0x06,0x03,0x06
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x16,0x00,0x06,0x04,0x06
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x16,0x00,0x06,0x05,0x06
+
+# CHECK: v_max_f32_sdwa v0, -v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x06,0x16,0x06]
+0xf9,0x00,0x00,0x16,0x00,0x06,0x16,0x06
+
+# CHECK: v_max_f32_sdwa v0, |v0|, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x06,0x26,0x06]
+0xf9,0x00,0x00,0x16,0x00,0x06,0x26,0x06
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x16,0x00,0x06,0x06,0x00
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x16,0x00,0x06,0x06,0x01
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x16,0x00,0x06,0x06,0x02
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x16,0x00,0x06,0x06,0x03
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x16,0x00,0x06,0x06,0x04
+
+# CHECK: v_max_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x16,0x00,0x06,0x06,0x05
+
+# CHECK: v_max_f32_sdwa v0, v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x06,0x06,0x16]
+0xf9,0x00,0x00,0x16,0x00,0x06,0x06,0x16
+
+# CHECK: v_max_f32_sdwa v0, v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x16,0x00,0x06,0x06,0x26]
+0xf9,0x00,0x00,0x16,0x00,0x06,0x06,0x26
+
+# CHECK: v_max_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x16,0x00,0xe4,0x00,0x00
+
+# CHECK: v_max_f32_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x17,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x17,0x00,0xe4,0x00,0x00
+
+# CHECK: v_max_f32_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x16,0xff,0xe4,0x00,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x16,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x16,0x00,0xe4,0x00,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x16,0x00,0x1b,0x00,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x16,0x00,0x40,0x01,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x16,0x00,0x41,0x01,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x16,0x00,0x42,0x01,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x16,0x00,0x43,0x01,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x16,0x00,0x30,0x01,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x16,0x00,0x34,0x01,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x16,0x00,0x38,0x01,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x16,0x00,0x3c,0x01,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x16,0x00,0x01,0x01,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x16,0x00,0x0f,0x01,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x16,0x00,0x11,0x01,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x16,0x00,0x1f,0x01,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x16,0x00,0x21,0x01,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x16,0x00,0x2f,0x01,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x16,0x00,0xe4,0x00,0x10
+
+# CHECK: v_max_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x16,0x00,0xe4,0x00,0x30
+
+# CHECK: v_max_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x16,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_max_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x16,0x00,0xe4,0x00,0x01
+
+# CHECK: v_max_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x16,0x00,0xe4,0x00,0x03
+
+# CHECK: v_max_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x16,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x16,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_max_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x16,0x00,0xe4,0x08,0x00
+
+# CHECK: v_max_f32_dpp v0, -v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0xe4,0x10,0x00]
+0xfa,0x00,0x00,0x16,0x00,0xe4,0x10,0x00
+
+# CHECK: v_max_f32_dpp v0, |v0|, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0xe4,0x20,0x00]
+0xfa,0x00,0x00,0x16,0x00,0xe4,0x20,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0xe4,0x40,0x00]
+0xfa,0x00,0x00,0x16,0x00,0xe4,0x40,0x00
+
+# CHECK: v_max_f32_dpp v0, v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x16,0x00,0xe4,0x80,0x00]
+0xfa,0x00,0x00,0x16,0x00,0xe4,0x80,0x00
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x18,0x00,0x06,0x06,0x06
+
+# CHECK: v_min_i32_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x19,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x19,0x00,0x06,0x06,0x06
+
+# CHECK: v_min_i32_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x18,0xff,0x06,0x06,0x06
+
+# CHECK: v_min_i32_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x18,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x18,0x00,0x06,0x06,0x06
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x18,0x00,0x26,0x06,0x06
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x18,0x00,0x00,0x06,0x06
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x18,0x00,0x01,0x06,0x06
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x18,0x00,0x02,0x06,0x06
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x18,0x00,0x03,0x06,0x06
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x18,0x00,0x04,0x06,0x06
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x18,0x00,0x05,0x06,0x06
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x18,0x00,0x0e,0x06,0x06
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x18,0x00,0x16,0x06,0x06
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x18,0x00,0x06,0x00,0x06
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x18,0x00,0x06,0x01,0x06
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x18,0x00,0x06,0x02,0x06
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x18,0x00,0x06,0x03,0x06
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x18,0x00,0x06,0x04,0x06
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x18,0x00,0x06,0x05,0x06
+
+# CHECK: v_min_i32_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x18,0x00,0x06,0x0e,0x06
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x18,0x00,0x06,0x06,0x00
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x18,0x00,0x06,0x06,0x01
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x18,0x00,0x06,0x06,0x02
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x18,0x00,0x06,0x06,0x03
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x18,0x00,0x06,0x06,0x04
+
+# CHECK: v_min_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x18,0x00,0x06,0x06,0x05
+
+# CHECK: v_min_i32_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x18,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x18,0x00,0x06,0x06,0x0e
+
+# CHECK: v_min_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x18,0x00,0xe4,0x00,0x00
+
+# CHECK: v_min_i32_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x19,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x19,0x00,0xe4,0x00,0x00
+
+# CHECK: v_min_i32_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x18,0xff,0xe4,0x00,0x00
+
+# CHECK: v_min_i32_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x18,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x18,0x00,0xe4,0x00,0x00
+
+# CHECK: v_min_i32_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x18,0x00,0x1b,0x00,0x00
+
+# CHECK: v_min_i32_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x18,0x00,0x40,0x01,0x00
+
+# CHECK: v_min_i32_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x18,0x00,0x41,0x01,0x00
+
+# CHECK: v_min_i32_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x18,0x00,0x42,0x01,0x00
+
+# CHECK: v_min_i32_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x18,0x00,0x43,0x01,0x00
+
+# CHECK: v_min_i32_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x18,0x00,0x30,0x01,0x00
+
+# CHECK: v_min_i32_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x18,0x00,0x34,0x01,0x00
+
+# CHECK: v_min_i32_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x18,0x00,0x38,0x01,0x00
+
+# CHECK: v_min_i32_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x18,0x00,0x3c,0x01,0x00
+
+# CHECK: v_min_i32_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x18,0x00,0x01,0x01,0x00
+
+# CHECK: v_min_i32_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x18,0x00,0x0f,0x01,0x00
+
+# CHECK: v_min_i32_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x18,0x00,0x11,0x01,0x00
+
+# CHECK: v_min_i32_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x18,0x00,0x1f,0x01,0x00
+
+# CHECK: v_min_i32_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x18,0x00,0x21,0x01,0x00
+
+# CHECK: v_min_i32_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x18,0x00,0x2f,0x01,0x00
+
+# CHECK: v_min_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x18,0x00,0xe4,0x00,0x10
+
+# CHECK: v_min_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x18,0x00,0xe4,0x00,0x30
+
+# CHECK: v_min_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x18,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_min_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x18,0x00,0xe4,0x00,0x01
+
+# CHECK: v_min_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x18,0x00,0xe4,0x00,0x03
+
+# CHECK: v_min_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x18,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x18,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_min_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x18,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x18,0x00,0xe4,0x08,0x00
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x1a,0x00,0x06,0x06,0x06
+
+# CHECK: v_max_i32_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x1b,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x1b,0x00,0x06,0x06,0x06
+
+# CHECK: v_max_i32_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x1a,0xff,0x06,0x06,0x06
+
+# CHECK: v_max_i32_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x1a,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x1a,0x00,0x06,0x06,0x06
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x1a,0x00,0x26,0x06,0x06
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x1a,0x00,0x00,0x06,0x06
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x1a,0x00,0x01,0x06,0x06
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x1a,0x00,0x02,0x06,0x06
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x1a,0x00,0x03,0x06,0x06
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x1a,0x00,0x04,0x06,0x06
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x1a,0x00,0x05,0x06,0x06
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x1a,0x00,0x0e,0x06,0x06
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x1a,0x00,0x16,0x06,0x06
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x1a,0x00,0x06,0x00,0x06
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x1a,0x00,0x06,0x01,0x06
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x1a,0x00,0x06,0x02,0x06
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x1a,0x00,0x06,0x03,0x06
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x1a,0x00,0x06,0x04,0x06
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x1a,0x00,0x06,0x05,0x06
+
+# CHECK: v_max_i32_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x1a,0x00,0x06,0x0e,0x06
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x1a,0x00,0x06,0x06,0x00
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x1a,0x00,0x06,0x06,0x01
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x1a,0x00,0x06,0x06,0x02
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x1a,0x00,0x06,0x06,0x03
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x1a,0x00,0x06,0x06,0x04
+
+# CHECK: v_max_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x1a,0x00,0x06,0x06,0x05
+
+# CHECK: v_max_i32_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1a,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x1a,0x00,0x06,0x06,0x0e
+
+# CHECK: v_max_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x1a,0x00,0xe4,0x00,0x00
+
+# CHECK: v_max_i32_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x1b,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x1b,0x00,0xe4,0x00,0x00
+
+# CHECK: v_max_i32_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x1a,0xff,0xe4,0x00,0x00
+
+# CHECK: v_max_i32_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x1a,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x1a,0x00,0xe4,0x00,0x00
+
+# CHECK: v_max_i32_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x1a,0x00,0x1b,0x00,0x00
+
+# CHECK: v_max_i32_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x1a,0x00,0x40,0x01,0x00
+
+# CHECK: v_max_i32_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x1a,0x00,0x41,0x01,0x00
+
+# CHECK: v_max_i32_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x1a,0x00,0x42,0x01,0x00
+
+# CHECK: v_max_i32_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x1a,0x00,0x43,0x01,0x00
+
+# CHECK: v_max_i32_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x1a,0x00,0x30,0x01,0x00
+
+# CHECK: v_max_i32_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x1a,0x00,0x34,0x01,0x00
+
+# CHECK: v_max_i32_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x1a,0x00,0x38,0x01,0x00
+
+# CHECK: v_max_i32_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x1a,0x00,0x3c,0x01,0x00
+
+# CHECK: v_max_i32_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x1a,0x00,0x01,0x01,0x00
+
+# CHECK: v_max_i32_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x1a,0x00,0x0f,0x01,0x00
+
+# CHECK: v_max_i32_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x1a,0x00,0x11,0x01,0x00
+
+# CHECK: v_max_i32_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x1a,0x00,0x1f,0x01,0x00
+
+# CHECK: v_max_i32_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x1a,0x00,0x21,0x01,0x00
+
+# CHECK: v_max_i32_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x1a,0x00,0x2f,0x01,0x00
+
+# CHECK: v_max_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x1a,0x00,0xe4,0x00,0x10
+
+# CHECK: v_max_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x1a,0x00,0xe4,0x00,0x30
+
+# CHECK: v_max_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x1a,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_max_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x1a,0x00,0xe4,0x00,0x01
+
+# CHECK: v_max_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x1a,0x00,0xe4,0x00,0x03
+
+# CHECK: v_max_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x1a,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_max_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x1a,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x1a,0x00,0xe4,0x08,0x00
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x1c,0x00,0x06,0x06,0x06
+
+# CHECK: v_min_u32_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x1d,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x1d,0x00,0x06,0x06,0x06
+
+# CHECK: v_min_u32_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x1c,0xff,0x06,0x06,0x06
+
+# CHECK: v_min_u32_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x1c,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x1c,0x00,0x06,0x06,0x06
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x1c,0x00,0x26,0x06,0x06
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x1c,0x00,0x00,0x06,0x06
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x1c,0x00,0x01,0x06,0x06
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x1c,0x00,0x02,0x06,0x06
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x1c,0x00,0x03,0x06,0x06
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x1c,0x00,0x04,0x06,0x06
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x1c,0x00,0x05,0x06,0x06
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x1c,0x00,0x0e,0x06,0x06
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x1c,0x00,0x16,0x06,0x06
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x1c,0x00,0x06,0x00,0x06
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x1c,0x00,0x06,0x01,0x06
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x1c,0x00,0x06,0x02,0x06
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x1c,0x00,0x06,0x03,0x06
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x1c,0x00,0x06,0x04,0x06
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x1c,0x00,0x06,0x05,0x06
+
+# CHECK: v_min_u32_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x1c,0x00,0x06,0x0e,0x06
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x1c,0x00,0x06,0x06,0x00
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x1c,0x00,0x06,0x06,0x01
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x1c,0x00,0x06,0x06,0x02
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x1c,0x00,0x06,0x06,0x03
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x1c,0x00,0x06,0x06,0x04
+
+# CHECK: v_min_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x1c,0x00,0x06,0x06,0x05
+
+# CHECK: v_min_u32_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1c,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x1c,0x00,0x06,0x06,0x0e
+
+# CHECK: v_min_u32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x1c,0x00,0xe4,0x00,0x00
+
+# CHECK: v_min_u32_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x1d,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x1d,0x00,0xe4,0x00,0x00
+
+# CHECK: v_min_u32_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x1c,0xff,0xe4,0x00,0x00
+
+# CHECK: v_min_u32_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x1c,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x1c,0x00,0xe4,0x00,0x00
+
+# CHECK: v_min_u32_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x1c,0x00,0x1b,0x00,0x00
+
+# CHECK: v_min_u32_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x1c,0x00,0x40,0x01,0x00
+
+# CHECK: v_min_u32_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x1c,0x00,0x41,0x01,0x00
+
+# CHECK: v_min_u32_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x1c,0x00,0x42,0x01,0x00
+
+# CHECK: v_min_u32_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x1c,0x00,0x43,0x01,0x00
+
+# CHECK: v_min_u32_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x1c,0x00,0x30,0x01,0x00
+
+# CHECK: v_min_u32_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x1c,0x00,0x34,0x01,0x00
+
+# CHECK: v_min_u32_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x1c,0x00,0x38,0x01,0x00
+
+# CHECK: v_min_u32_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x1c,0x00,0x3c,0x01,0x00
+
+# CHECK: v_min_u32_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x1c,0x00,0x01,0x01,0x00
+
+# CHECK: v_min_u32_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x1c,0x00,0x0f,0x01,0x00
+
+# CHECK: v_min_u32_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x1c,0x00,0x11,0x01,0x00
+
+# CHECK: v_min_u32_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x1c,0x00,0x1f,0x01,0x00
+
+# CHECK: v_min_u32_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x1c,0x00,0x21,0x01,0x00
+
+# CHECK: v_min_u32_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x1c,0x00,0x2f,0x01,0x00
+
+# CHECK: v_min_u32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x1c,0x00,0xe4,0x00,0x10
+
+# CHECK: v_min_u32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x1c,0x00,0xe4,0x00,0x30
+
+# CHECK: v_min_u32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x1c,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_min_u32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x1c,0x00,0xe4,0x00,0x01
+
+# CHECK: v_min_u32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x1c,0x00,0xe4,0x00,0x03
+
+# CHECK: v_min_u32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x1c,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_min_u32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x1c,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x1c,0x00,0xe4,0x08,0x00
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x1e,0x00,0x06,0x06,0x06
+
+# CHECK: v_max_u32_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x1f,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x1f,0x00,0x06,0x06,0x06
+
+# CHECK: v_max_u32_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x1e,0xff,0x06,0x06,0x06
+
+# CHECK: v_max_u32_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x1e,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x1e,0x00,0x06,0x06,0x06
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x1e,0x00,0x26,0x06,0x06
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x1e,0x00,0x00,0x06,0x06
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x1e,0x00,0x01,0x06,0x06
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x1e,0x00,0x02,0x06,0x06
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x1e,0x00,0x03,0x06,0x06
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x1e,0x00,0x04,0x06,0x06
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x1e,0x00,0x05,0x06,0x06
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x1e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x1e,0x00,0x16,0x06,0x06
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x1e,0x00,0x06,0x00,0x06
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x1e,0x00,0x06,0x01,0x06
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x1e,0x00,0x06,0x02,0x06
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x1e,0x00,0x06,0x03,0x06
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x1e,0x00,0x06,0x04,0x06
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x1e,0x00,0x06,0x05,0x06
+
+# CHECK: v_max_u32_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x1e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x1e,0x00,0x06,0x06,0x00
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x1e,0x00,0x06,0x06,0x01
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x1e,0x00,0x06,0x06,0x02
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x1e,0x00,0x06,0x06,0x03
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x1e,0x00,0x06,0x06,0x04
+
+# CHECK: v_max_u32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x1e,0x00,0x06,0x06,0x05
+
+# CHECK: v_max_u32_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x1e,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x1e,0x00,0x06,0x06,0x0e
+
+# CHECK: v_max_u32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x1e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_max_u32_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x1f,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x1f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_max_u32_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x1e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_max_u32_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x1e,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x1e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_max_u32_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x1e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_max_u32_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x1e,0x00,0x40,0x01,0x00
+
+# CHECK: v_max_u32_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x1e,0x00,0x41,0x01,0x00
+
+# CHECK: v_max_u32_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x1e,0x00,0x42,0x01,0x00
+
+# CHECK: v_max_u32_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x1e,0x00,0x43,0x01,0x00
+
+# CHECK: v_max_u32_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x1e,0x00,0x30,0x01,0x00
+
+# CHECK: v_max_u32_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x1e,0x00,0x34,0x01,0x00
+
+# CHECK: v_max_u32_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x1e,0x00,0x38,0x01,0x00
+
+# CHECK: v_max_u32_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x1e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_max_u32_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x1e,0x00,0x01,0x01,0x00
+
+# CHECK: v_max_u32_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x1e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_max_u32_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x1e,0x00,0x11,0x01,0x00
+
+# CHECK: v_max_u32_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x1e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_max_u32_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x1e,0x00,0x21,0x01,0x00
+
+# CHECK: v_max_u32_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x1e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_max_u32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x1e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_max_u32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x1e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_max_u32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x1e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_max_u32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x1e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_max_u32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x1e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_max_u32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x1e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_max_u32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x1e,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x1e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x20,0x00,0x06,0x06,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x21,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x21,0x00,0x06,0x06,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x20,0xff,0x06,0x06,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x20,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x20,0x00,0x06,0x06,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x20,0x00,0x26,0x06,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x20,0x00,0x00,0x06,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x20,0x00,0x01,0x06,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x20,0x00,0x02,0x06,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x20,0x00,0x03,0x06,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x20,0x00,0x04,0x06,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x20,0x00,0x05,0x06,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x20,0x00,0x0e,0x06,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x20,0x00,0x16,0x06,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x20,0x00,0x06,0x00,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x20,0x00,0x06,0x01,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x20,0x00,0x06,0x02,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x20,0x00,0x06,0x03,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x20,0x00,0x06,0x04,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x20,0x00,0x06,0x05,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x20,0x00,0x06,0x0e,0x06
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x20,0x00,0x06,0x06,0x00
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x20,0x00,0x06,0x06,0x01
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x20,0x00,0x06,0x06,0x02
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x20,0x00,0x06,0x06,0x03
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x20,0x00,0x06,0x06,0x04
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x20,0x00,0x06,0x06,0x05
+
+# CHECK: v_lshrrev_b32_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x20,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x20,0x00,0x06,0x06,0x0e
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x20,0x00,0xe4,0x00,0x00
+
+# CHECK: v_lshrrev_b32_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x21,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x21,0x00,0xe4,0x00,0x00
+
+# CHECK: v_lshrrev_b32_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x20,0xff,0xe4,0x00,0x00
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x20,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x20,0x00,0xe4,0x00,0x00
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x20,0x00,0x1b,0x00,0x00
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x20,0x00,0x40,0x01,0x00
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x20,0x00,0x41,0x01,0x00
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x20,0x00,0x42,0x01,0x00
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x20,0x00,0x43,0x01,0x00
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x20,0x00,0x30,0x01,0x00
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x20,0x00,0x34,0x01,0x00
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x20,0x00,0x38,0x01,0x00
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x20,0x00,0x3c,0x01,0x00
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x20,0x00,0x01,0x01,0x00
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x20,0x00,0x0f,0x01,0x00
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x20,0x00,0x11,0x01,0x00
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x20,0x00,0x1f,0x01,0x00
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x20,0x00,0x21,0x01,0x00
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x20,0x00,0x2f,0x01,0x00
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x20,0x00,0xe4,0x00,0x10
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x20,0x00,0xe4,0x00,0x30
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x20,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x20,0x00,0xe4,0x00,0x01
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x20,0x00,0xe4,0x00,0x03
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x20,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x20,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_lshrrev_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x20,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x20,0x00,0xe4,0x08,0x00
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x22,0x00,0x06,0x06,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x23,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x23,0x00,0x06,0x06,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x22,0xff,0x06,0x06,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x22,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x22,0x00,0x06,0x06,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x22,0x00,0x26,0x06,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x22,0x00,0x00,0x06,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x22,0x00,0x01,0x06,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x22,0x00,0x02,0x06,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x22,0x00,0x03,0x06,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x22,0x00,0x04,0x06,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x22,0x00,0x05,0x06,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x22,0x00,0x0e,0x06,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x22,0x00,0x16,0x06,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x22,0x00,0x06,0x00,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x22,0x00,0x06,0x01,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x22,0x00,0x06,0x02,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x22,0x00,0x06,0x03,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x22,0x00,0x06,0x04,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x22,0x00,0x06,0x05,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x22,0x00,0x06,0x0e,0x06
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x22,0x00,0x06,0x06,0x00
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x22,0x00,0x06,0x06,0x01
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x22,0x00,0x06,0x06,0x02
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x22,0x00,0x06,0x06,0x03
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x22,0x00,0x06,0x06,0x04
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x22,0x00,0x06,0x06,0x05
+
+# CHECK: v_ashrrev_i32_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x22,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x22,0x00,0x06,0x06,0x0e
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x22,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ashrrev_i32_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x23,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x23,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ashrrev_i32_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x22,0xff,0xe4,0x00,0x00
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x22,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x22,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x22,0x00,0x1b,0x00,0x00
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x22,0x00,0x40,0x01,0x00
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x22,0x00,0x41,0x01,0x00
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x22,0x00,0x42,0x01,0x00
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x22,0x00,0x43,0x01,0x00
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x22,0x00,0x30,0x01,0x00
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x22,0x00,0x34,0x01,0x00
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x22,0x00,0x38,0x01,0x00
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x22,0x00,0x3c,0x01,0x00
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x22,0x00,0x01,0x01,0x00
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x22,0x00,0x0f,0x01,0x00
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x22,0x00,0x11,0x01,0x00
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x22,0x00,0x1f,0x01,0x00
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x22,0x00,0x21,0x01,0x00
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x22,0x00,0x2f,0x01,0x00
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x22,0x00,0xe4,0x00,0x10
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x22,0x00,0xe4,0x00,0x30
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x22,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x22,0x00,0xe4,0x00,0x01
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x22,0x00,0xe4,0x00,0x03
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x22,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x22,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_ashrrev_i32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x22,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x22,0x00,0xe4,0x08,0x00
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x24,0x00,0x06,0x06,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x25,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x25,0x00,0x06,0x06,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x24,0xff,0x06,0x06,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x24,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x24,0x00,0x06,0x06,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x24,0x00,0x26,0x06,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x24,0x00,0x00,0x06,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x24,0x00,0x01,0x06,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x24,0x00,0x02,0x06,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x24,0x00,0x03,0x06,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x24,0x00,0x04,0x06,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x24,0x00,0x05,0x06,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x24,0x00,0x0e,0x06,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x24,0x00,0x16,0x06,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x24,0x00,0x06,0x00,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x24,0x00,0x06,0x01,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x24,0x00,0x06,0x02,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x24,0x00,0x06,0x03,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x24,0x00,0x06,0x04,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x24,0x00,0x06,0x05,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x24,0x00,0x06,0x0e,0x06
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x24,0x00,0x06,0x06,0x00
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x24,0x00,0x06,0x06,0x01
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x24,0x00,0x06,0x06,0x02
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x24,0x00,0x06,0x06,0x03
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x24,0x00,0x06,0x06,0x04
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x24,0x00,0x06,0x06,0x05
+
+# CHECK: v_lshlrev_b32_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x24,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x24,0x00,0x06,0x06,0x0e
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x24,0x00,0xe4,0x00,0x00
+
+# CHECK: v_lshlrev_b32_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x25,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x25,0x00,0xe4,0x00,0x00
+
+# CHECK: v_lshlrev_b32_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x24,0xff,0xe4,0x00,0x00
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x24,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x24,0x00,0xe4,0x00,0x00
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x24,0x00,0x1b,0x00,0x00
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x24,0x00,0x40,0x01,0x00
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x24,0x00,0x41,0x01,0x00
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x24,0x00,0x42,0x01,0x00
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x24,0x00,0x43,0x01,0x00
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x24,0x00,0x30,0x01,0x00
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x24,0x00,0x34,0x01,0x00
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x24,0x00,0x38,0x01,0x00
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x24,0x00,0x3c,0x01,0x00
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x24,0x00,0x01,0x01,0x00
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x24,0x00,0x0f,0x01,0x00
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x24,0x00,0x11,0x01,0x00
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x24,0x00,0x1f,0x01,0x00
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x24,0x00,0x21,0x01,0x00
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x24,0x00,0x2f,0x01,0x00
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x24,0x00,0xe4,0x00,0x10
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x24,0x00,0xe4,0x00,0x30
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x24,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x24,0x00,0xe4,0x00,0x01
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x24,0x00,0xe4,0x00,0x03
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x24,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x24,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_lshlrev_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x24,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x24,0x00,0xe4,0x08,0x00
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x26,0x00,0x06,0x06,0x06
+
+# CHECK: v_and_b32_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x27,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x27,0x00,0x06,0x06,0x06
+
+# CHECK: v_and_b32_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x26,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x26,0xff,0x06,0x06,0x06
+
+# CHECK: v_and_b32_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x26,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x26,0x00,0x06,0x06,0x06
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x26,0x00,0x00,0x06,0x06
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x26,0x00,0x01,0x06,0x06
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x26,0x00,0x02,0x06,0x06
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x26,0x00,0x03,0x06,0x06
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x26,0x00,0x04,0x06,0x06
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x26,0x00,0x05,0x06,0x06
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x26,0x00,0x0e,0x06,0x06
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x26,0x00,0x16,0x06,0x06
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x26,0x00,0x06,0x00,0x06
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x26,0x00,0x06,0x01,0x06
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x26,0x00,0x06,0x02,0x06
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x26,0x00,0x06,0x03,0x06
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x26,0x00,0x06,0x04,0x06
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x26,0x00,0x06,0x05,0x06
+
+# CHECK: v_and_b32_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x26,0x00,0x06,0x0e,0x06
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x26,0x00,0x06,0x06,0x00
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x26,0x00,0x06,0x06,0x01
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x26,0x00,0x06,0x06,0x02
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x26,0x00,0x06,0x06,0x03
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x26,0x00,0x06,0x06,0x04
+
+# CHECK: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x26,0x00,0x06,0x06,0x05
+
+# CHECK: v_and_b32_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x26,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x26,0x00,0x06,0x06,0x0e
+
+# CHECK: v_and_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x26,0x00,0xe4,0x00,0x00
+
+# CHECK: v_and_b32_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x27,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x27,0x00,0xe4,0x00,0x00
+
+# CHECK: v_and_b32_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x26,0xff,0xe4,0x00,0x00
+
+# CHECK: v_and_b32_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x26,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x26,0x00,0xe4,0x00,0x00
+
+# CHECK: v_and_b32_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x26,0x00,0x1b,0x00,0x00
+
+# CHECK: v_and_b32_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x26,0x00,0x40,0x01,0x00
+
+# CHECK: v_and_b32_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x26,0x00,0x41,0x01,0x00
+
+# CHECK: v_and_b32_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x26,0x00,0x42,0x01,0x00
+
+# CHECK: v_and_b32_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x26,0x00,0x43,0x01,0x00
+
+# CHECK: v_and_b32_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x26,0x00,0x30,0x01,0x00
+
+# CHECK: v_and_b32_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x26,0x00,0x34,0x01,0x00
+
+# CHECK: v_and_b32_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x26,0x00,0x38,0x01,0x00
+
+# CHECK: v_and_b32_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x26,0x00,0x3c,0x01,0x00
+
+# CHECK: v_and_b32_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x26,0x00,0x01,0x01,0x00
+
+# CHECK: v_and_b32_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x26,0x00,0x0f,0x01,0x00
+
+# CHECK: v_and_b32_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x26,0x00,0x11,0x01,0x00
+
+# CHECK: v_and_b32_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x26,0x00,0x1f,0x01,0x00
+
+# CHECK: v_and_b32_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x26,0x00,0x21,0x01,0x00
+
+# CHECK: v_and_b32_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x26,0x00,0x2f,0x01,0x00
+
+# CHECK: v_and_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x26,0x00,0xe4,0x00,0x10
+
+# CHECK: v_and_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x26,0x00,0xe4,0x00,0x30
+
+# CHECK: v_and_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x26,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_and_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x26,0x00,0xe4,0x00,0x01
+
+# CHECK: v_and_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x26,0x00,0xe4,0x00,0x03
+
+# CHECK: v_and_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x26,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x26,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_and_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x26,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x26,0x00,0xe4,0x08,0x00
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x28,0x00,0x06,0x06,0x06
+
+# CHECK: v_or_b32_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x29,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x29,0x00,0x06,0x06,0x06
+
+# CHECK: v_or_b32_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x28,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x28,0xff,0x06,0x06,0x06
+
+# CHECK: v_or_b32_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x28,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x28,0x00,0x06,0x06,0x06
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x28,0x00,0x00,0x06,0x06
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x28,0x00,0x01,0x06,0x06
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x28,0x00,0x02,0x06,0x06
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x28,0x00,0x03,0x06,0x06
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x28,0x00,0x04,0x06,0x06
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x28,0x00,0x05,0x06,0x06
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x28,0x00,0x0e,0x06,0x06
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x28,0x00,0x16,0x06,0x06
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x28,0x00,0x06,0x00,0x06
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x28,0x00,0x06,0x01,0x06
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x28,0x00,0x06,0x02,0x06
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x28,0x00,0x06,0x03,0x06
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x28,0x00,0x06,0x04,0x06
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x28,0x00,0x06,0x05,0x06
+
+# CHECK: v_or_b32_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x28,0x00,0x06,0x0e,0x06
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x28,0x00,0x06,0x06,0x00
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x28,0x00,0x06,0x06,0x01
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x28,0x00,0x06,0x06,0x02
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x28,0x00,0x06,0x06,0x03
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x28,0x00,0x06,0x06,0x04
+
+# CHECK: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x28,0x00,0x06,0x06,0x05
+
+# CHECK: v_or_b32_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x28,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x28,0x00,0x06,0x06,0x0e
+
+# CHECK: v_or_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x28,0x00,0xe4,0x00,0x00
+
+# CHECK: v_or_b32_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x29,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x29,0x00,0xe4,0x00,0x00
+
+# CHECK: v_or_b32_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x28,0xff,0xe4,0x00,0x00
+
+# CHECK: v_or_b32_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x28,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x28,0x00,0xe4,0x00,0x00
+
+# CHECK: v_or_b32_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x28,0x00,0x1b,0x00,0x00
+
+# CHECK: v_or_b32_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x28,0x00,0x40,0x01,0x00
+
+# CHECK: v_or_b32_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x28,0x00,0x41,0x01,0x00
+
+# CHECK: v_or_b32_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x28,0x00,0x42,0x01,0x00
+
+# CHECK: v_or_b32_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x28,0x00,0x43,0x01,0x00
+
+# CHECK: v_or_b32_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x28,0x00,0x30,0x01,0x00
+
+# CHECK: v_or_b32_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x28,0x00,0x34,0x01,0x00
+
+# CHECK: v_or_b32_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x28,0x00,0x38,0x01,0x00
+
+# CHECK: v_or_b32_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x28,0x00,0x3c,0x01,0x00
+
+# CHECK: v_or_b32_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x28,0x00,0x01,0x01,0x00
+
+# CHECK: v_or_b32_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x28,0x00,0x0f,0x01,0x00
+
+# CHECK: v_or_b32_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x28,0x00,0x11,0x01,0x00
+
+# CHECK: v_or_b32_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x28,0x00,0x1f,0x01,0x00
+
+# CHECK: v_or_b32_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x28,0x00,0x21,0x01,0x00
+
+# CHECK: v_or_b32_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x28,0x00,0x2f,0x01,0x00
+
+# CHECK: v_or_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x28,0x00,0xe4,0x00,0x10
+
+# CHECK: v_or_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x28,0x00,0xe4,0x00,0x30
+
+# CHECK: v_or_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x28,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_or_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x28,0x00,0xe4,0x00,0x01
+
+# CHECK: v_or_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x28,0x00,0xe4,0x00,0x03
+
+# CHECK: v_or_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x28,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x28,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_or_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x28,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x28,0x00,0xe4,0x08,0x00
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x2a,0x00,0x06,0x06,0x06
+
+# CHECK: v_xor_b32_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x2b,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x2b,0x00,0x06,0x06,0x06
+
+# CHECK: v_xor_b32_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2a,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x2a,0xff,0x06,0x06,0x06
+
+# CHECK: v_xor_b32_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x2a,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x2a,0x00,0x06,0x06,0x06
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x2a,0x00,0x00,0x06,0x06
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x2a,0x00,0x01,0x06,0x06
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x2a,0x00,0x02,0x06,0x06
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x2a,0x00,0x03,0x06,0x06
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x2a,0x00,0x04,0x06,0x06
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x2a,0x00,0x05,0x06,0x06
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x2a,0x00,0x0e,0x06,0x06
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x2a,0x00,0x16,0x06,0x06
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x2a,0x00,0x06,0x00,0x06
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x2a,0x00,0x06,0x01,0x06
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x2a,0x00,0x06,0x02,0x06
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x2a,0x00,0x06,0x03,0x06
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x2a,0x00,0x06,0x04,0x06
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x2a,0x00,0x06,0x05,0x06
+
+# CHECK: v_xor_b32_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x2a,0x00,0x06,0x0e,0x06
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x2a,0x00,0x06,0x06,0x00
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x2a,0x00,0x06,0x06,0x01
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x2a,0x00,0x06,0x06,0x02
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x2a,0x00,0x06,0x06,0x03
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x2a,0x00,0x06,0x06,0x04
+
+# CHECK: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x2a,0x00,0x06,0x06,0x05
+
+# CHECK: v_xor_b32_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2a,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x2a,0x00,0x06,0x06,0x0e
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x2a,0x00,0xe4,0x00,0x00
+
+# CHECK: v_xor_b32_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x2b,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x2b,0x00,0xe4,0x00,0x00
+
+# CHECK: v_xor_b32_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x2a,0xff,0xe4,0x00,0x00
+
+# CHECK: v_xor_b32_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x2a,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x2a,0x00,0xe4,0x00,0x00
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x2a,0x00,0x1b,0x00,0x00
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x2a,0x00,0x40,0x01,0x00
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x2a,0x00,0x41,0x01,0x00
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x2a,0x00,0x42,0x01,0x00
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x2a,0x00,0x43,0x01,0x00
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x2a,0x00,0x30,0x01,0x00
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x2a,0x00,0x34,0x01,0x00
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x2a,0x00,0x38,0x01,0x00
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x2a,0x00,0x3c,0x01,0x00
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x2a,0x00,0x01,0x01,0x00
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x2a,0x00,0x0f,0x01,0x00
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x2a,0x00,0x11,0x01,0x00
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x2a,0x00,0x1f,0x01,0x00
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x2a,0x00,0x21,0x01,0x00
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x2a,0x00,0x2f,0x01,0x00
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x2a,0x00,0xe4,0x00,0x10
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x2a,0x00,0xe4,0x00,0x30
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x2a,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x2a,0x00,0xe4,0x00,0x01
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x2a,0x00,0xe4,0x00,0x03
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x2a,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_xor_b32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x2a,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x2a,0x00,0xe4,0x08,0x00
+
+# CHECK: v_mac_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x2c,0x00,0x06,0x06,0x06
+
+# CHECK: v_mac_f32_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x2d,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x2d,0x00,0x06,0x06,0x06
+
+# CHECK: v_mac_f32_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2c,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x2c,0xff,0x06,0x06,0x06
+
+# CHECK: v_mac_f32_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x2c,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x2c,0x00,0x06,0x06,0x06
+
+# CHECK: v_mac_f32_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x2c,0x00,0x26,0x06,0x06
+
+# CHECK: v_mac_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x2c,0x00,0x0e,0x06,0x06
+
+# CHECK: v_mac_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x2c,0x00,0x16,0x06,0x06
+
+# CHECK: v_mac_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x2c,0x00,0x06,0x00,0x06
+
+# CHECK: v_mac_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x2c,0x00,0x06,0x01,0x06
+
+# CHECK: v_mac_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x2c,0x00,0x06,0x02,0x06
+
+# CHECK: v_mac_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x2c,0x00,0x06,0x03,0x06
+
+# CHECK: v_mac_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x2c,0x00,0x06,0x04,0x06
+
+# CHECK: v_mac_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x2c,0x00,0x06,0x05,0x06
+
+# CHECK: v_mac_f32_sdwa v0, -v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x06,0x16,0x06]
+0xf9,0x00,0x00,0x2c,0x00,0x06,0x16,0x06
+
+# CHECK: v_mac_f32_sdwa v0, |v0|, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x06,0x26,0x06]
+0xf9,0x00,0x00,0x2c,0x00,0x06,0x26,0x06
+
+# CHECK: v_mac_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x2c,0x00,0x06,0x06,0x00
+
+# CHECK: v_mac_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x2c,0x00,0x06,0x06,0x01
+
+# CHECK: v_mac_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x2c,0x00,0x06,0x06,0x02
+
+# CHECK: v_mac_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x2c,0x00,0x06,0x06,0x03
+
+# CHECK: v_mac_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x2c,0x00,0x06,0x06,0x04
+
+# CHECK: v_mac_f32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x2c,0x00,0x06,0x06,0x05
+
+# CHECK: v_mac_f32_sdwa v0, v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x06,0x06,0x16]
+0xf9,0x00,0x00,0x2c,0x00,0x06,0x06,0x16
+
+# CHECK: v_mac_f32_sdwa v0, v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x2c,0x00,0x06,0x06,0x26]
+0xf9,0x00,0x00,0x2c,0x00,0x06,0x06,0x26
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mac_f32_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x2d,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x2d,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mac_f32_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x2c,0xff,0xe4,0x00,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x2c,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x2c,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0x1b,0x00,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0x40,0x01,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0x41,0x01,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0x42,0x01,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0x43,0x01,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0x30,0x01,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0x34,0x01,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0x38,0x01,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0x3c,0x01,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0x01,0x01,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0x0f,0x01,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0x11,0x01,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0x1f,0x01,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0x21,0x01,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0x2f,0x01,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x2c,0x00,0xe4,0x00,0x10
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x2c,0x00,0xe4,0x00,0x30
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x2c,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x2c,0x00,0xe4,0x00,0x01
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x2c,0x00,0xe4,0x00,0x03
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x2c,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_mac_f32_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0xe4,0x08,0x00
+
+# CHECK: v_mac_f32_dpp v0, -v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0xe4,0x10,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0xe4,0x10,0x00
+
+# CHECK: v_mac_f32_dpp v0, |v0|, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0xe4,0x20,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0xe4,0x20,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0xe4,0x40,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0xe4,0x40,0x00
+
+# CHECK: v_mac_f32_dpp v0, v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x2c,0x00,0xe4,0x80,0x00]
+0xfa,0x00,0x00,0x2c,0x00,0xe4,0x80,0x00
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x3e,0x00,0x06,0x06,0x06
+
+# CHECK: v_add_f16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x3f,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x3f,0x00,0x06,0x06,0x06
+
+# CHECK: v_add_f16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x3e,0xff,0x06,0x06,0x06
+
+# CHECK: v_add_f16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x3e,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x3e,0x00,0x06,0x06,0x06
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x3e,0x00,0x26,0x06,0x06
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x3e,0x00,0x00,0x06,0x06
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x3e,0x00,0x01,0x06,0x06
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x3e,0x00,0x02,0x06,0x06
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x3e,0x00,0x03,0x06,0x06
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x3e,0x00,0x04,0x06,0x06
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x3e,0x00,0x05,0x06,0x06
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x3e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x3e,0x00,0x16,0x06,0x06
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x3e,0x00,0x06,0x00,0x06
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x3e,0x00,0x06,0x01,0x06
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x3e,0x00,0x06,0x02,0x06
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x3e,0x00,0x06,0x03,0x06
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x3e,0x00,0x06,0x04,0x06
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x3e,0x00,0x06,0x05,0x06
+
+# CHECK: v_add_f16_sdwa v0, -v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x06,0x16,0x06]
+0xf9,0x00,0x00,0x3e,0x00,0x06,0x16,0x06
+
+# CHECK: v_add_f16_sdwa v0, |v0|, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x06,0x26,0x06]
+0xf9,0x00,0x00,0x3e,0x00,0x06,0x26,0x06
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x3e,0x00,0x06,0x06,0x00
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x3e,0x00,0x06,0x06,0x01
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x3e,0x00,0x06,0x06,0x02
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x3e,0x00,0x06,0x06,0x03
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x3e,0x00,0x06,0x06,0x04
+
+# CHECK: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x3e,0x00,0x06,0x06,0x05
+
+# CHECK: v_add_f16_sdwa v0, v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x06,0x06,0x16]
+0xf9,0x00,0x00,0x3e,0x00,0x06,0x06,0x16
+
+# CHECK: v_add_f16_sdwa v0, v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x3e,0x00,0x06,0x06,0x26]
+0xf9,0x00,0x00,0x3e,0x00,0x06,0x06,0x26
+
+# CHECK: v_add_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_add_f16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x3f,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x3f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_add_f16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x3e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x3e,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x3e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0x40,0x01,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0x41,0x01,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0x42,0x01,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0x43,0x01,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0x30,0x01,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0x34,0x01,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0x38,0x01,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0x01,0x01,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0x11,0x01,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0x21,0x01,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x3e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_add_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x3e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_add_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x3e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_add_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x3e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_add_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x3e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_add_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x3e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_add_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_add_f16_dpp v0, -v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0xe4,0x10,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0xe4,0x10,0x00
+
+# CHECK: v_add_f16_dpp v0, |v0|, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0xe4,0x20,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0xe4,0x20,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0xe4,0x40,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0xe4,0x40,0x00
+
+# CHECK: v_add_f16_dpp v0, v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x3e,0x00,0xe4,0x80,0x00]
+0xfa,0x00,0x00,0x3e,0x00,0xe4,0x80,0x00
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x40,0x00,0x06,0x06,0x06
+
+# CHECK: v_sub_f16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x41,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x41,0x00,0x06,0x06,0x06
+
+# CHECK: v_sub_f16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x40,0xff,0x06,0x06,0x06
+
+# CHECK: v_sub_f16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x40,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x40,0x00,0x06,0x06,0x06
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x40,0x00,0x26,0x06,0x06
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x40,0x00,0x00,0x06,0x06
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x40,0x00,0x01,0x06,0x06
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x40,0x00,0x02,0x06,0x06
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x40,0x00,0x03,0x06,0x06
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x40,0x00,0x04,0x06,0x06
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x40,0x00,0x05,0x06,0x06
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x40,0x00,0x0e,0x06,0x06
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x40,0x00,0x16,0x06,0x06
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x40,0x00,0x06,0x00,0x06
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x40,0x00,0x06,0x01,0x06
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x40,0x00,0x06,0x02,0x06
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x40,0x00,0x06,0x03,0x06
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x40,0x00,0x06,0x04,0x06
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x40,0x00,0x06,0x05,0x06
+
+# CHECK: v_sub_f16_sdwa v0, -v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x06,0x16,0x06]
+0xf9,0x00,0x00,0x40,0x00,0x06,0x16,0x06
+
+# CHECK: v_sub_f16_sdwa v0, |v0|, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x06,0x26,0x06]
+0xf9,0x00,0x00,0x40,0x00,0x06,0x26,0x06
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x40,0x00,0x06,0x06,0x00
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x40,0x00,0x06,0x06,0x01
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x40,0x00,0x06,0x06,0x02
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x40,0x00,0x06,0x06,0x03
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x40,0x00,0x06,0x06,0x04
+
+# CHECK: v_sub_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x40,0x00,0x06,0x06,0x05
+
+# CHECK: v_sub_f16_sdwa v0, v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x06,0x06,0x16]
+0xf9,0x00,0x00,0x40,0x00,0x06,0x06,0x16
+
+# CHECK: v_sub_f16_sdwa v0, v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x40,0x00,0x06,0x06,0x26]
+0xf9,0x00,0x00,0x40,0x00,0x06,0x06,0x26
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x40,0x00,0xe4,0x00,0x00
+
+# CHECK: v_sub_f16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x41,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x41,0x00,0xe4,0x00,0x00
+
+# CHECK: v_sub_f16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x40,0xff,0xe4,0x00,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x40,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x40,0x00,0xe4,0x00,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x40,0x00,0x1b,0x00,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x40,0x00,0x40,0x01,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x40,0x00,0x41,0x01,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x40,0x00,0x42,0x01,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x40,0x00,0x43,0x01,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x40,0x00,0x30,0x01,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x40,0x00,0x34,0x01,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x40,0x00,0x38,0x01,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x40,0x00,0x3c,0x01,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x40,0x00,0x01,0x01,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x40,0x00,0x0f,0x01,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x40,0x00,0x11,0x01,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x40,0x00,0x1f,0x01,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x40,0x00,0x21,0x01,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x40,0x00,0x2f,0x01,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x40,0x00,0xe4,0x00,0x10
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x40,0x00,0xe4,0x00,0x30
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x40,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x40,0x00,0xe4,0x00,0x01
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x40,0x00,0xe4,0x00,0x03
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x40,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x40,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_sub_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x40,0x00,0xe4,0x08,0x00
+
+# CHECK: v_sub_f16_dpp v0, -v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0xe4,0x10,0x00]
+0xfa,0x00,0x00,0x40,0x00,0xe4,0x10,0x00
+
+# CHECK: v_sub_f16_dpp v0, |v0|, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0xe4,0x20,0x00]
+0xfa,0x00,0x00,0x40,0x00,0xe4,0x20,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0xe4,0x40,0x00]
+0xfa,0x00,0x00,0x40,0x00,0xe4,0x40,0x00
+
+# CHECK: v_sub_f16_dpp v0, v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x40,0x00,0xe4,0x80,0x00]
+0xfa,0x00,0x00,0x40,0x00,0xe4,0x80,0x00
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x42,0x00,0x06,0x06,0x06
+
+# CHECK: v_subrev_f16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x43,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x43,0x00,0x06,0x06,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x42,0xff,0x06,0x06,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x42,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x42,0x00,0x06,0x06,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x42,0x00,0x26,0x06,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x42,0x00,0x00,0x06,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x42,0x00,0x01,0x06,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x42,0x00,0x02,0x06,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x42,0x00,0x03,0x06,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x42,0x00,0x04,0x06,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x42,0x00,0x05,0x06,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x42,0x00,0x0e,0x06,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x42,0x00,0x16,0x06,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x42,0x00,0x06,0x00,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x42,0x00,0x06,0x01,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x42,0x00,0x06,0x02,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x42,0x00,0x06,0x03,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x42,0x00,0x06,0x04,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x42,0x00,0x06,0x05,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, -v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x06,0x16,0x06]
+0xf9,0x00,0x00,0x42,0x00,0x06,0x16,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, |v0|, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x06,0x26,0x06]
+0xf9,0x00,0x00,0x42,0x00,0x06,0x26,0x06
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x42,0x00,0x06,0x06,0x00
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x42,0x00,0x06,0x06,0x01
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x42,0x00,0x06,0x06,0x02
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x42,0x00,0x06,0x06,0x03
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x42,0x00,0x06,0x06,0x04
+
+# CHECK: v_subrev_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x42,0x00,0x06,0x06,0x05
+
+# CHECK: v_subrev_f16_sdwa v0, v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x06,0x06,0x16]
+0xf9,0x00,0x00,0x42,0x00,0x06,0x06,0x16
+
+# CHECK: v_subrev_f16_sdwa v0, v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x42,0x00,0x06,0x06,0x26]
+0xf9,0x00,0x00,0x42,0x00,0x06,0x06,0x26
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x42,0x00,0xe4,0x00,0x00
+
+# CHECK: v_subrev_f16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x43,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x43,0x00,0xe4,0x00,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x42,0xff,0xe4,0x00,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x42,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x42,0x00,0xe4,0x00,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x42,0x00,0x1b,0x00,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x42,0x00,0x40,0x01,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x42,0x00,0x41,0x01,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x42,0x00,0x42,0x01,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x42,0x00,0x43,0x01,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x42,0x00,0x30,0x01,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x42,0x00,0x34,0x01,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x42,0x00,0x38,0x01,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x42,0x00,0x3c,0x01,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x42,0x00,0x01,0x01,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x42,0x00,0x0f,0x01,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x42,0x00,0x11,0x01,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x42,0x00,0x1f,0x01,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x42,0x00,0x21,0x01,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x42,0x00,0x2f,0x01,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x42,0x00,0xe4,0x00,0x10
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x42,0x00,0xe4,0x00,0x30
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x42,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x42,0x00,0xe4,0x00,0x01
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x42,0x00,0xe4,0x00,0x03
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x42,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x42,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_subrev_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x42,0x00,0xe4,0x08,0x00
+
+# CHECK: v_subrev_f16_dpp v0, -v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0xe4,0x10,0x00]
+0xfa,0x00,0x00,0x42,0x00,0xe4,0x10,0x00
+
+# CHECK: v_subrev_f16_dpp v0, |v0|, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0xe4,0x20,0x00]
+0xfa,0x00,0x00,0x42,0x00,0xe4,0x20,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0xe4,0x40,0x00]
+0xfa,0x00,0x00,0x42,0x00,0xe4,0x40,0x00
+
+# CHECK: v_subrev_f16_dpp v0, v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x42,0x00,0xe4,0x80,0x00]
+0xfa,0x00,0x00,0x42,0x00,0xe4,0x80,0x00
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x44,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_f16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x45,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x45,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_f16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x44,0xff,0x06,0x06,0x06
+
+# CHECK: v_mul_f16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x44,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x44,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x44,0x00,0x26,0x06,0x06
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x44,0x00,0x00,0x06,0x06
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x44,0x00,0x01,0x06,0x06
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x44,0x00,0x02,0x06,0x06
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x44,0x00,0x03,0x06,0x06
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x44,0x00,0x04,0x06,0x06
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x44,0x00,0x05,0x06,0x06
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x44,0x00,0x0e,0x06,0x06
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x44,0x00,0x16,0x06,0x06
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x44,0x00,0x06,0x00,0x06
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x44,0x00,0x06,0x01,0x06
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x44,0x00,0x06,0x02,0x06
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x44,0x00,0x06,0x03,0x06
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x44,0x00,0x06,0x04,0x06
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x44,0x00,0x06,0x05,0x06
+
+# CHECK: v_mul_f16_sdwa v0, -v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x06,0x16,0x06]
+0xf9,0x00,0x00,0x44,0x00,0x06,0x16,0x06
+
+# CHECK: v_mul_f16_sdwa v0, |v0|, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x06,0x26,0x06]
+0xf9,0x00,0x00,0x44,0x00,0x06,0x26,0x06
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x44,0x00,0x06,0x06,0x00
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x44,0x00,0x06,0x06,0x01
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x44,0x00,0x06,0x06,0x02
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x44,0x00,0x06,0x06,0x03
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x44,0x00,0x06,0x06,0x04
+
+# CHECK: v_mul_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x44,0x00,0x06,0x06,0x05
+
+# CHECK: v_mul_f16_sdwa v0, v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x06,0x06,0x16]
+0xf9,0x00,0x00,0x44,0x00,0x06,0x06,0x16
+
+# CHECK: v_mul_f16_sdwa v0, v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x44,0x00,0x06,0x06,0x26]
+0xf9,0x00,0x00,0x44,0x00,0x06,0x06,0x26
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x44,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_f16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x45,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x45,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_f16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x44,0xff,0xe4,0x00,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x44,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x44,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x44,0x00,0x1b,0x00,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x44,0x00,0x40,0x01,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x44,0x00,0x41,0x01,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x44,0x00,0x42,0x01,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x44,0x00,0x43,0x01,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x44,0x00,0x30,0x01,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x44,0x00,0x34,0x01,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x44,0x00,0x38,0x01,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x44,0x00,0x3c,0x01,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x44,0x00,0x01,0x01,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x44,0x00,0x0f,0x01,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x44,0x00,0x11,0x01,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x44,0x00,0x1f,0x01,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x44,0x00,0x21,0x01,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x44,0x00,0x2f,0x01,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x44,0x00,0xe4,0x00,0x10
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x44,0x00,0xe4,0x00,0x30
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x44,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x44,0x00,0xe4,0x00,0x01
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x44,0x00,0xe4,0x00,0x03
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x44,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x44,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_mul_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x44,0x00,0xe4,0x08,0x00
+
+# CHECK: v_mul_f16_dpp v0, -v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0xe4,0x10,0x00]
+0xfa,0x00,0x00,0x44,0x00,0xe4,0x10,0x00
+
+# CHECK: v_mul_f16_dpp v0, |v0|, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0xe4,0x20,0x00]
+0xfa,0x00,0x00,0x44,0x00,0xe4,0x20,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0xe4,0x40,0x00]
+0xfa,0x00,0x00,0x44,0x00,0xe4,0x40,0x00
+
+# CHECK: v_mul_f16_dpp v0, v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x44,0x00,0xe4,0x80,0x00]
+0xfa,0x00,0x00,0x44,0x00,0xe4,0x80,0x00
+
+# CHECK: v_mac_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x46,0x00,0x06,0x06,0x06
+
+# CHECK: v_mac_f16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x47,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x47,0x00,0x06,0x06,0x06
+
+# CHECK: v_mac_f16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x46,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x46,0xff,0x06,0x06,0x06
+
+# CHECK: v_mac_f16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x46,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x46,0x00,0x06,0x06,0x06
+
+# CHECK: v_mac_f16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x46,0x00,0x26,0x06,0x06
+
+# CHECK: v_mac_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x46,0x00,0x0e,0x06,0x06
+
+# CHECK: v_mac_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x46,0x00,0x16,0x06,0x06
+
+# CHECK: v_mac_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x46,0x00,0x06,0x00,0x06
+
+# CHECK: v_mac_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x46,0x00,0x06,0x01,0x06
+
+# CHECK: v_mac_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x46,0x00,0x06,0x02,0x06
+
+# CHECK: v_mac_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x46,0x00,0x06,0x03,0x06
+
+# CHECK: v_mac_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x46,0x00,0x06,0x04,0x06
+
+# CHECK: v_mac_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x46,0x00,0x06,0x05,0x06
+
+# CHECK: v_mac_f16_sdwa v0, -v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x06,0x16,0x06]
+0xf9,0x00,0x00,0x46,0x00,0x06,0x16,0x06
+
+# CHECK: v_mac_f16_sdwa v0, |v0|, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x06,0x26,0x06]
+0xf9,0x00,0x00,0x46,0x00,0x06,0x26,0x06
+
+# CHECK: v_mac_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x46,0x00,0x06,0x06,0x00
+
+# CHECK: v_mac_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x46,0x00,0x06,0x06,0x01
+
+# CHECK: v_mac_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x46,0x00,0x06,0x06,0x02
+
+# CHECK: v_mac_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x46,0x00,0x06,0x06,0x03
+
+# CHECK: v_mac_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x46,0x00,0x06,0x06,0x04
+
+# CHECK: v_mac_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x46,0x00,0x06,0x06,0x05
+
+# CHECK: v_mac_f16_sdwa v0, v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x06,0x06,0x16]
+0xf9,0x00,0x00,0x46,0x00,0x06,0x06,0x16
+
+# CHECK: v_mac_f16_sdwa v0, v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x46,0x00,0x06,0x06,0x26]
+0xf9,0x00,0x00,0x46,0x00,0x06,0x06,0x26
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x46,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mac_f16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x47,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x47,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mac_f16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x46,0xff,0xe4,0x00,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x46,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x46,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x46,0x00,0x1b,0x00,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x46,0x00,0x40,0x01,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x46,0x00,0x41,0x01,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x46,0x00,0x42,0x01,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x46,0x00,0x43,0x01,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x46,0x00,0x30,0x01,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x46,0x00,0x34,0x01,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x46,0x00,0x38,0x01,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x46,0x00,0x3c,0x01,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x46,0x00,0x01,0x01,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x46,0x00,0x0f,0x01,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x46,0x00,0x11,0x01,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x46,0x00,0x1f,0x01,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x46,0x00,0x21,0x01,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x46,0x00,0x2f,0x01,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x46,0x00,0xe4,0x00,0x10
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x46,0x00,0xe4,0x00,0x30
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x46,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x46,0x00,0xe4,0x00,0x01
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x46,0x00,0xe4,0x00,0x03
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x46,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x46,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_mac_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x46,0x00,0xe4,0x08,0x00
+
+# CHECK: v_mac_f16_dpp v0, -v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0xe4,0x10,0x00]
+0xfa,0x00,0x00,0x46,0x00,0xe4,0x10,0x00
+
+# CHECK: v_mac_f16_dpp v0, |v0|, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0xe4,0x20,0x00]
+0xfa,0x00,0x00,0x46,0x00,0xe4,0x20,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0xe4,0x40,0x00]
+0xfa,0x00,0x00,0x46,0x00,0xe4,0x40,0x00
+
+# CHECK: v_mac_f16_dpp v0, v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x46,0x00,0xe4,0x80,0x00]
+0xfa,0x00,0x00,0x46,0x00,0xe4,0x80,0x00
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x4c,0x00,0x06,0x06,0x06
+
+# CHECK: v_add_u16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x4d,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x4d,0x00,0x06,0x06,0x06
+
+# CHECK: v_add_u16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x4c,0xff,0x06,0x06,0x06
+
+# CHECK: v_add_u16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x4c,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x4c,0x00,0x06,0x06,0x06
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x4c,0x00,0x26,0x06,0x06
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x4c,0x00,0x00,0x06,0x06
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x4c,0x00,0x01,0x06,0x06
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x4c,0x00,0x02,0x06,0x06
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x4c,0x00,0x03,0x06,0x06
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x4c,0x00,0x04,0x06,0x06
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x4c,0x00,0x05,0x06,0x06
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x4c,0x00,0x0e,0x06,0x06
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x4c,0x00,0x16,0x06,0x06
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x4c,0x00,0x06,0x00,0x06
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x4c,0x00,0x06,0x01,0x06
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x4c,0x00,0x06,0x02,0x06
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x4c,0x00,0x06,0x03,0x06
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x4c,0x00,0x06,0x04,0x06
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x4c,0x00,0x06,0x05,0x06
+
+# CHECK: v_add_u16_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x4c,0x00,0x06,0x0e,0x06
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x4c,0x00,0x06,0x06,0x00
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x4c,0x00,0x06,0x06,0x01
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x4c,0x00,0x06,0x06,0x02
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x4c,0x00,0x06,0x06,0x03
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x4c,0x00,0x06,0x06,0x04
+
+# CHECK: v_add_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x4c,0x00,0x06,0x06,0x05
+
+# CHECK: v_add_u16_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x4c,0x00,0x06,0x06,0x0e
+
+# CHECK: v_add_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x4c,0x00,0xe4,0x00,0x00
+
+# CHECK: v_add_u16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x4d,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x4d,0x00,0xe4,0x00,0x00
+
+# CHECK: v_add_u16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x4c,0xff,0xe4,0x00,0x00
+
+# CHECK: v_add_u16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x4c,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x4c,0x00,0xe4,0x00,0x00
+
+# CHECK: v_add_u16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x4c,0x00,0x1b,0x00,0x00
+
+# CHECK: v_add_u16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x4c,0x00,0x40,0x01,0x00
+
+# CHECK: v_add_u16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x4c,0x00,0x41,0x01,0x00
+
+# CHECK: v_add_u16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x4c,0x00,0x42,0x01,0x00
+
+# CHECK: v_add_u16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x4c,0x00,0x43,0x01,0x00
+
+# CHECK: v_add_u16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x4c,0x00,0x30,0x01,0x00
+
+# CHECK: v_add_u16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x4c,0x00,0x34,0x01,0x00
+
+# CHECK: v_add_u16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x4c,0x00,0x38,0x01,0x00
+
+# CHECK: v_add_u16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x4c,0x00,0x3c,0x01,0x00
+
+# CHECK: v_add_u16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x4c,0x00,0x01,0x01,0x00
+
+# CHECK: v_add_u16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x4c,0x00,0x0f,0x01,0x00
+
+# CHECK: v_add_u16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x4c,0x00,0x11,0x01,0x00
+
+# CHECK: v_add_u16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x4c,0x00,0x1f,0x01,0x00
+
+# CHECK: v_add_u16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x4c,0x00,0x21,0x01,0x00
+
+# CHECK: v_add_u16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x4c,0x00,0x2f,0x01,0x00
+
+# CHECK: v_add_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x4c,0x00,0xe4,0x00,0x10
+
+# CHECK: v_add_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x4c,0x00,0xe4,0x00,0x30
+
+# CHECK: v_add_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x4c,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_add_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x4c,0x00,0xe4,0x00,0x01
+
+# CHECK: v_add_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x4c,0x00,0xe4,0x00,0x03
+
+# CHECK: v_add_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x4c,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_add_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x4c,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x4c,0x00,0xe4,0x08,0x00
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x4e,0x00,0x06,0x06,0x06
+
+# CHECK: v_sub_u16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x4f,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x4f,0x00,0x06,0x06,0x06
+
+# CHECK: v_sub_u16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x4e,0xff,0x06,0x06,0x06
+
+# CHECK: v_sub_u16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x4e,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x4e,0x00,0x06,0x06,0x06
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x4e,0x00,0x26,0x06,0x06
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x4e,0x00,0x00,0x06,0x06
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x4e,0x00,0x01,0x06,0x06
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x4e,0x00,0x02,0x06,0x06
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x4e,0x00,0x03,0x06,0x06
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x4e,0x00,0x04,0x06,0x06
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x4e,0x00,0x05,0x06,0x06
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x4e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x4e,0x00,0x16,0x06,0x06
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x4e,0x00,0x06,0x00,0x06
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x4e,0x00,0x06,0x01,0x06
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x4e,0x00,0x06,0x02,0x06
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x4e,0x00,0x06,0x03,0x06
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x4e,0x00,0x06,0x04,0x06
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x4e,0x00,0x06,0x05,0x06
+
+# CHECK: v_sub_u16_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x4e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x4e,0x00,0x06,0x06,0x00
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x4e,0x00,0x06,0x06,0x01
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x4e,0x00,0x06,0x06,0x02
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x4e,0x00,0x06,0x06,0x03
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x4e,0x00,0x06,0x06,0x04
+
+# CHECK: v_sub_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x4e,0x00,0x06,0x06,0x05
+
+# CHECK: v_sub_u16_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4e,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x4e,0x00,0x06,0x06,0x0e
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x4e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_sub_u16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x4f,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x4f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_sub_u16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x4e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_sub_u16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x4e,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x4e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x4e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x4e,0x00,0x40,0x01,0x00
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x4e,0x00,0x41,0x01,0x00
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x4e,0x00,0x42,0x01,0x00
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x4e,0x00,0x43,0x01,0x00
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x4e,0x00,0x30,0x01,0x00
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x4e,0x00,0x34,0x01,0x00
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x4e,0x00,0x38,0x01,0x00
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x4e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x4e,0x00,0x01,0x01,0x00
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x4e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x4e,0x00,0x11,0x01,0x00
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x4e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x4e,0x00,0x21,0x01,0x00
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x4e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x4e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x4e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x4e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x4e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x4e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x4e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_sub_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x4e,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x4e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x50,0x00,0x06,0x06,0x06
+
+# CHECK: v_subrev_u16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x51,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x51,0x00,0x06,0x06,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x50,0xff,0x06,0x06,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x50,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x50,0x00,0x06,0x06,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x50,0x00,0x26,0x06,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x50,0x00,0x00,0x06,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x50,0x00,0x01,0x06,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x50,0x00,0x02,0x06,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x50,0x00,0x03,0x06,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x50,0x00,0x04,0x06,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x50,0x00,0x05,0x06,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x50,0x00,0x0e,0x06,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x50,0x00,0x16,0x06,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x50,0x00,0x06,0x00,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x50,0x00,0x06,0x01,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x50,0x00,0x06,0x02,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x50,0x00,0x06,0x03,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x50,0x00,0x06,0x04,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x50,0x00,0x06,0x05,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x50,0x00,0x06,0x0e,0x06
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x50,0x00,0x06,0x06,0x00
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x50,0x00,0x06,0x06,0x01
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x50,0x00,0x06,0x06,0x02
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x50,0x00,0x06,0x06,0x03
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x50,0x00,0x06,0x06,0x04
+
+# CHECK: v_subrev_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x50,0x00,0x06,0x06,0x05
+
+# CHECK: v_subrev_u16_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x50,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x50,0x00,0x06,0x06,0x0e
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x50,0x00,0xe4,0x00,0x00
+
+# CHECK: v_subrev_u16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x51,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x51,0x00,0xe4,0x00,0x00
+
+# CHECK: v_subrev_u16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x50,0xff,0xe4,0x00,0x00
+
+# CHECK: v_subrev_u16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x50,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x50,0x00,0xe4,0x00,0x00
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x50,0x00,0x1b,0x00,0x00
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x50,0x00,0x40,0x01,0x00
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x50,0x00,0x41,0x01,0x00
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x50,0x00,0x42,0x01,0x00
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x50,0x00,0x43,0x01,0x00
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x50,0x00,0x30,0x01,0x00
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x50,0x00,0x34,0x01,0x00
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x50,0x00,0x38,0x01,0x00
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x50,0x00,0x3c,0x01,0x00
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x50,0x00,0x01,0x01,0x00
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x50,0x00,0x0f,0x01,0x00
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x50,0x00,0x11,0x01,0x00
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x50,0x00,0x1f,0x01,0x00
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x50,0x00,0x21,0x01,0x00
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x50,0x00,0x2f,0x01,0x00
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x50,0x00,0xe4,0x00,0x10
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x50,0x00,0xe4,0x00,0x30
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x50,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x50,0x00,0xe4,0x00,0x01
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x50,0x00,0xe4,0x00,0x03
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x50,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x50,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_subrev_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x50,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x50,0x00,0xe4,0x08,0x00
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x52,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x53,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x53,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x52,0xff,0x06,0x06,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x52,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x52,0x00,0x06,0x06,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x52,0x00,0x26,0x06,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x52,0x00,0x00,0x06,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x52,0x00,0x01,0x06,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x52,0x00,0x02,0x06,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x52,0x00,0x03,0x06,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x52,0x00,0x04,0x06,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x52,0x00,0x05,0x06,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x52,0x00,0x0e,0x06,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x52,0x00,0x16,0x06,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x52,0x00,0x06,0x00,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x52,0x00,0x06,0x01,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x52,0x00,0x06,0x02,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x52,0x00,0x06,0x03,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x52,0x00,0x06,0x04,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x52,0x00,0x06,0x05,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x52,0x00,0x06,0x0e,0x06
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x52,0x00,0x06,0x06,0x00
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x52,0x00,0x06,0x06,0x01
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x52,0x00,0x06,0x06,0x02
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x52,0x00,0x06,0x06,0x03
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x52,0x00,0x06,0x06,0x04
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x52,0x00,0x06,0x06,0x05
+
+# CHECK: v_mul_lo_u16_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x52,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x52,0x00,0x06,0x06,0x0e
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x52,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_lo_u16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x53,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x53,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_lo_u16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x52,0xff,0xe4,0x00,0x00
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x52,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x52,0x00,0xe4,0x00,0x00
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x52,0x00,0x1b,0x00,0x00
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x52,0x00,0x40,0x01,0x00
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x52,0x00,0x41,0x01,0x00
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x52,0x00,0x42,0x01,0x00
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x52,0x00,0x43,0x01,0x00
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x52,0x00,0x30,0x01,0x00
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x52,0x00,0x34,0x01,0x00
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x52,0x00,0x38,0x01,0x00
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x52,0x00,0x3c,0x01,0x00
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x52,0x00,0x01,0x01,0x00
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x52,0x00,0x0f,0x01,0x00
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x52,0x00,0x11,0x01,0x00
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x52,0x00,0x1f,0x01,0x00
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x52,0x00,0x21,0x01,0x00
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x52,0x00,0x2f,0x01,0x00
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x52,0x00,0xe4,0x00,0x10
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x52,0x00,0xe4,0x00,0x30
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x52,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x52,0x00,0xe4,0x00,0x01
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x52,0x00,0xe4,0x00,0x03
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x52,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x52,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_mul_lo_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x52,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x52,0x00,0xe4,0x08,0x00
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x54,0x00,0x06,0x06,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x55,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x55,0x00,0x06,0x06,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x54,0xff,0x06,0x06,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x54,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x54,0x00,0x06,0x06,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x54,0x00,0x26,0x06,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x54,0x00,0x00,0x06,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x54,0x00,0x01,0x06,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x54,0x00,0x02,0x06,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x54,0x00,0x03,0x06,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x54,0x00,0x04,0x06,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x54,0x00,0x05,0x06,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x54,0x00,0x0e,0x06,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x54,0x00,0x16,0x06,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x54,0x00,0x06,0x00,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x54,0x00,0x06,0x01,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x54,0x00,0x06,0x02,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x54,0x00,0x06,0x03,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x54,0x00,0x06,0x04,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x54,0x00,0x06,0x05,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x54,0x00,0x06,0x0e,0x06
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x54,0x00,0x06,0x06,0x00
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x54,0x00,0x06,0x06,0x01
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x54,0x00,0x06,0x06,0x02
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x54,0x00,0x06,0x06,0x03
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x54,0x00,0x06,0x06,0x04
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x54,0x00,0x06,0x06,0x05
+
+# CHECK: v_lshlrev_b16_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x54,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x54,0x00,0x06,0x06,0x0e
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x54,0x00,0xe4,0x00,0x00
+
+# CHECK: v_lshlrev_b16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x55,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x55,0x00,0xe4,0x00,0x00
+
+# CHECK: v_lshlrev_b16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x54,0xff,0xe4,0x00,0x00
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x54,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x54,0x00,0xe4,0x00,0x00
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x54,0x00,0x1b,0x00,0x00
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x54,0x00,0x40,0x01,0x00
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x54,0x00,0x41,0x01,0x00
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x54,0x00,0x42,0x01,0x00
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x54,0x00,0x43,0x01,0x00
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x54,0x00,0x30,0x01,0x00
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x54,0x00,0x34,0x01,0x00
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x54,0x00,0x38,0x01,0x00
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x54,0x00,0x3c,0x01,0x00
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x54,0x00,0x01,0x01,0x00
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x54,0x00,0x0f,0x01,0x00
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x54,0x00,0x11,0x01,0x00
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x54,0x00,0x1f,0x01,0x00
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x54,0x00,0x21,0x01,0x00
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x54,0x00,0x2f,0x01,0x00
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x54,0x00,0xe4,0x00,0x10
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x54,0x00,0xe4,0x00,0x30
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x54,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x54,0x00,0xe4,0x00,0x01
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x54,0x00,0xe4,0x00,0x03
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x54,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x54,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_lshlrev_b16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x54,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x54,0x00,0xe4,0x08,0x00
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x56,0x00,0x06,0x06,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x57,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x57,0x00,0x06,0x06,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x56,0xff,0x06,0x06,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x56,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x56,0x00,0x06,0x06,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x56,0x00,0x26,0x06,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x56,0x00,0x00,0x06,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x56,0x00,0x01,0x06,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x56,0x00,0x02,0x06,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x56,0x00,0x03,0x06,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x56,0x00,0x04,0x06,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x56,0x00,0x05,0x06,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x56,0x00,0x0e,0x06,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x56,0x00,0x16,0x06,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x56,0x00,0x06,0x00,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x56,0x00,0x06,0x01,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x56,0x00,0x06,0x02,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x56,0x00,0x06,0x03,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x56,0x00,0x06,0x04,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x56,0x00,0x06,0x05,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x56,0x00,0x06,0x0e,0x06
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x56,0x00,0x06,0x06,0x00
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x56,0x00,0x06,0x06,0x01
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x56,0x00,0x06,0x06,0x02
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x56,0x00,0x06,0x06,0x03
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x56,0x00,0x06,0x06,0x04
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x56,0x00,0x06,0x06,0x05
+
+# CHECK: v_lshrrev_b16_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x56,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x56,0x00,0x06,0x06,0x0e
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x56,0x00,0xe4,0x00,0x00
+
+# CHECK: v_lshrrev_b16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x57,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x57,0x00,0xe4,0x00,0x00
+
+# CHECK: v_lshrrev_b16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x56,0xff,0xe4,0x00,0x00
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x56,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x56,0x00,0xe4,0x00,0x00
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x56,0x00,0x1b,0x00,0x00
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x56,0x00,0x40,0x01,0x00
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x56,0x00,0x41,0x01,0x00
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x56,0x00,0x42,0x01,0x00
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x56,0x00,0x43,0x01,0x00
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x56,0x00,0x30,0x01,0x00
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x56,0x00,0x34,0x01,0x00
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x56,0x00,0x38,0x01,0x00
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x56,0x00,0x3c,0x01,0x00
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x56,0x00,0x01,0x01,0x00
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x56,0x00,0x0f,0x01,0x00
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x56,0x00,0x11,0x01,0x00
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x56,0x00,0x1f,0x01,0x00
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x56,0x00,0x21,0x01,0x00
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x56,0x00,0x2f,0x01,0x00
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x56,0x00,0xe4,0x00,0x10
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x56,0x00,0xe4,0x00,0x30
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x56,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x56,0x00,0xe4,0x00,0x01
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x56,0x00,0xe4,0x00,0x03
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x56,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x56,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_lshrrev_b16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x56,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x56,0x00,0xe4,0x08,0x00
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x58,0x00,0x06,0x06,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x59,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x59,0x00,0x06,0x06,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x58,0xff,0x06,0x06,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x58,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x58,0x00,0x06,0x06,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x58,0x00,0x26,0x06,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x58,0x00,0x00,0x06,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x58,0x00,0x01,0x06,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x58,0x00,0x02,0x06,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x58,0x00,0x03,0x06,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x58,0x00,0x04,0x06,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x58,0x00,0x05,0x06,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x58,0x00,0x0e,0x06,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x58,0x00,0x16,0x06,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x58,0x00,0x06,0x00,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x58,0x00,0x06,0x01,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x58,0x00,0x06,0x02,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x58,0x00,0x06,0x03,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x58,0x00,0x06,0x04,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x58,0x00,0x06,0x05,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x58,0x00,0x06,0x0e,0x06
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x58,0x00,0x06,0x06,0x00
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x58,0x00,0x06,0x06,0x01
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x58,0x00,0x06,0x06,0x02
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x58,0x00,0x06,0x06,0x03
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x58,0x00,0x06,0x06,0x04
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x58,0x00,0x06,0x06,0x05
+
+# CHECK: v_ashrrev_i16_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x58,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x58,0x00,0x06,0x06,0x0e
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x58,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ashrrev_i16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x59,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x59,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ashrrev_i16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x58,0xff,0xe4,0x00,0x00
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x58,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x58,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x58,0x00,0x1b,0x00,0x00
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x58,0x00,0x40,0x01,0x00
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x58,0x00,0x41,0x01,0x00
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x58,0x00,0x42,0x01,0x00
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x58,0x00,0x43,0x01,0x00
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x58,0x00,0x30,0x01,0x00
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x58,0x00,0x34,0x01,0x00
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x58,0x00,0x38,0x01,0x00
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x58,0x00,0x3c,0x01,0x00
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x58,0x00,0x01,0x01,0x00
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x58,0x00,0x0f,0x01,0x00
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x58,0x00,0x11,0x01,0x00
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x58,0x00,0x1f,0x01,0x00
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x58,0x00,0x21,0x01,0x00
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x58,0x00,0x2f,0x01,0x00
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x58,0x00,0xe4,0x00,0x10
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x58,0x00,0xe4,0x00,0x30
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x58,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x58,0x00,0xe4,0x00,0x01
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x58,0x00,0xe4,0x00,0x03
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x58,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x58,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_ashrrev_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x58,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x58,0x00,0xe4,0x08,0x00
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x5a,0x00,0x06,0x06,0x06
+
+# CHECK: v_max_f16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x5b,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x5b,0x00,0x06,0x06,0x06
+
+# CHECK: v_max_f16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x5a,0xff,0x06,0x06,0x06
+
+# CHECK: v_max_f16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x5a,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x5a,0x00,0x06,0x06,0x06
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x5a,0x00,0x26,0x06,0x06
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x5a,0x00,0x00,0x06,0x06
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x5a,0x00,0x01,0x06,0x06
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x5a,0x00,0x02,0x06,0x06
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x5a,0x00,0x03,0x06,0x06
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x5a,0x00,0x04,0x06,0x06
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x5a,0x00,0x05,0x06,0x06
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x5a,0x00,0x0e,0x06,0x06
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x5a,0x00,0x16,0x06,0x06
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x5a,0x00,0x06,0x00,0x06
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x5a,0x00,0x06,0x01,0x06
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x5a,0x00,0x06,0x02,0x06
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x5a,0x00,0x06,0x03,0x06
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x5a,0x00,0x06,0x04,0x06
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x5a,0x00,0x06,0x05,0x06
+
+# CHECK: v_max_f16_sdwa v0, -v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x06,0x16,0x06]
+0xf9,0x00,0x00,0x5a,0x00,0x06,0x16,0x06
+
+# CHECK: v_max_f16_sdwa v0, |v0|, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x06,0x26,0x06]
+0xf9,0x00,0x00,0x5a,0x00,0x06,0x26,0x06
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x5a,0x00,0x06,0x06,0x00
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x5a,0x00,0x06,0x06,0x01
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x5a,0x00,0x06,0x06,0x02
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x5a,0x00,0x06,0x06,0x03
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x5a,0x00,0x06,0x06,0x04
+
+# CHECK: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x5a,0x00,0x06,0x06,0x05
+
+# CHECK: v_max_f16_sdwa v0, v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x06,0x06,0x16]
+0xf9,0x00,0x00,0x5a,0x00,0x06,0x06,0x16
+
+# CHECK: v_max_f16_sdwa v0, v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5a,0x00,0x06,0x06,0x26]
+0xf9,0x00,0x00,0x5a,0x00,0x06,0x06,0x26
+
+# CHECK: v_max_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0xe4,0x00,0x00
+
+# CHECK: v_max_f16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x5b,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x5b,0x00,0xe4,0x00,0x00
+
+# CHECK: v_max_f16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x5a,0xff,0xe4,0x00,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x5a,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x5a,0x00,0xe4,0x00,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0x1b,0x00,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0x40,0x01,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0x41,0x01,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0x42,0x01,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0x43,0x01,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0x30,0x01,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0x34,0x01,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0x38,0x01,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0x3c,0x01,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0x01,0x01,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0x0f,0x01,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0x11,0x01,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0x1f,0x01,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0x21,0x01,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0x2f,0x01,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x5a,0x00,0xe4,0x00,0x10
+
+# CHECK: v_max_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x5a,0x00,0xe4,0x00,0x30
+
+# CHECK: v_max_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x5a,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_max_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x5a,0x00,0xe4,0x00,0x01
+
+# CHECK: v_max_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x5a,0x00,0xe4,0x00,0x03
+
+# CHECK: v_max_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x5a,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_max_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0xe4,0x08,0x00
+
+# CHECK: v_max_f16_dpp v0, -v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0xe4,0x10,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0xe4,0x10,0x00
+
+# CHECK: v_max_f16_dpp v0, |v0|, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0xe4,0x20,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0xe4,0x20,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0xe4,0x40,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0xe4,0x40,0x00
+
+# CHECK: v_max_f16_dpp v0, v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5a,0x00,0xe4,0x80,0x00]
+0xfa,0x00,0x00,0x5a,0x00,0xe4,0x80,0x00
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x5c,0x00,0x06,0x06,0x06
+
+# CHECK: v_min_f16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x5d,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x5d,0x00,0x06,0x06,0x06
+
+# CHECK: v_min_f16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x5c,0xff,0x06,0x06,0x06
+
+# CHECK: v_min_f16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x5c,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x5c,0x00,0x06,0x06,0x06
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x5c,0x00,0x26,0x06,0x06
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x5c,0x00,0x00,0x06,0x06
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x5c,0x00,0x01,0x06,0x06
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x5c,0x00,0x02,0x06,0x06
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x5c,0x00,0x03,0x06,0x06
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x5c,0x00,0x04,0x06,0x06
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x5c,0x00,0x05,0x06,0x06
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x5c,0x00,0x0e,0x06,0x06
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x5c,0x00,0x16,0x06,0x06
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x5c,0x00,0x06,0x00,0x06
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x5c,0x00,0x06,0x01,0x06
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x5c,0x00,0x06,0x02,0x06
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x5c,0x00,0x06,0x03,0x06
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x5c,0x00,0x06,0x04,0x06
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x5c,0x00,0x06,0x05,0x06
+
+# CHECK: v_min_f16_sdwa v0, -v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x06,0x16,0x06]
+0xf9,0x00,0x00,0x5c,0x00,0x06,0x16,0x06
+
+# CHECK: v_min_f16_sdwa v0, |v0|, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x06,0x26,0x06]
+0xf9,0x00,0x00,0x5c,0x00,0x06,0x26,0x06
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x5c,0x00,0x06,0x06,0x00
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x5c,0x00,0x06,0x06,0x01
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x5c,0x00,0x06,0x06,0x02
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x5c,0x00,0x06,0x06,0x03
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x5c,0x00,0x06,0x06,0x04
+
+# CHECK: v_min_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x5c,0x00,0x06,0x06,0x05
+
+# CHECK: v_min_f16_sdwa v0, v0, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x06,0x06,0x16]
+0xf9,0x00,0x00,0x5c,0x00,0x06,0x06,0x16
+
+# CHECK: v_min_f16_sdwa v0, v0, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5c,0x00,0x06,0x06,0x26]
+0xf9,0x00,0x00,0x5c,0x00,0x06,0x06,0x26
+
+# CHECK: v_min_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0xe4,0x00,0x00
+
+# CHECK: v_min_f16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x5d,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x5d,0x00,0xe4,0x00,0x00
+
+# CHECK: v_min_f16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x5c,0xff,0xe4,0x00,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x5c,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x5c,0x00,0xe4,0x00,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0x1b,0x00,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0x40,0x01,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0x41,0x01,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0x42,0x01,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0x43,0x01,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0x30,0x01,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0x34,0x01,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0x38,0x01,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0x3c,0x01,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0x01,0x01,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0x0f,0x01,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0x11,0x01,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0x1f,0x01,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0x21,0x01,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0x2f,0x01,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x5c,0x00,0xe4,0x00,0x10
+
+# CHECK: v_min_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x5c,0x00,0xe4,0x00,0x30
+
+# CHECK: v_min_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x5c,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_min_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x5c,0x00,0xe4,0x00,0x01
+
+# CHECK: v_min_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x5c,0x00,0xe4,0x00,0x03
+
+# CHECK: v_min_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x5c,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_min_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0xe4,0x08,0x00
+
+# CHECK: v_min_f16_dpp v0, -v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0xe4,0x10,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0xe4,0x10,0x00
+
+# CHECK: v_min_f16_dpp v0, |v0|, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0xe4,0x20,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0xe4,0x20,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, -v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0xe4,0x40,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0xe4,0x40,0x00
+
+# CHECK: v_min_f16_dpp v0, v0, |v0| quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5c,0x00,0xe4,0x80,0x00]
+0xfa,0x00,0x00,0x5c,0x00,0xe4,0x80,0x00
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x5e,0x00,0x06,0x06,0x06
+
+# CHECK: v_max_u16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x5f,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x5f,0x00,0x06,0x06,0x06
+
+# CHECK: v_max_u16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x5e,0xff,0x06,0x06,0x06
+
+# CHECK: v_max_u16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x5e,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x5e,0x00,0x06,0x06,0x06
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x5e,0x00,0x26,0x06,0x06
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x5e,0x00,0x00,0x06,0x06
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x5e,0x00,0x01,0x06,0x06
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x5e,0x00,0x02,0x06,0x06
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x5e,0x00,0x03,0x06,0x06
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x5e,0x00,0x04,0x06,0x06
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x5e,0x00,0x05,0x06,0x06
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x5e,0x00,0x0e,0x06,0x06
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x5e,0x00,0x16,0x06,0x06
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x5e,0x00,0x06,0x00,0x06
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x5e,0x00,0x06,0x01,0x06
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x5e,0x00,0x06,0x02,0x06
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x5e,0x00,0x06,0x03,0x06
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x5e,0x00,0x06,0x04,0x06
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x5e,0x00,0x06,0x05,0x06
+
+# CHECK: v_max_u16_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x5e,0x00,0x06,0x0e,0x06
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x5e,0x00,0x06,0x06,0x00
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x5e,0x00,0x06,0x06,0x01
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x5e,0x00,0x06,0x06,0x02
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x5e,0x00,0x06,0x06,0x03
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x5e,0x00,0x06,0x06,0x04
+
+# CHECK: v_max_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x5e,0x00,0x06,0x06,0x05
+
+# CHECK: v_max_u16_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x5e,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x5e,0x00,0x06,0x06,0x0e
+
+# CHECK: v_max_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x5e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_max_u16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x5f,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x5f,0x00,0xe4,0x00,0x00
+
+# CHECK: v_max_u16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x5e,0xff,0xe4,0x00,0x00
+
+# CHECK: v_max_u16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x5e,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x5e,0x00,0xe4,0x00,0x00
+
+# CHECK: v_max_u16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x5e,0x00,0x1b,0x00,0x00
+
+# CHECK: v_max_u16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x5e,0x00,0x40,0x01,0x00
+
+# CHECK: v_max_u16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x5e,0x00,0x41,0x01,0x00
+
+# CHECK: v_max_u16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x5e,0x00,0x42,0x01,0x00
+
+# CHECK: v_max_u16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x5e,0x00,0x43,0x01,0x00
+
+# CHECK: v_max_u16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x5e,0x00,0x30,0x01,0x00
+
+# CHECK: v_max_u16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x5e,0x00,0x34,0x01,0x00
+
+# CHECK: v_max_u16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x5e,0x00,0x38,0x01,0x00
+
+# CHECK: v_max_u16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x5e,0x00,0x3c,0x01,0x00
+
+# CHECK: v_max_u16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x5e,0x00,0x01,0x01,0x00
+
+# CHECK: v_max_u16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x5e,0x00,0x0f,0x01,0x00
+
+# CHECK: v_max_u16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x5e,0x00,0x11,0x01,0x00
+
+# CHECK: v_max_u16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x5e,0x00,0x1f,0x01,0x00
+
+# CHECK: v_max_u16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x5e,0x00,0x21,0x01,0x00
+
+# CHECK: v_max_u16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x5e,0x00,0x2f,0x01,0x00
+
+# CHECK: v_max_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x5e,0x00,0xe4,0x00,0x10
+
+# CHECK: v_max_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x5e,0x00,0xe4,0x00,0x30
+
+# CHECK: v_max_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x5e,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_max_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x5e,0x00,0xe4,0x00,0x01
+
+# CHECK: v_max_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x5e,0x00,0xe4,0x00,0x03
+
+# CHECK: v_max_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x5e,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_max_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x5e,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x5e,0x00,0xe4,0x08,0x00
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x60,0x00,0x06,0x06,0x06
+
+# CHECK: v_max_i16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x61,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x61,0x00,0x06,0x06,0x06
+
+# CHECK: v_max_i16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x60,0xff,0x06,0x06,0x06
+
+# CHECK: v_max_i16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x60,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x60,0x00,0x06,0x06,0x06
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x60,0x00,0x26,0x06,0x06
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x60,0x00,0x00,0x06,0x06
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x60,0x00,0x01,0x06,0x06
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x60,0x00,0x02,0x06,0x06
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x60,0x00,0x03,0x06,0x06
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x60,0x00,0x04,0x06,0x06
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x60,0x00,0x05,0x06,0x06
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x60,0x00,0x0e,0x06,0x06
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x60,0x00,0x16,0x06,0x06
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x60,0x00,0x06,0x00,0x06
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x60,0x00,0x06,0x01,0x06
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x60,0x00,0x06,0x02,0x06
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x60,0x00,0x06,0x03,0x06
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x60,0x00,0x06,0x04,0x06
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x60,0x00,0x06,0x05,0x06
+
+# CHECK: v_max_i16_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x60,0x00,0x06,0x0e,0x06
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x60,0x00,0x06,0x06,0x00
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x60,0x00,0x06,0x06,0x01
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x60,0x00,0x06,0x06,0x02
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x60,0x00,0x06,0x06,0x03
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x60,0x00,0x06,0x06,0x04
+
+# CHECK: v_max_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x60,0x00,0x06,0x06,0x05
+
+# CHECK: v_max_i16_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x60,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x60,0x00,0x06,0x06,0x0e
+
+# CHECK: v_max_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x60,0x00,0xe4,0x00,0x00
+
+# CHECK: v_max_i16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x61,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x61,0x00,0xe4,0x00,0x00
+
+# CHECK: v_max_i16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x60,0xff,0xe4,0x00,0x00
+
+# CHECK: v_max_i16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x60,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x60,0x00,0xe4,0x00,0x00
+
+# CHECK: v_max_i16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x60,0x00,0x1b,0x00,0x00
+
+# CHECK: v_max_i16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x60,0x00,0x40,0x01,0x00
+
+# CHECK: v_max_i16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x60,0x00,0x41,0x01,0x00
+
+# CHECK: v_max_i16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x60,0x00,0x42,0x01,0x00
+
+# CHECK: v_max_i16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x60,0x00,0x43,0x01,0x00
+
+# CHECK: v_max_i16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x60,0x00,0x30,0x01,0x00
+
+# CHECK: v_max_i16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x60,0x00,0x34,0x01,0x00
+
+# CHECK: v_max_i16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x60,0x00,0x38,0x01,0x00
+
+# CHECK: v_max_i16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x60,0x00,0x3c,0x01,0x00
+
+# CHECK: v_max_i16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x60,0x00,0x01,0x01,0x00
+
+# CHECK: v_max_i16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x60,0x00,0x0f,0x01,0x00
+
+# CHECK: v_max_i16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x60,0x00,0x11,0x01,0x00
+
+# CHECK: v_max_i16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x60,0x00,0x1f,0x01,0x00
+
+# CHECK: v_max_i16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x60,0x00,0x21,0x01,0x00
+
+# CHECK: v_max_i16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x60,0x00,0x2f,0x01,0x00
+
+# CHECK: v_max_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x60,0x00,0xe4,0x00,0x10
+
+# CHECK: v_max_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x60,0x00,0xe4,0x00,0x30
+
+# CHECK: v_max_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x60,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_max_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x60,0x00,0xe4,0x00,0x01
+
+# CHECK: v_max_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x60,0x00,0xe4,0x00,0x03
+
+# CHECK: v_max_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x60,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x60,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_max_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x60,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x60,0x00,0xe4,0x08,0x00
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x62,0x00,0x06,0x06,0x06
+
+# CHECK: v_min_u16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x63,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x63,0x00,0x06,0x06,0x06
+
+# CHECK: v_min_u16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x62,0xff,0x06,0x06,0x06
+
+# CHECK: v_min_u16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x62,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x62,0x00,0x06,0x06,0x06
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x62,0x00,0x26,0x06,0x06
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x62,0x00,0x00,0x06,0x06
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x62,0x00,0x01,0x06,0x06
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x62,0x00,0x02,0x06,0x06
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x62,0x00,0x03,0x06,0x06
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x62,0x00,0x04,0x06,0x06
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x62,0x00,0x05,0x06,0x06
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x62,0x00,0x0e,0x06,0x06
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x62,0x00,0x16,0x06,0x06
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x62,0x00,0x06,0x00,0x06
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x62,0x00,0x06,0x01,0x06
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x62,0x00,0x06,0x02,0x06
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x62,0x00,0x06,0x03,0x06
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x62,0x00,0x06,0x04,0x06
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x62,0x00,0x06,0x05,0x06
+
+# CHECK: v_min_u16_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x62,0x00,0x06,0x0e,0x06
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x62,0x00,0x06,0x06,0x00
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x62,0x00,0x06,0x06,0x01
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x62,0x00,0x06,0x06,0x02
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x62,0x00,0x06,0x06,0x03
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x62,0x00,0x06,0x06,0x04
+
+# CHECK: v_min_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x62,0x00,0x06,0x06,0x05
+
+# CHECK: v_min_u16_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x62,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x62,0x00,0x06,0x06,0x0e
+
+# CHECK: v_min_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x62,0x00,0xe4,0x00,0x00
+
+# CHECK: v_min_u16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x63,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x63,0x00,0xe4,0x00,0x00
+
+# CHECK: v_min_u16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x62,0xff,0xe4,0x00,0x00
+
+# CHECK: v_min_u16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x62,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x62,0x00,0xe4,0x00,0x00
+
+# CHECK: v_min_u16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x62,0x00,0x1b,0x00,0x00
+
+# CHECK: v_min_u16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x62,0x00,0x40,0x01,0x00
+
+# CHECK: v_min_u16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x62,0x00,0x41,0x01,0x00
+
+# CHECK: v_min_u16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x62,0x00,0x42,0x01,0x00
+
+# CHECK: v_min_u16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x62,0x00,0x43,0x01,0x00
+
+# CHECK: v_min_u16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x62,0x00,0x30,0x01,0x00
+
+# CHECK: v_min_u16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x62,0x00,0x34,0x01,0x00
+
+# CHECK: v_min_u16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x62,0x00,0x38,0x01,0x00
+
+# CHECK: v_min_u16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x62,0x00,0x3c,0x01,0x00
+
+# CHECK: v_min_u16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x62,0x00,0x01,0x01,0x00
+
+# CHECK: v_min_u16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x62,0x00,0x0f,0x01,0x00
+
+# CHECK: v_min_u16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x62,0x00,0x11,0x01,0x00
+
+# CHECK: v_min_u16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x62,0x00,0x1f,0x01,0x00
+
+# CHECK: v_min_u16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x62,0x00,0x21,0x01,0x00
+
+# CHECK: v_min_u16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x62,0x00,0x2f,0x01,0x00
+
+# CHECK: v_min_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x62,0x00,0xe4,0x00,0x10
+
+# CHECK: v_min_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x62,0x00,0xe4,0x00,0x30
+
+# CHECK: v_min_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x62,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_min_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x62,0x00,0xe4,0x00,0x01
+
+# CHECK: v_min_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x62,0x00,0xe4,0x00,0x03
+
+# CHECK: v_min_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x62,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x62,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_min_u16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x62,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x62,0x00,0xe4,0x08,0x00
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x64,0x00,0x06,0x06,0x06
+
+# CHECK: v_min_i16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x65,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x65,0x00,0x06,0x06,0x06
+
+# CHECK: v_min_i16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x64,0xff,0x06,0x06,0x06
+
+# CHECK: v_min_i16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x64,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x64,0x00,0x06,0x06,0x06
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x64,0x00,0x26,0x06,0x06
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x64,0x00,0x00,0x06,0x06
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x64,0x00,0x01,0x06,0x06
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x64,0x00,0x02,0x06,0x06
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x64,0x00,0x03,0x06,0x06
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x64,0x00,0x04,0x06,0x06
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x64,0x00,0x05,0x06,0x06
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x64,0x00,0x0e,0x06,0x06
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x64,0x00,0x16,0x06,0x06
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x64,0x00,0x06,0x00,0x06
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x64,0x00,0x06,0x01,0x06
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x64,0x00,0x06,0x02,0x06
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x64,0x00,0x06,0x03,0x06
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x64,0x00,0x06,0x04,0x06
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x64,0x00,0x06,0x05,0x06
+
+# CHECK: v_min_i16_sdwa v0, sext(v0), v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x06,0x0e,0x06]
+0xf9,0x00,0x00,0x64,0x00,0x06,0x0e,0x06
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x64,0x00,0x06,0x06,0x00
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x64,0x00,0x06,0x06,0x01
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x64,0x00,0x06,0x06,0x02
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x64,0x00,0x06,0x06,0x03
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x64,0x00,0x06,0x06,0x04
+
+# CHECK: v_min_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x64,0x00,0x06,0x06,0x05
+
+# CHECK: v_min_i16_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x64,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x64,0x00,0x06,0x06,0x0e
+
+# CHECK: v_min_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x64,0x00,0xe4,0x00,0x00
+
+# CHECK: v_min_i16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x65,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x65,0x00,0xe4,0x00,0x00
+
+# CHECK: v_min_i16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x64,0xff,0xe4,0x00,0x00
+
+# CHECK: v_min_i16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x64,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x64,0x00,0xe4,0x00,0x00
+
+# CHECK: v_min_i16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x64,0x00,0x1b,0x00,0x00
+
+# CHECK: v_min_i16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x64,0x00,0x40,0x01,0x00
+
+# CHECK: v_min_i16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x64,0x00,0x41,0x01,0x00
+
+# CHECK: v_min_i16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x64,0x00,0x42,0x01,0x00
+
+# CHECK: v_min_i16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x64,0x00,0x43,0x01,0x00
+
+# CHECK: v_min_i16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x64,0x00,0x30,0x01,0x00
+
+# CHECK: v_min_i16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x64,0x00,0x34,0x01,0x00
+
+# CHECK: v_min_i16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x64,0x00,0x38,0x01,0x00
+
+# CHECK: v_min_i16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x64,0x00,0x3c,0x01,0x00
+
+# CHECK: v_min_i16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x64,0x00,0x01,0x01,0x00
+
+# CHECK: v_min_i16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x64,0x00,0x0f,0x01,0x00
+
+# CHECK: v_min_i16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x64,0x00,0x11,0x01,0x00
+
+# CHECK: v_min_i16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x64,0x00,0x1f,0x01,0x00
+
+# CHECK: v_min_i16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x64,0x00,0x21,0x01,0x00
+
+# CHECK: v_min_i16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x64,0x00,0x2f,0x01,0x00
+
+# CHECK: v_min_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x64,0x00,0xe4,0x00,0x10
+
+# CHECK: v_min_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x64,0x00,0xe4,0x00,0x30
+
+# CHECK: v_min_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x64,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_min_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x64,0x00,0xe4,0x00,0x01
+
+# CHECK: v_min_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x64,0x00,0xe4,0x00,0x03
+
+# CHECK: v_min_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x64,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x64,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_min_i16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x64,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x64,0x00,0xe4,0x08,0x00
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x66,0x00,0x06,0x06,0x06
+
+# CHECK: v_ldexp_f16_sdwa v255, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xfe,0x67,0x00,0x06,0x06,0x06]
+0xf9,0x00,0xfe,0x67,0x00,0x06,0x06,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, v255, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0xff,0x06,0x06,0x06]
+0xf9,0x00,0x00,0x66,0xff,0x06,0x06,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v255 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x01,0x66,0x00,0x06,0x06,0x06]
+0xf9,0xfe,0x01,0x66,0x00,0x06,0x06,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 clamp dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x26,0x06,0x06]
+0xf9,0x00,0x00,0x66,0x00,0x26,0x06,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:BYTE_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x00,0x06,0x06]
+0xf9,0x00,0x00,0x66,0x00,0x00,0x06,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x01,0x06,0x06]
+0xf9,0x00,0x00,0x66,0x00,0x01,0x06,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:BYTE_2 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x02,0x06,0x06]
+0xf9,0x00,0x00,0x66,0x00,0x02,0x06,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x03,0x06,0x06]
+0xf9,0x00,0x00,0x66,0x00,0x03,0x06,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:WORD_0 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x04,0x06,0x06]
+0xf9,0x00,0x00,0x66,0x00,0x04,0x06,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x05,0x06,0x06]
+0xf9,0x00,0x00,0x66,0x00,0x05,0x06,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_SEXT src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x0e,0x06,0x06]
+0xf9,0x00,0x00,0x66,0x00,0x0e,0x06,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x00,0x66,0x00,0x16,0x06,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x06,0x00,0x06]
+0xf9,0x00,0x00,0x66,0x00,0x06,0x00,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x06,0x01,0x06]
+0xf9,0x00,0x00,0x66,0x00,0x06,0x01,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x06,0x02,0x06]
+0xf9,0x00,0x00,0x66,0x00,0x06,0x02,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x06,0x03,0x06]
+0xf9,0x00,0x00,0x66,0x00,0x06,0x03,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x06,0x04,0x06]
+0xf9,0x00,0x00,0x66,0x00,0x06,0x04,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x06,0x05,0x06]
+0xf9,0x00,0x00,0x66,0x00,0x06,0x05,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, -v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x06,0x16,0x06]
+0xf9,0x00,0x00,0x66,0x00,0x06,0x16,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, |v0|, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x06,0x26,0x06]
+0xf9,0x00,0x00,0x66,0x00,0x06,0x26,0x06
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x06,0x06,0x00]
+0xf9,0x00,0x00,0x66,0x00,0x06,0x06,0x00
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x06,0x06,0x01]
+0xf9,0x00,0x00,0x66,0x00,0x06,0x06,0x01
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x06,0x06,0x02]
+0xf9,0x00,0x00,0x66,0x00,0x06,0x06,0x02
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x06,0x06,0x03]
+0xf9,0x00,0x00,0x66,0x00,0x06,0x06,0x03
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x06,0x06,0x04]
+0xf9,0x00,0x00,0x66,0x00,0x06,0x06,0x04
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x06,0x06,0x05]
+0xf9,0x00,0x00,0x66,0x00,0x06,0x06,0x05
+
+# CHECK: v_ldexp_f16_sdwa v0, v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x66,0x00,0x06,0x06,0x0e]
+0xf9,0x00,0x00,0x66,0x00,0x06,0x06,0x0e
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x66,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ldexp_f16_dpp v255, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0xfe,0x67,0x00,0xe4,0x00,0x00]
+0xfa,0x00,0xfe,0x67,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, v255, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0xff,0xe4,0x00,0x00]
+0xfa,0x00,0x00,0x66,0xff,0xe4,0x00,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v255 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0xfe,0x01,0x66,0x00,0xe4,0x00,0x00]
+0xfa,0xfe,0x01,0x66,0x00,0xe4,0x00,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 quad_perm:[3,2,1,0] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0x1b,0x00,0x00]
+0xfa,0x00,0x00,0x66,0x00,0x1b,0x00,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 row_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0x40,0x01,0x00]
+0xfa,0x00,0x00,0x66,0x00,0x40,0x01,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 row_half_mirror row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0x41,0x01,0x00]
+0xfa,0x00,0x00,0x66,0x00,0x41,0x01,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 row_bcast:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0x42,0x01,0x00]
+0xfa,0x00,0x00,0x66,0x00,0x42,0x01,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 row_bcast:31 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0x43,0x01,0x00]
+0xfa,0x00,0x00,0x66,0x00,0x43,0x01,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 wave_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0x30,0x01,0x00]
+0xfa,0x00,0x00,0x66,0x00,0x30,0x01,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 wave_rol:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0x34,0x01,0x00]
+0xfa,0x00,0x00,0x66,0x00,0x34,0x01,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 wave_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0x38,0x01,0x00]
+0xfa,0x00,0x00,0x66,0x00,0x38,0x01,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 wave_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0x3c,0x01,0x00]
+0xfa,0x00,0x00,0x66,0x00,0x3c,0x01,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 row_shl:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0x01,0x01,0x00]
+0xfa,0x00,0x00,0x66,0x00,0x01,0x01,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 row_shl:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0x0f,0x01,0x00]
+0xfa,0x00,0x00,0x66,0x00,0x0f,0x01,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 row_shr:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0x11,0x01,0x00]
+0xfa,0x00,0x00,0x66,0x00,0x11,0x01,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 row_shr:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0x1f,0x01,0x00]
+0xfa,0x00,0x00,0x66,0x00,0x1f,0x01,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 row_ror:1 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0x21,0x01,0x00]
+0xfa,0x00,0x00,0x66,0x00,0x21,0x01,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 row_ror:15 row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0x2f,0x01,0x00]
+0xfa,0x00,0x00,0x66,0x00,0x2f,0x01,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x1 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0xe4,0x00,0x10]
+0xfa,0x00,0x00,0x66,0x00,0xe4,0x00,0x10
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x3 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0xe4,0x00,0x30]
+0xfa,0x00,0x00,0x66,0x00,0xe4,0x00,0x30
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0xe4,0x00,0xf0]
+0xfa,0x00,0x00,0x66,0x00,0xe4,0x00,0xf0
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0xe4,0x00,0x01]
+0xfa,0x00,0x00,0x66,0x00,0xe4,0x00,0x01
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x3 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0xe4,0x00,0x03]
+0xfa,0x00,0x00,0x66,0x00,0xe4,0x00,0x03
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0xf ; encoding: [0xfa,0x00,0x00,0x66,0x00,0xe4,0x00,0x0f]
+0xfa,0x00,0x00,0x66,0x00,0xe4,0x00,0x0f
+
+# CHECK: v_ldexp_f16_dpp v0, v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 bound_ctrl:0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0xe4,0x08,0x00]
+0xfa,0x00,0x00,0x66,0x00,0xe4,0x08,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, -v0, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0xe4,0x10,0x00]
+0xfa,0x00,0x00,0x66,0x00,0xe4,0x10,0x00
+
+# CHECK: v_ldexp_f16_dpp v0, |v0|, v0 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x00,0x00,0x66,0x00,0xe4,0x20,0x00]
+0xfa,0x00,0x00,0x66,0x00,0xe4,0x20,0x00
+
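+# Note (per the byte patterns above): in the DPP variants, byte 5 plus bit 0
+# of byte 6 hold the 9-bit dpp_ctrl. quad_perm packs four 2-bit lane selects
+# LSB-first, so 0xe4 is [0,1,2,3] and 0x1b is [3,2,1,0]; 0x101-0x12f cover
+# the row shifts/rotates, 0x130-0x13c the wave ops, 0x140/0x141 the mirrors,
+# and 0x142/0x143 row_bcast. Byte 6 also carries bound_ctrl (0x08), src0 neg
+# (0x10) and abs (0x20); byte 7 holds row_mask in the high nibble and
+# bank_mask in the low nibble.
+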
+# CHECK: v_cmp_f_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x40,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_f_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x40,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_f_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x41,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x41,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_f_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x40,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_f_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x40,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_f_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x40,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_f_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x40,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_f_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x40,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_f_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x40,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_f_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x40,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_f_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x40,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_f_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x40,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_f_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x40,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x40,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_f_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x40,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x40,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_f_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x40,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x40,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_f_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x40,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x40,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_f_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x40,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x40,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_f_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x40,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x40,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_f_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x40,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_f_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x40,0x7c,0x00,0x16,0x06,0x26
+
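+# Note: the f16 VOPC SDWA compares all write vcc, and their opcode word steps
+# by 2 per predicate (0x7c40 = f, 0x7c42 = lt, ..., 0x7c5e = tru); the
+# v_cmpx_* forms, which additionally update the exec mask, continue at 0x7c60.
+# A v255 second source shows the VSRC1 field straddling the byte boundary
+# (hence 0xfe,0x41 rather than 0x00,0x40 in the v0, v255 entries).
+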
+# CHECK: v_cmp_lt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x42,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_lt_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x42,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_lt_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x43,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x43,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_lt_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x42,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_lt_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x42,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_lt_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x42,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_lt_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x42,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_lt_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x42,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_lt_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x42,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_lt_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x42,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_lt_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x42,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_lt_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x42,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_lt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x42,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x42,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_lt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x42,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x42,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_lt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x42,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x42,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_lt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x42,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x42,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_lt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x42,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x42,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_lt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x42,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x42,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_lt_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x42,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_lt_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x42,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_eq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x44,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_eq_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x44,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_eq_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x45,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x45,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_eq_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x44,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_eq_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x44,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_eq_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x44,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_eq_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x44,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_eq_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x44,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_eq_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x44,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_eq_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x44,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_eq_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x44,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_eq_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x44,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_eq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x44,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x44,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_eq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x44,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x44,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_eq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x44,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x44,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_eq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x44,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x44,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_eq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x44,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x44,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_eq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x44,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x44,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_eq_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x44,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_eq_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x44,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_le_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x46,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_le_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x46,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_le_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x47,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x47,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_le_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x46,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_le_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x46,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_le_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x46,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_le_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x46,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_le_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x46,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_le_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x46,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_le_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x46,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_le_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x46,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_le_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x46,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_le_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x46,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x46,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_le_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x46,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x46,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_le_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x46,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x46,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_le_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x46,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x46,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_le_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x46,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x46,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_le_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x46,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x46,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_le_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x46,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_le_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x46,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_gt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x48,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_gt_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x48,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_gt_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x49,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x49,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_gt_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x48,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_gt_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x48,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_gt_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x48,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_gt_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x48,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_gt_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x48,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_gt_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x48,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_gt_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x48,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_gt_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x48,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_gt_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x48,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_gt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x48,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x48,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_gt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x48,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x48,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_gt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x48,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x48,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_gt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x48,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x48,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_gt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x48,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x48,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_gt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x48,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x48,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_gt_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x48,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_gt_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x48,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_lg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x4a,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_lg_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x4a,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_lg_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x4b,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x4b,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_lg_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x4a,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_lg_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x4a,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_lg_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x4a,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_lg_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x4a,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_lg_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x4a,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_lg_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x4a,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_lg_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x4a,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_lg_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x4a,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_lg_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x4a,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_lg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x4a,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x4a,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_lg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x4a,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x4a,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_lg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x4a,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x4a,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_lg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x4a,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x4a,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_lg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x4a,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x4a,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_lg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x4a,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x4a,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_lg_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x4a,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_lg_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x4a,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_ge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x4c,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ge_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x4c,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_ge_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x4d,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x4d,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ge_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x4c,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_ge_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x4c,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_ge_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x4c,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_ge_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x4c,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_ge_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x4c,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_ge_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x4c,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_ge_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x4c,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_ge_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x4c,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_ge_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x4c,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_ge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x4c,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x4c,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_ge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x4c,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x4c,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_ge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x4c,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x4c,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_ge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x4c,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x4c,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_ge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x4c,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x4c,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_ge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x4c,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x4c,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_ge_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x4c,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_ge_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x4c,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_o_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x4e,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_o_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x4e,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_o_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x4f,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x4f,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_o_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x4e,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_o_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x4e,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_o_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x4e,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_o_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x4e,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_o_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x4e,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_o_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x4e,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_o_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x4e,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_o_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x4e,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_o_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x4e,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_o_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x4e,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x4e,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_o_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x4e,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x4e,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_o_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x4e,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x4e,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_o_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x4e,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x4e,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_o_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x4e,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x4e,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_o_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x4e,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x4e,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_o_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x4e,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_o_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x4e,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_u_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x50,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_u_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x50,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_u_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x51,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x51,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_u_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x50,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_u_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x50,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_u_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x50,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_u_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x50,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_u_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x50,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_u_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x50,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_u_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x50,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_u_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x50,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_u_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x50,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_u_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x50,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x50,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_u_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x50,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x50,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_u_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x50,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x50,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_u_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x50,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x50,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_u_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x50,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x50,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_u_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x50,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x50,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_u_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x50,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_u_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x50,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_nge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x52,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_nge_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x52,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_nge_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x53,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x53,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_nge_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x52,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_nge_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x52,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_nge_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x52,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_nge_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x52,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_nge_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x52,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_nge_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x52,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_nge_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x52,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_nge_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x52,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_nge_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x52,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_nge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x52,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x52,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_nge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x52,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x52,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_nge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x52,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x52,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_nge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x52,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x52,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_nge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x52,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x52,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_nge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x52,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x52,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_nge_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x52,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_nge_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x52,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_nlg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x54,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_nlg_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x54,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_nlg_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x55,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x55,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_nlg_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x54,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_nlg_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x54,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_nlg_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x54,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_nlg_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x54,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_nlg_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x54,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_nlg_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x54,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_nlg_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x54,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_nlg_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x54,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_nlg_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x54,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_nlg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x54,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x54,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_nlg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x54,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x54,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_nlg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x54,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x54,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_nlg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x54,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x54,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_nlg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x54,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x54,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_nlg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x54,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x54,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_nlg_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x54,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_nlg_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x54,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_ngt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x56,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ngt_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x56,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_ngt_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x57,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x57,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ngt_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x56,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_ngt_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x56,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_ngt_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x56,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_ngt_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x56,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_ngt_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x56,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_ngt_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x56,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_ngt_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x56,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_ngt_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x56,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_ngt_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x56,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_ngt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x56,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x56,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_ngt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x56,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x56,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_ngt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x56,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x56,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_ngt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x56,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x56,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_ngt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x56,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x56,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_ngt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x56,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x56,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_ngt_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x56,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_ngt_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x56,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_nle_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x58,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_nle_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x58,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_nle_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x59,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x59,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_nle_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x58,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_nle_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x58,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_nle_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x58,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_nle_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x58,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_nle_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x58,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_nle_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x58,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_nle_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x58,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_nle_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x58,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_nle_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x58,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_nle_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x58,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x58,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_nle_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x58,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x58,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_nle_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x58,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x58,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_nle_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x58,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x58,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_nle_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x58,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x58,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_nle_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x58,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x58,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_nle_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x58,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_nle_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x58,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_neq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x5a,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_neq_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x5a,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_neq_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x5b,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x5b,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_neq_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x5a,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_neq_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x5a,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_neq_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x5a,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_neq_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x5a,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_neq_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x5a,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_neq_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x5a,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_neq_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x5a,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_neq_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x5a,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_neq_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x5a,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_neq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x5a,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x5a,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_neq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x5a,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x5a,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_neq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x5a,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x5a,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_neq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x5a,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x5a,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_neq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x5a,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x5a,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_neq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x5a,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x5a,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_neq_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x5a,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_neq_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x5a,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_nlt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x5c,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_nlt_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x5c,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_nlt_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x5d,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x5d,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_nlt_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x5c,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_nlt_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x5c,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_nlt_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x5c,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_nlt_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x5c,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_nlt_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x5c,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_nlt_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x5c,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_nlt_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x5c,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_nlt_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x5c,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_nlt_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x5c,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_nlt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x5c,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x5c,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_nlt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x5c,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x5c,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_nlt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x5c,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x5c,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_nlt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x5c,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x5c,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_nlt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x5c,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x5c,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_nlt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x5c,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x5c,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_nlt_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x5c,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_nlt_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x5c,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_tru_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x5e,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_tru_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x5e,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_tru_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x5f,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x5f,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_tru_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x5e,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_tru_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x5e,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_tru_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x5e,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_tru_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x5e,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_tru_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x5e,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_tru_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x5e,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_tru_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x5e,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_tru_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x5e,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_tru_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x5e,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_tru_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x5e,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x5e,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_tru_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x5e,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x5e,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_tru_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x5e,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x5e,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_tru_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x5e,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x5e,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_tru_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x5e,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x5e,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_tru_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x5e,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x5e,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_tru_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x5e,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_tru_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x5e,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_f_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x60,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_f_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x60,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_f_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x61,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x61,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_f_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x60,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_f_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x60,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_f_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x60,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_f_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x60,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_f_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x60,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_f_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x60,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_f_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x60,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_f_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x60,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_f_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x60,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_f_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x60,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x60,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_f_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x60,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x60,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_f_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x60,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x60,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_f_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x60,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x60,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_f_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x60,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x60,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_f_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x60,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x60,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_f_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x60,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_f_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x60,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_lt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x62,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lt_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x62,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lt_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x63,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x63,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lt_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x62,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_lt_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x62,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_lt_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x62,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_lt_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x62,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_lt_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x62,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_lt_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x62,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_lt_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x62,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_lt_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x62,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_lt_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x62,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_lt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x62,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x62,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_lt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x62,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x62,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_lt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x62,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x62,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_lt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x62,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x62,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_lt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x62,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x62,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_lt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x62,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x62,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_lt_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x62,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_lt_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x62,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_eq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x64,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_eq_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x64,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_eq_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x65,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x65,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_eq_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x64,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_eq_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x64,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_eq_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x64,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_eq_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x64,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_eq_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x64,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_eq_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x64,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_eq_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x64,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_eq_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x64,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_eq_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x64,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_eq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x64,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x64,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_eq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x64,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x64,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_eq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x64,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x64,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_eq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x64,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x64,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_eq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x64,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x64,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_eq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x64,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x64,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_eq_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x64,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_eq_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x64,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_le_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x66,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_le_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x66,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_le_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x67,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x67,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_le_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x66,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_le_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x66,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_le_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x66,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_le_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x66,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_le_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x66,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_le_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x66,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_le_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x66,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_le_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x66,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_le_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x66,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_le_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x66,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x66,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_le_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x66,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x66,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_le_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x66,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x66,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_le_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x66,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x66,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_le_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x66,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x66,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_le_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x66,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x66,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_le_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x66,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_le_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x66,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_gt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x68,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_gt_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x68,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_gt_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x69,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x69,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_gt_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x68,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_gt_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x68,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_gt_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x68,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_gt_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x68,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_gt_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x68,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_gt_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x68,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_gt_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x68,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_gt_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x68,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_gt_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x68,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_gt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x68,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x68,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_gt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x68,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x68,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_gt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x68,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x68,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_gt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x68,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x68,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_gt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x68,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x68,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_gt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x68,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x68,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_gt_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x68,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_gt_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x68,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_lg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x6a,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lg_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x6a,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lg_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x6b,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x6b,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lg_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x6a,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_lg_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x6a,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_lg_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x6a,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_lg_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x6a,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_lg_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x6a,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_lg_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x6a,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_lg_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x6a,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_lg_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x6a,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_lg_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x6a,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_lg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x6a,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x6a,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_lg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x6a,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x6a,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_lg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x6a,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x6a,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_lg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x6a,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x6a,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_lg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x6a,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x6a,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_lg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x6a,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x6a,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_lg_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x6a,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_lg_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x6a,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_ge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x6c,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ge_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x6c,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ge_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x6d,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x6d,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ge_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x6c,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_ge_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x6c,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_ge_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x6c,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_ge_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x6c,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_ge_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x6c,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_ge_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x6c,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_ge_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x6c,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_ge_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x6c,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_ge_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x6c,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_ge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x6c,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x6c,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_ge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x6c,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x6c,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_ge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x6c,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x6c,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_ge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x6c,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x6c,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_ge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x6c,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x6c,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_ge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x6c,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x6c,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_ge_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x6c,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_ge_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x6c,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_o_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x6e,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_o_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x6e,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_o_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x6f,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x6f,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_o_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x6e,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_o_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x6e,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_o_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x6e,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_o_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x6e,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_o_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x6e,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_o_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x6e,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_o_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x6e,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_o_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x6e,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_o_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x6e,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_o_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x6e,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x6e,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_o_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x6e,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x6e,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_o_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x6e,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x6e,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_o_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x6e,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x6e,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_o_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x6e,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x6e,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_o_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x6e,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x6e,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_o_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x6e,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_o_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x6e,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_u_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x70,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_u_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x70,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_u_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x71,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x71,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_u_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x70,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_u_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x70,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_u_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x70,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_u_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x70,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_u_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x70,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_u_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x70,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_u_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x70,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_u_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x70,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_u_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x70,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_u_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x70,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x70,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_u_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x70,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x70,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_u_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x70,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x70,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_u_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x70,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x70,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_u_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x70,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x70,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_u_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x70,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x70,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_u_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x70,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_u_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x70,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_nge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x72,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nge_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x72,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nge_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x73,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x73,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nge_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x72,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_nge_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x72,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_nge_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x72,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_nge_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x72,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_nge_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x72,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_nge_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x72,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_nge_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x72,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_nge_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x72,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_nge_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x72,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_nge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x72,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x72,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_nge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x72,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x72,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_nge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x72,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x72,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_nge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x72,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x72,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_nge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x72,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x72,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_nge_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x72,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x72,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_nge_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x72,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_nge_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x72,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_nlg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x74,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nlg_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x74,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nlg_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x75,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x75,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nlg_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x74,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_nlg_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x74,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_nlg_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x74,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_nlg_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x74,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_nlg_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x74,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_nlg_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x74,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_nlg_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x74,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_nlg_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x74,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_nlg_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x74,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_nlg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x74,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x74,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_nlg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x74,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x74,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_nlg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x74,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x74,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_nlg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x74,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x74,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_nlg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x74,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x74,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_nlg_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x74,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x74,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_nlg_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x74,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_nlg_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x74,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_ngt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x76,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ngt_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x76,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ngt_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x77,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x77,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ngt_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x76,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_ngt_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x76,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_ngt_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x76,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_ngt_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x76,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_ngt_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x76,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_ngt_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x76,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_ngt_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x76,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_ngt_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x76,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_ngt_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x76,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_ngt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x76,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x76,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_ngt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x76,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x76,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_ngt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x76,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x76,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_ngt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x76,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x76,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_ngt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x76,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x76,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_ngt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x76,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x76,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_ngt_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x76,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_ngt_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x76,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_nle_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x78,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nle_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x78,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nle_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x79,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x79,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nle_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x78,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_nle_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x78,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_nle_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x78,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_nle_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x78,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_nle_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x78,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_nle_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x78,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_nle_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x78,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_nle_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x78,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_nle_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x78,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_nle_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x78,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x78,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_nle_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x78,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x78,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_nle_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x78,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x78,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_nle_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x78,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x78,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_nle_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x78,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x78,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_nle_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x78,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x78,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_nle_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x78,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_nle_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x78,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_neq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x7a,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_neq_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x7a,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_neq_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x7b,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x7b,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_neq_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x7a,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_neq_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x7a,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_neq_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x7a,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_neq_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x7a,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_neq_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x7a,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_neq_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x7a,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_neq_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x7a,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_neq_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x7a,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_neq_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x7a,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_neq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x7a,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x7a,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_neq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x7a,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x7a,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_neq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x7a,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x7a,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_neq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x7a,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x7a,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_neq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x7a,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x7a,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_neq_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x7a,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x7a,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_neq_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x7a,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_neq_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x7a,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_nlt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x7c,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nlt_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x7c,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nlt_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x7d,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x7d,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nlt_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x7c,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_nlt_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x7c,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_nlt_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x7c,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_nlt_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x7c,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_nlt_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x7c,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_nlt_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x7c,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_nlt_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x7c,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_nlt_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x7c,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_nlt_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x7c,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_nlt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x7c,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x7c,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_nlt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x7c,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x7c,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_nlt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x7c,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x7c,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_nlt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x7c,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x7c,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_nlt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x7c,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x7c,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_nlt_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x7c,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x7c,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_nlt_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x7c,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_nlt_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x7c,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_tru_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x7e,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_tru_f16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x7e,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_tru_f16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x7f,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x7f,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_tru_f16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x7e,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_tru_f16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x7e,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_tru_f16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x7e,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_tru_f16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x7e,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_tru_f16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x7e,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_tru_f16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x7e,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_tru_f16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x7e,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_tru_f16 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x7e,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_tru_f16 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x7e,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_tru_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x7e,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x7e,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_tru_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x7e,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x7e,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_tru_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x7e,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x7e,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_tru_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x7e,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x7e,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_tru_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x7e,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x7e,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_tru_f16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x7e,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x7e,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_tru_f16 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x7e,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_tru_f16 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x7e,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_f_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x80,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_f_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x80,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_f_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x81,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x81,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_f_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x80,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_f_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x80,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_f_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x80,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_f_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x80,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_f_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x80,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_f_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x80,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_f_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x80,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_f_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x80,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_f_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x80,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_f_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x80,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x80,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_f_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x80,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x80,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_f_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x80,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x80,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_f_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x80,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x80,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_f_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x80,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x80,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_f_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x80,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x80,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_f_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x80,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_f_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x80,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_lt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x82,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_lt_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x82,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_lt_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x83,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x83,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_lt_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x82,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_lt_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x82,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_lt_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x82,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_lt_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x82,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_lt_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x82,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_lt_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x82,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_lt_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x82,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_lt_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x82,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_lt_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x82,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_lt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x82,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x82,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_lt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x82,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x82,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_lt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x82,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x82,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_lt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x82,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x82,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_lt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x82,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x82,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_lt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x82,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x82,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_lt_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x82,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_lt_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x82,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_eq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x84,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_eq_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x84,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_eq_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x85,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x85,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_eq_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x84,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_eq_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x84,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_eq_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x84,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_eq_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x84,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_eq_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x84,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_eq_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x84,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_eq_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x84,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_eq_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x84,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_eq_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x84,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_eq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x84,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x84,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_eq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x84,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x84,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_eq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x84,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x84,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_eq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x84,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x84,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_eq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x84,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x84,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_eq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x84,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x84,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_eq_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x84,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_eq_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x84,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_le_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x86,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_le_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x86,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_le_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x87,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x87,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_le_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x86,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_le_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x86,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_le_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x86,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_le_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x86,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_le_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x86,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_le_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x86,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_le_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x86,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_le_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x86,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_le_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x86,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_le_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x86,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x86,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_le_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x86,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x86,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_le_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x86,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x86,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_le_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x86,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x86,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_le_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x86,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x86,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_le_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x86,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x86,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_le_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x86,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_le_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x86,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_gt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x88,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_gt_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x88,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_gt_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x89,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x89,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_gt_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x88,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_gt_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x88,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_gt_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x88,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_gt_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x88,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_gt_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x88,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_gt_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x88,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_gt_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x88,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_gt_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x88,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_gt_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x88,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_gt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x88,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x88,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_gt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x88,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x88,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_gt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x88,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x88,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_gt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x88,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x88,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_gt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x88,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x88,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_gt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x88,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x88,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_gt_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x88,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_gt_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x88,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_lg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x8a,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_lg_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x8a,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_lg_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x8b,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x8b,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_lg_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x8a,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_lg_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x8a,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_lg_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x8a,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_lg_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x8a,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_lg_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x8a,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_lg_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x8a,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_lg_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x8a,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_lg_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x8a,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_lg_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x8a,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_lg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x8a,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x8a,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_lg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x8a,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x8a,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_lg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x8a,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x8a,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_lg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x8a,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x8a,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_lg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x8a,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x8a,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_lg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x8a,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x8a,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_lg_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x8a,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_lg_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x8a,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_ge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x8c,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ge_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x8c,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_ge_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x8d,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x8d,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ge_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x8c,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_ge_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x8c,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_ge_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x8c,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_ge_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x8c,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_ge_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x8c,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_ge_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x8c,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_ge_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x8c,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_ge_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x8c,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_ge_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x8c,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_ge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x8c,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x8c,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_ge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x8c,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x8c,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_ge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x8c,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x8c,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_ge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x8c,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x8c,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_ge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x8c,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x8c,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_ge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x8c,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x8c,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_ge_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x8c,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_ge_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x8c,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_o_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x8e,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_o_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x8e,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_o_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x8f,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x8f,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_o_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x8e,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_o_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x8e,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_o_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x8e,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_o_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x8e,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_o_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x8e,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_o_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x8e,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_o_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x8e,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_o_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x8e,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_o_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x8e,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_o_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x8e,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x8e,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_o_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x8e,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x8e,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_o_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x8e,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x8e,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_o_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x8e,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x8e,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_o_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x8e,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x8e,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_o_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x8e,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x8e,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_o_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x8e,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_o_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x8e,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_u_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x90,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_u_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x90,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_u_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x91,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x91,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_u_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x90,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_u_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x90,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_u_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x90,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_u_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x90,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_u_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x90,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_u_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x90,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_u_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x90,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_u_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x90,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_u_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x90,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_u_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x90,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x90,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_u_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x90,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x90,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_u_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x90,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x90,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_u_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x90,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x90,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_u_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x90,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x90,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_u_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x90,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x90,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_u_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x90,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_u_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x90,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_nge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x92,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_nge_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x92,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_nge_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x93,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x93,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_nge_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x92,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_nge_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x92,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_nge_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x92,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_nge_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x92,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_nge_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x92,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_nge_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x92,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_nge_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x92,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_nge_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x92,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_nge_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x92,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_nge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x92,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x92,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_nge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x92,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x92,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_nge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x92,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x92,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_nge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x92,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x92,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_nge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x92,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x92,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_nge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x92,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x92,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_nge_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x92,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_nge_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x92,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_nlg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x94,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_nlg_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x94,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_nlg_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x95,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x95,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_nlg_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x94,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_nlg_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x94,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_nlg_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x94,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_nlg_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x94,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_nlg_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x94,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_nlg_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x94,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_nlg_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x94,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_nlg_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x94,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_nlg_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x94,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_nlg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x94,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x94,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_nlg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x94,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x94,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_nlg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x94,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x94,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_nlg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x94,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x94,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_nlg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x94,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x94,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_nlg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x94,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x94,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_nlg_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x94,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_nlg_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x94,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_ngt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x96,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ngt_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x96,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_ngt_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x97,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x97,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ngt_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x96,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_ngt_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x96,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_ngt_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x96,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_ngt_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x96,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_ngt_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x96,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_ngt_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x96,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_ngt_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x96,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_ngt_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x96,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_ngt_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x96,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_ngt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x96,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x96,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_ngt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x96,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x96,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_ngt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x96,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x96,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_ngt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x96,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x96,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_ngt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x96,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x96,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_ngt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x96,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x96,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_ngt_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x96,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_ngt_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x96,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_nle_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x98,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_nle_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x98,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_nle_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x99,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x99,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_nle_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x98,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_nle_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x98,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_nle_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x98,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_nle_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x98,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_nle_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x98,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_nle_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x98,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_nle_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x98,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_nle_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x98,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_nle_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x98,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_nle_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x98,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x98,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_nle_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x98,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x98,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_nle_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x98,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x98,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_nle_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x98,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x98,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_nle_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x98,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x98,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_nle_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x98,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x98,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_nle_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x98,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_nle_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x98,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_neq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x9a,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_neq_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x9a,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_neq_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x9b,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x9b,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_neq_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x9a,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_neq_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x9a,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_neq_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x9a,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_neq_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x9a,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_neq_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x9a,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_neq_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x9a,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_neq_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x9a,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_neq_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x9a,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_neq_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x9a,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_neq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x9a,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x9a,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_neq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x9a,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x9a,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_neq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x9a,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x9a,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_neq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x9a,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x9a,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_neq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x9a,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x9a,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_neq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x9a,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x9a,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_neq_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x9a,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_neq_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x9a,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_nlt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x9c,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_nlt_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x9c,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_nlt_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x9d,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x9d,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_nlt_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x9c,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_nlt_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x9c,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_nlt_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x9c,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_nlt_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x9c,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_nlt_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x9c,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_nlt_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x9c,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_nlt_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x9c,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_nlt_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x9c,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_nlt_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x9c,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_nlt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x9c,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x9c,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_nlt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x9c,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x9c,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_nlt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x9c,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x9c,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_nlt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x9c,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x9c,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_nlt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x9c,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x9c,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_nlt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x9c,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x9c,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_nlt_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x9c,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_nlt_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x9c,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_tru_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x9e,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_tru_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x9e,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_tru_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x9f,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x9f,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_tru_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x9e,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_tru_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x9e,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_tru_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x9e,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_tru_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x9e,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_tru_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x9e,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_tru_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x9e,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_tru_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x9e,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_tru_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0x9e,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmp_tru_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0x9e,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmp_tru_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x9e,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x9e,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_tru_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x9e,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x9e,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_tru_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x9e,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x9e,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_tru_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x9e,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x9e,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_tru_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x9e,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x9e,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_tru_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x9e,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x9e,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_tru_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0x9e,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmp_tru_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0x9e,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_f_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xa0,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_f_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xa0,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_f_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xa1,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xa1,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_f_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xa0,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_f_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xa0,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_f_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xa0,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_f_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xa0,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_f_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xa0,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_f_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xa0,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_f_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xa0,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_f_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0xa0,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_f_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0xa0,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_f_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xa0,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xa0,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_f_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xa0,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xa0,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_f_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xa0,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xa0,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_f_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xa0,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xa0,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_f_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xa0,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xa0,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_f_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xa0,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xa0,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_f_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0xa0,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_f_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0xa0,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_lt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xa2,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lt_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xa2,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lt_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xa3,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xa3,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lt_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xa2,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_lt_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xa2,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_lt_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xa2,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_lt_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xa2,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_lt_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xa2,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_lt_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xa2,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_lt_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xa2,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_lt_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0xa2,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_lt_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0xa2,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_lt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xa2,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xa2,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_lt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xa2,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xa2,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_lt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xa2,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xa2,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_lt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xa2,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xa2,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_lt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xa2,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xa2,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_lt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xa2,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xa2,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_lt_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0xa2,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_lt_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0xa2,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_eq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xa4,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_eq_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xa4,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_eq_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xa5,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xa5,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_eq_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xa4,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_eq_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xa4,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_eq_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xa4,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_eq_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xa4,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_eq_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xa4,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_eq_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xa4,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_eq_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xa4,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_eq_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0xa4,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_eq_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0xa4,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_eq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xa4,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xa4,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_eq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xa4,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xa4,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_eq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xa4,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xa4,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_eq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xa4,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xa4,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_eq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xa4,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xa4,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_eq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xa4,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xa4,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_eq_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0xa4,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_eq_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0xa4,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_le_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xa6,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_le_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xa6,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_le_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xa7,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xa7,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_le_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xa6,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_le_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xa6,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_le_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xa6,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_le_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xa6,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_le_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xa6,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_le_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xa6,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_le_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xa6,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_le_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0xa6,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_le_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0xa6,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_le_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xa6,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xa6,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_le_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xa6,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xa6,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_le_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xa6,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xa6,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_le_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xa6,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xa6,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_le_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xa6,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xa6,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_le_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xa6,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xa6,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_le_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0xa6,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_le_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0xa6,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_gt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xa8,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_gt_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xa8,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_gt_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xa9,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xa9,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_gt_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xa8,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_gt_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xa8,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_gt_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xa8,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_gt_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xa8,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_gt_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xa8,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_gt_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xa8,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_gt_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xa8,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_gt_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0xa8,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_gt_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0xa8,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_gt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xa8,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xa8,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_gt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xa8,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xa8,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_gt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xa8,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xa8,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_gt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xa8,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xa8,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_gt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xa8,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xa8,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_gt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xa8,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xa8,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_gt_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0xa8,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_gt_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0xa8,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_lg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xaa,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lg_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xaa,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lg_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xab,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xab,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lg_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xaa,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_lg_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xaa,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_lg_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xaa,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_lg_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xaa,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_lg_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xaa,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_lg_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xaa,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_lg_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xaa,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_lg_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0xaa,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_lg_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0xaa,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_lg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xaa,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xaa,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_lg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xaa,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xaa,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_lg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xaa,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xaa,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_lg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xaa,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xaa,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_lg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xaa,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xaa,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_lg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xaa,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xaa,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_lg_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0xaa,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_lg_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0xaa,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_ge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xac,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ge_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xac,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ge_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xad,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xad,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ge_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xac,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_ge_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xac,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_ge_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xac,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_ge_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xac,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_ge_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xac,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_ge_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xac,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_ge_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xac,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_ge_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0xac,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_ge_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0xac,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_ge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xac,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xac,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_ge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xac,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xac,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_ge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xac,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xac,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_ge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xac,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xac,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_ge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xac,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xac,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_ge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xac,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xac,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_ge_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0xac,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_ge_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0xac,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_o_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xae,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_o_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xae,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_o_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xaf,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xaf,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_o_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xae,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_o_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xae,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_o_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xae,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_o_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xae,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_o_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xae,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_o_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xae,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_o_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xae,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_o_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0xae,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_o_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0xae,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_o_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xae,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xae,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_o_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xae,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xae,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_o_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xae,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xae,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_o_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xae,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xae,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_o_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xae,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xae,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_o_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xae,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xae,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_o_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0xae,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_o_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0xae,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_u_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xb0,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_u_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xb0,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_u_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xb1,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xb1,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_u_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xb0,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_u_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xb0,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_u_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xb0,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_u_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xb0,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_u_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xb0,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_u_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xb0,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_u_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xb0,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_u_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0xb0,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_u_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0xb0,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_u_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xb0,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xb0,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_u_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xb0,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xb0,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_u_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xb0,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xb0,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_u_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xb0,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xb0,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_u_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xb0,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xb0,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_u_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xb0,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xb0,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_u_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0xb0,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_u_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0xb0,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_nge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xb2,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nge_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xb2,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nge_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xb3,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xb3,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nge_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xb2,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_nge_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xb2,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_nge_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xb2,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_nge_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xb2,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_nge_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xb2,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_nge_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xb2,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_nge_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xb2,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_nge_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0xb2,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_nge_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0xb2,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_nge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xb2,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xb2,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_nge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xb2,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xb2,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_nge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xb2,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xb2,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_nge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xb2,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xb2,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_nge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xb2,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xb2,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_nge_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xb2,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xb2,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_nge_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0xb2,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_nge_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0xb2,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_nlg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xb4,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nlg_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xb4,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nlg_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xb5,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xb5,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nlg_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xb4,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_nlg_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xb4,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_nlg_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xb4,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_nlg_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xb4,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_nlg_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xb4,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_nlg_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xb4,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_nlg_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xb4,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_nlg_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0xb4,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_nlg_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0xb4,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_nlg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xb4,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xb4,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_nlg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xb4,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xb4,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_nlg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xb4,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xb4,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_nlg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xb4,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xb4,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_nlg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xb4,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xb4,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_nlg_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xb4,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xb4,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_nlg_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0xb4,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_nlg_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0xb4,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_ngt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xb6,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ngt_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xb6,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ngt_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xb7,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xb7,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ngt_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xb6,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_ngt_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xb6,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_ngt_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xb6,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_ngt_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xb6,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_ngt_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xb6,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_ngt_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xb6,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_ngt_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xb6,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_ngt_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0xb6,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_ngt_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0xb6,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_ngt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xb6,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xb6,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_ngt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xb6,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xb6,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_ngt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xb6,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xb6,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_ngt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xb6,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xb6,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_ngt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xb6,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xb6,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_ngt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xb6,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xb6,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_ngt_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0xb6,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_ngt_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0xb6,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_nle_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xb8,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nle_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xb8,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nle_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xb9,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xb9,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nle_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xb8,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_nle_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xb8,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_nle_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xb8,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_nle_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xb8,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_nle_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xb8,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_nle_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xb8,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_nle_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xb8,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_nle_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0xb8,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_nle_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0xb8,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_nle_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xb8,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xb8,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_nle_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xb8,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xb8,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_nle_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xb8,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xb8,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_nle_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xb8,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xb8,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_nle_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xb8,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xb8,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_nle_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xb8,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xb8,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_nle_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0xb8,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_nle_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0xb8,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_neq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xba,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_neq_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xba,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_neq_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xbb,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xbb,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_neq_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xba,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_neq_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xba,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_neq_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xba,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_neq_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xba,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_neq_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xba,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_neq_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xba,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_neq_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xba,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_neq_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0xba,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_neq_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0xba,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_neq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xba,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xba,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_neq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xba,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xba,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_neq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xba,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xba,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_neq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xba,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xba,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_neq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xba,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xba,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_neq_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xba,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xba,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_neq_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0xba,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_neq_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0xba,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_nlt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xbc,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nlt_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xbc,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nlt_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xbd,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xbd,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_nlt_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xbc,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_nlt_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xbc,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_nlt_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xbc,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_nlt_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xbc,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_nlt_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xbc,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_nlt_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xbc,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_nlt_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xbc,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_nlt_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0xbc,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_nlt_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0xbc,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_nlt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xbc,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xbc,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_nlt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xbc,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xbc,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_nlt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xbc,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xbc,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_nlt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xbc,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xbc,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_nlt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xbc,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xbc,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_nlt_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xbc,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xbc,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_nlt_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0xbc,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_nlt_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0xbc,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmpx_tru_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xbe,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_tru_f32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7c,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xbe,0x7c,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_tru_f32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xbf,0x7c,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xbf,0x7c,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_tru_f32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7c,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xbe,0x7c,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_tru_f32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7c,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xbe,0x7c,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_tru_f32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7c,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xbe,0x7c,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_tru_f32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7c,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xbe,0x7c,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_tru_f32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7c,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xbe,0x7c,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_tru_f32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7c,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xbe,0x7c,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_tru_f32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7c,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xbe,0x7c,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_tru_f32 vcc, -v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7c,0x00,0x16,0x16,0x06]
+0xf9,0x00,0xbe,0x7c,0x00,0x16,0x16,0x06
+
+# CHECK: v_cmpx_tru_f32 vcc, |v0|, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7c,0x00,0x16,0x26,0x06]
+0xf9,0x00,0xbe,0x7c,0x00,0x16,0x26,0x06
+
+# CHECK: v_cmpx_tru_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xbe,0x7c,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xbe,0x7c,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_tru_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xbe,0x7c,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xbe,0x7c,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_tru_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xbe,0x7c,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xbe,0x7c,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_tru_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xbe,0x7c,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xbe,0x7c,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_tru_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xbe,0x7c,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xbe,0x7c,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_tru_f32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xbe,0x7c,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xbe,0x7c,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_tru_f32 vcc, v0, -v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7c,0x00,0x16,0x06,0x16]
+0xf9,0x00,0xbe,0x7c,0x00,0x16,0x06,0x16
+
+# CHECK: v_cmpx_tru_f32 vcc, v0, |v0| src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7c,0x00,0x16,0x06,0x26]
+0xf9,0x00,0xbe,0x7c,0x00,0x16,0x06,0x26
+
+# CHECK: v_cmp_f_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x40,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_f_i16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x40,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_f_i16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x41,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x41,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_f_i16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x40,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_f_i16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x40,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_f_i16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x40,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_f_i16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x40,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_f_i16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x40,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_f_i16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x40,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_f_i16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x40,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_f_i16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x40,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_f_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x40,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x40,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_f_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x40,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x40,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_f_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x40,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x40,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_f_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x40,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x40,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_f_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x40,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x40,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_f_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x40,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x40,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_f_i16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x40,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x40,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_lt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x42,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_lt_i16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x42,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_lt_i16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x43,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x43,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_lt_i16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x42,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_lt_i16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x42,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_lt_i16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x42,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_lt_i16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x42,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_lt_i16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x42,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_lt_i16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x42,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_lt_i16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x42,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_lt_i16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x42,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_lt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x42,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x42,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_lt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x42,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x42,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_lt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x42,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x42,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_lt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x42,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x42,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_lt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x42,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x42,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_lt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x42,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x42,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_lt_i16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x42,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x42,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_eq_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x44,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_eq_i16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x44,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_eq_i16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x45,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x45,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_eq_i16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x44,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_eq_i16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x44,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_eq_i16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x44,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_eq_i16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x44,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_eq_i16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x44,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_eq_i16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x44,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_eq_i16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x44,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_eq_i16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x44,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_eq_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x44,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x44,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_eq_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x44,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x44,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_eq_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x44,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x44,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_eq_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x44,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x44,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_eq_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x44,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x44,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_eq_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x44,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x44,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_eq_i16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x44,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x44,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_le_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x46,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_le_i16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x46,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_le_i16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x47,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x47,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_le_i16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x46,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_le_i16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x46,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_le_i16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x46,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_le_i16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x46,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_le_i16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x46,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_le_i16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x46,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_le_i16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x46,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_le_i16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x46,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_le_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x46,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x46,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_le_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x46,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x46,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_le_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x46,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x46,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_le_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x46,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x46,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_le_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x46,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x46,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_le_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x46,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x46,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_le_i16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x46,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x46,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_gt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x48,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_gt_i16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x48,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_gt_i16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x49,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x49,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_gt_i16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x48,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_gt_i16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x48,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_gt_i16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x48,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_gt_i16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x48,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_gt_i16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x48,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_gt_i16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x48,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_gt_i16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x48,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_gt_i16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x48,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_gt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x48,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x48,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_gt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x48,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x48,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_gt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x48,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x48,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_gt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x48,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x48,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_gt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x48,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x48,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_gt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x48,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x48,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_gt_i16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x48,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x48,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_ne_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x4a,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ne_i16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x4a,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_ne_i16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x4b,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x4b,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ne_i16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x4a,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_ne_i16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x4a,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_ne_i16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x4a,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_ne_i16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x4a,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_ne_i16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x4a,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_ne_i16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x4a,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_ne_i16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x4a,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_ne_i16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x4a,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_ne_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x4a,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x4a,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_ne_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x4a,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x4a,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_ne_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x4a,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x4a,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_ne_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x4a,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x4a,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_ne_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x4a,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x4a,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_ne_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x4a,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x4a,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_ne_i16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4a,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x4a,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_ge_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x4c,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ge_i16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x4c,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_ge_i16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x4d,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x4d,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ge_i16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x4c,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_ge_i16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x4c,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_ge_i16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x4c,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_ge_i16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x4c,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_ge_i16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x4c,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_ge_i16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x4c,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_ge_i16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x4c,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_ge_i16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x4c,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_ge_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x4c,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x4c,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_ge_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x4c,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x4c,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_ge_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x4c,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x4c,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_ge_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x4c,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x4c,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_ge_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x4c,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x4c,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_ge_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x4c,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x4c,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_ge_i16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4c,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x4c,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_t_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x4e,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_t_i16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x4e,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_t_i16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x4f,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x4f,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_t_i16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x4e,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_t_i16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x4e,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_t_i16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x4e,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_t_i16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x4e,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_t_i16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x4e,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_t_i16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x4e,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_t_i16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x4e,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_t_i16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x4e,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_t_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x4e,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x4e,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_t_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x4e,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x4e,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_t_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x4e,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x4e,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_t_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x4e,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x4e,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_t_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x4e,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x4e,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_t_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x4e,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x4e,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_t_i16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x4e,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x4e,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_f_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x50,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_f_u16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x50,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_f_u16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x51,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x51,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_f_u16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x50,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_f_u16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x50,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_f_u16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x50,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_f_u16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x50,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_f_u16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x50,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_f_u16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x50,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_f_u16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x50,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_f_u16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x50,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_f_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x50,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x50,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_f_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x50,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x50,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_f_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x50,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x50,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_f_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x50,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x50,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_f_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x50,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x50,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_f_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x50,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x50,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_f_u16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x50,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x50,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_lt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x52,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_lt_u16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x52,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_lt_u16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x53,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x53,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_lt_u16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x52,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_lt_u16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x52,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_lt_u16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x52,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_lt_u16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x52,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_lt_u16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x52,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_lt_u16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x52,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_lt_u16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x52,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_lt_u16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x52,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_lt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x52,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x52,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_lt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x52,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x52,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_lt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x52,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x52,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_lt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x52,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x52,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_lt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x52,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x52,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_lt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x52,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x52,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_lt_u16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x52,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x52,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_eq_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x54,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_eq_u16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x54,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_eq_u16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x55,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x55,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_eq_u16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x54,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_eq_u16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x54,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_eq_u16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x54,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_eq_u16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x54,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_eq_u16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x54,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_eq_u16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x54,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_eq_u16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x54,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_eq_u16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x54,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_eq_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x54,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x54,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_eq_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x54,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x54,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_eq_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x54,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x54,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_eq_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x54,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x54,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_eq_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x54,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x54,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_eq_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x54,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x54,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_eq_u16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x54,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x54,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_le_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x56,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_le_u16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x56,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_le_u16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x57,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x57,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_le_u16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x56,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_le_u16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x56,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_le_u16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x56,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_le_u16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x56,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_le_u16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x56,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_le_u16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x56,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_le_u16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x56,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_le_u16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x56,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_le_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x56,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x56,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_le_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x56,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x56,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_le_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x56,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x56,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_le_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x56,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x56,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_le_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x56,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x56,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_le_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x56,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x56,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_le_u16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x56,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x56,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_gt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x58,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_gt_u16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x58,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_gt_u16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x59,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x59,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_gt_u16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x58,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_gt_u16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x58,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_gt_u16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x58,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_gt_u16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x58,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_gt_u16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x58,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_gt_u16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x58,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_gt_u16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x58,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_gt_u16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x58,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_gt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x58,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x58,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_gt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x58,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x58,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_gt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x58,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x58,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_gt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x58,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x58,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_gt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x58,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x58,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_gt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x58,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x58,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_gt_u16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x58,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x58,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_ne_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x5a,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ne_u16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x5a,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_ne_u16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x5b,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x5b,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ne_u16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x5a,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_ne_u16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x5a,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_ne_u16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x5a,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_ne_u16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x5a,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_ne_u16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x5a,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_ne_u16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x5a,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_ne_u16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x5a,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_ne_u16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x5a,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_ne_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x5a,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x5a,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_ne_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x5a,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x5a,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_ne_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x5a,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x5a,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_ne_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x5a,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x5a,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_ne_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x5a,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x5a,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_ne_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x5a,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x5a,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_ne_u16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5a,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x5a,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_ge_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x5c,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ge_u16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x5c,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_ge_u16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x5d,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x5d,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ge_u16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x5c,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_ge_u16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x5c,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_ge_u16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x5c,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_ge_u16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x5c,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_ge_u16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x5c,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_ge_u16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x5c,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_ge_u16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x5c,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_ge_u16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x5c,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_ge_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x5c,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x5c,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_ge_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x5c,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x5c,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_ge_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x5c,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x5c,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_ge_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x5c,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x5c,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_ge_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x5c,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x5c,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_ge_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x5c,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x5c,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_ge_u16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5c,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x5c,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_t_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x5e,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_t_u16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x5e,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_t_u16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x5f,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x5f,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_t_u16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x5e,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_t_u16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x5e,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_t_u16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x5e,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_t_u16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x5e,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_t_u16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x5e,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_t_u16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x5e,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_t_u16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x5e,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_t_u16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x5e,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_t_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x5e,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x5e,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_t_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x5e,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x5e,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_t_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x5e,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x5e,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_t_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x5e,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x5e,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_t_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x5e,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x5e,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_t_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x5e,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x5e,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_t_u16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x5e,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x5e,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_f_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x60,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_f_i16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x60,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_f_i16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x61,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x61,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_f_i16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x60,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_f_i16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x60,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_f_i16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x60,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_f_i16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x60,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_f_i16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x60,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_f_i16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x60,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_f_i16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x60,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_f_i16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x60,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_f_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x60,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x60,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_f_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x60,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x60,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_f_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x60,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x60,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_f_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x60,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x60,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_f_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x60,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x60,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_f_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x60,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x60,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_f_i16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x60,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x60,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_lt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x62,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lt_i16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x62,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lt_i16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x63,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x63,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lt_i16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x62,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_lt_i16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x62,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_lt_i16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x62,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_lt_i16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x62,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_lt_i16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x62,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_lt_i16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x62,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_lt_i16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x62,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_lt_i16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x62,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_lt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x62,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x62,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_lt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x62,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x62,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_lt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x62,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x62,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_lt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x62,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x62,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_lt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x62,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x62,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_lt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x62,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x62,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_lt_i16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x62,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x62,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_eq_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x64,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_eq_i16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x64,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_eq_i16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x65,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x65,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_eq_i16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x64,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_eq_i16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x64,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_eq_i16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x64,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_eq_i16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x64,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_eq_i16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x64,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_eq_i16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x64,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_eq_i16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x64,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_eq_i16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x64,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_eq_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x64,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x64,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_eq_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x64,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x64,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_eq_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x64,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x64,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_eq_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x64,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x64,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_eq_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x64,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x64,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_eq_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x64,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x64,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_eq_i16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x64,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x64,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_le_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x66,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_le_i16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x66,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_le_i16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x67,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x67,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_le_i16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x66,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_le_i16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x66,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_le_i16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x66,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_le_i16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x66,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_le_i16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x66,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_le_i16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x66,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_le_i16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x66,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_le_i16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x66,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_le_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x66,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x66,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_le_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x66,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x66,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_le_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x66,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x66,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_le_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x66,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x66,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_le_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x66,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x66,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_le_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x66,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x66,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_le_i16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x66,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x66,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_gt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x68,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_gt_i16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x68,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_gt_i16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x69,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x69,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_gt_i16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x68,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_gt_i16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x68,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_gt_i16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x68,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_gt_i16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x68,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_gt_i16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x68,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_gt_i16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x68,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_gt_i16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x68,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_gt_i16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x68,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_gt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x68,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x68,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_gt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x68,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x68,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_gt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x68,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x68,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_gt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x68,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x68,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_gt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x68,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x68,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_gt_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x68,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x68,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_gt_i16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x68,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x68,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_ne_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x6a,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ne_i16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x6a,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ne_i16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x6b,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x6b,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ne_i16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x6a,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_ne_i16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x6a,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_ne_i16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x6a,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_ne_i16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x6a,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_ne_i16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x6a,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_ne_i16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x6a,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_ne_i16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x6a,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_ne_i16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x6a,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_ne_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x6a,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x6a,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_ne_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x6a,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x6a,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_ne_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x6a,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x6a,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_ne_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x6a,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x6a,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_ne_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x6a,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x6a,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_ne_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x6a,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x6a,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_ne_i16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6a,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x6a,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_ge_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x6c,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ge_i16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x6c,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ge_i16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x6d,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x6d,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ge_i16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x6c,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_ge_i16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x6c,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_ge_i16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x6c,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_ge_i16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x6c,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_ge_i16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x6c,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_ge_i16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x6c,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_ge_i16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x6c,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_ge_i16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x6c,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_ge_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x6c,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x6c,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_ge_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x6c,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x6c,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_ge_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x6c,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x6c,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_ge_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x6c,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x6c,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_ge_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x6c,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x6c,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_ge_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x6c,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x6c,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_ge_i16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6c,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x6c,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_t_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x6e,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_t_i16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x6e,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_t_i16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x6f,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x6f,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_t_i16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x6e,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_t_i16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x6e,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_t_i16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x6e,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_t_i16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x6e,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_t_i16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x6e,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_t_i16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x6e,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_t_i16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x6e,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_t_i16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x6e,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_t_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x6e,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x6e,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_t_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x6e,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x6e,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_t_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x6e,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x6e,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_t_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x6e,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x6e,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_t_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x6e,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x6e,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_t_i16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x6e,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x6e,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_t_i16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x6e,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x6e,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_f_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x70,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_f_u16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x70,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_f_u16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x71,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x71,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_f_u16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x70,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_f_u16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x70,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_f_u16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x70,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_f_u16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x70,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_f_u16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x70,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_f_u16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x70,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_f_u16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x70,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_f_u16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x70,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_f_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x70,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x70,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_f_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x70,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x70,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_f_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x70,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x70,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_f_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x70,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x70,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_f_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x70,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x70,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_f_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x70,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x70,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_f_u16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x70,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x70,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_lt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x72,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lt_u16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x72,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lt_u16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x73,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x73,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lt_u16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x72,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_lt_u16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x72,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_lt_u16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x72,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_lt_u16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x72,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_lt_u16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x72,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_lt_u16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x72,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_lt_u16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x72,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_lt_u16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x72,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_lt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x72,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x72,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_lt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x72,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x72,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_lt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x72,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x72,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_lt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x72,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x72,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_lt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x72,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x72,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_lt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x72,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x72,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_lt_u16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x72,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x72,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_eq_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x74,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_eq_u16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x74,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_eq_u16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x75,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x75,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_eq_u16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x74,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_eq_u16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x74,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_eq_u16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x74,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_eq_u16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x74,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_eq_u16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x74,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_eq_u16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x74,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_eq_u16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x74,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_eq_u16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x74,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_eq_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x74,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x74,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_eq_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x74,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x74,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_eq_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x74,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x74,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_eq_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x74,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x74,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_eq_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x74,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x74,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_eq_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x74,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x74,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_eq_u16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x74,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x74,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_le_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x76,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_le_u16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x76,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_le_u16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x77,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x77,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_le_u16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x76,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_le_u16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x76,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_le_u16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x76,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_le_u16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x76,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_le_u16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x76,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_le_u16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x76,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_le_u16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x76,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_le_u16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x76,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_le_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x76,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x76,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_le_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x76,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x76,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_le_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x76,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x76,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_le_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x76,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x76,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_le_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x76,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x76,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_le_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x76,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x76,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_le_u16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x76,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x76,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_gt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x78,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_gt_u16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x78,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_gt_u16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x79,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x79,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_gt_u16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x78,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_gt_u16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x78,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_gt_u16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x78,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_gt_u16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x78,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_gt_u16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x78,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_gt_u16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x78,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_gt_u16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x78,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_gt_u16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x78,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_gt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x78,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x78,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_gt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x78,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x78,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_gt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x78,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x78,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_gt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x78,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x78,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_gt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x78,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x78,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_gt_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x78,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x78,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_gt_u16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x78,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x78,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_ne_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x7a,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ne_u16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x7a,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ne_u16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x7b,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x7b,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ne_u16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x7a,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_ne_u16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x7a,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_ne_u16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x7a,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_ne_u16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x7a,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_ne_u16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x7a,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_ne_u16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x7a,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_ne_u16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x7a,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_ne_u16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x7a,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_ne_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x7a,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x7a,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_ne_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x7a,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x7a,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_ne_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x7a,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x7a,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_ne_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x7a,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x7a,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_ne_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x7a,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x7a,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_ne_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x7a,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x7a,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_ne_u16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7a,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x7a,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_ge_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x7c,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ge_u16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x7c,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ge_u16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x7d,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x7d,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ge_u16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x7c,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_ge_u16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x7c,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_ge_u16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x7c,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_ge_u16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x7c,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_ge_u16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x7c,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_ge_u16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x7c,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_ge_u16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x7c,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_ge_u16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x7c,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_ge_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x7c,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x7c,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_ge_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x7c,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x7c,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_ge_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x7c,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x7c,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_ge_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x7c,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x7c,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_ge_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x7c,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x7c,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_ge_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x7c,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x7c,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_ge_u16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7c,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x7c,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_t_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x7e,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_t_u16 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x7e,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_t_u16 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x7f,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x7f,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_t_u16 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x7e,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_t_u16 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x7e,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_t_u16 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x7e,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_t_u16 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x7e,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_t_u16 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x7e,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_t_u16 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x7e,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_t_u16 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x7e,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_t_u16 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x7e,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_t_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x7e,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x7e,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_t_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x7e,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x7e,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_t_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x7e,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x7e,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_t_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x7e,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x7e,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_t_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x7e,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x7e,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_t_u16 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x7e,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x7e,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_t_u16 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x7e,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x7e,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_f_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x80,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_f_i32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x80,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_f_i32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x81,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x81,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_f_i32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x80,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_f_i32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x80,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_f_i32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x80,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_f_i32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x80,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_f_i32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x80,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_f_i32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x80,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_f_i32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x80,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_f_i32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x80,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_f_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x80,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x80,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_f_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x80,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x80,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_f_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x80,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x80,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_f_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x80,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x80,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_f_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x80,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x80,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_f_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x80,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x80,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_f_i32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x80,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x80,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_lt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x82,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_lt_i32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x82,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_lt_i32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x83,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x83,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_lt_i32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x82,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_lt_i32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x82,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_lt_i32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x82,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_lt_i32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x82,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_lt_i32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x82,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_lt_i32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x82,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_lt_i32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x82,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_lt_i32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x82,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_lt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x82,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x82,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_lt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x82,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x82,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_lt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x82,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x82,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_lt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x82,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x82,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_lt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x82,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x82,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_lt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x82,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x82,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_lt_i32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x82,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x82,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_eq_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x84,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_eq_i32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x84,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_eq_i32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x85,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x85,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_eq_i32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x84,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_eq_i32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x84,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_eq_i32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x84,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_eq_i32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x84,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_eq_i32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x84,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_eq_i32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x84,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_eq_i32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x84,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_eq_i32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x84,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_eq_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x84,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x84,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_eq_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x84,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x84,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_eq_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x84,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x84,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_eq_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x84,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x84,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_eq_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x84,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x84,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_eq_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x84,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x84,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_eq_i32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x84,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x84,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_le_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x86,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_le_i32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x86,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_le_i32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x87,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x87,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_le_i32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x86,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_le_i32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x86,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_le_i32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x86,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_le_i32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x86,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_le_i32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x86,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_le_i32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x86,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_le_i32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x86,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_le_i32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x86,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_le_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x86,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x86,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_le_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x86,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x86,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_le_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x86,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x86,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_le_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x86,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x86,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_le_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x86,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x86,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_le_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x86,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x86,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_le_i32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x86,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x86,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_gt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x88,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_gt_i32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x88,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_gt_i32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x89,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x89,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_gt_i32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x88,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_gt_i32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x88,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_gt_i32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x88,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_gt_i32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x88,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_gt_i32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x88,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_gt_i32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x88,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_gt_i32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x88,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_gt_i32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x88,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_gt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x88,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x88,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_gt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x88,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x88,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_gt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x88,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x88,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_gt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x88,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x88,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_gt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x88,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x88,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_gt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x88,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x88,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_gt_i32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x88,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x88,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_ne_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x8a,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ne_i32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x8a,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_ne_i32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x8b,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x8b,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ne_i32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x8a,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_ne_i32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x8a,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_ne_i32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x8a,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_ne_i32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x8a,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_ne_i32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x8a,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_ne_i32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x8a,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_ne_i32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x8a,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_ne_i32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x8a,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_ne_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x8a,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x8a,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_ne_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x8a,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x8a,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_ne_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x8a,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x8a,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_ne_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x8a,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x8a,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_ne_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x8a,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x8a,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_ne_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x8a,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x8a,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_ne_i32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8a,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x8a,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_ge_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x8c,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ge_i32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x8c,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_ge_i32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x8d,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x8d,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ge_i32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x8c,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_ge_i32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x8c,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_ge_i32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x8c,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_ge_i32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x8c,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_ge_i32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x8c,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_ge_i32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x8c,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_ge_i32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x8c,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_ge_i32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x8c,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_ge_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x8c,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x8c,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_ge_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x8c,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x8c,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_ge_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x8c,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x8c,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_ge_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x8c,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x8c,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_ge_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x8c,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x8c,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_ge_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x8c,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x8c,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_ge_i32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8c,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x8c,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_t_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x8e,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_t_i32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x8e,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_t_i32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x8f,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x8f,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_t_i32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x8e,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_t_i32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x8e,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_t_i32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x8e,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_t_i32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x8e,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_t_i32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x8e,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_t_i32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x8e,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_t_i32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x8e,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_t_i32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x8e,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_t_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x8e,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x8e,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_t_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x8e,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x8e,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_t_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x8e,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x8e,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_t_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x8e,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x8e,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_t_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x8e,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x8e,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_t_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x8e,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x8e,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_t_i32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x8e,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x8e,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_f_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x90,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_f_u32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x90,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_f_u32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x91,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x91,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_f_u32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x90,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_f_u32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x90,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_f_u32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x90,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_f_u32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x90,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_f_u32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x90,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_f_u32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x90,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_f_u32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x90,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_f_u32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x90,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_f_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x90,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x90,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_f_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x90,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x90,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_f_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x90,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x90,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_f_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x90,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x90,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_f_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x90,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x90,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_f_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x90,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x90,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_f_u32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x90,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x90,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_lt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x92,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_lt_u32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x92,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_lt_u32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x93,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x93,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_lt_u32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x92,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_lt_u32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x92,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_lt_u32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x92,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_lt_u32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x92,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_lt_u32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x92,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_lt_u32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x92,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_lt_u32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x92,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_lt_u32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x92,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_lt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x92,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x92,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_lt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x92,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x92,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_lt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x92,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x92,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_lt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x92,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x92,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_lt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x92,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x92,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_lt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x92,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x92,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_lt_u32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x92,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x92,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_eq_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x94,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_eq_u32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x94,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_eq_u32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x95,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x95,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_eq_u32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x94,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_eq_u32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x94,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_eq_u32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x94,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_eq_u32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x94,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_eq_u32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x94,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_eq_u32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x94,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_eq_u32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x94,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_eq_u32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x94,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_eq_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x94,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x94,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_eq_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x94,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x94,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_eq_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x94,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x94,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_eq_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x94,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x94,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_eq_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x94,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x94,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_eq_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x94,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x94,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_eq_u32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x94,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x94,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_le_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x96,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_le_u32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x96,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_le_u32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x97,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x97,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_le_u32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x96,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_le_u32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x96,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_le_u32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x96,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_le_u32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x96,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_le_u32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x96,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_le_u32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x96,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_le_u32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x96,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_le_u32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x96,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_le_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x96,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x96,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_le_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x96,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x96,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_le_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x96,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x96,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_le_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x96,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x96,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_le_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x96,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x96,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_le_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x96,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x96,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_le_u32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x96,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x96,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_gt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x98,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_gt_u32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x98,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_gt_u32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x99,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x99,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_gt_u32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x98,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_gt_u32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x98,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_gt_u32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x98,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_gt_u32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x98,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_gt_u32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x98,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_gt_u32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x98,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_gt_u32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x98,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_gt_u32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x98,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_gt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x98,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x98,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_gt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x98,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x98,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_gt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x98,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x98,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_gt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x98,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x98,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_gt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x98,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x98,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_gt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x98,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x98,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_gt_u32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x98,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x98,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_ne_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x9a,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ne_u32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x9a,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_ne_u32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x9b,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x9b,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ne_u32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x9a,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_ne_u32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x9a,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_ne_u32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x9a,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_ne_u32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x9a,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_ne_u32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x9a,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_ne_u32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x9a,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_ne_u32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x9a,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_ne_u32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x9a,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_ne_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x9a,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x9a,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_ne_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x9a,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x9a,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_ne_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x9a,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x9a,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_ne_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x9a,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x9a,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_ne_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x9a,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x9a,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_ne_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x9a,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x9a,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_ne_u32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9a,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x9a,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_ge_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x9c,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ge_u32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x9c,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_ge_u32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x9d,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x9d,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_ge_u32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x9c,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_ge_u32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x9c,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_ge_u32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x9c,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_ge_u32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x9c,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_ge_u32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x9c,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_ge_u32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x9c,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_ge_u32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x9c,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_ge_u32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x9c,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_ge_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x9c,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x9c,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_ge_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x9c,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x9c,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_ge_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x9c,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x9c,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_ge_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x9c,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x9c,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_ge_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x9c,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x9c,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_ge_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x9c,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x9c,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_ge_u32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9c,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x9c,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmp_t_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0x9e,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_t_u32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0x9e,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmp_t_u32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0x9f,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0x9f,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmp_t_u32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0x9e,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmp_t_u32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0x9e,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmp_t_u32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0x9e,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmp_t_u32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0x9e,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmp_t_u32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0x9e,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmp_t_u32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0x9e,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmp_t_u32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0x9e,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmp_t_u32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0x9e,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmp_t_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0x9e,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0x9e,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmp_t_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0x9e,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0x9e,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmp_t_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0x9e,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0x9e,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmp_t_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0x9e,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0x9e,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmp_t_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0x9e,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0x9e,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmp_t_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0x9e,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0x9e,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmp_t_u32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x9e,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0x9e,0x7d,0x00,0x16,0x06,0x0e
+
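+# The v_cmpx_* comparisons start here (opcode byte 2 jumps to 0xa0 for
+# v_cmpx_f_i32); unlike the plain v_cmp_* forms above, the cmpx forms write
+# the comparison result to EXEC as well as to VCC.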
+# CHECK: v_cmpx_f_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xa0,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_f_i32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xa0,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_f_i32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xa1,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xa1,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_f_i32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xa0,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_f_i32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xa0,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_f_i32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xa0,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_f_i32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xa0,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_f_i32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xa0,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_f_i32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xa0,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_f_i32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xa0,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_f_i32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0xa0,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_f_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xa0,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xa0,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_f_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xa0,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xa0,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_f_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xa0,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xa0,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_f_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xa0,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xa0,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_f_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xa0,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xa0,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_f_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xa0,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xa0,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_f_i32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa0,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0xa0,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_lt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xa2,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lt_i32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xa2,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lt_i32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xa3,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xa3,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lt_i32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xa2,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_lt_i32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xa2,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_lt_i32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xa2,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_lt_i32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xa2,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_lt_i32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xa2,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_lt_i32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xa2,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_lt_i32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xa2,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_lt_i32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0xa2,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_lt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xa2,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xa2,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_lt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xa2,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xa2,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_lt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xa2,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xa2,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_lt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xa2,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xa2,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_lt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xa2,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xa2,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_lt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xa2,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xa2,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_lt_i32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa2,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0xa2,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_eq_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xa4,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_eq_i32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xa4,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_eq_i32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xa5,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xa5,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_eq_i32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xa4,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_eq_i32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xa4,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_eq_i32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xa4,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_eq_i32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xa4,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_eq_i32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xa4,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_eq_i32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xa4,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_eq_i32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xa4,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_eq_i32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0xa4,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_eq_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xa4,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xa4,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_eq_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xa4,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xa4,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_eq_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xa4,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xa4,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_eq_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xa4,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xa4,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_eq_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xa4,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xa4,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_eq_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xa4,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xa4,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_eq_i32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa4,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0xa4,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_le_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xa6,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_le_i32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xa6,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_le_i32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xa7,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xa7,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_le_i32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xa6,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_le_i32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xa6,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_le_i32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xa6,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_le_i32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xa6,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_le_i32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xa6,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_le_i32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xa6,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_le_i32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xa6,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_le_i32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0xa6,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_le_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xa6,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xa6,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_le_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xa6,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xa6,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_le_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xa6,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xa6,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_le_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xa6,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xa6,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_le_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xa6,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xa6,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_le_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xa6,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xa6,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_le_i32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa6,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0xa6,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_gt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xa8,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_gt_i32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xa8,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_gt_i32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xa9,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xa9,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_gt_i32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xa8,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_gt_i32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xa8,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_gt_i32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xa8,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_gt_i32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xa8,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_gt_i32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xa8,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_gt_i32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xa8,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_gt_i32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xa8,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_gt_i32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0xa8,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_gt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xa8,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xa8,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_gt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xa8,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xa8,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_gt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xa8,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xa8,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_gt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xa8,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xa8,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_gt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xa8,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xa8,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_gt_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xa8,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xa8,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_gt_i32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xa8,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0xa8,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_ne_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xaa,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ne_i32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xaa,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ne_i32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xab,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xab,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ne_i32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xaa,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_ne_i32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xaa,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_ne_i32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xaa,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_ne_i32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xaa,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_ne_i32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xaa,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_ne_i32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xaa,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_ne_i32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xaa,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_ne_i32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0xaa,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_ne_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xaa,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xaa,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_ne_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xaa,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xaa,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_ne_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xaa,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xaa,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_ne_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xaa,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xaa,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_ne_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xaa,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xaa,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_ne_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xaa,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xaa,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_ne_i32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xaa,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0xaa,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_ge_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xac,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ge_i32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xac,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ge_i32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xad,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xad,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ge_i32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xac,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_ge_i32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xac,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_ge_i32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xac,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_ge_i32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xac,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_ge_i32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xac,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_ge_i32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xac,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_ge_i32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xac,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_ge_i32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0xac,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_ge_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xac,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xac,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_ge_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xac,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xac,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_ge_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xac,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xac,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_ge_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xac,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xac,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_ge_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xac,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xac,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_ge_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xac,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xac,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_ge_i32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xac,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0xac,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_t_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xae,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_t_i32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xae,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_t_i32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xaf,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xaf,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_t_i32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xae,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_t_i32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xae,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_t_i32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xae,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_t_i32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xae,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_t_i32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xae,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_t_i32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xae,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_t_i32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xae,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_t_i32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0xae,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_t_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xae,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xae,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_t_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xae,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xae,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_t_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xae,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xae,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_t_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xae,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xae,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_t_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xae,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xae,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_t_i32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xae,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xae,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_t_i32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xae,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0xae,0x7d,0x00,0x16,0x06,0x0e
+
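+# Unsigned v_cmpx_*_u32 group: opcode byte 2 continues at 0xb0, stepping by 2
+# per comparison in the same f, lt, eq, le, gt, ne, ge, t order as the i32
+# group above.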
+# CHECK: v_cmpx_f_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xb0,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_f_u32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xb0,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_f_u32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xb1,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xb1,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_f_u32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xb0,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_f_u32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xb0,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_f_u32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xb0,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_f_u32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xb0,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_f_u32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xb0,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_f_u32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xb0,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_f_u32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xb0,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_f_u32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0xb0,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_f_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xb0,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xb0,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_f_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xb0,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xb0,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_f_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xb0,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xb0,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_f_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xb0,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xb0,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_f_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xb0,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xb0,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_f_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xb0,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xb0,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_f_u32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb0,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0xb0,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_lt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xb2,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lt_u32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xb2,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lt_u32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xb3,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xb3,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_lt_u32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xb2,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_lt_u32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xb2,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_lt_u32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xb2,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_lt_u32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xb2,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_lt_u32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xb2,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_lt_u32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xb2,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_lt_u32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xb2,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_lt_u32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0xb2,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_lt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xb2,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xb2,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_lt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xb2,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xb2,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_lt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xb2,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xb2,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_lt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xb2,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xb2,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_lt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xb2,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xb2,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_lt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xb2,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xb2,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_lt_u32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb2,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0xb2,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_eq_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xb4,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_eq_u32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xb4,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_eq_u32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xb5,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xb5,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_eq_u32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xb4,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_eq_u32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xb4,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_eq_u32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xb4,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_eq_u32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xb4,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_eq_u32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xb4,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_eq_u32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xb4,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_eq_u32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xb4,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_eq_u32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0xb4,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_eq_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xb4,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xb4,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_eq_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xb4,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xb4,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_eq_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xb4,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xb4,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_eq_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xb4,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xb4,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_eq_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xb4,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xb4,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_eq_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xb4,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xb4,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_eq_u32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb4,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0xb4,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_le_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xb6,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_le_u32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xb6,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_le_u32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xb7,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xb7,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_le_u32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xb6,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_le_u32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xb6,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_le_u32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xb6,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_le_u32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xb6,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_le_u32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xb6,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_le_u32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xb6,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_le_u32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xb6,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_le_u32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0xb6,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_le_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xb6,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xb6,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_le_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xb6,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xb6,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_le_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xb6,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xb6,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_le_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xb6,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xb6,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_le_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xb6,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xb6,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_le_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xb6,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xb6,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_le_u32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb6,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0xb6,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_gt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xb8,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_gt_u32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xb8,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_gt_u32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xb9,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xb9,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_gt_u32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xb8,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_gt_u32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xb8,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_gt_u32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xb8,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_gt_u32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xb8,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_gt_u32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xb8,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_gt_u32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xb8,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_gt_u32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xb8,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_gt_u32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0xb8,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_gt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xb8,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xb8,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_gt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xb8,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xb8,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_gt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xb8,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xb8,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_gt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xb8,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xb8,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_gt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xb8,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xb8,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_gt_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xb8,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xb8,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_gt_u32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xb8,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0xb8,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_ne_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xba,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ne_u32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xba,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ne_u32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xbb,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xbb,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ne_u32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xba,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_ne_u32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xba,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_ne_u32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xba,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_ne_u32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xba,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_ne_u32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xba,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_ne_u32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xba,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_ne_u32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xba,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_ne_u32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0xba,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_ne_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xba,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xba,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_ne_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xba,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xba,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_ne_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xba,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xba,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_ne_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xba,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xba,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_ne_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xba,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xba,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_ne_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xba,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xba,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_ne_u32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xba,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0xba,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_ge_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xbc,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ge_u32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xbc,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ge_u32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xbd,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xbd,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_ge_u32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xbc,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_ge_u32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xbc,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_ge_u32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xbc,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_ge_u32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xbc,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_ge_u32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xbc,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_ge_u32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xbc,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_ge_u32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xbc,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_ge_u32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0xbc,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_ge_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xbc,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xbc,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_ge_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xbc,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xbc,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_ge_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xbc,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xbc,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_ge_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xbc,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xbc,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_ge_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xbc,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xbc,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_ge_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xbc,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xbc,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_ge_u32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbc,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0xbc,0x7d,0x00,0x16,0x06,0x0e
+
+# CHECK: v_cmpx_t_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0x00,0xbe,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_t_u32 vcc, v255, v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7d,0xff,0x16,0x06,0x06]
+0xf9,0x00,0xbe,0x7d,0xff,0x16,0x06,0x06
+
+# CHECK: v_cmpx_t_u32 vcc, v0, v255 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfe,0xbf,0x7d,0x00,0x16,0x06,0x06]
+0xf9,0xfe,0xbf,0x7d,0x00,0x16,0x06,0x06
+
+# CHECK: v_cmpx_t_u32 vcc, v0, v0 clamp src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7d,0x00,0x36,0x06,0x06]
+0xf9,0x00,0xbe,0x7d,0x00,0x36,0x06,0x06
+
+# CHECK: v_cmpx_t_u32 vcc, v0, v0 src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7d,0x00,0x16,0x00,0x06]
+0xf9,0x00,0xbe,0x7d,0x00,0x16,0x00,0x06
+
+# CHECK: v_cmpx_t_u32 vcc, v0, v0 src0_sel:BYTE_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7d,0x00,0x16,0x01,0x06]
+0xf9,0x00,0xbe,0x7d,0x00,0x16,0x01,0x06
+
+# CHECK: v_cmpx_t_u32 vcc, v0, v0 src0_sel:BYTE_2 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7d,0x00,0x16,0x02,0x06]
+0xf9,0x00,0xbe,0x7d,0x00,0x16,0x02,0x06
+
+# CHECK: v_cmpx_t_u32 vcc, v0, v0 src0_sel:BYTE_3 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7d,0x00,0x16,0x03,0x06]
+0xf9,0x00,0xbe,0x7d,0x00,0x16,0x03,0x06
+
+# CHECK: v_cmpx_t_u32 vcc, v0, v0 src0_sel:WORD_0 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7d,0x00,0x16,0x04,0x06]
+0xf9,0x00,0xbe,0x7d,0x00,0x16,0x04,0x06
+
+# CHECK: v_cmpx_t_u32 vcc, v0, v0 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7d,0x00,0x16,0x05,0x06]
+0xf9,0x00,0xbe,0x7d,0x00,0x16,0x05,0x06
+
+# CHECK: v_cmpx_t_u32 vcc, sext(v0), v0 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7d,0x00,0x16,0x0e,0x06]
+0xf9,0x00,0xbe,0x7d,0x00,0x16,0x0e,0x06
+
+# CHECK: v_cmpx_t_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_0 ; encoding: [0xf9,0x00,0xbe,0x7d,0x00,0x16,0x06,0x00]
+0xf9,0x00,0xbe,0x7d,0x00,0x16,0x06,0x00
+
+# CHECK: v_cmpx_t_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_1 ; encoding: [0xf9,0x00,0xbe,0x7d,0x00,0x16,0x06,0x01]
+0xf9,0x00,0xbe,0x7d,0x00,0x16,0x06,0x01
+
+# CHECK: v_cmpx_t_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_2 ; encoding: [0xf9,0x00,0xbe,0x7d,0x00,0x16,0x06,0x02]
+0xf9,0x00,0xbe,0x7d,0x00,0x16,0x06,0x02
+
+# CHECK: v_cmpx_t_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:BYTE_3 ; encoding: [0xf9,0x00,0xbe,0x7d,0x00,0x16,0x06,0x03]
+0xf9,0x00,0xbe,0x7d,0x00,0x16,0x06,0x03
+
+# CHECK: v_cmpx_t_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_0 ; encoding: [0xf9,0x00,0xbe,0x7d,0x00,0x16,0x06,0x04]
+0xf9,0x00,0xbe,0x7d,0x00,0x16,0x06,0x04
+
+# CHECK: v_cmpx_t_u32 vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1 ; encoding: [0xf9,0x00,0xbe,0x7d,0x00,0x16,0x06,0x05]
+0xf9,0x00,0xbe,0x7d,0x00,0x16,0x06,0x05
+
+# CHECK: v_cmpx_t_u32 vcc, v0, sext(v0) src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0xbe,0x7d,0x00,0x16,0x06,0x0e]
+0xf9,0x00,0xbe,0x7d,0x00,0x16,0x06,0x0e
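
The v_cmpx cases above all share one shape: the leading 0xf9 in the VOPC src0 field marks the instruction as SDWA, and the extra dword that follows carries the fields printed as src0_sel, src1_sel, clamp, and sext(). Below is a minimal Python sketch of pulling those fields back out of the second dword; the bit positions are assumptions from the VI SDWA layout (clamp in bit 13, src0_sel in bits 16-18 with its sext flag in bit 19, src1_sel in bits 24-26 with sext in bit 27), not something read out of the tool.

# Sketch only: field positions assumed from the VI SDWA layout.
SEL = ["BYTE_0", "BYTE_1", "BYTE_2", "BYTE_3", "WORD_0", "WORD_1", "DWORD"]

def decode_sdwa_dword(d):
    src0 = d & 0xFF                    # the src0 VGPR number moves into this dword
    clamp = (d >> 13) & 1
    src0_sel = SEL[(d >> 16) & 7]
    src0_sext = (d >> 19) & 1
    src1_sel = SEL[(d >> 24) & 7]
    src1_sext = (d >> 27) & 1
    return src0, clamp, src0_sel, src0_sext, src1_sel, src1_sext

# Second dword of the clamp case above (bytes 0x00,0x36,0x06,0x06, little-endian):
print(decode_sdwa_dword(0x06063600))   # (0, 1, 'DWORD', 0, 'DWORD', 0)

The sext() cases set bit 3 of the corresponding selector byte (0x0e instead of 0x06) while the selector field itself stays DWORD, which matches the disassembly above.
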
diff --git a/test/MC/Disassembler/AMDGPU/mac.txt b/test/MC/Disassembler/AMDGPU/mac.txt
new file mode 100644
index 000000000000..7f7f952655a4
--- /dev/null
+++ b/test/MC/Disassembler/AMDGPU/mac.txt
@@ -0,0 +1,19 @@
+# RUN: llvm-mc -arch=amdgcn -mcpu=tonga -disassemble -show-encoding < %s | FileCheck %s -check-prefix=VI
+
+# VI: v_mac_f32_e64 v0, v1, v2 mul:2 ; encoding: [0x00,0x00,0x16,0xd1,0x01,0x05,0x02,0x08]
+0x00 0x00 0x16 0xd1 0x01 0x05 0x02 0x08
+
+# VI: v_mac_f32_e64 v0, v1, v2 clamp ; encoding: [0x00,0x80,0x16,0xd1,0x01,0x05,0x02,0x00]
+0x00 0x80 0x16 0xd1 0x01 0x05 0x02 0x00
+
+# VI: v_mac_f32_e64 v0, v1, v2 clamp mul:2 ; encoding: [0x00,0x80,0x16,0xd1,0x01,0x05,0x02,0x08]
+0x00 0x80 0x16 0xd1 0x01 0x05 0x02 0x08
+
+# VI: v_mac_f16_e64 v0, v1, v2 mul:2 ; encoding: [0x00,0x00,0x23,0xd1,0x01,0x05,0x02,0x08]
+0x00 0x00 0x23 0xd1 0x01 0x05 0x02 0x08
+
+# VI: v_mac_f16_e64 v0, v1, v2 clamp ; encoding: [0x00,0x80,0x23,0xd1,0x01,0x05,0x02,0x00]
+0x00 0x80 0x23 0xd1 0x01 0x05 0x02 0x00
+
+# VI: v_mac_f16_e64 v0, v1, v2 clamp mul:2 ; encoding: [0x00,0x80,0x23,0xd1,0x01,0x05,0x02,0x08]
+0x00 0x80 0x23 0xd1 0x01 0x05 0x02 0x08
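
The mac.txt cases exercise the VOP3 output modifiers instead of SDWA: clamp sits in the low dword and omod (printed mul:2 when it equals 1) in the high dword, next to the per-source neg bits that account for the neg() cases added to vop3_vi.txt further down. A sketch of splitting a VI VOP3a encoding into those fields, with the bit positions assumed from the VI layout rather than taken from the disassembler itself:

# Sketch only: VOP3a field positions assumed from the VI layout.
OMOD = ["", "mul:2", "mul:4", "div:2"]

def decode_vop3a(lo, hi):
    vdst   = lo & 0xFF
    opcode = (lo >> 16) & 0x3FF
    clamp  = (lo >> 15) & 1
    src0   = hi & 0x1FF            # values >= 256 are VGPRs
    src1   = (hi >> 9) & 0x1FF
    omod   = OMOD[(hi >> 27) & 3]  # bits 59-60 of the 64-bit word
    neg    = (hi >> 29) & 7        # bits 61-63, one bit per source
    return vdst, hex(opcode), clamp, src0, src1, omod, neg

# v_mac_f32_e64 v0, v1, v2 clamp mul:2 above, as two little-endian dwords:
print(decode_vop3a(0xD1168000, 0x08020501))
# (0, '0x116', 1, 257, 258, 'mul:2', 0)

For the v_ceil_f32_e64 v0, neg(-1) case later in this patch the high dword is 0x200000C1, so src0 decodes to the inline-constant value 193 (-1) with the neg bit for source 0 set.
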
diff --git a/test/MC/Disassembler/AMDGPU/si-support.txt b/test/MC/Disassembler/AMDGPU/si-support.txt
new file mode 100644
index 000000000000..f3f5ab946eb3
--- /dev/null
+++ b/test/MC/Disassembler/AMDGPU/si-support.txt
@@ -0,0 +1,4 @@
+# RUN: not llvm-mc -arch=amdgcn -mcpu=tahiti -disassemble < %s 2>&1 | FileCheck %s
+
+# CHECK: LLVM ERROR: Disassembly not yet supported for subtarget
+0x00 0x00 0x00 0x7e
diff --git a/test/MC/Disassembler/AMDGPU/sop1_vi.txt b/test/MC/Disassembler/AMDGPU/sop1_vi.txt
index 49c030594e57..749783d3bf89 100644
--- a/test/MC/Disassembler/AMDGPU/sop1_vi.txt
+++ b/test/MC/Disassembler/AMDGPU/sop1_vi.txt
@@ -165,7 +165,7 @@
# VI: s_movreld_b64 s[2:3], s[4:5] ; encoding: [0x04,0x2d,0x82,0xbe]
0x04 0x2d 0x82 0xbe
-# VI: s_cbranch_join s[4:5] ; encoding: [0x04,0x2e,0x80,0xbe]
+# VI: s_cbranch_join s4 ; encoding: [0x04,0x2e,0x80,0xbe]
0x04 0x2e 0x80 0xbe
# VI: s_abs_i32 s1, s2 ; encoding: [0x02,0x30,0x81,0xbe]
diff --git a/test/MC/Disassembler/AMDGPU/vop1_gfx9.txt b/test/MC/Disassembler/AMDGPU/vop1_gfx9.txt
new file mode 100644
index 000000000000..370ba632ebca
--- /dev/null
+++ b/test/MC/Disassembler/AMDGPU/vop1_gfx9.txt
@@ -0,0 +1,4 @@
+# RUN: llvm-mc -arch=amdgcn -mcpu=gfx901 -disassemble -show-encoding < %s | FileCheck %s -check-prefix=GFX9
+
+# GFX9: v_swap_b32 v1, v2 ; encoding: [0x02,0xa3,0x02,0x7e]
+0x02 0xa3 0x02 0x7e
diff --git a/test/MC/Disassembler/AMDGPU/vop3_vi.txt b/test/MC/Disassembler/AMDGPU/vop3_vi.txt
index d28a231edf2c..c15fbaa1e3a8 100644
--- a/test/MC/Disassembler/AMDGPU/vop3_vi.txt
+++ b/test/MC/Disassembler/AMDGPU/vop3_vi.txt
@@ -215,3 +215,9 @@
# VI: v_mad_f32 v9, 0.5, v5, -v8 ; encoding: [0x09,0x00,0xc1,0xd1,0xf0,0x0a,0x22,0x84]
0x09 0x00 0xc1 0xd1 0xf0 0x0a 0x22 0x84
+
+# VI: v_ceil_f32_e64 v0, neg(-1) ; encoding: [0x00,0x00,0x5d,0xd1,0xc1,0x00,0x00,0x20]
+0x00,0x00,0x5d,0xd1,0xc1,0x00,0x00,0x20
+
+# VI: v_ceil_f32_e64 v0, neg(-1.0) ; encoding: [0x00,0x00,0x5d,0xd1,0xf3,0x00,0x00,0x20]
+0x00,0x00,0x5d,0xd1,0xf3,0x00,0x00,0x20
diff --git a/test/MC/Disassembler/Hexagon/alu32_alu.txt b/test/MC/Disassembler/Hexagon/alu32_alu.txt
index 26b320ecde00..e75a9982abd1 100644
--- a/test/MC/Disassembler/Hexagon/alu32_alu.txt
+++ b/test/MC/Disassembler/Hexagon/alu32_alu.txt
@@ -3,27 +3,27 @@
# Add
0xf1 0xc3 0x15 0xb0
-# CHECK: r17 = add(r21, #31)
+# CHECK: r17 = add(r21,#31)
0x11 0xdf 0x15 0xf3
-# CHECK: r17 = add(r21, r31)
+# CHECK: r17 = add(r21,r31)
0x11 0xdf 0x55 0xf6
-# CHECK: r17 = add(r21, r31):sat
+# CHECK: r17 = add(r21,r31):sat
# And
0xf1 0xc3 0x15 0x76
-# CHECK: r17 = and(r21, #31)
+# CHECK: r17 = and(r21,#31)
0xf1 0xc3 0x95 0x76
-# CHECK: r17 = or(r21, #31)
+# CHECK: r17 = or(r21,#31)
0x11 0xdf 0x15 0xf1
-# CHECK: r17 = and(r21, r31)
+# CHECK: r17 = and(r21,r31)
0x11 0xdf 0x35 0xf1
-# CHECK: r17 = or(r21, r31)
+# CHECK: r17 = or(r21,r31)
0x11 0xdf 0x75 0xf1
-# CHECK: r17 = xor(r21, r31)
+# CHECK: r17 = xor(r21,r31)
0x11 0xd5 0x9f 0xf1
-# CHECK: r17 = and(r21, ~r31)
+# CHECK: r17 = and(r21,~r31)
0x11 0xd5 0xbf 0xf1
-# CHECK: r17 = or(r21, ~r31)
+# CHECK: r17 = or(r21,~r31)
# Nop
0x00 0xc0 0x00 0x7f
@@ -31,11 +31,11 @@
# Subtract
0xb1 0xc2 0x5f 0x76
-# CHECK: r17 = sub(#21, r31)
+# CHECK: r17 = sub(#21,r31)
0x11 0xdf 0x35 0xf3
-# CHECK: r17 = sub(r31, r21)
+# CHECK: r17 = sub(r31,r21)
0x11 0xdf 0xd5 0xf6
-# CHECK: r17 = sub(r31, r21):sat
+# CHECK: r17 = sub(r31,r21):sat
# Sign extend
0x11 0xc0 0xbf 0x70
@@ -57,27 +57,27 @@
# Vector add halfwords
0x11 0xdf 0x15 0xf6
-# CHECK: r17 = vaddh(r21, r31)
+# CHECK: r17 = vaddh(r21,r31)
0x11 0xdf 0x35 0xf6
-# CHECK: r17 = vaddh(r21, r31):sat
+# CHECK: r17 = vaddh(r21,r31):sat
0x11 0xdf 0x75 0xf6
-# CHECK: r17 = vadduh(r21, r31):sat
+# CHECK: r17 = vadduh(r21,r31):sat
# Vector average halfwords
0x11 0xdf 0x15 0xf7
-# CHECK: r17 = vavgh(r21, r31)
+# CHECK: r17 = vavgh(r21,r31)
0x11 0xdf 0x35 0xf7
-# CHECK: r17 = vavgh(r21, r31):rnd
+# CHECK: r17 = vavgh(r21,r31):rnd
0x11 0xdf 0x75 0xf7
-# CHECK: r17 = vnavgh(r31, r21)
+# CHECK: r17 = vnavgh(r31,r21)
# Vector subtract halfwords
0x11 0xdf 0x95 0xf6
-# CHECK: r17 = vsubh(r31, r21)
+# CHECK: r17 = vsubh(r31,r21)
0x11 0xdf 0xb5 0xf6
-# CHECK: r17 = vsubh(r31, r21):sat
+# CHECK: r17 = vsubh(r31,r21):sat
0x11 0xdf 0xf5 0xf6
-# CHECK: r17 = vsubuh(r31, r21):sat
+# CHECK: r17 = vsubuh(r31,r21):sat
# Zero extend
0x11 0xc0 0xd5 0x70
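
From here on, every Hexagon hunk in this patch is the same mechanical edit: the disassembler stopped printing a space after the comma in operand lists (and, in the load/store files further down, now prints gp+# for GP-relative addresses and spaces around = in immediate stores). Updating thousands of CHECK lines by hand would be error-prone; a hedged sketch of the kind of throwaway script that could rewrite the comma spacing, assuming ", " on a CHECK line only ever separates operands:

import re
import sys

# Rewrite 'op(a, b)' to 'op(a,b)' on FileCheck directive lines only,
# leaving the hex input lines untouched.
check = re.compile(r"^# CHECK(-NEXT)?: ")

for line in sys.stdin:
    if check.match(line):
        line = line.replace(", ", ",")
    sys.stdout.write(line)

The memory-operand spacing ("r21 + #48" to "r21+#48", "r21 ++ m1:brev" to "r21++m1:brev") would need analogous substitutions; regenerating the expectations from the new disassembler output is the safer route.
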
diff --git a/test/MC/Disassembler/Hexagon/alu32_perm.txt b/test/MC/Disassembler/Hexagon/alu32_perm.txt
index a2953506c599..c4b1ab97963e 100644
--- a/test/MC/Disassembler/Hexagon/alu32_perm.txt
+++ b/test/MC/Disassembler/Hexagon/alu32_perm.txt
@@ -3,31 +3,31 @@
# Combine words in to doublewords
0x11 0xdf 0x95 0xf3
-# CHECK: r17 = combine(r31.h, r21.h)
+# CHECK: r17 = combine(r31.h,r21.h)
0x11 0xdf 0xb5 0xf3
-# CHECK: r17 = combine(r31.h, r21.l)
+# CHECK: r17 = combine(r31.h,r21.l)
0x11 0xdf 0xd5 0xf3
-# CHECK: r17 = combine(r31.l, r21.h)
+# CHECK: r17 = combine(r31.l,r21.h)
0x11 0xdf 0xf5 0xf3
-# CHECK: r17 = combine(r31.l, r21.l)
+# CHECK: r17 = combine(r31.l,r21.l)
0xb0 0xe2 0x0f 0x7c
-# CHECK: r17:16 = combine(#21, #31)
+# CHECK: r17:16 = combine(#21,#31)
0xb0 0xe2 0x3f 0x73
-# CHECK: r17:16 = combine(#21, r31)
+# CHECK: r17:16 = combine(#21,r31)
0xf0 0xe3 0x15 0x73
-# CHECK: r17:16 = combine(r21, #31)
+# CHECK: r17:16 = combine(r21,#31)
0x10 0xdf 0x15 0xf5
-# CHECK: r17:16 = combine(r21, r31)
+# CHECK: r17:16 = combine(r21,r31)
# Mux
0xf1 0xc3 0x75 0x73
-# CHECK: r17 = mux(p3, r21, #31)
+# CHECK: r17 = mux(p3,r21,#31)
0xb1 0xc2 0xff 0x73
-# CHECK: r17 = mux(p3, #21, r31)
+# CHECK: r17 = mux(p3,#21,r31)
0xb1 0xe2 0x8f 0x7b
-# CHECK: r17 = mux(p3, #21, #31)
+# CHECK: r17 = mux(p3,#21,#31)
0x71 0xdf 0x15 0xf4
-# CHECK: r17 = mux(p3, r21, r31)
+# CHECK: r17 = mux(p3,r21,r31)
# Shift word by 16
0x11 0xc0 0x15 0x70
@@ -37,4 +37,4 @@
# Pack high and low halfwords
0x10 0xdf 0x95 0xf5
-# CHECK: r17:16 = packhl(r21, r31)
+# CHECK: r17:16 = packhl(r21,r31)
diff --git a/test/MC/Disassembler/Hexagon/alu32_pred.txt b/test/MC/Disassembler/Hexagon/alu32_pred.txt
index 084b39d8cbf5..b9e111364e67 100644
--- a/test/MC/Disassembler/Hexagon/alu32_pred.txt
+++ b/test/MC/Disassembler/Hexagon/alu32_pred.txt
@@ -3,25 +3,25 @@
# Conditional add
0xf1 0xc3 0x75 0x74
-# CHECK: if (p3) r17 = add(r21, #31)
+# CHECK: if (p3) r17 = add(r21,#31)
0x03 0x40 0x45 0x85 0xf1 0xe3 0x75 0x74
# CHECK: p3 = r5
-# CHECK-NEXT: if (p3.new) r17 = add(r21, #31)
+# CHECK-NEXT: if (p3.new) r17 = add(r21,#31)
0xf1 0xc3 0xf5 0x74
-# CHECK: if (!p3) r17 = add(r21, #31)
+# CHECK: if (!p3) r17 = add(r21,#31)
0x03 0x40 0x45 0x85 0xf1 0xe3 0xf5 0x74
# CHECK: p3 = r5
-# CHECK-NEXT: if (!p3.new) r17 = add(r21, #31)
+# CHECK-NEXT: if (!p3.new) r17 = add(r21,#31)
0x71 0xdf 0x15 0xfb
-# CHECK: if (p3) r17 = add(r21, r31)
+# CHECK: if (p3) r17 = add(r21,r31)
0x03 0x40 0x45 0x85 0x71 0xff 0x15 0xfb
# CHECK: p3 = r5
-# CHECK-NEXT: if (p3.new) r17 = add(r21, r31)
+# CHECK-NEXT: if (p3.new) r17 = add(r21,r31)
0xf1 0xdf 0x15 0xfb
-# CHECK: if (!p3) r17 = add(r21, r31)
+# CHECK: if (!p3) r17 = add(r21,r31)
0x03 0x40 0x45 0x85 0xf1 0xff 0x15 0xfb
# CHECK: p3 = r5
-# CHECK-NEXT: if (!p3.new) r17 = add(r21, r31)
+# CHECK-NEXT: if (!p3.new) r17 = add(r21,r31)
# Conditional shift halfword
0x11 0xe3 0x15 0x70
@@ -47,59 +47,59 @@
# Conditional combine
0x70 0xdf 0x15 0xfd
-# CHECK: if (p3) r17:16 = combine(r21, r31)
+# CHECK: if (p3) r17:16 = combine(r21,r31)
0xf0 0xdf 0x15 0xfd
-# CHECK: if (!p3) r17:16 = combine(r21, r31)
+# CHECK: if (!p3) r17:16 = combine(r21,r31)
0x03 0x40 0x45 0x85 0x70 0xff 0x15 0xfd
# CHECK: p3 = r5
-# CHECK-NEXT: if (p3.new) r17:16 = combine(r21, r31)
+# CHECK-NEXT: if (p3.new) r17:16 = combine(r21,r31)
0x03 0x40 0x45 0x85 0xf0 0xff 0x15 0xfd
# CHECK: p3 = r5
-# CHECK-NEXT: if (!p3.new) r17:16 = combine(r21, r31)
+# CHECK-NEXT: if (!p3.new) r17:16 = combine(r21,r31)
# Conditional logical operations
0x71 0xdf 0x15 0xf9
-# CHECK: if (p3) r17 = and(r21, r31)
+# CHECK: if (p3) r17 = and(r21,r31)
0xf1 0xdf 0x15 0xf9
-# CHECK: if (!p3) r17 = and(r21, r31)
+# CHECK: if (!p3) r17 = and(r21,r31)
0x03 0x40 0x45 0x85 0x71 0xff 0x15 0xf9
# CHECK: p3 = r5
-# CHECK-NEXT: if (p3.new) r17 = and(r21, r31)
+# CHECK-NEXT: if (p3.new) r17 = and(r21,r31)
0x03 0x40 0x45 0x85 0xf1 0xff 0x15 0xf9
# CHECK: p3 = r5
-# CHECK-NEXT: if (!p3.new) r17 = and(r21, r31)
+# CHECK-NEXT: if (!p3.new) r17 = and(r21,r31)
0x71 0xdf 0x35 0xf9
-# CHECK: if (p3) r17 = or(r21, r31)
+# CHECK: if (p3) r17 = or(r21,r31)
0xf1 0xdf 0x35 0xf9
-# CHECK: if (!p3) r17 = or(r21, r31)
+# CHECK: if (!p3) r17 = or(r21,r31)
0x03 0x40 0x45 0x85 0x71 0xff 0x35 0xf9
# CHECK: p3 = r5
-# CHECK-NEXT: if (p3.new) r17 = or(r21, r31)
+# CHECK-NEXT: if (p3.new) r17 = or(r21,r31)
0x03 0x40 0x45 0x85 0xf1 0xff 0x35 0xf9
# CHECK: p3 = r5
-# CHECK-NEXT: if (!p3.new) r17 = or(r21, r31)
+# CHECK-NEXT: if (!p3.new) r17 = or(r21,r31)
0x71 0xdf 0x75 0xf9
-# CHECK: if (p3) r17 = xor(r21, r31)
+# CHECK: if (p3) r17 = xor(r21,r31)
0xf1 0xdf 0x75 0xf9
-# CHECK: if (!p3) r17 = xor(r21, r31)
+# CHECK: if (!p3) r17 = xor(r21,r31)
0x03 0x40 0x45 0x85 0x71 0xff 0x75 0xf9
# CHECK: p3 = r5
-# CHECK-NEXT: if (p3.new) r17 = xor(r21, r31)
+# CHECK-NEXT: if (p3.new) r17 = xor(r21,r31)
0x03 0x40 0x45 0x85 0xf1 0xff 0x75 0xf9
# CHECK: p3 = r5
-# CHECK-NEXT: if (!p3.new) r17 = xor(r21, r31)
+# CHECK-NEXT: if (!p3.new) r17 = xor(r21,r31)
# Conditional subtract
0x71 0xdf 0x35 0xfb
-# CHECK: if (p3) r17 = sub(r31, r21)
+# CHECK: if (p3) r17 = sub(r31,r21)
0xf1 0xdf 0x35 0xfb
-# CHECK: if (!p3) r17 = sub(r31, r21)
+# CHECK: if (!p3) r17 = sub(r31,r21)
0x03 0x40 0x45 0x85 0x71 0xff 0x35 0xfb
# CHECK: p3 = r5
-# CHECK-NEXT: if (p3.new) r17 = sub(r31, r21)
+# CHECK-NEXT: if (p3.new) r17 = sub(r31,r21)
0x03 0x40 0x45 0x85 0xf1 0xff 0x35 0xfb
# CHECK: p3 = r5
-# CHECK-NEXT: if (!p3.new) r17 = sub(r31, r21)
+# CHECK-NEXT: if (!p3.new) r17 = sub(r31,r21)
# Conditional sign extend
0x11 0xe3 0xb5 0x70
@@ -159,36 +159,36 @@
# Compare
0xe3 0xc3 0x15 0x75
-# CHECK: p3 = cmp.eq(r21, #31)
+# CHECK: p3 = cmp.eq(r21,#31)
0xf3 0xc3 0x15 0x75
-# CHECK: p3 = !cmp.eq(r21, #31)
+# CHECK: p3 = !cmp.eq(r21,#31)
0xe3 0xc3 0x55 0x75
-# CHECK: p3 = cmp.gt(r21, #31)
+# CHECK: p3 = cmp.gt(r21,#31)
0xf3 0xc3 0x55 0x75
-# CHECK: p3 = !cmp.gt(r21, #31)
+# CHECK: p3 = !cmp.gt(r21,#31)
0xe3 0xc3 0x95 0x75
-# CHECK: p3 = cmp.gtu(r21, #31)
+# CHECK: p3 = cmp.gtu(r21,#31)
0xf3 0xc3 0x95 0x75
-# CHECK: p3 = !cmp.gtu(r21, #31)
+# CHECK: p3 = !cmp.gtu(r21,#31)
0x03 0xdf 0x15 0xf2
-# CHECK: p3 = cmp.eq(r21, r31)
+# CHECK: p3 = cmp.eq(r21,r31)
0x13 0xdf 0x15 0xf2
-# CHECK: p3 = !cmp.eq(r21, r31)
+# CHECK: p3 = !cmp.eq(r21,r31)
0x03 0xdf 0x55 0xf2
-# CHECK: p3 = cmp.gt(r21, r31)
+# CHECK: p3 = cmp.gt(r21,r31)
0x13 0xdf 0x55 0xf2
-# CHECK: p3 = !cmp.gt(r21, r31)
+# CHECK: p3 = !cmp.gt(r21,r31)
0x03 0xdf 0x75 0xf2
-# CHECK: p3 = cmp.gtu(r21, r31)
+# CHECK: p3 = cmp.gtu(r21,r31)
0x13 0xdf 0x75 0xf2
-# CHECK: p3 = !cmp.gtu(r21, r31)
+# CHECK: p3 = !cmp.gtu(r21,r31)
# Compare to general register
0xf1 0xe3 0x55 0x73
-# CHECK: r17 = cmp.eq(r21, #31)
+# CHECK: r17 = cmp.eq(r21,#31)
0xf1 0xe3 0x75 0x73
-# CHECK: r17 = !cmp.eq(r21, #31)
+# CHECK: r17 = !cmp.eq(r21,#31)
0x11 0xdf 0x55 0xf3
-# CHECK: r17 = cmp.eq(r21, r31)
+# CHECK: r17 = cmp.eq(r21,r31)
0x11 0xdf 0x75 0xf3
-# CHECK: r17 = !cmp.eq(r21, r31)
+# CHECK: r17 = !cmp.eq(r21,r31)
diff --git a/test/MC/Disassembler/Hexagon/cr.txt b/test/MC/Disassembler/Hexagon/cr.txt
index 6cf2b5fda399..8e505299d966 100644
--- a/test/MC/Disassembler/Hexagon/cr.txt
+++ b/test/MC/Disassembler/Hexagon/cr.txt
@@ -3,9 +3,9 @@
# Corner detection acceleration
0x93 0xe1 0x12 0x6b
-# CHECK: p3 = !fastcorner9(p2, p1)
+# CHECK: p3 = !fastcorner9(p2,p1)
0x91 0xe3 0x02 0x6b
-# CHECK: p1 = fastcorner9(p2, p3)
+# CHECK: p1 = fastcorner9(p2,p3)
# Logical reductions on predicates
0x01 0xc0 0x82 0x6b
@@ -25,7 +25,7 @@
# Add to PC
0x91 0xca 0x49 0x6a
-# CHECK: r17 = add(pc, #21)
+# CHECK: r17 = add(pc,#21)
# Pipelined loop instructions
0x08 0xc4 0xb5 0x60
@@ -43,33 +43,33 @@
# Logical operations on predicates
0x01 0xc3 0x02 0x6b
-# CHECK: p1 = and(p3, p2)
+# CHECK: p1 = and(p3,p2)
0xc1 0xc3 0x12 0x6b
-# CHECK: p1 = and(p2, and(p3, p3))
+# CHECK: p1 = and(p2,and(p3,p3))
0x01 0xc3 0x22 0x6b
-# CHECK: p1 = or(p3, p2)
+# CHECK: p1 = or(p3,p2)
0xc1 0xc3 0x32 0x6b
-# CHECK: p1 = and(p2, or(p3, p3))
+# CHECK: p1 = and(p2,or(p3,p3))
0x01 0xc3 0x42 0x6b
-# CHECK: p1 = xor(p2, p3)
+# CHECK: p1 = xor(p2,p3)
0xc1 0xc3 0x52 0x6b
-# CHECK: p1 = or(p2, and(p3, p3))
+# CHECK: p1 = or(p2,and(p3,p3))
0x01 0xc2 0x63 0x6b
-# CHECK: p1 = and(p2, !p3)
+# CHECK: p1 = and(p2,!p3)
0xc1 0xc3 0x72 0x6b
-# CHECK: p1 = or(p2, or(p3, p3))
+# CHECK: p1 = or(p2,or(p3,p3))
0xc1 0xc3 0x92 0x6b
-# CHECK: p1 = and(p2, and(p3, !p3))
+# CHECK: p1 = and(p2,and(p3,!p3))
0xc1 0xc3 0xb2 0x6b
-# CHECK: p1 = and(p2, or(p3, !p3))
+# CHECK: p1 = and(p2,or(p3,!p3))
0x01 0xc0 0xc2 0x6b
# CHECK: p1 = not(p2)
0xc1 0xc3 0xd2 0x6b
-# CHECK: p1 = or(p2, and(p3, !p3))
+# CHECK: p1 = or(p2,and(p3,!p3))
0x01 0xc2 0xe3 0x6b
-# CHECK: p1 = or(p2, !p3)
+# CHECK: p1 = or(p2,!p3)
0xc1 0xc3 0xf2 0x6b
-# CHECK: p1 = or(p2, or(p3, !p3))
+# CHECK: p1 = or(p2,or(p3,!p3))
# User control register transfer
0x0d 0xc0 0x35 0x62
diff --git a/test/MC/Disassembler/Hexagon/j.txt b/test/MC/Disassembler/Hexagon/j.txt
index 661670e2a614..c3d163863930 100644
--- a/test/MC/Disassembler/Hexagon/j.txt
+++ b/test/MC/Disassembler/Hexagon/j.txt
@@ -15,145 +15,145 @@
0x00 0xc1 0x89 0x11
# CHECK: p0 = cmp.gt(r17,#-1); if (p0.new) jump:nt
0x00 0xc3 0x89 0x11
-# CHECK: p0 = tstbit(r17, #0); if (p0.new) jump:nt
+# CHECK: p0 = tstbit(r17,#0); if (p0.new) jump:nt
0x00 0xe0 0x89 0x11
# CHECK: p0 = cmp.eq(r17,#-1); if (p0.new) jump:t
0x00 0xe1 0x89 0x11
# CHECK: p0 = cmp.gt(r17,#-1); if (p0.new) jump:t
0x00 0xe3 0x89 0x11
-# CHECK: p0 = tstbit(r17, #0); if (p0.new) jump:t
+# CHECK: p0 = tstbit(r17,#0); if (p0.new) jump:t
0x00 0xc0 0xc9 0x11
# CHECK: p0 = cmp.eq(r17,#-1); if (!p0.new) jump:nt
0x00 0xc1 0xc9 0x11
# CHECK: p0 = cmp.gt(r17,#-1); if (!p0.new) jump:nt
0x00 0xc3 0xc9 0x11
-# CHECK: p0 = tstbit(r17, #0); if (!p0.new) jump:nt
+# CHECK: p0 = tstbit(r17,#0); if (!p0.new) jump:nt
0x00 0xe0 0xc9 0x11
# CHECK: p0 = cmp.eq(r17,#-1); if (!p0.new) jump:t
0x00 0xe1 0xc9 0x11
# CHECK: p0 = cmp.gt(r17,#-1); if (!p0.new) jump:t
0x00 0xe3 0xc9 0x11
-# CHECK: p0 = tstbit(r17, #0); if (!p0.new) jump:t
+# CHECK: p0 = tstbit(r17,#0); if (!p0.new) jump:t
0x00 0xd5 0x09 0x10
-# CHECK: p0 = cmp.eq(r17, #21); if (p0.new) jump:nt
+# CHECK: p0 = cmp.eq(r17,#21); if (p0.new) jump:nt
0x00 0xf5 0x09 0x10
-# CHECK: p0 = cmp.eq(r17, #21); if (p0.new) jump:t
+# CHECK: p0 = cmp.eq(r17,#21); if (p0.new) jump:t
0x00 0xd5 0x49 0x10
-# CHECK: p0 = cmp.eq(r17, #21); if (!p0.new) jump:nt
+# CHECK: p0 = cmp.eq(r17,#21); if (!p0.new) jump:nt
0x00 0xf5 0x49 0x10
-# CHECK: p0 = cmp.eq(r17, #21); if (!p0.new) jump:t
+# CHECK: p0 = cmp.eq(r17,#21); if (!p0.new) jump:t
0x00 0xd5 0x89 0x10
-# CHECK: p0 = cmp.gt(r17, #21); if (p0.new) jump:nt
+# CHECK: p0 = cmp.gt(r17,#21); if (p0.new) jump:nt
0x00 0xf5 0x89 0x10
-# CHECK: p0 = cmp.gt(r17, #21); if (p0.new) jump:t
+# CHECK: p0 = cmp.gt(r17,#21); if (p0.new) jump:t
0x00 0xd5 0xc9 0x10
-# CHECK: p0 = cmp.gt(r17, #21); if (!p0.new) jump:nt
+# CHECK: p0 = cmp.gt(r17,#21); if (!p0.new) jump:nt
0x00 0xf5 0xc9 0x10
-# CHECK: p0 = cmp.gt(r17, #21); if (!p0.new) jump:t
+# CHECK: p0 = cmp.gt(r17,#21); if (!p0.new) jump:t
0x00 0xd5 0x09 0x11
-# CHECK: p0 = cmp.gtu(r17, #21); if (p0.new) jump:nt
+# CHECK: p0 = cmp.gtu(r17,#21); if (p0.new) jump:nt
0x00 0xf5 0x09 0x11
-# CHECK: p0 = cmp.gtu(r17, #21); if (p0.new) jump:t
+# CHECK: p0 = cmp.gtu(r17,#21); if (p0.new) jump:t
0x00 0xd5 0x49 0x11
-# CHECK: p0 = cmp.gtu(r17, #21); if (!p0.new) jump:nt
+# CHECK: p0 = cmp.gtu(r17,#21); if (!p0.new) jump:nt
0x00 0xf5 0x49 0x11
-# CHECK: p0 = cmp.gtu(r17, #21); if (!p0.new) jump:t
+# CHECK: p0 = cmp.gtu(r17,#21); if (!p0.new) jump:t
0x00 0xc0 0x89 0x13
# CHECK: p1 = cmp.eq(r17,#-1); if (p1.new) jump:nt
0x00 0xc1 0x89 0x13
# CHECK: p1 = cmp.gt(r17,#-1); if (p1.new) jump:nt
0x00 0xc3 0x89 0x13
-# CHECK: p1 = tstbit(r17, #0); if (p1.new) jump:nt
+# CHECK: p1 = tstbit(r17,#0); if (p1.new) jump:nt
0x00 0xe0 0x89 0x13
# CHECK: p1 = cmp.eq(r17,#-1); if (p1.new) jump:t
0x00 0xe1 0x89 0x13
# CHECK: p1 = cmp.gt(r17,#-1); if (p1.new) jump:t
0x00 0xe3 0x89 0x13
-# CHECK: p1 = tstbit(r17, #0); if (p1.new) jump:t
+# CHECK: p1 = tstbit(r17,#0); if (p1.new) jump:t
0x00 0xc0 0xc9 0x13
# CHECK: p1 = cmp.eq(r17,#-1); if (!p1.new) jump:nt
0x00 0xc1 0xc9 0x13
# CHECK: p1 = cmp.gt(r17,#-1); if (!p1.new) jump:nt
0x00 0xc3 0xc9 0x13
-# CHECK: p1 = tstbit(r17, #0); if (!p1.new) jump:nt
+# CHECK: p1 = tstbit(r17,#0); if (!p1.new) jump:nt
0x00 0xe0 0xc9 0x13
# CHECK: p1 = cmp.eq(r17,#-1); if (!p1.new) jump:t
0x00 0xe1 0xc9 0x13
# CHECK: p1 = cmp.gt(r17,#-1); if (!p1.new) jump:t
0x00 0xe3 0xc9 0x13
-# CHECK: p1 = tstbit(r17, #0); if (!p1.new) jump:t
+# CHECK: p1 = tstbit(r17,#0); if (!p1.new) jump:t
0x00 0xd5 0x09 0x12
-# CHECK: p1 = cmp.eq(r17, #21); if (p1.new) jump:nt
+# CHECK: p1 = cmp.eq(r17,#21); if (p1.new) jump:nt
0x00 0xf5 0x09 0x12
-# CHECK: p1 = cmp.eq(r17, #21); if (p1.new) jump:t
+# CHECK: p1 = cmp.eq(r17,#21); if (p1.new) jump:t
0x00 0xd5 0x49 0x12
-# CHECK: p1 = cmp.eq(r17, #21); if (!p1.new) jump:nt
+# CHECK: p1 = cmp.eq(r17,#21); if (!p1.new) jump:nt
0x00 0xf5 0x49 0x12
-# CHECK: p1 = cmp.eq(r17, #21); if (!p1.new) jump:t
+# CHECK: p1 = cmp.eq(r17,#21); if (!p1.new) jump:t
0x00 0xd5 0x89 0x12
-# CHECK: p1 = cmp.gt(r17, #21); if (p1.new) jump:nt
+# CHECK: p1 = cmp.gt(r17,#21); if (p1.new) jump:nt
0x00 0xf5 0x89 0x12
-# CHECK: p1 = cmp.gt(r17, #21); if (p1.new) jump:t
+# CHECK: p1 = cmp.gt(r17,#21); if (p1.new) jump:t
0x00 0xd5 0xc9 0x12
-# CHECK: p1 = cmp.gt(r17, #21); if (!p1.new) jump:nt
+# CHECK: p1 = cmp.gt(r17,#21); if (!p1.new) jump:nt
0x00 0xf5 0xc9 0x12
-# CHECK: p1 = cmp.gt(r17, #21); if (!p1.new) jump:t
+# CHECK: p1 = cmp.gt(r17,#21); if (!p1.new) jump:t
0x00 0xd5 0x09 0x13
-# CHECK: p1 = cmp.gtu(r17, #21); if (p1.new) jump:nt
+# CHECK: p1 = cmp.gtu(r17,#21); if (p1.new) jump:nt
0x00 0xf5 0x09 0x13
-# CHECK: p1 = cmp.gtu(r17, #21); if (p1.new) jump:t
+# CHECK: p1 = cmp.gtu(r17,#21); if (p1.new) jump:t
0x00 0xd5 0x49 0x13
-# CHECK: p1 = cmp.gtu(r17, #21); if (!p1.new) jump:nt
+# CHECK: p1 = cmp.gtu(r17,#21); if (!p1.new) jump:nt
0x00 0xf5 0x49 0x13
-# CHECK: p1 = cmp.gtu(r17, #21); if (!p1.new) jump:t
+# CHECK: p1 = cmp.gtu(r17,#21); if (!p1.new) jump:t
0x00 0xcd 0x09 0x14
-# CHECK: p0 = cmp.eq(r17, r21); if (p0.new) jump:nt
+# CHECK: p0 = cmp.eq(r17,r21); if (p0.new) jump:nt
0x00 0xdd 0x09 0x14
-# CHECK: p1 = cmp.eq(r17, r21); if (p1.new) jump:nt
+# CHECK: p1 = cmp.eq(r17,r21); if (p1.new) jump:nt
0x00 0xed 0x09 0x14
-# CHECK: p0 = cmp.eq(r17, r21); if (p0.new) jump:t
+# CHECK: p0 = cmp.eq(r17,r21); if (p0.new) jump:t
0x00 0xfd 0x09 0x14
-# CHECK: p1 = cmp.eq(r17, r21); if (p1.new) jump:t
+# CHECK: p1 = cmp.eq(r17,r21); if (p1.new) jump:t
0x00 0xcd 0x49 0x14
-# CHECK: p0 = cmp.eq(r17, r21); if (!p0.new) jump:nt
+# CHECK: p0 = cmp.eq(r17,r21); if (!p0.new) jump:nt
0x00 0xdd 0x49 0x14
-# CHECK: p1 = cmp.eq(r17, r21); if (!p1.new) jump:nt
+# CHECK: p1 = cmp.eq(r17,r21); if (!p1.new) jump:nt
0x00 0xed 0x49 0x14
-# CHECK: p0 = cmp.eq(r17, r21); if (!p0.new) jump:t
+# CHECK: p0 = cmp.eq(r17,r21); if (!p0.new) jump:t
0x00 0xfd 0x49 0x14
-# CHECK: p1 = cmp.eq(r17, r21); if (!p1.new) jump:t
+# CHECK: p1 = cmp.eq(r17,r21); if (!p1.new) jump:t
0x00 0xcd 0x89 0x14
-# CHECK: p0 = cmp.gt(r17, r21); if (p0.new) jump:nt
+# CHECK: p0 = cmp.gt(r17,r21); if (p0.new) jump:nt
0x00 0xdd 0x89 0x14
-# CHECK: p1 = cmp.gt(r17, r21); if (p1.new) jump:nt
+# CHECK: p1 = cmp.gt(r17,r21); if (p1.new) jump:nt
0x00 0xed 0x89 0x14
-# CHECK: p0 = cmp.gt(r17, r21); if (p0.new) jump:t
+# CHECK: p0 = cmp.gt(r17,r21); if (p0.new) jump:t
0x00 0xfd 0x89 0x14
-# CHECK: p1 = cmp.gt(r17, r21); if (p1.new) jump:t
+# CHECK: p1 = cmp.gt(r17,r21); if (p1.new) jump:t
0x00 0xcd 0xc9 0x14
-# CHECK: p0 = cmp.gt(r17, r21); if (!p0.new) jump:nt
+# CHECK: p0 = cmp.gt(r17,r21); if (!p0.new) jump:nt
0x00 0xdd 0xc9 0x14
-# CHECK: p1 = cmp.gt(r17, r21); if (!p1.new) jump:nt
+# CHECK: p1 = cmp.gt(r17,r21); if (!p1.new) jump:nt
0x00 0xed 0xc9 0x14
-# CHECK: p0 = cmp.gt(r17, r21); if (!p0.new) jump:t
+# CHECK: p0 = cmp.gt(r17,r21); if (!p0.new) jump:t
0x00 0xfd 0xc9 0x14
-# CHECK: p1 = cmp.gt(r17, r21); if (!p1.new) jump:t
+# CHECK: p1 = cmp.gt(r17,r21); if (!p1.new) jump:t
0x00 0xcd 0x09 0x15
-# CHECK: p0 = cmp.gtu(r17, r21); if (p0.new) jump:nt
+# CHECK: p0 = cmp.gtu(r17,r21); if (p0.new) jump:nt
0x00 0xdd 0x09 0x15
-# CHECK: p1 = cmp.gtu(r17, r21); if (p1.new) jump:nt
+# CHECK: p1 = cmp.gtu(r17,r21); if (p1.new) jump:nt
0x00 0xed 0x09 0x15
-# CHECK: p0 = cmp.gtu(r17, r21); if (p0.new) jump:t
+# CHECK: p0 = cmp.gtu(r17,r21); if (p0.new) jump:t
0x00 0xfd 0x09 0x15
-# CHECK: p1 = cmp.gtu(r17, r21); if (p1.new) jump:t
+# CHECK: p1 = cmp.gtu(r17,r21); if (p1.new) jump:t
0x00 0xcd 0x49 0x15
-# CHECK: p0 = cmp.gtu(r17, r21); if (!p0.new) jump:nt
+# CHECK: p0 = cmp.gtu(r17,r21); if (!p0.new) jump:nt
0x00 0xdd 0x49 0x15
-# CHECK: p1 = cmp.gtu(r17, r21); if (!p1.new) jump:nt
+# CHECK: p1 = cmp.gtu(r17,r21); if (!p1.new) jump:nt
0x00 0xed 0x49 0x15
-# CHECK: p0 = cmp.gtu(r17, r21); if (!p0.new) jump:t
+# CHECK: p0 = cmp.gtu(r17,r21); if (!p0.new) jump:t
0x00 0xfd 0x49 0x15
-# CHECK: p1 = cmp.gtu(r17, r21); if (!p1.new) jump:t
+# CHECK: p1 = cmp.gtu(r17,r21); if (!p1.new) jump:t
# Jump to address
0x22 0xc0 0x00 0x58
diff --git a/test/MC/Disassembler/Hexagon/ld.txt b/test/MC/Disassembler/Hexagon/ld.txt
index 56e00fd94f56..66e014fea59f 100644
--- a/test/MC/Disassembler/Hexagon/ld.txt
+++ b/test/MC/Disassembler/Hexagon/ld.txt
@@ -3,25 +3,25 @@
# Load doubleword
0x90 0xff 0xd5 0x3a
-# CHECK: r17:16 = memd(r21 + r31<<#3)
+# CHECK: r17:16 = memd(r21+r31<<#3)
0xb0 0xc2 0xc0 0x49
-# CHECK: r17:16 = memd(#168)
+# CHECK: r17:16 = memd(gp+#168)
0x02 0x40 0x00 0x00 0x10 0xc5 0xc0 0x49
# CHECK: r17:16 = memd(##168)
0xd0 0xc0 0xd5 0x91
-# CHECK: r17:16 = memd(r21 + #48)
+# CHECK: r17:16 = memd(r21+#48)
0xb0 0xe0 0xd5 0x99
-# CHECK: r17:16 = memd(r21 ++ #40:circ(m1))
+# CHECK: r17:16 = memd(r21++#40:circ(m1))
0x10 0xe2 0xd5 0x99
-# CHECK: r17:16 = memd(r21 ++ I:circ(m1))
+# CHECK: r17:16 = memd(r21++I:circ(m1))
0x00 0x40 0x00 0x00 0x70 0xd7 0xd5 0x9b
-# CHECK: r17:16 = memd(r21 = ##31)
+# CHECK: r17:16 = memd(r21=##31)
0xb0 0xc0 0xd5 0x9b
# CHECK: r17:16 = memd(r21++#40)
0x10 0xe0 0xd5 0x9d
# CHECK: r17:16 = memd(r21++m1)
0x10 0xe0 0xd5 0x9f
-# CHECK: r17:16 = memd(r21 ++ m1:brev)
+# CHECK: r17:16 = memd(r21++m1:brev)
# Load doubleword conditionally
0xf0 0xff 0xd5 0x30
@@ -35,15 +35,15 @@
# CHECK: p3 = r5
# CHECK-NEXT: if (!p3.new) r17:16 = memd(r21+r31<<#3)
0x70 0xd8 0xd5 0x41
-# CHECK: if (p3) r17:16 = memd(r21 + #24)
+# CHECK: if (p3) r17:16 = memd(r21+#24)
0x03 0x40 0x45 0x85 0x70 0xd8 0xd5 0x43
# CHECK: p3 = r5
-# CHECK-NEXT: if (p3.new) r17:16 = memd(r21 + #24)
+# CHECK-NEXT: if (p3.new) r17:16 = memd(r21+#24)
0x70 0xd8 0xd5 0x45
-# CHECK: if (!p3) r17:16 = memd(r21 + #24)
+# CHECK: if (!p3) r17:16 = memd(r21+#24)
0x03 0x40 0x45 0x85 0x70 0xd8 0xd5 0x47
# CHECK: p3 = r5
-# CHECK-NEXT: if (!p3.new) r17:16 = memd(r21 + #24)
+# CHECK-NEXT: if (!p3.new) r17:16 = memd(r21+#24)
0xb0 0xe6 0xd5 0x9b
# CHECK: if (p3) r17:16 = memd(r21++#40)
0xb0 0xee 0xd5 0x9b
@@ -57,25 +57,25 @@
# Load byte
0x91 0xff 0x15 0x3a
-# CHECK: r17 = memb(r21 + r31<<#3)
+# CHECK: r17 = memb(r21+r31<<#3)
0xb1 0xc2 0x00 0x49
-# CHECK: r17 = memb(#21)
+# CHECK: r17 = memb(gp+#21)
0x00 0x40 0x00 0x00 0xb1 0xc2 0x00 0x49
# CHECK: r17 = memb(##21)
0xf1 0xc3 0x15 0x91
-# CHECK: r17 = memb(r21 + #31)
+# CHECK: r17 = memb(r21+#31)
0xb1 0xe0 0x15 0x99
-# CHECK: r17 = memb(r21 ++ #5:circ(m1))
+# CHECK: r17 = memb(r21++#5:circ(m1))
0x11 0xe2 0x15 0x99
-# CHECK: r17 = memb(r21 ++ I:circ(m1))
+# CHECK: r17 = memb(r21++I:circ(m1))
0x00 0x40 0x00 0x00 0x71 0xd7 0x15 0x9b
-# CHECK: r17 = memb(r21 = ##31)
+# CHECK: r17 = memb(r21=##31)
0xb1 0xc0 0x15 0x9b
# CHECK: r17 = memb(r21++#5)
0x11 0xe0 0x15 0x9d
# CHECK: r17 = memb(r21++m1)
0x11 0xe0 0x15 0x9f
-# CHECK: r17 = memb(r21 ++ m1:brev)
+# CHECK: r17 = memb(r21++m1:brev)
# Load byte conditionally
0xf1 0xff 0x15 0x30
@@ -89,15 +89,15 @@
# CHECK: p3 = r5
# CHECK-NEXT: if (!p3.new) r17 = memb(r21+r31<<#3)
0x91 0xdd 0x15 0x41
-# CHECK: if (p3) r17 = memb(r21 + #44)
+# CHECK: if (p3) r17 = memb(r21+#44)
0x03 0x40 0x45 0x85 0x91 0xdd 0x15 0x43
# CHECK: p3 = r5
-# CHECK-NEXT: if (p3.new) r17 = memb(r21 + #44)
+# CHECK-NEXT: if (p3.new) r17 = memb(r21+#44)
0x91 0xdd 0x15 0x45
-# CHECK: if (!p3) r17 = memb(r21 + #44)
+# CHECK: if (!p3) r17 = memb(r21+#44)
0x03 0x40 0x45 0x85 0x91 0xdd 0x15 0x47
# CHECK: p3 = r5
-# CHECK-NEXT: if (!p3.new) r17 = memb(r21 + #44)
+# CHECK-NEXT: if (!p3.new) r17 = memb(r21+#44)
0xb1 0xe6 0x15 0x9b
# CHECK: if (p3) r17 = memb(r21++#5)
0xb1 0xee 0x15 0x9b
@@ -111,41 +111,41 @@
# Load byte into shifted vector
0xf0 0xc3 0x95 0x90
-# CHECK: r17:16 = memb_fifo(r21 + #31)
+# CHECK: r17:16 = memb_fifo(r21+#31)
0xb0 0xe0 0x95 0x98
-# CHECK: r17:16 = memb_fifo(r21 ++ #5:circ(m1))
+# CHECK: r17:16 = memb_fifo(r21++#5:circ(m1))
0x10 0xe2 0x95 0x98
-# CHECK: r17:16 = memb_fifo(r21 ++ I:circ(m1))
+# CHECK: r17:16 = memb_fifo(r21++I:circ(m1))
# Load half into shifted vector
0xf0 0xc3 0x55 0x90
-# CHECK: r17:16 = memh_fifo(r21 + #62)
+# CHECK: r17:16 = memh_fifo(r21+#62)
0xb0 0xe0 0x55 0x98
-# CHECK: r17:16 = memh_fifo(r21 ++ #10:circ(m1))
+# CHECK: r17:16 = memh_fifo(r21++#10:circ(m1))
0x10 0xe2 0x55 0x98
-# CHECK: r17:16 = memh_fifo(r21 ++ I:circ(m1))
+# CHECK: r17:16 = memh_fifo(r21++I:circ(m1))
# Load halfword
0x91 0xff 0x55 0x3a
-# CHECK: r17 = memh(r21 + r31<<#3)
+# CHECK: r17 = memh(r21+r31<<#3)
0xb1 0xc2 0x40 0x49
-# CHECK: r17 = memh(#42)
+# CHECK: r17 = memh(gp+#42)
0x00 0x40 0x00 0x00 0x51 0xc5 0x40 0x49
# CHECK: r17 = memh(##42)
0xf1 0xc3 0x55 0x91
-# CHECK: r17 = memh(r21 + #62)
+# CHECK: r17 = memh(r21+#62)
0xb1 0xe0 0x55 0x99
-# CHECK: r17 = memh(r21 ++ #10:circ(m1))
+# CHECK: r17 = memh(r21++#10:circ(m1))
0x11 0xe2 0x55 0x99
-# CHECK: r17 = memh(r21 ++ I:circ(m1))
+# CHECK: r17 = memh(r21++I:circ(m1))
0x00 0x40 0x00 0x00 0x71 0xd7 0x55 0x9b
-# CHECK: r17 = memh(r21 = ##31)
+# CHECK: r17 = memh(r21=##31)
0xb1 0xc0 0x55 0x9b
# CHECK: r17 = memh(r21++#10)
0x11 0xe0 0x55 0x9d
# CHECK: r17 = memh(r21++m1)
0x11 0xe0 0x55 0x9f
-# CHECK: r17 = memh(r21 ++ m1:brev)
+# CHECK: r17 = memh(r21++m1:brev)
# Load halfword conditionally
0xf1 0xff 0x55 0x30
@@ -169,37 +169,37 @@
# CHECK: p3 = r5
# CHECK-NEXT: if (!p3.new) r17 = memh(r21++#10)
0xf1 0xdb 0x55 0x41
-# CHECK: if (p3) r17 = memh(r21 + #62)
+# CHECK: if (p3) r17 = memh(r21+#62)
0xf1 0xdb 0x55 0x45
-# CHECK: if (!p3) r17 = memh(r21 + #62)
+# CHECK: if (!p3) r17 = memh(r21+#62)
0x03 0x40 0x45 0x85 0xf1 0xdb 0x55 0x43
# CHECK: p3 = r5
-# CHECK-NEXT: if (p3.new) r17 = memh(r21 + #62)
+# CHECK-NEXT: if (p3.new) r17 = memh(r21+#62)
0x03 0x40 0x45 0x85 0xf1 0xdb 0x55 0x47
# CHECK: p3 = r5
-# CHECK-NEXT: if (!p3.new) r17 = memh(r21 + #62)
+# CHECK-NEXT: if (!p3.new) r17 = memh(r21+#62)
# Load unsigned byte
0x91 0xff 0x35 0x3a
-# CHECK: r17 = memub(r21 + r31<<#3)
+# CHECK: r17 = memub(r21+r31<<#3)
0xb1 0xc2 0x20 0x49
-# CHECK: r17 = memub(#21)
+# CHECK: r17 = memub(gp+#21)
0x00 0x40 0x00 0x00 0xb1 0xc2 0x20 0x49
# CHECK: r17 = memub(##21)
0xf1 0xc3 0x35 0x91
-# CHECK: r17 = memub(r21 + #31)
+# CHECK: r17 = memub(r21+#31)
0xb1 0xe0 0x35 0x99
-# CHECK: r17 = memub(r21 ++ #5:circ(m1))
+# CHECK: r17 = memub(r21++#5:circ(m1))
0x11 0xe2 0x35 0x99
-# CHECK: r17 = memub(r21 ++ I:circ(m1))
+# CHECK: r17 = memub(r21++I:circ(m1))
0x00 0x40 0x00 0x00 0x71 0xd7 0x35 0x9b
-# CHECK: r17 = memub(r21 = ##31)
+# CHECK: r17 = memub(r21=##31)
0xb1 0xc0 0x35 0x9b
# CHECK: r17 = memub(r21++#5)
0x11 0xe0 0x35 0x9d
# CHECK: r17 = memub(r21++m1)
0x11 0xe0 0x35 0x9f
-# CHECK: r17 = memub(r21 ++ m1:brev)
+# CHECK: r17 = memub(r21++m1:brev)
# Load unsigned byte conditionally
0xf1 0xff 0x35 0x30
@@ -213,15 +213,15 @@
# CHECK: p3 = r5
# CHECK-NEXT: if (!p3.new) r17 = memub(r21+r31<<#3)
0xf1 0xdb 0x35 0x41
-# CHECK: if (p3) r17 = memub(r21 + #31)
+# CHECK: if (p3) r17 = memub(r21+#31)
0x03 0x40 0x45 0x85 0xf1 0xdb 0x35 0x43
# CHECK: p3 = r5
-# CHECK-NEXT: if (p3.new) r17 = memub(r21 + #31)
+# CHECK-NEXT: if (p3.new) r17 = memub(r21+#31)
0xf1 0xdb 0x35 0x45
-# CHECK: if (!p3) r17 = memub(r21 + #31)
+# CHECK: if (!p3) r17 = memub(r21+#31)
0x03 0x40 0x45 0x85 0xf1 0xdb 0x35 0x47
# CHECK: p3 = r5
-# CHECK-NEXT: if (!p3.new) r17 = memub(r21 + #31)
+# CHECK-NEXT: if (!p3.new) r17 = memub(r21+#31)
0xb1 0xe6 0x35 0x9b
# CHECK: if (p3) r17 = memub(r21++#5)
0xb1 0xee 0x35 0x9b
@@ -235,25 +235,25 @@
# Load unsigned halfword
0x91 0xff 0x75 0x3a
-# CHECK: r17 = memuh(r21 + r31<<#3)
+# CHECK: r17 = memuh(r21+r31<<#3)
0xb1 0xc2 0x60 0x49
-# CHECK: r17 = memuh(#42)
+# CHECK: r17 = memuh(gp+#42)
0x00 0x40 0x00 0x00 0x51 0xc5 0x60 0x49
# CHECK: r17 = memuh(##42)
0xb1 0xc2 0x75 0x91
-# CHECK: r17 = memuh(r21 + #42)
+# CHECK: r17 = memuh(r21+#42)
0xb1 0xe0 0x75 0x99
-# CHECK: r17 = memuh(r21 ++ #10:circ(m1))
+# CHECK: r17 = memuh(r21++#10:circ(m1))
0x11 0xe2 0x75 0x99
-# CHECK: r17 = memuh(r21 ++ I:circ(m1))
+# CHECK: r17 = memuh(r21++I:circ(m1))
0x00 0x40 0x00 0x00 0x71 0xd7 0x75 0x9b
-# CHECK: r17 = memuh(r21 = ##31)
+# CHECK: r17 = memuh(r21=##31)
0xb1 0xc0 0x75 0x9b
# CHECK: r17 = memuh(r21++#10)
0x11 0xe0 0x75 0x9d
# CHECK: r17 = memuh(r21++m1)
0x11 0xe0 0x75 0x9f
-# CHECK: r17 = memuh(r21 ++ m1:brev)
+# CHECK: r17 = memuh(r21++m1:brev)
# Load unsigned halfword conditionally
0xf1 0xff 0x75 0x30
@@ -267,15 +267,15 @@
# CHECK: p3 = r5
# CHECK-NEXT: if (!p3.new) r17 = memuh(r21+r31<<#3)
0xb1 0xda 0x75 0x41
-# CHECK: if (p3) r17 = memuh(r21 + #42)
+# CHECK: if (p3) r17 = memuh(r21+#42)
0xb1 0xda 0x75 0x45
-# CHECK: if (!p3) r17 = memuh(r21 + #42)
+# CHECK: if (!p3) r17 = memuh(r21+#42)
0x03 0x40 0x45 0x85 0xb1 0xda 0x75 0x43
# CHECK: p3 = r5
-# CHECK-NEXT: if (p3.new) r17 = memuh(r21 + #42)
+# CHECK-NEXT: if (p3.new) r17 = memuh(r21+#42)
0x03 0x40 0x45 0x85 0xb1 0xda 0x75 0x47
# CHECK: p3 = r5
-# CHECK-NEXT: if (!p3.new) r17 = memuh(r21 + #42)
+# CHECK-NEXT: if (!p3.new) r17 = memuh(r21+#42)
0xb1 0xe6 0x75 0x9b
# CHECK: if (p3) r17 = memuh(r21++#10)
0xb1 0xee 0x75 0x9b
@@ -289,25 +289,25 @@
# Load word
0x91 0xff 0x95 0x3a
-# CHECK: r17 = memw(r21 + r31<<#3)
+# CHECK: r17 = memw(r21+r31<<#3)
0xb1 0xc2 0x80 0x49
-# CHECK: r17 = memw(#84)
+# CHECK: r17 = memw(gp+#84)
0x01 0x40 0x00 0x00 0x91 0xc2 0x80 0x49
# CHECK: r17 = memw(##84)
0xb1 0xc2 0x95 0x91
-# CHECK: r17 = memw(r21 + #84)
+# CHECK: r17 = memw(r21+#84)
0xb1 0xe0 0x95 0x99
-# CHECK: r17 = memw(r21 ++ #20:circ(m1))
+# CHECK: r17 = memw(r21++#20:circ(m1))
0x11 0xe2 0x95 0x99
-# CHECK: r17 = memw(r21 ++ I:circ(m1))
+# CHECK: r17 = memw(r21++I:circ(m1))
0x00 0x40 0x00 0x00 0x71 0xd7 0x95 0x9b
-# CHECK: r17 = memw(r21 = ##31)
+# CHECK: r17 = memw(r21=##31)
0xb1 0xc0 0x95 0x9b
# CHECK: r17 = memw(r21++#20)
0x11 0xe0 0x95 0x9d
# CHECK: r17 = memw(r21++m1)
0x11 0xe0 0x95 0x9f
-# CHECK: r17 = memw(r21 ++ m1:brev)
+# CHECK: r17 = memw(r21++m1:brev)
# Load word conditionally
0xf1 0xff 0x95 0x30
@@ -321,15 +321,15 @@
# CHECK: p3 = r5
# CHECK-NEXT: if (!p3.new) r17 = memw(r21+r31<<#3)
0xb1 0xda 0x95 0x41
-# CHECK: if (p3) r17 = memw(r21 + #84)
+# CHECK: if (p3) r17 = memw(r21+#84)
0xb1 0xda 0x95 0x45
-# CHECK: if (!p3) r17 = memw(r21 + #84)
+# CHECK: if (!p3) r17 = memw(r21+#84)
0x03 0x40 0x45 0x85 0xb1 0xda 0x95 0x43
# CHECK: p3 = r5
-# CHECK-NEXT: if (p3.new) r17 = memw(r21 + #84)
+# CHECK-NEXT: if (p3.new) r17 = memw(r21+#84)
0x03 0x40 0x45 0x85 0xb1 0xda 0x95 0x47
# CHECK: p3 = r5
-# CHECK-NEXT: if (!p3.new) r17 = memw(r21 + #84)
+# CHECK-NEXT: if (!p3.new) r17 = memw(r21+#84)
0xb1 0xe6 0x95 0x9b
# CHECK: if (p3) r17 = memw(r21++#20)
0xb1 0xee 0x95 0x9b
@@ -367,59 +367,59 @@
# Load and unpack bytes to halfwords
0xf1 0xc3 0x35 0x90
-# CHECK: r17 = membh(r21 + #62)
+# CHECK: r17 = membh(r21+#62)
0xf1 0xc3 0x75 0x90
-# CHECK: r17 = memubh(r21 + #62)
+# CHECK: r17 = memubh(r21+#62)
0xf0 0xc3 0xb5 0x90
-# CHECK: r17:16 = memubh(r21 + #124)
+# CHECK: r17:16 = memubh(r21+#124)
0xf0 0xc3 0xf5 0x90
-# CHECK: r17:16 = membh(r21 + #124)
+# CHECK: r17:16 = membh(r21+#124)
0xb1 0xe0 0x35 0x98
-# CHECK: r17 = membh(r21 ++ #10:circ(m1))
+# CHECK: r17 = membh(r21++#10:circ(m1))
0x11 0xe2 0x35 0x98
-# CHECK: r17 = membh(r21 ++ I:circ(m1))
+# CHECK: r17 = membh(r21++I:circ(m1))
0xb1 0xe0 0x75 0x98
-# CHECK: r17 = memubh(r21 ++ #10:circ(m1))
+# CHECK: r17 = memubh(r21++#10:circ(m1))
0x11 0xe2 0x75 0x98
-# CHECK: r17 = memubh(r21 ++ I:circ(m1))
+# CHECK: r17 = memubh(r21++I:circ(m1))
0xb0 0xe0 0xf5 0x98
-# CHECK: r17:16 = membh(r21 ++ #20:circ(m1))
+# CHECK: r17:16 = membh(r21++#20:circ(m1))
0x10 0xe2 0xf5 0x98
-# CHECK: r17:16 = membh(r21 ++ I:circ(m1))
+# CHECK: r17:16 = membh(r21++I:circ(m1))
0xb0 0xe0 0xb5 0x98
-# CHECK: r17:16 = memubh(r21 ++ #20:circ(m1))
+# CHECK: r17:16 = memubh(r21++#20:circ(m1))
0x10 0xe2 0xb5 0x98
-# CHECK: r17:16 = memubh(r21 ++ I:circ(m1))
+# CHECK: r17:16 = memubh(r21++I:circ(m1))
0x00 0x40 0x00 0x00 0x71 0xd7 0x35 0x9a
-# CHECK: r17 = membh(r21 = ##31)
+# CHECK: r17 = membh(r21=##31)
0xb1 0xc0 0x35 0x9a
# CHECK: r17 = membh(r21++#10)
0x00 0x40 0x00 0x00 0x71 0xd7 0x75 0x9a
-# CHECK: r17 = memubh(r21 = ##31)
+# CHECK: r17 = memubh(r21=##31)
0xb1 0xc0 0x75 0x9a
# CHECK: r17 = memubh(r21++#10)
0x00 0x40 0x00 0x00 0x70 0xd7 0xb5 0x9a
-# CHECK: r17:16 = memubh(r21 = ##31)
+# CHECK: r17:16 = memubh(r21=##31)
0xb0 0xc0 0xb5 0x9a
# CHECK: r17:16 = memubh(r21++#20)
0x00 0x40 0x00 0x00 0x70 0xd7 0xf5 0x9a
-# CHECK: r17:16 = membh(r21 = ##31)
+# CHECK: r17:16 = membh(r21=##31)
0xb0 0xc0 0xf5 0x9a
# CHECK: r17:16 = membh(r21++#20)
0x00 0x40 0x00 0x00 0xf1 0xf7 0x35 0x9c
-# CHECK: r17 = membh(r21<<#3 + ##31)
+# CHECK: r17 = membh(r21<<#3+##31)
0x11 0xe0 0x35 0x9c
# CHECK: r17 = membh(r21++m1)
0x00 0x40 0x00 0x00 0xf1 0xf7 0x75 0x9c
-# CHECK: r17 = memubh(r21<<#3 + ##31)
+# CHECK: r17 = memubh(r21<<#3+##31)
0x11 0xe0 0x75 0x9c
# CHECK: r17 = memubh(r21++m1)
0x00 0x40 0x00 0x00 0xf0 0xf7 0xf5 0x9c
-# CHECK: r17:16 = membh(r21<<#3 + ##31)
+# CHECK: r17:16 = membh(r21<<#3+##31)
0x10 0xe0 0xf5 0x9c
# CHECK: r17:16 = membh(r21++m1)
0x00 0x40 0x00 0x00 0xf0 0xf7 0xb5 0x9c
-# CHECK: r17:16 = memubh(r21<<#3 + ##31)
+# CHECK: r17:16 = memubh(r21<<#3+##31)
0x11 0xe0 0x35 0x9c
# CHECK: r17 = membh(r21++m1)
0x11 0xe0 0x75 0x9c
@@ -429,10 +429,10 @@
0x10 0xe0 0xb5 0x9c
# CHECK: r17:16 = memubh(r21++m1)
0x11 0xe0 0x35 0x9e
-# CHECK: r17 = membh(r21 ++ m1:brev)
+# CHECK: r17 = membh(r21++m1:brev)
0x11 0xe0 0x75 0x9e
-# CHECK: r17 = memubh(r21 ++ m1:brev)
+# CHECK: r17 = memubh(r21++m1:brev)
0x10 0xe0 0xb5 0x9e
-# CHECK: r17:16 = memubh(r21 ++ m1:brev)
+# CHECK: r17:16 = memubh(r21++m1:brev)
0x10 0xe0 0xf5 0x9e
-# CHECK: r17:16 = membh(r21 ++ m1:brev)
+# CHECK: r17:16 = membh(r21++m1:brev)
diff --git a/test/MC/Disassembler/Hexagon/nv_j.txt b/test/MC/Disassembler/Hexagon/nv_j.txt
index 2135b5a039f6..f3b7140f8a75 100644
--- a/test/MC/Disassembler/Hexagon/nv_j.txt
+++ b/test/MC/Disassembler/Hexagon/nv_j.txt
@@ -4,133 +4,133 @@
# Jump to address conditioned on new register value
0x11 0x40 0x71 0x70 0x92 0xd5 0x02 0x20
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.eq(r17.new, r21)) jump:nt
+# CHECK-NEXT: if (cmp.eq(r17.new,r21)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0x02 0x20
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.eq(r17.new, r21)) jump:t
+# CHECK-NEXT: if (cmp.eq(r17.new,r21)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0x42 0x20
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.eq(r17.new, r21)) jump:nt
+# CHECK-NEXT: if (!cmp.eq(r17.new,r21)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0x42 0x20
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.eq(r17.new, r21)) jump:t
+# CHECK-NEXT: if (!cmp.eq(r17.new,r21)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0x82 0x20
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gt(r17.new, r21)) jump:nt
+# CHECK-NEXT: if (cmp.gt(r17.new,r21)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0x82 0x20
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gt(r17.new, r21)) jump:t
+# CHECK-NEXT: if (cmp.gt(r17.new,r21)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0xc2 0x20
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.gt(r17.new, r21)) jump:nt
+# CHECK-NEXT: if (!cmp.gt(r17.new,r21)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0xc2 0x20
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.gt(r17.new, r21)) jump:t
+# CHECK-NEXT: if (!cmp.gt(r17.new,r21)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0x02 0x21
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gtu(r17.new, r21)) jump:nt
+# CHECK-NEXT: if (cmp.gtu(r17.new,r21)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0x02 0x21
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gtu(r17.new, r21)) jump:t
+# CHECK-NEXT: if (cmp.gtu(r17.new,r21)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0x42 0x21
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.gtu(r17.new, r21)) jump:nt
+# CHECK-NEXT: if (!cmp.gtu(r17.new,r21)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0x42 0x21
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.gtu(r17.new, r21)) jump:t
+# CHECK-NEXT: if (!cmp.gtu(r17.new,r21)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0x82 0x21
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gt(r21, r17.new)) jump:nt
+# CHECK-NEXT: if (cmp.gt(r21,r17.new)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0x82 0x21
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gt(r21, r17.new)) jump:t
+# CHECK-NEXT: if (cmp.gt(r21,r17.new)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0xc2 0x21
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.gt(r21, r17.new)) jump:nt
+# CHECK-NEXT: if (!cmp.gt(r21,r17.new)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0xc2 0x21
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.gt(r21, r17.new)) jump:t
+# CHECK-NEXT: if (!cmp.gt(r21,r17.new)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0x02 0x22
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gtu(r21, r17.new)) jump:nt
+# CHECK-NEXT: if (cmp.gtu(r21,r17.new)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0x02 0x22
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gtu(r21, r17.new)) jump:t
+# CHECK-NEXT: if (cmp.gtu(r21,r17.new)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0x42 0x22
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.gtu(r21, r17.new)) jump:nt
+# CHECK-NEXT: if (!cmp.gtu(r21,r17.new)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0x42 0x22
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.gtu(r21, r17.new)) jump:t
+# CHECK-NEXT: if (!cmp.gtu(r21,r17.new)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0x02 0x24
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.eq(r17.new, #21)) jump:nt
+# CHECK-NEXT: if (cmp.eq(r17.new,#21)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0x02 0x24
# CHECK: r17 = r17
-# CHECK-NETX: if (cmp.eq(r17.new, #21)) jump:t
+# CHECK-NEXT: if (cmp.eq(r17.new,#21)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0x42 0x24
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.eq(r17.new, #21)) jump:nt
+# CHECK-NEXT: if (!cmp.eq(r17.new,#21)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0x42 0x24
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.eq(r17.new, #21)) jump:t
+# CHECK-NEXT: if (!cmp.eq(r17.new,#21)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0x82 0x24
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gt(r17.new, #21)) jump:nt
+# CHECK-NEXT: if (cmp.gt(r17.new,#21)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0x82 0x24
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gt(r17.new, #21)) jump:t
+# CHECK-NEXT: if (cmp.gt(r17.new,#21)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0xc2 0x24
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.gt(r17.new, #21)) jump:nt
+# CHECK-NEXT: if (!cmp.gt(r17.new,#21)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0xc2 0x24
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.gt(r17.new, #21)) jump:t
+# CHECK-NEXT: if (!cmp.gt(r17.new,#21)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0x02 0x25
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gtu(r17.new, #21)) jump:nt
+# CHECK-NEXT: if (cmp.gtu(r17.new,#21)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0x02 0x25
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gtu(r17.new, #21)) jump:t
+# CHECK-NEXT: if (cmp.gtu(r17.new,#21)) jump:t
0x11 0x40 0x71 0x70 0x92 0xd5 0x42 0x25
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.gtu(r17.new, #21)) jump:nt
+# CHECK-NEXT: if (!cmp.gtu(r17.new,#21)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xf5 0x42 0x25
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.gtu(r17.new, #21)) jump:t
+# CHECK-NEXT: if (!cmp.gtu(r17.new,#21)) jump:t
0x11 0x40 0x71 0x70 0x92 0xc0 0x82 0x25
# CHECK: r17 = r17
-# CHECK-NEXT: if (tstbit(r17.new, #0)) jump:nt
+# CHECK-NEXT: if (tstbit(r17.new,#0)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xe0 0x82 0x25
# CHECK: r17 = r17
-# CHECK-NEXT: if (tstbit(r17.new, #0)) jump:t
+# CHECK-NEXT: if (tstbit(r17.new,#0)) jump:t
0x11 0x40 0x71 0x70 0x92 0xc0 0xc2 0x25
# CHECK: r17 = r17
-# CHECK-NEXT: if (!tstbit(r17.new, #0)) jump:nt
+# CHECK-NEXT: if (!tstbit(r17.new,#0)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xe0 0xc2 0x25
# CHECK: r17 = r17
-# CHECK-NEXT: if (!tstbit(r17.new, #0)) jump:t
+# CHECK-NEXT: if (!tstbit(r17.new,#0)) jump:t
0x11 0x40 0x71 0x70 0x92 0xc0 0x02 0x26
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.eq(r17.new, #-1)) jump:nt
+# CHECK-NEXT: if (cmp.eq(r17.new,#-1)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xe0 0x02 0x26
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.eq(r17.new, #-1)) jump:t
+# CHECK-NEXT: if (cmp.eq(r17.new,#-1)) jump:t
0x11 0x40 0x71 0x70 0x92 0xc0 0x42 0x26
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.eq(r17.new, #-1)) jump:nt
+# CHECK-NEXT: if (!cmp.eq(r17.new,#-1)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xe0 0x42 0x26
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.eq(r17.new, #-1)) jump:t
+# CHECK-NEXT: if (!cmp.eq(r17.new,#-1)) jump:t
0x11 0x40 0x71 0x70 0x92 0xc0 0x82 0x26
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gt(r17.new, #-1)) jump:nt
+# CHECK-NEXT: if (cmp.gt(r17.new,#-1)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xe0 0x82 0x26
# CHECK: r17 = r17
-# CHECK-NEXT: if (cmp.gt(r17.new, #-1)) jump:t
+# CHECK-NEXT: if (cmp.gt(r17.new,#-1)) jump:t
0x11 0x40 0x71 0x70 0x92 0xc0 0xc2 0x26
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.gt(r17.new, #-1)) jump:nt
+# CHECK-NEXT: if (!cmp.gt(r17.new,#-1)) jump:nt
0x11 0x40 0x71 0x70 0x92 0xe0 0xc2 0x26
# CHECK: r17 = r17
-# CHECK-NEXT: if (!cmp.gt(r17.new, #-1)) jump:t
+# CHECK-NEXT: if (!cmp.gt(r17.new,#-1)) jump:t
diff --git a/test/MC/Disassembler/Hexagon/nv_st.txt b/test/MC/Disassembler/Hexagon/nv_st.txt
index 3a767f33b36a..7b76cb56dd3a 100644
--- a/test/MC/Disassembler/Hexagon/nv_st.txt
+++ b/test/MC/Disassembler/Hexagon/nv_st.txt
@@ -4,19 +4,19 @@
# Store new-value byte
0x1f 0x40 0x7f 0x70 0x82 0xf5 0xb1 0x3b
# CHECK: r31 = r31
-# CHECK-NEXT: memb(r17 + r21<<#3) = r31.new
+# CHECK-NEXT: memb(r17+r21<<#3) = r31.new
0x1f 0x40 0x7f 0x70 0x11 0xc2 0xa0 0x48
# CHECK: r31 = r31
-# CHECK-NEXT: memb(#17) = r31.new
+# CHECK-NEXT: memb(gp+#17) = r31.new
0x1f 0x40 0x7f 0x70 0x15 0xc2 0xb1 0xa1
# CHECK: r31 = r31
# CHECK-NEXT: memb(r17+#21) = r31.new
0x1f 0x40 0x7f 0x70 0x02 0xe2 0xb1 0xa9
# CHECK: r31 = r31
-# CHECK-NEXT: memb(r17 ++ I:circ(m1)) = r31.new
+# CHECK-NEXT: memb(r17++I:circ(m1)) = r31.new
0x1f 0x40 0x7f 0x70 0x28 0xe2 0xb1 0xa9
# CHECK: r31 = r31
-# CHECK-NEXT: memb(r17 ++ #5:circ(m1)) = r31.new
+# CHECK-NEXT: memb(r17++#5:circ(m1)) = r31.new
0x1f 0x40 0x7f 0x70 0x28 0xc2 0xb1 0xab
# CHECK: r31 = r31
# CHECK-NEXT: memb(r17++#5) = r31.new
@@ -25,7 +25,7 @@
# CHECK-NEXT: memb(r17++m1) = r31.new
0x1f 0x40 0x7f 0x70 0x00 0xe2 0xb1 0xaf
# CHECK: r31 = r31
-# CHECK-NEXT: memb(r17 ++ m1:brev) = r31.new
+# CHECK-NEXT: memb(r17++m1:brev) = r31.new
# Store new-value byte conditionally
0x1f 0x40 0x7f 0x70 0xe2 0xf5 0xb1 0x34
@@ -74,19 +74,19 @@
# Store new-value halfword
0x1f 0x40 0x7f 0x70 0x8a 0xf5 0xb1 0x3b
# CHECK: r31 = r31
-# CHECK-NEXT: memh(r17 + r21<<#3) = r31.new
+# CHECK-NEXT: memh(r17+r21<<#3) = r31.new
0x1f 0x40 0x7f 0x70 0x15 0xca 0xa0 0x48
# CHECK: r31 = r31
-# CHECK-NEXT: memh(#42) = r31.new
+# CHECK-NEXT: memh(gp+#42) = r31.new
0x1f 0x40 0x7f 0x70 0x15 0xca 0xb1 0xa1
# CHECK: r31 = r31
# CHECK-NEXT: memh(r17+#42) = r31.new
0x1f 0x40 0x7f 0x70 0x02 0xea 0xb1 0xa9
# CHECK: r31 = r31
-# CHECK-NEXT: memh(r17 ++ I:circ(m1)) = r31.new
+# CHECK-NEXT: memh(r17++I:circ(m1)) = r31.new
0x1f 0x40 0x7f 0x70 0x28 0xea 0xb1 0xa9
# CHECK: r31 = r31
-# CHECK-NEXT: memh(r17 ++ #10:circ(m1)) = r31.new
+# CHECK-NEXT: memh(r17++#10:circ(m1)) = r31.new
0x1f 0x40 0x7f 0x70 0x28 0xca 0xb1 0xab
# CHECK: r31 = r31
# CHECK-NEXT: memh(r17++#10) = r31.new
@@ -95,7 +95,7 @@
# CHECK-NEXT: memh(r17++m1) = r31.new
0x1f 0x40 0x7f 0x70 0x00 0xea 0xb1 0xaf
# CHECK: r31 = r31
-# CHECK-NEXT: memh(r17 ++ m1:brev) = r31.new
+# CHECK-NEXT: memh(r17++m1:brev) = r31.new
# Store new-value halfword conditionally
0x1f 0x40 0x7f 0x70 0xea 0xf5 0xb1 0x34
@@ -144,19 +144,19 @@
# Store new-value word
0x1f 0x40 0x7f 0x70 0x92 0xf5 0xb1 0x3b
# CHECK: r31 = r31
-# CHECK-NEXT: memw(r17 + r21<<#3) = r31.new
+# CHECK-NEXT: memw(r17+r21<<#3) = r31.new
0x1f 0x40 0x7f 0x70 0x15 0xd2 0xa0 0x48
# CHECK: r31 = r31
-# CHECK-NEXT: memw(#84) = r31.new
+# CHECK-NEXT: memw(gp+#84) = r31.new
0x1f 0x40 0x7f 0x70 0x15 0xd2 0xb1 0xa1
# CHECK: r31 = r31
# CHECK-NEXT: memw(r17+#84) = r31.new
0x1f 0x40 0x7f 0x70 0x02 0xf2 0xb1 0xa9
# CHECK: r31 = r31
-# CHECK-NEXT: memw(r17 ++ I:circ(m1)) = r31.new
+# CHECK-NEXT: memw(r17++I:circ(m1)) = r31.new
0x1f 0x40 0x7f 0x70 0x28 0xf2 0xb1 0xa9
# CHECK: r31 = r31
-# CHECK-NEXT: memw(r17 ++ #20:circ(m1)) = r31.new
+# CHECK-NEXT: memw(r17++#20:circ(m1)) = r31.new
0x1f 0x40 0x7f 0x70 0x28 0xd2 0xb1 0xab
# CHECK: r31 = r31
# CHECK-NEXT: memw(r17++#20) = r31.new
@@ -165,7 +165,7 @@
# CHECK-NEXT: memw(r17++m1) = r31.new
0x1f 0x40 0x7f 0x70 0x00 0xf2 0xb1 0xaf
# CHECK: r31 = r31
-# CHECK-NEXT: memw(r17 ++ m1:brev) = r31.new
+# CHECK-NEXT: memw(r17++m1:brev) = r31.new
# Store new-value word conditionally
0x1f 0x40 0x7f 0x70 0xf2 0xf5 0xb1 0x34
diff --git a/test/MC/Disassembler/Hexagon/st.txt b/test/MC/Disassembler/Hexagon/st.txt
index 6d9074a05ef7..0f936c267f56 100644
--- a/test/MC/Disassembler/Hexagon/st.txt
+++ b/test/MC/Disassembler/Hexagon/st.txt
@@ -3,25 +3,25 @@
# Store doubleword
0x9e 0xf5 0xd1 0x3b
-# CHECK: memd(r17 + r21<<#3) = r31:30
+# CHECK: memd(r17+r21<<#3) = r31:30
0x28 0xd4 0xc0 0x48
-# CHECK: memd(#320) = r21:20
+# CHECK: memd(gp+#320) = r21:20
0x02 0x40 0x00 0x00 0x28 0xd4 0xc0 0x48
# CHECK: memd(##168) = r21:20
0x15 0xd4 0xd1 0xa1
# CHECK: memd(r17+#168) = r21:20
0x02 0xf4 0xd1 0xa9
-# CHECK: memd(r17 ++ I:circ(m1)) = r21:20
+# CHECK: memd(r17++I:circ(m1)) = r21:20
0x28 0xf4 0xd1 0xa9
-# CHECK: memd(r17 ++ #40:circ(m1)) = r21:20
+# CHECK: memd(r17++#40:circ(m1)) = r21:20
0x28 0xd4 0xd1 0xab
# CHECK: memd(r17++#40) = r21:20
0x00 0x40 0x00 0x00 0xd5 0xfe 0xd1 0xad
-# CHECK: memd(r17<<#3 + ##21) = r31:30
+# CHECK: memd(r17<<#3+##21) = r31:30
0x00 0xf4 0xd1 0xad
# CHECK: memd(r17++m1) = r21:20
0x00 0xf4 0xd1 0xaf
-# CHECK: memd(r17 ++ m1:brev) = r21:20
+# CHECK: memd(r17++m1:brev) = r21:20
# Store doubleword conditionally
0xfe 0xf5 0xd1 0x34
@@ -67,27 +67,27 @@
# Store byte
0x9f 0xf5 0x11 0x3b
-# CHECK: memb(r17 + r21<<#3) = r31
+# CHECK: memb(r17+r21<<#3) = r31
0x9f 0xca 0x11 0x3c
-# CHECK: memb(r17+#21)=#31
+# CHECK: memb(r17+#21) = #31
0x15 0xd5 0x00 0x48
-# CHECK: memb(#21) = r21
+# CHECK: memb(gp+#21) = r21
0x00 0x40 0x00 0x00 0x15 0xd5 0x00 0x48
# CHECK: memb(##21) = r21
0x15 0xd5 0x11 0xa1
# CHECK: memb(r17+#21) = r21
0x02 0xf5 0x11 0xa9
-# CHECK: memb(r17 ++ I:circ(m1)) = r21
+# CHECK: memb(r17++I:circ(m1)) = r21
0x28 0xf5 0x11 0xa9
-# CHECK: memb(r17 ++ #5:circ(m1)) = r21
+# CHECK: memb(r17++#5:circ(m1)) = r21
0x28 0xd5 0x11 0xab
# CHECK: memb(r17++#5) = r21
0x00 0x40 0x00 0x00 0xd5 0xff 0x11 0xad
-# CHECK: memb(r17<<#3 + ##21) = r31
+# CHECK: memb(r17<<#3+##21) = r31
0x00 0xf5 0x11 0xad
# CHECK: memb(r17++m1) = r21
0x00 0xf5 0x11 0xaf
-# CHECK: memb(r17 ++ m1:brev) = r21
+# CHECK: memb(r17++m1:brev) = r21
# Store byte conditionally
0xff 0xf5 0x11 0x34
@@ -101,15 +101,15 @@
# CHECK: p3 = r5
# CHECK-NEXT: if (!p3.new) memb(r17+r21<<#3) = r31
0xff 0xca 0x11 0x38
-# CHECK: if (p3) memb(r17+#21)=#31
+# CHECK: if (p3) memb(r17+#21) = #31
0xff 0xca 0x91 0x38
-# CHECK: if (!p3) memb(r17+#21)=#31
+# CHECK: if (!p3) memb(r17+#21) = #31
0x03 0x40 0x45 0x85 0xff 0xca 0x11 0x39
# CHECK: p3 = r5
-# CHECK-NEXT: if (p3.new) memb(r17+#21)=#31
+# CHECK-NEXT: if (p3.new) memb(r17+#21) = #31
0x03 0x40 0x45 0x85 0xff 0xca 0x91 0x39
# CHECK: p3 = r5
-# CHECK-NEXT: if (!p3.new) memb(r17+#21)=#31
+# CHECK-NEXT: if (!p3.new) memb(r17+#21) = #31
0xab 0xdf 0x11 0x40
# CHECK: if (p3) memb(r17+#21) = r31
0xab 0xdf 0x11 0x44
@@ -143,47 +143,47 @@
# Store halfword
0x9f 0xf5 0x51 0x3b
-# CHECK: memh(r17 + r21<<#3) = r31
+# CHECK: memh(r17+r21<<#3) = r31
0x9f 0xf5 0x71 0x3b
-# CHECK: memh(r17 + r21<<#3) = r31.h
+# CHECK: memh(r17+r21<<#3) = r31.h
0x95 0xcf 0x31 0x3c
-# CHECK: memh(r17+#62)=#21
+# CHECK: memh(r17+#62) = #21
0x00 0x40 0x00 0x00 0x2a 0xd5 0x40 0x48
# CHECK: memh(##42) = r21
0x00 0x40 0x00 0x00 0x2a 0xd5 0x60 0x48
# CHECK: memh(##42) = r21.h
0x2a 0xd5 0x40 0x48
-# CHECK: memh(#84) = r21
+# CHECK: memh(gp+#84) = r21
0x2a 0xd5 0x60 0x48
-# CHECK: memh(#84) = r21.h
+# CHECK: memh(gp+#84) = r21.h
0x15 0xdf 0x51 0xa1
# CHECK: memh(r17+#42) = r31
0x15 0xdf 0x71 0xa1
# CHECK: memh(r17+#42) = r31.h
0x02 0xf5 0x51 0xa9
-# CHECK: memh(r17 ++ I:circ(m1)) = r21
+# CHECK: memh(r17++I:circ(m1)) = r21
0x28 0xf5 0x51 0xa9
-# CHECK: memh(r17 ++ #10:circ(m1)) = r21
+# CHECK: memh(r17++#10:circ(m1)) = r21
0x02 0xf5 0x71 0xa9
-# CHECK: memh(r17 ++ I:circ(m1)) = r21.h
+# CHECK: memh(r17++I:circ(m1)) = r21.h
0x28 0xf5 0x71 0xa9
-# CHECK: memh(r17 ++ #10:circ(m1)) = r21.h
+# CHECK: memh(r17++#10:circ(m1)) = r21.h
0x28 0xd5 0x51 0xab
# CHECK: memh(r17++#10) = r21
0x00 0x40 0x00 0x00 0xd5 0xff 0x51 0xad
-# CHECK: memh(r17<<#3 + ##21) = r31
+# CHECK: memh(r17<<#3+##21) = r31
0x28 0xd5 0x71 0xab
# CHECK: memh(r17++#10) = r21.h
0x00 0x40 0x00 0x00 0xd5 0xff 0x71 0xad
-# CHECK: memh(r17<<#3 + ##21) = r31.h
+# CHECK: memh(r17<<#3+##21) = r31.h
0x00 0xf5 0x51 0xad
# CHECK: memh(r17++m1) = r21
0x00 0xf5 0x71 0xad
# CHECK: memh(r17++m1) = r21.h
0x00 0xf5 0x51 0xaf
-# CHECK: memh(r17 ++ m1:brev) = r21
+# CHECK: memh(r17++m1:brev) = r21
0x00 0xf5 0x71 0xaf
-# CHECK: memh(r17 ++ m1:brev) = r21.h
+# CHECK: memh(r17++m1:brev) = r21.h
# Store halfword conditionally
0xff 0xf5 0x51 0x34
@@ -207,15 +207,15 @@
# CHECK: p3 = r5
# CHECK-NEXT: if (!p3.new) memh(r17+r21<<#3) = r31.h
0xf5 0xcf 0x31 0x38
-# CHECK: if (p3) memh(r17+#62)=#21
+# CHECK: if (p3) memh(r17+#62) = #21
0xf5 0xcf 0xb1 0x38
-# CHECK: if (!p3) memh(r17+#62)=#21
+# CHECK: if (!p3) memh(r17+#62) = #21
0x03 0x40 0x45 0x85 0xf5 0xcf 0x31 0x39
# CHECK: p3 = r5
-# CHECK-NEXT: if (p3.new) memh(r17+#62)=#21
+# CHECK-NEXT: if (p3.new) memh(r17+#62) = #21
0x03 0x40 0x45 0x85 0xf5 0xcf 0xb1 0x39
# CHECK: p3 = r5
-# CHECK-NEXT: if (!p3.new) memh(r17+#62)=#21
+# CHECK-NEXT: if (!p3.new) memh(r17+#62) = #21
0xfb 0xd5 0x51 0x40
# CHECK: if (p3) memh(r17+#62) = r21
0xfb 0xd5 0x71 0x40
@@ -279,29 +279,29 @@
# Store word
0x9f 0xf5 0x91 0x3b
-# CHECK: memw(r17 + r21<<#3) = r31
+# CHECK: memw(r17+r21<<#3) = r31
0x9f 0xca 0x51 0x3c
-# CHECK: memw(r17{{ *}}+{{ *}}#84)=#31
+# CHECK: memw(r17+#84) = #31
0x15 0xdf 0x80 0x48
-# CHECK: memw(#84) = r31
+# CHECK: memw(gp+#84) = r31
0x01 0x40 0x00 0x00 0x14 0xd5 0x80 0x48
# CHECK: memw(##84) = r21
0x9f 0xca 0x51 0x3c
-# CHECK: memw(r17+#84)=#31
+# CHECK: memw(r17+#84) = #31
0x15 0xdf 0x91 0xa1
# CHECK: memw(r17+#84) = r31
0x02 0xf5 0x91 0xa9
-# CHECK: memw(r17 ++ I:circ(m1)) = r21
+# CHECK: memw(r17++I:circ(m1)) = r21
0x28 0xf5 0x91 0xa9
-# CHECK: memw(r17 ++ #20:circ(m1)) = r21
+# CHECK: memw(r17++#20:circ(m1)) = r21
0x28 0xd5 0x91 0xab
# CHECK: memw(r17++#20) = r21
0x00 0x40 0x00 0x00 0xd5 0xff 0x91 0xad
-# CHECK: memw(r17<<#3 + ##21) = r31
+# CHECK: memw(r17<<#3+##21) = r31
0x00 0xf5 0x91 0xad
# CHECK: memw(r17++m1) = r21
0x00 0xf5 0x91 0xaf
-# CHECK: memw(r17 ++ m1:brev) = r21
+# CHECK: memw(r17++m1:brev) = r21
# Store word conditionally
0xff 0xf5 0x91 0x34
@@ -315,15 +315,15 @@
# CHECK: p3 = r5
# CHECK-NEXT: if (!p3.new) memw(r17+r21<<#3) = r31
0xff 0xca 0x51 0x38
-# CHECK: if (p3) memw(r17+#84)=#31
+# CHECK: if (p3) memw(r17+#84) = #31
0xff 0xca 0xd1 0x38
-# CHECK: if (!p3) memw(r17+#84)=#31
+# CHECK: if (!p3) memw(r17+#84) = #31
0x03 0x40 0x45 0x85 0xff 0xca 0x51 0x39
# CHECK: p3 = r5
-# CHECK-NEXT: if (p3.new) memw(r17+#84)=#31
+# CHECK-NEXT: if (p3.new) memw(r17+#84) = #31
0x03 0x40 0x45 0x85 0xff 0xca 0xd1 0x39
# CHECK: p3 = r5
-# CHECK-NEXT: if (!p3.new) memw(r17+#84)=#31
+# CHECK-NEXT: if (!p3.new) memw(r17+#84) = #31
0xab 0xdf 0x91 0x40
# CHECK: if (p3) memw(r17+#84) = r31
0xab 0xdf 0x91 0x44
diff --git a/test/MC/Disassembler/Hexagon/system_user.txt b/test/MC/Disassembler/Hexagon/system_user.txt
index d55a94e939b5..f4d731059e04 100644
--- a/test/MC/Disassembler/Hexagon/system_user.txt
+++ b/test/MC/Disassembler/Hexagon/system_user.txt
@@ -9,9 +9,9 @@
# Store conditional
0x03 0xd5 0xb1 0xa0
-# CHECK: memw_locked(r17, p3) = r21
+# CHECK: memw_locked(r17,p3) = r21
0x03 0xd4 0xf1 0xa0
-# CHECK: memd_locked(r17, p3) = r21:20
+# CHECK: memd_locked(r17,p3) = r21:20
# Memory barrier
0x00 0xc0 0x00 0xa8
@@ -19,7 +19,7 @@
# Data cache prefetch
0x15 0xc0 0x11 0x94
-# CHECK: dcfetch(r17 + #168)
+# CHECK: dcfetch(r17+#168)
# Send value to ETM trace
0x00 0xc0 0x51 0x62
diff --git a/test/MC/Disassembler/Hexagon/xtype_alu.txt b/test/MC/Disassembler/Hexagon/xtype_alu.txt
index 03d0f0518a3d..f05dafb3fce8 100644
--- a/test/MC/Disassembler/Hexagon/xtype_alu.txt
+++ b/test/MC/Disassembler/Hexagon/xtype_alu.txt
@@ -11,137 +11,137 @@
# Add and accumulate
0xff 0xd1 0x35 0xdb
-# CHECK: r17 = add(r21, add(r31, #23))
+# CHECK: r17 = add(r21,add(r31,#23))
0xff 0xd1 0xb5 0xdb
-# CHECK: r17 = add(r21, sub(#23, r31))
+# CHECK: r17 = add(r21,sub(#23,r31))
0xf1 0xc2 0x15 0xe2
-# CHECK: r17 += add(r21, #23)
+# CHECK: r17 += add(r21,#23)
0xf1 0xc2 0x95 0xe2
-# CHECK: r17 -= add(r21, #23)
+# CHECK: r17 -= add(r21,#23)
0x31 0xdf 0x15 0xef
-# CHECK: r17 += add(r21, r31)
+# CHECK: r17 += add(r21,r31)
0x31 0xdf 0x95 0xef
-# CHECK: r17 -= add(r21, r31)
+# CHECK: r17 -= add(r21,r31)
# Add doublewords
0xf0 0xde 0x14 0xd3
-# CHECK: r17:16 = add(r21:20, r31:30)
+# CHECK: r17:16 = add(r21:20,r31:30)
0xb0 0xde 0x74 0xd3
-# CHECK: r17:16 = add(r21:20, r31:30):sat
+# CHECK: r17:16 = add(r21:20,r31:30):sat
0xd0 0xde 0x74 0xd3
-# CHECK: r17:16 = add(r21:20, r31:30):raw:lo
+# CHECK: r17:16 = add(r21:20,r31:30):raw:lo
0xf0 0xde 0x74 0xd3
-# CHECK: r17:16 = add(r21:20, r31:30):raw:hi
+# CHECK: r17:16 = add(r21:20,r31:30):raw:hi
# Add halfword
0x11 0xd5 0x1f 0xd5
-# CHECK: r17 = add(r21.l, r31.l)
+# CHECK: r17 = add(r21.l,r31.l)
0x51 0xd5 0x1f 0xd5
-# CHECK: r17 = add(r21.l, r31.h)
+# CHECK: r17 = add(r21.l,r31.h)
0x91 0xd5 0x1f 0xd5
-# CHECK: r17 = add(r21.l, r31.l):sat
+# CHECK: r17 = add(r21.l,r31.l):sat
0xd1 0xd5 0x1f 0xd5
-# CHECK: r17 = add(r21.l, r31.h):sat
+# CHECK: r17 = add(r21.l,r31.h):sat
0x11 0xd5 0x5f 0xd5
-# CHECK: r17 = add(r21.l, r31.l):<<16
+# CHECK: r17 = add(r21.l,r31.l):<<16
0x31 0xd5 0x5f 0xd5
-# CHECK: r17 = add(r21.l, r31.h):<<16
+# CHECK: r17 = add(r21.l,r31.h):<<16
0x51 0xd5 0x5f 0xd5
-# CHECK: r17 = add(r21.h, r31.l):<<16
+# CHECK: r17 = add(r21.h,r31.l):<<16
0x71 0xd5 0x5f 0xd5
-# CHECK: r17 = add(r21.h, r31.h):<<16
+# CHECK: r17 = add(r21.h,r31.h):<<16
0x91 0xd5 0x5f 0xd5
-# CHECK: r17 = add(r21.l, r31.l):sat:<<16
+# CHECK: r17 = add(r21.l,r31.l):sat:<<16
0xb1 0xd5 0x5f 0xd5
-# CHECK: r17 = add(r21.l, r31.h):sat:<<16
+# CHECK: r17 = add(r21.l,r31.h):sat:<<16
0xd1 0xd5 0x5f 0xd5
-# CHECK: r17 = add(r21.h, r31.l):sat:<<16
+# CHECK: r17 = add(r21.h,r31.l):sat:<<16
0xf1 0xd5 0x5f 0xd5
-# CHECK: r17 = add(r21.h, r31.h):sat:<<16
+# CHECK: r17 = add(r21.h,r31.h):sat:<<16
# Add or subtract doublewords with carry
0x70 0xde 0xd4 0xc2
-# CHECK: r17:16 = add(r21:20, r31:30, p3):carry
+# CHECK: r17:16 = add(r21:20,r31:30,p3):carry
0x70 0xde 0xf4 0xc2
-# CHECK: r17:16 = sub(r21:20, r31:30, p3):carry
+# CHECK: r17:16 = sub(r21:20,r31:30,p3):carry
# Logical doublewords
0x90 0xc0 0x94 0x80
# CHECK: r17:16 = not(r21:20)
0x10 0xde 0xf4 0xd3
-# CHECK: r17:16 = and(r21:20, r31:30)
+# CHECK: r17:16 = and(r21:20,r31:30)
0x30 0xd4 0xfe 0xd3
-# CHECK: r17:16 = and(r21:20, ~r31:30)
+# CHECK: r17:16 = and(r21:20,~r31:30)
0x50 0xde 0xf4 0xd3
-# CHECK: r17:16 = or(r21:20, r31:30)
+# CHECK: r17:16 = or(r21:20,r31:30)
0x70 0xd4 0xfe 0xd3
-# CHECK: r17:16 = or(r21:20, ~r31:30)
+# CHECK: r17:16 = or(r21:20,~r31:30)
0x90 0xde 0xf4 0xd3
-# CHECK: r17:16 = xor(r21:20, r31:30)
+# CHECK: r17:16 = xor(r21:20,r31:30)
# Logical-logical doublewords
0x10 0xde 0x94 0xca
-# CHECK: r17:16 ^= xor(r21:20, r31:30)
+# CHECK: r17:16 ^= xor(r21:20,r31:30)
# Logical-logical words
0xf1 0xc3 0x15 0xda
-# CHECK: r17 |= and(r21, #31)
+# CHECK: r17 |= and(r21,#31)
0xf5 0xc3 0x51 0xda
-# CHECK: r17 = or(r21, and(r17, #31))
+# CHECK: r17 = or(r21,and(r17,#31))
0xf1 0xc3 0x95 0xda
-# CHECK: r17 |= or(r21, #31)
+# CHECK: r17 |= or(r21,#31)
0x11 0xdf 0x35 0xef
-# CHECK: r17 |= and(r21, ~r31)
+# CHECK: r17 |= and(r21,~r31)
0x31 0xdf 0x35 0xef
-# CHECK: r17 &= and(r21, ~r31)
+# CHECK: r17 &= and(r21,~r31)
0x51 0xdf 0x35 0xef
-# CHECK: r17 ^= and(r21, ~r31)
+# CHECK: r17 ^= and(r21,~r31)
0x11 0xdf 0x55 0xef
-# CHECK: r17 &= and(r21, r31)
+# CHECK: r17 &= and(r21,r31)
0x31 0xdf 0x55 0xef
-# CHECK: r17 &= or(r21, r31)
+# CHECK: r17 &= or(r21,r31)
0x51 0xdf 0x55 0xef
-# CHECK: r17 &= xor(r21, r31)
+# CHECK: r17 &= xor(r21,r31)
0x71 0xdf 0x55 0xef
-# CHECK: r17 |= and(r21, r31)
+# CHECK: r17 |= and(r21,r31)
0x71 0xdf 0x95 0xef
-# CHECK: r17 ^= xor(r21, r31)
+# CHECK: r17 ^= xor(r21,r31)
0x11 0xdf 0xd5 0xef
-# CHECK: r17 |= or(r21, r31)
+# CHECK: r17 |= or(r21,r31)
0x31 0xdf 0xd5 0xef
-# CHECK: r17 |= xor(r21, r31)
+# CHECK: r17 |= xor(r21,r31)
0x51 0xdf 0xd5 0xef
-# CHECK: r17 ^= and(r21, r31)
+# CHECK: r17 ^= and(r21,r31)
0x71 0xdf 0xd5 0xef
-# CHECK: r17 ^= or(r21, r31)
+# CHECK: r17 ^= or(r21,r31)
# Maximum words
0x11 0xdf 0xd5 0xd5
-# CHECK: r17 = max(r21, r31)
+# CHECK: r17 = max(r21,r31)
0x91 0xdf 0xd5 0xd5
-# CHECK: r17 = maxu(r21, r31)
+# CHECK: r17 = maxu(r21,r31)
# Maximum doublewords
0x90 0xde 0xd4 0xd3
-# CHECK: r17:16 = max(r21:20, r31:30)
+# CHECK: r17:16 = max(r21:20,r31:30)
0xb0 0xde 0xd4 0xd3
-# CHECK: r17:16 = maxu(r21:20, r31:30)
+# CHECK: r17:16 = maxu(r21:20,r31:30)
# Minimum words
0x11 0xd5 0xbf 0xd5
-# CHECK: r17 = min(r21, r31)
+# CHECK: r17 = min(r21,r31)
0x91 0xd5 0xbf 0xd5
-# CHECK: r17 = minu(r21, r31)
+# CHECK: r17 = minu(r21,r31)
# Minimum doublewords
0xd0 0xd4 0xbe 0xd3
-# CHECK: r17:16 = min(r21:20, r31:30)
+# CHECK: r17:16 = min(r21:20,r31:30)
0xf0 0xd4 0xbe 0xd3
-# CHECK: r17:16 = minu(r21:20, r31:30)
+# CHECK: r17:16 = minu(r21:20,r31:30)
# Modulo wrap
0xf1 0xdf 0xf5 0xd3
-# CHECK: r17 = modwrap(r21, r31)
+# CHECK: r17 = modwrap(r21,r31)
# Negate
0xb0 0xc0 0x94 0x80
@@ -153,51 +153,51 @@
0x31 0xc0 0xd4 0x88
# CHECK: r17 = round(r21:20):sat
0x11 0xdf 0xf5 0x8c
-# CHECK: r17 = cround(r21, #31)
+# CHECK: r17 = cround(r21,#31)
0x91 0xdf 0xf5 0x8c
-# CHECK: r17 = round(r21, #31)
+# CHECK: r17 = round(r21,#31)
0xd1 0xdf 0xf5 0x8c
-# CHECK: r17 = round(r21, #31):sat
+# CHECK: r17 = round(r21,#31):sat
0x11 0xdf 0xd5 0xc6
-# CHECK: r17 = cround(r21, r31)
+# CHECK: r17 = cround(r21,r31)
0x91 0xdf 0xd5 0xc6
-# CHECK: r17 = round(r21, r31)
+# CHECK: r17 = round(r21,r31)
0xd1 0xdf 0xd5 0xc6
-# CHECK: r17 = round(r21, r31):sat
+# CHECK: r17 = round(r21,r31):sat
# Subtract doublewords
0xf0 0xd4 0x3e 0xd3
-# CHECK: r17:16 = sub(r21:20, r31:30)
+# CHECK: r17:16 = sub(r21:20,r31:30)
# Subtract and accumulate words
0x71 0xd5 0x1f 0xef
-# CHECK: r17 += sub(r21, r31)
+# CHECK: r17 += sub(r21,r31)
# Subtract halfword
0x11 0xd5 0x3f 0xd5
-# CHECK: r17 = sub(r21.l, r31.l)
+# CHECK: r17 = sub(r21.l,r31.l)
0x51 0xd5 0x3f 0xd5
-# CHECK: r17 = sub(r21.l, r31.h)
+# CHECK: r17 = sub(r21.l,r31.h)
0x91 0xd5 0x3f 0xd5
-# CHECK: r17 = sub(r21.l, r31.l):sat
+# CHECK: r17 = sub(r21.l,r31.l):sat
0xd1 0xd5 0x3f 0xd5
-# CHECK: r17 = sub(r21.l, r31.h):sat
+# CHECK: r17 = sub(r21.l,r31.h):sat
0x11 0xd5 0x7f 0xd5
-# CHECK: r17 = sub(r21.l, r31.l):<<16
+# CHECK: r17 = sub(r21.l,r31.l):<<16
0x31 0xd5 0x7f 0xd5
-# CHECK: r17 = sub(r21.l, r31.h):<<16
+# CHECK: r17 = sub(r21.l,r31.h):<<16
0x51 0xd5 0x7f 0xd5
-# CHECK: r17 = sub(r21.h, r31.l):<<16
+# CHECK: r17 = sub(r21.h,r31.l):<<16
0x71 0xd5 0x7f 0xd5
-# CHECK: r17 = sub(r21.h, r31.h):<<16
+# CHECK: r17 = sub(r21.h,r31.h):<<16
0x91 0xd5 0x7f 0xd5
-# CHECK: r17 = sub(r21.l, r31.l):sat:<<16
+# CHECK: r17 = sub(r21.l,r31.l):sat:<<16
0xb1 0xd5 0x7f 0xd5
-# CHECK: r17 = sub(r21.l, r31.h):sat:<<16
+# CHECK: r17 = sub(r21.l,r31.h):sat:<<16
0xd1 0xd5 0x7f 0xd5
-# CHECK: r17 = sub(r21.h, r31.l):sat:<<16
+# CHECK: r17 = sub(r21.h,r31.l):sat:<<16
0xf1 0xd5 0x7f 0xd5
-# CHECK: r17 = sub(r21.h, r31.h):sat:<<16
+# CHECK: r17 = sub(r21.h,r31.h):sat:<<16
# Sign extend word to doubleword
0x10 0xc0 0x55 0x84
@@ -217,179 +217,179 @@
# Vector absolute difference halfwords
0x10 0xd4 0x7e 0xe8
-# CHECK: r17:16 = vabsdiffh(r21:20, r31:30)
+# CHECK: r17:16 = vabsdiffh(r21:20,r31:30)
# Vector absolute difference words
0x10 0xd4 0x3e 0xe8
-# CHECK: r17:16 = vabsdiffw(r21:20, r31:30)
+# CHECK: r17:16 = vabsdiffw(r21:20,r31:30)
# Vector add halfwords
0x50 0xde 0x14 0xd3
-# CHECK: r17:16 = vaddh(r21:20, r31:30)
+# CHECK: r17:16 = vaddh(r21:20,r31:30)
0x70 0xde 0x14 0xd3
-# CHECK: r17:16 = vaddh(r21:20, r31:30):sat
+# CHECK: r17:16 = vaddh(r21:20,r31:30):sat
0x90 0xde 0x14 0xd3
-# CHECK: r17:16 = vadduh(r21:20, r31:30):sat
+# CHECK: r17:16 = vadduh(r21:20,r31:30):sat
# Vector add halfwords with saturate and pack to unsigned bytes
0x31 0xde 0x54 0xc1
-# CHECK: r17 = vaddhub(r21:20, r31:30):sat
+# CHECK: r17 = vaddhub(r21:20,r31:30):sat
# Vector reduce add unsigned bytes
0x30 0xde 0x54 0xe8
-# CHECK: r17:16 = vraddub(r21:20, r31:30)
+# CHECK: r17:16 = vraddub(r21:20,r31:30)
0x30 0xde 0x54 0xea
-# CHECK: r17:16 += vraddub(r21:20, r31:30)
+# CHECK: r17:16 += vraddub(r21:20,r31:30)
# Vector reduce add halfwords
0x31 0xde 0x14 0xe9
-# CHECK: r17 = vradduh(r21:20, r31:30)
+# CHECK: r17 = vradduh(r21:20,r31:30)
0xf1 0xde 0x34 0xe9
-# CHECK: r17 = vraddh(r21:20, r31:30)
+# CHECK: r17 = vraddh(r21:20,r31:30)
# Vector add bytes
0x10 0xde 0x14 0xd3
-# CHECK: r17:16 = vaddub(r21:20, r31:30)
+# CHECK: r17:16 = vaddub(r21:20,r31:30)
0x30 0xde 0x14 0xd3
-# CHECK: r17:16 = vaddub(r21:20, r31:30):sat
+# CHECK: r17:16 = vaddub(r21:20,r31:30):sat
# Vector add words
0xb0 0xde 0x14 0xd3
-# CHECK: r17:16 = vaddw(r21:20, r31:30)
+# CHECK: r17:16 = vaddw(r21:20,r31:30)
0xd0 0xde 0x14 0xd3
-# CHECK: r17:16 = vaddw(r21:20, r31:30):sat
+# CHECK: r17:16 = vaddw(r21:20,r31:30):sat
# Vector average halfwords
0x50 0xde 0x54 0xd3
-# CHECK: r17:16 = vavgh(r21:20, r31:30)
+# CHECK: r17:16 = vavgh(r21:20,r31:30)
0x70 0xde 0x54 0xd3
-# CHECK: r17:16 = vavgh(r21:20, r31:30):rnd
+# CHECK: r17:16 = vavgh(r21:20,r31:30):rnd
0x90 0xde 0x54 0xd3
-# CHECK: r17:16 = vavgh(r21:20, r31:30):crnd
+# CHECK: r17:16 = vavgh(r21:20,r31:30):crnd
0xb0 0xde 0x54 0xd3
-# CHECK: r17:16 = vavguh(r21:20, r31:30)
+# CHECK: r17:16 = vavguh(r21:20,r31:30)
0xd0 0xde 0x54 0xd3
-# CHECK: r17:16 = vavguh(r21:20, r31:30):rnd
+# CHECK: r17:16 = vavguh(r21:20,r31:30):rnd
0x10 0xd4 0x9e 0xd3
-# CHECK: r17:16 = vnavgh(r21:20, r31:30)
+# CHECK: r17:16 = vnavgh(r21:20,r31:30)
0x30 0xd4 0x9e 0xd3
-# CHECK: r17:16 = vnavgh(r21:20, r31:30):rnd:sat
+# CHECK: r17:16 = vnavgh(r21:20,r31:30):rnd:sat
0x50 0xd4 0x9e 0xd3
-# CHECK: r17:16 = vnavgh(r21:20, r31:30):crnd:sat
+# CHECK: r17:16 = vnavgh(r21:20,r31:30):crnd:sat
# Vector average unsigned bytes
0x10 0xde 0x54 0xd3
-# CHECK: r17:16 = vavgub(r21:20, r31:30)
+# CHECK: r17:16 = vavgub(r21:20,r31:30)
0x30 0xde 0x54 0xd3
-# CHECK: r17:16 = vavgub(r21:20, r31:30):rnd
+# CHECK: r17:16 = vavgub(r21:20,r31:30):rnd
# Vector average words
0x10 0xde 0x74 0xd3
-# CHECK: r17:16 = vavgw(r21:20, r31:30)
+# CHECK: r17:16 = vavgw(r21:20,r31:30)
0x30 0xde 0x74 0xd3
-# CHECK: r17:16 = vavgw(r21:20, r31:30):rnd
+# CHECK: r17:16 = vavgw(r21:20,r31:30):rnd
0x50 0xde 0x74 0xd3
-# CHECK: r17:16 = vavgw(r21:20, r31:30):crnd
+# CHECK: r17:16 = vavgw(r21:20,r31:30):crnd
0x70 0xde 0x74 0xd3
-# CHECK: r17:16 = vavguw(r21:20, r31:30)
+# CHECK: r17:16 = vavguw(r21:20,r31:30)
0x90 0xde 0x74 0xd3
-# CHECK: r17:16 = vavguw(r21:20, r31:30):rnd
+# CHECK: r17:16 = vavguw(r21:20,r31:30):rnd
0x70 0xd4 0x9e 0xd3
-# CHECK: r17:16 = vnavgw(r21:20, r31:30)
+# CHECK: r17:16 = vnavgw(r21:20,r31:30)
0x90 0xd4 0x9e 0xd3
-# CHECK: r17:16 = vnavgw(r21:20, r31:30):rnd:sat
+# CHECK: r17:16 = vnavgw(r21:20,r31:30):rnd:sat
0xd0 0xd4 0x9e 0xd3
-# CHECK: r17:16 = vnavgw(r21:20, r31:30):crnd:sat
+# CHECK: r17:16 = vnavgw(r21:20,r31:30):crnd:sat
# Vector conditional negate
0x50 0xdf 0xd4 0xc3
-# CHECK: r17:16 = vcnegh(r21:20, r31)
+# CHECK: r17:16 = vcnegh(r21:20,r31)
0xf0 0xff 0x34 0xcb
-# CHECK: r17:16 += vrcnegh(r21:20, r31)
+# CHECK: r17:16 += vrcnegh(r21:20,r31)
# Vector maximum bytes
0x10 0xd4 0xde 0xd3
-# CHECK: r17:16 = vmaxub(r21:20, r31:30)
+# CHECK: r17:16 = vmaxub(r21:20,r31:30)
0xd0 0xd4 0xde 0xd3
-# CHECK: r17:16 = vmaxb(r21:20, r31:30)
+# CHECK: r17:16 = vmaxb(r21:20,r31:30)
# Vector maximum halfwords
0x30 0xd4 0xde 0xd3
-# CHECK: r17:16 = vmaxh(r21:20, r31:30)
+# CHECK: r17:16 = vmaxh(r21:20,r31:30)
0x50 0xd4 0xde 0xd3
-# CHECK: r17:16 = vmaxuh(r21:20, r31:30)
+# CHECK: r17:16 = vmaxuh(r21:20,r31:30)
# Vector reduce maximum halfwords
0x3f 0xd0 0x34 0xcb
-# CHECK: r17:16 = vrmaxh(r21:20, r31)
+# CHECK: r17:16 = vrmaxh(r21:20,r31)
0x3f 0xf0 0x34 0xcb
-# CHECK: r17:16 = vrmaxuh(r21:20, r31)
+# CHECK: r17:16 = vrmaxuh(r21:20,r31)
# Vector reduce maximum words
0x5f 0xd0 0x34 0xcb
-# CHECK: r17:16 = vrmaxw(r21:20, r31)
+# CHECK: r17:16 = vrmaxw(r21:20,r31)
0x5f 0xf0 0x34 0xcb
-# CHECK: r17:16 = vrmaxuw(r21:20, r31)
+# CHECK: r17:16 = vrmaxuw(r21:20,r31)
# Vector maximum words
0xb0 0xd4 0xbe 0xd3
-# CHECK: r17:16 = vmaxuw(r21:20, r31:30)
+# CHECK: r17:16 = vmaxuw(r21:20,r31:30)
0x70 0xd4 0xde 0xd3
-# CHECK: r17:16 = vmaxw(r21:20, r31:30)
+# CHECK: r17:16 = vmaxw(r21:20,r31:30)
# Vector minimum bytes
0x10 0xd4 0xbe 0xd3
-# CHECK: r17:16 = vminub(r21:20, r31:30)
+# CHECK: r17:16 = vminub(r21:20,r31:30)
0xf0 0xd4 0xde 0xd3
-# CHECK: r17:16 = vminb(r21:20, r31:30)
+# CHECK: r17:16 = vminb(r21:20,r31:30)
# Vector minimum halfwords
0x30 0xd4 0xbe 0xd3
-# CHECK: r17:16 = vminh(r21:20, r31:30)
+# CHECK: r17:16 = vminh(r21:20,r31:30)
0x50 0xd4 0xbe 0xd3
-# CHECK: r17:16 = vminuh(r21:20, r31:30)
+# CHECK: r17:16 = vminuh(r21:20,r31:30)
# Vector reduce minimum halfwords
0xbf 0xd0 0x34 0xcb
-# CHECK: r17:16 = vrminh(r21:20, r31)
+# CHECK: r17:16 = vrminh(r21:20,r31)
0xbf 0xf0 0x34 0xcb
-# CHECK: r17:16 = vrminuh(r21:20, r31)
+# CHECK: r17:16 = vrminuh(r21:20,r31)
# Vector reduce minimum words
0xdf 0xd0 0x34 0xcb
-# CHECK: r17:16 = vrminw(r21:20, r31)
+# CHECK: r17:16 = vrminw(r21:20,r31)
0xdf 0xf0 0x34 0xcb
-# CHECK: r17:16 = vrminuw(r21:20, r31)
+# CHECK: r17:16 = vrminuw(r21:20,r31)
# Vector minimum words
0x70 0xd4 0xbe 0xd3
-# CHECK: r17:16 = vminw(r21:20, r31:30)
+# CHECK: r17:16 = vminw(r21:20,r31:30)
0x90 0xd4 0xbe 0xd3
-# CHECK: r17:16 = vminuw(r21:20, r31:30)
+# CHECK: r17:16 = vminuw(r21:20,r31:30)
# Vector sum of absolute differences unsigned bytes
0x50 0xde 0x54 0xe8
-# CHECK: r17:16 = vrsadub(r21:20, r31:30)
+# CHECK: r17:16 = vrsadub(r21:20,r31:30)
0x50 0xde 0x54 0xea
-# CHECK: r17:16 += vrsadub(r21:20, r31:30)
+# CHECK: r17:16 += vrsadub(r21:20,r31:30)
# Vector subtract halfwords
0x50 0xd4 0x3e 0xd3
-# CHECK: r17:16 = vsubh(r21:20, r31:30)
+# CHECK: r17:16 = vsubh(r21:20,r31:30)
0x70 0xd4 0x3e 0xd3
-# CHECK: r17:16 = vsubh(r21:20, r31:30):sat
+# CHECK: r17:16 = vsubh(r21:20,r31:30):sat
0x90 0xd4 0x3e 0xd3
-# CHECK: r17:16 = vsubuh(r21:20, r31:30):sat
+# CHECK: r17:16 = vsubuh(r21:20,r31:30):sat
# Vector subtract bytes
0x10 0xd4 0x3e 0xd3
-# CHECK: r17:16 = vsubub(r21:20, r31:30)
+# CHECK: r17:16 = vsubub(r21:20,r31:30)
0x30 0xd4 0x3e 0xd3
-# CHECK: r17:16 = vsubub(r21:20, r31:30):sat
+# CHECK: r17:16 = vsubub(r21:20,r31:30):sat
# Vector subtract words
0xb0 0xd4 0x3e 0xd3
-# CHECK: r17:16 = vsubw(r21:20, r31:30)
+# CHECK: r17:16 = vsubw(r21:20,r31:30)
0xd0 0xd4 0x3e 0xd3
-# CHECK: r17:16 = vsubw(r21:20, r31:30):sat
+# CHECK: r17:16 = vsubw(r21:20,r31:30):sat
diff --git a/test/MC/Disassembler/Hexagon/xtype_bit.txt b/test/MC/Disassembler/Hexagon/xtype_bit.txt
index 89b6906afa92..490a8bf85029 100644
--- a/test/MC/Disassembler/Hexagon/xtype_bit.txt
+++ b/test/MC/Disassembler/Hexagon/xtype_bit.txt
@@ -11,9 +11,9 @@
0x11 0xc0 0x74 0x88
# CHECK: r17 = normamt(r21:20)
0x51 0xd7 0x74 0x88
-# CHECK: r17 = add(clb(r21:20), #23)
+# CHECK: r17 = add(clb(r21:20),#23)
0x11 0xd7 0x35 0x8c
-# CHECK: r17 = add(clb(r21), #23)
+# CHECK: r17 = add(clb(r21),#23)
0x91 0xc0 0x15 0x8c
# CHECK: r17 = clb(r21)
0xb1 0xc0 0x15 0x8c
@@ -39,31 +39,31 @@
# Extract bitfield
0xf0 0xdf 0x54 0x81
-# CHECK: r17:16 = extractu(r21:20, #31, #23)
+# CHECK: r17:16 = extractu(r21:20,#31,#23)
0xf0 0xdf 0x54 0x8a
-# CHECK: r17:16 = extract(r21:20, #31, #23)
+# CHECK: r17:16 = extract(r21:20,#31,#23)
0xf1 0xdf 0x55 0x8d
-# CHECK: r17 = extractu(r21, #31, #23)
+# CHECK: r17 = extractu(r21,#31,#23)
0xf1 0xdf 0xd5 0x8d
-# CHECK: r17 = extract(r21, #31, #23)
+# CHECK: r17 = extract(r21,#31,#23)
0x10 0xde 0x14 0xc1
-# CHECK: r17:16 = extractu(r21:20, r31:30)
+# CHECK: r17:16 = extractu(r21:20,r31:30)
0x90 0xde 0xd4 0xc1
-# CHECK: r17:16 = extract(r21:20, r31:30)
+# CHECK: r17:16 = extract(r21:20,r31:30)
0x11 0xde 0x15 0xc9
-# CHECK: r17 = extractu(r21, r31:30)
+# CHECK: r17 = extractu(r21,r31:30)
0x51 0xde 0x15 0xc9
-# CHECK: r17 = extract(r21, r31:30)
+# CHECK: r17 = extract(r21,r31:30)
# Insert bitfield
0xf0 0xdf 0x54 0x83
-# CHECK: r17:16 = insert(r21:20, #31, #23)
+# CHECK: r17:16 = insert(r21:20,#31,#23)
0xf1 0xdf 0x55 0x8f
-# CHECK: r17 = insert(r21, #31, #23)
+# CHECK: r17 = insert(r21,#31,#23)
0x11 0xde 0x15 0xc8
-# CHECK: r17 = insert(r21, r31:30)
+# CHECK: r17 = insert(r21,r31:30)
0x10 0xde 0x14 0xca
-# CHECK: r17:16 = insert(r21:20, r31:30)
+# CHECK: r17:16 = insert(r21:20,r31:30)
# Interleave/deinterleave
0x90 0xc0 0xd4 0x80
@@ -73,13 +73,13 @@
# Linear feedback-shift iteration
0xd0 0xde 0x94 0xc1
-# CHECK: r17:16 = lfs(r21:20, r31:30)
+# CHECK: r17:16 = lfs(r21:20,r31:30)
# Masked parity
0x11 0xde 0x14 0xd0
-# CHECK: r17 = parity(r21:20, r31:30)
+# CHECK: r17 = parity(r21:20,r31:30)
0x11 0xdf 0xf5 0xd5
-# CHECK: r17 = parity(r21, r31)
+# CHECK: r17 = parity(r21,r31)
# Bit reverse
0xd0 0xc0 0xd4 0x80
@@ -89,30 +89,30 @@
# Set/clear/toggle bit
0x11 0xdf 0xd5 0x8c
-# CHECK: r17 = setbit(r21, #31)
+# CHECK: r17 = setbit(r21,#31)
0x31 0xdf 0xd5 0x8c
-# CHECK: r17 = clrbit(r21, #31)
+# CHECK: r17 = clrbit(r21,#31)
0x51 0xdf 0xd5 0x8c
-# CHECK: r17 = togglebit(r21, #31)
+# CHECK: r17 = togglebit(r21,#31)
0x11 0xdf 0x95 0xc6
-# CHECK: r17 = setbit(r21, r31)
+# CHECK: r17 = setbit(r21,r31)
0x51 0xdf 0x95 0xc6
-# CHECK: r17 = clrbit(r21, r31)
+# CHECK: r17 = clrbit(r21,r31)
0x91 0xdf 0x95 0xc6
-# CHECK: r17 = togglebit(r21, r31)
+# CHECK: r17 = togglebit(r21,r31)
# Split bitfield
0x90 0xdf 0xd5 0x88
-# CHECK: r17:16 = bitsplit(r21, #31)
+# CHECK: r17:16 = bitsplit(r21,#31)
0x10 0xdf 0x35 0xd4
-# CHECK: r17:16 = bitsplit(r21, r31)
+# CHECK: r17:16 = bitsplit(r21,r31)
# Table index
0xf1 0xcd 0x15 0x87
-# CHECK: r17 = tableidxb(r21, #7, #13):raw
+# CHECK: r17 = tableidxb(r21,#7,#13):raw
0xf1 0xcd 0x55 0x87
-# CHECK: r17 = tableidxh(r21, #7, #13):raw
+# CHECK: r17 = tableidxh(r21,#7,#13):raw
0xf1 0xcd 0x95 0x87
-# CHECK: r17 = tableidxw(r21, #7, #13):raw
+# CHECK: r17 = tableidxw(r21,#7,#13):raw
0xf1 0xcd 0xd5 0x87
-# CHECK: r17 = tableidxd(r21, #7, #13):raw
+# CHECK: r17 = tableidxd(r21,#7,#13):raw
diff --git a/test/MC/Disassembler/Hexagon/xtype_complex.txt b/test/MC/Disassembler/Hexagon/xtype_complex.txt
index 2332082d835e..2c604f37d2ec 100644
--- a/test/MC/Disassembler/Hexagon/xtype_complex.txt
+++ b/test/MC/Disassembler/Hexagon/xtype_complex.txt
@@ -3,89 +3,89 @@
# Complex add/sub halfwords
0x90 0xde 0x54 0xc1
-# CHECK: r17:16 = vxaddsubh(r21:20, r31:30):sat
+# CHECK: r17:16 = vxaddsubh(r21:20,r31:30):sat
0xd0 0xde 0x54 0xc1
-# CHECK: r17:16 = vxsubaddh(r21:20, r31:30):sat
+# CHECK: r17:16 = vxsubaddh(r21:20,r31:30):sat
0x10 0xde 0xd4 0xc1
-# CHECK: r17:16 = vxaddsubh(r21:20, r31:30):rnd:>>1:sat
+# CHECK: r17:16 = vxaddsubh(r21:20,r31:30):rnd:>>1:sat
0x50 0xde 0xd4 0xc1
-# CHECK: r17:16 = vxsubaddh(r21:20, r31:30):rnd:>>1:sat
+# CHECK: r17:16 = vxsubaddh(r21:20,r31:30):rnd:>>1:sat
# Complex add/sub words
0x10 0xde 0x54 0xc1
-# CHECK: r17:16 = vxaddsubw(r21:20, r31:30):sat
+# CHECK: r17:16 = vxaddsubw(r21:20,r31:30):sat
0x50 0xde 0x54 0xc1
-# CHECK: r17:16 = vxsubaddw(r21:20, r31:30):sat
+# CHECK: r17:16 = vxsubaddw(r21:20,r31:30):sat
# Complex multiply
0xd0 0xdf 0x15 0xe5
-# CHECK: r17:16 = cmpy(r21, r31):sat
+# CHECK: r17:16 = cmpy(r21,r31):sat
0xd0 0xdf 0x95 0xe5
-# CHECK: r17:16 = cmpy(r21, r31):<<1:sat
+# CHECK: r17:16 = cmpy(r21,r31):<<1:sat
0xd0 0xdf 0x55 0xe5
-# CHECK: r17:16 = cmpy(r21, r31*):sat
+# CHECK: r17:16 = cmpy(r21,r31*):sat
0xd0 0xdf 0xd5 0xe5
-# CHECK: r17:16 = cmpy(r21, r31*):<<1:sat
+# CHECK: r17:16 = cmpy(r21,r31*):<<1:sat
0xd0 0xdf 0x15 0xe7
-# CHECK: r17:16 += cmpy(r21, r31):sat
+# CHECK: r17:16 += cmpy(r21,r31):sat
0xd0 0xdf 0x95 0xe7
-# CHECK: r17:16 += cmpy(r21, r31):<<1:sat
+# CHECK: r17:16 += cmpy(r21,r31):<<1:sat
0xf0 0xdf 0x15 0xe7
-# CHECK: r17:16 -= cmpy(r21, r31):sat
+# CHECK: r17:16 -= cmpy(r21,r31):sat
0xf0 0xdf 0x95 0xe7
-# CHECK: r17:16 -= cmpy(r21, r31):<<1:sat
+# CHECK: r17:16 -= cmpy(r21,r31):<<1:sat
0xd0 0xdf 0x55 0xe7
-# CHECK: r17:16 += cmpy(r21, r31*):sat
+# CHECK: r17:16 += cmpy(r21,r31*):sat
0xd0 0xdf 0xd5 0xe7
-# CHECK: r17:16 += cmpy(r21, r31*):<<1:sat
+# CHECK: r17:16 += cmpy(r21,r31*):<<1:sat
0xf0 0xdf 0x55 0xe7
-# CHECK: r17:16 -= cmpy(r21, r31*):sat
+# CHECK: r17:16 -= cmpy(r21,r31*):sat
0xf0 0xdf 0xd5 0xe7
-# CHECK: r17:16 -= cmpy(r21, r31*):<<1:sat
+# CHECK: r17:16 -= cmpy(r21,r31*):<<1:sat
# Complex multiply real or imaginary
0x30 0xdf 0x15 0xe5
-# CHECK: r17:16 = cmpyi(r21, r31)
+# CHECK: r17:16 = cmpyi(r21,r31)
0x50 0xdf 0x15 0xe5
-# CHECK: r17:16 = cmpyr(r21, r31)
+# CHECK: r17:16 = cmpyr(r21,r31)
0x30 0xdf 0x15 0xe7
-# CHECK: r17:16 += cmpyi(r21, r31)
+# CHECK: r17:16 += cmpyi(r21,r31)
0x50 0xdf 0x15 0xe7
-# CHECK: r17:16 += cmpyr(r21, r31)
+# CHECK: r17:16 += cmpyr(r21,r31)
# Complex multiply with round and pack
0xd1 0xdf 0x35 0xed
-# CHECK: r17 = cmpy(r21, r31):rnd:sat
+# CHECK: r17 = cmpy(r21,r31):rnd:sat
0xd1 0xdf 0xb5 0xed
-# CHECK: r17 = cmpy(r21, r31):<<1:rnd:sat
+# CHECK: r17 = cmpy(r21,r31):<<1:rnd:sat
0xd1 0xdf 0x75 0xed
-# CHECK: r17 = cmpy(r21, r31*):rnd:sat
+# CHECK: r17 = cmpy(r21,r31*):rnd:sat
0xd1 0xdf 0xf5 0xed
-# CHECK: r17 = cmpy(r21, r31*):<<1:rnd:sat
+# CHECK: r17 = cmpy(r21,r31*):<<1:rnd:sat
# Complex multiply 32x16
0x91 0xdf 0x14 0xc5
-# CHECK: r17 = cmpyiwh(r21:20, r31):<<1:rnd:sat
+# CHECK: r17 = cmpyiwh(r21:20,r31):<<1:rnd:sat
0xb1 0xdf 0x14 0xc5
-# CHECK: r17 = cmpyiwh(r21:20, r31*):<<1:rnd:sat
+# CHECK: r17 = cmpyiwh(r21:20,r31*):<<1:rnd:sat
0xd1 0xdf 0x14 0xc5
-# CHECK: r17 = cmpyrwh(r21:20, r31):<<1:rnd:sat
+# CHECK: r17 = cmpyrwh(r21:20,r31):<<1:rnd:sat
0xf1 0xdf 0x14 0xc5
-# CHECK: r17 = cmpyrwh(r21:20, r31*):<<1:rnd:sat
+# CHECK: r17 = cmpyrwh(r21:20,r31*):<<1:rnd:sat
# Vector complex multiply real or imaginary
0xd0 0xde 0x34 0xe8
-# CHECK: r17:16 = vcmpyr(r21:20, r31:30):sat
+# CHECK: r17:16 = vcmpyr(r21:20,r31:30):sat
0xd0 0xde 0xb4 0xe8
-# CHECK: r17:16 = vcmpyr(r21:20, r31:30):<<1:sat
+# CHECK: r17:16 = vcmpyr(r21:20,r31:30):<<1:sat
0xd0 0xde 0x54 0xe8
-# CHECK: r17:16 = vcmpyi(r21:20, r31:30):sat
+# CHECK: r17:16 = vcmpyi(r21:20,r31:30):sat
0xd0 0xde 0xd4 0xe8
-# CHECK: r17:16 = vcmpyi(r21:20, r31:30):<<1:sat
+# CHECK: r17:16 = vcmpyi(r21:20,r31:30):<<1:sat
0x90 0xde 0x34 0xea
-# CHECK: r17:16 += vcmpyr(r21:20, r31:30):sat
+# CHECK: r17:16 += vcmpyr(r21:20,r31:30):sat
0x90 0xde 0x54 0xea
-# CHECK: r17:16 += vcmpyi(r21:20, r31:30):sat
+# CHECK: r17:16 += vcmpyi(r21:20,r31:30):sat
# Vector complex conjugate
0xf0 0xc0 0x94 0x80
@@ -93,36 +93,36 @@
# Vector complex rotate
0x10 0xdf 0xd4 0xc3
-# CHECK: r17:16 = vcrotate(r21:20, r31)
+# CHECK: r17:16 = vcrotate(r21:20,r31)
# Vector reduce complex multiply real or imaginary
0x10 0xde 0x14 0xe8
-# CHECK: r17:16 = vrcmpyi(r21:20, r31:30)
+# CHECK: r17:16 = vrcmpyi(r21:20,r31:30)
0x30 0xde 0x14 0xe8
-# CHECK: r17:16 = vrcmpyr(r21:20, r31:30)
+# CHECK: r17:16 = vrcmpyr(r21:20,r31:30)
0x10 0xde 0x54 0xe8
-# CHECK: r17:16 = vrcmpyi(r21:20, r31:30*)
+# CHECK: r17:16 = vrcmpyi(r21:20,r31:30*)
0x30 0xde 0x74 0xe8
-# CHECK: r17:16 = vrcmpyr(r21:20, r31:30*)
+# CHECK: r17:16 = vrcmpyr(r21:20,r31:30*)
# Vector reduce complex multiply by scalar
0x90 0xde 0xb4 0xe8
-# CHECK: r17:16 = vrcmpys(r21:20, r31:30):<<1:sat:raw:hi
+# CHECK: r17:16 = vrcmpys(r21:20,r31:30):<<1:sat:raw:hi
0x90 0xde 0xf4 0xe8
-# CHECK: r17:16 = vrcmpys(r21:20, r31:30):<<1:sat:raw:lo
+# CHECK: r17:16 = vrcmpys(r21:20,r31:30):<<1:sat:raw:lo
0x90 0xde 0xb4 0xea
-# CHECK: r17:16 += vrcmpys(r21:20, r31:30):<<1:sat:raw:hi
+# CHECK: r17:16 += vrcmpys(r21:20,r31:30):<<1:sat:raw:hi
0x90 0xde 0xf4 0xea
-# CHECK: r17:16 += vrcmpys(r21:20, r31:30):<<1:sat:raw:lo
+# CHECK: r17:16 += vrcmpys(r21:20,r31:30):<<1:sat:raw:lo
# Vector reduce complex multiply by scalar with round and pack
0xd1 0xde 0xb4 0xe9
-# CHECK: r17 = vrcmpys(r21:20, r31:30):<<1:rnd:sat:raw:hi
+# CHECK: r17 = vrcmpys(r21:20,r31:30):<<1:rnd:sat:raw:hi
0xf1 0xde 0xb4 0xe9
-# CHECK: r17 = vrcmpys(r21:20, r31:30):<<1:rnd:sat:raw:lo
+# CHECK: r17 = vrcmpys(r21:20,r31:30):<<1:rnd:sat:raw:lo
# Vector reduce complex rotate
0xf0 0xff 0xd4 0xc3
-# CHECK: r17:16 = vrcrotate(r21:20, r31, #3)
+# CHECK: r17:16 = vrcrotate(r21:20,r31,#3)
0x30 0xff 0xb4 0xcb
-# CHECK: r17:16 += vrcrotate(r21:20, r31, #3)
+# CHECK: r17:16 += vrcrotate(r21:20,r31,#3)
diff --git a/test/MC/Disassembler/Hexagon/xtype_fp.txt b/test/MC/Disassembler/Hexagon/xtype_fp.txt
index 70074208edad..31f2a5330f2b 100644
--- a/test/MC/Disassembler/Hexagon/xtype_fp.txt
+++ b/test/MC/Disassembler/Hexagon/xtype_fp.txt
@@ -3,31 +3,31 @@
# Floating point addition
0x11 0xdf 0x15 0xeb
-# CHECK: r17 = sfadd(r21, r31)
+# CHECK: r17 = sfadd(r21,r31)
# Classify floating-point value
0x03 0xd5 0xf1 0x85
-# CHECK: p3 = sfclass(r17, #21)
+# CHECK: p3 = sfclass(r17,#21)
0xb3 0xc2 0x90 0xdc
-# CHECK: p3 = dfclass(r17:16, #21)
+# CHECK: p3 = dfclass(r17:16,#21)
# Compare floating-point value
0x03 0xd5 0xf1 0xc7
-# CHECK: p3 = sfcmp.ge(r17, r21)
+# CHECK: p3 = sfcmp.ge(r17,r21)
0x23 0xd5 0xf1 0xc7
-# CHECK: p3 = sfcmp.uo(r17, r21)
+# CHECK: p3 = sfcmp.uo(r17,r21)
0x63 0xd5 0xf1 0xc7
-# CHECK: p3 = sfcmp.eq(r17, r21)
+# CHECK: p3 = sfcmp.eq(r17,r21)
0x83 0xd5 0xf1 0xc7
-# CHECK: p3 = sfcmp.gt(r17, r21)
+# CHECK: p3 = sfcmp.gt(r17,r21)
0x03 0xd4 0xf0 0xd2
-# CHECK: p3 = dfcmp.eq(r17:16, r21:20)
+# CHECK: p3 = dfcmp.eq(r17:16,r21:20)
0x23 0xd4 0xf0 0xd2
-# CHECK: p3 = dfcmp.gt(r17:16, r21:20)
+# CHECK: p3 = dfcmp.gt(r17:16,r21:20)
0x43 0xd4 0xf0 0xd2
-# CHECK: p3 = dfcmp.ge(r17:16, r21:20)
+# CHECK: p3 = dfcmp.ge(r17:16,r21:20)
0x63 0xd4 0xf0 0xd2
-# CHECK: p3 = dfcmp.uo(r17:16, r21:20)
+# CHECK: p3 = dfcmp.uo(r17:16,r21:20)
# Convert floating-point value to other format
0x10 0xc0 0x95 0x84
@@ -91,29 +91,29 @@
0x11 0xc0 0xb5 0x8b
# CHECK: r17 = sffixupr(r21)
0x11 0xdf 0xd5 0xeb
-# CHECK: r17 = sffixupn(r21, r31)
+# CHECK: r17 = sffixupn(r21,r31)
0x31 0xdf 0xd5 0xeb
-# CHECK: r17 = sffixupd(r21, r31)
+# CHECK: r17 = sffixupd(r21,r31)
# Floating point fused multiply-add
0x91 0xdf 0x15 0xef
-# CHECK: r17 += sfmpy(r21, r31)
+# CHECK: r17 += sfmpy(r21,r31)
0xb1 0xdf 0x15 0xef
-# CHECK: r17 -= sfmpy(r21, r31)
+# CHECK: r17 -= sfmpy(r21,r31)
# Floating point fused multiply-add with scaling
0xf1 0xdf 0x75 0xef
-# CHECK: r17 += sfmpy(r21, r31, p3):scale
+# CHECK: r17 += sfmpy(r21,r31,p3):scale
# Floating point reciprocal square root approximation
0x71 0xc0 0xf5 0x8b
-# CHECK: r17, p3 = sfinvsqrta(r21)
+# CHECK: r17,p3 = sfinvsqrta(r21)
# Floating point fused multiply-add for library routines
0xd1 0xdf 0x15 0xef
-# CHECK: r17 += sfmpy(r21, r31):lib
+# CHECK: r17 += sfmpy(r21,r31):lib
0xf1 0xdf 0x15 0xef
-# CHECK: r17 -= sfmpy(r21, r31):lib
+# CHECK: r17 -= sfmpy(r21,r31):lib
# Create floating-point constant
0xb1 0xc2 0x00 0xd6
@@ -127,20 +127,20 @@
# Floating point maximum
0x11 0xdf 0x95 0xeb
-# CHECK: r17 = sfmax(r21, r31)
+# CHECK: r17 = sfmax(r21,r31)
# Floating point minimum
0x31 0xdf 0x95 0xeb
-# CHECK: r17 = sfmin(r21, r31)
+# CHECK: r17 = sfmin(r21,r31)
# Floating point multiply
0x11 0xdf 0x55 0xeb
-# CHECK: r17 = sfmpy(r21, r31)
+# CHECK: r17 = sfmpy(r21,r31)
# Floating point reciprocal approximation
0xf1 0xdf 0xf5 0xeb
-# CHECK: r17, p3 = sfrecipa(r21, r31)
+# CHECK: r17,p3 = sfrecipa(r21,r31)
# Floating point subtraction
0x31 0xdf 0x15 0xeb
-# CHECK: r17 = sfsub(r21, r31)
+# CHECK: r17 = sfsub(r21,r31)
diff --git a/test/MC/Disassembler/Hexagon/xtype_mpy.txt b/test/MC/Disassembler/Hexagon/xtype_mpy.txt
index ada32162a81e..dde6e76b266a 100644
--- a/test/MC/Disassembler/Hexagon/xtype_mpy.txt
+++ b/test/MC/Disassembler/Hexagon/xtype_mpy.txt
@@ -3,398 +3,398 @@
# Multiply and use lower result
0xb1 0xdf 0x35 0xd7
-# CHECK: r17 = add(#21, mpyi(r21, r31))
+# CHECK: r17 = add(#21,mpyi(r21,r31))
0xbf 0xd1 0x35 0xd8
-# CHECK: r17 = add(#21, mpyi(r21, #31))
+# CHECK: r17 = add(#21,mpyi(r21,#31))
0xb5 0xd1 0x3f 0xdf
-# CHECK: r17 = add(r21, mpyi(#84, r31))
+# CHECK: r17 = add(r21,mpyi(#84,r31))
0xf5 0xf1 0xb5 0xdf
-# CHECK: r17 = add(r21, mpyi(r21, #31))
+# CHECK: r17 = add(r21,mpyi(r21,#31))
0x15 0xd1 0x1f 0xe3
-# CHECK: r17 = add(r21, mpyi(r17, r31))
+# CHECK: r17 = add(r21,mpyi(r17,r31))
0xf1 0xc3 0x15 0xe0
-# CHECK: r17 =+ mpyi(r21, #31)
+# CHECK: r17 = +mpyi(r21,#31)
0xf1 0xc3 0x95 0xe0
-# CHECK: r17 =- mpyi(r21, #31)
+# CHECK: r17 = -mpyi(r21,#31)
0xf1 0xc3 0x15 0xe1
-# CHECK: r17 += mpyi(r21, #31)
+# CHECK: r17 += mpyi(r21,#31)
0xf1 0xc3 0x95 0xe1
-# CHECK: r17 -= mpyi(r21, #31)
+# CHECK: r17 -= mpyi(r21,#31)
0x11 0xdf 0x15 0xed
-# CHECK: r17 = mpyi(r21, r31)
+# CHECK: r17 = mpyi(r21,r31)
0x11 0xdf 0x15 0xef
-# CHECK: r17 += mpyi(r21, r31)
+# CHECK: r17 += mpyi(r21,r31)
# Vector multiply word by signed half (32x16)
0xb0 0xde 0x14 0xe8
-# CHECK: r17:16 = vmpyweh(r21:20, r31:30):sat
+# CHECK: r17:16 = vmpyweh(r21:20,r31:30):sat
0xb0 0xde 0x94 0xe8
-# CHECK: r17:16 = vmpyweh(r21:20, r31:30):<<1:sat
+# CHECK: r17:16 = vmpyweh(r21:20,r31:30):<<1:sat
0xf0 0xde 0x14 0xe8
-# CHECK: r17:16 = vmpywoh(r21:20, r31:30):sat
+# CHECK: r17:16 = vmpywoh(r21:20,r31:30):sat
0xf0 0xde 0x94 0xe8
-# CHECK: r17:16 = vmpywoh(r21:20, r31:30):<<1:sat
+# CHECK: r17:16 = vmpywoh(r21:20,r31:30):<<1:sat
0xb0 0xde 0x34 0xe8
-# CHECK: r17:16 = vmpyweh(r21:20, r31:30):rnd:sat
+# CHECK: r17:16 = vmpyweh(r21:20,r31:30):rnd:sat
0xb0 0xde 0xb4 0xe8
-# CHECK: r17:16 = vmpyweh(r21:20, r31:30):<<1:rnd:sat
+# CHECK: r17:16 = vmpyweh(r21:20,r31:30):<<1:rnd:sat
0xf0 0xde 0x34 0xe8
-# CHECK: r17:16 = vmpywoh(r21:20, r31:30):rnd:sat
+# CHECK: r17:16 = vmpywoh(r21:20,r31:30):rnd:sat
0xf0 0xde 0xb4 0xe8
-# CHECK: r17:16 = vmpywoh(r21:20, r31:30):<<1:rnd:sat
+# CHECK: r17:16 = vmpywoh(r21:20,r31:30):<<1:rnd:sat
0xb0 0xde 0x14 0xea
-# CHECK: r17:16 += vmpyweh(r21:20, r31:30):sat
+# CHECK: r17:16 += vmpyweh(r21:20,r31:30):sat
0xb0 0xde 0x94 0xea
-# CHECK: r17:16 += vmpyweh(r21:20, r31:30):<<1:sat
+# CHECK: r17:16 += vmpyweh(r21:20,r31:30):<<1:sat
0xf0 0xde 0x14 0xea
-# CHECK: r17:16 += vmpywoh(r21:20, r31:30):sat
+# CHECK: r17:16 += vmpywoh(r21:20,r31:30):sat
0xf0 0xde 0x94 0xea
-# CHECK: r17:16 += vmpywoh(r21:20, r31:30):<<1:sat
+# CHECK: r17:16 += vmpywoh(r21:20,r31:30):<<1:sat
0xb0 0xde 0x34 0xea
-# CHECK: r17:16 += vmpyweh(r21:20, r31:30):rnd:sat
+# CHECK: r17:16 += vmpyweh(r21:20,r31:30):rnd:sat
0xb0 0xde 0xb4 0xea
-# CHECK: r17:16 += vmpyweh(r21:20, r31:30):<<1:rnd:sat
+# CHECK: r17:16 += vmpyweh(r21:20,r31:30):<<1:rnd:sat
0xf0 0xde 0x34 0xea
-# CHECK: r17:16 += vmpywoh(r21:20, r31:30):rnd:sat
+# CHECK: r17:16 += vmpywoh(r21:20,r31:30):rnd:sat
0xf0 0xde 0xb4 0xea
-# CHECK: r17:16 += vmpywoh(r21:20, r31:30):<<1:rnd:sat
+# CHECK: r17:16 += vmpywoh(r21:20,r31:30):<<1:rnd:sat
# Vector multiply word by unsigned half (32x16)
0xb0 0xde 0x54 0xe8
-# CHECK: r17:16 = vmpyweuh(r21:20, r31:30):sat
+# CHECK: r17:16 = vmpyweuh(r21:20,r31:30):sat
0xb0 0xde 0xd4 0xe8
-# CHECK: r17:16 = vmpyweuh(r21:20, r31:30):<<1:sat
+# CHECK: r17:16 = vmpyweuh(r21:20,r31:30):<<1:sat
0xf0 0xde 0x54 0xe8
-# CHECK: r17:16 = vmpywouh(r21:20, r31:30):sat
+# CHECK: r17:16 = vmpywouh(r21:20,r31:30):sat
0xf0 0xde 0xd4 0xe8
-# CHECK: r17:16 = vmpywouh(r21:20, r31:30):<<1:sat
+# CHECK: r17:16 = vmpywouh(r21:20,r31:30):<<1:sat
0xb0 0xde 0x74 0xe8
-# CHECK: r17:16 = vmpyweuh(r21:20, r31:30):rnd:sat
+# CHECK: r17:16 = vmpyweuh(r21:20,r31:30):rnd:sat
0xb0 0xde 0xf4 0xe8
-# CHECK: r17:16 = vmpyweuh(r21:20, r31:30):<<1:rnd:sat
+# CHECK: r17:16 = vmpyweuh(r21:20,r31:30):<<1:rnd:sat
0xf0 0xde 0x74 0xe8
-# CHECK: r17:16 = vmpywouh(r21:20, r31:30):rnd:sat
+# CHECK: r17:16 = vmpywouh(r21:20,r31:30):rnd:sat
0xf0 0xde 0xf4 0xe8
-# CHECK: r17:16 = vmpywouh(r21:20, r31:30):<<1:rnd:sat
+# CHECK: r17:16 = vmpywouh(r21:20,r31:30):<<1:rnd:sat
0xb0 0xde 0x54 0xea
-# CHECK: r17:16 += vmpyweuh(r21:20, r31:30):sat
+# CHECK: r17:16 += vmpyweuh(r21:20,r31:30):sat
0xb0 0xde 0xd4 0xea
-# CHECK: r17:16 += vmpyweuh(r21:20, r31:30):<<1:sat
+# CHECK: r17:16 += vmpyweuh(r21:20,r31:30):<<1:sat
0xf0 0xde 0x54 0xea
-# CHECK: r17:16 += vmpywouh(r21:20, r31:30):sat
+# CHECK: r17:16 += vmpywouh(r21:20,r31:30):sat
0xf0 0xde 0xd4 0xea
-# CHECK: r17:16 += vmpywouh(r21:20, r31:30):<<1:sat
+# CHECK: r17:16 += vmpywouh(r21:20,r31:30):<<1:sat
0xb0 0xde 0x74 0xea
-# CHECK: r17:16 += vmpyweuh(r21:20, r31:30):rnd:sat
+# CHECK: r17:16 += vmpyweuh(r21:20,r31:30):rnd:sat
0xb0 0xde 0xf4 0xea
-# CHECK: r17:16 += vmpyweuh(r21:20, r31:30):<<1:rnd:sat
+# CHECK: r17:16 += vmpyweuh(r21:20,r31:30):<<1:rnd:sat
0xf0 0xde 0x74 0xea
-# CHECK: r17:16 += vmpywouh(r21:20, r31:30):rnd:sat
+# CHECK: r17:16 += vmpywouh(r21:20,r31:30):rnd:sat
0xf0 0xde 0xf4 0xea
-# CHECK: r17:16 += vmpywouh(r21:20, r31:30):<<1:rnd:sat
+# CHECK: r17:16 += vmpywouh(r21:20,r31:30):<<1:rnd:sat
# Multiply signed halfwords
0x10 0xdf 0x95 0xe4
-# CHECK: r17:16 = mpy(r21.l, r31.l):<<1
+# CHECK: r17:16 = mpy(r21.l,r31.l):<<1
0x30 0xdf 0x95 0xe4
-# CHECK: r17:16 = mpy(r21.l, r31.h):<<1
+# CHECK: r17:16 = mpy(r21.l,r31.h):<<1
0x50 0xdf 0x95 0xe4
-# CHECK: r17:16 = mpy(r21.h, r31.l):<<1
+# CHECK: r17:16 = mpy(r21.h,r31.l):<<1
0x70 0xdf 0x95 0xe4
-# CHECK: r17:16 = mpy(r21.h, r31.h):<<1
+# CHECK: r17:16 = mpy(r21.h,r31.h):<<1
0x10 0xdf 0xb5 0xe4
-# CHECK: r17:16 = mpy(r21.l, r31.l):<<1:rnd
+# CHECK: r17:16 = mpy(r21.l,r31.l):<<1:rnd
0x30 0xdf 0xb5 0xe4
-# CHECK: r17:16 = mpy(r21.l, r31.h):<<1:rnd
+# CHECK: r17:16 = mpy(r21.l,r31.h):<<1:rnd
0x50 0xdf 0xb5 0xe4
-# CHECK: r17:16 = mpy(r21.h, r31.l):<<1:rnd
+# CHECK: r17:16 = mpy(r21.h,r31.l):<<1:rnd
0x70 0xdf 0xb5 0xe4
-# CHECK: r17:16 = mpy(r21.h, r31.h):<<1:rnd
+# CHECK: r17:16 = mpy(r21.h,r31.h):<<1:rnd
0x10 0xdf 0x95 0xe6
-# CHECK: r17:16 += mpy(r21.l, r31.l):<<1
+# CHECK: r17:16 += mpy(r21.l,r31.l):<<1
0x30 0xdf 0x95 0xe6
-# CHECK: r17:16 += mpy(r21.l, r31.h):<<1
+# CHECK: r17:16 += mpy(r21.l,r31.h):<<1
0x50 0xdf 0x95 0xe6
-# CHECK: r17:16 += mpy(r21.h, r31.l):<<1
+# CHECK: r17:16 += mpy(r21.h,r31.l):<<1
0x70 0xdf 0x95 0xe6
-# CHECK: r17:16 += mpy(r21.h, r31.h):<<1
+# CHECK: r17:16 += mpy(r21.h,r31.h):<<1
0x10 0xdf 0xb5 0xe6
-# CHECK: r17:16 -= mpy(r21.l, r31.l):<<1
+# CHECK: r17:16 -= mpy(r21.l,r31.l):<<1
0x30 0xdf 0xb5 0xe6
-# CHECK: r17:16 -= mpy(r21.l, r31.h):<<1
+# CHECK: r17:16 -= mpy(r21.l,r31.h):<<1
0x50 0xdf 0xb5 0xe6
-# CHECK: r17:16 -= mpy(r21.h, r31.l):<<1
+# CHECK: r17:16 -= mpy(r21.h,r31.l):<<1
0x70 0xdf 0xb5 0xe6
-# CHECK: r17:16 -= mpy(r21.h, r31.h):<<1
+# CHECK: r17:16 -= mpy(r21.h,r31.h):<<1
0x11 0xdf 0x95 0xec
-# CHECK: r17 = mpy(r21.l, r31.l):<<1
+# CHECK: r17 = mpy(r21.l,r31.l):<<1
0x31 0xdf 0x95 0xec
-# CHECK: r17 = mpy(r21.l, r31.h):<<1
+# CHECK: r17 = mpy(r21.l,r31.h):<<1
0x51 0xdf 0x95 0xec
-# CHECK: r17 = mpy(r21.h, r31.l):<<1
+# CHECK: r17 = mpy(r21.h,r31.l):<<1
0x71 0xdf 0x95 0xec
-# CHECK: r17 = mpy(r21.h, r31.h):<<1
+# CHECK: r17 = mpy(r21.h,r31.h):<<1
0x91 0xdf 0x95 0xec
-# CHECK: r17 = mpy(r21.l, r31.l):<<1:sat
+# CHECK: r17 = mpy(r21.l,r31.l):<<1:sat
0xb1 0xdf 0x95 0xec
-# CHECK: r17 = mpy(r21.l, r31.h):<<1:sat
+# CHECK: r17 = mpy(r21.l,r31.h):<<1:sat
0xd1 0xdf 0x95 0xec
-# CHECK: r17 = mpy(r21.h, r31.l):<<1:sat
+# CHECK: r17 = mpy(r21.h,r31.l):<<1:sat
0xf1 0xdf 0x95 0xec
-# CHECK: r17 = mpy(r21.h, r31.h):<<1:sat
+# CHECK: r17 = mpy(r21.h,r31.h):<<1:sat
0x11 0xdf 0xb5 0xec
-# CHECK: r17 = mpy(r21.l, r31.l):<<1:rnd
+# CHECK: r17 = mpy(r21.l,r31.l):<<1:rnd
0x31 0xdf 0xb5 0xec
-# CHECK: r17 = mpy(r21.l, r31.h):<<1:rnd
+# CHECK: r17 = mpy(r21.l,r31.h):<<1:rnd
0x51 0xdf 0xb5 0xec
-# CHECK: r17 = mpy(r21.h, r31.l):<<1:rnd
+# CHECK: r17 = mpy(r21.h,r31.l):<<1:rnd
0x71 0xdf 0xb5 0xec
-# CHECK: r17 = mpy(r21.h, r31.h):<<1:rnd
+# CHECK: r17 = mpy(r21.h,r31.h):<<1:rnd
0x91 0xdf 0xb5 0xec
-# CHECK: r17 = mpy(r21.l, r31.l):<<1:rnd:sat
+# CHECK: r17 = mpy(r21.l,r31.l):<<1:rnd:sat
0xb1 0xdf 0xb5 0xec
-# CHECK: r17 = mpy(r21.l, r31.h):<<1:rnd:sat
+# CHECK: r17 = mpy(r21.l,r31.h):<<1:rnd:sat
0xd1 0xdf 0xb5 0xec
-# CHECK: r17 = mpy(r21.h, r31.l):<<1:rnd:sat
+# CHECK: r17 = mpy(r21.h,r31.l):<<1:rnd:sat
0xf1 0xdf 0xb5 0xec
-# CHECK: r17 = mpy(r21.h, r31.h):<<1:rnd:sat
+# CHECK: r17 = mpy(r21.h,r31.h):<<1:rnd:sat
0x11 0xdf 0x95 0xee
-# CHECK: r17 += mpy(r21.l, r31.l):<<1
+# CHECK: r17 += mpy(r21.l,r31.l):<<1
0x31 0xdf 0x95 0xee
-# CHECK: r17 += mpy(r21.l, r31.h):<<1
+# CHECK: r17 += mpy(r21.l,r31.h):<<1
0x51 0xdf 0x95 0xee
-# CHECK: r17 += mpy(r21.h, r31.l):<<1
+# CHECK: r17 += mpy(r21.h,r31.l):<<1
0x71 0xdf 0x95 0xee
-# CHECK: r17 += mpy(r21.h, r31.h):<<1
+# CHECK: r17 += mpy(r21.h,r31.h):<<1
0x91 0xdf 0x95 0xee
-# CHECK: r17 += mpy(r21.l, r31.l):<<1:sat
+# CHECK: r17 += mpy(r21.l,r31.l):<<1:sat
0xb1 0xdf 0x95 0xee
-# CHECK: r17 += mpy(r21.l, r31.h):<<1:sat
+# CHECK: r17 += mpy(r21.l,r31.h):<<1:sat
0xd1 0xdf 0x95 0xee
-# CHECK: r17 += mpy(r21.h, r31.l):<<1:sat
+# CHECK: r17 += mpy(r21.h,r31.l):<<1:sat
0xf1 0xdf 0x95 0xee
-# CHECK: r17 += mpy(r21.h, r31.h):<<1:sat
+# CHECK: r17 += mpy(r21.h,r31.h):<<1:sat
0x11 0xdf 0xb5 0xee
-# CHECK: r17 -= mpy(r21.l, r31.l):<<1
+# CHECK: r17 -= mpy(r21.l,r31.l):<<1
0x31 0xdf 0xb5 0xee
-# CHECK: r17 -= mpy(r21.l, r31.h):<<1
+# CHECK: r17 -= mpy(r21.l,r31.h):<<1
0x51 0xdf 0xb5 0xee
-# CHECK: r17 -= mpy(r21.h, r31.l):<<1
+# CHECK: r17 -= mpy(r21.h,r31.l):<<1
0x71 0xdf 0xb5 0xee
-# CHECK: r17 -= mpy(r21.h, r31.h):<<1
+# CHECK: r17 -= mpy(r21.h,r31.h):<<1
0x91 0xdf 0xb5 0xee
-# CHECK: r17 -= mpy(r21.l, r31.l):<<1:sat
+# CHECK: r17 -= mpy(r21.l,r31.l):<<1:sat
0xb1 0xdf 0xb5 0xee
-# CHECK: r17 -= mpy(r21.l, r31.h):<<1:sat
+# CHECK: r17 -= mpy(r21.l,r31.h):<<1:sat
0xd1 0xdf 0xb5 0xee
-# CHECK: r17 -= mpy(r21.h, r31.l):<<1:sat
+# CHECK: r17 -= mpy(r21.h,r31.l):<<1:sat
0xf1 0xdf 0xb5 0xee
-# CHECK: r17 -= mpy(r21.h, r31.h):<<1:sat
+# CHECK: r17 -= mpy(r21.h,r31.h):<<1:sat
# Multiply unsigned halfwords
0x10 0xdf 0xd5 0xe4
-# CHECK: r17:16 = mpyu(r21.l, r31.l):<<1
+# CHECK: r17:16 = mpyu(r21.l,r31.l):<<1
0x30 0xdf 0xd5 0xe4
-# CHECK: r17:16 = mpyu(r21.l, r31.h):<<1
+# CHECK: r17:16 = mpyu(r21.l,r31.h):<<1
0x50 0xdf 0xd5 0xe4
-# CHECK: r17:16 = mpyu(r21.h, r31.l):<<1
+# CHECK: r17:16 = mpyu(r21.h,r31.l):<<1
0x70 0xdf 0xd5 0xe4
-# CHECK: r17:16 = mpyu(r21.h, r31.h):<<1
+# CHECK: r17:16 = mpyu(r21.h,r31.h):<<1
0x10 0xdf 0xd5 0xe6
-# CHECK: r17:16 += mpyu(r21.l, r31.l):<<1
+# CHECK: r17:16 += mpyu(r21.l,r31.l):<<1
0x30 0xdf 0xd5 0xe6
-# CHECK: r17:16 += mpyu(r21.l, r31.h):<<1
+# CHECK: r17:16 += mpyu(r21.l,r31.h):<<1
0x50 0xdf 0xd5 0xe6
-# CHECK: r17:16 += mpyu(r21.h, r31.l):<<1
+# CHECK: r17:16 += mpyu(r21.h,r31.l):<<1
0x70 0xdf 0xd5 0xe6
-# CHECK: r17:16 += mpyu(r21.h, r31.h):<<1
+# CHECK: r17:16 += mpyu(r21.h,r31.h):<<1
0x10 0xdf 0xf5 0xe6
-# CHECK: r17:16 -= mpyu(r21.l, r31.l):<<1
+# CHECK: r17:16 -= mpyu(r21.l,r31.l):<<1
0x30 0xdf 0xf5 0xe6
-# CHECK: r17:16 -= mpyu(r21.l, r31.h):<<1
+# CHECK: r17:16 -= mpyu(r21.l,r31.h):<<1
0x50 0xdf 0xf5 0xe6
-# CHECK: r17:16 -= mpyu(r21.h, r31.l):<<1
+# CHECK: r17:16 -= mpyu(r21.h,r31.l):<<1
0x70 0xdf 0xf5 0xe6
-# CHECK: r17:16 -= mpyu(r21.h, r31.h):<<1
+# CHECK: r17:16 -= mpyu(r21.h,r31.h):<<1
0x11 0xdf 0xd5 0xec
-# CHECK: r17 = mpyu(r21.l, r31.l):<<1
+# CHECK: r17 = mpyu(r21.l,r31.l):<<1
0x31 0xdf 0xd5 0xec
-# CHECK: r17 = mpyu(r21.l, r31.h):<<1
+# CHECK: r17 = mpyu(r21.l,r31.h):<<1
0x51 0xdf 0xd5 0xec
-# CHECK: r17 = mpyu(r21.h, r31.l):<<1
+# CHECK: r17 = mpyu(r21.h,r31.l):<<1
0x71 0xdf 0xd5 0xec
-# CHECK: r17 = mpyu(r21.h, r31.h):<<1
+# CHECK: r17 = mpyu(r21.h,r31.h):<<1
0x11 0xdf 0xd5 0xee
-# CHECK: r17 += mpyu(r21.l, r31.l):<<1
+# CHECK: r17 += mpyu(r21.l,r31.l):<<1
0x31 0xdf 0xd5 0xee
-# CHECK: r17 += mpyu(r21.l, r31.h):<<1
+# CHECK: r17 += mpyu(r21.l,r31.h):<<1
0x51 0xdf 0xd5 0xee
-# CHECK: r17 += mpyu(r21.h, r31.l):<<1
+# CHECK: r17 += mpyu(r21.h,r31.l):<<1
0x71 0xdf 0xd5 0xee
-# CHECK: r17 += mpyu(r21.h, r31.h):<<1
+# CHECK: r17 += mpyu(r21.h,r31.h):<<1
0x11 0xdf 0xf5 0xee
-# CHECK: r17 -= mpyu(r21.l, r31.l):<<1
+# CHECK: r17 -= mpyu(r21.l,r31.l):<<1
0x31 0xdf 0xf5 0xee
-# CHECK: r17 -= mpyu(r21.l, r31.h):<<1
+# CHECK: r17 -= mpyu(r21.l,r31.h):<<1
0x51 0xdf 0xf5 0xee
-# CHECK: r17 -= mpyu(r21.h, r31.l):<<1
+# CHECK: r17 -= mpyu(r21.h,r31.l):<<1
0x71 0xdf 0xf5 0xee
-# CHECK: r17 -= mpyu(r21.h, r31.h):<<1
+# CHECK: r17 -= mpyu(r21.h,r31.h):<<1
# Polynomial multiply words
0xf0 0xdf 0x55 0xe5
-# CHECK: r17:16 = pmpyw(r21, r31)
+# CHECK: r17:16 = pmpyw(r21,r31)
0xf0 0xdf 0x35 0xe7
-# CHECK: r17:16 ^= pmpyw(r21, r31)
+# CHECK: r17:16 ^= pmpyw(r21,r31)
# Vector reduce multiply word by signed half (32x16)
0x50 0xde 0x34 0xe8
-# CHECK: r17:16 = vrmpywoh(r21:20, r31:30)
+# CHECK: r17:16 = vrmpywoh(r21:20,r31:30)
0x50 0xde 0xb4 0xe8
-# CHECK: r17:16 = vrmpywoh(r21:20, r31:30):<<1
+# CHECK: r17:16 = vrmpywoh(r21:20,r31:30):<<1
0x90 0xde 0x54 0xe8
-# CHECK: r17:16 = vrmpyweh(r21:20, r31:30)
+# CHECK: r17:16 = vrmpyweh(r21:20,r31:30)
0x90 0xde 0xd4 0xe8
-# CHECK: r17:16 = vrmpyweh(r21:20, r31:30):<<1
+# CHECK: r17:16 = vrmpyweh(r21:20,r31:30):<<1
0xd0 0xde 0x74 0xea
-# CHECK: r17:16 += vrmpywoh(r21:20, r31:30)
+# CHECK: r17:16 += vrmpywoh(r21:20,r31:30)
0xd0 0xde 0xf4 0xea
-# CHECK: r17:16 += vrmpywoh(r21:20, r31:30):<<1
+# CHECK: r17:16 += vrmpywoh(r21:20,r31:30):<<1
0xd0 0xde 0x34 0xea
-# CHECK: r17:16 += vrmpyweh(r21:20, r31:30)
+# CHECK: r17:16 += vrmpyweh(r21:20,r31:30)
0xd0 0xde 0xb4 0xea
-# CHECK: r17:16 += vrmpyweh(r21:20, r31:30):<<1
+# CHECK: r17:16 += vrmpyweh(r21:20,r31:30):<<1
# Multiply and use upper result
0x31 0xdf 0x15 0xed
-# CHECK: r17 = mpy(r21, r31)
+# CHECK: r17 = mpy(r21,r31)
0x31 0xdf 0x35 0xed
-# CHECK: r17 = mpy(r21, r31):rnd
+# CHECK: r17 = mpy(r21,r31):rnd
0x31 0xdf 0x55 0xed
-# CHECK: r17 = mpyu(r21, r31)
+# CHECK: r17 = mpyu(r21,r31)
0x31 0xdf 0x75 0xed
-# CHECK: r17 = mpysu(r21, r31)
+# CHECK: r17 = mpysu(r21,r31)
0x11 0xdf 0xb5 0xed
-# CHECK: r17 = mpy(r21, r31.h):<<1:sat
+# CHECK: r17 = mpy(r21,r31.h):<<1:sat
0x31 0xdf 0xb5 0xed
-# CHECK: r17 = mpy(r21, r31.l):<<1:sat
+# CHECK: r17 = mpy(r21,r31.l):<<1:sat
0x91 0xdf 0xb5 0xed
-# CHECK: r17 = mpy(r21, r31.h):<<1:rnd:sat
+# CHECK: r17 = mpy(r21,r31.h):<<1:rnd:sat
0x11 0xdf 0xf5 0xed
-# CHECK: r17 = mpy(r21, r31):<<1:sat
+# CHECK: r17 = mpy(r21,r31):<<1:sat
0x91 0xdf 0xf5 0xed
-# CHECK: r17 = mpy(r21, r31.l):<<1:rnd:sat
+# CHECK: r17 = mpy(r21,r31.l):<<1:rnd:sat
0x51 0xdf 0xb5 0xed
-# CHECK: r17 = mpy(r21, r31):<<1
+# CHECK: r17 = mpy(r21,r31):<<1
0x11 0xdf 0x75 0xef
-# CHECK: r17 += mpy(r21, r31):<<1:sat
+# CHECK: r17 += mpy(r21,r31):<<1:sat
0x31 0xdf 0x75 0xef
-# CHECK: r17 -= mpy(r21, r31):<<1:sat
+# CHECK: r17 -= mpy(r21,r31):<<1:sat
# Multiply and use full result
0x10 0xdf 0x15 0xe5
-# CHECK: r17:16 = mpy(r21, r31)
+# CHECK: r17:16 = mpy(r21,r31)
0x10 0xdf 0x55 0xe5
-# CHECK: r17:16 = mpyu(r21, r31)
+# CHECK: r17:16 = mpyu(r21,r31)
0x10 0xdf 0x15 0xe7
-# CHECK: r17:16 += mpy(r21, r31)
+# CHECK: r17:16 += mpy(r21,r31)
0x10 0xdf 0x35 0xe7
-# CHECK: r17:16 -= mpy(r21, r31)
+# CHECK: r17:16 -= mpy(r21,r31)
0x10 0xdf 0x55 0xe7
-# CHECK: r17:16 += mpyu(r21, r31)
+# CHECK: r17:16 += mpyu(r21,r31)
0x10 0xdf 0x75 0xe7
-# CHECK: r17:16 -= mpyu(r21, r31)
+# CHECK: r17:16 -= mpyu(r21,r31)
# Vector dual multiply
0x90 0xde 0x14 0xe8
-# CHECK: r17:16 = vdmpy(r21:20, r31:30):sat
+# CHECK: r17:16 = vdmpy(r21:20,r31:30):sat
0x90 0xde 0x94 0xe8
-# CHECK: r17:16 = vdmpy(r21:20, r31:30):<<1:sat
+# CHECK: r17:16 = vdmpy(r21:20,r31:30):<<1:sat
0x90 0xde 0x14 0xea
-# CHECK: r17:16 += vdmpy(r21:20, r31:30):sat
+# CHECK: r17:16 += vdmpy(r21:20,r31:30):sat
0x90 0xde 0x94 0xea
-# CHECK: r17:16 += vdmpy(r21:20, r31:30):<<1:sat
+# CHECK: r17:16 += vdmpy(r21:20,r31:30):<<1:sat
# Vector dual multiply with round and pack
0x11 0xde 0x14 0xe9
-# CHECK: r17 = vdmpy(r21:20, r31:30):rnd:sat
+# CHECK: r17 = vdmpy(r21:20,r31:30):rnd:sat
0x11 0xde 0x94 0xe9
-# CHECK: r17 = vdmpy(r21:20, r31:30):<<1:rnd:sat
+# CHECK: r17 = vdmpy(r21:20,r31:30):<<1:rnd:sat
# Vector reduce multiply bytes
0x30 0xde 0x94 0xe8
-# CHECK: r17:16 = vrmpybu(r21:20, r31:30)
+# CHECK: r17:16 = vrmpybu(r21:20,r31:30)
0x30 0xde 0xd4 0xe8
-# CHECK: r17:16 = vrmpybsu(r21:20, r31:30)
+# CHECK: r17:16 = vrmpybsu(r21:20,r31:30)
0x30 0xde 0x94 0xea
-# CHECK: r17:16 += vrmpybu(r21:20, r31:30)
+# CHECK: r17:16 += vrmpybu(r21:20,r31:30)
0x30 0xde 0xd4 0xea
-# CHECK: r17:16 += vrmpybsu(r21:20, r31:30)
+# CHECK: r17:16 += vrmpybsu(r21:20,r31:30)
# Vector dual multiply signed by unsigned bytes
0x30 0xde 0xb4 0xe8
-# CHECK: r17:16 = vdmpybsu(r21:20, r31:30):sat
+# CHECK: r17:16 = vdmpybsu(r21:20,r31:30):sat
0x30 0xde 0x34 0xea
-# CHECK: r17:16 += vdmpybsu(r21:20, r31:30):sat
+# CHECK: r17:16 += vdmpybsu(r21:20,r31:30):sat
# Vector multiply even halfwords
0xd0 0xde 0x14 0xe8
-# CHECK: r17:16 = vmpyeh(r21:20, r31:30):sat
+# CHECK: r17:16 = vmpyeh(r21:20,r31:30):sat
0xd0 0xde 0x94 0xe8
-# CHECK: r17:16 = vmpyeh(r21:20, r31:30):<<1:sat
+# CHECK: r17:16 = vmpyeh(r21:20,r31:30):<<1:sat
0x50 0xde 0x34 0xea
-# CHECK: r17:16 += vmpyeh(r21:20, r31:30)
+# CHECK: r17:16 += vmpyeh(r21:20,r31:30)
0xd0 0xde 0x14 0xea
-# CHECK: r17:16 += vmpyeh(r21:20, r31:30):sat
+# CHECK: r17:16 += vmpyeh(r21:20,r31:30):sat
0xd0 0xde 0x94 0xea
-# CHECK: r17:16 += vmpyeh(r21:20, r31:30):<<1:sat
+# CHECK: r17:16 += vmpyeh(r21:20,r31:30):<<1:sat
# Vector multiply halfwords
0xb0 0xdf 0x15 0xe5
-# CHECK: r17:16 = vmpyh(r21, r31):sat
+# CHECK: r17:16 = vmpyh(r21,r31):sat
0xb0 0xdf 0x95 0xe5
-# CHECK: r17:16 = vmpyh(r21, r31):<<1:sat
+# CHECK: r17:16 = vmpyh(r21,r31):<<1:sat
0x30 0xdf 0x35 0xe7
-# CHECK: r17:16 += vmpyh(r21, r31)
+# CHECK: r17:16 += vmpyh(r21,r31)
0xb0 0xdf 0x15 0xe7
-# CHECK: r17:16 += vmpyh(r21, r31):sat
+# CHECK: r17:16 += vmpyh(r21,r31):sat
0xb0 0xdf 0x95 0xe7
-# CHECK: r17:16 += vmpyh(r21, r31):<<1:sat
+# CHECK: r17:16 += vmpyh(r21,r31):<<1:sat
# Vector multiply halfwords with round and pack
0xf1 0xdf 0x35 0xed
-# CHECK: r17 = vmpyh(r21, r31):rnd:sat
+# CHECK: r17 = vmpyh(r21,r31):rnd:sat
0xf1 0xdf 0xb5 0xed
-# CHECK: r17 = vmpyh(r21, r31):<<1:rnd:sat
+# CHECK: r17 = vmpyh(r21,r31):<<1:rnd:sat
# Vector multiply halfwords signed by unsigned
0xf0 0xdf 0x15 0xe5
-# CHECK: r17:16 = vmpyhsu(r21, r31):sat
+# CHECK: r17:16 = vmpyhsu(r21,r31):sat
0xf0 0xdf 0x95 0xe5
-# CHECK: r17:16 = vmpyhsu(r21, r31):<<1:sat
+# CHECK: r17:16 = vmpyhsu(r21,r31):<<1:sat
0xb0 0xdf 0x75 0xe7
-# CHECK: r17:16 += vmpyhsu(r21, r31):sat
+# CHECK: r17:16 += vmpyhsu(r21,r31):sat
0xb0 0xdf 0xf5 0xe7
-# CHECK: r17:16 += vmpyhsu(r21, r31):<<1:sat
+# CHECK: r17:16 += vmpyhsu(r21,r31):<<1:sat
# Vector reduce multiply halfwords
0x50 0xde 0x14 0xe8
-# CHECK: r17:16 = vrmpyh(r21:20, r31:30)
+# CHECK: r17:16 = vrmpyh(r21:20,r31:30)
0x50 0xde 0x14 0xea
-# CHECK: r17:16 += vrmpyh(r21:20, r31:30)
+# CHECK: r17:16 += vrmpyh(r21:20,r31:30)
# Vector multiply bytes
0x30 0xdf 0x55 0xe5
-# CHECK: r17:16 = vmpybsu(r21, r31)
+# CHECK: r17:16 = vmpybsu(r21,r31)
0x30 0xdf 0x95 0xe5
-# CHECK: r17:16 = vmpybu(r21, r31)
+# CHECK: r17:16 = vmpybu(r21,r31)
0x30 0xdf 0x95 0xe7
-# CHECK: r17:16 += vmpybu(r21, r31)
+# CHECK: r17:16 += vmpybu(r21,r31)
0x30 0xdf 0xd5 0xe7
-# CHECK: r17:16 += vmpybsu(r21, r31)
+# CHECK: r17:16 += vmpybsu(r21,r31)
# Vector polynomial multiply halfwords
0xf0 0xdf 0xd5 0xe5
-# CHECK: r17:16 = vpmpyh(r21, r31)
+# CHECK: r17:16 = vpmpyh(r21,r31)
0xf0 0xdf 0xb5 0xe7
-# CHECK: r17:16 ^= vpmpyh(r21, r31)
+# CHECK: r17:16 ^= vpmpyh(r21,r31)
diff --git a/test/MC/Disassembler/Hexagon/xtype_perm.txt b/test/MC/Disassembler/Hexagon/xtype_perm.txt
index 91d2fc5ae698..e8173fb049c1 100644
--- a/test/MC/Disassembler/Hexagon/xtype_perm.txt
+++ b/test/MC/Disassembler/Hexagon/xtype_perm.txt
@@ -3,7 +3,7 @@
# CABAC decode bin
0xd0 0xde 0xd4 0xc1
-# CHECK: r17:16 = decbin(r21:20, r31:30)
+# CHECK: r17:16 = decbin(r21:20,r31:30)
# Saturate
0x11 0xc0 0xd4 0x88
@@ -23,9 +23,9 @@
# Vector align
0x70 0xd4 0x1e 0xc2
-# CHECK: r17:16 = valignb(r21:20, r31:30, p3)
+# CHECK: r17:16 = valignb(r21:20,r31:30,p3)
0x70 0xde 0x94 0xc2
-# CHECK: r17:16 = vspliceb(r21:20, r31:30, p3)
+# CHECK: r17:16 = vspliceb(r21:20,r31:30,p3)
# Vector round and pack
0x91 0xc0 0x94 0x88
@@ -59,13 +59,13 @@
# Vector shuffle
0x50 0xde 0x14 0xc1
-# CHECK: r17:16 = shuffeb(r21:20, r31:30)
+# CHECK: r17:16 = shuffeb(r21:20,r31:30)
0x90 0xd4 0x1e 0xc1
-# CHECK: r17:16 = shuffob(r21:20, r31:30)
+# CHECK: r17:16 = shuffob(r21:20,r31:30)
0xd0 0xde 0x14 0xc1
-# CHECK: r17:16 = shuffeh(r21:20, r31:30)
+# CHECK: r17:16 = shuffeh(r21:20,r31:30)
0x10 0xd4 0x9e 0xc1
-# CHECK: r17:16 = shuffoh(r21:20, r31:30)
+# CHECK: r17:16 = shuffoh(r21:20,r31:30)
# Vector splat bytes
0xf1 0xc0 0x55 0x8c
@@ -77,9 +77,9 @@
# Vector splice
0x70 0xde 0x94 0xc0
-# CHECK: r17:16 = vspliceb(r21:20, r31:30, #3)
+# CHECK: r17:16 = vspliceb(r21:20,r31:30,#3)
0x70 0xde 0x94 0xc2
-# CHECK: r17:16 = vspliceb(r21:20, r31:30, p3)
+# CHECK: r17:16 = vspliceb(r21:20,r31:30,p3)
# Vector sign extend
0x10 0xc0 0x15 0x84
@@ -93,9 +93,9 @@
0x51 0xc0 0x94 0x88
# CHECK: r17 = vtrunehb(r21:20)
0x50 0xde 0x94 0xc1
-# CHECK: r17:16 = vtrunewh(r21:20, r31:30)
+# CHECK: r17:16 = vtrunewh(r21:20,r31:30)
0x90 0xde 0x94 0xc1
-# CHECK: r17:16 = vtrunowh(r21:20, r31:30)
+# CHECK: r17:16 = vtrunowh(r21:20,r31:30)
# Vector zero extend
0x50 0xc0 0x15 0x84
diff --git a/test/MC/Disassembler/Hexagon/xtype_pred.txt b/test/MC/Disassembler/Hexagon/xtype_pred.txt
index cec6d1be0f10..816eef58a099 100644
--- a/test/MC/Disassembler/Hexagon/xtype_pred.txt
+++ b/test/MC/Disassembler/Hexagon/xtype_pred.txt
@@ -3,59 +3,59 @@
# Bounds check
0x83 0xf4 0x10 0xd2
-# CHECK: p3 = boundscheck(r17:16, r21:20):raw:lo
+# CHECK: p3 = boundscheck(r17:16,r21:20):raw:lo
0xa3 0xf4 0x10 0xd2
-# CHECK: p3 = boundscheck(r17:16, r21:20):raw:hi
+# CHECK: p3 = boundscheck(r17:16,r21:20):raw:hi
# Compare byte
0x43 0xd5 0xd1 0xc7
-# CHECK: p3 = cmpb.gt(r17, r21)
+# CHECK: p3 = cmpb.gt(r17,r21)
0xc3 0xd5 0xd1 0xc7
-# CHECK: p3 = cmpb.eq(r17, r21)
+# CHECK: p3 = cmpb.eq(r17,r21)
0xe3 0xd5 0xd1 0xc7
-# CHECK: p3 = cmpb.gtu(r17, r21)
+# CHECK: p3 = cmpb.gtu(r17,r21)
0xa3 0xc2 0x11 0xdd
-# CHECK: p3 = cmpb.eq(r17, #21)
+# CHECK: p3 = cmpb.eq(r17,#21)
0xa3 0xc2 0x31 0xdd
-# CHECK: p3 = cmpb.gt(r17, #21)
+# CHECK: p3 = cmpb.gt(r17,#21)
0xa3 0xc2 0x51 0xdd
-# CHECK: p3 = cmpb.gtu(r17, #21)
+# CHECK: p3 = cmpb.gtu(r17,#21)
# Compare half
0x63 0xd5 0xd1 0xc7
-# CHECK: p3 = cmph.eq(r17, r21)
+# CHECK: p3 = cmph.eq(r17,r21)
0x83 0xd5 0xd1 0xc7
-# CHECK: p3 = cmph.gt(r17, r21)
+# CHECK: p3 = cmph.gt(r17,r21)
0xa3 0xd5 0xd1 0xc7
-# CHECK: p3 = cmph.gtu(r17, r21)
+# CHECK: p3 = cmph.gtu(r17,r21)
0xab 0xc2 0x11 0xdd
-# CHECK: p3 = cmph.eq(r17, #21)
+# CHECK: p3 = cmph.eq(r17,#21)
0xab 0xc2 0x31 0xdd
-# CHECK: p3 = cmph.gt(r17, #21)
+# CHECK: p3 = cmph.gt(r17,#21)
0xab 0xc2 0x51 0xdd
-# CHECK: p3 = cmph.gtu(r17, #21)
+# CHECK: p3 = cmph.gtu(r17,#21)
# Compare doublewords
0x03 0xde 0x94 0xd2
-# CHECK: p3 = cmp.eq(r21:20, r31:30)
+# CHECK: p3 = cmp.eq(r21:20,r31:30)
0x43 0xde 0x94 0xd2
-# CHECK: p3 = cmp.gt(r21:20, r31:30)
+# CHECK: p3 = cmp.gt(r21:20,r31:30)
0x83 0xde 0x94 0xd2
-# CHECK: p3 = cmp.gtu(r21:20, r31:30)
+# CHECK: p3 = cmp.gtu(r21:20,r31:30)
# Compare bitmask
0x03 0xd5 0x91 0x85
-# CHECK: p3 = bitsclr(r17, #21)
+# CHECK: p3 = bitsclr(r17,#21)
0x03 0xd5 0xb1 0x85
-# CHECK: p3 = !bitsclr(r17, #21)
+# CHECK: p3 = !bitsclr(r17,#21)
0x03 0xd5 0x51 0xc7
-# CHECK: p3 = bitsset(r17, r21)
+# CHECK: p3 = bitsset(r17,r21)
0x03 0xd5 0x71 0xc7
-# CHECK: p3 = !bitsset(r17, r21)
+# CHECK: p3 = !bitsset(r17,r21)
0x03 0xd5 0x91 0xc7
-# CHECK: p3 = bitsclr(r17, r21)
+# CHECK: p3 = bitsclr(r17,r21)
0x03 0xd5 0xb1 0xc7
-# CHECK: p3 = !bitsclr(r17, r21)
+# CHECK: p3 = !bitsclr(r17,r21)
# Mask generate from predicate
0x10 0xc3 0x00 0x86
@@ -63,7 +63,7 @@
# Check for TLB match
0x63 0xf5 0x10 0xd2
-# CHECK: p3 = tlbmatch(r17:16, r21)
+# CHECK: p3 = tlbmatch(r17:16,r21)
# Predicate Transfer
0x03 0xc0 0x45 0x85
@@ -73,64 +73,64 @@
# Test bit
0x03 0xd5 0x11 0x85
-# CHECK: p3 = tstbit(r17, #21)
+# CHECK: p3 = tstbit(r17,#21)
0x03 0xd5 0x31 0x85
-# CHECK: p3 = !tstbit(r17, #21)
+# CHECK: p3 = !tstbit(r17,#21)
0x03 0xd5 0x11 0xc7
-# CHECK: p3 = tstbit(r17, r21)
+# CHECK: p3 = tstbit(r17,r21)
0x03 0xd5 0x31 0xc7
-# CHECK: p3 = !tstbit(r17, r21)
+# CHECK: p3 = !tstbit(r17,r21)
# Vector compare halfwords
0x63 0xde 0x14 0xd2
-# CHECK: p3 = vcmph.eq(r21:20, r31:30)
+# CHECK: p3 = vcmph.eq(r21:20,r31:30)
0x83 0xde 0x14 0xd2
-# CHECK: p3 = vcmph.gt(r21:20, r31:30)
+# CHECK: p3 = vcmph.gt(r21:20,r31:30)
0xa3 0xde 0x14 0xd2
-# CHECK: p3 = vcmph.gtu(r21:20, r31:30)
+# CHECK: p3 = vcmph.gtu(r21:20,r31:30)
0xeb 0xc3 0x14 0xdc
-# CHECK: p3 = vcmph.eq(r21:20, #31)
+# CHECK: p3 = vcmph.eq(r21:20,#31)
0xeb 0xc3 0x34 0xdc
-# CHECK: p3 = vcmph.gt(r21:20, #31)
+# CHECK: p3 = vcmph.gt(r21:20,#31)
0xeb 0xc3 0x54 0xdc
-# CHECK: p3 = vcmph.gtu(r21:20, #31)
+# CHECK: p3 = vcmph.gtu(r21:20,#31)
# Vector compare bytes for any match
0x03 0xfe 0x14 0xd2
-# CHECK: p3 = any8(vcmpb.eq(r21:20, r31:30))
+# CHECK: p3 = any8(vcmpb.eq(r21:20,r31:30))
# Vector compare bytes
0x63 0xde 0x14 0xd2
-# CHECK: p3 = vcmph.eq(r21:20, r31:30)
+# CHECK: p3 = vcmph.eq(r21:20,r31:30)
0x83 0xde 0x14 0xd2
-# CHECK: p3 = vcmph.gt(r21:20, r31:30)
+# CHECK: p3 = vcmph.gt(r21:20,r31:30)
0xa3 0xde 0x14 0xd2
-# CHECK: p3 = vcmph.gtu(r21:20, r31:30)
+# CHECK: p3 = vcmph.gtu(r21:20,r31:30)
0xeb 0xc3 0x14 0xdc
-# CHECK: p3 = vcmph.eq(r21:20, #31)
+# CHECK: p3 = vcmph.eq(r21:20,#31)
0xeb 0xc3 0x34 0xdc
-# CHECK: p3 = vcmph.gt(r21:20, #31)
+# CHECK: p3 = vcmph.gt(r21:20,#31)
0xeb 0xc3 0x54 0xdc
-# CHECK: p3 = vcmph.gtu(r21:20, #31)
+# CHECK: p3 = vcmph.gtu(r21:20,#31)
# Vector compare words
0x03 0xde 0x14 0xd2
-# CHECK: p3 = vcmpw.eq(r21:20, r31:30)
+# CHECK: p3 = vcmpw.eq(r21:20,r31:30)
0x23 0xde 0x14 0xd2
-# CHECK: p3 = vcmpw.gt(r21:20, r31:30)
+# CHECK: p3 = vcmpw.gt(r21:20,r31:30)
0x43 0xde 0x14 0xd2
-# CHECK: p3 = vcmpw.gtu(r21:20, r31:30)
+# CHECK: p3 = vcmpw.gtu(r21:20,r31:30)
0xf3 0xc3 0x14 0xdc
-# CHECK: p3 = vcmpw.eq(r21:20, #31)
+# CHECK: p3 = vcmpw.eq(r21:20,#31)
0xf3 0xc3 0x34 0xdc
-# CHECK: p3 = vcmpw.gt(r21:20, #31)
+# CHECK: p3 = vcmpw.gt(r21:20,#31)
0xf3 0xc3 0x54 0xdc
-# CHECK: p3 = vcmpw.gtu(r21:20, #31)
+# CHECK: p3 = vcmpw.gtu(r21:20,#31)
# Viterbi pack even and odd predicate bits
0x11 0xc2 0x03 0x89
-# CHECK: r17 = vitpack(p3, p2)
+# CHECK: r17 = vitpack(p3,p2)
# Vector mux
0x70 0xde 0x14 0xd1
-# CHECK: r17:16 = vmux(p3, r21:20, r31:30)
+# CHECK: r17:16 = vmux(p3,r21:20,r31:30)
diff --git a/test/MC/Disassembler/Hexagon/xtype_shift.txt b/test/MC/Disassembler/Hexagon/xtype_shift.txt
index e2d6816c1cac..d5688c962cfe 100644
--- a/test/MC/Disassembler/Hexagon/xtype_shift.txt
+++ b/test/MC/Disassembler/Hexagon/xtype_shift.txt
@@ -3,258 +3,258 @@
# Shift by immediate
0x10 0xdf 0x14 0x80
-# CHECK: r17:16 = asr(r21:20, #31)
+# CHECK: r17:16 = asr(r21:20,#31)
0x30 0xdf 0x14 0x80
-# CHECK: r17:16 = lsr(r21:20, #31)
+# CHECK: r17:16 = lsr(r21:20,#31)
0x50 0xdf 0x14 0x80
-# CHECK: r17:16 = asl(r21:20, #31)
+# CHECK: r17:16 = asl(r21:20,#31)
0x11 0xdf 0x15 0x8c
-# CHECK: r17 = asr(r21, #31)
+# CHECK: r17 = asr(r21,#31)
0x31 0xdf 0x15 0x8c
-# CHECK: r17 = lsr(r21, #31)
+# CHECK: r17 = lsr(r21,#31)
0x51 0xdf 0x15 0x8c
-# CHECK: r17 = asl(r21, #31)
+# CHECK: r17 = asl(r21,#31)
# Shift by immediate and accumulate
0x10 0xdf 0x14 0x82
-# CHECK: r17:16 -= asr(r21:20, #31)
+# CHECK: r17:16 -= asr(r21:20,#31)
0x30 0xdf 0x14 0x82
-# CHECK: r17:16 -= lsr(r21:20, #31)
+# CHECK: r17:16 -= lsr(r21:20,#31)
0x50 0xdf 0x14 0x82
-# CHECK: r17:16 -= asl(r21:20, #31)
+# CHECK: r17:16 -= asl(r21:20,#31)
0x90 0xdf 0x14 0x82
-# CHECK: r17:16 += asr(r21:20, #31)
+# CHECK: r17:16 += asr(r21:20,#31)
0xb0 0xdf 0x14 0x82
-# CHECK: r17:16 += lsr(r21:20, #31)
+# CHECK: r17:16 += lsr(r21:20,#31)
0xd0 0xdf 0x14 0x82
-# CHECK: r17:16 += asl(r21:20, #31)
+# CHECK: r17:16 += asl(r21:20,#31)
0x11 0xdf 0x15 0x8e
-# CHECK: r17 -= asr(r21, #31)
+# CHECK: r17 -= asr(r21,#31)
0x31 0xdf 0x15 0x8e
-# CHECK: r17 -= lsr(r21, #31)
+# CHECK: r17 -= lsr(r21,#31)
0x51 0xdf 0x15 0x8e
-# CHECK: r17 -= asl(r21, #31)
+# CHECK: r17 -= asl(r21,#31)
0x91 0xdf 0x15 0x8e
-# CHECK: r17 += asr(r21, #31)
+# CHECK: r17 += asr(r21,#31)
0xb1 0xdf 0x15 0x8e
-# CHECK: r17 += lsr(r21, #31)
+# CHECK: r17 += lsr(r21,#31)
0xd1 0xdf 0x15 0x8e
-# CHECK: r17 += asl(r21, #31)
+# CHECK: r17 += asl(r21,#31)
0x4c 0xf7 0x11 0xde
-# CHECK: r17 = add(#21, asl(r17, #23))
+# CHECK: r17 = add(#21,asl(r17,#23))
0x4e 0xf7 0x11 0xde
-# CHECK: r17 = sub(#21, asl(r17, #23))
+# CHECK: r17 = sub(#21,asl(r17,#23))
0x5c 0xf7 0x11 0xde
-# CHECK: r17 = add(#21, lsr(r17, #23))
+# CHECK: r17 = add(#21,lsr(r17,#23))
0x5e 0xf7 0x11 0xde
-# CHECK: r17 = sub(#21, lsr(r17, #23))
+# CHECK: r17 = sub(#21,lsr(r17,#23))
# Shift by immediate and add
0xf1 0xd5 0x1f 0xc4
-# CHECK: r17 = addasl(r21, r31, #7)
+# CHECK: r17 = addasl(r21,r31,#7)
# Shift by immediate and logical
0x10 0xdf 0x54 0x82
-# CHECK: r17:16 &= asr(r21:20, #31)
+# CHECK: r17:16 &= asr(r21:20,#31)
0x30 0xdf 0x54 0x82
-# CHECK: r17:16 &= lsr(r21:20, #31)
+# CHECK: r17:16 &= lsr(r21:20,#31)
0x50 0xdf 0x54 0x82
-# CHECK: r17:16 &= asl(r21:20, #31)
+# CHECK: r17:16 &= asl(r21:20,#31)
0x90 0xdf 0x54 0x82
-# CHECK: r17:16 |= asr(r21:20, #31)
+# CHECK: r17:16 |= asr(r21:20,#31)
0xb0 0xdf 0x54 0x82
-# CHECK: r17:16 |= lsr(r21:20, #31)
+# CHECK: r17:16 |= lsr(r21:20,#31)
0xd0 0xdf 0x54 0x82
-# CHECK: r17:16 |= asl(r21:20, #31)
+# CHECK: r17:16 |= asl(r21:20,#31)
0x30 0xdf 0x94 0x82
-# CHECK: r17:16 ^= lsr(r21:20, #31)
+# CHECK: r17:16 ^= lsr(r21:20,#31)
0x50 0xdf 0x94 0x82
-# CHECK: r17:16 ^= asl(r21:20, #31)
+# CHECK: r17:16 ^= asl(r21:20,#31)
0x11 0xdf 0x55 0x8e
-# CHECK: r17 &= asr(r21, #31)
+# CHECK: r17 &= asr(r21,#31)
0x31 0xdf 0x55 0x8e
-# CHECK: r17 &= lsr(r21, #31)
+# CHECK: r17 &= lsr(r21,#31)
0x51 0xdf 0x55 0x8e
-# CHECK: r17 &= asl(r21, #31)
+# CHECK: r17 &= asl(r21,#31)
0x91 0xdf 0x55 0x8e
-# CHECK: r17 |= asr(r21, #31)
+# CHECK: r17 |= asr(r21,#31)
0xb1 0xdf 0x55 0x8e
-# CHECK: r17 |= lsr(r21, #31)
+# CHECK: r17 |= lsr(r21,#31)
0xd1 0xdf 0x55 0x8e
-# CHECK: r17 |= asl(r21, #31)
+# CHECK: r17 |= asl(r21,#31)
0x31 0xdf 0x95 0x8e
-# CHECK: r17 ^= lsr(r21, #31)
+# CHECK: r17 ^= lsr(r21,#31)
0x51 0xdf 0x95 0x8e
-# CHECK: r17 ^= asl(r21, #31)
+# CHECK: r17 ^= asl(r21,#31)
0x48 0xff 0x11 0xde
-# CHECK: r17 = and(#21, asl(r17, #31))
+# CHECK: r17 = and(#21,asl(r17,#31))
0x4a 0xff 0x11 0xde
-# CHECK: r17 = or(#21, asl(r17, #31))
+# CHECK: r17 = or(#21,asl(r17,#31))
0x58 0xff 0x11 0xde
-# CHECK: r17 = and(#21, lsr(r17, #31))
+# CHECK: r17 = and(#21,lsr(r17,#31))
0x5a 0xff 0x11 0xde
-# CHECK: r17 = or(#21, lsr(r17, #31))
+# CHECK: r17 = or(#21,lsr(r17,#31))
# Shift right by immediate with rounding
0xf0 0xdf 0xd4 0x80
-# CHECK: r17:16 = asr(r21:20, #31):rnd
+# CHECK: r17:16 = asr(r21:20,#31):rnd
0x11 0xdf 0x55 0x8c
-# CHECK: r17 = asr(r21, #31):rnd
+# CHECK: r17 = asr(r21,#31):rnd
# Shift left by immediate with saturation
0x51 0xdf 0x55 0x8c
-# CHECK: r17 = asl(r21, #31):sat
+# CHECK: r17 = asl(r21,#31):sat
# Shift by register
0x10 0xdf 0x94 0xc3
-# CHECK: r17:16 = asr(r21:20, r31)
+# CHECK: r17:16 = asr(r21:20,r31)
0x50 0xdf 0x94 0xc3
-# CHECK: r17:16 = lsr(r21:20, r31)
+# CHECK: r17:16 = lsr(r21:20,r31)
0x90 0xdf 0x94 0xc3
-# CHECK: r17:16 = asl(r21:20, r31)
+# CHECK: r17:16 = asl(r21:20,r31)
0xd0 0xdf 0x94 0xc3
-# CHECK: r17:16 = lsl(r21:20, r31)
+# CHECK: r17:16 = lsl(r21:20,r31)
0x11 0xdf 0x55 0xc6
-# CHECK: r17 = asr(r21, r31)
+# CHECK: r17 = asr(r21,r31)
0x51 0xdf 0x55 0xc6
-# CHECK: r17 = lsr(r21, r31)
+# CHECK: r17 = lsr(r21,r31)
0x91 0xdf 0x55 0xc6
-# CHECK: r17 = asl(r21, r31)
+# CHECK: r17 = asl(r21,r31)
0xd1 0xdf 0x55 0xc6
-# CHECK: r17 = lsl(r21, r31)
+# CHECK: r17 = lsl(r21,r31)
0xf1 0xdf 0x8a 0xc6
-# CHECK: r17 = lsl(#21, r31)
+# CHECK: r17 = lsl(#21,r31)
# Shift by register and accumulate
0x10 0xdf 0x94 0xcb
-# CHECK: r17:16 -= asr(r21:20, r31)
+# CHECK: r17:16 -= asr(r21:20,r31)
0x50 0xdf 0x94 0xcb
-# CHECK: r17:16 -= lsr(r21:20, r31)
+# CHECK: r17:16 -= lsr(r21:20,r31)
0x90 0xdf 0x94 0xcb
-# CHECK: r17:16 -= asl(r21:20, r31)
+# CHECK: r17:16 -= asl(r21:20,r31)
0xd0 0xdf 0x94 0xcb
-# CHECK: r17:16 -= lsl(r21:20, r31)
+# CHECK: r17:16 -= lsl(r21:20,r31)
0x10 0xdf 0xd4 0xcb
-# CHECK: r17:16 += asr(r21:20, r31)
+# CHECK: r17:16 += asr(r21:20,r31)
0x50 0xdf 0xd4 0xcb
-# CHECK: r17:16 += lsr(r21:20, r31)
+# CHECK: r17:16 += lsr(r21:20,r31)
0x90 0xdf 0xd4 0xcb
-# CHECK: r17:16 += asl(r21:20, r31)
+# CHECK: r17:16 += asl(r21:20,r31)
0xd0 0xdf 0xd4 0xcb
-# CHECK: r17:16 += lsl(r21:20, r31)
+# CHECK: r17:16 += lsl(r21:20,r31)
0x11 0xdf 0x95 0xcc
-# CHECK: r17 -= asr(r21, r31)
+# CHECK: r17 -= asr(r21,r31)
0x51 0xdf 0x95 0xcc
-# CHECK: r17 -= lsr(r21, r31)
+# CHECK: r17 -= lsr(r21,r31)
0x91 0xdf 0x95 0xcc
-# CHECK: r17 -= asl(r21, r31)
+# CHECK: r17 -= asl(r21,r31)
0xd1 0xdf 0x95 0xcc
-# CHECK: r17 -= lsl(r21, r31)
+# CHECK: r17 -= lsl(r21,r31)
0x11 0xdf 0xd5 0xcc
-# CHECK: r17 += asr(r21, r31)
+# CHECK: r17 += asr(r21,r31)
0x51 0xdf 0xd5 0xcc
-# CHECK: r17 += lsr(r21, r31)
+# CHECK: r17 += lsr(r21,r31)
0x91 0xdf 0xd5 0xcc
-# CHECK: r17 += asl(r21, r31)
+# CHECK: r17 += asl(r21,r31)
0xd1 0xdf 0xd5 0xcc
-# CHECK: r17 += lsl(r21, r31)
+# CHECK: r17 += lsl(r21,r31)
# Shift by register and logical
0x10 0xdf 0x14 0xcb
-# CHECK: r17:16 |= asr(r21:20, r31)
+# CHECK: r17:16 |= asr(r21:20,r31)
0x50 0xdf 0x14 0xcb
-# CHECK: r17:16 |= lsr(r21:20, r31)
+# CHECK: r17:16 |= lsr(r21:20,r31)
0x90 0xdf 0x14 0xcb
-# CHECK: r17:16 |= asl(r21:20, r31)
+# CHECK: r17:16 |= asl(r21:20,r31)
0xd0 0xdf 0x14 0xcb
-# CHECK: r17:16 |= lsl(r21:20, r31)
+# CHECK: r17:16 |= lsl(r21:20,r31)
0x10 0xdf 0x54 0xcb
-# CHECK: r17:16 &= asr(r21:20, r31)
+# CHECK: r17:16 &= asr(r21:20,r31)
0x50 0xdf 0x54 0xcb
-# CHECK: r17:16 &= lsr(r21:20, r31)
+# CHECK: r17:16 &= lsr(r21:20,r31)
0x90 0xdf 0x54 0xcb
-# CHECK: r17:16 &= asl(r21:20, r31)
+# CHECK: r17:16 &= asl(r21:20,r31)
0xd0 0xdf 0x54 0xcb
-# CHECK: r17:16 &= lsl(r21:20, r31)
+# CHECK: r17:16 &= lsl(r21:20,r31)
0x10 0xdf 0x74 0xcb
-# CHECK: r17:16 ^= asr(r21:20, r31)
+# CHECK: r17:16 ^= asr(r21:20,r31)
0x50 0xdf 0x74 0xcb
-# CHECK: r17:16 ^= lsr(r21:20, r31)
+# CHECK: r17:16 ^= lsr(r21:20,r31)
0x90 0xdf 0x74 0xcb
-# CHECK: r17:16 ^= asl(r21:20, r31)
+# CHECK: r17:16 ^= asl(r21:20,r31)
0xd0 0xdf 0x74 0xcb
-# CHECK: r17:16 ^= lsl(r21:20, r31)
+# CHECK: r17:16 ^= lsl(r21:20,r31)
0x11 0xdf 0x15 0xcc
-# CHECK: r17 |= asr(r21, r31)
+# CHECK: r17 |= asr(r21,r31)
0x51 0xdf 0x15 0xcc
-# CHECK: r17 |= lsr(r21, r31)
+# CHECK: r17 |= lsr(r21,r31)
0x91 0xdf 0x15 0xcc
-# CHECK: r17 |= asl(r21, r31)
+# CHECK: r17 |= asl(r21,r31)
0xd1 0xdf 0x15 0xcc
-# CHECK: r17 |= lsl(r21, r31)
+# CHECK: r17 |= lsl(r21,r31)
0x11 0xdf 0x55 0xcc
-# CHECK: r17 &= asr(r21, r31)
+# CHECK: r17 &= asr(r21,r31)
0x51 0xdf 0x55 0xcc
-# CHECK: r17 &= lsr(r21, r31)
+# CHECK: r17 &= lsr(r21,r31)
0x91 0xdf 0x55 0xcc
-# CHECK: r17 &= asl(r21, r31)
+# CHECK: r17 &= asl(r21,r31)
0xd1 0xdf 0x55 0xcc
-# CHECK: r17 &= lsl(r21, r31)
+# CHECK: r17 &= lsl(r21,r31)
# Shift by register with saturation
0x11 0xdf 0x15 0xc6
-# CHECK: r17 = asr(r21, r31):sat
+# CHECK: r17 = asr(r21,r31):sat
0x91 0xdf 0x15 0xc6
-# CHECK: r17 = asl(r21, r31):sat
+# CHECK: r17 = asl(r21,r31):sat
# Vector shift halfwords by immediate
0x10 0xc5 0x94 0x80
-# CHECK: r17:16 = vasrh(r21:20, #5)
+# CHECK: r17:16 = vasrh(r21:20,#5)
0x30 0xc5 0x94 0x80
-# CHECK: r17:16 = vlsrh(r21:20, #5)
+# CHECK: r17:16 = vlsrh(r21:20,#5)
0x50 0xc5 0x94 0x80
-# CHECK: r17:16 = vaslh(r21:20, #5)
+# CHECK: r17:16 = vaslh(r21:20,#5)
# Vector arithmetic shift halfwords with round
0x10 0xc5 0x34 0x80
-# CHECK: r17:16 = vasrh(r21:20, #5):raw
+# CHECK: r17:16 = vasrh(r21:20,#5):raw
# Vector arithmetic shift halfwords with saturate and pack
0x91 0xc5 0x74 0x88
-# CHECK: r17 = vasrhub(r21:20, #5):raw
+# CHECK: r17 = vasrhub(r21:20,#5):raw
0xb1 0xc5 0x74 0x88
-# CHECK: r17 = vasrhub(r21:20, #5):sat
+# CHECK: r17 = vasrhub(r21:20,#5):sat
# Vector shift halfwords by register
0x10 0xdf 0x54 0xc3
-# CHECK: r17:16 = vasrh(r21:20, r31)
+# CHECK: r17:16 = vasrh(r21:20,r31)
0x50 0xdf 0x54 0xc3
-# CHECK: r17:16 = vlsrh(r21:20, r31)
+# CHECK: r17:16 = vlsrh(r21:20,r31)
0x90 0xdf 0x54 0xc3
-# CHECK: r17:16 = vaslh(r21:20, r31)
+# CHECK: r17:16 = vaslh(r21:20,r31)
0xd0 0xdf 0x54 0xc3
-# CHECK: r17:16 = vlslh(r21:20, r31)
+# CHECK: r17:16 = vlslh(r21:20,r31)
# Vector shift words by immediate
0x10 0xdf 0x54 0x80
-# CHECK: r17:16 = vasrw(r21:20, #31)
+# CHECK: r17:16 = vasrw(r21:20,#31)
0x30 0xdf 0x54 0x80
-# CHECK: r17:16 = vlsrw(r21:20, #31)
+# CHECK: r17:16 = vlsrw(r21:20,#31)
0x50 0xdf 0x54 0x80
-# CHECK: r17:16 = vaslw(r21:20, #31)
+# CHECK: r17:16 = vaslw(r21:20,#31)
# Vector shift words by register
0x10 0xdf 0x14 0xc3
-# CHECK: r17:16 = vasrw(r21:20, r31)
+# CHECK: r17:16 = vasrw(r21:20,r31)
0x50 0xdf 0x14 0xc3
-# CHECK: r17:16 = vlsrw(r21:20, r31)
+# CHECK: r17:16 = vlsrw(r21:20,r31)
0x90 0xdf 0x14 0xc3
-# CHECK: r17:16 = vaslw(r21:20, r31)
+# CHECK: r17:16 = vaslw(r21:20,r31)
0xd0 0xdf 0x14 0xc3
-# CHECK: r17:16 = vlslw(r21:20, r31)
+# CHECK: r17:16 = vlslw(r21:20,r31)
# Vector shift words with truncate and pack
0x51 0xdf 0xd4 0x88
-# CHECK: r17 = vasrw(r21:20, #31)
+# CHECK: r17 = vasrw(r21:20,#31)
0x51 0xdf 0x14 0xc5
-# CHECK: r17 = vasrw(r21:20, r31)
+# CHECK: r17 = vasrw(r21:20,r31)
diff --git a/test/MC/Disassembler/PowerPC/vsx.txt b/test/MC/Disassembler/PowerPC/vsx.txt
index 3f8adc912452..0c647737c371 100644
--- a/test/MC/Disassembler/PowerPC/vsx.txt
+++ b/test/MC/Disassembler/PowerPC/vsx.txt
@@ -525,8 +525,8 @@
# CHECK: xxswapd 7, 63
0xf0 0xff 0xfa 0x56
-# CHECK: mfvsrd 3, 0
-0x7c 0x03 0x00 0x66
+# CHECK: mfvsrd 3, 40
+0x7d 0x03 0x00 0x67
# CHECK: mfvsrwz 5, 0
0x7c 0x05 0x00 0xe6
diff --git a/test/MC/Disassembler/X86/avx-512.txt b/test/MC/Disassembler/X86/avx-512.txt
index fd1db3b22ec5..b0d1009476f5 100644
--- a/test/MC/Disassembler/X86/avx-512.txt
+++ b/test/MC/Disassembler/X86/avx-512.txt
@@ -170,3 +170,98 @@
# CHECK: vcmpps $127, %xmm27, %xmm11, %k4
0x62 0x91 0x24 0x08 0xc2 0xe3 0x7f
+# CHECK: vpgatherdd 256(%r9,%xmm31), %xmm17 {%k1}
+0x62 0x82 0x7d 0x01 0x90 0x4c 0x39 0x40
+
+# CHECK: vpgatherdd 256(%r9,%ymm31), %ymm19 {%k1}
+0x62 0x82 0x7d 0x21 0x90 0x5c 0x39 0x40
+
+# CHECK: vpgatherdq 256(%r9,%xmm31), %xmm17 {%k1}
+0x62 0x82 0xfd 0x01 0x90 0x4c 0x39 0x20
+
+# CHECK: vpgatherdq 256(%r9,%xmm31), %ymm26 {%k1}
+0x62 0x02 0xfd 0x21 0x90 0x54 0x39 0x20
+
+# CHECK: vpgatherqd 256(%r9,%xmm31), %xmm21 {%k1}
+0x62 0x82 0x7d 0x01 0x91 0x6c 0x39 0x40
+
+# CHECK: vpgatherqd 256(%r9,%ymm31), %xmm25 {%k1}
+0x62 0x02 0x7d 0x21 0x91 0x4c 0x39 0x40
+
+# CHECK: vpgatherqq 256(%r9,%xmm31), %xmm18 {%k1}
+0x62 0x82 0xfd 0x01 0x91 0x54 0x39 0x20
+
+# CHECK: vpgatherqq 256(%r9,%ymm31), %ymm19 {%k1}
+0x62 0x82 0xfd 0x21 0x91 0x5c 0x39 0x20
+
+# CHECK: vgatherdpd 256(%r9,%xmm31), %xmm17 {%k1}
+0x62 0x82 0xfd 0x01 0x92 0x4c 0x39 0x20
+
+# CHECK: vgatherdpd 256(%r9,%xmm31), %ymm23 {%k1}
+0x62 0x82 0xfd 0x21 0x92 0x7c 0x39 0x20
+
+# CHECK: vgatherdps 256(%r9,%xmm31), %xmm18 {%k1}
+0x62 0x82 0x7d 0x01 0x92 0x54 0x39 0x40
+
+# CHECK: vgatherdps 256(%r9,%ymm31), %ymm27 {%k1}
+0x62 0x02 0x7d 0x21 0x92 0x5c 0x39 0x40
+
+# CHECK: vgatherqpd 256(%r9,%xmm31), %xmm17 {%k1}
+0x62 0x82 0xfd 0x01 0x93 0x4c 0x39 0x20
+
+# CHECK: vgatherqpd 256(%r9,%ymm31), %ymm29 {%k1}
+0x62 0x02 0xfd 0x21 0x93 0x6c 0x39 0x20
+
+# CHECK: vgatherqps 256(%r9,%xmm31), %xmm21 {%k1}
+0x62 0x82 0x7d 0x01 0x93 0x6c 0x39 0x40
+
+# CHECK: vgatherqps 256(%r9,%ymm31), %xmm19 {%k1}
+0x62 0x82 0x7d 0x21 0x93 0x5c 0x39 0x40
+
+# CHECK: vpscatterdd %xmm20, 256(%r9,%xmm31) {%k1}
+0x62 0x82 0x7d 0x01 0xa0 0x64 0x39 0x40
+
+# CHECK: vpscatterdd %ymm28, 256(%r9,%ymm31) {%k1}
+0x62 0x02 0x7d 0x21 0xa0 0x64 0x39 0x40
+
+# CHECK: vpscatterdq %xmm21, 256(%r9,%xmm31) {%k1}
+0x62 0x82 0xfd 0x01 0xa0 0x6c 0x39 0x20
+
+# CHECK: vpscatterdq %ymm28, 256(%r9,%xmm31) {%k1}
+0x62 0x02 0xfd 0x21 0xa0 0x64 0x39 0x20
+
+# CHECK: vpscatterqd %xmm22, 256(%r9,%xmm31) {%k1}
+0x62 0x82 0x7d 0x01 0xa1 0x74 0x39 0x40
+
+# CHECK: vpscatterqd %xmm24, 256(%r9,%ymm31) {%k1}
+0x62 0x02 0x7d 0x21 0xa1 0x44 0x39 0x40
+
+# CHECK: vpscatterqq %xmm28, 256(%r9,%xmm31) {%k1}
+0x62 0x02 0xfd 0x01 0xa1 0x64 0x39 0x20
+
+# CHECK: vpscatterqq %ymm19, 256(%r9,%ymm31) {%k1}
+0x62 0x82 0xfd 0x21 0xa1 0x5c 0x39 0x20
+
+# CHECK: vscatterdps %xmm20, 256(%r9,%xmm31) {%k1}
+0x62 0x82 0x7d 0x01 0xa2 0x64 0x39 0x40
+
+# CHECK: vscatterdps %ymm28, 256(%r9,%ymm31) {%k1}
+0x62 0x02 0x7d 0x21 0xa2 0x64 0x39 0x40
+
+# CHECK: vscatterdpd %xmm21, 256(%r9,%xmm31) {%k1}
+0x62 0x82 0xfd 0x01 0xa2 0x6c 0x39 0x20
+
+# CHECK: vscatterdpd %ymm28, 256(%r9,%xmm31) {%k1}
+0x62 0x02 0xfd 0x21 0xa2 0x64 0x39 0x20
+
+# CHECK: vscatterqps %xmm22, 256(%r9,%xmm31) {%k1}
+0x62 0x82 0x7d 0x01 0xa3 0x74 0x39 0x40
+
+# CHECK: vscatterqps %xmm24, 256(%r9,%ymm31) {%k1}
+0x62 0x02 0x7d 0x21 0xa3 0x44 0x39 0x40
+
+# CHECK: vscatterqpd %xmm28, 256(%r9,%xmm31) {%k1}
+0x62 0x02 0xfd 0x01 0xa3 0x64 0x39 0x20
+
+# CHECK: vscatterqpd %ymm19, 256(%r9,%ymm31) {%k1}
+0x62 0x82 0xfd 0x21 0xa3 0x5c 0x39 0x20
diff --git a/test/MC/Disassembler/X86/fp-stack.txt b/test/MC/Disassembler/X86/fp-stack.txt
index f9aa402a3913..ad8d820d798d 100644
--- a/test/MC/Disassembler/X86/fp-stack.txt
+++ b/test/MC/Disassembler/X86/fp-stack.txt
@@ -1,1037 +1,1061 @@
-# RUN: llvm-mc --disassemble %s -triple=x86_64 | FileCheck %s
-# RUN: llvm-mc --disassemble %s -triple=i686-apple-darwin9 | FileCheck %s
-
-# CHECK: fadd %st(0)
-0xd8,0xc0
-
-# CHECK: fadd %st(1)
-0xd8,0xc1
-
-# CHECK: fadd %st(2)
-0xd8,0xc2
-
-# CHECK: fadd %st(3)
-0xd8,0xc3
-
-# CHECK: fadd %st(4)
-0xd8,0xc4
-
-# CHECK: fadd %st(5)
-0xd8,0xc5
-
-# CHECK: fadd %st(6)
-0xd8,0xc6
-
-# CHECK: fadd %st(7)
-0xd8,0xc7
-
-# CHECK: fmul %st(0)
-0xd8,0xc8
-
-# CHECK: fmul %st(1)
-0xd8,0xc9
-
-# CHECK: fmul %st(2)
-0xd8,0xca
-
-# CHECK: fmul %st(3)
-0xd8,0xcb
-
-# CHECK: fmul %st(4)
-0xd8,0xcc
-
-# CHECK: fmul %st(5)
-0xd8,0xcd
-
-# CHECK: fmul %st(6)
-0xd8,0xce
-
-# CHECK: fmul %st(7)
-0xd8,0xcf
-
-# CHECK: fcom %st(0)
-0xd8,0xd0
-
-# CHECK: fcom %st(1)
-0xd8,0xd1
-
-# CHECK: fcom %st(2)
-0xd8,0xd2
-
-# CHECK: fcom %st(3)
-0xd8,0xd3
-
-# CHECK: fcom %st(4)
-0xd8,0xd4
-
-# CHECK: fcom %st(5)
-0xd8,0xd5
-
-# CHECK: fcom %st(6)
-0xd8,0xd6
-
-# CHECK: fcom %st(7)
-0xd8,0xd7
-
-# CHECK: fcomp %st(0)
-0xd8,0xd8
-
-# CHECK: fcomp %st(1)
-0xd8,0xd9
-
-# CHECK: fcomp %st(2)
-0xd8,0xda
-
-# CHECK: fcomp %st(3)
-0xd8,0xdb
-
-# CHECK: fcomp %st(4)
-0xd8,0xdc
-
-# CHECK: fcomp %st(5)
-0xd8,0xdd
-
-# CHECK: fcomp %st(6)
-0xd8,0xde
-
-# CHECK: fcomp %st(7)
-0xd8,0xdf
-
-# CHECK: fsub %st(0)
-0xd8,0xe0
-
-# CHECK: fsub %st(1)
-0xd8,0xe1
-
-# CHECK: fsub %st(2)
-0xd8,0xe2
-
-# CHECK: fsub %st(3)
-0xd8,0xe3
-
-# CHECK: fsub %st(4)
-0xd8,0xe4
-
-# CHECK: fsub %st(5)
-0xd8,0xe5
-
-# CHECK: fsub %st(6)
-0xd8,0xe6
-
-# CHECK: fsub %st(7)
-0xd8,0xe7
-
-# CHECK: fsubr %st(0)
-0xd8,0xe8
-
-# CHECK: fsubr %st(1)
-0xd8,0xe9
-
-# CHECK: fsubr %st(2)
-0xd8,0xea
-
-# CHECK: fsubr %st(3)
-0xd8,0xeb
-
-# CHECK: fsubr %st(4)
-0xd8,0xec
-
-# CHECK: fsubr %st(5)
-0xd8,0xed
-
-# CHECK: fsubr %st(6)
-0xd8,0xee
-
-# CHECK: fsubr %st(7)
-0xd8,0xef
-
-# CHECK: fdiv %st(0)
-0xd8,0xf0
-
-# CHECK: fdiv %st(1)
-0xd8,0xf1
-
-# CHECK: fdiv %st(2)
-0xd8,0xf2
-
-# CHECK: fdiv %st(3)
-0xd8,0xf3
-
-# CHECK: fdiv %st(4)
-0xd8,0xf4
-
-# CHECK: fdiv %st(5)
-0xd8,0xf5
-
-# CHECK: fdiv %st(6)
-0xd8,0xf6
-
-# CHECK: fdiv %st(7)
-0xd8,0xf7
-
-# CHECK: fdivr %st(0)
-0xd8,0xf8
-
-# CHECK: fdivr %st(1)
-0xd8,0xf9
-
-# CHECK: fdivr %st(2)
-0xd8,0xfa
-
-# CHECK: fdivr %st(3)
-0xd8,0xfb
-
-# CHECK: fdivr %st(4)
-0xd8,0xfc
-
-# CHECK: fdivr %st(5)
-0xd8,0xfd
-
-# CHECK: fdivr %st(6)
-0xd8,0xfe
-
-# CHECK: fdivr %st(7)
-0xd8,0xff
-
-# CHECK: fld %st(0)
-0xd9,0xc0
-
-# CHECK: fld %st(1)
-0xd9,0xc1
-
-# CHECK: fld %st(2)
-0xd9,0xc2
-
-# CHECK: fld %st(3)
-0xd9,0xc3
-
-# CHECK: fld %st(4)
-0xd9,0xc4
-
-# CHECK: fld %st(5)
-0xd9,0xc5
-
-# CHECK: fld %st(6)
-0xd9,0xc6
-
-# CHECK: fld %st(7)
-0xd9,0xc7
-
-# CHECK: fxch %st(0)
-0xd9,0xc8
-
-# CHECK: fxch %st(1)
-0xd9,0xc9
-
-# CHECK: fxch %st(2)
-0xd9,0xca
-
-# CHECK: fxch %st(3)
-0xd9,0xcb
-
-# CHECK: fxch %st(4)
-0xd9,0xcc
-
-# CHECK: fxch %st(5)
-0xd9,0xcd
-
-# CHECK: fxch %st(6)
-0xd9,0xce
-
-# CHECK: fxch %st(7)
-0xd9,0xcf
-
-# CHECK: fnop
-0xd9,0xd0
-
-# CHECK: fchs
-0xd9,0xe0
-
-# CHECK: fabs
-0xd9,0xe1
-
-# CHECK: ftst
-0xd9,0xe4
-
-# CHECK: fxam
-0xd9,0xe5
-
-# CHECK: fld1
-0xd9,0xe8
-
-# CHECK: fldl2t
-0xd9,0xe9
-
-# CHECK: fldl2e
-0xd9,0xea
-
-# CHECK: fldpi
-0xd9,0xeb
-
-# CHECK: fldlg2
-0xd9,0xec
-
-# CHECK: fldln2
-0xd9,0xed
-
-# CHECK: fldz
-0xd9,0xee
-
-# CHECK: f2xm1
-0xd9,0xf0
-
-# CHECK: fyl2x
-0xd9,0xf1
-
-# CHECK: fptan
-0xd9,0xf2
-
-# CHECK: fpatan
-0xd9,0xf3
-
-# CHECK: fxtract
-0xd9,0xf4
-
-# CHECK: fprem1
-0xd9,0xf5
-
-# CHECK: fdecstp
-0xd9,0xf6
-
-# CHECK: fincstp
-0xd9,0xf7
-
-# CHECK: fprem
-0xd9,0xf8
-
-# CHECK: fyl2xp1
-0xd9,0xf9
-
-# CHECK: fsqrt
-0xd9,0xfa
-
-# CHECK: fsincos
-0xd9,0xfb
-
-# CHECK: frndint
-0xd9,0xfc
-
-# CHECK: fscale
-0xd9,0xfd
-
-# CHECK: fsin
-0xd9,0xfe
-
-# CHECK: fcos
-0xd9,0xff
-
-# CHECK: fcmovb %st(0), %st(0)
-0xda,0xc0
-
-# CHECK: fcmovb %st(1), %st(0)
-0xda,0xc1
-
-# CHECK: fcmovb %st(2), %st(0)
-0xda,0xc2
-
-# CHECK: fcmovb %st(3), %st(0)
-0xda,0xc3
-
-# CHECK: fcmovb %st(4), %st(0)
-0xda,0xc4
-
-# CHECK: fcmovb %st(5), %st(0)
-0xda,0xc5
-
-# CHECK: fcmovb %st(6), %st(0)
-0xda,0xc6
-
-# CHECK: fcmovb %st(7), %st(0)
-0xda,0xc7
-
-# CHECK: fcmove %st(0), %st(0)
-0xda,0xc8
-
-# CHECK: fcmove %st(1), %st(0)
-0xda,0xc9
-
-# CHECK: fcmove %st(2), %st(0)
-0xda,0xca
-
-# CHECK: fcmove %st(3), %st(0)
-0xda,0xcb
-
-# CHECK: fcmove %st(4), %st(0)
-0xda,0xcc
-
-# CHECK: fcmove %st(5), %st(0)
-0xda,0xcd
-
-# CHECK: fcmove %st(6), %st(0)
-0xda,0xce
-
-# CHECK: fcmove %st(7), %st(0)
-0xda,0xcf
-
-# CHECK: fcmovbe %st(0), %st(0)
-0xda,0xd0
-
-# CHECK: fcmovbe %st(1), %st(0)
-0xda,0xd1
-
-# CHECK: fcmovbe %st(2), %st(0)
-0xda,0xd2
-
-# CHECK: fcmovbe %st(3), %st(0)
-0xda,0xd3
-
-# CHECK: fcmovbe %st(4), %st(0)
-0xda,0xd4
-
-# CHECK: fcmovbe %st(5), %st(0)
-0xda,0xd5
-
-# CHECK: fcmovbe %st(6), %st(0)
-0xda,0xd6
-
-# CHECK: fcmovbe %st(7), %st(0)
-0xda,0xd7
-
-# CHECK: fcmovu %st(0), %st(0)
-0xda,0xd8
-
-# CHECK: fcmovu %st(1), %st(0)
-0xda,0xd9
-
-# CHECK: fcmovu %st(2), %st(0)
-0xda,0xda
-
-# CHECK: fcmovu %st(3), %st(0)
-0xda,0xdb
-
-# CHECK: fcmovu %st(4), %st(0)
-0xda,0xdc
-
-# CHECK: fcmovu %st(5), %st(0)
-0xda,0xdd
-
-# CHECK: fcmovu %st(6), %st(0)
-0xda,0xde
-
-# CHECK: fcmovu %st(7), %st(0)
-0xda,0xdf
-
-# CHECK: fucompp
-0xda,0xe9
-
-# CHECK: fcmovnb %st(0), %st(0)
-0xdb,0xc0
-
-# CHECK: fcmovnb %st(1), %st(0)
-0xdb,0xc1
-
-# CHECK: fcmovnb %st(2), %st(0)
-0xdb,0xc2
-
-# CHECK: fcmovnb %st(3), %st(0)
-0xdb,0xc3
-
-# CHECK: fcmovnb %st(4), %st(0)
-0xdb,0xc4
-
-# CHECK: fcmovnb %st(5), %st(0)
-0xdb,0xc5
-
-# CHECK: fcmovnb %st(6), %st(0)
-0xdb,0xc6
-
-# CHECK: fcmovnb %st(7), %st(0)
-0xdb,0xc7
-
-# CHECK: fcmovne %st(0), %st(0)
-0xdb,0xc8
-
-# CHECK: fcmovne %st(1), %st(0)
-0xdb,0xc9
-
-# CHECK: fcmovne %st(2), %st(0)
-0xdb,0xca
-
-# CHECK: fcmovne %st(3), %st(0)
-0xdb,0xcb
-
-# CHECK: fcmovne %st(4), %st(0)
-0xdb,0xcc
-
-# CHECK: fcmovne %st(5), %st(0)
-0xdb,0xcd
-
-# CHECK: fcmovne %st(6), %st(0)
-0xdb,0xce
-
-# CHECK: fcmovne %st(7), %st(0)
-0xdb,0xcf
-
-# CHECK: fcmovnbe %st(0), %st(0)
-0xdb,0xd0
-
-# CHECK: fcmovnbe %st(1), %st(0)
-0xdb,0xd1
-
-# CHECK: fcmovnbe %st(2), %st(0)
-0xdb,0xd2
-
-# CHECK: fcmovnbe %st(3), %st(0)
-0xdb,0xd3
-
-# CHECK: fcmovnbe %st(4), %st(0)
-0xdb,0xd4
-
-# CHECK: fcmovnbe %st(5), %st(0)
-0xdb,0xd5
-
-# CHECK: fcmovnbe %st(6), %st(0)
-0xdb,0xd6
-
-# CHECK: fcmovnbe %st(7), %st(0)
-0xdb,0xd7
-
-# CHECK: fcmovnu %st(0), %st(0)
-0xdb,0xd8
-
-# CHECK: fcmovnu %st(1), %st(0)
-0xdb,0xd9
-
-# CHECK: fcmovnu %st(2), %st(0)
-0xdb,0xda
-
-# CHECK: fcmovnu %st(3), %st(0)
-0xdb,0xdb
-
-# CHECK: fcmovnu %st(4), %st(0)
-0xdb,0xdc
-
-# CHECK: fcmovnu %st(5), %st(0)
-0xdb,0xdd
-
-# CHECK: fcmovnu %st(6), %st(0)
-0xdb,0xde
-
-# CHECK: fcmovnu %st(7), %st(0)
-0xdb,0xdf
-
-# CHECK: fnclex
-0xdb,0xe2
-
-# CHECK: fninit
-0xdb,0xe3
-
-# CHECK: fucomi %st(0)
-0xdb,0xe8
-
-# CHECK: fucomi %st(1)
-0xdb,0xe9
-
-# CHECK: fucomi %st(2)
-0xdb,0xea
-
-# CHECK: fucomi %st(3)
-0xdb,0xeb
-
-# CHECK: fucomi %st(4)
-0xdb,0xec
-
-# CHECK: fucomi %st(5)
-0xdb,0xed
-
-# CHECK: fucomi %st(6)
-0xdb,0xee
-
-# CHECK: fucomi %st(7)
-0xdb,0xef
-
-# CHECK: fcomi %st(0)
-0xdb,0xf0
-
-# CHECK: fcomi %st(1)
-0xdb,0xf1
-
-# CHECK: fcomi %st(2)
-0xdb,0xf2
-
-# CHECK: fcomi %st(3)
-0xdb,0xf3
-
-# CHECK: fcomi %st(4)
-0xdb,0xf4
-
-# CHECK: fcomi %st(5)
-0xdb,0xf5
-
-# CHECK: fcomi %st(6)
-0xdb,0xf6
-
-# CHECK: fcomi %st(7)
-0xdb,0xf7
-
-# CHECK: fadd %st(0), %st(0)
-0xdc,0xc0
-
-# CHECK: fadd %st(0), %st(1)
-0xdc,0xc1
-
-# CHECK: fadd %st(0), %st(2)
-0xdc,0xc2
-
-# CHECK: fadd %st(0), %st(3)
-0xdc,0xc3
-
-# CHECK: fadd %st(0), %st(4)
-0xdc,0xc4
-
-# CHECK: fadd %st(0), %st(5)
-0xdc,0xc5
-
-# CHECK: fadd %st(0), %st(6)
-0xdc,0xc6
-
-# CHECK: fadd %st(0), %st(7)
-0xdc,0xc7
-
-# CHECK: fmul %st(0), %st(0)
-0xdc,0xc8
-
-# CHECK: fmul %st(0), %st(1)
-0xdc,0xc9
-
-# CHECK: fmul %st(0), %st(2)
-0xdc,0xca
-
-# CHECK: fmul %st(0), %st(3)
-0xdc,0xcb
-
-# CHECK: fmul %st(0), %st(4)
-0xdc,0xcc
-
-# CHECK: fmul %st(0), %st(5)
-0xdc,0xcd
-
-# CHECK: fmul %st(0), %st(6)
-0xdc,0xce
-
-# CHECK: fmul %st(0), %st(7)
-0xdc,0xcf
-
-# CHECK: fsub %st(0), %st(0)
-0xdc,0xe0
-
-# CHECK: fsub %st(0), %st(1)
-0xdc,0xe1
-
-# CHECK: fsub %st(0), %st(2)
-0xdc,0xe2
-
-# CHECK: fsub %st(0), %st(3)
-0xdc,0xe3
-
-# CHECK: fsub %st(0), %st(4)
-0xdc,0xe4
-
-# CHECK: fsub %st(0), %st(5)
-0xdc,0xe5
-
-# CHECK: fsub %st(0), %st(6)
-0xdc,0xe6
-
-# CHECK: fsub %st(0), %st(7)
-0xdc,0xe7
-
-# CHECK: fsubr %st(0), %st(0)
-0xdc,0xe8
-
-# CHECK: fsubr %st(0), %st(1)
-0xdc,0xe9
-
-# CHECK: fsubr %st(0), %st(2)
-0xdc,0xea
-
-# CHECK: fsubr %st(0), %st(3)
-0xdc,0xeb
-
-# CHECK: fsubr %st(0), %st(4)
-0xdc,0xec
-
-# CHECK: fsubr %st(0), %st(5)
-0xdc,0xed
-
-# CHECK: fsubr %st(0), %st(6)
-0xdc,0xee
-
-# CHECK: fsubr %st(0), %st(7)
-0xdc,0xef
-
-# CHECK: fdiv %st(0), %st(0)
-0xdc,0xf0
-
-# CHECK: fdiv %st(0), %st(1)
-0xdc,0xf1
-
-# CHECK: fdiv %st(0), %st(2)
-0xdc,0xf2
-
-# CHECK: fdiv %st(0), %st(3)
-0xdc,0xf3
-
-# CHECK: fdiv %st(0), %st(4)
-0xdc,0xf4
-
-# CHECK: fdiv %st(0), %st(5)
-0xdc,0xf5
-
-# CHECK: fdiv %st(0), %st(6)
-0xdc,0xf6
-
-# CHECK: fdiv %st(0), %st(7)
-0xdc,0xf7
-
-# CHECK: fdivr %st(0), %st(0)
-0xdc,0xf8
-
-# CHECK: fdivr %st(0), %st(1)
-0xdc,0xf9
-
-# CHECK: fdivr %st(0), %st(2)
-0xdc,0xfa
-
-# CHECK: fdivr %st(0), %st(3)
-0xdc,0xfb
-
-# CHECK: fdivr %st(0), %st(4)
-0xdc,0xfc
-
-# CHECK: fdivr %st(0), %st(5)
-0xdc,0xfd
-
-# CHECK: fdivr %st(0), %st(6)
-0xdc,0xfe
-
-# CHECK: fdivr %st(0), %st(7)
-0xdc,0xff
-
-# CHECK: ffree %st(0)
-0xdd,0xc0
-
-# CHECK: ffree %st(1)
-0xdd,0xc1
-
-# CHECK: ffree %st(2)
-0xdd,0xc2
-
-# CHECK: ffree %st(3)
-0xdd,0xc3
-
-# CHECK: ffree %st(4)
-0xdd,0xc4
-
-# CHECK: ffree %st(5)
-0xdd,0xc5
-
-# CHECK: ffree %st(6)
-0xdd,0xc6
-
-# CHECK: ffree %st(7)
-0xdd,0xc7
-
-# CHECK: fst %st(0)
-0xdd,0xd0
-
-# CHECK: fst %st(1)
-0xdd,0xd1
-
-# CHECK: fst %st(2)
-0xdd,0xd2
-
-# CHECK: fst %st(3)
-0xdd,0xd3
-
-# CHECK: fst %st(4)
-0xdd,0xd4
-
-# CHECK: fst %st(5)
-0xdd,0xd5
-
-# CHECK: fst %st(6)
-0xdd,0xd6
-
-# CHECK: fst %st(7)
-0xdd,0xd7
-
-# CHECK: fstp %st(0)
-0xdd,0xd8
-
-# CHECK: fstp %st(1)
-0xdd,0xd9
-
-# CHECK: fstp %st(2)
-0xdd,0xda
-
-# CHECK: fstp %st(3)
-0xdd,0xdb
-
-# CHECK: fstp %st(4)
-0xdd,0xdc
-
-# CHECK: fstp %st(5)
-0xdd,0xdd
-
-# CHECK: fstp %st(6)
-0xdd,0xde
-
-# CHECK: fstp %st(7)
-0xdd,0xdf
-
-# CHECK: fucom %st(0)
-0xdd,0xe0
-
-# CHECK: fucom %st(1)
-0xdd,0xe1
-
-# CHECK: fucom %st(2)
-0xdd,0xe2
-
-# CHECK: fucom %st(3)
-0xdd,0xe3
-
-# CHECK: fucom %st(4)
-0xdd,0xe4
-
-# CHECK: fucom %st(5)
-0xdd,0xe5
-
-# CHECK: fucom %st(6)
-0xdd,0xe6
-
-# CHECK: fucom %st(7)
-0xdd,0xe7
-
-# CHECK: fucomp %st(0)
-0xdd,0xe8
-
-# CHECK: fucomp %st(1)
-0xdd,0xe9
-
-# CHECK: fucomp %st(2)
-0xdd,0xea
-
-# CHECK: fucomp %st(3)
-0xdd,0xeb
-
-# CHECK: fucomp %st(4)
-0xdd,0xec
-
-# CHECK: fucomp %st(5)
-0xdd,0xed
-
-# CHECK: fucomp %st(6)
-0xdd,0xee
-
-# CHECK: fucomp %st(7)
-0xdd,0xef
-
-# CHECK: faddp %st(0)
-0xde,0xc0
-
-# CHECK: faddp %st(1)
-0xde,0xc1
-
-# CHECK: faddp %st(2)
-0xde,0xc2
-
-# CHECK: faddp %st(3)
-0xde,0xc3
-
-# CHECK: faddp %st(4)
-0xde,0xc4
-
-# CHECK: faddp %st(5)
-0xde,0xc5
-
-# CHECK: faddp %st(6)
-0xde,0xc6
-
-# CHECK: faddp %st(7)
-0xde,0xc7
-
-# CHECK: fmulp %st(0)
-0xde,0xc8
-
-# CHECK: fmulp %st(1)
-0xde,0xc9
-
-# CHECK: fmulp %st(2)
-0xde,0xca
-
-# CHECK: fmulp %st(3)
-0xde,0xcb
-
-# CHECK: fmulp %st(4)
-0xde,0xcc
-
-# CHECK: fmulp %st(5)
-0xde,0xcd
-
-# CHECK: fmulp %st(6)
-0xde,0xce
-
-# CHECK: fmulp %st(7)
-0xde,0xcf
-
-# CHECK: fcompp
-0xde,0xd9
-
-# CHECK: fsubp %st(0)
-0xde,0xe0
-
-# CHECK: fsubp %st(1)
-0xde,0xe1
-
-# CHECK: fsubp %st(2)
-0xde,0xe2
-
-# CHECK: fsubp %st(3)
-0xde,0xe3
-
-# CHECK: fsubp %st(4)
-0xde,0xe4
-
-# CHECK: fsubp %st(5)
-0xde,0xe5
-
-# CHECK: fsubp %st(6)
-0xde,0xe6
-
-# CHECK: fsubp %st(7)
-0xde,0xe7
-
-# CHECK: fsubrp %st(0)
-0xde,0xe8
-
-# CHECK: fsubrp %st(1)
-0xde,0xe9
-
-# CHECK: fsubrp %st(2)
-0xde,0xea
-
-# CHECK: fsubrp %st(3)
-0xde,0xeb
-
-# CHECK: fsubrp %st(4)
-0xde,0xec
-
-# CHECK: fsubrp %st(5)
-0xde,0xed
-
-# CHECK: fsubrp %st(6)
-0xde,0xee
-
-# CHECK: fsubrp %st(7)
-0xde,0xef
-
-# CHECK: fdivp %st(0)
-0xde,0xf0
-
-# CHECK: fdivp %st(1)
-0xde,0xf1
-
-# CHECK: fdivp %st(2)
-0xde,0xf2
-
-# CHECK: fdivp %st(3)
-0xde,0xf3
-
-# CHECK: fdivp %st(4)
-0xde,0xf4
-
-# CHECK: fdivp %st(5)
-0xde,0xf5
-
-# CHECK: fdivp %st(6)
-0xde,0xf6
-
-# CHECK: fdivp %st(7)
-0xde,0xf7
-
-# CHECK: fdivrp %st(0)
-0xde,0xf8
-
-# CHECK: fdivrp %st(1)
-0xde,0xf9
-
-# CHECK: fdivrp %st(2)
-0xde,0xfa
-
-# CHECK: fdivrp %st(3)
-0xde,0xfb
-
-# CHECK: fdivrp %st(4)
-0xde,0xfc
-
-# CHECK: fdivrp %st(5)
-0xde,0xfd
-
-# CHECK: fdivrp %st(6)
-0xde,0xfe
-
-# CHECK: fdivrp %st(7)
-0xde,0xff
-
-# CHECK: fnstsw %ax
-0xdf,0xe0
-
-# CHECK: fucompi %st(0)
-0xdf,0xe8
-
-# CHECK: fucompi %st(1)
-0xdf,0xe9
-
-# CHECK: fucompi %st(2)
-0xdf,0xea
-
-# CHECK: fucompi %st(3)
-0xdf,0xeb
-
-# CHECK: fucompi %st(4)
-0xdf,0xec
-
-# CHECK: fucompi %st(5)
-0xdf,0xed
-
-# CHECK: fucompi %st(6)
-0xdf,0xee
-
-# CHECK: fucompi %st(7)
-0xdf,0xef
-
-# CHECK: fcompi %st(0)
-0xdf,0xf0
-
-# CHECK: fcompi %st(1)
-0xdf,0xf1
-
-# CHECK: fcompi %st(2)
-0xdf,0xf2
-
-# CHECK: fcompi %st(3)
-0xdf,0xf3
-
-# CHECK: fcompi %st(4)
-0xdf,0xf4
-
-# CHECK: fcompi %st(5)
-0xdf,0xf5
-
-# CHECK: fcompi %st(6)
-0xdf,0xf6
-
-# CHECK: fcompi %st(7)
-0xdf,0xf7
+# RUN: llvm-mc --disassemble %s -triple=x86_64 | FileCheck %s
+# RUN: llvm-mc --disassemble %s -triple=i686-apple-darwin9 | FileCheck %s
+
+# CHECK: fadd %st(0)
+0xd8,0xc0
+
+# CHECK: fadd %st(1)
+0xd8,0xc1
+
+# CHECK: fadd %st(2)
+0xd8,0xc2
+
+# CHECK: fadd %st(3)
+0xd8,0xc3
+
+# CHECK: fadd %st(4)
+0xd8,0xc4
+
+# CHECK: fadd %st(5)
+0xd8,0xc5
+
+# CHECK: fadd %st(6)
+0xd8,0xc6
+
+# CHECK: fadd %st(7)
+0xd8,0xc7
+
+# CHECK: fmul %st(0)
+0xd8,0xc8
+
+# CHECK: fmul %st(1)
+0xd8,0xc9
+
+# CHECK: fmul %st(2)
+0xd8,0xca
+
+# CHECK: fmul %st(3)
+0xd8,0xcb
+
+# CHECK: fmul %st(4)
+0xd8,0xcc
+
+# CHECK: fmul %st(5)
+0xd8,0xcd
+
+# CHECK: fmul %st(6)
+0xd8,0xce
+
+# CHECK: fmul %st(7)
+0xd8,0xcf
+
+# CHECK: fcom %st(0)
+0xd8,0xd0
+
+# CHECK: fcom %st(1)
+0xd8,0xd1
+
+# CHECK: fcom %st(2)
+0xd8,0xd2
+
+# CHECK: fcom %st(3)
+0xd8,0xd3
+
+# CHECK: fcom %st(4)
+0xd8,0xd4
+
+# CHECK: fcom %st(5)
+0xd8,0xd5
+
+# CHECK: fcom %st(6)
+0xd8,0xd6
+
+# CHECK: fcom %st(7)
+0xd8,0xd7
+
+# CHECK: fcomp %st(0)
+0xd8,0xd8
+
+# CHECK: fcomp %st(1)
+0xd8,0xd9
+
+# CHECK: fcomp %st(2)
+0xd8,0xda
+
+# CHECK: fcomp %st(3)
+0xd8,0xdb
+
+# CHECK: fcomp %st(4)
+0xd8,0xdc
+
+# CHECK: fcomp %st(5)
+0xd8,0xdd
+
+# CHECK: fcomp %st(6)
+0xd8,0xde
+
+# CHECK: fcomp %st(7)
+0xd8,0xdf
+
+# CHECK: fsub %st(0)
+0xd8,0xe0
+
+# CHECK: fsub %st(1)
+0xd8,0xe1
+
+# CHECK: fsub %st(2)
+0xd8,0xe2
+
+# CHECK: fsub %st(3)
+0xd8,0xe3
+
+# CHECK: fsub %st(4)
+0xd8,0xe4
+
+# CHECK: fsub %st(5)
+0xd8,0xe5
+
+# CHECK: fsub %st(6)
+0xd8,0xe6
+
+# CHECK: fsub %st(7)
+0xd8,0xe7
+
+# CHECK: fsubr %st(0)
+0xd8,0xe8
+
+# CHECK: fsubr %st(1)
+0xd8,0xe9
+
+# CHECK: fsubr %st(2)
+0xd8,0xea
+
+# CHECK: fsubr %st(3)
+0xd8,0xeb
+
+# CHECK: fsubr %st(4)
+0xd8,0xec
+
+# CHECK: fsubr %st(5)
+0xd8,0xed
+
+# CHECK: fsubr %st(6)
+0xd8,0xee
+
+# CHECK: fsubr %st(7)
+0xd8,0xef
+
+# CHECK: fdiv %st(0)
+0xd8,0xf0
+
+# CHECK: fdiv %st(1)
+0xd8,0xf1
+
+# CHECK: fdiv %st(2)
+0xd8,0xf2
+
+# CHECK: fdiv %st(3)
+0xd8,0xf3
+
+# CHECK: fdiv %st(4)
+0xd8,0xf4
+
+# CHECK: fdiv %st(5)
+0xd8,0xf5
+
+# CHECK: fdiv %st(6)
+0xd8,0xf6
+
+# CHECK: fdiv %st(7)
+0xd8,0xf7
+
+# CHECK: fdivr %st(0)
+0xd8,0xf8
+
+# CHECK: fdivr %st(1)
+0xd8,0xf9
+
+# CHECK: fdivr %st(2)
+0xd8,0xfa
+
+# CHECK: fdivr %st(3)
+0xd8,0xfb
+
+# CHECK: fdivr %st(4)
+0xd8,0xfc
+
+# CHECK: fdivr %st(5)
+0xd8,0xfd
+
+# CHECK: fdivr %st(6)
+0xd8,0xfe
+
+# CHECK: fdivr %st(7)
+0xd8,0xff
+
+# CHECK: fld %st(0)
+0xd9,0xc0
+
+# CHECK: fld %st(1)
+0xd9,0xc1
+
+# CHECK: fld %st(2)
+0xd9,0xc2
+
+# CHECK: fld %st(3)
+0xd9,0xc3
+
+# CHECK: fld %st(4)
+0xd9,0xc4
+
+# CHECK: fld %st(5)
+0xd9,0xc5
+
+# CHECK: fld %st(6)
+0xd9,0xc6
+
+# CHECK: fld %st(7)
+0xd9,0xc7
+
+# CHECK: fxch %st(0)
+0xd9,0xc8
+
+# CHECK: fxch %st(1)
+0xd9,0xc9
+
+# CHECK: fxch %st(2)
+0xd9,0xca
+
+# CHECK: fxch %st(3)
+0xd9,0xcb
+
+# CHECK: fxch %st(4)
+0xd9,0xcc
+
+# CHECK: fxch %st(5)
+0xd9,0xcd
+
+# CHECK: fxch %st(6)
+0xd9,0xce
+
+# CHECK: fxch %st(7)
+0xd9,0xcf
+
+# CHECK: fnop
+0xd9,0xd0
+
+# CHECK: fchs
+0xd9,0xe0
+
+# CHECK: fabs
+0xd9,0xe1
+
+# CHECK: ftst
+0xd9,0xe4
+
+# CHECK: fxam
+0xd9,0xe5
+
+# CHECK: fld1
+0xd9,0xe8
+
+# CHECK: fldl2t
+0xd9,0xe9
+
+# CHECK: fldl2e
+0xd9,0xea
+
+# CHECK: fldpi
+0xd9,0xeb
+
+# CHECK: fldlg2
+0xd9,0xec
+
+# CHECK: fldln2
+0xd9,0xed
+
+# CHECK: fldz
+0xd9,0xee
+
+# CHECK: f2xm1
+0xd9,0xf0
+
+# CHECK: fyl2x
+0xd9,0xf1
+
+# CHECK: fptan
+0xd9,0xf2
+
+# CHECK: fpatan
+0xd9,0xf3
+
+# CHECK: fxtract
+0xd9,0xf4
+
+# CHECK: fprem1
+0xd9,0xf5
+
+# CHECK: fdecstp
+0xd9,0xf6
+
+# CHECK: fincstp
+0xd9,0xf7
+
+# CHECK: fprem
+0xd9,0xf8
+
+# CHECK: fyl2xp1
+0xd9,0xf9
+
+# CHECK: fsqrt
+0xd9,0xfa
+
+# CHECK: fsincos
+0xd9,0xfb
+
+# CHECK: frndint
+0xd9,0xfc
+
+# CHECK: fscale
+0xd9,0xfd
+
+# CHECK: fsin
+0xd9,0xfe
+
+# CHECK: fcos
+0xd9,0xff
+
+# CHECK: fcmovb %st(0), %st(0)
+0xda,0xc0
+
+# CHECK: fcmovb %st(1), %st(0)
+0xda,0xc1
+
+# CHECK: fcmovb %st(2), %st(0)
+0xda,0xc2
+
+# CHECK: fcmovb %st(3), %st(0)
+0xda,0xc3
+
+# CHECK: fcmovb %st(4), %st(0)
+0xda,0xc4
+
+# CHECK: fcmovb %st(5), %st(0)
+0xda,0xc5
+
+# CHECK: fcmovb %st(6), %st(0)
+0xda,0xc6
+
+# CHECK: fcmovb %st(7), %st(0)
+0xda,0xc7
+
+# CHECK: fcmove %st(0), %st(0)
+0xda,0xc8
+
+# CHECK: fcmove %st(1), %st(0)
+0xda,0xc9
+
+# CHECK: fcmove %st(2), %st(0)
+0xda,0xca
+
+# CHECK: fcmove %st(3), %st(0)
+0xda,0xcb
+
+# CHECK: fcmove %st(4), %st(0)
+0xda,0xcc
+
+# CHECK: fcmove %st(5), %st(0)
+0xda,0xcd
+
+# CHECK: fcmove %st(6), %st(0)
+0xda,0xce
+
+# CHECK: fcmove %st(7), %st(0)
+0xda,0xcf
+
+# CHECK: fcmovbe %st(0), %st(0)
+0xda,0xd0
+
+# CHECK: fcmovbe %st(1), %st(0)
+0xda,0xd1
+
+# CHECK: fcmovbe %st(2), %st(0)
+0xda,0xd2
+
+# CHECK: fcmovbe %st(3), %st(0)
+0xda,0xd3
+
+# CHECK: fcmovbe %st(4), %st(0)
+0xda,0xd4
+
+# CHECK: fcmovbe %st(5), %st(0)
+0xda,0xd5
+
+# CHECK: fcmovbe %st(6), %st(0)
+0xda,0xd6
+
+# CHECK: fcmovbe %st(7), %st(0)
+0xda,0xd7
+
+# CHECK: fcmovu %st(0), %st(0)
+0xda,0xd8
+
+# CHECK: fcmovu %st(1), %st(0)
+0xda,0xd9
+
+# CHECK: fcmovu %st(2), %st(0)
+0xda,0xda
+
+# CHECK: fcmovu %st(3), %st(0)
+0xda,0xdb
+
+# CHECK: fcmovu %st(4), %st(0)
+0xda,0xdc
+
+# CHECK: fcmovu %st(5), %st(0)
+0xda,0xdd
+
+# CHECK: fcmovu %st(6), %st(0)
+0xda,0xde
+
+# CHECK: fcmovu %st(7), %st(0)
+0xda,0xdf
+
+# CHECK: fucompp
+0xda,0xe9
+
+# CHECK: fcmovnb %st(0), %st(0)
+0xdb,0xc0
+
+# CHECK: fcmovnb %st(1), %st(0)
+0xdb,0xc1
+
+# CHECK: fcmovnb %st(2), %st(0)
+0xdb,0xc2
+
+# CHECK: fcmovnb %st(3), %st(0)
+0xdb,0xc3
+
+# CHECK: fcmovnb %st(4), %st(0)
+0xdb,0xc4
+
+# CHECK: fcmovnb %st(5), %st(0)
+0xdb,0xc5
+
+# CHECK: fcmovnb %st(6), %st(0)
+0xdb,0xc6
+
+# CHECK: fcmovnb %st(7), %st(0)
+0xdb,0xc7
+
+# CHECK: fcmovne %st(0), %st(0)
+0xdb,0xc8
+
+# CHECK: fcmovne %st(1), %st(0)
+0xdb,0xc9
+
+# CHECK: fcmovne %st(2), %st(0)
+0xdb,0xca
+
+# CHECK: fcmovne %st(3), %st(0)
+0xdb,0xcb
+
+# CHECK: fcmovne %st(4), %st(0)
+0xdb,0xcc
+
+# CHECK: fcmovne %st(5), %st(0)
+0xdb,0xcd
+
+# CHECK: fcmovne %st(6), %st(0)
+0xdb,0xce
+
+# CHECK: fcmovne %st(7), %st(0)
+0xdb,0xcf
+
+# CHECK: fcmovnbe %st(0), %st(0)
+0xdb,0xd0
+
+# CHECK: fcmovnbe %st(1), %st(0)
+0xdb,0xd1
+
+# CHECK: fcmovnbe %st(2), %st(0)
+0xdb,0xd2
+
+# CHECK: fcmovnbe %st(3), %st(0)
+0xdb,0xd3
+
+# CHECK: fcmovnbe %st(4), %st(0)
+0xdb,0xd4
+
+# CHECK: fcmovnbe %st(5), %st(0)
+0xdb,0xd5
+
+# CHECK: fcmovnbe %st(6), %st(0)
+0xdb,0xd6
+
+# CHECK: fcmovnbe %st(7), %st(0)
+0xdb,0xd7
+
+# CHECK: fcmovnu %st(0), %st(0)
+0xdb,0xd8
+
+# CHECK: fcmovnu %st(1), %st(0)
+0xdb,0xd9
+
+# CHECK: fcmovnu %st(2), %st(0)
+0xdb,0xda
+
+# CHECK: fcmovnu %st(3), %st(0)
+0xdb,0xdb
+
+# CHECK: fcmovnu %st(4), %st(0)
+0xdb,0xdc
+
+# CHECK: fcmovnu %st(5), %st(0)
+0xdb,0xdd
+
+# CHECK: fcmovnu %st(6), %st(0)
+0xdb,0xde
+
+# CHECK: fcmovnu %st(7), %st(0)
+0xdb,0xdf
+
+# CHECK: fnclex
+0xdb,0xe2
+
+# CHECK: fninit
+0xdb,0xe3
+
+# CHECK: fucomi %st(0)
+0xdb,0xe8
+
+# CHECK: fucomi %st(1)
+0xdb,0xe9
+
+# CHECK: fucomi %st(2)
+0xdb,0xea
+
+# CHECK: fucomi %st(3)
+0xdb,0xeb
+
+# CHECK: fucomi %st(4)
+0xdb,0xec
+
+# CHECK: fucomi %st(5)
+0xdb,0xed
+
+# CHECK: fucomi %st(6)
+0xdb,0xee
+
+# CHECK: fucomi %st(7)
+0xdb,0xef
+
+# CHECK: fcomi %st(0)
+0xdb,0xf0
+
+# CHECK: fcomi %st(1)
+0xdb,0xf1
+
+# CHECK: fcomi %st(2)
+0xdb,0xf2
+
+# CHECK: fcomi %st(3)
+0xdb,0xf3
+
+# CHECK: fcomi %st(4)
+0xdb,0xf4
+
+# CHECK: fcomi %st(5)
+0xdb,0xf5
+
+# CHECK: fcomi %st(6)
+0xdb,0xf6
+
+# CHECK: fcomi %st(7)
+0xdb,0xf7
+
+# CHECK: fadd %st(0), %st(0)
+0xdc,0xc0
+
+# CHECK: fadd %st(0), %st(1)
+0xdc,0xc1
+
+# CHECK: fadd %st(0), %st(2)
+0xdc,0xc2
+
+# CHECK: fadd %st(0), %st(3)
+0xdc,0xc3
+
+# CHECK: fadd %st(0), %st(4)
+0xdc,0xc4
+
+# CHECK: fadd %st(0), %st(5)
+0xdc,0xc5
+
+# CHECK: fadd %st(0), %st(6)
+0xdc,0xc6
+
+# CHECK: fadd %st(0), %st(7)
+0xdc,0xc7
+
+# CHECK: fmul %st(0), %st(0)
+0xdc,0xc8
+
+# CHECK: fmul %st(0), %st(1)
+0xdc,0xc9
+
+# CHECK: fmul %st(0), %st(2)
+0xdc,0xca
+
+# CHECK: fmul %st(0), %st(3)
+0xdc,0xcb
+
+# CHECK: fmul %st(0), %st(4)
+0xdc,0xcc
+
+# CHECK: fmul %st(0), %st(5)
+0xdc,0xcd
+
+# CHECK: fmul %st(0), %st(6)
+0xdc,0xce
+
+# CHECK: fmul %st(0), %st(7)
+0xdc,0xcf
+
+# CHECK: fsub %st(0), %st(0)
+0xdc,0xe0
+
+# CHECK: fsub %st(0), %st(1)
+0xdc,0xe1
+
+# CHECK: fsub %st(0), %st(2)
+0xdc,0xe2
+
+# CHECK: fsub %st(0), %st(3)
+0xdc,0xe3
+
+# CHECK: fsub %st(0), %st(4)
+0xdc,0xe4
+
+# CHECK: fsub %st(0), %st(5)
+0xdc,0xe5
+
+# CHECK: fsub %st(0), %st(6)
+0xdc,0xe6
+
+# CHECK: fsub %st(0), %st(7)
+0xdc,0xe7
+
+# CHECK: fsubr %st(0), %st(0)
+0xdc,0xe8
+
+# CHECK: fsubr %st(0), %st(1)
+0xdc,0xe9
+
+# CHECK: fsubr %st(0), %st(2)
+0xdc,0xea
+
+# CHECK: fsubr %st(0), %st(3)
+0xdc,0xeb
+
+# CHECK: fsubr %st(0), %st(4)
+0xdc,0xec
+
+# CHECK: fsubr %st(0), %st(5)
+0xdc,0xed
+
+# CHECK: fsubr %st(0), %st(6)
+0xdc,0xee
+
+# CHECK: fsubr %st(0), %st(7)
+0xdc,0xef
+
+# CHECK: fdiv %st(0), %st(0)
+0xdc,0xf0
+
+# CHECK: fdiv %st(0), %st(1)
+0xdc,0xf1
+
+# CHECK: fdiv %st(0), %st(2)
+0xdc,0xf2
+
+# CHECK: fdiv %st(0), %st(3)
+0xdc,0xf3
+
+# CHECK: fdiv %st(0), %st(4)
+0xdc,0xf4
+
+# CHECK: fdiv %st(0), %st(5)
+0xdc,0xf5
+
+# CHECK: fdiv %st(0), %st(6)
+0xdc,0xf6
+
+# CHECK: fdiv %st(0), %st(7)
+0xdc,0xf7
+
+# CHECK: fdivr %st(0), %st(0)
+0xdc,0xf8
+
+# CHECK: fdivr %st(0), %st(1)
+0xdc,0xf9
+
+# CHECK: fdivr %st(0), %st(2)
+0xdc,0xfa
+
+# CHECK: fdivr %st(0), %st(3)
+0xdc,0xfb
+
+# CHECK: fdivr %st(0), %st(4)
+0xdc,0xfc
+
+# CHECK: fdivr %st(0), %st(5)
+0xdc,0xfd
+
+# CHECK: fdivr %st(0), %st(6)
+0xdc,0xfe
+
+# CHECK: fdivr %st(0), %st(7)
+0xdc,0xff
+
+# CHECK: ffree %st(0)
+0xdd,0xc0
+
+# CHECK: ffree %st(1)
+0xdd,0xc1
+
+# CHECK: ffree %st(2)
+0xdd,0xc2
+
+# CHECK: ffree %st(3)
+0xdd,0xc3
+
+# CHECK: ffree %st(4)
+0xdd,0xc4
+
+# CHECK: ffree %st(5)
+0xdd,0xc5
+
+# CHECK: ffree %st(6)
+0xdd,0xc6
+
+# CHECK: ffree %st(7)
+0xdd,0xc7
+
+# CHECK: fst %st(0)
+0xdd,0xd0
+
+# CHECK: fst %st(1)
+0xdd,0xd1
+
+# CHECK: fst %st(2)
+0xdd,0xd2
+
+# CHECK: fst %st(3)
+0xdd,0xd3
+
+# CHECK: fst %st(4)
+0xdd,0xd4
+
+# CHECK: fst %st(5)
+0xdd,0xd5
+
+# CHECK: fst %st(6)
+0xdd,0xd6
+
+# CHECK: fst %st(7)
+0xdd,0xd7
+
+# CHECK: fstp %st(0)
+0xdd,0xd8
+
+# CHECK: fstp %st(1)
+0xdd,0xd9
+
+# CHECK: fstp %st(2)
+0xdd,0xda
+
+# CHECK: fstp %st(3)
+0xdd,0xdb
+
+# CHECK: fstp %st(4)
+0xdd,0xdc
+
+# CHECK: fstp %st(5)
+0xdd,0xdd
+
+# CHECK: fstp %st(6)
+0xdd,0xde
+
+# CHECK: fstp %st(7)
+0xdd,0xdf
+
+# CHECK: fucom %st(0)
+0xdd,0xe0
+
+# CHECK: fucom %st(1)
+0xdd,0xe1
+
+# CHECK: fucom %st(2)
+0xdd,0xe2
+
+# CHECK: fucom %st(3)
+0xdd,0xe3
+
+# CHECK: fucom %st(4)
+0xdd,0xe4
+
+# CHECK: fucom %st(5)
+0xdd,0xe5
+
+# CHECK: fucom %st(6)
+0xdd,0xe6
+
+# CHECK: fucom %st(7)
+0xdd,0xe7
+
+# CHECK: fucomp %st(0)
+0xdd,0xe8
+
+# CHECK: fucomp %st(1)
+0xdd,0xe9
+
+# CHECK: fucomp %st(2)
+0xdd,0xea
+
+# CHECK: fucomp %st(3)
+0xdd,0xeb
+
+# CHECK: fucomp %st(4)
+0xdd,0xec
+
+# CHECK: fucomp %st(5)
+0xdd,0xed
+
+# CHECK: fucomp %st(6)
+0xdd,0xee
+
+# CHECK: fucomp %st(7)
+0xdd,0xef
+
+# CHECK: faddp %st(0)
+0xde,0xc0
+
+# CHECK: faddp %st(1)
+0xde,0xc1
+
+# CHECK: faddp %st(2)
+0xde,0xc2
+
+# CHECK: faddp %st(3)
+0xde,0xc3
+
+# CHECK: faddp %st(4)
+0xde,0xc4
+
+# CHECK: faddp %st(5)
+0xde,0xc5
+
+# CHECK: faddp %st(6)
+0xde,0xc6
+
+# CHECK: faddp %st(7)
+0xde,0xc7
+
+# CHECK: fmulp %st(0)
+0xde,0xc8
+
+# CHECK: fmulp %st(1)
+0xde,0xc9
+
+# CHECK: fmulp %st(2)
+0xde,0xca
+
+# CHECK: fmulp %st(3)
+0xde,0xcb
+
+# CHECK: fmulp %st(4)
+0xde,0xcc
+
+# CHECK: fmulp %st(5)
+0xde,0xcd
+
+# CHECK: fmulp %st(6)
+0xde,0xce
+
+# CHECK: fmulp %st(7)
+0xde,0xcf
+
+# CHECK: fcompp
+0xde,0xd9
+
+# CHECK: fsubp %st(0)
+0xde,0xe0
+
+# CHECK: fsubp %st(1)
+0xde,0xe1
+
+# CHECK: fsubp %st(2)
+0xde,0xe2
+
+# CHECK: fsubp %st(3)
+0xde,0xe3
+
+# CHECK: fsubp %st(4)
+0xde,0xe4
+
+# CHECK: fsubp %st(5)
+0xde,0xe5
+
+# CHECK: fsubp %st(6)
+0xde,0xe6
+
+# CHECK: fsubp %st(7)
+0xde,0xe7
+
+# CHECK: fsubrp %st(0)
+0xde,0xe8
+
+# CHECK: fsubrp %st(1)
+0xde,0xe9
+
+# CHECK: fsubrp %st(2)
+0xde,0xea
+
+# CHECK: fsubrp %st(3)
+0xde,0xeb
+
+# CHECK: fsubrp %st(4)
+0xde,0xec
+
+# CHECK: fsubrp %st(5)
+0xde,0xed
+
+# CHECK: fsubrp %st(6)
+0xde,0xee
+
+# CHECK: fsubrp %st(7)
+0xde,0xef
+
+# CHECK: fdivp %st(0)
+0xde,0xf0
+
+# CHECK: fdivp %st(1)
+0xde,0xf1
+
+# CHECK: fdivp %st(2)
+0xde,0xf2
+
+# CHECK: fdivp %st(3)
+0xde,0xf3
+
+# CHECK: fdivp %st(4)
+0xde,0xf4
+
+# CHECK: fdivp %st(5)
+0xde,0xf5
+
+# CHECK: fdivp %st(6)
+0xde,0xf6
+
+# CHECK: fdivp %st(7)
+0xde,0xf7
+
+# CHECK: fdivrp %st(0)
+0xde,0xf8
+
+# CHECK: fdivrp %st(1)
+0xde,0xf9
+
+# CHECK: fdivrp %st(2)
+0xde,0xfa
+
+# CHECK: fdivrp %st(3)
+0xde,0xfb
+
+# CHECK: fdivrp %st(4)
+0xde,0xfc
+
+# CHECK: fdivrp %st(5)
+0xde,0xfd
+
+# CHECK: fdivrp %st(6)
+0xde,0xfe
+
+# CHECK: fdivrp %st(7)
+0xde,0xff
+
+# CHECK: ffreep %st(0)
+0xdf,0xc0
+
+# CHECK: ffreep %st(1)
+0xdf,0xc1
+
+# CHECK: ffreep %st(2)
+0xdf,0xc2
+
+# CHECK: ffreep %st(3)
+0xdf,0xc3
+
+# CHECK: ffreep %st(4)
+0xdf,0xc4
+
+# CHECK: ffreep %st(5)
+0xdf,0xc5
+
+# CHECK: ffreep %st(6)
+0xdf,0xc6
+
+# CHECK: ffreep %st(7)
+0xdf,0xc7
+
+# CHECK: fnstsw %ax
+0xdf,0xe0
+
+# CHECK: fucompi %st(0)
+0xdf,0xe8
+
+# CHECK: fucompi %st(1)
+0xdf,0xe9
+
+# CHECK: fucompi %st(2)
+0xdf,0xea
+
+# CHECK: fucompi %st(3)
+0xdf,0xeb
+
+# CHECK: fucompi %st(4)
+0xdf,0xec
+
+# CHECK: fucompi %st(5)
+0xdf,0xed
+
+# CHECK: fucompi %st(6)
+0xdf,0xee
+
+# CHECK: fucompi %st(7)
+0xdf,0xef
+
+# CHECK: fcompi %st(0)
+0xdf,0xf0
+
+# CHECK: fcompi %st(1)
+0xdf,0xf1
+
+# CHECK: fcompi %st(2)
+0xdf,0xf2
+
+# CHECK: fcompi %st(3)
+0xdf,0xf3
+
+# CHECK: fcompi %st(4)
+0xdf,0xf4
+
+# CHECK: fcompi %st(5)
+0xdf,0xf5
+
+# CHECK: fcompi %st(6)
+0xdf,0xf6
+
+# CHECK: fcompi %st(7)
+0xdf,0xf7
diff --git a/test/MC/Disassembler/X86/x86-16.txt b/test/MC/Disassembler/X86/x86-16.txt
index 021cb2371812..407b695ef636 100644
--- a/test/MC/Disassembler/X86/x86-16.txt
+++ b/test/MC/Disassembler/X86/x86-16.txt
@@ -1,790 +1,793 @@
-# RUN: llvm-mc --disassemble %s -triple=i686-linux-gnu-code16 | FileCheck %s
-
-# CHECK: movl $305419896, %ebx
-0x66 0xbb 0x78 0x56 0x34 0x12
-
-# CHECK: pause
-0xf3 0x90
-
-# CHECK: sfence
-0x0f 0xae 0xf8
-
-# CHECK: lfence
-0x0f 0xae 0xe8
-
-# CHECK: mfence
-0x0f 0xae 0xf0
-
-# CHECK: stgi
-0x0f 0x01 0xdc
-
-# CHECK: clgi
-0x0f 0x01 0xdd
-
-# CHECK: rdtscp
-0x0f 0x01 0xf9
-
-# CHECK: movl %eax, 16(%ebp)
-0x67 0x66 0x89 0x45 0x10
-
-# CHECK: movl %eax, -16(%ebp)
-0x67 0x66 0x89 0x45 0xf0
-
-# CHECK: testb %cl, %bl
-0x84 0xcb
-
-# CHECK: cmpl %eax, %ebx
-0x66 0x39 0xc3
-
-# CHECK: addw %ax, %ax
-0x01 0xc0
-
-# CHECK: shrl %eax
-0x66 0xd1 0xe8
-
-# CHECK: shll %eax
-0x66 0xd1 0xe0
-
-# CHECK: shll %eax
-0x66 0xd1 0xe0
-
-# CHECK: movb 0, %al
-0xa0 0x00 0x00
-
-# CHECK: movw 0, %ax
-0xa1 0x00 0x00
-
-# CHECK: movl 0, %eax
-0x66 0xa1 0x00 0x00
-
-# CHECK: into
-0xce
-
-# CHECK: int3
-0xcc
-
-# CHECK: int $4
-0xcd 0x04
-
-# CHECK: int $127
-0xcd 0x7f
-
-# CHECK: pushfw
-0x9c
-
-# CHECK: pushfl
-0x66 0x9c
-
-# CHECK: popfw
-0x9d
-
-# CHECK: popfl
-0x66 0x9d
-
-# CHECK: retl
-0x66 0xc3
-
-# CHECK: cmoval %eax, %edx
-0x66 0x0f 0x47 0xd0
-
-# CHECK: cmovael %eax, %edx
-0x66 0x0f 0x43 0xd0
-
-# CHECK: cmovbel %eax, %edx
-0x66 0x0f 0x46 0xd0
-
-# CHECK: cmovbl %eax, %edx
-0x66 0x0f 0x42 0xd0
-
-# CHECK: cmovbw %bx, %bx
-0x0f 0x42 0xdb
-
-# CHECK: cmovbel %eax, %edx
-0x66 0x0f 0x46 0xd0
-
-# CHECK: cmovbl %eax, %edx
-0x66 0x0f 0x42 0xd0
-
-# CHECK: cmovel %eax, %edx
-0x66 0x0f 0x44 0xd0
-
-# CHECK: cmovgl %eax, %edx
-0x66 0x0f 0x4f 0xd0
-
-# CHECK: cmovgel %eax, %edx
-0x66 0x0f 0x4d 0xd0
-
-# CHECK: cmovll %eax, %edx
-0x66 0x0f 0x4c 0xd0
-
-# CHECK: cmovlel %eax, %edx
-0x66 0x0f 0x4e 0xd0
-
-# CHECK: cmovbel %eax, %edx
-0x66 0x0f 0x46 0xd0
-
-# CHECK: cmovnel %eax, %edx
-0x66 0x0f 0x45 0xd0
-
-# CHECK: cmovael %eax, %edx
-0x66 0x0f 0x43 0xd0
-
-# CHECK: cmoval %eax, %edx
-0x66 0x0f 0x47 0xd0
-
-# CHECK: cmovael %eax, %edx
-0x66 0x0f 0x43 0xd0
-
-# CHECK: cmovnel %eax, %edx
-0x66 0x0f 0x45 0xd0
-
-# CHECK: cmovlel %eax, %edx
-0x66 0x0f 0x4e 0xd0
-
-# CHECK: cmovgel %eax, %edx
-0x66 0x0f 0x4d 0xd0
-
-# CHECK: cmovnel %eax, %edx
-0x66 0x0f 0x45 0xd0
-
-# CHECK: cmovlel %eax, %edx
-0x66 0x0f 0x4e 0xd0
-
-# CHECK: cmovll %eax, %edx
-0x66 0x0f 0x4c 0xd0
-
-# CHECK: cmovgel %eax, %edx
-0x66 0x0f 0x4d 0xd0
-
-# CHECK: cmovgl %eax, %edx
-0x66 0x0f 0x4f 0xd0
-
-# CHECK: cmovnol %eax, %edx
-0x66 0x0f 0x41 0xd0
-
-# CHECK: cmovnpl %eax, %edx
-0x66 0x0f 0x4b 0xd0
-
-# CHECK: cmovnsl %eax, %edx
-0x66 0x0f 0x49 0xd0
-
-# CHECK: cmovnel %eax, %edx
-0x66 0x0f 0x45 0xd0
-
-# CHECK: cmovol %eax, %edx
-0x66 0x0f 0x40 0xd0
-
-# CHECK: cmovpl %eax, %edx
-0x66 0x0f 0x4a 0xd0
-
-# CHECK: cmovsl %eax, %edx
-0x66 0x0f 0x48 0xd0
-
-# CHECK: cmovel %eax, %edx
-0x66 0x0f 0x44 0xd0
-
-# CHECK: fmul %st(0)
-0xd8 0xc8
-
-# CHECK: fadd %st(0)
-0xd8 0xc0
-
-# CHECK: fsub %st(0)
-0xd8 0xe0
-
-# CHECK: fsubr %st(0)
-0xd8 0xe8
-
-# CHECK: fdivr %st(0)
-0xd8 0xf8
-
-# CHECK: fdiv %st(0)
-0xd8 0xf0
-
-# CHECK: movl %cs, %eax
-0x66 0x8c 0xc8
-
-# CHECK: movw %cs, %ax
-0x8c 0xc8
-
-# CHECK: movl %cs, (%eax)
-0x67 0x66 0x8c 0x08
-
-# CHECK: movw %cs, (%eax)
-0x67 0x8c 0x08
-
-# CHECK: movl %eax, %cs
-0x66 0x8e 0xc8
-
-# CHECK: movl (%eax), %cs
-0x67 0x66 0x8e 0x08
-
-# CHECK: movw (%eax), %cs
-0x67 0x8e 0x08
-
-# CHECKX: movl %cr0, %eax
-0x0f 0x20 0xc0
-
-# CHECKX: movl %cr1, %eax
-0x0f 0x20 0xc8
-
-# CHECKX: movl %cr2, %eax
-0x0f 0x20 0xd0
-
-# CHECKX: movl %cr3, %eax
-0x0f 0x20 0xd8
-
-# CHECKX: movl %cr4, %eax
-0x0f 0x20 0xe0
-
-# CHECKX: movl %dr0, %eax
-0x0f 0x21 0xc0
-
-# CHECKX: movl %dr1, %eax
-0x0f 0x21 0xc8
-
-# CHECKX: movl %dr1, %eax
-0x0f 0x21 0xc8
-
-# CHECKX: movl %dr2, %eax
-0x0f 0x21 0xd0
-
-# CHECKX: movl %dr3, %eax
-0x0f 0x21 0xd8
-
-# CHECKX: movl %dr4, %eax
-0x0f 0x21 0xe0
-
-# CHECKX: movl %dr5, %eax
-0x0f 0x21 0xe8
-
-# CHECKX: movl %dr6, %eax
-0x0f 0x21 0xf0
-
-# CHECKX: movl %dr7, %eax
-0x0f 0x21 0xf8
-
-# CHECK: wait
-0x9b
-
-# CHECK: movl %gs:124, %eax
-0x65 0x66 0x8b 0x06 0x7c 0x00
-
-# CHECK: pushaw
-0x60
-
-# CHECK: popaw
-0x61
-
-# CHECK: pushaw
-0x60
-
-# CHECK: popaw
-0x61
-
-# CHECK: pushal
-0x66 0x60
-
-# CHECK: popal
-0x66 0x61
-
-# CHECK: jmpw *8(%eax)
-0x67 0xff 0x60 0x08
-
-# CHECK: jmpl *8(%eax)
-0x67 0x66 0xff 0x60 0x08
-
-# CHECK: lcalll $2, $4660
-0x66 0x9a 0x34 0x12 0x00 0x00 0x02 0x00
-
-# CHECK: jcxz
-0xe3 0x00
-
-# CHECK: jecxz
-0x67 0xe3 0x00
-
-# CHECK: iretw
-0xcf
-
-# CHECK: iretw
-0xcf
-
-# CHECK: iretl
-0x66 0xcf
-
-# CHECK: sysretl
-0x0f 0x07
-
-# CHECK: sysretl
-0x0f 0x07
-
-# CHECK: testl -24(%ebp), %ecx
-0x67 0x66 0x85 0x4d 0xe8
-
-# CHECK: testl -24(%ebp), %ecx
-0x67 0x66 0x85 0x4d 0xe8
-
-# CHECK: pushw %cs
-0x0e
-
-# CHECK: pushw %ds
-0x1e
-
-# CHECK: pushw %ss
-0x16
-
-# CHECK: pushw %es
-0x06
-
-# CHECK: pushw %fs
-0x0f 0xa0
-
-# CHECK: pushw %gs
-0x0f 0xa8
-
-# CHECK: pushw %cs
-0x0e
-
-# CHECK: pushw %ds
-0x1e
-
-# CHECK: pushw %ss
-0x16
-
-# CHECK: pushw %es
-0x06
-
-# CHECK: pushw %fs
-0x0f 0xa0
-
-# CHECK: pushw %gs
-0x0f 0xa8
-
-# CHECK: pushl %cs
-0x66 0x0e
-
-# CHECK: pushl %ds
-0x66 0x1e
-
-# CHECK: pushl %ss
-0x66 0x16
-
-# CHECK: pushl %es
-0x66 0x06
-
-# CHECK: pushl %fs
-0x66 0x0f 0xa0
-
-# CHECK: pushl %gs
-0x66 0x0f 0xa8
-
-# CHECK: popw %ss
-0x17
-
-# CHECK: popw %ds
-0x1f
-
-# CHECK: popw %es
-0x07
-
-# CHECK: popl %ss
-0x66 0x17
-
-# CHECK: popl %ds
-0x66 0x1f
-
-# CHECK: popl %es
-0x66 0x07
-
-# CHECK: pushfl
-0x66 0x9c
-
-# CHECK: popfl
-0x66 0x9d
-
-# CHECK: pushfl
-0x66 0x9c
-
-# CHECK: popfl
-0x66 0x9d
-
-# CHECK: setb %bl
-0x0f 0x92 0xc3
-
-# CHECK: setb %bl
-0x0f 0x92 0xc3
-
-# CHECK: setae %bl
-0x0f 0x93 0xc3
-
-# CHECK: setae %bl
-0x0f 0x93 0xc3
-
-# CHECK: setbe %bl
-0x0f 0x96 0xc3
-
-# CHECK: seta %bl
-0x0f 0x97 0xc3
-
-# CHECK: setp %bl
-0x0f 0x9a 0xc3
-
-# CHECK: setnp %bl
-0x0f 0x9b 0xc3
-
-# CHECK: setl %bl
-0x0f 0x9c 0xc3
-
-# CHECK: setge %bl
-0x0f 0x9d 0xc3
-
-# CHECK: setle %bl
-0x0f 0x9e 0xc3
-
-# CHECK: setg %bl
-0x0f 0x9f 0xc3
-
-# CHECK: setne %cl
-0x0f 0x95 0xc1
-
-# CHECK: setb %bl
-0x0f 0x92 0xc3
-
-# CHECK: setb %bl
-0x0f 0x92 0xc3
-
-# CHECK: lcalll $31438, $31438
-0x66 0x9a 0xce 0x7a 0x00 0x00 0xce 0x7a
-
-# CHECK: lcalll $31438, $31438
-0x66 0x9a 0xce 0x7a 0x00 0x00 0xce 0x7a
-
-# CHECK: ljmpl $31438, $31438
-0x66 0xea 0xce 0x7a 0x00 0x00 0xce 0x7a
-
-# CHECK: ljmpl $31438, $31438
-0x66 0xea 0xce 0x7a 0x00 0x00 0xce 0x7a
-
-# CHECK: lcallw $31438, $31438
-0x9a 0xce 0x7a 0xce 0x7a
-
-# CHECK: lcallw $31438, $31438
-0x9a 0xce 0x7a 0xce 0x7a
-
-# CHECK: ljmpw $31438, $31438
-0xea 0xce 0x7a 0xce 0x7a
-
-# CHECK: ljmpw $31438, $31438
-0xea 0xce 0x7a 0xce 0x7a
-
-# CHECK: lcallw $31438, $31438
-0x9a 0xce 0x7a 0xce 0x7a
-
-# CHECK: lcallw $31438, $31438
-0x9a 0xce 0x7a 0xce 0x7a
-
-# CHECK: ljmpw $31438, $31438
-0xea 0xce 0x7a 0xce 0x7a
-
-# CHECK: ljmpw $31438, $31438
-0xea 0xce 0x7a 0xce 0x7a
-
-# CHECK: calll
-0x66 0xe8 0x00 0x00 0x00 0x00
-
-# CHECK: callw
-0xe8 0x00 0x00
-
-# CHECK: incb %al
-0xfe 0xc0
-
-# CHECK: incw %ax
-0x40
-
-# CHECK: incl %eax
-0x66 0x40
-
-# CHECK: decb %al
-0xfe 0xc8
-
-# CHECK: decw %ax
-0x48
-
-# CHECK: decl %eax
-0x66 0x48
-
-# CHECK: pshufw $14, %mm4, %mm0
-0x0f 0x70 0xc4 0x0e
-
-# CHECK: pshufw $90, %mm4, %mm0
-0x0f 0x70 0xc4 0x5a
-
-# CHECK: aaa
-0x37
-
-# CHECK: aad $1
-0xd5 0x01
-
-# CHECK: aad
-0xd5 0x0a
-
-# CHECK: aad
-0xd5 0x0a
-
-# CHECK: aam $2
-0xd4 0x02
-
-# CHECK: aam
-0xd4 0x0a
-
-# CHECK: aam
-0xd4 0x0a
-
-# CHECK: aas
-0x3f
-
-# CHECK: daa
-0x27
-
-# CHECK: das
-0x2f
-
-# CHECK: retw $31438
-0xc2 0xce 0x7a
-
-# CHECK: lretw $31438
-0xca 0xce 0x7a
-
-# CHECK: retw $31438
-0xc2 0xce 0x7a
-
-# CHECK: lretw $31438
-0xca 0xce 0x7a
-
-# CHECK: retl $31438
-0x66 0xc2 0xce 0x7a
-
-# CHECK: lretl $31438
-0x66 0xca 0xce 0x7a
-
-# CHECK: bound 2(%eax), %bx
-0x67 0x62 0x58 0x02
-
-# CHECK: bound 4(%ebx), %ecx
-0x67 0x66 0x62 0x4b 0x04
-
-# CHECK: arpl %bx, %bx
-0x63 0xdb
-
-# CHECK: arpl %bx, 6(%ecx)
-0x67 0x63 0x59 0x06
-
-# CHECK: lgdtw 4(%eax)
-0x67 0x0f 0x01 0x50 0x04
-
-# CHECK: lgdtw 4(%eax)
-0x67 0x0f 0x01 0x50 0x04
-
-# CHECK: lgdtl 4(%eax)
-0x67 0x66 0x0f 0x01 0x50 0x04
-
-# CHECK: lidtw 4(%eax)
-0x67 0x0f 0x01 0x58 0x04
-
-# CHECK: lidtw 4(%eax)
-0x67 0x0f 0x01 0x58 0x04
-
-# CHECK: lidtl 4(%eax)
-0x67 0x66 0x0f 0x01 0x58 0x04
-
-# CHECK: sgdtw 4(%eax)
-0x67 0x0f 0x01 0x40 0x04
-
-# CHECK: sgdtw 4(%eax)
-0x67 0x0f 0x01 0x40 0x04
-
-# CHECK: sgdtl 4(%eax)
-0x67 0x66 0x0f 0x01 0x40 0x04
-
-# CHECK: sidtw 4(%eax)
-0x67 0x0f 0x01 0x48 0x04
-
-# CHECK: sidtw 4(%eax)
-0x67 0x0f 0x01 0x48 0x04
-
-# CHECK: sidtl 4(%eax)
-0x67 0x66 0x0f 0x01 0x48 0x04
-
-# CHECK: fcompi %st(2)
-0xdf 0xf2
-
-# CHECK: fcompi %st(2)
-0xdf 0xf2
-
-# CHECK: fcompi %st(1)
-0xdf 0xf1
-
-# CHECK: fucompi %st(2)
-0xdf 0xea
-
-# CHECK: fucompi %st(2)
-0xdf 0xea
-
-# CHECK: fucompi %st(1)
-0xdf 0xe9
-
-# CHECK: fldcw 32493
-0xd9 0x2e 0xed 0x7e
-
-# CHECK: fldcw 32493
-0xd9 0x2e 0xed 0x7e
-
-# CHECK: fnstcw 32493
-0xd9 0x3e 0xed 0x7e
-
-# CHECK: fnstcw 32493
-0xd9 0x3e 0xed 0x7e
-
-# CHECK: wait
-0x9b
-
-# CHECK: fnstcw 32493
-0xd9 0x3e 0xed 0x7e
-
-# CHECK: wait
-0x9b
-
-# CHECK: fnstcw 32493
-0xd9 0x3e 0xed 0x7e
-
-# CHECK: fnstsw 32493
-0xdd 0x3e 0xed 0x7e
-
-# CHECK: fnstsw 32493
-0xdd 0x3e 0xed 0x7e
-
-# CHECK: wait
-0x9b
-
-# CHECK: fnstsw 32493
-0xdd 0x3e 0xed 0x7e
-
-# CHECK: wait
-0x9b
-
-# CHECK: fnstsw 32493
-0xdd 0x3e 0xed 0x7e
-
-# CHECK: verr 32493
-0x0f 0x00 0x26 0xed 0x7e
-
-# CHECK: verr 32493
-0x0f 0x00 0x26 0xed 0x7e
-
-# CHECK: wait
-0x9b
-
-# CHECK: fnclex
-0xdb 0xe2
-
-# CHECK: fnclex
-0xdb 0xe2
-
-# CHECK: ud2
-0x0f 0x0b
-
-# CHECK: ud2
-0x0f 0x0b
-
-# CHECK: ud2b
-0x0f 0xb9
-
-# CHECK: loope
-0xe1 0x00
-
-# CHECK: loopne
-0xe0 0x00
-
-# CHECK: outsb
-0x6e
-
-# CHECK: outsw
-0x6f
-
-# CHECK: outsl
-0x66 0x6f
-
-# CHECK: insb
-0x6c
-
-# CHECK: insw
-0x6d
-
-# CHECK: insl
-0x66 0x6d
-
-# CHECK: movsb
-0xa4
-
-# CHECK: movsw
-0xa5
-
-# CHECK: movsl
-0x66 0xa5
-
-# CHECK: lodsb
-0xac
-
-# CHECK: lodsw
-0xad
-
-# CHECK: lodsl
-0x66 0xad
-
-# CHECK: stosb
-0xaa
-
-# CHECK: stosw
-0xab
-
-# CHECK: stosl
-0x66 0xab
-
-# CHECK: strw %ax
-0x0f 0x00 0xc8
-
-# CHECK: strl %eax
-0x66 0x0f 0x00 0xc8
-
-# CHECK: fsubp %st(1)
-0xde 0xe1
-
-# CHECK: fsubp %st(2)
-0xde 0xe2
-
-# CHECKX: nop
-0x66 0x90
-
-# CHECKX: nop
-0x90
-
-# CHECK: xchgl %ecx, %eax
-0x66 0x91
-
-# CHECK: xchgl %ecx, %eax
-0x66 0x91
-
-# CHECK: retw
-0xc3
-
-# CHECK: retl
-0x66 0xc3
-
-# CHECK: lretw
-0xcb
-
-# CHECK: lretl
-0x66 0xcb
-
-# CHECK: callw -1
-0xe8 0xff 0xff
+# RUN: llvm-mc --disassemble %s -triple=i686-linux-gnu-code16 | FileCheck %s
+
+# CHECK: movl $305419896, %ebx
+0x66 0xbb 0x78 0x56 0x34 0x12
+
+# CHECK: pause
+0xf3 0x90
+
+# CHECK: sfence
+0x0f 0xae 0xf8
+
+# CHECK: lfence
+0x0f 0xae 0xe8
+
+# CHECK: mfence
+0x0f 0xae 0xf0
+
+# CHECK: stgi
+0x0f 0x01 0xdc
+
+# CHECK: clgi
+0x0f 0x01 0xdd
+
+# CHECK: rdtscp
+0x0f 0x01 0xf9
+
+# CHECK: movl %eax, 16(%ebp)
+0x67 0x66 0x89 0x45 0x10
+
+# CHECK: movl %eax, -16(%ebp)
+0x67 0x66 0x89 0x45 0xf0
+
+# CHECK: testb %cl, %bl
+0x84 0xcb
+
+# CHECK: cmpl %eax, %ebx
+0x66 0x39 0xc3
+
+# CHECK: addw %ax, %ax
+0x01 0xc0
+
+# CHECK: shrl %eax
+0x66 0xd1 0xe8
+
+# CHECK: shll %eax
+0x66 0xd1 0xe0
+
+# CHECK: shll %eax
+0x66 0xd1 0xe0
+
+# CHECK: movb 0, %al
+0xa0 0x00 0x00
+
+# CHECK: movw 0, %ax
+0xa1 0x00 0x00
+
+# CHECK: movl 0, %eax
+0x66 0xa1 0x00 0x00
+
+# CHECK: into
+0xce
+
+# CHECK: int3
+0xcc
+
+# CHECK: int $4
+0xcd 0x04
+
+# CHECK: int $127
+0xcd 0x7f
+
+# CHECK: pushfw
+0x9c
+
+# CHECK: pushfl
+0x66 0x9c
+
+# CHECK: popfw
+0x9d
+
+# CHECK: popfl
+0x66 0x9d
+
+# CHECK: retl
+0x66 0xc3
+
+# CHECK: cmoval %eax, %edx
+0x66 0x0f 0x47 0xd0
+
+# CHECK: cmovael %eax, %edx
+0x66 0x0f 0x43 0xd0
+
+# CHECK: cmovbel %eax, %edx
+0x66 0x0f 0x46 0xd0
+
+# CHECK: cmovbl %eax, %edx
+0x66 0x0f 0x42 0xd0
+
+# CHECK: cmovbw %bx, %bx
+0x0f 0x42 0xdb
+
+# CHECK: cmovbel %eax, %edx
+0x66 0x0f 0x46 0xd0
+
+# CHECK: cmovbl %eax, %edx
+0x66 0x0f 0x42 0xd0
+
+# CHECK: cmovel %eax, %edx
+0x66 0x0f 0x44 0xd0
+
+# CHECK: cmovgl %eax, %edx
+0x66 0x0f 0x4f 0xd0
+
+# CHECK: cmovgel %eax, %edx
+0x66 0x0f 0x4d 0xd0
+
+# CHECK: cmovll %eax, %edx
+0x66 0x0f 0x4c 0xd0
+
+# CHECK: cmovlel %eax, %edx
+0x66 0x0f 0x4e 0xd0
+
+# CHECK: cmovbel %eax, %edx
+0x66 0x0f 0x46 0xd0
+
+# CHECK: cmovnel %eax, %edx
+0x66 0x0f 0x45 0xd0
+
+# CHECK: cmovael %eax, %edx
+0x66 0x0f 0x43 0xd0
+
+# CHECK: cmoval %eax, %edx
+0x66 0x0f 0x47 0xd0
+
+# CHECK: cmovael %eax, %edx
+0x66 0x0f 0x43 0xd0
+
+# CHECK: cmovnel %eax, %edx
+0x66 0x0f 0x45 0xd0
+
+# CHECK: cmovlel %eax, %edx
+0x66 0x0f 0x4e 0xd0
+
+# CHECK: cmovgel %eax, %edx
+0x66 0x0f 0x4d 0xd0
+
+# CHECK: cmovnel %eax, %edx
+0x66 0x0f 0x45 0xd0
+
+# CHECK: cmovlel %eax, %edx
+0x66 0x0f 0x4e 0xd0
+
+# CHECK: cmovll %eax, %edx
+0x66 0x0f 0x4c 0xd0
+
+# CHECK: cmovgel %eax, %edx
+0x66 0x0f 0x4d 0xd0
+
+# CHECK: cmovgl %eax, %edx
+0x66 0x0f 0x4f 0xd0
+
+# CHECK: cmovnol %eax, %edx
+0x66 0x0f 0x41 0xd0
+
+# CHECK: cmovnpl %eax, %edx
+0x66 0x0f 0x4b 0xd0
+
+# CHECK: cmovnsl %eax, %edx
+0x66 0x0f 0x49 0xd0
+
+# CHECK: cmovnel %eax, %edx
+0x66 0x0f 0x45 0xd0
+
+# CHECK: cmovol %eax, %edx
+0x66 0x0f 0x40 0xd0
+
+# CHECK: cmovpl %eax, %edx
+0x66 0x0f 0x4a 0xd0
+
+# CHECK: cmovsl %eax, %edx
+0x66 0x0f 0x48 0xd0
+
+# CHECK: cmovel %eax, %edx
+0x66 0x0f 0x44 0xd0
+
+# CHECK: fmul %st(0)
+0xd8 0xc8
+
+# CHECK: fadd %st(0)
+0xd8 0xc0
+
+# CHECK: fsub %st(0)
+0xd8 0xe0
+
+# CHECK: fsubr %st(0)
+0xd8 0xe8
+
+# CHECK: fdivr %st(0)
+0xd8 0xf8
+
+# CHECK: fdiv %st(0)
+0xd8 0xf0
+
+# CHECK: movl %cs, %eax
+0x66 0x8c 0xc8
+
+# CHECK: movw %cs, %ax
+0x8c 0xc8
+
+# CHECK: movl %cs, (%eax)
+0x67 0x66 0x8c 0x08
+
+# CHECK: movw %cs, (%eax)
+0x67 0x8c 0x08
+
+# CHECK: movl %eax, %cs
+0x66 0x8e 0xc8
+
+# CHECK: movl (%eax), %cs
+0x67 0x66 0x8e 0x08
+
+# CHECK: movw (%eax), %cs
+0x67 0x8e 0x08
+
+# CHECKX: movl %cr0, %eax
+0x0f 0x20 0xc0
+
+# CHECKX: movl %cr1, %eax
+0x0f 0x20 0xc8
+
+# CHECKX: movl %cr2, %eax
+0x0f 0x20 0xd0
+
+# CHECKX: movl %cr3, %eax
+0x0f 0x20 0xd8
+
+# CHECKX: movl %cr4, %eax
+0x0f 0x20 0xe0
+
+# CHECKX: movl %dr0, %eax
+0x0f 0x21 0xc0
+
+# CHECKX: movl %dr1, %eax
+0x0f 0x21 0xc8
+
+# CHECKX: movl %dr1, %eax
+0x0f 0x21 0xc8
+
+# CHECKX: movl %dr2, %eax
+0x0f 0x21 0xd0
+
+# CHECKX: movl %dr3, %eax
+0x0f 0x21 0xd8
+
+# CHECKX: movl %dr4, %eax
+0x0f 0x21 0xe0
+
+# CHECKX: movl %dr5, %eax
+0x0f 0x21 0xe8
+
+# CHECKX: movl %dr6, %eax
+0x0f 0x21 0xf0
+
+# CHECKX: movl %dr7, %eax
+0x0f 0x21 0xf8
+
+# CHECK: wait
+0x9b
+
+# CHECK: movl %gs:124, %eax
+0x65 0x66 0x8b 0x06 0x7c 0x00
+
+# CHECK: pushaw
+0x60
+
+# CHECK: popaw
+0x61
+
+# CHECK: pushaw
+0x60
+
+# CHECK: popaw
+0x61
+
+# CHECK: pushal
+0x66 0x60
+
+# CHECK: popal
+0x66 0x61
+
+# CHECK: jmpw *8(%eax)
+0x67 0xff 0x60 0x08
+
+# CHECK: jmpl *8(%eax)
+0x67 0x66 0xff 0x60 0x08
+
+# CHECK: lcalll $2, $4660
+0x66 0x9a 0x34 0x12 0x00 0x00 0x02 0x00
+
+# CHECK: jcxz
+0xe3 0x00
+
+# CHECK: jecxz
+0x67 0xe3 0x00
+
+# CHECK: iretw
+0xcf
+
+# CHECK: iretw
+0xcf
+
+# CHECK: iretl
+0x66 0xcf
+
+# CHECK: sysretl
+0x0f 0x07
+
+# CHECK: sysretl
+0x0f 0x07
+
+# CHECK: testl -24(%ebp), %ecx
+0x67 0x66 0x85 0x4d 0xe8
+
+# CHECK: testl -24(%ebp), %ecx
+0x67 0x66 0x85 0x4d 0xe8
+
+# CHECK: pushw %cs
+0x0e
+
+# CHECK: pushw %ds
+0x1e
+
+# CHECK: pushw %ss
+0x16
+
+# CHECK: pushw %es
+0x06
+
+# CHECK: pushw %fs
+0x0f 0xa0
+
+# CHECK: pushw %gs
+0x0f 0xa8
+
+# CHECK: pushw %cs
+0x0e
+
+# CHECK: pushw %ds
+0x1e
+
+# CHECK: pushw %ss
+0x16
+
+# CHECK: pushw %es
+0x06
+
+# CHECK: pushw %fs
+0x0f 0xa0
+
+# CHECK: pushw %gs
+0x0f 0xa8
+
+# CHECK: pushl %cs
+0x66 0x0e
+
+# CHECK: pushl %ds
+0x66 0x1e
+
+# CHECK: pushl %ss
+0x66 0x16
+
+# CHECK: pushl %es
+0x66 0x06
+
+# CHECK: pushl %fs
+0x66 0x0f 0xa0
+
+# CHECK: pushl %gs
+0x66 0x0f 0xa8
+
+# CHECK: popw %ss
+0x17
+
+# CHECK: popw %ds
+0x1f
+
+# CHECK: popw %es
+0x07
+
+# CHECK: popl %ss
+0x66 0x17
+
+# CHECK: popl %ds
+0x66 0x1f
+
+# CHECK: popl %es
+0x66 0x07
+
+# CHECK: pushfl
+0x66 0x9c
+
+# CHECK: popfl
+0x66 0x9d
+
+# CHECK: pushfl
+0x66 0x9c
+
+# CHECK: popfl
+0x66 0x9d
+
+# CHECK: salc
+0xd6
+
+# CHECK: setb %bl
+0x0f 0x92 0xc3
+
+# CHECK: setb %bl
+0x0f 0x92 0xc3
+
+# CHECK: setae %bl
+0x0f 0x93 0xc3
+
+# CHECK: setae %bl
+0x0f 0x93 0xc3
+
+# CHECK: setbe %bl
+0x0f 0x96 0xc3
+
+# CHECK: seta %bl
+0x0f 0x97 0xc3
+
+# CHECK: setp %bl
+0x0f 0x9a 0xc3
+
+# CHECK: setnp %bl
+0x0f 0x9b 0xc3
+
+# CHECK: setl %bl
+0x0f 0x9c 0xc3
+
+# CHECK: setge %bl
+0x0f 0x9d 0xc3
+
+# CHECK: setle %bl
+0x0f 0x9e 0xc3
+
+# CHECK: setg %bl
+0x0f 0x9f 0xc3
+
+# CHECK: setne %cl
+0x0f 0x95 0xc1
+
+# CHECK: setb %bl
+0x0f 0x92 0xc3
+
+# CHECK: setb %bl
+0x0f 0x92 0xc3
+
+# CHECK: lcalll $31438, $31438
+0x66 0x9a 0xce 0x7a 0x00 0x00 0xce 0x7a
+
+# CHECK: lcalll $31438, $31438
+0x66 0x9a 0xce 0x7a 0x00 0x00 0xce 0x7a
+
+# CHECK: ljmpl $31438, $31438
+0x66 0xea 0xce 0x7a 0x00 0x00 0xce 0x7a
+
+# CHECK: ljmpl $31438, $31438
+0x66 0xea 0xce 0x7a 0x00 0x00 0xce 0x7a
+
+# CHECK: lcallw $31438, $31438
+0x9a 0xce 0x7a 0xce 0x7a
+
+# CHECK: lcallw $31438, $31438
+0x9a 0xce 0x7a 0xce 0x7a
+
+# CHECK: ljmpw $31438, $31438
+0xea 0xce 0x7a 0xce 0x7a
+
+# CHECK: ljmpw $31438, $31438
+0xea 0xce 0x7a 0xce 0x7a
+
+# CHECK: lcallw $31438, $31438
+0x9a 0xce 0x7a 0xce 0x7a
+
+# CHECK: lcallw $31438, $31438
+0x9a 0xce 0x7a 0xce 0x7a
+
+# CHECK: ljmpw $31438, $31438
+0xea 0xce 0x7a 0xce 0x7a
+
+# CHECK: ljmpw $31438, $31438
+0xea 0xce 0x7a 0xce 0x7a
+
+# CHECK: calll
+0x66 0xe8 0x00 0x00 0x00 0x00
+
+# CHECK: callw
+0xe8 0x00 0x00
+
+# CHECK: incb %al
+0xfe 0xc0
+
+# CHECK: incw %ax
+0x40
+
+# CHECK: incl %eax
+0x66 0x40
+
+# CHECK: decb %al
+0xfe 0xc8
+
+# CHECK: decw %ax
+0x48
+
+# CHECK: decl %eax
+0x66 0x48
+
+# CHECK: pshufw $14, %mm4, %mm0
+0x0f 0x70 0xc4 0x0e
+
+# CHECK: pshufw $90, %mm4, %mm0
+0x0f 0x70 0xc4 0x5a
+
+# CHECK: aaa
+0x37
+
+# CHECK: aad $1
+0xd5 0x01
+
+# CHECK: aad
+0xd5 0x0a
+
+# CHECK: aad
+0xd5 0x0a
+
+# CHECK: aam $2
+0xd4 0x02
+
+# CHECK: aam
+0xd4 0x0a
+
+# CHECK: aam
+0xd4 0x0a
+
+# CHECK: aas
+0x3f
+
+# CHECK: daa
+0x27
+
+# CHECK: das
+0x2f
+
+# CHECK: retw $31438
+0xc2 0xce 0x7a
+
+# CHECK: lretw $31438
+0xca 0xce 0x7a
+
+# CHECK: retw $31438
+0xc2 0xce 0x7a
+
+# CHECK: lretw $31438
+0xca 0xce 0x7a
+
+# CHECK: retl $31438
+0x66 0xc2 0xce 0x7a
+
+# CHECK: lretl $31438
+0x66 0xca 0xce 0x7a
+
+# CHECK: bound 2(%eax), %bx
+0x67 0x62 0x58 0x02
+
+# CHECK: bound 4(%ebx), %ecx
+0x67 0x66 0x62 0x4b 0x04
+
+# CHECK: arpl %bx, %bx
+0x63 0xdb
+
+# CHECK: arpl %bx, 6(%ecx)
+0x67 0x63 0x59 0x06
+
+# CHECK: lgdtw 4(%eax)
+0x67 0x0f 0x01 0x50 0x04
+
+# CHECK: lgdtw 4(%eax)
+0x67 0x0f 0x01 0x50 0x04
+
+# CHECK: lgdtl 4(%eax)
+0x67 0x66 0x0f 0x01 0x50 0x04
+
+# CHECK: lidtw 4(%eax)
+0x67 0x0f 0x01 0x58 0x04
+
+# CHECK: lidtw 4(%eax)
+0x67 0x0f 0x01 0x58 0x04
+
+# CHECK: lidtl 4(%eax)
+0x67 0x66 0x0f 0x01 0x58 0x04
+
+# CHECK: sgdtw 4(%eax)
+0x67 0x0f 0x01 0x40 0x04
+
+# CHECK: sgdtw 4(%eax)
+0x67 0x0f 0x01 0x40 0x04
+
+# CHECK: sgdtl 4(%eax)
+0x67 0x66 0x0f 0x01 0x40 0x04
+
+# CHECK: sidtw 4(%eax)
+0x67 0x0f 0x01 0x48 0x04
+
+# CHECK: sidtw 4(%eax)
+0x67 0x0f 0x01 0x48 0x04
+
+# CHECK: sidtl 4(%eax)
+0x67 0x66 0x0f 0x01 0x48 0x04
+
+# CHECK: fcompi %st(2)
+0xdf 0xf2
+
+# CHECK: fcompi %st(2)
+0xdf 0xf2
+
+# CHECK: fcompi %st(1)
+0xdf 0xf1
+
+# CHECK: fucompi %st(2)
+0xdf 0xea
+
+# CHECK: fucompi %st(2)
+0xdf 0xea
+
+# CHECK: fucompi %st(1)
+0xdf 0xe9
+
+# CHECK: fldcw 32493
+0xd9 0x2e 0xed 0x7e
+
+# CHECK: fldcw 32493
+0xd9 0x2e 0xed 0x7e
+
+# CHECK: fnstcw 32493
+0xd9 0x3e 0xed 0x7e
+
+# CHECK: fnstcw 32493
+0xd9 0x3e 0xed 0x7e
+
+# CHECK: wait
+0x9b
+
+# CHECK: fnstcw 32493
+0xd9 0x3e 0xed 0x7e
+
+# CHECK: wait
+0x9b
+
+# CHECK: fnstcw 32493
+0xd9 0x3e 0xed 0x7e
+
+# CHECK: fnstsw 32493
+0xdd 0x3e 0xed 0x7e
+
+# CHECK: fnstsw 32493
+0xdd 0x3e 0xed 0x7e
+
+# CHECK: wait
+0x9b
+
+# CHECK: fnstsw 32493
+0xdd 0x3e 0xed 0x7e
+
+# CHECK: wait
+0x9b
+
+# CHECK: fnstsw 32493
+0xdd 0x3e 0xed 0x7e
+
+# CHECK: verr 32493
+0x0f 0x00 0x26 0xed 0x7e
+
+# CHECK: verr 32493
+0x0f 0x00 0x26 0xed 0x7e
+
+# CHECK: wait
+0x9b
+
+# CHECK: fnclex
+0xdb 0xe2
+
+# CHECK: fnclex
+0xdb 0xe2
+
+# CHECK: ud2
+0x0f 0x0b
+
+# CHECK: ud2
+0x0f 0x0b
+
+# CHECK: ud2b
+0x0f 0xb9
+
+# CHECK: loope
+0xe1 0x00
+
+# CHECK: loopne
+0xe0 0x00
+
+# CHECK: outsb
+0x6e
+
+# CHECK: outsw
+0x6f
+
+# CHECK: outsl
+0x66 0x6f
+
+# CHECK: insb
+0x6c
+
+# CHECK: insw
+0x6d
+
+# CHECK: insl
+0x66 0x6d
+
+# CHECK: movsb
+0xa4
+
+# CHECK: movsw
+0xa5
+
+# CHECK: movsl
+0x66 0xa5
+
+# CHECK: lodsb
+0xac
+
+# CHECK: lodsw
+0xad
+
+# CHECK: lodsl
+0x66 0xad
+
+# CHECK: stosb
+0xaa
+
+# CHECK: stosw
+0xab
+
+# CHECK: stosl
+0x66 0xab
+
+# CHECK: strw %ax
+0x0f 0x00 0xc8
+
+# CHECK: strl %eax
+0x66 0x0f 0x00 0xc8
+
+# CHECK: fsubp %st(1)
+0xde 0xe1
+
+# CHECK: fsubp %st(2)
+0xde 0xe2
+
+# CHECKX: nop
+0x66 0x90
+
+# CHECKX: nop
+0x90
+
+# CHECK: xchgl %ecx, %eax
+0x66 0x91
+
+# CHECK: xchgl %ecx, %eax
+0x66 0x91
+
+# CHECK: retw
+0xc3
+
+# CHECK: retl
+0x66 0xc3
+
+# CHECK: lretw
+0xcb
+
+# CHECK: lretl
+0x66 0xcb
+
+# CHECK: callw -1
+0xe8 0xff 0xff
diff --git a/test/MC/Disassembler/X86/x86-32.txt b/test/MC/Disassembler/X86/x86-32.txt
index f92d6057877b..9dd49e51d91b 100644
--- a/test/MC/Disassembler/X86/x86-32.txt
+++ b/test/MC/Disassembler/X86/x86-32.txt
@@ -129,6 +129,9 @@
# CHECK: invlpga
0x0f 0x01 0xdf
+# CHECK: clzero
+0x0f 0x01 0xfc
+
# CHECK: movl $0, -4(%ebp)
0xc7 0x45 0xfc 0x00 0x00 0x00 0x00
@@ -517,9 +520,6 @@
# CHECK: clwb (%eax)
0x66 0x0f 0xae 0x30
-# CHECK: pcommit
-0x66 0x0f 0xae 0xf8
-
# CHECK: vcvtph2ps %xmm0, %xmm0
0xc4 0xe2 0x79 0x13 0xc0
diff --git a/test/MC/Disassembler/X86/x86-64.txt b/test/MC/Disassembler/X86/x86-64.txt
index 13e36df002a4..de62b0ff1d77 100644
--- a/test/MC/Disassembler/X86/x86-64.txt
+++ b/test/MC/Disassembler/X86/x86-64.txt
@@ -185,10 +185,10 @@
# CHECK: sha1msg2 (%rax), %xmm2
0x0f 0x38 0xca 0x10
-# CHECK: sha256rnds2 (%rax), %xmm2
+# CHECK: sha256rnds2 %xmm0, (%rax), %xmm2
0x0f 0x38 0xcb 0x10
-# CHECK: sha256rnds2 %xmm1, %xmm2
+# CHECK: sha256rnds2 %xmm0, %xmm1, %xmm2
0x0f 0x38 0xcb 0xd1
# CHECK: sha256msg1 %xmm1, %xmm2
diff --git a/test/MC/ELF/ARM/gnu-type-hash-diagnostics.s b/test/MC/ELF/ARM/gnu-type-hash-diagnostics.s
index eb364755c4d7..7dc656d5a4d7 100644
--- a/test/MC/ELF/ARM/gnu-type-hash-diagnostics.s
+++ b/test/MC/ELF/ARM/gnu-type-hash-diagnostics.s
@@ -7,3 +7,7 @@
// CHECK: .type TYPE #32
// CHECK: ^
+ // For ARM, the comment character is '@', so we don't list '@<type>' as a
+ // valid option.
+ .section "foo", "a", @progbits
+// CHECK: error: expected '%<type>' or "<type>"
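+
+// For reference, either accepted spelling would parse here, e.g.:
+// .section "bar", "a", %progbits
+// .section "bar", "a", "progbits"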
diff --git a/test/MC/ELF/gen-dwarf.s b/test/MC/ELF/gen-dwarf.s
index 4e773c79af28..e00580926827 100644
--- a/test/MC/ELF/gen-dwarf.s
+++ b/test/MC/ELF/gen-dwarf.s
@@ -3,7 +3,8 @@
// RUN: llvm-mc -g -dwarf-version 2 -triple i686-pc-linux-gnu %s -filetype=asm -o - | FileCheck --check-prefix=ASM --check-prefix=DWARF2 %s
// RUN: llvm-mc -g -dwarf-version 3 -triple i686-pc-linux-gnu %s -filetype=asm -o - | FileCheck --check-prefix=ASM --check-prefix=DWARF3 %s
// RUN: llvm-mc -g -triple i686-pc-linux-gnu %s -filetype=asm -o - | FileCheck --check-prefix=ASM --check-prefix=DWARF4 %s
-// RUN: not llvm-mc -g -dwarf-version 5 -triple i686-pc-linux-gnu %s -filetype=asm -o - 2>&1 | FileCheck --check-prefix=DWARF5 %s
+// RUN: llvm-mc -g -dwarf-version 5 -triple i686-pc-linux-gnu %s -filetype=asm -o - 2>&1 | FileCheck --check-prefix=DWARF5 %s
+// RUN: not llvm-mc -g -dwarf-version 6 -triple i686-pc-linux-gnu %s -filetype=asm -o - 2>&1 | FileCheck --check-prefix=DWARF6 %s
// Test that on ELF:
@@ -34,8 +35,9 @@ foo:
// ASM: .section .debug_info
// ASM: .section .debug_abbrev
-// ASM-NEXT: .Lsection_abbrev:
// ASM-NEXT: [[ABBREV_LABEL:.Ltmp[0-9]+]]
+// DWARF5: .section .debug_abbrev
+// DWARF5-NEXT: [[ABBREV_LABEL:.Ltmp[0-9]+]]
// Second instance of the section has the CU
// ASM: .section .debug_info
@@ -44,6 +46,11 @@ foo:
// DWARF3: .short 3
// DWARF4: .short 4
// ASM-NEXT: .long [[ABBREV_LABEL]]
+// DWARF5: .short 5
+// DWARF5-NEXT: .byte 1
+// DWARF5-NEXT: .byte 4
+// DWARF5-NEXT: .long [[ABBREV_LABEL]]
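+// In the DWARF v5 unit header, the version is followed by a unit_type byte
+// (DW_UT_compile = 1) and the address size (4) before the abbrev offset,
+// which is what the .byte checks above pin down.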
+
// First .byte 1 is the abbreviation number for the compile_unit abbrev
// ASM: .byte 1
// ASM-NEXT: .long [[LINE_LABEL:.L[a-z0-9]+]]
@@ -52,4 +59,4 @@ foo:
// ASM-NEXT: [[LINE_LABEL]]
// DWARF1: Dwarf version 1 is not supported.
-// DWARF5: Dwarf version 5 is not supported.
+// DWARF6: Dwarf version 6 is not supported.
diff --git a/test/MC/ELF/section-metadata-err1.s b/test/MC/ELF/section-metadata-err1.s
new file mode 100644
index 000000000000..682f0e82f30d
--- /dev/null
+++ b/test/MC/ELF/section-metadata-err1.s
@@ -0,0 +1,5 @@
+// RUN: not llvm-mc -triple x86_64-pc-linux-gnu %s -o - 2>&1 | FileCheck %s
+
+// CHECK: error: symbol is not in a section: foo
+
+ .section .shf_metadata,"ao",@progbits,foo
diff --git a/test/MC/ELF/section-metadata-err2.s b/test/MC/ELF/section-metadata-err2.s
new file mode 100644
index 000000000000..1912f67d0e0f
--- /dev/null
+++ b/test/MC/ELF/section-metadata-err2.s
@@ -0,0 +1,6 @@
+// RUN: not llvm-mc -triple x86_64-pc-linux-gnu %s -o - 2>&1 | FileCheck %s
+
+// CHECK: error: symbol is not in a section: foo
+
+ .quad foo
+ .section .shf_metadata,"ao",@progbits,foo
diff --git a/test/MC/ELF/section-metadata-err3.s b/test/MC/ELF/section-metadata-err3.s
new file mode 100644
index 000000000000..388ca377fd42
--- /dev/null
+++ b/test/MC/ELF/section-metadata-err3.s
@@ -0,0 +1,6 @@
+// RUN: not llvm-mc -triple x86_64-pc-linux-gnu %s -o - 2>&1 | FileCheck %s
+
+// CHECK: error: symbol is not in a section: foo
+
+ foo = 42
+ .section .shf_metadata,"ao",@progbits,foo
diff --git a/test/MC/ELF/section-metadata-err4.s b/test/MC/ELF/section-metadata-err4.s
new file mode 100644
index 000000000000..d7677d292f70
--- /dev/null
+++ b/test/MC/ELF/section-metadata-err4.s
@@ -0,0 +1,5 @@
+// RUN: not llvm-mc -triple x86_64-pc-linux-gnu %s -o - 2>&1 | FileCheck %s
+
+// CHECK: error: expected metadata symbol
+
+ .section .shf_metadata,"ao",@progbits
diff --git a/test/MC/ELF/section-numeric-invalid-type.s b/test/MC/ELF/section-numeric-invalid-type.s
new file mode 100644
index 000000000000..3ae071bc7c13
--- /dev/null
+++ b/test/MC/ELF/section-numeric-invalid-type.s
@@ -0,0 +1,14 @@
+// RUN: llvm-mc -filetype=obj -triple=x86_64-pc-linux-gnu %s -o - \
+// RUN: | llvm-readobj -s -t | FileCheck --check-prefix=OBJ %s
+
+// RUN: not llvm-mc -filetype=asm -triple=x86_64-pc-linux-gnu %s -o - 2>&1 \
+// RUN: | FileCheck --check-prefix=ASM %s
+
+ .section .sec,"a",@0x7fffffff
+
+// OBJ: Section {
+// OBJ: Name: .sec
+// OBJ-NEXT: Type: (0x7FFFFFFF)
+// OBJ: }
+
+// ASM: unsupported type 0x7fffffff for section .sec
diff --git a/test/MC/ELF/section-numeric-type.s b/test/MC/ELF/section-numeric-type.s
new file mode 100644
index 000000000000..2e51bd4eb187
--- /dev/null
+++ b/test/MC/ELF/section-numeric-type.s
@@ -0,0 +1,20 @@
+// RUN: llvm-mc -filetype=obj -triple=x86_64-pc-linux-gnu %s -o - \
+// RUN: | llvm-readobj -s -t | FileCheck --check-prefix=OBJ %s
+
+// RUN: llvm-mc -filetype=asm -triple=x86_64-pc-linux-gnu %s -o - \
+// RUN: | FileCheck --check-prefix=ASM %s
+
+ .section .sec1,"a",@0x70000001
+ .section .sec2,"a",@1879048193
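+// 1879048193 is the decimal spelling of 0x70000001, so both sections get
+// the same SHT_X86_64_UNWIND type below.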
+
+// OBJ: Section {
+// OBJ: Name: .sec1
+// OBJ-NEXT: Type: SHT_X86_64_UNWIND (0x70000001)
+// OBJ: }
+// OBJ: Section {
+// OBJ: Name: .sec2
+// OBJ-NEXT: Type: SHT_X86_64_UNWIND (0x70000001)
+// OBJ: }
+
+// ASM: .section .sec1,"a",@unwind
+// ASM: .section .sec2,"a",@unwind
diff --git a/test/MC/ELF/section-sym-err.s b/test/MC/ELF/section-sym-err.s
new file mode 100644
index 000000000000..789fee7c422c
--- /dev/null
+++ b/test/MC/ELF/section-sym-err.s
@@ -0,0 +1,6 @@
+// RUN: not llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o %t.o 2>&1 | FileCheck %s
+
+.section foo
+foo:
+
+// CHECK: error: invalid symbol redefinition
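+
+// (The .section directive creates a section symbol named foo, so the label
+// foo: collides with it.)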
diff --git a/test/MC/ELF/section-sym-err2.s b/test/MC/ELF/section-sym-err2.s
new file mode 100644
index 000000000000..27d8e9a9ac24
--- /dev/null
+++ b/test/MC/ELF/section-sym-err2.s
@@ -0,0 +1,6 @@
+// RUN: not llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o %t.o 2>&1 | FileCheck %s
+
+foo:
+.section foo
+
+// CHECK: error: invalid symbol redefinition
diff --git a/test/MC/ELF/section-sym-redefine.s b/test/MC/ELF/section-sym-redefine.s
deleted file mode 100644
index 1f6dd5723af1..000000000000
--- a/test/MC/ELF/section-sym-redefine.s
+++ /dev/null
@@ -1,138 +0,0 @@
-// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -t -r --expand-relocs | FileCheck %s
-
-// Local symbol overriding section.
-.section x1,"a",@progbits
-.local x1
-.comm x1,4,4
-.long x1 // reloc: .bss + 0
-
-// Section declared after local. Local symbol wins.
-.local x2
-.comm x2,4,4
-.section x2,"a",@progbits
-.long x2 // reloc: .bss + 4
-
-// No overriding symbol.
-.section x3,"a",@progbits
-.long x3 // reloc: x3(section) + 0
-
-// Global vs section.
-.section x4,"a",@progbits
-.long 0
-.globl x4
-.section foo, "a", @progbits
-x4:
-.long 0
-.long x4 // reloc: x4(global) + 0
-
-// Global vs implicit section
-.globl .data
-.data:
-.long 42
-.long .data // reloc: .data(global) + 0
-
-// CHECK: Relocations [
-// CHECK: Section (4) .relax1 {
-// CHECK: Relocation {
-// CHECK: Offset: 0x0
-// CHECK: Type: R_X86_64_32 (10)
-// CHECK: Symbol: .bss (3)
-// CHECK: Addend: 0x0
-// CHECK: }
-// CHECK: }
-// CHECK: Section (7) .relax2 {
-// CHECK: Relocation {
-// CHECK: Offset: 0x0
-// CHECK: Type: R_X86_64_32 (10)
-// CHECK: Symbol: .bss (3)
-// CHECK: Addend: 0x4
-// CHECK: }
-// CHECK: }
-// CHECK: Section (9) .relax3 {
-// CHECK: Relocation {
-// CHECK: Offset: 0x0
-// CHECK: Type: R_X86_64_32 (10)
-// CHECK: Symbol: x3 (4)
-// CHECK: Addend: 0x0
-// CHECK: }
-// CHECK: }
-// CHECK: Section (12) .relafoo {
-// CHECK: Relocation {
-// CHECK: Offset: 0x4
-// CHECK: Type: R_X86_64_32 (10)
-// CHECK: Symbol: x4 (6)
-// CHECK: Addend: 0x0
-// CHECK: }
-// CHECK: Relocation {
-// CHECK: Offset: 0xC
-// CHECK: Type: R_X86_64_32 (10)
-// CHECK: Symbol: .data (5)
-// CHECK: Addend: 0x0
-// CHECK: }
-// CHECK: }
-// CHECK: ]
-// CHECK: Symbols [
-// CHECK: Symbol {
-// CHECK: Name: (0)
-// CHECK: Value: 0x0
-// CHECK: Size: 0
-// CHECK: Binding: Local (0x0)
-// CHECK: Type: None (0x0)
-// CHECK: Other: 0
-// CHECK: Section: Undefined (0x0)
-// CHECK: }
-// CHECK: Symbol {
-// CHECK: Name: x1 (67)
-// CHECK: Value: 0x0
-// CHECK: Size: 4
-// CHECK: Binding: Local (0x0)
-// CHECK: Type: Object (0x1)
-// CHECK: Other: 0
-// CHECK: Section: .bss (0x5)
-// CHECK: }
-// CHECK: Symbol {
-// CHECK: Name: x2 (59)
-// CHECK: Value: 0x4
-// CHECK: Size: 4
-// CHECK: Binding: Local (0x0)
-// CHECK: Type: Object (0x1)
-// CHECK: Other: 0
-// CHECK: Section: .bss (0x5)
-// CHECK: }
-// CHECK: Symbol {
-// CHECK: Name: (0)
-// CHECK: Value: 0x0
-// CHECK: Size: 0
-// CHECK: Binding: Local (0x0)
-// CHECK: Type: Section (0x3)
-// CHECK: Other: 0
-// CHECK: Section: .bss (0x5)
-// CHECK: }
-// CHECK: Symbol {
-// CHECK: Name: (0)
-// CHECK: Value: 0x0
-// CHECK: Size: 0
-// CHECK: Binding: Local (0x0)
-// CHECK: Type: Section (0x3)
-// CHECK: Other: 0
-// CHECK: Section: x3 (0x8)
-// CHECK: }
-// CHECK: Symbol {
-// CHECK: Name: .data (37)
-// CHECK: Value: 0x8
-// CHECK: Size: 0
-// CHECK: Binding: Global (0x1)
-// CHECK: Type: None (0x0)
-// CHECK: Other: 0
-// CHECK: Section: foo (0xB)
-// CHECK: }
-// CHECK: Symbol {
-// CHECK: Name: x4 (43)
-// CHECK: Value: 0x0
-// CHECK: Size: 0
-// CHECK: Binding: Global (0x1)
-// CHECK: Type: None (0x0)
-// CHECK: Other: 0
-// CHECK: Section: foo (0xB)
-// CHECK: }
-// CHECK: ]
diff --git a/test/MC/ELF/section.s b/test/MC/ELF/section.s
index 0277be522361..03a0f22e580b 100644
--- a/test/MC/ELF/section.s
+++ b/test/MC/ELF/section.s
@@ -1,4 +1,5 @@
// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -s | FileCheck %s
+// RUN: llvm-mc -filetype=asm -triple x86_64-pc-linux-gnu %s -o - | FileCheck %s --check-prefix=ASM
// Test that these names are accepted.
@@ -143,9 +144,126 @@ bar:
// Test that we handle the strings like gas
.section bar-"foo"
-.section "foo"
+.section "fooo"
+
// CHECK: Section {
// CHECK: Name: bar-"foo"
// CHECK: Section {
-// CHECK: Name: foo
+// CHECK: Name: fooo
+
+// Test SHF_LINK_ORDER
+
+.section .shf_metadata_target1, "a"
+ .quad 0
+.section .shf_metadata_target2, "a", @progbits, unique, 1
+.Lshf_metadata_target2_1:
+ .quad 0
+.section .shf_metadata_target2, "a", @progbits, unique, 2
+.Lshf_metadata_target2_2:
+ .quad 0
+
+.section .shf_metadata1,"ao",@progbits,.Lshf_metadata_target2_1
+.section .shf_metadata2,"ao",@progbits,.Lshf_metadata_target2_2
+.section .shf_metadata3,"ao",@progbits,.shf_metadata_target1
+// ASM: .section .shf_metadata1,"ao",@progbits,.Lshf_metadata_target2_1
+// ASM: .section .shf_metadata2,"ao",@progbits,.Lshf_metadata_target2_2
+// ASM: .section .shf_metadata3,"ao",@progbits,.shf_metadata_target1
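+
+// The "o" flag sets SHF_LINK_ORDER and makes each section's sh_link point
+// at the section that defines the named metadata symbol, hence the Link
+// values 23, 24 and 22 in the checks below.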
+
+// CHECK: Section {
+// CHECK: Index: 22
+// CHECK-NEXT: Name: .shf_metadata_target1
+
+// CHECK: Section {
+// CHECK: Index: 23
+// CHECK-NEXT: Name: .shf_metadata_target2
+
+// CHECK: Section {
+// CHECK: Index: 24
+// CHECK-NEXT: Name: .shf_metadata_target2
+
+// CHECK: Section {
+// CHECK: Name: .shf_metadata1
+// CHECK-NEXT: Type: SHT_PROGBITS
+// CHECK-NEXT: Flags [
+// CHECK-NEXT: SHF_ALLOC
+// CHECK-NEXT: SHF_LINK_ORDER
+// CHECK-NEXT: ]
+// CHECK-NEXT: Address:
+// CHECK-NEXT: Offset:
+// CHECK-NEXT: Size:
+// CHECK-NEXT: Link: 23
+// CHECK-NEXT: Info: 0
+
+// CHECK: Section {
+// CHECK: Name: .shf_metadata2
+// CHECK-NEXT: Type: SHT_PROGBITS
+// CHECK-NEXT: Flags [
+// CHECK-NEXT: SHF_ALLOC
+// CHECK-NEXT: SHF_LINK_ORDER
+// CHECK-NEXT: ]
+// CHECK-NEXT: Address:
+// CHECK-NEXT: Offset:
+// CHECK-NEXT: Size:
+// CHECK-NEXT: Link: 24
+// CHECK-NEXT: Info: 0
+
+// CHECK: Section {
+// CHECK: Name: .shf_metadata3
+// CHECK-NEXT: Type: SHT_PROGBITS
+// CHECK-NEXT: Flags [
+// CHECK-NEXT: SHF_ALLOC
+// CHECK-NEXT: SHF_LINK_ORDER
+// CHECK-NEXT: ]
+// CHECK-NEXT: Address:
+// CHECK-NEXT: Offset:
+// CHECK-NEXT: Size:
+// CHECK-NEXT: Link: 22
+// CHECK-NEXT: Info: 0
+
+.section .text.foo
+// CHECK: Section {
+// CHECK: Name: .text.foo
+// CHECK-NEXT: Type: SHT_PROGBITS
+// CHECK-NEXT: Flags [
+// CHECK-NEXT: SHF_ALLOC
+// CHECK-NEXT: SHF_EXECINSTR
+// CHECK-NEXT: ]
+
+.section .bss
+// CHECK: Section {
+// CHECK: Name: .bss
+// CHECK-NEXT: Type: SHT_NOBITS
+// CHECK-NEXT: Flags [
+// CHECK-NEXT: SHF_ALLOC
+// CHECK-NEXT: SHF_WRITE
+// CHECK-NEXT: ]
+
+.section .bss.foo
+// CHECK: Section {
+// CHECK: Name: .bss.foo
+// CHECK-NEXT: Type: SHT_NOBITS
+// CHECK-NEXT: Flags [
+// CHECK-NEXT: SHF_ALLOC
+// CHECK-NEXT: SHF_WRITE
+// CHECK-NEXT: ]
+
+.section .tbss
+// CHECK: Section {
+// CHECK: Name: .tbss
+// CHECK-NEXT: Type: SHT_NOBITS
+// CHECK-NEXT: Flags [
+// CHECK-NEXT: SHF_ALLOC
+// CHECK-NEXT: SHF_TLS
+// CHECK-NEXT: SHF_WRITE
+// CHECK-NEXT: ]
+
+.section .tbss.foo
+// CHECK: Section {
+// CHECK: Name: .tbss.foo
+// CHECK-NEXT: Type: SHT_NOBITS
+// CHECK-NEXT: Flags [
+// CHECK-NEXT: SHF_ALLOC
+// CHECK-NEXT: SHF_TLS
+// CHECK-NEXT: SHF_WRITE
+// CHECK-NEXT: ]
diff --git a/test/MC/Hexagon/align.s b/test/MC/Hexagon/align.s
index 01a112392ed4..80cebf125cea 100644
--- a/test/MC/Hexagon/align.s
+++ b/test/MC/Hexagon/align.s
@@ -3,7 +3,7 @@
# Verify that the .align directive emits the proper insn packets.
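# The padding is emitted as nop slots and nop packets, as the encodings in
# the CHECK lines below show.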
{ r1 = sub(#1, r1) }
-# CHECK: 76414021 { r1 = sub(#1, r1)
+# CHECK: 76414021 { r1 = sub(#1,r1)
# CHECK-NEXT: 7f004000 nop
# CHECK-NEXT: 7f004000 nop
# CHECK-NEXT: 7f00c000 nop }
@@ -11,8 +11,8 @@
.align 16
{ r1 = sub(#1, r1)
r2 = sub(#1, r2) }
-# CHECK: 76414021 { r1 = sub(#1, r1)
-# CHECK-NEXT: 76424022 r2 = sub(#1, r2)
+# CHECK: 76414021 { r1 = sub(#1,r1)
+# CHECK-NEXT: 76424022 r2 = sub(#1,r2)
# CHECK-NEXT: 7f004000 nop
# CHECK-NEXT: 7f00c000 nop }
@@ -20,7 +20,7 @@
{ r1 = sub(#1, r1)
r2 = sub(#1, r2)
r3 = sub(#1, r3) }
-# CHECK: 76434023 r3 = sub(#1, r3)
+# CHECK: 76434023 r3 = sub(#1,r3)
# CHECK-NEXT: 7f00c000 nop }
.align 16
@@ -33,13 +33,13 @@
# CHECK: 9200c020 { r0 = vextract(v0,r0) }
r0 = vextract(v0, r0)
.align 128
-# CHECK: 76414021 { r1 = sub(#1, r1)
+# CHECK: 76414021 { r1 = sub(#1,r1)
# CHECK-NEXT: 7f00c000 nop }
{ r1 = sub(#1, r1) }
-#CHECK: { r1 = sub(#1, r1)
-#CHECK: r2 = sub(#1, r2)
-#CHECK: r3 = sub(#1, r3) }
+#CHECK: { r1 = sub(#1,r1)
+#CHECK: r2 = sub(#1,r2)
+#CHECK: r3 = sub(#1,r3) }
.falign
.align 8
{ r1 = sub(#1, r1)
@@ -47,14 +47,14 @@ r0 = vextract(v0, r0)
r3 = sub(#1, r3) }
# CHECK: { immext(#0)
-# CHECK: r0 = sub(##1, r0)
+# CHECK: r0 = sub(##1,r0)
# CHECK: immext(#0)
-# CHECK: r1 = sub(##1, r1) }
+# CHECK: r1 = sub(##1,r1) }
# CHECK: { nop
# CHECK: nop
# CHECK: nop }
-# CHECK: { r0 = sub(#1, r0) }
+# CHECK: { r0 = sub(#1,r0) }
{ r0 = sub (##1, r0)
r1 = sub (##1, r1) }
.align 16
-{ r0 = sub (#1, r0) } \ No newline at end of file
+{ r0 = sub (#1, r0) }
diff --git a/test/MC/Hexagon/asmMap.s b/test/MC/Hexagon/asmMap.s
index 81bb8f31f02c..4a2ca2499cc1 100644
--- a/test/MC/Hexagon/asmMap.s
+++ b/test/MC/Hexagon/asmMap.s
@@ -2,607 +2,607 @@
# Make sure that the assembler mapped instructions are being handled correctly.
-#CHECK: 3c56c000 { memw(r22{{ *}}+{{ *}}#0)=#0
+#CHECK: 3c56c000 { memw(r22+#0) = #0
memw(r22)=#0
-#CHECK: 3c23e05f { memh(r3{{ *}}+{{ *}}#0)=#-33
+#CHECK: 3c23e05f { memh(r3+#0) = #-33
memh(r3)=#-33
-#CHECK: 3c07c012 { memb(r7{{ *}}+{{ *}}#0)=#18
+#CHECK: 3c07c012 { memb(r7+#0) = #18
memb(r7)=#18
-#CHECK: 4101c008 { if (p0) r8 = memb(r1{{ *}}+{{ *}}#0)
+#CHECK: 4101c008 { if (p0) r8 = memb(r1+#0)
if (p0) r8=memb(r1)
-#CHECK: 4519d817 { if (!p3) r23 = memb(r25{{ *}}+{{ *}}#0)
+#CHECK: 4519d817 { if (!p3) r23 = memb(r25+#0)
if (!p3) r23=memb(r25)
-#CHECK: 412dc002 { if (p0) r2 = memub(r13{{ *}}+{{ *}}#0)
+#CHECK: 412dc002 { if (p0) r2 = memub(r13+#0)
if (p0) r2=memub(r13)
-#CHECK: 453cc01a { if (!p0) r26 = memub(r28{{ *}}+{{ *}}#0)
+#CHECK: 453cc01a { if (!p0) r26 = memub(r28+#0)
if (!p0) r26=memub(r28)
-#CHECK: 416bc818 { if (p1) r24 = memuh(r11{{ *}}+{{ *}}#0)
+#CHECK: 416bc818 { if (p1) r24 = memuh(r11+#0)
if (p1) r24=memuh(r11)
-#CHECK: 457fc012 { if (!p0) r18 = memuh(r31{{ *}}+{{ *}}#0)
+#CHECK: 457fc012 { if (!p0) r18 = memuh(r31+#0)
if (!p0) r18=memuh(r31)
-#CHECK: 455dc014 { if (!p0) r20 = memh(r29{{ *}}+{{ *}}#0)
+#CHECK: 455dc014 { if (!p0) r20 = memh(r29+#0)
if (!p0) r20=memh(r29)
-#CHECK: 415dc01d { if (p0) r29 = memh(r29{{ *}}+{{ *}}#0)
+#CHECK: 415dc01d { if (p0) r29 = memh(r29+#0)
if (p0) r29=memh(r29)
-#CHECK: 4583c01d { if (!p0) r29 = memw(r3{{ *}}+{{ *}}#0)
+#CHECK: 4583c01d { if (!p0) r29 = memw(r3+#0)
if (!p0) r29=memw(r3)
-#CHECK: 419bd01e { if (p2) r30 = memw(r27{{ *}}+{{ *}}#0)
+#CHECK: 419bd01e { if (p2) r30 = memw(r27+#0)
if (p2) r30=memw(r27)
-#CHECK: 90e2c018 { r25:24 = membh(r2{{ *}}+{{ *}}#0)
+#CHECK: 90e2c018 { r25:24 = membh(r2+#0)
r25:24=membh(r2)
-#CHECK: 902bc006 { r6 = membh(r11{{ *}}+{{ *}}#0)
+#CHECK: 902bc006 { r6 = membh(r11+#0)
r6=membh(r11)
-#CHECK: 90a2c01c { r29:28 = memubh(r2{{ *}}+{{ *}}#0)
+#CHECK: 90a2c01c { r29:28 = memubh(r2+#0)
r29:28=memubh(r2)
-#CHECK: 906ec00d { r13 = memubh(r14{{ *}}+{{ *}}#0)
+#CHECK: 906ec00d { r13 = memubh(r14+#0)
r13=memubh(r14)
-#CHECK: 91dac00c { r13:12 = memd(r26{{ *}}+{{ *}}#0)
+#CHECK: 91dac00c { r13:12 = memd(r26+#0)
r13:12=memd(r26)
-#CHECK: 919bc004 { r4 = memw(r27{{ *}}+{{ *}}#0)
+#CHECK: 919bc004 { r4 = memw(r27+#0)
r4=memw(r27)
-#CHECK: 914cc005 { r5 = memh(r12{{ *}}+{{ *}}#0)
+#CHECK: 914cc005 { r5 = memh(r12+#0)
r5=memh(r12)
-#CHECK: 9176c010 { r16 = memuh(r22{{ *}}+{{ *}}#0)
+#CHECK: 9176c010 { r16 = memuh(r22+#0)
r16=memuh(r22)
-#CHECK: 910bc017 { r23 = memb(r11{{ *}}+{{ *}}#0)
+#CHECK: 910bc017 { r23 = memb(r11+#0)
r23=memb(r11)
-#CHECK: 912bc01b { r27 = memub(r11{{ *}}+{{ *}}#0)
+#CHECK: 912bc01b { r27 = memub(r11+#0)
r27=memub(r11)
-#CHECK: 404ede01 { if (p1) memh(r14{{ *}}+{{ *}}#0) = r30
+#CHECK: 404ede01 { if (p1) memh(r14+#0) = r30
if (p1) memh(r14)=r30
-#CHECK: 4449d900 { if (!p0) memh(r9{{ *}}+{{ *}}#0) = r25
+#CHECK: 4449d900 { if (!p0) memh(r9+#0) = r25
if (!p0) memh(r9)=r25
-#CHECK: 400ecd00 { if (p0) memb(r14{{ *}}+{{ *}}#0) = r13
+#CHECK: 400ecd00 { if (p0) memb(r14+#0) = r13
if (p0) memb(r14)=r13
-#CHECK: 440bcc01 { if (!p1) memb(r11{{ *}}+{{ *}}#0) = r12
+#CHECK: 440bcc01 { if (!p1) memb(r11+#0) = r12
if (!p1) memb(r11)=r12
-#CHECK: 41d0d804 { if (p3) r5:4 = memd(r16{{ *}}+{{ *}}#0)
+#CHECK: 41d0d804 { if (p3) r5:4 = memd(r16+#0)
if (p3) r5:4=memd(r16)
-#CHECK: 45d9c00c { if (!p0) r13:12 = memd(r25{{ *}}+{{ *}}#0)
+#CHECK: 45d9c00c { if (!p0) r13:12 = memd(r25+#0)
if (!p0) r13:12=memd(r25)
-#CHECK: 385ee06d { if (p3) memw(r30{{ *}}+{{ *}}#0)=#-19
+#CHECK: 385ee06d { if (p3) memw(r30+#0) = #-19
if (p3) memw(r30)=#-19
-#CHECK: 38c6c053 { if (!p2) memw(r6{{ *}}+{{ *}}#0)=#19
+#CHECK: 38c6c053 { if (!p2) memw(r6+#0) = #19
if (!p2) memw(r6)=#19
-#CHECK: 381fc034 { if (p1) memb(r31{{ *}}+{{ *}}#0)=#20
+#CHECK: 381fc034 { if (p1) memb(r31+#0) = #20
if (p1) memb(r31)=#20
-#CHECK: 389dc010 { if (!p0) memb(r29{{ *}}+{{ *}}#0)=#16
+#CHECK: 389dc010 { if (!p0) memb(r29+#0) = #16
if (!p0) memb(r29)=#16
-#CHECK: 3833e019 { if (p0) memh(r19{{ *}}+{{ *}}#0)=#-7
+#CHECK: 3833e019 { if (p0) memh(r19+#0) = #-7
if (p0) memh(r19)=#-7
-#CHECK: 38b7c013 { if (!p0) memh(r23{{ *}}+{{ *}}#0)=#19
+#CHECK: 38b7c013 { if (!p0) memh(r23+#0) = #19
if (!p0) memh(r23)=#19
-#CHECK: 4488d401 { if (!p1) memw(r8{{ *}}+{{ *}}#0) = r20
+#CHECK: 4488d401 { if (!p1) memw(r8+#0) = r20
if (!p1) memw(r8)=r20
-#CHECK: 409ddc02 { if (p2) memw(r29{{ *}}+{{ *}}#0) = r28
+#CHECK: 409ddc02 { if (p2) memw(r29+#0) = r28
if (p2) memw(r29)=r28
-#CHECK: 446fc301 { if (!p1) memh(r15{{ *}}+{{ *}}#0) = r3.h
+#CHECK: 446fc301 { if (!p1) memh(r15+#0) = r3.h
if (!p1) memh(r15)=r3.h
-#CHECK: 406dc201 { if (p1) memh(r13{{ *}}+{{ *}}#0) = r2.h
+#CHECK: 406dc201 { if (p1) memh(r13+#0) = r2.h
if (p1) memh(r13)=r2.h
-#CHECK: 40d9c601 { if (p1) memd(r25{{ *}}+{{ *}}#0) = r7:6
+#CHECK: 40d9c601 { if (p1) memd(r25+#0) = r7:6
if (p1) memd(r25)=r7:6
-#CHECK: 44dad803 { if (!p3) memd(r26{{ *}}+{{ *}}#0) = r25:24
+#CHECK: 44dad803 { if (!p3) memd(r26+#0) = r25:24
if (!p3) memd(r26)=r25:24
-#CHECK: 3e21c011 { memh(r1{{ *}}+{{ *}}#0) {{ *}}+={{ *}} r17
+#CHECK: 3e21c011 { memh(r1+#0) += r17
memh(r1)+=r17
-#CHECK: 3e4fc019 { memw(r15{{ *}}+{{ *}}#0) {{ *}}+={{ *}} r25
+#CHECK: 3e4fc019 { memw(r15+#0) += r25
memw(r15)+=r25
-#CHECK: 3e5dc022 { memw(r29{{ *}}+{{ *}}#0) {{ *}}-={{ *}} r2
+#CHECK: 3e5dc022 { memw(r29+#0) -= r2
memw(r29)-=r2
-#CHECK: 3e04c004 { memb(r4{{ *}}+{{ *}}#0) {{ *}}+={{ *}} r4
+#CHECK: 3e04c004 { memb(r4+#0) += r4
memb(r4)+=r4
-#CHECK: 3f53c016 { memw(r19{{ *}}+{{ *}}#0){{ *}}{{ *}}+={{ *}}{{ *}}#22
+#CHECK: 3f53c016 { memw(r19+#0) += #22
memw(r19)+=#22
-#CHECK: 3f24c01e { memh(r4{{ *}}+{{ *}}#0){{ *}}{{ *}}+={{ *}}{{ *}}#30
+#CHECK: 3f24c01e { memh(r4+#0) += #30
memh(r4)+=#30
-#CHECK: 3e27c02d { memh(r7{{ *}}+{{ *}}#0) {{ *}}-={{ *}} r13
+#CHECK: 3e27c02d { memh(r7+#0) -= r13
memh(r7)-=r13
-#CHECK: 3e1ec032 { memb(r30{{ *}}+{{ *}}#0) {{ *}}-={{ *}} r18
+#CHECK: 3e1ec032 { memb(r30+#0) -= r18
memb(r30)-=r18
-#CHECK: 3e49c05b { memw(r9{{ *}}+{{ *}}#0) &= r27
+#CHECK: 3e49c05b { memw(r9+#0) &= r27
memw(r9)&=r27
-#CHECK: 3e2dc040 { memh(r13{{ *}}+{{ *}}#0) &= r0
+#CHECK: 3e2dc040 { memh(r13+#0) &= r0
memh(r13)&=r0
-#CHECK: 3e05c046 { memb(r5{{ *}}+{{ *}}#0) &= r6
+#CHECK: 3e05c046 { memb(r5+#0) &= r6
memb(r5)&=r6
-#CHECK: 3e45c06a { memw(r5{{ *}}+{{ *}}#0) |= r10
+#CHECK: 3e45c06a { memw(r5+#0) |= r10
memw(r5)|=r10
-#CHECK: 3e21c07e { memh(r1{{ *}}+{{ *}}#0) |= r30
+#CHECK: 3e21c07e { memh(r1+#0) |= r30
memh(r1)|=r30
-#CHECK: 3e09c06f { memb(r9{{ *}}+{{ *}}#0) |= r15
+#CHECK: 3e09c06f { memb(r9+#0) |= r15
memb(r9)|=r15
-#CHECK: a157d100 { memh(r23{{ *}}+{{ *}}#0) = r17
+#CHECK: a157d100 { memh(r23+#0) = r17
memh(r23)=r17
-#CHECK: a10fd400 { memb(r15{{ *}}+{{ *}}#0) = r20
+#CHECK: a10fd400 { memb(r15+#0) = r20
memb(r15)=r20
-#CHECK: 9082c014 { r21:20 = memb_fifo(r2{{ *}}+{{ *}}#0)
+#CHECK: 9082c014 { r21:20 = memb_fifo(r2+#0)
r21:20=memb_fifo(r2)
-#CHECK: 9056c01c { r29:28 = memh_fifo(r22{{ *}}+{{ *}}#0)
+#CHECK: 9056c01c { r29:28 = memh_fifo(r22+#0)
r29:28=memh_fifo(r22)
-#CHECK: a1d8ca00 { memd(r24{{ *}}+{{ *}}#0) = r11:10
+#CHECK: a1d8ca00 { memd(r24+#0) = r11:10
memd(r24)=r11:10
-#CHECK: a19ed900 { memw(r30{{ *}}+{{ *}}#0) = r25
+#CHECK: a19ed900 { memw(r30+#0) = r25
memw(r30)=r25
-#CHECK: a169ce00 { memh(r9{{ *}}+{{ *}}#0) = r14.h
+#CHECK: a169ce00 { memh(r9+#0) = r14.h
memh(r9)=r14.h
-#CHECK: 3f07c06b { memb(r7{{ *}}+{{ *}}#0) = setbit(#11)
+#CHECK: 3f07c06b { memb(r7+#0) = setbit(#11)
memb(r7)=setbit(#11)
-#CHECK: 3f34c07b { memh(r20{{ *}}+{{ *}}#0) = setbit(#27)
+#CHECK: 3f34c07b { memh(r20+#0) = setbit(#27)
memh(r20)=setbit(#27)
-#CHECK: 3f1cc032 { memb(r28{{ *}}+{{ *}}#0){{ *}}-={{ *}}#18
+#CHECK: 3f1cc032 { memb(r28+#0) -= #18
memb(r28)-=#18
-#CHECK: 3f29c02a { memh(r9{{ *}}+{{ *}}#0){{ *}}-={{ *}}#10
+#CHECK: 3f29c02a { memh(r9+#0) -= #10
memh(r9)-=#10
-#CHECK: 3f4cc026 { memw(r12{{ *}}+{{ *}}#0){{ *}}-={{ *}}#6
+#CHECK: 3f4cc026 { memw(r12+#0) -= #6
memw(r12)-=#6
-#CHECK: 3f00c00c { memb(r0{{ *}}+{{ *}}#0){{ *}}+={{ *}}#12
+#CHECK: 3f00c00c { memb(r0+#0) += #12
memb(r0)+=#12
-#CHECK: 3f50c07a { memw(r16{{ *}}+{{ *}}#0) = setbit(#26)
+#CHECK: 3f50c07a { memw(r16+#0) = setbit(#26)
memw(r16)=setbit(#26)
-#CHECK: 3f1fc05d { memb(r31{{ *}}+{{ *}}#0) = clrbit(#29)
+#CHECK: 3f1fc05d { memb(r31+#0) = clrbit(#29)
memb(r31)=clrbit(#29)
-#CHECK: 3f20c05e { memh(r0{{ *}}+{{ *}}#0) = clrbit(#30)
+#CHECK: 3f20c05e { memh(r0+#0) = clrbit(#30)
memh(r0)=clrbit(#30)
-#CHECK: 3f42c059 { memw(r2{{ *}}+{{ *}}#0) = clrbit(#25)
+#CHECK: 3f42c059 { memw(r2+#0) = clrbit(#25)
memw(r2)=clrbit(#25)
-#CHECK: 39cfe072 if (!p3.new) memw(r15{{ *}}+{{ *}}#0)=#-14
+#CHECK: 39cfe072 if (!p3.new) memw(r15+#0) = #-14
{
p3=cmp.eq(r5,##-1997506977)
if (!p3.new) memw(r15)=#-14
}
-#CHECK: 3959e06b if (p3.new) memw(r25{{ *}}+{{ *}}#0)=#-21
+#CHECK: 3959e06b if (p3.new) memw(r25+#0) = #-21
{
p3=cmp.eq(r0,##1863618461)
if (p3.new) memw(r25)=#-21
}
-#CHECK: 4312c801 if (p1.new) r1 = memb(r18{{ *}}+{{ *}}#0)
+#CHECK: 4312c801 if (p1.new) r1 = memb(r18+#0)
{
if (p1.new) r1=memb(r18)
p1=cmp.eq(r23,##-1105571618)
}
-#CHECK: 4718d803 if (!p3.new) r3 = memb(r24{{ *}}+{{ *}}#0)
+#CHECK: 4718d803 if (!p3.new) r3 = memb(r24+#0)
{
if (!p3.new) r3=memb(r24)
p3=cmp.eq(r3,##-210870878)
}
-#CHECK: 4326c81b if (p1.new) r27 = memub(r6{{ *}}+{{ *}}#0)
+#CHECK: 4326c81b if (p1.new) r27 = memub(r6+#0)
{
if (p1.new) r27=memub(r6)
p1=cmp.eq(r29,##-188410493)
}
-#CHECK: 473ad00d if (!p2.new) r13 = memub(r26{{ *}}+{{ *}}#0)
+#CHECK: 473ad00d if (!p2.new) r13 = memub(r26+#0)
{
p2=cmp.eq(r30,##-1823852150)
if (!p2.new) r13=memub(r26)
}
-#CHECK: 4785d80e if (!p3.new) r14 = memw(r5{{ *}}+{{ *}}#0)
+#CHECK: 4785d80e if (!p3.new) r14 = memw(r5+#0)
{
if (!p3.new) r14=memw(r5)
p3=cmp.eq(r31,##-228524711)
}
-#CHECK: 438cc81a if (p1.new) r26 = memw(r12{{ *}}+{{ *}}#0)
+#CHECK: 438cc81a if (p1.new) r26 = memw(r12+#0)
{
if (p1.new) r26=memw(r12)
p1=cmp.eq(r11,##-485232313)
}
-#CHECK: 477dc019 if (!p0.new) r25 = memuh(r29{{ *}}+{{ *}}#0)
+#CHECK: 477dc019 if (!p0.new) r25 = memuh(r29+#0)
{
p0=cmp.eq(r23,##127565957)
if (!p0.new) r25=memuh(r29)
}
-#CHECK: 4377c807 if (p1.new) r7 = memuh(r23{{ *}}+{{ *}}#0)
+#CHECK: 4377c807 if (p1.new) r7 = memuh(r23+#0)
{
p1=cmp.eq(r30,##-222020054)
if (p1.new) r7=memuh(r23)
}
-#CHECK: 4754c81c if (!p1.new) r28 = memh(r20{{ *}}+{{ *}}#0)
+#CHECK: 4754c81c if (!p1.new) r28 = memh(r20+#0)
{
p1=cmp.eq(r18,##1159699785)
if (!p1.new) r28=memh(r20)
}
-#CHECK: 435ec01b if (p0.new) r27 = memh(r30{{ *}}+{{ *}}#0)
+#CHECK: 435ec01b if (p0.new) r27 = memh(r30+#0)
{
p0=cmp.eq(r7,##-1114567705)
if (p0.new) r27=memh(r30)
}
-#CHECK: 420dd100 if (p0.new) memb(r13{{ *}}+{{ *}}#0) = r17
+#CHECK: 420dd100 if (p0.new) memb(r13+#0) = r17
{
p0=cmp.eq(r21,##-1458796638)
if (p0.new) memb(r13)=r17
}
-#CHECK: 4601d602 if (!p2.new) memb(r1{{ *}}+{{ *}}#0) = r22
+#CHECK: 4601d602 if (!p2.new) memb(r1+#0) = r22
{
p2=cmp.eq(r20,##-824022439)
if (!p2.new) memb(r1)=r22
}
-#CHECK: 43dcd808 if (p3.new) r9:8 = memd(r28{{ *}}+{{ *}}#0)
+#CHECK: 43dcd808 if (p3.new) r9:8 = memd(r28+#0)
{
p3=cmp.eq(r13,##56660744)
if (p3.new) r9:8=memd(r28)
}
-#CHECK: 47d8c80e if (!p1.new) r15:14 = memd(r24{{ *}}+{{ *}}#0)
+#CHECK: 47d8c80e if (!p1.new) r15:14 = memd(r24+#0)
{
if (!p1.new) r15:14=memd(r24)
p1=cmp.eq(r15,##1536716489)
}
-#CHECK: 3918e045 if (p2.new) memb(r24{{ *}}+{{ *}}#0)=#-27
+#CHECK: 3918e045 if (p2.new) memb(r24+#0) = #-27
{
if (p2.new) memb(r24)=#-27
p2=cmp.eq(r21,##1741091811)
}
-#CHECK: 398fe04d if (!p2.new) memb(r15{{ *}}+{{ *}}#0)=#-19
+#CHECK: 398fe04d if (!p2.new) memb(r15+#0) = #-19
{
if (!p2.new) memb(r15)=#-19
p2=cmp.eq(r15,##779870261)
}
-#CHECK: 3931c04b if (p2.new) memh(r17{{ *}}+{{ *}}#0)=#11
+#CHECK: 3931c04b if (p2.new) memh(r17+#0) = #11
{
if (p2.new) memh(r17)=#11
p2=cmp.eq(r13,##-1171145798)
}
-#CHECK: 39aee056 if (!p2.new) memh(r14{{ *}}+{{ *}}#0)=#-10
+#CHECK: 39aee056 if (!p2.new) memh(r14+#0) = #-10
{
p2=cmp.eq(r23,##-633976762)
if (!p2.new) memh(r14)=#-10
}
-#CHECK: 4692df01 if (!p1.new) memw(r18{{ *}}+{{ *}}#0) = r31
+#CHECK: 4692df01 if (!p1.new) memw(r18+#0) = r31
{
if (!p1.new) memw(r18)=r31
p1=cmp.eq(r11,##-319375732)
}
-#CHECK: 428dc402 if (p2.new) memw(r13{{ *}}+{{ *}}#0) = r4
+#CHECK: 428dc402 if (p2.new) memw(r13+#0) = r4
{
if (p2.new) memw(r13)=r4
p2=cmp.eq(r18,##1895120239)
}
-#CHECK: 4670c300 if (!p0.new) memh(r16{{ *}}+{{ *}}#0) = r3.h
+#CHECK: 4670c300 if (!p0.new) memh(r16+#0) = r3.h
{
p0=cmp.eq(r25,##1348715015)
if (!p0.new) memh(r16)=r3.h
}
-#CHECK: 426ddf02 if (p2.new) memh(r13{{ *}}+{{ *}}#0) = r31.h
+#CHECK: 426ddf02 if (p2.new) memh(r13+#0) = r31.h
{
p2=cmp.eq(r25,##1085560657)
if (p2.new) memh(r13)=r31.h
}
-#CHECK: 464bcb01 if (!p1.new) memh(r11{{ *}}+{{ *}}#0) = r11
+#CHECK: 464bcb01 if (!p1.new) memh(r11+#0) = r11
{
p1=cmp.eq(r10,##1491455911)
if (!p1.new) memh(r11)=r11
}
-#CHECK: 4248d200 if (p0.new) memh(r8{{ *}}+{{ *}}#0) = r18
+#CHECK: 4248d200 if (p0.new) memh(r8+#0) = r18
{
p0=cmp.eq(r3,##687581160)
if (p0.new) memh(r8)=r18
}
-#CHECK: 42deca00 if (p0.new) memd(r30{{ *}}+{{ *}}#0) = r11:10
+#CHECK: 42deca00 if (p0.new) memd(r30+#0) = r11:10
{
if (p0.new) memd(r30)=r11:10
p0=cmp.eq(r28,##562796189)
}
-#CHECK: 46d5cc03 if (!p3.new) memd(r21{{ *}}+{{ *}}#0) = r13:12
+#CHECK: 46d5cc03 if (!p3.new) memd(r21+#0) = r13:12
{
if (!p3.new) memd(r21)=r13:12
p3=cmp.eq(r6,##-969273288)
}
-#CHECK: 42bad201 if (p1.new) memw(r26{{ *}}+{{ *}}#0) = r22.new
+#CHECK: 42bad201 if (p1.new) memw(r26+#0) = r22.new
{
if (p1.new) memw(r26)=r22.new
p1=cmp.eq(r0,##-1110065473)
r22=add(r28,r9)
}
-#CHECK: 46b9d201 if (!p1.new) memw(r25{{ *}}+{{ *}}#0) = r26.new
+#CHECK: 46b9d201 if (!p1.new) memw(r25+#0) = r26.new
{
p1=cmp.eq(r11,##-753121346)
r26=add(r19,r7)
if (!p1.new) memw(r25)=r26.new
}
-#CHECK: 40aad200 if (p0) memw(r10{{ *}}+{{ *}}#0) = r6.new
+#CHECK: 40aad200 if (p0) memw(r10+#0) = r6.new
{
r6=add(r30,r0)
if (p0) memw(r10)=r6.new
}
-#CHECK: 44a6d202 if (!p2) memw(r6{{ *}}+{{ *}}#0) = r4.new
+#CHECK: 44a6d202 if (!p2) memw(r6+#0) = r4.new
{
if (!p2) memw(r6)=r4.new
r4=add(r0,r3)
}
-#CHECK: 40b9c200 if (p0) memb(r25{{ *}}+{{ *}}#0) = r29.new
+#CHECK: 40b9c200 if (p0) memb(r25+#0) = r29.new
{
if (p0) memb(r25)=r29.new
r29=add(r27,r30)
}
-#CHECK: 44bec203 if (!p3) memb(r30{{ *}}+{{ *}}#0) = r8.new
+#CHECK: 44bec203 if (!p3) memb(r30+#0) = r8.new
{
if (!p3) memb(r30)=r8.new
r8=add(r24,r4)
}
-#CHECK: 46aecc01 if (!p1.new) memh(r14{{ *}}+{{ *}}#0) = r13.new
+#CHECK: 46aecc01 if (!p1.new) memh(r14+#0) = r13.new
{
if (!p1.new) memh(r14)=r13.new
r13=add(r21,r2)
p1=cmp.eq(r3,##-1529345886)
}
-#CHECK: 42bcca02 if (p2.new) memh(r28{{ *}}+{{ *}}#0) = r18.new
+#CHECK: 42bcca02 if (p2.new) memh(r28+#0) = r18.new
{
p2=cmp.eq(r15,##2048545649)
if (p2.new) memh(r28)=r18.new
r18=add(r9,r3)
}
-#CHECK: 46aac200 if (!p0.new) memb(r10{{ *}}+{{ *}}#0) = r30.new
+#CHECK: 46aac200 if (!p0.new) memb(r10+#0) = r30.new
{
p0=cmp.eq(r21,##-1160401822)
r30=add(r9,r22)
if (!p0.new) memb(r10)=r30.new
}
-#CHECK: 42b8c202 if (p2.new) memb(r24{{ *}}+{{ *}}#0) = r11.new
+#CHECK: 42b8c202 if (p2.new) memb(r24+#0) = r11.new
{
if (p2.new) memb(r24)=r11.new
p2=cmp.eq(r30,##1267977346)
r11=add(r8,r18)
}
-#CHECK: 44a3ca00 if (!p0) memh(r3{{ *}}+{{ *}}#0) = r28.new
+#CHECK: 44a3ca00 if (!p0) memh(r3+#0) = r28.new
{
r28=add(r16,r11)
if (!p0) memh(r3)=r28.new
}
-#CHECK: 40abca03 if (p3) memh(r11{{ *}}+{{ *}}#0) = r24.new
+#CHECK: 40abca03 if (p3) memh(r11+#0) = r24.new
{
if (p3) memh(r11)=r24.new
r24=add(r18,r19)
}
-#CHECK: a1abd200 memw(r11{{ *}}+{{ *}}#0) = r5.new
+#CHECK: a1abd200 memw(r11+#0) = r5.new
{
memw(r11)=r5.new
r5=add(r0,r10)
}
-#CHECK: a1a2ca00 memh(r2{{ *}}+{{ *}}#0) = r18.new
+#CHECK: a1a2ca00 memh(r2+#0) = r18.new
{
r18=add(r27,r18)
memh(r2)=r18.new
}
-#CHECK: a1bac200 memb(r26{{ *}}+{{ *}}#0) = r15.new
+#CHECK: a1bac200 memb(r26+#0) = r15.new
{
r15=add(r22,r17)
memb(r26)=r15.new
}
-#CHECK: d328ce1c { r29:28{{ *}}={{ *}}vsubub(r15:14, r9:8)
+#CHECK: d328ce1c { r29:28 = vsubub(r15:14,r9:8)
r29:28=vsubb(r15:14,r9:8)
-#CHECK: 8c5ed60c { r12{{ *}}={{ *}}asr(r30, #22):rnd
+#CHECK: 8c5ed60c { r12 = asr(r30,#22):rnd
r12=asrrnd(r30,#23)
-#CHECK: ed1ec109 { r9{{ *}}={{ *}}mpyi(r30, r1)
+#CHECK: ed1ec109 { r9 = mpyi(r30,r1)
r9=mpyui(r30,r1)
-#CHECK: e010d787 { r7{{ *}}={{ *}}+{{ *}}mpyi(r16, #188)
+#CHECK: e010d787 { r7 = +mpyi(r16,#188)
r7=mpyi(r16,#188)
-#CHECK: d206eea2 { p2{{ *}}={{ *}}boundscheck(r7:6, r15:14):raw:hi
+#CHECK: d206eea2 { p2 = boundscheck(r7:6,r15:14):raw:hi
p2=boundscheck(r7,r15:14)
-#CHECK: f27ac102 { p2{{ *}}={{ *}}cmp.gtu(r26, r1)
+#CHECK: f27ac102 { p2 = cmp.gtu(r26,r1)
p2=cmp.ltu(r1,r26)
-#CHECK: f240df00 { p0{{ *}}={{ *}}cmp.gt(r0, r31)
+#CHECK: f240df00 { p0 = cmp.gt(r0,r31)
p0=cmp.lt(r31,r0)
-#CHECK: 7586cc01 { p1{{ *}}={{ *}}cmp.gtu(r6, #96)
+#CHECK: 7586cc01 { p1 = cmp.gtu(r6,#96)
p1=cmp.geu(r6,#97)
-#CHECK: 755dc9a2 { p2{{ *}}={{ *}}cmp.gt(r29, #77)
+#CHECK: 755dc9a2 { p2 = cmp.gt(r29,#77)
p2=cmp.ge(r29,#78)
-#CHECK: d310d60a { r11:10{{ *}}={{ *}}vaddub(r17:16, r23:22)
+#CHECK: d310d60a { r11:10 = vaddub(r17:16,r23:22)
r11:10=vaddb(r17:16,r23:22)
-#CHECK: 8753d1e6 { r6{{ *}}={{ *}}tableidxh(r19, #7, #17):raw
+#CHECK: 8753d1e6 { r6 = tableidxh(r19,#7,#17):raw
r6=tableidxh(r19,#7,#18)
-#CHECK: 8786d277 { r23{{ *}}={{ *}}tableidxw(r6, #3, #18):raw
+#CHECK: 8786d277 { r23 = tableidxw(r6,#3,#18):raw
r23=tableidxw(r6,#3,#20)
-#CHECK: 7c4dfff8 { r25:24{{ *}}={{ *}}combine(#-1, #-101)
+#CHECK: 7c4dfff8 { r25:24 = combine(#-1,#-101)
r25:24=#-101
-#CHECK: 8866c09a { r26{{ *}}={{ *}}vasrhub(r7:6, #0):raw
+#CHECK: 8866c09a { r26 = vasrhub(r7:6,#0):raw
r26=vasrhub(r7:6,#1):rnd:sat
-#CHECK: 7654c016 { r22{{ *}}={{ *}}sub(#0, r20)
+#CHECK: 7654c016 { r22 = sub(#0,r20)
r22=neg(r20)
-#CHECK: 802cc808 { r9:8{{ *}}={{ *}}vasrh(r13:12, #8):raw
+#CHECK: 802cc808 { r9:8 = vasrh(r13:12,#8):raw
r9:8=vasrh(r13:12,#9):rnd
-#CHECK: 7614dfe5 { r5{{ *}}={{ *}}{{zxtb\(r20\)|and\(r20, *#255\)}}
+#CHECK: 7614dfe5 { r5 = {{zxtb\(r20\)|and\(r20,#255\)}}
r5=zxtb(r20)
#CHECK: 00ab68e2 immext(#179976320)
-#CHECK: 7500c500 p0{{ *}}={{ *}}cmp.eq(r0, ##179976360)
+#CHECK: 7500c500 p0 = cmp.eq(r0,##179976360)
{
if (p0.new) r11=r26
p0=cmp.eq(r0,##179976360)
}
-#CHECK: 74f9c00f { if (!p3) r15{{ *}}={{ *}}r25
+#CHECK: 74f9c00f { if (!p3) r15 = add(r25,#0)
if (!p3) r15=r25
-#CHECK: 7425c005 { if (p1) r5{{ *}}={{ *}}r5
+#CHECK: 7425c005 { if (p1) r5 = add(r5,#0)
if (p1) r5=r5
-#CHECK: e9badae2 { r2{{ *}}={{ *}}vrcmpys(r27:26, r27:26):<<1:rnd:sat:raw:lo
+#CHECK: e9badae2 { r2 = vrcmpys(r27:26,r27:26):<<1:rnd:sat:raw:lo
r2=vrcmpys(r27:26,r26):<<1:rnd:sat
-#CHECK: fd13f20e if (p0.new) r15:14{{ *}}={{ *}}{{r19:18|combine\(r19, *r18\)}}
+#CHECK: fd13f20e if (p0.new) r15:14 = {{r19:18|combine\(r19,r18\)}}
{
p0=cmp.eq(r26,##1766934387)
if (p0.new) r15:14=r19:18
}
-#CHECK: fd07c6c2 { if (!p2) r3:2{{ *}}={{ *}}{{r7:6|combine\(r7, *r6\)}}
+#CHECK: fd07c6c2 { if (!p2) r3:2 = {{r7:6|combine\(r7,r6\)}}
if (!p2) r3:2=r7:6
-#CHECK: fd0dcc7e { if (p3) r31:30{{ *}}={{ *}}{{r13:12|combine\(r13, *r12\)}}
+#CHECK: fd0dcc7e { if (p3) r31:30 = {{r13:12|combine\(r13,r12\)}}
if (p3) r31:30=r13:12
-#CHECK: 748ae015 if (!p0.new) r21{{ *}}={{ *}}r10
+#CHECK: 748ae015 if (!p0.new) r21 = add(r10,#0)
{
p0=cmp.eq(r23,##805633208)
if (!p0.new) r21=r10
}
-#CHECK: d36ec6c8 { r9:8{{ *}}={{ *}}add(r15:14, r7:6):raw:lo
+#CHECK: d36ec6c8 { r9:8 = add(r15:14,r7:6):raw:lo
r9:8=add(r14,r7:6)
#CHECK: 01e65477 immext(#509943232)
-#CHECK: 7516c3a3 p3{{ *}}={{ *}}cmp.eq(r22, ##509943261)
+#CHECK: 7516c3a3 p3 = cmp.eq(r22,##509943261)
{
- if (!p3.new) r9:8=r25:24
+ if (!p3.new) r9:8 = r25:24
p3=cmp.eq(r22,##509943261)
}
-#CHECK: 87e0d5e5 { r5{{ *}}={{ *}}tableidxd(r0, #15, #21):raw
+#CHECK: 87e0d5e5 { r5 = tableidxd(r0,#15,#21):raw
r5=tableidxd(r0,#15,#24)
-#CHECK: 8701db65 { r5{{ *}}={{ *}}tableidxb(r1, #3, #27):raw
+#CHECK: 8701db65 { r5 = tableidxb(r1,#3,#27):raw
r5=tableidxb(r1,#3,#27)
-#CHECK: 767affe3 { r3{{ *}}={{ *}}sub(#-1, r26)
+#CHECK: 767affe3 { r3 = sub(#-1,r26)
r3=not(r26)
-#CHECK: f51ddc06 { r7:6{{ *}}={{ *}}{{r29:28|combine\(r29, *r28\)}}
+#CHECK: f51ddc06 { r7:6 = {{r29:28|combine\(r29,r28\)}}
r7:6=r29:28
-#CHECK: 9406c000 { dcfetch(r6 + #0)
+#CHECK: 9406c000 { dcfetch(r6+#0)
dcfetch(r6)
-#CHECK: 6b20c001 { p1{{ *}}={{ *}}or(p0, p0)
+#CHECK: 6b20c001 { p1 = or(p0,p0)
p1=p0
-#CHECK: eafcdc82 { r3:2 += vrcmpys(r29:28, r29:28):<<1:sat:raw:lo
+#CHECK: eafcdc82 { r3:2 += vrcmpys(r29:28,r29:28):<<1:sat:raw:lo
r3:2+=vrcmpys(r29:28,r28):<<1:sat
-#CHECK: e8ead092 { r19:18{{ *}}={{ *}}vrcmpys(r11:10, r17:16):<<1:sat:raw:lo
+#CHECK: e8ead092 { r19:18 = vrcmpys(r11:10,r17:16):<<1:sat:raw:lo
r19:18=vrcmpys(r11:10,r16):<<1:sat
-#CHECK: 9082c014 { r21:20{{ *}}={{ *}}memb_fifo(r2{{ *}}+{{ *}}#0)
+#CHECK: 9082c014 { r21:20 = memb_fifo(r2+#0)
r21:20=memb_fifo(r2)
-#CHECK: 9056c01c { r29:28{{ *}}={{ *}}memh_fifo(r22{{ *}}+{{ *}}#0)
-r29:28=memh_fifo(r22) \ No newline at end of file
+#CHECK: 9056c01c { r29:28 = memh_fifo(r22+#0)
+r29:28=memh_fifo(r22)
diff --git a/test/MC/Hexagon/bug20416.s b/test/MC/Hexagon/bug20416.s
new file mode 100644
index 000000000000..530a4e64778a
--- /dev/null
+++ b/test/MC/Hexagon/bug20416.s
@@ -0,0 +1,13 @@
+# RUN: not llvm-mc -triple=hexagon -mv60 -mhvx -filetype=asm %s 2>%t; FileCheck %s --check-prefix=CHECK-V60-ERROR <%t
+# RUN: llvm-mc -triple=hexagon -mv62 -mhvx -filetype=asm %s | FileCheck %s
+
+// For this v60+/HVX instruction sequence, make sure assembly fails with
+// v60 but passes with v62. This is because the instruction uses a
+// different itinerary on v60 than on v62.
+{
+ v0.h=vsat(v5.w,v9.w)
+ v16.h=vsat(v6.w,v26.w)
+}
+# CHECK-V60-ERROR: {{[Ee]}}rror: invalid instruction packet: slot error
+# CHECK: v0.h = vsat(v5.w,v9.w)
+# CHECK: v16.h = vsat(v6.w,v26.w)
diff --git a/test/MC/Hexagon/capitalizedEndloop.s b/test/MC/Hexagon/capitalizedEndloop.s
index d20ff34de6fe..c7a25d9fb27b 100644
--- a/test/MC/Hexagon/capitalizedEndloop.s
+++ b/test/MC/Hexagon/capitalizedEndloop.s
@@ -15,7 +15,7 @@
{ R0 = mpyi(R0,R0) } : ENDLOOP0 : ENDLOOP1
{ R0 = mpyi(R0,R0) }:endloop0:endloop1
-# CHECK: r0 = mpyi(r0, r0)
+# CHECK: r0 = mpyi(r0,r0)
# CHECK: :endloop0
# CHECK: :endloop0
# CHECK: :endloop0
diff --git a/test/MC/Hexagon/common-redeclare.s b/test/MC/Hexagon/common-redeclare.s
new file mode 100644
index 000000000000..52b77992a871
--- /dev/null
+++ b/test/MC/Hexagon/common-redeclare.s
@@ -0,0 +1,6 @@
+# RUN: llvm-mc -arch=hexagon -filetype=obj %s | llvm-objdump -t - | FileCheck %s
+
+# CHECK: 00000062 g *COM* 00000008 quartet_table_isqrt
+
+.common quartet_table_isqrt, 98, 8
+.common quartet_table_isqrt, 98, 8
diff --git a/test/MC/Hexagon/dcfetch-symbol.s b/test/MC/Hexagon/dcfetch-symbol.s
new file mode 100644
index 000000000000..8309439a2aaa
--- /dev/null
+++ b/test/MC/Hexagon/dcfetch-symbol.s
@@ -0,0 +1,8 @@
+# RUN: not llvm-mc -arch=hexagon -filetype=obj %s
+
+#CHECK: 9400c000 { dcfetch(r0 + #0) }
+
+junk:
+{
+ dcfetch(r0 + #junk)
+}
diff --git a/test/MC/Hexagon/decode_acc_type.s b/test/MC/Hexagon/decode_acc_type.s
new file mode 100644
index 000000000000..84d0abc0e18d
--- /dev/null
+++ b/test/MC/Hexagon/decode_acc_type.s
@@ -0,0 +1,150 @@
+# RUN: llvm-mc -arch=hexagon -filetype=obj %s | llvm-objdump -d - | FileCheck %s
+#
+
+# Currently the checks do not care whether an immediate is printed with
+# one # or two #'s.
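+# (The {{..}} wildcard in the CHECK lines below is what keeps the exact
+# "#"/"##" spelling out of the match.)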
+
+ r7 = memw(gp+#192)
+# CHECK: r7 = memw(gp+#192)
+
+ r3:2 = memd(gp+#64)
+# CHECK: r3:2 = memd(gp+#64)
+
+ { p3 = p1; r8 = #2; if (p3.new) memw(##8) = r8.new }
+# CHECK: if (p3.new) memw({{..}}8) = r8
+
+ { p3 = p1; r8 = #2; if (!p3.new) memw(##8) = r8.new }
+# CHECK: if (!p3.new) memw({{..}}8) = r8.new
+
+ { r8 = #2; if (p3) memw(##8) = r8.new }
+# CHECK: if (p3) memw({{..}}8) = r8.new
+
+ { r8 = #2; if (!p3) memw(##8) = r8.new }
+# CHECK: if (!p3) memw({{..}}8) = r8.new
+
+ { p3 = p1; r8 = #2; if (p3.new) memh(##8) = r8.new }
+# CHECK: if (p3.new) memh({{..}}8) = r8.new
+
+ { p3 = p1; r8 = #2; if (!p3.new) memh(##8) = r8.new }
+# CHECK: if (!p3.new) memh({{..}}8) = r8.new
+
+ { r8 = #2; if (p3) memh(##8) = r8.new }
+# CHECK: memh({{..}}8) = r8.new
+
+ { r8 = #2; if (!p3) memh(##8) = r8.new }
+# CHECK: if (!p3) memh({{..}}8) = r8.new
+
+ { p3 = p1; r8 = #2; if (p3.new) memb(##8) = r8.new }
+# CHECK: if (p3.new) memb({{..}}8) = r8.new
+
+ { p3 = p1; r8 = #2; if (!p3.new) memb(##8) = r8.new }
+# CHECK: if (!p3.new) memb({{..}}8) = r8.new
+
+ { r8 = #2; if (p3) memb(##8) = r8.new }
+# CHECK: if (p3) memb({{..}}8) = r8.new
+
+ { r8 = #2; if (!p3) memb(##8) = r8.new }
+# CHECK: if (!p3) memb({{..}}8) = r8.new
+
+ { if (p3) memw(##8) = r8 }
+# CHECK: if (p3) memw({{..}}8) = r8
+
+ { if (!p3) memw(##8) = r8 }
+# CHECK: if (!p3) memw({{..}}8) = r8
+
+ { p3 = p1; if (p3.new) memw(##8) = r8 }
+# CHECK: if (p3.new) memw({{..}}8) = r8
+
+ { p3 = p1; if (!p3.new) memw(##8) = r8 }
+# CHECK: if (!p3.new) memw({{..}}8) = r8
+
+
+ if (!p2) r14 = memb(##48)
+# CHECK: if (!p2) r14 = memb({{..}}48)
+
+ if (p2) r14 = memb(##48)
+# CHECK: if (p2) r14 = memb({{..}}48)
+
+ {p2 = p0; if (!p2.new) r14 = memb(##48) }
+# CHECK: if (!p2.new) r14 = memb({{..}}48)
+
+ {p3 = p2; if (p3.new) r14 = memb(##48) }
+# CHECK: if (p3.new) r14 = memb({{..}}48)
+
+
+ if (!p2) r14 = memh(##48)
+# CHECK: if (!p2) r14 = memh({{..}}48)
+
+ if (p2) r14 = memh(##48)
+# CHECK: if (p2) r14 = memh({{..}}48)
+
+ {p2 = p0; if (!p2.new) r14 = memh(##48) }
+# CHECK: if (!p2.new) r14 = memh({{..}}48)
+
+ {p3 = p2; if (p3.new) r14 = memh(##48) }
+# CHECK: if (p3.new) r14 = memh({{..}}48)
+
+
+ if (!p2) r14 = memub(##48)
+# CHECK: if (!p2) r14 = memub({{..}}48)
+
+ if (p2) r14 = memub(##48)
+# CHECK: if (p2) r14 = memub({{..}}48)
+
+ {p2 = p0; if (!p2.new) r14 = memub(##48) }
+# CHECK: if (!p2.new) r14 = memub({{..}}48)
+
+ {p3 = p2; if (p3.new) r14 = memub(##48) }
+# CHECK: if (p3.new) r14 = memub({{..}}48)
+
+
+ if (!p2) r14 = memuh(##48)
+# CHECK: if (!p2) r14 = memuh({{..}}48)
+
+ if (p2) r14 = memuh(##48)
+# CHECK: if (p2) r14 = memuh({{..}}48)
+
+ {p2 = p0; if (!p2.new) r14 = memuh(##48) }
+# CHECK: if (!p2.new) r14 = memuh({{..}}48)
+
+ {p3 = p2; if (p3.new) r14 = memuh(##48) }
+# CHECK: r14 = memuh({{..}}48)
+
+
+ if (!p2) r14 = memw(##48)
+# CHECK: if (!p2) r14 = memw({{..}}48)
+
+ if (p2) r14 = memw(##48)
+# CHECK: if (p2) r14 = memw({{..}}48)
+
+ {p2 = p0; if (!p2.new) r14 = memw(##48) }
+# CHECK: if (!p2.new) r14 = memw({{..}}48)
+
+ {p3 = p2; if (p3.new) r14 = memw(##48) }
+# CHECK: if (p3.new) r14 = memw({{..}}48)
+
+ r7 = memh(##32)
+# CHECK: r7 = memh(##32)
+ r7 = memuh(##32)
+# CHECK: r7 = memuh(##32)
+
+ memd(##32) = r15:14
+# CHECK: memd(##32) = r15:14
+
+ {r2 = #9; memw(##32) = r2.new}
+# CHECK: memw(##32) = r2.new
+
+ {r2 = #9; memb(##32) = r2.new}
+# CHECK: memb(##32) = r2.new
+
+ memw(##32) = r15
+# CHECK: memw(##32) = r15
+
+ memh(##32) = r16
+# CHECK: memh(##32) = r16
+
+ memb(##32) = r17
+# CHECK: memb(##32) = r17
+
+
+ r3:2 = interleave(r31:30)
+# CHECK: r3:2 = interleave(r31:30)
diff --git a/test/MC/Hexagon/dis-duplex-p0.s b/test/MC/Hexagon/dis-duplex-p0.s
index dc6a1260145e..4ee518fa2a31 100644
--- a/test/MC/Hexagon/dis-duplex-p0.s
+++ b/test/MC/Hexagon/dis-duplex-p0.s
@@ -1,7 +1,10 @@
// RUN: llvm-mc -arch=hexagon -filetype=obj -o - %s | llvm-objdump -d - | FileCheck %s
-// REQUIRES: asserts
- .text
-// CHECK: { r7 = #-1; r7 = #-1 }
- .long 0x3a373a27
-// CHECK: { if (!p0.new) r7 = #0; if (p0.new) r7 = #0 }
- .long 0x3a573a47
+
+{ r7 = #-1
+ r6 = #-1 }
+// CHECK: { r7 = #-1; r6 = #-1 }
+
+{ p0 = r0
+ if (p0.new) r7 = #0
+ if (!p0.new) r7 = #0 }
+// CHECK: if (p0.new) r7 = #0; if (!p0.new) r7 = #0
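+
+// (Each brace pair above is expected to assemble to a single 32-bit duplex
+// word holding both sub-instructions.)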
diff --git a/test/MC/Hexagon/duplex-registers.s b/test/MC/Hexagon/duplex-registers.s
index f0cde7f9628d..2a02b4534f29 100644
--- a/test/MC/Hexagon/duplex-registers.s
+++ b/test/MC/Hexagon/duplex-registers.s
@@ -7,4 +7,4 @@
}
# CHECK: 289808ba
-# CHECK: r16 = memuh(r17 + #0);{{ *}}r18 = memuh(r19 + #0)
+# CHECK: r16 = memuh(r17+#0);{{ *}}r18 = memuh(r19+#0)
diff --git a/test/MC/Hexagon/elf-flags.s b/test/MC/Hexagon/elf-flags.s
index 94dce8152144..0d2f007cb3da 100644
--- a/test/MC/Hexagon/elf-flags.s
+++ b/test/MC/Hexagon/elf-flags.s
@@ -2,8 +2,10 @@
# RUN: llvm-mc -arch=hexagon -mcpu=hexagonv5 --filetype=obj %s -o - | llvm-readobj -file-headers -elf-output-style=GNU | FileCheck --check-prefix=CHECK-V5 %s
# RUN: llvm-mc -arch=hexagon -mcpu=hexagonv55 --filetype=obj %s -o - | llvm-readobj -file-headers -elf-output-style=GNU | FileCheck --check-prefix=CHECK-V55 %s
# RUN: llvm-mc -arch=hexagon -mcpu=hexagonv60 --filetype=obj %s -o - | llvm-readobj -file-headers -elf-output-style=GNU | FileCheck --check-prefix=CHECK-V60 %s
+# RUN: llvm-mc -arch=hexagon -mcpu=hexagonv62 --filetype=obj %s -o - | llvm-readobj -file-headers -elf-output-style=GNU | FileCheck --check-prefix=CHECK-V62 %s
# CHECK-V4: Flags: 0x3
# CHECK-V5: Flags: 0x4
# CHECK-V55: Flags: 0x5
# CHECK-V60: Flags: 0x60
+# CHECK-V62: Flags: 0x62
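+
+# (The e_flags value encodes the CPU revision; from v60 on it is the hex
+# revision number itself.)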
diff --git a/test/MC/Hexagon/equ.s b/test/MC/Hexagon/equ.s
new file mode 100644
index 000000000000..fbf09edbbc1e
--- /dev/null
+++ b/test/MC/Hexagon/equ.s
@@ -0,0 +1,9 @@
+# RUN: not llvm-mc -arch=hexagon %s 2> %t
+# RUN: FileCheck < %t %s
+
+.equ a, 0
+.set a, 1
+.equ a, 2
+.equiv a, 3
+# CHECK: {{[Ee]}}rror: redefinition of 'a'
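+# .equ and .set allow a symbol to be redefined; .equiv does not, so only
+# the final line is diagnosed.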
+
diff --git a/test/MC/Hexagon/ext-callt-rel.s b/test/MC/Hexagon/ext-callt-rel.s
new file mode 100644
index 000000000000..344a8fbc11b9
--- /dev/null
+++ b/test/MC/Hexagon/ext-callt-rel.s
@@ -0,0 +1,6 @@
+# RUN: llvm-mc -arch=hexagon -filetype=obj %s -o - | llvm-objdump -r - | FileCheck %s
+
+if (p0) call foo
+#CHECK: R_HEX_B32_PCREL_X
+#CHECK: R_HEX_B15_PCREL_X
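+# (The call target is carried by an immext word relocated with
+# R_HEX_B32_PCREL_X plus the low bits in the call itself via
+# R_HEX_B15_PCREL_X.)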
+
diff --git a/test/MC/Hexagon/extended_relocations.ll b/test/MC/Hexagon/extended_relocations.ll
new file mode 100644
index 000000000000..a16185c39945
--- /dev/null
+++ b/test/MC/Hexagon/extended_relocations.ll
@@ -0,0 +1,23 @@
+; RUN: llc -filetype=obj -march=hexagon %s -o - | llvm-objdump -r - | FileCheck %s
+
+; CHECK: RELOCATION RECORDS FOR [.rela.text]:
+; CHECK: 00000000 R_HEX_B22_PCREL printf
+; CHECK: 00000004 R_HEX_32_6_X .rodata.str1.1
+; CHECK: 00000008 R_HEX_6_X .rodata.str1.1
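+
+; (The extended string address is split between the immext word, fixed up
+; by R_HEX_32_6_X, and the low 6 bits in the consuming instruction, fixed
+; up by R_HEX_6_X.)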
+
+target triple = "hexagon-unknown--elf"
+
+@.str = private unnamed_addr constant [10 x i8] c"cxfir.log\00", align 1
+
+declare i32 @printf(i8*, ...) #1
+
+; Function Attrs: nounwind
+define i32 @main() #0 {
+entry:
+ %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i32 0, i32 0))
+ ret i32 0
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
diff --git a/test/MC/Hexagon/extender.s b/test/MC/Hexagon/extender.s
new file mode 100644
index 000000000000..f807dbe0cdd7
--- /dev/null
+++ b/test/MC/Hexagon/extender.s
@@ -0,0 +1,210 @@
+# RUN: llvm-mc -arch=hexagon -filetype=obj %s | llvm-objdump -d - | FileCheck %s
+#
+
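+# Each absolute (##) access below is emitted as an immext extender word
+# followed by the memory instruction; the GP-relative forms need no
+# extender.
+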
+# STrib_abs_V4
+{
+ memb(##1024056) = r0
+}
+
+# CHECK: immext(#1024000)
+# CHECK: memb(##1024056) = r0
+
+# S2_storerbgp
+{
+ memb(GP + #56) = r0
+}
+
+# CHECK: memb(gp+#56) = r0
+
+# STrih_abs_V4
+{
+ memh(##1024056) = r0
+}
+
+# CHECK: immext(#1024000)
+# CHECK: memh(##1024056) = r0
+
+# S2_storerhgp
+{
+ memh(GP + #56) = r0
+}
+
+# CHECK: memh(gp+#56) = r0
+
+# STriw_abs_V4
+{
+ memw(##1024056) = r0
+}
+
+# CHECK: immext(#1024000)
+# CHECK: memw(##1024056) = r0
+
+# S2_storerigp
+{
+ memw(GP + #56) = r0
+}
+
+# CHECK: memw(gp+#56) = r0
+
+# STrib_abs_nv_V4
+{
+ r0 = #1
+ memb(##1024056) = r0.new
+}
+
+# CHECK: r0 = #1
+# CHECK: immext(#1024000)
+# CHECK: memb(##1024056) = r0.new
+
+# S2_storerbnewgp
+{
+ r0 = #1
+ memb(GP + #56) = r0.new
+}
+
+# CHECK: r0 = #1
+# CHECK: memb(gp+#56) = r0.new
+
+# STrih_abs_nv_V4
+{
+ r0 = #1
+ memh(##1024056) = r0.new
+}
+
+# CHECK: r0 = #1
+# CHECK: immext(#1024000)
+# CHECK: memh(##1024056) = r0.new
+
+# S2_storerhnewgp
+{
+ r0 = #1
+ memh(GP + #56) = r0.new
+}
+
+# CHECK: r0 = #1
+# CHECK: memh(gp+#56) = r0.new
+
+# STriw_abs_nv_V4
+{
+ r0 = #1
+ memw(##1024056) = r0.new
+}
+
+# CHECK: r0 = #1
+# CHECK: immext(#1024000)
+# CHECK: memw(##1024056) = r0.new
+
+# S2_storerinewgp
+{
+ r0 = #1
+ memw(GP + #56) = r0.new
+}
+
+# CHECK: r0 = #1
+# CHECK: memw(gp+#56) = r0.new
+
+# STrid_abs_V4
+{
+ memd(##1024056) = r1:0
+}
+
+# CHECK: immext(#1024000)
+# CHECK: memd(##1024056) = r1:0
+
+# S2_storerdgp
+{
+ memd(GP + #56) = r1:0
+}
+
+# CHECK: memd(gp+#56) = r1:0
+
+# LDrib_abs_V4
+{
+ r0 = memb(##1024056)
+}
+
+# CHECK: immext(#1024000)
+# CHECK: r0 = memb(##1024056)
+
+# LDb_GP_V4
+{
+ r0 = memb(GP + #56)
+}
+
+# CHECK: r0 = memb(gp+#56)
+
+# LDriub_abs_V4
+{
+ r0 = memub(##1024056)
+}
+
+# CHECK: immext(#1024000)
+# CHECK: r0 = memub(##1024056)
+
+# LDub_GP_V4
+{
+ r0 = memub(GP + #56)
+}
+
+# CHECK: r0 = memub(gp+#56)
+
+# LDrih_abs_V4
+{
+ r0 = memh(##1024056)
+}
+
+# CHECK: immext(#1024000)
+# CHECK: r0 = memh(##1024056)
+
+# LDh_GP_V4
+{
+ r0 = memh(GP + #56)
+}
+
+# CHECK: r0 = memh(gp+#56)
+
+# LDriuh_abs_V4
+{
+ r0 = memuh(##1024056)
+}
+
+# CHECK: immext(#1024000)
+# CHECK: r0 = memuh(##1024056)
+
+# LDuh_GP_V4
+{
+ r0 = memuh(GP + #56)
+}
+
+# CHECK: r0 = memuh(gp+#56)
+
+# LDriw_abs_V4
+{
+ r0 = memw(##1024056)
+}
+
+# CHECK: immext(#1024000)
+# CHECK: r0 = memw(##1024056)
+
+# LDw_GP_V4
+{
+ r0 = memw(GP + #56)
+}
+
+# CHECK: r0 = memw(gp+#56)
+
+# LDrid_abs_V4
+{
+ r1:0 = memd(##1024056)
+}
+
+# CHECK: immext(#1024000)
+# CHECK: r1:0 = memd(##1024056)
+
+# LDd_GP_V4
+{
+ r1:0 = memd(GP + #56)
+}
+
+# CHECK: r1:0 = memd(gp+#56)
+
diff --git a/test/MC/Hexagon/fixups.s b/test/MC/Hexagon/fixups.s
index 059a18fa8822..33913362df7b 100644
--- a/test/MC/Hexagon/fixups.s
+++ b/test/MC/Hexagon/fixups.s
@@ -3,7 +3,7 @@
.text
# CHECK-LABEL: 0:
# CHECK: 2442e106
-# CHECK: if (!cmp.eq(r1.new, #1)) jump:t 0xc
+# CHECK: if (!cmp.eq(r1.new,#1)) jump:t 0xc
{
r1 = zxth(r2)
if (!cmp.eq(r1.new, #1)) jump:t .L1
@@ -15,7 +15,7 @@
# CHECK: 00004020
# CHECK: immext(#2048)
# CHECK: 2442e118
-# CHECK: if (!cmp.eq(r1.new, #1)) jump:t 0x81c
+# CHECK: if (!cmp.eq(r1.new,#1)) jump:t 0x81c
{
r1 = zxth(r2)
if (!cmp.eq(r1.new, #1)) jump:t .L2
diff --git a/test/MC/Hexagon/iconst.s b/test/MC/Hexagon/iconst.s
index 277c4de86923..917cc64ba953 100644
--- a/test/MC/Hexagon/iconst.s
+++ b/test/MC/Hexagon/iconst.s
@@ -1,6 +1,6 @@
# RUN: llvm-mc -triple=hexagon -filetype=obj %s | llvm-objdump -d -r - | FileCheck %s
a:
-# CHECK: r0 = add(r0, #0)
+# CHECK: r0 = add(r0,#0)
# CHECK: R_HEX_23_REG
-r0 = iconst(#a) \ No newline at end of file
+r0 = iconst(#a)
diff --git a/test/MC/Hexagon/inst_cmp_eq.ll b/test/MC/Hexagon/inst_cmp_eq.ll
index 98202368aff3..5c483451d713 100644
--- a/test/MC/Hexagon/inst_cmp_eq.ll
+++ b/test/MC/Hexagon/inst_cmp_eq.ll
@@ -7,6 +7,6 @@ define i1 @foo (i32 %a, i32 %b)
ret i1 %1
}
-; CHECK: p0 = cmp.eq(r0, r1)
+; CHECK: p0 = cmp.eq(r0,r1)
; CHECK: r0 = p0
; CHECK: jumpr r31
diff --git a/test/MC/Hexagon/inst_cmp_eqi.ll b/test/MC/Hexagon/inst_cmp_eqi.ll
index 612dfdc8f23d..5d8132b70bb9 100644
--- a/test/MC/Hexagon/inst_cmp_eqi.ll
+++ b/test/MC/Hexagon/inst_cmp_eqi.ll
@@ -7,6 +7,6 @@ define i1 @foo (i32 %a)
ret i1 %1
}
-; CHECK: p0 = cmp.eq(r0, #42)
+; CHECK: p0 = cmp.eq(r0,#42)
; CHECK: r0 = p0
; CHECK: jumpr r31
diff --git a/test/MC/Hexagon/inst_cmp_gt.ll b/test/MC/Hexagon/inst_cmp_gt.ll
index 3ce1c0addad7..45a4e33e940f 100644
--- a/test/MC/Hexagon/inst_cmp_gt.ll
+++ b/test/MC/Hexagon/inst_cmp_gt.ll
@@ -7,6 +7,6 @@ define i1 @foo (i32 %a, i32 %b)
ret i1 %1
}
-; CHECK: p0 = cmp.gt(r0, r1)
+; CHECK: p0 = cmp.gt(r0,r1)
; CHECK: r0 = p0
-; CHECK: jumpr r31 } \ No newline at end of file
+; CHECK: jumpr r31 }
diff --git a/test/MC/Hexagon/inst_cmp_gti.ll b/test/MC/Hexagon/inst_cmp_gti.ll
index f3c13a2fb96e..67cdc4c909bb 100644
--- a/test/MC/Hexagon/inst_cmp_gti.ll
+++ b/test/MC/Hexagon/inst_cmp_gti.ll
@@ -7,6 +7,6 @@ define i1 @foo (i32 %a)
ret i1 %1
}
-; CHECK: p0 = cmp.gt(r0, #42)
+; CHECK: p0 = cmp.gt(r0,#42)
; CHECK: r0 = p0
; CHECK: jumpr r31
diff --git a/test/MC/Hexagon/inst_cmp_lt.ll b/test/MC/Hexagon/inst_cmp_lt.ll
index 80ba16f41418..b19a4a676aaf 100644
--- a/test/MC/Hexagon/inst_cmp_lt.ll
+++ b/test/MC/Hexagon/inst_cmp_lt.ll
@@ -7,6 +7,6 @@ define i1 @foo (i32 %a, i32 %b)
ret i1 %1
}
-; CHECK: p0 = cmp.gt(r1, r0)
+; CHECK: p0 = cmp.gt(r1,r0)
; CHECK: r0 = p0
; CHECK: jumpr r31
diff --git a/test/MC/Hexagon/inst_cmp_ugt.ll b/test/MC/Hexagon/inst_cmp_ugt.ll
index 07fa784dc64a..7af40c6ed034 100644
--- a/test/MC/Hexagon/inst_cmp_ugt.ll
+++ b/test/MC/Hexagon/inst_cmp_ugt.ll
@@ -7,6 +7,6 @@ define i1 @foo (i32 %a, i32 %b)
ret i1 %1
}
-; CHECK: p0 = cmp.gtu(r0, r1)
+; CHECK: p0 = cmp.gtu(r0,r1)
; CHECK: r0 = p0
; CHECK: jumpr r31
diff --git a/test/MC/Hexagon/inst_cmp_ugti.ll b/test/MC/Hexagon/inst_cmp_ugti.ll
index 59db552b39f4..63d94e4ff87a 100644
--- a/test/MC/Hexagon/inst_cmp_ugti.ll
+++ b/test/MC/Hexagon/inst_cmp_ugti.ll
@@ -7,6 +7,6 @@ define i1 @foo (i32 %a)
ret i1 %1
}
-; CHECK: p0 = cmp.gtu(r0, #42)
+; CHECK: p0 = cmp.gtu(r0,#42)
; CHECK: r0 = p0
; CHECK: jumpr r31
diff --git a/test/MC/Hexagon/inst_cmp_ult.ll b/test/MC/Hexagon/inst_cmp_ult.ll
index c880ac8a229c..ecda120a4598 100644
--- a/test/MC/Hexagon/inst_cmp_ult.ll
+++ b/test/MC/Hexagon/inst_cmp_ult.ll
@@ -7,6 +7,6 @@ define i1 @foo (i32 %a, i32 %b)
ret i1 %1
}
-; CHECK: p0 = cmp.gtu(r1, r0)
+; CHECK: p0 = cmp.gtu(r1,r0)
; CHECK: r0 = p0
-; CHECK: jumpr r31 \ No newline at end of file
+; CHECK: jumpr r31
diff --git a/test/MC/Hexagon/inst_select.ll b/test/MC/Hexagon/inst_select.ll
index 9d12c1de73fe..a730419c854a 100644
--- a/test/MC/Hexagon/inst_select.ll
+++ b/test/MC/Hexagon/inst_select.ll
@@ -7,7 +7,7 @@ define i32 @foo (i1 %a, i32 %b, i32 %c)
ret i32 %1
}
-; CHECK: 00 40 00 85 85004000
+; CHECK: 00 40 40 85 85404000
; CHECK: 00 40 9f 52 529f4000
; CHECK: 00 60 01 74 74016000
-; CHECK: 00 e0 82 74 7482e000 \ No newline at end of file
+; CHECK: 00 e0 82 74 7482e000
diff --git a/test/MC/Hexagon/instructions/ld.s b/test/MC/Hexagon/instructions/ld.s
index 2695999aa85f..5d18e6a30492 100644
--- a/test/MC/Hexagon/instructions/ld.s
+++ b/test/MC/Hexagon/instructions/ld.s
@@ -1,6 +1,11 @@
# RUN: llvm-mc -triple hexagon -filetype=obj -o - %s | llvm-objdump -d - | FileCheck %s
# Hexagon Programmer's Reference Manual 11.5 LD
+# Load doubleword
+# CHECK: 90 ff d5 3a
+r17:16 = memd(r21 + r31<<#3)
+# CHECK: b0 c2 c0 49
+r17:16 = memd(gp+#168)
# CHECK: 02 40 00 00
# CHECK-NEXT: 10 c5 c0 49
r17:16 = memd(##168)
@@ -62,7 +67,7 @@ if (!p3) r17:16 = memd(r21++#40)
# CHECK: 91 ff 15 3a
r17 = memb(r21 + r31<<#3)
# CHECK: b1 c2 00 49
-r17 = memb(#21)
+r17 = memb(gp+#21)
# CHECK: 00 40 00 00
# CHECK-NEXT: b1 c2 00 49
r17 = memb(##21)
@@ -140,7 +145,7 @@ r17:16 = memh_fifo(r21 ++ I:circ(m1))
# CHECK: 91 ff 55 3a
r17 = memh(r21 + r31<<#3)
# CHECK: b1 c2 40 49
-r17 = memh(#42)
+r17 = memh(gp+#42)
# CHECK: 00 40 00 00
# CHECK-NEXT: 51 c5 40 49
r17 = memh(##42)
@@ -202,7 +207,7 @@ if (!p3) r17 = memh(r21 + #62)
# CHECK: 91 ff 35 3a
r17 = memub(r21 + r31<<#3)
# CHECK: b1 c2 20 49
-r17 = memub(#21)
+r17 = memub(gp+#21)
# CHECK: 00 40 00 00
# CHECK-NEXT: b1 c2 20 49
r17 = memub(##21)
@@ -264,7 +269,7 @@ if (!p3) r17 = memub(r21++#5)
# CHECK: 91 ff 75 3a
r17 = memuh(r21 + r31<<#3)
# CHECK: b1 c2 60 49
-r17 = memuh(#42)
+r17 = memuh(gp+#42)
# CHECK: 00 40 00 00
# CHECK-NEXT: 51 c5 60 49
r17 = memuh(##42)
@@ -326,7 +331,7 @@ if (!p3) r17 = memuh(r21++#10)
# CHECK: 91 ff 95 3a
r17 = memw(r21 + r31<<#3)
# CHECK: b1 c2 80 49
-r17 = memw(#84)
+r17 = memw(gp+#84)
# CHECK: 01 40 00 00
# CHECK-NEXT: 91 c2 80 49
r17 = memw(##84)
diff --git a/test/MC/Hexagon/instructions/nv_st.s b/test/MC/Hexagon/instructions/nv_st.s
index 4ff490024a82..46ab31ef2f75 100644
--- a/test/MC/Hexagon/instructions/nv_st.s
+++ b/test/MC/Hexagon/instructions/nv_st.s
@@ -9,7 +9,7 @@
# CHECK: 1f 40 7f 70
# CHECK-NEXT: 11 c2 a0 48
{ r31 = r31
- memb(#17) = r31.new }
+ memb(gp+#17) = r31.new }
# CHECK: 1f 40 7f 70
# CHECK-NEXT: 15 c2 b1 a1
{ r31 = r31
@@ -105,7 +105,7 @@
# CHECK: 1f 40 7f 70
# CHECK-NEXT: 15 ca a0 48
{ r31 = r31
- memh(#42) = r31.new }
+ memh(gp+#42) = r31.new }
# CHECK: 1f 40 7f 70
# CHECK-NEXT: 15 ca b1 a1
{ r31 = r31
@@ -201,7 +201,7 @@
# CHECK: 1f 40 7f 70
# CHECK-NEXT: 15 d2 a0 48
{ r31 = r31
- memw(#84) = r31.new }
+ memw(gp+#84) = r31.new }
# CHECK: 1f 40 7f 70
# CHECK-NEXT: 15 d2 b1 a1
{ r31 = r31
diff --git a/test/MC/Hexagon/instructions/st.s b/test/MC/Hexagon/instructions/st.s
index 3b5e8ee18100..6ea6e9f47f77 100644
--- a/test/MC/Hexagon/instructions/st.s
+++ b/test/MC/Hexagon/instructions/st.s
@@ -5,7 +5,7 @@
# CHECK: 9e f5 d1 3b
memd(r17 + r21<<#3) = r31:30
# CHECK: 28 d4 c0 48
-memd(#320) = r21:20
+memd(gp+#320) = r21:20
# CHECK: 02 40 00 00
# CHECK-NEXT: 28 d4 c0 48
memd(##168) = r21:20
@@ -83,7 +83,7 @@ memb(r17 + r21<<#3) = r31
# CHECK: 9f ca 11 3c
memb(r17+#21)=#31
# CHECK: 15 d5 00 48
-memb(#21) = r21
+memb(gp+#21) = r21
# CHECK: 00 40 00 00
# CHECK-NEXT: 15 d5 00 48
memb(##21) = r21
@@ -183,9 +183,9 @@ memh(##42) = r21
# CHECK-NEXT: 2a d5 60 48
memh(##42) = r21.h
# CHECK: 2a d5 40 48
-memh(#84) = r21
+memh(gp+#84) = r21
# CHECK: 2a d5 60 48
-memh(#84) = r21.h
+memh(gp+#84) = r21.h
# CHECK: 15 df 51 a1
memh(r17+#42) = r31
# CHECK: 15 df 71 a1
@@ -341,7 +341,7 @@ memw(r17 + r21<<#3) = r31
# CHECK: 9f ca 51 3c
memw(r17+#84)=#31
# CHECK: 15 df 80 48
-memw(#84) = r31
+memw(gp+#84) = r31
# CHECK: 01 40 00 00
# CHECK-NEXT: 14 d5 80 48
memw(##84) = r21
diff --git a/test/MC/Hexagon/instructions/system_user.s b/test/MC/Hexagon/instructions/system_user.s
index f0ead9645dd5..02c81fa09928 100644
--- a/test/MC/Hexagon/instructions/system_user.s
+++ b/test/MC/Hexagon/instructions/system_user.s
@@ -57,6 +57,3 @@ syncht
# CHECK: 18 df 00 54
trap0(#254)
-
-# CHECK: 14 df 80 54
-trap1(#253)
diff --git a/test/MC/Hexagon/jumpdoublepound.s b/test/MC/Hexagon/jumpdoublepound.s
index 6b829360a906..8d0eef7fb60a 100644
--- a/test/MC/Hexagon/jumpdoublepound.s
+++ b/test/MC/Hexagon/jumpdoublepound.s
@@ -7,7 +7,7 @@ mylabel:
# CHECK: if (p0) jump
if (p0) jump ##mylabel
-# CHECK: if (cmp.gtu(r5.new, r4)) jump:t
+# CHECK: if (cmp.gtu(r5.new,r4)) jump:t
{ r5 = r4
if (cmp.gtu(r5.new, r4)) jump:t ##mylabel }
diff --git a/test/MC/Hexagon/labels.s b/test/MC/Hexagon/labels.s
index d52ae004b07d..f2b62d1412ba 100644
--- a/test/MC/Hexagon/labels.s
+++ b/test/MC/Hexagon/labels.s
@@ -10,17 +10,17 @@ r1:
# CHECK: nop
r3:nop
-# CHECK: r5:4 = combine(r5, r4)
+# CHECK: r5:4 = combine(r5,r4)
r5:4 = r5:4
# CHECK: r0 = r1
-# CHECK: p0 = tstbit(r0, #10)
+# CHECK: p0 = tstbit(r0,#10)
# CHECK: if (!p0) jump
1:r0=r1; p0=tstbit(r0, #10); if !p0 jump 1b;
# CHECK: nop
-# CHECK: r1 = add(r1, #4)
-# CHECK: r5 = memw(r1 + #0)
+# CHECK: r1 = add(r1,#4)
+# CHECK: r5 = memw(r1+#0)
# CHECK: endloop0
b: { r5 = memw(r1)
- r1 = add(r1, #4) } : endloop0 \ No newline at end of file
+ r1 = add(r1, #4) } : endloop0
diff --git a/test/MC/Hexagon/load-GPRel.s b/test/MC/Hexagon/load-GPRel.s
new file mode 100644
index 000000000000..88f33cd6d7eb
--- /dev/null
+++ b/test/MC/Hexagon/load-GPRel.s
@@ -0,0 +1,33 @@
+#RUN: llvm-mc -arch=hexagon -filetype=obj %s | llvm-objdump -d - | FileCheck %s
+
+# Check encoding bits for GP-relative loads.
+
+#CHECK: 4fc6ff8c { r13:12 = memd(gp+#421856) }
+r13:12 = memd(gp+#421856)
+#CHECK: 4fc6ff8c { r13:12 = memd(gp+#421856) }
+r13:12 = memd(#421856)
+
+#CHECK: 4d1ac4d2 { r18 = memb(gp+#46118) }
+r18 = memb(gp+#46118)
+#CHECK: 4d1ac4d2 { r18 = memb(gp+#46118) }
+r18 = memb(#46118)
+
+#CHECK: 4d81f772 { r18 = memw(gp+#134892) }
+r18 = memw(gp+#134892)
+#CHECK: 4d81f772 { r18 = memw(gp+#134892) }
+r18 = memw(#134892)
+
+#CHECK: 497de287 { r7 = memuh(gp+#30248) }
+r7 = memuh(gp+#30248)
+#CHECK: 497de287 { r7 = memuh(gp+#30248) }
+r7 = memuh(#30248)
+
+#CHECK: 4b43e87a { r26 = memh(gp+#36486) }
+r26 = memh(gp+#36486)
+#CHECK: 4b43e87a { r26 = memh(gp+#36486) }
+r26 = memh(#36486)
+
+#CHECK: 4f37d07f { r31 = memub(gp+#61059) }
+r31 = memub(gp+#61059)
+#CHECK: 4f37d07f { r31 = memub(gp+#61059) }
+r31 = memub(#61059)
diff --git a/test/MC/Hexagon/missing_label.s b/test/MC/Hexagon/missing_label.s
new file mode 100644
index 000000000000..80f69472029c
--- /dev/null
+++ b/test/MC/Hexagon/missing_label.s
@@ -0,0 +1,8 @@
+# RUN: llvm-mc -arch=hexagon -filetype=obj %s | llvm-objdump -d - | FileCheck %s
+#
+
+.I1:
+nop
+
+# CHECK: .I1:
+# CHECK: nop
diff --git a/test/MC/Hexagon/non-relocatable.s b/test/MC/Hexagon/non-relocatable.s
new file mode 100644
index 000000000000..72a17901c622
--- /dev/null
+++ b/test/MC/Hexagon/non-relocatable.s
@@ -0,0 +1,10 @@
+# RUN: not llvm-mc -arch=hexagon -filetype=obj %s 2>%t; FileCheck %s <%t
+
+# Don't allow a symbolic operand for an insn that cannot take a
+# relocation.
+
+r7:6 = rol(r5:4,#r2)
+
+# This should produce an error
+#CHECK: error:
+
diff --git a/test/MC/Hexagon/not-over.s b/test/MC/Hexagon/not-over.s
new file mode 100644
index 000000000000..c31ce5312305
--- /dev/null
+++ b/test/MC/Hexagon/not-over.s
@@ -0,0 +1,55 @@
+# RUN: llvm-mc -arch=hexagon -filetype=asm %s 2>%t; FileCheck %s <%t
+#
+
+# Check that proper packets are not wrongly flagged as invalid.
+
+1-3-4-f:
+ {
+ r3 = memub(r2++#1)
+ if (cmp.eq(r3.new,#0)) jump:nt .
+ jumpr lr
+ r4 = #4
+ }
+# CHECK-NOT: rror: invalid instruction packet
+
+1-3-f-f:
+ {
+ r3 = memub(r2++#1)
+ if (cmp.eq(r3.new,#0)) jump:nt .
+ r5 = #5
+ r4 = #4
+ }
+# CHECK-NOT: rror: invalid instruction packet
+
+# Special case of a fat packet that will slim down when a compound is formed.
+3-3-8-c:
+ { LOOP0(3-3-8-c, R7)
+ P0 = CMP.GT(R7, #0)
+ IF (!P0.NEW) JUMP:NT .
+ R21:20 = MEMD(R0+#16)
+ R23:22 = MEMD(R0+#24)
+ }
+# CHECK-NOT: rror: invalid instruction packet
+
+1-f-f-f:
+ {
+ r3 = #3
+ if (cmp.eq(r3.new,#0)) jump:nt .
+ r5 = #5
+ r4 = #4
+ }
+# CHECK-NOT: rror: invalid instruction packet
+
+4:
+ jumpr lr
+# CHECK-NOT: rror: invalid instruction packet
+
+f-f-f-f:
+ {
+ r3 = #3
+ r2 = #2
+ r5 = #5
+ r4 = #4
+ }
+# CHECK-NOT: rror: invalid instruction packet
+
diff --git a/test/MC/Hexagon/not_found.s b/test/MC/Hexagon/not_found.s
new file mode 100644
index 000000000000..2403042792dd
--- /dev/null
+++ b/test/MC/Hexagon/not_found.s
@@ -0,0 +1,4 @@
+# RUN: not llvm-mc -arch=hexagon -filetype=asm junk123.s 2>%t ; FileCheck %s < %t
+#
+
+# CHECK: junk123.s: {{[Nn]}}o such file or directory
diff --git a/test/MC/Hexagon/offset.s b/test/MC/Hexagon/offset.s
new file mode 100644
index 000000000000..b079634814d0
--- /dev/null
+++ b/test/MC/Hexagon/offset.s
@@ -0,0 +1,7 @@
+# RUN: llvm-mc -arch=hexagon -filetype=obj %s | llvm-objdump -t - | FileCheck %s
+#
+
+sym_a:
+.set sym_d, sym_a + 8
+# CHECK: 00000000 .text 00000000 sym_a
+# CHECK: 00000008 .text 00000000 sym_d
diff --git a/test/MC/Hexagon/operand-range.s b/test/MC/Hexagon/operand-range.s
new file mode 100644
index 000000000000..c38aab7060dd
--- /dev/null
+++ b/test/MC/Hexagon/operand-range.s
@@ -0,0 +1,7 @@
+# RUN: not llvm-mc -arch=hexagon -filetype=asm %s 2>&1 | FileCheck %s
+
+# Expect errors here; the insn needs to be extended.
+R1 = mpyi(R2, #-256)
+# CHECK: error:
+R3 = mpyi(R4, #256)
+# CHECK: error:
diff --git a/test/MC/Hexagon/parse-pound-hi.s b/test/MC/Hexagon/parse-pound-hi.s
new file mode 100644
index 000000000000..5c6786481c72
--- /dev/null
+++ b/test/MC/Hexagon/parse-pound-hi.s
@@ -0,0 +1,60 @@
+# RUN: llvm-mc -arch=hexagon -filetype=obj %s | llvm-objdump -d - | FileCheck %s
+
+ memw(gp+#hi_htc_version) = r3
+#CHECK: 4880c300 { memw(gp+#0) = r3 }
+ memw(gp+#HI) = r3
+#CHECK: 4880c300 { memw(gp+#0) = r3 }
+ r3 = memw(gp+#HI)
+#CHECK: 4980c003 { r3 = memw(gp+#0) }
+ memw(gp+#HI_x) = r3
+#CHECK: 4880c300 { memw(gp+#0) = r3 }
+ r3 = memw(gp+#HI_x)
+#CHECK: 4980c003 { r3 = memw(gp+#0) }
+ memw(gp+#hi) = r3
+#CHECK: 4880c300 { memw(gp+#0) = r3 }
+ r3 = memw(gp+#hi)
+#CHECK: 4980c003 { r3 = memw(gp+#0) }
+ memw(gp+#hi_x) = r3
+#CHECK: 4880c300 { memw(gp+#0) = r3 }
+ r3 = memw(gp+#hi_x)
+#CHECK: 4980c003 { r3 = memw(gp+#0) }
+ memw(gp+#lo) = r3
+#CHECK: 4880c300 { memw(gp+#0) = r3 }
+ r3 = memw(gp+#lo)
+#CHECK: 4980c003 { r3 = memw(gp+#0) }
+ memw(gp+#lo_x) = r3
+#CHECK: 4880c300 { memw(gp+#0) = r3 }
+ r3 = memw(gp+#lo_x)
+#CHECK: 4980c003 { r3 = memw(gp+#0) }
+ memw(gp+#LO) = r3
+#CHECK: 4880c300 { memw(gp+#0) = r3 }
+ r3 = memw(gp+#lo)
+#CHECK: 4980c003 { r3 = memw(gp+#0) }
+ memw(gp+#LO_x) = r3
+#CHECK: 4880c300 { memw(gp+#0) = r3 }
+ r3 = memw(gp+#LO_x)
+#CHECK: 4980c003 { r3 = memw(gp+#0) }
+ r16.h = #HI(0x405000)
+#CHECK: 7230c040 { r16.h = #64 }
+ r16.h = #HI (0x405000)
+#CHECK: 7230c040 { r16.h = #64 }
+ r16.h = #hi(0x405000)
+#CHECK: 7230c040 { r16.h = #64 }
+ r16.h = #hi (0x405000)
+#CHECK: 7230c040 { r16.h = #64 }
+ r16.l = #LO(0x405020)
+#CHECK: 7170d020 { r16.l = #20512 }
+ r16.l = #LO (0x405020)
+#CHECK: 7170d020 { r16.l = #20512 }
+ r16.l = #lo(0x405020)
+#CHECK: 7170d020 { r16.l = #20512 }
+ r16.l = #lo (0x405020)
+#CHECK: 7170d020 { r16.l = #20512 }
+
+{
+ r19.h = #HI(-559030611)
+ memw(r17+#0) = r19.new
+}
+# CHECK: 72f35ead { r19.h = #57005
+# CHECK: a1b1d200 memw(r17+#0) = r19.new }
+
diff --git a/test/MC/Hexagon/reg_altnames.s b/test/MC/Hexagon/reg_altnames.s
new file mode 100644
index 000000000000..9c7f7e9b0bfa
--- /dev/null
+++ b/test/MC/Hexagon/reg_altnames.s
@@ -0,0 +1,10 @@
+# RUN: llvm-mc -triple hexagon -filetype=obj %s | llvm-objdump -d - | FileCheck %s
+
+# CHECK: 11 df 75 f1
+r17 = xor(r21, lr)
+
+# CHECK: 1d df 35 f3
+sp = sub(lr, r21)
+
+# CHECK: 15 c0 3e 71
+fp.l = #21
diff --git a/test/MC/Hexagon/register-alt-names.s b/test/MC/Hexagon/register-alt-names.s
index 97bfd32c51d9..3e514661887e 100644
--- a/test/MC/Hexagon/register-alt-names.s
+++ b/test/MC/Hexagon/register-alt-names.s
@@ -9,6 +9,6 @@ r1 = fp
# CHECK: r2 = r29
r2 = sp
-# CHECK: r1:0 = combine(r31, r30)
+# CHECK: r1:0 = combine(r31,r30)
r1:0 = lr:fp
diff --git a/test/MC/Hexagon/relaxed_newvalue.s b/test/MC/Hexagon/relaxed_newvalue.s
index 65fbd312e0ac..4e8c6cc2cbc5 100644
--- a/test/MC/Hexagon/relaxed_newvalue.s
+++ b/test/MC/Hexagon/relaxed_newvalue.s
@@ -1,9 +1,9 @@
# RUN: llvm-mc -triple=hexagon -filetype=obj %s | llvm-objdump -d - | FileCheck %s
# Make sure relaxation doesn't hinder newvalue calculation
-#CHECK: r18 = add(r2, #-6)
+#CHECK: r18 = add(r2,#-6)
#CHECK-NEXT: immext(#0)
-#CHECK-NEXT: if (!cmp.gt(r18.new, #1)) jump:t
+#CHECK-NEXT: if (!cmp.gt(r18.new,#1)) jump:t
{
r18 = add(r2, #-6)
if (!cmp.gt(r18.new, #1)) jump:t .unknown
diff --git a/test/MC/Hexagon/relocations.s b/test/MC/Hexagon/relocations.s
index 8b90bc7c0cdf..4acc8084ae6a 100644
--- a/test/MC/Hexagon/relocations.s
+++ b/test/MC/Hexagon/relocations.s
@@ -12,6 +12,14 @@ r_hex_b15_pcrel:
r_hex_b7_pcrel:
{ loop1 (#undefined, #0) }
+# CHECK: R_HEX_LO16
+r_hex_lo16:
+{ r0.l = #lo(undefined) }
+
+# CHECK: R_HEX_HI16
+r_hex_hi16:
+{ r0.h = #hi(undefined) }
+
# CHECK: R_HEX_32
r_hex_32:
.word undefined
@@ -30,19 +38,19 @@ r_hex_8:
# CHECK: R_HEX_GPREL16_0
r_hex_gprel16_0:
-{ r0 = memb (#undefined@gotrel) }
+{ r0 = memb (gp+#undefined) }
# CHECK: R_HEX_GPREL16_1
r_hex_gprel16_1:
-{ r0 = memh (#undefined@gotrel) }
+{ r0 = memh (gp+#undefined) }
# CHECK: R_HEX_GPREL16_2
r_hex_gprel16_2:
-{ r0 = memw (#undefined@gotrel) }
+{ r0 = memw (gp+#undefined) }
# CHECK: R_HEX_GPREL16_3
r_hex_gprel16_3:
-{ r1:0 = memd (#undefined@gotrel) }
+{ r1:0 = memd (gp+#undefined) }
# CHECK: R_HEX_B13_PCREL
r_hex_b13_pcrel:
@@ -68,10 +76,6 @@ r_hex_b22_pcrel_x:
r_hex_b15_pcrel_x:
{ if (p0) jump ##undefined }
-# CHECK: R_HEX_B9_PCREL_X
-r_hex_b9_pcrel_x:
-{ r0 = #0 ; jump ##undefined }
-
# CHECK: R_HEX_B7_PCREL_X
r_hex_b7_pcrel_x:
{ loop1 (##undefined, #0) }
diff --git a/test/MC/Hexagon/store-GPRel.s b/test/MC/Hexagon/store-GPRel.s
new file mode 100644
index 000000000000..090a6d0059b0
--- /dev/null
+++ b/test/MC/Hexagon/store-GPRel.s
@@ -0,0 +1,46 @@
+#RUN: llvm-mc -arch=hexagon -filetype=obj %s | llvm-objdump -d -r - | FileCheck %s
+
+# Check encoding bits for GP-relative stores.
+
+#CHECK: 4ab3f229 memw(gp+#105636) = r12.new
+{ r12 = add(r0,r19)
+ memw(gp+#105636) = r12.new }
+
+#CHECK: 4ab3f229 memw(gp+#105636) = r12.new
+{ r12 = add(r0,r19)
+ memw(#105636) = r12.new }
+
+#CHECK: 4ebdca35 memh(gp+#128106) = r6.new
+{ r6 = add(r18,r13)
+ memh(gp+#128106) = r6.new }
+
+#CHECK: 4ebdca35 memh(gp+#128106) = r6.new
+{ r6 = add(r18,r13)
+ memh(#128106) = r6.new }
+
+#CHECK: 4eb3e2fc memb(gp+#59388) = r17.new
+{ r17 = add(r26,r18)
+ memb(gp+#59388) = r17.new }
+#CHECK: 4eb3e2fc memb(gp+#59388) = r17.new
+{ r17 = add(r26,r18)
+ memb(#59388) = r17.new }
+
+#CHECK: 4ad2ea01 { memd(gp+#206856) = r11:10
+{ memd(gp+#206856) = r11:10 }
+#CHECK: 4ad2ea01 { memd(gp+#206856) = r11:10
+{ memd(#206856) = r11:10 }
+
+#CHECK: 4c9dfa1e { memw(gp+#191608) = r26
+{ memw(gp+#191608) = r26 }
+#CHECK: 4c9dfa1e { memw(gp+#191608) = r26
+{ memw(#191608) = r26 }
+
+#CHECK: 4855cfdc { memh(gp+#21944) = r15
+{ memh(gp+#21944) = r15 }
+#CHECK: 4855cfdc { memh(gp+#21944) = r15
+{ memh(#21944) = r15 }
+
+#CHECK: 4a00cea2 { memb(gp+#16546) = r14
+{ memb(gp+#16546) = r14 }
+#CHECK: 4a00cea2 { memb(gp+#16546) = r14
+{ memb(#16546) = r14 }
diff --git a/test/MC/Hexagon/two-extenders.s b/test/MC/Hexagon/two-extenders.s
new file mode 100644
index 000000000000..314579270135
--- /dev/null
+++ b/test/MC/Hexagon/two-extenders.s
@@ -0,0 +1,135 @@
+# RUN: llvm-mc -arch=hexagon -filetype=obj %s | llvm-objdump -d - | FileCheck %s
+#
+
+# Check that in packets with two extensions the assembler extends both instructions.
+#
+
+//['D_DUMMY,C4_or_or,L4_ploadrbtnew_abs,S2_storerfgp']
+{
+ if (p3) r23 = memb(##2164335510)
+ memh(##1696682668) = r28.h
+}
+# CHECK: { immext(#2164335488)
+# CHECK: if (p3) r23 = memb(##2164335510)
+# CHECK: immext(#1696682624)
+# CHECK: memh(##1696682668) = r28.h }
+
+//['D_DUMMY,C4_or_or,L4_ploadrbtnew_abs,S2_storerfgp']
+{
+ if (p3.new) r23 = memb(##2164335510)
+ p3 = or(p2,or(p3, p0))
+}
+# CHECK: { p3 = or(p2,or(p3,p0))
+# CHECK: immext(#2164335488)
+# CHECK: if (p3.new) r23 = memb(##2164335510) }
+
+
+# -------------------------- Non-extended cases:
+# -------------------------- Use GP and non-GP notation
+
+R2 = memb(gp+#0x1000)
+# CHECK: { r2 = memb(gp+#4096) }
+
+R3 = memh(gp+#0x1000)
+# CHECK: { r3 = memh(gp+#4096) }
+
+r4 = memub(gp+#0x1000)
+# CHECK: { r4 = memub(gp+#4096) }
+
+r5 = memuh(gp+#0x1000)
+# CHECK: { r5 = memuh(gp+#4096) }
+
+r6 = memw(gp+#0x1000)
+# CHECK: { r6 = memw(gp+#4096) }
+
+R1:0 = memd(gp+#0x1000)
+# CHECK: { r1:0 = memd(gp+#4096) }
+
+{R25 = #1; memb(gp+#0x1000) = R25.new}
+# CHECK: { r25 = #1
+# CHECK-NEXT: memb(gp+#4096) = r25.new }
+
+{R26 = #1; memh(gp+#0x1000) = R26.new}
+# CHECK: { r26 = #1
+# CHECK-NEXT: memh(gp+#4096) = r26.new }
+
+{R27 = #1; memw(gp+#0x1000) = R27.new}
+# CHECK: { r27 = #1
+# CHECK-NEXT: memw(gp+#4096) = r27.new }
+
+memd(gp+#0x1000) = R1:0
+# CHECK: { memd(gp+#4096) = r1:0 }
+
+memb(gp+#0x1000) = R2
+# CHECK: { memb(gp+#4096) = r2 }
+
+memh(gp+#0x1000) = r3.h
+# CHECK: { memh(gp+#4096) = r3.h }
+
+memh(gp+#0x1000) = R4
+# CHECK: { memh(gp+#4096) = r4 }
+
+memw(gp+#0x1000) = R5
+# CHECK: { memw(gp+#4096) = r5 }
+
+# -------------------------- Extended cases:
+# -------------------------- Use GP and non-GP notation
+
+R11:10 = memd(##0x1000)
+# CHECK: { immext(#4096)
+# CHECK-NEXT: r11:10 = memd(##4096) }
+
+R11 = memb(##0x1000)
+# CHECK: { immext(#4096)
+# CHECK-NEXT: r11 = memb(##4096) }
+
+R12 = memh(##0x1000)
+# CHECK: { immext(#4096)
+# CHECK-NEXT: r12 = memh(##4096) }
+
+r13 = memub(##0x1000)
+# CHECK: { immext(#4096)
+# CHECK-NEXT: r13 = memub(##4096) }
+
+r14 = memuh(##0x1000)
+# CHECK: { immext(#4096)
+# CHECK-NEXT: r14 = memuh(##4096) }
+
+r15 = memw(##0x1000)
+# CHECK: { immext(#4096)
+# CHECK-NEXT: r15 = memw(##4096) }
+
+{R22 = #1; memb(##0x1000) = R22.new}
+# CHECK: { r22 = #1
+# CHECK-NEXT: immext(#4096)
+# CHECK-NEXT: memb(##4096) = r22.new }
+
+{R23 = #1; memh(##0x1000) = R23.new}
+# CHECK: { r23 = #1
+# CHECK-NEXT: immext(#4096)
+# CHECK-NEXT: memh(##4096) = r23.new }
+
+{R24 = #1; memw(##0x1000) = R24.new}
+# CHECK: { r24 = #1
+# CHECK-NEXT: immext(#4096)
+# CHECK-NEXT: memw(##4096) = r24.new }
+
+memd(##0x1000) = R17:16
+# CHECK: { immext(#4096)
+# CHECK-NEXT: memd(##4096) = r17:16 }
+
+memb(##0x1000) = R18
+# CHECK: { immext(#4096)
+# CHECK-NEXT: memb(##4096) = r18 }
+
+memh(##0x1000) = r19.h
+# CHECK: { immext(#4096)
+# CHECK-NEXT: memh(##4096) = r19.h }
+
+memh(##0x1000) = R20
+# CHECK: { immext(#4096)
+# CHECK-NEXT: memh(##4096) = r20 }
+
+memw(##0x1000) = R21
+# CHECK: { immext(#4096)
+# CHECK-NEXT: memw(##4096) = r21 }
diff --git a/test/MC/Hexagon/v60-misc.s b/test/MC/Hexagon/v60-misc.s
index e16034948dc3..b278447ab100 100644
--- a/test/MC/Hexagon/v60-misc.s
+++ b/test/MC/Hexagon/v60-misc.s
@@ -14,10 +14,10 @@ if (p2) jumpr r0
# CHECK: 5361c300 { if (!p3) jumpr:nt
if (!p3) jumpr r1
-# CHECK: 1c2eceee { v14 = vxor(v14,{{ *}}v14) }
+# CHECK: 1c2eceee { v14 = vxor(v14,v14) }
v14 = #0
-# CHECK: 1c80c0a0 { v1:0.w = vsub(v1:0.w,v1:0.w) }
+# CHECK: 1c9edea0 { v1:0.w = vsub(v31:30.w,v31:30.w) }
v1:0 = #0
# CHECK: 1f42c3e0 { v1:0 = vcombine(v3,v2) }
@@ -53,7 +53,7 @@ q0 = vcmp.eq(v8.uw, v9.uw)
# CHECK: 1c8aea09 { q1 &= vcmp.eq(v10.w,v10.w) }
q1 &= vcmp.eq(v10.uw, v10.uw)
-# CHECK: 1c8ceb46 { q2 |= vcmp.eq(v11.h,v12.h) }
+# CHECK: 1c8ceb4a { q2 |= vcmp.eq(v11.w,v12.w) }
q2 |= vcmp.eq(v11.uw, v12.uw)
# CHECK: 1c8eed8b { q3 ^= vcmp.eq(v13.w,v14.w) }
diff --git a/test/MC/Hexagon/v60-vmem.s b/test/MC/Hexagon/v60-vmem.s
index fe202251ec4b..0580a1e62448 100644
--- a/test/MC/Hexagon/v60-vmem.s
+++ b/test/MC/Hexagon/v60-vmem.s
@@ -327,25 +327,25 @@
vmem(r6+#-6):nt=v16.new
}
-#CHECK: 28b1cd42 if(p1) vmem(r17+#5) = v17.new }
+#CHECK: 28b1cd42 if (p1) vmem(r17+#5) = v17.new }
{
v17 = v25
if(p1)vmem(r17+#5)=v17.new
}
-#CHECK: 28bbeb6a if(!p1) vmem(r27+#-5) = v17.new }
+#CHECK: 28bbeb6a if (!p1) vmem(r27+#-5) = v17.new }
{
v17 = v15
if(!p1)vmem(r27+#-5)=v17.new
}
-#CHECK: 28e4d252 if(p2) vmem(r4+#2):nt = v24.new }
+#CHECK: 28e4d252 if (p2) vmem(r4+#2):nt = v24.new }
{
v24 = v10
if(p2)vmem(r4+#2):nt=v24.new
}
-#CHECK: 28f8d17a if(!p2) vmem(r24+#1):nt = v4.new }
+#CHECK: 28f8d17a if (!p2) vmem(r24+#1):nt = v4.new }
{
v4 = v8
if(!p2)vmem(r24+#1):nt=v4.new
@@ -363,25 +363,25 @@
vmem(r1++#1):nt=v7.new
}
-#CHECK: 29a6d042 if(p2) vmem(r6++#0) = v11.new }
+#CHECK: 29a6d042 if (p2) vmem(r6++#0) = v11.new }
{
v11 = v13
if(p2)vmem(r6++#0)=v11.new
}
-#CHECK: 29a2cb6a if(!p1) vmem(r2++#3) = v25.new }
+#CHECK: 29a2cb6a if (!p1) vmem(r2++#3) = v25.new }
{
v25 = v17
if(!p1)vmem(r2++#3)=v25.new
}
-#CHECK: 29f5c952 if(p1) vmem(r21++#1):nt = v14.new }
+#CHECK: 29f5c952 if (p1) vmem(r21++#1):nt = v14.new }
{
v14 = v13
if(p1)vmem(r21++#1):nt=v14.new
}
-#CHECK: 29f7cd7a if(!p1) vmem(r23++#-3):nt = v1.new }
+#CHECK: 29f7cd7a if (!p1) vmem(r23++#-3):nt = v1.new }
{
v1 = v0
if(!p1)vmem(r23++#-3):nt=v1.new
@@ -399,25 +399,25 @@
vmem(r15++m0):nt=v19.new
}
-#CHECK: 2bb7f042 if(p2) vmem(r23++m1) = v6.new }
+#CHECK: 2bb7f042 if (p2) vmem(r23++m1) = v6.new }
{
v6 = v30
if(p2)vmem(r23++m1)=v6.new
}
-#CHECK: 2ba2f06a if(!p2) vmem(r2++m1) = v12.new }
+#CHECK: 2ba2f06a if (!p2) vmem(r2++m1) = v12.new }
{
v12 = v9
if(!p2)vmem(r2++m1)=v12.new
}
-#CHECK: 2be7e852 if(p1) vmem(r7++m1):nt = v3.new }
+#CHECK: 2be7e852 if (p1) vmem(r7++m1):nt = v3.new }
{
v3 = v13
if(p1)vmem(r7++m1):nt=v3.new
}
-#CHECK: 2bfdd07a if(!p2) vmem(r29++m0):nt = v29.new }
+#CHECK: 2bfdd07a if (!p2) vmem(r29++m0):nt = v29.new }
{
v29 = v9
if(!p2)vmem(r29++m0):nt=v29.new
diff --git a/test/MC/Hexagon/v62_all.s b/test/MC/Hexagon/v62_all.s
new file mode 100644
index 000000000000..6effdc0caba9
--- /dev/null
+++ b/test/MC/Hexagon/v62_all.s
@@ -0,0 +1,552 @@
+# RUN: llvm-mc -arch=hexagon -mcpu=hexagonv62 -filetype=obj %s | llvm-objdump -arch=hexagon -mcpu=hexagonv62 -d - | FileCheck %s
+
+// V6_lvsplatb
+// Vd32.b=vsplat(Rt32)
+ V0.b=vsplat(R0)
+# CHECK: 19c0c040 { v0.b = vsplat(r0) }
+
+// V6_lvsplath
+// Vd32.h=vsplat(Rt32)
+ V0.h=vsplat(R0)
+# CHECK: 19c0c020 { v0.h = vsplat(r0) }
+
+// V6_pred_scalar2v2
+// Qd4=vsetq2(Rt32)
+ Q0=vsetq2(R0)
+# CHECK: 19a0c04c { q0 = vsetq2(r0) }
+
+// V6_shuffeqh
+// Qd4.b=vshuffe(Qs4.h,Qt4.h)
+ Q0.b=vshuffe(Q0.h,Q0.h)
+# CHECK: 1e03c018 { q0.b = vshuffe(q0.h,q0.h) }
+
+// V6_shuffeqw
+// Qd4.h=vshuffe(Qs4.w,Qt4.w)
+ Q0.h=vshuffe(Q0.w,Q0.w)
+# CHECK: 1e03c01c { q0.h = vshuffe(q0.w,q0.w) }
+
+// V6_vaddbsat
+// Vd32.b=vadd(Vu32.b,Vv32.b):sat
+ V0.b=vadd(V0.b,V0.b):sat
+# CHECK: 1f00c000 { v0.b = vadd(v0.b,v0.b):sat }
+
+// V6_vaddbsat_dv
+// Vdd32.b=vadd(Vuu32.b,Vvv32.b):sat
+ V1:0.b=vadd(V1:0.b,V1:0.b):sat
+# CHECK: 1ea0c000 { v1:0.b = vadd(v1:0.b,v1:0.b):sat }
+
+// V6_vaddcarry
+// Vd32.w=vadd(Vu32.w,Vv32.w,Qx4):carry
+ V0.w=vadd(V0.w,V0.w,Q0):carry
+# CHECK: 1ca0e000 { v0.w = vadd(v0.w,v0.w,q0):carry }
+
+// V6_vaddclbh
+// $Vd.h=vadd(vclb($Vu.h),$Vv.h)
+ V0.h=vadd(vclb(V0.h),V0.h)
+# CHECK: 1f00e000 { v0.h = vadd(vclb(v0.h),v0.h) }
+
+// V6_vaddclbw
+// $Vd.w=vadd(vclb($Vu.w),$Vv.w)
+ V0.w=vadd(vclb(V0.w),V0.w)
+# CHECK: 1f00e020 { v0.w = vadd(vclb(v0.w),v0.w) }
+
+// V6_vaddhw_acc
+// Vxx32.w+=vadd(Vu32.h,Vv32.h)
+ V1:0.w+=vadd(V0.h,V0.h)
+# CHECK: 1c20e040 { v1:0.w += vadd(v0.h,v0.h) }
+
+// V6_vaddubh_acc
+// Vxx32.h+=vadd(Vu32.ub,Vv32.ub)
+ V1:0.h+=vadd(V0.ub,V0.ub)
+# CHECK: 1c40e0a0 { v1:0.h += vadd(v0.ub,v0.ub) }
+
+// V6_vaddububb_sat
+// Vd32.ub=vadd(Vu32.ub,Vv32.b):sat
+ V0.ub=vadd(V0.ub,V0.b):sat
+# CHECK: 1ea0c080 { v0.ub = vadd(v0.ub,v0.b):sat }
+
+// V6_vadduhw_acc
+// Vxx32.w+=vadd(Vu32.uh,Vv32.uh)
+ V1:0.w+=vadd(V0.uh,V0.uh)
+# CHECK: 1c40e080 { v1:0.w += vadd(v0.uh,v0.uh) }
+
+// V6_vadduwsat
+// Vd32.uw=vadd(Vu32.uw,Vv32.uw):sat
+ V0.uw=vadd(V0.uw,V0.uw):sat
+# CHECK: 1f60c020 { v0.uw = vadd(v0.uw,v0.uw):sat }
+
+// V6_vadduwsat_dv
+// Vdd32.uw=vadd(Vuu32.uw,Vvv32.uw):sat
+ V1:0.uw=vadd(V1:0.uw,V1:0.uw):sat
+# CHECK: 1ea0c040 { v1:0.uw = vadd(v1:0.uw,v1:0.uw):sat }
+
+// V6_vandnqrt
+// Vd32=vand(!Qu4,Rt32)
+ V0=vand(!Q0,R0)
+# CHECK: 19a0c4a0 { v0 = vand(!q0,r0) }
+
+// V6_vandnqrt_acc
+// Vx32|=vand(!Qu4,Rt32)
+ V0|=vand(!Q0,R0)
+# CHECK: 1960e460 { v0 |= vand(!q0,r0) }
+
+// V6_vandvnqv
+// Vd32=vand(!Qv4,Vu32)
+ V0=vand(!Q0,V0)
+# CHECK: 1e03e020 { v0 = vand(!q0,v0) }
+
+// V6_vandvqv
+// Vd32=vand(Qv4,Vu32)
+ V0=vand(Q0,V0)
+# CHECK: 1e03e000 { v0 = vand(q0,v0) }
+
+// V6_vasrhbsat
+// Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):sat
+ V0.b=vasr(V0.h,V0.h,R0):sat
+# CHECK: 1800c000 { v0.b = vasr(v0.h,v0.h,r0):sat }
+
+// V6_vasruwuhrndsat
+// Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):rnd:sat
+ V0.uh=vasr(V0.uw,V0.uw,R0):rnd:sat
+# CHECK: 1800c020 { v0.uh = vasr(v0.uw,v0.uw,r0):rnd:sat }
+
+// V6_vasrwuhrndsat
+// Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat
+ V0.uh=vasr(V0.w,V0.w,R0):rnd:sat
+# CHECK: 1800c040 { v0.uh = vasr(v0.w,v0.w,r0):rnd:sat }
+
+// V6_vL32b_cur_npred_ai
+// if (!Pv4) Vd32.cur=vmem(Rt32+#s4)
+ {
+ v1=v0
+ if (!P0) V0.cur=vmem(R0+#04)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2880c4a0 if (!p0) v0.cur = vmem(r0+#4) }
+
+// V6_vL32b_cur_npred_pi
+// if (!Pv4) Vd32.cur=vmem(Rx32++#s3)
+ {
+ v1=v0
+ if (!P0) V0.cur=vmem(R0++#03)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2980c3a0 if (!p0) v0.cur = vmem(r0++#3) }
+
+// V6_vL32b_cur_npred_ppu
+// if (!Pv4) Vd32.cur=vmem(Rx32++Mu2)
+ {
+ v1=v0
+ if (!P0) V0.cur=vmem(R0++M0)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2b80c0a0 if (!p0) v0.cur = vmem(r0++m0) }
+
+// V6_vL32b_cur_pred_ai
+// if (Pv4) Vd32.cur=vmem(Rt32+#s4)
+ {
+ v1=v0
+ if (P0) V0.cur=vmem(R0+#04)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2880c480 if (p0) v0.cur = vmem(r0+#4) }
+
+// V6_vL32b_cur_pred_pi
+// if (Pv4) Vd32.cur=vmem(Rx32++#s3)
+ {
+ v1=v0
+ if (P0) V0.cur=vmem(R0++#03)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2980c380 if (p0) v0.cur = vmem(r0++#3) }
+
+// V6_vL32b_cur_pred_ppu
+// if (Pv4) Vd32.cur=vmem(Rx32++Mu2)
+ {
+ v1=v0
+ if (P0) V0.cur=vmem(R0++M0)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2b80c080 if (p0) v0.cur = vmem(r0++m0) }
+
+// V6_vL32b_npred_ai
+// if (!Pv4) Vd32=vmem(Rt32+#s4)
+ if (!P0) V0=vmem(R0+#04)
+# CHECK: 2880c460 { if (!p0) v0 = vmem(r0+#4) }
+
+// V6_vL32b_npred_pi
+// if (!Pv4) Vd32=vmem(Rx32++#s3)
+ if (!P0) V0=vmem(R0++#03)
+# CHECK: 2980c360 { if (!p0) v0 = vmem(r0++#3) }
+
+// V6_vL32b_npred_ppu
+// if (!Pv4) Vd32=vmem(Rx32++Mu2)
+ if (!P0) V0=vmem(R0++M0)
+# CHECK: 2b80c060 { if (!p0) v0 = vmem(r0++m0) }
+
+// V6_vL32b_nt_cur_npred_ai
+// if (!Pv4) Vd32.cur=vmem(Rt32+#s4):nt
+ {
+ v1=v0
+ if (!P0) V0.cur=vmem(R0+#04):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 28c0c4a0 if (!p0) v0.cur = vmem(r0+#4):nt }
+
+// V6_vL32b_nt_cur_npred_pi
+// if (!Pv4) Vd32.cur=vmem(Rx32++#s3):nt
+ {
+ v1=v0
+ if (!P0) V0.cur=vmem(R0++#03):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 29c0c3a0 if (!p0) v0.cur = vmem(r0++#3):nt }
+
+// V6_vL32b_nt_cur_npred_ppu
+// if (!Pv4) Vd32.cur=vmem(Rx32++Mu2):nt
+ {
+ v1=v0
+ if (!P0) V0.cur=vmem(R0++M0):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2bc0c0a0 if (!p0) v0.cur = vmem(r0++m0):nt }
+
+// V6_vL32b_nt_cur_pred_ai
+// if (Pv4) Vd32.cur=vmem(Rt32+#s4):nt
+ {
+ v1=v0
+ if (P0) V0.cur=vmem(R0+#04):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 28c0c480 if (p0) v0.cur = vmem(r0+#4):nt }
+
+// V6_vL32b_nt_cur_pred_pi
+// if (Pv4) Vd32.cur=vmem(Rx32++#s3):nt
+ {
+ v1=v0
+ if (P0) V0.cur=vmem(R0++#03):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 29c0c380 if (p0) v0.cur = vmem(r0++#3):nt }
+
+// V6_vL32b_nt_cur_pred_ppu
+// if (Pv4) Vd32.cur=vmem(Rx32++Mu2):nt
+ {
+ v1=v0
+ if (P0) V0.cur=vmem(R0++M0):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2bc0c080 if (p0) v0.cur = vmem(r0++m0):nt }
+
+// V6_vL32b_nt_npred_ai
+// if (!Pv4) Vd32=vmem(Rt32+#s4):nt
+ if (!P0) V0=vmem(R0+#04):nt
+# CHECK: 28c0c460 { if (!p0) v0 = vmem(r0+#4):nt }
+
+// V6_vL32b_nt_npred_pi
+// if (!Pv4) Vd32=vmem(Rx32++#s3):nt
+ if (!P0) V0=vmem(R0++#03):nt
+# CHECK: 29c0c360 { if (!p0) v0 = vmem(r0++#3):nt }
+
+// V6_vL32b_nt_npred_ppu
+// if (!Pv4) Vd32=vmem(Rx32++Mu2):nt
+ if (!P0) V0=vmem(R0++M0):nt
+# CHECK: 2bc0c060 { if (!p0) v0 = vmem(r0++m0):nt }
+
+// V6_vL32b_nt_pred_ai
+// if (Pv4) Vd32=vmem(Rt32+#s4):nt
+ if (P0) V0=vmem(R0+#04):nt
+# CHECK: 28c0c440 { if (p0) v0 = vmem(r0+#4):nt }
+
+// V6_vL32b_nt_pred_pi
+// if (Pv4) Vd32=vmem(Rx32++#s3):nt
+ if (P0) V0=vmem(R0++#03):nt
+# CHECK: 29c0c340 { if (p0) v0 = vmem(r0++#3):nt }
+
+// V6_vL32b_nt_pred_ppu
+// if (Pv4) Vd32=vmem(Rx32++Mu2):nt
+ if (P0) V0=vmem(R0++M0):nt
+# CHECK: 2bc0c040 { if (p0) v0 = vmem(r0++m0):nt }
+
+// V6_vL32b_nt_tmp_npred_ai
+// if (!Pv4) Vd32.tmp=vmem(Rt32+#s4):nt
+ {
+ v1=v0
+ if (!P0) V0.tmp=vmem(R0+#04):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 28c0c4e0 if (!p0) v0.tmp = vmem(r0+#4):nt }
+
+// V6_vL32b_nt_tmp_npred_pi
+// if (!Pv4) Vd32.tmp=vmem(Rx32++#s3):nt
+ {
+ v1=v0
+ if (!P0) V0.tmp=vmem(R0++#03):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 29c0c3e0 if (!p0) v0.tmp = vmem(r0++#3):nt }
+
+// V6_vL32b_nt_tmp_npred_ppu
+// if (!Pv4) Vd32.tmp=vmem(Rx32++Mu2):nt
+ {
+ v1=v0
+ if (!P0) V0.tmp=vmem(R0++M0):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2bc0c0e0 if (!p0) v0.tmp = vmem(r0++m0):nt }
+
+// V6_vL32b_nt_tmp_pred_ai
+// if (Pv4) Vd32.tmp=vmem(Rt32+#s4):nt
+ {
+ v1=v0
+ if (P0) V0.tmp=vmem(R0+#04):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 28c0c4c0 if (p0) v0.tmp = vmem(r0+#4):nt }
+
+// V6_vL32b_nt_tmp_pred_pi
+// if (Pv4) Vd32.tmp=vmem(Rx32++#s3):nt
+ {
+ v1=v0
+ if (P0) V0.tmp=vmem(R0++#03):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 29c0c3c0 if (p0) v0.tmp = vmem(r0++#3):nt }
+
+// V6_vL32b_nt_tmp_pred_ppu
+// if (Pv4) Vd32.tmp=vmem(Rx32++Mu2):nt
+ {
+ v1=v0
+ if (P0) V0.tmp=vmem(R0++M0):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2bc0c0c0 if (p0) v0.tmp = vmem(r0++m0):nt }
+
+// V6_vL32b_pred_ai
+// if (Pv4) Vd32=vmem(Rt32+#s4)
+ if (P0) V0=vmem(R0+#04)
+# CHECK: 2880c440 { if (p0) v0 = vmem(r0+#4) }
+
+// V6_vL32b_pred_pi
+// if (Pv4) Vd32=vmem(Rx32++#s3)
+ if (P0) V0=vmem(R0++#03)
+# CHECK: 2980c340 { if (p0) v0 = vmem(r0++#3) }
+
+// V6_vL32b_pred_ppu
+// if (Pv4) Vd32=vmem(Rx32++Mu2)
+ if (P0) V0=vmem(R0++M0)
+# CHECK: 2b80c040 { if (p0) v0 = vmem(r0++m0) }
+
+// V6_vL32b_tmp_npred_ai
+// if (!Pv4) Vd32.tmp=vmem(Rt32+#s4)
+ {
+ v1=v0
+ if (!P0) V0.tmp=vmem(R0+#04)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2880c4e0 if (!p0) v0.tmp = vmem(r0+#4) }
+
+// V6_vL32b_tmp_npred_pi
+// if (!Pv4) Vd32.tmp=vmem(Rx32++#s3)
+ {
+ v1=v0
+ if (!P0) V0.tmp=vmem(R0++#03)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2980c3e0 if (!p0) v0.tmp = vmem(r0++#3) }
+
+// V6_vL32b_tmp_npred_ppu
+// if (!Pv4) Vd32.tmp=vmem(Rx32++Mu2)
+ {
+ v1=v0
+ if (!P0) V0.tmp=vmem(R0++M0)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2b80c0e0 if (!p0) v0.tmp = vmem(r0++m0) }
+
+// V6_vL32b_tmp_pred_ai
+// if (Pv4) Vd32.tmp=vmem(Rt32+#s4)
+ {
+ v1=v0
+ if (P0) V0.tmp=vmem(R0+#04)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2880c4c0 if (p0) v0.tmp = vmem(r0+#4) }
+
+// V6_vL32b_tmp_pred_pi
+// if (Pv4) Vd32.tmp=vmem(Rx32++#s3)
+ {
+ v1=v0
+ if (P0) V0.tmp=vmem(R0++#03)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2980c3c0 if (p0) v0.tmp = vmem(r0++#3) }
+
+// V6_vL32b_tmp_pred_ppu
+// if (Pv4) Vd32.tmp=vmem(Rx32++Mu2)
+ {
+ v1=v0
+ if (P0) V0.tmp=vmem(R0++M0)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2b80c0c0 if (p0) v0.tmp = vmem(r0++m0) }
+
+// V6_vlsrb
+// Vd32.ub=vlsr(Vu32.ub,Rt32)
+ V0.ub=vlsr(V0.ub,R0)
+# CHECK: 1980c060 { v0.ub = vlsr(v0.ub,r0) }
+
+// V6_vlutvvbi
+// Vd32.b=vlut32(Vu32.b,Vv32.b,#u3)
+ V0.b=vlut32(V0.b,V0.b,#03)
+# CHECK: 1e20c060 { v0.b = vlut32(v0.b,v0.b,#3) }
+
+// V6_vlutvvb_nm
+// Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8):nomatch
+ V0.b=vlut32(V0.b,V0.b,R0):nomatch
+# CHECK: 1800c060 { v0.b = vlut32(v0.b,v0.b,r0):nomatch }
+
+// V6_vlutvvb_oracci
+// Vx32.b|=vlut32(Vu32.b,Vv32.b,#u3)
+ V0.b|=vlut32(V0.b,V0.b,#03)
+# CHECK: 1cc0e060 { v0.b |= vlut32(v0.b,v0.b,#3) }
+
+// V6_vlutvwhi
+// Vdd32.h=vlut16(Vu32.b,Vv32.h,#u3)
+ V1:0.h=vlut16(V0.b,V0.h,#03)
+# CHECK: 1e60c060 { v1:0.h = vlut16(v0.b,v0.h,#3) }
+
+// V6_vlutvwh_nm
+// Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8):nomatch
+ V1:0.h=vlut16(V0.b,V0.h,R0):nomatch
+# CHECK: 1800c080 { v1:0.h = vlut16(v0.b,v0.h,r0):nomatch }
+
+// V6_vlutvwh_oracci
+// Vxx32.h|=vlut16(Vu32.b,Vv32.h,#u3)
+ V1:0.h|=vlut16(V0.b,V0.h,#03)
+# CHECK: 1ce0e060 { v1:0.h |= vlut16(v0.b,v0.h,#3) }
+
+// V6_vmaxb
+// Vd32.b=vmax(Vu32.b,Vv32.b)
+ V0.b=vmax(V0.b,V0.b)
+# CHECK: 1f20c0a0 { v0.b = vmax(v0.b,v0.b) }
+
+// V6_vminb
+// Vd32.b=vmin(Vu32.b,Vv32.b)
+ V0.b=vmin(V0.b,V0.b)
+# CHECK: 1f20c080 { v0.b = vmin(v0.b,v0.b) }
+
+// V6_vmpauhb
+// Vdd32.w=vmpa(Vuu32.uh,Rt32.b)
+ V1:0.w=vmpa(V1:0.uh,R0.b)
+# CHECK: 1980c0a0 { v1:0.w = vmpa(v1:0.uh,r0.b) }
+
+// V6_vmpauhb_acc
+// Vxx32.w+=vmpa(Vuu32.uh,Rt32.b)
+ V1:0.w+=vmpa(V1:0.uh,R0.b)
+# CHECK: 1980e040 { v1:0.w += vmpa(v1:0.uh,r0.b) }
+
+// V6_vmpyewuh_64
+// Vdd32=vmpye(Vu32.w,Vv32.uh)
+ V1:0=vmpye(V0.w,V0.uh)
+# CHECK: 1ea0c0c0 { v1:0 = vmpye(v0.w,v0.uh) }
+
+// V6_vmpyiwub
+// Vd32.w=vmpyi(Vu32.w,Rt32.ub)
+ V0.w=vmpyi(V0.w,R0.ub)
+# CHECK: 1980c0c0 { v0.w = vmpyi(v0.w,r0.ub) }
+
+// V6_vmpyiwub_acc
+// Vx32.w+=vmpyi(Vu32.w,Rt32.ub)
+ V0.w+=vmpyi(V0.w,R0.ub)
+# CHECK: 1980e020 { v0.w += vmpyi(v0.w,r0.ub) }
+
+// V6_vmpyowh_64_acc
+// Vxx32+=vmpyo(Vu32.w,Vv32.h)
+ V1:0+=vmpyo(V0.w,V0.h)
+# CHECK: 1c20e060 { v1:0 += vmpyo(v0.w,v0.h) }
+
+// V6_vrounduhub
+// Vd32.ub=vround(Vu32.uh,Vv32.uh):sat
+ V0.ub=vround(V0.uh,V0.uh):sat
+# CHECK: 1fe0c060 { v0.ub = vround(v0.uh,v0.uh):sat }
+
+// V6_vrounduwuh
+// Vd32.uh=vround(Vu32.uw,Vv32.uw):sat
+ V0.uh=vround(V0.uw,V0.uw):sat
+# CHECK: 1fe0c080 { v0.uh = vround(v0.uw,v0.uw):sat }
+
+// V6_vsatuwuh
+// Vd32.uh=vsat(Vu32.uw,Vv32.uw)
+ V0.uh=vsat(V0.uw,V0.uw)
+# CHECK: 1f20c0c0 { v0.uh = vsat(v0.uw,v0.uw) }
+
+// V6_vsubbsat
+// Vd32.b=vsub(Vu32.b,Vv32.b):sat
+ V0.b=vsub(V0.b,V0.b):sat
+# CHECK: 1f20c040 { v0.b = vsub(v0.b,v0.b):sat }
+
+// V6_vsubbsat_dv
+// Vdd32.b=vsub(Vuu32.b,Vvv32.b):sat
+ V1:0.b=vsub(V1:0.b,V1:0.b):sat
+# CHECK: 1ea0c020 { v1:0.b = vsub(v1:0.b,v1:0.b):sat }
+
+// V6_vsubcarry
+// Vd32.w=vsub(Vu32.w,Vv32.w,Qx4):carry
+ V0.w=vsub(V0.w,V0.w,Q0):carry
+# CHECK: 1ca0e080 { v0.w = vsub(v0.w,v0.w,q0):carry }
+
+// V6_vsubububb_sat
+// Vd32.ub=vsub(Vu32.ub,Vv32.b):sat
+ V0.ub=vsub(V0.ub,V0.b):sat
+# CHECK: 1ea0c0a0 { v0.ub = vsub(v0.ub,v0.b):sat }
+
+// V6_vsubuwsat
+// Vd32.uw=vsub(Vu32.uw,Vv32.uw):sat
+ V0.uw=vsub(V0.uw,V0.uw):sat
+# CHECK: 1fc0c080 { v0.uw = vsub(v0.uw,v0.uw):sat }
+
+// V6_vsubuwsat_dv
+// Vdd32.uw=vsub(Vuu32.uw,Vvv32.uw):sat
+ V1:0.uw=vsub(V1:0.uw,V1:0.uw):sat
+# CHECK: 1ea0c060 { v1:0.uw = vsub(v1:0.uw,v1:0.uw):sat }
+
+// V6_vwhist128
+// vwhist128
+ vwhist128
+# CHECK: 1e00e480 { vwhist128 }
+
+// V6_vwhist128m
+// vwhist128(#u1)
+ vwhist128(#01)
+# CHECK: 1e00e780 { vwhist128(#1) }
+
+// V6_vwhist128q
+// vwhist128(Qv4)
+ vwhist128(Q0)
+# CHECK: 1e02e480 { vwhist128(q0) }
+
+// V6_vwhist128qm
+// vwhist128(Qv4,#u1)
+ vwhist128(Q0,#01)
+# CHECK: 1e02e780 { vwhist128(q0,#1) }
+
+// V6_vwhist256
+// vwhist256
+ vwhist256
+# CHECK: 1e00e280 { vwhist256 }
+
+// V6_vwhist256q
+// vwhist256(Qv4)
+ vwhist256(Q0)
+# CHECK: 1e02e280 { vwhist256(q0) }
+
+// V6_vwhist256q_sat
+// vwhist256(Qv4):sat
+ vwhist256(Q0):sat
+# CHECK: 1e02e380 { vwhist256(q0):sat }
+
+// V6_vwhist256_sat
+// vwhist256:sat
+ vwhist256:sat
+# CHECK: 1e00e380 { vwhist256:sat }
diff --git a/test/MC/Hexagon/v62_jumps.s b/test/MC/Hexagon/v62_jumps.s
new file mode 100644
index 000000000000..0197ecdd2321
--- /dev/null
+++ b/test/MC/Hexagon/v62_jumps.s
@@ -0,0 +1,13 @@
+# RUN: llvm-mc -arch=hexagon -mcpu=hexagonv62 -filetype=obj %s | llvm-objdump -arch=hexagon -mcpu=hexagonv62 -d - | FileCheck %s
+
+# Verify that a compound is split into single instructions if needed.
+{
+ p0=cmp.eq(R1:0,R3:2)
+ if (!p0.new) jump:nt ltmp
+ r0=r1 ; jump ltmp
+}
+
+# CHECK: 5c204800 { if (!p0.new) jump:nt
+# CHECK: d2804200 p0 = cmp.eq(r1:0,r3:2)
+# CHECK: 58004000 jump
+# CHECK: 7061c000 r0 = r1 }
diff --git a/test/MC/Hexagon/v62a.s b/test/MC/Hexagon/v62a.s
new file mode 100644
index 000000000000..4cc6302f6fab
--- /dev/null
+++ b/test/MC/Hexagon/v62a.s
@@ -0,0 +1,19 @@
+# RUN: llvm-mc -arch=hexagon -mcpu=hexagonv62 -filetype=obj -o - %s | llvm-objdump -arch=hexagon -mcpu=hexagonv62 -d - | FileCheck %s
+
+ r31:30=vabsdiffb(r29:28, r27:26)
+# CHECK: e8fadc1e { r31:30 = vabsdiffb(r29:28,r27:26)
+
+ r25:24=vabsdiffub(r23:22, r21:20)
+# CHECK: e8b4d618 { r25:24 = vabsdiffub(r23:22,r21:20)
+
+ r19:18,p3=vminub(r17:16, r15:14)
+# CHECK: eaeed072 { r19:18,p3 = vminub(r17:16,r15:14)
+
+ r13:12=vtrunehb(r11:10, r9:8)
+# CHECK: c18ac86c { r13:12 = vtrunehb(r11:10,r9:8)
+
+ r7:6=vtrunohb(r5:4, r3:2)
+# CHECK: c184c2a6 { r7:6 = vtrunohb(r5:4,r3:2)
+
+ r1:0=vsplatb(r31)
+# CHECK: 845fc080 { r1:0 = vsplatb(r31)
diff --git a/test/MC/Hexagon/v62a_regs.s b/test/MC/Hexagon/v62a_regs.s
new file mode 100644
index 000000000000..2d31b837afd4
--- /dev/null
+++ b/test/MC/Hexagon/v62a_regs.s
@@ -0,0 +1,44 @@
+# RUN: llvm-mc -arch=hexagon -mcpu=hexagonv62 -filetype=obj %s | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-V62
+# RUN: not llvm-mc -arch=hexagon -mcpu=hexagonv60 -filetype=asm %s 2>%t; FileCheck -check-prefix=CHECK-NOV62 %s < %t
+#
+
+# Ensure that the registers added in v62 are understood.
+
+r0=framelimit
+r0=framekey
+r1:0=c17:16
+
+# CHECK-V62: 6a10c000 { r0 = framelimit }
+# CHECK-V62: 6a11c000 { r0 = framekey }
+# CHECK-V62: 6810c000 { r1:0 = c17:16 }
+# CHECK-NOV62: rror: invalid operand for instruction
+# CHECK-NOV62: rror: invalid operand for instruction
+# CHECK-NOV62: rror: invalid operand for instruction
+
+r0=pktcountlo
+r0=pktcounthi
+r1:0=c19:18
+r1:0=pktcount
+
+# CHECK-V62: 6a12c000 { r0 = pktcountlo }
+# CHECK-V62: 6a13c000 { r0 = pktcounthi }
+# CHECK-V62: 6812c000 { r1:0 = c19:18 }
+# CHECK-V62: 6812c000 { r1:0 = c19:18 }
+# CHECK-NOV62: rror: invalid operand for instruction
+# CHECK-NOV62: rror: invalid operand for instruction
+# CHECK-NOV62: rror: invalid operand for instruction
+# CHECK-NOV62: rror: invalid operand for instruction
+
+r0=utimerlo
+r0=utimerhi
+r1:0=c31:30
+r1:0=UTIMER
+
+# CHECK-V62: 6a1ec000 { r0 = utimerlo }
+# CHECK-V62: 6a1fc000 { r0 = utimerhi }
+# CHECK-V62: 681ec000 { r1:0 = c31:30 }
+# CHECK-V62: 681ec000 { r1:0 = c31:30 }
+# CHECK-NOV62: rror: invalid operand for instruction
+# CHECK-NOV62: rror: invalid operand for instruction
+# CHECK-NOV62: rror: invalid operand for instruction
+# CHECK-NOV62: rror: invalid operand for instruction
diff --git a/test/MC/MachO/darwin-version-min-load-command.s b/test/MC/MachO/darwin-version-min-load-command.s
index 17f3784d6326..7fd4daa40a09 100644
--- a/test/MC/MachO/darwin-version-min-load-command.s
+++ b/test/MC/MachO/darwin-version-min-load-command.s
@@ -26,3 +26,10 @@
// CHECK-TVOS: cmd LC_VERSION_MIN_TVOS
// CHECK-TVOS-NEXT: cmdsize 16
// CHECK-TVOS-NEXT: version 8.0
+
+// CHECK-BRIDGEOS: cmd LC_BUILD_VERSION
+// CHECK-BRIDGEOS-NEXT: cmdsize 24
+// CHECK-BRIDGEOS-NEXT: platform bridgeos
+// CHECK-BRIDGEOS-NEXT: sdk n/a
+// CHECK-BRIDGEOS-NEXT: minos 2.0
+// CHECK-BRIDGEOS-NEXT: ntools 0
diff --git a/test/MC/Mips/bopt-directive.s b/test/MC/Mips/bopt-directive.s
new file mode 100644
index 000000000000..63e2a05281a8
--- /dev/null
+++ b/test/MC/Mips/bopt-directive.s
@@ -0,0 +1,16 @@
+# RUN: llvm-mc -arch=mips -mcpu=mips32 %s 2>&1 | FileCheck %s
+
+# We don't support the bopt option in the integrated assembler. Given its
+# single-pass nature, it would be quite difficult to implement currently.
+
+# Ensure we parse the bopt & nobopt directives and warn in the bopt case.
+
+# CHECK: warning: 'bopt' feature is unsupported
+# CHECK: nop
+.text
+f:
+.set bopt
+g:
+.set nobopt
+nop
+
diff --git a/test/MC/Mips/branch-pseudos-bad.s b/test/MC/Mips/branch-pseudos-bad.s
index 3a0193b2e94b..f2fa74fdcee0 100644
--- a/test/MC/Mips/branch-pseudos-bad.s
+++ b/test/MC/Mips/branch-pseudos-bad.s
@@ -20,6 +20,10 @@ local_label:
bgtu $7, $8, local_label
# CHECK: :[[@LINE-1]]:3: error: pseudo-instruction requires $at, which is not available
+ beql $7, 256, local_label
+# CHECK: :[[@LINE-1]]:3: error: pseudo-instruction requires $at, which is not available
+ bnel $7, 256, local_label
+# CHECK: :[[@LINE-1]]:3: error: pseudo-instruction requires $at, which is not available
bltl $7, $8, local_label
# CHECK: :[[@LINE-1]]:3: error: pseudo-instruction requires $at, which is not available
bltul $7, $8, local_label
diff --git a/test/MC/Mips/elf-debug-section.s b/test/MC/Mips/elf-debug-section.s
new file mode 100644
index 000000000000..6cc901bcb59e
--- /dev/null
+++ b/test/MC/Mips/elf-debug-section.s
@@ -0,0 +1,6 @@
+# RUN: llvm-mc -filetype=obj -triple=mips-linux-gnu -g %s -o - \
+# RUN: | llvm-readobj -s | FileCheck %s
+
+# CHECK: Section {
+# CHECK: Name: .debug_line
+# CHECK-NEXT: Type: SHT_MIPS_DWARF (0x7000001E)
diff --git a/test/MC/Mips/elf_eflags.s b/test/MC/Mips/elf_eflags.s
index 244b07db25fa..b53528c967c3 100644
--- a/test/MC/Mips/elf_eflags.s
+++ b/test/MC/Mips/elf_eflags.s
@@ -2,26 +2,48 @@
# corresponding options (-mcpu=mips32 -> -mips32 for example).
# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips64r6 -target-abi n64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R6 %s
-# MIPSEL-MIPS64R6: Flags [ (0xA0000406)
+# MIPSEL-MIPS64R6: Flags [ (0xA0000404)
# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips64r6 -target-abi n64 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R6-NAN2008 %s
-# MIPSEL-MIPS64R6-NAN2008: Flags [ (0xA0000406)
+# MIPSEL-MIPS64R6-NAN2008: Flags [ (0xA0000404)
# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips64r2 -target-abi n64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R2 %s
# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips64r3 -target-abi n64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R2 %s
# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips64r5 -target-abi n64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R2 %s
-# MIPSEL-MIPS64R2: Flags [ (0x80000006)
+# MIPSEL-MIPS64R2: Flags [ (0x80000004)
# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips64r2 -target-abi n64 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R2-NAN2008 %s
# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips64r3 -target-abi n64 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R2-NAN2008 %s
# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips64r5 -target-abi n64 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R2-NAN2008 %s
-# MIPSEL-MIPS64R2-NAN2008: Flags [ (0x80000406)
+# MIPSEL-MIPS64R2-NAN2008: Flags [ (0x80000404)
# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips64 -target-abi n64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64 %s
-# MIPSEL-MIPS64: Flags [ (0x60000006)
+# MIPSEL-MIPS64: Flags [ (0x60000004)
# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips64 -target-abi n64 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64-NAN2008 %s
-# MIPSEL-MIPS64-NAN2008: Flags [ (0x60000406)
+# MIPSEL-MIPS64-NAN2008: Flags [ (0x60000404)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -position-independent -mcpu=mips64r6 -target-abi n64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R6-PIC %s
+# MIPSEL-MIPS64R6-PIC: Flags [ (0xA0000406)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -position-independent -mcpu=mips64r6 -target-abi n64 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R6-NAN2008-PIC %s
+# MIPSEL-MIPS64R6-NAN2008-PIC: Flags [ (0xA0000406)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -position-independent -mcpu=mips64r2 -target-abi n64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R2-PIC %s
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -position-independent -mcpu=mips64r3 -target-abi n64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R2-PIC %s
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -position-independent -mcpu=mips64r5 -target-abi n64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R2-PIC %s
+# MIPSEL-MIPS64R2-PIC: Flags [ (0x80000006)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -position-independent -mcpu=mips64r2 -target-abi n64 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R2-NAN2008-PIC %s
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -position-independent -mcpu=mips64r3 -target-abi n64 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R2-NAN2008-PIC %s
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -position-independent -mcpu=mips64r5 -target-abi n64 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R2-NAN2008-PIC %s
+# MIPSEL-MIPS64R2-NAN2008-PIC: Flags [ (0x80000406)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -position-independent -mcpu=mips64 -target-abi n64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64-PIC %s
+# MIPSEL-MIPS64-PIC: Flags [ (0x60000006)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -position-independent -mcpu=mips64 -target-abi n64 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64-NAN2008-PIC %s
+# MIPSEL-MIPS64-NAN2008-PIC: Flags [ (0x60000406)
# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips32r6 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS32R6 %s
# MIPSEL-MIPS32R6: Flags [ (0x90001404)
@@ -59,16 +81,28 @@
# MIPS64EL-MIPS64-N32-NAN2008: Flags [ (0x60000424)
# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64r2 -target-abi n64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64R2-N64 %s
-# MIPS64EL-MIPS64R2-N64: Flags [ (0x80000006)
+# MIPS64EL-MIPS64R2-N64: Flags [ (0x80000004)
# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64r2 -target-abi n64 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64R2-N64-NAN2008 %s
-# MIPS64EL-MIPS64R2-N64-NAN2008: Flags [ (0x80000406)
+# MIPS64EL-MIPS64R2-N64-NAN2008: Flags [ (0x80000404)
# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64 %s -target-abi n64 -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64-N64 %s
-# MIPS64EL-MIPS64-N64: Flags [ (0x60000006)
+# MIPS64EL-MIPS64-N64: Flags [ (0x60000004)
# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64 %s -target-abi n64 -mattr=+nan2008 -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64-N64-NAN2008 %s
-# MIPS64EL-MIPS64-N64-NAN2008: Flags [ (0x60000406)
+# MIPS64EL-MIPS64-N64-NAN2008: Flags [ (0x60000404)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -position-independent -mcpu=mips64r2 -target-abi n64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64R2-N64-PIC %s
+# MIPS64EL-MIPS64R2-N64-PIC: Flags [ (0x80000006)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -position-independent -mcpu=mips64r2 -target-abi n64 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64R2-N64-NAN2008-PIC %s
+# MIPS64EL-MIPS64R2-N64-NAN2008-PIC: Flags [ (0x80000406)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -position-independent -mcpu=mips64 %s -target-abi n64 -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64-N64-PIC %s
+# MIPS64EL-MIPS64-N64-PIC: Flags [ (0x60000006)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -position-independent -mcpu=mips64 %s -target-abi n64 -mattr=+nan2008 -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64-N64-NAN2008-PIC %s
+# MIPS64EL-MIPS64-N64-NAN2008-PIC: Flags [ (0x60000406)
# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64r2 -target-abi o32 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64R2-O32 %s
# MIPS64EL-MIPS64R2-O32: Flags [ (0x80001104)
@@ -77,22 +111,40 @@
# MIPS64EL-MIPS64R2-O32-NAN2008: Flags [ (0x80001504)
# RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -mcpu=mips5 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS5 %s
-# MIPS5: Flags [ (0x40000006)
+# MIPS5: Flags [ (0x40000004)
- # RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -mcpu=mips5 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS5-NAN2008 %s
-# MIPS5-NAN2008: Flags [ (0x40000406)
+# RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -mcpu=mips5 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS5-NAN2008 %s
+# MIPS5-NAN2008: Flags [ (0x40000404)
# RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -mcpu=mips4 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS4 %s
-# MIPS4: Flags [ (0x30000006)
+# MIPS4: Flags [ (0x30000004)
- # RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -mcpu=mips4 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS4-NAN2008 %s
-# MIPS4-NAN2008: Flags [ (0x30000406)
+# RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -mcpu=mips4 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS4-NAN2008 %s
+# MIPS4-NAN2008: Flags [ (0x30000404)
# RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -mcpu=mips3 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS3 %s
-# MIPS3: Flags [ (0x20000006)
+# MIPS3: Flags [ (0x20000004)
+
+# RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -mcpu=mips3 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS3-NAN2008 %s
+# MIPS3-NAN2008: Flags [ (0x20000404)
+
+# RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -position-independent -mcpu=mips5 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS5-PIC %s
+# MIPS5-PIC: Flags [ (0x40000006)
- # RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -mcpu=mips3 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS3-NAN2008 %s
-# MIPS3-NAN2008: Flags [ (0x20000406)
+# RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -position-independent -mcpu=mips5 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS5-NAN2008-PIC %s
+# MIPS5-NAN2008-PIC: Flags [ (0x40000406)
+
+# RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -position-independent -mcpu=mips4 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS4-PIC %s
+# MIPS4-PIC: Flags [ (0x30000006)
+
+# RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -position-independent -mcpu=mips4 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS4-NAN2008-PIC %s
+# MIPS4-NAN2008-PIC: Flags [ (0x30000406)
+
+# RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -position-independent -mcpu=mips3 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS3-PIC %s
+# MIPS3-PIC: Flags [ (0x20000006)
+
+# RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -position-independent -mcpu=mips3 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS3-NAN2008-PIC %s
+# MIPS3-NAN2008-PIC: Flags [ (0x20000406)
# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips2 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS2 %s
# MIPSEL-MIPS2: Flags [ (0x10001004)
@@ -103,7 +155,7 @@
# RUN: llvm-mc -filetype=obj -triple mips-unknown-linux -mcpu=mips1 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS1 %s
# MIPS1: Flags [ (0x1004)
- # RUN: llvm-mc -filetype=obj -triple mips-unknown-linux -mcpu=mips1 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS1-NAN2008 %s
+# RUN: llvm-mc -filetype=obj -triple mips-unknown-linux -mcpu=mips1 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS1-NAN2008 %s
# MIPS1-NAN2008: Flags [ (0x1404)
# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64 %s -target-abi o32 -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64-O32 %s
@@ -114,17 +166,34 @@
# Default ABI for MIPS64 is N64 as opposed to GCC/GAS (N32)
# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64r2 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64R2 %s
-# MIPS64EL-MIPS64R2: Flags [ (0x80000006)
+# MIPS64EL-MIPS64R2: Flags [ (0x80000004)
# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64r2 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64R2-NAN2008 %s
-# MIPS64EL-MIPS64R2-NAN2008: Flags [ (0x80000406)
+# MIPS64EL-MIPS64R2-NAN2008: Flags [ (0x80000404)
# Default ABI for MIPS64 is N64 as opposed to GCC/GAS (N32)
# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64 %s
-# MIPS64EL-MIPS64: Flags [ (0x60000006)
+# MIPS64EL-MIPS64: Flags [ (0x60000004)
# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64-NAN2008 %s
-# MIPS64EL-MIPS64-NAN2008: Flags [ (0x60000406)
+# MIPS64EL-MIPS64-NAN2008: Flags [ (0x60000404)
+
+# Default ABI for MIPS64 is N64 as opposed to GCC/GAS (N32)
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -position-independent -mcpu=mips64r2 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64R2-PIC %s
+# MIPS64EL-MIPS64R2-PIC: Flags [ (0x80000006)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -position-independent -mcpu=mips64r2 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64R2-NAN2008-PIC %s
+# MIPS64EL-MIPS64R2-NAN2008-PIC: Flags [ (0x80000406)
+
+# Default ABI for MIPS64 is N64 as opposed to GCC/GAS (N32)
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -position-independent -mcpu=mips64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64-PIC %s
+# MIPS64EL-MIPS64-PIC: Flags [ (0x60000006)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -position-independent -mcpu=mips64 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64-NAN2008-PIC %s
+# MIPS64EL-MIPS64-NAN2008-PIC: Flags [ (0x60000406)
# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=octeon -target-abi n64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-OCTEON %s
-# MIPSEL-OCTEON: Flags [ (0x808B0006)
+# MIPSEL-OCTEON: Flags [ (0x808B0004)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -position-independent -mcpu=octeon -target-abi n64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-OCTEON-PIC %s
+# MIPSEL-OCTEON-PIC: Flags [ (0x808B0006)
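# A reading aid for the flag deltas above, assuming the standard EF_MIPS bit
# assignments: the low e_flags bits are EF_MIPS_NOREORDER (0x1), EF_MIPS_PIC
# (0x2) and EF_MIPS_CPIC (0x4), and EF_MIPS_NAN2008 is 0x400. Dropping 0x2 from
# the old expectations reflects the new non-PIC default, while the added
# -position-independent RUN lines keep it. For the mips3 cases, for example:
#
#   0x20000006 = EF_MIPS_ARCH_3 | EF_MIPS_PIC | EF_MIPS_CPIC      # PIC run
#   0x20000404 = EF_MIPS_ARCH_3 | EF_MIPS_NAN2008 | EF_MIPS_CPIC  # non-PIC, +nan2008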
diff --git a/test/MC/Mips/end-directive.s b/test/MC/Mips/end-directive.s
new file mode 100644
index 000000000000..b2959409e661
--- /dev/null
+++ b/test/MC/Mips/end-directive.s
@@ -0,0 +1,22 @@
+# RUN: llvm-mc -arch=mips -mcpu=mips32 -filetype=obj %s -o - | \
+# RUN: llvm-readobj -symbols | FileCheck %s
+
+# Check that the assembler doesn't choke on .align between a symbol and the
+# .end directive.
+
+ .text
+ .globl a
+ .p2align 2
+ .type a,@function
+ .ent a
+a:
+ addu $2, $5, $4
+ .align 4
+ jr $ra
+ .end a
+$func_end0:
+ .size a, ($func_end0)-a
+
+# CHECK: Name: a
+# CHECK-NEXT: Value: 0x0
+# CHECK-NEXT: Size: 24
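# The expected Size of 24 can be reconstructed from the layout (a sketch,
# assuming the assembler is in the default reorder mode and fills the jr
# delay slot with a nop):
#
#   0x00  addu $2, $5, $4    # 4 bytes
#   0x04  padding to 0x10    # .align 4 is a 2^4-byte boundary here
#   0x10  jr $ra
#   0x14  nop                # delay-slot fill in reorder mode
#   0x18  ($func_end0)       # 0x18 = 24 bytes from a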
diff --git a/test/MC/Mips/expansion-j-sym-pic.s b/test/MC/Mips/expansion-j-sym-pic.s
index b22d60ca4f2f..3c0f5ea7c4d6 100644
--- a/test/MC/Mips/expansion-j-sym-pic.s
+++ b/test/MC/Mips/expansion-j-sym-pic.s
@@ -87,11 +87,11 @@ local_label:
# MICRO: b .text # encoding: [0x94,0x00,A,A]
# MICRO: # fixup A - offset: 0, value: .text, kind: fixup_MICROMIPS_PC16_S1
-# ELF-O32: 10 00 ff ff b 0
-# ELF-O32-NEXT: 00000018: R_MIPS_PC16 .text
+# ELF-O32: 10 00 ff f9 b -24 <local_label>
+# ELF-O32-NEXT: 00 00 00 00 nop
-# ELF-NXX: 10 00 00 00 b 4
-# ELF-NXX-NEXT: R_MIPS_PC16/R_MIPS_NONE/R_MIPS_NONE .text
+# ELF-NXX: 10 00 ff f9 b -24 <local_label>
+# ELF-NXX-NEXT: 00 00 00 00 nop
j 1f
nop
diff --git a/test/MC/Mips/expansion-jal-sym-pic.s b/test/MC/Mips/expansion-jal-sym-pic.s
index d188101d66e2..116d1eb15b34 100644
--- a/test/MC/Mips/expansion-jal-sym-pic.s
+++ b/test/MC/Mips/expansion-jal-sym-pic.s
@@ -151,37 +151,35 @@ local_label:
jal .text
nop
-# FIXME: The .text section MCSymbol isn't created when printing assembly. However,
-# it is created when generating an ELF object file.
# Expanding "jal .text":
-# O32-FIXME: lw $25, %call16(.text)($gp) # encoding: [0x8f,0x99,A,A]
-# O32-FIXME: # fixup A - offset: 0, value: %got(.text), kind: fixup_Mips_GOT_CALL
+# O32: lw $25, %got(.text)($gp) # encoding: [0x8f,0x99,A,A]
+# O32-NEXT: # fixup A - offset: 0, value: %got(.text), kind: fixup_Mips_GOT
# ELF-O32: 8f 99 00 00 lw $25, 0($gp)
-# ELF-O32-NEXT: R_MIPS_CALL16 .text
+# ELF-O32-NEXT: R_MIPS_GOT16 .text
-# N32-FIXME: lw $25, %call16(.text)($gp) # encoding: [0x8f,0x99,A,A]
-# N32-FIXME: # fixup A - offset: 0, value: %call16(.text), kind: fixup_Mips_GOT_DISP
+# N32: lw $25, %got_disp(.text)($gp) # encoding: [0x8f,0x99,A,A]
+# N32-NEXT: # fixup A - offset: 0, value: %got_disp(.text), kind: fixup_Mips_GOT_DISP
# ELF-N32: 8f 99 00 00 lw $25, 0($gp)
-# ELF-N32-NEXT: R_MIPS_CALL16/R_MIPS_NONE/R_MIPS_NONE .text
+# ELF-N32-NEXT: R_MIPS_GOT_DISP/R_MIPS_NONE/R_MIPS_NONE .text
-# N64-FIXME: ld $25, %call16(.text)($gp) # encoding: [0xdf,0x99,A,A]
-# N64-FIXME: # fixup A - offset: 0, value: %call16(.text), kind: fixup_Mips_GOT_DISP
+# N64: ld $25, %got_disp(.text)($gp) # encoding: [0xdf,0x99,A,A]
+# N64-NEXT: # fixup A - offset: 0, value: %got_disp(.text), kind: fixup_Mips_GOT_DISP
# ELF-N64: df 99 00 00 ld $25, 0($gp)
-# ELF-N64-NEXT: R_MIPS_CALL16/R_MIPS_NONE/R_MIPS_NONE .text
+# ELF-N64-NEXT: R_MIPS_GOT_DISP/R_MIPS_NONE/R_MIPS_NONE .text
-# O32-MM-FIXME: lw $25, %got(.text)($gp) # encoding: [0xff,0x3c,A,A]
-# O32-MM-FIXME: # fixup A - offset: 0, value: %got(.text), kind: fixup_MICROMIPS_GOT16
-# O32-MM-FIXME: addiu $25, $25, %lo(.text) # encoding: [0x33,0x39,A,A]
-# O32-MM-FIXME: # fixup A - offset: 0, value: %lo(.text), kind: fixup_MICROMIPS_LO16
+# O32-MM: lw $25, %got(.text)($gp) # encoding: [0xff,0x3c,A,A]
+# O32-MM-NEXT: # fixup A - offset: 0, value: %got(.text), kind: fixup_MICROMIPS_GOT16
+# O32-MM-NEXT: addiu $25, $25, %lo(.text) # encoding: [0x33,0x39,A,A]
+# O32-MM-NEXT: # fixup A - offset: 0, value: %lo(.text), kind: fixup_MICROMIPS_LO16
-# N32-MM-FIXME: lw $25, %got_disp(.text)($gp) # encoding: [0xff,0x3c,A,A]
-# N32-MM-FIXME: # fixup A - offset: 0, value: %got_disp(.text), kind: fixup_MICROMIPS_GOT_DISP
+# N32-MM: lw $25, %got_disp(.text)($gp) # encoding: [0xff,0x3c,A,A]
+# N32-MM-NEXT: # fixup A - offset: 0, value: %got_disp(.text), kind: fixup_MICROMIPS_GOT_DISP
-# N64-MM-FIXME: ld $25, %got_disp(.text)($gp) # encoding: [0xdf,0x99,A,A]
-# N64-MM-FIXME: # fixup A - offset: 0, value: %got_disp(.text), kind: fixup_MICROMIPS_GOT_DISP
+# N64-MM: ld $25, %got_disp(.text)($gp) # encoding: [0xdf,0x99,A,A]
+# N64-MM-NEXT: # fixup A - offset: 0, value: %got_disp(.text), kind: fixup_MICROMIPS_GOT_DISP
# MIPS: jalr $25 # encoding: [0x03,0x20,0xf8,0x09]
# MM: jalr $ra, $25 # encoding: [0x03,0xf9,0x0f,0x3c]
@@ -199,7 +197,7 @@ local_label:
# ELF-O32: 8f 99 00 00 lw $25, 0($gp)
# ELF-O32-NEXT: R_MIPS_GOT16 .text
-# ELF-O32-NEXT: 27 39 00 54 addiu $25, $25, 84
+# ELF-O32-NEXT: 27 39 00 58 addiu $25, $25, 88
# ELF-O32-NEXT: R_MIPS_LO16 .text
# N32: lw $25, %got_disp($tmp0)($gp) # encoding: [0x8f,0x99,A,A]
@@ -241,7 +239,7 @@ local_label:
# ELF-O32: 8f 99 00 00 lw $25, 0($gp)
# ELF-O32-NEXT: R_MIPS_GOT16 .text
-# ELF-O32-NEXT: 27 39 00 60 addiu $25, $25, 96
+# ELF-O32-NEXT: 27 39 00 64 addiu $25, $25, 100
# ELF-O32-NEXT: R_MIPS_LO16 .text
# N32-FIXME: lw $25, %got_disp(forward_local)($gp) # encoding: [0x8f,0x99,A,A]
diff --git a/test/MC/Mips/instalias-imm-expanding.s b/test/MC/Mips/instalias-imm-expanding.s
index 9759dabdc087..b26863169f00 100644
--- a/test/MC/Mips/instalias-imm-expanding.s
+++ b/test/MC/Mips/instalias-imm-expanding.s
@@ -23,6 +23,10 @@ text_label:
# CHECK: add $4, $4, $1 # encoding: [0x20,0x20,0x81,0x00]
add $4, 0xFFFFFFFF
# CHECK: addi $4, $4, -1 # encoding: [0xff,0xff,0x84,0x20]
+ add $5, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK: lui $1, 255 # encoding: [0xff,0x00,0x01,0x3c]
+# CHECK: ori $1, $1, 65295 # encoding: [0x0f,0xff,0x21,0x34]
+# CHECK: add $5, $5, $1 # encoding: [0x20,0x28,0xa1,0x00]
add $4, $5, -0x80000000
# CHECK: lui $4, 32768 # encoding: [0x00,0x80,0x04,0x3c]
@@ -43,6 +47,10 @@ text_label:
# CHECK: add $4, $4, $5 # encoding: [0x20,0x20,0x85,0x00]
add $4, $5, 0xFFFFFFFF
# CHECK: addi $4, $5, -1 # encoding: [0xff,0xff,0xa4,0x20]
+ add $4, $5, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK: lui $4, 255 # encoding: [0xff,0x00,0x04,0x3c]
+# CHECK: ori $4, $4, 65295 # encoding: [0x0f,0xff,0x84,0x34]
+# CHECK: add $4, $4, $5 # encoding: [0x20,0x20,0x85,0x00]
addu $4, -0x80000000
# CHECK: lui $1, 32768 # encoding: [0x00,0x80,0x01,0x3c]
@@ -63,6 +71,10 @@ text_label:
# CHECK: addu $4, $4, $1 # encoding: [0x21,0x20,0x81,0x00]
addu $4, 0xFFFFFFFF
# CHECK: addiu $4, $4, -1 # encoding: [0xff,0xff,0x84,0x24]
+ addu $5, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK: lui $1, 255 # encoding: [0xff,0x00,0x01,0x3c]
+# CHECK: ori $1, $1, 65295 # encoding: [0x0f,0xff,0x21,0x34]
+# CHECK: addu $5, $5, $1 # encoding: [0x21,0x28,0xa1,0x00]
addu $4, $5, -0x80000000
# CHECK: lui $4, 32768 # encoding: [0x00,0x80,0x04,0x3c]
@@ -83,6 +95,10 @@ text_label:
# CHECK: addu $4, $4, $5 # encoding: [0x21,0x20,0x85,0x00]
addu $4, $5, 0xFFFFFFFF
# CHECK: addiu $4, $5, -1 # encoding: [0xff,0xff,0xa4,0x24]
+ addu $4, $5, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK: lui $4, 255 # encoding: [0xff,0x00,0x04,0x3c]
+# CHECK: ori $4, $4, 65295 # encoding: [0x0f,0xff,0x84,0x34]
+# CHECK: addu $4, $4, $5 # encoding: [0x21,0x20,0x85,0x00]
and $4, -0x80000000
# CHECK: lui $1, 32768 # encoding: [0x00,0x80,0x01,0x3c]
@@ -103,6 +119,10 @@ text_label:
and $4, 0xFFFFFFFF
# CHECK: addiu $1, $zero, -1 # encoding: [0xff,0xff,0x01,0x24]
# CHECK: and $4, $4, $1 # encoding: [0x24,0x20,0x81,0x00]
+ and $5, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK: lui $1, 255 # encoding: [0xff,0x00,0x01,0x3c]
+# CHECK: ori $1, $1, 65295 # encoding: [0x0f,0xff,0x21,0x34]
+# CHECK: and $5, $5, $1 # encoding: [0x24,0x28,0xa1,0x00]
and $4, $5, -0x80000000
# CHECK: lui $4, 32768 # encoding: [0x00,0x80,0x04,0x3c]
@@ -124,6 +144,10 @@ text_label:
and $4, $5, 0xFFFFFFFF
# CHECK: addiu $4, $zero, -1 # encoding: [0xff,0xff,0x04,0x24]
# CHECK: and $4, $4, $5 # encoding: [0x24,0x20,0x85,0x00]
+ and $4, $5, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK: lui $4, 255 # encoding: [0xff,0x00,0x04,0x3c]
+# CHECK: ori $4, $4, 65295 # encoding: [0x0f,0xff,0x84,0x34]
+# CHECK: and $4, $4, $5 # encoding: [0x24,0x20,0x85,0x00]
nor $4, $5, 0
# CHECK: addiu $4, $zero, 0 # encoding: [0x00,0x00,0x04,0x24]
@@ -144,6 +168,34 @@ text_label:
# CHECK: lui $4, 1 # encoding: [0x01,0x00,0x04,0x3c]
# CHECK: ori $4, $4, 42405 # encoding: [0xa5,0xa5,0x84,0x34]
# CHECK: nor $4, $4, $5 # encoding: [0x27,0x20,0x85,0x00]
+ nor $4, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK: lui $1, 255 # encoding: [0xff,0x00,0x01,0x3c]
+# CHECK: ori $1, $1, 65295 # encoding: [0x0f,0xff,0x21,0x34]
+# CHECK: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+
+ nor $4, 0
+# CHECK: addiu $1, $zero, 0 # encoding: [0x00,0x00,0x01,0x24]
+# CHECK: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, 1
+# CHECK: addiu $1, $zero, 1 # encoding: [0x01,0x00,0x01,0x24]
+# CHECK: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, 0x8000
+# CHECK: ori $1, $zero, 32768 # encoding: [0x00,0x80,0x01,0x34]
+# CHECK: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, -0x8000
+# CHECK: addiu $1, $zero, -32768 # encoding: [0x00,0x80,0x01,0x24]
+# CHECK: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, 0x10000
+# CHECK: lui $1, 1 # encoding: [0x01,0x00,0x01,0x3c]
+# CHECK: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, 0x1a5a5
+# CHECK: lui $1, 1 # encoding: [0x01,0x00,0x01,0x3c]
+# CHECK: ori $1, $1, 42405 # encoding: [0xa5,0xa5,0x21,0x34]
+# CHECK: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK: lui $1, 255 # encoding: [0xff,0x00,0x01,0x3c]
+# CHECK: ori $1, $1, 65295 # encoding: [0x0f,0xff,0x21,0x34]
+# CHECK: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
or $4, -0x80000000
# CHECK: lui $1, 32768 # encoding: [0x00,0x80,0x01,0x3c]
@@ -165,6 +217,10 @@ text_label:
or $4, 0xFFFFFFFF
# CHECK: addiu $1, $zero, -1 # encoding: [0xff,0xff,0x01,0x24]
# CHECK: or $4, $4, $1 # encoding: [0x25,0x20,0x81,0x00]
+ or $5, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK: lui $1, 255 # encoding: [0xff,0x00,0x01,0x3c]
+# CHECK: ori $1, $1, 65295 # encoding: [0x0f,0xff,0x21,0x34]
+# CHECK: or $5, $5, $1 # encoding: [0x25,0x28,0xa1,0x00]
or $4, $5, -0x80000000
# CHECK: lui $4, 32768 # encoding: [0x00,0x80,0x04,0x3c]
@@ -186,6 +242,10 @@ text_label:
or $4, $5, 0xFFFFFFFF
# CHECK: addiu $4, $zero, -1 # encoding: [0xff,0xff,0x04,0x24]
# CHECK: or $4, $4, $5 # encoding: [0x25,0x20,0x85,0x00]
+ or $4, $5, ~(0xF0000000|0x0F000000|0x000000F0)
+# CHECK: lui $4, 255 # encoding: [0xff,0x00,0x04,0x3c]
+# CHECK: ori $4, $4, 65295 # encoding: [0x0f,0xff,0x84,0x34]
+# CHECK: or $4, $4, $5 # encoding: [0x25,0x20,0x85,0x00]
slt $4, $5, -0x80000000
# CHECK: lui $4, 32768 # encoding: [0x00,0x80,0x04,0x3c]
@@ -205,6 +265,10 @@ text_label:
# CHECK: slt $4, $4, $5 # encoding: [0x2a,0x20,0x85,0x00]
slt $4, $5, 0xFFFFFFFF
# CHECK: slti $4, $5, -1 # encoding: [0xff,0xff,0xa4,0x28]
+ slt $4, $5, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK: lui $4, 255 # encoding: [0xff,0x00,0x04,0x3c]
+# CHECK: ori $4, $4, 65295 # encoding: [0x0f,0xff,0x84,0x34]
+# CHECK: slt $4, $4, $5 # encoding: [0x2a,0x20,0x85,0x00]
sltu $4, $5, -0x80000000
# CHECK: lui $4, 32768 # encoding: [0x00,0x80,0x04,0x3c]
@@ -224,6 +288,10 @@ text_label:
# CHECK: sltu $4, $4, $5 # encoding: [0x2b,0x20,0x85,0x00]
sltu $4, $5, 0xFFFFFFFF
# CHECK: sltiu $4, $5, -1 # encoding: [0xff,0xff,0xa4,0x2c]
+ sltu $4, $5, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK: lui $4, 255 # encoding: [0xff,0x00,0x04,0x3c]
+# CHECK: ori $4, $4, 65295 # encoding: [0x0f,0xff,0x84,0x34]
+# CHECK: sltu $4, $4, $5 # encoding: [0x2b,0x20,0x85,0x00]
xor $4, -0x80000000
# CHECK: lui $1, 32768 # encoding: [0x00,0x80,0x01,0x3c]
@@ -244,6 +312,10 @@ text_label:
xor $4, 0xFFFFFFFF
# CHECK: addiu $1, $zero, -1 # encoding: [0xff,0xff,0x01,0x24]
# CHECK: xor $4, $4, $1 # encoding: [0x26,0x20,0x81,0x00]
+ xor $4, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK: lui $1, 255 # encoding: [0xff,0x00,0x01,0x3c]
+# CHECK: ori $1, $1, 65295 # encoding: [0x0f,0xff,0x21,0x34]
+# CHECK: xor $4, $4, $1 # encoding: [0x26,0x20,0x81,0x00]
xor $4, $5, -0x80000000
# CHECK: lui $4, 32768 # encoding: [0x00,0x80,0x04,0x3c]
@@ -254,7 +326,7 @@ text_label:
# CHECK: xor $4, $4, $5 # encoding: [0x26,0x20,0x85,0x00]
xor $4, $5, -0x8000
# CHECK: addiu $4, $zero, -32768 # encoding: [0x00,0x80,0x04,0x24]
-# CHECK: xor $4, $4, $5 # encoding: [0x26,0x20,0x85,0x00]
+# CHECK: xor $4, $4, $5 # encoding: [0x26,0x20,0x85,0x00]
xor $4, $5, 0
# CHECK: xori $4, $5, 0 # encoding: [0x00,0x00,0xa4,0x38]
xor $4, $5, 0xFFFF
@@ -265,3 +337,7 @@ text_label:
xor $4, $5, 0xFFFFFFFF
# CHECK: addiu $4, $zero, -1 # encoding: [0xff,0xff,0x04,0x24]
# CHECK: xor $4, $4, $5 # encoding: [0x26,0x20,0x85,0x00]
+ xor $4, $5, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK: lui $4, 255 # encoding: [0xff,0x00,0x04,0x3c]
+# CHECK: ori $4, $4, 65295 # encoding: [0x0f,0xff,0x84,0x34]
+# CHECK: xor $4, $4, $5 # encoding: [0x26,0x20,0x85,0x00]
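# All of the new cases share one 32-bit immediate, and the lui/ori pair in the
# checks is just its two halves: ~(0xf0000000|0x0f000000|0x000000f0) =
# ~0xff0000f0 = 0x00ffff0f, so the high half is 0x00ff = 255 and the low half
# is 0xff0f = 65295. The materialization (into $1, or straight into the
# destination register for the three-operand forms) is therefore:
#
#   lui  $1, 255        # $1 = 0x00ff0000
#   ori  $1, $1, 65295  # $1 = 0x00ffff0f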
diff --git a/test/MC/Mips/macro-bcc-imm.s b/test/MC/Mips/macro-bcc-imm.s
index fbc4662d6833..ebc4cd2ce189 100644
--- a/test/MC/Mips/macro-bcc-imm.s
+++ b/test/MC/Mips/macro-bcc-imm.s
@@ -2,7 +2,45 @@
# RUN: FileCheck %s --check-prefix=ALL
.text
-foo: # ALL-LABEL: foo:
+foo:
+ beql $a2, 0x1ffff, foo # ALL: lui $1, 1
+ # ALL: ori $1, $1, 65535
+ # ALL: beql $6, $1, foo
+ # ALL: # fixup A - offset: 0, value: foo-4, kind: fixup_Mips_PC16
+ # ALL: nop
+ beql $a2, -4096, foo # ALL: addiu $1, $zero, -4096
+ # ALL: beql $6, $1, foo
+ # ALL: # fixup A - offset: 0, value: foo-4, kind: fixup_Mips_PC16
+ beql $a2, -0x10000, foo # ALL: lui $1, 65535
+ # ALL: beql $6, $1, foo
+ # ALL: # fixup A - offset: 0, value: foo-4, kind: fixup_Mips_PC16
+ beql $a2, 16, foo # ALL: addiu $1, $zero, 16
+ # ALL: beql $6, $1, foo
+ # ALL: # fixup A - offset: 0, value: foo-4, kind: fixup_Mips_PC16
+ # ALL: nop
+ bnel $a2, 0x1ffff, foo # ALL: lui $1, 1
+ # ALL: ori $1, $1, 65535
+ # ALL: bnel $6, $1, foo
+ # ALL: # fixup A - offset: 0, value: foo-4, kind: fixup_Mips_PC16
+ # ALL: nop
+ bnel $a2, -4096, foo # ALL: addiu $1, $zero, -4096
+ # ALL: bnel $6, $1, foo
+ # ALL: # fixup A - offset: 0, value: foo-4, kind: fixup_Mips_PC16
+ bnel $a2, -0x10000, foo # ALL: lui $1, 65535
+ # ALL: bnel $6, $1, foo
+ # ALL: # fixup A - offset: 0, value: foo-4, kind: fixup_Mips_PC16
+ bnel $a2, 16, foo # ALL: addiu $1, $zero, 16
+ # ALL: bnel $6, $1, foo
+ # ALL: # fixup A - offset: 0, value: foo-4, kind: fixup_Mips_PC16
+ # ALL: nop
+ beql $a2, 32767, foo # ALL: addiu $1, $zero, 32767
+ # ALL: beql $6, $1, foo
+ # ALL: # fixup A - offset: 0, value: foo-4, kind: fixup_Mips_PC16
+ # ALL: nop
+ bnel $a2, 32768, foo # ALL: ori $1, $zero, 32768
+ # ALL: bnel $6, $1, foo
+ # ALL: # fixup A - offset: 0, value: foo-4, kind: fixup_Mips_PC16
+ # ALL: nop
blt $a2, 16, foo # ALL: addiu $1, $zero, 16
# ALL: slt $1, $6, $1
# ALL: bnez $1, foo
diff --git a/test/MC/Mips/macro-ddiv.s b/test/MC/Mips/macro-ddiv.s
index d36e6998d603..44650d7fdba3 100644
--- a/test/MC/Mips/macro-ddiv.s
+++ b/test/MC/Mips/macro-ddiv.s
@@ -1,126 +1,354 @@
-# RUN: llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips64r2 | \
+# RUN: llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64r2 | \
# RUN: FileCheck %s --check-prefix=CHECK-NOTRAP
-# RUN: llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips64r2 \
+# RUN: llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64r2 \
# RUN: -mattr=+use-tcc-in-div | FileCheck %s --check-prefix=CHECK-TRAP
- ddiv $25, $11
-# CHECK-NOTRAP: bne $11, $zero, 8 # encoding: [0x15,0x60,0x00,0x02]
+ ddiv $25,$11
+# CHECK-NOTRAP: bne $11, $zero, .Ltmp0 # encoding: [0x15,0x60,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: .Ltmp0-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: ddiv $zero, $25, $11 # encoding: [0x03,0x2b,0x00,0x1e]
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-NOTRAP: .Ltmp0:
# CHECK-NOTRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
-# CHECK-NOTRAP: bne $11, $1, 20 # encoding: [0x15,0x61,0x00,0x05]
+# CHECK-NOTRAP: bne $11, $1, .Ltmp1 # encoding: [0x15,0x61,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: .Ltmp1-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: addiu $1, $zero, 1 # encoding: [0x24,0x01,0x00,0x01]
# CHECK-NOTRAP: dsll32 $1, $1, 31 # encoding: [0x00,0x01,0x0f,0xfc]
-# CHECK-NOTRAP: bne $25, $1, 8 # encoding: [0x17,0x21,0x00,0x02]
+# CHECK-NOTRAP: bne $25, $1, .Ltmp1 # encoding: [0x17,0x21,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: .Ltmp1-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: sll $zero, $zero, 0 # encoding: [0x00,0x00,0x00,0x00]
# CHECK-NOTRAP: break 6 # encoding: [0x00,0x06,0x00,0x0d]
+# CHECK-NOTRAP: .Ltmp1:
# CHECK-NOTRAP: mflo $25 # encoding: [0x00,0x00,0xc8,0x12]
+# CHECK-TRAP: teq $11, $zero, 7 # encoding: [0x01,0x60,0x01,0xf4]
+# CHECK-TRAP: ddiv $zero, $25, $11 # encoding: [0x03,0x2b,0x00,0x1e]
+# CHECK-TRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
+# CHECK-TRAP: bne $11, $1, .Ltmp0 # encoding: [0x15,0x61,A,A]
+# CHECK-TRAP: # fixup A - offset: 0, value: .Ltmp0-4, kind: fixup_Mips_PC16
+# CHECK-TRAP: addiu $1, $zero, 1 # encoding: [0x24,0x01,0x00,0x01]
+# CHECK-TRAP: dsll32 $1, $1, 31 # encoding: [0x00,0x01,0x0f,0xfc]
+# CHECK-TRAP: teq $25, $1, 6 # encoding: [0x03,0x21,0x01,0xb4]
+# CHECK-TRAP: .Ltmp0:
+# CHECK-TRAP: mflo $25 # encoding: [0x00,0x00,0xc8,0x12]
+
ddiv $24,$12
-# CHECK-NOTRAP: bne $12, $zero, 8 # encoding: [0x15,0x80,0x00,0x02]
+# CHECK-NOTRAP: bne $12, $zero, .Ltmp2 # encoding: [0x15,0x80,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: .Ltmp2-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: ddiv $zero, $24, $12 # encoding: [0x03,0x0c,0x00,0x1e]
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-NOTRAP: .Ltmp2:
# CHECK-NOTRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
-# CHECK-NOTRAP: bne $12, $1, 20 # encoding: [0x15,0x81,0x00,0x05]
+# CHECK-NOTRAP: bne $12, $1, .Ltmp3 # encoding: [0x15,0x81,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: .Ltmp3-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: addiu $1, $zero, 1 # encoding: [0x24,0x01,0x00,0x01]
# CHECK-NOTRAP: dsll32 $1, $1, 31 # encoding: [0x00,0x01,0x0f,0xfc]
-# CHECK-NOTRAP: bne $24, $1, 8 # encoding: [0x17,0x01,0x00,0x02]
+# CHECK-NOTRAP: bne $24, $1, .Ltmp3 # encoding: [0x17,0x01,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: .Ltmp3-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: sll $zero, $zero, 0 # encoding: [0x00,0x00,0x00,0x00]
# CHECK-NOTRAP: break 6 # encoding: [0x00,0x06,0x00,0x0d]
+# CHECK-NOTRAP: .Ltmp3:
# CHECK-NOTRAP: mflo $24 # encoding: [0x00,0x00,0xc0,0x12]
- ddiv $25,$0
-# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
-
- ddiv $0,$9
-# CHECK-NOTRAP: bne $9, $zero, 8 # encoding: [0x15,0x20,0x00,0x02]
-# CHECK-NOTRAP: ddiv $zero, $zero, $9 # encoding: [0x00,0x09,0x00,0x1e]
-# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
-# CHECK-NOTRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
-# CHECK-NOTRAP: bne $9, $1, 20 # encoding: [0x15,0x21,0x00,0x05]
-# CHECK-NOTRAP: addiu $1, $zero, 1 # encoding: [0x24,0x01,0x00,0x01]
-# CHECK-NOTRAP: dsll32 $1, $1, 31 # encoding: [0x00,0x01,0x0f,0xfc]
-# CHECK-NOTRAP: bne $zero, $1, 8 # encoding: [0x14,0x01,0x00,0x02]
-# CHECK-NOTRAP: sll $zero, $zero, 0 # encoding: [0x00,0x00,0x00,0x00]
-# CHECK-NOTRAP: break 6 # encoding: [0x00,0x06,0x00,0x0d]
-# CHECK-NOTRAP: mflo $zero # encoding: [0x00,0x00,0x00,0x12]
-
- ddiv $0,$0
-# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
-
- ddiv $4,$5,$6
-# CHECK-NOTRAP: bne $6, $zero, 8 # encoding: [0x14,0xc0,0x00,0x02]
-# CHECK-NOTRAP: ddiv $zero, $5, $6 # encoding: [0x00,0xa6,0x00,0x1e]
-# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
-# CHECK-NOTRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
-# CHECK-NOTRAP: bne $6, $1, 20 # encoding: [0x14,0xc1,0x00,0x05]
-# CHECK-NOTRAP: addiu $1, $zero, 1 # encoding: [0x24,0x01,0x00,0x01]
-# CHECK-NOTRAP: dsll32 $1, $1, 31 # encoding: [0x00,0x01,0x0f,0xfc]
-# CHECK-NOTRAP: bne $5, $1, 8 # encoding: [0x14,0xa1,0x00,0x02]
-# CHECK-NOTRAP: sll $zero, $zero, 0 # encoding: [0x00,0x00,0x00,0x00]
-# CHECK-NOTRAP: break 6 # encoding: [0x00,0x06,0x00,0x0d]
-# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
-
- ddiv $4,$5,$0
-# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
-
- ddiv $4,$0,$0
-# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
-
- ddiv $0, $4, $5
-# CHECK-NOTRAP: ddiv $zero, $4, $5 # encoding: [0x00,0x85,0x00,0x1e]
-
- ddiv $25,$11
-# CHECK-TRAP: teq $11, $zero, 7 # encoding: [0x01,0x60,0x01,0xf4]
-# CHECK-TRAP: ddiv $zero, $25, $11 # encoding: [0x03,0x2b,0x00,0x1e]
-# CHECK-TRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
-# CHECK-TRAP: bne $11, $1, 12 # encoding: [0x15,0x61,0x00,0x03]
-# CHECK-TRAP: addiu $1, $zero, 1 # encoding: [0x24,0x01,0x00,0x01]
-# CHECK-TRAP: dsll32 $1, $1, 31 # encoding: [0x00,0x01,0x0f,0xfc]
-# CHECK-TRAP: teq $25, $1, 6 # encoding: [0x03,0x21,0x01,0xb4]
-# CHECK-TRAP: mflo $25 # encoding: [0x00,0x00,0xc8,0x12]
-
- ddiv $24,$12
# CHECK-TRAP: teq $12, $zero, 7 # encoding: [0x01,0x80,0x01,0xf4]
# CHECK-TRAP: ddiv $zero, $24, $12 # encoding: [0x03,0x0c,0x00,0x1e]
# CHECK-TRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
-# CHECK-TRAP: bne $12, $1, 12 # encoding: [0x15,0x81,0x00,0x03]
+# CHECK-TRAP: bne $12, $1, .Ltmp1 # encoding: [0x15,0x81,A,A]
+# CHECK-TRAP: # fixup A - offset: 0, value: .Ltmp1-4, kind: fixup_Mips_PC16
# CHECK-TRAP: addiu $1, $zero, 1 # encoding: [0x24,0x01,0x00,0x01]
# CHECK-TRAP: dsll32 $1, $1, 31 # encoding: [0x00,0x01,0x0f,0xfc]
# CHECK-TRAP: teq $24, $1, 6 # encoding: [0x03,0x01,0x01,0xb4]
+# CHECK-TRAP: .Ltmp1:
# CHECK-TRAP: mflo $24 # encoding: [0x00,0x00,0xc0,0x12]
ddiv $25,$0
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
ddiv $0,$9
+# CHECK-NOTRAP: ddiv $zero, $zero, $9 # encoding: [0x00,0x09,0x00,0x1e]
+
# CHECK-TRAP: teq $9, $zero, 7 # encoding: [0x01,0x20,0x01,0xf4]
# CHECK-TRAP: ddiv $zero, $zero, $9 # encoding: [0x00,0x09,0x00,0x1e]
# CHECK-TRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
-# CHECK-TRAP: bne $9, $1, 12 # encoding: [0x15,0x21,0x00,0x03]
+# CHECK-TRAP: bne $9, $1, .Ltmp2 # encoding: [0x15,0x21,A,A]
+# CHECK-TRAP: # fixup A - offset: 0, value: .Ltmp2-4, kind: fixup_Mips_PC16
# CHECK-TRAP: addiu $1, $zero, 1 # encoding: [0x24,0x01,0x00,0x01]
# CHECK-TRAP: dsll32 $1, $1, 31 # encoding: [0x00,0x01,0x0f,0xfc]
# CHECK-TRAP: teq $zero, $1, 6 # encoding: [0x00,0x01,0x01,0xb4]
+# CHECK-TRAP: .Ltmp2:
# CHECK-TRAP: mflo $zero # encoding: [0x00,0x00,0x00,0x12]
ddiv $0,$0
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
- ddiv $4,$5,$6
+ ddiv $4,0
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
+
+ ddiv $0,0
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
+
+ ddiv $4,1
+# CHECK-NOTRAP: move $4, $4 # encoding: [0x00,0x80,0x20,0x25]
+# CHECK-TRAP: move $4, $4 # encoding: [0x00,0x80,0x20,0x25]
+
+ ddiv $4,-1
+# CHECK-NOTRAP: dneg $4, $4 # encoding: [0x00,0x04,0x20,0x2e]
+# CHECK-TRAP: dneg $4, $4 # encoding: [0x00,0x04,0x20,0x2e]
+
+ ddiv $4,2
+# CHECK-NOTRAP: addiu $1, $zero, 2 # encoding: [0x24,0x01,0x00,0x02]
+# CHECK-NOTRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, 2 # encoding: [0x24,0x01,0x00,0x02]
+# CHECK-TRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,0x8000
+# CHECK-NOTRAP: ori $1, $zero, 32768 # encoding: [0x34,0x01,0x80,0x00]
+# CHECK-NOTRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: ori $1, $zero, 32768 # encoding: [0x34,0x01,0x80,0x00]
+# CHECK-TRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,-0x8000
+# CHECK-NOTRAP: addiu $1, $zero, -32768 # encoding: [0x24,0x01,0x80,0x00]
+# CHECK-NOTRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, -32768 # encoding: [0x24,0x01,0x80,0x00]
+# CHECK-TRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,0x10000
+# CHECK-NOTRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-NOTRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-TRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,0x1a5a5
+# CHECK-NOTRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-NOTRAP: ori $1, $1, 42405 # encoding: [0x34,0x21,0xa5,0xa5]
+# CHECK-NOTRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-TRAP: ori $1, $1, 42405 # encoding: [0x34,0x21,0xa5,0xa5]
+# CHECK-TRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,0xfffffff
+# CHECK-NOTRAP: lui $1, 4095 # encoding: [0x3c,0x01,0x0f,0xff]
+# CHECK-NOTRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-NOTRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 4095 # encoding: [0x3c,0x01,0x0f,0xff]
+# CHECK-TRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-TRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,0x10000000
+# CHECK-NOTRAP: lui $1, 4096 # encoding: [0x3c,0x01,0x10,0x00]
+# CHECK-NOTRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 4096 # encoding: [0x3c,0x01,0x10,0x00]
+# CHECK-TRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,0xfffffffe
+# CHECK-NOTRAP: ori $1, $zero, 65535 # encoding: [0x34,0x01,0xff,0xff]
+# CHECK-NOTRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-NOTRAP: ori $1, $1, 65534 # encoding: [0x34,0x21,0xff,0xfe]
+# CHECK-NOTRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: ori $1, $zero, 65535 # encoding: [0x34,0x01,0xff,0xff]
+# CHECK-TRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-TRAP: ori $1, $1, 65534 # encoding: [0x34,0x21,0xff,0xfe]
+# CHECK-TRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,0xffffffff
+# CHECK-NOTRAP: lui $1, 65535 # encoding: [0x3c,0x01,0xff,0xff]
+# CHECK-NOTRAP: dsrl32 $1, $1, 0 # encoding: [0x00,0x01,0x08,0x3e]
+# CHECK-NOTRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 65535 # encoding: [0x3c,0x01,0xff,0xff]
+# CHECK-TRAP: dsrl32 $1, $1, 0 # encoding: [0x00,0x01,0x08,0x3e]
+# CHECK-TRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,0xfffffffff
+# CHECK-NOTRAP: addiu $1, $zero, 15 # encoding: [0x24,0x01,0x00,0x0f]
+# CHECK-NOTRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-NOTRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-NOTRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-NOTRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-NOTRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, 15 # encoding: [0x24,0x01,0x00,0x0f]
+# CHECK-TRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-TRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-TRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-TRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-TRAP: ddiv $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,$5,$6
+# CHECK-NOTRAP: bne $6, $zero, .Ltmp6 # encoding: [0x14,0xc0,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: .Ltmp6-4, kind: fixup_Mips_PC16
+# CHECK-NOTRAP: ddiv $zero, $5, $6 # encoding: [0x00,0xa6,0x00,0x1e]
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-NOTRAP: .Ltmp6:
+# CHECK-NOTRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
+# CHECK-NOTRAP: bne $6, $1, .Ltmp7 # encoding: [0x14,0xc1,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: .Ltmp7-4, kind: fixup_Mips_PC16
+# CHECK-NOTRAP: addiu $1, $zero, 1 # encoding: [0x24,0x01,0x00,0x01]
+# CHECK-NOTRAP: dsll32 $1, $1, 31 # encoding: [0x00,0x01,0x0f,0xfc]
+# CHECK-NOTRAP: bne $5, $1, .Ltmp7 # encoding: [0x14,0xa1,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: .Ltmp7-4, kind: fixup_Mips_PC16
+# CHECK-NOTRAP: sll $zero, $zero, 0 # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-NOTRAP: break 6 # encoding: [0x00,0x06,0x00,0x0d]
+# CHECK-NOTRAP: .Ltmp7:
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
# CHECK-TRAP: teq $6, $zero, 7 # encoding: [0x00,0xc0,0x01,0xf4]
# CHECK-TRAP: ddiv $zero, $5, $6 # encoding: [0x00,0xa6,0x00,0x1e]
# CHECK-TRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
-# CHECK-TRAP: bne $6, $1, 12 # encoding: [0x14,0xc1,0x00,0x03]
+# CHECK-TRAP: bne $6, $1, .Ltmp3 # encoding: [0x14,0xc1,A,A]
+# CHECK-TRAP: # fixup A - offset: 0, value: .Ltmp3-4, kind: fixup_Mips_PC16
# CHECK-TRAP: addiu $1, $zero, 1 # encoding: [0x24,0x01,0x00,0x01]
# CHECK-TRAP: dsll32 $1, $1, 31 # encoding: [0x00,0x01,0x0f,0xfc]
# CHECK-TRAP: teq $5, $1, 6 # encoding: [0x00,0xa1,0x01,0xb4]
+# CHECK-TRAP: .Ltmp3:
# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
- ddiv $4,$5,$0
+ ddiv $4,$5,$0
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
- ddiv $4,$0,$0
+ ddiv $4,$0,$0
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
- ddiv $0, $4, $5
+ ddiv $0,$4,$5
+# CHECK-NOTRAP: ddiv $zero, $4, $5 # encoding: [0x00,0x85,0x00,0x1e]
# CHECK-TRAP: ddiv $zero, $4, $5 # encoding: [0x00,0x85,0x00,0x1e]
+
+ ddiv $4,$0,0
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
+
+ ddiv $4,$5,0
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
+
+ ddiv $4,$5,1
+# CHECK-NOTRAP: move $4, $5 # encoding: [0x00,0xa0,0x20,0x25]
+# CHECK-TRAP: move $4, $5 # encoding: [0x00,0xa0,0x20,0x25]
+
+ ddiv $4,$5,-1
+# CHECK-NOTRAP: dneg $4, $5 # encoding: [0x00,0x05,0x20,0x2e]
+# CHECK-TRAP: dneg $4, $5 # encoding: [0x00,0x05,0x20,0x2e]
+
+ ddiv $4,$5,2
+# CHECK-NOTRAP: addiu $1, $zero, 2 # encoding: [0x24,0x01,0x00,0x02]
+# CHECK-NOTRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, 2 # encoding: [0x24,0x01,0x00,0x02]
+# CHECK-TRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,$5,0x8000
+# CHECK-NOTRAP: ori $1, $zero, 32768 # encoding: [0x34,0x01,0x80,0x00]
+# CHECK-NOTRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: ori $1, $zero, 32768 # encoding: [0x34,0x01,0x80,0x00]
+# CHECK-TRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,$5,-0x8000
+# CHECK-NOTRAP: addiu $1, $zero, -32768 # encoding: [0x24,0x01,0x80,0x00]
+# CHECK-NOTRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, -32768 # encoding: [0x24,0x01,0x80,0x00]
+# CHECK-TRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,$5,0x10000
+# CHECK-NOTRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-NOTRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-TRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,$5,0x1a5a5
+# CHECK-NOTRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-NOTRAP: ori $1, $1, 42405 # encoding: [0x34,0x21,0xa5,0xa5]
+# CHECK-NOTRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-TRAP: ori $1, $1, 42405 # encoding: [0x34,0x21,0xa5,0xa5]
+# CHECK-TRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,$5,0xfffffff
+# CHECK-NOTRAP: lui $1, 4095 # encoding: [0x3c,0x01,0x0f,0xff]
+# CHECK-NOTRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-NOTRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 4095 # encoding: [0x3c,0x01,0x0f,0xff]
+# CHECK-TRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-TRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,$5,0x10000000
+# CHECK-NOTRAP: lui $1, 4096 # encoding: [0x3c,0x01,0x10,0x00]
+# CHECK-NOTRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 4096 # encoding: [0x3c,0x01,0x10,0x00]
+# CHECK-TRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,$5,0xfffffffe
+# CHECK-NOTRAP: ori $1, $zero, 65535 # encoding: [0x34,0x01,0xff,0xff]
+# CHECK-NOTRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-NOTRAP: ori $1, $1, 65534 # encoding: [0x34,0x21,0xff,0xfe]
+# CHECK-NOTRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: ori $1, $zero, 65535 # encoding: [0x34,0x01,0xff,0xff]
+# CHECK-TRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-TRAP: ori $1, $1, 65534 # encoding: [0x34,0x21,0xff,0xfe]
+# CHECK-TRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,$5,0xffffffff
+# CHECK-NOTRAP: lui $1, 65535 # encoding: [0x3c,0x01,0xff,0xff]
+# CHECK-NOTRAP: dsrl32 $1, $1, 0 # encoding: [0x00,0x01,0x08,0x3e]
+# CHECK-NOTRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 65535 # encoding: [0x3c,0x01,0xff,0xff]
+# CHECK-TRAP: dsrl32 $1, $1, 0 # encoding: [0x00,0x01,0x08,0x3e]
+# CHECK-TRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddiv $4,$5,0xfffffffff
+# CHECK-NOTRAP: addiu $1, $zero, 15 # encoding: [0x24,0x01,0x00,0x0f]
+# CHECK-NOTRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-NOTRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-NOTRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-NOTRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-NOTRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, 15 # encoding: [0x24,0x01,0x00,0x0f]
+# CHECK-TRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-TRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-TRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-TRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-TRAP: ddiv $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1e]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
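# In outline, the immediate forms added above all follow one expansion scheme
# (a summary of the checks, not a new rule): a divisor of 0 degenerates to
# break 7 (or teq $zero, $zero, 7 under +use-tcc-in-div), 1 becomes a move,
# -1 becomes dneg, and every other constant is materialized into $1 before the
# register-form ddiv, e.g.:
#
#   ddiv $4, $5, 2
#   # expands (both trap and no-trap) to:
#   addiu $1, $zero, 2
#   ddiv  $zero, $5, $1
#   mflo  $4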
diff --git a/test/MC/Mips/macro-ddivu.s b/test/MC/Mips/macro-ddivu.s
index ff7e8c46d0be..88998ac76be7 100644
--- a/test/MC/Mips/macro-ddivu.s
+++ b/test/MC/Mips/macro-ddivu.s
@@ -1,98 +1,301 @@
-# RUN: llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips64r2 | \
+# RUN: llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64r2 | \
# RUN: FileCheck %s --check-prefix=CHECK-NOTRAP
-# RUN: llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips64r2 \
+# RUN: llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64r2 \
# RUN: -mattr=+use-tcc-in-div | FileCheck %s --check-prefix=CHECK-TRAP
ddivu $25,$11
-# CHECK-NOTRAP: bne $11, $zero, 8 # encoding: [0x15,0x60,0x00,0x02]
+# CHECK-NOTRAP: bne $11, $zero, .Ltmp0 # encoding: [0x15,0x60,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: .Ltmp0-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: ddivu $zero, $25, $11 # encoding: [0x03,0x2b,0x00,0x1f]
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-NOTRAP: .Ltmp0:
# CHECK-NOTRAP: mflo $25 # encoding: [0x00,0x00,0xc8,0x12]
+# CHECK-TRAP: teq $11, $zero, 7 # encoding: [0x01,0x60,0x01,0xf4]
+# CHECK-TRAP: ddivu $zero, $25, $11 # encoding: [0x03,0x2b,0x00,0x1f]
+# CHECK-TRAP: mflo $25 # encoding: [0x00,0x00,0xc8,0x12]
ddivu $24,$12
-# CHECK-NOTRAP: bne $12, $zero, 8 # encoding: [0x15,0x80,0x00,0x02]
+# CHECK-NOTRAP: bne $12, $zero, .Ltmp1 # encoding: [0x15,0x80,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: .Ltmp1-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: ddivu $zero, $24, $12 # encoding: [0x03,0x0c,0x00,0x1f]
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-NOTRAP: .Ltmp1:
# CHECK-NOTRAP: mflo $24 # encoding: [0x00,0x00,0xc0,0x12]
+# CHECK-TRAP: teq $12, $zero, 7 # encoding: [0x01,0x80,0x01,0xf4]
+# CHECK-TRAP: ddivu $zero, $24, $12 # encoding: [0x03,0x0c,0x00,0x1f]
+# CHECK-TRAP: mflo $24 # encoding: [0x00,0x00,0xc0,0x12]
ddivu $25,$0
-# CHECK-NOTRAP: bne $zero, $zero, 8 # encoding: [0x14,0x00,0x00,0x02]
-# CHECK-NOTRAP: ddivu $zero, $25, $zero # encoding: [0x03,0x20,0x00,0x1f]
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
-# CHECK-NOTRAP: mflo $25 # encoding: [0x00,0x00,0xc8,0x12]
+# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
ddivu $0,$9
-# CHECK-NOTRAP: bne $9, $zero, 8 # encoding: [0x15,0x20,0x00,0x02]
+# CHECK-NOTRAP: bne $9, $zero, .Ltmp2 # encoding: [0x15,0x20,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: .Ltmp2-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: ddivu $zero, $zero, $9 # encoding: [0x00,0x09,0x00,0x1f]
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-NOTRAP: .Ltmp2:
# CHECK-NOTRAP: mflo $zero # encoding: [0x00,0x00,0x00,0x12]
+# CHECK-TRAP: teq $9, $zero, 7 # encoding: [0x01,0x20,0x01,0xf4]
+# CHECK-TRAP: ddivu $zero, $zero, $9 # encoding: [0x00,0x09,0x00,0x1f]
+# CHECK-TRAP: mflo $zero # encoding: [0x00,0x00,0x00,0x12]
ddivu $0,$0
-# CHECK-NOTRAP: bne $zero, $zero, 8 # encoding: [0x14,0x00,0x00,0x02]
-# CHECK-NOTRAP: ddivu $zero, $zero, $zero # encoding: [0x00,0x00,0x00,0x1f]
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
-# CHECK-NOTRAP: mflo $zero # encoding: [0x00,0x00,0x00,0x12]
+# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
- ddivu $4,$5,$6
-# CHECK-NOTRAP: bne $6, $zero, 8 # encoding: [0x14,0xc0,0x00,0x02]
-# CHECK-NOTRAP: ddivu $zero, $5, $6 # encoding: [0x00,0xa6,0x00,0x1f]
+ ddivu $4,0
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
-# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
- ddivu $4,$5,$0
-# CHECK-NOTRAP: bne $zero, $zero, 8 # encoding: [0x14,0x00,0x00,0x02]
-# CHECK-NOTRAP: ddivu $zero, $5, $zero # encoding: [0x00,0xa0,0x00,0x1f]
+ ddivu $0,0
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
+
+ ddivu $4,1
+# CHECK-NOTRAP: move $4, $4 # encoding: [0x00,0x80,0x20,0x25]
+# CHECK-TRAP: move $4, $4 # encoding: [0x00,0x80,0x20,0x25]
+
+ ddivu $4,-1
+# CHECK-NOTRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
+# CHECK-NOTRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
+# CHECK-TRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
- ddivu $4,$0,$0
-# CHECK-NOTRAP: bne $zero, $zero, 8 # encoding: [0x14,0x00,0x00,0x02]
-# CHECK-NOTRAP: ddivu $zero, $zero, $zero # encoding: [0x00,0x00,0x00,0x1f]
-# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+ ddivu $4,0x8000
+# CHECK-NOTRAP: ori $1, $zero, 32768 # encoding: [0x34,0x01,0x80,0x00]
+# CHECK-NOTRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: ori $1, $zero, 32768 # encoding: [0x34,0x01,0x80,0x00]
+# CHECK-TRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
- ddivu $0, $4, $5
-# CHECK-NOTRAP: ddivu $zero, $4, $5 # encoding: [0x00,0x85,0x00,0x1f]
+ ddivu $4,-0x8000
+# CHECK-NOTRAP: addiu $1, $zero, -32768 # encoding: [0x24,0x01,0x80,0x00]
+# CHECK-NOTRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, -32768 # encoding: [0x24,0x01,0x80,0x00]
+# CHECK-TRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
- ddivu $25, $11
-# CHECK-TRAP: teq $11, $zero, 7 # encoding: [0x01,0x60,0x01,0xf4]
-# CHECK-TRAP: ddivu $zero, $25, $11 # encoding: [0x03,0x2b,0x00,0x1f]
-# CHECK-TRAP: mflo $25 # encoding: [0x00,0x00,0xc8,0x12]
+ ddivu $4,0x10000
+# CHECK-NOTRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-NOTRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-TRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
- ddivu $24,$12
-# CHECK-TRAP: teq $12, $zero, 7 # encoding: [0x01,0x80,0x01,0xf4]
-# CHECK-TRAP: ddivu $zero, $24, $12 # encoding: [0x03,0x0c,0x00,0x1f]
-# CHECK-TRAP: mflo $24 # encoding: [0x00,0x00,0xc0,0x12]
+ ddivu $4,0x1a5a5
+# CHECK-NOTRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-NOTRAP: ori $1, $1, 42405 # encoding: [0x34,0x21,0xa5,0xa5]
+# CHECK-NOTRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-TRAP: ori $1, $1, 42405 # encoding: [0x34,0x21,0xa5,0xa5]
+# CHECK-TRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
- ddivu $25,$0
-# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
-# CHECK-TRAP: ddivu $zero, $25, $zero # encoding: [0x03,0x20,0x00,0x1f]
-# CHECK-TRAP: mflo $25 # encoding: [0x00,0x00,0xc8,0x12]
+ ddivu $4,0xfffffff
+# CHECK-NOTRAP: lui $1, 4095 # encoding: [0x3c,0x01,0x0f,0xff]
+# CHECK-NOTRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-NOTRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 4095 # encoding: [0x3c,0x01,0x0f,0xff]
+# CHECK-TRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-TRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
- ddivu $0,$9
-# CHECK-TRAP: teq $9, $zero, 7 # encoding: [0x01,0x20,0x01,0xf4]
-# CHECK-TRAP: ddivu $zero, $zero, $9 # encoding: [0x00,0x09,0x00,0x1f]
-# CHECK-TRAP: mflo $zero # encoding: [0x00,0x00,0x00,0x12]
+ ddivu $4,0x10000000
+# CHECK-NOTRAP: lui $1, 4096 # encoding: [0x3c,0x01,0x10,0x00]
+# CHECK-NOTRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 4096 # encoding: [0x3c,0x01,0x10,0x00]
+# CHECK-TRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
- ddivu $0,$0
-# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
-# CHECK-TRAP: ddivu $zero, $zero, $zero # encoding: [0x00,0x00,0x00,0x1f]
-# CHECK-TRAP: mflo $zero # encoding: [0x00,0x00,0x00,0x12]
+ ddivu $4,0xfffffffe
+# CHECK-NOTRAP: ori $1, $zero, 65535 # encoding: [0x34,0x01,0xff,0xff]
+# CHECK-NOTRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-NOTRAP: ori $1, $1, 65534 # encoding: [0x34,0x21,0xff,0xfe]
+# CHECK-NOTRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: ori $1, $zero, 65535 # encoding: [0x34,0x01,0xff,0xff]
+# CHECK-TRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-TRAP: ori $1, $1, 65534 # encoding: [0x34,0x21,0xff,0xfe]
+# CHECK-TRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
- ddivu $4,$5,$6
+ ddivu $4,0xffffffff
+# CHECK-NOTRAP: lui $1, 65535 # encoding: [0x3c,0x01,0xff,0xff]
+# CHECK-NOTRAP: dsrl32 $1, $1, 0 # encoding: [0x00,0x01,0x08,0x3e]
+# CHECK-NOTRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 65535 # encoding: [0x3c,0x01,0xff,0xff]
+# CHECK-TRAP: dsrl32 $1, $1, 0 # encoding: [0x00,0x01,0x08,0x3e]
+# CHECK-TRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddivu $4,0xfffffffff
+# CHECK-NOTRAP: addiu $1, $zero, 15 # encoding: [0x24,0x01,0x00,0x0f]
+# CHECK-NOTRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-NOTRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-NOTRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-NOTRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-NOTRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, 15 # encoding: [0x24,0x01,0x00,0x0f]
+# CHECK-TRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-TRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-TRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-TRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-TRAP: ddivu $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddivu $4,$5,$6
+# CHECK-NOTRAP: bne $6, $zero, .Ltmp3 # encoding: [0x14,0xc0,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: .Ltmp3-4, kind: fixup_Mips_PC16
+# CHECK-NOTRAP: ddivu $zero, $5, $6 # encoding: [0x00,0xa6,0x00,0x1f]
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-NOTRAP: .Ltmp3:
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
# CHECK-TRAP: teq $6, $zero, 7 # encoding: [0x00,0xc0,0x01,0xf4]
# CHECK-TRAP: ddivu $zero, $5, $6 # encoding: [0x00,0xa6,0x00,0x1f]
# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
ddivu $4,$5,$0
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
-# CHECK-TRAP: ddivu $zero, $5, $zero # encoding: [0x00,0xa0,0x00,0x1f]
-# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
ddivu $4,$0,$0
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
-# CHECK-TRAP: ddivu $zero, $zero, $zero # encoding: [0x00,0x00,0x00,0x1f]
-# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
ddivu $0, $4, $5
+# CHECK-NOTRAP: ddivu $zero, $4, $5 # encoding: [0x00,0x85,0x00,0x1f]
# CHECK-TRAP: ddivu $zero, $4, $5 # encoding: [0x00,0x85,0x00,0x1f]
+
+ ddivu $4,$5,0
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
+
+ ddivu $4,$0,0
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
+
+ ddivu $0,$0,0
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
+
+ ddivu $4,$5,1
+# CHECK-NOTRAP: move $4, $5 # encoding: [0x00,0xa0,0x20,0x25]
+# CHECK-TRAP: move $4, $5 # encoding: [0x00,0xa0,0x20,0x25]
+
+ ddivu $4,$5,-1
+# CHECK-NOTRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
+# CHECK-NOTRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
+# CHECK-TRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddivu $4,$5,2
+# CHECK-NOTRAP: addiu $1, $zero, 2 # encoding: [0x24,0x01,0x00,0x02]
+# CHECK-NOTRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, 2 # encoding: [0x24,0x01,0x00,0x02]
+# CHECK-TRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddivu $4,$5,0x8000
+# CHECK-NOTRAP: ori $1, $zero, 32768 # encoding: [0x34,0x01,0x80,0x00]
+# CHECK-NOTRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: ori $1, $zero, 32768 # encoding: [0x34,0x01,0x80,0x00]
+# CHECK-TRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddivu $4,$5,-0x8000
+# CHECK-NOTRAP: addiu $1, $zero, -32768 # encoding: [0x24,0x01,0x80,0x00]
+# CHECK-NOTRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, -32768 # encoding: [0x24,0x01,0x80,0x00]
+# CHECK-TRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddivu $4,$5,0x10000
+# CHECK-NOTRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-NOTRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-TRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddivu $4,$5,0x1a5a5
+# CHECK-NOTRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-NOTRAP: ori $1, $1, 42405 # encoding: [0x34,0x21,0xa5,0xa5]
+# CHECK-NOTRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-TRAP: ori $1, $1, 42405 # encoding: [0x34,0x21,0xa5,0xa5]
+# CHECK-TRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddivu $4,$5,0xfffffff
+# CHECK-NOTRAP: lui $1, 4095 # encoding: [0x3c,0x01,0x0f,0xff]
+# CHECK-NOTRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-NOTRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 4095 # encoding: [0x3c,0x01,0x0f,0xff]
+# CHECK-TRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-TRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddivu $4,$5,0x10000000
+# CHECK-NOTRAP: lui $1, 4096 # encoding: [0x3c,0x01,0x10,0x00]
+# CHECK-NOTRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 4096 # encoding: [0x3c,0x01,0x10,0x00]
+# CHECK-TRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+  ddivu $4,$5,0xfffffffe
+# CHECK-NOTRAP: ori $1, $zero, 65535 # encoding: [0x34,0x01,0xff,0xff]
+# CHECK-NOTRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-NOTRAP: ori $1, $1, 65534 # encoding: [0x34,0x21,0xff,0xfe]
+# CHECK-NOTRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: ori $1, $zero, 65535 # encoding: [0x34,0x01,0xff,0xff]
+# CHECK-TRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-TRAP: ori $1, $1, 65534 # encoding: [0x34,0x21,0xff,0xfe]
+# CHECK-TRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddivu $4,$5,0xffffffff
+# CHECK-NOTRAP: lui $1, 65535 # encoding: [0x3c,0x01,0xff,0xff]
+# CHECK-NOTRAP: dsrl32 $1, $1, 0 # encoding: [0x00,0x01,0x08,0x3e]
+# CHECK-NOTRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 65535 # encoding: [0x3c,0x01,0xff,0xff]
+# CHECK-TRAP: dsrl32 $1, $1, 0 # encoding: [0x00,0x01,0x08,0x3e]
+# CHECK-TRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ ddivu $4,$5,0xfffffffff
+# CHECK-NOTRAP: addiu $1, $zero, 15 # encoding: [0x24,0x01,0x00,0x0f]
+# CHECK-NOTRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-NOTRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-NOTRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-NOTRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-NOTRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, 15 # encoding: [0x24,0x01,0x00,0x0f]
+# CHECK-TRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-TRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-TRAP: dsll $1, $1, 16 # encoding: [0x00,0x01,0x0c,0x38]
+# CHECK-TRAP: ori $1, $1, 65535 # encoding: [0x34,0x21,0xff,0xff]
+# CHECK-TRAP: ddivu $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1f]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
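# Compared with the signed macro-ddiv.s expansions above, the unsigned ones
# stay shorter: an unsigned divide cannot hit the INT_MIN / -1 overflow case,
# so ddivu only guards against a zero divisor and never emits the bne/break 6
# (or teq ..., 6) overflow check, e.g. for the register form:
#
#   ddivu $4, $5, $6
#   # no-trap expansion:
#   bne   $6, $zero, .Ltmp   # branch over the divide-by-zero break
#   ddivu $zero, $5, $6      # issued in the branch delay slot
#   break 7
#   .Ltmp:
#   mflo  $4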
diff --git a/test/MC/Mips/macro-div-bad.s b/test/MC/Mips/macro-div-bad.s
index 20ad39087a14..4d93a1a9a69d 100644
--- a/test/MC/Mips/macro-div-bad.s
+++ b/test/MC/Mips/macro-div-bad.s
@@ -8,7 +8,7 @@
# RUN: FileCheck %s --check-prefix=NOT-R6
.text
- div $25, $11
+ div $25, 11
# R6: :[[@LINE-1]]:3: error: instruction requires a CPU feature not currently enabled
div $25, $0
diff --git a/test/MC/Mips/macro-div.s b/test/MC/Mips/macro-div.s
index 3ac763e17d7c..8ce30d745bcf 100644
--- a/test/MC/Mips/macro-div.s
+++ b/test/MC/Mips/macro-div.s
@@ -4,100 +4,219 @@
# RUN: -mattr=+use-tcc-in-div | FileCheck %s --check-prefix=CHECK-TRAP
div $25,$11
-# CHECK-NOTRAP: bnez $11, 8 # encoding: [0x15,0x60,0x00,0x02]
+# CHECK-NOTRAP: bnez $11, $tmp0 # encoding: [0x15,0x60,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: ($tmp0)-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: div $zero, $25, $11 # encoding: [0x03,0x2b,0x00,0x1a]
-# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-NOTRAP: $tmp0:
# CHECK-NOTRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
-# CHECK-NOTRAP: bne $11, $1, 16 # encoding: [0x15,0x61,0x00,0x04]
+# CHECK-NOTRAP: bne $11, $1, $tmp1 # encoding: [0x15,0x61,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: ($tmp1)-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: lui $1, 32768 # encoding: [0x3c,0x01,0x80,0x00]
-# CHECK-NOTRAP: bne $25, $1, 8 # encoding: [0x17,0x21,0x00,0x02]
+# CHECK-NOTRAP: bne $25, $1, $tmp1 # encoding: [0x17,0x21,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: ($tmp1)-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: nop # encoding: [0x00,0x00,0x00,0x00]
# CHECK-NOTRAP: break 6 # encoding: [0x00,0x06,0x00,0x0d]
+# CHECK-NOTRAP: $tmp1:
# CHECK-NOTRAP: mflo $25 # encoding: [0x00,0x00,0xc8,0x12]
+# CHECK-TRAP: teq $11, $zero, 7 # encoding: [0x01,0x60,0x01,0xf4]
+# CHECK-TRAP: div $zero, $25, $11 # encoding: [0x03,0x2b,0x00,0x1a]
+# CHECK-TRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
+# CHECK-TRAP: bne $11, $1, $tmp0 # encoding: [0x15,0x61,A,A]
+# CHECK-TRAP: # fixup A - offset: 0, value: ($tmp0)-4, kind: fixup_Mips_PC16
+# CHECK-TRAP: lui $1, 32768 # encoding: [0x3c,0x01,0x80,0x00]
+# CHECK-TRAP: teq $25, $1, 6 # encoding: [0x03,0x21,0x01,0xb4]
+# CHECK-TRAP: $tmp0:
+# CHECK-TRAP: mflo $25 # encoding: [0x00,0x00,0xc8,0x12]
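+# Note: the default (break-based) expansion uses break code 7 for division
+# by zero and break code 6 for INT_MIN / -1 overflow; with +use-tcc-in-div
+# the same checks are emitted as conditional traps (teq) with matching
+# codes, avoiding the branches around a break.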
div $24,$12
-# CHECK-NOTRAP: bnez $12, 8 # encoding: [0x15,0x80,0x00,0x02]
+# CHECK-NOTRAP: bnez $12, $tmp2 # encoding: [0x15,0x80,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: ($tmp2)-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: div $zero, $24, $12 # encoding: [0x03,0x0c,0x00,0x1a]
-# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-NOTRAP: $tmp2:
# CHECK-NOTRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
-# CHECK-NOTRAP: bne $12, $1, 16 # encoding: [0x15,0x81,0x00,0x04]
+# CHECK-NOTRAP: bne $12, $1, $tmp3 # encoding: [0x15,0x81,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: ($tmp3)-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: lui $1, 32768 # encoding: [0x3c,0x01,0x80,0x00]
-# CHECK-NOTRAP: bne $24, $1, 8 # encoding: [0x17,0x01,0x00,0x02]
+# CHECK-NOTRAP: bne $24, $1, $tmp3 # encoding: [0x17,0x01,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: ($tmp3)-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: nop # encoding: [0x00,0x00,0x00,0x00]
-# CHECK-NOTRAP: break 6 # encoding: [0x00,0x06,0x00,0x0d]
+# CHECK-NOTRAP: break 6 # encoding: [0x00,0x06,0x00,0x0d]
+# CHECK-NOTRAP: $tmp3:
# CHECK-NOTRAP: mflo $24 # encoding: [0x00,0x00,0xc0,0x12]
+# CHECK-TRAP: teq $12, $zero, 7 # encoding: [0x01,0x80,0x01,0xf4]
+# CHECK-TRAP: div $zero, $24, $12 # encoding: [0x03,0x0c,0x00,0x1a]
+# CHECK-TRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
+# CHECK-TRAP: bne $12, $1, $tmp1 # encoding: [0x15,0x81,A,A]
+# CHECK-TRAP: # fixup A - offset: 0, value: ($tmp1)-4, kind: fixup_Mips_PC16
+# CHECK-TRAP: lui $1, 32768 # encoding: [0x3c,0x01,0x80,0x00]
+# CHECK-TRAP: teq $24, $1, 6 # encoding: [0x03,0x01,0x01,0xb4]
+# CHECK-TRAP: $tmp1:
+# CHECK-TRAP: mflo $24 # encoding: [0x00,0x00,0xc0,0x12]
div $25,$0
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
div $0,$9
# CHECK-NOTRAP: div $zero, $zero, $9 # encoding: [0x00,0x09,0x00,0x1a]
+# CHECK-TRAP: div $zero, $zero, $9 # encoding: [0x00,0x09,0x00,0x1a]
div $0,$0
# CHECK-NOTRAP: div $zero, $zero, $zero # encoding: [0x00,0x00,0x00,0x1a]
+# CHECK-TRAP: div $zero, $zero, $zero # encoding: [0x00,0x00,0x00,0x1a]
- div $4,$5,$6
-# CHECK-NOTRAP: bnez $6, 8 # encoding: [0x14,0xc0,0x00,0x02]
-# CHECK-NOTRAP: div $zero, $5, $6 # encoding: [0x00,0xa6,0x00,0x1a]
+ div $4,0
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
+
+ div $0,0
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
+
+ div $4,1
+# CHECK-NOTRAP: move $4, $4 # encoding: [0x00,0x80,0x20,0x25]
+# CHECK-TRAP: move $4, $4 # encoding: [0x00,0x80,0x20,0x25]
+
+ div $4,-1
+# CHECK-NOTRAP: neg $4, $4 # encoding: [0x00,0x04,0x20,0x22]
+# CHECK-TRAP: neg $4, $4 # encoding: [0x00,0x04,0x20,0x22]
+
+ div $4,2
+# CHECK-NOTRAP: addiu $1, $zero, 2 # encoding: [0x24,0x01,0x00,0x02]
+# CHECK-NOTRAP: div $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1a]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, 2 # encoding: [0x24,0x01,0x00,0x02]
+# CHECK-TRAP: div $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1a]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ div $4,0x8000
+# CHECK-NOTRAP: ori $1, $zero, 32768 # encoding: [0x34,0x01,0x80,0x00]
+# CHECK-NOTRAP: div $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1a]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: ori $1, $zero, 32768 # encoding: [0x34,0x01,0x80,0x00]
+# CHECK-TRAP: div $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1a]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ div $4,-0x8000
+# CHECK-NOTRAP: addiu $1, $zero, -32768 # encoding: [0x24,0x01,0x80,0x00]
+# CHECK-NOTRAP: div $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1a]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, -32768 # encoding: [0x24,0x01,0x80,0x00]
+# CHECK-TRAP: div $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1a]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ div $4,0x10000
+# CHECK-NOTRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-NOTRAP: div $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1a]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-TRAP: div $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1a]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ div $4,0x1a5a5
+# CHECK-NOTRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-NOTRAP: ori $1, $1, 42405 # encoding: [0x34,0x21,0xa5,0xa5]
+# CHECK-NOTRAP: div $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1a]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-TRAP: ori $1, $1, 42405 # encoding: [0x34,0x21,0xa5,0xa5]
+# CHECK-TRAP: div $zero, $4, $1 # encoding: [0x00,0x81,0x00,0x1a]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ div $4,$5,$6
+# CHECK-NOTRAP: bnez $6, $tmp4 # encoding: [0x14,0xc0,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: ($tmp4)-4, kind: fixup_Mips_PC16
+# CHECK-NOTRAP: div $zero, $5, $6 # encoding: [0x00,0xa6,0x00,0x1a]
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-NOTRAP: $tmp4:
# CHECK-NOTRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
-# CHECK-NOTRAP: bne $6, $1, 16 # encoding: [0x14,0xc1,0x00,0x04]
+# CHECK-NOTRAP: bne $6, $1, $tmp5 # encoding: [0x14,0xc1,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: ($tmp5)-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: lui $1, 32768 # encoding: [0x3c,0x01,0x80,0x00]
-# CHECK-NOTRAP: bne $5, $1, 8 # encoding: [0x14,0xa1,0x00,0x02]
+# CHECK-NOTRAP: bne $5, $1, $tmp5 # encoding: [0x14,0xa1,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: ($tmp5)-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: nop # encoding: [0x00,0x00,0x00,0x00]
# CHECK-NOTRAP: break 6 # encoding: [0x00,0x06,0x00,0x0d]
+# CHECK-NOTRAP: $tmp5:
# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: teq $6, $zero, 7 # encoding: [0x00,0xc0,0x01,0xf4]
+# CHECK-TRAP: div $zero, $5, $6 # encoding: [0x00,0xa6,0x00,0x1a]
+# CHECK-TRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
+# CHECK-TRAP: bne $6, $1, $tmp2 # encoding: [0x14,0xc1,A,A]
+# CHECK-TRAP: # fixup A - offset: 0, value: ($tmp2)-4, kind: fixup_Mips_PC16
+# CHECK-TRAP: lui $1, 32768 # encoding: [0x3c,0x01,0x80,0x00]
+# CHECK-TRAP: teq $5, $1, 6 # encoding: [0x00,0xa1,0x01,0xb4]
+# CHECK-TRAP: $tmp2:
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
- div $4,$5,$0
+ div $4,$5,$0
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
- div $4,$0,$0
+ div $4,$0,$0
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
- div $0, $4, $5
+ div $0,$4,$5
# CHECK-NOTRAP: div $zero, $4, $5 # encoding: [0x00,0x85,0x00,0x1a]
+# CHECK-TRAP: div $zero, $4, $5 # encoding: [0x00,0x85,0x00,0x1a]
- div $25, $11
-# CHECK-TRAP: teq $11, $zero, 7 # encoding: [0x01,0x60,0x01,0xf4]
-# CHECK-TRAP: div $zero, $25, $11 # encoding: [0x03,0x2b,0x00,0x1a]
-# CHECK-TRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
-# CHECK-TRAP: bne $11, $1, 8 # encoding: [0x15,0x61,0x00,0x02]
-# CHECK-TRAP: lui $1, 32768 # encoding: [0x3c,0x01,0x80,0x00]
-# CHECK-TRAP: teq $25, $1, 6 # encoding: [0x03,0x21,0x01,0xb4]
-# CHECK-TRAP: mflo $25 # encoding: [0x00,0x00,0xc8,0x12]
-
- div $24,$12
-# CHECK-TRAP: teq $12, $zero, 7 # encoding: [0x01,0x80,0x01,0xf4]
-# CHECK-TRAP: div $zero, $24, $12 # encoding: [0x03,0x0c,0x00,0x1a]
-# CHECK-TRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
-# CHECK-TRAP: bne $12, $1, 8 # encoding: [0x15,0x81,0x00,0x02]
-# CHECK-TRAP: lui $1, 32768 # encoding: [0x3c,0x01,0x80,0x00]
-# CHECK-TRAP: teq $24, $1, 6 # encoding: [0x03,0x01,0x01,0xb4]
-# CHECK-TRAP: mflo $24 # encoding: [0x00,0x00,0xc0,0x12]
+ div $4,$5,0
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
- div $25,$0
+ div $4,$0,0
+# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
- div $0,$9
-# CHECK-TRAP: div $zero, $zero, $9 # encoding: [0x00,0x09,0x00,0x1a]
+ div $4,$5,1
+# CHECK-NOTRAP: move $4, $5 # encoding: [0x00,0xa0,0x20,0x25]
+# CHECK-TRAP: move $4, $5 # encoding: [0x00,0xa0,0x20,0x25]
- div $0,$0
-# CHECK-TRAP: div $zero, $zero, $zero # encoding: [0x00,0x00,0x00,0x1a]
+ div $4,$5,-1
+# CHECK-NOTRAP: neg $4, $5 # encoding: [0x00,0x05,0x20,0x22]
+# CHECK-TRAP: neg $4, $5 # encoding: [0x00,0x05,0x20,0x22]
- div $4,$5,$6
-# CHECK-TRAP: teq $6, $zero, 7 # encoding: [0x00,0xc0,0x01,0xf4]
-# CHECK-TRAP: div $zero, $5, $6 # encoding: [0x00,0xa6,0x00,0x1a]
-# CHECK-TRAP: addiu $1, $zero, -1 # encoding: [0x24,0x01,0xff,0xff]
-# CHECK-TRAP: bne $6, $1, 8 # encoding: [0x14,0xc1,0x00,0x02]
-# CHECK-TRAP: lui $1, 32768 # encoding: [0x3c,0x01,0x80,0x00]
-# CHECK-TRAP: teq $5, $1, 6 # encoding: [0x00,0xa1,0x01,0xb4]
+ div $4,$5,2
+# CHECK-NOTRAP: addiu $1, $zero, 2 # encoding: [0x24,0x01,0x00,0x02]
+# CHECK-NOTRAP: div $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1a]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, 2 # encoding: [0x24,0x01,0x00,0x02]
+# CHECK-TRAP: div $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1a]
# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
- div $4,$5,$0
-# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
+ div $4,$5,0x8000
+# CHECK-NOTRAP: ori $1, $zero, 32768 # encoding: [0x34,0x01,0x80,0x00]
+# CHECK-NOTRAP: div $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1a]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: ori $1, $zero, 32768 # encoding: [0x34,0x01,0x80,0x00]
+# CHECK-TRAP: div $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1a]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
- div $4,$0,$0
-# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
+ div $4,$5,-0x8000
+# CHECK-NOTRAP: addiu $1, $zero, -32768 # encoding: [0x24,0x01,0x80,0x00]
+# CHECK-NOTRAP: div $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1a]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, -32768 # encoding: [0x24,0x01,0x80,0x00]
+# CHECK-TRAP: div $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1a]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ div $4,$5,0x10000
+# CHECK-NOTRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-NOTRAP: div $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1a]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-TRAP: div $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1a]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
- div $0, $4, $5
-# CHECK-TRAP: div $zero, $4, $5 # encoding: [0x00,0x85,0x00,0x1a] \ No newline at end of file
+ div $4,$5,0x1a5a5
+# CHECK-NOTRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-NOTRAP: ori $1, $1, 42405 # encoding: [0x34,0x21,0xa5,0xa5]
+# CHECK-NOTRAP: div $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1a]
+# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-TRAP: ori $1, $1, 42405 # encoding: [0x34,0x21,0xa5,0xa5]
+# CHECK-TRAP: div $zero, $5, $1 # encoding: [0x00,0xa1,0x00,0x1a]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
diff --git a/test/MC/Mips/macro-divu-bad.s b/test/MC/Mips/macro-divu-bad.s
index 6eeaa614ff86..b5b492ec6828 100644
--- a/test/MC/Mips/macro-divu-bad.s
+++ b/test/MC/Mips/macro-divu-bad.s
@@ -8,7 +8,7 @@
# RUN: FileCheck %s --check-prefix=NOT-R6
.text
- divu $25, $11
+ divu $25, 11
# R6: :[[@LINE-1]]:3: error: instruction requires a CPU feature not currently enabled
divu $25, $0
diff --git a/test/MC/Mips/macro-divu.s b/test/MC/Mips/macro-divu.s
index d8137d5ba733..a3e8ae067c74 100644
--- a/test/MC/Mips/macro-divu.s
+++ b/test/MC/Mips/macro-divu.s
@@ -4,22 +4,23 @@
# RUN: -mattr=+use-tcc-in-div | FileCheck %s --check-prefix=CHECK-TRAP
divu $25,$11
-# CHECK-NOTRAP: bnez $11, 8 # encoding: [0x15,0x60,0x00,0x02]
+# CHECK-NOTRAP: bnez $11, $tmp0 # encoding: [0x15,0x60,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: ($tmp0)-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: divu $zero, $25, $11 # encoding: [0x03,0x2b,0x00,0x1b]
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-NOTRAP: $tmp0:
# CHECK-NOTRAP: mflo $25 # encoding: [0x00,0x00,0xc8,0x12]
divu $24,$12
-# CHECK-NOTRAP: bnez $12, 8 # encoding: [0x15,0x80,0x00,0x02]
+# CHECK-NOTRAP: bnez $12, $tmp1 # encoding: [0x15,0x80,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: ($tmp1)-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: divu $zero, $24, $12 # encoding: [0x03,0x0c,0x00,0x1b]
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-NOTRAP: $tmp1:
# CHECK-NOTRAP: mflo $24 # encoding: [0x00,0x00,0xc0,0x12]
divu $25,$0
-# CHECK-NOTRAP: bnez $zero, 8 # encoding: [0x14,0x00,0x00,0x02]
-# CHECK-NOTRAP: divu $zero, $25, $zero # encoding: [0x03,0x20,0x00,0x1b]
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
-# CHECK-NOTRAP: mflo $25 # encoding: [0x00,0x00,0xc8,0x12]
divu $0,$9
# CHECK-NOTRAP: divu $zero, $zero, $9 # encoding: [0x00,0x09,0x00,0x1b]
@@ -28,22 +29,18 @@
# CHECK-NOTRAP: divu $zero, $zero, $zero # encoding: [0x00,0x00,0x00,0x1b]
divu $4,$5,$6
-# CHECK-NOTRAP: bnez $6, 8 # encoding: [0x14,0xc0,0x00,0x02]
+# CHECK-NOTRAP: bnez $6, $tmp2 # encoding: [0x14,0xc0,A,A]
+# CHECK-NOTRAP: # fixup A - offset: 0, value: ($tmp2)-4, kind: fixup_Mips_PC16
# CHECK-NOTRAP: divu $zero, $5, $6 # encoding: [0x00,0xa6,0x00,0x1b]
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK-NOTRAP: $tmp2:
# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
divu $4,$5,$0
-# CHECK-NOTRAP: bnez $zero, 8 # encoding: [0x14,0x00,0x00,0x02]
-# CHECK-NOTRAP: divu $zero, $5, $zero # encoding: [0x00,0xa0,0x00,0x1b]
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
-# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
divu $4,$0,$0
-# CHECK-NOTRAP: bnez $zero, 8 # encoding: [0x14,0x00,0x00,0x02]
-# CHECK-NOTRAP: divu $zero, $zero, $zero # encoding: [0x00,0x00,0x00,0x1b]
# CHECK-NOTRAP: break 7 # encoding: [0x00,0x07,0x00,0x0d]
-# CHECK-NOTRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
divu $0, $4, $5
# CHECK-NOTRAP: divu $zero, $4, $5 # encoding: [0x00,0x85,0x00,0x1b]
@@ -60,8 +57,6 @@
divu $25,$0
# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
-# CHECK-TRAP: divu $zero, $25, $zero # encoding: [0x03,0x20,0x00,0x1b]
-# CHECK-TRAP: mflo $25 # encoding: [0x00,0x00,0xc8,0x12]
divu $0,$9
# CHECK-TRAP: divu $zero, $zero, $9 # encoding: [0x00,0x09,0x00,0x1b]
@@ -76,8 +71,6 @@
divu $4,$5,$0
# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
-# CHECK-TRAP: divu $zero, $5, $zero # encoding: [0x00,0xa0,0x00,0x1b]
-# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
divu $4,$0,$0
# CHECK-TRAP: teq $zero, $zero, 7 # encoding: [0x00,0x00,0x01,0xf4]
@@ -85,4 +78,4 @@
# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
divu $0, $4, $5
-# CHECK-TRAP: divu $zero, $4, $5 # encoding: [0x00,0x85,0x00,0x1b] \ No newline at end of file
+# CHECK-TRAP: divu $zero, $4, $5 # encoding: [0x00,0x85,0x00,0x1b]
diff --git a/test/MC/Mips/macro-dla.s b/test/MC/Mips/macro-dla.s
index e3b558e9e514..321af00c8be3 100644
--- a/test/MC/Mips/macro-dla.s
+++ b/test/MC/Mips/macro-dla.s
@@ -702,6 +702,54 @@ dla $5, extern_sym+8($5) # CHECK: lui $1, %highest(extern_sym+8) # encodin
# CHECK: daddiu $1, $1, %lo(extern_sym+8) # encoding: [0x64,0x21,A,A]
# CHECK: # fixup A - offset: 0, value: %lo(extern_sym+8), kind: fixup_Mips_LO16
# CHECK: daddu $5, $1, $5 # encoding: [0x00,0x25,0x28,0x2d]
+.set noat
+dla $5, extern_sym # CHECK: lui $5, %highest(extern_sym) # encoding: [0x3c,0x05,A,A]
+ # CHECK: # fixup A - offset: 0, value: %highest(extern_sym), kind: fixup_Mips_HIGHEST
+ # CHECK: daddiu $5, $5, %higher(extern_sym) # encoding: [0x64,0xa5,A,A]
+ # CHECK: # fixup A - offset: 0, value: %higher(extern_sym), kind: fixup_Mips_HIGHER
+ # CHECK: dsll $5, $5, 16 # encoding: [0x00,0x05,0x2c,0x38]
+ # CHECK: daddiu $5, $5, %hi(extern_sym) # encoding: [0x64,0xa5,A,A]
+ # CHECK: # fixup A - offset: 0, value: %hi(extern_sym), kind: fixup_Mips_HI16
+ # CHECK: dsll $5, $5, 16 # encoding: [0x00,0x05,0x2c,0x38]
+ # CHECK: daddiu $5, $5, %lo(extern_sym) # encoding: [0x64,0xa5,A,A]
+ # CHECK: # fixup A - offset: 0, value: %lo(extern_sym), kind: fixup_Mips_LO16
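+# Note: under .set noat the expansion must not touch $1 ($at), so the
+# address is accumulated in the destination register itself.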
+
+dla $5, extern_sym+8 # CHECK: lui $5, %highest(extern_sym+8) # encoding: [0x3c,0x05,A,A]
+ # CHECK: # fixup A - offset: 0, value: %highest(extern_sym+8), kind: fixup_Mips_HIGHEST
+ # CHECK: daddiu $5, $5, %higher(extern_sym+8) # encoding: [0x64,0xa5,A,A]
+ # CHECK: # fixup A - offset: 0, value: %higher(extern_sym+8), kind: fixup_Mips_HIGHER
+ # CHECK: dsll $5, $5, 16 # encoding: [0x00,0x05,0x2c,0x38]
+ # CHECK: daddiu $5, $5, %hi(extern_sym+8) # encoding: [0x64,0xa5,A,A]
+ # CHECK: # fixup A - offset: 0, value: %hi(extern_sym+8), kind: fixup_Mips_HI16
+ # CHECK: dsll $5, $5, 16 # encoding: [0x00,0x05,0x2c,0x38]
+ # CHECK: daddiu $5, $5, %lo(extern_sym+8) # encoding: [0x64,0xa5,A,A]
+ # CHECK: # fixup A - offset: 0, value: %lo(extern_sym+8), kind: fixup_Mips_LO16
+
+dla $5, extern_sym($6) # CHECK: lui $5, %highest(extern_sym) # encoding: [0x3c,0x05,A,A]
+ # CHECK: # fixup A - offset: 0, value: %highest(extern_sym), kind: fixup_Mips_HIGHEST
+ # CHECK: daddiu $5, $5, %higher(extern_sym) # encoding: [0x64,0xa5,A,A]
+ # CHECK: # fixup A - offset: 0, value: %higher(extern_sym), kind: fixup_Mips_HIGHER
+ # CHECK: dsll $5, $5, 16 # encoding: [0x00,0x05,0x2c,0x38]
+ # CHECK: daddiu $5, $5, %hi(extern_sym) # encoding: [0x64,0xa5,A,A]
+ # CHECK: # fixup A - offset: 0, value: %hi(extern_sym), kind: fixup_Mips_HI16
+ # CHECK: dsll $5, $5, 16 # encoding: [0x00,0x05,0x2c,0x38]
+ # CHECK: daddiu $5, $5, %lo(extern_sym) # encoding: [0x64,0xa5,A,A]
+ # CHECK: # fixup A - offset: 0, value: %lo(extern_sym), kind: fixup_Mips_LO16
+ # CHECK: daddu $5, $5, $6 # encoding: [0x00,0xa6,0x28,0x2d]
+
+dla $4, extern_sym+8($6) # CHECK: lui $4, %highest(extern_sym+8) # encoding: [0x3c,0x04,A,A]
+ # CHECK: # fixup A - offset: 0, value: %highest(extern_sym+8), kind: fixup_Mips_HIGHEST
+ # CHECK: daddiu $4, $4, %higher(extern_sym+8) # encoding: [0x64,0x84,A,A]
+ # CHECK: # fixup A - offset: 0, value: %higher(extern_sym+8), kind: fixup_Mips_HIGHER
+ # CHECK: dsll $4, $4, 16 # encoding: [0x00,0x04,0x24,0x38]
+ # CHECK: daddiu $4, $4, %hi(extern_sym+8) # encoding: [0x64,0x84,A,A]
+ # CHECK: # fixup A - offset: 0, value: %hi(extern_sym+8), kind: fixup_Mips_HI16
+ # CHECK: dsll $4, $4, 16 # encoding: [0x00,0x04,0x24,0x38]
+ # CHECK: daddiu $4, $4, %lo(extern_sym+8) # encoding: [0x64,0x84,A,A]
+ # CHECK: # fixup A - offset: 0, value: %lo(extern_sym+8), kind: fixup_Mips_LO16
+ # CHECK: daddu $4, $4, $6 # encoding: [0x00,0x86,0x20,0x2d]
+
+.set at
.option pic2
#dla $5, symbol
diff --git a/test/MC/Mips/macro-li.s b/test/MC/Mips/macro-li.s
index 6cdc11d70d25..f5ccb5c8944f 100644
--- a/test/MC/Mips/macro-li.s
+++ b/test/MC/Mips/macro-li.s
@@ -65,7 +65,7 @@ li $5, 0xc0008000 # CHECK: lui $5, 49152 # encoding: [0x3c,0x05,0xc0,0x00
# CHECK: ori $5, $5, 32768 # encoding: [0x34,0xa5,0x80,0x00]
li $5, 0x80008000 # CHECK: lui $5, 32768 # encoding: [0x3c,0x05,0x80,0x00]
# CHECK: ori $5, $5, 32768 # encoding: [0x34,0xa5,0x80,0x00]
-li $4, ~0xffffffff # CHECK; addiu $4, $zero, 0 # encoding: [0x24,0x04,0x00,0x00]
+li $4, ~0xffffffff # CHECK: addiu $4, $zero, 0 # encoding: [0x24,0x04,0x00,0x00]
li $4, ~0x80000001 # CHECK: lui $4, 32767 # encoding: [0x3c,0x04,0x7f,0xff]
# CHECK: ori $4, $4, 65534 # encoding: [0x34,0x84,0xff,0xfe]
li $4, ~0x80000000 # CHECK: lui $4, 32767 # encoding: [0x3c,0x04,0x7f,0xff]
diff --git a/test/MC/Mips/micromips32r6/valid.s b/test/MC/Mips/micromips32r6/valid.s
index fedcdeb297ad..a5a55d741a7d 100644
--- a/test/MC/Mips/micromips32r6/valid.s
+++ b/test/MC/Mips/micromips32r6/valid.s
@@ -100,10 +100,10 @@
syscall 396 # CHECK: syscall 396 # encoding: [0x01,0x8c,0x8b,0x7c]
mod $3, $4, $5 # CHECK: mod $3, $4, $5 # encoding: [0x00,0xa4,0x19,0x58]
modu $3, $4, $5 # CHECK: modu $3, $4, $5 # encoding: [0x00,0xa4,0x19,0xd8]
- mul $3, $4, $5 # CHECK mul $3, $4, $5 # encoding: [0x00,0xa4,0x18,0x18]
- muh $3, $4, $5 # CHECK muh $3, $4, $5 # encoding: [0x00,0xa4,0x18,0x58]
- mulu $3, $4, $5 # CHECK mulu $3, $4, $5 # encoding: [0x00,0xa4,0x18,0x98]
- muhu $3, $4, $5 # CHECK muhu $3, $4, $5 # encoding: [0x00,0xa4,0x18,0xd8]
+ mul $3, $4, $5 # CHECK: mul $3, $4, $5 # encoding: [0x00,0xa4,0x18,0x18]
+ muh $3, $4, $5 # CHECK: muh $3, $4, $5 # encoding: [0x00,0xa4,0x18,0x58]
+ mulu $3, $4, $5 # CHECK: mulu $3, $4, $5 # encoding: [0x00,0xa4,0x18,0x98]
+ muhu $3, $4, $5 # CHECK: muhu $3, $4, $5 # encoding: [0x00,0xa4,0x18,0xd8]
nop # CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
nor $3, $4, $5 # CHECK: nor $3, $4, $5 # encoding: [0x00,0xa4,0x1a,0xd0]
or $3, $4, $5 # CHECK: or $3, $4, $5 # encoding: [0x00,0xa4,0x1a,0x90]
diff --git a/test/MC/Mips/micromips64r6/valid.s b/test/MC/Mips/micromips64r6/valid.s
index 1c4a65977a94..d757384344d4 100644
--- a/test/MC/Mips/micromips64r6/valid.s
+++ b/test/MC/Mips/micromips64r6/valid.s
@@ -269,14 +269,14 @@ a:
dneg $10 # CHECK: dneg $10, $10 # encoding: [0x59,0x40,0x51,0x90]
dnegu $1, $11 # CHECK: dnegu $1, $11 # encoding: [0x59,0x60,0x09,0xd0]
dnegu $5 # CHECK: dnegu $5, $5 # encoding: [0x58,0xa0,0x29,0xd0]
- mul $3, $4, $5 # CHECK mul $3, $4, $5 # encoding: [0x00,0xa4,0x18,0x18]
- muh $3, $4, $5 # CHECK muh $3, $4, $5 # encoding: [0x00,0xa4,0x18,0x58]
- mulu $3, $4, $5 # CHECK mulu $3, $4, $5 # encoding: [0x00,0xa4,0x18,0x98]
- muhu $3, $4, $5 # CHECK muhu $3, $4, $5 # encoding: [0x00,0xa4,0x18,0xd8]
- dmul $3, $4, $5 # CHECK dmul $3, $4, $5 # encoding: [0x58,0xa4,0x18,0x18]
- dmuh $3, $4, $5 # CHECK dmuh $3, $4, $5 # encoding: [0x58,0xa4,0x18,0x58]
- dmulu $3, $4, $5 # CHECK dmulu $3, $4, $5 # encoding: [0x58,0xa4,0x18,0x98]
- dmuhu $3, $4, $5 # CHECK dmuhu $3, $4, $5 # encoding: [0x58,0xa4,0x18,0xd8]
+ mul $3, $4, $5 # CHECK: mul $3, $4, $5 # encoding: [0x00,0xa4,0x18,0x18]
+ muh $3, $4, $5 # CHECK: muh $3, $4, $5 # encoding: [0x00,0xa4,0x18,0x58]
+ mulu $3, $4, $5 # CHECK: mulu $3, $4, $5 # encoding: [0x00,0xa4,0x18,0x98]
+ muhu $3, $4, $5 # CHECK: muhu $3, $4, $5 # encoding: [0x00,0xa4,0x18,0xd8]
+ dmul $3, $4, $5 # CHECK: dmul $3, $4, $5 # encoding: [0x58,0xa4,0x18,0x18]
+ dmuh $3, $4, $5 # CHECK: dmuh $3, $4, $5 # encoding: [0x58,0xa4,0x18,0x58]
+ dmulu $3, $4, $5 # CHECK: dmulu $3, $4, $5 # encoding: [0x58,0xa4,0x18,0x98]
+ dmuhu $3, $4, $5 # CHECK: dmuhu $3, $4, $5 # encoding: [0x58,0xa4,0x18,0xd8]
lwp $16, 8($4) # CHECK: lwp $16, 8($4) # encoding: [0x22,0x04,0x10,0x08]
swp $16, 8($4) # CHECK: swp $16, 8($4) # encoding: [0x22,0x04,0x90,0x08]
dsbh $3, $4 # CHECK: dsbh $3, $4 # encoding: [0x58,0x64,0x7b,0x3c]
diff --git a/test/MC/Mips/mips64-instalias-imm-expanding.s b/test/MC/Mips/mips64-instalias-imm-expanding.s
new file mode 100644
index 000000000000..80764ebd9746
--- /dev/null
+++ b/test/MC/Mips/mips64-instalias-imm-expanding.s
@@ -0,0 +1,741 @@
+# RUN: llvm-mc -triple mips64el-unknown-linux -show-encoding -print-imm-hex %s | FileCheck %s
+
+ .text
+text_label:
+# CHECK: text_label:
+ add $4, -0x80000000
+# CHECK-NEXT: lui $1, 0x8000 # encoding: [0x00,0x80,0x01,0x3c]
+# CHECK-NEXT: add $4, $4, $1 # encoding: [0x20,0x20,0x81,0x00]
+ add $4, -0x8001
+# CHECK-NEXT: lui $1, 0xffff # encoding: [0xff,0xff,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0x7fff # encoding: [0xff,0x7f,0x21,0x34]
+# CHECK-NEXT: add $4, $4, $1 # encoding: [0x20,0x20,0x81,0x00]
+ add $4, -0x8000
+# CHECK-NEXT: addi $4, $4, -0x8000 # encoding: [0x00,0x80,0x84,0x20]
+ add $4, 0
+# CHECK-NEXT: addi $4, $4, 0x0 # encoding: [0x00,0x00,0x84,0x20]
+ add $4, 0xFFFF
+# CHECK-NEXT: ori $1, $zero, 0xffff # encoding: [0xff,0xff,0x01,0x34]
+# CHECK-NEXT: add $4, $4, $1 # encoding: [0x20,0x20,0x81,0x00]
+ add $4, 0x10000
+# CHECK-NEXT: lui $1, 0x1 # encoding: [0x01,0x00,0x01,0x3c]
+# CHECK-NEXT: add $4, $4, $1 # encoding: [0x20,0x20,0x81,0x00]
+ add $4, 0xFFFFFFFF # This should be sign-extended because it's a 32-bit add
+# CHECK-NEXT: addi $4, $4, -0x1 # encoding: [0xff,0xff,0x84,0x20]
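+# Note: as a 32-bit quantity 0xFFFFFFFF is -1, which fits addi's signed
+# 16-bit immediate field, so no lui/ori materialization is needed.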
+ add $4, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK-NEXT: lui $1, 0xff # encoding: [0xff,0x00,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0xff0f # encoding: [0x0f,0xff,0x21,0x34]
+# CHECK-NEXT: add $4, $4, $1 # encoding: [0x20,0x20,0x81,0x00]
+
+ add $4, $5, -0x80000000
+# CHECK: lui $4, 0x8000 # encoding: [0x00,0x80,0x04,0x3c]
+# CHECK-NEXT: add $4, $4, $5 # encoding: [0x20,0x20,0x85,0x00]
+ add $4, $5, -0x8001
+# CHECK-NEXT: lui $4, 0xffff # encoding: [0xff,0xff,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0x7fff # encoding: [0xff,0x7f,0x84,0x34]
+# CHECK-NEXT: add $4, $4, $5 # encoding: [0x20,0x20,0x85,0x00]
+ add $4, $5, -0x8000
+# CHECK-NEXT: addi $4, $5, -0x8000 # encoding: [0x00,0x80,0xa4,0x20]
+ add $4, $5, 0
+# CHECK-NEXT: addi $4, $5, 0x0 # encoding: [0x00,0x00,0xa4,0x20]
+ add $4, $5, 0xFFFF
+# CHECK-NEXT: ori $4, $zero, 0xffff # encoding: [0xff,0xff,0x04,0x34]
+# CHECK-NEXT: add $4, $4, $5 # encoding: [0x20,0x20,0x85,0x00]
+ add $4, $5, 0x10000
+# CHECK-NEXT: lui $4, 0x1 # encoding: [0x01,0x00,0x04,0x3c]
+# CHECK-NEXT: add $4, $4, $5 # encoding: [0x20,0x20,0x85,0x00]
+ add $4, $5, 0xFFFFFFFF # This should be sign-extended because it's a 32-bit addi
+# CHECK-NEXT: addi $4, $5, -0x1 # encoding: [0xff,0xff,0xa4,0x20]
+ add $4, $5, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK-NEXT: lui $4, 0xff # encoding: [0xff,0x00,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0xff0f # encoding: [0x0f,0xff,0x84,0x34]
+# CHECK-NEXT: add $4, $4, $5 # encoding: [0x20,0x20,0x85,0x00]
+
+
+ addu $4, -0x80000000
+# CHECK: lui $1, 0x8000 # encoding: [0x00,0x80,0x01,0x3c]
+# CHECK-NEXT: addu $4, $4, $1 # encoding: [0x21,0x20,0x81,0x00]
+ addu $4, -0x8001
+# CHECK-NEXT: lui $1, 0xffff # encoding: [0xff,0xff,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0x7fff # encoding: [0xff,0x7f,0x21,0x34]
+# CHECK-NEXT: addu $4, $4, $1 # encoding: [0x21,0x20,0x81,0x00]
+ addu $4, -0x8000
+# CHECK-NEXT: addiu $4, $4, -0x8000 # encoding: [0x00,0x80,0x84,0x24]
+ addu $4, 0
+# CHECK-NEXT: addiu $4, $4, 0x0 # encoding: [0x00,0x00,0x84,0x24]
+ addu $4, 0xFFFF
+# CHECK-NEXT: ori $1, $zero, 0xffff # encoding: [0xff,0xff,0x01,0x34]
+# CHECK-NEXT: addu $4, $4, $1 # encoding: [0x21,0x20,0x81,0x00]
+ addu $4, 0x10000
+# CHECK-NEXT: lui $1, 0x1 # encoding: [0x01,0x00,0x01,0x3c]
+# CHECK-NEXT: addu $4, $4, $1 # encoding: [0x21,0x20,0x81,0x00]
+ addu $4, 0xFFFFFFFF # This should be sign-extended because it's a 32-bit add
+# CHECK-NEXT: addiu $4, $4, -0x1 # encoding: [0xff,0xff,0x84,0x24]
+ addu $4, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK-NEXT: lui $1, 0xff # encoding: [0xff,0x00,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0xff0f # encoding: [0x0f,0xff,0x21,0x34]
+# CHECK-NEXT: addu $4, $4, $1 # encoding: [0x21,0x20,0x81,0x00]
+
+ addu $4, $5, -0x80000000
+# CHECK: lui $4, 0x8000 # encoding: [0x00,0x80,0x04,0x3c]
+# CHECK-NEXT: addu $4, $4, $5 # encoding: [0x21,0x20,0x85,0x00]
+ addu $4, $5, -0x8001
+# CHECK-NEXT: lui $4, 0xffff # encoding: [0xff,0xff,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0x7fff # encoding: [0xff,0x7f,0x84,0x34]
+# CHECK-NEXT: addu $4, $4, $5 # encoding: [0x21,0x20,0x85,0x00]
+ addu $4, $5, -0x8000
+# CHECK-NEXT: addiu $4, $5, -0x8000 # encoding: [0x00,0x80,0xa4,0x24]
+ addu $4, $5, 0
+# CHECK-NEXT: addiu $4, $5, 0x0 # encoding: [0x00,0x00,0xa4,0x24]
+ addu $4, $5, 0xFFFF
+# CHECK-NEXT: ori $4, $zero, 0xffff # encoding: [0xff,0xff,0x04,0x34]
+# CHECK-NEXT: addu $4, $4, $5 # encoding: [0x21,0x20,0x85,0x00]
+ addu $4, $5, 0x10000
+# CHECK-NEXT: lui $4, 0x1 # encoding: [0x01,0x00,0x04,0x3c]
+# CHECK-NEXT: addu $4, $4, $5 # encoding: [0x21,0x20,0x85,0x00]
+ addu $4, $5, 0xFFFFFFFF # This should be sign-extended because it's a 32-bit add
+# CHECK-NEXT: addiu $4, $5, -0x1 # encoding: [0xff,0xff,0xa4,0x24]
+ addu $4, $5, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK-NEXT: lui $4, 0xff # encoding: [0xff,0x00,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0xff0f # encoding: [0x0f,0xff,0x84,0x34]
+# CHECK-NEXT: addu $4, $4, $5 # encoding: [0x21,0x20,0x85,0x00]
+
+
+ and $4, -0x80000000
+# CHECK: lui $1, 0x8000 # encoding: [0x00,0x80,0x01,0x3c]
+# CHECK-NEXT: and $4, $4, $1 # encoding: [0x24,0x20,0x81,0x00]
+ and $4, -0x8001
+# CHECK-NEXT: lui $1, 0xffff # encoding: [0xff,0xff,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0x7fff # encoding: [0xff,0x7f,0x21,0x34]
+# CHECK-NEXT: and $4, $4, $1 # encoding: [0x24,0x20,0x81,0x00]
+ and $4, -0x8000
+# CHECK-NEXT: addiu $1, $zero, -0x8000 # encoding: [0x00,0x80,0x01,0x24]
+# CHECK-NEXT: and $4, $4, $1 # encoding: [0x24,0x20,0x81,0x00]
+ and $4, 0
+# CHECK-NEXT: andi $4, $4, 0x0 # encoding: [0x00,0x00,0x84,0x30]
+ and $4, 0xFFFF
+# CHECK-NEXT: andi $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x30]
+ and $4, 0x10000
+# CHECK-NEXT: lui $1, 0x1 # encoding: [0x01,0x00,0x01,0x3c]
+# CHECK-NEXT: and $4, $4, $1 # encoding: [0x24,0x20,0x81,0x00]
+ and $4, 0xFFFFFFFF
+# CHECK-NEXT: lui $1, 0xffff # encoding: [0xff,0xff,0x01,0x3c]
+# CHECK-NEXT: dsrl32 $1, $1, 0x0 # encoding: [0x3e,0x08,0x01,0x00]
+# CHECK-NEXT: and $4, $4, $1 # encoding: [0x24,0x20,0x81,0x00]
+ and $4, 0xF0000000
+# CHECK-NEXT: ori $1, $zero, 0xf000 # encoding: [0x00,0xf0,0x01,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: and $4, $4, $1 # encoding: [0x24,0x20,0x81,0x00]
+ and $4, 0x7FFFFFFF
+# CHECK-NEXT: lui $1, 0x7fff # encoding: [0xff,0x7f,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: and $4, $4, $1 # encoding: [0x24,0x20,0x81,0x00]
+ and $4, 0x7FFFFFFFFFFFFFFF
+# CHECK-NEXT: lui $1, 0x7fff # encoding: [0xff,0x7f,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: and $4, $4, $1 # encoding: [0x24,0x20,0x81,0x00]
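+# Note: 64-bit immediates with no shorter encoding are built 16 bits at a
+# time: lui/ori for the upper halfwords, then alternating dsll-by-16 and
+# ori for each remaining halfword.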
+ and $4, 0xFFFFFFFFFFFFFFFF
+# CHECK-NEXT: addiu $1, $zero, -0x1 # encoding: [0xff,0xff,0x01,0x24]
+# CHECK-NEXT: and $4, $4, $1 # encoding: [0x24,0x20,0x81,0x00]
+ and $4, 0xF000000000000000
+# CHECK-NEXT: ori $1, $zero, 0xf000 # encoding: [0x00,0xf0,0x01,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x30 # encoding: [0x3c,0x0c,0x01,0x00]
+# CHECK-NEXT: and $4, $4, $1 # encoding: [0x24,0x20,0x81,0x00]
+ and $4, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK-NEXT: addiu $1, $zero, -0x1 # encoding: [0xff,0xff,0x01,0x24]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xff # encoding: [0xff,0x00,0x21,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xff0f # encoding: [0x0f,0xff,0x21,0x34]
+# CHECK-NEXT: and $4, $4, $1 # encoding: [0x24,0x20,0x81,0x00]
+
+ and $4, $5, -0x80000000
+# CHECK: lui $4, 0x8000 # encoding: [0x00,0x80,0x04,0x3c]
+# CHECK-NEXT: and $4, $4, $5 # encoding: [0x24,0x20,0x85,0x00]
+ and $4, $5, -0x8001
+# CHECK-NEXT: lui $4, 0xffff # encoding: [0xff,0xff,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0x7fff # encoding: [0xff,0x7f,0x84,0x34]
+# CHECK-NEXT: and $4, $4, $5 # encoding: [0x24,0x20,0x85,0x00]
+ and $4, $5, -0x8000
+# CHECK-NEXT: addiu $4, $zero, -0x8000 # encoding: [0x00,0x80,0x04,0x24]
+# CHECK-NEXT: and $4, $4, $5 # encoding: [0x24,0x20,0x85,0x00]
+ and $4, $5, 0
+# CHECK-NEXT: andi $4, $5, 0x0 # encoding: [0x00,0x00,0xa4,0x30]
+ and $4, $5, 0xFFFF
+# CHECK-NEXT: andi $4, $5, 0xffff # encoding: [0xff,0xff,0xa4,0x30]
+ and $4, $5, 0x10000
+# CHECK-NEXT: lui $4, 0x1 # encoding: [0x01,0x00,0x04,0x3c]
+# CHECK-NEXT: and $4, $4, $5 # encoding: [0x24,0x20,0x85,0x00]
+ and $4, $5, 0xFFFFFFFF
+# CHECK-NEXT: lui $4, 0xffff # encoding: [0xff,0xff,0x04,0x3c]
+# CHECK-NEXT: dsrl32 $4, $4, 0x0 # encoding: [0x3e,0x20,0x04,0x00]
+# CHECK-NEXT: and $4, $4, $5 # encoding: [0x24,0x20,0x85,0x00]
+ and $4, $5, 0xF0000000
+# CHECK-NEXT: ori $4, $zero, 0xf000 # encoding: [0x00,0xf0,0x04,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: and $4, $4, $5 # encoding: [0x24,0x20,0x85,0x00]
+ and $4, $5, 0x7FFFFFFF
+# CHECK-NEXT: lui $4, 0x7fff # encoding: [0xff,0x7f,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: and $4, $4, $5 # encoding: [0x24,0x20,0x85,0x00]
+ and $4, $5, 0x7FFFFFFFFFFFFFFF
+# CHECK-NEXT: lui $4, 0x7fff # encoding: [0xff,0x7f,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: and $4, $4, $5 # encoding: [0x24,0x20,0x85,0x00]
+ and $4, $5, 0xFFFFFFFFFFFFFFFF
+# CHECK-NEXT: addiu $4, $zero, -0x1 # encoding: [0xff,0xff,0x04,0x24]
+# CHECK-NEXT: and $4, $4, $5 # encoding: [0x24,0x20,0x85,0x00]
+ and $4, $5, 0xF000000000000000
+# CHECK-NEXT: ori $4, $zero, 0xf000 # encoding: [0x00,0xf0,0x04,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x30 # encoding: [0x3c,0x24,0x04,0x00]
+# CHECK-NEXT: and $4, $4, $5 # encoding: [0x24,0x20,0x85,0x00]
+ and $4, $5, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK-NEXT: addiu $4, $zero, -0x1 # encoding: [0xff,0xff,0x04,0x24]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xff # encoding: [0xff,0x00,0x84,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xff0f # encoding: [0x0f,0xff,0x84,0x34]
+# CHECK-NEXT: and $4, $4, $5 # encoding: [0x24,0x20,0x85,0x00]
+
+ or $4, -0x80000000
+# CHECK: lui $1, 0x8000 # encoding: [0x00,0x80,0x01,0x3c]
+# CHECK-NEXT: or $4, $4, $1 # encoding: [0x25,0x20,0x81,0x00]
+ or $4, -0x8001
+# CHECK-NEXT: lui $1, 0xffff # encoding: [0xff,0xff,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0x7fff # encoding: [0xff,0x7f,0x21,0x34]
+# CHECK-NEXT: or $4, $4, $1 # encoding: [0x25,0x20,0x81,0x00]
+ or $4, -0x8000
+# CHECK-NEXT: addiu $1, $zero, -0x8000 # encoding: [0x00,0x80,0x01,0x24]
+# CHECK-NEXT: or $4, $4, $1 # encoding: [0x25,0x20,0x81,0x00]
+ or $4, 0
+# CHECK-NEXT: ori $4, $4, 0x0 # encoding: [0x00,0x00,0x84,0x34]
+ or $4, 0xFFFF
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+ or $4, 0x10000
+# CHECK-NEXT: lui $1, 0x1 # encoding: [0x01,0x00,0x01,0x3c]
+# CHECK-NEXT: or $4, $4, $1 # encoding: [0x25,0x20,0x81,0x00]
+ or $4, 0xFFFFFFFF
+# CHECK-NEXT: lui $1, 0xffff # encoding: [0xff,0xff,0x01,0x3c]
+# CHECK-NEXT: dsrl32 $1, $1, 0x0 # encoding: [0x3e,0x08,0x01,0x00]
+# CHECK-NEXT: or $4, $4, $1 # encoding: [0x25,0x20,0x81,0x00]
+ or $4, 0xF0000000
+# CHECK-NEXT: ori $1, $zero, 0xf000 # encoding: [0x00,0xf0,0x01,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: or $4, $4, $1 # encoding: [0x25,0x20,0x81,0x00]
+ or $4, 0x7FFFFFFF
+# CHECK-NEXT: lui $1, 0x7fff # encoding: [0xff,0x7f,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: or $4, $4, $1 # encoding: [0x25,0x20,0x81,0x00]
+ or $4, 0x7FFFFFFFFFFFFFFF
+# CHECK-NEXT: lui $1, 0x7fff # encoding: [0xff,0x7f,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: or $4, $4, $1 # encoding: [0x25,0x20,0x81,0x00]
+ or $4, 0xFFFFFFFFFFFFFFFF
+# CHECK-NEXT: addiu $1, $zero, -0x1 # encoding: [0xff,0xff,0x01,0x24]
+# CHECK-NEXT: or $4, $4, $1 # encoding: [0x25,0x20,0x81,0x00]
+ or $4, 0xF000000000000000
+# CHECK-NEXT: ori $1, $zero, 0xf000 # encoding: [0x00,0xf0,0x01,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x30 # encoding: [0x3c,0x0c,0x01,0x00]
+# CHECK-NEXT: or $4, $4, $1 # encoding: [0x25,0x20,0x81,0x00]
+  or $4, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK-NEXT: addiu $1, $zero, -0x1 # encoding: [0xff,0xff,0x01,0x24]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xff # encoding: [0xff,0x00,0x21,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xff0f # encoding: [0x0f,0xff,0x21,0x34]
+# CHECK-NEXT: or $4, $4, $1 # encoding: [0x25,0x20,0x81,0x00]
+
+ or $4, $5, -0x80000000
+# CHECK: lui $4, 0x8000 # encoding: [0x00,0x80,0x04,0x3c]
+# CHECK-NEXT: or $4, $4, $5 # encoding: [0x25,0x20,0x85,0x00]
+ or $4, $5, -0x8001
+# CHECK-NEXT: lui $4, 0xffff # encoding: [0xff,0xff,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0x7fff # encoding: [0xff,0x7f,0x84,0x34]
+# CHECK-NEXT: or $4, $4, $5 # encoding: [0x25,0x20,0x85,0x00]
+ or $4, $5, -0x8000
+# CHECK-NEXT: addiu $4, $zero, -0x8000 # encoding: [0x00,0x80,0x04,0x24]
+# CHECK-NEXT: or $4, $4, $5 # encoding: [0x25,0x20,0x85,0x00]
+ or $4, $5, 0
+# CHECK-NEXT: ori $4, $5, 0x0 # encoding: [0x00,0x00,0xa4,0x34]
+ or $4, $5, 0xFFFF
+# CHECK-NEXT: ori $4, $5, 0xffff # encoding: [0xff,0xff,0xa4,0x34]
+ or $4, $5, 0x10000
+# CHECK-NEXT: lui $4, 0x1 # encoding: [0x01,0x00,0x04,0x3c]
+# CHECK-NEXT: or $4, $4, $5 # encoding: [0x25,0x20,0x85,0x00]
+ or $4, $5, 0xFFFFFFFF
+# CHECK-NEXT: lui $4, 0xffff # encoding: [0xff,0xff,0x04,0x3c]
+# CHECK-NEXT: dsrl32 $4, $4, 0x0 # encoding: [0x3e,0x20,0x04,0x00]
+# CHECK-NEXT: or $4, $4, $5 # encoding: [0x25,0x20,0x85,0x00]
+ or $4, $5, 0xF0000000
+# CHECK-NEXT: ori $4, $zero, 0xf000 # encoding: [0x00,0xf0,0x04,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: or $4, $4, $5 # encoding: [0x25,0x20,0x85,0x00]
+ or $4, $5, 0x7FFFFFFF
+# CHECK-NEXT: lui $4, 0x7fff # encoding: [0xff,0x7f,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: or $4, $4, $5 # encoding: [0x25,0x20,0x85,0x00]
+ or $4, $5, 0x7FFFFFFFFFFFFFFF
+# CHECK-NEXT: lui $4, 0x7fff # encoding: [0xff,0x7f,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: or $4, $4, $5 # encoding: [0x25,0x20,0x85,0x00]
+ or $4, $5, 0xFFFFFFFFFFFFFFFF
+# CHECK-NEXT: addiu $4, $zero, -0x1 # encoding: [0xff,0xff,0x04,0x24]
+# CHECK-NEXT: or $4, $4, $5 # encoding: [0x25,0x20,0x85,0x00]
+ or $4, $5, 0xF000000000000000
+# CHECK-NEXT: ori $4, $zero, 0xf000 # encoding: [0x00,0xf0,0x04,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x30 # encoding: [0x3c,0x24,0x04,0x00]
+# CHECK-NEXT: or $4, $4, $5 # encoding: [0x25,0x20,0x85,0x00]
+ or $4, $5, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK-NEXT: addiu $4, $zero, -0x1 # encoding: [0xff,0xff,0x04,0x24]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xff # encoding: [0xff,0x00,0x84,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xff0f # encoding: [0x0f,0xff,0x84,0x34]
+# CHECK-NEXT: or $4, $4, $5 # encoding: [0x25,0x20,0x85,0x00]
+
+ xor $4, -0x80000000
+# CHECK: lui $1, 0x8000 # encoding: [0x00,0x80,0x01,0x3c]
+# CHECK-NEXT: xor $4, $4, $1 # encoding: [0x26,0x20,0x81,0x00]
+ xor $4, -0x8001
+# CHECK-NEXT: lui $1, 0xffff # encoding: [0xff,0xff,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0x7fff # encoding: [0xff,0x7f,0x21,0x34]
+# CHECK-NEXT: xor $4, $4, $1 # encoding: [0x26,0x20,0x81,0x00]
+ xor $4, -0x8000
+# CHECK-NEXT: addiu $1, $zero, -0x8000 # encoding: [0x00,0x80,0x01,0x24]
+# CHECK-NEXT: xor $4, $4, $1 # encoding: [0x26,0x20,0x81,0x00]
+ xor $4, 0
+# CHECK-NEXT: xori $4, $4, 0x0 # encoding: [0x00,0x00,0x84,0x38]
+ xor $4, 0xFFFF
+# CHECK-NEXT: xori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x38]
+ xor $4, 0x10000
+# CHECK-NEXT: lui $1, 0x1 # encoding: [0x01,0x00,0x01,0x3c]
+# CHECK-NEXT: xor $4, $4, $1 # encoding: [0x26,0x20,0x81,0x00]
+ xor $4, 0xFFFFFFFF
+# CHECK-NEXT: lui $1, 0xffff # encoding: [0xff,0xff,0x01,0x3c]
+# CHECK-NEXT: dsrl32 $1, $1, 0x0 # encoding: [0x3e,0x08,0x01,0x00]
+# CHECK-NEXT: xor $4, $4, $1 # encoding: [0x26,0x20,0x81,0x00]
+ xor $4, 0xF0000000
+# CHECK-NEXT: ori $1, $zero, 0xf000 # encoding: [0x00,0xf0,0x01,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: xor $4, $4, $1 # encoding: [0x26,0x20,0x81,0x00]
+ xor $4, 0x7FFFFFFF
+# CHECK-NEXT: lui $1, 0x7fff # encoding: [0xff,0x7f,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: xor $4, $4, $1 # encoding: [0x26,0x20,0x81,0x00]
+ xor $4, 0x7FFFFFFFFFFFFFFF
+# CHECK-NEXT: lui $1, 0x7fff # encoding: [0xff,0x7f,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: xor $4, $4, $1 # encoding: [0x26,0x20,0x81,0x00]
+ xor $4, 0xFFFFFFFFFFFFFFFF
+# CHECK-NEXT: addiu $1, $zero, -0x1 # encoding: [0xff,0xff,0x01,0x24]
+# CHECK-NEXT: xor $4, $4, $1 # encoding: [0x26,0x20,0x81,0x00]
+ xor $4, 0xF000000000000000
+# CHECK-NEXT: ori $1, $zero, 0xf000 # encoding: [0x00,0xf0,0x01,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x30 # encoding: [0x3c,0x0c,0x01,0x00]
+# CHECK-NEXT: xor $4, $4, $1 # encoding: [0x26,0x20,0x81,0x00]
+ xor $4, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK-NEXT: addiu $1, $zero, -0x1 # encoding: [0xff,0xff,0x01,0x24]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xff # encoding: [0xff,0x00,0x21,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xff0f # encoding: [0x0f,0xff,0x21,0x34]
+# CHECK-NEXT: xor $4, $4, $1 # encoding: [0x26,0x20,0x81,0x00]
+
+ xor $4, $5, -0x80000000
+# CHECK: lui $4, 0x8000 # encoding: [0x00,0x80,0x04,0x3c]
+# CHECK-NEXT: xor $4, $4, $5 # encoding: [0x26,0x20,0x85,0x00]
+ xor $4, $5, -0x8001
+# CHECK-NEXT: lui $4, 0xffff # encoding: [0xff,0xff,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0x7fff # encoding: [0xff,0x7f,0x84,0x34]
+# CHECK-NEXT: xor $4, $4, $5 # encoding: [0x26,0x20,0x85,0x00]
+ xor $4, $5, -0x8000
+# CHECK-NEXT: addiu $4, $zero, -0x8000 # encoding: [0x00,0x80,0x04,0x24]
+# CHECK-NEXT: xor $4, $4, $5 # encoding: [0x26,0x20,0x85,0x00]
+ xor $4, $5, 0
+# CHECK-NEXT: xori $4, $5, 0x0 # encoding: [0x00,0x00,0xa4,0x38]
+ xor $4, $5, 0xFFFF
+# CHECK-NEXT: xori $4, $5, 0xffff # encoding: [0xff,0xff,0xa4,0x38]
+ xor $4, $5, 0x10000
+# CHECK-NEXT: lui $4, 0x1 # encoding: [0x01,0x00,0x04,0x3c]
+# CHECK-NEXT: xor $4, $4, $5 # encoding: [0x26,0x20,0x85,0x00]
+ xor $4, $5, 0xFFFFFFFF
+# CHECK-NEXT: lui $4, 0xffff # encoding: [0xff,0xff,0x04,0x3c]
+# CHECK-NEXT: dsrl32 $4, $4, 0x0 # encoding: [0x3e,0x20,0x04,0x00]
+# CHECK-NEXT: xor $4, $4, $5 # encoding: [0x26,0x20,0x85,0x00]
+ xor $4, $5, 0xF0000000
+# CHECK-NEXT: ori $4, $zero, 0xf000 # encoding: [0x00,0xf0,0x04,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: xor $4, $4, $5 # encoding: [0x26,0x20,0x85,0x00]
+ xor $4, $5, 0x7FFFFFFF
+# CHECK-NEXT: lui $4, 0x7fff # encoding: [0xff,0x7f,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: xor $4, $4, $5 # encoding: [0x26,0x20,0x85,0x00]
+ xor $4, $5, 0x7FFFFFFFFFFFFFFF
+# FIXME: this is awfully inefficient...
+# CHECK-NEXT: lui $4, 0x7fff # encoding: [0xff,0x7f,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: xor $4, $4, $5 # encoding: [0x26,0x20,0x85,0x00]
+ xor $4, $5, 0xFFFFFFFFFFFFFFFF
+# CHECK-NEXT: addiu $4, $zero, -0x1 # encoding: [0xff,0xff,0x04,0x24]
+# CHECK-NEXT: xor $4, $4, $5 # encoding: [0x26,0x20,0x85,0x00]
+ xor $4, $5, 0xF000000000000000
+# CHECK-NEXT: ori $4, $zero, 0xf000 # encoding: [0x00,0xf0,0x04,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x30 # encoding: [0x3c,0x24,0x04,0x00]
+# CHECK-NEXT: xor $4, $4, $5 # encoding: [0x26,0x20,0x85,0x00]
+ xor $4, $5, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK-NEXT: addiu $4, $zero, -0x1 # encoding: [0xff,0xff,0x04,0x24]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xff # encoding: [0xff,0x00,0x84,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xff0f # encoding: [0x0f,0xff,0x84,0x34]
+# CHECK-NEXT: xor $4, $4, $5 # encoding: [0x26,0x20,0x85,0x00]
+
+ nor $4, 0
+# CHECK: addiu $1, $zero, 0x0 # encoding: [0x00,0x00,0x01,0x24]
+# CHECK-NEXT: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, 1
+# CHECK-NEXT: addiu $1, $zero, 0x1 # encoding: [0x01,0x00,0x01,0x24]
+# CHECK-NEXT: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, 0x8000
+# CHECK-NEXT: ori $1, $zero, 0x8000 # encoding: [0x00,0x80,0x01,0x34]
+# CHECK-NEXT: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, -0x8000
+# CHECK-NEXT: addiu $1, $zero, -0x8000 # encoding: [0x00,0x80,0x01,0x24]
+# CHECK-NEXT: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, 0x10000
+# CHECK-NEXT: lui $1, 0x1 # encoding: [0x01,0x00,0x01,0x3c]
+# CHECK-NEXT: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, 0x1a5a5
+# CHECK-NEXT: lui $1, 0x1 # encoding: [0x01,0x00,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0xa5a5 # encoding: [0xa5,0xa5,0x21,0x34]
+# CHECK-NEXT: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, 0xFFFFFFFF
+# CHECK-NEXT: lui $1, 0xffff # encoding: [0xff,0xff,0x01,0x3c]
+# CHECK-NEXT: dsrl32 $1, $1, 0x0 # encoding: [0x3e,0x08,0x01,0x00]
+# CHECK-NEXT: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, 0xF0000000
+# CHECK-NEXT: ori $1, $zero, 0xf000 # encoding: [0x00,0xf0,0x01,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, 0x7FFFFFFF
+# CHECK-NEXT: lui $1, 0x7fff # encoding: [0xff,0x7f,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, 0x7FFFFFFFFFFFFFF
+# CHECK-NEXT: lui $1, 0x7ff # encoding: [0xff,0x07,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, 0xFFFFFFFFFFFFFFFF
+# CHECK-NEXT: addiu $1, $zero, -0x1 # encoding: [0xff,0xff,0x01,0x24]
+# CHECK-NEXT: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, 0xF000000000000000
+# CHECK-NEXT: ori $1, $zero, 0xf000 # encoding: [0x00,0xf0,0x01,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x30 # encoding: [0x3c,0x0c,0x01,0x00]
+# CHECK-NEXT: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK-NEXT: addiu $1, $zero, -0x1 # encoding: [0xff,0xff,0x01,0x24]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xff # encoding: [0xff,0x00,0x21,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xff0f # encoding: [0x0f,0xff,0x21,0x34]
+# CHECK-NEXT: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+ nor $4, 0xff00ff00
+# CHECK-NEXT: ori $1, $zero, 0xff00 # encoding: [0x00,0xff,0x01,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xff00 # encoding: [0x00,0xff,0x21,0x34]
+# CHECK-NEXT: nor $4, $4, $1 # encoding: [0x27,0x20,0x81,0x00]
+
+ nor $4, $5, 0
+# CHECK: addiu $4, $zero, 0x0 # encoding: [0x00,0x00,0x04,0x24]
+# CHECK-NEXT: nor $4, $4, $5 # encoding: [0x27,0x20,0x85,0x00]
+ nor $4, $5, 1
+# CHECK-NEXT: addiu $4, $zero, 0x1 # encoding: [0x01,0x00,0x04,0x24]
+# CHECK-NEXT: nor $4, $4, $5 # encoding: [0x27,0x20,0x85,0x00]
+ nor $4, $5, 0x8000
+# CHECK-NEXT: ori $4, $zero, 0x8000 # encoding: [0x00,0x80,0x04,0x34]
+# CHECK-NEXT: nor $4, $4, $5 # encoding: [0x27,0x20,0x85,0x00]
+ nor $4, $5, -0x8000
+# CHECK-NEXT: addiu $4, $zero, -0x8000 # encoding: [0x00,0x80,0x04,0x24]
+# CHECK-NEXT: nor $4, $4, $5 # encoding: [0x27,0x20,0x85,0x00]
+ nor $4, $5, 0x10000
+# CHECK-NEXT: lui $4, 0x1 # encoding: [0x01,0x00,0x04,0x3c]
+# CHECK-NEXT: nor $4, $4, $5 # encoding: [0x27,0x20,0x85,0x00]
+ nor $4, $5, 0x1a5a5
+# CHECK-NEXT: lui $4, 0x1 # encoding: [0x01,0x00,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0xa5a5 # encoding: [0xa5,0xa5,0x84,0x34]
+# CHECK-NEXT: nor $4, $4, $5 # encoding: [0x27,0x20,0x85,0x00]
+ nor $4, $5, 0xFFFFFFFF
+# CHECK-NEXT: lui $4, 0xffff # encoding: [0xff,0xff,0x04,0x3c]
+# CHECK-NEXT: dsrl32 $4, $4, 0x0 # encoding: [0x3e,0x20,0x04,0x00]
+# CHECK-NEXT: nor $4, $4, $5 # encoding: [0x27,0x20,0x85,0x00]
+ nor $4, $5, 0xF0000000
+# CHECK-NEXT: ori $4, $zero, 0xf000 # encoding: [0x00,0xf0,0x04,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: nor $4, $4, $5 # encoding: [0x27,0x20,0x85,0x00]
+ nor $4, $5, 0x7FFFFFFF
+# CHECK-NEXT: lui $4, 0x7fff # encoding: [0xff,0x7f,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: nor $4, $4, $5 # encoding: [0x27,0x20,0x85,0x00]
+ nor $4, $5, 0x7FFFFFFFFFFFFFFF
+# FIXME: this is awfully inefficient...
+# CHECK-NEXT: lui $4, 0x7fff # encoding: [0xff,0x7f,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: nor $4, $4, $5 # encoding: [0x27,0x20,0x85,0x00]
+ nor $4, $5, 0xFFFFFFFFFFFFFFFF
+# CHECK-NEXT: addiu $4, $zero, -0x1 # encoding: [0xff,0xff,0x04,0x24]
+# CHECK-NEXT: nor $4, $4, $5 # encoding: [0x27,0x20,0x85,0x00]
+ nor $4, $5, 0xF000000000000000
+# CHECK-NEXT: ori $4, $zero, 0xf000 # encoding: [0x00,0xf0,0x04,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x30 # encoding: [0x3c,0x24,0x04,0x00]
+# CHECK-NEXT: nor $4, $4, $5 # encoding: [0x27,0x20,0x85,0x00]
+ nor $4, $5, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK-NEXT: addiu $4, $zero, -0x1 # encoding: [0xff,0xff,0x04,0x24]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xff # encoding: [0xff,0x00,0x84,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xff0f # encoding: [0x0f,0xff,0x84,0x34]
+# CHECK-NEXT: nor $4, $4, $5 # encoding: [0x27,0x20,0x85,0x00]
+ nor $4, $5, 0xff00ff00
+# CHECK-NEXT: ori $4, $zero, 0xff00 # encoding: [0x00,0xff,0x04,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xff00 # encoding: [0x00,0xff,0x84,0x34]
+# CHECK-NEXT: nor $4, $4, $5 # encoding: [0x27,0x20,0x85,0x00]
+
+
+ slt $4, -0x80000000
+# CHECK: lui $1, 0x8000 # encoding: [0x00,0x80,0x01,0x3c]
+# CHECK-NEXT: slt $4, $4, $1 # encoding: [0x2a,0x20,0x81,0x00]
+ slt $4, -0x8001
+# CHECK-NEXT: lui $1, 0xffff # encoding: [0xff,0xff,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0x7fff # encoding: [0xff,0x7f,0x21,0x34]
+# CHECK-NEXT: slt $4, $4, $1 # encoding: [0x2a,0x20,0x81,0x00]
+ slt $4, -0x8000
+# CHECK-NEXT: slti $4, $4, -0x8000 # encoding: [0x00,0x80,0x84,0x28]
+ slt $4, 0
+# CHECK-NEXT: slti $4, $4, 0x0 # encoding: [0x00,0x00,0x84,0x28]
+ slt $4, 0xFFFF
+# CHECK-NEXT: ori $1, $zero, 0xffff # encoding: [0xff,0xff,0x01,0x34]
+# CHECK-NEXT: slt $4, $4, $1 # encoding: [0x2a,0x20,0x81,0x00]
+ slt $4, 0x10000
+# CHECK-NEXT: lui $1, 0x1 # encoding: [0x01,0x00,0x01,0x3c]
+# CHECK-NEXT: slt $4, $4, $1 # encoding: [0x2a,0x20,0x81,0x00]
+ slt $4, 0xFFFFFFFF
+# CHECK-NEXT: lui $1, 0xffff # encoding: [0xff,0xff,0x01,0x3c]
+# CHECK-NEXT: dsrl32 $1, $1, 0x0 # encoding: [0x3e,0x08,0x01,0x00]
+# CHECK-NEXT: slt $4, $4, $1 # encoding: [0x2a,0x20,0x81,0x00]
+ slt $4, 0xF0000000
+# CHECK-NEXT: ori $1, $zero, 0xf000 # encoding: [0x00,0xf0,0x01,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: slt $4, $4, $1 # encoding: [0x2a,0x20,0x81,0x00]
+ slt $4, 0x7FFFFFFF
+# CHECK-NEXT: lui $1, 0x7fff # encoding: [0xff,0x7f,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: slt $4, $4, $1 # encoding: [0x2a,0x20,0x81,0x00]
+ slt $4, 0x7FFFFFFFFFFFFFFF
+# CHECK-NEXT: lui $1, 0x7fff # encoding: [0xff,0x7f,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: slt $4, $4, $1 # encoding: [0x2a,0x20,0x81,0x00]
+ slt $4, 0xFFFFFFFFFFFFFFFF
+# CHECK-NEXT: slti $4, $4, -0x1 # encoding: [0xff,0xff,0x84,0x28]
+ slt $4, 0xF000000000000000
+# CHECK-NEXT: ori $1, $zero, 0xf000 # encoding: [0x00,0xf0,0x01,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x30 # encoding: [0x3c,0x0c,0x01,0x00]
+# CHECK-NEXT: slt $4, $4, $1 # encoding: [0x2a,0x20,0x81,0x00]
+ slt $4, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK-NEXT: addiu $1, $zero, -0x1 # encoding: [0xff,0xff,0x01,0x24]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xff # encoding: [0xff,0x00,0x21,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xff0f # encoding: [0x0f,0xff,0x21,0x34]
+# CHECK-NEXT: slt $4, $4, $1 # encoding: [0x2a,0x20,0x81,0x00]
+
+ slt $4, $5, -0x80000000
+# CHECK: lui $4, 0x8000 # encoding: [0x00,0x80,0x04,0x3c]
+# CHECK-NEXT: slt $4, $4, $5 # encoding: [0x2a,0x20,0x85,0x00]
+ slt $4, $5, -0x8001
+# CHECK-NEXT: lui $4, 0xffff # encoding: [0xff,0xff,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0x7fff # encoding: [0xff,0x7f,0x84,0x34]
+# CHECK-NEXT: slt $4, $4, $5 # encoding: [0x2a,0x20,0x85,0x00]
+ slt $4, $5, -0x8000
+# CHECK-NEXT: slti $4, $5, -0x8000 # encoding: [0x00,0x80,0xa4,0x28]
+ slt $4, $5, 0
+# CHECK-NEXT: slti $4, $5, 0x0 # encoding: [0x00,0x00,0xa4,0x28]
+ slt $4, $5, 0xFFFF
+# CHECK-NEXT: ori $4, $zero, 0xffff # encoding: [0xff,0xff,0x04,0x34]
+# CHECK-NEXT: slt $4, $4, $5 # encoding: [0x2a,0x20,0x85,0x00]
+ slt $4, $5, 0x10000
+# CHECK-NEXT: lui $4, 0x1 # encoding: [0x01,0x00,0x04,0x3c]
+# CHECK-NEXT: slt $4, $4, $5 # encoding: [0x2a,0x20,0x85,0x00]
+ slt $4, $5, 0xFFFFFFFF
+# CHECK-NEXT: lui $4, 0xffff # encoding: [0xff,0xff,0x04,0x3c]
+# CHECK-NEXT: dsrl32 $4, $4, 0x0 # encoding: [0x3e,0x20,0x04,0x00]
+# CHECK-NEXT: slt $4, $4, $5 # encoding: [0x2a,0x20,0x85,0x00]
+ slt $4, $5, 0xF0000000
+# CHECK-NEXT: ori $4, $zero, 0xf000 # encoding: [0x00,0xf0,0x04,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: slt $4, $4, $5 # encoding: [0x2a,0x20,0x85,0x00]
+ slt $4, $5, 0x7FFFFFFF
+# CHECK-NEXT: lui $4, 0x7fff # encoding: [0xff,0x7f,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: slt $4, $4, $5 # encoding: [0x2a,0x20,0x85,0x00]
+ slt $4, $5, 0x7FFFFFFFFFFFFFFF
+# FIXME: this is awfully inefficient...
+# CHECK-NEXT: lui $4, 0x7fff # encoding: [0xff,0x7f,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: slt $4, $4, $5 # encoding: [0x2a,0x20,0x85,0x00]
+ slt $4, $5, 0xFFFFFFFFFFFFFFFF
+# CHECK-NEXT: slti $4, $5, -0x1 # encoding: [0xff,0xff,0xa4,0x28]
+ slt $4, $5, 0xF000000000000000
+# CHECK-NEXT: ori $4, $zero, 0xf000 # encoding: [0x00,0xf0,0x04,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x30 # encoding: [0x3c,0x24,0x04,0x00]
+# CHECK-NEXT: slt $4, $4, $5 # encoding: [0x2a,0x20,0x85,0x00]
+ slt $4, $5, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK-NEXT: addiu $4, $zero, -0x1 # encoding: [0xff,0xff,0x04,0x24]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xff # encoding: [0xff,0x00,0x84,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xff0f # encoding: [0x0f,0xff,0x84,0x34]
+# CHECK-NEXT: slt $4, $4, $5 # encoding: [0x2a,0x20,0x85,0x00]
+
+ sltu $4, -0x80000000
+# CHECK: lui $1, 0x8000 # encoding: [0x00,0x80,0x01,0x3c]
+# CHECK-NEXT: sltu $4, $4, $1 # encoding: [0x2b,0x20,0x81,0x00]
+ sltu $4, -0x8001
+# CHECK-NEXT: lui $1, 0xffff # encoding: [0xff,0xff,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0x7fff # encoding: [0xff,0x7f,0x21,0x34]
+# CHECK-NEXT: sltu $4, $4, $1 # encoding: [0x2b,0x20,0x81,0x00]
+ sltu $4, -0x8000
+# CHECK-NEXT: sltiu $4, $4, -0x8000 # encoding: [0x00,0x80,0x84,0x2c]
+ sltu $4, 0
+# CHECK-NEXT: sltiu $4, $4, 0x0 # encoding: [0x00,0x00,0x84,0x2c]
+ sltu $4, 0xFFFF
+# CHECK-NEXT: ori $1, $zero, 0xffff # encoding: [0xff,0xff,0x01,0x34]
+# CHECK-NEXT: sltu $4, $4, $1 # encoding: [0x2b,0x20,0x81,0x00]
+ sltu $4, 0x10000
+# CHECK-NEXT: lui $1, 0x1 # encoding: [0x01,0x00,0x01,0x3c]
+# CHECK-NEXT: sltu $4, $4, $1 # encoding: [0x2b,0x20,0x81,0x00]
+ sltu $4, 0xFFFFFFFF
+# CHECK-NEXT: lui $1, 0xffff # encoding: [0xff,0xff,0x01,0x3c]
+# CHECK-NEXT: dsrl32 $1, $1, 0x0 # encoding: [0x3e,0x08,0x01,0x00]
+# CHECK-NEXT: sltu $4, $4, $1 # encoding: [0x2b,0x20,0x81,0x00]
+ sltu $4, 0xF0000000
+# CHECK-NEXT: ori $1, $zero, 0xf000 # encoding: [0x00,0xf0,0x01,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: sltu $4, $4, $1 # encoding: [0x2b,0x20,0x81,0x00]
+ sltu $4, 0x7FFFFFFF
+# CHECK-NEXT: lui $1, 0x7fff # encoding: [0xff,0x7f,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: sltu $4, $4, $1 # encoding: [0x2b,0x20,0x81,0x00]
+ sltu $4, 0x7FFFFFFFFFFFFFFF
+# CHECK-NEXT: lui $1, 0x7fff # encoding: [0xff,0x7f,0x01,0x3c]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xffff # encoding: [0xff,0xff,0x21,0x34]
+# CHECK-NEXT: sltu $4, $4, $1 # encoding: [0x2b,0x20,0x81,0x00]
+ sltu $4, 0xFFFFFFFFFFFFFFFF
+# CHECK-NEXT: sltiu $4, $4, -0x1 # encoding: [0xff,0xff,0x84,0x2c]
+ sltu $4, 0xF000000000000000
+# CHECK-NEXT: ori $1, $zero, 0xf000 # encoding: [0x00,0xf0,0x01,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x30 # encoding: [0x3c,0x0c,0x01,0x00]
+# CHECK-NEXT: sltu $4, $4, $1 # encoding: [0x2b,0x20,0x81,0x00]
+ sltu $4, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK-NEXT: addiu $1, $zero, -0x1 # encoding: [0xff,0xff,0x01,0x24]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xff # encoding: [0xff,0x00,0x21,0x34]
+# CHECK-NEXT: dsll $1, $1, 0x10 # encoding: [0x38,0x0c,0x01,0x00]
+# CHECK-NEXT: ori $1, $1, 0xff0f # encoding: [0x0f,0xff,0x21,0x34]
+# CHECK-NEXT: sltu $4, $4, $1 # encoding: [0x2b,0x20,0x81,0x00]
+
+ sltu $4, $5, -0x80000000
+# CHECK: lui $4, 0x8000 # encoding: [0x00,0x80,0x04,0x3c]
+# CHECK-NEXT: sltu $4, $4, $5 # encoding: [0x2b,0x20,0x85,0x00]
+ sltu $4, $5, -0x8001
+# CHECK-NEXT: lui $4, 0xffff # encoding: [0xff,0xff,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0x7fff # encoding: [0xff,0x7f,0x84,0x34]
+# CHECK-NEXT: sltu $4, $4, $5 # encoding: [0x2b,0x20,0x85,0x00]
+ sltu $4, $5, -0x8000
+# CHECK-NEXT: sltiu $4, $5, -0x8000 # encoding: [0x00,0x80,0xa4,0x2c]
+ sltu $4, $5, 0
+# CHECK-NEXT: sltiu $4, $5, 0x0 # encoding: [0x00,0x00,0xa4,0x2c]
+ sltu $4, $5, 0xFFFF
+# CHECK-NEXT: ori $4, $zero, 0xffff # encoding: [0xff,0xff,0x04,0x34]
+# CHECK-NEXT: sltu $4, $4, $5 # encoding: [0x2b,0x20,0x85,0x00]
+ sltu $4, $5, 0x10000
+# CHECK-NEXT: lui $4, 0x1 # encoding: [0x01,0x00,0x04,0x3c]
+# CHECK-NEXT: sltu $4, $4, $5 # encoding: [0x2b,0x20,0x85,0x00]
+ sltu $4, $5, 0xFFFFFFFF
+# CHECK-NEXT: lui $4, 0xffff # encoding: [0xff,0xff,0x04,0x3c]
+# CHECK-NEXT: dsrl32 $4, $4, 0x0 # encoding: [0x3e,0x20,0x04,0x00]
+# CHECK-NEXT: sltu $4, $4, $5 # encoding: [0x2b,0x20,0x85,0x00]
+ sltu $4, $5, 0xF0000000
+# CHECK-NEXT: ori $4, $zero, 0xf000 # encoding: [0x00,0xf0,0x04,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: sltu $4, $4, $5 # encoding: [0x2b,0x20,0x85,0x00]
+ sltu $4, $5, 0x7FFFFFFF
+# CHECK-NEXT: lui $4, 0x7fff # encoding: [0xff,0x7f,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: sltu $4, $4, $5 # encoding: [0x2b,0x20,0x85,0x00]
+ sltu $4, $5, 0x7FFFFFFFFFFFFFFF
+# FIXME: this is awfully inefficient...
+# CHECK-NEXT: lui $4, 0x7fff # encoding: [0xff,0x7f,0x04,0x3c]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xffff # encoding: [0xff,0xff,0x84,0x34]
+# CHECK-NEXT: sltu $4, $4, $5 # encoding: [0x2b,0x20,0x85,0x00]
+ sltu $4, $5, 0xFFFFFFFFFFFFFFFF
+# CHECK-NEXT: sltiu $4, $5, -0x1 # encoding: [0xff,0xff,0xa4,0x2c]
+ sltu $4, $5, 0xF000000000000000
+# CHECK-NEXT: ori $4, $zero, 0xf000 # encoding: [0x00,0xf0,0x04,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x30 # encoding: [0x3c,0x24,0x04,0x00]
+# CHECK-NEXT: sltu $4, $4, $5 # encoding: [0x2b,0x20,0x85,0x00]
+ sltu $4, $5, ~(0xf0000000|0x0f000000|0x000000f0)
+# CHECK-NEXT: addiu $4, $zero, -0x1 # encoding: [0xff,0xff,0x04,0x24]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xff # encoding: [0xff,0x00,0x84,0x34]
+# CHECK-NEXT: dsll $4, $4, 0x10 # encoding: [0x38,0x24,0x04,0x00]
+# CHECK-NEXT: ori $4, $4, 0xff0f # encoding: [0x0f,0xff,0x84,0x34]
+# CHECK-NEXT: sltu $4, $4, $5 # encoding: [0x2b,0x20,0x85,0x00]
diff --git a/test/MC/Mips/mips64extins.s b/test/MC/Mips/mips64extins.s
index 3f1973bf52de..5bd18ff62d5e 100644
--- a/test/MC/Mips/mips64extins.s
+++ b/test/MC/Mips/mips64extins.s
@@ -5,5 +5,5 @@
dextu $2, $4, 34, 6 # CHECK: dextu ${{[0-9]+}}, ${{[0-9]+}}, 34, 6
dextm $2, $4, 5, 34 # CHECK: dextm ${{[0-9]+}}, ${{[0-9]+}}, 5, 34
dins $4, $5, 8, 10 # CHECK: dins ${{[0-9]+}}, ${{[0-9]+}}, 8, 10
- dinsm $4, $5, 10, 1 # CHECK: dinsm ${{[0-9]+}}, ${{[0-9]+}}, 10, 1
+ dinsm $4, $5, 30, 6 # CHECK: dinsm ${{[0-9]+}}, ${{[0-9]+}}, 30, 6
dinsu $4, $5, 40, 13 # CHECK: dinsu ${{[0-9]+}}, ${{[0-9]+}}, 40, 13
diff --git a/test/MC/Mips/mul-macro-variants.s b/test/MC/Mips/mul-macro-variants.s
new file mode 100644
index 000000000000..a15c5e595228
--- /dev/null
+++ b/test/MC/Mips/mul-macro-variants.s
@@ -0,0 +1,154 @@
+# RUN: llvm-mc %s -triple mips-unknown-linux -show-encoding -mcpu=mips64r2 | FileCheck %s
+# RUN: llvm-mc %s -triple mips-unknown-linux -show-encoding -mcpu=mips64r3 | FileCheck %s
+# RUN: llvm-mc %s -triple mips-unknown-linux -show-encoding -mcpu=mips64r5 | FileCheck %s
+
+# RUN: llvm-mc %s -triple mips-unknown-linux -show-encoding -mattr=use-tcc-in-div -mcpu=mips64 | FileCheck %s --check-prefix=CHECK-TRAP
+# RUN: llvm-mc %s -triple mips-unknown-linux -show-encoding -mattr=use-tcc-in-div -mcpu=mips64r2 | FileCheck %s --check-prefix=CHECK-TRAP
+# RUN: llvm-mc %s -triple mips-unknown-linux -show-encoding -mattr=use-tcc-in-div -mcpu=mips64r3 | FileCheck %s --check-prefix=CHECK-TRAP
+# RUN: llvm-mc %s -triple mips-unknown-linux -show-encoding -mattr=use-tcc-in-div -mcpu=mips64r5 | FileCheck %s --check-prefix=CHECK-TRAP
+
+.text
+text_label:
+
+ mul $4, $5
+# CHECK: mul $4, $4, $5 # encoding: [0x70,0x85,0x20,0x02]
+# CHECK-TRAP: mul $4, $4, $5 # encoding: [0x70,0x85,0x20,0x02]
+ mul $4, $5, $6
+# CHECK: mul $4, $5, $6 # encoding: [0x70,0xa6,0x20,0x02]
+# CHECK-TRAP: mul $4, $5, $6 # encoding: [0x70,0xa6,0x20,0x02]
+ mul $4, $5, 0
+# CHECK: addiu $1, $zero, 0 # encoding: [0x24,0x01,0x00,0x00]
+# CHECK: mult $5, $1 # encoding: [0x00,0xa1,0x00,0x18]
+# CHECK: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, 0 # encoding: [0x24,0x01,0x00,0x00]
+# CHECK-TRAP: mult $5, $1 # encoding: [0x00,0xa1,0x00,0x18]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+ mul $4, $5, 1
+# CHECK: addiu $1, $zero, 1 # encoding: [0x24,0x01,0x00,0x01]
+# CHECK: mult $5, $1 # encoding: [0x00,0xa1,0x00,0x18]
+# CHECK: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, 1 # encoding: [0x24,0x01,0x00,0x01]
+# CHECK-TRAP: mult $5, $1 # encoding: [0x00,0xa1,0x00,0x18]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+ mul $4, $5, 0x8000
+# CHECK: ori $1, $zero, 32768 # encoding: [0x34,0x01,0x80,0x00]
+# CHECK: mult $5, $1 # encoding: [0x00,0xa1,0x00,0x18]
+# CHECK: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: ori $1, $zero, 32768 # encoding: [0x34,0x01,0x80,0x00]
+# CHECK-TRAP: mult $5, $1 # encoding: [0x00,0xa1,0x00,0x18]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+ mul $4, $5, -0x8000
+# CHECK: addiu $1, $zero, -32768 # encoding: [0x24,0x01,0x80,0x00]
+# CHECK: mult $5, $1 # encoding: [0x00,0xa1,0x00,0x18]
+# CHECK: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, -32768 # encoding: [0x24,0x01,0x80,0x00]
+# CHECK-TRAP: mult $5, $1 # encoding: [0x00,0xa1,0x00,0x18]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+ mul $4, $5, 0x10000
+# CHECK: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK: mult $5, $1 # encoding: [0x00,0xa1,0x00,0x18]
+# CHECK: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-TRAP: mult $5, $1 # encoding: [0x00,0xa1,0x00,0x18]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+ mul $4, $5, 0x1a5a5
+# CHECK: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK: ori $1, $1, 42405 # encoding: [0x34,0x21,0xa5,0xa5]
+# CHECK: mult $5, $1 # encoding: [0x00,0xa1,0x00,0x18]
+# CHECK: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: lui $1, 1 # encoding: [0x3c,0x01,0x00,0x01]
+# CHECK-TRAP: ori $1, $1, 42405 # encoding: [0x34,0x21,0xa5,0xa5]
+# CHECK-TRAP: mult $5, $1 # encoding: [0x00,0xa1,0x00,0x18]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+ mulo $4, $5
+# CHECK: mult $4, $5 # encoding: [0x00,0x85,0x00,0x18]
+# CHECK: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK: sra $4, $4, 31 # encoding: [0x00,0x04,0x27,0xc3]
+# CHECK: mfhi $1 # encoding: [0x00,0x00,0x08,0x10]
+# CHECK: beq $4, $1, $tmp0 # encoding: [0x10,0x81,A,A]
+# CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
+# CHECK: break 6 # encoding: [0x00,0x06,0x00,0x0d]
+# CHECK: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: mult $4, $5 # encoding: [0x00,0x85,0x00,0x18]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: sra $4, $4, 31 # encoding: [0x00,0x04,0x27,0xc3]
+# CHECK-TRAP: mfhi $1 # encoding: [0x00,0x00,0x08,0x10]
+# CHECK-TRAP: tne $4, $1, 6 # encoding: [0x00,0x81,0x01,0xb6]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+
+ mulo $4, $5, $6
+# CHECK: mult $5, $6 # encoding: [0x00,0xa6,0x00,0x18]
+# CHECK: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK: sra $4, $4, 31 # encoding: [0x00,0x04,0x27,0xc3]
+# CHECK: mfhi $1 # encoding: [0x00,0x00,0x08,0x10]
+# CHECK: beq $4, $1, $tmp1 # encoding: [0x10,0x81,A,A]
+# CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
+# CHECK: break 6 # encoding: [0x00,0x06,0x00,0x0d]
+# CHECK: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: mult $5, $6 # encoding: [0x00,0xa6,0x00,0x18]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: sra $4, $4, 31 # encoding: [0x00,0x04,0x27,0xc3]
+# CHECK-TRAP: mfhi $1 # encoding: [0x00,0x00,0x08,0x10]
+# CHECK-TRAP: tne $4, $1, 6 # encoding: [0x00,0x81,0x01,0xb6]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+ mulou $4,$5
+# CHECK: multu $4, $5 # encoding: [0x00,0x85,0x00,0x19]
+# CHECK: mfhi $1 # encoding: [0x00,0x00,0x08,0x10]
+# CHECK: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK: beqz $1, $tmp2 # encoding: [0x10,0x20,A,A]
+# CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
+# CHECK: break 6 # encoding: [0x00,0x06,0x00,0x0d]
+# CHECK-TRAP: multu $4, $5 # encoding: [0x00,0x85,0x00,0x19]
+# CHECK-TRAP: mfhi $1 # encoding: [0x00,0x00,0x08,0x10]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: tne $1, $zero, 6 # encoding: [0x00,0x20,0x01,0xb6]
+ mulou $4, $5, $6
+# CHECK: multu $5, $6 # encoding: [0x00,0xa6,0x00,0x19]
+# CHECK: mfhi $1 # encoding: [0x00,0x00,0x08,0x10]
+# CHECK: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK: beqz $1, $tmp3 # encoding: [0x10,0x20,A,A]
+# CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
+# CHECK: break 6 # encoding: [0x00,0x06,0x00,0x0d]
+# CHECK-TRAP: multu $5, $6 # encoding: [0x00,0xa6,0x00,0x19]
+# CHECK-TRAP: mfhi $1 # encoding: [0x00,0x00,0x08,0x10]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: tne $1, $zero, 6 # encoding: [0x00,0x20,0x01,0xb6]
+
+ dmul $4, $5, $6
+# CHECK: dmultu $5, $6 # encoding: [0x00,0xa6,0x00,0x1d]
+# CHECK: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: dmultu $5, $6            # encoding: [0x00,0xa6,0x00,0x1d]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+ dmul $4, $5, 1
+# CHECK: addiu $1, $zero, 1 # encoding: [0x24,0x01,0x00,0x01]
+# CHECK: dmult $5, $1 # encoding: [0x00,0xa1,0x00,0x1c]
+# CHECK: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: addiu $1, $zero, 1 # encoding: [0x24,0x01,0x00,0x01]
+# CHECK-TRAP: dmult $5, $1 # encoding: [0x00,0xa1,0x00,0x1c]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+ dmulo $4, $5, $6
+# CHECK: dmult $5, $6 # encoding: [0x00,0xa6,0x00,0x1c]
+# CHECK: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK: dsra32 $4, $4, 31 # encoding: [0x00,0x04,0x27,0xff]
+# CHECK: mfhi $1 # encoding: [0x00,0x00,0x08,0x10]
+# CHECK: beq $4, $1, $tmp4 # encoding: [0x10,0x81,A,A]
+# CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
+# CHECK: break 6 # encoding: [0x00,0x06,0x00,0x0d]
+# CHECK: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: dmult $5, $6 # encoding: [0x00,0xa6,0x00,0x1c]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: dsra32 $4, $4, 31 # encoding: [0x00,0x04,0x27,0xff]
+# CHECK-TRAP: mfhi $1 # encoding: [0x00,0x00,0x08,0x10]
+# CHECK-TRAP: tne $4, $1, 6 # encoding: [0x00,0x81,0x01,0xb6]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+ dmulou $4,$5,$6
+# CHECK: dmultu $5, $6 # encoding: [0x00,0xa6,0x00,0x1d]
+# CHECK: mfhi $1 # encoding: [0x00,0x00,0x08,0x10]
+# CHECK: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK: beqz $1, $tmp5 # encoding: [0x10,0x20,A,A]
+# CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
+# CHECK: break 6 # encoding: [0x00,0x06,0x00,0x0d]
+# CHECK-TRAP: dmultu $5, $6 # encoding: [0x00,0xa6,0x00,0x1d]
+# CHECK-TRAP: mfhi $1 # encoding: [0x00,0x00,0x08,0x10]
+# CHECK-TRAP: mflo $4 # encoding: [0x00,0x00,0x20,0x12]
+# CHECK-TRAP: tne $1, $zero, 6 # encoding: [0x00,0x20,0x01,0xb6]
diff --git a/test/MC/Mips/set-nomacro.s b/test/MC/Mips/set-nomacro.s
index 1b7a49fbaffb..f0e2f8883863 100644
--- a/test/MC/Mips/set-nomacro.s
+++ b/test/MC/Mips/set-nomacro.s
@@ -181,6 +181,15 @@
bgtu $0, $0, local_label
# CHECK-NOT: [[@LINE-1]]:3: warning: macro instruction expanded into multiple instructions
+ bnel $2, 0, local_label
+# CHECK-NOT: [[@LINE-1]]:3: warning: macro instruction expanded into multiple instructions
+ bnel $2, 1, local_label
+# CHECK: [[@LINE-1]]:3: warning: macro instruction expanded into multiple instructions
+ beql $2, 0, local_label
+# CHECK-NOT: [[@LINE-1]]:3: warning: macro instruction expanded into multiple instructions
+ beql $2, 1, local_label
+# CHECK: [[@LINE-1]]:3: warning: macro instruction expanded into multiple instructions
+
ulh $5, 0
# CHECK: [[@LINE-1]]:3: warning: macro instruction expanded into multiple instructions
ulhu $5, 0
diff --git a/test/MC/Mips/sext_64_32.ll b/test/MC/Mips/sext_64_32.ll
index 5679829e8eab..f6c468187d7b 100644
--- a/test/MC/Mips/sext_64_32.ll
+++ b/test/MC/Mips/sext_64_32.ll
@@ -11,7 +11,8 @@ entry:
ret i64 %conv
}
-; CHECK: dsll32 ${{[a-z0-9]+}}, ${{[a-z0-9]+}}, 0
+; CHECK-LABEL: foo_2:
+; CHECK: dext ${{[a-z0-9]+}}, ${{[a-z0-9]+}}, 0, 32
define i64 @foo_2(i32 %ival_2) nounwind readnone {
entry:
diff --git a/test/MC/PowerPC/ppc64-encoding-vmx.s b/test/MC/PowerPC/ppc64-encoding-vmx.s
index 16c48a71e428..62851e4082d8 100644
--- a/test/MC/PowerPC/ppc64-encoding-vmx.s
+++ b/test/MC/PowerPC/ppc64-encoding-vmx.s
@@ -550,9 +550,15 @@
# CHECK-BE: vnor 2, 3, 4 # encoding: [0x10,0x43,0x25,0x04]
# CHECK-LE: vnor 2, 3, 4 # encoding: [0x04,0x25,0x43,0x10]
vnor 2, 3, 4
+# CHECK-BE: vnot 2, 3 # encoding: [0x10,0x43,0x1d,0x04]
+# CHECK-LE: vnot 2, 3 # encoding: [0x04,0x1d,0x43,0x10]
+ vnot 2, 3
# CHECK-BE: vor 2, 3, 4 # encoding: [0x10,0x43,0x24,0x84]
# CHECK-LE: vor 2, 3, 4 # encoding: [0x84,0x24,0x43,0x10]
vor 2, 3, 4
+# CHECK-BE: vmr 2, 3 # encoding: [0x10,0x43,0x1c,0x84]
+# CHECK-LE: vmr 2, 3 # encoding: [0x84,0x1c,0x43,0x10]
+ vmr 2, 3
# CHECK-BE: vxor 2, 3, 4 # encoding: [0x10,0x43,0x24,0xc4]
# CHECK-LE: vxor 2, 3, 4 # encoding: [0xc4,0x24,0x43,0x10]
vxor 2, 3, 4
diff --git a/test/MC/PowerPC/vsx.s b/test/MC/PowerPC/vsx.s
index 7dae97b0060a..fc92af6967cd 100644
--- a/test/MC/PowerPC/vsx.s
+++ b/test/MC/PowerPC/vsx.s
@@ -532,9 +532,12 @@
xxswapd 7, 63
# Move to/from VSR
-# CHECK-BE: mfvsrd 3, 0 # encoding: [0x7c,0x03,0x00,0x66]
-# CHECK-LE: mfvsrd 3, 0 # encoding: [0x66,0x00,0x03,0x7c]
- mfvsrd 3, 0
+# CHECK-BE: mfvsrd 3, 40 # encoding: [0x7d,0x03,0x00,0x67]
+# CHECK-LE: mfvsrd 3, 40 # encoding: [0x67,0x00,0x03,0x7d]
+ mfvsrd 3, 40
+# CHECK-BE: mfvsrd 3, 40 # encoding: [0x7d,0x03,0x00,0x67]
+# CHECK-LE: mfvsrd 3, 40 # encoding: [0x67,0x00,0x03,0x7d]
+ mfvrd 3, 8
# CHECK-BE: mfvsrwz 5, 0 # encoding: [0x7c,0x05,0x00,0xe6]
# CHECK-LE: mfvsrwz 5, 0 # encoding: [0xe6,0x00,0x05,0x7c]
mfvsrwz 5, 0
diff --git a/test/MC/WebAssembly/file-headers.ll b/test/MC/WebAssembly/file-headers.ll
new file mode 100644
index 000000000000..1e5dd4b160a8
--- /dev/null
+++ b/test/MC/WebAssembly/file-headers.ll
@@ -0,0 +1,9 @@
+; RUN: llc -filetype=obj %s -o - | llvm-readobj -file-headers | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown-wasm"
+
+; CHECK: Format: WASM{{$}}
+; CHECK: Arch: wasm32{{$}}
+; CHECK: AddressSize: 32bit{{$}}
+; CHECK: Version: 0x1{{$}}
diff --git a/test/MC/WebAssembly/lit.local.cfg b/test/MC/WebAssembly/lit.local.cfg
new file mode 100644
index 000000000000..0dd8c920ff1e
--- /dev/null
+++ b/test/MC/WebAssembly/lit.local.cfg
@@ -0,0 +1,2 @@
+if 'WebAssembly' not in config.root.targets:
+ config.unsupported = True
diff --git a/test/MC/X86/abs8.s b/test/MC/X86/abs8.s
new file mode 100644
index 000000000000..1172fb08d4e5
--- /dev/null
+++ b/test/MC/X86/abs8.s
@@ -0,0 +1,8 @@
+// RUN: llvm-mc -filetype=obj %s -o - -triple i686-pc-linux | llvm-objdump -d -r - | FileCheck --check-prefix=32 %s
+// RUN: llvm-mc -filetype=obj %s -o - -triple x86_64-pc-linux | llvm-objdump -d -r - | FileCheck --check-prefix=64 %s
+
+// 32: 0: 83 ff 00 cmpl $0, %edi
+// 32: 00000002: R_386_8 foo
+// 64: 0: 83 ff 00 cmpl $0, %edi
+// 64: 0000000000000002: R_X86_64_8 foo+0
+cmp $foo@ABS8, %edi
diff --git a/test/MC/X86/avx512-encodings.s b/test/MC/X86/avx512-encodings.s
index e883249b0039..23d58e7d77e3 100644
--- a/test/MC/X86/avx512-encodings.s
+++ b/test/MC/X86/avx512-encodings.s
@@ -12525,6 +12525,54 @@ vpermilpd $0x23, 0x400(%rbx), %zmm2
// CHECK: encoding: [0x62,0xe2,0xfd,0x41,0x91,0x8c,0xa9,0x00,0x04,0x00,0x00]
vpgatherqq 1024(%rcx, %zmm21,4), %zmm17 {%k1}
+// CHECK: vgatherdps 123(%r14,%zmm11,8), %zmm17 {%k1}
+// CHECK: encoding: [0x62,0x82,0x7d,0x49,0x92,0x8c,0xde,0x7b,0x00,0x00,0x00]
+ vgatherdps 123(%r14, %zmm11,8), %zmm17 {%k1}
+
+// CHECK: vgatherdps 256(%r9,%zmm11), %zmm17 {%k1}
+// CHECK: encoding: [0x62,0x82,0x7d,0x49,0x92,0x4c,0x19,0x40]
+ vgatherdps 256(%r9,%zmm11), %zmm17 {%k1}
+
+// CHECK: vgatherdps 1024(%rcx,%zmm11,4), %zmm17 {%k1}
+// CHECK: encoding: [0x62,0xa2,0x7d,0x49,0x92,0x8c,0x99,0x00,0x04,0x00,0x00]
+ vgatherdps 1024(%rcx, %zmm11,4), %zmm17 {%k1}
+
+// CHECK: vgatherdpd 123(%r14,%ymm14,8), %zmm8 {%k1}
+// CHECK: encoding: [0x62,0x12,0xfd,0x49,0x92,0x84,0xf6,0x7b,0x00,0x00,0x00]
+ vgatherdpd 123(%r14, %ymm14,8), %zmm8 {%k1}
+
+// CHECK: vgatherdpd 256(%r9,%ymm14), %zmm8 {%k1}
+// CHECK: encoding: [0x62,0x12,0xfd,0x49,0x92,0x44,0x31,0x20]
+ vgatherdpd 256(%r9, %ymm14), %zmm8 {%k1}
+
+// CHECK: vgatherdpd 1024(%rcx,%ymm14,4), %zmm8 {%k1}
+// CHECK: encoding: [0x62,0x32,0xfd,0x49,0x92,0x84,0xb1,0x00,0x04,0x00,0x00]
+ vgatherdpd 1024(%rcx, %ymm14,4), %zmm8 {%k1}
+
+// CHECK: vgatherqps 123(%r14,%zmm17,8), %ymm3 {%k1}
+// CHECK: encoding: [0x62,0xd2,0x7d,0x41,0x93,0x9c,0xce,0x7b,0x00,0x00,0x00]
+ vgatherqps 123(%r14, %zmm17,8), %ymm3 {%k1}
+
+// CHECK: vgatherqps 256(%r9,%zmm17), %ymm3 {%k1}
+// CHECK: encoding: [0x62,0xd2,0x7d,0x41,0x93,0x5c,0x09,0x40]
+ vgatherqps 256(%r9,%zmm17), %ymm3 {%k1}
+
+// CHECK: vgatherqps 1024(%rcx,%zmm17,4), %ymm3 {%k1}
+// CHECK: encoding: [0x62,0xf2,0x7d,0x41,0x93,0x9c,0x89,0x00,0x04,0x00,0x00]
+ vgatherqps 1024(%rcx, %zmm17,4), %ymm3 {%k1}
+
+// CHECK: vgatherqpd 123(%r14,%zmm21,8), %zmm17 {%k1}
+// CHECK: encoding: [0x62,0xc2,0xfd,0x41,0x93,0x8c,0xee,0x7b,0x00,0x00,0x00]
+ vgatherqpd 123(%r14, %zmm21,8), %zmm17 {%k1}
+
+// CHECK: vgatherqpd 256(%r9,%zmm21), %zmm17 {%k1}
+// CHECK: encoding: [0x62,0xc2,0xfd,0x41,0x93,0x4c,0x29,0x20]
+ vgatherqpd 256(%r9,%zmm21), %zmm17 {%k1}
+
+// CHECK: vgatherqpd 1024(%rcx,%zmm21,4), %zmm17 {%k1}
+// CHECK: encoding: [0x62,0xe2,0xfd,0x41,0x93,0x8c,0xa9,0x00,0x04,0x00,0x00]
+ vgatherqpd 1024(%rcx, %zmm21,4), %zmm17 {%k1}
+
// CHECK: vpscatterdd %zmm19, 123(%r14,%zmm16,8) {%k1}
// CHECK: encoding: [0x62,0xc2,0x7d,0x41,0xa0,0x9c,0xc6,0x7b,0x00,0x00,0x00]
vpscatterdd %zmm19, 123(%r14,%zmm16,8) {%k1}
diff --git a/test/MC/X86/avx512vl-encoding.s b/test/MC/X86/avx512vl-encoding.s
index 2e3eaf2aa4c7..42b412cc50d6 100644
--- a/test/MC/X86/avx512vl-encoding.s
+++ b/test/MC/X86/avx512vl-encoding.s
@@ -1980,3 +1980,131 @@
// CHECK: vpscatterqq %ymm19, 1024(%rcx,%ymm31,4) {%k1}
// CHECK: encoding: [0x62,0xa2,0xfd,0x21,0xa1,0x9c,0xb9,0x00,0x04,0x00,0x00]
vpscatterqq %ymm19, 1024(%rcx,%ymm31,4) {%k1}
+
+// CHECK: vscatterdps %xmm20, 123(%r14,%xmm31,8) {%k1}
+// CHECK: encoding: [0x62,0x82,0x7d,0x01,0xa2,0xa4,0xfe,0x7b,0x00,0x00,0x00]
+ vscatterdps %xmm20, 123(%r14,%xmm31,8) {%k1}
+
+// CHECK: vscatterdps %xmm20, 123(%r14,%xmm31,8) {%k1}
+// CHECK: encoding: [0x62,0x82,0x7d,0x01,0xa2,0xa4,0xfe,0x7b,0x00,0x00,0x00]
+ vscatterdps %xmm20, 123(%r14,%xmm31,8) {%k1}
+
+// CHECK: vscatterdps %xmm20, 256(%r9,%xmm31) {%k1}
+// CHECK: encoding: [0x62,0x82,0x7d,0x01,0xa2,0x64,0x39,0x40]
+ vscatterdps %xmm20, 256(%r9,%xmm31) {%k1}
+
+// CHECK: vscatterdps %xmm20, 1024(%rcx,%xmm31,4) {%k1}
+// CHECK: encoding: [0x62,0xa2,0x7d,0x01,0xa2,0xa4,0xb9,0x00,0x04,0x00,0x00]
+ vscatterdps %xmm20, 1024(%rcx,%xmm31,4) {%k1}
+
+// CHECK: vscatterdps %ymm28, 123(%r14,%ymm31,8) {%k1}
+// CHECK: encoding: [0x62,0x02,0x7d,0x21,0xa2,0xa4,0xfe,0x7b,0x00,0x00,0x00]
+ vscatterdps %ymm28, 123(%r14,%ymm31,8) {%k1}
+
+// CHECK: vscatterdps %ymm28, 123(%r14,%ymm31,8) {%k1}
+// CHECK: encoding: [0x62,0x02,0x7d,0x21,0xa2,0xa4,0xfe,0x7b,0x00,0x00,0x00]
+ vscatterdps %ymm28, 123(%r14,%ymm31,8) {%k1}
+
+// CHECK: vscatterdps %ymm28, 256(%r9,%ymm31) {%k1}
+// CHECK: encoding: [0x62,0x02,0x7d,0x21,0xa2,0x64,0x39,0x40]
+ vscatterdps %ymm28, 256(%r9,%ymm31) {%k1}
+
+// CHECK: vscatterdps %ymm28, 1024(%rcx,%ymm31,4) {%k1}
+// CHECK: encoding: [0x62,0x22,0x7d,0x21,0xa2,0xa4,0xb9,0x00,0x04,0x00,0x00]
+ vscatterdps %ymm28, 1024(%rcx,%ymm31,4) {%k1}
+
+// CHECK: vscatterdpd %xmm21, 123(%r14,%xmm31,8) {%k1}
+// CHECK: encoding: [0x62,0x82,0xfd,0x01,0xa2,0xac,0xfe,0x7b,0x00,0x00,0x00]
+ vscatterdpd %xmm21, 123(%r14,%xmm31,8) {%k1}
+
+// CHECK: vscatterdpd %xmm21, 123(%r14,%xmm31,8) {%k1}
+// CHECK: encoding: [0x62,0x82,0xfd,0x01,0xa2,0xac,0xfe,0x7b,0x00,0x00,0x00]
+ vscatterdpd %xmm21, 123(%r14,%xmm31,8) {%k1}
+
+// CHECK: vscatterdpd %xmm21, 256(%r9,%xmm31) {%k1}
+// CHECK: encoding: [0x62,0x82,0xfd,0x01,0xa2,0x6c,0x39,0x20]
+ vscatterdpd %xmm21, 256(%r9,%xmm31) {%k1}
+
+// CHECK: vscatterdpd %xmm21, 1024(%rcx,%xmm31,4) {%k1}
+// CHECK: encoding: [0x62,0xa2,0xfd,0x01,0xa2,0xac,0xb9,0x00,0x04,0x00,0x00]
+ vscatterdpd %xmm21, 1024(%rcx,%xmm31,4) {%k1}
+
+// CHECK: vscatterdpd %ymm28, 123(%r14,%xmm31,8) {%k1}
+// CHECK: encoding: [0x62,0x02,0xfd,0x21,0xa2,0xa4,0xfe,0x7b,0x00,0x00,0x00]
+ vscatterdpd %ymm28, 123(%r14,%xmm31,8) {%k1}
+
+// CHECK: vscatterdpd %ymm28, 123(%r14,%xmm31,8) {%k1}
+// CHECK: encoding: [0x62,0x02,0xfd,0x21,0xa2,0xa4,0xfe,0x7b,0x00,0x00,0x00]
+ vscatterdpd %ymm28, 123(%r14,%xmm31,8) {%k1}
+
+// CHECK: vscatterdpd %ymm28, 256(%r9,%xmm31) {%k1}
+// CHECK: encoding: [0x62,0x02,0xfd,0x21,0xa2,0x64,0x39,0x20]
+ vscatterdpd %ymm28, 256(%r9,%xmm31) {%k1}
+
+// CHECK: vscatterdpd %ymm28, 1024(%rcx,%xmm31,4) {%k1}
+// CHECK: encoding: [0x62,0x22,0xfd,0x21,0xa2,0xa4,0xb9,0x00,0x04,0x00,0x00]
+ vscatterdpd %ymm28, 1024(%rcx,%xmm31,4) {%k1}
+
+// CHECK: vscatterqps %xmm22, 123(%r14,%xmm31,8) {%k1}
+// CHECK: encoding: [0x62,0x82,0x7d,0x01,0xa3,0xb4,0xfe,0x7b,0x00,0x00,0x00]
+ vscatterqps %xmm22, 123(%r14,%xmm31,8) {%k1}
+
+// CHECK: vscatterqps %xmm22, 123(%r14,%xmm31,8) {%k1}
+// CHECK: encoding: [0x62,0x82,0x7d,0x01,0xa3,0xb4,0xfe,0x7b,0x00,0x00,0x00]
+ vscatterqps %xmm22, 123(%r14,%xmm31,8) {%k1}
+
+// CHECK: vscatterqps %xmm22, 256(%r9,%xmm31) {%k1}
+// CHECK: encoding: [0x62,0x82,0x7d,0x01,0xa3,0x74,0x39,0x40]
+ vscatterqps %xmm22, 256(%r9,%xmm31) {%k1}
+
+// CHECK: vscatterqps %xmm22, 1024(%rcx,%xmm31,4) {%k1}
+// CHECK: encoding: [0x62,0xa2,0x7d,0x01,0xa3,0xb4,0xb9,0x00,0x04,0x00,0x00]
+ vscatterqps %xmm22, 1024(%rcx,%xmm31,4) {%k1}
+
+// CHECK: vscatterqps %xmm24, 123(%r14,%ymm31,8) {%k1}
+// CHECK: encoding: [0x62,0x02,0x7d,0x21,0xa3,0x84,0xfe,0x7b,0x00,0x00,0x00]
+ vscatterqps %xmm24, 123(%r14,%ymm31,8) {%k1}
+
+// CHECK: vscatterqps %xmm24, 123(%r14,%ymm31,8) {%k1}
+// CHECK: encoding: [0x62,0x02,0x7d,0x21,0xa3,0x84,0xfe,0x7b,0x00,0x00,0x00]
+ vscatterqps %xmm24, 123(%r14,%ymm31,8) {%k1}
+
+// CHECK: vscatterqps %xmm24, 256(%r9,%ymm31) {%k1}
+// CHECK: encoding: [0x62,0x02,0x7d,0x21,0xa3,0x44,0x39,0x40]
+ vscatterqps %xmm24, 256(%r9,%ymm31) {%k1}
+
+// CHECK: vscatterqps %xmm24, 1024(%rcx,%ymm31,4) {%k1}
+// CHECK: encoding: [0x62,0x22,0x7d,0x21,0xa3,0x84,0xb9,0x00,0x04,0x00,0x00]
+ vscatterqps %xmm24, 1024(%rcx,%ymm31,4) {%k1}
+
+// CHECK: vscatterqpd %xmm28, 123(%r14,%xmm31,8) {%k1}
+// CHECK: encoding: [0x62,0x02,0xfd,0x01,0xa3,0xa4,0xfe,0x7b,0x00,0x00,0x00]
+ vscatterqpd %xmm28, 123(%r14,%xmm31,8) {%k1}
+
+// CHECK: vscatterqpd %xmm28, 123(%r14,%xmm31,8) {%k1}
+// CHECK: encoding: [0x62,0x02,0xfd,0x01,0xa3,0xa4,0xfe,0x7b,0x00,0x00,0x00]
+ vscatterqpd %xmm28, 123(%r14,%xmm31,8) {%k1}
+
+// CHECK: vscatterqpd %xmm28, 256(%r9,%xmm31) {%k1}
+// CHECK: encoding: [0x62,0x02,0xfd,0x01,0xa3,0x64,0x39,0x20]
+ vscatterqpd %xmm28, 256(%r9,%xmm31) {%k1}
+
+// CHECK: vscatterqpd %xmm28, 1024(%rcx,%xmm31,4) {%k1}
+// CHECK: encoding: [0x62,0x22,0xfd,0x01,0xa3,0xa4,0xb9,0x00,0x04,0x00,0x00]
+ vscatterqpd %xmm28, 1024(%rcx,%xmm31,4) {%k1}
+
+// CHECK: vscatterqpd %ymm19, 123(%r14,%ymm31,8) {%k1}
+// CHECK: encoding: [0x62,0x82,0xfd,0x21,0xa3,0x9c,0xfe,0x7b,0x00,0x00,0x00]
+ vscatterqpd %ymm19, 123(%r14,%ymm31,8) {%k1}
+
+// CHECK: vscatterqpd %ymm19, 123(%r14,%ymm31,8) {%k1}
+// CHECK: encoding: [0x62,0x82,0xfd,0x21,0xa3,0x9c,0xfe,0x7b,0x00,0x00,0x00]
+ vscatterqpd %ymm19, 123(%r14,%ymm31,8) {%k1}
+
+// CHECK: vscatterqpd %ymm19, 256(%r9,%ymm31) {%k1}
+// CHECK: encoding: [0x62,0x82,0xfd,0x21,0xa3,0x5c,0x39,0x20]
+ vscatterqpd %ymm19, 256(%r9,%ymm31) {%k1}
+
+// CHECK: vscatterqpd %ymm19, 1024(%rcx,%ymm31,4) {%k1}
+// CHECK: encoding: [0x62,0xa2,0xfd,0x21,0xa3,0x9c,0xb9,0x00,0x04,0x00,0x00]
+ vscatterqpd %ymm19, 1024(%rcx,%ymm31,4) {%k1}
diff --git a/test/MC/X86/data-prefix-fail.s b/test/MC/X86/data-prefix-fail.s
new file mode 100644
index 000000000000..2b910cdc1093
--- /dev/null
+++ b/test/MC/X86/data-prefix-fail.s
@@ -0,0 +1,25 @@
+// RUN: not llvm-mc -triple x86_64-unknown-unknown --show-encoding %s 2> %t.err | FileCheck --check-prefix=64 %s
+// RUN: FileCheck --check-prefix=ERR64 < %t.err %s
+// RUN: not llvm-mc -triple i386-unknown-unknown --show-encoding %s 2> %t.err | FileCheck --check-prefix=32 %s
+// RUN: FileCheck --check-prefix=ERR32 < %t.err %s
+// RUN: not llvm-mc -triple i386-unknown-unknown-code16 --show-encoding %s 2> %t.err | FileCheck --check-prefix=16 %s
+// RUN: FileCheck --check-prefix=ERR16 < %t.err %s
+
+// ERR64: error: instruction requires: 16-bit mode
+// ERR32: error: instruction requires: 16-bit mode
+// 16: data32
+// 16: encoding: [0x66]
+// 16: lgdtw 0
+// 16: encoding: [0x0f,0x01,0x16,0x00,0x00]
+data32 lgdt 0
+
+// 64: data16
+// 64: encoding: [0x66]
+// 64: lgdtq 0
+// 64: encoding: [0x0f,0x01,0x14,0x25,0x00,0x00,0x00,0x00]
+// 32: data16
+// 32: encoding: [0x66]
+// 32: lgdtl 0
+// 32: encoding: [0x0f,0x01,0x15,0x00,0x00,0x00,0x00]
+// ERR16: error: instruction requires: Not 16-bit mode
+data16 lgdt 0
diff --git a/test/MC/X86/data-prefix16.s b/test/MC/X86/data-prefix16.s
new file mode 100644
index 000000000000..d90b9dc5a934
--- /dev/null
+++ b/test/MC/X86/data-prefix16.s
@@ -0,0 +1,9 @@
+# RUN: llvm-mc -triple i386-unknown-unknown-code16 -filetype=obj %s -o - | llvm-objdump -triple i386-unknown-unknown-code16 -d - | FileCheck %s
+
+# CHECK: 66 0f 01 16 00 00
+# CHECK: lgdtl 0
+data32 lgdt 0
+
+# CHECK: 66
+# CHECK: data32
+data32
diff --git a/test/MC/X86/data-prefix32.s b/test/MC/X86/data-prefix32.s
new file mode 100644
index 000000000000..15a718b1a97c
--- /dev/null
+++ b/test/MC/X86/data-prefix32.s
@@ -0,0 +1,9 @@
+# RUN: llvm-mc -triple=i386-unknown-unknown -filetype=obj %s -o - | llvm-objdump -triple=i386-unknown-unknown -d - | FileCheck %s
+
+# CHECK: 66 0f 01 15 00 00 00 00
+# CHECK: lgdtw 0
+data16 lgdt 0
+
+# CHECK: 66
+# CHECK: data16
+data16
diff --git a/test/MC/X86/data-prefix64.s b/test/MC/X86/data-prefix64.s
new file mode 100644
index 000000000000..acd0db3ec104
--- /dev/null
+++ b/test/MC/X86/data-prefix64.s
@@ -0,0 +1,9 @@
+# RUN: llvm-mc -triple=x86_64-unknown-unknown -filetype=obj %s -o - | llvm-objdump -triple=x86_64-unknown-unknown -d - | FileCheck %s
+
+# CHECK: 66 0f 01 14 25 00 00 00 00
+# CHECK: lgdtq 0
+data16 lgdt 0
+
+# CHECK: 66
+# CHECK: data16
+data16
diff --git a/test/MC/X86/intel-syntax-avx512.s b/test/MC/X86/intel-syntax-avx512.s
index bf6e2ea98935..528920ef4c3e 100644
--- a/test/MC/X86/intel-syntax-avx512.s
+++ b/test/MC/X86/intel-syntax-avx512.s
@@ -37936,17 +37936,17 @@ vaddpd zmm1, zmm1, zmm2, {rz-sae}
// CHECK: encoding: [0x62,0x62,0xfd,0x49,0x93,0xac,0x91,0x00,0x04,0x00,0x00]
vgatherqpd zmm29{k1},ZMMWORD PTR [rcx+zmm2*4+0x400]
-// CHECK: vgatherqps ymm18 {k1}, zmmword ptr [r14 + 8*zmm4 + 123]
+// CHECK: vgatherqps ymm18 {k1}, ymmword ptr [r14 + 8*zmm4 + 123]
// CHECK: encoding: [0x62,0xc2,0x7d,0x49,0x93,0x94,0xe6,0x7b,0x00,0x00,0x00]
- vgatherqps ymm18{k1},ZMMWORD PTR [r14+zmm4*8+0x7b]
+ vgatherqps ymm18{k1},YMMWORD PTR [r14+zmm4*8+0x7b]
-// CHECK: vgatherqps ymm18 {k1}, zmmword ptr [r9 + zmm4 + 256]
+// CHECK: vgatherqps ymm18 {k1}, ymmword ptr [r9 + zmm4 + 256]
// CHECK: encoding: [0x62,0xc2,0x7d,0x49,0x93,0x54,0x21,0x40]
- vgatherqps ymm18{k1},ZMMWORD PTR [r9+zmm4*1+0x100]
+ vgatherqps ymm18{k1},YMMWORD PTR [r9+zmm4*1+0x100]
-// CHECK: vgatherqps ymm18 {k1}, zmmword ptr [rcx + 4*zmm4 + 1024]
+// CHECK: vgatherqps ymm18 {k1}, ymmword ptr [rcx + 4*zmm4 + 1024]
// CHECK: encoding: [0x62,0xe2,0x7d,0x49,0x93,0x94,0xa1,0x00,0x04,0x00,0x00]
- vgatherqps ymm18{k1},ZMMWORD PTR [rcx+zmm4*4+0x400]
+ vgatherqps ymm18{k1},YMMWORD PTR [rcx+zmm4*4+0x400]
// CHECK: vpgatherdd zmm17 {k1}, zmmword ptr [r14 + 8*zmm11 + 123]
// CHECK: encoding: [0x62,0x82,0x7d,0x49,0x90,0x8c,0xde,0x7b,0x00,0x00,0x00]
@@ -37972,17 +37972,17 @@ vaddpd zmm1, zmm1, zmm2, {rz-sae}
// CHECK: encoding: [0x62,0x32,0xfd,0x49,0x90,0x84,0xb1,0x00,0x04,0x00,0x00]
vpgatherdq zmm8{k1},ZMMWORD PTR [rcx+ymm14*4+0x400]
-// CHECK: vpgatherqd ymm3 {k1}, zmmword ptr [r14 + 8*zmm17 + 123]
+// CHECK: vpgatherqd ymm3 {k1}, ymmword ptr [r14 + 8*zmm17 + 123]
// CHECK: encoding: [0x62,0xd2,0x7d,0x41,0x91,0x9c,0xce,0x7b,0x00,0x00,0x00]
- vpgatherqd ymm3{k1},ZMMWORD PTR [r14+zmm17*8+0x7b]
+ vpgatherqd ymm3{k1},YMMWORD PTR [r14+zmm17*8+0x7b]
-// CHECK: vpgatherqd ymm3 {k1}, zmmword ptr [r9 + zmm17 + 256]
+// CHECK: vpgatherqd ymm3 {k1}, ymmword ptr [r9 + zmm17 + 256]
// CHECK: encoding: [0x62,0xd2,0x7d,0x41,0x91,0x5c,0x09,0x40]
- vpgatherqd ymm3{k1},ZMMWORD PTR [r9+zmm17*1+0x100]
+ vpgatherqd ymm3{k1},YMMWORD PTR [r9+zmm17*1+0x100]
-// CHECK: vpgatherqd ymm3 {k1}, zmmword ptr [rcx + 4*zmm17 + 1024]
+// CHECK: vpgatherqd ymm3 {k1}, ymmword ptr [rcx + 4*zmm17 + 1024]
// CHECK: encoding: [0x62,0xf2,0x7d,0x41,0x91,0x9c,0x89,0x00,0x04,0x00,0x00]
- vpgatherqd ymm3{k1},ZMMWORD PTR [rcx+zmm17*4+0x400]
+ vpgatherqd ymm3{k1},YMMWORD PTR [rcx+zmm17*4+0x400]
// CHECK: vpgatherqq zmm17 {k1}, zmmword ptr [r14 + 8*zmm21 + 123]
// CHECK: encoding: [0x62,0xc2,0xfd,0x41,0x91,0x8c,0xee,0x7b,0x00,0x00,0x00]
@@ -38028,21 +38028,21 @@ vaddpd zmm1, zmm1, zmm2, {rz-sae}
// CHECK: encoding: [0x62,0xf2,0xfd,0x49,0xa0,0xac,0xb1,0x00,0x04,0x00,0x00]
vpscatterdq ZMMWORD PTR [rcx+ymm6*4+0x400]{k1},zmm5
-// CHECK: vpscatterqd zmmword ptr [r14 + 8*zmm2 + 123] {k1}, ymm20
+// CHECK: vpscatterqd ymmword ptr [r14 + 8*zmm2 + 123] {k1}, ymm20
// CHECK: encoding: [0x62,0xc2,0x7d,0x49,0xa1,0xa4,0xd6,0x7b,0x00,0x00,0x00]
- vpscatterqd ZMMWORD PTR [r14+zmm2*8+0x7b]{k1},ymm20
+ vpscatterqd YMMWORD PTR [r14+zmm2*8+0x7b]{k1},ymm20
-// CHECK: vpscatterqd zmmword ptr [r14 + 8*zmm2 + 123] {k1}, ymm20
+// CHECK: vpscatterqd ymmword ptr [r14 + 8*zmm2 + 123] {k1}, ymm20
// CHECK: encoding: [0x62,0xc2,0x7d,0x49,0xa1,0xa4,0xd6,0x7b,0x00,0x00,0x00]
- vpscatterqd ZMMWORD PTR [r14+zmm2*8+0x7b]{k1},ymm20
+ vpscatterqd YMMWORD PTR [r14+zmm2*8+0x7b]{k1},ymm20
-// CHECK: vpscatterqd zmmword ptr [r9 + zmm2 + 256] {k1}, ymm20
+// CHECK: vpscatterqd ymmword ptr [r9 + zmm2 + 256] {k1}, ymm20
// CHECK: encoding: [0x62,0xc2,0x7d,0x49,0xa1,0x64,0x11,0x40]
- vpscatterqd ZMMWORD PTR [r9+zmm2*1+0x100]{k1},ymm20
+ vpscatterqd YMMWORD PTR [r9+zmm2*1+0x100]{k1},ymm20
-// CHECK: vpscatterqd zmmword ptr [rcx + 4*zmm2 + 1024] {k1}, ymm20
+// CHECK: vpscatterqd ymmword ptr [rcx + 4*zmm2 + 1024] {k1}, ymm20
// CHECK: encoding: [0x62,0xe2,0x7d,0x49,0xa1,0xa4,0x91,0x00,0x04,0x00,0x00]
- vpscatterqd ZMMWORD PTR [rcx+zmm2*4+0x400]{k1},ymm20
+ vpscatterqd YMMWORD PTR [rcx+zmm2*4+0x400]{k1},ymm20
// CHECK: vpscatterqq zmmword ptr [r14 + 8*zmm20 + 123] {k1}, zmm14
// CHECK: encoding: [0x62,0x52,0xfd,0x41,0xa1,0xb4,0xe6,0x7b,0x00,0x00,0x00]
@@ -38108,21 +38108,21 @@ vaddpd zmm1, zmm1, zmm2, {rz-sae}
// CHECK: encoding: [0x62,0xa2,0xfd,0x41,0xa3,0xb4,0xa1,0x00,0x04,0x00,0x00]
vscatterqpd ZMMWORD PTR [rcx+zmm28*4+0x400]{k1},zmm22
-// CHECK: vscatterqps zmmword ptr [r14 + 8*zmm27 + 123] {k1}, ymm6
+// CHECK: vscatterqps ymmword ptr [r14 + 8*zmm27 + 123] {k1}, ymm6
// CHECK: encoding: [0x62,0x92,0x7d,0x41,0xa3,0xb4,0xde,0x7b,0x00,0x00,0x00]
- vscatterqps ZMMWORD PTR [r14+zmm27*8+0x7b]{k1},ymm6
+ vscatterqps YMMWORD PTR [r14+zmm27*8+0x7b]{k1},ymm6
-// CHECK: vscatterqps zmmword ptr [r14 + 8*zmm27 + 123] {k1}, ymm6
+// CHECK: vscatterqps ymmword ptr [r14 + 8*zmm27 + 123] {k1}, ymm6
// CHECK: encoding: [0x62,0x92,0x7d,0x41,0xa3,0xb4,0xde,0x7b,0x00,0x00,0x00]
- vscatterqps ZMMWORD PTR [r14+zmm27*8+0x7b]{k1},ymm6
+ vscatterqps YMMWORD PTR [r14+zmm27*8+0x7b]{k1},ymm6
-// CHECK: vscatterqps zmmword ptr [r9 + zmm27 + 256] {k1}, ymm6
+// CHECK: vscatterqps ymmword ptr [r9 + zmm27 + 256] {k1}, ymm6
// CHECK: encoding: [0x62,0x92,0x7d,0x41,0xa3,0x74,0x19,0x40]
- vscatterqps ZMMWORD PTR [r9+zmm27*1+0x100]{k1},ymm6
+ vscatterqps YMMWORD PTR [r9+zmm27*1+0x100]{k1},ymm6
-// CHECK: vscatterqps zmmword ptr [rcx + 4*zmm27 + 1024] {k1}, ymm6
+// CHECK: vscatterqps ymmword ptr [rcx + 4*zmm27 + 1024] {k1}, ymm6
// CHECK: encoding: [0x62,0xb2,0x7d,0x41,0xa3,0xb4,0x99,0x00,0x04,0x00,0x00]
- vscatterqps ZMMWORD PTR [rcx+zmm27*4+0x400]{k1},ymm6
+ vscatterqps YMMWORD PTR [rcx+zmm27*4+0x400]{k1},ymm6
// CHECK: vscatterdpd zmmword ptr [r14 + 8*ymm27 - 123] {k1}, zmm18
// CHECK: encoding: [0x62,0x82,0xfd,0x41,0xa2,0x94,0xde,0x85,0xff,0xff,0xff]
@@ -38172,21 +38172,21 @@ vaddpd zmm1, zmm1, zmm2, {rz-sae}
// CHECK: encoding: [0x62,0x32,0xfd,0x41,0xa3,0x84,0x89,0x00,0x04,0x00,0x00]
vscatterqpd ZMMWORD PTR [rcx+zmm25*4+0x400]{k1},zmm8
-// CHECK: vscatterqps zmmword ptr [r14 + 8*zmm10 - 123] {k1}, ymm13
+// CHECK: vscatterqps ymmword ptr [r14 + 8*zmm10 - 123] {k1}, ymm13
// CHECK: encoding: [0x62,0x12,0x7d,0x49,0xa3,0xac,0xd6,0x85,0xff,0xff,0xff]
- vscatterqps ZMMWORD PTR [r14+zmm10*8-0x7b]{k1},ymm13
+ vscatterqps YMMWORD PTR [r14+zmm10*8-0x7b]{k1},ymm13
-// CHECK: vscatterqps zmmword ptr [r14 + 8*zmm10 - 123] {k1}, ymm13
+// CHECK: vscatterqps ymmword ptr [r14 + 8*zmm10 - 123] {k1}, ymm13
// CHECK: encoding: [0x62,0x12,0x7d,0x49,0xa3,0xac,0xd6,0x85,0xff,0xff,0xff]
- vscatterqps ZMMWORD PTR [r14+zmm10*8-0x7b]{k1},ymm13
+ vscatterqps YMMWORD PTR [r14+zmm10*8-0x7b]{k1},ymm13
-// CHECK: vscatterqps zmmword ptr [r9 + zmm10 + 256] {k1}, ymm13
+// CHECK: vscatterqps ymmword ptr [r9 + zmm10 + 256] {k1}, ymm13
// CHECK: encoding: [0x62,0x12,0x7d,0x49,0xa3,0x6c,0x11,0x40]
- vscatterqps ZMMWORD PTR [r9+zmm10*1+0x100]{k1},ymm13
+ vscatterqps YMMWORD PTR [r9+zmm10*1+0x100]{k1},ymm13
-// CHECK: vscatterqps zmmword ptr [rcx + 4*zmm10 + 1024] {k1}, ymm13
+// CHECK: vscatterqps ymmword ptr [rcx + 4*zmm10 + 1024] {k1}, ymm13
// CHECK: encoding: [0x62,0x32,0x7d,0x49,0xa3,0xac,0x91,0x00,0x04,0x00,0x00]
- vscatterqps ZMMWORD PTR [rcx+zmm10*4+0x400]{k1},ymm13
+ vscatterqps YMMWORD PTR [rcx+zmm10*4+0x400]{k1},ymm13
// CHECK: vgatherdpd zmm30 {k1}, zmmword ptr [r14 + 8*ymm5 - 123]
// CHECK: encoding: [0x62,0x42,0xfd,0x49,0x92,0xb4,0xee,0x85,0xff,0xff,0xff]
@@ -38224,17 +38224,17 @@ vaddpd zmm1, zmm1, zmm2, {rz-sae}
// CHECK: encoding: [0x62,0x22,0xfd,0x49,0x93,0x9c,0xa9,0x00,0x04,0x00,0x00]
vgatherqpd zmm27{k1},ZMMWORD PTR [rcx+zmm13*4+0x400]
-// CHECK: vgatherqps ymm27 {k1}, zmmword ptr [r14 + 8*zmm14 - 123]
+// CHECK: vgatherqps ymm27 {k1}, ymmword ptr [r14 + 8*zmm14 - 123]
// CHECK: encoding: [0x62,0x02,0x7d,0x49,0x93,0x9c,0xf6,0x85,0xff,0xff,0xff]
- vgatherqps ymm27{k1},ZMMWORD PTR [r14+zmm14*8-0x7b]
+ vgatherqps ymm27{k1},YMMWORD PTR [r14+zmm14*8-0x7b]
-// CHECK: vgatherqps ymm27 {k1}, zmmword ptr [r9 + zmm14 + 256]
+// CHECK: vgatherqps ymm27 {k1}, ymmword ptr [r9 + zmm14 + 256]
// CHECK: encoding: [0x62,0x02,0x7d,0x49,0x93,0x5c,0x31,0x40]
- vgatherqps ymm27{k1},ZMMWORD PTR [r9+zmm14*1+0x100]
+ vgatherqps ymm27{k1},YMMWORD PTR [r9+zmm14*1+0x100]
-// CHECK: vgatherqps ymm27 {k1}, zmmword ptr [rcx + 4*zmm14 + 1024]
+// CHECK: vgatherqps ymm27 {k1}, ymmword ptr [rcx + 4*zmm14 + 1024]
// CHECK: encoding: [0x62,0x22,0x7d,0x49,0x93,0x9c,0xb1,0x00,0x04,0x00,0x00]
- vgatherqps ymm27{k1},ZMMWORD PTR [rcx+zmm14*4+0x400]
+ vgatherqps ymm27{k1},YMMWORD PTR [rcx+zmm14*4+0x400]
// CHECK: vpgatherdd zmm7 {k1}, zmmword ptr [r14 + 8*zmm16 - 123]
// CHECK: encoding: [0x62,0xd2,0x7d,0x41,0x90,0xbc,0xc6,0x85,0xff,0xff,0xff]
@@ -38260,17 +38260,17 @@ vaddpd zmm1, zmm1, zmm2, {rz-sae}
// CHECK: encoding: [0x62,0x62,0xfd,0x49,0x90,0x8c,0xb9,0x00,0x04,0x00,0x00]
vpgatherdq zmm25{k1},ZMMWORD PTR [rcx+ymm7*4+0x400]
-// CHECK: vpgatherqd ymm19 {k1}, zmmword ptr [r14 + 8*zmm17 - 123]
+// CHECK: vpgatherqd ymm19 {k1}, ymmword ptr [r14 + 8*zmm17 - 123]
// CHECK: encoding: [0x62,0xc2,0x7d,0x41,0x91,0x9c,0xce,0x85,0xff,0xff,0xff]
- vpgatherqd ymm19{k1},ZMMWORD PTR [r14+zmm17*8-0x7b]
+ vpgatherqd ymm19{k1},YMMWORD PTR [r14+zmm17*8-0x7b]
-// CHECK: vpgatherqd ymm19 {k1}, zmmword ptr [r9 + zmm17 + 256]
+// CHECK: vpgatherqd ymm19 {k1}, ymmword ptr [r9 + zmm17 + 256]
// CHECK: encoding: [0x62,0xc2,0x7d,0x41,0x91,0x5c,0x09,0x40]
- vpgatherqd ymm19{k1},ZMMWORD PTR [r9+zmm17*1+0x100]
+ vpgatherqd ymm19{k1},YMMWORD PTR [r9+zmm17*1+0x100]
-// CHECK: vpgatherqd ymm19 {k1}, zmmword ptr [rcx + 4*zmm17 + 1024]
+// CHECK: vpgatherqd ymm19 {k1}, ymmword ptr [rcx + 4*zmm17 + 1024]
// CHECK: encoding: [0x62,0xe2,0x7d,0x41,0x91,0x9c,0x89,0x00,0x04,0x00,0x00]
- vpgatherqd ymm19{k1},ZMMWORD PTR [rcx+zmm17*4+0x400]
+ vpgatherqd ymm19{k1},YMMWORD PTR [rcx+zmm17*4+0x400]
// CHECK: vpgatherqq zmm10 {k1}, zmmword ptr [r14 + 8*zmm13 - 123]
// CHECK: encoding: [0x62,0x12,0xfd,0x49,0x91,0x94,0xee,0x85,0xff,0xff,0xff]
@@ -38316,21 +38316,21 @@ vaddpd zmm1, zmm1, zmm2, {rz-sae}
// CHECK: encoding: [0x62,0xb2,0xfd,0x41,0xa0,0x8c,0x89,0x00,0x04,0x00,0x00]
vpscatterdq ZMMWORD PTR [rcx+ymm25*4+0x400]{k1},zmm1
-// CHECK: vpscatterqd zmmword ptr [r14 + 8*zmm22 - 123] {k1}, ymm23
+// CHECK: vpscatterqd ymmword ptr [r14 + 8*zmm22 - 123] {k1}, ymm23
// CHECK: encoding: [0x62,0xc2,0x7d,0x41,0xa1,0xbc,0xf6,0x85,0xff,0xff,0xff]
- vpscatterqd ZMMWORD PTR [r14+zmm22*8-0x7b]{k1},ymm23
+ vpscatterqd YMMWORD PTR [r14+zmm22*8-0x7b]{k1},ymm23
-// CHECK: vpscatterqd zmmword ptr [r14 + 8*zmm22 - 123] {k1}, ymm23
+// CHECK: vpscatterqd ymmword ptr [r14 + 8*zmm22 - 123] {k1}, ymm23
// CHECK: encoding: [0x62,0xc2,0x7d,0x41,0xa1,0xbc,0xf6,0x85,0xff,0xff,0xff]
- vpscatterqd ZMMWORD PTR [r14+zmm22*8-0x7b]{k1},ymm23
+ vpscatterqd YMMWORD PTR [r14+zmm22*8-0x7b]{k1},ymm23
-// CHECK: vpscatterqd zmmword ptr [r9 + zmm22 + 256] {k1}, ymm23
+// CHECK: vpscatterqd ymmword ptr [r9 + zmm22 + 256] {k1}, ymm23
// CHECK: encoding: [0x62,0xc2,0x7d,0x41,0xa1,0x7c,0x31,0x40]
- vpscatterqd ZMMWORD PTR [r9+zmm22*1+0x100]{k1},ymm23
+ vpscatterqd YMMWORD PTR [r9+zmm22*1+0x100]{k1},ymm23
-// CHECK: vpscatterqd zmmword ptr [rcx + 4*zmm22 + 1024] {k1}, ymm23
+// CHECK: vpscatterqd ymmword ptr [rcx + 4*zmm22 + 1024] {k1}, ymm23
// CHECK: encoding: [0x62,0xe2,0x7d,0x41,0xa1,0xbc,0xb1,0x00,0x04,0x00,0x00]
- vpscatterqd ZMMWORD PTR [rcx+zmm22*4+0x400]{k1},ymm23
+ vpscatterqd YMMWORD PTR [rcx+zmm22*4+0x400]{k1},ymm23
// CHECK: vpscatterqq zmmword ptr [r14 + 8*zmm8 - 123] {k1}, zmm2
// CHECK: encoding: [0x62,0x92,0xfd,0x49,0xa1,0x94,0xc6,0x85,0xff,0xff,0xff]
diff --git a/test/MC/X86/intel-syntax-bitwise-ops.s b/test/MC/X86/intel-syntax-bitwise-ops.s
index 1f09996fe914..6d4df609c061 100644
--- a/test/MC/X86/intel-syntax-bitwise-ops.s
+++ b/test/MC/X86/intel-syntax-bitwise-ops.s
@@ -6,19 +6,53 @@
and ecx, 1+2
// CHECK: andl $3, %ecx
and ecx, 1|2
-// CHECK: andl $3, %ecx
+// CHECK: andl $3, %ecx
+ and ecx, 1 or 2
+// CHECK: andl $3, %ecx
+ and ecx, 1 OR 2
+// CHECK: andl $3, %ecx
and ecx, 1*3
// CHECK: andl $1, %ecx
and ecx, 1&3
-// CHECK: andl $0, %ecx
+// CHECK: andl $1, %ecx
+ and ecx, 1 and 3
+// CHECK: andl $1, %ecx
+ and ecx, 1 AND 3
+// CHECK: andl $0, %ecx
and ecx, (1&2)
-// CHECK: andl $3, %ecx
+// CHECK: andl $0, %ecx
+ and ecx, (1 and 2)
+// CHECK: andl $0, %ecx
+ and ecx, (1 AND 2)
+// CHECK: andl $3, %ecx
and ecx, ((1)|2)
-// CHECK: andl $1, %ecx
+// CHECK: andl $3, %ecx
+ and ecx, ((1) or 2)
+// CHECK: andl $3, %ecx
+ and ecx, ((1) OR 2)
+// CHECK: andl $1, %ecx
and ecx, 1&2+3
-// CHECK: addl $4938, %eax
+// CHECK: andl $1, %ecx
+ and ecx, 1 and 2+3
+// CHECK: andl $1, %ecx
+ and ecx, 1 AND 2+3
+// CHECK: addl $4938, %eax
add eax, 9876 >> 1
-// CHECK: addl $19752, %eax
+// CHECK: addl $4938, %eax
+ add eax, 9876 shr 1
+// CHECK: addl $4938, %eax
+ add eax, 9876 SHR 1
+// CHECK: addl $19752, %eax
add eax, 9876 << 1
-// CHECK: addl $5, %eax
+// CHECK: addl $19752, %eax
+ add eax, 9876 shl 1
+// CHECK: addl $19752, %eax
+ add eax, 9876 SHL 1
+// CHECK: addl $5, %eax
add eax, 6 ^ 3
+// CHECK: addl $5, %eax
+ add eax, 6 xor 3
+// CHECK: addl $5, %eax
+ add eax, 6 XOR 3
+// CHECK: addl $5, %eax
+ add eax, 6 XOR 3 shl 1 SHR 1
diff --git a/test/MC/X86/intel-syntax.s b/test/MC/X86/intel-syntax.s
index e5b1f9f995f7..a8172fc67980 100644
--- a/test/MC/X86/intel-syntax.s
+++ b/test/MC/X86/intel-syntax.s
@@ -19,6 +19,8 @@ _main:
mov EAX, DWORD PTR [RSP - 4]
// CHECK: movq (%rsp), %rax
mov RAX, QWORD PTR [RSP]
+// CHECK: movabsq $4294967289, %rax
+ mov RAX, 4294967289
// CHECK: movl $-4, -4(%rsp)
mov DWORD PTR [RSP - 4], -4
// CHECK: movq 0, %rcx
diff --git a/test/MC/X86/line-table-sections.s b/test/MC/X86/line-table-sections.s
new file mode 100644
index 000000000000..93b911d9576f
--- /dev/null
+++ b/test/MC/X86/line-table-sections.s
@@ -0,0 +1,15 @@
+// RUN: llvm-mc -triple x86_64-unknown-unknown -filetype=obj %s | llvm-objdump -r - | FileCheck %s
+// CHECK-NOT: RELOCATION RECORDS
+
+// Ensure that a .loc directive at the end of a section doesn't bleed into the
+// following section. Previously this would produce a relocation for
+// .other_section in the line table, but it should actually produce no line
+// table entries at all.
+ .text
+ .file 1 "fail.cpp"
+ .loc 1 7 3 prologue_end # fail.cpp:7:3
+ # addss %xmm0, %xmm1
+
+ .section .other_section,"",@progbits
+ .long 46 # Length of Unit
+
diff --git a/test/MC/X86/x86-16.s b/test/MC/X86/x86-16.s
index f5669358f219..b95f66ef84d8 100644
--- a/test/MC/X86/x86-16.s
+++ b/test/MC/X86/x86-16.s
@@ -959,3 +959,13 @@ lretw
// CHECK: lretl
// CHECK: encoding: [0x66,0xcb]
lretl
+
+// CHECK: data32
+// CHECK: encoding: [0x66]
+data32
+
+// CHECK: data32
+// CHECK: encoding: [0x66]
+// CHECK: lgdtw 4(%eax)
+// CHECK: encoding: [0x67,0x0f,0x01,0x50,0x04]
+data32 lgdt 4(%eax)
diff --git a/test/MC/X86/x86-32-coverage.s b/test/MC/X86/x86-32-coverage.s
index 357034cfcf8e..c4f649ff4f4b 100644
--- a/test/MC/X86/x86-32-coverage.s
+++ b/test/MC/X86/x86-32-coverage.s
@@ -281,6 +281,10 @@
// CHECK: encoding: [0xfb]
sti
+// CHECK: salc
+// CHECK: encoding: [0xd6]
+ salc
+
// CHECK: addb $254, 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x80,0x84,0xcb,0xef,0xbe,0xad,0xde,0xfe]
addb $0xfe,0xdeadbeef(%ebx,%ecx,8)
@@ -2747,6 +2751,10 @@
// CHECK: encoding: [0xdd,0xc2]
ffree %st(2)
+// CHECK: ffreep %st(2)
+// CHECK: encoding: [0xdf,0xc2]
+ ffreep %st(2)
+
// CHECK: fnop
// CHECK: encoding: [0xd9,0xd0]
fnop
@@ -10518,9 +10526,9 @@
// CHECK: invlpga %ecx, %eax
invlpga %ecx, %eax
-// CHECK: blendvps (%eax), %xmm1 # encoding: [0x66,0x0f,0x38,0x14,0x08]
+// CHECK: blendvps %xmm0, (%eax), %xmm1 # encoding: [0x66,0x0f,0x38,0x14,0x08]
blendvps (%eax), %xmm1
-// CHECK: blendvps %xmm2, %xmm1 # encoding: [0x66,0x0f,0x38,0x14,0xca]
+// CHECK: blendvps %xmm0, %xmm2, %xmm1 # encoding: [0x66,0x0f,0x38,0x14,0xca]
blendvps %xmm2, %xmm1
// These instructions can take an unsigned 8-bit mask as well as a signed 8-bit
@@ -10555,29 +10563,29 @@
insertps $-64, %xmm2, %xmm1
// PR13253 handle implicit optional third argument that must always be xmm0
-// CHECK: pblendvb %xmm2, %xmm1
+// CHECK: pblendvb %xmm0, %xmm2, %xmm1
pblendvb %xmm2, %xmm1
-// CHECK: pblendvb %xmm2, %xmm1
+// CHECK: pblendvb %xmm0, %xmm2, %xmm1
pblendvb %xmm0, %xmm2, %xmm1
-// CHECK: pblendvb (%eax), %xmm1
+// CHECK: pblendvb %xmm0, (%eax), %xmm1
pblendvb (%eax), %xmm1
-// CHECK: pblendvb (%eax), %xmm1
+// CHECK: pblendvb %xmm0, (%eax), %xmm1
pblendvb %xmm0, (%eax), %xmm1
-// CHECK: blendvpd %xmm2, %xmm1
+// CHECK: blendvpd %xmm0, %xmm2, %xmm1
blendvpd %xmm2, %xmm1
-// CHECK: blendvpd %xmm2, %xmm1
+// CHECK: blendvpd %xmm0, %xmm2, %xmm1
blendvpd %xmm0, %xmm2, %xmm1
-// CHECK: blendvpd (%eax), %xmm1
+// CHECK: blendvpd %xmm0, (%eax), %xmm1
blendvpd (%eax), %xmm1
-// CHECK: blendvpd (%eax), %xmm1
+// CHECK: blendvpd %xmm0, (%eax), %xmm1
blendvpd %xmm0, (%eax), %xmm1
-// CHECK: blendvps %xmm2, %xmm1
+// CHECK: blendvps %xmm0, %xmm2, %xmm1
blendvps %xmm2, %xmm1
-// CHECK: blendvps %xmm2, %xmm1
+// CHECK: blendvps %xmm0, %xmm2, %xmm1
blendvps %xmm0, %xmm2, %xmm1
-// CHECK: blendvps (%eax), %xmm1
+// CHECK: blendvps %xmm0, (%eax), %xmm1
blendvps (%eax), %xmm1
-// CHECK: blendvps (%eax), %xmm1
+// CHECK: blendvps %xmm0, (%eax), %xmm1
blendvps %xmm0, (%eax), %xmm1
@@ -10646,10 +10654,6 @@ btcq $4, (%eax)
// CHECK: encoding: [0x66,0x0f,0xae,0x35,0x78,0x56,0x34,0x12]
clwb 0x12345678
-// CHECK: pcommit
-// CHECK: encoding: [0x66,0x0f,0xae,0xf8]
- pcommit
-
// CHECK: xsave 3735928559(%ebx,%ecx,8)
// CHECK: encoding: [0x0f,0xae,0xa4,0xcb,0xef,0xbe,0xad,0xde]
xsave 0xdeadbeef(%ebx,%ecx,8)
diff --git a/test/MC/X86/x86-32.s b/test/MC/X86/x86-32.s
index 7207652a3be3..f3633dcffef4 100644
--- a/test/MC/X86/x86-32.s
+++ b/test/MC/X86/x86-32.s
@@ -444,6 +444,14 @@ cmovnae %bx,%bx
// CHECK: encoding: [0x0f,0x21,0xf8]
movl %dr7,%eax
+// CHECK: clzero
+// CHECK: encoding: [0x0f,0x01,0xfc]
+ clzero
+
+// CHECK: clzero
+// CHECK: encoding: [0x0f,0x01,0xfc]
+ clzero %eax
+
// radr://8017522
// CHECK: wait
// CHECK: encoding: [0x9b]
@@ -1079,3 +1087,13 @@ retw
// CHECK: lretw
// CHECK: encoding: [0x66,0xcb]
lretw
+
+// CHECK: data16
+// CHECK: encoding: [0x66]
+data16
+
+// CHECK: data16
+// CHECK: encoding: [0x66]
+// CHECK: lgdtl 4(%eax)
+// CHECK: encoding: [0x0f,0x01,0x50,0x04]
+data16 lgdt 4(%eax)
diff --git a/test/MC/X86/x86-64.s b/test/MC/X86/x86-64.s
index 1af25e5412f1..a605dbbbd746 100644
--- a/test/MC/X86/x86-64.s
+++ b/test/MC/X86/x86-64.s
@@ -1119,6 +1119,12 @@ movq %mm5, %rbx // CHECK: movd %mm5, %rbx # encoding: [0x48,0x0f,0x7e,0xeb]
rex64 // CHECK: rex64 # encoding: [0x48]
data16 // CHECK: data16 # encoding: [0x66]
+// CHECK: data16
+// CHECK: encoding: [0x66]
+// CHECK: lgdtq 4(%rax)
+// CHECK: encoding: [0x0f,0x01,0x50,0x04]
+data16 lgdt 4(%rax)
+
// PR8855
movq 18446744073709551615,%rbx // CHECK: movq -1, %rbx
@@ -1496,6 +1502,14 @@ vmovq %xmm0, %rax
// CHECK: encoding: [0x0f,0x01,0xfb]
mwaitx %rax, %rcx, %rbx
+// CHECK: clzero
+// CHECK: encoding: [0x0f,0x01,0xfc]
+ clzero
+
+// CHECK: clzero
+// CHECK: encoding: [0x0f,0x01,0xfc]
+ clzero %rax
+
// CHECK: movl %r15d, (%r15,%r15)
// CHECK: encoding: [0x47,0x89,0x3c,0x3f]
movl %r15d, (%r15,%r15)
diff --git a/test/MC/X86/x86_64-encoding.s b/test/MC/X86/x86_64-encoding.s
index 62af1bdb2357..c502ed466433 100644
--- a/test/MC/X86/x86_64-encoding.s
+++ b/test/MC/X86/x86_64-encoding.s
@@ -148,19 +148,19 @@ sha1msg2 %xmm1, %xmm2
// CHECK: encoding: [0x0f,0x38,0xca,0x10]
sha1msg2 (%rax), %xmm2
-// CHECK: sha256rnds2 (%rax), %xmm2
+// CHECK: sha256rnds2 %xmm0, (%rax), %xmm2
// CHECK: encoding: [0x0f,0x38,0xcb,0x10]
sha256rnds2 (%rax), %xmm2
-// CHECK: sha256rnds2 %xmm1, %xmm2
+// CHECK: sha256rnds2 %xmm0, %xmm1, %xmm2
// CHECK: encoding: [0x0f,0x38,0xcb,0xd1]
sha256rnds2 %xmm1, %xmm2
-// CHECK: sha256rnds2 (%rax), %xmm2
+// CHECK: sha256rnds2 %xmm0, (%rax), %xmm2
// CHECK: encoding: [0x0f,0x38,0xcb,0x10]
sha256rnds2 %xmm0, (%rax), %xmm2
-// CHECK: sha256rnds2 %xmm1, %xmm2
+// CHECK: sha256rnds2 %xmm0, %xmm1, %xmm2
// CHECK: encoding: [0x0f,0x38,0xcb,0xd1]
sha256rnds2 %xmm0, %xmm1, %xmm2
diff --git a/test/Object/AMDGPU/elf-definitios.yaml b/test/Object/AMDGPU/elf-definitions.yaml
index 819786aa1902..819786aa1902 100644
--- a/test/Object/AMDGPU/elf-definitios.yaml
+++ b/test/Object/AMDGPU/elf-definitions.yaml
diff --git a/test/Object/ARM/nm-mapping-symbol.s b/test/Object/ARM/nm-mapping-symbol.s
index 485c1cc39d72..9b7b5b583ea0 100644
--- a/test/Object/ARM/nm-mapping-symbol.s
+++ b/test/Object/ARM/nm-mapping-symbol.s
@@ -9,3 +9,4 @@
.section .foobar,"",%progbits
.asciz "foo"
+ nop
diff --git a/test/Object/Inputs/invalid-reloc.elf-x86-64 b/test/Object/Inputs/invalid-reloc.elf-x86-64
new file mode 100644
index 000000000000..ce742de3bd2b
--- /dev/null
+++ b/test/Object/Inputs/invalid-reloc.elf-x86-64
Binary files differ
diff --git a/test/Object/Inputs/macho-invalid-note b/test/Object/Inputs/macho-invalid-note
new file mode 100644
index 000000000000..9bc21d99323e
--- /dev/null
+++ b/test/Object/Inputs/macho-invalid-note
Binary files differ
diff --git a/test/Object/Inputs/solaris-nosymbols.yaml b/test/Object/Inputs/solaris-nosymbols.yaml
new file mode 100644
index 000000000000..85dabedcf9a5
--- /dev/null
+++ b/test/Object/Inputs/solaris-nosymbols.yaml
@@ -0,0 +1,7 @@
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_X86_64
+...
diff --git a/test/Object/X86/nm-ir.ll b/test/Object/X86/nm-ir.ll
index 1742a8f938e2..29f7a5c7018c 100644
--- a/test/Object/X86/nm-ir.ll
+++ b/test/Object/X86/nm-ir.ll
@@ -12,6 +12,7 @@
; CHECK-NEXT: C g3
; CHECK-NOT: g4
; CHECK-NEXT: T global_asm_sym
+; CHECK-NEXT: D ifunc_f1
; CHECK-NEXT: t local_asm_sym
; CHECK-NEXT: U undef_asm_sym
@@ -36,6 +37,8 @@ define void @f1() {
ret void
}
+@ifunc_f1 = ifunc void (), void ()* @f1
+
define internal void @f2() {
ret void
}
diff --git a/test/Object/archive-extract.test b/test/Object/archive-extract.test
index 50372d530d88..664529d6c807 100644
--- a/test/Object/archive-extract.test
+++ b/test/Object/archive-extract.test
@@ -37,11 +37,16 @@
; RUN: rm -f very_long_bytecode_file_name.bc
; RUN: llvm-ar xo %p/Inputs/GNU.a very_long_bytecode_file_name.bc
; RUN: rm -f %t.a
-; RUN: llvm-ar rcU %t.a very_long_bytecode_file_name.bc
-; RUN: env TZ=GMT llvm-ar tv %t.a | FileCheck %s
+; RUN: llvm-ar -format gnu rcU %t.a very_long_bytecode_file_name.bc
+; RUN: env TZ=GMT llvm-ar tv %t.a | FileCheck %s -check-prefix CHECK-GNU
-CHECK: 1465 2004-11-19 03:01:31.000000000 very_long_bytecode_file_name.bc
+CHECK-GNU: 1465 2004-11-19 03:01:31.000000000 very_long_bytecode_file_name.bc
+; RUN: rm -f %t.a
+; RUN: llvm-ar -format darwin rcU %t.a very_long_bytecode_file_name.bc
+; RUN: env TZ=GMT llvm-ar tv %t.a | FileCheck %s -check-prefix CHECK-DARWIN
+
+CHECK-DARWIN: 1472 2004-11-19 03:01:31.000000000 very_long_bytecode_file_name.bc
RUN: not llvm-ar x %p/Inputs/GNU.a foo.o 2>&1 | FileCheck --check-prefix=NOTFOUND %s
NOTFOUND: foo.o was not found
@@ -52,5 +57,5 @@ THINEXTRACT: extracting from a thin archive is not supported
RUN: llvm-ar p %p/Inputs/thin.a evenlen | FileCheck %s --check-prefix=EVENLEN
EVENLEN: evenlen
-RUN: not llvm-ar p %p/Inputs/thin-path.a t/test2.o | FileCheck %s --check-prefix=MISSING
+RUN: not llvm-ar p %p/Inputs/thin-path.a t/test2.o 2>&1 | FileCheck %s --check-prefix=MISSING
MISSING: {{N|n}}o such file or directory.
diff --git a/test/Object/archive-format.test b/test/Object/archive-format.test
index b9562a36d67b..219fc7f894a7 100644
--- a/test/Object/archive-format.test
+++ b/test/Object/archive-format.test
@@ -37,6 +37,19 @@ BSD-NEXT: 0123456789abcde{{.....}}bar.
BSD-SAME: #1/16 0 0 0 644 20 `
BSD-NEXT: 0123456789abcdefzed.
+RUN: rm -f %t.a
+RUN: llvm-ar --format=darwin rc %t.a 0123456789abcde 0123456789abcdef
+RUN: cat %t.a | FileCheck -strict-whitespace --check-prefix=DARWIN %s
+
+DARWIN: !<arch>
+DARWIN-NEXT: #1/20 0 0 0 644 28 `
+Each [[:space:]] matches a newline. We explicitly match 3 newlines, as the
+fourth newline is implicitly consumed by FileCheck and cannot be matched.
+DARWIN-NEXT: 0123456789abcde{{.....}}bar.{{[[:space:]][[:space:]][[:space:]]}}
+DARWIN-NEXT: #1/20 0 0 0 644 28 `
+DARWIN-NEXT: 0123456789abcdef{{....}}zed.
+
+
RUN: rm -f test.a
RUN: llvm-ar --format=gnu rcT test.a 0123456789abcde 0123456789abcdef
RUN: cat test.a | FileCheck -strict-whitespace --check-prefix=THIN %s
@@ -65,3 +78,15 @@ THIN-PATH-NEXT: /65 0 0 0 644 4 `
RUN: not llvm-ar --format=bsd rcT bad.a 0123456789abcde 0123456789abcdef 2>&1 | FileCheck --check-prefix=BSD-THIN %s
BSD-THIN: Only the gnu format has a thin mode.
+
+If an archive has an object with no symbols, the linker and some other
+tools on some versions of Solaris will abort operations if there is no
+symbol table. Create such an object, put it into an archive, and check to
+see that there is an empty symbol table.
+RUN: mkdir -p %t
+RUN: yaml2obj %S/Inputs/solaris-nosymbols.yaml > %t/foo.o
+RUN: llvm-ar rs %t/foo.a %t/foo.o
+RUN: cat -v %t/foo.a | FileCheck -strict-whitespace --check-prefix=SOLARIS %s
+SOLARIS: !<arch>
+SOLARIS-NEXT: / 0 0 0 0 8 `
+SOLARIS-NEXT: ^@^@^@^@^@^@^@^@foo.o/
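
The SOLARIS lines lean on cat -v notation: cat -v renders a NUL byte as ^@,
so the eight ^@ above are eight zero bytes, i.e. a symbol table member that
is present but empty. A quick stand-alone illustration of the rendering:

  RUN: printf 'a\000\000b' | cat -v | FileCheck --check-prefix=NUL %s
  NUL: a^@^@b
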
diff --git a/test/Object/archive-pad.test b/test/Object/archive-pad.test
new file mode 100644
index 000000000000..343f51ef60af
--- /dev/null
+++ b/test/Object/archive-pad.test
@@ -0,0 +1,19 @@
+Test that only the darwin format needs to modify archive members to
+avoid a ld64 bug.
+
+RUN: echo foo > %t.o
+
+RUN: rm -f %t.a
+RUN: llvm-ar -format=bsd rc %t.a %t.o
+RUN: llvm-ar p %t.a > %t.bsd.o
+RUN: cmp %t.bsd.o %t.o
+
+RUN: rm -f %t.a
+RUN: llvm-ar -format=gnu rc %t.a %t.o
+RUN: llvm-ar p %t.a > %t.gnu.o
+RUN: cmp %t.gnu.o %t.o
+
+RUN: rm -f %t.a
+RUN: llvm-ar -format=darwin rc %t.a %t.o
+RUN: llvm-ar p %t.a > %t.darwin.o
+RUN: not cmp %t.darwin.o %t.o
diff --git a/test/Object/macho-invalid.test b/test/Object/macho-invalid.test
index 6370228e5986..e956680a2ce5 100644
--- a/test/Object/macho-invalid.test
+++ b/test/Object/macho-invalid.test
@@ -83,8 +83,8 @@ RUN: not llvm-objdump -t %p/Inputs/macho-invalid-section-index-getSectionRawName
RUN: | FileCheck -check-prefix INVALID-SECTION-IDX-SYMBOL-SEC-objdump %s
INVALID-SECTION-IDX-SYMBOL-SEC-objdump: truncated or malformed object (bad section index: 66 for symbol at index 8)
-RUN: not llvm-objdump -macho -private-headers %p/Inputs/macho-invalid-header 2>&1 | FileCheck -check-prefix INVALID-HEADER %s
-INVALID-HEADER: The file was not recognized as a valid object file
+RUN: llvm-objdump -macho -private-headers %p/Inputs/macho-invalid-header 2>&1 | FileCheck -check-prefix INVALID-HEADER %s
+INVALID-HEADER: is not an object file
RUN: not llvm-objdump -macho -private-headers %p/Inputs/macho64-invalid-incomplete-segment-load-command 2>&1 | FileCheck -check-prefix INCOMPLETE-SEGMENT-LOADC %s
INCOMPLETE-SEGMENT-LOADC: truncated or malformed object (load commands extend past the end of the file)
@@ -117,7 +117,7 @@ RUN: not llvm-objdump -macho -private-headers %p/Inputs/macho-invalid-segment-fi
INVALID-SEGMENT-FILESIZE: macho-invalid-segment-filesize': truncated or malformed object (load command 0 fileoff field plus filesize field in LC_SEGMENT extends past the end of the file)
RUN: not llvm-objdump -macho -private-headers %p/Inputs/macho-invalid-segment-vmsize 2>&1 | FileCheck -check-prefix INVALID-SEGMENT-VMSIZE %s
-INVALID-SEGMENT-VMSIZE: macho-invalid-segment-vmsize': truncated or malformed object (load command 0 fileoff field in LC_SEGMENT greater than vmsize field)
+INVALID-SEGMENT-VMSIZE: macho-invalid-segment-vmsize': truncated or malformed object (load command 0 filesize field in LC_SEGMENT greater than vmsize field)
RUN: not llvm-objdump -macho -private-headers %p/Inputs/macho-invalid-section-offset 2>&1 | FileCheck -check-prefix INVALID-SECTION-FILEOFF %s
INVALID-SECTION-FILEOFF: macho-invalid-section-offset': truncated or malformed object (offset field of section 0 in LC_SEGMENT command 0 extends past the end of the file)
@@ -505,3 +505,6 @@ INVALID-FAT-ARCH-OVERLAP: macho-invalid-fat-arch-overlap': truncated or malforme
RUN: not llvm-objdump -macho -universal-headers %p/Inputs/macho-invalid-fat-arch-overlapheaders 2>&1 | FileCheck -check-prefix INVALID-FAT-ARCH-OVERLAPHEADERS %s
INVALID-FAT-ARCH-OVERLAPHEADERS: macho-invalid-fat-arch-overlapheaders': truncated or malformed fat file (cputype (7) cpusubtype (3) offset 12 overlaps universal headers)
+
+RUN: not llvm-objdump -macho -private-headers %p/Inputs/macho-invalid-note 2>&1 | FileCheck -check-prefix INVALID-NOTE-COMMAND %s
+INVALID-NOTE-COMMAND: macho-invalid-note': truncated or malformed object (size field plus offset field of LC_NOTE command 0 extends past the end of the file)
diff --git a/test/Object/nm-shared-object.test b/test/Object/nm-shared-object.test
index 32ae6a861529..975cf760ba9f 100644
--- a/test/Object/nm-shared-object.test
+++ b/test/Object/nm-shared-object.test
@@ -29,3 +29,5 @@ RUN: not llvm-nm -D %p/Inputs/trivial-object-test.coff-i386 2>&1 \
RUN: | FileCheck %s -check-prefix ERROR
ERROR: File format has no dynamic symbol table.
+
+RUN: llvm-nm -D %p/Inputs/trivial-object-test.elf-i386 | count 0
diff --git a/test/Object/obj2yaml-invalid-reloc.test b/test/Object/obj2yaml-invalid-reloc.test
new file mode 100644
index 000000000000..c0a5a1b5fbd8
--- /dev/null
+++ b/test/Object/obj2yaml-invalid-reloc.test
@@ -0,0 +1,37 @@
+RUN: obj2yaml %p/Inputs/invalid-reloc.elf-x86-64 | FileCheck %s
+
+CHECK: --- !ELF
+CHECK-NEXT: FileHeader:
+CHECK-NEXT: Class: ELFCLASS64
+CHECK-NEXT: Data: ELFDATA2LSB
+CHECK-NEXT: OSABI: ELFOSABI_FREEBSD
+CHECK-NEXT: Type: ET_REL
+CHECK-NEXT: Machine: EM_X86_64
+CHECK-NEXT: Sections:
+CHECK-NEXT: - Name: .text
+CHECK-NEXT: Type: SHT_PROGBITS
+CHECK-NEXT: Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+CHECK-NEXT: AddressAlign: 0x0000000000000004
+CHECK-NEXT: Content: 4833C0C390
+CHECK-NEXT: - Name: .rela.text
+CHECK-NEXT: Type: SHT_RELA
+CHECK-NEXT: Link: .symtab
+CHECK-NEXT: AddressAlign: 0x0000000000000008
+CHECK-NEXT: Info: .text
+CHECK-NEXT: Relocations:
+CHECK-NEXT: - Offset: 0x0000000000000000
+CHECK-NEXT: Symbol: ''
+CHECK-NEXT: Type: R_X86_64_NONE
+CHECK-NEXT: Symbols:
+CHECK-NEXT: Local:
+CHECK-NEXT: - Name: rb_ary_new_capa
+CHECK-NEXT: Type: STT_FUNC
+CHECK-NEXT: Section: .text
+CHECK-NEXT: Size: 0x0000000000000005
+CHECK-NEXT: Global:
+CHECK-NEXT: - Name: __dtraceenabled_ruby___array-create
+CHECK-NEXT: - Name: '$dtrace1316529.rb_ary_new_capa'
+CHECK-NEXT: Type: STT_FUNC
+CHECK-NEXT: Section: .text
+CHECK-NEXT: Size: 0x0000000000000005
+CHECK-NEXT: Visibility: STV_HIDDEN
diff --git a/test/ObjectYAML/MachO/DWARF-debug_aranges.yaml b/test/ObjectYAML/MachO/DWARF-debug_aranges.yaml
index 2822c94d7751..0b0421d6a092 100644
--- a/test/ObjectYAML/MachO/DWARF-debug_aranges.yaml
+++ b/test/ObjectYAML/MachO/DWARF-debug_aranges.yaml
@@ -313,7 +313,8 @@ LinkEditData:
- _main
DWARF:
debug_aranges:
- - Length: 44
+ - Length:
+ TotalLength: 44
Version: 2
CuOffset: 0
AddrSize: 8
@@ -325,7 +326,8 @@ DWARF:
#CHECK: DWARF:
#CHECK: debug_aranges:
-#CHECK: - Length: 44
+#CHECK: - Length:
+#CHECK: TotalLength: 44
#CHECK: Version: 2
#CHECK: CuOffset: 0
#CHECK: AddrSize: 8
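
The Length key now nests a TotalLength field because it models DWARF's
initial-length field: in DWARF32 the unit length is a plain 32-bit value,
while the 32-bit escape value 0xFFFFFFFF signals that a 64-bit length
follows (DWARF64). A sketch of both spellings, assuming the 64-bit field is
spelled TotalLength64 in this YAML schema:

  debug_aranges:
    - Length:
        TotalLength:   44           # DWARF32: 32-bit unit length
  # hypothetical DWARF64 form:
    - Length:
        TotalLength:   0xFFFFFFFF   # DWARF64 escape value
        TotalLength64: 44           # actual 64-bit unit length
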
diff --git a/test/ObjectYAML/MachO/DWARF-debug_info.yaml b/test/ObjectYAML/MachO/DWARF-debug_info.yaml
index b1b6b8ad19e8..0ede72bd1f41 100644
--- a/test/ObjectYAML/MachO/DWARF-debug_info.yaml
+++ b/test/ObjectYAML/MachO/DWARF-debug_info.yaml
@@ -375,7 +375,8 @@ DWARF:
- Attribute: DW_AT_type
Form: DW_FORM_ref4
debug_aranges:
- - Length: 44
+ - Length:
+ TotalLength: 44
Version: 2
CuOffset: 0
AddrSize: 8
@@ -384,7 +385,8 @@ DWARF:
- Address: 0x0000000100000F50
Length: 52
debug_info:
- - Length: 117
+ - Length:
+ TotalLength: 117
Version: 4
AbbrOffset: 0
AddrSize: 8
@@ -452,7 +454,8 @@ DWARF:
- AbbrCode: 0x00000000
Values:
debug_line:
- - TotalLength: 65
+ - Length:
+ TotalLength: 65
Version: 2
PrologueLength: 36
MinInstLength: 1
@@ -508,7 +511,8 @@ DWARF:
#CHECK: DWARF:
#CHECK: debug_info:
-#CHECK: - Length: 117
+#CHECK: - Length:
+#CHECK: TotalLength: 117
#CHECK: Version: 4
#CHECK: AbbrOffset: 0
#CHECK: AddrSize: 8
diff --git a/test/ObjectYAML/MachO/DWARF-debug_line.yaml b/test/ObjectYAML/MachO/DWARF-debug_line.yaml
index c1e015839f97..6d87ea68cdcf 100644
--- a/test/ObjectYAML/MachO/DWARF-debug_line.yaml
+++ b/test/ObjectYAML/MachO/DWARF-debug_line.yaml
@@ -394,7 +394,8 @@ DWARF:
- Attribute: DW_AT_type
Form: DW_FORM_ref4
debug_aranges:
- - Length: 44
+ - Length:
+ TotalLength: 44
Version: 2
CuOffset: 0
AddrSize: 8
@@ -403,7 +404,8 @@ DWARF:
- Address: 0x0000000100000F50
Length: 52
debug_pubnames:
- Length: 23
+ Length:
+ TotalLength: 23
Version: 2
UnitOffset: 0
UnitSize: 121
@@ -411,7 +413,8 @@ DWARF:
- DieOffset: 0x0000002A
Name: main
debug_pubtypes:
- Length: 31
+ Length:
+ TotalLength: 31
Version: 2
UnitOffset: 0
UnitSize: 121
@@ -421,7 +424,8 @@ DWARF:
- DieOffset: 0x00000071
Name: char
debug_info:
- - Length: 117
+ - Length:
+ TotalLength: 117
Version: 4
AbbrOffset: 0
AddrSize: 8
@@ -489,7 +493,8 @@ DWARF:
- AbbrCode: 0x00000000
Values:
debug_line:
- - TotalLength: 65
+ - Length:
+ TotalLength: 65
Version: 2
PrologueLength: 36
MinInstLength: 1
@@ -497,19 +502,7 @@ DWARF:
LineBase: 251
LineRange: 14
OpcodeBase: 13
- StandardOpcodeLengths:
- - 0
- - 1
- - 1
- - 1
- - 1
- - 0
- - 0
- - 0
- - 1
- - 0
- - 0
- - 1
+ StandardOpcodeLengths: [ 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1 ]
IncludeDirs:
Files:
- Name: hello_world.c
@@ -542,7 +535,8 @@ DWARF:
...
#CHECK: debug_line:
-#CHECK: - TotalLength: 65
+#CHECK: - Length:
+#CHECK: TotalLength: 65
#CHECK: Version: 2
#CHECK: PrologueLength: 36
#CHECK: MinInstLength: 1
@@ -550,19 +544,7 @@ DWARF:
#CHECK: LineBase: 251
#CHECK: LineRange: 14
#CHECK: OpcodeBase: 13
-#CHECK: StandardOpcodeLengths:
-#CHECK: - 0
-#CHECK: - 1
-#CHECK: - 1
-#CHECK: - 1
-#CHECK: - 1
-#CHECK: - 0
-#CHECK: - 0
-#CHECK: - 0
-#CHECK: - 1
-#CHECK: - 0
-#CHECK: - 0
-#CHECK: - 1
+#CHECK: StandardOpcodeLengths: [ 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1 ]
#CHECK: IncludeDirs:
#CHECK: Files:
#CHECK: - Name: hello_world.c
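
The StandardOpcodeLengths rewrite is purely notational: YAML flow sequences
and block sequences denote the same list, so obj2yaml now emits the compact
flow form. For example, the two spellings below parse identically:

  StandardOpcodeLengths: [ 0, 1, 1 ]

  StandardOpcodeLengths:
    - 0
    - 1
    - 1
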
diff --git a/test/ObjectYAML/MachO/DWARF-pubsections.yaml b/test/ObjectYAML/MachO/DWARF-pubsections.yaml
index 8535ed0b5c45..a3c05ca5b358 100644
--- a/test/ObjectYAML/MachO/DWARF-pubsections.yaml
+++ b/test/ObjectYAML/MachO/DWARF-pubsections.yaml
@@ -314,7 +314,8 @@ DWARF:
- int
- char
debug_pubnames:
- Length: 23
+ Length:
+ TotalLength: 23
Version: 2
UnitOffset: 0
UnitSize: 121
@@ -322,7 +323,8 @@ DWARF:
- DieOffset: 0x0000002A
Name: main
debug_pubtypes:
- Length: 31
+ Length:
+ TotalLength: 31
Version: 2
UnitOffset: 0
UnitSize: 121
@@ -335,7 +337,8 @@ DWARF:
#CHECK: DWARF:
#CHECK: debug_pubnames:
-#CHECK: Length: 23
+#CHECK: Length:
+#CHECK: TotalLength: 23
#CHECK: Version: 2
#CHECK: UnitOffset: 0
#CHECK: UnitSize: 121
@@ -343,7 +346,8 @@ DWARF:
#CHECK: - DieOffset: 0x0000002A
#CHECK: Name: main
#CHECK: debug_pubtypes:
-#CHECK: Length: 31
+#CHECK: Length:
+#CHECK: TotalLength: 31
#CHECK: Version: 2
#CHECK: UnitOffset: 0
#CHECK: UnitSize: 121
diff --git a/test/ObjectYAML/MachO/DWARF2-AddrSize8-FormValues.yaml b/test/ObjectYAML/MachO/DWARF2-AddrSize8-FormValues.yaml
new file mode 100644
index 000000000000..e91935bcdc14
--- /dev/null
+++ b/test/ObjectYAML/MachO/DWARF2-AddrSize8-FormValues.yaml
@@ -0,0 +1,507 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !mach-o
+FileHeader:
+ magic: 0xFEEDFACF
+ cputype: 0x01000007
+ cpusubtype: 0x00000003
+ filetype: 0x0000000A
+ ncmds: 5
+ sizeofcmds: 1800
+ flags: 0x00000000
+ reserved: 0x00000000
+LoadCommands:
+ - cmd: LC_SEGMENT_64
+ cmdsize: 72
+ segname: __PAGEZERO
+ vmaddr: 0
+ vmsize: 4294967296
+ fileoff: 0
+ filesize: 0
+ maxprot: 0
+ initprot: 0
+ nsects: 0
+ flags: 0
+ - cmd: LC_SEGMENT_64
+ cmdsize: 472
+ segname: __TEXT
+ vmaddr: 4294967296
+ vmsize: 4096
+ fileoff: 0
+ filesize: 0
+ maxprot: 7
+ initprot: 5
+ nsects: 5
+ flags: 0
+ Sections:
+ - sectname: __text
+ segname: __TEXT
+ addr: 0x0000000100000F50
+ size: 52
+ offset: 0x00000000
+ align: 4
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x80000400
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __stubs
+ segname: __TEXT
+ addr: 0x0000000100000F84
+ size: 6
+ offset: 0x00000000
+ align: 1
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x80000408
+ reserved1: 0x00000000
+ reserved2: 0x00000006
+ reserved3: 0x00000000
+ - sectname: __stub_helper
+ segname: __TEXT
+ addr: 0x0000000100000F8C
+ size: 26
+ offset: 0x00000000
+ align: 2
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x80000400
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __cstring
+ segname: __TEXT
+ addr: 0x0000000100000FA6
+ size: 14
+ offset: 0x00000000
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000002
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __unwind_info
+ segname: __TEXT
+ addr: 0x0000000100000FB4
+ size: 72
+ offset: 0x00000000
+ align: 2
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - cmd: LC_SEGMENT_64
+ cmdsize: 232
+ segname: __DATA
+ vmaddr: 4294971392
+ vmsize: 4096
+ fileoff: 0
+ filesize: 0
+ maxprot: 7
+ initprot: 3
+ nsects: 2
+ flags: 0
+ Sections:
+ - sectname: __nl_symbol_ptr
+ segname: __DATA
+ addr: 0x0000000100001000
+ size: 16
+ offset: 0x00000000
+ align: 3
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000006
+ reserved1: 0x00000001
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __la_symbol_ptr
+ segname: __DATA
+ addr: 0x0000000100001010
+ size: 8
+ offset: 0x00000000
+ align: 3
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000007
+ reserved1: 0x00000003
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - cmd: LC_SEGMENT_64
+ cmdsize: 72
+ segname: __LINKEDIT
+ vmaddr: 4294975488
+ vmsize: 4096
+ fileoff: 4096
+ filesize: 60
+ maxprot: 7
+ initprot: 1
+ nsects: 0
+ flags: 0
+ - cmd: LC_SEGMENT_64
+ cmdsize: 952
+ segname: __DWARF
+ vmaddr: 4294979584
+ vmsize: 4096
+ fileoff: 8192
+ filesize: 764
+ maxprot: 7
+ initprot: 3
+ nsects: 11
+ flags: 0
+ Sections:
+ - sectname: __debug_line
+ segname: __DWARF
+ addr: 0x0000000100003000
+ size: 69
+ offset: 0x00002000
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __debug_pubnames
+ segname: __DWARF
+ addr: 0x0000000100003045
+ size: 27
+ offset: 0x00002045
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __debug_pubtypes
+ segname: __DWARF
+ addr: 0x0000000100003060
+ size: 35
+ offset: 0x00002060
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __debug_aranges
+ segname: __DWARF
+ addr: 0x0000000100003083
+ size: 48
+ offset: 0x00002083
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __debug_info
+ segname: __DWARF
+ addr: 0x00000001000030B3
+ size: 180
+ offset: 0x000020B3
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __debug_abbrev
+ segname: __DWARF
+ addr: 0x000000010000312C
+ size: 84
+ offset: 0x00002167
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __debug_str
+ segname: __DWARF
+ addr: 0x0000000100003178
+ size: 83
+ offset: 0x000021BB
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __apple_names
+ segname: __DWARF
+ addr: 0x0000000100003206
+ size: 36
+ offset: 0x0000221E
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __apple_namespac
+ segname: __DWARF
+ addr: 0x0000000100003242
+ size: 36
+ offset: 0x00002242
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __apple_types
+ segname: __DWARF
+ addr: 0x0000000100003266
+ size: 114
+ offset: 0x00002266
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __apple_objc
+ segname: __DWARF
+ addr: 0x00000001000032D8
+ size: 36
+ offset: 0x000022D8
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+DWARF:
+ debug_str:
+ - World
+ debug_abbrev:
+ - Code: 0x00000001
+ Tag: DW_TAG_compile_unit
+ Children: DW_CHILDREN_no
+ Attributes:
+ - Attribute: 0x2000
+ Form: DW_FORM_addr
+ - Attribute: 0x2001
+ Form: DW_FORM_block
+ - Attribute: DW_AT_MIPS_loop_begin
+ Form: DW_FORM_block1
+ - Attribute: DW_AT_MIPS_tail_loop_begin
+ Form: DW_FORM_block2
+ - Attribute: DW_AT_MIPS_epilog_begin
+ Form: DW_FORM_block4
+ - Attribute: DW_AT_MIPS_loop_unroll_factor
+ Form: DW_FORM_data1
+ - Attribute: DW_AT_MIPS_software_pipeline_depth
+ Form: DW_FORM_data2
+ - Attribute: DW_AT_MIPS_linkage_name
+ Form: DW_FORM_data4
+ - Attribute: DW_AT_MIPS_stride
+ Form: DW_FORM_data8
+ - Attribute: DW_AT_MIPS_abstract_name
+ Form: DW_FORM_string
+ - Attribute: DW_AT_MIPS_clone_origin
+ Form: DW_FORM_strp
+ - Attribute: DW_AT_MIPS_has_inlines
+ Form: DW_FORM_ref_addr
+ - Attribute: DW_AT_MIPS_stride_byte
+ Form: DW_FORM_ref1
+ - Attribute: DW_AT_MIPS_stride_elem
+ Form: DW_FORM_ref2
+ - Attribute: DW_AT_MIPS_ptr_dopetype
+ Form: DW_FORM_ref4
+ - Attribute: DW_AT_MIPS_allocatable_dopetype
+ Form: DW_FORM_ref8
+ - Attribute: DW_AT_MIPS_assumed_shape_dopetype
+ Form: DW_FORM_ref_sig8
+ - Attribute: DW_AT_MIPS_assumed_size
+ Form: DW_FORM_ref_udata
+ - Attribute: 0x2012
+ Form: DW_FORM_flag
+ - Attribute: 0x2013
+ Form: DW_FORM_flag
+ - Attribute: 0x2014
+ Form: DW_FORM_flag_present
+ - Attribute: 0x2015
+ Form: DW_FORM_sdata
+ - Attribute: 0x2017
+ Form: DW_FORM_udata
+ - Attribute: 0x2018
+ Form: DW_FORM_GNU_ref_alt
+ - Attribute: 0x2019
+ Form: DW_FORM_sec_offset
+ - Attribute: 0x201A
+ Form: DW_FORM_addr
+ debug_info:
+ - Length:
+ TotalLength: 168
+ Version: 2
+ AbbrOffset: 0
+ AddrSize: 8
+ Entries:
+ - AbbrCode: 0x00000001
+ Values:
+ - Value: 0x0123456789ABCDEF
+ - Value: 0x000000000000000A
+ BlockData:
+ - 0x01
+ - 0x02
+ - 0x03
+ - 0x04
+ - 0x05
+ - 0x06
+ - 0x07
+ - 0x08
+ - 0x09
+ - 0x00
+ - Value: 0x000000000000000A
+ BlockData:
+ - 0x01
+ - 0x02
+ - 0x03
+ - 0x04
+ - 0x05
+ - 0x06
+ - 0x07
+ - 0x08
+ - 0x09
+ - 0x00
+ - Value: 0x000000000000000A
+ BlockData:
+ - 0x01
+ - 0x02
+ - 0x03
+ - 0x04
+ - 0x05
+ - 0x06
+ - 0x07
+ - 0x08
+ - 0x09
+ - 0x00
+ - Value: 0x000000000000000A
+ BlockData:
+ - 0x01
+ - 0x02
+ - 0x03
+ - 0x04
+ - 0x05
+ - 0x06
+ - 0x07
+ - 0x08
+ - 0x09
+ - 0x00
+ - Value: 0x0000000000000001
+ - Value: 0x0000000000002345
+ - Value: 0x000000006789ABCD
+ - Value: 0x0011223344556677
+ - Value: 0x0000000000000000
+ CStr: Hello
+ - Value: 0x0000000000000000
+ - Value: 0x0000000012345678
+ - Value: 0x0000000000000001
+ - Value: 0x0000000000002345
+ - Value: 0x000000006789ABCD
+ - Value: 0x0011223344556677
+ - Value: 0xAABBCCDDEEFF0011
+ - Value: 0xFFFFFFFFFFFFFFFE
+ - Value: 0x0000000000000001
+ - Value: 0x0000000000000000
+ - Value: 0x0000000000000001
+ - Value: 0x8000000000000000
+ - Value: 0xFFFFFFFFFFFFFFFE
+ - Value: 0x0000000000000001
+ - Value: 0x0000000000000002
+ - Value: 0x0123456789ABCDEF
+...
+
+#CHECK: debug_info:
+#CHECK: - Length:
+#CHECK: TotalLength: 168
+#CHECK: Version: 2
+#CHECK: AbbrOffset: 0
+#CHECK: AddrSize: 8
+#CHECK: Entries:
+#CHECK: - AbbrCode: 0x00000001
+#CHECK: Values:
+#CHECK: - Value: 0x0123456789ABCDEF
+#CHECK: - Value: 0x000000000000000A
+#CHECK: BlockData:
+#CHECK: - 0x01
+#CHECK: - 0x02
+#CHECK: - 0x03
+#CHECK: - 0x04
+#CHECK: - 0x05
+#CHECK: - 0x06
+#CHECK: - 0x07
+#CHECK: - 0x08
+#CHECK: - 0x09
+#CHECK: - 0x00
+#CHECK: - Value: 0x000000000000000A
+#CHECK: BlockData:
+#CHECK: - 0x01
+#CHECK: - 0x02
+#CHECK: - 0x03
+#CHECK: - 0x04
+#CHECK: - 0x05
+#CHECK: - 0x06
+#CHECK: - 0x07
+#CHECK: - 0x08
+#CHECK: - 0x09
+#CHECK: - 0x00
+#CHECK: - Value: 0x000000000000000A
+#CHECK: BlockData:
+#CHECK: - 0x01
+#CHECK: - 0x02
+#CHECK: - 0x03
+#CHECK: - 0x04
+#CHECK: - 0x05
+#CHECK: - 0x06
+#CHECK: - 0x07
+#CHECK: - 0x08
+#CHECK: - 0x09
+#CHECK: - 0x00
+#CHECK: - Value: 0x000000000000000A
+#CHECK: BlockData:
+#CHECK: - 0x01
+#CHECK: - 0x02
+#CHECK: - 0x03
+#CHECK: - 0x04
+#CHECK: - 0x05
+#CHECK: - 0x06
+#CHECK: - 0x07
+#CHECK: - 0x08
+#CHECK: - 0x09
+#CHECK: - 0x00
+#CHECK: - Value: 0x0000000000000001
+#CHECK: - Value: 0x0000000000002345
+#CHECK: - Value: 0x000000006789ABCD
+#CHECK: - Value: 0x0011223344556677
+#CHECK: CStr: Hello
+#CHECK: - Value: 0x0000000000000000
+#CHECK: - Value: 0x0000000012345678
+#CHECK: - Value: 0x0000000000000001
+#CHECK: - Value: 0x0000000000002345
+#CHECK: - Value: 0x000000006789ABCD
+#CHECK: - Value: 0x0011223344556677
+#CHECK: - Value: 0xAABBCCDDEEFF0011
+#CHECK: - Value: 0xFFFFFFFFFFFFFFFE
+#CHECK: - Value: 0x0000000000000001
+#CHECK: - Value: 0x0000000000000000
+#CHECK: - Value: 0x0000000000000001
+#CHECK: - Value: 0xFFFFFFFFFFFFFFFE
+#CHECK: - Value: 0x0000000000000001
+#CHECK: - Value: 0x0000000000000002
+#CHECK: - Value: 0x0123456789ABCDEF
diff --git a/test/ObjectYAML/MachO/DWARF5-abbrevValues.yaml b/test/ObjectYAML/MachO/DWARF5-abbrevValues.yaml
new file mode 100644
index 000000000000..047ee749d674
--- /dev/null
+++ b/test/ObjectYAML/MachO/DWARF5-abbrevValues.yaml
@@ -0,0 +1,307 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !mach-o
+FileHeader:
+ magic: 0xFEEDFACF
+ cputype: 0x01000007
+ cpusubtype: 0x00000003
+ filetype: 0x0000000A
+ ncmds: 5
+ sizeofcmds: 1800
+ flags: 0x00000000
+ reserved: 0x00000000
+LoadCommands:
+ - cmd: LC_SEGMENT_64
+ cmdsize: 72
+ segname: __PAGEZERO
+ vmaddr: 0
+ vmsize: 4294967296
+ fileoff: 0
+ filesize: 0
+ maxprot: 0
+ initprot: 0
+ nsects: 0
+ flags: 0
+ - cmd: LC_SEGMENT_64
+ cmdsize: 472
+ segname: __TEXT
+ vmaddr: 4294967296
+ vmsize: 4096
+ fileoff: 0
+ filesize: 0
+ maxprot: 7
+ initprot: 5
+ nsects: 5
+ flags: 0
+ Sections:
+ - sectname: __text
+ segname: __TEXT
+ addr: 0x0000000100000F50
+ size: 52
+ offset: 0x00000000
+ align: 4
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x80000400
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __stubs
+ segname: __TEXT
+ addr: 0x0000000100000F84
+ size: 6
+ offset: 0x00000000
+ align: 1
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x80000408
+ reserved1: 0x00000000
+ reserved2: 0x00000006
+ reserved3: 0x00000000
+ - sectname: __stub_helper
+ segname: __TEXT
+ addr: 0x0000000100000F8C
+ size: 26
+ offset: 0x00000000
+ align: 2
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x80000400
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __cstring
+ segname: __TEXT
+ addr: 0x0000000100000FA6
+ size: 14
+ offset: 0x00000000
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000002
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __unwind_info
+ segname: __TEXT
+ addr: 0x0000000100000FB4
+ size: 72
+ offset: 0x00000000
+ align: 2
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - cmd: LC_SEGMENT_64
+ cmdsize: 232
+ segname: __DATA
+ vmaddr: 4294971392
+ vmsize: 4096
+ fileoff: 0
+ filesize: 0
+ maxprot: 7
+ initprot: 3
+ nsects: 2
+ flags: 0
+ Sections:
+ - sectname: __nl_symbol_ptr
+ segname: __DATA
+ addr: 0x0000000100001000
+ size: 16
+ offset: 0x00000000
+ align: 3
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000006
+ reserved1: 0x00000001
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __la_symbol_ptr
+ segname: __DATA
+ addr: 0x0000000100001010
+ size: 8
+ offset: 0x00000000
+ align: 3
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000007
+ reserved1: 0x00000003
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - cmd: LC_SEGMENT_64
+ cmdsize: 72
+ segname: __LINKEDIT
+ vmaddr: 4294975488
+ vmsize: 4096
+ fileoff: 4096
+ filesize: 60
+ maxprot: 7
+ initprot: 1
+ nsects: 0
+ flags: 0
+ - cmd: LC_SEGMENT_64
+ cmdsize: 952
+ segname: __DWARF
+ vmaddr: 4294979584
+ vmsize: 4096
+ fileoff: 8192
+ filesize: 764
+ maxprot: 7
+ initprot: 3
+ nsects: 11
+ flags: 0
+ Sections:
+ - sectname: __debug_line
+ segname: __DWARF
+ addr: 0x0000000100003000
+ size: 69
+ offset: 0x00002000
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __debug_pubnames
+ segname: __DWARF
+ addr: 0x0000000100003045
+ size: 27
+ offset: 0x00002045
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __debug_pubtypes
+ segname: __DWARF
+ addr: 0x0000000100003060
+ size: 35
+ offset: 0x00002060
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __debug_aranges
+ segname: __DWARF
+ addr: 0x0000000100003083
+ size: 48
+ offset: 0x00002083
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __debug_info
+ segname: __DWARF
+ addr: 0x00000001000030B3
+ size: 121
+ offset: 0x000020B3
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __debug_abbrev
+ segname: __DWARF
+ addr: 0x000000010000312C
+ size: 76
+ offset: 0x0000212C
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __debug_str
+ segname: __DWARF
+ addr: 0x0000000100003178
+ size: 142
+ offset: 0x00002178
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __apple_names
+ segname: __DWARF
+ addr: 0x0000000100003206
+ size: 60
+ offset: 0x00002206
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __apple_namespac
+ segname: __DWARF
+ addr: 0x0000000100003242
+ size: 36
+ offset: 0x00002242
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __apple_types
+ segname: __DWARF
+ addr: 0x0000000100003266
+ size: 114
+ offset: 0x00002266
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __apple_objc
+ segname: __DWARF
+ addr: 0x00000001000032D8
+ size: 36
+ offset: 0x000022D8
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+DWARF:
+ debug_abbrev:
+ - Code: 0x00000001
+ Tag: DW_TAG_compile_unit
+ Children: DW_CHILDREN_yes
+ Attributes:
+ - Attribute: 0x2001
+ Form: DW_FORM_implicit_const
+ Value: 0x12345678
+...
+
+#CHECK: DWARF:
+#CHECK: debug_abbrev:
+#CHECK: - Code: 0x00000001
+#CHECK: Tag: DW_TAG_compile_unit
+#CHECK: Children: DW_CHILDREN_yes
+#CHECK: Attributes:
+#CHECK: - Attribute: 0x2001
+#CHECK: Form: DW_FORM_implicit_const
+#CHECK: Value: 0x0000000012345678
diff --git a/test/ObjectYAML/MachO/DWARF5-debug_info.yaml b/test/ObjectYAML/MachO/DWARF5-debug_info.yaml
new file mode 100644
index 000000000000..d0e81834b593
--- /dev/null
+++ b/test/ObjectYAML/MachO/DWARF5-debug_info.yaml
@@ -0,0 +1,582 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !mach-o
+FileHeader:
+ magic: 0xFEEDFACF
+ cputype: 0x01000007
+ cpusubtype: 0x00000003
+ filetype: 0x0000000A
+ ncmds: 5
+ sizeofcmds: 1800
+ flags: 0x00000000
+ reserved: 0x00000000
+LoadCommands:
+ - cmd: LC_SEGMENT_64
+ cmdsize: 72
+ segname: __PAGEZERO
+ vmaddr: 0
+ vmsize: 4294967296
+ fileoff: 0
+ filesize: 0
+ maxprot: 0
+ initprot: 0
+ nsects: 0
+ flags: 0
+ - cmd: LC_SEGMENT_64
+ cmdsize: 472
+ segname: __TEXT
+ vmaddr: 4294967296
+ vmsize: 4096
+ fileoff: 0
+ filesize: 0
+ maxprot: 7
+ initprot: 5
+ nsects: 5
+ flags: 0
+ Sections:
+ - sectname: __text
+ segname: __TEXT
+ addr: 0x0000000100000F50
+ size: 52
+ offset: 0x00000000
+ align: 4
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x80000400
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __stubs
+ segname: __TEXT
+ addr: 0x0000000100000F84
+ size: 6
+ offset: 0x00000000
+ align: 1
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x80000408
+ reserved1: 0x00000000
+ reserved2: 0x00000006
+ reserved3: 0x00000000
+ - sectname: __stub_helper
+ segname: __TEXT
+ addr: 0x0000000100000F8C
+ size: 26
+ offset: 0x00000000
+ align: 2
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x80000400
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __cstring
+ segname: __TEXT
+ addr: 0x0000000100000FA6
+ size: 14
+ offset: 0x00000000
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000002
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __unwind_info
+ segname: __TEXT
+ addr: 0x0000000100000FB4
+ size: 72
+ offset: 0x00000000
+ align: 2
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - cmd: LC_SEGMENT_64
+ cmdsize: 232
+ segname: __DATA
+ vmaddr: 4294971392
+ vmsize: 4096
+ fileoff: 0
+ filesize: 0
+ maxprot: 7
+ initprot: 3
+ nsects: 2
+ flags: 0
+ Sections:
+ - sectname: __nl_symbol_ptr
+ segname: __DATA
+ addr: 0x0000000100001000
+ size: 16
+ offset: 0x00000000
+ align: 3
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000006
+ reserved1: 0x00000001
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __la_symbol_ptr
+ segname: __DATA
+ addr: 0x0000000100001010
+ size: 8
+ offset: 0x00000000
+ align: 3
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000007
+ reserved1: 0x00000003
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - cmd: LC_SEGMENT_64
+ cmdsize: 72
+ segname: __LINKEDIT
+ vmaddr: 4294975488
+ vmsize: 4096
+ fileoff: 4096
+ filesize: 60
+ maxprot: 7
+ initprot: 1
+ nsects: 0
+ flags: 0
+ - cmd: LC_SEGMENT_64
+ cmdsize: 952
+ segname: __DWARF
+ vmaddr: 4294979584
+ vmsize: 4096
+ fileoff: 8192
+ filesize: 765
+ maxprot: 7
+ initprot: 3
+ nsects: 11
+ flags: 0
+ Sections:
+ - sectname: __debug_line
+ segname: __DWARF
+ addr: 0x0000000100003000
+ size: 69
+ offset: 0x00002000
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __debug_pubnames
+ segname: __DWARF
+ addr: 0x0000000100003045
+ size: 27
+ offset: 0x00002045
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __debug_pubtypes
+ segname: __DWARF
+ addr: 0x0000000100003060
+ size: 35
+ offset: 0x00002060
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __debug_aranges
+ segname: __DWARF
+ addr: 0x0000000100003083
+ size: 48
+ offset: 0x00002083
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __debug_info
+ segname: __DWARF
+ addr: 0x00000001000030B3
+ size: 122
+ offset: 0x000020B3
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __debug_abbrev
+ segname: __DWARF
+ addr: 0x000000010000312C
+ size: 76
+ offset: 0x0000212D
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __debug_str
+ segname: __DWARF
+ addr: 0x0000000100003178
+ size: 142
+ offset: 0x00002179
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __apple_names
+ segname: __DWARF
+ addr: 0x0000000100003206
+ size: 60
+ offset: 0x00002207
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __apple_namespac
+ segname: __DWARF
+ addr: 0x0000000100003242
+ size: 36
+ offset: 0x00002243
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __apple_types
+ segname: __DWARF
+ addr: 0x0000000100003266
+ size: 114
+ offset: 0x00002267
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - sectname: __apple_objc
+ segname: __DWARF
+ addr: 0x00000001000032D8
+ size: 36
+ offset: 0x000022D9
+ align: 0
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x00000000
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+LinkEditData:
+ NameList:
+ - n_strx: 2
+ n_type: 0x0F
+ n_sect: 1
+ n_desc: 16
+ n_value: 4294967296
+ - n_strx: 22
+ n_type: 0x0F
+ n_sect: 1
+ n_desc: 0
+ n_value: 4294971216
+ StringTable:
+ - ''
+ - ''
+ - __mh_execute_header
+ - _main
+DWARF:
+ debug_abbrev:
+ - Code: 0x00000001
+ Tag: DW_TAG_compile_unit
+ Children: DW_CHILDREN_yes
+ Attributes:
+ - Attribute: DW_AT_producer
+ Form: DW_FORM_strp
+ - Attribute: DW_AT_language
+ Form: DW_FORM_data2
+ - Attribute: DW_AT_name
+ Form: DW_FORM_strp
+ - Attribute: DW_AT_stmt_list
+ Form: DW_FORM_sec_offset
+ - Attribute: DW_AT_comp_dir
+ Form: DW_FORM_strp
+ - Attribute: DW_AT_low_pc
+ Form: DW_FORM_addr
+ - Attribute: DW_AT_high_pc
+ Form: DW_FORM_data4
+ - Code: 0x00000002
+ Tag: DW_TAG_subprogram
+ Children: DW_CHILDREN_yes
+ Attributes:
+ - Attribute: DW_AT_low_pc
+ Form: DW_FORM_addr
+ - Attribute: DW_AT_high_pc
+ Form: DW_FORM_data4
+ - Attribute: DW_AT_frame_base
+ Form: DW_FORM_exprloc
+ - Attribute: DW_AT_name
+ Form: DW_FORM_strp
+ - Attribute: DW_AT_decl_file
+ Form: DW_FORM_data1
+ - Attribute: DW_AT_decl_line
+ Form: DW_FORM_data1
+ - Attribute: DW_AT_prototyped
+ Form: DW_FORM_flag_present
+ - Attribute: DW_AT_type
+ Form: DW_FORM_ref4
+ - Attribute: DW_AT_external
+ Form: DW_FORM_flag_present
+ - Code: 0x00000003
+ Tag: DW_TAG_formal_parameter
+ Children: DW_CHILDREN_no
+ Attributes:
+ - Attribute: DW_AT_location
+ Form: DW_FORM_exprloc
+ - Attribute: DW_AT_name
+ Form: DW_FORM_strp
+ - Attribute: DW_AT_decl_file
+ Form: DW_FORM_data1
+ - Attribute: DW_AT_decl_line
+ Form: DW_FORM_data1
+ - Attribute: DW_AT_type
+ Form: DW_FORM_ref4
+ - Code: 0x00000004
+ Tag: DW_TAG_base_type
+ Children: DW_CHILDREN_no
+ Attributes:
+ - Attribute: DW_AT_name
+ Form: DW_FORM_strp
+ - Attribute: DW_AT_encoding
+ Form: DW_FORM_data1
+ - Attribute: DW_AT_byte_size
+ Form: DW_FORM_data1
+ - Code: 0x00000005
+ Tag: DW_TAG_pointer_type
+ Children: DW_CHILDREN_no
+ Attributes:
+ - Attribute: DW_AT_type
+ Form: DW_FORM_ref4
+ debug_aranges:
+ - Length:
+ TotalLength: 44
+ Version: 2
+ CuOffset: 0
+ AddrSize: 8
+ SegSize: 0
+ Descriptors:
+ - Address: 0x0000000100000F50
+ Length: 52
+ debug_info:
+ - Length:
+ TotalLength: 118
+ Version: 5
+ UnitType: DW_UT_compile
+ AbbrOffset: 0
+ AddrSize: 8
+ Entries:
+ - AbbrCode: 0x00000001
+ Values:
+ - Value: 0x0000000000000001
+ - Value: 0x000000000000000C
+ - Value: 0x0000000000000038
+ - Value: 0x0000000000000000
+ - Value: 0x0000000000000046
+ - Value: 0x0000000100000F50
+ - Value: 0x0000000000000034
+ - AbbrCode: 0x00000002
+ Values:
+ - Value: 0x0000000100000F50
+ - Value: 0x0000000000000034
+ - Value: 0x0000000000000001
+ BlockData:
+ - 0x56
+ - Value: 0x0000000000000076
+ - Value: 0x0000000000000001
+ - Value: 0x0000000000000003
+ - Value: 0x0000000000000001
+ - Value: 0x0000000000000060
+ - Value: 0x0000000000000001
+ - AbbrCode: 0x00000003
+ Values:
+ - Value: 0x0000000000000002
+ BlockData:
+ - 0x91
+ - 0x78
+ - Value: 0x000000000000007B
+ - Value: 0x0000000000000001
+ - Value: 0x0000000000000003
+ - Value: 0x0000000000000060
+ - AbbrCode: 0x00000003
+ Values:
+ - Value: 0x0000000000000002
+ BlockData:
+ - 0x91
+ - 0x70
+ - Value: 0x0000000000000080
+ - Value: 0x0000000000000001
+ - Value: 0x0000000000000003
+ - Value: 0x0000000000000067
+ - AbbrCode: 0x00000000
+ Values:
+ - AbbrCode: 0x00000004
+ Values:
+ - Value: 0x0000000000000085
+ - Value: 0x0000000000000005
+ - Value: 0x0000000000000004
+ - AbbrCode: 0x00000005
+ Values:
+ - Value: 0x000000000000006C
+ - AbbrCode: 0x00000005
+ Values:
+ - Value: 0x0000000000000071
+ - AbbrCode: 0x00000004
+ Values:
+ - Value: 0x0000000000000089
+ - Value: 0x0000000000000006
+ - Value: 0x0000000000000001
+ - AbbrCode: 0x00000000
+ Values:
+ debug_line:
+ - Length:
+ TotalLength: 65
+ Version: 2
+ PrologueLength: 36
+ MinInstLength: 1
+ DefaultIsStmt: 1
+ LineBase: 251
+ LineRange: 14
+ OpcodeBase: 13
+ StandardOpcodeLengths:
+ - 0
+ - 1
+ - 1
+ - 1
+ - 1
+ - 0
+ - 0
+ - 0
+ - 1
+ - 0
+ - 0
+ - 1
+ IncludeDirs:
+ Files:
+ - Name: hello_world.c
+ DirIdx: 0
+ ModTime: 0
+ Length: 0
+ Opcodes:
+ - Opcode: DW_LNS_extended_op
+ ExtLen: 9
+ SubOpcode: DW_LNE_set_address
+ Data: 4294971216
+ - Opcode: 0x14
+ Data: 4294971216
+ - Opcode: DW_LNS_set_column
+ Data: 3
+ - Opcode: DW_LNS_set_prologue_end
+ Data: 3
+ - Opcode: DW_LNS_const_add_pc
+ Data: 3
+ - Opcode: 0xBB
+ Data: 3
+ - Opcode: 0xBB
+ Data: 3
+ - Opcode: DW_LNS_advance_pc
+ Data: 11
+ - Opcode: DW_LNS_extended_op
+ ExtLen: 1
+ SubOpcode: DW_LNE_end_sequence
+ Data: 11
+...
+
+
+#CHECK: DWARF:
+#CHECK: debug_info:
+#CHECK: - Length:
+#CHECK: TotalLength: 118
+#CHECK: Version: 5
+#CHECK: UnitType: DW_UT_compile
+#CHECK: AbbrOffset: 0
+#CHECK: AddrSize: 8
+#CHECK: Entries:
+#CHECK: - AbbrCode: 0x00000001
+#CHECK: Values:
+#CHECK: - Value: 0x0000000000000001
+#CHECK: - Value: 0x000000000000000C
+#CHECK: - Value: 0x0000000000000038
+#CHECK: - Value: 0x0000000000000000
+#CHECK: - Value: 0x0000000000000046
+#CHECK: - Value: 0x0000000100000F50
+#CHECK: - Value: 0x0000000000000034
+#CHECK: - AbbrCode: 0x00000002
+#CHECK: Values:
+#CHECK: - Value: 0x0000000100000F50
+#CHECK: - Value: 0x0000000000000034
+#CHECK: - Value: 0x0000000000000001
+#CHECK: BlockData:
+#CHECK: - 0x56
+#CHECK: - Value: 0x0000000000000076
+#CHECK: - Value: 0x0000000000000001
+#CHECK: - Value: 0x0000000000000003
+#CHECK: - Value: 0x0000000000000001
+#CHECK: - Value: 0x0000000000000060
+#CHECK: - Value: 0x0000000000000001
+#CHECK: - AbbrCode: 0x00000003
+#CHECK: Values:
+#CHECK: - Value: 0x0000000000000002
+#CHECK: BlockData:
+#CHECK: - 0x91
+#CHECK: - 0x78
+#CHECK: - Value: 0x000000000000007B
+#CHECK: - Value: 0x0000000000000001
+#CHECK: - Value: 0x0000000000000003
+#CHECK: - Value: 0x0000000000000060
+#CHECK: - AbbrCode: 0x00000003
+#CHECK: Values:
+#CHECK: - Value: 0x0000000000000002
+#CHECK: BlockData:
+#CHECK: - 0x91
+#CHECK: - 0x70
+#CHECK: - Value: 0x0000000000000080
+#CHECK: - Value: 0x0000000000000001
+#CHECK: - Value: 0x0000000000000003
+#CHECK: - Value: 0x0000000000000067
+#CHECK: - AbbrCode: 0x00000000
+#CHECK: Values:
+#CHECK: - AbbrCode: 0x00000004
+#CHECK: Values:
+#CHECK: - Value: 0x0000000000000085
+#CHECK: - Value: 0x0000000000000005
+#CHECK: - Value: 0x0000000000000004
+#CHECK: - AbbrCode: 0x00000005
+#CHECK: Values:
+#CHECK: - Value: 0x000000000000006C
+#CHECK: - AbbrCode: 0x00000005
+#CHECK: Values:
+#CHECK: - Value: 0x0000000000000071
+#CHECK: - AbbrCode: 0x00000004
+#CHECK: Values:
+#CHECK: - Value: 0x0000000000000089
+#CHECK: - Value: 0x0000000000000006
+#CHECK: - Value: 0x0000000000000001
+#CHECK: - AbbrCode: 0x00000000
+#CHECK: Values:
diff --git a/test/ObjectYAML/MachO/build_version_command.yaml b/test/ObjectYAML/MachO/build_version_command.yaml
new file mode 100644
index 000000000000..5df321168604
--- /dev/null
+++ b/test/ObjectYAML/MachO/build_version_command.yaml
@@ -0,0 +1,35 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !mach-o
+FileHeader:
+ magic: 0xFEEDFACF
+ cputype: 0x01000007
+ cpusubtype: 0x80000003
+ filetype: 0x00000002
+ ncmds: 1
+ sizeofcmds: 32
+ flags: 0x00218085
+ reserved: 0x00000000
+LoadCommands:
+ - cmd: LC_BUILD_VERSION
+ cmdsize: 32
+ platform: 2
+ minos: 0x00080000
+ sdk: 0x00090000
+ ntools: 1
+ Tools:
+ - tool: 1
+ version: 0x00000000
+...
+
+
+CHECK: LoadCommands:
+CHECK: - cmd: LC_BUILD_VERSION
+CHECK-NEXT: cmdsize: 32
+CHECK-NEXT: platform: 2
+CHECK-NEXT: minos: 524288
+CHECK-NEXT: sdk: 589824
+CHECK-NEXT: ntools: 1
+CHECK-NEXT: Tools:
+CHECK-NEXT: - tool: 1
+CHECK-NEXT: version: 0
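
The decimal values in the CHECK lines follow from Mach-O's packed version
encoding, major << 16 | minor << 8 | patch:

  minos 0x00080000 = 8 << 16  ->  version 8.0.0, printed as 524288
  sdk   0x00090000 = 9 << 16  ->  version 9.0.0, printed as 589824
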
diff --git a/test/ObjectYAML/wasm/code_section.yaml b/test/ObjectYAML/wasm/code_section.yaml
new file mode 100644
index 000000000000..b75bf7e1cfd5
--- /dev/null
+++ b/test/ObjectYAML/wasm/code_section.yaml
@@ -0,0 +1,72 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+--- !WASM
+FileHeader:
+ Version: 0x00000001
+Sections:
+ - Type: TYPE
+ Signatures:
+ - ReturnType: F32
+ ParamTypes:
+ - I32
+ - ReturnType: NORESULT
+ ParamTypes:
+ - I32
+ - I64
+ - Type: FUNCTION
+ FunctionTypes:
+ - 0
+ - 1
+ - Type: CODE
+ Relocations:
+ - Type: R_WEBASSEMBLY_TABLE_INDEX_SLEB
+ Index: 0
+ Offset: 0x00000006
+ Addend: 0x00000000
+ - Type: R_WEBASSEMBLY_FUNCTION_INDEX_LEB
+ Index: 1
+ Offset: 0x00000025
+ Addend: 0x00000000
+ Functions:
+ - Locals:
+ - Type: I32
+ Count: 3
+ Body: 418080808000210020002101200111808080800000210220020F0B
+ - Locals:
+ - Type: I32
+ Count: 1
+ Body: 108180808000210020000F0B
+...
+# CHECK: --- !WASM
+# CHECK: FileHeader:
+# CHECK: Version: 0x00000001
+# CHECK: Sections:
+# CHECK: - Type: TYPE
+# CHECK: Signatures:
+# CHECK: - Index: 0
+# CHECK: ReturnType: F32
+# CHECK: ParamTypes:
+# CHECK: - I32
+# CHECK: - Index: 1
+# CHECK: ReturnType: NORESULT
+# CHECK: ParamTypes:
+# CHECK: - I32
+# CHECK: - I64
+# CHECK: - Type: CODE
+# CHECK: Relocations:
+# CHECK: - Type: R_WEBASSEMBLY_TABLE_INDEX_SLEB
+# CHECK: Index: 0
+# CHECK: Offset: 0x00000006
+# CHECK: Addend: 0x00000000
+# CHECK: - Type: R_WEBASSEMBLY_FUNCTION_INDEX_LEB
+# CHECK: Index: 1
+# CHECK: Offset: 0x00000025
+# CHECK: Addend: 0x00000000
+# CHECK: Functions:
+# CHECK: - Locals:
+# CHECK: - Type: I32
+# CHECK: Count: 3
+# CHECK: Body: 418080808000210020002101200111808080800000210220020F0B
+# CHECK: - Locals:
+# CHECK: - Type: I32
+# CHECK: Count: 1
+# CHECK: Body: 108180808000210020000F0B
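
Each Body field is a raw WebAssembly function body in hex. Decoded against
the standard wasm MVP opcode table (an illustrative annotation; only the
byte values come from the test):

  First function:
    41 80 80 80 80 00     i32.const 0    (padded SLEB128 operand; the bytes
                                          the TABLE_INDEX_SLEB reloc patches)
    21 00                 set_local 0
    20 00                 get_local 0
    21 01                 set_local 1
    20 01                 get_local 1
    11 80 80 80 80 00 00  call_indirect type 0, reserved 0  (padded LEB128)
    21 02                 set_local 2
    20 02                 get_local 2
    0F                    return
    0B                    end

  Second function:
    10 81 80 80 80 00     call 1         (padded LEB128 = 1; the bytes the
                                          FUNCTION_INDEX_LEB reloc patches)
    21 00                 set_local 0
    20 00                 get_local 0
    0F                    return
    0B                    end
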
diff --git a/test/ObjectYAML/wasm/custom_section.yaml b/test/ObjectYAML/wasm/custom_section.yaml
new file mode 100644
index 000000000000..c7b87cb4239d
--- /dev/null
+++ b/test/ObjectYAML/wasm/custom_section.yaml
@@ -0,0 +1,17 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+--- !WASM
+FileHeader:
+ Version: 0x00000001
+Sections:
+ - Type: CUSTOM
+ Name: foo
+ Payload: 03666F6F0401020304
+...
+# CHECK: --- !WASM
+# CHECK: FileHeader:
+# CHECK: Version: 0x00000001
+# CHECK: Sections:
+# CHECK: - Type: CUSTOM
+# CHECK: Name: foo
+# CHECK: Payload: 03666F6F0401020304
+# CHECK: ...
diff --git a/test/ObjectYAML/wasm/data_section.yaml b/test/ObjectYAML/wasm/data_section.yaml
new file mode 100644
index 000000000000..542d0efe42f3
--- /dev/null
+++ b/test/ObjectYAML/wasm/data_section.yaml
@@ -0,0 +1,28 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+--- !WASM
+FileHeader:
+ Version: 0x00000001
+Sections:
+ - Type: MEMORY
+ Memories:
+ - Initial: 0x00000003
+ - Type: DATA
+ Segments:
+ - Index: 0
+ Offset:
+ Opcode: I32_CONST
+ Value: 4
+ Content: '10001000'
+...
+# CHECK: --- !WASM
+# CHECK: FileHeader:
+# CHECK: Version: 0x00000001
+# CHECK: Sections:
+# CHECK: - Type: DATA
+# CHECK: Segments:
+# CHECK: - Index: 0
+# CHECK: Offset:
+# CHECK: Opcode: I32_CONST
+# CHECK: Value: 4
+# CHECK: Content: '10001000'
+# CHECK: ...
diff --git a/test/ObjectYAML/wasm/elem_section.yaml b/test/ObjectYAML/wasm/elem_section.yaml
new file mode 100644
index 000000000000..498c9aa93ea2
--- /dev/null
+++ b/test/ObjectYAML/wasm/elem_section.yaml
@@ -0,0 +1,40 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+--- !WASM
+FileHeader:
+ Version: 0x00000001
+Sections:
+ - Type: TABLE
+ Tables:
+ - ElemType: ANYFUNC
+ Limits:
+ Flags: 0x00000001
+ Initial: 0x00000010
+ Maximum: 0x00000011
+ - Type: ELEM
+ Segments:
+ - Offset:
+ Opcode: I32_CONST
+ Value: 3
+ Functions:
+ - 1
+ - Offset:
+ Opcode: I32_CONST
+ Value: 5
+ Functions:
+ - 4
+...
+# CHECK: --- !WASM
+# CHECK: FileHeader:
+# CHECK: Version: 0x00000001
+# CHECK: Sections:
+# CHECK: - Type: ELEM
+# CHECK: Segments:
+# CHECK: - Offset:
+# CHECK: Opcode: I32_CONST
+# CHECK: Value: 3
+# CHECK: Functions: [ 1 ]
+# CHECK: - Offset:
+# CHECK: Opcode: I32_CONST
+# CHECK: Value: 5
+# CHECK: Functions: [ 4 ]
+# CHECK: ...
diff --git a/test/ObjectYAML/wasm/export_section.yaml b/test/ObjectYAML/wasm/export_section.yaml
new file mode 100644
index 000000000000..1d1a16fb8335
--- /dev/null
+++ b/test/ObjectYAML/wasm/export_section.yaml
@@ -0,0 +1,27 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+--- !WASM
+FileHeader:
+ Version: 0x00000001
+Sections:
+ - Type: EXPORT
+ Exports:
+ - Name: foo
+ Kind: FUNCTION
+ Index: 0
+ - Name: bar
+ Kind: FUNCTION
+ Index: 1
+...
+# CHECK: --- !WASM
+# CHECK: FileHeader:
+# CHECK: Version: 0x00000001
+# CHECK: Sections:
+# CHECK: - Type: EXPORT
+# CHECK: Exports:
+# CHECK: - Name: foo
+# CHECK: Kind: FUNCTION
+# CHECK: Index: 0
+# CHECK: - Name: bar
+# CHECK: Kind: FUNCTION
+# CHECK: Index: 1
+# CHECK: ...
diff --git a/test/ObjectYAML/wasm/function_section.yaml b/test/ObjectYAML/wasm/function_section.yaml
new file mode 100644
index 000000000000..39e6b75d5cdc
--- /dev/null
+++ b/test/ObjectYAML/wasm/function_section.yaml
@@ -0,0 +1,17 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+--- !WASM
+FileHeader:
+ Version: 0x00000001
+Sections:
+ - Type: FUNCTION
+ FunctionTypes:
+ - 1
+ - 0
+...
+# CHECK: --- !WASM
+# CHECK: FileHeader:
+# CHECK: Version: 0x00000001
+# CHECK: Sections:
+# CHECK: - Type: FUNCTION
+# CHECK: FunctionTypes: [ 1, 0 ]
+# CHECK: ...
diff --git a/test/ObjectYAML/wasm/global_section.yaml b/test/ObjectYAML/wasm/global_section.yaml
new file mode 100644
index 000000000000..3f17c6d88ba4
--- /dev/null
+++ b/test/ObjectYAML/wasm/global_section.yaml
@@ -0,0 +1,25 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+--- !WASM
+FileHeader:
+ Version: 0x00000001
+Sections:
+ - Type: GLOBAL
+ Globals:
+ - Type: I32
+ Mutable: false
+ InitExpr:
+ Opcode: I64_CONST
+ Value: -5
+...
+# CHECK: --- !WASM
+# CHECK: FileHeader:
+# CHECK: Version: 0x00000001
+# CHECK: Sections:
+# CHECK: - Type: GLOBAL
+# CHECK: Globals:
+# CHECK: - Type: I32
+# CHECK: Mutable: false
+# CHECK: InitExpr:
+# CHECK: Opcode: I64_CONST
+# CHECK: Value: -5
+# CHECK: ...
diff --git a/test/ObjectYAML/wasm/header.yaml b/test/ObjectYAML/wasm/header.yaml
new file mode 100644
index 000000000000..c4be4c8a4aaf
--- /dev/null
+++ b/test/ObjectYAML/wasm/header.yaml
@@ -0,0 +1,9 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+--- !WASM
+FileHeader:
+ Version: 0x00000001
+...
+# CHECK: --- !WASM
+# CHECK: FileHeader:
+# CHECK: Version: 0x00000001
+# CHECK: ...
diff --git a/test/ObjectYAML/wasm/header_invalid_version.yaml b/test/ObjectYAML/wasm/header_invalid_version.yaml
new file mode 100644
index 000000000000..fa742db8cbaf
--- /dev/null
+++ b/test/ObjectYAML/wasm/header_invalid_version.yaml
@@ -0,0 +1,6 @@
+# RUN: yaml2obj %s | not obj2yaml 2>&1 | FileCheck %s
+--- !WASM
+FileHeader:
+ Version: 0x00000002
+...
+# CHECK: Error: 'Invalid data was encountered while parsing the file'
diff --git a/test/ObjectYAML/wasm/import_section.yaml b/test/ObjectYAML/wasm/import_section.yaml
new file mode 100644
index 000000000000..52f466a00b66
--- /dev/null
+++ b/test/ObjectYAML/wasm/import_section.yaml
@@ -0,0 +1,41 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+--- !WASM
+FileHeader:
+ Version: 0x00000001
+Sections:
+ - Type: TYPE
+ Signatures:
+ - ReturnType: I32
+ ParamTypes:
+ - I32
+ - Type: IMPORT
+ Imports:
+ - Module: foo
+ Field: bar
+ Kind: FUNCTION
+ SigIndex: 0
+ - Module: fiz
+ Field: baz
+ Kind: GLOBAL
+ GlobalType: I32
+ GlobalMutable: false
+ - Type: FUNCTION
+ FunctionTypes:
+ - 0
+...
+# CHECK: --- !WASM
+# CHECK: FileHeader:
+# CHECK: Version: 0x00000001
+# CHECK: Sections:
+# CHECK: - Type: IMPORT
+# CHECK: Imports:
+# CHECK: - Module: foo
+# CHECK: Field: bar
+# CHECK: Kind: FUNCTION
+# CHECK: SigIndex: 0
+# CHECK: - Module: fiz
+# CHECK: Field: baz
+# CHECK: Kind: GLOBAL
+# CHECK: GlobalType: I32
+# CHECK: GlobalMutable: false
+# CHECK: ...
diff --git a/test/ObjectYAML/wasm/memory_section.yaml b/test/ObjectYAML/wasm/memory_section.yaml
new file mode 100644
index 000000000000..83aae05871db
--- /dev/null
+++ b/test/ObjectYAML/wasm/memory_section.yaml
@@ -0,0 +1,23 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+--- !WASM
+FileHeader:
+ Version: 0x00000001
+Sections:
+ - Type: MEMORY
+ Memories:
+ - Flags: 0x00000001
+ Initial: 0x00000002
+ Maximum: 0x000000FF
+ - Initial: 0x00000003
+...
+# CHECK: --- !WASM
+# CHECK: FileHeader:
+# CHECK: Version: 0x00000001
+# CHECK: Sections:
+# CHECK: - Type: MEMORY
+# CHECK: Memories:
+# CHECK: - Flags: 0x00000001
+# CHECK: Initial: 0x00000002
+# CHECK: Maximum: 0x000000FF
+# CHECK: - Initial: 0x00000003
+# CHECK: ...
diff --git a/test/ObjectYAML/wasm/start_section.yaml b/test/ObjectYAML/wasm/start_section.yaml
new file mode 100644
index 000000000000..41301a620037
--- /dev/null
+++ b/test/ObjectYAML/wasm/start_section.yaml
@@ -0,0 +1,15 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+--- !WASM
+FileHeader:
+ Version: 0x00000001
+Sections:
+ - Type: START
+ StartFunction: 1
+...
+# CHECK: --- !WASM
+# CHECK: FileHeader:
+# CHECK: Version: 0x00000001
+# CHECK: Sections:
+# CHECK: - Type: START
+# CHECK: StartFunction: 1
+# CHECK: ...
diff --git a/test/ObjectYAML/wasm/table_section.yaml b/test/ObjectYAML/wasm/table_section.yaml
new file mode 100644
index 000000000000..d8b1df25e043
--- /dev/null
+++ b/test/ObjectYAML/wasm/table_section.yaml
@@ -0,0 +1,25 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+--- !WASM
+FileHeader:
+ Version: 0x00000001
+Sections:
+ - Type: TABLE
+ Tables:
+ - ElemType: ANYFUNC
+ Limits:
+ Flags: 0x00000001
+ Initial: 0x00000010
+ Maximum: 0x00000011
+...
+# CHECK: --- !WASM
+# CHECK: FileHeader:
+# CHECK: Version: 0x00000001
+# CHECK: Sections:
+# CHECK: - Type: TABLE
+# CHECK: Tables:
+# CHECK: - ElemType: ANYFUNC
+# CHECK: Limits:
+# CHECK: Flags: 0x00000001
+# CHECK: Initial: 0x00000010
+# CHECK: Maximum: 0x00000011
+# CHECK: ...
diff --git a/test/ObjectYAML/wasm/type_section.yaml b/test/ObjectYAML/wasm/type_section.yaml
new file mode 100644
index 000000000000..d6602595a60e
--- /dev/null
+++ b/test/ObjectYAML/wasm/type_section.yaml
@@ -0,0 +1,33 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+--- !WASM
+FileHeader:
+ Version: 0x00000001
+Sections:
+ - Type: TYPE
+ Signatures:
+ - ReturnType: I32
+ ParamTypes:
+ - F32
+ - F32
+ - ReturnType: I64
+ ParamTypes:
+ - F64
+ - F64
+...
+# CHECK: --- !WASM
+# CHECK: FileHeader:
+# CHECK: Version: 0x00000001
+# CHECK: Sections:
+# CHECK: - Type: TYPE
+# CHECK: Signatures:
+# CHECK: - Index: 0
+# CHECK: ReturnType: I32
+# CHECK: ParamTypes:
+# CHECK: - F32
+# CHECK: - F32
+# CHECK: - Index: 1
+# CHECK: ReturnType: I64
+# CHECK: ParamTypes:
+# CHECK: - F64
+# CHECK: - F64
+# CHECK: ...
diff --git a/test/Other/Inputs/glob-input b/test/Other/Inputs/glob-input
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/Other/Inputs/glob-input
diff --git a/test/Other/cgscc-devirt-iteration.ll b/test/Other/cgscc-devirt-iteration.ll
index df5ea2985b94..111dac5bccaf 100644
--- a/test/Other/cgscc-devirt-iteration.ll
+++ b/test/Other/cgscc-devirt-iteration.ll
@@ -7,6 +7,9 @@
; RUN: opt -aa-pipeline=basic-aa -passes='cgscc(function-attrs,function(gvn,instcombine))' -S < %s | FileCheck %s --check-prefix=CHECK --check-prefix=BEFORE
; RUN: opt -aa-pipeline=basic-aa -passes='cgscc(devirt<1>(function-attrs,function(gvn,instcombine)))' -S < %s | FileCheck %s --check-prefix=CHECK --check-prefix=AFTER --check-prefix=AFTER1
; RUN: opt -aa-pipeline=basic-aa -passes='cgscc(devirt<2>(function-attrs,function(gvn,instcombine)))' -S < %s | FileCheck %s --check-prefix=CHECK --check-prefix=AFTER --check-prefix=AFTER2
+;
+; We also verify that the real O2 pipeline catches these cases.
+; RUN: opt -aa-pipeline=basic-aa -passes='default<O2>' -S < %s | FileCheck %s --check-prefix=CHECK --check-prefix=AFTER --check-prefix=AFTER2
declare void @readnone() readnone
; CHECK: Function Attrs: readnone
@@ -93,8 +96,7 @@ entry:
}
declare i8* @memcpy(i8*, i8*, i64)
-; CHECK-NOT: Function Attrs
-; CHECK: declare i8* @memcpy(i8*, i8*, i64)
+; CHECK: declare i8* @memcpy(
; The @test3 function checks that when we refine an indirect call to an
; intrinsic we still revisit the SCC pass. This also covers cases where the
@@ -112,3 +114,15 @@ define void @test3(i8* %src, i8* %dest, i64 %size) {
; CHECK: call void @llvm.memcpy
ret void
}
+
+; A boring function that just keeps our declarations around.
+define void @keep(i8** %sink) {
+; CHECK-NOT: Function Attrs
+; CHECK: define void @keep(
+entry:
+ store volatile i8* bitcast (void ()* @readnone to i8*), i8** %sink
+ store volatile i8* bitcast (void ()* @unknown to i8*), i8** %sink
+ store volatile i8* bitcast (i8* (i8*, i8*, i64)* @memcpy to i8*), i8** %sink
+ call void @unknown()
+ ret void
+}
diff --git a/test/Other/constant-fold-gep.ll b/test/Other/constant-fold-gep.ll
index 77c43a200c03..8028b4fff987 100644
--- a/test/Other/constant-fold-gep.ll
+++ b/test/Other/constant-fold-gep.ll
@@ -8,7 +8,7 @@
; "TO" - Optimizations and targetdata. This tests target-dependent
; folding in the optimizers.
-; RUN: opt -S -o - -instcombine -globalopt -default-data-layout="e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64" < %s | FileCheck --check-prefix=TO %s
+; RUN: opt -S -o - -instcombine -globalopt -data-layout="e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64" < %s | FileCheck --check-prefix=TO %s
; "SCEV" - ScalarEvolution with default target layout
; RUN: opt -analyze -scalar-evolution < %s | FileCheck --check-prefix=SCEV %s
diff --git a/test/Other/debugcounter-newgvn.ll b/test/Other/debugcounter-newgvn.ll
new file mode 100644
index 000000000000..cfe043c8455a
--- /dev/null
+++ b/test/Other/debugcounter-newgvn.ll
@@ -0,0 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; REQUIRES: asserts
+; RUN: opt -S -debug-counter=newgvn-vn-skip=1,newgvn-vn-count=2 -newgvn < %s 2>&1 | FileCheck %s
+;; Test that, with debug counters on, we don't value number the first instruction, only the second and third,
+;; which means we do not discover the return is constant.
+define i32 @vntest() {
+; CHECK-LABEL: @vntest(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: [[A:%.*]] = add i32 1, 3
+; CHECK-NEXT: [[D:%.*]] = add i32 8, 8
+; CHECK-NEXT: ret i32 [[D]]
+;
+bb:
+ %a = add i32 1, 3
+ %b = add i32 %a, %a
+ %c = add i32 %a, %a
+ %d = add i32 %b, %c
+ ret i32 %d
+}
+
+
+
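
For reference, -debug-counter=NAME-skip=S,NAME-count=C skips the first S
occurrences of the counted event and then executes the next C, blocking
everything after. With newgvn-vn-skip=1,newgvn-vn-count=2 the four adds
above land as follows (our reading, matching the test's own comment):

  %a   event 0   skipped (skip=1)
  %b   event 1   value-numbered (count window)
  %c   event 2   value-numbered (count window)
  %d   event 3   past the window, left alone
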
diff --git a/test/Other/debugcounter-predicateinfo.ll b/test/Other/debugcounter-predicateinfo.ll
new file mode 100644
index 000000000000..eb2ec09802fe
--- /dev/null
+++ b/test/Other/debugcounter-predicateinfo.ll
@@ -0,0 +1,39 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; REQUIRES: asserts
+; RUN: opt -debug-counter=predicateinfo-rename-skip=1,predicateinfo-rename-count=1 -print-predicateinfo -analyze < %s 2>&1 | FileCheck %s
+;; Test that, with debug counters on, we don't rename the first info, only the second.
+define fastcc void @barney() {
+; CHECK-LABEL: @barney(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: br label [[BB22:%.*]]
+; CHECK: bb22:
+; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i32 undef, 2
+; CHECK: [[TMP23_0:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[TMP23]])
+; CHECK-NEXT: br i1 [[TMP23]], label [[BB29:%.*]], label [[BB35:%.*]]
+; CHECK: bb29:
+; CHECK: [[TMP23_0_1:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[TMP23_0]])
+; CHECK-NEXT: br i1 [[TMP23]], label [[BB33:%.*]], label [[BB35]]
+; CHECK: bb33:
+; CHECK-NEXT: br i1 [[TMP23_0_1]], label [[BB35]], label [[BB35]]
+; CHECK: bb35:
+; CHECK-NEXT: unreachable
+;
+bb:
+ br label %bb22
+bb22: ; preds = %bb21
+ %tmp23 = icmp eq i32 undef, 2
+ br i1 %tmp23, label %bb29, label %bb35
+
+
+bb29: ; preds = %bb28
+;; We will not rename this one (we will still generate a copy of a copy for the next one)
+ br i1 %tmp23, label %bb33, label %bb35
+
+
+bb33: ; preds = %bb31
+;; We will rename this one
+ br i1 %tmp23, label %bb35, label %bb35
+
+bb35: ; preds = %bb33, %bb29, %bb22
+ unreachable
+}
diff --git a/test/Other/invariant.group.barrier.ll b/test/Other/invariant.group.barrier.ll
new file mode 100644
index 000000000000..5b9b54f784f5
--- /dev/null
+++ b/test/Other/invariant.group.barrier.ll
@@ -0,0 +1,62 @@
+; RUN: opt -S -early-cse < %s | FileCheck %s
+; RUN: opt -S -gvn < %s | FileCheck %s
+; RUN: opt -S -newgvn < %s | FileCheck %s
+; RUN: opt -S -O3 < %s | FileCheck %s
+
+; These tests check whether passes with CSE functionality can do CSE on
+; invariant.group.barrier calls, which is prohibited if there is a memory
+; clobber between the barrier calls.
+
+; CHECK-LABEL: define i8 @optimizable()
+define i8 @optimizable() {
+entry:
+ %ptr = alloca i8
+ store i8 42, i8* %ptr, !invariant.group !0
+; CHECK: call i8* @llvm.invariant.group.barrier
+ %ptr2 = call i8* @llvm.invariant.group.barrier(i8* %ptr)
+; CHECK-NOT: call i8* @llvm.invariant.group.barrier
+ %ptr3 = call i8* @llvm.invariant.group.barrier(i8* %ptr)
+; CHECK: call void @clobber(i8* {{.*}}%ptr)
+ call void @clobber(i8* %ptr)
+
+; CHECK: call void @use(i8* {{.*}}%ptr2)
+ call void @use(i8* %ptr2)
+; CHECK: call void @use(i8* {{.*}}%ptr2)
+ call void @use(i8* %ptr3)
+; CHECK: load i8, i8* %ptr2, {{.*}}!invariant.group
+ %v = load i8, i8* %ptr3, !invariant.group !0
+
+ ret i8 %v
+}
+
+; CHECK-LABEL: define i8 @unoptimizable()
+define i8 @unoptimizable() {
+entry:
+ %ptr = alloca i8
+ store i8 42, i8* %ptr, !invariant.group !0
+; CHECK: call i8* @llvm.invariant.group.barrier
+ %ptr2 = call i8* @llvm.invariant.group.barrier(i8* %ptr)
+ call void @clobber(i8* %ptr)
+; CHECK: call i8* @llvm.invariant.group.barrier
+ %ptr3 = call i8* @llvm.invariant.group.barrier(i8* %ptr)
+; CHECK: call void @clobber(i8* {{.*}}%ptr)
+ call void @clobber(i8* %ptr)
+; CHECK: call void @use(i8* {{.*}}%ptr2)
+ call void @use(i8* %ptr2)
+; CHECK: call void @use(i8* {{.*}}%ptr3)
+ call void @use(i8* %ptr3)
+; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
+ %v = load i8, i8* %ptr3, !invariant.group !0
+
+ ret i8 %v
+}
+
+declare void @use(i8* readonly)
+
+declare void @clobber(i8*)
+; CHECK: Function Attrs: argmemonly nounwind readonly
+; CHECK-NEXT: declare i8* @llvm.invariant.group.barrier(i8*)
+declare i8* @llvm.invariant.group.barrier(i8*)
+
+!0 = !{}
+
diff --git a/test/Other/lit-globbing.ll b/test/Other/lit-globbing.ll
new file mode 100644
index 000000000000..5a668a90a40b
--- /dev/null
+++ b/test/Other/lit-globbing.ll
@@ -0,0 +1,28 @@
+RUN: echo TA > %T/TA.txt
+RUN: echo TB > %T/TB.txt
+RUN: echo TAB > %T/TAB.txt
+
+RUN: echo %T/TA* | FileCheck -check-prefix=STAR %s
+RUN: echo %T/'TA'* | FileCheck -check-prefix=STAR %s
+RUN: echo %T/T'A'* | FileCheck -check-prefix=STAR %s
+
+RUN: echo %T/T?.txt | FileCheck -check-prefix=QUESTION %s
+RUN: echo %T/'T'?.txt | FileCheck -check-prefix=QUESTION %s
+
+RUN: echo %T/T??.txt | FileCheck -check-prefix=QUESTION2 %s
+RUN: echo %T/'T'??.txt | FileCheck -check-prefix=QUESTION2 %s
+
+RUN: echo 'T*' 'T?.txt' 'T??.txt' | FileCheck -check-prefix=QUOTEDARGS %s
+
+STAR-NOT: TB.txt
+STAR: {{(TA.txt.*TAB.txt|TAB.txt.*TA.txt)}}
+
+QUESTION-NOT: TAB.txt
+QUESTION: {{(TA.txt.*TB.txt|TB.txt.*TA.txt)}}
+
+QUESTION2-NOT: TA.txt
+QUESTION2-NOT: TB.txt
+QUESTION2: TAB.txt
+
+QUOTEDARGS-NOT: .txt
+QUOTEDARGS: T* T?.txt T??.txt
diff --git a/test/Other/loop-pm-invalidation.ll b/test/Other/loop-pm-invalidation.ll
new file mode 100644
index 000000000000..d2a0e23a7200
--- /dev/null
+++ b/test/Other/loop-pm-invalidation.ll
@@ -0,0 +1,277 @@
+; Test that the loop PM infrastructure is invalidated appropriately.
+;
+; Check that we always nuke the LPM stuff when the loops themselves are
+; invalidated.
+; RUN: opt -disable-output -disable-verify -debug-pass-manager %s 2>&1 \
+; RUN: -passes='loop(no-op-loop),invalidate<loops>,loop(no-op-loop)' \
+; RUN: | FileCheck %s --check-prefix=CHECK-LOOP-INV
+;
+; If we ended up building the standard analyses, their invalidation should nuke
+; stuff as well.
+; RUN: opt -disable-output -disable-verify -debug-pass-manager %s 2>&1 \
+; RUN: -passes='loop(no-op-loop),invalidate<scalar-evolution>,loop(no-op-loop)' \
+; RUN: | FileCheck %s --check-prefix=CHECK-SCEV-INV
+;
+; Also provide a test that can delete loops after populating analyses for them.
+; RUN: opt -disable-output -disable-verify -debug-pass-manager %s 2>&1 \
+; RUN: -passes='loop(no-op-loop,loop-deletion),invalidate<scalar-evolution>,loop(no-op-loop)' \
+; RUN: | FileCheck %s --check-prefix=CHECK-SCEV-INV-AFTER-DELETE
+
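For readers new to the -passes syntax: those pipeline strings are parsed by
PassBuilder, so each RUN line above is roughly equivalent to the sketch below.
This is a hedged approximation of opt's setup code; error handling is omitted
and parsePassPipeline's exact signature has varied across LLVM releases.

  #include "llvm/Passes/PassBuilder.h"
  using namespace llvm;

  static void runPipeline(Module &M) {
    PassBuilder PB;
    LoopAnalysisManager LAM;
    FunctionAnalysisManager FAM;
    CGSCCAnalysisManager CGAM;
    ModuleAnalysisManager MAM;
    // Register the standard analyses and wire the proxies between the
    // module, CGSCC, function, and loop levels.
    PB.registerModuleAnalyses(MAM);
    PB.registerCGSCCAnalyses(CGAM);
    PB.registerFunctionAnalyses(FAM);
    PB.registerLoopAnalyses(LAM);
    PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

    ModulePassManager MPM;
    // Same pipeline as the first RUN line: run a no-op loop pass, drop the
    // loop analyses, then rebuild everything for the second loop pass run.
    PB.parsePassPipeline(MPM, "loop(no-op-loop),invalidate<loops>,loop(no-op-loop)");
    MPM.run(M, MAM);
  }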
+define void @no_loops() {
+; CHECK-LOOP-INV-LABEL: Running pass: FunctionToLoopPassAdaptor<{{.*}}> on no_loops
+; CHECK-LOOP-INV-NEXT: Running analysis: LoopAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: DominatorTreeAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-LOOP-INV-NEXT: Running pass: InvalidateAnalysisPass<{{.*}}LoopAnalysis
+; CHECK-LOOP-INV-NEXT: Invalidating all non-preserved analyses
+; CHECK-LOOP-INV-NEXT: Invalidating analysis: LoopAnalysis
+; CHECK-LOOP-INV-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}> on no_loops
+; CHECK-LOOP-INV-NEXT: Running analysis: LoopAnalysis
+; CHECK-LOOP-INV-NEXT: Finished {{.*}}Function pass manager run.
+;
+; CHECK-SCEV-INV-LABEL: Running pass: FunctionToLoopPassAdaptor<{{.*}}> on no_loops
+; CHECK-SCEV-INV-NEXT: Running analysis: LoopAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: DominatorTreeAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-SCEV-INV-NEXT: Running pass: InvalidateAnalysisPass<{{.*}}ScalarEvolutionAnalysis
+; CHECK-SCEV-INV-NEXT: Invalidating all non-preserved analyses
+; CHECK-SCEV-INV-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}> on no_loops
+; CHECK-SCEV-INV-NEXT: Finished {{.*}}Function pass manager run.
+
+entry:
+ ret void
+}
+
+define void @one_loop(i1* %ptr) {
+; CHECK-LOOP-INV-LABEL: Running pass: FunctionToLoopPassAdaptor<{{.*}}> on one_loop
+; CHECK-LOOP-INV-NEXT: Running analysis: LoopAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: DominatorTreeAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: AAManager
+; CHECK-LOOP-INV-NEXT: Running analysis: TargetLibraryAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: ScalarEvolutionAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: TargetIRAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-LOOP-INV-NEXT: Starting {{.*}}Loop pass manager run.
+; CHECK-LOOP-INV-NEXT: Running pass: NoOpLoopPass
+; CHECK-LOOP-INV-NEXT: Finished {{.*}}Loop pass manager run.
+; CHECK-LOOP-INV-NEXT: Running pass: InvalidateAnalysisPass<{{.*}}LoopAnalysis
+; CHECK-LOOP-INV-NEXT: Invalidating all non-preserved analyses
+; CHECK-LOOP-INV-NEXT: Clearing all analysis results for: l0.header
+; CHECK-LOOP-INV-NEXT: Invalidating analysis: LoopAnalysis
+; CHECK-LOOP-INV-NEXT: Invalidating analysis: ScalarEvolutionAnalysis
+; CHECK-LOOP-INV-NEXT: Invalidating analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-LOOP-INV-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}> on one_loop
+; CHECK-LOOP-INV-NEXT: Running analysis: LoopAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: ScalarEvolutionAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-LOOP-INV-NEXT: Starting {{.*}}Loop pass manager run.
+; CHECK-LOOP-INV-NEXT: Running pass: NoOpLoopPass
+; CHECK-LOOP-INV-NEXT: Finished {{.*}}Loop pass manager run.
+; CHECK-LOOP-INV-NEXT: Finished {{.*}}Function pass manager run.
+;
+; CHECK-SCEV-INV-LABEL: Running pass: FunctionToLoopPassAdaptor<{{.*}}> on one_loop
+; CHECK-SCEV-INV-NEXT: Running analysis: LoopAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: DominatorTreeAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: AAManager
+; CHECK-SCEV-INV-NEXT: Running analysis: TargetLibraryAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: ScalarEvolutionAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: TargetIRAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-SCEV-INV-NEXT: Starting {{.*}}Loop pass manager run.
+; CHECK-SCEV-INV-NEXT: Running pass: NoOpLoopPass
+; CHECK-SCEV-INV-NEXT: Finished {{.*}}Loop pass manager run.
+; CHECK-SCEV-INV-NEXT: Running pass: InvalidateAnalysisPass<{{.*}}ScalarEvolutionAnalysis
+; CHECK-SCEV-INV-NEXT: Invalidating all non-preserved analyses
+; CHECK-SCEV-INV-NEXT: Clearing all analysis results for: l0.header
+; CHECK-SCEV-INV-NEXT: Invalidating analysis: ScalarEvolutionAnalysis
+; CHECK-SCEV-INV-NEXT: Invalidating analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-SCEV-INV-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}> on one_loop
+; CHECK-SCEV-INV-NEXT: Running analysis: ScalarEvolutionAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-SCEV-INV-NEXT: Starting {{.*}}Loop pass manager run.
+; CHECK-SCEV-INV-NEXT: Running pass: NoOpLoopPass
+; CHECK-SCEV-INV-NEXT: Finished {{.*}}Loop pass manager run.
+; CHECK-SCEV-INV-NEXT: Finished {{.*}}Function pass manager run.
+
+entry:
+ br label %l0.header
+
+l0.header:
+ %flag0 = load volatile i1, i1* %ptr
+ br i1 %flag0, label %l0.header, label %exit
+
+exit:
+ ret void
+}
+
+define void @nested_loops(i1* %ptr) {
+; CHECK-LOOP-INV-LABEL: Running pass: FunctionToLoopPassAdaptor<{{.*}}> on nested_loops
+; CHECK-LOOP-INV-NEXT: Running analysis: LoopAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: DominatorTreeAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: AAManager
+; CHECK-LOOP-INV-NEXT: Running analysis: TargetLibraryAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: ScalarEvolutionAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: TargetIRAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-LOOP-INV-NEXT: Starting {{.*}}Loop pass manager run.
+; CHECK-LOOP-INV-NEXT: Running pass: NoOpLoopPass
+; CHECK-LOOP-INV-NEXT: Finished {{.*}}Loop pass manager run.
+; CHECK-LOOP-INV-NEXT: Starting {{.*}}Loop pass manager run.
+; CHECK-LOOP-INV-NEXT: Running pass: NoOpLoopPass
+; CHECK-LOOP-INV: Finished {{.*}}Loop pass manager run.
+; CHECK-LOOP-INV-NEXT: Running pass: InvalidateAnalysisPass<{{.*}}LoopAnalysis
+; CHECK-LOOP-INV-NEXT: Invalidating all non-preserved analyses
+; CHECK-LOOP-INV-NEXT: Clearing all analysis results for: l.0.header
+; CHECK-LOOP-INV-NEXT: Clearing all analysis results for: l.0.0.header
+; CHECK-LOOP-INV-NEXT: Invalidating analysis: LoopAnalysis
+; CHECK-LOOP-INV-NEXT: Invalidating analysis: ScalarEvolutionAnalysis
+; CHECK-LOOP-INV-NEXT: Invalidating analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-LOOP-INV-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}> on nested_loops
+; CHECK-LOOP-INV-NEXT: Running analysis: LoopAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: ScalarEvolutionAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-LOOP-INV-NEXT: Starting {{.*}}Loop pass manager run.
+; CHECK-LOOP-INV-NEXT: Running pass: NoOpLoopPass
+; CHECK-LOOP-INV-NEXT: Finished {{.*}}Loop pass manager run.
+; CHECK-LOOP-INV-NEXT: Starting {{.*}}Loop pass manager run.
+; CHECK-LOOP-INV-NEXT: Running pass: NoOpLoopPass
+; CHECK-LOOP-INV: Finished {{.*}}Loop pass manager run.
+; CHECK-LOOP-INV-NEXT: Finished {{.*}}Function pass manager run.
+;
+; CHECK-SCEV-INV-LABEL: Running pass: FunctionToLoopPassAdaptor<{{.*}}> on nested_loops
+; CHECK-SCEV-INV-NEXT: Running analysis: LoopAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: DominatorTreeAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: AAManager
+; CHECK-SCEV-INV-NEXT: Running analysis: TargetLibraryAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: ScalarEvolutionAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: TargetIRAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-SCEV-INV-NEXT: Starting {{.*}}Loop pass manager run.
+; CHECK-SCEV-INV-NEXT: Running pass: NoOpLoopPass
+; CHECK-SCEV-INV-NEXT: Finished {{.*}}Loop pass manager run.
+; CHECK-SCEV-INV-NEXT: Starting {{.*}}Loop pass manager run.
+; CHECK-SCEV-INV-NEXT: Running pass: NoOpLoopPass
+; CHECK-SCEV-INV: Finished {{.*}}Loop pass manager run.
+; CHECK-SCEV-INV-NEXT: Running pass: InvalidateAnalysisPass<{{.*}}ScalarEvolutionAnalysis
+; CHECK-SCEV-INV-NEXT: Invalidating all non-preserved analyses
+; CHECK-SCEV-INV-NEXT: Clearing all analysis results for: l.0.header
+; CHECK-SCEV-INV-NEXT: Clearing all analysis results for: l.0.0.header
+; CHECK-SCEV-INV-NEXT: Invalidating analysis: ScalarEvolutionAnalysis
+; CHECK-SCEV-INV-NEXT: Invalidating analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-SCEV-INV-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}> on nested_loops
+; CHECK-SCEV-INV-NEXT: Running analysis: ScalarEvolutionAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-SCEV-INV-NEXT: Starting {{.*}}Loop pass manager run.
+; CHECK-SCEV-INV-NEXT: Running pass: NoOpLoopPass
+; CHECK-SCEV-INV-NEXT: Finished {{.*}}Loop pass manager run.
+; CHECK-SCEV-INV-NEXT: Starting {{.*}}Loop pass manager run.
+; CHECK-SCEV-INV-NEXT: Running pass: NoOpLoopPass
+; CHECK-SCEV-INV: Finished {{.*}}Loop pass manager run.
+; CHECK-SCEV-INV-NEXT: Finished {{.*}}Function pass manager run.
+
+entry:
+ br label %l.0.header
+
+l.0.header:
+ br label %l.0.0.header
+
+l.0.0.header:
+ %flag.0.0 = load volatile i1, i1* %ptr
+ br i1 %flag.0.0, label %l.0.0.header, label %l.0.latch
+
+l.0.latch:
+ %flag.0 = load volatile i1, i1* %ptr
+ br i1 %flag.0, label %l.0.header, label %exit
+
+exit:
+ ret void
+}
+
+define void @dead_loop() {
+; CHECK-LOOP-INV-LABEL: Running pass: FunctionToLoopPassAdaptor<{{.*}}> on dead_loop
+; CHECK-LOOP-INV-NEXT: Running analysis: LoopAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: DominatorTreeAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: AAManager
+; CHECK-LOOP-INV-NEXT: Running analysis: TargetLibraryAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: ScalarEvolutionAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: TargetIRAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-LOOP-INV-NEXT: Starting {{.*}}Loop pass manager run.
+; CHECK-LOOP-INV-NEXT: Running pass: NoOpLoopPass
+; CHECK-LOOP-INV-NEXT: Finished {{.*}}Loop pass manager run.
+; CHECK-LOOP-INV-NEXT: Running pass: InvalidateAnalysisPass<{{.*}}LoopAnalysis
+; CHECK-LOOP-INV-NEXT: Invalidating all non-preserved analyses
+; CHECK-LOOP-INV-NEXT: Clearing all analysis results for: l0.header
+; CHECK-LOOP-INV-NEXT: Invalidating analysis: LoopAnalysis
+; CHECK-LOOP-INV-NEXT: Invalidating analysis: ScalarEvolutionAnalysis
+; CHECK-LOOP-INV-NEXT: Invalidating analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-LOOP-INV-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}> on dead_loop
+; CHECK-LOOP-INV-NEXT: Running analysis: LoopAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: ScalarEvolutionAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-LOOP-INV-NEXT: Starting {{.*}}Loop pass manager run.
+; CHECK-LOOP-INV-NEXT: Running pass: NoOpLoopPass
+; CHECK-LOOP-INV-NEXT: Finished {{.*}}Loop pass manager run.
+; CHECK-LOOP-INV-NEXT: Finished {{.*}}Function pass manager run.
+;
+; CHECK-SCEV-INV-LABEL: Running pass: FunctionToLoopPassAdaptor<{{.*}}> on dead_loop
+; CHECK-SCEV-INV-NEXT: Running analysis: LoopAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: DominatorTreeAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: AAManager
+; CHECK-SCEV-INV-NEXT: Running analysis: TargetLibraryAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: ScalarEvolutionAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: TargetIRAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-SCEV-INV-NEXT: Starting {{.*}}Loop pass manager run.
+; CHECK-SCEV-INV-NEXT: Running pass: NoOpLoopPass
+; CHECK-SCEV-INV-NEXT: Finished {{.*}}Loop pass manager run.
+; CHECK-SCEV-INV-NEXT: Running pass: InvalidateAnalysisPass<{{.*}}ScalarEvolutionAnalysis
+; CHECK-SCEV-INV-NEXT: Invalidating all non-preserved analyses
+; CHECK-SCEV-INV-NEXT: Clearing all analysis results for: l0.header
+; CHECK-SCEV-INV-NEXT: Invalidating analysis: ScalarEvolutionAnalysis
+; CHECK-SCEV-INV-NEXT: Invalidating analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-SCEV-INV-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}> on dead_loop
+; CHECK-SCEV-INV-NEXT: Running analysis: ScalarEvolutionAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-SCEV-INV-NEXT: Starting {{.*}}Loop pass manager run.
+; CHECK-SCEV-INV-NEXT: Running pass: NoOpLoopPass
+; CHECK-SCEV-INV-NEXT: Finished {{.*}}Loop pass manager run.
+; CHECK-SCEV-INV-NEXT: Finished {{.*}}Function pass manager run.
+;
+; CHECK-SCEV-INV-AFTER-DELETE-LABEL: Running pass: FunctionToLoopPassAdaptor<{{.*}}> on dead_loop
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running analysis: LoopAnalysis
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running analysis: DominatorTreeAnalysis
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running analysis: AAManager
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running analysis: TargetLibraryAnalysis
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running analysis: ScalarEvolutionAnalysis
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running analysis: TargetIRAnalysis
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Starting {{.*}}Loop pass manager run.
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running pass: NoOpLoopPass
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running pass: LoopDeletionPass
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Clearing all analysis results for:
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Finished {{.*}}Loop pass manager run.
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Invalidating all non-preserved analyses
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running pass: InvalidateAnalysisPass<{{.*}}ScalarEvolutionAnalysis
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Invalidating all non-preserved analyses
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Invalidating analysis: ScalarEvolutionAnalysis
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Invalidating analysis: InnerAnalysisManagerProxy<{{.*}}Loop
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}> on dead_loop
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Finished {{.*}}Function pass manager run.
+
+entry:
+ br label %l0.header
+
+l0.header:
+ br i1 false, label %l0.header, label %exit
+
+exit:
+ ret void
+}
diff --git a/test/Other/new-pass-manager.ll b/test/Other/new-pass-manager.ll
index eae2d855e92f..bf8e596d118b 100644
--- a/test/Other/new-pass-manager.ll
+++ b/test/Other/new-pass-manager.ll
@@ -384,95 +384,6 @@
; CHECK-O0-NEXT: Finished llvm::Module pass manager run
; RUN: opt -disable-output -disable-verify -debug-pass-manager \
-; RUN: -passes='default<O1>' %s 2>&1 \
-; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O1
-; RUN: opt -disable-output -disable-verify -debug-pass-manager \
-; RUN: -passes='default<O2>' %s 2>&1 \
-; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O2
-; RUN: opt -disable-output -disable-verify -debug-pass-manager \
-; RUN: -passes='default<Os>' %s 2>&1 \
-; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-Os
-; RUN: opt -disable-output -disable-verify -debug-pass-manager \
-; RUN: -passes='default<Oz>' %s 2>&1 \
-; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-Oz
-; RUN: opt -disable-output -disable-verify -debug-pass-manager \
-; RUN: -passes='lto-pre-link<O2>' %s 2>&1 \
-; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O2
-; CHECK-O: Starting llvm::Module pass manager run
-; CHECK-O: Starting llvm::Module pass manager run
-; CHECK-O: Running pass: ForceFunctionAttrsPass
-; CHECK-O: Running pass: InferFunctionAttrsPass
-; CHECK-O: Starting llvm::Function pass manager run.
-; CHECK-O: Running pass: SimplifyCFGPass
-; CHECK-O: Running pass: SROA
-; CHECK-O: Running pass: EarlyCSEPass
-; CHECK-O: Running pass: LowerExpectIntrinsicPass
-; CHECK-O: Running pass: GVNHoistPass
-; CHECK-O: Finished llvm::Function pass manager run.
-; CHECK-O: Running pass: IPSCCPPass
-; CHECK-O: Running pass: GlobalOptPass
-; CHECK-O: Running pass: ModuleToFunctionPassAdaptor<{{.*}}PromotePass>
-; CHECK-O: Running pass: DeadArgumentEliminationPass
-; CHECK-O: Starting llvm::Function pass manager run.
-; CHECK-O: Running pass: InstCombinePass
-; CHECK-O: Running pass: SimplifyCFGPass
-; CHECK-O: Finished llvm::Function pass manager run.
-; CHECK-O: Starting CGSCC pass manager run.
-; CHECK-O: Starting llvm::Function pass manager run.
-; CHECK-O: Running pass: SROA
-; CHECK-O: Running pass: EarlyCSEPass
-; CHECK-O: Running pass: SpeculativeExecutionPass
-; CHECK-O: Running pass: JumpThreadingPass
-; CHECK-O: Running pass: CorrelatedValuePropagationPass
-; CHECK-O: Running pass: SimplifyCFGPass
-; CHECK-O: Running pass: InstCombinePass
-; CHECK-O1: Running pass: LibCallsShrinkWrapPass
-; CHECK-O2: Running pass: LibCallsShrinkWrapPass
-; CHECK-Os-NOT: Running pass: LibCallsShrinkWrapPass
-; CHECK-Oz-NOT: Running pass: LibCallsShrinkWrapPass
-; CHECK-O: Running pass: TailCallElimPass
-; CHECK-O: Running pass: SimplifyCFGPass
-; CHECK-O: Running pass: ReassociatePass
-; CHECK-O: Starting Loop pass manager run.
-; CHECK-O: Finished Loop pass manager run.
-; CHECK-O: Running pass: SimplifyCFGPass
-; CHECK-O: Running pass: InstCombinePass
-; CHECK-O: Starting Loop pass manager run.
-; CHECK-O: Finished Loop pass manager run.
-; CHECK-O: Running pass: MemCpyOptPass
-; CHECK-O: Running pass: SCCPPass
-; CHECK-O: Running pass: BDCEPass
-; CHECK-O: Running pass: InstCombinePass
-; CHECK-O: Running pass: JumpThreadingPass
-; CHECK-O: Running pass: CorrelatedValuePropagationPass
-; CHECK-O: Running pass: DSEPass
-; CHECK-O: Running pass: ADCEPass
-; CHECK-O: Running pass: SimplifyCFGPass
-; CHECK-O: Running pass: InstCombinePass
-; CHECK-O: Finished llvm::Function pass manager run.
-; CHECK-O: Finished CGSCC pass manager run.
-; CHECK-O: Running pass: EliminateAvailableExternallyPass
-; CHECK-O: Running pass: ReversePostOrderFunctionAttrsPass
-; CHECK-O: Starting llvm::Function pass manager run.
-; CHECK-O: Running pass: Float2IntPass
-; CHECK-O: Running pass: LoopDistributePass
-; CHECK-O: Running pass: InstCombinePass
-; CHECK-O: Running pass: SLPVectorizerPass
-; CHECK-O: Running pass: SimplifyCFGPass
-; CHECK-O: Running pass: InstCombinePass
-; CHECK-O: Running pass: AlignmentFromAssumptionsPass
-; CHECK-O: Finished llvm::Function pass manager run.
-; CHECK-O: Running pass: GlobalDCEPass
-; CHECK-O: Running pass: ConstantMergePass
-
-; RUN: opt -disable-output -disable-verify -debug-pass-manager \
-; RUN: -passes='lto<O2>' %s 2>&1 \
-; RUN: | FileCheck %s --check-prefix=CHECK-LTO-O2
-; CHECK-LTO-O2: Starting llvm::Module pass manager run
-; CHECK-LTO-O2: Running pass: InstCombinePass
-; CHECK-LTO-O2: Running pass: SimplifyCFGPass
-
-; RUN: opt -disable-output -disable-verify -debug-pass-manager \
; RUN: -passes='repeat<3>(no-op-module)' %s 2>&1 \
; RUN: | FileCheck %s --check-prefix=CHECK-REPEAT-MODULE-PASS
; CHECK-REPEAT-MODULE-PASS: Starting llvm::Module pass manager run
@@ -539,14 +450,15 @@
; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}>
; CHECK-REPEAT-LOOP-PASS-NEXT: Starting llvm::Function pass manager run
; CHECK-REPEAT-LOOP-PASS-NEXT: Running pass: FunctionToLoopPassAdaptor
-; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}>
; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: LoopAnalysis
; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: DominatorTreeAnalysis
+; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-REPEAT-LOOP-PASS-NEXT: Invalidating all non-preserved analyses
; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: AAManager
; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: TargetLibraryAnalysis
-; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: AssumptionAnalysis
; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: ScalarEvolutionAnalysis
; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: TargetIRAnalysis
+; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}>
; CHECK-REPEAT-LOOP-PASS-NEXT: Starting Loop pass manager run
; CHECK-REPEAT-LOOP-PASS-NEXT: Running pass: RepeatedPass
; CHECK-REPEAT-LOOP-PASS-NEXT: Starting Loop pass manager run
@@ -560,6 +472,7 @@
; CHECK-REPEAT-LOOP-PASS-NEXT: Finished Loop pass manager run
; CHECK-REPEAT-LOOP-PASS-NEXT: Finished Loop pass manager run
; CHECK-REPEAT-LOOP-PASS-NEXT: Finished llvm::Function pass manager run
+; CHECK-REPEAT-LOOP-PASS-NEXT: Invalidating all non-preserved analyses
; CHECK-REPEAT-LOOP-PASS-NEXT: Finished llvm::Module pass manager run
define void @foo(i1 %x, i8* %p1, i8* %p2) {
diff --git a/test/Other/new-pm-defaults.ll b/test/Other/new-pm-defaults.ll
new file mode 100644
index 000000000000..7657f184b28c
--- /dev/null
+++ b/test/Other/new-pm-defaults.ll
@@ -0,0 +1,191 @@
+; The IR below was crafted so as to:
+; 1) Have a loop, so we create a loop pass manager
+; 2) Be "immutable" in the sense that no pass in the standard
+;    pipeline will modify it.
+; Since no transformations take place, we don't expect any analyses
+; to be invalidated.
+; Any invalidation that shows up here is a bug, unless we started modifying
+; the IR, in which case we need to make it immutable harder.
+
+; RUN: opt -disable-verify -debug-pass-manager \
+; RUN: -passes='default<O1>' -S %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O1
+; RUN: opt -disable-verify -debug-pass-manager \
+; RUN: -passes='default<O2>' -S %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O2
+; RUN: opt -disable-verify -debug-pass-manager \
+; RUN: -passes='default<O3>' -S %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O3
+; RUN: opt -disable-verify -debug-pass-manager \
+; RUN: -passes='default<Os>' -S %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-Os
+; RUN: opt -disable-verify -debug-pass-manager \
+; RUN: -passes='default<Oz>' -S %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-Oz
+; RUN: opt -disable-verify -debug-pass-manager \
+; RUN: -passes='lto-pre-link<O2>' -S %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O2
+
+; CHECK-O: Starting llvm::Module pass manager run.
+; CHECK-O-NEXT: Running pass: PassManager<{{.*}}Module{{.*}}>
+; CHECK-O-NEXT: Starting llvm::Module pass manager run.
+; CHECK-O-NEXT: Running pass: ForceFunctionAttrsPass
+; CHECK-O-NEXT: Running pass: InferFunctionAttrsPass
+; CHECK-O-NEXT: Running analysis: TargetLibraryAnalysis
+; CHECK-O-NEXT: Running pass: ModuleToFunctionPassAdaptor<{{.*}}PassManager{{.*}}>
+; CHECK-O-NEXT: Running analysis: InnerAnalysisManagerProxy
+; CHECK-O-NEXT: Starting llvm::Function pass manager run.
+; CHECK-O-NEXT: Running pass: SimplifyCFGPass
+; CHECK-O-NEXT: Running analysis: TargetIRAnalysis
+; CHECK-O-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-O-NEXT: Running pass: SROA
+; CHECK-O-NEXT: Running analysis: DominatorTreeAnalysis
+; CHECK-O-NEXT: Running pass: EarlyCSEPass
+; CHECK-O-NEXT: Running analysis: TargetLibraryAnalysis
+; CHECK-O-NEXT: Running pass: LowerExpectIntrinsicPass
+; CHECK-O-NEXT: Running pass: GVNHoistPass
+; CHECK-O-NEXT: Running analysis: AAManager
+; CHECK-O-NEXT: Running analysis: MemoryDependenceAnalysis
+; CHECK-O-NEXT: Running analysis: MemorySSAAnalysis
+; CHECK-O-NEXT: Finished llvm::Function pass manager run.
+; CHECK-O-NEXT: Running pass: IPSCCPPass
+; CHECK-O-NEXT: Running pass: GlobalOptPass
+; CHECK-O-NEXT: Running pass: ModuleToFunctionPassAdaptor<{{.*}}PromotePass>
+; CHECK-O-NEXT: Running pass: DeadArgumentEliminationPass
+; CHECK-O-NEXT: Running pass: ModuleToFunctionPassAdaptor<{{.*}}PassManager{{.*}}>
+; CHECK-O-NEXT: Starting llvm::Function pass manager run.
+; CHECK-O-NEXT: Running pass: InstCombinePass
+; CHECK-O-NEXT: Running pass: SimplifyCFGPass
+; CHECK-O-NEXT: Finished llvm::Function pass manager run.
+; CHECK-O-NEXT: Running pass: PGOIndirectCallPromotion
+; CHECK-O-NEXT: Running pass: RequireAnalysisPass<{{.*}}GlobalsAA
+; CHECK-O-NEXT: Running analysis: GlobalsAA
+; CHECK-O-NEXT: Running analysis: CallGraphAnalysis
+; CHECK-O-NEXT: Running pass: ModuleToPostOrderCGSCCPassAdaptor<{{.*}}LazyCallGraph{{.*}}>
+; CHECK-O-NEXT: Running analysis: InnerAnalysisManagerProxy
+; CHECK-O-NEXT: Running analysis: LazyCallGraphAnalysis
+; CHECK-O-NEXT: Starting CGSCC pass manager run.
+; CHECK-O-NEXT: Running pass: InlinerPass
+; CHECK-O-NEXT: Running analysis: OuterAnalysisManagerProxy<{{.*}}LazyCallGraph{{.*}}>
+; CHECK-O-NEXT: Running pass: PostOrderFunctionAttrsPass
+; CHECK-O-NEXT: Running analysis: FunctionAnalysisManagerCGSCCProxy
+; CHECK-O3-NEXT: Running pass: ArgumentPromotionPass
+; CHECK-O-NEXT: Running pass: CGSCCToFunctionPassAdaptor<{{.*}}PassManager{{.*}}>
+; CHECK-O-NEXT: Starting llvm::Function pass manager run.
+; CHECK-O-NEXT: Running pass: SROA
+; CHECK-O-NEXT: Running pass: EarlyCSEPass
+; CHECK-O-NEXT: Running pass: SpeculativeExecutionPass
+; CHECK-O-NEXT: Running pass: JumpThreadingPass
+; CHECK-O-NEXT: Running analysis: LazyValueAnalysis
+; CHECK-O-NEXT: Running pass: CorrelatedValuePropagationPass
+; CHECK-O-NEXT: Running pass: SimplifyCFGPass
+; CHECK-O-NEXT: Running pass: InstCombinePass
+; CHECK-O1-NEXT: Running pass: LibCallsShrinkWrapPass
+; CHECK-O2-NEXT: Running pass: LibCallsShrinkWrapPass
+; CHECK-O3-NEXT: Running pass: LibCallsShrinkWrapPass
+; CHECK-O-NEXT: Running pass: TailCallElimPass
+; CHECK-O-NEXT: Running pass: SimplifyCFGPass
+; CHECK-O-NEXT: Running pass: ReassociatePass
+; CHECK-O-NEXT: Running pass: RequireAnalysisPass<{{.*}}OptimizationRemarkEmitterAnalysis
+; CHECK-O-NEXT: Running analysis: OptimizationRemarkEmitterAnalysis
+; CHECK-O-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}LoopStandardAnalysisResults{{.*}}>
+; CHECK-O-NEXT: Running analysis: LoopAnalysis
+; CHECK-O-NEXT: Running analysis: ScalarEvolutionAnalysis
+; CHECK-O-NEXT: Running analysis: InnerAnalysisManagerProxy
+; CHECK-O-NEXT: Starting Loop pass manager run.
+; CHECK-O-NEXT: Running pass: LoopRotatePass
+; CHECK-O-NEXT: Running pass: LICM
+; CHECK-O-NEXT: Running analysis: OuterAnalysisManagerProxy
+; CHECK-O-NEXT: Finished Loop pass manager run.
+; CHECK-O-NEXT: Running pass: SimplifyCFGPass
+; CHECK-O-NEXT: Running pass: InstCombinePass
+; CHECK-O-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}LoopStandardAnalysisResults{{.*}}>
+; CHECK-O-NEXT: Starting Loop pass manager run.
+; CHECK-O-NEXT: Running pass: IndVarSimplifyPass
+; CHECK-O-NEXT: Running pass: LoopIdiomRecognizePass
+; CHECK-O-NEXT: Running pass: LoopDeletionPass
+; CHECK-O-NEXT: Running pass: LoopUnrollPass
+; CHECK-O-NEXT: Finished Loop pass manager run.
+; CHECK-Os-NEXT: Running pass: MergedLoadStoreMotionPass
+; CHECK-Os-NEXT: Running pass: GVN
+; CHECK-Oz-NEXT: Running pass: MergedLoadStoreMotionPass
+; CHECK-Oz-NEXT: Running pass: GVN
+; CHECK-O2-NEXT: Running pass: MergedLoadStoreMotionPass
+; CHECK-O2-NEXT: Running pass: GVN
+; CHECK-O3-NEXT: Running pass: MergedLoadStoreMotionPass
+; CHECK-O3-NEXT: Running pass: GVN
+; CHECK-O-NEXT: Running pass: MemCpyOptPass
+; CHECK-O-NEXT: Running pass: SCCPPass
+; CHECK-O-NEXT: Running pass: BDCEPass
+; CHECK-O-NEXT: Running analysis: DemandedBitsAnalysis
+; CHECK-O-NEXT: Running pass: InstCombinePass
+; CHECK-O-NEXT: Running pass: JumpThreadingPass
+; CHECK-O-NEXT: Running pass: CorrelatedValuePropagationPass
+; CHECK-O-NEXT: Running pass: DSEPass
+; CHECK-O-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}LICMPass{{.*}}>
+; CHECK-O-NEXT: Running pass: ADCEPass
+; CHECK-O-NEXT: Running analysis: PostDominatorTreeAnalysis
+; CHECK-O-NEXT: Running pass: SimplifyCFGPass
+; CHECK-O-NEXT: Running pass: InstCombinePass
+; CHECK-O-NEXT: Finished llvm::Function pass manager run.
+; CHECK-O-NEXT: Finished CGSCC pass manager run.
+; CHECK-O-NEXT: Running pass: EliminateAvailableExternallyPass
+; CHECK-O-NEXT: Running pass: ReversePostOrderFunctionAttrsPass
+; CHECK-O-NEXT: Running pass: RequireAnalysisPass<{{.*}}GlobalsAA
+; CHECK-O-NEXT: Running pass: ModuleToFunctionPassAdaptor<{{.*}}PassManager{{.*}}>
+; CHECK-O-NEXT: Starting llvm::Function pass manager run.
+; CHECK-O-NEXT: Running pass: Float2IntPass
+; CHECK-O-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}LoopRotatePass
+; CHECK-O-NEXT: Running pass: LoopDistributePass
+; CHECK-O-NEXT: Running pass: LoopVectorizePass
+; CHECK-O-NEXT: Running analysis: BlockFrequencyAnalysis
+; CHECK-O-NEXT: Running analysis: BranchProbabilityAnalysis
+; CHECK-O-NEXT: Running pass: LoopLoadEliminationPass
+; CHECK-O-NEXT: Running analysis: LoopAccessAnalysis
+; CHECK-O-NEXT: Running pass: InstCombinePass
+; CHECK-O-NEXT: Running pass: SLPVectorizerPass
+; CHECK-O-NEXT: Running pass: SimplifyCFGPass
+; CHECK-O-NEXT: Running pass: InstCombinePass
+; CHECK-O-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}LoopUnrollPass
+; CHECK-O-NEXT: Running pass: InstCombinePass
+; CHECK-O-NEXT: Running pass: RequireAnalysisPass<{{.*}}OptimizationRemarkEmitterAnalysis
+; CHECK-O-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}LICMPass
+; CHECK-O-NEXT: Running pass: AlignmentFromAssumptionsPass
+; CHECK-O-NEXT: Running pass: LoopSinkPass
+; CHECK-O-NEXT: Running pass: InstSimplifierPass
+; CHECK-O-NEXT: Finished llvm::Function pass manager run.
+; CHECK-O-NEXT: Running pass: GlobalDCEPass
+; CHECK-O-NEXT: Running pass: ConstantMergePass
+; CHECK-O-NEXT: Finished llvm::Module pass manager run.
+; CHECK-O-NEXT: Running pass: PrintModulePass
+;
+; Make sure we get the IR back out without changes when we print the module.
+; CHECK-O-LABEL: define void @foo(i32 %n) local_unnamed_addr {
+; CHECK-O-NEXT: entry:
+; CHECK-O-NEXT: br label %loop
+; CHECK-O: loop:
+; CHECK-O-NEXT: %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+; CHECK-O-NEXT: %iv.next = add i32 %iv, 1
+; CHECK-O-NEXT: tail call void @bar()
+; CHECK-O-NEXT: %cmp = icmp eq i32 %iv, %n
+; CHECK-O-NEXT: br i1 %cmp, label %exit, label %loop
+; CHECK-O: exit:
+; CHECK-O-NEXT: ret void
+; CHECK-O-NEXT: }
+;
+; CHECK-O-NEXT: Finished llvm::Module pass manager run.
+
+declare void @bar() local_unnamed_addr
+
+define void @foo(i32 %n) local_unnamed_addr {
+entry:
+ br label %loop
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+ %iv.next = add i32 %iv, 1
+ tail call void @bar()
+ %cmp = icmp eq i32 %iv, %n
+ br i1 %cmp, label %exit, label %loop
+exit:
+ ret void
+}
diff --git a/test/Other/new-pm-lto-defaults.ll b/test/Other/new-pm-lto-defaults.ll
new file mode 100644
index 000000000000..dfd298353272
--- /dev/null
+++ b/test/Other/new-pm-lto-defaults.ll
@@ -0,0 +1,101 @@
+; Basic test for the new LTO pipeline.
+; For now the only difference is between -O1 and everything else, so
+; -O2, -O3, -Os, -Oz are the same.
+
+; RUN: opt -disable-verify -debug-pass-manager \
+; RUN: -passes='lto<O1>' -S %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-O
+; RUN: opt -disable-verify -debug-pass-manager \
+; RUN: -passes='lto<O2>' -S %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O2
+; RUN: opt -disable-verify -debug-pass-manager \
+; RUN: -passes='lto<O3>' -S %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O2
+; RUN: opt -disable-verify -debug-pass-manager \
+; RUN: -passes='lto<Os>' -S %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O2
+; RUN: opt -disable-verify -debug-pass-manager \
+; RUN: -passes='lto<Oz>' -S %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O2
+
+; CHECK-O: Starting llvm::Module pass manager run.
+; CHECK-O-NEXT: Running pass: PassManager<{{.*}}Module
+; CHECK-O-NEXT: Starting llvm::Module pass manager run.
+; CHECK-O-NEXT: Running pass: GlobalDCEPass
+; CHECK-O-NEXT: Running pass: ForceFunctionAttrsPass
+; CHECK-O-NEXT: Running pass: InferFunctionAttrsPass
+; CHECK-O-NEXT: Running analysis: TargetLibraryAnalysis
+; CHECK-O2-NEXT: PGOIndirectCallPromotion
+; CHECK-O2-NEXT: Running pass: IPSCCPPass
+; CHECK-O-NEXT: Running pass: ModuleToPostOrderCGSCCPassAdaptor<{{.*}}PostOrderFunctionAttrsPass>
+; CHECK-O-NEXT: Running analysis: InnerAnalysisManagerProxy
+; CHECK-O-NEXT: Running analysis: InnerAnalysisManagerProxy
+; CHECK-O-NEXT: Running analysis: LazyCallGraphAnalysis
+; CHECK-O-NEXT: Running analysis: FunctionAnalysisManagerCGSCCProxy
+; CHECK-O-NEXT: Running analysis: OuterAnalysisManagerProxy<{{.*}}LazyCallGraph{{.*}}>
+; CHECK-O-NEXT: Running analysis: AAManager
+; CHECK-O-NEXT: Running analysis: TargetLibraryAnalysis
+; CHECK-O-NEXT: Running pass: ReversePostOrderFunctionAttrsPass
+; CHECK-O-NEXT: Running analysis: CallGraphAnalysis
+; CHECK-O-NEXT: Running pass: GlobalSplitPass
+; CHECK-O-NEXT: Running pass: WholeProgramDevirtPass
+; CHECK-O2-NEXT: Running pass: GlobalOptPass
+; CHECK-O2-NEXT: Running pass: ModuleToFunctionPassAdaptor<{{.*}}PromotePass>
+; CHECK-O2-NEXT: Running analysis: DominatorTreeAnalysis
+; CHECK-O2-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-O2-NEXT: Running pass: ConstantMergePass
+; CHECK-O2-NEXT: Running pass: DeadArgumentEliminationPass
+; CHECK-O2-NEXT: Running pass: ModuleToFunctionPassAdaptor<{{.*}}InstCombinePass>
+; CHECK-O2-NEXT: Running pass: ModuleToPostOrderCGSCCPassAdaptor<{{.*}}InlinerPass>
+; CHECK-O2-NEXT: Running pass: GlobalOptPass
+; CHECK-O2-NEXT: Running pass: GlobalDCEPass
+; CHECK-O2-NEXT: Running pass: ModuleToFunctionPassAdaptor<{{.*}}PassManager{{.*}}>
+; CHECK-O2-NEXT: Starting llvm::Function pass manager run.
+; CHECK-O2-NEXT: Running pass: InstCombinePass
+; CHECK-O2-NEXT: Running pass: JumpThreadingPass
+; CHECK-O2-NEXT: Running analysis: LazyValueAnalysis
+; CHECK-O2-NEXT: Running pass: SROA on foo
+; CHECK-O2-NEXT: Finished llvm::Function pass manager run.
+; CHECK-O2-NEXT: Running pass: ModuleToPostOrderCGSCCPassAdaptor<{{.*}}PostOrderFunctionAttrsPass>
+; CHECK-O2-NEXT: Running pass: ModuleToFunctionPassAdaptor<{{.*}}PassManager{{.*}}>
+; CHECK-O2-NEXT: Running analysis: MemoryDependenceAnalysis
+; CHECK-O2-NEXT: Running analysis: OptimizationRemarkEmitterAnalysis
+; CHECK-O2-NEXT: Running analysis: TargetIRAnalysis
+; CHECK-O2-NEXT: Running analysis: DemandedBitsAnalysis
+; CHECK-O2-NEXT: Running pass: CrossDSOCFIPass
+; CHECK-O2-NEXT: Running pass: ModuleToFunctionPassAdaptor<{{.*}}SimplifyCFGPass>
+; CHECK-O2-NEXT: Running pass: EliminateAvailableExternallyPass
+; CHECK-O2-NEXT: Running pass: GlobalDCEPass
+; CHECK-O-NEXT: Finished llvm::Module pass manager run.
+; CHECK-O-NEXT: Running pass: PrintModulePass
+
+; Make sure we get the IR back out without changes when we print the module.
+; CHECK-O-LABEL: define void @foo(i32 %n) local_unnamed_addr {
+; CHECK-O-NEXT: entry:
+; CHECK-O-NEXT: br label %loop
+; CHECK-O: loop:
+; CHECK-O-NEXT: %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+; CHECK-O-NEXT: %iv.next = add i32 %iv, 1
+; CHECK-O-NEXT: tail call void @bar()
+; CHECK-O-NEXT: %cmp = icmp eq i32 %iv, %n
+; CHECK-O-NEXT: br i1 %cmp, label %exit, label %loop
+; CHECK-O: exit:
+; CHECK-O-NEXT: ret void
+; CHECK-O-NEXT: }
+;
+; CHECK-O-NEXT: Finished llvm::Module pass manager run.
+
+declare void @bar() local_unnamed_addr
+
+define void @foo(i32 %n) local_unnamed_addr {
+entry:
+ br label %loop
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+ %iv.next = add i32 %iv, 1
+ tail call void @bar()
+ %cmp = icmp eq i32 %iv, %n
+ br i1 %cmp, label %exit, label %loop
+exit:
+ ret void
+}
diff --git a/test/Other/optimization-remarks-invalidation.ll b/test/Other/optimization-remarks-invalidation.ll
new file mode 100644
index 000000000000..4a9fbac15c8a
--- /dev/null
+++ b/test/Other/optimization-remarks-invalidation.ll
@@ -0,0 +1,80 @@
+; The purpose of this test is to check that the remark emission analysis result
+; stays valid when it can and is invalidated otherwise. The IR below is
+; just the minimal amount of code needed to exercise the pass.
+;
+; First make sure we emit remarks on this test case.
+; RUN: opt %s -disable-output -aa-pipeline=basic-aa 2>&1 \
+; RUN: -passes='require<opt-remark-emit>,loop(licm)' \
+; RUN: -pass-remarks=licm -pass-remarks-with-hotness \
+; RUN: | FileCheck %s
+;
+; Check that passes which preserve BFI don't invalidate the emitter.
+; RUN: opt %s -disable-output -aa-pipeline=basic-aa 2>&1 \
+; RUN: -passes='require<opt-remark-emit>,instcombine,require<opt-remark-emit>,loop(licm)' -debug-pass-manager \
+; RUN: -pass-remarks=licm -pass-remarks-with-hotness \
+; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-PM-PRESERVE
+;
+; Check that invalidating BFI computes a fresh emitter.
+; RUN: opt %s -disable-output -aa-pipeline=basic-aa 2>&1 \
+; RUN: -passes='require<opt-remark-emit>,invalidate<block-freq>,require<opt-remark-emit>,loop(licm)' -debug-pass-manager \
+; RUN: -pass-remarks=licm -pass-remarks-with-hotness \
+; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-PM-INVALIDATE
+;
+; Check that invalidating BFI doesn't compute a fresh emitter when we don't
+; request hotness remarks.
+; RUN: opt %s -disable-output -aa-pipeline=basic-aa 2>&1 \
+; RUN: -passes='require<opt-remark-emit>,invalidate<block-freq>,require<opt-remark-emit>,loop(licm)' -debug-pass-manager \
+; RUN: -pass-remarks=licm \
+; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-PM-NO-INVALIDATE
+
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+define void @hoist(i32* %array, i32* noalias %p) {
+; CHECK-PM-PRESERVE: Running analysis: OptimizationRemarkEmitterAnalysis
+; CHECK-PM-PRESERVE: Running pass: InstCombinePass
+; CHECK-PM-PRESERVE-NOT: Invalidating analysis: OptimizationRemarkEmitterAnalysis
+; CHECK-PM-PRESERVE-NOT: Running analysis: OptimizationRemarkEmitterAnalysis
+; CHECK-PM-PRESERVE: Running pass: LICMPass
+; CHECK-PM-INVALIDATE: Running analysis: OptimizationRemarkEmitterAnalysis
+; CHECK-PM-INVALIDATE: Running pass: InvalidateAnalysisPass
+; CHECK-PM-INVALIDATE: Invalidating analysis: OptimizationRemarkEmitterAnalysis
+; CHECK-PM-INVALIDATE: Running analysis: OptimizationRemarkEmitterAnalysis
+; CHECK-PM-INVALIDATE: Running pass: LICMPass
+; CHECK-PM-NO-INVALIDATE: Running analysis: OptimizationRemarkEmitterAnalysis
+; CHECK-PM-NO-INVALIDATE: Running pass: InvalidateAnalysisPass
+; CHECK-PM-NO-INVALIDATE-NOT: Invalidating analysis: OptimizationRemarkEmitterAnalysis
+; CHECK-PM-NO-INVALIDATE-NOT: Running analysis: OptimizationRemarkEmitterAnalysis
+; CHECK-PM-NO-INVALIDATE: Running pass: LICMPass
+
+Entry:
+ br label %Loop
+
+Loop:
+ %j = phi i32 [ 0, %Entry ], [ %Next, %Loop ]
+ %addr = getelementptr i32, i32* %array, i32 %j
+ %a = load i32, i32* %addr
+; CHECK: remark: /tmp/kk.c:2:20: hoisting load
+ %b = load i32, i32* %p, !dbg !8
+ %a2 = add i32 %a, %b
+ store i32 %a2, i32* %addr
+ %Next = add i32 %j, 1
+ %cond = icmp eq i32 %Next, 0
+ br i1 %cond, label %Out, label %Loop
+
+Out:
+ ret void
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+!llvm.ident = !{!5}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.9.0 ", isOptimized: true, runtimeVersion: 0, emissionKind: NoDebug, enums: !2)
+!1 = !DIFile(filename: "/tmp/kk.c", directory: "/tmp")
+!2 = !{}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !{i32 1, !"PIC Level", i32 2}
+!5 = !{!"clang version 3.9.0 "}
+!6 = distinct !DISubprogram(name: "success", scope: !1, file: !1, line: 1, type: !7, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !2)
+!7 = !DISubroutineType(types: !2)
+!8 = !DILocation(line: 2, column: 20, scope: !6)
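The analysis under test is OptimizationRemarkEmitterAnalysis, whose result
lazily computes BFI only when hotness was requested, which is why the last RUN
line sees no re-computation. Below is a hedged sketch of how a pass such as
LICM consumes the emitter; the headers and types are real, the remark text
mirrors the one checked above, and the helper itself is illustrative.

  #include "llvm/Analysis/OptimizationRemarkEmitter.h"
  #include "llvm/IR/DiagnosticInfo.h"
  using namespace llvm;

  static void emitHoistRemark(Function &F, Instruction &I,
                              FunctionAnalysisManager &FAM) {
    // After invalidate<block-freq>, re-requesting the result here is what
    // forces the fresh emitter the CHECK-PM-INVALIDATE lines look for.
    auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
    ORE.emit(OptimizationRemark("licm", "LoadHoisted", &I) << "hoisting load");
  }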
diff --git a/test/Transforms/LoopDistribute/diagnostics-with-hotness-lazy-BFI.ll b/test/Other/optimization-remarks-lazy-bfi.ll
index bb6459acc062..bb6459acc062 100644
--- a/test/Transforms/LoopDistribute/diagnostics-with-hotness-lazy-BFI.ll
+++ b/test/Other/optimization-remarks-lazy-bfi.ll
diff --git a/test/Other/writing-to-stdout.ll b/test/Other/writing-to-stdout.ll
new file mode 100644
index 000000000000..e3dee782ce69
--- /dev/null
+++ b/test/Other/writing-to-stdout.ll
@@ -0,0 +1,16 @@
+; REQUIRES: default_triple
+
+; Often LLVM tools use "-" to indicate that output should be written to stdout
+; instead of a file. This behaviour is implemented by the raw_fd_ostream class.
+; This test verifies that when doing so multiple times we don't try to access a
+; closed STDOUT_FILENO. The exact options used in this test are unimportant, as
+; long as they write to stdout using raw_fd_ostream.
+; RUN: llc %s -o=- -pass-remarks-output=- -filetype=asm | FileCheck %s
+; foobar should appear as a function somewhere in the assembly file.
+; CHECK: foobar
+; !Analysis appears at the start of pass-remarks-output.
+; CHECK: !Analysis
+
+define void @foobar() {
+ ret void
+}
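In API terms, both -o=- and -pass-remarks-output=- construct a raw_fd_ostream
over the magic filename "-", and both streams resolve to STDOUT_FILENO, so
destroying the first must not close the shared descriptor. A sketch of that
shape follows; the sys::fs::F_None flag spelling is era-specific and was later
renamed, so treat it as an assumption.

  #include "llvm/Support/FileSystem.h"
  #include "llvm/Support/raw_ostream.h"
  #include <system_error>

  static void writeToStdoutTwice() {
    std::error_code EC;
    {
      llvm::raw_fd_ostream First("-", EC, llvm::sys::fs::F_None);
      First << "assembly goes here\n";
    } // First is destroyed here; it must leave STDOUT_FILENO open.
    llvm::raw_fd_ostream Second("-", EC, llvm::sys::fs::F_None);
    Second << "remarks YAML goes here\n";
  }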
diff --git a/test/TableGen/GlobalISelEmitter.td b/test/TableGen/GlobalISelEmitter.td
new file mode 100644
index 000000000000..25be435df2de
--- /dev/null
+++ b/test/TableGen/GlobalISelEmitter.td
@@ -0,0 +1,407 @@
+// RUN: llvm-tblgen -gen-global-isel -I %p/../../include %s | FileCheck %s
+
+include "llvm/Target/Target.td"
+
+//===- Define the necessary boilerplate for our test target. --------------===//
+
+def MyTargetISA : InstrInfo;
+def MyTarget : Target { let InstructionSet = MyTargetISA; }
+
+def R0 : Register<"r0"> { let Namespace = "MyTarget"; }
+def GPR32 : RegisterClass<"MyTarget", [i32], 32, (add R0)>;
+
+class I<dag OOps, dag IOps, list<dag> Pat>
+ : Instruction {
+ let Namespace = "MyTarget";
+ let OutOperandList = OOps;
+ let InOperandList = IOps;
+ let Pattern = Pat;
+}
+
+def complex : Operand<i32>, ComplexPattern<i32, 2, "SelectComplexPattern", []> {
+ let MIOperandInfo = (ops i32imm, i32imm);
+}
+def gi_complex :
+ GIComplexOperandMatcher<s32, (ops i32imm, i32imm), "selectComplexPattern">,
+ GIComplexPatternEquiv<complex>;
+
+def m1 : OperandWithDefaultOps <i32, (ops (i32 -1))>;
+def Z : OperandWithDefaultOps <i32, (ops R0)>;
+def m1Z : OperandWithDefaultOps <i32, (ops (i32 -1), R0)>;
+
+//===- Test the function definition boilerplate. --------------------------===//
+
+// CHECK: bool MyTargetInstructionSelector::selectImpl(MachineInstr &I) const {
+// CHECK: MachineFunction &MF = *I.getParent()->getParent();
+// CHECK: const MachineRegisterInfo &MRI = MF.getRegInfo();
+
+//===- Test a pattern with multiple ComplexPattern operands. --------------===//
+//
+
+// CHECK-LABEL: if ([&]() {
+// CHECK-NEXT: MachineInstr &MI0 = I;
+// CHECK-NEXT: if (MI0.getNumOperands() < 4)
+// CHECK-NEXT: return false;
+// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_SELECT) &&
+// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* src2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: (selectComplexPattern(MI0.getOperand(2), TempOp0, TempOp1)))) &&
+// CHECK-NEXT: ((/* src3 */ (MRI.getType(MI0.getOperand(3).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: (selectComplexPattern(MI0.getOperand(3), TempOp2, TempOp3))))) {
+// CHECK-NEXT: // (select:i32 GPR32:i32:$src1, complex:i32:$src2, complex:i32:$src3) => (INSN2:i32 GPR32:i32:$src1, complex:i32:$src3, complex:i32:$src2)
+// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::INSN2));
+// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
+// CHECK-NEXT: MIB.add(MI0.getOperand(1)/*src1*/);
+// CHECK-NEXT: MIB.add(TempOp2);
+// CHECK-NEXT: MIB.add(TempOp3);
+// CHECK-NEXT: MIB.add(TempOp0);
+// CHECK-NEXT: MIB.add(TempOp1);
+// CHECK-NEXT: for (const auto *FromMI : {&MI0, })
+// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
+// CHECK-NEXT: MIB.addMemOperand(MMO);
+// CHECK-NEXT: I.eraseFromParent();
+// CHECK-NEXT: MachineInstr &NewI = *MIB;
+// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
+
+def : GINodeEquiv<G_SELECT, select>;
+def INSN2 : I<(outs GPR32:$dst), (ins GPR32:$src1, complex:$src2, complex:$src3), []>;
+def : Pat<(select GPR32:$src1, complex:$src2, complex:$src3),
+ (INSN2 GPR32:$src1, complex:$src3, complex:$src2)>;
+
+//===- Test a simple pattern with regclass operands. ----------------------===//
+
+// CHECK-LABEL: if ([&]() {
+// CHECK-NEXT: MachineInstr &MI0 = I;
+// CHECK-NEXT: if (MI0.getNumOperands() < 3)
+// CHECK-NEXT: return false;
+// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_ADD) &&
+// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* src2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(2).getReg(), MRI, TRI)))))) {
+
+// CHECK-NEXT: // (add:i32 GPR32:i32:$src1, GPR32:i32:$src2) => (ADD:i32 GPR32:i32:$src1, GPR32:i32:$src2)
+// CHECK-NEXT: I.setDesc(TII.get(MyTarget::ADD));
+// CHECK-NEXT: MachineInstr &NewI = I;
+// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
+// CHECK-NEXT: return false;
+// CHECK-NEXT: }()) { return true; }
+
+
+def ADD : I<(outs GPR32:$dst), (ins GPR32:$src1, GPR32:$src2),
+ [(set GPR32:$dst, (add GPR32:$src1, GPR32:$src2))]>;
+
+//===- Test a nested instruction match. -----------------------------------===//
+
+// CHECK-LABEL: if ([&]() {
+// CHECK-NEXT: MachineInstr &MI0 = I;
+// CHECK-NEXT: if (MI0.getNumOperands() < 3)
+// CHECK-NEXT: return false;
+// CHECK-NEXT: if (!MI0.getOperand(1).isReg())
+// CHECK-NEXT: return false;
+// CHECK-NEXT: MachineInstr &MI1 = *MRI.getVRegDef(MI0.getOperand(1).getReg());
+// CHECK-NEXT: if (MI1.getNumOperands() < 3)
+// CHECK-NEXT: return false;
+// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_MUL) &&
+// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* Operand 1 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: (((MI1.getOpcode() == TargetOpcode::G_ADD) &&
+// CHECK-NEXT: ((/* Operand 0 */ (MRI.getType(MI1.getOperand(0).getReg()) == (LLT::scalar(32))))) &&
+// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI1.getOperand(1).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI1.getOperand(1).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* src2 */ (MRI.getType(MI1.getOperand(2).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI1.getOperand(2).getReg(), MRI, TRI))))))
+// CHECK-NEXT: ))) &&
+// CHECK-NEXT: ((/* src3 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(2).getReg(), MRI, TRI)))))) {
+// CHECK-NEXT: if (!isObviouslySafeToFold(MI1)) return false;
+// CHECK-NEXT: // (mul:i32 (add:i32 GPR32:i32:$src1, GPR32:i32:$src2), GPR32:i32:$src3) => (MULADD:i32 GPR32:i32:$src1, GPR32:i32:$src2, GPR32:i32:$src3)
+// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::MULADD));
+// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
+// CHECK-NEXT: MIB.add(MI1.getOperand(1)/*src1*/);
+// CHECK-NEXT: MIB.add(MI1.getOperand(2)/*src2*/);
+// CHECK-NEXT: MIB.add(MI0.getOperand(2)/*src3*/);
+// CHECK-NEXT: for (const auto *FromMI : {&MI0, &MI1, })
+// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
+// CHECK-NEXT: MIB.addMemOperand(MMO);
+// CHECK-NEXT: I.eraseFromParent();
+// CHECK-NEXT: MachineInstr &NewI = *MIB;
+// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
+
+// We also get a second rule by commutativity.
+// CHECK-LABEL: if ([&]() {
+// CHECK-NEXT: MachineInstr &MI0 = I;
+// CHECK-NEXT: if (MI0.getNumOperands() < 3)
+// CHECK-NEXT: return false;
+// CHECK-NEXT: if (!MI0.getOperand(2).isReg())
+// CHECK-NEXT: return false;
+// CHECK-NEXT: MachineInstr &MI1 = *MRI.getVRegDef(MI0.getOperand(2).getReg());
+// CHECK-NEXT: if (MI1.getNumOperands() < 3)
+// CHECK-NEXT: return false;
+// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_MUL) &&
+// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* src3 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* Operand 2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: (((MI1.getOpcode() == TargetOpcode::G_ADD) &&
+// CHECK-NEXT: ((/* Operand 0 */ (MRI.getType(MI1.getOperand(0).getReg()) == (LLT::scalar(32))))) &&
+// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI1.getOperand(1).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI1.getOperand(1).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* src2 */ (MRI.getType(MI1.getOperand(2).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI1.getOperand(2).getReg(), MRI, TRI))))))
+// CHECK-NEXT: )))) {
+// CHECK-NEXT: if (!isObviouslySafeToFold(MI1)) return false;
+// CHECK-NEXT: // (mul:i32 GPR32:i32:$src3, (add:i32 GPR32:i32:$src1, GPR32:i32:$src2)) => (MULADD:i32 GPR32:i32:$src1, GPR32:i32:$src2, GPR32:i32:$src3)
+// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::MULADD));
+// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
+// CHECK-NEXT: MIB.add(MI1.getOperand(1)/*src1*/);
+// CHECK-NEXT: MIB.add(MI1.getOperand(2)/*src2*/);
+// CHECK-NEXT: MIB.add(MI0.getOperand(1)/*src3*/);
+// CHECK-NEXT: for (const auto *FromMI : {&MI0, &MI1, })
+// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
+// CHECK-NEXT: MIB.addMemOperand(MMO);
+// CHECK-NEXT: I.eraseFromParent();
+// CHECK-NEXT: MachineInstr &NewI = *MIB;
+// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
+
+def MULADD : I<(outs GPR32:$dst), (ins GPR32:$src1, GPR32:$src2, GPR32:$src3),
+ [(set GPR32:$dst,
+ (mul (add GPR32:$src1, GPR32:$src2), GPR32:$src3))]>;
+
+//===- Test another simple pattern with regclass operands. ----------------===//
+
+// CHECK-LABEL: if ([&]() {
+// CHECK-NEXT: MachineInstr &MI0 = I;
+// CHECK-NEXT: if (MI0.getNumOperands() < 3)
+// CHECK-NEXT: return false;
+// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_MUL) &&
+// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* src2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(2).getReg(), MRI, TRI)))))) {
+// CHECK-NEXT: // (mul:i32 GPR32:i32:$src1, GPR32:i32:$src2) => (MUL:i32 GPR32:i32:$src2, GPR32:i32:$src1)
+// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::MUL));
+// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
+// CHECK-NEXT: MIB.add(MI0.getOperand(2)/*src2*/);
+// CHECK-NEXT: MIB.add(MI0.getOperand(1)/*src1*/);
+// CHECK-NEXT: for (const auto *FromMI : {&MI0, })
+// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
+// CHECK-NEXT: MIB.addMemOperand(MMO);
+// CHECK-NEXT: I.eraseFromParent();
+// CHECK-NEXT: MachineInstr &NewI = *MIB;
+// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
+// CHECK-NEXT: return false;
+// CHECK-NEXT: }()) { return true; }
+
+def MUL : I<(outs GPR32:$dst), (ins GPR32:$src2, GPR32:$src1),
+ [(set GPR32:$dst, (mul GPR32:$src1, GPR32:$src2))]>;
+
+//===- Test a pattern with ComplexPattern operands. -----------------------===//
+//
+
+// CHECK-LABEL: if ([&]() {
+// CHECK-NEXT: MachineInstr &MI0 = I;
+// CHECK-NEXT: if (MI0.getNumOperands() < 3)
+// CHECK-NEXT: return false;
+// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_SUB) &&
+// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* src2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: (selectComplexPattern(MI0.getOperand(2), TempOp0, TempOp1))))) {
+// CHECK-NEXT: // (sub:i32 GPR32:i32:$src1, complex:i32:$src2) => (INSN1:i32 GPR32:i32:$src1, complex:i32:$src2)
+// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::INSN1));
+// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
+// CHECK-NEXT: MIB.add(MI0.getOperand(1)/*src1*/);
+// CHECK-NEXT: MIB.add(TempOp0);
+// CHECK-NEXT: MIB.add(TempOp1);
+// CHECK-NEXT: for (const auto *FromMI : {&MI0, })
+// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
+// CHECK-NEXT: MIB.addMemOperand(MMO);
+// CHECK-NEXT: I.eraseFromParent();
+// CHECK-NEXT: MachineInstr &NewI = *MIB;
+// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
+
+def INSN1 : I<(outs GPR32:$dst), (ins GPR32:$src1, complex:$src2), []>;
+def : Pat<(sub GPR32:$src1, complex:$src2), (INSN1 GPR32:$src1, complex:$src2)>;
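+// Note that the complex operand is rendered as the two temporaries (TempOp0,
+// TempOp1) that selectComplexPattern fills in during the match.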
+
+//===- Test a simple pattern with a default operand. ----------------------===//
+//
+
+// CHECK-LABEL: if ([&]() {
+// CHECK-NEXT: MachineInstr &MI0 = I;
+// CHECK-NEXT: if (MI0.getNumOperands() < 3)
+// CHECK-NEXT: return false;
+// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_XOR) &&
+// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* Operand 2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: (isOperandImmEqual(MI0.getOperand(2), -2, MRI))))) {
+// CHECK-NEXT: // (xor:i32 GPR32:i32:$src1, -2:i32) => (XORI:i32 GPR32:i32:$src1)
+// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::XORI));
+// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
+// CHECK-NEXT: MIB.addImm(-1);
+// CHECK-NEXT: MIB.add(MI0.getOperand(1)/*src1*/);
+// CHECK-NEXT: for (const auto *FromMI : {&MI0, })
+// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
+// CHECK-NEXT: MIB.addMemOperand(MMO);
+// CHECK-NEXT: I.eraseFromParent();
+// CHECK-NEXT: MachineInstr &NewI = *MIB;
+// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
+// CHECK-NEXT: return false;
+// CHECK-NEXT: }()) { return true; }
+
+// The -2 is just to distinguish it from the 'not' case below.
+def XORI : I<(outs GPR32:$dst), (ins m1:$src2, GPR32:$src1),
+ [(set GPR32:$dst, (xor GPR32:$src1, -2))]>;
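+// Note that the m1 default operand is rendered as MIB.addImm(-1) above,
+// before the $src1 register operand.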
+
+//===- Test a simple pattern with a default register operand. -------------===//
+//
+
+// CHECK-LABEL: if ([&]() {
+// CHECK-NEXT: MachineInstr &MI0 = I;
+// CHECK-NEXT: if (MI0.getNumOperands() < 3)
+// CHECK-NEXT: return false;
+// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_XOR) &&
+// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* Operand 2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: (isOperandImmEqual(MI0.getOperand(2), -3, MRI))))) {
+// CHECK-NEXT: // (xor:i32 GPR32:i32:$src1, -3:i32) => (XOR:i32 GPR32:i32:$src1)
+// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::XOR));
+// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
+// CHECK-NEXT: MIB.addReg(MyTarget::R0);
+// CHECK-NEXT: MIB.add(MI0.getOperand(1)/*src1*/);
+// CHECK-NEXT: for (const auto *FromMI : {&MI0, })
+// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
+// CHECK-NEXT: MIB.addMemOperand(MMO);
+// CHECK-NEXT: I.eraseFromParent();
+// CHECK-NEXT: MachineInstr &NewI = *MIB;
+// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
+// CHECK-NEXT: return false;
+// CHECK-NEXT: }()) { return true; }
+
+// The -3 is just to distinguish it from the 'not' case below and the other default op case above.
+def XOR : I<(outs GPR32:$dst), (ins Z:$src2, GPR32:$src1),
+ [(set GPR32:$dst, (xor GPR32:$src1, -3))]>;
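+// Note that the Z default operand is rendered as MIB.addReg(MyTarget::R0) above.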
+
+//===- Test a simple pattern with multiple default operands. --------------===//
+//
+
+// CHECK-LABEL: if ([&]() {
+// CHECK-NEXT: MachineInstr &MI0 = I;
+// CHECK-NEXT: if (MI0.getNumOperands() < 3)
+// CHECK-NEXT: return false;
+// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_XOR) &&
+// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* Operand 2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: (isOperandImmEqual(MI0.getOperand(2), -4, MRI))))) {
+// CHECK-NEXT: // (xor:i32 GPR32:i32:$src1, -4:i32) => (XORlike:i32 GPR32:i32:$src1)
+// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::XORlike));
+// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
+// CHECK-NEXT: MIB.addImm(-1);
+// CHECK-NEXT: MIB.addReg(MyTarget::R0);
+// CHECK-NEXT: MIB.add(MI0.getOperand(1)/*src1*/);
+// CHECK-NEXT: for (const auto *FromMI : {&MI0, })
+// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
+// CHECK-NEXT: MIB.addMemOperand(MMO);
+// CHECK-NEXT: I.eraseFromParent();
+// CHECK-NEXT: MachineInstr &NewI = *MIB;
+// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
+// CHECK-NEXT: return false;
+// CHECK-NEXT: }()) { return true; }
+
+// The -4 is just to distinguish it from the other 'not' cases.
+def XORlike : I<(outs GPR32:$dst), (ins m1Z:$src2, GPR32:$src1),
+ [(set GPR32:$dst, (xor GPR32:$src1, -4))]>;
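+// Note that both defaults from m1Z are rendered: the immediate -1 and the
+// register R0.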
+
+//===- Test a simple pattern with constant immediate operands. ------------===//
+//
+// This must precede the 3-register variants because constant immediates have
+// priority over register banks.
+
+// CHECK-LABEL: if ([&]() {
+// CHECK-NEXT: MachineInstr &MI0 = I;
+// CHECK-NEXT: if (MI0.getNumOperands() < 3)
+// CHECK-NEXT: return false;
+// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_XOR) &&
+// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* Wm */ (MRI.getType(MI0.getOperand(1).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(1).getReg(), MRI, TRI))))) &&
+// CHECK-NEXT: ((/* Operand 2 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) &&
+// CHECK-NEXT: (isOperandImmEqual(MI0.getOperand(2), -1, MRI))))) {
+// CHECK-NEXT: // (xor:i32 GPR32:i32:$Wm, -1:i32) => (ORN:i32 R0:i32, GPR32:i32:$Wm)
+// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::ORN));
+// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/);
+// CHECK-NEXT: MIB.addReg(MyTarget::R0);
+// CHECK-NEXT: MIB.add(MI0.getOperand(1)/*Wm*/);
+// CHECK-NEXT: for (const auto *FromMI : {&MI0, })
+// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands())
+// CHECK-NEXT: MIB.addMemOperand(MMO);
+// CHECK-NEXT: I.eraseFromParent();
+// CHECK-NEXT: MachineInstr &NewI = *MIB;
+// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
+// CHECK-NEXT: return false;
+// CHECK-NEXT: }()) { return true; }
+
+def ORN : I<(outs GPR32:$dst), (ins GPR32:$src1, GPR32:$src2), []>;
+def : Pat<(not GPR32:$Wm), (ORN R0, GPR32:$Wm)>;
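+// Note that 'not' expands to (xor x, -1), which is why the emitted matcher
+// checks isOperandImmEqual(MI0.getOperand(2), -1, MRI) above.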
+
+//===- Test a pattern with an MBB operand. --------------------------------===//
+
+// CHECK-LABEL: if ([&]() {
+// CHECK-NEXT: MachineInstr &MI0 = I;
+// CHECK-NEXT: if (MI0.getNumOperands() < 1)
+// CHECK-NEXT: return false;
+// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_BR) &&
+// CHECK-NEXT: ((/* target */ (MI0.getOperand(0).isMBB())))) {
+
+// CHECK-NEXT: // (br (bb:Other):$target) => (BR (bb:Other):$target)
+// CHECK-NEXT: I.setDesc(TII.get(MyTarget::BR));
+// CHECK-NEXT: MachineInstr &NewI = I;
+// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
+// CHECK-NEXT: return true;
+// CHECK-NEXT: }
+// CHECK-NEXT: return false;
+// CHECK-NEXT: }()) { return true; }
+
+def BR : I<(outs), (ins unknown:$target),
+ [(br bb:$target)]>;
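+// Note that the MBB case above mutates the G_BR in place via setDesc rather
+// than building a new instruction with BuildMI.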
diff --git a/test/TableGen/RegisterBankEmitter.td b/test/TableGen/RegisterBankEmitter.td
new file mode 100644
index 000000000000..88c7ec1f7915
--- /dev/null
+++ b/test/TableGen/RegisterBankEmitter.td
@@ -0,0 +1,15 @@
+// RUN: llvm-tblgen -gen-register-bank -I %p/../../include %s | FileCheck %s
+
+include "llvm/Target/Target.td"
+
+def MyTarget : Target;
+def R0 : Register<"r0">;
+let Size = 32 in {
+ def ClassA : RegisterClass<"MyTarget", [i32], 32, (add R0)>;
+ def ClassB : RegisterClass<"MyTarget", [i1], 32, (add ClassA)>;
+}
+
+// CHECK: GPRRegBankCoverageData
+// CHECK: MyTarget::ClassARegClassID
+// CHECK: MyTarget::ClassBRegClassID
+def GPRRegBank : RegisterBank<"GPR", [ClassA]>;
diff --git a/test/ThinLTO/X86/Inputs/cache-import-lists1.ll b/test/ThinLTO/X86/Inputs/cache-import-lists1.ll
new file mode 100644
index 000000000000..58bfb39f9ee1
--- /dev/null
+++ b/test/ThinLTO/X86/Inputs/cache-import-lists1.ll
@@ -0,0 +1,11 @@
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @f1() {
+ call void @linkonce_odr()
+ ret void
+}
+
+define linkonce_odr void @linkonce_odr() {
+ ret void
+}
diff --git a/test/ThinLTO/X86/Inputs/cache-import-lists2.ll b/test/ThinLTO/X86/Inputs/cache-import-lists2.ll
new file mode 100644
index 000000000000..899bbaea13d6
--- /dev/null
+++ b/test/ThinLTO/X86/Inputs/cache-import-lists2.ll
@@ -0,0 +1,11 @@
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @f2() {
+ call void @linkonce_odr()
+ ret void
+}
+
+define linkonce_odr void @linkonce_odr() {
+ ret void
+}
diff --git a/test/ThinLTO/X86/Inputs/cache-typeid-resolutions-import.ll b/test/ThinLTO/X86/Inputs/cache-typeid-resolutions-import.ll
new file mode 100644
index 000000000000..95ecd1824351
--- /dev/null
+++ b/test/ThinLTO/X86/Inputs/cache-typeid-resolutions-import.ll
@@ -0,0 +1,15 @@
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i1 @importf1(i8* %p) {
+ %x = call i1 @f1(i8* %p)
+ ret i1 %x
+}
+
+define i1 @importf2(i8* %p) {
+ %x = call i1 @f2(i8* %p)
+ ret i1 %x
+}
+
+declare i1 @f1(i8* %p)
+declare i1 @f2(i8* %p)
diff --git a/test/ThinLTO/X86/Inputs/cache-typeid-resolutions1.ll b/test/ThinLTO/X86/Inputs/cache-typeid-resolutions1.ll
new file mode 100644
index 000000000000..e53673bcd05e
--- /dev/null
+++ b/test/ThinLTO/X86/Inputs/cache-typeid-resolutions1.ll
@@ -0,0 +1,6 @@
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@vt1 = constant i32 0, !type !0
+
+!0 = !{i32 0, !"typeid1"}
diff --git a/test/ThinLTO/X86/Inputs/cache-typeid-resolutions2.ll b/test/ThinLTO/X86/Inputs/cache-typeid-resolutions2.ll
new file mode 100644
index 000000000000..283badad3bbf
--- /dev/null
+++ b/test/ThinLTO/X86/Inputs/cache-typeid-resolutions2.ll
@@ -0,0 +1,10 @@
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@vt2 = constant i1 (i8*)* @vf2, !type !0
+
+define internal i1 @vf2(i8* %this) {
+ ret i1 0
+}
+
+!0 = !{i32 0, !"typeid2"}
diff --git a/test/ThinLTO/X86/Inputs/cache-typeid-resolutions3.ll b/test/ThinLTO/X86/Inputs/cache-typeid-resolutions3.ll
new file mode 100644
index 000000000000..830622e9cd76
--- /dev/null
+++ b/test/ThinLTO/X86/Inputs/cache-typeid-resolutions3.ll
@@ -0,0 +1,15 @@
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@vt2a = constant i1 (i8*)* @vf2a, !type !0
+@vt2b = constant i1 (i8*)* @vf2b, !type !0
+
+define internal i1 @vf2a(i8* %this) {
+ ret i1 0
+}
+
+define internal i1 @vf2b(i8* %this) {
+ ret i1 1
+}
+
+!0 = !{i32 0, !"typeid2"}
diff --git a/test/ThinLTO/X86/cache-config.ll b/test/ThinLTO/X86/cache-config.ll
index a947969f6690..01e44b8b16a3 100644
--- a/test/ThinLTO/X86/cache-config.ll
+++ b/test/ThinLTO/X86/cache-config.ll
@@ -1,21 +1,21 @@
-; RUN: rm -rf %t.cache && mkdir %t.cache
+; RUN: rm -rf %t.cache
; RUN: opt -module-hash -module-summary %s -o %t.bc
-; RUN: llvm-lto2 -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx
-; RUN: llvm-lto2 -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -mcpu=yonah
-; RUN: llvm-lto2 -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -relax-elf-relocations
-; RUN: llvm-lto2 -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -function-sections
-; RUN: llvm-lto2 -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -data-sections
-; RUN: llvm-lto2 -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -debugger-tune=sce
-; RUN: llvm-lto2 -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -mattr=+sse2
-; RUN: llvm-lto2 -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -relocation-model=static
-; RUN: llvm-lto2 -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -code-model=large
-; RUN: llvm-lto2 -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -cg-opt-level=0
-; RUN: llvm-lto2 -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -O1
-; RUN: llvm-lto2 -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -opt-pipeline=loweratomic
-; RUN: llvm-lto2 -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -aa-pipeline=basic-aa
-; RUN: llvm-lto2 -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -override-triple=x86_64-unknown-linux-gnu
-; RUN: llvm-lto2 -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -default-triple=x86_64-unknown-linux-gnu
+; RUN: llvm-lto2 run -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx
+; RUN: llvm-lto2 run -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -mcpu=yonah
+; RUN: llvm-lto2 run -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -relax-elf-relocations
+; RUN: llvm-lto2 run -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -function-sections
+; RUN: llvm-lto2 run -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -data-sections
+; RUN: llvm-lto2 run -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -debugger-tune=sce
+; RUN: llvm-lto2 run -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -mattr=+sse2
+; RUN: llvm-lto2 run -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -relocation-model=static
+; RUN: llvm-lto2 run -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -code-model=large
+; RUN: llvm-lto2 run -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -cg-opt-level=0
+; RUN: llvm-lto2 run -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -O1
+; RUN: llvm-lto2 run -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -opt-pipeline=loweratomic
+; RUN: llvm-lto2 run -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -aa-pipeline=basic-aa
+; RUN: llvm-lto2 run -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -override-triple=x86_64-unknown-linux-gnu
+; RUN: llvm-lto2 run -o %t.o %t.bc -cache-dir %t.cache -r=%t.bc,globalfunc,plx -default-triple=x86_64-unknown-linux-gnu
; RUN: ls %t.cache | count 15
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/test/ThinLTO/X86/cache-import-lists.ll b/test/ThinLTO/X86/cache-import-lists.ll
new file mode 100644
index 000000000000..ba7b437e8cad
--- /dev/null
+++ b/test/ThinLTO/X86/cache-import-lists.ll
@@ -0,0 +1,24 @@
+; RUN: opt -module-hash -module-summary %s -o %t.bc
+; RUN: opt -module-hash -module-summary %S/Inputs/cache-import-lists1.ll -o %t1.bc
+; RUN: opt -module-hash -module-summary %S/Inputs/cache-import-lists2.ll -o %t2.bc
+
+; Tests that the hash for t is sensitive to the set of imported functions
+; for each module, which in this case depends on the link order (the function
+; linkonce_odr will be imported from either t1 or t2, whichever comes first).
+
+; RUN: rm -rf %t.cache
+; RUN: llvm-lto2 run -cache-dir %t.cache -o %t.o %t.bc %t1.bc %t2.bc -r=%t.bc,main,plx -r=%t.bc,f1,lx -r=%t.bc,f2,lx -r=%t1.bc,f1,plx -r=%t1.bc,linkonce_odr,plx -r=%t2.bc,f2,plx -r=%t2.bc,linkonce_odr,lx
+; RUN: llvm-lto2 run -cache-dir %t.cache -o %t.o %t.bc %t2.bc %t1.bc -r=%t.bc,main,plx -r=%t.bc,f1,lx -r=%t.bc,f2,lx -r=%t2.bc,f2,plx -r=%t2.bc,linkonce_odr,plx -r=%t1.bc,f1,plx -r=%t1.bc,linkonce_odr,lx
+; RUN: ls %t.cache | count 6
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @main() {
+ call void @f1()
+ call void @f2()
+ ret void
+}
+
+declare void @f1()
+declare void @f2()
diff --git a/test/ThinLTO/X86/cache-typeid-resolutions.ll b/test/ThinLTO/X86/cache-typeid-resolutions.ll
new file mode 100644
index 000000000000..1609e90b0ae6
--- /dev/null
+++ b/test/ThinLTO/X86/cache-typeid-resolutions.ll
@@ -0,0 +1,47 @@
+; RUN: opt -module-hash -module-summary %s -o %t.bc
+; RUN: opt -module-hash -module-summary %S/Inputs/cache-typeid-resolutions-import.ll -o %t-import.bc
+
+; RUN: llvm-as -o %t1.bc %S/Inputs/cache-typeid-resolutions1.ll
+; RUN: llvm-as -o %t2.bc %S/Inputs/cache-typeid-resolutions2.ll
+; RUN: llvm-as -o %t3.bc %S/Inputs/cache-typeid-resolutions3.ll
+
+; Two resolutions for typeid1: Unsat, Single
+; where both t and t-import are sensitive to typeid1's resolution
+; so 4 distinct objects in total.
+; RUN: rm -rf %t.cache
+; RUN: llvm-lto2 run -o %t.o %t.bc %t-import.bc -cache-dir %t.cache -r=%t.bc,f1,plx -r=%t.bc,f2,plx -r=%t-import.bc,importf1,plx -r=%t-import.bc,f1,lx -r=%t-import.bc,importf2,plx -r=%t-import.bc,f2,lx
+; RUN: llvm-lto2 run -o %t.o %t.bc %t-import.bc %t1.bc -cache-dir %t.cache -r=%t.bc,f1,plx -r=%t.bc,f2,plx -r=%t-import.bc,importf1,plx -r=%t-import.bc,f1,lx -r=%t-import.bc,importf2,plx -r=%t-import.bc,f2,lx -r=%t1.bc,vt1,plx
+; RUN: ls %t.cache | count 4
+
+; Three resolutions for typeid2: Indir, SingleImpl, UniqueRetVal
+; where both t and t-import are sensitive to typeid2's resolution
+; so 6 distinct objects in total.
+; RUN: rm -rf %t.cache
+; RUN: llvm-lto2 run -o %t.o %t.bc %t-import.bc -cache-dir %t.cache -r=%t.bc,f1,plx -r=%t.bc,f2,plx -r=%t-import.bc,importf1,plx -r=%t-import.bc,f1,lx -r=%t-import.bc,importf2,plx -r=%t-import.bc,f2,lx
+; RUN: llvm-lto2 run -o %t.o %t.bc %t-import.bc %t2.bc -cache-dir %t.cache -r=%t.bc,f1,plx -r=%t.bc,f2,plx -r=%t2.bc,vt2,plx -r=%t-import.bc,importf1,plx -r=%t-import.bc,f1,lx -r=%t-import.bc,importf2,plx -r=%t-import.bc,f2,lx
+; RUN: llvm-lto2 run -o %t.o %t.bc %t-import.bc %t3.bc -cache-dir %t.cache -r=%t.bc,f1,plx -r=%t.bc,f2,plx -r=%t3.bc,vt2a,plx -r=%t3.bc,vt2b,plx -r=%t-import.bc,importf1,plx -r=%t-import.bc,f1,lx -r=%t-import.bc,importf2,plx -r=%t-import.bc,f2,lx
+; RUN: ls %t.cache | count 6
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i1 @f1(i8* %p) {
+ %x = call i1 @llvm.type.test(i8* %p, metadata !"typeid1")
+ ret i1 %x
+}
+
+define i1 @f2(i8* %obj) {
+ %vtableptr = bitcast i8* %obj to [3 x i8*]**
+ %vtable = load [3 x i8*]*, [3 x i8*]** %vtableptr
+ %vtablei8 = bitcast [3 x i8*]* %vtable to i8*
+ %p = call i1 @llvm.type.test(i8* %vtablei8, metadata !"typeid2")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [3 x i8*], [3 x i8*]* %vtable, i32 0, i32 0
+ %fptr = load i8*, i8** %fptrptr
+ %fptr_casted = bitcast i8* %fptr to i1 (i8*)*
+ %result = call i1 %fptr_casted(i8* %obj)
+ ret i1 %result
+}
+
+declare i1 @llvm.type.test(i8*, metadata)
+declare void @llvm.assume(i1)
diff --git a/test/ThinLTO/X86/cache.ll b/test/ThinLTO/X86/cache.ll
index b796b00fc5d5..ea5c2f98d876 100644
--- a/test/ThinLTO/X86/cache.ll
+++ b/test/ThinLTO/X86/cache.ll
@@ -10,8 +10,8 @@
; RUN: ls %t.cache | count 1
; Verify that enabling caching is ignoring module without hash with llvm-lto2
-; RUN: rm -Rf %t.cache && mkdir %t.cache
-; RUN: llvm-lto2 -o %t.o %t2.bc %t.bc -cache-dir %t.cache \
+; RUN: rm -Rf %t.cache
+; RUN: llvm-lto2 run -o %t.o %t2.bc %t.bc -cache-dir %t.cache \
; RUN: -r=%t2.bc,_main,plx \
; RUN: -r=%t2.bc,_globalfunc,lx \
; RUN: -r=%t.bc,_globalfunc,plx
@@ -23,19 +23,25 @@
; RUN: opt -module-hash -module-summary %s -o %t.bc
; RUN: opt -module-hash -module-summary %p/Inputs/cache.ll -o %t2.bc
-; Verify that enabling caching is working
+; Verify that enabling caching is working, and that the pruner only removes
+; files matching the pattern "llvmcache-*".
; RUN: rm -Rf %t.cache && mkdir %t.cache
+; RUN: touch -t 197001011200 %t.cache/llvmcache-foo %t.cache/foo
; RUN: llvm-lto -thinlto-action=run -exported-symbol=globalfunc %t2.bc %t.bc -thinlto-cache-dir %t.cache
+; RUN: ls %t.cache | count 4
; RUN: ls %t.cache/llvmcache.timestamp
-; RUN: ls %t.cache | count 3
+; RUN: ls %t.cache/foo
+; RUN: not ls %t.cache/llvmcache-foo
+; RUN: ls %t.cache/llvmcache-* | count 2
; Verify that enabling caching is working with llvm-lto2
-; RUN: rm -Rf %t.cache && mkdir %t.cache
-; RUN: llvm-lto2 -o %t.o %t2.bc %t.bc -cache-dir %t.cache \
+; RUN: rm -Rf %t.cache
+; RUN: llvm-lto2 run -o %t.o %t2.bc %t.bc -cache-dir %t.cache \
; RUN: -r=%t2.bc,_main,plx \
; RUN: -r=%t2.bc,_globalfunc,lx \
; RUN: -r=%t.bc,_globalfunc,plx
; RUN: ls %t.cache | count 2
+; RUN: ls %t.cache/llvmcache-* | count 2
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.11.0"
diff --git a/test/ThinLTO/X86/crash_debuginfo.ll b/test/ThinLTO/X86/crash_debuginfo.ll
index 31b55fb4f9e4..8638c24d0820 100644
--- a/test/ThinLTO/X86/crash_debuginfo.ll
+++ b/test/ThinLTO/X86/crash_debuginfo.ll
@@ -41,6 +41,5 @@ declare void @bar(i32)
!14 = !DILocalVariable(name: "caster", scope: !9, file: !1, line: 728, type: !15)
!15 = distinct !DICompositeType(tag: DW_TAG_union_type, scope: !9, file: !1, line: 728, size: 64, align: 64, elements: !6, identifier: "someclass")
!16 = distinct !DILocation(line: 87, column: 9, scope: !17)
-!17 = distinct !DISubprogram(name: "foo", linkageName: "foo", scope: !10, line: 73, type: !11, isLocal: false, isDefinition: true, scopeLine: 74, flags: DIFlagPrototyped, isOptimized: true, unit: !0, declaration: !18, variables: !6)
+!17 = distinct !DISubprogram(name: "foo", linkageName: "foo", scope: !10, file: !1, line: 73, type: !11, isLocal: false, isDefinition: true, scopeLine: 74, flags: DIFlagPrototyped, isOptimized: true, unit: !0, declaration: !18, variables: !6)
!18 = !DISubprogram(name: "foo", linkageName: "foo", scope: !10, file: !1, line: 83, type: !11, isLocal: false, isDefinition: false, scopeLine: 83, flags: DIFlagPrototyped, isOptimized: true)
-
diff --git a/test/ThinLTO/X86/deadstrip.ll b/test/ThinLTO/X86/deadstrip.ll
index 6f1cbfe59693..0c85322eb565 100644
--- a/test/ThinLTO/X86/deadstrip.ll
+++ b/test/ThinLTO/X86/deadstrip.ll
@@ -8,7 +8,7 @@
; RUN: llvm-lto -exported-symbol=_main -thinlto-action=run %t1.bc %t2.bc
; RUN: llvm-nm %t1.bc.thinlto.o | FileCheck %s --check-prefix=CHECK-NM
-; RUN: llvm-lto2 %t1.bc %t2.bc -o %t.out -save-temps \
+; RUN: llvm-lto2 run %t1.bc %t2.bc -o %t.out -save-temps \
; RUN: -r %t1.bc,_main,plx \
; RUN: -r %t1.bc,_bar,pl \
; RUN: -r %t1.bc,_dead_func,pl \
@@ -51,7 +51,7 @@
; In that case there are uses of @dead_func in the regular LTO partition
; and it shouldn't be internalized.
; RUN: opt %p/Inputs/deadstrip.ll -o %t3.bc
-; RUN: llvm-lto2 %t1.bc %t3.bc -o %t4.out -save-temps \
+; RUN: llvm-lto2 run %t1.bc %t3.bc -o %t4.out -save-temps \
; RUN: -r %t1.bc,_main,plx \
; RUN: -r %t1.bc,_bar,pl \
; RUN: -r %t1.bc,_dead_func,pl \
diff --git a/test/ThinLTO/X86/debuginfo-compositetype-import.ll b/test/ThinLTO/X86/debuginfo-compositetype-import.ll
index 0b3a7a45224e..ae2f5f26d226 100644
--- a/test/ThinLTO/X86/debuginfo-compositetype-import.ll
+++ b/test/ThinLTO/X86/debuginfo-compositetype-import.ll
@@ -7,7 +7,7 @@
; By default, composite types are imported as type declarations
; RUN: llvm-lto -thinlto-action=import %t2.bc -thinlto-index=%t.index.bc -o - | llvm-dis -o - | FileCheck %s
-; RUN: llvm-lto2 %t1.bc %t2.bc -o %t.out -save-temps \
+; RUN: llvm-lto2 run %t1.bc %t2.bc -o %t.out -save-temps \
; RUN: -r %t2.bc,main,plx \
; RUN: -r %t2.bc,foo,l \
; RUN: -r %t1.bc,foo,pl
@@ -20,7 +20,7 @@
; Ensure that full type definitions of composite types are imported if requested
; RUN: llvm-lto -import-full-type-definitions -thinlto-action=import %t2.bc -thinlto-index=%t.index.bc -o - | llvm-dis -o - | FileCheck %s --check-prefix=FULL
-; RUN: llvm-lto2 %t1.bc %t2.bc -o %t.out -save-temps \
+; RUN: llvm-lto2 run %t1.bc %t2.bc -o %t.out -save-temps \
; RUN: -import-full-type-definitions \
; RUN: -r %t2.bc,main,plx \
; RUN: -r %t2.bc,foo,l \
diff --git a/test/ThinLTO/X86/diagnostic-handler-remarks.ll b/test/ThinLTO/X86/diagnostic-handler-remarks.ll
index 7467a082c5a5..3880b6f11380 100644
--- a/test/ThinLTO/X86/diagnostic-handler-remarks.ll
+++ b/test/ThinLTO/X86/diagnostic-handler-remarks.ll
@@ -2,6 +2,7 @@
; RUN: opt -module-summary %p/Inputs/diagnostic-handler-remarks.ll -o %t2.bc
; Optimization records are collected regardless of the diagnostic handler
+; RUN: rm -f %t.yaml.thin.0.yaml %t.yaml.thin.1.yaml
; RUN: llvm-lto -thinlto-action=run \
; RUN: -lto-pass-remarks-output=%t.yaml \
; RUN: -exported-symbol _func2 \
diff --git a/test/ThinLTO/X86/dicompositetype-unique.ll b/test/ThinLTO/X86/dicompositetype-unique.ll
index 3550e6c6a74a..7a35f877e63d 100644
--- a/test/ThinLTO/X86/dicompositetype-unique.ll
+++ b/test/ThinLTO/X86/dicompositetype-unique.ll
@@ -1,7 +1,7 @@
; RUN: opt -module-summary -o %t1.bc %s
; RUN: opt -module-summary -o %t2.bc %S/Inputs/dicompositetype-unique.ll
-; RUN: llvm-lto2 %t1.bc %t2.bc -o %t --save-temps \
+; RUN: llvm-lto2 run %t1.bc %t2.bc -o %t --save-temps \
; RUN: -r %t1.bc,_foo,lx \
; RUN: -r %t1.bc,_main,plx \
; RUN: -r %t2.bc,_foo,plx
diff --git a/test/ThinLTO/X86/distributed_import.ll b/test/ThinLTO/X86/distributed_import.ll
index 0a3f9c07f257..82cc57c48303 100644
--- a/test/ThinLTO/X86/distributed_import.ll
+++ b/test/ThinLTO/X86/distributed_import.ll
@@ -1,15 +1,50 @@
-; RUN: opt -module-summary %s -o %t1.bc
-; RUN: opt -module-summary %p/Inputs/distributed_import.ll -o %t2.bc
+; Test distributed build thin link output from llvm-lto2
-; RUN: llvm-lto2 %t1.bc %t2.bc -o %t.o -save-temps \
+; Generate bitcode files with summary, as well as minimized bitcode without
+; the debug metadata for the thin link.
+; RUN: opt -thinlto-bc %s -thin-link-bitcode-file=%t1.thinlink.bc -o %t1.bc
+; RUN: opt -thinlto-bc %p/Inputs/distributed_import.ll -thin-link-bitcode-file=%t2.thinlink.bc -o %t2.bc
+
+; First perform the thin link on the normal bitcode file.
+; RUN: llvm-lto2 run %t1.bc %t2.bc -o %t.o -save-temps \
+; RUN: -thinlto-distributed-indexes \
+; RUN: -r=%t1.bc,g, \
+; RUN: -r=%t1.bc,f,px \
+; RUN: -r=%t2.bc,g,px
+; RUN: opt -function-import -summary-file %t1.bc.thinlto.bc %t1.bc -o %t1.out
+; RUN: opt -function-import -summary-file %t2.bc.thinlto.bc %t2.bc -o %t2.out
+; RUN: llvm-dis -o - %t2.out | FileCheck %s
+
+; Save the generated index files.
+; RUN: cp %t1.bc.thinlto.bc %t1.bc.thinlto.bc.orig
+; RUN: cp %t2.bc.thinlto.bc %t2.bc.thinlto.bc.orig
+
+; Copy the minimized bitcode to the regular bitcode path so the module
+; paths in the index are the same (save the regular bitcode for use again
+; further down).
+; RUN: cp %t1.bc %t1.bc.sv
+; RUN: cp %t1.thinlink.bc %t1.bc
+; RUN: cp %t2.bc %t2.bc.sv
+; RUN: cp %t2.thinlink.bc %t2.bc
+
+; Next perform the thin link on the minimized bitcode files, and compare dumps
+; of the resulting indexes to the above dumps to ensure they are identical.
+; RUN: rm -f %t1.bc.thinlto.bc %t2.bc.thinlto.bc
+; RUN: llvm-lto2 run %t1.bc %t2.bc -o %t.o -save-temps \
; RUN: -thinlto-distributed-indexes \
; RUN: -r=%t1.bc,g, \
; RUN: -r=%t1.bc,f,px \
; RUN: -r=%t2.bc,g,px
-; RUN: opt -function-import -summary-file %t1.bc.thinlto.bc %t1.bc -o %t1.out
+; RUN: diff %t1.bc.thinlto.bc.orig %t1.bc.thinlto.bc
+; RUN: diff %t2.bc.thinlto.bc.orig %t2.bc.thinlto.bc
+
+; Make sure importing occurs as expected
+; RUN: cp %t1.bc.sv %t1.bc
+; RUN: cp %t2.bc.sv %t2.bc
; RUN: opt -function-import -summary-file %t2.bc.thinlto.bc %t2.bc -o %t2.out
; RUN: llvm-dis -o - %t2.out | FileCheck %s
-; CHECK: @G.llvm.0
+
+; CHECK: @G.llvm.
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -20,3 +55,8 @@ entry:
call i32 (...) @g()
ret void
}
+
+!llvm.dbg.cu = !{}
+
+!1 = !{i32 2, !"Debug Info Version", i32 3}
+!llvm.module.flags = !{!1}
diff --git a/test/ThinLTO/X86/emit_imports.ll b/test/ThinLTO/X86/emit_imports.ll
index 64ea02d857e6..fc025f416ae1 100644
--- a/test/ThinLTO/X86/emit_imports.ll
+++ b/test/ThinLTO/X86/emit_imports.ll
@@ -22,7 +22,7 @@
; RUN: rm -f %t1.thinlto.bc %t1.bc.imports
; RUN: rm -f %t2.thinlto.bc %t2.bc.imports
; RUN: rm -f %t3.bc.thinlto.bc %t3.bc.imports
-; RUN: llvm-lto2 %t1.bc %t2.bc %t3.bc -o %t.o -save-temps \
+; RUN: llvm-lto2 run %t1.bc %t2.bc %t3.bc -o %t.o -save-temps \
; RUN: -thinlto-distributed-indexes \
; RUN: -r=%t1.bc,g, \
; RUN: -r=%t1.bc,f,px \
diff --git a/test/ThinLTO/X86/empty_module_with_cache.ll b/test/ThinLTO/X86/empty_module_with_cache.ll
index 3e16c395a893..76fe3e91d202 100644
--- a/test/ThinLTO/X86/empty_module_with_cache.ll
+++ b/test/ThinLTO/X86/empty_module_with_cache.ll
@@ -8,8 +8,8 @@
; RUN: ls %t.cache | count 3
; Verify that enabling caching is working with llvm-lto2
-; RUN: rm -Rf %t.cache && mkdir %t.cache
-; RUN: llvm-lto2 -o %t.o %t2.bc %t.bc -cache-dir %t.cache \
+; RUN: rm -Rf %t.cache
+; RUN: llvm-lto2 run -o %t.o %t2.bc %t.bc -cache-dir %t.cache \
; RUN: -r=%t2.bc,_main,plx
; RUN: ls %t.cache | count 2
@@ -25,8 +25,8 @@
; RUN: ls %t.cache | count 1
; Verify that caching is disabled for module without hash, with llvm-lto2
-; RUN: rm -Rf %t.cache && mkdir %t.cache
-; RUN: llvm-lto2 -o %t.o %t2.bc %t.bc -cache-dir %t.cache \
+; RUN: rm -Rf %t.cache
+; RUN: llvm-lto2 run -o %t.o %t2.bc %t.bc -cache-dir %t.cache \
; RUN: -r=%t2.bc,_main,plx
; RUN: ls %t.cache | count 0
diff --git a/test/ThinLTO/X86/error-newpm.ll b/test/ThinLTO/X86/error-newpm.ll
new file mode 100644
index 000000000000..9c2fd2c70d6d
--- /dev/null
+++ b/test/ThinLTO/X86/error-newpm.ll
@@ -0,0 +1,13 @@
+; RUN: opt -module-summary %s -o %t1.bc
+; RUN: not llvm-lto2 run %t1.bc -o %t.o \
+; RUN: -r=%t1.bc,_tinkywinky,pxl \
+; RUN: -lto-use-new-pm 2>&1 | FileCheck %s
+
+; CHECK: ThinLTO not supported with the new PM yet!
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.11.0"
+
+define void @tinkywinky() {
+ ret void
+}
diff --git a/test/ThinLTO/X86/funcimport2.ll b/test/ThinLTO/X86/funcimport2.ll
index c83370be9706..7338f9a9d98a 100644
--- a/test/ThinLTO/X86/funcimport2.ll
+++ b/test/ThinLTO/X86/funcimport2.ll
@@ -2,7 +2,7 @@
; RUN: opt -module-summary %s -o %t1.bc
; RUN: opt -module-summary %p/Inputs/funcimport2.ll -o %t2.bc
-; RUN: llvm-lto2 %t1.bc %t2.bc -o %t.o -save-temps \
+; RUN: llvm-lto2 run %t1.bc %t2.bc -o %t.o -save-temps \
; RUN: -r=%t1.bc,_foo,plx \
; RUN: -r=%t2.bc,_main,plx \
; RUN: -r=%t2.bc,_foo,l
@@ -11,7 +11,7 @@
; We shouldn't do any importing at -O0
; rm -f %t.o.1.3.import.bc
-; RUN: llvm-lto2 %t1.bc %t2.bc -o %t.o -save-temps \
+; RUN: llvm-lto2 run %t1.bc %t2.bc -o %t.o -save-temps \
; RUN: -O0 \
; RUN: -r=%t1.bc,_foo,plx \
; RUN: -r=%t2.bc,_main,plx \
diff --git a/test/ThinLTO/X86/internalize.ll b/test/ThinLTO/X86/internalize.ll
index 14ff6791561d..867e3e5a00ab 100644
--- a/test/ThinLTO/X86/internalize.ll
+++ b/test/ThinLTO/X86/internalize.ll
@@ -3,7 +3,7 @@
; RUN: llvm-lto -thinlto-action=internalize -thinlto-index %t.index.bc %t1.bc -o - | llvm-dis -o - | FileCheck %s --check-prefix=REGULAR
; RUN: llvm-lto -thinlto-action=internalize -thinlto-index %t.index.bc %t1.bc -o - --exported-symbol=foo | llvm-dis -o - | FileCheck %s --check-prefix=INTERNALIZE
-; RUN: llvm-lto2 %t1.bc -o %t.o -save-temps \
+; RUN: llvm-lto2 run %t1.bc -o %t.o -save-temps \
; RUN: -r=%t1.bc,_foo,pxl \
; RUN: -r=%t1.bc,_bar,pl \
; RUN: -r=%t1.bc,_linkonce_func,pl
diff --git a/test/ThinLTO/X86/lazyload_metadata.ll b/test/ThinLTO/X86/lazyload_metadata.ll
index bddabcdf9e72..f5b6b96ebf02 100644
--- a/test/ThinLTO/X86/lazyload_metadata.ll
+++ b/test/ThinLTO/X86/lazyload_metadata.ll
@@ -10,13 +10,13 @@
; RUN: llvm-lto -thinlto-action=import %t2.bc -thinlto-index=%t3.bc \
; RUN: -o /dev/null -stats \
; RUN: 2>&1 | FileCheck %s -check-prefix=LAZY
-; LAZY: 49 bitcode-reader - Number of Metadata records loaded
+; LAZY: 51 bitcode-reader - Number of Metadata records loaded
; LAZY: 2 bitcode-reader - Number of MDStrings loaded
; RUN: llvm-lto -thinlto-action=import %t2.bc -thinlto-index=%t3.bc \
; RUN: -o /dev/null -disable-ondemand-mds-loading -stats \
; RUN: 2>&1 | FileCheck %s -check-prefix=NOTLAZY
-; NOTLAZY: 58 bitcode-reader - Number of Metadata records loaded
+; NOTLAZY: 60 bitcode-reader - Number of Metadata records loaded
; NOTLAZY: 7 bitcode-reader - Number of MDStrings loaded
@@ -55,4 +55,4 @@ declare i1 @llvm.type.test(i8* %ptr, metadata %bitset) nounwind readnone
!6 = !{!9}
!7 = !{!"7"}
!8 = !{!"8"}
-!9 = !{!6} \ No newline at end of file
+!9 = !{!6}
diff --git a/test/ThinLTO/X86/linkonce_aliasee_ref_import.ll b/test/ThinLTO/X86/linkonce_aliasee_ref_import.ll
index 9b8cc7f7228a..9086d9824b7b 100644
--- a/test/ThinLTO/X86/linkonce_aliasee_ref_import.ll
+++ b/test/ThinLTO/X86/linkonce_aliasee_ref_import.ll
@@ -7,7 +7,7 @@
; RUN: llvm-nm -o - < %t2.bc.thinlto.o | FileCheck %s --check-prefix=NM2
; Import with instr limit to ensure only foo imported.
-; RUN: llvm-lto2 %t1.bc %t2.bc -o %t.o -save-temps \
+; RUN: llvm-lto2 run %t1.bc %t2.bc -o %t.o -save-temps \
; RUN: -r=%t1.bc,foo,pxl \
; RUN: -r=%t1.bc,baz,pxl \
; RUN: -r=%t1.bc,baz.clone,pxl \
diff --git a/test/ThinLTO/X86/local_name_conflict.ll b/test/ThinLTO/X86/local_name_conflict.ll
index 9cbb32ecf211..ea2922ed9b91 100644
--- a/test/ThinLTO/X86/local_name_conflict.ll
+++ b/test/ThinLTO/X86/local_name_conflict.ll
@@ -1,14 +1,25 @@
+; Test handling when two files with the same source file name contain
+; static functions with the same name (which will have the same GUID
+; in the combined index).
+
; Do setup work for all below tests: generate bitcode and combined index
; RUN: opt -module-summary -module-hash %s -o %t.bc
; RUN: opt -module-summary -module-hash %p/Inputs/local_name_conflict1.ll -o %t2.bc
; RUN: opt -module-summary -module-hash %p/Inputs/local_name_conflict2.ll -o %t3.bc
; RUN: llvm-lto -thinlto-action=thinlink -o %t4.bc %t.bc %t2.bc %t3.bc
-; Make sure foo is promoted and renamed without complaint in both
-; Inputs/local_name_conflict1.ll and Inputs/local_name_conflict2.ll
-; FIXME: Once the importer is fixed to import the correct copy of the
-; local, we should be able to verify that via an import action.
-; RUN: llvm-lto -thinlto-action=promote %t2.bc -thinlto-index=%t4.bc -o - | llvm-dis -o - | FileCheck %s --check-prefix=EXPORTSTATIC
+; This module will import b(), which should cause the copy of foo from
+; that module (%t3.bc) to be imported. Check that the imported reference's
+; promoted name matches the imported copy.
+; RUN: llvm-lto -thinlto-action=import %t.bc -thinlto-index=%t4.bc -o - | llvm-dis -o - | FileCheck %s --check-prefix=IMPORT
+; IMPORT: call i32 @foo.llvm.[[HASH:[0-9A-F]+]]
+; IMPORT: define available_externally hidden i32 @foo.llvm.[[HASH]]()
+
+; The copy in %t2.bc should not be exported/promoted/renamed
+; RUN: llvm-lto -thinlto-action=promote %t2.bc -thinlto-index=%t4.bc -o - | llvm-dis -o - | FileCheck %s --check-prefix=NOEXPORTSTATIC
+; NOEXPORTSTATIC: define internal i32 @foo()
+
+; Make sure foo is promoted and renamed without complaint in %t3.bc.
; RUN: llvm-lto -thinlto-action=promote %t3.bc -thinlto-index=%t4.bc -o - | llvm-dis -o - | FileCheck %s --check-prefix=EXPORTSTATIC
; EXPORTSTATIC: define hidden i32 @foo.llvm.
diff --git a/test/ThinLTO/X86/module_asm2.ll b/test/ThinLTO/X86/module_asm2.ll
index 02404062163d..b46f40196535 100644
--- a/test/ThinLTO/X86/module_asm2.ll
+++ b/test/ThinLTO/X86/module_asm2.ll
@@ -8,7 +8,7 @@
; RUN: llvm-nm %t1.bc.thinlto.o | FileCheck %s --check-prefix=NM0
; RUN: llvm-nm %t2.bc.thinlto.o | FileCheck %s --check-prefix=NM1
-; RUN: llvm-lto2 %t1.bc %t2.bc -o %t.o -save-temps \
+; RUN: llvm-lto2 run %t1.bc %t2.bc -o %t.o -save-temps \
; RUN: -r=%t1.bc,foo,plx \
; RUN: -r=%t1.bc,globalfunc,plx \
; RUN: -r=%t1.bc,globalfunc,plx \
diff --git a/test/ThinLTO/X86/module_asm_glob.ll b/test/ThinLTO/X86/module_asm_glob.ll
index bcc44c58c9f0..e27007524ce4 100644
--- a/test/ThinLTO/X86/module_asm_glob.ll
+++ b/test/ThinLTO/X86/module_asm_glob.ll
@@ -5,7 +5,7 @@
; RUN: llvm-nm %t1.bc.thinlto.o | FileCheck %s --check-prefix=NM0
; RUN: llvm-nm %t2.bc.thinlto.o | FileCheck %s --check-prefix=NM1
-; RUN: llvm-lto2 %t1.bc %t2.bc -o %t.o -save-temps \
+; RUN: llvm-lto2 run %t1.bc %t2.bc -o %t.o -save-temps \
; RUN: -r=%t1.bc,foo,lx \
; RUN: -r=%t1.bc,foo,plx \
; RUN: -r=%t1.bc,_simplefunction,pl \
diff --git a/test/ThinLTO/X86/reference_non_importable.ll b/test/ThinLTO/X86/reference_non_importable.ll
index a001666d28a2..5cf225e95de0 100644
--- a/test/ThinLTO/X86/reference_non_importable.ll
+++ b/test/ThinLTO/X86/reference_non_importable.ll
@@ -1,7 +1,7 @@
; RUN: opt -module-summary %s -o %t1.bc
; RUN: opt -module-summary %p/Inputs/reference_non_importable.ll -o %t2.bc
-; RUN: llvm-lto2 %t1.bc %t2.bc -o %t.o -save-temps \
+; RUN: llvm-lto2 run %t1.bc %t2.bc -o %t.o -save-temps \
; RUN: -r=%t1.bc,_foo,pxl \
; RUN: -r=%t1.bc,_b,pxl \
; RUN: -r=%t2.bc,_main,pxl \
diff --git a/test/ThinLTO/X86/tli-nobuiltin.ll b/test/ThinLTO/X86/tli-nobuiltin.ll
new file mode 100644
index 000000000000..9a480cba1156
--- /dev/null
+++ b/test/ThinLTO/X86/tli-nobuiltin.ll
@@ -0,0 +1,46 @@
+; Test -lto-freestanding option for libLTO.
+; RUN: llvm-as < %s > %t.bc
+
+; Regular run: expects fprintf to be turned into fwrite
+; RUN: llvm-lto %t.bc -exported-symbol=_foo -o %t.o
+; RUN: llvm-nm %t.o | FileCheck %s --check-prefix=LTO
+; LTO: fwrite
+
+; Freestanding run: expects fprintf to NOT be turned into fwrite
+; RUN: llvm-lto %t.bc -lto-freestanding -exported-symbol=_foo -o %t.o
+; RUN: llvm-nm %t.o | FileCheck %s --check-prefix=LTO-FREESTANDING
+; LTO-FREESTANDING: fprintf
+
+; Same with ThinLTO now.
+; RUN: opt -module-hash -module-summary %s -o %t.bc
+
+; Regular run: expects fprintf to be turned into fwrite
+; RUN: llvm-lto -exported-symbol=_foo -thinlto-action=run %t.bc
+; RUN: llvm-nm %t.bc.thinlto.o | FileCheck %s --check-prefix=ThinLTO
+; ThinLTO: fwrite
+
+; Freestanding run: expects fprintf to NOT be turned into fwrite
+; RUN: llvm-lto -lto-freestanding -exported-symbol=_foo -thinlto-action=run %t.bc
+; RUN: llvm-nm %t.bc.thinlto.o | FileCheck %s --check-prefix=ThinLTO-FREESTANDING
+; ThinLTO-FREESTANDING: fprintf
+
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.11.0"
+
+declare i32 @fprintf(%FILE*, i8*, ...)
+
+%FILE = type { }
+
+@hello_world = constant [13 x i8] c"hello world\0A\00"
+@percent_s = constant [3 x i8] c"%s\00"
+
+; Check fprintf(fp, "%s", str) -> fwrite(str, fp) only when builtins are enabled
+
+define void @foo(%FILE* %fp) {
+ %fmt = getelementptr [3 x i8], [3 x i8]* @percent_s, i32 0, i32 0
+ %str = getelementptr [13 x i8], [13 x i8]* @hello_world, i32 0, i32 0
+ call i32 (%FILE*, i8*, ...) @fprintf(%FILE* %fp, i8* %fmt, i8* %str)
+ ret void
+}
+
diff --git a/test/ThinLTO/X86/weak_resolution.ll b/test/ThinLTO/X86/weak_resolution.ll
index 685c91cbc9f5..612cd6c206b5 100644
--- a/test/ThinLTO/X86/weak_resolution.ll
+++ b/test/ThinLTO/X86/weak_resolution.ll
@@ -53,7 +53,7 @@ entry:
}
; MOD1: define weak void @linkoncefunc()
; MOD1-INT: define weak void @linkoncefunc()
-; MOD2: define linkonce void @linkoncefunc()
+; MOD2: declare void @linkoncefunc()
define linkonce void @linkoncefunc() #0 {
entry:
ret void
@@ -65,7 +65,7 @@ entry:
ret void
}
; MOD1: define weak void @weakfunc()
-; MOD2: define weak void @weakfunc()
+; MOD2: declare void @weakfunc()
define weak void @weakfunc() #0 {
entry:
ret void
diff --git a/test/Transforms/ADCE/delete-profiling-calls-to-constant.ll b/test/Transforms/ADCE/delete-profiling-calls-to-constant.ll
index a61e8f8caccb..804b3dd67f2a 100644
--- a/test/Transforms/ADCE/delete-profiling-calls-to-constant.ll
+++ b/test/Transforms/ADCE/delete-profiling-calls-to-constant.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -adce | FileCheck %s
-; RUN: opt < %s -passes=adce | FileCheck %s
+; RUN: opt < %s -adce -S | FileCheck %s
+; RUN: opt < %s -passes=adce -S | FileCheck %s
; Verify that a call to instrument a constant is deleted.
@@ -7,7 +7,7 @@
@__profd_foo = private global { i64, i64, i64*, i8*, i8*, i32, [1 x i16] } { i64 6699318081062747564, i64 0, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc_foo, i32 0, i32 0), i8* bitcast (i32 ()* @foo to i8*), i8* null, i32 1, [1 x i16] [i16 1] }, section "__llvm_prf_data", align 8
define i32 @foo() {
-; CHECK-NOT: __llvm_profile_instrument_target
+; CHECK-NOT: call void @__llvm_profile_instrument_target
entry:
tail call void @__llvm_profile_instrument_target(i64 ptrtoint (i32 (i32)* @bar to i64), i8* bitcast ({ i64, i64, i64*, i8*, i8*, i32, [1 x i16] }* @__profd_foo to i8*), i32 0)
%call = tail call i32 @bar(i32 21)
diff --git a/test/Transforms/AddDiscriminators/basic.ll b/test/Transforms/AddDiscriminators/basic.ll
index 801eda2b0665..a781c0d409bc 100644
--- a/test/Transforms/AddDiscriminators/basic.ll
+++ b/test/Transforms/AddDiscriminators/basic.ll
@@ -58,5 +58,5 @@ attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointe
; CHECK: ![[FOO:[0-9]+]] = distinct !DISubprogram(name: "foo"
; CHECK: ![[BLOCK:[0-9]+]] = distinct !DILexicalBlock(scope: ![[FOO]],{{.*}} line: 3)
; CHECK: ![[THEN]] = !DILocation(line: 3, scope: ![[BLOCKFILE:[0-9]+]])
-; CHECK: ![[BLOCKFILE]] = !DILexicalBlockFile(scope: ![[BLOCK]],{{.*}} discriminator: 1)
+; CHECK: ![[BLOCKFILE]] = !DILexicalBlockFile(scope: ![[BLOCK]],{{.*}} discriminator: 2)
; CHECK: ![[END]] = !DILocation(line: 4, scope: ![[FOO]])
diff --git a/test/Transforms/AddDiscriminators/call-nested.ll b/test/Transforms/AddDiscriminators/call-nested.ll
index 481d6f260047..4d5145abafe1 100644
--- a/test/Transforms/AddDiscriminators/call-nested.ll
+++ b/test/Transforms/AddDiscriminators/call-nested.ll
@@ -47,4 +47,4 @@ attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-
!14 = !DILocation(line: 4, column: 3, scope: !4)
; CHECK: ![[CALL2]] = !DILocation(line: 4, column: 10, scope: ![[CALL2BLOCK:[0-9]+]])
-; CHECK: ![[CALL2BLOCK]] = !DILexicalBlockFile({{.*}} discriminator: 1)
+; CHECK: ![[CALL2BLOCK]] = !DILexicalBlockFile({{.*}} discriminator: 2)
diff --git a/test/Transforms/AddDiscriminators/call.ll b/test/Transforms/AddDiscriminators/call.ll
index 847a6ad4dc3a..49aca5a488f5 100644
--- a/test/Transforms/AddDiscriminators/call.ll
+++ b/test/Transforms/AddDiscriminators/call.ll
@@ -5,7 +5,7 @@
; #1 void bar();
; #2
; #3 void foo() {
-; #4 bar();bar()/*discriminator 1*/;bar()/*discriminator 2*/;
+; #4 bar();bar()/*discriminator 2*/;bar()/*discriminator 4*/;
; #5 }
; Function Attrs: uwtable
@@ -14,8 +14,8 @@ define void @_Z3foov() #0 !dbg !4 {
; CHECK: call void @_Z3barv(), !dbg ![[CALL0:[0-9]+]]
%a = alloca [100 x i8], align 16
%b = bitcast [100 x i8]* %a to i8*
- call void @llvm.lifetime.start(i64 100, i8* %b), !dbg !11
- call void @llvm.lifetime.end(i64 100, i8* %b), !dbg !11
+ call void @llvm.lifetime.start.p0i8(i64 100, i8* %b), !dbg !11
+ call void @llvm.lifetime.end.p0i8(i64 100, i8* %b), !dbg !11
call void @_Z3barv(), !dbg !11
; CHECK: call void @_Z3barv(), !dbg ![[CALL1:[0-9]+]]
call void @_Z3barv(), !dbg !12
@@ -24,8 +24,8 @@ define void @_Z3foov() #0 !dbg !4 {
}
declare void @_Z3barv() #1
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind argmemonly
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind argmemonly
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind argmemonly
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind argmemonly
attributes #0 = { uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
@@ -49,6 +49,6 @@ attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-
!13 = !DILocation(line: 5, column: 1, scope: !4)
; CHECK: ![[CALL1]] = !DILocation(line: 4, column: 9, scope: ![[CALL1BLOCK:[0-9]+]])
-; CHECK: ![[CALL1BLOCK]] = !DILexicalBlockFile({{.*}} discriminator: 1)
+; CHECK: ![[CALL1BLOCK]] = !DILexicalBlockFile({{.*}} discriminator: 2)
; CHECK: ![[CALL2]] = !DILocation(line: 4, column: 15, scope: ![[CALL2BLOCK:[0-9]+]])
-; CHECK: ![[CALL2BLOCK]] = !DILexicalBlockFile({{.*}} discriminator: 2)
+; CHECK: ![[CALL2BLOCK]] = !DILexicalBlockFile({{.*}} discriminator: 4)
diff --git a/test/Transforms/AddDiscriminators/diamond.ll b/test/Transforms/AddDiscriminators/diamond.ll
index b3afe7285472..307e95f41e18 100644
--- a/test/Transforms/AddDiscriminators/diamond.ll
+++ b/test/Transforms/AddDiscriminators/diamond.ll
@@ -10,7 +10,7 @@
; #6 }
; bar(5): discriminator 0
-; bar(3): discriminator 1
+; bar(3): discriminator 2
; Function Attrs: uwtable
define void @_Z3fooi(i32 %i) #0 !dbg !4 {
@@ -69,4 +69,4 @@ attributes #2 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-
!20 = !DILocation(line: 6, column: 1, scope: !4)
; CHECK: ![[ELSE]] = !DILocation(line: 5, column: 18, scope: ![[ELSEBLOCK:[0-9]+]])
-; CHECK: ![[ELSEBLOCK]] = !DILexicalBlockFile({{.*}} discriminator: 1)
+; CHECK: ![[ELSEBLOCK]] = !DILexicalBlockFile({{.*}} discriminator: 2)
diff --git a/test/Transforms/AddDiscriminators/first-only.ll b/test/Transforms/AddDiscriminators/first-only.ll
index 1bd8dae5d05c..dd2117a5b187 100644
--- a/test/Transforms/AddDiscriminators/first-only.ll
+++ b/test/Transforms/AddDiscriminators/first-only.ll
@@ -69,7 +69,7 @@ attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointe
!12 = !DILocation(line: 3, scope: !13)
!13 = distinct !DILexicalBlock(line: 3, column: 0, file: !1, scope: !11)
-; CHECK: !DILexicalBlockFile(scope: ![[BLOCK2:[0-9]+]],{{.*}} discriminator: 1)
+; CHECK: !DILexicalBlockFile(scope: ![[BLOCK2:[0-9]+]],{{.*}} discriminator: 2)
!14 = !DILocation(line: 4, scope: !13)
; CHECK: ![[BLOCK2]] = distinct !DILexicalBlock(scope: ![[BLOCK1]],{{.*}} line: 3)
diff --git a/test/Transforms/AddDiscriminators/inlined.ll b/test/Transforms/AddDiscriminators/inlined.ll
index 2e8ea97348d0..226e903ee212 100644
--- a/test/Transforms/AddDiscriminators/inlined.ll
+++ b/test/Transforms/AddDiscriminators/inlined.ll
@@ -62,8 +62,8 @@ attributes #3 = { nounwind readnone }
!12 = distinct !DISubprogram(name: "g", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: true, unit: !0, variables: !2)
!13 = distinct !DILocation(line: 1, column: 17, scope: !14)
; CHECK: ![[BF:.*]] = !DILexicalBlockFile(scope: ![[LB1:[0-9]+]],
-; CHECK-SAME: discriminator: 1)
-!14 = !DILexicalBlockFile(scope: !15, file: !1, discriminator: 1)
+; CHECK-SAME: discriminator: 2)
+!14 = !DILexicalBlockFile(scope: !15, file: !1, discriminator: 2)
; CHECK: ![[LB1]] = distinct !DILexicalBlock(scope: ![[LB2:[0-9]+]],
; CHECK-SAME: line: 1, column: 16)
!15 = distinct !DILexicalBlock(scope: !16, file: !1, line: 1, column: 16)
diff --git a/test/Transforms/AddDiscriminators/memcpy-discriminator.ll b/test/Transforms/AddDiscriminators/memcpy-discriminator.ll
new file mode 100644
index 000000000000..00642d29502e
--- /dev/null
+++ b/test/Transforms/AddDiscriminators/memcpy-discriminator.ll
@@ -0,0 +1,104 @@
+; RUN: opt < %s -add-discriminators -sroa -S | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Test case obtained from the following C code:
+
+; struct A {
+; int field1;
+; short field2;
+; };
+;
+; struct B {
+; struct A field1;
+; int field2;
+; };
+;
+;
+; extern struct B g_b;
+; extern int bar(struct B b, int c);
+;
+; int foo(int cond) {
+; int result = cond ? bar(g_b, 33) : 42;
+; return result;
+; }
+
+; In this test, global variable g_b is passed by copy to function bar. That
+; copy is located on the stack (see alloca %g_b.coerce), and it is initialized
+; by a memcpy call.
+;
+; SROA would split alloca %g_b.coerce into two (smaller disjoint) slices:
+; slice [0,8) and slice [8, 12). Users of the original alloca are rewritten
+; as users of the new alloca slices.
+; In particular, the memcpy is rewritten by SROA as two load/store pairs.
+;
+; Later on, mem2reg successfully promotes the new alloca slices to registers,
+; and loads %3 and %5 are made redundant by the loads obtained from the memcpy
+; intrinsic expansion.
+;
+; If pass AddDiscriminators doesn't assign a discriminator to the intrinsic
+; memcpy call, then the loads obtained from the memcpy expansion would not have
+; a correct discriminator.
+;
+; This test checks that the two new loads inserted by SROA in %cond.true
+; correctly reference a debug location with a non-zero discriminator. This test
+; also checks that the same discriminator is used by all instructions from
+; basic block %cond.true.
+
+%struct.B = type { %struct.A, i32 }
+%struct.A = type { i32, i16 }
+
+@g_b = external global %struct.B, align 4
+
+define i32 @foo(i32 %cond) #0 !dbg !5 {
+entry:
+ %g_b.coerce = alloca { i64, i32 }, align 4
+ %tobool = icmp ne i32 %cond, 0, !dbg !7
+ br i1 %tobool, label %cond.true, label %cond.end, !dbg !7
+
+cond.true:
+; CHECK-LABEL: cond.true:
+; CHECK: load i64, {{.*}}, !dbg ![[LOC:[0-9]+]]
+; CHECK-NEXT: load i32, {{.*}}, !dbg ![[LOC]]
+; CHECK-NEXT: %call = call i32 @bar({{.*}}), !dbg ![[LOC]]
+; CHECK-NEXT: br label %cond.end, !dbg ![[BR_LOC:[0-9]+]]
+
+; CHECK-DAG: ![[LOC]] = !DILocation(line: 16, column: 23, scope: ![[SCOPE:[0-9]+]])
+; CHECK-DAG: ![[SCOPE]] = !DILexicalBlockFile({{.*}}, discriminator: 2)
+; CHECK-DAG: ![[BR_LOC]] = !DILocation(line: 16, column: 16, scope: ![[SCOPE]])
+
+ %0 = bitcast { i64, i32 }* %g_b.coerce to i8*, !dbg !8
+ %1 = bitcast %struct.B* @g_b to i8*, !dbg !8
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 12, i32 4, i1 false), !dbg !8
+ %2 = getelementptr inbounds { i64, i32 }, { i64, i32 }* %g_b.coerce, i32 0, i32 0, !dbg !8
+ %3 = load i64, i64* %2, align 4, !dbg !8
+ %4 = getelementptr inbounds { i64, i32 }, { i64, i32 }* %g_b.coerce, i32 0, i32 1, !dbg !8
+ %5 = load i32, i32* %4, align 4, !dbg !8
+ %call = call i32 @bar(i64 %3, i32 %5, i32 33), !dbg !8
+ br label %cond.end, !dbg !7
+
+cond.end: ; preds = %entry, %cond.true
+ %cond1 = phi i32 [ %call, %cond.true ], [ 42, %entry ], !dbg !7
+ ret i32 %cond1, !dbg !9
+}
+
+declare i32 @bar(i64, i32, i32)
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) #1
+
+attributes #0 = { noinline nounwind uwtable }
+attributes #1 = { argmemonly nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, isOptimized: false, runtimeVersion: 0, emissionKind: LineTablesOnly, enums: !2)
+!1 = !DIFile(filename: "test.c", directory: ".")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 15, type: !6, isLocal: false, isDefinition: true, scopeLine: 15, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!6 = !DISubroutineType(types: !2)
+!7 = !DILocation(line: 16, column: 16, scope: !5)
+!8 = !DILocation(line: 16, column: 23, scope: !5)
+!9 = !DILocation(line: 17, column: 3, scope: !5)
diff --git a/test/Transforms/AddDiscriminators/multiple.ll b/test/Transforms/AddDiscriminators/multiple.ll
index 387689caddff..b4c353cf00f1 100644
--- a/test/Transforms/AddDiscriminators/multiple.ll
+++ b/test/Transforms/AddDiscriminators/multiple.ll
@@ -67,6 +67,6 @@ attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointe
!12 = !DILocation(line: 4, scope: !4)
; CHECK: ![[THEN]] = !DILocation(line: 3, scope: ![[THENBLOCK:[0-9]+]])
-; CHECK: ![[THENBLOCK]] = !DILexicalBlockFile(scope: ![[SCOPE:[0-9]+]],{{.*}} discriminator: 1)
+; CHECK: ![[THENBLOCK]] = !DILexicalBlockFile(scope: ![[SCOPE:[0-9]+]],{{.*}} discriminator: 2)
; CHECK: ![[ELSE]] = !DILocation(line: 3, scope: ![[ELSEBLOCK:[0-9]+]])
-; CHECK: ![[ELSEBLOCK]] = !DILexicalBlockFile(scope: ![[SCOPE]],{{.*}} discriminator: 2)
+; CHECK: ![[ELSEBLOCK]] = !DILexicalBlockFile(scope: ![[SCOPE]],{{.*}} discriminator: 4)
diff --git a/test/Transforms/AddDiscriminators/oneline.ll b/test/Transforms/AddDiscriminators/oneline.ll
index aa52ae42ee47..724574a24ddf 100644
--- a/test/Transforms/AddDiscriminators/oneline.ll
+++ b/test/Transforms/AddDiscriminators/oneline.ll
@@ -7,9 +7,9 @@
; #3 }
; i == 3: discriminator 0
-; i == 5: discriminator 1
-; return 100: discriminator 2
-; return 99: discriminator 3
+; i == 5: discriminator 2
+; return 100: discriminator 4
+; return 99: discriminator 6
define i32 @_Z3fooi(i32 %i) #0 !dbg !4 {
%1 = alloca i32, align 4
@@ -91,11 +91,11 @@ attributes #1 = { nounwind readnone }
; CHECK: ![[F:.*]] = distinct !DISubprogram(name: "foo",
; CHECK: ![[IF:.*]] = distinct !DILexicalBlock(scope: ![[F]],{{.*}}line: 2, column: 7)
; CHECK: ![[THEN1]] = !DILocation(line: 2, column: 17, scope: ![[THENBLOCK:[0-9]+]])
-; CHECK: ![[THENBLOCK]] = !DILexicalBlockFile(scope: ![[IF]],{{.*}} discriminator: 1)
+; CHECK: ![[THENBLOCK]] = !DILexicalBlockFile(scope: ![[IF]],{{.*}} discriminator: 2)
; CHECK: ![[THEN2]] = !DILocation(line: 2, column: 19, scope: ![[THENBLOCK]])
; CHECK: ![[THEN3]] = !DILocation(line: 2, column: 7, scope: ![[BRBLOCK:[0-9]+]])
-; CHECK: ![[BRBLOCK]] = !DILexicalBlockFile(scope: ![[F]],{{.*}} discriminator: 1)
+; CHECK: ![[BRBLOCK]] = !DILexicalBlockFile(scope: ![[F]],{{.*}} discriminator: 2)
; CHECK: ![[ELSE]] = !DILocation(line: 2, column: 25, scope: ![[ELSEBLOCK:[0-9]+]])
-; CHECK: ![[ELSEBLOCK]] = !DILexicalBlockFile(scope: ![[IF]],{{.*}} discriminator: 2)
+; CHECK: ![[ELSEBLOCK]] = !DILexicalBlockFile(scope: ![[IF]],{{.*}} discriminator: 4)
; CHECK: ![[COMBINE]] = !DILocation(line: 2, column: 42, scope: ![[COMBINEBLOCK:[0-9]+]])
-; CHECK: ![[COMBINEBLOCK]] = !DILexicalBlockFile(scope: ![[IF]],{{.*}} discriminator: 3)
+; CHECK: ![[COMBINEBLOCK]] = !DILexicalBlockFile(scope: ![[IF]],{{.*}} discriminator: 6)
diff --git a/test/Transforms/ArgumentPromotion/2008-07-02-array-indexing.ll b/test/Transforms/ArgumentPromotion/2008-07-02-array-indexing.ll
index 267a6c045974..fac84d092df3 100644
--- a/test/Transforms/ArgumentPromotion/2008-07-02-array-indexing.ll
+++ b/test/Transforms/ArgumentPromotion/2008-07-02-array-indexing.ll
@@ -1,25 +1,30 @@
-; RUN: opt < %s -argpromotion -S > %t
-; RUN: cat %t | grep "define.*@callee(.*i32\*"
+; RUN: opt < %s -argpromotion -S | FileCheck %s
; PR2498
; This test tries to convince argpromotion to promote the load from %A + 2,
; because there is a load of %A in the entry block.
define internal i32 @callee(i1 %C, i32* %A) {
+; CHECK-LABEL: define internal i32 @callee(
+; CHECK: i1 %C, i32* %A)
entry:
- ; Unconditonally load the element at %A
- %A.0 = load i32, i32* %A
- br i1 %C, label %T, label %F
+ ; Unconditionally load the element at %A
+ %A.0 = load i32, i32* %A
+ br i1 %C, label %T, label %F
+
T:
- ret i32 %A.0
+ ret i32 %A.0
+
F:
- ; Load the element at offset two from %A. This should not be promoted!
- %A.2 = getelementptr i32, i32* %A, i32 2
- %R = load i32, i32* %A.2
- ret i32 %R
+ ; Load the element at offset two from %A. This should not be promoted!
+ %A.2 = getelementptr i32, i32* %A, i32 2
+ %R = load i32, i32* %A.2
+ ret i32 %R
}
define i32 @foo() {
+; CHECK-LABEL: define i32 @foo
%X = call i32 @callee(i1 false, i32* null) ; <i32> [#uses=1]
+; CHECK: call i32 @callee(i1 false, i32* null)
ret i32 %X
}
diff --git a/test/Transforms/ArgumentPromotion/aggregate-promote.ll b/test/Transforms/ArgumentPromotion/aggregate-promote.ll
index 3f521bace7f3..b0bab7784edb 100644
--- a/test/Transforms/ArgumentPromotion/aggregate-promote.ll
+++ b/test/Transforms/ArgumentPromotion/aggregate-promote.ll
@@ -1,24 +1,31 @@
-; RUN: opt < %s -argpromotion -instcombine -S | not grep load
-target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+; RUN: opt < %s -argpromotion -S | FileCheck %s
+; RUN: opt < %s -passes=argpromotion -S | FileCheck %s
-%QuadTy = type { i32, i32, i32, i32 }
-@G = constant %QuadTy {
- i32 0,
- i32 0,
- i32 17,
- i32 25 } ; <%QuadTy*> [#uses=1]
+%T = type { i32, i32, i32, i32 }
+@G = constant %T { i32 0, i32 0, i32 17, i32 25 }
-define internal i32 @test(%QuadTy* %P) {
- %A = getelementptr %QuadTy, %QuadTy* %P, i64 0, i32 3 ; <i32*> [#uses=1]
- %B = getelementptr %QuadTy, %QuadTy* %P, i64 0, i32 2 ; <i32*> [#uses=1]
- %a = load i32, i32* %A ; <i32> [#uses=1]
- %b = load i32, i32* %B ; <i32> [#uses=1]
- %V = add i32 %a, %b ; <i32> [#uses=1]
- ret i32 %V
+define internal i32 @test(%T* %p) {
+; CHECK-LABEL: define internal i32 @test(
+; CHECK: i32 %{{.*}}, i32 %{{.*}})
+entry:
+ %a.gep = getelementptr %T, %T* %p, i64 0, i32 3
+ %b.gep = getelementptr %T, %T* %p, i64 0, i32 2
+ %a = load i32, i32* %a.gep
+ %b = load i32, i32* %b.gep
+; CHECK-NOT: load
+ %v = add i32 %a, %b
+ ret i32 %v
+; CHECK: ret i32
}
define i32 @caller() {
- %V = call i32 @test( %QuadTy* @G ) ; <i32> [#uses=1]
- ret i32 %V
+; CHECK-LABEL: define i32 @caller(
+entry:
+ %v = call i32 @test(%T* @G)
+; CHECK: %[[B_GEP:.*]] = getelementptr %T, %T* @G, i64 0, i32 2
+; CHECK: %[[B:.*]] = load i32, i32* %[[B_GEP]]
+; CHECK: %[[A_GEP:.*]] = getelementptr %T, %T* @G, i64 0, i32 3
+; CHECK: %[[A:.*]] = load i32, i32* %[[A_GEP]]
+; CHECK: call i32 @test(i32 %[[B]], i32 %[[A]])
+ ret i32 %v
}
-
diff --git a/test/Transforms/ArgumentPromotion/attrs.ll b/test/Transforms/ArgumentPromotion/attrs.ll
index 46128f93c240..29cef50fe802 100644
--- a/test/Transforms/ArgumentPromotion/attrs.ll
+++ b/test/Transforms/ArgumentPromotion/attrs.ll
@@ -1,25 +1,52 @@
-; RUN: opt < %s -argpromotion -S | grep zeroext
+; RUN: opt < %s -argpromotion -S | FileCheck %s
+; RUN: opt < %s -passes=argpromotion -S | FileCheck %s
- %struct.ss = type { i32, i64 }
+%struct.ss = type { i32, i64 }
-define internal void @f(%struct.ss* byval %b, i32* byval %X, i32 %i) nounwind {
+; Don't drop 'byval' on %X here.
+define internal void @f(%struct.ss* byval %b, i32* byval %X, i32 %i) nounwind {
+; CHECK-LABEL: define internal void @f(
+; CHECK: i32 %[[B0:.*]], i64 %[[B1:.*]], i32* byval %X, i32 %i)
entry:
- %tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0
- %tmp1 = load i32, i32* %tmp, align 4
- %tmp2 = add i32 %tmp1, 1
- store i32 %tmp2, i32* %tmp, align 4
+; CHECK: %[[B:.*]] = alloca %struct.ss
+; CHECK: %[[B_GEP0:.*]] = getelementptr %struct.ss, %struct.ss* %[[B]], i32 0, i32 0
+; CHECK: store i32 %[[B0]], i32* %[[B_GEP0]]
+; CHECK: %[[B_GEP1:.*]] = getelementptr %struct.ss, %struct.ss* %[[B]], i32 0, i32 1
+; CHECK: store i64 %[[B1]], i64* %[[B_GEP1]]
- store i32 0, i32* %X
- ret void
+ %tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0
+; CHECK: %[[TMP:.*]] = getelementptr %struct.ss, %struct.ss* %[[B]], i32 0, i32 0
+ %tmp1 = load i32, i32* %tmp, align 4
+; CHECK: %[[TMP1:.*]] = load i32, i32* %[[TMP]]
+ %tmp2 = add i32 %tmp1, 1
+; CHECK: %[[TMP2:.*]] = add i32 %[[TMP1]], 1
+ store i32 %tmp2, i32* %tmp, align 4
+; CHECK: store i32 %[[TMP2]], i32* %[[TMP]]
+
+ store i32 0, i32* %X
+; CHECK: store i32 0, i32* %X
+ ret void
}
+; Also make sure we don't drop the call zeroext attribute.
define i32 @test(i32* %X) {
+; CHECK-LABEL: define i32 @test(
entry:
- %S = alloca %struct.ss ; <%struct.ss*> [#uses=4]
- %tmp1 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 0 ; <i32*> [#uses=1]
- store i32 1, i32* %tmp1, align 8
- %tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1 ; <i64*> [#uses=1]
- store i64 2, i64* %tmp4, align 4
- call void @f( %struct.ss* byval %S, i32* byval %X, i32 zeroext 0)
- ret i32 0
+ %S = alloca %struct.ss
+; CHECK: %[[S:.*]] = alloca %struct.ss
+ %tmp1 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 0
+ store i32 1, i32* %tmp1, align 8
+; CHECK: store i32 1
+ %tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1
+ store i64 2, i64* %tmp4, align 4
+; CHECK: store i64 2
+
+ call void @f( %struct.ss* byval %S, i32* byval %X, i32 zeroext 0)
+; CHECK: %[[S_GEP0:.*]] = getelementptr %struct.ss, %struct.ss* %[[S]], i32 0, i32 0
+; CHECK: %[[S0:.*]] = load i32, i32* %[[S_GEP0]]
+; CHECK: %[[S_GEP1:.*]] = getelementptr %struct.ss, %struct.ss* %[[S]], i32 0, i32 1
+; CHECK: %[[S1:.*]] = load i64, i64* %[[S_GEP1]]
+; CHECK: call void @f(i32 %[[S0]], i64 %[[S1]], i32* byval %X, i32 zeroext 0)
+
+ ret i32 0
}
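+
+; Summary sketch (names illustrative; the CHECK lines above verify the real
+; output): the promoted pair is expected to look like
+;   define internal void @f(i32 %b.0, i64 %b.1, i32* byval %X, i32 %i)
+;   call void @f(i32 %s.0, i64 %s.1, i32* byval %X, i32 zeroext 0)
+; with the struct fields loaded at each call site and the struct rebuilt in
+; an alloca inside the callee.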
diff --git a/test/Transforms/ArgumentPromotion/byval-2.ll b/test/Transforms/ArgumentPromotion/byval-2.ll
index 6c0288f5f989..3e1fee8badd9 100644
--- a/test/Transforms/ArgumentPromotion/byval-2.ll
+++ b/test/Transforms/ArgumentPromotion/byval-2.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -argpromotion -S | FileCheck %s
+; RUN: opt < %s -passes=argpromotion -S | FileCheck %s
; Arg promotion eliminates the struct argument.
; FIXME: Should it eliminate the i32* argument?
diff --git a/test/Transforms/ArgumentPromotion/byval.ll b/test/Transforms/ArgumentPromotion/byval.ll
index b091b09a3597..58475fc89607 100644
--- a/test/Transforms/ArgumentPromotion/byval.ll
+++ b/test/Transforms/ArgumentPromotion/byval.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -argpromotion -S | FileCheck %s
+; RUN: opt < %s -passes=argpromotion -S | FileCheck %s
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
diff --git a/test/Transforms/ArgumentPromotion/callgraph-update.ll b/test/Transforms/ArgumentPromotion/callgraph-update.ll
deleted file mode 100644
index 989043d7ea58..000000000000
--- a/test/Transforms/ArgumentPromotion/callgraph-update.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: opt < %s -argpromotion -simplifycfg -constmerge | llvm-dis
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin10.0"
-
-%struct.VEC2 = type { double, double, double }
-%struct.VERTEX = type { %struct.VEC2, %struct.VERTEX*, %struct.VERTEX* }
-%struct.edge_rec = type { %struct.VERTEX*, %struct.edge_rec*, i32, i8* }
-
-declare %struct.edge_rec* @alloc_edge() nounwind ssp
-
-define i64 @build_delaunay(%struct.VERTEX* %tree, %struct.VERTEX* %extra) nounwind ssp {
-entry:
- br i1 undef, label %bb11, label %bb12
-
-bb11: ; preds = %bb10
- %a = call %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=0]
- ret i64 123
-
-bb12: ; preds = %bb10
- %b = call %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=1]
- %c = ptrtoint %struct.edge_rec* %b to i64
- ret i64 %c
-}
diff --git a/test/Transforms/ArgumentPromotion/chained.ll b/test/Transforms/ArgumentPromotion/chained.ll
index 6ba2e8d48694..028c6c426e52 100644
--- a/test/Transforms/ArgumentPromotion/chained.ll
+++ b/test/Transforms/ArgumentPromotion/chained.ll
@@ -1,17 +1,27 @@
-; RUN: opt < %s -argpromotion -instcombine -S | not grep load
-target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+; RUN: opt < %s -argpromotion -S | FileCheck %s
+; RUN: opt < %s -passes=argpromotion -S | FileCheck %s
-@G1 = constant i32 0 ; <i32*> [#uses=1]
-@G2 = constant i32* @G1 ; <i32**> [#uses=1]
+@G1 = constant i32 0
+@G2 = constant i32* @G1
-define internal i32 @test(i32** %X) {
- %Y = load i32*, i32** %X ; <i32*> [#uses=1]
- %X.upgrd.1 = load i32, i32* %Y ; <i32> [#uses=1]
- ret i32 %X.upgrd.1
+define internal i32 @test(i32** %x) {
+; CHECK-LABEL: define internal i32 @test(
+; CHECK: i32 %{{.*}})
+entry:
+ %y = load i32*, i32** %x
+ %z = load i32, i32* %y
+; CHECK-NOT: load
+ ret i32 %z
+; CHECK: ret i32
}
-define i32 @caller(i32** %P) {
- %X = call i32 @test( i32** @G2 ) ; <i32> [#uses=1]
- ret i32 %X
+define i32 @caller() {
+; CHECK-LABEL: define i32 @caller()
+entry:
+ %x = call i32 @test(i32** @G2)
+; CHECK: %[[Y:.*]] = load i32*, i32** @G2
+; CHECK: %[[Z:.*]] = load i32, i32* %[[Y]]
+; CHECK: call i32 @test(i32 %[[Z]])
+ ret i32 %x
}
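+
+; Summary sketch (illustrative; matches the CHECK lines above): both levels
+; of indirection are promoted at once, so
+;   define internal i32 @test(i32** %x)
+; is expected to become
+;   define internal i32 @test(i32 %z)
+; with the two chained loads re-materialized at the call site.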
diff --git a/test/Transforms/ArgumentPromotion/control-flow.ll b/test/Transforms/ArgumentPromotion/control-flow.ll
index cdff36eb83c0..c3fe0c00e877 100644
--- a/test/Transforms/ArgumentPromotion/control-flow.ll
+++ b/test/Transforms/ArgumentPromotion/control-flow.ll
@@ -1,19 +1,27 @@
-; RUN: opt < %s -argpromotion -S | \
-; RUN: not grep "load i32* null"
+; RUN: opt < %s -argpromotion -S | FileCheck %s
+; RUN: opt < %s -passes=argpromotion -S | FileCheck %s
+; Don't promote around control flow.
define internal i32 @callee(i1 %C, i32* %P) {
- br i1 %C, label %T, label %F
+; CHECK-LABEL: define internal i32 @callee(
+; CHECK: i1 %C, i32* %P)
+entry:
+ br i1 %C, label %T, label %F
-T: ; preds = %0
- ret i32 17
+T:
+ ret i32 17
-F: ; preds = %0
- %X = load i32, i32* %P ; <i32> [#uses=1]
- ret i32 %X
+F:
+ %X = load i32, i32* %P
+ ret i32 %X
}
define i32 @foo() {
- %X = call i32 @callee( i1 true, i32* null ) ; <i32> [#uses=1]
- ret i32 %X
+; CHECK-LABEL: define i32 @foo(
+entry:
+; CHECK-NOT: load i32, i32* null
+ %X = call i32 @callee(i1 true, i32* null)
+; CHECK: call i32 @callee(i1 true, i32* null)
+ ret i32 %X
}
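+
+; Why promotion must not happen here: the load of %P executes only on the
+; %F path, so hoisting it to the call site would speculate a load through a
+; potentially invalid pointer (here literally null, as @foo shows).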
diff --git a/test/Transforms/ArgumentPromotion/control-flow2.ll b/test/Transforms/ArgumentPromotion/control-flow2.ll
index 7413f46a860f..b75a32ddb331 100644
--- a/test/Transforms/ArgumentPromotion/control-flow2.ll
+++ b/test/Transforms/ArgumentPromotion/control-flow2.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -argpromotion -S | FileCheck %s
+; RUN: opt < %s -passes=argpromotion -S | FileCheck %s
; CHECK: load i32, i32* %A
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
diff --git a/test/Transforms/ArgumentPromotion/crash.ll b/test/Transforms/ArgumentPromotion/crash.ll
index d3f412da14d9..d46a48101e78 100644
--- a/test/Transforms/ArgumentPromotion/crash.ll
+++ b/test/Transforms/ArgumentPromotion/crash.ll
@@ -1,61 +1,73 @@
-; RUN: opt -inline -argpromotion < %s
-; rdar://7879828
+; RUN: opt -S < %s -inline -argpromotion | FileCheck %s
+; RUN: opt -S < %s -passes=inline,argpromotion | FileCheck %s
-define void @foo() personality i32 (...)* @__gxx_personality_v0 {
- invoke void @foo2()
- to label %if.end432 unwind label %for.end520
+%S = type { %S* }
-if.end432:
+; Inlining should nuke the invoke (and any inlined calls) here even with
+; argument promotion running along with it.
+define void @zot() personality i32 (...)* @wibble {
+; CHECK-LABEL: define void @zot() personality i32 (...)* @wibble
+; CHECK-NOT: call
+; CHECK-NOT: invoke
+bb:
+ invoke void @hoge()
+ to label %bb1 unwind label %bb2
+
+bb1:
unreachable
-for.end520:
- %exn = landingpad {i8*, i32}
- cleanup
+bb2:
+ %tmp = landingpad { i8*, i32 }
+ cleanup
unreachable
}
-define internal void @foo2() ssp {
- %call7 = call fastcc i8* @foo3(i1 (i8*)* @foo4)
- %call58 = call fastcc i8* @foo3(i1 (i8*)* @foo5)
+define internal void @hoge() {
+bb:
+ %tmp = call fastcc i8* @spam(i1 (i8*)* @eggs)
+ %tmp1 = call fastcc i8* @spam(i1 (i8*)* @barney)
unreachable
}
-define internal fastcc i8* @foo3(i1 (i8*)* %Pred) {
-entry:
+define internal fastcc i8* @spam(i1 (i8*)* %arg) {
+bb:
unreachable
}
-define internal i1 @foo4(i8* %O) nounwind {
-entry:
- %call = call zeroext i1 @foo5(i8* %O) ; <i1> [#uses=0]
+define internal i1 @eggs(i8* %arg) {
+bb:
+ %tmp = call zeroext i1 @barney(i8* %arg)
unreachable
}
-define internal i1 @foo5(i8* %O) nounwind {
-entry:
+define internal i1 @barney(i8* %arg) {
+bb:
ret i1 undef
}
+define i32 @test_inf_promote_caller(i32 %arg) {
+; CHECK-LABEL: define i32 @test_inf_promote_caller(
+bb:
+ %tmp = alloca %S
+ %tmp1 = alloca %S
+ %tmp2 = call i32 @test_inf_promote_callee(%S* %tmp, %S* %tmp1)
+; CHECK: call i32 @test_inf_promote_callee(%S* %{{.*}}, %S* %{{.*}})
-; PR8932 - infinite promotion.
-%0 = type { %0* }
-
-define i32 @test2(i32 %a) {
-init:
- %0 = alloca %0
- %1 = alloca %0
- %2 = call i32 @"clay_assign(Chain, Chain)"(%0* %0, %0* %1)
ret i32 0
}
-define internal i32 @"clay_assign(Chain, Chain)"(%0* %c, %0* %d) {
-init:
- %0 = getelementptr %0, %0* %d, i32 0, i32 0
- %1 = load %0*, %0** %0
- %2 = getelementptr %0, %0* %c, i32 0, i32 0
- %3 = load %0*, %0** %2
- %4 = call i32 @"clay_assign(Chain, Chain)"(%0* %3, %0* %1)
+define internal i32 @test_inf_promote_callee(%S* %arg, %S* %arg1) {
+; CHECK-LABEL: define internal i32 @test_inf_promote_callee(
+; CHECK: %S* %{{.*}}, %S* %{{.*}})
+bb:
+ %tmp = getelementptr %S, %S* %arg1, i32 0, i32 0
+ %tmp2 = load %S*, %S** %tmp
+ %tmp3 = getelementptr %S, %S* %arg, i32 0, i32 0
+ %tmp4 = load %S*, %S** %tmp3
+ %tmp5 = call i32 @test_inf_promote_callee(%S* %tmp4, %S* %tmp2)
+; CHECK: call i32 @test_inf_promote_callee(%S* %{{.*}}, %S* %{{.*}})
+
ret i32 0
}
-declare i32 @__gxx_personality_v0(...)
+declare i32 @wibble(...)
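+
+; Why promotion could recurse forever: %S = type { %S* }, so promoting the
+; %S* argument introduces a load whose result is again an %S*. Every
+; promotion step yields another candidate of the same type, so argpromotion
+; has to give up and leave the calls above unchanged.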
diff --git a/test/Transforms/ArgumentPromotion/dbg.ll b/test/Transforms/ArgumentPromotion/dbg.ll
index 3d353db105fd..61b7c1843e48 100644
--- a/test/Transforms/ArgumentPromotion/dbg.ll
+++ b/test/Transforms/ArgumentPromotion/dbg.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -argpromotion -S | FileCheck %s
+; RUN: opt < %s -passes=argpromotion -S | FileCheck %s
declare void @sink(i32)
@@ -23,6 +24,6 @@ define void @caller(i32** %Y) {
!0 = !{i32 2, !"Debug Info Version", i32 3}
!1 = !DILocation(line: 8, scope: !2)
-!2 = distinct !DISubprogram(name: "test", line: 3, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: false, unit: !3, scopeLine: 3, scope: null)
+!2 = distinct !DISubprogram(name: "test", file: !5, line: 3, isLocal: true, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: false, unit: !3, scopeLine: 3, scope: null)
!3 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.5.0 ", isOptimized: false, emissionKind: LineTablesOnly, file: !5)
!5 = !DIFile(filename: "test.c", directory: "")
diff --git a/test/Transforms/ArgumentPromotion/fp80.ll b/test/Transforms/ArgumentPromotion/fp80.ll
index 84ef603de82c..bd780fa21aeb 100644
--- a/test/Transforms/ArgumentPromotion/fp80.ll
+++ b/test/Transforms/ArgumentPromotion/fp80.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -argpromotion -S | FileCheck %s
+; RUN: opt < %s -passes=argpromotion -S | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Transforms/ArgumentPromotion/inalloca.ll b/test/Transforms/ArgumentPromotion/inalloca.ll
index 5bf57c8ff465..7ea3b4e42777 100644
--- a/test/Transforms/ArgumentPromotion/inalloca.ll
+++ b/test/Transforms/ArgumentPromotion/inalloca.ll
@@ -1,4 +1,5 @@
; RUN: opt %s -argpromotion -sroa -S | FileCheck %s
+; RUN: opt %s -passes='argpromotion,function(sroa)' -S | FileCheck %s
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
diff --git a/test/Transforms/ArgumentPromotion/pr27568.ll b/test/Transforms/ArgumentPromotion/pr27568.ll
index 648317aee0da..1496780748da 100644
--- a/test/Transforms/ArgumentPromotion/pr27568.ll
+++ b/test/Transforms/ArgumentPromotion/pr27568.ll
@@ -1,4 +1,5 @@
; RUN: opt -S -argpromotion < %s | FileCheck %s
+; RUN: opt -S -passes=argpromotion < %s | FileCheck %s
target triple = "x86_64-pc-windows-msvc"
define internal void @callee(i8*) {
diff --git a/test/Transforms/ArgumentPromotion/profile.ll b/test/Transforms/ArgumentPromotion/profile.ll
new file mode 100644
index 000000000000..f667f9ea2c2a
--- /dev/null
+++ b/test/Transforms/ArgumentPromotion/profile.ll
@@ -0,0 +1,23 @@
+; RUN: opt -argpromotion -mem2reg -S < %s | FileCheck %s
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+; Check that !prof metadata on the call is preserved correctly by argpromotion.
+
+define void @caller() #0 {
+ %x = alloca i32
+ store i32 42, i32* %x
+ call void @promote_i32_ptr(i32* %x), !prof !0
+; CHECK: call void @promote_i32_ptr(i32 42), !prof ![[PROF:[0-9]]]
+ ret void
+}
+
+define internal void @promote_i32_ptr(i32* %xp) {
+ %x = load i32, i32* %xp
+ call void @use_i32(i32 %x)
+ ret void
+}
+
+declare void @use_i32(i32)
+
+; CHECK: ![[PROF]] = !{!"branch_weights", i32 30}
+!0 = !{!"branch_weights", i32 30}
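+
+; Note (informal): the single branch_weights operand on a direct call is
+; understood to carry its profiled execution count, so the rewritten call
+; must reference the same !prof node, as ![[PROF]] above checks.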
diff --git a/test/Transforms/ArgumentPromotion/reserve-tbaa.ll b/test/Transforms/ArgumentPromotion/reserve-tbaa.ll
index 3c8ed79eeb29..3a3aa44b2a98 100644
--- a/test/Transforms/ArgumentPromotion/reserve-tbaa.ll
+++ b/test/Transforms/ArgumentPromotion/reserve-tbaa.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -argpromotion -S
+; RUN: opt < %s -argpromotion -S | FileCheck %s
+; RUN: opt < %s -passes=argpromotion -S | FileCheck %s
; PR17906
; When we promote two arguments in a single function with different types,
diff --git a/test/Transforms/ArgumentPromotion/sret.ll b/test/Transforms/ArgumentPromotion/sret.ll
index 8e5521f48d10..55fc036f1775 100644
--- a/test/Transforms/ArgumentPromotion/sret.ll
+++ b/test/Transforms/ArgumentPromotion/sret.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -argpromotion -S | FileCheck %s
+; RUN: opt < %s -passes=argpromotion -S | FileCheck %s
target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-pc-windows-msvc"
diff --git a/test/Transforms/ArgumentPromotion/tail.ll b/test/Transforms/ArgumentPromotion/tail.ll
index 2ea387cd2645..93de60afe915 100644
--- a/test/Transforms/ArgumentPromotion/tail.ll
+++ b/test/Transforms/ArgumentPromotion/tail.ll
@@ -1,4 +1,5 @@
; RUN: opt %s -argpromotion -S -o - | FileCheck %s
+; RUN: opt %s -passes=argpromotion -S -o - | FileCheck %s
; PR14710
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/test/Transforms/ArgumentPromotion/variadic.ll b/test/Transforms/ArgumentPromotion/variadic.ll
index 0e03882d3b20..034f853883fd 100644
--- a/test/Transforms/ArgumentPromotion/variadic.ll
+++ b/test/Transforms/ArgumentPromotion/variadic.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -argpromotion -S | FileCheck %s
+; RUN: opt < %s -passes=argpromotion -S | FileCheck %s
; Unused arguments from variadic functions cannot be eliminated as that changes
; their classification according to the SysV amd64 ABI. Clang and other frontends
diff --git a/test/Transforms/AtomicExpand/SPARC/libcalls.ll b/test/Transforms/AtomicExpand/SPARC/libcalls.ll
index afab7a39b278..fc6aade8708a 100644
--- a/test/Transforms/AtomicExpand/SPARC/libcalls.ll
+++ b/test/Transforms/AtomicExpand/SPARC/libcalls.ll
@@ -43,11 +43,11 @@ define i16 @test_exchange_i16(i16* %arg, i16 %val) {
; CHECK: %1 = bitcast i16* %arg to i8*
; CHECK: %2 = alloca i16, align 2
; CHECK: %3 = bitcast i16* %2 to i8*
-; CHECK: call void @llvm.lifetime.start(i64 2, i8* %3)
+; CHECK: call void @llvm.lifetime.start.p0i8(i64 2, i8* %3)
; CHECK: store i16 %old, i16* %2, align 2
; CHECK: %4 = call zeroext i1 @__atomic_compare_exchange_2(i8* %1, i8* %3, i16 %new, i32 5, i32 0)
; CHECK: %5 = load i16, i16* %2, align 2
-; CHECK: call void @llvm.lifetime.end(i64 2, i8* %3)
+; CHECK: call void @llvm.lifetime.end.p0i8(i64 2, i8* %3)
; CHECK: %6 = insertvalue { i16, i1 } undef, i16 %5, 0
; CHECK: %7 = insertvalue { i16, i1 } %6, i1 %4, 1
; CHECK: %ret = extractvalue { i16, i1 } %7, 0
@@ -76,10 +76,10 @@ define i16 @test_add_i16(i16* %arg, i16 %val) {
; CHECK: %1 = bitcast i128* %arg to i8*
; CHECK: %2 = alloca i128, align 8
; CHECK: %3 = bitcast i128* %2 to i8*
-; CHECK: call void @llvm.lifetime.start(i64 16, i8* %3)
+; CHECK: call void @llvm.lifetime.start.p0i8(i64 16, i8* %3)
; CHECK: call void @__atomic_load(i32 16, i8* %1, i8* %3, i32 5)
; CHECK: %4 = load i128, i128* %2, align 8
-; CHECK: call void @llvm.lifetime.end(i64 16, i8* %3)
+; CHECK: call void @llvm.lifetime.end.p0i8(i64 16, i8* %3)
; CHECK: ret i128 %4
define i128 @test_load_i128(i128* %arg) {
%ret = load atomic i128, i128* %arg seq_cst, align 16
@@ -90,10 +90,10 @@ define i128 @test_load_i128(i128* %arg) {
; CHECK: %1 = bitcast i128* %arg to i8*
; CHECK: %2 = alloca i128, align 8
; CHECK: %3 = bitcast i128* %2 to i8*
-; CHECK: call void @llvm.lifetime.start(i64 16, i8* %3)
+; CHECK: call void @llvm.lifetime.start.p0i8(i64 16, i8* %3)
; CHECK: store i128 %val, i128* %2, align 8
; CHECK: call void @__atomic_store(i32 16, i8* %1, i8* %3, i32 5)
-; CHECK: call void @llvm.lifetime.end(i64 16, i8* %3)
+; CHECK: call void @llvm.lifetime.end.p0i8(i64 16, i8* %3)
; CHECK: ret void
define void @test_store_i128(i128* %arg, i128 %val) {
store atomic i128 %val, i128* %arg seq_cst, align 16
@@ -104,15 +104,15 @@ define void @test_store_i128(i128* %arg, i128 %val) {
; CHECK: %1 = bitcast i128* %arg to i8*
; CHECK: %2 = alloca i128, align 8
; CHECK: %3 = bitcast i128* %2 to i8*
-; CHECK: call void @llvm.lifetime.start(i64 16, i8* %3)
+; CHECK: call void @llvm.lifetime.start.p0i8(i64 16, i8* %3)
; CHECK: store i128 %val, i128* %2, align 8
; CHECK: %4 = alloca i128, align 8
; CHECK: %5 = bitcast i128* %4 to i8*
-; CHECK: call void @llvm.lifetime.start(i64 16, i8* %5)
+; CHECK: call void @llvm.lifetime.start.p0i8(i64 16, i8* %5)
; CHECK: call void @__atomic_exchange(i32 16, i8* %1, i8* %3, i8* %5, i32 5)
-; CHECK: call void @llvm.lifetime.end(i64 16, i8* %3)
+; CHECK: call void @llvm.lifetime.end.p0i8(i64 16, i8* %3)
; CHECK: %6 = load i128, i128* %4, align 8
-; CHECK: call void @llvm.lifetime.end(i64 16, i8* %5)
+; CHECK: call void @llvm.lifetime.end.p0i8(i64 16, i8* %5)
; CHECK: ret i128 %6
define i128 @test_exchange_i128(i128* %arg, i128 %val) {
%ret = atomicrmw xchg i128* %arg, i128 %val seq_cst
@@ -123,16 +123,16 @@ define i128 @test_exchange_i128(i128* %arg, i128 %val) {
; CHECK: %1 = bitcast i128* %arg to i8*
; CHECK: %2 = alloca i128, align 8
; CHECK: %3 = bitcast i128* %2 to i8*
-; CHECK: call void @llvm.lifetime.start(i64 16, i8* %3)
+; CHECK: call void @llvm.lifetime.start.p0i8(i64 16, i8* %3)
; CHECK: store i128 %old, i128* %2, align 8
; CHECK: %4 = alloca i128, align 8
; CHECK: %5 = bitcast i128* %4 to i8*
-; CHECK: call void @llvm.lifetime.start(i64 16, i8* %5)
+; CHECK: call void @llvm.lifetime.start.p0i8(i64 16, i8* %5)
; CHECK: store i128 %new, i128* %4, align 8
; CHECK: %6 = call zeroext i1 @__atomic_compare_exchange(i32 16, i8* %1, i8* %3, i8* %5, i32 5, i32 0)
-; CHECK: call void @llvm.lifetime.end(i64 16, i8* %5)
+; CHECK: call void @llvm.lifetime.end.p0i8(i64 16, i8* %5)
; CHECK: %7 = load i128, i128* %2, align 8
-; CHECK: call void @llvm.lifetime.end(i64 16, i8* %3)
+; CHECK: call void @llvm.lifetime.end.p0i8(i64 16, i8* %3)
; CHECK: %8 = insertvalue { i128, i1 } undef, i128 %7, 0
; CHECK: %9 = insertvalue { i128, i1 } %8, i1 %6, 1
; CHECK: %ret = extractvalue { i128, i1 } %9, 0
@@ -157,15 +157,15 @@ define i128 @test_cmpxchg_i128(i128* %arg, i128 %old, i128 %new) {
; CHECK: %new = add i128 %loaded, %val
; CHECK: %4 = bitcast i128* %arg to i8*
; CHECK: %5 = bitcast i128* %1 to i8*
-; CHECK: call void @llvm.lifetime.start(i64 16, i8* %5)
+; CHECK: call void @llvm.lifetime.start.p0i8(i64 16, i8* %5)
; CHECK: store i128 %loaded, i128* %1, align 8
; CHECK: %6 = bitcast i128* %2 to i8*
-; CHECK: call void @llvm.lifetime.start(i64 16, i8* %6)
+; CHECK: call void @llvm.lifetime.start.p0i8(i64 16, i8* %6)
; CHECK: store i128 %new, i128* %2, align 8
; CHECK: %7 = call zeroext i1 @__atomic_compare_exchange(i32 16, i8* %4, i8* %5, i8* %6, i32 5, i32 5)
-; CHECK: call void @llvm.lifetime.end(i64 16, i8* %6)
+; CHECK: call void @llvm.lifetime.end.p0i8(i64 16, i8* %6)
; CHECK: %8 = load i128, i128* %1, align 8
-; CHECK: call void @llvm.lifetime.end(i64 16, i8* %5)
+; CHECK: call void @llvm.lifetime.end.p0i8(i64 16, i8* %5)
; CHECK: %9 = insertvalue { i128, i1 } undef, i128 %8, 0
; CHECK: %10 = insertvalue { i128, i1 } %9, i1 %7, 1
; CHECK: %success = extractvalue { i128, i1 } %10, 1
@@ -204,12 +204,12 @@ define void @test_store_double(double* %arg, double %val) {
; CHECK: %1 = bitcast i16** %arg to i8*
; CHECK: %2 = alloca i16*, align 4
; CHECK: %3 = bitcast i16** %2 to i8*
-; CHECK: call void @llvm.lifetime.start(i64 4, i8* %3)
+; CHECK: call void @llvm.lifetime.start.p0i8(i64 4, i8* %3)
; CHECK: store i16* %old, i16** %2, align 4
; CHECK: %4 = ptrtoint i16* %new to i32
; CHECK: %5 = call zeroext i1 @__atomic_compare_exchange_4(i8* %1, i8* %3, i32 %4, i32 5, i32 2)
; CHECK: %6 = load i16*, i16** %2, align 4
-; CHECK: call void @llvm.lifetime.end(i64 4, i8* %3)
+; CHECK: call void @llvm.lifetime.end.p0i8(i64 4, i8* %3)
; CHECK: %7 = insertvalue { i16*, i1 } undef, i16* %6, 0
; CHECK: %8 = insertvalue { i16*, i1 } %7, i1 %5, 1
; CHECK: %ret = extractvalue { i16*, i1 } %8, 0
@@ -227,10 +227,10 @@ define i16* @test_cmpxchg_ptr(i16** %arg, i16* %old, i16* %new) {
; CHECK: %1 = bitcast fp128* %arg to i8*
; CHECK: %2 = alloca fp128, align 8
; CHECK: %3 = bitcast fp128* %2 to i8*
-; CHECK: call void @llvm.lifetime.start(i64 16, i8* %3)
+; CHECK: call void @llvm.lifetime.start.p0i8(i64 16, i8* %3)
; CHECK: store fp128 %val, fp128* %2, align 8
; CHECK: call void @__atomic_store(i32 16, i8* %1, i8* %3, i32 5)
-; CHECK: call void @llvm.lifetime.end(i64 16, i8* %3)
+; CHECK: call void @llvm.lifetime.end.p0i8(i64 16, i8* %3)
; CHECK: ret void
define void @test_store_fp128(fp128* %arg, fp128 %val) {
store atomic fp128 %val, fp128* %arg seq_cst, align 16
diff --git a/test/Transforms/BBVectorize/X86/loop1.ll b/test/Transforms/BBVectorize/X86/loop1.ll
index 551fbd73eb27..a533713609a7 100644
--- a/test/Transforms/BBVectorize/X86/loop1.ll
+++ b/test/Transforms/BBVectorize/X86/loop1.ll
@@ -1,7 +1,7 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -bb-vectorize -bb-vectorize-req-chain-depth=3 -instcombine -gvn -S | FileCheck %s
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -basicaa -loop-unroll -unroll-threshold=45 -unroll-allow-partial -bb-vectorize -bb-vectorize-req-chain-depth=3 -instcombine -gvn -S | FileCheck %s -check-prefix=CHECK-UNRL
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -basicaa -loop-unroll -unroll-partial-threshold=45 -unroll-allow-partial -bb-vectorize -bb-vectorize-req-chain-depth=3 -instcombine -gvn -S | FileCheck %s -check-prefix=CHECK-UNRL
; The second check covers the use of alias analysis (with loop unrolling).
define void @test1(double* noalias %out, double* noalias %in1, double* noalias %in2) nounwind uwtable {
diff --git a/test/Transforms/BBVectorize/X86/wr-aliases.ll b/test/Transforms/BBVectorize/X86/wr-aliases.ll
index a6ea27fc3ecb..e34414988f32 100644
--- a/test/Transforms/BBVectorize/X86/wr-aliases.ll
+++ b/test/Transforms/BBVectorize/X86/wr-aliases.ll
@@ -14,7 +14,7 @@ declare fastcc void @_ZL12printQBezier7QBezier(%class.QBezier.15* byval nocaptur
declare void @llvm.lifetime.start(i64, i8* nocapture) #0
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
define void @main_arrayctor.cont([10 x %class.QBezier.15]* %beziers, %class.QBezier.15* %agg.tmp.i, %class.QBezier.15* %agg.tmp55.i, %class.QBezier.15* %agg.tmp56.i) {
newFuncRoot:
@@ -134,9 +134,9 @@ arrayctor.cont: ; preds = %newFuncRoot
call fastcc void @_ZL12printQBezier7QBezier(%class.QBezier.15* byval align 8 %agg.tmp55.i)
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %v2, i8* %v3, i64 64, i32 8, i1 false)
call fastcc void @_ZL12printQBezier7QBezier(%class.QBezier.15* byval align 8 %agg.tmp56.i)
- call void @llvm.lifetime.end(i64 64, i8* %v0)
- call void @llvm.lifetime.end(i64 64, i8* %v1)
- call void @llvm.lifetime.end(i64 64, i8* %v2)
+ call void @llvm.lifetime.end.p0i8(i64 64, i8* %v0)
+ call void @llvm.lifetime.end.p0i8(i64 64, i8* %v1)
+ call void @llvm.lifetime.end.p0i8(i64 64, i8* %v2)
br label %arrayctor.cont.ret.exitStub
}
diff --git a/test/Transforms/BBVectorize/loop1.ll b/test/Transforms/BBVectorize/loop1.ll
index 7e7b603116fc..8ff5953cf46a 100644
--- a/test/Transforms/BBVectorize/loop1.ll
+++ b/test/Transforms/BBVectorize/loop1.ll
@@ -1,7 +1,7 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s
-; RUN: opt < %s -dont-improve-non-negative-phi-bits=false -basicaa -loop-unroll -unroll-threshold=45 -unroll-allow-partial -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s -check-prefix=CHECK-UNRL
+; RUN: opt < %s -dont-improve-non-negative-phi-bits=false -basicaa -loop-unroll -unroll-threshold=45 -unroll-partial-threshold=45 -unroll-allow-partial -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s -check-prefix=CHECK-UNRL
; The second check covers the use of alias analysis (with loop unrolling).
define void @test1(double* noalias %out, double* noalias %in1, double* noalias %in2) nounwind uwtable {
diff --git a/test/Transforms/BDCE/basic.ll b/test/Transforms/BDCE/basic.ll
index 6e748c69a16e..6132c5d797fc 100644
--- a/test/Transforms/BDCE/basic.ll
+++ b/test/Transforms/BDCE/basic.ll
@@ -136,6 +136,44 @@ entry:
declare i32 @llvm.bswap.i32(i32) #0
; Function Attrs: nounwind readnone
+define signext i32 @tim(i32 signext %x) #0 {
+entry:
+ %call = tail call signext i32 @foo(i32 signext 5) #0
+ %and = and i32 %call, 536870912
+ %or = or i32 %and, %x
+ %call1 = tail call signext i32 @foo(i32 signext 3) #0
+ %and2 = and i32 %call1, 1073741824
+ %or3 = or i32 %or, %and2
+ %call4 = tail call signext i32 @foo(i32 signext 2) #0
+ %and5 = and i32 %call4, 16
+ %or6 = or i32 %or3, %and5
+ %call7 = tail call signext i32 @foo(i32 signext 1) #0
+ %and8 = and i32 %call7, 32
+ %or9 = or i32 %or6, %and8
+ %call10 = tail call signext i32 @foo(i32 signext 0) #0
+ %and11 = and i32 %call10, 64
+ %or12 = or i32 %or9, %and11
+ %call13 = tail call signext i32 @foo(i32 signext 4) #0
+ %and14 = and i32 %call13, 128
+ %or15 = or i32 %or12, %and14
+ %bs = tail call i32 @llvm.bitreverse.i32(i32 %or15) #0
+ %shr = ashr i32 %bs, 4
+ ret i32 %shr
+
+; CHECK-LABEL: @tim
+; CHECK-NOT: tail call signext i32 @foo(i32 signext 5)
+; CHECK-NOT: tail call signext i32 @foo(i32 signext 3)
+; CHECK: tail call signext i32 @foo(i32 signext 2)
+; CHECK: tail call signext i32 @foo(i32 signext 1)
+; CHECK: tail call signext i32 @foo(i32 signext 0)
+; CHECK: tail call signext i32 @foo(i32 signext 4)
+; CHECK: ret i32
+}
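+
+; Why the first two calls are dead: llvm.bitreverse.i32 maps bit k to bit
+; 31-k, and the ashr by 4 then discards bits 0-3. The masks 536870912 and
+; 1073741824 (bits 29 and 30) land on bits 2 and 1 after the reverse, so
+; those two or-operands contribute nothing to the demanded bits and BDCE
+; can drop the calls feeding them.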
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.bitreverse.i32(i32) #0
+
+; Function Attrs: nounwind readnone
define signext i32 @tar2(i32 signext %x) #0 {
entry:
%call = tail call signext i32 @foo(i32 signext 5) #0
diff --git a/test/Transforms/CodeGenPrepare/AMDGPU/no-sink-addrspacecast.ll b/test/Transforms/CodeGenPrepare/AMDGPU/no-sink-addrspacecast.ll
index 6cec253bbf9b..2bcb3a9d1e3d 100644
--- a/test/Transforms/CodeGenPrepare/AMDGPU/no-sink-addrspacecast.ll
+++ b/test/Transforms/CodeGenPrepare/AMDGPU/no-sink-addrspacecast.ll
@@ -5,7 +5,7 @@
; ASC-NOT: ptrtoint
; ASC-NOT: inttoptr
-define void @test_sink_ptrtoint_asc(float addrspace(1)* nocapture %arg, float addrspace(1)* nocapture readonly %arg1, float addrspace(3)* %arg2) #0 {
+define amdgpu_kernel void @test_sink_ptrtoint_asc(float addrspace(1)* nocapture %arg, float addrspace(1)* nocapture readonly %arg1, float addrspace(3)* %arg2) #0 {
bb:
%tmp = getelementptr inbounds float, float addrspace(3)* %arg2, i32 16
%tmp2 = tail call i32 @llvm.amdgcn.workitem.id.x() #1
diff --git a/test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div-special-cases.ll b/test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div-special-cases.ll
new file mode 100644
index 000000000000..dfa81b54cc3d
--- /dev/null
+++ b/test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div-special-cases.ll
@@ -0,0 +1,216 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -codegenprepare < %s | FileCheck %s
+
+target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
+target triple = "nvptx64-nvidia-cuda"
+
+; No bypassing should be done in apparently unsuitable cases.
+define void @Test_no_bypassing(i32 %a, i64 %b, i64* %retptr) {
+; CHECK-LABEL: @Test_no_bypassing(
+; CHECK-NEXT: [[A_1:%.*]] = zext i32 [[A:%.*]] to i64
+; CHECK-NEXT: [[A_2:%.*]] = sub i64 -1, [[A_1]]
+; CHECK-NEXT: [[RES:%.*]] = srem i64 [[A_2]], [[B:%.*]]
+; CHECK-NEXT: store i64 [[RES]], i64* [[RETPTR:%.*]]
+; CHECK-NEXT: ret void
+;
+ %a.1 = zext i32 %a to i64
+ ; %a.2 is always negative so the division cannot be bypassed.
+ %a.2 = sub i64 -1, %a.1
+ %res = srem i64 %a.2, %b
+ store i64 %res, i64* %retptr
+ ret void
+}
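+
+; For reference, when bypassing does fire the pass emits a runtime check of
+; roughly this shape (names illustrative; later tests CHECK the real
+; output):
+;   %or = or i64 %a, %b
+;   %hi = and i64 %or, -4294967296   ; any bits above the low 32 set?
+;   %ok = icmp eq i64 %hi, 0
+;   br i1 %ok, label %fast, label %slow
+; with a trunc/udiv-i32/zext sequence on the fast path, the original 64-bit
+; division on the slow path, and a phi merging the two results.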
+
+; No OR instruction is needed if one of the operands (divisor) is known
+; to fit into 32 bits.
+define void @Test_check_one_operand(i64 %a, i32 %b, i64* %retptr) {
+; CHECK-LABEL: @Test_check_one_operand(
+; CHECK-NEXT: [[B_1:%.*]] = zext i32 [[B:%.*]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[A:%.*]], -4294967296
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP8:%.*]]
+; CHECK: [[TMP4:%.*]] = trunc i64 [[B_1]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[A]] to i32
+; CHECK-NEXT: [[TMP6:%.*]] = udiv i32 [[TMP5]], [[TMP4]]
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
+; CHECK-NEXT: br label [[TMP10:%.*]]
+; CHECK: [[TMP9:%.*]] = sdiv i64 [[A]], [[B_1]]
+; CHECK-NEXT: br label [[TMP10]]
+; CHECK: [[TMP11:%.*]] = phi i64 [ [[TMP7]], [[TMP3]] ], [ [[TMP9]], [[TMP8]] ]
+; CHECK-NEXT: store i64 [[TMP11]], i64* [[RETPTR:%.*]]
+; CHECK-NEXT: ret void
+;
+ %b.1 = zext i32 %b to i64
+ %res = sdiv i64 %a, %b.1
+ store i64 %res, i64* %retptr
+ ret void
+}
+
+; If both operands are known to fit into 32 bits, then replace the division
+; in-place without CFG modification.
+define void @Test_check_none(i64 %a, i32 %b, i64* %retptr) {
+; CHECK-LABEL: @Test_check_none(
+; CHECK-NEXT: [[A_1:%.*]] = and i64 [[A:%.*]], 4294967295
+; CHECK-NEXT: [[B_1:%.*]] = zext i32 [[B:%.*]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A_1]] to i32
+; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[B_1]] to i32
+; CHECK-NEXT: [[TMP3:%.*]] = udiv i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
+; CHECK-NEXT: store i64 [[TMP4]], i64* [[RETPTR:%.*]]
+; CHECK-NEXT: ret void
+;
+ %a.1 = and i64 %a, 4294967295
+ %b.1 = zext i32 %b to i64
+ %res = udiv i64 %a.1, %b.1
+ store i64 %res, i64* %retptr
+ ret void
+}
+
+; In the case of an unsigned long division with a short dividend,
+; the long division is not needed at all.
+define void @Test_special_case(i32 %a, i64 %b, i64* %retptr) {
+; CHECK-LABEL: @Test_special_case(
+; CHECK-NEXT: [[A_1:%.*]] = zext i32 [[A:%.*]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = icmp uge i64 [[A_1]], [[B:%.*]]
+; CHECK-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP9:%.*]]
+; CHECK: [[TMP3:%.*]] = trunc i64 [[B]] to i32
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[A_1]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = udiv i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = urem i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP6]] to i64
+; CHECK-NEXT: br label [[TMP9]]
+; CHECK: [[TMP10:%.*]] = phi i64 [ [[TMP7]], [[TMP2]] ], [ 0, [[TMP0:%.*]] ]
+; CHECK-NEXT: [[TMP11:%.*]] = phi i64 [ [[TMP8]], [[TMP2]] ], [ [[A_1]], [[TMP0]] ]
+; CHECK-NEXT: [[RES:%.*]] = add i64 [[TMP10]], [[TMP11]]
+; CHECK-NEXT: store i64 [[RES]], i64* [[RETPTR:%.*]]
+; CHECK-NEXT: ret void
+;
+ %a.1 = zext i32 %a to i64
+ %div = udiv i64 %a.1, %b
+ %rem = urem i64 %a.1, %b
+ %res = add i64 %div, %rem
+ store i64 %res, i64* %retptr
+ ret void
+}
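+
+; The reasoning behind the shape above: with a 32-bit dividend, either
+; %a.1 uge %b, in which case both operands fit in 32 bits and a 32-bit
+; udiv/urem pair suffices, or %a.1 ult %b, in which case the quotient is 0
+; and the remainder is %a.1; no 64-bit division is needed on either path.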
+
+
+; Do not bypass a division if one of the operands looks like a hash value.
+define void @Test_dont_bypass_xor(i64 %a, i64 %b, i64 %l, i64* %retptr) {
+; CHECK-LABEL: @Test_dont_bypass_xor(
+; CHECK-NEXT: [[C:%.*]] = xor i64 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[RES:%.*]] = udiv i64 [[C]], [[L:%.*]]
+; CHECK-NEXT: store i64 [[RES]], i64* [[RETPTR:%.*]]
+; CHECK-NEXT: ret void
+;
+ %c = xor i64 %a, %b
+ %res = udiv i64 %c, %l
+ store i64 %res, i64* %retptr
+ ret void
+}
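+
+; Rationale for the heuristic: an xor of two long operands (or, below, a
+; multiply by a constant that does not fit in 32 bits) behaves like a hash
+; value and almost never fits in 32 bits, so the runtime bypass check would
+; cost more than it saves.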
+
+define void @Test_dont_bypass_phi_xor(i64 %a, i64 %b, i64 %l, i64* %retptr) {
+; CHECK-LABEL: @Test_dont_bypass_phi_xor(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[B:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[MERGE:%.*]], label [[XORPATH:%.*]]
+; CHECK: xorpath:
+; CHECK-NEXT: [[C:%.*]] = xor i64 [[A:%.*]], [[B]]
+; CHECK-NEXT: br label [[MERGE]]
+; CHECK: merge:
+; CHECK-NEXT: [[E:%.*]] = phi i64 [ undef, [[ENTRY:%.*]] ], [ [[C]], [[XORPATH]] ]
+; CHECK-NEXT: [[RES:%.*]] = sdiv i64 [[E]], [[L:%.*]]
+; CHECK-NEXT: store i64 [[RES]], i64* [[RETPTR:%.*]]
+; CHECK-NEXT: ret void
+;
+entry:
+ %cmp = icmp eq i64 %b, 0
+ br i1 %cmp, label %merge, label %xorpath
+
+xorpath:
+ %c = xor i64 %a, %b
+ br label %merge
+
+merge:
+ %e = phi i64 [ undef, %entry ], [ %c, %xorpath ]
+ %res = sdiv i64 %e, %l
+ store i64 %res, i64* %retptr
+ ret void
+}
+
+define void @Test_dont_bypass_mul_long_const(i64 %a, i64 %l, i64* %retptr) {
+; CHECK-LABEL: @Test_dont_bypass_mul_long_const(
+; CHECK-NEXT: [[C:%.*]] = mul i64 [[A:%.*]], 5229553307
+; CHECK-NEXT: [[RES:%.*]] = urem i64 [[C]], [[L:%.*]]
+; CHECK-NEXT: store i64 [[RES]], i64* [[RETPTR:%.*]]
+; CHECK-NEXT: ret void
+;
+ %c = mul i64 %a, 5229553307 ; the constant doesn't fit 32 bits
+ %res = urem i64 %c, %l
+ store i64 %res, i64* %retptr
+ ret void
+}
+
+define void @Test_bypass_phi_mul_const(i64 %a, i64 %b, i64* %retptr) {
+; CHECK-LABEL: @Test_bypass_phi_mul_const(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A_MUL:%.*]] = mul nsw i64 [[A:%.*]], 34806414968801
+; CHECK-NEXT: [[P:%.*]] = icmp sgt i64 [[A]], [[B:%.*]]
+; CHECK-NEXT: br i1 [[P]], label [[BRANCH:%.*]], label [[MERGE:%.*]]
+; CHECK: branch:
+; CHECK-NEXT: br label [[MERGE]]
+; CHECK: merge:
+; CHECK-NEXT: [[LHS:%.*]] = phi i64 [ 42, [[BRANCH]] ], [ [[A_MUL]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = or i64 [[LHS]], [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], -4294967296
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP8:%.*]]
+; CHECK: [[TMP4:%.*]] = trunc i64 [[B]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[LHS]] to i32
+; CHECK-NEXT: [[TMP6:%.*]] = udiv i32 [[TMP5]], [[TMP4]]
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
+; CHECK-NEXT: br label [[TMP10:%.*]]
+; CHECK: [[TMP9:%.*]] = sdiv i64 [[LHS]], [[B]]
+; CHECK-NEXT: br label [[TMP10]]
+; CHECK: [[TMP11:%.*]] = phi i64 [ [[TMP7]], [[TMP3]] ], [ [[TMP9]], [[TMP8]] ]
+; CHECK-NEXT: store i64 [[TMP11]], i64* [[RETPTR:%.*]]
+; CHECK-NEXT: ret void
+;
+entry:
+ %a.mul = mul nsw i64 %a, 34806414968801
+ %p = icmp sgt i64 %a, %b
+ br i1 %p, label %branch, label %merge
+
+branch:
+ br label %merge
+
+merge:
+ %lhs = phi i64 [ 42, %branch ], [ %a.mul, %entry ]
+ %res = sdiv i64 %lhs, %b
+ store i64 %res, i64* %retptr
+ ret void
+}
+
+define void @Test_bypass_mul_short_const(i64 %a, i64 %l, i64* %retptr) {
+; CHECK-LABEL: @Test_bypass_mul_short_const(
+; CHECK-NEXT: [[C:%.*]] = mul i64 [[A:%.*]], -42
+; CHECK-NEXT: [[TMP1:%.*]] = or i64 [[C]], [[L:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -4294967296
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[TMP2]], 0
+; CHECK-NEXT: br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP9:%.*]]
+; CHECK: [[TMP5:%.*]] = trunc i64 [[L]] to i32
+; CHECK-NEXT: [[TMP6:%.*]] = trunc i64 [[C]] to i32
+; CHECK-NEXT: [[TMP7:%.*]] = urem i32 [[TMP6]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: br label [[TMP11:%.*]]
+; CHECK: [[TMP10:%.*]] = urem i64 [[C]], [[L]]
+; CHECK-NEXT: br label [[TMP11]]
+; CHECK: [[TMP12:%.*]] = phi i64 [ [[TMP8]], [[TMP4]] ], [ [[TMP10]], [[TMP9]] ]
+; CHECK-NEXT: store i64 [[TMP12]], i64* [[RETPTR:%.*]]
+; CHECK-NEXT: ret void
+;
+ %c = mul i64 %a, -42
+ %res = urem i64 %c, %l
+ store i64 %res, i64* %retptr
+ ret void
+}
diff --git a/test/Transforms/CodeGenPrepare/X86/computedgoto.ll b/test/Transforms/CodeGenPrepare/X86/computedgoto.ll
new file mode 100644
index 000000000000..00a4df9b2c59
--- /dev/null
+++ b/test/Transforms/CodeGenPrepare/X86/computedgoto.ll
@@ -0,0 +1,294 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -codegenprepare -S < %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare void @use(i32) local_unnamed_addr
+declare void @useptr([2 x i8*]*) local_unnamed_addr
+
+; CHECK: @simple.targets = constant [2 x i8*] [i8* blockaddress(@simple, %bb0), i8* blockaddress(@simple, %bb1)], align 16
+@simple.targets = constant [2 x i8*] [i8* blockaddress(@simple, %bb0), i8* blockaddress(@simple, %bb1)], align 16
+
+; CHECK: @multi.targets = constant [2 x i8*] [i8* blockaddress(@multi, %bb0), i8* blockaddress(@multi, %bb1)], align 16
+@multi.targets = constant [2 x i8*] [i8* blockaddress(@multi, %bb0), i8* blockaddress(@multi, %bb1)], align 16
+
+; CHECK: @loop.targets = constant [2 x i8*] [i8* blockaddress(@loop, %bb0), i8* blockaddress(@loop, %bb1)], align 16
+@loop.targets = constant [2 x i8*] [i8* blockaddress(@loop, %bb0), i8* blockaddress(@loop, %bb1)], align 16
+
+; CHECK: @nophi.targets = constant [2 x i8*] [i8* blockaddress(@nophi, %bb0), i8* blockaddress(@nophi, %bb1)], align 16
+@nophi.targets = constant [2 x i8*] [i8* blockaddress(@nophi, %bb0), i8* blockaddress(@nophi, %bb1)], align 16
+
+; CHECK: @noncritical.targets = constant [2 x i8*] [i8* blockaddress(@noncritical, %bb0), i8* blockaddress(@noncritical, %bb1)], align 16
+@noncritical.targets = constant [2 x i8*] [i8* blockaddress(@noncritical, %bb0), i8* blockaddress(@noncritical, %bb1)], align 16
+
+; Check that we break the critical edge when a jump table has only one use.
+define void @simple(i32* nocapture readonly %p) {
+; CHECK-LABEL: @simple(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
+; CHECK-NEXT: [[INITVAL:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT: [[INITOP:%.*]] = load i32, i32* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: switch i32 [[INITOP]], label [[EXIT:%.*]] [
+; CHECK-NEXT: i32 0, label [[BB0_CLONE:%.*]]
+; CHECK-NEXT: i32 1, label [[BB1_CLONE:%.*]]
+; CHECK-NEXT: ]
+; CHECK: bb0:
+; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: [[MERGE:%.*]] = phi i32* [ [[PTR:%.*]], [[BB0:%.*]] ], [ [[INCDEC_PTR]], [[BB0_CLONE]] ]
+; CHECK-NEXT: [[MERGE2:%.*]] = phi i32 [ 0, [[BB0]] ], [ [[INITVAL]], [[BB0_CLONE]] ]
+; CHECK-NEXT: tail call void @use(i32 [[MERGE2]])
+; CHECK-NEXT: br label [[INDIRECTGOTO:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: br label [[DOTSPLIT3:%.*]]
+; CHECK: .split3:
+; CHECK-NEXT: [[MERGE5:%.*]] = phi i32* [ [[PTR]], [[BB1:%.*]] ], [ [[INCDEC_PTR]], [[BB1_CLONE]] ]
+; CHECK-NEXT: [[MERGE7:%.*]] = phi i32 [ 1, [[BB1]] ], [ [[INITVAL]], [[BB1_CLONE]] ]
+; CHECK-NEXT: tail call void @use(i32 [[MERGE7]])
+; CHECK-NEXT: br label [[INDIRECTGOTO]]
+; CHECK: indirectgoto:
+; CHECK-NEXT: [[P_ADDR_SINK:%.*]] = phi i32* [ [[MERGE5]], [[DOTSPLIT3]] ], [ [[MERGE]], [[DOTSPLIT]] ]
+; CHECK-NEXT: [[PTR]] = getelementptr inbounds i32, i32* [[P_ADDR_SINK]], i64 1
+; CHECK-NEXT: [[NEWP:%.*]] = load i32, i32* [[P_ADDR_SINK]], align 4
+; CHECK-NEXT: [[IDX:%.*]] = sext i32 [[NEWP]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* @simple.targets, i64 0, i64 [[IDX]]
+; CHECK-NEXT: [[NEWOP:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
+; CHECK-NEXT: indirectbr i8* [[NEWOP]], [label [[BB0]], label %bb1]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+; CHECK: bb0.clone:
+; CHECK-NEXT: br label [[DOTSPLIT]]
+; CHECK: bb1.clone:
+; CHECK-NEXT: br label [[DOTSPLIT3]]
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %p, i64 1
+ %initval = load i32, i32* %p, align 4
+ %initop = load i32, i32* %incdec.ptr, align 4
+ switch i32 %initop, label %exit [
+ i32 0, label %bb0
+ i32 1, label %bb1
+ ]
+
+bb0:
+ %p.addr.0 = phi i32* [ %incdec.ptr, %entry ], [ %ptr, %indirectgoto ]
+ %opcode.0 = phi i32 [ %initval, %entry ], [ 0, %indirectgoto ]
+ tail call void @use(i32 %opcode.0)
+ br label %indirectgoto
+
+bb1:
+ %p.addr.1 = phi i32* [ %incdec.ptr, %entry ], [ %ptr, %indirectgoto ]
+ %opcode.1 = phi i32 [ %initval, %entry ], [ 1, %indirectgoto ]
+ tail call void @use(i32 %opcode.1)
+ br label %indirectgoto
+
+indirectgoto:
+ %p.addr.sink = phi i32* [ %p.addr.1, %bb1 ], [ %p.addr.0, %bb0 ]
+ %ptr = getelementptr inbounds i32, i32* %p.addr.sink, i64 1
+ %newp = load i32, i32* %p.addr.sink, align 4
+ %idx = sext i32 %newp to i64
+ %arrayidx = getelementptr inbounds [2 x i8*], [2 x i8*]* @simple.targets, i64 0, i64 %idx
+ %newop = load i8*, i8** %arrayidx, align 8
+ indirectbr i8* %newop, [label %bb0, label %bb1]
+
+exit:
+ ret void
+}
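+
+; What the CHECK lines encode: the switch edges get fresh clones (bb0.clone,
+; bb1.clone), leaving the indirectbr as the sole predecessor of the original
+; bb0/bb1. That makes the indirectbr edges non-critical, and the .split
+; blocks merge the cloned and original paths with phis.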
+
+; Don't try to break critical edges when several indirectbr instructions point to a single block.
+define void @multi(i32* nocapture readonly %p) {
+; CHECK-LABEL: @multi(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
+; CHECK-NEXT: [[INITVAL:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT: [[INITOP:%.*]] = load i32, i32* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: switch i32 [[INITOP]], label [[EXIT:%.*]] [
+; CHECK-NEXT: i32 0, label [[BB0:%.*]]
+; CHECK-NEXT: i32 1, label [[BB1:%.*]]
+; CHECK-NEXT: ]
+; CHECK: bb0:
+; CHECK-NEXT: [[P_ADDR_0:%.*]] = phi i32* [ [[INCDEC_PTR]], [[ENTRY:%.*]] ], [ [[NEXT0:%.*]], [[BB0]] ], [ [[NEXT1:%.*]], [[BB1]] ]
+; CHECK-NEXT: [[OPCODE_0:%.*]] = phi i32 [ [[INITVAL]], [[ENTRY]] ], [ 0, [[BB0]] ], [ 1, [[BB1]] ]
+; CHECK-NEXT: tail call void @use(i32 [[OPCODE_0]])
+; CHECK-NEXT: [[NEXT0]] = getelementptr inbounds i32, i32* [[P_ADDR_0]], i64 1
+; CHECK-NEXT: [[NEWP0:%.*]] = load i32, i32* [[P_ADDR_0]], align 4
+; CHECK-NEXT: [[IDX0:%.*]] = sext i32 [[NEWP0]] to i64
+; CHECK-NEXT: [[ARRAYIDX0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* @multi.targets, i64 0, i64 [[IDX0]]
+; CHECK-NEXT: [[NEWOP0:%.*]] = load i8*, i8** [[ARRAYIDX0]], align 8
+; CHECK-NEXT: indirectbr i8* [[NEWOP0]], [label [[BB0]], label %bb1]
+; CHECK: bb1:
+; CHECK-NEXT: [[P_ADDR_1:%.*]] = phi i32* [ [[INCDEC_PTR]], [[ENTRY]] ], [ [[NEXT0]], [[BB0]] ], [ [[NEXT1]], [[BB1]] ]
+; CHECK-NEXT: [[OPCODE_1:%.*]] = phi i32 [ [[INITVAL]], [[ENTRY]] ], [ 0, [[BB0]] ], [ 1, [[BB1]] ]
+; CHECK-NEXT: tail call void @use(i32 [[OPCODE_1]])
+; CHECK-NEXT: [[NEXT1]] = getelementptr inbounds i32, i32* [[P_ADDR_1]], i64 1
+; CHECK-NEXT: [[NEWP1:%.*]] = load i32, i32* [[P_ADDR_1]], align 4
+; CHECK-NEXT: [[IDX1:%.*]] = sext i32 [[NEWP1]] to i64
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* @multi.targets, i64 0, i64 [[IDX1]]
+; CHECK-NEXT: [[NEWOP1:%.*]] = load i8*, i8** [[ARRAYIDX1]], align 8
+; CHECK-NEXT: indirectbr i8* [[NEWOP1]], [label [[BB0]], label %bb1]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %p, i64 1
+ %initval = load i32, i32* %p, align 4
+ %initop = load i32, i32* %incdec.ptr, align 4
+ switch i32 %initop, label %exit [
+ i32 0, label %bb0
+ i32 1, label %bb1
+ ]
+
+bb0:
+ %p.addr.0 = phi i32* [ %incdec.ptr, %entry ], [ %next0, %bb0 ], [ %next1, %bb1 ]
+ %opcode.0 = phi i32 [ %initval, %entry ], [ 0, %bb0 ], [ 1, %bb1 ]
+ tail call void @use(i32 %opcode.0)
+ %next0 = getelementptr inbounds i32, i32* %p.addr.0, i64 1
+ %newp0 = load i32, i32* %p.addr.0, align 4
+ %idx0 = sext i32 %newp0 to i64
+ %arrayidx0 = getelementptr inbounds [2 x i8*], [2 x i8*]* @multi.targets, i64 0, i64 %idx0
+ %newop0 = load i8*, i8** %arrayidx0, align 8
+ indirectbr i8* %newop0, [label %bb0, label %bb1]
+
+bb1:
+ %p.addr.1 = phi i32* [ %incdec.ptr, %entry ], [ %next0, %bb0 ], [ %next1, %bb1 ]
+ %opcode.1 = phi i32 [ %initval, %entry ], [ 0, %bb0 ], [ 1, %bb1 ]
+ tail call void @use(i32 %opcode.1)
+ %next1 = getelementptr inbounds i32, i32* %p.addr.1, i64 1
+ %newp1 = load i32, i32* %p.addr.1, align 4
+ %idx1 = sext i32 %newp1 to i64
+ %arrayidx1 = getelementptr inbounds [2 x i8*], [2 x i8*]* @multi.targets, i64 0, i64 %idx1
+ %newop1 = load i8*, i8** %arrayidx1, align 8
+ indirectbr i8* %newop1, [label %bb0, label %bb1]
+
+exit:
+ ret void
+}
+
+; Make sure we do the right thing for cases where the indirectbr branches to
+; the block it terminates.
+define void @loop(i64* nocapture readonly %p) {
+; CHECK-LABEL: @loop(
+; CHECK-NEXT: bb0.clone:
+; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
+; CHECK: bb0:
+; CHECK-NEXT: br label [[DOTSPLIT]]
+; CHECK: .split:
+; CHECK-NEXT: [[MERGE:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[BB0:%.*]] ], [ 0, [[BB0_CLONE:%.*]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i64, i64* [[P:%.*]], i64 [[MERGE]]
+; CHECK-NEXT: store i64 [[MERGE]], i64* [[TMP0]], align 4
+; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[MERGE]], 1
+; CHECK-NEXT: [[IDX:%.*]] = srem i64 [[MERGE]], 2
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* @loop.targets, i64 0, i64 [[IDX]]
+; CHECK-NEXT: [[TARGET:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
+; CHECK-NEXT: indirectbr i8* [[TARGET]], [label [[BB0]], label %bb1]
+; CHECK: bb1:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %bb0
+
+bb0:
+ %i = phi i64 [ %i.next, %bb0 ], [ 0, %entry ]
+ %tmp0 = getelementptr inbounds i64, i64* %p, i64 %i
+ store i64 %i, i64* %tmp0, align 4
+ %i.next = add nuw nsw i64 %i, 1
+ %idx = srem i64 %i, 2
+ %arrayidx = getelementptr inbounds [2 x i8*], [2 x i8*]* @loop.targets, i64 0, i64 %idx
+ %target = load i8*, i8** %arrayidx, align 8
+ indirectbr i8* %target, [label %bb0, label %bb1]
+
+bb1:
+ ret void
+}
+
+; Don't do anything for cases that contain no phis.
+define void @nophi(i32* %p) {
+; CHECK-LABEL: @nophi(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
+; CHECK-NEXT: [[INITOP:%.*]] = load i32, i32* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: switch i32 [[INITOP]], label [[EXIT:%.*]] [
+; CHECK-NEXT: i32 0, label [[BB0:%.*]]
+; CHECK-NEXT: i32 1, label [[BB1:%.*]]
+; CHECK-NEXT: ]
+; CHECK: bb0:
+; CHECK-NEXT: tail call void @use(i32 0)
+; CHECK-NEXT: br label [[INDIRECTGOTO:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: tail call void @use(i32 1)
+; CHECK-NEXT: br label [[INDIRECTGOTO]]
+; CHECK: indirectgoto:
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[P]] to i8*
+; CHECK-NEXT: [[SUNKADDR:%.*]] = getelementptr i8, i8* [[TMP0]], i64 4
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[SUNKADDR]] to i32*
+; CHECK-NEXT: [[NEWP:%.*]] = load i32, i32* [[TMP1]], align 4
+; CHECK-NEXT: [[IDX:%.*]] = sext i32 [[NEWP]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* @nophi.targets, i64 0, i64 [[IDX]]
+; CHECK-NEXT: [[NEWOP:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
+; CHECK-NEXT: indirectbr i8* [[NEWOP]], [label [[BB0]], label %bb1]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %p, i64 1
+ %initop = load i32, i32* %incdec.ptr, align 4
+ switch i32 %initop, label %exit [
+ i32 0, label %bb0
+ i32 1, label %bb1
+ ]
+
+bb0:
+ tail call void @use(i32 0)
+ br label %indirectgoto
+
+bb1:
+ tail call void @use(i32 1)
+ br label %indirectgoto
+
+indirectgoto:
+ %newp = load i32, i32* %incdec.ptr, align 4
+ %idx = sext i32 %newp to i64
+ %arrayidx = getelementptr inbounds [2 x i8*], [2 x i8*]* @nophi.targets, i64 0, i64 %idx
+ %newop = load i8*, i8** %arrayidx, align 8
+ indirectbr i8* %newop, [label %bb0, label %bb1]
+
+exit:
+ ret void
+}
+
+; Don't do anything if the edge isn't critical.
+define i32 @noncritical(i32 %k, i8* %p)
+; CHECK-LABEL: @noncritical(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[D:%.*]] = add i32 [[K:%.*]], 1
+; CHECK-NEXT: indirectbr i8* [[P:%.*]], [label [[BB0:%.*]], label %bb1]
+; CHECK: bb0:
+; CHECK-NEXT: [[R0:%.*]] = sub i32 [[K]], [[D]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: [[R1:%.*]] = sub i32 [[D]], [[K]]
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: exit:
+; CHECK-NEXT: [[V:%.*]] = phi i32 [ [[R0]], [[BB0]] ], [ [[R1]], [[BB1:%.*]] ]
+; CHECK-NEXT: ret i32 0
+;
+{
+entry:
+ %d = add i32 %k, 1
+ indirectbr i8* %p, [label %bb0, label %bb1]
+
+bb0:
+ %v00 = phi i32 [%k, %entry]
+ %v01 = phi i32 [%d, %entry]
+ %r0 = sub i32 %v00, %v01
+ br label %exit
+
+bb1:
+ %v10 = phi i32 [%d, %entry]
+ %v11 = phi i32 [%k, %entry]
+ %r1 = sub i32 %v10, %v11
+ br label %exit
+
+exit:
+ %v = phi i32 [%r0, %bb0], [%r1, %bb1]
+ ret i32 0
+}
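+
+; Reminder: an edge is critical only if its source has multiple successors
+; and its destination has multiple predecessors. Here bb0 and bb1 each have
+; the single predecessor %entry, so there is nothing to split and the
+; single-entry phis simply fold away (note the direct subs in the CHECKs).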
diff --git a/test/Transforms/CodeGenPrepare/X86/sink-addrmode.ll b/test/Transforms/CodeGenPrepare/X86/sink-addrmode.ll
index 5c0b5f3839d0..9d6e668167fb 100644
--- a/test/Transforms/CodeGenPrepare/X86/sink-addrmode.ll
+++ b/test/Transforms/CodeGenPrepare/X86/sink-addrmode.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"
; Can we sink single addressing mode computation to use?
define void @test1(i1 %cond, i64* %base) {
; CHECK-LABEL: @test1
-; CHECK: add i64 {{.+}}, 40
+; CHECK: getelementptr i8, {{.+}} 40
entry:
%addr = getelementptr inbounds i64, i64* %base, i64 5
%casted = bitcast i64* %addr to i32*
@@ -33,7 +33,7 @@ entry:
if.then:
; CHECK-LABEL: if.then:
-; CHECK: add i64 {{.+}}, 40
+; CHECK: getelementptr i8, {{.+}} 40
%v1 = load i32, i32* %casted, align 4
call void @foo(i32 %v1)
%cmp = icmp eq i32 %v1, 0
@@ -41,7 +41,7 @@ if.then:
next:
; CHECK-LABEL: next:
-; CHECK: add i64 {{.+}}, 40
+; CHECK: getelementptr i8, {{.+}} 40
%v2 = load i32, i32* %casted, align 4
call void @foo(i32 %v2)
br label %fallthrough
@@ -61,10 +61,10 @@ entry:
if.then:
; CHECK-LABEL: if.then:
-; CHECK: add i64 {{.+}}, 40
+; CHECK: getelementptr i8, {{.+}} 40
%v1 = load i32, i32* %casted, align 4
call void @foo(i32 %v1)
-; CHECK-NOT: add i64 {{.+}}, 40
+; CHECK-NOT: getelementptr i8, {{.+}} 40
%v2 = load i32, i32* %casted, align 4
call void @foo(i32 %v2)
br label %fallthrough
@@ -84,7 +84,7 @@ entry:
if.then:
; CHECK-LABEL: if.then:
-; CHECK: add i64 {{.+}}, 40
+; CHECK: getelementptr i8, {{.+}} 40
%v1 = load i32, i32* %casted, align 4
call void @foo(i32 %v1)
%cmp = icmp eq i32 %v1, 0
@@ -95,7 +95,7 @@ fallthrough:
rare.1:
; CHECK-LABEL: rare.1:
-; CHECK: add i64 {{.+}}, 40
+; CHECK: getelementptr i8, {{.+}} 40
call void @slowpath(i32 %v1, i32* %casted) cold
br label %fallthrough
}
@@ -111,7 +111,7 @@ entry:
if.then:
; CHECK-LABEL: if.then:
-; CHECK-NOT: add i64 {{.+}}, 40
+; CHECK-NOT: getelementptr i8, {{.+}} 40
%v1 = load i32, i32* %casted, align 4
call void @foo(i32 %v1)
%cmp = icmp eq i32 %v1, 0
@@ -136,7 +136,7 @@ entry:
if.then:
; CHECK-LABEL: if.then:
-; CHECK-NOT: add i64 {{.+}}, 40
+; CHECK-NOT: getelementptr i8, {{.+}} 40
%v1 = load i32, i32* %casted, align 4
call void @foo(i32 %v1)
%cmp = icmp eq i32 %v1, 0
@@ -162,7 +162,7 @@ entry:
if.then:
; CHECK-LABEL: if.then:
-; CHECK: add i64 {{.+}}, 40
+; CHECK: getelementptr i8, {{.+}} 40
%v1 = load i32, i32* %casted, align 4
call void @foo(i32 %v1)
%cmp = icmp eq i32 %v1, 0
@@ -170,7 +170,7 @@ if.then:
next:
; CHECK-LABEL: next:
-; CHECK: add i64 {{.+}}, 40
+; CHECK: getelementptr i8, {{.+}} 40
%v2 = load i32, i32* %casted, align 4
call void @foo(i32 %v2)
%cmp2 = icmp eq i32 %v2, 0
@@ -181,13 +181,13 @@ fallthrough:
rare.1:
; CHECK-LABEL: rare.1:
-; CHECK: add i64 {{.+}}, 40
+; CHECK: getelementptr i8, {{.+}} 40
call void @slowpath(i32 %v1, i32* %casted) cold
br label %next
rare.2:
; CHECK-LABEL: rare.2:
-; CHECK: add i64 {{.+}}, 40
+; CHECK: getelementptr i8, {{.+}} 40
call void @slowpath(i32 %v2, i32* %casted) cold
br label %fallthrough
}
diff --git a/test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll b/test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll
index c9f49b5d4f86..31f0ca239e3a 100644
--- a/test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll
+++ b/test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll
@@ -1,11 +1,12 @@
-; RUN: opt -S -codegenprepare < %s | FileCheck %s
+; RUN: opt -S -codegenprepare < %s | FileCheck %s -check-prefix=CHECK -check-prefix=GEP
target datalayout =
"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-unknown-linux-gnu"
; CHECK-LABEL: @load_cast_gep
-; CHECK: add i64 %sunkaddr, 40
+; GEP: [[CAST:%[0-9]+]] = addrspacecast i64* %base to i8 addrspace(1)*
+; GEP: getelementptr i8, i8 addrspace(1)* [[CAST]], i64 40
define void @load_cast_gep(i1 %cond, i64* %base) {
entry:
%addr = getelementptr inbounds i64, i64* %base, i64 5
@@ -21,7 +22,8 @@ fallthrough:
}
; CHECK-LABEL: @store_gep_cast
-; CHECK: add i64 %sunkaddr, 20
+; GEP: [[CAST:%[0-9]+]] = addrspacecast i64* %base to i8 addrspace(1)*
+; GEP: getelementptr i8, i8 addrspace(1)* [[CAST]], i64 20
define void @store_gep_cast(i1 %cond, i64* %base) {
entry:
%casted = addrspacecast i64* %base to i32 addrspace(1)*
diff --git a/test/Transforms/CodeGenPrepare/basic.ll b/test/Transforms/CodeGenPrepare/basic.ll
index 495d910b5cd6..2e58de7d0934 100644
--- a/test/Transforms/CodeGenPrepare/basic.ll
+++ b/test/Transforms/CodeGenPrepare/basic.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-apple-darwin10.0.0"
; rdar://8785296
define i32 @test1(i8* %ptr) nounwind ssp noredzone align 2 {
entry:
- %0 = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
+ %0 = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false, i1 false)
%1 = icmp ugt i64 %0, 3
br i1 %1, label %T, label %trap
@@ -25,6 +25,44 @@ T:
ret i32 4
}
-declare i64 @llvm.objectsize.i64(i8*, i1) nounwind readonly
+; CHECK-LABEL: @test_objectsize_null_flag(
+define i64 @test_objectsize_null_flag(i8* %ptr) {
+entry:
+ ; CHECK: ret i64 -1
+ %0 = tail call i64 @llvm.objectsize.i64(i8* null, i1 false, i1 true)
+ ret i64 %0
+}
+
+; CHECK-LABEL: @test_objectsize_null_flag_min(
+define i64 @test_objectsize_null_flag_min(i8* %ptr) {
+entry:
+ ; CHECK: ret i64 0
+ %0 = tail call i64 @llvm.objectsize.i64(i8* null, i1 true, i1 true)
+ ret i64 %0
+}
+
+; Test that null pointers in non-zero address spaces are foldable, since we
+; evaluate them with non-exact modes in CodeGenPrepare.
+; CHECK-LABEL: @test_objectsize_null_flag_noas0(
+define i64 @test_objectsize_null_flag_noas0() {
+entry:
+ ; CHECK: ret i64 0
+ %0 = tail call i64 @llvm.objectsize.i64.p1i8(i8 addrspace(1)* null, i1 false,
+ i1 true)
+ ret i64 %0
+}
+
+; CHECK-LABEL: @test_objectsize_null_flag_min_noas0(
+define i64 @test_objectsize_null_flag_min_noas0() {
+entry:
+ ; CHECK: ret i64 0
+ %0 = tail call i64 @llvm.objectsize.i64.p1i8(i8 addrspace(1)* null, i1 true,
+ i1 true)
+ ret i64 %0
+}
+
+
+declare i64 @llvm.objectsize.i64(i8*, i1, i1) nounwind readonly
+declare i64 @llvm.objectsize.i64.p1i8(i8 addrspace(1)*, i1, i1) nounwind readonly
declare void @llvm.trap() nounwind
diff --git a/test/Transforms/CodeGenPrepare/builtin-condition.ll b/test/Transforms/CodeGenPrepare/builtin-condition.ll
index 0d41e9e1eddb..e42529a7b9a1 100644
--- a/test/Transforms/CodeGenPrepare/builtin-condition.ll
+++ b/test/Transforms/CodeGenPrepare/builtin-condition.ll
@@ -74,39 +74,39 @@ entry:
%chararray = alloca [30 x i8], align 16
%chararray2 = alloca [10 x i8], align 1
%0 = getelementptr inbounds [30 x i8], [30 x i8]* %chararray, i64 0, i64 0
- call void @llvm.lifetime.start(i64 30, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 30, i8* %0)
%1 = getelementptr inbounds [10 x i8], [10 x i8]* %chararray2, i64 0, i64 0
- call void @llvm.lifetime.start(i64 10, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 10, i8* %1)
%tobool = icmp eq i32 %flag, 0
%cptr.0 = select i1 %tobool, i8* %0, i8* %1
%2 = call i64 @llvm.objectsize.i64.p0i8(i8* %cptr.0, i1 true)
- call void @llvm.lifetime.end(i64 10, i8* %1)
- call void @llvm.lifetime.end(i64 30, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 10, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 30, i8* %0)
ret i64 %2
; CHECK-LABEL: foo1
; CHECK: ret i64 10
}
-declare void @llvm.lifetime.start(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare i64 @llvm.objectsize.i64.p0i8(i8*, i1)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
define i64 @foo2(i32 %n) {
entry:
%Small = alloca [10 x i8], align 1
%Large = alloca [20 x i8], align 16
%0 = getelementptr inbounds [10 x i8], [10 x i8]* %Small, i64 0, i64 0
- call void @llvm.lifetime.start(i64 10, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 10, i8* %0)
%1 = getelementptr inbounds [20 x i8], [20 x i8]* %Large, i64 0, i64 0
- call void @llvm.lifetime.start(i64 20, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 20, i8* %1)
%tobool = icmp ne i32 %n, 0
%add.ptr = getelementptr inbounds [20 x i8], [20 x i8]* %Large, i64 0, i64 19
%cond = select i1 %tobool, i8* %0, i8* %add.ptr
%2 = call i64 @llvm.objectsize.i64.p0i8(i8* %cond, i1 false)
- call void @llvm.lifetime.end(i64 20, i8* %1)
- call void @llvm.lifetime.end(i64 10, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 20, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 10, i8* %0)
ret i64 %2
; CHECK-LABEL: foo2
; CHECK: ret i64 10
diff --git a/test/Transforms/CodeGenPrepare/section.ll b/test/Transforms/CodeGenPrepare/section.ll
index 795c45c220db..2c96612e1baf 100644
--- a/test/Transforms/CodeGenPrepare/section.ll
+++ b/test/Transforms/CodeGenPrepare/section.ll
@@ -5,12 +5,32 @@ target triple = "x86_64-pc-linux-gnu"
; This tests that hot/cold functions get correct section prefix assigned
; CHECK: hot_func{{.*}}!section_prefix ![[HOT_ID:[0-9]+]]
+; The entry is hot
define void @hot_func() !prof !15 {
ret void
}
+; CHECK: hot_call_func{{.*}}!section_prefix ![[HOT_ID]]
+; The sum of the two callsites is hot
+define void @hot_call_func() !prof !16 {
+ call void @hot_func(), !prof !17
+ call void @hot_func(), !prof !17
+ ret void
+}
+
+; CHECK-NOT: normal_func{{.*}}!section_prefix
+; The sum of all callsites is neither hot nor cold
+define void @normal_func() !prof !16 {
+ call void @hot_func(), !prof !17
+ call void @hot_func(), !prof !18
+ call void @hot_func(), !prof !18
+ ret void
+}
+
; CHECK: cold_func{{.*}}!section_prefix ![[COLD_ID:[0-9]+]]
+; The entry and the callsite are both cold
define void @cold_func() !prof !16 {
+ call void @hot_func(), !prof !18
ret void
}
@@ -33,3 +53,5 @@ define void @cold_func() !prof !16 {
!14 = !{i32 999999, i64 1, i32 2}
!15 = !{!"function_entry_count", i64 1000}
!16 = !{!"function_entry_count", i64 1}
+!17 = !{!"branch_weights", i32 80}
+!18 = !{!"branch_weights", i32 1}
diff --git a/test/Transforms/ConstProp/loads.ll b/test/Transforms/ConstProp/loads.ll
index 89387ad06ba8..dce2068a8d55 100644
--- a/test/Transforms/ConstProp/loads.ll
+++ b/test/Transforms/ConstProp/loads.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -default-data-layout="e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64" -instcombine -S | FileCheck %s --check-prefix=LE
-; RUN: opt < %s -default-data-layout="E-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64" -instcombine -S | FileCheck %s --check-prefix=BE
+; RUN: opt < %s -data-layout="e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64" -instcombine -S | FileCheck %s --check-prefix=LE
+; RUN: opt < %s -data-layout="E-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64" -instcombine -S | FileCheck %s --check-prefix=BE
; {{ 0xDEADBEEF, 0xBA }, 0xCAFEBABE}
@g1 = constant {{i32,i8},i32} {{i32,i8} { i32 -559038737, i8 186 }, i32 -889275714 }
diff --git a/test/Transforms/ConstantHoisting/X86/ehpad.ll b/test/Transforms/ConstantHoisting/X86/ehpad.ll
new file mode 100644
index 000000000000..3178e87f7548
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/X86/ehpad.ll
@@ -0,0 +1,62 @@
+; RUN: opt -S -consthoist < %s | FileCheck %s
+
+; FIXME: The catchpad doesn't even use the constant, so a better fix would be to
+; insert the bitcast in the catchpad block.
+
+target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-windows-msvc"
+
+; CHECK-LABEL: define i32 @main
+; CHECK: %tobool = icmp eq i32 %argc, 0
+; CHECK-NEXT: bitcast i64 9209618997431186100 to i64
+; CHECK-NEXT: br i1 %tobool
+
+; Function Attrs: norecurse
+define i32 @main(i32 %argc, i8** nocapture readnone %argv) local_unnamed_addr #0 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+ %call = tail call i64 @fn(i64 0)
+ %call1 = tail call i64 @fn(i64 1)
+ %tobool = icmp eq i32 %argc, 0
+ br i1 %tobool, label %2, label %1
+
+; <label>:1: ; preds = %0
+ %call2 = invoke i64 @fn(i64 %call)
+ to label %6 unwind label %catch.dispatch
+
+; <label>:2: ; preds = %0
+ %call3 = invoke i64 @fn(i64 %call1)
+ to label %6 unwind label %catch.dispatch
+
+catch.dispatch: ; preds = %2, %1
+ %z.0 = phi i64 [ %call, %1 ], [ %call1, %2 ]
+ %3 = catchswitch within none [label %4] unwind to caller
+
+; <label>:4: ; preds = %catch.dispatch
+ %5 = catchpad within %3 [i8* null, i32 64, i8* null]
+ br i1 %tobool, label %then, label %else
+
+then:
+ %call4 = tail call i64 @fn(i64 %z.0) [ "funclet"(token %5) ]
+ %add = add i64 %call4, 9209618997431186100
+ br label %endif
+
+else:
+ %call5 = tail call i64 @fn(i64 0) [ "funclet"(token %5) ]
+ %add6 = add i64 %call5, 9209618997431186100
+ br label %endif
+
+endif:
+ %v = phi i64 [ %add, %then ], [ %add6, %else ]
+ %call7 = tail call i64 @fn(i64 %v) [ "funclet"(token %5) ]
+ %call8 = tail call i64 @fn(i64 %call7) [ "funclet"(token %5) ]
+ catchret from %5 to label %6
+
+; <label>:6: ; preds = %1, %2, %4
+ ret i32 0
+}
+
+declare i64 @fn(i64) local_unnamed_addr #1
+
+declare i32 @__CxxFrameHandler3(...)
+
+attributes #0 = { norecurse "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/Transforms/ConstantMerge/dont-merge.ll b/test/Transforms/ConstantMerge/dont-merge.ll
index e5337dff27df..21e390785df5 100644
--- a/test/Transforms/ConstantMerge/dont-merge.ll
+++ b/test/Transforms/ConstantMerge/dont-merge.ll
@@ -42,3 +42,41 @@ define void @test3() {
call void asm sideeffect "T3A, T3B",""() ; invisible use of T3A and T3B
ret void
}
+
+; Don't merge constants with !type annotations.
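+; (!type metadata carries type identities used by CFI and whole-program
+; devirtualization, so merging two globals could conflate the type information
+; associated with an address.)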
+
+@T4A1 = internal constant i32 2, !type !0
+@T4A2 = internal unnamed_addr constant i32 2, !type !1
+
+@T4B1 = internal constant i32 3, !type !0
+@T4B2 = internal unnamed_addr constant i32 3, !type !0
+
+@T4C1 = internal constant i32 4, !type !0
+@T4C2 = unnamed_addr constant i32 4
+
+@T4D1 = unnamed_addr constant i32 5, !type !0
+@T4D2 = internal constant i32 5
+
+!0 = !{i64 0, !"typeinfo name for A"}
+!1 = !{i64 0, !"typeinfo name for B"}
+
+; CHECK: @T4A1
+; CHECK: @T4A2
+; CHECK: @T4B1
+; CHECK: @T4B2
+; CHECK: @T4C1
+; CHECK: @T4C2
+; CHECK: @T4D1
+; CHECK: @T4D2
+
+define void @test4(i32** %P1, i32** %P2, i32** %P3, i32** %P4, i32** %P5, i32** %P6, i32** %P7, i32** %P8) {
+ store i32* @T4A1, i32** %P1
+ store i32* @T4A2, i32** %P2
+ store i32* @T4B1, i32** %P3
+ store i32* @T4B2, i32** %P4
+ store i32* @T4C1, i32** %P5
+ store i32* @T4C2, i32** %P6
+ store i32* @T4D1, i32** %P7
+ store i32* @T4D2, i32** %P8
+ ret void
+}
diff --git a/test/Transforms/ConstantMerge/merge-dbg.ll b/test/Transforms/ConstantMerge/merge-dbg.ll
new file mode 100644
index 000000000000..bc33248514e0
--- /dev/null
+++ b/test/Transforms/ConstantMerge/merge-dbg.ll
@@ -0,0 +1,32 @@
+; RUN: opt < %s -constmerge -S | FileCheck %s
+
+; CHECK: = constant i32 1, !dbg [[A:![0-9]+]], !dbg [[B:![0-9]+]]
+@a = internal constant i32 1, !dbg !0
+@b = unnamed_addr constant i32 1, !dbg !9
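+; After merging, the surviving constant keeps the !dbg attachments of both
+; globals, as the CHECK line above verifies.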
+
+define void @test1(i32** %P1, i32** %P2) {
+ store i32* @a, i32** %P1
+ store i32* @b, i32** %P2
+ ret void
+}
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!7, !8}
+
+; CHECK: [[A]] = !DIGlobalVariableExpression(var: [[VA:![0-9]+]])
+; CHECK: [[VA]] = distinct !DIGlobalVariable(name: "y"
+; CHECK: [[B]] = !DIGlobalVariableExpression(var: [[VB:![0-9]+]])
+; CHECK: [[VB]] = distinct !DIGlobalVariable(name: "x"
+
+!0 = !DIGlobalVariableExpression(var: !1)
+!1 = distinct !DIGlobalVariable(name: "x", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 5.0.0 (trunk 297227) (llvm/trunk 297234)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5)
+!3 = !DIFile(filename: "1.cc", directory: "/build")
+!4 = !{}
+!5 = !{!0}
+!6 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!7 = !{i32 2, !"Dwarf Version", i32 4}
+!8 = !{i32 2, !"Debug Info Version", i32 3}
+
+!9 = !DIGlobalVariableExpression(var: !10)
+!10 = distinct !DIGlobalVariable(name: "y", scope: !2, file: !3, line: 1, type: !6, isLocal: false, isDefinition: true)
diff --git a/test/Transforms/Coroutines/ArgAddr.ll b/test/Transforms/Coroutines/ArgAddr.ll
index 4bedb510cd9e..5d0fbd781be9 100644
--- a/test/Transforms/Coroutines/ArgAddr.ll
+++ b/test/Transforms/Coroutines/ArgAddr.ll
@@ -32,7 +32,7 @@ coro_Cleanup:
br label %coro_Suspend
coro_Suspend:
- call void @llvm.coro.end(i8* null, i1 false)
+ call i1 @llvm.coro.end(i8* null, i1 false)
ret i8* %1
}
@@ -61,7 +61,7 @@ declare i32 @llvm.coro.size.i32()
declare i8* @llvm.coro.begin(token, i8*)
declare i8 @llvm.coro.suspend(token, i1)
declare i8* @llvm.coro.free(token, i8*)
-declare void @llvm.coro.end(i8*, i1)
+declare i1 @llvm.coro.end(i8*, i1)
declare void @llvm.coro.resume(i8*)
declare void @llvm.coro.destroy(i8*)
diff --git a/test/Transforms/Coroutines/coro-frame.ll b/test/Transforms/Coroutines/coro-frame.ll
new file mode 100644
index 000000000000..001012fcd0c9
--- /dev/null
+++ b/test/Transforms/Coroutines/coro-frame.ll
@@ -0,0 +1,61 @@
+; Check that we can handle spills of the result of the invoke instruction
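+; (The invoke's result is only defined on its normal edge, so the spill store
+; must be placed in the normal destination block rather than directly after the
+; invoke itself.)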
+; RUN: opt < %s -coro-split -S | FileCheck %s
+
+define i8* @f() "coroutine.presplit"="1" personality i32 0 {
+entry:
+ %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
+ %size = call i32 @llvm.coro.size.i32()
+ %alloc = call i8* @malloc(i32 %size)
+ %hdl = call i8* @llvm.coro.begin(token %id, i8* %alloc)
+ %r = invoke double @print(double 0.0) to label %cont unwind label %pad
+
+cont:
+ %0 = call i8 @llvm.coro.suspend(token none, i1 false)
+ switch i8 %0, label %suspend [i8 0, label %resume
+ i8 1, label %cleanup]
+resume:
+ call double @print(double %r)
+ br label %cleanup
+
+cleanup:
+ %mem = call i8* @llvm.coro.free(token %id, i8* %hdl)
+ call void @free(i8* %mem)
+ br label %suspend
+suspend:
+ call i1 @llvm.coro.end(i8* %hdl, i1 0)
+ ret i8* %hdl
+pad:
+ %tok = cleanuppad within none []
+ cleanupret from %tok unwind to caller
+}
+
+; See if the double was added to the frame
+; CHECK-LABEL: %f.Frame = type { void (%f.Frame*)*, void (%f.Frame*)*, i1, i1, double }
+
+; See if the double was spilled into the frame
+; CHECK-LABEL: @f(
+; CHECK: %r = call double @print(
+; CHECK: %r.spill.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0, i32 4
+; CHECK: store double %r, double* %r.spill.addr
+; CHECK: ret i8* %hdl
+
+; See if the double was loaded from the frame
+; CHECK-LABEL: @f.resume(
+; CHECK: %r.reload = load double, double* %r.reload.addr
+; CHECK: call double @print(double %r.reload)
+; CHECK: ret void
+
+declare i8* @llvm.coro.free(token, i8*)
+declare i32 @llvm.coro.size.i32()
+declare i8 @llvm.coro.suspend(token, i1)
+declare void @llvm.coro.resume(i8*)
+declare void @llvm.coro.destroy(i8*)
+
+declare token @llvm.coro.id(i32, i8*, i8*, i8*)
+declare i1 @llvm.coro.alloc(token)
+declare i8* @llvm.coro.begin(token, i8*)
+declare i1 @llvm.coro.end(i8*, i1)
+
+declare noalias i8* @malloc(i32)
+declare double @print(double)
+declare void @free(i8*)
diff --git a/test/Transforms/Coroutines/coro-spill-after-phi.ll b/test/Transforms/Coroutines/coro-spill-after-phi.ll
new file mode 100644
index 000000000000..3c7e050c09e9
--- /dev/null
+++ b/test/Transforms/Coroutines/coro-spill-after-phi.ll
@@ -0,0 +1,60 @@
+; Verifies that we insert spills of a PHI instruction after all PHI nodes
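+; (LLVM requires all PHI nodes to be grouped at the top of a block, so the
+; spill stores have to go after the last PHI.)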
+; RUN: opt < %s -coro-split -S | FileCheck %s
+
+define i8* @f(i1 %n) "coroutine.presplit"="1" {
+entry:
+ %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
+ %size = call i32 @llvm.coro.size.i32()
+ %alloc = call i8* @malloc(i32 %size)
+ %hdl = call i8* @llvm.coro.begin(token %id, i8* %alloc)
+ br i1 %n, label %begin, label %alt
+alt:
+ br label %begin
+
+begin:
+ %phi1 = phi i32 [ 0, %entry ], [ 2, %alt ]
+ %phi2 = phi i32 [ 1, %entry ], [ 3, %alt ]
+
+ %sp1 = call i8 @llvm.coro.suspend(token none, i1 false)
+ switch i8 %sp1, label %suspend [i8 0, label %resume
+ i8 1, label %cleanup]
+resume:
+ call i32 @print(i32 %phi1)
+ call i32 @print(i32 %phi2)
+ br label %cleanup
+
+cleanup:
+ %mem = call i8* @llvm.coro.free(token %id, i8* %hdl)
+ call void @free(i8* %mem)
+ br label %suspend
+suspend:
+ call i1 @llvm.coro.end(i8* %hdl, i1 0)
+ ret i8* %hdl
+}
+
+; Verifies that both phis are stored correctly in the coroutine frame
+; CHECK: %f.Frame = type { void (%f.Frame*)*, void (%f.Frame*)*, i1, i1, i32, i32 }
+; CHECK-LABEL: @f(
+; CHECK: store void (%f.Frame*)* @f.destroy, void (%f.Frame*)** %destroy.addr
+; CHECK: %phi1 = select i1 %n, i32 0, i32 2
+; CHECK: %phi2 = select i1 %n, i32 1, i32 3
+; CHECK: %phi2.spill.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0, i32 5
+; CHECK: store i32 %phi2, i32* %phi2.spill.addr
+; CHECK: %phi1.spill.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0, i32 4
+; CHECK: store i32 %phi1, i32* %phi1.spill.addr
+; CHECK: ret i8* %hdl
+
+declare i8* @llvm.coro.free(token, i8*)
+declare i32 @llvm.coro.size.i32()
+declare i8 @llvm.coro.suspend(token, i1)
+declare void @llvm.coro.resume(i8*)
+declare void @llvm.coro.destroy(i8*)
+
+declare token @llvm.coro.id(i32, i8*, i8*, i8*)
+declare i1 @llvm.coro.alloc(token)
+declare i8* @llvm.coro.begin(token, i8*)
+declare i1 @llvm.coro.end(i8*, i1)
+
+declare noalias i8* @malloc(i32)
+declare i32 @print(i32)
+declare void @free(i8*)
diff --git a/test/Transforms/Coroutines/coro-split-00.ll b/test/Transforms/Coroutines/coro-split-00.ll
index 12aec27b2fe6..0461b7dddb6c 100644
--- a/test/Transforms/Coroutines/coro-split-00.ll
+++ b/test/Transforms/Coroutines/coro-split-00.ll
@@ -28,7 +28,7 @@ cleanup:
call void @free(i8* %mem)
br label %suspend
suspend:
- call void @llvm.coro.end(i8* %hdl, i1 0)
+ call i1 @llvm.coro.end(i8* %hdl, i1 0)
ret i8* %hdl
}
@@ -72,7 +72,7 @@ declare void @llvm.coro.destroy(i8*)
declare token @llvm.coro.id(i32, i8*, i8*, i8*)
declare i1 @llvm.coro.alloc(token)
declare i8* @llvm.coro.begin(token, i8*)
-declare void @llvm.coro.end(i8*, i1)
+declare i1 @llvm.coro.end(i8*, i1)
declare noalias i8* @malloc(i32)
declare void @print(i32)
diff --git a/test/Transforms/Coroutines/coro-split-01.ll b/test/Transforms/Coroutines/coro-split-01.ll
index 2b5801f7ddd1..cff2e9ca6f0a 100644
--- a/test/Transforms/Coroutines/coro-split-01.ll
+++ b/test/Transforms/Coroutines/coro-split-01.ll
@@ -26,7 +26,7 @@ cleanup:
call void @free(i8* %mem)
br label %suspend
suspend:
- call void @llvm.coro.end(i8* %hdl, i1 0)
+ call i1 @llvm.coro.end(i8* %hdl, i1 0)
ret i8* %hdl
}
define i32 @main() {
@@ -49,7 +49,7 @@ declare void @llvm.coro.destroy(i8*)
declare token @llvm.coro.id(i32, i8*, i8*, i8*)
declare i1 @llvm.coro.alloc(token)
declare i8* @llvm.coro.begin(token, i8*)
-declare void @llvm.coro.end(i8*, i1)
+declare i1 @llvm.coro.end(i8*, i1)
declare noalias i8* @malloc(i32)
declare void @print(i32)
diff --git a/test/Transforms/Coroutines/coro-split-02.ll b/test/Transforms/Coroutines/coro-split-02.ll
index 2326f77f1987..953c25088652 100644
--- a/test/Transforms/Coroutines/coro-split-02.ll
+++ b/test/Transforms/Coroutines/coro-split-02.ll
@@ -28,7 +28,7 @@ await.ready:
call void @print(i32 %val)
br label %exit
exit:
- call void @llvm.coro.end(i8* null, i1 false)
+ call i1 @llvm.coro.end(i8* null, i1 false)
ret void
}
@@ -50,5 +50,5 @@ declare i8* @llvm.coro.frame() #5
declare i8 @llvm.coro.suspend(token, i1) #3
declare void @"\01??3@YAXPEAX@Z"(i8*) local_unnamed_addr #10
declare i8* @llvm.coro.free(token, i8* nocapture readonly) #2
-declare void @llvm.coro.end(i8*, i1) #3
+declare i1 @llvm.coro.end(i8*, i1) #3
diff --git a/test/Transforms/Coroutines/coro-split-dbg.ll b/test/Transforms/Coroutines/coro-split-dbg.ll
index 293622c40ebd..80f706879e55 100644
--- a/test/Transforms/Coroutines/coro-split-dbg.ll
+++ b/test/Transforms/Coroutines/coro-split-dbg.ll
@@ -38,12 +38,12 @@ coro_Cleanup: ; preds = %for.cond
br label %coro_Suspend, !dbg !36
coro_Suspend: ; preds = %for.cond, %if.then, %coro_Cleanup
- tail call void @llvm.coro.end(i8* null, i1 false) #9, !dbg !38
+ tail call i1 @llvm.coro.end(i8* null, i1 false) #9, !dbg !38
ret i8* %2, !dbg !39
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #4
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #4
; Function Attrs: argmemonly nounwind readonly
declare token @llvm.coro.id(i32, i8* readnone, i8* nocapture readonly, i8*) #5
@@ -54,10 +54,10 @@ declare i64 @llvm.coro.size.i64() #1
declare i8* @llvm.coro.begin(token, i8* writeonly) #7
declare token @llvm.coro.save(i8*) #7
declare i8 @llvm.coro.suspend(token, i1) #7
-declare void @llvm.lifetime.end(i64, i8* nocapture) #4
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #4
declare i8* @llvm.coro.free(token, i8* nocapture readonly) #5
declare void @free(i8* nocapture) local_unnamed_addr #6
-declare void @llvm.coro.end(i8*, i1) #7
+declare i1 @llvm.coro.end(i8*, i1) #7
declare i8* @llvm.coro.subfn.addr(i8* nocapture readonly, i8) #5
declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
diff --git a/test/Transforms/Coroutines/coro-split-eh.ll b/test/Transforms/Coroutines/coro-split-eh.ll
new file mode 100644
index 000000000000..7fc97e261e81
--- /dev/null
+++ b/test/Transforms/Coroutines/coro-split-eh.ll
@@ -0,0 +1,145 @@
+; Tests that coro-split removes cleanup code after coro.end in resume functions
+; and retains it in the start function.
+; RUN: opt < %s -coro-split -S | FileCheck %s
+
+define i8* @f(i1 %val) "coroutine.presplit"="1" personality i32 3 {
+entry:
+ %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
+ %hdl = call i8* @llvm.coro.begin(token %id, i8* null)
+ call void @print(i32 0)
+ br i1 %val, label %resume, label %susp
+
+susp:
+ %0 = call i8 @llvm.coro.suspend(token none, i1 false)
+ switch i8 %0, label %suspend [i8 0, label %resume
+ i8 1, label %suspend]
+resume:
+ invoke void @print(i32 1) to label %suspend unwind label %lpad
+
+suspend:
+ call i1 @llvm.coro.end(i8* %hdl, i1 0)
+ call void @print(i32 0) ; should not be present in f.resume
+ ret i8* %hdl
+
+lpad:
+ %lpval = landingpad { i8*, i32 }
+ cleanup
+
+ call void @print(i32 2)
+ %need.resume = call i1 @llvm.coro.end(i8* null, i1 true)
+ br i1 %need.resume, label %eh.resume, label %cleanup.cont
+
+cleanup.cont:
+ call void @print(i32 3) ; should not be present in f.resume
+ br label %eh.resume
+
+eh.resume:
+ resume { i8*, i32 } %lpval
+}
+
+; Verify that the start function contains both print calls: the one before and the one after coro.end
+; CHECK-LABEL: define i8* @f(
+; CHECK: invoke void @print(i32 1)
+; CHECK: to label %AfterCoroEnd unwind label %lpad
+
+; CHECK: AfterCoroEnd:
+; CHECK: call void @print(i32 0)
+; CHECK: ret i8* %hdl
+
+; CHECK: lpad:
+; CHECK-NEXT: %lpval = landingpad { i8*, i32 }
+; CHECK-NEXT: cleanup
+; CHECK-NEXT: call void @print(i32 2)
+; CHECK-NEXT: call void @print(i32 3)
+; CHECK-NEXT: resume { i8*, i32 } %lpval
+
+define i8* @f2(i1 %val) "coroutine.presplit"="1" personality i32 4 {
+entry:
+ %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
+ %hdl = call i8* @llvm.coro.begin(token %id, i8* null)
+ call void @print(i32 0)
+ br i1 %val, label %resume, label %susp
+
+susp:
+ %0 = call i8 @llvm.coro.suspend(token none, i1 false)
+ switch i8 %0, label %suspend [i8 0, label %resume
+ i8 1, label %suspend]
+resume:
+ invoke void @print(i32 1) to label %suspend unwind label %lpad
+
+suspend:
+ call i1 @llvm.coro.end(i8* %hdl, i1 0)
+ call void @print(i32 0) ; should not be present in f.resume
+ ret i8* %hdl
+
+lpad:
+ %tok = cleanuppad within none []
+ call void @print(i32 2)
+ %unused = call i1 @llvm.coro.end(i8* null, i1 true) [ "funclet"(token %tok) ]
+ cleanupret from %tok unwind label %cleanup.cont
+
+cleanup.cont:
+ %tok2 = cleanuppad within none []
+ call void @print(i32 3) ; should not be present in f.resume
+ cleanupret from %tok2 unwind to caller
+}
+
+; Verify that the start function contains both print calls: the one before and the one after coro.end
+; CHECK-LABEL: define i8* @f2(
+; CHECK: invoke void @print(i32 1)
+; CHECK: to label %AfterCoroEnd unwind label %lpad
+
+; CHECK: AfterCoroEnd:
+; CHECK: call void @print(i32 0)
+; CHECK: ret i8* %hdl
+
+; CHECK: lpad:
+; CHECK-NEXT: %tok = cleanuppad within none []
+; CHECK-NEXT: call void @print(i32 2)
+; CHECK-NEXT: call void @print(i32 3)
+; CHECK-NEXT: cleanupret from %tok unwind to caller
+
+; Verify the resume parts
+
+; Verify that the resume function does not contain the print calls appearing after coro.end
+; CHECK-LABEL: define internal fastcc void @f.resume
+; CHECK: invoke void @print(i32 1)
+; CHECK: to label %CoroEnd unwind label %lpad
+
+; CHECK: CoroEnd:
+; CHECK-NEXT: ret void
+
+; CHECK: lpad:
+; CHECK-NEXT: %lpval = landingpad { i8*, i32 }
+; CHECK-NEXT: cleanup
+; CHECK-NEXT: call void @print(i32 2)
+; CHECK-NEXT: resume { i8*, i32 } %lpval
+
+; Verify that the resume function does not contain the print calls appearing after coro.end
+; CHECK-LABEL: define internal fastcc void @f2.resume
+; CHECK: invoke void @print(i32 1)
+; CHECK: to label %CoroEnd unwind label %lpad
+
+; CHECK: CoroEnd:
+; CHECK-NEXT: ret void
+
+; CHECK: lpad:
+; CHECK-NEXT: %tok = cleanuppad within none []
+; CHECK-NEXT: call void @print(i32 2)
+; CHECK-NEXT: cleanupret from %tok unwind to caller
+
+declare i8* @llvm.coro.free(token, i8*)
+declare i32 @llvm.coro.size.i32()
+declare i8 @llvm.coro.suspend(token, i1)
+declare void @llvm.coro.resume(i8*)
+declare void @llvm.coro.destroy(i8*)
+
+declare token @llvm.coro.id(i32, i8*, i8*, i8*)
+declare i8* @llvm.coro.alloc(token)
+declare i8* @llvm.coro.begin(token, i8*)
+declare i1 @llvm.coro.end(i8*, i1)
+
+declare noalias i8* @malloc(i32)
+declare void @print(i32)
+declare void @free(i8*)
+
diff --git a/test/Transforms/Coroutines/ex0.ll b/test/Transforms/Coroutines/ex0.ll
index d4a9f941d838..59bebc546649 100644
--- a/test/Transforms/Coroutines/ex0.ll
+++ b/test/Transforms/Coroutines/ex0.ll
@@ -24,7 +24,7 @@ cleanup:
call void @free(i8* %mem)
br label %suspend
suspend:
- call void @llvm.coro.end(i8* %hdl, i1 0)
+ call i1 @llvm.coro.end(i8* %hdl, i1 0)
ret i8* %hdl
}
@@ -52,7 +52,7 @@ declare void @llvm.coro.resume(i8*)
declare void @llvm.coro.destroy(i8*)
declare i8* @llvm.coro.begin(token, i8*)
-declare void @llvm.coro.end(i8*, i1)
+declare i1 @llvm.coro.end(i8*, i1)
declare noalias i8* @malloc(i32)
declare void @print(i32)
diff --git a/test/Transforms/Coroutines/ex1.ll b/test/Transforms/Coroutines/ex1.ll
index 86ac75b13404..c2a5586fde58 100644
--- a/test/Transforms/Coroutines/ex1.ll
+++ b/test/Transforms/Coroutines/ex1.ll
@@ -20,7 +20,7 @@ cleanup:
call void @free(i8* %mem)
br label %suspend
suspend:
- call void @llvm.coro.end(i8* %hdl, i1 false)
+ call i1 @llvm.coro.end(i8* %hdl, i1 false)
ret i8* %hdl
}
@@ -48,7 +48,7 @@ declare i32 @llvm.coro.size.i32()
declare i8* @llvm.coro.begin(token, i8*)
declare i8 @llvm.coro.suspend(token, i1)
declare i8* @llvm.coro.free(token, i8*)
-declare void @llvm.coro.end(i8*, i1)
+declare i1 @llvm.coro.end(i8*, i1)
declare void @llvm.coro.resume(i8*)
declare void @llvm.coro.destroy(i8*)
diff --git a/test/Transforms/Coroutines/ex2.ll b/test/Transforms/Coroutines/ex2.ll
index 8681e4cecc80..6987d2a4c9fd 100644
--- a/test/Transforms/Coroutines/ex2.ll
+++ b/test/Transforms/Coroutines/ex2.ll
@@ -29,7 +29,7 @@ dyn.free:
call void @CustomFree(i8* %mem)
br label %suspend
suspend:
- call void @llvm.coro.end(i8* %hdl, i1 false)
+ call i1 @llvm.coro.end(i8* %hdl, i1 false)
ret i8* %hdl
}
@@ -57,7 +57,7 @@ declare i32 @llvm.coro.size.i32()
declare i8* @llvm.coro.begin(token, i8*)
declare i8 @llvm.coro.suspend(token, i1)
declare i8* @llvm.coro.free(token, i8*)
-declare void @llvm.coro.end(i8*, i1)
+declare i1 @llvm.coro.end(i8*, i1)
declare void @llvm.coro.resume(i8*)
declare void @llvm.coro.destroy(i8*)
diff --git a/test/Transforms/Coroutines/ex3.ll b/test/Transforms/Coroutines/ex3.ll
index 13289c8e974a..8ff4d718230f 100644
--- a/test/Transforms/Coroutines/ex3.ll
+++ b/test/Transforms/Coroutines/ex3.ll
@@ -26,7 +26,7 @@ cleanup:
call void @free(i8* %mem)
br label %suspend
suspend:
- call void @llvm.coro.end(i8* %hdl, i1 false)
+ call i1 @llvm.coro.end(i8* %hdl, i1 false)
ret i8* %hdl
}
@@ -54,7 +54,7 @@ declare i32 @llvm.coro.size.i32()
declare i8* @llvm.coro.begin(token, i8*)
declare i8 @llvm.coro.suspend(token, i1)
declare i8* @llvm.coro.free(token, i8*)
-declare void @llvm.coro.end(i8*, i1)
+declare i1 @llvm.coro.end(i8*, i1)
declare void @llvm.coro.resume(i8*)
declare void @llvm.coro.destroy(i8*)
diff --git a/test/Transforms/Coroutines/ex4.ll b/test/Transforms/Coroutines/ex4.ll
index ce896ad7ee41..4992052acd2e 100644
--- a/test/Transforms/Coroutines/ex4.ll
+++ b/test/Transforms/Coroutines/ex4.ll
@@ -28,7 +28,7 @@ cleanup:
call void @free(i8* %mem)
br label %suspend
suspend:
- call void @llvm.coro.end(i8* %hdl, i1 false)
+ call i1 @llvm.coro.end(i8* %hdl, i1 false)
ret i8* %hdl
}
@@ -65,7 +65,7 @@ declare i32 @llvm.coro.size.i32()
declare i8* @llvm.coro.begin(token, i8*)
declare i8 @llvm.coro.suspend(token, i1)
declare i8* @llvm.coro.free(token, i8*)
-declare void @llvm.coro.end(i8*, i1)
+declare i1 @llvm.coro.end(i8*, i1)
declare void @llvm.coro.resume(i8*)
declare void @llvm.coro.destroy(i8*)
diff --git a/test/Transforms/Coroutines/ex5.ll b/test/Transforms/Coroutines/ex5.ll
index c9772825f250..34767584c811 100644
--- a/test/Transforms/Coroutines/ex5.ll
+++ b/test/Transforms/Coroutines/ex5.ll
@@ -31,7 +31,7 @@ cleanup:
call void @free(i8* %mem)
br label %suspend
suspend:
- call void @llvm.coro.end(i8* %hdl, i1 false)
+ call i1 @llvm.coro.end(i8* %hdl, i1 false)
ret i8* %hdl
}
@@ -46,7 +46,7 @@ declare i8* @llvm.coro.begin(token, i8*)
declare token @llvm.coro.save(i8*)
declare i8 @llvm.coro.suspend(token, i1)
declare i8* @llvm.coro.free(token, i8*)
-declare void @llvm.coro.end(i8*, i1)
+declare i1 @llvm.coro.end(i8*, i1)
; CHECK-LABEL: @main
define i32 @main() {
diff --git a/test/Transforms/Coroutines/no-suspend.ll b/test/Transforms/Coroutines/no-suspend.ll
index d219495de6cc..804b38cc1abe 100644
--- a/test/Transforms/Coroutines/no-suspend.ll
+++ b/test/Transforms/Coroutines/no-suspend.ll
@@ -32,7 +32,7 @@ dyn.free:
call void @free(i8* %mem)
br label %suspend
suspend:
- call void @llvm.coro.end(i8* %hdl, i1 false)
+ call i1 @llvm.coro.end(i8* %hdl, i1 false)
ret void
}
@@ -77,7 +77,7 @@ cleanup:
call void @free(i8* %mem)
br label %suspend
suspend:
- call void @llvm.coro.end(i8* %hdl, i1 false)
+ call i1 @llvm.coro.end(i8* %hdl, i1 false)
ret void
}
@@ -122,7 +122,7 @@ cleanup:
call void @free(i8* %mem)
br label %suspend
suspend:
- call void @llvm.coro.end(i8* %hdl, i1 false)
+ call i1 @llvm.coro.end(i8* %hdl, i1 false)
ret void
}
@@ -167,7 +167,7 @@ cleanup:
call void @free(i8* %mem)
br label %suspend
suspend:
- call void @llvm.coro.end(i8* %hdl, i1 false)
+ call i1 @llvm.coro.end(i8* %hdl, i1 false)
ret void
}
@@ -183,7 +183,7 @@ declare i8* @llvm.coro.begin(token, i8*)
declare token @llvm.coro.save(i8* %hdl)
declare i8 @llvm.coro.suspend(token, i1)
declare i8* @llvm.coro.free(token, i8*)
-declare void @llvm.coro.end(i8*, i1)
+declare i1 @llvm.coro.end(i8*, i1)
declare void @llvm.coro.resume(i8*)
declare void @llvm.coro.destroy(i8*)
diff --git a/test/Transforms/Coroutines/phi-coro-end.ll b/test/Transforms/Coroutines/phi-coro-end.ll
index e2529412e72c..f99990cf33bc 100644
--- a/test/Transforms/Coroutines/phi-coro-end.ll
+++ b/test/Transforms/Coroutines/phi-coro-end.ll
@@ -17,7 +17,7 @@ cleanup:
suspend:
%r = phi i32 [%n, %entry], [1, %cleanup]
- call void @llvm.coro.end(i8* %hdl, i1 false)
+ call i1 @llvm.coro.end(i8* %hdl, i1 false)
call void @print(i32 %r)
ret i8* %hdl
}
@@ -41,7 +41,7 @@ declare void @llvm.coro.destroy(i8*)
declare token @llvm.coro.id(i32, i8*, i8*, i8*)
declare i8* @llvm.coro.begin(token, i8*)
-declare void @llvm.coro.end(i8*, i1)
+declare i1 @llvm.coro.end(i8*, i1)
declare noalias i8* @malloc(i32)
declare void @print(i32)
diff --git a/test/Transforms/Coroutines/restart-trigger.ll b/test/Transforms/Coroutines/restart-trigger.ll
index 2240f8fa6323..f7f203f2fb5c 100644
--- a/test/Transforms/Coroutines/restart-trigger.ll
+++ b/test/Transforms/Coroutines/restart-trigger.ll
@@ -25,7 +25,7 @@ cleanup:
call void @free(i8* %mem)
br label %suspend
suspend:
- call void @llvm.coro.end(i8* %hdl, i1 0)
+ call i1 @llvm.coro.end(i8* %hdl, i1 0)
ret void
}
@@ -36,7 +36,7 @@ declare i32 @llvm.coro.size.i32()
declare i8 @llvm.coro.suspend(token, i1)
declare void @llvm.coro.resume(i8*)
declare void @llvm.coro.destroy(i8*)
-declare void @llvm.coro.end(i8*, i1)
+declare i1 @llvm.coro.end(i8*, i1)
declare noalias i8* @malloc(i32)
declare void @print(i32)
diff --git a/test/Transforms/CorrelatedValuePropagation/alloca.ll b/test/Transforms/CorrelatedValuePropagation/alloca.ll
index 0a6ba675a477..37b27b29445c 100644
--- a/test/Transforms/CorrelatedValuePropagation/alloca.ll
+++ b/test/Transforms/CorrelatedValuePropagation/alloca.ll
@@ -13,14 +13,14 @@ target triple = "x86_64-unknown-linux-gnu"
@.str = private unnamed_addr constant [8 x i8] c"a = %l\0A\00", align 1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare void @hoo(i64*)
declare i32 @printf(i8* nocapture readonly, ...)
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
define void @goo(i32 %N, i64* %b) {
entry:
@@ -35,12 +35,12 @@ for.cond: ; preds = %for.body, %entry
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start(i64 8, i8* %tmp)
+ call void @llvm.lifetime.start.p0i8(i64 8, i8* %tmp)
call void @hoo(i64* %a.i)
call void @hoo(i64* %c)
%tmp1 = load volatile i64, i64* %a.i, align 8
%call.i = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i64 %tmp1)
- call void @llvm.lifetime.end(i64 8, i8* %tmp)
+ call void @llvm.lifetime.end.p0i8(i64 8, i8* %tmp)
%inc = add nsw i32 %i.0, 1
br label %for.cond
diff --git a/test/Transforms/CorrelatedValuePropagation/basic.ll b/test/Transforms/CorrelatedValuePropagation/basic.ll
index 9836c7f80778..14b9a1999cc3 100644
--- a/test/Transforms/CorrelatedValuePropagation/basic.ll
+++ b/test/Transforms/CorrelatedValuePropagation/basic.ll
@@ -115,9 +115,9 @@ negative:
i32 1, label %out
; CHECK-NOT: i32 1
i32 -1, label %next
-; CHECK: i32 -1, label %next
+; CHECK-DAG: i32 -1, label %next
i32 -2, label %next
-; CHECK: i32 -2, label %next
+; CHECK-DAG: i32 -2, label %next
i32 2, label %out
; CHECK-NOT: i32 2
i32 3, label %out
diff --git a/test/Transforms/DeadArgElim/call_profile.ll b/test/Transforms/DeadArgElim/call_profile.ll
new file mode 100644
index 000000000000..6acb6f000dbe
--- /dev/null
+++ b/test/Transforms/DeadArgElim/call_profile.ll
@@ -0,0 +1,22 @@
+; RUN: opt -deadargelim -S < %s | FileCheck %s
+
+; Checks that !prof metadata is correct in deadargelim.
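+; Deadargelim rewrites each call site with the new signature, so the !prof
+; branch_weights must be carried over to the replacement calls.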
+
+define void @caller() #0 {
+; CHECK: call void @test_vararg(), !prof ![[PROF:[0-9]]]
+; CHECK: call void @test(), !prof ![[PROF]]
+ call void (i32, ...) @test_vararg(i32 1), !prof !0
+ call void @test(i32 1), !prof !0
+ ret void
+}
+
+define internal void @test_vararg(i32, ...) #1 {
+ ret void
+}
+
+define internal void @test(i32 %a) #1 {
+ ret void
+}
+
+; CHECK: ![[PROF]] = !{!"branch_weights", i32 30}
+!0 = !{!"branch_weights", i32 30}
diff --git a/test/Transforms/DeadStoreElimination/dominate.ll b/test/Transforms/DeadStoreElimination/dominate.ll
index 638992bae729..24dd65e07bbc 100644
--- a/test/Transforms/DeadStoreElimination/dominate.ll
+++ b/test/Transforms/DeadStoreElimination/dominate.ll
@@ -9,12 +9,12 @@ bb1:
br label %bb3
bb2:
- call void @llvm.lifetime.end(i64 -1, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %0)
br label %bb3
bb3:
call void @bar()
- call void @llvm.lifetime.end(i64 -1, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %0)
br label %bb4
bb4:
@@ -22,4 +22,4 @@ bb4:
}
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
diff --git a/test/Transforms/DeadStoreElimination/lifetime.ll b/test/Transforms/DeadStoreElimination/lifetime.ll
index 305c916dc02b..97f199b5e0f6 100644
--- a/test/Transforms/DeadStoreElimination/lifetime.ll
+++ b/test/Transforms/DeadStoreElimination/lifetime.ll
@@ -2,8 +2,8 @@
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
declare void @llvm.memset.p0i8.i8(i8* nocapture, i8, i8, i32, i1) nounwind
define void @test1() {
@@ -11,7 +11,7 @@ define void @test1() {
%A = alloca i8
store i8 0, i8* %A ;; Written to by memset
- call void @llvm.lifetime.end(i64 1, i8* %A)
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %A)
; CHECK: lifetime.end
call void @llvm.memset.p0i8.i8(i8* %A, i8 0, i8 -1, i32 0, i1 false)
@@ -25,11 +25,11 @@ define void @test2(i32* %P) {
; CHECK: test2
%Q = getelementptr i32, i32* %P, i32 1
%R = bitcast i32* %Q to i8*
- call void @llvm.lifetime.start(i64 4, i8* %R)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %R)
; CHECK: lifetime.start
store i32 0, i32* %Q ;; This store is dead.
; CHECK-NOT: store
- call void @llvm.lifetime.end(i64 4, i8* %R)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %R)
; CHECK: lifetime.end
ret void
}
diff --git a/test/Transforms/DeadStoreElimination/operand-bundles.ll b/test/Transforms/DeadStoreElimination/operand-bundles.ll
index d71b9673ed1d..784b2e8e55f7 100644
--- a/test/Transforms/DeadStoreElimination/operand-bundles.ll
+++ b/test/Transforms/DeadStoreElimination/operand-bundles.ll
@@ -41,3 +41,15 @@ define void @test3() {
store i64 0, i64* %s
ret void
}
+
+declare noalias i8* @calloc(i64, i64)
+
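+; The store writes 0 over memory that calloc already zero-initialized, so it is
+; a no-op and can be removed even though the "deopt" bundle holds %local_obj.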
+define void @test4() {
+; CHECK-LABEL: @test4
+ %local_obj = call i8* @calloc(i64 1, i64 4)
+ call void @foo() ["deopt" (i8* %local_obj)]
+ store i8 0, i8* %local_obj, align 4
+ ; CHECK-NOT: store i8 0, i8* %local_obj, align 4
+ call void @bar(i8* nocapture %local_obj)
+ ret void
+}
diff --git a/test/Transforms/EarlyCSE/readnone-mayunwind.ll b/test/Transforms/EarlyCSE/readnone-mayunwind.ll
new file mode 100644
index 000000000000..47a513f2d6a6
--- /dev/null
+++ b/test/Transforms/EarlyCSE/readnone-mayunwind.ll
@@ -0,0 +1,15 @@
+; RUN: opt -S -early-cse < %s | FileCheck %s
+
+declare void @readnone_may_unwind() readnone
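+; readnone does not imply nounwind: the call may still unwind, so the first
+; store is observable on the unwind path and must not be eliminated as dead.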
+
+define void @f(i32* %ptr) {
+; CHECK-LABEL: @f(
+; CHECK: store i32 100, i32* %ptr
+; CHECK: call void @readnone_may_unwind()
+; CHECK: store i32 200, i32* %ptr
+
+ store i32 100, i32* %ptr
+ call void @readnone_may_unwind()
+ store i32 200, i32* %ptr
+ ret void
+}
diff --git a/test/Transforms/FunctionAttrs/nonnull.ll b/test/Transforms/FunctionAttrs/nonnull.ll
index 1fb64b7434ab..4a1ff14b2041 100644
--- a/test/Transforms/FunctionAttrs/nonnull.ll
+++ b/test/Transforms/FunctionAttrs/nonnull.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -functionattrs %s | FileCheck %s
+; RUN: opt -S -functionattrs -enable-nonnull-arg-prop %s | FileCheck %s
declare nonnull i8* @ret_nonnull()
; Return a pointer trivially nonnull (call return attribute)
@@ -71,4 +71,148 @@ exit:
ret i8* %phi
}
+; Test propagation of nonnull callsite args back to caller.
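+; If a callsite that requires a nonnull argument is guaranteed to execute,
+; passing null through the parent would be undefined behavior on every path, so
+; the parent's corresponding parameter can be marked nonnull as well.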
+
+declare void @use1(i8* %x)
+declare void @use2(i8* %x, i8* %y);
+declare void @use3(i8* %x, i8* %y, i8* %z);
+
+declare void @use1nonnull(i8* nonnull %x);
+declare void @use2nonnull(i8* nonnull %x, i8* nonnull %y);
+declare void @use3nonnull(i8* nonnull %x, i8* nonnull %y, i8* nonnull %z);
+
+declare i8 @use1safecall(i8* %x) readonly nounwind ; readonly+nounwind guarantees that execution continues to successor
+
+; Can't extend non-null to parent for any argument because the 2nd call is not guaranteed to execute.
+
+define void @parent1(i8* %a, i8* %b, i8* %c) {
+; CHECK-LABEL: @parent1(i8* %a, i8* %b, i8* %c)
+; CHECK-NEXT: call void @use3(i8* %c, i8* %a, i8* %b)
+; CHECK-NEXT: call void @use3nonnull(i8* %b, i8* %c, i8* %a)
+; CHECK-NEXT: ret void
+;
+ call void @use3(i8* %c, i8* %a, i8* %b)
+ call void @use3nonnull(i8* %b, i8* %c, i8* %a)
+ ret void
+}
+
+; Extend non-null to parent for all arguments.
+
+define void @parent2(i8* %a, i8* %b, i8* %c) {
+; CHECK-LABEL: @parent2(i8* nonnull %a, i8* nonnull %b, i8* nonnull %c)
+; CHECK-NEXT: call void @use3nonnull(i8* %b, i8* %c, i8* %a)
+; CHECK-NEXT: call void @use3(i8* %c, i8* %a, i8* %b)
+; CHECK-NEXT: ret void
+;
+ call void @use3nonnull(i8* %b, i8* %c, i8* %a)
+ call void @use3(i8* %c, i8* %a, i8* %b)
+ ret void
+}
+
+; Extend non-null to parent for 1st argument.
+
+define void @parent3(i8* %a, i8* %b, i8* %c) {
+; CHECK-LABEL: @parent3(i8* nonnull %a, i8* %b, i8* %c)
+; CHECK-NEXT: call void @use1nonnull(i8* %a)
+; CHECK-NEXT: call void @use3(i8* %c, i8* %b, i8* %a)
+; CHECK-NEXT: ret void
+;
+ call void @use1nonnull(i8* %a)
+ call void @use3(i8* %c, i8* %b, i8* %a)
+ ret void
+}
+
+; Extend non-null to parent for last 2 arguments.
+
+define void @parent4(i8* %a, i8* %b, i8* %c) {
+; CHECK-LABEL: @parent4(i8* %a, i8* nonnull %b, i8* nonnull %c)
+; CHECK-NEXT: call void @use2nonnull(i8* %c, i8* %b)
+; CHECK-NEXT: call void @use2(i8* %a, i8* %c)
+; CHECK-NEXT: call void @use1(i8* %b)
+; CHECK-NEXT: ret void
+;
+ call void @use2nonnull(i8* %c, i8* %b)
+ call void @use2(i8* %a, i8* %c)
+ call void @use1(i8* %b)
+ ret void
+}
+
+; The callsite must execute in order for the attribute to transfer to the parent.
+; It appears benign to extend non-null to the parent in this case, but we can't
+; do that because it would propagate incorrect information to its callers.
+
+define void @parent5(i8* %a, i1 %a_is_notnull) {
+; CHECK-LABEL: @parent5(i8* %a, i1 %a_is_notnull)
+; CHECK-NEXT: br i1 %a_is_notnull, label %t, label %f
+; CHECK: t:
+; CHECK-NEXT: call void @use1nonnull(i8* %a)
+; CHECK-NEXT: ret void
+; CHECK: f:
+; CHECK-NEXT: ret void
+;
+ br i1 %a_is_notnull, label %t, label %f
+t:
+ call void @use1nonnull(i8* %a)
+ ret void
+f:
+ ret void
+}
+
+; The callsite must execute in order for the attribute to transfer to the parent.
+; The volatile load might trap, so there's no guarantee that we'll ever get to the call.
+
+define i8 @parent6(i8* %a, i8* %b) {
+; CHECK-LABEL: @parent6(i8* %a, i8* %b)
+; CHECK-NEXT: [[C:%.*]] = load volatile i8, i8* %b
+; CHECK-NEXT: call void @use1nonnull(i8* %a)
+; CHECK-NEXT: ret i8 [[C]]
+;
+ %c = load volatile i8, i8* %b
+ call void @use1nonnull(i8* %a)
+ ret i8 %c
+}
+
+; The nonnull callsite is guaranteed to execute, so the argument must be nonnull throughout the parent.
+
+define i8 @parent7(i8* %a) {
+; CHECK-LABEL: @parent7(i8* nonnull %a)
+; CHECK-NEXT: [[RET:%.*]] = call i8 @use1safecall(i8* %a)
+; CHECK-NEXT: call void @use1nonnull(i8* %a)
+; CHECK-NEXT: ret i8 [[RET]]
+;
+ %ret = call i8 @use1safecall(i8* %a)
+ call void @use1nonnull(i8* %a)
+ ret i8 %ret
+}
+
+; Make sure that an invoke works similarly to a call.
+
+declare i32 @esfp(...)
+
+define i1 @parent8(i8* %a, i8* %bogus1, i8* %b) personality i8* bitcast (i32 (...)* @esfp to i8*){
+; CHECK-LABEL: @parent8(i8* nonnull %a, i8* nocapture readnone %bogus1, i8* nonnull %b)
+; CHECK-NEXT: entry:
+; CHECK-NEXT: invoke void @use2nonnull(i8* %a, i8* %b)
+; CHECK-NEXT: to label %cont unwind label %exc
+; CHECK: cont:
+; CHECK-NEXT: [[NULL_CHECK:%.*]] = icmp eq i8* %b, null
+; CHECK-NEXT: ret i1 [[NULL_CHECK]]
+; CHECK: exc:
+; CHECK-NEXT: [[LP:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT: filter [0 x i8*] zeroinitializer
+; CHECK-NEXT: unreachable
+;
+entry:
+ invoke void @use2nonnull(i8* %a, i8* %b)
+ to label %cont unwind label %exc
+
+cont:
+ %null_check = icmp eq i8* %b, null
+ ret i1 %null_check
+
+exc:
+ %lp = landingpad { i8*, i32 }
+ filter [0 x i8*] zeroinitializer
+ unreachable
+}
diff --git a/test/Transforms/FunctionImport/funcimport.ll b/test/Transforms/FunctionImport/funcimport.ll
index 97c18488af64..cc732a3bd98d 100644
--- a/test/Transforms/FunctionImport/funcimport.ll
+++ b/test/Transforms/FunctionImport/funcimport.ll
@@ -4,20 +4,16 @@
; RUN: llvm-lto -thinlto -print-summary-global-ids -o %t3 %t.bc %t2.bc 2>&1 | FileCheck %s --check-prefix=GUID
; Do the import now
-; RUN: opt -disable-force-link-odr -function-import -stats -print-imports -enable-import-metadata -summary-file %t3.thinlto.bc %t.bc -S 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=INSTLIMDEF
+; RUN: opt -function-import -stats -print-imports -enable-import-metadata -summary-file %t3.thinlto.bc %t.bc -S 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=INSTLIMDEF
; Try again with new pass manager
-; RUN: opt -disable-force-link-odr -passes='function-import' -stats -print-imports -enable-import-metadata -summary-file %t3.thinlto.bc %t.bc -S 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=INSTLIMDEF
+; RUN: opt -passes='function-import' -stats -print-imports -enable-import-metadata -summary-file %t3.thinlto.bc %t.bc -S 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=INSTLIMDEF
; "-stats" requires +Asserts.
; REQUIRES: asserts
; Test import with smaller instruction limit
-; RUN: opt -disable-force-link-odr -function-import -enable-import-metadata -summary-file %t3.thinlto.bc %t.bc -import-instr-limit=5 -S | FileCheck %s --check-prefix=CHECK --check-prefix=INSTLIM5
+; RUN: opt -function-import -enable-import-metadata -summary-file %t3.thinlto.bc %t.bc -import-instr-limit=5 -S | FileCheck %s --check-prefix=CHECK --check-prefix=INSTLIM5
; INSTLIM5-NOT: @staticfunc.llvm.
-; Test import with smaller instruction limit and without the -disable-force-link-odr
-; RUN: opt -function-import -summary-file %t3.thinlto.bc %t.bc -import-instr-limit=5 -S | FileCheck %s --check-prefix=INSTLIM5ODR
-; INSTLIM5ODR: define linkonce_odr void @linkonceodr() {
-
define i32 @main() #0 {
entry:
@@ -44,10 +40,12 @@ declare void @weakalias(...) #1
; CHECK-DAG: declare void @analias
declare void @analias(...) #1
-; FIXME: Add this checking back when follow on fix to add alias summary
-; records is committed.
; Aliases import the aliasee function
declare void @linkoncealias(...) #1
+; INSTLIMDEF-DAG: Import linkoncealias
+; INSTLIMDEF-DAG: Import linkoncefunc
+; CHECK-DAG: define linkonce_odr void @linkoncefunc()
+; CHECK-DAG: @linkoncealias = alias void (...), bitcast (void ()* @linkoncefunc to void (...)*
; INSTLIMDEF-DAG: Import referencestatics
; INSTLIMDEF-DAG: define available_externally i32 @referencestatics(i32 %i) !thinlto_src_module !0 {
diff --git a/test/Transforms/FunctionImport/unnamed-globals.ll b/test/Transforms/FunctionImport/unnamed-globals.ll
new file mode 100644
index 000000000000..167fad28f439
--- /dev/null
+++ b/test/Transforms/FunctionImport/unnamed-globals.ll
@@ -0,0 +1,10 @@
+; Make sure we don't crash when referencing an unnamed global.
+; RUN: opt %s -module-summary-analysis -S
+
+@0 = external global [1 x { i64 }]
+
+define internal void @tinkywinky() {
+ call void @patatino(i64 ptrtoint ([1 x { i64 }]* @0 to i64), i64 4)
+ ret void
+}
+declare void @patatino(i64, i64)
diff --git a/test/Transforms/GVN/PRE/rle-addrspace-cast.ll b/test/Transforms/GVN/PRE/rle-addrspace-cast.ll
index 07fd7c11d1b5..d8de5b360ba1 100644
--- a/test/Transforms/GVN/PRE/rle-addrspace-cast.ll
+++ b/test/Transforms/GVN/PRE/rle-addrspace-cast.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -default-data-layout="e-p:32:32:32-p1:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-n8:16:32" -basicaa -gvn -S -die | FileCheck %s
+; RUN: opt < %s -data-layout="e-p:32:32:32-p1:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-n8:16:32" -basicaa -gvn -S -die | FileCheck %s
define i8 @coerce_offset0_addrspacecast(i32 %V, i32* %P) {
store i32 %V, i32* %P
diff --git a/test/Transforms/GVN/PRE/rle.ll b/test/Transforms/GVN/PRE/rle.ll
index c1946faab20e..1d2cba2f1f64 100644
--- a/test/Transforms/GVN/PRE/rle.ll
+++ b/test/Transforms/GVN/PRE/rle.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -default-data-layout="e-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-n8:16:32" -basicaa -gvn -S -die | FileCheck %s
-; RUN: opt < %s -default-data-layout="E-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-n32" -basicaa -gvn -S -die | FileCheck %s
+; RUN: opt < %s -data-layout="e-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-n8:16:32" -basicaa -gvn -S -die | FileCheck %s
+; RUN: opt < %s -data-layout="E-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-n32" -basicaa -gvn -S -die | FileCheck %s
;; Trivial RLE test.
define i32 @test0(i32 %V, i32* %P) {
diff --git a/test/Transforms/GVN/cond_br2.ll b/test/Transforms/GVN/cond_br2.ll
index baa282ec200c..a3749510cb4a 100644
--- a/test/Transforms/GVN/cond_br2.ll
+++ b/test/Transforms/GVN/cond_br2.ll
@@ -18,7 +18,7 @@ define void @_Z4testv() #0 personality i8* bitcast (i32 (...)* @__gxx_personalit
entry:
%sv = alloca %"class.llvm::SmallVector", align 16
%0 = bitcast %"class.llvm::SmallVector"* %sv to i8*
- call void @llvm.lifetime.start(i64 64, i8* %0) #1
+ call void @llvm.lifetime.start.p0i8(i64 64, i8* %0) #1
%BeginX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
%FirstEl.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 3
%1 = bitcast %"union.llvm::SmallVectorBase::U"* %FirstEl.i.i.i.i.i.i to i8*
@@ -94,7 +94,7 @@ if.then.i.i.i20: ; preds = %invoke.cont3
br label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21
_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21: ; preds = %invoke.cont3, %if.then.i.i.i20
- call void @llvm.lifetime.end(i64 64, i8* %0) #1
+ call void @llvm.lifetime.end.p0i8(i64 64, i8* %0) #1
ret void
lpad: ; preds = %if.end.i14, %if.end.i, %invoke.cont2
@@ -113,14 +113,14 @@ eh.resume: ; preds = %if.then.i.i.i, %lpa
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare i32 @__gxx_personality_v0(...)
declare void @_Z1gRN4llvm11SmallVectorIiLj8EEE(%"class.llvm::SmallVector"*) #2
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
declare void @_ZN4llvm15SmallVectorBase8grow_podEmm(%"class.llvm::SmallVectorBase"*, i64, i64) #2
diff --git a/test/Transforms/GVN/debugloc.ll b/test/Transforms/GVN/debugloc.ll
new file mode 100644
index 000000000000..d8c1632d1442
--- /dev/null
+++ b/test/Transforms/GVN/debugloc.ll
@@ -0,0 +1,77 @@
+; RUN: opt < %s -gvn -S | FileCheck %s
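+; Check that the phis GVN inserts do not pick up a debug location from the
+; instructions they replace (no !dbg on the phis matched below).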
+; CHECK: {{^}}for.body:
+; CHECK-NEXT: [[VREG1:%[^ ]+]] = phi{{.*}}[[VREG2:%[^ ]+]],{{.*}}%.sink,
+; CHECK-NOT: !dbg
+; CHECK-SAME: {{$}}
+; CHECK: {{^}}for.inc:
+; CHECK-NEXT: [[VREG2]] = phi{{.*}}%inc,{{.*}}[[VREG1]]
+
+target triple = "x86_64-unknown-linux-gnu"
+
+@g = external local_unnamed_addr global i32, align 4
+
+; Function Attrs: nounwind uwtable
+define void @foo(i32 %x, i32 %y, i32 %z) local_unnamed_addr #0 !dbg !4 {
+entry:
+ %not.tobool = icmp eq i32 %x, 0, !dbg !8
+ %.sink = zext i1 %not.tobool to i32, !dbg !8
+ store i32 %.sink, i32* @g, align 4, !tbaa !9
+ %cmp8 = icmp sgt i32 %y, 0, !dbg !13
+ br i1 %cmp8, label %for.body.preheader, label %for.end, !dbg !17
+
+for.body.preheader: ; preds = %entry
+ br label %for.body, !dbg !19
+
+for.body: ; preds = %for.body.preheader, %for.inc
+ %i.09 = phi i32 [ %inc4, %for.inc ], [ 0, %for.body.preheader ]
+ %cmp1 = icmp sgt i32 %i.09, %z, !dbg !19
+ br i1 %cmp1, label %if.then2, label %for.inc, !dbg !21
+
+if.then2: ; preds = %for.body
+ %0 = load i32, i32* @g, align 4, !dbg !22, !tbaa !9
+ %inc = add nsw i32 %0, 1, !dbg !22
+ store i32 %inc, i32* @g, align 4, !dbg !22, !tbaa !9
+ br label %for.inc, !dbg !23
+
+for.inc: ; preds = %for.body, %if.then2
+ %inc4 = add nuw nsw i32 %i.09, 1, !dbg !24
+ %exitcond = icmp ne i32 %inc4, %y, !dbg !13
+ br i1 %exitcond, label %for.body, label %for.end.loopexit, !dbg !17
+
+for.end.loopexit: ; preds = %for.inc
+ br label %for.end, !dbg !26
+
+for.end: ; preds = %for.end.loopexit, %entry
+ ret void, !dbg !26
+}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1)
+!1 = !DIFile(filename: "foo.c", directory: "b/")
+!2 = !{i32 2, !"Dwarf Version", i32 4}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 3, type: !5, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !0)
+!5 = !DISubroutineType(types: !6)
+!6 = !{null, !7, !7, !7}
+!7 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!8 = !DILocation(line: 4, column: 7, scope: !4)
+!9 = !{!10, !10, i64 0}
+!10 = !{!"int", !11, i64 0}
+!11 = !{!"omnipotent char", !12, i64 0}
+!12 = !{!"Simple C/C++ TBAA"}
+!13 = !DILocation(line: 10, column: 13, scope: !14)
+!14 = !DILexicalBlockFile(scope: !15, file: !1, discriminator: 1)
+!15 = distinct !DILexicalBlock(scope: !16, file: !1, line: 10, column: 3)
+!16 = distinct !DILexicalBlock(scope: !4, file: !1, line: 10, column: 3)
+!17 = !DILocation(line: 10, column: 3, scope: !18)
+!18 = !DILexicalBlockFile(scope: !16, file: !1, discriminator: 1)
+!19 = !DILocation(line: 11, column: 11, scope: !20)
+!20 = distinct !DILexicalBlock(scope: !15, file: !1, line: 11, column: 9)
+!21 = !DILocation(line: 11, column: 9, scope: !15)
+!22 = !DILocation(line: 12, column: 8, scope: !20)
+!23 = !DILocation(line: 12, column: 7, scope: !20)
+!24 = !DILocation(line: 10, column: 20, scope: !25)
+!25 = !DILexicalBlockFile(scope: !15, file: !1, discriminator: 2)
+!26 = !DILocation(line: 13, column: 1, scope: !4)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3}
diff --git a/test/Transforms/GVN/fence.ll b/test/Transforms/GVN/fence.ll
index f68861dad1ac..a2d98e13b380 100644
--- a/test/Transforms/GVN/fence.ll
+++ b/test/Transforms/GVN/fence.ll
@@ -1,5 +1,6 @@
; RUN: opt -S -basicaa -gvn < %s | FileCheck %s
+@a = external constant i32
; We can value forward across the fence since we can (semantically)
; reorder the following load before the fence.
define i32 @test(i32* %addr.i) {
@@ -52,6 +53,25 @@ define i32 @test3(i32* noalias %addr.i, i32* noalias %otheraddr) {
ret i32 %res
}
+; We can forward the load across both
+; fences, because the load is from
+; a constant memory location.
+define i32 @test4(i32* %addr) {
+; CHECK-LABEL: @test4
+; CHECK-NOT: load
+; CHECK: fence release
+; CHECK: store
+; CHECK: fence seq_cst
+; CHECK: ret i32 0
+ %var = load i32, i32* @a
+ fence release
+ store i32 42, i32* %addr, align 8
+ fence seq_cst
+ %var2 = load i32, i32* @a
+ %var3 = sub i32 %var, %var2
+ ret i32 %var3
+}
+
; Another example of why forwarding across an acquire fence is problematic
; can be seen in a normal locking operation. Say we had:
; *p = 5; unlock(l); lock(l); use(p);
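; To make the locking hazard above concrete, here is a hypothetical IR
; sketch (illustrative only; not part of this patch) in which forwarding
; the stored value across the acquire fence would miss an update made by
; another thread between the unlock and the lock:
;
;   define i32 @lock_hazard(i32* %p, i32* %l) {
;     store i32 5, i32* %p
;     store atomic i32 0, i32* %l release, align 4  ; unlock(l)
;     fence acquire                                 ; lock(l): pairs with another thread's release
;     %v = load i32, i32* %p                        ; must reload; forwarding the 5 would be wrong
;     ret i32 %v
;   }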
diff --git a/test/Transforms/GVN/invariant.group.ll b/test/Transforms/GVN/invariant.group.ll
index 6f1f357cad65..570519bec520 100644
--- a/test/Transforms/GVN/invariant.group.ll
+++ b/test/Transforms/GVN/invariant.group.ll
@@ -382,12 +382,12 @@ define void @testNotGlobal() {
%b0 = bitcast i8* %a to i1*
call void @fooBit(i1* %b0, i1 1)
-; CHECK: %trunc = trunc i8 %b to i1
+; CHECK: %1 = trunc i8 %b to i1
%2 = load i1, i1* %b0, !invariant.group !0
-; CHECK-NEXT: call void @fooBit(i1* %b0, i1 %trunc)
+; CHECK-NEXT: call void @fooBit(i1* %b0, i1 %1)
call void @fooBit(i1* %b0, i1 %2)
%3 = load i1, i1* %b0, !invariant.group !0
-; CHECK-NEXT: call void @fooBit(i1* %b0, i1 %trunc)
+; CHECK-NEXT: call void @fooBit(i1* %b0, i1 %1)
call void @fooBit(i1* %b0, i1 %3)
ret void
}
diff --git a/test/Transforms/GVN/lifetime-simple.ll b/test/Transforms/GVN/lifetime-simple.ll
index d03b62c8158a..8da3e4cbd30f 100644
--- a/test/Transforms/GVN/lifetime-simple.ll
+++ b/test/Transforms/GVN/lifetime-simple.ll
@@ -8,13 +8,13 @@ define i8 @test(i8* %P) nounwind {
; CHECK-NOT: load
; CHECK: lifetime.end
entry:
- call void @llvm.lifetime.start(i64 32, i8* %P)
+ call void @llvm.lifetime.start.p0i8(i64 32, i8* %P)
%0 = load i8, i8* %P
store i8 1, i8* %P
- call void @llvm.lifetime.end(i64 32, i8* %P)
+ call void @llvm.lifetime.end.p0i8(i64 32, i8* %P)
%1 = load i8, i8* %P
ret i8 %1
}
-declare void @llvm.lifetime.start(i64 %S, i8* nocapture %P) readonly
-declare void @llvm.lifetime.end(i64 %S, i8* nocapture %P)
+declare void @llvm.lifetime.start.p0i8(i64 %S, i8* nocapture %P) readonly
+declare void @llvm.lifetime.end.p0i8(i64 %S, i8* nocapture %P)
diff --git a/test/Transforms/GVNHoist/hoist-inline.ll b/test/Transforms/GVNHoist/hoist-inline.ll
new file mode 100644
index 000000000000..7d761486ab15
--- /dev/null
+++ b/test/Transforms/GVNHoist/hoist-inline.ll
@@ -0,0 +1,38 @@
+; RUN: opt -S -O2 < %s | FileCheck %s
+
+; Check that the inlined loads are hoisted.
+; CHECK-LABEL: define i32 @fun(
+; CHECK-LABEL: entry:
+; CHECK: load i32, i32* @A
+; CHECK: if.then:
+
+@A = external global i32
+@B = external global i32
+@C = external global i32
+
+define i32 @loadA() {
+ %a = load i32, i32* @A
+ ret i32 %a
+}
+
+define i32 @fun(i1 %c) {
+entry:
+ br i1 %c, label %if.then, label %if.else
+
+if.then:
+ store i32 1, i32* @B
+ %call1 = call i32 @loadA()
+ store i32 2, i32* @C
+ br label %if.endif
+
+if.else:
+ store i32 2, i32* @C
+ %call2 = call i32 @loadA()
+ store i32 1, i32* @B
+ br label %if.endif
+
+if.endif:
+ %ret = phi i32 [ %call1, %if.then ], [ %call2, %if.else ]
+ ret i32 %ret
+}
+
diff --git a/test/Transforms/GVNHoist/hoist-pr31891.ll b/test/Transforms/GVNHoist/hoist-pr31891.ll
new file mode 100644
index 000000000000..3f6a22fc54a6
--- /dev/null
+++ b/test/Transforms/GVNHoist/hoist-pr31891.ll
@@ -0,0 +1,83 @@
+; RUN: opt -S -gvn-hoist < %s | FileCheck %s
+
+; Hoisted inlinable calls need to have accurate scope information, but we're
+; allowed to erase the line information.
+
+source_filename = "t.c"
+target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-windows-msvc19.0.24215"
+
+; Function Attrs: noinline nounwind readnone uwtable
+define float @fabsf(float %f) #0 !dbg !7 {
+entry:
+ %conv = fpext float %f to double, !dbg !9
+ %call = call double @fabs(double %conv) #1, !dbg !10
+ %conv1 = fptrunc double %call to float, !dbg !11
+ ret float %conv1, !dbg !12
+}
+
+; Function Attrs: nounwind readnone
+declare double @fabs(double) #1
+
+; Function Attrs: noinline nounwind uwtable
+define void @hoistit(i32 %cond, float %f) #2 !dbg !13 {
+entry:
+ %tobool = icmp ne i32 %cond, 0, !dbg !14
+ br i1 %tobool, label %if.then, label %if.else, !dbg !14
+
+if.then: ; preds = %entry
+ %call = call float @fabsf(float %f) #1, !dbg !15
+ call void @useit1(float %call), !dbg !16
+ br label %if.end, !dbg !18
+
+if.else: ; preds = %entry
+ %call1 = call float @fabsf(float %f) #1, !dbg !19
+ call void @useit2(float %call1), !dbg !20
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ ret void, !dbg !21
+}
+
+; CHECK-LABEL: define void @hoistit
+; CHECK-SAME: !dbg ![[sp_hoistit:[0-9]+]]
+; CHECK: call float @fabsf(float %f) {{.*}} !dbg ![[dbgloc:[0-9]+]]
+; CHECK: br i1 %tobool, label %if.then, label %if.else
+
+; CHECK: ![[sp_hoistit]] = distinct !DISubprogram(name: "hoistit", {{.*}})
+; CHECK: ![[dbgloc]] = !DILocation({{.*}}, scope: ![[sp_hoistit]])
+
+declare void @useit1(float)
+
+declare void @useit2(float)
+
+attributes #0 = { noinline nounwind readnone uwtable }
+attributes #1 = { nounwind readnone }
+attributes #2 = { noinline nounwind uwtable }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 ", isOptimized: false, runtimeVersion: 0, emissionKind: LineTablesOnly, enums: !2)
+!1 = !DIFile(filename: "t.c", directory: "C:\5Csrc\5Cllvm\5Cbuild")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 1, !"PIC Level", i32 2}
+!6 = !{!"clang version 5.0.0 "}
+!7 = distinct !DISubprogram(name: "fabsf", scope: !1, file: !1, line: 4, type: !8, isLocal: false, isDefinition: true, scopeLine: 4, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!8 = !DISubroutineType(types: !2)
+!9 = !DILocation(line: 5, column: 22, scope: !7)
+!10 = !DILocation(line: 5, column: 17, scope: !7)
+!11 = !DILocation(line: 5, column: 10, scope: !7)
+!12 = !DILocation(line: 5, column: 3, scope: !7)
+!13 = distinct !DISubprogram(name: "hoistit", scope: !1, file: !1, line: 7, type: !8, isLocal: false, isDefinition: true, scopeLine: 7, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!14 = !DILocation(line: 8, column: 7, scope: !13)
+!15 = !DILocation(line: 9, column: 12, scope: !13)
+!16 = !DILocation(line: 9, column: 5, scope: !17)
+!17 = !DILexicalBlockFile(scope: !13, file: !1, discriminator: 1)
+!18 = !DILocation(line: 10, column: 3, scope: !13)
+!19 = !DILocation(line: 11, column: 12, scope: !13)
+!20 = !DILocation(line: 11, column: 5, scope: !17)
+!21 = !DILocation(line: 13, column: 1, scope: !13)
diff --git a/test/Transforms/GVNHoist/hoist-very-busy.ll b/test/Transforms/GVNHoist/hoist-very-busy.ll
new file mode 100644
index 000000000000..f421eff9921a
--- /dev/null
+++ b/test/Transforms/GVNHoist/hoist-very-busy.ll
@@ -0,0 +1,55 @@
+; RUN: opt -S -gvn-hoist < %s | FileCheck %s
+
+%struct.__jmp_buf_tag = type { [8 x i64], i32 }
+
+; Check that hoisting only happens when the expression is very busy.
+; CHECK: store
+; CHECK: store
+
+@test_exit_buf = global %struct.__jmp_buf_tag zeroinitializer
+@G = global i32 0
+
+define void @test_command(i32 %c1) {
+entry:
+ switch i32 %c1, label %exit [
+ i32 0, label %sw0
+ i32 1, label %sw1
+ ]
+
+sw0:
+ store i32 1, i32* @G
+ br label %exit
+
+sw1:
+ store i32 1, i32* @G
+ br label %exit
+
+exit:
+ call void @longjmp(%struct.__jmp_buf_tag* @test_exit_buf, i32 1) #0
+ unreachable
+}
+
+declare void @longjmp(%struct.__jmp_buf_tag*, i32) #0
+
+attributes #0 = { noreturn nounwind }
+
+; Check that the store is hoisted.
+; CHECK-LABEL: define void @fun(
+; CHECK: store
+; CHECK-NOT: store
+
+define void @fun() {
+entry:
+ br label %if.then
+
+if.then: ; preds = %entry
+ br i1 undef, label %sw0, label %sw1
+
+sw0:
+ store i32 1, i32* @G
+ unreachable
+
+sw1:
+ store i32 1, i32* @G
+ ret void
+}
diff --git a/test/Transforms/InstMerge/ld_hoist1.ll b/test/Transforms/GVNHoist/ld_hoist1.ll
index 74c8900b8ab1..8d4698d87e6f 100644
--- a/test/Transforms/InstMerge/ld_hoist1.ll
+++ b/test/Transforms/GVNHoist/ld_hoist1.ll
@@ -1,5 +1,5 @@
; Test load hoist
-; RUN: opt -basicaa -memdep -mldst-motion -S < %s | FileCheck %s
+; RUN: opt -gvn-hoist -S < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-pc_linux"
diff --git a/test/Transforms/InstMerge/ld_hoist_st_sink.ll b/test/Transforms/GVNHoist/ld_hoist_st_sink.ll
index 1d3f941882e5..c85edc2d8170 100644
--- a/test/Transforms/InstMerge/ld_hoist_st_sink.ll
+++ b/test/Transforms/GVNHoist/ld_hoist_st_sink.ll
@@ -1,6 +1,6 @@
; Tests to make sure that loads and stores in a diamond get merged
; Loads are hoisted into the header. Stores are sunk into the footer.
-; RUN: opt -basicaa -memdep -mldst-motion -S < %s | FileCheck %s
+; RUN: opt -gvn-hoist -S < %s | FileCheck %s
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
%struct.node = type { i64, %struct.node*, %struct.node*, %struct.node*, i64, %struct.arc*, i64, i64, i64 }
@@ -41,7 +41,7 @@ if.then: ; preds = %while.body
%4 = load i64, i64* %p, align 8
%add = add nsw i64 %4, %2
%p1 = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 6
-; CHECK-NOT: store i64
+; FIXME: store i64
store i64 %add, i64* %p1, align 8
br label %if.end
@@ -61,13 +61,13 @@ if.else: ; preds = %while.body
%8 = load i64, i64* %cost5, align 8
%sub = sub nsw i64 %6, %8
%p6 = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 6
-; CHECK-NOT: store i64
+; FIXME: store i64
store i64 %sub, i64* %p6, align 8
br label %if.end
; CHECK: if.end
if.end: ; preds = %if.else, %if.then
-; CHECK: store
+; FIXME: store
%inc = add nsw i64 %sum.019, 1
%node.0.in = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 2
%node.0 = load %struct.node*, %struct.node** %node.0.in, align 8
diff --git a/test/Transforms/GVNHoist/pr29034.ll b/test/Transforms/GVNHoist/pr29034.ll
index 5e725ad38c86..c0fcc3e741a8 100644
--- a/test/Transforms/GVNHoist/pr29034.ll
+++ b/test/Transforms/GVNHoist/pr29034.ll
@@ -38,7 +38,7 @@ define void @music_task(i8* nocapture readnone %p) local_unnamed_addr {
entry:
%mapi = alloca %struct._MUSIC_OP_API_*, align 8
%0 = bitcast %struct._MUSIC_OP_API_** %mapi to i8*
- call void @llvm.lifetime.start(i64 8, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 8, i8* %0)
store %struct._MUSIC_OP_API_* null, %struct._MUSIC_OP_API_** %mapi, align 8, !tbaa !1
%call = call i32 @music_decoder_init(%struct._MUSIC_OP_API_** nonnull %mapi)
br label %while.cond
@@ -103,7 +103,7 @@ while.cond2.backedge: ; preds = %sw.default, %sw.bb1
br label %while.cond2
}
-declare void @llvm.lifetime.start(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare i32 @music_decoder_init(%struct._MUSIC_OP_API_**)
declare i32 @music_play_api(%struct._MUSIC_OP_API_*, i32, i32, i32, i8*)
declare i32 @printf(i8* nocapture readonly, ...)
diff --git a/test/Transforms/JumpThreading/crash-assertingvh.ll b/test/Transforms/GlobalDCE/crash-assertingvh.ll
index e78431992239..2919999d5e28 100644
--- a/test/Transforms/JumpThreading/crash-assertingvh.ll
+++ b/test/Transforms/GlobalDCE/crash-assertingvh.ll
@@ -1,4 +1,9 @@
+; Make sure that if a pass like jump threading populates a function analysis
+; like LVI with asserting handles into the body of a function, those don't begin
+; to assert when global DCE deletes the body of the function.
+;
; RUN: opt -disable-output < %s -passes='module(function(jump-threading),globaldce)'
+; RUN: opt -disable-output < %s -passes='module(rpo-functionattrs,globaldce)'
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Transforms/GlobalOpt/2009-03-05-dbg.ll b/test/Transforms/GlobalOpt/2009-03-05-dbg.ll
index c785e13403cc..da82b01560b3 100644
--- a/test/Transforms/GlobalOpt/2009-03-05-dbg.ll
+++ b/test/Transforms/GlobalOpt/2009-03-05-dbg.ll
@@ -77,4 +77,4 @@ attributes #1 = { nounwind readnone }
!6 = !{i32 2, !"Dwarf Version", i32 2}
!7 = !{i32 2, !"Debug Info Version", i32 3}
!8 = !DILocalVariable(name: "i", arg: 1, scope: !9, file: !3, line: 4, type: !5)
-!9 = distinct !DISubprogram(name: "foo", linkageName: "foo", scope: !2, line: 4, type: !10, isLocal: false, isDefinition: true, virtualIndex: 6, isOptimized: false, unit: !2)!10 = !DISubroutineType(types: !11)!11 = !{!5, !5}!12 = !DIExpression()!13 = !DILocation(line: 5, scope: !14)!14 = distinct !DILexicalBlock(scope: !9, file: !3)!15 = !DILocation(line: 6, scope: !14)!16 = !DILocation(line: 7, scope: !14)!17 = !DILocation(line: 9, scope: !14)!18 = !DILocation(line: 11, scope: !14)!19 = !DILocation(line: 14, scope: !20)!20 = distinct !DILexicalBlock(scope: !21, file: !3)!21 = distinct !DISubprogram(name: "bar", linkageName: "bar", scope: !2, line: 13, type: !22, isLocal: false, isDefinition: true, virtualIndex: 6, isOptimized: false, unit: !2)!22 = !DISubroutineType(types: !23)!23 = !{!5}!24 = !DILocation(line: 15, scope: !20)!25 = !DILocation(line: 16, scope: !20) \ No newline at end of file
+!9 = distinct !DISubprogram(name: "foo", linkageName: "foo", scope: !2, file: !3, line: 4, type: !10, isLocal: false, isDefinition: true, virtualIndex: 6, isOptimized: false, unit: !2)!10 = !DISubroutineType(types: !11)!11 = !{!5, !5}!12 = !DIExpression()!13 = !DILocation(line: 5, scope: !14)!14 = distinct !DILexicalBlock(scope: !9, file: !3)!15 = !DILocation(line: 6, scope: !14)!16 = !DILocation(line: 7, scope: !14)!17 = !DILocation(line: 9, scope: !14)!18 = !DILocation(line: 11, scope: !14)!19 = !DILocation(line: 14, scope: !20)!20 = distinct !DILexicalBlock(scope: !21, file: !3)!21 = distinct !DISubprogram(name: "bar", linkageName: "bar", scope: !2, file: !3, line: 13, type: !22, isLocal: false, isDefinition: true, virtualIndex: 6, isOptimized: false, unit: !2)!22 = !DISubroutineType(types: !23)!23 = !{!5}!24 = !DILocation(line: 15, scope: !20)!25 = !DILocation(line: 16, scope: !20)
diff --git a/test/Transforms/GlobalOpt/externally-initialized-aggregate.ll b/test/Transforms/GlobalOpt/externally-initialized-aggregate.ll
index b446d24f1fd2..2434f20e92b2 100644
--- a/test/Transforms/GlobalOpt/externally-initialized-aggregate.ll
+++ b/test/Transforms/GlobalOpt/externally-initialized-aggregate.ll
@@ -5,11 +5,11 @@
; store to @a[0] from being constant propagated to the load in @foo, but will not
; prevent @a[1] from being removed since it is dead.
; CHECK: @a.0 = internal unnamed_addr externally_initialized global i32 undef
-; CHECK-NOT @a.1
+; CHECK-NOT: @a.1
@a = internal externally_initialized global [2 x i32] undef, align 4
; This is the same, but a struct rather than an array.
; CHECK: @b.0 = internal unnamed_addr externally_initialized global i32 undef
-; CHECK-NOT @b.1
+; CHECK-NOT: @b.1
@b = internal externally_initialized global {i32, i32} undef, align 4
define i32 @foo() {
diff --git a/test/Transforms/GlobalSplit/basic.ll b/test/Transforms/GlobalSplit/basic.ll
index a0aaeffb6c3f..6834a8d18be9 100644
--- a/test/Transforms/GlobalSplit/basic.ll
+++ b/test/Transforms/GlobalSplit/basic.ll
@@ -12,13 +12,13 @@ target triple = "x86_64-unknown-linux-gnu"
]
; CHECK-NOT: @global =
-; CHECK: @global.0 = private constant [2 x i8* ()*] [i8* ()* @f1, i8* ()* @f2], !type [[T1:![0-9]+$]]
-; CHECK: @global.1 = private constant [1 x i8* ()*] [i8* ()* @f3], !type [[T2:![0-9]+$]]
+; CHECK: @global.0 = private constant [2 x i8* ()*] [i8* ()* @f1, i8* ()* @f2], !type [[T1:![0-9]+]], !type [[T2:![0-9]+]], !type [[T3:![0-9]+$]]
+; CHECK: @global.1 = private constant [1 x i8* ()*] [i8* ()* @f3], !type [[T4:![0-9]+]], !type [[T5:![0-9]+$]]
; CHECK-NOT: @global =
@global = internal constant { [2 x i8* ()*], [1 x i8* ()*] } {
[2 x i8* ()*] [i8* ()* @f1, i8* ()* @f2],
[1 x i8* ()*] [i8* ()* @f3]
-}, !type !0, !type !1
+}, !type !0, !type !1, !type !2, !type !3, !type !4
; CHECK: define i8* @f1()
define i8* @f1() {
@@ -51,7 +51,13 @@ define void @foo() {
declare i1 @llvm.type.test(i8*, metadata) nounwind readnone
-; CHECK: [[T1]] = !{i32 8, !"foo"}
-; CHECK: [[T2]] = !{i32 0, !"bar"}
-!0 = !{i32 8, !"foo"}
-!1 = !{i32 16, !"bar"}
+; CHECK: [[T1]] = !{i32 0, !"foo"}
+; CHECK: [[T2]] = !{i32 15, !"bar"}
+; CHECK: [[T3]] = !{i32 16, !"a"}
+; CHECK: [[T4]] = !{i32 1, !"b"}
+; CHECK: [[T5]] = !{i32 8, !"c"}
+!0 = !{i32 0, !"foo"}
+!1 = !{i32 15, !"bar"}
+!2 = !{i32 16, !"a"}
+!3 = !{i32 17, !"b"}
+!4 = !{i32 24, !"c"}
diff --git a/test/Transforms/IPConstantProp/naked-return.ll b/test/Transforms/IPConstantProp/naked-return.ll
index f52417fcf7ea..3a2dedafcd37 100644
--- a/test/Transforms/IPConstantProp/naked-return.ll
+++ b/test/Transforms/IPConstantProp/naked-return.ll
@@ -1,4 +1,5 @@
; RUN: opt -ipsccp -S %s | FileCheck %s
+; RUN: opt -ipconstprop -S %s | FileCheck %s
target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
target triple = "i686-pc-windows-msvc19.0.24215"
@@ -24,5 +25,5 @@ bb:
ret void
}
-attributes #0 = { naked noinline optnone }
+attributes #0 = { naked }
attributes #1 = { "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" }
diff --git a/test/Transforms/IRCE/bad-loop-structure.ll b/test/Transforms/IRCE/bad-loop-structure.ll
new file mode 100644
index 000000000000..9c2e4251423d
--- /dev/null
+++ b/test/Transforms/IRCE/bad-loop-structure.ll
@@ -0,0 +1,45 @@
+; RUN: opt -S -irce -irce-print-changed-loops=true < %s | FileCheck %s
+
+; CHECK-NOT: irce
+
+define void @bad_loop_structure_increasing(i64 %iv.start) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ %iv.start, %entry ], [ %indvars.iv.next, %for.inc ]
+ %cmp = icmp ult i64 %indvars.iv, 100
+ br i1 %cmp, label %switch.lookup, label %for.inc
+
+switch.lookup:
+ br label %for.inc
+
+for.inc:
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %cmp55 = icmp slt i64 %indvars.iv.next, 11
+ br i1 %cmp55, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
+
+define void @bad_loop_structure_decreasing(i64 %iv.start) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ %iv.start, %entry ], [ %indvars.iv.next, %for.inc ]
+ %cmp = icmp ult i64 %indvars.iv, 100
+ br i1 %cmp, label %switch.lookup, label %for.inc
+
+switch.lookup:
+ br label %for.inc
+
+for.inc:
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, -1
+ %cmp55 = icmp sgt i64 %indvars.iv.next, 11
+ br i1 %cmp55, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
diff --git a/test/Transforms/IndVarSimplify/2011-11-01-lftrptr.ll b/test/Transforms/IndVarSimplify/2011-11-01-lftrptr.ll
index 402ae8cc05d0..b9d571d9b64f 100644
--- a/test/Transforms/IndVarSimplify/2011-11-01-lftrptr.ll
+++ b/test/Transforms/IndVarSimplify/2011-11-01-lftrptr.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -indvars -S "-default-data-layout=e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" | FileCheck %s
-; RUN: opt < %s -indvars -S "-default-data-layout=e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32" | FileCheck %s
+; RUN: opt < %s -indvars -S "-data-layout=e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" | FileCheck %s
+; RUN: opt < %s -indvars -S "-data-layout=e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32" | FileCheck %s
;
; PR11279: Assertion !IVLimit->getType()->isPointerTy()
;
diff --git a/test/Transforms/IndVarSimplify/AMDGPU/no-widen-to-i64.ll b/test/Transforms/IndVarSimplify/AMDGPU/no-widen-to-i64.ll
index aa4fb8e68eb3..36c7bd9c5ec3 100644
--- a/test/Transforms/IndVarSimplify/AMDGPU/no-widen-to-i64.ll
+++ b/test/Transforms/IndVarSimplify/AMDGPU/no-widen-to-i64.ll
@@ -14,7 +14,7 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
; CHECK-LABEL: @indvar_32_bit(
; CHECK-NOT: sext i32
; CHECK: phi i32
-define void @indvar_32_bit(i32 %n, i32* nocapture %output) {
+define amdgpu_kernel void @indvar_32_bit(i32 %n, i32* nocapture %output) {
entry:
%cmp5 = icmp sgt i32 %n, 0
br i1 %cmp5, label %for.body.preheader, label %for.end
@@ -46,7 +46,7 @@ for.end: ; preds = %for.end.loopexit, %
; CHECK-NOT: ashr i64
; CHECK-NOT: mul nsw i64
; CHECK-NOT: add nsw i64
-define void @no_promote_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define amdgpu_kernel void @no_promote_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
br label %for.body
@@ -72,7 +72,7 @@ for.end:
; be legalized anyway.
; CHECK-LABEL: @indvar_48_bit(
-define void @indvar_48_bit(i48 %n, i48* nocapture %output) {
+define amdgpu_kernel void @indvar_48_bit(i48 %n, i48* nocapture %output) {
entry:
%cmp5 = icmp sgt i48 %n, 0
br i1 %cmp5, label %for.body.preheader, label %for.end
diff --git a/test/Transforms/IndVarSimplify/exit_value_test2.ll b/test/Transforms/IndVarSimplify/exit_value_test2.ll
index 24e3e95a8918..ee641667506c 100644
--- a/test/Transforms/IndVarSimplify/exit_value_test2.ll
+++ b/test/Transforms/IndVarSimplify/exit_value_test2.ll
@@ -8,14 +8,14 @@
; CHECK-NOT: udiv
declare void @_Z3mixRjj(i32* dereferenceable(4), i32)
-declare void @llvm.lifetime.start(i64, i8* nocapture)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
define i32 @_Z3fooPKcjj(i8* nocapture readonly %s, i32 %len, i32 %c) {
entry:
%a = alloca i32, align 4
%tmp = bitcast i32* %a to i8*
- call void @llvm.lifetime.start(i64 4, i8* %tmp)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %tmp)
store i32 -1640531527, i32* %a, align 4
%cmp8 = icmp ugt i32 %len, 11
br i1 %cmp8, label %while.body.lr.ph, label %while.end
@@ -47,6 +47,6 @@ while.end: ; preds = %while.cond.while.en
%keylen.0.lcssa = phi i32 [ %sub.lcssa, %while.cond.while.end_crit_edge ], [ %len, %entry ]
call void @_Z3mixRjj(i32* dereferenceable(4) %a, i32 %keylen.0.lcssa)
%tmp4 = load i32, i32* %a, align 4
- call void @llvm.lifetime.end(i64 4, i8* %tmp)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %tmp)
ret i32 %tmp4
}
diff --git a/test/Transforms/IndVarSimplify/pr32045.ll b/test/Transforms/IndVarSimplify/pr32045.ll
new file mode 100644
index 000000000000..31efac3f833c
--- /dev/null
+++ b/test/Transforms/IndVarSimplify/pr32045.ll
@@ -0,0 +1,39 @@
+; RUN: opt -S -indvars < %s | FileCheck %s
+
+; This is not an IndVarSimplify bug, but the original symptom
+; manifested as one.
+
+define i32 @foo(i32 %a, i32 %b, i32 %c, i32* %sink) {
+; CHECK-LABEL: @foo(
+; CHECK: for.end:
+; CHECK-NEXT: [[SHR:%.*]] = ashr i32 %neg3, -1
+; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 0, [[SHR]]
+; CHECK-NEXT: [[SHR1:%.*]] = ashr i32 [[SUB]], [[B:%.*]]
+; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[SHR1]], -1
+; CHECK-NEXT: store i32 [[NEG]], i32* %sink
+;
+entry:
+ %tobool2 = icmp eq i32 %a, 0
+ br i1 %tobool2, label %exit, label %preheader
+
+preheader:
+ %neg3 = phi i32 [ %c, %entry ], [ %neg, %for.end ]
+ br label %for
+
+for:
+ %p = phi i32 [ %dec, %for ], [ 1, %preheader ]
+ %cmp = icmp sgt i32 %p, -1
+ %dec = add nsw i32 %p, -1
+ br i1 %cmp, label %for, label %for.end
+
+for.end:
+ %shr = ashr i32 %neg3, %p
+ %sub = sub nsw i32 0, %shr
+ %shr1 = ashr i32 %sub, %b
+ %neg = xor i32 %shr1, -1
+ store i32 %neg, i32* %sink
+ br i1 false, label %exit, label %preheader
+
+exit:
+ ret i32 0
+}
diff --git a/test/Transforms/IndVarSimplify/replace-sdiv-by-udiv.ll b/test/Transforms/IndVarSimplify/replace-sdiv-by-udiv.ll
new file mode 100644
index 000000000000..af25b20bec37
--- /dev/null
+++ b/test/Transforms/IndVarSimplify/replace-sdiv-by-udiv.ll
@@ -0,0 +1,130 @@
+; RUN: opt < %s -indvars -S | FileCheck %s
+
+define void @test0(i32* %a) {
+; CHECK-LABEL: @test0(
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %div = sdiv i32 %i.01, 2
+; CHECK-NOT: sdiv
+; CHECK: udiv
+ %idxprom = sext i32 %div to i64
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
+ store i32 %i.01, i32* %arrayidx, align 4
+ %inc = add nsw i32 %i.01, 1
+ %cmp = icmp slt i32 %inc, 64
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+define void @test1(i32* %a) {
+; CHECK-LABEL: @test1(
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %div = sdiv exact i32 %i.01, 2
+; CHECK-NOT: sdiv
+; CHECK: udiv exact
+ %idxprom = sext i32 %div to i64
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
+ store i32 %i.01, i32* %arrayidx, align 4
+ %inc = add nsw i32 %i.01, 1
+ %cmp = icmp slt i32 %inc, 64
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+define void @test2(i32* %a, i32 %d) {
+; CHECK-LABEL: @test2(
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %mul = mul nsw i32 %i.01, 64
+ %div = sdiv i32 %mul, %d
+; CHECK-NOT: udiv
+ %idxprom = sext i32 %div to i64
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
+ store i32 %i.01, i32* %arrayidx, align 4
+ %inc = add nsw i32 %i.01, 1
+ %cmp = icmp slt i32 %inc, 64
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+define void @test3(i32* %a) {
+; CHECK-LABEL: @test3(
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %div = sdiv i32 2048, %i.01
+; CHECK: udiv
+; CHECK-NOT: sdiv
+ %idxprom = sext i32 %div to i64
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
+ store i32 %i.01, i32* %arrayidx, align 4
+ %inc = add nsw i32 %i.01, 1
+ %cmp = icmp slt i32 %inc, 64
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+define void @test4(i32* %a) {
+; CHECK-LABEL: @test4(
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %mul = mul nsw i32 %i.01, 64
+ %div = sdiv i32 %mul, 8
+; CHECK: udiv
+; CHECK-NOT: sdiv
+ %idxprom = sext i32 %div to i64
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
+ store i32 %i.01, i32* %arrayidx, align 4
+ %inc = add nsw i32 %i.01, 1
+ %cmp = icmp slt i32 %inc, 64
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+define void @test5(i32* %a) {
+; CHECK-LABEL: @test5(
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %mul = mul nsw i32 %i.01, 64
+ %div = sdiv i32 %mul, 6
+; CHECK: udiv
+; CHECK-NOT: sdiv
+ %idxprom = sext i32 %div to i64
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
+ store i32 %i.01, i32* %arrayidx, align 4
+ %inc = add nsw i32 %i.01, 1
+ %cmp = icmp slt i32 %inc, 64
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end: ; preds = %for.body
+ ret void
+}
+
diff --git a/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll b/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll
new file mode 100644
index 000000000000..b566c147e9b8
--- /dev/null
+++ b/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll
@@ -0,0 +1,173 @@
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s
+
+; Trivial optimization of generic addressing
+
+; CHECK-LABEL: @load_global_from_flat(
+; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
+; CHECK-NEXT: %tmp1 = load float, float addrspace(1)* %tmp0
+; CHECK-NEXT: ret float %tmp1
+define float @load_global_from_flat(float addrspace(4)* %generic_scalar) #0 {
+ %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
+ %tmp1 = load float, float addrspace(1)* %tmp0
+ ret float %tmp1
+}
+
+; CHECK-LABEL: @load_constant_from_flat(
+; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(2)*
+; CHECK-NEXT: %tmp1 = load float, float addrspace(2)* %tmp0
+; CHECK-NEXT: ret float %tmp1
+define float @load_constant_from_flat(float addrspace(4)* %generic_scalar) #0 {
+ %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(2)*
+ %tmp1 = load float, float addrspace(2)* %tmp0
+ ret float %tmp1
+}
+
+; CHECK-LABEL: @load_group_from_flat(
+; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
+; CHECK-NEXT: %tmp1 = load float, float addrspace(3)* %tmp0
+; CHECK-NEXT: ret float %tmp1
+define float @load_group_from_flat(float addrspace(4)* %generic_scalar) #0 {
+ %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
+ %tmp1 = load float, float addrspace(3)* %tmp0
+ ret float %tmp1
+}
+
+; CHECK-LABEL: @load_private_from_flat(
+; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
+; CHECK-NEXT: %tmp1 = load float, float* %tmp0
+; CHECK-NEXT: ret float %tmp1
+define float @load_private_from_flat(float addrspace(4)* %generic_scalar) #0 {
+ %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
+ %tmp1 = load float, float* %tmp0
+ ret float %tmp1
+}
+
+; CHECK-LABEL: @store_global_from_flat(
+; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
+; CHECK-NEXT: store float 0.000000e+00, float addrspace(1)* %tmp0
+define amdgpu_kernel void @store_global_from_flat(float addrspace(4)* %generic_scalar) #0 {
+ %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
+ store float 0.0, float addrspace(1)* %tmp0
+ ret void
+}
+
+; CHECK-LABEL: @store_group_from_flat(
+; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
+; CHECK-NEXT: store float 0.000000e+00, float addrspace(3)* %tmp0
+define amdgpu_kernel void @store_group_from_flat(float addrspace(4)* %generic_scalar) #0 {
+ %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
+ store float 0.0, float addrspace(3)* %tmp0
+ ret void
+}
+
+; CHECK-LABEL: @store_private_from_flat(
+; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
+; CHECK-NEXT: store float 0.000000e+00, float* %tmp0
+define amdgpu_kernel void @store_private_from_flat(float addrspace(4)* %generic_scalar) #0 {
+ %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
+ store float 0.0, float* %tmp0
+ ret void
+}
+
+; Optimized to global load/store.
+; CHECK-LABEL: @load_store_global(
+; CHECK-NEXT: %val = load i32, i32 addrspace(1)* %input, align 4
+; CHECK-NEXT: store i32 %val, i32 addrspace(1)* %output, align 4
+; CHECK-NEXT: ret void
+define amdgpu_kernel void @load_store_global(i32 addrspace(1)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
+ %tmp0 = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
+ %tmp1 = addrspacecast i32 addrspace(1)* %output to i32 addrspace(4)*
+ %val = load i32, i32 addrspace(4)* %tmp0, align 4
+ store i32 %val, i32 addrspace(4)* %tmp1, align 4
+ ret void
+}
+
+; Optimized to group load/store.
+; CHECK-LABEL: @load_store_group(
+; CHECK-NEXT: %val = load i32, i32 addrspace(3)* %input, align 4
+; CHECK-NEXT: store i32 %val, i32 addrspace(3)* %output, align 4
+; CHECK-NEXT: ret void
+define amdgpu_kernel void @load_store_group(i32 addrspace(3)* nocapture %input, i32 addrspace(3)* nocapture %output) #0 {
+ %tmp0 = addrspacecast i32 addrspace(3)* %input to i32 addrspace(4)*
+ %tmp1 = addrspacecast i32 addrspace(3)* %output to i32 addrspace(4)*
+ %val = load i32, i32 addrspace(4)* %tmp0, align 4
+ store i32 %val, i32 addrspace(4)* %tmp1, align 4
+ ret void
+}
+
+; Optimized to private load/store.
+; CHECK-LABEL: @load_store_private(
+; CHECK-NEXT: %val = load i32, i32* %input, align 4
+; CHECK-NEXT: store i32 %val, i32* %output, align 4
+; CHECK-NEXT: ret void
+define amdgpu_kernel void @load_store_private(i32* nocapture %input, i32* nocapture %output) #0 {
+ %tmp0 = addrspacecast i32* %input to i32 addrspace(4)*
+ %tmp1 = addrspacecast i32* %output to i32 addrspace(4)*
+ %val = load i32, i32 addrspace(4)* %tmp0, align 4
+ store i32 %val, i32 addrspace(4)* %tmp1, align 4
+ ret void
+}
+
+; No optimization; flat load/store.
+; CHECK-LABEL: @load_store_flat(
+; CHECK-NEXT: %val = load i32, i32 addrspace(4)* %input, align 4
+; CHECK-NEXT: store i32 %val, i32 addrspace(4)* %output, align 4
+; CHECK-NEXT: ret void
+define amdgpu_kernel void @load_store_flat(i32 addrspace(4)* nocapture %input, i32 addrspace(4)* nocapture %output) #0 {
+ %val = load i32, i32 addrspace(4)* %input, align 4
+ store i32 %val, i32 addrspace(4)* %output, align 4
+ ret void
+}
+
+; CHECK-LABEL: @store_addrspacecast_ptr_value(
+; CHECK: %cast = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
+; CHECK-NEXT: store i32 addrspace(4)* %cast, i32 addrspace(4)* addrspace(1)* %output, align 4
+define amdgpu_kernel void @store_addrspacecast_ptr_value(i32 addrspace(1)* nocapture %input, i32 addrspace(4)* addrspace(1)* nocapture %output) #0 {
+ %cast = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
+ store i32 addrspace(4)* %cast, i32 addrspace(4)* addrspace(1)* %output, align 4
+ ret void
+}
+
+; CHECK-LABEL: @atomicrmw_add_global_to_flat(
+; CHECK-NEXT: %ret = atomicrmw add i32 addrspace(1)* %global.ptr, i32 %y seq_cst
+define i32 @atomicrmw_add_global_to_flat(i32 addrspace(1)* %global.ptr, i32 %y) #0 {
+ %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
+ %ret = atomicrmw add i32 addrspace(4)* %cast, i32 %y seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: @atomicrmw_add_group_to_flat(
+; CHECK-NEXT: %ret = atomicrmw add i32 addrspace(3)* %group.ptr, i32 %y seq_cst
+define i32 @atomicrmw_add_group_to_flat(i32 addrspace(3)* %group.ptr, i32 %y) #0 {
+ %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
+ %ret = atomicrmw add i32 addrspace(4)* %cast, i32 %y seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: @cmpxchg_global_to_flat(
+; CHECK: %ret = cmpxchg i32 addrspace(1)* %global.ptr, i32 %cmp, i32 %val seq_cst monotonic
+define { i32, i1 } @cmpxchg_global_to_flat(i32 addrspace(1)* %global.ptr, i32 %cmp, i32 %val) #0 {
+ %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
+ %ret = cmpxchg i32 addrspace(4)* %cast, i32 %cmp, i32 %val seq_cst monotonic
+ ret { i32, i1 } %ret
+}
+
+; CHECK-LABEL: @cmpxchg_group_to_flat(
+; CHECK: %ret = cmpxchg i32 addrspace(3)* %group.ptr, i32 %cmp, i32 %val seq_cst monotonic
+define { i32, i1 } @cmpxchg_group_to_flat(i32 addrspace(3)* %group.ptr, i32 %cmp, i32 %val) #0 {
+ %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
+ %ret = cmpxchg i32 addrspace(4)* %cast, i32 %cmp, i32 %val seq_cst monotonic
+ ret { i32, i1 } %ret
+}
+
+; Not a pointer operand
+; CHECK-LABEL: @cmpxchg_group_to_flat_wrong_operand(
+; CHECK: %cast.cmp = addrspacecast i32 addrspace(3)* %cmp.ptr to i32 addrspace(4)*
+; CHECK: %ret = cmpxchg i32 addrspace(4)* addrspace(3)* %cas.ptr, i32 addrspace(4)* %cast.cmp, i32 addrspace(4)* %val seq_cst monotonic
+define { i32 addrspace(4)*, i1 } @cmpxchg_group_to_flat_wrong_operand(i32 addrspace(4)* addrspace(3)* %cas.ptr, i32 addrspace(3)* %cmp.ptr, i32 addrspace(4)* %val) #0 {
+ %cast.cmp = addrspacecast i32 addrspace(3)* %cmp.ptr to i32 addrspace(4)*
+ %ret = cmpxchg i32 addrspace(4)* addrspace(3)* %cas.ptr, i32 addrspace(4)* %cast.cmp, i32 addrspace(4)* %val seq_cst monotonic
+ ret { i32 addrspace(4)*, i1 } %ret
+}
+
+attributes #0 = { nounwind }
diff --git a/test/Transforms/InferAddressSpaces/AMDGPU/icmp.ll b/test/Transforms/InferAddressSpaces/AMDGPU/icmp.ll
new file mode 100644
index 000000000000..b185ede26579
--- /dev/null
+++ b/test/Transforms/InferAddressSpaces/AMDGPU/icmp.ll
@@ -0,0 +1,160 @@
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s
+
+; CHECK-LABEL: @icmp_flat_cmp_self(
+; CHECK: %cmp = icmp eq i32 addrspace(3)* %group.ptr.0, %group.ptr.0
+define i1 @icmp_flat_cmp_self(i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %cmp = icmp eq i32 addrspace(4)* %cast0, %cast0
+ ret i1 %cmp
+}
+
+; CHECK-LABEL: @icmp_flat_flat_from_group(
+; CHECK: %cmp = icmp eq i32 addrspace(3)* %group.ptr.0, %group.ptr.1
+define i1 @icmp_flat_flat_from_group(i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32 addrspace(4)*
+ %cmp = icmp eq i32 addrspace(4)* %cast0, %cast1
+ ret i1 %cmp
+}
+
+; CHECK-LABEL: @icmp_mismatch_flat_from_group_private(
+; CHECK: %1 = addrspacecast i32* %private.ptr.0 to i32 addrspace(4)*
+; CHECK: %2 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32 addrspace(4)*
+; CHECK: %cmp = icmp eq i32 addrspace(4)* %1, %2
+define i1 @icmp_mismatch_flat_from_group_private(i32* %private.ptr.0, i32 addrspace(3)* %group.ptr.1) #0 {
+ %cast0 = addrspacecast i32* %private.ptr.0 to i32 addrspace(4)*
+ %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32 addrspace(4)*
+ %cmp = icmp eq i32 addrspace(4)* %cast0, %cast1
+ ret i1 %cmp
+}
+
+; CHECK-LABEL: @icmp_flat_group_flat(
+; CHECK: %1 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+; CHECK: %cmp = icmp eq i32 addrspace(4)* %1, %flat.ptr.1
+define i1 @icmp_flat_group_flat(i32 addrspace(3)* %group.ptr.0, i32 addrspace(4)* %flat.ptr.1) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %cmp = icmp eq i32 addrspace(4)* %cast0, %flat.ptr.1
+ ret i1 %cmp
+}
+
+; CHECK-LABEL: @icmp_flat_flat_group(
+; CHECK: %1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32 addrspace(4)*
+; CHECK: %cmp = icmp eq i32 addrspace(4)* %flat.ptr.0, %1
+define i1 @icmp_flat_flat_group(i32 addrspace(4)* %flat.ptr.0, i32 addrspace(3)* %group.ptr.1) #0 {
+ %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32 addrspace(4)*
+ %cmp = icmp eq i32 addrspace(4)* %flat.ptr.0, %cast1
+ ret i1 %cmp
+}
+
+; Keeping this as a cmp of addrspace(3)* pointers is better
+; CHECK-LABEL: @icmp_flat_to_group_cmp(
+; CHECK: %cast0 = addrspacecast i32 addrspace(4)* %flat.ptr.0 to i32 addrspace(3)*
+; CHECK: %cast1 = addrspacecast i32 addrspace(4)* %flat.ptr.1 to i32 addrspace(3)*
+; CHECK: %cmp = icmp eq i32 addrspace(3)* %cast0, %cast1
+define i1 @icmp_flat_to_group_cmp(i32 addrspace(4)* %flat.ptr.0, i32 addrspace(4)* %flat.ptr.1) #0 {
+ %cast0 = addrspacecast i32 addrspace(4)* %flat.ptr.0 to i32 addrspace(3)*
+ %cast1 = addrspacecast i32 addrspace(4)* %flat.ptr.1 to i32 addrspace(3)*
+ %cmp = icmp eq i32 addrspace(3)* %cast0, %cast1
+ ret i1 %cmp
+}
+
+; FIXME: Should be able to ask the target how to constant fold the
+; constant cast, if it is OK to change this when 0 is a valid pointer.
+
+; CHECK-LABEL: @icmp_group_flat_cmp_null(
+; CHECK: %cmp = icmp eq i32 addrspace(3)* %group.ptr.0, addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*)
+define i1 @icmp_group_flat_cmp_null(i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %cmp = icmp eq i32 addrspace(4)* %cast0, null
+ ret i1 %cmp
+}
+
+; CHECK-LABEL: @icmp_group_flat_cmp_constant_inttoptr(
+; CHECK: %cmp = icmp eq i32 addrspace(3)* %group.ptr.0, addrspacecast (i32 addrspace(4)* inttoptr (i64 400 to i32 addrspace(4)*) to i32 addrspace(3)*)
+define i1 @icmp_group_flat_cmp_constant_inttoptr(i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %cmp = icmp eq i32 addrspace(4)* %cast0, inttoptr (i64 400 to i32 addrspace(4)*)
+ ret i1 %cmp
+}
+
+; CHECK-LABEL: @icmp_mismatch_flat_group_private_cmp_null(
+; CHECK: %1 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+; CHECK: %cmp = icmp eq i32 addrspace(4)* %1, addrspacecast (i32* null to i32 addrspace(4)*)
+define i1 @icmp_mismatch_flat_group_private_cmp_null(i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %cmp = icmp eq i32 addrspace(4)* %cast0, addrspacecast (i32* null to i32 addrspace(4)*)
+ ret i1 %cmp
+}
+
+; CHECK-LABEL: @icmp_mismatch_flat_group_private_cmp_undef(
+; CHECK: %cmp = icmp eq i32 addrspace(3)* %group.ptr.0, undef
+define i1 @icmp_mismatch_flat_group_private_cmp_undef(i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %cmp = icmp eq i32 addrspace(4)* %cast0, addrspacecast (i32* undef to i32 addrspace(4)*)
+ ret i1 %cmp
+}
+
+@lds0 = internal addrspace(3) global i32 0, align 4
+@global0 = internal addrspace(1) global i32 0, align 4
+
+; CHECK-LABEL: @icmp_mismatch_flat_group_global_cmp_gv(
+; CHECK: %1 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+; CHECK: %cmp = icmp eq i32 addrspace(4)* %1, addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*)
+define i1 @icmp_mismatch_flat_group_global_cmp_gv(i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %cmp = icmp eq i32 addrspace(4)* %cast0, addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*)
+ ret i1 %cmp
+}
+
+; CHECK-LABEL: @icmp_mismatch_group_global_cmp_gv_gv(
+; CHECK: %cmp = icmp eq i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds0 to i32 addrspace(4)*), addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*)
+define i1 @icmp_mismatch_group_global_cmp_gv_gv(i32 addrspace(3)* %group.ptr.0) #0 {
+ %cmp = icmp eq i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds0 to i32 addrspace(4)*), addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*)
+ ret i1 %cmp
+}
+
+; CHECK-LABEL: @icmp_group_flat_cmp_undef(
+; CHECK: %cmp = icmp eq i32 addrspace(3)* %group.ptr.0, undef
+define i1 @icmp_group_flat_cmp_undef(i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %cmp = icmp eq i32 addrspace(4)* %cast0, undef
+ ret i1 %cmp
+}
+
+; Test non-canonical orders
+; CHECK-LABEL: @icmp_mismatch_flat_group_private_cmp_null_swap(
+; CHECK: %1 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+; CHECK: %cmp = icmp eq i32 addrspace(4)* addrspacecast (i32* null to i32 addrspace(4)*), %1
+define i1 @icmp_mismatch_flat_group_private_cmp_null_swap(i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %cmp = icmp eq i32 addrspace(4)* addrspacecast (i32* null to i32 addrspace(4)*), %cast0
+ ret i1 %cmp
+}
+
+; CHECK-LABEL: @icmp_group_flat_cmp_undef_swap(
+; CHECK: %cmp = icmp eq i32 addrspace(3)* undef, %group.ptr.0
+define i1 @icmp_group_flat_cmp_undef_swap(i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %cmp = icmp eq i32 addrspace(4)* undef, %cast0
+ ret i1 %cmp
+}
+
+; CHECK-LABEL: @icmp_mismatch_flat_group_private_cmp_undef_swap(
+; CHECK: %cmp = icmp eq i32 addrspace(3)* undef, %group.ptr.0
+define i1 @icmp_mismatch_flat_group_private_cmp_undef_swap(i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %cmp = icmp eq i32 addrspace(4)* addrspacecast (i32* undef to i32 addrspace(4)*), %cast0
+ ret i1 %cmp
+}
+
+; TODO: Should be handled
+; CHECK-LABEL: @icmp_flat_flat_from_group_vector(
+; CHECK: %cmp = icmp eq <2 x i32 addrspace(4)*> %cast0, %cast1
+define <2 x i1> @icmp_flat_flat_from_group_vector(<2 x i32 addrspace(3)*> %group.ptr.0, <2 x i32 addrspace(3)*> %group.ptr.1) #0 {
+ %cast0 = addrspacecast <2 x i32 addrspace(3)*> %group.ptr.0 to <2 x i32 addrspace(4)*>
+ %cast1 = addrspacecast <2 x i32 addrspace(3)*> %group.ptr.1 to <2 x i32 addrspace(4)*>
+ %cmp = icmp eq <2 x i32 addrspace(4)*> %cast0, %cast1
+ ret <2 x i1> %cmp
+}
+
+attributes #0 = { nounwind }
diff --git a/test/Transforms/InferAddressSpaces/AMDGPU/infer-address-space.ll b/test/Transforms/InferAddressSpaces/AMDGPU/infer-address-space.ll
new file mode 100644
index 000000000000..52067cd37bb9
--- /dev/null
+++ b/test/Transforms/InferAddressSpaces/AMDGPU/infer-address-space.ll
@@ -0,0 +1,175 @@
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s
+; Ports of most of test/CodeGen/NVPTX/access-non-generic.ll
+
+@scalar = internal addrspace(3) global float 0.0, align 4
+@array = internal addrspace(3) global [10 x float] zeroinitializer, align 4
+
+; CHECK-LABEL: @load_store_lds_f32(
+; CHECK: %tmp = load float, float addrspace(3)* @scalar, align 4
+; CHECK: call void @use(float %tmp)
+; CHECK: store float %v, float addrspace(3)* @scalar, align 4
+; CHECK: call void @llvm.amdgcn.s.barrier()
+; CHECK: %tmp2 = load float, float addrspace(3)* @scalar, align 4
+; CHECK: call void @use(float %tmp2)
+; CHECK: store float %v, float addrspace(3)* @scalar, align 4
+; CHECK: call void @llvm.amdgcn.s.barrier()
+; CHECK: %tmp3 = load float, float addrspace(3)* getelementptr inbounds ([10 x float], [10 x float] addrspace(3)* @array, i32 0, i32 5), align 4
+; CHECK: call void @use(float %tmp3)
+; CHECK: store float %v, float addrspace(3)* getelementptr inbounds ([10 x float], [10 x float] addrspace(3)* @array, i32 0, i32 5), align 4
+; CHECK: call void @llvm.amdgcn.s.barrier()
+; CHECK: %tmp4 = getelementptr inbounds [10 x float], [10 x float] addrspace(3)* @array, i32 0, i32 5
+; CHECK: %tmp5 = load float, float addrspace(3)* %tmp4, align 4
+; CHECK: call void @use(float %tmp5)
+; CHECK: store float %v, float addrspace(3)* %tmp4, align 4
+; CHECK: call void @llvm.amdgcn.s.barrier()
+; CHECK: %tmp7 = getelementptr inbounds [10 x float], [10 x float] addrspace(3)* @array, i32 0, i32 %i
+; CHECK: %tmp8 = load float, float addrspace(3)* %tmp7, align 4
+; CHECK: call void @use(float %tmp8)
+; CHECK: store float %v, float addrspace(3)* %tmp7, align 4
+; CHECK: call void @llvm.amdgcn.s.barrier()
+; CHECK: ret void
+define amdgpu_kernel void @load_store_lds_f32(i32 %i, float %v) #0 {
+bb:
+ %tmp = load float, float addrspace(4)* addrspacecast (float addrspace(3)* @scalar to float addrspace(4)*), align 4
+ call void @use(float %tmp)
+ store float %v, float addrspace(4)* addrspacecast (float addrspace(3)* @scalar to float addrspace(4)*), align 4
+ call void @llvm.amdgcn.s.barrier()
+ %tmp1 = addrspacecast float addrspace(3)* @scalar to float addrspace(4)*
+ %tmp2 = load float, float addrspace(4)* %tmp1, align 4
+ call void @use(float %tmp2)
+ store float %v, float addrspace(4)* %tmp1, align 4
+ call void @llvm.amdgcn.s.barrier()
+ %tmp3 = load float, float addrspace(4)* getelementptr inbounds ([10 x float], [10 x float] addrspace(4)* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float] addrspace(4)*), i32 0, i32 5), align 4
+ call void @use(float %tmp3)
+ store float %v, float addrspace(4)* getelementptr inbounds ([10 x float], [10 x float] addrspace(4)* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float] addrspace(4)*), i32 0, i32 5), align 4
+ call void @llvm.amdgcn.s.barrier()
+ %tmp4 = getelementptr inbounds [10 x float], [10 x float] addrspace(4)* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float] addrspace(4)*), i32 0, i32 5
+ %tmp5 = load float, float addrspace(4)* %tmp4, align 4
+ call void @use(float %tmp5)
+ store float %v, float addrspace(4)* %tmp4, align 4
+ call void @llvm.amdgcn.s.barrier()
+ %tmp6 = addrspacecast [10 x float] addrspace(3)* @array to [10 x float] addrspace(4)*
+ %tmp7 = getelementptr inbounds [10 x float], [10 x float] addrspace(4)* %tmp6, i32 0, i32 %i
+ %tmp8 = load float, float addrspace(4)* %tmp7, align 4
+ call void @use(float %tmp8)
+ store float %v, float addrspace(4)* %tmp7, align 4
+ call void @llvm.amdgcn.s.barrier()
+ ret void
+}
+
+; CHECK-LABEL: @constexpr_load_int_from_float_lds(
+; CHECK: %tmp = load i32, i32 addrspace(3)* bitcast (float addrspace(3)* @scalar to i32 addrspace(3)*), align 4
+define i32 @constexpr_load_int_from_float_lds() #0 {
+bb:
+ %tmp = load i32, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @scalar to i32 addrspace(3)*) to i32 addrspace(4)*), align 4
+ ret i32 %tmp
+}
+
+; CHECK-LABEL: @load_int_from_global_float(
+; CHECK: %tmp1 = getelementptr float, float addrspace(1)* %input, i32 %i
+; CHECK: %tmp2 = getelementptr float, float addrspace(1)* %tmp1, i32 %j
+; CHECK: %tmp3 = bitcast float addrspace(1)* %tmp2 to i32 addrspace(1)*
+; CHECK: %tmp4 = load i32, i32 addrspace(1)* %tmp3
+; CHECK: ret i32 %tmp4
+define i32 @load_int_from_global_float(float addrspace(1)* %input, i32 %i, i32 %j) #0 {
+bb:
+ %tmp = addrspacecast float addrspace(1)* %input to float addrspace(4)*
+ %tmp1 = getelementptr float, float addrspace(4)* %tmp, i32 %i
+ %tmp2 = getelementptr float, float addrspace(4)* %tmp1, i32 %j
+ %tmp3 = bitcast float addrspace(4)* %tmp2 to i32 addrspace(4)*
+ %tmp4 = load i32, i32 addrspace(4)* %tmp3
+ ret i32 %tmp4
+}
+
+; CHECK-LABEL: @nested_const_expr(
+; CHECK: store i32 1, i32 addrspace(3)* bitcast (float addrspace(3)* getelementptr inbounds ([10 x float], [10 x float] addrspace(3)* @array, i64 0, i64 1) to i32 addrspace(3)*), align 4
+define amdgpu_kernel void @nested_const_expr() #0 {
+ store i32 1, i32 addrspace(4)* bitcast (float addrspace(4)* getelementptr ([10 x float], [10 x float] addrspace(4)* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float] addrspace(4)*), i64 0, i64 1) to i32 addrspace(4)*), align 4
+ ret void
+}
+
+; CHECK-LABEL: @rauw(
+; CHECK: %addr = getelementptr float, float addrspace(1)* %input, i64 10
+; CHECK-NEXT: %v = load float, float addrspace(1)* %addr
+; CHECK-NEXT: store float %v, float addrspace(1)* %addr
+; CHECK-NEXT: ret void
+define amdgpu_kernel void @rauw(float addrspace(1)* %input) #0 {
+bb:
+ %generic_input = addrspacecast float addrspace(1)* %input to float addrspace(4)*
+ %addr = getelementptr float, float addrspace(4)* %generic_input, i64 10
+ %v = load float, float addrspace(4)* %addr
+ store float %v, float addrspace(4)* %addr
+ ret void
+}
+
+; FIXME: Should be able to eliminate the cast inside the loop
+; CHECK-LABEL: @loop(
+
+; CHECK: %p = bitcast [10 x float] addrspace(3)* @array to float addrspace(3)*
+; CHECK: %end = getelementptr float, float addrspace(3)* %p, i64 10
+; CHECK: br label %loop
+
+; CHECK: loop: ; preds = %loop, %entry
+; CHECK: %i = phi float addrspace(3)* [ %p, %entry ], [ %i2, %loop ]
+; CHECK: %v = load float, float addrspace(3)* %i
+; CHECK: call void @use(float %v)
+; CHECK: %i2 = getelementptr float, float addrspace(3)* %i, i64 1
+; CHECK: %exit_cond = icmp eq float addrspace(3)* %i2, %end
+
+; CHECK: br i1 %exit_cond, label %exit, label %loop
+define amdgpu_kernel void @loop() #0 {
+entry:
+ %p = addrspacecast [10 x float] addrspace(3)* @array to float addrspace(4)*
+ %end = getelementptr float, float addrspace(4)* %p, i64 10
+ br label %loop
+
+loop: ; preds = %loop, %entry
+ %i = phi float addrspace(4)* [ %p, %entry ], [ %i2, %loop ]
+ %v = load float, float addrspace(4)* %i
+ call void @use(float %v)
+ %i2 = getelementptr float, float addrspace(4)* %i, i64 1
+ %exit_cond = icmp eq float addrspace(4)* %i2, %end
+ br i1 %exit_cond, label %exit, label %loop
+
+exit: ; preds = %loop
+ ret void
+}
+
+@generic_end = external addrspace(1) global float addrspace(4)*
+
+; CHECK-LABEL: @loop_with_generic_bound(
+; CHECK: %p = bitcast [10 x float] addrspace(3)* @array to float addrspace(3)*
+; CHECK: %end = load float addrspace(4)*, float addrspace(4)* addrspace(1)* @generic_end
+; CHECK: br label %loop
+
+; CHECK: loop:
+; CHECK: %i = phi float addrspace(3)* [ %p, %entry ], [ %i2, %loop ]
+; CHECK: %v = load float, float addrspace(3)* %i
+; CHECK: call void @use(float %v)
+; CHECK: %i2 = getelementptr float, float addrspace(3)* %i, i64 1
+; CHECK: %0 = addrspacecast float addrspace(3)* %i2 to float addrspace(4)*
+; CHECK: %exit_cond = icmp eq float addrspace(4)* %0, %end
+; CHECK: br i1 %exit_cond, label %exit, label %loop
+define amdgpu_kernel void @loop_with_generic_bound() #0 {
+entry:
+ %p = addrspacecast [10 x float] addrspace(3)* @array to float addrspace(4)*
+ %end = load float addrspace(4)*, float addrspace(4)* addrspace(1)* @generic_end
+ br label %loop
+
+loop: ; preds = %loop, %entry
+ %i = phi float addrspace(4)* [ %p, %entry ], [ %i2, %loop ]
+ %v = load float, float addrspace(4)* %i
+ call void @use(float %v)
+ %i2 = getelementptr float, float addrspace(4)* %i, i64 1
+ %exit_cond = icmp eq float addrspace(4)* %i2, %end
+ br i1 %exit_cond, label %exit, label %loop
+
+exit: ; preds = %loop
+ ret void
+}
+
+declare void @llvm.amdgcn.s.barrier() #1
+declare void @use(float) #0
+
+attributes #0 = { nounwind }
+attributes #1 = { convergent nounwind }
diff --git a/test/Transforms/InferAddressSpaces/AMDGPU/intrinsics.ll b/test/Transforms/InferAddressSpaces/AMDGPU/intrinsics.ll
new file mode 100644
index 000000000000..ca6138d3fb01
--- /dev/null
+++ b/test/Transforms/InferAddressSpaces/AMDGPU/intrinsics.ll
@@ -0,0 +1,146 @@
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s
+
+; CHECK-LABEL: @objectsize_group_to_flat_i32(
+; CHECK: %val = call i32 @llvm.objectsize.i32.p3i8(i8 addrspace(3)* %group.ptr, i1 true, i1 false)
+define i32 @objectsize_group_to_flat_i32(i8 addrspace(3)* %group.ptr) #0 {
+ %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8 addrspace(4)*
+ %val = call i32 @llvm.objectsize.i32.p4i8(i8 addrspace(4)* %cast, i1 true, i1 false)
+ ret i32 %val
+}
+
+; CHECK-LABEL: @objectsize_global_to_flat_i64(
+; CHECK: %val = call i64 @llvm.objectsize.i64.p3i8(i8 addrspace(3)* %global.ptr, i1 true, i1 false)
+define i64 @objectsize_global_to_flat_i64(i8 addrspace(3)* %global.ptr) #0 {
+ %cast = addrspacecast i8 addrspace(3)* %global.ptr to i8 addrspace(4)*
+ %val = call i64 @llvm.objectsize.i64.p4i8(i8 addrspace(4)* %cast, i1 true, i1 false)
+ ret i64 %val
+}
+
+; CHECK-LABEL: @atomicinc_global_to_flat_i32(
+; CHECK: call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %global.ptr, i32 %y, i32 0, i32 0, i1 false)
+define i32 @atomicinc_global_to_flat_i32(i32 addrspace(1)* %global.ptr, i32 %y) #0 {
+ %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
+ %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* %cast, i32 %y, i32 0, i32 0, i1 false)
+ ret i32 %ret
+}
+
+; CHECK-LABEL: @atomicinc_group_to_flat_i32(
+; CHECK: %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %group.ptr, i32 %y, i32 0, i32 0, i1 false)
+define i32 @atomicinc_group_to_flat_i32(i32 addrspace(3)* %group.ptr, i32 %y) #0 {
+ %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
+ %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* %cast, i32 %y, i32 0, i32 0, i1 false)
+ ret i32 %ret
+}
+
+; CHECK-LABEL: @atomicinc_global_to_flat_i64(
+; CHECK: call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %global.ptr, i64 %y, i32 0, i32 0, i1 false)
+define i64 @atomicinc_global_to_flat_i64(i64 addrspace(1)* %global.ptr, i64 %y) #0 {
+ %cast = addrspacecast i64 addrspace(1)* %global.ptr to i64 addrspace(4)*
+ %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %cast, i64 %y, i32 0, i32 0, i1 false)
+ ret i64 %ret
+}
+
+; CHECK-LABEL: @atomicinc_group_to_flat_i64(
+; CHECK: call i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* %group.ptr, i64 %y, i32 0, i32 0, i1 false)
+define i64 @atomicinc_group_to_flat_i64(i64 addrspace(3)* %group.ptr, i64 %y) #0 {
+ %cast = addrspacecast i64 addrspace(3)* %group.ptr to i64 addrspace(4)*
+ %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %cast, i64 %y, i32 0, i32 0, i1 false)
+ ret i64 %ret
+}
+
+; CHECK-LABEL: @atomicdec_global_to_flat_i32(
+; CHECK: call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %global.ptr, i32 %val, i32 0, i32 0, i1 false)
+define i32 @atomicdec_global_to_flat_i32(i32 addrspace(1)* %global.ptr, i32 %val) #0 {
+ %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
+ %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %cast, i32 %val, i32 0, i32 0, i1 false)
+ ret i32 %ret
+}
+
+; CHECK-LABEL: @atomicdec_group_to_flat_i32(
+; CHECK: %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %group.ptr, i32 %val, i32 0, i32 0, i1 false)
+define i32 @atomicdec_group_to_flat_i32(i32 addrspace(3)* %group.ptr, i32 %val) #0 {
+ %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
+ %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %cast, i32 %val, i32 0, i32 0, i1 false)
+ ret i32 %ret
+}
+
+; CHECK-LABEL: @atomicdec_global_to_flat_i64(
+; CHECK: call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %global.ptr, i64 %y, i32 0, i32 0, i1 false)
+define i64 @atomicdec_global_to_flat_i64(i64 addrspace(1)* %global.ptr, i64 %y) #0 {
+ %cast = addrspacecast i64 addrspace(1)* %global.ptr to i64 addrspace(4)*
+ %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %cast, i64 %y, i32 0, i32 0, i1 false)
+ ret i64 %ret
+}
+
+; CHECK-LABEL: @atomicdec_group_to_flat_i64(
+; CHECK: call i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* %group.ptr, i64 %y, i32 0, i32 0, i1 false)
+define i64 @atomicdec_group_to_flat_i64(i64 addrspace(3)* %group.ptr, i64 %y) #0 {
+ %cast = addrspacecast i64 addrspace(3)* %group.ptr to i64 addrspace(4)*
+ %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %cast, i64 %y, i32 0, i32 0, i1 false)
+ ret i64 %ret
+}
+
+; CHECK-LABEL: @volatile_atomicinc_group_to_flat_i64(
+; CHECK-NEXT: %1 = addrspacecast i64 addrspace(3)* %group.ptr to i64 addrspace(4)*
+; CHECK-NEXT: %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %1, i64 %y, i32 0, i32 0, i1 true)
+define i64 @volatile_atomicinc_group_to_flat_i64(i64 addrspace(3)* %group.ptr, i64 %y) #0 {
+ %cast = addrspacecast i64 addrspace(3)* %group.ptr to i64 addrspace(4)*
+ %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %cast, i64 %y, i32 0, i32 0, i1 true)
+ ret i64 %ret
+}
+
+; CHECK-LABEL: @volatile_atomicdec_global_to_flat_i32(
+; CHECK-NEXT: %1 = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
+; CHECK-NEXT: %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %1, i32 %val, i32 0, i32 0, i1 true)
+define i32 @volatile_atomicdec_global_to_flat_i32(i32 addrspace(1)* %global.ptr, i32 %val) #0 {
+ %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
+ %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %cast, i32 %val, i32 0, i32 0, i1 true)
+ ret i32 %ret
+}
+
+; CHECK-LABEL: @volatile_atomicdec_group_to_flat_i32(
+; CHECK-NEXT: %1 = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
+; CHECK-NEXT: %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %1, i32 %val, i32 0, i32 0, i1 true)
+define i32 @volatile_atomicdec_group_to_flat_i32(i32 addrspace(3)* %group.ptr, i32 %val) #0 {
+ %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
+ %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* %cast, i32 %val, i32 0, i32 0, i1 true)
+ ret i32 %ret
+}
+
+; CHECK-LABEL: @volatile_atomicdec_global_to_flat_i64(
+; CHECK-NEXT: %1 = addrspacecast i64 addrspace(1)* %global.ptr to i64 addrspace(4)*
+; CHECK-NEXT: %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %1, i64 %y, i32 0, i32 0, i1 true)
+define i64 @volatile_atomicdec_global_to_flat_i64(i64 addrspace(1)* %global.ptr, i64 %y) #0 {
+ %cast = addrspacecast i64 addrspace(1)* %global.ptr to i64 addrspace(4)*
+ %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %cast, i64 %y, i32 0, i32 0, i1 true)
+ ret i64 %ret
+}
+
+; CHECK-LABEL: @volatile_atomicdec_group_to_flat_i64(
+; CHECK-NEXT: %1 = addrspacecast i64 addrspace(3)* %group.ptr to i64 addrspace(4)*
+; CHECK-NEXT: %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %1, i64 %y, i32 0, i32 0, i1 true)
+define i64 @volatile_atomicdec_group_to_flat_i64(i64 addrspace(3)* %group.ptr, i64 %y) #0 {
+ %cast = addrspacecast i64 addrspace(3)* %group.ptr to i64 addrspace(4)*
+ %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* %cast, i64 %y, i32 0, i32 0, i1 true)
+ ret i64 %ret
+}
+
+; CHECK-LABEL: @invalid_variable_volatile_atomicinc_group_to_flat_i64(
+; CHECK-NEXT: %1 = addrspacecast i64 addrspace(3)* %group.ptr to i64 addrspace(4)*
+; CHECK-NEXT: %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %1, i64 %y, i32 0, i32 0, i1 %volatile.var)
+define i64 @invalid_variable_volatile_atomicinc_group_to_flat_i64(i64 addrspace(3)* %group.ptr, i64 %y, i1 %volatile.var) #0 {
+ %cast = addrspacecast i64 addrspace(3)* %group.ptr to i64 addrspace(4)*
+ %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* %cast, i64 %y, i32 0, i32 0, i1 %volatile.var)
+ ret i64 %ret
+}
+
+declare i32 @llvm.objectsize.i32.p4i8(i8 addrspace(4)*, i1, i1) #1
+declare i64 @llvm.objectsize.i64.p4i8(i8 addrspace(4)*, i1, i1) #1
+declare i32 @llvm.amdgcn.atomic.inc.i32.p4i32(i32 addrspace(4)* nocapture, i32, i32, i32, i1) #2
+declare i64 @llvm.amdgcn.atomic.inc.i64.p4i64(i64 addrspace(4)* nocapture, i64, i32, i32, i1) #2
+declare i32 @llvm.amdgcn.atomic.dec.i32.p4i32(i32 addrspace(4)* nocapture, i32, i32, i32, i1) #2
+declare i64 @llvm.amdgcn.atomic.dec.i64.p4i64(i64 addrspace(4)* nocapture, i64, i32, i32, i1) #2
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind argmemonly }
diff --git a/test/Transforms/InferAddressSpaces/AMDGPU/lit.local.cfg b/test/Transforms/InferAddressSpaces/AMDGPU/lit.local.cfg
new file mode 100644
index 000000000000..6baccf05fff0
--- /dev/null
+++ b/test/Transforms/InferAddressSpaces/AMDGPU/lit.local.cfg
@@ -0,0 +1,3 @@
+if not 'AMDGPU' in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/InferAddressSpaces/AMDGPU/mem-intrinsics.ll b/test/Transforms/InferAddressSpaces/AMDGPU/mem-intrinsics.ll
new file mode 100644
index 000000000000..557a80f1a5d1
--- /dev/null
+++ b/test/Transforms/InferAddressSpaces/AMDGPU/mem-intrinsics.ll
@@ -0,0 +1,134 @@
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s
+
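+; Memory intrinsic calls through flat pointers that are known addrspacecasts
+; should be rewritten to the specific address-space variants, with metadata
+; (!tbaa, !alias.scope, !noalias, !tbaa.struct) preserved on the new calls.
+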
+; CHECK-LABEL: @memset_group_to_flat(
+; CHECK: call void @llvm.memset.p3i8.i64(i8 addrspace(3)* %group.ptr, i8 4, i64 32, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
+define amdgpu_kernel void @memset_group_to_flat(i8 addrspace(3)* %group.ptr, i32 %y) #0 {
+ %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8 addrspace(4)*
+ call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %cast, i8 4, i64 32, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
+ ret void
+}
+
+; CHECK-LABEL: @memset_global_to_flat(
+; CHECK: call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %global.ptr, i8 4, i64 32, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
+define amdgpu_kernel void @memset_global_to_flat(i8 addrspace(1)* %global.ptr, i32 %y) #0 {
+ %cast = addrspacecast i8 addrspace(1)* %global.ptr to i8 addrspace(4)*
+ call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %cast, i8 4, i64 32, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
+ ret void
+}
+
+; CHECK-LABEL: @memset_group_to_flat_no_md(
+; CHECK: call void @llvm.memset.p3i8.i64(i8 addrspace(3)* %group.ptr, i8 4, i64 %size, i32 4, i1 false){{$}}
+define amdgpu_kernel void @memset_group_to_flat_no_md(i8 addrspace(3)* %group.ptr, i64 %size) #0 {
+ %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8 addrspace(4)*
+ call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %cast, i8 4, i64 %size, i32 4, i1 false)
+ ret void
+}
+
+; CHECK-LABEL: @memset_global_to_flat_no_md(
+; CHECK: call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %global.ptr, i8 4, i64 %size, i32 4, i1 false){{$}}
+define amdgpu_kernel void @memset_global_to_flat_no_md(i8 addrspace(1)* %global.ptr, i64 %size) #0 {
+ %cast = addrspacecast i8 addrspace(1)* %global.ptr to i8 addrspace(4)*
+ call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %cast, i8 4, i64 %size, i32 4, i1 false)
+ ret void
+}
+
+; CHECK-LABEL: @memcpy_flat_to_flat_replace_src_with_group(
+; CHECK: call void @llvm.memcpy.p4i8.p3i8.i64(i8 addrspace(4)* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
+define amdgpu_kernel void @memcpy_flat_to_flat_replace_src_with_group(i8 addrspace(4)* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
+ %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8 addrspace(4)*
+ call void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* %dest, i8 addrspace(4)* %cast.src, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
+ ret void
+}
+
+; CHECK-LABEL: @memcpy_flat_to_flat_replace_dest_with_group(
+; CHECK: call void @llvm.memcpy.p3i8.p4i8.i64(i8 addrspace(3)* %dest.group.ptr, i8 addrspace(4)* %src.ptr, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
+define amdgpu_kernel void @memcpy_flat_to_flat_replace_dest_with_group(i8 addrspace(3)* %dest.group.ptr, i8 addrspace(4)* %src.ptr, i64 %size) #0 {
+ %cast.dest = addrspacecast i8 addrspace(3)* %dest.group.ptr to i8 addrspace(4)*
+ call void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* %cast.dest, i8 addrspace(4)* %src.ptr, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
+ ret void
+}
+
+; CHECK-LABEL: @memcpy_flat_to_flat_replace_dest_src_with_group(
+; CHECK: call void @llvm.memcpy.p3i8.p3i8.i64(i8 addrspace(3)* %dest.group.ptr, i8 addrspace(3)* %src.group.ptr, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
+define amdgpu_kernel void @memcpy_flat_to_flat_replace_dest_src_with_group(i8 addrspace(3)* %dest.group.ptr, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
+ %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8 addrspace(4)*
+  %cast.dest = addrspacecast i8 addrspace(3)* %dest.group.ptr to i8 addrspace(4)*
+ call void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* %cast.dest, i8 addrspace(4)* %cast.src, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
+ ret void
+}
+
+; CHECK-LABEL: @memcpy_flat_to_flat_replace_dest_group_src_global(
+; CHECK: call void @llvm.memcpy.p3i8.p1i8.i64(i8 addrspace(3)* %dest.group.ptr, i8 addrspace(1)* %src.global.ptr, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
+define amdgpu_kernel void @memcpy_flat_to_flat_replace_dest_group_src_global(i8 addrspace(3)* %dest.group.ptr, i8 addrspace(1)* %src.global.ptr, i64 %size) #0 {
+ %cast.src = addrspacecast i8 addrspace(1)* %src.global.ptr to i8 addrspace(4)*
+ %cast.dest = addrspacecast i8 addrspace(3)* %dest.group.ptr to i8 addrspace(4)*
+ call void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* %cast.dest, i8 addrspace(4)* %cast.src, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
+ ret void
+}
+
+; CHECK-LABEL: @memcpy_group_to_flat_replace_dest_global(
+; CHECK: call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %dest.global.ptr, i8 addrspace(3)* %src.group.ptr, i32 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
+define amdgpu_kernel void @memcpy_group_to_flat_replace_dest_global(i8 addrspace(1)* %dest.global.ptr, i8 addrspace(3)* %src.group.ptr, i32 %size) #0 {
+ %cast.dest = addrspacecast i8 addrspace(1)* %dest.global.ptr to i8 addrspace(4)*
+ call void @llvm.memcpy.p4i8.p3i8.i32(i8 addrspace(4)* %cast.dest, i8 addrspace(3)* %src.group.ptr, i32 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
+ ret void
+}
+
+; CHECK-LABEL: @memcpy_flat_to_flat_replace_src_with_group_tbaa_struct(
+; CHECK: call void @llvm.memcpy.p4i8.p3i8.i64(i8 addrspace(4)* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size, i32 4, i1 false), !tbaa.struct !7
+define amdgpu_kernel void @memcpy_flat_to_flat_replace_src_with_group_tbaa_struct(i8 addrspace(4)* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
+ %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8 addrspace(4)*
+ call void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* %dest, i8 addrspace(4)* %cast.src, i64 %size, i32 4, i1 false), !tbaa.struct !7
+ ret void
+}
+
+; CHECK-LABEL: @memcpy_flat_to_flat_replace_src_with_group_no_md(
+; CHECK: call void @llvm.memcpy.p4i8.p3i8.i64(i8 addrspace(4)* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size, i32 4, i1 false){{$}}
+define amdgpu_kernel void @memcpy_flat_to_flat_replace_src_with_group_no_md(i8 addrspace(4)* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
+ %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8 addrspace(4)*
+ call void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* %dest, i8 addrspace(4)* %cast.src, i64 %size, i32 4, i1 false)
+ ret void
+}
+
+; CHECK-LABEL: @multiple_memcpy_flat_to_flat_replace_src_with_group_no_md(
+; CHECK: call void @llvm.memcpy.p4i8.p3i8.i64(i8 addrspace(4)* %dest0, i8 addrspace(3)* %src.group.ptr, i64 %size, i32 4, i1 false){{$}}
+; CHECK: call void @llvm.memcpy.p4i8.p3i8.i64(i8 addrspace(4)* %dest1, i8 addrspace(3)* %src.group.ptr, i64 %size, i32 4, i1 false){{$}}
+define amdgpu_kernel void @multiple_memcpy_flat_to_flat_replace_src_with_group_no_md(i8 addrspace(4)* %dest0, i8 addrspace(4)* %dest1, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
+ %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8 addrspace(4)*
+ call void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* %dest0, i8 addrspace(4)* %cast.src, i64 %size, i32 4, i1 false)
+ call void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* %dest1, i8 addrspace(4)* %cast.src, i64 %size, i32 4, i1 false)
+ ret void
+}
+
+; Check for iterator problems if the pointer has 2 uses in the same call
+; CHECK-LABEL: @memcpy_group_flat_to_flat_self(
+; CHECK: call void @llvm.memcpy.p3i8.p3i8.i64(i8 addrspace(3)* %group.ptr, i8 addrspace(3)* %group.ptr, i64 32, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
+define amdgpu_kernel void @memcpy_group_flat_to_flat_self(i8 addrspace(3)* %group.ptr) #0 {
+ %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8 addrspace(4)*
+ call void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* %cast, i8 addrspace(4)* %cast, i64 32, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
+ ret void
+}
+
+; CHECK-LABEL: @memmove_flat_to_flat_replace_src_with_group(
+; CHECK: call void @llvm.memmove.p4i8.p3i8.i64(i8 addrspace(4)* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
+define amdgpu_kernel void @memmove_flat_to_flat_replace_src_with_group(i8 addrspace(4)* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
+ %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8 addrspace(4)*
+ call void @llvm.memmove.p4i8.p4i8.i64(i8 addrspace(4)* %dest, i8 addrspace(4)* %cast.src, i64 %size, i32 4, i1 false), !tbaa !0, !alias.scope !3, !noalias !4
+ ret void
+}
+
+declare void @llvm.memset.p4i8.i64(i8 addrspace(4)* nocapture writeonly, i8, i64, i32, i1) #1
+declare void @llvm.memcpy.p4i8.p4i8.i64(i8 addrspace(4)* nocapture writeonly, i8 addrspace(4)* nocapture readonly, i64, i32, i1) #1
+declare void @llvm.memcpy.p4i8.p3i8.i32(i8 addrspace(4)* nocapture writeonly, i8 addrspace(3)* nocapture readonly, i32, i32, i1) #1
+declare void @llvm.memmove.p4i8.p4i8.i64(i8 addrspace(4)* nocapture writeonly, i8 addrspace(4)* nocapture readonly, i64, i32, i1) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { argmemonly nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"A", !2}
+!2 = !{!"tbaa root"}
+!3 = !{!"B", !2}
+!4 = !{!5}
+!5 = distinct !{!5, !6, !"some scope"}
+!6 = distinct !{!6, !"some domain"}
+!7 = !{i64 0, i64 8, null} \ No newline at end of file
diff --git a/test/Transforms/InferAddressSpaces/AMDGPU/old-pass-regressions.ll b/test/Transforms/InferAddressSpaces/AMDGPU/old-pass-regressions.ll
new file mode 100644
index 000000000000..3231b6ccf1cc
--- /dev/null
+++ b/test/Transforms/InferAddressSpaces/AMDGPU/old-pass-regressions.ll
@@ -0,0 +1,143 @@
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s
+
+; Regression tests from old HSAIL addrspacecast optimization pass
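+; Here addrspace(4) is the flat address space; addrspace(1) is global and
+; addrspace(3) is group (LDS).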
+
+@data = internal addrspace(1) global [100 x double] [double 0.00, double 1.000000e-01, double 2.000000e-01, double 3.000000e-01, double 4.000000e-01, double 5.000000e-01, double 6.000000e-01, double 7.000000e-01, double 8.000000e-01, double 9.000000e-01, double 1.00, double 1.10, double 1.20, double 1.30, double 1.40, double 1.50, double 1.60, double 1.70, double 1.80, double 1.90, double 2.00, double 2.10, double 2.20, double 2.30, double 2.40, double 2.50, double 2.60, double 2.70, double 2.80, double 2.90, double 3.00, double 3.10, double 3.20, double 3.30, double 3.40, double 3.50, double 3.60, double 3.70, double 3.80, double 3.90, double 4.00, double 4.10, double 4.20, double 4.30, double 4.40, double 4.50, double 4.60, double 4.70, double 4.80, double 4.90, double 5.00, double 5.10, double 5.20, double 5.30, double 5.40, double 5.50, double 5.60, double 5.70, double 5.80, double 5.90, double 6.00, double 6.10, double 6.20, double 6.30, double 6.40, double 6.50, double 6.60, double 6.70, double 6.80, double 6.90, double 7.00, double 7.10, double 7.20, double 7.30, double 7.40, double 7.50, double 7.60, double 7.70, double 7.80, double 7.90, double 8.00, double 8.10, double 8.20, double 8.30, double 8.40, double 8.50, double 8.60, double 8.70, double 8.80, double 8.90, double 9.00, double 9.10, double 9.20, double 9.30, double 9.40, double 9.50, double 9.60, double 9.70, double 9.80, double 9.90], align 8
+
+
+; The flat constant-expression load should be rewritten to address the
+; global directly.
+
+; CHECK-LABEL: @generic_address_bitcast_const(
+; CHECK: %vecload1 = load <2 x double>, <2 x double> addrspace(1)* bitcast (double addrspace(1)* getelementptr inbounds ([100 x double], [100 x double] addrspace(1)* @data, i64 0, i64 4) to <2 x double> addrspace(1)*), align 8
+define amdgpu_kernel void @generic_address_bitcast_const(i64 %arg0, i32 addrspace(1)* nocapture %results) #0 {
+entry:
+ %tmp1 = call i32 @llvm.amdgcn.workitem.id.x()
+ %tmp2 = zext i32 %tmp1 to i64
+ %tmp3 = add i64 %tmp2, %arg0
+ %vecload1 = load <2 x double>, <2 x double> addrspace(4)* bitcast (double addrspace(4)* getelementptr ([100 x double], [100 x double] addrspace(4)* addrspacecast ([100 x double] addrspace(1)* @data to [100 x double] addrspace(4)*), i64 0, i64 4) to <2 x double> addrspace(4)*), align 8
+ %cmp = fcmp ord <2 x double> %vecload1, zeroinitializer
+ %sext = sext <2 x i1> %cmp to <2 x i64>
+ %tmp4 = extractelement <2 x i64> %sext, i64 0
+ %tmp5 = extractelement <2 x i64> %sext, i64 1
+ %tmp6 = and i64 %tmp4, %tmp5
+ %tmp7 = lshr i64 %tmp6, 63
+ %tmp8 = trunc i64 %tmp7 to i32
+ %idxprom = and i64 %tmp3, 4294967295
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %results, i64 %idxprom
+ store i32 %tmp8, i32 addrspace(1)* %arrayidx, align 4
+ ret void
+}
+
+@generic_address_bug9749.val = internal addrspace(1) global float 0.0, align 4
+
+declare i32 @_Z9get_fencePU3AS4v(i8 addrspace(4)*)
+%opencl.pipe_t = type opaque
+
+; This was a compile-time assert bug, but we still want to check that the
+; optimization is performed to generate ld_global.
+; CHECK-LABEL: @generic_address_pipe_bug9673(
+; CHECK: %tmp1 = bitcast %opencl.pipe_t addrspace(3)* %in_pipe to i32 addrspace(3)*
+; CHECK: %add.ptr = getelementptr inbounds i32, i32 addrspace(3)* %tmp1, i32 2
+; CHECK: %tmp2 = load i32, i32 addrspace(3)* %add.ptr, align 4
+define amdgpu_kernel void @generic_address_pipe_bug9673(%opencl.pipe_t addrspace(3)* nocapture %in_pipe, i32 addrspace(1)* nocapture %dst) #0 {
+entry:
+ %tmp = call i32 @llvm.amdgcn.workitem.id.x()
+ %tmp1 = bitcast %opencl.pipe_t addrspace(3)* %in_pipe to i32 addrspace(3)*
+ %add.ptr = getelementptr inbounds i32, i32 addrspace(3)* %tmp1, i32 2
+ %tmp2 = load i32, i32 addrspace(3)* %add.ptr, align 4
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %dst, i32 %tmp
+ store i32 %tmp2, i32 addrspace(1)* %arrayidx, align 4
+ ret void
+}
+
+; Should generate flat load
+; CHECK-LABEL: @generic_address_bug9749(
+; CHECK: br i1
+; CHECK: load float, float addrspace(4)*
+; CHECK: br label
+define amdgpu_kernel void @generic_address_bug9749(i32 addrspace(1)* nocapture %results) #0 {
+entry:
+ %ptr = alloca float addrspace(4)*, align 8
+ %tmp = call i32 @llvm.amdgcn.workitem.id.x()
+ %tmp1 = zext i32 %tmp to i64
+ store float 0x3FB99999A0000000, float addrspace(1)* @generic_address_bug9749.val, align 4
+ store volatile float addrspace(4)* addrspacecast (float addrspace(1)* @generic_address_bug9749.val to float addrspace(4)*), float addrspace(4)** %ptr, align 8
+ %tmp2 = load volatile float addrspace(4)*, float addrspace(4)** %ptr, align 8
+ %tmp3 = load float, float addrspace(1)* @generic_address_bug9749.val, align 4
+ %tmp4 = bitcast float addrspace(4)* %tmp2 to i8 addrspace(4)*
+ %call.i = call i32 @_Z9get_fencePU3AS4v(i8 addrspace(4)* %tmp4) #1
+ %switch.i.i = icmp ult i32 %call.i, 4
+ br i1 %switch.i.i, label %if.end.i, label %helperFunction.exit
+
+if.end.i: ; preds = %entry
+ %tmp5 = load float, float addrspace(4)* %tmp2, align 4
+ %not.cmp.i = fcmp oeq float %tmp5, %tmp3
+ %phitmp = zext i1 %not.cmp.i to i32
+ br label %helperFunction.exit
+
+helperFunction.exit: ; preds = %if.end.i, %entry
+ %retval.0.i = phi i32 [ 0, %entry ], [ %phitmp, %if.end.i ]
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %results, i64 %tmp1
+ store i32 %retval.0.i, i32 addrspace(1)* %arrayidx, align 4
+ ret void
+}
+
+; CHECK-LABEL: @generic_address_opt_phi_bug9776_simple_phi_kernel(
+; CHECK: phi i32 addrspace(3)*
+; CHECK: store i32 %i.03, i32 addrspace(3)* %
+define amdgpu_kernel void @generic_address_opt_phi_bug9776_simple_phi_kernel(i32 addrspace(3)* nocapture %in, i32 %numElems) #0 {
+entry:
+ %cmp1 = icmp eq i32 %numElems, 0
+ br i1 %cmp1, label %for.end, label %for.body.lr.ph
+
+for.body.lr.ph: ; preds = %entry
+ %tmp = addrspacecast i32 addrspace(3)* %in to i32 addrspace(4)*
+ br label %for.body
+
+for.body: ; preds = %for.body, %for.body.lr.ph
+ %i.03 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
+ %ptr.02 = phi i32 addrspace(4)* [ %tmp, %for.body.lr.ph ], [ %add.ptr, %for.body ]
+ store i32 %i.03, i32 addrspace(4)* %ptr.02, align 4
+ %add.ptr = getelementptr inbounds i32, i32 addrspace(4)* %ptr.02, i64 4
+ %inc = add nuw i32 %i.03, 1
+ %exitcond = icmp eq i32 %inc, %numElems
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+; CHECK-LABEL: @generic_address_bug9899(
+; CHECK: %vecload = load <2 x i32>, <2 x i32> addrspace(3)*
+; CHECK: store <2 x i32> %tmp16, <2 x i32> addrspace(3)*
+define amdgpu_kernel void @generic_address_bug9899(i64 %arg0, i32 addrspace(3)* nocapture %sourceA, i32 addrspace(3)* nocapture %destValues) #0 {
+entry:
+ %tmp1 = call i32 @llvm.amdgcn.workitem.id.x()
+ %tmp2 = zext i32 %tmp1 to i64
+ %tmp3 = add i64 %tmp2, %arg0
+ %sext = shl i64 %tmp3, 32
+ %tmp4 = addrspacecast i32 addrspace(3)* %destValues to i32 addrspace(4)*
+ %tmp5 = addrspacecast i32 addrspace(3)* %sourceA to i32 addrspace(4)*
+ %tmp6 = ashr exact i64 %sext, 31
+ %tmp7 = getelementptr inbounds i32, i32 addrspace(4)* %tmp5, i64 %tmp6
+ %arrayidx_v4 = bitcast i32 addrspace(4)* %tmp7 to <2 x i32> addrspace(4)*
+ %vecload = load <2 x i32>, <2 x i32> addrspace(4)* %arrayidx_v4, align 4
+ %tmp8 = extractelement <2 x i32> %vecload, i32 0
+ %tmp9 = extractelement <2 x i32> %vecload, i32 1
+ %tmp10 = icmp eq i32 %tmp8, 0
+ %tmp11 = select i1 %tmp10, i32 32, i32 %tmp8
+ %tmp12 = icmp eq i32 %tmp9, 0
+ %tmp13 = select i1 %tmp12, i32 32, i32 %tmp9
+ %tmp14 = getelementptr inbounds i32, i32 addrspace(4)* %tmp4, i64 %tmp6
+ %tmp15 = insertelement <2 x i32> undef, i32 %tmp11, i32 0
+ %tmp16 = insertelement <2 x i32> %tmp15, i32 %tmp13, i32 1
+ %arrayidx_v41 = bitcast i32 addrspace(4)* %tmp14 to <2 x i32> addrspace(4)*
+ store <2 x i32> %tmp16, <2 x i32> addrspace(4)* %arrayidx_v41, align 4
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #2
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readonly }
+attributes #2 = { nounwind readnone } \ No newline at end of file
diff --git a/test/Transforms/InferAddressSpaces/AMDGPU/select.ll b/test/Transforms/InferAddressSpaces/AMDGPU/select.ll
new file mode 100644
index 000000000000..08edc20ecf9b
--- /dev/null
+++ b/test/Transforms/InferAddressSpaces/AMDGPU/select.ll
@@ -0,0 +1,264 @@
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s
+
+; Instcombine pulls the addrspacecast out of the select; make sure
+; this doesn't do something insane on non-canonical IR.
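+;
+; A rough sketch of the non-canonical pattern (names illustrative):
+;   %cast0 = addrspacecast i32 addrspace(3)* %a to i32 addrspace(4)*
+;   %cast1 = addrspacecast i32 addrspace(3)* %b to i32 addrspace(4)*
+;   %select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* %cast1
+; When both operands cast from the same address space, the select itself can
+; be rewritten over the addrspace(3) pointers.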
+
+; CHECK-LABEL: @return_select_group_flat(
+; CHECK-NEXT: %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+; CHECK-NEXT: %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32 addrspace(4)*
+; CHECK-NEXT: %select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* %cast1
+; CHECK-NEXT: ret i32 addrspace(4)* %select
+define i32 addrspace(4)* @return_select_group_flat(i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32 addrspace(4)*
+ %select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* %cast1
+ ret i32 addrspace(4)* %select
+}
+
+; CHECK-LABEL: @store_select_group_flat(
+; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1
+; CHECK: store i32 -1, i32 addrspace(3)* %select
+define amdgpu_kernel void @store_select_group_flat(i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32 addrspace(4)*
+ %select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* %cast1
+ store i32 -1, i32 addrspace(4)* %select
+ ret void
+}
+
+; Make sure metadata is preserved
+; CHECK-LABEL: @load_select_group_flat_md(
+; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1, !prof !0
+; CHECK: %load = load i32, i32 addrspace(3)* %select
+define i32 @load_select_group_flat_md(i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32 addrspace(4)*
+ %select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* %cast1, !prof !0
+ %load = load i32, i32 addrspace(4)* %select
+ ret i32 %load
+}
+
+; CHECK-LABEL: @store_select_mismatch_group_private_flat(
+; CHECK: %1 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+; CHECK: %2 = addrspacecast i32* %private.ptr.1 to i32 addrspace(4)*
+; CHECK: %select = select i1 %c, i32 addrspace(4)* %1, i32 addrspace(4)* %2
+; CHECK: store i32 -1, i32 addrspace(4)* %select
+define amdgpu_kernel void @store_select_mismatch_group_private_flat(i1 %c, i32 addrspace(3)* %group.ptr.0, i32* %private.ptr.1) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %cast1 = addrspacecast i32* %private.ptr.1 to i32 addrspace(4)*
+ %select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* %cast1
+ store i32 -1, i32 addrspace(4)* %select
+ ret void
+}
+
+@lds0 = internal addrspace(3) global i32 123, align 4
+@lds1 = internal addrspace(3) global i32 456, align 4
+
+; CHECK-LABEL: @constexpr_select_group_flat(
+; CHECK: %tmp = load i32, i32 addrspace(3)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(3)* @lds0, i32 addrspace(3)* @lds1)
+define i32 @constexpr_select_group_flat() #0 {
+bb:
+ %tmp = load i32, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds0 to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds1 to i32 addrspace(4)*))
+ ret i32 %tmp
+}
+
+; CHECK-LABEL: @constexpr_select_group_global_flat_mismatch(
+; CHECK: %tmp = load i32, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds0 to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*))
+define i32 @constexpr_select_group_global_flat_mismatch() #0 {
+bb:
+ %tmp = load i32, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds0 to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*))
+ ret i32 %tmp
+}
+
+; CHECK-LABEL: @store_select_group_flat_null(
+; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*)
+; CHECK: store i32 -1, i32 addrspace(3)* %select
+define amdgpu_kernel void @store_select_group_flat_null(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* null
+ store i32 -1, i32 addrspace(4)* %select
+ ret void
+}
+
+; CHECK-LABEL: @store_select_group_flat_null_swap(
+; CHECK: %select = select i1 %c, i32 addrspace(3)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*), i32 addrspace(3)* %group.ptr.0
+; CHECK: store i32 -1, i32 addrspace(3)* %select
+define amdgpu_kernel void @store_select_group_flat_null_swap(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %select = select i1 %c, i32 addrspace(4)* null, i32 addrspace(4)* %cast0
+ store i32 -1, i32 addrspace(4)* %select
+ ret void
+}
+
+; CHECK-LABEL: @store_select_group_flat_undef(
+; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* undef
+; CHECK: store i32 -1, i32 addrspace(3)* %select
+define amdgpu_kernel void @store_select_group_flat_undef(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* undef
+ store i32 -1, i32 addrspace(4)* %select
+ ret void
+}
+
+; CHECK-LABEL: @store_select_group_flat_undef_swap(
+; CHECK: %select = select i1 %c, i32 addrspace(3)* undef, i32 addrspace(3)* %group.ptr.0
+; CHECK: store i32 -1, i32 addrspace(3)* %select
+define amdgpu_kernel void @store_select_group_flat_undef_swap(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %select = select i1 %c, i32 addrspace(4)* undef, i32 addrspace(4)* %cast0
+ store i32 -1, i32 addrspace(4)* %select
+ ret void
+}
+
+; CHECK-LABEL: @store_select_gep_group_flat_null(
+; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*)
+; CHECK: %gep = getelementptr i32, i32 addrspace(3)* %select, i64 16
+; CHECK: store i32 -1, i32 addrspace(3)* %gep
+define amdgpu_kernel void @store_select_gep_group_flat_null(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* null
+ %gep = getelementptr i32, i32 addrspace(4)* %select, i64 16
+ store i32 -1, i32 addrspace(4)* %gep
+ ret void
+}
+
+@global0 = internal addrspace(1) global i32 123, align 4
+
+; CHECK-LABEL: @store_select_group_flat_constexpr(
+; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* @lds1
+; CHECK: store i32 7, i32 addrspace(3)* %select
+define amdgpu_kernel void @store_select_group_flat_constexpr(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds1 to i32 addrspace(4)*)
+ store i32 7, i32 addrspace(4)* %select
+ ret void
+}
+
+; CHECK-LABEL: @store_select_group_flat_inttoptr_flat(
+; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* addrspacecast (i32 addrspace(4)* inttoptr (i64 12345 to i32 addrspace(4)*) to i32 addrspace(3)*)
+; CHECK: store i32 7, i32 addrspace(3)* %select
+define amdgpu_kernel void @store_select_group_flat_inttoptr_flat(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* inttoptr (i64 12345 to i32 addrspace(4)*)
+ store i32 7, i32 addrspace(4)* %select
+ ret void
+}
+
+; CHECK-LABEL: @store_select_group_flat_inttoptr_group(
+; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* inttoptr (i32 400 to i32 addrspace(3)*)
+; CHECK-NEXT: store i32 7, i32 addrspace(3)* %select
+define amdgpu_kernel void @store_select_group_flat_inttoptr_group(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* inttoptr (i32 400 to i32 addrspace(3)*) to i32 addrspace(4)*)
+ store i32 7, i32 addrspace(4)* %select
+ ret void
+}
+
+; CHECK-LABEL: @store_select_group_global_mismatch_flat_constexpr(
+; CHECK: %1 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+; CHECK: %select = select i1 %c, i32 addrspace(4)* %1, i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*)
+; CHECK: store i32 7, i32 addrspace(4)* %select
+define amdgpu_kernel void @store_select_group_global_mismatch_flat_constexpr(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*)
+ store i32 7, i32 addrspace(4)* %select
+ ret void
+}
+
+; CHECK-LABEL: @store_select_group_global_mismatch_flat_constexpr_swap(
+; CHECK: %1 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+; CHECK: %select = select i1 %c, i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*), i32 addrspace(4)* %1
+; CHECK: store i32 7, i32 addrspace(4)* %select
+define amdgpu_kernel void @store_select_group_global_mismatch_flat_constexpr_swap(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %select = select i1 %c, i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*), i32 addrspace(4)* %cast0
+ store i32 7, i32 addrspace(4)* %select
+ ret void
+}
+
+; CHECK-LABEL: @store_select_group_global_mismatch_null_null(
+; CHECK: %select = select i1 %c, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* null to i32 addrspace(4)*)
+; CHECK: store i32 7, i32 addrspace(4)* %select
+define amdgpu_kernel void @store_select_group_global_mismatch_null_null(i1 %c) #0 {
+ %select = select i1 %c, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* null to i32 addrspace(4)*)
+ store i32 7, i32 addrspace(4)* %select
+ ret void
+}
+
+; CHECK-LABEL: @store_select_group_global_mismatch_null_null_constexpr(
+; CHECK: store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* null to i32 addrspace(4)*)), align 4
+define amdgpu_kernel void @store_select_group_global_mismatch_null_null_constexpr() #0 {
+ store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* null to i32 addrspace(4)*)), align 4
+ ret void
+}
+
+; CHECK-LABEL: @store_select_group_global_mismatch_gv_null_constexpr(
+; CHECK: store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds0 to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* null to i32 addrspace(4)*)), align 4
+define amdgpu_kernel void @store_select_group_global_mismatch_gv_null_constexpr() #0 {
+ store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds0 to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* null to i32 addrspace(4)*)), align 4
+ ret void
+}
+
+; CHECK-LABEL: @store_select_group_global_mismatch_null_gv_constexpr(
+; CHECK: store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*)), align 4
+define amdgpu_kernel void @store_select_group_global_mismatch_null_gv_constexpr() #0 {
+ store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global0 to i32 addrspace(4)*)), align 4
+ ret void
+}
+
+; CHECK-LABEL: @store_select_group_global_mismatch_inttoptr_null_constexpr(
+; CHECK: store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* inttoptr (i64 123 to i32 addrspace(3)*) to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* null to i32 addrspace(4)*)), align 4
+define amdgpu_kernel void @store_select_group_global_mismatch_inttoptr_null_constexpr() #0 {
+ store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* inttoptr (i64 123 to i32 addrspace(3)*) to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* null to i32 addrspace(4)*)), align 4
+ ret void
+}
+
+; CHECK-LABEL: @store_select_group_global_mismatch_inttoptr_flat_null_constexpr(
+; CHECK: store i32 7, i32 addrspace(1)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(1)* addrspacecast (i32 addrspace(4)* inttoptr (i64 123 to i32 addrspace(4)*) to i32 addrspace(1)*), i32 addrspace(1)* null), align 4
+define amdgpu_kernel void @store_select_group_global_mismatch_inttoptr_flat_null_constexpr() #0 {
+ store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* inttoptr (i64 123 to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* null to i32 addrspace(4)*)), align 4
+ ret void
+}
+
+; CHECK-LABEL: @store_select_group_global_mismatch_undef_undef_constexpr(
+; CHECK: store i32 7, i32 addrspace(3)* null
+define amdgpu_kernel void @store_select_group_global_mismatch_undef_undef_constexpr() #0 {
+ store i32 7, i32 addrspace(4)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*), i32 addrspace(4)* addrspacecast (i32 addrspace(1)* undef to i32 addrspace(4)*)), align 4
+ ret void
+}
+
+@lds2 = external addrspace(3) global [1024 x i32], align 4
+
+; CHECK-LABEL: @store_select_group_constexpr_ptrtoint(
+; CHECK: %1 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+; CHECK: %select = select i1 %c, i32 addrspace(4)* %1, i32 addrspace(4)* addrspacecast (i32 addrspace(1)* inttoptr (i32 add (i32 ptrtoint ([1024 x i32] addrspace(3)* @lds2 to i32), i32 124) to i32 addrspace(1)*) to i32 addrspace(4)*)
+; CHECK: store i32 7, i32 addrspace(4)* %select
+define amdgpu_kernel void @store_select_group_constexpr_ptrtoint(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
+ %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32 addrspace(4)*
+ %select = select i1 %c, i32 addrspace(4)* %cast0, i32 addrspace(4)* addrspacecast (i32 addrspace(1)* inttoptr (i32 add (i32 ptrtoint ([1024 x i32] addrspace(3)* @lds2 to i32), i32 124) to i32 addrspace(1)*) to i32 addrspace(4)*)
+ store i32 7, i32 addrspace(4)* %select
+ ret void
+}
+
+; CHECK-LABEL: @store_select_group_flat_vector(
+; CHECK: %cast0 = addrspacecast <2 x i32 addrspace(3)*> %group.ptr.0 to <2 x i32 addrspace(4)*>
+; CHECK: %cast1 = addrspacecast <2 x i32 addrspace(3)*> %group.ptr.1 to <2 x i32 addrspace(4)*>
+; CHECK: %select = select i1 %c, <2 x i32 addrspace(4)*> %cast0, <2 x i32 addrspace(4)*> %cast1
+; CHECK: %extract0 = extractelement <2 x i32 addrspace(4)*> %select, i32 0
+; CHECK: %extract1 = extractelement <2 x i32 addrspace(4)*> %select, i32 1
+; CHECK: store i32 -1, i32 addrspace(4)* %extract0
+; CHECK: store i32 -2, i32 addrspace(4)* %extract1
+define amdgpu_kernel void @store_select_group_flat_vector(i1 %c, <2 x i32 addrspace(3)*> %group.ptr.0, <2 x i32 addrspace(3)*> %group.ptr.1) #0 {
+ %cast0 = addrspacecast <2 x i32 addrspace(3)*> %group.ptr.0 to <2 x i32 addrspace(4)*>
+ %cast1 = addrspacecast <2 x i32 addrspace(3)*> %group.ptr.1 to <2 x i32 addrspace(4)*>
+ %select = select i1 %c, <2 x i32 addrspace(4)*> %cast0, <2 x i32 addrspace(4)*> %cast1
+ %extract0 = extractelement <2 x i32 addrspace(4)*> %select, i32 0
+ %extract1 = extractelement <2 x i32 addrspace(4)*> %select, i32 1
+ store i32 -1, i32 addrspace(4)* %extract0
+ store i32 -2, i32 addrspace(4)* %extract1
+ ret void
+}
+
+attributes #0 = { nounwind }
+
+!0 = !{!"branch_weights", i32 2, i32 10}
diff --git a/test/Transforms/InferAddressSpaces/AMDGPU/volatile.ll b/test/Transforms/InferAddressSpaces/AMDGPU/volatile.ll
new file mode 100644
index 000000000000..79bf92610a8d
--- /dev/null
+++ b/test/Transforms/InferAddressSpaces/AMDGPU/volatile.ll
@@ -0,0 +1,140 @@
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s
+
+; Check that volatile users of addrspacecast are not replaced.
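+; For a non-volatile access the pass would rewrite, e.g. (illustrative):
+;   %tmp = addrspacecast i32 addrspace(1)* %p to i32 addrspace(4)*
+;   %val = load i32, i32 addrspace(4)* %tmp
+; into a load directly from i32 addrspace(1)*; the volatile accesses below
+; must instead stay on the flat (addrspace 4) pointer.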
+
+; CHECK-LABEL: @volatile_load_flat_from_global(
+; CHECK: load volatile i32, i32 addrspace(4)*
+; CHECK: store i32 %val, i32 addrspace(1)*
+define amdgpu_kernel void @volatile_load_flat_from_global(i32 addrspace(1)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
+ %tmp0 = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
+ %tmp1 = addrspacecast i32 addrspace(1)* %output to i32 addrspace(4)*
+ %val = load volatile i32, i32 addrspace(4)* %tmp0, align 4
+ store i32 %val, i32 addrspace(4)* %tmp1, align 4
+ ret void
+}
+
+; CHECK-LABEL: @volatile_load_flat_from_constant(
+; CHECK: load volatile i32, i32 addrspace(4)*
+; CHECK: store i32 %val, i32 addrspace(1)*
+define amdgpu_kernel void @volatile_load_flat_from_constant(i32 addrspace(2)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
+ %tmp0 = addrspacecast i32 addrspace(2)* %input to i32 addrspace(4)*
+ %tmp1 = addrspacecast i32 addrspace(1)* %output to i32 addrspace(4)*
+ %val = load volatile i32, i32 addrspace(4)* %tmp0, align 4
+ store i32 %val, i32 addrspace(4)* %tmp1, align 4
+ ret void
+}
+
+; CHECK-LABEL: @volatile_load_flat_from_group(
+; CHECK: load volatile i32, i32 addrspace(4)*
+; CHECK: store i32 %val, i32 addrspace(3)*
+define amdgpu_kernel void @volatile_load_flat_from_group(i32 addrspace(3)* nocapture %input, i32 addrspace(3)* nocapture %output) #0 {
+ %tmp0 = addrspacecast i32 addrspace(3)* %input to i32 addrspace(4)*
+ %tmp1 = addrspacecast i32 addrspace(3)* %output to i32 addrspace(4)*
+ %val = load volatile i32, i32 addrspace(4)* %tmp0, align 4
+ store i32 %val, i32 addrspace(4)* %tmp1, align 4
+ ret void
+}
+
+; CHECK-LABEL: @volatile_load_flat_from_private(
+; CHECK: load volatile i32, i32 addrspace(4)*
+; CHECK: store i32 %val, i32*
+define amdgpu_kernel void @volatile_load_flat_from_private(i32* nocapture %input, i32* nocapture %output) #0 {
+ %tmp0 = addrspacecast i32* %input to i32 addrspace(4)*
+ %tmp1 = addrspacecast i32* %output to i32 addrspace(4)*
+ %val = load volatile i32, i32 addrspace(4)* %tmp0, align 4
+ store i32 %val, i32 addrspace(4)* %tmp1, align 4
+ ret void
+}
+
+; CHECK-LABEL: @volatile_store_flat_to_global(
+; CHECK: load i32, i32 addrspace(1)*
+; CHECK: store volatile i32 %val, i32 addrspace(4)*
+define amdgpu_kernel void @volatile_store_flat_to_global(i32 addrspace(1)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
+ %tmp0 = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
+ %tmp1 = addrspacecast i32 addrspace(1)* %output to i32 addrspace(4)*
+ %val = load i32, i32 addrspace(4)* %tmp0, align 4
+ store volatile i32 %val, i32 addrspace(4)* %tmp1, align 4
+ ret void
+}
+
+; CHECK-LABEL: @volatile_store_flat_to_group(
+; CHECK: load i32, i32 addrspace(3)*
+; CHECK: store volatile i32 %val, i32 addrspace(4)*
+define amdgpu_kernel void @volatile_store_flat_to_group(i32 addrspace(3)* nocapture %input, i32 addrspace(3)* nocapture %output) #0 {
+ %tmp0 = addrspacecast i32 addrspace(3)* %input to i32 addrspace(4)*
+ %tmp1 = addrspacecast i32 addrspace(3)* %output to i32 addrspace(4)*
+ %val = load i32, i32 addrspace(4)* %tmp0, align 4
+ store volatile i32 %val, i32 addrspace(4)* %tmp1, align 4
+ ret void
+}
+
+; CHECK-LABEL: @volatile_store_flat_to_private(
+; CHECK: load i32, i32*
+; CHECK: store volatile i32 %val, i32 addrspace(4)*
+define amdgpu_kernel void @volatile_store_flat_to_private(i32* nocapture %input, i32* nocapture %output) #0 {
+ %tmp0 = addrspacecast i32* %input to i32 addrspace(4)*
+ %tmp1 = addrspacecast i32* %output to i32 addrspace(4)*
+ %val = load i32, i32 addrspace(4)* %tmp0, align 4
+ store volatile i32 %val, i32 addrspace(4)* %tmp1, align 4
+ ret void
+}
+
+; CHECK-LABEL: @volatile_atomicrmw_add_group_to_flat(
+; CHECK: addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
+; CHECK: atomicrmw volatile add i32 addrspace(4)*
+define i32 @volatile_atomicrmw_add_group_to_flat(i32 addrspace(3)* %group.ptr, i32 %y) #0 {
+ %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
+ %ret = atomicrmw volatile add i32 addrspace(4)* %cast, i32 %y seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: @volatile_atomicrmw_add_global_to_flat(
+; CHECK: addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
+; CHECK: %ret = atomicrmw volatile add i32 addrspace(4)*
+define i32 @volatile_atomicrmw_add_global_to_flat(i32 addrspace(1)* %global.ptr, i32 %y) #0 {
+ %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
+ %ret = atomicrmw volatile add i32 addrspace(4)* %cast, i32 %y seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: @volatile_cmpxchg_global_to_flat(
+; CHECK: addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
+; CHECK: cmpxchg volatile i32 addrspace(4)*
+define { i32, i1 } @volatile_cmpxchg_global_to_flat(i32 addrspace(1)* %global.ptr, i32 %cmp, i32 %val) #0 {
+ %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
+ %ret = cmpxchg volatile i32 addrspace(4)* %cast, i32 %cmp, i32 %val seq_cst monotonic
+ ret { i32, i1 } %ret
+}
+
+; CHECK-LABEL: @volatile_cmpxchg_group_to_flat(
+; CHECK: addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
+; CHECK: cmpxchg volatile i32 addrspace(4)*
+define { i32, i1 } @volatile_cmpxchg_group_to_flat(i32 addrspace(3)* %group.ptr, i32 %cmp, i32 %val) #0 {
+ %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
+ %ret = cmpxchg volatile i32 addrspace(4)* %cast, i32 %cmp, i32 %val seq_cst monotonic
+ ret { i32, i1 } %ret
+}
+
+; FIXME: Shouldn't be losing names
+; CHECK-LABEL: @volatile_memset_group_to_flat(
+; CHECK: addrspacecast i8 addrspace(3)* %group.ptr to i8 addrspace(4)*
+; CHECK: call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %1, i8 4, i64 32, i32 4, i1 true)
+define amdgpu_kernel void @volatile_memset_group_to_flat(i8 addrspace(3)* %group.ptr, i32 %y) #0 {
+ %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8 addrspace(4)*
+ call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %cast, i8 4, i64 32, i32 4, i1 true)
+ ret void
+}
+
+; CHECK-LABEL: @volatile_memset_global_to_flat(
+; CHECK: addrspacecast i8 addrspace(1)* %global.ptr to i8 addrspace(4)*
+; CHECK: call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %1, i8 4, i64 32, i32 4, i1 true)
+define amdgpu_kernel void @volatile_memset_global_to_flat(i8 addrspace(1)* %global.ptr, i32 %y) #0 {
+ %cast = addrspacecast i8 addrspace(1)* %global.ptr to i8 addrspace(4)*
+ call void @llvm.memset.p4i8.i64(i8 addrspace(4)* %cast, i8 4, i64 32, i32 4, i1 true)
+ ret void
+}
+
+declare void @llvm.memset.p4i8.i64(i8 addrspace(4)* nocapture writeonly, i8, i64, i32, i1) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { argmemonly nounwind }
diff --git a/test/Transforms/InferAddressSpaces/NVPTX/bug31948.ll b/test/Transforms/InferAddressSpaces/NVPTX/bug31948.ll
new file mode 100644
index 000000000000..b2d8ddb19565
--- /dev/null
+++ b/test/Transforms/InferAddressSpaces/NVPTX/bug31948.ll
@@ -0,0 +1,24 @@
+; RUN: opt -S -mtriple=nvptx64-nvidia-cuda -infer-address-spaces %s | FileCheck %s
+
+target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
+
+%struct.bar = type { float, float* }
+
+@var1 = local_unnamed_addr addrspace(3) externally_initialized global %struct.bar undef, align 8
+
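+; Regression test for PR31948: accesses through constant-expression GEPs over
+; the addrspacecast of @var1 should be rewritten to address the addrspace(3)
+; global directly where possible.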
+; CHECK-LABEL: @bug31948(
+; CHECK: %tmp = load float*, float* addrspace(3)* getelementptr inbounds (%struct.bar, %struct.bar addrspace(3)* @var1, i64 0, i32 1), align 8
+; CHECK: %tmp1 = load float, float* %tmp, align 4
+; CHECK: store float %conv1, float* %tmp, align 4
+; CHECK: store i32 32, i32 addrspace(3)* addrspacecast (i32* bitcast (float** getelementptr (%struct.bar, %struct.bar* addrspacecast (%struct.bar addrspace(3)* @var1 to %struct.bar*), i64 0, i32 1) to i32*) to i32 addrspace(3)*), align 4
+define void @bug31948(float %a, float* nocapture readnone %x, float* nocapture readnone %y) local_unnamed_addr #0 {
+entry:
+ %tmp = load float*, float** getelementptr (%struct.bar, %struct.bar* addrspacecast (%struct.bar addrspace(3)* @var1 to %struct.bar*), i64 0, i32 1), align 8
+ %tmp1 = load float, float* %tmp, align 4
+ %conv1 = fadd float %tmp1, 1.000000e+00
+ store float %conv1, float* %tmp, align 4
+ store i32 32, i32* bitcast (float** getelementptr (%struct.bar, %struct.bar* addrspacecast (%struct.bar addrspace(3)* @var1 to %struct.bar*), i64 0, i32 1) to i32*), align 4
+ ret void
+}
+
+attributes #0 = { norecurse nounwind }
diff --git a/test/Transforms/InferAddressSpaces/NVPTX/lit.local.cfg b/test/Transforms/InferAddressSpaces/NVPTX/lit.local.cfg
new file mode 100644
index 000000000000..2cb98eb371b2
--- /dev/null
+++ b/test/Transforms/InferAddressSpaces/NVPTX/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'NVPTX' in config.root.targets:
+ config.unsupported = True
diff --git a/test/Transforms/Inline/AArch64/gep-cost.ll b/test/Transforms/Inline/AArch64/gep-cost.ll
new file mode 100644
index 000000000000..204958f082dd
--- /dev/null
+++ b/test/Transforms/Inline/AArch64/gep-cost.ll
@@ -0,0 +1,30 @@
+; REQUIRES: asserts
+; RUN: opt -inline -mtriple=aarch64--linux-gnu -mcpu=kryo -S -debug-only=inline-cost < %s 2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnu"
+
+define void @outer([4 x i32]* %ptr, i32 %i) {
+ call void @inner1([4 x i32]* %ptr, i32 %i)
+ call void @inner2([4 x i32]* %ptr, i32 %i)
+ ret void
+}
+; The gep in inner1() is reg+reg, which is a legal addressing mode for AArch64.
+; Thus, both the gep and ret can be simplified.
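+; (The gep computes %ptr + %i * 4, which matches the base-register plus
+; scaled-index form, e.g. [x0, x1, lsl #2].)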
+; CHECK: Analyzing call of inner1
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 2
+define void @inner1([4 x i32]* %ptr, i32 %i) {
+ %G = getelementptr inbounds [4 x i32], [4 x i32]* %ptr, i32 0, i32 %i
+ ret void
+}
+
+; The gep in inner2() is reg+imm+reg, which is not a legal addressing mode for
+; AArch64. Thus, only the ret can be simplified and not the gep.
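+; (Here the gep computes %ptr + 16 + %i * 4; the constant offset on top of a
+; scaled index register cannot be folded into a single addressing mode.)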
+; CHECK: Analyzing call of inner2
+; CHECK: NumInstructionsSimplified: 1
+; CHECK: NumInstructions: 2
+define void @inner2([4 x i32]* %ptr, i32 %i) {
+ %G = getelementptr inbounds [4 x i32], [4 x i32]* %ptr, i32 1, i32 %i
+ ret void
+}
diff --git a/test/Transforms/Inline/AArch64/lit.local.cfg b/test/Transforms/Inline/AArch64/lit.local.cfg
new file mode 100644
index 000000000000..7184443994b6
--- /dev/null
+++ b/test/Transforms/Inline/AArch64/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'AArch64' in config.root.targets:
+ config.unsupported = True
diff --git a/test/Transforms/Inline/alloca-bonus.ll b/test/Transforms/Inline/alloca-bonus.ll
index 542dcee0fcb2..c5c2ce11cc5b 100644
--- a/test/Transforms/Inline/alloca-bonus.ll
+++ b/test/Transforms/Inline/alloca-bonus.ll
@@ -3,7 +3,7 @@
target datalayout = "p:32:32"
-declare void @llvm.lifetime.start(i64 %size, i8* nocapture %ptr)
+declare void @llvm.lifetime.start.p0i8(i64 %size, i8* nocapture %ptr)
@glbl = external global i32
@@ -22,7 +22,7 @@ define void @inner1(i32 *%ptr) {
%D = getelementptr inbounds i32, i32* %ptr, i32 1
%E = bitcast i32* %ptr to i8*
%F = select i1 false, i32* %ptr, i32* @glbl
- call void @llvm.lifetime.start(i64 0, i8* %E)
+ call void @llvm.lifetime.start.p0i8(i64 0, i8* %E)
call void @extern()
ret void
}
@@ -43,7 +43,7 @@ define void @inner2(i32 *%ptr) {
%D = getelementptr inbounds i32, i32* %ptr, i32 %A
%E = bitcast i32* %ptr to i8*
%F = select i1 false, i32* %ptr, i32* @glbl
- call void @llvm.lifetime.start(i64 0, i8* %E)
+ call void @llvm.lifetime.start.p0i8(i64 0, i8* %E)
call void @extern()
ret void
}
@@ -152,7 +152,7 @@ if.then:
%D = getelementptr inbounds i32, i32* %ptr, i32 %A
%E = bitcast i32* %ptr to i8*
%F = select i1 false, i32* %ptr, i32* @glbl
- call void @llvm.lifetime.start(i64 0, i8* %E)
+ call void @llvm.lifetime.start.p0i8(i64 0, i8* %E)
ret void
exit:
diff --git a/test/Transforms/Inline/arg-attr-propagation.ll b/test/Transforms/Inline/arg-attr-propagation.ll
new file mode 100644
index 000000000000..3d18e8047e5b
--- /dev/null
+++ b/test/Transforms/Inline/arg-attr-propagation.ll
@@ -0,0 +1,50 @@
+; RUN: opt -inline -S < %s | FileCheck %s
+
+; The callee guarantees that the pointer argument is nonnull and dereferenceable.
+; That information should transfer to the caller.
+
+define i32 @callee(i32* dereferenceable(32) %t1) {
+; CHECK-LABEL: @callee(i32* dereferenceable(32) %t1)
+; CHECK-NEXT: [[T2:%.*]] = load i32, i32* %t1
+; CHECK-NEXT: ret i32 [[T2]]
+;
+ %t2 = load i32, i32* %t1
+ ret i32 %t2
+}
+
+; FIXME: All dereferenceability information is lost.
+; The caller argument could be known nonnull and dereferenceable(32).
+
+define i32 @caller1(i32* %t1) {
+; CHECK-LABEL: @caller1(i32* %t1)
+; CHECK-NEXT: [[T2_I:%.*]] = load i32, i32* %t1
+; CHECK-NEXT: ret i32 [[T2_I]]
+;
+ %t2 = tail call i32 @callee(i32* dereferenceable(32) %t1)
+ ret i32 %t2
+}
+
+; The caller argument is nonnull, but that could be made explicit.
+; The dereferenceable amount could be increased.
+
+define i32 @caller2(i32* dereferenceable(31) %t1) {
+; CHECK-LABEL: @caller2(i32* dereferenceable(31) %t1)
+; CHECK-NEXT: [[T2_I:%.*]] = load i32, i32* %t1
+; CHECK-NEXT: ret i32 [[T2_I]]
+;
+ %t2 = tail call i32 @callee(i32* dereferenceable(32) %t1)
+ ret i32 %t2
+}
+
+; The caller argument is nonnull, but that could be made explicit.
+; Make sure that we don't propagate a smaller dereferenceable amount.
+
+define i32 @caller3(i32* dereferenceable(33) %t1) {
+; CHECK-LABEL: @caller3(i32* dereferenceable(33) %t1)
+; CHECK-NEXT: [[T2_I:%.*]] = load i32, i32* %t1
+; CHECK-NEXT: ret i32 [[T2_I]]
+;
+ %t2 = tail call i32 @callee(i32* dereferenceable(32) %t1)
+ ret i32 %t2
+}
+
diff --git a/test/Transforms/Inline/bfi-update.ll b/test/Transforms/Inline/bfi-update.ll
new file mode 100644
index 000000000000..94584e2e6ce5
--- /dev/null
+++ b/test/Transforms/Inline/bfi-update.ll
@@ -0,0 +1,93 @@
+; RUN: opt < %s -passes='require<profile-summary>,cgscc(inline)' -S -inline-threshold=50 -inline-cold-callsite-threshold=0 -hot-callsite-threshold=50 | FileCheck %s
+; This tests incremental updates to caller's BFI as a callee gets inlined.
+; In bottom-up inlining, first c->e inlining is considered and fails because
+; e's size exceeds the threshold of 50. Then a->c inlining is considered and it
+; succeeds. a's BFI is updated incrementally. As c's blocks get pruned, the
+; block with label cond_false is removed, and since the remaining code is
+; straight-line, a single block gets cloned into a. This block should get the
+; maximum block frequency among the original blocks in c. If it gets the
+; frequency of the block with label cond_true in @c, its frequency will be
+; 1/10th of function a's entry block frequency, resulting in a callsite count of
+; 2 (since a's entry count is 20) which means that a->e callsite will be
+; considered cold and not inlined.
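+;
+; As a sketch of the expected end state (inferred from the counts below, not
+; a documented formula): a's entry count is 20, so inlining a->c leaves c with
+; 500 - 20 = 480 (COUNT1), and inlining a->e with the same callsite count of
+; 20 leaves e with 100 - 20 = 80 (COUNT2).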
+
+@data = external global i32
+; CHECK-LABEL: define i32 @a(
+define i32 @a(i32 %a1) !prof !21 {
+; CHECK-NOT: call i32 @c
+; CHECK-NOT: call i32 @e
+; CHECK: ret
+entry:
+ %cond = icmp sle i32 %a1, 1
+ %a2 = call i32 @c(i32 1)
+ br label %exit
+exit:
+ ret i32 %a2
+}
+
+declare void @ext();
+
+; CHECK: @c(i32 %c1) !prof [[COUNT1:![0-9]+]]
+define i32 @c(i32 %c1) !prof !23 {
+ call void @ext()
+ %cond = icmp sle i32 %c1, 1
+ br i1 %cond, label %cond_true, label %cond_false, !prof !25
+
+cond_false:
+ br label %exit
+
+cond_true:
+ %c11 = call i32 @e(i32 %c1)
+ br label %exit
+exit:
+ %c12 = phi i32 [ 0, %cond_false], [ %c11, %cond_true ]
+ ret i32 %c12
+}
+
+
+; CHECK: @e(i32 %c1) !prof [[COUNT2:![0-9]+]]
+define i32 @e(i32 %c1) !prof !24 {
+ call void @ext()
+ call void @ext()
+ %cond = icmp sle i32 %c1, 1
+ br i1 %cond, label %cond_true, label %cond_false
+
+cond_false:
+ call void @ext()
+ %c2 = load i32, i32* @data, align 4
+ %c3 = add i32 %c1, %c2
+ %c4 = mul i32 %c3, %c2
+ %c5 = add i32 %c4, %c2
+ %c6 = mul i32 %c5, %c2
+ %c7 = add i32 %c6, %c2
+ %c8 = mul i32 %c7, %c2
+ %c9 = add i32 %c8, %c2
+ %c10 = mul i32 %c9, %c2
+ ret i32 %c10
+
+cond_true:
+ ret i32 0
+}
+
+; CHECK: [[COUNT1]] = !{!"function_entry_count", i64 480}
+; CHECK: [[COUNT2]] = !{!"function_entry_count", i64 80}
+!21 = !{!"function_entry_count", i64 20}
+!23 = !{!"function_entry_count", i64 500}
+!24 = !{!"function_entry_count", i64 100}
+!25 = !{!"branch_weights", i32 1, i32 9}
+
+!llvm.module.flags = !{!1}
+!1 = !{i32 1, !"ProfileSummary", !2}
+!2 = !{!3, !4, !5, !6, !7, !8, !9, !10}
+!3 = !{!"ProfileFormat", !"InstrProf"}
+!4 = !{!"TotalCount", i64 10000}
+!5 = !{!"MaxCount", i64 1000}
+!6 = !{!"MaxInternalCount", i64 1}
+!7 = !{!"MaxFunctionCount", i64 1000}
+!8 = !{!"NumCounts", i64 3}
+!9 = !{!"NumFunctions", i64 3}
+!10 = !{!"DetailedSummary", !11}
+!11 = !{!12, !13, !14}
+!12 = !{i32 10000, i64 1000, i32 1}
+!13 = !{i32 999000, i64 1000, i32 1}
+!14 = !{i32 999999, i64 5, i32 2}
diff --git a/test/Transforms/Inline/cgscc-incremental-invalidate.ll b/test/Transforms/Inline/cgscc-incremental-invalidate.ll
new file mode 100644
index 000000000000..82d321ccf225
--- /dev/null
+++ b/test/Transforms/Inline/cgscc-incremental-invalidate.ll
@@ -0,0 +1,111 @@
+; Test for a subtle bug when computing analyses during inlining and mutating
+; the SCC structure. Without care, this can fail to invalidate analyses.
+;
+; RUN: opt < %s -passes='cgscc(inline,function(verify<domtree>))' -debug-pass-manager -S 2>&1 | FileCheck %s
+
+; First we check that the passes run in the way we expect. Otherwise this test
+; may stop testing anything.
+;
+; CHECK-LABEL: Starting llvm::Module pass manager run.
+; CHECK: Running pass: InlinerPass on (test1_f, test1_g, test1_h)
+; CHECK: Running analysis: FunctionAnalysisManagerCGSCCProxy on (test1_f, test1_g, test1_h)
+; CHECK: Running analysis: DominatorTreeAnalysis on test1_f
+; CHECK: Running analysis: DominatorTreeAnalysis on test1_g
+; CHECK: Invalidating all non-preserved analyses for: (test1_f, test1_g, test1_h)
+; CHECK: Invalidating all non-preserved analyses for: test1_f
+; CHECK: Invalidating analysis: DominatorTreeAnalysis on test1_f
+; CHECK: Invalidating all non-preserved analyses for: test1_g
+; CHECK: Invalidating analysis: DominatorTreeAnalysis on test1_g
+; CHECK: Invalidating all non-preserved analyses for: test1_h
+; CHECK-NOT: Invalidating analysis:
+; CHECK: Running analysis: DominatorTreeAnalysis on test1_h
+; CHECK: Invalidating all non-preserved analyses for: (test1_g, test1_h)
+; CHECK: Invalidating all non-preserved analyses for: test1_h
+; CHECK: Invalidating analysis: DominatorTreeAnalysis on test1_h
+
+; An external function used to control branches.
+declare i1 @flag()
+; CHECK-LABEL: declare i1 @flag()
+
+; The utility function with interesting control flow that gets inlined below to
+; perturb the dominator tree.
+define internal void @callee() {
+entry:
+ %ptr = alloca i8
+ %flag = call i1 @flag()
+ br i1 %flag, label %then, label %else
+
+then:
+ store volatile i8 42, i8* %ptr
+ br label %return
+
+else:
+ store volatile i8 -42, i8* %ptr
+ br label %return
+
+return:
+ ret void
+}
+
+; The 'test1_' prefixed functions work to carefully test that incrementally
+; reducing an SCC in the inliner cannot accidentally leave stale function
+; analysis results due to failing to invalidate them for all the functions.
+
+; The inliner visits this last function. It can't actually break any cycles
+; here, but because we visit this function we compute fresh analyses for it.
+; These analyses are then invalidated when we inline callee, disrupting the
+; CFG, and it is important that they be freed.
+define void @test1_h() {
+; CHECK-LABEL: define void @test1_h()
+entry:
+ call void @test1_g()
+; CHECK: call void @test1_g()
+
+ ; Pull interesting CFG into this function.
+ call void @callee()
+; CHECK-NOT: call void @callee()
+
+ ret void
+; CHECK: ret void
+}
+
+; We visit this function second and here we inline the edge to 'test1_f'
+; separating it into its own SCC. The current SCC is now just 'test1_g' and
+; 'test1_h'.
+define void @test1_g() {
+; CHECK-LABEL: define void @test1_g()
+entry:
+ ; This edge gets inlined away.
+ call void @test1_f()
+; CHECK-NOT: call void @test1_f()
+; CHECK: call void @test1_g()
+
+ ; We force this edge to survive inlining.
+ call void @test1_h() noinline
+; CHECK: call void @test1_h()
+
+ ; Pull interesting CFG into this function.
+ call void @callee()
+; CHECK-NOT: call void @callee()
+
+ ret void
+; CHECK: ret void
+}
+
+; We visit this function first in the inliner, and while we inline callee
+; perturbing the CFG, we don't inline anything else, and the SCC structure
+; remains intact.
+define void @test1_f() {
+; CHECK-LABEL: define void @test1_f()
+entry:
+ ; We force this edge to survive inlining.
+ call void @test1_g() noinline
+; CHECK: call void @test1_g()
+
+ ; Pull interesting CFG into this function.
+ call void @callee()
+; CHECK-NOT: call void @callee()
+
+ ret void
+; CHECK: ret void
+}
diff --git a/test/Transforms/Inline/cgscc-invalidate.ll b/test/Transforms/Inline/cgscc-invalidate.ll
index 60315cda771d..69d84f65e251 100644
--- a/test/Transforms/Inline/cgscc-invalidate.ll
+++ b/test/Transforms/Inline/cgscc-invalidate.ll
@@ -65,15 +65,15 @@ entry:
; The 'test3_' prefixed functions test the scenario of not inlining preserving
; dominators after splitting an SCC into two smaller SCCs.
-; The first function gets visited first and we end up inlining everything we
-; can into this routine. That splits test3_g into a separate SCC that is enqued
-; for later processing.
-define void @test3_f() {
-; CHECK-LABEL: define void @test3_f()
+; This function ends up split into a separate SCC, which can cause its analyses
+; to become stale if the splitting doesn't properly invalidate things. Also, as
+; a consequence of being split out, test3_f is too large to inline by the time
+; we get here.
+define void @test3_g() {
+; CHECK-LABEL: define void @test3_g()
entry:
- ; Create the first edge in the SCC cycle.
- call void @test3_g()
-; CHECK-NOT: @test3_g()
+ ; Create the second edge in the SCC cycle.
+ call void @test3_f()
; CHECK: call void @test3_f()
; Pull interesting CFG into this function.
@@ -84,15 +84,15 @@ entry:
; CHECK: ret void
}
-; This function ends up split into a separate SCC, which can cause its analyses
-; to become stale if the splitting doesn't properly invalidate things. Also, as
-; a consequence of being split out, test3_f is too large to inline by the time
-; we get here.
-define void @test3_g() {
-; CHECK-LABEL: define void @test3_g()
+; The second function gets visited first and we end up inlining everything we
+; can into this routine. That splits test3_g into a separate SCC that is enqueued
+; for later processing.
+define void @test3_f() {
+; CHECK-LABEL: define void @test3_f()
entry:
- ; Create the second edge in the SCC cycle.
- call void @test3_f()
+ ; Create the first edge in the SCC cycle.
+ call void @test3_g()
+; CHECK-NOT: @test3_g()
; CHECK: call void @test3_f()
; Pull interesting CFG into this function.
diff --git a/test/Transforms/Inline/clear-analyses.ll b/test/Transforms/Inline/clear-analyses.ll
new file mode 100644
index 000000000000..4b1d37ca29a9
--- /dev/null
+++ b/test/Transforms/Inline/clear-analyses.ll
@@ -0,0 +1,32 @@
+; Test that when a pass like correlated-propagation populates an analysis such
+; as LVI with references back into the IR of a function that the inliner will
+; delete, this doesn't crash or go awry despite the inliner clearing the analyses
+; separately from when it deletes the function.
+;
+; RUN: opt -debug-pass-manager -S < %s 2>&1 \
+; RUN: -passes='cgscc(inline,function(correlated-propagation))' \
+; RUN: | FileCheck %s
+;
+; CHECK-LABEL: Starting llvm::Module pass manager run.
+; CHECK: Running pass: InlinerPass on (callee)
+; CHECK: Running pass: CorrelatedValuePropagationPass on callee
+; CHECK: Running analysis: LazyValueAnalysis
+; CHECK: Running pass: InlinerPass on (caller)
+; CHECK: Clearing all analysis results for: callee
+; CHECK: Running pass: CorrelatedValuePropagationPass on caller
+; CHECK: Running analysis: LazyValueAnalysis
+
+define internal i32 @callee(i32 %x) {
+; CHECK-NOT: @callee
+entry:
+ ret i32 %x
+}
+
+define i32 @caller(i32 %x) {
+; CHECK-LABEL: define i32 @caller
+entry:
+ %call = call i32 @callee(i32 %x)
+; CHECK-NOT: call
+ ret i32 %call
+; CHECK: ret i32 %x
+}
diff --git a/test/Transforms/Inline/crash-lifetime-marker.ll b/test/Transforms/Inline/crash-lifetime-marker.ll
index e7a594cdb5e4..7196616521e9 100644
--- a/test/Transforms/Inline/crash-lifetime-marker.ll
+++ b/test/Transforms/Inline/crash-lifetime-marker.ll
@@ -15,9 +15,9 @@ define i32 @callee1(i32 %count) {
; CHECK-LABEL: define i32 @caller1(
; CHECK: [[ALLOCA:%[a-z0-9\.]+]] = alloca i8
-; CHECK-NOT: call void @llvm.lifetime.start(
+; CHECK-NOT: call void @llvm.lifetime.start.p0i8(
; CHECK: call i32 @callee2(i8* [[ALLOCA]])
-; CHECK-NOT: call void @llvm.lifetime.end(
+; CHECK-NOT: call void @llvm.lifetime.end.p0i8(
define i32 @caller1(i32 %count) {
%call0 = call i32 @callee1(i32 0)
diff --git a/test/Transforms/Inline/function-count-update-2.ll b/test/Transforms/Inline/function-count-update-2.ll
new file mode 100644
index 000000000000..702fa6292c29
--- /dev/null
+++ b/test/Transforms/Inline/function-count-update-2.ll
@@ -0,0 +1,33 @@
+; RUN: opt < %s -passes='require<profile-summary>,cgscc(inline)' -S | FileCheck %s
+
+; This tests that the function count of a callee gets correctly updated after it
+; has been inlined into two callsites.
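+;
+; Expected arithmetic, assuming both callsites get inlined: callee's entry
+; count starts at 1000; caller1 (entry count 600) and caller2 (entry count
+; 400) each remove their callsite count, leaving 1000 - 600 - 400 = 0, which
+; is what the COUNT check below expects.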
+
+; CHECK: @callee() !prof [[COUNT:![0-9]+]]
+define i32 @callee() !prof !1 {
+ ret i32 0
+}
+
+define i32 @caller1() !prof !2 {
+; CHECK-LABEL: @caller1
+; CHECK-NOT: callee
+; CHECK: ret
+ %i = call i32 @callee()
+ ret i32 %i
+}
+
+define i32 @caller2() !prof !3 {
+; CHECK-LABEL: @caller2
+; CHECK-NOT: callee
+; CHECK: ret
+ %i = call i32 @callee()
+ ret i32 %i
+}
+
+!llvm.module.flags = !{!0}
+; CHECK: [[COUNT]] = !{!"function_entry_count", i64 0}
+!0 = !{i32 1, !"MaxFunctionCount", i32 1000}
+!1 = !{!"function_entry_count", i64 1000}
+!2 = !{!"function_entry_count", i64 600}
+!3 = !{!"function_entry_count", i64 400}
+
diff --git a/test/Transforms/Inline/function-count-update-3.ll b/test/Transforms/Inline/function-count-update-3.ll
new file mode 100644
index 000000000000..215d64175faf
--- /dev/null
+++ b/test/Transforms/Inline/function-count-update-3.ll
@@ -0,0 +1,78 @@
+; RUN: opt < %s -passes='require<profile-summary>,cgscc(inline)' -S -inline-threshold=50 | FileCheck %s
+
+; This tests that the function count of a function gets properly scaled after
+; inlining a call chain leading to the function.
+; Function a calls c with count 200 (C1)
+; Function c calls e with count 250 (C2)
+; Entry count of e is 500 (C3)
+; Entry count of c is 500 (C4)
+; Function b calls c with count 300 (C5)
+; c->e inlining does not happen since the cost exceeds the threshold.
+; c is then inlined into a.
+; e now gets inlined into a (through c) since the branch condition in e is now
+; known and hence the cost gets reduced.
+; Estimated count of a->e callsite = C2 * (C1 / C4)
+; Estimated count of a->e callsite = 250 * (200 / 500) = 100
+; Remaining count of e = C3 - 100 = 500 - 100 = 400
+; Remaining count of c = C4 - C1 - C5 = 500 - 200 - 300 = 0
+
+@data = external global i32
+
+define i32 @a(i32 %a1) !prof !1 {
+ %a2 = call i32 @c(i32 %a1, i32 1)
+ ret i32 %a2
+}
+
+define i32 @b(i32 %b1) !prof !2 {
+ %b2 = call i32 @c(i32 %b1, i32 %b1)
+ ret i32 %b2
+}
+
+declare void @ext();
+
+; CHECK: @c(i32 %c1, i32 %c100) !prof [[COUNT1:![0-9]+]]
+define i32 @c(i32 %c1, i32 %c100) !prof !3 {
+ call void @ext()
+ %cond = icmp sle i32 %c1, 1
+ br i1 %cond, label %cond_true, label %cond_false
+
+cond_false:
+ ret i32 0
+
+cond_true:
+ %c11 = call i32 @e(i32 %c100)
+ ret i32 %c11
+}
+
+
+; CHECK: @e(i32 %c1) !prof [[COUNT2:![0-9]+]]
+define i32 @e(i32 %c1) !prof !4 {
+ %cond = icmp sle i32 %c1, 1
+ br i1 %cond, label %cond_true, label %cond_false
+
+cond_false:
+ call void @ext()
+ %c2 = load i32, i32* @data, align 4
+ %c3 = add i32 %c1, %c2
+ %c4 = mul i32 %c3, %c2
+ %c5 = add i32 %c4, %c2
+ %c6 = mul i32 %c5, %c2
+ %c7 = add i32 %c6, %c2
+ %c8 = mul i32 %c7, %c2
+ %c9 = add i32 %c8, %c2
+ %c10 = mul i32 %c9, %c2
+ ret i32 %c10
+
+cond_true:
+ ret i32 0
+}
+
+!llvm.module.flags = !{!0}
+; CHECK: [[COUNT1]] = !{!"function_entry_count", i64 0}
+; CHECK: [[COUNT2]] = !{!"function_entry_count", i64 400}
+!0 = !{i32 1, !"MaxFunctionCount", i32 5000}
+!1 = !{!"function_entry_count", i64 200}
+!2 = !{!"function_entry_count", i64 300}
+!3 = !{!"function_entry_count", i64 500}
+!4 = !{!"function_entry_count", i64 500}
+
diff --git a/test/Transforms/Inline/function-count-update.ll b/test/Transforms/Inline/function-count-update.ll
new file mode 100644
index 000000000000..094ad5a2ae67
--- /dev/null
+++ b/test/Transforms/Inline/function-count-update.ll
@@ -0,0 +1,50 @@
+; RUN: opt < %s -passes='require<profile-summary>,cgscc(inline)' -S | FileCheck %s
+
+; This tests that the function counts of two callees get correctly updated after
+; they have been inlined into two back-to-back callsites in a single basic block
+; in the caller. The callees have the alwaysinline attribute and so they get
+; inlined both with the regular inliner pass and the always inline pass. In
+; both cases, the new count of each callee is the original count minus callsite
+; count which is 200 (since the caller's entry count is 400 and the block
+; containing the calls has a relative block frequency of 0.5).
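+;
+; Concretely, as a sketch from the metadata below: each callsite count is
+; 400 * 0.5 = 200, so callee1 drops from 1000 to 1000 - 200 = 800 (COUNT1)
+; and callee2 from 2000 to 2000 - 200 = 1800 (COUNT2).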
+
+; CHECK: @callee1(i32 %n) #0 !prof [[COUNT1:![0-9]+]]
+define i32 @callee1(i32 %n) #0 !prof !1 {
+ %cond = icmp sle i32 %n, 10
+ br i1 %cond, label %cond_true, label %cond_false
+
+cond_true:
+ %r1 = add i32 %n, 1
+ ret i32 %r1
+cond_false:
+ %r2 = add i32 %n, 2
+ ret i32 %r2
+}
+
+; CHECK: @callee2(i32 %n) #0 !prof [[COUNT2:![0-9]+]]
+define i32 @callee2(i32 %n) #0 !prof !2 {
+ %r1 = add i32 %n, 1
+ ret i32 %r1
+}
+
+define i32 @caller(i32 %n) !prof !3 {
+ %cond = icmp sle i32 %n, 100
+ br i1 %cond, label %cond_true, label %cond_false
+
+cond_true:
+ %i = call i32 @callee1(i32 %n)
+ %j = call i32 @callee2(i32 %i)
+ ret i32 %j
+cond_false:
+ ret i32 0
+}
+
+!llvm.module.flags = !{!0}
+; CHECK: [[COUNT1]] = !{!"function_entry_count", i64 800}
+; CHECK: [[COUNT2]] = !{!"function_entry_count", i64 1800}
+!0 = !{i32 1, !"MaxFunctionCount", i32 1000}
+!1 = !{!"function_entry_count", i64 1000}
+!2 = !{!"function_entry_count", i64 2000}
+!3 = !{!"function_entry_count", i64 400}
+attributes #0 = { alwaysinline }
+
diff --git a/test/Transforms/Inline/inline-cold-callee.ll b/test/Transforms/Inline/inline-cold-callee.ll
index 153f446c5c2e..404c537b297f 100644
--- a/test/Transforms/Inline/inline-cold-callee.ll
+++ b/test/Transforms/Inline/inline-cold-callee.ll
@@ -1,5 +1,4 @@
; RUN: opt < %s -inline -inlinecold-threshold=0 -S | FileCheck %s
-; RUN: opt < %s -passes='require<profile-summary>,cgscc(inline)' -inlinecold-threshold=0 -S | FileCheck %s
; This tests that a cold callee gets the (lower) inlinecold-threshold even without
; Cold hint and does not get inlined because the cost exceeds the inlinecold-threshold.
diff --git a/test/Transforms/Inline/inline-cold-callsite.ll b/test/Transforms/Inline/inline-cold-callsite.ll
new file mode 100644
index 000000000000..26ea8e50eaf1
--- /dev/null
+++ b/test/Transforms/Inline/inline-cold-callsite.ll
@@ -0,0 +1,54 @@
+; RUN: opt < %s -passes='require<profile-summary>,cgscc(inline)' -inline-threshold=100 -inline-cold-callsite-threshold=0 -S | FileCheck %s
+
+; This tests that a cold callsite gets the inline-cold-callsite-threshold
+; and does not get inlined. Another callsite to an identical callee that
+; is not cold gets inlined because the cost is below the inline-threshold.
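+;
+; A rough sketch of the coldness arithmetic (an inference from the profile
+; summary below, not a documented formula): with a caller entry count of 200
+; and branch weights of 200:1, the cond_true callsite count is about 199
+; while cond_false sees about 1, which sits at the cold cutoff in the
+; detailed summary, so only the cond_false call gets the zero threshold.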
+
+define i32 @callee1(i32 %x) !prof !21 {
+ %x1 = add i32 %x, 1
+ %x2 = add i32 %x1, 1
+ %x3 = add i32 %x2, 1
+ call void @extern()
+ ret i32 %x3
+}
+
+define i32 @caller(i32 %n) !prof !22 {
+; CHECK-LABEL: @caller(
+ %cond = icmp sle i32 %n, 100
+ br i1 %cond, label %cond_true, label %cond_false, !prof !0
+
+cond_true:
+; CHECK-LABEL: cond_true:
+; CHECK-NOT: call i32 @callee1
+; CHECK: ret i32 %x3.i
+ %i = call i32 @callee1(i32 %n)
+ ret i32 %i
+cond_false:
+; CHECK-LABEL: cond_false:
+; CHECK: call i32 @callee1
+; CHECK: ret i32 %j
+ %j = call i32 @callee1(i32 %n)
+ ret i32 %j
+}
+declare void @extern()
+
+!0 = !{!"branch_weights", i32 200, i32 1}
+
+!llvm.module.flags = !{!1}
+!21 = !{!"function_entry_count", i64 200}
+!22 = !{!"function_entry_count", i64 200}
+
+!1 = !{i32 1, !"ProfileSummary", !2}
+!2 = !{!3, !4, !5, !6, !7, !8, !9, !10}
+!3 = !{!"ProfileFormat", !"InstrProf"}
+!4 = !{!"TotalCount", i64 10000}
+!5 = !{!"MaxCount", i64 1000}
+!6 = !{!"MaxInternalCount", i64 1}
+!7 = !{!"MaxFunctionCount", i64 1000}
+!8 = !{!"NumCounts", i64 3}
+!9 = !{!"NumFunctions", i64 3}
+!10 = !{!"DetailedSummary", !11}
+!11 = !{!12, !13, !14}
+!12 = !{i32 10000, i64 1000, i32 1}
+!13 = !{i32 999000, i64 1000, i32 1}
+!14 = !{i32 999999, i64 1, i32 2}
diff --git a/test/Transforms/Inline/inline-hot-callsite-2.ll b/test/Transforms/Inline/inline-hot-callsite-2.ll
new file mode 100644
index 000000000000..ccfe2f0b5dec
--- /dev/null
+++ b/test/Transforms/Inline/inline-hot-callsite-2.ll
@@ -0,0 +1,56 @@
+; RUN: opt < %s -passes='require<profile-summary>,cgscc(inline)' -inline-threshold=0 -inlinehint-threshold=0 -hot-callsite-threshold=100 -S | FileCheck %s
+
+; This tests that a callsite which is determined to be hot based on the caller's
+; entry count and the callsite block frequency gets the hot-callsite-threshold.
+; Another callsite with the same callee that is not hot does not get inlined
+; because the cost exceeds the inline-threshold. inlinehint-threshold is set to 0
+; to ensure callee's hotness is not used to boost the threshold.
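+;
+; A rough sketch of the hotness arithmetic (an inference from the profile
+; summary below, not a documented formula): with a caller entry count of 200
+; and branch weights of 64:4, the cond_true callsite count is about
+; 200 * 64/68 = 188, above the hot count of 100 in the detailed summary,
+; while cond_false sees roughly 12 and stays below it.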
+
+define i32 @callee1(i32 %x) !prof !21 {
+ %x1 = add i32 %x, 1
+ %x2 = add i32 %x1, 1
+ %x3 = add i32 %x2, 1
+ call void @extern()
+ ret i32 %x3
+}
+
+define i32 @caller(i32 %n) !prof !22 {
+; CHECK-LABEL: @caller(
+ %cond = icmp sle i32 %n, 100
+ br i1 %cond, label %cond_true, label %cond_false, !prof !0
+
+cond_true:
+; CHECK-LABEL: cond_true:
+; CHECK-NOT: call i32 @callee1
+; CHECK: ret i32 %x3.i
+ %i = call i32 @callee1(i32 %n)
+ ret i32 %i
+cond_false:
+; CHECK-LABEL: cond_false:
+; CHECK: call i32 @callee1
+; CHECK: ret i32 %j
+ %j = call i32 @callee1(i32 %n)
+ ret i32 %j
+}
+declare void @extern()
+
+!0 = !{!"branch_weights", i32 64, i32 4}
+
+!llvm.module.flags = !{!1}
+!21 = !{!"function_entry_count", i64 200}
+!22 = !{!"function_entry_count", i64 200}
+
+!1 = !{i32 1, !"ProfileSummary", !2}
+!2 = !{!3, !4, !5, !6, !7, !8, !9, !10}
+!3 = !{!"ProfileFormat", !"InstrProf"}
+!4 = !{!"TotalCount", i64 10000}
+!5 = !{!"MaxCount", i64 1000}
+!6 = !{!"MaxInternalCount", i64 1}
+!7 = !{!"MaxFunctionCount", i64 1000}
+!8 = !{!"NumCounts", i64 3}
+!9 = !{!"NumFunctions", i64 3}
+!10 = !{!"DetailedSummary", !11}
+!11 = !{!12, !13, !14}
+!12 = !{i32 10000, i64 100, i32 1}
+!13 = !{i32 999000, i64 100, i32 1}
+!14 = !{i32 999999, i64 1, i32 2}
diff --git a/test/Transforms/Inline/inline-hot-callsite.ll b/test/Transforms/Inline/inline-hot-callsite.ll
index bdd7175b3eea..ebf4030d3d10 100644
--- a/test/Transforms/Inline/inline-hot-callsite.ll
+++ b/test/Transforms/Inline/inline-hot-callsite.ll
@@ -41,7 +41,7 @@ declare void @extern()
!1 = !{i32 1, !"ProfileSummary", !2}
!2 = !{!3, !4, !5, !6, !7, !8, !9, !10}
-!3 = !{!"ProfileFormat", !"InstrProf"}
+!3 = !{!"ProfileFormat", !"SampleProfile"}
!4 = !{!"TotalCount", i64 10000}
!5 = !{!"MaxCount", i64 1000}
!6 = !{!"MaxInternalCount", i64 1}
diff --git a/test/Transforms/Inline/inline_stats.ll b/test/Transforms/Inline/inline_stats.ll
index cf0d43e9215b..bc005b6afd51 100644
--- a/test/Transforms/Inline/inline_stats.ll
+++ b/test/Transforms/Inline/inline_stats.ll
@@ -36,9 +36,12 @@ define void @internal3() {
ret void
}
+declare void @external_decl()
+
define void @external1() alwaysinline !thinlto_src_module !0 {
call fastcc void @internal2()
call fastcc void @external2();
+ call void @external_decl();
ret void
}
diff --git a/test/Transforms/Inline/internal-scc-members.ll b/test/Transforms/Inline/internal-scc-members.ll
new file mode 100644
index 000000000000..258ce00744c5
--- /dev/null
+++ b/test/Transforms/Inline/internal-scc-members.ll
@@ -0,0 +1,31 @@
+; Test that the inliner can handle deleting functions within an SCC while still
+; processing the calls in that SCC.
+;
+; RUN: opt < %s -S -inline | FileCheck %s
+; RUN: opt < %s -S -passes=inline | FileCheck %s
+
+; CHECK-LABEL: define internal void @test1_scc0()
+; CHECK-NOT: call
+; CHECK: call void @test1_scc0()
+; CHECK-NOT: call
+; CHECK: ret
+define internal void @test1_scc0() {
+entry:
+ call void @test1_scc1()
+ ret void
+}
+
+; CHECK-NOT: @test1_scc1
+define internal void @test1_scc1() {
+entry:
+ call void @test1_scc0()
+ ret void
+}
+
+; CHECK-LABEL: define void @test1()
+; CHECK: call void @test1_scc0()
+define void @test1() {
+entry:
+ call void @test1_scc0() noinline
+ ret void
+}
diff --git a/test/Transforms/Inline/last-call-bonus.ll b/test/Transforms/Inline/last-call-bonus.ll
new file mode 100644
index 000000000000..0088d316848f
--- /dev/null
+++ b/test/Transforms/Inline/last-call-bonus.ll
@@ -0,0 +1,52 @@
+; The goal of this test is to check that LastCallToStaticBonus is applied
+; correctly when deciding inline deferral. For the test code below, when the
+; inliner evaluates the callsite of bar->baz, it checks whether inlining
+; bar->baz prevents inlining foo->bar, even when foo->bar inlining is more
+; beneficial than bar->baz inlining. As LastCallToStaticBonus has a massive
+; value, and both baz and bar have only one caller, the costs of foo->bar
+; inlining and bar->baz inlining must be non-trivial for the inliner to work
+; out that bar->baz inlining can actually prevent foo->bar inlining. To make
+; these callsites costly enough, a loop unrolling pass with a very high
+; threshold is used to preprocess the test.
+
+; RUN: opt < %s -loop-unroll -inline -unroll-threshold=15000 -inline-threshold=250 -S | FileCheck %s
+; CHECK-LABEL: define internal i32 @bar()
+
+define internal i32 @baz() {
+entry:
+ br label %bb1
+
+bb1:
+ %ind = phi i32 [ 0, %entry ], [ %inc, %bb1 ]
+ call void @extern()
+ %inc = add nsw i32 %ind, 1
+ %cmp = icmp sgt i32 %inc, 510
+ br i1 %cmp, label %ret, label %bb1
+
+ret:
+ ret i32 0
+}
+
+define internal i32 @bar() {
+entry:
+ br label %bb1
+
+bb1:
+ %ind = phi i32 [ 0, %entry ], [ %inc, %bb1 ]
+ call void @extern()
+ %inc = add nsw i32 %ind, 1
+ %cmp = icmp sgt i32 %inc, 510
+ br i1 %cmp, label %ret, label %bb1
+
+ret:
+ call i32 @baz()
+ ret i32 0
+}
+
+define i32 @foo() {
+entry:
+ call i32 @bar()
+ ret i32 0
+}
+
+declare void @extern()
diff --git a/test/Transforms/Inline/lifetime-no-datalayout.ll b/test/Transforms/Inline/lifetime-no-datalayout.ll
index 0212e69d624a..5d1872c6a244 100644
--- a/test/Transforms/Inline/lifetime-no-datalayout.ll
+++ b/test/Transforms/Inline/lifetime-no-datalayout.ll
@@ -13,9 +13,9 @@ define void @helper() {
define void @test() {
; CHECK-LABEL: @test(
; CHECK-NOT: lifetime
-; CHECK: llvm.lifetime.start(i64 1
+; CHECK: llvm.lifetime.start.p0i8(i64 1
; CHECK-NOT: lifetime
-; CHECK: llvm.lifetime.end(i64 1
+; CHECK: llvm.lifetime.end.p0i8(i64 1
call void @helper()
; CHECK-NOT: lifetime
; CHECK: ret void
diff --git a/test/Transforms/Inline/lifetime.ll b/test/Transforms/Inline/lifetime.ll
index 4f415e58f1bf..c47091395fce 100644
--- a/test/Transforms/Inline/lifetime.ll
+++ b/test/Transforms/Inline/lifetime.ll
@@ -2,25 +2,25 @@
; RUN: opt -passes='cgscc(inline)' -S < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-declare void @llvm.lifetime.start(i64, i8*)
-declare void @llvm.lifetime.end(i64, i8*)
+declare void @llvm.lifetime.start.p0i8(i64, i8*)
+declare void @llvm.lifetime.end.p0i8(i64, i8*)
define void @helper_both_markers() {
%a = alloca i8
; Size in llvm.lifetime.start / llvm.lifetime.end differs from
; allocation size. We should use the former.
- call void @llvm.lifetime.start(i64 2, i8* %a)
- call void @llvm.lifetime.end(i64 2, i8* %a)
+ call void @llvm.lifetime.start.p0i8(i64 2, i8* %a)
+ call void @llvm.lifetime.end.p0i8(i64 2, i8* %a)
ret void
}
define void @test_both_markers() {
; CHECK-LABEL: @test_both_markers(
-; CHECK: llvm.lifetime.start(i64 2
-; CHECK-NEXT: llvm.lifetime.end(i64 2
+; CHECK: llvm.lifetime.start.p0i8(i64 2
+; CHECK-NEXT: llvm.lifetime.end.p0i8(i64 2
call void @helper_both_markers()
-; CHECK-NEXT: llvm.lifetime.start(i64 2
-; CHECK-NEXT: llvm.lifetime.end(i64 2
+; CHECK-NEXT: llvm.lifetime.start.p0i8(i64 2
+; CHECK-NEXT: llvm.lifetime.end.p0i8(i64 2
call void @helper_both_markers()
; CHECK-NEXT: ret void
ret void
@@ -41,14 +41,14 @@ define void @helper_no_markers() {
define void @test_no_marker() {
; CHECK-LABEL: @test_no_marker(
; CHECK-NOT: lifetime
-; CHECK: llvm.lifetime.start(i64 1
+; CHECK: llvm.lifetime.start.p0i8(i64 1
; CHECK-NOT: lifetime
-; CHECK: llvm.lifetime.end(i64 1
+; CHECK: llvm.lifetime.end.p0i8(i64 1
call void @helper_no_markers()
; CHECK-NOT: lifetime
-; CHECK: llvm.lifetime.start(i64 1
+; CHECK: llvm.lifetime.start.p0i8(i64 1
; CHECK-NOT: lifetime
-; CHECK: llvm.lifetime.end(i64 1
+; CHECK: llvm.lifetime.end.p0i8(i64 1
call void @helper_no_markers()
; CHECK-NOT: lifetime
; CHECK: ret void
@@ -58,23 +58,23 @@ define void @test_no_marker() {
define void @helper_two_casts() {
%a = alloca i32
%b = bitcast i32* %a to i8*
- call void @llvm.lifetime.start(i64 4, i8* %b)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %b)
%c = bitcast i32* %a to i8*
- call void @llvm.lifetime.end(i64 4, i8* %c)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %c)
ret void
}
define void @test_two_casts() {
; CHECK-LABEL: @test_two_casts(
; CHECK-NOT: lifetime
-; CHECK: llvm.lifetime.start(i64 4
+; CHECK: llvm.lifetime.start.p0i8(i64 4
; CHECK-NOT: lifetime
-; CHECK: llvm.lifetime.end(i64 4
+; CHECK: llvm.lifetime.end.p0i8(i64 4
call void @helper_two_casts()
; CHECK-NOT: lifetime
-; CHECK: llvm.lifetime.start(i64 4
+; CHECK: llvm.lifetime.start.p0i8(i64 4
; CHECK-NOT: lifetime
-; CHECK: llvm.lifetime.end(i64 4
+; CHECK: llvm.lifetime.end.p0i8(i64 4
call void @helper_two_casts()
; CHECK-NOT: lifetime
; CHECK: ret void
@@ -91,9 +91,9 @@ define void @helper_arrays_alloca() {
define void @test_arrays_alloca() {
; CHECK-LABEL: @test_arrays_alloca(
; CHECK-NOT: lifetime
-; CHECK: llvm.lifetime.start(i64 40,
+; CHECK: llvm.lifetime.start.p0i8(i64 40,
; CHECK-NOT: lifetime
-; CHECK: llvm.lifetime.end(i64 40,
+; CHECK: llvm.lifetime.end.p0i8(i64 40,
call void @helper_arrays_alloca()
; CHECK-NOT: lifetime
; CHECK: ret void
diff --git a/test/Transforms/Inline/monster_scc.ll b/test/Transforms/Inline/monster_scc.ll
new file mode 100644
index 000000000000..0f8f1f21c8b5
--- /dev/null
+++ b/test/Transforms/Inline/monster_scc.ll
@@ -0,0 +1,460 @@
+; This test creates a monster SCC with a very pernicious call graph. It builds
+; a cycle of cross-connected pairs of functions with interesting inlining
+; decisions throughout, but ultimately trivial code complexity.
+;
+; Typically, a greedy approach to inlining works well for bottom-up inliners
+; such as LLVM's. However, there is no way to be bottom-up over an SCC: it's
+; a cycle! Greedily inlining as much as possible into each function of this
+; *SCC* will have the disastrous effect of inlining all N-1 functions into the
+; first one visited, N-2 functions into the second one visited, N-3 into the
+; third, and so on. This is because until inlining occurs, each function in
+; isolation appears to be an excellent inline candidate.
+;
+; Note that the exact number of calls in each function doesn't really matter.
+; It is mostly a function of cost thresholds and visit order. Because this is an
+; SCC, there is no "right" or "wrong" answer here as long as no function blows
+; up to be *huge*. The specific pattern of concern is one or more functions
+; ending up with more than 16 calls each.
+;
+; This test is extracted from the following C++ program compiled with Clang.
+; The IR is simplified with SROA, instcombine, and simplify-cfg. Then C++
+; linkage stuff, attributes, target specific things, metadata and comments were
+; removed. The order of the functions is also made more predictable than Clang's
+; output order.
+;
+; void g(int);
+;
+; template <bool K, int N> void f(bool *B, bool *E) {
+; if (K)
+; g(N);
+; if (B == E)
+; return;
+; if (*B)
+; f<true, N + 1>(B + 1, E);
+; else
+; f<false, N + 1>(B + 1, E);
+; }
+; template <> void f<false, MAX>(bool *B, bool *E) { return f<false, 0>(B, E); }
+; template <> void f<true, MAX>(bool *B, bool *E) { return f<true, 0>(B, E); }
+;
+; void test(bool *B, bool *E) { f<false, 0>(B, E); }
+;
+; RUN: opt -S < %s -inline -inline-threshold=150 | FileCheck %s --check-prefixes=CHECK,OLD
+; RUN: opt -S < %s -passes=inline -inline-threshold=150 | FileCheck %s --check-prefixes=CHECK,NEW
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+declare void @_Z1gi(i32)
+
+; CHECK-LABEL: define void @_Z1fILb0ELi0EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1gi(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb1ELi2EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb0ELi2EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb0ELi1EEvPbS0_(
+; OLD-NOT: call
+; NEW-NOT: call
+; NEW: call void @_Z1gi(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb1ELi2EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi2EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb1ELi2EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi2EEvPbS0_(
+; NEW-NOT: call
+define void @_Z1fILb0ELi0EEvPbS0_(i8* %B, i8* %E) {
+entry:
+ %cmp = icmp eq i8* %B, %E
+ br i1 %cmp, label %if.end3, label %if.end
+
+if.end:
+ %0 = load i8, i8* %B, align 1
+ %tobool = icmp eq i8 %0, 0
+ %add.ptr2 = getelementptr inbounds i8, i8* %B, i64 1
+ br i1 %tobool, label %if.else, label %if.then1
+
+if.then1:
+ call void @_Z1fILb1ELi1EEvPbS0_(i8* %add.ptr2, i8* %E)
+ br label %if.end3
+
+if.else:
+ call void @_Z1fILb0ELi1EEvPbS0_(i8* %add.ptr2, i8* %E)
+ br label %if.end3
+
+if.end3:
+ ret void
+}
+
+; CHECK-LABEL: define void @_Z1fILb1ELi0EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1gi(
+; OLD-NOT: call
+; OLD: call void @_Z1gi(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb1ELi2EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb0ELi2EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb0ELi1EEvPbS0_(
+; OLD-NOT: call
+; NEW-NOT: call
+; NEW: call void @_Z1gi(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb1ELi1EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb1ELi2EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi2EEvPbS0_(
+; NEW-NOT: call
+define void @_Z1fILb1ELi0EEvPbS0_(i8* %B, i8* %E) {
+entry:
+ call void @_Z1gi(i32 0)
+ %cmp = icmp eq i8* %B, %E
+ br i1 %cmp, label %if.end3, label %if.end
+
+if.end:
+ %0 = load i8, i8* %B, align 1
+ %tobool = icmp eq i8 %0, 0
+ %add.ptr2 = getelementptr inbounds i8, i8* %B, i64 1
+ br i1 %tobool, label %if.else, label %if.then1
+
+if.then1:
+ call void @_Z1fILb1ELi1EEvPbS0_(i8* %add.ptr2, i8* %E)
+ br label %if.end3
+
+if.else:
+ call void @_Z1fILb0ELi1EEvPbS0_(i8* %add.ptr2, i8* %E)
+ br label %if.end3
+
+if.end3:
+ ret void
+}
+
+; CHECK-LABEL: define void @_Z1fILb0ELi1EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1gi(
+; OLD-NOT: call
+; OLD: call void @_Z1gi(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb1ELi0EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb0ELi0EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb1ELi0EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb0ELi0EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb0ELi2EEvPbS0_(
+; OLD-NOT: call
+; NEW-NOT: call
+; NEW: call void @_Z1fILb1ELi2EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1gi(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb1ELi0EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi3EEvPbS0_(
+; NEW-NOT: call
+define void @_Z1fILb0ELi1EEvPbS0_(i8* %B, i8* %E) {
+entry:
+ %cmp = icmp eq i8* %B, %E
+ br i1 %cmp, label %if.end3, label %if.end
+
+if.end:
+ %0 = load i8, i8* %B, align 1
+ %tobool = icmp eq i8 %0, 0
+ %add.ptr2 = getelementptr inbounds i8, i8* %B, i64 1
+ br i1 %tobool, label %if.else, label %if.then1
+
+if.then1:
+ call void @_Z1fILb1ELi2EEvPbS0_(i8* %add.ptr2, i8* %E)
+ br label %if.end3
+
+if.else:
+ call void @_Z1fILb0ELi2EEvPbS0_(i8* %add.ptr2, i8* %E)
+ br label %if.end3
+
+if.end3:
+ ret void
+}
+
+; CHECK-LABEL: define void @_Z1fILb1ELi1EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1gi(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb1ELi2EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb0ELi2EEvPbS0_(
+; OLD-NOT: call
+; NEW-NOT: call
+; NEW: call void @_Z1gi(
+; NEW-NOT: call
+; NEW: call void @_Z1gi(
+; NEW-NOT: call
+; NEW: call void @_Z1gi(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb1ELi0EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi3EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1gi(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb1ELi0EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi3EEvPbS0_(
+; NEW-NOT: call
+define void @_Z1fILb1ELi1EEvPbS0_(i8* %B, i8* %E) {
+entry:
+ call void @_Z1gi(i32 1)
+ %cmp = icmp eq i8* %B, %E
+; CHECK-NOT: call
+ br i1 %cmp, label %if.end3, label %if.end
+
+if.end:
+ %0 = load i8, i8* %B, align 1
+ %tobool = icmp eq i8 %0, 0
+ %add.ptr2 = getelementptr inbounds i8, i8* %B, i64 1
+ br i1 %tobool, label %if.else, label %if.then1
+
+if.then1:
+ call void @_Z1fILb1ELi2EEvPbS0_(i8* %add.ptr2, i8* %E)
+ br label %if.end3
+
+if.else:
+ call void @_Z1fILb0ELi2EEvPbS0_(i8* %add.ptr2, i8* %E)
+ br label %if.end3
+
+if.end3:
+ ret void
+}
+
+; CHECK-LABEL: define void @_Z1fILb0ELi2EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1gi(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb1ELi0EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb0ELi0EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb1ELi0EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb0ELi0EEvPbS0_(
+; OLD-NOT: call
+; NEW-NOT: call
+; NEW: call void @_Z1gi(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb1ELi0EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb1ELi4EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
+; NEW-NOT: call
+define void @_Z1fILb0ELi2EEvPbS0_(i8* %B, i8* %E) {
+entry:
+ %cmp = icmp eq i8* %B, %E
+ br i1 %cmp, label %if.end3, label %if.end
+
+if.end:
+ %0 = load i8, i8* %B, align 1
+ %tobool = icmp eq i8 %0, 0
+ %add.ptr2 = getelementptr inbounds i8, i8* %B, i64 1
+ br i1 %tobool, label %if.else, label %if.then1
+
+if.then1:
+ call void @_Z1fILb1ELi3EEvPbS0_(i8* %add.ptr2, i8* %E)
+ br label %if.end3
+
+if.else:
+ call void @_Z1fILb0ELi3EEvPbS0_(i8* %add.ptr2, i8* %E)
+ br label %if.end3
+
+if.end3:
+ ret void
+}
+
+; CHECK-LABEL: define void @_Z1fILb1ELi2EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1gi(
+; OLD-NOT: call
+; OLD: call void @_Z1gi(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb1ELi0EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb0ELi0EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb1ELi0EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb0ELi0EEvPbS0_(
+; OLD-NOT: call
+; NEW-NOT: call
+; NEW: call void @_Z1gi(
+; NEW-NOT: call
+; NEW: call void @_Z1gi(
+; NEW-NOT: call
+; NEW: call void @_Z1gi(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb1ELi1EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi1EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1gi(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb1ELi1EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi1EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
+; NEW-NOT: call
+define void @_Z1fILb1ELi2EEvPbS0_(i8* %B, i8* %E) {
+entry:
+ call void @_Z1gi(i32 2)
+ %cmp = icmp eq i8* %B, %E
+ br i1 %cmp, label %if.end3, label %if.end
+
+if.end:
+ %0 = load i8, i8* %B, align 1
+ %tobool = icmp eq i8 %0, 0
+ %add.ptr2 = getelementptr inbounds i8, i8* %B, i64 1
+ br i1 %tobool, label %if.else, label %if.then1
+
+if.then1:
+ call void @_Z1fILb1ELi3EEvPbS0_(i8* %add.ptr2, i8* %E)
+ br label %if.end3
+
+if.else:
+ call void @_Z1fILb0ELi3EEvPbS0_(i8* %add.ptr2, i8* %E)
+ br label %if.end3
+
+if.end3:
+ ret void
+}
+
+; CHECK-LABEL: define void @_Z1fILb0ELi3EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb1ELi0EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb0ELi0EEvPbS0_(
+; OLD-NOT: call
+; NEW-NOT: call
+; NEW: call void @_Z1gi(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb1ELi1EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi1EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
+; NEW-NOT: call
+define void @_Z1fILb0ELi3EEvPbS0_(i8* %B, i8* %E) {
+entry:
+ %cmp = icmp eq i8* %B, %E
+ br i1 %cmp, label %if.end3, label %if.end
+
+if.end:
+ %0 = load i8, i8* %B, align 1
+ %tobool = icmp eq i8 %0, 0
+ %add.ptr2 = getelementptr inbounds i8, i8* %B, i64 1
+ br i1 %tobool, label %if.else, label %if.then1
+
+if.then1:
+ call void @_Z1fILb1ELi4EEvPbS0_(i8* %add.ptr2, i8* %E)
+ br label %if.end3
+
+if.else:
+ call void @_Z1fILb0ELi4EEvPbS0_(i8* %add.ptr2, i8* %E)
+ br label %if.end3
+
+if.end3:
+ ret void
+}
+
+; CHECK-LABEL: define void @_Z1fILb1ELi3EEvPbS0_(
+; CHECK-NOT: call
+; CHECK: call void @_Z1gi(
+; CHECK-NOT: call
+; CHECK: call void @_Z1fILb1ELi0EEvPbS0_(
+; CHECK-NOT: call
+; CHECK: call void @_Z1fILb0ELi0EEvPbS0_(
+; CHECK-NOT: call
+define void @_Z1fILb1ELi3EEvPbS0_(i8* %B, i8* %E) {
+entry:
+ call void @_Z1gi(i32 3)
+ %cmp = icmp eq i8* %B, %E
+ br i1 %cmp, label %if.end3, label %if.end
+
+if.end:
+ %0 = load i8, i8* %B, align 1
+ %tobool = icmp eq i8 %0, 0
+ %add.ptr2 = getelementptr inbounds i8, i8* %B, i64 1
+ br i1 %tobool, label %if.else, label %if.then1
+
+if.then1:
+ call void @_Z1fILb1ELi4EEvPbS0_(i8* %add.ptr2, i8* %E)
+ br label %if.end3
+
+if.else:
+ call void @_Z1fILb0ELi4EEvPbS0_(i8* %add.ptr2, i8* %E)
+ br label %if.end3
+
+if.end3:
+ ret void
+}
+
+; CHECK-LABEL: define void @_Z1fILb0ELi4EEvPbS0_(
+; CHECK-NOT: call
+; CHECK: call void @_Z1fILb0ELi0EEvPbS0_(
+; CHECK-NOT: call
+define void @_Z1fILb0ELi4EEvPbS0_(i8* %B, i8* %E) {
+entry:
+ call void @_Z1fILb0ELi0EEvPbS0_(i8* %B, i8* %E)
+ ret void
+}
+
+; CHECK-LABEL: define void @_Z1fILb1ELi4EEvPbS0_(
+; OLD-NOT: call
+; OLD: call void @_Z1fILb1ELi0EEvPbS0_(
+; OLD-NOT: call
+; NEW-NOT: call
+; NEW: call void @_Z1gi(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb1ELi1EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb1ELi2EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1gi(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb1ELi0EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
+; NEW-NOT: call
+; NEW: call void @_Z1fILb0ELi3EEvPbS0_(
+; NEW-NOT: call
+define void @_Z1fILb1ELi4EEvPbS0_(i8* %B, i8* %E) {
+entry:
+ call void @_Z1fILb1ELi0EEvPbS0_(i8* %B, i8* %E)
+ ret void
+}
+
+; CHECK-LABEL: define void @_Z4testPbS_(
+; CHECK: call
+; CHECK-NOT: call
+define void @_Z4testPbS_(i8* %B, i8* %E) {
+entry:
+ call void @_Z1fILb0ELi0EEvPbS0_(i8* %B, i8* %E)
+ ret void
+}
+
diff --git a/test/Transforms/Inline/optimization-remarks-with-hotness.ll b/test/Transforms/Inline/optimization-remarks-with-hotness.ll
index 9611a2dd1bd4..1d6d135bdda8 100644
--- a/test/Transforms/Inline/optimization-remarks-with-hotness.ll
+++ b/test/Transforms/Inline/optimization-remarks-with-hotness.ll
@@ -4,8 +4,7 @@
; CHECK: foo should always be inlined (cost=always) (hotness: 30)
; CHECK: foo inlined into bar (hotness: 30)
-; CHECK: foz should never be inlined (cost=never) (hotness: 30)
-; CHECK: foz will not be inlined into bar (hotness: 30)
+; CHECK: foz not inlined into bar because it should never be inlined (cost=never) (hotness: 30)
; Function Attrs: alwaysinline nounwind uwtable
define i32 @foo() #0 !prof !1 {
diff --git a/test/Transforms/Inline/optimization-remarks.ll b/test/Transforms/Inline/optimization-remarks.ll
index 59cf08327350..61e270cff76c 100644
--- a/test/Transforms/Inline/optimization-remarks.ll
+++ b/test/Transforms/Inline/optimization-remarks.ll
@@ -9,8 +9,7 @@
; NO_HOTNESS-NOT: fox will not be inlined into bar because its definition is unavailable
; CHECK: foo should always be inlined (cost=always)
; CHECK: foo inlined into bar
-; CHECK: foz should never be inlined (cost=never)
-; CHECK: foz will not be inlined into bar
+; CHECK: foz not inlined into bar because it should never be inlined (cost=never)
; Function Attrs: alwaysinline nounwind uwtable
define i32 @foo(i32 %x, i32 %y) #0 {
diff --git a/test/Transforms/Inline/prof-update.ll b/test/Transforms/Inline/prof-update.ll
new file mode 100644
index 000000000000..38fcc7e45996
--- /dev/null
+++ b/test/Transforms/Inline/prof-update.ll
@@ -0,0 +1,39 @@
+; RUN: opt < %s -inline -S | FileCheck %s
+; Checks that the inliner updates the branch_weights annotation for call instructions.
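+; A sketch of the expected scaling, inferred from the CHECK lines below: the
+; callsite carries a count of 400 against callee's entry count of 1000, so
+; the entry count drops to 1000 - 400 = 600, and ext's weight of 2000 is
+; split pro rata: 2000 * 400/1000 = 800 goes to the caller's clone and
+; 2000 - 800 = 1200 remains in the callee.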
+
+declare void @ext();
+declare void @ext1();
+
+; CHECK: define void @callee(i32 %n) !prof ![[ENTRY_COUNT:[0-9]*]]
+define void @callee(i32 %n) !prof !1 {
+ %cond = icmp sle i32 %n, 10
+ br i1 %cond, label %cond_true, label %cond_false
+cond_true:
+; ext1 is optimized away, thus not updated.
+; CHECK: call void @ext1(), !prof ![[COUNT_CALLEE1:[0-9]*]]
+ call void @ext1(), !prof !2
+ ret void
+cond_false:
+; ext is cloned and updated.
+; CHECK: call void @ext(), !prof ![[COUNT_CALLEE:[0-9]*]]
+ call void @ext(), !prof !2
+ ret void
+}
+
+; CHECK: define void @caller()
+define void @caller() {
+; CHECK: call void @ext(), !prof ![[COUNT_CALLER:[0-9]*]]
+ call void @callee(i32 15), !prof !3
+ ret void
+}
+
+!llvm.module.flags = !{!0}
+!0 = !{i32 1, !"MaxFunctionCount", i32 2000}
+!1 = !{!"function_entry_count", i64 1000}
+!2 = !{!"branch_weights", i64 2000}
+!3 = !{!"branch_weights", i64 400}
+attributes #0 = { alwaysinline }
+; CHECK: ![[ENTRY_COUNT]] = !{!"function_entry_count", i64 600}
+; CHECK: ![[COUNT_CALLEE1]] = !{!"branch_weights", i64 2000}
+; CHECK: ![[COUNT_CALLEE]] = !{!"branch_weights", i32 1200}
+; CHECK: ![[COUNT_CALLER]] = !{!"branch_weights", i32 800}
diff --git a/test/Transforms/InstCombine/2008-01-29-AddICmp.ll b/test/Transforms/InstCombine/2008-01-29-AddICmp.ll
deleted file mode 100644
index a33eb9c1ddd4..000000000000
--- a/test/Transforms/InstCombine/2008-01-29-AddICmp.ll
+++ /dev/null
@@ -1,85 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -instcombine -S | FileCheck %s
-
-; PR1949
-
-define i1 @test1(i32 %a) {
-; CHECK-LABEL: @test1(
-; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 %a, -5
-; CHECK-NEXT: ret i1 [[C]]
-;
- %b = add i32 %a, 4
- %c = icmp ult i32 %b, 4
- ret i1 %c
-}
-
-define <2 x i1> @test1vec(<2 x i32> %a) {
-; CHECK-LABEL: @test1vec(
-; CHECK-NEXT: [[C:%.*]] = icmp ugt <2 x i32> %a, <i32 -5, i32 -5>
-; CHECK-NEXT: ret <2 x i1> [[C]]
-;
- %b = add <2 x i32> %a, <i32 4, i32 4>
- %c = icmp ult <2 x i32> %b, <i32 4, i32 4>
- ret <2 x i1> %c
-}
-
-define i1 @test2(i32 %a) {
-; CHECK-LABEL: @test2(
-; CHECK-NEXT: [[C:%.*]] = icmp ult i32 %a, 4
-; CHECK-NEXT: ret i1 [[C]]
-;
- %b = sub i32 %a, 4
- %c = icmp ugt i32 %b, -5
- ret i1 %c
-}
-
-define <2 x i1> @test2vec(<2 x i32> %a) {
-; CHECK-LABEL: @test2vec(
-; CHECK-NEXT: [[C:%.*]] = icmp ult <2 x i32> %a, <i32 4, i32 4>
-; CHECK-NEXT: ret <2 x i1> [[C]]
-;
- %b = sub <2 x i32> %a, <i32 4, i32 4>
- %c = icmp ugt <2 x i32> %b, <i32 -5, i32 -5>
- ret <2 x i1> %c
-}
-
-define i1 @test3(i32 %a) {
-; CHECK-LABEL: @test3(
-; CHECK-NEXT: [[C:%.*]] = icmp sgt i32 %a, 2147483643
-; CHECK-NEXT: ret i1 [[C]]
-;
- %b = add i32 %a, 4
- %c = icmp slt i32 %b, 2147483652
- ret i1 %c
-}
-
-define <2 x i1> @test3vec(<2 x i32> %a) {
-; CHECK-LABEL: @test3vec(
-; CHECK-NEXT: [[C:%.*]] = icmp sgt <2 x i32> %a, <i32 2147483643, i32 2147483643>
-; CHECK-NEXT: ret <2 x i1> [[C]]
-;
- %b = add <2 x i32> %a, <i32 4, i32 4>
- %c = icmp slt <2 x i32> %b, <i32 2147483652, i32 2147483652>
- ret <2 x i1> %c
-}
-
-define i1 @test4(i32 %a) {
-; CHECK-LABEL: @test4(
-; CHECK-NEXT: [[C:%.*]] = icmp slt i32 %a, -4
-; CHECK-NEXT: ret i1 [[C]]
-;
- %b = add i32 %a, 2147483652
- %c = icmp sge i32 %b, 4
- ret i1 %c
-}
-
-define <2 x i1> @test4vec(<2 x i32> %a) {
-; CHECK-LABEL: @test4vec(
-; CHECK-NEXT: [[C:%.*]] = icmp slt <2 x i32> %a, <i32 -4, i32 -4>
-; CHECK-NEXT: ret <2 x i1> [[C]]
-;
- %b = add <2 x i32> %a, <i32 2147483652, i32 2147483652>
- %c = icmp sge <2 x i32> %b, <i32 4, i32 4>
- ret <2 x i1> %c
-}
-
diff --git a/test/Transforms/InstCombine/2008-05-22-NegValVector.ll b/test/Transforms/InstCombine/2008-05-22-NegValVector.ll
index bf92faf2fec5..58259be8bc92 100644
--- a/test/Transforms/InstCombine/2008-05-22-NegValVector.ll
+++ b/test/Transforms/InstCombine/2008-05-22-NegValVector.ll
@@ -6,3 +6,9 @@ define <3 x i8> @f(<3 x i8> %a) {
ret <3 x i8> %B
}
+define <3 x i4> @g(<3 x i4> %a) {
+ %A = sub <3 x i4> zeroinitializer, %a
+ %B = mul <3 x i4> %A, <i4 5, i4 5, i4 5>
+ ret <3 x i4> %B
+}
+
diff --git a/test/Transforms/InstCombine/2008-11-20-DivMulRem.ll b/test/Transforms/InstCombine/2008-11-20-DivMulRem.ll
deleted file mode 100644
index 0c0e55a0b2d9..000000000000
--- a/test/Transforms/InstCombine/2008-11-20-DivMulRem.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; RUN: opt < %s -instcombine -S | FileCheck %s
-; PR3103
-
-define i8 @test1(i8 %x, i8 %y) {
-; CHECK-LABEL: @test1(
- %A = udiv i8 %x, %y
-; CHECK-NEXT: urem
- %B = mul i8 %A, %y
- %C = sub i8 %x, %B
- ret i8 %C
-; CHECK-NEXT: ret
-}
-
-define i8 @test2(i8 %x, i8 %y) {
-; CHECK-LABEL: @test2(
- %A = sdiv i8 %x, %y
-; CHECK-NEXT: srem
- %B = mul i8 %A, %y
- %C = sub i8 %x, %B
- ret i8 %C
-; CHECK-NEXT: ret
-}
-
-define i8 @test3(i8 %x, i8 %y) {
-; CHECK-LABEL: @test3(
- %A = udiv i8 %x, %y
-; CHECK-NEXT: urem
- %B = mul i8 %A, %y
- %C = sub i8 %B, %x
-; CHECK-NEXT: sub
- ret i8 %C
-; CHECK-NEXT: ret
-}
-
-define i8 @test4(i8 %x) {
-; CHECK-LABEL: @test4(
- %A = udiv i8 %x, 3
-; CHECK-NEXT: urem
- %B = mul i8 %A, -3
-; CHECK-NEXT: sub
- %C = sub i8 %x, %B
-; CHECK-NEXT: add
- ret i8 %C
-; CHECK-NEXT: ret
-}
-
-define i32 @test5(i32 %x, i32 %y) {
-; CHECK-LABEL: @test5(
-; (((X / Y) * Y) / Y) -> X / Y
- %div = sdiv i32 %x, %y
-; CHECK-NEXT: sdiv
- %mul = mul i32 %div, %y
- %r = sdiv i32 %mul, %y
- ret i32 %r
-; CHECK-NEXT: ret
-}
-
-define i32 @test6(i32 %x, i32 %y) {
-; CHECK-LABEL: @test6(
-; (((X / Y) * Y) / Y) -> X / Y
- %div = udiv i32 %x, %y
-; CHECK-NEXT: udiv
- %mul = mul i32 %div, %y
- %r = udiv i32 %mul, %y
- ret i32 %r
-; CHECK-NEXT: ret
-}
diff --git a/test/Transforms/InstCombine/2009-03-20-AShrOverShift.ll b/test/Transforms/InstCombine/2009-03-20-AShrOverShift.ll
deleted file mode 100644
index 4d4797720c53..000000000000
--- a/test/Transforms/InstCombine/2009-03-20-AShrOverShift.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: opt < %s -instcombine -S | grep "ashr i32 %val, 31"
-; PR3851
-
-define i32 @foo2(i32 %val) nounwind {
-entry:
- %shr = ashr i32 %val, 15 ; <i32> [#uses=3]
- %shr4 = ashr i32 %shr, 17 ; <i32> [#uses=1]
- ret i32 %shr4
- }
diff --git a/test/Transforms/InstCombine/2012-07-25-LoadPart.ll b/test/Transforms/InstCombine/2012-07-25-LoadPart.ll
index 14fcf52fe9a7..71255ebbf81f 100644
--- a/test/Transforms/InstCombine/2012-07-25-LoadPart.ll
+++ b/test/Transforms/InstCombine/2012-07-25-LoadPart.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -default-data-layout="e-p:32:32:32" -instcombine -S | FileCheck %s --check-prefix=LE
-; RUN: opt < %s -default-data-layout="E-p:32:32:32" -instcombine -S | FileCheck %s --check-prefix=BE
+; RUN: opt < %s -data-layout="e-p:32:32:32" -instcombine -S | FileCheck %s --check-prefix=LE
+; RUN: opt < %s -data-layout="E-p:32:32:32" -instcombine -S | FileCheck %s --check-prefix=BE
; PR13442
@test = constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
diff --git a/test/Transforms/InstCombine/X86FsubCmpCombine.ll b/test/Transforms/InstCombine/X86FsubCmpCombine.ll
new file mode 100644
index 000000000000..fde0692d00a2
--- /dev/null
+++ b/test/Transforms/InstCombine/X86FsubCmpCombine.ll
@@ -0,0 +1,181 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; The test checks the folding of cmp(sub(a,b),0) into cmp(a,b).
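+; Note the contrast below: the fold is only expected when the fsub carries
+; the ninf fast-math flag; the _safe variant omits it, and the checks show
+; the subtract being kept there.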
+
+define i8 @sub_compare_foldingPD128_safe(<2 x double> %a, <2 x double> %b){
+; CHECK-LABEL: @sub_compare_foldingPD128_safe(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SUB_SAFE:%.*]] = fsub <2 x double> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> [[SUB_SAFE]], <2 x double> zeroinitializer, i32 5, i8 -1)
+; CHECK-NEXT: ret i8 [[TMP0]]
+;
+entry:
+ %sub.safe = fsub <2 x double> %a, %b
+ %0 = tail call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> %sub.safe , <2 x double> zeroinitializer, i32 5, i8 -1)
+ ret i8 %0
+}
+
+
+define i8 @sub_compare_foldingPD128(<2 x double> %a, <2 x double> %b){
+; CHECK-LABEL: @sub_compare_foldingPD128(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], i32 5, i8 -1)
+; CHECK-NEXT: ret i8 [[TMP0]]
+;
+entry:
+ %sub.i = fsub ninf <2 x double> %a, %b
+ %0 = tail call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> %sub.i , <2 x double> zeroinitializer, i32 5, i8 -1)
+ ret i8 %0
+}
+
+
+define i8 @sub_compare_foldingPD256(<4 x double> %a, <4 x double> %b){
+; CHECK-LABEL: @sub_compare_foldingPD256(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.pd.256(<4 x double> [[A:%.*]], <4 x double> [[B:%.*]], i32 5, i8 -1)
+; CHECK-NEXT: ret i8 [[TMP0]]
+;
+entry:
+ %sub.i1 = fsub ninf <4 x double> %a, %b
+ %0 = tail call i8 @llvm.x86.avx512.mask.cmp.pd.256(<4 x double> %sub.i1, <4 x double> zeroinitializer, i32 5, i8 -1)
+ ret i8 %0
+}
+
+
+define i8 @sub_compare_foldingPD512(<8 x double> %a, <8 x double> %b){
+; CHECK-LABEL: @sub_compare_foldingPD512(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> [[A:%.*]], <8 x double> [[B:%.*]], i32 11, i8 -1, i32 4)
+; CHECK-NEXT: ret i8 [[TMP0]]
+;
+entry:
+ %sub.i2 = fsub ninf <8 x double> %a, %b
+ %0 = tail call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %sub.i2, <8 x double> zeroinitializer, i32 11, i8 -1, i32 4)
+ ret i8 %0
+}
+
+
+define i8 @sub_compare_foldingPS128(<4 x float> %a, <4 x float> %b){
+; CHECK-LABEL: @sub_compare_foldingPS128(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.ps.128(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], i32 12, i8 -1)
+; CHECK-NEXT: ret i8 [[TMP0]]
+;
+entry:
+ %sub.i3 = fsub ninf <4 x float> %a, %b
+ %0 = tail call i8 @llvm.x86.avx512.mask.cmp.ps.128(<4 x float> %sub.i3, <4 x float> zeroinitializer, i32 12, i8 -1)
+ ret i8 %0
+}
+
+
+define i8 @sub_compare_foldingPS256(<8 x float> %a, <8 x float> %b){
+; CHECK-LABEL: @sub_compare_foldingPS256(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.ps.256(<8 x float> [[A:%.*]], <8 x float> [[B:%.*]], i32 5, i8 -1)
+; CHECK-NEXT: ret i8 [[TMP0]]
+;
+entry:
+ %sub.i4 = fsub ninf <8 x float> %a, %b
+ %0 = tail call i8 @llvm.x86.avx512.mask.cmp.ps.256(<8 x float> %sub.i4, <8 x float> zeroinitializer, i32 5, i8 -1)
+ ret i8 %0
+}
+
+
+define i16 @sub_compare_foldingPS512(<16 x float> %a, <16 x float> %b){
+; CHECK-LABEL: @sub_compare_foldingPS512(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> [[A:%.*]], <16 x float> [[B:%.*]], i32 11, i16 -1, i32 4)
+; CHECK-NEXT: ret i16 [[TMP0]]
+;
+entry:
+ %sub.i5 = fsub ninf <16 x float> %a, %b
+ %0 = tail call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %sub.i5, <16 x float> zeroinitializer, i32 11, i16 -1, i32 4)
+ ret i16 %0
+}
+
+
+
+define i8 @sub_compare_folding_swapPD128(<2 x double> %a, <2 x double> %b){
+; CHECK-LABEL: @sub_compare_folding_swapPD128(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> [[B:%.*]], <2 x double> [[A:%.*]], i32 5, i8 -1)
+; CHECK-NEXT: ret i8 [[TMP0]]
+;
+entry:
+ %sub.i = fsub ninf <2 x double> %a, %b
+ %0 = tail call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> zeroinitializer, <2 x double> %sub.i, i32 5, i8 -1)
+ ret i8 %0
+}
+
+
+define i8 @sub_compare_folding_swapPD256(<4 x double> %a, <4 x double> %b){
+; CHECK-LABEL: @sub_compare_folding_swapPD256(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.pd.256(<4 x double> [[B:%.*]], <4 x double> [[A:%.*]], i32 5, i8 -1)
+; CHECK-NEXT: ret i8 [[TMP0]]
+;
+entry:
+ %sub.i = fsub ninf <4 x double> %a, %b
+ %0 = tail call i8 @llvm.x86.avx512.mask.cmp.pd.256(<4 x double> zeroinitializer, <4 x double> %sub.i, i32 5, i8 -1)
+ ret i8 %0
+}
+
+
+define i8 @sub_compare_folding_swapPD512(<8 x double> %a, <8 x double> %b){
+; CHECK-LABEL: @sub_compare_folding_swapPD512(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> [[B:%.*]], <8 x double> [[A:%.*]], i32 11, i8 -1, i32 4)
+; CHECK-NEXT: ret i8 [[TMP0]]
+;
+entry:
+ %sub.i = fsub ninf <8 x double> %a, %b
+ %0 = tail call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> zeroinitializer, <8 x double> %sub.i, i32 11, i8 -1, i32 4)
+ ret i8 %0
+}
+
+
+define i8 @sub_compare_folding_swapPS128(<4 x float> %a, <4 x float> %b){
+; CHECK-LABEL: @sub_compare_folding_swapPS128(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.ps.128(<4 x float> [[B:%.*]], <4 x float> [[A:%.*]], i32 12, i8 -1)
+; CHECK-NEXT: ret i8 [[TMP0]]
+;
+entry:
+ %sub.i = fsub ninf <4 x float> %a, %b
+ %0 = tail call i8 @llvm.x86.avx512.mask.cmp.ps.128(<4 x float> zeroinitializer, <4 x float> %sub.i, i32 12, i8 -1)
+ ret i8 %0
+}
+
+
+define i8 @sub_compare_folding_swapPS256(<8 x float> %a, <8 x float> %b){
+; CHECK-LABEL: @sub_compare_folding_swapPS256(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.ps.256(<8 x float> [[B:%.*]], <8 x float> [[A:%.*]], i32 5, i8 -1)
+; CHECK-NEXT: ret i8 [[TMP0]]
+;
+entry:
+ %sub.i = fsub ninf <8 x float> %a, %b
+ %0 = tail call i8 @llvm.x86.avx512.mask.cmp.ps.256(<8 x float> zeroinitializer, <8 x float> %sub.i, i32 5, i8 -1)
+ ret i8 %0
+}
+
+
+define i16 @sub_compare_folding_swapPS512(<16 x float> %a, <16 x float> %b){
+; CHECK-LABEL: @sub_compare_folding_swapPS512(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> [[B:%.*]], <16 x float> [[A:%.*]], i32 11, i16 -1, i32 4)
+; CHECK-NEXT: ret i16 [[TMP0]]
+;
+entry:
+ %sub.i = fsub ninf <16 x float> %a, %b
+ %0 = tail call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> zeroinitializer, <16 x float> %sub.i, i32 11, i16 -1, i32 4)
+ ret i16 %0
+}
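+
+; Summary of the tests above: cmp(a - b, 0, CC) folds to cmp(a, b, CC), and
+; the operand-swapped form cmp(0, a - b, CC) folds to cmp(b, a, CC). The ninf
+; flag on the fsub is presumably what justifies dropping the subtraction.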
+
+declare i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double>, <2 x double>, i32, i8)
+declare i8 @llvm.x86.avx512.mask.cmp.pd.256(<4 x double>, <4 x double>, i32, i8)
+declare i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double>, <8 x double>, i32, i8, i32)
+declare i8 @llvm.x86.avx512.mask.cmp.ps.128(<4 x float>, <4 x float>, i32, i8)
+declare i8 @llvm.x86.avx512.mask.cmp.ps.256(<8 x float>, <8 x float>, i32, i8)
+declare i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float>, <16 x float>, i32, i16, i32)
diff --git a/test/Transforms/InstCombine/add-sitofp.ll b/test/Transforms/InstCombine/add-sitofp.ll
index 3b5485e00528..2abfa436f6d3 100644
--- a/test/Transforms/InstCombine/add-sitofp.ll
+++ b/test/Transforms/InstCombine/add-sitofp.ll
@@ -1,6 +1,14 @@
-; RUN: opt < %s -instcombine -S | grep "add nuw nsw i32"
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
-define double @x(i32 %a, i32 %b) nounwind {
+define double @x(i32 %a, i32 %b) {
+; CHECK-LABEL: @x(
+; CHECK-NEXT: [[M:%.*]] = lshr i32 [[A:%.*]], 24
+; CHECK-NEXT: [[N:%.*]] = and i32 [[M]], [[B:%.*]]
+; CHECK-NEXT: [[ADDCONV:%.*]] = add nuw nsw i32 [[N]], 1
+; CHECK-NEXT: [[P:%.*]] = sitofp i32 [[ADDCONV]] to double
+; CHECK-NEXT: ret double [[P]]
+;
%m = lshr i32 %a, 24
%n = and i32 %m, %b
%o = sitofp i32 %n to double
diff --git a/test/Transforms/InstCombine/add.ll b/test/Transforms/InstCombine/add.ll
index 39a746ab310b..648305d134cd 100644
--- a/test/Transforms/InstCombine/add.ll
+++ b/test/Transforms/InstCombine/add.ll
@@ -1,6 +1,32 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
+; TODO: This should be canonicalized to either a select or xor+zext.
+
+define i32 @select_0_or_1_from_bool(i1 %x) {
+; CHECK-LABEL: @select_0_or_1_from_bool(
+; CHECK-NEXT: [[EXT:%.*]] = sext i1 %x to i32
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[EXT]], 1
+; CHECK-NEXT: ret i32 [[ADD]]
+;
+ %ext = sext i1 %x to i32
+ %add = add i32 %ext, 1
+ ret i32 %add
+}
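+
+; For reference, the two candidate forms the TODO mentions would look roughly
+; like this for the scalar case (a sketch, not checked output):
+;   %add = select i1 %x, i32 0, i32 1
+; or:
+;   %not = xor i1 %x, true
+;   %add = zext i1 %not to i32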
+
+; TODO: This should be canonicalized to either a select or xor+zext.
+
+define <2 x i32> @select_0_or_1_from_bool_vec(<2 x i1> %x) {
+; CHECK-LABEL: @select_0_or_1_from_bool_vec(
+; CHECK-NEXT: [[EXT:%.*]] = sext <2 x i1> %x to <2 x i32>
+; CHECK-NEXT: [[ADD:%.*]] = add nsw <2 x i32> [[EXT]], <i32 1, i32 1>
+; CHECK-NEXT: ret <2 x i32> [[ADD]]
+;
+ %ext = sext <2 x i1> %x to <2 x i32>
+ %add = add <2 x i32> %ext, <i32 1, i32 1>
+ ret <2 x i32> %add
+}
+
define i32 @test1(i32 %A) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: ret i32 %A
@@ -100,7 +126,7 @@ define i32 @test9(i32 %A) {
define i1 @test10(i8 %A, i8 %b) {
; CHECK-LABEL: @test10(
; CHECK-NEXT: [[B:%.*]] = sub i8 0, %b
-; CHECK-NEXT: [[C:%.*]] = icmp ne i8 %A, [[B]]
+; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[B]], %A
; CHECK-NEXT: ret i1 [[C]]
;
%B = add i8 %A, %b
@@ -112,7 +138,7 @@ define i1 @test10(i8 %A, i8 %b) {
define <2 x i1> @test10vec(<2 x i8> %a, <2 x i8> %b) {
; CHECK-LABEL: @test10vec(
; CHECK-NEXT: [[C:%.*]] = sub <2 x i8> zeroinitializer, %b
-; CHECK-NEXT: [[D:%.*]] = icmp ne <2 x i8> %a, [[C]]
+; CHECK-NEXT: [[D:%.*]] = icmp ne <2 x i8> [[C]], %a
; CHECK-NEXT: ret <2 x i1> [[D]]
;
%c = add <2 x i8> %a, %b
@@ -244,14 +270,59 @@ define i32 @test19(i1 %C) {
ret i32 %V
}
+define <2 x i32> @test19vec(i1 %C) {
+; CHECK-LABEL: @test19vec(
+; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], <2 x i32> <i32 1123, i32 1123>, <2 x i32> <i32 133, i32 133>
+; CHECK-NEXT: ret <2 x i32> [[V]]
+;
+ %A = select i1 %C, <2 x i32> <i32 1000, i32 1000>, <2 x i32> <i32 10, i32 10>
+ %V = add <2 x i32> %A, <i32 123, i32 123>
+ ret <2 x i32> %V
+}
+
+; This is an InstSimplify fold, but test it here to make sure that
+; InstCombine does not prevent the fold.
+; With NSW, add of sign bit -> or of sign bit.
+
define i32 @test20(i32 %x) {
; CHECK-LABEL: @test20(
; CHECK-NEXT: ret i32 %x
;
- %tmp.2 = xor i32 %x, -2147483648
- ;; Add of sign bit -> xor of sign bit.
- %tmp.4 = add i32 %tmp.2, -2147483648
- ret i32 %tmp.4
+ %y = xor i32 %x, -2147483648
+ %z = add nsw i32 %y, -2147483648
+ ret i32 %z
+}
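+
+; Worked through: the xor flips the sign bit, and adding the sign bit can only
+; toggle that same bit (any carry falls off the top), so the add is a second
+; sign-bit flip and the pair cancels, leaving %x.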
+
+define i32 @xor_sign_bit(i32 %x) {
+; CHECK-LABEL: @xor_sign_bit(
+; CHECK-NEXT: [[ADD:%.*]] = add i32 %x, -2147483606
+; CHECK-NEXT: ret i32 [[ADD]]
+;
+ %xor = xor i32 %x, 2147483648
+ %add = add i32 %xor, 42
+ ret i32 %add
+}
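+
+; Flipping the sign bit is the same as adding -2147483648, so the two adds
+; combine: -2147483648 + 42 == -2147483606 in wrapping i32 arithmetic.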
+
+; No-wrap info allows converting the add to 'or'.
+
+define i8 @add_nsw_signbit(i8 %x) {
+; CHECK-LABEL: @add_nsw_signbit(
+; CHECK-NEXT: [[Y:%.*]] = or i8 %x, -128
+; CHECK-NEXT: ret i8 [[Y]]
+;
+ %y = add nsw i8 %x, -128
+ ret i8 %y
+}
+
+; No-wrap info allows converting the add to 'or'.
+
+define i8 @add_nuw_signbit(i8 %x) {
+; CHECK-LABEL: @add_nuw_signbit(
+; CHECK-NEXT: [[Y:%.*]] = or i8 %x, -128
+; CHECK-NEXT: ret i8 [[Y]]
+;
+ %y = add nuw i8 %x, 128
+ ret i8 %y
}
define i1 @test21(i32 %x) {
@@ -519,3 +590,99 @@ define i64 @test41(i32 %a) {
%sub = add i64 %zext, -1
ret i64 %sub
}
+
+define i32 @test42(i1 %C) {
+; CHECK-LABEL: @test42(
+; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], i32 1123, i32 133
+; CHECK-NEXT: ret i32 [[V]]
+;
+ %A = select i1 %C, i32 1000, i32 10
+ %V = add i32 123, %A
+ ret i32 %V
+}
+
+define <2 x i32> @test42vec(i1 %C) {
+; CHECK-LABEL: @test42vec(
+; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], <2 x i32> <i32 1123, i32 1123>, <2 x i32> <i32 133, i32 133>
+; CHECK-NEXT: ret <2 x i32> [[V]]
+;
+ %A = select i1 %C, <2 x i32> <i32 1000, i32 1000>, <2 x i32> <i32 10, i32 10>
+ %V = add <2 x i32> <i32 123, i32 123>, %A
+ ret <2 x i32> %V
+}
+
+define <2 x i32> @test42vec2(i1 %C) {
+; CHECK-LABEL: @test42vec2(
+; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], <2 x i32> <i32 1123, i32 2833>, <2 x i32> <i32 133, i32 363>
+; CHECK-NEXT: ret <2 x i32> [[V]]
+;
+ %A = select i1 %C, <2 x i32> <i32 1000, i32 2500>, <2 x i32> <i32 10, i32 30>
+ %V = add <2 x i32> <i32 123, i32 333>, %A
+ ret <2 x i32> %V
+}
+
+define i32 @test55(i1 %which) {
+; CHECK-LABEL: @test55(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
+; CHECK: delay:
+; CHECK-NEXT: br label [[FINAL]]
+; CHECK: final:
+; CHECK-NEXT: [[A:%.*]] = phi i32 [ 1123, [[ENTRY:%.*]] ], [ 133, [[DELAY]] ]
+; CHECK-NEXT: ret i32 [[A]]
+;
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+final:
+ %A = phi i32 [ 1000, %entry ], [ 10, %delay ]
+ %value = add i32 123, %A
+ ret i32 %value
+}
+
+define <2 x i32> @test43vec(i1 %which) {
+; CHECK-LABEL: @test43vec(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
+; CHECK: delay:
+; CHECK-NEXT: br label [[FINAL]]
+; CHECK: final:
+; CHECK-NEXT: [[A:%.*]] = phi <2 x i32> [ <i32 1123, i32 1123>, [[ENTRY:%.*]] ], [ <i32 133, i32 133>, [[DELAY]] ]
+; CHECK-NEXT: ret <2 x i32> [[A]]
+;
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+final:
+ %A = phi <2 x i32> [ <i32 1000, i32 1000>, %entry ], [ <i32 10, i32 10>, %delay ]
+ %value = add <2 x i32> <i32 123, i32 123>, %A
+ ret <2 x i32> %value
+}
+
+define <2 x i32> @test43vec2(i1 %which) {
+; CHECK-LABEL: @test43vec2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
+; CHECK: delay:
+; CHECK-NEXT: br label [[FINAL]]
+; CHECK: final:
+; CHECK-NEXT: [[A:%.*]] = phi <2 x i32> [ <i32 1123, i32 2833>, [[ENTRY:%.*]] ], [ <i32 133, i32 363>, [[DELAY]] ]
+; CHECK-NEXT: ret <2 x i32> [[A]]
+;
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+final:
+ %A = phi <2 x i32> [ <i32 1000, i32 2500>, %entry ], [ <i32 10, i32 30>, %delay ]
+ %value = add <2 x i32> <i32 123, i32 333>, %A
+ ret <2 x i32> %value
+}
diff --git a/test/Transforms/InstCombine/alloca.ll b/test/Transforms/InstCombine/alloca.ll
index 2ee0372e5e0a..f81f700e6cf4 100644
--- a/test/Transforms/InstCombine/alloca.ll
+++ b/test/Transforms/InstCombine/alloca.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -instcombine -S -default-data-layout="E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" | FileCheck %s -check-prefix=CHECK -check-prefix=ALL
-; RUN: opt < %s -instcombine -S -default-data-layout="E-p:32:32:32-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" | FileCheck %s -check-prefix=P32 -check-prefix=ALL
+; RUN: opt < %s -instcombine -S -data-layout="E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" | FileCheck %s -check-prefix=CHECK -check-prefix=ALL
+; RUN: opt < %s -instcombine -S -data-layout="E-p:32:32:32-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" | FileCheck %s -check-prefix=P32 -check-prefix=ALL
; RUN: opt < %s -instcombine -S | FileCheck %s -check-prefix=NODL -check-prefix=ALL
diff --git a/test/Transforms/InstCombine/amdgcn-demanded-vector-elts.ll b/test/Transforms/InstCombine/amdgcn-demanded-vector-elts.ll
new file mode 100644
index 000000000000..888f51bf939d
--- /dev/null
+++ b/test/Transforms/InstCombine/amdgcn-demanded-vector-elts.ll
@@ -0,0 +1,322 @@
+; RUN: opt -S -instcombine %s | FileCheck %s
+
+; --------------------------------------------------------------------
+; llvm.amdgcn.buffer.load
+; --------------------------------------------------------------------
+
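+; The tests below cover a demanded-vector-elements fold: if only element 0 of
+; the loaded vector is used, the load shrinks to the scalar intrinsic, and if
+; only the first two elements are used (via shufflevector), it shrinks to the
+; v2f32 form. Extracts that do not start at element 0, and wider prefixes,
+; appear to be left untouched.
+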
+; CHECK-LABEL: @buffer_load_f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @buffer_load_f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ ret float %data
+}
+
+; CHECK-LABEL: @buffer_load_v1f32(
+; CHECK-NEXT: %data = call <1 x float> @llvm.amdgcn.buffer.load.v1f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: ret <1 x float> %data
+define amdgpu_ps <1 x float> @buffer_load_v1f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <1 x float> @llvm.amdgcn.buffer.load.v1f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ ret <1 x float> %data
+}
+
+; CHECK-LABEL: @buffer_load_v2f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: ret <2 x float> %data
+define amdgpu_ps <2 x float> @buffer_load_v2f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <2 x float> @llvm.amdgcn.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ ret <2 x float> %data
+}
+
+; CHECK-LABEL: @buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: ret <4 x float> %data
+define amdgpu_ps <4 x float> @buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ ret <4 x float> %data
+}
+
+; CHECK-LABEL: @extract_elt0_buffer_load_v2f32(
+; CHECK: %data = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @extract_elt0_buffer_load_v2f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <2 x float> @llvm.amdgcn.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %elt0 = extractelement <2 x float> %data, i32 0
+ ret float %elt0
+}
+
+; CHECK-LABEL: @extract_elt1_buffer_load_v2f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: %elt1 = extractelement <2 x float> %data, i32 1
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt1_buffer_load_v2f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <2 x float> @llvm.amdgcn.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %elt1 = extractelement <2 x float> %data, i32 1
+ ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt0_buffer_load_v4f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @extract_elt0_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %data, i32 0
+ ret float %elt0
+}
+
+; CHECK-LABEL: @extract_elt1_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: %elt1 = extractelement <4 x float> %data, i32 1
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt1_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %elt1 = extractelement <4 x float> %data, i32 1
+ ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt2_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: %elt1 = extractelement <4 x float> %data, i32 2
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt2_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %elt1 = extractelement <4 x float> %data, i32 2
+ ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt3_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: %elt1 = extractelement <4 x float> %data, i32 3
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt3_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %elt1 = extractelement <4 x float> %data, i32 3
+ ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt0_elt1_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: ret <2 x float>
+define amdgpu_ps <2 x float> @extract_elt0_elt1_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 0, i32 1>
+ ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt1_elt2_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 1, i32 2>
+; CHECK-NEXT: ret <2 x float> %shuf
+define amdgpu_ps <2 x float> @extract_elt1_elt2_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 1, i32 2>
+ ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt2_elt3_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: ret <2 x float> %shuf
+define amdgpu_ps <2 x float> @extract_elt2_elt3_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 2, i32 3>
+ ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt0_elt1_elt2_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+; CHECK-NEXT: ret <3 x float> %shuf
+define amdgpu_ps <3 x float> @extract_elt0_elt1_elt2_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+ ret <3 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt1_elt2_elt3_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 1, i32 2, i32 3>
+; CHECK-NEXT: ret <3 x float> %shuf
+define amdgpu_ps <3 x float> @extract_elt1_elt2_elt3_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 1, i32 2, i32 3>
+ ret <3 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt0_elt2_elt3_buffer_load_v4f32(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 2, i32 3>
+; CHECK-NEXT: ret <3 x float> %shuf
+define amdgpu_ps <3 x float> @extract_elt0_elt2_elt3_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %shuf = shufflevector <4 x float> %data, <4 x float> undef, <3 x i32> <i32 0, i32 2, i32 3>
+ ret <3 x float> %shuf
+}
+
+; FIXME: Not handled even though only 2 elts used
+; CHECK-LABEL: @extract_elt0_elt1_buffer_load_v4f32_2(
+; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: %elt0 = extractelement <4 x float> %data, i32 0
+; CHECK-NEXT: %elt1 = extractelement <4 x float> %data, i32 1
+; CHECK-NEXT: %ins0 = insertvalue { float, float } undef, float %elt0, 0
+; CHECK-NEXT: %ins1 = insertvalue { float, float } %ins0, float %elt1, 1
+; CHECK-NEXT: ret { float, float } %ins1
+define amdgpu_ps { float, float } @extract_elt0_elt1_buffer_load_v4f32_2(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %elt0 = extractelement <4 x float> %data, i32 0
+ %elt1 = extractelement <4 x float> %data, i32 1
+ %ins0 = insertvalue { float, float } undef, float %elt0, 0
+ %ins1 = insertvalue { float, float } %ins0, float %elt1, 1
+ ret { float, float } %ins1
+}
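+
+; As the FIXME above says, the two scalar extracts feeding the insertvalue
+; pair use only elements 0 and 1, but the combine seemingly only looks through
+; a single extractelement or shufflevector user, so the v4f32 load survives.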
+
+; CHECK-LABEL: @extract_elt0_buffer_load_v3f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @extract_elt0_buffer_load_v3f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <3 x float> @llvm.amdgcn.buffer.load.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %elt0 = extractelement <3 x float> %data, i32 0
+ ret float %elt0
+}
+
+; CHECK-LABEL: @extract_elt1_buffer_load_v3f32(
+; CHECK-NEXT: %data = call <3 x float> @llvm.amdgcn.buffer.load.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: %elt1 = extractelement <3 x float> %data, i32 1
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt1_buffer_load_v3f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <3 x float> @llvm.amdgcn.buffer.load.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %elt1 = extractelement <3 x float> %data, i32 1
+ ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt2_buffer_load_v3f32(
+; CHECK-NEXT: %data = call <3 x float> @llvm.amdgcn.buffer.load.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: %elt1 = extractelement <3 x float> %data, i32 2
+; CHECK-NEXT: ret float %elt1
+define amdgpu_ps float @extract_elt2_buffer_load_v3f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <3 x float> @llvm.amdgcn.buffer.load.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %elt1 = extractelement <3 x float> %data, i32 2
+ ret float %elt1
+}
+
+; CHECK-LABEL: @extract_elt0_elt1_buffer_load_v3f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: ret <2 x float>
+define amdgpu_ps <2 x float> @extract_elt0_elt1_buffer_load_v3f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <3 x float> @llvm.amdgcn.buffer.load.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %shuf = shufflevector <3 x float> %data, <3 x float> undef, <2 x i32> <i32 0, i32 1>
+ ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt1_elt2_buffer_load_v3f32(
+; CHECK-NEXT: %data = call <3 x float> @llvm.amdgcn.buffer.load.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: %shuf = shufflevector <3 x float> %data, <3 x float> undef, <2 x i32> <i32 1, i32 2>
+; CHECK-NEXT: ret <2 x float> %shuf
+define amdgpu_ps <2 x float> @extract_elt1_elt2_buffer_load_v3f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <3 x float> @llvm.amdgcn.buffer.load.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %shuf = shufflevector <3 x float> %data, <3 x float> undef, <2 x i32> <i32 1, i32 2>
+ ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @preserve_metadata_extract_elt0_buffer_load_v2f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false), !fpmath !0
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @preserve_metadata_extract_elt0_buffer_load_v2f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <2 x float> @llvm.amdgcn.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false), !fpmath !0
+ %elt0 = extractelement <2 x float> %data, i32 0
+ ret float %elt0
+}
+
+; --------------------------------------------------------------------
+; llvm.amdgcn.buffer.load.format
+; --------------------------------------------------------------------
+
+; CHECK-LABEL: @buffer_load_format_v1f32(
+; CHECK-NEXT: %data = call <1 x float> @llvm.amdgcn.buffer.load.format.v1f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 true)
+; CHECK-NEXT: ret <1 x float> %data
+define amdgpu_ps <1 x float> @buffer_load_format_v1f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <1 x float> @llvm.amdgcn.buffer.load.format.v1f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 true)
+ ret <1 x float> %data
+}
+
+; CHECK-LABEL: @extract_elt0_buffer_load_format_v2f32(
+; CHECK-NEXT: %data = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 true, i1 false)
+; CHECK-NEXT: ret float %data
+define amdgpu_ps float @extract_elt0_buffer_load_format_v2f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <2 x float> @llvm.amdgcn.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 true, i1 false)
+ %elt0 = extractelement <2 x float> %data, i32 0
+ ret float %elt0
+}
+
+; CHECK-LABEL: @extract_elt0_elt1_buffer_load_format_v3f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: ret <2 x float> %data
+define amdgpu_ps <2 x float> @extract_elt0_elt1_buffer_load_format_v3f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <3 x float> @llvm.amdgcn.buffer.load.format.v3f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %shuf = shufflevector <3 x float> %data, <3 x float> undef, <2 x i32> <i32 0, i32 1>
+ ret <2 x float> %shuf
+}
+
+; CHECK-LABEL: @extract_elt0_elt1_buffer_load_format_v4f32(
+; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.buffer.load.format.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+; CHECK-NEXT: ret <2 x float> %data
+define amdgpu_ps <2 x float> @extract_elt0_elt1_buffer_load_format_v4f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
+ %data = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 false)
+ %shuf = shufflevector <4 x float> %data, <4 x float> undef, <2 x i32> <i32 0, i32 1>
+ ret <2 x float> %shuf
+}
+
+; The initial insertion point is at the extractelement
+; CHECK-LABEL: @extract01_bitcast_buffer_load_format_v4f32(
+; CHECK-NEXT: %tmp = call <2 x float> @llvm.amdgcn.buffer.load.format.v2f32(<4 x i32> undef, i32 %arg, i32 16, i1 false, i1 false)
+; CHECK-NEXT: %1 = shufflevector <2 x float> %tmp, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+; CHECK-NEXT: %tmp1 = bitcast <4 x float> %1 to <2 x double>
+; CHECK-NEXT: %tmp2 = extractelement <2 x double> %tmp1, i32 0
+; CHECK-NEXT: ret double %tmp2
+define double @extract01_bitcast_buffer_load_format_v4f32(i32 %arg) #0 {
+ %tmp = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> undef, i32 %arg, i32 16, i1 false, i1 false) #3
+ %tmp1 = bitcast <4 x float> %tmp to <2 x double>
+ %tmp2 = extractelement <2 x double> %tmp1, i32 0
+ ret double %tmp2
+}
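+
+; Only the low 64 bits of the bitcast (float elements 0 and 1) feed the double
+; extract at index 0, so the v4f32 load shrinks to v2f32 and a shuffle widens
+; it back for the bitcast.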
+
+; CHECK-LABEL: @extract0_bitcast_buffer_load_format_v4f32(
+; CHECK-NEXT: %tmp = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> undef, i32 %arg, i32 16, i1 false, i1 false)
+; CHECK-NEXT: %tmp2 = bitcast float %tmp to i32
+; CHECK-NEXT: ret i32 %tmp2
+define i32 @extract0_bitcast_buffer_load_format_v4f32(i32 %arg) #0 {
+ %tmp = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> undef, i32 %arg, i32 16, i1 false, i1 false) #3
+ %tmp1 = bitcast <4 x float> %tmp to <4 x i32>
+ %tmp2 = extractelement <4 x i32> %tmp1, i32 0
+ ret i32 %tmp2
+}
+
+; CHECK-LABEL: @extract_lo16_0_bitcast_buffer_load_format_v4f32(
+; CHECK-NEXT: %tmp = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> undef, i32 %arg, i32 16, i1 false, i1 false)
+; CHECK-NEXT: %1 = insertelement <4 x float> undef, float %tmp, i64 0
+; CHECK-NEXT: %tmp1 = bitcast <4 x float> %1 to <8 x i16>
+; CHECK-NEXT: %tmp2 = extractelement <8 x i16> %tmp1, i32 0
+; CHECK-NEXT: ret i16 %tmp2
+define i16 @extract_lo16_0_bitcast_buffer_load_format_v4f32(i32 %arg) #0 {
+ %tmp = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> undef, i32 %arg, i32 16, i1 false, i1 false) #3
+ %tmp1 = bitcast <4 x float> %tmp to <8 x i16>
+ %tmp2 = extractelement <8 x i16> %tmp1, i32 0
+ ret i16 %tmp2
+}
+
+declare float @llvm.amdgcn.buffer.load.f32(<4 x i32>, i32, i32, i1, i1) #1
+declare <1 x float> @llvm.amdgcn.buffer.load.v1f32(<4 x i32>, i32, i32, i1, i1) #1
+declare <2 x float> @llvm.amdgcn.buffer.load.v2f32(<4 x i32>, i32, i32, i1, i1) #1
+declare <3 x float> @llvm.amdgcn.buffer.load.v3f32(<4 x i32>, i32, i32, i1, i1) #1
+declare <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32>, i32, i32, i1, i1) #1
+
+declare float @llvm.amdgcn.buffer.load.format.f32(<4 x i32>, i32, i32, i1, i1) #1
+declare <1 x float> @llvm.amdgcn.buffer.load.format.v1f32(<4 x i32>, i32, i32, i1, i1) #1
+declare <2 x float> @llvm.amdgcn.buffer.load.format.v2f32(<4 x i32>, i32, i32, i1, i1) #1
+declare <3 x float> @llvm.amdgcn.buffer.load.format.v3f32(<4 x i32>, i32, i32, i1, i1) #1
+declare <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32>, i32, i32, i1, i1) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readonly }
+
+!0 = !{float 2.500000e+00}
diff --git a/test/Transforms/InstCombine/amdgcn-intrinsics.ll b/test/Transforms/InstCombine/amdgcn-intrinsics.ll
index a228968f25bc..deae5502bcdb 100644
--- a/test/Transforms/InstCombine/amdgcn-intrinsics.ll
+++ b/test/Transforms/InstCombine/amdgcn-intrinsics.ll
@@ -7,6 +7,12 @@
declare float @llvm.amdgcn.rcp.f32(float) nounwind readnone
declare double @llvm.amdgcn.rcp.f64(double) nounwind readnone
+; CHECK-LABEL: @test_constant_fold_rcp_f32_undef
+; CHECK-NEXT: ret float undef
+define float @test_constant_fold_rcp_f32_undef() nounwind {
+ %val = call float @llvm.amdgcn.rcp.f32(float undef) nounwind readnone
+ ret float %val
+}
; CHECK-LABEL: @test_constant_fold_rcp_f32_1
; CHECK-NEXT: ret float 1.000000e+00
@@ -50,6 +56,18 @@ define double @test_constant_fold_rcp_f64_43() nounwind {
ret double %val
}
+; --------------------------------------------------------------------
+; llvm.amdgcn.rsq
+; --------------------------------------------------------------------
+
+declare float @llvm.amdgcn.rsq.f32(float) nounwind readnone
+
+; CHECK-LABEL: @test_constant_fold_rsq_f32_undef
+; CHECK-NEXT: ret float undef
+define float @test_constant_fold_rsq_f32_undef() nounwind {
+ %val = call float @llvm.amdgcn.rsq.f32(float undef) nounwind readnone
+ ret float %val
+}
; --------------------------------------------------------------------
; llvm.amdgcn.frexp.mant
@@ -633,3 +651,888 @@ define float @cos_fabs_fneg_f32(float %x) {
%cos = call float @llvm.amdgcn.cos.f32(float %x.fabs.fneg)
ret float %cos
}
+
+; --------------------------------------------------------------------
+; llvm.amdgcn.cvt.pkrtz
+; --------------------------------------------------------------------
+
+declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) nounwind readnone
+
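+; cvt.pkrtz packs two f32 values into a <2 x half> with round-toward-zero.
+; The tests below check that it constant-folds when both operands are constant
+; (including to zeroinitializer, and to undef for two undefs), and is
+; otherwise left alone even when one operand is constant or undef.
+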
+; CHECK-LABEL: @vars_lhs_cvt_pkrtz(
+; CHECK: %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %x, float %y)
+define <2 x half> @vars_lhs_cvt_pkrtz(float %x, float %y) {
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %x, float %y)
+ ret <2 x half> %cvt
+}
+
+; CHECK-LABEL: @constant_lhs_cvt_pkrtz(
+; CHECK: %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float 0.000000e+00, float %y)
+define <2 x half> @constant_lhs_cvt_pkrtz(float %y) {
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float 0.0, float %y)
+ ret <2 x half> %cvt
+}
+
+; CHECK-LABEL: @constant_rhs_cvt_pkrtz(
+; CHECK: %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %x, float 0.000000e+00)
+define <2 x half> @constant_rhs_cvt_pkrtz(float %x) {
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %x, float 0.0)
+ ret <2 x half> %cvt
+}
+
+; CHECK-LABEL: @undef_lhs_cvt_pkrtz(
+; CHECK: %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float undef, float %y)
+define <2 x half> @undef_lhs_cvt_pkrtz(float %y) {
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float undef, float %y)
+ ret <2 x half> %cvt
+}
+
+; CHECK-LABEL: @undef_rhs_cvt_pkrtz(
+; CHECK: %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %x, float undef)
+define <2 x half> @undef_rhs_cvt_pkrtz(float %x) {
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %x, float undef)
+ ret <2 x half> %cvt
+}
+
+; CHECK-LABEL: @undef_cvt_pkrtz(
+; CHECK: ret <2 x half> undef
+define <2 x half> @undef_cvt_pkrtz() {
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float undef, float undef)
+ ret <2 x half> %cvt
+}
+
+; CHECK-LABEL: @constant_splat0_cvt_pkrtz(
+; CHECK: ret <2 x half> zeroinitializer
+define <2 x half> @constant_splat0_cvt_pkrtz() {
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float 0.0, float 0.0)
+ ret <2 x half> %cvt
+}
+
+; CHECK-LABEL: @constant_cvt_pkrtz(
+; CHECK: ret <2 x half> <half 0xH4000, half 0xH4400>
+define <2 x half> @constant_cvt_pkrtz() {
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float 2.0, float 4.0)
+ ret <2 x half> %cvt
+}
+
+; Test constant values where rtz changes the result
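+; 65535.0 is above the largest finite half (65504.0 == 0xH7BFF); round to
+; nearest even would give +infinity, while round toward zero truncates to
+; 0xH7BFF, which is what the fold must produce.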
+; CHECK-LABEL: @constant_rtz_pkrtz(
+; CHECK: ret <2 x half> <half 0xH7BFF, half 0xH7BFF>
+define <2 x half> @constant_rtz_pkrtz() {
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float 65535.0, float 65535.0)
+ ret <2 x half> %cvt
+}
+
+; --------------------------------------------------------------------
+; llvm.amdgcn.ubfe
+; --------------------------------------------------------------------
+
+declare i32 @llvm.amdgcn.ubfe.i32(i32, i32, i32) nounwind readnone
+declare i64 @llvm.amdgcn.ubfe.i64(i64, i32, i32) nounwind readnone
+
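+; Judging by the folds below, only the low 5 bits of the offset and width
+; operands are significant for the i32 variant (so 133 becomes 5 and 33
+; becomes 1), a width of 0 yields 0, and small constant offset/width pairs
+; lower to plain lshr/and.
+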
+; CHECK-LABEL: @ubfe_var_i32(
+; CHECK-NEXT: %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 %offset, i32 %width)
+define i32 @ubfe_var_i32(i32 %src, i32 %offset, i32 %width) {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 %offset, i32 %width)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_clear_high_bits_constant_offset_i32(
+; CHECK-NEXT: %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 5, i32 %width)
+define i32 @ubfe_clear_high_bits_constant_offset_i32(i32 %src, i32 %width) {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 133, i32 %width)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_clear_high_bits_constant_width_i32(
+; CHECK-NEXT: %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 %offset, i32 5)
+define i32 @ubfe_clear_high_bits_constant_width_i32(i32 %src, i32 %offset) {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 %offset, i32 133)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_width_0(
+; CHECK-NEXT: ret i32 0
+define i32 @ubfe_width_0(i32 %src, i32 %offset) {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 %offset, i32 0)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_width_31(
+; CHECK: %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 %offset, i32 31)
+define i32 @ubfe_width_31(i32 %src, i32 %offset) {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 %offset, i32 31)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_width_32(
+; CHECK-NEXT: ret i32 0
+define i32 @ubfe_width_32(i32 %src, i32 %offset) {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 %offset, i32 32)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_width_33(
+; CHECK-NEXT: %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 %offset, i32 1)
+define i32 @ubfe_width_33(i32 %src, i32 %offset) {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 %offset, i32 33)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_offset_33(
+; CHECK-NEXT: %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 1, i32 %width)
+define i32 @ubfe_offset_33(i32 %src, i32 %width) {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 33, i32 %width)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_offset_0(
+; CHECK-NEXT: %1 = sub i32 32, %width
+; CHECK-NEXT: %2 = shl i32 %src, %1
+; CHECK-NEXT: %bfe = lshr i32 %2, %1
+; CHECK-NEXT: ret i32 %bfe
+define i32 @ubfe_offset_0(i32 %src, i32 %width) {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 0, i32 %width)
+ ret i32 %bfe
+}
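+
+; The pattern above keeps the low %width bits of %src by shifting the unwanted
+; high bits out and back: (src << (32 - width)) lshr (32 - width). The sbfe
+; variant later uses ashr instead so the extracted field is sign-extended.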
+
+; CHECK-LABEL: @ubfe_offset_32(
+; CHECK-NEXT: %1 = sub i32 32, %width
+; CHECK-NEXT: %2 = shl i32 %src, %1
+; CHECK-NEXT: %bfe = lshr i32 %2, %1
+; CHECK-NEXT: ret i32 %bfe
+define i32 @ubfe_offset_32(i32 %src, i32 %width) {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 32, i32 %width)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_offset_31(
+; CHECK-NEXT: %1 = sub i32 32, %width
+; CHECK-NEXT: %2 = shl i32 %src, %1
+; CHECK-NEXT: %bfe = lshr i32 %2, %1
+; CHECK-NEXT: ret i32 %bfe
+define i32 @ubfe_offset_31(i32 %src, i32 %width) {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 32, i32 %width)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_offset_0_width_0(
+; CHECK-NEXT: ret i32 0
+define i32 @ubfe_offset_0_width_0(i32 %src) {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 0, i32 0)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_offset_0_width_3(
+; CHECK-NEXT: and i32 %src, 7
+; CHECK-NEXT: ret
+define i32 @ubfe_offset_0_width_3(i32 %src) {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 0, i32 3)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_offset_3_width_1(
+; CHECK-NEXT: %1 = lshr i32 %src, 3
+; CHECK-NEXT: and i32 %1, 1
+; CHECK-NEXT: ret i32
+define i32 @ubfe_offset_3_width_1(i32 %src) {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 3, i32 1)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_offset_3_width_4(
+; CHECK-NEXT: %1 = lshr i32 %src, 3
+; CHECK-NEXT: and i32 %1, 15
+; CHECK-NEXT: ret i32
+define i32 @ubfe_offset_3_width_4(i32 %src) {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 3, i32 4)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_0_0_0(
+; CHECK-NEXT: ret i32 0
+define i32 @ubfe_0_0_0() {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 0, i32 0, i32 0)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_neg1_5_7(
+; CHECK-NEXT: ret i32 127
+define i32 @ubfe_neg1_5_7() {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 -1, i32 5, i32 7)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_undef_src_i32(
+; CHECK-NEXT: ret i32 undef
+define i32 @ubfe_undef_src_i32(i32 %offset, i32 %width) {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 undef, i32 %offset, i32 %width)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_undef_offset_i32(
+; CHECK: %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 undef, i32 %width)
+define i32 @ubfe_undef_offset_i32(i32 %src, i32 %width) {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 undef, i32 %width)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_undef_width_i32(
+; CHECK: %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 %offset, i32 undef)
+define i32 @ubfe_undef_width_i32(i32 %src, i32 %offset) {
+ %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 %offset, i32 undef)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @ubfe_offset_33_width_4_i64(
+; CHECK-NEXT: %1 = lshr i64 %src, 33
+; CHECK-NEXT: %bfe = and i64 %1, 15
+define i64 @ubfe_offset_33_width_4_i64(i64 %src) {
+ %bfe = call i64 @llvm.amdgcn.ubfe.i64(i64 %src, i32 33, i32 4)
+ ret i64 %bfe
+}
+
+; CHECK-LABEL: @ubfe_offset_0_i64(
+; CHECK-NEXT: %1 = sub i32 64, %width
+; CHECK-NEXT: %2 = zext i32 %1 to i64
+; CHECK-NEXT: %3 = shl i64 %src, %2
+; CHECK-NEXT: %bfe = lshr i64 %3, %2
+; CHECK-NEXT: ret i64 %bfe
+define i64 @ubfe_offset_0_i64(i64 %src, i32 %width) {
+ %bfe = call i64 @llvm.amdgcn.ubfe.i64(i64 %src, i32 0, i32 %width)
+ ret i64 %bfe
+}
+
+; CHECK-LABEL: @ubfe_offset_32_width_32_i64(
+; CHECK-NEXT: %bfe = lshr i64 %src, 32
+; CHECK-NEXT: ret i64 %bfe
+define i64 @ubfe_offset_32_width_32_i64(i64 %src) {
+ %bfe = call i64 @llvm.amdgcn.ubfe.i64(i64 %src, i32 32, i32 32)
+ ret i64 %bfe
+}
+
+; --------------------------------------------------------------------
+; llvm.amdgcn.sbfe
+; --------------------------------------------------------------------
+
+declare i32 @llvm.amdgcn.sbfe.i32(i32, i32, i32) nounwind readnone
+declare i64 @llvm.amdgcn.sbfe.i64(i64, i32, i32) nounwind readnone
+
+; CHECK-LABEL: @sbfe_offset_31(
+; CHECK-NEXT: %1 = sub i32 32, %width
+; CHECK-NEXT: %2 = shl i32 %src, %1
+; CHECK-NEXT: %bfe = ashr i32 %2, %1
+; CHECK-NEXT: ret i32 %bfe
+define i32 @sbfe_offset_31(i32 %src, i32 %width) {
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %src, i32 32, i32 %width)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @sbfe_neg1_5_7(
+; CHECK-NEXT: ret i32 -1
+define i32 @sbfe_neg1_5_7() {
+ %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 -1, i32 5, i32 7)
+ ret i32 %bfe
+}
+
+; CHECK-LABEL: @sbfe_offset_32_width_32_i64(
+; CHECK-NEXT: %bfe = ashr i64 %src, 32
+; CHECK-NEXT: ret i64 %bfe
+define i64 @sbfe_offset_32_width_32_i64(i64 %src) {
+ %bfe = call i64 @llvm.amdgcn.sbfe.i64(i64 %src, i32 32, i32 32)
+ ret i64 %bfe
+}
+
+; --------------------------------------------------------------------
+; llvm.amdgcn.exp
+; --------------------------------------------------------------------
+
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) nounwind inaccessiblememonly
+
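+; Operands are (tgt, en, src0, src1, src2, src3, done, vm); bit i of the
+; enable mask gates srci. The fold below rewrites sources whose enable bit is
+; clear to undef, and leaves calls with non-constant tgt/en untouched.
+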
+; Make sure there is no crash on invalid variable params
+; CHECK-LABEL: @exp_invalid_inputs(
+; CHECK: call void @llvm.amdgcn.exp.f32(i32 0, i32 %en, float 1.000000e+00, float 2.000000e+00, float 5.000000e-01, float 4.000000e+00, i1 true, i1 false)
+; CHECK: call void @llvm.amdgcn.exp.f32(i32 %tgt, i32 15, float 1.000000e+00, float 2.000000e+00, float 5.000000e-01, float 4.000000e+00, i1 true, i1 false)
+define void @exp_invalid_inputs(i32 %tgt, i32 %en) {
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 %en, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 %tgt, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ ret void
+}
+
+; CHECK-LABEL: @exp_disabled_inputs_to_undef(
+; CHECK: call void @llvm.amdgcn.exp.f32(i32 0, i32 1, float 1.000000e+00, float undef, float undef, float undef, i1 true, i1 false)
+; CHECK: call void @llvm.amdgcn.exp.f32(i32 0, i32 2, float undef, float 2.000000e+00, float undef, float undef, i1 true, i1 false)
+; CHECK: call void @llvm.amdgcn.exp.f32(i32 0, i32 4, float undef, float undef, float 5.000000e-01, float undef, i1 true, i1 false)
+; CHECK: call void @llvm.amdgcn.exp.f32(i32 0, i32 8, float undef, float undef, float undef, float 4.000000e+00, i1 true, i1 false)
+
+; CHECK: call void @llvm.amdgcn.exp.f32(i32 0, i32 1, float %x, float undef, float undef, float undef, i1 true, i1 false)
+; CHECK: call void @llvm.amdgcn.exp.f32(i32 0, i32 2, float undef, float %y, float undef, float undef, i1 true, i1 false)
+; CHECK: call void @llvm.amdgcn.exp.f32(i32 0, i32 4, float undef, float undef, float %z, float undef, i1 true, i1 false)
+; CHECK: call void @llvm.amdgcn.exp.f32(i32 0, i32 8, float undef, float undef, float undef, float %w, i1 true, i1 false)
+
+; CHECK: call void @llvm.amdgcn.exp.f32(i32 0, i32 0, float undef, float undef, float undef, float undef, i1 true, i1 false)
+
+; CHECK: call void @llvm.amdgcn.exp.f32(i32 0, i32 3, float 1.000000e+00, float 2.000000e+00, float undef, float undef, i1 true, i1 false)
+; CHECK: call void @llvm.amdgcn.exp.f32(i32 0, i32 5, float 1.000000e+00, float undef, float 5.000000e-01, float undef, i1 true, i1 false)
+; CHECK: call void @llvm.amdgcn.exp.f32(i32 0, i32 9, float 1.000000e+00, float undef, float undef, float 4.000000e+00, i1 false, i1 false)
+; CHECK: call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float 1.000000e+00, float 2.000000e+00, float 5.000000e-01, float 4.000000e+00, i1 false, i1 false)
+define void @exp_disabled_inputs_to_undef(float %x, float %y, float %z, float %w) {
+ ; enable src0..src3 constants
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 1, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 2, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 4, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 8, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+
+ ; enable src0..src3 variables
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 1, float %x, float %y, float %z, float %w, i1 true, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 2, float %x, float %y, float %z, float %w, i1 true, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 4, float %x, float %y, float %z, float %w, i1 true, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 8, float %x, float %y, float %z, float %w, i1 true, i1 false)
+
+ ; enable none
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 0, float %x, float %y, float %z, float %w, i1 true, i1 false)
+
+ ; enable different source combinations
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 3, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 5, float 1.0, float 2.0, float 0.5, float 4.0, i1 true, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 9, float 1.0, float 2.0, float 0.5, float 4.0, i1 false, i1 false)
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float 1.0, float 2.0, float 0.5, float 4.0, i1 false, i1 false)
+
+ ret void
+}
+
+; --------------------------------------------------------------------
+; llvm.amdgcn.exp.compr
+; --------------------------------------------------------------------
+
+declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) nounwind inaccessiblememonly
+
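+; For the compressed form, the tests below suggest enable bits 0-1 jointly
+; gate the first <2 x half> operand and bits 2-3 the second; an operand with
+; all of its bits clear is replaced with undef.
+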
+; CHECK-LABEL: @exp_compr_invalid_inputs(
+; CHECK: call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 %en, <2 x half> <half 0xH3C00, half 0xH4000>, <2 x half> <half 0xH3800, half 0xH4400>, i1 true, i1 false)
+; CHECK: call void @llvm.amdgcn.exp.compr.v2f16(i32 %tgt, i32 5, <2 x half> <half 0xH3C00, half 0xH4000>, <2 x half> <half 0xH3800, half 0xH4400>, i1 true, i1 false)
+define void @exp_compr_invalid_inputs(i32 %tgt, i32 %en) {
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 %en, <2 x half> <half 1.0, half 2.0>, <2 x half> <half 0.5, half 4.0>, i1 true, i1 false)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 %tgt, i32 5, <2 x half> <half 1.0, half 2.0>, <2 x half> <half 0.5, half 4.0>, i1 true, i1 false)
+ ret void
+}
+
+; CHECK-LABEL: @exp_compr_disabled_inputs_to_undef(
+; CHECK: call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 0, <2 x half> undef, <2 x half> undef, i1 true, i1 false)
+; CHECK: call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 1, <2 x half> <half 0xH3C00, half 0xH4000>, <2 x half> undef, i1 true, i1 false)
+; CHECK: call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 2, <2 x half> <half 0xH3C00, half 0xH4000>, <2 x half> undef, i1 true, i1 false)
+; CHECK: call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 3, <2 x half> <half 0xH3C00, half 0xH4000>, <2 x half> undef, i1 true, i1 false)
+
+; CHECK: call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 0, <2 x half> undef, <2 x half> undef, i1 true, i1 false)
+; CHECK: call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 1, <2 x half> %xy, <2 x half> undef, i1 true, i1 false)
+; CHECK: call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 2, <2 x half> %xy, <2 x half> undef, i1 true, i1 false)
+; CHECK: call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 3, <2 x half> %xy, <2 x half> undef, i1 true, i1 false)
+
+; CHECK: call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 12, <2 x half> undef, <2 x half> %zw, i1 true, i1 false)
+; CHECK: call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %xy, <2 x half> %zw, i1 true, i1 false)
+define void @exp_compr_disabled_inputs_to_undef(<2 x half> %xy, <2 x half> %zw) {
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 0, <2 x half> <half 1.0, half 2.0>, <2 x half> <half 0.5, half 4.0>, i1 true, i1 false)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 1, <2 x half> <half 1.0, half 2.0>, <2 x half> <half 0.5, half 4.0>, i1 true, i1 false)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 2, <2 x half> <half 1.0, half 2.0>, <2 x half> <half 0.5, half 4.0>, i1 true, i1 false)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 3, <2 x half> <half 1.0, half 2.0>, <2 x half> <half 0.5, half 4.0>, i1 true, i1 false)
+
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 0, <2 x half> %xy, <2 x half> %zw, i1 true, i1 false)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 1, <2 x half> %xy, <2 x half> %zw, i1 true, i1 false)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 2, <2 x half> %xy, <2 x half> %zw, i1 true, i1 false)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 3, <2 x half> %xy, <2 x half> %zw, i1 true, i1 false)
+
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 12, <2 x half> %xy, <2 x half> %zw, i1 true, i1 false)
+ call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %xy, <2 x half> %zw, i1 true, i1 false)
+ ret void
+}
+
+; --------------------------------------------------------------------
+; llvm.amdgcn.fmed3
+; --------------------------------------------------------------------
+
+declare float @llvm.amdgcn.fmed3.f32(float, float, float) nounwind readnone
+
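+; The fmed3 folds below canonicalize constants to the last operand and reduce
+; a med3 with one NaN or undef input to minnum of the remaining two operands;
+; with two NaN/undef inputs the remaining operand is returned directly.
+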
+; CHECK-LABEL: @fmed3_f32(
+; CHECK: %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float %z)
+define float @fmed3_f32(float %x, float %y, float %z) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float %z)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_canonicalize_x_c0_c1_f32(
+; CHECK: call float @llvm.amdgcn.fmed3.f32(float %x, float 0.000000e+00, float 1.000000e+00)
+define float @fmed3_canonicalize_x_c0_c1_f32(float %x) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 0.0, float 1.0)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_canonicalize_c0_x_c1_f32(
+; CHECK: call float @llvm.amdgcn.fmed3.f32(float %x, float 0.000000e+00, float 1.000000e+00)
+define float @fmed3_canonicalize_c0_x_c1_f32(float %x) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0.0, float %x, float 1.0)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_canonicalize_c0_c1_x_f32(
+; CHECK: call float @llvm.amdgcn.fmed3.f32(float %x, float 0.000000e+00, float 1.000000e+00)
+define float @fmed3_canonicalize_c0_c1_x_f32(float %x) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0.0, float 1.0, float %x)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_canonicalize_x_y_c_f32(
+; CHECK: call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float 1.000000e+00)
+define float @fmed3_canonicalize_x_y_c_f32(float %x, float %y) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float 1.0)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_canonicalize_x_c_y_f32(
+; CHECK: %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float 1.000000e+00)
+define float @fmed3_canonicalize_x_c_y_f32(float %x, float %y) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 1.0, float %y)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_canonicalize_c_x_y_f32(
+; CHECK: call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float 1.000000e+00)
+define float @fmed3_canonicalize_c_x_y_f32(float %x, float %y) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 1.0, float %x, float %y)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_undef_x_y_f32(
+; CHECK: call float @llvm.minnum.f32(float %x, float %y)
+define float @fmed3_undef_x_y_f32(float %x, float %y) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float undef, float %x, float %y)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_fmf_undef_x_y_f32(
+; CHECK: call nnan float @llvm.minnum.f32(float %x, float %y)
+define float @fmed3_fmf_undef_x_y_f32(float %x, float %y) {
+ %med3 = call nnan float @llvm.amdgcn.fmed3.f32(float undef, float %x, float %y)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_x_undef_y_f32(
+; CHECK: call float @llvm.minnum.f32(float %x, float %y)
+define float @fmed3_x_undef_y_f32(float %x, float %y) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float undef, float %y)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_x_y_undef_f32(
+; CHECK: call float @llvm.minnum.f32(float %x, float %y)
+define float @fmed3_x_y_undef_f32(float %x, float %y) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float undef)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_qnan0_x_y_f32(
+; CHECK: call float @llvm.minnum.f32(float %x, float %y)
+define float @fmed3_qnan0_x_y_f32(float %x, float %y) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8000000000000, float %x, float %y)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_x_qnan0_y_f32(
+; CHECK: call float @llvm.minnum.f32(float %x, float %y)
+define float @fmed3_x_qnan0_y_f32(float %x, float %y) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 0x7FF8000000000000, float %y)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_x_y_qnan0_f32(
+; CHECK: call float @llvm.minnum.f32(float %x, float %y)
+define float @fmed3_x_y_qnan0_f32(float %x, float %y) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float %y, float 0x7FF8000000000000)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_qnan1_x_y_f32(
+; CHECK: call float @llvm.minnum.f32(float %x, float %y)
+define float @fmed3_qnan1_x_y_f32(float %x, float %y) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8000100000000, float %x, float %y)
+ ret float %med3
+}
+
+; This can return any of the qnans.
+; CHECK-LABEL: @fmed3_qnan0_qnan1_qnan2_f32(
+; CHECK: ret float 0x7FF8002000000000
+define float @fmed3_qnan0_qnan1_qnan2_f32(float %x, float %y) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8000100000000, float 0x7FF8002000000000, float 0x7FF8030000000000)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_constant_src0_0_f32(
+; CHECK: ret float 5.000000e-01
+define float @fmed3_constant_src0_0_f32(float %x, float %y) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0.5, float -1.0, float 4.0)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_constant_src0_1_f32(
+; CHECK: ret float 5.000000e-01
+define float @fmed3_constant_src0_1_f32(float %x, float %y) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0.5, float 4.0, float -1.0)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_constant_src1_0_f32(
+; CHECK: ret float 5.000000e-01
+define float @fmed3_constant_src1_0_f32(float %x, float %y) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float -1.0, float 0.5, float 4.0)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_constant_src1_1_f32(
+; CHECK: ret float 5.000000e-01
+define float @fmed3_constant_src1_1_f32(float %x, float %y) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 4.0, float 0.5, float -1.0)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_constant_src2_0_f32(
+; CHECK: ret float 5.000000e-01
+define float @fmed3_constant_src2_0_f32(float %x, float %y) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float -1.0, float 4.0, float 0.5)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_constant_src2_1_f32(
+; CHECK: ret float 5.000000e-01
+define float @fmed3_constant_src2_1_f32(float %x, float %y) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 4.0, float -1.0, float 0.5)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_x_qnan0_qnan1_f32(
+; CHECK: ret float %x
+define float @fmed3_x_qnan0_qnan1_f32(float %x) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 0x7FF8001000000000, float 0x7FF8002000000000)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_qnan0_x_qnan1_f32(
+; CHECK: ret float %x
+define float @fmed3_qnan0_x_qnan1_f32(float %x) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8001000000000, float %x, float 0x7FF8002000000000)
+ ret float %med3
+}
+
+; CHECK-LABEL: @fmed3_qnan0_qnan1_x_f32(
+; CHECK: ret float %x
+define float @fmed3_qnan0_qnan1_x_f32(float %x) {
+ %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8001000000000, float 0x7FF8002000000000, float %x)
+ ret float %med3
+}
+
+; --------------------------------------------------------------------
+; llvm.amdgcn.icmp
+; --------------------------------------------------------------------
+
+declare i64 @llvm.amdgcn.icmp.i32(i32, i32, i32) nounwind readnone convergent
+declare i64 @llvm.amdgcn.icmp.i64(i64, i64, i32) nounwind readnone convergent
+
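+; The i32 condition-code operand follows LLVM's CmpInst predicate numbering:
+; fcmp codes run 1 (oeq) through 14 (une), and icmp codes run 32 (eq) through
+; 41 (sle), e.g. 34 = ugt, 38 = sgt, 40 = slt. Codes outside those ranges are
+; invalid and must not fold (or crash), as the first tests check.
+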
+; Make sure there's no crash for invalid input
+; CHECK-LABEL: @invalid_nonconstant_icmp_code(
+; CHECK: call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 %c)
+define i64 @invalid_nonconstant_icmp_code(i32 %a, i32 %b, i32 %c) {
+ %result = call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 %c)
+ ret i64 %result
+}
+
+; CHECK-LABEL: @invalid_icmp_code(
+; CHECK: %under = call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 31)
+; CHECK: %over = call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 42)
+define i64 @invalid_icmp_code(i32 %a, i32 %b) {
+ %under = call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 31)
+ %over = call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 42)
+ %or = or i64 %under, %over
+ ret i64 %or
+}
+
+; CHECK-LABEL: @icmp_constant_inputs_false(
+; CHECK: ret i64 0
+define i64 @icmp_constant_inputs_false() {
+ %result = call i64 @llvm.amdgcn.icmp.i32(i32 9, i32 8, i32 32)
+ ret i64 %result
+}
+
+; CHECK-LABEL: @icmp_constant_inputs_true(
+; CHECK: ret i64 -1
+define i64 @icmp_constant_inputs_true() {
+ %result = call i64 @llvm.amdgcn.icmp.i32(i32 9, i32 8, i32 34)
+ ret i64 %result
+}
+
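+; A constant on the LHS is canonicalized to the RHS by swapping the
+; predicate: slt (40) becomes sgt (38).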
+; CHECK-LABEL: @icmp_constant_to_rhs_slt(
+; CHECK: %result = call i64 @llvm.amdgcn.icmp.i32(i32 %x, i32 9, i32 38)
+define i64 @icmp_constant_to_rhs_slt(i32 %x) {
+ %result = call i64 @llvm.amdgcn.icmp.i32(i32 9, i32 %x, i32 40)
+ ret i64 %result
+}
+
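+; (zext i1 %cmp) != 0 is just %cmp, so the call collapses to the inner
+; compare's predicate (eq is code 32).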
+; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_eq_i32(
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 32)
+define i64 @fold_icmp_ne_0_zext_icmp_eq_i32(i32 %a, i32 %b) {
+ %cmp = icmp eq i32 %a, %b
+ %zext.cmp = zext i1 %cmp to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_ne_i32(
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 33)
+define i64 @fold_icmp_ne_0_zext_icmp_ne_i32(i32 %a, i32 %b) {
+ %cmp = icmp ne i32 %a, %b
+ %zext.cmp = zext i1 %cmp to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_sle_i32(
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 41)
+define i64 @fold_icmp_ne_0_zext_icmp_sle_i32(i32 %a, i32 %b) {
+ %cmp = icmp sle i32 %a, %b
+ %zext.cmp = zext i1 %cmp to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_ugt_i64(
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i64(i64 %a, i64 %b, i32 34)
+define i64 @fold_icmp_ne_0_zext_icmp_ugt_i64(i64 %a, i64 %b) {
+ %cmp = icmp ugt i64 %a, %b
+ %zext.cmp = zext i1 %cmp to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_ult_swap_i64(
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i64(i64 %a, i64 %b, i32 34)
+define i64 @fold_icmp_ne_0_zext_icmp_ult_swap_i64(i64 %a, i64 %b) {
+ %cmp = icmp ugt i64 %a, %b
+ %zext.cmp = zext i1 %cmp to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 0, i32 %zext.cmp, i32 33)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_ne_0_zext_fcmp_oeq_f32(
+; CHECK-NEXT: call i64 @llvm.amdgcn.fcmp.f32(float %a, float %b, i32 1)
+define i64 @fold_icmp_ne_0_zext_fcmp_oeq_f32(float %a, float %b) {
+ %cmp = fcmp oeq float %a, %b
+ %zext.cmp = zext i1 %cmp to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_ne_0_zext_fcmp_une_f32(
+; CHECK-NEXT: call i64 @llvm.amdgcn.fcmp.f32(float %a, float %b, i32 14)
+define i64 @fold_icmp_ne_0_zext_fcmp_une_f32(float %a, float %b) {
+ %cmp = fcmp une float %a, %b
+ %zext.cmp = zext i1 %cmp to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_ne_0_zext_fcmp_olt_f64(
+; CHECK-NEXT: call i64 @llvm.amdgcn.fcmp.f64(double %a, double %b, i32 4)
+define i64 @fold_icmp_ne_0_zext_fcmp_olt_f64(double %a, double %b) {
+ %cmp = fcmp olt double %a, %b
+ %zext.cmp = zext i1 %cmp to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_sext_icmp_ne_0_i32(
+; CHECK: %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 32)
+define i64 @fold_icmp_sext_icmp_ne_0_i32(i32 %a, i32 %b) {
+ %cmp = icmp eq i32 %a, %b
+ %sext.cmp = sext i1 %cmp to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %sext.cmp, i32 0, i32 33)
+ ret i64 %mask
+}
+
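+; Comparing the zext'd condition == 0 inverts the inner predicate:
+; eq (32) becomes ne (33).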
+; CHECK-LABEL: @fold_icmp_eq_0_zext_icmp_eq_i32(
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 33)
+define i64 @fold_icmp_eq_0_zext_icmp_eq_i32(i32 %a, i32 %b) {
+ %cmp = icmp eq i32 %a, %b
+ %zext.cmp = zext i1 %cmp to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 32)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_eq_0_zext_icmp_slt_i32(
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 39)
+define i64 @fold_icmp_eq_0_zext_icmp_slt_i32(i32 %a, i32 %b) {
+ %cmp = icmp slt i32 %a, %b
+ %zext.cmp = zext i1 %cmp to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 32)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_eq_0_zext_fcmp_oeq_f32(
+; CHECK-NEXT: call i64 @llvm.amdgcn.fcmp.f32(float %a, float %b, i32 14)
+define i64 @fold_icmp_eq_0_zext_fcmp_oeq_f32(float %a, float %b) {
+ %cmp = fcmp oeq float %a, %b
+ %zext.cmp = zext i1 %cmp to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 32)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_eq_0_zext_fcmp_ule_f32(
+; CHECK-NEXT: call i64 @llvm.amdgcn.fcmp.f32(float %a, float %b, i32 2)
+define i64 @fold_icmp_eq_0_zext_fcmp_ule_f32(float %a, float %b) {
+ %cmp = fcmp ule float %a, %b
+ %zext.cmp = zext i1 %cmp to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 32)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_eq_0_zext_fcmp_ogt_f32(
+; CHECK-NEXT: call i64 @llvm.amdgcn.fcmp.f32(float %a, float %b, i32 13)
+define i64 @fold_icmp_eq_0_zext_fcmp_ogt_f32(float %a, float %b) {
+ %cmp = fcmp ogt float %a, %b
+ %zext.cmp = zext i1 %cmp to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 32)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_zext_icmp_eq_1_i32(
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 32)
+define i64 @fold_icmp_zext_icmp_eq_1_i32(i32 %a, i32 %b) {
+ %cmp = icmp eq i32 %a, %b
+ %zext.cmp = zext i1 %cmp to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 1, i32 32)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_zext_argi1_eq_1_i32(
+; CHECK: %zext.cond = zext i1 %cond to i32
+; CHECK: call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cond, i32 0, i32 33)
+define i64 @fold_icmp_zext_argi1_eq_1_i32(i1 %cond) {
+ %zext.cond = zext i1 %cond to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cond, i32 1, i32 32)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_zext_argi1_eq_neg1_i32(
+; CHECK: %zext.cond = zext i1 %cond to i32
+; CHECK: call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cond, i32 -1, i32 32)
+define i64 @fold_icmp_zext_argi1_eq_neg1_i32(i1 %cond) {
+ %zext.cond = zext i1 %cond to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cond, i32 -1, i32 32)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_sext_argi1_eq_1_i32(
+; CHECK: %sext.cond = sext i1 %cond to i32
+; CHECK: call i64 @llvm.amdgcn.icmp.i32(i32 %sext.cond, i32 1, i32 32)
+define i64 @fold_icmp_sext_argi1_eq_1_i32(i1 %cond) {
+ %sext.cond = sext i1 %cond to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %sext.cond, i32 1, i32 32)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_sext_argi1_eq_neg1_i32(
+; CHECK: %sext.cond = sext i1 %cond to i32
+; CHECK: call i64 @llvm.amdgcn.icmp.i32(i32 %sext.cond, i32 0, i32 33)
+define i64 @fold_icmp_sext_argi1_eq_neg1_i32(i1 %cond) {
+ %sext.cond = sext i1 %cond to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %sext.cond, i32 -1, i32 32)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_sext_argi1_eq_neg1_i64(
+; CHECK: %sext.cond = sext i1 %cond to i64
+; CHECK: call i64 @llvm.amdgcn.icmp.i64(i64 %sext.cond, i64 0, i32 33)
+define i64 @fold_icmp_sext_argi1_eq_neg1_i64(i1 %cond) {
+ %sext.cond = sext i1 %cond to i64
+ %mask = call i64 @llvm.amdgcn.icmp.i64(i64 %sext.cond, i64 -1, i32 32)
+ ret i64 %mask
+}
+
+; TODO: Should be able to fold to 0; a sext'd i1 is 0 or -1, never 1.
+; CHECK-LABEL: @fold_icmp_sext_icmp_eq_1_i32(
+; CHECK: %cmp = icmp eq i32 %a, %b
+; CHECK: %sext.cmp = sext i1 %cmp to i32
+; CHECK: %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %sext.cmp, i32 1, i32 32)
+define i64 @fold_icmp_sext_icmp_eq_1_i32(i32 %a, i32 %b) {
+ %cmp = icmp eq i32 %a, %b
+ %sext.cmp = sext i1 %cmp to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %sext.cmp, i32 1, i32 32)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_sext_icmp_eq_neg1_i32(
+; CHECK: call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 32)
+define i64 @fold_icmp_sext_icmp_eq_neg1_i32(i32 %a, i32 %b) {
+ %cmp = icmp eq i32 %a, %b
+ %sext.cmp = sext i1 %cmp to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %sext.cmp, i32 -1, i32 32)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_icmp_sext_icmp_sge_neg1_i32(
+; CHECK: call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 39)
+define i64 @fold_icmp_sext_icmp_sge_neg1_i32(i32 %a, i32 %b) {
+ %cmp = icmp sge i32 %a, %b
+ %sext.cmp = sext i1 %cmp to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %sext.cmp, i32 -1, i32 32)
+ ret i64 %mask
+}
+
+; CHECK-LABEL: @fold_not_icmp_ne_0_zext_icmp_sle_i32(
+; CHECK-NEXT: call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 38)
+define i64 @fold_not_icmp_ne_0_zext_icmp_sle_i32(i32 %a, i32 %b) {
+ %cmp = icmp sle i32 %a, %b
+ %not = xor i1 %cmp, true
+ %zext.cmp = zext i1 %not to i32
+ %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33)
+ ret i64 %mask
+}
+
+; --------------------------------------------------------------------
+; llvm.amdgcn.fcmp
+; --------------------------------------------------------------------
+
+declare i64 @llvm.amdgcn.fcmp.f32(float, float, i32) nounwind readnone convergent
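+
+; The condition code follows CmpInst::Predicate numbering for fcmp:
+; 0 (false) through 15 (true), so -1 and 16 are out of range.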
+
+; Make sure there's no crash for invalid input
+; CHECK-LABEL: @invalid_nonconstant_fcmp_code(
+; CHECK: call i64 @llvm.amdgcn.fcmp.f32(float %a, float %b, i32 %c)
+define i64 @invalid_nonconstant_fcmp_code(float %a, float %b, i32 %c) {
+ %result = call i64 @llvm.amdgcn.fcmp.f32(float %a, float %b, i32 %c)
+ ret i64 %result
+}
+
+; CHECK-LABEL: @invalid_fcmp_code(
+; CHECK: %under = call i64 @llvm.amdgcn.fcmp.f32(float %a, float %b, i32 -1)
+; CHECK: %over = call i64 @llvm.amdgcn.fcmp.f32(float %a, float %b, i32 16)
+define i64 @invalid_fcmp_code(float %a, float %b) {
+ %under = call i64 @llvm.amdgcn.fcmp.f32(float %a, float %b, i32 -1)
+ %over = call i64 @llvm.amdgcn.fcmp.f32(float %a, float %b, i32 16)
+ %or = or i64 %under, %over
+ ret i64 %or
+}
+
+; CHECK-LABEL: @fcmp_constant_inputs_false(
+; CHECK: ret i64 0
+define i64 @fcmp_constant_inputs_false() {
+ %result = call i64 @llvm.amdgcn.fcmp.f32(float 2.0, float 4.0, i32 1)
+ ret i64 %result
+}
+
+; CHECK-LABEL: @fcmp_constant_inputs_true(
+; CHECK: ret i64 -1
+define i64 @fcmp_constant_inputs_true() {
+ %result = call i64 @llvm.amdgcn.fcmp.f32(float 2.0, float 4.0, i32 4)
+ ret i64 %result
+}
+
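+; As with icmp, a constant on the LHS moves to the RHS with a swapped
+; predicate: olt (4) becomes ogt (2).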
+; CHECK-LABEL: @fcmp_constant_to_rhs_olt(
+; CHECK: %result = call i64 @llvm.amdgcn.fcmp.f32(float %x, float 4.000000e+00, i32 2)
+define i64 @fcmp_constant_to_rhs_olt(float %x) {
+ %result = call i64 @llvm.amdgcn.fcmp.f32(float 4.0, float %x, i32 4)
+ ret i64 %result
+}
diff --git a/test/Transforms/InstCombine/and-or-icmps.ll b/test/Transforms/InstCombine/and-or-icmps.ll
index 3903472e9119..e3aeee293139 100644
--- a/test/Transforms/InstCombine/and-or-icmps.ll
+++ b/test/Transforms/InstCombine/and-or-icmps.ll
@@ -39,15 +39,167 @@ define i1 @PR2330(i32 %a, i32 %b) {
ret i1 %and
}
-define i1 @test(i32 %tmp1030) {
-; CHECK-LABEL: @test(
-; CHECK-NEXT: [[TMP1030_OFF:%.*]] = add i32 %tmp1030, -39
-; CHECK-NEXT: [[TMP1030_CMP:%.*]] = icmp ugt i32 [[TMP1030_OFF]], 1
-; CHECK-NEXT: ret i1 [[TMP1030_CMP]]
-;
- %tmp1037 = icmp ne i32 %tmp1030, 39
- %tmp1039 = icmp ne i32 %tmp1030, 40
- %tmp1042 = and i1 %tmp1037, %tmp1039
- ret i1 %tmp1042
+; if LHSC and RHSC differ only by one bit:
+; (X == C1 || X == C2) -> (X | (C1 ^ C2)) == C2
+; PR14708: https://bugs.llvm.org/show_bug.cgi?id=14708
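+; For example, 50 (0b110010) and 51 (0b110011) differ only in bit 0, so
+; (x | 1) == 51 holds exactly when x is 50 or 51.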
+
+define i1 @or_eq_with_one_bit_diff_constants1(i32 %x) {
+; CHECK-LABEL: @or_eq_with_one_bit_diff_constants1(
+; CHECK-NEXT: [[TMP1:%.*]] = or i32 %x, 1
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 51
+; CHECK-NEXT: ret i1 [[TMP2]]
+;
+ %cmp1 = icmp eq i32 %x, 50
+ %cmp2 = icmp eq i32 %x, 51
+ %or = or i1 %cmp1, %cmp2
+ ret i1 %or
+}
+
+; (X != C1 && X != C2) -> (X | (C1 ^ C2)) != C2
+
+define i1 @and_ne_with_one_bit_diff_constants1(i32 %x) {
+; CHECK-LABEL: @and_ne_with_one_bit_diff_constants1(
+; CHECK-NEXT: [[TMP1:%.*]] = or i32 %x, 1
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 51
+; CHECK-NEXT: ret i1 [[TMP2]]
+;
+ %cmp1 = icmp ne i32 %x, 51
+ %cmp2 = icmp ne i32 %x, 50
+ %and = and i1 %cmp1, %cmp2
+ ret i1 %and
+}
+
+; The constants are not necessarily off-by-one, just off-by-one-bit.
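+; For example, 97 (0b1100001) and 65 (0b1000001) differ only in bit 5,
+; so the 'or' mask is 32.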
+
+define i1 @or_eq_with_one_bit_diff_constants2(i32 %x) {
+; CHECK-LABEL: @or_eq_with_one_bit_diff_constants2(
+; CHECK-NEXT: [[TMP1:%.*]] = or i32 %x, 32
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 97
+; CHECK-NEXT: ret i1 [[TMP2]]
+;
+ %cmp1 = icmp eq i32 %x, 97
+ %cmp2 = icmp eq i32 %x, 65
+ %or = or i1 %cmp1, %cmp2
+ ret i1 %or
+}
+
+define i1 @and_ne_with_one_bit_diff_constants2(i19 %x) {
+; CHECK-LABEL: @and_ne_with_one_bit_diff_constants2(
+; CHECK-NEXT: [[TMP1:%.*]] = or i19 %x, 128
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i19 [[TMP1]], 193
+; CHECK-NEXT: ret i1 [[TMP2]]
+;
+ %cmp1 = icmp ne i19 %x, 65
+ %cmp2 = icmp ne i19 %x, 193
+ %and = and i1 %cmp1, %cmp2
+ ret i1 %and
+}
+
+; Make sure the constants are treated as unsigned when comparing them.
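+; Here 254 (0xFE) and 126 (0x7E) differ only in bit 7, so the mask is
+; -128 (0x80) and the comparison constant prints as -2 (254 as i8).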
+
+define i1 @or_eq_with_one_bit_diff_constants3(i8 %x) {
+; CHECK-LABEL: @or_eq_with_one_bit_diff_constants3(
+; CHECK-NEXT: [[TMP1:%.*]] = or i8 %x, -128
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], -2
+; CHECK-NEXT: ret i1 [[TMP2]]
+;
+ %cmp1 = icmp eq i8 %x, 254
+ %cmp2 = icmp eq i8 %x, 126
+ %or = or i1 %cmp1, %cmp2
+ ret i1 %or
+}
+
+define i1 @and_ne_with_one_bit_diff_constants3(i8 %x) {
+; CHECK-LABEL: @and_ne_with_one_bit_diff_constants3(
+; CHECK-NEXT: [[TMP1:%.*]] = or i8 %x, -128
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i8 [[TMP1]], -63
+; CHECK-NEXT: ret i1 [[TMP2]]
+;
+ %cmp1 = icmp ne i8 %x, 65
+ %cmp2 = icmp ne i8 %x, 193
+ %and = and i1 %cmp1, %cmp2
+ ret i1 %and
+}
+
+; Use an 'add' to eliminate an icmp if the constants are off-by-one (not off-by-one-bit).
+; (X == 13 | X == 14) -> X-13 <u 2
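+; Subtracting 13 maps 13 to 0 and 14 to 1, so an unsigned '<u 2' covers
+; exactly those two values.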
+
+define i1 @or_eq_with_diff_one(i8 %x) {
+; CHECK-LABEL: @or_eq_with_diff_one(
+; CHECK-NEXT: [[TMP1:%.*]] = add i8 %x, -13
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i8 [[TMP1]], 2
+; CHECK-NEXT: ret i1 [[TMP2]]
+;
+ %cmp1 = icmp eq i8 %x, 13
+ %cmp2 = icmp eq i8 %x, 14
+ %or = or i1 %cmp1, %cmp2
+ ret i1 %or
+}
+
+; (X != 40 && X != 39) -> X-39 >u 1
+
+define i1 @and_ne_with_diff_one(i32 %x) {
+; CHECK-LABEL: @and_ne_with_diff_one(
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 %x, -39
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ugt i32 [[TMP1]], 1
+; CHECK-NEXT: ret i1 [[TMP2]]
+;
+ %cmp1 = icmp ne i32 %x, 40
+ %cmp2 = icmp ne i32 %x, 39
+ %and = and i1 %cmp1, %cmp2
+ ret i1 %and
+}
+
+; Make sure the constants are treated as signed when comparing them.
+; PR32524: https://bugs.llvm.org/show_bug.cgi?id=32524
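+; As signed values, -1 and 0 are adjacent: x+1 maps -1 to 0 and 0 to 1,
+; so (x + 1) <u 2 covers both.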
+
+define i1 @or_eq_with_diff_one_signed(i32 %x) {
+; CHECK-LABEL: @or_eq_with_diff_one_signed(
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 %x, 1
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i32 [[TMP1]], 2
+; CHECK-NEXT: ret i1 [[TMP2]]
+;
+ %cmp1 = icmp eq i32 %x, 0
+ %cmp2 = icmp eq i32 %x, -1
+ %or = or i1 %cmp1, %cmp2
+ ret i1 %or
+}
+
+define i1 @and_ne_with_diff_one_signed(i64 %x) {
+; CHECK-LABEL: @and_ne_with_diff_one_signed(
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 %x, 1
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[TMP1]], 1
+; CHECK-NEXT: ret i1 [[TMP2]]
+;
+ %cmp1 = icmp ne i64 %x, -1
+ %cmp2 = icmp ne i64 %x, 0
+ %and = and i1 %cmp1, %cmp2
+ ret i1 %and
+}
+
+; Vectors with splat constants get the same folds.
+
+define <2 x i1> @or_eq_with_one_bit_diff_constants2_splatvec(<2 x i32> %x) {
+; CHECK-LABEL: @or_eq_with_one_bit_diff_constants2_splatvec(
+; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> %x, <i32 32, i32 32>
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <2 x i32> [[TMP1]], <i32 97, i32 97>
+; CHECK-NEXT: ret <2 x i1> [[TMP2]]
+;
+ %cmp1 = icmp eq <2 x i32> %x, <i32 97, i32 97>
+ %cmp2 = icmp eq <2 x i32> %x, <i32 65, i32 65>
+ %or = or <2 x i1> %cmp1, %cmp2
+ ret <2 x i1> %or
+}
+
+define <2 x i1> @and_ne_with_diff_one_splatvec(<2 x i32> %x) {
+; CHECK-LABEL: @and_ne_with_diff_one_splatvec(
+; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i32> %x, <i32 -39, i32 -39>
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <2 x i32> [[TMP1]], <i32 1, i32 1>
+; CHECK-NEXT: ret <2 x i1> [[TMP2]]
+;
+ %cmp1 = icmp ne <2 x i32> %x, <i32 40, i32 40>
+ %cmp2 = icmp ne <2 x i32> %x, <i32 39, i32 39>
+ %and = and <2 x i1> %cmp1, %cmp2
+ ret <2 x i1> %and
}
diff --git a/test/Transforms/InstCombine/and.ll b/test/Transforms/InstCombine/and.ll
index e45012878ed5..9a4d1e5758b3 100644
--- a/test/Transforms/InstCombine/and.ll
+++ b/test/Transforms/InstCombine/and.ll
@@ -176,7 +176,7 @@ define i8 @test16(i8 %A) {
define i8 @test17(i8 %X, i8 %Y) {
; CHECK-LABEL: @test17(
; CHECK-NEXT: [[Y_NOT:%.*]] = xor i8 %Y, -1
-; CHECK-NEXT: [[D:%.*]] = or i8 %X, [[Y_NOT]]
+; CHECK-NEXT: [[D:%.*]] = or i8 [[Y_NOT]], %X
; CHECK-NEXT: ret i8 [[D]]
;
%B = xor i8 %X, -1
@@ -311,19 +311,6 @@ define <2 x i1> @test25vec(<2 x i32> %A) {
ret <2 x i1> %D
}
-define i1 @test26(i32 %A) {
-; CHECK-LABEL: @test26(
-; CHECK-NEXT: [[A_OFF:%.*]] = add i32 %A, -49
-; CHECK-NEXT: [[A_CMP:%.*]] = icmp ugt i32 [[A_OFF]], 1
-; CHECK-NEXT: ret i1 [[A_CMP]]
-;
- %B = icmp ne i32 %A, 49
- %C = icmp ne i32 %A, 50
- ;; (A-49) > 1
- %D = and i1 %B, %C
- ret i1 %D
-}
-
define i8 @test27(i8 %A) {
; CHECK-LABEL: @test27(
; CHECK-NEXT: ret i8 0
@@ -382,6 +369,18 @@ define i32 @test31(i1 %X) {
ret i32 %A
}
+; Demanded bit analysis allows us to eliminate the add.
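+; Adding 8 cannot change bits 0-2, and the mask 7 keeps only those bits.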
+
+define <2 x i32> @and_demanded_bits_splat_vec(<2 x i32> %x) {
+; CHECK-LABEL: @and_demanded_bits_splat_vec(
+; CHECK-NEXT: [[Z:%.*]] = and <2 x i32> %x, <i32 7, i32 7>
+; CHECK-NEXT: ret <2 x i32> [[Z]]
+;
+ %y = add <2 x i32> %x, <i32 8, i32 8>
+ %z = and <2 x i32> %y, <i32 7, i32 7>
+ ret <2 x i32> %z
+}
+
define i32 @test32(i32 %In) {
; CHECK-LABEL: @test32(
; CHECK-NEXT: ret i32 0
@@ -405,6 +404,42 @@ define i32 @test33(i32 %b) {
ret i32 %tmp.13
}
+define i32 @test33b(i32 %b) {
+; CHECK-LABEL: @test33b(
+; CHECK-NEXT: [[TMP_13:%.*]] = xor i32 [[B:%.*]], 1
+; CHECK-NEXT: ret i32 [[TMP_13]]
+;
+ %tmp.4.mask = and i32 %b, 1
+ %tmp.10 = xor i32 %tmp.4.mask, 1
+ %tmp.12 = and i32 %b, -2
+ %tmp.13 = or i32 %tmp.10, %tmp.12
+ ret i32 %tmp.13
+}
+
+define <2 x i32> @test33vec(<2 x i32> %b) {
+; CHECK-LABEL: @test33vec(
+; CHECK-NEXT: [[TMP_13:%.*]] = xor <2 x i32> [[B:%.*]], <i32 1, i32 1>
+; CHECK-NEXT: ret <2 x i32> [[TMP_13]]
+;
+ %tmp.4.mask = and <2 x i32> %b, <i32 1, i32 1>
+ %tmp.10 = xor <2 x i32> %tmp.4.mask, <i32 1, i32 1>
+ %tmp.12 = and <2 x i32> %b, <i32 -2, i32 -2>
+ %tmp.13 = or <2 x i32> %tmp.12, %tmp.10
+ ret <2 x i32> %tmp.13
+}
+
+define <2 x i32> @test33vecb(<2 x i32> %b) {
+; CHECK-LABEL: @test33vecb(
+; CHECK-NEXT: [[TMP_13:%.*]] = xor <2 x i32> [[B:%.*]], <i32 1, i32 1>
+; CHECK-NEXT: ret <2 x i32> [[TMP_13]]
+;
+ %tmp.4.mask = and <2 x i32> %b, <i32 1, i32 1>
+ %tmp.10 = xor <2 x i32> %tmp.4.mask, <i32 1, i32 1>
+ %tmp.12 = and <2 x i32> %b, <i32 -2, i32 -2>
+ %tmp.13 = or <2 x i32> %tmp.10, %tmp.12
+ ret <2 x i32> %tmp.13
+}
+
define i32 @test34(i32 %A, i32 %B) {
; CHECK-LABEL: @test34(
; CHECK-NEXT: ret i32 %B
@@ -425,3 +460,156 @@ define <2 x i32> @PR24942(<2 x i32> %x) {
ret <2 x i32> %and
}
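+
+; Binary ops applied to a zext'd value can be narrowed to the source type
+; when the 'and' mask fits in that type; an xor/or with 7 is dropped
+; entirely because 7 & 240 == 0.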
+define i64 @test35(i32 %X) {
+; CHECK-LABEL: @test35(
+; CHECK-NEXT: %[[sub:.*]] = sub i32 0, %X
+; CHECK-NEXT: %[[and:.*]] = and i32 %[[sub]], 240
+; CHECK-NEXT: %[[cst:.*]] = zext i32 %[[and]] to i64
+; CHECK-NEXT: ret i64 %[[cst]]
+ %zext = zext i32 %X to i64
+ %zsub = sub i64 0, %zext
+ %res = and i64 %zsub, 240
+ ret i64 %res
+}
+
+define i64 @test36(i32 %X) {
+; CHECK-LABEL: @test36(
+; CHECK-NEXT: %[[sub:.*]] = add i32 %X, 7
+; CHECK-NEXT: %[[and:.*]] = and i32 %[[sub]], 240
+; CHECK-NEXT: %[[cst:.*]] = zext i32 %[[and]] to i64
+; CHECK-NEXT: ret i64 %[[cst]]
+ %zext = zext i32 %X to i64
+ %zsub = add i64 %zext, 7
+ %res = and i64 %zsub, 240
+ ret i64 %res
+}
+
+define i64 @test37(i32 %X) {
+; CHECK-LABEL: @test37(
+; CHECK-NEXT: %[[sub:.*]] = mul i32 %X, 7
+; CHECK-NEXT: %[[and:.*]] = and i32 %[[sub]], 240
+; CHECK-NEXT: %[[cst:.*]] = zext i32 %[[and]] to i64
+; CHECK-NEXT: ret i64 %[[cst]]
+ %zext = zext i32 %X to i64
+ %zsub = mul i64 %zext, 7
+ %res = and i64 %zsub, 240
+ ret i64 %res
+}
+
+define i64 @test38(i32 %X) {
+; CHECK-LABEL: @test38(
+; CHECK-NEXT: %[[and:.*]] = and i32 %X, 240
+; CHECK-NEXT: %[[cst:.*]] = zext i32 %[[and]] to i64
+; CHECK-NEXT: ret i64 %[[cst]]
+ %zext = zext i32 %X to i64
+ %zsub = xor i64 %zext, 7
+ %res = and i64 %zsub, 240
+ ret i64 %res
+}
+
+define i64 @test39(i32 %X) {
+; CHECK-LABEL: @test39(
+; CHECK-NEXT: %[[and:.*]] = and i32 %X, 240
+; CHECK-NEXT: %[[cst:.*]] = zext i32 %[[and]] to i64
+; CHECK-NEXT: ret i64 %[[cst]]
+ %zext = zext i32 %X to i64
+ %zsub = or i64 %zext, 7
+ %res = and i64 %zsub, 240
+ ret i64 %res
+}
+
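+; The 'and' is applied to both arms of the select: 1000 & 123 == 104 and
+; 10 & 123 == 10, so the mask disappears.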
+define i32 @test40(i1 %C) {
+; CHECK-LABEL: @test40(
+; CHECK-NEXT: [[A:%.*]] = select i1 [[C:%.*]], i32 104, i32 10
+; CHECK-NEXT: ret i32 [[A]]
+;
+ %A = select i1 %C, i32 1000, i32 10
+ %V = and i32 %A, 123
+ ret i32 %V
+}
+
+define <2 x i32> @test40vec(i1 %C) {
+; CHECK-LABEL: @test40vec(
+; CHECK-NEXT: [[A:%.*]] = select i1 [[C:%.*]], <2 x i32> <i32 104, i32 104>, <2 x i32> <i32 10, i32 10>
+; CHECK-NEXT: ret <2 x i32> [[A]]
+;
+ %A = select i1 %C, <2 x i32> <i32 1000, i32 1000>, <2 x i32> <i32 10, i32 10>
+ %V = and <2 x i32> %A, <i32 123, i32 123>
+ ret <2 x i32> %V
+}
+
+define <2 x i32> @test40vec2(i1 %C) {
+; CHECK-LABEL: @test40vec2(
+; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], <2 x i32> <i32 104, i32 324>, <2 x i32> <i32 10, i32 12>
+; CHECK-NEXT: ret <2 x i32> [[V]]
+;
+ %A = select i1 %C, <2 x i32> <i32 1000, i32 2500>, <2 x i32> <i32 10, i32 30>
+ %V = and <2 x i32> %A, <i32 123, i32 333>
+ ret <2 x i32> %V
+}
+
+define i32 @test41(i1 %which) {
+; CHECK-LABEL: @test41(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
+; CHECK: delay:
+; CHECK-NEXT: br label [[FINAL]]
+; CHECK: final:
+; CHECK-NEXT: [[A:%.*]] = phi i32 [ 104, [[ENTRY:%.*]] ], [ 10, [[DELAY]] ]
+; CHECK-NEXT: ret i32 [[A]]
+;
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+final:
+ %A = phi i32 [ 1000, %entry ], [ 10, %delay ]
+ %value = and i32 %A, 123
+ ret i32 %value
+}
+
+define <2 x i32> @test41vec(i1 %which) {
+; CHECK-LABEL: @test41vec(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
+; CHECK: delay:
+; CHECK-NEXT: br label [[FINAL]]
+; CHECK: final:
+; CHECK-NEXT: [[A:%.*]] = phi <2 x i32> [ <i32 104, i32 104>, [[ENTRY:%.*]] ], [ <i32 10, i32 10>, [[DELAY]] ]
+; CHECK-NEXT: ret <2 x i32> [[A]]
+;
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+final:
+ %A = phi <2 x i32> [ <i32 1000, i32 1000>, %entry ], [ <i32 10, i32 10>, %delay ]
+ %value = and <2 x i32> %A, <i32 123, i32 123>
+ ret <2 x i32> %value
+}
+
+define <2 x i32> @test41vec2(i1 %which) {
+; CHECK-LABEL: @test41vec2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
+; CHECK: delay:
+; CHECK-NEXT: br label [[FINAL]]
+; CHECK: final:
+; CHECK-NEXT: [[A:%.*]] = phi <2 x i32> [ <i32 104, i32 324>, [[ENTRY:%.*]] ], [ <i32 10, i32 12>, [[DELAY]] ]
+; CHECK-NEXT: ret <2 x i32> [[A]]
+;
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+final:
+ %A = phi <2 x i32> [ <i32 1000, i32 2500>, %entry ], [ <i32 10, i32 30>, %delay ]
+ %value = and <2 x i32> %A, <i32 123, i32 333>
+ ret <2 x i32> %value
+}
diff --git a/test/Transforms/InstCombine/and2.ll b/test/Transforms/InstCombine/and2.ll
index 3d043b0864cd..001ac58891e4 100644
--- a/test/Transforms/InstCombine/and2.ll
+++ b/test/Transforms/InstCombine/and2.ll
@@ -45,21 +45,6 @@ define <4 x i32> @test5(<4 x i32> %A) {
ret <4 x i32> %2
}
-; Check that we combine "if x!=0 && x!=-1" into "if x+1u>1"
-define i32 @test6(i64 %x) nounwind {
-; CHECK-LABEL: @test6(
-; CHECK-NEXT: [[X_OFF:%.*]] = add i64 %x, 1
-; CHECK-NEXT: [[X_CMP:%.*]] = icmp ugt i64 [[X_OFF]], 1
-; CHECK-NEXT: [[LAND_EXT:%.*]] = zext i1 [[X_CMP]] to i32
-; CHECK-NEXT: ret i32 [[LAND_EXT]]
-;
- %cmp1 = icmp ne i64 %x, -1
- %not.cmp = icmp ne i64 %x, 0
- %.cmp1 = and i1 %cmp1, %not.cmp
- %land.ext = zext i1 %.cmp1 to i32
- ret i32 %land.ext
-}
-
define i1 @test7(i32 %i, i1 %b) {
; CHECK-LABEL: @test7(
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 %i, 0
@@ -110,6 +95,18 @@ define i64 @test9(i64 %x) {
ret i64 %and
}
+; TODO: combine -x & 1 into x & 1; the fold does not fire for vectors
+; yet, so the sub remains.
+define <2 x i64> @test9vec(<2 x i64> %x) {
+; CHECK-LABEL: @test9vec(
+; CHECK-NEXT: [[SUB:%.*]] = sub nsw <2 x i64> zeroinitializer, [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and <2 x i64> [[SUB]], <i64 1, i64 1>
+; CHECK-NEXT: ret <2 x i64> [[AND]]
+;
+ %sub = sub nsw <2 x i64> <i64 0, i64 0>, %x
+ %and = and <2 x i64> %sub, <i64 1, i64 1>
+ ret <2 x i64> %and
+}
+
define i64 @test10(i64 %x) {
; CHECK-LABEL: @test10(
; CHECK-NEXT: [[AND:%.*]] = and i64 %x, 1
@@ -122,3 +119,63 @@ define i64 @test10(i64 %x) {
ret i64 %add
}
+; The add is unnecessary: the low 8 bits of the shl (the LHS of the add)
+; are zero, and the 'and' mask 128 covers only those low bits, so the
+; upper bits of the sum do not matter.
+define i32 @test11(i32 %a, i32 %b) {
+; CHECK-LABEL: @test11(
+; CHECK-NEXT: [[X:%.*]] = shl i32 [[A:%.*]], 8
+; CHECK-NEXT: [[Z:%.*]] = and i32 [[B:%.*]], 128
+; CHECK-NEXT: [[W:%.*]] = mul i32 [[Z]], [[X]]
+; CHECK-NEXT: ret i32 [[W]]
+;
+ %x = shl i32 %a, 8
+ %y = add i32 %x, %b
+ %z = and i32 %y, 128
+ %w = mul i32 %z, %x ; to keep the shift from being removed
+ ret i32 %w
+}
+
+; The add is unnecessary: the low 8 bits of the shl (the RHS of the add)
+; are zero, and the 'and' mask 128 covers only those low bits.
+define i32 @test12(i32 %a, i32 %b) {
+; CHECK-LABEL: @test12(
+; CHECK-NEXT: [[X:%.*]] = shl i32 [[A:%.*]], 8
+; CHECK-NEXT: [[Z:%.*]] = and i32 [[B:%.*]], 128
+; CHECK-NEXT: [[W:%.*]] = mul i32 [[Z]], [[X]]
+; CHECK-NEXT: ret i32 [[W]]
+;
+ %x = shl i32 %a, 8
+ %y = add i32 %b, %x
+ %z = and i32 %y, 128
+ %w = mul i32 %z, %x ; to keep the shift from being removed
+ ret i32 %w
+}
+
+; The sub is unnecessary: the low 8 bits of the shl (the RHS of the sub)
+; are zero, and the 'and' mask 128 covers only those low bits.
+define i32 @test13(i32 %a, i32 %b) {
+; CHECK-LABEL: @test13(
+; CHECK-NEXT: [[X:%.*]] = shl i32 [[A:%.*]], 8
+; CHECK-NEXT: [[Z:%.*]] = and i32 [[B:%.*]], 128
+; CHECK-NEXT: [[W:%.*]] = mul i32 [[Z]], [[X]]
+; CHECK-NEXT: ret i32 [[W]]
+;
+ %x = shl i32 %a, 8
+ %y = sub i32 %b, %x
+ %z = and i32 %y, 128
+ %w = mul i32 %z, %x ; to keep the shift from being removed
+ ret i32 %w
+}
+
+; The sub cannot be removed entirely because the negation of %b is still
+; needed, but demanded bits let us replace its LHS with 0.
+define i32 @test14(i32 %a, i32 %b) {
+; CHECK-LABEL: @test14(
+; CHECK-NEXT: [[X:%.*]] = shl i32 [[A:%.*]], 8
+; CHECK-NEXT: [[Y:%.*]] = sub i32 0, [[B:%.*]]
+; CHECK-NEXT: [[Z:%.*]] = and i32 [[Y]], 128
+; CHECK-NEXT: [[W:%.*]] = mul i32 [[Z]], [[X]]
+; CHECK-NEXT: ret i32 [[W]]
+;
+ %x = shl i32 %a, 8
+ %y = sub i32 %x, %b
+ %z = and i32 %y, 128
+ %w = mul i32 %z, %x ; to keep the shift from being removed
+ ret i32 %w
+}
diff --git a/test/Transforms/InstCombine/apint-shift.ll b/test/Transforms/InstCombine/apint-shift.ll
index e1e6b7c48c47..f339de35d77c 100644
--- a/test/Transforms/InstCombine/apint-shift.ll
+++ b/test/Transforms/InstCombine/apint-shift.ll
@@ -63,6 +63,8 @@ define i55 @test6(i55 %A) {
ret i55 %C
}
+; (X * C2) << C1 --> X * (C2 << C1)
+
define i55 @test6a(i55 %A) {
; CHECK-LABEL: @test6a(
; CHECK-NEXT: [[C:%.*]] = mul i55 %A, 6
@@ -73,6 +75,18 @@ define i55 @test6a(i55 %A) {
ret i55 %C
}
+; (X * C2) << C1 --> X * (C2 << C1)
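+; Per-lane: 3 << 1 == 6 and 12 << 2 == 48.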
+
+define <2 x i55> @test6a_vec(<2 x i55> %A) {
+; CHECK-LABEL: @test6a_vec(
+; CHECK-NEXT: [[C:%.*]] = mul <2 x i55> %A, <i55 6, i55 48>
+; CHECK-NEXT: ret <2 x i55> [[C]]
+;
+ %B = mul <2 x i55> %A, <i55 3, i55 12>
+ %C = shl <2 x i55> %B, <i55 1, i55 2>
+ ret <2 x i55> %C
+}
+
define i29 @test7(i8 %X) {
; CHECK-LABEL: @test7(
; CHECK-NEXT: ret i29 -1
@@ -101,14 +115,150 @@ define i17 @test9(i17 %A) {
ret i17 %C
}
-define i19 @test10(i19 %A) {
+; shl (lshr X, C), C --> and X, C'
+
+define i19 @test10(i19 %X) {
; CHECK-LABEL: @test10(
-; CHECK-NEXT: [[B:%.*]] = and i19 %A, -262144
-; CHECK-NEXT: ret i19 [[B]]
+; CHECK-NEXT: [[SH1:%.*]] = and i19 %X, -262144
+; CHECK-NEXT: ret i19 [[SH1]]
+;
+ %sh1 = lshr i19 %X, 18
+ %sh2 = shl i19 %sh1, 18
+ ret i19 %sh2
+}
+
+; Two right shifts in the same direction:
+; lshr (lshr X, C1), C2 --> lshr X, C1 + C2
+
+define <2 x i19> @lshr_lshr_splat_vec(<2 x i19> %X) {
+; CHECK-LABEL: @lshr_lshr_splat_vec(
+; CHECK-NEXT: [[SH1:%.*]] = lshr <2 x i19> %X, <i19 5, i19 5>
+; CHECK-NEXT: ret <2 x i19> [[SH1]]
+;
+ %sh1 = lshr <2 x i19> %X, <i19 3, i19 3>
+ %sh2 = lshr <2 x i19> %sh1, <i19 2, i19 2>
+ ret <2 x i19> %sh2
+}
+
+define i9 @multiuse_lshr_lshr(i9 %x) {
+; CHECK-LABEL: @multiuse_lshr_lshr(
+; CHECK-NEXT: [[SH1:%.*]] = lshr i9 %x, 2
+; CHECK-NEXT: [[SH2:%.*]] = lshr i9 %x, 5
+; CHECK-NEXT: [[MUL:%.*]] = mul i9 [[SH1]], [[SH2]]
+; CHECK-NEXT: ret i9 [[MUL]]
+;
+ %sh1 = lshr i9 %x, 2
+ %sh2 = lshr i9 %sh1, 3
+ %mul = mul i9 %sh1, %sh2
+ ret i9 %mul
+}
+
+define <2 x i9> @multiuse_lshr_lshr_splat(<2 x i9> %x) {
+; CHECK-LABEL: @multiuse_lshr_lshr_splat(
+; CHECK-NEXT: [[SH1:%.*]] = lshr <2 x i9> %x, <i9 2, i9 2>
+; CHECK-NEXT: [[SH2:%.*]] = lshr <2 x i9> %x, <i9 5, i9 5>
+; CHECK-NEXT: [[MUL:%.*]] = mul <2 x i9> [[SH1]], [[SH2]]
+; CHECK-NEXT: ret <2 x i9> [[MUL]]
+;
+ %sh1 = lshr <2 x i9> %x, <i9 2, i9 2>
+ %sh2 = lshr <2 x i9> %sh1, <i9 3, i9 3>
+ %mul = mul <2 x i9> %sh1, %sh2
+ ret <2 x i9> %mul
+}
+
+; Two left shifts in the same direction:
+; shl (shl X, C1), C2 --> shl X, C1 + C2
+
+define <2 x i19> @shl_shl_splat_vec(<2 x i19> %X) {
+; CHECK-LABEL: @shl_shl_splat_vec(
+; CHECK-NEXT: [[SH1:%.*]] = shl <2 x i19> %X, <i19 5, i19 5>
+; CHECK-NEXT: ret <2 x i19> [[SH1]]
+;
+ %sh1 = shl <2 x i19> %X, <i19 3, i19 3>
+ %sh2 = shl <2 x i19> %sh1, <i19 2, i19 2>
+ ret <2 x i19> %sh2
+}
+
+define i42 @multiuse_shl_shl(i42 %x) {
+; CHECK-LABEL: @multiuse_shl_shl(
+; CHECK-NEXT: [[SH1:%.*]] = shl i42 %x, 8
+; CHECK-NEXT: [[SH2:%.*]] = shl i42 %x, 17
+; CHECK-NEXT: [[MUL:%.*]] = mul i42 [[SH1]], [[SH2]]
+; CHECK-NEXT: ret i42 [[MUL]]
+;
+ %sh1 = shl i42 %x, 8
+ %sh2 = shl i42 %sh1, 9
+ %mul = mul i42 %sh1, %sh2
+ ret i42 %mul
+}
+
+define <2 x i42> @multiuse_shl_shl_splat(<2 x i42> %x) {
+; CHECK-LABEL: @multiuse_shl_shl_splat(
+; CHECK-NEXT: [[SH1:%.*]] = shl <2 x i42> %x, <i42 8, i42 8>
+; CHECK-NEXT: [[SH2:%.*]] = shl <2 x i42> %x, <i42 17, i42 17>
+; CHECK-NEXT: [[MUL:%.*]] = mul <2 x i42> [[SH1]], [[SH2]]
+; CHECK-NEXT: ret <2 x i42> [[MUL]]
+;
+ %sh1 = shl <2 x i42> %x, <i42 8, i42 8>
+ %sh2 = shl <2 x i42> %sh1, <i42 9, i42 9>
+ %mul = mul <2 x i42> %sh1, %sh2
+ ret <2 x i42> %mul
+}
+
+; Equal shift amounts in opposite directions become bitwise 'and':
+; lshr (shl X, C), C --> and X, C'
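+; For i19 with C = 3, the mask keeps the low 16 bits: C' = 65535.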
+
+define <2 x i19> @eq_shl_lshr_splat_vec(<2 x i19> %X) {
+; CHECK-LABEL: @eq_shl_lshr_splat_vec(
+; CHECK-NEXT: [[SH1:%.*]] = and <2 x i19> %X, <i19 65535, i19 65535>
+; CHECK-NEXT: ret <2 x i19> [[SH1]]
+;
+ %sh1 = shl <2 x i19> %X, <i19 3, i19 3>
+ %sh2 = lshr <2 x i19> %sh1, <i19 3, i19 3>
+ ret <2 x i19> %sh2
+}
+
+; Equal shift amounts in opposite directions become bitwise 'and':
+; shl (lshr X, C), C --> and X, C'
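+; Here the mask clears the low 3 bits: C' = -8.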
+
+define <2 x i19> @eq_lshr_shl_splat_vec(<2 x i19> %X) {
+; CHECK-LABEL: @eq_lshr_shl_splat_vec(
+; CHECK-NEXT: [[SH1:%.*]] = and <2 x i19> %X, <i19 -8, i19 -8>
+; CHECK-NEXT: ret <2 x i19> [[SH1]]
+;
+ %sh1 = lshr <2 x i19> %X, <i19 3, i19 3>
+ %sh2 = shl <2 x i19> %sh1, <i19 3, i19 3>
+ ret <2 x i19> %sh2
+}
+
+; In general, we would need an 'and' for this transform, but the masked-off bits are known zero.
+; shl (lshr X, C1), C2 --> lshr X, C1 - C2
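+; The multiply by -8 forces the low 3 bits to zero, so the net shift is
+; lshr by 3 - 2 = 1.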
+
+define <2 x i7> @lshr_shl_splat_vec(<2 x i7> %X) {
+; CHECK-LABEL: @lshr_shl_splat_vec(
+; CHECK-NEXT: [[MUL:%.*]] = mul <2 x i7> %X, <i7 -8, i7 -8>
+; CHECK-NEXT: [[SH1:%.*]] = lshr exact <2 x i7> [[MUL]], <i7 1, i7 1>
+; CHECK-NEXT: ret <2 x i7> [[SH1]]
+;
+ %mul = mul <2 x i7> %X, <i7 -8, i7 -8>
+ %sh1 = lshr exact <2 x i7> %mul, <i7 3, i7 3>
+ %sh2 = shl nuw nsw <2 x i7> %sh1, <i7 2, i7 2>
+ ret <2 x i7> %sh2
+}
+
+; In general, we would need an 'and' for this transform, but the masked-off bits are known zero.
+; lshr (shl X, C1), C2 --> shl X, C1 - C2
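+; The udiv result fits in 4 bits, so the shl nuw by 3 loses nothing and
+; the net shift is shl by 3 - 2 = 1.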
+
+define <2 x i7> @shl_lshr_splat_vec(<2 x i7> %X) {
+; CHECK-LABEL: @shl_lshr_splat_vec(
+; CHECK-NEXT: [[DIV:%.*]] = udiv <2 x i7> %X, <i7 9, i7 9>
+; CHECK-NEXT: [[SH1:%.*]] = shl nuw nsw <2 x i7> [[DIV]], <i7 1, i7 1>
+; CHECK-NEXT: ret <2 x i7> [[SH1]]
;
- %B = lshr i19 %A, 18
- %C = shl i19 %B, 18
- ret i19 %C
+ %div = udiv <2 x i7> %X, <i7 9, i7 9>
+ %sh1 = shl nuw <2 x i7> %div, <i7 3, i7 3>
+ %sh2 = lshr exact <2 x i7> %sh1, <i7 2, i7 2>
+ ret <2 x i7> %sh2
}
; Don't hide the shl from scalar evolution. DAGCombine will get it.
@@ -125,14 +275,29 @@ define i23 @test11(i23 %A) {
ret i23 %C
}
-define i47 @test12(i47 %A) {
+; shl (ashr X, C), C --> and X, C'
+
+define i47 @test12(i47 %X) {
; CHECK-LABEL: @test12(
-; CHECK-NEXT: [[B1:%.*]] = and i47 %A, -256
-; CHECK-NEXT: ret i47 [[B1]]
+; CHECK-NEXT: [[SH11:%.*]] = and i47 %X, -256
+; CHECK-NEXT: ret i47 [[SH11]]
+;
+ %sh1 = ashr i47 %X, 8
+ %sh2 = shl i47 %sh1, 8
+ ret i47 %sh2
+}
+
+; FIXME: Same as above with vectors.
+
+define <2 x i47> @test12_splat_vec(<2 x i47> %X) {
+; CHECK-LABEL: @test12_splat_vec(
+; CHECK-NEXT: [[SH1:%.*]] = ashr <2 x i47> %X, <i47 8, i47 8>
+; CHECK-NEXT: [[SH2:%.*]] = shl nsw <2 x i47> [[SH1]], <i47 8, i47 8>
+; CHECK-NEXT: ret <2 x i47> [[SH2]]
;
- %B = ashr i47 %A, 8
- %C = shl i47 %B, 8
- ret i47 %C
+ %sh1 = ashr <2 x i47> %X, <i47 8, i47 8>
+ %sh2 = shl <2 x i47> %sh1, <i47 8, i47 8>
+ ret <2 x i47> %sh2
}
; Don't hide the shl from scalar evolution. DAGCombine will get it.
@@ -330,6 +495,66 @@ define i11 @test23(i44 %A) {
ret i11 %D
}
+; Fold lshr (shl X, C), C -> and X, C' regardless of the number of uses of the shl.
+
+define i44 @shl_lshr_eq_amt_multi_use(i44 %A) {
+; CHECK-LABEL: @shl_lshr_eq_amt_multi_use(
+; CHECK-NEXT: [[B:%.*]] = shl i44 %A, 33
+; CHECK-NEXT: [[C:%.*]] = and i44 %A, 2047
+; CHECK-NEXT: [[D:%.*]] = or i44 [[B]], [[C]]
+; CHECK-NEXT: ret i44 [[D]]
+;
+ %B = shl i44 %A, 33
+ %C = lshr i44 %B, 33
+ %D = add i44 %B, %C
+ ret i44 %D
+}
+
+; Fold vector lshr (shl X, C), C -> and X, C' regardless of the number of uses of the shl.
+
+define <2 x i44> @shl_lshr_eq_amt_multi_use_splat_vec(<2 x i44> %A) {
+; CHECK-LABEL: @shl_lshr_eq_amt_multi_use_splat_vec(
+; CHECK-NEXT: [[B:%.*]] = shl <2 x i44> %A, <i44 33, i44 33>
+; CHECK-NEXT: [[C:%.*]] = and <2 x i44> %A, <i44 2047, i44 2047>
+; CHECK-NEXT: [[D:%.*]] = or <2 x i44> [[B]], [[C]]
+; CHECK-NEXT: ret <2 x i44> [[D]]
+;
+ %B = shl <2 x i44> %A, <i44 33, i44 33>
+ %C = lshr <2 x i44> %B, <i44 33, i44 33>
+ %D = add <2 x i44> %B, %C
+ ret <2 x i44> %D
+}
+
+; Fold shl (lshr X, C), C -> and X, C' regardless of the number of uses of the lshr.
+
+define i43 @lshr_shl_eq_amt_multi_use(i43 %A) {
+; CHECK-LABEL: @lshr_shl_eq_amt_multi_use(
+; CHECK-NEXT: [[B:%.*]] = lshr i43 %A, 23
+; CHECK-NEXT: [[C:%.*]] = and i43 %A, -8388608
+; CHECK-NEXT: [[D:%.*]] = mul i43 [[B]], [[C]]
+; CHECK-NEXT: ret i43 [[D]]
+;
+ %B = lshr i43 %A, 23
+ %C = shl i43 %B, 23
+ %D = mul i43 %B, %C
+ ret i43 %D
+}
+
+; Fold vector shl (lshr X, C), C -> and X, C' regardless of the number of uses of the lshr.
+
+define <2 x i43> @lshr_shl_eq_amt_multi_use_splat_vec(<2 x i43> %A) {
+; CHECK-LABEL: @lshr_shl_eq_amt_multi_use_splat_vec(
+; CHECK-NEXT: [[B:%.*]] = lshr <2 x i43> %A, <i43 23, i43 23>
+; CHECK-NEXT: [[C:%.*]] = and <2 x i43> %A, <i43 -8388608, i43 -8388608>
+; CHECK-NEXT: [[D:%.*]] = mul <2 x i43> [[B]], [[C]]
+; CHECK-NEXT: ret <2 x i43> [[D]]
+;
+ %B = lshr <2 x i43> %A, <i43 23, i43 23>
+ %C = shl <2 x i43> %B, <i43 23, i43 23>
+ %D = mul <2 x i43> %B, %C
+ ret <2 x i43> %D
+}
+
define i37 @test25(i37 %tmp.2, i37 %AA) {
; CHECK-LABEL: @test25(
; CHECK-NEXT: [[TMP_3:%.*]] = and i37 %tmp.2, -131072
diff --git a/test/Transforms/InstCombine/apint-sub.ll b/test/Transforms/InstCombine/apint-sub.ll
index eb314ce3d1b2..1a4e62ff0d73 100644
--- a/test/Transforms/InstCombine/apint-sub.ll
+++ b/test/Transforms/InstCombine/apint-sub.ll
@@ -50,7 +50,7 @@ define i19 @test5(i19 %A, i19 %Bok, i19 %Cok) {
define i57 @test6(i57 %A, i57 %B) {
; CHECK-LABEL: @test6(
; CHECK-NEXT: [[B_NOT:%.*]] = xor i57 %B, -1
-; CHECK-NEXT: [[D:%.*]] = and i57 %A, [[B_NOT]]
+; CHECK-NEXT: [[D:%.*]] = and i57 [[B_NOT]], %A
; CHECK-NEXT: ret i57 [[D]]
;
%C = and i57 %A, %B
diff --git a/test/Transforms/InstCombine/assume.ll b/test/Transforms/InstCombine/assume.ll
index 6e690426db99..13fa6339e85a 100644
--- a/test/Transforms/InstCombine/assume.ll
+++ b/test/Transforms/InstCombine/assume.ll
@@ -176,13 +176,13 @@ define i32 @icmp2(i32 %a) #0 {
ret i32 %lnot.ext
}
-; FIXME: If the 'not' of a condition is known true, then the condition must be false.
+; If the 'not' of a condition is known true, then the condition must be false.
define i1 @assume_not(i1 %cond) {
; CHECK-LABEL: @assume_not(
; CHECK-NEXT: [[NOTCOND:%.*]] = xor i1 [[COND:%.*]], true
; CHECK-NEXT: call void @llvm.assume(i1 [[NOTCOND]])
-; CHECK-NEXT: ret i1 [[COND]]
+; CHECK-NEXT: ret i1 false
;
%notcond = xor i1 %cond, true
call void @llvm.assume(i1 %notcond)
diff --git a/test/Transforms/InstCombine/bitcast-bigendian.ll b/test/Transforms/InstCombine/bitcast-bigendian.ll
index 1a91d11d8aee..e940f0fcec75 100644
--- a/test/Transforms/InstCombine/bitcast-bigendian.ll
+++ b/test/Transforms/InstCombine/bitcast-bigendian.ll
@@ -9,8 +9,8 @@ target triple = "powerpc64-unknown-linux-gnu"
define float @test2(<2 x float> %A, <2 x i32> %B) {
; CHECK-LABEL: @test2(
-; CHECK-NEXT: [[TMP24:%.*]] = extractelement <2 x float> %A, i32 1
-; CHECK-NEXT: [[BC:%.*]] = bitcast <2 x i32> %B to <2 x float>
+; CHECK-NEXT: [[TMP24:%.*]] = extractelement <2 x float> [[A:%.*]], i32 1
+; CHECK-NEXT: [[BC:%.*]] = bitcast <2 x i32> [[B:%.*]] to <2 x float>
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x float> [[BC]], i32 1
; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP24]], [[TMP4]]
; CHECK-NEXT: ret float [[ADD]]
@@ -29,8 +29,8 @@ define float @test2(<2 x float> %A, <2 x i32> %B) {
define float @test3(<2 x float> %A, <2 x i64> %B) {
; CHECK-LABEL: @test3(
-; CHECK-NEXT: [[TMP24:%.*]] = extractelement <2 x float> %A, i32 0
-; CHECK-NEXT: [[BC2:%.*]] = bitcast <2 x i64> %B to <4 x float>
+; CHECK-NEXT: [[TMP24:%.*]] = extractelement <2 x float> [[A:%.*]], i32 0
+; CHECK-NEXT: [[BC2:%.*]] = bitcast <2 x i64> [[B:%.*]] to <4 x float>
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x float> [[BC2]], i32 1
; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP24]], [[TMP4]]
; CHECK-NEXT: ret float [[ADD]]
@@ -51,8 +51,8 @@ define float @test3(<2 x float> %A, <2 x i64> %B) {
define <2 x i32> @test4(i32 %A, i32 %B){
; CHECK-LABEL: @test4(
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i32> undef, i32 %B, i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i32> [[TMP1]], i32 %A, i32 1
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i32> undef, i32 [[B:%.*]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i32> [[TMP1]], i32 [[A:%.*]], i32 1
; CHECK-NEXT: ret <2 x i32> [[TMP2]]
;
%tmp38 = zext i32 %A to i64
@@ -65,8 +65,8 @@ define <2 x i32> @test4(i32 %A, i32 %B){
define <2 x float> @test5(float %A, float %B) {
; CHECK-LABEL: @test5(
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x float> undef, float %B, i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x float> [[TMP1]], float %A, i32 1
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x float> undef, float [[B:%.*]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x float> [[TMP1]], float [[A:%.*]], i32 1
; CHECK-NEXT: ret <2 x float> [[TMP2]]
;
%tmp37 = bitcast float %A to i32
@@ -81,9 +81,8 @@ define <2 x float> @test5(float %A, float %B) {
define <2 x float> @test6(float %A){
; CHECK-LABEL: @test6(
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x float> undef, float %A, i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x float> [[TMP1]], float 4.200000e+01, i32 1
-; CHECK-NEXT: ret <2 x float> [[TMP2]]
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x float> <float undef, float 4.200000e+01>, float [[A:%.*]], i32 0
+; CHECK-NEXT: ret <2 x float> [[TMP1]]
;
%tmp23 = bitcast float %A to i32
%tmp24 = zext i32 %tmp23 to i64
@@ -97,8 +96,8 @@ define <2 x float> @test6(float %A){
define <2 x i32> @xor_bitcast_vec_to_vec(<1 x i64> %a) {
; CHECK-LABEL: @xor_bitcast_vec_to_vec(
-; CHECK-NEXT: [[T21:%.*]] = xor <1 x i64> %a, <i64 4294967298>
-; CHECK-NEXT: [[T2:%.*]] = bitcast <1 x i64> [[T21]] to <2 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = xor <1 x i64> [[A:%.*]], <i64 4294967298>
+; CHECK-NEXT: [[T2:%.*]] = bitcast <1 x i64> [[TMP1]] to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[T2]]
;
%t1 = bitcast <1 x i64> %a to <2 x i32>
@@ -110,8 +109,8 @@ define <2 x i32> @xor_bitcast_vec_to_vec(<1 x i64> %a) {
define i64 @and_bitcast_vec_to_int(<2 x i32> %a) {
; CHECK-LABEL: @and_bitcast_vec_to_int(
-; CHECK-NEXT: [[T21:%.*]] = and <2 x i32> %a, <i32 0, i32 3>
-; CHECK-NEXT: [[T2:%.*]] = bitcast <2 x i32> [[T21]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[A:%.*]], <i32 0, i32 3>
+; CHECK-NEXT: [[T2:%.*]] = bitcast <2 x i32> [[TMP1]] to i64
; CHECK-NEXT: ret i64 [[T2]]
;
%t1 = bitcast <2 x i32> %a to i64
@@ -123,8 +122,8 @@ define i64 @and_bitcast_vec_to_int(<2 x i32> %a) {
define <2 x i32> @or_bitcast_int_to_vec(i64 %a) {
; CHECK-LABEL: @or_bitcast_int_to_vec(
-; CHECK-NEXT: [[T21:%.*]] = or i64 %a, 4294967298
-; CHECK-NEXT: [[T2:%.*]] = bitcast i64 [[T21]] to <2 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = or i64 [[A:%.*]], 4294967298
+; CHECK-NEXT: [[T2:%.*]] = bitcast i64 [[TMP1]] to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[T2]]
;
%t1 = bitcast i64 %a to <2 x i32>
diff --git a/test/Transforms/InstCombine/bitcast.ll b/test/Transforms/InstCombine/bitcast.ll
index 08f49660f184..2e7f30fee14d 100644
--- a/test/Transforms/InstCombine/bitcast.ll
+++ b/test/Transforms/InstCombine/bitcast.ll
@@ -21,7 +21,7 @@ define i32 @test1(i64 %a) {
define <2 x i32> @xor_two_vector_bitcasts(<1 x i64> %a, <1 x i64> %b) {
; CHECK-LABEL: @xor_two_vector_bitcasts(
-; CHECK-NEXT: [[T31:%.*]] = xor <1 x i64> %a, %b
+; CHECK-NEXT: [[T31:%.*]] = xor <1 x i64> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[T3:%.*]] = bitcast <1 x i64> [[T31]] to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[T3]]
;
@@ -35,8 +35,8 @@ define <2 x i32> @xor_two_vector_bitcasts(<1 x i64> %a, <1 x i64> %b) {
define <2 x i32> @xor_bitcast_vec_to_vec(<1 x i64> %a) {
; CHECK-LABEL: @xor_bitcast_vec_to_vec(
-; CHECK-NEXT: [[T21:%.*]] = xor <1 x i64> %a, <i64 8589934593>
-; CHECK-NEXT: [[T2:%.*]] = bitcast <1 x i64> [[T21]] to <2 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = xor <1 x i64> [[A:%.*]], <i64 8589934593>
+; CHECK-NEXT: [[T2:%.*]] = bitcast <1 x i64> [[TMP1]] to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[T2]]
;
%t1 = bitcast <1 x i64> %a to <2 x i32>
@@ -48,8 +48,8 @@ define <2 x i32> @xor_bitcast_vec_to_vec(<1 x i64> %a) {
define i64 @and_bitcast_vec_to_int(<2 x i32> %a) {
; CHECK-LABEL: @and_bitcast_vec_to_int(
-; CHECK-NEXT: [[T21:%.*]] = and <2 x i32> %a, <i32 3, i32 0>
-; CHECK-NEXT: [[T2:%.*]] = bitcast <2 x i32> [[T21]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[A:%.*]], <i32 3, i32 0>
+; CHECK-NEXT: [[T2:%.*]] = bitcast <2 x i32> [[TMP1]] to i64
; CHECK-NEXT: ret i64 [[T2]]
;
%t1 = bitcast <2 x i32> %a to i64
@@ -61,8 +61,8 @@ define i64 @and_bitcast_vec_to_int(<2 x i32> %a) {
define <2 x i32> @or_bitcast_int_to_vec(i64 %a) {
; CHECK-LABEL: @or_bitcast_int_to_vec(
-; CHECK-NEXT: [[T21:%.*]] = or i64 %a, 8589934593
-; CHECK-NEXT: [[T2:%.*]] = bitcast i64 [[T21]] to <2 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = or i64 [[A:%.*]], 8589934593
+; CHECK-NEXT: [[T2:%.*]] = bitcast i64 [[TMP1]] to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[T2]]
;
%t1 = bitcast i64 %a to <2 x i32>
@@ -74,8 +74,8 @@ define <2 x i32> @or_bitcast_int_to_vec(i64 %a) {
define <4 x i32> @bitcasts_and_bitcast(<4 x i32> %a, <8 x i16> %b) {
; CHECK-LABEL: @bitcasts_and_bitcast(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> %b to <4 x i32>
-; CHECK-NEXT: [[BC3:%.*]] = and <4 x i32> [[TMP1]], %a
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[B:%.*]] to <4 x i32>
+; CHECK-NEXT: [[BC3:%.*]] = and <4 x i32> [[TMP1]], [[A:%.*]]
; CHECK-NEXT: ret <4 x i32> [[BC3]]
;
%bc1 = bitcast <4 x i32> %a to <2 x i64>
@@ -91,8 +91,8 @@ define <4 x i32> @bitcasts_and_bitcast(<4 x i32> %a, <8 x i16> %b) {
define <4 x float> @bitcasts_and_bitcast_to_fp(<4 x float> %a, <8 x i16> %b) {
; CHECK-LABEL: @bitcasts_and_bitcast_to_fp(
-; CHECK-NEXT: [[BC1:%.*]] = bitcast <4 x float> %a to <2 x i64>
-; CHECK-NEXT: [[BC2:%.*]] = bitcast <8 x i16> %b to <2 x i64>
+; CHECK-NEXT: [[BC1:%.*]] = bitcast <4 x float> [[A:%.*]] to <2 x i64>
+; CHECK-NEXT: [[BC2:%.*]] = bitcast <8 x i16> [[B:%.*]] to <2 x i64>
; CHECK-NEXT: [[AND:%.*]] = and <2 x i64> [[BC2]], [[BC1]]
; CHECK-NEXT: [[BC3:%.*]] = bitcast <2 x i64> [[AND]] to <4 x float>
; CHECK-NEXT: ret <4 x float> [[BC3]]
@@ -108,8 +108,8 @@ define <4 x float> @bitcasts_and_bitcast_to_fp(<4 x float> %a, <8 x i16> %b) {
define i128 @bitcast_or_bitcast(i128 %a, <2 x i64> %b) {
; CHECK-LABEL: @bitcast_or_bitcast(
-; CHECK-NEXT: [[BC1:%.*]] = bitcast i128 %a to <2 x i64>
-; CHECK-NEXT: [[OR:%.*]] = or <2 x i64> [[BC1]], %b
+; CHECK-NEXT: [[BC1:%.*]] = bitcast i128 [[A:%.*]] to <2 x i64>
+; CHECK-NEXT: [[OR:%.*]] = or <2 x i64> [[BC1]], [[B:%.*]]
; CHECK-NEXT: [[BC2:%.*]] = bitcast <2 x i64> [[OR]] to i128
; CHECK-NEXT: ret i128 [[BC2]]
;
@@ -123,8 +123,8 @@ define i128 @bitcast_or_bitcast(i128 %a, <2 x i64> %b) {
define <4 x i32> @bitcast_xor_bitcast(<4 x i32> %a, i128 %b) {
; CHECK-LABEL: @bitcast_xor_bitcast(
-; CHECK-NEXT: [[BC1:%.*]] = bitcast <4 x i32> %a to i128
-; CHECK-NEXT: [[XOR:%.*]] = xor i128 [[BC1]], %b
+; CHECK-NEXT: [[BC1:%.*]] = bitcast <4 x i32> [[A:%.*]] to i128
+; CHECK-NEXT: [[XOR:%.*]] = xor i128 [[BC1]], [[B:%.*]]
; CHECK-NEXT: [[BC2:%.*]] = bitcast i128 [[XOR]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[BC2]]
;
@@ -138,8 +138,8 @@ define <4 x i32> @bitcast_xor_bitcast(<4 x i32> %a, i128 %b) {
define <4 x float> @bitcast_vector_select(<4 x float> %x, <2 x i64> %y, <4 x i1> %cmp) {
; CHECK-LABEL: @bitcast_vector_select(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> %y to <4 x float>
-; CHECK-NEXT: [[T7:%.*]] = select <4 x i1> %cmp, <4 x float> %x, <4 x float> [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[Y:%.*]] to <4 x float>
+; CHECK-NEXT: [[T7:%.*]] = select <4 x i1> [[CMP:%.*]], <4 x float> [[X:%.*]], <4 x float> [[TMP1]]
; CHECK-NEXT: ret <4 x float> [[T7]]
;
%t4 = bitcast <4 x float> %x to <4 x i32>
@@ -151,8 +151,8 @@ define <4 x float> @bitcast_vector_select(<4 x float> %x, <2 x i64> %y, <4 x i1>
define float @bitcast_scalar_select_of_scalars(float %x, i32 %y, i1 %cmp) {
; CHECK-LABEL: @bitcast_scalar_select_of_scalars(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 %y to float
-; CHECK-NEXT: [[T7:%.*]] = select i1 %cmp, float %x, float [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[Y:%.*]] to float
+; CHECK-NEXT: [[T7:%.*]] = select i1 [[CMP:%.*]], float [[X:%.*]], float [[TMP1]]
; CHECK-NEXT: ret float [[T7]]
;
%t4 = bitcast float %x to i32
@@ -166,8 +166,8 @@ define float @bitcast_scalar_select_of_scalars(float %x, i32 %y, i1 %cmp) {
define float @bitcast_scalar_select_type_mismatch1(float %x, <4 x i8> %y, i1 %cmp) {
; CHECK-LABEL: @bitcast_scalar_select_type_mismatch1(
-; CHECK-NEXT: [[T4:%.*]] = bitcast float %x to <4 x i8>
-; CHECK-NEXT: [[T6:%.*]] = select i1 %cmp, <4 x i8> [[T4]], <4 x i8> %y
+; CHECK-NEXT: [[T4:%.*]] = bitcast float [[X:%.*]] to <4 x i8>
+; CHECK-NEXT: [[T6:%.*]] = select i1 [[CMP:%.*]], <4 x i8> [[T4]], <4 x i8> [[Y:%.*]]
; CHECK-NEXT: [[T7:%.*]] = bitcast <4 x i8> [[T6]] to float
; CHECK-NEXT: ret float [[T7]]
;
@@ -182,8 +182,8 @@ define float @bitcast_scalar_select_type_mismatch1(float %x, <4 x i8> %y, i1 %cm
define <4 x i8> @bitcast_scalar_select_type_mismatch2(<4 x i8> %x, float %y, i1 %cmp) {
; CHECK-LABEL: @bitcast_scalar_select_type_mismatch2(
-; CHECK-NEXT: [[T4:%.*]] = bitcast <4 x i8> %x to float
-; CHECK-NEXT: [[T6:%.*]] = select i1 %cmp, float [[T4]], float %y
+; CHECK-NEXT: [[T4:%.*]] = bitcast <4 x i8> [[X:%.*]] to float
+; CHECK-NEXT: [[T6:%.*]] = select i1 [[CMP:%.*]], float [[T4]], float [[Y:%.*]]
; CHECK-NEXT: [[T7:%.*]] = bitcast float [[T6]] to <4 x i8>
; CHECK-NEXT: ret <4 x i8> [[T7]]
;
@@ -195,8 +195,8 @@ define <4 x i8> @bitcast_scalar_select_type_mismatch2(<4 x i8> %x, float %y, i1
define <4 x float> @bitcast_scalar_select_of_vectors(<4 x float> %x, <2 x i64> %y, i1 %cmp) {
; CHECK-LABEL: @bitcast_scalar_select_of_vectors(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> %y to <4 x float>
-; CHECK-NEXT: [[T7:%.*]] = select i1 %cmp, <4 x float> %x, <4 x float> [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[Y:%.*]] to <4 x float>
+; CHECK-NEXT: [[T7:%.*]] = select i1 [[CMP:%.*]], <4 x float> [[X:%.*]], <4 x float> [[TMP1]]
; CHECK-NEXT: ret <4 x float> [[T7]]
;
%t4 = bitcast <4 x float> %x to <4 x i32>
@@ -210,9 +210,9 @@ define <4 x float> @bitcast_scalar_select_of_vectors(<4 x float> %x, <2 x i64> %
define float @bitcast_vector_select_no_fold1(float %x, <2 x i16> %y, <4 x i1> %cmp) {
; CHECK-LABEL: @bitcast_vector_select_no_fold1(
-; CHECK-NEXT: [[T4:%.*]] = bitcast float %x to <4 x i8>
-; CHECK-NEXT: [[T5:%.*]] = bitcast <2 x i16> %y to <4 x i8>
-; CHECK-NEXT: [[T6:%.*]] = select <4 x i1> %cmp, <4 x i8> [[T4]], <4 x i8> [[T5]]
+; CHECK-NEXT: [[T4:%.*]] = bitcast float [[X:%.*]] to <4 x i8>
+; CHECK-NEXT: [[T5:%.*]] = bitcast <2 x i16> [[Y:%.*]] to <4 x i8>
+; CHECK-NEXT: [[T6:%.*]] = select <4 x i1> [[CMP:%.*]], <4 x i8> [[T4]], <4 x i8> [[T5]]
; CHECK-NEXT: [[T7:%.*]] = bitcast <4 x i8> [[T6]] to float
; CHECK-NEXT: ret float [[T7]]
;
@@ -227,9 +227,9 @@ define float @bitcast_vector_select_no_fold1(float %x, <2 x i16> %y, <4 x i1> %c
define <2 x float> @bitcast_vector_select_no_fold2(<2 x float> %x, <4 x i16> %y, <8 x i1> %cmp) {
; CHECK-LABEL: @bitcast_vector_select_no_fold2(
-; CHECK-NEXT: [[T4:%.*]] = bitcast <2 x float> %x to <8 x i8>
-; CHECK-NEXT: [[T5:%.*]] = bitcast <4 x i16> %y to <8 x i8>
-; CHECK-NEXT: [[T6:%.*]] = select <8 x i1> %cmp, <8 x i8> [[T4]], <8 x i8> [[T5]]
+; CHECK-NEXT: [[T4:%.*]] = bitcast <2 x float> [[X:%.*]] to <8 x i8>
+; CHECK-NEXT: [[T5:%.*]] = bitcast <4 x i16> [[Y:%.*]] to <8 x i8>
+; CHECK-NEXT: [[T6:%.*]] = select <8 x i1> [[CMP:%.*]], <8 x i8> [[T4]], <8 x i8> [[T5]]
; CHECK-NEXT: [[T7:%.*]] = bitcast <8 x i8> [[T6]] to <2 x float>
; CHECK-NEXT: ret <2 x float> [[T7]]
;
@@ -244,8 +244,8 @@ define <2 x float> @bitcast_vector_select_no_fold2(<2 x float> %x, <4 x i16> %y,
; rdar://7892780
define float @test2(<2 x float> %A, <2 x i32> %B) {
; CHECK-LABEL: @test2(
-; CHECK-NEXT: [[TMP24:%.*]] = extractelement <2 x float> %A, i32 0
-; CHECK-NEXT: [[BC:%.*]] = bitcast <2 x i32> %B to <2 x float>
+; CHECK-NEXT: [[TMP24:%.*]] = extractelement <2 x float> [[A:%.*]], i32 0
+; CHECK-NEXT: [[BC:%.*]] = bitcast <2 x i32> [[B:%.*]] to <2 x float>
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x float> [[BC]], i32 0
; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP24]], [[TMP4]]
; CHECK-NEXT: ret float [[ADD]]
@@ -266,8 +266,8 @@ define float @test2(<2 x float> %A, <2 x i32> %B) {
; rdar://7892780
define float @test3(<2 x float> %A, <2 x i64> %B) {
; CHECK-LABEL: @test3(
-; CHECK-NEXT: [[TMP24:%.*]] = extractelement <2 x float> %A, i32 1
-; CHECK-NEXT: [[BC2:%.*]] = bitcast <2 x i64> %B to <4 x float>
+; CHECK-NEXT: [[TMP24:%.*]] = extractelement <2 x float> [[A:%.*]], i32 1
+; CHECK-NEXT: [[BC2:%.*]] = bitcast <2 x i64> [[B:%.*]] to <4 x float>
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x float> [[BC2]], i32 2
; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP24]], [[TMP4]]
; CHECK-NEXT: ret float [[ADD]]
@@ -290,7 +290,7 @@ define float @test3(<2 x float> %A, <2 x i64> %B) {
define float @bitcast_extelt1(<2 x float> %A) {
; CHECK-LABEL: @bitcast_extelt1(
-; CHECK-NEXT: [[BC2:%.*]] = extractelement <2 x float> %A, i32 0
+; CHECK-NEXT: [[BC2:%.*]] = extractelement <2 x float> [[A:%.*]], i32 0
; CHECK-NEXT: ret float [[BC2]]
;
%bc1 = bitcast <2 x float> %A to <2 x i32>
@@ -303,7 +303,7 @@ define float @bitcast_extelt1(<2 x float> %A) {
define i64 @bitcast_extelt2(<4 x float> %A) {
; CHECK-LABEL: @bitcast_extelt2(
-; CHECK-NEXT: [[BC:%.*]] = bitcast <4 x float> %A to <2 x i64>
+; CHECK-NEXT: [[BC:%.*]] = bitcast <4 x float> [[A:%.*]] to <2 x i64>
; CHECK-NEXT: [[BC2:%.*]] = extractelement <2 x i64> [[BC]], i32 1
; CHECK-NEXT: ret i64 [[BC2]]
;
@@ -317,7 +317,7 @@ define i64 @bitcast_extelt2(<4 x float> %A) {
define <2 x i32> @bitcast_extelt3(<2 x i32> %A) {
; CHECK-LABEL: @bitcast_extelt3(
-; CHECK-NEXT: [[BC1:%.*]] = bitcast <2 x i32> %A to <1 x i64>
+; CHECK-NEXT: [[BC1:%.*]] = bitcast <2 x i32> [[A:%.*]] to <1 x i64>
; CHECK-NEXT: [[EXT:%.*]] = extractelement <1 x i64> [[BC1]], i32 0
; CHECK-NEXT: [[BC2:%.*]] = bitcast i64 [[EXT]] to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[BC2]]
@@ -332,7 +332,7 @@ define <2 x i32> @bitcast_extelt3(<2 x i32> %A) {
define double @bitcast_extelt4(i128 %A) {
; CHECK-LABEL: @bitcast_extelt4(
-; CHECK-NEXT: [[BC:%.*]] = bitcast i128 %A to <2 x double>
+; CHECK-NEXT: [[BC:%.*]] = bitcast i128 [[A:%.*]] to <2 x double>
; CHECK-NEXT: [[BC2:%.*]] = extractelement <2 x double> [[BC]], i32 0
; CHECK-NEXT: ret double [[BC2]]
;
@@ -344,8 +344,8 @@ define double @bitcast_extelt4(i128 %A) {
define <2 x i32> @test4(i32 %A, i32 %B){
; CHECK-LABEL: @test4(
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i32> undef, i32 %A, i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i32> [[TMP1]], i32 %B, i32 1
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i32> undef, i32 [[A:%.*]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i32> [[TMP1]], i32 [[B:%.*]], i32 1
; CHECK-NEXT: ret <2 x i32> [[TMP2]]
;
%tmp38 = zext i32 %A to i64
@@ -359,8 +359,8 @@ define <2 x i32> @test4(i32 %A, i32 %B){
; rdar://8360454
define <2 x float> @test5(float %A, float %B) {
; CHECK-LABEL: @test5(
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x float> undef, float %A, i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x float> [[TMP1]], float %B, i32 1
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x float> undef, float [[A:%.*]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x float> [[TMP1]], float [[B:%.*]], i32 1
; CHECK-NEXT: ret <2 x float> [[TMP2]]
;
%tmp37 = bitcast float %A to i32
@@ -375,7 +375,7 @@ define <2 x float> @test5(float %A, float %B) {
define <2 x float> @test6(float %A){
; CHECK-LABEL: @test6(
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x float> <float 4.200000e+01, float undef>, float %A, i32 1
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x float> <float 4.200000e+01, float undef>, float [[A:%.*]], i32 1
; CHECK-NEXT: ret <2 x float> [[TMP1]]
;
%tmp23 = bitcast float %A to i32
@@ -422,7 +422,7 @@ define i32 @All111(i32 %in) {
define <2 x i16> @BitcastInsert(i32 %a) {
; CHECK-LABEL: @BitcastInsert(
-; CHECK-NEXT: [[R:%.*]] = bitcast i32 %a to <2 x i16>
+; CHECK-NEXT: [[R:%.*]] = bitcast i32 [[A:%.*]] to <2 x i16>
; CHECK-NEXT: ret <2 x i16> [[R]]
;
%v = insertelement <1 x i32> undef, i32 %a, i32 0
@@ -433,7 +433,7 @@ define <2 x i16> @BitcastInsert(i32 %a) {
; PR17293
define <2 x i64> @test7(<2 x i8*>* %arg) nounwind {
; CHECK-LABEL: @test7(
-; CHECK-NEXT: [[CAST:%.*]] = bitcast <2 x i8*>* %arg to <2 x i64>*
+; CHECK-NEXT: [[CAST:%.*]] = bitcast <2 x i8*>* [[ARG:%.*]] to <2 x i64>*
; CHECK-NEXT: [[LOAD:%.*]] = load <2 x i64>, <2 x i64>* [[CAST]], align 16
; CHECK-NEXT: ret <2 x i64> [[LOAD]]
;
@@ -452,25 +452,24 @@ define i8 @test8() {
@g = internal unnamed_addr global i32 undef
-; CHECK-LABEL: @constant_fold_vector_to_double(
-; CHECK: store volatile double 1.000000e+00,
-; CHECK: store volatile double 1.000000e+00,
-; CHECK: store volatile double 1.000000e+00,
-; CHECK: store volatile double 1.000000e+00,
-
-; CHECK: store volatile double 0xFFFFFFFFFFFFFFFF,
-; CHECK: store volatile double 0x162E000004D2,
-
-; CHECK: store volatile double bitcast (<2 x i32> <i32 1234, i32 ptrtoint (i32* @g to i32)> to double),
-; CHECK: store volatile double 0x400000003F800000,
-
-; CHECK: store volatile double 0.000000e+00,
-; CHECK: store volatile double 0.000000e+00,
-; CHECK: store volatile double 0.000000e+00,
-; CHECK: store volatile double 0.000000e+00,
-; CHECK: store volatile double 0.000000e+00,
-; CHECK: store volatile double 0.000000e+00,
define void @constant_fold_vector_to_double() {
+; CHECK-LABEL: @constant_fold_vector_to_double(
+; CHECK-NEXT: store volatile double 1.000000e+00, double* undef, align 8
+; CHECK-NEXT: store volatile double 1.000000e+00, double* undef, align 8
+; CHECK-NEXT: store volatile double 1.000000e+00, double* undef, align 8
+; CHECK-NEXT: store volatile double 1.000000e+00, double* undef, align 8
+; CHECK-NEXT: store volatile double 0xFFFFFFFFFFFFFFFF, double* undef, align 8
+; CHECK-NEXT: store volatile double 0x162E000004D2, double* undef, align 8
+; CHECK-NEXT: store volatile double bitcast (<2 x i32> <i32 1234, i32 ptrtoint (i32* @g to i32)> to double), double* undef, align 8
+; CHECK-NEXT: store volatile double 0x400000003F800000, double* undef, align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, double* undef, align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, double* undef, align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, double* undef, align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, double* undef, align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, double* undef, align 8
+; CHECK-NEXT: store volatile double 0.000000e+00, double* undef, align 8
+; CHECK-NEXT: ret void
+;
store volatile double bitcast (<1 x i64> <i64 4607182418800017408> to double), double* undef
store volatile double bitcast (<2 x i32> <i32 0, i32 1072693248> to double), double* undef
store volatile double bitcast (<4 x i16> <i16 0, i16 0, i16 0, i16 16368> to double), double* undef
@@ -491,12 +490,14 @@ define void @constant_fold_vector_to_double() {
ret void
}
-; CHECK-LABEL: @constant_fold_vector_to_float(
-; CHECK: store volatile float 1.000000e+00,
-; CHECK: store volatile float 1.000000e+00,
-; CHECK: store volatile float 1.000000e+00,
-; CHECK: store volatile float 1.000000e+00,
define void @constant_fold_vector_to_float() {
+; CHECK-LABEL: @constant_fold_vector_to_float(
+; CHECK-NEXT: store volatile float 1.000000e+00, float* undef, align 4
+; CHECK-NEXT: store volatile float 1.000000e+00, float* undef, align 4
+; CHECK-NEXT: store volatile float 1.000000e+00, float* undef, align 4
+; CHECK-NEXT: store volatile float 1.000000e+00, float* undef, align 4
+; CHECK-NEXT: ret void
+;
store volatile float bitcast (<1 x i32> <i32 1065353216> to float), float* undef
store volatile float bitcast (<2 x i16> <i16 0, i16 16256> to float), float* undef
store volatile float bitcast (<4 x i8> <i8 0, i8 0, i8 128, i8 63> to float), float* undef
@@ -505,10 +506,12 @@ define void @constant_fold_vector_to_float() {
ret void
}
-; CHECK-LABEL: @constant_fold_vector_to_half(
-; CHECK: store volatile half 0xH4000,
-; CHECK: store volatile half 0xH4000,
define void @constant_fold_vector_to_half() {
+; CHECK-LABEL: @constant_fold_vector_to_half(
+; CHECK-NEXT: store volatile half 0xH4000, half* undef, align 2
+; CHECK-NEXT: store volatile half 0xH4000, half* undef, align 2
+; CHECK-NEXT: ret void
+;
store volatile half bitcast (<2 x i8> <i8 0, i8 64> to half), half* undef
store volatile half bitcast (<4 x i4> <i4 0, i4 0, i4 0, i4 4> to half), half* undef
ret void
diff --git a/test/Transforms/InstCombine/bitreverse-fold.ll b/test/Transforms/InstCombine/bitreverse-fold.ll
index ecdfbc8cb5f9..b798ad33b3f0 100644
--- a/test/Transforms/InstCombine/bitreverse-fold.ll
+++ b/test/Transforms/InstCombine/bitreverse-fold.ll
@@ -37,6 +37,13 @@ define i32 @reverse_neg1_i32() {
ret i32 %x
}
+; CHECK-LABEL: @reverse_undef_i32(
+; CHECK-NEXT: ret i32 undef
+define i32 @reverse_undef_i32() {
+ %x = call i32 @llvm.bitreverse.i32(i32 undef)
+ ret i32 %x
+}
+
; CHECK-LABEL: @reverse_false_i1(
; CHECK-NEXT: ret i1 false
define i1 @reverse_false_i1() {
@@ -51,6 +58,13 @@ define i1 @reverse_true_i1() {
ret i1 %x
}
+; CHECK-LABEL: @reverse_undef_i1(
+; CHECK-NEXT: ret i1 undef
+define i1 @reverse_undef_i1() {
+ %x = call i1 @llvm.bitreverse.i1(i1 undef)
+ ret i1 %x
+}
+
; CHECK-LABEL: @reverse_false_v2i1(
; CHECK-NEXT: ret <2 x i1> zeroinitializer
define <2 x i1> @reverse_false_v2i1() {
diff --git a/test/Transforms/InstCombine/bitreverse-known-bits.ll b/test/Transforms/InstCombine/bitreverse-known-bits.ll
new file mode 100644
index 000000000000..cd1523a3b06b
--- /dev/null
+++ b/test/Transforms/InstCombine/bitreverse-known-bits.ll
@@ -0,0 +1,51 @@
+; RUN: opt < %s -S -instcombine | FileCheck %s
+
+declare i8 @llvm.bitreverse.i8(i8)
+declare i32 @llvm.bitreverse.i32(i32)
+
+; CHECK-LABEL: @test1
+; CHECK: ret i1 true
+define i1 @test1(i32 %arg) {
+ %a = or i32 %arg, 4294901760
+ %b = call i32 @llvm.bitreverse.i32(i32 %a)
+ %and = and i32 %b, 65535
+ %res = icmp eq i32 %and, 65535
+ ret i1 %res
+}
+
+; CHECK-LABEL: @test2
+; CHECK: ret i1 true
+define i1 @test2(i32 %arg) {
+ %a = or i32 %arg, 1
+ %b = call i32 @llvm.bitreverse.i32(i32 %a)
+ %c = and i32 %b, 2147483648
+ %d = call i32 @llvm.bitreverse.i32(i32 %c)
+ %res = icmp eq i32 %d, 1
+ ret i1 %res
+}
+
+; CHECK-LABEL: @test3
+; CHECK: ret i1 false
+define i1 @test3(i32 %arg) {
+ %a = or i32 %arg, 65536
+ %b = call i32 @llvm.bitreverse.i32(i32 %a)
+ %and = and i32 %b, 32768
+ %res = icmp eq i32 %and, 0
+ ret i1 %res
+}
+
+; CHECK-LABEL: @add_bitreverse
+; Make sure we process range metadata on bitreverse
+define i8 @add_bitreverse(i8 %a) {
+ %b = and i8 %a, 252
+ ; known bits for the bitreverse will say the result is in the range [0, 64)
+ ; but the metadata says [0, 16). So make sure the range metadata wins.
+ ; add %reverse, 1111 0000
+ ; should become
+ ; or %reverse, 1111 0000
+ %reverse = call i8 @llvm.bitreverse.i8(i8 %b), !range !1
+ %c = add i8 %reverse, -16
+; CHECK: or i8 %reverse, -16
+ ret i8 %c
+}
+!1 = !{i8 0, i8 16}
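The add-to-or rewrite that @add_bitreverse checks falls out of known-bits reasoning: the !range metadata pins %reverse to [0, 16), so its top four bits are known zero and cannot overlap the constant -16 (1111 0000); an add whose operands share no set bits produces no carries and is therefore the same as an or. A minimal before/after sketch (not part of the patch):

  ; %r carries !range [0, 16), so bits 4-7 are known zero
  %c = add i8 %r, -16   ; -16 = 1111 0000 touches only bits 4-7
  ; no bit positions overlap, so the add cannot carry:
  %c = or i8 %r, -16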
diff --git a/test/Transforms/InstCombine/bswap-fold.ll b/test/Transforms/InstCombine/bswap-fold.ll
index edf9572f1e11..91678a91962a 100644
--- a/test/Transforms/InstCombine/bswap-fold.ll
+++ b/test/Transforms/InstCombine/bswap-fold.ll
@@ -1,68 +1,75 @@
; RUN: opt < %s -instcombine -S | FileCheck %s
-define i1 @test1(i16 %tmp2) {
-; CHECK-LABEL: @test1
-; CHECK-NEXT: %tmp = icmp eq i16 %tmp2, 256
-; CHECK-NEXT: ret i1 %tmp
- %tmp10 = call i16 @llvm.bswap.i16( i16 %tmp2 )
- %tmp = icmp eq i16 %tmp10, 1
- ret i1 %tmp
+define i1 @test1(i16 %t) {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i16 %t, 256
+; CHECK-NEXT: ret i1 [[TMP2]]
+;
+ %tmp1 = call i16 @llvm.bswap.i16( i16 %t )
+ %tmp2 = icmp eq i16 %tmp1, 1
+ ret i1 %tmp2
}
define i1 @test2(i32 %tmp) {
-; CHECK-LABEL: @test2
-; CHECK-NEXT: %tmp.upgrd.1 = icmp eq i32 %tmp, 16777216
-; CHECK-NEXT: ret i1 %tmp.upgrd.1
- %tmp34 = tail call i32 @llvm.bswap.i32( i32 %tmp )
- %tmp.upgrd.1 = icmp eq i32 %tmp34, 1
- ret i1 %tmp.upgrd.1
+; CHECK-LABEL: @test2(
+; CHECK-NEXT: [[TMP_UPGRD_1:%.*]] = icmp eq i32 %tmp, 16777216
+; CHECK-NEXT: ret i1 [[TMP_UPGRD_1]]
+;
+ %tmp34 = tail call i32 @llvm.bswap.i32( i32 %tmp )
+ %tmp.upgrd.1 = icmp eq i32 %tmp34, 1
+ ret i1 %tmp.upgrd.1
}
define i1 @test3(i64 %tmp) {
-; CHECK-LABEL: @test3
-; CHECK-NEXT: %tmp.upgrd.2 = icmp eq i64 %tmp, 72057594037927936
-; CHECK-NEXT: ret i1 %tmp.upgrd.2
- %tmp34 = tail call i64 @llvm.bswap.i64( i64 %tmp )
- %tmp.upgrd.2 = icmp eq i64 %tmp34, 1
- ret i1 %tmp.upgrd.2
+; CHECK-LABEL: @test3(
+; CHECK-NEXT: [[TMP_UPGRD_2:%.*]] = icmp eq i64 %tmp, 72057594037927936
+; CHECK-NEXT: ret i1 [[TMP_UPGRD_2]]
+;
+ %tmp34 = tail call i64 @llvm.bswap.i64( i64 %tmp )
+ %tmp.upgrd.2 = icmp eq i64 %tmp34, 1
+ ret i1 %tmp.upgrd.2
}
; rdar://5992453
; A & 255
define i32 @test4(i32 %a) nounwind {
-; CHECK-LABEL: @test4
-; CHECK-NEXT: %tmp2 = and i32 %a, 255
-; CHECK-NEXT: ret i32 %tmp2
- %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
- %tmp4 = lshr i32 %tmp2, 24
- ret i32 %tmp4
+; CHECK-LABEL: @test4(
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 %a, 255
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
+ %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
+ %tmp4 = lshr i32 %tmp2, 24
+ ret i32 %tmp4
}
; A
define i32 @test5(i32 %a) nounwind {
-; CHECK-LABEL: @test5
-; CHECK-NEXT: ret i32 %a
- %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
- %tmp4 = tail call i32 @llvm.bswap.i32( i32 %tmp2 )
- ret i32 %tmp4
+; CHECK-LABEL: @test5(
+; CHECK-NEXT: ret i32 %a
+;
+ %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
+ %tmp4 = tail call i32 @llvm.bswap.i32( i32 %tmp2 )
+ ret i32 %tmp4
}
; a >> 24
define i32 @test6(i32 %a) nounwind {
-; CHECK-LABEL: @test6
-; CHECK-NEXT: %tmp2 = lshr i32 %a, 24
-; CHECK-NEXT: ret i32 %tmp2
- %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
- %tmp4 = and i32 %tmp2, 255
- ret i32 %tmp4
+; CHECK-LABEL: @test6(
+; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 %a, 24
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
+ %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
+ %tmp4 = and i32 %tmp2, 255
+ ret i32 %tmp4
}
; PR5284
define i16 @test7(i32 %A) {
-; CHECK-LABEL: @test7
-; CHECK-NEXT: %1 = lshr i32 %A, 16
-; CHECK-NEXT: %D = trunc i32 %1 to i16
-; CHECK-NEXT: ret i16 %D
+; CHECK-LABEL: @test7(
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 %A, 16
+; CHECK-NEXT: [[D:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT: ret i16 [[D]]
+;
%B = tail call i32 @llvm.bswap.i32(i32 %A) nounwind
%C = trunc i32 %B to i16
%D = tail call i16 @llvm.bswap.i16(i16 %C) nounwind
@@ -70,11 +77,12 @@ define i16 @test7(i32 %A) {
}
define i16 @test8(i64 %A) {
-; CHECK-LABEL: @test8
-; CHECK-NEXT: %1 = lshr i64 %A, 48
-; CHECK-NEXT: %D = trunc i64 %1 to i16
-; CHECK-NEXT: ret i16 %D
- %B = tail call i64 @llvm.bswap.i64(i64 %A) nounwind
+; CHECK-LABEL: @test8(
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 %A, 48
+; CHECK-NEXT: [[D:%.*]] = trunc i64 [[TMP1]] to i16
+; CHECK-NEXT: ret i16 [[D]]
+;
+ %B = tail call i64 @llvm.bswap.i64(i64 %A) nounwind
%C = trunc i64 %B to i16
%D = tail call i16 @llvm.bswap.i16(i16 %C) nounwind
ret i16 %D
@@ -82,8 +90,9 @@ define i16 @test8(i64 %A) {
; Misc: Fold bswap(undef) to undef.
define i64 @foo() {
-; CHECK-LABEL: @foo
-; CHECK-NEXT: ret i64 undef
+; CHECK-LABEL: @foo(
+; CHECK-NEXT: ret i64 undef
+;
%a = call i64 @llvm.bswap.i64(i64 undef)
ret i64 %a
}
@@ -92,20 +101,22 @@ define i64 @foo() {
; Fold: OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
; Fold: OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
define i16 @bs_and16i(i16 %a, i16 %b) #0 {
-; CHECK-LABEL: @bs_and16i
-; CHECK-NEXT: %1 = and i16 %a, 4391
-; CHECK-NEXT: %2 = call i16 @llvm.bswap.i16(i16 %1)
-; CHECK-NEXT: ret i16 %2
+; CHECK-LABEL: @bs_and16i(
+; CHECK-NEXT: [[TMP1:%.*]] = and i16 %a, 4391
+; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP1]])
+; CHECK-NEXT: ret i16 [[TMP2]]
+;
%1 = tail call i16 @llvm.bswap.i16(i16 %a)
%2 = and i16 %1, 10001
ret i16 %2
}
define i16 @bs_and16(i16 %a, i16 %b) #0 {
-; CHECK-LABEL: @bs_and16
-; CHECK-NEXT: %1 = and i16 %a, %b
-; CHECK-NEXT: %2 = call i16 @llvm.bswap.i16(i16 %1)
-; CHECK-NEXT: ret i16 %2
+; CHECK-LABEL: @bs_and16(
+; CHECK-NEXT: [[TMP1:%.*]] = and i16 %a, %b
+; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP1]])
+; CHECK-NEXT: ret i16 [[TMP2]]
+;
%tmp1 = tail call i16 @llvm.bswap.i16(i16 %a)
%tmp2 = tail call i16 @llvm.bswap.i16(i16 %b)
%tmp3 = and i16 %tmp1, %tmp2
@@ -113,10 +124,11 @@ define i16 @bs_and16(i16 %a, i16 %b) #0 {
}
define i16 @bs_or16(i16 %a, i16 %b) #0 {
-; CHECK-LABEL: @bs_or16
-; CHECK-NEXT: %1 = or i16 %a, %b
-; CHECK-NEXT: %2 = call i16 @llvm.bswap.i16(i16 %1)
-; CHECK-NEXT: ret i16 %2
+; CHECK-LABEL: @bs_or16(
+; CHECK-NEXT: [[TMP1:%.*]] = or i16 %a, %b
+; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP1]])
+; CHECK-NEXT: ret i16 [[TMP2]]
+;
%tmp1 = tail call i16 @llvm.bswap.i16(i16 %a)
%tmp2 = tail call i16 @llvm.bswap.i16(i16 %b)
%tmp3 = or i16 %tmp1, %tmp2
@@ -124,10 +136,11 @@ define i16 @bs_or16(i16 %a, i16 %b) #0 {
}
define i16 @bs_xor16(i16 %a, i16 %b) #0 {
-; CHECK-LABEL: @bs_xor16
-; CHECK-NEXT: %1 = xor i16 %a, %b
-; CHECK-NEXT: %2 = call i16 @llvm.bswap.i16(i16 %1)
-; CHECK-NEXT: ret i16 %2
+; CHECK-LABEL: @bs_xor16(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i16 %a, %b
+; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP1]])
+; CHECK-NEXT: ret i16 [[TMP2]]
+;
%tmp1 = tail call i16 @llvm.bswap.i16(i16 %a)
%tmp2 = tail call i16 @llvm.bswap.i16(i16 %b)
%tmp3 = xor i16 %tmp1, %tmp2
@@ -135,20 +148,22 @@ define i16 @bs_xor16(i16 %a, i16 %b) #0 {
}
define i32 @bs_and32i(i32 %a, i32 %b) #0 {
-; CHECK-LABEL: @bs_and32i
-; CHECK-NEXT: %1 = and i32 %a, -1585053440
-; CHECK-NEXT: %2 = call i32 @llvm.bswap.i32(i32 %1)
-; CHECK-NEXT: ret i32 %2
+; CHECK-LABEL: @bs_and32i(
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 %a, -1585053440
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]])
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
%tmp1 = tail call i32 @llvm.bswap.i32(i32 %a)
%tmp2 = and i32 %tmp1, 100001
ret i32 %tmp2
}
define i32 @bs_and32(i32 %a, i32 %b) #0 {
-; CHECK-LABEL: @bs_and32
-; CHECK-NEXT: %1 = and i32 %a, %b
-; CHECK-NEXT: %2 = call i32 @llvm.bswap.i32(i32 %1)
-; CHECK-NEXT: ret i32 %2
+; CHECK-LABEL: @bs_and32(
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 %a, %b
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]])
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
%tmp1 = tail call i32 @llvm.bswap.i32(i32 %a)
%tmp2 = tail call i32 @llvm.bswap.i32(i32 %b)
%tmp3 = and i32 %tmp1, %tmp2
@@ -156,10 +171,11 @@ define i32 @bs_and32(i32 %a, i32 %b) #0 {
}
define i32 @bs_or32(i32 %a, i32 %b) #0 {
-; CHECK-LABEL: @bs_or32
-; CHECK-NEXT: %1 = or i32 %a, %b
-; CHECK-NEXT: %2 = call i32 @llvm.bswap.i32(i32 %1)
-; CHECK-NEXT: ret i32 %2
+; CHECK-LABEL: @bs_or32(
+; CHECK-NEXT: [[TMP1:%.*]] = or i32 %a, %b
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]])
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
%tmp1 = tail call i32 @llvm.bswap.i32(i32 %a)
%tmp2 = tail call i32 @llvm.bswap.i32(i32 %b)
%tmp3 = or i32 %tmp1, %tmp2
@@ -167,10 +183,11 @@ define i32 @bs_or32(i32 %a, i32 %b) #0 {
}
define i32 @bs_xor32(i32 %a, i32 %b) #0 {
-; CHECK-LABEL: @bs_xor32
-; CHECK-NEXT: %1 = xor i32 %a, %b
-; CHECK-NEXT: %2 = call i32 @llvm.bswap.i32(i32 %1)
-; CHECK-NEXT: ret i32 %2
+; CHECK-LABEL: @bs_xor32(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 %a, %b
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]])
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
%tmp1 = tail call i32 @llvm.bswap.i32(i32 %a)
%tmp2 = tail call i32 @llvm.bswap.i32(i32 %b)
%tmp3 = xor i32 %tmp1, %tmp2
@@ -178,20 +195,22 @@ define i32 @bs_xor32(i32 %a, i32 %b) #0 {
}
define i64 @bs_and64i(i64 %a, i64 %b) #0 {
-; CHECK-LABEL: @bs_and64i
-; CHECK-NEXT: %1 = and i64 %a, 129085117527228416
-; CHECK-NEXT: %2 = call i64 @llvm.bswap.i64(i64 %1)
-; CHECK-NEXT: ret i64 %2
+; CHECK-LABEL: @bs_and64i(
+; CHECK-NEXT: [[TMP1:%.*]] = and i64 %a, 129085117527228416
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP1]])
+; CHECK-NEXT: ret i64 [[TMP2]]
+;
%tmp1 = tail call i64 @llvm.bswap.i64(i64 %a)
%tmp2 = and i64 %tmp1, 1000000001
ret i64 %tmp2
}
define i64 @bs_and64(i64 %a, i64 %b) #0 {
-; CHECK-LABEL: @bs_and64
-; CHECK-NEXT: %1 = and i64 %a, %b
-; CHECK-NEXT: %2 = call i64 @llvm.bswap.i64(i64 %1)
-; CHECK-NEXT: ret i64 %2
+; CHECK-LABEL: @bs_and64(
+; CHECK-NEXT: [[TMP1:%.*]] = and i64 %a, %b
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP1]])
+; CHECK-NEXT: ret i64 [[TMP2]]
+;
%tmp1 = tail call i64 @llvm.bswap.i64(i64 %a)
%tmp2 = tail call i64 @llvm.bswap.i64(i64 %b)
%tmp3 = and i64 %tmp1, %tmp2
@@ -199,10 +218,11 @@ define i64 @bs_and64(i64 %a, i64 %b) #0 {
}
define i64 @bs_or64(i64 %a, i64 %b) #0 {
-; CHECK-LABEL: @bs_or64
-; CHECK-NEXT: %1 = or i64 %a, %b
-; CHECK-NEXT: %2 = call i64 @llvm.bswap.i64(i64 %1)
-; CHECK-NEXT: ret i64 %2
+; CHECK-LABEL: @bs_or64(
+; CHECK-NEXT: [[TMP1:%.*]] = or i64 %a, %b
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP1]])
+; CHECK-NEXT: ret i64 [[TMP2]]
+;
%tmp1 = tail call i64 @llvm.bswap.i64(i64 %a)
%tmp2 = tail call i64 @llvm.bswap.i64(i64 %b)
%tmp3 = or i64 %tmp1, %tmp2
@@ -210,10 +230,11 @@ define i64 @bs_or64(i64 %a, i64 %b) #0 {
}
define i64 @bs_xor64(i64 %a, i64 %b) #0 {
-; CHECK-LABEL: @bs_xor64
-; CHECK-NEXT: %1 = xor i64 %a, %b
-; CHECK-NEXT: %2 = call i64 @llvm.bswap.i64(i64 %1)
-; CHECK-NEXT: ret i64 %2
+; CHECK-LABEL: @bs_xor64(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i64 %a, %b
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP1]])
+; CHECK-NEXT: ret i64 [[TMP2]]
+;
%tmp1 = tail call i64 @llvm.bswap.i64(i64 %a)
%tmp2 = tail call i64 @llvm.bswap.i64(i64 %b)
%tmp3 = xor i64 %tmp1, %tmp2
diff --git a/test/Transforms/InstCombine/builtin-object-size-offset.ll b/test/Transforms/InstCombine/builtin-object-size-offset.ll
index 7ab24a9acd94..248cf644df89 100644
--- a/test/Transforms/InstCombine/builtin-object-size-offset.ll
+++ b/test/Transforms/InstCombine/builtin-object-size-offset.ll
@@ -26,25 +26,25 @@ entry:
%Big = alloca [20 x i8], align 16
%Small = alloca [10 x i8], align 1
%0 = getelementptr inbounds [20 x i8], [20 x i8]* %Big, i64 0, i64 0
- call void @llvm.lifetime.start(i64 20, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 20, i8* %0)
%1 = getelementptr inbounds [10 x i8], [10 x i8]* %Small, i64 0, i64 0
- call void @llvm.lifetime.start(i64 10, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 10, i8* %1)
%tobool = icmp ne i32 %N, 0
%add.ptr = getelementptr inbounds [20 x i8], [20 x i8]* %Big, i64 0, i64 10
%cond = select i1 %tobool, i8* %add.ptr, i8* %1
%2 = call i64 @llvm.objectsize.i64.p0i8(i8* %cond, i1 false)
%conv = trunc i64 %2 to i32
- call void @llvm.lifetime.end(i64 10, i8* %1)
- call void @llvm.lifetime.end(i64 20, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 10, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 20, i8* %0)
ret i32 %conv
; CHECK: ret i32 10
}
-declare void @llvm.lifetime.start(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare i64 @llvm.objectsize.i64.p0i8(i8*, i1)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
define void @foo() {
entry:
diff --git a/test/Transforms/InstCombine/builtin-object-size-ptr.ll b/test/Transforms/InstCombine/builtin-object-size-ptr.ll
index b38513999dc1..ada3fc167026 100644
--- a/test/Transforms/InstCombine/builtin-object-size-ptr.ll
+++ b/test/Transforms/InstCombine/builtin-object-size-ptr.ll
@@ -16,19 +16,19 @@ define i32 @foo() #0 {
entry:
%var = alloca %struct.V, align 4
%0 = bitcast %struct.V* %var to i8*
- call void @llvm.lifetime.start(i64 28, i8* %0) #3
+ call void @llvm.lifetime.start.p0i8(i64 28, i8* %0) #3
%buf1 = getelementptr inbounds %struct.V, %struct.V* %var, i32 0, i32 0
%arrayidx = getelementptr inbounds [10 x i8], [10 x i8]* %buf1, i64 0, i64 1
%1 = call i64 @llvm.objectsize.i64.p0i8(i8* %arrayidx, i1 false)
%conv = trunc i64 %1 to i32
- call void @llvm.lifetime.end(i64 28, i8* %0) #3
+ call void @llvm.lifetime.end.p0i8(i64 28, i8* %0) #3
ret i32 %conv
; CHECK: ret i32 27
; CHECK-NOT: ret i32 -1
}
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare i64 @llvm.objectsize.i64.p0i8(i8*, i1) #2
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
diff --git a/test/Transforms/InstCombine/call-guard.ll b/test/Transforms/InstCombine/call-guard.ll
new file mode 100644
index 000000000000..9664467f914b
--- /dev/null
+++ b/test/Transforms/InstCombine/call-guard.ll
@@ -0,0 +1,32 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+declare void @llvm.experimental.guard(i1, ...)
+
+define void @test_guard_adjacent_same_cond(i1 %A) {
+; CHECK-LABEL: @test_guard_adjacent_same_cond(
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %A) [ "deopt"() ]
+; CHECK-NEXT: ret void
+ call void(i1, ...) @llvm.experimental.guard( i1 %A )[ "deopt"() ]
+ call void(i1, ...) @llvm.experimental.guard( i1 %A )[ "deopt"() ]
+ call void(i1, ...) @llvm.experimental.guard( i1 %A )[ "deopt"() ]
+ call void(i1, ...) @llvm.experimental.guard( i1 %A )[ "deopt"() ]
+ call void(i1, ...) @llvm.experimental.guard( i1 %A )[ "deopt"() ]
+ call void(i1, ...) @llvm.experimental.guard( i1 %A )[ "deopt"() ]
+ call void(i1, ...) @llvm.experimental.guard( i1 %A )[ "deopt"() ]
+ call void(i1, ...) @llvm.experimental.guard( i1 %A )[ "deopt"() ]
+ call void(i1, ...) @llvm.experimental.guard( i1 %A )[ "deopt"() ]
+ call void(i1, ...) @llvm.experimental.guard( i1 %A )[ "deopt"() ]
+ ret void
+}
+
+define void @test_guard_adjacent_diff_cond(i1 %A, i1 %B, i1 %C) {
+; CHECK-LABEL: @test_guard_adjacent_diff_cond(
+; CHECK-NEXT: %1 = and i1 %A, %B
+; CHECK-NEXT: %2 = and i1 %1, %C
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %2, i32 123) [ "deopt"() ]
+; CHECK-NEXT: ret void
+ call void(i1, ...) @llvm.experimental.guard( i1 %A, i32 123 )[ "deopt"() ]
+ call void(i1, ...) @llvm.experimental.guard( i1 %B, i32 456 )[ "deopt"() ]
+ call void(i1, ...) @llvm.experimental.guard( i1 %C, i32 789 )[ "deopt"() ]
+ ret void
+}
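Both tests above rest on the same observation: a run of adjacent guards deoptimizes exactly when some condition in the run is false, so identical guards deduplicate and differing ones combine into one guard on the conjunction. A minimal sketch (not part of the patch):

  call void (i1, ...) @llvm.experimental.guard(i1 %a) [ "deopt"() ]
  call void (i1, ...) @llvm.experimental.guard(i1 %b) [ "deopt"() ]
  ; becomes a single guard on the conjunction:
  %c = and i1 %a, %b
  call void (i1, ...) @llvm.experimental.guard(i1 %c) [ "deopt"() ]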
diff --git a/test/Transforms/InstCombine/call_nonnull_arg.ll b/test/Transforms/InstCombine/call_nonnull_arg.ll
index c502aa05731e..8127f4734fcd 100644
--- a/test/Transforms/InstCombine/call_nonnull_arg.ll
+++ b/test/Transforms/InstCombine/call_nonnull_arg.ll
@@ -31,7 +31,7 @@ dead:
unreachable
}
-; FIXME: The nonnull attribute in the 'bar' declaration could be
+; The nonnull attribute in the 'bar' declaration is
; propagated to the parameters of the 'baz' callsite.
declare void @bar(i8*, i8* nonnull)
@@ -40,7 +40,7 @@ declare void @baz(i8*, i8*)
define void @deduce_nonnull_from_another_call(i8* %a, i8* %b) {
; CHECK-LABEL: @deduce_nonnull_from_another_call(
; CHECK-NEXT: call void @bar(i8* %a, i8* %b)
-; CHECK-NEXT: call void @baz(i8* %b, i8* %b)
+; CHECK-NEXT: call void @baz(i8* nonnull %b, i8* nonnull %b)
; CHECK-NEXT: ret void
;
call void @bar(i8* %a, i8* %b)
diff --git a/test/Transforms/InstCombine/cast-call-combine-prof.ll b/test/Transforms/InstCombine/cast-call-combine-prof.ll
new file mode 100644
index 000000000000..05b71b666e24
--- /dev/null
+++ b/test/Transforms/InstCombine/cast-call-combine-prof.ll
@@ -0,0 +1,53 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+; Check that instcombine preserves !prof metadata when removing function
+; prototype casts.
+
+declare i32 @__gxx_personality_v0(...)
+declare void @__cxa_call_unexpected(i8*)
+declare void @foo(i16* %a)
+
+; CHECK-LABEL: @test_call()
+; CHECK: call void @foo(i16* null), !prof ![[PROF:[0-9]+]]
+define void @test_call() {
+ call void bitcast (void (i16*)* @foo to void (i8*)*) (i8* null), !prof !0
+ ret void
+}
+
+; CHECK-LABEL: @test_invoke()
+; CHECK: invoke void @foo(i16* null)
+; CHECK-NEXT: to label %done unwind label %lpad, !prof ![[PROF]]
+define void @test_invoke() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+ invoke void bitcast (void (i16*)* @foo to void (i8*)*) (i8* null)
+ to label %done unwind label %lpad, !prof !0
+
+done:
+ ret void
+
+lpad:
+ %lp = landingpad { i8*, i32 }
+ filter [0 x i8*] zeroinitializer
+ %ehptr = extractvalue { i8*, i32 } %lp, 0
+ tail call void @__cxa_call_unexpected(i8* %ehptr) noreturn nounwind
+ unreachable
+}
+
+; CHECK: ![[PROF]] = !{!"branch_weights", i32 2000}
+!0 = !{!"VP", i32 0, i64 2000, i64 -3913987384944532146, i64 2000}
+
+!llvm.module.flags = !{!1}
+
+!1 = !{i32 1, !"ProfileSummary", !2}
+!2 = !{!3, !4, !5, !6, !7, !8, !9, !10}
+!3 = !{!"ProfileFormat", !"InstrProf"}
+!4 = !{!"TotalCount", i64 10000}
+!5 = !{!"MaxCount", i64 1000}
+!6 = !{!"MaxInternalCount", i64 1}
+!7 = !{!"MaxFunctionCount", i64 1000}
+!8 = !{!"NumCounts", i64 3}
+!9 = !{!"NumFunctions", i64 3}
+!10 = !{!"DetailedSummary", !11}
+!11 = !{!12, !13, !14}
+!12 = !{i32 10000, i64 1000, i32 1}
+!13 = !{i32 999000, i64 1000, i32 1}
+!14 = !{i32 999999, i64 1, i32 2}
diff --git a/test/Transforms/InstCombine/compare-alloca.ll b/test/Transforms/InstCombine/compare-alloca.ll
index ca24da191779..414a07825f2f 100644
--- a/test/Transforms/InstCombine/compare-alloca.ll
+++ b/test/Transforms/InstCombine/compare-alloca.ll
@@ -72,15 +72,15 @@ define i1 @alloca_argument_compare_escaped_through_store(i64* %arg, i64** %ptr)
; CHECK: ret i1 %cmp
}
-declare void @llvm.lifetime.start(i64, i8* nocapture)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
define i1 @alloca_argument_compare_benign_instrs(i8* %arg) {
%alloc = alloca i8
- call void @llvm.lifetime.start(i64 1, i8* %alloc)
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %alloc)
%cmp = icmp eq i8* %arg, %alloc
%x = load i8, i8* %arg
store i8 %x, i8* %alloc
- call void @llvm.lifetime.end(i64 1, i8* %alloc)
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %alloc)
ret i1 %cmp
; CHECK-LABEL: alloca_argument_compare_benign_instrs
; CHECK: ret i1 false
diff --git a/test/Transforms/InstCombine/compare-unescaped.ll b/test/Transforms/InstCombine/compare-unescaped.ll
index 0e512aa28911..d15fc2fd4495 100644
--- a/test/Transforms/InstCombine/compare-unescaped.ll
+++ b/test/Transforms/InstCombine/compare-unescaped.ll
@@ -144,7 +144,7 @@ chk2:
ret i8* %n
; CHECK-LABEL: compare_ret_escape
; CHECK: %cmp = icmp eq i8* %n, %c
-; CHECK: %cmp2 = icmp eq i32* %bc, %lgp
+; CHECK: %cmp2 = icmp eq i32* %lgp, %bc
}
; The malloc call for %m cannot be elided since it is used in the call to function f.
diff --git a/test/Transforms/InstCombine/consecutive-fences.ll b/test/Transforms/InstCombine/consecutive-fences.ll
new file mode 100644
index 000000000000..6f1c41277386
--- /dev/null
+++ b/test/Transforms/InstCombine/consecutive-fences.ll
@@ -0,0 +1,47 @@
+; RUN: opt -instcombine -S %s | FileCheck %s
+
+; Make sure we collapse the fences in this case
+
+; CHECK-LABEL: define void @tinkywinky
+; CHECK-NEXT: fence seq_cst
+; CHECK-NEXT: fence singlethread acquire
+; CHECK-NEXT: ret void
+; CHECK-NEXT: }
+
+define void @tinkywinky() {
+ fence seq_cst
+ fence seq_cst
+ fence seq_cst
+ fence singlethread acquire
+ fence singlethread acquire
+ fence singlethread acquire
+ ret void
+}
+
+; CHECK-LABEL: define void @dipsy
+; CHECK-NEXT: fence seq_cst
+; CHECK-NEXT: fence singlethread seq_cst
+; CHECK-NEXT: ret void
+; CHECK-NEXT: }
+
+define void @dipsy() {
+ fence seq_cst
+ fence singlethread seq_cst
+ ret void
+}
+
+; CHECK-LABEL: define void @patatino
+; CHECK-NEXT: fence acquire
+; CHECK-NEXT: fence seq_cst
+; CHECK-NEXT: fence acquire
+; CHECK-NEXT: fence seq_cst
+; CHECK-NEXT: ret void
+; CHECK-NEXT: }
+
+define void @patatino() {
+ fence acquire
+ fence seq_cst
+ fence acquire
+ fence seq_cst
+ ret void
+}
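The pattern the three tests above probe: in these cases a fence is removable only when it is immediately preceded by a fence with the same ordering and synchronization scope, so literal repeats collapse (@tinkywinky) while neighbors that differ in scope (@dipsy) or alternate orderings (@patatino) are all kept. A minimal sketch (not part of the patch):

  fence seq_cst
  fence seq_cst   ; identical to its predecessor -> removable
  ; becomes
  fence seq_cst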
diff --git a/test/Transforms/InstCombine/constant-fold-math.ll b/test/Transforms/InstCombine/constant-fold-math.ll
index ce8d337c08bf..50cd6070896e 100644
--- a/test/Transforms/InstCombine/constant-fold-math.ll
+++ b/test/Transforms/InstCombine/constant-fold-math.ll
@@ -45,12 +45,4 @@ define double @constant_fold_fmuladd_f64() #0 {
ret double %x
}
-; The sqrt intrinsic is undefined for negative inputs besides -0.0.
-; CHECK-LABEL: @bad_sqrt
-; CHECK-NEXT: ret double undef
-define double @bad_sqrt() {
- %x = call double @llvm.sqrt.f64(double -2.000000e+00)
- ret double %x
-}
-
attributes #0 = { nounwind readnone }
diff --git a/test/Transforms/InstCombine/convergent.ll b/test/Transforms/InstCombine/convergent.ll
index d4484cf4567e..9b9ae6f5352c 100644
--- a/test/Transforms/InstCombine/convergent.ll
+++ b/test/Transforms/InstCombine/convergent.ll
@@ -27,7 +27,7 @@ define i32 @no_extern() {
}
define i32 @indirect_call(i32 ()* %f) {
- ; CHECK call i32 %f() [[CONVERGENT_ATTR]]
+ ; CHECK: call i32 %f() [[CONVERGENT_ATTR]]
%a = call i32 %f() convergent
ret i32 %a
}
diff --git a/test/Transforms/InstCombine/deadcode.ll b/test/Transforms/InstCombine/deadcode.ll
index 8fe673d8c9c0..c5fa58babdbc 100644
--- a/test/Transforms/InstCombine/deadcode.ll
+++ b/test/Transforms/InstCombine/deadcode.ll
@@ -22,12 +22,12 @@ define i32* @test2(i32 %width) {
declare i8* @llvm.stacksave()
-declare void @llvm.lifetime.start(i64, i8*)
-declare void @llvm.lifetime.end(i64, i8*)
+declare void @llvm.lifetime.start.p0i8(i64, i8*)
+declare void @llvm.lifetime.end.p0i8(i64, i8*)
define void @test3() {
- call void @llvm.lifetime.start(i64 -1, i8* undef)
- call void @llvm.lifetime.end(i64 -1, i8* undef)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* undef)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* undef)
ret void
}
diff --git a/test/Transforms/InstCombine/debuginfo-dce.ll b/test/Transforms/InstCombine/debuginfo-dce.ll
new file mode 100644
index 000000000000..e23aef7334d5
--- /dev/null
+++ b/test/Transforms/InstCombine/debuginfo-dce.ll
@@ -0,0 +1,106 @@
+; RUN: opt -instcombine %s -S -o - | FileCheck %s
+; Verify that the eliminated instructions (bitcast, gep, load) are salvaged into
+; a DIExpression.
+;
+; Originally created from the following C source and then heavily isolated/reduced.
+;
+; struct entry {
+; struct entry *next;
+; };
+; void scan(struct entry *queue, struct entry *end)
+; {
+; struct entry *entry;
+; for (entry = (struct entry *)((char *)(queue->next) - 8);
+; &entry->next == end;
+; entry = (struct entry *)((char *)(entry->next) - 8)) {
+; }
+; }
+
+; ModuleID = '<stdin>'
+source_filename = "test.c"
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.12.0"
+
+%struct.entry = type { %struct.entry* }
+
+; Function Attrs: nounwind ssp uwtable
+define void @salvage_load(%struct.entry** %queue) local_unnamed_addr #0 !dbg !14 {
+entry:
+ %im_not_dead = alloca %struct.entry*
+ %0 = load %struct.entry*, %struct.entry** %queue, align 8, !dbg !19
+ %1 = load %struct.entry*, %struct.entry** %queue, align 8, !dbg !19
+ call void @llvm.dbg.value(metadata %struct.entry* %1, i64 0, metadata !18, metadata !20), !dbg !19
+; CHECK: define void @salvage_load
+; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @llvm.dbg.value(metadata %struct.entry** %queue, i64 0,
+; CHECK-SAME: metadata ![[LOAD_EXPR:[0-9]+]])
+ store %struct.entry* %1, %struct.entry** %im_not_dead, align 8
+ ret void, !dbg !21
+}
+
+; Function Attrs: nounwind ssp uwtable
+define void @salvage_bitcast(%struct.entry* %queue) local_unnamed_addr #0 !dbg !14 {
+entry:
+ %im_not_dead = alloca i8*
+ %0 = bitcast %struct.entry* %queue to i8*, !dbg !19
+ %1 = bitcast %struct.entry* %queue to i8*, !dbg !19
+ call void @llvm.dbg.value(metadata i8* %1, i64 0, metadata !18, metadata !20), !dbg !19
+; CHECK: define void @salvage_bitcast
+; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @llvm.dbg.value(metadata %struct.entry* %queue, i64 0,
+; CHECK-SAME: metadata ![[BITCAST_EXPR:[0-9]+]])
+ store i8* %1, i8** %im_not_dead, align 8
+ ret void, !dbg !21
+}
+
+; Function Attrs: nounwind ssp uwtable
+define void @salvage_gep(%struct.entry* %queue, %struct.entry* %end) local_unnamed_addr #0 !dbg !14 {
+entry:
+ %im_not_dead = alloca %struct.entry**
+ %0 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !19
+ %1 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !19
+ call void @llvm.dbg.value(metadata %struct.entry** %1, i64 0, metadata !18, metadata !20), !dbg !19
+; CHECK: define void @salvage_gep
+; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @llvm.dbg.value(metadata %struct.entry* %queue, i64 0,
+; CHECK-SAME: metadata ![[GEP_EXPR:[0-9]+]])
+ store %struct.entry** %1, %struct.entry*** %im_not_dead, align 8
+ ret void, !dbg !21
+}
+
+; CHECK: ![[LOAD_EXPR]] = !DIExpression(DW_OP_deref, DW_OP_plus, 0)
+; CHECK: ![[BITCAST_EXPR]] = !DIExpression(DW_OP_plus, 0)
+; CHECK: ![[GEP_EXPR]] = !DIExpression(DW_OP_minus, 8, DW_OP_plus, 0)
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
+
+attributes #0 = { nounwind ssp uwtable }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!10, !11, !12}
+!llvm.ident = !{!13}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 (trunk 297628) (llvm/trunk 297643)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !3)
+!1 = !DIFile(filename: "test.c", directory: "/")
+!2 = !{}
+!3 = !{!4, !8}
+!4 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !5, size: 64)
+!5 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "entry", file: !1, line: 1, size: 64, elements: !6)
+!6 = !{!7}
+!7 = !DIDerivedType(tag: DW_TAG_member, name: "next", scope: !5, file: !1, line: 2, baseType: !4, size: 64)
+!8 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !9, size: 64)
+!9 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char)
+!10 = !{i32 2, !"Dwarf Version", i32 4}
+!11 = !{i32 2, !"Debug Info Version", i32 3}
+!12 = !{i32 1, !"PIC Level", i32 2}
+!13 = !{!"clang version 5.0.0 (trunk 297628) (llvm/trunk 297643)"}
+!14 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !17)
+!15 = !DISubroutineType(types: !16)
+!16 = !{null, !4, !4}
+!17 = !{!18}
+!18 = !DILocalVariable(name: "entry", scope: !14, file: !1, line: 6, type: !4)
+!19 = !DILocation(line: 6, column: 17, scope: !14)
+!20 = !DIExpression(DW_OP_plus, 0)
+!21 = !DILocation(line: 11, column: 1, scope: !14)
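The CHECK lines above encode how each salvaged instruction lands in the variable's location expression: the deleted load contributes DW_OP_deref, the deleted gep (element -1 of an 8-byte struct, i.e. byte offset -8) contributes DW_OP_minus 8, and the value-preserving bitcast contributes nothing; each is prepended to the original DW_OP_plus 0. In sketch form (not part of the patch):

  load    -> !DIExpression(DW_OP_deref, DW_OP_plus, 0)
  gep -8  -> !DIExpression(DW_OP_minus, 8, DW_OP_plus, 0)
  bitcast -> !DIExpression(DW_OP_plus, 0)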
diff --git a/test/Transforms/InstCombine/double-float-shrink-2.ll b/test/Transforms/InstCombine/double-float-shrink-2.ll
index 7f6df92c96c5..4813614f26cb 100644
--- a/test/Transforms/InstCombine/double-float-shrink-2.ll
+++ b/test/Transforms/InstCombine/double-float-shrink-2.ll
@@ -1,28 +1,9 @@
-; RUN: opt < %s -instcombine -S -mtriple "i386-pc-linux" | FileCheck -check-prefix=DO-SIMPLIFY %s
-; RUN: opt < %s -instcombine -S -mtriple "i386-pc-win32" | FileCheck -check-prefix=DONT-SIMPLIFY %s
-; RUN: opt < %s -instcombine -S -mtriple "x86_64-pc-win32" | FileCheck -check-prefix=C89-SIMPLIFY %s
-; RUN: opt < %s -instcombine -S -mtriple "i386-pc-mingw32" | FileCheck -check-prefix=DO-SIMPLIFY %s
-; RUN: opt < %s -instcombine -S -mtriple "x86_64-pc-mingw32" | FileCheck -check-prefix=DO-SIMPLIFY %s
-; RUN: opt < %s -instcombine -S -mtriple "sparc-sun-solaris" | FileCheck -check-prefix=DO-SIMPLIFY %s
-
-; DO-SIMPLIFY: call float @floorf(
-; DO-SIMPLIFY: call float @ceilf(
-; DO-SIMPLIFY: call float @roundf(
-; DO-SIMPLIFY: call float @nearbyintf(
-; DO-SIMPLIFY: call float @truncf(
-; DO-SIMPLIFY: call float @fabsf(
-
-; C89-SIMPLIFY: call float @floorf(
-; C89-SIMPLIFY: call float @ceilf(
-; C89-SIMPLIFY: call double @round(
-; C89-SIMPLIFY: call double @nearbyint(
-
-; DONT-SIMPLIFY: call double @floor(
-; DONT-SIMPLIFY: call double @ceil(
-; DONT-SIMPLIFY: call double @round(
-; DONT-SIMPLIFY: call double @nearbyint(
-; DONT-SIMPLIFY: call double @trunc(
-; DONT-SIMPLIFY: call double @fabs(
+; RUN: opt < %s -instcombine -S -mtriple "i386-pc-linux" | FileCheck -check-prefix=DO-SIMPLIFY -check-prefix=ALL %s
+; RUN: opt < %s -instcombine -S -mtriple "i386-pc-win32" | FileCheck -check-prefix=DONT-SIMPLIFY -check-prefix=ALL %s
+; RUN: opt < %s -instcombine -S -mtriple "x86_64-pc-win32" | FileCheck -check-prefix=C89-SIMPLIFY -check-prefix=ALL %s
+; RUN: opt < %s -instcombine -S -mtriple "i386-pc-mingw32" | FileCheck -check-prefix=DO-SIMPLIFY -check-prefix=ALL %s
+; RUN: opt < %s -instcombine -S -mtriple "x86_64-pc-mingw32" | FileCheck -check-prefix=DO-SIMPLIFY -check-prefix=ALL %s
+; RUN: opt < %s -instcombine -S -mtriple "sparc-sun-solaris" | FileCheck -check-prefix=DO-SIMPLIFY -check-prefix=ALL %s
declare double @floor(double)
declare double @ceil(double)
@@ -31,7 +12,18 @@ declare double @nearbyint(double)
declare double @trunc(double)
declare double @fabs(double)
-define float @test_floor(float %C) {
+declare double @llvm.floor.f64(double)
+declare double @llvm.ceil.f64(double)
+declare double @llvm.round.f64(double)
+declare double @llvm.nearbyint.f64(double)
+declare double @llvm.trunc.f64(double)
+declare double @llvm.fabs.f64(double)
+
+; ALL-LABEL: @test_shrink_libcall_floor(
+; DO-SIMPLIFY: call float @llvm.floor.f32(
+; C89-SIMPLIFY: call float @llvm.floor.f32(
+; DONT-SIMPLIFY: call float @llvm.floor.f32(
+define float @test_shrink_libcall_floor(float %C) {
%D = fpext float %C to double
; --> floorf
%E = call double @floor(double %D)
@@ -39,7 +31,11 @@ define float @test_floor(float %C) {
ret float %F
}
-define float @test_ceil(float %C) {
+; ALL-LABEL: @test_shrink_libcall_ceil(
+; DO-SIMPLIFY: call float @llvm.ceil.f32(
+; C89-SIMPLIFY: call float @llvm.ceil.f32(
+; DONT-SIMPLIFY: call float @llvm.ceil.f32(
+define float @test_shrink_libcall_ceil(float %C) {
%D = fpext float %C to double
; --> ceilf
%E = call double @ceil(double %D)
@@ -47,7 +43,11 @@ define float @test_ceil(float %C) {
ret float %F
}
-define float @test_round(float %C) {
+; ALL-LABEL: @test_shrink_libcall_round(
+; DO-SIMPLIFY: call float @llvm.round.f32(
+; C89-SIMPLIFY: call double @round(
+; DONT-SIMPLIFY: call double @round(
+define float @test_shrink_libcall_round(float %C) {
%D = fpext float %C to double
; --> roundf
%E = call double @round(double %D)
@@ -55,7 +55,11 @@ define float @test_round(float %C) {
ret float %F
}
-define float @test_nearbyint(float %C) {
+; ALL-LABEL: @test_shrink_libcall_nearbyint(
+; DO-SIMPLIFY: call float @llvm.nearbyint.f32(
+; C89-SIMPLIFY: call double @nearbyint(
+; DONT-SIMPLIFY: call double @nearbyint(
+define float @test_shrink_libcall_nearbyint(float %C) {
%D = fpext float %C to double
; --> nearbyintf
%E = call double @nearbyint(double %D)
@@ -63,7 +67,10 @@ define float @test_nearbyint(float %C) {
ret float %F
}
-define float @test_trunc(float %C) {
+; ALL-LABEL: @test_shrink_libcall_trunc(
+; DO-SIMPLIFY: call float @llvm.trunc.f32(
+; DONT-SIMPLIFY: call double @trunc(
+define float @test_shrink_libcall_trunc(float %C) {
%D = fpext float %C to double
; --> truncf
%E = call double @trunc(double %D)
@@ -71,10 +78,386 @@ define float @test_trunc(float %C) {
ret float %F
}
-define float @test_fabs(float %C) {
+; ALL-LABEL: @test_shrink_libcall_fabs(
+; DO-SIMPLIFY: call float @llvm.fabs.f32(
+
+; This is replaced with the intrinsic, which does the right thing on
+; all platforms.
+; DONT-SIMPLIFY: call float @llvm.fabs.f32(
+define float @test_shrink_libcall_fabs(float %C) {
%D = fpext float %C to double
; --> fabsf
%E = call double @fabs(double %D)
%F = fptrunc double %E to float
ret float %F
}
+
+; Make sure fast math flags are preserved
+; ALL-LABEL: @test_shrink_libcall_fabs_fast(
+; DO-SIMPLIFY: call fast float @llvm.fabs.f32(
+define float @test_shrink_libcall_fabs_fast(float %C) {
+ %D = fpext float %C to double
+ ; --> fabsf
+ %E = call fast double @fabs(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_shrink_intrin_floor(
+; ALL: call float @llvm.floor.f32(
+define float @test_shrink_intrin_floor(float %C) {
+ %D = fpext float %C to double
+ ; --> floorf
+ %E = call double @llvm.floor.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_shrink_intrin_ceil(
+; ALL: call float @llvm.ceil.f32(
+define float @test_shrink_intrin_ceil(float %C) {
+ %D = fpext float %C to double
+ ; --> ceilf
+ %E = call double @llvm.ceil.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_shrink_intrin_round(
+; ALL: call float @llvm.round.f32(
+define float @test_shrink_intrin_round(float %C) {
+ %D = fpext float %C to double
+ ; --> roundf
+ %E = call double @llvm.round.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_shrink_intrin_nearbyint(
+; ALL: call float @llvm.nearbyint.f32(
+define float @test_shrink_intrin_nearbyint(float %C) {
+ %D = fpext float %C to double
+ ; --> nearbyintf
+ %E = call double @llvm.nearbyint.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_shrink_intrin_trunc(
+; ALL: call float @llvm.trunc.f32(
+define float @test_shrink_intrin_trunc(float %C) {
+ %D = fpext float %C to double
+ %E = call double @llvm.trunc.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_shrink_intrin_fabs(
+; ALL: call float @llvm.fabs.f32(
+define float @test_shrink_intrin_fabs(float %C) {
+ %D = fpext float %C to double
+ %E = call double @llvm.fabs.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; Make sure fast math flags are preserved
+; ALL-LABEL: @test_shrink_intrin_fabs_fast(
+; ALL: call fast float @llvm.fabs.f32(
+define float @test_shrink_intrin_fabs_fast(float %C) {
+ %D = fpext float %C to double
+ %E = call fast double @llvm.fabs.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_no_shrink_intrin_floor(
+; ALL: call double @llvm.floor.f64(
+define float @test_no_shrink_intrin_floor(double %D) {
+ %E = call double @llvm.floor.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_no_shrink_intrin_ceil(
+; ALL: call double @llvm.ceil.f64(
+define float @test_no_shrink_intrin_ceil(double %D) {
+ %E = call double @llvm.ceil.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_no_shrink_intrin_round(
+; ALL: call double @llvm.round.f64(
+define float @test_no_shrink_intrin_round(double %D) {
+ %E = call double @llvm.round.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_no_shrink_intrin_nearbyint(
+; ALL: call double @llvm.nearbyint.f64(
+define float @test_no_shrink_intrin_nearbyint(double %D) {
+ %E = call double @llvm.nearbyint.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_no_shrink_intrin_trunc(
+; ALL: call double @llvm.trunc.f64(
+define float @test_no_shrink_intrin_trunc(double %D) {
+ %E = call double @llvm.trunc.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_shrink_intrin_fabs_double_src(
+; ALL: call float @llvm.fabs.f32(
+define float @test_shrink_intrin_fabs_double_src(double %D) {
+ %E = call double @llvm.fabs.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; Make sure fast math flags are preserved
+; ALL-LABEL: @test_shrink_intrin_fabs_fast_double_src(
+; ALL: call fast float @llvm.fabs.f32(
+define float @test_shrink_intrin_fabs_fast_double_src(double %D) {
+ %E = call fast double @llvm.fabs.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_shrink_float_convertible_constant_intrin_floor(
+; ALL: ret float 2.000000e+00
+define float @test_shrink_float_convertible_constant_intrin_floor() {
+ %E = call double @llvm.floor.f64(double 2.1)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_shrink_float_convertible_constant_intrin_ceil(
+; ALL: ret float 3.000000e+00
+define float @test_shrink_float_convertible_constant_intrin_ceil() {
+ %E = call double @llvm.ceil.f64(double 2.1)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_shrink_float_convertible_constant_intrin_round(
+; ALL: ret float 2.000000e+00
+define float @test_shrink_float_convertible_constant_intrin_round() {
+ %E = call double @llvm.round.f64(double 2.1)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_shrink_float_convertible_constant_intrin_nearbyint(
+; ALL: ret float 2.000000e+00
+define float @test_shrink_float_convertible_constant_intrin_nearbyint() {
+ %E = call double @llvm.nearbyint.f64(double 2.1)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_shrink_float_convertible_constant_intrin_trunc(
+; ALL: ret float 2.000000e+00
+define float @test_shrink_float_convertible_constant_intrin_trunc() {
+ %E = call double @llvm.trunc.f64(double 2.1)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_shrink_float_convertible_constant_intrin_fabs(
+; ALL: ret float 0x4000CCCCC0000000
+define float @test_shrink_float_convertible_constant_intrin_fabs() {
+ %E = call double @llvm.fabs.f64(double 2.1)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; Make sure fast math flags are preserved
+; ALL-LABEL: @test_shrink_float_convertible_constant_intrin_fabs_fast(
+; ALL: ret float 0x4000CCCCC0000000
+define float @test_shrink_float_convertible_constant_intrin_fabs_fast() {
+ %E = call fast double @llvm.fabs.f64(double 2.1)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_no_shrink_mismatched_type_intrin_floor(
+; ALL-NEXT: %E = call double @llvm.floor.f64(double %D)
+; ALL-NEXT: %F = fptrunc double %E to half
+; ALL-NEXT: ret half %F
+define half @test_no_shrink_mismatched_type_intrin_floor(double %D) {
+ %E = call double @llvm.floor.f64(double %D)
+ %F = fptrunc double %E to half
+ ret half %F
+}
+
+; ALL-LABEL: @test_no_shrink_mismatched_type_intrin_ceil(
+; ALL-NEXT: %E = call double @llvm.ceil.f64(double %D)
+; ALL-NEXT: %F = fptrunc double %E to half
+; ALL-NEXT: ret half %F
+define half @test_no_shrink_mismatched_type_intrin_ceil(double %D) {
+ %E = call double @llvm.ceil.f64(double %D)
+ %F = fptrunc double %E to half
+ ret half %F
+}
+
+; ALL-LABEL: @test_no_shrink_mismatched_type_intrin_round(
+; ALL-NEXT: %E = call double @llvm.round.f64(double %D)
+; ALL-NEXT: %F = fptrunc double %E to half
+; ALL-NEXT: ret half %F
+define half @test_no_shrink_mismatched_type_intrin_round(double %D) {
+ %E = call double @llvm.round.f64(double %D)
+ %F = fptrunc double %E to half
+ ret half %F
+}
+
+; ALL-LABEL: @test_no_shrink_mismatched_type_intrin_nearbyint(
+; ALL-NEXT: %E = call double @llvm.nearbyint.f64(double %D)
+; ALL-NEXT: %F = fptrunc double %E to half
+; ALL-NEXT: ret half %F
+define half @test_no_shrink_mismatched_type_intrin_nearbyint(double %D) {
+ %E = call double @llvm.nearbyint.f64(double %D)
+ %F = fptrunc double %E to half
+ ret half %F
+}
+
+; ALL-LABEL: @test_no_shrink_mismatched_type_intrin_trunc(
+; ALL-NEXT: %E = call double @llvm.trunc.f64(double %D)
+; ALL-NEXT: %F = fptrunc double %E to half
+; ALL-NEXT: ret half %F
+define half @test_no_shrink_mismatched_type_intrin_trunc(double %D) {
+ %E = call double @llvm.trunc.f64(double %D)
+ %F = fptrunc double %E to half
+ ret half %F
+}
+
+; ALL-LABEL: @test_shrink_mismatched_type_intrin_fabs_double_src(
+; ALL-NEXT: %1 = fptrunc double %D to half
+; ALL-NEXT: %F = call half @llvm.fabs.f16(half %1)
+; ALL-NEXT: ret half %F
+define half @test_shrink_mismatched_type_intrin_fabs_double_src(double %D) {
+ %E = call double @llvm.fabs.f64(double %D)
+ %F = fptrunc double %E to half
+ ret half %F
+}
+
+; Make sure fast math flags are preserved
+; ALL-LABEL: @test_mismatched_type_intrin_fabs_fast_double_src(
+; ALL-NEXT: %1 = fptrunc double %D to half
+; ALL-NEXT: %F = call fast half @llvm.fabs.f16(half %1)
+; ALL-NEXT: ret half %F
+define half @test_mismatched_type_intrin_fabs_fast_double_src(double %D) {
+ %E = call fast double @llvm.fabs.f64(double %D)
+ %F = fptrunc double %E to half
+ ret half %F
+}
+
+; ALL-LABEL: @test_shrink_intrin_floor_fp16_src(
+; ALL-NEXT: %E = call half @llvm.floor.f16(half %C)
+; ALL-NEXT: %1 = fpext half %E to double
+; ALL-NEXT: %F = fptrunc double %1 to float
+define float @test_shrink_intrin_floor_fp16_src(half %C) {
+ %D = fpext half %C to double
+ %E = call double @llvm.floor.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_shrink_intrin_ceil_fp16_src(
+; ALL-NEXT: %E = call half @llvm.ceil.f16(half %C)
+; ALL-NEXT: %1 = fpext half %E to double
+; ALL-NEXT: %F = fptrunc double %1 to float
+; ALL-NEXT: ret float %F
+define float @test_shrink_intrin_ceil_fp16_src(half %C) {
+ %D = fpext half %C to double
+ %E = call double @llvm.ceil.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_shrink_intrin_round_fp16_src(
+; ALL-NEXT: %E = call half @llvm.round.f16(half %C)
+; ALL-NEXT: %1 = fpext half %E to double
+; ALL-NEXT: %F = fptrunc double %1 to float
+; ALL-NEXT: ret float %F
+define float @test_shrink_intrin_round_fp16_src(half %C) {
+ %D = fpext half %C to double
+ %E = call double @llvm.round.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_shrink_intrin_nearbyint_fp16_src(
+; ALL-NEXT: %E = call half @llvm.nearbyint.f16(half %C)
+; ALL-NEXT: %1 = fpext half %E to double
+; ALL-NEXT: %F = fptrunc double %1 to float
+; ALL-NEXT: ret float %F
+define float @test_shrink_intrin_nearbyint_fp16_src(half %C) {
+ %D = fpext half %C to double
+ %E = call double @llvm.nearbyint.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_shrink_intrin_trunc_fp16_src(
+; ALL-NEXT: %E = call half @llvm.trunc.f16(half %C)
+; ALL-NEXT: %1 = fpext half %E to double
+; ALL-NEXT: %F = fptrunc double %1 to float
+; ALL-NEXT: ret float %F
+define float @test_shrink_intrin_trunc_fp16_src(half %C) {
+ %D = fpext half %C to double
+ %E = call double @llvm.trunc.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_shrink_intrin_fabs_fp16_src(
+; ALL-NEXT: %E = call half @llvm.fabs.f16(half %C)
+; ALL-NEXT: %1 = fpext half %E to double
+; ALL-NEXT: %F = fptrunc double %1 to float
+; ALL-NEXT: ret float %F
+define float @test_shrink_intrin_fabs_fp16_src(half %C) {
+ %D = fpext half %C to double
+ %E = call double @llvm.fabs.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; Make sure fast math flags are preserved
+; ALL-LABEL: @test_shrink_intrin_fabs_fast_fp16_src(
+; ALL-NEXT: %E = call fast half @llvm.fabs.f16(half %C)
+; ALL-NEXT: %1 = fpext half %E to double
+; ALL-NEXT: %F = fptrunc double %1 to float
+; ALL-NEXT: ret float %F
+define float @test_shrink_intrin_fabs_fast_fp16_src(half %C) {
+ %D = fpext half %C to double
+ %E = call fast double @llvm.fabs.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_no_shrink_intrin_floor_multi_use_fpext(
+; ALL: %D = fpext half %C to double
+; ALL: call double @llvm.floor.f64
+define float @test_no_shrink_intrin_floor_multi_use_fpext(half %C) {
+ %D = fpext half %C to double
+ store volatile double %D, double* undef
+ %E = call double @llvm.floor.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
+; ALL-LABEL: @test_no_shrink_intrin_fabs_multi_use_fpext(
+; ALL: %D = fpext half %C to double
+; ALL: call double @llvm.fabs.f64
+define float @test_no_shrink_intrin_fabs_multi_use_fpext(half %C) {
+ %D = fpext half %C to double
+ store volatile double %D, double* undef
+ %E = call double @llvm.fabs.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
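All of the shrink cases above share one shape: when the f64 op's only input is an fpext from the narrower type and its result feeds an fptrunc straight back, the rounding op commutes with the conversions and can run at the narrow width (the multi-use fpext tests at the end guard against doing this when the wide value is still needed). A minimal sketch (not part of the patch):

  %d = fpext float %c to double
  %e = call double @llvm.floor.f64(double %d)
  %f = fptrunc double %e to float
  ; becomes
  %f = call float @llvm.floor.f32(float %c)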
diff --git a/test/Transforms/InstCombine/element-atomic-memcpy-to-loads.ll b/test/Transforms/InstCombine/element-atomic-memcpy-to-loads.ll
new file mode 100644
index 000000000000..107440f10a5a
--- /dev/null
+++ b/test/Transforms/InstCombine/element-atomic-memcpy-to-loads.ll
@@ -0,0 +1,92 @@
+; RUN: opt -instcombine -unfold-element-atomic-memcpy-max-elements=8 -S < %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Test basic unfolding
+define void @test1(i8* %Src, i8* %Dst) {
+; CHECK-LABEL: test1
+; CHECK-NOT: llvm.memcpy.element.atomic
+
+; CHECK-DAG: %memcpy_unfold.src_casted = bitcast i8* %Src to i32*
+; CHECK-DAG: %memcpy_unfold.dst_casted = bitcast i8* %Dst to i32*
+
+; CHECK-DAG: [[VAL1:%[^\s]+]] = load atomic i32, i32* %memcpy_unfold.src_casted unordered, align 4
+; CHECK-DAG: store atomic i32 [[VAL1]], i32* %memcpy_unfold.dst_casted unordered, align 8
+
+; CHECK-DAG: [[VAL2:%[^\s]+]] = load atomic i32, i32* %{{[^\s]+}} unordered, align 4
+; CHECK-DAG: store atomic i32 [[VAL2]], i32* %{{[^\s]+}} unordered, align 4
+
+; CHECK-DAG: [[VAL3:%[^\s]+]] = load atomic i32, i32* %{{[^\s]+}} unordered, align 4
+; CHECK-DAG: store atomic i32 [[VAL3]], i32* %{{[^\s]+}} unordered, align 4
+
+; CHECK-DAG: [[VAL4:%[^\s]+]] = load atomic i32, i32* %{{[^\s]+}} unordered, align 4
+; CHECK-DAG: store atomic i32 [[VAL4]], i32* %{{[^\s]+}} unordered, align 4
+entry:
+ call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 4 %Dst, i8* align 8 %Src, i64 4, i32 4)
+ ret void
+}
+
+; Test that we don't unfold too much
+define void @test2(i8* %Src, i8* %Dst) {
+; CHECK-LABEL: test2
+
+; CHECK-NOT: load
+; CHECK-NOT: store
+; CHECK: llvm.memcpy.element.atomic
+entry:
+ call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 4 %Dst, i8* align 4 %Src, i64 1000, i32 4)
+ ret void
+}
+
+; Test that we will not unfold into non-native integers
+define void @test3(i8* %Src, i8* %Dst) {
+; CHECK-LABEL: test3
+
+; CHECK-NOT: load
+; CHECK-NOT: store
+; CHECK: llvm.memcpy.element.atomic
+entry:
+ call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 64 %Dst, i8* align 64 %Src, i64 4, i32 64)
+ ret void
+}
+
+; Test that we will eliminate redundant bitcasts
+define void @test4(i64* %Src, i64* %Dst) {
+; CHECK-LABEL: test4
+; CHECK-NOT: llvm.memcpy.element.atomic
+
+; CHECK-NOT: bitcast
+
+; CHECK-DAG: [[VAL1:%[^\s]+]] = load atomic i64, i64* %Src unordered, align 16
+; CHECK-DAG: store atomic i64 [[VAL1]], i64* %Dst unordered, align 16
+
+; CHECK-DAG: [[SRC_ADDR2:%[^ ]+]] = getelementptr i64, i64* %Src, i64 1
+; CHECK-DAG: [[DST_ADDR2:%[^ ]+]] = getelementptr i64, i64* %Dst, i64 1
+; CHECK-DAG: [[VAL2:%[^\s]+]] = load atomic i64, i64* [[SRC_ADDR2]] unordered, align 8
+; CHECK-DAG: store atomic i64 [[VAL2]], i64* [[DST_ADDR2]] unordered, align 8
+
+; CHECK-DAG: [[SRC_ADDR3:%[^ ]+]] = getelementptr i64, i64* %Src, i64 2
+; CHECK-DAG: [[DST_ADDR3:%[^ ]+]] = getelementptr i64, i64* %Dst, i64 2
+; CHECK-DAG: [[VAL3:%[^ ]+]] = load atomic i64, i64* [[SRC_ADDR3]] unordered, align 8
+; CHECK-DAG: store atomic i64 [[VAL3]], i64* [[DST_ADDR3]] unordered, align 8
+
+; CHECK-DAG: [[SRC_ADDR4:%[^ ]+]] = getelementptr i64, i64* %Src, i64 3
+; CHECK-DAG: [[DST_ADDR4:%[^ ]+]] = getelementptr i64, i64* %Dst, i64 3
+; CHECK-DAG: [[VAL4:%[^ ]+]] = load atomic i64, i64* [[SRC_ADDR4]] unordered, align 8
+; CHECK-DAG: store atomic i64 [[VAL4]], i64* [[DST_ADDR4]] unordered, align 8
+entry:
+ %Src.casted = bitcast i64* %Src to i8*
+ %Dst.casted = bitcast i64* %Dst to i8*
+ call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 16 %Dst.casted, i8* align 16 %Src.casted, i64 4, i32 8)
+ ret void
+}
+
+define void @test5(i8* %Src, i8* %Dst) {
+; CHECK-LABEL: test5
+
+; CHECK-NOT: llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 64 %Dst, i8* align 64 %Src, i64 0, i32 64)
+entry:
+ call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 64 %Dst, i8* align 64 %Src, i64 0, i32 64)
+ ret void
+}
+
+declare void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* nocapture, i8* nocapture, i64, i32)
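Per element, the unfolding in test1 above produces exactly one unordered atomic load/store pair of the element type; the -unfold-element-atomic-memcpy-max-elements=8 cap is why the 1000-element copy in test2 stays a call. One element in sketch form (not part of the patch):

  %s = bitcast i8* %Src to i32*
  %d = bitcast i8* %Dst to i32*
  %v = load atomic i32, i32* %s unordered, align 4
  store atomic i32 %v, i32* %d unordered, align 4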
diff --git a/test/Transforms/InstCombine/exact.ll b/test/Transforms/InstCombine/exact.ll
index 436d5081c7aa..96b6fd689964 100644
--- a/test/Transforms/InstCombine/exact.ll
+++ b/test/Transforms/InstCombine/exact.ll
@@ -99,12 +99,12 @@ define i64 @ashr1(i64 %X) {
ret i64 %B
}
-; FIXME: The ashr should be exact (like it is in the preceding test).
+; The vector ashr should be exact (like it is in the preceding test).
define <2 x i64> @ashr1_vec(<2 x i64> %X) {
; CHECK-LABEL: @ashr1_vec(
; CHECK-NEXT: [[A:%.*]] = shl <2 x i64> %X, <i64 8, i64 8>
-; CHECK-NEXT: [[B:%.*]] = ashr <2 x i64> [[A]], <i64 2, i64 2>
+; CHECK-NEXT: [[B:%.*]] = ashr exact <2 x i64> [[A]], <i64 2, i64 2>
; CHECK-NEXT: ret <2 x i64> [[B]]
;
%A = shl <2 x i64> %X, <i64 8, i64 8>
diff --git a/test/Transforms/InstCombine/fabs-libcall.ll b/test/Transforms/InstCombine/fabs-libcall.ll
new file mode 100644
index 000000000000..5733badfa8f9
--- /dev/null
+++ b/test/Transforms/InstCombine/fabs-libcall.ll
@@ -0,0 +1,21 @@
+; RUN: opt -S -mtriple=i686-apple-macosx -instcombine %s | FileCheck %s
+
+declare x86_fp80 @fabsl(x86_fp80)
+
+; CHECK-LABEL: @replace_fabs_call_f80(
+; CHECK-NEXT: %fabsl = call x86_fp80 @llvm.fabs.f80(x86_fp80 %x)
+; CHECK-NEXT: ret x86_fp80 %fabsl
+define x86_fp80 @replace_fabs_call_f80(x86_fp80 %x) {
+ %fabsl = tail call x86_fp80 @fabsl(x86_fp80 %x)
+ ret x86_fp80 %fabsl
+
+}
+
+; CHECK-LABEL: @fmf_replace_fabs_call_f80(
+; CHECK-NEXT: %fabsl = call nnan x86_fp80 @llvm.fabs.f80(x86_fp80 %x)
+; CHECK-NEXT: ret x86_fp80 %fabsl
+define x86_fp80 @fmf_replace_fabs_call_f80(x86_fp80 %x) {
+ %fabsl = tail call nnan x86_fp80 @fabsl(x86_fp80 %x)
+ ret x86_fp80 %fabsl
+}
+
diff --git a/test/Transforms/InstCombine/fabs.ll b/test/Transforms/InstCombine/fabs.ll
index aee853ae9eeb..a95f7b306b55 100644
--- a/test/Transforms/InstCombine/fabs.ll
+++ b/test/Transforms/InstCombine/fabs.ll
@@ -1,6 +1,10 @@
-; RUN: opt < %s -instcombine -S | FileCheck %s
+; RUN: opt -mtriple=x86_64-unknown-linux-gnu < %s -instcombine -S | FileCheck %s
-; Make sure all library calls are eliminated when the input is known positive.
+; Make sure libcalls are replaced with intrinsic calls.
+
+declare float @llvm.fabs.f32(float)
+declare double @llvm.fabs.f64(double)
+declare fp128 @llvm.fabs.f128(fp128)
declare float @fabsf(float)
declare double @fabs(double)
@@ -8,46 +12,46 @@ declare fp128 @fabsl(fp128)
declare float @llvm.fma.f32(float, float, float)
declare float @llvm.fmuladd.f32(float, float, float)
-define float @square_fabs_call_f32(float %x) {
- %mul = fmul float %x, %x
- %fabsf = tail call float @fabsf(float %mul)
+define float @replace_fabs_call_f32(float %x) {
+ %fabsf = tail call float @fabsf(float %x)
ret float %fabsf
-; CHECK-LABEL: square_fabs_call_f32(
-; CHECK-NEXT: %mul = fmul float %x, %x
-; CHECK-NEXT: %fabsf = tail call float @fabsf(float %mul)
+; CHECK-LABEL: @replace_fabs_call_f32(
+; CHECK-NEXT: %fabsf = call float @llvm.fabs.f32(float %x)
; CHECK-NEXT: ret float %fabsf
}
-define double @square_fabs_call_f64(double %x) {
- %mul = fmul double %x, %x
- %fabs = tail call double @fabs(double %mul)
+define double @replace_fabs_call_f64(double %x) {
+ %fabs = tail call double @fabs(double %x)
ret double %fabs
-; CHECK-LABEL: square_fabs_call_f64(
-; CHECK-NEXT: %mul = fmul double %x, %x
-; CHECK-NEXT: %fabs = tail call double @fabs(double %mul)
+; CHECK-LABEL: @replace_fabs_call_f64(
+; CHECK-NEXT: %fabs = call double @llvm.fabs.f64(double %x)
; CHECK-NEXT: ret double %fabs
}
-define fp128 @square_fabs_call_f128(fp128 %x) {
- %mul = fmul fp128 %x, %x
- %fabsl = tail call fp128 @fabsl(fp128 %mul)
+define fp128 @replace_fabs_call_f128(fp128 %x) {
+ %fabsl = tail call fp128 @fabsl(fp128 %x)
ret fp128 %fabsl
-; CHECK-LABEL: square_fabs_call_f128(
-; CHECK-NEXT: %mul = fmul fp128 %x, %x
-; CHECK-NEXT: %fabsl = tail call fp128 @fabsl(fp128 %mul)
+; CHECK-LABEL: @replace_fabs_call_f128(
+; CHECK-NEXT: %fabsl = call fp128 @llvm.fabs.f128(fp128 %x)
; CHECK-NEXT: ret fp128 %fabsl
}
+; Make sure fast math flags are preserved when replacing the libcall.
+define float @fmf_replace_fabs_call_f32(float %x) {
+ %fabsf = tail call nnan float @fabsf(float %x)
+ ret float %fabsf
+
+; CHECK-LABEL: @fmf_replace_fabs_call_f32(
+; CHECK-NEXT: %fabsf = call nnan float @llvm.fabs.f32(float %x)
+; CHECK-NEXT: ret float %fabsf
+}
+
; Make sure all intrinsic calls are eliminated when the input is known
; positive.
-declare float @llvm.fabs.f32(float)
-declare double @llvm.fabs.f64(double)
-declare fp128 @llvm.fabs.f128(fp128)
-
; The fabs cannot be eliminated because %x may be a NaN
define float @square_fabs_intrinsic_f32(float %x) {
%mul = fmul float %x, %x
@@ -102,10 +106,8 @@ define float @square_fabs_shrink_call1(float %x) {
ret float %trunc
; CHECK-LABEL: square_fabs_shrink_call1(
-; CHECK-NEXT: %ext = fpext float %x to double
-; CHECK-NEXT: %sq = fmul double %ext, %ext
-; CHECK-NEXT: call double @fabs(double %sq)
-; CHECK-NEXT: %trunc = fptrunc double %fabs to float
+; CHECK-NEXT: fmul float %x, %x
+; CHECK-NEXT: %trunc = call float @llvm.fabs.f32(float
; CHECK-NEXT: ret float %trunc
}
@@ -118,8 +120,8 @@ define float @square_fabs_shrink_call2(float %x) {
; CHECK-LABEL: square_fabs_shrink_call2(
; CHECK-NEXT: %sq = fmul float %x, %x
-; CHECK-NEXT: %fabsf = call float @fabsf(float %sq)
-; CHECK-NEXT: ret float %fabsf
+; CHECK-NEXT: %trunc = call float @llvm.fabs.f32(float %sq)
+; CHECK-NEXT: ret float %trunc
}
; CHECK-LABEL: @fabs_select_constant_negative_positive(
@@ -214,3 +216,16 @@ define float @square_nnan_fmuladd_fabs_intrinsic_f32(float %x) {
; CHECK-NEXT: %fmuladd = call nnan float @llvm.fmuladd.f32(float %x, float %x, float 1.000000e+00)
; CHECK-NEXT: ret float %fmuladd
}
+
+; Don't introduce a second fpext
+; CHECK-LABEL: @multi_use_fabs_fpext(
+; CHECK: %fpext = fpext float %x to double
+; CHECK-NEXT: %fabs = call double @llvm.fabs.f64(double %fpext)
+; CHECK-NEXT: store volatile double %fpext, double* undef, align 8
+; CHECK-NEXT: ret double %fabs
+define double @multi_use_fabs_fpext(float %x) {
+ %fpext = fpext float %x to double
+ %fabs = call double @llvm.fabs.f64(double %fpext)
+ store volatile double %fpext, double* undef
+ ret double %fabs
+}
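The one-use check is the point of multi_use_fabs_fpext: shrinking fabs(fpext %x) to fpext(fabs.f32 %x) only pays off if the original fpext dies, and the volatile store keeps it alive, so the transform must not duplicate it. A hypothetical single-use counterpart where the shrink is expected to fire:

  define double @single_use_fabs_fpext(float %x) {
    %fpext = fpext float %x to double
    %fabs = call double @llvm.fabs.f64(double %fpext)
    ret double %fabs
    ; expected after -instcombine (names illustrative):
    ;   %1 = call float @llvm.fabs.f32(float %x)
    ;   %fabs = fpext float %1 to double
  }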
diff --git a/test/Transforms/InstCombine/fast-math.ll b/test/Transforms/InstCombine/fast-math.ll
index ad8a9247e4e1..6ddf3a58529f 100644
--- a/test/Transforms/InstCombine/fast-math.ll
+++ b/test/Transforms/InstCombine/fast-math.ll
@@ -831,3 +831,26 @@ define fp128 @min4(fp128 %a, fp128 %b) {
; CHECK-NEXT: select {{.*}} fp128 %a, fp128 %b
; CHECK-NEXT: ret
}
+
+define float @test55(i1 %which, float %a) {
+; CHECK-LABEL: @test55(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
+; CHECK: delay:
+; CHECK-NEXT: [[PHITMP:%.*]] = fadd fast float [[A:%.*]], 1.000000e+00
+; CHECK-NEXT: br label [[FINAL]]
+; CHECK: final:
+; CHECK-NEXT: [[A:%.*]] = phi float [ 3.000000e+00, [[ENTRY:%.*]] ], [ [[PHITMP]], [[DELAY]] ]
+; CHECK-NEXT: ret float [[A]]
+;
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+final:
+ %A = phi float [ 2.0, %entry ], [ %a, %delay ]
+ %value = fadd fast float %A, 1.0
+ ret float %value
+}
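test55 exercises folding a binop into a phi: the fadd fast is applied per incoming value, so the constant arm folds at compile time (2.0 + 1.0 gives the 3.0 in the expected phi) and the variable arm becomes the %phitmp fadd hoisted into %delay. A sketch of the same shape on integers (hypothetical; assumes the analogous fold fires when the phi has a single use):

  define i32 @fold_op_into_phi(i1 %c, i32 %v) {
  entry:
    br i1 %c, label %end, label %side
  side:
    br label %end
  end:
    %p = phi i32 [ 7, %entry ], [ %v, %side ]
    %r = add i32 %p, 1   ; expected: phi of [ 8, %entry ] and an add computed in %side
    ret i32 %r
  }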
diff --git a/test/Transforms/InstCombine/fcmp.ll b/test/Transforms/InstCombine/fcmp.ll
index 7fd46f228183..40f7bf9b64fa 100644
--- a/test/Transforms/InstCombine/fcmp.ll
+++ b/test/Transforms/InstCombine/fcmp.ll
@@ -3,238 +3,291 @@
declare double @llvm.fabs.f64(double) nounwind readnone
define i1 @test1(float %x, float %y) nounwind {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float %x, %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
%ext1 = fpext float %x to double
%ext2 = fpext float %y to double
%cmp = fcmp ogt double %ext1, %ext2
ret i1 %cmp
-; CHECK-LABEL: @test1(
-; CHECK-NEXT: fcmp ogt float %x, %y
}
define i1 @test2(float %a) nounwind {
+; CHECK-LABEL: @test2(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float %a, 1.000000e+00
+; CHECK-NEXT: ret i1 [[CMP]]
+;
%ext = fpext float %a to double
%cmp = fcmp ogt double %ext, 1.000000e+00
ret i1 %cmp
-; CHECK-LABEL: @test2(
-; CHECK-NEXT: fcmp ogt float %a, 1.0
}
define i1 @test3(float %a) nounwind {
+; CHECK-LABEL: @test3(
+; CHECK-NEXT: [[EXT:%.*]] = fpext float %a to double
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt double [[EXT]], 0x3FF0000000000001
+; CHECK-NEXT: ret i1 [[CMP]]
+;
%ext = fpext float %a to double
%cmp = fcmp ogt double %ext, 0x3FF0000000000001 ; more precision than float.
ret i1 %cmp
-; CHECK-LABEL: @test3(
-; CHECK-NEXT: fpext float %a to double
}
define i1 @test4(float %a) nounwind {
+; CHECK-LABEL: @test4(
+; CHECK-NEXT: [[EXT:%.*]] = fpext float %a to double
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt double [[EXT]], 0x36A0000000000000
+; CHECK-NEXT: ret i1 [[CMP]]
+;
%ext = fpext float %a to double
%cmp = fcmp ogt double %ext, 0x36A0000000000000 ; denormal in float.
ret i1 %cmp
-; CHECK-LABEL: @test4(
-; CHECK-NEXT: fpext float %a to double
}
define i1 @test5(float %a) nounwind {
+; CHECK-LABEL: @test5(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float %a, -1.000000e+00
+; CHECK-NEXT: ret i1 [[CMP]]
+;
%neg = fsub float -0.000000e+00, %a
%cmp = fcmp ogt float %neg, 1.000000e+00
ret i1 %cmp
-; CHECK-LABEL: @test5(
-; CHECK-NEXT: fcmp olt float %a, -1.0
}
define i1 @test6(float %x, float %y) nounwind {
+; CHECK-LABEL: @test6(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float %x, %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
%neg1 = fsub float -0.000000e+00, %x
%neg2 = fsub float -0.000000e+00, %y
%cmp = fcmp olt float %neg1, %neg2
ret i1 %cmp
-; CHECK-LABEL: @test6(
-; CHECK-NEXT: fcmp ogt float %x, %y
}
define i1 @test7(float %x) nounwind readnone ssp noredzone {
+; CHECK-LABEL: @test7(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float %x, 0.000000e+00
+; CHECK-NEXT: ret i1 [[CMP]]
+;
%ext = fpext float %x to ppc_fp128
%cmp = fcmp ogt ppc_fp128 %ext, 0xM00000000000000000000000000000000
ret i1 %cmp
-; CHECK-LABEL: @test7(
-; CHECK-NEXT: fcmp ogt float %x, 0.000000e+00
}
define float @test8(float %x) nounwind readnone optsize ssp {
+; CHECK-LABEL: @test8(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float %x, 0.000000e+00
+; CHECK-NEXT: [[CONV2:%.*]] = uitofp i1 [[CMP]] to float
+; CHECK-NEXT: ret float [[CONV2]]
+;
%conv = fpext float %x to double
%cmp = fcmp olt double %conv, 0.000000e+00
%conv1 = zext i1 %cmp to i32
%conv2 = sitofp i32 %conv1 to float
ret float %conv2
; Float comparison to zero shouldn't cast to double.
-; CHECK-LABEL: @test8(
-; CHECK-NEXT: fcmp olt float %x, 0.000000e+00
}
declare double @fabs(double) nounwind readnone
define i32 @test9(double %a) nounwind {
+; CHECK-LABEL: @test9(
+; CHECK-NEXT: ret i32 0
+;
%call = tail call double @fabs(double %a) nounwind
%cmp = fcmp olt double %call, 0.000000e+00
%conv = zext i1 %cmp to i32
ret i32 %conv
-; CHECK-LABEL: @test9(
-; CHECK-NOT: fabs
-; CHECK: ret i32 0
}
define i32 @test9_intrinsic(double %a) nounwind {
+; CHECK-LABEL: @test9_intrinsic(
+; CHECK-NEXT: ret i32 0
+;
%call = tail call double @llvm.fabs.f64(double %a) nounwind
%cmp = fcmp olt double %call, 0.000000e+00
%conv = zext i1 %cmp to i32
ret i32 %conv
-; CHECK-LABEL: @test9_intrinsic(
-; CHECK-NOT: fabs
-; CHECK: ret i32 0
}
define i32 @test10(double %a) nounwind {
+; CHECK-LABEL: @test10(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq double %a, 0.000000e+00
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
%call = tail call double @fabs(double %a) nounwind
%cmp = fcmp ole double %call, 0.000000e+00
%conv = zext i1 %cmp to i32
ret i32 %conv
-; CHECK-LABEL: @test10(
-; CHECK-NOT: fabs
-; CHECK: fcmp oeq double %a, 0.000000e+00
}
define i32 @test10_intrinsic(double %a) nounwind {
+; CHECK-LABEL: @test10_intrinsic(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq double %a, 0.000000e+00
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
%call = tail call double @llvm.fabs.f64(double %a) nounwind
%cmp = fcmp ole double %call, 0.000000e+00
%conv = zext i1 %cmp to i32
ret i32 %conv
-; CHECK-LABEL: @test10_intrinsic(
-; CHECK-NOT: fabs
-; CHECK: fcmp oeq double %a, 0.000000e+00
}
define i32 @test11(double %a) nounwind {
+; CHECK-LABEL: @test11(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp one double %a, 0.000000e+00
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
%call = tail call double @fabs(double %a) nounwind
%cmp = fcmp ogt double %call, 0.000000e+00
%conv = zext i1 %cmp to i32
ret i32 %conv
-; CHECK-LABEL: @test11(
-; CHECK-NOT: fabs
-; CHECK: fcmp one double %a, 0.000000e+00
}
define i32 @test11_intrinsic(double %a) nounwind {
+; CHECK-LABEL: @test11_intrinsic(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp one double %a, 0.000000e+00
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
%call = tail call double @llvm.fabs.f64(double %a) nounwind
%cmp = fcmp ogt double %call, 0.000000e+00
%conv = zext i1 %cmp to i32
ret i32 %conv
-; CHECK-LABEL: @test11_intrinsic(
-; CHECK-NOT: fabs
-; CHECK: fcmp one double %a, 0.000000e+00
}
define i32 @test12(double %a) nounwind {
+; CHECK-LABEL: @test12(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ord double %a, 0.000000e+00
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
%call = tail call double @fabs(double %a) nounwind
%cmp = fcmp oge double %call, 0.000000e+00
%conv = zext i1 %cmp to i32
ret i32 %conv
-; CHECK-LABEL: @test12(
-; CHECK-NOT: fabs
-; CHECK: fcmp ord double %a, 0.000000e+00
}
define i32 @test12_intrinsic(double %a) nounwind {
+; CHECK-LABEL: @test12_intrinsic(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ord double %a, 0.000000e+00
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
%call = tail call double @llvm.fabs.f64(double %a) nounwind
%cmp = fcmp oge double %call, 0.000000e+00
%conv = zext i1 %cmp to i32
ret i32 %conv
-; CHECK-LABEL: @test12_intrinsic(
-; CHECK-NOT: fabs
-; CHECK: fcmp ord double %a, 0.000000e+00
}
define i32 @test13(double %a) nounwind {
+; CHECK-LABEL: @test13(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp une double %a, 0.000000e+00
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
%call = tail call double @fabs(double %a) nounwind
%cmp = fcmp une double %call, 0.000000e+00
%conv = zext i1 %cmp to i32
ret i32 %conv
-; CHECK-LABEL: @test13(
-; CHECK-NOT: fabs
-; CHECK: fcmp une double %a, 0.000000e+00
}
define i32 @test13_intrinsic(double %a) nounwind {
+; CHECK-LABEL: @test13_intrinsic(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp une double %a, 0.000000e+00
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
%call = tail call double @llvm.fabs.f64(double %a) nounwind
%cmp = fcmp une double %call, 0.000000e+00
%conv = zext i1 %cmp to i32
ret i32 %conv
-; CHECK-LABEL: @test13_intrinsic(
-; CHECK-NOT: fabs
-; CHECK: fcmp une double %a, 0.000000e+00
}
define i32 @test14(double %a) nounwind {
+; CHECK-LABEL: @test14(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq double %a, 0.000000e+00
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
%call = tail call double @fabs(double %a) nounwind
%cmp = fcmp oeq double %call, 0.000000e+00
%conv = zext i1 %cmp to i32
ret i32 %conv
-; CHECK-LABEL: @test14(
-; CHECK-NOT: fabs
-; CHECK: fcmp oeq double %a, 0.000000e+00
}
define i32 @test14_intrinsic(double %a) nounwind {
+; CHECK-LABEL: @test14_intrinsic(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq double %a, 0.000000e+00
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
%call = tail call double @llvm.fabs.f64(double %a) nounwind
%cmp = fcmp oeq double %call, 0.000000e+00
%conv = zext i1 %cmp to i32
ret i32 %conv
-; CHECK-LABEL: @test14_intrinsic(
-; CHECK-NOT: fabs
-; CHECK: fcmp oeq double %a, 0.000000e+00
}
define i32 @test15(double %a) nounwind {
+; CHECK-LABEL: @test15(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp one double %a, 0.000000e+00
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
%call = tail call double @fabs(double %a) nounwind
%cmp = fcmp one double %call, 0.000000e+00
%conv = zext i1 %cmp to i32
ret i32 %conv
-; CHECK-LABEL: @test15(
-; CHECK-NOT: fabs
-; CHECK: fcmp one double %a, 0.000000e+00
}
define i32 @test15_intrinsic(double %a) nounwind {
+; CHECK-LABEL: @test15_intrinsic(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp one double %a, 0.000000e+00
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
%call = tail call double @llvm.fabs.f64(double %a) nounwind
%cmp = fcmp one double %call, 0.000000e+00
%conv = zext i1 %cmp to i32
ret i32 %conv
-; CHECK-LABEL: @test15_intrinsic(
-; CHECK-NOT: fabs
-; CHECK: fcmp one double %a, 0.000000e+00
}
define i32 @test16(double %a) nounwind {
+; CHECK-LABEL: @test16(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ueq double %a, 0.000000e+00
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
%call = tail call double @fabs(double %a) nounwind
%cmp = fcmp ueq double %call, 0.000000e+00
%conv = zext i1 %cmp to i32
ret i32 %conv
-; CHECK-LABEL: @test16(
-; CHECK-NOT: fabs
-; CHECK: fcmp ueq double %a, 0.000000e+00
}
define i32 @test16_intrinsic(double %a) nounwind {
+; CHECK-LABEL: @test16_intrinsic(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ueq double %a, 0.000000e+00
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
%call = tail call double @llvm.fabs.f64(double %a) nounwind
%cmp = fcmp ueq double %call, 0.000000e+00
%conv = zext i1 %cmp to i32
ret i32 %conv
-; CHECK-LABEL: @test16_intrinsic(
-; CHECK-NOT: fabs
-; CHECK: fcmp ueq double %a, 0.000000e+00
}
; Don't crash.
define i32 @test17(double %a, double (double)* %p) nounwind {
+; CHECK-LABEL: @test17(
+; CHECK-NEXT: [[CALL:%.*]] = tail call double %p(double %a) #1
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ueq double [[CALL]], 0.000000e+00
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
%call = tail call double %p(double %a) nounwind
%cmp = fcmp ueq double %call, 0.000000e+00
%conv = zext i1 %cmp to i32
@@ -243,16 +296,18 @@ define i32 @test17(double %a, double (double)* %p) nounwind {
; Can fold fcmp with undef on one side by choosing NaN for the undef
define i32 @test18_undef_unordered(float %a) nounwind {
-; CHECK-LABEL: @test18_undef_unordered
-; CHECK: ret i32 1
+; CHECK-LABEL: @test18_undef_unordered(
+; CHECK-NEXT: ret i32 1
+;
%cmp = fcmp ueq float %a, undef
%conv = zext i1 %cmp to i32
ret i32 %conv
}
; Can fold fcmp with undef on one side by choosing NaN for the undef
define i32 @test18_undef_ordered(float %a) nounwind {
-; CHECK-LABEL: @test18_undef_ordered
-; CHECK: ret i32 0
+; CHECK-LABEL: @test18_undef_ordered(
+; CHECK-NEXT: ret i32 0
+;
%cmp = fcmp oeq float %a, undef
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -264,14 +319,18 @@ define i32 @test18_undef_ordered(float %a) nounwind {
; because whatever you choose for the first undef
; you can choose NaN for the other undef
define i1 @test19_undef_unordered() nounwind {
-; CHECK-LABEL: @test19_undef
-; CHECK: ret i1 true
+; CHECK-LABEL: @test19_undef_unordered(
+; CHECK-NEXT: ret i1 true
+;
%cmp = fcmp ueq float undef, undef
ret i1 %cmp
}
+
define i1 @test19_undef_ordered() nounwind {
-; CHECK-LABEL: @test19_undef
-; CHECK: ret i1 false
+; CHECK-LABEL: @test19_undef_ordered(
+; CHECK-NEXT: ret i1 false
+;
%cmp = fcmp oeq float undef, undef
ret i1 %cmp
}
+
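The fabs folds in tests 9-16 all follow from two facts: fabs(%a) is a NaN exactly when %a is, and is otherwise non-negative. Compared against +0.0, each predicate collapses:

  ; fcmp olt (fabs %a), 0.0  ->  false             (test9)
  ; fcmp ole (fabs %a), 0.0  ->  fcmp oeq %a, 0.0  (test10)
  ; fcmp ogt (fabs %a), 0.0  ->  fcmp one %a, 0.0  (test11)
  ; fcmp oge (fabs %a), 0.0  ->  fcmp ord %a, 0.0  (test12)
  ; une/oeq/one/ueq keep their predicate and just drop the fabs (tests 13-16)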
diff --git a/test/Transforms/InstCombine/float-shrink-compare.ll b/test/Transforms/InstCombine/float-shrink-compare.ll
index a08f9531d217..e0925952bf44 100644
--- a/test/Transforms/InstCombine/float-shrink-compare.ll
+++ b/test/Transforms/InstCombine/float-shrink-compare.ll
@@ -3,171 +3,329 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
target triple = "x86_64-apple-macosx10.8.0"
define i32 @test1(float %x, float %y) nounwind uwtable {
- %1 = fpext float %x to double
- %2 = call double @ceil(double %1) nounwind readnone
- %3 = fpext float %y to double
- %4 = fcmp oeq double %2, %3
- %5 = zext i1 %4 to i32
- ret i32 %5
+ %x.ext = fpext float %x to double
+ %ceil = call double @ceil(double %x.ext) nounwind readnone
+ %ext.y = fpext float %y to double
+ %cmp = fcmp oeq double %ceil, %ext.y
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
; CHECK-LABEL: @test1(
-; CHECK-NEXT: %ceilf = call float @ceilf(float %x)
-; CHECK-NEXT: fcmp oeq float %ceilf, %y
+; CHECK-NEXT: %ceil = call float @llvm.ceil.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %ceil, %y
+}
+
+define i32 @test1_intrin(float %x, float %y) nounwind uwtable {
+ %x.ext = fpext float %x to double
+ %ceil = call double @llvm.ceil.f64(double %x.ext) nounwind readnone
+ %ext.y = fpext float %y to double
+ %cmp = fcmp oeq double %ceil, %ext.y
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
+; CHECK-LABEL: @test1_intrin(
+; CHECK-NEXT: %ceil = call float @llvm.ceil.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %ceil, %y
}
define i32 @test2(float %x, float %y) nounwind uwtable {
- %1 = fpext float %x to double
- %2 = call double @fabs(double %1) nounwind readnone
- %3 = fpext float %y to double
- %4 = fcmp oeq double %2, %3
- %5 = zext i1 %4 to i32
- ret i32 %5
+ %x.ext = fpext float %x to double
+ %fabs = call double @fabs(double %x.ext) nounwind readnone
+ %y.ext = fpext float %y to double
+ %cmp = fcmp oeq double %fabs, %y.ext
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
; CHECK-LABEL: @test2(
-; CHECK-NEXT: %fabsf = call float @fabsf(float %x)
-; CHECK-NEXT: fcmp oeq float %fabsf, %y
+; CHECK-NEXT: %fabs = call float @llvm.fabs.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %fabs, %y
}
-define i32 @test3(float %x, float %y) nounwind uwtable {
+define i32 @test2_intrin(float %x, float %y) nounwind uwtable {
+ %x.ext = fpext float %x to double
+ %fabs = call double @llvm.fabs.f64(double %x.ext) nounwind readnone
+ %y.ext = fpext float %y to double
+ %cmp = fcmp oeq double %fabs, %y.ext
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
+; CHECK-LABEL: @test2_intrin(
+; CHECK-NEXT: %fabs = call float @llvm.fabs.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %fabs, %y
+}
+
+define i32 @fmf_test2(float %x, float %y) nounwind uwtable {
%1 = fpext float %x to double
- %2 = call double @floor(double %1) nounwind readnone
+ %2 = call nnan double @fabs(double %1) nounwind readnone
%3 = fpext float %y to double
%4 = fcmp oeq double %2, %3
%5 = zext i1 %4 to i32
ret i32 %5
+; CHECK-LABEL: @fmf_test2(
+; CHECK-NEXT: [[FABS:%[0-9]+]] = call nnan float @llvm.fabs.f32(float %x)
+; CHECK-NEXT: fcmp oeq float [[FABS]], %y
+}
+
+define i32 @test3(float %x, float %y) nounwind uwtable {
+ %x.ext = fpext float %x to double
+ %floor = call double @floor(double %x.ext) nounwind readnone
+ %y.ext = fpext float %y to double
+ %cmp = fcmp oeq double %floor, %y.ext
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
; CHECK-LABEL: @test3(
-; CHECK-NEXT: %floorf = call float @floorf(float %x)
-; CHECK-NEXT: fcmp oeq float %floorf, %y
+; CHECK-NEXT: %floor = call float @llvm.floor.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %floor, %y
+}
+
+define i32 @test3_intrin(float %x, float %y) nounwind uwtable {
+ %x.ext = fpext float %x to double
+ %floor = call double @llvm.floor.f64(double %x.ext) nounwind readnone
+ %y.ext = fpext float %y to double
+ %cmp = fcmp oeq double %floor, %y.ext
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
+; CHECK-LABEL: @test3_intrin(
+; CHECK-NEXT: %floor = call float @llvm.floor.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %floor, %y
}
define i32 @test4(float %x, float %y) nounwind uwtable {
- %1 = fpext float %x to double
- %2 = call double @nearbyint(double %1) nounwind
- %3 = fpext float %y to double
- %4 = fcmp oeq double %2, %3
- %5 = zext i1 %4 to i32
- ret i32 %5
+ %x.ext = fpext float %x to double
+ %nearbyint = call double @nearbyint(double %x.ext) nounwind
+ %y.ext = fpext float %y to double
+ %cmp = fcmp oeq double %nearbyint, %y.ext
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
; CHECK-LABEL: @test4(
-; CHECK-NEXT: %nearbyintf = call float @nearbyintf(float %x)
-; CHECK-NEXT: fcmp oeq float %nearbyintf, %y
+; CHECK-NEXT: %nearbyint = call float @llvm.nearbyint.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %nearbyint, %y
+}
+
+define i32 @shrink_nearbyint_intrin(float %x, float %y) nounwind uwtable {
+ %x.ext = fpext float %x to double
+ %nearbyint = call double @llvm.nearbyint.f64(double %x.ext) nounwind
+ %y.ext = fpext float %y to double
+ %cmp = fcmp oeq double %nearbyint, %y.ext
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
+; CHECK-LABEL: @shrink_nearbyint_intrin(
+; CHECK-NEXT: %nearbyint = call float @llvm.nearbyint.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %nearbyint, %y
}
define i32 @test5(float %x, float %y) nounwind uwtable {
- %1 = fpext float %x to double
- %2 = call double @rint(double %1) nounwind
- %3 = fpext float %y to double
- %4 = fcmp oeq double %2, %3
- %5 = zext i1 %4 to i32
- ret i32 %5
+ %x.ext = fpext float %x to double
+ %rint = call double @rint(double %x.ext) nounwind
+ %y.ext = fpext float %y to double
+ %cmp = fcmp oeq double %rint, %y.ext
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
; CHECK-LABEL: @test5(
-; CHECK-NEXT: %rintf = call float @rintf(float %x)
-; CHECK-NEXT: fcmp oeq float %rintf, %y
+; CHECK-NEXT: %rint = call float @llvm.rint.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %rint, %y
}
define i32 @test6(float %x, float %y) nounwind uwtable {
- %1 = fpext float %x to double
- %2 = call double @round(double %1) nounwind readnone
- %3 = fpext float %y to double
- %4 = fcmp oeq double %2, %3
- %5 = zext i1 %4 to i32
- ret i32 %5
+ %x.ext = fpext float %x to double
+ %round = call double @round(double %x.ext) nounwind readnone
+ %y.ext = fpext float %y to double
+ %cmp = fcmp oeq double %round, %y.ext
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
; CHECK-LABEL: @test6(
-; CHECK-NEXT: %roundf = call float @roundf(float %x)
-; CHECK-NEXT: fcmp oeq float %roundf, %y
+; CHECK-NEXT: %round = call float @llvm.round.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %round, %y
+}
+
+define i32 @test6_intrin(float %x, float %y) nounwind uwtable {
+ %x.ext = fpext float %x to double
+ %round = call double @llvm.round.f64(double %x.ext) nounwind readnone
+ %y.ext = fpext float %y to double
+ %cmp = fcmp oeq double %round, %y.ext
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
+; CHECK-LABEL: @test6_intrin(
+; CHECK-NEXT: %round = call float @llvm.round.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %round, %y
}
define i32 @test7(float %x, float %y) nounwind uwtable {
- %1 = fpext float %x to double
- %2 = call double @trunc(double %1) nounwind
- %3 = fpext float %y to double
- %4 = fcmp oeq double %2, %3
- %5 = zext i1 %4 to i32
- ret i32 %5
+ %x.ext = fpext float %x to double
+ %trunc = call double @trunc(double %x.ext) nounwind
+ %y.ext = fpext float %y to double
+ %cmp = fcmp oeq double %trunc, %y.ext
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
; CHECK-LABEL: @test7(
-; CHECK-NEXT: %truncf = call float @truncf(float %x)
-; CHECK-NEXT: fcmp oeq float %truncf, %y
+; CHECK-NEXT: %trunc = call float @llvm.trunc.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %trunc, %y
+}
+
+define i32 @test7_intrin(float %x, float %y) nounwind uwtable {
+ %x.ext = fpext float %x to double
+ %trunc = call double @llvm.trunc.f64(double %x.ext) nounwind
+ %y.ext = fpext float %y to double
+ %cmp = fcmp oeq double %trunc, %y.ext
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
+; CHECK-LABEL: @test7_intrin(
+; CHECK-NEXT: %trunc = call float @llvm.trunc.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %trunc, %y
}
define i32 @test8(float %x, float %y) nounwind uwtable {
- %1 = fpext float %y to double
- %2 = fpext float %x to double
- %3 = call double @ceil(double %2) nounwind readnone
- %4 = fcmp oeq double %1, %3
- %5 = zext i1 %4 to i32
- ret i32 %5
+ %x.ext = fpext float %x to double
+ %y.ext = fpext float %y to double
+ %ceil = call double @ceil(double %x.ext) nounwind readnone
+ %cmp = fcmp oeq double %y.ext, %ceil
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
; CHECK-LABEL: @test8(
-; CHECK-NEXT: %ceilf = call float @ceilf(float %x)
-; CHECK-NEXT: fcmp oeq float %ceilf, %y
+; CHECK-NEXT: %ceil = call float @llvm.ceil.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %ceil, %y
+}
+
+define i32 @test8_intrin(float %x, float %y) nounwind uwtable {
+ %x.ext = fpext float %x to double
+ %y.ext = fpext float %y to double
+ %ceil = call double @llvm.ceil.f64(double %x.ext) nounwind readnone
+ %cmp = fcmp oeq double %y.ext, %ceil
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
+; CHECK-LABEL: @test8_intrin(
+; CHECK-NEXT: %ceil = call float @llvm.ceil.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %ceil, %y
}
define i32 @test9(float %x, float %y) nounwind uwtable {
- %1 = fpext float %y to double
- %2 = fpext float %x to double
- %3 = call double @fabs(double %2) nounwind readnone
- %4 = fcmp oeq double %1, %3
- %5 = zext i1 %4 to i32
- ret i32 %5
+ %x.ext = fpext float %x to double
+ %y.ext = fpext float %y to double
+ %fabs = call double @fabs(double %x.ext) nounwind readnone
+ %cmp = fcmp oeq double %y.ext, %fabs
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
; CHECK-LABEL: @test9(
-; CHECK-NEXT: %fabsf = call float @fabsf(float %x)
-; CHECK-NEXT: fcmp oeq float %fabsf, %y
+; CHECK-NEXT: %fabs = call float @llvm.fabs.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %fabs, %y
+}
+
+define i32 @test9_intrin(float %x, float %y) nounwind uwtable {
+ %x.ext = fpext float %x to double
+ %y.ext = fpext float %y to double
+ %fabs = call double @llvm.fabs.f64(double %x.ext) nounwind readnone
+ %cmp = fcmp oeq double %y.ext, %fabs
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
+; CHECK-LABEL: @test9_intrin(
+; CHECK-NEXT: %fabs = call float @llvm.fabs.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %fabs, %y
}
define i32 @test10(float %x, float %y) nounwind uwtable {
- %1 = fpext float %y to double
- %2 = fpext float %x to double
- %3 = call double @floor(double %2) nounwind readnone
- %4 = fcmp oeq double %1, %3
- %5 = zext i1 %4 to i32
- ret i32 %5
+ %x.ext = fpext float %x to double
+ %y.ext = fpext float %y to double
+ %floor = call double @floor(double %x.ext) nounwind readnone
+ %cmp = fcmp oeq double %floor, %y.ext
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
; CHECK-LABEL: @test10(
-; CHECK-NEXT: %floorf = call float @floorf(float %x)
-; CHECK-NEXT: fcmp oeq float %floorf, %y
+; CHECK-NEXT: %floor = call float @llvm.floor.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %floor, %y
+}
+
+define i32 @test10_intrin(float %x, float %y) nounwind uwtable {
+ %x.ext = fpext float %x to double
+ %y.ext = fpext float %y to double
+ %floor = call double @llvm.floor.f64(double %x.ext) nounwind readnone
+ %cmp = fcmp oeq double %floor, %y.ext
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
+; CHECK-LABEL: @test10_intrin(
+; CHECK-NEXT: %floor = call float @llvm.floor.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %floor, %y
}
define i32 @test11(float %x, float %y) nounwind uwtable {
- %1 = fpext float %y to double
- %2 = fpext float %x to double
- %3 = call double @nearbyint(double %2) nounwind
- %4 = fcmp oeq double %1, %3
- %5 = zext i1 %4 to i32
- ret i32 %5
+ %x.ext = fpext float %x to double
+ %y.ext = fpext float %y to double
+ %nearbyint = call double @nearbyint(double %x.ext) nounwind
+ %cmp = fcmp oeq double %nearbyint, %y.ext
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
; CHECK-LABEL: @test11(
-; CHECK-NEXT: %nearbyintf = call float @nearbyintf(float %x)
-; CHECK-NEXT: fcmp oeq float %nearbyintf, %y
+; CHECK-NEXT: %nearbyint = call float @llvm.nearbyint.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %nearbyint, %y
+}
+
+define i32 @test11_intrin(float %x, float %y) nounwind uwtable {
+ %x.ext = fpext float %x to double
+ %y.ext = fpext float %y to double
+ %nearbyint = call double @llvm.nearbyint.f64(double %x.ext) nounwind
+ %cmp = fcmp oeq double %nearbyint, %y.ext
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
+; CHECK-LABEL: @test11_intrin(
+; CHECK-NEXT: %nearbyint = call float @llvm.nearbyint.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %nearbyint, %y
}
define i32 @test12(float %x, float %y) nounwind uwtable {
- %1 = fpext float %y to double
- %2 = fpext float %x to double
- %3 = call double @rint(double %2) nounwind
- %4 = fcmp oeq double %1, %3
- %5 = zext i1 %4 to i32
- ret i32 %5
+ %x.ext = fpext float %x to double
+ %y.ext = fpext float %y to double
+ %rint = call double @rint(double %x.ext) nounwind
+ %cmp = fcmp oeq double %y.ext, %rint
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
; CHECK-LABEL: @test12(
-; CHECK-NEXT: %rintf = call float @rintf(float %x)
-; CHECK-NEXT: fcmp oeq float %rintf, %y
+; CHECK-NEXT: %rint = call float @llvm.rint.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %rint, %y
}
define i32 @test13(float %x, float %y) nounwind uwtable {
- %1 = fpext float %y to double
- %2 = fpext float %x to double
- %3 = call double @round(double %2) nounwind readnone
- %4 = fcmp oeq double %1, %3
- %5 = zext i1 %4 to i32
- ret i32 %5
+ %x.ext = fpext float %x to double
+ %y.ext = fpext float %y to double
+ %round = call double @round(double %x.ext) nounwind readnone
+ %cmp = fcmp oeq double %y.ext, %round
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
; CHECK-LABEL: @test13(
-; CHECK-NEXT: %roundf = call float @roundf(float %x)
-; CHECK-NEXT: fcmp oeq float %roundf, %y
+; CHECK-NEXT: %round = call float @llvm.round.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %round, %y
+}
+
+define i32 @test13_intrin(float %x, float %y) nounwind uwtable {
+ %x.ext = fpext float %x to double
+ %y.ext = fpext float %y to double
+ %round = call double @llvm.round.f64(double %x.ext) nounwind readnone
+ %cmp = fcmp oeq double %y.ext, %round
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
+; CHECK-LABEL: @test13_intrin(
+; CHECK-NEXT: %round = call float @llvm.round.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %round, %y
}
define i32 @test14(float %x, float %y) nounwind uwtable {
- %1 = fpext float %y to double
- %2 = fpext float %x to double
- %3 = call double @trunc(double %2) nounwind
- %4 = fcmp oeq double %1, %3
- %5 = zext i1 %4 to i32
- ret i32 %5
+ %x.ext = fpext float %x to double
+ %y.ext = fpext float %y to double
+ %trunc = call double @trunc(double %x.ext) nounwind
+ %cmp = fcmp oeq double %y.ext, %trunc
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
; CHECK-LABEL: @test14(
-; CHECK-NEXT: %truncf = call float @truncf(float %x)
-; CHECK-NEXT: fcmp oeq float %truncf, %y
+; CHECK-NEXT: %trunc = call float @llvm.trunc.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %trunc, %y
+}
+
+define i32 @test14_intrin(float %x, float %y) nounwind uwtable {
+ %x.ext = fpext float %x to double
+ %y.ext = fpext float %y to double
+ %trunc = call double @llvm.trunc.f64(double %x.ext) nounwind
+ %cmp = fcmp oeq double %y.ext, %trunc
+ %cmp.ext = zext i1 %cmp to i32
+ ret i32 %cmp.ext
+; CHECK-LABEL: @test14_intrin(
+; CHECK-NEXT: %trunc = call float @llvm.trunc.f32(float %x)
+; CHECK-NEXT: fcmp oeq float %trunc, %y
}
define i32 @test15(float %x, float %y, float %z) nounwind uwtable {
@@ -269,3 +427,10 @@ declare double @round(double) nounwind readnone
declare double @trunc(double) nounwind readnone
declare double @fmin(double, double) nounwind readnone
declare double @fmax(double, double) nounwind readnone
+
+declare double @llvm.fabs.f64(double) nounwind readnone
+declare double @llvm.ceil.f64(double) nounwind readnone
+declare double @llvm.floor.f64(double) nounwind readnone
+declare double @llvm.nearbyint.f64(double) nounwind readnone
+declare double @llvm.round.f64(double) nounwind readnone
+declare double @llvm.trunc.f64(double) nounwind readnone
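These shrinks are exact rather than approximate: %x.ext holds a value that is exactly a float, and for ceil/floor/round/trunc/rint/nearbyint/fabs the result on such a value is itself exactly representable as a float (integral results of float-magnitude inputs fit in float; fabs only clears the sign bit). Hence

  ; fcmp oeq (f64 op(fpext %x)), (fpext %y)  ==  fcmp oeq (f32 op(%x)), %y

and the whole comparison can be carried out in float, as the CHECK lines expect.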
diff --git a/test/Transforms/InstCombine/fma.ll b/test/Transforms/InstCombine/fma.ll
index e41f1e7edd46..3808e07d89a0 100644
--- a/test/Transforms/InstCombine/fma.ll
+++ b/test/Transforms/InstCombine/fma.ll
@@ -78,7 +78,8 @@ define float @fmuladd_fneg_x_fneg_y(float %x, float %y, float %z) {
}
; CHECK-LABEL: @fmuladd_fneg_x_fneg_y_fast(
-; CHECK: %fmuladd = call fast float @llvm.fmuladd.f32(float %x, float %y, float %z)
+; CHECK-NEXT: %1 = fmul fast float %x, %y
+; CHECK-NEXT: %fmuladd = fadd fast float %1, %z
define float @fmuladd_fneg_x_fneg_y_fast(float %x, float %y, float %z) {
%x.fneg = fsub float -0.0, %x
%y.fneg = fsub float -0.0, %y
@@ -122,7 +123,8 @@ define float @fmuladd_fabs_x_fabs_x(float %x, float %z) {
}
; CHECK-LABEL: @fmuladd_fabs_x_fabs_x_fast(
-; CHECK: %fmuladd = call fast float @llvm.fmuladd.f32(float %x, float %x, float %z)
+; CHECK-NEXT: %1 = fmul fast float %x, %x
+; CHECK-NEXT: %fmuladd = fadd fast float %1, %z
define float @fmuladd_fabs_x_fabs_x_fast(float %x, float %z) {
%x.fabs = call float @llvm.fabs.f32(float %x)
%fmuladd = call fast float @llvm.fmuladd.f32(float %x.fabs, float %x.fabs, float %z)
@@ -144,7 +146,8 @@ define float @fma_k_y_z_fast(float %y, float %z) {
}
; CHECK-LABEL: @fmuladd_k_y_z_fast(
-; CHECK: %fmuladd = call fast float @llvm.fmuladd.f32(float %y, float 4.000000e+00, float %z)
+; CHECK: %1 = fmul fast float %y, 4.000000e+00
+; CHECK-NEXT: %fmuladd = fadd fast float %1, %z
define float @fmuladd_k_y_z_fast(float %y, float %z) {
%fmuladd = call fast float @llvm.fmuladd.f32(float 4.0, float %y, float %z)
ret float %fmuladd
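With 'fast' on the call, fmuladd's fuse-or-not contract lets instcombine split it into separate fast fmul/fadd, which exposes the fneg, fabs, and constant simplifications tested above. Restating the constant case from this file as a standalone sketch:

  declare float @llvm.fmuladd.f32(float, float, float)

  define float @split_fmuladd(float %y, float %z) {
    %r = call fast float @llvm.fmuladd.f32(float 4.0, float %y, float %z)
    ret float %r
    ; expected after -instcombine (per the CHECK lines above):
    ;   %1 = fmul fast float %y, 4.000000e+00
    ;   %r = fadd fast float %1, %z
  }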
diff --git a/test/Transforms/InstCombine/getelementptr.ll b/test/Transforms/InstCombine/getelementptr.ll
index 7ccbdf11fded..de8190da01c2 100644
--- a/test/Transforms/InstCombine/getelementptr.ll
+++ b/test/Transforms/InstCombine/getelementptr.ll
@@ -883,6 +883,33 @@ define %struct.C* @test46(%struct.C* %c1, %struct.C* %c2, i64 %N) {
; CHECK-NEXT: ret %struct.C* [[GEP]]
}
+define i32* @test47(i32* %I, i64 %C, i64 %D) {
+ %sub = sub i64 %D, %C
+ %A = getelementptr i32, i32* %I, i64 %C
+ %B = getelementptr i32, i32* %A, i64 %sub
+ ret i32* %B
+; CHECK-LABEL: @test47(
+; CHECK-NEXT: %B = getelementptr i32, i32* %I, i64 %D
+}
+
+define i32* @test48(i32* %I, i64 %C, i64 %D) {
+ %sub = sub i64 %D, %C
+ %A = getelementptr i32, i32* %I, i64 %sub
+ %B = getelementptr i32, i32* %A, i64 %C
+ ret i32* %B
+; CHECK-LABEL: @test48(
+; CHECK-NEXT: %B = getelementptr i32, i32* %I, i64 %D
+}
+
+define i32* @test49(i32* %I, i64 %C) {
+ %notC = xor i64 -1, %C
+ %A = getelementptr i32, i32* %I, i64 %C
+ %B = getelementptr i32, i32* %A, i64 %notC
+ ret i32* %B
+; CHECK-LABEL: @test49(
+; CHECK-NEXT: %B = getelementptr i32, i32* %I, i64 -1
+}
+
define i32 addrspace(1)* @ascast_0_gep(i32* %p) nounwind {
; CHECK-LABEL: @ascast_0_gep(
; CHECK-NOT: getelementptr
@@ -904,4 +931,15 @@ define i32 addrspace(1)* @ascast_0_0_gep([128 x i32]* %p) nounwind {
ret i32 addrspace(1)* %x
}
+define <2 x i32*> @PR32414(i32** %ptr) {
+; CHECK-LABEL: @PR32414(
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32** %ptr to i32*
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], <2 x i64> <i64 0, i64 1>
+; CHECK-NEXT: ret <2 x i32*> [[TMP1]]
+;
+ %tmp0 = bitcast i32** %ptr to i32*
+ %tmp1 = getelementptr inbounds i32, i32* %tmp0, <2 x i64> <i64 0, i64 1>
+ ret <2 x i32*> %tmp1
+}
+
; CHECK: attributes [[NUW]] = { nounwind }
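Tests 47-49 are plain index arithmetic: consecutive GEPs over the same element type add their indices, so

  ; gep(gep(%I, C), D - C)  ==  gep(%I, C + (D - C))  ==  gep(%I, D)

and in test49, %notC = xor i64 -1, %C is ~C, with C + ~C == -1 in two's complement, which yields the single gep at index -1.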
diff --git a/test/Transforms/InstCombine/icmp-add.ll b/test/Transforms/InstCombine/icmp-add.ll
new file mode 100644
index 000000000000..efeb9d5bb45b
--- /dev/null
+++ b/test/Transforms/InstCombine/icmp-add.ll
@@ -0,0 +1,247 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; PR1949
+
+define i1 @test1(i32 %a) {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 %a, -5
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %b = add i32 %a, 4
+ %c = icmp ult i32 %b, 4
+ ret i1 %c
+}
+
+define <2 x i1> @test1vec(<2 x i32> %a) {
+; CHECK-LABEL: @test1vec(
+; CHECK-NEXT: [[C:%.*]] = icmp ugt <2 x i32> %a, <i32 -5, i32 -5>
+; CHECK-NEXT: ret <2 x i1> [[C]]
+;
+ %b = add <2 x i32> %a, <i32 4, i32 4>
+ %c = icmp ult <2 x i32> %b, <i32 4, i32 4>
+ ret <2 x i1> %c
+}
+
+define i1 @test2(i32 %a) {
+; CHECK-LABEL: @test2(
+; CHECK-NEXT: [[C:%.*]] = icmp ult i32 %a, 4
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %b = sub i32 %a, 4
+ %c = icmp ugt i32 %b, -5
+ ret i1 %c
+}
+
+define <2 x i1> @test2vec(<2 x i32> %a) {
+; CHECK-LABEL: @test2vec(
+; CHECK-NEXT: [[C:%.*]] = icmp ult <2 x i32> %a, <i32 4, i32 4>
+; CHECK-NEXT: ret <2 x i1> [[C]]
+;
+ %b = sub <2 x i32> %a, <i32 4, i32 4>
+ %c = icmp ugt <2 x i32> %b, <i32 -5, i32 -5>
+ ret <2 x i1> %c
+}
+
+define i1 @test3(i32 %a) {
+; CHECK-LABEL: @test3(
+; CHECK-NEXT: [[C:%.*]] = icmp sgt i32 %a, 2147483643
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %b = add i32 %a, 4
+ %c = icmp slt i32 %b, 2147483652
+ ret i1 %c
+}
+
+define <2 x i1> @test3vec(<2 x i32> %a) {
+; CHECK-LABEL: @test3vec(
+; CHECK-NEXT: [[C:%.*]] = icmp sgt <2 x i32> %a, <i32 2147483643, i32 2147483643>
+; CHECK-NEXT: ret <2 x i1> [[C]]
+;
+ %b = add <2 x i32> %a, <i32 4, i32 4>
+ %c = icmp slt <2 x i32> %b, <i32 2147483652, i32 2147483652>
+ ret <2 x i1> %c
+}
+
+define i1 @test4(i32 %a) {
+; CHECK-LABEL: @test4(
+; CHECK-NEXT: [[C:%.*]] = icmp slt i32 %a, -4
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %b = add i32 %a, 2147483652
+ %c = icmp sge i32 %b, 4
+ ret i1 %c
+}
+
+define <2 x i1> @test4vec(<2 x i32> %a) {
+; CHECK-LABEL: @test4vec(
+; CHECK-NEXT: [[C:%.*]] = icmp slt <2 x i32> %a, <i32 -4, i32 -4>
+; CHECK-NEXT: ret <2 x i1> [[C]]
+;
+ %b = add <2 x i32> %a, <i32 2147483652, i32 2147483652>
+ %c = icmp sge <2 x i32> %b, <i32 4, i32 4>
+ ret <2 x i1> %c
+}
+
+; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
+; This becomes equality because it's at the limit.
+
+define i1 @nsw_slt1(i8 %a) {
+; CHECK-LABEL: @nsw_slt1(
+; CHECK-NEXT: [[C:%.*]] = icmp eq i8 %a, -128
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %b = add nsw i8 %a, 100
+ %c = icmp slt i8 %b, -27
+ ret i1 %c
+}
+
+; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
+; This becomes equality because it's at the limit.
+
+define i1 @nsw_slt2(i8 %a) {
+; CHECK-LABEL: @nsw_slt2(
+; CHECK-NEXT: [[C:%.*]] = icmp ne i8 %a, 127
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %b = add nsw i8 %a, -100
+ %c = icmp slt i8 %b, 27
+ ret i1 %c
+}
+
+; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
+; Less than the limit, so the predicate doesn't change.
+
+define i1 @nsw_slt3(i8 %a) {
+; CHECK-LABEL: @nsw_slt3(
+; CHECK-NEXT: [[C:%.*]] = icmp slt i8 %a, -126
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %b = add nsw i8 %a, 100
+ %c = icmp slt i8 %b, -26
+ ret i1 %c
+}
+
+; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
+; Less than the limit, so the predicate doesn't change.
+
+define i1 @nsw_slt4(i8 %a) {
+; CHECK-LABEL: @nsw_slt4(
+; CHECK-NEXT: [[C:%.*]] = icmp slt i8 %a, 126
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %b = add nsw i8 %a, -100
+ %c = icmp slt i8 %b, 26
+ ret i1 %c
+}
+
+; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
+; Try sgt to make sure that works too.
+
+define i1 @nsw_sgt1(i8 %a) {
+; CHECK-LABEL: @nsw_sgt1(
+; CHECK-NEXT: [[C:%.*]] = icmp eq i8 %a, 127
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %b = add nsw i8 %a, -100
+ %c = icmp sgt i8 %b, 26
+ ret i1 %c
+}
+
+; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
+; Try a vector type to make sure that works too.
+; FIXME: This should be 'eq 127' as above.
+
+define <2 x i1> @nsw_sgt2_splat_vec(<2 x i8> %a) {
+; CHECK-LABEL: @nsw_sgt2_splat_vec(
+; CHECK-NEXT: [[C:%.*]] = icmp sgt <2 x i8> %a, <i8 -126, i8 -126>
+; CHECK-NEXT: ret <2 x i1> [[C]]
+;
+ %b = add nsw <2 x i8> %a, <i8 100, i8 100>
+ %c = icmp sgt <2 x i8> %b, <i8 -26, i8 -26>
+ ret <2 x i1> %c
+}
+
+; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
+; Comparison with 0 doesn't need special-casing.
+
+define i1 @slt_zero_add_nsw(i32 %a) {
+; CHECK-LABEL: @slt_zero_add_nsw(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 %a, -1
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %add = add nsw i32 %a, 1
+ %cmp = icmp slt i32 %add, 0
+ ret i1 %cmp
+}
+
+; The same fold should work with vectors.
+
+define <2 x i1> @slt_zero_add_nsw_splat_vec(<2 x i8> %a) {
+; CHECK-LABEL: @slt_zero_add_nsw_splat_vec(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i8> %a, <i8 -1, i8 -1>
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %add = add nsw <2 x i8> %a, <i8 1, i8 1>
+ %cmp = icmp slt <2 x i8> %add, zeroinitializer
+ ret <2 x i1> %cmp
+}
+
+; Test the edges - instcombine should not interfere with simplification to constants.
+; The constant subtraction (-28 - 100 = -128) does not overflow, but the compare is still always false.
+
+define i1 @nsw_slt3_ov_no(i8 %a) {
+; CHECK-LABEL: @nsw_slt3_ov_no(
+; CHECK-NEXT: ret i1 false
+;
+ %b = add nsw i8 %a, 100
+ %c = icmp slt i8 %b, -28
+ ret i1 %c
+}
+
+; Test the edges - instcombine should not interfere with simplification to constants.
+; Constant subtraction overflows. This is false.
+
+define i1 @nsw_slt4_ov(i8 %a) {
+; CHECK-LABEL: @nsw_slt4_ov(
+; CHECK-NEXT: ret i1 false
+;
+ %b = add nsw i8 %a, 100
+ %c = icmp slt i8 %b, -29
+ ret i1 %c
+}
+
+; Test the edges - instcombine should not interfere with simplification to constants.
+; Constant subtraction overflows. This is true.
+
+define i1 @nsw_slt5_ov(i8 %a) {
+; CHECK-LABEL: @nsw_slt5_ov(
+; CHECK-NEXT: ret i1 true
+;
+ %b = add nsw i8 %a, -100
+ %c = icmp slt i8 %b, 28
+ ret i1 %c
+}
+
+; InstCombine should not thwart this opportunity to simplify completely.
+
+define i1 @slt_zero_add_nsw_signbit(i8 %x) {
+; CHECK-LABEL: @slt_zero_add_nsw_signbit(
+; CHECK-NEXT: ret i1 true
+;
+ %y = add nsw i8 %x, -128
+ %z = icmp slt i8 %y, 0
+ ret i1 %z
+}
+
+; InstCombine should not thwart this opportunity to simplify completely.
+
+define i1 @slt_zero_add_nuw_signbit(i8 %x) {
+; CHECK-LABEL: @slt_zero_add_nuw_signbit(
+; CHECK-NEXT: ret i1 true
+;
+ %y = add nuw i8 %x, 128
+ %z = icmp slt i8 %y, 0
+ ret i1 %z
+}
+
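The unsigned folds at the top of this file are wraparound range checks: (a + 4) u< 4 holds exactly when the add wrapped, i.e. a is one of -4..-1, which is a u> -5 (test1 and test2 are each other's inverses). The nsw tests are the same constant subtraction carried out at the type's edge:

  ; nsw_slt1: (a +nsw 100) s< -27   ->  a s< -127  ->  a == -128  (only i8 value below -127)
  ; nsw_slt2: (a +nsw -100) s< 27   ->  a s< 127   ->  a != 127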
diff --git a/test/Transforms/InstCombine/icmp-shl-nsw.ll b/test/Transforms/InstCombine/icmp-shl-nsw.ll
index 896a45625b9f..ba05302897e9 100644
--- a/test/Transforms/InstCombine/icmp-shl-nsw.ll
+++ b/test/Transforms/InstCombine/icmp-shl-nsw.ll
@@ -73,8 +73,7 @@ define <2 x i1> @icmp_shl_nsw_eq_vec(<2 x i32> %x) {
define i1 @icmp_sgt1(i8 %x) {
; CHECK-LABEL: @icmp_sgt1(
-; CHECK-NEXT: [[SHL_MASK:%.*]] = and i8 %x, 127
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 [[SHL_MASK]], 64
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 %x, -64
; CHECK-NEXT: ret i1 [[CMP]]
;
%shl = shl nsw i8 %x, 1
@@ -84,8 +83,7 @@ define i1 @icmp_sgt1(i8 %x) {
define i1 @icmp_sgt2(i8 %x) {
; CHECK-LABEL: @icmp_sgt2(
-; CHECK-NEXT: [[SHL:%.*]] = shl nsw i8 %x, 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[SHL]], -127
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 %x, -64
; CHECK-NEXT: ret i1 [[CMP]]
;
%shl = shl nsw i8 %x, 1
@@ -95,8 +93,7 @@ define i1 @icmp_sgt2(i8 %x) {
define i1 @icmp_sgt3(i8 %x) {
; CHECK-LABEL: @icmp_sgt3(
-; CHECK-NEXT: [[SHL:%.*]] = shl nsw i8 %x, 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[SHL]], -16
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 %x, -8
; CHECK-NEXT: ret i1 [[CMP]]
;
%shl = shl nsw i8 %x, 1
@@ -106,8 +103,7 @@ define i1 @icmp_sgt3(i8 %x) {
define i1 @icmp_sgt4(i8 %x) {
; CHECK-LABEL: @icmp_sgt4(
-; CHECK-NEXT: [[SHL:%.*]] = shl nsw i8 %x, 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[SHL]], -2
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 %x, -1
; CHECK-NEXT: ret i1 [[CMP]]
;
%shl = shl nsw i8 %x, 1
@@ -120,8 +116,7 @@ define i1 @icmp_sgt4(i8 %x) {
define i1 @icmp_sgt5(i8 %x) {
; CHECK-LABEL: @icmp_sgt5(
-; CHECK-NEXT: [[SHL:%.*]] = shl nsw i8 %x, 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[SHL]], 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 %x, 0
; CHECK-NEXT: ret i1 [[CMP]]
;
%shl = shl nsw i8 %x, 1
@@ -131,8 +126,7 @@ define i1 @icmp_sgt5(i8 %x) {
define i1 @icmp_sgt6(i8 %x) {
; CHECK-LABEL: @icmp_sgt6(
-; CHECK-NEXT: [[SHL:%.*]] = shl nsw i8 %x, 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[SHL]], 16
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 %x, 8
; CHECK-NEXT: ret i1 [[CMP]]
;
%shl = shl nsw i8 %x, 1
@@ -142,8 +136,7 @@ define i1 @icmp_sgt6(i8 %x) {
define i1 @icmp_sgt7(i8 %x) {
; CHECK-LABEL: @icmp_sgt7(
-; CHECK-NEXT: [[SHL:%.*]] = shl nsw i8 %x, 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[SHL]], 124
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 %x, 62
; CHECK-NEXT: ret i1 [[CMP]]
;
%shl = shl nsw i8 %x, 1
@@ -155,8 +148,7 @@ define i1 @icmp_sgt7(i8 %x) {
define i1 @icmp_sgt8(i8 %x) {
; CHECK-LABEL: @icmp_sgt8(
-; CHECK-NEXT: [[SHL_MASK:%.*]] = and i8 %x, 127
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[SHL_MASK]], 63
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 %x, 63
; CHECK-NEXT: ret i1 [[CMP]]
;
%shl = shl nsw i8 %x, 1
@@ -170,8 +162,7 @@ define i1 @icmp_sgt8(i8 %x) {
define i1 @icmp_sgt9(i8 %x) {
; CHECK-LABEL: @icmp_sgt9(
-; CHECK-NEXT: [[SHL_MASK:%.*]] = and i8 %x, 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[SHL_MASK]], 0
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 %x, -1
; CHECK-NEXT: ret i1 [[CMP]]
;
%shl = shl nsw i8 %x, 7
@@ -181,8 +172,7 @@ define i1 @icmp_sgt9(i8 %x) {
define i1 @icmp_sgt10(i8 %x) {
; CHECK-LABEL: @icmp_sgt10(
-; CHECK-NEXT: [[SHL:%.*]] = shl nsw i8 %x, 7
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[SHL]], -127
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 %x, -1
; CHECK-NEXT: ret i1 [[CMP]]
;
%shl = shl nsw i8 %x, 7
@@ -192,8 +182,7 @@ define i1 @icmp_sgt10(i8 %x) {
define i1 @icmp_sgt11(i8 %x) {
; CHECK-LABEL: @icmp_sgt11(
-; CHECK-NEXT: [[SHL:%.*]] = shl nsw i8 %x, 7
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[SHL]], -2
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 %x, -1
; CHECK-NEXT: ret i1 [[CMP]]
;
%shl = shl nsw i8 %x, 7
@@ -205,8 +194,7 @@ define i1 @icmp_sgt11(i8 %x) {
define <2 x i1> @icmp_sgt11_vec(<2 x i8> %x) {
; CHECK-LABEL: @icmp_sgt11_vec(
-; CHECK-NEXT: [[SHL:%.*]] = shl nsw <2 x i8> %x, <i8 7, i8 7>
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <2 x i8> [[SHL]], <i8 -2, i8 -2>
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <2 x i8> %x, <i8 -1, i8 -1>
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%shl = shl nsw <2 x i8> %x, <i8 7, i8 7>
@@ -216,3 +204,153 @@ define <2 x i1> @icmp_sgt11_vec(<2 x i8> %x) {
; Known bits analysis returns false for compares with >=0.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+; Repeat the shl nsw + sgt tests with predicate changed to 'sle'.
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; Known bits analysis turns this into an equality predicate.
+
+define i1 @icmp_sle1(i8 %x) {
+; CHECK-LABEL: @icmp_sle1(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 %x, -64
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %shl = shl nsw i8 %x, 1
+ %cmp = icmp sle i8 %shl, -128
+ ret i1 %cmp
+}
+
+define i1 @icmp_sle2(i8 %x) {
+; CHECK-LABEL: @icmp_sle2(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 %x, -63
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %shl = shl nsw i8 %x, 1
+ %cmp = icmp sle i8 %shl, -127
+ ret i1 %cmp
+}
+
+define i1 @icmp_sle3(i8 %x) {
+; CHECK-LABEL: @icmp_sle3(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 %x, -7
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %shl = shl nsw i8 %x, 1
+ %cmp = icmp sle i8 %shl, -16
+ ret i1 %cmp
+}
+
+define i1 @icmp_sle4(i8 %x) {
+; CHECK-LABEL: @icmp_sle4(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 %x, 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %shl = shl nsw i8 %x, 1
+ %cmp = icmp sle i8 %shl, -2
+ ret i1 %cmp
+}
+
+; x <=s -1 is a sign bit test.
+; x <=s 0 is a sign bit test.
+
+define i1 @icmp_sle5(i8 %x) {
+; CHECK-LABEL: @icmp_sle5(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 %x, 1
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %shl = shl nsw i8 %x, 1
+ %cmp = icmp sle i8 %shl, 1
+ ret i1 %cmp
+}
+
+define i1 @icmp_sle6(i8 %x) {
+; CHECK-LABEL: @icmp_sle6(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 %x, 9
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %shl = shl nsw i8 %x, 1
+ %cmp = icmp sle i8 %shl, 16
+ ret i1 %cmp
+}
+
+define i1 @icmp_sle7(i8 %x) {
+; CHECK-LABEL: @icmp_sle7(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 %x, 63
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %shl = shl nsw i8 %x, 1
+ %cmp = icmp sle i8 %shl, 124
+ ret i1 %cmp
+}
+
+; Known bits analysis turns this into an equality predicate.
+
+define i1 @icmp_sle8(i8 %x) {
+; CHECK-LABEL: @icmp_sle8(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 %x, 63
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %shl = shl nsw i8 %x, 1
+ %cmp = icmp sle i8 %shl, 125
+ ret i1 %cmp
+}
+
+; Compares with 126 and 127 are recognized as always true (shl nsw i8 %x, 1 is at most 126), so those constants need no sle tests.
+
+; Known bits analysis turns this into an equality predicate.
+
+define i1 @icmp_sle9(i8 %x) {
+; CHECK-LABEL: @icmp_sle9(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 %x, -1
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %shl = shl nsw i8 %x, 7
+ %cmp = icmp sle i8 %shl, -128
+ ret i1 %cmp
+}
+
+define i1 @icmp_sle10(i8 %x) {
+; CHECK-LABEL: @icmp_sle10(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 %x, 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %shl = shl nsw i8 %x, 7
+ %cmp = icmp sle i8 %shl, -127
+ ret i1 %cmp
+}
+
+define i1 @icmp_sle11(i8 %x) {
+; CHECK-LABEL: @icmp_sle11(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 %x, 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %shl = shl nsw i8 %x, 7
+ %cmp = icmp sle i8 %shl, -2
+ ret i1 %cmp
+}
+
+; Some of the earlier sgt/sle tests are transformed to eq/ne, but try a couple
+; of those explicitly, so we know no intermediate transforms are necessary.
+
+define i1 @icmp_eq1(i8 %x) {
+; CHECK-LABEL: @icmp_eq1(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 %x, 6
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %shl = shl nsw i8 %x, 1
+ %cmp = icmp eq i8 %shl, 12
+ ret i1 %cmp
+}
+
+define i1 @icmp_ne1(i8 %x) {
+; CHECK-LABEL: @icmp_ne1(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 %x, -2
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %shl = shl nsw i8 %x, 6
+ %cmp = icmp ne i8 %shl, -128
+ ret i1 %cmp
+}
+
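The shl-nsw compares divide the constant rather than masking the variable: shl nsw i8 %x, 1 is exactly 2 * %x with no overflow, so a compare against C becomes a compare of %x against C arithmetically shifted right, with the predicate tightened when C is odd or at the type's limit. Worked instance for icmp_sle3:

  ; 2 * %x s<= -16  <=>  %x s<= -8  <=>  icmp slt i8 %x, -7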
diff --git a/test/Transforms/InstCombine/icmp.ll b/test/Transforms/InstCombine/icmp.ll
index 32fe050bf83f..edfa9a102917 100644
--- a/test/Transforms/InstCombine/icmp.ll
+++ b/test/Transforms/InstCombine/icmp.ll
@@ -918,7 +918,7 @@ define i1 @test60_as1(i8 addrspace(1)* %foo, i64 %i, i64 %j) {
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 %i to i16
; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 %j to i16
; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i16 [[TMP1]], 2
-; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[TMP2]], [[GEP1_IDX]]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP2]]
; CHECK-NEXT: ret i1 [[TMP3]]
;
%bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
@@ -949,7 +949,7 @@ define i1 @test60_addrspacecast_smaller(i8* %foo, i16 %i, i64 %j) {
; CHECK-LABEL: @test60_addrspacecast_smaller(
; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i16 %i, 2
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 %j to i16
-; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i16 [[TMP1]], [[GEP1_IDX]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
%bit = addrspacecast i8* %foo to i32 addrspace(1)*
@@ -981,7 +981,7 @@ define i1 @test61(i8* %foo, i64 %i, i64 %j) {
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i32, i32* [[BIT]], i64 %i
; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i8, i8* %foo, i64 %j
; CHECK-NEXT: [[CAST1:%.*]] = bitcast i32* [[GEP1]] to i8*
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8* [[CAST1]], [[GEP2]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8* [[GEP2]], [[CAST1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%bit = bitcast i8* %foo to i32*
@@ -999,7 +999,7 @@ define i1 @test61_as1(i8 addrspace(1)* %foo, i16 %i, i16 %j) {
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i32, i32 addrspace(1)* [[BIT]], i16 %i
; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i8, i8 addrspace(1)* %foo, i16 %j
; CHECK-NEXT: [[CAST1:%.*]] = bitcast i32 addrspace(1)* [[GEP1]] to i8 addrspace(1)*
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 addrspace(1)* [[CAST1]], [[GEP2]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 addrspace(1)* [[GEP2]], [[CAST1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
@@ -1123,19 +1123,6 @@ define i1 @test68(i32 %x) {
ret i1 %cmp
}
-; PR14708
-define i1 @test69(i32 %c) {
-; CHECK-LABEL: @test69(
-; CHECK-NEXT: [[TMP1:%.*]] = or i32 %c, 32
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 97
-; CHECK-NEXT: ret i1 [[TMP2]]
-;
- %1 = icmp eq i32 %c, 97
- %2 = icmp eq i32 %c, 65
- %3 = or i1 %1, %2
- ret i1 %3
-}
-
; PR15940
define i1 @test70(i32 %X) {
; CHECK-LABEL: @test70(
@@ -1183,12 +1170,11 @@ define i1 @icmp_sext8trunc(i32 %x) {
ret i1 %cmp
}
-; FIXME: Vectors should fold the same way.
+; Vectors should fold the same way.
define <2 x i1> @icmp_sext8trunc_vec(<2 x i32> %x) {
; CHECK-LABEL: @icmp_sext8trunc_vec(
-; CHECK-NEXT: [[SEXT1:%.*]] = shl <2 x i32> %x, <i32 24, i32 24>
-; CHECK-NEXT: [[SEXT:%.*]] = ashr <2 x i32> [[SEXT:%.*]]1, <i32 24, i32 24>
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i32> [[SEXT]], <i32 36, i32 36>
+; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> %x to <2 x i8>
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i8> [[TMP1]], <i8 36, i8 36>
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%trunc = trunc <2 x i32> %x to <2 x i8>
@@ -1877,6 +1863,55 @@ define <2 x i1> @icmp_and_X_-16_ne-16_vec(<2 x i32> %X) {
ret <2 x i1> %cmp
}
+; PR32524: https://bugs.llvm.org/show_bug.cgi?id=32524
+; X | C == C --> X <=u C (when C+1 is PowerOf2).
+
+define i1 @or1_eq1(i32 %x) {
+; CHECK-LABEL: @or1_eq1(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 2
+; CHECK-NEXT: ret i1 [[T1]]
+;
+ %t0 = or i32 %x, 1
+ %t1 = icmp eq i32 %t0, 1
+ ret i1 %t1
+}
+
+; X | C == C --> X <=u C (when C+1 is PowerOf2).
+
+define <2 x i1> @or3_eq3_vec(<2 x i8> %x) {
+; CHECK-LABEL: @or3_eq3_vec(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult <2 x i8> %x, <i8 4, i8 4>
+; CHECK-NEXT: ret <2 x i1> [[T1]]
+;
+ %t0 = or <2 x i8> %x, <i8 3, i8 3>
+ %t1 = icmp eq <2 x i8> %t0, <i8 3, i8 3>
+ ret <2 x i1> %t1
+}
+
+; X | C != C --> X >u C (when C+1 is PowerOf2).
+
+define i1 @or7_ne7(i32 %x) {
+; CHECK-LABEL: @or7_ne7(
+; CHECK-NEXT: [[T1:%.*]] = icmp ugt i32 %x, 7
+; CHECK-NEXT: ret i1 [[T1]]
+;
+ %t0 = or i32 %x, 7
+ %t1 = icmp ne i32 %t0, 7
+ ret i1 %t1
+}
+
+; X | C != C --> X >u C (when C+1 is PowerOf2).
+
+define <2 x i1> @or63_ne63_vec(<2 x i8> %x) {
+; CHECK-LABEL: @or63_ne63_vec(
+; CHECK-NEXT: [[T1:%.*]] = icmp ugt <2 x i8> %x, <i8 63, i8 63>
+; CHECK-NEXT: ret <2 x i1> [[T1]]
+;
+ %t0 = or <2 x i8> %x, <i8 63, i8 63>
+ %t1 = icmp ne <2 x i8> %t0, <i8 63, i8 63>
+ ret <2 x i1> %t1
+}
+
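The PR32524 folds rely on C being a low-bit mask whenever C+1 is a power of two: (X | C) == C holds iff X sets no bits outside the mask, i.e. X & ~C == 0, i.e. X u< C+1; the ne form is the complement, X u> C:

  ; or1_eq1: (x | 1) == 1  <=>  icmp ult i32 %x, 2
  ; or7_ne7: (x | 7) != 7  <=>  icmp ugt i32 %x, 7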
define i1 @shrink_constant(i32 %X) {
; CHECK-LABEL: @shrink_constant(
; CHECK-NEXT: [[XOR:%.*]] = xor i32 %X, -12
@@ -2232,16 +2267,6 @@ define i1 @icmp_sge_zero_add_nsw(i32 %a) {
ret i1 %cmp
}
-define i1 @icmp_slt_zero_add_nsw(i32 %a) {
-; CHECK-LABEL: @icmp_slt_zero_add_nsw(
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 %a, -1
-; CHECK-NEXT: ret i1 [[CMP]]
-;
- %add = add nsw i32 %a, 1
- %cmp = icmp slt i32 %add, 0
- ret i1 %cmp
-}
-
define i1 @icmp_sle_zero_add_nsw(i32 %a) {
; CHECK-LABEL: @icmp_sle_zero_add_nsw(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 %a, 0
@@ -2425,6 +2450,10 @@ define i1 @f10(i16 %p) {
ret i1 %cmp580
}
+; Note: fptosi is used in various tests below to ensure that operand complexity
+; canonicalization does not kick in, which would make some of the tests
+; equivalent to one another.
+
define i1 @cmp_sgt_rhs_dec(float %x, i32 %i) {
; CHECK-LABEL: @cmp_sgt_rhs_dec(
; CHECK-NEXT: [[CONV:%.*]] = fptosi float %x to i32
@@ -2711,3 +2740,143 @@ define i1 @or_ptrtoint_mismatch(i8* %p, i32* %q) {
%b = icmp eq i64 %o, 0
ret i1 %b
}
+
+define i1 @icmp_add1_ugt(i32 %x, i32 %y) {
+; CHECK-LABEL: @icmp_add1_ugt(
+; CHECK-NEXT: [[CMP:%.*]] = icmp uge i32 %x, %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %add = add nuw i32 %x, 1
+ %cmp = icmp ugt i32 %add, %y
+ ret i1 %cmp
+}
+
+define i1 @icmp_add1_ule(i32 %x, i32 %y) {
+; CHECK-LABEL: @icmp_add1_ule(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 %x, %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %add = add nuw i32 %x, 1
+ %cmp = icmp ule i32 %add, %y
+ ret i1 %cmp
+}
+
+define i1 @cmp_uge_rhs_inc(float %x, i32 %i) {
+; CHECK-LABEL: @cmp_uge_rhs_inc(
+; CHECK-NEXT: [[CONV:%.*]] = fptosi float %x to i32
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[CONV]], %i
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %conv = fptosi float %x to i32
+ %inc = add nuw i32 %i, 1
+ %cmp = icmp uge i32 %conv, %inc
+ ret i1 %cmp
+}
+
+define i1 @cmp_ult_rhs_inc(float %x, i32 %i) {
+; CHECK-LABEL: @cmp_ult_rhs_inc(
+; CHECK-NEXT: [[CONV:%.*]] = fptosi float %x to i32
+; CHECK-NEXT: [[CMP:%.*]] = icmp ule i32 [[CONV]], %i
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %conv = fptosi float %x to i32
+ %inc = add nuw i32 %i, 1
+ %cmp = icmp ult i32 %conv, %inc
+ ret i1 %cmp
+}
+
+define i1 @cmp_sge_lhs_inc(i32 %x, i32 %y) {
+; CHECK-LABEL: @cmp_sge_lhs_inc(
+; CHECK-NEXT: [[INC:%.*]] = add
+; CHECK-NEXT: [[CMP:%.*]] = icmp sge i32 [[INC]], %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %inc = add nsw i32 %x, 1
+ %cmp = icmp sge i32 %inc, %y
+ ret i1 %cmp
+}
+
+define i1 @cmp_uge_lhs_inc(i32 %x, i32 %y) {
+; CHECK-LABEL: @cmp_uge_lhs_inc(
+; CHECK-NEXT: [[INC:%.*]] = add
+; CHECK-NEXT: [[CMP:%.*]] = icmp uge i32 [[INC]], %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %inc = add nuw i32 %x, 1
+ %cmp = icmp uge i32 %inc, %y
+ ret i1 %cmp
+}
+
+define i1 @cmp_sgt_lhs_dec(i32 %x, i32 %y) {
+; CHECK-LABEL: @cmp_sgt_lhs_dec(
+; CHECK-NEXT: [[DEC:%.*]] = {{add|sub}}
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[DEC]], %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %dec = sub nsw i32 %x, 1
+ %cmp = icmp sgt i32 %dec, %y
+ ret i1 %cmp
+}
+
+define i1 @cmp_ugt_lhs_dec(i32 %x, i32 %y) {
+; CHECK-LABEL: @cmp_ugt_lhs_dec(
+; CHECK-NEXT: [[DEC:%.*]] = {{add|sub}}
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[DEC]], %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %dec = sub nuw i32 %x, 1
+ %cmp = icmp ugt i32 %dec, %y
+ ret i1 %cmp
+}
+
+define i1 @cmp_sle_rhs_inc(float %x, i32 %y) {
+; CHECK-LABEL: @cmp_sle_rhs_inc(
+; CHECK-NEXT: [[CONV:%.*]] = fptosi float %x to i32
+; CHECK-NEXT: [[INC:%.*]] = add
+; CHECK-NEXT: [[CMP:%.*]] = icmp sge i32 [[INC]], [[CONV]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %conv = fptosi float %x to i32
+ %inc = add nsw i32 %y, 1
+ %cmp = icmp sle i32 %conv, %inc
+ ret i1 %cmp
+}
+
+define i1 @cmp_ule_rhs_inc(float %x, i32 %y) {
+; CHECK-LABEL: @cmp_ule_rhs_inc(
+; CHECK-NEXT: [[CONV:%.*]] = fptosi float %x to i32
+; CHECK-NEXT: [[INC:%.*]] = add
+; CHECK-NEXT: [[CMP:%.*]] = icmp uge i32 [[INC]], [[CONV]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %conv = fptosi float %x to i32
+ %inc = add nuw i32 %y, 1
+ %cmp = icmp ule i32 %conv, %inc
+ ret i1 %cmp
+}
+
+define i1 @cmp_slt_rhs_dec(float %x, i32 %y) {
+; CHECK-LABEL: @cmp_slt_rhs_dec(
+; CHECK-NEXT: [[CONV:%.*]] = fptosi float %x to i32
+; CHECK-NEXT: [[DEC:%.*]] = {{add|sub}}
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[DEC]], [[CONV]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %conv = fptosi float %x to i32
+ %dec = sub nsw i32 %y, 1
+ %cmp = icmp slt i32 %conv, %dec
+ ret i1 %cmp
+}
+
+define i1 @cmp_ult_rhs_dec(float %x, i32 %y) {
+; CHECK-LABEL: @cmp_ult_rhs_dec(
+; CHECK-NEXT: [[CONV:%.*]] = fptosi float %x to i32
+; CHECK-NEXT: [[DEC:%.*]] = {{add|sub}}
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[DEC]], [[CONV]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %conv = fptosi float %x to i32
+ %dec = sub nuw i32 %y, 1
+ %cmp = icmp ult i32 %conv, %dec
+ ret i1 %cmp
+}
diff --git a/test/Transforms/InstCombine/insert-extract-shuffle.ll b/test/Transforms/InstCombine/insert-extract-shuffle.ll
index 4507deb7f023..29f774c5f62b 100644
--- a/test/Transforms/InstCombine/insert-extract-shuffle.ll
+++ b/test/Transforms/InstCombine/insert-extract-shuffle.ll
@@ -86,11 +86,8 @@ define <8 x float> @widen_extract4(<8 x float> %ins, <2 x float> %ext) {
define <8 x i16> @pr26015(<4 x i16> %t0) {
; CHECK-LABEL: @pr26015(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i16> %t0, <4 x i16> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i16> %t0, i32 2
-; CHECK-NEXT: [[T2:%.*]] = insertelement <8 x i16> <i16 0, i16 0, i16 0, i16 undef, i16 0, i16 0, i16 undef, i16 undef>, i16 [[TMP2]], i32 3
-; CHECK-NEXT: [[T3:%.*]] = insertelement <8 x i16> [[T2]], i16 0, i32 6
-; CHECK-NEXT: [[T5:%.*]] = shufflevector <8 x i16> [[T3]], <8 x i16> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 11>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i16> %t0, <4 x i16> undef, <8 x i32> <i32 undef, i32 undef, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[T5:%.*]] = shufflevector <8 x i16> <i16 0, i16 0, i16 0, i16 undef, i16 0, i16 0, i16 0, i16 undef>, <8 x i16> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 10, i32 4, i32 5, i32 6, i32 11>
; CHECK-NEXT: ret <8 x i16> [[T5]]
;
%t1 = extractelement <4 x i16> %t0, i32 2
@@ -110,8 +107,7 @@ define <8 x i16> @pr25999(<4 x i16> %t0, i1 %b) {
; CHECK-NEXT: br i1 %b, label %if, label %end
; CHECK: if:
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i16> %t0, <4 x i16> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT: [[T2:%.*]] = insertelement <8 x i16> <i16 0, i16 0, i16 0, i16 undef, i16 0, i16 0, i16 undef, i16 undef>, i16 [[T1]], i32 3
-; CHECK-NEXT: [[T3:%.*]] = insertelement <8 x i16> [[T2]], i16 0, i32 6
+; CHECK-NEXT: [[T3:%.*]] = insertelement <8 x i16> <i16 0, i16 0, i16 0, i16 undef, i16 0, i16 0, i16 0, i16 undef>, i16 [[T1]], i32 3
; CHECK-NEXT: [[T5:%.*]] = shufflevector <8 x i16> [[T3]], <8 x i16> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 11>
; CHECK-NEXT: ret <8 x i16> [[T5]]
; CHECK: end:
diff --git a/test/Transforms/InstCombine/intrinsics.ll b/test/Transforms/InstCombine/intrinsics.ll
index 858f9c029b30..e8f5ddd329ff 100644
--- a/test/Transforms/InstCombine/intrinsics.ll
+++ b/test/Transforms/InstCombine/intrinsics.ll
@@ -351,33 +351,12 @@ define void @ctpop_cmp_vec(<2 x i32> %a, <2 x i1>* %b) {
; CHECK-NEXT: store volatile <2 x i1> %pop1.cmp, <2 x i1>* %b
}
-define i32 @cttz_simplify1a(i32 %x) nounwind readnone ssp {
- %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
- %shr3 = lshr i32 %tmp1, 5
- ret i32 %shr3
-
-; CHECK-LABEL: @cttz_simplify1a(
-; CHECK: icmp eq i32 %x, 0
-; CHECK-NEXT: zext i1
-; CHECK-NEXT: ret i32
-}
-
-define i32 @cttz_simplify1b(i32 %x) nounwind readnone ssp {
- %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %x, i1 true)
- %shr3 = lshr i32 %tmp1, 5
- ret i32 %shr3
-
-; CHECK-LABEL: @cttz_simplify1b(
-; CHECK-NEXT: ret i32 0
-}
-
-define i32 @ctlz_undef(i32 %Value) nounwind {
+define i32 @ctlz_undef(i32 %Value) {
; CHECK-LABEL: @ctlz_undef(
; CHECK-NEXT: ret i32 undef
;
%ctlz = call i32 @llvm.ctlz.i32(i32 0, i1 true)
ret i32 %ctlz
-
}
define i32 @ctlz_make_undef(i32 %a) {
diff --git a/test/Transforms/InstCombine/lifetime-asan.ll b/test/Transforms/InstCombine/lifetime-asan.ll
index f52c0202b773..7fdc1fcbc3b3 100644
--- a/test/Transforms/InstCombine/lifetime-asan.ll
+++ b/test/Transforms/InstCombine/lifetime-asan.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -instcombine -S | FileCheck %s
-declare void @llvm.lifetime.start(i64, i8* nocapture)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
declare void @foo(i8* nocapture)
define void @asan() sanitize_address {
@@ -9,8 +9,8 @@ entry:
; CHECK-LABEL: @asan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start(i64 1, i8* %text)
- call void @llvm.lifetime.end(i64 1, i8* %text)
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %text)
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %text)
; CHECK: call void @llvm.lifetime.start
; CHECK-NEXT: call void @llvm.lifetime.end
@@ -25,8 +25,8 @@ entry:
; CHECK-LABEL: @no_asan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start(i64 1, i8* %text)
- call void @llvm.lifetime.end(i64 1, i8* %text)
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %text)
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %text)
; CHECK-NO: call void @llvm.lifetime
call void @foo(i8* %text) ; Keep alloca alive
diff --git a/test/Transforms/InstCombine/lifetime.ll b/test/Transforms/InstCombine/lifetime.ll
index c296d29b99b9..71c676233b08 100644
--- a/test/Transforms/InstCombine/lifetime.ll
+++ b/test/Transforms/InstCombine/lifetime.ll
@@ -1,8 +1,8 @@
; RUN: opt < %s -instcombine -S | FileCheck %s
declare void @llvm.dbg.declare(metadata, metadata, metadata)
-declare void @llvm.lifetime.start(i64, i8* nocapture)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
declare void @foo(i8* nocapture, i8* nocapture)
define void @bar(i1 %flag) !dbg !4 {
@@ -17,11 +17,11 @@ entry:
; CHECK: bb3:
; CHECK-NEXT: call void @llvm.dbg.declare
; CHECK-NEXT: br label %fin
-; CHECK: call void @llvm.lifetime.start(i64 1, i8* %[[T]])
-; CHECK-NEXT: call void @llvm.lifetime.start(i64 1, i8* %[[B]])
+; CHECK: call void @llvm.lifetime.start.p0i8(i64 1, i8* %[[T]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 1, i8* %[[B]])
; CHECK-NEXT: call void @foo(i8* %[[B]], i8* %[[T]])
-; CHECK-NEXT: call void @llvm.lifetime.end(i64 1, i8* %[[B]])
-; CHECK-NEXT: call void @llvm.lifetime.end(i64 1, i8* %[[T]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* %[[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* %[[T]])
%text = alloca [1 x i8], align 1
%buff = alloca [1 x i8], align 1
%0 = getelementptr inbounds [1 x i8], [1 x i8]* %text, i64 0, i64 0
@@ -29,31 +29,31 @@ entry:
br i1 %flag, label %if, label %else
if:
- call void @llvm.lifetime.start(i64 1, i8* %0)
- call void @llvm.lifetime.start(i64 1, i8* %1)
- call void @llvm.lifetime.end(i64 1, i8* %1)
- call void @llvm.lifetime.end(i64 1, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %0)
br label %bb2
bb2:
- call void @llvm.lifetime.start(i64 1, i8* %0)
- call void @llvm.lifetime.start(i64 1, i8* %1)
- call void @llvm.lifetime.end(i64 1, i8* %0)
- call void @llvm.lifetime.end(i64 1, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %1)
br label %bb3
bb3:
- call void @llvm.lifetime.start(i64 1, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %0)
call void @llvm.dbg.declare(metadata [1 x i8]* %text, metadata !14, metadata !25), !dbg !26
- call void @llvm.lifetime.end(i64 1, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %0)
br label %fin
else:
- call void @llvm.lifetime.start(i64 1, i8* %0)
- call void @llvm.lifetime.start(i64 1, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %1)
call void @foo(i8* %1, i8* %0)
- call void @llvm.lifetime.end(i64 1, i8* %1)
- call void @llvm.lifetime.end(i64 1, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %0)
br label %fin
fin:
diff --git a/test/Transforms/InstCombine/load-cmp.ll b/test/Transforms/InstCombine/load-cmp.ll
index 75952e01c19c..5746b7aa28d5 100644
--- a/test/Transforms/InstCombine/load-cmp.ll
+++ b/test/Transforms/InstCombine/load-cmp.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -instcombine -S -default-data-layout="p:32:32:32-p1:16:16:16-n8:16:32:64" < %s | FileCheck %s
+; RUN: opt -instcombine -S -data-layout="p:32:32:32-p1:16:16:16-n8:16:32:64" < %s | FileCheck %s
@G16 = internal constant [10 x i16] [i16 35, i16 82, i16 69, i16 81, i16 85,
i16 73, i16 82, i16 69, i16 68, i16 0]
diff --git a/test/Transforms/InstCombine/lshr.ll b/test/Transforms/InstCombine/lshr.ll
new file mode 100644
index 000000000000..b81371b03042
--- /dev/null
+++ b/test/Transforms/InstCombine/lshr.ll
@@ -0,0 +1,102 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -instcombine -S < %s | FileCheck %s
+
+declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone
+declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
+declare i32 @llvm.ctpop.i32(i32) nounwind readnone
+declare <2 x i8> @llvm.cttz.v2i8(<2 x i8>, i1) nounwind readnone
+declare <2 x i8> @llvm.ctlz.v2i8(<2 x i8>, i1) nounwind readnone
+declare <2 x i8> @llvm.ctpop.v2i8(<2 x i8>) nounwind readnone
+
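+; For an i32 input, ctlz/cttz can only be 32 (the sole result with bit 5 set)
+; when the input is 0, and ctpop is 32 only for -1, so 'lshr %ct, 5' reduces
+; to a compare-and-zext. The v2i8 variants shift by 3 for the same reason.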
+define i32 @lshr_ctlz_zero_is_not_undef(i32 %x) {
+; CHECK-LABEL: @lshr_ctlz_zero_is_not_undef(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 %x, 0
+; CHECK-NEXT: [[SH:%.*]] = zext i1 [[TMP1]] to i32
+; CHECK-NEXT: ret i32 [[SH]]
+;
+ %ct = call i32 @llvm.ctlz.i32(i32 %x, i1 false)
+ %sh = lshr i32 %ct, 5
+ ret i32 %sh
+}
+
+define i32 @lshr_cttz_zero_is_not_undef(i32 %x) {
+; CHECK-LABEL: @lshr_cttz_zero_is_not_undef(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 %x, 0
+; CHECK-NEXT: [[SH:%.*]] = zext i1 [[TMP1]] to i32
+; CHECK-NEXT: ret i32 [[SH]]
+;
+ %ct = call i32 @llvm.cttz.i32(i32 %x, i1 false)
+ %sh = lshr i32 %ct, 5
+ ret i32 %sh
+}
+
+define i32 @lshr_ctpop(i32 %x) {
+; CHECK-LABEL: @lshr_ctpop(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 %x, -1
+; CHECK-NEXT: [[SH:%.*]] = zext i1 [[TMP1]] to i32
+; CHECK-NEXT: ret i32 [[SH]]
+;
+ %ct = call i32 @llvm.ctpop.i32(i32 %x)
+ %sh = lshr i32 %ct, 5
+ ret i32 %sh
+}
+
+define <2 x i8> @lshr_ctlz_zero_is_not_undef_splat_vec(<2 x i8> %x) {
+; CHECK-LABEL: @lshr_ctlz_zero_is_not_undef_splat_vec(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i8> %x, zeroinitializer
+; CHECK-NEXT: [[SH:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i8>
+; CHECK-NEXT: ret <2 x i8> [[SH]]
+;
+ %ct = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> %x, i1 false)
+ %sh = lshr <2 x i8> %ct, <i8 3, i8 3>
+ ret <2 x i8> %sh
+}
+
+define <2 x i8> @lshr_cttz_zero_is_not_undef_splat_vec(<2 x i8> %x) {
+; CHECK-LABEL: @lshr_cttz_zero_is_not_undef_splat_vec(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i8> %x, zeroinitializer
+; CHECK-NEXT: [[SH:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i8>
+; CHECK-NEXT: ret <2 x i8> [[SH]]
+;
+ %ct = call <2 x i8> @llvm.cttz.v2i8(<2 x i8> %x, i1 false)
+ %sh = lshr <2 x i8> %ct, <i8 3, i8 3>
+ ret <2 x i8> %sh
+}
+
+define <2 x i8> @lshr_ctpop_splat_vec(<2 x i8> %x) {
+; CHECK-LABEL: @lshr_ctpop_splat_vec(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i8> %x, <i8 -1, i8 -1>
+; CHECK-NEXT: [[SH:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i8>
+; CHECK-NEXT: ret <2 x i8> [[SH]]
+;
+ %ct = call <2 x i8> @llvm.ctpop.v2i8(<2 x i8> %x)
+ %sh = lshr <2 x i8> %ct, <i8 3, i8 3>
+ ret <2 x i8> %sh
+}
+
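+; (x << 2) + 4 always has its two low bits clear, so the lshr by 2 loses no
+; bits and can be marked 'exact'.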
+define i8 @lshr_exact(i8 %x) {
+; CHECK-LABEL: @lshr_exact(
+; CHECK-NEXT: [[SHL:%.*]] = shl i8 %x, 2
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[SHL]], 4
+; CHECK-NEXT: [[LSHR:%.*]] = lshr exact i8 [[ADD]], 2
+; CHECK-NEXT: ret i8 [[LSHR]]
+;
+ %shl = shl i8 %x, 2
+ %add = add i8 %shl, 4
+ %lshr = lshr i8 %add, 2
+ ret i8 %lshr
+}
+
+define <2 x i8> @lshr_exact_splat_vec(<2 x i8> %x) {
+; CHECK-LABEL: @lshr_exact_splat_vec(
+; CHECK-NEXT: [[SHL:%.*]] = shl <2 x i8> %x, <i8 2, i8 2>
+; CHECK-NEXT: [[ADD:%.*]] = add <2 x i8> [[SHL]], <i8 4, i8 4>
+; CHECK-NEXT: [[LSHR:%.*]] = lshr exact <2 x i8> [[ADD]], <i8 2, i8 2>
+; CHECK-NEXT: ret <2 x i8> [[LSHR]]
+;
+ %shl = shl <2 x i8> %x, <i8 2, i8 2>
+ %add = add <2 x i8> %shl, <i8 4, i8 4>
+ %lshr = lshr <2 x i8> %add, <i8 2, i8 2>
+ ret <2 x i8> %lshr
+}
+
diff --git a/test/Transforms/InstCombine/malloc-free-delete.ll b/test/Transforms/InstCombine/malloc-free-delete.ll
index 8fcb8214360d..7a5c7457e364 100644
--- a/test/Transforms/InstCombine/malloc-free-delete.ll
+++ b/test/Transforms/InstCombine/malloc-free-delete.ll
@@ -24,8 +24,8 @@ define i1 @foo() {
ret i1 %z
}
-declare void @llvm.lifetime.start(i64, i8*)
-declare void @llvm.lifetime.end(i64, i8*)
+declare void @llvm.lifetime.start.p0i8(i64, i8*)
+declare void @llvm.lifetime.end.p0i8(i64, i8*)
declare i64 @llvm.objectsize.i64(i8*, i1)
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
@@ -35,8 +35,8 @@ define void @test3(i8* %src) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: ret void
%a = call noalias i8* @malloc(i32 10)
- call void @llvm.lifetime.start(i64 10, i8* %a)
- call void @llvm.lifetime.end(i64 10, i8* %a)
+ call void @llvm.lifetime.start.p0i8(i64 10, i8* %a)
+ call void @llvm.lifetime.end.p0i8(i64 10, i8* %a)
%size = call i64 @llvm.objectsize.i64(i8* %a, i1 true)
store i8 42, i8* %a
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %src, i32 32, i32 1, i1 false)
diff --git a/test/Transforms/InstCombine/max-of-nots.ll b/test/Transforms/InstCombine/max-of-nots.ll
index 96fac5228970..519f1c6a90b0 100644
--- a/test/Transforms/InstCombine/max-of-nots.ll
+++ b/test/Transforms/InstCombine/max-of-nots.ll
@@ -90,6 +90,28 @@ define i32 @max_of_nots(i32 %x, i32 %y) {
ret i32 %smax96
}
+; negative test case (i.e. cannot simplify): ABS(MIN(NOT x, y))
+define i32 @abs_of_min_of_not(i32 %x, i32 %y) {
+; CHECK-LABEL: @abs_of_min_of_not(
+; CHECK-NEXT: xor
+; CHECK-NEXT: add
+; CHECK-NEXT: icmp sge
+; CHECK-NEXT: select
+; CHECK-NEXT: icmp sgt
+; CHECK-NEXT: sub
+; CHECK-NEXT: select
+; CHECK-NEXT: ret
+
+ %xord = xor i32 %x, -1
+ %yadd = add i32 %y, 2
+ %cond.i = icmp sge i32 %yadd, %xord
+ %min = select i1 %cond.i, i32 %xord, i32 %yadd
+ %cmp2 = icmp sgt i32 %min, -1
+ %sub = sub i32 0, %min
+ %abs = select i1 %cmp2, i32 %min, i32 %sub
+ ret i32 %abs
+}
+
define <2 x i32> @max_of_nots_vec(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @max_of_nots_vec(
; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt <2 x i32> %y, zeroinitializer
diff --git a/test/Transforms/InstCombine/memcmp-1.ll b/test/Transforms/InstCombine/memcmp-1.ll
index f9ff479e3add..96516f44e081 100644
--- a/test/Transforms/InstCombine/memcmp-1.ll
+++ b/test/Transforms/InstCombine/memcmp-1.ll
@@ -14,67 +14,76 @@ declare i32 @memcmp(i8*, i8*, i32)
define i32 @test_simplify1(i8* %mem, i32 %size) {
; CHECK-LABEL: @test_simplify1(
+; CHECK-NEXT: ret i32 0
+;
%ret = call i32 @memcmp(i8* %mem, i8* %mem, i32 %size)
ret i32 %ret
-; CHECK: ret i32 0
}
; Check memcmp(mem1, mem2, 0) -> 0.
define i32 @test_simplify2(i8* %mem1, i8* %mem2) {
; CHECK-LABEL: @test_simplify2(
+; CHECK-NEXT: ret i32 0
+;
%ret = call i32 @memcmp(i8* %mem1, i8* %mem2, i32 0)
ret i32 %ret
-; CHECK: ret i32 0
}
;; Check memcmp(mem1, mem2, 1) -> *(unsigned char*)mem1 - *(unsigned char*)mem2.
define i32 @test_simplify3(i8* %mem1, i8* %mem2) {
; CHECK-LABEL: @test_simplify3(
+; CHECK-NEXT: [[LHSC:%.*]] = load i8, i8* %mem1, align 1
+; CHECK-NEXT: [[LHSV:%.*]] = zext i8 [[LHSC]] to i32
+; CHECK-NEXT: [[RHSC:%.*]] = load i8, i8* %mem2, align 1
+; CHECK-NEXT: [[RHSV:%.*]] = zext i8 [[RHSC]] to i32
+; CHECK-NEXT: [[CHARDIFF:%.*]] = sub nsw i32 [[LHSV]], [[RHSV]]
+; CHECK-NEXT: ret i32 [[CHARDIFF]]
+;
%ret = call i32 @memcmp(i8* %mem1, i8* %mem2, i32 1)
-; CHECK: [[LOAD1:%[a-z]+]] = load i8, i8* %mem1, align 1
-; CHECK: [[ZEXT1:%[a-z]+]] = zext i8 [[LOAD1]] to i32
-; CHECK: [[LOAD2:%[a-z]+]] = load i8, i8* %mem2, align 1
-; CHECK: [[ZEXT2:%[a-z]+]] = zext i8 [[LOAD2]] to i32
-; CHECK: [[RET:%[a-z]+]] = sub nsw i32 [[ZEXT1]], [[ZEXT2]]
ret i32 %ret
-; CHECK: ret i32 [[RET]]
}
; Check memcmp(mem1, mem2, size) -> cnst, where all arguments are constants.
define i32 @test_simplify4() {
; CHECK-LABEL: @test_simplify4(
+; CHECK-NEXT: ret i32 0
+;
%mem1 = getelementptr [4 x i8], [4 x i8]* @hel, i32 0, i32 0
%mem2 = getelementptr [8 x i8], [8 x i8]* @hello_u, i32 0, i32 0
%ret = call i32 @memcmp(i8* %mem1, i8* %mem2, i32 3)
ret i32 %ret
-; CHECK: ret i32 0
}
define i32 @test_simplify5() {
; CHECK-LABEL: @test_simplify5(
+; CHECK-NEXT: ret i32 1
+;
%mem1 = getelementptr [4 x i8], [4 x i8]* @hel, i32 0, i32 0
%mem2 = getelementptr [4 x i8], [4 x i8]* @foo, i32 0, i32 0
%ret = call i32 @memcmp(i8* %mem1, i8* %mem2, i32 3)
ret i32 %ret
-; CHECK: ret i32 1
}
define i32 @test_simplify6() {
; CHECK-LABEL: @test_simplify6(
+; CHECK-NEXT: ret i32 -1
+;
%mem1 = getelementptr [4 x i8], [4 x i8]* @foo, i32 0, i32 0
%mem2 = getelementptr [4 x i8], [4 x i8]* @hel, i32 0, i32 0
%ret = call i32 @memcmp(i8* %mem1, i8* %mem2, i32 3)
ret i32 %ret
-; CHECK: ret i32 -1
}
; Check memcmp(mem1, mem2, 8)==0 -> *(int64_t*)mem1 == *(int64_t*)mem2
define i1 @test_simplify7(i64 %x, i64 %y) {
; CHECK-LABEL: @test_simplify7(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 %x, %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
%x.addr = alloca i64, align 8
%y.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
@@ -84,14 +93,15 @@ define i1 @test_simplify7(i64 %x, i64 %y) {
%call = call i32 @memcmp(i8* %xptr, i8* %yptr, i32 8)
%cmp = icmp eq i32 %call, 0
ret i1 %cmp
-; CHECK: %cmp = icmp eq i64 %x, %y
-; CHECK: ret i1 %cmp
}
; Check memcmp(mem1, mem2, 4)==0 -> *(int32_t*)mem1 == *(int32_t*)mem2
define i1 @test_simplify8(i32 %x, i32 %y) {
; CHECK-LABEL: @test_simplify8(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 %x, %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
%x.addr = alloca i32, align 4
%y.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
@@ -101,14 +111,15 @@ define i1 @test_simplify8(i32 %x, i32 %y) {
%call = call i32 @memcmp(i8* %xptr, i8* %yptr, i32 4)
%cmp = icmp eq i32 %call, 0
ret i1 %cmp
-; CHECK: %cmp = icmp eq i32 %x, %y
-; CHECK: ret i1 %cmp
}
; Check memcmp(mem1, mem2, 2)==0 -> *(int16_t*)mem1 == *(int16_t*)mem2
define i1 @test_simplify9(i16 %x, i16 %y) {
; CHECK-LABEL: @test_simplify9(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i16 %x, %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
%x.addr = alloca i16, align 2
%y.addr = alloca i16, align 2
store i16 %x, i16* %x.addr, align 2
@@ -118,6 +129,4 @@ define i1 @test_simplify9(i16 %x, i16 %y) {
%call = call i32 @memcmp(i8* %xptr, i8* %yptr, i32 2)
%cmp = icmp eq i32 %call, 0
ret i1 %cmp
-; CHECK: %cmp = icmp eq i16 %x, %y
-; CHECK: ret i1 %cmp
}
diff --git a/test/Transforms/InstCombine/memcpy-addrspace.ll b/test/Transforms/InstCombine/memcpy-addrspace.ll
new file mode 100644
index 000000000000..17bc1d08f986
--- /dev/null
+++ b/test/Transforms/InstCombine/memcpy-addrspace.ll
@@ -0,0 +1,85 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+@test.data = private unnamed_addr addrspace(2) constant [8 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7], align 4
+
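+; The alloca below is initialized only by a memcpy from a constant
+; addrspace(2) global, so loads from it can be rewritten to read the global
+; directly; when the pointer also escapes to a call, the copy must remain.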
+; CHECK-LABEL: test_load
+; CHECK: %[[GEP:.*]] = getelementptr [8 x i32], [8 x i32] addrspace(2)* @test.data, i64 0, i64 %x
+; CHECK: %{{.*}} = load i32, i32 addrspace(2)* %[[GEP]]
+; CHECK-NOT: alloca
+; CHECK-NOT: call void @llvm.memcpy.p0i8.p2i8.i64
+; CHECK-NOT: addrspacecast
+; CHECK-NOT: load i32, i32*
+define void @test_load(i32 addrspace(1)* %out, i64 %x) {
+entry:
+ %data = alloca [8 x i32], align 4
+ %0 = bitcast [8 x i32]* %data to i8*
+ call void @llvm.memcpy.p0i8.p2i8.i64(i8* %0, i8 addrspace(2)* bitcast ([8 x i32] addrspace(2)* @test.data to i8 addrspace(2)*), i64 32, i32 4, i1 false)
+ %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* %data, i64 0, i64 %x
+ %1 = load i32, i32* %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %x
+ store i32 %1, i32 addrspace(1)* %arrayidx1, align 4
+ ret void
+}
+
+; CHECK-LABEL: test_load_bitcast_chain
+; CHECK: %[[GEP:.*]] = getelementptr [8 x i32], [8 x i32] addrspace(2)* @test.data, i64 0, i64 %x
+; CHECK: %{{.*}} = load i32, i32 addrspace(2)* %[[GEP]]
+; CHECK-NOT: alloca
+; CHECK-NOT: call void @llvm.memcpy.p0i8.p2i8.i64
+; CHECK-NOT: addrspacecast
+; CHECK-NOT: load i32, i32*
+define void @test_load_bitcast_chain(i32 addrspace(1)* %out, i64 %x) {
+entry:
+ %data = alloca [8 x i32], align 4
+ %0 = bitcast [8 x i32]* %data to i8*
+ call void @llvm.memcpy.p0i8.p2i8.i64(i8* %0, i8 addrspace(2)* bitcast ([8 x i32] addrspace(2)* @test.data to i8 addrspace(2)*), i64 32, i32 4, i1 false)
+ %1 = bitcast i8* %0 to i32*
+ %arrayidx = getelementptr inbounds i32, i32* %1, i64 %x
+ %2 = load i32, i32* %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %x
+ store i32 %2, i32 addrspace(1)* %arrayidx1, align 4
+ ret void
+}
+
+; CHECK-LABEL: test_call
+; CHECK: alloca
+; CHECK: call void @llvm.memcpy.p0i8.p2i8.i64
+; CHECK-NOT: addrspacecast
+; CHECK: call i32 @foo(i32* %{{.*}})
+define void @test_call(i32 addrspace(1)* %out, i64 %x) {
+entry:
+ %data = alloca [8 x i32], align 4
+ %0 = bitcast [8 x i32]* %data to i8*
+ call void @llvm.memcpy.p0i8.p2i8.i64(i8* %0, i8 addrspace(2)* bitcast ([8 x i32] addrspace(2)* @test.data to i8 addrspace(2)*), i64 32, i32 4, i1 false)
+ %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* %data, i64 0, i64 %x
+ %1 = call i32 @foo(i32* %arrayidx)
+ %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %x
+ store i32 %1, i32 addrspace(1)* %arrayidx1, align 4
+ ret void
+}
+
+; CHECK-LABEL: test_load_and_call
+; CHECK: alloca
+; CHECK: call void @llvm.memcpy.p0i8.p2i8.i64
+; CHECK: load i32, i32* %{{.*}}
+; CHECK: call i32 @foo(i32* %{{.*}})
+; CHECK-NOT: addrspacecast
+; CHECK-NOT: load i32, i32 addrspace(2)*
+define void @test_load_and_call(i32 addrspace(1)* %out, i64 %x, i64 %y) {
+entry:
+ %data = alloca [8 x i32], align 4
+ %0 = bitcast [8 x i32]* %data to i8*
+ call void @llvm.memcpy.p0i8.p2i8.i64(i8* %0, i8 addrspace(2)* bitcast ([8 x i32] addrspace(2)* @test.data to i8 addrspace(2)*), i64 32, i32 4, i1 false)
+ %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* %data, i64 0, i64 %x
+ %1 = load i32, i32* %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %x
+ store i32 %1, i32 addrspace(1)* %arrayidx1, align 4
+ %2 = call i32 @foo(i32* %arrayidx)
+ %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %y
+ store i32 %2, i32 addrspace(1)* %arrayidx2, align 4
+ ret void
+}
+
+
+declare void @llvm.memcpy.p0i8.p2i8.i64(i8* nocapture writeonly, i8 addrspace(2)* nocapture readonly, i64, i32, i1)
+declare i32 @foo(i32* %x)
diff --git a/test/Transforms/InstCombine/memcpy-from-global.ll b/test/Transforms/InstCombine/memcpy-from-global.ll
index da38087d7397..7c9384d89ba3 100644
--- a/test/Transforms/InstCombine/memcpy-from-global.ll
+++ b/test/Transforms/InstCombine/memcpy-from-global.ll
@@ -126,11 +126,11 @@ define void @test4() {
ret void
}
-declare void @llvm.lifetime.start(i64, i8*)
+declare void @llvm.lifetime.start.p0i8(i64, i8*)
define void @test5() {
%A = alloca %T
%a = bitcast %T* %A to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %a)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %a)
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast (%T* @G to i8*), i64 124, i32 4, i1 false)
call void @baz(i8* byval %a)
; CHECK-LABEL: @test5(
diff --git a/test/Transforms/InstCombine/memcpy-to-load.ll b/test/Transforms/InstCombine/memcpy-to-load.ll
index bcc9e188b965..fe5f0ac657f1 100644
--- a/test/Transforms/InstCombine/memcpy-to-load.ll
+++ b/test/Transforms/InstCombine/memcpy-to-load.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -instcombine -S | grep "load double"
+; RUN: opt < %s -instcombine -S | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i686-apple-darwin8"
@@ -10,4 +10,8 @@ entry:
ret void
}
+; Make sure that the memcpy has been replaced with a load/store of i64.
+; CHECK: [[TMP:%[0-9]+]] = load i64
+; CHECK: store i64 [[TMP]]
+
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
diff --git a/test/Transforms/InstCombine/memset_chk-1.ll b/test/Transforms/InstCombine/memset_chk-1.ll
index 9d08e96cb49b..79028502b641 100644
--- a/test/Transforms/InstCombine/memset_chk-1.ll
+++ b/test/Transforms/InstCombine/memset_chk-1.ll
@@ -69,7 +69,7 @@ define i32 @test_rauw(i8* %a, i8* %b, i8** %c) {
entry:
%call49 = call i64 @strlen(i8* %a)
%add180 = add i64 %call49, 1
- %yo107 = call i64 @llvm.objectsize.i64.p0i8(i8* %b, i1 false)
+ %yo107 = call i64 @llvm.objectsize.i64.p0i8(i8* %b, i1 false, i1 false)
%call50 = call i8* @__memmove_chk(i8* %b, i8* %a, i64 %add180, i64 %yo107)
; CHECK: %strlen = call i64 @strlen(i8* %b)
; CHECK-NEXT: %strchr2 = getelementptr i8, i8* %b, i64 %strlen
@@ -87,7 +87,7 @@ entry:
declare i8* @__memmove_chk(i8*, i8*, i64, i64)
declare i8* @strrchr(i8*, i32)
declare i64 @strlen(i8* nocapture)
-declare i64 @llvm.objectsize.i64.p0i8(i8*, i1)
+declare i64 @llvm.objectsize.i64.p0i8(i8*, i1, i1)
declare i8* @__memset_chk(i8*, i32, i64, i64)
@@ -100,7 +100,7 @@ entry:
br i1 %cmp, label %cleanup, label %if.end
if.end:
%bc = bitcast i8* %call to float*
- %call2 = tail call i64 @llvm.objectsize.i64.p0i8(i8* nonnull %call, i1 false)
+ %call2 = tail call i64 @llvm.objectsize.i64.p0i8(i8* nonnull %call, i1 false, i1 false)
%call3 = tail call i8* @__memset_chk(i8* nonnull %call, i32 0, i64 %size, i64 %call2) #1
br label %cleanup
cleanup:
@@ -114,7 +114,7 @@ cleanup:
; CHECK-NEXT: br i1 %cmp, label %cleanup, label %if.end
; CHECK: if.end:
; CHECK-NEXT: %bc = bitcast i8* %call to float*
-; CHECK-NEXT: %call2 = tail call i64 @llvm.objectsize.i64.p0i8(i8* nonnull %call, i1 false)
+; CHECK-NEXT: %call2 = tail call i64 @llvm.objectsize.i64.p0i8(i8* nonnull %call, i1 false, i1 false)
; CHECK-NEXT: %call3 = tail call i8* @__memset_chk(i8* nonnull %call, i32 0, i64 %size, i64 %call2)
; CHECK-NEXT: br label %cleanup
; CHECK: cleanup:
diff --git a/test/Transforms/InstCombine/minmax-fold.ll b/test/Transforms/InstCombine/minmax-fold.ll
index bf46cefd8ab3..19a7341fdc28 100644
--- a/test/Transforms/InstCombine/minmax-fold.ll
+++ b/test/Transforms/InstCombine/minmax-fold.ll
@@ -339,14 +339,85 @@ define i32 @test75(i32 %x) {
ret i32 %retval
}
+; The next 4 tests are value clamping with constants:
+; https://llvm.org/bugs/show_bug.cgi?id=31693
+
+; (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
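+; For instance, in @clamp_signed1 below C1 = 15 and C2 = 255, so the pair of
+; selects clamps x to the range [15, 255].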
+
+define i32 @clamp_signed1(i32 %x) {
+; CHECK-LABEL: @clamp_signed1(
+; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 %x, 255
+; CHECK-NEXT: [[MIN:%.*]] = select i1 [[CMP2]], i32 %x, i32 255
+; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[MIN]], 15
+; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP1]], i32 [[MIN]], i32 15
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %cmp2 = icmp slt i32 %x, 255
+ %min = select i1 %cmp2, i32 %x, i32 255
+ %cmp1 = icmp slt i32 %x, 15
+ %r = select i1 %cmp1, i32 15, i32 %min
+ ret i32 %r
+}
+
+; (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
+
+define i32 @clamp_signed2(i32 %x) {
+; CHECK-LABEL: @clamp_signed2(
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 %x, 15
+; CHECK-NEXT: [[MAX:%.*]] = select i1 [[CMP2]], i32 %x, i32 15
+; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i32 [[MAX]], 255
+; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP1]], i32 [[MAX]], i32 255
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %cmp2 = icmp sgt i32 %x, 15
+ %max = select i1 %cmp2, i32 %x, i32 15
+ %cmp1 = icmp sgt i32 %x, 255
+ %r = select i1 %cmp1, i32 255, i32 %max
+ ret i32 %r
+}
+
+; (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
+
+define i32 @clamp_unsigned1(i32 %x) {
+; CHECK-LABEL: @clamp_unsigned1(
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i32 %x, 255
+; CHECK-NEXT: [[MIN:%.*]] = select i1 [[CMP2]], i32 %x, i32 255
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i32 [[MIN]], 15
+; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP1]], i32 [[MIN]], i32 15
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %cmp2 = icmp ult i32 %x, 255
+ %min = select i1 %cmp2, i32 %x, i32 255
+ %cmp1 = icmp ult i32 %x, 15
+ %r = select i1 %cmp1, i32 15, i32 %min
+ ret i32 %r
+}
+
+; (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
+
+define i32 @clamp_unsigned2(i32 %x) {
+; CHECK-LABEL: @clamp_unsigned2(
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 %x, 15
+; CHECK-NEXT: [[MAX:%.*]] = select i1 [[CMP2]], i32 %x, i32 15
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[MAX]], 255
+; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP1]], i32 [[MAX]], i32 255
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %cmp2 = icmp ugt i32 %x, 15
+ %max = select i1 %cmp2, i32 %x, i32 15
+ %cmp1 = icmp ugt i32 %x, 255
+ %r = select i1 %cmp1, i32 255, i32 %max
+ ret i32 %r
+}
+
; The next 3 min tests should canonicalize to the same form...and not infinite loop.
define double @PR31751_umin1(i32 %x) {
; CHECK-LABEL: @PR31751_umin1(
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 %x, 2147483647
-; CHECK-NEXT: [[CONV1:%.*]] = select i1 [[TMP1]], i32 %x, i32 2147483647
-; CHECK-NEXT: [[TMP2:%.*]] = sitofp i32 [[CONV1]] to double
-; CHECK-NEXT: ret double [[TMP2]]
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP1]], i32 %x, i32 2147483647
+; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[SEL]] to double
+; CHECK-NEXT: ret double [[CONV]]
;
%cmp = icmp slt i32 %x, 0
%sel = select i1 %cmp, i32 2147483647, i32 %x
@@ -385,9 +456,9 @@ define double @PR31751_umin3(i32 %x) {
define double @PR31751_umax1(i32 %x) {
; CHECK-LABEL: @PR31751_umax1(
; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i32 %x, -2147483648
-; CHECK-NEXT: [[CONV1:%.*]] = select i1 [[TMP1]], i32 %x, i32 -2147483648
-; CHECK-NEXT: [[TMP2:%.*]] = sitofp i32 [[CONV1]] to double
-; CHECK-NEXT: ret double [[TMP2]]
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP1]], i32 %x, i32 -2147483648
+; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[SEL]] to double
+; CHECK-NEXT: ret double [[CONV]]
;
%cmp = icmp sgt i32 %x, -1
%sel = select i1 %cmp, i32 2147483648, i32 %x
@@ -420,3 +491,77 @@ define double @PR31751_umax3(i32 %x) {
%conv = sitofp i32 %sel to double
ret double %conv
}
+
+; The icmp/select form a canonical smax, so don't hide that by folding the final bitcast into the select.
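+; (Keeping the compare and select on the same i32 values preserves the
+; recognizable max pattern; selecting the original floats instead would hide
+; it, as the umax/umin FIXME tests below illustrate.)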
+
+define float @bitcast_scalar_smax(float %x, float %y) {
+; CHECK-LABEL: @bitcast_scalar_smax(
+; CHECK-NEXT: [[BCX:%.*]] = bitcast float %x to i32
+; CHECK-NEXT: [[BCY:%.*]] = bitcast float %y to i32
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[BCX]], [[BCY]]
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 [[BCX]], i32 [[BCY]]
+; CHECK-NEXT: [[BCS:%.*]] = bitcast i32 [[SEL]] to float
+; CHECK-NEXT: ret float [[BCS]]
+;
+ %bcx = bitcast float %x to i32
+ %bcy = bitcast float %y to i32
+ %cmp = icmp sgt i32 %bcx, %bcy
+ %sel = select i1 %cmp, i32 %bcx, i32 %bcy
+ %bcs = bitcast i32 %sel to float
+ ret float %bcs
+}
+
+; FIXME: Create a canonical umax by bitcasting the select.
+
+define float @bitcast_scalar_umax(float %x, float %y) {
+; CHECK-LABEL: @bitcast_scalar_umax(
+; CHECK-NEXT: [[BCX:%.*]] = bitcast float %x to i32
+; CHECK-NEXT: [[BCY:%.*]] = bitcast float %y to i32
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[BCX]], [[BCY]]
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], float %x, float %y
+; CHECK-NEXT: ret float [[SEL]]
+;
+ %bcx = bitcast float %x to i32
+ %bcy = bitcast float %y to i32
+ %cmp = icmp ugt i32 %bcx, %bcy
+ %sel = select i1 %cmp, float %x, float %y
+ ret float %sel
+}
+
+; PR32306 - https://bugs.llvm.org/show_bug.cgi?id=32306
+; The icmp/select form a canonical smin, so don't hide that by folding the final bitcast into the select.
+
+define <8 x float> @bitcast_vector_smin(<8 x float> %x, <8 x float> %y) {
+; CHECK-LABEL: @bitcast_vector_smin(
+; CHECK-NEXT: [[BCX:%.*]] = bitcast <8 x float> %x to <8 x i32>
+; CHECK-NEXT: [[BCY:%.*]] = bitcast <8 x float> %y to <8 x i32>
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt <8 x i32> [[BCX]], [[BCY]]
+; CHECK-NEXT: [[SEL:%.*]] = select <8 x i1> [[CMP]], <8 x i32> [[BCX]], <8 x i32> [[BCY]]
+; CHECK-NEXT: [[BCS:%.*]] = bitcast <8 x i32> [[SEL]] to <8 x float>
+; CHECK-NEXT: ret <8 x float> [[BCS]]
+;
+ %bcx = bitcast <8 x float> %x to <8 x i32>
+ %bcy = bitcast <8 x float> %y to <8 x i32>
+ %cmp = icmp slt <8 x i32> %bcx, %bcy
+ %sel = select <8 x i1> %cmp, <8 x i32> %bcx, <8 x i32> %bcy
+ %bcs = bitcast <8 x i32> %sel to <8 x float>
+ ret <8 x float> %bcs
+}
+
+; FIXME: Create a canonical umin by bitcasting the select.
+
+define <8 x float> @bitcast_vector_umin(<8 x float> %x, <8 x float> %y) {
+; CHECK-LABEL: @bitcast_vector_umin(
+; CHECK-NEXT: [[BCX:%.*]] = bitcast <8 x float> %x to <8 x i32>
+; CHECK-NEXT: [[BCY:%.*]] = bitcast <8 x float> %y to <8 x i32>
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt <8 x i32> [[BCX]], [[BCY]]
+; CHECK-NEXT: [[SEL:%.*]] = select <8 x i1> [[CMP]], <8 x float> %x, <8 x float> %y
+; CHECK-NEXT: ret <8 x float> [[SEL]]
+;
+ %bcx = bitcast <8 x float> %x to <8 x i32>
+ %bcy = bitcast <8 x float> %y to <8 x i32>
+ %cmp = icmp slt <8 x i32> %bcx, %bcy
+ %sel = select <8 x i1> %cmp, <8 x float> %x, <8 x float> %y
+ ret <8 x float> %sel
+}
+
diff --git a/test/Transforms/InstCombine/narrow-switch.ll b/test/Transforms/InstCombine/narrow-switch.ll
index c391fd2cd332..474bd820c8f8 100644
--- a/test/Transforms/InstCombine/narrow-switch.ll
+++ b/test/Transforms/InstCombine/narrow-switch.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; Vary legal integer types in data layout.
-; RUN: opt < %s -instcombine -S -default-data-layout=n32 | FileCheck %s --check-prefix=ALL --check-prefix=CHECK32
-; RUN: opt < %s -instcombine -S -default-data-layout=n32:64 | FileCheck %s --check-prefix=ALL --check-prefix=CHECK64
+; RUN: opt < %s -instcombine -S -data-layout=n32 | FileCheck %s --check-prefix=ALL --check-prefix=CHECK32
+; RUN: opt < %s -instcombine -S -data-layout=n32:64 | FileCheck %s --check-prefix=ALL --check-prefix=CHECK64
; In all cases, the data-layout is irrelevant. We should shrink as much as possible in InstCombine
; and allow the backend to expand as much as needed to ensure optimal codegen for any target.
@@ -164,3 +164,45 @@ case124:
ret i8 5
}
+; Make sure the arithmetic that computes the switch condition is still
+; evaluated on the original type when the switch itself is narrowed.
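+; Each i32 case value is the add's constant -917677090 plus 63, 1 or 100, and
+; the shifted condition fits in 16 bits, so the switch can be narrowed to i16
+; with those small case values (see the ALL checks below).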
+define i32 @trunc32to16(i32 %a0) #0 {
+; ALL-LABEL: @trunc32to16(
+; ALL: switch i16
+; ALL-NEXT: i16 63, label %sw.bb
+; ALL-NEXT: i16 1, label %sw.bb1
+; ALL-NEXT: i16 100, label %sw.bb2
+; ALL-NEXT: ]
+;
+entry:
+ %retval = alloca i32, align 4
+ %xor = xor i32 %a0, 1034460917
+ %shr = lshr i32 %xor, 16
+ %add = add i32 %shr, -917677090
+ switch i32 %add, label %sw.epilog [
+ i32 -917677027, label %sw.bb
+ i32 -917677089, label %sw.bb1
+ i32 -917676990, label %sw.bb2
+ ]
+
+sw.bb: ; preds = %entry
+ store i32 90, i32* %retval, align 4
+ br label %return
+
+sw.bb1: ; preds = %entry
+ store i32 91, i32* %retval, align 4
+ br label %return
+
+sw.bb2: ; preds = %entry
+ store i32 92, i32* %retval, align 4
+ br label %return
+
+sw.epilog: ; preds = %entry
+ store i32 113, i32* %retval, align 4
+ br label %return
+
+return: ; preds = %sw.epilog, %sw.bb2, %sw.bb1, %sw.bb
+ %rval = load i32, i32* %retval, align 4
+ ret i32 %rval
+}
+
diff --git a/test/Transforms/InstCombine/narrow.ll b/test/Transforms/InstCombine/narrow.ll
index 0e000e8bdbeb..1df400aac973 100644
--- a/test/Transforms/InstCombine/narrow.ll
+++ b/test/Transforms/InstCombine/narrow.ll
@@ -97,3 +97,143 @@ define <2 x i32> @shrink_and_vec(<2 x i33> %a) {
ret <2 x i32> %trunc
}
+; FIXME:
+; This is based on an 'any_of' loop construct.
+; By narrowing the phi and logic op, we simplify away the zext and the final icmp.
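+; (The narrowed form would use an i1 phi and an 'or i1' of %cmp1 directly,
+; making the zext and the final 'icmp ne i8' unnecessary.)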
+
+define i1 @searchArray1(i32 %needle, i32* %haystack) {
+; CHECK-LABEL: @searchArray1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[INDVAR:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[FOUND:%.*]] = phi i8 [ 0, [[ENTRY]] ], [ [[OR:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[INDVAR]] to i64
+; CHECK-NEXT: [[IDX:%.*]] = getelementptr i32, i32* [[HAYSTACK:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[LD:%.*]] = load i32, i32* [[IDX]], align 4
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[LD]], [[NEEDLE:%.*]]
+; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[CMP1]] to i8
+; CHECK-NEXT: [[OR]] = or i8 [[FOUND]], [[ZEXT]]
+; CHECK-NEXT: [[INDVAR_NEXT]] = add i32 [[INDVAR]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVAR_NEXT]], 1000
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[LOOP]]
+; CHECK: exit:
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[OR]], 0
+; CHECK-NEXT: ret i1 [[TOBOOL]]
+;
+entry:
+ br label %loop
+
+loop:
+ %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %loop ]
+ %found = phi i8 [ 0, %entry ], [ %or, %loop ]
+ %idx = getelementptr i32, i32* %haystack, i32 %indvar
+ %ld = load i32, i32* %idx
+ %cmp1 = icmp eq i32 %ld, %needle
+ %zext = zext i1 %cmp1 to i8
+ %or = or i8 %found, %zext
+ %indvar.next = add i32 %indvar, 1
+ %exitcond = icmp eq i32 %indvar.next, 1000
+ br i1 %exitcond, label %exit, label %loop
+
+exit:
+ %tobool = icmp ne i8 %or, 0
+ ret i1 %tobool
+}
+
+; FIXME:
+; This is based on an 'all_of' loop construct.
+; By narrowing the phi and logic op, we simplify away the zext and the final icmp.
+
+define i1 @searchArray2(i32 %hay, i32* %haystack) {
+; CHECK-LABEL: @searchArray2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[FOUND:%.*]] = phi i8 [ 1, [[ENTRY]] ], [ [[AND:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IDX:%.*]] = getelementptr i32, i32* [[HAYSTACK:%.*]], i64 [[INDVAR]]
+; CHECK-NEXT: [[LD:%.*]] = load i32, i32* [[IDX]], align 4
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[LD]], [[HAY:%.*]]
+; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[CMP1]] to i8
+; CHECK-NEXT: [[AND]] = and i8 [[FOUND]], [[ZEXT]]
+; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 1000
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[LOOP]]
+; CHECK: exit:
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[AND]], 0
+; CHECK-NEXT: ret i1 [[TOBOOL]]
+;
+entry:
+ br label %loop
+
+loop:
+ %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
+ %found = phi i8 [ 1, %entry ], [ %and, %loop ]
+ %idx = getelementptr i32, i32* %haystack, i64 %indvar
+ %ld = load i32, i32* %idx
+ %cmp1 = icmp eq i32 %ld, %hay
+ %zext = zext i1 %cmp1 to i8
+ %and = and i8 %found, %zext
+ %indvar.next = add i64 %indvar, 1
+ %exitcond = icmp eq i64 %indvar.next, 1000
+ br i1 %exitcond, label %exit, label %loop
+
+exit:
+ %tobool = icmp ne i8 %and, 0
+ ret i1 %tobool
+}
+
+; FIXME:
+; Narrowing should work with an 'xor' and is not limited to bool types.
+
+define i32 @shrinkLogicAndPhi1(i8 %x, i1 %cond) {
+; CHECK-LABEL: @shrinkLogicAndPhi1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF:%.*]], label [[ENDIF:%.*]]
+; CHECK: if:
+; CHECK-NEXT: br label [[ENDIF]]
+; CHECK: endif:
+; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 21, [[ENTRY:%.*]] ], [ 33, [[IF]] ]
+; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[X:%.*]] to i32
+; CHECK-NEXT: [[LOGIC:%.*]] = xor i32 [[PHI]], [[ZEXT]]
+; CHECK-NEXT: ret i32 [[LOGIC]]
+;
+entry:
+ br i1 %cond, label %if, label %endif
+if:
+ br label %endif
+endif:
+ %phi = phi i32 [ 21, %entry], [ 33, %if ]
+ %zext = zext i8 %x to i32
+ %logic = xor i32 %phi, %zext
+ ret i32 %logic
+}
+
+; FIXME:
+; Narrowing should work with an 'xor' and is not limited to bool types.
+; Test that commuting the xor operands does not inhibit optimization.
+
+define i32 @shrinkLogicAndPhi2(i8 %x, i1 %cond) {
+; CHECK-LABEL: @shrinkLogicAndPhi2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF:%.*]], label [[ENDIF:%.*]]
+; CHECK: if:
+; CHECK-NEXT: br label [[ENDIF]]
+; CHECK: endif:
+; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 21, [[ENTRY:%.*]] ], [ 33, [[IF]] ]
+; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[X:%.*]] to i32
+; CHECK-NEXT: [[LOGIC:%.*]] = xor i32 [[PHI]], [[ZEXT]]
+; CHECK-NEXT: ret i32 [[LOGIC]]
+;
+entry:
+ br i1 %cond, label %if, label %endif
+if:
+ br label %endif
+endif:
+ %phi = phi i32 [ 21, %entry], [ 33, %if ]
+ %zext = zext i8 %x to i32
+ %logic = xor i32 %zext, %phi
+ ret i32 %logic
+}
+
diff --git a/test/Transforms/InstCombine/not-fcmp.ll b/test/Transforms/InstCombine/not-fcmp.ll
deleted file mode 100644
index 9718e0b905fc..000000000000
--- a/test/Transforms/InstCombine/not-fcmp.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: opt < %s -instcombine -S | FileCheck %s
-; PR1570
-
-define i1 @f(float %X, float %Y) {
-entry:
- %tmp3 = fcmp olt float %X, %Y ; <i1> [#uses=1]
- %toBoolnot5 = xor i1 %tmp3, true ; <i1> [#uses=1]
- ret i1 %toBoolnot5
-; CHECK-LABEL: @f(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: %toBoolnot5 = fcmp uge float %X, %Y
-; CHECK-NEXT: ret i1 %toBoolnot5
-}
diff --git a/test/Transforms/InstCombine/not.ll b/test/Transforms/InstCombine/not.ll
index edb402a125ac..d0c242f65558 100644
--- a/test/Transforms/InstCombine/not.ll
+++ b/test/Transforms/InstCombine/not.ll
@@ -1,61 +1,95 @@
-; This test makes sure that these instructions are properly eliminated.
-;
-
; RUN: opt < %s -instcombine -S | FileCheck %s
-; CHECK-NOT: xor
define i32 @test1(i32 %A) {
- %B = xor i32 %A, -1
- %C = xor i32 %B, -1
- ret i32 %C
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: ret i32 %A
+;
+ %B = xor i32 %A, -1
+ %C = xor i32 %B, -1
+ ret i32 %C
}
-define i1 @test2(i32 %A, i32 %B) {
- ; Can change into setge
- %cond = icmp sle i32 %A, %B
- %Ret = xor i1 %cond, true
- ret i1 %Ret
+define i1 @invert_icmp(i32 %A, i32 %B) {
+; CHECK-LABEL: @invert_icmp(
+; CHECK-NEXT: [[NOT:%.*]] = icmp sgt i32 %A, %B
+; CHECK-NEXT: ret i1 [[NOT]]
+;
+ %cmp = icmp sle i32 %A, %B
+ %not = xor i1 %cmp, true
+ ret i1 %not
+}
+
+; PR1570
+
+define i1 @invert_fcmp(float %X, float %Y) {
+; CHECK-LABEL: @invert_fcmp(
+; CHECK-NEXT: [[NOT:%.*]] = fcmp uge float %X, %Y
+; CHECK-NEXT: ret i1 [[NOT]]
+;
+ %cmp = fcmp olt float %X, %Y
+ %not = xor i1 %cmp, true
+ ret i1 %not
}
; Test that De Morgan's law can be instcombined.
define i32 @test3(i32 %A, i32 %B) {
- %a = xor i32 %A, -1
- %b = xor i32 %B, -1
- %c = and i32 %a, %b
- %d = xor i32 %c, -1
- ret i32 %d
+; CHECK-LABEL: @test3(
+; CHECK-NEXT: [[C_DEMORGAN:%.*]] = or i32 %A, %B
+; CHECK-NEXT: ret i32 [[C_DEMORGAN]]
+;
+ %a = xor i32 %A, -1
+ %b = xor i32 %B, -1
+ %c = and i32 %a, %b
+ %d = xor i32 %c, -1
+ ret i32 %d
}
; Test that De Morgan's law can work with constants.
define i32 @test4(i32 %A, i32 %B) {
- %a = xor i32 %A, -1
- %c = and i32 %a, 5
- %d = xor i32 %c, -1
- ret i32 %d
+; CHECK-LABEL: @test4(
+; CHECK-NEXT: [[D1:%.*]] = or i32 %A, -6
+; CHECK-NEXT: ret i32 [[D1]]
+;
+ %a = xor i32 %A, -1
+ %c = and i32 %a, 5
+ %d = xor i32 %c, -1
+ ret i32 %d
}
; Test the mirror of De Morgan's law.
define i32 @test5(i32 %A, i32 %B) {
- %a = xor i32 %A, -1
- %b = xor i32 %B, -1
- %c = or i32 %a, %b
- %d = xor i32 %c, -1
- ret i32 %d
+; CHECK-LABEL: @test5(
+; CHECK-NEXT: [[C_DEMORGAN:%.*]] = and i32 %A, %B
+; CHECK-NEXT: ret i32 [[C_DEMORGAN]]
+;
+ %a = xor i32 %A, -1
+ %b = xor i32 %B, -1
+ %c = or i32 %a, %b
+ %d = xor i32 %c, -1
+ ret i32 %d
}
; PR2298
define zeroext i8 @test6(i32 %a, i32 %b) {
-entry:
- %tmp1not = xor i32 %a, -1
- %tmp2not = xor i32 %b, -1
- %tmp3 = icmp slt i32 %tmp1not, %tmp2not
- %retval67 = zext i1 %tmp3 to i8
- ret i8 %retval67
+; CHECK-LABEL: @test6(
+; CHECK-NEXT: [[TMP3:%.*]] = icmp slt i32 %b, %a
+; CHECK-NEXT: [[RETVAL67:%.*]] = zext i1 [[TMP3]] to i8
+; CHECK-NEXT: ret i8 [[RETVAL67]]
+;
+ %tmp1not = xor i32 %a, -1
+ %tmp2not = xor i32 %b, -1
+ %tmp3 = icmp slt i32 %tmp1not, %tmp2not
+ %retval67 = zext i1 %tmp3 to i8
+ ret i8 %retval67
}
define <2 x i1> @test7(<2 x i32> %A, <2 x i32> %B) {
- %cond = icmp sle <2 x i32> %A, %B
- %Ret = xor <2 x i1> %cond, <i1 true, i1 true>
- ret <2 x i1> %Ret
+; CHECK-LABEL: @test7(
+; CHECK-NEXT: [[RET:%.*]] = icmp sgt <2 x i32> %A, %B
+; CHECK-NEXT: ret <2 x i1> [[RET]]
+;
+ %cond = icmp sle <2 x i32> %A, %B
+ %Ret = xor <2 x i1> %cond, <i1 true, i1 true>
+ ret <2 x i1> %Ret
}
diff --git a/test/Transforms/InstCombine/nvvm-intrins.ll b/test/Transforms/InstCombine/nvvm-intrins.ll
new file mode 100644
index 000000000000..cb65b8fdc547
--- /dev/null
+++ b/test/Transforms/InstCombine/nvvm-intrins.ll
@@ -0,0 +1,471 @@
+; Check that nvvm intrinsics get simplified to target-generic intrinsics where
+; possible.
+;
+; We run this test twice; once with ftz on, and again with ftz off. Behold the
+; hackery:
+
+; RUN: cat %s > %t.ftz
+; RUN: echo 'attributes #0 = { "nvptx-f32ftz" = "true" }' >> %t.ftz
+; RUN: opt < %t.ftz -instcombine -S | FileCheck %s --check-prefix=CHECK --check-prefix=FTZ
+
+; RUN: cat %s > %t.noftz
+; RUN: echo 'attributes #0 = { "nvptx-f32ftz" = "false" }' >> %t.noftz
+; RUN: opt < %t.noftz -instcombine -S | FileCheck %s --check-prefix=CHECK --check-prefix=NOFTZ
+
+; We handle nvvm intrinsics with ftz variants as follows:
+; - If the module is in ftz mode, the ftz variant is transformed into the
+; regular llvm intrinsic, and the non-ftz variant is left alone.
+; - If the module is not in ftz mode, it's the reverse: Only the non-ftz
+; variant is transformed, and the ftz variant is left alone.
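+; ("ftz" is flush-to-zero: f32 denormal inputs and results are treated as
+; zero, so an ftz variant is only equivalent to the generic LLVM intrinsic
+; when the module itself is compiled in ftz mode, and vice versa.)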
+
+; Check NVVM intrinsics that map directly to LLVM target-generic intrinsics.
+
+; CHECK-LABEL: @ceil_double
+define double @ceil_double(double %a) #0 {
+; CHECK: call double @llvm.ceil.f64
+ %ret = call double @llvm.nvvm.ceil.d(double %a)
+ ret double %ret
+}
+; CHECK-LABEL: @ceil_float
+define float @ceil_float(float %a) #0 {
+; NOFTZ: call float @llvm.ceil.f32
+; FTZ: call float @llvm.nvvm.ceil.f
+ %ret = call float @llvm.nvvm.ceil.f(float %a)
+ ret float %ret
+}
+; CHECK-LABEL: @ceil_float_ftz
+define float @ceil_float_ftz(float %a) #0 {
+; NOFTZ: call float @llvm.nvvm.ceil.ftz.f
+; FTZ: call float @llvm.ceil.f32
+ %ret = call float @llvm.nvvm.ceil.ftz.f(float %a)
+ ret float %ret
+}
+
+; CHECK-LABEL: @fabs_double
+define double @fabs_double(double %a) #0 {
+; CHECK: call double @llvm.fabs.f64
+ %ret = call double @llvm.nvvm.fabs.d(double %a)
+ ret double %ret
+}
+; CHECK-LABEL: @fabs_float
+define float @fabs_float(float %a) #0 {
+; NOFTZ: call float @llvm.fabs.f32
+; FTZ: call float @llvm.nvvm.fabs.f
+ %ret = call float @llvm.nvvm.fabs.f(float %a)
+ ret float %ret
+}
+; CHECK-LABEL: @fabs_float_ftz
+define float @fabs_float_ftz(float %a) #0 {
+; NOFTZ: call float @llvm.nvvm.fabs.ftz.f
+; FTZ: call float @llvm.fabs.f32
+ %ret = call float @llvm.nvvm.fabs.ftz.f(float %a)
+ ret float %ret
+}
+
+; CHECK-LABEL: @floor_double
+define double @floor_double(double %a) #0 {
+; CHECK: call double @llvm.floor.f64
+ %ret = call double @llvm.nvvm.floor.d(double %a)
+ ret double %ret
+}
+; CHECK-LABEL: @floor_float
+define float @floor_float(float %a) #0 {
+; NOFTZ: call float @llvm.floor.f32
+; FTZ: call float @llvm.nvvm.floor.f
+ %ret = call float @llvm.nvvm.floor.f(float %a)
+ ret float %ret
+}
+; CHECK-LABEL: @floor_float_ftz
+define float @floor_float_ftz(float %a) #0 {
+; NOFTZ: call float @llvm.nvvm.floor.ftz.f
+; FTZ: call float @llvm.floor.f32
+ %ret = call float @llvm.nvvm.floor.ftz.f(float %a)
+ ret float %ret
+}
+
+; CHECK-LABEL: @fma_double
+define double @fma_double(double %a, double %b, double %c) #0 {
+; CHECK: call double @llvm.fma.f64
+ %ret = call double @llvm.nvvm.fma.rn.d(double %a, double %b, double %c)
+ ret double %ret
+}
+; CHECK-LABEL: @fma_float
+define float @fma_float(float %a, float %b, float %c) #0 {
+; NOFTZ: call float @llvm.fma.f32
+; FTZ: call float @llvm.nvvm.fma.rn.f
+ %ret = call float @llvm.nvvm.fma.rn.f(float %a, float %b, float %c)
+ ret float %ret
+}
+; CHECK-LABEL: @fma_float_ftz
+define float @fma_float_ftz(float %a, float %b, float %c) #0 {
+; NOFTZ: call float @llvm.nvvm.fma.rn.ftz.f
+; FTZ: call float @llvm.fma.f32
+ %ret = call float @llvm.nvvm.fma.rn.ftz.f(float %a, float %b, float %c)
+ ret float %ret
+}
+
+; CHECK-LABEL: @fmax_double
+define double @fmax_double(double %a, double %b) #0 {
+; CHECK: call double @llvm.maxnum.f64
+ %ret = call double @llvm.nvvm.fmax.d(double %a, double %b)
+ ret double %ret
+}
+; CHECK-LABEL: @fmax_float
+define float @fmax_float(float %a, float %b) #0 {
+; NOFTZ: call float @llvm.maxnum.f32
+; FTZ: call float @llvm.nvvm.fmax.f
+ %ret = call float @llvm.nvvm.fmax.f(float %a, float %b)
+ ret float %ret
+}
+; CHECK-LABEL: @fmax_float_ftz
+define float @fmax_float_ftz(float %a, float %b) #0 {
+; NOFTZ: call float @llvm.nvvm.fmax.ftz.f
+; FTZ: call float @llvm.maxnum.f32
+ %ret = call float @llvm.nvvm.fmax.ftz.f(float %a, float %b)
+ ret float %ret
+}
+
+; CHECK-LABEL: @fmin_double
+define double @fmin_double(double %a, double %b) #0 {
+; CHECK: call double @llvm.minnum.f64
+ %ret = call double @llvm.nvvm.fmin.d(double %a, double %b)
+ ret double %ret
+}
+; CHECK-LABEL: @fmin_float
+define float @fmin_float(float %a, float %b) #0 {
+; NOFTZ: call float @llvm.minnum.f32
+; FTZ: call float @llvm.nvvm.fmin.f
+ %ret = call float @llvm.nvvm.fmin.f(float %a, float %b)
+ ret float %ret
+}
+; CHECK-LABEL: @fmin_float_ftz
+define float @fmin_float_ftz(float %a, float %b) #0 {
+; NOFTZ: call float @llvm.nvvm.fmin.ftz.f
+; FTZ: call float @llvm.minnum.f32
+ %ret = call float @llvm.nvvm.fmin.ftz.f(float %a, float %b)
+ ret float %ret
+}
+
+; CHECK-LABEL: @round_double
+define double @round_double(double %a) #0 {
+; CHECK: call double @llvm.round.f64
+ %ret = call double @llvm.nvvm.round.d(double %a)
+ ret double %ret
+}
+; CHECK-LABEL: @round_float
+define float @round_float(float %a) #0 {
+; NOFTZ: call float @llvm.round.f32
+; FTZ: call float @llvm.nvvm.round.f
+ %ret = call float @llvm.nvvm.round.f(float %a)
+ ret float %ret
+}
+; CHECK-LABEL: @round_float_ftz
+define float @round_float_ftz(float %a) #0 {
+; NOFTZ: call float @llvm.nvvm.round.ftz.f
+; FTZ: call float @llvm.round.f32
+ %ret = call float @llvm.nvvm.round.ftz.f(float %a)
+ ret float %ret
+}
+
+; CHECK-LABEL: @trunc_double
+define double @trunc_double(double %a) #0 {
+; CHECK: call double @llvm.trunc.f64
+ %ret = call double @llvm.nvvm.trunc.d(double %a)
+ ret double %ret
+}
+; CHECK-LABEL: @trunc_float
+define float @trunc_float(float %a) #0 {
+; NOFTZ: call float @llvm.trunc.f32
+; FTZ: call float @llvm.nvvm.trunc.f
+ %ret = call float @llvm.nvvm.trunc.f(float %a)
+ ret float %ret
+}
+; CHECK-LABEL: @trunc_float_ftz
+define float @trunc_float_ftz(float %a) #0 {
+; NOFTZ: call float @llvm.nvvm.trunc.ftz.f
+; FTZ: call float @llvm.trunc.f32
+ %ret = call float @llvm.nvvm.trunc.ftz.f(float %a)
+ ret float %ret
+}
+
+; Check NVVM intrinsics that correspond to LLVM cast operations.
+
+; CHECK-LABEL: @test_d2i
+define i32 @test_d2i(double %a) #0 {
+; CHECK: fptosi double %a to i32
+ %ret = call i32 @llvm.nvvm.d2i.rz(double %a)
+ ret i32 %ret
+}
+; CHECK-LABEL: @test_f2i
+define i32 @test_f2i(float %a) #0 {
+; CHECK: fptosi float %a to i32
+ %ret = call i32 @llvm.nvvm.f2i.rz(float %a)
+ ret i32 %ret
+}
+; CHECK-LABEL: @test_d2ll
+define i64 @test_d2ll(double %a) #0 {
+; CHECK: fptosi double %a to i64
+ %ret = call i64 @llvm.nvvm.d2ll.rz(double %a)
+ ret i64 %ret
+}
+; CHECK-LABEL: @test_f2ll
+define i64 @test_f2ll(float %a) #0 {
+; CHECK: fptosi float %a to i64
+ %ret = call i64 @llvm.nvvm.f2ll.rz(float %a)
+ ret i64 %ret
+}
+; CHECK-LABEL: @test_d2ui
+define i32 @test_d2ui(double %a) #0 {
+; CHECK: fptoui double %a to i32
+ %ret = call i32 @llvm.nvvm.d2ui.rz(double %a)
+ ret i32 %ret
+}
+; CHECK-LABEL: @test_f2ui
+define i32 @test_f2ui(float %a) #0 {
+; CHECK: fptoui float %a to i32
+ %ret = call i32 @llvm.nvvm.f2ui.rz(float %a)
+ ret i32 %ret
+}
+; CHECK-LABEL: @test_d2ull
+define i64 @test_d2ull(double %a) #0 {
+; CHECK: fptoui double %a to i64
+ %ret = call i64 @llvm.nvvm.d2ull.rz(double %a)
+ ret i64 %ret
+}
+; CHECK-LABEL: @test_f2ull
+define i64 @test_f2ull(float %a) #0 {
+; CHECK: fptoui float %a to i64
+ %ret = call i64 @llvm.nvvm.f2ull.rz(float %a)
+ ret i64 %ret
+}
+
+; CHECK-LABEL: @test_i2d
+define double @test_i2d(i32 %a) #0 {
+; CHECK: sitofp i32 %a to double
+ %ret = call double @llvm.nvvm.i2d.rz(i32 %a)
+ ret double %ret
+}
+; CHECK-LABEL: @test_i2f
+define float @test_i2f(i32 %a) #0 {
+; CHECK: sitofp i32 %a to float
+ %ret = call float @llvm.nvvm.i2f.rz(i32 %a)
+ ret float %ret
+}
+; CHECK-LABEL: @test_ll2d
+define double @test_ll2d(i64 %a) #0 {
+; CHECK: sitofp i64 %a to double
+ %ret = call double @llvm.nvvm.ll2d.rz(i64 %a)
+ ret double %ret
+}
+; CHECK-LABEL: @test_ll2f
+define float @test_ll2f(i64 %a) #0 {
+; CHECK: sitofp i64 %a to float
+ %ret = call float @llvm.nvvm.ll2f.rz(i64 %a)
+ ret float %ret
+}
+; CHECK-LABEL: @test_ui2d
+define double @test_ui2d(i32 %a) #0 {
+; CHECK: uitofp i32 %a to double
+ %ret = call double @llvm.nvvm.ui2d.rz(i32 %a)
+ ret double %ret
+}
+; CHECK-LABEL: @test_ui2f
+define float @test_ui2f(i32 %a) #0 {
+; CHECK: uitofp i32 %a to float
+ %ret = call float @llvm.nvvm.ui2f.rz(i32 %a)
+ ret float %ret
+}
+; CHECK-LABEL: @test_ull2d
+define double @test_ull2d(i64 %a) #0 {
+; CHECK: uitofp i64 %a to double
+ %ret = call double @llvm.nvvm.ull2d.rz(i64 %a)
+ ret double %ret
+}
+; CHECK-LABEL: @test_ull2f
+define float @test_ull2f(i64 %a) #0 {
+; CHECK: uitofp i64 %a to float
+ %ret = call float @llvm.nvvm.ull2f.rz(i64 %a)
+ ret float %ret
+}
+
+; Check NVVM intrinsics that map to LLVM binary operations.
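+; The double-precision variants fold unconditionally: PTX flush-to-zero only
+; affects f32 operations, so the .d tests below use a plain CHECK while the
+; float tests split on NOFTZ/FTZ.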
+
+; CHECK-LABEL: @test_add_rn_d
+define double @test_add_rn_d(double %a, double %b) #0 {
+; CHECK: fadd
+ %ret = call double @llvm.nvvm.add.rn.d(double %a, double %b)
+ ret double %ret
+}
+; CHECK-LABEL: @test_add_rn_f
+define float @test_add_rn_f(float %a, float %b) #0 {
+; NOFTZ: fadd
+; FTZ: call float @llvm.nvvm.add.rn.f
+ %ret = call float @llvm.nvvm.add.rn.f(float %a, float %b)
+ ret float %ret
+}
+; CHECK-LABEL: @test_add_rn_f_ftz
+define float @test_add_rn_f_ftz(float %a, float %b) #0 {
+; NOFTZ: call float @llvm.nvvm.add.rn.f
+; FTZ: fadd
+ %ret = call float @llvm.nvvm.add.rn.ftz.f(float %a, float %b)
+ ret float %ret
+}
+
+; CHECK-LABEL: @test_mul_rn_d
+define double @test_mul_rn_d(double %a, double %b) #0 {
+; CHECK: fmul
+ %ret = call double @llvm.nvvm.mul.rn.d(double %a, double %b)
+ ret double %ret
+}
+; CHECK-LABEL: @test_mul_rn_f
+define float @test_mul_rn_f(float %a, float %b) #0 {
+; NOFTZ: fmul
+; FTZ: call float @llvm.nvvm.mul.rn.f
+ %ret = call float @llvm.nvvm.mul.rn.f(float %a, float %b)
+ ret float %ret
+}
+; CHECK-LABEL: @test_mul_rn_f_ftz
+define float @test_mul_rn_f_ftz(float %a, float %b) #0 {
+; NOFTZ: call float @llvm.nvvm.mul.rn.f
+; FTZ: fmul
+ %ret = call float @llvm.nvvm.mul.rn.ftz.f(float %a, float %b)
+ ret float %ret
+}
+
+; CHECK-LABEL: @test_div_rn_d
+define double @test_div_rn_d(double %a, double %b) #0 {
+; CHECK: fdiv
+ %ret = call double @llvm.nvvm.div.rn.d(double %a, double %b)
+ ret double %ret
+}
+; CHECK-LABEL: @test_div_rn_f
+define float @test_div_rn_f(float %a, float %b) #0 {
+; NOFTZ: fdiv
+; FTZ: call float @llvm.nvvm.div.rn.f
+ %ret = call float @llvm.nvvm.div.rn.f(float %a, float %b)
+ ret float %ret
+}
+; CHECK-LABEL: @test_div_rn_f_ftz
+define float @test_div_rn_f_ftz(float %a, float %b) #0 {
+; NOFTZ: call float @llvm.nvvm.div.rn.f
+; FTZ: fdiv
+ %ret = call float @llvm.nvvm.div.rn.ftz.f(float %a, float %b)
+ ret float %ret
+}
+
+; Check NVVM intrinsics that require us to emit custom IR.
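+; For example, rcp has no LLVM intrinsic equivalent; in non-FTZ mode it is
+; expanded to a literal divide, roughly (illustrative):
+;   %ret = fdiv float 1.0, %a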
+
+; CHECK-LABEL: @test_rcp_rn_f
+define float @test_rcp_rn_f(float %a) #0 {
+; NOFTZ: fdiv float 1.0{{.*}} %a
+; FTZ: call float @llvm.nvvm.rcp.rn.f
+ %ret = call float @llvm.nvvm.rcp.rn.f(float %a)
+ ret float %ret
+}
+; CHECK-LABEL: @test_rcp_rn_f_ftz
+define float @test_rcp_rn_f_ftz(float %a) #0 {
+; NOFTZ: call float @llvm.nvvm.rcp.rn.f
+; FTZ: fdiv float 1.0{{.*}} %a
+ %ret = call float @llvm.nvvm.rcp.rn.ftz.f(float %a)
+ ret float %ret
+}
+
+; CHECK-LABEL: @test_sqrt_rn_d
+define double @test_sqrt_rn_d(double %a) #0 {
+; CHECK: call double @llvm.sqrt.f64(double %a)
+ %ret = call double @llvm.nvvm.sqrt.rn.d(double %a)
+ ret double %ret
+}
+; nvvm.sqrt.f is a special case: it maps to llvm.sqrt.f32 in both FTZ and
+; non-FTZ modes.
+; CHECK-LABEL: @test_sqrt_f
+define float @test_sqrt_f(float %a) #0 {
+; CHECK: call float @llvm.sqrt.f32(float %a)
+ %ret = call float @llvm.nvvm.sqrt.f(float %a)
+ ret float %ret
+}
+; CHECK-LABEL: @test_sqrt_rn_f
+define float @test_sqrt_rn_f(float %a) #0 {
+; NOFTZ: call float @llvm.sqrt.f32(float %a)
+; FTZ: call float @llvm.nvvm.sqrt.rn.f
+ %ret = call float @llvm.nvvm.sqrt.rn.f(float %a)
+ ret float %ret
+}
+; CHECK-LABEL: @test_sqrt_rn_f_ftz
+define float @test_sqrt_rn_f_ftz(float %a) #0 {
+; NOFTZ: call float @llvm.nvvm.sqrt.rn.f
+; FTZ: call float @llvm.sqrt.f32(float %a)
+ %ret = call float @llvm.nvvm.sqrt.rn.ftz.f(float %a)
+ ret float %ret
+}
+
+declare double @llvm.nvvm.add.rn.d(double, double)
+declare float @llvm.nvvm.add.rn.f(float, float)
+declare float @llvm.nvvm.add.rn.ftz.f(float, float)
+declare double @llvm.nvvm.ceil.d(double)
+declare float @llvm.nvvm.ceil.f(float)
+declare float @llvm.nvvm.ceil.ftz.f(float)
+declare float @llvm.nvvm.d2f.rm(double)
+declare float @llvm.nvvm.d2f.rm.ftz(double)
+declare float @llvm.nvvm.d2f.rp(double)
+declare float @llvm.nvvm.d2f.rp.ftz(double)
+declare float @llvm.nvvm.d2f.rz(double)
+declare float @llvm.nvvm.d2f.rz.ftz(double)
+declare i32 @llvm.nvvm.d2i.rz(double)
+declare i64 @llvm.nvvm.d2ll.rz(double)
+declare i32 @llvm.nvvm.d2ui.rz(double)
+declare i64 @llvm.nvvm.d2ull.rz(double)
+declare double @llvm.nvvm.div.rn.d(double, double)
+declare float @llvm.nvvm.div.rn.f(float, float)
+declare float @llvm.nvvm.div.rn.ftz.f(float, float)
+declare i16 @llvm.nvvm.f2h.rz(float)
+declare i16 @llvm.nvvm.f2h.rz.ftz(float)
+declare i32 @llvm.nvvm.f2i.rz(float)
+declare i32 @llvm.nvvm.f2i.rz.ftz(float)
+declare i64 @llvm.nvvm.f2ll.rz(float)
+declare i64 @llvm.nvvm.f2ll.rz.ftz(float)
+declare i32 @llvm.nvvm.f2ui.rz(float)
+declare i32 @llvm.nvvm.f2ui.rz.ftz(float)
+declare i64 @llvm.nvvm.f2ull.rz(float)
+declare i64 @llvm.nvvm.f2ull.rz.ftz(float)
+declare double @llvm.nvvm.fabs.d(double)
+declare float @llvm.nvvm.fabs.f(float)
+declare float @llvm.nvvm.fabs.ftz.f(float)
+declare double @llvm.nvvm.floor.d(double)
+declare float @llvm.nvvm.floor.f(float)
+declare float @llvm.nvvm.floor.ftz.f(float)
+declare double @llvm.nvvm.fma.rn.d(double, double, double)
+declare float @llvm.nvvm.fma.rn.f(float, float, float)
+declare float @llvm.nvvm.fma.rn.ftz.f(float, float, float)
+declare double @llvm.nvvm.fmax.d(double, double)
+declare float @llvm.nvvm.fmax.f(float, float)
+declare float @llvm.nvvm.fmax.ftz.f(float, float)
+declare double @llvm.nvvm.fmin.d(double, double)
+declare float @llvm.nvvm.fmin.f(float, float)
+declare float @llvm.nvvm.fmin.ftz.f(float, float)
+declare double @llvm.nvvm.i2d.rz(i32)
+declare float @llvm.nvvm.i2f.rz(i32)
+declare double @llvm.nvvm.ll2d.rz(i64)
+declare float @llvm.nvvm.ll2f.rz(i64)
+declare double @llvm.nvvm.lohi.i2d(i32, i32)
+declare double @llvm.nvvm.mul.rn.d(double, double)
+declare float @llvm.nvvm.mul.rn.f(float, float)
+declare float @llvm.nvvm.mul.rn.ftz.f(float, float)
+declare double @llvm.nvvm.rcp.rm.d(double)
+declare double @llvm.nvvm.rcp.rn.d(double)
+declare float @llvm.nvvm.rcp.rn.f(float)
+declare float @llvm.nvvm.rcp.rn.ftz.f(float)
+declare double @llvm.nvvm.round.d(double)
+declare float @llvm.nvvm.round.f(float)
+declare float @llvm.nvvm.round.ftz.f(float)
+declare float @llvm.nvvm.sqrt.f(float)
+declare double @llvm.nvvm.sqrt.rn.d(double)
+declare float @llvm.nvvm.sqrt.rn.f(float)
+declare float @llvm.nvvm.sqrt.rn.ftz.f(float)
+declare double @llvm.nvvm.trunc.d(double)
+declare float @llvm.nvvm.trunc.f(float)
+declare float @llvm.nvvm.trunc.ftz.f(float)
+declare double @llvm.nvvm.ui2d.rz(i32)
+declare float @llvm.nvvm.ui2f.rn(i32)
+declare float @llvm.nvvm.ui2f.rz(i32)
+declare double @llvm.nvvm.ull2d.rz(i64)
+declare float @llvm.nvvm.ull2f.rz(i64)
diff --git a/test/Transforms/InstCombine/objsize.ll b/test/Transforms/InstCombine/objsize.ll
index 2af391f907cc..5c0a36f5feaa 100644
--- a/test/Transforms/InstCombine/objsize.ll
+++ b/test/Transforms/InstCombine/objsize.ll
@@ -8,7 +8,7 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
define i32 @foo() nounwind {
; CHECK-LABEL: @foo(
; CHECK-NEXT: ret i32 60
- %1 = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i1 false)
+ %1 = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i1 false, i1 false)
ret i32 %1
}
@@ -16,7 +16,7 @@ define i8* @bar() nounwind {
; CHECK-LABEL: @bar(
entry:
%retval = alloca i8*
- %0 = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i1 false)
+ %0 = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i1 false, i1 false)
%cmp = icmp ne i32 %0, -1
; CHECK: br i1 true
br i1 %cmp, label %cond.true, label %cond.false
@@ -33,7 +33,7 @@ cond.false:
define i32 @f() nounwind {
; CHECK-LABEL: @f(
; CHECK-NEXT: ret i32 0
- %1 = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr ([60 x i8], [60 x i8]* @a, i32 1, i32 0), i1 false)
+ %1 = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr ([60 x i8], [60 x i8]* @a, i32 1, i32 0), i1 false, i1 false)
ret i32 %1
}
@@ -42,7 +42,7 @@ define i32 @f() nounwind {
define i1 @baz() nounwind {
; CHECK-LABEL: @baz(
; CHECK-NEXT: objectsize
- %1 = tail call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([0 x i8], [0 x i8]* @window, i32 0, i32 0), i1 false)
+ %1 = tail call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([0 x i8], [0 x i8]* @window, i32 0, i32 0), i1 false, i1 false)
%2 = icmp eq i32 %1, -1
ret i1 %2
}
@@ -51,7 +51,7 @@ define void @test1(i8* %q, i32 %x) nounwind noinline {
; CHECK-LABEL: @test1(
; CHECK: objectsize.i32.p0i8
entry:
- %0 = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([0 x i8], [0 x i8]* @window, i32 0, i32 10), i1 false) ; <i64> [#uses=1]
+ %0 = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([0 x i8], [0 x i8]* @window, i32 0, i32 10), i1 false, i1 false) ; <i64> [#uses=1]
%1 = icmp eq i32 %0, -1 ; <i1> [#uses=1]
br i1 %1, label %"47", label %"46"
@@ -67,7 +67,7 @@ entry:
define i32 @test2() nounwind {
; CHECK-LABEL: @test2(
; CHECK-NEXT: ret i32 34
- %1 = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr (i8, i8* bitcast ([9 x i32]* @.str5 to i8*), i32 2), i1 false)
+ %1 = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr (i8, i8* bitcast ([9 x i32]* @.str5 to i8*), i32 2), i1 false, i1 false)
ret i32 %1
}
@@ -76,7 +76,9 @@ define i32 @test2() nounwind {
declare i8* @__memcpy_chk(i8*, i8*, i32, i32) nounwind
-declare i32 @llvm.objectsize.i32.p0i8(i8*, i1) nounwind readonly
+declare i32 @llvm.objectsize.i32.p0i8(i8*, i1, i1) nounwind readonly
+
+declare i32 @llvm.objectsize.i32.p1i8(i8 addrspace(1)*, i1, i1) nounwind readonly
declare i8* @__inline_memcpy_chk(i8*, i8*, i32) nounwind inlinehint
@@ -88,7 +90,7 @@ entry:
bb11:
%0 = getelementptr inbounds float, float* getelementptr inbounds ([480 x float], [480 x float]* @array, i32 0, i32 128), i32 -127 ; <float*> [#uses=1]
%1 = bitcast float* %0 to i8* ; <i8*> [#uses=1]
- %2 = call i32 @llvm.objectsize.i32.p0i8(i8* %1, i1 false) ; <i32> [#uses=1]
+ %2 = call i32 @llvm.objectsize.i32.p0i8(i8* %1, i1 false, i1 false) ; <i32> [#uses=1]
%3 = call i8* @__memcpy_chk(i8* undef, i8* undef, i32 512, i32 %2) nounwind ; <i8*> [#uses=0]
; CHECK: unreachable
unreachable
@@ -110,7 +112,7 @@ define i32 @test4(i8** %esc) nounwind ssp {
entry:
%0 = alloca %struct.data, align 8
%1 = bitcast %struct.data* %0 to i8*
- %2 = call i32 @llvm.objectsize.i32.p0i8(i8* %1, i1 false) nounwind
+ %2 = call i32 @llvm.objectsize.i32.p0i8(i8* %1, i1 false, i1 false) nounwind
; CHECK-NOT: @llvm.objectsize
; CHECK: @llvm.memset.p0i8.i32(i8* %1, i8 0, i32 1824, i32 8, i1 false)
%3 = call i8* @__memset_chk(i8* %1, i32 0, i32 1824, i32 %2) nounwind
@@ -125,7 +127,7 @@ define i8* @test5(i32 %n) nounwind ssp {
; CHECK-LABEL: @test5(
entry:
%0 = tail call noalias i8* @malloc(i32 20) nounwind
- %1 = tail call i32 @llvm.objectsize.i32.p0i8(i8* %0, i1 false)
+ %1 = tail call i32 @llvm.objectsize.i32.p0i8(i8* %0, i1 false, i1 false)
%2 = load i8*, i8** @s, align 8
; CHECK-NOT: @llvm.objectsize
; CHECK: @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 10, i32 1, i1 false)
@@ -137,7 +139,7 @@ define void @test6(i32 %n) nounwind ssp {
; CHECK-LABEL: @test6(
entry:
%0 = tail call noalias i8* @malloc(i32 20) nounwind
- %1 = tail call i32 @llvm.objectsize.i32.p0i8(i8* %0, i1 false)
+ %1 = tail call i32 @llvm.objectsize.i32.p0i8(i8* %0, i1 false, i1 false)
%2 = load i8*, i8** @s, align 8
; CHECK-NOT: @llvm.objectsize
; CHECK: @__memcpy_chk(i8* %0, i8* %1, i32 30, i32 20)
@@ -154,7 +156,7 @@ define i32 @test7(i8** %esc) {
%alloc = call noalias i8* @malloc(i32 48) nounwind
store i8* %alloc, i8** %esc
%gep = getelementptr inbounds i8, i8* %alloc, i32 16
- %objsize = call i32 @llvm.objectsize.i32.p0i8(i8* %gep, i1 false) nounwind readonly
+ %objsize = call i32 @llvm.objectsize.i32.p0i8(i8* %gep, i1 false, i1 false) nounwind readonly
; CHECK: ret i32 32
ret i32 %objsize
}
@@ -166,7 +168,7 @@ define i32 @test8(i8** %esc) {
%alloc = call noalias i8* @calloc(i32 5, i32 7) nounwind
store i8* %alloc, i8** %esc
%gep = getelementptr inbounds i8, i8* %alloc, i32 5
- %objsize = call i32 @llvm.objectsize.i32.p0i8(i8* %gep, i1 false) nounwind readonly
+ %objsize = call i32 @llvm.objectsize.i32.p0i8(i8* %gep, i1 false, i1 false) nounwind readonly
; CHECK: ret i32 30
ret i32 %objsize
}
@@ -178,7 +180,7 @@ declare noalias i8* @strndup(i8* nocapture, i32) nounwind
define i32 @test9(i8** %esc) {
%call = tail call i8* @strdup(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0)) nounwind
store i8* %call, i8** %esc, align 8
- %1 = tail call i32 @llvm.objectsize.i32.p0i8(i8* %call, i1 true)
+ %1 = tail call i32 @llvm.objectsize.i32.p0i8(i8* %call, i1 true, i1 false)
; CHECK: ret i32 8
ret i32 %1
}
@@ -187,7 +189,7 @@ define i32 @test9(i8** %esc) {
define i32 @test10(i8** %esc) {
%call = tail call i8* @strndup(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i32 3) nounwind
store i8* %call, i8** %esc, align 8
- %1 = tail call i32 @llvm.objectsize.i32.p0i8(i8* %call, i1 true)
+ %1 = tail call i32 @llvm.objectsize.i32.p0i8(i8* %call, i1 true, i1 false)
; CHECK: ret i32 4
ret i32 %1
}
@@ -196,7 +198,7 @@ define i32 @test10(i8** %esc) {
define i32 @test11(i8** %esc) {
%call = tail call i8* @strndup(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i32 7) nounwind
store i8* %call, i8** %esc, align 8
- %1 = tail call i32 @llvm.objectsize.i32.p0i8(i8* %call, i1 true)
+ %1 = tail call i32 @llvm.objectsize.i32.p0i8(i8* %call, i1 true, i1 false)
; CHECK: ret i32 8
ret i32 %1
}
@@ -205,7 +207,7 @@ define i32 @test11(i8** %esc) {
define i32 @test12(i8** %esc) {
%call = tail call i8* @strndup(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i32 8) nounwind
store i8* %call, i8** %esc, align 8
- %1 = tail call i32 @llvm.objectsize.i32.p0i8(i8* %call, i1 true)
+ %1 = tail call i32 @llvm.objectsize.i32.p0i8(i8* %call, i1 true, i1 false)
; CHECK: ret i32 8
ret i32 %1
}
@@ -214,7 +216,7 @@ define i32 @test12(i8** %esc) {
define i32 @test13(i8** %esc) {
%call = tail call i8* @strndup(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i32 57) nounwind
store i8* %call, i8** %esc, align 8
- %1 = tail call i32 @llvm.objectsize.i32.p0i8(i8* %call, i1 true)
+ %1 = tail call i32 @llvm.objectsize.i32.p0i8(i8* %call, i1 true, i1 false)
; CHECK: ret i32 8
ret i32 %1
}
@@ -225,7 +227,7 @@ define i32 @test13(i8** %esc) {
; CHECK-NEXT: ret i32 60
define i32 @test18() {
%bc = bitcast [60 x i8]* @globalalias to i8*
- %1 = call i32 @llvm.objectsize.i32.p0i8(i8* %bc, i1 false)
+ %1 = call i32 @llvm.objectsize.i32.p0i8(i8* %bc, i1 false, i1 false)
ret i32 %1
}
@@ -235,7 +237,67 @@ define i32 @test18() {
; CHECK: llvm.objectsize
define i32 @test19() {
%bc = bitcast [60 x i8]* @globalalias2 to i8*
- %1 = call i32 @llvm.objectsize.i32.p0i8(i8* %bc, i1 false)
+ %1 = call i32 @llvm.objectsize.i32.p0i8(i8* %bc, i1 false, i1 false)
+ ret i32 %1
+}
+
+; CHECK-LABEL: @test20(
+; CHECK: ret i32 0
+define i32 @test20() {
+ %1 = call i32 @llvm.objectsize.i32.p0i8(i8* null, i1 false, i1 false)
+ ret i32 %1
+}
+
+; CHECK-LABEL: @test21(
+; CHECK: ret i32 0
+define i32 @test21() {
+ %1 = call i32 @llvm.objectsize.i32.p0i8(i8* null, i1 true, i1 false)
+ ret i32 %1
+}
+
+; CHECK-LABEL: @test22(
+; CHECK: llvm.objectsize
+define i32 @test22() {
+ %1 = call i32 @llvm.objectsize.i32.p0i8(i8* null, i1 false, i1 true)
+ ret i32 %1
+}
+
+; CHECK-LABEL: @test23(
+; CHECK: llvm.objectsize
+define i32 @test23() {
+ %1 = call i32 @llvm.objectsize.i32.p0i8(i8* null, i1 true, i1 true)
ret i32 %1
}
+; 1 is an arbitrary non-zero address space.
+; CHECK-LABEL: @test24(
+; CHECK: ret i32 0
+define i32 @test24() {
+ %1 = call i32 @llvm.objectsize.i32.p1i8(i8 addrspace(1)* null, i1 false,
+ i1 false)
+ ret i32 %1
+}
+
+; CHECK-LABEL: @test25(
+; CHECK: ret i32 0
+define i32 @test25() {
+ %1 = call i32 @llvm.objectsize.i32.p1i8(i8 addrspace(1)* null, i1 true,
+ i1 false)
+ ret i32 %1
+}
+
+; CHECK-LABEL: @test26(
+; CHECK: ret i32 0
+define i32 @test26() {
+ %1 = call i32 @llvm.objectsize.i32.p1i8(i8 addrspace(1)* null, i1 false,
+ i1 true)
+ ret i32 %1
+}
+
+; CHECK-LABEL: @test27(
+; CHECK: ret i32 0
+define i32 @test27() {
+ %1 = call i32 @llvm.objectsize.i32.p1i8(i8 addrspace(1)* null, i1 true,
+ i1 true)
+ ret i32 %1
+}
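+
+; A summary of the null-pointer checks above, as these tests read: with the
+; new third i1 argument (null is unknown size) set to true, the calls on an
+; address-space-0 null in @test22 and @test23 survive, while every combination
+; in the non-zero address space still folds to 0.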
diff --git a/test/Transforms/InstCombine/or.ll b/test/Transforms/InstCombine/or.ll
index 2c9088428bde..41e6d2d1f827 100644
--- a/test/Transforms/InstCombine/or.ll
+++ b/test/Transforms/InstCombine/or.ll
@@ -207,19 +207,6 @@ define <2 x i1> @test18vec(<2 x i32> %A) {
ret <2 x i1> %D
}
-define i1 @test19(i32 %A) {
-; CHECK-LABEL: @test19(
-; CHECK-NEXT: [[TMP1:%.*]] = or i32 %A, 1
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 51
-; CHECK-NEXT: ret i1 [[TMP2]]
-;
- %B = icmp eq i32 %A, 50
- %C = icmp eq i32 %A, 51
- ;; (A&-2) == 50
- %D = or i1 %B, %C
- ret i1 %D
-}
-
define i32 @test20(i32 %x) {
; CHECK-LABEL: @test20(
; CHECK-NEXT: ret i32 %x
@@ -490,7 +477,7 @@ define i32 @orsext_to_sel_multi_use(i32 %x, i1 %y) {
; CHECK-LABEL: @orsext_to_sel_multi_use(
; CHECK-NEXT: [[SEXT:%.*]] = sext i1 %y to i32
; CHECK-NEXT: [[OR:%.*]] = or i32 [[SEXT]], %x
-; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SEXT]], [[OR]]
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[OR]], [[SEXT]]
; CHECK-NEXT: ret i32 [[ADD]]
;
%sext = sext i1 %y to i32
@@ -521,7 +508,7 @@ define <2 x i132> @orsext_to_sel_vec_swap(<2 x i132> %x, <2 x i1> %y) {
define i32 @test39(i32 %a, i32 %b) {
; CHECK-LABEL: @test39(
-; CHECK-NEXT: [[OR:%.*]] = or i32 %a, %b
+; CHECK-NEXT: [[OR:%.*]] = or i32 %b, %a
; CHECK-NEXT: ret i32 [[OR]]
;
%xor = xor i32 %a, -1
@@ -542,6 +529,42 @@ define i32 @test40(i32 %a, i32 %b) {
ret i32 %or
}
+define i32 @test40b(i32 %a, i32 %b) {
+; CHECK-LABEL: @test40b(
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 %a, -1
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[XOR]], %b
+; CHECK-NEXT: ret i32 [[OR]]
+;
+ %and = and i32 %b, %a
+ %xor = xor i32 %a, -1
+ %or = or i32 %and, %xor
+ ret i32 %or
+}
+
+define i32 @test40c(i32 %a, i32 %b) {
+; CHECK-LABEL: @test40c(
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 %a, -1
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[XOR]], %b
+; CHECK-NEXT: ret i32 [[OR]]
+;
+ %and = and i32 %b, %a
+ %xor = xor i32 %a, -1
+ %or = or i32 %xor, %and
+ ret i32 %or
+}
+
+define i32 @test40d(i32 %a, i32 %b) {
+; CHECK-LABEL: @test40d(
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 %a, -1
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[XOR]], %b
+; CHECK-NEXT: ret i32 [[OR]]
+;
+ %and = and i32 %a, %b
+ %xor = xor i32 %a, -1
+ %or = or i32 %xor, %and
+ ret i32 %or
+}
+
define i32 @test41(i32 %a, i32 %b) {
; CHECK-LABEL: @test41(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 %a, -1
@@ -701,3 +724,138 @@ define i1 @test48(i64 %x, i1 %b) {
%3 = or i1 %1, %.b
ret i1 %3
}
+
+define i32 @test49(i1 %C) {
+; CHECK-LABEL: @test49(
+; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], i32 1019, i32 123
+; CHECK-NEXT: ret i32 [[V]]
+;
+ %A = select i1 %C, i32 1000, i32 10
+ %V = or i32 %A, 123
+ ret i32 %V
+}
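+
+; The or above is folded into both select arms: 1000 | 123 = 1019, and
+; 10 | 123 = 123 since 123's set bits already cover 10's.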
+
+define <2 x i32> @test49vec(i1 %C) {
+; CHECK-LABEL: @test49vec(
+; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], <2 x i32> <i32 1019, i32 1019>, <2 x i32> <i32 123, i32 123>
+; CHECK-NEXT: ret <2 x i32> [[V]]
+;
+ %A = select i1 %C, <2 x i32> <i32 1000, i32 1000>, <2 x i32> <i32 10, i32 10>
+ %V = or <2 x i32> %A, <i32 123, i32 123>
+ ret <2 x i32> %V
+}
+
+define <2 x i32> @test49vec2(i1 %C) {
+; CHECK-LABEL: @test49vec2(
+; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], <2 x i32> <i32 1019, i32 2509>, <2 x i32> <i32 123, i32 351>
+; CHECK-NEXT: ret <2 x i32> [[V]]
+;
+ %A = select i1 %C, <2 x i32> <i32 1000, i32 2500>, <2 x i32> <i32 10, i32 30>
+ %V = or <2 x i32> %A, <i32 123, i32 333>
+ ret <2 x i32> %V
+}
+
+define i32 @test50(i1 %which) {
+; CHECK-LABEL: @test50(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
+; CHECK: delay:
+; CHECK-NEXT: br label [[FINAL]]
+; CHECK: final:
+; CHECK-NEXT: [[A:%.*]] = phi i32 [ 1019, [[ENTRY:%.*]] ], [ 123, [[DELAY]] ]
+; CHECK-NEXT: ret i32 [[A]]
+;
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+final:
+ %A = phi i32 [ 1000, %entry ], [ 10, %delay ]
+ %value = or i32 %A, 123
+ ret i32 %value
+}
+
+define <2 x i32> @test50vec(i1 %which) {
+; CHECK-LABEL: @test50vec(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
+; CHECK: delay:
+; CHECK-NEXT: br label [[FINAL]]
+; CHECK: final:
+; CHECK-NEXT: [[A:%.*]] = phi <2 x i32> [ <i32 1019, i32 1019>, [[ENTRY:%.*]] ], [ <i32 123, i32 123>, [[DELAY]] ]
+; CHECK-NEXT: ret <2 x i32> [[A]]
+;
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+final:
+ %A = phi <2 x i32> [ <i32 1000, i32 1000>, %entry ], [ <i32 10, i32 10>, %delay ]
+ %value = or <2 x i32> %A, <i32 123, i32 123>
+ ret <2 x i32> %value
+}
+
+define <2 x i32> @test50vec2(i1 %which) {
+; CHECK-LABEL: @test50vec2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
+; CHECK: delay:
+; CHECK-NEXT: br label [[FINAL]]
+; CHECK: final:
+; CHECK-NEXT: [[A:%.*]] = phi <2 x i32> [ <i32 1019, i32 2509>, [[ENTRY:%.*]] ], [ <i32 123, i32 351>, [[DELAY]] ]
+; CHECK-NEXT: ret <2 x i32> [[A]]
+;
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+final:
+ %A = phi <2 x i32> [ <i32 1000, i32 2500>, %entry ], [ <i32 10, i32 30>, %delay ]
+ %value = or <2 x i32> %A, <i32 123, i32 333>
+ ret <2 x i32> %value
+}
+
+define i8 @test51(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: @test51(
+; CHECK-NEXT: [[W:%.*]] = mul i8 [[B:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[X:%.*]] = or i8 [[W]], [[A:%.*]]
+; CHECK-NEXT: ret i8 [[X]]
+;
+ %w = mul i8 %b, %c
+ %z = xor i8 %a, -1
+ %y = and i8 %w, %z
+ %x = or i8 %y, %a
+ ret i8 %x
+}
+
+define i8 @test52(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: @test52(
+; CHECK-NEXT: [[W:%.*]] = mul i8 [[B:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[X:%.*]] = or i8 [[W]], [[A:%.*]]
+; CHECK-NEXT: ret i8 [[X]]
+;
+ %w = mul i8 %b, %c
+ %z = xor i8 %w, -1
+ %y = and i8 %z, %a
+ %x = or i8 %w, %y
+ ret i8 %x
+}
+
+define i8 @test53(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: @test53(
+; CHECK-NEXT: [[W:%.*]] = mul i8 [[B:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[X:%.*]] = or i8 [[W]], [[A:%.*]]
+; CHECK-NEXT: ret i8 [[X]]
+;
+ %w = mul i8 %b, %c
+ %z = xor i8 %w, -1
+ %y = and i8 %z, %a
+ %x = or i8 %w, %y
+ ret i8 %x
+}
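+
+; test51 exercises (X & ~A) | A --> X | A; test52 and test53 exercise the
+; related form X | (~X & A) --> X | A.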
diff --git a/test/Transforms/InstCombine/phi-select-constant.ll b/test/Transforms/InstCombine/phi-select-constant.ll
new file mode 100644
index 000000000000..272594d7f4f9
--- /dev/null
+++ b/test/Transforms/InstCombine/phi-select-constant.ll
@@ -0,0 +1,57 @@
+; RUN: opt < %s -S -instcombine | FileCheck %s
+@A = extern_weak global i32, align 4
+@B = extern_weak global i32, align 4
+
+define i32 @foo(i1 %which) {
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+; CHECK-LABEL: @foo
+; CHECK-LABEL: final:
+; CHECK: phi i32 [ 1, %entry ], [ select (i1 icmp eq (i32* @A, i32* @B), i32 2, i32 1), %delay ]
+final:
+ %use2 = phi i1 [ false, %entry ], [ icmp eq (i32* @A, i32* @B), %delay ]
+ %value = select i1 %use2, i32 2, i32 1
+ ret i32 %value
+}
+
+
+; Test folding of a select into a phi for vectors.
+define <4 x i64> @vec1(i1 %which) {
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+final:
+; CHECK-LABEL: @vec1
+; CHECK-LABEL: final:
+; CHECK: %phinode = phi <4 x i64> [ zeroinitializer, %entry ], [ <i64 0, i64 0, i64 126, i64 127>, %delay ]
+; CHECK-NOT: select
+; CHECK: ret <4 x i64> %phinode
+ %phinode = phi <4 x i1> [ <i1 true, i1 true, i1 true, i1 true>, %entry ], [ <i1 true, i1 true, i1 false, i1 false>, %delay ]
+ %sel = select <4 x i1> %phinode, <4 x i64> zeroinitializer, <4 x i64> <i64 124, i64 125, i64 126, i64 127>
+ ret <4 x i64> %sel
+}
+
+define <4 x i64> @vec2(i1 %which) {
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+final:
+; CHECK-LABEL: @vec2
+; CHECK-LABEL: final:
+; CHECK: %phinode = phi <4 x i64> [ <i64 124, i64 125, i64 126, i64 127>, %entry ], [ <i64 0, i64 125, i64 0, i64 127>, %delay ]
+; CHECK-NOT: select
+; CHECK: ret <4 x i64> %phinode
+ %phinode = phi <4 x i1> [ <i1 false, i1 false, i1 false, i1 false>, %entry ], [ <i1 true, i1 false, i1 true, i1 false>, %delay ]
+ %sel = select <4 x i1> %phinode, <4 x i64> zeroinitializer, <4 x i64> <i64 124, i64 125, i64 126, i64 127>
+ ret <4 x i64> %sel
+}
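+
+; In @vec2 the select folds lane-wise into the phi: a true lane picks the
+; zeroinitializer element and a false lane the matching constant, so
+; <i1 true, i1 false, i1 true, i1 false> becomes <i64 0, i64 125, i64 0, i64 127>.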
diff --git a/test/Transforms/InstCombine/phi-select-constexpr.ll b/test/Transforms/InstCombine/phi-select-constexpr.ll
deleted file mode 100644
index 054e0691d47a..000000000000
--- a/test/Transforms/InstCombine/phi-select-constexpr.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: opt < %s -S -instcombine | FileCheck %s
-@A = extern_weak global i32, align 4
-@B = extern_weak global i32, align 4
-
-define i32 @foo(i1 %which) {
-entry:
- br i1 %which, label %final, label %delay
-
-delay:
- br label %final
-
-; CHECK-LABEL: final:
-; CHECK: phi i32 [ 1, %entry ], [ select (i1 icmp eq (i32* @A, i32* @B), i32 2, i32 1), %delay ]
-final:
- %use2 = phi i1 [ false, %entry ], [ icmp eq (i32* @A, i32* @B), %delay ]
- %value = select i1 %use2, i32 2, i32 1
- ret i32 %value
-}
-
diff --git a/test/Transforms/InstCombine/pow-1.ll b/test/Transforms/InstCombine/pow-1.ll
index c9f71fd45721..602c20a1314b 100644
--- a/test/Transforms/InstCombine/pow-1.ll
+++ b/test/Transforms/InstCombine/pow-1.ll
@@ -72,7 +72,7 @@ define float @test_simplify7(float %x) {
; CHECK-LABEL: @test_simplify7(
%retval = call float @powf(float %x, float 0.5)
; CHECK-NEXT: [[SQRTF:%[a-z0-9]+]] = call float @sqrtf(float %x) [[NUW_RO:#[0-9]+]]
-; CHECK-NEXT: [[FABSF:%[a-z0-9]+]] = call float @fabsf(float [[SQRTF]]) [[NUW_RO]]
+; CHECK-NEXT: [[FABSF:%[a-z0-9]+]] = call float @llvm.fabs.f32(float [[SQRTF]])
; CHECK-NEXT: [[FCMP:%[a-z0-9]+]] = fcmp oeq float %x, 0xFFF0000000000000
; CHECK-NEXT: [[SELECT:%[a-z0-9]+]] = select i1 [[FCMP]], float 0x7FF0000000000000, float [[FABSF]]
ret float %retval
@@ -83,7 +83,7 @@ define double @test_simplify8(double %x) {
; CHECK-LABEL: @test_simplify8(
%retval = call double @pow(double %x, double 0.5)
; CHECK-NEXT: [[SQRT:%[a-z0-9]+]] = call double @sqrt(double %x) [[NUW_RO]]
-; CHECK-NEXT: [[FABS:%[a-z0-9]+]] = call double @fabs(double [[SQRT]]) [[NUW_RO]]
+; CHECK-NEXT: [[FABS:%[a-z0-9]+]] = call double @llvm.fabs.f64(double [[SQRT]])
; CHECK-NEXT: [[FCMP:%[a-z0-9]+]] = fcmp oeq double %x, 0xFFF0000000000000
; CHECK-NEXT: [[SELECT:%[a-z0-9]+]] = select i1 [[FCMP]], double 0x7FF0000000000000, double [[FABS]]
ret double %retval
@@ -163,7 +163,7 @@ define double @test_simplify17(double %x) {
; CHECK-LABEL: @test_simplify17(
%retval = call double @llvm.pow.f64(double %x, double 0.5)
; CHECK-NEXT: [[SQRT:%[a-z0-9]+]] = call double @sqrt(double %x)
-; CHECK-NEXT: [[FABS:%[a-z0-9]+]] = call double @fabs(double [[SQRT]])
+; CHECK-NEXT: [[FABS:%[a-z0-9]+]] = call double @llvm.fabs.f64(double [[SQRT]])
; CHECK-NEXT: [[FCMP:%[a-z0-9]+]] = fcmp oeq double %x, 0xFFF0000000000000
; CHECK-NEXT: [[SELECT:%[a-z0-9]+]] = select i1 [[FCMP]], double 0x7FF0000000000000, double [[FABS]]
ret double %retval
diff --git a/test/Transforms/InstCombine/pr17827.ll b/test/Transforms/InstCombine/pr17827.ll
index a3ed5e1697ec..ada6edab69c6 100644
--- a/test/Transforms/InstCombine/pr17827.ll
+++ b/test/Transforms/InstCombine/pr17827.ll
@@ -48,14 +48,14 @@ define i1 @test_shift_and_cmp_changed1(i8 %p, i8 %q) {
}
; FIXME: Vectors should fold the same way.
+
define <2 x i1> @test_shift_and_cmp_changed1_vec(<2 x i8> %p, <2 x i8> %q) {
; CHECK-LABEL: @test_shift_and_cmp_changed1_vec(
; CHECK-NEXT: [[ANDP:%.*]] = and <2 x i8> %p, <i8 6, i8 6>
; CHECK-NEXT: [[ANDQ:%.*]] = and <2 x i8> %q, <i8 8, i8 8>
; CHECK-NEXT: [[OR:%.*]] = or <2 x i8> [[ANDQ]], [[ANDP]]
; CHECK-NEXT: [[SHL:%.*]] = shl <2 x i8> [[OR]], <i8 5, i8 5>
-; CHECK-NEXT: [[ASHR:%.*]] = ashr <2 x i8> [[SHL]], <i8 5, i8 5>
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i8> [[ASHR]], <i8 1, i8 1>
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i8> [[SHL]], <i8 32, i8 32>
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%andp = and <2 x i8> %p, <i8 6, i8 6>
diff --git a/test/Transforms/InstCombine/pr19420.ll b/test/Transforms/InstCombine/pr19420.ll
index 23fa0a409745..015f35eaaa53 100644
--- a/test/Transforms/InstCombine/pr19420.ll
+++ b/test/Transforms/InstCombine/pr19420.ll
@@ -1,36 +1,44 @@
; RUN: opt -S -instcombine < %s | FileCheck %s
-; CHECK-LABEL: @test_FoldShiftByConstant_CreateSHL
-; CHECK: mul <4 x i32> %in, <i32 0, i32 -32, i32 0, i32 -32>
-; CHECK-NEXT: ret
define <4 x i32> @test_FoldShiftByConstant_CreateSHL(<4 x i32> %in) {
+; CHECK-LABEL: @test_FoldShiftByConstant_CreateSHL(
+; CHECK-NEXT: [[VSHL_N:%.*]] = mul <4 x i32> %in, <i32 0, i32 -32, i32 0, i32 -32>
+; CHECK-NEXT: ret <4 x i32> [[VSHL_N]]
+;
%mul.i = mul <4 x i32> %in, <i32 0, i32 -1, i32 0, i32 -1>
%vshl_n = shl <4 x i32> %mul.i, <i32 5, i32 5, i32 5, i32 5>
ret <4 x i32> %vshl_n
}
-; CHECK-LABEL: @test_FoldShiftByConstant_CreateSHL2
-; CHECK: mul <8 x i16> %in, <i16 0, i16 -32, i16 0, i16 -32, i16 0, i16 -32, i16 0, i16 -32>
-; CHECK-NEXT: ret
define <8 x i16> @test_FoldShiftByConstant_CreateSHL2(<8 x i16> %in) {
+; CHECK-LABEL: @test_FoldShiftByConstant_CreateSHL2(
+; CHECK-NEXT: [[VSHL_N:%.*]] = mul <8 x i16> %in, <i16 0, i16 -32, i16 0, i16 -32, i16 0, i16 -32, i16 0, i16 -32>
+; CHECK-NEXT: ret <8 x i16> [[VSHL_N]]
+;
%mul.i = mul <8 x i16> %in, <i16 0, i16 -1, i16 0, i16 -1, i16 0, i16 -1, i16 0, i16 -1>
%vshl_n = shl <8 x i16> %mul.i, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
ret <8 x i16> %vshl_n
}
-; CHECK-LABEL: @test_FoldShiftByConstant_CreateAnd
-; CHECK: mul <16 x i8> %in0, <i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33>
-; CHECK-NEXT: and <16 x i8> %vsra_n2, <i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32>
-; CHECK-NEXT: ret
define <16 x i8> @test_FoldShiftByConstant_CreateAnd(<16 x i8> %in0) {
+; CHECK-LABEL: @test_FoldShiftByConstant_CreateAnd(
+; CHECK-NEXT: [[VSRA_N2:%.*]] = mul <16 x i8> %in0, <i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33>
+; CHECK-NEXT: [[VSHL_N:%.*]] = and <16 x i8> [[VSRA_N2]], <i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32>
+; CHECK-NEXT: ret <16 x i8> [[VSHL_N]]
+;
%vsra_n = ashr <16 x i8> %in0, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
%tmp = add <16 x i8> %in0, %vsra_n
%vshl_n = shl <16 x i8> %tmp, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
ret <16 x i8> %vshl_n
}
-
define i32 @bar(i32 %x, i32 %y) {
+; CHECK-LABEL: @bar(
+; CHECK-NEXT: [[B1:%.*]] = shl i32 %y, 4
+; CHECK-NEXT: [[A2:%.*]] = add i32 [[B1]], %x
+; CHECK-NEXT: [[C:%.*]] = and i32 [[A2]], -16
+; CHECK-NEXT: ret i32 [[C]]
+;
%a = lshr i32 %x, 4
%b = add i32 %a, %y
%c = shl i32 %b, 4
@@ -38,16 +46,25 @@ define i32 @bar(i32 %x, i32 %y) {
}
define <2 x i32> @bar_v2i32(<2 x i32> %x, <2 x i32> %y) {
+; CHECK-LABEL: @bar_v2i32(
+; CHECK-NEXT: [[B1:%.*]] = shl <2 x i32> %y, <i32 5, i32 5>
+; CHECK-NEXT: [[A2:%.*]] = add <2 x i32> [[B1]], %x
+; CHECK-NEXT: [[C:%.*]] = and <2 x i32> [[A2]], <i32 -32, i32 -32>
+; CHECK-NEXT: ret <2 x i32> [[C]]
+;
%a = lshr <2 x i32> %x, <i32 5, i32 5>
%b = add <2 x i32> %a, %y
%c = shl <2 x i32> %b, <i32 5, i32 5>
ret <2 x i32> %c
}
-
-
-
define i32 @foo(i32 %x, i32 %y) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT: [[C1:%.*]] = shl i32 %y, 4
+; CHECK-NEXT: [[X_MASK:%.*]] = and i32 %x, 128
+; CHECK-NEXT: [[D:%.*]] = add i32 [[X_MASK]], [[C1]]
+; CHECK-NEXT: ret i32 [[D]]
+;
%a = lshr i32 %x, 4
%b = and i32 %a, 8
%c = add i32 %b, %y
@@ -56,6 +73,13 @@ define i32 @foo(i32 %x, i32 %y) {
}
define <2 x i32> @foo_v2i32(<2 x i32> %x, <2 x i32> %y) {
+; CHECK-LABEL: @foo_v2i32(
+; CHECK-NEXT: [[A:%.*]] = lshr <2 x i32> %x, <i32 4, i32 4>
+; CHECK-NEXT: [[B:%.*]] = and <2 x i32> [[A]], <i32 8, i32 8>
+; CHECK-NEXT: [[C:%.*]] = add <2 x i32> [[B]], %y
+; CHECK-NEXT: [[D:%.*]] = shl <2 x i32> [[C]], <i32 4, i32 4>
+; CHECK-NEXT: ret <2 x i32> [[D]]
+;
%a = lshr <2 x i32> %x, <i32 4, i32 4>
%b = and <2 x i32> %a, <i32 8, i32 8>
%c = add <2 x i32> %b, %y
@@ -63,5 +87,3 @@ define <2 x i32> @foo_v2i32(<2 x i32> %x, <2 x i32> %y) {
ret <2 x i32> %d
}
-
-
diff --git a/test/Transforms/InstCombine/pr31990_wrong_memcpy.ll b/test/Transforms/InstCombine/pr31990_wrong_memcpy.ll
new file mode 100644
index 000000000000..62ecd0311ffd
--- /dev/null
+++ b/test/Transforms/InstCombine/pr31990_wrong_memcpy.ll
@@ -0,0 +1,26 @@
+; RUN: opt -S -instcombine %s -o - | FileCheck %s
+
+; Regression test for PR31990. A memcpy of one byte, copying 0xff, was
+; wrongly replaced with a single store of an i4 0xf.
+
+@g = constant i8 -1
+
+define void @foo() {
+entry:
+ %0 = alloca i8
+ %1 = bitcast i8* %0 to i4*
+ call void @bar(i4* %1)
+ %2 = bitcast i4* %1 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %2, i8* @g, i32 1, i32 1, i1 false)
+ call void @gaz(i8* %2)
+ ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly,
+ i8* nocapture readonly, i32, i32, i1)
+declare void @bar(i4*)
+declare void @gaz(i8*)
+
+; The memcpy should be simplified to a single store of an i8, not an i4.
+; CHECK: store i8 -1
+; CHECK-NOT: store i4 -1
diff --git a/test/Transforms/InstCombine/prefetch-load.ll b/test/Transforms/InstCombine/prefetch-load.ll
new file mode 100644
index 000000000000..f98b7ae00bf1
--- /dev/null
+++ b/test/Transforms/InstCombine/prefetch-load.ll
@@ -0,0 +1,34 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+%struct.C = type { %struct.C*, i32 }
+
+; Check that instcombine can forward the stored value to the load across the prefetch.
+
+; CHECK-LABEL: define signext i32 @foo
+define signext i32 @foo(%struct.C* %c) local_unnamed_addr #0 {
+; CHECK: store i32 %dec, i32* %length_
+; CHECK-NOT: load
+; CHECK: llvm.prefetch
+; CHECK-NEXT: ret
+entry:
+ %next_ = getelementptr inbounds %struct.C, %struct.C* %c, i32 0, i32 0
+ %0 = load %struct.C*, %struct.C** %next_, align 8
+ %next_1 = getelementptr inbounds %struct.C, %struct.C* %0, i32 0, i32 0
+ %1 = load %struct.C*, %struct.C** %next_1, align 8
+ store %struct.C* %1, %struct.C** %next_, align 8
+ %length_ = getelementptr inbounds %struct.C, %struct.C* %c, i32 0, i32 1
+ %2 = load i32, i32* %length_, align 8
+ %dec = add nsw i32 %2, -1
+ store i32 %dec, i32* %length_, align 8
+ %3 = bitcast %struct.C* %1 to i8*
+ call void @llvm.prefetch(i8* %3, i32 0, i32 0, i32 1)
+ %4 = load i32, i32* %length_, align 8
+ ret i32 %4
+}
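+
+; llvm.prefetch is only a hint and does not modify memory, so the store to
+; %length_ can be forwarded to the final load across the call.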
+
+; Function Attrs: inaccessiblemem_or_argmemonly nounwind
+declare void @llvm.prefetch(i8* nocapture readonly, i32, i32, i32)
+
+attributes #0 = { noinline nounwind }
+; We've explicitly removed the function attrs from llvm.prefetch so we get the defaults.
+; attributes #1 = { inaccessiblemem_or_argmemonly nounwind }
diff --git a/test/Transforms/InstCombine/preserved-analyses.ll b/test/Transforms/InstCombine/preserved-analyses.ll
new file mode 100644
index 000000000000..767304aecf35
--- /dev/null
+++ b/test/Transforms/InstCombine/preserved-analyses.ll
@@ -0,0 +1,33 @@
+; This is really testing that instcombine preserves analyses correctly, so we
+; don't care much about the code other than that it is something instcombine
+; can transform.
+;
+; RUN: opt < %s -disable-output -debug-pass-manager 2>&1 -aa-pipeline=basic-aa,globals-aa \
+; RUN: -passes='require<globals-aa>,function(require<aa>,instcombine),function(require<aa>)' \
+; RUN: | FileCheck %s --check-prefix=AA
+; AA: Running analysis: GlobalsAA
+; AA: Running analysis: AAManager
+; AA: Running analysis: BasicAA
+; AA: Running pass: InstCombinePass on test
+; AA-NOT: Invalidating analysis: GlobalsAA
+; AA-NOT: Invalidating analysis: AAmanager
+; AA-NOT: Invalidating analysis: BasicAA
+; AA: Running pass: RequireAnalysisPass<{{.*}}AAManager
+; AA-NOT: Running analysis: GlobalsAA
+; AA-NOT: Running analysis: AAmanager
+; AA-NOT: Running analysis: BasicAA
+;
+; RUN: opt < %s -disable-output -debug-pass-manager 2>&1 \
+; RUN: -passes='require<domtree>,instcombine,require<domtree>' \
+; RUN: | FileCheck %s --check-prefix=DT
+; DT: Running analysis: DominatorTreeAnalysis
+; DT: Running pass: InstCombinePass on test
+; DT-NOT: Invalidating analysis: DominatorTreeAnalysis
+; DT: Running pass: RequireAnalysisPass<{{.*}}DominatorTreeAnalysis
+; DT-NOT: Running analysis: DominatorTreeAnalysis
+
+define i32 @test(i32 %A) {
+ %B = add i32 %A, 5
+ %C = add i32 %B, -5
+ ret i32 %C
+}
diff --git a/test/Transforms/InstCombine/readnone-maythrow.ll b/test/Transforms/InstCombine/readnone-maythrow.ll
new file mode 100644
index 000000000000..f01e90263a30
--- /dev/null
+++ b/test/Transforms/InstCombine/readnone-maythrow.ll
@@ -0,0 +1,34 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+declare void @readnone_but_may_throw() readnone
+
+define void @f_0(i32* %ptr) {
+; CHECK-LABEL: @f_0(
+entry:
+; CHECK: store i32 10, i32* %ptr
+; CHECK-NEXT: call void @readnone_but_may_throw()
+; CHECK-NEXT: store i32 20, i32* %ptr, align 4
+; CHECK: ret void
+
+ store i32 10, i32* %ptr
+ call void @readnone_but_may_throw()
+ store i32 20, i32* %ptr
+ ret void
+}
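+
+; Both stores above must stay: the callee is readnone but not nounwind, so if
+; it unwinds, the first store is observable and therefore not dead.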
+
+define void @f_1(i1 %cond, i32* %ptr) {
+; CHECK-LABEL: @f_1(
+; CHECK: store i32 10, i32* %ptr
+; CHECK-NEXT: call void @readnone_but_may_throw()
+
+ store i32 10, i32* %ptr
+ call void @readnone_but_may_throw()
+ br i1 %cond, label %left, label %merge
+
+left:
+ store i32 20, i32* %ptr
+ br label %merge
+
+merge:
+ ret void
+}
diff --git a/test/Transforms/InstCombine/rem.ll b/test/Transforms/InstCombine/rem.ll
index 89a741c90707..7a7a134db9c5 100644
--- a/test/Transforms/InstCombine/rem.ll
+++ b/test/Transforms/InstCombine/rem.ll
@@ -1,28 +1,169 @@
-; This test makes sure that rem instructions are properly eliminated.
-;
; RUN: opt < %s -instcombine -S | FileCheck %s
-; END.
+
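+; The div-mul-sub idiom (names illustrative)
+;   %q = sdiv i64 %x, %y
+;   %p = mul i64 %q, %y
+;   %r = sub i64 %x, %p
+; folds to a single srem (urem for the unsigned variants), as checked below.
+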
+define i64 @rem_signed(i64 %x1, i64 %y2) {
+; CHECK-LABEL: @rem_signed(
+; CHECK-NEXT: [[R:%.*]] = srem i64 %x1, %y2
+; CHECK-NEXT: ret i64 [[R]]
+;
+ %r = sdiv i64 %x1, %y2
+ %r7 = mul i64 %r, %y2
+ %r8 = sub i64 %x1, %r7
+ ret i64 %r8
+}
+
+define <4 x i32> @rem_signed_vec(<4 x i32> %t, <4 x i32> %u) {
+; CHECK-LABEL: @rem_signed_vec(
+; CHECK-NEXT: [[K:%.*]] = srem <4 x i32> %t, %u
+; CHECK-NEXT: ret <4 x i32> [[K]]
+;
+ %k = sdiv <4 x i32> %t, %u
+ %l = mul <4 x i32> %k, %u
+ %m = sub <4 x i32> %t, %l
+ ret <4 x i32> %m
+}
+
+define i64 @rem_unsigned(i64 %x1, i64 %y2) {
+; CHECK-LABEL: @rem_unsigned(
+; CHECK-NEXT: [[R:%.*]] = urem i64 %x1, %y2
+; CHECK-NEXT: ret i64 [[R]]
+;
+ %r = udiv i64 %x1, %y2
+ %r7 = mul i64 %r, %y2
+ %r8 = sub i64 %x1, %r7
+ ret i64 %r8
+}
+
+; PR28672 - https://llvm.org/bugs/show_bug.cgi?id=28672
+
+define i8 @big_divisor(i8 %x) {
+; CHECK-LABEL: @big_divisor(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i8 %x, -127
+; CHECK-NEXT: [[TMP2:%.*]] = add i8 %x, 127
+; CHECK-NEXT: [[REM:%.*]] = select i1 [[TMP1]], i8 %x, i8 [[TMP2]]
+; CHECK-NEXT: ret i8 [[REM]]
+;
+ %rem = urem i8 %x, 129
+ ret i8 %rem
+}
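+
+; The transform above: for a divisor C above half the unsigned range,
+; X urem C == (X < C ? X : X - C). Here C = 129 (i8 -127 as unsigned), and
+; adding 127 subtracts 129 modulo 256.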
+
+define i5 @biggest_divisor(i5 %x) {
+; CHECK-LABEL: @biggest_divisor(
+; CHECK-NEXT: [[NOT_:%.*]] = icmp eq i5 %x, -1
+; CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[NOT_]] to i5
+; CHECK-NEXT: [[REM:%.*]] = add i5 [[TMP1]], %x
+; CHECK-NEXT: ret i5 [[REM]]
+;
+ %rem = urem i5 %x, -1
+ ret i5 %rem
+}
+
+define <2 x i4> @big_divisor_vec(<2 x i4> %x) {
+; CHECK-LABEL: @big_divisor_vec(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <2 x i4> [[X:%.*]], <i4 -3, i4 -3>
+; CHECK-NEXT: [[TMP2:%.*]] = add <2 x i4> [[X]], <i4 3, i4 3>
+; CHECK-NEXT: [[REM:%.*]] = select <2 x i1> [[TMP1]], <2 x i4> [[X]], <2 x i4> [[TMP2]]
+; CHECK-NEXT: ret <2 x i4> [[REM]]
+;
+ %rem = urem <2 x i4> %x, <i4 13, i4 13>
+ ret <2 x i4> %rem
+}
+
+define i8 @urem1(i8 %x, i8 %y) {
+; CHECK-LABEL: @urem1(
+; CHECK-NEXT: [[A:%.*]] = urem i8 %x, %y
+; CHECK-NEXT: ret i8 [[A]]
+;
+ %A = udiv i8 %x, %y
+ %B = mul i8 %A, %y
+ %C = sub i8 %x, %B
+ ret i8 %C
+}
+
+define i8 @srem1(i8 %x, i8 %y) {
+; CHECK-LABEL: @srem1(
+; CHECK-NEXT: [[A:%.*]] = srem i8 %x, %y
+; CHECK-NEXT: ret i8 [[A]]
+;
+ %A = sdiv i8 %x, %y
+ %B = mul i8 %A, %y
+ %C = sub i8 %x, %B
+ ret i8 %C
+}
+
+define i8 @urem2(i8 %x, i8 %y) {
+; CHECK-LABEL: @urem2(
+; CHECK-NEXT: [[A:%.*]] = urem i8 %x, %y
+; CHECK-NEXT: [[C:%.*]] = sub i8 0, [[A]]
+; CHECK-NEXT: ret i8 [[C]]
+;
+ %A = udiv i8 %x, %y
+ %B = mul i8 %A, %y
+ %C = sub i8 %B, %x
+ ret i8 %C
+}
+
+define i8 @urem3(i8 %x) {
+; CHECK-LABEL: @urem3(
+; CHECK-NEXT: [[A:%.*]] = urem i8 %x, 3
+; CHECK-NEXT: [[B1:%.*]] = sub i8 %x, [[A]]
+; CHECK-NEXT: [[C:%.*]] = add i8 [[B1]], %x
+; CHECK-NEXT: ret i8 [[C]]
+;
+ %A = udiv i8 %x, 3
+ %B = mul i8 %A, -3
+ %C = sub i8 %x, %B
+ ret i8 %C
+}
+
+; (((X / Y) * Y) / Y) -> X / Y
+
+define i32 @sdiv_mul_sdiv(i32 %x, i32 %y) {
+; CHECK-LABEL: @sdiv_mul_sdiv(
+; CHECK-NEXT: [[R:%.*]] = sdiv i32 %x, %y
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %div = sdiv i32 %x, %y
+ %mul = mul i32 %div, %y
+ %r = sdiv i32 %mul, %y
+ ret i32 %r
+}
+
+; (((X / Y) * Y) / Y) -> X / Y
+
+define i32 @udiv_mul_udiv(i32 %x, i32 %y) {
+; CHECK-LABEL: @udiv_mul_udiv(
+; CHECK-NEXT: [[R:%.*]] = udiv i32 %x, %y
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %div = udiv i32 %x, %y
+ %mul = mul i32 %div, %y
+ %r = udiv i32 %mul, %y
+ ret i32 %r
+}
define i32 @test1(i32 %A) {
; CHECK-LABEL: @test1(
-; CHECK-NEXT: ret i32 0
- %B = srem i32 %A, 1 ; ISA constant 0
- ret i32 %B
+; CHECK-NEXT: ret i32 0
+;
+ %B = srem i32 %A, 1 ; is a constant 0
+ ret i32 %B
}
define i32 @test2(i32 %A) { ; 0 % X = 0, we don't need to preserve traps
; CHECK-LABEL: @test2(
-; CHECK-NEXT: ret i32 0
- %B = srem i32 0, %A
- ret i32 %B
+; CHECK-NEXT: ret i32 0
+;
+ %B = srem i32 0, %A
+ ret i32 %B
}
define i32 @test3(i32 %A) {
; CHECK-LABEL: @test3(
-; CHECK-NEXT: [[AND:%.*]] = and i32 %A, 7
-; CHECK-NEXT: ret i32 [[AND]]
- %B = urem i32 %A, 8
- ret i32 %B
+; CHECK-NEXT: [[B:%.*]] = and i32 %A, 7
+; CHECK-NEXT: ret i32 [[B]]
+;
+ %B = urem i32 %A, 8
+ ret i32 %B
}
define <2 x i32> @vec_power_of_2_constant_splat_divisor(<2 x i32> %A) {
@@ -45,12 +186,13 @@ define <2 x i19> @weird_vec_power_of_2_constant_splat_divisor(<2 x i19> %A) {
define i1 @test3a(i32 %A) {
; CHECK-LABEL: @test3a(
-; CHECK-NEXT: [[AND:%.*]] = and i32 %A, 7
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[AND]], 0
-; CHECK-NEXT: ret i1 [[CMP]]
- %B = srem i32 %A, -8
- %C = icmp ne i32 %B, 0
- ret i1 %C
+; CHECK-NEXT: [[B1:%.*]] = and i32 %A, 7
+; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[B1]], 0
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %B = srem i32 %A, -8
+ %C = icmp ne i32 %B, 0
+ ret i1 %C
}
define <2 x i1> @test3a_vec(<2 x i32> %A) {
@@ -66,201 +208,221 @@ define <2 x i1> @test3a_vec(<2 x i32> %A) {
define i32 @test4(i32 %X, i1 %C) {
; CHECK-LABEL: @test4(
-; CHECK-NEXT: [[SEL:%.*]] = select i1 %C, i32 0, i32 7
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[SEL]], %X
- %V = select i1 %C, i32 1, i32 8
- %R = urem i32 %X, %V
- ret i32 %R
+; CHECK-NEXT: [[TMP1:%.*]] = select i1 %C, i32 0, i32 7
+; CHECK-NEXT: [[R:%.*]] = and i32 [[TMP1]], %X
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %V = select i1 %C, i32 1, i32 8
+ %R = urem i32 %X, %V
+ ret i32 %R
}
define i32 @test5(i32 %X, i8 %B) {
; CHECK-LABEL: @test5(
-; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 %B to i32
-; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 32, [[ZEXT]]
-; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SHL]], -1
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[ADD]], %X
-; CHECK-NEXT: ret i32 [[AND]]
- %shift.upgrd.1 = zext i8 %B to i32
- %Amt = shl i32 32, %shift.upgrd.1
- %V = urem i32 %X, %Amt
- ret i32 %V
+; CHECK-NEXT: [[SHIFT_UPGRD_1:%.*]] = zext i8 %B to i32
+; CHECK-NEXT: [[AMT:%.*]] = shl nuw i32 32, [[SHIFT_UPGRD_1]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[AMT]], -1
+; CHECK-NEXT: [[V:%.*]] = and i32 [[TMP1]], %X
+; CHECK-NEXT: ret i32 [[V]]
+;
+ %shift.upgrd.1 = zext i8 %B to i32
+ %Amt = shl i32 32, %shift.upgrd.1
+ %V = urem i32 %X, %Amt
+ ret i32 %V
}
define i32 @test6(i32 %A) {
; CHECK-LABEL: @test6(
-; CHECK-NEXT: ret i32 undef
- %B = srem i32 %A, 0 ;; undef
- ret i32 %B
+; CHECK-NEXT: ret i32 undef
+;
+ %B = srem i32 %A, 0 ;; undef
+ ret i32 %B
}
define i32 @test7(i32 %A) {
; CHECK-LABEL: @test7(
-; CHECK-NEXT: ret i32 0
- %B = mul i32 %A, 8
- %C = srem i32 %B, 4
- ret i32 %C
+; CHECK-NEXT: ret i32 0
+;
+ %B = mul i32 %A, 8
+ %C = srem i32 %B, 4
+ ret i32 %C
}
define i32 @test8(i32 %A) {
; CHECK-LABEL: @test8(
-; CHECK-NEXT: ret i32 0
- %B = shl i32 %A, 4
- %C = srem i32 %B, 8
- ret i32 %C
+; CHECK-NEXT: ret i32 0
+;
+ %B = shl i32 %A, 4
+ %C = srem i32 %B, 8
+ ret i32 %C
}
define i32 @test9(i32 %A) {
; CHECK-LABEL: @test9(
-; CHECK-NEXT: ret i32 0
- %B = mul i32 %A, 64
- %C = urem i32 %B, 32
- ret i32 %C
+; CHECK-NEXT: ret i32 0
+;
+ %B = mul i32 %A, 64
+ %C = urem i32 %B, 32
+ ret i32 %C
}
define i32 @test10(i8 %c) {
; CHECK-LABEL: @test10(
-; CHECK-NEXT: ret i32 0
- %tmp.1 = zext i8 %c to i32
- %tmp.2 = mul i32 %tmp.1, 4
- %tmp.3 = sext i32 %tmp.2 to i64
- %tmp.5 = urem i64 %tmp.3, 4
- %tmp.6 = trunc i64 %tmp.5 to i32
- ret i32 %tmp.6
+; CHECK-NEXT: ret i32 0
+;
+ %tmp.1 = zext i8 %c to i32
+ %tmp.2 = mul i32 %tmp.1, 4
+ %tmp.3 = sext i32 %tmp.2 to i64
+ %tmp.5 = urem i64 %tmp.3, 4
+ %tmp.6 = trunc i64 %tmp.5 to i32
+ ret i32 %tmp.6
}
define i32 @test11(i32 %i) {
; CHECK-LABEL: @test11(
-; CHECK-NEXT: ret i32 0
- %tmp.1 = and i32 %i, -2
- %tmp.3 = mul i32 %tmp.1, 2
- %tmp.5 = urem i32 %tmp.3, 4
- ret i32 %tmp.5
+; CHECK-NEXT: ret i32 0
+;
+ %tmp.1 = and i32 %i, -2
+ %tmp.3 = mul i32 %tmp.1, 2
+ %tmp.5 = urem i32 %tmp.3, 4
+ ret i32 %tmp.5
}
define i32 @test12(i32 %i) {
; CHECK-LABEL: @test12(
-; CHECK-NEXT: ret i32 0
- %tmp.1 = and i32 %i, -4
- %tmp.5 = srem i32 %tmp.1, 2
- ret i32 %tmp.5
+; CHECK-NEXT: ret i32 0
+;
+ %tmp.1 = and i32 %i, -4
+ %tmp.5 = srem i32 %tmp.1, 2
+ ret i32 %tmp.5
}
define i32 @test13(i32 %i) {
; CHECK-LABEL: @test13(
-; CHECK-NEXT: ret i32 0
- %x = srem i32 %i, %i
- ret i32 %x
+; CHECK-NEXT: ret i32 0
+;
+ %x = srem i32 %i, %i
+ ret i32 %x
}
define i64 @test14(i64 %x, i32 %y) {
; CHECK-LABEL: @test14(
-; CHECK-NEXT: [[SHL:%.*]] = shl i32 1, %y
-; CHECK-NEXT: [[ZEXT:%.*]] = zext i32 [[SHL]] to i64
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[ZEXT]], -1
-; CHECK-NEXT: [[AND:%.*]] = and i64 [[ADD]], %x
-; CHECK-NEXT: ret i64 [[AND]]
- %shl = shl i32 1, %y
- %zext = zext i32 %shl to i64
- %urem = urem i64 %x, %zext
- ret i64 %urem
+; CHECK-NEXT: [[SHL:%.*]] = shl i32 1, %y
+; CHECK-NEXT: [[ZEXT:%.*]] = zext i32 [[SHL]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = add nsw i64 [[ZEXT]], -1
+; CHECK-NEXT: [[UREM:%.*]] = and i64 [[TMP1]], %x
+; CHECK-NEXT: ret i64 [[UREM]]
+;
+ %shl = shl i32 1, %y
+ %zext = zext i32 %shl to i64
+ %urem = urem i64 %x, %zext
+ ret i64 %urem
}
define i64 @test15(i32 %x, i32 %y) {
; CHECK-LABEL: @test15(
-; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 1, %y
-; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SHL]], -1
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[ADD]], %x
-; CHECK-NEXT: [[ZEXT:%.*]] = zext i32 [[AND]] to i64
-; CHECK-NEXT: ret i64 [[ZEXT]]
- %shl = shl i32 1, %y
- %zext0 = zext i32 %shl to i64
- %zext1 = zext i32 %x to i64
- %urem = urem i64 %zext1, %zext0
- ret i64 %urem
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 1, %y
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[SHL]], -1
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], %x
+; CHECK-NEXT: [[UREM:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT: ret i64 [[UREM]]
+;
+ %shl = shl i32 1, %y
+ %zext0 = zext i32 %shl to i64
+ %zext1 = zext i32 %x to i64
+ %urem = urem i64 %zext1, %zext0
+ ret i64 %urem
}
define i32 @test16(i32 %x, i32 %y) {
; CHECK-LABEL: @test16(
-; CHECK-NEXT: [[SHR:%.*]] = lshr i32 %y, 11
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHR]], 4
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[AND]], 3
-; CHECK-NEXT: [[REM:%.*]] = and i32 [[OR]], %x
-; CHECK-NEXT: ret i32 [[REM]]
- %shr = lshr i32 %y, 11
- %and = and i32 %shr, 4
- %add = add i32 %and, 4
- %rem = urem i32 %x, %add
- ret i32 %rem
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 %y, 11
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHR]], 4
+; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[AND]], 3
+; CHECK-NEXT: [[REM:%.*]] = and i32 [[TMP1]], %x
+; CHECK-NEXT: ret i32 [[REM]]
+;
+ %shr = lshr i32 %y, 11
+ %and = and i32 %shr, 4
+ %add = add i32 %and, 4
+ %rem = urem i32 %x, %add
+ ret i32 %rem
}
define i32 @test17(i32 %X) {
; CHECK-LABEL: @test17(
-; CHECK-NEXT: icmp ne i32 %X, 1
-; CHECK-NEXT: zext i1
-; CHECK-NEXT: ret
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32 %X, 1
+; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
%A = urem i32 1, %X
ret i32 %A
}
define i32 @test18(i16 %x, i32 %y) {
-; CHECK: @test18
-; CHECK-NEXT: [[SHL:%.*]] = shl i16 %x, 3
-; CHECK-NEXT: [[AND:%.*]] = and i16 [[SHL]], 32
-; CHECK-NEXT: [[XOR:%.*]] = xor i16 [[AND]], 63
-; CHECK-NEXT: [[EXT:%.*]] = zext i16 [[XOR]] to i32
-; CHECK-NEXT: [[REM:%.*]] = and i32 [[EXT]], %y
-; CHECK-NEXT: ret i32 [[REM]]
- %1 = and i16 %x, 4
- %2 = icmp ne i16 %1, 0
- %3 = select i1 %2, i32 32, i32 64
- %4 = urem i32 %y, %3
- ret i32 %4
+; CHECK-LABEL: @test18(
+; CHECK-NEXT: [[TMP1:%.*]] = shl i16 %x, 3
+; CHECK-NEXT: [[TMP2:%.*]] = and i16 [[TMP1]], 32
+; CHECK-NEXT: [[TMP3:%.*]] = xor i16 [[TMP2]], 63
+; CHECK-NEXT: [[TMP4:%.*]] = zext i16 [[TMP3]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = and i32 [[TMP4]], %y
+; CHECK-NEXT: ret i32 [[TMP5]]
+;
+ %1 = and i16 %x, 4
+ %2 = icmp ne i16 %1, 0
+ %3 = select i1 %2, i32 32, i32 64
+ %4 = urem i32 %y, %3
+ ret i32 %4
}
define i32 @test19(i32 %x, i32 %y) {
-; CHECK: @test19
-; CHECK-NEXT: [[SHL1:%.*]] = shl i32 1, %x
-; CHECK-NEXT: [[SHL2:%.*]] = shl i32 1, %y
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHL1]], [[SHL2]]
-; CHECK-NEXT: [[ADD:%.*]] = add i32 [[AND]], [[SHL1]]
-; CHECK-NEXT: [[SUB:%.*]] = add i32 [[ADD]], -1
-; CHECK-NEXT: [[REM:%.*]] = and i32 [[SUB]], %y
-; CHECK-NEXT: ret i32 [[REM]]
- %A = shl i32 1, %x
- %B = shl i32 1, %y
- %C = and i32 %A, %B
- %D = add i32 %C, %A
- %E = urem i32 %y, %D
- ret i32 %E
+; CHECK-LABEL: @test19(
+; CHECK-NEXT: [[A:%.*]] = shl i32 1, %x
+; CHECK-NEXT: [[B:%.*]] = shl i32 1, %y
+; CHECK-NEXT: [[C:%.*]] = and i32 [[A]], [[B]]
+; CHECK-NEXT: [[D:%.*]] = add i32 [[C]], [[A]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[D]], -1
+; CHECK-NEXT: [[E:%.*]] = and i32 [[TMP1]], %y
+; CHECK-NEXT: ret i32 [[E]]
+;
+ %A = shl i32 1, %x
+ %B = shl i32 1, %y
+ %C = and i32 %A, %B
+ %D = add i32 %C, %A
+ %E = urem i32 %y, %D
+ ret i32 %E
}
define <2 x i64> @test20(<2 x i64> %X, <2 x i1> %C) {
; CHECK-LABEL: @test20(
-; CHECK-NEXT: select <2 x i1> %C, <2 x i64> <i64 1, i64 2>, <2 x i64> zeroinitializer
-; CHECK-NEXT: ret <2 x i64>
- %V = select <2 x i1> %C, <2 x i64> <i64 1, i64 2>, <2 x i64> <i64 8, i64 9>
- %R = urem <2 x i64> %V, <i64 2, i64 3>
- ret <2 x i64> %R
+; CHECK-NEXT: [[R:%.*]] = select <2 x i1> %C, <2 x i64> <i64 1, i64 2>, <2 x i64> zeroinitializer
+; CHECK-NEXT: ret <2 x i64> [[R]]
+;
+ %V = select <2 x i1> %C, <2 x i64> <i64 1, i64 2>, <2 x i64> <i64 8, i64 9>
+ %R = urem <2 x i64> %V, <i64 2, i64 3>
+ ret <2 x i64> %R
}
-define i32 @test21(i1 %c0, i32* %val) {
+define i32 @test21(i1 %c0, i32* %p) {
; CHECK-LABEL: @test21(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 %c0, label %if.then, label %if.end
+; CHECK: if.then:
+; CHECK-NEXT: [[V:%.*]] = load volatile i32, i32* %p, align 4
+; CHECK-NEXT: [[PHITMP:%.*]] = srem i32 [[V]], 5
+; CHECK-NEXT: br label %if.end
+; CHECK: if.end:
+; CHECK-NEXT: [[LHS:%.*]] = phi i32 [ [[PHITMP]], %if.then ], [ 0, %entry ]
+; CHECK-NEXT: ret i32 [[LHS]]
+;
entry:
br i1 %c0, label %if.then, label %if.end
if.then:
-; CHECK: if.then:
-; CHECK-NEXT: %v = load volatile i32, i32* %val, align 4
-; CHECK-NEXT: %phitmp = srem i32 %v, 5
-
- %v = load volatile i32, i32* %val
+ %v = load volatile i32, i32* %p
br label %if.end
if.end:
-; CHECK: if.end:
-; CHECK-NEXT: %lhs = phi i32 [ %phitmp, %if.then ], [ 0, %entry ]
-; CHECK-NEXT: ret i32 %lhs
-
%lhs = phi i32 [ %v, %if.then ], [ 5, %entry ]
%rem = srem i32 %lhs, 5
ret i32 %rem
@@ -269,28 +431,34 @@ if.end:
@a = common global [5 x i16] zeroinitializer, align 2
@b = common global i16 0, align 2
-define i32 @pr27968_0(i1 %c0, i32* %val) {
+define i32 @pr27968_0(i1 %c0, i32* %p) {
; CHECK-LABEL: @pr27968_0(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 %c0, label %if.then, label %if.end
+; CHECK: if.then:
+; CHECK-NEXT: [[V:%.*]] = load volatile i32, i32* %p, align 4
+; CHECK-NEXT: br label %if.end
+; CHECK: if.end:
+; CHECK-NEXT: [[LHS:%.*]] = phi i32 [ [[V]], %if.then ], [ 5, %entry ]
+; CHECK-NEXT: br i1 icmp eq (i16* getelementptr inbounds ([5 x i16], [5 x i16]* @a, i64 0, i64 4), i16* @b), label [[REM]].is.safe, label [[REM]].is.unsafe
+; CHECK: rem.is.safe:
+; CHECK-NEXT: [[REM:%.*]] = srem i32 [[LHS]], zext (i1 icmp eq (i16* getelementptr inbounds ([5 x i16], [5 x i16]* @a, i64 0, i64 4), i16* @b) to i32)
+; CHECK-NEXT: ret i32 [[REM]]
+; CHECK: rem.is.unsafe:
+; CHECK-NEXT: ret i32 0
+;
entry:
br i1 %c0, label %if.then, label %if.end
if.then:
- %v = load volatile i32, i32* %val
+ %v = load volatile i32, i32* %p
br label %if.end
-; CHECK: if.then:
-; CHECK-NOT: srem
-; CHECK: br label %if.end
-
if.end:
%lhs = phi i32 [ %v, %if.then ], [ 5, %entry ]
br i1 icmp eq (i16* getelementptr inbounds ([5 x i16], [5 x i16]* @a, i64 0, i64 4), i16* @b), label %rem.is.safe, label %rem.is.unsafe
rem.is.safe:
-; CHECK: rem.is.safe:
-; CHECK-NEXT: %rem = srem i32 %lhs, zext (i1 icmp eq (i16* getelementptr inbounds ([5 x i16], [5 x i16]* @a, i64 0, i64 4), i16* @b) to i32)
-; CHECK-NEXT: ret i32 %rem
-
%rem = srem i32 %lhs, zext (i1 icmp eq (i16* getelementptr inbounds ([5 x i16], [5 x i16]* @a, i64 0, i64 4), i16* @b) to i32)
ret i32 %rem
@@ -298,19 +466,29 @@ rem.is.unsafe:
ret i32 0
}
-define i32 @pr27968_1(i1 %c0, i1 %always_false, i32* %val) {
+define i32 @pr27968_1(i1 %c0, i1 %always_false, i32* %p) {
; CHECK-LABEL: @pr27968_1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 %c0, label %if.then, label %if.end
+; CHECK: if.then:
+; CHECK-NEXT: [[V:%.*]] = load volatile i32, i32* %p, align 4
+; CHECK-NEXT: br label %if.end
+; CHECK: if.end:
+; CHECK-NEXT: [[LHS:%.*]] = phi i32 [ [[V]], %if.then ], [ 5, %entry ]
+; CHECK-NEXT: br i1 %always_false, label [[REM]].is.safe, label [[REM]].is.unsafe
+; CHECK: rem.is.safe:
+; CHECK-NEXT: [[REM:%.*]] = srem i32 [[LHS]], -2147483648
+; CHECK-NEXT: ret i32 [[REM]]
+; CHECK: rem.is.unsafe:
+; CHECK-NEXT: ret i32 0
+;
entry:
br i1 %c0, label %if.then, label %if.end
if.then:
- %v = load volatile i32, i32* %val
+ %v = load volatile i32, i32* %p
br label %if.end
-; CHECK: if.then:
-; CHECK-NOT: srem
-; CHECK: br label %if.end
-
if.end:
%lhs = phi i32 [ %v, %if.then ], [ 5, %entry ]
br i1 %always_false, label %rem.is.safe, label %rem.is.unsafe
@@ -319,36 +497,38 @@ rem.is.safe:
%rem = srem i32 %lhs, -2147483648
ret i32 %rem
-; CHECK: rem.is.safe:
-; CHECK-NEXT: %rem = srem i32 %lhs, -2147483648
-; CHECK-NEXT: ret i32 %rem
-
rem.is.unsafe:
ret i32 0
}
-define i32 @pr27968_2(i1 %c0, i32* %val) {
+define i32 @pr27968_2(i1 %c0, i32* %p) {
; CHECK-LABEL: @pr27968_2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 %c0, label %if.then, label %if.end
+; CHECK: if.then:
+; CHECK-NEXT: [[V:%.*]] = load volatile i32, i32* %p, align 4
+; CHECK-NEXT: br label %if.end
+; CHECK: if.end:
+; CHECK-NEXT: [[LHS:%.*]] = phi i32 [ [[V]], %if.then ], [ 5, %entry ]
+; CHECK-NEXT: br i1 icmp eq (i16* getelementptr inbounds ([5 x i16], [5 x i16]* @a, i64 0, i64 4), i16* @b), label [[REM]].is.safe, label [[REM]].is.unsafe
+; CHECK: rem.is.safe:
+; CHECK-NEXT: [[REM:%.*]] = urem i32 [[LHS]], zext (i1 icmp eq (i16* getelementptr inbounds ([5 x i16], [5 x i16]* @a, i64 0, i64 4), i16* @b) to i32)
+; CHECK-NEXT: ret i32 [[REM]]
+; CHECK: rem.is.unsafe:
+; CHECK-NEXT: ret i32 0
+;
entry:
br i1 %c0, label %if.then, label %if.end
if.then:
- %v = load volatile i32, i32* %val
+ %v = load volatile i32, i32* %p
br label %if.end
-; CHECK: if.then:
-; CHECK-NOT: urem
-; CHECK: br label %if.end
-
if.end:
%lhs = phi i32 [ %v, %if.then ], [ 5, %entry ]
br i1 icmp eq (i16* getelementptr inbounds ([5 x i16], [5 x i16]* @a, i64 0, i64 4), i16* @b), label %rem.is.safe, label %rem.is.unsafe
rem.is.safe:
-; CHECK: rem.is.safe:
-; CHECK-NEXT: %rem = urem i32 %lhs, zext (i1 icmp eq (i16* getelementptr inbounds ([5 x i16], [5 x i16]* @a, i64 0, i64 4), i16* @b) to i32)
-; CHECK-NEXT: ret i32 %rem
-
%rem = urem i32 %lhs, zext (i1 icmp eq (i16* getelementptr inbounds ([5 x i16], [5 x i16]* @a, i64 0, i64 4), i16* @b) to i32)
ret i32 %rem
@@ -356,20 +536,29 @@ rem.is.unsafe:
ret i32 0
}
-define i32 @pr27968_3(i1 %c0, i1 %always_false, i32* %val) {
+define i32 @pr27968_3(i1 %c0, i1 %always_false, i32* %p) {
; CHECK-LABEL: @pr27968_3(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 %c0, label %if.then, label %if.end
+; CHECK: if.then:
+; CHECK-NEXT: [[V:%.*]] = load volatile i32, i32* %p, align 4
+; CHECK-NEXT: [[PHITMP:%.*]] = and i32 [[V]], 2147483647
+; CHECK-NEXT: br label %if.end
+; CHECK: if.end:
+; CHECK-NEXT: [[LHS:%.*]] = phi i32 [ [[PHITMP]], %if.then ], [ 5, %entry ]
+; CHECK-NEXT: br i1 %always_false, label %rem.is.safe, label %rem.is.unsafe
+; CHECK: rem.is.safe:
+; CHECK-NEXT: ret i32 [[LHS]]
+; CHECK: rem.is.unsafe:
+; CHECK-NEXT: ret i32 0
+;
entry:
br i1 %c0, label %if.then, label %if.end
if.then:
- %v = load volatile i32, i32* %val
+ %v = load volatile i32, i32* %p
br label %if.end
-; CHECK: if.then:
-; CHECK-NEXT: %v = load volatile i32, i32* %val, align 4
-; CHECK-NEXT: %phitmp = and i32 %v, 2147483647
-; CHECK-NEXT: br label %if.end
-
if.end:
%lhs = phi i32 [ %v, %if.then ], [ 5, %entry ]
br i1 %always_false, label %rem.is.safe, label %rem.is.unsafe
@@ -381,3 +570,4 @@ rem.is.safe:
rem.is.unsafe:
ret i32 0
}
+
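A minimal standalone sketch of the pattern the pr27968 tests above exercise (illustrative only, not part of the patch; function and label names are hypothetical): the srem/urem divisor is a constant expression that may evaluate to zero, so the division must stay behind the branch that proves it safe.

define i32 @guarded_srem(i1 %is_safe, i32 %x, i32 %d) {
entry:
  br i1 %is_safe, label %safe, label %unsafe
safe:
  ; Only reached when %d is known to be nonzero, so the potentially
  ; trapping srem is fine here -- but it must not be hoisted above the br.
  %rem = srem i32 %x, %d
  ret i32 %rem
unsafe:
  ret i32 0
}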
diff --git a/test/Transforms/InstCombine/select-bitext.ll b/test/Transforms/InstCombine/select-bitext.ll
index 6e374f5221d1..b66a9eef4ab6 100644
--- a/test/Transforms/InstCombine/select-bitext.ll
+++ b/test/Transforms/InstCombine/select-bitext.ll
@@ -100,7 +100,7 @@ define <2 x i64> @trunc_sel_larger_sext_vec(<2 x i32> %a, <2 x i1> %cmp) {
; CHECK-LABEL: @trunc_sel_larger_sext_vec(
; CHECK-NEXT: [[TRUNC:%.*]] = zext <2 x i32> %a to <2 x i64>
; CHECK-NEXT: [[SEXT:%.*]] = shl <2 x i64> [[TRUNC]], <i64 48, i64 48>
-; CHECK-NEXT: [[TMP1:%.*]] = ashr <2 x i64> [[SEXT]], <i64 48, i64 48>
+; CHECK-NEXT: [[TMP1:%.*]] = ashr exact <2 x i64> [[SEXT]], <i64 48, i64 48>
; CHECK-NEXT: [[EXT:%.*]] = select <2 x i1> %cmp, <2 x i64> [[TMP1]], <2 x i64> <i64 42, i64 43>
; CHECK-NEXT: ret <2 x i64> [[EXT]]
;
@@ -127,7 +127,7 @@ define <2 x i32> @trunc_sel_smaller_sext_vec(<2 x i64> %a, <2 x i1> %cmp) {
; CHECK-LABEL: @trunc_sel_smaller_sext_vec(
; CHECK-NEXT: [[TRUNC:%.*]] = trunc <2 x i64> %a to <2 x i32>
; CHECK-NEXT: [[SEXT:%.*]] = shl <2 x i32> [[TRUNC]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP1:%.*]] = ashr <2 x i32> [[SEXT]], <i32 16, i32 16>
+; CHECK-NEXT: [[TMP1:%.*]] = ashr exact <2 x i32> [[SEXT]], <i32 16, i32 16>
; CHECK-NEXT: [[EXT:%.*]] = select <2 x i1> %cmp, <2 x i32> [[TMP1]], <2 x i32> <i32 42, i32 43>
; CHECK-NEXT: ret <2 x i32> [[EXT]]
;
@@ -153,7 +153,7 @@ define i32 @trunc_sel_equal_sext(i32 %a, i1 %cmp) {
define <2 x i32> @trunc_sel_equal_sext_vec(<2 x i32> %a, <2 x i1> %cmp) {
; CHECK-LABEL: @trunc_sel_equal_sext_vec(
; CHECK-NEXT: [[SEXT:%.*]] = shl <2 x i32> %a, <i32 16, i32 16>
-; CHECK-NEXT: [[TMP1:%.*]] = ashr <2 x i32> [[SEXT]], <i32 16, i32 16>
+; CHECK-NEXT: [[TMP1:%.*]] = ashr exact <2 x i32> [[SEXT]], <i32 16, i32 16>
; CHECK-NEXT: [[EXT:%.*]] = select <2 x i1> %cmp, <2 x i32> [[TMP1]], <2 x i32> <i32 42, i32 43>
; CHECK-NEXT: ret <2 x i32> [[EXT]]
;
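The only functional change in these select-bitext checks is the new 'exact' flag on the ashr. A sketch of why the flag holds (illustrative, not from the patch): the preceding shl by N leaves the low N bits zero, so the matching ashr shifts out only zero bits, which is precisely what 'exact' asserts.

define i64 @ashr_exact_demo(i64 %t) {
  ; Bits 0..47 of %s are zero after the shl, so the ashr by 48 discards
  ; nothing; marking it 'exact' is therefore valid.
  %s = shl i64 %t, 48
  %a = ashr exact i64 %s, 48
  ret i64 %a
}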
diff --git a/test/Transforms/InstCombine/select-cmp-br.ll b/test/Transforms/InstCombine/select-cmp-br.ll
index 1dc7e153f5fb..59384ab7b1f0 100644
--- a/test/Transforms/InstCombine/select-cmp-br.ll
+++ b/test/Transforms/InstCombine/select-cmp-br.ll
@@ -1,155 +1,263 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; Replace a 'select' with 'or' in 'select - cmp [eq|ne] - br' sequence
; RUN: opt -instcombine -S < %s | FileCheck %s
-%C = type <{ %struct.S }>
%struct.S = type { i64*, i32, i32 }
+%C = type <{ %struct.S }>
-declare void @bar(%struct.S *) #1
+declare void @bar(%struct.S*)
declare void @foobar()
-define void @test1(%C*) {
+define void @test1(%C* %arg) {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds [[C:%.*]], %C* [[ARG:%.*]], i64 0, i32 0, i32 0
+; CHECK-NEXT: [[M:%.*]] = load i64*, i64** [[TMP]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[C]], %C* [[ARG]], i64 1, i32 0, i32 0
+; CHECK-NEXT: [[N:%.*]] = load i64*, i64** [[TMP1]], align 8
+; CHECK-NEXT: [[NOT_TMP5:%.*]] = icmp ne i64* [[M]], [[N]]
+; CHECK-NEXT: [[TMP71:%.*]] = icmp eq %C* [[ARG]], null
+; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP71]], [[NOT_TMP5]]
+; CHECK-NEXT: br i1 [[TMP7]], label [[BB10:%.*]], label [[BB8:%.*]]
+; CHECK: bb:
+; CHECK-NEXT: ret void
+; CHECK: bb8:
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[C]], %C* [[ARG]], i64 0, i32 0
+; CHECK-NEXT: tail call void @bar(%struct.S* [[TMP9]])
+; CHECK-NEXT: br label [[BB:%.*]]
+; CHECK: bb10:
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, i64* [[M]], i64 9
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i64* [[TMP2]] to i64 (%C*)**
+; CHECK-NEXT: [[TMP4:%.*]] = load i64 (%C*)*, i64 (%C*)** [[TMP3]], align 8
+; CHECK-NEXT: [[TMP11:%.*]] = tail call i64 [[TMP4]](%C* [[ARG]])
+; CHECK-NEXT: br label [[BB]]
+;
entry:
- %1 = getelementptr inbounds %C, %C* %0, i64 0, i32 0, i32 0
- %m = load i64*, i64** %1, align 8
- %2 = getelementptr inbounds %C, %C* %0, i64 1, i32 0, i32 0
- %n = load i64*, i64** %2, align 8
- %3 = getelementptr inbounds i64, i64* %m, i64 9
- %4 = bitcast i64* %3 to i64 (%C*)**
- %5 = load i64 (%C*)*, i64 (%C*)** %4, align 8
- %6 = icmp eq i64* %m, %n
- %7 = select i1 %6, %C* %0, %C* null
- %8 = icmp eq %C* %7, null
- br i1 %8, label %12, label %10
-
-; <label>:9 ; preds = %10, %12
+ %tmp = getelementptr inbounds %C, %C* %arg, i64 0, i32 0, i32 0
+ %m = load i64*, i64** %tmp, align 8
+ %tmp1 = getelementptr inbounds %C, %C* %arg, i64 1, i32 0, i32 0
+ %n = load i64*, i64** %tmp1, align 8
+ %tmp2 = getelementptr inbounds i64, i64* %m, i64 9
+ %tmp3 = bitcast i64* %tmp2 to i64 (%C*)**
+ %tmp4 = load i64 (%C*)*, i64 (%C*)** %tmp3, align 8
+ %tmp5 = icmp eq i64* %m, %n
+ %tmp6 = select i1 %tmp5, %C* %arg, %C* null
+ %tmp7 = icmp eq %C* %tmp6, null
+ br i1 %tmp7, label %bb10, label %bb8
+
+bb: ; preds = %bb10, %bb8
ret void
-; <label>:10 ; preds = %entry
- %11 = getelementptr inbounds %C, %C* %7, i64 0, i32 0
- tail call void @bar(%struct.S* %11)
- br label %9
+bb8: ; preds = %entry
+ %tmp9 = getelementptr inbounds %C, %C* %tmp6, i64 0, i32 0
+ tail call void @bar(%struct.S* %tmp9)
+ br label %bb
-; <label>:12 ; preds = %entry
- %13 = tail call i64 %5(%C* %0)
- br label %9
-; CHECK-LABEL: @test1(
-; CHECK-NOT: select
-; CHECK: or
-; CHECK-NOT: select
+bb10: ; preds = %entry
+ %tmp11 = tail call i64 %tmp4(%C* %arg)
+ br label %bb
}
-define void @test2(%C*) {
+define void @test2(%C* %arg) {
+; CHECK-LABEL: @test2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds [[C:%.*]], %C* [[ARG:%.*]], i64 0, i32 0, i32 0
+; CHECK-NEXT: [[M:%.*]] = load i64*, i64** [[TMP]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[C]], %C* [[ARG]], i64 1, i32 0, i32 0
+; CHECK-NEXT: [[N:%.*]] = load i64*, i64** [[TMP1]], align 8
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64* [[M]], [[N]]
+; CHECK-NEXT: [[TMP71:%.*]] = icmp eq %C* [[ARG]], null
+; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP71]]
+; CHECK-NEXT: br i1 [[TMP7]], label [[BB10:%.*]], label [[BB8:%.*]]
+; CHECK: bb:
+; CHECK-NEXT: ret void
+; CHECK: bb8:
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[C]], %C* [[ARG]], i64 0, i32 0
+; CHECK-NEXT: tail call void @bar(%struct.S* [[TMP9]])
+; CHECK-NEXT: br label [[BB:%.*]]
+; CHECK: bb10:
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, i64* [[M]], i64 9
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i64* [[TMP2]] to i64 (%C*)**
+; CHECK-NEXT: [[TMP4:%.*]] = load i64 (%C*)*, i64 (%C*)** [[TMP3]], align 8
+; CHECK-NEXT: [[TMP11:%.*]] = tail call i64 [[TMP4]](%C* [[ARG]])
+; CHECK-NEXT: br label [[BB]]
+;
entry:
- %1 = getelementptr inbounds %C, %C* %0, i64 0, i32 0, i32 0
- %m = load i64*, i64** %1, align 8
- %2 = getelementptr inbounds %C, %C* %0, i64 1, i32 0, i32 0
- %n = load i64*, i64** %2, align 8
- %3 = getelementptr inbounds i64, i64* %m, i64 9
- %4 = bitcast i64* %3 to i64 (%C*)**
- %5 = load i64 (%C*)*, i64 (%C*)** %4, align 8
- %6 = icmp eq i64* %m, %n
- %7 = select i1 %6, %C* null, %C* %0
- %8 = icmp eq %C* %7, null
- br i1 %8, label %12, label %10
-
-; <label>:9 ; preds = %10, %12
+ %tmp = getelementptr inbounds %C, %C* %arg, i64 0, i32 0, i32 0
+ %m = load i64*, i64** %tmp, align 8
+ %tmp1 = getelementptr inbounds %C, %C* %arg, i64 1, i32 0, i32 0
+ %n = load i64*, i64** %tmp1, align 8
+ %tmp2 = getelementptr inbounds i64, i64* %m, i64 9
+ %tmp3 = bitcast i64* %tmp2 to i64 (%C*)**
+ %tmp4 = load i64 (%C*)*, i64 (%C*)** %tmp3, align 8
+ %tmp5 = icmp eq i64* %m, %n
+ %tmp6 = select i1 %tmp5, %C* null, %C* %arg
+ %tmp7 = icmp eq %C* %tmp6, null
+ br i1 %tmp7, label %bb10, label %bb8
+
+bb: ; preds = %bb10, %bb8
ret void
-; <label>:10 ; preds = %entry
- %11 = getelementptr inbounds %C, %C* %7, i64 0, i32 0
- tail call void @bar(%struct.S* %11)
- br label %9
+bb8: ; preds = %entry
+ %tmp9 = getelementptr inbounds %C, %C* %tmp6, i64 0, i32 0
+ tail call void @bar(%struct.S* %tmp9)
+ br label %bb
-; <label>:12 ; preds = %entry
- %13 = tail call i64 %5(%C* %0)
- br label %9
-; CHECK-LABEL: @test2(
-; CHECK-NOT: select
-; CHECK: or
-; CHECK-NOT: select
+bb10: ; preds = %entry
+ %tmp11 = tail call i64 %tmp4(%C* %arg)
+ br label %bb
}
-define void @test3(%C*) {
+define void @test3(%C* %arg) {
+; CHECK-LABEL: @test3(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds [[C:%.*]], %C* [[ARG:%.*]], i64 0, i32 0, i32 0
+; CHECK-NEXT: [[M:%.*]] = load i64*, i64** [[TMP]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[C]], %C* [[ARG]], i64 1, i32 0, i32 0
+; CHECK-NEXT: [[N:%.*]] = load i64*, i64** [[TMP1]], align 8
+; CHECK-NEXT: [[NOT_TMP5:%.*]] = icmp ne i64* [[M]], [[N]]
+; CHECK-NEXT: [[TMP71:%.*]] = icmp eq %C* [[ARG]], null
+; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP71]], [[NOT_TMP5]]
+; CHECK-NEXT: br i1 [[TMP7]], label [[BB10:%.*]], label [[BB8:%.*]]
+; CHECK: bb:
+; CHECK-NEXT: ret void
+; CHECK: bb8:
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[C]], %C* [[ARG]], i64 0, i32 0
+; CHECK-NEXT: tail call void @bar(%struct.S* [[TMP9]])
+; CHECK-NEXT: br label [[BB:%.*]]
+; CHECK: bb10:
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, i64* [[M]], i64 9
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i64* [[TMP2]] to i64 (%C*)**
+; CHECK-NEXT: [[TMP4:%.*]] = load i64 (%C*)*, i64 (%C*)** [[TMP3]], align 8
+; CHECK-NEXT: [[TMP11:%.*]] = tail call i64 [[TMP4]](%C* [[ARG]])
+; CHECK-NEXT: br label [[BB]]
+;
entry:
- %1 = getelementptr inbounds %C, %C* %0, i64 0, i32 0, i32 0
- %m = load i64*, i64** %1, align 8
- %2 = getelementptr inbounds %C, %C* %0, i64 1, i32 0, i32 0
- %n = load i64*, i64** %2, align 8
- %3 = getelementptr inbounds i64, i64* %m, i64 9
- %4 = bitcast i64* %3 to i64 (%C*)**
- %5 = load i64 (%C*)*, i64 (%C*)** %4, align 8
- %6 = icmp eq i64* %m, %n
- %7 = select i1 %6, %C* %0, %C* null
- %8 = icmp ne %C* %7, null
- br i1 %8, label %10, label %12
-
-; <label>:9 ; preds = %10, %12
+ %tmp = getelementptr inbounds %C, %C* %arg, i64 0, i32 0, i32 0
+ %m = load i64*, i64** %tmp, align 8
+ %tmp1 = getelementptr inbounds %C, %C* %arg, i64 1, i32 0, i32 0
+ %n = load i64*, i64** %tmp1, align 8
+ %tmp2 = getelementptr inbounds i64, i64* %m, i64 9
+ %tmp3 = bitcast i64* %tmp2 to i64 (%C*)**
+ %tmp4 = load i64 (%C*)*, i64 (%C*)** %tmp3, align 8
+ %tmp5 = icmp eq i64* %m, %n
+ %tmp6 = select i1 %tmp5, %C* %arg, %C* null
+ %tmp7 = icmp ne %C* %tmp6, null
+ br i1 %tmp7, label %bb8, label %bb10
+
+bb: ; preds = %bb10, %bb8
ret void
-; <label>:10 ; preds = %entry
- %11 = getelementptr inbounds %C, %C* %7, i64 0, i32 0
- tail call void @bar(%struct.S* %11)
- br label %9
+bb8: ; preds = %entry
+ %tmp9 = getelementptr inbounds %C, %C* %tmp6, i64 0, i32 0
+ tail call void @bar(%struct.S* %tmp9)
+ br label %bb
-; <label>:12 ; preds = %entry
- %13 = tail call i64 %5(%C* %0)
- br label %9
-; CHECK-LABEL: @test3(
-; CHECK-NOT: select
-; CHECK: or
-; CHECK-NOT: select
+bb10: ; preds = %entry
+ %tmp11 = tail call i64 %tmp4(%C* %arg)
+ br label %bb
}
-define void @test4(%C*) {
+define void @test4(%C* %arg) {
+; CHECK-LABEL: @test4(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds [[C:%.*]], %C* [[ARG:%.*]], i64 0, i32 0, i32 0
+; CHECK-NEXT: [[M:%.*]] = load i64*, i64** [[TMP]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[C]], %C* [[ARG]], i64 1, i32 0, i32 0
+; CHECK-NEXT: [[N:%.*]] = load i64*, i64** [[TMP1]], align 8
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64* [[M]], [[N]]
+; CHECK-NEXT: [[TMP71:%.*]] = icmp eq %C* [[ARG]], null
+; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP71]]
+; CHECK-NEXT: br i1 [[TMP7]], label [[BB10:%.*]], label [[BB8:%.*]]
+; CHECK: bb:
+; CHECK-NEXT: ret void
+; CHECK: bb8:
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[C]], %C* [[ARG]], i64 0, i32 0
+; CHECK-NEXT: tail call void @bar(%struct.S* [[TMP9]])
+; CHECK-NEXT: br label [[BB:%.*]]
+; CHECK: bb10:
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, i64* [[M]], i64 9
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i64* [[TMP2]] to i64 (%C*)**
+; CHECK-NEXT: [[TMP4:%.*]] = load i64 (%C*)*, i64 (%C*)** [[TMP3]], align 8
+; CHECK-NEXT: [[TMP11:%.*]] = tail call i64 [[TMP4]](%C* [[ARG]])
+; CHECK-NEXT: br label [[BB]]
+;
entry:
- %1 = getelementptr inbounds %C, %C* %0, i64 0, i32 0, i32 0
- %m = load i64*, i64** %1, align 8
- %2 = getelementptr inbounds %C, %C* %0, i64 1, i32 0, i32 0
- %n = load i64*, i64** %2, align 8
- %3 = getelementptr inbounds i64, i64* %m, i64 9
- %4 = bitcast i64* %3 to i64 (%C*)**
- %5 = load i64 (%C*)*, i64 (%C*)** %4, align 8
- %6 = icmp eq i64* %m, %n
- %7 = select i1 %6, %C* null, %C* %0
- %8 = icmp ne %C* %7, null
- br i1 %8, label %10, label %12
-
-; <label>:9 ; preds = %10, %12
+ %tmp = getelementptr inbounds %C, %C* %arg, i64 0, i32 0, i32 0
+ %m = load i64*, i64** %tmp, align 8
+ %tmp1 = getelementptr inbounds %C, %C* %arg, i64 1, i32 0, i32 0
+ %n = load i64*, i64** %tmp1, align 8
+ %tmp2 = getelementptr inbounds i64, i64* %m, i64 9
+ %tmp3 = bitcast i64* %tmp2 to i64 (%C*)**
+ %tmp4 = load i64 (%C*)*, i64 (%C*)** %tmp3, align 8
+ %tmp5 = icmp eq i64* %m, %n
+ %tmp6 = select i1 %tmp5, %C* null, %C* %arg
+ %tmp7 = icmp ne %C* %tmp6, null
+ br i1 %tmp7, label %bb8, label %bb10
+
+bb: ; preds = %bb10, %bb8
ret void
-; <label>:10 ; preds = %entry
- %11 = getelementptr inbounds %C, %C* %7, i64 0, i32 0
- tail call void @bar(%struct.S* %11)
- br label %9
+bb8: ; preds = %entry
+ %tmp9 = getelementptr inbounds %C, %C* %tmp6, i64 0, i32 0
+ tail call void @bar(%struct.S* %tmp9)
+ br label %bb
-; <label>:12 ; preds = %entry
- %13 = tail call i64 %5(%C* %0)
- br label %9
-; CHECK-LABEL: @test4(
-; CHECK-NOT: select
-; CHECK: or
-; CHECK-NOT: select
+bb10: ; preds = %entry
+ %tmp11 = tail call i64 %tmp4(%C* %arg)
+ br label %bb
}
-define void @test5(%C*, i1) {
+define void @test5(%C* %arg, i1 %arg1) {
+; CHECK-LABEL: @test5(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP21:%.*]] = icmp eq %C* [[ARG:%.*]], null
+; CHECK-NEXT: [[TMP2:%.*]] = or i1 [[TMP21]], [[ARG1:%.*]]
+; CHECK-NEXT: br i1 [[TMP2]], label [[BB5:%.*]], label [[BB3:%.*]]
+; CHECK: bb:
+; CHECK-NEXT: ret void
+; CHECK: bb3:
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[C:%.*]], %C* [[ARG]], i64 0, i32 0
+; CHECK-NEXT: tail call void @bar(%struct.S* [[TMP4]])
+; CHECK-NEXT: br label [[BB:%.*]]
+; CHECK: bb5:
+; CHECK-NEXT: tail call void @foobar()
+; CHECK-NEXT: br label [[BB]]
+;
entry:
- %2 = select i1 %1, %C* null, %C* %0
- %3 = icmp ne %C* %2, null
- br i1 %3, label %5, label %7
+ %tmp = select i1 %arg1, %C* null, %C* %arg
+ %tmp2 = icmp ne %C* %tmp, null
+ br i1 %tmp2, label %bb3, label %bb5
-; <label>:4 ; preds = %10, %12
+bb: ; preds = %bb5, %bb3
ret void
-; <label>:5 ; preds = %entry
- %6 = getelementptr inbounds %C, %C* %2, i64 0, i32 0
- tail call void @bar(%struct.S* %6)
- br label %4
+bb3: ; preds = %entry
+ %tmp4 = getelementptr inbounds %C, %C* %tmp, i64 0, i32 0
+ tail call void @bar(%struct.S* %tmp4)
+ br label %bb
-; <label>:7 ; preds = %entry
+bb5: ; preds = %entry
tail call void @foobar()
- br label %4
-; CHECK-LABEL: @test5(
-; CHECK-NOT: select
-; CHECK: or
-; CHECK-NOT: select
+ br label %bb
+}
+
+; Negative test. Must not trigger the select-cmp-br combine because the result
+; of the select is used in both flows following the br (the special case where
+; the conditional branch has the same target for both flows).
+define i32 @test6(i32 %arg, i1 %arg1) {
+; CHECK-LABEL: @test6(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 undef, label [[BB:%.*]], label [[BB]]
+; CHECK: bb:
+; CHECK-NEXT: [[TMP:%.*]] = select i1 [[ARG1:%.*]], i32 [[ARG:%.*]], i32 0
+; CHECK-NEXT: ret i32 [[TMP]]
+;
+entry:
+ %tmp = select i1 %arg1, i32 %arg, i32 0
+ %tmp2 = icmp eq i32 %tmp, 0
+ br i1 %tmp2, label %bb, label %bb
+
+bb: ; preds = %entry, %entry
+ ret i32 %tmp
}
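The select-to-or fold the tests above pin down can be shown in isolation (an illustrative sketch, not from the patch; the function name is hypothetical). A null check of a select against null is true exactly when the condition picks the null arm or when the other arm is itself null.

define i1 @select_null_check(i1 %c, i8* %p) {
  ; (select %c, %p, null) == null  <=>  (not %c) or (%p == null)
  %s = select i1 %c, i8* %p, i8* null
  %isnull = icmp eq i8* %s, null
  ret i1 %isnull
}
; Hand-derived shape of the folded output (approximate):
;   %notc = xor i1 %c, true
;   %pnull = icmp eq i8* %p, null
;   %isnull = or i1 %pnull, %notc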
diff --git a/test/Transforms/InstCombine/select.ll b/test/Transforms/InstCombine/select.ll
index f8c96e7f3f67..c26380eaa71b 100644
--- a/test/Transforms/InstCombine/select.ll
+++ b/test/Transforms/InstCombine/select.ll
@@ -190,7 +190,7 @@ define <2 x i1> @test62vec(<2 x i1> %A, <2 x i1> %B) {
define i1 @test63(i1 %A, i1 %B) {
; CHECK-LABEL: @test63(
; CHECK-NEXT: [[NOT:%.*]] = xor i1 %A, true
-; CHECK-NEXT: [[C:%.*]] = or i1 %B, [[NOT]]
+; CHECK-NEXT: [[C:%.*]] = or i1 [[NOT]], %B
; CHECK-NEXT: ret i1 [[C]]
;
%not = xor i1 %A, true
@@ -201,7 +201,7 @@ define i1 @test63(i1 %A, i1 %B) {
define <2 x i1> @test63vec(<2 x i1> %A, <2 x i1> %B) {
; CHECK-LABEL: @test63vec(
; CHECK-NEXT: [[NOT:%.*]] = xor <2 x i1> %A, <i1 true, i1 true>
-; CHECK-NEXT: [[C:%.*]] = or <2 x i1> %B, [[NOT]]
+; CHECK-NEXT: [[C:%.*]] = or <2 x i1> [[NOT]], %B
; CHECK-NEXT: ret <2 x i1> [[C]]
;
%not = xor <2 x i1> %A, <i1 true, i1 true>
@@ -1264,11 +1264,10 @@ define i32 @PR23757(i32 %x) {
define i32 @PR27137(i32 %a) {
; CHECK-LABEL: @PR27137(
; CHECK-NEXT: [[NOT_A:%.*]] = xor i32 %a, -1
-; CHECK-NEXT: [[C0:%.*]] = icmp slt i32 %a, 0
+; CHECK-NEXT: [[C0:%.*]] = icmp sgt i32 [[NOT_A]], -1
; CHECK-NEXT: [[S0:%.*]] = select i1 [[C0]], i32 [[NOT_A]], i32 -1
; CHECK-NEXT: ret i32 [[S0]]
;
-
%not_a = xor i32 %a, -1
%c0 = icmp slt i32 %a, 0
%s0 = select i1 %c0, i32 %not_a, i32 -1
@@ -1299,11 +1298,22 @@ define <2 x i32> @select_icmp_slt0_xor_vec(<2 x i32> %x) {
ret <2 x i32> %x.xor
}
-; Make sure that undef elements of the select condition are translated into undef elements of the shuffle mask.
-
define <4 x i32> @canonicalize_to_shuffle(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: @canonicalize_to_shuffle(
-; CHECK-NEXT: [[SEL:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 undef, i32 6, i32 undef>
+; CHECK-NEXT: [[SEL:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
+; CHECK-NEXT: ret <4 x i32> [[SEL]]
+;
+ %sel = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> %a, <4 x i32> %b
+ ret <4 x i32> %sel
+}
+
+; Undef elements of the select condition may not be translated into undef elements of a shuffle mask
+; because undef in a shuffle mask means we can return anything, not just one of the selected values.
+; https://bugs.llvm.org/show_bug.cgi?id=32486
+
+define <4 x i32> @undef_elts_in_condition(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: @undef_elts_in_condition(
+; CHECK-NEXT: [[SEL:%.*]] = select <4 x i1> <i1 true, i1 undef, i1 false, i1 undef>, <4 x i32> %a, <4 x i32> %b
; CHECK-NEXT: ret <4 x i32> [[SEL]]
;
%sel = select <4 x i1> <i1 true, i1 undef, i1 false, i1 undef>, <4 x i32> %a, <4 x i32> %b
@@ -1332,3 +1342,29 @@ define <4 x i32> @cannot_canonicalize_to_shuffle2(<4 x i32> %a, <4 x i32> %b) {
ret <4 x i32> %sel
}
+declare void @llvm.assume(i1)
+
+define i8 @assume_cond_true(i1 %cond, i8 %x, i8 %y) {
+; CHECK-LABEL: @assume_cond_true(
+; CHECK-NEXT: call void @llvm.assume(i1 %cond)
+; CHECK-NEXT: ret i8 %x
+;
+ call void @llvm.assume(i1 %cond)
+ %sel = select i1 %cond, i8 %x, i8 %y
+ ret i8 %sel
+}
+
+; computeKnownBitsFromAssume() understands the 'not' of an assumed condition.
+
+define i8 @assume_cond_false(i1 %cond, i8 %x, i8 %y) {
+; CHECK-LABEL: @assume_cond_false(
+; CHECK-NEXT: [[NOTCOND:%.*]] = xor i1 %cond, true
+; CHECK-NEXT: call void @llvm.assume(i1 [[NOTCOND]])
+; CHECK-NEXT: ret i8 %y
+;
+ %notcond = xor i1 %cond, true
+ call void @llvm.assume(i1 %notcond)
+ %sel = select i1 %cond, i8 %x, i8 %y
+ ret i8 %sel
+}
+
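The PR32486 distinction above is worth a concrete worked case (illustrative, not from the patch). For lane 1 of the vectors:

  select <i1 true, i1 undef, i1 false, i1 undef>, %a, %b
      lane 1 must still be one of %a[1] or %b[1]  (undef condition: either arm)
  shufflevector %a, %b, <i32 0, i32 undef, i32 6, i32 undef>
      lane 1 may be any value at all              (undef mask lane is unconstrained)

The shuffle is strictly weaker, which is why @undef_elts_in_condition is now left as a select rather than canonicalized to a shuffle.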
diff --git a/test/Transforms/InstCombine/select_meta.ll b/test/Transforms/InstCombine/select_meta.ll
index 82a85e5836dc..7d5771a0a81c 100644
--- a/test/Transforms/InstCombine/select_meta.ll
+++ b/test/Transforms/InstCombine/select_meta.ll
@@ -193,12 +193,11 @@ define i32 @test74(i32 %x) {
ret i32 %retval
}
-; FIXME:
; The compare should change, but the metadata remains the same because the select operands are not swapped.
define i32 @smin1(i32 %x) {
; CHECK-LABEL: @smin1(
; CHECK-NEXT: [[NOT_X:%.*]] = xor i32 %x, -1
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 %x, 0
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[NOT_X]], -1
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 [[NOT_X]], i32 -1, !prof ![[MD1]]
; CHECK-NEXT: ret i32 [[SEL]]
;
@@ -208,13 +207,12 @@ define i32 @smin1(i32 %x) {
ret i32 %sel
}
-; FIXME:
; The compare should change, and the metadata is swapped because the select operands are swapped.
define i32 @smin2(i32 %x) {
; CHECK-LABEL: @smin2(
; CHECK-NEXT: [[NOT_X:%.*]] = xor i32 %x, -1
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 %x, 0
-; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 -1, i32 [[NOT_X]], !prof ![[MD1]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[NOT_X]], -1
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 [[NOT_X]], i32 -1, !prof ![[MD3]]
; CHECK-NEXT: ret i32 [[SEL]]
;
%not_x = xor i32 %x, -1
@@ -223,12 +221,11 @@ define i32 @smin2(i32 %x) {
ret i32 %sel
}
-; FIXME:
; The compare should change, but the metadata remains the same because the select operands are not swapped.
define i32 @smax1(i32 %x) {
; CHECK-LABEL: @smax1(
; CHECK-NEXT: [[NOT_X:%.*]] = xor i32 %x, -1
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 %x, 0
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[NOT_X]], -1
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 [[NOT_X]], i32 -1, !prof ![[MD1]]
; CHECK-NEXT: ret i32 [[SEL]]
;
@@ -238,13 +235,12 @@ define i32 @smax1(i32 %x) {
ret i32 %sel
}
-; FIXME:
; The compare should change, and the metadata is swapped because the select operands are swapped.
define i32 @smax2(i32 %x) {
; CHECK-LABEL: @smax2(
; CHECK-NEXT: [[NOT_X:%.*]] = xor i32 %x, -1
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 %x, 0
-; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 -1, i32 [[NOT_X]], !prof ![[MD1]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[NOT_X]], -1
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 [[NOT_X]], i32 -1, !prof ![[MD3]]
; CHECK-NEXT: ret i32 [[SEL]]
;
%not_x = xor i32 %x, -1
@@ -253,11 +249,10 @@ define i32 @smax2(i32 %x) {
ret i32 %sel
}
-; FIXME:
; The compare should change, but the metadata remains the same because the select operands are not swapped.
define i32 @umin1(i32 %x) {
; CHECK-LABEL: @umin1(
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 %x, -1
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 %x, -2147483648
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 %x, i32 -2147483648, !prof ![[MD1]]
; CHECK-NEXT: ret i32 [[SEL]]
;
@@ -266,12 +261,11 @@ define i32 @umin1(i32 %x) {
ret i32 %sel
}
-; FIXME:
; The compare should change, and the metadata is swapped because the select operands are swapped.
define i32 @umin2(i32 %x) {
; CHECK-LABEL: @umin2(
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 %x, 0
-; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 2147483647, i32 %x, !prof ![[MD1]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 %x, 2147483647
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 %x, i32 2147483647, !prof ![[MD3]]
; CHECK-NEXT: ret i32 [[SEL]]
;
%cmp = icmp slt i32 %x, 0
@@ -279,11 +273,10 @@ define i32 @umin2(i32 %x) {
ret i32 %sel
}
-; FIXME:
; The compare should change, but the metadata remains the same because the select operands are not swapped.
define i32 @umax1(i32 %x) {
; CHECK-LABEL: @umax1(
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 %x, 0
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 %x, 2147483647
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 %x, i32 2147483647, !prof ![[MD1]]
; CHECK-NEXT: ret i32 [[SEL]]
;
@@ -292,12 +285,11 @@ define i32 @umax1(i32 %x) {
ret i32 %sel
}
-; FIXME:
; The compare should change, and the metadata is swapped because the select operands are swapped.
define i32 @umax2(i32 %x) {
; CHECK-LABEL: @umax2(
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 %x, -1
-; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 -2147483648, i32 %x, !prof ![[MD1]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 %x, -2147483648
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 %x, i32 -2147483648, !prof ![[MD3]]
; CHECK-NEXT: ret i32 [[SEL]]
;
%cmp = icmp sgt i32 %x, -1
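A minimal sketch of the profile metadata these checks track (illustrative, not from the patch; names hypothetical). branch_weights on a select count the true and false arms in order, so any fold that swaps the arms must also swap the weights, which is why several checks above move from ![[MD1]] to the swapped ![[MD3]].

define i32 @prof_swap_demo(i32 %x) {
  %cmp = icmp slt i32 %x, 0
  ; 10 weights the true arm (-1), 30 weights the false arm (%x). If a
  ; fold swaps the select arms, !0 must become {"branch_weights", 30, 10}.
  %sel = select i1 %cmp, i32 -1, i32 %x, !prof !0
  ret i32 %sel
}
!0 = !{!"branch_weights", i32 10, i32 30}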
diff --git a/test/Transforms/InstCombine/shift-sra.ll b/test/Transforms/InstCombine/shift-sra.ll
index 75235500d513..4483e60b506a 100644
--- a/test/Transforms/InstCombine/shift-sra.ll
+++ b/test/Transforms/InstCombine/shift-sra.ll
@@ -1,26 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
define i32 @test1(i32 %X, i8 %A) {
- %shift.upgrd.1 = zext i8 %A to i32 ; <i32> [#uses=1]
- ; can be logical shift.
- %Y = ashr i32 %X, %shift.upgrd.1 ; <i32> [#uses=1]
- %Z = and i32 %Y, 1 ; <i32> [#uses=1]
- ret i32 %Z
; CHECK-LABEL: @test1(
-; CHECK: lshr i32 %X, %shift.upgrd.1
+; CHECK-NEXT: [[SHIFT_UPGRD_1:%.*]] = zext i8 %A to i32
+; CHECK-NEXT: [[Y1:%.*]] = lshr i32 %X, [[SHIFT_UPGRD_1]]
+; CHECK-NEXT: [[Z:%.*]] = and i32 [[Y1]], 1
+; CHECK-NEXT: ret i32 [[Z]]
+;
+ %shift.upgrd.1 = zext i8 %A to i32
+ ; can be logical shift.
+ %Y = ashr i32 %X, %shift.upgrd.1
+ %Z = and i32 %Y, 1
+ ret i32 %Z
}
define i32 @test2(i8 %tmp) {
- %tmp3 = zext i8 %tmp to i32 ; <i32> [#uses=1]
- %tmp4 = add i32 %tmp3, 7 ; <i32> [#uses=1]
- %tmp5 = ashr i32 %tmp4, 3 ; <i32> [#uses=1]
- ret i32 %tmp5
; CHECK-LABEL: @test2(
-; CHECK: lshr i32 %tmp4, 3
+; CHECK-NEXT: [[TMP3:%.*]] = zext i8 %tmp to i32
+; CHECK-NEXT: [[TMP4:%.*]] = add nuw nsw i32 [[TMP3]], 7
+; CHECK-NEXT: [[TMP51:%.*]] = lshr i32 [[TMP4]], 3
+; CHECK-NEXT: ret i32 [[TMP51]]
+;
+ %tmp3 = zext i8 %tmp to i32
+ %tmp4 = add i32 %tmp3, 7
+ %tmp5 = ashr i32 %tmp4, 3
+ ret i32 %tmp5
}
define i64 @test3(i1 %X, i64 %Y, i1 %Cond) {
+; CHECK-LABEL: @test3(
+; CHECK-NEXT: br i1 %Cond, label %T, label %F
+; CHECK: T:
+; CHECK-NEXT: [[X2:%.*]] = sext i1 %X to i64
+; CHECK-NEXT: br label %C
+; CHECK: F:
+; CHECK-NEXT: [[Y2:%.*]] = ashr i64 %Y, 63
+; CHECK-NEXT: br label %C
+; CHECK: C:
+; CHECK-NEXT: [[P:%.*]] = phi i64 [ [[X2]], %T ], [ [[Y2]], %F ]
+; CHECK-NEXT: ret i64 [[P]]
+;
br i1 %Cond, label %T, label %F
T:
%X2 = sext i1 %X to i64
@@ -29,16 +50,24 @@ F:
%Y2 = ashr i64 %Y, 63
br label %C
C:
- %P = phi i64 [%X2, %T], [%Y2, %F]
+ %P = phi i64 [%X2, %T], [%Y2, %F]
%S = ashr i64 %P, 12
ret i64 %S
-
-; CHECK-LABEL: @test3(
-; CHECK: %P = phi i64
-; CHECK-NEXT: ret i64 %P
}
define i64 @test4(i1 %X, i64 %Y, i1 %Cond) {
+; CHECK-LABEL: @test4(
+; CHECK-NEXT: br i1 %Cond, label %T, label %F
+; CHECK: T:
+; CHECK-NEXT: [[X2:%.*]] = sext i1 %X to i64
+; CHECK-NEXT: br label %C
+; CHECK: F:
+; CHECK-NEXT: [[Y2:%.*]] = ashr i64 %Y, 63
+; CHECK-NEXT: br label %C
+; CHECK: C:
+; CHECK-NEXT: [[P:%.*]] = phi i64 [ [[X2]], %T ], [ [[Y2]], %F ]
+; CHECK-NEXT: ret i64 [[P]]
+;
br i1 %Cond, label %T, label %F
T:
%X2 = sext i1 %X to i64
@@ -47,18 +76,29 @@ F:
%Y2 = ashr i64 %Y, 63
br label %C
C:
- %P = phi i64 [%X2, %T], [%Y2, %F]
+ %P = phi i64 [%X2, %T], [%Y2, %F]
%R = shl i64 %P, 12
%S = ashr i64 %R, 12
ret i64 %S
-
-; CHECK-LABEL: @test4(
-; CHECK: %P = phi i64
-; CHECK-NEXT: ret i64 %P
}
; rdar://7732987
define i32 @test5(i32 %Y) {
+; CHECK-LABEL: @test5(
+; CHECK-NEXT: br i1 undef, label %A, label %C
+; CHECK: A:
+; CHECK-NEXT: br i1 undef, label %B, label %D
+; CHECK: B:
+; CHECK-NEXT: br label %D
+; CHECK: C:
+; CHECK-NEXT: br i1 undef, label %D, label %E
+; CHECK: D:
+; CHECK-NEXT: [[P:%.*]] = phi i32 [ 0, %A ], [ 0, %B ], [ %Y, %C ]
+; CHECK-NEXT: [[S:%.*]] = ashr i32 [[P]], 16
+; CHECK-NEXT: ret i32 [[S]]
+; CHECK: E:
+; CHECK-NEXT: ret i32 0
+;
br i1 undef, label %A, label %C
A:
br i1 undef, label %B, label %D
@@ -67,12 +107,59 @@ B:
C:
br i1 undef, label %D, label %E
D:
- %P = phi i32 [0, %A], [0, %B], [%Y, %C]
+ %P = phi i32 [0, %A], [0, %B], [%Y, %C]
%S = ashr i32 %P, 16
ret i32 %S
-; CHECK-LABEL: @test5(
-; CHECK: %P = phi i32
-; CHECK-NEXT: ashr i32 %P, 16
E:
ret i32 0
}
+
+; (X >>s C1) >>s C2 --> X >>s (C1 + C2)
+
+define i32 @ashr_ashr(i32 %x) {
+; CHECK-LABEL: @ashr_ashr(
+; CHECK-NEXT: [[SH2:%.*]] = ashr i32 %x, 12
+; CHECK-NEXT: ret i32 [[SH2]]
+;
+ %sh1 = ashr i32 %x, 5
+ %sh2 = ashr i32 %sh1, 7
+ ret i32 %sh2
+}
+
+; PR3851
+; (X >>s C1) >>s C2 --> X >>s (Bitwidth - 1) when C1 + C2 >= Bitwidth
+
+define i32 @ashr_overshift(i32 %x) {
+; CHECK-LABEL: @ashr_overshift(
+; CHECK-NEXT: [[SH2:%.*]] = ashr i32 %x, 31
+; CHECK-NEXT: ret i32 [[SH2]]
+;
+ %sh1 = ashr i32 %x, 15
+ %sh2 = ashr i32 %sh1, 17
+ ret i32 %sh2
+}
+
+; (X >>s C1) >>s C2 --> X >>s (C1 + C2)
+
+define <2 x i32> @ashr_ashr_splat_vec(<2 x i32> %x) {
+; CHECK-LABEL: @ashr_ashr_splat_vec(
+; CHECK-NEXT: [[SH2:%.*]] = ashr <2 x i32> %x, <i32 12, i32 12>
+; CHECK-NEXT: ret <2 x i32> [[SH2]]
+;
+ %sh1 = ashr <2 x i32> %x, <i32 5, i32 5>
+ %sh2 = ashr <2 x i32> %sh1, <i32 7, i32 7>
+ ret <2 x i32> %sh2
+}
+
+; (X >>s C1) >>s C2 --> X >>s (Bitwidth - 1) when C1 + C2 >= Bitwidth
+
+define <2 x i32> @ashr_overshift_splat_vec(<2 x i32> %x) {
+; CHECK-LABEL: @ashr_overshift_splat_vec(
+; CHECK-NEXT: [[SH2:%.*]] = ashr <2 x i32> %x, <i32 31, i32 31>
+; CHECK-NEXT: ret <2 x i32> [[SH2]]
+;
+ %sh1 = ashr <2 x i32> %x, <i32 15, i32 15>
+ %sh2 = ashr <2 x i32> %sh1, <i32 17, i32 17>
+ ret <2 x i32> %sh2
+}
+
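A worked check of the two new shift-sra folds (illustrative, not from the patch): repeated ashr amounts simply add, and a sum at or past the bit width clamps to Bitwidth - 1, because an i32 ashr by 31 already broadcasts the sign bit into every position.

define i32 @ashr_sum_demo(i32 %x) {
  ; 5 + 7 = 12: this pair folds to a single ashr by 12.
  %a = ashr i32 %x, 5
  %b = ashr i32 %a, 7
  ; 15 + 17 = 32 >= 32: this pair folds to ashr by 31 (all sign bits).
  %c = ashr i32 %x, 15
  %d = ashr i32 %c, 17
  %e = xor i32 %b, %d
  ret i32 %e
}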
diff --git a/test/Transforms/InstCombine/shift.ll b/test/Transforms/InstCombine/shift.ll
index c046a72110c2..60ba35557f70 100644
--- a/test/Transforms/InstCombine/shift.ll
+++ b/test/Transforms/InstCombine/shift.ll
@@ -1,6 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; This test makes sure that these instructions are properly eliminated.
-;
; RUN: opt < %s -instcombine -S | FileCheck %s
define i32 @test1(i32 %A) {
@@ -161,9 +159,8 @@ define i8 @test9(i8 %A) {
ret i8 %C
}
-;; This transformation is deferred to DAGCombine:
;; (A >> 7) << 7 === A & 128
-;; The shl may be valuable to scalar evolution.
+
define i8 @test10(i8 %A) {
; CHECK-LABEL: @test10(
; CHECK-NEXT: [[B:%.*]] = and i8 %A, -128
@@ -454,9 +451,8 @@ define i32 @test25(i32 %tmp.2, i32 %AA) {
define <2 x i32> @test25_vector(<2 x i32> %tmp.2, <2 x i32> %AA) {
; CHECK-LABEL: @test25_vector(
-; CHECK-NEXT: [[TMP_3:%.*]] = lshr <2 x i32> %tmp.2, <i32 17, i32 17>
-; CHECK-NEXT: [[TMP_51:%.*]] = shl <2 x i32> [[TMP_3]], <i32 17, i32 17>
-; CHECK-NEXT: [[X2:%.*]] = add <2 x i32> [[TMP_51]], %AA
+; CHECK-NEXT: [[TMP_3:%.*]] = and <2 x i32> %tmp.2, <i32 -131072, i32 -131072>
+; CHECK-NEXT: [[X2:%.*]] = add <2 x i32> [[TMP_3]], %AA
; CHECK-NEXT: [[TMP_6:%.*]] = and <2 x i32> [[X2]], <i32 -131072, i32 -131072>
; CHECK-NEXT: ret <2 x i32> [[TMP_6]]
;
@@ -640,30 +636,25 @@ define <2 x i1> @test35vec(<2 x i32> %X) {
define i128 @test36(i128 %A, i128 %B) {
; CHECK-LABEL: @test36(
-; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP231:%.*]] = or i128 %B, %A
; CHECK-NEXT: [[INS:%.*]] = and i128 [[TMP231]], 18446744073709551615
; CHECK-NEXT: ret i128 [[INS]]
;
-entry:
%tmp27 = shl i128 %A, 64
%tmp23 = shl i128 %B, 64
%ins = or i128 %tmp23, %tmp27
%tmp45 = lshr i128 %ins, 64
ret i128 %tmp45
-
}
define i64 @test37(i128 %A, i32 %B) {
; CHECK-LABEL: @test37(
-; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP22:%.*]] = zext i32 %B to i128
; CHECK-NEXT: [[TMP23:%.*]] = shl nuw nsw i128 [[TMP22]], 32
; CHECK-NEXT: [[INS:%.*]] = or i128 [[TMP23]], %A
; CHECK-NEXT: [[TMP46:%.*]] = trunc i128 [[INS]] to i64
; CHECK-NEXT: ret i64 [[TMP46]]
;
-entry:
%tmp27 = shl i128 %A, 64
%tmp22 = zext i32 %B to i128
%tmp23 = shl i128 %tmp22, 96
@@ -671,7 +662,17 @@ entry:
%tmp45 = lshr i128 %ins, 64
%tmp46 = trunc i128 %tmp45 to i64
ret i64 %tmp46
+}
+define <2 x i32> @shl_nuw_nsw_splat_vec(<2 x i8> %x) {
+; CHECK-LABEL: @shl_nuw_nsw_splat_vec(
+; CHECK-NEXT: [[T2:%.*]] = zext <2 x i8> %x to <2 x i32>
+; CHECK-NEXT: [[T3:%.*]] = shl nuw nsw <2 x i32> [[T2]], <i32 17, i32 17>
+; CHECK-NEXT: ret <2 x i32> [[T3]]
+;
+ %t2 = zext <2 x i8> %x to <2 x i32>
+ %t3 = shl <2 x i32> %t2, <i32 17, i32 17>
+ ret <2 x i32> %t3
}
define i32 @test38(i32 %x) nounwind readnone {
@@ -789,6 +790,8 @@ define i32 @test45(i32 %a) nounwind {
ret i32 %z
}
+; (X >>?exact C1) << C2 --> X >>?exact (C1-C2)
+
define i32 @test46(i32 %a) {
; CHECK-LABEL: @test46(
; CHECK-NEXT: [[Z:%.*]] = ashr exact i32 %a, 2
@@ -799,16 +802,44 @@ define i32 @test46(i32 %a) {
ret i32 %z
}
-define i32 @test47(i32 %a) {
+; (X >>?exact C1) << C2 --> X >>?exact (C1-C2)
+
+define <2 x i32> @test46_splat_vec(<2 x i32> %a) {
+; CHECK-LABEL: @test46_splat_vec(
+; CHECK-NEXT: [[Z:%.*]] = ashr exact <2 x i32> %a, <i32 2, i32 2>
+; CHECK-NEXT: ret <2 x i32> [[Z]]
+;
+ %y = ashr exact <2 x i32> %a, <i32 3, i32 3>
+ %z = shl <2 x i32> %y, <i32 1, i32 1>
+ ret <2 x i32> %z
+}
+
+; (X >>?exact C1) << C2 --> X >>?exact (C1-C2)
+
+define i8 @test47(i8 %a) {
; CHECK-LABEL: @test47(
-; CHECK-NEXT: [[Z:%.*]] = lshr exact i32 %a, 2
-; CHECK-NEXT: ret i32 [[Z]]
+; CHECK-NEXT: [[Z:%.*]] = lshr exact i8 %a, 2
+; CHECK-NEXT: ret i8 [[Z]]
;
- %y = lshr exact i32 %a, 3
- %z = shl i32 %y, 1
- ret i32 %z
+ %y = lshr exact i8 %a, 3
+ %z = shl i8 %y, 1
+ ret i8 %z
+}
+
+; (X >>?exact C1) << C2 --> X >>?exact (C1-C2)
+
+define <2 x i8> @test47_splat_vec(<2 x i8> %a) {
+; CHECK-LABEL: @test47_splat_vec(
+; CHECK-NEXT: [[Z:%.*]] = lshr exact <2 x i8> %a, <i8 2, i8 2>
+; CHECK-NEXT: ret <2 x i8> [[Z]]
+;
+ %y = lshr exact <2 x i8> %a, <i8 3, i8 3>
+ %z = shl <2 x i8> %y, <i8 1, i8 1>
+ ret <2 x i8> %z
}
+; (X >>u,exact C1) << C2 --> X << (C2-C1) when C2 > C1
+
define i32 @test48(i32 %x) {
; CHECK-LABEL: @test48(
; CHECK-NEXT: [[B:%.*]] = shl i32 %x, 2
@@ -819,6 +850,32 @@ define i32 @test48(i32 %x) {
ret i32 %B
}
+; Verify that wrap flags are preserved from the original 'shl'.
+
+define i32 @test48_nuw_nsw(i32 %x) {
+; CHECK-LABEL: @test48_nuw_nsw(
+; CHECK-NEXT: [[B:%.*]] = shl nuw nsw i32 %x, 2
+; CHECK-NEXT: ret i32 [[B]]
+;
+ %A = lshr exact i32 %x, 1
+ %B = shl nuw nsw i32 %A, 3
+ ret i32 %B
+}
+
+; (X >>u,exact C1) << C2 --> X << (C2-C1) with splat constants when C2 > C1
+
+define <2 x i32> @test48_splat_vec(<2 x i32> %x) {
+; CHECK-LABEL: @test48_splat_vec(
+; CHECK-NEXT: [[B:%.*]] = shl nuw nsw <2 x i32> %x, <i32 2, i32 2>
+; CHECK-NEXT: ret <2 x i32> [[B]]
+;
+ %A = lshr exact <2 x i32> %x, <i32 1, i32 1>
+ %B = shl nsw nuw <2 x i32> %A, <i32 3, i32 3>
+ ret <2 x i32> %B
+}
+
+; (X >>s,exact C1) << C2 --> X << (C2-C1) when C2 > C1
+
define i32 @test49(i32 %x) {
; CHECK-LABEL: @test49(
; CHECK-NEXT: [[B:%.*]] = shl i32 %x, 2
@@ -829,6 +886,32 @@ define i32 @test49(i32 %x) {
ret i32 %B
}
+; Verify that wrap flags are preserved from the original 'shl'.
+
+define i32 @test49_nuw_nsw(i32 %x) {
+; CHECK-LABEL: @test49_nuw_nsw(
+; CHECK-NEXT: [[B:%.*]] = shl nuw nsw i32 %x, 2
+; CHECK-NEXT: ret i32 [[B]]
+;
+ %A = ashr exact i32 %x, 1
+ %B = shl nuw nsw i32 %A, 3
+ ret i32 %B
+}
+
+; (X >>s,exact C1) << C2 --> X << (C2-C1) with splat constants when C2 > C1
+
+define <2 x i32> @test49_splat_vec(<2 x i32> %x) {
+; CHECK-LABEL: @test49_splat_vec(
+; CHECK-NEXT: [[B:%.*]] = shl nuw nsw <2 x i32> %x, <i32 2, i32 2>
+; CHECK-NEXT: ret <2 x i32> [[B]]
+;
+ %A = ashr exact <2 x i32> %x, <i32 1, i32 1>
+ %B = shl nsw nuw <2 x i32> %A, <i32 3, i32 3>
+ ret <2 x i32> %B
+}
+
+; (X <<nsw C1) >>s C2 --> X >>s (C2-C1)
+
define i32 @test50(i32 %x) {
; CHECK-LABEL: @test50(
; CHECK-NEXT: [[B:%.*]] = ashr i32 %x, 2
@@ -839,6 +922,21 @@ define i32 @test50(i32 %x) {
ret i32 %B
}
+; (X <<nsw C1) >>s C2 --> X >>s (C2-C1)
+; Also, check that exact is propagated.
+
+define <2 x i32> @test50_splat_vec(<2 x i32> %x) {
+; CHECK-LABEL: @test50_splat_vec(
+; CHECK-NEXT: [[B:%.*]] = ashr exact <2 x i32> %x, <i32 2, i32 2>
+; CHECK-NEXT: ret <2 x i32> [[B]]
+;
+ %A = shl nsw <2 x i32> %x, <i32 1, i32 1>
+ %B = ashr exact <2 x i32> %A, <i32 3, i32 3>
+ ret <2 x i32> %B
+}
+
+; (X <<nuw C1) >>u C2 --> X >>u (C2-C1)
+
define i32 @test51(i32 %x) {
; CHECK-LABEL: @test51(
; CHECK-NEXT: [[B:%.*]] = lshr i32 %x, 2
@@ -849,6 +947,48 @@ define i32 @test51(i32 %x) {
ret i32 %B
}
+; (X <<nuw C1) >>u C2 --> X >>u (C2-C1) with splats
+; Also, check that exact is propagated.
+
+define <2 x i32> @test51_splat_vec(<2 x i32> %x) {
+; CHECK-LABEL: @test51_splat_vec(
+; CHECK-NEXT: [[B:%.*]] = lshr exact <2 x i32> %x, <i32 2, i32 2>
+; CHECK-NEXT: ret <2 x i32> [[B]]
+;
+ %A = shl nuw <2 x i32> %x, <i32 1, i32 1>
+ %B = lshr exact <2 x i32> %A, <i32 3, i32 3>
+ ret <2 x i32> %B
+}
+
+; (X << C1) >>u C2 --> X >>u (C2-C1) & (-1 >> C2)
+; Also, check that exact is propagated.
+
+define i32 @test51_no_nuw(i32 %x) {
+; CHECK-LABEL: @test51_no_nuw(
+; CHECK-NEXT: [[TMP1:%.*]] = lshr exact i32 %x, 2
+; CHECK-NEXT: [[B:%.*]] = and i32 [[TMP1]], 536870911
+; CHECK-NEXT: ret i32 [[B]]
+;
+ %A = shl i32 %x, 1
+ %B = lshr exact i32 %A, 3
+ ret i32 %B
+}
+
+; (X << C1) >>u C2 --> X >>u (C2-C1) & (-1 >> C2)
+
+define <2 x i32> @test51_no_nuw_splat_vec(<2 x i32> %x) {
+; CHECK-LABEL: @test51_no_nuw_splat_vec(
+; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i32> %x, <i32 2, i32 2>
+; CHECK-NEXT: [[B:%.*]] = and <2 x i32> [[TMP1]], <i32 536870911, i32 536870911>
+; CHECK-NEXT: ret <2 x i32> [[B]]
+;
+ %A = shl <2 x i32> %x, <i32 1, i32 1>
+ %B = lshr <2 x i32> %A, <i32 3, i32 3>
+ ret <2 x i32> %B
+}
+
+; (X <<nsw C1) >>s C2 --> X <<nsw (C1 - C2)
+
define i32 @test52(i32 %x) {
; CHECK-LABEL: @test52(
; CHECK-NEXT: [[B:%.*]] = shl nsw i32 %x, 2
@@ -859,6 +999,20 @@ define i32 @test52(i32 %x) {
ret i32 %B
}
+; (X <<nsw C1) >>s C2 --> X <<nsw (C1 - C2)
+
+define <2 x i32> @test52_splat_vec(<2 x i32> %x) {
+; CHECK-LABEL: @test52_splat_vec(
+; CHECK-NEXT: [[B:%.*]] = shl nsw <2 x i32> %x, <i32 2, i32 2>
+; CHECK-NEXT: ret <2 x i32> [[B]]
+;
+ %A = shl nsw <2 x i32> %x, <i32 3, i32 3>
+ %B = ashr <2 x i32> %A, <i32 1, i32 1>
+ ret <2 x i32> %B
+}
+
+; (X <<nuw C1) >>u C2 --> X <<nuw (C1 - C2)
+
define i32 @test53(i32 %x) {
; CHECK-LABEL: @test53(
; CHECK-NEXT: [[B:%.*]] = shl nuw i32 %x, 2
@@ -869,6 +1023,45 @@ define i32 @test53(i32 %x) {
ret i32 %B
}
+; (X <<nuw C1) >>u C2 --> X <<nuw (C1 - C2)
+
+define <2 x i32> @test53_splat_vec(<2 x i32> %x) {
+; CHECK-LABEL: @test53_splat_vec(
+; CHECK-NEXT: [[B:%.*]] = shl nuw <2 x i32> %x, <i32 2, i32 2>
+; CHECK-NEXT: ret <2 x i32> [[B]]
+;
+ %A = shl nuw <2 x i32> %x, <i32 3, i32 3>
+ %B = lshr <2 x i32> %A, <i32 1, i32 1>
+ ret <2 x i32> %B
+}
+
+; (X << C1) >>u C2 --> X << (C1 - C2) & (-1 >> C2)
+
+define i8 @test53_no_nuw(i8 %x) {
+; CHECK-LABEL: @test53_no_nuw(
+; CHECK-NEXT: [[TMP1:%.*]] = shl i8 %x, 2
+; CHECK-NEXT: [[B:%.*]] = and i8 [[TMP1]], 124
+; CHECK-NEXT: ret i8 [[B]]
+;
+ %A = shl i8 %x, 3
+ %B = lshr i8 %A, 1
+ ret i8 %B
+}
+
+; (X << C1) >>u C2 --> X << (C1 - C2) & (-1 >> C2)
+; FIXME: Demanded bits should change the mask constant as it does for the scalar case.
+
+define <2 x i8> @test53_no_nuw_splat_vec(<2 x i8> %x) {
+; CHECK-LABEL: @test53_no_nuw_splat_vec(
+; CHECK-NEXT: [[TMP1:%.*]] = shl <2 x i8> %x, <i8 2, i8 2>
+; CHECK-NEXT: [[B:%.*]] = and <2 x i8> [[TMP1]], <i8 127, i8 127>
+; CHECK-NEXT: ret <2 x i8> [[B]]
+;
+ %A = shl <2 x i8> %x, <i8 3, i8 3>
+ %B = lshr <2 x i8> %A, <i8 1, i8 1>
+ ret <2 x i8> %B
+}
+
define i32 @test54(i32 %x) {
; CHECK-LABEL: @test54(
; CHECK-NEXT: [[TMP1:%.*]] = shl i32 %x, 3
@@ -1041,7 +1234,7 @@ define <2 x i65> @test_63(<2 x i64> %t) {
; CHECK-LABEL: @test_63(
; CHECK-NEXT: [[A:%.*]] = zext <2 x i64> %t to <2 x i65>
; CHECK-NEXT: [[SEXT:%.*]] = shl <2 x i65> [[A]], <i65 33, i65 33>
-; CHECK-NEXT: [[B:%.*]] = ashr <2 x i65> [[SEXT]], <i65 33, i65 33>
+; CHECK-NEXT: [[B:%.*]] = ashr exact <2 x i65> [[SEXT]], <i65 33, i65 33>
; CHECK-NEXT: ret <2 x i65> [[B]]
;
%a = zext <2 x i64> %t to <2 x i65>
@@ -1052,12 +1245,26 @@ define <2 x i65> @test_63(<2 x i64> %t) {
define i64 @test_64(i32 %t) {
; CHECK-LABEL: @test_64(
-; CHECK-NEXT: [[SHL:%.*]] = shl i32 %t, 8
-; CHECK-NEXT: [[EXT:%.*]] = zext i32 [[SHL]] to i64
-; CHECK-NEXT: ret i64 [[EXT]]
-
+; CHECK-NEXT: [[TMP1:%.*]] = shl i32 %t, 8
+; CHECK-NEXT: [[SHL:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT: ret i64 [[SHL]]
+;
%and = and i32 %t, 16777215
%ext = zext i32 %and to i64
%shl = shl i64 %ext, 8
ret i64 %shl
}
+
+define <2 x i64> @test_64_splat_vec(<2 x i32> %t) {
+; CHECK-LABEL: @test_64_splat_vec(
+; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> %t, <i32 16777215, i32 16777215>
+; CHECK-NEXT: [[TMP1:%.*]] = shl nuw <2 x i32> [[AND]], <i32 8, i32 8>
+; CHECK-NEXT: [[SHL:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
+; CHECK-NEXT: ret <2 x i64> [[SHL]]
+;
+ %and = and <2 x i32> %t, <i32 16777215, i32 16777215>
+ %ext = zext <2 x i32> %and to <2 x i64>
+ %shl = shl <2 x i64> %ext, <i64 8, i64 8>
+ ret <2 x i64> %shl
+}
+
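The FIXME at test53_no_nuw_splat_vec comes down to demanded bits, which can be traced by hand (illustrative, not from the patch). For i8, (x << 3) >>u 1 places bits 0..4 of x at positions 2..6:

  x               =  b7 b6 b5 b4 b3 b2 b1 b0
  x << 3          =  b4 b3 b2 b1 b0  0  0  0
  (x << 3) >>u 1  =   0 b4 b3 b2 b1 b0  0  0   ==  (x << 2) & 0b01111100

The tight mask is therefore 124; the generic -1 >>u 1 = 127 also admits bits 0 and 1, which the shl has already zeroed. The scalar path tightens 127 to 124, while the vector path does not yet.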
diff --git a/test/Transforms/InstCombine/shufflevec-bitcast.ll b/test/Transforms/InstCombine/shufflevec-bitcast.ll
new file mode 100644
index 000000000000..0f0365a07fb4
--- /dev/null
+++ b/test/Transforms/InstCombine/shufflevec-bitcast.ll
@@ -0,0 +1,16 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define void @test(<16 x i8> %w, i32* %o1, float* %o2) {
+
+; CHECK: %v.bc = bitcast <16 x i8> %w to <4 x i32>
+; CHECK-NEXT: %v.extract = extractelement <4 x i32> %v.bc, i32 3
+; CHECK-NEXT: %v.bc{{[0-9]*}} = bitcast <16 x i8> %w to <4 x float>
+; CHECK-NEXT: %v.extract{{[0-9]*}} = extractelement <4 x float> %v.bc{{[0-9]*}}, i32 3
+
+ %v = shufflevector <16 x i8> %w, <16 x i8> undef, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+ %f = bitcast <4 x i8> %v to float
+ %i = bitcast <4 x i8> %v to i32
+ store i32 %i, i32* %o1, align 4
+ store float %f, float* %o2, align 4
+ ret void
+}
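A byte-level view of the new shufflevec-bitcast test (illustrative, and assuming the usual little-endian lane-to-byte layout, since the test declares no data layout). Selecting bytes 12..15 of %w and bitcasting them to i32 or float equals bitcasting the whole vector and extracting lane 3, which is the form the checks expect:

  %w bytes:  [0..3 | 4..7 | 8..11 | 12..15]
              lane0  lane1  lane2   lane3      (viewed as <4 x i32> or <4 x float>)
  shuffle <12,13,14,15> then bitcast  ==  bitcast %w then extractelement lane 3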
diff --git a/test/Transforms/InstCombine/signext.ll b/test/Transforms/InstCombine/signext.ll
index bccadeb396f2..ff92ec0a8e3c 100644
--- a/test/Transforms/InstCombine/signext.ll
+++ b/test/Transforms/InstCombine/signext.ll
@@ -61,6 +61,10 @@ define i32 @test5(i32 %x) {
ret i32 %tmp.4
}
+; If the shift amount equals the difference in width between the destination
+; and source scalar types:
+; ashr (shl (zext X), C), C --> sext X
+
define i32 @test6(i16 %P) {
; CHECK-LABEL: @test6(
; CHECK-NEXT: [[TMP_5:%.*]] = sext i16 %P to i32
@@ -72,6 +76,19 @@ define i32 @test6(i16 %P) {
ret i32 %tmp.5
}
+; Vectors should get the same fold as above.
+
+define <2 x i32> @test6_splat_vec(<2 x i12> %P) {
+; CHECK-LABEL: @test6_splat_vec(
+; CHECK-NEXT: [[ASHR:%.*]] = sext <2 x i12> %P to <2 x i32>
+; CHECK-NEXT: ret <2 x i32> [[ASHR]]
+;
+ %z = zext <2 x i12> %P to <2 x i32>
+ %shl = shl <2 x i32> %z, <i32 20, i32 20>
+ %ashr = ashr <2 x i32> %shl, <i32 20, i32 20>
+ ret <2 x i32> %ashr
+}
+
define i32 @test7(i32 %x) {
; CHECK-LABEL: @test7(
; CHECK-NEXT: [[SUB:%.*]] = ashr i32 %x, 5
diff --git a/test/Transforms/InstCombine/sitofp.ll b/test/Transforms/InstCombine/sitofp.ll
index 820977838836..149154723b95 100644
--- a/test/Transforms/InstCombine/sitofp.ll
+++ b/test/Transforms/InstCombine/sitofp.ll
@@ -1,41 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
-; CHECK-LABEL: test1
-; CHECK: ret i1 true
define i1 @test1(i8 %A) {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: ret i1 true
+;
%B = sitofp i8 %A to double
%C = fcmp ult double %B, 128.0
ret i1 %C
}
-; CHECK-LABEL: test2
-; CHECK: ret i1 true
define i1 @test2(i8 %A) {
+; CHECK-LABEL: @test2(
+; CHECK-NEXT: ret i1 true
+;
%B = sitofp i8 %A to double
%C = fcmp ugt double %B, -128.1
ret i1 %C
}
-; CHECK-LABEL: test3
-; CHECK: ret i1 true
define i1 @test3(i8 %A) {
+; CHECK-LABEL: @test3(
+; CHECK-NEXT: ret i1 true
+;
%B = sitofp i8 %A to double
%C = fcmp ule double %B, 127.0
ret i1 %C
}
-; CHECK-LABEL: test4
-; CHECK: icmp ne i8 %A, 127
-; CHECK-NEXT: ret i1
define i1 @test4(i8 %A) {
+; CHECK-LABEL: @test4(
+; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[A:%.*]], 127
+; CHECK-NEXT: ret i1 [[C]]
+;
%B = sitofp i8 %A to double
%C = fcmp ult double %B, 127.0
ret i1 %C
}
-; CHECK-LABEL: test5
-; CHECK: ret i32
define i32 @test5(i32 %A) {
+; CHECK-LABEL: @test5(
+; CHECK-NEXT: ret i32 [[A:%.*]]
+;
%B = sitofp i32 %A to double
%C = fptosi double %B to i32
%D = uitofp i32 %C to double
@@ -43,10 +49,11 @@ define i32 @test5(i32 %A) {
ret i32 %E
}
-; CHECK-LABEL: test6
-; CHECK: and i32 %A, 39
-; CHECK-NEXT: ret i32
define i32 @test6(i32 %A) {
+; CHECK-LABEL: @test6(
+; CHECK-NEXT: [[ADDCONV:%.*]] = and i32 [[A:%.*]], 39
+; CHECK-NEXT: ret i32 [[ADDCONV]]
+;
%B = and i32 %A, 7
%C = and i32 %A, 32
%D = sitofp i32 %B to double
@@ -56,35 +63,39 @@ define i32 @test6(i32 %A) {
ret i32 %G
}
-; CHECK-LABEL: test7
-; CHECK: ret i32
-define i32 @test7(i32 %A) nounwind {
+define i32 @test7(i32 %A) {
+; CHECK-LABEL: @test7(
+; CHECK-NEXT: ret i32 [[A:%.*]]
+;
%B = sitofp i32 %A to double
%C = fptoui double %B to i32
ret i32 %C
}
-; CHECK-LABEL: test8
-; CHECK: ret i32
-define i32 @test8(i32 %A) nounwind {
+define i32 @test8(i32 %A) {
+; CHECK-LABEL: @test8(
+; CHECK-NEXT: ret i32 [[A:%.*]]
+;
%B = uitofp i32 %A to double
%C = fptosi double %B to i32
ret i32 %C
}
-; CHECK-LABEL: test9
-; CHECK: zext i8
-; CHECK-NEXT: ret i32
-define i32 @test9(i8 %A) nounwind {
+define i32 @test9(i8 %A) {
+; CHECK-LABEL: @test9(
+; CHECK-NEXT: [[C:%.*]] = zext i8 [[A:%.*]] to i32
+; CHECK-NEXT: ret i32 [[C]]
+;
%B = sitofp i8 %A to float
%C = fptoui float %B to i32
ret i32 %C
}
-; CHECK-LABEL: test10
-; CHECK: sext i8
-; CHECK-NEXT: ret i32
-define i32 @test10(i8 %A) nounwind {
+define i32 @test10(i8 %A) {
+; CHECK-LABEL: @test10(
+; CHECK-NEXT: [[C:%.*]] = sext i8 [[A:%.*]] to i32
+; CHECK-NEXT: ret i32 [[C]]
+;
%B = sitofp i8 %A to float
%C = fptosi float %B to i32
ret i32 %C
@@ -92,10 +103,12 @@ define i32 @test10(i8 %A) nounwind {
; If the input value is outside of the range of the output cast, it's
; undefined behavior, so we can assume it fits.
-; CHECK-LABEL: test11
-; CHECK: trunc
-; CHECK-NEXT: ret i8
-define i8 @test11(i32 %A) nounwind {
+
+define i8 @test11(i32 %A) {
+; CHECK-LABEL: @test11(
+; CHECK-NEXT: [[C:%.*]] = trunc i32 [[A:%.*]] to i8
+; CHECK-NEXT: ret i8 [[C]]
+;
%B = sitofp i32 %A to float
%C = fptosi float %B to i8
ret i8 %C
@@ -103,82 +116,103 @@ define i8 @test11(i32 %A) nounwind {
; If the input value is negative, it'll be outside the range of the
; output cast, and thus undefined behavior.
-; CHECK-LABEL: test12
-; CHECK: zext i8
-; CHECK-NEXT: ret i32
-define i32 @test12(i8 %A) nounwind {
+
+define i32 @test12(i8 %A) {
+; CHECK-LABEL: @test12(
+; CHECK-NEXT: [[C:%.*]] = zext i8 [[A:%.*]] to i32
+; CHECK-NEXT: ret i32 [[C]]
+;
%B = sitofp i8 %A to float
%C = fptoui float %B to i32
ret i32 %C
}
; This can't fold because the 25-bit input doesn't fit in the mantissa.
-; CHECK-LABEL: test13
-; CHECK: uitofp
-; CHECK-NEXT: fptoui
-define i32 @test13(i25 %A) nounwind {
+
+define i32 @test13(i25 %A) {
+; CHECK-LABEL: @test13(
+; CHECK-NEXT: [[B:%.*]] = uitofp i25 [[A:%.*]] to float
+; CHECK-NEXT: [[C:%.*]] = fptoui float [[B]] to i32
+; CHECK-NEXT: ret i32 [[C]]
+;
%B = uitofp i25 %A to float
%C = fptoui float %B to i32
ret i32 %C
}
; But this one can.
-; CHECK-LABEL: test14
-; CHECK: zext i24
-; CHECK-NEXT: ret i32
-define i32 @test14(i24 %A) nounwind {
+
+define i32 @test14(i24 %A) {
+; CHECK-LABEL: @test14(
+; CHECK-NEXT: [[C:%.*]] = zext i24 [[A:%.*]] to i32
+; CHECK-NEXT: ret i32 [[C]]
+;
%B = uitofp i24 %A to float
%C = fptoui float %B to i32
ret i32 %C
}
; And this one can too.
-; CHECK-LABEL: test15
-; CHECK: trunc i32
-; CHECK-NEXT: ret i24
-define i24 @test15(i32 %A) nounwind {
+
+define i24 @test15(i32 %A) {
+; CHECK-LABEL: @test15(
+; CHECK-NEXT: [[C:%.*]] = trunc i32 [[A:%.*]] to i24
+; CHECK-NEXT: ret i24 [[C]]
+;
%B = uitofp i32 %A to float
%C = fptoui float %B to i24
ret i24 %C
}
-; This can fold because the 25-bit input is signed and we disard the sign bit.
-; CHECK-LABEL: test16
-; CHECK: zext
-define i32 @test16(i25 %A) nounwind {
- %B = sitofp i25 %A to float
- %C = fptoui float %B to i32
- ret i32 %C
+; This can fold because the 25-bit input is signed and we discard the sign bit.
+
+define i32 @test16(i25 %A) {
+; CHECK-LABEL: @test16(
+; CHECK-NEXT: [[C:%.*]] = zext i25 [[A:%.*]] to i32
+; CHECK-NEXT: ret i32 [[C]]
+;
+ %B = sitofp i25 %A to float
+ %C = fptoui float %B to i32
+ ret i32 %C
}
; This can't fold because the 26-bit input won't fit the mantissa
-; even after disarding the signed bit.
-; CHECK-LABEL: test17
-; CHECK: sitofp
-; CHECK-NEXT: fptoui
-define i32 @test17(i26 %A) nounwind {
- %B = sitofp i26 %A to float
- %C = fptoui float %B to i32
- ret i32 %C
+; even after discarding the sign bit.
+
+define i32 @test17(i26 %A) {
+; CHECK-LABEL: @test17(
+; CHECK-NEXT: [[B:%.*]] = sitofp i26 [[A:%.*]] to float
+; CHECK-NEXT: [[C:%.*]] = fptoui float [[B]] to i32
+; CHECK-NEXT: ret i32 [[C]]
+;
+ %B = sitofp i26 %A to float
+ %C = fptoui float %B to i32
+ ret i32 %C
}
-; This can fold because the 54-bit output is signed and we disard the sign bit.
-; CHECK-LABEL: test18
-; CHECK: trunc
-define i54 @test18(i64 %A) nounwind {
- %B = sitofp i64 %A to double
- %C = fptosi double %B to i54
- ret i54 %C
+; This can fold because the 54-bit output is signed and we discard the sign bit.
+
+define i54 @test18(i64 %A) {
+; CHECK-LABEL: @test18(
+; CHECK-NEXT: [[C:%.*]] = trunc i64 [[A:%.*]] to i54
+; CHECK-NEXT: ret i54 [[C]]
+;
+ %B = sitofp i64 %A to double
+ %C = fptosi double %B to i54
+ ret i54 %C
}
; This can't fold because the 55-bit output won't fit the mantissa
-; even after disarding the sign bit.
-; CHECK-LABEL: test19
-; CHECK: sitofp
-; CHECK-NEXT: fptosi
-define i55 @test19(i64 %A) nounwind {
- %B = sitofp i64 %A to double
- %C = fptosi double %B to i55
- ret i55 %C
+; even after discarding the sign bit.
+
+define i55 @test19(i64 %A) {
+; CHECK-LABEL: @test19(
+; CHECK-NEXT: [[B:%.*]] = sitofp i64 [[A:%.*]] to double
+; CHECK-NEXT: [[C:%.*]] = fptosi double [[B]] to i55
+; CHECK-NEXT: ret i55 [[C]]
+;
+ %B = sitofp i64 %A to double
+ %C = fptosi double %B to i55
+ ret i55 %C
}
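The width thresholds running through these sitofp tests follow from IEEE-754 significand sizes (a worked note, not from the patch): float carries 24 significant bits and double 53, and a signed integer gains one representable bit because its sign is not stored in the significand.

  float:  24-bit significand  ->  uitofp round-trips up to i24, sitofp up to i25
  double: 53-bit significand  ->  sitofp/fptosi round-trips i54, but not i55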
diff --git a/test/Transforms/InstCombine/srem.ll b/test/Transforms/InstCombine/srem.ll
deleted file mode 100644
index beefe4fb8d3f..000000000000
--- a/test/Transforms/InstCombine/srem.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: opt < %s -instcombine -S | grep srem
-
-define i64 @foo(i64 %x1, i64 %y2) {
- %r = sdiv i64 %x1, %y2
- %r7 = mul i64 %r, %y2
- %r8 = sub i64 %x1, %r7
- ret i64 %r8
-}
diff --git a/test/Transforms/InstCombine/stpcpy_chk-1.ll b/test/Transforms/InstCombine/stpcpy_chk-1.ll
index 2fcc34b05227..45e6879c8d26 100644
--- a/test/Transforms/InstCombine/stpcpy_chk-1.ll
+++ b/test/Transforms/InstCombine/stpcpy_chk-1.ll
@@ -64,10 +64,10 @@ define i8* @test_simplify5() {
%dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
%src = getelementptr inbounds [12 x i8], [12 x i8]* @.str, i32 0, i32 0
-; CHECK-NEXT: %len = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i1 false)
+; CHECK-NEXT: %len = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i1 false, i1 false)
; CHECK-NEXT: %1 = call i8* @__memcpy_chk(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str, i32 0, i32 0), i32 12, i32 %len)
; CHECK-NEXT: ret i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 11)
- %len = call i32 @llvm.objectsize.i32.p0i8(i8* %dst, i1 false)
+ %len = call i32 @llvm.objectsize.i32.p0i8(i8* %dst, i1 false, i1 false)
%ret = call i8* @__stpcpy_chk(i8* %dst, i8* %src, i32 %len)
ret i8* %ret
}
@@ -81,7 +81,7 @@ define i8* @test_simplify6() {
; CHECK-NEXT: %strlen = call i32 @strlen(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0))
; CHECK-NEXT: %1 = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 %strlen
; CHECK-NEXT: ret i8* %1
- %len = call i32 @llvm.objectsize.i32.p0i8(i8* %dst, i1 false)
+ %len = call i32 @llvm.objectsize.i32.p0i8(i8* %dst, i1 false, i1 false)
%ret = call i8* @__stpcpy_chk(i8* %dst, i8* %dst, i32 %len)
ret i8* %ret
}
@@ -100,4 +100,4 @@ define i8* @test_no_simplify1() {
}
declare i8* @__stpcpy_chk(i8*, i8*, i32) nounwind
-declare i32 @llvm.objectsize.i32.p0i8(i8*, i1) nounwind readonly
+declare i32 @llvm.objectsize.i32.p0i8(i8*, i1, i1) nounwind readonly
diff --git a/test/Transforms/InstCombine/strcpy_chk-1.ll b/test/Transforms/InstCombine/strcpy_chk-1.ll
index 7a21a49c993c..824776c6ca18 100644
--- a/test/Transforms/InstCombine/strcpy_chk-1.ll
+++ b/test/Transforms/InstCombine/strcpy_chk-1.ll
@@ -64,10 +64,10 @@ define i8* @test_simplify5() {
%dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
%src = getelementptr inbounds [12 x i8], [12 x i8]* @.str, i32 0, i32 0
-; CHECK-NEXT: %len = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i1 false)
+; CHECK-NEXT: %len = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i1 false, i1 false)
; CHECK-NEXT: %1 = call i8* @__memcpy_chk(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str, i32 0, i32 0), i32 12, i32 %len)
; CHECK-NEXT: ret i8* %1
- %len = call i32 @llvm.objectsize.i32.p0i8(i8* %dst, i1 false)
+ %len = call i32 @llvm.objectsize.i32.p0i8(i8* %dst, i1 false, i1 false)
%ret = call i8* @__strcpy_chk(i8* %dst, i8* %src, i32 %len)
ret i8* %ret
}
@@ -78,10 +78,10 @@ define i8* @test_simplify6() {
; CHECK-LABEL: @test_simplify6(
%dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
-; CHECK-NEXT: %len = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i1 false)
+; CHECK-NEXT: %len = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i1 false, i1 false)
; CHECK-NEXT: %ret = call i8* @__strcpy_chk(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i32 %len)
; CHECK-NEXT: ret i8* %ret
- %len = call i32 @llvm.objectsize.i32.p0i8(i8* %dst, i1 false)
+ %len = call i32 @llvm.objectsize.i32.p0i8(i8* %dst, i1 false, i1 false)
%ret = call i8* @__strcpy_chk(i8* %dst, i8* %dst, i32 %len)
ret i8* %ret
}
@@ -100,4 +100,4 @@ define i8* @test_no_simplify1() {
}
declare i8* @__strcpy_chk(i8*, i8*, i32) nounwind
-declare i32 @llvm.objectsize.i32.p0i8(i8*, i1) nounwind readonly
+declare i32 @llvm.objectsize.i32.p0i8(i8*, i1, i1) nounwind readonly
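These mechanical check updates track @llvm.objectsize growing a third i1 parameter. The first i1 already selected the value returned for an unknown size; the new i1 appears to control whether a null pointer is treated as having unknown size (an assumption -- the patch does not spell it out). A minimal call under the new three-argument signature:

declare i32 @llvm.objectsize.i32.p0i8(i8*, i1, i1) nounwind readonly

define i32 @objsize_demo(i8* %p) {
  ; Assumed parameter meanings: (ptr, min-on-unknown, null-is-unknown).
  %sz = call i32 @llvm.objectsize.i32.p0i8(i8* %p, i1 false, i1 false)
  ret i32 %sz
}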
diff --git a/test/Transforms/InstCombine/sub-xor.ll b/test/Transforms/InstCombine/sub-xor.ll
index 9a0814c2c92f..812305d8e489 100644
--- a/test/Transforms/InstCombine/sub-xor.ll
+++ b/test/Transforms/InstCombine/sub-xor.ll
@@ -48,13 +48,3 @@ define i32 @test3(i32 %x) {
ret i32 %add
}
-define i32 @test4(i32 %x) {
-; CHECK-LABEL: @test4(
-; CHECK-NEXT: [[ADD:%.*]] = add i32 %x, -2147483606
-; CHECK-NEXT: ret i32 [[ADD]]
-;
- %sub = xor i32 %x, 2147483648
- %add = add i32 %sub, 42
- ret i32 %add
-}
-
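For the record, the deleted test4 encoded a real identity (worked arithmetic; the patch does not state the reason for the removal). Adding 0x80000000 just flips bit 31, because any carry out of the top bit is discarded, so the xor and the add are interchangeable:

  x ^ 0x80000000         ==  x + 0x80000000   (mod 2^32)
  (x ^ 0x80000000) + 42  ==  x + 0x8000002A   ==  x + (-2147483606) as i32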
diff --git a/test/Transforms/InstCombine/sub.ll b/test/Transforms/InstCombine/sub.ll
index 32541f1f893e..2388301c726e 100644
--- a/test/Transforms/InstCombine/sub.ll
+++ b/test/Transforms/InstCombine/sub.ll
@@ -15,7 +15,7 @@ define i32 @test1(i32 %A) {
define i32 @test2(i32 %A) {
; CHECK-LABEL: @test2(
-; CHECK-NEXT: ret i32 %A
+; CHECK-NEXT: ret i32 [[A:%.*]]
;
%B = sub i32 %A, 0
ret i32 %B
@@ -23,7 +23,7 @@ define i32 @test2(i32 %A) {
define i32 @test3(i32 %A) {
; CHECK-LABEL: @test3(
-; CHECK-NEXT: ret i32 %A
+; CHECK-NEXT: ret i32 [[A:%.*]]
;
%B = sub i32 0, %A
%C = sub i32 0, %B
@@ -32,7 +32,7 @@ define i32 @test3(i32 %A) {
define i32 @test4(i32 %A, i32 %x) {
; CHECK-LABEL: @test4(
-; CHECK-NEXT: [[C:%.*]] = add i32 %x, %A
+; CHECK-NEXT: [[C:%.*]] = add i32 [[X:%.*]], [[A:%.*]]
; CHECK-NEXT: ret i32 [[C]]
;
%B = sub i32 0, %A
@@ -42,8 +42,8 @@ define i32 @test4(i32 %A, i32 %x) {
define i32 @test5(i32 %A, i32 %B, i32 %C) {
; CHECK-LABEL: @test5(
-; CHECK-NEXT: [[D1:%.*]] = sub i32 %C, %B
-; CHECK-NEXT: [[E:%.*]] = add i32 [[D1]], %A
+; CHECK-NEXT: [[D1:%.*]] = sub i32 [[C:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[E:%.*]] = add i32 [[D1]], [[A:%.*]]
; CHECK-NEXT: ret i32 [[E]]
;
%D = sub i32 %B, %C
@@ -53,8 +53,8 @@ define i32 @test5(i32 %A, i32 %B, i32 %C) {
define i32 @test6(i32 %A, i32 %B) {
; CHECK-LABEL: @test6(
-; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 %B, -1
-; CHECK-NEXT: [[D:%.*]] = and i32 %A, [[B_NOT]]
+; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 [[B:%.*]], -1
+; CHECK-NEXT: [[D:%.*]] = and i32 [[B_NOT]], [[A:%.*]]
; CHECK-NEXT: ret i32 [[D]]
;
%C = and i32 %A, %B
@@ -62,9 +62,20 @@ define i32 @test6(i32 %A, i32 %B) {
ret i32 %D
}
+define i32 @test6commuted(i32 %A, i32 %B) {
+; CHECK-LABEL: @test6commuted(
+; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 [[B:%.*]], -1
+; CHECK-NEXT: [[D:%.*]] = and i32 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: ret i32 [[D]]
+;
+ %C = and i32 %B, %A
+ %D = sub i32 %A, %C
+ ret i32 %D
+}
+
define i32 @test7(i32 %A) {
; CHECK-LABEL: @test7(
-; CHECK-NEXT: [[B:%.*]] = xor i32 %A, -1
+; CHECK-NEXT: [[B:%.*]] = xor i32 [[A:%.*]], -1
; CHECK-NEXT: ret i32 [[B]]
;
%B = sub i32 -1, %A
@@ -73,7 +84,7 @@ define i32 @test7(i32 %A) {
define i32 @test8(i32 %A) {
; CHECK-LABEL: @test8(
-; CHECK-NEXT: [[C:%.*]] = shl i32 %A, 3
+; CHECK-NEXT: [[C:%.*]] = shl i32 [[A:%.*]], 3
; CHECK-NEXT: ret i32 [[C]]
;
%B = mul i32 9, %A
@@ -83,7 +94,7 @@ define i32 @test8(i32 %A) {
define i32 @test9(i32 %A) {
; CHECK-LABEL: @test9(
-; CHECK-NEXT: [[C:%.*]] = mul i32 %A, -2
+; CHECK-NEXT: [[C:%.*]] = mul i32 [[A:%.*]], -2
; CHECK-NEXT: ret i32 [[C]]
;
%B = mul i32 3, %A
@@ -93,7 +104,7 @@ define i32 @test9(i32 %A) {
define i32 @test10(i32 %A, i32 %B) {
; CHECK-LABEL: @test10(
-; CHECK-NEXT: [[E:%.*]] = mul i32 %A, %B
+; CHECK-NEXT: [[E:%.*]] = mul i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret i32 [[E]]
;
%C = sub i32 0, %A
@@ -104,7 +115,7 @@ define i32 @test10(i32 %A, i32 %B) {
define i32 @test10a(i32 %A) {
; CHECK-LABEL: @test10a(
-; CHECK-NEXT: [[E:%.*]] = mul i32 %A, -7
+; CHECK-NEXT: [[E:%.*]] = mul i32 [[A:%.*]], -7
; CHECK-NEXT: ret i32 [[E]]
;
%C = sub i32 0, %A
@@ -114,7 +125,7 @@ define i32 @test10a(i32 %A) {
define i1 @test11(i8 %A, i8 %B) {
; CHECK-LABEL: @test11(
-; CHECK-NEXT: [[D:%.*]] = icmp ne i8 %A, %B
+; CHECK-NEXT: [[D:%.*]] = icmp ne i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret i1 [[D]]
;
%C = sub i8 %A, %B
@@ -124,7 +135,7 @@ define i1 @test11(i8 %A, i8 %B) {
define <2 x i1> @test11vec(<2 x i8> %A, <2 x i8> %B) {
; CHECK-LABEL: @test11vec(
-; CHECK-NEXT: [[D:%.*]] = icmp ne <2 x i8> %A, %B
+; CHECK-NEXT: [[D:%.*]] = icmp ne <2 x i8> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <2 x i1> [[D]]
;
%C = sub <2 x i8> %A, %B
@@ -134,7 +145,7 @@ define <2 x i1> @test11vec(<2 x i8> %A, <2 x i8> %B) {
define i32 @test12(i32 %A) {
; CHECK-LABEL: @test12(
-; CHECK-NEXT: [[C:%.*]] = lshr i32 %A, 31
+; CHECK-NEXT: [[C:%.*]] = lshr i32 [[A:%.*]], 31
; CHECK-NEXT: ret i32 [[C]]
;
%B = ashr i32 %A, 31
@@ -144,7 +155,7 @@ define i32 @test12(i32 %A) {
define i32 @test13(i32 %A) {
; CHECK-LABEL: @test13(
-; CHECK-NEXT: [[C:%.*]] = ashr i32 %A, 31
+; CHECK-NEXT: [[C:%.*]] = ashr i32 [[A:%.*]], 31
; CHECK-NEXT: ret i32 [[C]]
;
%B = lshr i32 %A, 31
@@ -154,7 +165,7 @@ define i32 @test13(i32 %A) {
define <2 x i32> @test12vec(<2 x i32> %A) {
; CHECK-LABEL: @test12vec(
-; CHECK-NEXT: [[C:%.*]] = lshr <2 x i32> %A, <i32 31, i32 31>
+; CHECK-NEXT: [[C:%.*]] = lshr <2 x i32> [[A:%.*]], <i32 31, i32 31>
; CHECK-NEXT: ret <2 x i32> [[C]]
;
%B = ashr <2 x i32> %A, <i32 31, i32 31>
@@ -164,7 +175,7 @@ define <2 x i32> @test12vec(<2 x i32> %A) {
define <2 x i32> @test13vec(<2 x i32> %A) {
; CHECK-LABEL: @test13vec(
-; CHECK-NEXT: [[C:%.*]] = ashr <2 x i32> %A, <i32 31, i32 31>
+; CHECK-NEXT: [[C:%.*]] = ashr <2 x i32> [[A:%.*]], <i32 31, i32 31>
; CHECK-NEXT: ret <2 x i32> [[C]]
;
%B = lshr <2 x i32> %A, <i32 31, i32 31>
@@ -174,8 +185,8 @@ define <2 x i32> @test13vec(<2 x i32> %A) {
define i32 @test15(i32 %A, i32 %B) {
; CHECK-LABEL: @test15(
-; CHECK-NEXT: [[C:%.*]] = sub i32 0, %A
-; CHECK-NEXT: [[D:%.*]] = srem i32 %B, [[C]]
+; CHECK-NEXT: [[C:%.*]] = sub i32 0, [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = srem i32 [[B:%.*]], [[C]]
; CHECK-NEXT: ret i32 [[D]]
;
%C = sub i32 0, %A
@@ -185,7 +196,7 @@ define i32 @test15(i32 %A, i32 %B) {
define i32 @test16(i32 %A) {
; CHECK-LABEL: @test16(
-; CHECK-NEXT: [[Y:%.*]] = sdiv i32 %A, -1123
+; CHECK-NEXT: [[Y:%.*]] = sdiv i32 [[A:%.*]], -1123
; CHECK-NEXT: ret i32 [[Y]]
;
%X = sdiv i32 %A, 1123
@@ -197,7 +208,7 @@ define i32 @test16(i32 %A) {
; PR3142
define i32 @test17(i32 %A) {
; CHECK-LABEL: @test17(
-; CHECK-NEXT: [[B:%.*]] = sub i32 0, %A
+; CHECK-NEXT: [[B:%.*]] = sub i32 0, [[A:%.*]]
; CHECK-NEXT: [[C:%.*]] = sdiv i32 [[B]], 1234
; CHECK-NEXT: ret i32 [[C]]
;
@@ -218,7 +229,7 @@ define i64 @test18(i64 %Y) {
define i32 @test19(i32 %X, i32 %Y) {
; CHECK-LABEL: @test19(
-; CHECK-NEXT: ret i32 %X
+; CHECK-NEXT: ret i32 [[X:%.*]]
;
%Z = sub i32 %X, %Y
%Q = add i32 %Z, %Y
@@ -227,7 +238,7 @@ define i32 @test19(i32 %X, i32 %Y) {
define i1 @test20(i32 %g, i32 %h) {
; CHECK-LABEL: @test20(
-; CHECK-NEXT: [[TMP_4:%.*]] = icmp ne i32 %h, 0
+; CHECK-NEXT: [[TMP_4:%.*]] = icmp ne i32 [[H:%.*]], 0
; CHECK-NEXT: ret i1 [[TMP_4]]
;
%tmp.2 = sub i32 %g, %h
@@ -237,7 +248,7 @@ define i1 @test20(i32 %g, i32 %h) {
define i1 @test21(i32 %g, i32 %h) {
; CHECK-LABEL: @test21(
-; CHECK-NEXT: [[TMP_4:%.*]] = icmp ne i32 %h, 0
+; CHECK-NEXT: [[TMP_4:%.*]] = icmp ne i32 [[H:%.*]], 0
; CHECK-NEXT: ret i1 [[TMP_4]]
;
%tmp.2 = sub i32 %g, %h
@@ -248,7 +259,7 @@ define i1 @test21(i32 %g, i32 %h) {
; PR2298
define zeroext i1 @test22(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: @test22(
-; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 %b, %a
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[B:%.*]], [[A:%.*]]
; CHECK-NEXT: ret i1 [[TMP5]]
;
%tmp2 = sub i32 0, %a
@@ -260,7 +271,7 @@ define zeroext i1 @test22(i32 %a, i32 %b) nounwind {
; rdar://7362831
define i32 @test23(i8* %P, i64 %A){
; CHECK-LABEL: @test23(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 %A to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A:%.*]] to i32
; CHECK-NEXT: ret i32 [[TMP1]]
;
%B = getelementptr inbounds i8, i8* %P, i64 %A
@@ -274,7 +285,7 @@ define i32 @test23(i8* %P, i64 %A){
define i8 @test23_as1(i8 addrspace(1)* %P, i16 %A) {
; CHECK-LABEL: @test23_as1(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i16 %A to i8
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i16 [[A:%.*]] to i8
; CHECK-NEXT: ret i8 [[TMP1]]
;
%B = getelementptr inbounds i8, i8 addrspace(1)* %P, i16 %A
@@ -288,7 +299,7 @@ define i8 @test23_as1(i8 addrspace(1)* %P, i16 %A) {
define i64 @test24(i8* %P, i64 %A){
; CHECK-LABEL: @test24(
-; CHECK-NEXT: ret i64 %A
+; CHECK-NEXT: ret i64 [[A:%.*]]
;
%B = getelementptr inbounds i8, i8* %P, i64 %A
%C = ptrtoint i8* %B to i64
@@ -299,7 +310,7 @@ define i64 @test24(i8* %P, i64 %A){
define i16 @test24_as1(i8 addrspace(1)* %P, i16 %A) {
; CHECK-LABEL: @test24_as1(
-; CHECK-NEXT: ret i16 %A
+; CHECK-NEXT: ret i16 [[A:%.*]]
;
%B = getelementptr inbounds i8, i8 addrspace(1)* %P, i16 %A
%C = ptrtoint i8 addrspace(1)* %B to i16
@@ -310,7 +321,7 @@ define i16 @test24_as1(i8 addrspace(1)* %P, i16 %A) {
define i64 @test24a(i8* %P, i64 %A){
; CHECK-LABEL: @test24a(
-; CHECK-NEXT: [[DIFF_NEG:%.*]] = sub i64 0, %A
+; CHECK-NEXT: [[DIFF_NEG:%.*]] = sub i64 0, [[A:%.*]]
; CHECK-NEXT: ret i64 [[DIFF_NEG]]
;
%B = getelementptr inbounds i8, i8* %P, i64 %A
@@ -322,7 +333,7 @@ define i64 @test24a(i8* %P, i64 %A){
define i16 @test24a_as1(i8 addrspace(1)* %P, i16 %A) {
; CHECK-LABEL: @test24a_as1(
-; CHECK-NEXT: [[DIFF_NEG:%.*]] = sub i16 0, %A
+; CHECK-NEXT: [[DIFF_NEG:%.*]] = sub i16 0, [[A:%.*]]
; CHECK-NEXT: ret i16 [[DIFF_NEG]]
;
%B = getelementptr inbounds i8, i8 addrspace(1)* %P, i16 %A
@@ -337,7 +348,7 @@ define i16 @test24a_as1(i8 addrspace(1)* %P, i16 %A) {
define i64 @test24b(i8* %P, i64 %A){
; CHECK-LABEL: @test24b(
-; CHECK-NEXT: [[B_IDX:%.*]] = shl nuw i64 %A, 1
+; CHECK-NEXT: [[B_IDX:%.*]] = shl nuw i64 [[A:%.*]], 1
; CHECK-NEXT: ret i64 [[B_IDX]]
;
%B = getelementptr inbounds [42 x i16], [42 x i16]* @Arr, i64 0, i64 %A
@@ -349,7 +360,7 @@ define i64 @test24b(i8* %P, i64 %A){
define i64 @test25(i8* %P, i64 %A){
; CHECK-LABEL: @test25(
-; CHECK-NEXT: [[B_IDX:%.*]] = shl nuw i64 %A, 1
+; CHECK-NEXT: [[B_IDX:%.*]] = shl nuw i64 [[A:%.*]], 1
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[B_IDX]], -84
; CHECK-NEXT: ret i64 [[TMP1]]
;
@@ -363,7 +374,7 @@ define i64 @test25(i8* %P, i64 %A){
define i16 @test25_as1(i8 addrspace(1)* %P, i64 %A) {
; CHECK-LABEL: @test25_as1(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 %A to i16
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A:%.*]] to i16
; CHECK-NEXT: [[B_IDX:%.*]] = shl nuw i16 [[TMP1]], 1
; CHECK-NEXT: [[TMP2:%.*]] = add i16 [[B_IDX]], -84
; CHECK-NEXT: ret i16 [[TMP2]]
@@ -376,7 +387,7 @@ define i16 @test25_as1(i8 addrspace(1)* %P, i64 %A) {
define i32 @test26(i32 %x) {
; CHECK-LABEL: @test26(
-; CHECK-NEXT: [[NEG:%.*]] = shl i32 -3, %x
+; CHECK-NEXT: [[NEG:%.*]] = shl i32 -3, [[X:%.*]]
; CHECK-NEXT: ret i32 [[NEG]]
;
%shl = shl i32 3, %x
@@ -386,8 +397,8 @@ define i32 @test26(i32 %x) {
define i32 @test27(i32 %x, i32 %y) {
; CHECK-LABEL: @test27(
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 %y, 3
-; CHECK-NEXT: [[SUB:%.*]] = add i32 [[TMP1]], %x
+; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[Y:%.*]], 3
+; CHECK-NEXT: [[SUB:%.*]] = add i32 [[TMP1]], [[X:%.*]]
; CHECK-NEXT: ret i32 [[SUB]]
;
%mul = mul i32 %y, -8
@@ -395,10 +406,87 @@ define i32 @test27(i32 %x, i32 %y) {
ret i32 %sub
}
+define <2 x i32> @test27vec(<2 x i32> %x, <2 x i32> %y) {
+; CHECK-LABEL: @test27vec(
+; CHECK-NEXT: [[TMP1:%.*]] = mul <2 x i32> [[Y:%.*]], <i32 8, i32 6>
+; CHECK-NEXT: [[SUB:%.*]] = add <2 x i32> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: ret <2 x i32> [[SUB]]
+;
+ %mul = mul <2 x i32> %y, <i32 -8, i32 -6>
+ %sub = sub <2 x i32> %x, %mul
+ ret <2 x i32> %sub
+}
+
+define <2 x i32> @test27vecsplat(<2 x i32> %x, <2 x i32> %y) {
+; CHECK-LABEL: @test27vecsplat(
+; CHECK-NEXT: [[TMP1:%.*]] = shl <2 x i32> [[Y:%.*]], <i32 3, i32 3>
+; CHECK-NEXT: [[SUB:%.*]] = add <2 x i32> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: ret <2 x i32> [[SUB]]
+;
+ %mul = mul <2 x i32> %y, <i32 -8, i32 -8>
+ %sub = sub <2 x i32> %x, %mul
+ ret <2 x i32> %sub
+}
+
+define <2 x i32> @test27vecmixed(<2 x i32> %x, <2 x i32> %y) {
+; CHECK-LABEL: @test27vecmixed(
+; CHECK-NEXT: [[TMP1:%.*]] = mul <2 x i32> [[Y:%.*]], <i32 8, i32 -8>
+; CHECK-NEXT: [[SUB:%.*]] = add <2 x i32> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: ret <2 x i32> [[SUB]]
+;
+ %mul = mul <2 x i32> %y, <i32 -8, i32 8>
+ %sub = sub <2 x i32> %x, %mul
+ ret <2 x i32> %sub
+}
+
+define i32 @test27commuted(i32 %x, i32 %y) {
+; CHECK-LABEL: @test27commuted(
+; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[Y:%.*]], 3
+; CHECK-NEXT: [[SUB:%.*]] = add i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: ret i32 [[SUB]]
+;
+ %mul = mul i32 -8, %y
+ %sub = sub i32 %x, %mul
+ ret i32 %sub
+}
+
+define <2 x i32> @test27commutedvec(<2 x i32> %x, <2 x i32> %y) {
+; CHECK-LABEL: @test27commutedvec(
+; CHECK-NEXT: [[TMP1:%.*]] = mul <2 x i32> [[Y:%.*]], <i32 8, i32 6>
+; CHECK-NEXT: [[SUB:%.*]] = add <2 x i32> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: ret <2 x i32> [[SUB]]
+;
+ %mul = mul <2 x i32> <i32 -8, i32 -6>, %y
+ %sub = sub <2 x i32> %x, %mul
+ ret <2 x i32> %sub
+}
+
+define <2 x i32> @test27commutedvecsplat(<2 x i32> %x, <2 x i32> %y) {
+; CHECK-LABEL: @test27commutedvecsplat(
+; CHECK-NEXT: [[TMP1:%.*]] = shl <2 x i32> [[Y:%.*]], <i32 3, i32 3>
+; CHECK-NEXT: [[SUB:%.*]] = add <2 x i32> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: ret <2 x i32> [[SUB]]
+;
+ %mul = mul <2 x i32> <i32 -8, i32 -8>, %y
+ %sub = sub <2 x i32> %x, %mul
+ ret <2 x i32> %sub
+}
+
+define <2 x i32> @test27commutedvecmixed(<2 x i32> %x, <2 x i32> %y) {
+; CHECK-LABEL: @test27commutedvecmixed(
+; CHECK-NEXT: [[TMP1:%.*]] = mul <2 x i32> [[Y:%.*]], <i32 8, i32 -8>
+; CHECK-NEXT: [[SUB:%.*]] = add <2 x i32> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: ret <2 x i32> [[SUB]]
+;
+ %mul = mul <2 x i32> <i32 -8, i32 8>, %y
+ %sub = sub <2 x i32> %x, %mul
+ ret <2 x i32> %sub
+}
+
define i32 @test28(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @test28(
-; CHECK-NEXT: [[TMP1:%.*]] = mul i32 %z, %y
-; CHECK-NEXT: [[SUB:%.*]] = add i32 [[TMP1]], %x
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[Z:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i32 [[TMP1]], [[X:%.*]]
; CHECK-NEXT: ret i32 [[SUB]]
;
%neg = sub i32 0, %z
@@ -407,9 +495,21 @@ define i32 @test28(i32 %x, i32 %y, i32 %z) {
ret i32 %sub
}
+define i32 @test28commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @test28commuted(
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[Y:%.*]], [[Z:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: ret i32 [[SUB]]
+;
+ %neg = sub i32 0, %z
+ %mul = mul i32 %y, %neg
+ %sub = sub i32 %x, %mul
+ ret i32 %sub
+}
+
define i64 @test29(i8* %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test29(
-; CHECK-NEXT: [[TMP1:%.*]] = sub i64 %i, %j
+; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[I:%.*]], [[J:%.*]]
; CHECK-NEXT: ret i64 [[TMP1]]
;
%gep1 = getelementptr inbounds i8, i8* %foo, i64 %i
@@ -422,8 +522,8 @@ define i64 @test29(i8* %foo, i64 %i, i64 %j) {
define i64 @test30(i8* %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test30(
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i64 %i, 2
-; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[GEP1_IDX]], %j
+; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i64 [[I:%.*]], 2
+; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[GEP1_IDX]], [[J:%.*]]
; CHECK-NEXT: ret i64 [[TMP1]]
;
%bit = bitcast i8* %foo to i32*
@@ -437,8 +537,8 @@ define i64 @test30(i8* %foo, i64 %i, i64 %j) {
define i16 @test30_as1(i8 addrspace(1)* %foo, i16 %i, i16 %j) {
; CHECK-LABEL: @test30_as1(
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i16 %i, 2
-; CHECK-NEXT: [[TMP1:%.*]] = sub i16 [[GEP1_IDX]], %j
+; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nuw i16 [[I:%.*]], 2
+; CHECK-NEXT: [[TMP1:%.*]] = sub i16 [[GEP1_IDX]], [[J:%.*]]
; CHECK-NEXT: ret i16 [[TMP1]]
;
%bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
@@ -452,7 +552,7 @@ define i16 @test30_as1(i8 addrspace(1)* %foo, i16 %i, i16 %j) {
define <2 x i64> @test31(<2 x i64> %A) {
; CHECK-LABEL: @test31(
-; CHECK-NEXT: [[SUB:%.*]] = add <2 x i64> %A, <i64 3, i64 4>
+; CHECK-NEXT: [[SUB:%.*]] = add <2 x i64> [[A:%.*]], <i64 3, i64 4>
; CHECK-NEXT: ret <2 x i64> [[SUB]]
;
%xor = xor <2 x i64> %A, <i64 -1, i64 -1>
@@ -462,7 +562,7 @@ define <2 x i64> @test31(<2 x i64> %A) {
define <2 x i64> @test32(<2 x i64> %A) {
; CHECK-LABEL: @test32(
-; CHECK-NEXT: [[SUB:%.*]] = sub <2 x i64> <i64 3, i64 4>, %A
+; CHECK-NEXT: [[SUB:%.*]] = sub <2 x i64> <i64 3, i64 4>, [[A:%.*]]
; CHECK-NEXT: ret <2 x i64> [[SUB]]
;
%add = add <2 x i64> %A, <i64 -1, i64 -1>
@@ -472,7 +572,7 @@ define <2 x i64> @test32(<2 x i64> %A) {
define <2 x i64> @test33(<2 x i1> %A) {
; CHECK-LABEL: @test33(
-; CHECK-NEXT: [[SUB:%.*]] = sext <2 x i1> %A to <2 x i64>
+; CHECK-NEXT: [[SUB:%.*]] = sext <2 x i1> [[A:%.*]] to <2 x i64>
; CHECK-NEXT: ret <2 x i64> [[SUB]]
;
%ext = zext <2 x i1> %A to <2 x i64>
@@ -482,7 +582,7 @@ define <2 x i64> @test33(<2 x i1> %A) {
define <2 x i64> @test34(<2 x i1> %A) {
; CHECK-LABEL: @test34(
-; CHECK-NEXT: [[SUB:%.*]] = zext <2 x i1> %A to <2 x i64>
+; CHECK-NEXT: [[SUB:%.*]] = zext <2 x i1> [[A:%.*]] to <2 x i64>
; CHECK-NEXT: ret <2 x i64> [[SUB]]
;
%ext = sext <2 x i1> %A to <2 x i64>
@@ -492,7 +592,7 @@ define <2 x i64> @test34(<2 x i1> %A) {
define <2 x i64> @test35(<2 x i64> %A) {
; CHECK-LABEL: @test35(
-; CHECK-NEXT: [[SUB:%.*]] = mul <2 x i64> %A, <i64 -2, i64 -3>
+; CHECK-NEXT: [[SUB:%.*]] = mul <2 x i64> [[A:%.*]], <i64 -2, i64 -3>
; CHECK-NEXT: ret <2 x i64> [[SUB]]
;
%mul = mul <2 x i64> %A, <i64 3, i64 4>
@@ -502,7 +602,7 @@ define <2 x i64> @test35(<2 x i64> %A) {
define <2 x i64> @test36(<2 x i64> %A) {
; CHECK-LABEL: @test36(
-; CHECK-NEXT: [[SUB:%.*]] = mul <2 x i64> %A, <i64 7, i64 15>
+; CHECK-NEXT: [[SUB:%.*]] = mul <2 x i64> [[A:%.*]], <i64 7, i64 15>
; CHECK-NEXT: ret <2 x i64> [[SUB]]
;
%shl = shl <2 x i64> %A, <i64 3, i64 4>
@@ -512,7 +612,7 @@ define <2 x i64> @test36(<2 x i64> %A) {
define <2 x i32> @test37(<2 x i32> %A) {
; CHECK-LABEL: @test37(
-; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i32> %A, <i32 -2147483648, i32 -2147483648>
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i32> [[A:%.*]], <i32 -2147483648, i32 -2147483648>
; CHECK-NEXT: [[SUB:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[SUB]]
;
@@ -523,7 +623,7 @@ define <2 x i32> @test37(<2 x i32> %A) {
define i32 @test38(i32 %A) {
; CHECK-LABEL: @test38(
-; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 %A, -2147483648
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[A:%.*]], -2147483648
; CHECK-NEXT: [[SUB:%.*]] = sext i1 [[TMP1]] to i32
; CHECK-NEXT: ret i32 [[SUB]]
;
@@ -534,7 +634,7 @@ define i32 @test38(i32 %A) {
define i32 @test39(i32 %A, i32 %x) {
; CHECK-LABEL: @test39(
-; CHECK-NEXT: [[C:%.*]] = add i32 %x, %A
+; CHECK-NEXT: [[C:%.*]] = add i32 [[X:%.*]], [[A:%.*]]
; CHECK-NEXT: ret i32 [[C]]
;
%B = sub i32 0, %A
@@ -544,8 +644,8 @@ define i32 @test39(i32 %A, i32 %x) {
define i16 @test40(i16 %a, i16 %b) {
; CHECK-LABEL: @test40(
-; CHECK-NEXT: [[ASHR:%.*]] = ashr i16 %a, 1
-; CHECK-NEXT: [[ASHR1:%.*]] = ashr i16 %b, 1
+; CHECK-NEXT: [[ASHR:%.*]] = ashr i16 [[A:%.*]], 1
+; CHECK-NEXT: [[ASHR1:%.*]] = ashr i16 [[B:%.*]], 1
; CHECK-NEXT: [[SUB:%.*]] = sub nsw i16 [[ASHR]], [[ASHR1]]
; CHECK-NEXT: ret i16 [[SUB]]
;
@@ -557,8 +657,8 @@ define i16 @test40(i16 %a, i16 %b) {
define i32 @test41(i16 %a, i16 %b) {
; CHECK-LABEL: @test41(
-; CHECK-NEXT: [[CONV:%.*]] = sext i16 %a to i32
-; CHECK-NEXT: [[CONV1:%.*]] = sext i16 %b to i32
+; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[A:%.*]] to i32
+; CHECK-NEXT: [[CONV1:%.*]] = sext i16 [[B:%.*]] to i32
; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[CONV]], [[CONV1]]
; CHECK-NEXT: ret i32 [[SUB]]
;
@@ -570,8 +670,8 @@ define i32 @test41(i16 %a, i16 %b) {
define i4 @test42(i4 %x, i4 %y) {
; CHECK-LABEL: @test42(
-; CHECK-NEXT: [[A:%.*]] = and i4 %y, 7
-; CHECK-NEXT: [[B:%.*]] = and i4 %x, 7
+; CHECK-NEXT: [[A:%.*]] = and i4 [[Y:%.*]], 7
+; CHECK-NEXT: [[B:%.*]] = and i4 [[X:%.*]], 7
; CHECK-NEXT: [[C:%.*]] = sub nsw i4 [[A]], [[B]]
; CHECK-NEXT: ret i4 [[C]]
;
@@ -583,8 +683,8 @@ define i4 @test42(i4 %x, i4 %y) {
define i4 @test43(i4 %x, i4 %y) {
; CHECK-LABEL: @test43(
-; CHECK-NEXT: [[A:%.*]] = or i4 %x, -8
-; CHECK-NEXT: [[B:%.*]] = and i4 %y, 7
+; CHECK-NEXT: [[A:%.*]] = or i4 [[X:%.*]], -8
+; CHECK-NEXT: [[B:%.*]] = and i4 [[Y:%.*]], 7
; CHECK-NEXT: [[C:%.*]] = sub nuw i4 [[A]], [[B]]
; CHECK-NEXT: ret i4 [[C]]
;
@@ -596,7 +696,7 @@ define i4 @test43(i4 %x, i4 %y) {
define i32 @test44(i32 %x) {
; CHECK-LABEL: @test44(
-; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 %x, -32768
+; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[X:%.*]], -32768
; CHECK-NEXT: ret i32 [[SUB]]
;
%sub = sub nsw i32 %x, 32768
@@ -605,7 +705,7 @@ define i32 @test44(i32 %x) {
define i32 @test45(i32 %x, i32 %y) {
; CHECK-LABEL: @test45(
-; CHECK-NEXT: [[SUB:%.*]] = and i32 %x, %y
+; CHECK-NEXT: [[SUB:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i32 [[SUB]]
;
%or = or i32 %x, %y
@@ -614,10 +714,21 @@ define i32 @test45(i32 %x, i32 %y) {
ret i32 %sub
}
+define i32 @test45commuted(i32 %x, i32 %y) {
+; CHECK-LABEL: @test45commuted(
+; CHECK-NEXT: [[SUB:%.*]] = and i32 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: ret i32 [[SUB]]
+;
+ %or = or i32 %x, %y
+ %xor = xor i32 %y, %x
+ %sub = sub i32 %or, %xor
+ ret i32 %sub
+}
+
define i32 @test46(i32 %x, i32 %y) {
; CHECK-LABEL: @test46(
-; CHECK-NEXT: [[X_NOT:%.*]] = xor i32 %x, -1
-; CHECK-NEXT: [[SUB:%.*]] = and i32 %y, [[X_NOT]]
+; CHECK-NEXT: [[X_NOT:%.*]] = xor i32 [[X:%.*]], -1
+; CHECK-NEXT: [[SUB:%.*]] = and i32 [[X_NOT]], [[Y:%.*]]
; CHECK-NEXT: ret i32 [[SUB]]
;
%or = or i32 %x, %y
@@ -625,10 +736,21 @@ define i32 @test46(i32 %x, i32 %y) {
ret i32 %sub
}
+define i32 @test46commuted(i32 %x, i32 %y) {
+; CHECK-LABEL: @test46commuted(
+; CHECK-NEXT: [[X_NOT:%.*]] = xor i32 [[X:%.*]], -1
+; CHECK-NEXT: [[SUB:%.*]] = and i32 [[X_NOT]], [[Y:%.*]]
+; CHECK-NEXT: ret i32 [[SUB]]
+;
+ %or = or i32 %y, %x
+ %sub = sub i32 %or, %x
+ ret i32 %sub
+}
+
define i32 @test47(i1 %A, i32 %B, i32 %C, i32 %D) {
; CHECK-LABEL: @test47(
-; CHECK-NEXT: [[TMP1:%.*]] = sub i32 %D, %C
-; CHECK-NEXT: [[SUB:%.*]] = select i1 %A, i32 [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[D:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = select i1 [[A:%.*]], i32 [[TMP1]], i32 0
; CHECK-NEXT: ret i32 [[SUB]]
;
%sel0 = select i1 %A, i32 %D, i32 %B
@@ -639,8 +761,8 @@ define i32 @test47(i1 %A, i32 %B, i32 %C, i32 %D) {
define i32 @test48(i1 %A, i32 %B, i32 %C, i32 %D) {
; CHECK-LABEL: @test48(
-; CHECK-NEXT: [[TMP1:%.*]] = sub i32 %D, %C
-; CHECK-NEXT: [[SUB:%.*]] = select i1 %A, i32 0, i32 [[TMP1]]
+; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[D:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = select i1 [[A:%.*]], i32 0, i32 [[TMP1]]
; CHECK-NEXT: ret i32 [[SUB]]
;
%sel0 = select i1 %A, i32 %B, i32 %D
@@ -653,8 +775,8 @@ define i32 @test48(i1 %A, i32 %B, i32 %C, i32 %D) {
define i8 @bool_sext_sub(i8 %x, i1 %y) {
; CHECK-LABEL: @bool_sext_sub(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i1 %y to i8
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[TMP1]], %x
+; CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[Y:%.*]] to i8
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[TMP1]], [[X:%.*]]
; CHECK-NEXT: ret i8 [[SUB]]
;
%sext = sext i1 %y to i8
@@ -666,8 +788,8 @@ define i8 @bool_sext_sub(i8 %x, i1 %y) {
define <2 x i8> @bool_sext_sub_vec(<2 x i8> %x, <2 x i1> %y) {
; CHECK-LABEL: @bool_sext_sub_vec(
-; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i1> %y to <2 x i8>
-; CHECK-NEXT: [[SUB:%.*]] = add <2 x i8> [[TMP1]], %x
+; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i1> [[Y:%.*]] to <2 x i8>
+; CHECK-NEXT: [[SUB:%.*]] = add <2 x i8> [[TMP1]], [[X:%.*]]
; CHECK-NEXT: ret <2 x i8> [[SUB]]
;
%sext = sext <2 x i1> %y to <2 x i8>
@@ -679,8 +801,8 @@ define <2 x i8> @bool_sext_sub_vec(<2 x i8> %x, <2 x i1> %y) {
define <2 x i8> @bool_sext_sub_vec_nsw(<2 x i8> %x, <2 x i1> %y) {
; CHECK-LABEL: @bool_sext_sub_vec_nsw(
-; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i1> %y to <2 x i8>
-; CHECK-NEXT: [[SUB:%.*]] = add nsw <2 x i8> [[TMP1]], %x
+; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i1> [[Y:%.*]] to <2 x i8>
+; CHECK-NEXT: [[SUB:%.*]] = add nsw <2 x i8> [[TMP1]], [[X:%.*]]
; CHECK-NEXT: ret <2 x i8> [[SUB]]
;
%sext = sext <2 x i1> %y to <2 x i8>
@@ -692,8 +814,8 @@ define <2 x i8> @bool_sext_sub_vec_nsw(<2 x i8> %x, <2 x i1> %y) {
define i8 @bool_sext_sub_nuw(i8 %x, i1 %y) {
; CHECK-LABEL: @bool_sext_sub_nuw(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i1 %y to i8
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[TMP1]], %x
+; CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[Y:%.*]] to i8
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[TMP1]], [[X:%.*]]
; CHECK-NEXT: ret i8 [[SUB]]
;
%sext = sext i1 %y to i8
@@ -701,3 +823,169 @@ define i8 @bool_sext_sub_nuw(i8 %x, i1 %y) {
ret i8 %sub
}
+define i32 @test49(i32 %X) {
+; CHECK-LABEL: @test49(
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 1, [[X:%.*]]
+; CHECK-NEXT: [[RES:%.*]] = and i32 [[SUB]], 64
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %sub = sub i32 129, %X
+ %res = and i32 %sub, 64
+ ret i32 %res
+}
+
+define i32 @test50(i32 %X) {
+; CHECK-LABEL: @test50(
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 1, [[X:%.*]]
+; CHECK-NEXT: [[RES:%.*]] = and i32 [[SUB]], 127
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %sub = sub i32 129, %X
+ %res = and i32 %sub, 127
+ ret i32 %res
+}
+
+define i32 @test51(i32 %X) {
+; CHECK-LABEL: @test51(
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 126, [[X:%.*]]
+; CHECK-NEXT: [[RES:%.*]] = and i32 [[SUB]], 64
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %sub = sub i32 254, %X
+ %res = and i32 %sub, 64
+ ret i32 %res
+}
+
+define i32 @test52(i32 %X) {
+; CHECK-LABEL: @test52(
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 126, [[X:%.*]]
+; CHECK-NEXT: [[RES:%.*]] = and i32 [[SUB]], 127
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %sub = sub i32 254, %X
+ %res = and i32 %sub, 127
+ ret i32 %res
+}
+
+define <2 x i1> @test53(<2 x i1> %A, <2 x i1> %B) {
+; CHECK-LABEL: @test53(
+; CHECK-NEXT: [[SUB:%.*]] = xor <2 x i1> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: ret <2 x i1> [[SUB]]
+;
+ %sub = sub <2 x i1> %A, %B
+ ret <2 x i1> %sub
+}
+
+define i32 @test54(i1 %C) {
+; CHECK-LABEL: @test54(
+; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], i32 -877, i32 113
+; CHECK-NEXT: ret i32 [[V]]
+;
+ %A = select i1 %C, i32 1000, i32 10
+ %V = sub i32 123, %A
+ ret i32 %V
+}
+
+define <2 x i32> @test54vec(i1 %C) {
+; CHECK-LABEL: @test54vec(
+; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], <2 x i32> <i32 -877, i32 -877>, <2 x i32> <i32 113, i32 113>
+; CHECK-NEXT: ret <2 x i32> [[V]]
+;
+ %A = select i1 %C, <2 x i32> <i32 1000, i32 1000>, <2 x i32> <i32 10, i32 10>
+ %V = sub <2 x i32> <i32 123, i32 123>, %A
+ ret <2 x i32> %V
+}
+
+define <2 x i32> @test54vec2(i1 %C) {
+; CHECK-LABEL: @test54vec2(
+; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], <2 x i32> <i32 -877, i32 -2167>, <2 x i32> <i32 113, i32 303>
+; CHECK-NEXT: ret <2 x i32> [[V]]
+;
+ %A = select i1 %C, <2 x i32> <i32 1000, i32 2500>, <2 x i32> <i32 10, i32 30>
+ %V = sub <2 x i32> <i32 123, i32 333>, %A
+ ret <2 x i32> %V
+}
+
+define i32 @test55(i1 %which) {
+; CHECK-LABEL: @test55(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
+; CHECK: delay:
+; CHECK-NEXT: br label [[FINAL]]
+; CHECK: final:
+; CHECK-NEXT: [[A:%.*]] = phi i32 [ -877, [[ENTRY:%.*]] ], [ 113, [[DELAY]] ]
+; CHECK-NEXT: ret i32 [[A]]
+;
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+final:
+ %A = phi i32 [ 1000, %entry ], [ 10, %delay ]
+ %value = sub i32 123, %A
+ ret i32 %value
+}
+
+define <2 x i32> @test55vec(i1 %which) {
+; CHECK-LABEL: @test55vec(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
+; CHECK: delay:
+; CHECK-NEXT: br label [[FINAL]]
+; CHECK: final:
+; CHECK-NEXT: [[A:%.*]] = phi <2 x i32> [ <i32 -877, i32 -877>, [[ENTRY:%.*]] ], [ <i32 113, i32 113>, [[DELAY]] ]
+; CHECK-NEXT: ret <2 x i32> [[A]]
+;
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+final:
+ %A = phi <2 x i32> [ <i32 1000, i32 1000>, %entry ], [ <i32 10, i32 10>, %delay ]
+ %value = sub <2 x i32> <i32 123, i32 123>, %A
+ ret <2 x i32> %value
+}
+
+define <2 x i32> @test55vec2(i1 %which) {
+; CHECK-LABEL: @test55vec2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
+; CHECK: delay:
+; CHECK-NEXT: br label [[FINAL]]
+; CHECK: final:
+; CHECK-NEXT: [[A:%.*]] = phi <2 x i32> [ <i32 -877, i32 -2167>, [[ENTRY:%.*]] ], [ <i32 113, i32 303>, [[DELAY]] ]
+; CHECK-NEXT: ret <2 x i32> [[A]]
+;
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+final:
+ %A = phi <2 x i32> [ <i32 1000, i32 2500>, %entry ], [ <i32 10, i32 30>, %delay ]
+ %value = sub <2 x i32> <i32 123, i32 333>, %A
+ ret <2 x i32> %value
+}
+
+define i32 @test56(i32 %A, i32 %B) {
+; CHECK-LABEL: @test56(
+; CHECK-NEXT: [[Y:%.*]] = sub i32 0, [[B:%.*]]
+; CHECK-NEXT: ret i32 [[Y]]
+;
+ %X = add i32 %A, %B
+ %Y = sub i32 %A, %X
+  ret i32 %Y
+}
+
+define i32 @test57(i32 %A, i32 %B) {
+; CHECK-LABEL: @test57(
+; CHECK-NEXT: [[Y:%.*]] = sub i32 0, [[B:%.*]]
+; CHECK-NEXT: ret i32 [[Y]]
+;
+ %X = add i32 %B, %A
+ %Y = sub i32 %A, %X
+  ret i32 %Y
+}
diff --git a/test/Transforms/InstCombine/trunc.ll b/test/Transforms/InstCombine/trunc.ll
index eaa45bbb286c..5597b578f017 100644
--- a/test/Transforms/InstCombine/trunc.ll
+++ b/test/Transforms/InstCombine/trunc.ll
@@ -119,8 +119,8 @@ define i64 @test8(i32 %A, i32 %B) {
define i8 @test9(i32 %X) {
; CHECK-LABEL: @test9(
-; CHECK-NEXT: [[X_TR:%.*]] = trunc i32 %X to i8
-; CHECK-NEXT: [[Z:%.*]] = and i8 [[X_TR]], 42
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 %X to i8
+; CHECK-NEXT: [[Z:%.*]] = and i8 [[TMP1]], 42
; CHECK-NEXT: ret i8 [[Z]]
;
%Y = and i32 %X, 42
@@ -464,3 +464,72 @@ define <8 x i16> @trunc_shl_v8i16_v8i32_4(<8 x i32> %a) {
ret <8 x i16> %conv
}
+; Although the mask is the same value, we don't create a shuffle for types that the backend may not be able to handle:
+; trunc (shuffle X, C, Mask) --> shuffle (trunc X), C', Mask
+
+define <4 x i8> @wide_shuf(<4 x i32> %x) {
+; CHECK-LABEL: @wide_shuf(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> %x, <4 x i32> <i32 undef, i32 3634, i32 90, i32 undef>, <4 x i32> <i32 1, i32 5, i32 6, i32 2>
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc <4 x i32> [[SHUF]] to <4 x i8>
+; CHECK-NEXT: ret <4 x i8> [[TRUNC]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> <i32 35, i32 3634, i32 90, i32 -1>, <4 x i32> <i32 1, i32 5, i32 6, i32 2>
+ %trunc = trunc <4 x i32> %shuf to <4 x i8>
+ ret <4 x i8> %trunc
+}
+
+; trunc (shuffle X, undef, SplatMask) --> shuffle (trunc X), undef, SplatMask
+
+define <4 x i8> @wide_splat1(<4 x i32> %x) {
+; CHECK-LABEL: @wide_splat1(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc <4 x i32> %x to <4 x i8>
+; CHECK-NEXT: [[TRUNC:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
+; CHECK-NEXT: ret <4 x i8> [[TRUNC]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
+ %trunc = trunc <4 x i32> %shuf to <4 x i8>
+ ret <4 x i8> %trunc
+}
+
+; Test weird types.
+; trunc (shuffle X, undef, SplatMask) --> shuffle (trunc X), undef, SplatMask
+
+define <3 x i31> @wide_splat2(<3 x i33> %x) {
+; CHECK-LABEL: @wide_splat2(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc <3 x i33> %x to <3 x i31>
+; CHECK-NEXT: [[TRUNC:%.*]] = shufflevector <3 x i31> [[TMP1]], <3 x i31> undef, <3 x i32> <i32 1, i32 1, i32 1>
+; CHECK-NEXT: ret <3 x i31> [[TRUNC]]
+;
+ %shuf = shufflevector <3 x i33> %x, <3 x i33> undef, <3 x i32> <i32 1, i32 1, i32 1>
+ %trunc = trunc <3 x i33> %shuf to <3 x i31>
+ ret <3 x i31> %trunc
+}
+
+; FIXME:
+; trunc (shuffle X, undef, SplatMask) --> shuffle (trunc X), undef, SplatMask
+; A mask with undef elements should still be considered a splat mask.
+
+define <3 x i31> @wide_splat3(<3 x i33> %x) {
+; CHECK-LABEL: @wide_splat3(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <3 x i33> %x, <3 x i33> undef, <3 x i32> <i32 undef, i32 1, i32 1>
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc <3 x i33> [[SHUF]] to <3 x i31>
+; CHECK-NEXT: ret <3 x i31> [[TRUNC]]
+;
+ %shuf = shufflevector <3 x i33> %x, <3 x i33> undef, <3 x i32> <i32 undef, i32 1, i32 1>
+ %trunc = trunc <3 x i33> %shuf to <3 x i31>
+ ret <3 x i31> %trunc
+}
+
+; TODO: The shuffle extends the length of the input vector. Should we shrink this?
+
+define <8 x i8> @wide_lengthening_splat(<4 x i16> %v) {
+; CHECK-LABEL: @wide_lengthening_splat(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TR:%.*]] = trunc <8 x i16> [[SHUF]] to <8 x i8>
+; CHECK-NEXT: ret <8 x i8> [[TR]]
+;
+ %shuf = shufflevector <4 x i16> %v, <4 x i16> %v, <8 x i32> zeroinitializer
+ %tr = trunc <8 x i16> %shuf to <8 x i8>
+ ret <8 x i8> %tr
+}
+
diff --git a/test/Transforms/InstCombine/type_pun.ll b/test/Transforms/InstCombine/type_pun.ll
index 098164cd029f..56d1ffcb5d31 100644
--- a/test/Transforms/InstCombine/type_pun.ll
+++ b/test/Transforms/InstCombine/type_pun.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
; Ensure that type punning using a union of vector and same-sized array
@@ -17,9 +18,10 @@ target datalayout = "p:32:32"
; Extracting the zeroth element in an i32 array.
define i32 @type_pun_zeroth(<16 x i8> %in) {
; CHECK-LABEL: @type_pun_zeroth(
-; CHECK-NEXT: %[[BC:.*]] = bitcast <16 x i8> %in to <4 x i32>
-; CHECK-NEXT: %[[EXT:.*]] = extractelement <4 x i32> %[[BC]], i32 0
-; CHECK-NEXT: ret i32 %[[EXT]]
+; CHECK-NEXT: [[SROA_BC:%.*]] = bitcast <16 x i8> [[IN:%.*]] to <4 x i32>
+; CHECK-NEXT: [[SROA_EXTRACT:%.*]] = extractelement <4 x i32> [[SROA_BC]], i32 0
+; CHECK-NEXT: ret i32 [[SROA_EXTRACT]]
+;
%sroa = shufflevector <16 x i8> %in, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%1 = bitcast <4 x i8> %sroa to i32
ret i32 %1
@@ -28,9 +30,10 @@ define i32 @type_pun_zeroth(<16 x i8> %in) {
; Extracting the first element in an i32 array.
define i32 @type_pun_first(<16 x i8> %in) {
; CHECK-LABEL: @type_pun_first(
-; CHECK-NEXT: %[[BC:.*]] = bitcast <16 x i8> %in to <4 x i32>
-; CHECK-NEXT: %[[EXT:.*]] = extractelement <4 x i32> %[[BC]], i32 1
-; CHECK-NEXT: ret i32 %[[EXT]]
+; CHECK-NEXT: [[SROA_BC:%.*]] = bitcast <16 x i8> [[IN:%.*]] to <4 x i32>
+; CHECK-NEXT: [[SROA_EXTRACT:%.*]] = extractelement <4 x i32> [[SROA_BC]], i32 1
+; CHECK-NEXT: ret i32 [[SROA_EXTRACT]]
+;
%sroa = shufflevector <16 x i8> %in, <16 x i8> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%1 = bitcast <4 x i8> %sroa to i32
ret i32 %1
@@ -39,10 +42,11 @@ define i32 @type_pun_first(<16 x i8> %in) {
; Extracting an i32 that isn't aligned to any natural boundary.
define i32 @type_pun_misaligned(<16 x i8> %in) {
; CHECK-LABEL: @type_pun_misaligned(
-; CHECK-NEXT: %[[SHUF:.*]] = shufflevector <16 x i8> %in, <16 x i8> undef, <16 x i32> <i32 6, i32 7, i32 8, i32 9, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT: %[[BC:.*]] = bitcast <16 x i8> %[[SHUF]] to <4 x i32>
-; CHECK-NEXT: %[[EXT:.*]] = extractelement <4 x i32> %[[BC]], i32 0
-; CHECK-NEXT: ret i32 %[[EXT]]
+; CHECK-NEXT: [[SROA_EXTRACT:%.*]] = shufflevector <16 x i8> [[IN:%.*]], <16 x i8> undef, <16 x i32> <i32 6, i32 7, i32 8, i32 9, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[SROA_BC:%.*]] = bitcast <16 x i8> [[SROA_EXTRACT]] to <4 x i32>
+; CHECK-NEXT: [[SROA_EXTRACT1:%.*]] = extractelement <4 x i32> [[SROA_BC]], i32 0
+; CHECK-NEXT: ret i32 [[SROA_EXTRACT1]]
+;
%sroa = shufflevector <16 x i8> %in, <16 x i8> undef, <4 x i32> <i32 6, i32 7, i32 8, i32 9>
%1 = bitcast <4 x i8> %sroa to i32
ret i32 %1
@@ -51,10 +55,11 @@ define i32 @type_pun_misaligned(<16 x i8> %in) {
; Type punning to an array of pointers.
define i32* @type_pun_pointer(<16 x i8> %in) {
; CHECK-LABEL: @type_pun_pointer(
-; CHECK-NEXT: %[[BC:.*]] = bitcast <16 x i8> %in to <4 x i32>
-; CHECK-NEXT: %[[EXT:.*]] = extractelement <4 x i32> %[[BC]], i32 0
-; CHECK-NEXT: %[[I2P:.*]] = inttoptr i32 %[[EXT]] to i32*
-; CHECK-NEXT: ret i32* %[[I2P]]
+; CHECK-NEXT: [[SROA_BC:%.*]] = bitcast <16 x i8> [[IN:%.*]] to <4 x i32>
+; CHECK-NEXT: [[SROA_EXTRACT:%.*]] = extractelement <4 x i32> [[SROA_BC]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = inttoptr i32 [[SROA_EXTRACT]] to i32*
+; CHECK-NEXT: ret i32* [[TMP1]]
+;
%sroa = shufflevector <16 x i8> %in, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%1 = bitcast <4 x i8> %sroa to i32
%2 = inttoptr i32 %1 to i32*
@@ -64,9 +69,10 @@ define i32* @type_pun_pointer(<16 x i8> %in) {
; Type punning to an array of 32-bit floating-point values.
define float @type_pun_float(<16 x i8> %in) {
; CHECK-LABEL: @type_pun_float(
-; CHECK-NEXT: %[[BC:.*]] = bitcast <16 x i8> %in to <4 x float>
-; CHECK-NEXT: %[[EXT:.*]] = extractelement <4 x float> %[[BC]], i32 0
-; CHECK-NEXT: ret float %[[EXT]]
+; CHECK-NEXT: [[SROA_BC:%.*]] = bitcast <16 x i8> [[IN:%.*]] to <4 x float>
+; CHECK-NEXT: [[SROA_EXTRACT:%.*]] = extractelement <4 x float> [[SROA_BC]], i32 0
+; CHECK-NEXT: ret float [[SROA_EXTRACT]]
+;
%sroa = shufflevector <16 x i8> %in, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%1 = bitcast <4 x i8> %sroa to float
ret float %1
@@ -75,9 +81,10 @@ define float @type_pun_float(<16 x i8> %in) {
; Type punning to an array of 64-bit floating-point values.
define double @type_pun_double(<16 x i8> %in) {
; CHECK-LABEL: @type_pun_double(
-; CHECK-NEXT: %[[BC:.*]] = bitcast <16 x i8> %in to <2 x double>
-; CHECK-NEXT: %[[EXT:.*]] = extractelement <2 x double> %[[BC]], i32 0
-; CHECK-NEXT: ret double %[[EXT]]
+; CHECK-NEXT: [[SROA_BC:%.*]] = bitcast <16 x i8> [[IN:%.*]] to <2 x double>
+; CHECK-NEXT: [[SROA_EXTRACT:%.*]] = extractelement <2 x double> [[SROA_BC]], i32 0
+; CHECK-NEXT: ret double [[SROA_EXTRACT]]
+;
%sroa = shufflevector <16 x i8> %in, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%1 = bitcast <8 x i8> %sroa to double
ret double %1
@@ -87,13 +94,14 @@ define double @type_pun_double(<16 x i8> %in) {
; Verify that multiple uses with different bitcast types are properly handled.
define { float, i32 } @type_pun_float_i32(<16 x i8> %in) {
; CHECK-LABEL: @type_pun_float_i32(
-; CHECK-NEXT: %[[BCI:.*]] = bitcast <16 x i8> %in to <4 x i32>
-; CHECK-NEXT: %[[EXTI:.*]] = extractelement <4 x i32> %[[BCI]], i32 0
-; CHECK-NEXT: %[[BCF:.*]] = bitcast <16 x i8> %in to <4 x float>
-; CHECK-NEXT: %[[EXTF:.*]] = extractelement <4 x float> %[[BCF]], i32 0
-; CHECK-NEXT: %1 = insertvalue { float, i32 } undef, float %[[EXTF]], 0
-; CHECK-NEXT: %2 = insertvalue { float, i32 } %1, i32 %[[EXTI]], 1
-; CHECK-NEXT: ret { float, i32 } %2
+; CHECK-NEXT: [[SROA_BC:%.*]] = bitcast <16 x i8> [[IN:%.*]] to <4 x i32>
+; CHECK-NEXT: [[SROA_EXTRACT:%.*]] = extractelement <4 x i32> [[SROA_BC]], i32 0
+; CHECK-NEXT: [[SROA_BC1:%.*]] = bitcast <16 x i8> [[IN]] to <4 x float>
+; CHECK-NEXT: [[SROA_EXTRACT2:%.*]] = extractelement <4 x float> [[SROA_BC1]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = insertvalue { float, i32 } undef, float [[SROA_EXTRACT2]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertvalue { float, i32 } [[TMP1]], i32 [[SROA_EXTRACT]], 1
+; CHECK-NEXT: ret { float, i32 } [[TMP2]]
+;
%sroa = shufflevector <16 x i8> %in, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%f = bitcast <4 x i8> %sroa to float
%i = bitcast <4 x i8> %sroa to i32
@@ -106,24 +114,29 @@ define { float, i32 } @type_pun_float_i32(<16 x i8> %in) {
; Verify that the bitcast is shared and dominates usage.
define i32 @type_pun_i32_ctrl(<16 x i8> %in) {
; CHECK-LABEL: @type_pun_i32_ctrl(
-entry: ; CHECK-NEXT: entry:
-; CHECK-NEXT: %[[BC:.*]] = bitcast <16 x i8> %in to <4 x i32>
-; CHECK-NEXT: br
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SROA_BC:%.*]] = bitcast <16 x i8> [[IN:%.*]] to <4 x i32>
+; CHECK-NEXT: br i1 undef, label [[LEFT:%.*]], label [[RIGHT:%.*]]
+; CHECK: left:
+; CHECK-NEXT: [[SROA_EXTRACT1:%.*]] = extractelement <4 x i32> [[SROA_BC]], i32 0
+; CHECK-NEXT: br label [[TAIL:%.*]]
+; CHECK: right:
+; CHECK-NEXT: [[SROA_EXTRACT:%.*]] = extractelement <4 x i32> [[SROA_BC]], i32 0
+; CHECK-NEXT: br label [[TAIL]]
+; CHECK: tail:
+; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[SROA_EXTRACT1]], [[LEFT]] ], [ [[SROA_EXTRACT]], [[RIGHT]] ]
+; CHECK-NEXT: ret i32 [[I]]
+;
+entry:
%sroa = shufflevector <16 x i8> %in, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
br i1 undef, label %left, label %right
-left: ; CHECK: left:
-; CHECK-NEXT: %[[EXTL:.*]] = extractelement <4 x i32> %[[BC]], i32 0
-; CHECK-NEXT: br
+left:
%lhs = bitcast <4 x i8> %sroa to i32
br label %tail
-right: ; CHECK: right:
-; CHECK-NEXT: %[[EXTR:.*]] = extractelement <4 x i32> %[[BC]], i32 0
-; CHECK-NEXT: br
+right:
%rhs = bitcast <4 x i8> %sroa to i32
br label %tail
-tail: ; CHECK: tail:
-; CHECK-NEXT: %i = phi i32 [ %[[EXTL]], %left ], [ %[[EXTR]], %right ]
-; CHECK-NEXT: ret i32 %i
+tail:
%i = phi i32 [ %lhs, %left ], [ %rhs, %right ]
ret i32 %i
}
@@ -132,9 +145,10 @@ tail: ; CHECK: tail:
; should stay the same.
define i40 @type_pun_unhandled(<16 x i8> %in) {
; CHECK-LABEL: @type_pun_unhandled(
-; CHECK-NEXT: %sroa = shufflevector <16 x i8> %in, <16 x i8> undef, <5 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8>
-; CHECK-NEXT: %1 = bitcast <5 x i8> %sroa to i40
-; CHECK-NEXT: ret i40 %1
+; CHECK-NEXT: [[SROA:%.*]] = shufflevector <16 x i8> [[IN:%.*]], <16 x i8> undef, <5 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8>
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast <5 x i8> [[SROA]] to i40
+; CHECK-NEXT: ret i40 [[TMP1]]
+;
%sroa = shufflevector <16 x i8> %in, <16 x i8> undef, <5 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8>
%1 = bitcast <5 x i8> %sroa to i40
ret i40 %1
diff --git a/test/Transforms/InstCombine/urem.ll b/test/Transforms/InstCombine/urem.ll
deleted file mode 100644
index 0549d759eac4..000000000000
--- a/test/Transforms/InstCombine/urem.ll
+++ /dev/null
@@ -1,50 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -instcombine -S | FileCheck %s
-
-define i64 @rem_unsigned(i64 %x1, i64 %y2) {
-; CHECK-LABEL: @rem_unsigned(
-; CHECK-NEXT: [[R:%.*]] = urem i64 %x1, %y2
-; CHECK-NEXT: ret i64 [[R]]
-;
- %r = udiv i64 %x1, %y2
- %r7 = mul i64 %r, %y2
- %r8 = sub i64 %x1, %r7
- ret i64 %r8
-}
-
-; PR28672 - https://llvm.org/bugs/show_bug.cgi?id=28672
-
-define i8 @big_divisor(i8 %x) {
-; CHECK-LABEL: @big_divisor(
-; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i8 %x, -127
-; CHECK-NEXT: [[TMP2:%.*]] = add i8 %x, 127
-; CHECK-NEXT: [[REM:%.*]] = select i1 [[TMP1]], i8 %x, i8 [[TMP2]]
-; CHECK-NEXT: ret i8 [[REM]]
-;
- %rem = urem i8 %x, 129
- ret i8 %rem
-}
-
-define i5 @biggest_divisor(i5 %x) {
-; CHECK-LABEL: @biggest_divisor(
-; CHECK-NEXT: [[NOT_:%.*]] = icmp eq i5 %x, -1
-; CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[NOT_]] to i5
-; CHECK-NEXT: [[REM:%.*]] = add i5 [[TMP1]], %x
-; CHECK-NEXT: ret i5 [[REM]]
-;
- %rem = urem i5 %x, -1
- ret i5 %rem
-}
-
-; TODO: Should vector subtract of constant be canonicalized to add?
-define <2 x i4> @big_divisor_vec(<2 x i4> %x) {
-; CHECK-LABEL: @big_divisor_vec(
-; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <2 x i4> %x, <i4 -3, i4 -3>
-; CHECK-NEXT: [[TMP2:%.*]] = sub <2 x i4> %x, <i4 -3, i4 -3>
-; CHECK-NEXT: [[REM:%.*]] = select <2 x i1> [[TMP1]], <2 x i4> %x, <2 x i4> [[TMP2]]
-; CHECK-NEXT: ret <2 x i4> [[REM]]
-;
- %rem = urem <2 x i4> %x, <i4 13, i4 13>
- ret <2 x i4> %rem
-}
-
diff --git a/test/Transforms/InstCombine/vararg.ll b/test/Transforms/InstCombine/vararg.ll
index 263a7425a075..111cb4de7bc3 100644
--- a/test/Transforms/InstCombine/vararg.ll
+++ b/test/Transforms/InstCombine/vararg.ll
@@ -2,8 +2,8 @@
%struct.__va_list = type { i8*, i8*, i8*, i32, i32 }
-declare void @llvm.lifetime.start(i64, i8* nocapture)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
declare void @llvm.va_start(i8*)
declare void @llvm.va_end(i8*)
declare void @llvm.va_copy(i8*, i8*)
@@ -17,14 +17,14 @@ entry:
%va1 = alloca %struct.__va_list, align 8
%0 = bitcast %struct.__va_list* %va0 to i8*
%1 = bitcast %struct.__va_list* %va1 to i8*
- call void @llvm.lifetime.start(i64 32, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 32, i8* %0)
call void @llvm.va_start(i8* %0)
- call void @llvm.lifetime.start(i64 32, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
call void @llvm.va_copy(i8* %1, i8* %0)
call void @llvm.va_end(i8* %1)
- call void @llvm.lifetime.end(i64 32, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
call void @llvm.va_end(i8* %0)
- call void @llvm.lifetime.end(i64 32, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 32, i8* %0)
ret i32 0
}
diff --git a/test/Transforms/InstCombine/vec_demanded_elts.ll b/test/Transforms/InstCombine/vec_demanded_elts.ll
index 7c46adaf616e..5f27634da19c 100644
--- a/test/Transforms/InstCombine/vec_demanded_elts.ll
+++ b/test/Transforms/InstCombine/vec_demanded_elts.ll
@@ -67,7 +67,7 @@ define i64 @test3(float %f, double %d) {
; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP1]], [[TMP3]]
; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP5]], [[TMP7]]
; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[TMP12]], [[TMP13]]
-; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP11]], [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP14]], [[TMP11]]
; CHECK-NEXT: ret i64 [[TMP15]]
;
%v00 = insertelement <4 x float> undef, float %f, i32 0
@@ -182,10 +182,9 @@ define <4 x float> @dead_shuffle_elt(<4 x float> %x, <2 x float> %y) nounwind {
define <2 x float> @test_fptrunc(double %f) {
; CHECK-LABEL: @test_fptrunc(
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> undef, double %f, i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x double> [[TMP1]], double 0.000000e+00, i32 1
-; CHECK-NEXT: [[TMP3:%.*]] = fptrunc <2 x double> [[TMP2]] to <2 x float>
-; CHECK-NEXT: ret <2 x float> [[TMP3]]
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> <double undef, double 0.000000e+00>, double %f, i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = fptrunc <2 x double> [[TMP1]] to <2 x float>
+; CHECK-NEXT: ret <2 x float> [[TMP2]]
;
%tmp9 = insertelement <4 x double> undef, double %f, i32 0
%tmp10 = insertelement <4 x double> %tmp9, double 0.000000e+00, i32 1
@@ -198,10 +197,9 @@ define <2 x float> @test_fptrunc(double %f) {
define <2 x double> @test_fpext(float %f) {
; CHECK-LABEL: @test_fpext(
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x float> undef, float %f, i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x float> [[TMP1]], float 0.000000e+00, i32 1
-; CHECK-NEXT: [[TMP3:%.*]] = fpext <2 x float> [[TMP2]] to <2 x double>
-; CHECK-NEXT: ret <2 x double> [[TMP3]]
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x float> <float undef, float 0.000000e+00>, float %f, i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = fpext <2 x float> [[TMP1]] to <2 x double>
+; CHECK-NEXT: ret <2 x double> [[TMP2]]
;
%tmp9 = insertelement <4 x float> undef, float %f, i32 0
%tmp10 = insertelement <4 x float> %tmp9, float 0.000000e+00, i32 1
@@ -223,8 +221,7 @@ define <4 x double> @test_shuffle(<4 x double> %f) {
define <4 x float> @test_select(float %f, float %g) {
; CHECK-LABEL: @test_select(
-; CHECK-NEXT: [[A0:%.*]] = insertelement <4 x float> undef, float %f, i32 0
-; CHECK-NEXT: [[A3:%.*]] = insertelement <4 x float> [[A0]], float 3.000000e+00, i32 3
+; CHECK-NEXT: [[A3:%.*]] = insertelement <4 x float> <float undef, float undef, float undef, float 3.000000e+00>, float %f, i32 0
; CHECK-NEXT: [[RET:%.*]] = shufflevector <4 x float> [[A3]], <4 x float> <float undef, float 4.000000e+00, float 5.000000e+00, float undef>, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
; CHECK-NEXT: ret <4 x float> [[RET]]
;
diff --git a/test/Transforms/InstCombine/vec_sext.ll b/test/Transforms/InstCombine/vec_sext.ll
index 10947c1781e0..79a32d64b063 100644
--- a/test/Transforms/InstCombine/vec_sext.ll
+++ b/test/Transforms/InstCombine/vec_sext.ll
@@ -6,7 +6,7 @@ define <4 x i32> @psignd_3(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: [[SUB:%.*]] = sub nsw <4 x i32> zeroinitializer, %a
; CHECK-NEXT: [[B_LOBIT:%.*]] = ashr <4 x i32> %b, <i32 31, i32 31, i32 31, i32 31>
; CHECK-NEXT: [[T1:%.*]] = xor <4 x i32> [[B_LOBIT]], <i32 -1, i32 -1, i32 -1, i32 -1>
-; CHECK-NEXT: [[T2:%.*]] = and <4 x i32> %a, [[T1]]
+; CHECK-NEXT: [[T2:%.*]] = and <4 x i32> [[T1]], %a
; CHECK-NEXT: [[T3:%.*]] = and <4 x i32> [[B_LOBIT]], [[SUB]]
; CHECK-NEXT: [[COND:%.*]] = or <4 x i32> [[T2]], [[T3]]
; CHECK-NEXT: ret <4 x i32> [[COND]]
diff --git a/test/Transforms/InstCombine/vector-casts.ll b/test/Transforms/InstCombine/vector-casts.ll
index 8d01cad4b453..643ab6c5348f 100644
--- a/test/Transforms/InstCombine/vector-casts.ll
+++ b/test/Transforms/InstCombine/vector-casts.ll
@@ -110,23 +110,11 @@ define <2 x i64> @bar(<2 x i65> %t) {
ret <2 x i64> %b
}
-define <2 x i65> @foos(<2 x i64> %t) {
-; CHECK-LABEL: @foos(
-; CHECK-NEXT: [[A:%.*]] = zext <2 x i64> %t to <2 x i65>
-; CHECK-NEXT: [[SEXT:%.*]] = shl <2 x i65> [[A]], <i65 33, i65 33>
-; CHECK-NEXT: [[B:%.*]] = ashr <2 x i65> [[SEXT]], <i65 33, i65 33>
-; CHECK-NEXT: ret <2 x i65> [[B]]
-;
- %a = trunc <2 x i64> %t to <2 x i32>
- %b = sext <2 x i32> %a to <2 x i65>
- ret <2 x i65> %b
-}
-
define <2 x i64> @bars(<2 x i65> %t) {
; CHECK-LABEL: @bars(
; CHECK-NEXT: [[A:%.*]] = trunc <2 x i65> %t to <2 x i64>
; CHECK-NEXT: [[SEXT:%.*]] = shl <2 x i64> [[A]], <i64 32, i64 32>
-; CHECK-NEXT: [[B:%.*]] = ashr <2 x i64> [[SEXT]], <i64 32, i64 32>
+; CHECK-NEXT: [[B:%.*]] = ashr exact <2 x i64> [[SEXT]], <i64 32, i64 32>
; CHECK-NEXT: ret <2 x i64> [[B]]
;
%a = trunc <2 x i65> %t to <2 x i32>
@@ -137,7 +125,7 @@ define <2 x i64> @bars(<2 x i65> %t) {
define <2 x i64> @quxs(<2 x i64> %t) {
; CHECK-LABEL: @quxs(
; CHECK-NEXT: [[SEXT:%.*]] = shl <2 x i64> %t, <i64 32, i64 32>
-; CHECK-NEXT: [[B:%.*]] = ashr <2 x i64> [[SEXT]], <i64 32, i64 32>
+; CHECK-NEXT: [[B:%.*]] = ashr exact <2 x i64> [[SEXT]], <i64 32, i64 32>
; CHECK-NEXT: ret <2 x i64> [[B]]
;
%a = trunc <2 x i64> %t to <2 x i32>
@@ -148,7 +136,7 @@ define <2 x i64> @quxs(<2 x i64> %t) {
define <2 x i64> @quxt(<2 x i64> %t) {
; CHECK-LABEL: @quxt(
; CHECK-NEXT: [[A:%.*]] = shl <2 x i64> %t, <i64 32, i64 32>
-; CHECK-NEXT: [[B:%.*]] = ashr <2 x i64> [[A]], <i64 32, i64 32>
+; CHECK-NEXT: [[B:%.*]] = ashr exact <2 x i64> [[A]], <i64 32, i64 32>
; CHECK-NEXT: ret <2 x i64> [[B]]
;
%a = shl <2 x i64> %t, <i64 32, i64 32>
@@ -228,3 +216,91 @@ define <8 x i32> @pr24458(<8 x float> %n) {
ret <8 x i32> %wrong
}
+; Hoist a trunc to a scalar if we're inserting into an undef vector.
+; trunc (inselt undef, X, Index) --> inselt undef, (trunc X), Index
+
+define <3 x i16> @trunc_inselt_undef(i32 %x) {
+; CHECK-LABEL: @trunc_inselt_undef(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 %x to i16
+; CHECK-NEXT: [[TRUNC:%.*]] = insertelement <3 x i16> undef, i16 [[TMP1]], i32 1
+; CHECK-NEXT: ret <3 x i16> [[TRUNC]]
+;
+ %vec = insertelement <3 x i32> undef, i32 %x, i32 1
+ %trunc = trunc <3 x i32> %vec to <3 x i16>
+ ret <3 x i16> %trunc
+}
+
+; Hoist a trunc to a scalar if we're inserting into an undef vector.
+; trunc (inselt undef, X, Index) --> inselt undef, (trunc X), Index
+
+define <2 x float> @fptrunc_inselt_undef(double %x, i32 %index) {
+; CHECK-LABEL: @fptrunc_inselt_undef(
+; CHECK-NEXT: [[TMP1:%.*]] = fptrunc double %x to float
+; CHECK-NEXT: [[TRUNC:%.*]] = insertelement <2 x float> undef, float [[TMP1]], i32 %index
+; CHECK-NEXT: ret <2 x float> [[TRUNC]]
+;
+ %vec = insertelement <2 x double> <double undef, double undef>, double %x, i32 %index
+ %trunc = fptrunc <2 x double> %vec to <2 x float>
+ ret <2 x float> %trunc
+}
+
+; TODO: Strengthen the backend, so we can have this canonicalization.
+; Insert a scalar int into a constant vector and truncate:
+; trunc (inselt C, X, Index) --> inselt C, (trunc X), Index
+
+define <3 x i16> @trunc_inselt1(i32 %x) {
+; CHECK-LABEL: @trunc_inselt1(
+; CHECK-NEXT: [[VEC:%.*]] = insertelement <3 x i32> <i32 3, i32 undef, i32 65536>, i32 %x, i32 1
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc <3 x i32> [[VEC]] to <3 x i16>
+; CHECK-NEXT: ret <3 x i16> [[TRUNC]]
+;
+ %vec = insertelement <3 x i32> <i32 3, i32 -2, i32 65536>, i32 %x, i32 1
+ %trunc = trunc <3 x i32> %vec to <3 x i16>
+ ret <3 x i16> %trunc
+}
+
+; TODO: Strengthen the backend, so we can have this canonicalization.
+; Insert a scalar FP into a constant vector and FP truncate:
+; fptrunc (inselt C, X, Index) --> inselt C, (fptrunc X), Index
+
+define <2 x float> @fptrunc_inselt1(double %x, i32 %index) {
+; CHECK-LABEL: @fptrunc_inselt1(
+; CHECK-NEXT: [[VEC:%.*]] = insertelement <2 x double> <double undef, double 3.000000e+00>, double %x, i32 %index
+; CHECK-NEXT: [[TRUNC:%.*]] = fptrunc <2 x double> [[VEC]] to <2 x float>
+; CHECK-NEXT: ret <2 x float> [[TRUNC]]
+;
+ %vec = insertelement <2 x double> <double undef, double 3.0>, double %x, i32 %index
+ %trunc = fptrunc <2 x double> %vec to <2 x float>
+ ret <2 x float> %trunc
+}
+
+; TODO: Strengthen the backend, so we can have this canonicalization.
+; Insert a scalar int constant into a vector and truncate:
+; trunc (inselt X, C, Index) --> inselt (trunc X), C', Index
+
+define <8 x i16> @trunc_inselt2(<8 x i32> %x, i32 %index) {
+; CHECK-LABEL: @trunc_inselt2(
+; CHECK-NEXT: [[VEC:%.*]] = insertelement <8 x i32> %x, i32 1048576, i32 %index
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc <8 x i32> [[VEC]] to <8 x i16>
+; CHECK-NEXT: ret <8 x i16> [[TRUNC]]
+;
+ %vec = insertelement <8 x i32> %x, i32 1048576, i32 %index
+ %trunc = trunc <8 x i32> %vec to <8 x i16>
+ ret <8 x i16> %trunc
+}
+
+; TODO: Strengthen the backend, so we can have this canonicalization.
+; Insert a scalar FP constant into a vector and FP truncate:
+; fptrunc (inselt X, C, Index) --> inselt (fptrunc X), C', Index
+
+define <3 x float> @fptrunc_inselt2(<3 x double> %x) {
+; CHECK-LABEL: @fptrunc_inselt2(
+; CHECK-NEXT: [[VEC:%.*]] = insertelement <3 x double> %x, double 4.000000e+00, i32 2
+; CHECK-NEXT: [[TRUNC:%.*]] = fptrunc <3 x double> [[VEC]] to <3 x float>
+; CHECK-NEXT: ret <3 x float> [[TRUNC]]
+;
+ %vec = insertelement <3 x double> %x, double 4.0, i32 2
+ %trunc = fptrunc <3 x double> %vec to <3 x float>
+ ret <3 x float> %trunc
+}
+
diff --git a/test/Transforms/InstCombine/vector-srem.ll b/test/Transforms/InstCombine/vector-srem.ll
deleted file mode 100644
index 44b38596e684..000000000000
--- a/test/Transforms/InstCombine/vector-srem.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -instcombine -S | FileCheck %s
-
-define <4 x i32> @foo(<4 x i32> %t, <4 x i32> %u) {
-; CHECK-LABEL: @foo(
-; CHECK-NEXT: [[K:%.*]] = srem <4 x i32> %t, %u
-; CHECK-NEXT: ret <4 x i32> [[K]]
-;
- %k = sdiv <4 x i32> %t, %u
- %l = mul <4 x i32> %k, %u
- %m = sub <4 x i32> %t, %l
- ret <4 x i32> %m
-}
diff --git a/test/Transforms/InstCombine/vector-urem.ll b/test/Transforms/InstCombine/vector-urem.ll
index 6cecc16069d3..34eebeef3bb1 100644
--- a/test/Transforms/InstCombine/vector-urem.ll
+++ b/test/Transforms/InstCombine/vector-urem.ll
@@ -19,11 +19,3 @@ define <4 x i32> @test_v4i32_const_pow2(<4 x i32> %a0) {
ret <4 x i32> %1
}
-define <4 x i32> @test_v4i32_const_pow2_or_zero(<4 x i32> %a0) {
-; CHECK-LABEL: @test_v4i32_const_pow2_or_zero(
-; CHECK-NEXT: [[TMP1:%.*]] = urem <4 x i32> %a0, <i32 1, i32 2, i32 0, i32 8>
-; CHECK-NEXT: ret <4 x i32> [[TMP1]]
-;
- %1 = urem <4 x i32> %a0, <i32 1, i32 2, i32 0, i32 8>
- ret <4 x i32> %1
-}
diff --git a/test/Transforms/InstCombine/vector_insertelt_shuffle.ll b/test/Transforms/InstCombine/vector_insertelt_shuffle.ll
index b3e614653cfa..c358509d690e 100644
--- a/test/Transforms/InstCombine/vector_insertelt_shuffle.ll
+++ b/test/Transforms/InstCombine/vector_insertelt_shuffle.ll
@@ -1,94 +1,95 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
-define<4 x float> @foo(<4 x float> %x) {
+; insertelements should fold to shuffle
+define <4 x float> @foo(<4 x float> %x) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT: [[INS2:%.*]] = shufflevector <4 x float> %x, <4 x float> <float undef, float 1.000000e+00, float 2.000000e+00, float undef>, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
+; CHECK-NEXT: ret <4 x float> [[INS2]]
+;
%ins1 = insertelement<4 x float> %x, float 1.0, i32 1
%ins2 = insertelement<4 x float> %ins1, float 2.0, i32 2
- ret<4 x float> %ins2
+ ret <4 x float> %ins2
}
-; insertelements should fold to shuffle
-; CHECK-LABEL: @foo
-; CHECK-NEXT: shufflevector <4 x float> %{{.+}}, <4 x float> <float undef, float 1.000000e+00, float 2.000000e+00, float undef>, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
-; CHECK-NEXT: ret <4 x float> %
+; Insert of a constant is canonicalized ahead of insert of a variable.
-define<4 x float> @bar(<4 x float> %x, float %a) {
+define <4 x float> @bar(<4 x float> %x, float %a) {
+; CHECK-LABEL: @bar(
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> %x, float 2.000000e+00, i32 2
+; CHECK-NEXT: [[INS2:%.*]] = insertelement <4 x float> [[TMP1]], float %a, i32 1
+; CHECK-NEXT: ret <4 x float> [[INS2]]
+;
%ins1 = insertelement<4 x float> %x, float %a, i32 1
%ins2 = insertelement<4 x float> %ins1, float 2.0, i32 2
- ret<4 x float> %ins2
+ ret <4 x float> %ins2
}
-; CHECK-LABEL: @bar
-; CHECK-NEXT: insertelement <4 x float> %{{.+}}, float %{{.+}}, i32 1
-; CHECK-NEXT: insertelement <4 x float> %{{.+}}, float 2.000000e+00, i32 2
-; CHECK-NEXT: ret <4 x float> %
-
-define<4 x float> @baz(<4 x float> %x, i32 %a) {
+define <4 x float> @baz(<4 x float> %x, i32 %a) {
+; CHECK-LABEL: @baz(
+; CHECK-NEXT: [[INS1:%.*]] = insertelement <4 x float> %x, float 1.000000e+00, i32 1
+; CHECK-NEXT: [[INS2:%.*]] = insertelement <4 x float> [[INS1]], float 2.000000e+00, i32 %a
+; CHECK-NEXT: ret <4 x float> [[INS2]]
+;
%ins1 = insertelement<4 x float> %x, float 1.0, i32 1
%ins2 = insertelement<4 x float> %ins1, float 2.0, i32 %a
- ret<4 x float> %ins2
+ ret <4 x float> %ins2
}
-; CHECK-LABEL: @baz
-; CHECK-NEXT: insertelement <4 x float> %{{.+}}, float 1.000000e+00, i32 1
-; CHECK-NEXT: insertelement <4 x float> %ins1, float 2.000000e+00, i32 %
-; CHECK-NEXT: ret <4 x float> %
-
-define<4 x float> @bazz(<4 x float> %x, i32 %a) {
+; insertelements should fold to shuffle
+define <4 x float> @bazz(<4 x float> %x, i32 %a) {
+; CHECK-LABEL: @bazz(
+; CHECK-NEXT: [[INS1:%.*]] = insertelement <4 x float> %x, float 1.000000e+00, i32 3
+; CHECK-NEXT: [[INS2:%.*]] = insertelement <4 x float> [[INS1]], float 5.000000e+00, i32 %a
+; CHECK-NEXT: [[INS5:%.*]] = shufflevector <4 x float> [[INS2]], <4 x float> <float undef, float 1.000000e+00, float 2.000000e+00, float undef>, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
+; CHECK-NEXT: [[INS6:%.*]] = insertelement <4 x float> [[INS5]], float 7.000000e+00, i32 %a
+; CHECK-NEXT: ret <4 x float> [[INS6]]
+;
%ins1 = insertelement<4 x float> %x, float 1.0, i32 3
%ins2 = insertelement<4 x float> %ins1, float 5.0, i32 %a
%ins3 = insertelement<4 x float> %ins2, float 3.0, i32 2
%ins4 = insertelement<4 x float> %ins3, float 1.0, i32 1
%ins5 = insertelement<4 x float> %ins4, float 2.0, i32 2
%ins6 = insertelement<4 x float> %ins5, float 7.0, i32 %a
- ret<4 x float> %ins6
+ ret <4 x float> %ins6
}
-; insertelements should fold to shuffle
-; CHECK-LABEL: @bazz
-; CHECK-NEXT: insertelement <4 x float> %{{.+}}, float 1.000000e+00, i32 3
-; CHECK-NEXT: insertelement <4 x float> %{{.+}}, float 5.000000e+00, i32 %
-; CHECK-NEXT: shufflevector <4 x float> %{{.+}}, <4 x float> <float undef, float 1.000000e+00, float 2.000000e+00, float undef>, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
-; CHECK-NEXT: insertelement <4 x float> %{{.+}}, float 7.000000e+00, i32 %
-; CHECK-NEXT: ret <4 x float> %
-
-define<4 x float> @bazzz(<4 x float> %x) {
+define <4 x float> @bazzz(<4 x float> %x) {
+; CHECK-LABEL: @bazzz(
+; CHECK-NEXT: [[INS2:%.*]] = insertelement <4 x float> %x, float 2.000000e+00, i32 2
+; CHECK-NEXT: ret <4 x float> [[INS2]]
+;
%ins1 = insertelement<4 x float> %x, float 1.0, i32 5
%ins2 = insertelement<4 x float> %ins1, float 2.0, i32 2
- ret<4 x float> %ins2
+ ret <4 x float> %ins2
}
-; CHECK-LABEL: @bazzz
-; CHECK-NEXT: insertelement <4 x float> %{{.+}}, float 2.000000e+00, i32 2
-; CHECK-NEXT: ret <4 x float> %
-
-define<4 x float> @bazzzz(<4 x float> %x) {
+define <4 x float> @bazzzz(<4 x float> %x) {
+; CHECK-LABEL: @bazzzz(
+; CHECK-NEXT: [[INS1:%.*]] = insertelement <4 x float> %x, float 1.000000e+00, i32 undef
+; CHECK-NEXT: [[INS2:%.*]] = insertelement <4 x float> %x, float 2.000000e+00, i32 2
+; CHECK-NEXT: ret <4 x float> [[INS2]]
+;
%ins1 = insertelement<4 x float> %x, float 1.0, i32 undef
%ins2 = insertelement<4 x float> %ins1, float 2.0, i32 2
- ret<4 x float> %ins2
+ ret <4 x float> %ins2
}
-; CHECK-LABEL: @bazzzz
-; CHECK-NEXT: insertelement <4 x float> %{{.+}}, float 1.000000e+00, i32 undef
-; CHECK-NEXT: insertelement <4 x float> %{{.+}}, float 2.000000e+00, i32 2
-; CHECK-NEXT: ret <4 x float> %
-
-define<4 x float> @bazzzzz() {
+define <4 x float> @bazzzzz() {
+; CHECK-LABEL: @bazzzzz(
+; CHECK-NEXT: ret <4 x float> <float 1.000000e+00, float 5.000000e+00, float 1.000000e+01, float 4.000000e+00>
+;
%ins1 = insertelement <4 x float> insertelement (<4 x float> <float 1.0, float 2.0, float 3.0, float undef>, float 4.0, i32 3), float 5.0, i32 1
%ins2 = insertelement<4 x float> %ins1, float 10.0, i32 2
- ret<4 x float> %ins2
+ ret <4 x float> %ins2
}
-; insertelements should fold to shuffle
-; CHECK-LABEL: @bazzzzz
-; CHECK-NEXT: ret <4 x float> <float 1.000000e+00, float 5.000000e+00, float 1.000000e+01, float 4.000000e+00>
-
-define<4 x float> @bazzzzzz(<4 x float> %x, i32 %a) {
+define <4 x float> @bazzzzzz(<4 x float> %x, i32 %a) {
+; CHECK-LABEL: @bazzzzzz(
+; CHECK-NEXT: ret <4 x float> <float undef, float 5.000000e+00, float undef, float 4.000000e+00>
+;
%ins1 = insertelement <4 x float> insertelement (<4 x float> shufflevector (<4 x float> undef, <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0> , <4 x i32> <i32 0, i32 5, i32 undef, i32 6> ), float 4.0, i32 3), float 5.0, i32 1
- ret<4 x float> %ins1
+ ret <4 x float> %ins1
}
-; insertelements should fold to shuffle
-; CHECK-LABEL: @bazzzzz
-; CHECK-NEXT: ret <4 x float> <float undef, float 5.000000e+00, float undef, float 4.000000e+00>
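; NOTE: a minimal sketch of the fold the updated @foo checks exercise
; (assuming the -instcombine RUN line above): two constant inserts into
; lanes 1 and 2 collapse into one shufflevector that pulls those lanes
; from a constant second operand, e.g.
;   %ins1 = insertelement <4 x float> %x, float 1.0, i32 1
;   %ins2 = insertelement <4 x float> %ins1, float 2.0, i32 2
; folds to
;   %ins2 = shufflevector <4 x float> %x, <4 x float> <float undef, float 1.000000e+00, float 2.000000e+00, float undef>, <4 x i32> <i32 0, i32 5, i32 6, i32 3>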
diff --git a/test/Transforms/InstCombine/win-math.ll b/test/Transforms/InstCombine/win-math.ll
index e6e79e2b84a0..36947791393d 100644
--- a/test/Transforms/InstCombine/win-math.ll
+++ b/test/Transforms/InstCombine/win-math.ll
@@ -56,15 +56,15 @@ declare double @ceil(double %x)
define float @float_ceil(float %x) nounwind readnone {
; WIN32-LABEL: @float_ceil(
; WIN32-NOT: float @ceilf
-; WIN32: double @ceil
+; WIN32: float @llvm.ceil.f32
; WIN64-LABEL: @float_ceil(
-; WIN64: float @ceilf
+; WIN64: float @llvm.ceil.f32
; WIN64-NOT: double @ceil
; MINGW32-LABEL: @float_ceil(
-; MINGW32: float @ceilf
+; MINGW32: float @llvm.ceil.f32
; MINGW32-NOT: double @ceil
; MINGW64-LABEL: @float_ceil(
-; MINGW64: float @ceilf
+; MINGW64: float @llvm.ceil.f32
; MINGW64-NOT: double @ceil
%1 = fpext float %x to double
%2 = call double @ceil(double %1)
@@ -137,15 +137,15 @@ declare double @floor(double %x)
define float @float_floor(float %x) nounwind readnone {
; WIN32-LABEL: @float_floor(
; WIN32-NOT: float @floorf
-; WIN32: double @floor
+; WIN32: float @llvm.floor.f32
; WIN64-LABEL: @float_floor(
-; WIN64: float @floorf
+; WIN64: float @llvm.floor.f32
; WIN64-NOT: double @floor
; MINGW32-LABEL: @float_floor(
-; MINGW32: float @floorf
+; MINGW32: float @llvm.floor.f32
; MINGW32-NOT: double @floor
; MINGW64-LABEL: @float_floor(
-; MINGW64: float @floorf
+; MINGW64: float @llvm.floor.f32
; MINGW64-NOT: double @floor
%1 = fpext float %x to double
%2 = call double @floor(double %1)
@@ -262,10 +262,10 @@ define float @float_round(float %x) nounwind readnone {
; WIN64-NOT: float @roundf
; WIN64: double @round
; MINGW32-LABEL: @float_round(
-; MINGW32: float @roundf
+; MINGW32: float @llvm.round.f32
; MINGW32-NOT: double @round
; MINGW64-LABEL: @float_round(
-; MINGW64: float @roundf
+; MINGW64: float @llvm.round.f32
; MINGW64-NOT: double @round
%1 = fpext float %x to double
%2 = call double @round(double %1)
@@ -274,21 +274,26 @@ define float @float_round(float %x) nounwind readnone {
}
declare float @powf(float, float)
-; win32 lacks sqrtf&fabsf, win64 lacks fabsf
+
+; win32 lacks sqrtf&fabsf, win64 lacks fabsf, but
+; calls to the intrinsics can be emitted instead.
define float @float_powsqrt(float %x) nounwind readnone {
; WIN32-LABEL: @float_powsqrt(
; WIN32-NOT: float @sqrtf
; WIN32: float @powf
+
; WIN64-LABEL: @float_powsqrt(
-; WIN64-NOT: float @sqrtf
-; WIN64: float @powf
+; WIN64: float @sqrtf
+; WIN64: float @llvm.fabs.f32(
+; WIN64-NOT: float @powf
+
; MINGW32-LABEL: @float_powsqrt(
; MINGW32: float @sqrtf
-; MINGW32: float @fabsf
+; MINGW32: float @llvm.fabs.f32
; MINGW32-NOT: float @powf
; MINGW64-LABEL: @float_powsqrt(
; MINGW64: float @sqrtf
-; MINGW64: float @fabsf
+; MINGW64: float @llvm.fabs.f32(
; MINGW64-NOT: float @powf
%1 = call float @powf(float %x, float 0.5)
ret float %1
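; NOTE: a sketch of the expansion the @float_powsqrt checks above expect,
; reconstructed from the CHECK lines only (the full transform also has to
; guard the pow(-inf, 0.5) == +inf corner, which these checks do not pin
; down):
;   %sqrt = call float @sqrtf(float %x)
;   %fabs = call float @llvm.fabs.f32(float %sqrt)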
diff --git a/test/Transforms/InstCombine/x86-avx2.ll b/test/Transforms/InstCombine/x86-avx2.ll
index 4c13b4c6ae74..f4045f788e2d 100644
--- a/test/Transforms/InstCombine/x86-avx2.ll
+++ b/test/Transforms/InstCombine/x86-avx2.ll
@@ -81,5 +81,29 @@ define <8 x float> @undef_test_vpermps(<8 x float> %a0) {
ret <8 x float> %a
}
+; Verify simplify demanded elts.
+
+define <8 x i32> @elts_test_vpermd(<8 x i32> %a0, i32 %a1) {
+; CHECK-LABEL: @elts_test_vpermd(
+; CHECK-NEXT: ret <8 x i32> %a0
+;
+ %1 = insertelement <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, i32 %a1, i32 0
+ %2 = tail call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a0, <8 x i32> %1)
+ %3 = shufflevector <8 x i32> %2, <8 x i32> undef, <8 x i32> <i32 undef, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i32> %3
+}
+
+define <8 x float> @elts_test_vpermps(<8 x float> %a0, <8 x i32> %a1) {
+; CHECK-LABEL: @elts_test_vpermps(
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x i32> %a1)
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x float> [[TMP1]], <8 x float> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: ret <8 x float> [[TMP2]]
+;
+ %1 = insertelement <8 x i32> %a1, i32 0, i32 7
+ %2 = tail call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x i32> %1)
+ %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> zeroinitializer
+ ret <8 x float> %3
+}
+
declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>)
declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>)
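; NOTE: why @elts_test_vpermd folds all the way to 'ret <8 x i32> %a0'
; (a sketch of the demanded-elements reasoning): the trailing shufflevector
; leaves lane 0 undef, so index element 0 of %1 is never demanded; the
; insertelement of %a1 is then dead, the surviving indices form the identity
; permutation, and an identity @llvm.x86.avx2.permd is just its first source:
;   %2 = tail call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>)
;   ; ==> %a0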
diff --git a/test/Transforms/InstCombine/x86-avx512.ll b/test/Transforms/InstCombine/x86-avx512.ll
index d2a2580d8c24..2a24d93ce76a 100644
--- a/test/Transforms/InstCombine/x86-avx512.ll
+++ b/test/Transforms/InstCombine/x86-avx512.ll
@@ -6,10 +6,10 @@ declare <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>, <4 x float>,
define <4 x float> @test_add_ss(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: @test_add_ss(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> %a, i64 0
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> %b, i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i32 0
; CHECK-NEXT: [[TMP3:%.*]] = fadd float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> %a, float [[TMP3]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> [[A]], float [[TMP3]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP4]]
;
%1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1
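; NOTE on the churn in the remaining hunks of this file: the regenerated
; checks match arguments with FileCheck pattern variables instead of literal
; names. [[A:%.*]] binds the string at the first use of %a, and a later
; [[A]] re-matches that same string, e.g.
;   ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
;   ; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> [[A]], float [[TMP3]], i64 0
; so the tests survive argument renames without further edits.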
@@ -21,7 +21,7 @@ define <4 x float> @test_add_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_add_ss_round(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: @test_add_ss_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> undef, i8 -1, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> undef, i8 -1, i32 8)
; CHECK-NEXT: ret <4 x float> [[TMP1]]
;
%1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1
@@ -33,14 +33,14 @@ define <4 x float> @test_add_ss_round(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_add_ss_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> %a, i64 0
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> %b, i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fadd float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 %mask to <8 x i1>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <8 x i1> [[TMP4]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> %c, i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[C:%.*]], i32 0
; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP5]], float [[TMP3]], float [[TMP6]]
-; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x float> %a, float [[TMP7]], i64 0
+; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x float> [[A]], float [[TMP7]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP8]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -52,7 +52,7 @@ define <4 x float> @test_add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
define <4 x float> @test_add_ss_mask_round(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_add_ss_mask_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <4 x float> [[TMP1]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -83,10 +83,10 @@ declare <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>, <2 x doubl
define <2 x double> @test_add_sd(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: @test_add_sd(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> %a, i64 0
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> %b, i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fadd double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x double> %a, double [[TMP3]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x double> [[A]], double [[TMP3]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP4]]
;
%1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1
@@ -96,7 +96,7 @@ define <2 x double> @test_add_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @test_add_sd_round(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: @test_add_sd_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> undef, i8 -1, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> undef, i8 -1, i32 8)
; CHECK-NEXT: ret <2 x double> [[TMP1]]
;
%1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1
@@ -106,14 +106,14 @@ define <2 x double> @test_add_sd_round(<2 x double> %a, <2 x double> %b) {
define <2 x double> @test_add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_add_sd_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> %a, i64 0
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> %b, i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fadd double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 %mask to <8 x i1>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <8 x i1> [[TMP4]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> %c, i64 0
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP5]], double [[TMP3]], double [[TMP6]]
-; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> %a, double [[TMP7]], i64 0
+; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> [[A]], double [[TMP7]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP8]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
@@ -123,7 +123,7 @@ define <2 x double> @test_add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
define <2 x double> @test_add_sd_mask_round(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_add_sd_mask_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <2 x double> [[TMP1]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
@@ -148,10 +148,10 @@ declare <4 x float> @llvm.x86.avx512.mask.sub.ss.round(<4 x float>, <4 x float>,
define <4 x float> @test_sub_ss(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: @test_sub_ss(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> %a, i64 0
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> %b, i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i32 0
; CHECK-NEXT: [[TMP3:%.*]] = fsub float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> %a, float [[TMP3]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> [[A]], float [[TMP3]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP4]]
;
%1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1
@@ -163,7 +163,7 @@ define <4 x float> @test_sub_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_sub_ss_round(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: @test_sub_ss_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.sub.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> undef, i8 -1, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.sub.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> undef, i8 -1, i32 8)
; CHECK-NEXT: ret <4 x float> [[TMP1]]
;
%1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1
@@ -175,14 +175,14 @@ define <4 x float> @test_sub_ss_round(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_sub_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_sub_ss_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> %a, i64 0
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> %b, i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fsub float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 %mask to <8 x i1>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <8 x i1> [[TMP4]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> %c, i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[C:%.*]], i32 0
; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP5]], float [[TMP3]], float [[TMP6]]
-; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x float> %a, float [[TMP7]], i64 0
+; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x float> [[A]], float [[TMP7]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP8]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -194,7 +194,7 @@ define <4 x float> @test_sub_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
define <4 x float> @test_sub_ss_mask_round(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_sub_ss_mask_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.sub.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.sub.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <4 x float> [[TMP1]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -225,10 +225,10 @@ declare <2 x double> @llvm.x86.avx512.mask.sub.sd.round(<2 x double>, <2 x doubl
define <2 x double> @test_sub_sd(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: @test_sub_sd(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> %a, i64 0
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> %b, i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fsub double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x double> %a, double [[TMP3]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x double> [[A]], double [[TMP3]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP4]]
;
%1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1
@@ -238,7 +238,7 @@ define <2 x double> @test_sub_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @test_sub_sd_round(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: @test_sub_sd_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.sub.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> undef, i8 -1, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.sub.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> undef, i8 -1, i32 8)
; CHECK-NEXT: ret <2 x double> [[TMP1]]
;
%1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1
@@ -248,14 +248,14 @@ define <2 x double> @test_sub_sd_round(<2 x double> %a, <2 x double> %b) {
define <2 x double> @test_sub_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_sub_sd_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> %a, i64 0
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> %b, i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fsub double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 %mask to <8 x i1>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <8 x i1> [[TMP4]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> %c, i64 0
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP5]], double [[TMP3]], double [[TMP6]]
-; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> %a, double [[TMP7]], i64 0
+; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> [[A]], double [[TMP7]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP8]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
@@ -265,7 +265,7 @@ define <2 x double> @test_sub_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
define <2 x double> @test_sub_sd_mask_round(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_sub_sd_mask_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.sub.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.sub.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <2 x double> [[TMP1]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
@@ -290,10 +290,10 @@ declare <4 x float> @llvm.x86.avx512.mask.mul.ss.round(<4 x float>, <4 x float>,
define <4 x float> @test_mul_ss(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: @test_mul_ss(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> %a, i64 0
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> %b, i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i32 0
; CHECK-NEXT: [[TMP3:%.*]] = fmul float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> %a, float [[TMP3]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> [[A]], float [[TMP3]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP4]]
;
%1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1
@@ -305,7 +305,7 @@ define <4 x float> @test_mul_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_mul_ss_round(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: @test_mul_ss_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.mul.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> undef, i8 -1, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.mul.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> undef, i8 -1, i32 8)
; CHECK-NEXT: ret <4 x float> [[TMP1]]
;
%1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1
@@ -317,14 +317,14 @@ define <4 x float> @test_mul_ss_round(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_mul_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_mul_ss_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> %a, i64 0
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> %b, i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fmul float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 %mask to <8 x i1>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <8 x i1> [[TMP4]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> %c, i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[C:%.*]], i32 0
; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP5]], float [[TMP3]], float [[TMP6]]
-; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x float> %a, float [[TMP7]], i64 0
+; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x float> [[A]], float [[TMP7]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP8]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -336,7 +336,7 @@ define <4 x float> @test_mul_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
define <4 x float> @test_mul_ss_mask_round(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_mul_ss_mask_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.mul.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.mul.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <4 x float> [[TMP1]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -367,10 +367,10 @@ declare <2 x double> @llvm.x86.avx512.mask.mul.sd.round(<2 x double>, <2 x doubl
define <2 x double> @test_mul_sd(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: @test_mul_sd(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> %a, i64 0
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> %b, i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fmul double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x double> %a, double [[TMP3]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x double> [[A]], double [[TMP3]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP4]]
;
%1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1
@@ -380,7 +380,7 @@ define <2 x double> @test_mul_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @test_mul_sd_round(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: @test_mul_sd_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.mul.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> undef, i8 -1, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.mul.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> undef, i8 -1, i32 8)
; CHECK-NEXT: ret <2 x double> [[TMP1]]
;
%1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1
@@ -390,14 +390,14 @@ define <2 x double> @test_mul_sd_round(<2 x double> %a, <2 x double> %b) {
define <2 x double> @test_mul_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_mul_sd_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> %a, i64 0
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> %b, i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fmul double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 %mask to <8 x i1>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <8 x i1> [[TMP4]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> %c, i64 0
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP5]], double [[TMP3]], double [[TMP6]]
-; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> %a, double [[TMP7]], i64 0
+; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> [[A]], double [[TMP7]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP8]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
@@ -407,7 +407,7 @@ define <2 x double> @test_mul_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
define <2 x double> @test_mul_sd_mask_round(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_mul_sd_mask_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.mul.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.mul.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <2 x double> [[TMP1]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
@@ -432,10 +432,10 @@ declare <4 x float> @llvm.x86.avx512.mask.div.ss.round(<4 x float>, <4 x float>,
define <4 x float> @test_div_ss(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: @test_div_ss(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> %a, i64 0
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> %b, i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i32 0
; CHECK-NEXT: [[TMP3:%.*]] = fdiv float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> %a, float [[TMP3]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x float> [[A]], float [[TMP3]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP4]]
;
%1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1
@@ -447,7 +447,7 @@ define <4 x float> @test_div_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_div_ss_round(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: @test_div_ss_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.div.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> undef, i8 -1, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.div.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> undef, i8 -1, i32 8)
; CHECK-NEXT: ret <4 x float> [[TMP1]]
;
%1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1
@@ -459,14 +459,14 @@ define <4 x float> @test_div_ss_round(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_div_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_div_ss_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> %a, i64 0
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> %b, i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fdiv float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 %mask to <8 x i1>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <8 x i1> [[TMP4]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> %c, i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[C:%.*]], i32 0
; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP5]], float [[TMP3]], float [[TMP6]]
-; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x float> %a, float [[TMP7]], i64 0
+; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x float> [[A]], float [[TMP7]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP8]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -478,7 +478,7 @@ define <4 x float> @test_div_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
define <4 x float> @test_div_ss_mask_round(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_div_ss_mask_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.div.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.div.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <4 x float> [[TMP1]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -509,10 +509,10 @@ declare <2 x double> @llvm.x86.avx512.mask.div.sd.round(<2 x double>, <2 x doubl
define <2 x double> @test_div_sd(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: @test_div_sd(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> %a, i64 0
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> %b, i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fdiv double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x double> %a, double [[TMP3]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x double> [[A]], double [[TMP3]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP4]]
;
%1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1
@@ -522,7 +522,7 @@ define <2 x double> @test_div_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @test_div_sd_round(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: @test_div_sd_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.div.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> undef, i8 -1, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.div.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> undef, i8 -1, i32 8)
; CHECK-NEXT: ret <2 x double> [[TMP1]]
;
%1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1
@@ -532,14 +532,14 @@ define <2 x double> @test_div_sd_round(<2 x double> %a, <2 x double> %b) {
define <2 x double> @test_div_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_div_sd_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> %a, i64 0
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> %b, i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fdiv double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 %mask to <8 x i1>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <8 x i1> [[TMP4]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> %c, i64 0
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP5]], double [[TMP3]], double [[TMP6]]
-; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> %a, double [[TMP7]], i64 0
+; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> [[A]], double [[TMP7]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP8]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
@@ -549,7 +549,7 @@ define <2 x double> @test_div_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
define <2 x double> @test_div_sd_mask_round(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_div_sd_mask_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.div.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.div.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <2 x double> [[TMP1]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
@@ -574,7 +574,7 @@ declare <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>, <4 x float>,
define <4 x float> @test_max_ss(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: @test_max_ss(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> undef, i8 -1, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> undef, i8 -1, i32 4)
; CHECK-NEXT: ret <4 x float> [[TMP1]]
;
%1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1
@@ -586,7 +586,7 @@ define <4 x float> @test_max_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_max_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_max_ss_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
; CHECK-NEXT: ret <4 x float> [[TMP1]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -617,7 +617,7 @@ declare <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>, <2 x doubl
define <2 x double> @test_max_sd(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: @test_max_sd(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> undef, i8 -1, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> undef, i8 -1, i32 4)
; CHECK-NEXT: ret <2 x double> [[TMP1]]
;
%1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1
@@ -627,7 +627,7 @@ define <2 x double> @test_max_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @test_max_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_max_sd_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
; CHECK-NEXT: ret <2 x double> [[TMP1]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
@@ -652,7 +652,7 @@ declare <4 x float> @llvm.x86.avx512.mask.min.ss.round(<4 x float>, <4 x float>,
define <4 x float> @test_min_ss(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: @test_min_ss(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.min.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> undef, i8 -1, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.min.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> undef, i8 -1, i32 4)
; CHECK-NEXT: ret <4 x float> [[TMP1]]
;
%1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1
@@ -664,7 +664,7 @@ define <4 x float> @test_min_ss(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test_min_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_min_ss_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.min.ss.round(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.min.ss.round(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
; CHECK-NEXT: ret <4 x float> [[TMP1]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -695,7 +695,7 @@ declare <2 x double> @llvm.x86.avx512.mask.min.sd.round(<2 x double>, <2 x doubl
define <2 x double> @test_min_sd(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: @test_min_sd(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.min.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> undef, i8 -1, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.min.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> undef, i8 -1, i32 4)
; CHECK-NEXT: ret <2 x double> [[TMP1]]
;
%1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1
@@ -705,7 +705,7 @@ define <2 x double> @test_min_sd(<2 x double> %a, <2 x double> %b) {
define <2 x double> @test_min_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_min_sd_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.min.sd.round(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.min.sd.round(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
; CHECK-NEXT: ret <2 x double> [[TMP1]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
@@ -730,7 +730,7 @@ declare i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float>, <4 x float>, i32, i8, i32)
define i8 @test_cmp_ss(<4 x float> %a, <4 x float> %b, i8 %mask) {
; CHECK-LABEL: @test_cmp_ss(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %a, <4 x float> %b, i32 3, i8 %mask, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], i32 3, i8 [[MASK:%.*]], i32 4)
; CHECK-NEXT: ret i8 [[TMP1]]
;
%1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1
@@ -747,7 +747,7 @@ declare i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double>, <2 x double>, i32, i8, i32
define i8 @test_cmp_sd(<2 x double> %a, <2 x double> %b, i8 %mask) {
; CHECK-LABEL: @test_cmp_sd(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %a, <2 x double> %b, i32 3, i8 %mask, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], i32 3, i8 [[MASK:%.*]], i32 4)
; CHECK-NEXT: ret i8 [[TMP1]]
;
%1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1
@@ -758,22 +758,22 @@ define i8 @test_cmp_sd(<2 x double> %a, <2 x double> %b, i8 %mask) {
define i64 @test(float %f, double %d) {
; CHECK-LABEL: @test(
-; CHECK-NEXT: [[V00:%.*]] = insertelement <4 x float> undef, float %f, i32 0
-; CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.x86.avx512.vcvtss2si32(<4 x float> [[V00]], i32 4)
-; CHECK-NEXT: [[V10:%.*]] = insertelement <4 x float> undef, float %f, i32 0
-; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.x86.avx512.vcvtss2si64(<4 x float> [[V10]], i32 4)
-; CHECK-NEXT: [[V20:%.*]] = insertelement <4 x float> undef, float %f, i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = tail call i32 @llvm.x86.avx512.cvttss2si(<4 x float> [[V20]], i32 4)
-; CHECK-NEXT: [[V30:%.*]] = insertelement <4 x float> undef, float %f, i32 0
-; CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.x86.avx512.cvttss2si64(<4 x float> [[V30]], i32 4)
-; CHECK-NEXT: [[V40:%.*]] = insertelement <2 x double> undef, double %d, i32 0
-; CHECK-NEXT: [[TMP4:%.*]] = tail call i32 @llvm.x86.avx512.vcvtsd2si32(<2 x double> [[V40]], i32 4)
-; CHECK-NEXT: [[V50:%.*]] = insertelement <2 x double> undef, double %d, i32 0
-; CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double> [[V50]], i32 4)
-; CHECK-NEXT: [[V60:%.*]] = insertelement <2 x double> undef, double %d, i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 @llvm.x86.avx512.cvttsd2si(<2 x double> [[V60]], i32 4)
-; CHECK-NEXT: [[V70:%.*]] = insertelement <2 x double> undef, double %d, i32 0
-; CHECK-NEXT: [[TMP7:%.*]] = tail call i64 @llvm.x86.avx512.cvttsd2si64(<2 x double> [[V70]], i32 4)
+; CHECK-NEXT: [[V03:%.*]] = insertelement <4 x float> undef, float [[F:%.*]], i32 0
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.x86.avx512.vcvtss2si32(<4 x float> [[V03]], i32 4)
+; CHECK-NEXT: [[V13:%.*]] = insertelement <4 x float> undef, float [[F]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.x86.avx512.vcvtss2si64(<4 x float> [[V13]], i32 4)
+; CHECK-NEXT: [[V23:%.*]] = insertelement <4 x float> undef, float [[F]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = tail call i32 @llvm.x86.avx512.cvttss2si(<4 x float> [[V23]], i32 4)
+; CHECK-NEXT: [[V33:%.*]] = insertelement <4 x float> undef, float [[F]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.x86.avx512.cvttss2si64(<4 x float> [[V33]], i32 4)
+; CHECK-NEXT: [[V41:%.*]] = insertelement <2 x double> undef, double [[D:%.*]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = tail call i32 @llvm.x86.avx512.vcvtsd2si32(<2 x double> [[V41]], i32 4)
+; CHECK-NEXT: [[V51:%.*]] = insertelement <2 x double> undef, double [[D]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double> [[V51]], i32 4)
+; CHECK-NEXT: [[V61:%.*]] = insertelement <2 x double> undef, double [[D]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 @llvm.x86.avx512.cvttsd2si(<2 x double> [[V61]], i32 4)
+; CHECK-NEXT: [[V71:%.*]] = insertelement <2 x double> undef, double [[D]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = tail call i64 @llvm.x86.avx512.cvttsd2si64(<2 x double> [[V71]], i32 4)
; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP0]], [[TMP2]]
; CHECK-NEXT: [[TMP9:%.*]] = add i32 [[TMP4]], [[TMP6]]
; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP8]], [[TMP9]]
@@ -781,7 +781,7 @@ define i64 @test(float %f, double %d) {
; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP1]], [[TMP3]]
; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP5]], [[TMP7]]
; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[TMP12]], [[TMP13]]
-; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP11]], [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP14]], [[TMP11]]
; CHECK-NEXT: ret i64 [[TMP15]]
;
%v00 = insertelement <4 x float> undef, float %f, i32 0
@@ -838,22 +838,22 @@ declare i64 @llvm.x86.avx512.cvttsd2si64(<2 x double>, i32)
define i64 @test2(float %f, double %d) {
; CHECK-LABEL: @test2(
-; CHECK-NEXT: [[V00:%.*]] = insertelement <4 x float> undef, float %f, i32 0
-; CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.x86.avx512.vcvtss2usi32(<4 x float> [[V00]], i32 4)
-; CHECK-NEXT: [[V10:%.*]] = insertelement <4 x float> undef, float %f, i32 0
-; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float> [[V10]], i32 4)
-; CHECK-NEXT: [[V20:%.*]] = insertelement <4 x float> undef, float %f, i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = tail call i32 @llvm.x86.avx512.cvttss2usi(<4 x float> [[V20]], i32 4)
-; CHECK-NEXT: [[V30:%.*]] = insertelement <4 x float> undef, float %f, i32 0
-; CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.x86.avx512.cvttss2usi64(<4 x float> [[V30]], i32 4)
-; CHECK-NEXT: [[V40:%.*]] = insertelement <2 x double> undef, double %d, i32 0
-; CHECK-NEXT: [[TMP4:%.*]] = tail call i32 @llvm.x86.avx512.vcvtsd2usi32(<2 x double> [[V40]], i32 4)
-; CHECK-NEXT: [[V50:%.*]] = insertelement <2 x double> undef, double %d, i32 0
-; CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double> [[V50]], i32 4)
-; CHECK-NEXT: [[V60:%.*]] = insertelement <2 x double> undef, double %d, i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 @llvm.x86.avx512.cvttsd2usi(<2 x double> [[V60]], i32 4)
-; CHECK-NEXT: [[V70:%.*]] = insertelement <2 x double> undef, double %d, i32 0
-; CHECK-NEXT: [[TMP7:%.*]] = tail call i64 @llvm.x86.avx512.cvttsd2usi64(<2 x double> [[V70]], i32 4)
+; CHECK-NEXT: [[V03:%.*]] = insertelement <4 x float> undef, float [[F:%.*]], i32 0
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.x86.avx512.vcvtss2usi32(<4 x float> [[V03]], i32 4)
+; CHECK-NEXT: [[V13:%.*]] = insertelement <4 x float> undef, float [[F]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float> [[V13]], i32 4)
+; CHECK-NEXT: [[V23:%.*]] = insertelement <4 x float> undef, float [[F]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = tail call i32 @llvm.x86.avx512.cvttss2usi(<4 x float> [[V23]], i32 4)
+; CHECK-NEXT: [[V33:%.*]] = insertelement <4 x float> undef, float [[F]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.x86.avx512.cvttss2usi64(<4 x float> [[V33]], i32 4)
+; CHECK-NEXT: [[V41:%.*]] = insertelement <2 x double> undef, double [[D:%.*]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = tail call i32 @llvm.x86.avx512.vcvtsd2usi32(<2 x double> [[V41]], i32 4)
+; CHECK-NEXT: [[V51:%.*]] = insertelement <2 x double> undef, double [[D]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double> [[V51]], i32 4)
+; CHECK-NEXT: [[V61:%.*]] = insertelement <2 x double> undef, double [[D]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 @llvm.x86.avx512.cvttsd2usi(<2 x double> [[V61]], i32 4)
+; CHECK-NEXT: [[V71:%.*]] = insertelement <2 x double> undef, double [[D]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = tail call i64 @llvm.x86.avx512.cvttsd2usi64(<2 x double> [[V71]], i32 4)
; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP0]], [[TMP2]]
; CHECK-NEXT: [[TMP9:%.*]] = add i32 [[TMP4]], [[TMP6]]
; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP8]], [[TMP9]]
@@ -861,7 +861,7 @@ define i64 @test2(float %f, double %d) {
; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP1]], [[TMP3]]
; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP5]], [[TMP7]]
; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[TMP12]], [[TMP13]]
-; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP11]], [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP14]], [[TMP11]]
; CHECK-NEXT: ret i64 [[TMP15]]
;
%v00 = insertelement <4 x float> undef, float %f, i32 0
@@ -920,8 +920,8 @@ declare <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float>, <4 x float>, <4
define <4 x float> @test_mask_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_mask_vfmadd_ss(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask, i32 4)
-; CHECK-NEXT: ret <4 x float> [[TMP1]]
+; CHECK-NEXT: [[RES:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
+; CHECK-NEXT: ret <4 x float> [[RES]]
;
%1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1
%2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2
@@ -935,7 +935,7 @@ define <4 x float> @test_mask_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x flo
define float @test_mask_vfmadd_ss_0(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_mask_vfmadd_ss_0(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[TMP1]], i32 0
; CHECK-NEXT: ret float [[TMP2]]
;
@@ -963,8 +963,8 @@ declare <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double>, <2 x double>,
define <2 x double> @test_mask_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_mask_vfmadd_sd(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4)
-; CHECK-NEXT: ret <2 x double> [[TMP1]]
+; CHECK-NEXT: [[RES:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
+; CHECK-NEXT: ret <2 x double> [[RES]]
;
%1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1
%2 = insertelement <2 x double> %c, double 2.000000e+00, i32 1
@@ -974,7 +974,7 @@ define <2 x double> @test_mask_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x
define double @test_mask_vfmadd_sd_0(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_mask_vfmadd_sd_0(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 0
; CHECK-NEXT: ret double [[TMP2]]
;
@@ -998,8 +998,8 @@ declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float>, <4 x float>, <
define <4 x float> @test_maskz_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_maskz_vfmadd_ss(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask, i32 4)
-; CHECK-NEXT: ret <4 x float> [[TMP1]]
+; CHECK-NEXT: [[RES:%.*]] = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
+; CHECK-NEXT: ret <4 x float> [[RES]]
;
%1 = insertelement <4 x float> %b, float 1.000000e+00, i32 1
%2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2
@@ -1013,7 +1013,7 @@ define <4 x float> @test_maskz_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x fl
define float @test_maskz_vfmadd_ss_0(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_maskz_vfmadd_ss_0(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[TMP1]], i32 0
; CHECK-NEXT: ret float [[TMP2]]
;
@@ -1041,8 +1041,8 @@ declare <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double>, <2 x double>
define <2 x double> @test_maskz_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_maskz_vfmadd_sd(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4)
-; CHECK-NEXT: ret <2 x double> [[TMP1]]
+; CHECK-NEXT: [[RES:%.*]] = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
+; CHECK-NEXT: ret <2 x double> [[RES]]
;
%1 = insertelement <2 x double> %b, double 1.000000e+00, i32 1
%2 = insertelement <2 x double> %c, double 2.000000e+00, i32 1
@@ -1052,7 +1052,7 @@ define <2 x double> @test_maskz_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x
define double @test_maskz_vfmadd_sd_0(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_maskz_vfmadd_sd_0(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 0
; CHECK-NEXT: ret double [[TMP2]]
;
@@ -1076,8 +1076,8 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float>, <4 x float>, <
define <4 x float> @test_mask3_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_mask3_vfmadd_ss(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask, i32 4)
-; CHECK-NEXT: ret <4 x float> [[TMP1]]
+; CHECK-NEXT: [[RES:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
+; CHECK-NEXT: ret <4 x float> [[RES]]
;
%1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1
%2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2
@@ -1091,7 +1091,7 @@ define <4 x float> @test_mask3_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x fl
define float @test_mask3_vfmadd_ss_0(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_mask3_vfmadd_ss_0(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[TMP1]], i32 0
; CHECK-NEXT: ret float [[TMP2]]
;
@@ -1119,8 +1119,8 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double>, <2 x double>
define <2 x double> @test_mask3_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_mask3_vfmadd_sd(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4)
-; CHECK-NEXT: ret <2 x double> [[TMP1]]
+; CHECK-NEXT: [[RES:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
+; CHECK-NEXT: ret <2 x double> [[RES]]
;
%1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1
%2 = insertelement <2 x double> %b, double 2.000000e+00, i32 1
@@ -1130,7 +1130,7 @@ define <2 x double> @test_mask3_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x
define double @test_mask3_vfmadd_sd_0(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_mask3_vfmadd_sd_0(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 0
; CHECK-NEXT: ret double [[TMP2]]
;
@@ -1154,8 +1154,8 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float>, <4 x float>, <
define <4 x float> @test_mask3_vfmsub_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_mask3_vfmsub_ss(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask, i32 4)
-; CHECK-NEXT: ret <4 x float> [[TMP1]]
+; CHECK-NEXT: [[RES:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
+; CHECK-NEXT: ret <4 x float> [[RES]]
;
%1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1
%2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2
@@ -1169,7 +1169,7 @@ define <4 x float> @test_mask3_vfmsub_ss(<4 x float> %a, <4 x float> %b, <4 x fl
define float @test_mask3_vfmsub_ss_0(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_mask3_vfmsub_ss_0(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[TMP1]], i32 0
; CHECK-NEXT: ret float [[TMP2]]
;
@@ -1197,8 +1197,8 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double>, <2 x double>
define <2 x double> @test_mask3_vfmsub_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_mask3_vfmsub_sd(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4)
-; CHECK-NEXT: ret <2 x double> [[TMP1]]
+; CHECK-NEXT: [[RES:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
+; CHECK-NEXT: ret <2 x double> [[RES]]
;
%1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1
%2 = insertelement <2 x double> %b, double 2.000000e+00, i32 1
@@ -1208,7 +1208,7 @@ define <2 x double> @test_mask3_vfmsub_sd(<2 x double> %a, <2 x double> %b, <2 x
define double @test_mask3_vfmsub_sd_0(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_mask3_vfmsub_sd_0(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 0
; CHECK-NEXT: ret double [[TMP2]]
;
@@ -1232,8 +1232,8 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float>, <4 x float>,
define <4 x float> @test_mask3_vfnmsub_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_mask3_vfnmsub_ss(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask, i32 4)
-; CHECK-NEXT: ret <4 x float> [[TMP1]]
+; CHECK-NEXT: [[RES:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
+; CHECK-NEXT: ret <4 x float> [[RES]]
;
%1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1
%2 = insertelement <4 x float> %1, float 2.000000e+00, i32 2
@@ -1247,7 +1247,7 @@ define <4 x float> @test_mask3_vfnmsub_ss(<4 x float> %a, <4 x float> %b, <4 x f
define float @test_mask3_vfnmsub_ss_0(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; CHECK-LABEL: @test_mask3_vfnmsub_ss_0(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[TMP1]], i32 0
; CHECK-NEXT: ret float [[TMP2]]
;
@@ -1275,8 +1275,8 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double>, <2 x double
define <2 x double> @test_mask3_vfnmsub_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_mask3_vfnmsub_sd(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4)
-; CHECK-NEXT: ret <2 x double> [[TMP1]]
+; CHECK-NEXT: [[RES:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
+; CHECK-NEXT: ret <2 x double> [[RES]]
;
%1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1
%2 = insertelement <2 x double> %b, double 2.000000e+00, i32 1
@@ -1286,7 +1286,7 @@ define <2 x double> @test_mask3_vfnmsub_sd(<2 x double> %a, <2 x double> %b, <2
define double @test_mask3_vfnmsub_sd_0(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_mask3_vfnmsub_sd_0(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 4)
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 0
; CHECK-NEXT: ret double [[TMP2]]
;
@@ -1310,7 +1310,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32>, <8 x i32>, <8
define <8 x i32> @identity_test_permvar_si_256(<8 x i32> %a0) {
; CHECK-LABEL: @identity_test_permvar_si_256(
-; CHECK-NEXT: ret <8 x i32> %a0
+; CHECK-NEXT: ret <8 x i32> [[A0:%.*]]
;
%a = tail call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, <8 x i32> undef, i8 -1)
ret <8 x i32> %a
@@ -1318,8 +1318,8 @@ define <8 x i32> @identity_test_permvar_si_256(<8 x i32> %a0) {
define <8 x i32> @identity_test_permvar_si_256_mask(<8 x i32> %a0, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: @identity_test_permvar_si_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i32> %a0, <8 x i32> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i32> [[A0:%.*]], <8 x i32> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x i32> [[TMP2]]
;
%a = tail call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, <8 x i32> %passthru, i8 %mask)
@@ -1328,7 +1328,7 @@ define <8 x i32> @identity_test_permvar_si_256_mask(<8 x i32> %a0, <8 x i32> %pa
define <8 x i32> @zero_test_permvar_si_256(<8 x i32> %a0) {
; CHECK-LABEL: @zero_test_permvar_si_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> undef, <8 x i32> zeroinitializer
; CHECK-NEXT: ret <8 x i32> [[TMP1]]
;
%a = tail call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> zeroinitializer, <8 x i32> undef, i8 -1)
@@ -1337,9 +1337,9 @@ define <8 x i32> @zero_test_permvar_si_256(<8 x i32> %a0) {
define <8 x i32> @zero_test_permvar_si_256_mask(<8 x i32> %a0, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: @zero_test_permvar_si_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i32> [[TMP1]], <8 x i32> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i32> [[TMP1]], <8 x i32> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x i32> [[TMP3]]
;
%a = tail call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> zeroinitializer, <8 x i32> %passthru, i8 %mask)
@@ -1348,7 +1348,7 @@ define <8 x i32> @zero_test_permvar_si_256_mask(<8 x i32> %a0, <8 x i32> %passth
define <8 x i32> @shuffle_test_permvar_si_256(<8 x i32> %a0) {
; CHECK-LABEL: @shuffle_test_permvar_si_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <8 x i32> [[TMP1]]
;
%a = tail call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <8 x i32> undef, i8 -1)
@@ -1357,9 +1357,9 @@ define <8 x i32> @shuffle_test_permvar_si_256(<8 x i32> %a0) {
define <8 x i32> @shuffle_test_permvar_si_256_mask(<8 x i32> %a0, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: @shuffle_test_permvar_si_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i32> [[TMP1]], <8 x i32> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i32> [[TMP1]], <8 x i32> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x i32> [[TMP3]]
;
%a = tail call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <8 x i32> %passthru, i8 %mask)
@@ -1368,7 +1368,7 @@ define <8 x i32> @shuffle_test_permvar_si_256_mask(<8 x i32> %a0, <8 x i32> %pas
define <8 x i32> @undef_test_permvar_si_256(<8 x i32> %a0) {
; CHECK-LABEL: @undef_test_permvar_si_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <8 x i32> [[TMP1]]
;
%a = tail call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <8 x i32> undef, i8 -1)
@@ -1377,9 +1377,9 @@ define <8 x i32> @undef_test_permvar_si_256(<8 x i32> %a0) {
define <8 x i32> @undef_test_permvar_si_256_mask(<8 x i32> %a0, <8 x i32> %passthru, i8 %mask) {
; CHECK-LABEL: @undef_test_permvar_si_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i32> [[TMP1]], <8 x i32> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i32> [[TMP1]], <8 x i32> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x i32> [[TMP3]]
;
%a = tail call <8 x i32> @llvm.x86.avx512.mask.permvar.si.256(<8 x i32> %a0, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <8 x i32> %passthru, i8 %mask)
@@ -1390,7 +1390,7 @@ declare <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float>, <8 x i32>,
define <8 x float> @identity_test_permvar_sf_256(<8 x float> %a0) {
; CHECK-LABEL: @identity_test_permvar_sf_256(
-; CHECK-NEXT: ret <8 x float> %a0
+; CHECK-NEXT: ret <8 x float> [[A0:%.*]]
;
%a = tail call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, <8 x float> undef, i8 -1)
ret <8 x float> %a
@@ -1398,8 +1398,8 @@ define <8 x float> @identity_test_permvar_sf_256(<8 x float> %a0) {
define <8 x float> @identity_test_permvar_sf_256_mask(<8 x float> %a0, <8 x float> %passthru, i8 %mask) {
; CHECK-LABEL: @identity_test_permvar_sf_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x float> %a0, <8 x float> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x float> [[A0:%.*]], <8 x float> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x float> [[TMP2]]
;
%a = tail call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, <8 x float> %passthru, i8 %mask)
@@ -1408,7 +1408,7 @@ define <8 x float> @identity_test_permvar_sf_256_mask(<8 x float> %a0, <8 x floa
define <8 x float> @zero_test_permvar_sf_256(<8 x float> %a0) {
; CHECK-LABEL: @zero_test_permvar_sf_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> [[A0:%.*]], <8 x float> undef, <8 x i32> zeroinitializer
; CHECK-NEXT: ret <8 x float> [[TMP1]]
;
%a = tail call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %a0, <8 x i32> zeroinitializer, <8 x float> undef, i8 -1)
@@ -1417,9 +1417,9 @@ define <8 x float> @zero_test_permvar_sf_256(<8 x float> %a0) {
define <8 x float> @zero_test_permvar_sf_256_mask(<8 x float> %a0, <8 x float> %passthru, i8 %mask) {
; CHECK-LABEL: @zero_test_permvar_sf_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x float> [[TMP1]], <8 x float> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> [[A0:%.*]], <8 x float> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x float> [[TMP1]], <8 x float> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x float> [[TMP3]]
;
%a = tail call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %a0, <8 x i32> zeroinitializer, <8 x float> %passthru, i8 %mask)
@@ -1428,7 +1428,7 @@ define <8 x float> @zero_test_permvar_sf_256_mask(<8 x float> %a0, <8 x float> %
define <8 x float> @shuffle_test_permvar_sf_256(<8 x float> %a0) {
; CHECK-LABEL: @shuffle_test_permvar_sf_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> [[A0:%.*]], <8 x float> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <8 x float> [[TMP1]]
;
%a = tail call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %a0, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <8 x float> undef, i8 -1)
@@ -1437,9 +1437,9 @@ define <8 x float> @shuffle_test_permvar_sf_256(<8 x float> %a0) {
define <8 x float> @shuffle_test_permvar_sf_256_mask(<8 x float> %a0, <8 x float> %passthru, i8 %mask) {
; CHECK-LABEL: @shuffle_test_permvar_sf_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x float> [[TMP1]], <8 x float> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> [[A0:%.*]], <8 x float> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x float> [[TMP1]], <8 x float> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x float> [[TMP3]]
;
%a = tail call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %a0, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <8 x float> %passthru, i8 %mask)
@@ -1448,7 +1448,7 @@ define <8 x float> @shuffle_test_permvar_sf_256_mask(<8 x float> %a0, <8 x float
define <8 x float> @undef_test_permvar_sf_256(<8 x float> %a0) {
; CHECK-LABEL: @undef_test_permvar_sf_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> [[A0:%.*]], <8 x float> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <8 x float> [[TMP1]]
;
%a = tail call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %a0, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <8 x float> undef, i8 -1)
@@ -1457,9 +1457,9 @@ define <8 x float> @undef_test_permvar_sf_256(<8 x float> %a0) {
define <8 x float> @undef_test_permvar_sf_256_mask(<8 x float> %a0, <8 x float> %passthru, i8 %mask) {
; CHECK-LABEL: @undef_test_permvar_sf_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x float> [[TMP1]], <8 x float> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> [[A0:%.*]], <8 x float> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x float> [[TMP1]], <8 x float> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x float> [[TMP3]]
;
%a = tail call <8 x float> @llvm.x86.avx512.mask.permvar.sf.256(<8 x float> %a0, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <8 x float> %passthru, i8 %mask)
@@ -1470,7 +1470,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64>, <4 x i64>, <4
define <4 x i64> @identity_test_permvar_di_256(<4 x i64> %a0) {
; CHECK-LABEL: @identity_test_permvar_di_256(
-; CHECK-NEXT: ret <4 x i64> %a0
+; CHECK-NEXT: ret <4 x i64> [[A0:%.*]]
;
%a = tail call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a0, <4 x i64> <i64 0, i64 1, i64 2, i64 3>, <4 x i64> undef, i8 -1)
ret <4 x i64> %a
@@ -1478,10 +1478,10 @@ define <4 x i64> @identity_test_permvar_di_256(<4 x i64> %a0) {
define <4 x i64> @identity_test_permvar_di_256_mask(<4 x i64> %a0, <4 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: @identity_test_permvar_di_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i1> [[TMP1]], <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[TMP2]], <4 x i64> %a0, <4 x i64> %passthru
-; CHECK-NEXT: ret <4 x i64> [[TMP3]]
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[EXTRACT:%.*]] = shufflevector <8 x i1> [[TMP1]], <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[EXTRACT]], <4 x i64> [[A0:%.*]], <4 x i64> [[PASSTHRU:%.*]]
+; CHECK-NEXT: ret <4 x i64> [[TMP2]]
;
%a = tail call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a0, <4 x i64> <i64 0, i64 1, i64 2, i64 3>, <4 x i64> %passthru, i8 %mask)
ret <4 x i64> %a
@@ -1489,7 +1489,7 @@ define <4 x i64> @identity_test_permvar_di_256_mask(<4 x i64> %a0, <4 x i64> %pa
define <4 x i64> @zero_test_permvar_di_256(<4 x i64> %a0) {
; CHECK-LABEL: @zero_test_permvar_di_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> [[A0:%.*]], <4 x i64> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: ret <4 x i64> [[TMP1]]
;
%a = tail call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a0, <4 x i64> zeroinitializer, <4 x i64> undef, i8 -1)
@@ -1498,11 +1498,11 @@ define <4 x i64> @zero_test_permvar_di_256(<4 x i64> %a0) {
define <4 x i64> @zero_test_permvar_di_256_mask(<4 x i64> %a0, <4 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: @zero_test_permvar_di_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> [[TMP3]], <4 x i64> [[TMP1]], <4 x i64> %passthru
-; CHECK-NEXT: ret <4 x i64> [[TMP4]]
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> [[A0:%.*]], <4 x i64> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[EXTRACT:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[EXTRACT]], <4 x i64> [[TMP1]], <4 x i64> [[PASSTHRU:%.*]]
+; CHECK-NEXT: ret <4 x i64> [[TMP3]]
;
%a = tail call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a0, <4 x i64> zeroinitializer, <4 x i64> %passthru, i8 %mask)
ret <4 x i64> %a
@@ -1510,7 +1510,7 @@ define <4 x i64> @zero_test_permvar_di_256_mask(<4 x i64> %a0, <4 x i64> %passth
define <4 x i64> @shuffle_test_permvar_di_256(<4 x i64> %a0) {
; CHECK-LABEL: @shuffle_test_permvar_di_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> [[A0:%.*]], <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <4 x i64> [[TMP1]]
;
%a = tail call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a0, <4 x i64> <i64 3, i64 2, i64 1, i64 0>, <4 x i64> undef, i8 -1)
@@ -1519,11 +1519,11 @@ define <4 x i64> @shuffle_test_permvar_di_256(<4 x i64> %a0) {
define <4 x i64> @shuffle_test_permvar_di_256_mask(<4 x i64> %a0, <4 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: @shuffle_test_permvar_di_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> [[TMP3]], <4 x i64> [[TMP1]], <4 x i64> %passthru
-; CHECK-NEXT: ret <4 x i64> [[TMP4]]
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> [[A0:%.*]], <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[EXTRACT:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[EXTRACT]], <4 x i64> [[TMP1]], <4 x i64> [[PASSTHRU:%.*]]
+; CHECK-NEXT: ret <4 x i64> [[TMP3]]
;
%a = tail call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a0, <4 x i64> <i64 3, i64 2, i64 1, i64 0>, <4 x i64> %passthru, i8 %mask)
ret <4 x i64> %a
@@ -1531,7 +1531,7 @@ define <4 x i64> @shuffle_test_permvar_di_256_mask(<4 x i64> %a0, <4 x i64> %pas
define <4 x i64> @undef_test_permvar_di_256(<4 x i64> %a0) {
; CHECK-LABEL: @undef_test_permvar_di_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 undef, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> [[A0:%.*]], <4 x i64> undef, <4 x i32> <i32 undef, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <4 x i64> [[TMP1]]
;
%a = tail call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a0, <4 x i64> <i64 undef, i64 2, i64 1, i64 0>, <4 x i64> undef, i8 -1)
@@ -1540,11 +1540,11 @@ define <4 x i64> @undef_test_permvar_di_256(<4 x i64> %a0) {
define <4 x i64> @undef_test_permvar_di_256_mask(<4 x i64> %a0, <4 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: @undef_test_permvar_di_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 undef, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> [[TMP3]], <4 x i64> [[TMP1]], <4 x i64> %passthru
-; CHECK-NEXT: ret <4 x i64> [[TMP4]]
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> [[A0:%.*]], <4 x i64> undef, <4 x i32> <i32 undef, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[EXTRACT:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[EXTRACT]], <4 x i64> [[TMP1]], <4 x i64> [[PASSTHRU:%.*]]
+; CHECK-NEXT: ret <4 x i64> [[TMP3]]
;
%a = tail call <4 x i64> @llvm.x86.avx512.mask.permvar.di.256(<4 x i64> %a0, <4 x i64> <i64 undef, i64 2, i64 1, i64 0>, <4 x i64> %passthru, i8 %mask)
ret <4 x i64> %a
@@ -1554,7 +1554,7 @@ declare <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double>, <4 x i64
define <4 x double> @identity_test_permvar_df_256(<4 x double> %a0) {
; CHECK-LABEL: @identity_test_permvar_df_256(
-; CHECK-NEXT: ret <4 x double> %a0
+; CHECK-NEXT: ret <4 x double> [[A0:%.*]]
;
%a = tail call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %a0, <4 x i64> <i64 0, i64 1, i64 2, i64 3>, <4 x double> undef, i8 -1)
ret <4 x double> %a
@@ -1562,10 +1562,10 @@ define <4 x double> @identity_test_permvar_df_256(<4 x double> %a0) {
define <4 x double> @identity_test_permvar_df_256_mask(<4 x double> %a0, <4 x double> %passthru, i8 %mask) {
; CHECK-LABEL: @identity_test_permvar_df_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i1> [[TMP1]], <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[TMP2]], <4 x double> %a0, <4 x double> %passthru
-; CHECK-NEXT: ret <4 x double> [[TMP3]]
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[EXTRACT:%.*]] = shufflevector <8 x i1> [[TMP1]], <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[EXTRACT]], <4 x double> [[A0:%.*]], <4 x double> [[PASSTHRU:%.*]]
+; CHECK-NEXT: ret <4 x double> [[TMP2]]
;
%a = tail call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %a0, <4 x i64> <i64 0, i64 1, i64 2, i64 3>, <4 x double> %passthru, i8 %mask)
ret <4 x double> %a
@@ -1573,7 +1573,7 @@ define <4 x double> @identity_test_permvar_df_256_mask(<4 x double> %a0, <4 x do
define <4 x double> @zero_test_permvar_df_256(<4 x double> %a0) {
; CHECK-LABEL: @zero_test_permvar_df_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A0:%.*]], <4 x double> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: ret <4 x double> [[TMP1]]
;
%a = tail call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %a0, <4 x i64> zeroinitializer, <4 x double> undef, i8 -1)
@@ -1582,11 +1582,11 @@ define <4 x double> @zero_test_permvar_df_256(<4 x double> %a0) {
define <4 x double> @zero_test_permvar_df_256_mask(<4 x double> %a0, <4 x double> %passthru, i8 %mask) {
; CHECK-LABEL: @zero_test_permvar_df_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> [[TMP3]], <4 x double> [[TMP1]], <4 x double> %passthru
-; CHECK-NEXT: ret <4 x double> [[TMP4]]
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A0:%.*]], <4 x double> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[EXTRACT:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[EXTRACT]], <4 x double> [[TMP1]], <4 x double> [[PASSTHRU:%.*]]
+; CHECK-NEXT: ret <4 x double> [[TMP3]]
;
%a = tail call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %a0, <4 x i64> zeroinitializer, <4 x double> %passthru, i8 %mask)
ret <4 x double> %a
@@ -1594,7 +1594,7 @@ define <4 x double> @zero_test_permvar_df_256_mask(<4 x double> %a0, <4 x double
define <4 x double> @shuffle_test_permvar_df_256(<4 x double> %a0) {
; CHECK-LABEL: @shuffle_test_permvar_df_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A0:%.*]], <4 x double> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <4 x double> [[TMP1]]
;
%a = tail call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %a0, <4 x i64> <i64 3, i64 2, i64 1, i64 0>, <4 x double> undef, i8 -1)
@@ -1603,11 +1603,11 @@ define <4 x double> @shuffle_test_permvar_df_256(<4 x double> %a0) {
define <4 x double> @shuffle_test_permvar_df_256_mask(<4 x double> %a0, <4 x double> %passthru, i8 %mask) {
; CHECK-LABEL: @shuffle_test_permvar_df_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> [[TMP3]], <4 x double> [[TMP1]], <4 x double> %passthru
-; CHECK-NEXT: ret <4 x double> [[TMP4]]
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A0:%.*]], <4 x double> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[EXTRACT:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[EXTRACT]], <4 x double> [[TMP1]], <4 x double> [[PASSTHRU:%.*]]
+; CHECK-NEXT: ret <4 x double> [[TMP3]]
;
%a = tail call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %a0, <4 x i64> <i64 3, i64 2, i64 1, i64 0>, <4 x double> %passthru, i8 %mask)
ret <4 x double> %a
@@ -1615,7 +1615,7 @@ define <4 x double> @shuffle_test_permvar_df_256_mask(<4 x double> %a0, <4 x dou
define <4 x double> @undef_test_permvar_df_256(<4 x double> %a0) {
; CHECK-LABEL: @undef_test_permvar_df_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 undef, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A0:%.*]], <4 x double> undef, <4 x i32> <i32 undef, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <4 x double> [[TMP1]]
;
%a = tail call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %a0, <4 x i64> <i64 undef, i64 2, i64 1, i64 0>, <4 x double> undef, i8 -1)
@@ -1624,11 +1624,11 @@ define <4 x double> @undef_test_permvar_df_256(<4 x double> %a0) {
define <4 x double> @undef_test_permvar_df_256_mask(<4 x double> %a0, <4 x double> %passthru, i8 %mask) {
; CHECK-LABEL: @undef_test_permvar_df_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 undef, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> [[TMP3]], <4 x double> [[TMP1]], <4 x double> %passthru
-; CHECK-NEXT: ret <4 x double> [[TMP4]]
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A0:%.*]], <4 x double> undef, <4 x i32> <i32 undef, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[EXTRACT:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[EXTRACT]], <4 x double> [[TMP1]], <4 x double> [[PASSTHRU:%.*]]
+; CHECK-NEXT: ret <4 x double> [[TMP3]]
;
%a = tail call <4 x double> @llvm.x86.avx512.mask.permvar.df.256(<4 x double> %a0, <4 x i64> <i64 undef, i64 2, i64 1, i64 0>, <4 x double> %passthru, i8 %mask)
ret <4 x double> %a
@@ -1638,7 +1638,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32>, <16 x i32>,
define <16 x i32> @identity_test_permvar_si_512(<16 x i32> %a0) {
; CHECK-LABEL: @identity_test_permvar_si_512(
-; CHECK-NEXT: ret <16 x i32> %a0
+; CHECK-NEXT: ret <16 x i32> [[A0:%.*]]
;
%a = tail call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>, <16 x i32> undef, i16 -1)
ret <16 x i32> %a
@@ -1646,8 +1646,8 @@ define <16 x i32> @identity_test_permvar_si_512(<16 x i32> %a0) {
define <16 x i32> @identity_test_permvar_si_512_mask(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) {
; CHECK-LABEL: @identity_test_permvar_si_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x i32> %a0, <16 x i32> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x i32> [[A0:%.*]], <16 x i32> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <16 x i32> [[TMP2]]
;
%a = tail call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>, <16 x i32> %passthru, i16 %mask)
@@ -1656,7 +1656,7 @@ define <16 x i32> @identity_test_permvar_si_512_mask(<16 x i32> %a0, <16 x i32>
define <16 x i32> @zero_test_permvar_si_512(<16 x i32> %a0) {
; CHECK-LABEL: @zero_test_permvar_si_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> %a0, <16 x i32> undef, <16 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> [[A0:%.*]], <16 x i32> undef, <16 x i32> zeroinitializer
; CHECK-NEXT: ret <16 x i32> [[TMP1]]
;
%a = tail call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> zeroinitializer, <16 x i32> undef, i16 -1)
@@ -1665,9 +1665,9 @@ define <16 x i32> @zero_test_permvar_si_512(<16 x i32> %a0) {
define <16 x i32> @zero_test_permvar_si_512_mask(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) {
; CHECK-LABEL: @zero_test_permvar_si_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> %a0, <16 x i32> undef, <16 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i32> [[TMP1]], <16 x i32> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> [[A0:%.*]], <16 x i32> undef, <16 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i32> [[TMP1]], <16 x i32> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <16 x i32> [[TMP3]]
;
%a = tail call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> zeroinitializer, <16 x i32> %passthru, i16 %mask)
@@ -1676,7 +1676,7 @@ define <16 x i32> @zero_test_permvar_si_512_mask(<16 x i32> %a0, <16 x i32> %pas
define <16 x i32> @shuffle_test_permvar_si_512(<16 x i32> %a0) {
; CHECK-LABEL: @shuffle_test_permvar_si_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> %a0, <16 x i32> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> [[A0:%.*]], <16 x i32> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <16 x i32> [[TMP1]]
;
%a = tail call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <16 x i32> undef, i16 -1)
@@ -1685,9 +1685,9 @@ define <16 x i32> @shuffle_test_permvar_si_512(<16 x i32> %a0) {
define <16 x i32> @shuffle_test_permvar_si_512_mask(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) {
; CHECK-LABEL: @shuffle_test_permvar_si_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> %a0, <16 x i32> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i32> [[TMP1]], <16 x i32> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> [[A0:%.*]], <16 x i32> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i32> [[TMP1]], <16 x i32> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <16 x i32> [[TMP3]]
;
%a = tail call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <16 x i32> %passthru, i16 %mask)
@@ -1696,7 +1696,7 @@ define <16 x i32> @shuffle_test_permvar_si_512_mask(<16 x i32> %a0, <16 x i32> %
define <16 x i32> @undef_test_permvar_si_512(<16 x i32> %a0) {
; CHECK-LABEL: @undef_test_permvar_si_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> %a0, <16 x i32> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> [[A0:%.*]], <16 x i32> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <16 x i32> [[TMP1]]
;
%a = tail call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <16 x i32> undef, i16 -1)
@@ -1705,9 +1705,9 @@ define <16 x i32> @undef_test_permvar_si_512(<16 x i32> %a0) {
define <16 x i32> @undef_test_permvar_si_512_mask(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) {
; CHECK-LABEL: @undef_test_permvar_si_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> %a0, <16 x i32> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i32> [[TMP1]], <16 x i32> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> [[A0:%.*]], <16 x i32> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i32> [[TMP1]], <16 x i32> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <16 x i32> [[TMP3]]
;
%a = tail call <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32> %a0, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <16 x i32> %passthru, i16 %mask)
@@ -1718,7 +1718,7 @@ declare <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float>, <16 x i3
define <16 x float> @identity_test_permvar_sf_512(<16 x float> %a0) {
; CHECK-LABEL: @identity_test_permvar_sf_512(
-; CHECK-NEXT: ret <16 x float> %a0
+; CHECK-NEXT: ret <16 x float> [[A0:%.*]]
;
%a = tail call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %a0, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>, <16 x float> undef, i16 -1)
ret <16 x float> %a
@@ -1726,8 +1726,8 @@ define <16 x float> @identity_test_permvar_sf_512(<16 x float> %a0) {
define <16 x float> @identity_test_permvar_sf_512_mask(<16 x float> %a0, <16 x float> %passthru, i16 %mask) {
; CHECK-LABEL: @identity_test_permvar_sf_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x float> %a0, <16 x float> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x float> [[A0:%.*]], <16 x float> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP2]]
;
%a = tail call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %a0, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>, <16 x float> %passthru, i16 %mask)
@@ -1736,7 +1736,7 @@ define <16 x float> @identity_test_permvar_sf_512_mask(<16 x float> %a0, <16 x f
define <16 x float> @zero_test_permvar_sf_512(<16 x float> %a0) {
; CHECK-LABEL: @zero_test_permvar_sf_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x float> [[A0:%.*]], <16 x float> undef, <16 x i32> zeroinitializer
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
%a = tail call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %a0, <16 x i32> zeroinitializer, <16 x float> undef, i16 -1)
@@ -1745,9 +1745,9 @@ define <16 x float> @zero_test_permvar_sf_512(<16 x float> %a0) {
define <16 x float> @zero_test_permvar_sf_512_mask(<16 x float> %a0, <16 x float> %passthru, i16 %mask) {
; CHECK-LABEL: @zero_test_permvar_sf_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x float> [[A0:%.*]], <16 x float> undef, <16 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP3]]
;
%a = tail call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %a0, <16 x i32> zeroinitializer, <16 x float> %passthru, i16 %mask)
@@ -1756,7 +1756,7 @@ define <16 x float> @zero_test_permvar_sf_512_mask(<16 x float> %a0, <16 x float
define <16 x float> @shuffle_test_permvar_sf_512(<16 x float> %a0) {
; CHECK-LABEL: @shuffle_test_permvar_sf_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x float> [[A0:%.*]], <16 x float> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
%a = tail call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %a0, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <16 x float> undef, i16 -1)
@@ -1765,9 +1765,9 @@ define <16 x float> @shuffle_test_permvar_sf_512(<16 x float> %a0) {
define <16 x float> @shuffle_test_permvar_sf_512_mask(<16 x float> %a0, <16 x float> %passthru, i16 %mask) {
; CHECK-LABEL: @shuffle_test_permvar_sf_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x float> [[A0:%.*]], <16 x float> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP3]]
;
%a = tail call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %a0, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <16 x float> %passthru, i16 %mask)
@@ -1776,7 +1776,7 @@ define <16 x float> @shuffle_test_permvar_sf_512_mask(<16 x float> %a0, <16 x fl
define <16 x float> @undef_test_permvar_sf_512(<16 x float> %a0) {
; CHECK-LABEL: @undef_test_permvar_sf_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x float> [[A0:%.*]], <16 x float> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
%a = tail call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %a0, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <16 x float> undef, i16 -1)
@@ -1785,9 +1785,9 @@ define <16 x float> @undef_test_permvar_sf_512(<16 x float> %a0) {
define <16 x float> @undef_test_permvar_sf_512_mask(<16 x float> %a0, <16 x float> %passthru, i16 %mask) {
; CHECK-LABEL: @undef_test_permvar_sf_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x float> [[A0:%.*]], <16 x float> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP3]]
;
%a = tail call <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float> %a0, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <16 x float> %passthru, i16 %mask)
@@ -1798,7 +1798,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64>, <8 x i64>, <8
define <8 x i64> @identity_test_permvar_di_512(<8 x i64> %a0) {
; CHECK-LABEL: @identity_test_permvar_di_512(
-; CHECK-NEXT: ret <8 x i64> %a0
+; CHECK-NEXT: ret <8 x i64> [[A0:%.*]]
;
%a = tail call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a0, <8 x i64> <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, <8 x i64> undef, i8 -1)
ret <8 x i64> %a
@@ -1806,8 +1806,8 @@ define <8 x i64> @identity_test_permvar_di_512(<8 x i64> %a0) {
define <8 x i64> @identity_test_permvar_di_512_mask(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: @identity_test_permvar_di_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i64> %a0, <8 x i64> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i64> [[A0:%.*]], <8 x i64> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x i64> [[TMP2]]
;
%a = tail call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a0, <8 x i64> <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, <8 x i64> %passthru, i8 %mask)
@@ -1816,7 +1816,7 @@ define <8 x i64> @identity_test_permvar_di_512_mask(<8 x i64> %a0, <8 x i64> %pa
define <8 x i64> @zero_test_permvar_di_512(<8 x i64> %a0) {
; CHECK-LABEL: @zero_test_permvar_di_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> %a0, <8 x i64> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> [[A0:%.*]], <8 x i64> undef, <8 x i32> zeroinitializer
; CHECK-NEXT: ret <8 x i64> [[TMP1]]
;
%a = tail call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a0, <8 x i64> zeroinitializer, <8 x i64> undef, i8 -1)
@@ -1825,9 +1825,9 @@ define <8 x i64> @zero_test_permvar_di_512(<8 x i64> %a0) {
define <8 x i64> @zero_test_permvar_di_512_mask(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: @zero_test_permvar_di_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> %a0, <8 x i64> undef, <8 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i64> [[TMP1]], <8 x i64> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> [[A0:%.*]], <8 x i64> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i64> [[TMP1]], <8 x i64> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x i64> [[TMP3]]
;
%a = tail call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a0, <8 x i64> zeroinitializer, <8 x i64> %passthru, i8 %mask)
@@ -1836,7 +1836,7 @@ define <8 x i64> @zero_test_permvar_di_512_mask(<8 x i64> %a0, <8 x i64> %passth
define <8 x i64> @shuffle_test_permvar_di_512(<8 x i64> %a0) {
; CHECK-LABEL: @shuffle_test_permvar_di_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> %a0, <8 x i64> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> [[A0:%.*]], <8 x i64> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <8 x i64> [[TMP1]]
;
%a = tail call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a0, <8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x i64> undef, i8 -1)
@@ -1845,9 +1845,9 @@ define <8 x i64> @shuffle_test_permvar_di_512(<8 x i64> %a0) {
define <8 x i64> @shuffle_test_permvar_di_512_mask(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: @shuffle_test_permvar_di_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> %a0, <8 x i64> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i64> [[TMP1]], <8 x i64> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> [[A0:%.*]], <8 x i64> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i64> [[TMP1]], <8 x i64> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x i64> [[TMP3]]
;
%a = tail call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a0, <8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x i64> %passthru, i8 %mask)
@@ -1856,7 +1856,7 @@ define <8 x i64> @shuffle_test_permvar_di_512_mask(<8 x i64> %a0, <8 x i64> %pas
define <8 x i64> @undef_test_permvar_di_512(<8 x i64> %a0) {
; CHECK-LABEL: @undef_test_permvar_di_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> %a0, <8 x i64> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> [[A0:%.*]], <8 x i64> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <8 x i64> [[TMP1]]
;
%a = tail call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a0, <8 x i64> <i64 undef, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x i64> undef, i8 -1)
@@ -1865,9 +1865,9 @@ define <8 x i64> @undef_test_permvar_di_512(<8 x i64> %a0) {
define <8 x i64> @undef_test_permvar_di_512_mask(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
; CHECK-LABEL: @undef_test_permvar_di_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> %a0, <8 x i64> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i64> [[TMP1]], <8 x i64> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> [[A0:%.*]], <8 x i64> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i64> [[TMP1]], <8 x i64> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x i64> [[TMP3]]
;
%a = tail call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %a0, <8 x i64> <i64 undef, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x i64> %passthru, i8 %mask)
@@ -1878,7 +1878,7 @@ declare <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double>, <8 x i64
define <8 x double> @identity_test_permvar_df_512(<8 x double> %a0) {
; CHECK-LABEL: @identity_test_permvar_df_512(
-; CHECK-NEXT: ret <8 x double> %a0
+; CHECK-NEXT: ret <8 x double> [[A0:%.*]]
;
%a = tail call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %a0, <8 x i64> <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, <8 x double> undef, i8 -1)
ret <8 x double> %a
@@ -1886,8 +1886,8 @@ define <8 x double> @identity_test_permvar_df_512(<8 x double> %a0) {
define <8 x double> @identity_test_permvar_df_512_mask(<8 x double> %a0, <8 x double> %passthru, i8 %mask) {
; CHECK-LABEL: @identity_test_permvar_df_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x double> %a0, <8 x double> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x double> [[A0:%.*]], <8 x double> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP2]]
;
%a = tail call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %a0, <8 x i64> <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, <8 x double> %passthru, i8 %mask)
@@ -1896,7 +1896,7 @@ define <8 x double> @identity_test_permvar_df_512_mask(<8 x double> %a0, <8 x do
define <8 x double> @zero_test_permvar_df_512(<8 x double> %a0) {
; CHECK-LABEL: @zero_test_permvar_df_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> [[A0:%.*]], <8 x double> undef, <8 x i32> zeroinitializer
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
%a = tail call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %a0, <8 x i64> zeroinitializer, <8 x double> undef, i8 -1)
@@ -1905,9 +1905,9 @@ define <8 x double> @zero_test_permvar_df_512(<8 x double> %a0) {
define <8 x double> @zero_test_permvar_df_512_mask(<8 x double> %a0, <8 x double> %passthru, i8 %mask) {
; CHECK-LABEL: @zero_test_permvar_df_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> [[A0:%.*]], <8 x double> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP3]]
;
%a = tail call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %a0, <8 x i64> zeroinitializer, <8 x double> %passthru, i8 %mask)
@@ -1916,7 +1916,7 @@ define <8 x double> @zero_test_permvar_df_512_mask(<8 x double> %a0, <8 x double
define <8 x double> @shuffle_test_permvar_df_512(<8 x double> %a0) {
; CHECK-LABEL: @shuffle_test_permvar_df_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> [[A0:%.*]], <8 x double> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
%a = tail call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %a0, <8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x double> undef, i8 -1)
@@ -1925,9 +1925,9 @@ define <8 x double> @shuffle_test_permvar_df_512(<8 x double> %a0) {
define <8 x double> @shuffle_test_permvar_df_512_mask(<8 x double> %a0, <8 x double> %passthru, i8 %mask) {
; CHECK-LABEL: @shuffle_test_permvar_df_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> [[A0:%.*]], <8 x double> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP3]]
;
%a = tail call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %a0, <8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x double> %passthru, i8 %mask)
@@ -1936,7 +1936,7 @@ define <8 x double> @shuffle_test_permvar_df_512_mask(<8 x double> %a0, <8 x dou
define <8 x double> @undef_test_permvar_df_512(<8 x double> %a0) {
; CHECK-LABEL: @undef_test_permvar_df_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> [[A0:%.*]], <8 x double> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
%a = tail call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %a0, <8 x i64> <i64 undef, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x double> undef, i8 -1)
@@ -1945,9 +1945,9 @@ define <8 x double> @undef_test_permvar_df_512(<8 x double> %a0) {
define <8 x double> @undef_test_permvar_df_512_mask(<8 x double> %a0, <8 x double> %passthru, i8 %mask) {
; CHECK-LABEL: @undef_test_permvar_df_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x double> [[A0:%.*]], <8 x double> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP3]]
;
%a = tail call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %a0, <8 x i64> <i64 undef, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x double> %passthru, i8 %mask)
@@ -1958,7 +1958,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16>, <8 x i16>, <8
define <8 x i16> @identity_test_permvar_hi_128(<8 x i16> %a0) {
; CHECK-LABEL: @identity_test_permvar_hi_128(
-; CHECK-NEXT: ret <8 x i16> %a0
+; CHECK-NEXT: ret <8 x i16> [[A0:%.*]]
;
%a = tail call <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16> %a0, <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, <8 x i16> undef, i8 -1)
ret <8 x i16> %a
@@ -1966,8 +1966,8 @@ define <8 x i16> @identity_test_permvar_hi_128(<8 x i16> %a0) {
define <8 x i16> @identity_test_permvar_hi_128_mask(<8 x i16> %a0, <8 x i16> %passthru, i8 %mask) {
; CHECK-LABEL: @identity_test_permvar_hi_128_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i16> %a0, <8 x i16> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i16> [[A0:%.*]], <8 x i16> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x i16> [[TMP2]]
;
%a = tail call <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16> %a0, <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, <8 x i16> %passthru, i8 %mask)
@@ -1976,7 +1976,7 @@ define <8 x i16> @identity_test_permvar_hi_128_mask(<8 x i16> %a0, <8 x i16> %pa
define <8 x i16> @zero_test_permvar_hi_128(<8 x i16> %a0) {
; CHECK-LABEL: @zero_test_permvar_hi_128(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A0:%.*]], <8 x i16> undef, <8 x i32> zeroinitializer
; CHECK-NEXT: ret <8 x i16> [[TMP1]]
;
%a = tail call <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i16> undef, i8 -1)
@@ -1985,9 +1985,9 @@ define <8 x i16> @zero_test_permvar_hi_128(<8 x i16> %a0) {
define <8 x i16> @zero_test_permvar_hi_128_mask(<8 x i16> %a0, <8 x i16> %passthru, i8 %mask) {
; CHECK-LABEL: @zero_test_permvar_hi_128_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i16> [[TMP1]], <8 x i16> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A0:%.*]], <8 x i16> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i16> [[TMP1]], <8 x i16> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x i16> [[TMP3]]
;
%a = tail call <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16> %a0, <8 x i16> zeroinitializer, <8 x i16> %passthru, i8 %mask)
@@ -1996,7 +1996,7 @@ define <8 x i16> @zero_test_permvar_hi_128_mask(<8 x i16> %a0, <8 x i16> %passth
define <8 x i16> @shuffle_test_permvar_hi_128(<8 x i16> %a0) {
; CHECK-LABEL: @shuffle_test_permvar_hi_128(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A0:%.*]], <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <8 x i16> [[TMP1]]
;
%a = tail call <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16> %a0, <8 x i16> <i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <8 x i16> undef, i8 -1)
@@ -2005,9 +2005,9 @@ define <8 x i16> @shuffle_test_permvar_hi_128(<8 x i16> %a0) {
define <8 x i16> @shuffle_test_permvar_hi_128_mask(<8 x i16> %a0, <8 x i16> %passthru, i8 %mask) {
; CHECK-LABEL: @shuffle_test_permvar_hi_128_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i16> [[TMP1]], <8 x i16> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A0:%.*]], <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i16> [[TMP1]], <8 x i16> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x i16> [[TMP3]]
;
%a = tail call <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16> %a0, <8 x i16> <i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <8 x i16> %passthru, i8 %mask)
@@ -2016,7 +2016,7 @@ define <8 x i16> @shuffle_test_permvar_hi_128_mask(<8 x i16> %a0, <8 x i16> %pas
define <8 x i16> @undef_test_permvar_hi_128(<8 x i16> %a0) {
; CHECK-LABEL: @undef_test_permvar_hi_128(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A0:%.*]], <8 x i16> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <8 x i16> [[TMP1]]
;
%a = tail call <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16> %a0, <8 x i16> <i16 undef, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <8 x i16> undef, i8 -1)
@@ -2025,9 +2025,9 @@ define <8 x i16> @undef_test_permvar_hi_128(<8 x i16> %a0) {
define <8 x i16> @undef_test_permvar_hi_128_mask(<8 x i16> %a0, <8 x i16> %passthru, i8 %mask) {
; CHECK-LABEL: @undef_test_permvar_hi_128_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i16> [[TMP1]], <8 x i16> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A0:%.*]], <8 x i16> undef, <8 x i32> <i32 undef, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x i16> [[TMP1]], <8 x i16> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <8 x i16> [[TMP3]]
;
%a = tail call <8 x i16> @llvm.x86.avx512.mask.permvar.hi.128(<8 x i16> %a0, <8 x i16> <i16 undef, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <8 x i16> %passthru, i8 %mask)
@@ -2038,7 +2038,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16>, <16 x i16>,
define <16 x i16> @identity_test_permvar_hi_256(<16 x i16> %a0) {
; CHECK-LABEL: @identity_test_permvar_hi_256(
-; CHECK-NEXT: ret <16 x i16> %a0
+; CHECK-NEXT: ret <16 x i16> [[A0:%.*]]
;
%a = tail call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %a0, <16 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, <16 x i16> undef, i16 -1)
ret <16 x i16> %a
@@ -2046,8 +2046,8 @@ define <16 x i16> @identity_test_permvar_hi_256(<16 x i16> %a0) {
define <16 x i16> @identity_test_permvar_hi_256_mask(<16 x i16> %a0, <16 x i16> %passthru, i16 %mask) {
; CHECK-LABEL: @identity_test_permvar_hi_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x i16> %a0, <16 x i16> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x i16> [[A0:%.*]], <16 x i16> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <16 x i16> [[TMP2]]
;
%a = tail call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %a0, <16 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, <16 x i16> %passthru, i16 %mask)
@@ -2056,7 +2056,7 @@ define <16 x i16> @identity_test_permvar_hi_256_mask(<16 x i16> %a0, <16 x i16>
define <16 x i16> @zero_test_permvar_hi_256(<16 x i16> %a0) {
; CHECK-LABEL: @zero_test_permvar_hi_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> undef, <16 x i32> zeroinitializer
; CHECK-NEXT: ret <16 x i16> [[TMP1]]
;
%a = tail call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %a0, <16 x i16> zeroinitializer, <16 x i16> undef, i16 -1)
@@ -2065,9 +2065,9 @@ define <16 x i16> @zero_test_permvar_hi_256(<16 x i16> %a0) {
define <16 x i16> @zero_test_permvar_hi_256_mask(<16 x i16> %a0, <16 x i16> %passthru, i16 %mask) {
; CHECK-LABEL: @zero_test_permvar_hi_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i16> [[TMP1]], <16 x i16> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> undef, <16 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i16> [[TMP1]], <16 x i16> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <16 x i16> [[TMP3]]
;
%a = tail call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %a0, <16 x i16> zeroinitializer, <16 x i16> %passthru, i16 %mask)
@@ -2076,7 +2076,7 @@ define <16 x i16> @zero_test_permvar_hi_256_mask(<16 x i16> %a0, <16 x i16> %pas
define <16 x i16> @shuffle_test_permvar_hi_256(<16 x i16> %a0) {
; CHECK-LABEL: @shuffle_test_permvar_hi_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <16 x i16> [[TMP1]]
;
%a = tail call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %a0, <16 x i16> <i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <16 x i16> undef, i16 -1)
@@ -2085,9 +2085,9 @@ define <16 x i16> @shuffle_test_permvar_hi_256(<16 x i16> %a0) {
define <16 x i16> @shuffle_test_permvar_hi_256_mask(<16 x i16> %a0, <16 x i16> %passthru, i16 %mask) {
; CHECK-LABEL: @shuffle_test_permvar_hi_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i16> [[TMP1]], <16 x i16> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i16> [[TMP1]], <16 x i16> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <16 x i16> [[TMP3]]
;
%a = tail call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %a0, <16 x i16> <i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <16 x i16> %passthru, i16 %mask)
@@ -2096,7 +2096,7 @@ define <16 x i16> @shuffle_test_permvar_hi_256_mask(<16 x i16> %a0, <16 x i16> %
define <16 x i16> @undef_test_permvar_hi_256(<16 x i16> %a0) {
; CHECK-LABEL: @undef_test_permvar_hi_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <16 x i16> [[TMP1]]
;
%a = tail call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %a0, <16 x i16> <i16 undef, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <16 x i16> undef, i16 -1)
@@ -2105,9 +2105,9 @@ define <16 x i16> @undef_test_permvar_hi_256(<16 x i16> %a0) {
define <16 x i16> @undef_test_permvar_hi_256_mask(<16 x i16> %a0, <16 x i16> %passthru, i16 %mask) {
; CHECK-LABEL: @undef_test_permvar_hi_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i16> [[TMP1]], <16 x i16> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i16> [[TMP1]], <16 x i16> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <16 x i16> [[TMP3]]
;
%a = tail call <16 x i16> @llvm.x86.avx512.mask.permvar.hi.256(<16 x i16> %a0, <16 x i16> <i16 undef, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <16 x i16> %passthru, i16 %mask)
@@ -2118,7 +2118,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16>, <32 x i16>,
define <32 x i16> @identity_test_permvar_hi_512(<32 x i16> %a0) {
; CHECK-LABEL: @identity_test_permvar_hi_512(
-; CHECK-NEXT: ret <32 x i16> %a0
+; CHECK-NEXT: ret <32 x i16> [[A0:%.*]]
;
%a = tail call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23, i16 24, i16 25, i16 26, i16 27, i16 28, i16 29, i16 30, i16 31>, <32 x i16> undef, i32 -1)
ret <32 x i16> %a
@@ -2126,8 +2126,8 @@ define <32 x i16> @identity_test_permvar_hi_512(<32 x i16> %a0) {
define <32 x i16> @identity_test_permvar_hi_512_mask(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) {
; CHECK-LABEL: @identity_test_permvar_hi_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 %mask to <32 x i1>
-; CHECK-NEXT: [[TMP2:%.*]] = select <32 x i1> [[TMP1]], <32 x i16> %a0, <32 x i16> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1>
+; CHECK-NEXT: [[TMP2:%.*]] = select <32 x i1> [[TMP1]], <32 x i16> [[A0:%.*]], <32 x i16> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <32 x i16> [[TMP2]]
;
%a = tail call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23, i16 24, i16 25, i16 26, i16 27, i16 28, i16 29, i16 30, i16 31>, <32 x i16> %passthru, i32 %mask)
@@ -2136,7 +2136,7 @@ define <32 x i16> @identity_test_permvar_hi_512_mask(<32 x i16> %a0, <32 x i16>
define <32 x i16> @zero_test_permvar_hi_512(<32 x i16> %a0) {
; CHECK-LABEL: @zero_test_permvar_hi_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> [[A0:%.*]], <32 x i16> undef, <32 x i32> zeroinitializer
; CHECK-NEXT: ret <32 x i16> [[TMP1]]
;
%a = tail call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> zeroinitializer, <32 x i16> undef, i32 -1)
@@ -2145,9 +2145,9 @@ define <32 x i16> @zero_test_permvar_hi_512(<32 x i16> %a0) {
define <32 x i16> @zero_test_permvar_hi_512_mask(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) {
; CHECK-LABEL: @zero_test_permvar_hi_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 %mask to <32 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <32 x i1> [[TMP2]], <32 x i16> [[TMP1]], <32 x i16> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> [[A0:%.*]], <32 x i16> undef, <32 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <32 x i1> [[TMP2]], <32 x i16> [[TMP1]], <32 x i16> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <32 x i16> [[TMP3]]
;
%a = tail call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> zeroinitializer, <32 x i16> %passthru, i32 %mask)
@@ -2156,7 +2156,7 @@ define <32 x i16> @zero_test_permvar_hi_512_mask(<32 x i16> %a0, <32 x i16> %pas
define <32 x i16> @shuffle_test_permvar_hi_512(<32 x i16> %a0) {
; CHECK-LABEL: @shuffle_test_permvar_hi_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> [[A0:%.*]], <32 x i16> undef, <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <32 x i16> [[TMP1]]
;
%a = tail call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> <i16 31, i16 30, i16 29, i16 28, i16 27, i16 26, i16 25, i16 24, i16 23, i16 22, i16 21, i16 20, i16 19, i16 18, i16 17, i16 16, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <32 x i16> undef, i32 -1)
@@ -2165,9 +2165,9 @@ define <32 x i16> @shuffle_test_permvar_hi_512(<32 x i16> %a0) {
define <32 x i16> @shuffle_test_permvar_hi_512_mask(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) {
; CHECK-LABEL: @shuffle_test_permvar_hi_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 %mask to <32 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <32 x i1> [[TMP2]], <32 x i16> [[TMP1]], <32 x i16> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> [[A0:%.*]], <32 x i16> undef, <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <32 x i1> [[TMP2]], <32 x i16> [[TMP1]], <32 x i16> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <32 x i16> [[TMP3]]
;
%a = tail call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> <i16 31, i16 30, i16 29, i16 28, i16 27, i16 26, i16 25, i16 24, i16 23, i16 22, i16 21, i16 20, i16 19, i16 18, i16 17, i16 16, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <32 x i16> %passthru, i32 %mask)
@@ -2176,7 +2176,7 @@ define <32 x i16> @shuffle_test_permvar_hi_512_mask(<32 x i16> %a0, <32 x i16> %
define <32 x i16> @undef_test_permvar_hi_512(<32 x i16> %a0) {
; CHECK-LABEL: @undef_test_permvar_hi_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 undef, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> [[A0:%.*]], <32 x i16> undef, <32 x i32> <i32 undef, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <32 x i16> [[TMP1]]
;
%a = tail call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> <i16 undef, i16 30, i16 29, i16 28, i16 27, i16 26, i16 25, i16 24, i16 23, i16 22, i16 21, i16 20, i16 19, i16 18, i16 17, i16 16, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <32 x i16> undef, i32 -1)
@@ -2185,9 +2185,9 @@ define <32 x i16> @undef_test_permvar_hi_512(<32 x i16> %a0) {
define <32 x i16> @undef_test_permvar_hi_512_mask(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) {
; CHECK-LABEL: @undef_test_permvar_hi_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 undef, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 %mask to <32 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <32 x i1> [[TMP2]], <32 x i16> [[TMP1]], <32 x i16> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i16> [[A0:%.*]], <32 x i16> undef, <32 x i32> <i32 undef, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <32 x i1> [[TMP2]], <32 x i16> [[TMP1]], <32 x i16> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <32 x i16> [[TMP3]]
;
%a = tail call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %a0, <32 x i16> <i16 undef, i16 30, i16 29, i16 28, i16 27, i16 26, i16 25, i16 24, i16 23, i16 22, i16 21, i16 20, i16 19, i16 18, i16 17, i16 16, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <32 x i16> %passthru, i32 %mask)
@@ -2198,7 +2198,7 @@ declare <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8>, <16 x i8>, <16
define <16 x i8> @identity_test_permvar_qi_128(<16 x i8> %a0) {
; CHECK-LABEL: @identity_test_permvar_qi_128(
-; CHECK-NEXT: ret <16 x i8> %a0
+; CHECK-NEXT: ret <16 x i8> [[A0:%.*]]
;
%a = tail call <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, <16 x i8> undef, i16 -1)
ret <16 x i8> %a
@@ -2206,8 +2206,8 @@ define <16 x i8> @identity_test_permvar_qi_128(<16 x i8> %a0) {
define <16 x i8> @identity_test_permvar_qi_128_mask(<16 x i8> %a0, <16 x i8> %passthru, i16 %mask) {
; CHECK-LABEL: @identity_test_permvar_qi_128_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x i8> %a0, <16 x i8> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x i8> [[A0:%.*]], <16 x i8> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <16 x i8> [[TMP2]]
;
%a = tail call <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, <16 x i8> %passthru, i16 %mask)
@@ -2216,7 +2216,7 @@ define <16 x i8> @identity_test_permvar_qi_128_mask(<16 x i8> %a0, <16 x i8> %pa
define <16 x i8> @zero_test_permvar_qi_128(<16 x i8> %a0) {
; CHECK-LABEL: @zero_test_permvar_qi_128(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[A0:%.*]], <16 x i8> undef, <16 x i32> zeroinitializer
; CHECK-NEXT: ret <16 x i8> [[TMP1]]
;
%a = tail call <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i8> undef, i16 -1)
@@ -2225,9 +2225,9 @@ define <16 x i8> @zero_test_permvar_qi_128(<16 x i8> %a0) {
define <16 x i8> @zero_test_permvar_qi_128_mask(<16 x i8> %a0, <16 x i8> %passthru, i16 %mask) {
; CHECK-LABEL: @zero_test_permvar_qi_128_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i8> [[TMP1]], <16 x i8> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[A0:%.*]], <16 x i8> undef, <16 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i8> [[TMP1]], <16 x i8> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <16 x i8> [[TMP3]]
;
%a = tail call <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i8> %passthru, i16 %mask)
@@ -2236,7 +2236,7 @@ define <16 x i8> @zero_test_permvar_qi_128_mask(<16 x i8> %a0, <16 x i8> %passth
define <16 x i8> @shuffle_test_permvar_qi_128(<16 x i8> %a0) {
; CHECK-LABEL: @shuffle_test_permvar_qi_128(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[A0:%.*]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <16 x i8> [[TMP1]]
;
%a = tail call <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8> %a0, <16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> undef, i16 -1)
@@ -2245,9 +2245,9 @@ define <16 x i8> @shuffle_test_permvar_qi_128(<16 x i8> %a0) {
define <16 x i8> @shuffle_test_permvar_qi_128_mask(<16 x i8> %a0, <16 x i8> %passthru, i16 %mask) {
; CHECK-LABEL: @shuffle_test_permvar_qi_128_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i8> [[TMP1]], <16 x i8> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[A0:%.*]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i8> [[TMP1]], <16 x i8> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <16 x i8> [[TMP3]]
;
%a = tail call <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8> %a0, <16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> %passthru, i16 %mask)
@@ -2256,7 +2256,7 @@ define <16 x i8> @shuffle_test_permvar_qi_128_mask(<16 x i8> %a0, <16 x i8> %pas
define <16 x i8> @undef_test_permvar_qi_128(<16 x i8> %a0) {
; CHECK-LABEL: @undef_test_permvar_qi_128(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[A0:%.*]], <16 x i8> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <16 x i8> [[TMP1]]
;
%a = tail call <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8> %a0, <16 x i8> <i8 undef, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> undef, i16 -1)
@@ -2265,9 +2265,9 @@ define <16 x i8> @undef_test_permvar_qi_128(<16 x i8> %a0) {
define <16 x i8> @undef_test_permvar_qi_128_mask(<16 x i8> %a0, <16 x i8> %passthru, i16 %mask) {
; CHECK-LABEL: @undef_test_permvar_qi_128_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i8> [[TMP1]], <16 x i8> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[A0:%.*]], <16 x i8> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x i8> [[TMP1]], <16 x i8> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <16 x i8> [[TMP3]]
;
%a = tail call <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8> %a0, <16 x i8> <i8 undef, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> %passthru, i16 %mask)
@@ -2278,7 +2278,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8>, <32 x i8>, <32
define <32 x i8> @identity_test_permvar_qi_256(<32 x i8> %a0) {
; CHECK-LABEL: @identity_test_permvar_qi_256(
-; CHECK-NEXT: ret <32 x i8> %a0
+; CHECK-NEXT: ret <32 x i8> [[A0:%.*]]
;
%a = tail call <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8> %a0, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, <32 x i8> undef, i32 -1)
ret <32 x i8> %a
@@ -2286,8 +2286,8 @@ define <32 x i8> @identity_test_permvar_qi_256(<32 x i8> %a0) {
define <32 x i8> @identity_test_permvar_qi_256_mask(<32 x i8> %a0, <32 x i8> %passthru, i32 %mask) {
; CHECK-LABEL: @identity_test_permvar_qi_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 %mask to <32 x i1>
-; CHECK-NEXT: [[TMP2:%.*]] = select <32 x i1> [[TMP1]], <32 x i8> %a0, <32 x i8> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1>
+; CHECK-NEXT: [[TMP2:%.*]] = select <32 x i1> [[TMP1]], <32 x i8> [[A0:%.*]], <32 x i8> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <32 x i8> [[TMP2]]
;
%a = tail call <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8> %a0, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, <32 x i8> %passthru, i32 %mask)
@@ -2296,7 +2296,7 @@ define <32 x i8> @identity_test_permvar_qi_256_mask(<32 x i8> %a0, <32 x i8> %pa
define <32 x i8> @zero_test_permvar_qi_256(<32 x i8> %a0) {
; CHECK-LABEL: @zero_test_permvar_qi_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> [[A0:%.*]], <32 x i8> undef, <32 x i32> zeroinitializer
; CHECK-NEXT: ret <32 x i8> [[TMP1]]
;
%a = tail call <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8> %a0, <32 x i8> zeroinitializer, <32 x i8> undef, i32 -1)
@@ -2305,9 +2305,9 @@ define <32 x i8> @zero_test_permvar_qi_256(<32 x i8> %a0) {
define <32 x i8> @zero_test_permvar_qi_256_mask(<32 x i8> %a0, <32 x i8> %passthru, i32 %mask) {
; CHECK-LABEL: @zero_test_permvar_qi_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 %mask to <32 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <32 x i1> [[TMP2]], <32 x i8> [[TMP1]], <32 x i8> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> [[A0:%.*]], <32 x i8> undef, <32 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <32 x i1> [[TMP2]], <32 x i8> [[TMP1]], <32 x i8> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <32 x i8> [[TMP3]]
;
%a = tail call <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8> %a0, <32 x i8> zeroinitializer, <32 x i8> %passthru, i32 %mask)
@@ -2316,7 +2316,7 @@ define <32 x i8> @zero_test_permvar_qi_256_mask(<32 x i8> %a0, <32 x i8> %passth
define <32 x i8> @shuffle_test_permvar_qi_256(<32 x i8> %a0) {
; CHECK-LABEL: @shuffle_test_permvar_qi_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> [[A0:%.*]], <32 x i8> undef, <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <32 x i8> [[TMP1]]
;
%a = tail call <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8> %a0, <32 x i8> <i8 31, i8 30, i8 29, i8 28, i8 27, i8 26, i8 25, i8 24, i8 23, i8 22, i8 21, i8 20, i8 19, i8 18, i8 17, i8 16, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <32 x i8> undef, i32 -1)
@@ -2325,9 +2325,9 @@ define <32 x i8> @shuffle_test_permvar_qi_256(<32 x i8> %a0) {
define <32 x i8> @shuffle_test_permvar_qi_256_mask(<32 x i8> %a0, <32 x i8> %passthru, i32 %mask) {
; CHECK-LABEL: @shuffle_test_permvar_qi_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 %mask to <32 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <32 x i1> [[TMP2]], <32 x i8> [[TMP1]], <32 x i8> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> [[A0:%.*]], <32 x i8> undef, <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <32 x i1> [[TMP2]], <32 x i8> [[TMP1]], <32 x i8> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <32 x i8> [[TMP3]]
;
%a = tail call <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8> %a0, <32 x i8> <i8 31, i8 30, i8 29, i8 28, i8 27, i8 26, i8 25, i8 24, i8 23, i8 22, i8 21, i8 20, i8 19, i8 18, i8 17, i8 16, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <32 x i8> %passthru, i32 %mask)
@@ -2336,7 +2336,7 @@ define <32 x i8> @shuffle_test_permvar_qi_256_mask(<32 x i8> %a0, <32 x i8> %pas
define <32 x i8> @undef_test_permvar_qi_256(<32 x i8> %a0) {
; CHECK-LABEL: @undef_test_permvar_qi_256(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 undef, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> [[A0:%.*]], <32 x i8> undef, <32 x i32> <i32 undef, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <32 x i8> [[TMP1]]
;
%a = tail call <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8> %a0, <32 x i8> <i8 undef, i8 30, i8 29, i8 28, i8 27, i8 26, i8 25, i8 24, i8 23, i8 22, i8 21, i8 20, i8 19, i8 18, i8 17, i8 16, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <32 x i8> undef, i32 -1)
@@ -2345,9 +2345,9 @@ define <32 x i8> @undef_test_permvar_qi_256(<32 x i8> %a0) {
define <32 x i8> @undef_test_permvar_qi_256_mask(<32 x i8> %a0, <32 x i8> %passthru, i32 %mask) {
; CHECK-LABEL: @undef_test_permvar_qi_256_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 undef, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 %mask to <32 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <32 x i1> [[TMP2]], <32 x i8> [[TMP1]], <32 x i8> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> [[A0:%.*]], <32 x i8> undef, <32 x i32> <i32 undef, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <32 x i1> [[TMP2]], <32 x i8> [[TMP1]], <32 x i8> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <32 x i8> [[TMP3]]
;
%a = tail call <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8> %a0, <32 x i8> <i8 undef, i8 30, i8 29, i8 28, i8 27, i8 26, i8 25, i8 24, i8 23, i8 22, i8 21, i8 20, i8 19, i8 18, i8 17, i8 16, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <32 x i8> %passthru, i32 %mask)
@@ -2358,7 +2358,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8>, <64 x i8>, <64
define <64 x i8> @identity_test_permvar_qi_512(<64 x i8> %a0) {
; CHECK-LABEL: @identity_test_permvar_qi_512(
-; CHECK-NEXT: ret <64 x i8> %a0
+; CHECK-NEXT: ret <64 x i8> [[A0:%.*]]
;
%a = tail call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a0, <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39, i8 40, i8 41, i8 42, i8 43, i8 44, i8 45, i8 46, i8 47, i8 48, i8 49, i8 50, i8 51, i8 52, i8 53, i8 54, i8 55, i8 56, i8 57, i8 58, i8 59, i8 60, i8 61, i8 62, i8 63>, <64 x i8> undef, i64 -1)
ret <64 x i8> %a
@@ -2366,8 +2366,8 @@ define <64 x i8> @identity_test_permvar_qi_512(<64 x i8> %a0) {
define <64 x i8> @identity_test_permvar_qi_512_mask(<64 x i8> %a0, <64 x i8> %passthru, i64 %mask) {
; CHECK-LABEL: @identity_test_permvar_qi_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 %mask to <64 x i1>
-; CHECK-NEXT: [[TMP2:%.*]] = select <64 x i1> [[TMP1]], <64 x i8> %a0, <64 x i8> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[MASK:%.*]] to <64 x i1>
+; CHECK-NEXT: [[TMP2:%.*]] = select <64 x i1> [[TMP1]], <64 x i8> [[A0:%.*]], <64 x i8> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <64 x i8> [[TMP2]]
;
%a = tail call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a0, <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39, i8 40, i8 41, i8 42, i8 43, i8 44, i8 45, i8 46, i8 47, i8 48, i8 49, i8 50, i8 51, i8 52, i8 53, i8 54, i8 55, i8 56, i8 57, i8 58, i8 59, i8 60, i8 61, i8 62, i8 63>, <64 x i8> %passthru, i64 %mask)
@@ -2376,7 +2376,7 @@ define <64 x i8> @identity_test_permvar_qi_512_mask(<64 x i8> %a0, <64 x i8> %pa
define <64 x i8> @zero_test_permvar_qi_512(<64 x i8> %a0) {
; CHECK-LABEL: @zero_test_permvar_qi_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> [[A0:%.*]], <64 x i8> undef, <64 x i32> zeroinitializer
; CHECK-NEXT: ret <64 x i8> [[TMP1]]
;
%a = tail call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a0, <64 x i8> zeroinitializer, <64 x i8> undef, i64 -1)
@@ -2385,9 +2385,9 @@ define <64 x i8> @zero_test_permvar_qi_512(<64 x i8> %a0) {
define <64 x i8> @zero_test_permvar_qi_512_mask(<64 x i8> %a0, <64 x i8> %passthru, i64 %mask) {
; CHECK-LABEL: @zero_test_permvar_qi_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i64 %mask to <64 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <64 x i1> [[TMP2]], <64 x i8> [[TMP1]], <64 x i8> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> [[A0:%.*]], <64 x i8> undef, <64 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i64 [[MASK:%.*]] to <64 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <64 x i1> [[TMP2]], <64 x i8> [[TMP1]], <64 x i8> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <64 x i8> [[TMP3]]
;
%a = tail call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a0, <64 x i8> zeroinitializer, <64 x i8> %passthru, i64 %mask)
@@ -2396,7 +2396,7 @@ define <64 x i8> @zero_test_permvar_qi_512_mask(<64 x i8> %a0, <64 x i8> %passth
define <64 x i8> @shuffle_test_permvar_qi_512(<64 x i8> %a0) {
; CHECK-LABEL: @shuffle_test_permvar_qi_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> <i32 63, i32 62, i32 61, i32 60, i32 59, i32 58, i32 57, i32 56, i32 55, i32 54, i32 53, i32 52, i32 51, i32 50, i32 49, i32 48, i32 47, i32 46, i32 45, i32 44, i32 43, i32 42, i32 41, i32 40, i32 39, i32 38, i32 37, i32 36, i32 35, i32 34, i32 33, i32 32, i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> [[A0:%.*]], <64 x i8> undef, <64 x i32> <i32 63, i32 62, i32 61, i32 60, i32 59, i32 58, i32 57, i32 56, i32 55, i32 54, i32 53, i32 52, i32 51, i32 50, i32 49, i32 48, i32 47, i32 46, i32 45, i32 44, i32 43, i32 42, i32 41, i32 40, i32 39, i32 38, i32 37, i32 36, i32 35, i32 34, i32 33, i32 32, i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <64 x i8> [[TMP1]]
;
%a = tail call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a0, <64 x i8> <i8 63, i8 62, i8 61, i8 60, i8 59, i8 58, i8 57, i8 56, i8 55, i8 54, i8 53, i8 52, i8 51, i8 50, i8 49, i8 48, i8 47, i8 46, i8 45, i8 44, i8 43, i8 42, i8 41, i8 40, i8 39, i8 38, i8 37, i8 36, i8 35, i8 34, i8 33, i8 32, i8 31, i8 30, i8 29, i8 28, i8 27, i8 26, i8 25, i8 24, i8 23, i8 22, i8 21, i8 20, i8 19, i8 18, i8 17, i8 16, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <64 x i8> undef, i64 -1)
@@ -2405,9 +2405,9 @@ define <64 x i8> @shuffle_test_permvar_qi_512(<64 x i8> %a0) {
define <64 x i8> @shuffle_test_permvar_qi_512_mask(<64 x i8> %a0, <64 x i8> %passthru, i64 %mask) {
; CHECK-LABEL: @shuffle_test_permvar_qi_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> <i32 63, i32 62, i32 61, i32 60, i32 59, i32 58, i32 57, i32 56, i32 55, i32 54, i32 53, i32 52, i32 51, i32 50, i32 49, i32 48, i32 47, i32 46, i32 45, i32 44, i32 43, i32 42, i32 41, i32 40, i32 39, i32 38, i32 37, i32 36, i32 35, i32 34, i32 33, i32 32, i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i64 %mask to <64 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <64 x i1> [[TMP2]], <64 x i8> [[TMP1]], <64 x i8> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> [[A0:%.*]], <64 x i8> undef, <64 x i32> <i32 63, i32 62, i32 61, i32 60, i32 59, i32 58, i32 57, i32 56, i32 55, i32 54, i32 53, i32 52, i32 51, i32 50, i32 49, i32 48, i32 47, i32 46, i32 45, i32 44, i32 43, i32 42, i32 41, i32 40, i32 39, i32 38, i32 37, i32 36, i32 35, i32 34, i32 33, i32 32, i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i64 [[MASK:%.*]] to <64 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <64 x i1> [[TMP2]], <64 x i8> [[TMP1]], <64 x i8> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <64 x i8> [[TMP3]]
;
%a = tail call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a0, <64 x i8> <i8 63, i8 62, i8 61, i8 60, i8 59, i8 58, i8 57, i8 56, i8 55, i8 54, i8 53, i8 52, i8 51, i8 50, i8 49, i8 48, i8 47, i8 46, i8 45, i8 44, i8 43, i8 42, i8 41, i8 40, i8 39, i8 38, i8 37, i8 36, i8 35, i8 34, i8 33, i8 32, i8 31, i8 30, i8 29, i8 28, i8 27, i8 26, i8 25, i8 24, i8 23, i8 22, i8 21, i8 20, i8 19, i8 18, i8 17, i8 16, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <64 x i8> %passthru, i64 %mask)
@@ -2416,7 +2416,7 @@ define <64 x i8> @shuffle_test_permvar_qi_512_mask(<64 x i8> %a0, <64 x i8> %pas
define <64 x i8> @undef_test_permvar_qi_512(<64 x i8> %a0) {
; CHECK-LABEL: @undef_test_permvar_qi_512(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> <i32 undef, i32 62, i32 61, i32 60, i32 59, i32 58, i32 57, i32 56, i32 55, i32 54, i32 53, i32 52, i32 51, i32 50, i32 49, i32 48, i32 47, i32 46, i32 45, i32 44, i32 43, i32 42, i32 41, i32 40, i32 39, i32 38, i32 37, i32 36, i32 35, i32 34, i32 33, i32 32, i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> [[A0:%.*]], <64 x i8> undef, <64 x i32> <i32 undef, i32 62, i32 61, i32 60, i32 59, i32 58, i32 57, i32 56, i32 55, i32 54, i32 53, i32 52, i32 51, i32 50, i32 49, i32 48, i32 47, i32 46, i32 45, i32 44, i32 43, i32 42, i32 41, i32 40, i32 39, i32 38, i32 37, i32 36, i32 35, i32 34, i32 33, i32 32, i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <64 x i8> [[TMP1]]
;
%a = tail call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a0, <64 x i8> <i8 undef, i8 62, i8 61, i8 60, i8 59, i8 58, i8 57, i8 56, i8 55, i8 54, i8 53, i8 52, i8 51, i8 50, i8 49, i8 48, i8 47, i8 46, i8 45, i8 44, i8 43, i8 42, i8 41, i8 40, i8 39, i8 38, i8 37, i8 36, i8 35, i8 34, i8 33, i8 32, i8 31, i8 30, i8 29, i8 28, i8 27, i8 26, i8 25, i8 24, i8 23, i8 22, i8 21, i8 20, i8 19, i8 18, i8 17, i8 16, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <64 x i8> undef, i64 -1)
@@ -2425,9 +2425,9 @@ define <64 x i8> @undef_test_permvar_qi_512(<64 x i8> %a0) {
define <64 x i8> @undef_test_permvar_qi_512_mask(<64 x i8> %a0, <64 x i8> %passthru, i64 %mask) {
; CHECK-LABEL: @undef_test_permvar_qi_512_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> <i32 undef, i32 62, i32 61, i32 60, i32 59, i32 58, i32 57, i32 56, i32 55, i32 54, i32 53, i32 52, i32 51, i32 50, i32 49, i32 48, i32 47, i32 46, i32 45, i32 44, i32 43, i32 42, i32 41, i32 40, i32 39, i32 38, i32 37, i32 36, i32 35, i32 34, i32 33, i32 32, i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i64 %mask to <64 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <64 x i1> [[TMP2]], <64 x i8> [[TMP1]], <64 x i8> %passthru
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> [[A0:%.*]], <64 x i8> undef, <64 x i32> <i32 undef, i32 62, i32 61, i32 60, i32 59, i32 58, i32 57, i32 56, i32 55, i32 54, i32 53, i32 52, i32 51, i32 50, i32 49, i32 48, i32 47, i32 46, i32 45, i32 44, i32 43, i32 42, i32 41, i32 40, i32 39, i32 38, i32 37, i32 36, i32 35, i32 34, i32 33, i32 32, i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i64 [[MASK:%.*]] to <64 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <64 x i1> [[TMP2]], <64 x i8> [[TMP1]], <64 x i8> [[PASSTHRU:%.*]]
; CHECK-NEXT: ret <64 x i8> [[TMP3]]
;
%a = tail call <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8> %a0, <64 x i8> <i8 undef, i8 62, i8 61, i8 60, i8 59, i8 58, i8 57, i8 56, i8 55, i8 54, i8 53, i8 52, i8 51, i8 50, i8 49, i8 48, i8 47, i8 46, i8 45, i8 44, i8 43, i8 42, i8 41, i8 40, i8 39, i8 38, i8 37, i8 36, i8 35, i8 34, i8 33, i8 32, i8 31, i8 30, i8 29, i8 28, i8 27, i8 26, i8 25, i8 24, i8 23, i8 22, i8 21, i8 20, i8 19, i8 18, i8 17, i8 16, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <64 x i8> %passthru, i64 %mask)
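All of the permvar tests above, across every element width, walk the same four-case grid: an identity index folds the call to its first operand, a zeroinitializer or reversed constant index folds to a shufflevector, undef index lanes become undef shuffle-mask lanes, and a non-trivial mask layers the bitcast/select from the earlier sketch on top. A self-contained example of the identity case, assuming the qi.128 signature declared earlier in this file (@identity_fold_sketch is an illustrative name); running it through opt -instcombine should leave just 'ret <16 x i8> %v':

declare <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8>, <16 x i8>, <16 x i8>, i16)

; Illustrative sketch only: the 0..15 identity index with an all-ones mask
; lets instcombine erase the call entirely.
define <16 x i8> @identity_fold_sketch(<16 x i8> %v) {
  %r = tail call <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8> %v, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, <16 x i8> undef, i16 -1)
  ret <16 x i8> %r
}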
@@ -2438,7 +2438,7 @@ declare <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float>, <16 x float>
define <16 x float> @test_add_ps(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: @test_add_ps(
-; CHECK-NEXT: [[TMP1:%.*]] = fadd <16 x float> %a, %b
+; CHECK-NEXT: [[TMP1:%.*]] = fadd <16 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
%1 = tail call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 4)
@@ -2447,7 +2447,7 @@ define <16 x float> @test_add_ps(<16 x float> %a, <16 x float> %b) {
define <16 x float> @test_add_ps_round(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: @test_add_ps_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> [[A:%.*]], <16 x float> [[B:%.*]], <16 x float> undef, i16 -1, i32 8)
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
%1 = tail call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 8)
@@ -2456,9 +2456,9 @@ define <16 x float> @test_add_ps_round(<16 x float> %a, <16 x float> %b) {
define <16 x float> @test_add_ps_mask(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; CHECK-LABEL: @test_add_ps_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = fadd <16 x float> %a, %b
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> %c
+; CHECK-NEXT: [[TMP1:%.*]] = fadd <16 x float> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> [[C:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP3]]
;
%1 = tail call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 4)
@@ -2467,7 +2467,7 @@ define <16 x float> @test_add_ps_mask(<16 x float> %a, <16 x float> %b, <16 x fl
define <16 x float> @test_add_ps_mask_round(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; CHECK-LABEL: @test_add_ps_mask_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> [[A:%.*]], <16 x float> [[B:%.*]], <16 x float> [[C:%.*]], i16 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
%1 = tail call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 8)
@@ -2478,7 +2478,7 @@ declare <8 x double> @llvm.x86.avx512.mask.add.pd.512(<8 x double>, <8 x double>
define <8 x double> @test_add_pd(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: @test_add_pd(
-; CHECK-NEXT: [[TMP1:%.*]] = fadd <8 x double> %a, %b
+; CHECK-NEXT: [[TMP1:%.*]] = fadd <8 x double> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
%1 = tail call <8 x double> @llvm.x86.avx512.mask.add.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 4)
@@ -2487,7 +2487,7 @@ define <8 x double> @test_add_pd(<8 x double> %a, <8 x double> %b) {
define <8 x double> @test_add_pd_round(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: @test_add_pd_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.add.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.add.pd.512(<8 x double> [[A:%.*]], <8 x double> [[B:%.*]], <8 x double> undef, i8 -1, i32 8)
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
%1 = tail call <8 x double> @llvm.x86.avx512.mask.add.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 8)
@@ -2496,9 +2496,9 @@ define <8 x double> @test_add_pd_round(<8 x double> %a, <8 x double> %b) {
define <8 x double> @test_add_pd_mask(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_add_pd_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = fadd <8 x double> %a, %b
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> %c
+; CHECK-NEXT: [[TMP1:%.*]] = fadd <8 x double> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> [[C:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP3]]
;
%1 = tail call <8 x double> @llvm.x86.avx512.mask.add.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 4)
@@ -2507,7 +2507,7 @@ define <8 x double> @test_add_pd_mask(<8 x double> %a, <8 x double> %b, <8 x dou
define <8 x double> @test_add_pd_mask_round(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_add_pd_mask_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.add.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.add.pd.512(<8 x double> [[A:%.*]], <8 x double> [[B:%.*]], <8 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
%1 = tail call <8 x double> @llvm.x86.avx512.mask.add.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 8)
@@ -2518,7 +2518,7 @@ declare <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float>, <16 x float>
define <16 x float> @test_sub_ps(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: @test_sub_ps(
-; CHECK-NEXT: [[TMP1:%.*]] = fsub <16 x float> %a, %b
+; CHECK-NEXT: [[TMP1:%.*]] = fsub <16 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
%1 = tail call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 4)
@@ -2527,7 +2527,7 @@ define <16 x float> @test_sub_ps(<16 x float> %a, <16 x float> %b) {
define <16 x float> @test_sub_ps_round(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: @test_sub_ps_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> [[A:%.*]], <16 x float> [[B:%.*]], <16 x float> undef, i16 -1, i32 8)
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
%1 = tail call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 8)
@@ -2536,9 +2536,9 @@ define <16 x float> @test_sub_ps_round(<16 x float> %a, <16 x float> %b) {
define <16 x float> @test_sub_ps_mask(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; CHECK-LABEL: @test_sub_ps_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = fsub <16 x float> %a, %b
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> %c
+; CHECK-NEXT: [[TMP1:%.*]] = fsub <16 x float> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> [[C:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP3]]
;
%1 = tail call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 4)
@@ -2547,7 +2547,7 @@ define <16 x float> @test_sub_ps_mask(<16 x float> %a, <16 x float> %b, <16 x fl
define <16 x float> @test_sub_ps_mask_round(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; CHECK-LABEL: @test_sub_ps_mask_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> [[A:%.*]], <16 x float> [[B:%.*]], <16 x float> [[C:%.*]], i16 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
%1 = tail call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 8)
@@ -2558,7 +2558,7 @@ declare <8 x double> @llvm.x86.avx512.mask.sub.pd.512(<8 x double>, <8 x double>
define <8 x double> @test_sub_pd(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: @test_sub_pd(
-; CHECK-NEXT: [[TMP1:%.*]] = fsub <8 x double> %a, %b
+; CHECK-NEXT: [[TMP1:%.*]] = fsub <8 x double> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
%1 = tail call <8 x double> @llvm.x86.avx512.mask.sub.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 4)
@@ -2567,7 +2567,7 @@ define <8 x double> @test_sub_pd(<8 x double> %a, <8 x double> %b) {
define <8 x double> @test_sub_pd_round(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: @test_sub_pd_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.sub.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.sub.pd.512(<8 x double> [[A:%.*]], <8 x double> [[B:%.*]], <8 x double> undef, i8 -1, i32 8)
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
%1 = tail call <8 x double> @llvm.x86.avx512.mask.sub.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 8)
@@ -2576,9 +2576,9 @@ define <8 x double> @test_sub_pd_round(<8 x double> %a, <8 x double> %b) {
define <8 x double> @test_sub_pd_mask(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_sub_pd_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = fsub <8 x double> %a, %b
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> %c
+; CHECK-NEXT: [[TMP1:%.*]] = fsub <8 x double> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> [[C:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP3]]
;
%1 = tail call <8 x double> @llvm.x86.avx512.mask.sub.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 4)
@@ -2587,7 +2587,7 @@ define <8 x double> @test_sub_pd_mask(<8 x double> %a, <8 x double> %b, <8 x dou
define <8 x double> @test_sub_pd_mask_round(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_sub_pd_mask_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.sub.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.sub.pd.512(<8 x double> [[A:%.*]], <8 x double> [[B:%.*]], <8 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
%1 = tail call <8 x double> @llvm.x86.avx512.mask.sub.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 8)
@@ -2598,7 +2598,7 @@ declare <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float>, <16 x float>
define <16 x float> @test_mul_ps(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: @test_mul_ps(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul <16 x float> %a, %b
+; CHECK-NEXT: [[TMP1:%.*]] = fmul <16 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
%1 = tail call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 4)
@@ -2607,7 +2607,7 @@ define <16 x float> @test_mul_ps(<16 x float> %a, <16 x float> %b) {
define <16 x float> @test_mul_ps_round(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: @test_mul_ps_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> [[A:%.*]], <16 x float> [[B:%.*]], <16 x float> undef, i16 -1, i32 8)
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
%1 = tail call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 8)
@@ -2616,9 +2616,9 @@ define <16 x float> @test_mul_ps_round(<16 x float> %a, <16 x float> %b) {
define <16 x float> @test_mul_ps_mask(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; CHECK-LABEL: @test_mul_ps_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul <16 x float> %a, %b
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> %c
+; CHECK-NEXT: [[TMP1:%.*]] = fmul <16 x float> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> [[C:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP3]]
;
%1 = tail call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 4)
@@ -2627,7 +2627,7 @@ define <16 x float> @test_mul_ps_mask(<16 x float> %a, <16 x float> %b, <16 x fl
define <16 x float> @test_mul_ps_mask_round(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; CHECK-LABEL: @test_mul_ps_mask_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> [[A:%.*]], <16 x float> [[B:%.*]], <16 x float> [[C:%.*]], i16 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
%1 = tail call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 8)
@@ -2638,7 +2638,7 @@ declare <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double>, <8 x double>
define <8 x double> @test_mul_pd(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: @test_mul_pd(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul <8 x double> %a, %b
+; CHECK-NEXT: [[TMP1:%.*]] = fmul <8 x double> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
%1 = tail call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 4)
@@ -2647,7 +2647,7 @@ define <8 x double> @test_mul_pd(<8 x double> %a, <8 x double> %b) {
define <8 x double> @test_mul_pd_round(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: @test_mul_pd_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> [[A:%.*]], <8 x double> [[B:%.*]], <8 x double> undef, i8 -1, i32 8)
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
%1 = tail call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 8)
@@ -2656,9 +2656,9 @@ define <8 x double> @test_mul_pd_round(<8 x double> %a, <8 x double> %b) {
define <8 x double> @test_mul_pd_mask(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_mul_pd_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul <8 x double> %a, %b
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> %c
+; CHECK-NEXT: [[TMP1:%.*]] = fmul <8 x double> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> [[C:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP3]]
;
%1 = tail call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 4)
@@ -2667,7 +2667,7 @@ define <8 x double> @test_mul_pd_mask(<8 x double> %a, <8 x double> %b, <8 x dou
define <8 x double> @test_mul_pd_mask_round(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_mul_pd_mask_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> [[A:%.*]], <8 x double> [[B:%.*]], <8 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
%1 = tail call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 8)
@@ -2678,7 +2678,7 @@ declare <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float>, <16 x float>
define <16 x float> @test_div_ps(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: @test_div_ps(
-; CHECK-NEXT: [[TMP1:%.*]] = fdiv <16 x float> %a, %b
+; CHECK-NEXT: [[TMP1:%.*]] = fdiv <16 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
%1 = tail call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 4)
@@ -2687,7 +2687,7 @@ define <16 x float> @test_div_ps(<16 x float> %a, <16 x float> %b) {
define <16 x float> @test_div_ps_round(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: @test_div_ps_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> [[A:%.*]], <16 x float> [[B:%.*]], <16 x float> undef, i16 -1, i32 8)
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
%1 = tail call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> undef, i16 -1, i32 8)
@@ -2696,9 +2696,9 @@ define <16 x float> @test_div_ps_round(<16 x float> %a, <16 x float> %b) {
define <16 x float> @test_div_ps_mask(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; CHECK-LABEL: @test_div_ps_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = fdiv <16 x float> %a, %b
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 %mask to <16 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> %c
+; CHECK-NEXT: [[TMP1:%.*]] = fdiv <16 x float> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP2]], <16 x float> [[TMP1]], <16 x float> [[C:%.*]]
; CHECK-NEXT: ret <16 x float> [[TMP3]]
;
%1 = tail call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 4)
@@ -2707,7 +2707,7 @@ define <16 x float> @test_div_ps_mask(<16 x float> %a, <16 x float> %b, <16 x fl
define <16 x float> @test_div_ps_mask_round(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; CHECK-LABEL: @test_div_ps_mask_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> [[A:%.*]], <16 x float> [[B:%.*]], <16 x float> [[C:%.*]], i16 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <16 x float> [[TMP1]]
;
%1 = tail call <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 8)
@@ -2718,7 +2718,7 @@ declare <8 x double> @llvm.x86.avx512.mask.div.pd.512(<8 x double>, <8 x double>
define <8 x double> @test_div_pd(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: @test_div_pd(
-; CHECK-NEXT: [[TMP1:%.*]] = fdiv <8 x double> %a, %b
+; CHECK-NEXT: [[TMP1:%.*]] = fdiv <8 x double> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
%1 = tail call <8 x double> @llvm.x86.avx512.mask.div.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 4)
@@ -2727,7 +2727,7 @@ define <8 x double> @test_div_pd(<8 x double> %a, <8 x double> %b) {
define <8 x double> @test_div_pd_round(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: @test_div_pd_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.div.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.div.pd.512(<8 x double> [[A:%.*]], <8 x double> [[B:%.*]], <8 x double> undef, i8 -1, i32 8)
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
%1 = tail call <8 x double> @llvm.x86.avx512.mask.div.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> undef, i8 -1, i32 8)
@@ -2736,9 +2736,9 @@ define <8 x double> @test_div_pd_round(<8 x double> %a, <8 x double> %b) {
define <8 x double> @test_div_pd_mask(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_div_pd_mask(
-; CHECK-NEXT: [[TMP1:%.*]] = fdiv <8 x double> %a, %b
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 %mask to <8 x i1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> %c
+; CHECK-NEXT: [[TMP1:%.*]] = fdiv <8 x double> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1>
+; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP2]], <8 x double> [[TMP1]], <8 x double> [[C:%.*]]
; CHECK-NEXT: ret <8 x double> [[TMP3]]
;
%1 = tail call <8 x double> @llvm.x86.avx512.mask.div.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 4)
@@ -2747,7 +2747,7 @@ define <8 x double> @test_div_pd_mask(<8 x double> %a, <8 x double> %b, <8 x dou
define <8 x double> @test_div_pd_mask_round(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; CHECK-LABEL: @test_div_pd_mask_round(
-; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.div.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 8)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <8 x double> @llvm.x86.avx512.mask.div.pd.512(<8 x double> [[A:%.*]], <8 x double> [[B:%.*]], <8 x double> [[C:%.*]], i8 [[MASK:%.*]], i32 8)
; CHECK-NEXT: ret <8 x double> [[TMP1]]
;
%1 = tail call <8 x double> @llvm.x86.avx512.mask.div.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 8)
@@ -2758,8 +2758,8 @@ declare i32 @llvm.x86.avx512.vcomi.ss(<4 x float>, <4 x float>, i32, i32)
define i32 @test_comi_ss_0(float %a, float %b) {
; CHECK-LABEL: @test_comi_ss_0(
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> undef, float %a, i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> undef, float %b, i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> undef, float [[A:%.*]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> undef, float [[B:%.*]], i32 0
; CHECK-NEXT: [[TMP3:%.*]] = tail call i32 @llvm.x86.avx512.vcomi.ss(<4 x float> [[TMP1]], <4 x float> [[TMP2]], i32 0, i32 4)
; CHECK-NEXT: ret i32 [[TMP3]]
;
@@ -2779,8 +2779,8 @@ declare i32 @llvm.x86.avx512.vcomi.sd(<2 x double>, <2 x double>, i32, i32)
define i32 @test_comi_sd_0(double %a, double %b) {
; CHECK-LABEL: @test_comi_sd_0(
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> undef, double %a, i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x double> undef, double %b, i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> undef, double [[A:%.*]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x double> undef, double [[B:%.*]], i32 0
; CHECK-NEXT: [[TMP3:%.*]] = tail call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> [[TMP1]], <2 x double> [[TMP2]], i32 0, i32 4)
; CHECK-NEXT: ret i32 [[TMP3]]
;
diff --git a/test/Transforms/InstCombine/x86-muldq.ll b/test/Transforms/InstCombine/x86-muldq.ll
index 8b14a781f091..bcbb8919c403 100644
--- a/test/Transforms/InstCombine/x86-muldq.ll
+++ b/test/Transforms/InstCombine/x86-muldq.ll
@@ -2,6 +2,158 @@
; RUN: opt < %s -instcombine -S | FileCheck %s
;
+; UNDEF Elts
+;
+
+define <2 x i64> @undef_pmuludq_128(<4 x i32> %a0, <4 x i32> %a1) {
+; CHECK-LABEL: @undef_pmuludq_128(
+; CHECK-NEXT: ret <2 x i64> zeroinitializer
+;
+ %1 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> undef, <4 x i32> undef)
+ ret <2 x i64> %1
+}
+
+define <4 x i64> @undef_pmuludq_256(<8 x i32> %a0, <8 x i32> %a1) {
+; CHECK-LABEL: @undef_pmuludq_256(
+; CHECK-NEXT: ret <4 x i64> zeroinitializer
+;
+ %1 = call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> undef, <8 x i32> undef)
+ ret <4 x i64> %1
+}
+
+define <8 x i64> @undef_pmuludq_512(<16 x i32> %a0, <16 x i32> %a1) {
+; CHECK-LABEL: @undef_pmuludq_512(
+; CHECK-NEXT: ret <8 x i64> zeroinitializer
+;
+ %1 = call <8 x i64> @llvm.x86.avx512.pmulu.dq.512(<16 x i32> undef, <16 x i32> undef)
+ ret <8 x i64> %1
+}
+
+define <2 x i64> @undef_pmuldq_128(<4 x i32> %a0, <4 x i32> %a1) {
+; CHECK-LABEL: @undef_pmuldq_128(
+; CHECK-NEXT: ret <2 x i64> zeroinitializer
+;
+ %1 = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> undef, <4 x i32> undef)
+ ret <2 x i64> %1
+}
+
+define <4 x i64> @undef_pmuldq_256(<8 x i32> %a0, <8 x i32> %a1) {
+; CHECK-LABEL: @undef_pmuldq_256(
+; CHECK-NEXT: ret <4 x i64> zeroinitializer
+;
+ %1 = call <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32> undef, <8 x i32> undef)
+ ret <4 x i64> %1
+}
+
+define <8 x i64> @undef_pmuldq_512(<16 x i32> %a0, <16 x i32> %a1) {
+; CHECK-LABEL: @undef_pmuldq_512(
+; CHECK-NEXT: ret <8 x i64> zeroinitializer
+;
+ %1 = call <8 x i64> @llvm.x86.avx512.pmul.dq.512(<16 x i32> undef, <16 x i32> undef)
+ ret <8 x i64> %1
+}
+
+define <2 x i64> @undef_zero_pmuludq_128(<4 x i32> %a0, <4 x i32> %a1) {
+; CHECK-LABEL: @undef_zero_pmuludq_128(
+; CHECK-NEXT: ret <2 x i64> zeroinitializer
+;
+ %1 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> undef, <4 x i32> zeroinitializer)
+ ret <2 x i64> %1
+}
+
+define <4 x i64> @undef_zero_pmuludq_256(<8 x i32> %a0, <8 x i32> %a1) {
+; CHECK-LABEL: @undef_zero_pmuludq_256(
+; CHECK-NEXT: ret <4 x i64> zeroinitializer
+;
+ %1 = call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> zeroinitializer, <8 x i32> undef)
+ ret <4 x i64> %1
+}
+
+define <8 x i64> @undef_zero_pmuludq_512(<16 x i32> %a0, <16 x i32> %a1) {
+; CHECK-LABEL: @undef_zero_pmuludq_512(
+; CHECK-NEXT: ret <8 x i64> zeroinitializer
+;
+ %1 = call <8 x i64> @llvm.x86.avx512.pmulu.dq.512(<16 x i32> undef, <16 x i32> zeroinitializer)
+ ret <8 x i64> %1
+}
+
+define <2 x i64> @undef_zero_pmuldq_128(<4 x i32> %a0, <4 x i32> %a1) {
+; CHECK-LABEL: @undef_zero_pmuldq_128(
+; CHECK-NEXT: ret <2 x i64> zeroinitializer
+;
+ %1 = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> zeroinitializer, <4 x i32> undef)
+ ret <2 x i64> %1
+}
+
+define <4 x i64> @undef_zero_pmuldq_256(<8 x i32> %a0, <8 x i32> %a1) {
+; CHECK-LABEL: @undef_zero_pmuldq_256(
+; CHECK-NEXT: ret <4 x i64> zeroinitializer
+;
+ %1 = call <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32> undef, <8 x i32> zeroinitializer)
+ ret <4 x i64> %1
+}
+
+define <8 x i64> @undef_zero_pmuldq_512(<16 x i32> %a0, <16 x i32> %a1) {
+; CHECK-LABEL: @undef_zero_pmuldq_512(
+; CHECK-NEXT: ret <8 x i64> zeroinitializer
+;
+ %1 = call <8 x i64> @llvm.x86.avx512.pmul.dq.512(<16 x i32> zeroinitializer, <16 x i32> undef)
+ ret <8 x i64> %1
+}
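+
+; Folding these to zero (rather than undef) is deliberate: the intrinsics
+; zero/sign-extend the even i32 elements before multiplying, so the i64
+; results are constrained and cannot soundly be replaced by a full undef;
+; choosing zero for the undef input is the simplest valid result. Roughly,
+; the unsigned 128-bit form behaves like this sketch, where %a64/%b64 stand
+; for the <4 x i32> operands bitcast to <2 x i64>:
+;   %x = and <2 x i64> %a64, <i64 4294967295, i64 4294967295>
+;   %y = and <2 x i64> %b64, <i64 4294967295, i64 4294967295>
+;   %r = mul <2 x i64> %x, %y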
+
+;
+; Constant Folding
+;
+
+define <2 x i64> @fold_pmuludq_128(<4 x i32> %a0, <4 x i32> %a1) {
+; CHECK-LABEL: @fold_pmuludq_128(
+; CHECK-NEXT: ret <2 x i64> <i64 9223372030412324865, i64 4294967295>
+;
+ %1 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 2147483647, i32 1, i32 1, i32 3>)
+ ret <2 x i64> %1
+}
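+
+; Worked example for the lanes above (even elements 0 and 2 as unsigned):
+;   lane 0: 4294967295 * 2147483647 = 9223372030412324865 (0x7FFFFFFE80000001)
+;   lane 1: 4294967295 * 1          = 4294967295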
+
+define <4 x i64> @fold_pmuludq_256(<8 x i32> %a0, <8 x i32> %a1) {
+; CHECK-LABEL: @fold_pmuludq_256(
+; CHECK-NEXT: ret <4 x i64> zeroinitializer
+;
+ %1 = call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> zeroinitializer, <8 x i32> zeroinitializer)
+ ret <4 x i64> %1
+}
+
+define <8 x i64> @fold_pmuludq_512(<16 x i32> %a0, <16 x i32> %a1) {
+; CHECK-LABEL: @fold_pmuludq_512(
+; CHECK-NEXT: ret <8 x i64> <i64 0, i64 0, i64 255, i64 131070, i64 0, i64 -281474976645121, i64 140737488289792, i64 281470681743360>
+;
+ %1 = call <8 x i64> @llvm.x86.avx512.pmulu.dq.512(<16 x i32> <i32 0, i32 0, i32 undef, i32 0, i32 1, i32 1, i32 2, i32 2, i32 undef, i32 undef, i32 -1, i32 -1, i32 65536, i32 -1, i32 -65536, i32 undef>, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 1, i32 255, i32 -256, i32 65535, i32 -65536, i32 0, i32 -1, i32 -65535, i32 -65535, i32 2147483647, i32 2147483648, i32 65536, i32 -65535>)
+ ret <8 x i64> %1
+}
+
+define <2 x i64> @fold_pmuldq_128(<4 x i32> %a0, <4 x i32> %a1) {
+; CHECK-LABEL: @fold_pmuldq_128(
+; CHECK-NEXT: ret <2 x i64> <i64 0, i64 2>
+;
+ %1 = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> <i32 undef, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 undef, i32 1, i32 -2, i32 3>)
+ ret <2 x i64> %1
+}
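+
+; pmuldq sign-extends rather than zero-extends, so lane 1 here is a genuinely
+; signed product, (-1) * (-2) = 2, where pmuludq would instead multiply
+; 4294967295 by 4294967294; the undef pair in lane 0 again folds to 0.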
+
+define <4 x i64> @fold_pmuldq_256(<8 x i32> %a0, <8 x i32> %a1) {
+; CHECK-LABEL: @fold_pmuldq_256(
+; CHECK-NEXT: ret <4 x i64> <i64 0, i64 4294836225, i64 140737488289792, i64 -140737488355328>
+;
+ %1 = call <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32> <i32 undef, i32 1, i32 -65535, i32 128, i32 65536, i32 2147483647, i32 -2147483648, i32 65536>, <8 x i32> <i32 0, i32 -1, i32 -65535, i32 -65535, i32 2147483647, i32 2147483648, i32 65536, i32 -65535>)
+ ret <4 x i64> %1
+}
+
+define <8 x i64> @fold_pmuldq_512(<16 x i32> %a0, <16 x i32> %a1) {
+; CHECK-LABEL: @fold_pmuldq_512(
+; CHECK-NEXT: ret <8 x i64> zeroinitializer
+;
+ %1 = call <8 x i64> @llvm.x86.avx512.pmul.dq.512(<16 x i32> zeroinitializer, <16 x i32> <i32 undef, i32 -1, i32 -3, i32 -1, i32 8, i32 10, i32 -256, i32 65536, i32 undef, i32 1, i32 -65535, i32 128, i32 65536, i32 2147483647, i32 -2147483648, i32 65536>)
+ ret <8 x i64> %1
+}
+
+;
; PMULUDQ/PMULDQ - only the even elements (0, 2, 4, 6) of the vXi32 inputs are required.
;
@@ -55,8 +207,8 @@ define <2 x i64> @test_demanded_elts_pmuldq_128(<4 x i32> %a0, <4 x i32> %a1) {
ret <2 x i64> %3
}
-define <4 x i64> @test_demanded_elts_pmuluq_256(<8 x i32> %a0, <8 x i32> %a1) {
-; CHECK-LABEL: @test_demanded_elts_pmuluq_256(
+define <4 x i64> @test_demanded_elts_pmuldq_256(<8 x i32> %a0, <8 x i32> %a1) {
+; CHECK-LABEL: @test_demanded_elts_pmuldq_256(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> %a1, <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 7, i32 undef>
; CHECK-NEXT: [[TMP2:%.*]] = call <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32> %a0, <8 x i32> [[TMP1]])
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i64> [[TMP2]], <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
@@ -69,8 +221,8 @@ define <4 x i64> @test_demanded_elts_pmuluq_256(<8 x i32> %a0, <8 x i32> %a1) {
ret <4 x i64> %4
}
-define <8 x i64> @test_demanded_elts_pmuluq_512(<16 x i32> %a0, <16 x i32> %a1) {
-; CHECK-LABEL: @test_demanded_elts_pmuluq_512(
+define <8 x i64> @test_demanded_elts_pmuldq_512(<16 x i32> %a0, <16 x i32> %a1) {
+; CHECK-LABEL: @test_demanded_elts_pmuldq_512(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> %a1, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 7, i32 undef, i32 9, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 15, i32 undef>
; CHECK-NEXT: [[TMP2:%.*]] = call <8 x i64> @llvm.x86.avx512.pmul.dq.512(<16 x i32> %a0, <16 x i32> [[TMP1]])
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> [[TMP2]], <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 3, i32 3, i32 4, i32 4, i32 7, i32 7>
diff --git a/test/Transforms/InstCombine/x86-pack.ll b/test/Transforms/InstCombine/x86-pack.ll
new file mode 100644
index 000000000000..f3c41a8aa476
--- /dev/null
+++ b/test/Transforms/InstCombine/x86-pack.ll
@@ -0,0 +1,366 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+;
+; UNDEF Elts
+;
+
+define <8 x i16> @undef_packssdw_128() {
+; CHECK-LABEL: @undef_packssdw_128(
+; CHECK-NEXT: ret <8 x i16> undef
+;
+ %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> undef, <4 x i32> undef)
+ ret <8 x i16> %1
+}
+
+define <8 x i16> @undef_packusdw_128() {
+; CHECK-LABEL: @undef_packusdw_128(
+; CHECK-NEXT: ret <8 x i16> undef
+;
+ %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> undef, <4 x i32> undef)
+ ret <8 x i16> %1
+}
+
+define <16 x i8> @undef_packsswb_128() {
+; CHECK-LABEL: @undef_packsswb_128(
+; CHECK-NEXT: ret <16 x i8> undef
+;
+ %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> undef, <8 x i16> undef)
+ ret <16 x i8> %1
+}
+
+define <16 x i8> @undef_packuswb_128() {
+; CHECK-LABEL: @undef_packuswb_128(
+; CHECK-NEXT: ret <16 x i8> undef
+;
+ %1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> undef, <8 x i16> undef)
+ ret <16 x i8> %1
+}
+
+define <16 x i16> @undef_packssdw_256() {
+; CHECK-LABEL: @undef_packssdw_256(
+; CHECK-NEXT: ret <16 x i16> undef
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> undef, <8 x i32> undef)
+ ret <16 x i16> %1
+}
+
+define <16 x i16> @undef_packusdw_256() {
+; CHECK-LABEL: @undef_packusdw_256(
+; CHECK-NEXT: ret <16 x i16> undef
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> undef, <8 x i32> undef)
+ ret <16 x i16> %1
+}
+
+define <32 x i8> @undef_packsswb_256() {
+; CHECK-LABEL: @undef_packsswb_256(
+; CHECK-NEXT: ret <32 x i8> undef
+;
+ %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> undef, <16 x i16> undef)
+ ret <32 x i8> %1
+}
+
+define <32 x i8> @undef_packuswb_256() {
+; CHECK-LABEL: @undef_packuswb_256(
+; CHECK-NEXT: ret <32 x i8> undef
+;
+ %1 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> undef, <16 x i16> undef)
+ ret <32 x i8> %1
+}
+
+define <32 x i16> @undef_packssdw_512() {
+; CHECK-LABEL: @undef_packssdw_512(
+; CHECK-NEXT: ret <32 x i16> undef
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> undef, <16 x i32> undef)
+ ret <32 x i16> %1
+}
+
+define <32 x i16> @undef_packusdw_512() {
+; CHECK-LABEL: @undef_packusdw_512(
+; CHECK-NEXT: ret <32 x i16> undef
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> undef, <16 x i32> undef)
+ ret <32 x i16> %1
+}
+
+define <64 x i8> @undef_packsswb_512() {
+; CHECK-LABEL: @undef_packsswb_512(
+; CHECK-NEXT: ret <64 x i8> undef
+;
+ %1 = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> undef, <32 x i16> undef)
+ ret <64 x i8> %1
+}
+
+define <64 x i8> @undef_packuswb_512() {
+; CHECK-LABEL: @undef_packuswb_512(
+; CHECK-NEXT: ret <64 x i8> undef
+;
+ %1 = call <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16> undef, <32 x i16> undef)
+ ret <64 x i8> %1
+}
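+
+; Unlike the pmuludq/pmuldq zero folds, an all-undef pack can fold to undef:
+; each output element is an independent saturating truncation of one input
+; element, and saturation maps the full input range onto the full output
+; range, so no output bit pattern is ruled out.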
+
+;
+; Constant Folding
+;
+
+define <8 x i16> @fold_packssdw_128() {
+; CHECK-LABEL: @fold_packssdw_128(
+; CHECK-NEXT: ret <8 x i16> <i16 0, i16 -1, i16 32767, i16 -32768, i16 0, i16 0, i16 0, i16 0>
+;
+ %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> <i32 0, i32 -1, i32 65536, i32 -131072>, <4 x i32> zeroinitializer)
+ ret <8 x i16> %1
+}
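+
+; Signed saturation at work: 0 and -1 fit in i16 and pass through, 65536
+; clamps to 32767 (INT16_MAX), -131072 clamps to -32768 (INT16_MIN), and the
+; upper four lanes come from the zero operand.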
+
+define <8 x i16> @fold_packusdw_128() {
+; CHECK-LABEL: @fold_packusdw_128(
+; CHECK-NEXT: ret <8 x i16> <i16 undef, i16 undef, i16 undef, i16 undef, i16 0, i16 0, i16 -32768, i16 -1>
+;
+ %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> undef, <4 x i32> <i32 0, i32 -1, i32 32768, i32 65537>)
+ ret <8 x i16> %1
+}
+
+define <16 x i8> @fold_packsswb_128() {
+; CHECK-LABEL: @fold_packsswb_128(
+; CHECK-NEXT: ret <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>
+;
+ %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> zeroinitializer, <8 x i16> undef)
+ ret <16 x i8> %1
+}
+
+define <16 x i8> @fold_packuswb_128() {
+; CHECK-LABEL: @fold_packuswb_128(
+; CHECK-NEXT: ret <16 x i8> <i8 0, i8 1, i8 0, i8 -1, i8 0, i8 0, i8 0, i8 15, i8 0, i8 127, i8 0, i8 1, i8 0, i8 1, i8 0, i8 0>
+;
+ %1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> <i16 0, i16 1, i16 -1, i16 255, i16 65535, i16 -32768, i16 -127, i16 15>, <8 x i16> <i16 -15, i16 127, i16 32768, i16 -65535, i16 -255, i16 1, i16 -1, i16 0>)
+ ret <16 x i8> %1
+}
+
+define <16 x i16> @fold_packssdw_256() {
+; CHECK-LABEL: @fold_packssdw_256(
+; CHECK-NEXT: ret <16 x i16> <i16 0, i16 256, i16 32767, i16 -32768, i16 undef, i16 undef, i16 undef, i16 undef, i16 -127, i16 -32768, i16 -32767, i16 32767, i16 undef, i16 undef, i16 undef, i16 undef>
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> <i32 0, i32 256, i32 65535, i32 -65536, i32 -127, i32 -32768, i32 -32767, i32 32767>, <8 x i32> undef)
+ ret <16 x i16> %1
+}
+
+define <16 x i16> @fold_packusdw_256() {
+; CHECK-LABEL: @fold_packusdw_256(
+; CHECK-NEXT: ret <16 x i16> <i16 0, i16 0, i16 0, i16 -1, i16 0, i16 256, i16 -1, i16 0, i16 127, i16 -32768, i16 32767, i16 0, i16 0, i16 0, i16 0, i16 32767>
+;
+ %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> <i32 0, i32 -256, i32 -65535, i32 65536, i32 127, i32 32768, i32 32767, i32 -32767>, <8 x i32> <i32 0, i32 256, i32 65535, i32 -65536, i32 -127, i32 -32768, i32 -32767, i32 32767>)
+ ret <16 x i16> %1
+}
+
+define <32 x i8> @fold_packsswb_256() {
+; CHECK-LABEL: @fold_packsswb_256(
+; CHECK-NEXT: ret <32 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+;
+ %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> undef, <16 x i16> zeroinitializer)
+ ret <32 x i8> %1
+}
+
+define <32 x i8> @fold_packuswb_256() {
+; CHECK-LABEL: @fold_packuswb_256(
+; CHECK-NEXT: ret <32 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64>
+;
+ %1 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> zeroinitializer, <16 x i16> <i16 0, i16 -127, i16 -128, i16 -32768, i16 65536, i16 255, i16 256, i16 512, i16 -1, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64>)
+ ret <32 x i8> %1
+}
+
+define <32 x i16> @fold_packssdw_512() {
+; CHECK-LABEL: @fold_packssdw_512(
+; CHECK-NEXT: ret <32 x i16> <i16 0, i16 512, i16 32767, i16 -32768, i16 undef, i16 undef, i16 undef, i16 undef, i16 -127, i16 -32768, i16 -32767, i16 32767, i16 undef, i16 undef, i16 undef, i16 undef, i16 0, i16 512, i16 32767, i16 -32768, i16 undef, i16 undef, i16 undef, i16 undef, i16 -127, i16 -32768, i16 -32767, i16 32767, i16 undef, i16 undef, i16 undef, i16 undef>
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> <i32 0, i32 512, i32 65535, i32 -65536, i32 -127, i32 -32768, i32 -32767, i32 32767, i32 0, i32 512, i32 65535, i32 -65536, i32 -127, i32 -32768, i32 -32767, i32 32767>, <16 x i32> undef)
+ ret <32 x i16> %1
+}
+
+define <32 x i16> @fold_packusdw_512() {
+; CHECK-LABEL: @fold_packusdw_512(
+; CHECK-NEXT: ret <32 x i16> <i16 0, i16 0, i16 0, i16 -1, i16 0, i16 512, i16 -1, i16 0, i16 127, i16 -32768, i16 32767, i16 0, i16 0, i16 0, i16 0, i16 32767, i16 0, i16 0, i16 0, i16 -1, i16 0, i16 512, i16 -1, i16 0, i16 127, i16 -32768, i16 32767, i16 0, i16 0, i16 0, i16 0, i16 32767>
+;
+ %1 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> <i32 0, i32 -512, i32 -65535, i32 65536, i32 127, i32 32768, i32 32767, i32 -32767, i32 0, i32 -512, i32 -65535, i32 65536, i32 127, i32 32768, i32 32767, i32 -32767>, <16 x i32> <i32 0, i32 512, i32 65535, i32 -65536, i32 -127, i32 -32768, i32 -32767, i32 32767, i32 0, i32 512, i32 65535, i32 -65536, i32 -127, i32 -32768, i32 -32767, i32 32767>)
+ ret <32 x i16> %1
+}
+
+define <64 x i8> @fold_packsswb_512() {
+; CHECK-LABEL: @fold_packsswb_512(
+; CHECK-NEXT: ret <64 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+;
+ %1 = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> undef, <32 x i16> zeroinitializer)
+ ret <64 x i8> %1
+}
+
+define <64 x i8> @fold_packuswb_512() {
+; CHECK-LABEL: @fold_packuswb_512(
+; CHECK-NEXT: ret <64 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64>
+;
+ %1 = call <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16> zeroinitializer, <32 x i16> <i16 0, i16 -127, i16 -128, i16 -32768, i16 65536, i16 255, i16 512, i16 512, i16 -1, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 0, i16 -127, i16 -128, i16 -32768, i16 65536, i16 255, i16 512, i16 512, i16 -1, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64>)
+ ret <64 x i8> %1
+}
+
+;
+; Demanded Elts
+;
+
+define <8 x i16> @elts_packssdw_128(<4 x i32> %a0, <4 x i32> %a1) {
+; CHECK-LABEL: @elts_packssdw_128(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> [[A0:%.*]], <4 x i32> undef)
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: ret <8 x i16> [[TMP2]]
+;
+ %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <4 x i32> <i32 3, i32 1, i32 undef, i32 undef>
+ %2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <4 x i32> <i32 undef, i32 2, i32 1, i32 undef>
+ %3 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %1, <4 x i32> %2)
+ %4 = shufflevector <8 x i16> %3, <8 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 7, i32 7, i32 7, i32 7>
+ ret <8 x i16> %4
+}
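+
+; Only output lanes 1 and 7 are read by the final shuffle: lane 1 maps straight
+; to element 1 of %a0, so the feeding shuffle is dropped, and lane 7 maps to an
+; undef element of the second source, so that operand becomes undef.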
+
+define <8 x i16> @elts_packusdw_128(<4 x i32> %a0, <4 x i32> %a1) {
+; CHECK-LABEL: @elts_packusdw_128(
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> [[A0:%.*]], <4 x i32> [[A1:%.*]])
+; CHECK-NEXT: ret <8 x i16> [[TMP1]]
+;
+ %1 = insertelement <4 x i32> %a0, i32 0, i32 0
+ %2 = insertelement <4 x i32> %a1, i32 0, i32 3
+ %3 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %1, <4 x i32> %2)
+ %4 = shufflevector <8 x i16> %3, <8 x i16> undef, <8 x i32> <i32 undef, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 undef>
+ ret <8 x i16> %4
+}
+
+define <16 x i8> @elts_packsswb_128(<8 x i16> %a0, <8 x i16> %a1) {
+; CHECK-LABEL: @elts_packsswb_128(
+; CHECK-NEXT: ret <16 x i8> zeroinitializer
+;
+ %1 = insertelement <8 x i16> %a0, i16 0, i32 0
+ %2 = insertelement <8 x i16> %a1, i16 0, i32 0
+ %3 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %1, <8 x i16> %2)
+ %4 = shufflevector <16 x i8> %3, <16 x i8> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
+ ret <16 x i8> %4
+}
+
+define <16 x i8> @elts_packuswb_128(<8 x i16> %a0, <8 x i16> %a1) {
+; CHECK-LABEL: @elts_packuswb_128(
+; CHECK-NEXT: ret <16 x i8> undef
+;
+ %1 = insertelement <8 x i16> undef, i16 0, i32 0
+ %2 = insertelement <8 x i16> undef, i16 0, i32 0
+ %3 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %1, <8 x i16> %2)
+ %4 = shufflevector <16 x i8> %3, <16 x i8> undef, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+ ret <16 x i8> %4
+}
+
+define <16 x i16> @elts_packssdw_256(<8 x i32> %a0, <8 x i32> %a1) {
+; CHECK-LABEL: @elts_packssdw_256(
+; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> [[A0:%.*]], <8 x i32> undef)
+; CHECK-NEXT: ret <16 x i16> [[TMP1]]
+;
+ %1 = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %2 = shufflevector <8 x i32> %a1, <8 x i32> undef, <8 x i32> <i32 undef, i32 2, i32 1, i32 undef, i32 undef, i32 6, i32 5, i32 undef>
+ %3 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %1, <8 x i32> %2)
+ %4 = shufflevector <16 x i16> %3, <16 x i16> undef, <16 x i32> <i32 undef, i32 undef, i32 2, i32 3, i32 4, i32 undef, i32 undef, i32 7, i32 8, i32 undef, i32 undef, i32 11, i32 12, i32 undef, i32 undef, i32 15>
+ ret <16 x i16> %4
+}
+
+define <16 x i16> @elts_packusdw_256(<8 x i32> %a0, <8 x i32> %a1) {
+; CHECK-LABEL: @elts_packusdw_256(
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[A1:%.*]], <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> undef, <8 x i32> [[TMP1]])
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i16> [[TMP2]], <16 x i16> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: ret <16 x i16> [[TMP3]]
+;
+ %1 = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %2 = shufflevector <8 x i32> %a1, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ %3 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %1, <8 x i32> %2)
+ %4 = shufflevector <16 x i16> %3, <16 x i16> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <16 x i16> %4
+}
+
+define <32 x i8> @elts_packsswb_256(<16 x i16> %a0, <16 x i16> %a1) {
+; CHECK-LABEL: @elts_packsswb_256(
+; CHECK-NEXT: ret <32 x i8> zeroinitializer
+;
+ %1 = insertelement <16 x i16> %a0, i16 0, i32 0
+ %2 = insertelement <16 x i16> %a1, i16 0, i32 8
+ %3 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %1, <16 x i16> %2)
+ %4 = shufflevector <32 x i8> %3, <32 x i8> undef, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24>
+ ret <32 x i8> %4
+}
+
+define <32 x i8> @elts_packuswb_256(<16 x i16> %a0, <16 x i16> %a1) {
+; CHECK-LABEL: @elts_packuswb_256(
+; CHECK-NEXT: ret <32 x i8> undef
+;
+ %1 = insertelement <16 x i16> undef, i16 0, i32 1
+ %2 = insertelement <16 x i16> undef, i16 0, i32 0
+ %3 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %1, <16 x i16> %2)
+ %4 = shufflevector <32 x i8> %3, <32 x i8> undef, <32 x i32> zeroinitializer
+ ret <32 x i8> %4
+}
+
+define <32 x i16> @elts_packssdw_512(<16 x i32> %a0, <16 x i32> %a1) {
+; CHECK-LABEL: @elts_packssdw_512(
+; CHECK-NEXT: [[TMP1:%.*]] = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> [[A0:%.*]], <16 x i32> undef)
+; CHECK-NEXT: ret <32 x i16> [[TMP1]]
+;
+ %1 = shufflevector <16 x i32> %a0, <16 x i32> undef, <16 x i32> <i32 1, i32 0, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 9, i32 8, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %2 = shufflevector <16 x i32> %a1, <16 x i32> undef, <16 x i32> <i32 undef, i32 2, i32 1, i32 undef, i32 undef, i32 6, i32 5, i32 undef, i32 undef, i32 10, i32 9, i32 undef, i32 undef, i32 14, i32 13, i32 undef>
+ %3 = call <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32> %1, <16 x i32> %2)
+ %4 = shufflevector <32 x i16> %3, <32 x i16> undef, <32 x i32> <i32 undef, i32 undef, i32 2, i32 3, i32 4, i32 undef, i32 undef, i32 7, i32 8, i32 undef, i32 undef, i32 11, i32 12, i32 undef, i32 undef, i32 15, i32 undef, i32 undef, i32 18, i32 19, i32 20, i32 undef, i32 undef, i32 23, i32 24, i32 undef, i32 undef, i32 27, i32 28, i32 undef, i32 undef, i32 31>
+ ret <32 x i16> %4
+}
+
+define <32 x i16> @elts_packusdw_512(<16 x i32> %a0, <16 x i32> %a1) {
+; CHECK-LABEL: @elts_packusdw_512(
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> [[A1:%.*]], <16 x i32> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
+; CHECK-NEXT: [[TMP2:%.*]] = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> undef, <16 x i32> [[TMP1]])
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <32 x i16> [[TMP2]], <32 x i16> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 20, i32 21, i32 22, i32 23, i32 undef, i32 undef, i32 undef, i32 undef, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: ret <32 x i16> [[TMP3]]
+;
+ %1 = shufflevector <16 x i32> %a0, <16 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %2 = shufflevector <16 x i32> %a1, <16 x i32> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
+ %3 = call <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32> %1, <16 x i32> %2)
+ %4 = shufflevector <32 x i16> %3, <32 x i16> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 20, i32 21, i32 22, i32 23, i32 undef, i32 undef, i32 undef, i32 undef, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <32 x i16> %4
+}
+
+define <64 x i8> @elts_packsswb_512(<32 x i16> %a0, <32 x i16> %a1) {
+; CHECK-LABEL: @elts_packsswb_512(
+; CHECK-NEXT: ret <64 x i8> zeroinitializer
+;
+ %1 = insertelement <32 x i16> %a0, i16 0, i32 0
+ %2 = insertelement <32 x i16> %a1, i16 0, i32 8
+ %3 = insertelement <32 x i16> %1, i16 0, i32 16
+ %4 = insertelement <32 x i16> %2, i16 0, i32 24
+ %5 = call <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16> %3, <32 x i16> %4)
+ %6 = shufflevector <64 x i8> %5, <64 x i8> undef, <64 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 56, i32 56, i32 56, i32 56, i32 56, i32 56, i32 56, i32 56, i32 56, i32 56, i32 56, i32 56, i32 56, i32 56, i32 56, i32 56>
+ ret <64 x i8> %6
+}
+
+define <64 x i8> @elts_packuswb_512(<32 x i16> %a0, <32 x i16> %a1) {
+; CHECK-LABEL: @elts_packuswb_512(
+; CHECK-NEXT: ret <64 x i8> undef
+;
+ %1 = insertelement <32 x i16> undef, i16 0, i32 1
+ %2 = insertelement <32 x i16> undef, i16 0, i32 0
+ %3 = call <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16> %1, <32 x i16> %2)
+ %4 = shufflevector <64 x i8> %3, <64 x i8> undef, <64 x i32> zeroinitializer
+ ret <64 x i8> %4
+}
+
+declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind readnone
+declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>) nounwind readnone
+declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32>, <4 x i32>) nounwind readnone
+
+declare <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32>, <8 x i32>) nounwind readnone
+declare <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32>, <8 x i32>) nounwind readnone
+declare <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16>, <16 x i16>) nounwind readnone
+declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>) nounwind readnone
+
+declare <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32>, <16 x i32>) nounwind readnone
+declare <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32>, <16 x i32>) nounwind readnone
+declare <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16>, <32 x i16>) nounwind readnone
+declare <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16>, <32 x i16>) nounwind readnone
diff --git a/test/Transforms/InstCombine/x86-pshufb.ll b/test/Transforms/InstCombine/x86-pshufb.ll
index b37884ddd58a..f181ef57fe20 100644
--- a/test/Transforms/InstCombine/x86-pshufb.ll
+++ b/test/Transforms/InstCombine/x86-pshufb.ll
@@ -468,6 +468,48 @@ define <64 x i8> @fold_with_allundef_elts_avx512(<64 x i8> %InVec) {
ret <64 x i8> %1
}
+; Demanded elts tests.
+
+define <16 x i8> @demanded_elts_insertion(<16 x i8> %InVec, <16 x i8> %BaseMask, i8 %M0, i8 %M15) {
+; CHECK-LABEL: @demanded_elts_insertion(
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> %BaseMask)
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 undef>
+; CHECK-NEXT: ret <16 x i8> [[TMP2]]
+;
+ %1 = insertelement <16 x i8> %BaseMask, i8 %M0, i32 0
+ %2 = insertelement <16 x i8> %1, i8 %M15, i32 15
+ %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> %2)
+ %4 = shufflevector <16 x i8> %3, <16 x i8> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 undef>
+ ret <16 x i8> %4
+}
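+
+; The trailing shuffle never reads output lanes 0 and 15, so the mask bytes
+; inserted at those positions are dead and both insertelements fold away,
+; leaving the original %BaseMask.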
+
+define <32 x i8> @demanded_elts_insertion_avx2(<32 x i8> %InVec, <32 x i8> %BaseMask, i8 %M0, i8 %M22) {
+; CHECK-LABEL: @demanded_elts_insertion_avx2(
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <32 x i8> %BaseMask, i8 %M0, i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> [[TMP1]])
+; CHECK-NEXT: ret <32 x i8> [[TMP2]]
+;
+ %1 = insertelement <32 x i8> %BaseMask, i8 %M0, i32 0
+ %2 = insertelement <32 x i8> %1, i8 %M22, i32 22
+ %3 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> %2)
+ %4 = shufflevector <32 x i8> %3, <32 x i8> undef, <32 x i32> <i32 undef, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 undef, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ ret <32 x i8> %4
+}
+
+define <64 x i8> @demanded_elts_insertion_avx512(<64 x i8> %InVec, <64 x i8> %BaseMask, i8 %M0, i8 %M30) {
+; CHECK-LABEL: @demanded_elts_insertion_avx512(
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <64 x i8> undef, i8 %M0, i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = tail call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> %InVec, <64 x i8> [[TMP1]])
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <64 x i8> [[TMP2]], <64 x i8> undef, <64 x i32> zeroinitializer
+; CHECK-NEXT: ret <64 x i8> [[TMP3]]
+;
+ %1 = insertelement <64 x i8> %BaseMask, i8 %M0, i32 0
+ %2 = insertelement <64 x i8> %1, i8 %M30, i32 30
+ %3 = tail call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> %InVec, <64 x i8> %2)
+ %4 = shufflevector <64 x i8> %3, <64 x i8> undef, <64 x i32> zeroinitializer
+ ret <64 x i8> %4
+}
+
declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)
declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>)
declare <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8>, <64 x i8>)
diff --git a/test/Transforms/InstCombine/x86-vpermil.ll b/test/Transforms/InstCombine/x86-vpermil.ll
index fad10d7ad5c5..f68eb36c4b58 100644
--- a/test/Transforms/InstCombine/x86-vpermil.ll
+++ b/test/Transforms/InstCombine/x86-vpermil.ll
@@ -221,6 +221,74 @@ define <8 x double> @undef_test_vpermilvar_pd_512(<8 x double> %v) {
ret <8 x double> %a
}
+; Simplify demanded elts
+
+define <4 x float> @elts_test_vpermilvar_ps(<4 x float> %a0, i32 %a1) {
+; CHECK-LABEL: @elts_test_vpermilvar_ps(
+; CHECK-NEXT: ret <4 x float> %a0
+;
+ %1 = insertelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %a1, i32 3
+ %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %1)
+ %3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
+ ret <4 x float> %3
+}
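+
+; Only index 3 is non-constant and the final shuffle ignores output lane 3;
+; the remaining indices <0, 1, 2> form an identity permute, so the whole call
+; folds to %a0.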
+
+define <8 x float> @elts_test_vpermilvar_ps_256(<8 x float> %a0, <8 x i32> %a1) {
+; CHECK-LABEL: @elts_test_vpermilvar_ps_256(
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 undef, i32 0, i32 undef, i32 1, i32 undef, i32 6, i32 undef, i32 7>
+; CHECK-NEXT: ret <8 x float> [[TMP1]]
+;
+ %1 = shufflevector <8 x i32> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 3, i32 2, i32 1, i32 0>, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %1)
+ %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 undef, i32 1, i32 undef, i32 3, i32 undef, i32 5, i32 undef, i32 7>
+ ret <8 x float> %3
+}
+
+define <16 x float> @elts_test_vpermilvar_ps_512(<16 x float> %a0, <16 x i32> %a1, i32 %a2) {
+; CHECK-LABEL: @elts_test_vpermilvar_ps_512(
+; CHECK-NEXT: [[TMP1:%.*]] = tail call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %a0, <16 x i32> %a1)
+; CHECK-NEXT: ret <16 x float> [[TMP1]]
+;
+ %1 = insertelement <16 x i32> %a1, i32 %a2, i32 0
+ %2 = tail call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %a0, <16 x i32> %1)
+ %3 = shufflevector <16 x float> %2, <16 x float> undef, <16 x i32> <i32 undef, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %3
+}
+
+define <2 x double> @elts_test_vpermilvar_pd(<2 x double> %a0, i64 %a1) {
+; CHECK-LABEL: @elts_test_vpermilvar_pd(
+; CHECK-NEXT: ret <2 x double> %a0
+;
+ %1 = insertelement <2 x i64> <i64 0, i64 2>, i64 %a1, i32 1
+ %2 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %1)
+ %3 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> <i32 0, i32 undef>
+ ret <2 x double> %3
+}
+
+define <4 x double> @elts_test_vpermilvar_pd_256(<4 x double> %a0, <4 x i64> %a1) {
+; CHECK-LABEL: @elts_test_vpermilvar_pd_256(
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 undef>
+; CHECK-NEXT: ret <4 x double> [[TMP1]]
+;
+ %1 = shufflevector <4 x i64> <i64 0, i64 2, i64 0, i64 2>, <4 x i64> %a1, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+ %2 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %1)
+ %3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
+ ret <4 x double> %3
+}
+
+define <8 x double> @elts_test_vpermilvar_pd_512(<8 x double> %a0, <8 x i64> %a1, i64 %a2) {
+; CHECK-LABEL: @elts_test_vpermilvar_pd_512(
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <8 x i64> undef, i64 %a2, i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = tail call <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double> %a0, <8 x i64> [[TMP1]])
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x double> [[TMP2]], <8 x double> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: ret <8 x double> [[TMP3]]
+;
+ %1 = insertelement <8 x i64> %a1, i64 %a2, i32 0
+ %2 = tail call <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double> %a0, <8 x i64> %1)
+ %3 = shufflevector <8 x double> %2, <8 x double> undef, <8 x i32> zeroinitializer
+ ret <8 x double> %3
+}
+
declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>)
declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i64>)
declare <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double>, <8 x i64>)
diff --git a/test/Transforms/InstCombine/xor.ll b/test/Transforms/InstCombine/xor.ll
index cd137776bbfd..570155b16232 100644
--- a/test/Transforms/InstCombine/xor.ll
+++ b/test/Transforms/InstCombine/xor.ll
@@ -321,7 +321,7 @@ define i32 @test25(i32 %g, i32 %h) {
define i32 @test26(i32 %a, i32 %b) {
; CHECK-LABEL: @test26(
-; CHECK-NEXT: [[T4:%.*]] = and i32 %a, %b
+; CHECK-NEXT: [[T4:%.*]] = and i32 %b, %a
; CHECK-NEXT: ret i32 [[T4]]
;
%b2 = xor i32 %b, -1
@@ -352,3 +352,187 @@ define i32 @test28(i32 %indvar) {
%t214 = xor i32 %t7, -2147483648
ret i32 %t214
}
+
+define i32 @test29(i1 %C) {
+; CHECK-LABEL: @test29(
+; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], i32 915, i32 113
+; CHECK-NEXT: ret i32 [[V]]
+;
+ %A = select i1 %C, i32 1000, i32 10
+ %V = xor i32 %A, 123
+ ret i32 %V
+}
+
+define <2 x i32> @test29vec(i1 %C) {
+; CHECK-LABEL: @test29vec(
+; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], <2 x i32> <i32 915, i32 915>, <2 x i32> <i32 113, i32 113>
+; CHECK-NEXT: ret <2 x i32> [[V]]
+;
+ %A = select i1 %C, <2 x i32> <i32 1000, i32 1000>, <2 x i32> <i32 10, i32 10>
+ %V = xor <2 x i32> %A, <i32 123, i32 123>
+ ret <2 x i32> %V
+}
+
+define <2 x i32> @test29vec2(i1 %C) {
+; CHECK-LABEL: @test29vec2(
+; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], <2 x i32> <i32 915, i32 2185>, <2 x i32> <i32 113, i32 339>
+; CHECK-NEXT: ret <2 x i32> [[V]]
+;
+ %A = select i1 %C, <2 x i32> <i32 1000, i32 2500>, <2 x i32> <i32 10, i32 30>
+ %V = xor <2 x i32> %A, <i32 123, i32 333>
+ ret <2 x i32> %V
+}
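These folds push the xor constant into each select arm; the expected constants are plain xor arithmetic, which the test30 phi variants below reuse. A quick Python check (illustrative, not part of the patch):

    assert 1000 ^ 123 == 915 and 10 ^ 123 == 113    # scalar and splat lanes
    assert 2500 ^ 333 == 2185 and 30 ^ 333 == 339   # second lanes of test29vec2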
+
+define i32 @test30(i1 %which) {
+; CHECK-LABEL: @test30(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
+; CHECK: delay:
+; CHECK-NEXT: br label [[FINAL]]
+; CHECK: final:
+; CHECK-NEXT: [[A:%.*]] = phi i32 [ 915, [[ENTRY:%.*]] ], [ 113, [[DELAY]] ]
+; CHECK-NEXT: ret i32 [[A]]
+;
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+final:
+ %A = phi i32 [ 1000, %entry ], [ 10, %delay ]
+ %value = xor i32 %A, 123
+ ret i32 %value
+}
+
+define <2 x i32> @test30vec(i1 %which) {
+; CHECK-LABEL: @test30vec(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
+; CHECK: delay:
+; CHECK-NEXT: br label [[FINAL]]
+; CHECK: final:
+; CHECK-NEXT: [[A:%.*]] = phi <2 x i32> [ <i32 915, i32 915>, [[ENTRY:%.*]] ], [ <i32 113, i32 113>, [[DELAY]] ]
+; CHECK-NEXT: ret <2 x i32> [[A]]
+;
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+final:
+ %A = phi <2 x i32> [ <i32 1000, i32 1000>, %entry ], [ <i32 10, i32 10>, %delay ]
+ %value = xor <2 x i32> %A, <i32 123, i32 123>
+ ret <2 x i32> %value
+}
+
+define <2 x i32> @test30vec2(i1 %which) {
+; CHECK-LABEL: @test30vec2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
+; CHECK: delay:
+; CHECK-NEXT: br label [[FINAL]]
+; CHECK: final:
+; CHECK-NEXT: [[A:%.*]] = phi <2 x i32> [ <i32 915, i32 2185>, [[ENTRY:%.*]] ], [ <i32 113, i32 339>, [[DELAY]] ]
+; CHECK-NEXT: ret <2 x i32> [[A]]
+;
+entry:
+ br i1 %which, label %final, label %delay
+
+delay:
+ br label %final
+
+final:
+ %A = phi <2 x i32> [ <i32 1000, i32 2500>, %entry ], [ <i32 10, i32 30>, %delay ]
+ %value = xor <2 x i32> %A, <i32 123, i32 333>
+ ret <2 x i32> %value
+}
+
+define i32 @test31(i32 %A, i32 %B) {
+; CHECK-LABEL: @test31(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B:%.*]], -1
+; CHECK-NEXT: [[XOR:%.*]] = and i32 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %and = or i32 %A, %B
+ %xor = xor i32 %B, %and
+ ret i32 %xor
+}
+
+define i32 @test32(i32 %A, i32 %B) {
+; CHECK-LABEL: @test32(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B:%.*]], -1
+; CHECK-NEXT: [[XOR:%.*]] = and i32 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %and = or i32 %B, %A
+ %xor = xor i32 %B, %and
+ ret i32 %xor
+}
+
+define i32 @test33(i32 %A, i32 %B) {
+; CHECK-LABEL: @test33(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B:%.*]], -1
+; CHECK-NEXT: [[XOR:%.*]] = and i32 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %and = or i32 %A, %B
+ %xor = xor i32 %and, %B
+ ret i32 %xor
+}
+
+define i32 @test34(i32 %A, i32 %B) {
+; CHECK-LABEL: @test34(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B:%.*]], -1
+; CHECK-NEXT: [[XOR:%.*]] = and i32 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %and = or i32 %B, %A
+ %xor = xor i32 %and, %B
+ ret i32 %xor
+}
+
+define i32 @test35(i32 %A, i32 %B) {
+; CHECK-LABEL: @test35(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT: [[XOR:%.*]] = and i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %and = and i32 %A, %B
+ %xor = xor i32 %B, %and
+ ret i32 %xor
+}
+
+define i32 @test36(i32 %A, i32 %B) {
+; CHECK-LABEL: @test36(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT: [[XOR:%.*]] = and i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %and = and i32 %B, %A
+ %xor = xor i32 %B, %and
+ ret i32 %xor
+}
+
+define i32 @test37(i32 %A, i32 %B) {
+; CHECK-LABEL: @test37(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT: [[XOR:%.*]] = and i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %and = and i32 %A, %B
+ %xor = xor i32 %and, %B
+ ret i32 %xor
+}
+
+define i32 @test38(i32 %A, i32 %B) {
+; CHECK-LABEL: @test38(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT: [[XOR:%.*]] = and i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %and = and i32 %B, %A
+ %xor = xor i32 %and, %B
+ ret i32 %xor
+}
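test31-test38 exercise the two Boolean absorption identities, B ^ (A | B) == A & ~B and B ^ (A & B) == B & ~A, in every operand order. An exhaustive 8-bit Python check (illustrative only):

    M = 0xFF
    for A in range(256):
        for B in range(256):
            assert B ^ (A | B) == A & ~B & M    # test31-test34
            assert B ^ (A & B) == B & ~A & M    # test35-test38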
diff --git a/test/Transforms/InstCombine/xor2.ll b/test/Transforms/InstCombine/xor2.ll
index f3591ed9c8a9..79e62723f143 100644
--- a/test/Transforms/InstCombine/xor2.ll
+++ b/test/Transforms/InstCombine/xor2.ll
@@ -110,7 +110,7 @@ define i32 @test6(i32 %x) {
define i32 @test7(i32 %a, i32 %b) {
; CHECK-LABEL: @test7(
; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 %b, -1
-; CHECK-NEXT: [[XOR:%.*]] = or i32 %a, [[B_NOT]]
+; CHECK-NEXT: [[XOR:%.*]] = or i32 [[B_NOT]], %a
; CHECK-NEXT: ret i32 [[XOR]]
;
%or = or i32 %a, %b
@@ -123,7 +123,7 @@ define i32 @test7(i32 %a, i32 %b) {
define i32 @test8(i32 %a, i32 %b) {
; CHECK-LABEL: @test8(
; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 %b, -1
-; CHECK-NEXT: [[XOR:%.*]] = or i32 %a, [[B_NOT]]
+; CHECK-NEXT: [[XOR:%.*]] = or i32 [[B_NOT]], %a
; CHECK-NEXT: ret i32 [[XOR]]
;
%neg = xor i32 %a, -1
@@ -144,6 +144,18 @@ define i32 @test9(i32 %b, i32 %c) {
ret i32 %xor2
}
+; (A & B) ^ (B ^ A) -> (A | B)
+define i32 @test9b(i32 %b, i32 %c) {
+; CHECK-LABEL: @test9b(
+; CHECK-NEXT: [[XOR2:%.*]] = or i32 [[B:%.*]], [[C:%.*]]
+; CHECK-NEXT: ret i32 [[XOR2]]
+;
+ %and = and i32 %b, %c
+ %xor = xor i32 %c, %b
+ %xor2 = xor i32 %and, %xor
+ ret i32 %xor2
+}
+
; (A ^ B) ^ (A & B) -> (A | B)
define i32 @test10(i32 %b, i32 %c) {
; CHECK-LABEL: @test10(
@@ -156,6 +168,18 @@ define i32 @test10(i32 %b, i32 %c) {
ret i32 %xor2
}
+; (A ^ B) ^ (A & B) -> (A | B)
+define i32 @test10b(i32 %b, i32 %c) {
+; CHECK-LABEL: @test10b(
+; CHECK-NEXT: [[XOR2:%.*]] = or i32 [[B:%.*]], [[C:%.*]]
+; CHECK-NEXT: ret i32 [[XOR2]]
+;
+ %xor = xor i32 %b, %c
+ %and = and i32 %c, %b
+ %xor2 = xor i32 %xor, %and
+ ret i32 %xor2
+}
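Both new tests rest on the identity (A & B) ^ (A ^ B) == A | B: lanes where both bits are set survive through the 'and', and lanes where exactly one bit is set survive through the 'xor'. Exhaustive 8-bit check (Python, illustrative):

    for A in range(256):
        for B in range(256):
            assert (A & B) ^ (A ^ B) == A | B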
+
define i32 @test11(i32 %A, i32 %B) {
; CHECK-LABEL: @test11(
; CHECK-NEXT: ret i32 0
diff --git a/test/Transforms/InstCombine/zero-point-zero-add.ll b/test/Transforms/InstCombine/zero-point-zero-add.ll
index e466e8ad7429..a23db75525e9 100644
--- a/test/Transforms/InstCombine/zero-point-zero-add.ll
+++ b/test/Transforms/InstCombine/zero-point-zero-add.ll
@@ -15,7 +15,7 @@ define double @test(double %X) {
define double @test1(double %X) {
; CHECK-LABEL: @test1(
-; CHECK-NEXT: [[Y:%.*]] = call double @fabs(double %X)
+; CHECK-NEXT: [[Y:%.*]] = call double @llvm.fabs.f64(double %X)
; CHECK-NEXT: ret double [[Y]]
;
%Y = call double @fabs(double %X)
diff --git a/test/Transforms/InstCombine/zext-or-icmp.ll b/test/Transforms/InstCombine/zext-or-icmp.ll
index 610e9a754f0d..afbe36da3e37 100644
--- a/test/Transforms/InstCombine/zext-or-icmp.ll
+++ b/test/Transforms/InstCombine/zext-or-icmp.ll
@@ -19,3 +19,33 @@ define i8 @zext_or_icmp_icmp(i8 %a, i8 %b) {
; CHECK-NEXT: ret i8 %zext
}
+; Here, widening the or from i1 to i32 and removing one of the icmps would
+; widen an undef value (created by the out-of-range shift), increasing the
+; range of valid values for the return, so we can't do it.
+define i32 @dont_widen_undef() {
+entry:
+ br label %block2
+
+block1:
+ br label %block2
+
+block2:
+ %m.011 = phi i32 [ 33, %entry ], [ 0, %block1 ]
+ %cmp.i = icmp ugt i32 %m.011, 1
+ %m.1.op = lshr i32 1, %m.011
+ %sext.mask = and i32 %m.1.op, 65535
+ %cmp115 = icmp ne i32 %sext.mask, 0
+ %cmp1 = or i1 %cmp.i, %cmp115
+ %conv2 = zext i1 %cmp1 to i32
+ ret i32 %conv2
+
+; CHECK-LABEL: dont_widen_undef(
+; CHECK: %m.011 = phi i32 [ 33, %entry ], [ 0, %block1 ]
+; CHECK-NEXT: %cmp.i = icmp ugt i32 %m.011, 1
+; CHECK-NEXT: %m.1.op = lshr i32 1, %m.011
+; CHECK-NEXT: %sext.mask = and i32 %m.1.op, 65535
+; CHECK-NEXT: %cmp115 = icmp ne i32 %sext.mask, 0
+; CHECK-NEXT: %cmp1 = or i1 %cmp.i, %cmp115
+; CHECK-NEXT: %conv2 = zext i1 %cmp1 to i32
+; CHECK-NEXT: ret i32 %conv2
+}
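The pivotal fact is that 'lshr i32 1, 33' is undef in IR because the shift amount is not less than the bit width, even though ordinary integer arithmetic would give 0. A Python contrast (illustrative):

    # Arbitrary-precision Python happily returns 0 here; LLVM IR does not
    # define the result, which is exactly why the icmp cannot be dropped.
    assert 1 >> 33 == 0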
diff --git a/test/Transforms/InstCombine/zext-phi.ll b/test/Transforms/InstCombine/zext-phi.ll
new file mode 100644
index 000000000000..5e352415c747
--- /dev/null
+++ b/test/Transforms/InstCombine/zext-phi.ll
@@ -0,0 +1,32 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-n8:16:32:64"
+
+; Although i1 is not in the datalayout, we should treat it
+; as a legal type because it is a fundamental type in IR.
+; This means we should shrink the phi (sink the zexts).
+
+define i64 @sink_i1_casts(i1 %cond1, i1 %cond2) {
+; CHECK-LABEL: @sink_i1_casts(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 %cond1, label %if, label %end
+; CHECK: if:
+; CHECK-NEXT: br label %end
+; CHECK: end:
+; CHECK-NEXT: [[PHI_IN:%.*]] = phi i1 [ %cond1, %entry ], [ %cond2, %if ]
+; CHECK-NEXT: [[PHI:%.*]] = zext i1 [[PHI_IN]] to i64
+; CHECK-NEXT: ret i64 [[PHI]]
+;
+entry:
+ %z1 = zext i1 %cond1 to i64
+ br i1 %cond1, label %if, label %end
+
+if:
+ %z2 = zext i1 %cond2 to i64
+ br label %end
+
+end:
+ %phi = phi i64 [ %z1, %entry ], [ %z2, %if ]
+ ret i64 %phi
+}
+
diff --git a/test/Transforms/InstCombine/zext.ll b/test/Transforms/InstCombine/zext.ll
index 740509809d1c..887d839cb8c7 100644
--- a/test/Transforms/InstCombine/zext.ll
+++ b/test/Transforms/InstCombine/zext.ll
@@ -35,7 +35,7 @@ define <2 x i64> @test3(<2 x i64> %A) {
define <2 x i64> @test4(<2 x i64> %A) {
; CHECK-LABEL: @test4(
-; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i64> %A, <i64 4294967295, i64 4294967295>
+; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i64> %A, <i64 63, i64 63>
; CHECK-NEXT: [[XOR:%.*]] = and <2 x i64> [[TMP1]], <i64 23, i64 42>
; CHECK-NEXT: ret <2 x i64> [[XOR]]
;
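The new constant comes from demanded-bits reasoning: the following 'and' with <i64 23, i64 42> only demands the low six bits, so the all-ones xor mask can shrink to 63. A Python spot check (illustrative, not part of the patch):

    for x in range(1 << 16):
        assert (x ^ 0xFFFFFFFF) & 23 == (x ^ 63) & 23
        assert (x ^ 0xFFFFFFFF) & 42 == (x ^ 63) & 42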
diff --git a/test/Transforms/InstSimplify/AndOrXor.ll b/test/Transforms/InstSimplify/AndOrXor.ll
index c6959d72961d..33fd978277d4 100644
--- a/test/Transforms/InstSimplify/AndOrXor.ll
+++ b/test/Transforms/InstSimplify/AndOrXor.ll
@@ -1,6 +1,28 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instsimplify -S | FileCheck %s
+; add nsw (xor X, signbit), signbit --> X
+
+define <2 x i32> @add_nsw_signbit(<2 x i32> %x) {
+; CHECK-LABEL: @add_nsw_signbit(
+; CHECK-NEXT: ret <2 x i32> %x
+;
+ %y = xor <2 x i32> %x, <i32 -2147483648, i32 -2147483648>
+ %z = add nsw <2 x i32> %y, <i32 -2147483648, i32 -2147483648>
+ ret <2 x i32> %z
+}
+
+; add nuw (xor X, signbit), signbit --> X
+
+define <2 x i5> @add_nuw_signbit(<2 x i5> %x) {
+; CHECK-LABEL: @add_nuw_signbit(
+; CHECK-NEXT: ret <2 x i5> %x
+;
+ %y = xor <2 x i5> %x, <i5 -16, i5 -16>
+ %z = add nuw <2 x i5> %y, <i5 -16, i5 -16>
+ ret <2 x i5> %z
+}
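Both tests use the identity (x ^ signbit) + signbit == x modulo 2^n: the xor flips the top bit, and adding the sign bit flips it back with any carry discarded. Python check for n = 32 (illustrative):

    SIGNBIT, MASK = 1 << 31, (1 << 32) - 1
    for x in (0, 1, 0x7FFFFFFF, 0x80000000, 0xDEADBEEF, MASK):
        assert ((x ^ SIGNBIT) + SIGNBIT) & MASK == x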
+
define i64 @pow2(i32 %x) {
; CHECK-LABEL: @pow2(
; CHECK-NEXT: [[NEGX:%.*]] = sub i32 0, %x
diff --git a/test/Transforms/InstSimplify/addsub.ll b/test/Transforms/InstSimplify/addsub.ll
new file mode 100644
index 000000000000..2f19a4d205e7
--- /dev/null
+++ b/test/Transforms/InstSimplify/addsub.ll
@@ -0,0 +1,78 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instsimplify -S | FileCheck %s
+
+define i1 @test1(i1 %a) {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: ret i1 true
+;
+ %b = xor i1 %a, true
+ %res = sub i1 %a, %b
+ ret i1 %res
+}
+
+define <2 x i1> @test2(<2 x i1> %a) {
+; CHECK-LABEL: @test2(
+; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true>
+;
+ %b = xor <2 x i1> %a, <i1 true, i1 true>
+ %res = sub <2 x i1> %a, %b
+ ret <2 x i1> %res
+}
+
+define i1 @test5(i1 %a) {
+; CHECK-LABEL: @test5(
+; CHECK-NEXT: ret i1 false
+;
+ %res = add i1 %a, %a
+ ret i1 %res
+}
+
+define <2 x i1> @test6(<2 x i1> %a) {
+; CHECK-LABEL: @test6(
+; CHECK-NEXT: ret <2 x i1> zeroinitializer
+;
+ %res = add <2 x i1> %a, %a
+ ret <2 x i1> %res
+}
+
+define i1 @test7(i1 %a) {
+; CHECK-LABEL: @test7(
+; CHECK-NEXT: ret i1 [[A:%.*]]
+;
+ %c = xor i1 %a, true
+ %res = add i1 %c, true
+ ret i1 %res
+}
+
+; TODO: simplify this to %a
+define i1 @test8(i1 %a) {
+; CHECK-LABEL: @test8(
+; CHECK-NEXT: [[C:%.*]] = add i1 [[A:%.*]], true
+; CHECK-NEXT: [[RES:%.*]] = xor i1 [[C]], true
+; CHECK-NEXT: ret i1 [[RES]]
+;
+ %c = add i1 %a, true
+ %res = xor i1 %c, true
+ ret i1 %res
+}
+
+define i1 @test9(i1 %a) {
+; CHECK-LABEL: @test9(
+; CHECK-NEXT: ret i1 [[A:%.*]]
+;
+ %c = xor i1 %a, true
+ %res = sub i1 %c, true
+ ret i1 %res
+}
+
+; TODO: simplify this to %a
+define i1 @test10(i1 %a) {
+; CHECK-LABEL: @test10(
+; CHECK-NEXT: [[C:%.*]] = sub i1 [[A:%.*]], true
+; CHECK-NEXT: [[RES:%.*]] = xor i1 [[C]], true
+; CHECK-NEXT: ret i1 [[RES]]
+;
+ %c = sub i1 %a, true
+ %res = xor i1 %c, true
+ ret i1 %res
+}
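Every test in this file reduces to arithmetic modulo 2: in i1, add and sub are both xor. A Python spot check of the simplified cases (illustrative, not from the patch):

    for a in (0, 1):
        assert (a - (a ^ 1)) % 2 == 1       # test1: a - ~a == true
        assert (a + a) % 2 == 0             # test5: a + a == false
        assert ((a ^ 1) + 1) % 2 == a       # test7: ~a + true == a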
diff --git a/test/Transforms/InstSimplify/assume.ll b/test/Transforms/InstSimplify/assume.ll
index 2487a9c8bb15..66f2120f2928 100644
--- a/test/Transforms/InstSimplify/assume.ll
+++ b/test/Transforms/InstSimplify/assume.ll
@@ -1,5 +1,10 @@
; NOTE: Assertions have been autogenerated by update_test_checks.py
-; RUN: opt -instsimplify -S < %s | FileCheck %s
+; RUN: opt -instsimplify -S < %s 2>&1 -pass-remarks-analysis=.* | FileCheck %s
+
+; Verify that analysis remarks are emitted for the 2nd and 3rd tests.
+
+; CHECK: remark: /tmp/s.c:1:13: Detected conflicting code assumptions.
+; CHECK: remark: /tmp/s.c:4:10: Detected conflicting code assumptions.
define void @test1() {
; CHECK-LABEL: @test1(
@@ -10,5 +15,58 @@ define void @test1() {
}
+; The alloca guarantees that the low bits of %a are zero because of alignment.
+; The assume says the opposite. The assume is processed last, so its value
+; determines the return value. There's no way to win (we can't undo transforms
+; that already happened based on half-truths), so just don't crash.
+
+define i64 @PR31809() !dbg !7 {
+; CHECK-LABEL: @PR31809(
+; CHECK-NEXT: ret i64 3
+;
+ %a = alloca i32
+ %t1 = ptrtoint i32* %a to i64, !dbg !9
+ %cond = icmp eq i64 %t1, 3
+ call void @llvm.assume(i1 %cond)
+ ret i64 %t1
+}
+
+; Similar to above: there's no way to know which assumption is truthful,
+; so just don't crash. The second icmp+assume gets processed later, so that
+; determines the return value.
+
+define i8 @conflicting_assumptions(i8 %x) !dbg !10 {
+; CHECK-LABEL: @conflicting_assumptions(
+; CHECK-NEXT: call void @llvm.assume(i1 false)
+; CHECK-NEXT: [[COND2:%.*]] = icmp eq i8 %x, 4
+; CHECK-NEXT: call void @llvm.assume(i1 [[COND2]])
+; CHECK-NEXT: ret i8 5
+;
+ %add = add i8 %x, 1, !dbg !11
+ %cond1 = icmp eq i8 %x, 3
+ call void @llvm.assume(i1 %cond1)
+ %cond2 = icmp eq i8 %x, 4
+ call void @llvm.assume(i1 %cond2)
+ ret i8 %add
+}
+
declare void @llvm.assume(i1) nounwind
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 4.0.0 (trunk 282540) (llvm/trunk 282542)", isOptimized: true, runtimeVersion: 0, emissionKind: LineTablesOnly, enums: !2)
+!1 = !DIFile(filename: "/tmp/s.c", directory: "/tmp")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 1, !"PIC Level", i32 2}
+!6 = !{!"clang version 4.0.0 (trunk 282540) (llvm/trunk 282542)"}
+!7 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: true, unit: !0, variables: !2)
+!8 = !DISubroutineType(types: !2)
+!9 = !DILocation(line: 1, column: 13, scope: !7)
+!10 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 3, type: !8, isLocal: false, isDefinition: true, scopeLine: 3, isOptimized: true, unit: !0, variables: !2)
+!11 = !DILocation(line: 4, column: 10, scope: !10)
+!12 = !DILocation(line: 4, column: 3, scope: !10)
+
diff --git a/test/Transforms/InstSimplify/bitreverse.ll b/test/Transforms/InstSimplify/bitreverse.ll
new file mode 100644
index 000000000000..d87b68831fe5
--- /dev/null
+++ b/test/Transforms/InstSimplify/bitreverse.ll
@@ -0,0 +1,31 @@
+; RUN: opt < %s -S -instsimplify | FileCheck %s
+
+declare i32 @llvm.bitreverse.i32(i32)
+
+; CHECK-LABEL: @test1(
+; CHECK: ret i1 false
+define i1 @test1(i32 %arg) {
+ %a = or i32 %arg, 1
+ %b = call i32 @llvm.bitreverse.i32(i32 %a)
+ %res = icmp eq i32 %b, 0
+ ret i1 %res
+}
+
+; CHECK-LABEL: @test2(
+; CHECK: ret i1 false
+define i1 @test2(i32 %arg) {
+ %a = or i32 %arg, 1024
+ %b = call i32 @llvm.bitreverse.i32(i32 %a)
+ %res = icmp eq i32 %b, 0
+ ret i1 %res
+}
+
+; CHECK-LABEL: @test3(
+; CHECK: ret i1 false
+define i1 @test3(i32 %arg) {
+ %a = and i32 %arg, 1
+ %b = call i32 @llvm.bitreverse.i32(i32 %a)
+ %and = and i32 %b, 1
+ %res = icmp eq i32 %and, 1
+ ret i1 %res
+}
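The rationale: bitreverse only permutes bits, so forcing any bit on with 'or' forces a nonzero result, and bit 0 of the output comes from bit 31 of the input, which test3's 'and' has already cleared. A Python model (bitreverse32 is a hypothetical helper, not an LLVM API):

    def bitreverse32(v):
        return int(format(v & 0xFFFFFFFF, '032b')[::-1], 2)

    assert all(bitreverse32(x | 1) != 0 for x in (0, 5, 0xFFFFFFFE))   # test1
    assert bitreverse32(1) & 1 == 0   # bit 0 of the result is old bit 31 (test3)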
diff --git a/test/Transforms/InstSimplify/div.ll b/test/Transforms/InstSimplify/div.ll
index b8ce34aaa37e..f096719359dc 100644
--- a/test/Transforms/InstSimplify/div.ll
+++ b/test/Transforms/InstSimplify/div.ll
@@ -1,10 +1,64 @@
; RUN: opt < %s -instsimplify -S | FileCheck %s
+; Division-by-zero is undef. UB in any vector lane means the whole op is undef.
+
+define <2 x i8> @sdiv_zero_elt_vec_constfold(<2 x i8> %x) {
+; CHECK-LABEL: @sdiv_zero_elt_vec_constfold(
+; CHECK-NEXT: ret <2 x i8> undef
+;
+ %div = sdiv <2 x i8> <i8 1, i8 2>, <i8 0, i8 -42>
+ ret <2 x i8> %div
+}
+
+define <2 x i8> @udiv_zero_elt_vec_constfold(<2 x i8> %x) {
+; CHECK-LABEL: @udiv_zero_elt_vec_constfold(
+; CHECK-NEXT: ret <2 x i8> undef
+;
+ %div = udiv <2 x i8> <i8 1, i8 2>, <i8 42, i8 0>
+ ret <2 x i8> %div
+}
+
+define <2 x i8> @sdiv_zero_elt_vec(<2 x i8> %x) {
+; CHECK-LABEL: @sdiv_zero_elt_vec(
+; CHECK-NEXT: ret <2 x i8> undef
+;
+ %div = sdiv <2 x i8> %x, <i8 -42, i8 0>
+ ret <2 x i8> %div
+}
+
+define <2 x i8> @udiv_zero_elt_vec(<2 x i8> %x) {
+; CHECK-LABEL: @udiv_zero_elt_vec(
+; CHECK-NEXT: ret <2 x i8> undef
+;
+ %div = udiv <2 x i8> %x, <i8 0, i8 42>
+ ret <2 x i8> %div
+}
+
+; Division-by-zero is undef. UB in any vector lane means the whole op is undef.
+; Thus, we can simplify this: if any element of 'y' is 0, we can do anything,
+; so we may assume that every element of 'y' is 1 (true).
+
+define <2 x i1> @sdiv_bool_vec(<2 x i1> %x, <2 x i1> %y) {
+; CHECK-LABEL: @sdiv_bool_vec(
+; CHECK-NEXT: ret <2 x i1> %x
+;
+ %div = sdiv <2 x i1> %x, %y
+ ret <2 x i1> %div
+}
+
+define <2 x i1> @udiv_bool_vec(<2 x i1> %x, <2 x i1> %y) {
+; CHECK-LABEL: @udiv_bool_vec(
+; CHECK-NEXT: ret <2 x i1> %x
+;
+ %div = udiv <2 x i1> %x, %y
+ ret <2 x i1> %div
+}
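Lane arithmetic sketch (Python, illustrative): udiv by true is division by 1, and sdiv by true is division by -1 (i1 true is -1 as a signed value); both return x once the quotient is truncated back to one bit.

    for x in (0, 1):
        assert x // 1 == x                   # udiv lane
        signed = -x                          # i1 as signed: true == -1
        assert (signed // -1) & 1 == x       # sdiv lane, truncated to 1 bit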
+
declare i32 @external()
define i32 @div1() {
; CHECK-LABEL: @div1(
-; CHECK: [[CALL:%.*]] = call i32 @external(), !range !0
+; CHECK-NEXT: [[CALL:%.*]] = call i32 @external(), !range !0
; CHECK-NEXT: ret i32 0
;
%call = call i32 @external(), !range !0
diff --git a/test/Transforms/InstSimplify/fdiv.ll b/test/Transforms/InstSimplify/fdiv.ll
index bb7f443f4238..6643afd81471 100644
--- a/test/Transforms/InstSimplify/fdiv.ll
+++ b/test/Transforms/InstSimplify/fdiv.ll
@@ -1,9 +1,25 @@
-; NOTE: Assertions have been autogenerated by update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instsimplify -S | FileCheck %s
+define float @fdiv_constant_fold() {
+; CHECK-LABEL: @fdiv_constant_fold(
+; CHECK-NEXT: ret float 1.500000e+00
+;
+ %f = fdiv float 3.0, 2.0
+ ret float %f
+}
+
+define float @frem_constant_fold() {
+; CHECK-LABEL: @frem_constant_fold(
+; CHECK-NEXT: ret float 1.000000e+00
+;
+ %f = frem float 3.0, 2.0
+ ret float %f
+}
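The expected constants are plain IEEE arithmetic; note that LLVM's frem follows fmod semantics (the result takes the sign of the dividend), matching C's fmod rather than Python's % operator. Illustrative check:

    import math
    assert 3.0 / 2.0 == 1.5
    assert math.fmod(3.0, 2.0) == 1.0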
+
define double @fdiv_of_undef(double %X) {
; CHECK-LABEL: @fdiv_of_undef(
-; CHECK: ret double undef
+; CHECK-NEXT: ret double undef
;
; undef / X -> undef
%r = fdiv double undef, %X
@@ -12,7 +28,7 @@ define double @fdiv_of_undef(double %X) {
define double @fdiv_by_undef(double %X) {
; CHECK-LABEL: @fdiv_by_undef(
-; CHECK: ret double undef
+; CHECK-NEXT: ret double undef
;
; X / undef -> undef
%r = fdiv double %X, undef
diff --git a/test/Transforms/InstSimplify/floating-point-arithmetic.ll b/test/Transforms/InstSimplify/floating-point-arithmetic.ll
index dfdb88dcc858..e635032e6b71 100644
--- a/test/Transforms/InstSimplify/floating-point-arithmetic.ll
+++ b/test/Transforms/InstSimplify/floating-point-arithmetic.ll
@@ -104,6 +104,7 @@ define float @PR22688(float %x) {
}
declare float @llvm.fabs.f32(float)
+declare float @llvm.sqrt.f32(float)
; CHECK-LABEL: @fabs_select_positive_constants(
; CHECK: %select = select i1 %cmp, float 1.000000e+00, float 2.000000e+00
@@ -195,3 +196,56 @@ define float @fabs_select_negnan_zero(float addrspace(1)* %out, i32 %c) {
%fabs = call float @llvm.fabs.f32(float %select)
ret float %fabs
}
+
+; CHECK-LABEL: @fabs_sqrt
+; CHECK: call float @llvm.sqrt.f32
+; CHECK: call float @llvm.fabs.f32
+define float @fabs_sqrt(float %a) {
+; The fabs can't be eliminated because llvm.sqrt.f32 may return -0 or NaN with
+; an arbitrary sign bit.
+ %sqrt = call float @llvm.sqrt.f32(float %a)
+ %fabs = call float @llvm.fabs.f32(float %sqrt)
+ ret float %fabs
+}
+
+; CHECK-LABEL: @fabs_sqrt_nnan
+; CHECK: call nnan float @llvm.sqrt.f32
+; CHECK: call float @llvm.fabs.f32
+define float @fabs_sqrt_nnan(float %a) {
+; The fabs can't be eliminated because the nnan sqrt may still return -0.
+ %sqrt = call nnan float @llvm.sqrt.f32(float %a)
+ %fabs = call float @llvm.fabs.f32(float %sqrt)
+ ret float %fabs
+}
+
+; CHECK-LABEL: @fabs_sqrt_nsz
+; CHECK: call nsz float @llvm.sqrt.f32
+; CHECK: call float @llvm.fabs.f32
+define float @fabs_sqrt_nsz(float %a) {
+; The fabs can't be eliminated because the nsz sqrt may still return NaN.
+ %sqrt = call nsz float @llvm.sqrt.f32(float %a)
+ %fabs = call float @llvm.fabs.f32(float %sqrt)
+ ret float %fabs
+}
+
+; CHECK-LABEL: @fabs_sqrt_nnan_nsz
+; CHECK: call nnan nsz float @llvm.sqrt.f32
+; CHECK-NOT: call float @llvm.fabs.f32
+define float @fabs_sqrt_nnan_nsz(float %a) {
+; The fabs can be eliminated because we're nsz and nnan.
+ %sqrt = call nnan nsz float @llvm.sqrt.f32(float %a)
+ %fabs = call float @llvm.fabs.f32(float %sqrt)
+ ret float %fabs
+}
+
+; CHECK-LABEL: @fabs_sqrt_nnan_fabs
+; CHECK: call float @llvm.fabs.f32
+; CHECK: call nnan float @llvm.sqrt.f32
+; CHECK-NOT: call float @llvm.fabs.f32
+define float @fabs_sqrt_nnan_fabs(float %a) {
+; The second fabs can be eliminated because the operand to sqrt cannot be -0.
+ %b = call float @llvm.fabs.f32(float %a)
+ %sqrt = call nnan float @llvm.sqrt.f32(float %b)
+ %fabs = call float @llvm.fabs.f32(float %sqrt)
+ ret float %fabs
+}
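IEEE-754 sqrt preserves the sign of zero, which is why nnan alone never licenses removing the fabs. A Python demonstration (CPython's math.sqrt wraps the C library's IEEE sqrt):

    import math
    assert math.sqrt(-0.0) == 0.0                        # compares equal...
    assert math.copysign(1.0, math.sqrt(-0.0)) == -1.0   # ...but the sign bit is set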
diff --git a/test/Transforms/InstSimplify/icmp-constant.ll b/test/Transforms/InstSimplify/icmp-constant.ll
index 85de1a45ea27..918722299b59 100644
--- a/test/Transforms/InstSimplify/icmp-constant.ll
+++ b/test/Transforms/InstSimplify/icmp-constant.ll
@@ -416,3 +416,158 @@ define <2 x i1> @tautological9_vec(<2 x i32> %x) {
ret <2 x i1> %cmp
}
+; The upper bound of the 'add' is 0.
+
+define i1 @add_nsw_neg_const1(i32 %x) {
+; CHECK-LABEL: @add_nsw_neg_const1(
+; CHECK-NEXT: ret i1 false
+;
+ %add = add nsw i32 %x, -2147483647
+ %cmp = icmp sgt i32 %add, 0
+ ret i1 %cmp
+}
+
+; InstCombine can fold this, but not InstSimplify.
+
+define i1 @add_nsw_neg_const2(i32 %x) {
+; CHECK-LABEL: @add_nsw_neg_const2(
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 %x, -2147483647
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[ADD]], -1
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %add = add nsw i32 %x, -2147483647
+ %cmp = icmp sgt i32 %add, -1
+ ret i1 %cmp
+}
+
+; The upper bound of the 'add' is 1 (move the constants to prove we're doing range-based analysis).
+
+define i1 @add_nsw_neg_const3(i32 %x) {
+; CHECK-LABEL: @add_nsw_neg_const3(
+; CHECK-NEXT: ret i1 false
+;
+ %add = add nsw i32 %x, -2147483646
+ %cmp = icmp sgt i32 %add, 1
+ ret i1 %cmp
+}
+
+; InstCombine can fold this, but not InstSimplify.
+
+define i1 @add_nsw_neg_const4(i32 %x) {
+; CHECK-LABEL: @add_nsw_neg_const4(
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 %x, -2147483646
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[ADD]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %add = add nsw i32 %x, -2147483646
+ %cmp = icmp sgt i32 %add, 0
+ ret i1 %cmp
+}
+
+; The upper bound of the 'add' is 2147483647 - 42 = 2147483605 (move the constants again and try a different cmp predicate).
+
+define i1 @add_nsw_neg_const5(i32 %x) {
+; CHECK-LABEL: @add_nsw_neg_const5(
+; CHECK-NEXT: ret i1 true
+;
+ %add = add nsw i32 %x, -42
+ %cmp = icmp ne i32 %add, 2147483606
+ ret i1 %cmp
+}
+
+; InstCombine can fold this, but not InstSimplify.
+
+define i1 @add_nsw_neg_const6(i32 %x) {
+; CHECK-LABEL: @add_nsw_neg_const6(
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 %x, -42
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[ADD]], 2147483605
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %add = add nsw i32 %x, -42
+ %cmp = icmp ne i32 %add, 2147483605
+ ret i1 %cmp
+}
+
+; The lower bound of the 'add' is -1.
+
+define i1 @add_nsw_pos_const1(i32 %x) {
+; CHECK-LABEL: @add_nsw_pos_const1(
+; CHECK-NEXT: ret i1 false
+;
+ %add = add nsw i32 %x, 2147483647
+ %cmp = icmp slt i32 %add, -1
+ ret i1 %cmp
+}
+
+; InstCombine can fold this, but not InstSimplify.
+
+define i1 @add_nsw_pos_const2(i32 %x) {
+; CHECK-LABEL: @add_nsw_pos_const2(
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 %x, 2147483647
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[ADD]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %add = add nsw i32 %x, 2147483647
+ %cmp = icmp slt i32 %add, 0
+ ret i1 %cmp
+}
+
+; The lower bound of the 'add' is -2 (move the constants to prove we're doing range-based analysis).
+
+define i1 @add_nsw_pos_const3(i32 %x) {
+; CHECK-LABEL: @add_nsw_pos_const3(
+; CHECK-NEXT: ret i1 false
+;
+ %add = add nsw i32 %x, 2147483646
+ %cmp = icmp slt i32 %add, -2
+ ret i1 %cmp
+}
+
+; InstCombine can fold this, but not InstSimplify.
+
+define i1 @add_nsw_pos_const4(i32 %x) {
+; CHECK-LABEL: @add_nsw_pos_const4(
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 %x, 2147483646
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[ADD]], -1
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %add = add nsw i32 %x, 2147483646
+ %cmp = icmp slt i32 %add, -1
+ ret i1 %cmp
+}
+
+; The lower bound of the 'add' is -2147483648 + 42 = -2147483606 (move the constants again and change the cmp predicate).
+
+define i1 @add_nsw_pos_const5(i32 %x) {
+; CHECK-LABEL: @add_nsw_pos_const5(
+; CHECK-NEXT: ret i1 false
+;
+ %add = add nsw i32 %x, 42
+ %cmp = icmp eq i32 %add, -2147483607
+ ret i1 %cmp
+}
+
+; InstCombine can fold this, but not InstSimplify.
+
+define i1 @add_nsw_pos_const6(i32 %x) {
+; CHECK-LABEL: @add_nsw_pos_const6(
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 %x, 42
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[ADD]], -2147483606
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %add = add nsw i32 %x, 42
+ %cmp = icmp eq i32 %add, -2147483606
+ ret i1 %cmp
+}
+
+; Verify that vectors work too.
+
+define <2 x i1> @add_nsw_pos_const5_splat_vec(<2 x i32> %x) {
+; CHECK-LABEL: @add_nsw_pos_const5_splat_vec(
+; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true>
+;
+ %add = add nsw <2 x i32> %x, <i32 42, i32 42>
+ %cmp = icmp ne <2 x i32> %add, <i32 -2147483607, i32 -2147483607>
+ ret <2 x i1> %cmp
+}
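All of these results fall out of one nsw range computation: with no signed wrap, x + C stays inside [INT32_MIN, INT32_MAX], so the reachable sums are bounded by the endpoints. Spot-checking the bounds quoted in the comments (Python, illustrative):

    INT_MIN, INT_MAX = -2**31, 2**31 - 1
    assert INT_MAX + (-2147483647) == 0             # add_nsw_neg_const1/2
    assert INT_MAX + (-42) == 2147483605            # add_nsw_neg_const5/6
    assert INT_MIN + 2147483647 == -1               # add_nsw_pos_const1/2
    assert INT_MIN + 42 == -2147483606              # add_nsw_pos_const5/6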
+
diff --git a/test/Transforms/InstSimplify/mul.ll b/test/Transforms/InstSimplify/mul.ll
new file mode 100644
index 000000000000..0bf8f699a686
--- /dev/null
+++ b/test/Transforms/InstSimplify/mul.ll
@@ -0,0 +1,11 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instsimplify -S | FileCheck %s
+
+define <2 x i1> @test1(<2 x i1> %a) {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: ret <2 x i1> zeroinitializer
+;
+ %b = and <2 x i1> %a, <i1 true, i1 false>
+ %res = mul <2 x i1> %b, <i1 false, i1 true>
+ ret <2 x i1> %res
+}
diff --git a/test/Transforms/InstSimplify/rem.ll b/test/Transforms/InstSimplify/rem.ll
index c73d34346ded..b7f18f36b4b9 100644
--- a/test/Transforms/InstSimplify/rem.ll
+++ b/test/Transforms/InstSimplify/rem.ll
@@ -1,9 +1,63 @@
; NOTE: Assertions have been autogenerated by update_test_checks.py
; RUN: opt < %s -instsimplify -S | FileCheck %s
+; Division-by-zero is undef. UB in any vector lane means the whole op is undef.
+
+define <2 x i8> @srem_zero_elt_vec_constfold(<2 x i8> %x) {
+; CHECK-LABEL: @srem_zero_elt_vec_constfold(
+; CHECK-NEXT: ret <2 x i8> undef
+;
+ %rem = srem <2 x i8> <i8 1, i8 2>, <i8 0, i8 -42>
+ ret <2 x i8> %rem
+}
+
+define <2 x i8> @urem_zero_elt_vec_constfold(<2 x i8> %x) {
+; CHECK-LABEL: @urem_zero_elt_vec_constfold(
+; CHECK-NEXT: ret <2 x i8> undef
+;
+ %rem = urem <2 x i8> <i8 1, i8 2>, <i8 42, i8 0>
+ ret <2 x i8> %rem
+}
+
+define <2 x i8> @srem_zero_elt_vec(<2 x i8> %x) {
+; CHECK-LABEL: @srem_zero_elt_vec(
+; CHECK-NEXT: ret <2 x i8> undef
+;
+ %rem = srem <2 x i8> %x, <i8 -42, i8 0>
+ ret <2 x i8> %rem
+}
+
+define <2 x i8> @urem_zero_elt_vec(<2 x i8> %x) {
+; CHECK-LABEL: @urem_zero_elt_vec(
+; CHECK-NEXT: ret <2 x i8> undef
+;
+ %rem = urem <2 x i8> %x, <i8 0, i8 42>
+ ret <2 x i8> %rem
+}
+
+; Division-by-zero is undef. UB in any vector lane means the whole op is undef.
+; Thus, we can simplify this: if any element of 'y' is 0, we can do anything,
+; so we may assume that every element of 'y' is 1 (true).
+
+define <2 x i1> @srem_bool_vec(<2 x i1> %x, <2 x i1> %y) {
+; CHECK-LABEL: @srem_bool_vec(
+; CHECK-NEXT: ret <2 x i1> zeroinitializer
+;
+ %rem = srem <2 x i1> %x, %y
+ ret <2 x i1> %rem
+}
+
+define <2 x i1> @urem_bool_vec(<2 x i1> %x, <2 x i1> %y) {
+; CHECK-LABEL: @urem_bool_vec(
+; CHECK-NEXT: ret <2 x i1> zeroinitializer
+;
+ %rem = urem <2 x i1> %x, %y
+ ret <2 x i1> %rem
+}
+
define i32 @select1(i32 %x, i1 %b) {
; CHECK-LABEL: @select1(
-; CHECK: ret i32 0
+; CHECK-NEXT: ret i32 0
;
%rhs = select i1 %b, i32 %x, i32 1
%rem = srem i32 %x, %rhs
@@ -12,7 +66,7 @@ define i32 @select1(i32 %x, i1 %b) {
define i32 @select2(i32 %x, i1 %b) {
; CHECK-LABEL: @select2(
-; CHECK: ret i32 0
+; CHECK-NEXT: ret i32 0
;
%rhs = select i1 %b, i32 %x, i32 1
%rem = urem i32 %x, %rhs
@@ -21,40 +75,40 @@ define i32 @select2(i32 %x, i1 %b) {
define i32 @rem1(i32 %x, i32 %n) {
; CHECK-LABEL: @rem1(
-; CHECK: [[MOD:%.*]] = srem i32 %x, %n
+; CHECK-NEXT: [[MOD:%.*]] = srem i32 %x, %n
; CHECK-NEXT: ret i32 [[MOD]]
;
- %mod = srem i32 %x, %n
- %mod1 = srem i32 %mod, %n
- ret i32 %mod1
+ %mod = srem i32 %x, %n
+ %mod1 = srem i32 %mod, %n
+ ret i32 %mod1
}
define i32 @rem2(i32 %x, i32 %n) {
; CHECK-LABEL: @rem2(
-; CHECK: [[MOD:%.*]] = urem i32 %x, %n
+; CHECK-NEXT: [[MOD:%.*]] = urem i32 %x, %n
; CHECK-NEXT: ret i32 [[MOD]]
;
- %mod = urem i32 %x, %n
- %mod1 = urem i32 %mod, %n
- ret i32 %mod1
+ %mod = urem i32 %x, %n
+ %mod1 = urem i32 %mod, %n
+ ret i32 %mod1
}
define i32 @rem3(i32 %x, i32 %n) {
; CHECK-LABEL: @rem3(
-; CHECK: [[MOD:%.*]] = srem i32 %x, %n
+; CHECK-NEXT: [[MOD:%.*]] = srem i32 %x, %n
; CHECK-NEXT: [[MOD1:%.*]] = urem i32 [[MOD]], %n
; CHECK-NEXT: ret i32 [[MOD1]]
;
- %mod = srem i32 %x, %n
- %mod1 = urem i32 %mod, %n
- ret i32 %mod1
+ %mod = srem i32 %x, %n
+ %mod1 = urem i32 %mod, %n
+ ret i32 %mod1
}
declare i32 @external()
define i32 @rem4() {
; CHECK-LABEL: @rem4(
-; CHECK: [[CALL:%.*]] = call i32 @external(), !range !0
+; CHECK-NEXT: [[CALL:%.*]] = call i32 @external(), !range !0
; CHECK-NEXT: ret i32 [[CALL]]
;
%call = call i32 @external(), !range !0
diff --git a/test/Transforms/InstSimplify/select.ll b/test/Transforms/InstSimplify/select.ll
index 1acb5c469d37..cb2502cf63c9 100644
--- a/test/Transforms/InstSimplify/select.ll
+++ b/test/Transforms/InstSimplify/select.ll
@@ -402,7 +402,8 @@ define i32* @select_icmp_pointers(i32* %x, i32* %y) {
ret i32* %sel
}
-; FIXME: If the condition is known, we don't need to select.
+; If the condition is known, we don't need to select, but we're not
+; doing this fold here to avoid compile-time cost.
declare void @llvm.assume(i1)
diff --git a/test/Transforms/InstSimplify/shift-knownbits.ll b/test/Transforms/InstSimplify/shift-knownbits.ll
index f50ea0582c6c..63b9b76fd22f 100644
--- a/test/Transforms/InstSimplify/shift-knownbits.ll
+++ b/test/Transforms/InstSimplify/shift-knownbits.ll
@@ -145,3 +145,46 @@ define i1 @shl_i1(i1 %a, i1 %b) {
ret i1 %shl
}
+; Simplify count leading/trailing zeros to zero if all valid bits are shifted out.
+
+declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone
+declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
+declare <2 x i8> @llvm.cttz.v2i8(<2 x i8>, i1) nounwind readnone
+declare <2 x i8> @llvm.ctlz.v2i8(<2 x i8>, i1) nounwind readnone
+
+define i32 @lshr_ctlz_zero_is_undef(i32 %x) {
+; CHECK-LABEL: @lshr_ctlz_zero_is_undef(
+; CHECK-NEXT: ret i32 0
+;
+ %ct = call i32 @llvm.ctlz.i32(i32 %x, i1 true)
+ %sh = lshr i32 %ct, 5
+ ret i32 %sh
+}
+
+define i32 @lshr_cttz_zero_is_undef(i32 %x) {
+; CHECK-LABEL: @lshr_cttz_zero_is_undef(
+; CHECK-NEXT: ret i32 0
+;
+ %ct = call i32 @llvm.cttz.i32(i32 %x, i1 true)
+ %sh = lshr i32 %ct, 5
+ ret i32 %sh
+}
+
+define <2 x i8> @lshr_ctlz_zero_is_undef_splat_vec(<2 x i8> %x) {
+; CHECK-LABEL: @lshr_ctlz_zero_is_undef_splat_vec(
+; CHECK-NEXT: ret <2 x i8> zeroinitializer
+;
+ %ct = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> %x, i1 true)
+ %sh = lshr <2 x i8> %ct, <i8 3, i8 3>
+ ret <2 x i8> %sh
+}
+
+define <2 x i8> @lshr_cttz_zero_is_undef_splat_vec(<2 x i8> %x) {
+; CHECK-LABEL: @lshr_cttz_zero_is_undef_splat_vec(
+; CHECK-NEXT: ret <2 x i8> zeroinitializer
+;
+ %ct = call <2 x i8> @llvm.cttz.v2i8(<2 x i8> %x, i1 true)
+ %sh = lshr <2 x i8> %ct, <i8 3, i8 3>
+ ret <2 x i8> %sh
+}
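Rationale: with the zero-is-undef flag set, ctlz/cttz of an i32 yield at most 31 (at most 7 for the i8 vector variants), so a logical shift right by 5 (or 3) clears every bit that can ever be set. A Python model of the scalar case (illustrative):

    for x in (1, 2, 0x80000000, 0x12345678):
        ctlz = 32 - x.bit_length()
        cttz = (x & -x).bit_length() - 1
        assert ctlz >> 5 == 0 and cttz >> 5 == 0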
+
diff --git a/test/Transforms/InstSimplify/shufflevector.ll b/test/Transforms/InstSimplify/shufflevector.ll
new file mode 100644
index 000000000000..c6d180da293f
--- /dev/null
+++ b/test/Transforms/InstSimplify/shufflevector.ll
@@ -0,0 +1,212 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instsimplify -S | FileCheck %s
+
+define <4 x i32> @const_folding(<4 x i32> %x) {
+; CHECK-LABEL: @const_folding(
+; CHECK-NEXT: ret <4 x i32> zeroinitializer
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> zeroinitializer, <4 x i32> <i32 5, i32 4, i32 5, i32 4>
+ ret <4 x i32> %shuf
+}
+
+define <4 x i32> @const_folding1(<4 x i32> %x) {
+; CHECK-LABEL: @const_folding1(
+; CHECK-NEXT: ret <4 x i32> <i32 5, i32 5, i32 5, i32 5>
+;
+ %shuf = shufflevector <4 x i32> <i32 5, i32 4, i32 5, i32 4>, <4 x i32> %x, <4 x i32> zeroinitializer
+ ret <4 x i32> %shuf
+}
+
+define <4 x i32> @const_folding_negative(<3 x i32> %x) {
+; CHECK-LABEL: @const_folding_negative(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <3 x i32> [[X:%.*]], <3 x i32> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 5, i32 4>
+; CHECK-NEXT: ret <4 x i32> [[SHUF]]
+;
+ %shuf = shufflevector <3 x i32> %x, <3 x i32> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 5, i32 4>
+ ret <4 x i32> %shuf
+}
+
+define <4 x i32> @splat_operand(<4 x i32> %x) {
+; CHECK-LABEL: @splat_operand(
+; CHECK-NEXT: [[SPLAT:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT: ret <4 x i32> [[SPLAT]]
+;
+ %splat = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> zeroinitializer
+ %shuf = shufflevector <4 x i32> %splat, <4 x i32> undef, <4 x i32> <i32 0, i32 3, i32 2, i32 1>
+ ret <4 x i32> %shuf
+}
+
+define <4 x i32> @splat_operand1(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @splat_operand1(
+; CHECK-NEXT: [[SPLAT:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> [[Y:%.*]], <4 x i32> zeroinitializer
+; CHECK-NEXT: ret <4 x i32> [[SPLAT]]
+;
+ %splat = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> zeroinitializer
+ %shuf = shufflevector <4 x i32> %splat, <4 x i32> undef, <4 x i32> <i32 0, i32 3, i32 2, i32 1>
+ ret <4 x i32> %shuf
+}
+
+define <4 x i32> @splat_operand2(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @splat_operand2(
+; CHECK-NEXT: [[SPLAT:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT: ret <4 x i32> [[SPLAT]]
+;
+ %splat = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> zeroinitializer
+ %shuf = shufflevector <4 x i32> %splat, <4 x i32> %y, <4 x i32> <i32 0, i32 3, i32 2, i32 1>
+ ret <4 x i32> %shuf
+}
+
+define <4 x i32> @splat_operand3(<4 x i32> %x) {
+; CHECK-LABEL: @splat_operand3(
+; CHECK-NEXT: [[SPLAT:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT: ret <4 x i32> [[SPLAT]]
+;
+ %splat = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> zeroinitializer
+ %shuf = shufflevector <4 x i32> zeroinitializer, <4 x i32> %splat, <4 x i32> <i32 7, i32 6, i32 5, i32 5>
+ ret <4 x i32> %shuf
+}
+
+define <8 x i32> @splat_operand_negative(<4 x i32> %x) {
+; CHECK-LABEL: @splat_operand_negative(
+; CHECK-NEXT: [[SPLAT:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[SPLAT]], <4 x i32> undef, <8 x i32> <i32 0, i32 3, i32 2, i32 1, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: ret <8 x i32> [[SHUF]]
+;
+ %splat = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> zeroinitializer
+ %shuf = shufflevector <4 x i32> %splat, <4 x i32> undef, <8 x i32> <i32 0, i32 3, i32 2, i32 1, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i32> %shuf
+}
+
+define <4 x i32> @splat_operand_negative2(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @splat_operand_negative2(
+; CHECK-NEXT: [[SPLAT:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[SPLAT]], <4 x i32> [[Y:%.*]], <4 x i32> <i32 0, i32 3, i32 4, i32 1>
+; CHECK-NEXT: ret <4 x i32> [[SHUF]]
+;
+ %splat = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> zeroinitializer
+ %shuf = shufflevector <4 x i32> %splat, <4 x i32> %y, <4 x i32> <i32 0, i32 3, i32 4, i32 1>
+ ret <4 x i32> %shuf
+}
+
+define <4 x i32> @splat_operand_negative3(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @splat_operand_negative3(
+; CHECK-NEXT: [[SPLAT:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[Y:%.*]], <4 x i32> [[SPLAT]], <4 x i32> <i32 0, i32 3, i32 4, i32 1>
+; CHECK-NEXT: ret <4 x i32> [[SHUF]]
+;
+ %splat = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> zeroinitializer
+ %shuf = shufflevector <4 x i32> %y, <4 x i32> %splat, <4 x i32> <i32 0, i32 3, i32 4, i32 1>
+ ret <4 x i32> %shuf
+}
+
+define <4 x i32> @splat_operand_negative4(<4 x i32> %x) {
+; CHECK-LABEL: @splat_operand_negative4(
+; CHECK-NEXT: [[SPLAT:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 2, i32 undef>
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[SPLAT]], <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
+; CHECK-NEXT: ret <4 x i32> [[SHUF]]
+;
+ %splat = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 2, i32 undef>
+ %shuf = shufflevector <4 x i32> %splat, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
+ ret <4 x i32> %shuf
+}
+
+define <4 x i32> @undef_mask(<4 x i32> %x) {
+; CHECK-LABEL: @undef_mask(
+; CHECK-NEXT: ret <4 x i32> undef
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> undef
+ ret <4 x i32> %shuf
+}
+
+define <4 x i32> @identity_mask_0(<4 x i32> %x) {
+; CHECK-LABEL: @identity_mask_0(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: ret <4 x i32> [[SHUF]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %shuf
+}
+
+define <4 x i32> @identity_mask_1(<4 x i32> %x) {
+; CHECK-LABEL: @identity_mask_1(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> undef, <4 x i32> [[X:%.*]], <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: ret <4 x i32> [[SHUF]]
+;
+ %shuf = shufflevector <4 x i32> undef, <4 x i32> %x, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ ret <4 x i32> %shuf
+}
+
+define <4 x i32> @pseudo_identity_mask(<4 x i32> %x) {
+; CHECK-LABEL: @pseudo_identity_mask(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> [[X]], <4 x i32> <i32 0, i32 1, i32 2, i32 7>
+; CHECK-NEXT: ret <4 x i32> [[SHUF]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
+ ret <4 x i32> %shuf
+}
+
+define <4 x i32> @const_operand(<4 x i32> %x) {
+; CHECK-LABEL: @const_operand(
+; CHECK-NEXT: ret <4 x i32> <i32 42, i32 45, i32 44, i32 43>
+;
+ %shuf = shufflevector <4 x i32> <i32 42, i32 43, i32 44, i32 45>, <4 x i32> %x, <4 x i32> <i32 0, i32 3, i32 2, i32 1>
+ ret <4 x i32> %shuf
+}
+
+define <4 x i32> @merge(<4 x i32> %x) {
+; CHECK-LABEL: @merge(
+; CHECK-NEXT: [[LOWER:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <2 x i32> <i32 1, i32 0>
+; CHECK-NEXT: [[UPPER:%.*]] = shufflevector <4 x i32> [[X]], <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: [[MERGED:%.*]] = shufflevector <2 x i32> [[UPPER]], <2 x i32> [[LOWER]], <4 x i32> <i32 3, i32 2, i32 0, i32 1>
+; CHECK-NEXT: ret <4 x i32> [[MERGED]]
+;
+ %lower = shufflevector <4 x i32> %x, <4 x i32> undef, <2 x i32> <i32 1, i32 0>
+ %upper = shufflevector <4 x i32> %x, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %merged = shufflevector <2 x i32> %upper, <2 x i32> %lower, <4 x i32> <i32 3, i32 2, i32 0, i32 1>
+ ret <4 x i32> %merged
+}
+
+define <8 x double> @extract_and_concat(<8 x double> %x) {
+; CHECK-LABEL: @extract_and_concat(
+; CHECK-NEXT: [[S1:%.*]] = shufflevector <8 x double> [[X:%.*]], <8 x double> undef, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[S2:%.*]] = shufflevector <8 x double> [[X]], <8 x double> undef, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: [[S3:%.*]] = shufflevector <8 x double> [[X]], <8 x double> undef, <2 x i32> <i32 4, i32 5>
+; CHECK-NEXT: [[S4:%.*]] = shufflevector <8 x double> [[X]], <8 x double> undef, <2 x i32> <i32 6, i32 7>
+; CHECK-NEXT: [[S5:%.*]] = shufflevector <2 x double> [[S1]], <2 x double> [[S2]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[S6:%.*]] = shufflevector <2 x double> [[S3]], <2 x double> [[S4]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[S7:%.*]] = shufflevector <4 x double> [[S5]], <4 x double> [[S6]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: ret <8 x double> [[S7]]
+;
+ %s1 = shufflevector <8 x double> %x, <8 x double> undef, <2 x i32> <i32 0, i32 1>
+ %s2 = shufflevector <8 x double> %x, <8 x double> undef, <2 x i32> <i32 2, i32 3>
+ %s3 = shufflevector <8 x double> %x, <8 x double> undef, <2 x i32> <i32 4, i32 5>
+ %s4 = shufflevector <8 x double> %x, <8 x double> undef, <2 x i32> <i32 6, i32 7>
+ %s5 = shufflevector <2 x double> %s1, <2 x double> %s2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %s6 = shufflevector <2 x double> %s3, <2 x double> %s4, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %s7 = shufflevector <4 x double> %s5, <4 x double> %s6, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x double> %s7
+}
+
+; This case has intermediate lane crossings.
+
+define <8 x i64> @PR30630(<8 x i64> %x) {
+; CHECK-LABEL: @PR30630(
+; CHECK-NEXT: [[S1:%.*]] = shufflevector <8 x i64> [[X:%.*]], <8 x i64> undef, <2 x i32> <i32 0, i32 4>
+; CHECK-NEXT: [[S2:%.*]] = shufflevector <8 x i64> [[X]], <8 x i64> undef, <2 x i32> <i32 1, i32 5>
+; CHECK-NEXT: [[S3:%.*]] = shufflevector <8 x i64> [[X]], <8 x i64> undef, <2 x i32> <i32 2, i32 6>
+; CHECK-NEXT: [[S4:%.*]] = shufflevector <8 x i64> [[X]], <8 x i64> undef, <2 x i32> <i32 3, i32 7>
+; CHECK-NEXT: [[S5:%.*]] = shufflevector <2 x i64> [[S1]], <2 x i64> [[S2]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[S6:%.*]] = shufflevector <2 x i64> [[S3]], <2 x i64> [[S4]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[S7:%.*]] = shufflevector <4 x i64> [[S5]], <4 x i64> [[S6]], <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
+; CHECK-NEXT: ret <8 x i64> [[S7]]
+;
+ %s1 = shufflevector <8 x i64> %x, <8 x i64> undef, <2 x i32> <i32 0, i32 4>
+ %s2 = shufflevector <8 x i64> %x, <8 x i64> undef, <2 x i32> <i32 1, i32 5>
+ %s3 = shufflevector <8 x i64> %x, <8 x i64> undef, <2 x i32> <i32 2, i32 6>
+ %s4 = shufflevector <8 x i64> %x, <8 x i64> undef, <2 x i32> <i32 3, i32 7>
+ %s5 = shufflevector <2 x i64> %s1, <2 x i64> %s2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %s6 = shufflevector <2 x i64> %s3, <2 x i64> %s4, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %s7 = shufflevector <4 x i64> %s5, <4 x i64> %s6, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
+ ret <8 x i64> %s7
+}
+
diff --git a/test/Transforms/InterleavedAccess/AArch64/interleaved-accesses.ll b/test/Transforms/InterleavedAccess/AArch64/interleaved-accesses.ll
index 2a257d490815..a038fd1a411b 100644
--- a/test/Transforms/InterleavedAccess/AArch64/interleaved-accesses.ll
+++ b/test/Transforms/InterleavedAccess/AArch64/interleaved-accesses.ll
@@ -565,3 +565,212 @@ define void @no_interleave(<4 x float> %a0) {
store <4 x float> %v0, <4 x float>* @g, align 16
ret void
}
+
+define void @load_factor2_wide2(<16 x i32>* %ptr) {
+; NEON-LABEL: @load_factor2_wide2(
+; NEON-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* %ptr to i32*
+; NEON-NEXT: [[TMP2:%.*]] = bitcast i32* [[TMP1]] to <4 x i32>*
+; NEON-NEXT: [[LDN:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0v4i32(<4 x i32>* [[TMP2]])
+; NEON-NEXT: [[TMP3:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[LDN]], 1
+; NEON-NEXT: [[TMP4:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[LDN]], 0
+; NEON-NEXT: [[TMP5:%.*]] = getelementptr i32, i32* [[TMP1]], i32 8
+; NEON-NEXT: [[TMP6:%.*]] = bitcast i32* [[TMP5]] to <4 x i32>*
+; NEON-NEXT: [[LDN1:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0v4i32(<4 x i32>* [[TMP6]])
+; NEON-NEXT: [[TMP7:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[LDN1]], 1
+; NEON-NEXT: [[TMP8:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[LDN1]], 0
+; NEON-NEXT: [[TMP9:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> [[TMP7]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP10:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> [[TMP8]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: ret void
+; NO_NEON-LABEL: @load_factor2_wide2(
+; NO_NEON-NOT: @llvm.aarch64.neon
+; NO_NEON: ret void
+;
+ %interleaved.vec = load <16 x i32>, <16 x i32>* %ptr, align 4
+ %v0 = shufflevector <16 x i32> %interleaved.vec, <16 x i32> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ %v1 = shufflevector <16 x i32> %interleaved.vec, <16 x i32> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret void
+}
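The transform splits one wide interleaved load into two legal ld2 operations and concatenates the de-interleaved halves. A Python model of the even/odd bookkeeping (ld2 here is a stand-in for the NEON intrinsic, illustrative only):

    data = list(range(16))
    def ld2(block):                      # NEON ld2: de-interleave one block
        return block[0::2], block[1::2]

    lo0, lo1 = ld2(data[:8])
    hi0, hi1 = ld2(data[8:])
    assert lo0 + hi0 == data[0::2]       # %v0: even elements
    assert lo1 + hi1 == data[1::2]       # %v1: odd elements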
+
+define void @load_factor2_wide3(<24 x i32>* %ptr) {
+; NEON-LABEL: @load_factor2_wide3(
+; NEON-NEXT: [[TMP1:%.*]] = bitcast <24 x i32>* [[PTR:%.*]] to i32*
+; NEON-NEXT: [[TMP2:%.*]] = bitcast i32* [[TMP1]] to <4 x i32>*
+; NEON-NEXT: [[LDN:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0v4i32(<4 x i32>* [[TMP2]])
+; NEON-NEXT: [[TMP3:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[LDN]], 1
+; NEON-NEXT: [[TMP4:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[LDN]], 0
+; NEON-NEXT: [[TMP5:%.*]] = getelementptr i32, i32* [[TMP1]], i32 8
+; NEON-NEXT: [[TMP6:%.*]] = bitcast i32* [[TMP5]] to <4 x i32>*
+; NEON-NEXT: [[LDN1:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0v4i32(<4 x i32>* [[TMP6]])
+; NEON-NEXT: [[TMP7:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[LDN1]], 1
+; NEON-NEXT: [[TMP8:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[LDN1]], 0
+; NEON-NEXT: [[TMP9:%.*]] = getelementptr i32, i32* [[TMP5]], i32 8
+; NEON-NEXT: [[TMP10:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
+; NEON-NEXT: [[LDN2:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0v4i32(<4 x i32>* [[TMP10]])
+; NEON-NEXT: [[TMP11:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[LDN2]], 1
+; NEON-NEXT: [[TMP12:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[LDN2]], 0
+; NEON-NEXT: [[TMP13:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> [[TMP7]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP14:%.*]] = shufflevector <4 x i32> [[TMP11]], <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+; NEON-NEXT: [[TMP15:%.*]] = shufflevector <8 x i32> [[TMP13]], <8 x i32> [[TMP14]], <12 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
+; NEON-NEXT: [[TMP16:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> [[TMP8]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP17:%.*]] = shufflevector <4 x i32> [[TMP12]], <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+; NEON-NEXT: [[TMP18:%.*]] = shufflevector <8 x i32> [[TMP16]], <8 x i32> [[TMP17]], <12 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
+; NEON-NEXT: ret void
+; NO_NEON-LABEL: @load_factor2_wide3(
+; NO_NEON-NOT: @llvm.aarch64.neon
+; NO_NEON: ret void
+;
+ %interleaved.vec = load <24 x i32>, <24 x i32>* %ptr, align 4
+ %v0 = shufflevector <24 x i32> %interleaved.vec, <24 x i32> undef, <12 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22>
+ %v1 = shufflevector <24 x i32> %interleaved.vec, <24 x i32> undef, <12 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23>
+ ret void
+}
+
+define void @load_factor3_wide(<24 x i32>* %ptr) {
+; NEON-LABEL: @load_factor3_wide(
+; NEON-NEXT: [[TMP1:%.*]] = bitcast <24 x i32>* %ptr to i32*
+; NEON-NEXT: [[TMP2:%.*]] = bitcast i32* [[TMP1]] to <4 x i32>*
+; NEON-NEXT: [[LDN:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0v4i32(<4 x i32>* [[TMP2]])
+; NEON-NEXT: [[TMP3:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } [[LDN]], 2
+; NEON-NEXT: [[TMP4:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } [[LDN]], 1
+; NEON-NEXT: [[TMP5:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } [[LDN]], 0
+; NEON-NEXT: [[TMP6:%.*]] = getelementptr i32, i32* [[TMP1]], i32 12
+; NEON-NEXT: [[TMP7:%.*]] = bitcast i32* [[TMP6]] to <4 x i32>*
+; NEON-NEXT: [[LDN1:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0v4i32(<4 x i32>* [[TMP7]])
+; NEON-NEXT: [[TMP8:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } [[LDN1]], 2
+; NEON-NEXT: [[TMP9:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } [[LDN1]], 1
+; NEON-NEXT: [[TMP10:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } [[LDN1]], 0
+; NEON-NEXT: [[TMP11:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> [[TMP8]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP12:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> [[TMP9]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP13:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> [[TMP10]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: ret void
+; NO_NEON-LABEL: @load_factor3_wide(
+; NO_NEON-NOT: @llvm.aarch64.neon
+; NO_NEON: ret void
+;
+ %interleaved.vec = load <24 x i32>, <24 x i32>* %ptr, align 4
+ %v0 = shufflevector <24 x i32> %interleaved.vec, <24 x i32> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
+ %v1 = shufflevector <24 x i32> %interleaved.vec, <24 x i32> undef, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
+ %v2 = shufflevector <24 x i32> %interleaved.vec, <24 x i32> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
+ ret void
+}
+
+define void @load_factor4_wide(<32 x i32>* %ptr) {
+; NEON-LABEL: @load_factor4_wide(
+; NEON-NEXT: [[TMP1:%.*]] = bitcast <32 x i32>* %ptr to i32*
+; NEON-NEXT: [[TMP2:%.*]] = bitcast i32* [[TMP1]] to <4 x i32>*
+; NEON-NEXT: [[LDN:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0v4i32(<4 x i32>* [[TMP2]])
+; NEON-NEXT: [[TMP3:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[LDN]], 3
+; NEON-NEXT: [[TMP4:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[LDN]], 2
+; NEON-NEXT: [[TMP5:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[LDN]], 1
+; NEON-NEXT: [[TMP6:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[LDN]], 0
+; NEON-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[TMP1]], i32 16
+; NEON-NEXT: [[TMP8:%.*]] = bitcast i32* [[TMP7]] to <4 x i32>*
+; NEON-NEXT: [[LDN1:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0v4i32(<4 x i32>* [[TMP8]])
+; NEON-NEXT: [[TMP9:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[LDN1]], 3
+; NEON-NEXT: [[TMP10:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[LDN1]], 2
+; NEON-NEXT: [[TMP11:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[LDN1]], 1
+; NEON-NEXT: [[TMP12:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[LDN1]], 0
+; NEON-NEXT: [[TMP13:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> [[TMP9]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP14:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> [[TMP10]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP15:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> [[TMP11]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP16:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> [[TMP12]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: ret void
+; NO_NEON-LABEL: @load_factor4_wide(
+; NO_NEON-NOT: @llvm.aarch64.neon
+; NO_NEON: ret void
+;
+ %interleaved.vec = load <32 x i32>, <32 x i32>* %ptr, align 4
+ %v0 = shufflevector <32 x i32> %interleaved.vec, <32 x i32> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
+ %v1 = shufflevector <32 x i32> %interleaved.vec, <32 x i32> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
+ %v2 = shufflevector <32 x i32> %interleaved.vec, <32 x i32> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
+ %v3 = shufflevector <32 x i32> %interleaved.vec, <32 x i32> undef, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
+ ret void
+}
+
+define void @store_factor2_wide(<16 x i32>* %ptr, <8 x i32> %v0, <8 x i32> %v1) {
+; NEON-LABEL: @store_factor2_wide(
+; NEON-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* %ptr to i32*
+; NEON-NEXT: [[TMP2:%.*]] = shufflevector <8 x i32> %v0, <8 x i32> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; NEON-NEXT: [[TMP3:%.*]] = shufflevector <8 x i32> %v0, <8 x i32> %v1, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+; NEON-NEXT: [[TMP4:%.*]] = bitcast i32* [[TMP1]] to <4 x i32>*
+; NEON-NEXT: call void @llvm.aarch64.neon.st2.v4i32.p0v4i32(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32>* [[TMP4]])
+; NEON-NEXT: [[TMP5:%.*]] = shufflevector <8 x i32> %v0, <8 x i32> %v1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP6:%.*]] = shufflevector <8 x i32> %v0, <8 x i32> %v1, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+; NEON-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[TMP1]], i32 8
+; NEON-NEXT: [[TMP8:%.*]] = bitcast i32* [[TMP7]] to <4 x i32>*
+; NEON-NEXT: call void @llvm.aarch64.neon.st2.v4i32.p0v4i32(<4 x i32> [[TMP5]], <4 x i32> [[TMP6]], <4 x i32>* [[TMP8]])
+; NEON-NEXT: ret void
+; NO_NEON-LABEL: @store_factor2_wide(
+; NO_NEON: ret void
+;
+ %interleaved.vec = shufflevector <8 x i32> %v0, <8 x i32> %v1, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ store <16 x i32> %interleaved.vec, <16 x i32>* %ptr, align 4
+ ret void
+}
+
+define void @store_factor3_wide(<24 x i32>* %ptr, <8 x i32> %v0, <8 x i32> %v1, <8 x i32> %v2) {
+; NEON-LABEL: @store_factor3_wide(
+; NEON: [[TMP1:%.*]] = bitcast <24 x i32>* %ptr to i32*
+; NEON-NEXT: [[TMP2:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; NEON-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+; NEON-NEXT: [[TMP4:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 16, i32 17, i32 18, i32 19>
+; NEON-NEXT: [[TMP5:%.*]] = bitcast i32* [[TMP1]] to <4 x i32>*
+; NEON-NEXT: call void @llvm.aarch64.neon.st3.v4i32.p0v4i32(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], <4 x i32>* [[TMP5]])
+; NEON-NEXT: [[TMP6:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP7:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+; NEON-NEXT: [[TMP8:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 20, i32 21, i32 22, i32 23>
+; NEON-NEXT: [[TMP9:%.*]] = getelementptr i32, i32* [[TMP1]], i32 12
+; NEON-NEXT: [[TMP10:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
+; NEON-NEXT: call void @llvm.aarch64.neon.st3.v4i32.p0v4i32(<4 x i32> [[TMP6]], <4 x i32> [[TMP7]], <4 x i32> [[TMP8]], <4 x i32>* [[TMP10]])
+; NEON-NEXT: ret void
+; NO_NEON-LABEL: @store_factor3_wide(
+; NO_NEON: ret void
+;
+ %s0 = shufflevector <8 x i32> %v0, <8 x i32> %v1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %s1 = shufflevector <8 x i32> %v2, <8 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %interleaved.vec = shufflevector <16 x i32> %s0, <16 x i32> %s1, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
+ store <24 x i32> %interleaved.vec, <24 x i32>* %ptr, align 4
+ ret void
+}
+
+define void @store_factor4_wide(<32 x i32>* %ptr, <8 x i32> %v0, <8 x i32> %v1, <8 x i32> %v2, <8 x i32> %v3) {
+; NEON-LABEL: @store_factor4_wide(
+; NEON: [[TMP1:%.*]] = bitcast <32 x i32>* %ptr to i32*
+; NEON-NEXT: [[TMP2:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; NEON-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+; NEON-NEXT: [[TMP4:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 16, i32 17, i32 18, i32 19>
+; NEON-NEXT: [[TMP5:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 24, i32 25, i32 26, i32 27>
+; NEON-NEXT: [[TMP6:%.*]] = bitcast i32* [[TMP1]] to <4 x i32>*
+; NEON-NEXT: call void @llvm.aarch64.neon.st4.v4i32.p0v4i32(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]])
+; NEON-NEXT: [[TMP7:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP8:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+; NEON-NEXT: [[TMP9:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 20, i32 21, i32 22, i32 23>
+; NEON-NEXT: [[TMP10:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 28, i32 29, i32 30, i32 31>
+; NEON-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[TMP1]], i32 16
+; NEON-NEXT: [[TMP12:%.*]] = bitcast i32* [[TMP11]] to <4 x i32>*
+; NEON-NEXT: call void @llvm.aarch64.neon.st4.v4i32.p0v4i32(<4 x i32> [[TMP7]], <4 x i32> [[TMP8]], <4 x i32> [[TMP9]], <4 x i32> [[TMP10]], <4 x i32>* [[TMP12]])
+; NEON-NEXT: ret void
+; NO_NEON-LABEL: @store_factor4_wide(
+; NO_NEON-NOT: @llvm.aarch64.neon
+; NO_NEON: ret void
+;
+ %s0 = shufflevector <8 x i32> %v0, <8 x i32> %v1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %s1 = shufflevector <8 x i32> %v2, <8 x i32> %v3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %interleaved.vec = shufflevector <16 x i32> %s0, <16 x i32> %s1, <32 x i32> <i32 0, i32 8, i32 16, i32 24, i32 1, i32 9, i32 17, i32 25, i32 2, i32 10, i32 18, i32 26, i32 3, i32 11, i32 19, i32 27, i32 4, i32 12, i32 20, i32 28, i32 5, i32 13, i32 21, i32 29, i32 6, i32 14, i32 22, i32 30, i32 7, i32 15, i32 23, i32 31>
+ store <32 x i32> %interleaved.vec, <32 x i32>* %ptr, align 4
+ ret void
+}
+
+define void @load_factor2_fp128(<4 x fp128>* %ptr) {
+; NEON-LABEL: @load_factor2_fp128(
+; NEON-NOT: @llvm.aarch64.neon
+; NEON: ret void
+; NO_NEON-LABEL: @load_factor2_fp128(
+; NO_NEON-NOT: @llvm.aarch64.neon
+; NO_NEON: ret void
+;
+ %interleaved.vec = load <4 x fp128>, <4 x fp128>* %ptr, align 16
+ %v0 = shufflevector <4 x fp128> %interleaved.vec, <4 x fp128> undef, <2 x i32> <i32 0, i32 2>
+ %v1 = shufflevector <4 x fp128> %interleaved.vec, <4 x fp128> undef, <2 x i32> <i32 1, i32 3>
+ ret void
+}
diff --git a/test/Transforms/InterleavedAccess/ARM/interleaved-accesses.ll b/test/Transforms/InterleavedAccess/ARM/interleaved-accesses.ll
index 21eb8d7a1b0a..5938f9d7321d 100644
--- a/test/Transforms/InterleavedAccess/ARM/interleaved-accesses.ll
+++ b/test/Transforms/InterleavedAccess/ARM/interleaved-accesses.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -mattr=+neon -interleaved-access -S | FileCheck %s -check-prefix=NEON
-; RUN: opt < %s -interleaved-access -S | FileCheck %s -check-prefix=NO_NEON
+; RUN: opt < %s -mattr=+neon -interleaved-access -S | FileCheck %s -check-prefixes=NEON,ALL
+; RUN: opt < %s -interleaved-access -S | FileCheck %s -check-prefixes=NO_NEON,ALL
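+; Checks shared by both RUN lines use the ALL prefix; target-specific
+; behavior keeps the dedicated NEON / NO_NEON prefixes.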
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
target triple = "arm---eabi"
@@ -387,13 +387,31 @@ define void @store_address_space(<4 x i32> addrspace(1)* %ptr, <2 x i32> %v0, <2
ret void
}
+define void @load_f16_factor2(<8 x half>* %ptr) {
+; ALL-LABEL: @load_f16_factor2(
+; ALL-NOT: @llvm.arm.neon
+; ALL: ret void
+;
+ %interleaved.vec = load <8 x half>, <8 x half>* %ptr, align 4
+ %v0 = shufflevector <8 x half> %interleaved.vec, <8 x half> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %v1 = shufflevector <8 x half> %interleaved.vec, <8 x half> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret void
+}
+
+define void @store_f16_factor2(<8 x half>* %ptr, <4 x half> %v0, <4 x half> %v1) {
+; ALL-LABEL: @store_f16_factor2(
+; ALL-NOT: @llvm.arm.neon
+; ALL: ret void
+;
+ %interleaved.vec = shufflevector <4 x half> %v0, <4 x half> %v1, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+ store <8 x half> %interleaved.vec, <8 x half>* %ptr, align 4
+ ret void
+}
+
define void @load_illegal_factor2(<3 x float>* %ptr) nounwind {
-; NEON-LABEL: @load_illegal_factor2(
-; NEON-NOT: @llvm.arm.neon
-; NEON: ret void
-; NO_NEON-LABEL: @load_illegal_factor2(
-; NO_NEON-NOT: @llvm.arm.neon
-; NO_NEON: ret void
+; ALL-LABEL: @load_illegal_factor2(
+; ALL-NOT: @llvm.arm.neon
+; ALL: ret void
;
%interleaved.vec = load <3 x float>, <3 x float>* %ptr, align 16
%v0 = shufflevector <3 x float> %interleaved.vec, <3 x float> undef, <3 x i32> <i32 0, i32 2, i32 undef>
@@ -401,12 +419,9 @@ define void @load_illegal_factor2(<3 x float>* %ptr) nounwind {
}
define void @store_illegal_factor2(<3 x float>* %ptr, <3 x float> %v0) nounwind {
-; NEON-LABEL: @store_illegal_factor2(
-; NEON-NOT: @llvm.arm.neon
-; NEON: ret void
-; NO_NEON-LABEL: @store_illegal_factor2(
-; NO_NEON-NOT: @llvm.arm.neon
-; NO_NEON: ret void
+; ALL-LABEL: @store_illegal_factor2(
+; ALL-NOT: @llvm.arm.neon
+; ALL: ret void
;
%interleaved.vec = shufflevector <3 x float> %v0, <3 x float> undef, <3 x i32> <i32 0, i32 2, i32 undef>
store <3 x float> %interleaved.vec, <3 x float>* %ptr, align 16
@@ -538,12 +553,9 @@ define void @store_general_mask_factor3_undefmultimid(<12 x i32>* %ptr, <32 x i3
}
define void @store_general_mask_factor3_undef_fail(<12 x i32>* %ptr, <32 x i32> %v0, <32 x i32> %v1) {
-; NEON-LABEL: @store_general_mask_factor3_undef_fail(
-; NEON-NOT: @llvm.arm.neon
-; NEON: ret void
-; NO_NEON-LABEL: @store_general_mask_factor3_undef_fail(
-; NO_NEON-NOT: @llvm.arm.neon
-; NO_NEON: ret void
+; ALL-LABEL: @store_general_mask_factor3_undef_fail(
+; ALL-NOT: @llvm.arm.neon
+; ALL: ret void
;
%interleaved.vec = shufflevector <32 x i32> %v0, <32 x i32> %v1, <12 x i32> <i32 4, i32 32, i32 16, i32 undef, i32 33, i32 17, i32 undef, i32 34, i32 18, i32 8, i32 35, i32 19>
store <12 x i32> %interleaved.vec, <12 x i32>* %ptr, align 4
@@ -568,12 +580,9 @@ define void @store_general_mask_factor3_undeflane(<12 x i32>* %ptr, <32 x i32> %
}
define void @store_general_mask_factor3_endstart_fail(<12 x i32>* %ptr, <32 x i32> %v0, <32 x i32> %v1) {
-; NEON-LABEL: @store_general_mask_factor3_endstart_fail(
-; NEON-NOT: @llvm.arm.neon
-; NEON: ret void
-; NO_NEON-LABEL: @store_general_mask_factor3_endstart_fail(
-; NO_NEON-NOT: @llvm.arm.neon
-; NO_NEON: ret void
+; ALL-LABEL: @store_general_mask_factor3_endstart_fail(
+; ALL-NOT: @llvm.arm.neon
+; ALL: ret void
;
%interleaved.vec = shufflevector <32 x i32> %v0, <32 x i32> %v1, <12 x i32> <i32 undef, i32 32, i32 16, i32 undef, i32 33, i32 17, i32 undef, i32 34, i32 18, i32 2, i32 35, i32 19>
store <12 x i32> %interleaved.vec, <12 x i32>* %ptr, align 4
@@ -598,12 +607,9 @@ define void @store_general_mask_factor3_endstart_pass(<12 x i32>* %ptr, <32 x i3
}
define void @store_general_mask_factor3_midstart_fail(<12 x i32>* %ptr, <32 x i32> %v0, <32 x i32> %v1) {
-; NEON-LABEL: @store_general_mask_factor3_midstart_fail(
-; NEON-NOT: @llvm.arm.neon
-; NEON: ret void
-; NO_NEON-LABEL: @store_general_mask_factor3_midstart_fail(
-; NO_NEON-NOT: @llvm.arm.neon
-; NO_NEON: ret void
+; ALL-LABEL: @store_general_mask_factor3_midstart_fail(
+; ALL-NOT: @llvm.arm.neon
+; ALL: ret void
;
%interleaved.vec = shufflevector <32 x i32> %v0, <32 x i32> %v1, <12 x i32> <i32 undef, i32 32, i32 16, i32 0, i32 33, i32 17, i32 undef, i32 34, i32 18, i32 undef, i32 35, i32 19>
store <12 x i32> %interleaved.vec, <12 x i32>* %ptr, align 4
@@ -630,17 +636,221 @@ define void @store_general_mask_factor3_midstart_pass(<12 x i32>* %ptr, <32 x i3
@g = external global <4 x float>
; The following does not give a valid interleaved store
-; NEON-LABEL: define void @no_interleave
-; NEON-NOT: call void @llvm.arm.neon.vst2
-; NEON: shufflevector
-; NEON: store
-; NEON: ret void
-; NO_NEON-LABEL: define void @no_interleave
-; NO_NEON: shufflevector
-; NO_NEON: store
-; NO_NEON: ret void
+; ALL-LABEL: define void @no_interleave
+; ALL-NOT: call void @llvm.arm.neon.vst2
+; ALL: shufflevector
+; ALL: store
+; ALL: ret void
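+; (the shuffle mask <0, 7, 1, undef> is not a strided interleave pattern)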
define void @no_interleave(<4 x float> %a0) {
%v0 = shufflevector <4 x float> %a0, <4 x float> %a0, <4 x i32> <i32 0, i32 7, i32 1, i32 undef>
store <4 x float> %v0, <4 x float>* @g, align 16
ret void
}
+
+define void @load_factor2_wide2(<16 x i32>* %ptr) {
+; NEON-LABEL: @load_factor2_wide2(
+; NEON-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* %ptr to i32*
+; NEON-NEXT: [[TMP2:%.*]] = bitcast i32* [[TMP1]] to i8*
+; NEON-NEXT: [[VLDN:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32.p0i8(i8* [[TMP2]], i32 4)
+; NEON-NEXT: [[TMP3:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[VLDN]], 1
+; NEON-NEXT: [[TMP4:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[VLDN]], 0
+; NEON-NEXT: [[TMP5:%.*]] = getelementptr i32, i32* [[TMP1]], i32 8
+; NEON-NEXT: [[TMP6:%.*]] = bitcast i32* [[TMP5]] to i8*
+; NEON-NEXT: [[VLDN1:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32.p0i8(i8* [[TMP6]], i32 4)
+; NEON-NEXT: [[TMP7:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[VLDN1]], 1
+; NEON-NEXT: [[TMP8:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[VLDN1]], 0
+; NEON-NEXT: [[TMP9:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> [[TMP7]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP10:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> [[TMP8]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: ret void
+; NO_NEON-LABEL: @load_factor2_wide2(
+; NO_NEON-NOT: @llvm.arm.neon
+; NO_NEON: ret void
+;
+ %interleaved.vec = load <16 x i32>, <16 x i32>* %ptr, align 4
+ %v0 = shufflevector <16 x i32> %interleaved.vec, <16 x i32> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ %v1 = shufflevector <16 x i32> %interleaved.vec, <16 x i32> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret void
+}
+
+define void @load_factor2_wide3(<24 x i32>* %ptr) {
+; NEON-LABEL: @load_factor2_wide3(
+; NEON-NEXT: [[TMP1:%.*]] = bitcast <24 x i32>* [[PTR:%.*]] to i32*
+; NEON-NEXT: [[TMP2:%.*]] = bitcast i32* [[TMP1]] to i8*
+; NEON-NEXT: [[VLDN:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32.p0i8(i8* [[TMP2]], i32 4)
+; NEON-NEXT: [[TMP3:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[VLDN]], 1
+; NEON-NEXT: [[TMP4:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[VLDN]], 0
+; NEON-NEXT: [[TMP5:%.*]] = getelementptr i32, i32* [[TMP1]], i32 8
+; NEON-NEXT: [[TMP6:%.*]] = bitcast i32* [[TMP5]] to i8*
+; NEON-NEXT: [[VLDN1:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32.p0i8(i8* [[TMP6]], i32 4)
+; NEON-NEXT: [[TMP7:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[VLDN1]], 1
+; NEON-NEXT: [[TMP8:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[VLDN1]], 0
+; NEON-NEXT: [[TMP9:%.*]] = getelementptr i32, i32* [[TMP5]], i32 8
+; NEON-NEXT: [[TMP10:%.*]] = bitcast i32* [[TMP9]] to i8*
+; NEON-NEXT: [[VLDN2:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32.p0i8(i8* [[TMP10]], i32 4)
+; NEON-NEXT: [[TMP11:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[VLDN2]], 1
+; NEON-NEXT: [[TMP12:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[VLDN2]], 0
+; NEON-NEXT: [[TMP13:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> [[TMP7]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP14:%.*]] = shufflevector <4 x i32> [[TMP11]], <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+; NEON-NEXT: [[TMP15:%.*]] = shufflevector <8 x i32> [[TMP13]], <8 x i32> [[TMP14]], <12 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
+; NEON-NEXT: [[TMP16:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> [[TMP8]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP17:%.*]] = shufflevector <4 x i32> [[TMP12]], <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+; NEON-NEXT: [[TMP18:%.*]] = shufflevector <8 x i32> [[TMP16]], <8 x i32> [[TMP17]], <12 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
+; NEON-NEXT: ret void
+; NO_NEON-LABEL: @load_factor2_wide3(
+; NO_NEON-NOT: @llvm.arm.neon
+; NO_NEON: ret void
+;
+ %interleaved.vec = load <24 x i32>, <24 x i32>* %ptr, align 4
+ %v0 = shufflevector <24 x i32> %interleaved.vec, <24 x i32> undef, <12 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22>
+ %v1 = shufflevector <24 x i32> %interleaved.vec, <24 x i32> undef, <12 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23>
+ ret void
+}
+
+define void @load_factor3_wide(<24 x i32>* %ptr) {
+; NEON-LABEL: @load_factor3_wide(
+; NEON-NEXT: [[TMP1:%.*]] = bitcast <24 x i32>* %ptr to i32*
+; NEON-NEXT: [[TMP2:%.*]] = bitcast i32* [[TMP1]] to i8*
+; NEON-NEXT: [[VLDN:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3.v4i32.p0i8(i8* [[TMP2]], i32 4)
+; NEON-NEXT: [[TMP3:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } [[VLDN]], 2
+; NEON-NEXT: [[TMP4:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } [[VLDN]], 1
+; NEON-NEXT: [[TMP5:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } [[VLDN]], 0
+; NEON-NEXT: [[TMP6:%.*]] = getelementptr i32, i32* [[TMP1]], i32 12
+; NEON-NEXT: [[TMP7:%.*]] = bitcast i32* [[TMP6]] to i8*
+; NEON-NEXT: [[VLDN1:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3.v4i32.p0i8(i8* [[TMP7]], i32 4)
+; NEON-NEXT: [[TMP8:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } [[VLDN1]], 2
+; NEON-NEXT: [[TMP9:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } [[VLDN1]], 1
+; NEON-NEXT: [[TMP10:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } [[VLDN1]], 0
+; NEON-NEXT: [[TMP11:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> [[TMP8]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP12:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> [[TMP9]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP13:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> [[TMP10]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: ret void
+; NO_NEON-LABEL: @load_factor3_wide(
+; NO_NEON-NOT: @llvm.arm.neon
+; NO_NEON: ret void
+;
+ %interleaved.vec = load <24 x i32>, <24 x i32>* %ptr, align 4
+ %v0 = shufflevector <24 x i32> %interleaved.vec, <24 x i32> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
+ %v1 = shufflevector <24 x i32> %interleaved.vec, <24 x i32> undef, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
+ %v2 = shufflevector <24 x i32> %interleaved.vec, <24 x i32> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
+ ret void
+}
+
+define void @load_factor4_wide(<32 x i32>* %ptr) {
+; NEON-LABEL: @load_factor4_wide(
+; NEON-NEXT: [[TMP1:%.*]] = bitcast <32 x i32>* %ptr to i32*
+; NEON-NEXT: [[TMP2:%.*]] = bitcast i32* [[TMP1]] to i8*
+; NEON-NEXT: [[VLDN:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld4.v4i32.p0i8(i8* [[TMP2]], i32 4)
+; NEON-NEXT: [[TMP3:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[VLDN]], 3
+; NEON-NEXT: [[TMP4:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[VLDN]], 2
+; NEON-NEXT: [[TMP5:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[VLDN]], 1
+; NEON-NEXT: [[TMP6:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[VLDN]], 0
+; NEON-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[TMP1]], i32 16
+; NEON-NEXT: [[TMP8:%.*]] = bitcast i32* [[TMP7]] to i8*
+; NEON-NEXT: [[VLDN1:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld4.v4i32.p0i8(i8* [[TMP8]], i32 4)
+; NEON-NEXT: [[TMP9:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[VLDN1]], 3
+; NEON-NEXT: [[TMP10:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[VLDN1]], 2
+; NEON-NEXT: [[TMP11:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[VLDN1]], 1
+; NEON-NEXT: [[TMP12:%.*]] = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[VLDN1]], 0
+; NEON-NEXT: [[TMP13:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> [[TMP9]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP14:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> [[TMP10]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP15:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> [[TMP11]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP16:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> [[TMP12]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: ret void
+; NO_NEON-LABEL: @load_factor4_wide(
+; NO_NEON-NOT: @llvm.arm.neon
+; NO_NEON: ret void
+;
+ %interleaved.vec = load <32 x i32>, <32 x i32>* %ptr, align 4
+ %v0 = shufflevector <32 x i32> %interleaved.vec, <32 x i32> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
+ %v1 = shufflevector <32 x i32> %interleaved.vec, <32 x i32> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
+ %v2 = shufflevector <32 x i32> %interleaved.vec, <32 x i32> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
+ %v3 = shufflevector <32 x i32> %interleaved.vec, <32 x i32> undef, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
+ ret void
+}
+
+define void @store_factor2_wide(<16 x i32>* %ptr, <8 x i32> %v0, <8 x i32> %v1) {
+; NEON-LABEL: @store_factor2_wide(
+; NEON-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* %ptr to i32*
+; NEON-NEXT: [[TMP2:%.*]] = bitcast i32* [[TMP1]] to i8*
+; NEON-NEXT: [[TMP3:%.*]] = shufflevector <8 x i32> %v0, <8 x i32> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; NEON-NEXT: [[TMP4:%.*]] = shufflevector <8 x i32> %v0, <8 x i32> %v1, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+; NEON-NEXT: call void @llvm.arm.neon.vst2.p0i8.v4i32(i8* [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], i32 4)
+; NEON-NEXT: [[TMP5:%.*]] = getelementptr i32, i32* [[TMP1]], i32 8
+; NEON-NEXT: [[TMP6:%.*]] = bitcast i32* [[TMP5]] to i8*
+; NEON-NEXT: [[TMP7:%.*]] = shufflevector <8 x i32> %v0, <8 x i32> %v1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP8:%.*]] = shufflevector <8 x i32> %v0, <8 x i32> %v1, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+; NEON-NEXT: call void @llvm.arm.neon.vst2.p0i8.v4i32(i8* [[TMP6]], <4 x i32> [[TMP7]], <4 x i32> [[TMP8]], i32 4)
+; NEON-NEXT: ret void
+; NO_NEON-LABEL: @store_factor2_wide(
+; NO_NEON-NOT: @llvm.arm.neon
+; NO_NEON: ret void
+;
+ %interleaved.vec = shufflevector <8 x i32> %v0, <8 x i32> %v1, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ store <16 x i32> %interleaved.vec, <16 x i32>* %ptr, align 4
+ ret void
+}
+
+define void @store_factor3_wide(<24 x i32>* %ptr, <8 x i32> %v0, <8 x i32> %v1, <8 x i32> %v2) {
+; NEON-LABEL: @store_factor3_wide(
+; NEON: [[TMP1:%.*]] = bitcast <24 x i32>* %ptr to i32*
+; NEON-NEXT: [[TMP2:%.*]] = bitcast i32* [[TMP1]] to i8*
+; NEON-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; NEON-NEXT: [[TMP4:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+; NEON-NEXT: [[TMP5:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 16, i32 17, i32 18, i32 19>
+; NEON-NEXT: call void @llvm.arm.neon.vst3.p0i8.v4i32(i8* [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], <4 x i32> [[TMP5]], i32 4)
+; NEON-NEXT: [[TMP6:%.*]] = getelementptr i32, i32* [[TMP1]], i32 12
+; NEON-NEXT: [[TMP7:%.*]] = bitcast i32* [[TMP6]] to i8*
+; NEON-NEXT: [[TMP8:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP9:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+; NEON-NEXT: [[TMP10:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 20, i32 21, i32 22, i32 23>
+; NEON-NEXT: call void @llvm.arm.neon.vst3.p0i8.v4i32(i8* [[TMP7]], <4 x i32> [[TMP8]], <4 x i32> [[TMP9]], <4 x i32> [[TMP10]], i32 4)
+; NEON-NEXT: ret void
+; NO_NEON-LABEL: @store_factor3_wide(
+; NO_NEON-NOT: @llvm.arm.neon
+; NO_NEON: ret void
+;
+ %s0 = shufflevector <8 x i32> %v0, <8 x i32> %v1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %s1 = shufflevector <8 x i32> %v2, <8 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %interleaved.vec = shufflevector <16 x i32> %s0, <16 x i32> %s1, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
+ store <24 x i32> %interleaved.vec, <24 x i32>* %ptr, align 4
+ ret void
+}
+
+define void @store_factor4_wide(<32 x i32>* %ptr, <8 x i32> %v0, <8 x i32> %v1, <8 x i32> %v2, <8 x i32> %v3) {
+; NEON-LABEL: @store_factor4_wide(
+; NEON: [[TMP1:%.*]] = bitcast <32 x i32>* %ptr to i32*
+; NEON-NEXT: [[TMP2:%.*]] = bitcast i32* [[TMP1]] to i8*
+; NEON-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; NEON-NEXT: [[TMP4:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+; NEON-NEXT: [[TMP5:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 16, i32 17, i32 18, i32 19>
+; NEON-NEXT: [[TMP6:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 24, i32 25, i32 26, i32 27>
+; NEON-NEXT: call void @llvm.arm.neon.vst4.p0i8.v4i32(i8* [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], <4 x i32> [[TMP5]], <4 x i32> [[TMP6]], i32 4)
+; NEON-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[TMP1]], i32 16
+; NEON-NEXT: [[TMP8:%.*]] = bitcast i32* [[TMP7]] to i8*
+; NEON-NEXT: [[TMP9:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; NEON-NEXT: [[TMP10:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+; NEON-NEXT: [[TMP11:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 20, i32 21, i32 22, i32 23>
+; NEON-NEXT: [[TMP12:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <4 x i32> <i32 28, i32 29, i32 30, i32 31>
+; NEON-NEXT: call void @llvm.arm.neon.vst4.p0i8.v4i32(i8* [[TMP8]], <4 x i32> [[TMP9]], <4 x i32> [[TMP10]], <4 x i32> [[TMP11]], <4 x i32> [[TMP12]], i32 4)
+; NEON-NEXT: ret void
+; NO_NEON-LABEL: @store_factor4_wide(
+; NO_NEON-NOT: @llvm.arm.neon
+; NO_NEON: ret void
+;
+ %s0 = shufflevector <8 x i32> %v0, <8 x i32> %v1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %s1 = shufflevector <8 x i32> %v2, <8 x i32> %v3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %interleaved.vec = shufflevector <16 x i32> %s0, <16 x i32> %s1, <32 x i32> <i32 0, i32 8, i32 16, i32 24, i32 1, i32 9, i32 17, i32 25, i32 2, i32 10, i32 18, i32 26, i32 3, i32 11, i32 19, i32 27, i32 4, i32 12, i32 20, i32 28, i32 5, i32 13, i32 21, i32 29, i32 6, i32 14, i32 22, i32 30, i32 7, i32 15, i32 23, i32 31>
+ store <32 x i32> %interleaved.vec, <32 x i32>* %ptr, align 4
+ ret void
+}
+
+define void @load_factor2_fp128(<4 x fp128>* %ptr) {
+; ALL-LABEL: @load_factor2_fp128(
+; ALL-NOT: @llvm.arm.neon
+; ALL: ret void
+;
+ %interleaved.vec = load <4 x fp128>, <4 x fp128>* %ptr, align 16
+ %v0 = shufflevector <4 x fp128> %interleaved.vec, <4 x fp128> undef, <2 x i32> <i32 0, i32 2>
+ %v1 = shufflevector <4 x fp128> %interleaved.vec, <4 x fp128> undef, <2 x i32> <i32 1, i32 3>
+ ret void
+}
diff --git a/test/Transforms/JumpThreading/guards.ll b/test/Transforms/JumpThreading/guards.ll
new file mode 100644
index 000000000000..eac2b5dcd85f
--- /dev/null
+++ b/test/Transforms/JumpThreading/guards.ll
@@ -0,0 +1,183 @@
+; RUN: opt < %s -jump-threading -dce -S | FileCheck %s
+
+declare void @llvm.experimental.guard(i1, ...)
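+
+; A guard call deoptimizes when its i1 condition is false. The tests below
+; check that jump threading splits the join block so that a guard remains
+; only on paths where the dominating branch does not already imply its
+; condition.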
+
+declare i32 @f1()
+declare i32 @f2()
+
+define i32 @branch_implies_guard(i32 %a) {
+; CHECK-LABEL: @branch_implies_guard(
+ %cond = icmp slt i32 %a, 10
+ br i1 %cond, label %T1, label %F1
+
+T1:
+; CHECK: T1.split
+; CHECK: %v1 = call i32 @f1()
+; CHECK-NEXT: %retVal
+; CHECK-NEXT: br label %Merge
+ %v1 = call i32 @f1()
+ br label %Merge
+
+F1:
+; CHECK: F1.split
+; CHECK: %v2 = call i32 @f2()
+; CHECK-NEXT: %retVal
+; CHECK-NEXT: %condGuard
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %condGuard
+; CHECK-NEXT: br label %Merge
+ %v2 = call i32 @f2()
+ br label %Merge
+
+Merge:
+; CHECK: Merge
+; CHECK-NOT: call void(i1, ...) @llvm.experimental.guard(
+ %retPhi = phi i32 [ %v1, %T1 ], [ %v2, %F1 ]
+ %retVal = add i32 %retPhi, 10
+ %condGuard = icmp slt i32 %a, 20
+ call void(i1, ...) @llvm.experimental.guard(i1 %condGuard) [ "deopt"() ]
+ ret i32 %retVal
+}
+
+define i32 @not_branch_implies_guard(i32 %a) {
+; CHECK-LABEL: @not_branch_implies_guard(
+ %cond = icmp slt i32 %a, 20
+ br i1 %cond, label %T1, label %F1
+
+T1:
+; CHECK: T1.split:
+; CHECK-NEXT: %v1 = call i32 @f1()
+; CHECK-NEXT: %retVal
+; CHECK-NEXT: %condGuard
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %condGuard
+; CHECK-NEXT: br label %Merge
+ %v1 = call i32 @f1()
+ br label %Merge
+
+F1:
+; CHECK: F1.split:
+; CHECK-NEXT: %v2 = call i32 @f2()
+; CHECK-NEXT: %retVal
+; CHECK-NEXT: br label %Merge
+ %v2 = call i32 @f2()
+ br label %Merge
+
+Merge:
+; CHECK: Merge
+; CHECK-NOT: call void(i1, ...) @llvm.experimental.guard(
+ %retPhi = phi i32 [ %v1, %T1 ], [ %v2, %F1 ]
+ %retVal = add i32 %retPhi, 10
+ %condGuard = icmp sgt i32 %a, 10
+ call void(i1, ...) @llvm.experimental.guard(i1 %condGuard) [ "deopt"() ]
+ ret i32 %retVal
+}
+
+define i32 @branch_overlaps_guard(i32 %a) {
+; CHECK-LABEL: @branch_overlaps_guard(
+ %cond = icmp slt i32 %a, 20
+ br i1 %cond, label %T1, label %F1
+
+T1:
+; CHECK: T1:
+; CHECK-NEXT: %v1 = call i32 @f1()
+; CHECK-NEXT: br label %Merge
+ %v1 = call i32 @f1()
+ br label %Merge
+
+F1:
+; CHECK: F1:
+; CHECK-NEXT: %v2 = call i32 @f2()
+; CHECK-NEXT: br label %Merge
+ %v2 = call i32 @f2()
+ br label %Merge
+
+Merge:
+; CHECK: Merge
+; CHECK: %condGuard = icmp slt i32 %a, 10
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %condGuard) [ "deopt"() ]
+ %retPhi = phi i32 [ %v1, %T1 ], [ %v2, %F1 ]
+ %retVal = add i32 %retPhi, 10
+ %condGuard = icmp slt i32 %a, 10
+ call void(i1, ...) @llvm.experimental.guard(i1 %condGuard) [ "deopt"() ]
+ ret i32 %retVal
+}
+
+define i32 @branch_doesnt_overlap_guard(i32 %a) {
+; CHECK-LABEL: @branch_doesnt_overlap_guard(
+ %cond = icmp slt i32 %a, 10
+ br i1 %cond, label %T1, label %F1
+
+T1:
+; CHECK: T1:
+; CHECK-NEXT: %v1 = call i32 @f1()
+; CHECK-NEXT: br label %Merge
+ %v1 = call i32 @f1()
+ br label %Merge
+
+F1:
+; CHECK: F1:
+; CHECK-NEXT: %v2 = call i32 @f2()
+; CHECK-NEXT: br label %Merge
+ %v2 = call i32 @f2()
+ br label %Merge
+
+Merge:
+; CHECK: Merge
+; CHECK: %condGuard = icmp sgt i32 %a, 20
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %condGuard) [ "deopt"() ]
+ %retPhi = phi i32 [ %v1, %T1 ], [ %v2, %F1 ]
+ %retVal = add i32 %retPhi, 10
+ %condGuard = icmp sgt i32 %a, 20
+ call void(i1, ...) @llvm.experimental.guard(i1 %condGuard) [ "deopt"() ]
+ ret i32 %retVal
+}
+
+define i32 @not_a_diamond1(i32 %a, i1 %cond1) {
+; CHECK-LABEL: @not_a_diamond1(
+ br i1 %cond1, label %Pred, label %Exit
+
+Pred:
+; CHECK: Pred:
+; CHECK-NEXT: switch i32 %a, label %Exit
+ switch i32 %a, label %Exit [
+ i32 10, label %Merge
+ i32 20, label %Merge
+ ]
+
+Merge:
+; CHECK: Merge:
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cond1) [ "deopt"() ]
+; CHECK-NEXT: br label %Exit
+ call void(i1, ...) @llvm.experimental.guard(i1 %cond1) [ "deopt"() ]
+ br label %Exit
+
+Exit:
+; CHECK: Exit:
+; CHECK-NEXT: ret i32 %a
+ ret i32 %a
+}
+
+define void @not_a_diamond2(i32 %a, i1 %cond1) {
+; CHECK-LABEL: @not_a_diamond2(
+ br label %Parent
+
+Merge:
+ call void(i1, ...) @llvm.experimental.guard(i1 %cond1) [ "deopt"() ]
+ ret void
+
+Pred:
+; CHECK-NEXT: Pred:
+; CHECK-NEXT: switch i32 %a, label %Exit
+ switch i32 %a, label %Exit [
+ i32 10, label %Merge
+ i32 20, label %Merge
+ ]
+
+Parent:
+ br label %Pred
+
+Exit:
+; CHECK: Merge:
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cond1) [ "deopt"() ]
+; CHECK-NEXT: ret void
+ ret void
+}
diff --git a/test/Transforms/JumpThreading/thread-loads.ll b/test/Transforms/JumpThreading/thread-loads.ll
index f54672d19566..3606e796cdd5 100644
--- a/test/Transforms/JumpThreading/thread-loads.ll
+++ b/test/Transforms/JumpThreading/thread-loads.ll
@@ -1,5 +1,5 @@
; RUN: opt < %s -jump-threading -S | FileCheck %s
-; RUN: opt < %s -passes=jump-threading -S | FileCheck %s
+; RUN: opt < %s -aa-pipeline=basic-aa -passes=jump-threading -S | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin7"
@@ -302,6 +302,229 @@ ret2:
ret void
}
+define i32 @fn_noalias(i1 %c2, i64* noalias %P, i64* noalias %P2) {
+; CHECK-LABEL: @fn_noalias
+; CHECK-LABEL: cond1:
+; CHECK: %[[LD1:.*]] = load i64, i64* %P
+; CHECK: br i1 %c, label %[[THREAD:.*]], label %end
+; CHECK-LABEL: cond2:
+; CHECK: %[[LD2:.*]] = load i64, i64* %P
+; CHECK-LABEL: cond3:
+; CHECK: %[[PHI:.*]] = phi i64 [ %[[LD1]], %[[THREAD]] ], [ %[[LD2]], %cond2 ]
+; CHECK: call void @fn3(i64 %[[PHI]])
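+; The noalias arguments guarantee the store to %P2 cannot clobber %P, so
+; %l2 must equal %l1 along the edge from %cond1 and that path can be
+; threaded past the reload.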
+entry:
+ br i1 %c2, label %cond2, label %cond1
+
+cond1:
+ %l1 = load i64, i64* %P
+ store i64 42, i64* %P2
+ %c = icmp eq i64 %l1, 0
+ br i1 %c, label %cond2, label %end
+
+cond2:
+ %l2 = load i64, i64* %P
+ call void @fn2(i64 %l2)
+ %c3 = icmp eq i64 %l2, 0
+ br i1 %c3, label %cond3, label %end
+
+cond3:
+ call void @fn3(i64 %l2)
+ br label %end
+
+end:
+ ret i32 0
+}
+
+; This tests whether we can thread from %sw.bb.i to %do.body.preheader.i67 through
+; %sw.bb21.i. To make this happen, %l2 should be detected as a partially
+; redundant load with %l3 across the store to %phase in %sw.bb21.i.
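+;
+; A minimal sketch of the idea (hypothetical IR, not part of this test):
+;   %l2 = load i32, i32* %p        ; available on the edge into the block
+;   store i32 7, i32* %q           ; provably NoAlias with %p
+;   %l3 = load i32, i32* %p        ; fully redundant once phi-translated
+; With %l3 known equal to %l2, the branch on %l3 can be threaded.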
+
+%struct.NEXT_MOVE = type { i32, i32, i32* }
+@hash_move = unnamed_addr global [65 x i32] zeroinitializer, align 4
+@current_move = internal global [65 x i32] zeroinitializer, align 4
+@last = internal unnamed_addr global [65 x i32*] zeroinitializer, align 8
+@next_status = internal unnamed_addr global [65 x %struct.NEXT_MOVE] zeroinitializer, align 8
+define fastcc i32 @Search(i64 %idxprom.i, i64 %idxprom.i89, i32 %c) {
+; CHECK-LABEL: @Search
+; CHECK-LABEL: sw.bb.i:
+; CHECK: %[[LD1:.*]] = load i32, i32* %arrayidx185, align 4
+; CHECK: %[[C1:.*]] = icmp eq i32 %[[LD1]], 0
+; CHECK: br i1 %[[C1]], label %sw.bb21.i.thread, label %if.then.i64
+; CHECK-LABEL: sw.bb21.i.thread:
+; CHECK: br label %[[THREAD_TO:.*]]
+; CHECK-LABEL: sw.bb21.i:
+; CHECK: %[[LD2:.*]] = load i32, i32* %arrayidx185, align 4
+; CHECK: %[[C2:.*]] = icmp eq i32 %[[LD2]], 0
+; CHECK: br i1 %[[C2]], label %[[THREAD_TO]], label %cleanup
+entry:
+ %arrayidx185 = getelementptr inbounds [65 x i32], [65 x i32]* @hash_move, i64 0, i64 %idxprom.i
+ %arrayidx307 = getelementptr inbounds [65 x i32], [65 x i32]* @current_move, i64 0, i64 %idxprom.i
+ %arrayidx89 = getelementptr inbounds [65 x i32*], [65 x i32*]* @last, i64 0, i64 %idxprom.i
+ %phase = getelementptr inbounds [65 x %struct.NEXT_MOVE], [65 x %struct.NEXT_MOVE]* @next_status, i64 0, i64 %idxprom.i, i32 0
+ br label %cond.true282
+
+cond.true282:
+ switch i32 %c, label %sw.default.i [
+ i32 1, label %sw.bb.i
+ i32 0, label %sw.bb21.i
+ ]
+
+sw.default.i:
+ br label %cleanup
+
+sw.bb.i:
+ %call.i62 = call fastcc i32* @GenerateCheckEvasions()
+ store i32* %call.i62, i32** %arrayidx89, align 8
+ %l2 = load i32, i32* %arrayidx185, align 4
+ %tobool.i63 = icmp eq i32 %l2, 0
+ br i1 %tobool.i63, label %sw.bb21.i, label %if.then.i64
+
+if.then.i64: ; preds = %sw.bb.i
+ store i32 7, i32* %phase, align 8
+ store i32 %l2, i32* %arrayidx307, align 4
+ %call16.i = call fastcc i32 @ValidMove(i32 %l2)
+ %tobool17.i = icmp eq i32 %call16.i, 0
+ br i1 %tobool17.i, label %if.else.i65, label %cleanup
+
+if.else.i65:
+ call void @f65()
+ br label %sw.bb21.i
+
+sw.bb21.i:
+ store i32 10, i32* %phase, align 8
+ %l3 = load i32, i32* %arrayidx185, align 4
+ %tobool27.i = icmp eq i32 %l3, 0
+ br i1 %tobool27.i, label %do.body.preheader.i67, label %cleanup
+
+do.body.preheader.i67:
+ call void @f67()
+ ret i32 67
+
+cleanup:
+ call void @Cleanup()
+ ret i32 0
+}
+
+declare fastcc i32* @GenerateCheckEvasions()
+declare fastcc i32 @ValidMove(i32 %move)
+declare void @f67()
+declare void @Cleanup()
+declare void @f65()
+
+define i32 @fn_SinglePred(i1 %c2, i64* %P) {
+; CHECK-LABEL: @fn_SinglePred
+; CHECK-LABEL: entry:
+; CHECK: %[[L1:.*]] = load i64, i64* %P
+; CHECK: br i1 %c, label %cond3, label %cond1
+; CHECK-LABEL: cond2:
+; CHECK-NOT: load
+; CHECK: %[[PHI:.*]] = phi i64 [ %[[L1]], %cond1 ]
+; CHECK: call void @fn2(i64 %[[PHI]])
+; CHECK: br label %end
+; CHECK-LABEL: cond3:
+; CHECK: call void @fn2(i64 %l1)
+; CHECK: call void @fn3(i64 %l1)
+
+entry:
+ %l1 = load i64, i64* %P
+ %c = icmp eq i64 %l1, 0
+ br i1 %c, label %cond2, label %cond1
+
+cond1:
+ br i1 %c2, label %cond2, label %end
+
+cond2:
+ %l2 = load i64, i64* %P
+ call void @fn2(i64 %l2)
+ %c3 = icmp eq i64 %l2, 0
+ br i1 %c3, label %cond3, label %end
+
+cond3:
+ call void @fn3(i64 %l2)
+ br label %end
+
+end:
+ ret i32 0
+}
+
+define i32 @fn_SinglePredMultihop(i1 %c1, i1 %c2, i64* %P) {
+; CHECK-LABEL: @fn_SinglePredMultihop
+; CHECK-LABEL: entry:
+; CHECK: %[[L1:.*]] = load i64, i64* %P
+; CHECK: br i1 %c0, label %cond3, label %cond0
+; CHECK-LABEL: cond2:
+; CHECK-NOT: load
+; CHECK: %[[PHI:.*]] = phi i64 [ %[[L1]], %cond1 ]
+; CHECK: call void @fn2(i64 %[[PHI]])
+; CHECK: br label %end
+; CHECK-LABEL: cond3:
+; CHECK: call void @fn2(i64 %l1)
+; CHECK: call void @fn3(i64 %l1)
+
+entry:
+ %l1 = load i64, i64* %P
+ %c0 = icmp eq i64 %l1, 0
+ br i1 %c0, label %cond2, label %cond0
+
+cond0:
+ br i1 %c1, label %cond1, label %end
+
+cond1:
+ br i1 %c2, label %cond2, label %end
+
+cond2:
+ %l2 = load i64, i64* %P
+ call void @fn2(i64 %l2)
+ %c3 = icmp eq i64 %l2, 0
+ br i1 %c3, label %cond3, label %end
+
+cond3:
+ call void @fn3(i64 %l2)
+ br label %end
+
+end:
+ ret i32 0
+}
+
+declare void @fn2(i64)
+declare void @fn3(i64)
+
+
+; Make sure we phi-translate and make the partially redundant load in
+; merge fully redundant and then we can jump-thread the block with the
+; store.
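+; Along the %left edge, %phiptr translates to %1, where the just-stored
+; value 1 makes %newload fully redundant; %cmp1 then folds to true and the
+; path threads straight to %left_x.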
+;
+; CHECK-LABEL: define i32 @phi_translate_partial_redundant_loads(i32, i32*, i32*
+; CHECK: merge.thread:
+; CHECK: store
+; CHECK: br label %left_x
+;
+; CHECK: left_x:
+; CHECK-NEXT: ret i32 20
+define i32 @phi_translate_partial_redundant_loads(i32, i32*, i32*) {
+ %cmp0 = icmp ne i32 %0, 0
+ br i1 %cmp0, label %left, label %right
+
+left:
+ store i32 1, i32* %1, align 4
+ br label %merge
+
+right:
+ br label %merge
+
+merge:
+ %phiptr = phi i32* [ %1, %left ], [ %2, %right ]
+ %newload = load i32, i32* %phiptr, align 4
+ %cmp1 = icmp slt i32 %newload, 5
+ br i1 %cmp1, label %left_x, label %right_x
+
+left_x:
+ ret i32 20
+
+right_x:
+ ret i32 10
+}
+
!0 = !{!3, !3, i64 0}
!1 = !{!"omnipotent char", !2}
!2 = !{!"Simple C/C++ TBAA"}
diff --git a/test/Transforms/LICM/atomics.ll b/test/Transforms/LICM/atomics.ll
index d23cb49c5486..15c461aeca27 100644
--- a/test/Transforms/LICM/atomics.ll
+++ b/test/Transforms/LICM/atomics.ll
@@ -1,5 +1,5 @@
; RUN: opt < %s -S -basicaa -licm | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='lcssa,require<aa>,require<targetir>,require<scalar-evolution>,require<opt-remark-emit>,loop(licm)' < %s -S | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='require<opt-remark-emit>,loop(licm)' < %s -S | FileCheck %s
; Check that we can hoist unordered loads
define i32 @test1(i32* nocapture %y) nounwind uwtable ssp {
@@ -60,8 +60,7 @@ end:
; CHECK-NEXT: br label %loop
}
-; Don't try to "sink" unordered stores yet; it is legal, but the machinery
-; isn't there.
+; We can sink an unordered store
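+; (LICM sinks it to the exit block, storing the LCSSA phi of the loaded
+; value, as the CHECK lines below verify).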
define i32 @test4(i32* nocapture noalias %x, i32* nocapture %y) nounwind uwtable ssp {
entry:
br label %loop
@@ -75,6 +74,149 @@ loop:
end:
ret i32 %vala
; CHECK-LABEL: define i32 @test4(
+; CHECK-LABEL: loop:
+; CHECK: load atomic i32, i32* %y monotonic
+; CHECK-NOT: store
+; CHECK-LABEL: end:
+; CHECK-NEXT: %[[LCSSAPHI:.*]] = phi i32 [ %vala
+; CHECK: store atomic i32 %[[LCSSAPHI]], i32* %x unordered, align 4
+}
+
+; We currently don't handle ordered atomics.
+define i32 @test5(i32* nocapture noalias %x, i32* nocapture %y) nounwind uwtable ssp {
+entry:
+ br label %loop
+
+loop:
+ %vala = load atomic i32, i32* %y monotonic, align 4
+ store atomic i32 %vala, i32* %x release, align 4
+ %exitcond = icmp ne i32 %vala, 0
+ br i1 %exitcond, label %end, label %loop
+
+end:
+ ret i32 %vala
+; CHECK-LABEL: define i32 @test5(
; CHECK: load atomic i32, i32* %y monotonic
; CHECK-NEXT: store atomic
}
+
+; We currently don't touch volatiles
+define i32 @test6(i32* nocapture noalias %x, i32* nocapture %y) nounwind uwtable ssp {
+entry:
+ br label %loop
+
+loop:
+ %vala = load atomic i32, i32* %y monotonic, align 4
+ store volatile i32 %vala, i32* %x, align 4
+ %exitcond = icmp ne i32 %vala, 0
+ br i1 %exitcond, label %end, label %loop
+
+end:
+ ret i32 %vala
+; CHECK-LABEL: define i32 @test6(
+; CHECK: load atomic i32, i32* %y monotonic
+; CHECK-NEXT: store volatile
+}
+
+; We currently don't touch volatiles
+define i32 @test6b(i32* nocapture noalias %x, i32* nocapture %y) nounwind uwtable ssp {
+entry:
+ br label %loop
+
+loop:
+ %vala = load atomic i32, i32* %y monotonic, align 4
+ store atomic volatile i32 %vala, i32* %x unordered, align 4
+ %exitcond = icmp ne i32 %vala, 0
+ br i1 %exitcond, label %end, label %loop
+
+end:
+ ret i32 %vala
+; CHECK-LABEL: define i32 @test6b(
+; CHECK: load atomic i32, i32* %y monotonic
+; CHECK-NEXT: store atomic volatile
+}
+
+; Mixing unordered atomics and normal loads/stores is
+; currently unimplemented.
+define i32 @test7(i32* nocapture noalias %x, i32* nocapture %y) nounwind uwtable ssp {
+entry:
+ br label %loop
+
+loop:
+ store i32 5, i32* %x
+ %vala = load atomic i32, i32* %y monotonic, align 4
+ store atomic i32 %vala, i32* %x unordered, align 4
+ %exitcond = icmp ne i32 %vala, 0
+ br i1 %exitcond, label %end, label %loop
+
+end:
+ ret i32 %vala
+; CHECK-LABEL: define i32 @test7(
+; CHECK: store i32 5, i32* %x
+; CHECK-NEXT: load atomic i32, i32* %y
+; CHECK-NEXT: store atomic i32
+}
+
+; Three provably noalias locations: we can sink the normal and unordered
+; stores, but not the monotonic load.
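+; (the CHECK lines show both stores sunk to the exit block, while the
+; monotonic load stays in the loop).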
+define i32 @test7b(i32* nocapture noalias %x, i32* nocapture %y, i32* noalias nocapture %z) nounwind uwtable ssp {
+entry:
+ br label %loop
+
+loop:
+ store i32 5, i32* %x
+ %vala = load atomic i32, i32* %y monotonic, align 4
+ store atomic i32 %vala, i32* %z unordered, align 4
+ %exitcond = icmp ne i32 %vala, 0
+ br i1 %exitcond, label %end, label %loop
+
+end:
+ ret i32 %vala
+; CHECK-LABEL: define i32 @test7b(
+; CHECK: load atomic i32, i32* %y monotonic
+
+; CHECK-LABEL: end:
+; CHECK: store i32 5, i32* %x
+; CHECK: store atomic i32 %{{.+}}, i32* %z unordered, align 4
+}
+
+
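+; The release fence must stay ordered after the store, so nothing is sunk
+; out of the loop here.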
+define i32 @test8(i32* nocapture noalias %x, i32* nocapture %y) {
+entry:
+ br label %loop
+
+loop:
+ %vala = load atomic i32, i32* %y monotonic, align 4
+ store atomic i32 %vala, i32* %x unordered, align 4
+ fence release
+ %exitcond = icmp ne i32 %vala, 0
+ br i1 %exitcond, label %end, label %loop
+
+end:
+ ret i32 %vala
+; CHECK-LABEL: define i32 @test8(
+; CHECK-LABEL: loop:
+; CHECK: load atomic i32, i32* %y monotonic
+; CHECK-NEXT: store atomic
+; CHECK-NEXT: fence
+}
+
+; Exact semantics of monotonic accesses are a bit vague in the C++ spec;
+; for the moment, be conservative and don't touch them.
+define i32 @test9(i32* nocapture noalias %x, i32* nocapture %y) {
+entry:
+ br label %loop
+
+loop:
+ %vala = load atomic i32, i32* %y monotonic, align 4
+ store atomic i32 %vala, i32* %x monotonic, align 4
+ %exitcond = icmp ne i32 %vala, 0
+ br i1 %exitcond, label %end, label %loop
+
+end:
+ ret i32 %vala
+; CHECK-LABEL: define i32 @test9(
+; CHECK-LABEL: loop:
+; CHECK: load atomic i32, i32* %y monotonic
+; CHECK-NEXT: store atomic i32 %vala, i32* %x monotonic, align 4
+}
diff --git a/test/Transforms/LICM/constexpr.ll b/test/Transforms/LICM/constexpr.ll
index 8ffc73513600..488821ac8fd4 100644
--- a/test/Transforms/LICM/constexpr.ll
+++ b/test/Transforms/LICM/constexpr.ll
@@ -1,5 +1,5 @@
; RUN: opt < %s -S -basicaa -licm | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='lcssa,require<aa>,require<targetir>,require<scalar-evolution>,require<opt-remark-emit>,loop(licm)' < %s -S | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='require<opt-remark-emit>,loop(licm)' < %s -S | FileCheck %s
; This fixes PR22460
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/test/Transforms/LICM/hoist-bitcast-load.ll b/test/Transforms/LICM/hoist-bitcast-load.ll
index 6ef00738820e..956c7283be31 100644
--- a/test/Transforms/LICM/hoist-bitcast-load.ll
+++ b/test/Transforms/LICM/hoist-bitcast-load.ll
@@ -1,5 +1,6 @@
; RUN: opt -S -basicaa -licm < %s | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='loop-simplify,require<aa>,require<targetir>,require<scalar-evolution>,require<opt-remark-emit>,loop(simplify-cfg,licm)' -S < %s | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='require<opt-remark-emit>,loop(simplify-cfg,licm)' -S < %s | FileCheck %s
+
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Transforms/LICM/hoist-deref-load.ll b/test/Transforms/LICM/hoist-deref-load.ll
index e67becdeb5e4..b48c9e5c7b14 100644
--- a/test/Transforms/LICM/hoist-deref-load.ll
+++ b/test/Transforms/LICM/hoist-deref-load.ll
@@ -1,5 +1,6 @@
; RUN: opt -S -basicaa -licm < %s | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='loop-simplify,require<aa>,require<targetir>,require<scalar-evolution>,require<opt-remark-emit>,loop(simplify-cfg,licm)' -S < %s | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='require<opt-remark-emit>,loop(simplify-cfg,licm)' -S < %s | FileCheck %s
+
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Transforms/LICM/hoist-fast-fdiv.ll b/test/Transforms/LICM/hoist-fast-fdiv.ll
new file mode 100644
index 000000000000..f61564fd726c
--- /dev/null
+++ b/test/Transforms/LICM/hoist-fast-fdiv.ll
@@ -0,0 +1,34 @@
+; RUN: opt -licm -S < %s | FileCheck %s
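+
+; With fast-math, LICM hoists the loop-invariant reciprocal 1.0 / %v into
+; the entry block and rewrites the loop's fdiv as an fmul by it.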
+
+; Function Attrs: noinline norecurse nounwind readnone ssp uwtable
+define zeroext i1 @f(double %v) #0 {
+entry:
+; CHECK-LABEL: @f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: fdiv fast double 1.000000e+00, %v
+ br label %loop
+
+loop: ; preds = %entry, %loop
+ %v3 = phi i32 [ 0, %entry ], [ %v11, %loop ]
+ %v4 = phi i32 [ 0, %entry ], [ %v12, %loop ]
+ %v5 = uitofp i32 %v4 to double
+
+; CHECK-LABEL: loop:
+; CHECK: fmul fast double
+; CHECK-NOT: fdiv
+ %v6 = fdiv fast double %v5, %v
+ %v7 = fptoui double %v6 to i64
+ %v8 = and i64 %v7, 1
+ %v9 = xor i64 %v8, 1
+ %v10 = trunc i64 %v9 to i32
+ %v11 = add i32 %v10, %v3
+ %v12 = add nuw i32 %v4, 1
+ %v13 = icmp eq i32 %v12, -1
+ br i1 %v13, label %end, label %loop
+
+end: ; preds = %loop
+ %v15 = phi i32 [ %v11, %loop ]
+ %v16 = icmp ne i32 %v15, 0
+ ret i1 %v16
+}
+
diff --git a/test/Transforms/LICM/hoist-nounwind.ll b/test/Transforms/LICM/hoist-nounwind.ll
index e9720235893a..9fc4903b8302 100644
--- a/test/Transforms/LICM/hoist-nounwind.ll
+++ b/test/Transforms/LICM/hoist-nounwind.ll
@@ -1,5 +1,5 @@
; RUN: opt -S -basicaa -licm < %s | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='lcssa,require<aa>,require<targetir>,require<scalar-evolution>,require<opt-remark-emit>,loop(licm)' -S %s | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='require<opt-remark-emit>,loop(licm)' -S %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Transforms/LICM/hoist-round.ll b/test/Transforms/LICM/hoist-round.ll
index 9c6a3a180b50..87a7050668de 100644
--- a/test/Transforms/LICM/hoist-round.ll
+++ b/test/Transforms/LICM/hoist-round.ll
@@ -18,6 +18,7 @@ target datalayout = "E-m:e-p:32:32-i8:8:8-i16:16:16-i64:32:32-f64:32:32-v64:32:3
; CHECK: call float @llvm.copysign.f32
; CHECK: call float @llvm.minnum.f32
; CHECK: call float @llvm.maxnum.f32
+; CHECK: call float @llvm.powi.f32
; CHECK: for.body:
define void @test(float %arg1, float %arg2) {
@@ -40,7 +41,8 @@ for.body:
%tmp.8 = call float @llvm.copysign.f32(float %tmp.7, float %arg2)
%tmp.9 = call float @llvm.minnum.f32(float %tmp.8, float %arg2)
%tmp.10 = call float @llvm.maxnum.f32(float %tmp.9, float %arg2)
- call void @consume(float %tmp.10)
+ %tmp.11 = call float @llvm.powi.f32(float %tmp.10, i32 4)
+ call void @consume(float %tmp.11)
%IND.new = add i32 %IND, 1
br label %for.head
@@ -60,3 +62,4 @@ declare float @llvm.fabs.f32(float)
declare float @llvm.copysign.f32(float, float)
declare float @llvm.minnum.f32(float, float)
declare float @llvm.maxnum.f32(float, float)
+declare float @llvm.powi.f32(float, i32)
diff --git a/test/Transforms/LICM/hoisting.ll b/test/Transforms/LICM/hoisting.ll
index 29595b3e1cc0..cbd17689e939 100644
--- a/test/Transforms/LICM/hoisting.ll
+++ b/test/Transforms/LICM/hoisting.ll
@@ -1,5 +1,5 @@
; RUN: opt < %s -licm -S | FileCheck %s
-; RUN: opt -lcssa %s | opt -aa-pipeline=basic-aa -passes='require<aa>,require<targetir>,require<scalar-evolution>,require<opt-remark-emit>,loop(licm)' -S | FileCheck %s
+; RUN: opt < %s -aa-pipeline=basic-aa -passes='require<opt-remark-emit>,loop(licm)' -S | FileCheck %s
@X = global i32 0 ; <i32*> [#uses=1]
@@ -149,3 +149,174 @@ latch:
return:
ret i32 %sum
}
+
+declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly
+declare void @llvm.invariant.end.p0i8({}*, i64, i8* nocapture) nounwind
+declare void @escaping.invariant.start({}*) nounwind
+; invariant.start dominates the load, and in this scope, the
+; load is invariant. So, we can hoist the `addrld` load out of the loop.
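+; (invariant.start(i64 %size, i8* %ptr) asserts that %size bytes at %ptr
+; stay unchanged until a matching invariant.end on its token, or for good
+; if the token never escapes; test_fence1 and test_fence2 below cover the
+; cases where that guarantee is cut short.)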
+define i32 @test_fence(i8* %addr, i32 %n, i8* %volatile) {
+; CHECK-LABEL: @test_fence
+; CHECK-LABEL: entry
+; CHECK: invariant.start
+; CHECK: %addrld = load atomic i32, i32* %addr.i unordered, align 8
+; CHECK: br label %loop
+entry:
+ %gep = getelementptr inbounds i8, i8* %addr, i64 8
+ %addr.i = bitcast i8* %gep to i32 *
+ store atomic i32 5, i32 * %addr.i unordered, align 8
+ fence release
+ %invst = call {}* @llvm.invariant.start.p0i8(i64 4, i8* %gep)
+ br label %loop
+
+loop:
+ %indvar = phi i32 [ %indvar.next, %loop ], [ 0, %entry ]
+ %sum = phi i32 [ %sum.next, %loop ], [ 0, %entry ]
+ %volload = load atomic i8, i8* %volatile unordered, align 8
+ fence acquire
+ %volchk = icmp eq i8 %volload, 0
+ %addrld = load atomic i32, i32* %addr.i unordered, align 8
+ %sel = select i1 %volchk, i32 0, i32 %addrld
+ %sum.next = add i32 %sel, %sum
+ %indvar.next = add i32 %indvar, 1
+ %cond = icmp slt i32 %indvar.next, %n
+ br i1 %cond, label %loop, label %loopexit
+
+loopexit:
+ ret i32 %sum
+}
+
+
+
+; Same as test above, but the load is no longer invariant (presence of
+; invariant.end). We cannot hoist the addrld out of the loop.
+define i32 @test_fence1(i8* %addr, i32 %n, i8* %volatile) {
+; CHECK-LABEL: @test_fence1
+; CHECK-LABEL: entry
+; CHECK: invariant.start
+; CHECK-NEXT: invariant.end
+; CHECK-NEXT: br label %loop
+entry:
+ %gep = getelementptr inbounds i8, i8* %addr, i64 8
+ %addr.i = bitcast i8* %gep to i32 *
+ store atomic i32 5, i32 * %addr.i unordered, align 8
+ fence release
+ %invst = call {}* @llvm.invariant.start.p0i8(i64 4, i8* %gep)
+ call void @llvm.invariant.end.p0i8({}* %invst, i64 4, i8* %gep)
+ br label %loop
+
+loop:
+ %indvar = phi i32 [ %indvar.next, %loop ], [ 0, %entry ]
+ %sum = phi i32 [ %sum.next, %loop ], [ 0, %entry ]
+ %volload = load atomic i8, i8* %volatile unordered, align 8
+ fence acquire
+ %volchk = icmp eq i8 %volload, 0
+ %addrld = load atomic i32, i32* %addr.i unordered, align 8
+ %sel = select i1 %volchk, i32 0, i32 %addrld
+ %sum.next = add i32 %sel, %sum
+ %indvar.next = add i32 %indvar, 1
+ %cond = icmp slt i32 %indvar.next, %n
+ br i1 %cond, label %loop, label %loopexit
+
+loopexit:
+ ret i32 %sum
+}
+
+; Same as the test above, but instead of invariant.end, we have the result of
+; invariant.start escaping through a call. We cannot hoist the load.
+define i32 @test_fence2(i8* %addr, i32 %n, i8* %volatile) {
+; CHECK-LABEL: @test_fence2
+; CHECK-LABEL: entry
+; CHECK-NOT: load
+; CHECK: br label %loop
+entry:
+ %gep = getelementptr inbounds i8, i8* %addr, i64 8
+ %addr.i = bitcast i8* %gep to i32 *
+ store atomic i32 5, i32 * %addr.i unordered, align 8
+ fence release
+ %invst = call {}* @llvm.invariant.start.p0i8(i64 4, i8* %gep)
+ call void @escaping.invariant.start({}* %invst)
+ br label %loop
+
+loop:
+ %indvar = phi i32 [ %indvar.next, %loop ], [ 0, %entry ]
+ %sum = phi i32 [ %sum.next, %loop ], [ 0, %entry ]
+ %volload = load atomic i8, i8* %volatile unordered, align 8
+ fence acquire
+ %volchk = icmp eq i8 %volload, 0
+ %addrld = load atomic i32, i32* %addr.i unordered, align 8
+ %sel = select i1 %volchk, i32 0, i32 %addrld
+ %sum.next = add i32 %sel, %sum
+ %indvar.next = add i32 %indvar, 1
+ %cond = icmp slt i32 %indvar.next, %n
+ br i1 %cond, label %loop, label %loopexit
+
+loopexit:
+ ret i32 %sum
+}
+
+; FIXME: invariant.start dominates the load, and in this scope, the
+; load is invariant. So, we can hoist the `addrld` load out of the loop.
+; Here the load operand %addr.i is bitcast before being passed to
+; invariant.start.
+define i32 @test_fence3(i32* %addr, i32 %n, i8* %volatile) {
+; CHECK-LABEL: @test_fence3
+; CHECK-LABEL: entry
+; CHECK: invariant.start
+; CHECK-NOT: %addrld = load atomic i32, i32* %addr.i unordered, align 8
+; CHECK: br label %loop
+entry:
+ %addr.i = getelementptr inbounds i32, i32* %addr, i64 8
+ %gep = bitcast i32* %addr.i to i8 *
+ store atomic i32 5, i32 * %addr.i unordered, align 8
+ fence release
+ %invst = call {}* @llvm.invariant.start.p0i8(i64 4, i8* %gep)
+ br label %loop
+
+loop:
+ %indvar = phi i32 [ %indvar.next, %loop ], [ 0, %entry ]
+ %sum = phi i32 [ %sum.next, %loop ], [ 0, %entry ]
+ %volload = load atomic i8, i8* %volatile unordered, align 8
+ fence acquire
+ %volchk = icmp eq i8 %volload, 0
+ %addrld = load atomic i32, i32* %addr.i unordered, align 8
+ %sel = select i1 %volchk, i32 0, i32 %addrld
+ %sum.next = add i32 %sel, %sum
+ %indvar.next = add i32 %indvar, 1
+ %cond = icmp slt i32 %indvar.next, %n
+ br i1 %cond, label %loop, label %loopexit
+
+loopexit:
+ ret i32 %sum
+}
+
+; We should not hoist the addrld out of the loop: invariant.start is inside
+; the loop and does not cover the loop entry, and %addr.i is stored to on
+; every iteration.
+define i32 @test_fence4(i32* %addr, i32 %n, i8* %volatile) {
+; CHECK-LABEL: @test_fence4
+; CHECK-LABEL: entry
+; CHECK-NOT: %addrld = load atomic i32, i32* %addr.i unordered, align 8
+; CHECK: br label %loop
+entry:
+ %addr.i = getelementptr inbounds i32, i32* %addr, i64 8
+ %gep = bitcast i32* %addr.i to i8 *
+ br label %loop
+
+loop:
+ %indvar = phi i32 [ %indvar.next, %loop ], [ 0, %entry ]
+ %sum = phi i32 [ %sum.next, %loop ], [ 0, %entry ]
+ store atomic i32 5, i32 * %addr.i unordered, align 8
+ fence release
+ %invst = call {}* @llvm.invariant.start.p0i8(i64 4, i8* %gep)
+ %volload = load atomic i8, i8* %volatile unordered, align 8
+ fence acquire
+ %volchk = icmp eq i8 %volload, 0
+ %addrld = load atomic i32, i32* %addr.i unordered, align 8
+ %sel = select i1 %volchk, i32 0, i32 %addrld
+ %sum.next = add i32 %sel, %sum
+ %indvar.next = add i32 %indvar, 1
+ %cond = icmp slt i32 %indvar.next, %n
+ br i1 %cond, label %loop, label %loopexit
+
+loopexit:
+ ret i32 %sum
+}
diff --git a/test/Transforms/LICM/loopsink.ll b/test/Transforms/LICM/loopsink.ll
index 5004752d1031..b203ea8b51ad 100644
--- a/test/Transforms/LICM/loopsink.ll
+++ b/test/Transforms/LICM/loopsink.ll
@@ -1,4 +1,5 @@
; RUN: opt -S -loop-sink < %s | FileCheck %s
+; RUN: opt -S -passes=loop-sink < %s | FileCheck %s
@g = global i32 0, align 4
diff --git a/test/Transforms/LICM/opt-remarks.ll b/test/Transforms/LICM/opt-remarks.ll
index f0ef386c9f9a..b44fc57131a5 100644
--- a/test/Transforms/LICM/opt-remarks.ll
+++ b/test/Transforms/LICM/opt-remarks.ll
@@ -10,7 +10,7 @@ Loop:
%j = phi i32 [ 0, %Entry ], [ %Next, %Loop ]
%addr = getelementptr i32, i32* %array, i32 %j
%a = load i32, i32* %addr
-; CHECK: remark: /tmp/kk.c:2:20: hosting load
+; CHECK: remark: /tmp/kk.c:2:20: hoisting load
%b = load i32, i32* %p, !dbg !8
%a2 = add i32 %a, %b
store i32 %a2, i32* %addr
diff --git a/test/Transforms/LICM/pr32129.ll b/test/Transforms/LICM/pr32129.ll
new file mode 100644
index 000000000000..2618afe46322
--- /dev/null
+++ b/test/Transforms/LICM/pr32129.ll
@@ -0,0 +1,18 @@
+; RUN: opt -S -licm -loop-unswitch -licm < %s | FileCheck %s
+
+declare void @llvm.experimental.guard(i1, ...)
+
+define void @test() {
+; CHECK-LABEL: @test(
+; CHECK-NOT: guard
+entry:
+ br label %header
+
+header:
+ br label %loop
+
+loop:
+ %0 = icmp ult i32 0, 400
+ call void (i1, ...) @llvm.experimental.guard(i1 %0, i32 9) [ "deopt"() ]
+ br i1 undef, label %header, label %loop
+}
diff --git a/test/Transforms/LICM/scalar-promote-unwind.ll b/test/Transforms/LICM/scalar-promote-unwind.ll
new file mode 100644
index 000000000000..f1f52eed1d4c
--- /dev/null
+++ b/test/Transforms/LICM/scalar-promote-unwind.ll
@@ -0,0 +1,263 @@
+; RUN: opt < %s -basicaa -licm -S | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='require<aa>,require<targetir>,require<scalar-evolution>,require<opt-remark-emit>,loop(licm)' -S %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Make sure we don't hoist the store out of the loop; %a would
+; have the wrong value if f() unwinds.
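+; (%a is caller-visible memory: if f() unwinds, the caller may still inspect
+; %a, so it must already hold the value of the last completed iteration.)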
+
+define void @test1(i32* nocapture noalias %a, i1 zeroext %y) uwtable {
+entry:
+ br label %for.body
+
+for.body:
+ %i.03 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
+ %0 = load i32, i32* %a, align 4
+ %add = add nsw i32 %0, 1
+ store i32 %add, i32* %a, align 4
+ br i1 %y, label %if.then, label %for.inc
+
+; CHECK: define void @test1
+; CHECK: load i32, i32*
+; CHECK-NEXT: add
+; CHECK-NEXT: store i32
+
+if.then:
+ tail call void @f()
+ br label %for.inc
+
+for.inc:
+ %inc = add nuw nsw i32 %i.03, 1
+ %exitcond = icmp eq i32 %inc, 10000
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:
+ ret void
+}
+
+; We can promote %a and move the store out of the loop here; if f() unwinds,
+; the lifetime of %a ends, so no stale value can be observed.
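+; A rough C sketch of the pattern (not the original source):
+;   void test2(bool y) {
+;     int a;                           // local; dies if f() unwinds
+;     for (int i = 0; i < 10000; ++i) {
+;       a += 1;
+;       if (y) f();                    // may unwind; a is unobservable then
+;     }
+;   }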
+
+define void @test2(i1 zeroext %y) uwtable {
+entry:
+ %a = alloca i32
+ br label %for.body
+
+for.body:
+ %i.03 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
+ %0 = load i32, i32* %a, align 4
+ %add = add nsw i32 %0, 1
+ store i32 %add, i32* %a, align 4
+ br i1 %y, label %if.then, label %for.inc
+
+if.then:
+ tail call void @f()
+ br label %for.inc
+
+for.inc:
+ %inc = add nuw nsw i32 %i.03, 1
+ %exitcond = icmp eq i32 %inc, 10000
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:
+; CHECK: define void @test2
+; CHECK: store i32
+; CHECK-NEXT: ret void
+ ret void
+}
+
+@_ZTIi = external constant i8*
+
+; In this test, the loop is within a try block. There is an explicit unwind edge out of the loop.
+; Make sure this edge is treated as a loop exit, and that the loads and stores are promoted as
+; expected.
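+; A rough C++ sketch of the shape being tested (not the original source):
+;   void loop_within_tryblock() {
+;     int a = 0;
+;     try {
+;       for (int i = 0; i < 1024; ++i) { a++; boo(); }  // boo() may throw
+;     } catch (int) {
+;     }
+;   }
+; On the unwind path the promoted value must be stored back, hence the
+; expected store in the lpad block.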
+define void @loop_within_tryblock() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+entry:
+ %a = alloca i32, align 4
+ store i32 0, i32* %a, align 4
+ br label %for.cond
+
+for.cond:
+ %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
+ %cmp = icmp slt i32 %i.0, 1024
+ br i1 %cmp, label %for.body, label %for.end
+
+; CHECK: for.body:
+; CHECK-NOT: load
+; CHECK-NOT: store
+; CHECK: invoke
+for.body:
+ %0 = load i32, i32* %a, align 4
+ %add = add nsw i32 %0, 1
+ store i32 %add, i32* %a, align 4
+ invoke void @boo()
+ to label %invoke.cont unwind label %lpad
+
+invoke.cont:
+ br label %for.inc
+
+for.inc:
+ %inc = add nsw i32 %i.0, 1
+ br label %for.cond
+
+; CHECK: lpad:
+; CHECK: store
+; CHECK: br
+lpad:
+ %1 = landingpad { i8*, i32 }
+ catch i8* bitcast (i8** @_ZTIi to i8*)
+ %2 = extractvalue { i8*, i32 } %1, 0
+ %3 = extractvalue { i8*, i32 } %1, 1
+ br label %catch.dispatch
+
+catch.dispatch:
+ %4 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) #3
+ %matches = icmp eq i32 %3, %4
+ br i1 %matches, label %catch, label %eh.resume
+
+catch:
+ %5 = call i8* @__cxa_begin_catch(i8* %2) #3
+ %6 = bitcast i8* %5 to i32*
+ %7 = load i32, i32* %6, align 4
+ call void @__cxa_end_catch() #3
+ br label %try.cont
+
+try.cont:
+ ret void
+
+for.end:
+ br label %try.cont
+
+eh.resume:
+ %lpad.val = insertvalue { i8*, i32 } undef, i8* %2, 0
+ %lpad.val3 = insertvalue { i8*, i32 } %lpad.val, i32 %3, 1
+ resume { i8*, i32 } %lpad.val3
+}
+
+
+; The malloc'ed memory is not captured and is therefore promoted.
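+; A rough C sketch (not the original source): the pointer never escapes
+; before the loop exits, so callees cannot observe the memory mid-loop:
+;   int *p = malloc(4);
+;   for (int i = 0; i < 1024; ++i) { (*p)++; boo(); }  // p never escapes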
+define void @malloc_no_capture() #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+entry:
+ %call = call i8* @malloc(i64 4)
+ %0 = bitcast i8* %call to i32*
+ br label %for.body
+
+; CHECK: for.body:
+; CHECK-NOT: load
+; CHECK-NOT: store
+; CHECK: br
+for.body:
+ %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.latch ]
+ %1 = load i32, i32* %0, align 4
+ %add = add nsw i32 %1, 1
+ store i32 %add, i32* %0, align 4
+ br label %for.call
+
+for.call:
+ invoke void @boo()
+ to label %invoke.cont unwind label %lpad
+
+invoke.cont:
+ br label %for.latch
+
+for.latch:
+ %inc = add i32 %i.0, 1
+ %cmp = icmp slt i32 %i.0, 1024
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end:
+ br label %fun.ret
+
+lpad:
+ %2 = landingpad { i8*, i32 }
+ catch i8* null
+ %3 = extractvalue { i8*, i32 } %2, 0
+ %4 = extractvalue { i8*, i32 } %2, 1
+ br label %catch
+
+catch:
+ %5 = call i8* @__cxa_begin_catch(i8* %3) #4
+ %6 = bitcast i32* %0 to i8*
+ call void @free(i8* %6)
+ call void @__cxa_end_catch()
+ br label %fun.ret
+
+fun.ret:
+ ret void
+}
+
+; The malloc'ed memory can be captured and is therefore not promoted.
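+; (The capture is the store of %0 into %A in %for.latch; once the pointer
+; has escaped, other code could observe or free the memory, so promoting
+; the loads and stores would be unsafe.)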
+define void @malloc_capture(i32** noalias %A) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+entry:
+ %call = call i8* @malloc(i64 4)
+ %0 = bitcast i8* %call to i32*
+ br label %for.body
+
+; CHECK: for.body:
+; CHECK: load
+; CHECK: store
+; CHECK: br
+for.body:
+ %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.latch ]
+ %1 = load i32, i32* %0, align 4
+ %add = add nsw i32 %1, 1
+ store i32 %add, i32* %0, align 4
+ br label %for.call
+
+for.call:
+ invoke void @boo_readnone()
+ to label %invoke.cont unwind label %lpad
+
+invoke.cont:
+ br label %for.latch
+
+for.latch:
+ store i32* %0, i32** %A
+ %inc = add i32 %i.0, 1
+ %cmp = icmp slt i32 %i.0, 1024
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end:
+ br label %fun.ret
+
+lpad:
+ %2 = landingpad { i8*, i32 }
+ catch i8* null
+ %3 = extractvalue { i8*, i32 } %2, 0
+ %4 = extractvalue { i8*, i32 } %2, 1
+ br label %catch
+
+catch:
+ %5 = call i8* @__cxa_begin_catch(i8* %3) #4
+ %6 = bitcast i32* %0 to i8*
+ call void @free(i8* %6)
+ call void @__cxa_end_catch()
+ br label %fun.ret
+
+fun.ret:
+ ret void
+}
+
+; Function Attrs: nounwind
+declare noalias i8* @malloc(i64)
+
+; Function Attrs: nounwind
+declare void @free(i8* nocapture)
+
+declare void @boo()
+
+; This is an artificial example: readnone functions by definition cannot
+; unwind via exceptions, since throwing would require calling the C++
+; exception-throwing runtime functions.
+; This function should only be used to test malloc_capture.
+declare void @boo_readnone() readnone
+
+declare i32 @__gxx_personality_v0(...)
+
+declare i8* @__cxa_begin_catch(i8*)
+
+declare void @__cxa_end_catch()
+
+declare i32 @llvm.eh.typeid.for(i8*)
+
+declare void @f() uwtable
diff --git a/test/Transforms/LICM/scalar_promote.ll b/test/Transforms/LICM/scalar-promote.ll
index c88701154b8f..89888546494f 100644
--- a/test/Transforms/LICM/scalar_promote.ll
+++ b/test/Transforms/LICM/scalar-promote.ll
@@ -378,6 +378,33 @@ exit:
ret i32 %ret
}
+define void @test10(i32 %i) {
+Entry:
+ br label %Loop
+; CHECK-LABEL: @test10(
+; CHECK: Entry:
+; CHECK-NEXT: load atomic i32, i32* @X unordered, align 4
+; CHECK-NEXT: br label %Loop
+
+
+Loop: ; preds = %Loop, %0
+ %j = phi i32 [ 0, %Entry ], [ %Next, %Loop ] ; <i32> [#uses=1]
+ %x = load atomic i32, i32* @X unordered, align 4
+ %x2 = add i32 %x, 1
+ store atomic i32 %x2, i32* @X unordered, align 4
+ %Next = add i32 %j, 1
+ %cond = icmp eq i32 %Next, 0
+ br i1 %cond, label %Out, label %Loop
+
+Out:
+ ret void
+; CHECK: Out:
+; CHECK-NEXT: %[[LCSSAPHI:.*]] = phi i32 [ %x2
+; CHECK-NEXT: store atomic i32 %[[LCSSAPHI]], i32* @X unordered, align 4
+; CHECK-NEXT: ret void
+
+}
+
!0 = !{!4, !4, i64 0}
!1 = !{!"omnipotent char", !2}
!2 = !{!"Simple C/C++ TBAA"}
diff --git a/test/Transforms/LICM/scalar_promote-unwind.ll b/test/Transforms/LICM/scalar_promote-unwind.ll
deleted file mode 100644
index dd3693b4af63..000000000000
--- a/test/Transforms/LICM/scalar_promote-unwind.ll
+++ /dev/null
@@ -1,72 +0,0 @@
-; RUN: opt < %s -basicaa -licm -S | FileCheck %s
-; RUN: opt -aa-pipeline=basic-aa -passes='require<aa>,require<targetir>,require<scalar-evolution>,require<opt-remark-emit>,loop(licm)' -S %s | FileCheck %s
-
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-; Make sure we don't hoist the store out of the loop; %a would
-; have the wrong value if f() unwinds
-
-define void @test1(i32* nocapture noalias %a, i1 zeroext %y) uwtable {
-entry:
- br label %for.body
-
-for.body:
- %i.03 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
- %0 = load i32, i32* %a, align 4
- %add = add nsw i32 %0, 1
- store i32 %add, i32* %a, align 4
- br i1 %y, label %if.then, label %for.inc
-
-; CHECK: define void @test1
-; CHECK: load i32, i32*
-; CHECK-NEXT: add
-; CHECK-NEXT: store i32
-
-if.then:
- tail call void @f()
- br label %for.inc
-
-for.inc:
- %inc = add nuw nsw i32 %i.03, 1
- %exitcond = icmp eq i32 %inc, 10000
- br i1 %exitcond, label %for.cond.cleanup, label %for.body
-
-for.cond.cleanup:
- ret void
-}
-
-; We can hoist the store out of the loop here; if f() unwinds,
-; the lifetime of %a ends.
-
-define void @test2(i1 zeroext %y) uwtable {
-entry:
- %a = alloca i32
- br label %for.body
-
-for.body:
- %i.03 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
- %0 = load i32, i32* %a, align 4
- %add = add nsw i32 %0, 1
- store i32 %add, i32* %a, align 4
- br i1 %y, label %if.then, label %for.inc
-
-if.then:
- tail call void @f()
- br label %for.inc
-
-for.inc:
- %inc = add nuw nsw i32 %i.03, 1
- %exitcond = icmp eq i32 %inc, 10000
- br i1 %exitcond, label %for.cond.cleanup, label %for.body
-
-for.cond.cleanup:
- ret void
-
-; CHECK: define void @test2
-; CHECK: store i32
-; CHECK-NEXT: ret void
- ret void
-}
-
-declare void @f() uwtable
diff --git a/test/Transforms/LICM/sink.ll b/test/Transforms/LICM/sink.ll
index cf169ddc12a9..70fa6fa13e3e 100644
--- a/test/Transforms/LICM/sink.ll
+++ b/test/Transforms/LICM/sink.ll
@@ -1,5 +1,7 @@
; RUN: opt -S -licm < %s | FileCheck %s --check-prefix=CHECK-LICM
; RUN: opt -S -licm < %s | opt -S -loop-sink | FileCheck %s --check-prefix=CHECK-SINK
+; RUN: opt -S < %s -passes='require<opt-remark-emit>,loop(licm),loop-sink' \
+; RUN: | FileCheck %s --check-prefix=CHECK-SINK
; Original source code:
; int g;
diff --git a/test/Transforms/LICM/unrolled-deeply-nested.ll b/test/Transforms/LICM/unrolled-deeply-nested.ll
new file mode 100644
index 000000000000..c0f2c9818000
--- /dev/null
+++ b/test/Transforms/LICM/unrolled-deeply-nested.ll
@@ -0,0 +1,76 @@
+; Test that LICM correctly detects conflicting accesses to memory in deeply
+; nested subloops. This works in the legacy PM due to a special retained map of
+; alias information for inner loops, and in the new PM it is recomputed for each
+; loop.
+;
+; RUN: opt -S -aa-pipeline=basic-aa -passes='require<opt-remark-emit>,loop(licm)' < %s | FileCheck %s
+; RUN: opt -S -basicaa -licm < %s | FileCheck %s
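+;
+; Concretely, the innermost loop memcpys into %a and %b, so the load of %a in
+; the outer loop header must stay in %l.0.header rather than be hoisted to
+; the entry block.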
+
+define i32 @test(i32* %a, i64 %n.0, i64 %n.0.0, i64 %n.0.0.0, i64 %n.0.0.0.0) nounwind uwtable readonly {
+; CHECK-LABEL: define i32 @test
+entry:
+ %b = alloca i32
+ %c = alloca i32
+ %a.i8 = bitcast i32* %a to i8*
+ %b.i8 = bitcast i32* %b to i8*
+ %c.i8 = bitcast i32* %c to i8*
+ br label %l.0.header
+; CHECK: %b = alloca i32
+; CHECK: %c = alloca i32
+; CHECK: %[[AI8:.*]] = bitcast i32* %a to i8*
+; CHECK: %[[BI8:.*]] = bitcast i32* %b to i8*
+; CHECK: %[[CI8:.*]] = bitcast i32* %c to i8*
+; CHECK-NOT: load
+; CHECK: br
+
+l.0.header:
+ %iv.0 = phi i64 [ %iv.0.next, %l.0.latch ], [ 0, %entry ]
+ %iv.0.next = add i64 %iv.0, 1
+ %exitcond.0 = icmp eq i64 %iv.0.next, %n.0
+ %a.val = load i32, i32* %a
+ store i32 %a.val, i32* %b
+ %c.val = trunc i64 %iv.0 to i32
+ store i32 %c.val, i32* %c
+ br label %l.0.0.header
+; CHECK: %[[AV:.*]] = load i32, i32* %a
+; CHECK: store i32 %[[AV]], i32* %b
+; CHECK: %[[CT:.*]] = trunc i64 {{.*}} to i32
+; CHECK: store i32 %[[CT]], i32* %c
+; CHECK: br
+
+l.0.0.header:
+ %iv.0.0 = phi i64 [ %iv.0.0.next, %l.0.0.latch ], [ 0, %l.0.header ]
+ %iv.0.0.next = add i64 %iv.0.0, 1
+ %exitcond.0.0 = icmp eq i64 %iv.0.0.next, %n.0.0
+ br label %l.0.0.0.header
+; CHECK: br
+
+l.0.0.0.header:
+ %iv.0.0.0 = phi i64 [ %iv.0.0.0.next, %l.0.0.0.header ], [ 0, %l.0.0.header ]
+ %iv.0.0.0.next = add i64 %iv.0.0.0, 1
+ %exitcond.0.0.0 = icmp eq i64 %iv.0.0.0.next, %n.0.0.0
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a.i8, i8* %c.i8, i64 4, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %b.i8, i8* %c.i8, i64 4, i32 1, i1 false)
+ br i1 %exitcond.0.0.0, label %l.0.0.0.header, label %l.0.0.latch
+; CHECK: call void @llvm.memcpy.{{.*}}(i8* %[[AI8]], i8* %[[CI8]], i64 4
+; CHECK: call void @llvm.memcpy.{{.*}}(i8* %[[BI8]], i8* %[[CI8]], i64 4
+; CHECK: br
+
+l.0.0.latch:
+ br i1 %exitcond.0.0, label %l.0.0.header, label %l.0.latch
+; CHECK: br
+
+l.0.latch:
+ %b.val = load i32, i32* %b
+ br i1 %exitcond.0, label %exit, label %l.0.header
+; CHECK: %[[BV:.*]] = load i32, i32* %b
+; CHECK: br
+
+exit:
+ %result.lcssa = phi i32 [ %b.val, %l.0.latch ]
+ ret i32 %b.val
+; CHECK: %[[LCSSA:.*]] = phi i32 [ %[[BV]], %{{.*}} ]
+; CHECK: ret i32 %[[LCSSA]]
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1)
diff --git a/test/Transforms/LoadCombine/deadcode.ll b/test/Transforms/LoadCombine/deadcode.ll
new file mode 100644
index 000000000000..ed72824ffb44
--- /dev/null
+++ b/test/Transforms/LoadCombine/deadcode.ll
@@ -0,0 +1,39 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -load-combine -S < %s | FileCheck %s
+
+; Dead loops like the one in this test case can be created by -jump-threading
+; (this was detected with a csmith-generated program).
+;
+; According to -verify this is valid input (even if it is debatable whether
+; the dead loop really satisfies SSA form).
+;
+; The problem found was that the -load-combine pass ended up in an infinite
+; loop when analysing the 'bb1' basic block.
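+;
+; (Presumably the use of %_tmp10 above its own definition is accepted because
+; bb1 is unreachable: the verifier's dominance check is trivially satisfied
+; in unreachable code.)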
+define void @test1() {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: ret void
+; CHECK: bb1:
+; CHECK-NEXT: [[_TMP4:%.*]] = load i16, i16* [[_TMP10:%.*]], align 1
+; CHECK-NEXT: [[_TMP10]] = getelementptr i16, i16* [[_TMP10]], i16 1
+; CHECK-NEXT: br label [[BB1:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: [[_TMP7:%.*]] = load i16, i16* [[_TMP12:%.*]], align 1
+; CHECK-NEXT: [[_TMP12]] = getelementptr i16, i16* [[_TMP12]], i16 1
+; CHECK-NEXT: br label [[BB2:%.*]]
+;
+ ret void
+
+bb1:
+ %_tmp4 = load i16, i16* %_tmp10, align 1
+ %_tmp10 = getelementptr i16, i16* %_tmp10, i16 1
+ br label %bb1
+
+; A second basic block. Running the test with -debug-pass=Executions shows
+; that Dominator Tree Construction runs only once per function, even when
+; the function has multiple basic blocks.
+bb2:
+ %_tmp7 = load i16, i16* %_tmp12, align 1
+ %_tmp12 = getelementptr i16, i16* %_tmp12, i16 1
+ br label %bb2
+
+}
diff --git a/test/Transforms/LoadCombine/load-combine-aa.ll b/test/Transforms/LoadCombine/load-combine-aa.ll
index fc639c0bc05d..5a577516fb47 100644
--- a/test/Transforms/LoadCombine/load-combine-aa.ll
+++ b/test/Transforms/LoadCombine/load-combine-aa.ll
@@ -1,4 +1,4 @@
-; RUN: opt -basicaa -load-combine -instcombine -S < %s | FileCheck %s
+; RUN: opt -basicaa -load-combine -S < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
@@ -22,6 +22,7 @@ define i64 @test1(i32* nocapture readonly noalias %a, i32* nocapture readonly no
define i64 @test2(i32* nocapture readonly %a, i32* nocapture readonly %b) {
; CHECK-LABEL: @test2
+; CHECK-NOT: load i64
; CHECK: load i32, i32*
; CHECK: load i32, i32*
; CHECK: ret i64
@@ -37,3 +38,26 @@ define i64 @test2(i32* nocapture readonly %a, i32* nocapture readonly %b) {
ret i64 %add
}
+%rec11 = type { i16, i16, i16 }
+@str = global %rec11 { i16 1, i16 2, i16 3 }
+
+; PR31517 - Check that loads which span an aliasing store are not combined.
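+;
+; A rough C sketch of the hazard (not the original source):
+;   struct rec { short a, b, c; } str = {1, 2, 3};
+;   short t = str.b;      // first load
+;   str.a = t;            // aliasing store
+;   return str.a == 3;    // second load must see the store
+; Combining the two i16 loads into one wider load would move the load of
+; str.a above the aliasing store and read a stale value.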
+define i16 @test3() {
+; CHECK-LABEL: @test3
+
+; CHECK-NOT: load i32
+; CHECK: load i16, i16*
+; CHECK: store i16
+; CHECK: load i16, i16*
+; CHECK: ret i16
+
+ %_tmp9 = getelementptr %rec11, %rec11* @str, i16 0, i32 1
+ %_tmp10 = load i16, i16* %_tmp9
+ %_tmp12 = getelementptr %rec11, %rec11* @str, i16 0, i32 0
+ store i16 %_tmp10, i16* %_tmp12
+ %_tmp13 = getelementptr %rec11, %rec11* @str, i16 0, i32 0
+ %_tmp14 = load i16, i16* %_tmp13
+ %_tmp15 = icmp eq i16 %_tmp14, 3
+ %_tmp16 = select i1 %_tmp15, i16 1, i16 0
+ ret i16 %_tmp16
+}
diff --git a/test/Transforms/LoadStoreVectorizer/AMDGPU/aa-metadata.ll b/test/Transforms/LoadStoreVectorizer/AMDGPU/aa-metadata.ll
index e6904ee50bca..4b2dab47a20f 100644
--- a/test/Transforms/LoadStoreVectorizer/AMDGPU/aa-metadata.ll
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/aa-metadata.ll
@@ -15,7 +15,7 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
; NOSCOPE: load float
; NOSCOPE: store float
; NOSCOPE: store float
-define void @vectorize_alias_scope(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c) #0 {
+define amdgpu_kernel void @vectorize_alias_scope(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c) #0 {
entry:
%a.idx.1 = getelementptr inbounds float, float addrspace(1)* %a, i64 1
store float 0.0, float addrspace(1)* %a, align 4, !noalias !0
diff --git a/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll b/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
index 4369dafa4258..368dc6ab361e 100644
--- a/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
@@ -10,7 +10,7 @@ target triple = "amdgcn--"
; ALIGNED: load i8, i8* %ptr0, align 1{{$}}
; ALIGNED: load i8, i8* %ptr1, align 1{{$}}
-define void @load_unknown_offset_align1_i8(i8 addrspace(1)* noalias %out, i32 %offset) #0 {
+define amdgpu_kernel void @load_unknown_offset_align1_i8(i8 addrspace(1)* noalias %out, i32 %offset) #0 {
%alloca = alloca [128 x i8], align 1
%ptr0 = getelementptr inbounds [128 x i8], [128 x i8]* %alloca, i32 0, i32 %offset
%val0 = load i8, i8* %ptr0, align 1
@@ -27,7 +27,7 @@ define void @load_unknown_offset_align1_i8(i8 addrspace(1)* noalias %out, i32 %o
; ALIGNED: load i16, i16* %ptr0, align 1{{$}}
; ALIGNED: load i16, i16* %ptr1, align 1{{$}}
-define void @load_unknown_offset_align1_i16(i16 addrspace(1)* noalias %out, i32 %offset) #0 {
+define amdgpu_kernel void @load_unknown_offset_align1_i16(i16 addrspace(1)* noalias %out, i32 %offset) #0 {
%alloca = alloca [128 x i16], align 1
%ptr0 = getelementptr inbounds [128 x i16], [128 x i16]* %alloca, i32 0, i32 %offset
%val0 = load i16, i16* %ptr0, align 1
@@ -47,7 +47,7 @@ define void @load_unknown_offset_align1_i16(i16 addrspace(1)* noalias %out, i32
; ALIGNED: load i32, i32* %ptr0, align 1
; ALIGNED: load i32, i32* %ptr1, align 1
-define void @load_unknown_offset_align1_i32(i32 addrspace(1)* noalias %out, i32 %offset) #0 {
+define amdgpu_kernel void @load_unknown_offset_align1_i32(i32 addrspace(1)* noalias %out, i32 %offset) #0 {
%alloca = alloca [128 x i32], align 1
%ptr0 = getelementptr inbounds [128 x i32], [128 x i32]* %alloca, i32 0, i32 %offset
%val0 = load i32, i32* %ptr0, align 1
@@ -64,8 +64,11 @@ define void @load_unknown_offset_align1_i32(i32 addrspace(1)* noalias %out, i32
; ALL: alloca [128 x i32], align 16
; UNALIGNED: load <2 x i32>, <2 x i32>* %{{[0-9]+}}, align 1{{$}}
-; ALIGNED: load <2 x i32>, <2 x i32>* %{{[0-9]+}}, align 4{{$}}
-define void @load_alloca16_unknown_offset_align1_i32(i32 addrspace(1)* noalias %out, i32 %offset) #0 {
+
+; FIXME: Should change alignment
+; ALIGNED: load i32
+; ALIGNED: load i32
+define amdgpu_kernel void @load_alloca16_unknown_offset_align1_i32(i32 addrspace(1)* noalias %out, i32 %offset) #0 {
%alloca = alloca [128 x i32], align 16
%ptr0 = getelementptr inbounds [128 x i32], [128 x i32]* %alloca, i32 0, i32 %offset
%val0 = load i32, i32* %ptr0, align 1
@@ -82,7 +85,7 @@ define void @load_alloca16_unknown_offset_align1_i32(i32 addrspace(1)* noalias %
; ALIGNED: store i8 9, i8* %ptr0, align 1{{$}}
; ALIGNED: store i8 10, i8* %ptr1, align 1{{$}}
-define void @store_unknown_offset_align1_i8(i8 addrspace(1)* noalias %out, i32 %offset) #0 {
+define amdgpu_kernel void @store_unknown_offset_align1_i8(i8 addrspace(1)* noalias %out, i32 %offset) #0 {
%alloca = alloca [128 x i8], align 1
%ptr0 = getelementptr inbounds [128 x i8], [128 x i8]* %alloca, i32 0, i32 %offset
store i8 9, i8* %ptr0, align 1
@@ -97,7 +100,7 @@ define void @store_unknown_offset_align1_i8(i8 addrspace(1)* noalias %out, i32 %
; ALIGNED: store i16 9, i16* %ptr0, align 1{{$}}
; ALIGNED: store i16 10, i16* %ptr1, align 1{{$}}
-define void @store_unknown_offset_align1_i16(i16 addrspace(1)* noalias %out, i32 %offset) #0 {
+define amdgpu_kernel void @store_unknown_offset_align1_i16(i16 addrspace(1)* noalias %out, i32 %offset) #0 {
%alloca = alloca [128 x i16], align 1
%ptr0 = getelementptr inbounds [128 x i16], [128 x i16]* %alloca, i32 0, i32 %offset
store i16 9, i16* %ptr0, align 1
@@ -116,7 +119,7 @@ define void @store_unknown_offset_align1_i16(i16 addrspace(1)* noalias %out, i32
; ALIGNED: store i32 9, i32* %ptr0, align 1
; ALIGNED: store i32 10, i32* %ptr1, align 1
-define void @store_unknown_offset_align1_i32(i32 addrspace(1)* noalias %out, i32 %offset) #0 {
+define amdgpu_kernel void @store_unknown_offset_align1_i32(i32 addrspace(1)* noalias %out, i32 %offset) #0 {
%alloca = alloca [128 x i32], align 1
%ptr0 = getelementptr inbounds [128 x i32], [128 x i32]* %alloca, i32 0, i32 %offset
store i32 9, i32* %ptr0, align 1
diff --git a/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll b/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll
index 25abb98c6ebd..8a75b8743fa5 100644
--- a/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll
@@ -8,7 +8,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1
; CHECK: sext i32 %id.x to i64
; CHECK: load <2 x float>
; CHECK: store <2 x float> zeroinitializer
-define void @basic_merge_sext_index(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c) #0 {
+define amdgpu_kernel void @basic_merge_sext_index(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c) #0 {
entry:
%id.x = call i32 @llvm.amdgcn.workitem.id.x()
%sext.id.x = sext i32 %id.x to i64
@@ -32,7 +32,7 @@ entry:
; CHECK: zext i32 %id.x to i64
; CHECK: load <2 x float>
; CHECK: store <2 x float>
-define void @basic_merge_zext_index(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c) #0 {
+define amdgpu_kernel void @basic_merge_zext_index(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c) #0 {
entry:
%id.x = call i32 @llvm.amdgcn.workitem.id.x()
%zext.id.x = zext i32 %id.x to i64
@@ -54,7 +54,7 @@ entry:
; CHECK-LABEL: @merge_op_zext_index(
; CHECK: load <2 x float>
; CHECK: store <2 x float>
-define void @merge_op_zext_index(float addrspace(1)* nocapture noalias %a, float addrspace(1)* nocapture noalias %b, float addrspace(1)* nocapture readonly noalias %c) #0 {
+define amdgpu_kernel void @merge_op_zext_index(float addrspace(1)* nocapture noalias %a, float addrspace(1)* nocapture noalias %b, float addrspace(1)* nocapture readonly noalias %c) #0 {
entry:
%id.x = call i32 @llvm.amdgcn.workitem.id.x()
%shl = shl i32 %id.x, 2
@@ -81,7 +81,7 @@ entry:
; CHECK-LABEL: @merge_op_sext_index(
; CHECK: load <2 x float>
; CHECK: store <2 x float>
-define void @merge_op_sext_index(float addrspace(1)* nocapture noalias %a, float addrspace(1)* nocapture noalias %b, float addrspace(1)* nocapture readonly noalias %c) #0 {
+define amdgpu_kernel void @merge_op_sext_index(float addrspace(1)* nocapture noalias %a, float addrspace(1)* nocapture noalias %b, float addrspace(1)* nocapture readonly noalias %c) #0 {
entry:
%id.x = call i32 @llvm.amdgcn.workitem.id.x()
%shl = shl i32 %id.x, 2
@@ -112,7 +112,7 @@ entry:
; CHECK: loop:
; CHECK: load <2 x i32>
; CHECK: store <2 x i32>
-define void @zext_trunc_phi_1(i32 addrspace(1)* nocapture noalias %a, i32 addrspace(1)* nocapture noalias %b, i32 addrspace(1)* nocapture readonly noalias %c, i32 %n, i64 %arst, i64 %aoeu) #0 {
+define amdgpu_kernel void @zext_trunc_phi_1(i32 addrspace(1)* nocapture noalias %a, i32 addrspace(1)* nocapture noalias %b, i32 addrspace(1)* nocapture readonly noalias %c, i32 %n, i64 %arst, i64 %aoeu) #0 {
entry:
%cmp0 = icmp eq i32 %n, 0
br i1 %cmp0, label %exit, label %loop
diff --git a/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll b/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll
index 2b2f9cbcf508..6182c09abcfe 100644
--- a/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll
@@ -11,7 +11,7 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
; CHECK: load <2 x float>
; CHECK: %w = add i32 %y, 9
; CHECK: %foo = add i32 %z, %w
-define void @insert_load_point(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c, i64 %idx, i32 %x, i32 %y) #0 {
+define amdgpu_kernel void @insert_load_point(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c, i64 %idx, i32 %x, i32 %y) #0 {
entry:
%a.idx.x = getelementptr inbounds float, float addrspace(1)* %a, i64 %idx
%c.idx.x = getelementptr inbounds float, float addrspace(1)* %c, i64 %idx
@@ -38,7 +38,7 @@ entry:
; CHECK: %w = add i32 %y, 9
; CHECK: store <2 x float>
; CHECK: %foo = add i32 %z, %w
-define void @insert_store_point(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c, i64 %idx, i32 %x, i32 %y) #0 {
+define amdgpu_kernel void @insert_store_point(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c, i64 %idx, i32 %x, i32 %y) #0 {
entry:
%a.idx.x = getelementptr inbounds float, float addrspace(1)* %a, i64 %idx
%c.idx.x = getelementptr inbounds float, float addrspace(1)* %c, i64 %idx
diff --git a/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll b/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll
index 4d6240a9aa9d..3f6d7ee7dcac 100644
--- a/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll
@@ -8,7 +8,7 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
; CHECK: store double 0.000000e+00, double addrspace(1)* %a,
; CHECK: load double
; CHECK: store double 0.000000e+00, double addrspace(1)* %a.idx.1
-define void @interleave(double addrspace(1)* nocapture %a, double addrspace(1)* nocapture %b, double addrspace(1)* nocapture readonly %c) #0 {
+define amdgpu_kernel void @interleave(double addrspace(1)* nocapture %a, double addrspace(1)* nocapture %b, double addrspace(1)* nocapture readonly %c) #0 {
entry:
%a.idx.1 = getelementptr inbounds double, double addrspace(1)* %a, i64 1
%c.idx.1 = getelementptr inbounds double, double addrspace(1)* %c, i64 1
diff --git a/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll b/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
index fd0aaa615db0..0fcdc7b9083a 100644
--- a/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
@@ -1,8 +1,9 @@
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-4 -load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT4,ALIGNED,ALL %s
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-8 -load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT8,ALIGNED,ALL %s
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-8,+unaligned-scratch-access -load-store-vectorizer -S -o - %s | FileCheck -check-prefix=ELT8-UNALIGNED -check-prefix=ALL %s
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-16 -load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT16,ALIGNED,ALL %s
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-16,+unaligned-scratch-access -load-store-vectorizer -S -o - %s | FileCheck -check-prefix=ELT16-UNALIGNED -check-prefix=ALL %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-4,-unaligned-scratch-access -load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT4,ELT4-ALIGNED,ALIGNED,ALL %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-8,-unaligned-scratch-access -load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT8,ELT8-ALIGNED,ALIGNED,ALL %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-16,-unaligned-scratch-access -load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT16,ELT16-ALIGNED,ALIGNED,ALL %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-4,+unaligned-scratch-access -load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT4,ELT4-UNALIGNED,UNALIGNED,ALL %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-8,+unaligned-scratch-access -load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT8,ELT8-UNALIGNED,UNALIGNED,ALL %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-16,+unaligned-scratch-access -load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT16,ELT16-UNALIGNED,UNALIGNED,ALL %s
target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
@@ -16,7 +17,7 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
; ELT8-UNALIGNED: store <2 x i32>
; ELT16-UNALIGNED: store <4 x i32>
-define void @merge_private_store_4_vector_elts_loads_v4i32(i32* %out) #0 {
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32(i32* %out) #0 {
%out.gep.1 = getelementptr i32, i32* %out, i32 1
%out.gep.2 = getelementptr i32, i32* %out, i32 2
%out.gep.3 = getelementptr i32, i32* %out, i32 3
@@ -28,9 +29,63 @@ define void @merge_private_store_4_vector_elts_loads_v4i32(i32* %out) #0 {
ret void
}
+; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i32_align1(
+; ALIGNED: store i32 9, i32* %out, align 1
+; ALIGNED: store i32 1, i32* %out.gep.1, align 1
+; ALIGNED: store i32 23, i32* %out.gep.2, align 1
+; ALIGNED: store i32 19, i32* %out.gep.3, align 1
+
+; ELT16-UNALIGNED: store <4 x i32> <i32 9, i32 1, i32 23, i32 19>, <4 x i32>* %1, align 1
+
+; ELT8-UNALIGNED: store <2 x i32> <i32 9, i32 1>, <2 x i32>* %1, align 1
+; ELT8-UNALIGNED: store <2 x i32> <i32 23, i32 19>, <2 x i32>* %2, align 1
+
+; ELT4-UNALIGNED: store i32
+; ELT4-UNALIGNED: store i32
+; ELT4-UNALIGNED: store i32
+; ELT4-UNALIGNED: store i32
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32_align1(i32* %out) #0 {
+ %out.gep.1 = getelementptr i32, i32* %out, i32 1
+ %out.gep.2 = getelementptr i32, i32* %out, i32 2
+ %out.gep.3 = getelementptr i32, i32* %out, i32 3
+
+ store i32 9, i32* %out, align 1
+ store i32 1, i32* %out.gep.1, align 1
+ store i32 23, i32* %out.gep.2, align 1
+ store i32 19, i32* %out.gep.3, align 1
+ ret void
+}
+
+; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i32_align2(
+; ALIGNED: store i32 9, i32* %out, align 2
+; ALIGNED: store i32 1, i32* %out.gep.1, align 2
+; ALIGNED: store i32 23, i32* %out.gep.2, align 2
+; ALIGNED: store i32 19, i32* %out.gep.3, align 2
+
+; ELT16-UNALIGNED: store <4 x i32> <i32 9, i32 1, i32 23, i32 19>, <4 x i32>* %1, align 2
+
+; ELT8-UNALIGNED: store <2 x i32>
+; ELT8-UNALIGNED: store <2 x i32>
+
+; ELT4-UNALIGNED: store i32
+; ELT4-UNALIGNED: store i32
+; ELT4-UNALIGNED: store i32
+; ELT4-UNALIGNED: store i32
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32_align2(i32* %out) #0 {
+ %out.gep.1 = getelementptr i32, i32* %out, i32 1
+ %out.gep.2 = getelementptr i32, i32* %out, i32 2
+ %out.gep.3 = getelementptr i32, i32* %out, i32 3
+
+ store i32 9, i32* %out, align 2
+ store i32 1, i32* %out.gep.1, align 2
+ store i32 23, i32* %out.gep.2, align 2
+ store i32 19, i32* %out.gep.3, align 2
+ ret void
+}
+
; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i8(
; ALL: store <4 x i8>
-define void @merge_private_store_4_vector_elts_loads_v4i8(i8* %out) #0 {
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i8(i8* %out) #0 {
%out.gep.1 = getelementptr i8, i8* %out, i32 1
%out.gep.2 = getelementptr i8, i8* %out, i32 2
%out.gep.3 = getelementptr i8, i8* %out, i32 3
@@ -42,9 +97,28 @@ define void @merge_private_store_4_vector_elts_loads_v4i8(i8* %out) #0 {
ret void
}
+; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i8_align1(
+; ALIGNED: store i8
+; ALIGNED: store i8
+; ALIGNED: store i8
+; ALIGNED: store i8
+
+; UNALIGNED: store <4 x i8> <i8 9, i8 1, i8 23, i8 19>, <4 x i8>* %1, align 1
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i8_align1(i8* %out) #0 {
+ %out.gep.1 = getelementptr i8, i8* %out, i32 1
+ %out.gep.2 = getelementptr i8, i8* %out, i32 2
+ %out.gep.3 = getelementptr i8, i8* %out, i32 3
+
+ store i8 9, i8* %out, align 1
+ store i8 1, i8* %out.gep.1, align 1
+ store i8 23, i8* %out.gep.2, align 1
+ store i8 19, i8* %out.gep.3, align 1
+ ret void
+}
+
; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v2i16(
; ALL: store <2 x i16>
-define void @merge_private_store_4_vector_elts_loads_v2i16(i16* %out) #0 {
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16(i16* %out) #0 {
%out.gep.1 = getelementptr i16, i16* %out, i32 1
store i16 9, i16* %out, align 4
@@ -52,4 +126,106 @@ define void @merge_private_store_4_vector_elts_loads_v2i16(i16* %out) #0 {
ret void
}
+; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v2i16_align2(
+; ALIGNED: store i16
+; ALIGNED: store i16
+
+; UNALIGNED: store <2 x i16> <i16 9, i16 12>, <2 x i16>* %1, align 2
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16_align2(i16* %out) #0 {
+ %out.gep.1 = getelementptr i16, i16* %out, i32 1
+
+ store i16 9, i16* %out, align 2
+ store i16 12, i16* %out.gep.1, align 2
+ ret void
+}
+
+; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v2i16_align1(
+; ALIGNED: store i16
+; ALIGNED: store i16
+
+; UNALIGNED: store <2 x i16> <i16 9, i16 12>, <2 x i16>* %1, align 1
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16_align1(i16* %out) #0 {
+ %out.gep.1 = getelementptr i16, i16* %out, i32 1
+
+ store i16 9, i16* %out, align 1
+ store i16 12, i16* %out.gep.1, align 1
+ ret void
+}
+
+; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v2i16_align8(
+; ALL: store <2 x i16> <i16 9, i16 12>, <2 x i16>* %1, align 8
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16_align8(i16* %out) #0 {
+ %out.gep.1 = getelementptr i16, i16* %out, i32 1
+
+ store i16 9, i16* %out, align 8
+ store i16 12, i16* %out.gep.1, align 2
+ ret void
+}
+
+; ALL-LABEL: @merge_private_store_3_vector_elts_loads_v4i32
+; ELT4: store i32
+; ELT4: store i32
+; ELT4: store i32
+
+; ELT8-ALIGNED: store i32
+; ELT8-ALIGNED: store i32
+; ELT8-ALIGNED: store i32
+
+; ELT8-UNALIGNED: store <2 x i32>
+; ELT8-UNALIGNED: store i32
+
+; ELT16-ALIGNED: store i32
+; ELT16-ALIGNED: store i32
+; ELT16-ALIGNED: store i32
+
+; ELT16-UNALIGNED: store <3 x i32>
+define amdgpu_kernel void @merge_private_store_3_vector_elts_loads_v4i32(i32* %out) #0 {
+ %out.gep.1 = getelementptr i32, i32* %out, i32 1
+ %out.gep.2 = getelementptr i32, i32* %out, i32 2
+
+ store i32 9, i32* %out
+ store i32 1, i32* %out.gep.1
+ store i32 23, i32* %out.gep.2
+ ret void
+}
+
+; ALL-LABEL: @merge_private_store_3_vector_elts_loads_v4i32_align1(
+; ALIGNED: store i32
+; ALIGNED: store i32
+; ALIGNED: store i32
+
+; ELT4-UNALIGNED: store i32
+; ELT4-UNALIGNED: store i32
+; ELT4-UNALIGNED: store i32
+
+; ELT8-UNALIGNED: store <2 x i32>
+; ELT8-UNALIGNED: store i32
+
+; ELT16-UNALIGNED: store <3 x i32>
+define amdgpu_kernel void @merge_private_store_3_vector_elts_loads_v4i32_align1(i32* %out) #0 {
+ %out.gep.1 = getelementptr i32, i32* %out, i32 1
+ %out.gep.2 = getelementptr i32, i32* %out, i32 2
+
+ store i32 9, i32* %out, align 1
+ store i32 1, i32* %out.gep.1, align 1
+ store i32 23, i32* %out.gep.2, align 1
+ ret void
+}
+
+; ALL-LABEL: @merge_private_store_3_vector_elts_loads_v4i8_align1(
+; ALIGNED: store i8
+; ALIGNED: store i8
+; ALIGNED: store i8
+
+; UNALIGNED: store <3 x i8>
+define amdgpu_kernel void @merge_private_store_3_vector_elts_loads_v4i8_align1(i8* %out) #0 {
+ %out.gep.1 = getelementptr i8, i8* %out, i8 1
+ %out.gep.2 = getelementptr i8, i8* %out, i8 2
+
+ store i8 9, i8* %out, align 1
+ store i8 1, i8* %out.gep.1, align 1
+ store i8 23, i8* %out.gep.2, align 1
+ ret void
+}
+
attributes #0 = { nounwind }
diff --git a/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll b/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
index d32387fa2c06..dbb7068eeae0 100644
--- a/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
@@ -10,7 +10,7 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
; CHECK-LABEL: @merge_global_store_2_constants_i8(
; CHECK: store <2 x i8> <i8 -56, i8 123>, <2 x i8> addrspace(1)* %{{[0-9]+}}, align 2
-define void @merge_global_store_2_constants_i8(i8 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_i8(i8 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
store i8 123, i8 addrspace(1)* %out.gep.1
@@ -20,7 +20,7 @@ define void @merge_global_store_2_constants_i8(i8 addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_global_store_2_constants_i8_natural_align
; CHECK: store <2 x i8>
-define void @merge_global_store_2_constants_i8_natural_align(i8 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_i8_natural_align(i8 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
store i8 123, i8 addrspace(1)* %out.gep.1
@@ -30,7 +30,7 @@ define void @merge_global_store_2_constants_i8_natural_align(i8 addrspace(1)* %o
; CHECK-LABEL: @merge_global_store_2_constants_i16
; CHECK: store <2 x i16> <i16 456, i16 123>, <2 x i16> addrspace(1)* %{{[0-9]+}}, align 4
-define void @merge_global_store_2_constants_i16(i16 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_i16(i16 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
store i16 123, i16 addrspace(1)* %out.gep.1
@@ -40,7 +40,7 @@ define void @merge_global_store_2_constants_i16(i16 addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_global_store_2_constants_0_i16
; CHECK: store <2 x i16> zeroinitializer, <2 x i16> addrspace(1)* %{{[0-9]+}}, align 4
-define void @merge_global_store_2_constants_0_i16(i16 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_0_i16(i16 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
store i16 0, i16 addrspace(1)* %out.gep.1
@@ -50,7 +50,7 @@ define void @merge_global_store_2_constants_0_i16(i16 addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_global_store_2_constants_i16_natural_align
; CHECK: store <2 x i16>
-define void @merge_global_store_2_constants_i16_natural_align(i16 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_i16_natural_align(i16 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
store i16 123, i16 addrspace(1)* %out.gep.1
@@ -60,7 +60,7 @@ define void @merge_global_store_2_constants_i16_natural_align(i16 addrspace(1)*
; CHECK-LABEL: @merge_global_store_2_constants_half_natural_align
; CHECK: store <2 x half>
-define void @merge_global_store_2_constants_half_natural_align(half addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_half_natural_align(half addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr half, half addrspace(1)* %out, i32 1
store half 2.0, half addrspace(1)* %out.gep.1
@@ -70,7 +70,7 @@ define void @merge_global_store_2_constants_half_natural_align(half addrspace(1)
; CHECK-LABEL: @merge_global_store_2_constants_i32
; CHECK: store <2 x i32> <i32 456, i32 123>, <2 x i32> addrspace(1)* %{{[0-9]+}}, align 4
-define void @merge_global_store_2_constants_i32(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_i32(i32 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
store i32 123, i32 addrspace(1)* %out.gep.1
@@ -80,7 +80,7 @@ define void @merge_global_store_2_constants_i32(i32 addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_global_store_2_constants_i32_f32
; CHECK: store <2 x i32> <i32 456, i32 1065353216>, <2 x i32> addrspace(1)* %{{[0-9]+}}, align 4
-define void @merge_global_store_2_constants_i32_f32(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_i32_f32(i32 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%out.gep.1.bc = bitcast i32 addrspace(1)* %out.gep.1 to float addrspace(1)*
store float 1.0, float addrspace(1)* %out.gep.1.bc
@@ -90,7 +90,7 @@ define void @merge_global_store_2_constants_i32_f32(i32 addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_global_store_2_constants_f32_i32
; CHECK store <2 x float> <float 4.000000e+00, float 0x370EC00000000000>, <2 x float> addrspace(1)* %{{[0-9]+$}}
-define void @merge_global_store_2_constants_f32_i32(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_f32_i32(float addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
%out.gep.1.bc = bitcast float addrspace(1)* %out.gep.1 to i32 addrspace(1)*
store i32 123, i32 addrspace(1)* %out.gep.1.bc
@@ -100,7 +100,7 @@ define void @merge_global_store_2_constants_f32_i32(float addrspace(1)* %out) #0
; CHECK-LABEL: @merge_global_store_4_constants_i32
; CHECK: store <4 x i32> <i32 1234, i32 123, i32 456, i32 333>, <4 x i32> addrspace(1)* %{{[0-9]+}}, align 4
-define void @merge_global_store_4_constants_i32(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_4_constants_i32(i32 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
@@ -114,7 +114,7 @@ define void @merge_global_store_4_constants_i32(i32 addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_global_store_4_constants_f32_order
; CHECK: store <4 x float> <float 8.000000e+00, float 1.000000e+00, float 2.000000e+00, float 4.000000e+00>, <4 x float> addrspace(1)* %{{[0-9]+}}
-define void @merge_global_store_4_constants_f32_order(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_4_constants_f32_order(float addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3
@@ -129,7 +129,7 @@ define void @merge_global_store_4_constants_f32_order(float addrspace(1)* %out)
; First store is out of order.
; CHECK-LABEL: @merge_global_store_4_constants_f32
; CHECK: store <4 x float> <float 8.000000e+00, float 1.000000e+00, float 2.000000e+00, float 4.000000e+00>, <4 x float> addrspace(1)* %{{[0-9]+}}, align 4
-define void @merge_global_store_4_constants_f32(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_4_constants_f32(float addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3
@@ -143,7 +143,7 @@ define void @merge_global_store_4_constants_f32(float addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_global_store_4_constants_mixed_i32_f32
; CHECK: store <4 x i32> <i32 1090519040, i32 11, i32 1073741824, i32 17>, <4 x i32> addrspace(1)* %{{[0-9]+}}, align 4
-define void @merge_global_store_4_constants_mixed_i32_f32(float addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_4_constants_mixed_i32_f32(float addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3
@@ -160,7 +160,7 @@ define void @merge_global_store_4_constants_mixed_i32_f32(float addrspace(1)* %o
; CHECK-LABEL: @merge_global_store_3_constants_i32
; CHECK: store <3 x i32> <i32 1234, i32 123, i32 456>, <3 x i32> addrspace(1)* %{{[0-9]+}}, align 4
-define void @merge_global_store_3_constants_i32(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_3_constants_i32(i32 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
@@ -172,7 +172,7 @@ define void @merge_global_store_3_constants_i32(i32 addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_global_store_2_constants_i64
; CHECK: store <2 x i64> <i64 456, i64 123>, <2 x i64> addrspace(1)* %{{[0-9]+}}, align 8
-define void @merge_global_store_2_constants_i64(i64 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_2_constants_i64(i64 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i64, i64 addrspace(1)* %out, i64 1
store i64 123, i64 addrspace(1)* %out.gep.1
@@ -183,7 +183,7 @@ define void @merge_global_store_2_constants_i64(i64 addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_global_store_4_constants_i64
; CHECK: store <2 x i64> <i64 456, i64 333>, <2 x i64> addrspace(1)* %{{[0-9]+}}, align 8
; CHECK: store <2 x i64> <i64 1234, i64 123>, <2 x i64> addrspace(1)* %{{[0-9]+}}, align 8
-define void @merge_global_store_4_constants_i64(i64 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_global_store_4_constants_i64(i64 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i64, i64 addrspace(1)* %out, i64 1
%out.gep.2 = getelementptr i64, i64 addrspace(1)* %out, i64 2
%out.gep.3 = getelementptr i64, i64 addrspace(1)* %out, i64 3
@@ -202,7 +202,7 @@ define void @merge_global_store_4_constants_i64(i64 addrspace(1)* %out) #0 {
; CHECK: [[INSERT0:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[ELT0]], i32 0
; CHECK: [[INSERT1:%[^ ]+]] = insertelement <2 x i32> [[INSERT0]], i32 [[ELT1]], i32 1
; CHECK: store <2 x i32> [[INSERT1]]
-define void @merge_global_store_2_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_2_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
@@ -220,7 +220,7 @@ define void @merge_global_store_2_adjacent_loads_i32(i32 addrspace(1)* %out, i32
; CHECK: insertelement
; CHECK: insertelement
; CHECK: store <2 x i32>
-define void @merge_global_store_2_adjacent_loads_i32_nonzero_base(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_2_adjacent_loads_i32_nonzero_base(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%in.gep.0 = getelementptr i32, i32 addrspace(1)* %in, i32 2
%in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 3
@@ -241,7 +241,7 @@ define void @merge_global_store_2_adjacent_loads_i32_nonzero_base(i32 addrspace(
; CHECK: [[INSERT0:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[ELT1]], i32 0
; CHECK: [[INSERT1:%[^ ]+]] = insertelement <2 x i32> [[INSERT0]], i32 [[ELT0]], i32 1
; CHECK: store <2 x i32> [[INSERT1]]
-define void @merge_global_store_2_adjacent_loads_shuffle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_2_adjacent_loads_shuffle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
@@ -256,7 +256,7 @@ define void @merge_global_store_2_adjacent_loads_shuffle_i32(i32 addrspace(1)* %
; CHECK-LABEL: @merge_global_store_4_adjacent_loads_i32
; CHECK: load <4 x i32>
; CHECK: store <4 x i32>
-define void @merge_global_store_4_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_4_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
@@ -279,7 +279,7 @@ define void @merge_global_store_4_adjacent_loads_i32(i32 addrspace(1)* %out, i32
; CHECK-LABEL: @merge_global_store_3_adjacent_loads_i32
; CHECK: load <3 x i32>
; CHECK: store <3 x i32>
-define void @merge_global_store_3_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_3_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
%in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
@@ -298,7 +298,7 @@ define void @merge_global_store_3_adjacent_loads_i32(i32 addrspace(1)* %out, i32
; CHECK-LABEL: @merge_global_store_4_adjacent_loads_f32
; CHECK: load <4 x float>
; CHECK: store <4 x float>
-define void @merge_global_store_4_adjacent_loads_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_4_adjacent_loads_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3
@@ -321,7 +321,7 @@ define void @merge_global_store_4_adjacent_loads_f32(float addrspace(1)* %out, f
; CHECK-LABEL: @merge_global_store_4_adjacent_loads_i32_nonzero_base
; CHECK: load <4 x i32>
; CHECK: store <4 x i32>
-define void @merge_global_store_4_adjacent_loads_i32_nonzero_base(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_4_adjacent_loads_i32_nonzero_base(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%in.gep.0 = getelementptr i32, i32 addrspace(1)* %in, i32 11
%in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 12
%in.gep.2 = getelementptr i32, i32 addrspace(1)* %in, i32 13
@@ -346,7 +346,7 @@ define void @merge_global_store_4_adjacent_loads_i32_nonzero_base(i32 addrspace(
; CHECK-LABEL: @merge_global_store_4_adjacent_loads_inverse_i32
; CHECK: load <4 x i32>
; CHECK: store <4 x i32>
-define void @merge_global_store_4_adjacent_loads_inverse_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_4_adjacent_loads_inverse_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
@@ -373,7 +373,7 @@ define void @merge_global_store_4_adjacent_loads_inverse_i32(i32 addrspace(1)* %
; CHECK-LABEL: @merge_global_store_4_adjacent_loads_shuffle_i32
; CHECK: load <4 x i32>
; CHECK: store <4 x i32>
-define void @merge_global_store_4_adjacent_loads_shuffle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_4_adjacent_loads_shuffle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
@@ -408,7 +408,7 @@ define void @merge_global_store_4_adjacent_loads_shuffle_i32(i32 addrspace(1)* %
; CHECK: insertelement <4 x i8>
; CHECK: insertelement <4 x i8>
; CHECK: store <4 x i8>
-define void @merge_global_store_4_adjacent_loads_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_4_adjacent_loads_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i8 1
%out.gep.2 = getelementptr i8, i8 addrspace(1)* %out, i8 2
%out.gep.3 = getelementptr i8, i8 addrspace(1)* %out, i8 3
@@ -431,7 +431,7 @@ define void @merge_global_store_4_adjacent_loads_i8(i8 addrspace(1)* %out, i8 ad
; CHECK-LABEL: @merge_global_store_4_adjacent_loads_i8_natural_align
; CHECK: load <4 x i8>
; CHECK: store <4 x i8>
-define void @merge_global_store_4_adjacent_loads_i8_natural_align(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_4_adjacent_loads_i8_natural_align(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i8 1
%out.gep.2 = getelementptr i8, i8 addrspace(1)* %out, i8 2
%out.gep.3 = getelementptr i8, i8 addrspace(1)* %out, i8 3
@@ -454,7 +454,7 @@ define void @merge_global_store_4_adjacent_loads_i8_natural_align(i8 addrspace(1
; CHECK-LABEL: @merge_global_store_4_vector_elts_loads_v4i32
; CHECK: load <4 x i32>
; CHECK: store <4 x i32>
-define void @merge_global_store_4_vector_elts_loads_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @merge_global_store_4_vector_elts_loads_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
@@ -474,7 +474,7 @@ define void @merge_global_store_4_vector_elts_loads_v4i32(i32 addrspace(1)* %out
; CHECK-LABEL: @merge_local_store_2_constants_i8
; CHECK: store <2 x i8> <i8 -56, i8 123>, <2 x i8> addrspace(3)* %{{[0-9]+}}, align 2
-define void @merge_local_store_2_constants_i8(i8 addrspace(3)* %out) #0 {
+define amdgpu_kernel void @merge_local_store_2_constants_i8(i8 addrspace(3)* %out) #0 {
%out.gep.1 = getelementptr i8, i8 addrspace(3)* %out, i32 1
store i8 123, i8 addrspace(3)* %out.gep.1
@@ -484,7 +484,7 @@ define void @merge_local_store_2_constants_i8(i8 addrspace(3)* %out) #0 {
; CHECK-LABEL: @merge_local_store_2_constants_i32
; CHECK: store <2 x i32> <i32 456, i32 123>, <2 x i32> addrspace(3)* %{{[0-9]+}}, align 4
-define void @merge_local_store_2_constants_i32(i32 addrspace(3)* %out) #0 {
+define amdgpu_kernel void @merge_local_store_2_constants_i32(i32 addrspace(3)* %out) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(3)* %out, i32 1
store i32 123, i32 addrspace(3)* %out.gep.1
@@ -495,7 +495,7 @@ define void @merge_local_store_2_constants_i32(i32 addrspace(3)* %out) #0 {
; CHECK-LABEL: @merge_local_store_2_constants_i32_align_2
; CHECK: store i32
; CHECK: store i32
-define void @merge_local_store_2_constants_i32_align_2(i32 addrspace(3)* %out) #0 {
+define amdgpu_kernel void @merge_local_store_2_constants_i32_align_2(i32 addrspace(3)* %out) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(3)* %out, i32 1
store i32 123, i32 addrspace(3)* %out.gep.1, align 2
@@ -506,7 +506,7 @@ define void @merge_local_store_2_constants_i32_align_2(i32 addrspace(3)* %out) #
; CHECK-LABEL: @merge_local_store_4_constants_i32
; CHECK: store <2 x i32> <i32 456, i32 333>, <2 x i32> addrspace(3)*
; CHECK: store <2 x i32> <i32 1234, i32 123>, <2 x i32> addrspace(3)*
-define void @merge_local_store_4_constants_i32(i32 addrspace(3)* %out) #0 {
+define amdgpu_kernel void @merge_local_store_4_constants_i32(i32 addrspace(3)* %out) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(3)* %out, i32 1
%out.gep.2 = getelementptr i32, i32 addrspace(3)* %out, i32 2
%out.gep.3 = getelementptr i32, i32 addrspace(3)* %out, i32 3
@@ -521,7 +521,7 @@ define void @merge_local_store_4_constants_i32(i32 addrspace(3)* %out) #0 {
; CHECK-LABEL: @merge_global_store_5_constants_i32
; CHECK: store <4 x i32> <i32 9, i32 12, i32 16, i32 -12>, <4 x i32> addrspace(1)* %{{[0-9]+}}, align 4
; CHECK: store i32
-define void @merge_global_store_5_constants_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @merge_global_store_5_constants_i32(i32 addrspace(1)* %out) {
store i32 9, i32 addrspace(1)* %out, align 4
%idx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
store i32 12, i32 addrspace(1)* %idx1, align 4
@@ -537,7 +537,7 @@ define void @merge_global_store_5_constants_i32(i32 addrspace(1)* %out) {
; CHECK-LABEL: @merge_global_store_6_constants_i32
; CHECK: store <4 x i32> <i32 13, i32 15, i32 62, i32 63>, <4 x i32> addrspace(1)* %{{[0-9]+}}, align 4
; CHECK: store <2 x i32> <i32 11, i32 123>, <2 x i32> addrspace(1)* %{{[0-9]+}}, align 4
-define void @merge_global_store_6_constants_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @merge_global_store_6_constants_i32(i32 addrspace(1)* %out) {
store i32 13, i32 addrspace(1)* %out, align 4
%idx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
store i32 15, i32 addrspace(1)* %idx1, align 4
@@ -555,7 +555,7 @@ define void @merge_global_store_6_constants_i32(i32 addrspace(1)* %out) {
; CHECK-LABEL: @merge_global_store_7_constants_i32
; CHECK: store <4 x i32> <i32 34, i32 999, i32 65, i32 33>, <4 x i32> addrspace(1)* %{{[0-9]+}}, align 4
; CHECK: store <3 x i32> <i32 98, i32 91, i32 212>, <3 x i32> addrspace(1)* %{{[0-9]+}}, align 4
-define void @merge_global_store_7_constants_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @merge_global_store_7_constants_i32(i32 addrspace(1)* %out) {
store i32 34, i32 addrspace(1)* %out, align 4
%idx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
store i32 999, i32 addrspace(1)* %idx1, align 4
@@ -575,7 +575,7 @@ define void @merge_global_store_7_constants_i32(i32 addrspace(1)* %out) {
; CHECK-LABEL: @merge_global_store_8_constants_i32
; CHECK: store <4 x i32> <i32 34, i32 999, i32 65, i32 33>, <4 x i32> addrspace(1)* %{{[0-9]+}}, align 4
; CHECK: store <4 x i32> <i32 98, i32 91, i32 212, i32 999>, <4 x i32> addrspace(1)* %{{[0-9]+}}, align 4
-define void @merge_global_store_8_constants_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @merge_global_store_8_constants_i32(i32 addrspace(1)* %out) {
store i32 34, i32 addrspace(1)* %out, align 4
%idx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
store i32 999, i32 addrspace(1)* %idx1, align 4
@@ -597,7 +597,7 @@ define void @merge_global_store_8_constants_i32(i32 addrspace(1)* %out) {
; CHECK-LABEL: @copy_v3i32_align4
; CHECK: %vec = load <3 x i32>, <3 x i32> addrspace(1)* %in, align 4
; CHECK: store <3 x i32> %vec, <3 x i32> addrspace(1)* %out
-define void @copy_v3i32_align4(<3 x i32> addrspace(1)* noalias %out, <3 x i32> addrspace(1)* noalias %in) #0 {
+define amdgpu_kernel void @copy_v3i32_align4(<3 x i32> addrspace(1)* noalias %out, <3 x i32> addrspace(1)* noalias %in) #0 {
%vec = load <3 x i32>, <3 x i32> addrspace(1)* %in, align 4
store <3 x i32> %vec, <3 x i32> addrspace(1)* %out
ret void
@@ -606,7 +606,7 @@ define void @copy_v3i32_align4(<3 x i32> addrspace(1)* noalias %out, <3 x i32> a
; CHECK-LABEL: @copy_v3i64_align4
; CHECK: %vec = load <3 x i64>, <3 x i64> addrspace(1)* %in, align 4
; CHECK: store <3 x i64> %vec, <3 x i64> addrspace(1)* %out
-define void @copy_v3i64_align4(<3 x i64> addrspace(1)* noalias %out, <3 x i64> addrspace(1)* noalias %in) #0 {
+define amdgpu_kernel void @copy_v3i64_align4(<3 x i64> addrspace(1)* noalias %out, <3 x i64> addrspace(1)* noalias %in) #0 {
%vec = load <3 x i64>, <3 x i64> addrspace(1)* %in, align 4
store <3 x i64> %vec, <3 x i64> addrspace(1)* %out
ret void
@@ -615,7 +615,7 @@ define void @copy_v3i64_align4(<3 x i64> addrspace(1)* noalias %out, <3 x i64> a
; CHECK-LABEL: @copy_v3f32_align4
; CHECK: %vec = load <3 x float>, <3 x float> addrspace(1)* %in, align 4
; CHECK: store <3 x float>
-define void @copy_v3f32_align4(<3 x float> addrspace(1)* noalias %out, <3 x float> addrspace(1)* noalias %in) #0 {
+define amdgpu_kernel void @copy_v3f32_align4(<3 x float> addrspace(1)* noalias %out, <3 x float> addrspace(1)* noalias %in) #0 {
%vec = load <3 x float>, <3 x float> addrspace(1)* %in, align 4
%fadd = fadd <3 x float> %vec, <float 1.0, float 2.0, float 4.0>
store <3 x float> %fadd, <3 x float> addrspace(1)* %out
@@ -625,7 +625,7 @@ define void @copy_v3f32_align4(<3 x float> addrspace(1)* noalias %out, <3 x floa
; CHECK-LABEL: @copy_v3f64_align4
; CHECK: %vec = load <3 x double>, <3 x double> addrspace(1)* %in, align 4
; CHECK: store <3 x double> %fadd, <3 x double> addrspace(1)* %out
-define void @copy_v3f64_align4(<3 x double> addrspace(1)* noalias %out, <3 x double> addrspace(1)* noalias %in) #0 {
+define amdgpu_kernel void @copy_v3f64_align4(<3 x double> addrspace(1)* noalias %out, <3 x double> addrspace(1)* noalias %in) #0 {
%vec = load <3 x double>, <3 x double> addrspace(1)* %in, align 4
%fadd = fadd <3 x double> %vec, <double 1.0, double 2.0, double 4.0>
store <3 x double> %fadd, <3 x double> addrspace(1)* %out
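; For reference: the change applied throughout the tests above is the addition
; of the amdgpu_kernel calling convention to each test entry point. A minimal
; sketch of the before/after shape, with hypothetical function names:
define void @plain_function(i32 addrspace(1)* %out) {
  store i32 0, i32 addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @kernel_entry(i32 addrspace(1)* %out) {
  store i32 0, i32 addrspace(1)* %out
  ret void
}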
diff --git a/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll b/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll
index 8885d61014fc..226147df66a6 100644
--- a/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll
@@ -5,7 +5,7 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
; CHECK-LABEL: @merge_v2i32_v2i32(
; CHECK: load <4 x i32>
; CHECK: store <4 x i32> zeroinitializer
-define void @merge_v2i32_v2i32(<2 x i32> addrspace(1)* nocapture %a, <2 x i32> addrspace(1)* nocapture readonly %b) #0 {
+define amdgpu_kernel void @merge_v2i32_v2i32(<2 x i32> addrspace(1)* nocapture %a, <2 x i32> addrspace(1)* nocapture readonly %b) #0 {
entry:
%a.1 = getelementptr inbounds <2 x i32>, <2 x i32> addrspace(1)* %a, i64 1
%b.1 = getelementptr inbounds <2 x i32>, <2 x i32> addrspace(1)* %b, i64 1
@@ -22,7 +22,7 @@ entry:
; CHECK-LABEL: @merge_v1i32_v1i32(
; CHECK: load <2 x i32>
; CHECK: store <2 x i32> zeroinitializer
-define void @merge_v1i32_v1i32(<1 x i32> addrspace(1)* nocapture %a, <1 x i32> addrspace(1)* nocapture readonly %b) #0 {
+define amdgpu_kernel void @merge_v1i32_v1i32(<1 x i32> addrspace(1)* nocapture %a, <1 x i32> addrspace(1)* nocapture readonly %b) #0 {
entry:
%a.1 = getelementptr inbounds <1 x i32>, <1 x i32> addrspace(1)* %a, i64 1
%b.1 = getelementptr inbounds <1 x i32>, <1 x i32> addrspace(1)* %b, i64 1
@@ -41,7 +41,7 @@ entry:
; CHECK: load <3 x i32>
; CHECK: store <3 x i32> zeroinitializer
; CHECK: store <3 x i32> zeroinitializer
-define void @no_merge_v3i32_v3i32(<3 x i32> addrspace(1)* nocapture %a, <3 x i32> addrspace(1)* nocapture readonly %b) #0 {
+define amdgpu_kernel void @no_merge_v3i32_v3i32(<3 x i32> addrspace(1)* nocapture %a, <3 x i32> addrspace(1)* nocapture readonly %b) #0 {
entry:
%a.1 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %a, i64 1
%b.1 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %b, i64 1
@@ -58,7 +58,7 @@ entry:
; CHECK-LABEL: @merge_v2i16_v2i16(
; CHECK: load <4 x i16>
; CHECK: store <4 x i16> zeroinitializer
-define void @merge_v2i16_v2i16(<2 x i16> addrspace(1)* nocapture %a, <2 x i16> addrspace(1)* nocapture readonly %b) #0 {
+define amdgpu_kernel void @merge_v2i16_v2i16(<2 x i16> addrspace(1)* nocapture %a, <2 x i16> addrspace(1)* nocapture readonly %b) #0 {
entry:
%a.1 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %a, i64 1
%b.1 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %b, i64 1
@@ -76,7 +76,7 @@ entry:
; CHECK-LABEL: @merge_load_i32_v2i16(
; CHECK: load i32,
; CHECK: load <2 x i16>
-define void @merge_load_i32_v2i16(i32 addrspace(1)* nocapture %a) #0 {
+define amdgpu_kernel void @merge_load_i32_v2i16(i32 addrspace(1)* nocapture %a) #0 {
entry:
%a.1 = getelementptr inbounds i32, i32 addrspace(1)* %a, i32 1
%a.1.cast = bitcast i32 addrspace(1)* %a.1 to <2 x i16> addrspace(1)*
diff --git a/test/Transforms/LoadStoreVectorizer/AMDGPU/missing-alignment.ll b/test/Transforms/LoadStoreVectorizer/AMDGPU/missing-alignment.ll
index ba792f783533..f353106607d6 100644
--- a/test/Transforms/LoadStoreVectorizer/AMDGPU/missing-alignment.ll
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/missing-alignment.ll
@@ -7,7 +7,7 @@
; CHECK-LABEL: @load_keep_base_alignment_missing_align(
; CHECK: load <2 x float>, <2 x float> addrspace(3)* %{{[0-9]+}}, align 4
-define void @load_keep_base_alignment_missing_align(float addrspace(1)* %out) {
+define amdgpu_kernel void @load_keep_base_alignment_missing_align(float addrspace(1)* %out) {
%ptr0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 11
%val0 = load float, float addrspace(3)* %ptr0
@@ -21,7 +21,7 @@ define void @load_keep_base_alignment_missing_align(float addrspace(1)* %out) {
; CHECK-LABEL: @store_keep_base_alignment_missing_align(
; CHECK: store <2 x float> zeroinitializer, <2 x float> addrspace(3)* %{{[0-9]+}}, align 4
-define void @store_keep_base_alignment_missing_align() {
+define amdgpu_kernel void @store_keep_base_alignment_missing_align() {
%arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 1
%arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 2
store float 0.0, float addrspace(3)* %arrayidx0
diff --git a/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll b/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll
index 88eca363902f..8a78f3d7e9bc 100644
--- a/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll
@@ -11,7 +11,7 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:
; CHECK: store i32 0
; CHECK: store i32 0
-define void @no_crash(i32 %arg) {
+define amdgpu_kernel void @no_crash(i32 %arg) {
%tmp2 = add i32 %arg, 14
%tmp3 = getelementptr [16384 x i32], [16384 x i32] addrspace(3)* @0, i32 0, i32 %tmp2
%tmp4 = add i32 %arg, 15
@@ -37,7 +37,7 @@ define void @no_crash(i32 %arg) {
; CHECK: load i32
; CHECK: load i32
-define void @interleave_get_longest(i32 %arg) {
+define amdgpu_kernel void @interleave_get_longest(i32 %arg) {
%a1 = add i32 %arg, 1
%a2 = add i32 %arg, 2
%a3 = add i32 %arg, 3
diff --git a/test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll b/test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll
index 4a429533df02..818189565b4c 100644
--- a/test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll
@@ -5,7 +5,7 @@
; CHECK: store i32
; CHECK: store i32
; CHECK: store i32
-define void @no_implicit_float(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @no_implicit_float(i32 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
%out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
diff --git a/test/Transforms/LoadStoreVectorizer/AMDGPU/optnone.ll b/test/Transforms/LoadStoreVectorizer/AMDGPU/optnone.ll
index 141e20a1f83c..28d29f8e8139 100644
--- a/test/Transforms/LoadStoreVectorizer/AMDGPU/optnone.ll
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/optnone.ll
@@ -3,7 +3,7 @@
; CHECK-LABEL: @optnone(
; CHECK: store i32
; CHECK: store i32
-define void @optnone(i32 addrspace(1)* %out) noinline optnone {
+define amdgpu_kernel void @optnone(i32 addrspace(1)* %out) noinline optnone {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
store i32 123, i32 addrspace(1)* %out.gep.1
@@ -13,7 +13,7 @@ define void @optnone(i32 addrspace(1)* %out) noinline optnone {
; CHECK-LABEL: @do_opt(
; CHECK: store <2 x i32>
-define void @do_opt(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @do_opt(i32 addrspace(1)* %out) {
%out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
store i32 123, i32 addrspace(1)* %out.gep.1
diff --git a/test/Transforms/LoadStoreVectorizer/AMDGPU/pointer-elements.ll b/test/Transforms/LoadStoreVectorizer/AMDGPU/pointer-elements.ll
index 202e988ea5f1..65200b95d5e6 100644
--- a/test/Transforms/LoadStoreVectorizer/AMDGPU/pointer-elements.ll
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/pointer-elements.ll
@@ -9,7 +9,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1
; CHECK: inttoptr i64 %{{[^ ]+}} to i8 addrspace(1)*
; CHECK: inttoptr i64 %{{[^ ]+}} to i8 addrspace(1)*
; CHECK: store <2 x i64> zeroinitializer
-define void @merge_v2p1i8(i8 addrspace(1)* addrspace(1)* nocapture %a, i8 addrspace(1)* addrspace(1)* nocapture readonly %b) #0 {
+define amdgpu_kernel void @merge_v2p1i8(i8 addrspace(1)* addrspace(1)* nocapture %a, i8 addrspace(1)* addrspace(1)* nocapture readonly %b) #0 {
entry:
%a.1 = getelementptr inbounds i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %a, i64 1
%b.1 = getelementptr inbounds i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %b, i64 1
@@ -28,7 +28,7 @@ entry:
; CHECK: inttoptr i32 %{{[^ ]+}} to i8 addrspace(3)*
; CHECK: inttoptr i32 %{{[^ ]+}} to i8 addrspace(3)*
; CHECK: store <2 x i32> zeroinitializer
-define void @merge_v2p3i8(i8 addrspace(3)* addrspace(3)* nocapture %a, i8 addrspace(3)* addrspace(3)* nocapture readonly %b) #0 {
+define amdgpu_kernel void @merge_v2p3i8(i8 addrspace(3)* addrspace(3)* nocapture %a, i8 addrspace(3)* addrspace(3)* nocapture readonly %b) #0 {
entry:
%a.1 = getelementptr inbounds i8 addrspace(3)*, i8 addrspace(3)* addrspace(3)* %a, i64 1
%b.1 = getelementptr inbounds i8 addrspace(3)*, i8 addrspace(3)* addrspace(3)* %b, i64 1
@@ -46,7 +46,7 @@ entry:
; CHECK: load <2 x i64>
; CHECK: [[ELT1:%[^ ]+]] = extractelement <2 x i64> %{{[^ ]+}}, i32 1
; CHECK: inttoptr i64 [[ELT1]] to i8 addrspace(1)*
-define void @merge_load_i64_ptr64(i64 addrspace(1)* nocapture %a) #0 {
+define amdgpu_kernel void @merge_load_i64_ptr64(i64 addrspace(1)* nocapture %a) #0 {
entry:
%a.1 = getelementptr inbounds i64, i64 addrspace(1)* %a, i64 1
%a.1.cast = bitcast i64 addrspace(1)* %a.1 to i8 addrspace(1)* addrspace(1)*
@@ -61,7 +61,7 @@ entry:
; CHECK: load <2 x i64>
; CHECK: [[ELT0:%[^ ]+]] = extractelement <2 x i64> %{{[^ ]+}}, i32 0
; CHECK: inttoptr i64 [[ELT0]] to i8 addrspace(1)*
-define void @merge_load_ptr64_i64(i64 addrspace(1)* nocapture %a) #0 {
+define amdgpu_kernel void @merge_load_ptr64_i64(i64 addrspace(1)* nocapture %a) #0 {
entry:
%a.cast = bitcast i64 addrspace(1)* %a to i8 addrspace(1)* addrspace(1)*
%a.1 = getelementptr inbounds i64, i64 addrspace(1)* %a, i64 1
@@ -76,7 +76,7 @@ entry:
; CHECK: [[ELT0:%[^ ]+]] = ptrtoint i8 addrspace(1)* %ptr0 to i64
; CHECK: insertelement <2 x i64> undef, i64 [[ELT0]], i32 0
; CHECK: store <2 x i64>
-define void @merge_store_ptr64_i64(i64 addrspace(1)* nocapture %a, i8 addrspace(1)* %ptr0, i64 %val1) #0 {
+define amdgpu_kernel void @merge_store_ptr64_i64(i64 addrspace(1)* nocapture %a, i8 addrspace(1)* %ptr0, i64 %val1) #0 {
entry:
%a.cast = bitcast i64 addrspace(1)* %a to i8 addrspace(1)* addrspace(1)*
%a.1 = getelementptr inbounds i64, i64 addrspace(1)* %a, i64 1
@@ -92,7 +92,7 @@ entry:
; CHECK: [[ELT1:%[^ ]+]] = ptrtoint i8 addrspace(1)* %ptr1 to i64
; CHECK: insertelement <2 x i64> %{{[^ ]+}}, i64 [[ELT1]], i32 1
; CHECK: store <2 x i64>
-define void @merge_store_i64_ptr64(i8 addrspace(1)* addrspace(1)* nocapture %a, i64 %val0, i8 addrspace(1)* %ptr1) #0 {
+define amdgpu_kernel void @merge_store_i64_ptr64(i8 addrspace(1)* addrspace(1)* nocapture %a, i64 %val0, i8 addrspace(1)* %ptr1) #0 {
entry:
%a.1 = getelementptr inbounds i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %a, i64 1
%a.cast = bitcast i8 addrspace(1)* addrspace(1)* %a to i64 addrspace(1)*
@@ -107,7 +107,7 @@ entry:
; CHECK: load <2 x i32>
; CHECK: [[ELT1:%[^ ]+]] = extractelement <2 x i32> %{{[^ ]+}}, i32 1
; CHECK: inttoptr i32 [[ELT1]] to i8 addrspace(3)*
-define void @merge_load_i32_ptr32(i32 addrspace(3)* nocapture %a) #0 {
+define amdgpu_kernel void @merge_load_i32_ptr32(i32 addrspace(3)* nocapture %a) #0 {
entry:
%a.1 = getelementptr inbounds i32, i32 addrspace(3)* %a, i32 1
%a.1.cast = bitcast i32 addrspace(3)* %a.1 to i8 addrspace(3)* addrspace(3)*
@@ -122,7 +122,7 @@ entry:
; CHECK: load <2 x i32>
; CHECK: [[ELT0:%[^ ]+]] = extractelement <2 x i32> %{{[^ ]+}}, i32 0
; CHECK: inttoptr i32 [[ELT0]] to i8 addrspace(3)*
-define void @merge_load_ptr32_i32(i32 addrspace(3)* nocapture %a) #0 {
+define amdgpu_kernel void @merge_load_ptr32_i32(i32 addrspace(3)* nocapture %a) #0 {
entry:
%a.cast = bitcast i32 addrspace(3)* %a to i8 addrspace(3)* addrspace(3)*
%a.1 = getelementptr inbounds i32, i32 addrspace(3)* %a, i32 1
@@ -137,7 +137,7 @@ entry:
; CHECK: [[ELT0:%[^ ]+]] = ptrtoint i8 addrspace(3)* %ptr0 to i32
; CHECK: insertelement <2 x i32> undef, i32 [[ELT0]], i32 0
; CHECK: store <2 x i32>
-define void @merge_store_ptr32_i32(i32 addrspace(3)* nocapture %a, i8 addrspace(3)* %ptr0, i32 %val1) #0 {
+define amdgpu_kernel void @merge_store_ptr32_i32(i32 addrspace(3)* nocapture %a, i8 addrspace(3)* %ptr0, i32 %val1) #0 {
entry:
%a.cast = bitcast i32 addrspace(3)* %a to i8 addrspace(3)* addrspace(3)*
%a.1 = getelementptr inbounds i32, i32 addrspace(3)* %a, i32 1
@@ -152,7 +152,7 @@ entry:
; CHECK: [[ELT1:%[^ ]+]] = ptrtoint i8 addrspace(3)* %ptr1 to i32
; CHECK: insertelement <2 x i32> %{{[^ ]+}}, i32 [[ELT1]], i32 1
; CHECK: store <2 x i32>
-define void @merge_store_i32_ptr32(i8 addrspace(3)* addrspace(3)* nocapture %a, i32 %val0, i8 addrspace(3)* %ptr1) #0 {
+define amdgpu_kernel void @merge_store_i32_ptr32(i8 addrspace(3)* addrspace(3)* nocapture %a, i32 %val0, i8 addrspace(3)* %ptr1) #0 {
entry:
%a.1 = getelementptr inbounds i8 addrspace(3)*, i8 addrspace(3)* addrspace(3)* %a, i32 1
%a.cast = bitcast i8 addrspace(3)* addrspace(3)* %a to i32 addrspace(3)*
@@ -166,7 +166,7 @@ entry:
; CHECK-LABEL: @no_merge_store_ptr32_i64(
; CHECK: store i8 addrspace(3)*
; CHECK: store i64
-define void @no_merge_store_ptr32_i64(i64 addrspace(1)* nocapture %a, i8 addrspace(3)* %ptr0, i64 %val1) #0 {
+define amdgpu_kernel void @no_merge_store_ptr32_i64(i64 addrspace(1)* nocapture %a, i8 addrspace(3)* %ptr0, i64 %val1) #0 {
entry:
%a.cast = bitcast i64 addrspace(1)* %a to i8 addrspace(3)* addrspace(1)*
%a.1 = getelementptr inbounds i64, i64 addrspace(1)* %a, i64 1
@@ -181,7 +181,7 @@ entry:
; CHECK-LABEL: @no_merge_store_i64_ptr32(
; CHECK: store i64
; CHECK: store i8 addrspace(3)*
-define void @no_merge_store_i64_ptr32(i8 addrspace(3)* addrspace(1)* nocapture %a, i64 %val0, i8 addrspace(3)* %ptr1) #0 {
+define amdgpu_kernel void @no_merge_store_i64_ptr32(i8 addrspace(3)* addrspace(1)* nocapture %a, i64 %val0, i8 addrspace(3)* %ptr1) #0 {
entry:
%a.1 = getelementptr inbounds i8 addrspace(3)*, i8 addrspace(3)* addrspace(1)* %a, i64 1
%a.cast = bitcast i8 addrspace(3)* addrspace(1)* %a to i64 addrspace(1)*
@@ -195,7 +195,7 @@ entry:
; CHECK-LABEL: @no_merge_load_i64_ptr32(
; CHECK: load i64,
; CHECK: load i8 addrspace(3)*,
-define void @no_merge_load_i64_ptr32(i64 addrspace(1)* nocapture %a) #0 {
+define amdgpu_kernel void @no_merge_load_i64_ptr32(i64 addrspace(1)* nocapture %a) #0 {
entry:
%a.1 = getelementptr inbounds i64, i64 addrspace(1)* %a, i64 1
%a.1.cast = bitcast i64 addrspace(1)* %a.1 to i8 addrspace(3)* addrspace(1)*
@@ -209,7 +209,7 @@ entry:
; CHECK-LABEL: @no_merge_load_ptr32_i64(
; CHECK: load i8 addrspace(3)*,
; CHECK: load i64,
-define void @no_merge_load_ptr32_i64(i64 addrspace(1)* nocapture %a) #0 {
+define amdgpu_kernel void @no_merge_load_ptr32_i64(i64 addrspace(1)* nocapture %a) #0 {
entry:
%a.cast = bitcast i64 addrspace(1)* %a to i8 addrspace(3)* addrspace(1)*
%a.1 = getelementptr inbounds i64, i64 addrspace(1)* %a, i64 1
@@ -226,7 +226,7 @@ entry:
; CHECK: load <2 x i8 addrspace(1)*>
; CHECK: store <2 x i8 addrspace(1)*>
; CHECK: store <2 x i8 addrspace(1)*>
-define void @merge_v2p1i8_v2p1i8(<2 x i8 addrspace(1)*> addrspace(1)* nocapture noalias %a, <2 x i8 addrspace(1)*> addrspace(1)* nocapture readonly noalias %b) #0 {
+define amdgpu_kernel void @merge_v2p1i8_v2p1i8(<2 x i8 addrspace(1)*> addrspace(1)* nocapture noalias %a, <2 x i8 addrspace(1)*> addrspace(1)* nocapture readonly noalias %b) #0 {
entry:
%a.1 = getelementptr inbounds <2 x i8 addrspace(1)*>, <2 x i8 addrspace(1)*> addrspace(1)* %a, i64 1
%b.1 = getelementptr inbounds <2 x i8 addrspace(1)*>, <2 x i8 addrspace(1)*> addrspace(1)* %b, i64 1
@@ -245,7 +245,7 @@ entry:
; CHECK: [[ELT0_INT:%[^ ]+]] = inttoptr i64 [[ELT0]] to i8 addrspace(1)*
; CHECK: [[ELT1_INT:%[^ ]+]] = extractelement <2 x i64> %{{[^ ]+}}, i32 1
; CHECK: bitcast i64 [[ELT1_INT]] to double
-define void @merge_load_ptr64_f64(double addrspace(1)* nocapture %a) #0 {
+define amdgpu_kernel void @merge_load_ptr64_f64(double addrspace(1)* nocapture %a) #0 {
entry:
%a.cast = bitcast double addrspace(1)* %a to i8 addrspace(1)* addrspace(1)*
%a.1 = getelementptr inbounds double, double addrspace(1)* %a, i64 1
@@ -262,7 +262,7 @@ entry:
; CHECK: bitcast i64 [[ELT0]] to double
; CHECK: [[ELT1:%[^ ]+]] = extractelement <2 x i64> %{{[^ ]+}}, i32 1
; CHECK: inttoptr i64 [[ELT1]] to i8 addrspace(1)*
-define void @merge_load_f64_ptr64(double addrspace(1)* nocapture %a) #0 {
+define amdgpu_kernel void @merge_load_f64_ptr64(double addrspace(1)* nocapture %a) #0 {
entry:
%a.1 = getelementptr inbounds double, double addrspace(1)* %a, i64 1
%a.1.cast = bitcast double addrspace(1)* %a.1 to i8 addrspace(1)* addrspace(1)*
@@ -279,7 +279,7 @@ entry:
; CHECK: [[ELT1_INT:%[^ ]+]] = bitcast double %val1 to i64
; CHECK: insertelement <2 x i64> %{{[^ ]+}}, i64 [[ELT1_INT]], i32 1
; CHECK: store <2 x i64>
-define void @merge_store_ptr64_f64(double addrspace(1)* nocapture %a, i8 addrspace(1)* %ptr0, double %val1) #0 {
+define amdgpu_kernel void @merge_store_ptr64_f64(double addrspace(1)* nocapture %a, i8 addrspace(1)* %ptr0, double %val1) #0 {
entry:
%a.cast = bitcast double addrspace(1)* %a to i8 addrspace(1)* addrspace(1)*
%a.1 = getelementptr inbounds double, double addrspace(1)* %a, i64 1
@@ -296,7 +296,7 @@ entry:
; CHECK: [[ELT1_INT:%[^ ]+]] = ptrtoint i8 addrspace(1)* %ptr1 to i64
; CHECK: insertelement <2 x i64> %{{[^ ]+}}, i64 [[ELT1_INT]], i32 1
; CHECK: store <2 x i64>
-define void @merge_store_f64_ptr64(i8 addrspace(1)* addrspace(1)* nocapture %a, double %val0, i8 addrspace(1)* %ptr1) #0 {
+define amdgpu_kernel void @merge_store_f64_ptr64(i8 addrspace(1)* addrspace(1)* nocapture %a, double %val0, i8 addrspace(1)* %ptr1) #0 {
entry:
%a.1 = getelementptr inbounds i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %a, i64 1
%a.cast = bitcast i8 addrspace(1)* addrspace(1)* %a to double addrspace(1)*
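; The pointer-elements tests above check that the vectorizer can merge mixed
; pointer/integer accesses by casting pointers with ptrtoint/inttoptr. A rough
; sketch (hypothetical names) of the merged form the CHECK lines expect for a
; pointer store followed by an i64 store:
define void @sketch_merged_store(i64 addrspace(1)* %a, i8 addrspace(1)* %ptr0, i64 %val1) {
  %p.int = ptrtoint i8 addrspace(1)* %ptr0 to i64
  %v0 = insertelement <2 x i64> undef, i64 %p.int, i32 0
  %v1 = insertelement <2 x i64> %v0, i64 %val1, i32 1
  %a.vec = bitcast i64 addrspace(1)* %a to <2 x i64> addrspace(1)*
  store <2 x i64> %v1, <2 x i64> addrspace(1)* %a.vec, align 8
  ret void
}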
diff --git a/test/Transforms/LoadStoreVectorizer/AMDGPU/store_with_aliasing_load.ll b/test/Transforms/LoadStoreVectorizer/AMDGPU/store_with_aliasing_load.ll
index d70c449e14d7..63e688e63fbb 100644
--- a/test/Transforms/LoadStoreVectorizer/AMDGPU/store_with_aliasing_load.ll
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/store_with_aliasing_load.ll
@@ -9,7 +9,7 @@
; CHECK: store <4 x float>
; Function Attrs: nounwind
-define void @store_vectorize_with_alias(i8 addrspace(1)* %a, i8 addrspace(1)* %b) #0 {
+define amdgpu_kernel void @store_vectorize_with_alias(i8 addrspace(1)* %a, i8 addrspace(1)* %b) #0 {
bb:
%tmp = bitcast i8 addrspace(1)* %b to float addrspace(1)*
%tmp1 = load float, float addrspace(1)* %tmp, align 4
diff --git a/test/Transforms/LoadStoreVectorizer/AMDGPU/weird-type-accesses.ll b/test/Transforms/LoadStoreVectorizer/AMDGPU/weird-type-accesses.ll
index 18f62be27c82..412d2013f6b6 100644
--- a/test/Transforms/LoadStoreVectorizer/AMDGPU/weird-type-accesses.ll
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/weird-type-accesses.ll
@@ -16,7 +16,7 @@ declare void @use_v2i9(<2 x i9>)
; CHECK-LABEL: @merge_store_2_constants_i1(
; CHECK: store i1
; CHECK: store i1
-define void @merge_store_2_constants_i1(i1 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_store_2_constants_i1(i1 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i1, i1 addrspace(1)* %out, i32 1
store i1 true, i1 addrspace(1)* %out.gep.1
store i1 false, i1 addrspace(1)* %out
@@ -26,7 +26,7 @@ define void @merge_store_2_constants_i1(i1 addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_store_2_constants_i2(
; CHECK: store i2 1
; CHECK: store i2 -1
-define void @merge_store_2_constants_i2(i2 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_store_2_constants_i2(i2 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i2, i2 addrspace(1)* %out, i32 1
store i2 1, i2 addrspace(1)* %out.gep.1
store i2 -1, i2 addrspace(1)* %out
@@ -36,7 +36,7 @@ define void @merge_store_2_constants_i2(i2 addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_different_store_sizes_i1_i8(
; CHECK: store i1 true
; CHECK: store i8 123
-define void @merge_different_store_sizes_i1_i8(i8 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_different_store_sizes_i1_i8(i8 addrspace(1)* %out) #0 {
%out.i1 = bitcast i8 addrspace(1)* %out to i1 addrspace(1)*
%out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
store i1 true, i1 addrspace(1)* %out.i1
@@ -47,7 +47,7 @@ define void @merge_different_store_sizes_i1_i8(i8 addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_different_store_sizes_i8_i1(
; CHECK: store i8 123
; CHECK: store i1 true
-define void @merge_different_store_sizes_i8_i1(i1 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_different_store_sizes_i8_i1(i1 addrspace(1)* %out) #0 {
%out.i8 = bitcast i1 addrspace(1)* %out to i8 addrspace(1)*
%out.gep.1 = getelementptr i8, i8 addrspace(1)* %out.i8, i32 1
store i8 123, i8 addrspace(1)* %out.gep.1
@@ -58,7 +58,7 @@ define void @merge_different_store_sizes_i8_i1(i1 addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_store_2_constant_structs(
; CHECK: store %struct.foo
; CHECK: store %struct.foo
-define void @merge_store_2_constant_structs(%struct.foo addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_store_2_constant_structs(%struct.foo addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr %struct.foo, %struct.foo addrspace(1)* %out, i32 1
store %struct.foo { i32 12, i8 3 }, %struct.foo addrspace(1)* %out.gep.1
store %struct.foo { i32 92, i8 9 }, %struct.foo addrspace(1)* %out
@@ -69,7 +69,7 @@ define void @merge_store_2_constant_structs(%struct.foo addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_store_2_constants_v2i2(
; CHECK: store <2 x i2>
; CHECK: store <2 x i2>
-define void @merge_store_2_constants_v2i2(<2 x i2> addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_store_2_constants_v2i2(<2 x i2> addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr <2 x i2>, <2 x i2> addrspace(1)* %out, i32 1
store <2 x i2> <i2 1, i2 -1>, <2 x i2> addrspace(1)* %out.gep.1
store <2 x i2> <i2 -1, i2 1>, <2 x i2> addrspace(1)* %out
@@ -81,7 +81,7 @@ define void @merge_store_2_constants_v2i2(<2 x i2> addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_store_2_constants_v4i2(
; CHECK: store <4 x i2>
; CHECK: store <4 x i2>
-define void @merge_store_2_constants_v4i2(<4 x i2> addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_store_2_constants_v4i2(<4 x i2> addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr <4 x i2>, <4 x i2> addrspace(1)* %out, i32 1
store <4 x i2> <i2 1, i2 -1, i2 1, i2 -1>, <4 x i2> addrspace(1)* %out.gep.1
store <4 x i2> <i2 -1, i2 1, i2 -1, i2 1>, <4 x i2> addrspace(1)* %out
@@ -91,7 +91,7 @@ define void @merge_store_2_constants_v4i2(<4 x i2> addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_load_2_constants_i1(
; CHECK: load i1
; CHECK: load i1
-define void @merge_load_2_constants_i1(i1 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_load_2_constants_i1(i1 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i1, i1 addrspace(1)* %out, i32 1
%x = load i1, i1 addrspace(1)* %out.gep.1
%y = load i1, i1 addrspace(1)* %out
@@ -103,7 +103,7 @@ define void @merge_load_2_constants_i1(i1 addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_load_2_constants_i2(
; CHECK: load i2
; CHECK: load i2
-define void @merge_load_2_constants_i2(i2 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_load_2_constants_i2(i2 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i2, i2 addrspace(1)* %out, i32 1
%x = load i2, i2 addrspace(1)* %out.gep.1
%y = load i2, i2 addrspace(1)* %out
@@ -115,7 +115,7 @@ define void @merge_load_2_constants_i2(i2 addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_different_load_sizes_i1_i8(
; CHECK: load i1
; CHECK: load i8
-define void @merge_different_load_sizes_i1_i8(i8 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_different_load_sizes_i1_i8(i8 addrspace(1)* %out) #0 {
%out.i1 = bitcast i8 addrspace(1)* %out to i1 addrspace(1)*
%out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
%x = load i1, i1 addrspace(1)* %out.i1
@@ -128,7 +128,7 @@ define void @merge_different_load_sizes_i1_i8(i8 addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_different_load_sizes_i8_i1(
; CHECK: load i8
; CHECK: load i1
-define void @merge_different_load_sizes_i8_i1(i1 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_different_load_sizes_i8_i1(i1 addrspace(1)* %out) #0 {
%out.i8 = bitcast i1 addrspace(1)* %out to i8 addrspace(1)*
%out.gep.1 = getelementptr i8, i8 addrspace(1)* %out.i8, i32 1
%x = load i8, i8 addrspace(1)* %out.gep.1
@@ -141,7 +141,7 @@ define void @merge_different_load_sizes_i8_i1(i1 addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_load_2_constant_structs(
; CHECK: load %struct.foo
; CHECK: load %struct.foo
-define void @merge_load_2_constant_structs(%struct.foo addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_load_2_constant_structs(%struct.foo addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr %struct.foo, %struct.foo addrspace(1)* %out, i32 1
%x = load %struct.foo, %struct.foo addrspace(1)* %out.gep.1
%y = load %struct.foo, %struct.foo addrspace(1)* %out
@@ -153,7 +153,7 @@ define void @merge_load_2_constant_structs(%struct.foo addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_load_2_constants_v2i2(
; CHECK: load <2 x i2>
; CHECK: load <2 x i2>
-define void @merge_load_2_constants_v2i2(<2 x i2> addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_load_2_constants_v2i2(<2 x i2> addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr <2 x i2>, <2 x i2> addrspace(1)* %out, i32 1
%x = load <2 x i2>, <2 x i2> addrspace(1)* %out.gep.1
%y = load <2 x i2>, <2 x i2> addrspace(1)* %out
@@ -165,7 +165,7 @@ define void @merge_load_2_constants_v2i2(<2 x i2> addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_load_2_constants_v4i2(
; CHECK: load <4 x i2>
; CHECK: load <4 x i2>
-define void @merge_load_2_constants_v4i2(<4 x i2> addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_load_2_constants_v4i2(<4 x i2> addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr <4 x i2>, <4 x i2> addrspace(1)* %out, i32 1
%x = load <4 x i2>, <4 x i2> addrspace(1)* %out.gep.1
%y = load <4 x i2>, <4 x i2> addrspace(1)* %out
@@ -177,7 +177,7 @@ define void @merge_load_2_constants_v4i2(<4 x i2> addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_store_2_constants_i9(
; CHECK: store i9 3
; CHECK: store i9 -5
-define void @merge_store_2_constants_i9(i9 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_store_2_constants_i9(i9 addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr i9, i9 addrspace(1)* %out, i32 1
store i9 3, i9 addrspace(1)* %out.gep.1
store i9 -5, i9 addrspace(1)* %out
@@ -187,7 +187,7 @@ define void @merge_store_2_constants_i9(i9 addrspace(1)* %out) #0 {
; CHECK-LABEL: @merge_load_2_constants_v2i9(
; CHECK: load <2 x i9>
; CHECK: load <2 x i9>
-define void @merge_load_2_constants_v2i9(<2 x i9> addrspace(1)* %out) #0 {
+define amdgpu_kernel void @merge_load_2_constants_v2i9(<2 x i9> addrspace(1)* %out) #0 {
%out.gep.1 = getelementptr <2 x i9>, <2 x i9> addrspace(1)* %out, i32 1
%x = load <2 x i9>, <2 x i9> addrspace(1)* %out.gep.1
%y = load <2 x i9>, <2 x i9> addrspace(1)* %out
diff --git a/test/Transforms/LoadStoreVectorizer/X86/load-width.ll b/test/Transforms/LoadStoreVectorizer/X86/load-width.ll
new file mode 100644
index 000000000000..a61b25119a14
--- /dev/null
+++ b/test/Transforms/LoadStoreVectorizer/X86/load-width.ll
@@ -0,0 +1,38 @@
+; RUN: opt -mtriple=x86_64-unknown-linux-gnu -load-store-vectorizer -mcpu haswell -S -o - %s | FileCheck --check-prefix=CHECK-HSW %s
+; RUN: opt -mtriple=x86_64-unknown-linux-gnu -load-store-vectorizer -mcpu knl -S -o - %s | FileCheck --check-prefix=CHECK-KNL %s
+
+define <8 x double> @loadwidth_insert_extract(double* %ptr) {
+ %a = bitcast double* %ptr to <2 x double> *
+ %b = getelementptr <2 x double>, <2 x double>* %a, i32 1
+ %c = getelementptr <2 x double>, <2 x double>* %a, i32 2
+ %d = getelementptr <2 x double>, <2 x double>* %a, i32 3
+; CHECK-HSW: load <4 x double>
+; CHECK-HSW: load <4 x double>
+; CHECK-HSW-NOT: load
+; CHECK-KNL: load <8 x double>
+; CHECK-KNL-NOT: load
+ %la = load <2 x double>, <2 x double> *%a
+ %lb = load <2 x double>, <2 x double> *%b
+ %lc = load <2 x double>, <2 x double> *%c
+ %ld = load <2 x double>, <2 x double> *%d
+ ; Scalarize everything. This is deliberately not a shufflevector, in order
+ ; to test this code path in the LSV.
+ %v1 = extractelement <2 x double> %la, i32 0
+ %v2 = extractelement <2 x double> %la, i32 1
+ %v3 = extractelement <2 x double> %lb, i32 0
+ %v4 = extractelement <2 x double> %lb, i32 1
+ %v5 = extractelement <2 x double> %lc, i32 0
+ %v6 = extractelement <2 x double> %lc, i32 1
+ %v7 = extractelement <2 x double> %ld, i32 0
+ %v8 = extractelement <2 x double> %ld, i32 1
+ ; Make a vector again
+ %i1 = insertelement <8 x double> undef, double %v1, i32 0
+ %i2 = insertelement <8 x double> %i1, double %v2, i32 1
+ %i3 = insertelement <8 x double> %i2, double %v3, i32 2
+ %i4 = insertelement <8 x double> %i3, double %v4, i32 3
+ %i5 = insertelement <8 x double> %i4, double %v5, i32 4
+ %i6 = insertelement <8 x double> %i5, double %v6, i32 5
+ %i7 = insertelement <8 x double> %i6, double %v7, i32 6
+ %i8 = insertelement <8 x double> %i7, double %v8, i32 7
+ ret <8 x double> %i8
+}
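; The two RUN lines above pick the vector width from the target: with
; -mcpu haswell (256-bit vectors) the eight scalars are expected to be fed by
; two <4 x double> loads, while with -mcpu knl (512-bit vectors) a single
; <8 x double> load should suffice. A sketch of the KNL-shaped result
; (hypothetical names):
define <8 x double> @sketch_knl_result(double* %ptr) {
  %vptr = bitcast double* %ptr to <8 x double>*
  %v = load <8 x double>, <8 x double>* %vptr, align 8
  ret <8 x double> %v
}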
diff --git a/test/Transforms/LoopDeletion/invalidation.ll b/test/Transforms/LoopDeletion/invalidation.ll
new file mode 100644
index 000000000000..5564f90e1ea7
--- /dev/null
+++ b/test/Transforms/LoopDeletion/invalidation.ll
@@ -0,0 +1,42 @@
+; Ensure we don't run analyses over loops after they've been deleted. We run
+; one version with a no-op loop pass to make sure that the loop doesn't get
+; simplified away.
+;
+; RUN: opt < %s -passes='require<ivusers>,no-op-loop,require<ivusers>' -S \
+; RUN: | FileCheck %s --check-prefixes=CHECK,BEFORE
+; RUN: opt < %s -passes='require<ivusers>,loop-deletion,require<ivusers>' -S \
+; RUN: | FileCheck %s --check-prefixes=CHECK,AFTER
+
+
+define void @foo(i64 %n, i64 %m) nounwind {
+; CHECK-LABEL: @foo(
+
+entry:
+ br label %bb
+; CHECK: entry:
+; BEFORE-NEXT: br label %bb
+; AFTER-NEXT: br label %return
+
+bb:
+ %x.0 = phi i64 [ 0, %entry ], [ %t0, %bb2 ]
+ %t0 = add i64 %x.0, 1
+ %t1 = icmp slt i64 %x.0, %n
+ br i1 %t1, label %bb2, label %return
+; BEFORE: bb:
+; BEFORE: br i1 {{.*}}, label %bb2, label %return
+; AFTER-NOT: bb:
+; AFTER-NOT: br
+
+bb2:
+ %t2 = icmp slt i64 %x.0, %m
+ br i1 %t1, label %bb, label %return
+; BEFORE: bb2:
+; BEFORE: br i1 {{.*}}, label %bb, label %return
+; AFTER-NOT: bb2:
+; AFTER-NOT: br
+
+return:
+ ret void
+; CHECK: return:
+; CHECK-NEXT: ret void
+}
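; For reference, the AFTER prefix above expects loop deletion to remove the
; dead loop entirely, so that entry branches straight to return. A sketch of
; that resulting shape (hypothetical name):
define void @sketch_after_deletion(i64 %n, i64 %m) {
entry:
  br label %return
return:
  ret void
}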
diff --git a/test/Transforms/LoopDeletion/multiple-exit-conditions.ll b/test/Transforms/LoopDeletion/multiple-exit-conditions.ll
index d7d6badb1650..e7b47211d570 100644
--- a/test/Transforms/LoopDeletion/multiple-exit-conditions.ll
+++ b/test/Transforms/LoopDeletion/multiple-exit-conditions.ll
@@ -1,5 +1,5 @@
; RUN: opt < %s -loop-deletion -S | FileCheck %s
-; RUN: opt < %s -passes='require<scalar-evolution>,loop(loop-deletion)' -S | FileCheck %s
+; RUN: opt < %s -passes='loop(loop-deletion)' -S | FileCheck %s
; ScalarEvolution can prove the loop iteration is finite, even though
; it can't represent the exact trip count as an expression. That's
diff --git a/test/Transforms/LoopDeletion/multiple-exits.ll b/test/Transforms/LoopDeletion/multiple-exits.ll
index dcf79057db54..760c3aae4ee7 100644
--- a/test/Transforms/LoopDeletion/multiple-exits.ll
+++ b/test/Transforms/LoopDeletion/multiple-exits.ll
@@ -1,80 +1,138 @@
-; RUN: opt < %s -loop-deletion -S | FileCheck %s
+; Checks whether dead loops with multiple exits can be eliminated.
+; Note that we run loop-simplify and LCSSA over the test cases to make sure the
+; critical components remain after those passes and are visible to the loop
+; deletion pass.
+;
+; RUN: opt < %s -loop-simplify -lcssa -S | FileCheck %s --check-prefixes=CHECK,BEFORE
+; RUN: opt < %s -loop-deletion -S | FileCheck %s --check-prefixes=CHECK,AFTER
+;
+; RUN: opt < %s -passes=no-op-loop -S | FileCheck %s --check-prefixes=CHECK,BEFORE
+; RUN: opt < %s -passes=loop-deletion -S | FileCheck %s --check-prefixes=CHECK,AFTER
-; Checks whether dead loops with multiple exits can be eliminated
define void @foo(i64 %n, i64 %m) nounwind {
; CHECK-LABEL: @foo(
-; CHECK: entry:
-; CHECK-NEXT: br label %return
-; CHECK: return:
-; CHECK-NEXT: ret void
entry:
br label %bb
+; CHECK: entry:
+; BEFORE-NEXT: br label %bb
+; AFTER-NEXT: br label %return
bb:
%x.0 = phi i64 [ 0, %entry ], [ %t0, %bb2 ]
%t0 = add i64 %x.0, 1
%t1 = icmp slt i64 %x.0, %n
br i1 %t1, label %bb2, label %return
+; BEFORE: bb:
+; BEFORE: br i1 {{.*}}, label %bb2, label %return
+; AFTER-NOT: bb:
+; AFTER-NOT: br
+
bb2:
%t2 = icmp slt i64 %x.0, %m
br i1 %t1, label %bb, label %return
+; BEFORE: bb2:
+; BEFORE: br i1 {{.*}}, label %bb, label %return
+; AFTER-NOT: bb2:
+; AFTER-NOT: br
return:
ret void
+; CHECK: return:
+; CHECK-NEXT: ret void
}
-define i64 @bar(i64 %n, i64 %m) nounwind {
-; CHECK-LABEL: @bar(
-; CHECK: entry:
-; CHECK-NEXT: br label %return
-
-; CHECK: return:
-; CHECK-NEXT: ret i64 10
+define i64 @bar(i64 %n, i64 %m, i64 %maybe_zero) nounwind {
+; CHECK-LABEL: @bar(
entry:
br label %bb
+; CHECK: entry:
+; BEFORE-NEXT: br label %bb
+; AFTER-NEXT: br label %return
bb:
%x.0 = phi i64 [ 0, %entry ], [ %t0, %bb3 ]
%t0 = add i64 %x.0, 1
%t1 = icmp slt i64 %x.0, %n
br i1 %t1, label %bb2, label %return
+; BEFORE: bb:
+; BEFORE: br i1 {{.*}}, label %bb2, label %return
+; AFTER-NOT: bb:
+; AFTER-NOT: br
+
bb2:
%t2 = icmp slt i64 %x.0, %m
+ ; This unused division prevents unifying this loop exit path with others
+ ; because it can be deleted but cannot be hoisted.
+ %unused1 = udiv i64 42, %maybe_zero
br i1 %t2, label %bb3, label %return
+; BEFORE: bb2:
+; BEFORE: br i1 {{.*}}, label %bb3, label %return
+; AFTER-NOT: bb2:
+; AFTER-NOT: br
+
bb3:
%t3 = icmp slt i64 %x.0, %m
+ ; This unused division prevents unifying this loop exit path with others
+ ; because it can be deleted but cannot be hoisted.
+ %unused2 = sdiv i64 42, %maybe_zero
br i1 %t3, label %bb, label %return
+; BEFORE: bb3:
+; BEFORE: br i1 {{.*}}, label %bb, label %return
+; AFTER-NOT: bb3:
+; AFTER-NOT: br
return:
%x.lcssa = phi i64 [ 10, %bb ], [ 10, %bb2 ], [ 10, %bb3 ]
ret i64 %x.lcssa
+; CHECK: return:
+; BEFORE-NEXT: %[[X:.*]] = phi i64 [ 10, %bb ], [ 10, %bb2 ], [ 10, %bb3 ]
+; AFTER-NEXT: %[[X:.*]] = phi i64 [ 10, %entry ]
+; CHECK-NEXT: ret i64 %[[X]]
}
-define i64 @baz(i64 %n, i64 %m) nounwind {
+; This function has a loop that looks like @bar's but cannot be deleted,
+; because the path taken out of the loop determines which value is selected.
+define i64 @baz(i64 %n, i64 %m, i64 %maybe_zero) nounwind {
; CHECK-LABEL: @baz(
-; CHECK: return:
-; CHECK-NEXT: %x.lcssa = phi i64 [ 12, %bb ], [ 10, %bb2 ]
-; CHECK-NEXT: ret i64 %x.lcssa
entry:
br label %bb
+; CHECK: entry:
+; CHECK-NEXT: br label %bb
bb:
%x.0 = phi i64 [ 0, %entry ], [ %t0, %bb3 ]
%t0 = add i64 %x.0, 1
%t1 = icmp slt i64 %x.0, %n
br i1 %t1, label %bb2, label %return
+; CHECK: bb:
+; CHECK: br i1 {{.*}}, label %bb2, label %return
+
bb2:
%t2 = icmp slt i64 %x.0, %m
+ ; This unused division prevents unifying this loop exit path with others
+ ; because it can be deleted but cannot be hoisted.
+ %unused1 = udiv i64 42, %maybe_zero
br i1 %t2, label %bb3, label %return
+; CHECK: bb2:
+; CHECK: br i1 {{.*}}, label %bb3, label %return
+
bb3:
%t3 = icmp slt i64 %x.0, %m
+ ; This unused division prevents unifying this loop exit path with others
+ ; because it can be deleted but cannot be hoisted.
+ %unused2 = sdiv i64 42, %maybe_zero
br i1 %t3, label %bb, label %return
+; CHECK: bb3:
+; CHECK: br i1 {{.*}}, label %bb, label %return
return:
%x.lcssa = phi i64 [ 12, %bb ], [ 10, %bb2 ], [ 10, %bb3 ]
ret i64 %x.lcssa
+; CHECK: return:
+; CHECK-NEXT: %[[X:.*]] = phi i64 [ 12, %bb ], [ 10, %bb2 ], [ 10, %bb3 ]
+; CHECK-NEXT: ret i64 %[[X]]
}
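; Why the unused divisions above keep the exit paths distinct: a udiv or sdiv
; whose divisor may be zero is undefined on that input, so it cannot be
; speculatively hoisted out of its guarded block, yet its unused result means
; it can simply be deleted. A minimal illustration (hypothetical names):
define i64 @sketch_non_speculatable(i64 %maybe_zero, i1 %c) {
entry:
  br i1 %c, label %divide, label %skip
divide:
  ; Safe only because the branch guards it; hoisting this udiv into %entry
  ; could introduce a division by zero.
  %q = udiv i64 42, %maybe_zero
  br label %skip
skip:
  %r = phi i64 [ %q, %divide ], [ 0, %entry ]
  ret i64 %r
}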
diff --git a/test/Transforms/LoopIdiom/unroll.ll b/test/Transforms/LoopIdiom/unroll.ll
index 0cdfda254d78..5981c3e4492f 100644
--- a/test/Transforms/LoopIdiom/unroll.ll
+++ b/test/Transforms/LoopIdiom/unroll.ll
@@ -1,7 +1,7 @@
; RUN: opt -basicaa -loop-idiom < %s -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-; CHECK @.memset_pattern = private unnamed_addr constant [4 x i32] [i32 2, i32 2, i32 2, i32 2], align 16
+; CHECK: @.memset_pattern = private unnamed_addr constant [4 x i32] [i32 2, i32 2, i32 2, i32 2], align 16
target triple = "x86_64-apple-darwin10.0.0"
diff --git a/test/Transforms/LoopLoadElim/backward.ll b/test/Transforms/LoopLoadElim/backward.ll
index 7c750a51a2a3..c0cec75bdd37 100644
--- a/test/Transforms/LoopLoadElim/backward.ll
+++ b/test/Transforms/LoopLoadElim/backward.ll
@@ -1,4 +1,5 @@
; RUN: opt -loop-load-elim -S < %s | FileCheck %s
+; RUN: opt -passes=loop-load-elim -S < %s | FileCheck %s
; Simple st->ld forwarding derived from a lexical backward dep.
;
diff --git a/test/Transforms/LoopLoadElim/forward.ll b/test/Transforms/LoopLoadElim/forward.ll
index 9a0e03a317c8..0b270cab3edc 100644
--- a/test/Transforms/LoopLoadElim/forward.ll
+++ b/test/Transforms/LoopLoadElim/forward.ll
@@ -1,4 +1,5 @@
; RUN: opt -loop-load-elim -S < %s | FileCheck %s
+; RUN: opt -passes=loop-load-elim -S < %s | FileCheck %s
; Simple st->ld forwarding derived from a lexical forward dep.
;
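; Both LoopLoadElim tests above target store-to-load forwarding across loop
; iterations: when a store in iteration i writes the location that the load
; reads in iteration i+1, the load can be replaced by a value carried in a
; PHI. A rough sketch (hypothetical names) of the pattern the pass targets:
define void @sketch_forwarding(i32* noalias %A, i32* noalias %B, i32* noalias %C, i64 %N) {
entry:
  br label %loop
loop:
  %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
  %i.next = add nuw nsw i64 %i, 1
  %b.ptr = getelementptr inbounds i32, i32* %B, i64 %i
  %b = load i32, i32* %b.ptr, align 4
  ; Iteration i stores A[i+1]...
  %a.next.ptr = getelementptr inbounds i32, i32* %A, i64 %i.next
  store i32 %b, i32* %a.next.ptr, align 4
  ; ...and iteration i+1 loads that location back here, so this load is
  ; forwardable from the previous iteration's store.
  %a.cur.ptr = getelementptr inbounds i32, i32* %A, i64 %i
  %v = load i32, i32* %a.cur.ptr, align 4
  %c.ptr = getelementptr inbounds i32, i32* %C, i64 %i
  store i32 %v, i32* %c.ptr, align 4
  %cmp = icmp ult i64 %i.next, %N
  br i1 %cmp, label %loop, label %exit
exit:
  ret void
}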
diff --git a/test/Transforms/LoopPredication/basic.ll b/test/Transforms/LoopPredication/basic.ll
new file mode 100644
index 000000000000..6ce07819cb03
--- /dev/null
+++ b/test/Transforms/LoopPredication/basic.ll
@@ -0,0 +1,571 @@
+; RUN: opt -S -loop-predication < %s 2>&1 | FileCheck %s
+; RUN: opt -S -passes='require<scalar-evolution>,loop(loop-predication)' < %s 2>&1 | FileCheck %s
+
+declare void @llvm.experimental.guard(i1, ...)
+
+define i32 @unsigned_loop_0_to_n_ult_check(i32* %array, i32 %length, i32 %n) {
+; CHECK-LABEL: @unsigned_loop_0_to_n_ult_check
+entry:
+ %tmp5 = icmp eq i32 %n, 0
+ br i1 %tmp5, label %exit, label %loop.preheader
+
+loop.preheader:
+; CHECK: loop.preheader:
+; CHECK: [[max_index:[^ ]+]] = add i32 %n, -1
+; CHECK-NEXT: [[wide_cond:[^ ]+]] = icmp ult i32 [[max_index]], %length
+; CHECK-NEXT: br label %loop
+ br label %loop
+
+loop:
+; CHECK: loop:
+; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 [[wide_cond]], i32 9) [ "deopt"() ]
+ %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
+ %i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
+ %within.bounds = icmp ult i32 %i, %length
+ call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
+
+ %i.i64 = zext i32 %i to i64
+ %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
+ %array.i = load i32, i32* %array.i.ptr, align 4
+ %loop.acc.next = add i32 %loop.acc, %array.i
+
+ %i.next = add nuw i32 %i, 1
+ %continue = icmp ult i32 %i.next, %n
+ br i1 %continue, label %loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %loop.acc.next, %loop ]
+ ret i32 %result
+}
+
+define i32 @unsigned_loop_0_to_n_ugt_check(i32* %array, i32 %length, i32 %n) {
+; CHECK-LABEL: @unsigned_loop_0_to_n_ugt_check
+entry:
+ %tmp5 = icmp eq i32 %n, 0
+ br i1 %tmp5, label %exit, label %loop.preheader
+
+loop.preheader:
+; CHECK: loop.preheader:
+; CHECK: [[max_index:[^ ]+]] = add i32 %n, -1
+; CHECK-NEXT: [[wide_cond:[^ ]+]] = icmp ult i32 [[max_index]], %length
+; CHECK-NEXT: br label %loop
+ br label %loop
+
+loop:
+; CHECK: loop:
+; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 [[wide_cond]], i32 9) [ "deopt"() ]
+ %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
+ %i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
+ %within.bounds = icmp ugt i32 %length, %i
+ call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
+
+ %i.i64 = zext i32 %i to i64
+ %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
+ %array.i = load i32, i32* %array.i.ptr, align 4
+ %loop.acc.next = add i32 %loop.acc, %array.i
+
+ %i.next = add nuw i32 %i, 1
+ %continue = icmp ult i32 %i.next, %n
+ br i1 %continue, label %loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %loop.acc.next, %loop ]
+ ret i32 %result
+}
+
+
+define i32 @two_range_checks(i32* %array.1, i32 %length.1,
+ i32* %array.2, i32 %length.2, i32 %n) {
+; CHECK-LABEL: @two_range_checks
+entry:
+ %tmp5 = icmp eq i32 %n, 0
+ br i1 %tmp5, label %exit, label %loop.preheader
+
+loop.preheader:
+; CHECK: loop.preheader:
+; CHECK: [[max_index:[^ ]+]] = add i32 %n, -1
+; CHECK-NEXT: [[wide_cond_1:[^ ]+]] = icmp ult i32 [[max_index]], %length.{{1|2}}
+; CHECK-NEXT: [[wide_cond_2:[^ ]+]] = icmp ult i32 [[max_index]], %length.{{1|2}}
+; CHECK-NEXT: br label %loop
+ br label %loop
+
+loop:
+; CHECK: loop:
+; CHECK: [[wide_cond:[^ ]+]] = and i1 [[wide_cond_1]], [[wide_cond_2]]
+; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 [[wide_cond]], i32 9) [ "deopt"() ]
+ %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
+ %i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
+ %within.bounds.1 = icmp ult i32 %i, %length.1
+ %within.bounds.2 = icmp ult i32 %i, %length.2
+ %within.bounds = and i1 %within.bounds.1, %within.bounds.2
+ call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
+
+ %i.i64 = zext i32 %i to i64
+ %array.1.i.ptr = getelementptr inbounds i32, i32* %array.1, i64 %i.i64
+ %array.1.i = load i32, i32* %array.1.i.ptr, align 4
+ %loop.acc.1 = add i32 %loop.acc, %array.1.i
+
+ %array.2.i.ptr = getelementptr inbounds i32, i32* %array.2, i64 %i.i64
+ %array.2.i = load i32, i32* %array.2.i.ptr, align 4
+ %loop.acc.next = add i32 %loop.acc.1, %array.2.i
+
+ %i.next = add nuw i32 %i, 1
+ %continue = icmp ult i32 %i.next, %n
+ br i1 %continue, label %loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %loop.acc.next, %loop ]
+ ret i32 %result
+}
+
+define i32 @three_range_checks(i32* %array.1, i32 %length.1,
+ i32* %array.2, i32 %length.2,
+ i32* %array.3, i32 %length.3, i32 %n) {
+; CHECK-LABEL: @three_range_checks
+entry:
+ %tmp5 = icmp eq i32 %n, 0
+ br i1 %tmp5, label %exit, label %loop.preheader
+
+loop.preheader:
+; CHECK: loop.preheader:
+; CHECK: [[max_index:[^ ]+]] = add i32 %n, -1
+; CHECK-NEXT: [[wide_cond_1:[^ ]+]] = icmp ult i32 [[max_index]], %length.{{1|2|3}}
+; CHECK-NEXT: [[wide_cond_2:[^ ]+]] = icmp ult i32 [[max_index]], %length.{{1|2|3}}
+; CHECK-NEXT: [[wide_cond_3:[^ ]+]] = icmp ult i32 [[max_index]], %length.{{1|2|3}}
+; CHECK-NEXT: br label %loop
+ br label %loop
+
+loop:
+; CHECK: loop:
+; CHECK: [[wide_cond_and:[^ ]+]] = and i1 [[wide_cond_1]], [[wide_cond_2]]
+; CHECK-NEXT: [[wide_cond:[^ ]+]] = and i1 [[wide_cond_and]], [[wide_cond_3]]
+; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 [[wide_cond]], i32 9) [ "deopt"() ]
+ %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
+ %i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
+ %within.bounds.1 = icmp ult i32 %i, %length.1
+ %within.bounds.2 = icmp ult i32 %i, %length.2
+ %within.bounds.3 = icmp ult i32 %i, %length.3
+ %within.bounds.1.and.2 = and i1 %within.bounds.1, %within.bounds.2
+ %within.bounds = and i1 %within.bounds.1.and.2, %within.bounds.3
+ call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
+
+ %i.i64 = zext i32 %i to i64
+ %array.1.i.ptr = getelementptr inbounds i32, i32* %array.1, i64 %i.i64
+ %array.1.i = load i32, i32* %array.1.i.ptr, align 4
+ %loop.acc.1 = add i32 %loop.acc, %array.1.i
+
+ %array.2.i.ptr = getelementptr inbounds i32, i32* %array.2, i64 %i.i64
+ %array.2.i = load i32, i32* %array.2.i.ptr, align 4
+ %loop.acc.2 = add i32 %loop.acc.1, %array.2.i
+
+ %array.3.i.ptr = getelementptr inbounds i32, i32* %array.3, i64 %i.i64
+ %array.3.i = load i32, i32* %array.3.i.ptr, align 4
+ %loop.acc.next = add i32 %loop.acc.2, %array.3.i
+
+ %i.next = add nuw i32 %i, 1
+ %continue = icmp ult i32 %i.next, %n
+ br i1 %continue, label %loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %loop.acc.next, %loop ]
+ ret i32 %result
+}
+
+define i32 @three_guards(i32* %array.1, i32 %length.1,
+ i32* %array.2, i32 %length.2,
+ i32* %array.3, i32 %length.3, i32 %n) {
+; CHECK-LABEL: @three_guards
+entry:
+ %tmp5 = icmp eq i32 %n, 0
+ br i1 %tmp5, label %exit, label %loop.preheader
+
+loop.preheader:
+; CHECK: loop.preheader:
+; CHECK: [[max_index:[^ ]+]] = add i32 %n, -1
+; CHECK-NEXT: [[wide_cond_1:[^ ]+]] = icmp ult i32 [[max_index]], %length.1
+; CHECK-NEXT: [[wide_cond_2:[^ ]+]] = icmp ult i32 [[max_index]], %length.2
+; CHECK-NEXT: [[wide_cond_3:[^ ]+]] = icmp ult i32 [[max_index]], %length.3
+; CHECK-NEXT: br label %loop
+ br label %loop
+
+loop:
+; CHECK: loop:
+; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 [[wide_cond_1]], i32 9) [ "deopt"() ]
+; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 [[wide_cond_2]], i32 9) [ "deopt"() ]
+; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 [[wide_cond_3]], i32 9) [ "deopt"() ]
+
+ %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
+ %i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
+
+ %within.bounds.1 = icmp ult i32 %i, %length.1
+ call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds.1, i32 9) [ "deopt"() ]
+
+ %i.i64 = zext i32 %i to i64
+ %array.1.i.ptr = getelementptr inbounds i32, i32* %array.1, i64 %i.i64
+ %array.1.i = load i32, i32* %array.1.i.ptr, align 4
+ %loop.acc.1 = add i32 %loop.acc, %array.1.i
+
+ %within.bounds.2 = icmp ult i32 %i, %length.2
+ call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds.2, i32 9) [ "deopt"() ]
+
+ %array.2.i.ptr = getelementptr inbounds i32, i32* %array.2, i64 %i.i64
+ %array.2.i = load i32, i32* %array.2.i.ptr, align 4
+ %loop.acc.2 = add i32 %loop.acc.1, %array.2.i
+
+ %within.bounds.3 = icmp ult i32 %i, %length.3
+ call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds.3, i32 9) [ "deopt"() ]
+
+ %array.3.i.ptr = getelementptr inbounds i32, i32* %array.3, i64 %i.i64
+ %array.3.i = load i32, i32* %array.3.i.ptr, align 4
+ %loop.acc.next = add i32 %loop.acc.2, %array.3.i
+
+ %i.next = add nuw i32 %i, 1
+ %continue = icmp ult i32 %i.next, %n
+ br i1 %continue, label %loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %loop.acc.next, %loop ]
+ ret i32 %result
+}
+
+define i32 @signed_loop_start_to_n_sge_0_check(i32* %array, i32 %length, i32 %start, i32 %n) {
+; CHECK-LABEL: @signed_loop_start_to_n_sge_0_check
+entry:
+ %tmp5 = icmp eq i32 %n, 0
+ br i1 %tmp5, label %exit, label %loop.preheader
+
+loop.preheader:
+; CHECK: loop.preheader:
+; CHECK-NEXT: [[wide_cond:[^ ]+]] = icmp sge i32 %start, 0
+; CHECK-NEXT: br label %loop
+ br label %loop
+
+loop:
+; CHECK: loop:
+; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 [[wide_cond]], i32 9) [ "deopt"() ]
+ %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
+ %i = phi i32 [ %i.next, %loop ], [ %start, %loop.preheader ]
+ %within.bounds = icmp sge i32 %i, 0
+ call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
+
+ %i.i64 = zext i32 %i to i64
+ %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
+ %array.i = load i32, i32* %array.i.ptr, align 4
+ %loop.acc.next = add i32 %loop.acc, %array.i
+
+ %i.next = add nsw i32 %i, 1
+ %continue = icmp slt i32 %i.next, %n
+ br i1 %continue, label %loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %loop.acc.next, %loop ]
+ ret i32 %result
+}
+
+define i32 @signed_loop_start_to_n_upper_slt_length_check(i32* %array, i32 %length, i32 %start, i32 %n) {
+; CHECK-LABEL: @signed_loop_start_to_n_upper_slt_length_check
+entry:
+ %tmp5 = icmp sle i32 %n, 0
+ br i1 %tmp5, label %exit, label %loop.preheader
+
+loop.preheader:
+; CHECK: loop.preheader:
+; CHECK: [[start_1:[^ ]+]] = add i32 %start, 1
+; CHECK-NEXT: [[n_sgt_start_1:[^ ]+]] = icmp sgt i32 %n, [[start_1]]
+; CHECK-NEXT: [[smax:[^ ]+]] = select i1 [[n_sgt_start_1]], i32 %n, i32 [[start_1]]
+; CHECK-NEXT: [[max_index:[^ ]+]] = add i32 [[smax]], -1
+; CHECK-NEXT: [[wide_cond:[^ ]+]] = icmp slt i32 [[max_index]], %length
+; CHECK-NEXT: br label %loop
+ br label %loop
+
+loop:
+; CHECK: loop:
+; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 [[wide_cond]], i32 9) [ "deopt"() ]
+ %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
+ %i = phi i32 [ %i.next, %loop ], [ %start, %loop.preheader ]
+ %within.bounds = icmp slt i32 %i, %length
+ call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
+
+ %i.i64 = zext i32 %i to i64
+ %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
+ %array.i = load i32, i32* %array.i.ptr, align 4
+ %loop.acc.next = add i32 %loop.acc, %array.i
+
+ %i.next = add nsw i32 %i, 1
+ %continue = icmp slt i32 %i.next, %n
+ br i1 %continue, label %loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %loop.acc.next, %loop ]
+ ret i32 %result
+}
+
+define i32 @signed_loop_start_to_n_both_checks(i32* %array, i32 %length, i32 %start, i32 %n) {
+; CHECK-LABEL: @signed_loop_start_to_n_both_checks
+entry:
+ %tmp5 = icmp sle i32 %n, 0
+ br i1 %tmp5, label %exit, label %loop.preheader
+
+loop.preheader:
+; CHECK: loop.preheader:
+; CHECK: [[lower_check:[^ ]+]] = icmp sge i32 %start, 0
+; CHECK-NEXT: [[start_1:[^ ]+]] = add i32 %start, 1
+; CHECK-NEXT: [[n_sgt_start_1:[^ ]+]] = icmp sgt i32 %n, [[start_1]]
+; CHECK-NEXT: [[smax:[^ ]+]] = select i1 [[n_sgt_start_1]], i32 %n, i32 [[start_1]]
+; CHECK-NEXT: [[max_index:[^ ]+]] = add i32 [[smax]], -1
+; CHECK-NEXT: [[upper_check:[^ ]+]] = icmp slt i32 [[max_index]], %length
+; CHECK-NEXT: br label %loop
+ br label %loop
+
+loop:
+; CHECK: loop:
+; CHECK: [[wide_cond:[^ ]+]] = and i1 [[lower_check]], [[upper_check]]
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[wide_cond]], i32 9) [ "deopt"() ]
+ %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
+ %i = phi i32 [ %i.next, %loop ], [ %start, %loop.preheader ]
+ %within.bounds.1 = icmp slt i32 %i, %length
+ %within.bounds.2 = icmp sge i32 %i, 0
+ %within.bounds = and i1 %within.bounds.1, %within.bounds.2
+ call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
+
+ %i.i64 = zext i32 %i to i64
+ %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
+ %array.i = load i32, i32* %array.i.ptr, align 4
+ %loop.acc.next = add i32 %loop.acc, %array.i
+
+ %i.next = add nsw i32 %i, 1
+ %continue = icmp slt i32 %i.next, %n
+ br i1 %continue, label %loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %loop.acc.next, %loop ]
+ ret i32 %result
+}
+
+define i32 @unsigned_loop_0_to_n_unrelated_condition(i32* %array, i32 %length, i32 %n, i32 %x) {
+; CHECK-LABEL: @unsigned_loop_0_to_n_unrelated_condition
+entry:
+ %tmp5 = icmp eq i32 %n, 0
+ br i1 %tmp5, label %exit, label %loop.preheader
+
+loop.preheader:
+; CHECK: loop.preheader:
+; CHECK: [[max_index:[^ ]+]] = add i32 %n, -1
+; CHECK-NEXT: [[wide_cond:[^ ]+]] = icmp ult i32 [[max_index]], %length
+; CHECK-NEXT: br label %loop
+ br label %loop
+
+loop:
+; CHECK: loop:
+; CHECK: %unrelated.cond = icmp ult i32 %x, %length
+; CHECK: [[guard_cond:[^ ]+]] = and i1 %unrelated.cond, [[wide_cond]]
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[guard_cond]], i32 9) [ "deopt"() ]
+ %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
+ %i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
+ %within.bounds = icmp ult i32 %i, %length
+ %unrelated.cond = icmp ult i32 %x, %length
+ %guard.cond = and i1 %within.bounds, %unrelated.cond
+ call void (i1, ...) @llvm.experimental.guard(i1 %guard.cond, i32 9) [ "deopt"() ]
+
+ %i.i64 = zext i32 %i to i64
+ %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
+ %array.i = load i32, i32* %array.i.ptr, align 4
+ %loop.acc.next = add i32 %loop.acc, %array.i
+
+ %i.next = add nuw i32 %i, 1
+ %continue = icmp ult i32 %i.next, %n
+ br i1 %continue, label %loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %loop.acc.next, %loop ]
+ ret i32 %result
+}
+
+; Don't change the guard condition if there were no widened subconditions
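+; (i.e. the original and-chain of loop-varying conditions is left as-is, since
+; there is no loop-invariant subcondition to hoist into the preheader)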
+define i32 @test_no_widened_conditions(i32* %array, i32 %length, i32 %n, i32 %x1, i32 %x2, i32 %x3) {
+; CHECK-LABEL: @test_no_widened_conditions
+entry:
+ %tmp5 = icmp eq i32 %n, 0
+ br i1 %tmp5, label %exit, label %loop.preheader
+
+loop.preheader:
+; CHECK: loop.preheader:
+; CHECK-NEXT: br label %loop
+ br label %loop
+
+loop:
+; CHECK: loop:
+; CHECK: %unrelated.cond.1 = icmp eq i32 %x1, %i
+; CHECK-NEXT: %unrelated.cond.2 = icmp eq i32 %x2, %i
+; CHECK-NEXT: %unrelated.cond.3 = icmp eq i32 %x3, %i
+; CHECK-NEXT: %unrelated.cond.and.1 = and i1 %unrelated.cond.1, %unrelated.cond.2
+; CHECK-NEXT: %guard.cond = and i1 %unrelated.cond.and.1, %unrelated.cond.3
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %guard.cond, i32 9) [ "deopt"() ]
+ %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
+ %i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
+ %unrelated.cond.1 = icmp eq i32 %x1, %i
+ %unrelated.cond.2 = icmp eq i32 %x2, %i
+ %unrelated.cond.3 = icmp eq i32 %x3, %i
+ %unrelated.cond.and.1 = and i1 %unrelated.cond.1, %unrelated.cond.2
+ %guard.cond = and i1 %unrelated.cond.and.1, %unrelated.cond.3
+
+ call void (i1, ...) @llvm.experimental.guard(i1 %guard.cond, i32 9) [ "deopt"() ]
+
+ %i.i64 = zext i32 %i to i64
+ %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
+ %array.i = load i32, i32* %array.i.ptr, align 4
+ %loop.acc.next = add i32 %loop.acc, %array.i
+
+ %i.next = add nuw i32 %i, 1
+ %continue = icmp ult i32 %i.next, %n
+ br i1 %continue, label %loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %loop.acc.next, %loop ]
+ ret i32 %result
+}
+
+define i32 @signed_loop_start_to_n_loop_variant_bound(i32* %array, i32 %x, i32 %start, i32 %n) {
+; CHECK-LABEL: @signed_loop_start_to_n_loop_variant_bound
+entry:
+ %tmp5 = icmp sle i32 %n, 0
+ br i1 %tmp5, label %exit, label %loop.preheader
+
+loop.preheader:
+; CHECK: loop.preheader:
+; CHECK-NEXT: br label %loop
+ br label %loop
+
+loop:
+; CHECK: loop:
+; CHECK: %bound = add i32 %i, %x
+; CHECK-NEXT: %within.bounds = icmp slt i32 %i, %bound
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
+ %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
+ %i = phi i32 [ %i.next, %loop ], [ %start, %loop.preheader ]
+ %bound = add i32 %i, %x
+ %within.bounds = icmp slt i32 %i, %bound
+ call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
+
+ %i.i64 = zext i32 %i to i64
+ %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
+ %array.i = load i32, i32* %array.i.ptr, align 4
+ %loop.acc.next = add i32 %loop.acc, %array.i
+
+ %i.next = add nsw i32 %i, 1
+ %continue = icmp slt i32 %i.next, %n
+ br i1 %continue, label %loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %loop.acc.next, %loop ]
+ ret i32 %result
+}
+
+define i32 @signed_loop_start_to_n_non_monotonic_predicate(i32* %array, i32 %x, i32 %start, i32 %n) {
+; CHECK-LABEL: @signed_loop_start_to_n_non_monotonic_predicate
+entry:
+ %tmp5 = icmp sle i32 %n, 0
+ br i1 %tmp5, label %exit, label %loop.preheader
+
+loop.preheader:
+; CHECK: loop.preheader:
+; CHECK-NEXT: br label %loop
+ br label %loop
+
+loop:
+; CHECK: loop:
+; CHECK: %guard.cond = icmp eq i32 %i, %x
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %guard.cond, i32 9) [ "deopt"() ]
+ %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
+ %i = phi i32 [ %i.next, %loop ], [ %start, %loop.preheader ]
+ %guard.cond = icmp eq i32 %i, %x
+ call void (i1, ...) @llvm.experimental.guard(i1 %guard.cond, i32 9) [ "deopt"() ]
+
+ %i.i64 = zext i32 %i to i64
+ %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
+ %array.i = load i32, i32* %array.i.ptr, align 4
+ %loop.acc.next = add i32 %loop.acc, %array.i
+
+ %i.next = add nsw i32 %i, 1
+ %continue = icmp slt i32 %i.next, %n
+ br i1 %continue, label %loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %loop.acc.next, %loop ]
+ ret i32 %result
+}
+
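+; The bound below is a zext of an i16 length. A zext is safe to speculate, so
+; the pass should be able to recompute it in the preheader when it builds the
+; loop-invariant wide condition.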
+define i32 @unsigned_loop_0_to_n_hoist_length(i32* %array, i16 %length.i16, i32 %n) {
+; CHECK-LABEL: @unsigned_loop_0_to_n_hoist_length
+entry:
+ %tmp5 = icmp eq i32 %n, 0
+ br i1 %tmp5, label %exit, label %loop.preheader
+
+loop.preheader:
+; CHECK: loop.preheader:
+; CHECK: [[max_index:[^ ]+]] = add i32 %n, -1
+; CHECK-NEXT: [[length:[^ ]+]] = zext i16 %length.i16 to i32
+; CHECK-NEXT: [[wide_cond:[^ ]+]] = icmp ult i32 [[max_index]], [[length]]
+; CHECK-NEXT: br label %loop
+ br label %loop
+
+loop:
+; CHECK: loop:
+; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 [[wide_cond]], i32 9) [ "deopt"() ]
+ %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
+ %i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
+ %length = zext i16 %length.i16 to i32
+ %within.bounds = icmp ult i32 %i, %length
+ call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
+
+ %i.i64 = zext i32 %i to i64
+ %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
+ %array.i = load i32, i32* %array.i.ptr, align 4
+ %loop.acc.next = add i32 %loop.acc, %array.i
+
+ %i.next = add nuw i32 %i, 1
+ %continue = icmp ult i32 %i.next, %n
+ br i1 %continue, label %loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %loop.acc.next, %loop ]
+ ret i32 %result
+}
+
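+; Here the bound is a udiv. Unlike the zext above, a udiv is not generally
+; safe to speculate (the divisor may be zero), so the pass presumably cannot
+; expand the bound in the preheader and must leave the per-iteration guard
+; unchanged.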
+define i32 @unsigned_loop_0_to_n_cant_hoist_length(i32* %array, i32 %length, i32 %divider, i32 %n) {
+; CHECK-LABEL: @unsigned_loop_0_to_n_cant_hoist_length
+entry:
+ %tmp5 = icmp eq i32 %n, 0
+ br i1 %tmp5, label %exit, label %loop.preheader
+
+loop.preheader:
+; CHECK: loop.preheader:
+; CHECK-NEXT: br label %loop
+ br label %loop
+
+loop:
+; CHECK: loop:
+; CHECK-NEXT: %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
+; CHECK-NEXT: %i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
+; CHECK-NEXT: %length.udiv = udiv i32 %length, %divider
+; CHECK-NEXT: %within.bounds = icmp ult i32 %i, %length.udiv
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
+ %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
+ %i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
+ %length.udiv = udiv i32 %length, %divider
+ %within.bounds = icmp ult i32 %i, %length.udiv
+ call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
+
+ %i.i64 = zext i32 %i to i64
+ %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
+ %array.i = load i32, i32* %array.i.ptr, align 4
+ %loop.acc.next = add i32 %loop.acc, %array.i
+
+ %i.next = add nuw i32 %i, 1
+ %continue = icmp ult i32 %i.next, %n
+ br i1 %continue, label %loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %loop.acc.next, %loop ]
+ ret i32 %result
+}
diff --git a/test/Transforms/LoopPredication/nested.ll b/test/Transforms/LoopPredication/nested.ll
new file mode 100644
index 000000000000..6b40cde3e575
--- /dev/null
+++ b/test/Transforms/LoopPredication/nested.ll
@@ -0,0 +1,160 @@
+; RUN: opt -S -loop-predication < %s 2>&1 | FileCheck %s
+; RUN: opt -S -passes='require<scalar-evolution>,loop(loop-predication)' < %s 2>&1 | FileCheck %s
+
+declare void @llvm.experimental.guard(i1, ...)
+
+define i32 @signed_loop_0_to_n_nested_0_to_l_inner_index_check(i32* %array, i32 %length, i32 %n, i32 %l) {
+; CHECK-LABEL: @signed_loop_0_to_n_nested_0_to_l_inner_index_check
+entry:
+ %tmp5 = icmp sle i32 %n, 0
+ br i1 %tmp5, label %exit, label %outer.loop.preheader
+
+outer.loop.preheader:
+; CHECK: outer.loop.preheader:
+; CHECK: [[iteration_count:[^ ]+]] = add i32 %l, -1
+ br label %outer.loop
+
+outer.loop:
+ %outer.loop.acc = phi i32 [ %outer.loop.acc.next, %outer.loop.inc ], [ 0, %outer.loop.preheader ]
+ %i = phi i32 [ %i.next, %outer.loop.inc ], [ 0, %outer.loop.preheader ]
+ %tmp6 = icmp sle i32 %l, 0
+ br i1 %tmp6, label %outer.loop.inc, label %inner.loop.preheader
+
+inner.loop.preheader:
+; CHECK: inner.loop.preheader:
+; CHECK: [[wide_cond:[^ ]+]] = icmp slt i32 [[iteration_count]], %length
+ br label %inner.loop
+
+inner.loop:
+; CHECK: inner.loop:
+; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 [[wide_cond]], i32 9) [ "deopt"() ]
+ %inner.loop.acc = phi i32 [ %inner.loop.acc.next, %inner.loop ], [ %outer.loop.acc, %inner.loop.preheader ]
+ %j = phi i32 [ %j.next, %inner.loop ], [ 0, %inner.loop.preheader ]
+
+ %within.bounds = icmp slt i32 %j, %length
+ call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
+
+ %j.i64 = zext i32 %j to i64
+ %array.j.ptr = getelementptr inbounds i32, i32* %array, i64 %j.i64
+ %array.j = load i32, i32* %array.j.ptr, align 4
+ %inner.loop.acc.next = add i32 %inner.loop.acc, %array.j
+
+ %j.next = add nsw i32 %j, 1
+ %inner.continue = icmp slt i32 %j.next, %l
+ br i1 %inner.continue, label %inner.loop, label %outer.loop.inc
+
+outer.loop.inc:
+ %outer.loop.acc.next = phi i32 [ %inner.loop.acc.next, %inner.loop ], [ %outer.loop.acc, %outer.loop ]
+ %i.next = add nsw i32 %i, 1
+ %outer.continue = icmp slt i32 %i.next, %n
+ br i1 %outer.continue, label %outer.loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %outer.loop.acc.next, %outer.loop.inc ]
+ ret i32 %result
+}
+
+define i32 @signed_loop_0_to_n_nested_0_to_l_outer_index_check(i32* %array, i32 %length, i32 %n, i32 %l) {
+; CHECK-LABEL: @signed_loop_0_to_n_nested_0_to_l_outer_index_check
+entry:
+ %tmp5 = icmp sle i32 %n, 0
+ br i1 %tmp5, label %exit, label %outer.loop.preheader
+
+outer.loop.preheader:
+; CHECK: outer.loop.preheader:
+; CHECK: [[iteration_count:[^ ]+]] = add i32 %n, -1
+; CHECK: [[wide_cond:[^ ]+]] = icmp slt i32 [[iteration_count]], %length
+ br label %outer.loop
+
+outer.loop:
+ %outer.loop.acc = phi i32 [ %outer.loop.acc.next, %outer.loop.inc ], [ 0, %outer.loop.preheader ]
+ %i = phi i32 [ %i.next, %outer.loop.inc ], [ 0, %outer.loop.preheader ]
+ %tmp6 = icmp sle i32 %l, 0
+ br i1 %tmp6, label %outer.loop.inc, label %inner.loop.preheader
+
+inner.loop.preheader:
+ br label %inner.loop
+
+inner.loop:
+; CHECK: inner.loop:
+; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 [[wide_cond]], i32 9) [ "deopt"() ]
+
+ %inner.loop.acc = phi i32 [ %inner.loop.acc.next, %inner.loop ], [ %outer.loop.acc, %inner.loop.preheader ]
+ %j = phi i32 [ %j.next, %inner.loop ], [ 0, %inner.loop.preheader ]
+
+ %within.bounds = icmp slt i32 %i, %length
+ call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
+
+ %i.i64 = zext i32 %i to i64
+ %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
+ %array.i = load i32, i32* %array.i.ptr, align 4
+ %inner.loop.acc.next = add i32 %inner.loop.acc, %array.i
+
+ %j.next = add nsw i32 %j, 1
+ %inner.continue = icmp slt i32 %j.next, %l
+ br i1 %inner.continue, label %inner.loop, label %outer.loop.inc
+
+outer.loop.inc:
+ %outer.loop.acc.next = phi i32 [ %inner.loop.acc.next, %inner.loop ], [ %outer.loop.acc, %outer.loop ]
+ %i.next = add nsw i32 %i, 1
+ %outer.continue = icmp slt i32 %i.next, %n
+ br i1 %outer.continue, label %outer.loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %outer.loop.acc.next, %outer.loop.inc ]
+ ret i32 %result
+}
+
+define i32 @signed_loop_0_to_n_nested_i_to_l_inner_index_check(i32* %array, i32 %length, i32 %n, i32 %l) {
+; CHECK-LABEL: @signed_loop_0_to_n_nested_i_to_l_inner_index_check
+entry:
+ %tmp5 = icmp sle i32 %n, 0
+ br i1 %tmp5, label %exit, label %outer.loop.preheader
+
+outer.loop.preheader:
+ br label %outer.loop
+
+outer.loop:
+; CHECK: outer.loop:
+; CHECK: [[i_1:[^ ]+]] = add i32 %i, 1
+; CHECK-NEXT: [[l_sgt_i_1:[^ ]+]] = icmp sgt i32 %l, [[i_1]]
+; CHECK-NEXT: [[smax:[^ ]+]] = select i1 [[l_sgt_i_1]], i32 %l, i32 [[i_1]]
+; CHECK-NEXT: [[max_j:[^ ]+]] = add i32 [[smax]], -1
+ %outer.loop.acc = phi i32 [ %outer.loop.acc.next, %outer.loop.inc ], [ 0, %outer.loop.preheader ]
+ %i = phi i32 [ %i.next, %outer.loop.inc ], [ 0, %outer.loop.preheader ]
+ %tmp6 = icmp sle i32 %l, 0
+ br i1 %tmp6, label %outer.loop.inc, label %inner.loop.preheader
+
+inner.loop.preheader:
+; CHECK: inner.loop.preheader:
+; CHECK: [[wide_cond:[^ ]+]] = icmp slt i32 [[max_j]], %length
+ br label %inner.loop
+
+inner.loop:
+; CHECK: inner.loop:
+; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 [[wide_cond]], i32 9) [ "deopt"() ]
+ %inner.loop.acc = phi i32 [ %inner.loop.acc.next, %inner.loop ], [ %outer.loop.acc, %inner.loop.preheader ]
+ %j = phi i32 [ %j.next, %inner.loop ], [ %i, %inner.loop.preheader ]
+
+ %within.bounds = icmp slt i32 %j, %length
+ call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
+
+ %j.i64 = zext i32 %j to i64
+ %array.j.ptr = getelementptr inbounds i32, i32* %array, i64 %j.i64
+ %array.j = load i32, i32* %array.j.ptr, align 4
+ %inner.loop.acc.next = add i32 %inner.loop.acc, %array.j
+
+ %j.next = add nsw i32 %j, 1
+ %inner.continue = icmp slt i32 %j.next, %l
+ br i1 %inner.continue, label %inner.loop, label %outer.loop.inc
+
+outer.loop.inc:
+ %outer.loop.acc.next = phi i32 [ %inner.loop.acc.next, %inner.loop ], [ %outer.loop.acc, %outer.loop ]
+ %i.next = add nsw i32 %i, 1
+ %outer.continue = icmp slt i32 %i.next, %n
+ br i1 %outer.continue, label %outer.loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %outer.loop.acc.next, %outer.loop.inc ]
+ ret i32 %result
+}
\ No newline at end of file
diff --git a/test/Transforms/LoopPredication/visited.ll b/test/Transforms/LoopPredication/visited.ll
new file mode 100644
index 000000000000..e9aae77f8e6f
--- /dev/null
+++ b/test/Transforms/LoopPredication/visited.ll
@@ -0,0 +1,140 @@
+; RUN: opt -S -loop-predication < %s 2>&1 | FileCheck %s
+; RUN: opt -S -passes='require<scalar-evolution>,loop(loop-predication)' < %s 2>&1 | FileCheck %s
+
+declare void @llvm.experimental.guard(i1, ...)
+
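+; The and-chain below forms a DAG in which each node is reused by later ands;
+; walking it naively would take exponential time, so this presumably exercises
+; the pass's visited set while still letting %within.bounds be widened.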
+define i32 @test_visited(i32* %array, i32 %length, i32 %n, i32 %x) {
+; CHECK-LABEL: @test_visited
+entry:
+ %tmp5 = icmp eq i32 %n, 0
+ br i1 %tmp5, label %exit, label %loop.preheader
+
+loop.preheader:
+; CHECK: loop.preheader:
+; CHECK: [[iteration_count:[^ ]+]] = add i32 %n, -1
+; CHECK-NEXT: [[wide_cond:[^ ]+]] = icmp ult i32 [[iteration_count]], %length
+; CHECK-NEXT: br label %loop
+ br label %loop
+
+loop:
+; CHECK: loop:
+; CHECK: %unrelated.cond = icmp eq i32 %x, %i
+; CHECK: [[guard_cond:[^ ]+]] = and i1 %unrelated.cond, [[wide_cond]]
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[guard_cond]], i32 9) [ "deopt"() ]
+ %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
+ %i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
+ %within.bounds = icmp ult i32 %i, %length
+ %unrelated.cond = icmp eq i32 %x, %i
+ %guard.cond.2 = and i1 %within.bounds, %unrelated.cond
+ %guard.cond.3 = and i1 %guard.cond.2, %unrelated.cond
+ %guard.cond.4 = and i1 %guard.cond.3, %guard.cond.2
+ %guard.cond.5 = and i1 %guard.cond.4, %guard.cond.3
+ %guard.cond.6 = and i1 %guard.cond.5, %guard.cond.4
+ %guard.cond.7 = and i1 %guard.cond.6, %guard.cond.5
+ %guard.cond.8 = and i1 %guard.cond.7, %guard.cond.6
+ %guard.cond.9 = and i1 %guard.cond.8, %guard.cond.7
+ %guard.cond.10 = and i1 %guard.cond.9, %guard.cond.8
+ %guard.cond.11 = and i1 %guard.cond.10, %guard.cond.9
+ %guard.cond.12 = and i1 %guard.cond.11, %guard.cond.10
+ %guard.cond.13 = and i1 %guard.cond.12, %guard.cond.11
+ %guard.cond.14 = and i1 %guard.cond.13, %guard.cond.12
+ %guard.cond.15 = and i1 %guard.cond.14, %guard.cond.13
+ %guard.cond.16 = and i1 %guard.cond.15, %guard.cond.14
+ %guard.cond.17 = and i1 %guard.cond.16, %guard.cond.15
+ %guard.cond.18 = and i1 %guard.cond.17, %guard.cond.16
+ %guard.cond.19 = and i1 %guard.cond.18, %guard.cond.17
+ %guard.cond.20 = and i1 %guard.cond.19, %guard.cond.18
+ %guard.cond.21 = and i1 %guard.cond.20, %guard.cond.19
+ %guard.cond.22 = and i1 %guard.cond.21, %guard.cond.20
+ %guard.cond.23 = and i1 %guard.cond.22, %guard.cond.21
+ %guard.cond.24 = and i1 %guard.cond.23, %guard.cond.22
+ %guard.cond.25 = and i1 %guard.cond.24, %guard.cond.23
+ %guard.cond.26 = and i1 %guard.cond.25, %guard.cond.24
+ %guard.cond.27 = and i1 %guard.cond.26, %guard.cond.25
+ %guard.cond.28 = and i1 %guard.cond.27, %guard.cond.26
+ %guard.cond.29 = and i1 %guard.cond.28, %guard.cond.27
+ %guard.cond.30 = and i1 %guard.cond.29, %guard.cond.28
+ %guard.cond.31 = and i1 %guard.cond.30, %guard.cond.29
+ %guard.cond.32 = and i1 %guard.cond.31, %guard.cond.30
+ %guard.cond.33 = and i1 %guard.cond.32, %guard.cond.31
+ %guard.cond.34 = and i1 %guard.cond.33, %guard.cond.32
+ %guard.cond.35 = and i1 %guard.cond.34, %guard.cond.33
+ %guard.cond.36 = and i1 %guard.cond.35, %guard.cond.34
+ %guard.cond.37 = and i1 %guard.cond.36, %guard.cond.35
+ %guard.cond.38 = and i1 %guard.cond.37, %guard.cond.36
+ %guard.cond.39 = and i1 %guard.cond.38, %guard.cond.37
+ %guard.cond.40 = and i1 %guard.cond.39, %guard.cond.38
+ %guard.cond.41 = and i1 %guard.cond.40, %guard.cond.39
+ %guard.cond.42 = and i1 %guard.cond.41, %guard.cond.40
+ %guard.cond.43 = and i1 %guard.cond.42, %guard.cond.41
+ %guard.cond.44 = and i1 %guard.cond.43, %guard.cond.42
+ %guard.cond.45 = and i1 %guard.cond.44, %guard.cond.43
+ %guard.cond.46 = and i1 %guard.cond.45, %guard.cond.44
+ %guard.cond.47 = and i1 %guard.cond.46, %guard.cond.45
+ %guard.cond.48 = and i1 %guard.cond.47, %guard.cond.46
+ %guard.cond.49 = and i1 %guard.cond.48, %guard.cond.47
+ %guard.cond.50 = and i1 %guard.cond.49, %guard.cond.48
+ %guard.cond.51 = and i1 %guard.cond.50, %guard.cond.49
+ %guard.cond.52 = and i1 %guard.cond.51, %guard.cond.50
+ %guard.cond.53 = and i1 %guard.cond.52, %guard.cond.51
+ %guard.cond.54 = and i1 %guard.cond.53, %guard.cond.52
+ %guard.cond.55 = and i1 %guard.cond.54, %guard.cond.53
+ %guard.cond.56 = and i1 %guard.cond.55, %guard.cond.54
+ %guard.cond.57 = and i1 %guard.cond.56, %guard.cond.55
+ %guard.cond.58 = and i1 %guard.cond.57, %guard.cond.56
+ %guard.cond.59 = and i1 %guard.cond.58, %guard.cond.57
+ %guard.cond.60 = and i1 %guard.cond.59, %guard.cond.58
+ %guard.cond.61 = and i1 %guard.cond.60, %guard.cond.59
+ %guard.cond.62 = and i1 %guard.cond.61, %guard.cond.60
+ %guard.cond.63 = and i1 %guard.cond.62, %guard.cond.61
+ %guard.cond.64 = and i1 %guard.cond.63, %guard.cond.62
+ %guard.cond.65 = and i1 %guard.cond.64, %guard.cond.63
+ %guard.cond.66 = and i1 %guard.cond.65, %guard.cond.64
+ %guard.cond.67 = and i1 %guard.cond.66, %guard.cond.65
+ %guard.cond.68 = and i1 %guard.cond.67, %guard.cond.66
+ %guard.cond.69 = and i1 %guard.cond.68, %guard.cond.67
+ %guard.cond.70 = and i1 %guard.cond.69, %guard.cond.68
+ %guard.cond.71 = and i1 %guard.cond.70, %guard.cond.69
+ %guard.cond.72 = and i1 %guard.cond.71, %guard.cond.70
+ %guard.cond.73 = and i1 %guard.cond.72, %guard.cond.71
+ %guard.cond.74 = and i1 %guard.cond.73, %guard.cond.72
+ %guard.cond.75 = and i1 %guard.cond.74, %guard.cond.73
+ %guard.cond.76 = and i1 %guard.cond.75, %guard.cond.74
+ %guard.cond.77 = and i1 %guard.cond.76, %guard.cond.75
+ %guard.cond.78 = and i1 %guard.cond.77, %guard.cond.76
+ %guard.cond.79 = and i1 %guard.cond.78, %guard.cond.77
+ %guard.cond.80 = and i1 %guard.cond.79, %guard.cond.78
+ %guard.cond.81 = and i1 %guard.cond.80, %guard.cond.79
+ %guard.cond.82 = and i1 %guard.cond.81, %guard.cond.80
+ %guard.cond.83 = and i1 %guard.cond.82, %guard.cond.81
+ %guard.cond.84 = and i1 %guard.cond.83, %guard.cond.82
+ %guard.cond.85 = and i1 %guard.cond.84, %guard.cond.83
+ %guard.cond.86 = and i1 %guard.cond.85, %guard.cond.84
+ %guard.cond.87 = and i1 %guard.cond.86, %guard.cond.85
+ %guard.cond.88 = and i1 %guard.cond.87, %guard.cond.86
+ %guard.cond.89 = and i1 %guard.cond.88, %guard.cond.87
+ %guard.cond.90 = and i1 %guard.cond.89, %guard.cond.88
+ %guard.cond.91 = and i1 %guard.cond.90, %guard.cond.89
+ %guard.cond.92 = and i1 %guard.cond.91, %guard.cond.90
+ %guard.cond.93 = and i1 %guard.cond.92, %guard.cond.91
+ %guard.cond.94 = and i1 %guard.cond.93, %guard.cond.92
+ %guard.cond.95 = and i1 %guard.cond.94, %guard.cond.93
+ %guard.cond.96 = and i1 %guard.cond.95, %guard.cond.94
+ %guard.cond.97 = and i1 %guard.cond.96, %guard.cond.95
+ %guard.cond.98 = and i1 %guard.cond.97, %guard.cond.96
+ %guard.cond.99 = and i1 %guard.cond.98, %guard.cond.97
+ call void (i1, ...) @llvm.experimental.guard(i1 %guard.cond.99, i32 9) [ "deopt"() ]
+
+ %i.i64 = zext i32 %i to i64
+ %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
+ %array.i = load i32, i32* %array.i.ptr, align 4
+ %loop.acc.next = add i32 %loop.acc, %array.i
+
+ %i.next = add nuw i32 %i, 1
+ %continue = icmp ult i32 %i.next, %n
+ br i1 %continue, label %loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %loop.acc.next, %loop ]
+ ret i32 %result
+}
\ No newline at end of file
diff --git a/test/Transforms/LoopRotate/phi-dbgvalue.ll b/test/Transforms/LoopRotate/phi-dbgvalue.ll
new file mode 100644
index 000000000000..aa8ca2f627bd
--- /dev/null
+++ b/test/Transforms/LoopRotate/phi-dbgvalue.ll
@@ -0,0 +1,79 @@
+; RUN: opt -S -loop-rotate < %s | FileCheck %s
+
+;CHECK-LABEL: func
+;CHECK-LABEL: entry
+;CHECK-NEXT: tail call void @llvm.dbg.value(metadata i32 %a
+;CHECK-NEXT: tail call void @llvm.dbg.value(metadata i32 1, i64 0, metadata !13, metadata !11), !dbg !15
+;CHECK-LABEL: for.body:
+;CHECK-NEXT: [[I:%.*]] = phi i32 [ 1, %entry ], [ %inc, %for.body ]
+;CHECK-NEXT: tail call void @llvm.dbg.value(metadata i32 [[I]], i64 0, metadata !13, metadata !11), !dbg !15
+
+; Function Attrs: noinline nounwind
+define void @func(i32 %a) local_unnamed_addr #0 !dbg !6 {
+entry:
+ tail call void @llvm.dbg.value(metadata i32 %a, i64 0, metadata !10, metadata !11), !dbg !12
+ tail call void @llvm.dbg.value(metadata i32 1, i64 0, metadata !13, metadata !11), !dbg !15
+ br label %for.cond, !dbg !16
+
+for.cond: ; preds = %for.body, %entry
+ %i.0 = phi i32 [ 1, %entry ], [ %inc, %for.body ]
+ tail call void @llvm.dbg.value(metadata i32 %i.0, i64 0, metadata !13, metadata !11), !dbg !15
+ %cmp = icmp slt i32 %i.0, 10, !dbg !17
+ br i1 %cmp, label %for.body, label %for.end, !dbg !20
+
+for.body: ; preds = %for.cond
+ %add = add nsw i32 %i.0, %a, !dbg !22
+ %call = tail call i32 @func2(i32 %i.0, i32 %add) #3, !dbg !24
+ %inc = add nsw i32 %i.0, 1, !dbg !25
+ tail call void @llvm.dbg.value(metadata i32 %inc, i64 0, metadata !13, metadata !11), !dbg !15
+ br label %for.cond, !dbg !27, !llvm.loop !28
+
+for.end: ; preds = %for.cond
+ ret void, !dbg !31
+}
+
+declare i32 @func2(i32, i32) local_unnamed_addr
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #2
+
+attributes #0 = { noinline nounwind }
+attributes #2 = { nounwind readnone }
+attributes #3 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+!llvm.ident = !{!5}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 (http://llvm.org/git/clang.git 0f3ed908c1f13f83da4b240f7595eb8d05e0a754) (http://llvm.org/git/llvm.git 8e270f5a6b8ceb0f3ac3ef1ffb83c5e29b44ae68)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "debug-phi.c", directory: "/work/projects/src/tests/debug")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{!"clang version 5.0.0 (http://llvm.org/git/clang.git 0f3ed908c1f13f83da4b240f7595eb8d05e0a754) (http://llvm.org/git/llvm.git 8e270f5a6b8ceb0f3ac3ef1ffb83c5e29b44ae68)"}
+!6 = distinct !DISubprogram(name: "func", scope: !1, file: !1, line: 2, type: !7, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!7 = !DISubroutineType(types: !8)
+!8 = !{null, !9}
+!9 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!10 = !DILocalVariable(name: "a", arg: 1, scope: !6, file: !1, line: 2, type: !9)
+!11 = !DIExpression()
+!12 = !DILocation(line: 2, column: 15, scope: !6)
+!13 = !DILocalVariable(name: "i", scope: !14, file: !1, line: 3, type: !9)
+!14 = distinct !DILexicalBlock(scope: !6, file: !1, line: 3, column: 3)
+!15 = !DILocation(line: 3, column: 11, scope: !14)
+!16 = !DILocation(line: 3, column: 7, scope: !14)
+!17 = !DILocation(line: 3, column: 20, scope: !18)
+!18 = !DILexicalBlockFile(scope: !19, file: !1, discriminator: 1)
+!19 = distinct !DILexicalBlock(scope: !14, file: !1, line: 3, column: 3)
+!20 = !DILocation(line: 3, column: 3, scope: !21)
+!21 = !DILexicalBlockFile(scope: !14, file: !1, discriminator: 1)
+!22 = !DILocation(line: 4, column: 15, scope: !23)
+!23 = distinct !DILexicalBlock(scope: !19, file: !1, line: 3, column: 31)
+!24 = !DILocation(line: 4, column: 5, scope: !23)
+!25 = !DILocation(line: 3, column: 27, scope: !26)
+!26 = !DILexicalBlockFile(scope: !19, file: !1, discriminator: 2)
+!27 = !DILocation(line: 3, column: 3, scope: !26)
+!28 = distinct !{!28, !29, !30}
+!29 = !DILocation(line: 3, column: 3, scope: !14)
+!30 = !DILocation(line: 5, column: 3, scope: !14)
+!31 = !DILocation(line: 6, column: 1, scope: !6)
diff --git a/test/Transforms/LoopSimplify/dbg-loc.ll b/test/Transforms/LoopSimplify/dbg-loc.ll
index 702a1ad16af6..98bfefd12238 100644
--- a/test/Transforms/LoopSimplify/dbg-loc.ll
+++ b/test/Transforms/LoopSimplify/dbg-loc.ll
@@ -23,6 +23,7 @@ entry:
for.body: ; preds = %entry, %length.exit
%begin.sink5 = phi %"Length"* [ %incdec.ptr, %length.exit ], [ %begin, %entry ]
+ tail call void @llvm.dbg.value(metadata %"Length"* %begin.sink5, i64 0, metadata !15, metadata !16), !dbg !17
%m_type.i.i.i = getelementptr inbounds %"Length", %"Length"* %begin.sink5, i64 0, i32 2, !dbg !9
%0 = load i8, i8* %m_type.i.i.i, align 1, !dbg !9
%cmp.i.i = icmp eq i8 %0, 9, !dbg !7
@@ -68,6 +69,9 @@ eh.resume: ; preds = %catch
resume { i8*, i32 } undef, !dbg !13
}
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata)
+
; CHECK-DAG: [[PREHEADER_LOC]] = !DILocation(line: 73, column: 27, scope: !{{[0-9]+}})
; CHECK-DAG: [[LOOPEXIT_LOC]] = !DILocation(line: 75, column: 9, scope: !{{[0-9]+}})
; CHECK-DAG: [[LPAD_PREHEADER_LOC]] = !DILocation(line: 85, column: 1, scope: !{{[0-9]+}})
@@ -93,3 +97,6 @@ eh.resume: ; preds = %catch
file: !5,
isOptimized: true, flags: "-O2",
splitDebugFilename: "abc.debug", emissionKind: 2)
+!15 = !DILocalVariable(name: "begin", arg: 1, scope: !6, file: !5, line: 71)
+!16 = !DIExpression()
+!17 = !DILocation(line: 71, column: 32, scope: !6)
diff --git a/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll b/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll
new file mode 100644
index 000000000000..054c61d18795
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll
@@ -0,0 +1,87 @@
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=bonaire -loop-reduce < %s | FileCheck -check-prefix=OPT %s
+
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+
+; Make sure the pointer / address space of AtomicRMW is considered
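+; (the datalayout above gives addrspace(3) pointers 32 bits via p3:32:32, so
+; the rewritten addressing must keep both the address space and offset width)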
+
+; OPT-LABEL: @test_local_atomicrmw_addressing_loop_uniform_index_max_offset_i32(
+
+; OPT-NOT: getelementptr
+
+; OPT: .lr.ph:
+; OPT: %lsr.iv2 = phi i32 addrspace(3)* [ %scevgep3, %.lr.ph ], [ %arg1, %.lr.ph.preheader ]
+; OPT: %lsr.iv1 = phi i32 addrspace(3)* [ %scevgep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
+; OPT: %lsr.iv = phi i32 [ %lsr.iv.next, %.lr.ph ], [ %n, %.lr.ph.preheader ]
+; OPT: %scevgep4 = getelementptr i32, i32 addrspace(3)* %lsr.iv2, i32 16383
+; OPT: %tmp4 = atomicrmw add i32 addrspace(3)* %scevgep4, i32 undef seq_cst
+; OPT: %tmp7 = atomicrmw add i32 addrspace(3)* %lsr.iv1, i32 undef seq_cst
+; OPT: %0 = atomicrmw add i32 addrspace(3)* %lsr.iv1, i32 %tmp8 seq_cst
+; OPT: br i1 %exitcond
+define amdgpu_kernel void @test_local_atomicrmw_addressing_loop_uniform_index_max_offset_i32(i32 addrspace(3)* noalias nocapture %arg0, i32 addrspace(3)* noalias nocapture readonly %arg1, i32 %n) #0 {
+bb:
+ %tmp = icmp sgt i32 %n, 0
+ br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
+
+.lr.ph.preheader: ; preds = %bb
+ br label %.lr.ph
+
+._crit_edge.loopexit: ; preds = %.lr.ph
+ br label %._crit_edge
+
+._crit_edge: ; preds = %._crit_edge.loopexit, %bb
+ ret void
+
+.lr.ph: ; preds = %.lr.ph, %.lr.ph.preheader
+ %indvars.iv = phi i32 [ %indvars.iv.next, %.lr.ph ], [ 0, %.lr.ph.preheader ]
+ %tmp1 = add nuw nsw i32 %indvars.iv, 16383
+ %tmp3 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 %tmp1
+ %tmp4 = atomicrmw add i32 addrspace(3)* %tmp3, i32 undef seq_cst
+ %tmp6 = getelementptr inbounds i32, i32 addrspace(3)* %arg0, i32 %indvars.iv
+ %tmp7 = atomicrmw add i32 addrspace(3)* %tmp6, i32 undef seq_cst
+ %tmp8 = add nsw i32 %tmp7, %tmp4
+ atomicrmw add i32 addrspace(3)* %tmp6, i32 %tmp8 seq_cst
+ %indvars.iv.next = add nuw nsw i32 %indvars.iv, 1
+ %exitcond = icmp eq i32 %indvars.iv.next, %n
+ br i1 %exitcond, label %._crit_edge.loopexit, label %.lr.ph
+}
+
+; OPT-LABEL: test_local_cmpxchg_addressing_loop_uniform_index_max_offset_i32(
+; OPT-NOT: getelementptr
+
+; OPT: .lr.ph:
+; OPT: %lsr.iv2 = phi i32 addrspace(3)* [ %scevgep3, %.lr.ph ], [ %arg1, %.lr.ph.preheader ]
+; OPT: %lsr.iv1 = phi i32 addrspace(3)* [ %scevgep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
+; OPT: %lsr.iv = phi i32 [ %lsr.iv.next, %.lr.ph ], [ %n, %.lr.ph.preheader ]
+; OPT: %scevgep4 = getelementptr i32, i32 addrspace(3)* %lsr.iv2, i32 16383
+; OPT: %tmp4 = cmpxchg i32 addrspace(3)* %scevgep4, i32 undef, i32 undef seq_cst monotonic
+define amdgpu_kernel void @test_local_cmpxchg_addressing_loop_uniform_index_max_offset_i32(i32 addrspace(3)* noalias nocapture %arg0, i32 addrspace(3)* noalias nocapture readonly %arg1, i32 %n) #0 {
+bb:
+ %tmp = icmp sgt i32 %n, 0
+ br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
+
+.lr.ph.preheader: ; preds = %bb
+ br label %.lr.ph
+
+._crit_edge.loopexit: ; preds = %.lr.ph
+ br label %._crit_edge
+
+._crit_edge: ; preds = %._crit_edge.loopexit, %bb
+ ret void
+
+.lr.ph: ; preds = %.lr.ph, %.lr.ph.preheader
+ %indvars.iv = phi i32 [ %indvars.iv.next, %.lr.ph ], [ 0, %.lr.ph.preheader ]
+ %tmp1 = add nuw nsw i32 %indvars.iv, 16383
+ %tmp3 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 %tmp1
+ %tmp4 = cmpxchg i32 addrspace(3)* %tmp3, i32 undef, i32 undef seq_cst monotonic
+ %tmp4.0 = extractvalue { i32, i1 } %tmp4, 0
+ %tmp6 = getelementptr inbounds i32, i32 addrspace(3)* %arg0, i32 %indvars.iv
+ %tmp7 = cmpxchg i32 addrspace(3)* %tmp6, i32 undef, i32 undef seq_cst monotonic
+ %tmp7.0 = extractvalue { i32, i1 } %tmp7, 0
+ %tmp8 = add nsw i32 %tmp7.0, %tmp4.0
+ atomicrmw add i32 addrspace(3)* %tmp6, i32 %tmp8 seq_cst
+ %indvars.iv.next = add nuw nsw i32 %indvars.iv, 1
+ %exitcond = icmp eq i32 %indvars.iv.next, %n
+ br i1 %exitcond, label %._crit_edge.loopexit, label %.lr.ph
+}
+
+attributes #0 = { nounwind }
\ No newline at end of file
diff --git a/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll b/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
index bf61112a3c3e..c5ea1b915d91 100644
--- a/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
+++ b/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
@@ -10,7 +10,7 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
; OPT: %lsr.iv2 = phi i8 addrspace(1)* [ %scevgep3, %.lr.ph ], [ %arg1, %.lr.ph.preheader ]
; OPT: %scevgep4 = getelementptr i8, i8 addrspace(1)* %lsr.iv2, i64 4095
; OPT: load i8, i8 addrspace(1)* %scevgep4, align 1
-define void @test_global_addressing_loop_uniform_index_max_offset_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(1)* noalias nocapture readonly %arg1, i32 %n) #0 {
+define amdgpu_kernel void @test_global_addressing_loop_uniform_index_max_offset_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(1)* noalias nocapture readonly %arg1, i32 %n) #0 {
bb:
%tmp = icmp sgt i32 %n, 0
br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
@@ -48,7 +48,7 @@ bb:
; OPT: {{^}}.lr.ph:
; OPT: %lsr.iv3 = phi i8 addrspace(1)* [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
; OPT: %scevgep4 = getelementptr i8, i8 addrspace(1)* %lsr.iv3, i64 1
-define void @test_global_addressing_loop_uniform_index_max_offset_p1_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(1)* noalias nocapture readonly %arg1, i32 %n) #0 {
+define amdgpu_kernel void @test_global_addressing_loop_uniform_index_max_offset_p1_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(1)* noalias nocapture readonly %arg1, i32 %n) #0 {
bb:
%tmp = icmp sgt i32 %n, 0
br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
@@ -83,7 +83,7 @@ bb:
; OPT: %lsr.iv2 = phi i8 addrspace(3)* [ %scevgep3, %.lr.ph ], [ %arg1, %.lr.ph.preheader ]
; OPT: %scevgep4 = getelementptr i8, i8 addrspace(3)* %lsr.iv2, i32 65535
; OPT: %tmp4 = load i8, i8 addrspace(3)* %scevgep4, align 1
-define void @test_local_addressing_loop_uniform_index_max_offset_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(3)* noalias nocapture readonly %arg1, i32 %n) #0 {
+define amdgpu_kernel void @test_local_addressing_loop_uniform_index_max_offset_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(3)* noalias nocapture readonly %arg1, i32 %n) #0 {
bb:
%tmp = icmp sgt i32 %n, 0
br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
@@ -122,7 +122,7 @@ bb:
; OPT: {{^}}.lr.ph:
; OPT: %lsr.iv3 = phi i8 addrspace(3)* [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
; OPT: %scevgep4 = getelementptr i8, i8 addrspace(3)* %lsr.iv3, i32 1
-define void @test_local_addressing_loop_uniform_index_max_offset_p1_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(3)* noalias nocapture readonly %arg1, i32 %n) #0 {
+define amdgpu_kernel void @test_local_addressing_loop_uniform_index_max_offset_p1_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(3)* noalias nocapture readonly %arg1, i32 %n) #0 {
bb:
%tmp = icmp sgt i32 %n, 0
br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
diff --git a/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-crash.ll b/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-crash.ll
new file mode 100644
index 000000000000..02c3c05e7945
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-crash.ll
@@ -0,0 +1,31 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+target triple = "amdgcn--"
+
+; We need to compile this for a target where we have different address spaces,
+; and where pointers in those address spaces have different sizes.
+; E.g. for amdgcn-- pointers in address space 0 are 32 bits and pointers in
+; address space 1 are 64 bits.
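+; In the datalayout above, "p:32:32" gives address space 0 pointers a width of
+; 32 bits and "p1:64:64" gives address space 1 pointers a width of 64 bits.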
+
+; We shouldn't crash. Check that we get a loop with the two stores.
+;CHECK-LABEL: foo:
+;CHECK: [[LOOP_LABEL:BB[0-9]+_[0-9]+]]:
+;CHECK: buffer_store_dword
+;CHECK: buffer_store_dword
+;CHECK: s_branch [[LOOP_LABEL]]
+
+define amdgpu_kernel void @foo() {
+entry:
+ br label %loop
+
+loop:
+ %idx0 = phi i32 [ %next_idx0, %loop ], [ 0, %entry ]
+ %0 = getelementptr inbounds i32, i32* null, i32 %idx0
+ %1 = getelementptr inbounds i32, i32 addrspace(1)* null, i32 %idx0
+ store i32 1, i32* %0
+ store i32 7, i32 addrspace(1)* %1
+ %next_idx0 = add nuw nsw i32 %idx0, 1
+ br label %loop
+}
+
diff --git a/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll b/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
index 8c83df5843d2..67b1926bdf27 100644
--- a/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
+++ b/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
@@ -16,7 +16,7 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
; CHECK: bb:
; CHECK: inttoptr i32 %lsr.iv.next2 to i8 addrspace(3)*
; CHECK: %c1 = icmp ne i8 addrspace(3)*
-define void @local_cmp_user(i32 %arg0) nounwind {
+define amdgpu_kernel void @local_cmp_user(i32 %arg0) nounwind {
entry:
br label %bb11
@@ -47,7 +47,7 @@ bb13:
; CHECK: bb:
; CHECK: inttoptr i64 %lsr.iv.next2 to i8 addrspace(1)*
; CHECK: icmp ne i8 addrspace(1)* %t
-define void @global_cmp_user(i64 %arg0) nounwind {
+define amdgpu_kernel void @global_cmp_user(i64 %arg0) nounwind {
entry:
br label %bb11
@@ -78,7 +78,7 @@ bb13:
; CHECK: bb:
; CHECK: %idxprom = sext i32 %lsr.iv1 to i64
; CHECK: getelementptr i8, i8 addrspace(1)* %t, i64 %idxprom
-define void @global_gep_user(i32 %arg0) nounwind {
+define amdgpu_kernel void @global_gep_user(i32 %arg0) nounwind {
entry:
br label %bb11
@@ -108,7 +108,7 @@ bb13:
; CHECK: bb
; CHECK: %p = getelementptr i8, i8 addrspace(1)* %t, i64 %ii.ext
-define void @global_sext_scale_user(i32 %arg0) nounwind {
+define amdgpu_kernel void @global_sext_scale_user(i32 %arg0) nounwind {
entry:
br label %bb11
diff --git a/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll b/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll
index b3b696d42c59..9eba0c3051dc 100644
--- a/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll
+++ b/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll
@@ -14,7 +14,7 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
; CHECK: %scevgep = getelementptr i32, i32 addrspace(3)* %tmp1, i32 4
; CHECK:%tmp14 = load i32, i32 addrspace(3)* %scevgep
-define void @lsr_crash_preserve_addrspace_unknown_type() #0 {
+define amdgpu_kernel void @lsr_crash_preserve_addrspace_unknown_type() #0 {
bb:
br label %bb1
diff --git a/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll b/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
index 788842101080..a9d1e8758766 100644
--- a/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
+++ b/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
@@ -1,5 +1,4 @@
; RUN: llc -O3 -mtriple=thumb-eabi -mcpu=cortex-a9 %s -o - | FileCheck %s -check-prefix=A9
-; RUN: llc -O3 -mtriple=thumb-eabi -mcpu=cortex-a9 -addr-sink-using-gep=1 %s -o - | FileCheck %s -check-prefix=A9
; @simple is the most basic chain of address induction variables. Chaining
; saves at least one register and avoids complex addressing and setup
diff --git a/test/Transforms/LoopStrengthReduce/X86/canonical.ll b/test/Transforms/LoopStrengthReduce/X86/canonical.ll
new file mode 100644
index 000000000000..2dafbb408aad
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/canonical.ll
@@ -0,0 +1,65 @@
+; RUN: opt -mtriple=x86_64-unknown-linux-gnu -loop-reduce -S < %s | FileCheck %s
+; Check that LSR formula canonicalization will put loop-invariant regs before
+; the induction variable of the current loop, so exprs involving loop-invariant
+; regs can be promoted outside of the current loop.
+
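+; For example, for an access at %maxarray + %t1 + %indvars.iv, where %t1 is
+; invariant in the inner loop, the invariant part %maxarray + %t1 should come
+; first so it can form a hoistable base, leaving only the induction variable
+; to vary per iteration.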
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @foo(i32 %size, i32 %nsteps, i8* nocapture %maxarray, i8* nocapture readnone %buffer, i32 %init) local_unnamed_addr #0 {
+entry:
+ %cmp25 = icmp sgt i32 %nsteps, 0
+ br i1 %cmp25, label %for.cond1.preheader.lr.ph, label %for.end12
+
+for.cond1.preheader.lr.ph: ; preds = %entry
+ %cmp223 = icmp sgt i32 %size, 1
+ %t0 = sext i32 %init to i64
+ %wide.trip.count = zext i32 %size to i64
+ %wide.trip.count31 = zext i32 %nsteps to i64
+ br label %for.cond1.preheader
+
+for.cond1.preheader: ; preds = %for.inc10, %for.cond1.preheader.lr.ph
+ %indvars.iv28 = phi i64 [ 0, %for.cond1.preheader.lr.ph ], [ %indvars.iv.next29, %for.inc10 ]
+ br i1 %cmp223, label %for.body3.lr.ph, label %for.inc10
+
+for.body3.lr.ph: ; preds = %for.cond1.preheader
+ %t1 = add nsw i64 %indvars.iv28, %t0
+ %t2 = trunc i64 %indvars.iv28 to i8
+ br label %for.body3
+
+; Make sure loop-invariant items are grouped together so that the load address
+; can be represented in one getelementptr.
+; CHECK-LABEL: for.body3:
+; CHECK-NEXT: [[LSR:%[^,]+]] = phi i64 [ 1, %for.body3.lr.ph ], [ {{.*}}, %for.body3 ]
+; CHECK-NOT: = phi i64
+; CHECK-NEXT: [[LOADADDR:%[^,]+]] = getelementptr i8, i8* {{.*}}, i64 [[LSR]]
+; CHECK-NEXT: = load i8, i8* [[LOADADDR]], align 1
+; CHECK: br i1 %exitcond, label %for.inc10.loopexit, label %for.body3
+
+for.body3: ; preds = %for.body3, %for.body3.lr.ph
+ %indvars.iv = phi i64 [ 1, %for.body3.lr.ph ], [ %indvars.iv.next, %for.body3 ]
+ %t5 = trunc i64 %indvars.iv to i8
+ %t3 = add nsw i64 %t1, %indvars.iv
+ %arrayidx = getelementptr inbounds i8, i8* %maxarray, i64 %t3
+ %t4 = load i8, i8* %arrayidx, align 1
+ %add5 = add i8 %t4, %t5
+ %add6 = add i8 %add5, %t2
+ %arrayidx9 = getelementptr inbounds i8, i8* %maxarray, i64 %indvars.iv
+ store i8 %add6, i8* %arrayidx9, align 1
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.inc10.loopexit, label %for.body3
+
+for.inc10.loopexit: ; preds = %for.body3
+ br label %for.inc10
+
+for.inc10: ; preds = %for.inc10.loopexit, %for.cond1.preheader
+ %indvars.iv.next29 = add nuw nsw i64 %indvars.iv28, 1
+ %exitcond32 = icmp eq i64 %indvars.iv.next29, %wide.trip.count31
+ br i1 %exitcond32, label %for.end12.loopexit, label %for.cond1.preheader
+
+for.end12.loopexit: ; preds = %for.inc10
+ br label %for.end12
+
+for.end12: ; preds = %for.end12.loopexit, %entry
+ ret void
+}
diff --git a/test/Analysis/ScalarEvolution/incorrect-offset-scaling.ll b/test/Transforms/LoopStrengthReduce/X86/incorrect-offset-scaling.ll
index 7ffb0936d105..3adb8bcf514d 100644
--- a/test/Analysis/ScalarEvolution/incorrect-offset-scaling.ll
+++ b/test/Transforms/LoopStrengthReduce/X86/incorrect-offset-scaling.ll
@@ -36,13 +36,13 @@ ib: ; preds = %if6
%r4 = mul i64 %r3, %r0
%r5 = add i64 %r2, %r4
%r6 = icmp ult i64 %r5, undef
-; CHECK %2 = mul i64 %lsr.iv, %r3
-; CHECK %3 = add i64 %1, -1
-; CHECK %4 = add i64 %0, %r3
-; CHECK %r6
+; CHECK: [[MUL1:%[0-9]+]] = mul i64 %lsr.iv, %r3
+; CHECK: [[ADD1:%[0-9]+]] = add i64 [[MUL1]], -1
+; CHECK: add i64 %{{.}}, [[ADD1]]
+; CHECK: %r6
%r7 = getelementptr i64, i64* undef, i64 %r5
store i64 1, i64* %r7, align 8
-; CHECK %5 = mul i64 %lsr.iv, %r3
-; CHECK %6 = add i64 %5, -1
+; CHECK: [[MUL2:%[0-9]+]] = mul i64 %lsr.iv, %r3
+; CHECK: [[ADD2:%[0-9]+]] = add i64 [[MUL2]], -1
br label %L
}
diff --git a/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll b/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
index ab7d4f1baa81..fb63b66137f3 100644
--- a/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
+++ b/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
@@ -1,7 +1,5 @@
; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 | FileCheck %s -check-prefix=X64
; RUN: llc < %s -O3 -march=x86 -mcpu=core2 | FileCheck %s -check-prefix=X32
-; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 -addr-sink-using-gep=1 | FileCheck %s -check-prefix=X64
-; RUN: llc < %s -O3 -march=x86 -mcpu=core2 -addr-sink-using-gep=1 | FileCheck %s -check-prefix=X32
; @simple is the most basic chain of address induction variables. Chaining
; saves at least one register and avoids complex addressing and setup
diff --git a/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll b/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll
new file mode 100644
index 000000000000..4888536bdf81
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll
@@ -0,0 +1,52 @@
+; RUN: opt < %s -loop-reduce -mtriple=x86_64 -lsr-insns-cost -S | FileCheck %s -check-prefix=BOTH -check-prefix=INSN
+; RUN: opt < %s -loop-reduce -mtriple=x86_64 -S | FileCheck %s -check-prefix=BOTH -check-prefix=REGS
+; RUN: llc < %s -O2 -march=x86-64 -lsr-insns-cost -asm-verbose=0 | FileCheck %s
+
+; The OPT test checks that LSR optimizes the compare against a static counter into a compare with 0.
+
+; BOTH: for.body:
+; INSN: icmp eq i64 %lsr.iv.next, 0
+; REGS: icmp eq i64 %indvars.iv.next, 1024
+
+; The LLC test checks that LSR optimizes the compare for a static counter.
+; That means that instead of creating the following:
+; movl %ecx, (%rdx,%rax,4)
+; incq %rax
+; cmpq $1024, %rax
+; LSR should optimize out the cmp:
+; movl %ecx, 4096(%rdx,%rax)
+; addq $4, %rax
+; or
+; movl %ecx, 4096(%rdx,%rax,4)
+; incq %rax
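+; Either form needs no cmp: the addq/incq that advances the counter already
+; sets ZF when the (biased) counter reaches zero, so the loop can branch on
+; jne directly.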
+
+; CHECK: LBB0_1:
+; CHECK-NEXT: movl 4096(%{{.+}},[[REG:%[0-9a-z]+]]
+; CHECK-NEXT: addl 4096(%{{.+}},[[REG]]
+; CHECK-NEXT: movl %{{.+}}, 4096(%{{.+}},[[REG]]
+; CHECK-NOT: cmp
+; CHECK: jne
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Function Attrs: norecurse nounwind uwtable
+define void @foo(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* nocapture %q) {
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
+ %tmp = load i32, i32* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %y, i64 %indvars.iv
+ %tmp1 = load i32, i32* %arrayidx2, align 4
+ %add = add nsw i32 %tmp1, %tmp
+ %arrayidx4 = getelementptr inbounds i32, i32* %q, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx4, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
diff --git a/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll b/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll
new file mode 100644
index 000000000000..3273cb4e6b5b
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll
@@ -0,0 +1,58 @@
+; RUN: opt < %s -loop-reduce -mtriple=x86_64 -lsr-insns-cost -S | FileCheck %s -check-prefix=BOTH -check-prefix=INSN
+; RUN: opt < %s -loop-reduce -mtriple=x86_64 -S | FileCheck %s -check-prefix=BOTH -check-prefix=REGS
+; RUN: llc < %s -O2 -march=x86-64 -lsr-insns-cost -asm-verbose=0 | FileCheck %s
+
+; OPT checks that LSR prefers fewer instructions over fewer registers.
+; For x86, LSR should prefer a complicated address to new lsr induction
+; variables.
+
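+; With -lsr-insns-cost the three original geps off one shared induction
+; variable are kept; without it, LSR instead materializes three pointer
+; induction variables, one per array.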
+; BOTH: for.body:
+; INSN: getelementptr i32, i32* %x, i64 %indvars.iv
+; INSN: getelementptr i32, i32* %y, i64 %indvars.iv
+; INSN: getelementptr i32, i32* %q, i64 %indvars.iv
+; REGS: %lsr.iv4 = phi
+; REGS: %lsr.iv2 = phi
+; REGS: %lsr.iv1 = phi
+; REGS: getelementptr i32, i32* %lsr.iv1, i64 1
+; REGS: getelementptr i32, i32* %lsr.iv2, i64 1
+; REGS: getelementptr i32, i32* %lsr.iv4, i64 1
+
+; LLC checks that LSR prefers fewer instructions over fewer registers.
+; LSR should prefer a complicated address to additional add instructions.
+
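+; An x86 address mode such as (%rdx,%rax,4) folds base, index and scale into
+; the memory operand, so reusing one index register costs no extra add
+; instructions.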
+; CHECK: LBB0_2:
+; CHECK-NEXT: movl (%r{{.+}},
+; CHECK-NEXT: addl (%r{{.+}},
+; CHECK-NEXT: movl %e{{.+}}, (%r{{.+}},
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Function Attrs: norecurse nounwind uwtable
+define void @foo(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* nocapture %q, i32 %n) {
+entry:
+ %cmp10 = icmp sgt i32 %n, 0
+ br i1 %cmp10, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader: ; preds = %entry
+ %wide.trip.count = zext i32 %n to i64
+ br label %for.body
+
+for.cond.cleanup.loopexit: ; preds = %for.body
+ br label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
+ ret void
+
+for.body: ; preds = %for.body, %for.body.preheader
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
+ %tmp = load i32, i32* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %y, i64 %indvars.iv
+ %tmp1 = load i32, i32* %arrayidx2, align 4
+ %add = add nsw i32 %tmp1, %tmp
+ %arrayidx4 = getelementptr inbounds i32, i32* %q, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx4, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
+}
diff --git a/test/Transforms/LoopStrengthReduce/X86/nested-loop.ll b/test/Transforms/LoopStrengthReduce/X86/nested-loop.ll
new file mode 100644
index 000000000000..b563eb3ad994
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/nested-loop.ll
@@ -0,0 +1,65 @@
+; RUN: opt -loop-reduce -S < %s | FileCheck %s
+; Check that when we use an outer-loop induction variable inside an inner-loop
+; induction value expr, LSR can still choose to use a single induction variable
+; for the inner loop and share it in multiple induction value exprs.
+
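+; In the test below, the inner-loop addresses %indvars.iv, %indvars.iv + %sub2
+; and %t2 + %indvars.iv differ only by values that are invariant in for.body2,
+; so one shared induction variable plus invariant offsets should suffice.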
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @foo(i32 %size, i32 %nsteps, i32 %hsize, i32* %lined, i8* %maxarray) {
+entry:
+ %cmp215 = icmp sgt i32 %size, 1
+ %t0 = zext i32 %size to i64
+ %t1 = sext i32 %nsteps to i64
+ %sub2 = sub i64 %t0, 2
+ br label %for.body
+
+for.body: ; preds = %for.inc, %entry
+ %indvars.iv2 = phi i64 [ %indvars.iv.next3, %for.inc ], [ 0, %entry ]
+ %t2 = mul nsw i64 %indvars.iv2, %t0
+ br i1 %cmp215, label %for.body2.preheader, label %for.inc
+
+for.body2.preheader: ; preds = %for.body
+ br label %for.body2
+
+; Check that LSR generates only one induction variable for for.body2 and that
+; this induction variable is shared by multiple array accesses.
+; CHECK: for.body2:
+; CHECK-NEXT: [[LSR:%[^,]+]] = phi i64 [ %lsr.iv.next, %for.body2 ], [ 0, %for.body2.preheader ]
+; CHECK-NOT: = phi i64 [ {{.*}}, %for.body2 ], [ {{.*}}, %for.body2.preheader ]
+; CHECK: [[SCEVGEP1:%[^,]+]] = getelementptr i8, i8* %maxarray, i64 [[LSR]]
+; CHECK: [[SCEVGEP2:%[^,]+]] = getelementptr i8, i8* [[SCEVGEP1]], i64 1
+; CHECK: {{.*}} = load i8, i8* [[SCEVGEP2]], align 1
+; CHECK: [[SCEVGEP3:%[^,]+]] = getelementptr i8, i8* {{.*}}, i64 [[LSR]]
+; CHECK: {{.*}} = load i8, i8* [[SCEVGEP3]], align 1
+; CHECK: [[SCEVGEP4:%[^,]+]] = getelementptr i8, i8* {{.*}}, i64 [[LSR]]
+; CHECK: store i8 {{.*}}, i8* [[SCEVGEP4]], align 1
+; CHECK: br i1 %exitcond, label %for.body2, label %for.inc.loopexit
+
+for.body2: ; preds = %for.body2.preheader, %for.body2
+ %indvars.iv = phi i64 [ 1, %for.body2.preheader ], [ %indvars.iv.next, %for.body2 ]
+ %arrayidx1 = getelementptr inbounds i8, i8* %maxarray, i64 %indvars.iv
+ %v1 = load i8, i8* %arrayidx1, align 1
+ %idx2 = add nsw i64 %indvars.iv, %sub2
+ %arrayidx2 = getelementptr inbounds i8, i8* %maxarray, i64 %idx2
+ %v2 = load i8, i8* %arrayidx2, align 1
+ %tmpv = xor i8 %v1, %v2
+ %t4 = add nsw i64 %t2, %indvars.iv
+ %add.ptr = getelementptr inbounds i8, i8* %maxarray, i64 %t4
+ store i8 %tmpv, i8* %add.ptr, align 1
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %wide.trip.count = zext i32 %size to i64
+ %exitcond = icmp ne i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.body2, label %for.inc.loopexit
+
+for.inc.loopexit: ; preds = %for.body2
+ br label %for.inc
+
+for.inc: ; preds = %for.inc.loopexit, %for.body
+ %indvars.iv.next3 = add nuw nsw i64 %indvars.iv2, 1
+ %cmp = icmp slt i64 %indvars.iv.next3, %t1
+ br i1 %cmp, label %for.body, label %for.end.loopexit
+
+for.end.loopexit: ; preds = %for.inc
+ ret void
+}
diff --git a/test/Transforms/LoopStrengthReduce/X86/sibling-loops.ll b/test/Transforms/LoopStrengthReduce/X86/sibling-loops.ll
new file mode 100644
index 000000000000..a69d6adc0f03
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/sibling-loops.ll
@@ -0,0 +1,97 @@
+; RUN: opt -loop-reduce -S < %s | FileCheck %s
+; We find it is very bad to allow LSR formulae containing SCEVAddRecExpr regs
+; from siblings of the current loop. When one loop is LSR optimized, it can
+; insert lsr.iv for other sibling loops, which sometimes leads to many extra
+; lsr.ivs being inserted for those loops.
+
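+; In @foo below, each loop's %j phi chains from the previous sibling's final
+; value (%inc, %inc4, ...), which is exactly what would tempt LSR into
+; referencing a sibling loop's recurrence.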
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+@cond = common local_unnamed_addr global i64 0, align 8
+
+; Check there is no extra lsr.iv generated in foo.
+; CHECK-LABEL: @foo(
+; CHECK-NOT: lsr.iv{{[0-9]+}} =
+;
+define void @foo(i64 %N) local_unnamed_addr {
+entry:
+ br label %do.body
+
+do.body: ; preds = %do.body, %entry
+ %i.0 = phi i64 [ 0, %entry ], [ %inc, %do.body ]
+ tail call void @goo(i64 %i.0, i64 %i.0)
+ %inc = add nuw nsw i64 %i.0, 1
+ %t0 = load i64, i64* @cond, align 8
+ %tobool = icmp eq i64 %t0, 0
+ br i1 %tobool, label %do.body2.preheader, label %do.body
+
+do.body2.preheader: ; preds = %do.body
+ br label %do.body2
+
+do.body2: ; preds = %do.body2.preheader, %do.body2
+ %i.1 = phi i64 [ %inc3, %do.body2 ], [ 0, %do.body2.preheader ]
+ %j.1 = phi i64 [ %inc4, %do.body2 ], [ %inc, %do.body2.preheader ]
+ tail call void @goo(i64 %i.1, i64 %j.1)
+ %inc3 = add nuw nsw i64 %i.1, 1
+ %inc4 = add nsw i64 %j.1, 1
+ %t1 = load i64, i64* @cond, align 8
+ %tobool6 = icmp eq i64 %t1, 0
+ br i1 %tobool6, label %do.body8.preheader, label %do.body2
+
+do.body8.preheader: ; preds = %do.body2
+ br label %do.body8
+
+do.body8: ; preds = %do.body8.preheader, %do.body8
+ %i.2 = phi i64 [ %inc9, %do.body8 ], [ 0, %do.body8.preheader ]
+ %j.2 = phi i64 [ %inc10, %do.body8 ], [ %inc4, %do.body8.preheader ]
+ tail call void @goo(i64 %i.2, i64 %j.2)
+ %inc9 = add nuw nsw i64 %i.2, 1
+ %inc10 = add nsw i64 %j.2, 1
+ %t2 = load i64, i64* @cond, align 8
+ %tobool12 = icmp eq i64 %t2, 0
+ br i1 %tobool12, label %do.body14.preheader, label %do.body8
+
+do.body14.preheader: ; preds = %do.body8
+ br label %do.body14
+
+do.body14: ; preds = %do.body14.preheader, %do.body14
+ %i.3 = phi i64 [ %inc15, %do.body14 ], [ 0, %do.body14.preheader ]
+ %j.3 = phi i64 [ %inc16, %do.body14 ], [ %inc10, %do.body14.preheader ]
+ tail call void @goo(i64 %i.3, i64 %j.3)
+ %inc15 = add nuw nsw i64 %i.3, 1
+ %inc16 = add nsw i64 %j.3, 1
+ %t3 = load i64, i64* @cond, align 8
+ %tobool18 = icmp eq i64 %t3, 0
+ br i1 %tobool18, label %do.body20.preheader, label %do.body14
+
+do.body20.preheader: ; preds = %do.body14
+ br label %do.body20
+
+do.body20: ; preds = %do.body20.preheader, %do.body20
+ %i.4 = phi i64 [ %inc21, %do.body20 ], [ 0, %do.body20.preheader ]
+ %j.4 = phi i64 [ %inc22, %do.body20 ], [ %inc16, %do.body20.preheader ]
+ tail call void @goo(i64 %i.4, i64 %j.4)
+ %inc21 = add nuw nsw i64 %i.4, 1
+ %inc22 = add nsw i64 %j.4, 1
+ %t4 = load i64, i64* @cond, align 8
+ %tobool24 = icmp eq i64 %t4, 0
+ br i1 %tobool24, label %do.body26.preheader, label %do.body20
+
+do.body26.preheader: ; preds = %do.body20
+ br label %do.body26
+
+do.body26: ; preds = %do.body26.preheader, %do.body26
+ %i.5 = phi i64 [ %inc27, %do.body26 ], [ 0, %do.body26.preheader ]
+ %j.5 = phi i64 [ %inc28, %do.body26 ], [ %inc22, %do.body26.preheader ]
+ tail call void @goo(i64 %i.5, i64 %j.5)
+ %inc27 = add nuw nsw i64 %i.5, 1
+ %inc28 = add nsw i64 %j.5, 1
+ %t5 = load i64, i64* @cond, align 8
+ %tobool30 = icmp eq i64 %t5, 0
+ br i1 %tobool30, label %do.end31, label %do.body26
+
+do.end31: ; preds = %do.body26
+ ret void
+}
+
+declare void @goo(i64, i64) local_unnamed_addr
+
diff --git a/test/Transforms/LoopUnroll/AMDGPU/unroll-barrier.ll b/test/Transforms/LoopUnroll/AMDGPU/unroll-barrier.ll
index e732ddc2bc84..ca8cc32469d8 100644
--- a/test/Transforms/LoopUnroll/AMDGPU/unroll-barrier.ll
+++ b/test/Transforms/LoopUnroll/AMDGPU/unroll-barrier.ll
@@ -6,7 +6,7 @@
; CHECK: call void @llvm.amdgcn.s.barrier()
; CHECK: call void @llvm.amdgcn.s.barrier()
; CHECK-NOT: br
-define void @test_unroll_convergent_barrier(i32 addrspace(1)* noalias nocapture %out, i32 addrspace(1)* noalias nocapture %in) #0 {
+define amdgpu_kernel void @test_unroll_convergent_barrier(i32 addrspace(1)* noalias nocapture %out, i32 addrspace(1)* noalias nocapture %in) #0 {
entry:
br label %for.body
diff --git a/test/Transforms/LoopUnroll/AMDGPU/unroll-for-private.ll b/test/Transforms/LoopUnroll/AMDGPU/unroll-for-private.ll
new file mode 100644
index 000000000000..e986c3dc2a28
--- /dev/null
+++ b/test/Transforms/LoopUnroll/AMDGPU/unroll-for-private.ll
@@ -0,0 +1,154 @@
+; RUN: opt -mtriple=amdgcn-unknown-amdhsa -loop-unroll -S -amdgpu-unroll-threshold-private=20000 %s | FileCheck %s
+
+; Check that we fully unroll the loop so that the alloca can be eliminated.
+; CHECK-LABEL: @non_invariant_ind
+; CHECK: for.body:
+; CHECK-NOT: br
+; CHECK: store i32 %tmp15, i32 addrspace(1)* %arrayidx7, align 4
+; CHECK: ret void
+
+define amdgpu_kernel void @non_invariant_ind(i32 addrspace(1)* nocapture %a, i32 %x) {
+entry:
+ %arr = alloca [64 x i32], align 4
+ %tmp1 = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ %arrayidx5 = getelementptr inbounds [64 x i32], [64 x i32]* %arr, i32 0, i32 %x
+ %tmp15 = load i32, i32* %arrayidx5, align 4
+ %arrayidx7 = getelementptr inbounds i32, i32 addrspace(1)* %a, i32 %tmp1
+ store i32 %tmp15, i32 addrspace(1)* %arrayidx7, align 4
+ ret void
+
+for.body: ; preds = %for.body, %entry
+ %i.015 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %idxprom = sext i32 %i.015 to i64
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom
+ %tmp16 = load i32, i32 addrspace(1)* %arrayidx, align 4
+ %add = add nsw i32 %i.015, %tmp1
+ %rem = srem i32 %add, 64
+ %arrayidx3 = getelementptr inbounds [64 x i32], [64 x i32]* %arr, i32 0, i32 %rem
+ store i32 %tmp16, i32* %arrayidx3, align 4
+ %inc = add nuw nsw i32 %i.015, 1
+ %exitcond = icmp eq i32 %inc, 100
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
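+; Note on the RUN line above: -amdgpu-unroll-threshold-private=20000 boosts the
+; unroll threshold for loops that index a private (alloca) array, which is what
+; lets this 100-iteration loop unroll fully so the alloca can then be removed.
+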
+; Check that we unroll the inner loop but not the outer one.
+; CHECK-LABEL: @invariant_ind
+; CHECK: %[[exitcond:[^ ]+]] = icmp eq i32 %{{.*}}, 32
+; CHECK: br i1 %[[exitcond]]
+; CHECK-NOT: icmp eq i32 %{{.*}}, 100
+
+define amdgpu_kernel void @invariant_ind(i32 addrspace(1)* nocapture %a, i32 %x) {
+entry:
+ %arr = alloca [64 x i32], align 4
+ %tmp1 = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ br label %for.cond2.preheader
+
+for.cond2.preheader: ; preds = %for.cond.cleanup5, %entry
+ %i.026 = phi i32 [ 0, %entry ], [ %inc10, %for.cond.cleanup5 ]
+ %idxprom = sext i32 %i.026 to i64
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom
+ %tmp15 = load i32, i32 addrspace(1)* %arrayidx, align 4
+ br label %for.body6
+
+for.cond.cleanup: ; preds = %for.cond.cleanup5
+ %arrayidx13 = getelementptr inbounds [64 x i32], [64 x i32]* %arr, i32 0, i32 %x
+ %tmp16 = load i32, i32* %arrayidx13, align 4
+ %arrayidx15 = getelementptr inbounds i32, i32 addrspace(1)* %a, i32 %tmp1
+ store i32 %tmp16, i32 addrspace(1)* %arrayidx15, align 4
+ ret void
+
+for.cond.cleanup5: ; preds = %for.body6
+ %inc10 = add nuw nsw i32 %i.026, 1
+ %exitcond27 = icmp eq i32 %inc10, 32
+ br i1 %exitcond27, label %for.cond.cleanup, label %for.cond2.preheader
+
+for.body6: ; preds = %for.body6, %for.cond2.preheader
+ %j.025 = phi i32 [ 0, %for.cond2.preheader ], [ %inc, %for.body6 ]
+ %add = add nsw i32 %j.025, %tmp1
+ %rem = srem i32 %add, 64
+ %arrayidx8 = getelementptr inbounds [64 x i32], [64 x i32]* %arr, i32 0, i32 %rem
+ store i32 %tmp15, i32* %arrayidx8, align 4
+ %inc = add nuw nsw i32 %j.025, 1
+ %exitcond = icmp eq i32 %inc, 100
+ br i1 %exitcond, label %for.cond.cleanup5, label %for.body6
+}
+
+; Check that we do not force unrolling if the alloca is too big.
+; CHECK-LABEL: @too_big
+; CHECK: for.body:
+; CHECK: icmp eq i32 %{{.*}}, 100
+; CHECK: br
+
+define amdgpu_kernel void @too_big(i32 addrspace(1)* nocapture %a, i32 %x) {
+entry:
+ %arr = alloca [256 x i32], align 4
+ %tmp1 = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ %arrayidx5 = getelementptr inbounds [256 x i32], [256 x i32]* %arr, i32 0, i32 %x
+ %tmp15 = load i32, i32* %arrayidx5, align 4
+ %arrayidx7 = getelementptr inbounds i32, i32 addrspace(1)* %a, i32 %tmp1
+ store i32 %tmp15, i32 addrspace(1)* %arrayidx7, align 4
+ ret void
+
+for.body: ; preds = %for.body, %entry
+ %i.015 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %idxprom = sext i32 %i.015 to i64
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom
+ %tmp16 = load i32, i32 addrspace(1)* %arrayidx, align 4
+ %add = add nsw i32 %i.015, %tmp1
+ %rem = srem i32 %add, 64
+ %arrayidx3 = getelementptr inbounds [256 x i32], [256 x i32]* %arr, i32 0, i32 %rem
+ store i32 %tmp16, i32* %arrayidx3, align 4
+ %inc = add nuw nsw i32 %i.015, 1
+ %exitcond = icmp eq i32 %inc, 100
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+; Check that we do not force unrolling if the alloca is dynamically sized.
+; CHECK-LABEL: @dynamic_size_alloca(
+; CHECK: alloca i32, i32 %n
+; CHECK: for.body:
+; CHECK: icmp eq i32 %{{.*}}, 100
+; CHECK: br
+
+define amdgpu_kernel void @dynamic_size_alloca(i32 addrspace(1)* nocapture %a, i32 %n, i32 %x) {
+entry:
+ %arr = alloca i32, i32 %n, align 4
+ %tmp1 = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ %arrayidx5 = getelementptr inbounds i32, i32* %arr, i32 %x
+ %tmp15 = load i32, i32* %arrayidx5, align 4
+ %arrayidx7 = getelementptr inbounds i32, i32 addrspace(1)* %a, i32 %tmp1
+ store i32 %tmp15, i32 addrspace(1)* %arrayidx7, align 4
+ ret void
+
+for.body: ; preds = %for.body, %entry
+ %i.015 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %idxprom = sext i32 %i.015 to i64
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom
+ %tmp16 = load i32, i32 addrspace(1)* %arrayidx, align 4
+ %add = add nsw i32 %i.015, %tmp1
+ %rem = srem i32 %add, 64
+ %arrayidx3 = getelementptr inbounds i32, i32* %arr, i32 %rem
+ store i32 %tmp16, i32* %arrayidx3, align 4
+ %inc = add nuw nsw i32 %i.015, 1
+ %exitcond = icmp eq i32 %inc, 100
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+declare i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr() #1
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+declare i32 @llvm.amdgcn.workgroup.id.x() #1
+
+declare i8 addrspace(2)* @llvm.amdgcn.implicitarg.ptr() #1
+
+attributes #1 = { nounwind readnone }
diff --git a/test/Transforms/LoopUnroll/basic.ll b/test/Transforms/LoopUnroll/basic.ll
index 2bfd3e6de8fc..e965f2a19c04 100644
--- a/test/Transforms/LoopUnroll/basic.ll
+++ b/test/Transforms/LoopUnroll/basic.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -loop-unroll -S | FileCheck %s
+; RUN: opt < %s -passes='require<opt-remark-emit>,loop(unroll)' -S | FileCheck %s
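+; (In the new-PM RUN line, require<opt-remark-emit> makes the optimization
+; remark emitter analysis available to the loop adaptor, and loop(unroll) runs
+; the unroll pass inside it.)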
; This should not unroll since the address of the loop header is taken.
diff --git a/test/Transforms/LoopUnroll/epilog_const_phi.ll b/test/Transforms/LoopUnroll/epilog_const_phi.ll
new file mode 100644
index 000000000000..22e525760942
--- /dev/null
+++ b/test/Transforms/LoopUnroll/epilog_const_phi.ll
@@ -0,0 +1,65 @@
+; RUN: opt -S -loop-unroll -unroll-runtime < %s | FileCheck %s
+
+; Epilogue unrolling allows the PHI to keep its constant incoming value.
+; For this test that means the XOR can be deleted after unrolling.
+; Check that we emit an epilogue remainder here.
+
+; CHECK-LABEL: const_phi_val
+; CHECK: for.body.epil
+
+; Function Attrs: norecurse nounwind uwtable
+define void @const_phi_val(i32 %i0, i32* nocapture %a) {
+entry:
+ %cmp6 = icmp slt i32 %i0, 1000
+ br i1 %cmp6, label %for.body.preheader, label %for.end
+
+for.body.preheader: ; preds = %entry
+ %tmp = sext i32 %i0 to i64
+ br label %for.body
+
+for.body: ; preds = %for.body, %for.body.preheader
+ %indvars.iv = phi i64 [ %tmp, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+ %s.08 = phi i32 [ 0, %for.body.preheader ], [ %xor, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+ store i32 %s.08, i32* %arrayidx, align 4
+ %xor = xor i32 %s.08, 1
+ %indvars.iv.next = add nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit: ; preds = %for.body
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %entry
+ ret void
+}
+
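+; For reference, the epilogue remainder the unroller emits has roughly this
+; shape (a sketch only; exact names and guard branches depend on the runtime
+; trip-count computation):
+;   for.body.epil:
+;     %indvars.iv.epil = phi i64 [ %indvars.iv.next.epil, %for.body.epil ], ...
+;     %s.08.epil = phi i32 [ %xor.epil, %for.body.epil ], ...
+;     store i32 %s.08.epil, i32* %arrayidx.epil, align 4
+;     %xor.epil = xor i32 %s.08.epil, 1
+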
+; When there is no phi with a constant incoming value from the preheader,
+; there is no need to do epilogue unrolling.
+
+; CHECK-LABEL: var_phi_val
+; CHECK: for.body.prol
+
+; Function Attrs: norecurse nounwind uwtable
+define void @var_phi_val(i32 %i0, i32* nocapture %a) {
+entry:
+ %cmp6 = icmp slt i32 %i0, 1000
+ br i1 %cmp6, label %for.body.preheader, label %for.end
+
+for.body.preheader: ; preds = %entry
+ %tmp = sext i32 %i0 to i64
+ br label %for.body
+
+for.body: ; preds = %for.body, %for.body.preheader
+ %indvars.iv = phi i64 [ %tmp, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+ %indvars.iv.next = add nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit: ; preds = %for.body
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %entry
+ ret void
+}
diff --git a/test/Transforms/LoopUnroll/full-unroll-bad-cost.ll b/test/Transforms/LoopUnroll/full-unroll-bad-cost.ll
index e5694fbeb0ce..9bbd21accc8e 100644
--- a/test/Transforms/LoopUnroll/full-unroll-bad-cost.ll
+++ b/test/Transforms/LoopUnroll/full-unroll-bad-cost.ll
@@ -1,4 +1,5 @@
; RUN: opt -S -loop-unroll < %s | FileCheck %s
+; RUN: opt < %s -passes='require<opt-remark-emit>,loop(unroll-full)' -S | FileCheck %s
; LLVM should not try to fully unroll this loop.
diff --git a/test/Transforms/LoopUnroll/full-unroll-crashers.ll b/test/Transforms/LoopUnroll/full-unroll-crashers.ll
index 9f1529139de0..d83e56635e8c 100644
--- a/test/Transforms/LoopUnroll/full-unroll-crashers.ll
+++ b/test/Transforms/LoopUnroll/full-unroll-crashers.ll
@@ -1,5 +1,6 @@
; Check that we don't crash on corner cases.
; RUN: opt < %s -S -loop-unroll -unroll-max-iteration-count-to-analyze=1000 -unroll-threshold=1 -unroll-max-percent-threshold-boost=200 -o /dev/null
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll-full)' -unroll-max-iteration-count-to-analyze=1000 -unroll-threshold=1 -unroll-max-percent-threshold-boost=200 -o /dev/null
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
@known_constant = internal unnamed_addr constant [10 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1], align 16
diff --git a/test/Transforms/LoopUnroll/full-unroll-heuristics-2.ll b/test/Transforms/LoopUnroll/full-unroll-heuristics-2.ll
index 26124fb32ca2..a143056affe7 100644
--- a/test/Transforms/LoopUnroll/full-unroll-heuristics-2.ll
+++ b/test/Transforms/LoopUnroll/full-unroll-heuristics-2.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -S -loop-unroll -unroll-max-iteration-count-to-analyze=1000 -unroll-threshold=10 -unroll-max-percent-threshold-boost=200 | FileCheck %s
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll-full)' -unroll-max-iteration-count-to-analyze=1000 -unroll-threshold=10 -unroll-max-percent-threshold-boost=200 | FileCheck %s
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
@unknown_global = internal unnamed_addr global [9 x i32] [i32 0, i32 -1, i32 0, i32 -1, i32 5, i32 -1, i32 0, i32 -1, i32 0], align 16
diff --git a/test/Transforms/LoopUnroll/full-unroll-heuristics-cmp.ll b/test/Transforms/LoopUnroll/full-unroll-heuristics-cmp.ll
index 8bddb1b225c2..e65f794286ee 100644
--- a/test/Transforms/LoopUnroll/full-unroll-heuristics-cmp.ll
+++ b/test/Transforms/LoopUnroll/full-unroll-heuristics-cmp.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -S -loop-unroll -unroll-max-iteration-count-to-analyze=100 -unroll-threshold=10 -unroll-max-percent-threshold-boost=200 | FileCheck %s
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll-full)' -unroll-max-iteration-count-to-analyze=100 -unroll-threshold=10 -unroll-max-percent-threshold-boost=200 | FileCheck %s
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
@known_constant = internal unnamed_addr constant [10 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1], align 16
diff --git a/test/Transforms/LoopUnroll/full-unroll-heuristics-dce.ll b/test/Transforms/LoopUnroll/full-unroll-heuristics-dce.ll
index 83c105ca23f5..57de3041f571 100644
--- a/test/Transforms/LoopUnroll/full-unroll-heuristics-dce.ll
+++ b/test/Transforms/LoopUnroll/full-unroll-heuristics-dce.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -S -loop-unroll -unroll-max-iteration-count-to-analyze=100 -unroll-threshold=12 -unroll-max-percent-threshold-boost=400 | FileCheck %s
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll-full)' -unroll-max-iteration-count-to-analyze=100 -unroll-threshold=12 -unroll-max-percent-threshold-boost=400 | FileCheck %s
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
@known_constant = internal unnamed_addr constant [10 x i32] [i32 0, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0, i32 0], align 16
diff --git a/test/Transforms/LoopUnroll/full-unroll-heuristics-geps.ll b/test/Transforms/LoopUnroll/full-unroll-heuristics-geps.ll
index 230912538d23..238869d120ba 100644
--- a/test/Transforms/LoopUnroll/full-unroll-heuristics-geps.ll
+++ b/test/Transforms/LoopUnroll/full-unroll-heuristics-geps.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -S -loop-unroll -unroll-max-iteration-count-to-analyze=100 -unroll-threshold=10 -unroll-max-percent-threshold-boost=200 | FileCheck %s
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll-full)' -unroll-max-iteration-count-to-analyze=100 -unroll-threshold=10 -unroll-max-percent-threshold-boost=200 | FileCheck %s
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; When examining gep-instructions we shouldn't consider them simplified if the
diff --git a/test/Transforms/LoopUnroll/full-unroll-heuristics-phi-prop.ll b/test/Transforms/LoopUnroll/full-unroll-heuristics-phi-prop.ll
index a1fab3cc71e1..aa517cb1589d 100644
--- a/test/Transforms/LoopUnroll/full-unroll-heuristics-phi-prop.ll
+++ b/test/Transforms/LoopUnroll/full-unroll-heuristics-phi-prop.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -S -loop-unroll -unroll-max-iteration-count-to-analyze=100 -unroll-threshold=10 -unroll-max-percent-threshold-boost=200 | FileCheck %s
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll-full)' -unroll-max-iteration-count-to-analyze=100 -unroll-threshold=10 -unroll-max-percent-threshold-boost=200 | FileCheck %s
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
define i64 @propagate_loop_phis() {
diff --git a/test/Transforms/LoopUnroll/full-unroll-heuristics.ll b/test/Transforms/LoopUnroll/full-unroll-heuristics.ll
index 7189fbb34833..a2fe1f5e3fda 100644
--- a/test/Transforms/LoopUnroll/full-unroll-heuristics.ll
+++ b/test/Transforms/LoopUnroll/full-unroll-heuristics.ll
@@ -21,6 +21,15 @@
; RUN: opt < %s -S -loop-unroll -unroll-max-iteration-count-to-analyze=1000 -unroll-threshold=20 -unroll-max-percent-threshold-boost=200 | FileCheck %s -check-prefix=TEST2
; RUN: opt < %s -S -loop-unroll -unroll-max-iteration-count-to-analyze=1000 -unroll-threshold=20 -unroll-max-percent-threshold-boost=100 | FileCheck %s -check-prefix=TEST3
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll-full)' -unroll-max-iteration-count-to-analyze=1000 -unroll-threshold=10 -unroll-max-percent-threshold-boost=100 | FileCheck %s -check-prefix=TEST1
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll-full)' -unroll-max-iteration-count-to-analyze=1000 -unroll-threshold=20 -unroll-max-percent-threshold-boost=200 | FileCheck %s -check-prefix=TEST2
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll-full)' -unroll-max-iteration-count-to-analyze=1000 -unroll-threshold=20 -unroll-max-percent-threshold-boost=100 | FileCheck %s -check-prefix=TEST3
+
+; Check that these work when the unroller has partial unrolling enabled too.
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll)' -unroll-max-iteration-count-to-analyze=1000 -unroll-threshold=10 -unroll-max-percent-threshold-boost=100 | FileCheck %s -check-prefix=TEST1
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll)' -unroll-max-iteration-count-to-analyze=1000 -unroll-threshold=20 -unroll-max-percent-threshold-boost=200 | FileCheck %s -check-prefix=TEST2
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll)' -unroll-max-iteration-count-to-analyze=1000 -unroll-threshold=20 -unroll-max-percent-threshold-boost=100 | FileCheck %s -check-prefix=TEST3
+
; If the absolute threshold is too low, we should not unroll:
; TEST1: %array_const_idx = getelementptr inbounds [9 x i32], [9 x i32]* @known_constant, i64 0, i64 %iv
@@ -32,6 +41,7 @@
; And check that we don't crash when we're not allowed to do any analysis.
; RUN: opt < %s -loop-unroll -unroll-max-iteration-count-to-analyze=0 -disable-output
+; RUN: opt < %s -passes='require<opt-remark-emit>,loop(unroll-full)' -unroll-max-iteration-count-to-analyze=0 -disable-output
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
@known_constant = internal unnamed_addr constant [9 x i32] [i32 0, i32 -1, i32 0, i32 -1, i32 5, i32 -1, i32 0, i32 -1, i32 0], align 16
diff --git a/test/Transforms/LoopUnroll/full-unroll-keep-first-exit.ll b/test/Transforms/LoopUnroll/full-unroll-keep-first-exit.ll
index e70ff4156d35..682d1b35c1fa 100644
--- a/test/Transforms/LoopUnroll/full-unroll-keep-first-exit.ll
+++ b/test/Transforms/LoopUnroll/full-unroll-keep-first-exit.ll
@@ -1,4 +1,5 @@
; RUN: opt -S -loop-unroll < %s | FileCheck %s
+; RUN: opt -S -passes='require<opt-remark-emit>,loop(unroll-full)' < %s | FileCheck %s
; Unroll twice, with first loop exit kept
; CHECK-LABEL: @s32_max1
diff --git a/test/Transforms/LoopUnroll/partial-unroll-const-bounds.ll b/test/Transforms/LoopUnroll/partial-unroll-const-bounds.ll
index 49c823a28c7f..8e5a866f8ca7 100644
--- a/test/Transforms/LoopUnroll/partial-unroll-const-bounds.ll
+++ b/test/Transforms/LoopUnroll/partial-unroll-const-bounds.ll
@@ -1,4 +1,8 @@
-; RUN: opt < %s -S -unroll-threshold=20 -loop-unroll -unroll-allow-partial -unroll-runtime -unroll-allow-remainder -unroll-max-percent-threshold-boost=100 | FileCheck %s
+; RUN: opt < %s -S -unroll-partial-threshold=20 -unroll-threshold=20 -loop-unroll -unroll-allow-partial -unroll-runtime -unroll-allow-remainder -unroll-max-percent-threshold-boost=100 | FileCheck %s
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll)' -unroll-partial-threshold=20 -unroll-threshold=20 -unroll-allow-partial -unroll-runtime -unroll-allow-remainder -unroll-max-percent-threshold-boost=100 | FileCheck %s
+;
+; Also check that the simple unroller doesn't allow partial unrolling.
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll-full)' -unroll-partial-threshold=20 -unroll-threshold=20 -unroll-allow-partial -unroll-runtime -unroll-allow-remainder -unroll-max-percent-threshold-boost=100 | FileCheck %s --check-prefix=CHECK-NO-UNROLL
; The loop trip count is 9. However, unroll factors 3 and 9 exceed the given threshold.
; The test checks that we choose a smaller, power-of-two unroll count and do not give up on unrolling.
@@ -8,6 +12,10 @@
; CHECK: for.body.1:
; CHECK: store
+; CHECK-NO-UNROLL: for.body:
+; CHECK-NO-UNROLL: store
+; CHECK-NO-UNROLL-NOT: store
+
define void @foo(i32* nocapture %a, i32* nocapture readonly %b) nounwind uwtable {
entry:
br label %for.body
diff --git a/test/Transforms/LoopUnroll/peel-loop-irreducible.ll b/test/Transforms/LoopUnroll/peel-loop-irreducible.ll
new file mode 100644
index 000000000000..32a7a0732e10
--- /dev/null
+++ b/test/Transforms/LoopUnroll/peel-loop-irreducible.ll
@@ -0,0 +1,36 @@
+; RUN: opt < %s -S -loop-unroll -unroll-force-peel-count=1 | FileCheck %s
+
+; Check that we don't peel loops where the latch is not the exiting block.
+; CHECK-LABEL: @invariant_backedge_irreducible
+; CHECK: entry:
+; CHECK: br label %header
+; CHECK-NOT: peel
+; CHECK: header:
+; CHECK: br i1 {{.*}} label %latch, label %exiting
+; CHECK: latch:
+; CHECK: br i1 {{.*}} label %header, label %exiting
+; CHECK: exiting:
+; CHECK: br i1 {{.*}} label %latch, label %exit
+
+define i32 @invariant_backedge_irreducible(i32 %a, i32 %b) {
+entry:
+ br label %header
+
+header:
+ %i = phi i32 [ 0, %entry ], [ %inc, %latch ]
+ %cmp.phi = phi i1 [ false, %entry ], [ %cmp, %latch ]
+ br i1 %cmp.phi, label %latch, label %exiting
+
+latch:
+ %inc = add i32 %i, 1
+ %cmp = icmp slt i32 %i, 1000
+ br i1 %cmp, label %header, label %exiting
+
+exiting:
+ %cmp.exiting = phi i1 [ %cmp.phi, %header ], [ %cmp, %latch ]
+ br i1 %cmp.exiting, label %latch, label %exit
+
+exit:
+ ret i32 0
+}
+
diff --git a/test/Transforms/LoopUnroll/peel-loop-not-forced.ll b/test/Transforms/LoopUnroll/peel-loop-not-forced.ll
new file mode 100644
index 000000000000..3dcac87f8242
--- /dev/null
+++ b/test/Transforms/LoopUnroll/peel-loop-not-forced.ll
@@ -0,0 +1,53 @@
+; RUN: opt < %s -S -loop-unroll -unroll-threshold=4 | FileCheck %s
+
+define i32 @invariant_backedge_1(i32 %a, i32 %b) {
+; CHECK-LABEL: @invariant_backedge_1
+; CHECK-NOT: %plus = phi
+; CHECK: loop.peel:
+; CHECK: loop:
+; CHECK: %i = phi
+; CHECK: %sum = phi
+entry:
+ br label %loop
+
+loop:
+ %i = phi i32 [ 0, %entry ], [ %inc, %loop ]
+ %sum = phi i32 [ 0, %entry ], [ %incsum, %loop ]
+ %plus = phi i32 [ %a, %entry ], [ %b, %loop ]
+
+ %incsum = add i32 %sum, %plus
+ %inc = add i32 %i, 1
+ %cmp = icmp slt i32 %i, 1000
+
+ br i1 %cmp, label %loop, label %exit
+
+exit:
+ ret i32 %sum
+}
+
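+; A sketch of the expected result (a hypothetical shape; block and value names
+; are illustrative): the peeled first iteration consumes the %a value, so the
+; remaining loop can use %b directly and the %plus phi disappears:
+;   loop.peel:
+;     %incsum.peel = add i32 0, %a
+;   loop:
+;     %incsum = add i32 %sum, %b
+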
+; Peeling should fail due to the size of the loop.
+define i32 @invariant_backedge_2(i32 %a, i32 %b) {
+; CHECK-LABEL: @invariant_backedge_2
+; CHECK-NOT: loop.peel:
+; CHECK: loop:
+; CHECK: %i = phi
+; CHECK: %sum = phi
+; CHECK: %plus = phi
+entry:
+ br label %loop
+
+loop:
+ %i = phi i32 [ 0, %entry ], [ %inc, %loop ]
+ %sum = phi i32 [ 0, %entry ], [ %incsum2, %loop ]
+ %plus = phi i32 [ %a, %entry ], [ %b, %loop ]
+
+ %incsum = add i32 %sum, %plus
+ %incsum2 = add i32 %incsum, %plus
+ %inc = add i32 %i, 1
+ %cmp = icmp slt i32 %i, 1000
+
+ br i1 %cmp, label %loop, label %exit
+
+exit:
+ ret i32 %sum
+}
diff --git a/test/Transforms/LoopUnroll/peel-loop-pgo.ll b/test/Transforms/LoopUnroll/peel-loop-pgo.ll
index a87d5643e7e9..20c3878d03a7 100644
--- a/test/Transforms/LoopUnroll/peel-loop-pgo.ll
+++ b/test/Transforms/LoopUnroll/peel-loop-pgo.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -S -debug-only=loop-unroll -loop-unroll -unroll-allow-peeling 2>&1 | FileCheck %s
+; RUN: opt < %s -S -debug-only=loop-unroll -loop-unroll 2>&1 | FileCheck %s
; REQUIRES: asserts
; Make sure we use the profile information correctly to peel off 3 iterations
diff --git a/test/Transforms/LoopUnroll/peel-loop.ll b/test/Transforms/LoopUnroll/peel-loop.ll
index 249122022387..bf0801fc760a 100644
--- a/test/Transforms/LoopUnroll/peel-loop.ll
+++ b/test/Transforms/LoopUnroll/peel-loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -S -loop-unroll -unroll-force-peel-count=3 -simplifycfg -instcombine | FileCheck %s
+; RUN: opt < %s -S -loop-unroll -unroll-force-peel-count=3 -verify-dom-info -simplifycfg -instcombine | FileCheck %s
; Basic loop peeling - check that we can peel-off the first 3 loop iterations
; when explicitly requested.
diff --git a/test/Transforms/LoopUnroll/peel-loop2.ll b/test/Transforms/LoopUnroll/peel-loop2.ll
new file mode 100644
index 000000000000..99e90797e199
--- /dev/null
+++ b/test/Transforms/LoopUnroll/peel-loop2.ll
@@ -0,0 +1,61 @@
+; RUN: opt -S -loop-unroll -unroll-force-peel-count=1 -verify-dom-info <%s | FileCheck %s
+
+; Check that a loop composed of several BBs is peeled correctly.
+
+declare void @funcb()
+@Comma = external global i8
+define void @funca(i8* readnone %b, i8* readnone %e) {
+entry:
+ %cmp2 = icmp eq i8* %b, %e
+ br i1 %cmp2, label %for.end, label %for.body.preheader
+
+for.body.preheader:
+ br label %for.body
+
+for.body:
+ %b.addr.03 = phi i8* [ %incdec.ptr, %for.inc ], [ %b, %for.body.preheader ]
+ %0 = load i8, i8* @Comma
+ %tobool = icmp eq i8 %0, 0
+ br i1 %tobool, label %for.inc, label %if.then
+
+if.then:
+ tail call void @funcb()
+ store i8 1, i8* @Comma
+ br label %for.inc
+
+for.inc:
+ %incdec.ptr = getelementptr inbounds i8, i8* %b.addr.03, i64 1
+ %cmp = icmp eq i8* %incdec.ptr, %e
+ br i1 %cmp, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+; CHECK-LABEL: @funca
+
+; Peeled iteration
+; CHECK: %[[REG1:[0-9]+]] = load i8, i8* @Comma
+; CHECK: %[[REG2:.*]] = icmp eq i8 %[[REG1]], 0
+; CHECK: br i1 %[[REG2]], label %{{.*}}, label %[[IFTHEN:.*]]
+; CHECK: [[IFTHEN]]:
+; CHECK: call void @funcb()
+; CHECK: store i8 1, i8* @Comma
+; CHECK: br label %[[FORINC:.*]]
+; CHECK: [[FORINC]]:
+; CHECK: %[[REG3:.*]] = getelementptr inbounds i8, i8* %b, i64 1
+; CHECK: %[[REG4:.*]] = icmp eq i8* %[[REG3]], %e
+; CHECK: br i1 %[[REG4]]
+
+; main body
+; CHECK: %[[REG1b:.*]] = load i8, i8* @Comma
+; CHECK: %[[REG2b:.*]] = icmp eq i8 %[[REG1b]], 0
+; CHECK: br i1 %[[REG2b]], label %{{.*}}, label %[[IFTHENb:.*]]
+; CHECK: [[IFTHENb]]:
+; CHECK: call void @funcb()
+; CHECK: store i8 1, i8* @Comma
+; CHECK: br label %[[FORINCb:.*]]
+; CHECK: [[FORINCb]]:
+; CHECK: %[[REG3b:.*]] = getelementptr inbounds i8, i8* %b, i64 1
+; CHECK: %[[REG4b:.*]] = icmp eq i8* %[[REG3b]], %e
+; CHECK: br i1 %[[REG4b]]
diff --git a/test/Transforms/LoopUnroll/pr31718.ll b/test/Transforms/LoopUnroll/pr31718.ll
new file mode 100644
index 000000000000..014ef7e501ec
--- /dev/null
+++ b/test/Transforms/LoopUnroll/pr31718.ll
@@ -0,0 +1,55 @@
+; RUN: opt -loop-unroll -verify-loop-lcssa -S < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@b = external local_unnamed_addr global i32, align 4
+
+; CHECK-LABEL: @main
+; CHECK: exit.loopexit:
+; CHECK: {{.*}} = phi i32 [ %d.0, %h3 ]
+; CHECK: br label %exit
+; CHECK: exit.loopexit1:
+; CHECK: {{.*}} = phi i32 [ %d.0, %h3.1 ]
+; CHECK: br label %exit
+
+define void @main() local_unnamed_addr #0 {
+ph1:
+ br label %h1
+
+h1:
+ %d.0 = phi i32 [ %1, %latch1 ], [ undef, %ph1 ]
+ br label %ph2
+
+ph2:
+ br label %h2
+
+h2:
+ %0 = phi i32 [ 0, %ph2 ], [ %inc, %latch2 ]
+ br label %h3
+
+h3:
+ br i1 undef, label %latch3, label %exit
+
+latch3:
+ br i1 false, label %exit3, label %h3
+
+exit3:
+ br label %latch2
+
+latch2:
+ %inc = add nuw nsw i32 %0, 1
+ %cmp = icmp slt i32 %inc, 2
+ br i1 %cmp, label %h2, label %exit2
+
+exit2:
+ br i1 undef, label %latch1, label %ph2
+
+latch1: ; preds = %exit2
+ %1 = load i32, i32* @b, align 4
+ br label %h1
+
+exit:
+ %d.0.lcssa = phi i32 [ %d.0, %h3 ]
+ ret void
+}
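+
+; Why two loop-exit blocks are expected above: unrolling the middle loop (trip
+; count 2) duplicates the inner h3 loop, and each copy's exit to %exit needs
+; its own LCSSA phi for %d.0, hence exit.loopexit and exit.loopexit1.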
diff --git a/test/Transforms/LoopUnroll/revisit.ll b/test/Transforms/LoopUnroll/revisit.ll
new file mode 100644
index 000000000000..fddf6cd1c4e8
--- /dev/null
+++ b/test/Transforms/LoopUnroll/revisit.ll
@@ -0,0 +1,156 @@
+; This test checks that nested loops are revisited in various scenarios when
+; unrolling. Note that if we ever start doing outer-loop peeling, a test case
+; for it should be added here; it would look essentially like a hybrid of the
+; current two cases.
+;
+; RUN: opt < %s -disable-output -debug-pass-manager 2>&1 \
+; RUN: -passes='require<opt-remark-emit>,loop(unroll)' \
+; RUN: | FileCheck %s
+;
+; Also run in a special mode that visits children.
+; RUN: opt < %s -disable-output -debug-pass-manager -unroll-revisit-child-loops 2>&1 \
+; RUN: -passes='require<opt-remark-emit>,loop(unroll)' \
+; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-CHILDREN
+
+; The basic test is fully unrolled, and we revisit the new post-unroll sibling
+; loops, including the ones that used to be child loops.
+define void @full_unroll(i1* %ptr) {
+; CHECK-LABEL: FunctionToLoopPassAdaptor{{.*}} on full_unroll
+; CHECK-NOT: LoopUnrollPass
+
+entry:
+ br label %l0
+
+l0:
+ %cond.0 = load volatile i1, i1* %ptr
+ br i1 %cond.0, label %l0.0.ph, label %exit
+
+l0.0.ph:
+ br label %l0.0
+
+l0.0:
+ %iv = phi i32 [ %iv.next, %l0.0.latch ], [ 0, %l0.0.ph ]
+ %iv.next = add i32 %iv, 1
+ br label %l0.0.0.ph
+
+l0.0.0.ph:
+ br label %l0.0.0
+
+l0.0.0:
+ %cond.0.0.0 = load volatile i1, i1* %ptr
+ br i1 %cond.0.0.0, label %l0.0.0, label %l0.0.1.ph
+; CHECK: LoopUnrollPass on Loop at depth 3 containing: %l0.0.0<header>
+; CHECK-NOT: LoopUnrollPass
+
+l0.0.1.ph:
+ br label %l0.0.1
+
+l0.0.1:
+ %cond.0.0.1 = load volatile i1, i1* %ptr
+ br i1 %cond.0.0.1, label %l0.0.1, label %l0.0.latch
+; CHECK: LoopUnrollPass on Loop at depth 3 containing: %l0.0.1<header>
+; CHECK-NOT: LoopUnrollPass
+
+l0.0.latch:
+ %cmp = icmp slt i32 %iv.next, 2
+ br i1 %cmp, label %l0.0, label %l0.latch
+; CHECK: LoopUnrollPass on Loop at depth 2 containing: %l0.0
+; CHECK-NOT: LoopUnrollPass
+;
+; Unrolling occurs, so we visit what were the inner loops twice over. First we
+; visit their clones, and then we visit the original loops re-parented.
+; CHECK: LoopUnrollPass on Loop at depth 2 containing: %l0.0.1.1<header>
+; CHECK-NOT: LoopUnrollPass
+; CHECK: LoopUnrollPass on Loop at depth 2 containing: %l0.0.0.1<header>
+; CHECK-NOT: LoopUnrollPass
+; CHECK: LoopUnrollPass on Loop at depth 2 containing: %l0.0.1<header>
+; CHECK-NOT: LoopUnrollPass
+; CHECK: LoopUnrollPass on Loop at depth 2 containing: %l0.0.0<header>
+; CHECK-NOT: LoopUnrollPass
+
+l0.latch:
+ br label %l0
+; CHECK: LoopUnrollPass on Loop at depth 1 containing: %l0<header>
+; CHECK-NOT: LoopUnrollPass
+
+exit:
+ ret void
+}
+
+; Now we test forced runtime partial unrolling with metadata. Here we end up
+; duplicating child loops without changing their structure, so they aren't
+; visited by default but will be visited under a special parameter.
+define void @partial_unroll(i32 %count, i1* %ptr) {
+; CHECK-LABEL: FunctionToLoopPassAdaptor{{.*}} on partial_unroll
+; CHECK-NOT: LoopUnrollPass
+
+entry:
+ br label %l0
+
+l0:
+ %cond.0 = load volatile i1, i1* %ptr
+ br i1 %cond.0, label %l0.0.ph, label %exit
+
+l0.0.ph:
+ br label %l0.0
+
+l0.0:
+ %iv = phi i32 [ %iv.next, %l0.0.latch ], [ 0, %l0.0.ph ]
+ %iv.next = add i32 %iv, 1
+ br label %l0.0.0.ph
+
+l0.0.0.ph:
+ br label %l0.0.0
+
+l0.0.0:
+ %cond.0.0.0 = load volatile i1, i1* %ptr
+ br i1 %cond.0.0.0, label %l0.0.0, label %l0.0.1.ph
+; CHECK: LoopUnrollPass on Loop at depth 3 containing: %l0.0.0<header>
+; CHECK-NOT: LoopUnrollPass
+
+l0.0.1.ph:
+ br label %l0.0.1
+
+l0.0.1:
+ %cond.0.0.1 = load volatile i1, i1* %ptr
+ br i1 %cond.0.0.1, label %l0.0.1, label %l0.0.latch
+; CHECK: LoopUnrollPass on Loop at depth 3 containing: %l0.0.1<header>
+; CHECK-NOT: LoopUnrollPass
+
+l0.0.latch:
+ %cmp = icmp slt i32 %iv.next, %count
+ br i1 %cmp, label %l0.0, label %l0.latch, !llvm.loop !1
+; CHECK: LoopUnrollPass on Loop at depth 2 containing: %l0.0
+; CHECK-NOT: LoopUnrollPass
+;
+; Partial unrolling occurs, which introduces both new child loops and new
+; sibling loops. We only visit the child loops in a special mode, not by default.
+; CHECK-CHILDREN: LoopUnrollPass on Loop at depth 3 containing: %l0.0.0<header>
+; CHECK-CHILDREN-NOT: LoopUnrollPass
+; CHECK-CHILDREN: LoopUnrollPass on Loop at depth 3 containing: %l0.0.1<header>
+; CHECK-CHILDREN-NOT: LoopUnrollPass
+; CHECK-CHILDREN: LoopUnrollPass on Loop at depth 3 containing: %l0.0.0.1<header>
+; CHECK-CHILDREN-NOT: LoopUnrollPass
+; CHECK-CHILDREN: LoopUnrollPass on Loop at depth 3 containing: %l0.0.1.1<header>
+; CHECK-CHILDREN-NOT: LoopUnrollPass
+;
+; When we revisit children, we also revisit the current loop.
+; CHECK-CHILDREN: LoopUnrollPass on Loop at depth 2 containing: %l0.0<header>
+; CHECK-CHILDREN-NOT: LoopUnrollPass
+;
+; Revisit the children of the outer loop that are part of the epilogue.
+;
+; CHECK: LoopUnrollPass on Loop at depth 2 containing: %l0.0.0.epil<header>
+; CHECK-NOT: LoopUnrollPass
+; CHECK: LoopUnrollPass on Loop at depth 2 containing: %l0.0.1.epil<header>
+; CHECK-NOT: LoopUnrollPass
+l0.latch:
+ br label %l0
+; CHECK: LoopUnrollPass on Loop at depth 1 containing: %l0<header>
+; CHECK-NOT: LoopUnrollPass
+
+exit:
+ ret void
+}
+!1 = !{!1, !2}
+!2 = !{!"llvm.loop.unroll.count", i32 2}
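+
+; For reference: !1 is the self-referential loop ID attached to the latch
+; branch via !llvm.loop, and !2 requests a forced unroll count of 2.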
diff --git a/test/Transforms/LoopUnroll/runtime-loop.ll b/test/Transforms/LoopUnroll/runtime-loop.ll
index b5299bb17f82..04661314eb1d 100644
--- a/test/Transforms/LoopUnroll/runtime-loop.ll
+++ b/test/Transforms/LoopUnroll/runtime-loop.ll
@@ -1,6 +1,9 @@
; RUN: opt < %s -S -loop-unroll -unroll-runtime=true -unroll-runtime-epilog=true | FileCheck %s -check-prefix=EPILOG
; RUN: opt < %s -S -loop-unroll -unroll-runtime=true -unroll-runtime-epilog=false | FileCheck %s -check-prefix=PROLOG
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll)' -unroll-runtime=true -unroll-runtime-epilog=true | FileCheck %s -check-prefix=EPILOG
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll)' -unroll-runtime=true -unroll-runtime-epilog=false | FileCheck %s -check-prefix=PROLOG
+
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; Tests for unrolling loops with run-time trip counts
diff --git a/test/Transforms/LoopUnroll/runtime-loop1.ll b/test/Transforms/LoopUnroll/runtime-loop1.ll
index 5d7c64824788..d32c83571b5a 100644
--- a/test/Transforms/LoopUnroll/runtime-loop1.ll
+++ b/test/Transforms/LoopUnroll/runtime-loop1.ll
@@ -1,6 +1,9 @@
; RUN: opt < %s -S -loop-unroll -unroll-runtime -unroll-count=2 -unroll-runtime-epilog=true | FileCheck %s -check-prefix=EPILOG
; RUN: opt < %s -S -loop-unroll -unroll-runtime -unroll-count=2 -unroll-runtime-epilog=false | FileCheck %s -check-prefix=PROLOG
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll)' -unroll-runtime -unroll-count=2 -unroll-runtime-epilog=true | FileCheck %s -check-prefix=EPILOG
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll)' -unroll-runtime -unroll-count=2 -unroll-runtime-epilog=false | FileCheck %s -check-prefix=PROLOG
+
; This tests that setting the unroll count works
diff --git a/test/Transforms/LoopUnroll/runtime-loop2.ll b/test/Transforms/LoopUnroll/runtime-loop2.ll
index 3ce8702a9463..7e7fb9787130 100644
--- a/test/Transforms/LoopUnroll/runtime-loop2.ll
+++ b/test/Transforms/LoopUnroll/runtime-loop2.ll
@@ -1,5 +1,8 @@
-; RUN: opt < %s -S -loop-unroll -unroll-threshold=25 -unroll-runtime -unroll-runtime-epilog=true -unroll-count=8 | FileCheck %s -check-prefix=EPILOG
-; RUN: opt < %s -S -loop-unroll -unroll-threshold=25 -unroll-runtime -unroll-runtime-epilog=false | FileCheck %s -check-prefix=PROLOG
+; RUN: opt < %s -S -loop-unroll -unroll-threshold=25 -unroll-partial-threshold=25 -unroll-runtime -unroll-runtime-epilog=true -unroll-count=8 | FileCheck %s -check-prefix=EPILOG
+; RUN: opt < %s -S -loop-unroll -unroll-threshold=25 -unroll-partial-threshold=25 -unroll-runtime -unroll-runtime-epilog=false | FileCheck %s -check-prefix=PROLOG
+
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll)' -unroll-threshold=25 -unroll-partial-threshold=25 -unroll-runtime -unroll-runtime-epilog=true -unroll-count=8 | FileCheck %s -check-prefix=EPILOG
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll)' -unroll-threshold=25 -unroll-partial-threshold=25 -unroll-runtime -unroll-runtime-epilog=false | FileCheck %s -check-prefix=PROLOG
; Choose a smaller, power-of-two, unroll count if the loop is too large.
; This test makes sure we're not unrolling 'odd' counts
diff --git a/test/Transforms/LoopUnroll/runtime-loop3.ll b/test/Transforms/LoopUnroll/runtime-loop3.ll
index fd13ebfa0b84..253993ee42d4 100644
--- a/test/Transforms/LoopUnroll/runtime-loop3.ll
+++ b/test/Transforms/LoopUnroll/runtime-loop3.ll
@@ -1,5 +1,6 @@
; REQUIRES: asserts
-; RUN: opt < %s -disable-output -stats -loop-unroll -unroll-runtime -unroll-threshold=400 -info-output-file - | FileCheck %s --check-prefix=STATS
+; RUN: opt < %s -disable-output -stats -loop-unroll -unroll-runtime -unroll-partial-threshold=200 -unroll-threshold=400 -info-output-file - | FileCheck %s --check-prefix=STATS
+; RUN: opt < %s -disable-output -stats -passes='require<opt-remark-emit>,loop(unroll)' -unroll-runtime -unroll-partial-threshold=200 -unroll-threshold=400 -info-output-file - | FileCheck %s --check-prefix=STATS
; Test that nested loops can be unrolled. We need to increase the threshold to do it.
diff --git a/test/Transforms/LoopUnroll/runtime-loop5.ll b/test/Transforms/LoopUnroll/runtime-loop5.ll
index e8d51775ce18..86a26baca657 100644
--- a/test/Transforms/LoopUnroll/runtime-loop5.ll
+++ b/test/Transforms/LoopUnroll/runtime-loop5.ll
@@ -1,6 +1,9 @@
; RUN: opt < %s -S -loop-unroll -unroll-runtime=true -unroll-count=16 | FileCheck --check-prefix=UNROLL-16 %s
; RUN: opt < %s -S -loop-unroll -unroll-runtime=true -unroll-count=4 | FileCheck --check-prefix=UNROLL-4 %s
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll)' -unroll-runtime=true -unroll-count=16 | FileCheck --check-prefix=UNROLL-16 %s
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll)' -unroll-runtime=true -unroll-count=4 | FileCheck --check-prefix=UNROLL-4 %s
+
; Given that the trip-count of this loop is a 3-bit value, we cannot
; safely unroll it with a count of anything more than 8.
@@ -11,9 +14,6 @@ entry:
%cmp1 = icmp eq i3 %n, 0
br i1 %cmp1, label %for.end, label %for.body
-; UNROLL-16-NOT: for.body.prol:
-; UNROLL-4: for.body.prol:
-
for.body: ; preds = %for.body, %entry
; UNROLL-16-LABEL: for.body:
; UNROLL-4-LABEL: for.body:
@@ -39,6 +39,10 @@ for.body: ; preds = %for.body, %entry
; UNROLL-16-LABEL: for.end
; UNROLL-4-LABEL: for.end
+
+; UNROLL-16-NOT: for.body.epil:
+; UNROLL-4: for.body.epil:
+
for.end: ; preds = %for.body, %entry
%sum.0.lcssa = phi i3 [ 0, %entry ], [ %add, %for.body ]
ret i3 %sum.0.lcssa
diff --git a/test/Transforms/LoopUnroll/unloop.ll b/test/Transforms/LoopUnroll/unloop.ll
index db7bad5322c5..6af13a55d6b9 100644
--- a/test/Transforms/LoopUnroll/unloop.ll
+++ b/test/Transforms/LoopUnroll/unloop.ll
@@ -1,5 +1,5 @@
; RUN: opt < %s -S -loop-unroll -verify-loop-info | FileCheck %s
-; RUN: opt < %s -S -passes='function(require<scalar-evolution>,require<targetir>,require<opt-remark-emit>,loop(unroll),verify<loops>)' | FileCheck %s
+; RUN: opt < %s -S -passes='require<opt-remark-emit>,loop(unroll),verify<loops>' | FileCheck %s
;
; Unit tests for LoopInfo::markAsRemoved.
diff --git a/test/Transforms/LoopUnroll/unroll-heuristics-pgo.ll b/test/Transforms/LoopUnroll/unroll-heuristics-pgo.ll
index f7add40b9d15..6778a52b3af8 100644
--- a/test/Transforms/LoopUnroll/unroll-heuristics-pgo.ll
+++ b/test/Transforms/LoopUnroll/unroll-heuristics-pgo.ll
@@ -3,12 +3,12 @@
@known_constant = internal unnamed_addr constant [9 x i32] [i32 0, i32 -1, i32 0, i32 -1, i32 5, i32 -1, i32 0, i32 -1, i32 0], align 16
; CHECK-LABEL: @bar_prof
-; CHECK: loop.prol:
; CHECK: loop:
; CHECK: %mul = mul
; CHECK: %mul.1 = mul
; CHECK: %mul.2 = mul
; CHECK: %mul.3 = mul
+; CHECK: loop.epil:
define i32 @bar_prof(i32* noalias nocapture readonly %src, i64 %c) !prof !1 {
entry:
br label %loop
@@ -32,7 +32,7 @@ loop.end:
}
; CHECK-LABEL: @bar_prof_flat
-; CHECK-NOT: loop.prol
+; CHECK-NOT: loop.epil
define i32 @bar_prof_flat(i32* noalias nocapture readonly %src, i64 %c) !prof !1 {
entry:
br label %loop
diff --git a/test/Transforms/LoopUnroll/unroll-pragmas.ll b/test/Transforms/LoopUnroll/unroll-pragmas.ll
index 2843e627b3c1..88f32c92d694 100644
--- a/test/Transforms/LoopUnroll/unroll-pragmas.ll
+++ b/test/Transforms/LoopUnroll/unroll-pragmas.ll
@@ -171,10 +171,6 @@ for.end: ; preds = %for.body, %entry
; should be duplicated (original and 4x unrolled).
;
; CHECK-LABEL: @runtime_loop_with_count4(
-; CHECK: for.body.prol:
-; CHECK: store
-; CHECK-NOT: store
-; CHECK: br i1
; CHECK: for.body
; CHECK: store
; CHECK: store
@@ -182,6 +178,10 @@ for.end: ; preds = %for.body, %entry
; CHECK: store
; CHECK-NOT: store
; CHECK: br i1
+; CHECK: for.body.epil:
+; CHECK: store
+; CHECK-NOT: store
+; CHECK: br i1
define void @runtime_loop_with_count4(i32* nocapture %a, i32 %b) {
entry:
%cmp3 = icmp sgt i32 %b, 0
@@ -287,10 +287,6 @@ for.end: ; preds = %for.body
; (original and 8x).
;
; CHECK-LABEL: @runtime_loop_with_enable(
-; CHECK: for.body.prol:
-; CHECK: store
-; CHECK-NOT: store
-; CHECK: br i1
; CHECK: for.body:
; CHECK: store i32
; CHECK: store i32
@@ -302,6 +298,10 @@ for.end: ; preds = %for.body
; CHECK: store i32
; CHECK-NOT: store i32
; CHECK: br i1
+; CHECK: for.body.epil:
+; CHECK: store
+; CHECK-NOT: store
+; CHECK: br i1
define void @runtime_loop_with_enable(i32* nocapture %a, i32 %b) {
entry:
%cmp3 = icmp sgt i32 %b, 0
@@ -328,16 +328,16 @@ for.end: ; preds = %for.body, %entry
; should be duplicated (original and 3x unrolled).
;
; CHECK-LABEL: @runtime_loop_with_count3(
-; CHECK: for.body.prol:
-; CHECK: store
-; CHECK-NOT: store
-; CHECK: br i1
; CHECK: for.body
; CHECK: store
; CHECK: store
; CHECK: store
; CHECK-NOT: store
; CHECK: br i1
+; CHECK: for.body.epil:
+; CHECK: store
+; CHECK-NOT: store
+; CHECK: br i1
define void @runtime_loop_with_count3(i32* nocapture %a, i32 %b) {
entry:
%cmp3 = icmp sgt i32 %b, 0
diff --git a/test/Transforms/LoopUnroll/update-loop-info-in-subloops.ll b/test/Transforms/LoopUnroll/update-loop-info-in-subloops.ll
index adbf47defe8f..6748ebefa522 100644
--- a/test/Transforms/LoopUnroll/update-loop-info-in-subloops.ll
+++ b/test/Transforms/LoopUnroll/update-loop-info-in-subloops.ll
@@ -1,4 +1,5 @@
; RUN: opt -S < %s -loop-unroll -block-freq | FileCheck %s
+; RUN: opt -S < %s -passes='require<opt-remark-emit>,loop(unroll),require<block-freq>' | FileCheck %s
; Crasher from PR20987.
; CHECK: define void @update_loop_info_in_subloops
diff --git a/test/Transforms/LoopUnswitch/AMDGPU/divergent-unswitch.ll b/test/Transforms/LoopUnswitch/AMDGPU/divergent-unswitch.ll
new file mode 100644
index 000000000000..1f106bd894a8
--- /dev/null
+++ b/test/Transforms/LoopUnswitch/AMDGPU/divergent-unswitch.ll
@@ -0,0 +1,85 @@
+; RUN: opt -mtriple=amdgcn-- -O3 -S %s | FileCheck %s
+
+; Check that loop unswitching happened and the condition was hoisted out of
+; the loop. The condition is uniform, so all targets should perform the
+; unswitching.
+
+; CHECK-LABEL: {{^}}define amdgpu_kernel void @uniform_unswitch
+; CHECK: entry:
+; CHECK-NEXT: [[LOOP_COND:%[a-z0-9]+]] = icmp
+; CHECK-NEXT: [[IF_COND:%[a-z0-9]+]] = icmp eq i32 %x, 123456
+; CHECK-NEXT: and i1 [[LOOP_COND]], [[IF_COND]]
+; CHECK-NEXT: br i1
+
+define amdgpu_kernel void @uniform_unswitch(i32 * nocapture %out, i32 %n, i32 %x) {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body.lr.ph, label %for.cond.cleanup
+
+for.body.lr.ph: ; preds = %entry
+ %cmp1 = icmp eq i32 %x, 123456
+ br label %for.body
+
+for.cond.cleanup.loopexit: ; preds = %for.inc
+ br label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
+ ret void
+
+for.body: ; preds = %for.inc, %for.body.lr.ph
+ %i.07 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
+ br i1 %cmp1, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body
+ %arrayidx = getelementptr inbounds i32, i32 * %out, i32 %i.07
+ store i32 %i.07, i32 * %arrayidx, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %if.then
+ %inc = add nuw nsw i32 %i.07, 1
+ %exitcond = icmp eq i32 %inc, %n
+ br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
+}
+
+; Check that loop unswitching does not happen if the condition is divergent.
+
+; CHECK-LABEL: {{^}}define amdgpu_kernel void @divergent_unswitch
+; CHECK: entry:
+; CHECK: icmp
+; CHECK: [[IF_COND:%[a-z0-9]+]] = icmp {{.*}} 567890
+; CHECK: br label
+; CHECK: br i1 [[IF_COND]]
+
+define amdgpu_kernel void @divergent_unswitch(i32 * nocapture %out, i32 %n) {
+entry:
+ %cmp9 = icmp sgt i32 %n, 0
+ br i1 %cmp9, label %for.body.lr.ph, label %for.cond.cleanup
+
+for.body.lr.ph: ; preds = %entry
+ %call = tail call i32 @llvm.amdgcn.workitem.id.x() #0
+ %cmp2 = icmp eq i32 %call, 567890
+ br label %for.body
+
+for.cond.cleanup.loopexit: ; preds = %for.inc
+ br label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
+ ret void
+
+for.body: ; preds = %for.inc, %for.body.lr.ph
+ %i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
+ br i1 %cmp2, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body
+ %arrayidx = getelementptr inbounds i32, i32 * %out, i32 %i.010
+ store i32 %i.010, i32 * %arrayidx, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %if.then
+ %inc = add nuw nsw i32 %i.010, 1
+ %exitcond = icmp eq i32 %inc, %n
+ br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+attributes #0 = { nounwind readnone }
diff --git a/test/Transforms/LoopUnswitch/AMDGPU/lit.local.cfg b/test/Transforms/LoopUnswitch/AMDGPU/lit.local.cfg
new file mode 100644
index 000000000000..2a665f06be72
--- /dev/null
+++ b/test/Transforms/LoopUnswitch/AMDGPU/lit.local.cfg
@@ -0,0 +1,2 @@
+if 'AMDGPU' not in config.root.targets:
+ config.unsupported = True
diff --git a/test/Transforms/LoopUnswitch/basictest.ll b/test/Transforms/LoopUnswitch/basictest.ll
index a02a463764dd..3add848395ae 100644
--- a/test/Transforms/LoopUnswitch/basictest.ll
+++ b/test/Transforms/LoopUnswitch/basictest.ll
@@ -101,6 +101,217 @@ loop_exit:
; CHECK: }
}
+; Make sure we unswitch %a == 0 out of the loop.
+;
+; CHECK: define void @and_i2_as_switch_input(i2
+; CHECK: entry:
+; This is an indication that the loop has been unswitched.
+; CHECK: icmp eq i2 %a, 0
+; CHECK: br
+; There should be no more unswitching after the 1st unswitch.
+; CHECK-NOT: icmp eq
+; CHECK: ret
+define void @and_i2_as_switch_input(i2 %a) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i2 [ 0, %entry ], [ %inc, %for.inc ]
+ %and = and i2 %a, %i
+ %and1 = and i2 %and, %i
+ switch i2 %and1, label %sw.default [
+ i2 0, label %sw.bb
+ i2 1, label %sw.bb1
+ ]
+
+sw.bb:
+ br label %sw.epilog
+
+sw.bb1:
+ br label %sw.epilog
+
+sw.default:
+ br label %sw.epilog
+
+sw.epilog:
+ br label %for.inc
+
+for.inc:
+ %inc = add nsw i2 %i, 1
+ %cmp = icmp slt i2 %inc, 3
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
+
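+; Worked out: with %a == 0, %and1 = (%a & %i) & %i is 0 for every %i, so the
+; switch in that unswitched copy always takes the 'i2 0' case and the body
+; collapses to a straight line through %sw.bb.
+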
+; Make sure we unswitch %a == -1 (i.e. all-ones) out of the loop.
+;
+; CHECK: define void @or_i2_as_switch_input(i2
+; CHECK: entry:
+; This is an indication that the loop has been unswitched.
+; CHECK: icmp eq i2 %a, -1
+; CHECK: br
+; There should be no more unswitching after the 1st unswitch.
+; CHECK-NOT: icmp eq
+; CHECK: ret
+define void @or_i2_as_switch_input(i2 %a) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i2 [ 0, %entry ], [ %inc, %for.inc ]
+ %or = or i2 %a, %i
+ %or1 = or i2 %or, %i
+ switch i2 %or1, label %sw.default [
+ i2 2, label %sw.bb
+ i2 3, label %sw.bb1
+ ]
+
+sw.bb:
+ br label %sw.epilog
+
+sw.bb1:
+ br label %sw.epilog
+
+sw.default:
+ br label %sw.epilog
+
+sw.epilog:
+ br label %for.inc
+
+for.inc:
+ %inc = add nsw i2 %i, 1
+ %cmp = icmp slt i2 %inc, 3
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
+
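+; Worked out: with %a == -1 (0b11 as an i2), %or1 = (%a | %i) | %i is -1 for
+; every %i, so the switch in that unswitched copy always takes the 'i2 3' case.
+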
+; Make sure we unswitch %a == -1 (i.e. all-ones) out of the loop, even though
+; we do not have it as a case value. Unswitching on it allows us to simplify
+; the or operator chain.
+;
+; CHECK: define void @or_i2_as_switch_input_unswitch_default(i2
+; CHECK: entry:
+; This is an indication that the loop has been unswitched.
+; CHECK: icmp eq i2 %a, -1
+; CHECK: br
+; There should be no more unswitching after the 1st unswitch.
+; CHECK-NOT: icmp eq
+; CHECK: ret
+define void @or_i2_as_switch_input_unswitch_default(i2 %a) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i2 [ 0, %entry ], [ %inc, %for.inc ]
+ %or = or i2 %a, %i
+ %or1 = or i2 %or, %i
+ switch i2 %or1, label %sw.default [
+ i2 1, label %sw.bb
+ i2 2, label %sw.bb1
+ ]
+
+sw.bb:
+ br label %sw.epilog
+
+sw.bb1:
+ br label %sw.epilog
+
+sw.default:
+ br label %sw.epilog
+
+sw.epilog:
+ br label %for.inc
+
+for.inc:
+ %inc = add nsw i2 %i, 1
+ %cmp = icmp slt i2 %inc, 3
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
+
+; Make sure we don't unswitch, as we cannot find an input value %a
+; that will effectively unswitch 0 or 3 out of the loop.
+;
+; CHECK: define void @and_or_i2_as_switch_input(i2
+; CHECK: entry:
+; This is an indication that the loop has NOT been unswitched.
+; CHECK-NOT: icmp
+; CHECK: br
+define void @and_or_i2_as_switch_input(i2 %a) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i2 [ 0, %entry ], [ %inc, %for.inc ]
+ %and = and i2 %a, %i
+ %or = or i2 %and, %i
+ switch i2 %or, label %sw.default [
+ i2 0, label %sw.bb
+ i2 3, label %sw.bb1
+ ]
+
+sw.bb:
+ br label %sw.epilog
+
+sw.bb1:
+ br label %sw.epilog
+
+sw.default:
+ br label %sw.epilog
+
+sw.epilog:
+ br label %for.inc
+
+for.inc:
+ %inc = add nsw i2 %i, 1
+ %cmp = icmp slt i2 %inc, 3
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
+
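+; Worked out: by absorption, (%a & %i) | %i == %i for any %a, so the switch
+; input never depends on %a and no choice of %a can pin the target to 0 or 3.
+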
+; Make sure we don't unswitch, as we cannot find an input value %a
+; that will effectively unswitch true/false out of the loop.
+;
+; CHECK: define void @and_or_i1_as_branch_input(i1
+; CHECK: entry:
+; This is an indication that the loop has NOT been unswitched.
+; CHECK-NOT: icmp
+; CHECK: br
+define void @and_or_i1_as_branch_input(i1 %a) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i1 [ 0, %entry ], [ %inc, %for.inc ]
+ %and = and i1 %a, %i
+ %or = or i1 %and, %i
+ br i1 %or, label %sw.bb, label %sw.bb1
+
+sw.bb:
+ br label %sw.epilog
+
+sw.bb1:
+ br label %sw.epilog
+
+sw.epilog:
+ br label %for.inc
+
+for.inc:
+ %inc = add nsw i1 %i, 1
+ %cmp = icmp slt i1 %inc, 1
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
declare void @incf() noreturn
declare void @decf() noreturn
diff --git a/test/Transforms/LoopUnswitch/cold-loop.ll b/test/Transforms/LoopUnswitch/cold-loop.ll
deleted file mode 100644
index 1fbc08038bbd..000000000000
--- a/test/Transforms/LoopUnswitch/cold-loop.ll
+++ /dev/null
@@ -1,52 +0,0 @@
-; RUN: opt < %s -loop-unswitch -loop-unswitch-with-block-frequency -S 2>&1 | FileCheck %s
-
-;; trivial condition should be unswithed regardless of coldness.
-define i32 @test1(i1 %cond1, i1 %cond2) !prof !1 {
- br i1 %cond1, label %loop_begin, label %loop_exit, !prof !0
-
-loop_begin:
-; CHECK: br i1 true, label %continue, label %loop_exit.loopexit
- br i1 %cond2, label %continue, label %loop_exit ; trivial condition
-
-continue:
- call void @some_func1() noreturn nounwind
- br label %loop_begin
-
-loop_exit:
- ret i32 0
-}
-
-;; cold non-trivial condition should not be unswitched.
-define i32 @test2(i32* %var, i1 %cond1, i1 %cond2) !prof !1 {
- br i1 %cond1, label %loop_begin, label %loop_exit, !prof !0
-
-loop_begin:
- store i32 1, i32* %var
-; CHECK: br i1 %cond2, label %continue1, label %continue2
- br i1 %cond2, label %continue1, label %continue2 ; non-trivial condition
-
-continue1:
- call void @some_func1() noreturn nounwind
- br label %joint
-
-continue2:
- call void @some_func2() noreturn nounwind
- br label %joint
-
-joint:
-;; unswitching will duplicate these calls.
- call void @some_func3() noreturn nounwind
- call void @some_func4() noreturn nounwind
- br label %loop_begin
-
-loop_exit:
- ret i32 0
-}
-
-declare void @some_func1() noreturn
-declare void @some_func2() noreturn
-declare void @some_func3() noreturn
-declare void @some_func4() noreturn
-
-!0 = !{!"branch_weights", i32 1, i32 100000000}
-!1 = !{!"function_entry_count", i64 100}
diff --git a/test/Transforms/LoopUnswitch/copy-metadata.ll b/test/Transforms/LoopUnswitch/copy-metadata.ll
index 2a634c25a23d..3302bce9a6e5 100644
--- a/test/Transforms/LoopUnswitch/copy-metadata.ll
+++ b/test/Transforms/LoopUnswitch/copy-metadata.ll
@@ -3,11 +3,11 @@
; This test checks that the unswitched condition preserves make.implicit metadata.
define i32 @test(i1 %cond) {
-; CHECK: br i1 %cond, label %..split_crit_edge, label %.loop_exit.split_crit_edge, !make.implicit !0
+; CHECK-LABEL: @test(
+; CHECK: br i1 %cond, label %..split_crit_edge, label %.loop_exit.split_crit_edge, !make.implicit !0
br label %loop_begin
loop_begin:
-; CHECK: br i1 true, label %continue, label %loop_exit, !make.implicit !0
br i1 %cond, label %continue, label %loop_exit, !make.implicit !0
continue:
diff --git a/test/Transforms/LoopUnswitch/crash.ll b/test/Transforms/LoopUnswitch/crash.ll
index 101fb7a2c2ce..b273a123c39c 100644
--- a/test/Transforms/LoopUnswitch/crash.ll
+++ b/test/Transforms/LoopUnswitch/crash.ll
@@ -30,7 +30,7 @@ return: ; preds = %return.loopexit, %list_Length.exit9
ret void
}
-define void @test2(i32 %x1, i32 %y1, i32 %z1, i32 %r1) nounwind {
+define void @test2() nounwind {
entry:
br label %bb.nph
diff --git a/test/Transforms/LoopUnswitch/simplify-with-nonvalness.ll b/test/Transforms/LoopUnswitch/simplify-with-nonvalness.ll
new file mode 100644
index 000000000000..d033b083a1b8
--- /dev/null
+++ b/test/Transforms/LoopUnswitch/simplify-with-nonvalness.ll
@@ -0,0 +1,58 @@
+; RUN: opt < %s -loop-unswitch -verify-loop-info -S 2>&1 | FileCheck %s
+
+; There is one case and one default case in the switch. After we unswitch, we
+; know %a is definitely not 0 in one of the unswitched loops; make sure we take
+; advantage of that and simplify the branches in the loop.
+;
+; CHECK: define void @simplify_with_nonvalness(
+
+; This is the loop in which we know %a is definitely 0.
+; CHECK: sw.bb.us:
+; CHECK: br i1 true, label %if.then.us, label %if.end.us
+
+; This is the loop in which we do not know what %a is, but we know %a is definitely NOT 0.
+; Make sure we use that information to simplify.
+; The icmp eq i32 %a, 0 in that unswitched loop is simplified to false.
+; CHECK: sw.bb.split:
+; CHECK: br i1 false, label %if.then, label %if.end
+
+define void @simplify_with_nonvalness(i32 %a) #0 {
+entry:
+ br label %for.cond
+
+for.cond:
+ %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
+ %cmp = icmp slt i32 %i.0, 1024
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body:
+ switch i32 %a, label %sw.default [
+ i32 0, label %sw.bb
+ ]
+
+sw.bb:
+ %cmp1 = icmp eq i32 %a, 0
+ br i1 %cmp1, label %if.then, label %if.end
+
+if.then:
+ call void (...) @bar()
+ br label %if.end
+
+if.end:
+ br label %sw.epilog
+
+sw.default:
+ br label %sw.epilog
+
+sw.epilog:
+ br label %for.inc
+
+for.inc:
+ %inc = add nsw i32 %i.0, 1
+ br label %for.cond
+
+for.end:
+ ret void
+}
+
+declare void @bar(...)
diff --git a/test/Transforms/LoopUnswitch/trivial-unswitch.ll b/test/Transforms/LoopUnswitch/trivial-unswitch.ll
index db3328278dae..2def5b6f0334 100644
--- a/test/Transforms/LoopUnswitch/trivial-unswitch.ll
+++ b/test/Transforms/LoopUnswitch/trivial-unswitch.ll
@@ -44,4 +44,48 @@ loop_exit:
ret i32 0
}
-declare void @some_func() noreturn
\ No newline at end of file
+
+; We will not be able to trivially unswitch on the SwitchInst, as its input
+; is a constant. However, since it is a constant we should be able to figure
+; out that the switch can be folded into an unconditional branch to %continue.
+; Then we unswitch on the br instruction in %continue.
+;
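+; Concretely: the switch input is the constant 1, which selects the 'i32 1'
+; case, so the whole switch is equivalent to 'br label %continue'.
+;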
+; CHECK: define i32 @test2(
+; This is an indication that the loop has been unswitched on %cond1.
+; CHECK: br i1 %cond1, label %..split_crit_edge, label %.loop_exit.split_crit_edge
+
+; CHECK: ..split_crit_edge: ; preds = %0
+; CHECK: br label %.split
+
+; CHECK: .split: ; preds = %..split_crit_edge
+; CHECK: br label %loop_begin
+
+; CHECK: loop_begin: ; preds = %do_something, %.split
+; CHECK: switch i32
+
+; CHECK: continue: ; preds = %loop_begin
+; CHECK: %var_val = load i32, i32* %var
+; CHECK: br i1 true, label %do_something, label %loop_exit
+
+define i32 @test2(i32* %var, i1 %cond1) {
+ br label %loop_begin
+
+loop_begin:
+ switch i32 1, label %continue [
+ i32 0, label %loop_exit
+ i32 1, label %continue
+ ]
+
+continue:
+ %var_val = load i32, i32* %var
+ br i1 %cond1, label %do_something, label %loop_exit
+
+do_something:
+ call void @some_func() noreturn nounwind
+ br label %loop_begin
+
+loop_exit:
+ ret i32 0
+}
+
+declare void @some_func() noreturn
diff --git a/test/Transforms/LoopVectorize/AArch64/aarch64-predication.ll b/test/Transforms/LoopVectorize/AArch64/aarch64-predication.ll
index 21b59f87d042..37a6d4e79984 100644
--- a/test/Transforms/LoopVectorize/AArch64/aarch64-predication.ll
+++ b/test/Transforms/LoopVectorize/AArch64/aarch64-predication.ll
@@ -1,40 +1,55 @@
-; RUN: opt < %s -loop-vectorize -simplifycfg -S | FileCheck %s
-; RUN: opt < %s -force-vector-width=2 -loop-vectorize -simplifycfg -S | FileCheck %s
+; REQUIRES: asserts
+; RUN: opt < %s -loop-vectorize -disable-output -debug-only=loop-vectorize 2>&1 | FileCheck %s --check-prefix=COST
+; RUN: opt < %s -loop-vectorize -force-vector-width=2 -instcombine -simplifycfg -S | FileCheck %s
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-gnu"
-; CHECK-LABEL: predicated_udiv_scalarized_operand
-;
; This test checks that we correctly compute the scalarized operands for a
; user-specified vectorization factor when interleaving is disabled. We use the
-; "optsize" attribute to disable all interleaving calculations.
+; "optsize" attribute to disable all interleaving calculations. A cost of 4
+; for %tmp4 indicates that we would scalarize its operand (%tmp3), giving
+; %tmp4 a lower scalarization overhead.
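+;
+; The loop below is roughly the following C (illustrative sketch; the udiv in
+; the IR is unsigned division):
+;
+;   unsigned long predicated_udiv_scalarized_operand(unsigned long *a,
+;                                                    unsigned long x) {
+;     unsigned long r = 0;
+;     for (int i = 0; i < 100; i++) {
+;       unsigned long t = a[i];
+;       if ((long)t > 0)          /* icmp sgt: the compare is signed */
+;         t = t / (t + x);        /* the predicated udiv */
+;       r += t;
+;     }
+;     return r;
+;   }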
;
-; CHECK: vector.body:
-; CHECK: %wide.load = load <2 x i64>, <2 x i64>* {{.*}}, align 4
-; CHECK: br i1 {{.*}}, label %[[IF0:.+]], label %[[CONT0:.+]]
-; CHECK: [[IF0]]:
-; CHECK: %[[T00:.+]] = extractelement <2 x i64> %wide.load, i32 0
-; CHECK: %[[T01:.+]] = extractelement <2 x i64> %wide.load, i32 0
-; CHECK: %[[T02:.+]] = add nsw i64 %[[T01]], %x
-; CHECK: %[[T03:.+]] = udiv i64 %[[T00]], %[[T02]]
-; CHECK: %[[T04:.+]] = insertelement <2 x i64> undef, i64 %[[T03]], i32 0
-; CHECK: br label %[[CONT0]]
-; CHECK: [[CONT0]]:
-; CHECK: %[[T05:.+]] = phi <2 x i64> [ undef, %vector.body ], [ %[[T04]], %[[IF0]] ]
-; CHECK: br i1 {{.*}}, label %[[IF1:.+]], label %[[CONT1:.+]]
-; CHECK: [[IF1]]:
-; CHECK: %[[T06:.+]] = extractelement <2 x i64> %wide.load, i32 1
-; CHECK: %[[T07:.+]] = extractelement <2 x i64> %wide.load, i32 1
-; CHECK: %[[T08:.+]] = add nsw i64 %[[T07]], %x
-; CHECK: %[[T09:.+]] = udiv i64 %[[T06]], %[[T08]]
-; CHECK: %[[T10:.+]] = insertelement <2 x i64> %[[T05]], i64 %[[T09]], i32 1
-; CHECK: br label %[[CONT1]]
-; CHECK: [[CONT1]]:
-; CHECK: phi <2 x i64> [ %[[T05]], %[[CONT0]] ], [ %[[T10]], %[[IF1]] ]
-; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
-
-define i64 @predicated_udiv_scalarized_operand(i64* %a, i1 %c, i64 %x) optsize {
+; COST-LABEL: predicated_udiv_scalarized_operand
+; COST: LV: Found an estimated cost of 4 for VF 2 For instruction: %tmp4 = udiv i64 %tmp2, %tmp3
+;
+; CHECK-LABEL: @predicated_udiv_scalarized_operand(
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %entry ], [ [[INDEX_NEXT:%.*]], %[[PRED_UDIV_CONTINUE2:.*]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, %entry ], [ [[TMP17:%.*]], %[[PRED_UDIV_CONTINUE2]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i64, i64* %a, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64* [[TMP0]] to <2 x i64>*
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, <2 x i64>* [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <2 x i64> [[WIDE_LOAD]], zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0
+; CHECK-NEXT: br i1 [[TMP3]], label %[[PRED_UDIV_IF:.*]], label %[[PRED_UDIV_CONTINUE:.*]]
+; CHECK: [[PRED_UDIV_IF]]:
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = add nsw i64 [[TMP5]], %x
+; CHECK-NEXT: [[TMP7:%.*]] = udiv i64 [[TMP4]], [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i64> undef, i64 [[TMP7]], i32 0
+; CHECK-NEXT: br label %[[PRED_UDIV_CONTINUE]]
+; CHECK: [[PRED_UDIV_CONTINUE]]:
+; CHECK-NEXT: [[TMP9:%.*]] = phi <2 x i64> [ undef, %vector.body ], [ [[TMP8]], %[[PRED_UDIV_IF]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1
+; CHECK-NEXT: br i1 [[TMP10]], label %[[PRED_UDIV_IF1:.*]], label %[[PRED_UDIV_CONTINUE2]]
+; CHECK: [[PRED_UDIV_IF1]]:
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 1
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 1
+; CHECK-NEXT: [[TMP13:%.*]] = add nsw i64 [[TMP12]], %x
+; CHECK-NEXT: [[TMP14:%.*]] = udiv i64 [[TMP11]], [[TMP13]]
+; CHECK-NEXT: [[TMP15:%.*]] = insertelement <2 x i64> [[TMP9]], i64 [[TMP14]], i32 1
+; CHECK-NEXT: br label %[[PRED_UDIV_CONTINUE2]]
+; CHECK: [[PRED_UDIV_CONTINUE2]]:
+; CHECK-NEXT: [[TMP16:%.*]] = phi <2 x i64> [ [[TMP9]], %[[PRED_UDIV_CONTINUE]] ], [ [[TMP15]], %[[PRED_UDIV_IF1]] ]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP2]], <2 x i64> [[TMP16]], <2 x i64> [[WIDE_LOAD]]
+; CHECK-NEXT: [[TMP17]] = add <2 x i64> [[VEC_PHI]], [[PREDPHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
+;
+define i64 @predicated_udiv_scalarized_operand(i64* %a, i64 %x) optsize {
entry:
br label %for.body
@@ -43,7 +58,8 @@ for.body:
%r = phi i64 [ 0, %entry ], [ %tmp6, %for.inc ]
%tmp0 = getelementptr inbounds i64, i64* %a, i64 %i
%tmp2 = load i64, i64* %tmp0, align 4
- br i1 %c, label %if.then, label %for.inc
+ %cond0 = icmp sgt i64 %tmp2, 0
+ br i1 %cond0, label %if.then, label %for.inc
if.then:
%tmp3 = add nsw i64 %tmp2, %x
@@ -54,8 +70,8 @@ for.inc:
%tmp5 = phi i64 [ %tmp2, %for.body ], [ %tmp4, %if.then]
%tmp6 = add i64 %r, %tmp5
%i.next = add nuw nsw i64 %i, 1
- %cond = icmp slt i64 %i.next, 100
- br i1 %cond, label %for.body, label %for.end
+ %cond1 = icmp slt i64 %i.next, 100
+ br i1 %cond1, label %for.body, label %for.end
for.end:
%tmp7 = phi i64 [ %tmp6, %for.inc ]
diff --git a/test/Transforms/LoopVectorize/AArch64/first-order-recurrence.ll b/test/Transforms/LoopVectorize/AArch64/first-order-recurrence.ll
deleted file mode 100644
index fc68adb59df3..000000000000
--- a/test/Transforms/LoopVectorize/AArch64/first-order-recurrence.ll
+++ /dev/null
@@ -1,341 +0,0 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -dce -instcombine -S | FileCheck %s
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=2 -dce -instcombine -S | FileCheck %s --check-prefix=UNROLL
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=2 -S | FileCheck %s --check-prefix=UNROLL-NO-IC
-
-target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
-
-; CHECK-LABEL: @recurrence_1
-;
-; void recurrence_1(int *a, int *b, int n) {
-; for(int i = 0; i < n; i++)
-; b[i] = a[i] + a[i - 1]
-; }
-;
-; CHECK: vector.ph:
-; CHECK: %vector.recur.init = insertelement <4 x i32> undef, i32 %pre_load, i32 3
-;
-; CHECK: vector.body:
-; CHECK: %vector.recur = phi <4 x i32> [ %vector.recur.init, %vector.ph ], [ [[L1:%[a-zA-Z0-9.]+]], %vector.body ]
-; CHECK: [[L1]] = load <4 x i32>
-; CHECK: {{.*}} = shufflevector <4 x i32> %vector.recur, <4 x i32> [[L1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-;
-; CHECK: middle.block:
-; CHECK: %vector.recur.extract = extractelement <4 x i32> [[L1]], i32 3
-;
-; CHECK: scalar.ph:
-; CHECK: %scalar.recur.init = phi i32 [ %vector.recur.extract, %middle.block ], [ %pre_load, %vector.memcheck ], [ %pre_load, %min.iters.checked ], [ %pre_load, %for.preheader ]
-;
-; CHECK: scalar.body:
-; CHECK: %scalar.recur = phi i32 [ %scalar.recur.init, %scalar.ph ], [ {{.*}}, %scalar.body ]
-;
-; UNROLL: vector.body:
-; UNROLL: %vector.recur = phi <4 x i32> [ %vector.recur.init, %vector.ph ], [ [[L2:%[a-zA-Z0-9.]+]], %vector.body ]
-; UNROLL: [[L1:%[a-zA-Z0-9.]+]] = load <4 x i32>
-; UNROLL: [[L2]] = load <4 x i32>
-; UNROLL: {{.*}} = shufflevector <4 x i32> %vector.recur, <4 x i32> [[L1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-; UNROLL: {{.*}} = shufflevector <4 x i32> [[L1]], <4 x i32> [[L2]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-;
-; UNROLL: middle.block:
-; UNROLL: %vector.recur.extract = extractelement <4 x i32> [[L2]], i32 3
-;
-define void @recurrence_1(i32* nocapture readonly %a, i32* nocapture %b, i32 %n) {
-entry:
- br label %for.preheader
-
-for.preheader:
- %arrayidx.phi.trans.insert = getelementptr inbounds i32, i32* %a, i64 0
- %pre_load = load i32, i32* %arrayidx.phi.trans.insert
- br label %scalar.body
-
-scalar.body:
- %0 = phi i32 [ %pre_load, %for.preheader ], [ %1, %scalar.body ]
- %indvars.iv = phi i64 [ 0, %for.preheader ], [ %indvars.iv.next, %scalar.body ]
- %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %arrayidx32 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
- %1 = load i32, i32* %arrayidx32
- %arrayidx34 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %add35 = add i32 %1, %0
- store i32 %add35, i32* %arrayidx34
- %lftr.wideiv = trunc i64 %indvars.iv.next to i32
- %exitcond = icmp eq i32 %lftr.wideiv, %n
- br i1 %exitcond, label %for.exit, label %scalar.body
-
-for.exit:
- ret void
-}
-
-; CHECK-LABEL: @recurrence_2
-;
-; int recurrence_2(int *a, int n) {
-; int minmax;
-; for (int i = 0; i < n; ++i)
-; minmax = min(minmax, max(a[i] - a[i-1], 0));
-; return minmax;
-; }
-;
-; CHECK: vector.ph:
-; CHECK: %vector.recur.init = insertelement <4 x i32> undef, i32 %.pre, i32 3
-;
-; CHECK: vector.body:
-; CHECK: %vector.recur = phi <4 x i32> [ %vector.recur.init, %vector.ph ], [ [[L1:%[a-zA-Z0-9.]+]], %vector.body ]
-; CHECK: [[L1]] = load <4 x i32>
-; CHECK: {{.*}} = shufflevector <4 x i32> %vector.recur, <4 x i32> [[L1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-;
-; CHECK: middle.block:
-; CHECK: %vector.recur.extract = extractelement <4 x i32> [[L1]], i32 3
-;
-; CHECK: scalar.ph:
-; CHECK: %scalar.recur.init = phi i32 [ %vector.recur.extract, %middle.block ], [ %.pre, %min.iters.checked ], [ %.pre, %for.preheader ]
-;
-; CHECK: scalar.body:
-; CHECK: %scalar.recur = phi i32 [ %scalar.recur.init, %scalar.ph ], [ {{.*}}, %scalar.body ]
-;
-; UNROLL: vector.body:
-; UNROLL: %vector.recur = phi <4 x i32> [ %vector.recur.init, %vector.ph ], [ [[L2:%[a-zA-Z0-9.]+]], %vector.body ]
-; UNROLL: [[L1:%[a-zA-Z0-9.]+]] = load <4 x i32>
-; UNROLL: [[L2]] = load <4 x i32>
-; UNROLL: {{.*}} = shufflevector <4 x i32> %vector.recur, <4 x i32> [[L1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-; UNROLL: {{.*}} = shufflevector <4 x i32> [[L1]], <4 x i32> [[L2]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-;
-; UNROLL: middle.block:
-; UNROLL: %vector.recur.extract = extractelement <4 x i32> [[L2]], i32 3
-;
-define i32 @recurrence_2(i32* nocapture readonly %a, i32 %n) {
-entry:
- %cmp27 = icmp sgt i32 %n, 0
- br i1 %cmp27, label %for.preheader, label %for.cond.cleanup
-
-for.preheader:
- %arrayidx2.phi.trans.insert = getelementptr inbounds i32, i32* %a, i64 -1
- %.pre = load i32, i32* %arrayidx2.phi.trans.insert, align 4
- br label %scalar.body
-
-for.cond.cleanup.loopexit:
- %minmax.0.cond.lcssa = phi i32 [ %minmax.0.cond, %scalar.body ]
- br label %for.cond.cleanup
-
-for.cond.cleanup:
- %minmax.0.lcssa = phi i32 [ undef, %entry ], [ %minmax.0.cond.lcssa, %for.cond.cleanup.loopexit ]
- ret i32 %minmax.0.lcssa
-
-scalar.body:
- %0 = phi i32 [ %.pre, %for.preheader ], [ %1, %scalar.body ]
- %indvars.iv = phi i64 [ 0, %for.preheader ], [ %indvars.iv.next, %scalar.body ]
- %minmax.028 = phi i32 [ undef, %for.preheader ], [ %minmax.0.cond, %scalar.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %1 = load i32, i32* %arrayidx, align 4
- %sub3 = sub nsw i32 %1, %0
- %cmp4 = icmp sgt i32 %sub3, 0
- %cond = select i1 %cmp4, i32 %sub3, i32 0
- %cmp5 = icmp slt i32 %minmax.028, %cond
- %minmax.0.cond = select i1 %cmp5, i32 %minmax.028, i32 %cond
- %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %lftr.wideiv = trunc i64 %indvars.iv.next to i32
- %exitcond = icmp eq i32 %lftr.wideiv, %n
- br i1 %exitcond, label %for.cond.cleanup.loopexit, label %scalar.body
-}
-
-; CHECK-LABEL: @recurrence_3
-;
-; void recurrence_3(short *a, double *b, int n, float f, short p) {
-; b[0] = (double)a[0] - f * (double)p;
-; for (int i = 1; i < n; i++)
-; b[i] = (double)a[i] - f * (double)a[i - 1];
-; }
-;
-;
-; CHECK: vector.ph:
-; CHECK: %vector.recur.init = insertelement <4 x i16> undef, i16 %0, i32 3
-;
-; CHECK: vector.body:
-; CHECK: %vector.recur = phi <4 x i16> [ %vector.recur.init, %vector.ph ], [ [[L1:%[a-zA-Z0-9.]+]], %vector.body ]
-; CHECK: [[L1]] = load <4 x i16>
-; CHECK: {{.*}} = shufflevector <4 x i16> %vector.recur, <4 x i16> [[L1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-;
-; CHECK: middle.block:
-; CHECK: %vector.recur.extract = extractelement <4 x i16> [[L1]], i32 3
-;
-; CHECK: scalar.ph:
-; CHECK: %scalar.recur.init = phi i16 [ %vector.recur.extract, %middle.block ], [ %0, %vector.memcheck ], [ %0, %min.iters.checked ], [ %0, %for.preheader ]
-;
-; CHECK: scalar.body:
-; CHECK: %scalar.recur = phi i16 [ %scalar.recur.init, %scalar.ph ], [ {{.*}}, %scalar.body ]
-;
-; UNROLL: vector.body:
-; UNROLL: %vector.recur = phi <4 x i16> [ %vector.recur.init, %vector.ph ], [ [[L2:%[a-zA-Z0-9.]+]], %vector.body ]
-; UNROLL: [[L1:%[a-zA-Z0-9.]+]] = load <4 x i16>
-; UNROLL: [[L2]] = load <4 x i16>
-; UNROLL: {{.*}} = shufflevector <4 x i16> %vector.recur, <4 x i16> [[L1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-; UNROLL: {{.*}} = shufflevector <4 x i16> [[L1]], <4 x i16> [[L2]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-;
-; UNROLL: middle.block:
-; UNROLL: %vector.recur.extract = extractelement <4 x i16> [[L2]], i32 3
-;
-define void @recurrence_3(i16* nocapture readonly %a, double* nocapture %b, i32 %n, float %f, i16 %p) {
-entry:
- %0 = load i16, i16* %a, align 2
- %conv = sitofp i16 %0 to double
- %conv1 = fpext float %f to double
- %conv2 = sitofp i16 %p to double
- %mul = fmul fast double %conv2, %conv1
- %sub = fsub fast double %conv, %mul
- store double %sub, double* %b, align 8
- %cmp25 = icmp sgt i32 %n, 1
- br i1 %cmp25, label %for.preheader, label %for.end
-
-for.preheader:
- br label %scalar.body
-
-scalar.body:
- %1 = phi i16 [ %0, %for.preheader ], [ %2, %scalar.body ]
- %advars.iv = phi i64 [ %advars.iv.next, %scalar.body ], [ 1, %for.preheader ]
- %arrayidx5 = getelementptr inbounds i16, i16* %a, i64 %advars.iv
- %2 = load i16, i16* %arrayidx5, align 2
- %conv6 = sitofp i16 %2 to double
- %conv11 = sitofp i16 %1 to double
- %mul12 = fmul fast double %conv11, %conv1
- %sub13 = fsub fast double %conv6, %mul12
- %arrayidx15 = getelementptr inbounds double, double* %b, i64 %advars.iv
- store double %sub13, double* %arrayidx15, align 8
- %advars.iv.next = add nuw nsw i64 %advars.iv, 1
- %lftr.wideiv = trunc i64 %advars.iv.next to i32
- %exitcond = icmp eq i32 %lftr.wideiv, %n
- br i1 %exitcond, label %for.end.loopexit, label %scalar.body
-
-for.end.loopexit:
- br label %for.end
-
-for.end:
- ret void
-}
-
-; CHECK-LABEL: @PR26734
-;
-; void PR26734(short *a, int *b, int *c, int d, short *e) {
-; for (; d != 21; d++) {
-; *b &= *c;
-; *e = *a - 6;
-; *c = *e;
-; }
-; }
-;
-; CHECK-NOT: vector.ph:
-;
-define void @PR26734(i16* %a, i32* %b, i32* %c, i32 %d, i16* %e) {
-entry:
- %cmp4 = icmp eq i32 %d, 21
- br i1 %cmp4, label %entry.for.end_crit_edge, label %for.body.lr.ph
-
-entry.for.end_crit_edge:
- %.pre = load i32, i32* %b, align 4
- br label %for.end
-
-for.body.lr.ph:
- %0 = load i16, i16* %a, align 2
- %sub = add i16 %0, -6
- %conv2 = sext i16 %sub to i32
- %c.promoted = load i32, i32* %c, align 4
- %b.promoted = load i32, i32* %b, align 4
- br label %for.body
-
-for.body:
- %inc7 = phi i32 [ %d, %for.body.lr.ph ], [ %inc, %for.body ]
- %and6 = phi i32 [ %b.promoted, %for.body.lr.ph ], [ %and, %for.body ]
- %conv25 = phi i32 [ %c.promoted, %for.body.lr.ph ], [ %conv2, %for.body ]
- %and = and i32 %and6, %conv25
- %inc = add nsw i32 %inc7, 1
- %cmp = icmp eq i32 %inc, 21
- br i1 %cmp, label %for.cond.for.end_crit_edge, label %for.body
-
-for.cond.for.end_crit_edge:
- %and.lcssa = phi i32 [ %and, %for.body ]
- store i32 %conv2, i32* %c, align 4
- store i32 %and.lcssa, i32* %b, align 4
- store i16 %sub, i16* %e, align 2
- br label %for.end
-
-for.end:
- ret void
-}
-
-; CHECK-LABEL: @PR27246
-;
-; int PR27246() {
-; unsigned int e, n;
-; for (int i = 1; i < 49; ++i) {
-; for (int k = i; k > 1; --k)
-; e = k;
-; n = e;
-; }
-; return n;
-; }
-;
-; CHECK-NOT: vector.ph:
-;
-define i32 @PR27246() {
-entry:
- br label %for.cond1.preheader
-
-for.cond1.preheader:
- %i.016 = phi i32 [ 1, %entry ], [ %inc, %for.cond.cleanup3 ]
- %e.015 = phi i32 [ undef, %entry ], [ %e.1.lcssa, %for.cond.cleanup3 ]
- br label %for.cond1
-
-for.cond.cleanup:
- %e.1.lcssa.lcssa = phi i32 [ %e.1.lcssa, %for.cond.cleanup3 ]
- ret i32 %e.1.lcssa.lcssa
-
-for.cond1:
- %e.1 = phi i32 [ %k.0, %for.cond1 ], [ %e.015, %for.cond1.preheader ]
- %k.0 = phi i32 [ %dec, %for.cond1 ], [ %i.016, %for.cond1.preheader ]
- %cmp2 = icmp sgt i32 %k.0, 1
- %dec = add nsw i32 %k.0, -1
- br i1 %cmp2, label %for.cond1, label %for.cond.cleanup3
-
-for.cond.cleanup3:
- %e.1.lcssa = phi i32 [ %e.1, %for.cond1 ]
- %inc = add nuw nsw i32 %i.016, 1
- %exitcond = icmp eq i32 %inc, 49
- br i1 %exitcond, label %for.cond.cleanup, label %for.cond1.preheader
-}
-
-; CHECK-LABEL: @PR29559
-;
-; UNROLL-NO-IC: vector.ph:
-; UNROLL-NO-IC: br label %vector.body
-;
-; UNROLL-NO-IC: vector.body:
-; UNROLL-NO-IC: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-; UNROLL-NO-IC: %vector.recur = phi <4 x float*> [ undef, %vector.ph ], [ %[[I4:.+]], %vector.body ]
-; UNROLL-NO-IC: %[[G1:.+]] = getelementptr inbounds [3 x float], [3 x float]* undef, i64 0, i64 0
-; UNROLL-NO-IC: %[[I1:.+]] = insertelement <4 x float*> undef, float* %[[G1]], i32 0
-; UNROLL-NO-IC: %[[I2:.+]] = insertelement <4 x float*> %[[I1]], float* %[[G1]], i32 1
-; UNROLL-NO-IC: %[[I3:.+]] = insertelement <4 x float*> %[[I2]], float* %[[G1]], i32 2
-; UNROLL-NO-IC: %[[I4]] = insertelement <4 x float*> %[[I3]], float* %[[G1]], i32 3
-; UNROLL-NO-IC: {{.*}} = shufflevector <4 x float*> %vector.recur, <4 x float*> %[[I4]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-; UNROLL-NO-IC: {{.*}} = shufflevector <4 x float*> %[[I4]], <4 x float*> %[[I4]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-;
-; UNROLL-NO-IC: middle.block:
-; UNROLL-NO-IC: %vector.recur.extract = extractelement <4 x float*> %[[I4]], i32 3
-;
-; UNROLL-NO-IC: scalar.ph:
-; UNROLL-NO-IC: %scalar.recur.init = phi float* [ %vector.recur.extract, %middle.block ], [ undef, %min.iters.checked ], [ undef, %entry ]
-;
-; UNROLL-NO-IC: scalar.body:
-; UNROLL-NO-IC: %scalar.recur = phi float* [ %scalar.recur.init, %scalar.ph ], [ {{.*}}, %scalar.body ]
-;
-define void @PR29559() {
-entry:
- br label %scalar.body
-
-scalar.body:
- %i = phi i64 [ 0, %entry ], [ %i.next, %scalar.body ]
- %tmp2 = phi float* [ undef, %entry ], [ %tmp3, %scalar.body ]
- %tmp3 = getelementptr inbounds [3 x float], [3 x float]* undef, i64 0, i64 0
- %i.next = add nuw nsw i64 %i, 1
- %cond = icmp eq i64 %i.next, undef
- br i1 %cond, label %for.end, label %scalar.body
-
-for.end:
- ret void
-}
diff --git a/test/Transforms/LoopVectorize/AArch64/induction-trunc.ll b/test/Transforms/LoopVectorize/AArch64/induction-trunc.ll
new file mode 100644
index 000000000000..e8ef42562356
--- /dev/null
+++ b/test/Transforms/LoopVectorize/AArch64/induction-trunc.ll
@@ -0,0 +1,30 @@
+; RUN: opt < %s -force-vector-width=1 -force-vector-interleave=2 -loop-vectorize -S | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnu"
+
+; CHECK-LABEL: @non_primary_iv_trunc_free(
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 5
+; CHECK-NEXT: [[INDUCTION:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[INDUCTION1:%.*]] = add i64 [[OFFSET_IDX]], 5
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[INDUCTION]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[INDUCTION1]] to i32
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
+;
+define void @non_primary_iv_trunc_free(i64 %n) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
+ %tmp0 = trunc i64 %i to i32
+ %i.next = add nuw nsw i64 %i, 5
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/AArch64/interleaved-vs-scalar.ll b/test/Transforms/LoopVectorize/AArch64/interleaved-vs-scalar.ll
new file mode 100644
index 000000000000..0ebb7a92edae
--- /dev/null
+++ b/test/Transforms/LoopVectorize/AArch64/interleaved-vs-scalar.ll
@@ -0,0 +1,38 @@
+; REQUIRES: asserts
+; RUN: opt < %s -force-vector-width=2 -force-vector-interleave=1 -loop-vectorize -S -debug-only=loop-vectorize 2>&1 | FileCheck %s
+
+; This test shows an extremely high interleaving cost that should probably be
+; fixed. Due to the high cost, interleaving is not beneficial, and the cost
+; model chooses to scalarize the load instructions.
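+;
+; In C terms, the loop reads both fields of each pair (illustrative sketch;
+; the loaded values are intentionally unused in the IR below):
+;
+;   struct pair { char x; char y; };
+;   void test(struct pair *p, long n) {
+;     for (long i = 0; i < n; i++) {
+;       char a = p[i].x;
+;       char b = p[i].y;
+;     }
+;   }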
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnu"
+
+%pair = type { i8, i8 }
+
+; CHECK-LABEL: test
+; CHECK: Found an estimated cost of 20 for VF 2 For instruction: {{.*}} load i8
+; CHECK: Found an estimated cost of 0 for VF 2 For instruction: {{.*}} load i8
+; CHECK: vector.body
+; CHECK: load i8
+; CHECK: load i8
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
+
+define void @test(%pair* %p, i64 %n) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+ %tmp0 = getelementptr %pair, %pair* %p, i64 %i, i32 0
+ %tmp1 = load i8, i8* %tmp0, align 1
+ %tmp2 = getelementptr %pair, %pair* %p, i64 %i, i32 1
+ %tmp3 = load i8, i8* %tmp2, align 1
+ %i.next = add nuw nsw i64 %i, 1
+ %cond = icmp eq i64 %i.next, %n
+ br i1 %cond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
diff --git a/test/Transforms/LoopVectorize/AArch64/interleaved_cost.ll b/test/Transforms/LoopVectorize/AArch64/interleaved_cost.ll
index df1f9c619408..54ee8fc6e73f 100644
--- a/test/Transforms/LoopVectorize/AArch64/interleaved_cost.ll
+++ b/test/Transforms/LoopVectorize/AArch64/interleaved_cost.ll
@@ -1,81 +1,189 @@
-; RUN: opt -S -debug-only=loop-vectorize -loop-vectorize -instcombine < %s 2>&1 | FileCheck %s
+; RUN: opt -loop-vectorize -force-vector-width=2 -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=VF_2
+; RUN: opt -loop-vectorize -force-vector-width=4 -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=VF_4
+; RUN: opt -loop-vectorize -force-vector-width=8 -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=VF_8
+; RUN: opt -loop-vectorize -force-vector-width=16 -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=VF_16
; REQUIRES: asserts
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-gnueabi"
-@AB = common global [1024 x i8] zeroinitializer, align 4
-@CD = common global [1024 x i8] zeroinitializer, align 4
+%i8.2 = type {i8, i8}
+define void @i8_factor_2(%i8.2* %data, i64 %n) {
+entry:
+ br label %for.body
+
+; VF_8-LABEL: Checking a loop in "i8_factor_2"
+; VF_8: Found an estimated cost of 2 for VF 8 For instruction: %tmp2 = load i8, i8* %tmp0, align 1
+; VF_8-NEXT: Found an estimated cost of 0 for VF 8 For instruction: %tmp3 = load i8, i8* %tmp1, align 1
+; VF_8-NEXT: Found an estimated cost of 0 for VF 8 For instruction: store i8 0, i8* %tmp0, align 1
+; VF_8-NEXT: Found an estimated cost of 2 for VF 8 For instruction: store i8 0, i8* %tmp1, align 1
+; VF_16-LABEL: Checking a loop in "i8_factor_2"
+; VF_16: Found an estimated cost of 2 for VF 16 For instruction: %tmp2 = load i8, i8* %tmp0, align 1
+; VF_16-NEXT: Found an estimated cost of 0 for VF 16 For instruction: %tmp3 = load i8, i8* %tmp1, align 1
+; VF_16-NEXT: Found an estimated cost of 0 for VF 16 For instruction: store i8 0, i8* %tmp0, align 1
+; VF_16-NEXT: Found an estimated cost of 2 for VF 16 For instruction: store i8 0, i8* %tmp1, align 1
+for.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+ %tmp0 = getelementptr inbounds %i8.2, %i8.2* %data, i64 %i, i32 0
+ %tmp1 = getelementptr inbounds %i8.2, %i8.2* %data, i64 %i, i32 1
+ %tmp2 = load i8, i8* %tmp0, align 1
+ %tmp3 = load i8, i8* %tmp1, align 1
+ store i8 0, i8* %tmp0, align 1
+ store i8 0, i8* %tmp1, align 1
+ %i.next = add nuw nsw i64 %i, 1
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
+
+%i16.2 = type {i16, i16}
+define void @i16_factor_2(%i16.2* %data, i64 %n) {
+entry:
+ br label %for.body
+
+; VF_4-LABEL: Checking a loop in "i16_factor_2"
+; VF_4: Found an estimated cost of 2 for VF 4 For instruction: %tmp2 = load i16, i16* %tmp0, align 2
+; VF_4-NEXT: Found an estimated cost of 0 for VF 4 For instruction: %tmp3 = load i16, i16* %tmp1, align 2
+; VF_4-NEXT: Found an estimated cost of 0 for VF 4 For instruction: store i16 0, i16* %tmp0, align 2
+; VF_4-NEXT: Found an estimated cost of 2 for VF 4 For instruction: store i16 0, i16* %tmp1, align 2
+; VF_8-LABEL: Checking a loop in "i16_factor_2"
+; VF_8: Found an estimated cost of 2 for VF 8 For instruction: %tmp2 = load i16, i16* %tmp0, align 2
+; VF_8-NEXT: Found an estimated cost of 0 for VF 8 For instruction: %tmp3 = load i16, i16* %tmp1, align 2
+; VF_8-NEXT: Found an estimated cost of 0 for VF 8 For instruction: store i16 0, i16* %tmp0, align 2
+; VF_8-NEXT: Found an estimated cost of 2 for VF 8 For instruction: store i16 0, i16* %tmp1, align 2
+; VF_16-LABEL: Checking a loop in "i16_factor_2"
+; VF_16: Found an estimated cost of 4 for VF 16 For instruction: %tmp2 = load i16, i16* %tmp0, align 2
+; VF_16-NEXT: Found an estimated cost of 0 for VF 16 For instruction: %tmp3 = load i16, i16* %tmp1, align 2
+; VF_16-NEXT: Found an estimated cost of 0 for VF 16 For instruction: store i16 0, i16* %tmp0, align 2
+; VF_16-NEXT: Found an estimated cost of 4 for VF 16 For instruction: store i16 0, i16* %tmp1, align 2
+for.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+ %tmp0 = getelementptr inbounds %i16.2, %i16.2* %data, i64 %i, i32 0
+ %tmp1 = getelementptr inbounds %i16.2, %i16.2* %data, i64 %i, i32 1
+ %tmp2 = load i16, i16* %tmp0, align 2
+ %tmp3 = load i16, i16* %tmp1, align 2
+ store i16 0, i16* %tmp0, align 2
+ store i16 0, i16* %tmp1, align 2
+ %i.next = add nuw nsw i64 %i, 1
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
-define void @test_byte_interleaved_cost(i8 %C, i8 %D) {
+for.end:
+ ret void
+}
+
+%i32.2 = type {i32, i32}
+define void @i32_factor_2(%i32.2* %data, i64 %n) {
entry:
br label %for.body
-; 8xi8 and 16xi8 are valid i8 vector types, so the cost of the interleaved
-; access group is 2.
-
-; CHECK: LV: Checking a loop in "test_byte_interleaved_cost"
-; CHECK: LV: Found an estimated cost of 2 for VF 8 For instruction: %tmp = load i8, i8* %arrayidx0, align 4
-; CHECK: LV: Found an estimated cost of 2 for VF 16 For instruction: %tmp = load i8, i8* %arrayidx0, align 4
-
-for.body: ; preds = %for.body, %entry
- %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx0 = getelementptr inbounds [1024 x i8], [1024 x i8]* @AB, i64 0, i64 %indvars.iv
- %tmp = load i8, i8* %arrayidx0, align 4
- %tmp1 = or i64 %indvars.iv, 1
- %arrayidx1 = getelementptr inbounds [1024 x i8], [1024 x i8]* @AB, i64 0, i64 %tmp1
- %tmp2 = load i8, i8* %arrayidx1, align 4
- %add = add nsw i8 %tmp, %C
- %mul = mul nsw i8 %tmp2, %D
- %arrayidx2 = getelementptr inbounds [1024 x i8], [1024 x i8]* @CD, i64 0, i64 %indvars.iv
- store i8 %add, i8* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds [1024 x i8], [1024 x i8]* @CD, i64 0, i64 %tmp1
- store i8 %mul, i8* %arrayidx3, align 4
- %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
- %cmp = icmp slt i64 %indvars.iv.next, 1024
- br i1 %cmp, label %for.body, label %for.end
-
-for.end: ; preds = %for.body
+; VF_2-LABEL: Checking a loop in "i32_factor_2"
+; VF_2: Found an estimated cost of 2 for VF 2 For instruction: %tmp2 = load i32, i32* %tmp0, align 4
+; VF_2-NEXT: Found an estimated cost of 0 for VF 2 For instruction: %tmp3 = load i32, i32* %tmp1, align 4
+; VF_2-NEXT: Found an estimated cost of 0 for VF 2 For instruction: store i32 0, i32* %tmp0, align 4
+; VF_2-NEXT: Found an estimated cost of 2 for VF 2 For instruction: store i32 0, i32* %tmp1, align 4
+; VF_4-LABEL: Checking a loop in "i32_factor_2"
+; VF_4: Found an estimated cost of 2 for VF 4 For instruction: %tmp2 = load i32, i32* %tmp0, align 4
+; VF_4-NEXT: Found an estimated cost of 0 for VF 4 For instruction: %tmp3 = load i32, i32* %tmp1, align 4
+; VF_4-NEXT: Found an estimated cost of 0 for VF 4 For instruction: store i32 0, i32* %tmp0, align 4
+; VF_4-NEXT: Found an estimated cost of 2 for VF 4 For instruction: store i32 0, i32* %tmp1, align 4
+; VF_8-LABEL: Checking a loop in "i32_factor_2"
+; VF_8: Found an estimated cost of 4 for VF 8 For instruction: %tmp2 = load i32, i32* %tmp0, align 4
+; VF_8-NEXT: Found an estimated cost of 0 for VF 8 For instruction: %tmp3 = load i32, i32* %tmp1, align 4
+; VF_8-NEXT: Found an estimated cost of 0 for VF 8 For instruction: store i32 0, i32* %tmp0, align 4
+; VF_8-NEXT: Found an estimated cost of 4 for VF 8 For instruction: store i32 0, i32* %tmp1, align 4
+; VF_16-LABEL: Checking a loop in "i32_factor_2"
+; VF_16: Found an estimated cost of 8 for VF 16 For instruction: %tmp2 = load i32, i32* %tmp0, align 4
+; VF_16-NEXT: Found an estimated cost of 0 for VF 16 For instruction: %tmp3 = load i32, i32* %tmp1, align 4
+; VF_16-NEXT: Found an estimated cost of 0 for VF 16 For instruction: store i32 0, i32* %tmp0, align 4
+; VF_16-NEXT: Found an estimated cost of 8 for VF 16 For instruction: store i32 0, i32* %tmp1, align 4
+for.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+ %tmp0 = getelementptr inbounds %i32.2, %i32.2* %data, i64 %i, i32 0
+ %tmp1 = getelementptr inbounds %i32.2, %i32.2* %data, i64 %i, i32 1
+ %tmp2 = load i32, i32* %tmp0, align 4
+ %tmp3 = load i32, i32* %tmp1, align 4
+ store i32 0, i32* %tmp0, align 4
+ store i32 0, i32* %tmp1, align 4
+ %i.next = add nuw nsw i64 %i, 1
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
ret void
}
-%ig.factor.8 = type { double*, double, double, double, double, double, double, double }
-define double @wide_interleaved_group(%ig.factor.8* %s, double %a, double %b, i32 %n) {
+%i64.2 = type {i64, i64}
+define void @i64_factor_2(%i64.2* %data, i64 %n) {
entry:
br label %for.body
-; Check the default cost of a strided load with a factor that is greater than
-; the maximum allowed. In this test, the interleave factor would be 8, which is
-; not supported.
+; VF_2-LABEL: Checking a loop in "i64_factor_2"
+; VF_2: Found an estimated cost of 2 for VF 2 For instruction: %tmp2 = load i64, i64* %tmp0, align 8
+; VF_2-NEXT: Found an estimated cost of 0 for VF 2 For instruction: %tmp3 = load i64, i64* %tmp1, align 8
+; VF_2-NEXT: Found an estimated cost of 0 for VF 2 For instruction: store i64 0, i64* %tmp0, align 8
+; VF_2-NEXT: Found an estimated cost of 2 for VF 2 For instruction: store i64 0, i64* %tmp1, align 8
+; VF_4-LABEL: Checking a loop in "i64_factor_2"
+; VF_4: Found an estimated cost of 4 for VF 4 For instruction: %tmp2 = load i64, i64* %tmp0, align 8
+; VF_4-NEXT: Found an estimated cost of 0 for VF 4 For instruction: %tmp3 = load i64, i64* %tmp1, align 8
+; VF_4-NEXT: Found an estimated cost of 0 for VF 4 For instruction: store i64 0, i64* %tmp0, align 8
+; VF_4-NEXT: Found an estimated cost of 4 for VF 4 For instruction: store i64 0, i64* %tmp1, align 8
+; VF_8-LABEL: Checking a loop in "i64_factor_2"
+; VF_8: Found an estimated cost of 8 for VF 8 For instruction: %tmp2 = load i64, i64* %tmp0, align 8
+; VF_8-NEXT: Found an estimated cost of 0 for VF 8 For instruction: %tmp3 = load i64, i64* %tmp1, align 8
+; VF_8-NEXT: Found an estimated cost of 0 for VF 8 For instruction: store i64 0, i64* %tmp0, align 8
+; VF_8-NEXT: Found an estimated cost of 8 for VF 8 For instruction: store i64 0, i64* %tmp1, align 8
+; VF_16-LABEL: Checking a loop in "i64_factor_2"
+; VF_16: Found an estimated cost of 16 for VF 16 For instruction: %tmp2 = load i64, i64* %tmp0, align 8
+; VF_16-NEXT: Found an estimated cost of 0 for VF 16 For instruction: %tmp3 = load i64, i64* %tmp1, align 8
+; VF_16-NEXT: Found an estimated cost of 0 for VF 16 For instruction: store i64 0, i64* %tmp0, align 8
+; VF_16-NEXT: Found an estimated cost of 16 for VF 16 For instruction: store i64 0, i64* %tmp1, align 8
+for.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+ %tmp0 = getelementptr inbounds %i64.2, %i64.2* %data, i64 %i, i32 0
+ %tmp1 = getelementptr inbounds %i64.2, %i64.2* %data, i64 %i, i32 1
+ %tmp2 = load i64, i64* %tmp0, align 8
+ %tmp3 = load i64, i64* %tmp1, align 8
+ store i64 0, i64* %tmp0, align 8
+ store i64 0, i64* %tmp1, align 8
+ %i.next = add nuw nsw i64 %i, 1
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
-; CHECK: LV: Checking a loop in "wide_interleaved_group"
-; CHECK: LV: Found an estimated cost of 6 for VF 2 For instruction: %1 = load double, double* %0, align 8
-; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction: %5 = load double, double* %4, align 8
-; CHECK: LV: Found an estimated cost of 10 for VF 2 For instruction: store double %9, double* %10, align 8
+%i64.8 = type {i64, i64, i64, i64, i64, i64, i64, i64}
+define void @i64_factor_8(%i64.8* %data, i64 %n) {
+entry:
+ br label %for.body
+; The interleave factor in this test is 8, which is greater than the maximum
+; allowed factor for AArch64 (4). Thus, we will fall back to the basic TTI
+; implementation for determining the cost of the interleaved load group. The
+; stores do not form a legal interleaved group because the group would contain
+; gaps.
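+;
+; In C terms, only fields 2 and 6 of the 8-field struct are accessed
+; (illustrative sketch; the field names are hypothetical):
+;
+;   for (long i = 0; i < n; i++) {
+;     long t2 = data[i].f2, t6 = data[i].f6;
+;     data[i].f2 = 0;
+;     data[i].f6 = 0;
+;   }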
+;
+; VF_2-LABEL: Checking a loop in "i64_factor_8"
+; VF_2: Found an estimated cost of 6 for VF 2 For instruction: %tmp2 = load i64, i64* %tmp0, align 8
+; VF_2-NEXT: Found an estimated cost of 0 for VF 2 For instruction: %tmp3 = load i64, i64* %tmp1, align 8
+; VF_2-NEXT: Found an estimated cost of 7 for VF 2 For instruction: store i64 0, i64* %tmp0, align 8
+; VF_2-NEXT: Found an estimated cost of 7 for VF 2 For instruction: store i64 0, i64* %tmp1, align 8
for.body:
%i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
- %r = phi double [ 0.000000e+00, %entry ], [ %12, %for.body ]
- %0 = getelementptr inbounds %ig.factor.8, %ig.factor.8* %s, i64 %i, i32 2
- %1 = load double, double* %0, align 8
- %2 = fcmp fast olt double %1, %a
- %3 = select i1 %2, double 0.000000e+00, double %1
- %4 = getelementptr inbounds %ig.factor.8, %ig.factor.8* %s, i64 %i, i32 6
- %5 = load double, double* %4, align 8
- %6 = fcmp fast olt double %5, %a
- %7 = select i1 %6, double 0.000000e+00, double %5
- %8 = fmul fast double %7, %b
- %9 = fadd fast double %8, %3
- %10 = getelementptr inbounds %ig.factor.8, %ig.factor.8* %s, i64 %i, i32 3
- store double %9, double* %10, align 8
- %11 = fmul fast double %9, %9
- %12 = fadd fast double %11, %r
+ %tmp0 = getelementptr inbounds %i64.8, %i64.8* %data, i64 %i, i32 2
+ %tmp1 = getelementptr inbounds %i64.8, %i64.8* %data, i64 %i, i32 6
+ %tmp2 = load i64, i64* %tmp0, align 8
+ %tmp3 = load i64, i64* %tmp1, align 8
+ store i64 0, i64* %tmp0, align 8
+ store i64 0, i64* %tmp1, align 8
%i.next = add nuw nsw i64 %i, 1
- %13 = trunc i64 %i.next to i32
- %cond = icmp eq i32 %13, %n
- br i1 %cond, label %for.exit, label %for.body
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
-for.exit:
- %r.lcssa = phi double [ %12, %for.body ]
- ret double %r.lcssa
+for.end:
+ ret void
}
diff --git a/test/Transforms/LoopVectorize/AArch64/loop-vectorization-factors.ll b/test/Transforms/LoopVectorize/AArch64/loop-vectorization-factors.ll
index c7ced757581a..d06e3fdba39c 100644
--- a/test/Transforms/LoopVectorize/AArch64/loop-vectorization-factors.ll
+++ b/test/Transforms/LoopVectorize/AArch64/loop-vectorization-factors.ll
@@ -234,12 +234,27 @@ for.body: ; preds = %entry, %for.body
br i1 %exitcond, label %for.cond.cleanup, label %for.body
}
-; CHECK-LABEL: @add_phifail2(
-; CHECK: load <16 x i8>, <16 x i8>*
-; CHECK: add nuw nsw <16 x i32>
-; CHECK: store <16 x i8>
; Function Attrs: nounwind
+; When we vectorize this loop, we generate correct code even when VF exactly
+; divides %len: the scalar remainder loop never runs, so the exit value of the
+; recurrence phi is extracted from the second-to-last vector lane and passed to
+; the for.cond.cleanup block. The vectorized loop returns the correct value
+; a_phi = p[len - 2].
define i8 @add_phifail2(i8* noalias nocapture readonly %p, i8* noalias nocapture %q, i32 %len) #0 {
+; CHECK-LABEL: @add_phifail2(
+; CHECK: vector.body:
+; CHECK: %wide.load = load <16 x i8>, <16 x i8>*
+; CHECK: %[[L1:.+]] = zext <16 x i8> %wide.load to <16 x i32>
+; CHECK: add nuw nsw <16 x i32>
+; CHECK: store <16 x i8>
+; CHECK: add i64 %index, 16
+; CHECK: icmp eq i64 %index.next, %n.vec
+; CHECK: middle.block:
+; CHECK: %vector.recur.extract = extractelement <16 x i32> %[[L1]], i32 15
+; CHECK: %vector.recur.extract.for.phi = extractelement <16 x i32> %[[L1]], i32 14
+; CHECK: for.cond.cleanup:
+; CHECK: %a_phi.lcssa = phi i32 [ %scalar.recur, %for.body ], [ %vector.recur.extract.for.phi, %middle.block ]
+; CHECK: %ret = trunc i32 %a_phi.lcssa to i8
+; CHECK: ret i8 %ret
entry:
br label %for.body
diff --git a/test/Transforms/LoopVectorize/AArch64/pr31900.ll b/test/Transforms/LoopVectorize/AArch64/pr31900.ll
new file mode 100644
index 000000000000..5ea38a4a246d
--- /dev/null
+++ b/test/Transforms/LoopVectorize/AArch64/pr31900.ll
@@ -0,0 +1,37 @@
+; RUN: opt -S -mtriple=aarch64-apple-ios -loop-vectorize -enable-interleaved-mem-accesses -force-vector-width=2 < %s | FileCheck %s
+
+; Reproducer for an address space fault in the LoopVectorizer (PR31900). Added
+; differently sized address space pointers (p:16:16-p4:32:16) to the aarch64
+; datalayout to reproduce the fault.
+
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128-p:16:16-p4:32:16"
+
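+; In this datalayout string, "p:16:16" makes default (address space 0)
+; pointers 16 bits wide with 16-bit alignment, while "p4:32:16" makes
+; addrspace(4) pointers 32 bits wide with 16-bit ABI alignment.
+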
+; Check that all the loads are scalarized
+; CHECK: load i16, i16*
+; CHECK: load i16, i16*
+; CHECK: load i16, i16 addrspace(4)*
+; CHECK: load i16, i16 addrspace(4)*
+
+%rec1445 = type { i16, i16, i16, i16, i16 }
+
+define void @foo() {
+bb1:
+ br label %bb4
+
+bb4:
+ %tmp1 = phi i16 [ undef, %bb1 ], [ %_tmp1013, %bb4 ]
+ %tmp2 = phi %rec1445* [ undef, %bb1 ], [ %_tmp1015, %bb4 ]
+ %tmp3 = phi %rec1445 addrspace(4)* [ undef, %bb1 ], [ %_tmp1017, %bb4 ]
+ %0 = getelementptr %rec1445, %rec1445* %tmp2, i16 0, i32 1
+ %_tmp987 = load i16, i16* %0, align 1
+ %1 = getelementptr %rec1445, %rec1445 addrspace(4)* %tmp3, i32 0, i32 1
+ %_tmp993 = load i16, i16 addrspace(4)* %1, align 1
+ %_tmp1013 = add i16 %tmp1, 1
+ %_tmp1015 = getelementptr %rec1445, %rec1445* %tmp2, i16 1
+ %_tmp1017 = getelementptr %rec1445, %rec1445 addrspace(4)* %tmp3, i32 1
+ %_tmp1019 = icmp ult i16 %_tmp1013, 24
+ br i1 %_tmp1019, label %bb4, label %bb16
+
+bb16:
+ unreachable
+}
diff --git a/test/Transforms/LoopVectorize/AArch64/smallest-and-widest-types.ll b/test/Transforms/LoopVectorize/AArch64/smallest-and-widest-types.ll
new file mode 100644
index 000000000000..1ae7dadeffd7
--- /dev/null
+++ b/test/Transforms/LoopVectorize/AArch64/smallest-and-widest-types.ll
@@ -0,0 +1,33 @@
+; REQUIRES: asserts
+; RUN: opt < %s -loop-vectorize -debug-only=loop-vectorize -disable-output 2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnu"
+
+; CHECK-LABEL: Checking a loop in "interleaved_access"
+; CHECK: The Smallest and Widest types: 64 / 64 bits
+;
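+; The only memory accesses in the loop are stores of i8* (64 bits with this
+; datalayout), so both the smallest and widest observed types are 64 bits.
+;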
+define void @interleaved_access(i8** %A, i64 %N) {
+for.ph:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ %i.next.3, %for.body ], [ 0, %for.ph ]
+ %tmp0 = getelementptr inbounds i8*, i8** %A, i64 %i
+ store i8* null, i8** %tmp0, align 8
+ %i.next.0 = add nuw nsw i64 %i, 1
+ %tmp1 = getelementptr inbounds i8*, i8** %A, i64 %i.next.0
+ store i8* null, i8** %tmp1, align 8
+ %i.next.1 = add nsw i64 %i, 2
+ %tmp2 = getelementptr inbounds i8*, i8** %A, i64 %i.next.1
+ store i8* null, i8** %tmp2, align 8
+ %i.next.2 = add nsw i64 %i, 3
+ %tmp3 = getelementptr inbounds i8*, i8** %A, i64 %i.next.2
+ store i8* null, i8** %tmp3, align 8
+ %i.next.3 = add nsw i64 %i, 4
+ %cond = icmp slt i64 %i.next.3, %N
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/AMDGPU/lit.local.cfg b/test/Transforms/LoopVectorize/AMDGPU/lit.local.cfg
new file mode 100644
index 000000000000..2a665f06be72
--- /dev/null
+++ b/test/Transforms/LoopVectorize/AMDGPU/lit.local.cfg
@@ -0,0 +1,2 @@
+if 'AMDGPU' not in config.root.targets:
+ config.unsupported = True
diff --git a/test/Transforms/LoopVectorize/AMDGPU/unroll-in-loop-vectorizer.ll b/test/Transforms/LoopVectorize/AMDGPU/unroll-in-loop-vectorizer.ll
new file mode 100644
index 000000000000..f303ed5377e2
--- /dev/null
+++ b/test/Transforms/LoopVectorize/AMDGPU/unroll-in-loop-vectorizer.ll
@@ -0,0 +1,28 @@
+; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=fiji -loop-vectorize < %s | FileCheck %s
+
+
+; For AMDGPU, loop unrolling in the loop vectorizer is disabled when VF == 1.
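+;
+; In C terms the loop is simply (illustrative sketch):
+;
+;   for (int i = 0; i < size; i++)
+;     inArray[i] += 6;
+;
+; so with VF == 1 and unrolling disabled, we expect exactly one store.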
+;
+; CHECK-LABEL: @small_loop(
+; CHECK: store i32
+; CHECK-NOT: store i32
+; CHECK: ret
+define amdgpu_kernel void @small_loop(i32* nocapture %inArray, i32 %size) nounwind {
+entry:
+ %0 = icmp sgt i32 %size, 0
+ br i1 %0, label %loop, label %exit
+
+loop: ; preds = %entry, %loop
+ %iv = phi i32 [ %iv1, %loop ], [ 0, %entry ]
+ %1 = getelementptr inbounds i32, i32* %inArray, i32 %iv
+ %2 = load i32, i32* %1, align 4
+ %3 = add nsw i32 %2, 6
+ store i32 %3, i32* %1, align 4
+ %iv1 = add i32 %iv, 1
+ %cond = icmp eq i32 %iv1, %size
+ br i1 %cond, label %exit, label %loop
+
+exit: ; preds = %loop, %entry
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/ARM/interleaved_cost.ll b/test/Transforms/LoopVectorize/ARM/interleaved_cost.ll
index de3626b57d83..29adec049f67 100644
--- a/test/Transforms/LoopVectorize/ARM/interleaved_cost.ll
+++ b/test/Transforms/LoopVectorize/ARM/interleaved_cost.ll
@@ -1,39 +1,147 @@
-; RUN: opt -S -debug-only=loop-vectorize -loop-vectorize -instcombine < %s 2>&1 | FileCheck %s
+; RUN: opt -loop-vectorize -force-vector-width=2 -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=VF_2
+; RUN: opt -loop-vectorize -force-vector-width=4 -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=VF_4
+; RUN: opt -loop-vectorize -force-vector-width=8 -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=VF_8
+; RUN: opt -loop-vectorize -force-vector-width=16 -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=VF_16
; REQUIRES: asserts
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "armv8--linux-gnueabihf"
-@AB = common global [1024 x i8] zeroinitializer, align 4
-@CD = common global [1024 x i8] zeroinitializer, align 4
+%i8.2 = type {i8, i8}
+define void @i8_factor_2(%i8.2* %data, i64 %n) {
+entry:
+ br label %for.body
+
+; VF_8-LABEL: Checking a loop in "i8_factor_2"
+; VF_8: Found an estimated cost of 2 for VF 8 For instruction: %tmp2 = load i8, i8* %tmp0, align 1
+; VF_8-NEXT: Found an estimated cost of 0 for VF 8 For instruction: %tmp3 = load i8, i8* %tmp1, align 1
+; VF_8-NEXT: Found an estimated cost of 0 for VF 8 For instruction: store i8 0, i8* %tmp0, align 1
+; VF_8-NEXT: Found an estimated cost of 2 for VF 8 For instruction: store i8 0, i8* %tmp1, align 1
+; VF_16-LABEL: Checking a loop in "i8_factor_2"
+; VF_16: Found an estimated cost of 2 for VF 16 For instruction: %tmp2 = load i8, i8* %tmp0, align 1
+; VF_16-NEXT: Found an estimated cost of 0 for VF 16 For instruction: %tmp3 = load i8, i8* %tmp1, align 1
+; VF_16-NEXT: Found an estimated cost of 0 for VF 16 For instruction: store i8 0, i8* %tmp0, align 1
+; VF_16-NEXT: Found an estimated cost of 2 for VF 16 For instruction: store i8 0, i8* %tmp1, align 1
+for.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+ %tmp0 = getelementptr inbounds %i8.2, %i8.2* %data, i64 %i, i32 0
+ %tmp1 = getelementptr inbounds %i8.2, %i8.2* %data, i64 %i, i32 1
+ %tmp2 = load i8, i8* %tmp0, align 1
+ %tmp3 = load i8, i8* %tmp1, align 1
+ store i8 0, i8* %tmp0, align 1
+ store i8 0, i8* %tmp1, align 1
+ %i.next = add nuw nsw i64 %i, 1
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
-define void @test_byte_interleaved_cost(i8 %C, i8 %D) {
+%i16.2 = type {i16, i16}
+define void @i16_factor_2(%i16.2* %data, i64 %n) {
entry:
br label %for.body
-; 8xi8 and 16xi8 are valid i8 vector types, so the cost of the interleaved
-; access group is 2.
-
-; CHECK: LV: Found an estimated cost of 2 for VF 8 For instruction: %tmp = load i8, i8* %arrayidx0, align 4
-; CHECK: LV: Found an estimated cost of 2 for VF 16 For instruction: %tmp = load i8, i8* %arrayidx0, align 4
-
-for.body: ; preds = %for.body, %entry
- %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx0 = getelementptr inbounds [1024 x i8], [1024 x i8]* @AB, i64 0, i64 %indvars.iv
- %tmp = load i8, i8* %arrayidx0, align 4
- %tmp1 = or i64 %indvars.iv, 1
- %arrayidx1 = getelementptr inbounds [1024 x i8], [1024 x i8]* @AB, i64 0, i64 %tmp1
- %tmp2 = load i8, i8* %arrayidx1, align 4
- %add = add nsw i8 %tmp, %C
- %mul = mul nsw i8 %tmp2, %D
- %arrayidx2 = getelementptr inbounds [1024 x i8], [1024 x i8]* @CD, i64 0, i64 %indvars.iv
- store i8 %add, i8* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds [1024 x i8], [1024 x i8]* @CD, i64 0, i64 %tmp1
- store i8 %mul, i8* %arrayidx3, align 4
- %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
- %cmp = icmp slt i64 %indvars.iv.next, 1024
- br i1 %cmp, label %for.body, label %for.end
-
-for.end: ; preds = %for.body
+; VF_4-LABEL: Checking a loop in "i16_factor_2"
+; VF_4: Found an estimated cost of 2 for VF 4 For instruction: %tmp2 = load i16, i16* %tmp0, align 2
+; VF_4-NEXT: Found an estimated cost of 0 for VF 4 For instruction: %tmp3 = load i16, i16* %tmp1, align 2
+; VF_4-NEXT: Found an estimated cost of 0 for VF 4 For instruction: store i16 0, i16* %tmp0, align 2
+; VF_4-NEXT: Found an estimated cost of 2 for VF 4 For instruction: store i16 0, i16* %tmp1, align 2
+; VF_8-LABEL: Checking a loop in "i16_factor_2"
+; VF_8: Found an estimated cost of 2 for VF 8 For instruction: %tmp2 = load i16, i16* %tmp0, align 2
+; VF_8-NEXT: Found an estimated cost of 0 for VF 8 For instruction: %tmp3 = load i16, i16* %tmp1, align 2
+; VF_8-NEXT: Found an estimated cost of 0 for VF 8 For instruction: store i16 0, i16* %tmp0, align 2
+; VF_8-NEXT: Found an estimated cost of 2 for VF 8 For instruction: store i16 0, i16* %tmp1, align 2
+; VF_16-LABEL: Checking a loop in "i16_factor_2"
+; VF_16: Found an estimated cost of 4 for VF 16 For instruction: %tmp2 = load i16, i16* %tmp0, align 2
+; VF_16-NEXT: Found an estimated cost of 0 for VF 16 For instruction: %tmp3 = load i16, i16* %tmp1, align 2
+; VF_16-NEXT: Found an estimated cost of 0 for VF 16 For instruction: store i16 0, i16* %tmp0, align 2
+; VF_16-NEXT: Found an estimated cost of 4 for VF 16 For instruction: store i16 0, i16* %tmp1, align 2
+for.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+ %tmp0 = getelementptr inbounds %i16.2, %i16.2* %data, i64 %i, i32 0
+ %tmp1 = getelementptr inbounds %i16.2, %i16.2* %data, i64 %i, i32 1
+ %tmp2 = load i16, i16* %tmp0, align 2
+ %tmp3 = load i16, i16* %tmp1, align 2
+ store i16 0, i16* %tmp0, align 2
+ store i16 0, i16* %tmp1, align 2
+ %i.next = add nuw nsw i64 %i, 1
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
+
+%i32.2 = type {i32, i32}
+define void @i32_factor_2(%i32.2* %data, i64 %n) {
+entry:
+ br label %for.body
+
+; VF_2-LABEL: Checking a loop in "i32_factor_2"
+; VF_2: Found an estimated cost of 2 for VF 2 For instruction: %tmp2 = load i32, i32* %tmp0, align 4
+; VF_2-NEXT: Found an estimated cost of 0 for VF 2 For instruction: %tmp3 = load i32, i32* %tmp1, align 4
+; VF_2-NEXT: Found an estimated cost of 0 for VF 2 For instruction: store i32 0, i32* %tmp0, align 4
+; VF_2-NEXT: Found an estimated cost of 2 for VF 2 For instruction: store i32 0, i32* %tmp1, align 4
+; VF_4-LABEL: Checking a loop in "i32_factor_2"
+; VF_4: Found an estimated cost of 2 for VF 4 For instruction: %tmp2 = load i32, i32* %tmp0, align 4
+; VF_4-NEXT: Found an estimated cost of 0 for VF 4 For instruction: %tmp3 = load i32, i32* %tmp1, align 4
+; VF_4-NEXT: Found an estimated cost of 0 for VF 4 For instruction: store i32 0, i32* %tmp0, align 4
+; VF_4-NEXT: Found an estimated cost of 2 for VF 4 For instruction: store i32 0, i32* %tmp1, align 4
+; VF_8-LABEL: Checking a loop in "i32_factor_2"
+; VF_8: Found an estimated cost of 4 for VF 8 For instruction: %tmp2 = load i32, i32* %tmp0, align 4
+; VF_8-NEXT: Found an estimated cost of 0 for VF 8 For instruction: %tmp3 = load i32, i32* %tmp1, align 4
+; VF_8-NEXT: Found an estimated cost of 0 for VF 8 For instruction: store i32 0, i32* %tmp0, align 4
+; VF_8-NEXT: Found an estimated cost of 4 for VF 8 For instruction: store i32 0, i32* %tmp1, align 4
+; VF_16-LABEL: Checking a loop in "i32_factor_2"
+; VF_16: Found an estimated cost of 8 for VF 16 For instruction: %tmp2 = load i32, i32* %tmp0, align 4
+; VF_16-NEXT: Found an estimated cost of 0 for VF 16 For instruction: %tmp3 = load i32, i32* %tmp1, align 4
+; VF_16-NEXT: Found an estimated cost of 0 for VF 16 For instruction: store i32 0, i32* %tmp0, align 4
+; VF_16-NEXT: Found an estimated cost of 8 for VF 16 For instruction: store i32 0, i32* %tmp1, align 4
+for.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+ %tmp0 = getelementptr inbounds %i32.2, %i32.2* %data, i64 %i, i32 0
+ %tmp1 = getelementptr inbounds %i32.2, %i32.2* %data, i64 %i, i32 1
+ %tmp2 = load i32, i32* %tmp0, align 4
+ %tmp3 = load i32, i32* %tmp1, align 4
+ store i32 0, i32* %tmp0, align 4
+ store i32 0, i32* %tmp1, align 4
+ %i.next = add nuw nsw i64 %i, 1
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
+
+%half.2 = type {half, half}
+define void @half_factor_2(%half.2* %data, i64 %n) {
+entry:
+ br label %for.body
+
+; VF_4-LABEL: Checking a loop in "half_factor_2"
+; VF_4: Found an estimated cost of 40 for VF 4 For instruction: %tmp2 = load half, half* %tmp0, align 2
+; VF_4-NEXT: Found an estimated cost of 0 for VF 4 For instruction: %tmp3 = load half, half* %tmp1, align 2
+; VF_4-NEXT: Found an estimated cost of 0 for VF 4 For instruction: store half 0xH0000, half* %tmp0, align 2
+; VF_4-NEXT: Found an estimated cost of 32 for VF 4 For instruction: store half 0xH0000, half* %tmp1, align 2
+; VF_8-LABEL: Checking a loop in "half_factor_2"
+; VF_8: Found an estimated cost of 80 for VF 8 For instruction: %tmp2 = load half, half* %tmp0, align 2
+; VF_8-NEXT: Found an estimated cost of 0 for VF 8 For instruction: %tmp3 = load half, half* %tmp1, align 2
+; VF_8-NEXT: Found an estimated cost of 0 for VF 8 For instruction: store half 0xH0000, half* %tmp0, align 2
+; VF_8-NEXT: Found an estimated cost of 64 for VF 8 For instruction: store half 0xH0000, half* %tmp1, align 2
+for.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+ %tmp0 = getelementptr inbounds %half.2, %half.2* %data, i64 %i, i32 0
+ %tmp1 = getelementptr inbounds %half.2, %half.2* %data, i64 %i, i32 1
+ %tmp2 = load half, half* %tmp0, align 2
+ %tmp3 = load half, half* %tmp1, align 2
+ store half 0., half* %tmp0, align 2
+ store half 0., half* %tmp1, align 2
+ %i.next = add nuw nsw i64 %i, 1
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
ret void
}
diff --git a/test/Transforms/LoopVectorize/SystemZ/branch-for-predicated-block.ll b/test/Transforms/LoopVectorize/SystemZ/branch-for-predicated-block.ll
new file mode 100644
index 000000000000..d2e594520332
--- /dev/null
+++ b/test/Transforms/LoopVectorize/SystemZ/branch-for-predicated-block.ll
@@ -0,0 +1,38 @@
+; REQUIRES: asserts
+; RUN: opt -mtriple=s390x-unknown-linux -mcpu=z13 -loop-vectorize \
+; RUN: -force-vector-width=2 -debug-only=loop-vectorize \
+; RUN: -disable-output < %s 2>&1 | FileCheck %s
+
+; Check costs for branches inside a vectorized loop around predicated
+; blocks. Each such branch will be guarded with an extractelement from the
+; vector compare plus a test under mask instruction. This cost is modelled on
+; the extractelement of i1.
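+;
+; For VF 2, the guarded branch looks roughly like this (illustrative sketch):
+;
+;   %c = icmp sgt <2 x i32> %wide.load, zeroinitializer
+;   %e = extractelement <2 x i1> %c, i32 0
+;   br i1 %e, label %pred.store.if, label %pred.store.continue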
+
+define void @fun(i32* %arr, i64 %trip.count) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32, i32* %arr, i64 %indvars.iv
+ %l = load i32, i32* %arrayidx, align 4
+ %cmp55 = icmp sgt i32 %l, 0
+ br i1 %cmp55, label %if.then, label %for.inc
+
+if.then:
+ %sub = sub nsw i32 0, %l
+ store i32 %sub, i32* %arrayidx, align 4
+ br label %for.inc
+
+for.inc:
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %trip.count
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit:
+ ret void
+
+; CHECK: LV: Found an estimated cost of 5 for VF 2 For instruction: br i1 %cmp55, label %if.then, label %for.inc
+; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction: br label %for.inc
+; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction: br i1 %exitcond, label %for.end.loopexit, label %for.body
+}
diff --git a/test/Transforms/LoopVectorize/SystemZ/lit.local.cfg b/test/Transforms/LoopVectorize/SystemZ/lit.local.cfg
new file mode 100644
index 000000000000..2f3cf7d3f043
--- /dev/null
+++ b/test/Transforms/LoopVectorize/SystemZ/lit.local.cfg
@@ -0,0 +1,2 @@
+if 'SystemZ' not in config.root.targets:
+ config.unsupported = True
diff --git a/test/Transforms/LoopVectorize/SystemZ/load-store-scalarization-cost.ll b/test/Transforms/LoopVectorize/SystemZ/load-store-scalarization-cost.ll
new file mode 100644
index 000000000000..e7096c29b994
--- /dev/null
+++ b/test/Transforms/LoopVectorize/SystemZ/load-store-scalarization-cost.ll
@@ -0,0 +1,33 @@
+; REQUIRES: asserts
+; RUN: opt -mtriple=s390x-unknown-linux -mcpu=z13 -loop-vectorize \
+; RUN: -force-vector-width=4 -debug-only=loop-vectorize \
+; RUN: -disable-output -enable-interleaved-mem-accesses=false < %s 2>&1 | \
+; RUN: FileCheck %s
+;
+; Check that a scalarized load/store does not get a cost for inserts/
+; extracts, since z13 supports element load/store.
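+;
+; With VF 4, the scalarized load becomes, roughly, four element loads feeding
+; insertelements (illustrative sketch):
+;
+;   %l0 = load i32, i32* %ptr0
+;   %v0 = insertelement <4 x i32> undef, i32 %l0, i32 0
+;   ...and so on for lanes 1-3...
+;
+; On z13 these map to vector element load/store instructions, so the cost is
+; 4 (one per lane) with no additional insert/extract overhead.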
+
+define void @fun(i32* %data, i64 %n) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+ %tmp0 = getelementptr inbounds i32, i32* %data, i64 %i
+ %tmp1 = load i32, i32* %tmp0, align 4
+ %tmp2 = add i32 %tmp1, 1
+ store i32 %tmp2, i32* %tmp0, align 4
+ %i.next = add nuw nsw i64 %i, 2
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+
+; CHECK: LV: Found an estimated cost of 4 for VF 4 For instruction: %tmp1 = load i32, i32* %tmp0, align 4
+; CHECK: LV: Found an estimated cost of 4 for VF 4 For instruction: store i32 %tmp2, i32* %tmp0, align 4
+
+; CHECK: LV: Scalarizing: %tmp1 = load i32, i32* %tmp0, align 4
+; CHECK: LV: Scalarizing: store i32 %tmp2, i32* %tmp0, align 4
+}
+
diff --git a/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs.ll b/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs.ll
new file mode 100644
index 000000000000..5c15ee4f2d9f
--- /dev/null
+++ b/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs.ll
@@ -0,0 +1,70 @@
+; REQUIRES: asserts
+; RUN: opt -mtriple=s390x-unknown-linux -mcpu=z13 -loop-vectorize \
+; RUN: -force-vector-width=4 -debug-only=loop-vectorize \
+; RUN: -disable-output < %s 2>&1 | FileCheck %s
+;
+; Check that the loop vectorizer performs memory interleaving with accurate
+; cost estimations.
+
+
+; Simple case where just the load is interleaved, because the store group
+; would have gaps.
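+;
+; In C terms (illustrative sketch), every other element is updated, so the
+; loads can form a stride-2 interleave group while a store group would have
+; gaps:
+;
+;   for (long i = 0; i < n; i += 2)
+;     data[i] += 1;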
+define void @fun0(i32* %data, i64 %n) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+ %tmp0 = getelementptr inbounds i32, i32* %data, i64 %i
+ %tmp1 = load i32, i32* %tmp0, align 4
+ %tmp2 = add i32 %tmp1, 1
+ store i32 %tmp2, i32* %tmp0, align 4
+ %i.next = add nuw nsw i64 %i, 2
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+
+; CHECK: LV: Creating an interleave group with: %tmp1 = load i32, i32* %tmp0, align 4
+; CHECK: LV: Found an estimated cost of 3 for VF 4 For instruction: %tmp1 = load i32, i32* %tmp0, align 4
+; (vl; vl; vperm)
+}
+
+; Interleaving of both loads and stores.
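+;
+; In C terms (illustrative sketch), adjacent elements are swapped, so both
+; the loads and the stores form gap-free stride-2 interleave groups:
+;
+;   for (long i = 0; i < n; i += 2) {
+;     int t = data[i];
+;     data[i] = data[i + 1];
+;     data[i + 1] = t;
+;   }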
+define void @fun1(i32* %data, i64 %n) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+ %tmp0 = getelementptr inbounds i32, i32* %data, i64 %i
+ %tmp1 = load i32, i32* %tmp0, align 4
+ %i_1 = add i64 %i, 1
+ %tmp2 = getelementptr inbounds i32, i32* %data, i64 %i_1
+ %tmp3 = load i32, i32* %tmp2, align 4
+ store i32 %tmp1, i32* %tmp2, align 4
+ store i32 %tmp3, i32* %tmp0, align 4
+ %i.next = add nuw nsw i64 %i, 2
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+
+; CHECK: LV: Creating an interleave group with: store i32 %tmp3, i32* %tmp0, align 4
+; CHECK: LV: Inserted: store i32 %tmp1, i32* %tmp2, align 4
+; CHECK: into the interleave group with store i32 %tmp3, i32* %tmp0, align 4
+; CHECK: LV: Creating an interleave group with: %tmp3 = load i32, i32* %tmp2, align 4
+; CHECK: LV: Inserted: %tmp1 = load i32, i32* %tmp0, align 4
+; CHECK: into the interleave group with %tmp3 = load i32, i32* %tmp2, align 4
+
+; CHECK: LV: Found an estimated cost of 4 for VF 4 For instruction: %tmp1 = load i32, i32* %tmp0, align 4
+; CHECK: LV: Found an estimated cost of 0 for VF 4 For instruction: %tmp3 = load i32, i32* %tmp2, align 4
+; (vl; vl; vperm, vpkg)
+
+; CHECK: LV: Found an estimated cost of 0 for VF 4 For instruction: store i32 %tmp1, i32* %tmp2, align 4
+; CHECK: LV: Found an estimated cost of 4 for VF 4 For instruction: store i32 %tmp3, i32* %tmp0, align 4
+; (vmrlf; vmrhf; vst; vst)
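+; (The store group is presumably modeled as two merges to re-interleave the
+; lanes plus two vector stores, hence the cost of 4.)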
+}
+
diff --git a/test/Transforms/LoopVectorize/X86/avx512.ll b/test/Transforms/LoopVectorize/X86/avx512.ll
index fb01454c253b..1eb1cd3f5d7a 100644
--- a/test/Transforms/LoopVectorize/X86/avx512.ll
+++ b/test/Transforms/LoopVectorize/X86/avx512.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-apple-macosx10.9.0"
; loop.
; CHECK-LABEL: f:
-; CHECK: vmovups %zmm{{.}},
+; CHECK: vmovdqu32 %zmm{{.}},
; CHECK-NOT: %ymm
define void @f(i32* %a, i32 %n) {
diff --git a/test/Transforms/LoopVectorize/X86/consecutive-ptr-uniforms.ll b/test/Transforms/LoopVectorize/X86/consecutive-ptr-uniforms.ll
index 32bfcd2275ac..82f2e064a581 100644
--- a/test/Transforms/LoopVectorize/X86/consecutive-ptr-uniforms.ll
+++ b/test/Transforms/LoopVectorize/X86/consecutive-ptr-uniforms.ll
@@ -13,22 +13,33 @@ target triple = "x86_64-unknown-linux-gnu"
; scatter operation. %tmp3 (and the induction variable) should not be marked
; uniform-after-vectorization.
;
-; CHECK: LV: Found uniform instruction: %tmp0 = getelementptr inbounds %data, %data* %d, i64 0, i32 3, i64 %i
-; CHECK-NOT: LV: Found uniform instruction: %tmp3 = getelementptr inbounds %data, %data* %d, i64 0, i32 0, i64 %i
-; CHECK-NOT: LV: Found uniform instruction: %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
-; CHECK-NOT: LV: Found uniform instruction: %i.next = add nuw nsw i64 %i, 5
-; CHECK: vector.body:
-; CHECK: %vec.ind = phi <16 x i64>
-; CHECK: %[[T0:.+]] = extractelement <16 x i64> %vec.ind, i32 0
-; CHECK: %[[T1:.+]] = getelementptr inbounds %data, %data* %d, i64 0, i32 3, i64 %[[T0]]
-; CHECK: %[[T2:.+]] = bitcast float* %[[T1]] to <80 x float>*
-; CHECK: load <80 x float>, <80 x float>* %[[T2]], align 4
-; CHECK: %[[T3:.+]] = getelementptr inbounds %data, %data* %d, i64 0, i32 0, i64 %[[T0]]
-; CHECK: %[[T4:.+]] = bitcast float* %[[T3]] to <80 x float>*
-; CHECK: load <80 x float>, <80 x float>* %[[T4]], align 4
-; CHECK: %VectorGep = getelementptr inbounds %data, %data* %d, i64 0, i32 0, <16 x i64> %vec.ind
-; CHECK: call void @llvm.masked.scatter.v16f32({{.*}}, <16 x float*> %VectorGep, {{.*}})
-; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
+; CHECK: LV: Found uniform instruction: %tmp0 = getelementptr inbounds %data, %data* %d, i64 0, i32 3, i64 %i
+; CHECK-NOT: LV: Found uniform instruction: %tmp3 = getelementptr inbounds %data, %data* %d, i64 0, i32 0, i64 %i
+; CHECK-NOT: LV: Found uniform instruction: %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
+; CHECK-NOT: LV: Found uniform instruction: %i.next = add nuw nsw i64 %i, 5
+; CHECK: vector.ph:
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x float> undef, float %x, i32 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x float> [[BROADCAST_SPLATINSERT]], <16 x float> undef, <16 x i32> zeroinitializer
+; CHECK-NEXT: br label %vector.body
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <16 x i64> [ <i64 0, i64 5, i64 10, i64 15, i64 20, i64 25, i64 30, i64 35, i64 40, i64 45, i64 50, i64 55, i64 60, i64 65, i64 70, i64 75>, %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 5
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds %data, %data* %d, i64 0, i32 3, i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast float* [[TMP0]] to <80 x float>*
+; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <80 x float>, <80 x float>* [[TMP1]], align 4
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <80 x float> [[WIDE_VEC]], <80 x float> undef, <16 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35, i32 40, i32 45, i32 50, i32 55, i32 60, i32 65, i32 70, i32 75>
+; CHECK-NEXT: [[TMP2:%.*]] = fmul <16 x float> [[BROADCAST_SPLAT]], [[STRIDED_VEC]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds %data, %data* %d, i64 0, i32 0, <16 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[BC:%.*]] = bitcast <16 x float*> [[TMP3]] to <16 x <80 x float>*>
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <16 x <80 x float>*> [[BC]], i32 0
+; CHECK-NEXT: [[WIDE_VEC1:%.*]] = load <80 x float>, <80 x float>* [[TMP4]], align 4
+; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = shufflevector <80 x float> [[WIDE_VEC1]], <80 x float> undef, <16 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35, i32 40, i32 45, i32 50, i32 55, i32 60, i32 65, i32 70, i32 75>
+; CHECK-NEXT: [[TMP5:%.*]] = fadd <16 x float> [[STRIDED_VEC2]], [[TMP2]]
+; CHECK-NEXT: call void @llvm.masked.scatter.v16f32(<16 x float> [[TMP5]], <16 x float*> [[TMP3]], i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], <i64 80, i64 80, i64 80, i64 80, i64 80, i64 80, i64 80, i64 80, i64 80, i64 80, i64 80, i64 80, i64 80, i64 80, i64 80, i64 80>
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
%data = type { [32000 x float], [3 x i32], [4 x i8], [32000 x float] }
diff --git a/test/Transforms/LoopVectorize/X86/gather-vs-interleave.ll b/test/Transforms/LoopVectorize/X86/gather-vs-interleave.ll
new file mode 100644
index 000000000000..76b6cae5c3b4
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/gather-vs-interleave.ll
@@ -0,0 +1,41 @@
+; RUN: opt -loop-vectorize -S -mcpu=skylake-avx512 < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; This test checks that a "gather" operation is chosen, since its cost is
+; lower than that of the interleaving pattern.
+;
+;unsigned long A[SIZE];
+;unsigned long B[SIZE];
+;
+;void foo() {
+; for (int i=0; i<N; i+=8) {
+; B[i] = A[i] + 5;
+; }
+;}
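+; (With a stride of 8, an interleave group would load seven unused elements
+; for every used one, so a masked gather is presumably the cheaper strategy
+; on skylake-avx512.)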
+
+@A = global [10240 x i64] zeroinitializer, align 16
+@B = global [10240 x i64] zeroinitializer, align 16
+
+
+; CHECK-LABEL: strided_load_i64
+; CHECK: masked.gather
+define void @strided_load_i64() {
+ br label %1
+
+; <label>:1: ; preds = %0, %1
+ %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
+ %2 = getelementptr inbounds [10240 x i64], [10240 x i64]* @A, i64 0, i64 %indvars.iv
+ %3 = load i64, i64* %2, align 16
+ %4 = add i64 %3, 5
+ %5 = getelementptr inbounds [10240 x i64], [10240 x i64]* @B, i64 0, i64 %indvars.iv
+ store i64 %4, i64* %5, align 16
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 8
+ %6 = icmp slt i64 %indvars.iv.next, 1024
+ br i1 %6, label %1, label %7
+
+; <label>:7: ; preds = %1
+ ret void
+}
+
diff --git a/test/Transforms/LoopVectorize/X86/int128_no_gather.ll b/test/Transforms/LoopVectorize/X86/int128_no_gather.ll
index fbea275cb40f..4d7c0b6f64b8 100644
--- a/test/Transforms/LoopVectorize/X86/int128_no_gather.ll
+++ b/test/Transforms/LoopVectorize/X86/int128_no_gather.ll
@@ -71,6 +71,6 @@ declare i32 @printf(i8*, ...) #1
; Function Attrs: nounwind
declare i32 @puts(i8* nocapture readonly) #2
-attributes #0 = { noinline nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="skylake-avx512" "target-features"="+adx,+aes,+avx,+avx2,+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl,+bmi,+bmi2,+clflushopt,+clwb,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+mpx,+pclmul,+pcommit,+pku,+popcnt,+rdrnd,+rdseed,+rtm,+sgx,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="skylake-avx512" "target-features"="+adx,+aes,+avx,+avx2,+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl,+bmi,+bmi2,+clflushopt,+clwb,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+mpx,+pclmul,+pcommit,+pku,+popcnt,+rdrnd,+rdseed,+rtm,+sgx,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { noinline nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="skylake-avx512" "target-features"="+adx,+aes,+avx,+avx2,+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl,+bmi,+bmi2,+clflushopt,+clwb,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+mpx,+pclmul,+pku,+popcnt,+rdrnd,+rdseed,+rtm,+sgx,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="skylake-avx512" "target-features"="+adx,+aes,+avx,+avx2,+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl,+bmi,+bmi2,+clflushopt,+clwb,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+mpx,+pclmul,+pku,+popcnt,+rdrnd,+rdseed,+rtm,+sgx,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #2 = { nounwind }
diff --git a/test/Transforms/LoopVectorize/X86/interleaving.ll b/test/Transforms/LoopVectorize/X86/interleaving.ll
index de5db5324381..9294c92b5759 100644
--- a/test/Transforms/LoopVectorize/X86/interleaving.ll
+++ b/test/Transforms/LoopVectorize/X86/interleaving.ll
@@ -1,4 +1,5 @@
; RUN: opt -S -mtriple=x86_64-pc_linux -loop-vectorize -instcombine < %s | FileCheck %s --check-prefix=NORMAL
+; RUN: opt -S -mtriple=x86_64-pc_linux -loop-vectorize -instcombine -mcpu=slm < %s | FileCheck %s --check-prefix=NORMAL
; RUN: opt -S -mtriple=x86_64-pc_linux -loop-vectorize -instcombine -mcpu=atom < %s | FileCheck %s --check-prefix=ATOM
; NORMAL-LABEL: foo
diff --git a/test/Transforms/LoopVectorize/X86/metadata-enable.ll b/test/Transforms/LoopVectorize/X86/metadata-enable.ll
index 74c0c16086fe..e1793bcc3218 100644
--- a/test/Transforms/LoopVectorize/X86/metadata-enable.ll
+++ b/test/Transforms/LoopVectorize/X86/metadata-enable.ll
@@ -1,13 +1,14 @@
; RUN: opt < %s -mcpu=corei7 -O1 -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O1
; RUN: opt < %s -mcpu=corei7 -O2 -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O2
-; RUN: opt < %s -mcpu=corei7 -O3 -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O3
+; RUN: opt < %s -mcpu=corei7 -O3 -S -unroll-threshold=150 -unroll-allow-partial=0 | FileCheck %s --check-prefix=O3
+; RUN: opt < %s -mcpu=corei7 -O3 -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O3DEFAULT
; RUN: opt < %s -mcpu=corei7 -Os -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=Os
; RUN: opt < %s -mcpu=corei7 -Oz -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=Oz
; RUN: opt < %s -mcpu=corei7 -O1 -vectorize-loops -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O1VEC
; RUN: opt < %s -mcpu=corei7 -Oz -vectorize-loops -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=OzVEC
; RUN: opt < %s -mcpu=corei7 -O1 -loop-vectorize -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O1VEC2
; RUN: opt < %s -mcpu=corei7 -Oz -loop-vectorize -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=OzVEC2
-; RUN: opt < %s -mcpu=corei7 -O3 -disable-loop-vectorization -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O3DIS
+; RUN: opt < %s -mcpu=corei7 -O3 -unroll-threshold=150 -disable-loop-vectorization -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O3DIS
; This file tests the llvm.loop.vectorize.enable metadata forcing
; vectorization even when optimization levels are too low, or when
@@ -25,6 +26,9 @@ target triple = "x86_64-unknown-linux-gnu"
; O3-LABEL: @enabled(
; O3: store <4 x i32>
; O3: ret i32
+; O3DEFAULT-LABEL: @enabled(
+; O3DEFAULT: store <4 x i32>
+; O3DEFAULT: ret i32
; Pragma always wins!
; O3DIS-LABEL: @enabled(
; O3DIS: store <4 x i32>
@@ -77,6 +81,9 @@ for.end: ; preds = %for.body
; O3-LABEL: @nopragma(
; O3: store <4 x i32>
; O3: ret i32
+; O3DEFAULT-LABEL: @nopragma(
+; O3DEFAULT: store <4 x i32>
+; O3DEFAULT: ret i32
; O3DIS-LABEL: @nopragma(
; O3DIS-NOT: store <4 x i32>
; O3DIS: ret i32
@@ -128,6 +135,9 @@ for.end: ; preds = %for.body
; O3-LABEL: @disabled(
; O3-NOT: store <4 x i32>
; O3: ret i32
+; O3DEFAULT-LABEL: @disabled(
+; O3DEFAULT: store <4 x i32>
+; O3DEFAULT: ret i32
; O3DIS-LABEL: @disabled(
; O3DIS-NOT: store <4 x i32>
; O3DIS: ret i32
diff --git a/test/Transforms/LoopVectorize/X86/scatter_crash.ll b/test/Transforms/LoopVectorize/X86/scatter_crash.ll
index ec67e632efbd..bda4b2454ee2 100755
--- a/test/Transforms/LoopVectorize/X86/scatter_crash.ll
+++ b/test/Transforms/LoopVectorize/X86/scatter_crash.ll
@@ -16,97 +16,23 @@ target triple = "x86_64-apple-macosx10.11.0"
define void @_Z3fn1v() #0 {
; CHECK-LABEL: @_Z3fn1v(
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX:%.*]].next, %vector.body ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <16 x i64> [
-; CHECK-NEXT: [[VEC_IND3:%.*]] = phi <16 x i64> [
-; CHECK-NEXT: [[SHL:%.*]] = shl i64 %index, 1
-; CHECK-NEXT: %offset.idx = add i64 [[SHL]], 8
-; CHECK-NEXT: [[IND00:%.*]] = add i64 %offset.idx, 0
-; CHECK-NEXT: [[IND02:%.*]] = add i64 %offset.idx, 2
-; CHECK-NEXT: [[IND04:%.*]] = add i64 %offset.idx, 4
-; CHECK-NEXT: [[IND06:%.*]] = add i64 %offset.idx, 6
-; CHECK-NEXT: [[IND08:%.*]] = add i64 %offset.idx, 8
-; CHECK-NEXT: [[IND10:%.*]] = add i64 %offset.idx, 10
-; CHECK-NEXT: [[IND12:%.*]] = add i64 %offset.idx, 12
-; CHECK-NEXT: [[IND14:%.*]] = add i64 %offset.idx, 14
-; CHECK-NEXT: [[IND16:%.*]] = add i64 %offset.idx, 16
-; CHECK-NEXT: [[IND18:%.*]] = add i64 %offset.idx, 18
-; CHECK-NEXT: [[IND20:%.*]] = add i64 %offset.idx, 20
-; CHECK-NEXT: [[IND22:%.*]] = add i64 %offset.idx, 22
-; CHECK-NEXT: [[IND24:%.*]] = add i64 %offset.idx, 24
-; CHECK-NEXT: [[IND26:%.*]] = add i64 %offset.idx, 26
-; CHECK-NEXT: [[IND28:%.*]] = add i64 %offset.idx, 28
-; CHECK-NEXT: [[IND30:%.*]] = add i64 %offset.idx, 30
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <16 x i64> [ <i64 8, i64 10, i64 12, i64 14, i64 16, i64 18, i64 20, i64 22, i64 24, i64 26, i64 28, i64 30, i64 32, i64 34, i64 36, i64 38>, %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ]
+; CHECK-NEXT: [[VEC_IND3:%.*]] = phi <16 x i64> [ <i64 0, i64 2, i64 4, i64 6, i64 8, i64 10, i64 12, i64 14, i64 16, i64 18, i64 20, i64 22, i64 24, i64 26, i64 28, i64 30>, %vector.ph ], [ [[VEC_IND_NEXT4:%.*]], %vector.body ]
; CHECK-NEXT: [[TMP10:%.*]] = sub nsw <16 x i64> <i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8>, [[VEC_IND]]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @d, i64 0, i64 [[IND00]]
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @d, i64 0, i64 [[IND02]]
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @d, i64 0, i64 [[IND04]]
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @d, i64 0, i64 [[IND06]]
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @d, i64 0, i64 [[IND08]]
-; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @d, i64 0, i64 [[IND10]]
-; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @d, i64 0, i64 [[IND12]]
-; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @d, i64 0, i64 [[IND14]]
-; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @d, i64 0, i64 [[IND16]]
-; CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @d, i64 0, i64 [[IND18]]
-; CHECK-NEXT: [[TMP42:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @d, i64 0, i64 [[IND20]]
-; CHECK-NEXT: [[TMP45:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @d, i64 0, i64 [[IND22]]
-; CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @d, i64 0, i64 [[IND24]]
-; CHECK-NEXT: [[TMP51:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @d, i64 0, i64 [[IND26]]
-; CHECK-NEXT: [[TMP54:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @d, i64 0, i64 [[IND28]]
-; CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @d, i64 0, i64 [[IND30]]
-; CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x [10 x i32]*> undef, [10 x i32]* [[TMP12]], i32 0
-; CHECK-NEXT: [[TMP16:%.*]] = insertelement <16 x [10 x i32]*> [[TMP13]], [10 x i32]* [[TMP15]], i32 1
-; CHECK-NEXT: [[TMP19:%.*]] = insertelement <16 x [10 x i32]*> [[TMP16]], [10 x i32]* [[TMP18]], i32 2
-; CHECK-NEXT: [[TMP22:%.*]] = insertelement <16 x [10 x i32]*> [[TMP19]], [10 x i32]* [[TMP21]], i32 3
-; CHECK-NEXT: [[TMP25:%.*]] = insertelement <16 x [10 x i32]*> [[TMP22]], [10 x i32]* [[TMP24]], i32 4
-; CHECK-NEXT: [[TMP28:%.*]] = insertelement <16 x [10 x i32]*> [[TMP25]], [10 x i32]* [[TMP27]], i32 5
-; CHECK-NEXT: [[TMP31:%.*]] = insertelement <16 x [10 x i32]*> [[TMP28]], [10 x i32]* [[TMP30]], i32 6
-; CHECK-NEXT: [[TMP34:%.*]] = insertelement <16 x [10 x i32]*> [[TMP31]], [10 x i32]* [[TMP33]], i32 7
-; CHECK-NEXT: [[TMP37:%.*]] = insertelement <16 x [10 x i32]*> [[TMP34]], [10 x i32]* [[TMP36]], i32 8
-; CHECK-NEXT: [[TMP40:%.*]] = insertelement <16 x [10 x i32]*> [[TMP37]], [10 x i32]* [[TMP39]], i32 9
-; CHECK-NEXT: [[TMP43:%.*]] = insertelement <16 x [10 x i32]*> [[TMP40]], [10 x i32]* [[TMP42]], i32 10
-; CHECK-NEXT: [[TMP46:%.*]] = insertelement <16 x [10 x i32]*> [[TMP43]], [10 x i32]* [[TMP45]], i32 11
-; CHECK-NEXT: [[TMP49:%.*]] = insertelement <16 x [10 x i32]*> [[TMP46]], [10 x i32]* [[TMP48]], i32 12
-; CHECK-NEXT: [[TMP52:%.*]] = insertelement <16 x [10 x i32]*> [[TMP49]], [10 x i32]* [[TMP51]], i32 13
-; CHECK-NEXT: [[TMP55:%.*]] = insertelement <16 x [10 x i32]*> [[TMP52]], [10 x i32]* [[TMP54]], i32 14
-; CHECK-NEXT: [[TMP58:%.*]] = insertelement <16 x [10 x i32]*> [[TMP55]], [10 x i32]* [[TMP57]], i32 15
-; CHECK-NEXT: [[TMP59:%.*]] = add nsw <16 x i64> [[TMP10]], [[VEC_IND3]]
-; CHECK-NEXT: [[TMP61:%.*]] = extractelement <16 x i64> [[TMP59]], i32 0
-; CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP12]], i64 [[TMP61]], i64 0
-; CHECK-NEXT: [[TMP65:%.*]] = extractelement <16 x i64> [[TMP59]], i32 1
-; CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP15]], i64 [[TMP65]], i64 0
-; CHECK-NEXT: [[TMP69:%.*]] = extractelement <16 x i64> [[TMP59]], i32 2
-; CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP18]], i64 [[TMP69]], i64 0
-; CHECK-NEXT: [[TMP73:%.*]] = extractelement <16 x i64> [[TMP59]], i32 3
-; CHECK-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP21]], i64 [[TMP73]], i64 0
-; CHECK-NEXT: [[TMP77:%.*]] = extractelement <16 x i64> [[TMP59]], i32 4
-; CHECK-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP24]], i64 [[TMP77]], i64 0
-; CHECK-NEXT: [[TMP81:%.*]] = extractelement <16 x i64> [[TMP59]], i32 5
-; CHECK-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP27]], i64 [[TMP81]], i64 0
-; CHECK-NEXT: [[TMP85:%.*]] = extractelement <16 x i64> [[TMP59]], i32 6
-; CHECK-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP30]], i64 [[TMP85]], i64 0
-; CHECK-NEXT: [[TMP89:%.*]] = extractelement <16 x i64> [[TMP59]], i32 7
-; CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP33]], i64 [[TMP89]], i64 0
-; CHECK-NEXT: [[TMP93:%.*]] = extractelement <16 x i64> [[TMP59]], i32 8
-; CHECK-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP36]], i64 [[TMP93]], i64 0
-; CHECK-NEXT: [[TMP97:%.*]] = extractelement <16 x i64> [[TMP59]], i32 9
-; CHECK-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP39]], i64 [[TMP97]], i64 0
-; CHECK-NEXT: [[TMP101:%.*]] = extractelement <16 x i64> [[TMP59]], i32 10
-; CHECK-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP42]], i64 [[TMP101]], i64 0
-; CHECK-NEXT: [[TMP105:%.*]] = extractelement <16 x i64> [[TMP59]], i32 11
-; CHECK-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP45]], i64 [[TMP105]], i64 0
-; CHECK-NEXT: [[TMP109:%.*]] = extractelement <16 x i64> [[TMP59]], i32 12
-; CHECK-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP48]], i64 [[TMP109]], i64 0
-; CHECK-NEXT: [[TMP113:%.*]] = extractelement <16 x i64> [[TMP59]], i32 13
-; CHECK-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP51]], i64 [[TMP113]], i64 0
-; CHECK-NEXT: [[TMP117:%.*]] = extractelement <16 x i64> [[TMP59]], i32 14
-; CHECK-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP54]], i64 [[TMP117]], i64 0
-; CHECK-NEXT: [[TMP121:%.*]] = extractelement <16 x i64> [[TMP59]], i32 15
-; CHECK-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP57]], i64 [[TMP121]], i64 0
-; CHECK-NEXT: [[VECTORGEP:%.*]] = getelementptr inbounds [10 x i32], <16 x [10 x i32]*> [[TMP58]], <16 x i64> [[TMP59]], i64 0
-; CHECK-NEXT: call void @llvm.masked.scatter.v16i32(<16 x i32> <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>, <16 x i32*> [[VECTORGEP]], i32 16, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
-; CHECK: [[STEP_ADD:%.*]] = add <16 x i64> [[VEC_IND]], <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
-; CHECK: [[STEP_ADD4:%.*]] = add <16 x i64> [[VEC_IND3]], <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* @d, i64 0, <16 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP12:%.*]] = add nsw <16 x i64> [[TMP10]], [[VEC_IND3]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [10 x i32], <16 x [10 x i32]*> [[TMP11]], <16 x i64> [[TMP12]], i64 0
+; CHECK-NEXT: call void @llvm.masked.scatter.v16i32(<16 x i32> <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>, <16 x i32*> [[TMP13]], i32 16, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+; CHECK-NEXT: [[TMP14:%.*]] = or <16 x i64> [[VEC_IND3]], <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+; CHECK-NEXT: [[TMP15:%.*]] = add nsw <16 x i64> [[TMP10]], [[TMP14]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds [10 x i32], <16 x [10 x i32]*> [[TMP11]], <16 x i64> [[TMP15]], i64 0
+; CHECK-NEXT: call void @llvm.masked.scatter.v16i32(<16 x i32> <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>, <16 x i32*> [[TMP16]], i32 8, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+; CHECK-NEXT: [[VEC_IND_NEXT4]] = add <16 x i64> [[VEC_IND3]], <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
+;
entry:
%0 = load i32, i32* @c, align 4
%cmp34 = icmp sgt i32 %0, 8
diff --git a/test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll b/test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll
index f28e6be23529..b2933c4b56f2 100644
--- a/test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll
+++ b/test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll
@@ -1,4 +1,6 @@
; RUN: opt < %s -loop-vectorize -S -pass-remarks-missed='loop-vectorize' -pass-remarks-analysis='loop-vectorize' 2>&1 | FileCheck %s
+; RUN: opt < %s -loop-vectorize -o /dev/null -pass-remarks-output=%t.yaml
+; RUN: cat %t.yaml | FileCheck -check-prefix=YAML %s
; C/C++ code for tests
; void test(int *A, int Length) {
@@ -42,6 +44,61 @@
; CHECK-NOT: x i32>
; CHECK: ret
+; YAML: --- !Analysis
+; YAML-NEXT: Pass: loop-vectorize
+; YAML-NEXT: Name: CantComputeNumberOfIterations
+; YAML-NEXT: DebugLoc: { File: source.cpp, Line: 4, Column: 5 }
+; YAML-NEXT: Function: _Z4testPii
+; YAML-NEXT: Args:
+; YAML-NEXT: - String: 'loop not vectorized: '
+; YAML-NEXT: - String: could not determine number of loop iterations
+; YAML-NEXT: ...
+; YAML-NEXT: --- !Missed
+; YAML-NEXT: Pass: loop-vectorize
+; YAML-NEXT: Name: MissedDetails
+; YAML-NEXT: DebugLoc: { File: source.cpp, Line: 4, Column: 5 }
+; YAML-NEXT: Function: _Z4testPii
+; YAML-NEXT: Args:
+; YAML-NEXT: - String: loop not vectorized
+; YAML-NEXT: ...
+; YAML-NEXT: --- !Analysis
+; YAML-NEXT: Pass: loop-vectorize
+; YAML-NEXT: Name: AllDisabled
+; YAML-NEXT: DebugLoc: { File: source.cpp, Line: 13, Column: 5 }
+; YAML-NEXT: Function: _Z13test_disabledPii
+; YAML-NEXT: Args:
+; YAML-NEXT: - String: 'loop not vectorized: vectorization and interleaving are explicitly disabled, or vectorize width and interleave count are both set to 1'
+; YAML-NEXT: ...
+; YAML-NEXT: --- !Analysis
+; YAML-NEXT: Pass: ''
+; YAML-NEXT: Name: CantIdentifyArrayBounds
+; YAML-NEXT: DebugLoc: { File: source.cpp, Line: 19, Column: 5 }
+; YAML-NEXT: Function: _Z17test_array_boundsPiS_i
+; YAML-NEXT: Args:
+; YAML-NEXT: - String: 'loop not vectorized: '
+; YAML-NEXT: - String: cannot identify array bounds
+; YAML-NEXT: ...
+; YAML-NEXT: --- !Missed
+; YAML-NEXT: Pass: loop-vectorize
+; YAML-NEXT: Name: MissedDetails
+; YAML-NEXT: DebugLoc: { File: source.cpp, Line: 19, Column: 5 }
+; YAML-NEXT: Function: _Z17test_array_boundsPiS_i
+; YAML-NEXT: Args:
+; YAML-NEXT: - String: loop not vectorized
+; YAML-NEXT: - String: ' (Force='
+; YAML-NEXT: - Force: 'true'
+; YAML-NEXT: - String: ')'
+; YAML-NEXT: ...
+; YAML-NEXT: --- !Failure
+; YAML-NEXT: Pass: loop-vectorize
+; YAML-NEXT: Name: FailedRequestedVectorization
+; YAML-NEXT: DebugLoc: { File: source.cpp, Line: 19, Column: 5 }
+; YAML-NEXT: Function: _Z17test_array_boundsPiS_i
+; YAML-NEXT: Args:
+; YAML-NEXT: - String: 'loop not vectorized: '
+; YAML-NEXT: - String: failed explicitly specified loop vectorization
+; YAML-NEXT: ...
+
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; Function Attrs: nounwind optsize ssp uwtable
diff --git a/test/Transforms/LoopVectorize/X86/vectorization-remarks-profitable.ll b/test/Transforms/LoopVectorize/X86/vectorization-remarks-profitable.ll
index fc9f97328fb7..91466e65078f 100644
--- a/test/Transforms/LoopVectorize/X86/vectorization-remarks-profitable.ll
+++ b/test/Transforms/LoopVectorize/X86/vectorization-remarks-profitable.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -pass-remarks-analysis='loop-vectorize' -mtriple=x86_64-unknown-linux -S 2>&1 | FileCheck %s
+; RUN: opt < %s -loop-vectorize -pass-remarks-missed='loop-vectorize' -mtriple=x86_64-unknown-linux -S 2>&1 | FileCheck %s
; Verify analysis remarks are generated when interleaving is not beneficial.
; CHECK: remark: vectorization-remarks-profitable.c:5:17: the cost-model indicates that vectorization is not beneficial
diff --git a/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll b/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
index 88b2aa36b08c..125829090c3f 100644
--- a/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
+++ b/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
@@ -438,3 +438,53 @@ for.end:
%tmp5 = phi i32 [ %tmp2, %for.body ]
ret i32 %tmp5
}
+
+; INTER-LABEL: bitcast_pointer_operand
+;
+; Check that a pointer operand having a user other than a memory access is
+; recognized as uniform after vectorization. In this test case, %tmp1 is a
+; bitcast that is used by a load and a getelementptr instruction (%tmp2). Once
+; %tmp2 is marked uniform, %tmp1 should be marked uniform as well.
+;
+; INTER: LV: Found uniform instruction: %cond = icmp slt i64 %i.next, %n
+; INTER-NEXT: LV: Found uniform instruction: %tmp2 = getelementptr inbounds i8, i8* %tmp1, i64 3
+; INTER-NEXT: LV: Found uniform instruction: %tmp6 = getelementptr inbounds i8, i8* %B, i64 %i
+; INTER-NEXT: LV: Found uniform instruction: %tmp1 = bitcast i64* %tmp0 to i8*
+; INTER-NEXT: LV: Found uniform instruction: %tmp0 = getelementptr inbounds i64, i64* %A, i64 %i
+; INTER-NEXT: LV: Found uniform instruction: %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+; INTER-NEXT: LV: Found uniform instruction: %i.next = add nuw nsw i64 %i, 1
+; INTER: vector.body:
+; INTER-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; INTER-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, i64* %A, i64 [[INDEX]]
+; INTER-NEXT: [[TMP5:%.*]] = bitcast i64* [[TMP4]] to <32 x i8>*
+; INTER-NEXT: [[WIDE_VEC:%.*]] = load <32 x i8>, <32 x i8>* [[TMP5]], align 1
+; INTER-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <32 x i8> [[WIDE_VEC]], <32 x i8> undef, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
+; INTER-NEXT: [[STRIDED_VEC5:%.*]] = shufflevector <32 x i8> [[WIDE_VEC]], <32 x i8> undef, <4 x i32> <i32 3, i32 11, i32 19, i32 27>
+; INTER-NEXT: [[TMP6:%.*]] = xor <4 x i8> [[STRIDED_VEC5]], [[STRIDED_VEC]]
+; INTER-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, i8* %B, i64 [[INDEX]]
+; INTER-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to <4 x i8>*
+; INTER-NEXT: store <4 x i8> [[TMP6]], <4 x i8>* [[TMP8]], align 1
+; INTER-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
+; INTER: br i1 {{.*}}, label %middle.block, label %vector.body
+;
+define void @bitcast_pointer_operand(i64* %A, i8* %B, i64 %n) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+ %tmp0 = getelementptr inbounds i64, i64* %A, i64 %i
+ %tmp1 = bitcast i64* %tmp0 to i8*
+ %tmp2 = getelementptr inbounds i8, i8* %tmp1, i64 3
+ %tmp3 = load i8, i8* %tmp2, align 1
+ %tmp4 = load i8, i8* %tmp1, align 1
+ %tmp5 = xor i8 %tmp3, %tmp4
+ %tmp6 = getelementptr inbounds i8, i8* %B, i64 %i
+ store i8 %tmp5, i8* %tmp6
+ %i.next = add nuw nsw i64 %i, 1
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/discriminator.ll b/test/Transforms/LoopVectorize/discriminator.ll
new file mode 100644
index 000000000000..b7d34582dbd8
--- /dev/null
+++ b/test/Transforms/LoopVectorize/discriminator.ll
@@ -0,0 +1,70 @@
+; RUN: opt -S -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 < %s | FileCheck --check-prefix=LOOPVEC_4_1 %s
+; RUN: opt -S -loop-vectorize -force-vector-width=2 -force-vector-interleave=3 < %s | FileCheck --check-prefix=LOOPVEC_2_3 %s
+; RUN: opt -S -loop-unroll -unroll-count=5 < %s | FileCheck --check-prefix=LOOPUNROLL_5 %s
+; RUN: opt -S -loop-vectorize -force-vector-width=4 -force-vector-interleave=4 -loop-unroll -unroll-count=2 < %s | FileCheck --check-prefix=LOOPVEC_UNROLL %s
+
+; Test whether the vectorization/unroll factor is recorded in the discriminator.
+;
+; Original source code:
+; 1 int *a;
+; 2 int *b;
+; 3
+; 4 void foo() {
+; 5 for (int i = 0; i < 4096; i++)
+; 6 a[i] += b[i];
+; 7 }
+
+@a = local_unnamed_addr global i32* null, align 8
+@b = local_unnamed_addr global i32* null, align 8
+
+define void @_Z3foov() local_unnamed_addr #0 !dbg !6 {
+ %1 = load i32*, i32** @b, align 8, !dbg !8, !tbaa !9
+ %2 = load i32*, i32** @a, align 8, !dbg !13, !tbaa !9
+ br label %3, !dbg !14
+
+; <label>:3: ; preds = %3, %0
+ %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %3 ]
+ %4 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv, !dbg !8
+ %5 = load i32, i32* %4, align 4, !dbg !8, !tbaa !15
+ %6 = getelementptr inbounds i32, i32* %2, i64 %indvars.iv, !dbg !13
+ %7 = load i32, i32* %6, align 4, !dbg !17, !tbaa !15
+ %8 = add nsw i32 %7, %5, !dbg !17
+ store i32 %8, i32* %6, align 4, !dbg !17, !tbaa !15
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !18
+ %exitcond = icmp eq i64 %indvars.iv.next, 4096, !dbg !19
+ br i1 %exitcond, label %9, label %3, !dbg !14, !llvm.loop !20
+
+; <label>:9: ; preds = %3
+ ret void, !dbg !21
+}
+
+;LOOPVEC_4_1: discriminator: 17
+;LOOPVEC_2_3: discriminator: 25
+;LOOPUNROLL_5: discriminator: 21
+; When unrolling after loop vectorization, both the vector body and the
+; remainder loop are unrolled.
+;LOOPVEC_UNROLL: discriminator: 385
+;LOOPVEC_UNROLL: discriminator: 9
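+; (The exact values presumably encode the duplication factor implied by each
+; configuration into the discriminator, so every VF/interleave/unroll
+; combination yields a distinct value.)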
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, debugInfoForProfiling: true)
+!1 = !DIFile(filename: "a.cc", directory: "/")
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!6 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 4, unit: !0)
+!8 = !DILocation(line: 6, column: 13, scope: !6)
+!9 = !{!10, !10, i64 0}
+!10 = !{!"any pointer", !11, i64 0}
+!11 = !{!"omnipotent char", !12, i64 0}
+!12 = !{!"Simple C++ TBAA"}
+!13 = !DILocation(line: 6, column: 5, scope: !6)
+!14 = !DILocation(line: 5, column: 3, scope: !6)
+!15 = !{!16, !16, i64 0}
+!16 = !{!"int", !11, i64 0}
+!17 = !DILocation(line: 6, column: 10, scope: !6)
+!18 = !DILocation(line: 5, column: 30, scope: !6)
+!19 = !DILocation(line: 5, column: 21, scope: !6)
+!20 = distinct !{!20, !14}
+!21 = !DILocation(line: 7, column: 1, scope: !6)
diff --git a/test/Transforms/LoopVectorize/first-order-recurrence.ll b/test/Transforms/LoopVectorize/first-order-recurrence.ll
new file mode 100644
index 000000000000..3d1c78038e32
--- /dev/null
+++ b/test/Transforms/LoopVectorize/first-order-recurrence.ll
@@ -0,0 +1,398 @@
+; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -dce -instcombine -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=2 -dce -instcombine -S | FileCheck %s --check-prefix=UNROLL
+; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=2 -S | FileCheck %s --check-prefix=UNROLL-NO-IC
+; RUN: opt < %s -loop-vectorize -force-vector-width=1 -force-vector-interleave=2 -S | FileCheck %s --check-prefix=UNROLL-NO-VF
+
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+
+; void recurrence_1(int *a, int *b, int n) {
+; for(int i = 0; i < n; i++)
+; b[i] = a[i] + a[i - 1];
+; }
+;
+; CHECK-LABEL: @recurrence_1(
+; CHECK: vector.ph:
+; CHECK: %vector.recur.init = insertelement <4 x i32> undef, i32 %pre_load, i32 3
+; CHECK: vector.body:
+; CHECK: %vector.recur = phi <4 x i32> [ %vector.recur.init, %vector.ph ], [ [[L1:%[a-zA-Z0-9.]+]], %vector.body ]
+; CHECK: [[L1]] = load <4 x i32>
+; CHECK: {{.*}} = shufflevector <4 x i32> %vector.recur, <4 x i32> [[L1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; CHECK: middle.block:
+; CHECK: %vector.recur.extract = extractelement <4 x i32> [[L1]], i32 3
+; CHECK: scalar.ph:
+; CHECK: %scalar.recur.init = phi i32 [ %vector.recur.extract, %middle.block ], [ %pre_load, %vector.memcheck ], [ %pre_load, %min.iters.checked ], [ %pre_load, %for.preheader ]
+; CHECK: scalar.body:
+; CHECK: %scalar.recur = phi i32 [ %scalar.recur.init, %scalar.ph ], [ {{.*}}, %scalar.body ]
+;
+; UNROLL-LABEL: @recurrence_1(
+; UNROLL: vector.body:
+; UNROLL: %vector.recur = phi <4 x i32> [ %vector.recur.init, %vector.ph ], [ [[L2:%[a-zA-Z0-9.]+]], %vector.body ]
+; UNROLL: [[L1:%[a-zA-Z0-9.]+]] = load <4 x i32>
+; UNROLL: [[L2]] = load <4 x i32>
+; UNROLL: {{.*}} = shufflevector <4 x i32> %vector.recur, <4 x i32> [[L1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; UNROLL: {{.*}} = shufflevector <4 x i32> [[L1]], <4 x i32> [[L2]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; UNROLL: middle.block:
+; UNROLL: %vector.recur.extract = extractelement <4 x i32> [[L2]], i32 3
+;
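+; (The recurrence is presumably carried across vector iterations by keeping
+; the previous iteration's last loaded vector in %vector.recur and splicing
+; it with the current load via the <3, 4, 5, 6> shuffle mask checked above.)
+;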
+define void @recurrence_1(i32* nocapture readonly %a, i32* nocapture %b, i32 %n) {
+entry:
+ br label %for.preheader
+
+for.preheader:
+ %arrayidx.phi.trans.insert = getelementptr inbounds i32, i32* %a, i64 0
+ %pre_load = load i32, i32* %arrayidx.phi.trans.insert
+ br label %scalar.body
+
+scalar.body:
+ %0 = phi i32 [ %pre_load, %for.preheader ], [ %1, %scalar.body ]
+ %indvars.iv = phi i64 [ 0, %for.preheader ], [ %indvars.iv.next, %scalar.body ]
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %arrayidx32 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
+ %1 = load i32, i32* %arrayidx32
+ %arrayidx34 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
+ %add35 = add i32 %1, %0
+ store i32 %add35, i32* %arrayidx34
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.exit, label %scalar.body
+
+for.exit:
+ ret void
+}
+
+; int recurrence_2(int *a, int n) {
+; int minmax;
+; for (int i = 0; i < n; ++i)
+; minmax = min(minmax, max(a[i] - a[i-1], 0));
+; return minmax;
+; }
+;
+; CHECK-LABEL: @recurrence_2(
+; CHECK: vector.ph:
+; CHECK: %vector.recur.init = insertelement <4 x i32> undef, i32 %.pre, i32 3
+; CHECK: vector.body:
+; CHECK: %vector.recur = phi <4 x i32> [ %vector.recur.init, %vector.ph ], [ [[L1:%[a-zA-Z0-9.]+]], %vector.body ]
+; CHECK: [[L1]] = load <4 x i32>
+; CHECK: {{.*}} = shufflevector <4 x i32> %vector.recur, <4 x i32> [[L1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; CHECK: middle.block:
+; CHECK: %vector.recur.extract = extractelement <4 x i32> [[L1]], i32 3
+; CHECK: scalar.ph:
+; CHECK: %scalar.recur.init = phi i32 [ %vector.recur.extract, %middle.block ], [ %.pre, %min.iters.checked ], [ %.pre, %for.preheader ]
+; CHECK: scalar.body:
+; CHECK: %scalar.recur = phi i32 [ %scalar.recur.init, %scalar.ph ], [ {{.*}}, %scalar.body ]
+;
+; UNROLL-LABEL: @recurrence_2(
+; UNROLL: vector.body:
+; UNROLL: %vector.recur = phi <4 x i32> [ %vector.recur.init, %vector.ph ], [ [[L2:%[a-zA-Z0-9.]+]], %vector.body ]
+; UNROLL: [[L1:%[a-zA-Z0-9.]+]] = load <4 x i32>
+; UNROLL: [[L2]] = load <4 x i32>
+; UNROLL: {{.*}} = shufflevector <4 x i32> %vector.recur, <4 x i32> [[L1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; UNROLL: {{.*}} = shufflevector <4 x i32> [[L1]], <4 x i32> [[L2]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; UNROLL: middle.block:
+; UNROLL: %vector.recur.extract = extractelement <4 x i32> [[L2]], i32 3
+;
+define i32 @recurrence_2(i32* nocapture readonly %a, i32 %n) {
+entry:
+ %cmp27 = icmp sgt i32 %n, 0
+ br i1 %cmp27, label %for.preheader, label %for.cond.cleanup
+
+for.preheader:
+ %arrayidx2.phi.trans.insert = getelementptr inbounds i32, i32* %a, i64 -1
+ %.pre = load i32, i32* %arrayidx2.phi.trans.insert, align 4
+ br label %scalar.body
+
+for.cond.cleanup.loopexit:
+ %minmax.0.cond.lcssa = phi i32 [ %minmax.0.cond, %scalar.body ]
+ br label %for.cond.cleanup
+
+for.cond.cleanup:
+ %minmax.0.lcssa = phi i32 [ undef, %entry ], [ %minmax.0.cond.lcssa, %for.cond.cleanup.loopexit ]
+ ret i32 %minmax.0.lcssa
+
+scalar.body:
+ %0 = phi i32 [ %.pre, %for.preheader ], [ %1, %scalar.body ]
+ %indvars.iv = phi i64 [ 0, %for.preheader ], [ %indvars.iv.next, %scalar.body ]
+ %minmax.028 = phi i32 [ undef, %for.preheader ], [ %minmax.0.cond, %scalar.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+ %1 = load i32, i32* %arrayidx, align 4
+ %sub3 = sub nsw i32 %1, %0
+ %cmp4 = icmp sgt i32 %sub3, 0
+ %cond = select i1 %cmp4, i32 %sub3, i32 0
+ %cmp5 = icmp slt i32 %minmax.028, %cond
+ %minmax.0.cond = select i1 %cmp5, i32 %minmax.028, i32 %cond
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.cond.cleanup.loopexit, label %scalar.body
+}
+
+; void recurrence_3(short *a, double *b, int n, float f, short p) {
+; b[0] = (double)a[0] - f * (double)p;
+; for (int i = 1; i < n; i++)
+; b[i] = (double)a[i] - f * (double)a[i - 1];
+; }
+;
+; CHECK-LABEL: @recurrence_3(
+; CHECK: vector.ph:
+; CHECK: %vector.recur.init = insertelement <4 x i16> undef, i16 %0, i32 3
+; CHECK: vector.body:
+; CHECK: %vector.recur = phi <4 x i16> [ %vector.recur.init, %vector.ph ], [ [[L1:%[a-zA-Z0-9.]+]], %vector.body ]
+; CHECK: [[L1]] = load <4 x i16>
+; CHECK: {{.*}} = shufflevector <4 x i16> %vector.recur, <4 x i16> [[L1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; CHECK: middle.block:
+; CHECK: %vector.recur.extract = extractelement <4 x i16> [[L1]], i32 3
+; CHECK: scalar.ph:
+; CHECK: %scalar.recur.init = phi i16 [ %vector.recur.extract, %middle.block ], [ %0, %vector.memcheck ], [ %0, %min.iters.checked ], [ %0, %for.preheader ]
+; CHECK: scalar.body:
+; CHECK: %scalar.recur = phi i16 [ %scalar.recur.init, %scalar.ph ], [ {{.*}}, %scalar.body ]
+;
+; UNROLL-LABEL: @recurrence_3(
+; UNROLL: vector.body:
+; UNROLL: %vector.recur = phi <4 x i16> [ %vector.recur.init, %vector.ph ], [ [[L2:%[a-zA-Z0-9.]+]], %vector.body ]
+; UNROLL: [[L1:%[a-zA-Z0-9.]+]] = load <4 x i16>
+; UNROLL: [[L2]] = load <4 x i16>
+; UNROLL: {{.*}} = shufflevector <4 x i16> %vector.recur, <4 x i16> [[L1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; UNROLL: {{.*}} = shufflevector <4 x i16> [[L1]], <4 x i16> [[L2]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; UNROLL: middle.block:
+; UNROLL: %vector.recur.extract = extractelement <4 x i16> [[L2]], i32 3
+;
+define void @recurrence_3(i16* nocapture readonly %a, double* nocapture %b, i32 %n, float %f, i16 %p) {
+entry:
+ %0 = load i16, i16* %a, align 2
+ %conv = sitofp i16 %0 to double
+ %conv1 = fpext float %f to double
+ %conv2 = sitofp i16 %p to double
+ %mul = fmul fast double %conv2, %conv1
+ %sub = fsub fast double %conv, %mul
+ store double %sub, double* %b, align 8
+ %cmp25 = icmp sgt i32 %n, 1
+ br i1 %cmp25, label %for.preheader, label %for.end
+
+for.preheader:
+ br label %scalar.body
+
+scalar.body:
+ %1 = phi i16 [ %0, %for.preheader ], [ %2, %scalar.body ]
+ %advars.iv = phi i64 [ %advars.iv.next, %scalar.body ], [ 1, %for.preheader ]
+ %arrayidx5 = getelementptr inbounds i16, i16* %a, i64 %advars.iv
+ %2 = load i16, i16* %arrayidx5, align 2
+ %conv6 = sitofp i16 %2 to double
+ %conv11 = sitofp i16 %1 to double
+ %mul12 = fmul fast double %conv11, %conv1
+ %sub13 = fsub fast double %conv6, %mul12
+ %arrayidx15 = getelementptr inbounds double, double* %b, i64 %advars.iv
+ store double %sub13, double* %arrayidx15, align 8
+ %advars.iv.next = add nuw nsw i64 %advars.iv, 1
+ %lftr.wideiv = trunc i64 %advars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end.loopexit, label %scalar.body
+
+for.end.loopexit:
+ br label %for.end
+
+for.end:
+ ret void
+}
+
+; void PR26734(short *a, int *b, int *c, int d, short *e) {
+; for (; d != 21; d++) {
+; *b &= *c;
+; *e = *a - 6;
+; *c = *e;
+; }
+; }
+;
+; CHECK-LABEL: @PR26734(
+; CHECK-NOT: vector.ph:
+; CHECK: }
+;
+define void @PR26734(i16* %a, i32* %b, i32* %c, i32 %d, i16* %e) {
+entry:
+ %cmp4 = icmp eq i32 %d, 21
+ br i1 %cmp4, label %entry.for.end_crit_edge, label %for.body.lr.ph
+
+entry.for.end_crit_edge:
+ %.pre = load i32, i32* %b, align 4
+ br label %for.end
+
+for.body.lr.ph:
+ %0 = load i16, i16* %a, align 2
+ %sub = add i16 %0, -6
+ %conv2 = sext i16 %sub to i32
+ %c.promoted = load i32, i32* %c, align 4
+ %b.promoted = load i32, i32* %b, align 4
+ br label %for.body
+
+for.body:
+ %inc7 = phi i32 [ %d, %for.body.lr.ph ], [ %inc, %for.body ]
+ %and6 = phi i32 [ %b.promoted, %for.body.lr.ph ], [ %and, %for.body ]
+ %conv25 = phi i32 [ %c.promoted, %for.body.lr.ph ], [ %conv2, %for.body ]
+ %and = and i32 %and6, %conv25
+ %inc = add nsw i32 %inc7, 1
+ %cmp = icmp eq i32 %inc, 21
+ br i1 %cmp, label %for.cond.for.end_crit_edge, label %for.body
+
+for.cond.for.end_crit_edge:
+ %and.lcssa = phi i32 [ %and, %for.body ]
+ store i32 %conv2, i32* %c, align 4
+ store i32 %and.lcssa, i32* %b, align 4
+ store i16 %sub, i16* %e, align 2
+ br label %for.end
+
+for.end:
+ ret void
+}
+
+; int PR27246() {
+; unsigned int e, n;
+; for (int i = 1; i < 49; ++i) {
+; for (int k = i; k > 1; --k)
+; e = k;
+; n = e;
+; }
+; return n;
+; }
+;
+; CHECK-LABEL: @PR27246(
+; CHECK-NOT: vector.ph:
+; CHECK: }
+;
+define i32 @PR27246() {
+entry:
+ br label %for.cond1.preheader
+
+for.cond1.preheader:
+ %i.016 = phi i32 [ 1, %entry ], [ %inc, %for.cond.cleanup3 ]
+ %e.015 = phi i32 [ undef, %entry ], [ %e.1.lcssa, %for.cond.cleanup3 ]
+ br label %for.cond1
+
+for.cond.cleanup:
+ %e.1.lcssa.lcssa = phi i32 [ %e.1.lcssa, %for.cond.cleanup3 ]
+ ret i32 %e.1.lcssa.lcssa
+
+for.cond1:
+ %e.1 = phi i32 [ %k.0, %for.cond1 ], [ %e.015, %for.cond1.preheader ]
+ %k.0 = phi i32 [ %dec, %for.cond1 ], [ %i.016, %for.cond1.preheader ]
+ %cmp2 = icmp sgt i32 %k.0, 1
+ %dec = add nsw i32 %k.0, -1
+ br i1 %cmp2, label %for.cond1, label %for.cond.cleanup3
+
+for.cond.cleanup3:
+ %e.1.lcssa = phi i32 [ %e.1, %for.cond1 ]
+ %inc = add nuw nsw i32 %i.016, 1
+ %exitcond = icmp eq i32 %inc, 49
+ br i1 %exitcond, label %for.cond.cleanup, label %for.cond1.preheader
+}
+
+; UNROLL-NO-IC-LABEL: @PR30183(
+; UNROLL-NO-IC: vector.ph:
+; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <4 x i32> undef, i32 [[PRE_LOAD:%.*]], i32 3
+; UNROLL-NO-IC-NEXT: br label %vector.body
+; UNROLL-NO-IC: vector.body:
+; UNROLL-NO-IC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ [[VECTOR_RECUR_INIT]], %vector.ph ], [ [[TMP42:%.*]], %vector.body ]
+; UNROLL-NO-IC: [[TMP27:%.*]] = load i32, i32* {{.*}}
+; UNROLL-NO-IC-NEXT: [[TMP28:%.*]] = load i32, i32* {{.*}}
+; UNROLL-NO-IC-NEXT: [[TMP29:%.*]] = load i32, i32* {{.*}}
+; UNROLL-NO-IC-NEXT: [[TMP30:%.*]] = load i32, i32* {{.*}}
+; UNROLL-NO-IC-NEXT: [[TMP31:%.*]] = load i32, i32* {{.*}}
+; UNROLL-NO-IC-NEXT: [[TMP32:%.*]] = load i32, i32* {{.*}}
+; UNROLL-NO-IC-NEXT: [[TMP33:%.*]] = load i32, i32* {{.*}}
+; UNROLL-NO-IC-NEXT: [[TMP34:%.*]] = load i32, i32* {{.*}}
+; UNROLL-NO-IC-NEXT: [[TMP35:%.*]] = insertelement <4 x i32> undef, i32 [[TMP27]], i32 0
+; UNROLL-NO-IC-NEXT: [[TMP36:%.*]] = insertelement <4 x i32> [[TMP35]], i32 [[TMP28]], i32 1
+; UNROLL-NO-IC-NEXT: [[TMP37:%.*]] = insertelement <4 x i32> [[TMP36]], i32 [[TMP29]], i32 2
+; UNROLL-NO-IC-NEXT: [[TMP38:%.*]] = insertelement <4 x i32> [[TMP37]], i32 [[TMP30]], i32 3
+; UNROLL-NO-IC-NEXT: [[TMP39:%.*]] = insertelement <4 x i32> undef, i32 [[TMP31]], i32 0
+; UNROLL-NO-IC-NEXT: [[TMP40:%.*]] = insertelement <4 x i32> [[TMP39]], i32 [[TMP32]], i32 1
+; UNROLL-NO-IC-NEXT: [[TMP41:%.*]] = insertelement <4 x i32> [[TMP40]], i32 [[TMP33]], i32 2
+; UNROLL-NO-IC-NEXT: [[TMP42]] = insertelement <4 x i32> [[TMP41]], i32 [[TMP34]], i32 3
+; UNROLL-NO-IC-NEXT: [[TMP43:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[TMP38]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; UNROLL-NO-IC-NEXT: [[TMP44:%.*]] = shufflevector <4 x i32> [[TMP38]], <4 x i32> [[TMP42]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 8
+; UNROLL-NO-IC: br i1 {{.*}}, label %middle.block, label %vector.body
+;
+define void @PR30183(i32 %pre_load, i32* %a, i32* %b, i64 %n) {
+entry:
+ br label %scalar.body
+
+scalar.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %scalar.body ]
+ %tmp0 = phi i32 [ %pre_load, %entry ], [ %tmp2, %scalar.body ]
+ %i.next = add nuw nsw i64 %i, 2
+ %tmp1 = getelementptr inbounds i32, i32* %a, i64 %i.next
+ %tmp2 = load i32, i32* %tmp1
+ %cond = icmp eq i64 %i.next,%n
+ br i1 %cond, label %for.end, label %scalar.body
+
+for.end:
+ ret void
+}
+
+; UNROLL-NO-IC-LABEL: @constant_folded_previous_value(
+; UNROLL-NO-IC: vector.body:
+; UNROLL-NO-IC: [[VECTOR_RECUR:%.*]] = phi <4 x i64> [ <i64 undef, i64 undef, i64 undef, i64 0>, %vector.ph ], [ <i64 1, i64 1, i64 1, i64 1>, %vector.body ]
+; UNROLL-NO-IC-NEXT: [[TMP0:%.*]] = shufflevector <4 x i64> [[VECTOR_RECUR]], <4 x i64> <i64 1, i64 1, i64 1, i64 1>, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; UNROLL-NO-IC: br i1 {{.*}}, label %middle.block, label %vector.body
+;
+define void @constant_folded_previous_value() {
+entry:
+ br label %scalar.body
+
+scalar.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %scalar.body ]
+ %tmp2 = phi i64 [ 0, %entry ], [ %tmp3, %scalar.body ]
+ %tmp3 = add i64 0, 1
+ %i.next = add nuw nsw i64 %i, 1
+ %cond = icmp eq i64 %i.next, undef
+ br i1 %cond, label %for.end, label %scalar.body
+
+for.end:
+ ret void
+}
+
+; We vectorize this first-order recurrence by generating two extracts for
+; the phi `val.phi`: one at the last index and another at the second-to-last
+; index. We need these two extracts because the first-order recurrence phi
+; is used outside the loop, so we require the phi itself and not its update
+; (addx).
+; UNROLL-NO-IC-LABEL: extract_second_last_iteration
+; UNROLL-NO-IC: vector.body
+; UNROLL-NO-IC: %step.add = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
+; UNROLL-NO-IC: %[[L1:.+]] = add <4 x i32> %vec.ind, %broadcast.splat
+; UNROLL-NO-IC: %[[L2:.+]] = add <4 x i32> %step.add, %broadcast.splat
+; UNROLL-NO-IC: %index.next = add i32 %index, 8
+; UNROLL-NO-IC: icmp eq i32 %index.next, 96
+; UNROLL-NO-IC: middle.block
+; UNROLL-NO-IC: icmp eq i32 96, 96
+; UNROLL-NO-IC: %vector.recur.extract = extractelement <4 x i32> %[[L2]], i32 3
+; UNROLL-NO-IC: %vector.recur.extract.for.phi = extractelement <4 x i32> %[[L2]], i32 2
+; UNROLL-NO-IC: for.end
+; UNROLL-NO-IC: %val.phi.lcssa = phi i32 [ %scalar.recur, %for.body ], [ %vector.recur.extract.for.phi, %middle.block ]
+; Check the case when unrolled but not vectorized.
+; UNROLL-NO-VF-LABEL: extract_second_last_iteration
+; UNROLL-NO-VF: vector.body:
+; UNROLL-NO-VF: %induction = add i32 %index, 0
+; UNROLL-NO-VF: %induction1 = add i32 %index, 1
+; UNROLL-NO-VF: %[[L1:.+]] = add i32 %induction, %x
+; UNROLL-NO-VF: %[[L2:.+]] = add i32 %induction1, %x
+; UNROLL-NO-VF: %index.next = add i32 %index, 2
+; UNROLL-NO-VF: icmp eq i32 %index.next, 96
+; UNROLL-NO-VF: for.end:
+; UNROLL-NO-VF: %val.phi.lcssa = phi i32 [ %scalar.recur, %for.body ], [ %[[L1]], %middle.block ]
+define i32 @extract_second_last_iteration(i32* %cval, i32 %x) {
+entry:
+ br label %for.body
+
+for.body:
+ %inc.phi = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %val.phi = phi i32 [ 0, %entry ], [ %addx, %for.body ]
+ %inc = add i32 %inc.phi, 1
+ %bc = zext i32 %inc.phi to i64
+ %addx = add i32 %inc.phi, %x
+ %cmp = icmp eq i32 %inc.phi, 95
+ br i1 %cmp, label %for.end, label %for.body
+
+for.end:
+ ret i32 %val.phi
+}
diff --git a/test/Transforms/LoopVectorize/float-induction.ll b/test/Transforms/LoopVectorize/float-induction.ll
index 79bddf471c26..8eec6e262c1a 100644
--- a/test/Transforms/LoopVectorize/float-induction.ll
+++ b/test/Transforms/LoopVectorize/float-induction.ll
@@ -1,43 +1,7 @@
; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -dce -instcombine -S | FileCheck --check-prefix VEC4_INTERL1 %s
; RUN: opt < %s -loop-vectorize -force-vector-interleave=2 -force-vector-width=4 -dce -instcombine -S | FileCheck --check-prefix VEC4_INTERL2 %s
; RUN: opt < %s -loop-vectorize -force-vector-interleave=2 -force-vector-width=1 -dce -instcombine -S | FileCheck --check-prefix VEC1_INTERL2 %s
-
-; VEC4_INTERL1-LABEL: @fp_iv_loop1(
-; VEC4_INTERL1: %[[FP_INC:.*]] = load float, float* @fp_inc
-; VEC4_INTERL1: vector.body:
-; VEC4_INTERL1: %[[FP_INDEX:.*]] = sitofp i64 {{.*}} to float
-; VEC4_INTERL1: %[[VEC_INCR:.*]] = fmul fast float {{.*}}, %[[FP_INDEX]]
-; VEC4_INTERL1: %[[FP_OFFSET_IDX:.*]] = fsub fast float %init, %[[VEC_INCR]]
-; VEC4_INTERL1: %[[BRCT_INSERT:.*]] = insertelement <4 x float> undef, float %[[FP_OFFSET_IDX]], i32 0
-; VEC4_INTERL1-NEXT: %[[BRCT_SPLAT:.*]] = shufflevector <4 x float> %[[BRCT_INSERT]], <4 x float> undef, <4 x i32> zeroinitializer
-; VEC4_INTERL1: %[[BRCT_INSERT:.*]] = insertelement {{.*}} %[[FP_INC]]
-; VEC4_INTERL1-NEXT: %[[FP_INC_BCST:.*]] = shufflevector <4 x float> %[[BRCT_INSERT]], {{.*}} zeroinitializer
-; VEC4_INTERL1: %[[VSTEP:.*]] = fmul fast <4 x float> %[[FP_INC_BCST]], <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>
-; VEC4_INTERL1-NEXT: %[[VEC_INDUCTION:.*]] = fsub fast <4 x float> %[[BRCT_SPLAT]], %[[VSTEP]]
-; VEC4_INTERL1: store <4 x float> %[[VEC_INDUCTION]]
-
-; VEC4_INTERL2-LABEL: @fp_iv_loop1(
-; VEC4_INTERL2: %[[FP_INC:.*]] = load float, float* @fp_inc
-; VEC4_INTERL2: vector.body:
-; VEC4_INTERL2: %[[INDEX:.*]] = sitofp i64 {{.*}} to float
-; VEC4_INTERL2: %[[VEC_INCR:.*]] = fmul fast float %{{.*}}, %[[INDEX]]
-; VEC4_INTERL2: fsub fast float %init, %[[VEC_INCR]]
-; VEC4_INTERL2: %[[VSTEP1:.*]] = fmul fast <4 x float> %{{.*}}, <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>
-; VEC4_INTERL2-NEXT: %[[VEC_INDUCTION1:.*]] = fsub fast <4 x float> {{.*}}, %[[VSTEP1]]
-; VEC4_INTERL2: %[[VSTEP2:.*]] = fmul fast <4 x float> %{{.*}}, <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>
-; VEC4_INTERL2-NEXT: %[[VEC_INDUCTION2:.*]] = fsub fast <4 x float> {{.*}}, %[[VSTEP2]]
-; VEC4_INTERL2: store <4 x float> %[[VEC_INDUCTION1]]
-; VEC4_INTERL2: store <4 x float> %[[VEC_INDUCTION2]]
-
-; VEC1_INTERL2-LABEL: @fp_iv_loop1(
-; VEC1_INTERL2: %[[FP_INC:.*]] = load float, float* @fp_inc
-; VEC1_INTERL2: vector.body:
-; VEC1_INTERL2: %[[INDEX:.*]] = sitofp i64 {{.*}} to float
-; VEC1_INTERL2: %[[STEP:.*]] = fmul fast float %{{.*}}, %[[INDEX]]
-; VEC1_INTERL2: %[[FP_OFFSET_IDX:.*]] = fsub fast float %init, %[[STEP]]
-; VEC1_INTERL2: %[[SCALAR_INDUCTION2:.*]] = fsub fast float %[[FP_OFFSET_IDX]], %[[FP_INC]]
-; VEC1_INTERL2: store float %[[FP_OFFSET_IDX]]
-; VEC1_INTERL2: store float %[[SCALAR_INDUCTION2]]
+; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=2 -dce -simplifycfg -instcombine -S | FileCheck --check-prefix VEC2_INTERL1_PRED_STORE %s
@fp_inc = common global float 0.000000e+00, align 4
@@ -49,6 +13,71 @@
; }
;}
+; VEC4_INTERL1-LABEL: @fp_iv_loop1(
+; VEC4_INTERL1: vector.ph:
+; VEC4_INTERL1-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> undef, float %init, i32 0
+; VEC4_INTERL1-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> undef, <4 x i32> zeroinitializer
+; VEC4_INTERL1-NEXT: [[DOTSPLATINSERT2:%.*]] = insertelement <4 x float> undef, float %fpinc, i32 0
+; VEC4_INTERL1-NEXT: [[DOTSPLAT3:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT2]], <4 x float> undef, <4 x i32> zeroinitializer
+; VEC4_INTERL1-NEXT: [[TMP5:%.*]] = fmul fast <4 x float> [[DOTSPLAT3]], <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>
+; VEC4_INTERL1-NEXT: [[INDUCTION4:%.*]] = fsub fast <4 x float> [[DOTSPLAT]], [[TMP5]]
+; VEC4_INTERL1-NEXT: [[TMP6:%.*]] = fmul fast float %fpinc, 4.000000e+00
+; VEC4_INTERL1-NEXT: [[DOTSPLATINSERT5:%.*]] = insertelement <4 x float> undef, float [[TMP6]], i32 0
+; VEC4_INTERL1-NEXT: [[DOTSPLAT6:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT5]], <4 x float> undef, <4 x i32> zeroinitializer
+; VEC4_INTERL1-NEXT: br label %vector.body
+; VEC4_INTERL1: vector.body:
+; VEC4_INTERL1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; VEC4_INTERL1-NEXT: [[VEC_IND:%.*]] = phi <4 x float> [ [[INDUCTION4]], %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ]
+; VEC4_INTERL1-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, float* %A, i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP9:%.*]] = bitcast float* [[TMP8]] to <4 x float>*
+; VEC4_INTERL1-NEXT: store <4 x float> [[VEC_IND]], <4 x float>* [[TMP9]], align 4
+; VEC4_INTERL1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
+; VEC4_INTERL1-NEXT: [[VEC_IND_NEXT]] = fsub fast <4 x float> [[VEC_IND]], [[DOTSPLAT6]]
+; VEC4_INTERL1: br i1 {{.*}}, label %middle.block, label %vector.body
+
+; VEC4_INTERL2-LABEL: @fp_iv_loop1(
+; VEC4_INTERL2: vector.ph:
+; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> undef, float %init, i32 0
+; VEC4_INTERL2-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> undef, <4 x i32> zeroinitializer
+; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT3:%.*]] = insertelement <4 x float> undef, float %fpinc, i32 0
+; VEC4_INTERL2-NEXT: [[DOTSPLAT4:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT3]], <4 x float> undef, <4 x i32> zeroinitializer
+; VEC4_INTERL2-NEXT: [[TMP5:%.*]] = fmul fast <4 x float> [[DOTSPLAT4]], <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>
+; VEC4_INTERL2-NEXT: [[INDUCTION5:%.*]] = fsub fast <4 x float> [[DOTSPLAT]], [[TMP5]]
+; VEC4_INTERL2-NEXT: [[TMP6:%.*]] = fmul fast float %fpinc, 4.000000e+00
+; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT6:%.*]] = insertelement <4 x float> undef, float [[TMP6]], i32 0
+; VEC4_INTERL2-NEXT: [[DOTSPLAT7:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT6]], <4 x float> undef, <4 x i32> zeroinitializer
+; VEC4_INTERL2-NEXT: br label %vector.body
+; VEC4_INTERL2: vector.body:
+; VEC4_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; VEC4_INTERL2-NEXT: [[VEC_IND:%.*]] = phi <4 x float> [ [[INDUCTION5]], %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ]
+; VEC4_INTERL2-NEXT: [[STEP_ADD:%.*]] = fsub fast <4 x float> [[VEC_IND]], [[DOTSPLAT7]]
+; VEC4_INTERL2-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, float* %A, i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP10:%.*]] = bitcast float* [[TMP9]] to <4 x float>*
+; VEC4_INTERL2-NEXT: store <4 x float> [[VEC_IND]], <4 x float>* [[TMP10]], align 4
+; VEC4_INTERL2-NEXT: [[TMP11:%.*]] = getelementptr float, float* [[TMP9]], i64 4
+; VEC4_INTERL2-NEXT: [[TMP12:%.*]] = bitcast float* [[TMP11]] to <4 x float>*
+; VEC4_INTERL2-NEXT: store <4 x float> [[STEP_ADD]], <4 x float>* [[TMP12]], align 4
+; VEC4_INTERL2-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 8
+; VEC4_INTERL2-NEXT: [[VEC_IND_NEXT]] = fsub fast <4 x float> [[STEP_ADD]], [[DOTSPLAT7]]
+; VEC4_INTERL2: br i1 {{.*}}, label %middle.block, label %vector.body
+
+; VEC1_INTERL2-LABEL: @fp_iv_loop1(
+; VEC1_INTERL2: vector.ph:
+; VEC1_INTERL2-NEXT: br label %vector.body
+; VEC1_INTERL2: vector.body:
+; VEC1_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; VEC1_INTERL2-NEXT: [[INDUCTION2:%.*]] = or i64 [[INDEX]], 1
+; VEC1_INTERL2-NEXT: [[TMP6:%.*]] = sitofp i64 [[INDEX]] to float
+; VEC1_INTERL2-NEXT: [[TMP7:%.*]] = fmul fast float %fpinc, [[TMP6]]
+; VEC1_INTERL2-NEXT: [[FP_OFFSET_IDX:%.*]] = fsub fast float %init, [[TMP7]]
+; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = fsub fast float [[FP_OFFSET_IDX]], %fpinc
+; VEC1_INTERL2-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, float* %A, i64 [[INDEX]]
+; VEC1_INTERL2-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, float* %A, i64 [[INDUCTION2]]
+; VEC1_INTERL2-NEXT: store float [[FP_OFFSET_IDX]], float* [[TMP9]], align 4
+; VEC1_INTERL2-NEXT: store float [[TMP8]], float* [[TMP10]], align 4
+; VEC1_INTERL2-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2
+; VEC1_INTERL2: br i1 {{.*}}, label %middle.block, label %vector.body
+
define void @fp_iv_loop1(float %init, float* noalias nocapture %A, i32 %N) #1 {
entry:
%cmp4 = icmp sgt i32 %N, 0
@@ -85,15 +114,20 @@ for.end: ; preds = %for.end.loopexit, %
;}
; VEC4_INTERL1-LABEL: @fp_iv_loop2(
-; VEC4_INTERL1: vector.body
-; VEC4_INTERL1: %[[index:.*]] = phi i64 [ 0, %vector.ph ]
-; VEC4_INTERL1: sitofp i64 %[[index]] to float
-; VEC4_INTERL1: %[[VAR1:.*]] = fmul fast float {{.*}}, 5.000000e-01
-; VEC4_INTERL1: %[[VAR2:.*]] = fadd fast float %[[VAR1]]
-; VEC4_INTERL1: insertelement <4 x float> undef, float %[[VAR2]], i32 0
-; VEC4_INTERL1: shufflevector <4 x float> {{.*}}, <4 x float> undef, <4 x i32> zeroinitializer
-; VEC4_INTERL1: fadd fast <4 x float> {{.*}}, <float 0.000000e+00, float 5.000000e-01, float 1.000000e+00, float 1.500000e+00>
-; VEC4_INTERL1: store <4 x float>
+; VEC4_INTERL1: vector.ph:
+; VEC4_INTERL1-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> undef, float %init, i32 0
+; VEC4_INTERL1-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> undef, <4 x i32> zeroinitializer
+; VEC4_INTERL1-NEXT: [[INDUCTION2:%.*]] = fadd fast <4 x float> [[DOTSPLAT]], <float 0.000000e+00, float 5.000000e-01, float 1.000000e+00, float 1.500000e+00>
+; VEC4_INTERL1-NEXT: br label %vector.body
+; VEC4_INTERL1: vector.body:
+; VEC4_INTERL1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; VEC4_INTERL1-NEXT: [[VEC_IND:%.*]] = phi <4 x float> [ [[INDUCTION2]], %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ]
+; VEC4_INTERL1-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, float* %A, i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP8:%.*]] = bitcast float* [[TMP7]] to <4 x float>*
+; VEC4_INTERL1-NEXT: store <4 x float> [[VEC_IND]], <4 x float>* [[TMP8]], align 4
+; VEC4_INTERL1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
+; VEC4_INTERL1-NEXT: [[VEC_IND_NEXT]] = fadd fast <4 x float> [[VEC_IND]], <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+; VEC4_INTERL1: br i1 {{.*}}, label %middle.block, label %vector.body
define void @fp_iv_loop2(float %init, float* noalias nocapture %A, i32 %N) #0 {
entry:
@@ -133,14 +167,43 @@ for.end: ; preds = %for.end.loopexit, %
; C[i] = y;
; }
;}
+
; VEC4_INTERL1-LABEL: @fp_iv_loop3(
-; VEC4_INTERL1: vector.body
-; VEC4_INTERL1: %[[index:.*]] = phi i64 [ 0, %vector.ph ]
-; VEC4_INTERL1: sitofp i64 %[[index]] to float
-; VEC4_INTERL1: %[[VAR1:.*]] = fmul fast float {{.*}}, -5.000000e-01
-; VEC4_INTERL1: fadd fast float %[[VAR1]]
-; VEC4_INTERL1: fadd fast <4 x float> {{.*}}, <float -5.000000e-01, float -1.000000e+00, float -1.500000e+00, float -2.000000e+00>
-; VEC4_INTERL1: store <4 x float>
+; VEC4_INTERL1: for.body.lr.ph:
+; VEC4_INTERL1: [[TMP0:%.*]] = load float, float* @fp_inc, align 4
+; VEC4_INTERL1: vector.ph:
+; VEC4_INTERL1-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> undef, float %init, i32 0
+; VEC4_INTERL1-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> undef, <4 x i32> zeroinitializer
+; VEC4_INTERL1-NEXT: [[DOTSPLATINSERT5:%.*]] = insertelement <4 x float> undef, float [[TMP0]], i32 0
+; VEC4_INTERL1-NEXT: [[DOTSPLAT6:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT5]], <4 x float> undef, <4 x i32> zeroinitializer
+; VEC4_INTERL1-NEXT: [[TMP7:%.*]] = fmul fast <4 x float> [[DOTSPLAT6]], <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>
+; VEC4_INTERL1-NEXT: [[INDUCTION7:%.*]] = fadd fast <4 x float> [[DOTSPLAT]], [[TMP7]]
+; VEC4_INTERL1-NEXT: [[TMP8:%.*]] = fmul fast float [[TMP0]], 4.000000e+00
+; VEC4_INTERL1-NEXT: [[DOTSPLATINSERT8:%.*]] = insertelement <4 x float> undef, float [[TMP8]], i32 0
+; VEC4_INTERL1-NEXT: [[DOTSPLAT9:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT8]], <4 x float> undef, <4 x i32> zeroinitializer
+; VEC4_INTERL1-NEXT: [[BROADCAST_SPLATINSERT12:%.*]] = insertelement <4 x float> undef, float [[TMP0]], i32 0
+; VEC4_INTERL1-NEXT: [[BROADCAST_SPLAT13:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT12]], <4 x float> undef, <4 x i32> zeroinitializer
+; VEC4_INTERL1-NEXT: br label [[VECTOR_BODY:%.*]]
+; VEC4_INTERL1: vector.body:
+; VEC4_INTERL1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; VEC4_INTERL1-NEXT: [[VEC_IND:%.*]] = phi <4 x float> [ <float 0x3FB99999A0000000, float 0xBFD99999A0000000, float 0xBFECCCCCC0000000, float 0xBFF6666660000000>, %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ]
+; VEC4_INTERL1-NEXT: [[VEC_IND10:%.*]] = phi <4 x float> [ [[INDUCTION7]], %vector.ph ], [ [[VEC_IND_NEXT11:%.*]], %vector.body ]
+; VEC4_INTERL1-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, float* %A, i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP13:%.*]] = bitcast float* [[TMP12]] to <4 x float>*
+; VEC4_INTERL1-NEXT: store <4 x float> [[VEC_IND10]], <4 x float>* [[TMP13]], align 4
+; VEC4_INTERL1-NEXT: [[TMP14:%.*]] = fadd fast <4 x float> [[VEC_IND10]], [[BROADCAST_SPLAT13]]
+; VEC4_INTERL1-NEXT: [[TMP15:%.*]] = fadd fast <4 x float> [[VEC_IND]], <float -5.000000e-01, float -5.000000e-01, float -5.000000e-01, float -5.000000e-01>
+; VEC4_INTERL1-NEXT: [[TMP16:%.*]] = fadd fast <4 x float> [[TMP15]], [[TMP14]]
+; VEC4_INTERL1-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, float* %B, i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP18:%.*]] = bitcast float* [[TMP17]] to <4 x float>*
+; VEC4_INTERL1-NEXT: store <4 x float> [[TMP16]], <4 x float>* [[TMP18]], align 4
+; VEC4_INTERL1-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, float* %C, i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP20:%.*]] = bitcast float* [[TMP19]] to <4 x float>*
+; VEC4_INTERL1-NEXT: store <4 x float> [[TMP15]], <4 x float>* [[TMP20]], align 4
+; VEC4_INTERL1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
+; VEC4_INTERL1-NEXT: [[VEC_IND_NEXT]] = fadd fast <4 x float> [[VEC_IND]], <float -2.000000e+00, float -2.000000e+00, float -2.000000e+00, float -2.000000e+00>
+; VEC4_INTERL1-NEXT: [[VEC_IND_NEXT11]] = fadd fast <4 x float> [[VEC_IND10]], [[DOTSPLAT9]]
+; VEC4_INTERL1: br i1 {{.*}}, label %middle.block, label %vector.body
define void @fp_iv_loop3(float %init, float* noalias nocapture %A, float* noalias nocapture %B, float* noalias nocapture %C, i32 %N) #1 {
entry:
@@ -186,10 +249,17 @@ for.end:
;}
; VEC4_INTERL1-LABEL: @fp_iv_loop4(
-; VEC4_INTERL1: vector.body
-; VEC4_INTERL1-NOT: fmul fast <4 x float>
-; VEC4_INTERL1: %[[induction:.*]] = fadd fast <4 x float> %{{.*}}, <float 0.000000e+00, float 5.000000e-01, float 1.000000e+00, float 1.500000e+00>
-; VEC4_INTERL1: store <4 x float> %[[induction]]
+; VEC4_INTERL1: vector.ph:
+; VEC4_INTERL1-NEXT: br label %vector.body
+; VEC4_INTERL1: vector.body:
+; VEC4_INTERL1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; VEC4_INTERL1-NEXT: [[VEC_IND:%.*]] = phi <4 x float> [ <float 1.000000e+00, float 1.500000e+00, float 2.000000e+00, float 2.500000e+00>, %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ]
+; VEC4_INTERL1-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, float* %A, i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: [[TMP8:%.*]] = bitcast float* [[TMP7]] to <4 x float>*
+; VEC4_INTERL1-NEXT: store <4 x float> [[VEC_IND]], <4 x float>* [[TMP8]], align 4
+; VEC4_INTERL1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
+; VEC4_INTERL1-NEXT: [[VEC_IND_NEXT]] = fadd fast <4 x float> [[VEC_IND]], <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+; VEC4_INTERL1: br i1 {{.*}}, label %middle.block, label %vector.body
define void @fp_iv_loop4(float* noalias nocapture %A, i32 %N) {
entry:
@@ -216,3 +286,55 @@ for.end.loopexit: ; preds = %for.body
for.end: ; preds = %for.end.loopexit, %entry
ret void
}
+
+; VEC2_INTERL1_PRED_STORE-LABEL: @non_primary_iv_float_scalar(
+; VEC2_INTERL1_PRED_STORE: vector.body:
+; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE7:.*]] ], [ 0, %min.iters.checked ]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP1:%.*]] = sitofp i64 [[INDEX]] to float
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, float* %A, i64 [[INDEX]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP3:%.*]] = bitcast float* [[TMP2]] to <2 x float>*
+; VEC2_INTERL1_PRED_STORE-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, <2 x float>* [[TMP3]], align 4
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP4:%.*]] = fcmp fast oeq <2 x float> [[WIDE_LOAD]], zeroinitializer
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0
+; VEC2_INTERL1_PRED_STORE-NEXT: br i1 [[TMP5]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; VEC2_INTERL1_PRED_STORE: [[PRED_STORE_IF]]:
+; VEC2_INTERL1_PRED_STORE-NEXT: store float [[TMP1]], float* [[TMP2]], align 4
+; VEC2_INTERL1_PRED_STORE-NEXT: br label %[[PRED_STORE_CONTINUE]]
+; VEC2_INTERL1_PRED_STORE: [[PRED_STORE_CONTINUE]]:
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1
+; VEC2_INTERL1_PRED_STORE-NEXT: br i1 [[TMP8]], label %[[PRED_STORE_IF6:.*]], label %[[PRED_STORE_CONTINUE7]]
+; VEC2_INTERL1_PRED_STORE: [[PRED_STORE_IF6]]:
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP9:%.*]] = fadd fast float [[TMP1]], 1.000000e+00
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP10:%.*]] = or i64 [[INDEX]], 1
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, float* %A, i64 [[TMP10]]
+; VEC2_INTERL1_PRED_STORE-NEXT: store float [[TMP9]], float* [[TMP11]], align 4
+; VEC2_INTERL1_PRED_STORE-NEXT: br label %[[PRED_STORE_CONTINUE7]]
+; VEC2_INTERL1_PRED_STORE: [[PRED_STORE_CONTINUE7]]:
+; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2
+; VEC2_INTERL1_PRED_STORE: br i1 {{.*}}, label %middle.block, label %vector.body
+
+define void @non_primary_iv_float_scalar(float* %A, i64 %N) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ %i.next, %for.inc ], [ 0, %entry ]
+ %j = phi float [ %j.next, %for.inc ], [ 0.0, %entry ]
+ %tmp0 = getelementptr inbounds float, float* %A, i64 %i
+ %tmp1 = load float, float* %tmp0, align 4
+ %tmp2 = fcmp fast oeq float %tmp1, 0.0
+ br i1 %tmp2, label %if.pred, label %for.inc
+
+if.pred:
+ store float %j, float* %tmp0, align 4
+ br label %for.inc
+
+for.inc:
+ %i.next = add nuw nsw i64 %i, 1
+ %j.next = fadd fast float %j, 1.0
+ %cond = icmp slt i64 %i.next, %N
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/if-conversion.ll b/test/Transforms/LoopVectorize/if-conversion.ll
index acf7b12540d3..d3a16e2075d1 100644
--- a/test/Transforms/LoopVectorize/if-conversion.ll
+++ b/test/Transforms/LoopVectorize/if-conversion.ll
@@ -18,9 +18,9 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
;CHECK-LABEL: @function0(
;CHECK: load <4 x i32>
+;CHECK: icmp sle <4 x i32>
;CHECK: mul <4 x i32>
;CHECK: add <4 x i32>
-;CHECK: icmp sle <4 x i32>
;CHECK: select <4 x i1>
;CHECK: ret i32
define i32 @function0(i32* nocapture %a, i32* nocapture %b, i32 %start, i32 %end) nounwind uwtable ssp {
@@ -71,8 +71,8 @@ for.end:
;CHECK-LABEL: @reduction_func(
;CHECK: load <4 x i32>
-;CHECK: add <4 x i32>
;CHECK: icmp slt <4 x i32>
+;CHECK: add <4 x i32>
;CHECK: select <4 x i1>
;CHECK: ret i32
define i32 @reduction_func(i32* nocapture %A, i32 %n) nounwind uwtable readonly ssp {
diff --git a/test/Transforms/LoopVectorize/if-pred-stores.ll b/test/Transforms/LoopVectorize/if-pred-stores.ll
index c4368148caf9..a1837b352eef 100644
--- a/test/Transforms/LoopVectorize/if-pred-stores.ll
+++ b/test/Transforms/LoopVectorize/if-pred-stores.ll
@@ -11,6 +11,7 @@ entry:
; VEC-LABEL: test
; VEC: %[[v0:.+]] = add i64 %index, 0
+; VEC: %[[v2:.+]] = getelementptr inbounds i32, i32* %f, i64 %[[v0]]
; VEC: %[[v8:.+]] = icmp sgt <2 x i32> %{{.*}}, <i32 100, i32 100>
; VEC: %[[v10:.+]] = and <2 x i1> %[[v8]], <i1 true, i1 true>
; VEC: %[[o1:.+]] = or <2 x i1> zeroinitializer, %[[v10]]
@@ -21,7 +22,6 @@ entry:
; VEC: [[cond]]:
; VEC: %[[v13:.+]] = extractelement <2 x i32> %wide.load, i32 0
; VEC: %[[v9a:.+]] = add nsw i32 %[[v13]], 20
-; VEC: %[[v2:.+]] = getelementptr inbounds i32, i32* %f, i64 %[[v0]]
; VEC: store i32 %[[v9a]], i32* %[[v2]], align 4
; VEC: br label %[[else:.+]]
;
diff --git a/test/Transforms/LoopVectorize/induction-step.ll b/test/Transforms/LoopVectorize/induction-step.ll
index f56456e82dfa..33e8ed067160 100644
--- a/test/Transforms/LoopVectorize/induction-step.ll
+++ b/test/Transforms/LoopVectorize/induction-step.ll
@@ -12,11 +12,30 @@
;}
; CHECK-LABEL: @induction_with_global(
-; CHECK: %[[INT_INC:.*]] = load i32, i32* @int_inc, align 4
-; CHECK: vector.body:
-; CHECK: %[[VAR1:.*]] = insertelement <8 x i32> undef, i32 %[[INT_INC]], i32 0
-; CHECK: %[[VAR2:.*]] = shufflevector <8 x i32> %[[VAR1]], <8 x i32> undef, <8 x i32> zeroinitializer
-; CHECK: mul <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, %[[VAR2]]
+; CHECK: for.body.lr.ph:
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @int_inc, align 4
+; CHECK: vector.ph:
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i32> undef, i32 %init, i32 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i32> [[DOTSPLATINSERT]], <8 x i32> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[DOTSPLATINSERT2:%.*]] = insertelement <8 x i32> undef, i32 [[TMP0]], i32 0
+; CHECK-NEXT: [[DOTSPLAT3:%.*]] = shufflevector <8 x i32> [[DOTSPLATINSERT2]], <8 x i32> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = mul <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, [[DOTSPLAT3]]
+; CHECK-NEXT: [[INDUCTION4:%.*]] = add <8 x i32> [[DOTSPLAT]], [[TMP6]]
+; CHECK-NEXT: [[TMP7:%.*]] = mul i32 [[TMP0]], 8
+; CHECK-NEXT: [[DOTSPLATINSERT5:%.*]] = insertelement <8 x i32> undef, i32 [[TMP7]], i32 0
+; CHECK-NEXT: [[DOTSPLAT6:%.*]] = shufflevector <8 x i32> [[DOTSPLATINSERT5]], <8 x i32> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: br label %vector.body
+; CHECK: vector.body:
+; CHECK-NEXT: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+; CHECK-NEXT: %vec.ind = phi <8 x i32> [ [[INDUCTION4]], %vector.ph ], [ %vec.ind.next, %vector.body ]
+; CHECK: [[TMP8:%.*]] = add i64 %index, 0
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, i32* [[TMP9]], i32 0
+; CHECK-NEXT: [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <8 x i32>*
+; CHECK-NEXT: store <8 x i32> %vec.ind, <8 x i32>* [[TMP11]], align 4
+; CHECK: %index.next = add i64 %index, 8
+; CHECK-NEXT: %vec.ind.next = add <8 x i32> %vec.ind, [[DOTSPLAT6]]
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -66,13 +85,28 @@ for.end: ; preds = %for.end.loopexit, %
;}
; CHECK-LABEL: @induction_with_loop_inv(
-; CHECK: for.cond1.preheader:
-; CHECK: %[[INDVAR0:.*]] = phi i32 [ 0,
-; CHECK: %[[INDVAR1:.*]] = phi i32 [ 0,
-; CHECK: vector.body:
-; CHECK: %[[VAR1:.*]] = insertelement <8 x i32> undef, i32 %[[INDVAR1]], i32 0
-; CHECK: %[[VAR2:.*]] = shufflevector <8 x i32> %[[VAR1]], <8 x i32> undef, <8 x i32> zeroinitializer
-; CHECK: mul <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, %[[VAR2]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i32> undef, i32 %x.011, i32 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i32> [[DOTSPLATINSERT]], <8 x i32> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[DOTSPLATINSERT2:%.*]] = insertelement <8 x i32> undef, i32 %j.012, i32 0
+; CHECK-NEXT: [[DOTSPLAT3:%.*]] = shufflevector <8 x i32> [[DOTSPLATINSERT2]], <8 x i32> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = mul <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, [[DOTSPLAT3]]
+; CHECK-NEXT: [[INDUCTION4:%.*]] = add <8 x i32> [[DOTSPLAT]], [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = mul i32 %j.012, 8
+; CHECK-NEXT: [[DOTSPLATINSERT5:%.*]] = insertelement <8 x i32> undef, i32 [[TMP5]], i32 0
+; CHECK-NEXT: [[DOTSPLAT6:%.*]] = shufflevector <8 x i32> [[DOTSPLATINSERT5]], <8 x i32> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: br label %vector.body
+; CHECK: vector.body:
+; CHECK-NEXT: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+; CHECK-NEXT: %vec.ind = phi <8 x i32> [ [[INDUCTION4]], %vector.ph ], [ %vec.ind.next, %vector.body ]
+; CHECK: [[TMP6:%.*]] = add i64 %index, 0
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP7]], i32 0
+; CHECK-NEXT: [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <8 x i32>*
+; CHECK-NEXT: store <8 x i32> %vec.ind, <8 x i32>* [[TMP9]], align 4
+; CHECK: %index.next = add i64 %index, 8
+; CHECK-NEXT: %vec.ind.next = add <8 x i32> %vec.ind, [[DOTSPLAT6]]
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
define i32 @induction_with_loop_inv(i32 %init, i32* noalias nocapture %A, i32 %N, i32 %M) {
entry:
@@ -122,3 +156,46 @@ for.end6: ; preds = %for.end6.loopexit,
%x.0.lcssa = phi i32 [ %init, %entry ], [ %x.1.lcssa.lcssa, %for.end6.loopexit ]
ret i32 %x.0.lcssa
}
+
+
+; CHECK-LABEL: @non_primary_iv_loop_inv_trunc(
+; CHECK: vector.ph:
+; CHECK: [[TMP3:%.*]] = trunc i64 %step to i32
+; CHECK-NEXT: [[DOTSPLATINSERT5:%.*]] = insertelement <8 x i32> undef, i32 [[TMP3]], i32 0
+; CHECK-NEXT: [[DOTSPLAT6:%.*]] = shufflevector <8 x i32> [[DOTSPLATINSERT5]], <8 x i32> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = mul <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, [[DOTSPLAT6]]
+; CHECK-NEXT: [[INDUCTION7:%.*]] = add <8 x i32> zeroinitializer, [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = mul i32 [[TMP3]], 8
+; CHECK-NEXT: [[DOTSPLATINSERT8:%.*]] = insertelement <8 x i32> undef, i32 [[TMP5]], i32 0
+; CHECK-NEXT: [[DOTSPLAT9:%.*]] = shufflevector <8 x i32> [[DOTSPLATINSERT8]], <8 x i32> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT: br label %vector.body
+; CHECK: vector.body:
+; CHECK-NEXT: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+; CHECK: [[VEC_IND10:%.*]] = phi <8 x i32> [ [[INDUCTION7]], %vector.ph ], [ [[VEC_IND_NEXT11:%.*]], %vector.body ]
+; CHECK: [[TMP6:%.*]] = add i64 %index, 0
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP7]], i32 0
+; CHECK-NEXT: [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <8 x i32>*
+; CHECK-NEXT: store <8 x i32> [[VEC_IND10]], <8 x i32>* [[TMP9]], align 4
+; CHECK-NEXT: %index.next = add i64 %index, 8
+; CHECK: [[VEC_IND_NEXT11]] = add <8 x i32> [[VEC_IND10]], [[DOTSPLAT9]]
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
+
+define void @non_primary_iv_loop_inv_trunc(i32* %a, i64 %n, i64 %step) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
+ %j = phi i64 [ %j.next, %for.body ], [ 0, %entry ]
+ %tmp0 = getelementptr inbounds i32, i32* %a, i64 %i
+ %tmp1 = trunc i64 %j to i32
+ store i32 %tmp1, i32* %tmp0, align 4
+ %i.next = add nuw nsw i64 %i, 1
+ %j.next = add nuw nsw i64 %j, %step
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/induction.ll b/test/Transforms/LoopVectorize/induction.ll
index 6213b4a7c2e9..0d7d9fe0c1b8 100644
--- a/test/Transforms/LoopVectorize/induction.ll
+++ b/test/Transforms/LoopVectorize/induction.ll
@@ -7,11 +7,19 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; Make sure that we can handle multiple integer induction variables.
+;
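+; A rough C equivalent of the loop below (approximate, for illustration;
+; variable names are invented):
+;
+;   void multi_int_induction(int *A, int N) {
+;     for (long i = 0, count = 190; i < N; ++i, ++count)
+;       A[i] = count;
+;   }
+;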
; CHECK-LABEL: @multi_int_induction(
-; CHECK: vector.body:
-; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-; CHECK: %[[VAR:.*]] = trunc i64 %index to i32
-; CHECK: %offset.idx = add i32 190, %[[VAR]]
+; CHECK: vector.body:
+; CHECK-NEXT: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+; CHECK-NEXT: %vec.ind = phi <2 x i32> [ <i32 190, i32 191>, %vector.ph ], [ %vec.ind.next, %vector.body ]
+; CHECK: [[TMP3:%.*]] = add i64 %index, 0
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, i32* %A, i64 [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, i32* [[TMP4]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32* [[TMP5]] to <2 x i32>*
+; CHECK-NEXT: store <2 x i32> %vec.ind, <2 x i32>* [[TMP6]], align 4
+; CHECK: %index.next = add i64 %index, 2
+; CHECK-NEXT: %vec.ind.next = add <2 x i32> %vec.ind, <i32 2, i32 2>
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
define void @multi_int_induction(i32* %A, i32 %N) {
for.body.lr.ph:
br label %for.body
@@ -765,3 +773,79 @@ for.body:
exit:
ret void
}
+
+; CHECK-LABEL: @non_primary_iv_trunc(
+; CHECK: vector.body:
+; CHECK-NEXT: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+; CHECK: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 2>, %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ]
+; CHECK: [[TMP3:%.*]] = add i64 %index, 0
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, i32* %a, i64 [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, i32* [[TMP4]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32* [[TMP5]] to <2 x i32>*
+; CHECK-NEXT: store <2 x i32> [[VEC_IND]], <2 x i32>* [[TMP6]], align 4
+; CHECK-NEXT: %index.next = add i64 %index, 2
+; CHECK: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], <i32 4, i32 4>
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
+define void @non_primary_iv_trunc(i32* %a, i64 %n) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
+ %j = phi i64 [ %j.next, %for.body ], [ 0, %entry ]
+ %tmp0 = getelementptr inbounds i32, i32* %a, i64 %i
+ %tmp1 = trunc i64 %j to i32
+ store i32 %tmp1, i32* %tmp0, align 4
+ %i.next = add nuw nsw i64 %i, 1
+ %j.next = add nuw nsw i64 %j, 2
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
+
+; PR32419. Ensure we transform truncated non-primary induction variables. In
+; the test case below we replace %tmp1 with a new induction variable. Because
+; the truncated value is non-primary, we must compute an offset from the
+; primary induction variable.
+;
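+; A rough C equivalent of the loop below (approximate, for illustration;
+; the remainder is unsigned and operates on i truncated to 16 bits):
+;
+;   int PR32419(int a, unsigned short b) {
+;     for (int i = -20; i != 0; ++i) {
+;       short t = (short)i;
+;       a |= (short)(t == 0 ? 0 : b % (unsigned short)t);
+;     }
+;     return a;
+;   }
+;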
+; CHECK-LABEL: @PR32419(
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %[[PRED_UREM_CONTINUE4:.*]] ]
+; CHECK: [[OFFSET_IDX:%.*]] = add i32 -20, [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[OFFSET_IDX]] to i16
+; CHECK: [[TMP8:%.*]] = add i16 [[TMP1]], 0
+; CHECK-NEXT: [[TMP9:%.*]] = urem i16 %b, [[TMP8]]
+; CHECK: [[TMP15:%.*]] = add i16 [[TMP1]], 1
+; CHECK-NEXT: [[TMP16:%.*]] = urem i16 %b, [[TMP15]]
+; CHECK: [[PRED_UREM_CONTINUE4]]:
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
+;
+define i32 @PR32419(i32 %a, i16 %b) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i32 [ -20, %entry ], [ %i.next, %for.inc ]
+ %tmp0 = phi i32 [ %a, %entry ], [ %tmp6, %for.inc ]
+ %tmp1 = trunc i32 %i to i16
+ %tmp2 = icmp eq i16 %tmp1, 0
+ br i1 %tmp2, label %for.inc, label %for.cond
+
+for.cond:
+ %tmp3 = urem i16 %b, %tmp1
+ br label %for.inc
+
+for.inc:
+ %tmp4 = phi i16 [ %tmp3, %for.cond ], [ 0, %for.body ]
+ %tmp5 = sext i16 %tmp4 to i32
+ %tmp6 = or i32 %tmp0, %tmp5
+ %i.next = add nsw i32 %i, 1
+ %cond = icmp eq i32 %i.next, 0
+ br i1 %cond, label %for.end, label %for.body
+
+for.end:
+ %tmp7 = phi i32 [ %tmp6, %for.inc ]
+ ret i32 %tmp7
+}
diff --git a/test/Transforms/LoopVectorize/lcssa-crash.ll b/test/Transforms/LoopVectorize/lcssa-crash.ll
index e6bd6ed61e22..3d3ef9e05935 100644
--- a/test/Transforms/LoopVectorize/lcssa-crash.ll
+++ b/test/Transforms/LoopVectorize/lcssa-crash.ll
@@ -37,3 +37,26 @@ L0:
L1:
ret void
}
+
+; This loop has different uniform instructions before and after LCSSA.
+define void @test3() {
+entry:
+ %add41 = add i32 undef, undef
+ %idxprom4736 = zext i32 %add41 to i64
+ br label %while.body
+
+while.body:
+ %idxprom4738 = phi i64 [ %idxprom47, %while.body ], [ %idxprom4736, %entry ]
+ %pos.337 = phi i32 [ %inc46, %while.body ], [ %add41, %entry ]
+ %inc46 = add i32 %pos.337, 1
+ %arrayidx48 = getelementptr inbounds [1024 x i8], [1024 x i8]* undef, i64 0, i64 %idxprom4738
+ store i8 0, i8* %arrayidx48, align 1
+ %and43 = and i32 %inc46, 3
+ %cmp44 = icmp eq i32 %and43, 0
+ %idxprom47 = zext i32 %inc46 to i64
+ br i1 %cmp44, label %while.end, label %while.body
+
+while.end:
+ %add58 = add i32 %inc46, 4
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/lifetime.ll b/test/Transforms/LoopVectorize/lifetime.ll
index 6e525ca1d822..860fe2d983cd 100644
--- a/test/Transforms/LoopVectorize/lifetime.ll
+++ b/test/Transforms/LoopVectorize/lifetime.ll
@@ -13,23 +13,23 @@ define void @test(i32 *%d) {
entry:
%arr = alloca [1024 x i32], align 16
%0 = bitcast [1024 x i32]* %arr to i8*
- call void @llvm.lifetime.start(i64 4096, i8* %0) #1
+ call void @llvm.lifetime.start.p0i8(i64 4096, i8* %0) #1
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- call void @llvm.lifetime.end(i64 4096, i8* %0) #1
+ call void @llvm.lifetime.end.p0i8(i64 4096, i8* %0) #1
%arrayidx = getelementptr inbounds i32, i32* %d, i64 %indvars.iv
%1 = load i32, i32* %arrayidx, align 8
store i32 100, i32* %arrayidx, align 8
- call void @llvm.lifetime.start(i64 4096, i8* %0) #1
+ call void @llvm.lifetime.start.p0i8(i64 4096, i8* %0) #1
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp ne i32 %lftr.wideiv, 128
br i1 %exitcond, label %for.body, label %for.end
for.end:
- call void @llvm.lifetime.end(i64 4096, i8* %0) #1
+ call void @llvm.lifetime.end.p0i8(i64 4096, i8* %0) #1
ret void
}
@@ -42,24 +42,24 @@ define void @testbitcast(i32 *%d) {
entry:
%arr = alloca [1024 x i32], align 16
%0 = bitcast [1024 x i32]* %arr to i8*
- call void @llvm.lifetime.start(i64 4096, i8* %0) #1
+ call void @llvm.lifetime.start.p0i8(i64 4096, i8* %0) #1
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%1 = bitcast [1024 x i32]* %arr to i8*
- call void @llvm.lifetime.end(i64 4096, i8* %1) #1
+ call void @llvm.lifetime.end.p0i8(i64 4096, i8* %1) #1
%arrayidx = getelementptr inbounds i32, i32* %d, i64 %indvars.iv
%2 = load i32, i32* %arrayidx, align 8
store i32 100, i32* %arrayidx, align 8
- call void @llvm.lifetime.start(i64 4096, i8* %1) #1
+ call void @llvm.lifetime.start.p0i8(i64 4096, i8* %1) #1
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp ne i32 %lftr.wideiv, 128
br i1 %exitcond, label %for.body, label %for.end
for.end:
- call void @llvm.lifetime.end(i64 4096, i8* %0) #1
+ call void @llvm.lifetime.end.p0i8(i64 4096, i8* %0) #1
ret void
}
@@ -77,11 +77,11 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%0 = getelementptr [1024 x i32], [1024 x i32]* %arr, i32 0, i64 %indvars.iv
%1 = bitcast [1024 x i32]* %arr to i8*
- call void @llvm.lifetime.end(i64 4096, i8* %1) #1
+ call void @llvm.lifetime.end.p0i8(i64 4096, i8* %1) #1
%arrayidx = getelementptr inbounds i32, i32* %d, i64 %indvars.iv
%2 = load i32, i32* %arrayidx, align 8
store i32 100, i32* %arrayidx, align 8
- call void @llvm.lifetime.start(i64 4096, i8* %1) #1
+ call void @llvm.lifetime.start.p0i8(i64 4096, i8* %1) #1
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp ne i32 %lftr.wideiv, 128
@@ -91,6 +91,6 @@ for.end:
ret void
}
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
diff --git a/test/Transforms/LoopVectorize/loop-scalars.ll b/test/Transforms/LoopVectorize/loop-scalars.ll
new file mode 100644
index 000000000000..4dcd5993c128
--- /dev/null
+++ b/test/Transforms/LoopVectorize/loop-scalars.ll
@@ -0,0 +1,143 @@
+; REQUIRES: asserts
+; RUN: opt < %s -loop-vectorize -force-vector-width=2 -force-vector-interleave=1 -instcombine -debug-only=loop-vectorize -disable-output -print-after=instcombine 2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+
+; CHECK-LABEL: vector_gep
+; CHECK-NOT: LV: Found scalar instruction: %tmp0 = getelementptr inbounds i32, i32* %b, i64 %i
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, i32* %b, <2 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32*, i32** %a, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32** [[TMP2]] to <2 x i32*>*
+; CHECK-NEXT: store <2 x i32*> [[TMP1]], <2 x i32*>* [[TMP3]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], <i64 2, i64 2>
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
+;
+define void @vector_gep(i32** %a, i32 *%b, i64 %n) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
+ %tmp0 = getelementptr inbounds i32, i32* %b, i64 %i
+ %tmp1 = getelementptr inbounds i32*, i32** %a, i64 %i
+ store i32* %tmp0, i32** %tmp1, align 8
+ %i.next = add nuw nsw i64 %i, 1
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
+
+; CHECK-LABEL: scalar_store
+; CHECK: LV: Found scalar instruction: %tmp1 = getelementptr inbounds i32*, i32** %a, i64 %i
+; CHECK-NEXT: LV: Found scalar instruction: %tmp0 = getelementptr inbounds i32, i32* %b, i64 %i
+; CHECK-NEXT: LV: Found scalar instruction: %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
+; CHECK-NEXT: LV: Found scalar instruction: %i.next = add nuw nsw i64 %i, 2
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[OFFSET_IDX]], 2
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* %b, i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* %b, i64 [[TMP4]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32*, i32** %a, i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32*, i32** %a, i64 [[TMP4]]
+; CHECK-NEXT: store i32* [[TMP5]], i32** [[TMP7]], align 8
+; CHECK-NEXT: store i32* [[TMP6]], i32** [[TMP8]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
+;
+define void @scalar_store(i32** %a, i32 *%b, i64 %n) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
+ %tmp0 = getelementptr inbounds i32, i32* %b, i64 %i
+ %tmp1 = getelementptr inbounds i32*, i32** %a, i64 %i
+ store i32* %tmp0, i32** %tmp1, align 8
+ %i.next = add nuw nsw i64 %i, 2
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
+
+; CHECK-LABEL: expansion
+; CHECK: LV: Found scalar instruction: %tmp3 = getelementptr inbounds i32*, i32** %tmp2, i64 %i
+; CHECK-NEXT: LV: Found scalar instruction: %tmp1 = bitcast i64* %tmp0 to i32*
+; CHECK-NEXT: LV: Found scalar instruction: %tmp2 = getelementptr inbounds i32*, i32** %a, i64 0
+; CHECK-NEXT: LV: Found scalar instruction: %tmp0 = getelementptr inbounds i64, i64* %b, i64 %i
+; CHECK-NEXT: LV: Found scalar instruction: %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
+; CHECK-NEXT: LV: Found scalar instruction: %i.next = add nuw nsw i64 %i, 2
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[OFFSET_IDX]], 2
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, i64* %b, i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, i64* %b, i64 [[TMP4]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32*, i32** %a, i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32*, i32** %a, i64 [[TMP4]]
+; CHECK-NEXT: [[TMP9:%.*]] = bitcast i32** [[TMP7]] to i64**
+; CHECK-NEXT: store i64* [[TMP5]], i64** [[TMP9]], align 8
+; CHECK-NEXT: [[TMP10:%.*]] = bitcast i32** [[TMP8]] to i64**
+; CHECK-NEXT: store i64* [[TMP6]], i64** [[TMP10]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
+;
+define void @expansion(i32** %a, i64 *%b, i64 %n) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
+ %tmp0 = getelementptr inbounds i64, i64* %b, i64 %i
+ %tmp1 = bitcast i64* %tmp0 to i32*
+ %tmp2 = getelementptr inbounds i32*, i32** %a, i64 0
+ %tmp3 = getelementptr inbounds i32*, i32** %tmp2, i64 %i
+ store i32* %tmp1, i32** %tmp3, align 8
+ %i.next = add nuw nsw i64 %i, 2
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
+
+; CHECK-LABEL: no_gep_or_bitcast
+; CHECK-NOT: LV: Found scalar instruction: %tmp1 = load i32*, i32** %tmp0, align 8
+; CHECK: LV: Found scalar instruction: %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
+; CHECK-NEXT: LV: Found scalar instruction: %i.next = add nuw nsw i64 %i, 1
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32*, i32** %a, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32** [[TMP1]] to <2 x i32*>*
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32*>, <2 x i32*>* [[TMP2]], align 8
+; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i32*> [[WIDE_LOAD]], i32 0
+; CHECK-NEXT: store i32 0, i32* [[TMP3]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i32*> [[WIDE_LOAD]], i32 1
+; CHECK-NEXT: store i32 0, i32* [[TMP4]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
+;
+define void @no_gep_or_bitcast(i32** noalias %a, i64 %n) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
+ %tmp0 = getelementptr inbounds i32*, i32** %a, i64 %i
+ %tmp1 = load i32*, i32** %tmp0, align 8
+ store i32 0, i32* %tmp1, align 8
+ %i.next = add nuw nsw i64 %i, 1
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/multiple-strides-vectorization.ll b/test/Transforms/LoopVectorize/multiple-strides-vectorization.ll
index a310b10a5c81..5c87dc435c7c 100644
--- a/test/Transforms/LoopVectorize/multiple-strides-vectorization.ll
+++ b/test/Transforms/LoopVectorize/multiple-strides-vectorization.ll
@@ -13,9 +13,9 @@
; int v3[Z][Z];
; } s;
;
-; void slow_function (s* const obj) {
+; void slow_function (s* const obj, int z) {
; for (int j=0; j<Z; j++) {
-; for (int k=0; k<Z; k++) {
+; for (int k=0; k<z; k++) {
; int x = obj->v1[k] + obj->v2[j];
; obj->v3[j][k] += x;
; }
@@ -31,7 +31,7 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
%struct.s = type { [32 x i32], [32 x i32], [32 x [32 x i32]] }
-define void @Test(%struct.s* nocapture %obj) #0 {
+define void @Test(%struct.s* nocapture %obj, i64 %z) #0 {
br label %.outer.preheader
@@ -59,6 +59,6 @@ define void @Test(%struct.s* nocapture %obj) #0 {
%8 = add nsw i32 %5, %7
store i32 %8, i32* %6
%j.next = add nuw nsw i64 %j, 1
- %exitcond.inner = icmp eq i64 %j.next, 32
+ %exitcond.inner = icmp eq i64 %j.next, %z
br i1 %exitcond.inner, label %.outer, label %.inner
}
diff --git a/test/Transforms/LoopVectorize/partial-lcssa.ll b/test/Transforms/LoopVectorize/partial-lcssa.ll
new file mode 100644
index 000000000000..1306ed971c47
--- /dev/null
+++ b/test/Transforms/LoopVectorize/partial-lcssa.ll
@@ -0,0 +1,54 @@
+; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -S | FileCheck %s
+; We vectorize the inner loop, so we have to put it in LCSSA form.
+; However, there's no reason to touch the outer loop.
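+;
+; A rough C equivalent (approximate, for illustration):
+;
+;   for (j = 0; ; j++) {       // outer loop: left untouched
+;     for (i = 0; ; i++) {     // inner loop: vectorized, needs LCSSA
+;       B[i] = A[i];
+;       if (i == n) break;
+;     }
+;     *O1 = i;                 // inner IV used outside the inner loop
+;     if (j == m) break;
+;   }
+;   *O2 = j;                   // outer IV used outside the outer loop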
+
+; CHECK-LABEL: @foo
+; CHECK-LABEL: for.end.inner.loopexit:
+; CHECK: %[[LCSSAPHI:.*]] = phi i64 [ %indvars.iv, %for.body.inner ], [ %{{.*}}, %middle.block ]
+; CHECK: store i64 %[[LCSSAPHI]], i64* %O1, align 4
+; CHECK-LABEL: for.end.outer.loopexit
+; CHECK: store i64 %indvars.outer, i64* %O2, align 4
+
+
+define i64 @foo(i32* nocapture %A, i32* nocapture %B, i64 %n, i64 %m, i64* %O1, i64* %O2) {
+entry:
+ %cmp = icmp sgt i64 %n, 0
+ br i1 %cmp, label %for.body.outer.preheader, label %for.end.outer
+
+for.body.outer.preheader: ; preds = %entry
+ br label %for.body.outer
+
+for.body.outer: ; preds = %for.body.outer.preheader, %for.end.inner
+ %indvars.outer = phi i64 [ %indvars.outer.next, %for.end.inner ], [ 0, %for.body.outer.preheader ]
+ %cmp2 = icmp sgt i64 %m, 0
+ br i1 %cmp2, label %for.body.inner.preheader, label %for.end.inner
+
+for.body.inner.preheader: ; preds = %for.body.outer
+ br label %for.body.inner
+
+for.body.inner: ; preds = %for.body.inner.preheader, %for.body.inner
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body.inner ], [ 0, %for.body.inner.preheader ]
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %v = load i32, i32* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
+ store i32 %v, i32* %arrayidx2, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv, %n
+ br i1 %exitcond, label %for.end.inner.loopexit, label %for.body.inner
+
+for.end.inner.loopexit: ; preds = %for.body.inner
+ store i64 %indvars.iv, i64 *%O1, align 4
+ br label %for.end.inner
+
+for.end.inner: ; preds = %for.end.inner.loopexit, %for.body.outer
+ %indvars.outer.next = add i64 %indvars.outer, 1
+ %exitcond.outer = icmp eq i64 %indvars.outer, %m
+ br i1 %exitcond.outer, label %for.end.outer.loopexit, label %for.body.outer
+
+for.end.outer.loopexit: ; preds = %for.end.inner
+ store i64 %indvars.outer, i64 *%O2, align 4
+ br label %for.end.outer
+
+for.end.outer: ; preds = %for.end.outer.loopexit, %entry
+ ret i64 undef
+}
diff --git a/test/Transforms/LoopVectorize/pr31098.ll b/test/Transforms/LoopVectorize/pr31098.ll
new file mode 100644
index 000000000000..368a948557c3
--- /dev/null
+++ b/test/Transforms/LoopVectorize/pr31098.ll
@@ -0,0 +1,100 @@
+; REQUIRES: asserts
+; RUN: opt -S -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -enable-interleaved-mem-accesses=true -debug-only=loop-accesses < %s 2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Check that the compile-time-unknown dependence distance is resolved
+; statically. Due to the non-unit stride of the accesses in this testcase,
+; we are currently unable to create runtime dependence checks, so if we do
+; not resolve the dependence statically we cannot vectorize the loop.
+;
+; Specifically in this example, during dependence analysis we get 6 unknown
+; dependence distances between the 8 real/imaginary accesses below:
+; dist = 8*D, 4+8*D, -4+8*D, -8*D, 4-8*D, -4-8*D.
+; At compile time we can prove for all of the above that |dist|>loopBound*step
+; (where the step is 8 bytes, and the loopBound is D-1), and thereby conclude
+; that there are no dependencies (without runtime tests):
+; |8*D|>8*D-8, |4+8*D|>8*D-8, |-4+8*D|>8*D-8, etc.
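+; For example, with D = 4 the distances above evaluate (in bytes) to
+; 32, 36, 28, -32, -28 and -36, while loopBound*step = (D-1)*8 = 24,
+; so |dist| > 24 holds for every pair.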
+
+; #include <stdlib.h>
+; class Complex {
+; private:
+; float real_;
+; float imaginary_;
+;
+; public:
+; Complex() : real_(0), imaginary_(0) { }
+; Complex(float real, float imaginary) : real_(real), imaginary_(imaginary) { }
+; Complex(const Complex &rhs) : real_(rhs.real()), imaginary_(rhs.imaginary()) { }
+;
+; inline float real() const { return real_; }
+; inline float imaginary() const { return imaginary_; }
+;
+; Complex operator+(const Complex& rhs) const
+; {
+; return Complex(real_ + rhs.real_, imaginary_ + rhs.imaginary_);
+; }
+;
+; Complex operator-(const Complex& rhs) const
+; {
+; return Complex(real_ - rhs.real_, imaginary_ - rhs.imaginary_);
+; }
+; };
+;
+; void Test(Complex *out, size_t size)
+; {
+; size_t D = size / 2;
+; for (size_t offset = 0; offset < D; ++offset)
+; {
+; Complex t0 = out[offset];
+; Complex t1 = out[offset + D];
+; out[offset] = t1 + t0;
+; out[offset + D] = t0 - t1;
+; }
+; }
+
+; CHECK-LABEL: Test
+; CHECK: LAA: No unsafe dependent memory operations in loop. We don't need runtime memory checks.
+; CHECK: vector.body:
+; CHECK: <4 x i32>
+
+%class.Complex = type { float, float }
+
+define void @Test(%class.Complex* nocapture %out, i64 %size) local_unnamed_addr {
+entry:
+ %div = lshr i64 %size, 1
+ %cmp47 = icmp eq i64 %div, 0
+ br i1 %cmp47, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:
+ br label %for.body
+
+for.cond.cleanup.loopexit:
+ br label %for.cond.cleanup
+
+for.cond.cleanup:
+ ret void
+
+for.body:
+ %offset.048 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ]
+ %0 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %offset.048, i32 0
+ %1 = load float, float* %0, align 4
+ %imaginary_.i.i = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %offset.048, i32 1
+ %2 = load float, float* %imaginary_.i.i, align 4
+ %add = add nuw i64 %offset.048, %div
+ %3 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add, i32 0
+ %4 = load float, float* %3, align 4
+ %imaginary_.i.i28 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add, i32 1
+ %5 = load float, float* %imaginary_.i.i28, align 4
+ %add.i = fadd fast float %4, %1
+ %add4.i = fadd fast float %5, %2
+ store float %add.i, float* %0, align 4
+ store float %add4.i, float* %imaginary_.i.i, align 4
+ %sub.i = fsub fast float %1, %4
+ %sub4.i = fsub fast float %2, %5
+ store float %sub.i, float* %3, align 4
+ store float %sub4.i, float* %imaginary_.i.i28, align 4
+ %inc = add nuw nsw i64 %offset.048, 1
+ %exitcond = icmp eq i64 %inc, %div
+ br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
+}
diff --git a/test/Transforms/LoopVectorize/pr31190.ll b/test/Transforms/LoopVectorize/pr31190.ll
index afb1754983cd..1ff8b2ba7ce4 100644
--- a/test/Transforms/LoopVectorize/pr31190.ll
+++ b/test/Transforms/LoopVectorize/pr31190.ll
@@ -9,13 +9,6 @@
; Since %inc54 is the IV of the outer loop, and %0 equivalent to it,
; we get the situation described above.
-; This test uses the new PM, because with the old PM, running loop-vectorize
-; would explicitly run loop-simplify. Even though this loop is already in
-; simplified form, loop-simplify would still clean up the phi.
-; The reason this matters is that in a real optimizer pipeline, LICM can create
-; such PHIs, and since it preserves loop simplified form, the cleanup has
-; no chance to run.
-
; Code that leads to this situation can look something like:
;
; int a, b[1], c;
@@ -28,11 +21,14 @@
;
; The PHI is an artifact of the register promotion of c.
+; Note that we can no longer get the vectorizer to actually see such PHIs,
+; because LV now simplifies the loop internally, but the test is still
+; useful as a regression test, and in case loop-simplify behavior changes.
+
@c = external global i32, align 4
@a = external global i32, align 4
@b = external global [1 x i32], align 4
-; CHECK: LV: PHI is a recurrence with respect to an outer loop.
; CHECK: LV: Not vectorizing: Cannot prove legality.
; CHECK-LABEL: @test
define void @test() {
diff --git a/test/Transforms/LoopVectorize/reduction.ll b/test/Transforms/LoopVectorize/reduction.ll
index 4b300e04ea26..f521b623fad2 100644
--- a/test/Transforms/LoopVectorize/reduction.ll
+++ b/test/Transforms/LoopVectorize/reduction.ll
@@ -493,3 +493,49 @@ exit:
%inc.2 = add nsw i32 %inc511.1.inc4.1, 2
ret i32 %inc.2
}
+
+;CHECK-LABEL: @reduction_sum_multiuse(
+;CHECK: phi <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: add <4 x i32>
+;CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+;CHECK: add <4 x i32>
+;CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+;CHECK: add <4 x i32>
+;CHECK: extractelement <4 x i32> %{{.*}}, i32 0
+;CHECK: %sum.lcssa = phi i32 [ %[[SCALAR:.*]], %.lr.ph ], [ %[[VECTOR:.*]], %middle.block ]
+;CHECK: %sum.copy = phi i32 [ %[[SCALAR]], %.lr.ph ], [ %[[VECTOR]], %middle.block ]
+;CHECK: ret i32
+define i32 @reduction_sum_multiuse(i32 %n, i32* noalias nocapture %A, i32* noalias nocapture %B) {
+ %1 = icmp sgt i32 %n, 0
+ br i1 %1, label %.lr.ph.preheader, label %end
+.lr.ph.preheader: ; preds = %0
+ br label %.lr.ph
+
+.lr.ph: ; preds = %0, %.lr.ph
+ %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %.lr.ph.preheader ]
+ %sum.02 = phi i32 [ %9, %.lr.ph ], [ 0, %.lr.ph.preheader ]
+ %2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %3 = load i32, i32* %2, align 4
+ %4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
+ %5 = load i32, i32* %4, align 4
+ %6 = trunc i64 %indvars.iv to i32
+ %7 = add i32 %sum.02, %6
+ %8 = add i32 %7, %3
+ %9 = add i32 %8, %5
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %0
+ %sum.lcssa = phi i32 [ %9, %.lr.ph ]
+ %sum.copy = phi i32 [ %9, %.lr.ph ]
+ br label %end
+
+end:
+ %f1 = phi i32 [ 0, %0 ], [ %sum.lcssa, %._crit_edge ]
+ %f2 = phi i32 [ 0, %0 ], [ %sum.copy, %._crit_edge ]
+ %final = add i32 %f1, %f2
+ ret i32 %final
+}
diff --git a/test/Transforms/LoopVectorize/reverse_iter.ll b/test/Transforms/LoopVectorize/reverse_iter.ll
index a6e2abda36d9..bd057698280b 100644
--- a/test/Transforms/LoopVectorize/reverse_iter.ll
+++ b/test/Transforms/LoopVectorize/reverse_iter.ll
@@ -2,7 +2,8 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-; Make sure that the reverse iterators are calculated using 64bit arithmetic, not 32.
+; PR15882: This test ensures that we do not produce wrapping arithmetic when
+; creating constant reverse step vectors.
;
; int foo(int n, int *A) {
; int sum;
@@ -13,7 +14,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
;
;CHECK-LABEL: @foo(
-;CHECK: <i64 0, i64 -1, i64 -2, i64 -3>
+;CHECK: <i32 0, i32 -1, i32 -2, i32 -3>
;CHECK: ret
define i32 @foo(i32 %n, i32* nocapture %A) {
%1 = icmp sgt i32 %n, 0
diff --git a/test/Transforms/LoopVectorize/unroll-novec-memcheck-metadata.ll b/test/Transforms/LoopVectorize/unroll-novec-memcheck-metadata.ll
new file mode 100644
index 000000000000..d3112b82d1d5
--- /dev/null
+++ b/test/Transforms/LoopVectorize/unroll-novec-memcheck-metadata.ll
@@ -0,0 +1,36 @@
+; RUN: opt < %s -loop-vectorize -force-vector-interleave=2 -force-vector-width=1 -S | FileCheck --enable-var-scope %s
+
+; Make sure we attach memcheck metadata to scalarized memory operations even if
+; we're only unrolling.
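+;
+; The loop is a simple copy-with-add; with -force-vector-width=1 and
+; -force-vector-interleave=2 it is only unrolled, so the memory operations
+; remain scalar. A rough C equivalent (for illustration):
+;
+;   for (long i = 0; i < 10000; i++)
+;     b[i] = a[i] + 77;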
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; CHECK-LABEL: vector.memcheck:
+; CHECK-LABEL: vector.body:
+; CHECK: load i32, {{.*}} !alias.scope ![[$MD1:[0-9]+]]
+; CHECK-LABEL: middle.block:
+; CHECK-DAG: ![[$MD1]] = !{![[MD2:[0-9]+]]}
+; CHECK-DAG: ![[MD2]] = distinct !{![[MD2]], ![[MD3:[0-9]+]]}
+; CHECK-DAG: ![[MD3]] = distinct !{![[MD3]], !"LVerDomain"}
+
+; Function Attrs: norecurse nounwind uwtable
+define void @test(i32* nocapture readonly %a, i32* nocapture %b) local_unnamed_addr #0 {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+ %0 = load i32, i32* %arrayidx, align 4
+ %add = add nsw i32 %0, 77
+ %arrayidx2 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 10000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+attributes #0 = { norecurse nounwind uwtable }
diff --git a/test/Transforms/LoopVectorize/vector-geps.ll b/test/Transforms/LoopVectorize/vector-geps.ll
new file mode 100644
index 000000000000..bd79499d5d34
--- /dev/null
+++ b/test/Transforms/LoopVectorize/vector-geps.ll
@@ -0,0 +1,61 @@
+; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -instcombine -S | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+
+; CHECK-LABEL: @vector_gep_stored(
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, i32* %b, <4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32*, i32** %a, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32** [[TMP2]] to <4 x i32*>*
+; CHECK-NEXT: store <4 x i32*> [[TMP1]], <4 x i32*>* [[TMP3]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], <i64 4, i64 4, i64 4, i64 4>
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
+;
+define void @vector_gep_stored(i32** %a, i32 *%b, i64 %n) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
+ %tmp0 = getelementptr inbounds i32, i32* %b, i64 %i
+ %tmp1 = getelementptr inbounds i32*, i32** %a, i64 %i
+ store i32* %tmp0, i32** %tmp1, align 8
+ %i.next = add nuw nsw i64 %i, 1
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
+
+; CHECK-LABEL: @uniform_vector_gep_stored(
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, i32* %b, i64 1
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32*> undef, i32* [[TMP1]], i32 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32*> [[DOTSPLATINSERT]], <4 x i32*> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32*, i32** %a, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32** [[TMP2]] to <4 x i32*>*
+; CHECK-NEXT: store <4 x i32*> [[DOTSPLAT]], <4 x i32*>* [[TMP3]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
+;
+define void @uniform_vector_gep_stored(i32** %a, i32 *%b, i64 %n) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
+ %tmp0 = getelementptr inbounds i32, i32* %b, i64 1
+ %tmp1 = getelementptr inbounds i32*, i32** %a, i64 %i
+ store i32* %tmp0, i32** %tmp1, align 8
+ %i.next = add nuw nsw i64 %i, 1
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
diff --git a/test/Transforms/LoopVersioning/loop-invariant-bound.ll b/test/Transforms/LoopVersioning/loop-invariant-bound.ll
index 3411adbf245e..01c5a55bd5b2 100644
--- a/test/Transforms/LoopVersioning/loop-invariant-bound.ll
+++ b/test/Transforms/LoopVersioning/loop-invariant-bound.ll
@@ -8,12 +8,13 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
%Partials.215 = type { [2 x %Dual.213] }
; Function Attrs: sspreq
-define void @"julia_axpy!_65480"(%Dual.212*) {
+define void @"julia_axpy!_65480"(%Dual.212*, %Dual.212* %other) {
top:
br label %if24
; CHECK-NOT: %bc = bitcast i64* %v2.sroa.0.0..sroa_cast
-; CHECK: %bound0
+; CHECK: %bound0 = icmp ult i8* %[[x:[a-z0-9]+]], %[[y:[a-z0-9]+]]
+; CHECK-NOT: %bound1 = icmp ult i8* %[[y]], %[[x]]
if24: ; preds = %if24, %top
%"#temp#1.sroa.3.02" = phi i64 [ undef, %top ], [ %2, %if24 ]
@@ -24,7 +25,7 @@ if24: ; preds = %if24, %top
%v2.sroa.0.0..sroa_cast = bitcast %Dual.212* %0 to i64*
%v2.sroa.0.0.copyload = load i64, i64* %v2.sroa.0.0..sroa_cast, align 1
%3 = add i64 %"#temp#1.sroa.0.01", -1
- %4 = getelementptr inbounds %Dual.212, %Dual.212* undef, i64 %3, i32 1, i32 0, i64 0, i32 1, i32 0, i64 0
+ %4 = getelementptr inbounds %Dual.212, %Dual.212* %other, i64 0, i32 1, i32 0, i64 0, i32 1, i32 0, i64 0
%5 = bitcast double* %4 to i64*
store i64 undef, i64* %5, align 8
%notlhs27 = icmp eq i64 %2, undef
diff --git a/test/Transforms/LoopVersioningLICM/loopversioningLICM1.ll b/test/Transforms/LoopVersioningLICM/loopversioningLICM1.ll
index ff6c25087aa5..791c2e3210c8 100644
--- a/test/Transforms/LoopVersioningLICM/loopversioningLICM1.ll
+++ b/test/Transforms/LoopVersioningLICM/loopversioningLICM1.ll
@@ -16,7 +16,7 @@
; CHECK-NEXT: %add8 = add nsw i32 %[[induction]], %add
; CHECK-NEXT: %inc = add nuw i32 %j.113, 1
; CHECK-NEXT: %cmp2 = icmp ult i32 %inc, %itr
-; CHECK-NEXT: br i1 %cmp2, label %for.body3, label %for.inc11.loopexit.loopexit6, !llvm.loop !5
+; CHECK-NEXT: br i1 %cmp2, label %for.body3, label %for.inc11.loopexit.loopexit7, !llvm.loop !5
define i32 @foo(i32* nocapture %var1, i32* nocapture readnone %var2, i32* nocapture %var3, i32 %itr) #0 {
entry:
%cmp14 = icmp eq i32 %itr, 0
diff --git a/test/Transforms/LoopVersioningLICM/loopversioningLICM2.ll b/test/Transforms/LoopVersioningLICM/loopversioningLICM2.ll
index 928a6527badc..53add6338022 100644
--- a/test/Transforms/LoopVersioningLICM/loopversioningLICM2.ll
+++ b/test/Transforms/LoopVersioningLICM/loopversioningLICM2.ll
@@ -7,7 +7,7 @@
; CHECK: Loop: Loop at depth 2 containing: %for.body3.us<header><latch><exiting>
; CHECK-NEXT: Loop Versioning found to be beneficial
;
-; CHECK: for.cond1.for.inc17_crit_edge.us.loopexit5: ; preds = %for.body3.us
+; CHECK: for.cond1.for.inc17_crit_edge.us.loopexit6: ; preds = %for.body3.us
; CHECK-NEXT: %add14.us.lcssa = phi float [ %add14.us, %for.body3.us ]
; CHECK-NEXT: store float %add14.us.lcssa, float* %arrayidx.us, align 4, !alias.scope !0, !noalias !0
; CHECK-NEXT: br label %for.cond1.for.inc17_crit_edge.us
diff --git a/test/Transforms/LowerTypeTests/Inputs/import.yaml b/test/Transforms/LowerTypeTests/Inputs/import.yaml
new file mode 100644
index 000000000000..d4a5c2c3c255
--- /dev/null
+++ b/test/Transforms/LowerTypeTests/Inputs/import.yaml
@@ -0,0 +1,31 @@
+---
+TypeIdMap:
+ allones7:
+ TTRes:
+ Kind: AllOnes
+ SizeM1BitWidth: 7
+ allones32:
+ TTRes:
+ Kind: AllOnes
+ SizeM1BitWidth: 32
+ bytearray7:
+ TTRes:
+ Kind: ByteArray
+ SizeM1BitWidth: 7
+ bytearray32:
+ TTRes:
+ Kind: ByteArray
+ SizeM1BitWidth: 32
+ inline5:
+ TTRes:
+ Kind: Inline
+ SizeM1BitWidth: 5
+ inline6:
+ TTRes:
+ Kind: Inline
+ SizeM1BitWidth: 6
+ single:
+ TTRes:
+ Kind: Single
+ SizeM1BitWidth: 0
+...
diff --git a/test/Transforms/LowerTypeTests/Inputs/use-typeid1-typeid2.yaml b/test/Transforms/LowerTypeTests/Inputs/use-typeid1-typeid2.yaml
new file mode 100644
index 000000000000..031b2e8de04e
--- /dev/null
+++ b/test/Transforms/LowerTypeTests/Inputs/use-typeid1-typeid2.yaml
@@ -0,0 +1,5 @@
+---
+GlobalValueMap:
+ 42:
+ - TypeTests: [14276520915468743435, 15427464259790519041] # guid("typeid1"), guid("typeid2")
+...
diff --git a/test/Transforms/LowerTypeTests/export-allones.ll b/test/Transforms/LowerTypeTests/export-allones.ll
new file mode 100644
index 000000000000..a642ec87355f
--- /dev/null
+++ b/test/Transforms/LowerTypeTests/export-allones.ll
@@ -0,0 +1,161 @@
+; RUN: opt -S -lowertypetests -lowertypetests-summary-action=export -lowertypetests-read-summary=%S/Inputs/use-typeid1-typeid2.yaml -lowertypetests-write-summary=%t < %s | FileCheck %s
+; RUN: FileCheck --check-prefix=SUMMARY %s < %t
+
+@foo = constant [2048 x i8] zeroinitializer, !type !0, !type !1, !type !2, !type !3, !type !4, !type !5, !type !6, !type !7, !type !8, !type !9, !type !10, !type !11, !type !12, !type !13, !type !14, !type !15, !type !16, !type !17, !type !18, !type !19, !type !20, !type !21, !type !22, !type !23, !type !24, !type !25, !type !26, !type !27, !type !28, !type !29, !type !30, !type !31, !type !32, !type !33, !type !34, !type !35, !type !36, !type !37, !type !38, !type !39, !type !40, !type !41, !type !42, !type !43, !type !44, !type !45, !type !46, !type !47, !type !48, !type !49, !type !50, !type !51, !type !52, !type !53, !type !54, !type !55, !type !56, !type !57, !type !58, !type !59, !type !60, !type !61, !type !62, !type !63, !type !64, !type !65, !type !66, !type !67, !type !68, !type !69, !type !70, !type !71, !type !72, !type !73, !type !74, !type !75, !type !76, !type !77, !type !78, !type !79, !type !80, !type !81, !type !82, !type !83, !type !84, !type !85, !type !86, !type !87, !type !88, !type !89, !type !90, !type !91, !type !92, !type !93, !type !94, !type !95, !type !96, !type !97, !type !98, !type !99, !type !100, !type !101, !type !102, !type !103, !type !104, !type !105, !type !106, !type !107, !type !108, !type !109, !type !110, !type !111, !type !112, !type !113, !type !114, !type !115, !type !116, !type !117, !type !118, !type !119, !type !120, !type !121, !type !122, !type !123, !type !124, !type !125, !type !126, !type !127, !type !128, !type !129, !type !130
+
+!0 = !{i32 0, !"typeid1"}
+!1 = !{i32 2, !"typeid1"}
+
+!2 = !{i32 4, !"typeid2"}
+!3 = !{i32 8, !"typeid2"}
+!4 = !{i32 12, !"typeid2"}
+!5 = !{i32 16, !"typeid2"}
+!6 = !{i32 20, !"typeid2"}
+!7 = !{i32 24, !"typeid2"}
+!8 = !{i32 28, !"typeid2"}
+!9 = !{i32 32, !"typeid2"}
+!10 = !{i32 36, !"typeid2"}
+!11 = !{i32 40, !"typeid2"}
+!12 = !{i32 44, !"typeid2"}
+!13 = !{i32 48, !"typeid2"}
+!14 = !{i32 52, !"typeid2"}
+!15 = !{i32 56, !"typeid2"}
+!16 = !{i32 60, !"typeid2"}
+!17 = !{i32 64, !"typeid2"}
+!18 = !{i32 68, !"typeid2"}
+!19 = !{i32 72, !"typeid2"}
+!20 = !{i32 76, !"typeid2"}
+!21 = !{i32 80, !"typeid2"}
+!22 = !{i32 84, !"typeid2"}
+!23 = !{i32 88, !"typeid2"}
+!24 = !{i32 92, !"typeid2"}
+!25 = !{i32 96, !"typeid2"}
+!26 = !{i32 100, !"typeid2"}
+!27 = !{i32 104, !"typeid2"}
+!28 = !{i32 108, !"typeid2"}
+!29 = !{i32 112, !"typeid2"}
+!30 = !{i32 116, !"typeid2"}
+!31 = !{i32 120, !"typeid2"}
+!32 = !{i32 124, !"typeid2"}
+!33 = !{i32 128, !"typeid2"}
+!34 = !{i32 132, !"typeid2"}
+!35 = !{i32 136, !"typeid2"}
+!36 = !{i32 140, !"typeid2"}
+!37 = !{i32 144, !"typeid2"}
+!38 = !{i32 148, !"typeid2"}
+!39 = !{i32 152, !"typeid2"}
+!40 = !{i32 156, !"typeid2"}
+!41 = !{i32 160, !"typeid2"}
+!42 = !{i32 164, !"typeid2"}
+!43 = !{i32 168, !"typeid2"}
+!44 = !{i32 172, !"typeid2"}
+!45 = !{i32 176, !"typeid2"}
+!46 = !{i32 180, !"typeid2"}
+!47 = !{i32 184, !"typeid2"}
+!48 = !{i32 188, !"typeid2"}
+!49 = !{i32 192, !"typeid2"}
+!50 = !{i32 196, !"typeid2"}
+!51 = !{i32 200, !"typeid2"}
+!52 = !{i32 204, !"typeid2"}
+!53 = !{i32 208, !"typeid2"}
+!54 = !{i32 212, !"typeid2"}
+!55 = !{i32 216, !"typeid2"}
+!56 = !{i32 220, !"typeid2"}
+!57 = !{i32 224, !"typeid2"}
+!58 = !{i32 228, !"typeid2"}
+!59 = !{i32 232, !"typeid2"}
+!60 = !{i32 236, !"typeid2"}
+!61 = !{i32 240, !"typeid2"}
+!62 = !{i32 244, !"typeid2"}
+!63 = !{i32 248, !"typeid2"}
+!64 = !{i32 252, !"typeid2"}
+!65 = !{i32 256, !"typeid2"}
+!66 = !{i32 260, !"typeid2"}
+!67 = !{i32 264, !"typeid2"}
+!68 = !{i32 268, !"typeid2"}
+!69 = !{i32 272, !"typeid2"}
+!70 = !{i32 276, !"typeid2"}
+!71 = !{i32 280, !"typeid2"}
+!72 = !{i32 284, !"typeid2"}
+!73 = !{i32 288, !"typeid2"}
+!74 = !{i32 292, !"typeid2"}
+!75 = !{i32 296, !"typeid2"}
+!76 = !{i32 300, !"typeid2"}
+!77 = !{i32 304, !"typeid2"}
+!78 = !{i32 308, !"typeid2"}
+!79 = !{i32 312, !"typeid2"}
+!80 = !{i32 316, !"typeid2"}
+!81 = !{i32 320, !"typeid2"}
+!82 = !{i32 324, !"typeid2"}
+!83 = !{i32 328, !"typeid2"}
+!84 = !{i32 332, !"typeid2"}
+!85 = !{i32 336, !"typeid2"}
+!86 = !{i32 340, !"typeid2"}
+!87 = !{i32 344, !"typeid2"}
+!88 = !{i32 348, !"typeid2"}
+!89 = !{i32 352, !"typeid2"}
+!90 = !{i32 356, !"typeid2"}
+!91 = !{i32 360, !"typeid2"}
+!92 = !{i32 364, !"typeid2"}
+!93 = !{i32 368, !"typeid2"}
+!94 = !{i32 372, !"typeid2"}
+!95 = !{i32 376, !"typeid2"}
+!96 = !{i32 380, !"typeid2"}
+!97 = !{i32 384, !"typeid2"}
+!98 = !{i32 388, !"typeid2"}
+!99 = !{i32 392, !"typeid2"}
+!100 = !{i32 396, !"typeid2"}
+!101 = !{i32 400, !"typeid2"}
+!102 = !{i32 404, !"typeid2"}
+!103 = !{i32 408, !"typeid2"}
+!104 = !{i32 412, !"typeid2"}
+!105 = !{i32 416, !"typeid2"}
+!106 = !{i32 420, !"typeid2"}
+!107 = !{i32 424, !"typeid2"}
+!108 = !{i32 428, !"typeid2"}
+!109 = !{i32 432, !"typeid2"}
+!110 = !{i32 436, !"typeid2"}
+!111 = !{i32 440, !"typeid2"}
+!112 = !{i32 444, !"typeid2"}
+!113 = !{i32 448, !"typeid2"}
+!114 = !{i32 452, !"typeid2"}
+!115 = !{i32 456, !"typeid2"}
+!116 = !{i32 460, !"typeid2"}
+!117 = !{i32 464, !"typeid2"}
+!118 = !{i32 468, !"typeid2"}
+!119 = !{i32 472, !"typeid2"}
+!120 = !{i32 476, !"typeid2"}
+!121 = !{i32 480, !"typeid2"}
+!122 = !{i32 484, !"typeid2"}
+!123 = !{i32 488, !"typeid2"}
+!124 = !{i32 492, !"typeid2"}
+!125 = !{i32 496, !"typeid2"}
+!126 = !{i32 500, !"typeid2"}
+!127 = !{i32 504, !"typeid2"}
+!128 = !{i32 508, !"typeid2"}
+!129 = !{i32 512, !"typeid2"}
+!130 = !{i32 516, !"typeid2"}
+
+; CHECK: [[G:@[0-9]+]] = private constant { [2048 x i8] } zeroinitializer
+
+; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0, i32 0)
+; CHECK: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 1 to i8*)
+; CHECK: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 1 to i8*)
+
+; CHECK: @__typeid_typeid2_global_addr = hidden alias i8, getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0, i64 4)
+; CHECK: @__typeid_typeid2_align = hidden alias i8, inttoptr (i8 2 to i8*)
+; CHECK: @__typeid_typeid2_size_m1 = hidden alias i8, inttoptr (i64 128 to i8*)
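+; (typeid1 spans offsets 0 and 2, giving align 1 and size_m1 1; typeid2 spans
+; the 129 offsets 4..516 in steps of 4, giving align 2 and size_m1 128.)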
+
+; CHECK: @foo = alias [2048 x i8], getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0)
+
+; SUMMARY: TypeIdMap:
+; SUMMARY-NEXT: typeid1:
+; SUMMARY-NEXT: TTRes:
+; SUMMARY-NEXT: Kind: AllOnes
+; SUMMARY-NEXT: SizeM1BitWidth: 7
+; SUMMARY-NEXT: WPDRes:
+; SUMMARY-NEXT: typeid2:
+; SUMMARY-NEXT: TTRes:
+; SUMMARY-NEXT: Kind: AllOnes
+; SUMMARY-NEXT: SizeM1BitWidth: 32
+; SUMMARY-NEXT: WPDRes:
diff --git a/test/Transforms/LowerTypeTests/export-bytearray.ll b/test/Transforms/LowerTypeTests/export-bytearray.ll
new file mode 100644
index 000000000000..7565b85df30f
--- /dev/null
+++ b/test/Transforms/LowerTypeTests/export-bytearray.ll
@@ -0,0 +1,40 @@
+; RUN: opt -S -lowertypetests -lowertypetests-summary-action=export -lowertypetests-read-summary=%S/Inputs/use-typeid1-typeid2.yaml -lowertypetests-write-summary=%t < %s | FileCheck %s
+; RUN: FileCheck --check-prefix=SUMMARY %s < %t
+
+@foo = constant [2048 x i8] zeroinitializer, !type !0, !type !1, !type !2, !type !3
+
+!0 = !{i32 0, !"typeid1"}
+!1 = !{i32 130, !"typeid1"}
+!2 = !{i32 4, !"typeid2"}
+!3 = !{i32 1032, !"typeid2"}
+
+; CHECK: [[G:@[0-9]+]] = private constant { [2048 x i8] } zeroinitializer
+; CHECK: [[B:@[0-9]+]] = private constant [258 x i8] c"\03\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01"
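+; Both type ids share this byte array: typeid1 tests bit 2 and typeid2 tests
+; bit 1, so byte 0 (covered by both) is \03, byte 65 (typeid1 only) is \02,
+; and byte 257 (typeid2 only) is \01.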
+
+; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0, i32 0)
+; CHECK: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 1 to i8*)
+; CHECK: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 65 to i8*)
+; CHECK: @__typeid_typeid1_byte_array = hidden alias i8, i8* @bits.1
+; CHECK: @__typeid_typeid1_bit_mask = hidden alias i8, inttoptr (i8 2 to i8*)
+
+; CHECK: @__typeid_typeid2_global_addr = hidden alias i8, getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0, i64 4)
+; CHECK: @__typeid_typeid2_align = hidden alias i8, inttoptr (i8 2 to i8*)
+; CHECK: @__typeid_typeid2_size_m1 = hidden alias i8, inttoptr (i64 257 to i8*)
+; CHECK: @__typeid_typeid2_byte_array = hidden alias i8, i8* @bits
+; CHECK: @__typeid_typeid2_bit_mask = hidden alias i8, inttoptr (i8 1 to i8*)
+
+; CHECK: @foo = alias [2048 x i8], getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0)
+; CHECK: @bits = private alias i8, getelementptr inbounds ([258 x i8], [258 x i8]* [[B]], i64 0, i64 0)
+; CHECK: @bits.1 = private alias i8, getelementptr inbounds ([258 x i8], [258 x i8]* [[B]], i64 0, i64 0)
+
+; SUMMARY: TypeIdMap:
+; SUMMARY-NEXT: typeid1:
+; SUMMARY-NEXT: TTRes:
+; SUMMARY-NEXT: Kind: ByteArray
+; SUMMARY-NEXT: SizeM1BitWidth: 7
+; SUMMARY-NEXT: WPDRes:
+; SUMMARY-NEXT: typeid2:
+; SUMMARY-NEXT: TTRes:
+; SUMMARY-NEXT: Kind: ByteArray
+; SUMMARY-NEXT: SizeM1BitWidth: 32
+; SUMMARY-NEXT: WPDRes:
diff --git a/test/Transforms/LowerTypeTests/export-inline.ll b/test/Transforms/LowerTypeTests/export-inline.ll
new file mode 100644
index 000000000000..1da5866e88cc
--- /dev/null
+++ b/test/Transforms/LowerTypeTests/export-inline.ll
@@ -0,0 +1,35 @@
+; RUN: opt -S -lowertypetests -lowertypetests-summary-action=export -lowertypetests-read-summary=%S/Inputs/use-typeid1-typeid2.yaml -lowertypetests-write-summary=%t < %s | FileCheck %s
+; RUN: FileCheck --check-prefix=SUMMARY %s < %t
+
+@foo = constant [2048 x i8] zeroinitializer, !type !0, !type !1, !type !2, !type !3
+
+!0 = !{i32 0, !"typeid1"}
+!1 = !{i32 6, !"typeid1"}
+!2 = !{i32 4, !"typeid2"}
+!3 = !{i32 136, !"typeid2"}
+
+; CHECK: [[G:@[0-9]+]] = private constant { [2048 x i8] } zeroinitializer
+
+; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0, i32 0)
+; CHECK: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 1 to i8*)
+; CHECK: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 3 to i8*)
+; CHECK: @__typeid_typeid1_inline_bits = hidden alias i8, inttoptr (i32 9 to i8*)
+
+; CHECK: @__typeid_typeid2_global_addr = hidden alias i8, getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0, i64 4)
+; CHECK: @__typeid_typeid2_align = hidden alias i8, inttoptr (i8 2 to i8*)
+; CHECK: @__typeid_typeid2_size_m1 = hidden alias i8, inttoptr (i64 33 to i8*)
+; CHECK: @__typeid_typeid2_inline_bits = hidden alias i8, inttoptr (i64 8589934593 to i8*)
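+; The bit sets are encoded in the symbol addresses themselves: typeid1's
+; offsets 0 and 6 (align 1) set bits 0 and 3, giving 9; typeid2's offsets 4
+; and 136 (align 2) set bits 0 and 33, giving 8589934593.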
+
+; CHECK: @foo = alias [2048 x i8], getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0)
+
+; SUMMARY: TypeIdMap:
+; SUMMARY-NEXT: typeid1:
+; SUMMARY-NEXT: TTRes:
+; SUMMARY-NEXT: Kind: Inline
+; SUMMARY-NEXT: SizeM1BitWidth: 5
+; SUMMARY-NEXT: WPDRes:
+; SUMMARY-NEXT: typeid2:
+; SUMMARY-NEXT: TTRes:
+; SUMMARY-NEXT: Kind: Inline
+; SUMMARY-NEXT: SizeM1BitWidth: 6
+; SUMMARY-NEXT: WPDRes:
diff --git a/test/Transforms/LowerTypeTests/export-single.ll b/test/Transforms/LowerTypeTests/export-single.ll
new file mode 100644
index 000000000000..92e810c09776
--- /dev/null
+++ b/test/Transforms/LowerTypeTests/export-single.ll
@@ -0,0 +1,17 @@
+; RUN: opt -S -lowertypetests -lowertypetests-summary-action=export -lowertypetests-read-summary=%S/Inputs/use-typeid1-typeid2.yaml -lowertypetests-write-summary=%t < %s | FileCheck %s
+; RUN: FileCheck --check-prefix=SUMMARY %s < %t
+
+@foo = constant i32 42, !type !0
+
+!0 = !{i32 0, !"typeid1"}
+
+; CHECK: [[G:@[0-9]+]] = private constant { i32 } { i32 42 }
+
+; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, bitcast ({ i32 }* [[G]] to i8*)
+; CHECK: @foo = alias i32, getelementptr inbounds ({ i32 }, { i32 }* [[G]], i32 0, i32 0)
+
+; SUMMARY: TypeIdMap:
+; SUMMARY-NEXT: typeid1:
+; SUMMARY-NEXT: TTRes:
+; SUMMARY-NEXT: Kind: Single
+; SUMMARY-NEXT: SizeM1BitWidth: 0
diff --git a/test/Transforms/LowerTypeTests/external-global.ll b/test/Transforms/LowerTypeTests/external-global.ll
new file mode 100644
index 000000000000..0b80374aed74
--- /dev/null
+++ b/test/Transforms/LowerTypeTests/external-global.ll
@@ -0,0 +1,14 @@
+; RUN: opt -S -lowertypetests -lowertypetests-summary-action=export -o - %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-scei-ps4"
+
+; CHECK: @dipsy = external
+@dipsy = external constant i8, !type !0
+
+define void @tinkywinky() {
+ store i8* @dipsy, i8** undef
+ ret void
+}
+
+!0 = !{i64 16, !"teletubbies"}
diff --git a/test/Transforms/LowerTypeTests/import-unsat.ll b/test/Transforms/LowerTypeTests/import-unsat.ll
index 7410bc4b4d88..76b244001986 100644
--- a/test/Transforms/LowerTypeTests/import-unsat.ll
+++ b/test/Transforms/LowerTypeTests/import-unsat.ll
@@ -4,8 +4,7 @@
; SUMMARY: GlobalValueMap:
; SUMMARY-NEXT: 42:
-; SUMMARY-NEXT: - TypeTests:
-; SUMMARY-NEXT: - 123
+; SUMMARY-NEXT: - TypeTests: [ 123 ]
; SUMMARY-NEXT: TypeIdMap:
; SUMMARY-NEXT: typeid1:
; SUMMARY-NEXT: TTRes:
diff --git a/test/Transforms/LowerTypeTests/import.ll b/test/Transforms/LowerTypeTests/import.ll
new file mode 100644
index 000000000000..1a5aceccd631
--- /dev/null
+++ b/test/Transforms/LowerTypeTests/import.ll
@@ -0,0 +1,170 @@
+; RUN: opt -S -lowertypetests -lowertypetests-summary-action=import -lowertypetests-read-summary=%S/Inputs/import.yaml < %s | FileCheck %s
+
+target datalayout = "e-p:64:64"
+
+declare i1 @llvm.type.test(i8* %ptr, metadata %bitset) nounwind readnone
+
+; CHECK-DAG: @__typeid_single_global_addr = external hidden global i8
+; CHECK-DAG: @__typeid_inline6_global_addr = external hidden global i8
+; CHECK-DAG: @__typeid_inline6_align = external hidden global i8, !absolute_symbol !0
+; CHECK-DAG: @__typeid_inline6_size_m1 = external hidden global i8, !absolute_symbol !1
+; CHECK-DAG: @__typeid_inline6_inline_bits = external hidden global i8, !absolute_symbol !2
+; CHECK-DAG: @__typeid_inline5_global_addr = external hidden global i8
+; CHECK-DAG: @__typeid_inline5_align = external hidden global i8, !absolute_symbol !0
+; CHECK-DAG: @__typeid_inline5_size_m1 = external hidden global i8, !absolute_symbol !3
+; CHECK-DAG: @__typeid_inline5_inline_bits = external hidden global i8, !absolute_symbol !4
+; CHECK-DAG: @__typeid_bytearray32_global_addr = external hidden global i8
+; CHECK-DAG: @__typeid_bytearray32_align = external hidden global i8, !absolute_symbol !0
+; CHECK-DAG: @__typeid_bytearray32_size_m1 = external hidden global i8, !absolute_symbol !4
+; CHECK-DAG: @__typeid_bytearray32_byte_array = external hidden global i8
+; CHECK-DAG: @__typeid_bytearray32_bit_mask = external hidden global i8, !absolute_symbol !0
+; CHECK-DAG: @__typeid_bytearray7_global_addr = external hidden global i8
+; CHECK-DAG: @__typeid_bytearray7_align = external hidden global i8, !absolute_symbol !0
+; CHECK-DAG: @__typeid_bytearray7_size_m1 = external hidden global i8, !absolute_symbol !5
+; CHECK-DAG: @__typeid_bytearray7_byte_array = external hidden global i8
+; CHECK-DAG: @__typeid_bytearray7_bit_mask = external hidden global i8, !absolute_symbol !0
+; CHECK-DAG: @__typeid_allones32_global_addr = external hidden global i8
+; CHECK-DAG: @__typeid_allones32_align = external hidden global i8, !absolute_symbol !0
+; CHECK-DAG: @__typeid_allones32_size_m1 = external hidden global i8, !absolute_symbol !4
+; CHECK-DAG: @__typeid_allones7_global_addr = external hidden global i8
+; CHECK-DAG: @__typeid_allones7_align = external hidden global i8, !absolute_symbol !0
+; CHECK-DAG: @__typeid_allones7_size_m1 = external hidden global i8, !absolute_symbol !5
+
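+; Each imported range check below computes a rotate right of (p - global_addr)
+; by the alignment, built from the lshr/shl/or sequence, and compares the
+; result against size_m1; misaligned pointers exceed the limit and fail.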
+; CHECK: define i1 @allones7(i8* [[p:%.*]])
+define i1 @allones7(i8* %p) {
+ ; CHECK-NEXT: [[pi:%.*]] = ptrtoint i8* [[p]] to i64
+ ; CHECK-NEXT: [[sub:%.*]] = sub i64 [[pi]], ptrtoint (i8* @__typeid_allones7_global_addr to i64)
+ ; CHECK-NEXT: [[lshr:%.*]] = lshr i64 [[sub]], zext (i8 ptrtoint (i8* @__typeid_allones7_align to i8) to i64)
+ ; CHECK-NEXT: [[shl:%.*]] = shl i64 [[sub]], zext (i8 sub (i8 64, i8 ptrtoint (i8* @__typeid_allones7_align to i8)) to i64)
+ ; CHECK-NEXT: [[or:%.*]] = or i64 [[lshr]], [[shl]]
+ ; CHECK-NEXT: [[ule:%.*]] = icmp ule i64 [[or]], ptrtoint (i8* @__typeid_allones7_size_m1 to i64)
+ ; CHECK-NEXT: ret i1 [[ule]]
+ %x = call i1 @llvm.type.test(i8* %p, metadata !"allones7")
+ ret i1 %x
+}
+
+; CHECK: define i1 @allones32(i8* [[p:%.*]])
+define i1 @allones32(i8* %p) {
+ ; CHECK-NEXT: [[pi:%.*]] = ptrtoint i8* [[p]] to i64
+ ; CHECK-NEXT: [[sub:%.*]] = sub i64 [[pi]], ptrtoint (i8* @__typeid_allones32_global_addr to i64)
+ ; CHECK-NEXT: [[lshr:%.*]] = lshr i64 [[sub]], zext (i8 ptrtoint (i8* @__typeid_allones32_align to i8) to i64)
+ ; CHECK-NEXT: [[shl:%.*]] = shl i64 [[sub]], zext (i8 sub (i8 64, i8 ptrtoint (i8* @__typeid_allones32_align to i8)) to i64)
+ ; CHECK-NEXT: [[or:%.*]] = or i64 [[lshr]], [[shl]]
+ ; CHECK-NEXT: [[ule:%.*]] = icmp ule i64 [[or]], ptrtoint (i8* @__typeid_allones32_size_m1 to i64)
+ ; CHECK-NEXT: ret i1 [[ule]]
+ %x = call i1 @llvm.type.test(i8* %p, metadata !"allones32")
+ ret i1 %x
+}
+
+; CHECK: define i1 @bytearray7(i8* [[p:%.*]])
+define i1 @bytearray7(i8* %p) {
+ ; CHECK-NEXT: [[pi:%.*]] = ptrtoint i8* [[p]] to i64
+ ; CHECK-NEXT: [[sub:%.*]] = sub i64 [[pi]], ptrtoint (i8* @__typeid_bytearray7_global_addr to i64)
+ ; CHECK-NEXT: [[lshr:%.*]] = lshr i64 [[sub]], zext (i8 ptrtoint (i8* @__typeid_bytearray7_align to i8) to i64)
+ ; CHECK-NEXT: [[shl:%.*]] = shl i64 [[sub]], zext (i8 sub (i8 64, i8 ptrtoint (i8* @__typeid_bytearray7_align to i8)) to i64)
+ ; CHECK-NEXT: [[or:%.*]] = or i64 [[lshr]], [[shl]]
+ ; CHECK-NEXT: [[ule:%.*]] = icmp ule i64 [[or]], ptrtoint (i8* @__typeid_bytearray7_size_m1 to i64)
+ ; CHECK-NEXT: br i1 [[ule]], label %[[t:.*]], label %[[f:.*]]
+
+ ; CHECK: [[t]]:
+ ; CHECK-NEXT: [[gep:%.*]] = getelementptr i8, i8* @__typeid_bytearray7_byte_array, i64 [[or]]
+ ; CHECK-NEXT: [[load:%.*]] = load i8, i8* [[gep]]
+ ; CHECK-NEXT: [[and:%.*]] = and i8 [[load]], ptrtoint (i8* @__typeid_bytearray7_bit_mask to i8)
+ ; CHECK-NEXT: [[ne:%.*]] = icmp ne i8 [[and]], 0
+ ; CHECK-NEXT: br label %[[f]]
+
+ ; CHECK: [[f]]:
+ ; CHECK-NEXT: [[phi:%.*]] = phi i1 [ false, %0 ], [ [[ne]], %[[t]] ]
+ ; CHECK-NEXT: ret i1 [[phi]]
+ %x = call i1 @llvm.type.test(i8* %p, metadata !"bytearray7")
+ ret i1 %x
+}
+
+; CHECK: define i1 @bytearray32(i8* [[p:%.*]])
+define i1 @bytearray32(i8* %p) {
+ ; CHECK-NEXT: [[pi:%.*]] = ptrtoint i8* [[p]] to i64
+ ; CHECK-NEXT: [[sub:%.*]] = sub i64 [[pi]], ptrtoint (i8* @__typeid_bytearray32_global_addr to i64)
+ ; CHECK-NEXT: [[lshr:%.*]] = lshr i64 [[sub]], zext (i8 ptrtoint (i8* @__typeid_bytearray32_align to i8) to i64)
+ ; CHECK-NEXT: [[shl:%.*]] = shl i64 [[sub]], zext (i8 sub (i8 64, i8 ptrtoint (i8* @__typeid_bytearray32_align to i8)) to i64)
+ ; CHECK-NEXT: [[or:%.*]] = or i64 [[lshr]], [[shl]]
+ ; CHECK-NEXT: [[ule:%.*]] = icmp ule i64 [[or]], ptrtoint (i8* @__typeid_bytearray32_size_m1 to i64)
+ ; CHECK-NEXT: br i1 [[ule]], label %[[t:.*]], label %[[f:.*]]
+
+ ; CHECK: [[t]]:
+ ; CHECK-NEXT: [[gep:%.*]] = getelementptr i8, i8* @__typeid_bytearray32_byte_array, i64 [[or]]
+ ; CHECK-NEXT: [[load:%.*]] = load i8, i8* [[gep]]
+ ; CHECK-NEXT: [[and:%.*]] = and i8 [[load]], ptrtoint (i8* @__typeid_bytearray32_bit_mask to i8)
+ ; CHECK-NEXT: [[ne:%.*]] = icmp ne i8 [[and]], 0
+ ; CHECK-NEXT: br label %[[f]]
+
+ ; CHECK: [[f]]:
+ ; CHECK-NEXT: [[phi:%.*]] = phi i1 [ false, %0 ], [ [[ne]], %[[t]] ]
+ ; CHECK-NEXT: ret i1 [[phi]]
+ %x = call i1 @llvm.type.test(i8* %p, metadata !"bytearray32")
+ ret i1 %x
+}
+
+; CHECK: define i1 @inline5(i8* [[p:%.*]])
+define i1 @inline5(i8* %p) {
+ ; CHECK-NEXT: [[pi:%.*]] = ptrtoint i8* [[p]] to i64
+ ; CHECK-NEXT: [[sub:%.*]] = sub i64 [[pi]], ptrtoint (i8* @__typeid_inline5_global_addr to i64)
+ ; CHECK-NEXT: [[lshr:%.*]] = lshr i64 [[sub]], zext (i8 ptrtoint (i8* @__typeid_inline5_align to i8) to i64)
+ ; CHECK-NEXT: [[shl:%.*]] = shl i64 [[sub]], zext (i8 sub (i8 64, i8 ptrtoint (i8* @__typeid_inline5_align to i8)) to i64)
+ ; CHECK-NEXT: [[or:%.*]] = or i64 [[lshr]], [[shl]]
+ ; CHECK-NEXT: [[ule:%.*]] = icmp ule i64 [[or]], ptrtoint (i8* @__typeid_inline5_size_m1 to i64)
+ ; CHECK-NEXT: br i1 [[ule]], label %[[t:.*]], label %[[f:.*]]
+
+ ; CHECK: [[t]]:
+ ; CHECK-NEXT: [[trunc:%.*]] = trunc i64 [[or]] to i32
+ ; CHECK-NEXT: [[and:%.*]] = and i32 [[trunc]], 31
+ ; CHECK-NEXT: [[shl2:%.*]] = shl i32 1, [[and]]
+ ; CHECK-NEXT: [[and2:%.*]] = and i32 ptrtoint (i8* @__typeid_inline5_inline_bits to i32), [[shl2]]
+ ; CHECK-NEXT: [[ne:%.*]] = icmp ne i32 [[and2]], 0
+ ; CHECK-NEXT: br label %[[f]]
+
+ ; CHECK: [[f]]:
+ ; CHECK-NEXT: [[phi:%.*]] = phi i1 [ false, %0 ], [ [[ne]], %[[t]] ]
+ ; CHECK-NEXT: ret i1 [[phi]]
+ %x = call i1 @llvm.type.test(i8* %p, metadata !"inline5")
+ ret i1 %x
+}
+
+; CHECK: define i1 @inline6(i8* [[p:%.*]])
+define i1 @inline6(i8* %p) {
+ ; CHECK-NEXT: [[pi:%.*]] = ptrtoint i8* [[p]] to i64
+ ; CHECK-NEXT: [[sub:%.*]] = sub i64 [[pi]], ptrtoint (i8* @__typeid_inline6_global_addr to i64)
+ ; CHECK-NEXT: [[lshr:%.*]] = lshr i64 [[sub]], zext (i8 ptrtoint (i8* @__typeid_inline6_align to i8) to i64)
+ ; CHECK-NEXT: [[shl:%.*]] = shl i64 [[sub]], zext (i8 sub (i8 64, i8 ptrtoint (i8* @__typeid_inline6_align to i8)) to i64)
+ ; CHECK-NEXT: [[or:%.*]] = or i64 [[lshr]], [[shl]]
+ ; CHECK-NEXT: [[ule:%.*]] = icmp ule i64 [[or]], ptrtoint (i8* @__typeid_inline6_size_m1 to i64)
+ ; CHECK-NEXT: br i1 [[ule]], label %[[t:.*]], label %[[f:.*]]
+
+ ; CHECK: [[t]]:
+ ; CHECK-NEXT: [[and:%.*]] = and i64 [[or]], 63
+ ; CHECK-NEXT: [[shl2:%.*]] = shl i64 1, [[and]]
+ ; CHECK-NEXT: [[and2:%.*]] = and i64 ptrtoint (i8* @__typeid_inline6_inline_bits to i64), [[shl2]]
+ ; CHECK-NEXT: [[ne:%.*]] = icmp ne i64 [[and2]], 0
+ ; CHECK-NEXT: br label %[[f]]
+
+ ; CHECK: [[f]]:
+ ; CHECK-NEXT: [[phi:%.*]] = phi i1 [ false, %0 ], [ [[ne]], %[[t]] ]
+ ; CHECK-NEXT: ret i1 [[phi]]
+ %x = call i1 @llvm.type.test(i8* %p, metadata !"inline6")
+ ret i1 %x
+}
+
+; CHECK: define i1 @single(i8* [[p:%.*]])
+define i1 @single(i8* %p) {
+ ; CHECK-NEXT: [[pi:%.*]] = ptrtoint i8* [[p]] to i64
+ ; CHECK-NEXT: [[eq:%.*]] = icmp eq i64 [[pi]], ptrtoint (i8* @__typeid_single_global_addr to i64)
+ ; CHECK-NEXT: ret i1 [[eq]]
+ %x = call i1 @llvm.type.test(i8* %p, metadata !"single")
+ ret i1 %x
+}
+
+; CHECK: !0 = !{i64 0, i64 256}
+; CHECK: !1 = !{i64 0, i64 64}
+; CHECK: !2 = !{i64 -1, i64 -1}
+; CHECK: !3 = !{i64 0, i64 32}
+; CHECK: !4 = !{i64 0, i64 4294967296}
+; CHECK: !5 = !{i64 0, i64 128}
diff --git a/test/Transforms/Mem2Reg/ignore-lifetime.ll b/test/Transforms/Mem2Reg/ignore-lifetime.ll
index 12adaffc7714..b996a659237a 100644
--- a/test/Transforms/Mem2Reg/ignore-lifetime.ll
+++ b/test/Transforms/Mem2Reg/ignore-lifetime.ll
@@ -1,16 +1,16 @@
; RUN: opt -mem2reg -S -o - < %s | FileCheck %s
-declare void @llvm.lifetime.start(i64 %size, i8* nocapture %ptr)
-declare void @llvm.lifetime.end(i64 %size, i8* nocapture %ptr)
+declare void @llvm.lifetime.start.p0i8(i64 %size, i8* nocapture %ptr)
+declare void @llvm.lifetime.end.p0i8(i64 %size, i8* nocapture %ptr)
define void @test1() {
; CHECK: test1
; CHECK-NOT: alloca
%A = alloca i32
%B = bitcast i32* %A to i8*
- call void @llvm.lifetime.start(i64 2, i8* %B)
+ call void @llvm.lifetime.start.p0i8(i64 2, i8* %B)
store i32 1, i32* %A
- call void @llvm.lifetime.end(i64 2, i8* %B)
+ call void @llvm.lifetime.end.p0i8(i64 2, i8* %B)
ret void
}
@@ -19,8 +19,8 @@ define void @test2() {
; CHECK-NOT: alloca
%A = alloca {i8, i16}
%B = getelementptr {i8, i16}, {i8, i16}* %A, i32 0, i32 0
- call void @llvm.lifetime.start(i64 2, i8* %B)
+ call void @llvm.lifetime.start.p0i8(i64 2, i8* %B)
store {i8, i16} zeroinitializer, {i8, i16}* %A
- call void @llvm.lifetime.end(i64 2, i8* %B)
+ call void @llvm.lifetime.end.p0i8(i64 2, i8* %B)
ret void
}
diff --git a/test/Transforms/Mem2Reg/preserve-nonnull-load-metadata.ll b/test/Transforms/Mem2Reg/preserve-nonnull-load-metadata.ll
new file mode 100644
index 000000000000..33a5b124c555
--- /dev/null
+++ b/test/Transforms/Mem2Reg/preserve-nonnull-load-metadata.ll
@@ -0,0 +1,89 @@
+; RUN: opt < %s -mem2reg -S | FileCheck %s
+
+; This tests that mem2reg preserves the !nonnull metadata on loads
+; from allocas that get optimized out.
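+; When the promoted load carried !nonnull, mem2reg keeps the fact by emitting
+; an explicit icmp ne against null followed by a call to @llvm.assume.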
+
+; Check the case where the alloca in question has a single store.
+define float* @single_store(float** %arg) {
+; CHECK-LABEL: define float* @single_store
+; CHECK: %arg.load = load float*, float** %arg, align 8
+; CHECK: [[ASSUME:%(.*)]] = icmp ne float* %arg.load, null
+; CHECK: call void @llvm.assume(i1 {{.*}}[[ASSUME]])
+; CHECK: ret float* %arg.load
+entry:
+ %buf = alloca float*
+ %arg.load = load float*, float** %arg, align 8
+ store float* %arg.load, float** %buf, align 8
+ %buf.load = load float*, float **%buf, !nonnull !0
+ ret float* %buf.load
+}
+
+; Check the case where the alloca in question has more than one
+; store, all within a single basic block.
+define float* @single_block(float** %arg) {
+; CHECK-LABEL: define float* @single_block
+; CHECK: %arg.load = load float*, float** %arg, align 8
+; CHECK: [[ASSUME:%(.*)]] = icmp ne float* %arg.load, null
+; CHECK: call void @llvm.assume(i1 {{.*}}[[ASSUME]])
+; CHECK: ret float* %arg.load
+entry:
+ %buf = alloca float*
+ %arg.load = load float*, float** %arg, align 8
+ store float* null, float** %buf, align 8
+ store float* %arg.load, float** %buf, align 8
+ %buf.load = load float*, float **%buf, !nonnull !0
+ ret float* %buf.load
+}
+
+; Check the case where the alloca in question has more than one
+; store and also reads and writes in multiple blocks.
+define float* @multi_block(float** %arg) {
+; CHECK-LABEL: define float* @multi_block
+; CHECK-LABEL: entry:
+; CHECK: %arg.load = load float*, float** %arg, align 8
+; CHECK: br label %next
+; CHECK-LABEL: next:
+; CHECK: [[ASSUME:%(.*)]] = icmp ne float* %arg.load, null
+; CHECK: call void @llvm.assume(i1 {{.*}}[[ASSUME]])
+; CHECK: ret float* %arg.load
+entry:
+ %buf = alloca float*
+ %arg.load = load float*, float** %arg, align 8
+ store float* null, float** %buf, align 8
+ br label %next
+next:
+ store float* %arg.load, float** %buf, align 8
+ %buf.load = load float*, float** %buf, !nonnull !0
+ ret float* %buf.load
+}
+
+; Check that we don't add an assume if it's not
+; necessary, i.e. the value is already implied to be nonnull.
+define float* @no_assume(float** %arg) {
+; CHECK-LABEL: define float* @no_assume
+; CHECK-LABEL: entry:
+; CHECK: %arg.load = load float*, float** %arg, align 8
+; CHECK: %cn = icmp ne float* %arg.load, null
+; CHECK: br i1 %cn, label %next, label %fin
+; CHECK-LABEL: next:
+; CHECK-NOT: call void @llvm.assume
+; CHECK: ret float* %arg.load
+; CHECK-LABEL: fin:
+; CHECK: ret float* null
+entry:
+ %buf = alloca float*
+ %arg.load = load float*, float** %arg, align 8
+ %cn = icmp ne float* %arg.load, null
+ br i1 %cn, label %next, label %fin
+next:
+; At this point the above nonnull check ensures that
+; the value %arg.load is nonnull in this block and thus
+; we need not add the assume.
+ store float* %arg.load, float** %buf, align 8
+ %buf.load = load float*, float** %buf, !nonnull !0
+ ret float* %buf.load
+fin:
+ ret float* null
+}
+
+!0 = !{}
diff --git a/test/Transforms/MemCpyOpt/lifetime.ll b/test/Transforms/MemCpyOpt/lifetime.ll
index 6a7e44692daa..77b495f2b583 100644
--- a/test/Transforms/MemCpyOpt/lifetime.ll
+++ b/test/Transforms/MemCpyOpt/lifetime.ll
@@ -4,8 +4,8 @@
; @llvm.lifetime.start and @llvm.memcpy.
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
define void @_ZN4CordC2EOS_(i8* nocapture dereferenceable(16) %arg1) {
bb:
@@ -14,11 +14,11 @@ bb:
; CHECK: ret void
%tmp = alloca [8 x i8], align 8
%tmp5 = bitcast [8 x i8]* %tmp to i8*
- call void @llvm.lifetime.start(i64 16, i8* %tmp5)
+ call void @llvm.lifetime.start.p0i8(i64 16, i8* %tmp5)
%tmp10 = getelementptr inbounds i8, i8* %tmp5, i64 7
store i8 0, i8* %tmp10, align 1
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %arg1, i8* %tmp5, i64 16, i32 8, i1 false)
- call void @llvm.lifetime.end(i64 16, i8* %tmp5)
+ call void @llvm.lifetime.end.p0i8(i64 16, i8* %tmp5)
ret void
}
diff --git a/test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll b/test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll
index e3e57f09d88f..e21dc87cb6a0 100644
--- a/test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll
+++ b/test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll
@@ -7,11 +7,11 @@ define void @foo([8 x i64]* noalias nocapture sret dereferenceable(64) %sret) {
entry-block:
%a = alloca [8 x i64], align 8
%a.cast = bitcast [8 x i64]* %a to i8*
- call void @llvm.lifetime.start(i64 64, i8* %a.cast)
+ call void @llvm.lifetime.start.p0i8(i64 64, i8* %a.cast)
call void @llvm.memset.p0i8.i64(i8* %a.cast, i8 0, i64 64, i32 8, i1 false)
%sret.cast = bitcast [8 x i64]* %sret to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %sret.cast, i8* %a.cast, i64 64, i32 8, i1 false)
- call void @llvm.lifetime.end(i64 64, i8* %a.cast)
+ call void @llvm.lifetime.end.p0i8(i64 64, i8* %a.cast)
ret void
; CHECK-LABEL: @foo(
@@ -25,14 +25,14 @@ define void @bar([8 x i64]* noalias nocapture sret dereferenceable(64) %sret, [8
entry-block:
%a = alloca [8 x i64], align 8
%a.cast = bitcast [8 x i64]* %a to i8*
- call void @llvm.lifetime.start(i64 64, i8* %a.cast)
+ call void @llvm.lifetime.start.p0i8(i64 64, i8* %a.cast)
call void @llvm.memset.p0i8.i64(i8* %a.cast, i8 0, i64 64, i32 8, i1 false)
%sret.cast = bitcast [8 x i64]* %sret to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %sret.cast, i8* %a.cast, i64 64, i32 8, i1 false)
call void @llvm.memset.p0i8.i64(i8* %a.cast, i8 42, i64 32, i32 8, i1 false)
%out.cast = bitcast [8 x i64]* %out to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %out.cast, i8* %a.cast, i64 64, i32 8, i1 false)
- call void @llvm.lifetime.end(i64 64, i8* %a.cast)
+ call void @llvm.lifetime.end.p0i8(i64 64, i8* %a.cast)
ret void
; CHECK-LABEL: @bar(
@@ -48,8 +48,8 @@ entry-block:
; CHECK: ret void
}
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) nounwind
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
diff --git a/test/Transforms/MemCpyOpt/memcpy-undef.ll b/test/Transforms/MemCpyOpt/memcpy-undef.ll
index c75d020c0786..06a41829a4ee 100644
--- a/test/Transforms/MemCpyOpt/memcpy-undef.ll
+++ b/test/Transforms/MemCpyOpt/memcpy-undef.ll
@@ -22,7 +22,7 @@ define i32 @test1(%struct.foo* nocapture %foobie) nounwind noinline ssp uwtable
}
define void @test2(i8* sret noalias nocapture %out, i8* %in) nounwind noinline ssp uwtable {
- call void @llvm.lifetime.start(i64 8, i8* %in)
+ call void @llvm.lifetime.start.p0i8(i64 8, i8* %in)
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %out, i8* %in, i64 8, i32 1, i1 false)
ret void
@@ -32,7 +32,7 @@ define void @test2(i8* sret noalias nocapture %out, i8* %in) nounwind noinline s
}
define void @test3(i8* sret noalias nocapture %out, i8* %in) nounwind noinline ssp uwtable {
- call void @llvm.lifetime.start(i64 4, i8* %in)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %in)
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %out, i8* %in, i64 8, i32 1, i1 false)
ret void
@@ -43,4 +43,4 @@ define void @test3(i8* sret noalias nocapture %out, i8* %in) nounwind noinline s
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
diff --git a/test/Transforms/MemCpyOpt/memcpy.ll b/test/Transforms/MemCpyOpt/memcpy.ll
index 6181543cfc63..e4d50f7157de 100644
--- a/test/Transforms/MemCpyOpt/memcpy.ll
+++ b/test/Transforms/MemCpyOpt/memcpy.ll
@@ -76,8 +76,21 @@ define void @test4(i8 *%P) {
; CHECK-NEXT: call void @test4a(
}
+; Make sure we don't remove the memcpy if the source address space doesn't match the byval argument
+define void @test4_addrspace(i8 addrspace(1)* %P) {
+ %A = alloca %1
+ %a = bitcast %1* %A to i8*
+ call void @llvm.memcpy.p0i8.p1i8.i64(i8* %a, i8 addrspace(1)* %P, i64 8, i32 4, i1 false)
+ call void @test4a(i8* align 1 byval %a)
+ ret void
+; CHECK-LABEL: @test4_addrspace(
+; CHECK: call void @llvm.memcpy.p0i8.p1i8.i64(
+; CHECK-NEXT: call void @test4a(
+}
+
declare void @test4a(i8* align 1 byval)
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p1i8.i64(i8* nocapture, i8 addrspace(1)* nocapture, i64, i32, i1) nounwind
declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture, i64, i32, i1) nounwind
%struct.S = type { i128, [4 x i8]}
@@ -202,6 +215,21 @@ define void @test10(%opaque* noalias nocapture sret %x, i32 %y) {
ret void
}
+; Don't create new addrspacecasts when we don't know they're safe for the target
+define void @test11([20 x i32] addrspace(1)* nocapture dereferenceable(80) %P) {
+ %A = alloca [20 x i32], align 4
+ %a = bitcast [20 x i32]* %A to i8*
+ %b = bitcast [20 x i32] addrspace(1)* %P to i8 addrspace(1)*
+ call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 80, i32 4, i1 false)
+ call void @llvm.memcpy.p1i8.p0i8.i64(i8 addrspace(1)* %b, i8* %a, i64 80, i32 4, i1 false)
+ ret void
+; CHECK-LABEL: @test11(
+; CHECK-NOT: addrspacecast
+}
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p1i8.p0i8.i64(i8 addrspace(1)* nocapture, i8* nocapture, i64, i32, i1) nounwind
+
declare void @f1(%struct.big* nocapture sret)
declare void @f2(%struct.big*)
diff --git a/test/Transforms/MemCpyOpt/pr29105.ll b/test/Transforms/MemCpyOpt/pr29105.ll
index 0d3778372266..03b176c4d245 100644
--- a/test/Transforms/MemCpyOpt/pr29105.ll
+++ b/test/Transforms/MemCpyOpt/pr29105.ll
@@ -11,25 +11,25 @@ entry-block:
%0 = bitcast [2048 x i64]* %tmp0 to i8*
%tmp2 = alloca %Foo, align 8
%x.sroa.0.0..sroa_cast6 = bitcast [2048 x i64]* %x.sroa.0 to i8*
- call void @llvm.lifetime.start(i64 16384, i8* %x.sroa.0.0..sroa_cast6)
- call void @llvm.lifetime.start(i64 16384, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 16384, i8* %x.sroa.0.0..sroa_cast6)
+ call void @llvm.lifetime.start.p0i8(i64 16384, i8* %0)
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 16384, i32 8, i1 false)
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x.sroa.0.0..sroa_cast6, i8* %0, i64 16384, i32 8, i1 false)
- call void @llvm.lifetime.end(i64 16384, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 16384, i8* %0)
%1 = bitcast %Foo* %tmp2 to i8*
- call void @llvm.lifetime.start(i64 16384, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 16384, i8* %1)
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %x.sroa.0.0..sroa_cast6, i64 16384, i32 8, i1 false)
call void @bar(%Foo* noalias nocapture nonnull dereferenceable(16384) %tmp2)
- call void @llvm.lifetime.end(i64 16384, i8* %1)
- call void @llvm.lifetime.end(i64 16384, i8* %x.sroa.0.0..sroa_cast6)
+ call void @llvm.lifetime.end.p0i8(i64 16384, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 16384, i8* %x.sroa.0.0..sroa_cast6)
ret void
}
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) #1
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
declare void @bar(%Foo* noalias nocapture readonly dereferenceable(16384)) unnamed_addr #0
diff --git a/test/Transforms/MergeFunc/mergefunc-preserve-debug-info.ll b/test/Transforms/MergeFunc/mergefunc-preserve-debug-info.ll
new file mode 100644
index 000000000000..cf76893d4aab
--- /dev/null
+++ b/test/Transforms/MergeFunc/mergefunc-preserve-debug-info.ll
@@ -0,0 +1,223 @@
+; RUN: opt -O0 -S -mergefunc -mergefunc-preserve-debug-info < %s | FileCheck %s --check-prefix=OPTIMIZATION_LEVEL_0
+; RUN: opt -O2 -S -mergefunc -mergefunc-preserve-debug-info < %s | FileCheck %s --check-prefix=OPTIMIZATION_LEVEL_2
+
+; Preserve debug info in thunks under -mergefunc -mergefunc-preserve-debug-info
+;
+; We test that:
+; At -O0, the generated @llvm.dbg.declare debug intrinsics are preserved.
+; At -O2, the generated @llvm.dbg.value debug intrinsics are preserved.
+; At -O0, stores from the incoming parameters to locations on the stack frame,
+; and the allocas that create those locations, are preserved.
+; Debug info is generated for the call made by the thunk and for its return
+; value, and these are the only contents of a thunk's entry block.
+; A thunk makes a tail call to the shared implementation.
+; A call site of a thunk keeps pointing at the thunk (with plain -mergefunc it
+; is redirected to the shared implementation) when caller and thunk are in the
+; same translation unit.
+
+; The source code that was used to test and generate this LLVM IR is:
+;
+; int maxA(int x, int y) {
+; int i, m, j;
+; if (x > y)
+; m = x;
+; else
+; m = y;
+; return m;
+; }
+;
+; int maxB(int x, int y) {
+; int i, m, j;
+; if (x > y)
+; m = x;
+; else
+; m = y;
+; return m;
+; }
+;
+; void f(void) {
+;
+; maxA(3, 4);
+; maxB(1, 9);
+; }
+
+; Function Attrs: nounwind uwtable
+define i32 @maxA(i32 %x, i32 %y) !dbg !6 {
+entry:
+ %x.addr = alloca i32, align 4
+ %y.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ %m = alloca i32, align 4
+ %j = alloca i32, align 4
+ store i32 %x, i32* %x.addr, align 4
+ call void @llvm.dbg.declare(metadata i32* %x.addr, metadata !11, metadata !12), !dbg !13
+ store i32 %y, i32* %y.addr, align 4
+ call void @llvm.dbg.declare(metadata i32* %y.addr, metadata !14, metadata !12), !dbg !15
+ call void @llvm.dbg.declare(metadata i32* %i, metadata !16, metadata !12), !dbg !17
+ call void @llvm.dbg.declare(metadata i32* %m, metadata !18, metadata !12), !dbg !19
+ call void @llvm.dbg.declare(metadata i32* %j, metadata !20, metadata !12), !dbg !21
+ %0 = load i32, i32* %x.addr, align 4, !dbg !22
+ %1 = load i32, i32* %y.addr, align 4, !dbg !24
+ %cmp = icmp sgt i32 %0, %1, !dbg !25
+ br i1 %cmp, label %if.then, label %if.else, !dbg !26
+
+if.then: ; preds = %entry
+ %2 = load i32, i32* %x.addr, align 4, !dbg !27
+ store i32 %2, i32* %m, align 4, !dbg !28
+ br label %if.end, !dbg !29
+
+if.else: ; preds = %entry
+ %3 = load i32, i32* %y.addr, align 4, !dbg !30
+ store i32 %3, i32* %m, align 4, !dbg !31
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ %4 = load i32, i32* %m, align 4, !dbg !32
+ ret i32 %4, !dbg !33
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
+
+; Function Attrs: nounwind uwtable
+define i32 @maxB(i32 %x, i32 %y) !dbg !34 {
+
+; OPTIMIZATION_LEVEL_0: define i32 @maxB(i32 %x, i32 %y)
+; OPTIMIZATION_LEVEL_0-NEXT: entry:
+; OPTIMIZATION_LEVEL_0-NEXT: %x.addr = alloca i32, align 4
+; OPTIMIZATION_LEVEL_0-NEXT: %y.addr = alloca i32, align 4
+; OPTIMIZATION_LEVEL_0-NEXT: store i32 %x, i32* %x.addr, align 4
+; OPTIMIZATION_LEVEL_0-NEXT: call void @llvm.dbg.declare(metadata i32* %x.addr, metadata !{{[0-9]+}}, metadata !{{[0-9]+}}), !dbg !{{[0-9]+}}
+; OPTIMIZATION_LEVEL_0-NEXT: store i32 %y, i32* %y.addr, align 4
+; OPTIMIZATION_LEVEL_0-NEXT: call void @llvm.dbg.declare(metadata i32* %y.addr, metadata !{{[0-9]+}}, metadata !{{[0-9]+}}), !dbg !{{[0-9]+}}
+; OPTIMIZATION_LEVEL_0-NEXT: %0 = tail call i32 @maxA(i32 %x, i32 %y), !dbg !{{[0-9]+}}
+; OPTIMIZATION_LEVEL_0-NEXT: ret i32 %0, !dbg !{{[0-9]+}}
+; OPTIMIZATION_LEVEL_0-NEXT: }
+
+; OPTIMIZATION_LEVEL_2: define i32 @maxB(i32 %x, i32 %y)
+; OPTIMIZATION_LEVEL_2-NEXT: entry:
+; OPTIMIZATION_LEVEL_2-NEXT: tail call void @llvm.dbg.value(metadata i32 %x, i64 0, metadata !{{[0-9]+}}, metadata !{{[0-9]+}}), !dbg !{{[0-9]+}}
+; OPTIMIZATION_LEVEL_2-NEXT: tail call void @llvm.dbg.value(metadata i32 %y, i64 0, metadata !{{[0-9]+}}, metadata !{{[0-9]+}}), !dbg !{{[0-9]+}}
+; OPTIMIZATION_LEVEL_2-NEXT: %0 = tail call i32 @maxA(i32 %x, i32 %y) #{{[0-9]+}}, !dbg !{{[0-9]+}}
+; OPTIMIZATION_LEVEL_2-NEXT: ret i32 %0, !dbg !{{[0-9]+}}
+; OPTIMIZATION_LEVEL_2-NEXT: }
+
+entry:
+ %x.addr = alloca i32, align 4
+ %y.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ %m = alloca i32, align 4
+ %j = alloca i32, align 4
+ store i32 %x, i32* %x.addr, align 4
+ call void @llvm.dbg.declare(metadata i32* %x.addr, metadata !35, metadata !12), !dbg !36
+ store i32 %y, i32* %y.addr, align 4
+ call void @llvm.dbg.declare(metadata i32* %y.addr, metadata !37, metadata !12), !dbg !38
+ call void @llvm.dbg.declare(metadata i32* %i, metadata !39, metadata !12), !dbg !40
+ call void @llvm.dbg.declare(metadata i32* %m, metadata !41, metadata !12), !dbg !42
+ call void @llvm.dbg.declare(metadata i32* %j, metadata !43, metadata !12), !dbg !44
+ %0 = load i32, i32* %x.addr, align 4, !dbg !45
+ %1 = load i32, i32* %y.addr, align 4, !dbg !47
+ %cmp = icmp sgt i32 %0, %1, !dbg !48
+ br i1 %cmp, label %if.then, label %if.else, !dbg !49
+
+if.then: ; preds = %entry
+ %2 = load i32, i32* %x.addr, align 4, !dbg !50
+ store i32 %2, i32* %m, align 4, !dbg !51
+ br label %if.end, !dbg !52
+
+if.else: ; preds = %entry
+ %3 = load i32, i32* %y.addr, align 4, !dbg !53
+ store i32 %3, i32* %m, align 4, !dbg !54
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ %4 = load i32, i32* %m, align 4, !dbg !55
+ ret i32 %4, !dbg !56
+}
+
+; Function Attrs: nounwind uwtable
+define void @f() !dbg !57 {
+entry:
+
+; OPTIMIZATION_LEVEL_0: define void @f()
+; OPTIMIZATION_LEVEL_0-NEXT: entry:
+; OPTIMIZATION_LEVEL_0-NEXT: %call = call i32 @maxA(i32 3, i32 4), !dbg !{{[0-9]+}}
+; OPTIMIZATION_LEVEL_0-NEXT: %call1 = call i32 @maxB(i32 1, i32 9), !dbg !{{[0-9]+}}
+; OPTIMIZATION_LEVEL_0-NEXT: ret void, !dbg !{{[0-9]+}}
+
+; OPTIMIZATION_LEVEL_2: define void @f()
+; OPTIMIZATION_LEVEL_2-NEXT: entry:
+; OPTIMIZATION_LEVEL_2-NEXT: ret void, !dbg !{{[0-9]+}}
+
+ %call = call i32 @maxA(i32 3, i32 4), !dbg !60
+ %call1 = call i32 @maxB(i32 1, i32 9), !dbg !61
+ ret void, !dbg !62
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+!llvm.ident = !{!5}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "mergefunc-preserve-debug-info.c", directory: "")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{!""}
+!6 = distinct !DISubprogram(name: "maxA", scope: !7, file: !7, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!7 = !DIFile(filename: "./mergefunc-preserve-debug-info.c", directory: "")
+!8 = !DISubroutineType(types: !9)
+!9 = !{!10, !10, !10}
+!10 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!11 = !DILocalVariable(name: "x", arg: 1, scope: !6, file: !7, line: 1, type: !10)
+!12 = !DIExpression()
+!13 = !DILocation(line: 1, column: 14, scope: !6)
+!14 = !DILocalVariable(name: "y", arg: 2, scope: !6, file: !7, line: 1, type: !10)
+!15 = !DILocation(line: 1, column: 21, scope: !6)
+!16 = !DILocalVariable(name: "i", scope: !6, file: !7, line: 2, type: !10)
+!17 = !DILocation(line: 2, column: 7, scope: !6)
+!18 = !DILocalVariable(name: "m", scope: !6, file: !7, line: 2, type: !10)
+!19 = !DILocation(line: 2, column: 10, scope: !6)
+!20 = !DILocalVariable(name: "j", scope: !6, file: !7, line: 2, type: !10)
+!21 = !DILocation(line: 2, column: 13, scope: !6)
+!22 = !DILocation(line: 3, column: 7, scope: !23)
+!23 = distinct !DILexicalBlock(scope: !6, file: !7, line: 3, column: 7)
+!24 = !DILocation(line: 3, column: 11, scope: !23)
+!25 = !DILocation(line: 3, column: 9, scope: !23)
+!26 = !DILocation(line: 3, column: 7, scope: !6)
+!27 = !DILocation(line: 4, column: 9, scope: !23)
+!28 = !DILocation(line: 4, column: 7, scope: !23)
+!29 = !DILocation(line: 4, column: 5, scope: !23)
+!30 = !DILocation(line: 6, column: 9, scope: !23)
+!31 = !DILocation(line: 6, column: 7, scope: !23)
+!32 = !DILocation(line: 7, column: 10, scope: !6)
+!33 = !DILocation(line: 7, column: 3, scope: !6)
+!34 = distinct !DISubprogram(name: "maxB", scope: !7, file: !7, line: 10, type: !8, isLocal: false, isDefinition: true, scopeLine: 10, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!35 = !DILocalVariable(name: "x", arg: 1, scope: !34, file: !7, line: 10, type: !10)
+!36 = !DILocation(line: 10, column: 14, scope: !34)
+!37 = !DILocalVariable(name: "y", arg: 2, scope: !34, file: !7, line: 10, type: !10)
+!38 = !DILocation(line: 10, column: 21, scope: !34)
+!39 = !DILocalVariable(name: "i", scope: !34, file: !7, line: 11, type: !10)
+!40 = !DILocation(line: 11, column: 7, scope: !34)
+!41 = !DILocalVariable(name: "m", scope: !34, file: !7, line: 11, type: !10)
+!42 = !DILocation(line: 11, column: 10, scope: !34)
+!43 = !DILocalVariable(name: "j", scope: !34, file: !7, line: 11, type: !10)
+!44 = !DILocation(line: 11, column: 13, scope: !34)
+!45 = !DILocation(line: 12, column: 7, scope: !46)
+!46 = distinct !DILexicalBlock(scope: !34, file: !7, line: 12, column: 7)
+!47 = !DILocation(line: 12, column: 11, scope: !46)
+!48 = !DILocation(line: 12, column: 9, scope: !46)
+!49 = !DILocation(line: 12, column: 7, scope: !34)
+!50 = !DILocation(line: 13, column: 9, scope: !46)
+!51 = !DILocation(line: 13, column: 7, scope: !46)
+!52 = !DILocation(line: 13, column: 5, scope: !46)
+!53 = !DILocation(line: 15, column: 9, scope: !46)
+!54 = !DILocation(line: 15, column: 7, scope: !46)
+!55 = !DILocation(line: 16, column: 10, scope: !34)
+!56 = !DILocation(line: 16, column: 3, scope: !34)
+!57 = distinct !DISubprogram(name: "f", scope: !7, file: !7, line: 19, type: !58, isLocal: false, isDefinition: true, scopeLine: 19, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!58 = !DISubroutineType(types: !59)
+!59 = !{null}
+!60 = !DILocation(line: 21, column: 3, scope: !57)
+!61 = !DILocation(line: 22, column: 3, scope: !57)
+!62 = !DILocation(line: 23, column: 1, scope: !57)
diff --git a/test/Transforms/MetaRenamer/metarenamer.ll b/test/Transforms/MetaRenamer/metarenamer.ll
index 213fbe3bbff7..7b527ae54cb1 100644
--- a/test/Transforms/MetaRenamer/metarenamer.ll
+++ b/test/Transforms/MetaRenamer/metarenamer.ll
@@ -96,3 +96,18 @@ define i32 @varargs_func_6_xxx(i32 %arg_1_xxx, i32 %arg_2_xxx, ...) nounwind uwt
store i32 %arg_2_xxx, i32* %2, align 4
ret i32 6
}
+
+declare noalias i8* @malloc(i32)
+declare void @free(i8* nocapture)
+
+define void @dont_rename_lib_funcs() {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: [[TMP:%.*]] = call i8* @malloc(i32 23)
+; CHECK-NEXT: call void @free(i8* [[TMP]])
+; CHECK-NEXT: ret void
+;
+ %x = call i8* @malloc(i32 23)
+ call void @free(i8* %x)
+ ret void
+}
diff --git a/test/Transforms/NewGVN/2007-07-26-PhiErasure.ll b/test/Transforms/NewGVN/2007-07-26-PhiErasure.ll
index 402de50c72cf..27a798bf7dd1 100644
--- a/test/Transforms/NewGVN/2007-07-26-PhiErasure.ll
+++ b/test/Transforms/NewGVN/2007-07-26-PhiErasure.ll
@@ -1,4 +1,4 @@
-; XFAIL: *
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -newgvn -S | FileCheck %s
%struct..0anon = type { i32 }
@@ -9,37 +9,34 @@
@n_spills = external global i32 ; <i32*> [#uses=2]
define i32 @reload(%struct.rtx_def* %first, i32 %global, %struct.FILE* %dumpfile) {
+; CHECK-LABEL: @reload(
+; CHECK-NEXT: cond_next2835.1:
+; CHECK-NEXT: br label [[BB2928:%.*]]
+; CHECK: bb2928:
+; CHECK-NEXT: br i1 false, label [[COND_NEXT2943:%.*]], label [[COND_TRUE2935:%.*]]
+; CHECK: cond_true2935:
+; CHECK-NEXT: br label [[COND_NEXT2943]]
+; CHECK: cond_next2943:
+; CHECK-NEXT: br i1 false, label [[BB2982_PREHEADER:%.*]], label [[BB2928]]
+; CHECK: bb2982.preheader:
+; CHECK-NEXT: store i8 undef, i8* null
+; CHECK-NEXT: ret i32 undef
+;
cond_next2835.1: ; preds = %cond_next2861
- %tmp2922 = load i32, i32* @n_spills, align 4 ; <i32> [#uses=0]
- br label %bb2928
+ %tmp2922 = load i32, i32* @n_spills, align 4 ; <i32> [#uses=0]
+ br label %bb2928
bb2928: ; preds = %cond_next2835.1, %cond_next2943
- br i1 false, label %cond_next2943, label %cond_true2935
+ br i1 false, label %cond_next2943, label %cond_true2935
cond_true2935: ; preds = %bb2928
- br label %cond_next2943
+ br label %cond_next2943
cond_next2943: ; preds = %cond_true2935, %bb2928
- br i1 false, label %bb2982.preheader, label %bb2928
+ br i1 false, label %bb2982.preheader, label %bb2928
bb2982.preheader: ; preds = %cond_next2943
- %tmp298316 = load i32, i32* @n_spills, align 4 ; <i32> [#uses=0]
- ret i32 %tmp298316
+ %tmp298316 = load i32, i32* @n_spills, align 4 ; <i32> [#uses=0]
+ ret i32 %tmp298316
}
-
-; CHECK: define i32 @reload(%struct.rtx_def* %first, i32 %global, %struct.FILE* %dumpfile) {
-; CHECK-NEXT: cond_next2835.1:
-; CHECK-NEXT: br label %bb2928
-; CHECK: bb2928:
-; CHECK-NEXT: br i1 false, label %bb2928.cond_next2943_crit_edge, label %cond_true2935
-; CHECK: bb2928.cond_next2943_crit_edge:
-; CHECK-NEXT: br label %cond_next2943
-; CHECK: cond_true2935:
-; CHECK-NEXT: br label %cond_next2943
-; CHECK: cond_next2943:
-; CHECK-NEXT: br i1 false, label %bb2982.preheader, label %bb2928
-; CHECK: bb2982.preheader:
-; CHECK-NEXT: %tmp298316 = load i32, i32* @n_spills, align 4
-; CHECK-NEXT: ret i32 %tmp298316
-; CHECK-NEXT: }
diff --git a/test/Transforms/NewGVN/2011-07-07-MatchIntrinsicExtract.ll b/test/Transforms/NewGVN/2011-07-07-MatchIntrinsicExtract.ll
index 4b47b06f1657..86c80d1d5f21 100644
--- a/test/Transforms/NewGVN/2011-07-07-MatchIntrinsicExtract.ll
+++ b/test/Transforms/NewGVN/2011-07-07-MatchIntrinsicExtract.ll
@@ -1,4 +1,3 @@
-; XFAIL: *
; RUN: opt < %s -newgvn -S | FileCheck %s
;
@@ -9,7 +8,8 @@ entry:
%uadd = tail call %0 @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
%uadd.0 = extractvalue %0 %uadd, 0
%add1 = add i64 %a, %b
- ret i64 %add1
+ %add2 = add i64 %add1, %uadd.0
+ ret i64 %add2
}
; CHECK-LABEL: @test1(
@@ -21,7 +21,8 @@ entry:
%usub = tail call %0 @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
%usub.0 = extractvalue %0 %usub, 0
%sub1 = sub i64 %a, %b
- ret i64 %sub1
+ %add2 = add i64 %sub1, %usub.0
+ ret i64 %add2
}
; CHECK-LABEL: @test2(
@@ -33,7 +34,8 @@ entry:
%umul = tail call %0 @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
%umul.0 = extractvalue %0 %umul, 0
%mul1 = mul i64 %a, %b
- ret i64 %mul1
+ %add2 = add i64 %mul1, %umul.0
+ ret i64 %add2
}
; CHECK-LABEL: @test3(
@@ -45,7 +47,8 @@ entry:
%sadd = tail call %0 @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
%sadd.0 = extractvalue %0 %sadd, 0
%add1 = add i64 %a, %b
- ret i64 %add1
+ %add2 = add i64 %add1, %sadd.0
+ ret i64 %add2
}
; CHECK-LABEL: @test4(
@@ -57,7 +60,8 @@ entry:
%ssub = tail call %0 @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
%ssub.0 = extractvalue %0 %ssub, 0
%sub1 = sub i64 %a, %b
- ret i64 %sub1
+ %add2 = add i64 %sub1, %ssub.0
+ ret i64 %add2
}
; CHECK-LABEL: @test5(
@@ -69,7 +73,8 @@ entry:
%smul = tail call %0 @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
%smul.0 = extractvalue %0 %smul, 0
%mul1 = mul i64 %a, %b
- ret i64 %mul1
+ %add2 = add i64 %mul1, %smul.0
+ ret i64 %add2
}
; CHECK-LABEL: @test6(
diff --git a/test/Transforms/NewGVN/basic-cyclic-opt.ll b/test/Transforms/NewGVN/basic-cyclic-opt.ll
index 523ed2612e3c..7830d7ea78a5 100644
--- a/test/Transforms/NewGVN/basic-cyclic-opt.ll
+++ b/test/Transforms/NewGVN/basic-cyclic-opt.ll
@@ -169,7 +169,6 @@ define i32 @vnum_test3(i32* %data) #0 {
; CHECK-NEXT: [[TMP10:%.*]] = icmp slt i32 [[I_0]], 30
; CHECK-NEXT: br i1 [[TMP10]], label [[BB11:%.*]], label [[BB14:%.*]]
; CHECK: bb11:
-; CHECK-NEXT: store i32 0, i32* [[TMP9]], align 4
; CHECK-NEXT: br label [[BB14]]
; CHECK: bb14:
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, i32* [[DATA]], i64 1
@@ -228,6 +227,87 @@ bb23: ; preds = %bb4
ret i32 %p.0
}
+;; This is an irreducible test case that will cause a memoryphi node loop
+;; in the two blocks.
+;; It's equivalent to something like
+;; *a = 0
+;; if (<....>) goto loopmiddle
+;; loopstart:
+;; loopmiddle:
+;; load *a
+;; *a = 0
+;; if (<....>) goto loopstart otherwise goto loopend
+;; loopend:
+;; load *a
+;; add the results of the loads
+;; return them
+;;
+;; Both loads should equal 0, but proving this requires being
+;; completely optimistic about MemoryPhis; otherwise
+;; we will not be able to see through the cycle.
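+;; (A sketch of the optimistic reasoning, as we read it: start by assuming
+;; every MemoryPhi is equivalent to the initial store of 0; every store in
+;; the cycle writes 0 again, so the assumption is never invalidated and both
+;; loads fold to 0. A pessimistic analysis would give up at the back edge.)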
+define i8 @irreducible_memoryphi(i8* noalias %arg, i8* noalias %arg2) {
+; CHECK-LABEL: @irreducible_memoryphi(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: store i8 0, i8* [[ARG:%.*]]
+; CHECK-NEXT: br i1 undef, label [[BB2:%.*]], label [[BB1:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: br label [[BB2]]
+; CHECK: bb2:
+; CHECK-NEXT: br i1 undef, label [[BB1]], label [[BB3:%.*]]
+; CHECK: bb3:
+; CHECK-NEXT: ret i8 0
+;
+bb:
+ store i8 0, i8 *%arg
+ br i1 undef, label %bb2, label %bb1
+
+bb1: ; preds = %bb2, %bb
+ br label %bb2
+
+bb2: ; preds = %bb1, %bb
+ %tmp2 = load i8, i8* %arg
+ store i8 0, i8 *%arg
+ br i1 undef, label %bb1, label %bb3
+
+bb3: ; preds = %bb2
+ %tmp = load i8, i8* %arg
+ %tmp3 = add i8 %tmp, %tmp2
+ ret i8 %tmp3
+}
+;; This is an irreducible test case that will cause a phi node loop
+;; in the two blocks
+;;
+;; It should return 0, but proving this requires being
+;; completely optimistic about phis; otherwise
+;; we will not be able to see through the cycle.
+define i32 @irreducible_phi(i32 %arg) {
+; CHECK-LABEL: @irreducible_phi(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: br i1 undef, label [[BB2:%.*]], label [[BB1:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: br label [[BB2]]
+; CHECK: bb2:
+; CHECK-NEXT: br i1 undef, label [[BB1]], label [[BB3:%.*]]
+; CHECK: bb3:
+; CHECK-NEXT: ret i32 0
+;
+bb:
+ %tmp = add i32 0, %arg
+ br i1 undef, label %bb2, label %bb1
+
+bb1: ; preds = %bb2, %bb
+ %phi1 = phi i32 [%tmp, %bb], [%phi2, %bb2]
+ br label %bb2
+
+bb2: ; preds = %bb1, %bb
+ %phi2 = phi i32 [%tmp, %bb], [%phi1, %bb1]
+ br i1 undef, label %bb1, label %bb3
+
+bb3: ; preds = %bb2
+ ; This should be zero
+ %tmp3 = sub i32 %tmp, %phi2
+ ret i32 %tmp3
+}
attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
!llvm.ident = !{!0, !0, !0}
diff --git a/test/Transforms/NewGVN/bitcast-of-call.ll b/test/Transforms/NewGVN/bitcast-of-call.ll
index 7b25038275b5..2b817fbcd01c 100644
--- a/test/Transforms/NewGVN/bitcast-of-call.ll
+++ b/test/Transforms/NewGVN/bitcast-of-call.ll
@@ -1,14 +1,20 @@
-; XFAIL: *
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -newgvn -S | FileCheck %s
; PR2213
define i32* @f(i8* %x) {
+; CHECK-LABEL: @f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP:%.*]] = call i8* @m(i32 12)
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[TMP]] to i32*
+; CHECK-NEXT: ret i32* [[TMP1]]
+;
entry:
- %tmp = call i8* @m( i32 12 ) ; <i8*> [#uses=2]
- %tmp1 = bitcast i8* %tmp to i32* ; <i32*> [#uses=0]
- %tmp2 = bitcast i8* %tmp to i32* ; <i32*> [#uses=0]
-; CHECK-NOT: %tmp2
- ret i32* %tmp2
+ %tmp = call i8* @m( i32 12 ) ; <i8*> [#uses=2]
+ %tmp1 = bitcast i8* %tmp to i32* ; <i32*> [#uses=0]
+ %tmp3 = bitcast i32* %tmp1 to i8*
+ %tmp2 = bitcast i8* %tmp3 to i32* ; <i32*> [#uses=0]
+ ret i32* %tmp2
}
declare i8* @m(i32)
diff --git a/test/Transforms/NewGVN/calloc-load-removal.ll b/test/Transforms/NewGVN/calloc-load-removal.ll
index e6870442064b..cdeb971a23e2 100644
--- a/test/Transforms/NewGVN/calloc-load-removal.ll
+++ b/test/Transforms/NewGVN/calloc-load-removal.ll
@@ -1,4 +1,3 @@
-; XFAIL: *
; RUN: opt -S -basicaa -newgvn < %s | FileCheck %s
; RUN: opt -S -basicaa -newgvn -disable-simplify-libcalls < %s | FileCheck %s -check-prefix=CHECK_NO_LIBCALLS
; Check that loads from calloc are recognized as being zero.
diff --git a/test/Transforms/NewGVN/calls-nonlocal.ll b/test/Transforms/NewGVN/calls-nonlocal.ll
index 292060db812e..6e918050d591 100644
--- a/test/Transforms/NewGVN/calls-nonlocal.ll
+++ b/test/Transforms/NewGVN/calls-nonlocal.ll
@@ -1,4 +1,6 @@
; XFAIL: *
+;; NewGVN zaps the strlens, but currently takes two iterations to evaluate the conditions, because
+;; we prune predicateinfo, and the icmps only become equivalent after the strlens are zapped
; Two occurrences of strlen should be zapped.
; RUN: opt < %s -basicaa -newgvn -S | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
diff --git a/test/Transforms/NewGVN/cond_br2.ll b/test/Transforms/NewGVN/cond_br2.ll
index e511ff7ed514..ff7a76d14695 100644
--- a/test/Transforms/NewGVN/cond_br2.ll
+++ b/test/Transforms/NewGVN/cond_br2.ll
@@ -19,7 +19,7 @@ define void @_Z4testv() #0 personality i8* bitcast (i32 (...)* @__gxx_personalit
entry:
%sv = alloca %"class.llvm::SmallVector", align 16
%0 = bitcast %"class.llvm::SmallVector"* %sv to i8*
- call void @llvm.lifetime.start(i64 64, i8* %0) #1
+ call void @llvm.lifetime.start.p0i8(i64 64, i8* %0) #1
%BeginX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
%FirstEl.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 3
%1 = bitcast %"union.llvm::SmallVectorBase::U"* %FirstEl.i.i.i.i.i.i to i8*
@@ -95,7 +95,7 @@ if.then.i.i.i20: ; preds = %invoke.cont3
br label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21
_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21: ; preds = %invoke.cont3, %if.then.i.i.i20
- call void @llvm.lifetime.end(i64 64, i8* %0) #1
+ call void @llvm.lifetime.end.p0i8(i64 64, i8* %0) #1
ret void
lpad: ; preds = %if.end.i14, %if.end.i, %invoke.cont2
@@ -114,14 +114,14 @@ eh.resume: ; preds = %if.then.i.i.i, %lpa
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare i32 @__gxx_personality_v0(...)
declare void @_Z1gRN4llvm11SmallVectorIiLj8EEE(%"class.llvm::SmallVector"*) #2
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
declare void @_ZN4llvm15SmallVectorBase8grow_podEmm(%"class.llvm::SmallVectorBase"*, i64, i64) #2
diff --git a/test/Transforms/NewGVN/condprop-xfail.ll b/test/Transforms/NewGVN/condprop-xfail.ll
new file mode 100644
index 000000000000..5c049617f875
--- /dev/null
+++ b/test/Transforms/NewGVN/condprop-xfail.ll
@@ -0,0 +1,123 @@
+; XFAIL: *
+; RUN: opt < %s -basicaa -newgvn -S | FileCheck %s
+
+@a = external global i32 ; <i32*> [#uses=7]
+
+;; NewGVN takes two passes to get this, because we prune predicateinfo
+; CHECK-LABEL: @test1(
+define i32 @test1() nounwind {
+entry:
+ %0 = load i32, i32* @a, align 4
+ %1 = icmp eq i32 %0, 4
+ br i1 %1, label %bb, label %bb1
+
+bb: ; preds = %entry
+ br label %bb8
+
+bb1: ; preds = %entry
+ %2 = load i32, i32* @a, align 4
+ %3 = icmp eq i32 %2, 5
+ br i1 %3, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ br label %bb8
+
+bb3: ; preds = %bb1
+ %4 = load i32, i32* @a, align 4
+ %5 = icmp eq i32 %4, 4
+; CHECK: br i1 false, label %bb4, label %bb5
+ br i1 %5, label %bb4, label %bb5
+
+bb4: ; preds = %bb3
+ %6 = load i32, i32* @a, align 4
+ %7 = add i32 %6, 5
+ br label %bb8
+
+bb5: ; preds = %bb3
+ %8 = load i32, i32* @a, align 4
+ %9 = icmp eq i32 %8, 5
+; CHECK: br i1 false, label %bb6, label %bb7
+ br i1 %9, label %bb6, label %bb7
+
+bb6: ; preds = %bb5
+ %10 = load i32, i32* @a, align 4
+ %11 = add i32 %10, 4
+ br label %bb8
+
+bb7: ; preds = %bb5
+ %12 = load i32, i32* @a, align 4
+ br label %bb8
+
+bb8: ; preds = %bb7, %bb6, %bb4, %bb2, %bb
+ %.0 = phi i32 [ %12, %bb7 ], [ %11, %bb6 ], [ %7, %bb4 ], [ 4, %bb2 ], [ 5, %bb ]
+ br label %return
+
+return: ; preds = %bb8
+ ret i32 %.0
+}
+;; NewGVN takes two passes to get test[6,8] and test[6,8]_fp's main part
+;; The icmp ne requires an equality table that inserts the inequalities for each
+;; discovered equality while processing.
+; CHECK-LABEL: @test6(
+define i1 @test6(i32 %x, i32 %y) {
+ %cmp2 = icmp ne i32 %x, %y
+ %cmp = icmp eq i32 %x, %y
+ %cmp3 = icmp eq i32 %x, %y
+ br i1 %cmp, label %same, label %different
+
+same:
+; CHECK: ret i1 false
+ ret i1 %cmp2
+
+different:
+; CHECK: ret i1 false
+ ret i1 %cmp3
+}
+
+; CHECK-LABEL: @test6_fp(
+define i1 @test6_fp(float %x, float %y) {
+ %cmp2 = fcmp une float %x, %y
+ %cmp = fcmp oeq float %x, %y
+ %cmp3 = fcmp oeq float %x, %y
+ br i1 %cmp, label %same, label %different
+
+same:
+; CHECK: ret i1 false
+ ret i1 %cmp2
+
+different:
+; CHECK: ret i1 false
+ ret i1 %cmp3
+}
+; CHECK-LABEL: @test8(
+define i1 @test8(i32 %x, i32 %y) {
+ %cmp2 = icmp sle i32 %x, %y
+ %cmp = icmp sgt i32 %x, %y
+ %cmp3 = icmp sgt i32 %x, %y
+ br i1 %cmp, label %same, label %different
+
+same:
+; CHECK: ret i1 false
+ ret i1 %cmp2
+
+different:
+; CHECK: ret i1 false
+ ret i1 %cmp3
+}
+
+; CHECK-LABEL: @test8_fp(
+define i1 @test8_fp(float %x, float %y) {
+ %cmp2 = fcmp ule float %x, %y
+ %cmp = fcmp ogt float %x, %y
+ %cmp3 = fcmp ogt float %x, %y
+ br i1 %cmp, label %same, label %different
+
+same:
+; CHECK: ret i1 false
+ ret i1 %cmp2
+
+different:
+; CHECK: ret i1 false
+ ret i1 %cmp3
+}
+
diff --git a/test/Transforms/NewGVN/condprop.ll b/test/Transforms/NewGVN/condprop.ll
index 898690dec199..6eb9bb6b2619 100644
--- a/test/Transforms/NewGVN/condprop.ll
+++ b/test/Transforms/NewGVN/condprop.ll
@@ -1,266 +1,211 @@
-; XFAIL: *
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basicaa -newgvn -S | FileCheck %s
-@a = external global i32 ; <i32*> [#uses=7]
-
-; CHECK-LABEL: @test1(
-define i32 @test1() nounwind {
-entry:
- %0 = load i32, i32* @a, align 4
- %1 = icmp eq i32 %0, 4
- br i1 %1, label %bb, label %bb1
-
-bb: ; preds = %entry
- br label %bb8
-
-bb1: ; preds = %entry
- %2 = load i32, i32* @a, align 4
- %3 = icmp eq i32 %2, 5
- br i1 %3, label %bb2, label %bb3
-
-bb2: ; preds = %bb1
- br label %bb8
-
-bb3: ; preds = %bb1
- %4 = load i32, i32* @a, align 4
- %5 = icmp eq i32 %4, 4
-; CHECK: br i1 false, label %bb4, label %bb5
- br i1 %5, label %bb4, label %bb5
-
-bb4: ; preds = %bb3
- %6 = load i32, i32* @a, align 4
- %7 = add i32 %6, 5
- br label %bb8
-
-bb5: ; preds = %bb3
- %8 = load i32, i32* @a, align 4
- %9 = icmp eq i32 %8, 5
-; CHECK: br i1 false, label %bb6, label %bb7
- br i1 %9, label %bb6, label %bb7
-
-bb6: ; preds = %bb5
- %10 = load i32, i32* @a, align 4
- %11 = add i32 %10, 4
- br label %bb8
-
-bb7: ; preds = %bb5
- %12 = load i32, i32* @a, align 4
- br label %bb8
-
-bb8: ; preds = %bb7, %bb6, %bb4, %bb2, %bb
- %.0 = phi i32 [ %12, %bb7 ], [ %11, %bb6 ], [ %7, %bb4 ], [ 4, %bb2 ], [ 5, %bb ]
- br label %return
-
-return: ; preds = %bb8
- ret i32 %.0
-}
declare void @foo(i1)
declare void @bar(i32)
-; CHECK-LABEL: @test3(
define void @test3(i32 %x, i32 %y) {
+; CHECK-LABEL: @test3(
+; CHECK-NEXT: [[XZ:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK-NEXT: [[YZ:%.*]] = icmp eq i32 [[Y:%.*]], 0
+; CHECK-NEXT: [[Z:%.*]] = and i1 [[XZ]], [[YZ]]
+; CHECK-NEXT: br i1 [[Z]], label [[BOTH_ZERO:%.*]], label [[NOPE:%.*]]
+; CHECK: both_zero:
+; CHECK-NEXT: call void @foo(i1 true)
+; CHECK-NEXT: call void @foo(i1 true)
+; CHECK-NEXT: call void @bar(i32 0)
+; CHECK-NEXT: call void @bar(i32 0)
+; CHECK-NEXT: ret void
+; CHECK: nope:
+; CHECK-NEXT: call void @foo(i1 false)
+; CHECK-NEXT: ret void
+;
%xz = icmp eq i32 %x, 0
%yz = icmp eq i32 %y, 0
%z = and i1 %xz, %yz
br i1 %z, label %both_zero, label %nope
both_zero:
call void @foo(i1 %xz)
-; CHECK: call void @foo(i1 true)
call void @foo(i1 %yz)
-; CHECK: call void @foo(i1 true)
call void @bar(i32 %x)
-; CHECK: call void @bar(i32 0)
call void @bar(i32 %y)
-; CHECK: call void @bar(i32 0)
ret void
nope:
call void @foo(i1 %z)
-; CHECK: call void @foo(i1 false)
ret void
}
-
-; CHECK-LABEL: @test4(
define void @test4(i1 %b, i32 %x) {
+; CHECK-LABEL: @test4(
+; CHECK-NEXT: br i1 [[B:%.*]], label [[SW:%.*]], label [[CASE3:%.*]]
+; CHECK: sw:
+; CHECK-NEXT: switch i32 [[X:%.*]], label [[DEFAULT:%.*]] [
+; CHECK-NEXT: i32 0, label [[CASE0:%.*]]
+; CHECK-NEXT: i32 1, label [[CASE1:%.*]]
+; CHECK-NEXT: i32 2, label [[CASE0]]
+; CHECK-NEXT: i32 3, label [[CASE3]]
+; CHECK-NEXT: i32 4, label [[DEFAULT]]
+; CHECK-NEXT: ]
+; CHECK: default:
+; CHECK-NEXT: call void @bar(i32 [[X]])
+; CHECK-NEXT: ret void
+; CHECK: case0:
+; CHECK-NEXT: call void @bar(i32 [[X]])
+; CHECK-NEXT: ret void
+; CHECK: case1:
+; CHECK-NEXT: call void @bar(i32 1)
+; CHECK-NEXT: ret void
+; CHECK: case3:
+; CHECK-NEXT: call void @bar(i32 [[X]])
+; CHECK-NEXT: ret void
+;
br i1 %b, label %sw, label %case3
sw:
switch i32 %x, label %default [
- i32 0, label %case0
- i32 1, label %case1
- i32 2, label %case0
- i32 3, label %case3
- i32 4, label %default
+ i32 0, label %case0
+ i32 1, label %case1
+ i32 2, label %case0
+ i32 3, label %case3
+ i32 4, label %default
]
default:
-; CHECK: default:
call void @bar(i32 %x)
-; CHECK: call void @bar(i32 %x)
ret void
case0:
-; CHECK: case0:
call void @bar(i32 %x)
-; CHECK: call void @bar(i32 %x)
ret void
case1:
-; CHECK: case1:
call void @bar(i32 %x)
-; CHECK: call void @bar(i32 1)
ret void
case3:
-; CHECK: case3:
call void @bar(i32 %x)
-; CHECK: call void @bar(i32 %x)
ret void
}
-; CHECK-LABEL: @test5(
define i1 @test5(i32 %x, i32 %y) {
+; CHECK-LABEL: @test5(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[SAME:%.*]], label [[DIFFERENT:%.*]]
+; CHECK: same:
+; CHECK-NEXT: ret i1 false
+; CHECK: different:
+; CHECK-NEXT: ret i1 false
+;
%cmp = icmp eq i32 %x, %y
br i1 %cmp, label %same, label %different
same:
%cmp2 = icmp ne i32 %x, %y
-; CHECK: ret i1 false
ret i1 %cmp2
different:
%cmp3 = icmp eq i32 %x, %y
-; CHECK: ret i1 false
ret i1 %cmp3
}
-; CHECK-LABEL: @test6(
-define i1 @test6(i32 %x, i32 %y) {
- %cmp2 = icmp ne i32 %x, %y
- %cmp = icmp eq i32 %x, %y
- %cmp3 = icmp eq i32 %x, %y
- br i1 %cmp, label %same, label %different
-
-same:
-; CHECK: ret i1 false
- ret i1 %cmp2
-different:
-; CHECK: ret i1 false
- ret i1 %cmp3
-}
-
-; CHECK-LABEL: @test6_fp(
-define i1 @test6_fp(float %x, float %y) {
- %cmp2 = fcmp une float %x, %y
- %cmp = fcmp oeq float %x, %y
- %cmp3 = fcmp oeq float %x, %y
- br i1 %cmp, label %same, label %different
-
-same:
-; CHECK: ret i1 false
- ret i1 %cmp2
-
-different:
-; CHECK: ret i1 false
- ret i1 %cmp3
-}
-
-; CHECK-LABEL: @test7(
define i1 @test7(i32 %x, i32 %y) {
+; CHECK-LABEL: @test7(
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[SAME:%.*]], label [[DIFFERENT:%.*]]
+; CHECK: same:
+; CHECK-NEXT: ret i1 false
+; CHECK: different:
+; CHECK-NEXT: ret i1 false
+;
%cmp = icmp sgt i32 %x, %y
br i1 %cmp, label %same, label %different
same:
%cmp2 = icmp sle i32 %x, %y
-; CHECK: ret i1 false
ret i1 %cmp2
different:
%cmp3 = icmp sgt i32 %x, %y
-; CHECK: ret i1 false
ret i1 %cmp3
}
-; CHECK-LABEL: @test7_fp(
define i1 @test7_fp(float %x, float %y) {
+; CHECK-LABEL: @test7_fp(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[SAME:%.*]], label [[DIFFERENT:%.*]]
+; CHECK: same:
+; CHECK-NEXT: ret i1 false
+; CHECK: different:
+; CHECK-NEXT: ret i1 false
+;
%cmp = fcmp ogt float %x, %y
br i1 %cmp, label %same, label %different
same:
%cmp2 = fcmp ule float %x, %y
-; CHECK: ret i1 false
ret i1 %cmp2
different:
%cmp3 = fcmp ogt float %x, %y
-; CHECK: ret i1 false
- ret i1 %cmp3
-}
-
-; CHECK-LABEL: @test8(
-define i1 @test8(i32 %x, i32 %y) {
- %cmp2 = icmp sle i32 %x, %y
- %cmp = icmp sgt i32 %x, %y
- %cmp3 = icmp sgt i32 %x, %y
- br i1 %cmp, label %same, label %different
-
-same:
-; CHECK: ret i1 false
- ret i1 %cmp2
-
-different:
-; CHECK: ret i1 false
- ret i1 %cmp3
-}
-
-; CHECK-LABEL: @test8_fp(
-define i1 @test8_fp(float %x, float %y) {
- %cmp2 = fcmp ule float %x, %y
- %cmp = fcmp ogt float %x, %y
- %cmp3 = fcmp ogt float %x, %y
- br i1 %cmp, label %same, label %different
-
-same:
-; CHECK: ret i1 false
- ret i1 %cmp2
-
-different:
-; CHECK: ret i1 false
ret i1 %cmp3
}
; PR1768
-; CHECK-LABEL: @test9(
define i32 @test9(i32 %i, i32 %j) {
+; CHECK-LABEL: @test9(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[I:%.*]], [[J:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[RET:%.*]]
+; CHECK: cond_true:
+; CHECK-NEXT: ret i32 0
+; CHECK: ret:
+; CHECK-NEXT: ret i32 5
+;
%cmp = icmp eq i32 %i, %j
br i1 %cmp, label %cond_true, label %ret
cond_true:
%diff = sub i32 %i, %j
ret i32 %diff
-; CHECK: ret i32 0
ret:
ret i32 5
-; CHECK: ret i32 5
}
; PR1768
-; CHECK-LABEL: @test10(
define i32 @test10(i32 %j, i32 %i) {
+; CHECK-LABEL: @test10(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[I:%.*]], [[J:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[RET:%.*]]
+; CHECK: cond_true:
+; CHECK-NEXT: ret i32 0
+; CHECK: ret:
+; CHECK-NEXT: ret i32 5
+;
%cmp = icmp eq i32 %i, %j
br i1 %cmp, label %cond_true, label %ret
cond_true:
%diff = sub i32 %i, %j
ret i32 %diff
-; CHECK: ret i32 0
ret:
ret i32 5
-; CHECK: ret i32 5
}
declare i32 @yogibar()
-; CHECK-LABEL: @test11(
define i32 @test11(i32 %x) {
+; CHECK-LABEL: @test11(
+; CHECK-NEXT: [[V0:%.*]] = call i32 @yogibar()
+; CHECK-NEXT: [[V1:%.*]] = call i32 @yogibar()
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[V0]], [[V1]]
+; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[NEXT:%.*]]
+; CHECK: cond_true:
+; CHECK-NEXT: ret i32 [[V0]]
+; CHECK: next:
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[X:%.*]], [[V0]]
+; CHECK-NEXT: br i1 [[CMP2]], label [[COND_TRUE2:%.*]], label [[NEXT2:%.*]]
+; CHECK: cond_true2:
+; CHECK-NEXT: ret i32 [[X]]
+; CHECK: next2:
+; CHECK-NEXT: ret i32 0
+;
%v0 = call i32 @yogibar()
%v1 = call i32 @yogibar()
%cmp = icmp eq i32 %v0, %v1
@@ -268,7 +213,6 @@ define i32 @test11(i32 %x) {
cond_true:
ret i32 %v1
-; CHECK: ret i32 %v0
next:
%cmp2 = icmp eq i32 %x, %v0
@@ -276,14 +220,23 @@ next:
cond_true2:
ret i32 %v0
-; CHECK: ret i32 %x
next2:
ret i32 0
}
-; CHECK-LABEL: @test12(
define i32 @test12(i32 %x) {
+; CHECK-LABEL: @test12(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+; CHECK: cond_true:
+; CHECK-NEXT: br label [[RET:%.*]]
+; CHECK: cond_false:
+; CHECK-NEXT: br label [[RET]]
+; CHECK: ret:
+; CHECK-NEXT: [[RES:%.*]] = phi i32 [ 0, [[COND_TRUE]] ], [ [[X]], [[COND_FALSE]] ]
+; CHECK-NEXT: ret i32 [[RES]]
+;
%cmp = icmp eq i32 %x, 0
br i1 %cmp, label %cond_true, label %cond_false
@@ -295,6 +248,5 @@ cond_false:
ret:
%res = phi i32 [ %x, %cond_true ], [ %x, %cond_false ]
-; CHECK: %res = phi i32 [ 0, %cond_true ], [ %x, %cond_false ]
ret i32 %res
}
diff --git a/test/Transforms/NewGVN/deadstore.ll b/test/Transforms/NewGVN/deadstore.ll
new file mode 100644
index 000000000000..778f42b77581
--- /dev/null
+++ b/test/Transforms/NewGVN/deadstore.ll
@@ -0,0 +1,79 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -newgvn -S | FileCheck %s
+
+;; Most of these are borrowed from transforms/DSE/simple.ll
+;; NewGVN should be able to eliminate any stores of the same value that are actually redundant.
+
+;; tmp5 is store of the same value to the same location as the load.
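+;; (Roughly, in hypothetical C terms: "int t = x->a; ...; x->a = t;", so the
+;; store writes back exactly what was just read and can be dropped.)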
+define void @test12({ i32, i32 }* %x) nounwind {
+; CHECK-LABEL: @test12(
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr { i32, i32 }, { i32, i32 }* [[X:%.*]], i32 0, i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr { i32, i32 }, { i32, i32 }* [[X]], i32 0, i32 1
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
+; CHECK-NEXT: [[TMP17:%.*]] = sub i32 0, [[TMP8]]
+; CHECK-NEXT: store i32 [[TMP17]], i32* [[TMP7]], align 4
+; CHECK-NEXT: ret void
+;
+ %tmp4 = getelementptr { i32, i32 }, { i32, i32 }* %x, i32 0, i32 0
+ %tmp5 = load i32, i32* %tmp4, align 4
+ %tmp7 = getelementptr { i32, i32 }, { i32, i32 }* %x, i32 0, i32 1
+ %tmp8 = load i32, i32* %tmp7, align 4
+ %tmp17 = sub i32 0, %tmp8
+ store i32 %tmp5, i32* %tmp4, align 4
+ store i32 %tmp17, i32* %tmp7, align 4
+ ret void
+}
+; Remove redundant store if loaded value is in another block.
+define i32 @test26(i1 %c, i32* %p) {
+; CHECK-LABEL: @test26(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: br label [[BB3:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: br label [[BB3]]
+; CHECK: bb3:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %v = load i32, i32* %p, align 4
+ br i1 %c, label %bb1, label %bb2
+bb1:
+ br label %bb3
+bb2:
+ store i32 %v, i32* %p, align 4
+ br label %bb3
+bb3:
+ ret i32 0
+}
+
+declare void @unknown_func()
+; Remove redundant store, which is in the same loop as the load.
+define i32 @test33(i1 %c, i32* %p, i32 %i) {
+; CHECK-LABEL: @test33(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[BB1:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT: br label [[BB2:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: call void @unknown_func()
+; CHECK-NEXT: br i1 undef, label [[BB1]], label [[BB3:%.*]]
+; CHECK: bb3:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ br label %bb1
+bb1:
+ %v = load i32, i32* %p, align 4
+ br label %bb2
+bb2:
+ store i32 %v, i32* %p, align 4
+ ; Might read and overwrite value at %p, but doesn't matter.
+ call void @unknown_func()
+ br i1 undef, label %bb1, label %bb3
+bb3:
+ ret i32 0
+}
diff --git a/test/Transforms/NewGVN/debugloc.ll b/test/Transforms/NewGVN/debugloc.ll
new file mode 100644
index 000000000000..55597a078bbc
--- /dev/null
+++ b/test/Transforms/NewGVN/debugloc.ll
@@ -0,0 +1,78 @@
+; XFAIL: *
+; RUN: opt < %s -newgvn -S | FileCheck %s
+; CHECK: {{^}}for.body:
+; CHECK-NEXT: [[VREG1:%[^ ]+]] = phi{{.*}}[[VREG2:%[^ ]+]],{{.*}}%.sink,
+; CHECK-NOT: !dbg
+; CHECK-SAME: {{$}}
+; CHECK: {{^}}for.inc:
+; CHECK-NEXT: [[VREG2]] = phi{{.*}}%inc,{{.*}}[[VREG1]]
+
+target triple = "x86_64-unknown-linux-gnu"
+
+@g = external local_unnamed_addr global i32, align 4
+
+; Function Attrs: nounwind uwtable
+define void @foo(i32 %x, i32 %y, i32 %z) local_unnamed_addr #0 !dbg !4 {
+entry:
+ %not.tobool = icmp eq i32 %x, 0, !dbg !8
+ %.sink = zext i1 %not.tobool to i32, !dbg !8
+ store i32 %.sink, i32* @g, align 4, !tbaa !9
+ %cmp8 = icmp sgt i32 %y, 0, !dbg !13
+ br i1 %cmp8, label %for.body.preheader, label %for.end, !dbg !17
+
+for.body.preheader: ; preds = %entry
+ br label %for.body, !dbg !19
+
+for.body: ; preds = %for.body.preheader, %for.inc
+ %i.09 = phi i32 [ %inc4, %for.inc ], [ 0, %for.body.preheader ]
+ %cmp1 = icmp sgt i32 %i.09, %z, !dbg !19
+ br i1 %cmp1, label %if.then2, label %for.inc, !dbg !21
+
+if.then2: ; preds = %for.body
+ %0 = load i32, i32* @g, align 4, !dbg !22, !tbaa !9
+ %inc = add nsw i32 %0, 1, !dbg !22
+ store i32 %inc, i32* @g, align 4, !dbg !22, !tbaa !9
+ br label %for.inc, !dbg !23
+
+for.inc: ; preds = %for.body, %if.then2
+ %inc4 = add nuw nsw i32 %i.09, 1, !dbg !24
+ %exitcond = icmp ne i32 %inc4, %y, !dbg !13
+ br i1 %exitcond, label %for.body, label %for.end.loopexit, !dbg !17
+
+for.end.loopexit: ; preds = %for.inc
+ br label %for.end, !dbg !26
+
+for.end: ; preds = %for.end.loopexit, %entry
+ ret void, !dbg !26
+}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1)
+!1 = !DIFile(filename: "foo.c", directory: "b/")
+!2 = !{i32 2, !"Dwarf Version", i32 4}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 3, type: !5, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !0)
+!5 = !DISubroutineType(types: !6)
+!6 = !{null, !7, !7, !7}
+!7 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!8 = !DILocation(line: 4, column: 7, scope: !4)
+!9 = !{!10, !10, i64 0}
+!10 = !{!"int", !11, i64 0}
+!11 = !{!"omnipotent char", !12, i64 0}
+!12 = !{!"Simple C/C++ TBAA"}
+!13 = !DILocation(line: 10, column: 13, scope: !14)
+!14 = !DILexicalBlockFile(scope: !15, file: !1, discriminator: 1)
+!15 = distinct !DILexicalBlock(scope: !16, file: !1, line: 10, column: 3)
+!16 = distinct !DILexicalBlock(scope: !4, file: !1, line: 10, column: 3)
+!17 = !DILocation(line: 10, column: 3, scope: !18)
+!18 = !DILexicalBlockFile(scope: !16, file: !1, discriminator: 1)
+!19 = !DILocation(line: 11, column: 11, scope: !20)
+!20 = distinct !DILexicalBlock(scope: !15, file: !1, line: 11, column: 9)
+!21 = !DILocation(line: 11, column: 9, scope: !15)
+!22 = !DILocation(line: 12, column: 8, scope: !20)
+!23 = !DILocation(line: 12, column: 7, scope: !20)
+!24 = !DILocation(line: 10, column: 20, scope: !25)
+!25 = !DILexicalBlockFile(scope: !15, file: !1, discriminator: 2)
+!26 = !DILocation(line: 13, column: 1, scope: !4)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3}
diff --git a/test/Transforms/NewGVN/edge.ll b/test/Transforms/NewGVN/edge.ll
index 2d453bda5a4a..a8afc140e218 100644
--- a/test/Transforms/NewGVN/edge.ll
+++ b/test/Transforms/NewGVN/edge.ll
@@ -1,4 +1,3 @@
-; XFAIL: *
; RUN: opt -newgvn -S < %s | FileCheck %s
define i32 @f1(i32 %x) {
diff --git a/test/Transforms/NewGVN/fence.ll b/test/Transforms/NewGVN/fence.ll
index ac4270d9aceb..190fd7344922 100644
--- a/test/Transforms/NewGVN/fence.ll
+++ b/test/Transforms/NewGVN/fence.ll
@@ -1,6 +1,7 @@
; XFAIL: *
; RUN: opt -S -basicaa -newgvn < %s | FileCheck %s
+@a = external constant i32
; We can value forward across the fence since we can (semantically)
; reorder the following load before the fence.
define i32 @test(i32* %addr.i) {
@@ -53,6 +54,25 @@ define i32 @test3(i32* noalias %addr.i, i32* noalias %otheraddr) {
ret i32 %res
}
+; We can forward the loaded value
+; across both the fences, because the load is from
+; a constant memory location.
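+; (Sketch of the reasoning, assuming @a is truly constant: both loads of @a
+; must observe its initializer no matter what the fences order, so
+; %var == %var2 and %var3 folds to 0.)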
+define i32 @test4(i32* %addr) {
+; CHECK-LABEL: @test4
+; CHECK-NOT: load
+; CHECK: fence release
+; CHECK: store
+; CHECK: fence seq_cst
+; CHECK: ret i32 0
+ %var = load i32, i32* @a
+ fence release
+ store i32 42, i32* %addr, align 8
+ fence seq_cst
+ %var2 = load i32, i32* @a
+ %var3 = sub i32 %var, %var2
+ ret i32 %var3
+}
+
; Another example of why forwarding across an acquire fence is problematic
; can be seen in a normal locking operation. Say we had:
; *p = 5; unlock(l); lock(l); use(p);
diff --git a/test/Transforms/NewGVN/flags.ll b/test/Transforms/NewGVN/flags.ll
index d03edd6776c9..e849ae2afb64 100644
--- a/test/Transforms/NewGVN/flags.ll
+++ b/test/Transforms/NewGVN/flags.ll
@@ -1,4 +1,3 @@
-; XFAIL: *
; RUN: opt -newgvn -S < %s | FileCheck %s
declare void @use(i1)
diff --git a/test/Transforms/NewGVN/fold-const-expr.ll b/test/Transforms/NewGVN/fold-const-expr.ll
index 20b74277b1ac..acd7c8df2530 100644
--- a/test/Transforms/NewGVN/fold-const-expr.ll
+++ b/test/Transforms/NewGVN/fold-const-expr.ll
@@ -1,11 +1,10 @@
-; XFAIL: *
; GVN failed to do constant expression folding and expanded
; them unfolded in many places, producing exponentially large const
; expressions. As a result, the compilation never finished.
; This test checks that we are folding constant expression
; PR 28418
; RUN: opt -newgvn -S < %s | FileCheck %s
-
+;; NewGVN fails this due to not having load coercion
%2 = type { i32, i32, i32, i32, i32 }
define i32 @_Z16vector3util_mainv(i32 %x, i32 %y) {
%tmp1 = alloca %2, align 4
diff --git a/test/Transforms/NewGVN/lifetime-simple.ll b/test/Transforms/NewGVN/lifetime-simple.ll
index 63e361c49eb9..382c7da2b3fb 100644
--- a/test/Transforms/NewGVN/lifetime-simple.ll
+++ b/test/Transforms/NewGVN/lifetime-simple.ll
@@ -8,13 +8,13 @@ define i8 @test(i8* %P) nounwind {
; CHECK-NOT: load
; CHECK: lifetime.end
entry:
- call void @llvm.lifetime.start(i64 32, i8* %P)
+ call void @llvm.lifetime.start.p0i8(i64 32, i8* %P)
%0 = load i8, i8* %P
store i8 1, i8* %P
- call void @llvm.lifetime.end(i64 32, i8* %P)
+ call void @llvm.lifetime.end.p0i8(i64 32, i8* %P)
%1 = load i8, i8* %P
ret i8 %1
}
-declare void @llvm.lifetime.start(i64 %S, i8* nocapture %P) readonly
-declare void @llvm.lifetime.end(i64 %S, i8* nocapture %P)
+declare void @llvm.lifetime.start.p0i8(i64 %S, i8* nocapture %P) readonly
+declare void @llvm.lifetime.end.p0i8(i64 %S, i8* nocapture %P)
diff --git a/test/Transforms/NewGVN/load-constant-mem.ll b/test/Transforms/NewGVN/load-constant-mem.ll
index 215258b934c0..4c1624e09f60 100644
--- a/test/Transforms/NewGVN/load-constant-mem.ll
+++ b/test/Transforms/NewGVN/load-constant-mem.ll
@@ -1,19 +1,21 @@
-; RUN: opt < %s -basicaa -newgvn -instcombine -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -newgvn -S | FileCheck %s
; PR4189
@G = external constant [4 x i32]
define i32 @test(i8* %p, i32 %i) nounwind {
+; CHECK-LABEL: @test(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[P:%.*]] = getelementptr [4 x i32], [4 x i32]* @G, i32 0, i32 [[I:%.*]]
+; CHECK-NEXT: store i8 4, i8* [[P:%.*]]
+; CHECK-NEXT: ret i32 0
+;
entry:
- %P = getelementptr [4 x i32], [4 x i32]* @G, i32 0, i32 %i
- %A = load i32, i32* %P
- store i8 4, i8* %p
- %B = load i32, i32* %P
- %C = sub i32 %A, %B
- ret i32 %C
+ %P = getelementptr [4 x i32], [4 x i32]* @G, i32 0, i32 %i
+ %A = load i32, i32* %P
+ store i8 4, i8* %p
+ %B = load i32, i32* %P
+ %C = sub i32 %A, %B
+ ret i32 %C
}
-; CHECK: define i32 @test(i8* %p, i32 %i) #0 {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: store i8 4, i8* %p, align 1
-; CHECK-NEXT: ret i32 0
-; CHECK-NEXT: }
diff --git a/test/Transforms/NewGVN/loadforward.ll b/test/Transforms/NewGVN/loadforward.ll
new file mode 100644
index 000000000000..d66b5332601f
--- /dev/null
+++ b/test/Transforms/NewGVN/loadforward.ll
@@ -0,0 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -newgvn -S | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+%rec11 = type { i16, i16, i16 }
+
+@str = global %rec11 { i16 1, i16 2, i16 3 }
+
+;; Test that we forward the first store to the second load
+define i16 @bazinga() {
+; CHECK-LABEL: @bazinga(
+; CHECK-NEXT: [[_TMP10:%.*]] = load i16, i16* getelementptr inbounds (%rec11, %rec11* @str, i16 0, i32 1)
+; CHECK-NEXT: store i16 [[_TMP10]], i16* getelementptr inbounds (%rec11, %rec11* @str, i16 0, i32 0)
+; CHECK-NEXT: [[_TMP15:%.*]] = icmp eq i16 [[_TMP10]], 3
+; CHECK-NEXT: [[_TMP16:%.*]] = select i1 [[_TMP15]], i16 1, i16 0
+; CHECK-NEXT: br label [[BB1:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: ret i16 [[_TMP16]]
+;
+ %_tmp9 = getelementptr %rec11, %rec11* @str, i16 0, i32 1
+ %_tmp10 = load i16, i16* %_tmp9
+ %_tmp12 = getelementptr %rec11, %rec11* @str, i16 0, i32 0
+ store i16 %_tmp10, i16* %_tmp12
+ %_tmp13 = getelementptr %rec11, %rec11* @str, i16 0, i32 0
+ %_tmp14 = load i16, i16* %_tmp13
+ %_tmp15 = icmp eq i16 %_tmp14, 3
+ %_tmp16 = select i1 %_tmp15, i16 1, i16 0
+ br label %bb1
+
+bb1:
+ ret i16 %_tmp16
+}
diff --git a/test/Transforms/NewGVN/malloc-load-removal.ll b/test/Transforms/NewGVN/malloc-load-removal.ll
index c91b6e17f79d..72f4839a5545 100644
--- a/test/Transforms/NewGVN/malloc-load-removal.ll
+++ b/test/Transforms/NewGVN/malloc-load-removal.ll
@@ -1,4 +1,3 @@
-; XFAIL: *
; RUN: opt -S -basicaa -newgvn < %s | FileCheck %s
; RUN: opt -S -basicaa -newgvn -disable-simplify-libcalls < %s | FileCheck %s -check-prefix=CHECK_NO_LIBCALLS
; PR13694
diff --git a/test/Transforms/NewGVN/phi-edge-handling.ll b/test/Transforms/NewGVN/phi-edge-handling.ll
new file mode 100644
index 000000000000..6451006a6949
--- /dev/null
+++ b/test/Transforms/NewGVN/phi-edge-handling.ll
@@ -0,0 +1,60 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -newgvn -S | FileCheck %s
+
+
+;; Block 6 is reachable, but edge 6->4 is not
+;; This means the phi value is undef, not 0
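+;; (bb6 ends in "br i1 true, label %bb3, label %bb4", so control from bb6
+;; always goes to bb3; the [ 0, %bb6 ] incoming value in bb4 is therefore
+;; dead, and only [ undef, %bb2 ] remains.)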
+; Function Attrs: ssp uwtable
+define i16 @hoge() local_unnamed_addr #0 align 2 {
+; CHECK-LABEL: @hoge(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: switch i8 undef, label [[BB7:%.*]] [
+; CHECK-NEXT: i8 0, label [[BB1:%.*]]
+; CHECK-NEXT: i8 12, label [[BB2:%.*]]
+; CHECK-NEXT: ]
+; CHECK: bb1:
+; CHECK-NEXT: br label [[BB6:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: br label [[BB4:%.*]]
+; CHECK: bb3:
+; CHECK-NEXT: unreachable
+; CHECK: bb4:
+; CHECK-NEXT: ret i16 undef
+; CHECK: bb6:
+; CHECK-NEXT: br i1 true, label [[BB3:%.*]], label [[BB4]], !llvm.loop !1
+; CHECK: bb7:
+; CHECK-NEXT: unreachable
+;
+bb:
+ switch i8 undef, label %bb7 [
+ i8 0, label %bb1
+ i8 12, label %bb2
+ ]
+
+bb1: ; preds = %bb
+ br label %bb6
+
+bb2: ; preds = %bb
+ br label %bb4
+
+bb3: ; preds = %bb6
+ unreachable
+
+bb4: ; preds = %bb6, %bb2
+ %tmp = phi i16 [ 0, %bb6 ], [ undef, %bb2 ]
+ ret i16 %tmp
+
+bb6: ; preds = %bb4
+ br i1 true, label %bb3, label %bb4, !llvm.loop !1
+
+bb7: ; preds = %bb
+ unreachable
+}
+
+attributes #0 = { ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"clang version 5.0.0 (http://llvm.org/git/clang.git a8b933d4d1d133594fdaed35ee5814514b738f6d) (/Users/dannyb/sources/llvm-clean fc630a9b5613f544c07a8f16abcc173793df62cf)"}
+!1 = distinct !{!1, !2}
+!2 = !{!"llvm.loop.unroll.disable"}
diff --git a/test/Transforms/NewGVN/pr10820.ll b/test/Transforms/NewGVN/pr10820.ll
index d7a02b570aa0..dbb1376874db 100644
--- a/test/Transforms/NewGVN/pr10820.ll
+++ b/test/Transforms/NewGVN/pr10820.ll
@@ -1,6 +1,6 @@
; XFAIL: *
; RUN: opt < %s -basicaa -newgvn -S | FileCheck %s
-
+; NewGVN fails this due to missing load coercion
target datalayout =
"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-f128:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Transforms/NewGVN/pr14166.ll b/test/Transforms/NewGVN/pr14166.ll
index daf27cdb7fd7..c526c50bc75d 100644
--- a/test/Transforms/NewGVN/pr14166.ll
+++ b/test/Transforms/NewGVN/pr14166.ll
@@ -1,5 +1,6 @@
; XFAIL: *
; RUN: opt -disable-basicaa -newgvn -S < %s | FileCheck %s
+; NewGVN fails this due to missing load coercion
target datalayout = "e-p:32:32:32"
target triple = "i386-pc-linux-gnu"
define <2 x i32> @test1() {
diff --git a/test/Transforms/NewGVN/pr17732.ll b/test/Transforms/NewGVN/pr17732.ll
index 4a194e6a08b5..6aee6ebeb065 100644
--- a/test/Transforms/NewGVN/pr17732.ll
+++ b/test/Transforms/NewGVN/pr17732.ll
@@ -1,6 +1,4 @@
-; XFAIL: *
; RUN: opt -newgvn -S -o - < %s | FileCheck %s
-
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Transforms/NewGVN/pr31594.ll b/test/Transforms/NewGVN/pr31594.ll
index 0cdac1a7fff4..8ef8aa66df1f 100644
--- a/test/Transforms/NewGVN/pr31594.ll
+++ b/test/Transforms/NewGVN/pr31594.ll
@@ -3,7 +3,7 @@
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @patatino(i8* %blah, i32 %choice) {
+define i1 @patatino(i8* %blah, i32 %choice) {
; CHECK-LABEL: @patatino(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[WHILE_COND:%.*]]
@@ -19,8 +19,10 @@ define void @patatino(i8* %blah, i32 %choice) {
; CHECK-NEXT: br label [[WHILE_COND]]
; CHECK: while.end:
; CHECK-NEXT: store i8 0, i8* [[FOO]], align 1
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, i8* [[BLAH]], align 1
+; CHECK-NEXT: [[LOADED:%.*]] = icmp eq i8 [[TMP0]], 0
; CHECK-NEXT: store i8 0, i8* [[BLAH]], align 1
-; CHECK-NEXT: ret void
+; CHECK-NEXT: ret i1 [[LOADED]]
;
entry:
br label %while.cond
@@ -48,7 +50,7 @@ while.end:
%0 = load i8, i8* %blah, align 1
%loaded = icmp eq i8 %0, 0
store i8 0, i8* %blah, align 1
- ret void
+ ret i1 %loaded
}
@@ -75,6 +77,7 @@ define void @foo(i8* %arg) {
; CHECK-NEXT: i8 6, label [[BB8:%.*]]
; CHECK-NEXT: ]
; CHECK: bb8:
+; CHECK-NEXT: store i8 undef, i8* null
; CHECK-NEXT: br label [[BB4]]
; CHECK: bb9:
; CHECK-NEXT: store i8 0, i8* [[ARG]], !g !0
diff --git a/test/Transforms/NewGVN/pr31613.ll b/test/Transforms/NewGVN/pr31613.ll
index d3a41830c789..d96ea18466ad 100644
--- a/test/Transforms/NewGVN/pr31613.ll
+++ b/test/Transforms/NewGVN/pr31613.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -basicaa -newgvn -S | FileCheck %s
+; RUN: opt < %s -basicaa -newgvn -enable-store-refinement -S | FileCheck %s
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
;; Both of these tests are tests of phi nodes that end up all equivalent to each other
@@ -78,21 +78,18 @@ define void @e() {
; CHECK-NEXT: br label [[H:%.*]]
; CHECK: h:
; CHECK-NEXT: call void @c.d.p(i64 8, i8* undef)
-; CHECK-NEXT: [[I:%.*]] = load i32, i32* [[F]]
; CHECK-NEXT: [[J:%.*]] = load i32, i32* null
-; CHECK-NEXT: [[K:%.*]] = icmp eq i32 [[I]], [[J]]
-; CHECK-NEXT: br i1 [[K]], label [[L:%.*]], label [[Q:%.*]]
+; CHECK-NEXT: br i1 true, label [[L:%.*]], label [[Q:%.*]]
; CHECK: l:
; CHECK-NEXT: br label [[R:%.*]]
; CHECK: q:
-; CHECK-NEXT: [[M:%.*]] = load %struct.a*, %struct.a** null
+; CHECK-NEXT: store i8 undef, i8* null
; CHECK-NEXT: br label [[R]]
; CHECK: r:
; CHECK-NEXT: switch i32 undef, label [[N:%.*]] [
; CHECK-NEXT: i32 0, label [[S:%.*]]
; CHECK-NEXT: ]
; CHECK: s:
-; CHECK-NEXT: store i32 undef, i32* [[F]], !g !0
; CHECK-NEXT: br label [[H]]
; CHECK: n:
; CHECK-NEXT: [[O:%.*]] = load %struct.a*, %struct.a** null
diff --git a/test/Transforms/NewGVN/pr31682.ll b/test/Transforms/NewGVN/pr31682.ll
index 108e1e19afbd..96103fad15c2 100644
--- a/test/Transforms/NewGVN/pr31682.ll
+++ b/test/Transforms/NewGVN/pr31682.ll
@@ -12,7 +12,6 @@ define void @bar() {
; CHECK-NEXT: [[TMP:%.*]] = load %struct.foo*, %struct.foo** @global
; CHECK-NEXT: br label [[BB2:%.*]]
; CHECK: bb2:
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [[STRUCT_FOO:%.*]], %struct.foo* [[TMP]], i64 0, i32 1
; CHECK-NEXT: br i1 undef, label [[BB2]], label [[BB7:%.*]]
; CHECK: bb7:
; CHECK-NEXT: br label [[BB10:%.*]]
diff --git a/test/Transforms/NewGVN/pr31758.ll b/test/Transforms/NewGVN/pr31758.ll
new file mode 100644
index 000000000000..6052ca973aff
--- /dev/null
+++ b/test/Transforms/NewGVN/pr31758.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -newgvn %s -S -o - | FileCheck %s
+
+%struct.dipsy = type {}
+%struct.fluttershy = type { %struct.dipsy* }
+%struct.patatino = type {}
+
+define void @tinkywinky() {
+; CHECK-LABEL: @tinkywinky(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: br label [[BB90:%.*]]
+; CHECK: bb90:
+; CHECK-NEXT: br label [[BB90]]
+; CHECK: bb138:
+; CHECK-NEXT: store i8 undef, i8* null
+; CHECK-NEXT: br label [[BB138:%.*]]
+;
+bb:
+ br label %bb90
+
+bb90:
+ %tmp = getelementptr inbounds %struct.fluttershy, %struct.fluttershy* undef, i64 0, i32 0
+ %tmp91 = bitcast %struct.dipsy** %tmp to %struct.patatino**
+ %tmp92 = load %struct.patatino*, %struct.patatino** %tmp91, align 8
+ %tmp99 = getelementptr inbounds %struct.patatino, %struct.patatino* %tmp92
+ %tmp134 = getelementptr inbounds %struct.fluttershy, %struct.fluttershy* undef, i64 0, i32 0
+ %tmp135 = bitcast %struct.dipsy** %tmp134 to %struct.patatino**
+ %tmp136 = load %struct.patatino*, %struct.patatino** %tmp135, align 8
+ br label %bb90
+
+bb138:
+ %tmp139 = getelementptr inbounds %struct.patatino, %struct.patatino* %tmp136
+ br label %bb138
+}
diff --git a/test/Transforms/NewGVN/pr32403.ll b/test/Transforms/NewGVN/pr32403.ll
new file mode 100644
index 000000000000..2552e0e66ab9
--- /dev/null
+++ b/test/Transforms/NewGVN/pr32403.ll
@@ -0,0 +1,65 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+;RUN: opt -newgvn -S < %s | FileCheck %s
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.12.0"
+
+; Function Attrs: nounwind ssp uwtable
+define void @reorder_ref_pic_list() local_unnamed_addr {
+; CHECK-LABEL: @reorder_ref_pic_list(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 undef, label [[FOR_END:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
+; CHECK: for.body.preheader:
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[REFIDXLX_0:%.*]] = phi i32 [ [[INC_I51:%.*]], [[IF_ELSE58:%.*]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: br i1 undef, label [[IF_THEN13:%.*]], label [[IF_ELSE58]]
+; CHECK: if.then13:
+; CHECK-NEXT: [[INC_I:%.*]] = add nsw i32 [[REFIDXLX_0]], 1
+; CHECK-NEXT: br label [[FOR_BODY8_I:%.*]]
+; CHECK: for.body8.i:
+; CHECK-NEXT: br i1 undef, label [[FOR_INC24_I:%.*]], label [[IF_THEN17_I:%.*]]
+; CHECK: if.then17.i:
+; CHECK-NEXT: br label [[FOR_INC24_I]]
+; CHECK: for.inc24.i:
+; CHECK-NEXT: br label [[FOR_BODY8_I]]
+; CHECK: if.else58:
+; CHECK-NEXT: [[INC_I51]] = add nsw i32 [[REFIDXLX_0]], 1
+; CHECK-NEXT: br label [[FOR_BODY]]
+; CHECK: for.end:
+; CHECK-NEXT: ret void
+;
+entry:
+ br i1 undef, label %for.end, label %for.body.preheader
+
+for.body.preheader: ; preds = %entry
+ br label %for.body
+
+for.body: ; preds = %if.else58, %for.body.preheader
+ %refIdxLX.0 = phi i32 [ %inc.i51, %if.else58 ], [ 0, %for.body.preheader ]
+ br i1 undef, label %if.then13, label %if.else58
+
+if.then13: ; preds = %for.body
+ %inc.i = add nsw i32 %refIdxLX.0, 1
+ br label %for.body8.i
+
+for.body8.i: ; preds = %for.inc24.i, %if.then13
+ %nIdx.052.i = phi i32 [ %inc.i, %if.then13 ], [ %nIdx.1.i, %for.inc24.i ]
+ br i1 undef, label %for.inc24.i, label %if.then17.i
+
+if.then17.i: ; preds = %for.body8.i
+ br label %for.inc24.i
+
+for.inc24.i: ; preds = %if.then17.i, %for.body8.i
+ %nIdx.1.i = phi i32 [ undef, %if.then17.i ], [ %nIdx.052.i, %for.body8.i ]
+ br label %for.body8.i
+
+if.else58: ; preds = %for.body
+ %inc.i51 = add nsw i32 %refIdxLX.0, 1
+ br label %for.body
+
+for.end: ; preds = %entry
+ ret void
+}
+
+
+
diff --git a/test/Transforms/NewGVN/pr32607.ll b/test/Transforms/NewGVN/pr32607.ll
new file mode 100644
index 000000000000..203ac75e2d07
--- /dev/null
+++ b/test/Transforms/NewGVN/pr32607.ll
@@ -0,0 +1,33 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -newgvn %s -S -o - | FileCheck %s
+define hidden void @foo() {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT: top:
+; CHECK-NEXT: br label [[IF:%.*]]
+; CHECK: if:
+; CHECK-NEXT: [[TMP0:%.*]] = phi double [ [[TMP1:%.*]], [[IF]] ], [ undef, [[TOP:%.*]] ]
+; CHECK-NEXT: [[TMP1]] = fadd double [[TMP0]], undef
+; CHECK-NEXT: br i1 false, label [[L50:%.*]], label [[IF]]
+; CHECK: L50:
+; CHECK-NEXT: store i8 undef, i8* null
+; CHECK-NEXT: ret void
+;
+top:
+ %.promoted = load double, double* undef, align 8
+ br label %if
+
+;; This is really a multi-valued phi, because the phi is defined by an expression of the phi.
+;; This means that we can't propagate the value over the backedge, because we'll just cycle
+;; through every value.
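+;; (Illustrative trace, under our reading: assume %0 == %.promoted; then
+;; %1 = fadd %.promoted, undef, and on the back edge %0 would become that
+;; new value, changing %1 again, and so on; no guess ever stabilizes.)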
+
+if: ; preds = %if, %top
+ %0 = phi double [ %1, %if ], [ %.promoted, %top ]
+ %1 = fadd double %0, undef
+ br i1 false, label %L50, label %if
+
+L50: ; preds = %if
+ %.lcssa = phi double [ %1, %if ]
+ store double %.lcssa, double* undef, align 8
+ ret void
+}
+
diff --git a/test/Transforms/NewGVN/predicates.ll b/test/Transforms/NewGVN/predicates.ll
new file mode 100644
index 000000000000..61b35c5e5c67
--- /dev/null
+++ b/test/Transforms/NewGVN/predicates.ll
@@ -0,0 +1,111 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -basicaa -newgvn -S < %s | FileCheck %s
+
+; Function Attrs: noinline norecurse nounwind readonly ssp uwtable
+define i32 @mp_unsgn_cmp(i32 %n, i32* nocapture readonly %in1, i32* nocapture readonly %in2) local_unnamed_addr {
+; CHECK-LABEL: @mp_unsgn_cmp(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP11:%.*]] = icmp sgt i32 [[N:%.*]], -1
+; CHECK-NEXT: br i1 [[CMP11]], label [[FOR_INC_PREHEADER:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: for.inc.preheader:
+; CHECK-NEXT: br label [[FOR_INC:%.*]]
+; CHECK: for.inc:
+; CHECK-NEXT: [[STOREMERGE2:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_INC]] ], [ 0, [[FOR_INC_PREHEADER]] ]
+; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[STOREMERGE2]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[IN1:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[IN2:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX4]], align 4
+; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[INC]] = add nsw i32 [[STOREMERGE2]], 1
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[STOREMERGE2]], [[N]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[SUB]], 0
+; CHECK-NEXT: [[OR_COND:%.*]] = and i1 [[CMP2]], [[CMP1]]
+; CHECK-NEXT: br i1 [[OR_COND]], label [[FOR_INC]], label [[FOR_END:%.*]]
+; CHECK: for.end:
+; CHECK-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[SUB]], 0
+; CHECK-NEXT: br i1 [[CMP5]], label [[IF_END8:%.*]], label [[IF_ELSE]]
+; CHECK: if.else:
+; CHECK-NEXT: [[SUB1_LCSSA4:%.*]] = phi i32 [ [[SUB]], [[FOR_END]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[CMP6:%.*]] = icmp slt i32 [[SUB1_LCSSA4]], 0
+; CHECK-NEXT: [[DOTSUB1_LCSSA:%.*]] = select i1 [[CMP6]], i32 -1, i32 [[SUB1_LCSSA4]]
+; CHECK-NEXT: ret i32 [[DOTSUB1_LCSSA]]
+; CHECK: if.end8:
+; CHECK-NEXT: ret i32 1
+;
+entry:
+ %cmp11 = icmp sgt i32 %n, -1
+ br i1 %cmp11, label %for.inc.preheader, label %if.else
+
+for.inc.preheader: ; preds = %entry
+ br label %for.inc
+
+for.inc: ; preds = %for.inc.preheader, %for.inc
+ %storemerge2 = phi i32 [ %inc, %for.inc ], [ 0, %for.inc.preheader ]
+ %idxprom = sext i32 %storemerge2 to i64
+ %arrayidx = getelementptr inbounds i32, i32* %in1, i64 %idxprom
+ %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx4 = getelementptr inbounds i32, i32* %in2, i64 %idxprom
+ %1 = load i32, i32* %arrayidx4, align 4
+ %sub = sub nsw i32 %0, %1
+ %inc = add nsw i32 %storemerge2, 1
+ %cmp1 = icmp slt i32 %storemerge2, %n
+ %cmp2 = icmp eq i32 %sub, 0
+ %or.cond = and i1 %cmp2, %cmp1
+;; This is a self-critical edge to for.inc. If we insert predicate info on it, we will insert
+;; predicateinfo at the end of this block, and think it dominates everthing using only dfs
+;; numbers, instead of proper edge dominance. We would then proceed to propagate the true value
+;; of sub == 0 everywhere, making this function only ever return 0.
+ br i1 %or.cond, label %for.inc, label %for.end
+
+for.end: ; preds = %for.inc
+ %sub.lcssa = phi i32 [ %sub, %for.inc ]
+ %cmp5 = icmp sgt i32 %sub.lcssa, 0
+ br i1 %cmp5, label %if.end8, label %if.else
+
+if.else: ; preds = %entry, %for.end
+ %sub1.lcssa4 = phi i32 [ %sub.lcssa, %for.end ], [ 0, %entry ]
+ %cmp6 = icmp slt i32 %sub1.lcssa4, 0
+ %.sub1.lcssa = select i1 %cmp6, i32 -1, i32 %sub1.lcssa4
+ ret i32 %.sub1.lcssa
+
+if.end8: ; preds = %for.end
+ ret i32 1
+}
+
+
+;; This test will generate a copy of a copy of predicateinfo to the multiple uses
+;; of branch conditions below. Make sure we don't try to extract operand info.
+; Function Attrs: uwtable
+define fastcc void @barney() {
+; CHECK-LABEL: @barney(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: br label [[BB22:%.*]]
+; CHECK: bb22:
+; CHECK-NEXT: br i1 undef, label [[BB29:%.*]], label [[BB35:%.*]]
+; CHECK: bb29:
+; CHECK-NEXT: br i1 true, label [[BB33:%.*]], label [[BB35]]
+; CHECK: bb33:
+; CHECK-NEXT: br i1 true, label [[BB35]], label [[BB35]]
+; CHECK: bb35:
+; CHECK-NEXT: unreachable
+;
+bb:
+ br label %bb22
+bb22: ; preds = %bb21
+ %tmp23 = icmp eq i32 undef, 2
+ br i1 %tmp23, label %bb29, label %bb35
+
+
+bb29: ; preds = %bb28
+ br i1 %tmp23, label %bb33, label %bb35
+
+
+bb33: ; preds = %bb31
+ br i1 %tmp23, label %bb35, label %bb35
+
+
+bb35: ; preds = %bb33, %bb29, %bb22
+ unreachable
+}
+
diff --git a/test/Transforms/NewGVN/propagate-ir-flags.ll b/test/Transforms/NewGVN/propagate-ir-flags.ll
index bb2f78d41d4f..f8904e87582b 100644
--- a/test/Transforms/NewGVN/propagate-ir-flags.ll
+++ b/test/Transforms/NewGVN/propagate-ir-flags.ll
@@ -1,4 +1,3 @@
-; XFAIL: *
; RUN: opt < %s -newgvn -S | FileCheck %s
; CHECK-LABEL: func_fast
diff --git a/test/Transforms/NewGVN/readattrs.ll b/test/Transforms/NewGVN/readattrs.ll
index be5fbf5a806f..29ddb97ca1bb 100644
--- a/test/Transforms/NewGVN/readattrs.ll
+++ b/test/Transforms/NewGVN/readattrs.ll
@@ -1,4 +1,3 @@
-; XFAIL: *
; RUN: opt -newgvn -S -o - < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
diff --git a/test/Transforms/NewGVN/refine-stores.ll b/test/Transforms/NewGVN/refine-stores.ll
new file mode 100644
index 000000000000..a48f2fe7fdb6
--- /dev/null
+++ b/test/Transforms/NewGVN/refine-stores.ll
@@ -0,0 +1,189 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -newgvn -S | FileCheck %s
+;; Now that we do store refinement, we have to verify that we add fake uses
+;; when we skip existing stores.
+;; We are also testing that the various situations that cause stores to move
+;; between congruence classes produce the right class movement.
+;; All of these tests result in verification failures if they do not.
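+;; (Our reading of the mechanism: when a store is skipped because it appears
+;; to write the value already in memory, we record a fake use of the store it
+;; was matched against, so that if that store's congruence class later
+;; changes, the skipped store is revisited rather than going stale.)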
+%struct.eggs = type {}
+
+define void @spam(i32 *%a) {
+; CHECK-LABEL: @spam(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: [[FOO:%.*]] = bitcast i32* [[A:%.*]] to %struct.eggs**
+; CHECK-NEXT: store %struct.eggs* null, %struct.eggs** [[FOO]]
+; CHECK-NEXT: br label [[BB1:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: br i1 undef, label [[BB3:%.*]], label [[BB2:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: call void @baz()
+; CHECK-NEXT: br label [[BB1]]
+; CHECK: bb3:
+; CHECK-NEXT: store i32 0, i32* undef
+; CHECK-NEXT: store %struct.eggs* null, %struct.eggs** [[FOO]]
+; CHECK-NEXT: unreachable
+;
+bb:
+ %foo = bitcast i32 *%a to %struct.eggs**
+ store %struct.eggs* null, %struct.eggs** %foo
+ br label %bb1
+
+bb1: ; preds = %bb2, %bb
+ br i1 undef, label %bb3, label %bb2
+
+bb2: ; preds = %bb1
+ call void @baz()
+ br label %bb1
+
+bb3: ; preds = %bb1
+ store i32 0, i32* undef
+;; This store is defined by a memoryphi of the call and the first store.
+;; At first, we will prove it equivalent to the first store above.
+;; Then the call will become reachable, and the equivalence will be removed.
+;; Without it being a use of the first store, we will not update the store
+;; to reflect this.
+ store %struct.eggs* null, %struct.eggs** %foo
+ unreachable
+}
+
+declare void @baz()
+
+
+define void @a() {
+; CHECK-LABEL: @a(
+; CHECK-NEXT: b:
+; CHECK-NEXT: br label [[C:%.*]]
+; CHECK: c:
+; CHECK-NEXT: store i64 undef, i64* null
+; CHECK-NEXT: br label [[E:%.*]]
+; CHECK: e:
+; CHECK-NEXT: [[G:%.*]] = load i64*, i64** null
+; CHECK-NEXT: store i64* undef, i64** null
+; CHECK-NEXT: br i1 undef, label [[C]], label [[E]]
+;
+b:
+ br label %c
+
+c: ; preds = %e, %b
+ %d = phi i64* [ undef, %b ], [ null, %e ]
+ store i64 undef, i64* %d
+ br label %e
+
+e: ; preds = %e, %c
+;; The memory for this load starts out equivalent to just the store in c; we later discover the store after us, and
+;; need to make sure the right set of values gets marked as changed after the memory leaders change.
+ %g = load i64*, i64** null
+ %0 = bitcast i64* %g to i64*
+ store i64* undef, i64** null
+ br i1 undef, label %c, label %e
+}
+
+; ModuleID = 'bugpoint-reduced-simplified.bc'
+source_filename = "bugpoint-output-daef094.bc"
+target triple = "x86_64-apple-darwin16.5.0"
+
+%struct.hoge = type {}
+
+define void @widget(%struct.hoge* %arg) {
+; CHECK-LABEL: @widget(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: br label [[BB1:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: [[TMP:%.*]] = phi %struct.hoge* [ [[ARG:%.*]], [[BB:%.*]] ], [ null, [[BB1]] ]
+; CHECK-NEXT: store %struct.hoge* [[TMP]], %struct.hoge** undef
+; CHECK-NEXT: br i1 undef, label [[BB1]], label [[BB2:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: [[TMP3:%.*]] = phi i64 [ [[TMP8:%.*]], [[BB7:%.*]] ], [ 0, [[BB1]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[TMP3]], 0
+; CHECK-NEXT: br i1 [[TMP4]], label [[BB7]], label [[BB5:%.*]]
+; CHECK: bb5:
+; CHECK-NEXT: [[TMP6:%.*]] = load i64, i64* null
+; CHECK-NEXT: call void @quux()
+; CHECK-NEXT: store i64 [[TMP6]], i64* undef
+; CHECK-NEXT: br label [[BB7]]
+; CHECK: bb7:
+; CHECK-NEXT: [[TMP8]] = add i64 [[TMP3]], 1
+; CHECK-NEXT: br label [[BB2]]
+;
+bb:
+ br label %bb1
+
+bb1: ; preds = %bb1, %bb
+ %tmp = phi %struct.hoge* [ %arg, %bb ], [ null, %bb1 ]
+ store %struct.hoge* %tmp, %struct.hoge** undef
+ br i1 undef, label %bb1, label %bb2
+
+bb2: ; preds = %bb7, %bb1
+ %tmp3 = phi i64 [ %tmp8, %bb7 ], [ 0, %bb1 ]
+ %tmp4 = icmp eq i64 %tmp3, 0
+ br i1 %tmp4, label %bb7, label %bb5
+
+bb5: ; preds = %bb2
+ ;; Originally thought equal to the store that comes after it until the phi edges
+ ;; are completely traversed
+ %tmp6 = load i64, i64* null
+ call void @quux()
+ store i64 %tmp6, i64* undef
+ br label %bb7
+
+bb7: ; preds = %bb5, %bb2
+ %tmp8 = add i64 %tmp3, 1
+ br label %bb2
+}
+
+declare void @quux()
+; ModuleID = 'short.ll'
+source_filename = "short.ll"
+
+%struct.a = type {}
+
+define void @b() {
+; CHECK-LABEL: @b(
+; CHECK-NEXT: [[C:%.*]] = alloca [[STRUCT_A:%.*]]
+; CHECK-NEXT: br label [[D:%.*]]
+; CHECK: m:
+; CHECK-NEXT: unreachable
+; CHECK: d:
+; CHECK-NEXT: [[G:%.*]] = bitcast %struct.a* [[C]] to i8*
+; CHECK-NEXT: [[F:%.*]] = bitcast i8* [[G]] to i32*
+; CHECK-NEXT: [[E:%.*]] = load i32, i32* [[F]]
+; CHECK-NEXT: br i1 undef, label [[I:%.*]], label [[J:%.*]]
+; CHECK: i:
+; CHECK-NEXT: br i1 undef, label [[K:%.*]], label [[M:%.*]]
+; CHECK: k:
+; CHECK-NEXT: br label [[L:%.*]]
+; CHECK: l:
+; CHECK-NEXT: unreachable
+; CHECK: j:
+; CHECK-NEXT: br label [[M]]
+;
+ %c = alloca %struct.a
+ br label %d
+
+m: ; preds = %j, %i
+ store i32 %e, i32* %f
+ unreachable
+
+d: ; preds = %0
+ %g = bitcast %struct.a* %c to i8*
+ %h = getelementptr i8, i8* %g
+ %f = bitcast i8* %h to i32*
+ %e = load i32, i32* %f
+ br i1 undef, label %i, label %j
+
+i: ; preds = %d
+ br i1 undef, label %k, label %m
+
+k: ; preds = %i
+ br label %l
+
+l: ; preds = %k
+ %n = phi i32 [ %e, %k ]
+ ;; Becomes equal, and then not equal, to the other store and,
+ ;; along the way, to the load.
+ store i32 %n, i32* %f
+ unreachable
+
+j: ; preds = %d
+ br label %m
+}
diff --git a/test/Transforms/NewGVN/rle-nonlocal.ll b/test/Transforms/NewGVN/rle-nonlocal.ll
index 89f5a6affdec..d318cd5240d8 100644
--- a/test/Transforms/NewGVN/rle-nonlocal.ll
+++ b/test/Transforms/NewGVN/rle-nonlocal.ll
@@ -1,23 +1,37 @@
-; XFAIL: *
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basicaa -newgvn -S | FileCheck %s
define i32 @main(i32** %p, i32 %x, i32 %y) {
+; CHECK-LABEL: @main(
+; CHECK-NEXT: block1:
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; CHECK: block2:
+; CHECK-NEXT: [[A:%.*]] = load i32*, i32** [[P:%.*]]
+; CHECK-NEXT: br label [[BLOCK4:%.*]]
+; CHECK: block3:
+; CHECK-NEXT: [[B:%.*]] = load i32*, i32** [[P]]
+; CHECK-NEXT: br label [[BLOCK4]]
+; CHECK: block4:
+; CHECK-NEXT: [[EXISTINGPHI:%.*]] = phi i32* [ [[A]], [[BLOCK2]] ], [ [[B]], [[BLOCK3]] ]
+; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[EXISTINGPHI]]
+; CHECK-NEXT: [[E:%.*]] = add i32 [[C]], [[C]]
+; CHECK-NEXT: ret i32 [[E]]
+;
block1:
- %cmp = icmp eq i32 %x, %y
- br i1 %cmp , label %block2, label %block3
+ %cmp = icmp eq i32 %x, %y
+ br i1 %cmp , label %block2, label %block3
block2:
- %a = load i32*, i32** %p
- br label %block4
+ %a = load i32*, i32** %p
+ br label %block4
block3:
%b = load i32*, i32** %p
br label %block4
block4:
-; CHECK-NOT: %existingPHI = phi
-; CHECK: %DEAD = phi
- %existingPHI = phi i32* [ %a, %block2 ], [ %b, %block3 ]
+ %existingPHI = phi i32* [ %a, %block2 ], [ %b, %block3 ]
%DEAD = load i32*, i32** %p
%c = load i32, i32* %DEAD
%d = load i32, i32* %existingPHI
diff --git a/test/Transforms/NewGVN/rle.ll b/test/Transforms/NewGVN/rle.ll
new file mode 100644
index 000000000000..902abe979ea8
--- /dev/null
+++ b/test/Transforms/NewGVN/rle.ll
@@ -0,0 +1,59 @@
+; RUN: opt < %s -data-layout="e-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-n8:16:32" -basicaa -newgvn -S -die | FileCheck %s
+; RUN: opt < %s -data-layout="E-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-n32" -basicaa -newgvn -S -die | FileCheck %s
+; memset -> i16 forwarding.
+define signext i16 @memset_to_i16_local(i16* %A) nounwind ssp {
+entry:
+ %conv = bitcast i16* %A to i8*
+ tail call void @llvm.memset.p0i8.i64(i8* %conv, i8 1, i64 200, i32 1, i1 false)
+ %arrayidx = getelementptr inbounds i16, i16* %A, i64 42
+ %tmp2 = load i16, i16* %arrayidx
+ ret i16 %tmp2
+; CHECK-LABEL: @memset_to_i16_local(
+; CHECK-NOT: load
+; CHECK: ret i16 257
+}
+
+@GCst = constant {i32, float, i32 } { i32 42, float 14., i32 97 }
+@GCst_as1 = addrspace(1) constant {i32, float, i32 } { i32 42, float 14., i32 97 }
+
+; memcpy -> float forwarding.
+define float @memcpy_to_float_local(float* %A) nounwind ssp {
+entry:
+ %conv = bitcast float* %A to i8* ; <i8*> [#uses=1]
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %conv, i8* bitcast ({i32, float, i32 }* @GCst to i8*), i64 12, i32 1, i1 false)
+ %arrayidx = getelementptr inbounds float, float* %A, i64 1 ; <float*> [#uses=1]
+ %tmp2 = load float, float* %arrayidx ; <float> [#uses=1]
+ ret float %tmp2
+; CHECK-LABEL: @memcpy_to_float_local(
+; CHECK-NOT: load
+; CHECK: ret float 1.400000e+01
+}
+; memcpy from address space 1
+define float @memcpy_to_float_local_as1(float* %A) nounwind ssp {
+entry:
+ %conv = bitcast float* %A to i8* ; <i8*> [#uses=1]
+ tail call void @llvm.memcpy.p0i8.p1i8.i64(i8* %conv, i8 addrspace(1)* bitcast ({i32, float, i32 } addrspace(1)* @GCst_as1 to i8 addrspace(1)*), i64 12, i32 1, i1 false)
+ %arrayidx = getelementptr inbounds float, float* %A, i64 1 ; <float*> [#uses=1]
+ %tmp2 = load float, float* %arrayidx ; <float> [#uses=1]
+ ret float %tmp2
+; CHECK-LABEL: @memcpy_to_float_local_as1(
+; CHECK-NOT: load
+; CHECK: ret float 1.400000e+01
+}
+
+; PR6642
+define i32 @memset_to_load() nounwind readnone {
+entry:
+ %x = alloca [256 x i32], align 4 ; <[256 x i32]*> [#uses=2]
+ %tmp = bitcast [256 x i32]* %x to i8* ; <i8*> [#uses=1]
+ call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 1024, i32 4, i1 false)
+ %arraydecay = getelementptr inbounds [256 x i32], [256 x i32]* %x, i32 0, i32 0 ; <i32*>
+ %tmp1 = load i32, i32* %arraydecay ; <i32> [#uses=1]
+ ret i32 %tmp1
+; CHECK-LABEL: @memset_to_load(
+; CHECK: ret i32 0
+}
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p1i8.i64(i8* nocapture, i8 addrspace(1)* nocapture, i64, i32, i1) nounwind
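(A quick sanity check on the constants these CHECKs expect: the memset stores the byte 0x01 into all 200 bytes, so any i16 loaded from the filled region reads 0x0101 = 257 under either byte order in the RUN lines. In the memcpy tests, element 1 of { i32, float, i32 } sits at byte offset 4, inside the 12 bytes copied from @GCst, so the load forwards float 14.0, printed as 1.400000e+01. In @memset_to_load, the alloca is 256 * 4 = 1024 bytes, exactly the zero-filled size, so the first element loads as 0.)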
diff --git a/test/Transforms/NewGVN/storeoverstore.ll b/test/Transforms/NewGVN/storeoverstore.ll
index 63f40c511e3c..49b55d430dc7 100644
--- a/test/Transforms/NewGVN/storeoverstore.ll
+++ b/test/Transforms/NewGVN/storeoverstore.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -newgvn -S < %s | FileCheck %s
; RUN: opt -passes=newgvn -S -o - %s | FileCheck %s
@@ -7,31 +8,35 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
;; stores of the same value do not change the memory state to eliminate them.
define i32 @foo(i32*, i32) {
-; CHECK-LABEL: @foo
+; CHECK-LABEL: @foo(
+; CHECK-NEXT: store i32 5, i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP1:%.*]], 0
+; CHECK-NEXT: br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP5:%.*]]
+; CHECK: br label [[TMP5]]
+; CHECK: [[DOT0:%.*]] = phi i32 [ 10, [[TMP4]] ], [ 5, [[TMP2:%.*]] ]
+; CHECK-NEXT: br i1 [[TMP3]], label [[TMP6:%.*]], label [[TMP8:%.*]]
+; CHECK: [[TMP7:%.*]] = add nsw i32 [[DOT0]], 5
+; CHECK-NEXT: br label [[TMP8]]
+; CHECK: [[DOT1:%.*]] = phi i32 [ [[TMP7]], [[TMP6]] ], [ [[DOT0]], [[TMP5]] ]
+; CHECK-NEXT: ret i32 [[DOT1]]
+;
store i32 5, i32* %0, align 4
%3 = icmp ne i32 %1, 0
br i1 %3, label %4, label %7
; <label>:4: ; preds = %2
-; CHECK-NOT: load
%5 = load i32, i32* %0, align 4
-; CHECK-NOT: add
%6 = add nsw i32 5, %5
br label %7
; <label>:7: ; preds = %4, %2
%.0 = phi i32 [ %6, %4 ], [ 5, %2 ]
-; CHECK: phi i32 [ 10, %4 ], [ 5, %2 ]
store i32 5, i32* %0, align 4
-; CHECK-NOT: icmp
%8 = icmp ne i32 %1, 0
-; CHECK: br i1 %3
br i1 %8, label %9, label %12
; <label>:9: ; preds = %7
-; CHECK-NOT: load
%10 = load i32, i32* %0, align 4
-; CHECK: add nsw i32 %.0, 5
%11 = add nsw i32 %.0, %10
br label %12
@@ -43,15 +48,25 @@ define i32 @foo(i32*, i32) {
;; This is similar to the above, but it is a conditional store of the same value
;; which requires value numbering MemoryPhi properly to resolve.
define i32 @foo2(i32*, i32) {
-; CHECK-LABEL: @foo2
+; CHECK-LABEL: @foo2(
+; CHECK-NEXT: store i32 5, i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP1:%.*]], 0
+; CHECK-NEXT: br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP5:%.*]]
+; CHECK: br label [[TMP6:%.*]]
+; CHECK: br label [[TMP6]]
+; CHECK: [[DOT0:%.*]] = phi i32 [ 10, [[TMP4]] ], [ 5, [[TMP5]] ]
+; CHECK-NEXT: br i1 [[TMP3]], label [[TMP7:%.*]], label [[TMP9:%.*]]
+; CHECK: [[TMP8:%.*]] = add nsw i32 [[DOT0]], 5
+; CHECK-NEXT: br label [[TMP9]]
+; CHECK: [[DOT1:%.*]] = phi i32 [ [[TMP8]], [[TMP7]] ], [ [[DOT0]], [[TMP6]] ]
+; CHECK-NEXT: ret i32 [[DOT1]]
+;
store i32 5, i32* %0, align 4
%3 = icmp ne i32 %1, 0
br i1 %3, label %4, label %7
; <label>:4: ; preds = %2
-; CHECK-NOT: load
%5 = load i32, i32* %0, align 4
-; CHECK-NOT: add
%6 = add nsw i32 5, %5
br label %8
@@ -60,17 +75,12 @@ define i32 @foo2(i32*, i32) {
br label %8
; <label>:8: ; preds = %7, %4
-; CHECK: phi i32 [ 10, %4 ], [ 5, %5 ]
%.0 = phi i32 [ %6, %4 ], [ 5, %7 ]
-; CHECK-NOT: icmp
%9 = icmp ne i32 %1, 0
-; CHECK: br i1 %3
br i1 %9, label %10, label %13
; <label>:10: ; preds = %8
-; CHECK-NOT: load
%11 = load i32, i32* %0, align 4
-; CHECK: add nsw i32 %.0, 5
%12 = add nsw i32 %.0, %11
br label %13
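(The folded values in the new CHECK blocks follow directly from store forwarding: the first load sees the stored 5, so 5 + 5 folds to the phi input 10, and the second store writes the same 5 and therefore does not change the memory state, letting the repeated compare and load reuse %3 and the phi. A sketch of the key lines from the @foo checks:

  %.0 = phi i32 [ 10, %4 ], [ 5, %2 ]   ; 5 + 5 folded to 10
  %7 = add nsw i32 %.0, 5               ; second load forwarded from the store of 5
)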
diff --git a/test/Transforms/NewGVN/tbaa.ll b/test/Transforms/NewGVN/tbaa.ll
index 47e20fae7f9c..3dcc4f8acc14 100644
--- a/test/Transforms/NewGVN/tbaa.ll
+++ b/test/Transforms/NewGVN/tbaa.ll
@@ -1,4 +1,3 @@
-; XFAIL: *
; RUN: opt -tbaa -basicaa -newgvn -S < %s | FileCheck %s
define i32 @test1(i8* %p, i8* %q) {
diff --git a/test/Transforms/NewGVN/volatile-nonvolatile.ll b/test/Transforms/NewGVN/volatile-nonvolatile.ll
index 8c74f8b28efb..46d29bad0f4d 100644
--- a/test/Transforms/NewGVN/volatile-nonvolatile.ll
+++ b/test/Transforms/NewGVN/volatile-nonvolatile.ll
@@ -1,4 +1,3 @@
-; XFAIL: *
; RUN: opt -tbaa -newgvn -S < %s | FileCheck %s
%struct.t = type { i32* }
diff --git a/test/Transforms/ObjCARC/contract-storestrong.ll b/test/Transforms/ObjCARC/contract-storestrong.ll
index 2b83bdb9bfbf..a02f7b701912 100644
--- a/test/Transforms/ObjCARC/contract-storestrong.ll
+++ b/test/Transforms/ObjCARC/contract-storestrong.ll
@@ -243,6 +243,19 @@ entry:
ret void
}
+; This used to crash.
+; CHECK-LABEL: define i8* @test13(
+; CHECK: tail call void @objc_storeStrong(i8** %{{.*}}, i8* %[[NEW:.*]])
+; CHECK-NEXT: ret i8* %[[NEW]]
+
+define i8* @test13(i8* %a0, i8* %a1, i8** %addr, i8* %new) {
+ %old = load i8*, i8** %addr, align 8
+ call void @objc_release(i8* %old)
+ %retained = call i8* @objc_retain(i8* %new)
+ store i8* %retained, i8** %addr, align 8
+ ret i8* %retained
+}
+
!0 = !{}
; CHECK: attributes [[NUW]] = { nounwind }
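(The shape @test13 exercises is the canonical storeStrong sequence: load the old value, release it, retain the new value, store the retained value back to the same slot, and return it. Per the CHECK lines, the contract pass collapses this into a single runtime call and rewrites users of the retained value to the incoming one. A minimal sketch of the rewritten body, with @test13_contracted as a hypothetical name:

declare void @objc_storeStrong(i8**, i8*)

define i8* @test13_contracted(i8** %addr, i8* %new) {
  ; load/release/retain/store collapsed into one runtime call
  call void @objc_storeStrong(i8** %addr, i8* %new)
  ; users of the retained value are rewritten to use %new directly
  ret i8* %new
}
)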
diff --git a/test/Transforms/PGOProfile/Inputs/memop_size_annotation.proftext b/test/Transforms/PGOProfile/Inputs/memop_size_annotation.proftext
new file mode 100644
index 000000000000..400b29df3036
--- /dev/null
+++ b/test/Transforms/PGOProfile/Inputs/memop_size_annotation.proftext
@@ -0,0 +1,27 @@
+# IR level Instrumentation Flag
+:ir
+foo
+# Func Hash:
+53929068288
+# Num Counters:
+3
+# Counter Values:
+556
+20
+1
+# Num Value Kinds:
+1
+# ValueKind = IPVK_MemOPSize:
+1
+# NumValueSites:
+1
+9
+7:33
+2:88
+9:72
+4:66
+1:99
+5:55
+6:44
+3:77
+8:22
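(Reading the value-profile section above: after the single value site, the 9 gives the number of value:count records that follow, each pairing a memcpy size with how often it was seen, so 1:99 means size 1 occurred 99 times. The nine counts sum to 99 + 88 + 77 + 72 + 66 + 55 + 44 + 33 + 22 = 556, matching the site's first counter and the total that appears in the VP annotations checked by memop_size_annotation.ll below.)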
diff --git a/test/Transforms/PGOProfile/Inputs/thinlto_samplepgo_icp.ll b/test/Transforms/PGOProfile/Inputs/thinlto_samplepgo_icp.ll
new file mode 100644
index 000000000000..22860f52b5d3
--- /dev/null
+++ b/test/Transforms/PGOProfile/Inputs/thinlto_samplepgo_icp.ll
@@ -0,0 +1,27 @@
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@fptr = external local_unnamed_addr global void ()*, align 8
+
+; Function Attrs: norecurse nounwind uwtable
+define void @_Z6updatei(i32 %i) local_unnamed_addr #0 {
+entry:
+ store void ()* @_ZL3foov, void ()** @fptr, align 8
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readnone uwtable
+define internal void @_ZL3foov() #1 {
+entry:
+ ret void
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3}
+!llvm.ident = !{!31}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 5.0.0 (trunk 297016)", isOptimized: true, runtimeVersion: 0, emissionKind: NoDebug, enums: !2)
+!1 = !DIFile(filename: "b.cc", directory: "/ssd/llvm/abc/small")
+!2 = !{}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!31 = !{!"clang version 5.0.0 (trunk 297016)"}
diff --git a/test/Transforms/PGOProfile/comdat_internal.ll b/test/Transforms/PGOProfile/comdat_internal.ll
index 7df6f91fe729..74630179105a 100644
--- a/test/Transforms/PGOProfile/comdat_internal.ll
+++ b/test/Transforms/PGOProfile/comdat_internal.ll
@@ -12,11 +12,11 @@ $foo = comdat any
@bar = global i32 ()* @foo, align 8
; CHECK: @__llvm_profile_raw_version = constant i64 {{[0-9]+}}, comdat
-; CHECK: @__profn__stdin__foo = private constant [11 x i8] c"<stdin>:foo"
+; CHECK-NOT: __profn__stdin__foo
; CHECK: @__profc__stdin__foo.[[FOO_HASH]] = private global [1 x i64] zeroinitializer, section "__llvm_prf_cnts", comdat($__profv__stdin__foo.[[FOO_HASH]]), align 8
-; CHECK: @__profd__stdin__foo.[[FOO_HASH]] = private global { i64, i64, i64*, i8*, i8*, i32, [1 x i16] } { i64 -5640069336071256030, i64 [[FOO_HASH]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc__stdin__foo.[[FOO_HASH]], i32 0, i32 0), i8* null
+; CHECK: @__profd__stdin__foo.[[FOO_HASH]] = private global { i64, i64, i64*, i8*, i8*, i32, [2 x i16] } { i64 -5640069336071256030, i64 [[FOO_HASH]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc__stdin__foo.[[FOO_HASH]], i32 0, i32 0), i8* null
; CHECK-NOT: bitcast (i32 ()* @foo to i8*)
-; CHECK-SAME: , i8* null, i32 1, [1 x i16] zeroinitializer }, section "__llvm_prf_data", comdat($__profv__stdin__foo.[[FOO_HASH]]), align 8
+; CHECK-SAME: , i8* null, i32 1, [2 x i16] zeroinitializer }, section "__llvm_prf_data", comdat($__profv__stdin__foo.[[FOO_HASH]]), align 8
; CHECK: @__llvm_prf_nm
; CHECK: @llvm.used
diff --git a/test/Transforms/PGOProfile/indirect_call_promotion.ll b/test/Transforms/PGOProfile/indirect_call_promotion.ll
index c35166505eb9..b892c130152c 100644
--- a/test/Transforms/PGOProfile/indirect_call_promotion.ll
+++ b/test/Transforms/PGOProfile/indirect_call_promotion.ll
@@ -1,4 +1,6 @@
; RUN: opt < %s -pgo-icall-prom -S | FileCheck %s --check-prefix=ICALL-PROM
+; RUN: opt < %s -pgo-icall-prom -S -icp-samplepgo | FileCheck %s --check-prefix=ICALL-PROM
+; RUN: opt < %s -pgo-icall-prom -S -icp-samplepgo | FileCheck %s --check-prefix=ICALL-PROM-SAMPLEPGO
; RUN: opt < %s -passes=pgo-icall-prom -S | FileCheck %s --check-prefix=ICALL-PROM
; RUN: opt < %s -pgo-icall-prom -S -pass-remarks=pgo-icall-prom -icp-count-threshold=0 -icp-percent-threshold=0 -icp-max-prom=4 2>&1 | FileCheck %s --check-prefix=PASS-REMARK
; RUN: opt < %s -passes=pgo-icall-prom -S -pass-remarks=pgo-icall-prom -icp-count-threshold=0 -icp-percent-threshold=0 -icp-max-prom=4 2>&1 | FileCheck %s --check-prefix=PASS-REMARK
@@ -40,6 +42,7 @@ entry:
; ICALL-PROM: br i1 [[CMP]], label %if.true.direct_targ, label %if.false.orig_indirect, !prof [[BRANCH_WEIGHT:![0-9]+]]
; ICALL-PROM: if.true.direct_targ:
; ICALL-PROM: [[DIRCALL_RET:%[0-9]+]] = call i32 @func4()
+; ICALL-PROM-SAMPLEPGO: call i32 @func4(), !prof [[CALL_METADATA:![0-9]+]]
; ICALL-PROM: br label %if.end.icp
%call = call i32 %tmp(), !prof !1
; ICALL-PROM: if.false.orig_indirect:
@@ -54,3 +57,4 @@ entry:
; ICALL-PROM: [[BRANCH_WEIGHT]] = !{!"branch_weights", i32 1030, i32 570}
; ICALL-PROM: [[NEW_VP_METADATA]] = !{!"VP", i32 0, i64 570, i64 -4377547752858689819, i64 410}
+; ICALL-PROM-SAMPLEPGO: [[CALL_METADATA]] = !{!"branch_weights", i32 1030}
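(The new sample-PGO lines check that the promoted direct call keeps its own profile: the site's total splits into 1030 for the promoted target and 570 for the remaining indirect path, and with -icp-samplepgo the direct call to @func4 additionally carries branch_weights of 1030. Roughly, the promoted site looks like this sketch, using the labels from the checks:

  %cmp = icmp eq i32 ()* %tmp, @func4
  br i1 %cmp, label %if.true.direct_targ, label %if.false.orig_indirect, !prof !1
  ; with !1 = !{!"branch_weights", i32 1030, i32 570}
)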
diff --git a/test/Transforms/PGOProfile/memcpy.ll b/test/Transforms/PGOProfile/memcpy.ll
new file mode 100644
index 000000000000..9db4a4a2dd4c
--- /dev/null
+++ b/test/Transforms/PGOProfile/memcpy.ll
@@ -0,0 +1,35 @@
+; RUN: opt < %s -pgo-instr-gen -instrprof -S | FileCheck %s
+; RUN: opt < %s -passes=pgo-instr-gen,instrprof -S | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @foo(i8* %dst, i8* %src, i32* %a, i32 %n) {
+entry:
+ br label %for.cond
+
+for.cond:
+ %i.0 = phi i32 [ 0, %entry ], [ %add, %for.cond1 ]
+ %cmp = icmp slt i32 %i.0, %n
+ br i1 %cmp, label %for.cond1, label %for.end6
+
+for.cond1:
+ %j.0 = phi i32 [ %inc, %for.body3 ], [ 0, %for.cond ]
+ %idx.ext = sext i32 %i.0 to i64
+ %add.ptr = getelementptr inbounds i32, i32* %a, i64 %idx.ext
+ %0 = load i32, i32* %add.ptr, align 4
+ %cmp2 = icmp slt i32 %j.0, %0
+ %add = add nsw i32 %i.0, 1
+ br i1 %cmp2, label %for.body3, label %for.cond
+
+for.body3:
+ %conv = sext i32 %add to i64
+; CHECK: call void @__llvm_profile_instrument_range(i64 %conv, i8* bitcast ({ i64, i64, i64*, i8*, i8*, i32, [2 x i16] }* @__profd_foo to i8*), i32 0, i64 0, i64 8, i64 8192)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %conv, i32 1, i1 false)
+ %inc = add nsw i32 %j.0, 1
+ br label %for.cond1
+
+for.end6:
+ ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1)
diff --git a/test/Transforms/PGOProfile/memop_size_annotation.ll b/test/Transforms/PGOProfile/memop_size_annotation.ll
new file mode 100644
index 000000000000..5481d12b1af1
--- /dev/null
+++ b/test/Transforms/PGOProfile/memop_size_annotation.ll
@@ -0,0 +1,59 @@
+; RUN: llvm-profdata merge %S/Inputs/memop_size_annotation.proftext -o %t.profdata
+; RUN: opt < %s -pgo-instr-use -memop-max-annotations=9 -pgo-test-profile-file=%t.profdata -S | FileCheck %s --check-prefixes=MEMOP_ANNOTATION,MEMOP_ANNOTATION9
+; RUN: opt < %s -passes=pgo-instr-use -memop-max-annotations=9 -pgo-test-profile-file=%t.profdata -S | FileCheck %s --check-prefixes=MEMOP_ANNOTATION,MEMOP_ANNOTATION9
+; RUN: opt < %s -pgo-instr-use -pgo-test-profile-file=%t.profdata -S | FileCheck %s --check-prefixes=MEMOP_ANNOTATION,MEMOP_ANNOTATION4
+; RUN: opt < %s -passes=pgo-instr-use -pgo-test-profile-file=%t.profdata -S | FileCheck %s --check-prefixes=MEMOP_ANNOTATION,MEMOP_ANNOTATION4
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @foo(i8* %dst, i8* %src, i32* %a, i32 %n) {
+entry:
+ br label %for.cond
+
+for.cond:
+ %i.0 = phi i32 [ 0, %entry ], [ %inc5, %for.inc4 ]
+ %cmp = icmp slt i32 %i.0, %n
+ br i1 %cmp, label %for.body, label %for.end6
+
+for.body:
+ br label %for.cond1
+
+for.cond1:
+ %j.0 = phi i32 [ 0, %for.body ], [ %inc, %for.inc ]
+ %idx.ext = sext i32 %i.0 to i64
+ %add.ptr = getelementptr inbounds i32, i32* %a, i64 %idx.ext
+ %0 = load i32, i32* %add.ptr, align 4
+ %cmp2 = icmp slt i32 %j.0, %0
+ br i1 %cmp2, label %for.body3, label %for.end
+
+for.body3:
+ %add = add nsw i32 %i.0, 1
+ %conv = sext i32 %add to i64
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %conv, i32 1, i1 false)
+; MEMOP_ANNOTATION: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %conv, i32 1, i1 false)
+; MEMOP_ANNOTATION-SAME: !prof ![[MEMOP_VALUESITE:[0-9]+]]
+; MEMOP_ANNOTATION9: ![[MEMOP_VALUESITE]] = !{!"VP", i32 1, i64 556, i64 1, i64 99, i64 2, i64 88, i64 3, i64 77, i64 9, i64 72, i64 4, i64 66, i64 5, i64 55, i64 6, i64 44, i64 7, i64 33, i64 8, i64 22}
+; MEMOP_ANNOTATION4: ![[MEMOP_VALUESITE]] = !{!"VP", i32 1, i64 556, i64 1, i64 99, i64 2, i64 88, i64 3, i64 77, i64 9, i64 72}
+ br label %for.inc
+
+for.inc:
+ %inc = add nsw i32 %j.0, 1
+ br label %for.cond1
+
+for.end:
+ br label %for.inc4
+
+for.inc4:
+ %inc5 = add nsw i32 %i.0, 1
+ br label %for.cond
+
+for.end6:
+ ret void
+}
+
+declare void @llvm.lifetime.start(i64, i8* nocapture)
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1)
+
+declare void @llvm.lifetime.end(i64, i8* nocapture)
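(Comparing the two expected metadata strings pins down the annotation policy: the value:count records, listed in arbitrary order in the .proftext, are attached sorted by descending count and truncated to -memop-max-annotations entries, so the run with 9 keeps all nine pairs while the default run keeps only the top four, with counts 99, 88, 77, and 72.)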
diff --git a/test/Transforms/PGOProfile/memop_size_opt.ll b/test/Transforms/PGOProfile/memop_size_opt.ll
new file mode 100644
index 000000000000..c7c42f3c1d33
--- /dev/null
+++ b/test/Transforms/PGOProfile/memop_size_opt.ll
@@ -0,0 +1,100 @@
+; RUN: opt < %s -passes=pgo-memop-opt -pgo-memop-count-threshold=90 -pgo-memop-percent-threshold=15 -S | FileCheck %s --check-prefix=MEMOP_OPT
+; RUN: opt < %s -pgo-memop-opt -pgo-memop-count-threshold=90 -pgo-memop-percent-threshold=15 -S | FileCheck %s --check-prefix=MEMOP_OPT
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @foo(i8* %dst, i8* %src, i32* %a, i32 %n) !prof !27 {
+entry:
+ br label %for.cond
+
+for.cond:
+ %i.0 = phi i32 [ 0, %entry ], [ %inc5, %for.inc4 ]
+ %cmp = icmp slt i32 %i.0, %n
+ br i1 %cmp, label %for.body, label %for.end6, !prof !28
+
+for.body:
+ br label %for.cond1
+
+for.cond1:
+ %j.0 = phi i32 [ 0, %for.body ], [ %inc, %for.inc ]
+ %idx.ext = sext i32 %i.0 to i64
+ %add.ptr = getelementptr inbounds i32, i32* %a, i64 %idx.ext
+ %0 = load i32, i32* %add.ptr, align 4
+ %cmp2 = icmp slt i32 %j.0, %0
+ br i1 %cmp2, label %for.body3, label %for.end, !prof !29
+
+for.body3:
+ %add = add nsw i32 %i.0, 1
+ %conv = sext i32 %add to i64
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %conv, i32 1, i1 false), !prof !30
+ br label %for.inc
+
+; MEMOP_OPT: switch i64 %conv, label %[[Default_LABEL:.*]] [
+; MEMOP_OPT: i64 1, label %[[CASE_1_LABEL:.*]]
+; MEMOP_OPT: ], !prof [[SWITCH_BW:![0-9]+]]
+; MEMOP_OPT: [[CASE_1_LABEL]]:
+; MEMOP_OPT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 1, i32 1, i1 false)
+; MEMOP_OPT: br label %[[MERGE_LABEL:.*]]
+; MEMOP_OPT: [[Default_LABEL]]:
+; MEMOP_OPT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %conv, i32 1, i1 false)
+; MEMOP_OPT-NOT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %conv, i32 1, i1 false), !prof
+; MEMOP_OPT: br label %[[MERGE_LABEL]]
+; MEMOP_OPT: [[MERGE_LABEL]]:
+; MEMOP_OPT: br label %for.inc
+; MEMOP_OPT: [[SWITCH_BW]] = !{!"branch_weights", i32 457, i32 99}
+
+for.inc:
+ %inc = add nsw i32 %j.0, 1
+ br label %for.cond1
+
+for.end:
+ br label %for.inc4
+
+for.inc4:
+ %inc5 = add nsw i32 %i.0, 1
+ br label %for.cond
+
+for.end6:
+ ret void
+}
+
+!llvm.module.flags = !{!0}
+
+!0 = !{i32 1, !"ProfileSummary", !1}
+!1 = !{!2, !3, !4, !5, !6, !7, !8, !9}
+!2 = !{!"ProfileFormat", !"InstrProf"}
+!3 = !{!"TotalCount", i64 579}
+!4 = !{!"MaxCount", i64 556}
+!5 = !{!"MaxInternalCount", i64 20}
+!6 = !{!"MaxFunctionCount", i64 556}
+!7 = !{!"NumCounts", i64 6}
+!8 = !{!"NumFunctions", i64 3}
+!9 = !{!"DetailedSummary", !10}
+!10 = !{!11, !12, !13, !14, !15, !16, !16, !17, !17, !18, !19, !20, !21, !22, !23, !24, !25, !26}
+!11 = !{i32 10000, i64 556, i32 1}
+!12 = !{i32 100000, i64 556, i32 1}
+!13 = !{i32 200000, i64 556, i32 1}
+!14 = !{i32 300000, i64 556, i32 1}
+!15 = !{i32 400000, i64 556, i32 1}
+!16 = !{i32 500000, i64 556, i32 1}
+!17 = !{i32 600000, i64 556, i32 1}
+!18 = !{i32 700000, i64 556, i32 1}
+!19 = !{i32 800000, i64 556, i32 1}
+!20 = !{i32 900000, i64 556, i32 1}
+!21 = !{i32 950000, i64 556, i32 1}
+!22 = !{i32 990000, i64 20, i32 2}
+!23 = !{i32 999000, i64 1, i32 5}
+!24 = !{i32 999900, i64 1, i32 5}
+!25 = !{i32 999990, i64 1, i32 5}
+!26 = !{i32 999999, i64 1, i32 5}
+!27 = !{!"function_entry_count", i64 1}
+!28 = !{!"branch_weights", i32 20, i32 1}
+!29 = !{!"branch_weights", i32 556, i32 20}
+!30 = !{!"VP", i32 1, i64 556, i64 1, i64 99, i64 2, i64 88, i64 3, i64 77, i64 9, i64 72, i64 4, i64 66, i64 5, i64 55, i64 6, i64 44, i64 7, i64 33, i64 8, i64 22}
+
+declare void @llvm.lifetime.start(i64, i8* nocapture)
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1)
+
+declare void @llvm.lifetime.end(i64, i8* nocapture)
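(The thresholds explain why only size 1 is switched out: with -pgo-memop-count-threshold=90 and -pgo-memop-percent-threshold=15, a size qualifies only if its count is at least 90 and at least 15% of the site total. Size 1 passes, since 99 >= 90 and 99/556 is about 17.8%, while the runner-up, size 2 with count 88, already fails the count threshold. The resulting switch weights are 556 - 99 = 457 for the default case and 99 for the size-1 case, matching [[SWITCH_BW]].)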
diff --git a/test/Transforms/PGOProfile/multiple_hash_profile.ll b/test/Transforms/PGOProfile/multiple_hash_profile.ll
index f4041830f8f8..6da94826a954 100644
--- a/test/Transforms/PGOProfile/multiple_hash_profile.ll
+++ b/test/Transforms/PGOProfile/multiple_hash_profile.ll
@@ -27,8 +27,8 @@ entry:
%cmp.i = icmp sgt i32 %i, 2
%mul.i = select i1 %cmp.i, i32 1, i32 %i
; CHECK: %mul.i = select i1 %cmp.i, i32 1, i32 %i
-; CHECK-SAME !prof ![[BW:[0-9]+]]
-; CHECK ![[BW]] = !{!"branch_weights", i32 12, i32 6}
+; CHECK-SAME: !prof ![[BW:[0-9]+]]
+; CHECK: ![[BW]] = !{!"branch_weights", i32 12, i32 6}
%retval.0.i = mul nsw i32 %mul.i, %i
ret i32 %retval.0.i
}
diff --git a/test/Transforms/PGOProfile/statics_counter_naming.ll b/test/Transforms/PGOProfile/statics_counter_naming.ll
index c882406ffe54..c329ddba9300 100644
--- a/test/Transforms/PGOProfile/statics_counter_naming.ll
+++ b/test/Transforms/PGOProfile/statics_counter_naming.ll
@@ -1,9 +1,14 @@
-; RUN: opt %s -pgo-instr-gen -S | FileCheck %s --check-prefix=GEN
-; RUN: opt %s -passes=pgo-instr-gen -S | FileCheck %s --check-prefix=GEN
+; RUN: opt %s -pgo-instr-gen -static-func-full-module-prefix=false -S | FileCheck %s --check-prefix=NOPATH
+; RUN: opt %s -passes=pgo-instr-gen -static-func-full-module-prefix=false -S | FileCheck %s --check-prefix=NOPATH
+; RUN: opt %s -pgo-instr-gen -static-func-strip-dirname-prefix=1000 -S | FileCheck %s --check-prefix=NOPATH
+; RUN: opt %s -passes=pgo-instr-gen -static-func-strip-dirname-prefix=1000 -S | FileCheck %s --check-prefix=NOPATH
+; RUN: opt %s -pgo-instr-gen -static-func-strip-dirname-prefix=1 -S | FileCheck %s --check-prefix=HASPATH
+; RUN: opt %s -passes=pgo-instr-gen -static-func-strip-dirname-prefix=1 -S | FileCheck %s --check-prefix=HASPATH
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-; GEN: @__profn_statics_counter_naming.ll_func = private constant [30 x i8] c"statics_counter_naming.ll:func"
+; NOPATH: @__profn_statics_counter_naming.ll_func = private constant [30 x i8] c"statics_counter_naming.ll:func"
+; HASPATH-NOT: @__profn_statics_counter_naming.ll_func = private constant [30 x i8] c"statics_counter_naming.ll:func"
define internal i32 @func() {
entry:
diff --git a/test/Transforms/PGOProfile/thinlto_samplepgo_icp.ll b/test/Transforms/PGOProfile/thinlto_samplepgo_icp.ll
new file mode 100644
index 000000000000..dfb6816db5f2
--- /dev/null
+++ b/test/Transforms/PGOProfile/thinlto_samplepgo_icp.ll
@@ -0,0 +1,63 @@
+; Do setup work for all tests below: generate bitcode and the combined index.
+; RUN: opt -module-summary %s -o %t.bc
+; RUN: opt -module-summary %p/Inputs/thinlto_samplepgo_icp.ll -o %t2.bc
+; RUN: llvm-lto -thinlto -o %t3 %t.bc %t2.bc
+
+; Checks that calls to static target functions are properly imported and promoted
+; by ICP. Note that the GUID in the profile is computed from the original name.
+; RUN: opt -function-import -summary-file %t3.thinlto.bc %t.bc -o %t4.bc -print-imports 2>&1 | FileCheck %s --check-prefix=IMPORTS
+; IMPORTS: Import _ZL3foov.llvm.0
+; RUN: opt %t4.bc -icp-lto -pgo-icall-prom -S -icp-count-threshold=1 | FileCheck %s --check-prefix=ICALL-PROM
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@fptr = local_unnamed_addr global void ()* null, align 8
+
+; Function Attrs: norecurse uwtable
+define i32 @main() local_unnamed_addr #0 !prof !34 {
+entry:
+ %0 = load void ()*, void ()** @fptr, align 8
+; ICALL-PROM: br i1 %{{[0-9]+}}, label %if.true.direct_targ, label %if.false.orig_indirect
+ tail call void %0(), !prof !40
+ ret i32 0
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3,!4}
+!llvm.ident = !{!31}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 5.0.0 (trunk 297016)", isOptimized: true, runtimeVersion: 0, emissionKind: NoDebug, enums: !2)
+!1 = !DIFile(filename: "main.cc", directory: ".")
+!2 = !{}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !{i32 1, !"ProfileSummary", !5}
+!5 = !{!6, !7, !8, !9, !10, !11, !12, !13}
+!6 = !{!"ProfileFormat", !"SampleProfile"}
+!7 = !{!"TotalCount", i64 3003}
+!8 = !{!"MaxCount", i64 3000}
+!9 = !{!"MaxInternalCount", i64 0}
+!10 = !{!"MaxFunctionCount", i64 0}
+!11 = !{!"NumCounts", i64 3}
+!12 = !{!"NumFunctions", i64 1}
+!13 = !{!"DetailedSummary", !14}
+!14 = !{!15, !16, !17, !18, !19, !20, !20, !21, !21, !22, !23, !24, !25, !26, !27, !28, !29, !30}
+!15 = !{i32 10000, i64 3000, i32 1}
+!16 = !{i32 100000, i64 3000, i32 1}
+!17 = !{i32 200000, i64 3000, i32 1}
+!18 = !{i32 300000, i64 3000, i32 1}
+!19 = !{i32 400000, i64 3000, i32 1}
+!20 = !{i32 500000, i64 3000, i32 1}
+!21 = !{i32 600000, i64 3000, i32 1}
+!22 = !{i32 700000, i64 3000, i32 1}
+!23 = !{i32 800000, i64 3000, i32 1}
+!24 = !{i32 900000, i64 3000, i32 1}
+!25 = !{i32 950000, i64 3000, i32 1}
+!26 = !{i32 990000, i64 3000, i32 1}
+!27 = !{i32 999000, i64 3000, i32 1}
+!28 = !{i32 999900, i64 2, i32 2}
+!29 = !{i32 999990, i64 2, i32 2}
+!30 = !{i32 999999, i64 2, i32 2}
+!31 = !{!"clang version 5.0.0 (trunk 297016)"}
+!34 = !{!"function_entry_count", i64 1}
+!40 = !{!"VP", i32 0, i64 3000, i64 -8789629626369651636, i64 3000}
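(The !40 annotation is the standard indirect-call value profile: value kind 0 is indirect call targets, 3000 is the site total, and the pair that follows is a single (target GUID, count) record. The GUID hashes the original name _ZL3foov; ThinLTO promotion renames the imported copy to _ZL3foov.llvm.0 while the profile keeps the original GUID, so this test only passes if ICP matches targets through the original name, which is exactly what the comment at the top asks for.)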
diff --git a/test/Transforms/RewriteStatepointsForGC/base-vector.ll b/test/Transforms/RewriteStatepointsForGC/base-vector.ll
index 9026275cf682..c34462f45169 100644
--- a/test/Transforms/RewriteStatepointsForGC/base-vector.ll
+++ b/test/Transforms/RewriteStatepointsForGC/base-vector.ll
@@ -88,6 +88,7 @@ entry:
}
declare void @use(i64 addrspace(1)*) "gc-leaf-function"
+declare void @use_vec(<4 x i64 addrspace(1)*>) "gc-leaf-function"
define void @test5(i1 %cnd, i64 addrspace(1)* %obj) gc "statepoint-example" {
; CHECK-LABEL: @test5
@@ -245,3 +246,17 @@ next:
ret i64 addrspace(1)* %bdv
}
declare void @do_safepoint()
+
+define void @test11(<4 x i64 addrspace(1)*> %vec1) gc "statepoint-example" {
+; CHECK-LABEL: @test11(
+; CHECK: @llvm.experimental.gc.statepoint.p0f_isVoidf{{.*}}<4 x i64 addrspace(1)*> %vec1)
+; CHECK: %vec1.relocated = call coldcc <4 x i8 addrspace(1)*> @llvm.experimental.gc.relocate.v4p1i8
+; CHECK: %vec1.relocated.casted = bitcast <4 x i8 addrspace(1)*> %vec1.relocated to <4 x i64 addrspace(1)*>
+; CHECK: %vec2.remat = getelementptr i64, <4 x i64 addrspace(1)*> %vec1.relocated.casted, i32 1024
+; CHECK: call void @use_vec(<4 x i64 addrspace(1)*> %vec2.remat)
+entry:
+ %vec2 = getelementptr i64, <4 x i64 addrspace(1)*> %vec1, i32 1024
+ call void @do_safepoint() [ "deopt"(i32 0, i32 -1, i32 0, i32 0, i32 0) ]
+ call void @use_vec(<4 x i64 addrspace(1) *> %vec2)
+ ret void
+}
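(@test11 covers rematerialization for vectors of GC pointers: rather than relocating the derived vector %vec2 across the safepoint, the pass relocates only the base %vec1 and recomputes the cheap GEP afterwards, which is why the checks expect

  %vec2.remat = getelementptr i64, <4 x i64 addrspace(1)*> %vec1.relocated.casted, i32 1024

instead of a second gc.relocate.)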
diff --git a/test/Transforms/SCCP/indirectbr.ll b/test/Transforms/SCCP/indirectbr.ll
new file mode 100644
index 000000000000..b977961ca49b
--- /dev/null
+++ b/test/Transforms/SCCP/indirectbr.ll
@@ -0,0 +1,76 @@
+; RUN: opt -S -sccp < %s | FileCheck %s
+
+declare void @BB0_f()
+declare void @BB1_f()
+
+; Make sure we can eliminate what is in BB0 as we know that the indirectbr is going to BB1.
+;
+; CHECK-LABEL: define void @indbrtest1(
+; CHECK-NOT: call void @BB0_f()
+; CHECK: ret void
+define void @indbrtest1() {
+entry:
+ indirectbr i8* blockaddress(@indbrtest1, %BB1), [label %BB0, label %BB1]
+BB0:
+ call void @BB0_f()
+ br label %BB1
+BB1:
+ call void @BB1_f()
+ ret void
+}
+
+; Make sure we can eliminate what is in BB0 as we know that the indirectbr is going to BB1
+; by looking through the casts. The casts should be folded away when they are visited
+; before the indirectbr instruction.
+;
+; CHECK-LABEL: define void @indbrtest2(
+; CHECK-NOT: call void @BB0_f()
+; CHECK: ret void
+define void @indbrtest2() {
+entry:
+ %a = ptrtoint i8* blockaddress(@indbrtest2, %BB1) to i64
+ %b = inttoptr i64 %a to i8*
+ %c = bitcast i8* %b to i8*
+ indirectbr i8* %b, [label %BB0, label %BB1]
+BB0:
+ call void @BB0_f()
+ br label %BB1
+BB1:
+ call void @BB1_f()
+ ret void
+}
+
+; Make sure we cannot eliminate BB0, as we do not know the target of the indirectbr.
+;
+; CHECK-LABEL: define void @indbrtest3(
+; CHECK: call void @BB0_f()
+; CHECK: ret void
+define void @indbrtest3(i8** %Q) {
+entry:
+ %t = load i8*, i8** %Q
+ indirectbr i8* %t, [label %BB0, label %BB1]
+BB0:
+ call void @BB0_f()
+ br label %BB1
+BB1:
+ call void @BB1_f()
+ ret void
+}
+
+; Make sure we pick the first successor on undef, eliminating the indirectbr edge to BB1.
+;
+; CHECK-LABEL: define void @indbrtest4(
+; CHECK: call void @BB0_f()
+; CHECK: ret void
+define void @indbrtest4(i8** %Q) {
+entry:
+ indirectbr i8* undef, [label %BB0, label %BB1]
+BB0:
+ call void @BB0_f()
+ br label %BB1
+BB1:
+ call void @BB1_f()
+ ret void
+}
+
+
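(The interesting step in indbrtest2 is constant folding of the cast chain: ptrtoint of a blockaddress followed by inttoptr back to i8* folds to the blockaddress itself when SCCP visits the casts, so by the time the indirectbr is visited its target is known and the edge to %BB0 is never marked feasible, the same situation as indbrtest1. The folded view, roughly:

  indirectbr i8* blockaddress(@indbrtest2, %BB1), [label %BB0, label %BB1]
)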
diff --git a/test/Transforms/SCCP/loadtest.ll b/test/Transforms/SCCP/loadtest.ll
index b88b44b76040..89c7371625ad 100644
--- a/test/Transforms/SCCP/loadtest.ll
+++ b/test/Transforms/SCCP/loadtest.ll
@@ -1,7 +1,7 @@
; This test makes sure that these instructions are properly constant propagated.
-; RUN: opt < %s -default-data-layout="e-p:32:32" -sccp -S | FileCheck %s
-; RUN: opt < %s -default-data-layout="E-p:32:32" -sccp -S | FileCheck %s
+; RUN: opt < %s -data-layout="e-p:32:32" -sccp -S | FileCheck %s
+; RUN: opt < %s -data-layout="E-p:32:32" -sccp -S | FileCheck %s
; CHECK-NOT: load
diff --git a/test/Transforms/SCCP/overdefined-div.ll b/test/Transforms/SCCP/overdefined-div.ll
new file mode 100644
index 000000000000..f0b16155c178
--- /dev/null
+++ b/test/Transforms/SCCP/overdefined-div.ll
@@ -0,0 +1,32 @@
+; RUN: opt < %s -sccp -S | FileCheck %s
+
+; Test that SCCP knows when a div can fold to a constant even with overdefined operands.
+
+; 0 / X = 0 even if X is overdefined.
+; CHECK-LABEL: test1
+; CHECK-NEXT: ret i32 0
+define i32 @test1(i32 %foo) {
+ %tinkywinky = udiv i32 0, %foo
+ ret i32 %tinkywinky
+}
+
+; CHECK-LABEL: test2
+; CHECK-NEXT: ret i32 0
+define i32 @test2(i32 %foo) {
+ %tinkywinky = sdiv i32 0, %foo
+ ret i32 %tinkywinky
+}
+
+; CHECK-LABEL: test3
+; CHECK: ret i32 %tinkywinky
+define i32 @test3(i32 %foo) {
+ %tinkywinky = udiv i32 %foo, 0
+ ret i32 %tinkywinky
+}
+
+; CHECK-LABEL: test4
+; CHECK: ret i32 %tinkywinky
+define i32 @test4(i32 %foo) {
+ %tinkywinky = sdiv i32 %foo, 0
+ ret i32 %tinkywinky
+}
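(The split between the four tests is deliberate: 0 udiv %foo and 0 sdiv %foo may fold to 0 without knowing %foo, because 0 divided by any nonzero value is 0 and a zero divisor would be undefined behavior anyway; %foo divided by the constant 0, by contrast, has no defined result, so SCCP must leave those instructions alone, which is what the unchanged ret i32 %tinkywinky checks enforce.)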
diff --git a/test/Transforms/SLPVectorizer/AArch64/gather-root.ll b/test/Transforms/SLPVectorizer/AArch64/gather-root.ll
index 2a9fc9e1c03e..b7fa5452f251 100644
--- a/test/Transforms/SLPVectorizer/AArch64/gather-root.ll
+++ b/test/Transforms/SLPVectorizer/AArch64/gather-root.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -slp-vectorizer -S | FileCheck %s --check-prefix=DEFAULT
; RUN: opt < %s -slp-schedule-budget=0 -slp-min-tree-size=0 -slp-threshold=-30 -slp-vectorizer -S | FileCheck %s --check-prefix=GATHER
; RUN: opt < %s -slp-schedule-budget=0 -slp-threshold=-30 -slp-vectorizer -S | FileCheck %s --check-prefix=MAX-COST
@@ -8,7 +9,7 @@ target triple = "aarch64--linux-gnu"
@a = common global [80 x i8] zeroinitializer, align 16
; DEFAULT-LABEL: @PR28330(
-; DEFAULT: %tmp17 = phi i32 [ %tmp34, %for.body ], [ 0, %entry ]
+; DEFAULT: %tmp17 = phi i32 [ %bin.extra, %for.body ], [ 0, %entry ]
; DEFAULT: %[[S0:.+]] = select <8 x i1> %1, <8 x i32> <i32 -720, i32 -720, i32 -720, i32 -720, i32 -720, i32 -720, i32 -720, i32 -720>, <8 x i32> <i32 -80, i32 -80, i32 -80, i32 -80, i32 -80, i32 -80, i32 -80, i32 -80>
; DEFAULT: %[[R0:.+]] = shufflevector <8 x i32> %[[S0]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
; DEFAULT: %[[R1:.+]] = add <8 x i32> %[[S0]], %[[R0]]
@@ -17,10 +18,10 @@ target triple = "aarch64--linux-gnu"
; DEFAULT: %[[R4:.+]] = shufflevector <8 x i32> %[[R3]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; DEFAULT: %[[R5:.+]] = add <8 x i32> %[[R3]], %[[R4]]
; DEFAULT: %[[R6:.+]] = extractelement <8 x i32> %[[R5]], i32 0
-; DEFAULT: %tmp34 = add i32 %[[R6]], %tmp17
+; DEFAULT: %bin.extra = add i32 %[[R6]], %tmp17
;
; GATHER-LABEL: @PR28330(
-; GATHER: %tmp17 = phi i32 [ %tmp34, %for.body ], [ 0, %entry ]
+; GATHER: %tmp17 = phi i32 [ %bin.extra, %for.body ], [ 0, %entry ]
; GATHER: %tmp19 = select i1 %tmp1, i32 -720, i32 -80
; GATHER: %tmp21 = select i1 %tmp3, i32 -720, i32 -80
; GATHER: %tmp23 = select i1 %tmp5, i32 -720, i32 -80
@@ -44,7 +45,7 @@ target triple = "aarch64--linux-gnu"
; GATHER: %[[R4:.+]] = shufflevector <8 x i32> %[[R3]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; GATHER: %[[R5:.+]] = add <8 x i32> %[[R3]], %[[R4]]
; GATHER: %[[R6:.+]] = extractelement <8 x i32> %[[R5]], i32 0
-; GATHER: %tmp34 = add i32 %[[R6]], %tmp17
+; GATHER: %bin.extra = add i32 %[[R6]], %tmp17
;
; MAX-COST-LABEL: @PR28330(
; MAX-COST-NOT: shufflevector
@@ -89,3 +90,126 @@ for.body:
%tmp34 = add i32 %tmp32, %tmp33
br label %for.body
}
+
+define void @PR32038(i32 %n) {
+; DEFAULT-LABEL: @PR32038(
+; DEFAULT-NEXT: entry:
+; DEFAULT-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* bitcast (i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 1) to <8 x i8>*), align 1
+; DEFAULT-NEXT: [[TMP1:%.*]] = icmp eq <8 x i8> [[TMP0]], zeroinitializer
+; DEFAULT-NEXT: br label [[FOR_BODY:%.*]]
+; DEFAULT: for.body:
+; DEFAULT-NEXT: [[TMP17:%.*]] = phi i32 [ [[BIN_EXTRA:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
+; DEFAULT-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i32> <i32 -720, i32 -720, i32 -720, i32 -720, i32 -720, i32 -720, i32 -720, i32 -720>, <8 x i32> <i32 -80, i32 -80, i32 -80, i32 -80, i32 -80, i32 -80, i32 -80, i32 -80>
+; DEFAULT-NEXT: [[TMP20:%.*]] = add i32 -5, undef
+; DEFAULT-NEXT: [[TMP22:%.*]] = add i32 [[TMP20]], undef
+; DEFAULT-NEXT: [[TMP24:%.*]] = add i32 [[TMP22]], undef
+; DEFAULT-NEXT: [[TMP26:%.*]] = add i32 [[TMP24]], undef
+; DEFAULT-NEXT: [[TMP28:%.*]] = add i32 [[TMP26]], undef
+; DEFAULT-NEXT: [[TMP30:%.*]] = add i32 [[TMP28]], undef
+; DEFAULT-NEXT: [[TMP32:%.*]] = add i32 [[TMP30]], undef
+; DEFAULT-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP2]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; DEFAULT-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP2]], [[RDX_SHUF]]
+; DEFAULT-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; DEFAULT-NEXT: [[BIN_RDX2:%.*]] = add <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; DEFAULT-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; DEFAULT-NEXT: [[BIN_RDX4:%.*]] = add <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
+; DEFAULT-NEXT: [[TMP3:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0
+; DEFAULT-NEXT: [[BIN_EXTRA]] = add i32 [[TMP3]], -5
+; DEFAULT-NEXT: [[TMP34:%.*]] = add i32 [[TMP32]], undef
+; DEFAULT-NEXT: br label [[FOR_BODY]]
+;
+; GATHER-LABEL: @PR32038(
+; GATHER-NEXT: entry:
+; GATHER-NEXT: [[TMP0:%.*]] = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 1), align 1
+; GATHER-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 0
+; GATHER-NEXT: [[TMP2:%.*]] = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 2), align 2
+; GATHER-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP2]], 0
+; GATHER-NEXT: [[TMP4:%.*]] = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 3), align 1
+; GATHER-NEXT: [[TMP5:%.*]] = icmp eq i8 [[TMP4]], 0
+; GATHER-NEXT: [[TMP6:%.*]] = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 4), align 4
+; GATHER-NEXT: [[TMP7:%.*]] = icmp eq i8 [[TMP6]], 0
+; GATHER-NEXT: [[TMP8:%.*]] = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 5), align 1
+; GATHER-NEXT: [[TMP9:%.*]] = icmp eq i8 [[TMP8]], 0
+; GATHER-NEXT: [[TMP10:%.*]] = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 6), align 2
+; GATHER-NEXT: [[TMP11:%.*]] = icmp eq i8 [[TMP10]], 0
+; GATHER-NEXT: [[TMP12:%.*]] = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 7), align 1
+; GATHER-NEXT: [[TMP13:%.*]] = icmp eq i8 [[TMP12]], 0
+; GATHER-NEXT: [[TMP14:%.*]] = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 8), align 8
+; GATHER-NEXT: [[TMP15:%.*]] = icmp eq i8 [[TMP14]], 0
+; GATHER-NEXT: br label [[FOR_BODY:%.*]]
+; GATHER: for.body:
+; GATHER-NEXT: [[TMP17:%.*]] = phi i32 [ [[BIN_EXTRA:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
+; GATHER-NEXT: [[TMP19:%.*]] = select i1 [[TMP1]], i32 -720, i32 -80
+; GATHER-NEXT: [[TMP20:%.*]] = add i32 -5, [[TMP19]]
+; GATHER-NEXT: [[TMP21:%.*]] = select i1 [[TMP3]], i32 -720, i32 -80
+; GATHER-NEXT: [[TMP22:%.*]] = add i32 [[TMP20]], [[TMP21]]
+; GATHER-NEXT: [[TMP23:%.*]] = select i1 [[TMP5]], i32 -720, i32 -80
+; GATHER-NEXT: [[TMP24:%.*]] = add i32 [[TMP22]], [[TMP23]]
+; GATHER-NEXT: [[TMP25:%.*]] = select i1 [[TMP7]], i32 -720, i32 -80
+; GATHER-NEXT: [[TMP26:%.*]] = add i32 [[TMP24]], [[TMP25]]
+; GATHER-NEXT: [[TMP27:%.*]] = select i1 [[TMP9]], i32 -720, i32 -80
+; GATHER-NEXT: [[TMP28:%.*]] = add i32 [[TMP26]], [[TMP27]]
+; GATHER-NEXT: [[TMP29:%.*]] = select i1 [[TMP11]], i32 -720, i32 -80
+; GATHER-NEXT: [[TMP30:%.*]] = add i32 [[TMP28]], [[TMP29]]
+; GATHER-NEXT: [[TMP31:%.*]] = select i1 [[TMP13]], i32 -720, i32 -80
+; GATHER-NEXT: [[TMP32:%.*]] = add i32 [[TMP30]], [[TMP31]]
+; GATHER-NEXT: [[TMP33:%.*]] = select i1 [[TMP15]], i32 -720, i32 -80
+; GATHER-NEXT: [[TMP0:%.*]] = insertelement <8 x i32> undef, i32 [[TMP19]], i32 0
+; GATHER-NEXT: [[TMP1:%.*]] = insertelement <8 x i32> [[TMP0]], i32 [[TMP21]], i32 1
+; GATHER-NEXT: [[TMP2:%.*]] = insertelement <8 x i32> [[TMP1]], i32 [[TMP23]], i32 2
+; GATHER-NEXT: [[TMP3:%.*]] = insertelement <8 x i32> [[TMP2]], i32 [[TMP25]], i32 3
+; GATHER-NEXT: [[TMP4:%.*]] = insertelement <8 x i32> [[TMP3]], i32 [[TMP27]], i32 4
+; GATHER-NEXT: [[TMP5:%.*]] = insertelement <8 x i32> [[TMP4]], i32 [[TMP29]], i32 5
+; GATHER-NEXT: [[TMP6:%.*]] = insertelement <8 x i32> [[TMP5]], i32 [[TMP31]], i32 6
+; GATHER-NEXT: [[TMP7:%.*]] = insertelement <8 x i32> [[TMP6]], i32 [[TMP33]], i32 7
+; GATHER-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP7]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; GATHER-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP7]], [[RDX_SHUF]]
+; GATHER-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; GATHER-NEXT: [[BIN_RDX2:%.*]] = add <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; GATHER-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; GATHER-NEXT: [[BIN_RDX4:%.*]] = add <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
+; GATHER-NEXT: [[TMP8:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0
+; GATHER-NEXT: [[BIN_EXTRA]] = add i32 [[TMP8]], -5
+; GATHER-NEXT: [[TMP34:%.*]] = add i32 [[TMP32]], [[TMP33]]
+; GATHER-NEXT: br label [[FOR_BODY]]
+;
+; MAX-COST-LABEL: @PR32038(
+entry:
+ %tmp0 = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 1), align 1
+ %tmp1 = icmp eq i8 %tmp0, 0
+ %tmp2 = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 2), align 2
+ %tmp3 = icmp eq i8 %tmp2, 0
+ %tmp4 = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 3), align 1
+ %tmp5 = icmp eq i8 %tmp4, 0
+ %tmp6 = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 4), align 4
+ %tmp7 = icmp eq i8 %tmp6, 0
+ %tmp8 = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 5), align 1
+ %tmp9 = icmp eq i8 %tmp8, 0
+ %tmp10 = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 6), align 2
+ %tmp11 = icmp eq i8 %tmp10, 0
+ %tmp12 = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 7), align 1
+ %tmp13 = icmp eq i8 %tmp12, 0
+ %tmp14 = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 8), align 8
+ %tmp15 = icmp eq i8 %tmp14, 0
+ br label %for.body
+
+for.body:
+ %tmp17 = phi i32 [ %tmp34, %for.body ], [ 0, %entry ]
+ %tmp19 = select i1 %tmp1, i32 -720, i32 -80
+ %tmp20 = add i32 -5, %tmp19
+ %tmp21 = select i1 %tmp3, i32 -720, i32 -80
+ %tmp22 = add i32 %tmp20, %tmp21
+ %tmp23 = select i1 %tmp5, i32 -720, i32 -80
+ %tmp24 = add i32 %tmp22, %tmp23
+ %tmp25 = select i1 %tmp7, i32 -720, i32 -80
+ %tmp26 = add i32 %tmp24, %tmp25
+ %tmp27 = select i1 %tmp9, i32 -720, i32 -80
+ %tmp28 = add i32 %tmp26, %tmp27
+ %tmp29 = select i1 %tmp11, i32 -720, i32 -80
+ %tmp30 = add i32 %tmp28, %tmp29
+ %tmp31 = select i1 %tmp13, i32 -720, i32 -80
+ %tmp32 = add i32 %tmp30, %tmp31
+ %tmp33 = select i1 %tmp15, i32 -720, i32 -80
+ %tmp34 = add i32 %tmp32, %tmp33
+ br label %for.body
+}
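(The DEFAULT checks for @PR32038 spell out the vectorized horizontal reduction: the 8-lane select result is reduced in log2(8) = 3 shuffle-and-add rounds, the RDX_SHUF*/BIN_RDX* pairs halving the live lanes 8 -> 4 -> 2 -> 1, lane 0 is then extracted, and the scalar initial value -5 is folded into the final add as [[BIN_EXTRA]], which replaces %tmp34 as the value feeding the loop phi.)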
diff --git a/test/Transforms/SLPVectorizer/AMDGPU/simplebb.ll b/test/Transforms/SLPVectorizer/AMDGPU/simplebb.ll
index 35763953911b..63c6d77954d8 100644
--- a/test/Transforms/SLPVectorizer/AMDGPU/simplebb.ll
+++ b/test/Transforms/SLPVectorizer/AMDGPU/simplebb.ll
@@ -9,7 +9,7 @@ target datalayout = "e-p:32:32:32-p3:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-
; Simple 3-pair chain with loads and stores
-define void @test1_as_3_3_3(double addrspace(3)* %a, double addrspace(3)* %b, double addrspace(3)* %c) {
+define amdgpu_kernel void @test1_as_3_3_3(double addrspace(3)* %a, double addrspace(3)* %b, double addrspace(3)* %c) {
; CHECK-LABEL: @test1_as_3_3_3(
; CHECK: load <2 x double>, <2 x double> addrspace(3)*
; CHECK: load <2 x double>, <2 x double> addrspace(3)*
@@ -29,7 +29,7 @@ define void @test1_as_3_3_3(double addrspace(3)* %a, double addrspace(3)* %b, do
ret void
}
-define void @test1_as_3_0_0(double addrspace(3)* %a, double* %b, double* %c) {
+define amdgpu_kernel void @test1_as_3_0_0(double addrspace(3)* %a, double* %b, double* %c) {
; CHECK-LABEL: @test1_as_3_0_0(
; CHECK: load <2 x double>, <2 x double> addrspace(3)*
; CHECK: load <2 x double>, <2 x double>*
@@ -49,7 +49,7 @@ define void @test1_as_3_0_0(double addrspace(3)* %a, double* %b, double* %c) {
ret void
}
-define void @test1_as_0_0_3(double* %a, double* %b, double addrspace(3)* %c) {
+define amdgpu_kernel void @test1_as_0_0_3(double* %a, double* %b, double addrspace(3)* %c) {
; CHECK-LABEL: @test1_as_0_0_3(
; CHECK: load <2 x double>, <2 x double>*
; CHECK: load <2 x double>, <2 x double>*
diff --git a/test/Transforms/SLPVectorizer/SystemZ/SLP-cmp-cost-query.ll b/test/Transforms/SLPVectorizer/SystemZ/SLP-cmp-cost-query.ll
new file mode 100644
index 000000000000..1a32f6590663
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/SystemZ/SLP-cmp-cost-query.ll
@@ -0,0 +1,36 @@
+; REQUIRES: asserts
+; RUN: opt -mtriple=systemz-unknown -mcpu=z13 -slp-vectorizer -debug-only=SLP \
+; RUN: -S -disable-output < %s 2>&1 | FileCheck %s
+;
+; Check that the SLP vectorizer computes the right cost difference for a
+; compare node.
+
+; Function Attrs: norecurse nounwind readonly
+define void @fun(i8* nocapture, i32 zeroext) local_unnamed_addr #0 {
+.lr.ph.preheader:
+ br label %.lr.ph
+
+.lr.ph: ; preds = %.lr.ph.preheader, %.lr.ph
+ %2 = phi i32 [ %., %.lr.ph ], [ undef, %.lr.ph.preheader ]
+ %3 = phi i32 [ %.9, %.lr.ph ], [ undef, %.lr.ph.preheader ]
+ %4 = icmp ult i32 %2, %1
+ %5 = select i1 %4, i32 0, i32 %1
+ %. = sub i32 %2, %5
+ %6 = icmp ult i32 %3, %1
+ %7 = select i1 %6, i32 0, i32 %1
+ %.9 = sub i32 %3, %7
+ %8 = zext i32 %. to i64
+ %9 = getelementptr inbounds i8, i8* %0, i64 %8
+ %10 = load i8, i8* %9, align 1
+ %11 = zext i32 %.9 to i64
+ %12 = getelementptr inbounds i8, i8* %0, i64 %11
+ %13 = load i8, i8* %12, align 1
+ %14 = icmp eq i8 %10, %13
+ br i1 %14, label %.lr.ph, label %._crit_edge
+
+._crit_edge: ; preds = %.lr.ph
+ ret void
+
+; CHECK: SLP: Adding cost -1 for bundle that starts with %4 = icmp ult i32 %2, %1.
+}
+
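(The expected debug line encodes the cost-model delta the comment refers to: the SLP cost of a bundle is the vector instruction's cost minus the summed costs of the scalar instructions it replaces, so "Adding cost -1" for the icmp bundle means that on z13 one vector compare is modeled as one unit cheaper than the scalar i32 compares it covers.)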
diff --git a/test/Transforms/SLPVectorizer/X86/bitreverse.ll b/test/Transforms/SLPVectorizer/X86/bitreverse.ll
index c6d65bbe6840..749e93b04134 100644
--- a/test/Transforms/SLPVectorizer/X86/bitreverse.ll
+++ b/test/Transforms/SLPVectorizer/X86/bitreverse.ll
@@ -22,29 +22,11 @@ declare i16 @llvm.bitreverse.i16(i16)
declare i8 @llvm.bitreverse.i8(i8)
define void @bitreverse_2i64() #0 {
-; SSE-LABEL: @bitreverse_2i64(
-; SSE-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
-; SSE-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
-; SSE-NEXT: [[BITREVERSE0:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD0]])
-; SSE-NEXT: [[BITREVERSE1:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD1]])
-; SSE-NEXT: store i64 [[BITREVERSE0]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i32 0, i64 0), align 8
-; SSE-NEXT: store i64 [[BITREVERSE1]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i32 0, i64 1), align 8
-; SSE-NEXT: ret void
-;
-; AVX-LABEL: @bitreverse_2i64(
-; AVX-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
-; AVX-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
-; AVX-NEXT: [[BITREVERSE0:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD0]])
-; AVX-NEXT: [[BITREVERSE1:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD1]])
-; AVX-NEXT: store i64 [[BITREVERSE0]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i32 0, i64 0), align 8
-; AVX-NEXT: store i64 [[BITREVERSE1]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i32 0, i64 1), align 8
-; AVX-NEXT: ret void
-;
-; XOP-LABEL: @bitreverse_2i64(
-; XOP-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*), align 8
-; XOP-NEXT: [[TMP2:%.*]] = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> [[TMP1]])
-; XOP-NEXT: store <2 x i64> [[TMP2]], <2 x i64>* bitcast ([4 x i64]* @dst64 to <2 x i64>*), align 8
-; XOP-NEXT: ret void
+; CHECK-LABEL: @bitreverse_2i64(
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> [[TMP1]])
+; CHECK-NEXT: store <2 x i64> [[TMP2]], <2 x i64>* bitcast ([4 x i64]* @dst64 to <2 x i64>*), align 8
+; CHECK-NEXT: ret void
;
%ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
%ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
@@ -57,40 +39,19 @@ define void @bitreverse_2i64() #0 {
define void @bitreverse_4i64() #0 {
; SSE-LABEL: @bitreverse_4i64(
-; SSE-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 0), align 4
-; SSE-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 1), align 4
-; SSE-NEXT: [[LD2:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 2), align 4
-; SSE-NEXT: [[LD3:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 3), align 4
-; SSE-NEXT: [[BITREVERSE0:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD0]])
-; SSE-NEXT: [[BITREVERSE1:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD1]])
-; SSE-NEXT: [[BITREVERSE2:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD2]])
-; SSE-NEXT: [[BITREVERSE3:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD3]])
-; SSE-NEXT: store i64 [[BITREVERSE0]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 0), align 4
-; SSE-NEXT: store i64 [[BITREVERSE1]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 1), align 4
-; SSE-NEXT: store i64 [[BITREVERSE2]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 2), align 4
-; SSE-NEXT: store i64 [[BITREVERSE3]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 3), align 4
+; SSE-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*), align 4
+; SSE-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 2) to <2 x i64>*), align 4
+; SSE-NEXT: [[TMP3:%.*]] = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> [[TMP1]])
+; SSE-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> [[TMP2]])
+; SSE-NEXT: store <2 x i64> [[TMP3]], <2 x i64>* bitcast ([4 x i64]* @dst64 to <2 x i64>*), align 4
+; SSE-NEXT: store <2 x i64> [[TMP4]], <2 x i64>* bitcast (i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 2) to <2 x i64>*), align 4
; SSE-NEXT: ret void
;
-; AVX1-LABEL: @bitreverse_4i64(
-; AVX1-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 0), align 4
-; AVX1-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 1), align 4
-; AVX1-NEXT: [[LD2:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 2), align 4
-; AVX1-NEXT: [[LD3:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 3), align 4
-; AVX1-NEXT: [[BITREVERSE0:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD0]])
-; AVX1-NEXT: [[BITREVERSE1:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD1]])
-; AVX1-NEXT: [[BITREVERSE2:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD2]])
-; AVX1-NEXT: [[BITREVERSE3:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD3]])
-; AVX1-NEXT: store i64 [[BITREVERSE0]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 0), align 4
-; AVX1-NEXT: store i64 [[BITREVERSE1]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 1), align 4
-; AVX1-NEXT: store i64 [[BITREVERSE2]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 2), align 4
-; AVX1-NEXT: store i64 [[BITREVERSE3]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 3), align 4
-; AVX1-NEXT: ret void
-;
-; AVX2-LABEL: @bitreverse_4i64(
-; AVX2-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([4 x i64]* @src64 to <4 x i64>*), align 4
-; AVX2-NEXT: [[TMP2:%.*]] = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> [[TMP1]])
-; AVX2-NEXT: store <4 x i64> [[TMP2]], <4 x i64>* bitcast ([4 x i64]* @dst64 to <4 x i64>*), align 4
-; AVX2-NEXT: ret void
+; AVX-LABEL: @bitreverse_4i64(
+; AVX-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([4 x i64]* @src64 to <4 x i64>*), align 4
+; AVX-NEXT: [[TMP2:%.*]] = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> [[TMP1]])
+; AVX-NEXT: store <4 x i64> [[TMP2]], <4 x i64>* bitcast ([4 x i64]* @dst64 to <4 x i64>*), align 4
+; AVX-NEXT: ret void
;
; XOP-LABEL: @bitreverse_4i64(
; XOP-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([4 x i64]* @src64 to <4 x i64>*), align 4
@@ -114,32 +75,11 @@ define void @bitreverse_4i64() #0 {
}
define void @bitreverse_4i32() #0 {
-; SSE-LABEL: @bitreverse_4i32(
-; SSE-NEXT: [[LD0:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 4
-; SSE-NEXT: [[LD1:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 1), align 4
-; SSE-NEXT: [[LD2:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 2), align 4
-; SSE-NEXT: [[LD3:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 4
-; SSE-NEXT: [[BITREVERSE0:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD0]])
-; SSE-NEXT: [[BITREVERSE1:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD1]])
-; SSE-NEXT: [[BITREVERSE2:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD2]])
-; SSE-NEXT: [[BITREVERSE3:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD3]])
-; SSE-NEXT: store i32 [[BITREVERSE0]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 0), align 4
-; SSE-NEXT: store i32 [[BITREVERSE1]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 1), align 4
-; SSE-NEXT: store i32 [[BITREVERSE2]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 2), align 4
-; SSE-NEXT: store i32 [[BITREVERSE3]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 3), align 4
-; SSE-NEXT: ret void
-;
-; AVX-LABEL: @bitreverse_4i32(
-; AVX-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*), align 4
-; AVX-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> [[TMP1]])
-; AVX-NEXT: store <4 x i32> [[TMP2]], <4 x i32>* bitcast ([8 x i32]* @dst32 to <4 x i32>*), align 4
-; AVX-NEXT: ret void
-;
-; XOP-LABEL: @bitreverse_4i32(
-; XOP-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*), align 4
-; XOP-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> [[TMP1]])
-; XOP-NEXT: store <4 x i32> [[TMP2]], <4 x i32>* bitcast ([8 x i32]* @dst32 to <4 x i32>*), align 4
-; XOP-NEXT: ret void
+; CHECK-LABEL: @bitreverse_4i32(
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> [[TMP1]])
+; CHECK-NEXT: store <4 x i32> [[TMP2]], <4 x i32>* bitcast ([8 x i32]* @dst32 to <4 x i32>*), align 4
+; CHECK-NEXT: ret void
;
%ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 4
%ld1 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 1), align 4
@@ -158,30 +98,12 @@ define void @bitreverse_4i32() #0 {
define void @bitreverse_8i32() #0 {
; SSE-LABEL: @bitreverse_8i32(
-; SSE-NEXT: [[LD0:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 2
-; SSE-NEXT: [[LD1:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 1), align 2
-; SSE-NEXT: [[LD2:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 2), align 2
-; SSE-NEXT: [[LD3:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 2
-; SSE-NEXT: [[LD4:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 4), align 2
-; SSE-NEXT: [[LD5:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 5), align 2
-; SSE-NEXT: [[LD6:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 6), align 2
-; SSE-NEXT: [[LD7:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 7), align 2
-; SSE-NEXT: [[BITREVERSE0:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD0]])
-; SSE-NEXT: [[BITREVERSE1:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD1]])
-; SSE-NEXT: [[BITREVERSE2:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD2]])
-; SSE-NEXT: [[BITREVERSE3:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD3]])
-; SSE-NEXT: [[BITREVERSE4:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD4]])
-; SSE-NEXT: [[BITREVERSE5:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD5]])
-; SSE-NEXT: [[BITREVERSE6:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD6]])
-; SSE-NEXT: [[BITREVERSE7:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD7]])
-; SSE-NEXT: store i32 [[BITREVERSE0]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 0), align 2
-; SSE-NEXT: store i32 [[BITREVERSE1]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 1), align 2
-; SSE-NEXT: store i32 [[BITREVERSE2]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 2), align 2
-; SSE-NEXT: store i32 [[BITREVERSE3]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 3), align 2
-; SSE-NEXT: store i32 [[BITREVERSE4]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 4), align 2
-; SSE-NEXT: store i32 [[BITREVERSE5]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 5), align 2
-; SSE-NEXT: store i32 [[BITREVERSE6]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 6), align 2
-; SSE-NEXT: store i32 [[BITREVERSE7]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 7), align 2
+; SSE-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*), align 2
+; SSE-NEXT: [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 4) to <4 x i32>*), align 2
+; SSE-NEXT: [[TMP3:%.*]] = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> [[TMP1]])
+; SSE-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> [[TMP2]])
+; SSE-NEXT: store <4 x i32> [[TMP3]], <4 x i32>* bitcast ([8 x i32]* @dst32 to <4 x i32>*), align 2
+; SSE-NEXT: store <4 x i32> [[TMP4]], <4 x i32>* bitcast (i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 4) to <4 x i32>*), align 2
; SSE-NEXT: ret void
;
; AVX-LABEL: @bitreverse_8i32(
@@ -224,44 +146,11 @@ define void @bitreverse_8i32() #0 {
}
define void @bitreverse_8i16() #0 {
-; SSE-LABEL: @bitreverse_8i16(
-; SSE-NEXT: [[LD0:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align 2
-; SSE-NEXT: [[LD1:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 1), align 2
-; SSE-NEXT: [[LD2:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 2), align 2
-; SSE-NEXT: [[LD3:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align 2
-; SSE-NEXT: [[LD4:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 4), align 2
-; SSE-NEXT: [[LD5:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 5), align 2
-; SSE-NEXT: [[LD6:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 6), align 2
-; SSE-NEXT: [[LD7:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align 2
-; SSE-NEXT: [[BITREVERSE0:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD0]])
-; SSE-NEXT: [[BITREVERSE1:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD1]])
-; SSE-NEXT: [[BITREVERSE2:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD2]])
-; SSE-NEXT: [[BITREVERSE3:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD3]])
-; SSE-NEXT: [[BITREVERSE4:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD4]])
-; SSE-NEXT: [[BITREVERSE5:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD5]])
-; SSE-NEXT: [[BITREVERSE6:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD6]])
-; SSE-NEXT: [[BITREVERSE7:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD7]])
-; SSE-NEXT: store i16 [[BITREVERSE0]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 0), align 2
-; SSE-NEXT: store i16 [[BITREVERSE1]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 1), align 2
-; SSE-NEXT: store i16 [[BITREVERSE2]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 2), align 2
-; SSE-NEXT: store i16 [[BITREVERSE3]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 3), align 2
-; SSE-NEXT: store i16 [[BITREVERSE4]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 4), align 2
-; SSE-NEXT: store i16 [[BITREVERSE5]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 5), align 2
-; SSE-NEXT: store i16 [[BITREVERSE6]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 6), align 2
-; SSE-NEXT: store i16 [[BITREVERSE7]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 7), align 2
-; SSE-NEXT: ret void
-;
-; AVX-LABEL: @bitreverse_8i16(
-; AVX-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([16 x i16]* @src16 to <8 x i16>*), align 2
-; AVX-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> [[TMP1]])
-; AVX-NEXT: store <8 x i16> [[TMP2]], <8 x i16>* bitcast ([16 x i16]* @dst16 to <8 x i16>*), align 2
-; AVX-NEXT: ret void
-;
-; XOP-LABEL: @bitreverse_8i16(
-; XOP-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([16 x i16]* @src16 to <8 x i16>*), align 2
-; XOP-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> [[TMP1]])
-; XOP-NEXT: store <8 x i16> [[TMP2]], <8 x i16>* bitcast ([16 x i16]* @dst16 to <8 x i16>*), align 2
-; XOP-NEXT: ret void
+; CHECK-LABEL: @bitreverse_8i16(
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([16 x i16]* @src16 to <8 x i16>*), align 2
+; CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> [[TMP1]])
+; CHECK-NEXT: store <8 x i16> [[TMP2]], <8 x i16>* bitcast ([16 x i16]* @dst16 to <8 x i16>*), align 2
+; CHECK-NEXT: ret void
;
%ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align 2
%ld1 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 1), align 2
@@ -292,54 +181,12 @@ define void @bitreverse_8i16() #0 {
define void @bitreverse_16i16() #0 {
; SSE-LABEL: @bitreverse_16i16(
-; SSE-NEXT: [[LD0:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align 2
-; SSE-NEXT: [[LD1:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 1), align 2
-; SSE-NEXT: [[LD2:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 2), align 2
-; SSE-NEXT: [[LD3:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align 2
-; SSE-NEXT: [[LD4:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 4), align 2
-; SSE-NEXT: [[LD5:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 5), align 2
-; SSE-NEXT: [[LD6:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 6), align 2
-; SSE-NEXT: [[LD7:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align 2
-; SSE-NEXT: [[LD8:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 8), align 2
-; SSE-NEXT: [[LD9:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 9), align 2
-; SSE-NEXT: [[LD10:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 10), align 2
-; SSE-NEXT: [[LD11:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 11), align 2
-; SSE-NEXT: [[LD12:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 12), align 2
-; SSE-NEXT: [[LD13:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 13), align 2
-; SSE-NEXT: [[LD14:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 14), align 2
-; SSE-NEXT: [[LD15:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 15), align 2
-; SSE-NEXT: [[BITREVERSE0:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD0]])
-; SSE-NEXT: [[BITREVERSE1:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD1]])
-; SSE-NEXT: [[BITREVERSE2:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD2]])
-; SSE-NEXT: [[BITREVERSE3:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD3]])
-; SSE-NEXT: [[BITREVERSE4:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD4]])
-; SSE-NEXT: [[BITREVERSE5:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD5]])
-; SSE-NEXT: [[BITREVERSE6:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD6]])
-; SSE-NEXT: [[BITREVERSE7:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD7]])
-; SSE-NEXT: [[BITREVERSE8:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD8]])
-; SSE-NEXT: [[BITREVERSE9:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD9]])
-; SSE-NEXT: [[BITREVERSE10:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD10]])
-; SSE-NEXT: [[BITREVERSE11:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD11]])
-; SSE-NEXT: [[BITREVERSE12:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD12]])
-; SSE-NEXT: [[BITREVERSE13:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD13]])
-; SSE-NEXT: [[BITREVERSE14:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD14]])
-; SSE-NEXT: [[BITREVERSE15:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD15]])
-; SSE-NEXT: store i16 [[BITREVERSE0]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 0), align 2
-; SSE-NEXT: store i16 [[BITREVERSE1]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 1), align 2
-; SSE-NEXT: store i16 [[BITREVERSE2]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 2), align 2
-; SSE-NEXT: store i16 [[BITREVERSE3]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 3), align 2
-; SSE-NEXT: store i16 [[BITREVERSE4]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 4), align 2
-; SSE-NEXT: store i16 [[BITREVERSE5]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 5), align 2
-; SSE-NEXT: store i16 [[BITREVERSE6]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 6), align 2
-; SSE-NEXT: store i16 [[BITREVERSE7]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 7), align 2
-; SSE-NEXT: store i16 [[BITREVERSE8]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 8), align 2
-; SSE-NEXT: store i16 [[BITREVERSE9]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 9), align 2
-; SSE-NEXT: store i16 [[BITREVERSE10]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 10), align 2
-; SSE-NEXT: store i16 [[BITREVERSE11]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 11), align 2
-; SSE-NEXT: store i16 [[BITREVERSE12]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 12), align 2
-; SSE-NEXT: store i16 [[BITREVERSE13]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 13), align 2
-; SSE-NEXT: store i16 [[BITREVERSE14]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 14), align 2
-; SSE-NEXT: store i16 [[BITREVERSE15]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 15), align 2
+; SSE-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([16 x i16]* @src16 to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> [[TMP1]])
+; SSE-NEXT: [[TMP4:%.*]] = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> [[TMP2]])
+; SSE-NEXT: store <8 x i16> [[TMP3]], <8 x i16>* bitcast ([16 x i16]* @dst16 to <8 x i16>*), align 2
+; SSE-NEXT: store <8 x i16> [[TMP4]], <8 x i16>* bitcast (i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 8) to <8 x i16>*), align 2
; SSE-NEXT: ret void
;
; AVX-LABEL: @bitreverse_16i16(
@@ -406,68 +253,11 @@ define void @bitreverse_16i16() #0 {
}
define void @bitreverse_16i8() #0 {
-; SSE-LABEL: @bitreverse_16i8(
-; SSE-NEXT: [[LD0:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 0), align 1
-; SSE-NEXT: [[LD1:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 1), align 1
-; SSE-NEXT: [[LD2:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 2), align 1
-; SSE-NEXT: [[LD3:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 3), align 1
-; SSE-NEXT: [[LD4:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 4), align 1
-; SSE-NEXT: [[LD5:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 5), align 1
-; SSE-NEXT: [[LD6:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 6), align 1
-; SSE-NEXT: [[LD7:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
-; SSE-NEXT: [[LD8:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 8), align 1
-; SSE-NEXT: [[LD9:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 9), align 1
-; SSE-NEXT: [[LD10:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 10), align 1
-; SSE-NEXT: [[LD11:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 11), align 1
-; SSE-NEXT: [[LD12:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 12), align 1
-; SSE-NEXT: [[LD13:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 13), align 1
-; SSE-NEXT: [[LD14:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 14), align 1
-; SSE-NEXT: [[LD15:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 15), align 1
-; SSE-NEXT: [[BITREVERSE0:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD0]])
-; SSE-NEXT: [[BITREVERSE1:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD1]])
-; SSE-NEXT: [[BITREVERSE2:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD2]])
-; SSE-NEXT: [[BITREVERSE3:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD3]])
-; SSE-NEXT: [[BITREVERSE4:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD4]])
-; SSE-NEXT: [[BITREVERSE5:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD5]])
-; SSE-NEXT: [[BITREVERSE6:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD6]])
-; SSE-NEXT: [[BITREVERSE7:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD7]])
-; SSE-NEXT: [[BITREVERSE8:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD8]])
-; SSE-NEXT: [[BITREVERSE9:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD9]])
-; SSE-NEXT: [[BITREVERSE10:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD10]])
-; SSE-NEXT: [[BITREVERSE11:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD11]])
-; SSE-NEXT: [[BITREVERSE12:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD12]])
-; SSE-NEXT: [[BITREVERSE13:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD13]])
-; SSE-NEXT: [[BITREVERSE14:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD14]])
-; SSE-NEXT: [[BITREVERSE15:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD15]])
-; SSE-NEXT: store i8 [[BITREVERSE0]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 0), align 1
-; SSE-NEXT: store i8 [[BITREVERSE1]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 1), align 1
-; SSE-NEXT: store i8 [[BITREVERSE2]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 2), align 1
-; SSE-NEXT: store i8 [[BITREVERSE3]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 3), align 1
-; SSE-NEXT: store i8 [[BITREVERSE4]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 4), align 1
-; SSE-NEXT: store i8 [[BITREVERSE5]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 5), align 1
-; SSE-NEXT: store i8 [[BITREVERSE6]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 6), align 1
-; SSE-NEXT: store i8 [[BITREVERSE7]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 7), align 1
-; SSE-NEXT: store i8 [[BITREVERSE8]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 8), align 1
-; SSE-NEXT: store i8 [[BITREVERSE9]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 9), align 1
-; SSE-NEXT: store i8 [[BITREVERSE10]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 10), align 1
-; SSE-NEXT: store i8 [[BITREVERSE11]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 11), align 1
-; SSE-NEXT: store i8 [[BITREVERSE12]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 12), align 1
-; SSE-NEXT: store i8 [[BITREVERSE13]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 13), align 1
-; SSE-NEXT: store i8 [[BITREVERSE14]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 14), align 1
-; SSE-NEXT: store i8 [[BITREVERSE15]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 15), align 1
-; SSE-NEXT: ret void
-;
-; AVX-LABEL: @bitreverse_16i8(
-; AVX-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([32 x i8]* @src8 to <16 x i8>*), align 1
-; AVX-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[TMP1]])
-; AVX-NEXT: store <16 x i8> [[TMP2]], <16 x i8>* bitcast ([32 x i8]* @dst8 to <16 x i8>*), align 1
-; AVX-NEXT: ret void
-;
-; XOP-LABEL: @bitreverse_16i8(
-; XOP-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([32 x i8]* @src8 to <16 x i8>*), align 1
-; XOP-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[TMP1]])
-; XOP-NEXT: store <16 x i8> [[TMP2]], <16 x i8>* bitcast ([32 x i8]* @dst8 to <16 x i8>*), align 1
-; XOP-NEXT: ret void
+; CHECK-LABEL: @bitreverse_16i8(
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([32 x i8]* @src8 to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[TMP1]])
+; CHECK-NEXT: store <16 x i8> [[TMP2]], <16 x i8>* bitcast ([32 x i8]* @dst8 to <16 x i8>*), align 1
+; CHECK-NEXT: ret void
;
%ld0 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 0), align 1
%ld1 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 1), align 1
@@ -521,122 +311,14 @@ define void @bitreverse_16i8() #0 {
}
define void @bitreverse_32i8() #0 {
-; SSE-LABEL: @bitreverse_32i8(
-; SSE-NEXT: [[LD0:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 0), align 1
-; SSE-NEXT: [[LD1:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 1), align 1
-; SSE-NEXT: [[LD2:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 2), align 1
-; SSE-NEXT: [[LD3:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 3), align 1
-; SSE-NEXT: [[LD4:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 4), align 1
-; SSE-NEXT: [[LD5:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 5), align 1
-; SSE-NEXT: [[LD6:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 6), align 1
-; SSE-NEXT: [[LD7:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
-; SSE-NEXT: [[LD8:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 8), align 1
-; SSE-NEXT: [[LD9:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 9), align 1
-; SSE-NEXT: [[LD10:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 10), align 1
-; SSE-NEXT: [[LD11:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 11), align 1
-; SSE-NEXT: [[LD12:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 12), align 1
-; SSE-NEXT: [[LD13:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 13), align 1
-; SSE-NEXT: [[LD14:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 14), align 1
-; SSE-NEXT: [[LD15:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 15), align 1
-; SSE-NEXT: [[LD16:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 16), align 1
-; SSE-NEXT: [[LD17:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 17), align 1
-; SSE-NEXT: [[LD18:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 18), align 1
-; SSE-NEXT: [[LD19:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 19), align 1
-; SSE-NEXT: [[LD20:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 20), align 1
-; SSE-NEXT: [[LD21:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 21), align 1
-; SSE-NEXT: [[LD22:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 22), align 1
-; SSE-NEXT: [[LD23:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 23), align 1
-; SSE-NEXT: [[LD24:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 24), align 1
-; SSE-NEXT: [[LD25:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 25), align 1
-; SSE-NEXT: [[LD26:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 26), align 1
-; SSE-NEXT: [[LD27:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 27), align 1
-; SSE-NEXT: [[LD28:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 28), align 1
-; SSE-NEXT: [[LD29:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 29), align 1
-; SSE-NEXT: [[LD30:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 30), align 1
-; SSE-NEXT: [[LD31:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 31), align 1
-; SSE-NEXT: [[BITREVERSE0:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD0]])
-; SSE-NEXT: [[BITREVERSE1:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD1]])
-; SSE-NEXT: [[BITREVERSE2:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD2]])
-; SSE-NEXT: [[BITREVERSE3:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD3]])
-; SSE-NEXT: [[BITREVERSE4:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD4]])
-; SSE-NEXT: [[BITREVERSE5:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD5]])
-; SSE-NEXT: [[BITREVERSE6:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD6]])
-; SSE-NEXT: [[BITREVERSE7:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD7]])
-; SSE-NEXT: [[BITREVERSE8:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD8]])
-; SSE-NEXT: [[BITREVERSE9:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD9]])
-; SSE-NEXT: [[BITREVERSE10:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD10]])
-; SSE-NEXT: [[BITREVERSE11:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD11]])
-; SSE-NEXT: [[BITREVERSE12:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD12]])
-; SSE-NEXT: [[BITREVERSE13:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD13]])
-; SSE-NEXT: [[BITREVERSE14:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD14]])
-; SSE-NEXT: [[BITREVERSE15:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD15]])
-; SSE-NEXT: [[BITREVERSE16:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD16]])
-; SSE-NEXT: [[BITREVERSE17:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD17]])
-; SSE-NEXT: [[BITREVERSE18:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD18]])
-; SSE-NEXT: [[BITREVERSE19:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD19]])
-; SSE-NEXT: [[BITREVERSE20:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD20]])
-; SSE-NEXT: [[BITREVERSE21:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD21]])
-; SSE-NEXT: [[BITREVERSE22:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD22]])
-; SSE-NEXT: [[BITREVERSE23:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD23]])
-; SSE-NEXT: [[BITREVERSE24:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD24]])
-; SSE-NEXT: [[BITREVERSE25:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD25]])
-; SSE-NEXT: [[BITREVERSE26:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD26]])
-; SSE-NEXT: [[BITREVERSE27:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD27]])
-; SSE-NEXT: [[BITREVERSE28:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD28]])
-; SSE-NEXT: [[BITREVERSE29:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD29]])
-; SSE-NEXT: [[BITREVERSE30:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD30]])
-; SSE-NEXT: [[BITREVERSE31:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD31]])
-; SSE-NEXT: store i8 [[BITREVERSE0]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 0), align 1
-; SSE-NEXT: store i8 [[BITREVERSE1]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 1), align 1
-; SSE-NEXT: store i8 [[BITREVERSE2]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 2), align 1
-; SSE-NEXT: store i8 [[BITREVERSE3]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 3), align 1
-; SSE-NEXT: store i8 [[BITREVERSE4]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 4), align 1
-; SSE-NEXT: store i8 [[BITREVERSE5]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 5), align 1
-; SSE-NEXT: store i8 [[BITREVERSE6]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 6), align 1
-; SSE-NEXT: store i8 [[BITREVERSE7]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 7), align 1
-; SSE-NEXT: store i8 [[BITREVERSE8]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 8), align 1
-; SSE-NEXT: store i8 [[BITREVERSE9]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 9), align 1
-; SSE-NEXT: store i8 [[BITREVERSE10]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 10), align 1
-; SSE-NEXT: store i8 [[BITREVERSE11]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 11), align 1
-; SSE-NEXT: store i8 [[BITREVERSE12]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 12), align 1
-; SSE-NEXT: store i8 [[BITREVERSE13]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 13), align 1
-; SSE-NEXT: store i8 [[BITREVERSE14]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 14), align 1
-; SSE-NEXT: store i8 [[BITREVERSE15]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 15), align 1
-; SSE-NEXT: store i8 [[BITREVERSE16]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 16), align 1
-; SSE-NEXT: store i8 [[BITREVERSE17]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 17), align 1
-; SSE-NEXT: store i8 [[BITREVERSE18]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 18), align 1
-; SSE-NEXT: store i8 [[BITREVERSE19]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 19), align 1
-; SSE-NEXT: store i8 [[BITREVERSE20]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 20), align 1
-; SSE-NEXT: store i8 [[BITREVERSE21]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 21), align 1
-; SSE-NEXT: store i8 [[BITREVERSE22]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 22), align 1
-; SSE-NEXT: store i8 [[BITREVERSE23]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 23), align 1
-; SSE-NEXT: store i8 [[BITREVERSE24]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 24), align 1
-; SSE-NEXT: store i8 [[BITREVERSE25]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 25), align 1
-; SSE-NEXT: store i8 [[BITREVERSE26]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 26), align 1
-; SSE-NEXT: store i8 [[BITREVERSE27]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 27), align 1
-; SSE-NEXT: store i8 [[BITREVERSE28]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 28), align 1
-; SSE-NEXT: store i8 [[BITREVERSE29]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 29), align 1
-; SSE-NEXT: store i8 [[BITREVERSE30]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 30), align 1
-; SSE-NEXT: store i8 [[BITREVERSE31]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 31), align 1
-; SSE-NEXT: ret void
-;
-; AVX-LABEL: @bitreverse_32i8(
-; AVX-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([32 x i8]* @src8 to <16 x i8>*), align 1
-; AVX-NEXT: [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 16) to <16 x i8>*), align 1
-; AVX-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[TMP1]])
-; AVX-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[TMP2]])
-; AVX-NEXT: store <16 x i8> [[TMP3]], <16 x i8>* bitcast ([32 x i8]* @dst8 to <16 x i8>*), align 1
-; AVX-NEXT: store <16 x i8> [[TMP4]], <16 x i8>* bitcast (i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 16) to <16 x i8>*), align 1
-; AVX-NEXT: ret void
-;
-; XOP-LABEL: @bitreverse_32i8(
-; XOP-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([32 x i8]* @src8 to <16 x i8>*), align 1
-; XOP-NEXT: [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 16) to <16 x i8>*), align 1
-; XOP-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[TMP1]])
-; XOP-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[TMP2]])
-; XOP-NEXT: store <16 x i8> [[TMP3]], <16 x i8>* bitcast ([32 x i8]* @dst8 to <16 x i8>*), align 1
-; XOP-NEXT: store <16 x i8> [[TMP4]], <16 x i8>* bitcast (i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 16) to <16 x i8>*), align 1
-; XOP-NEXT: ret void
+; CHECK-LABEL: @bitreverse_32i8(
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([32 x i8]* @src8 to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[TMP1]])
+; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[TMP2]])
+; CHECK-NEXT: store <16 x i8> [[TMP3]], <16 x i8>* bitcast ([32 x i8]* @dst8 to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP4]], <16 x i8>* bitcast (i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: ret void
;
%ld0 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 0), align 1
%ld1 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 1), align 1
diff --git a/test/Transforms/SLPVectorizer/X86/blending-shuffle.ll b/test/Transforms/SLPVectorizer/X86/blending-shuffle.ll
new file mode 100644
index 000000000000..ba0059ed4e51
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/blending-shuffle.ll
@@ -0,0 +1,167 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -slp-vectorizer -S -o - -mtriple=x86-64-unknown-linux -mcpu=bdver2 -instcombine | FileCheck %s
+
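+; The tests below rebuild result vectors (or scalar reductions) from lanes
+; taken out of one or two source vectors; the checks record the IR after the
+; SLP vectorizer runs and the trailing -instcombine pass cleans up.
+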
+define <2 x i8> @g(<2 x i8> %x, <2 x i8> %y) {
+; CHECK-LABEL: @g(
+; CHECK-NEXT: [[X0:%.*]] = extractelement <2 x i8> [[X:%.*]], i32 0
+; CHECK-NEXT: [[Y1:%.*]] = extractelement <2 x i8> [[Y:%.*]], i32 1
+; CHECK-NEXT: [[X0X0:%.*]] = mul i8 [[X0]], [[X0]]
+; CHECK-NEXT: [[Y1Y1:%.*]] = mul i8 [[Y1]], [[Y1]]
+; CHECK-NEXT: [[INS1:%.*]] = insertelement <2 x i8> undef, i8 [[X0X0]], i32 0
+; CHECK-NEXT: [[INS2:%.*]] = insertelement <2 x i8> [[INS1]], i8 [[Y1Y1]], i32 1
+; CHECK-NEXT: ret <2 x i8> [[INS2]]
+;
+ %x0 = extractelement <2 x i8> %x, i32 0
+ %y1 = extractelement <2 x i8> %y, i32 1
+ %x0x0 = mul i8 %x0, %x0
+ %y1y1 = mul i8 %y1, %y1
+ %ins1 = insertelement <2 x i8> undef, i8 %x0x0, i32 0
+ %ins2 = insertelement <2 x i8> %ins1, i8 %y1y1, i32 1
+ ret <2 x i8> %ins2
+}
+
+define <4 x i8> @h(<4 x i8> %x, <4 x i8> %y) {
+; CHECK-LABEL: @h(
+; CHECK-NEXT: [[X0:%.*]] = extractelement <4 x i8> [[X:%.*]], i32 0
+; CHECK-NEXT: [[X3:%.*]] = extractelement <4 x i8> [[X]], i32 3
+; CHECK-NEXT: [[Y1:%.*]] = extractelement <4 x i8> [[Y:%.*]], i32 1
+; CHECK-NEXT: [[Y2:%.*]] = extractelement <4 x i8> [[Y]], i32 2
+; CHECK-NEXT: [[X0X0:%.*]] = mul i8 [[X0]], [[X0]]
+; CHECK-NEXT: [[X3X3:%.*]] = mul i8 [[X3]], [[X3]]
+; CHECK-NEXT: [[Y1Y1:%.*]] = mul i8 [[Y1]], [[Y1]]
+; CHECK-NEXT: [[Y2Y2:%.*]] = mul i8 [[Y2]], [[Y2]]
+; CHECK-NEXT: [[INS1:%.*]] = insertelement <4 x i8> undef, i8 [[X0X0]], i32 0
+; CHECK-NEXT: [[INS2:%.*]] = insertelement <4 x i8> [[INS1]], i8 [[X3X3]], i32 1
+; CHECK-NEXT: [[INS3:%.*]] = insertelement <4 x i8> [[INS2]], i8 [[Y1Y1]], i32 2
+; CHECK-NEXT: [[INS4:%.*]] = insertelement <4 x i8> [[INS3]], i8 [[Y2Y2]], i32 3
+; CHECK-NEXT: ret <4 x i8> [[INS4]]
+;
+ %x0 = extractelement <4 x i8> %x, i32 0
+ %x3 = extractelement <4 x i8> %x, i32 3
+ %y1 = extractelement <4 x i8> %y, i32 1
+ %y2 = extractelement <4 x i8> %y, i32 2
+ %x0x0 = mul i8 %x0, %x0
+ %x3x3 = mul i8 %x3, %x3
+ %y1y1 = mul i8 %y1, %y1
+ %y2y2 = mul i8 %y2, %y2
+ %ins1 = insertelement <4 x i8> undef, i8 %x0x0, i32 0
+ %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
+ %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
+ %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
+ ret <4 x i8> %ins4
+}
+
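+; Same shape as @h, but the lane-0 operand is extracted from undef, so the
+; first mul/insert pair folds away entirely in the expected output.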
+define <4 x i8> @h_undef(<4 x i8> %x, <4 x i8> %y) {
+; CHECK-LABEL: @h_undef(
+; CHECK-NEXT: [[X3:%.*]] = extractelement <4 x i8> [[X:%.*]], i32 3
+; CHECK-NEXT: [[Y1:%.*]] = extractelement <4 x i8> [[Y:%.*]], i32 1
+; CHECK-NEXT: [[Y2:%.*]] = extractelement <4 x i8> [[Y]], i32 2
+; CHECK-NEXT: [[X3X3:%.*]] = mul i8 [[X3]], [[X3]]
+; CHECK-NEXT: [[Y1Y1:%.*]] = mul i8 [[Y1]], [[Y1]]
+; CHECK-NEXT: [[Y2Y2:%.*]] = mul i8 [[Y2]], [[Y2]]
+; CHECK-NEXT: [[INS2:%.*]] = insertelement <4 x i8> undef, i8 [[X3X3]], i32 1
+; CHECK-NEXT: [[INS3:%.*]] = insertelement <4 x i8> [[INS2]], i8 [[Y1Y1]], i32 2
+; CHECK-NEXT: [[INS4:%.*]] = insertelement <4 x i8> [[INS3]], i8 [[Y2Y2]], i32 3
+; CHECK-NEXT: ret <4 x i8> [[INS4]]
+;
+ %x0 = extractelement <4 x i8> undef, i32 0
+ %x3 = extractelement <4 x i8> %x, i32 3
+ %y1 = extractelement <4 x i8> %y, i32 1
+ %y2 = extractelement <4 x i8> %y, i32 2
+ %x0x0 = mul i8 %x0, %x0
+ %x3x3 = mul i8 %x3, %x3
+ %y1y1 = mul i8 %y1, %y1
+ %y2y2 = mul i8 %y2, %y2
+ %ins1 = insertelement <4 x i8> undef, i8 %x0x0, i32 0
+ %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
+ %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
+ %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
+ ret <4 x i8> %ins4
+}
+
+define i8 @i(<4 x i8> %x, <4 x i8> %y) {
+; CHECK-LABEL: @i(
+; CHECK-NEXT: [[X0:%.*]] = extractelement <4 x i8> [[X:%.*]], i32 0
+; CHECK-NEXT: [[X3:%.*]] = extractelement <4 x i8> [[X]], i32 3
+; CHECK-NEXT: [[Y1:%.*]] = extractelement <4 x i8> [[Y:%.*]], i32 1
+; CHECK-NEXT: [[Y2:%.*]] = extractelement <4 x i8> [[Y]], i32 2
+; CHECK-NEXT: [[X0X0:%.*]] = mul i8 [[X0]], [[X0]]
+; CHECK-NEXT: [[X3X3:%.*]] = mul i8 [[X3]], [[X3]]
+; CHECK-NEXT: [[Y1Y1:%.*]] = mul i8 [[Y1]], [[Y1]]
+; CHECK-NEXT: [[Y2Y2:%.*]] = mul i8 [[Y2]], [[Y2]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X0X0]], [[X3X3]]
+; CHECK-NEXT: [[TMP2:%.*]] = add i8 [[Y1Y1]], [[Y2Y2]]
+; CHECK-NEXT: [[TMP3:%.*]] = add i8 [[TMP1]], [[TMP2]]
+; CHECK-NEXT: ret i8 [[TMP3]]
+;
+ %x0 = extractelement <4 x i8> %x, i32 0
+ %x3 = extractelement <4 x i8> %x, i32 3
+ %y1 = extractelement <4 x i8> %y, i32 1
+ %y2 = extractelement <4 x i8> %y, i32 2
+ %x0x0 = mul i8 %x0, %x0
+ %x3x3 = mul i8 %x3, %x3
+ %y1y1 = mul i8 %y1, %y1
+ %y2y2 = mul i8 %y2, %y2
+ %1 = add i8 %x0x0, %x3x3
+ %2 = add i8 %y1y1, %y2y2
+ %3 = add i8 %1, %2
+ ret i8 %3
+}
+
+define i8 @j(<4 x i8> %x, <4 x i8> %y) {
+; CHECK-LABEL: @j(
+; CHECK-NEXT: [[X0:%.*]] = extractelement <4 x i8> [[X:%.*]], i32 0
+; CHECK-NEXT: [[X3:%.*]] = extractelement <4 x i8> [[X]], i32 3
+; CHECK-NEXT: [[Y1:%.*]] = extractelement <4 x i8> [[Y:%.*]], i32 1
+; CHECK-NEXT: [[Y2:%.*]] = extractelement <4 x i8> [[Y]], i32 2
+; CHECK-NEXT: [[X0X0:%.*]] = mul i8 [[X0]], [[X0]]
+; CHECK-NEXT: [[X3X3:%.*]] = mul i8 [[X3]], [[X3]]
+; CHECK-NEXT: [[Y1Y1:%.*]] = mul i8 [[Y1]], [[Y1]]
+; CHECK-NEXT: [[Y2Y2:%.*]] = mul i8 [[Y2]], [[Y2]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X0X0]], [[X3X3]]
+; CHECK-NEXT: [[TMP2:%.*]] = add i8 [[Y1Y1]], [[Y2Y2]]
+; CHECK-NEXT: [[TMP3:%.*]] = sdiv i8 [[TMP1]], [[TMP2]]
+; CHECK-NEXT: ret i8 [[TMP3]]
+;
+ %x0 = extractelement <4 x i8> %x, i32 0
+ %x3 = extractelement <4 x i8> %x, i32 3
+ %y1 = extractelement <4 x i8> %y, i32 1
+ %y2 = extractelement <4 x i8> %y, i32 2
+ %x0x0 = mul i8 %x0, %x0
+ %x3x3 = mul i8 %x3, %x3
+ %y1y1 = mul i8 %y1, %y1
+ %y2y2 = mul i8 %y2, %y2
+ %1 = add i8 %x0x0, %x3x3
+ %2 = add i8 %y1y1, %y2y2
+ %3 = sdiv i8 %1, %2
+ ret i8 %3
+}
+
+define i8 @k(<4 x i8> %x) {
+; CHECK-LABEL: @k(
+; CHECK-NEXT: [[X0:%.*]] = extractelement <4 x i8> [[X:%.*]], i32 0
+; CHECK-NEXT: [[X3:%.*]] = extractelement <4 x i8> [[X]], i32 3
+; CHECK-NEXT: [[X1:%.*]] = extractelement <4 x i8> [[X]], i32 1
+; CHECK-NEXT: [[X2:%.*]] = extractelement <4 x i8> [[X]], i32 2
+; CHECK-NEXT: [[X0X0:%.*]] = mul i8 [[X0]], [[X0]]
+; CHECK-NEXT: [[X3X3:%.*]] = mul i8 [[X3]], [[X3]]
+; CHECK-NEXT: [[X1X1:%.*]] = mul i8 [[X1]], [[X1]]
+; CHECK-NEXT: [[X2X2:%.*]] = mul i8 [[X2]], [[X2]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X0X0]], [[X3X3]]
+; CHECK-NEXT: [[TMP2:%.*]] = add i8 [[X1X1]], [[X2X2]]
+; CHECK-NEXT: [[TMP3:%.*]] = sdiv i8 [[TMP1]], [[TMP2]]
+; CHECK-NEXT: ret i8 [[TMP3]]
+;
+ %x0 = extractelement <4 x i8> %x, i32 0
+ %x3 = extractelement <4 x i8> %x, i32 3
+ %x1 = extractelement <4 x i8> %x, i32 1
+ %x2 = extractelement <4 x i8> %x, i32 2
+ %x0x0 = mul i8 %x0, %x0
+ %x3x3 = mul i8 %x3, %x3
+ %x1x1 = mul i8 %x1, %x1
+ %x2x2 = mul i8 %x2, %x2
+ %1 = add i8 %x0x0, %x3x3
+ %2 = add i8 %x1x1, %x2x2
+ %3 = sdiv i8 %1, %2
+ ret i8 %3
+}
diff --git a/test/Transforms/SLPVectorizer/X86/extractelement.ll b/test/Transforms/SLPVectorizer/X86/extractelement.ll
new file mode 100644
index 000000000000..10675f3be8a6
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/extractelement.ll
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-unknown-linux -march=core-avx2 | FileCheck %s
+; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-unknown-linux -march=core-avx2 -slp-threshold=-1 -slp-vectorize-hor-store | FileCheck %s --check-prefix=THRESH1
+; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-unknown-linux -march=core-avx2 -slp-threshold=-2 -slp-vectorize-hor-store | FileCheck %s --check-prefix=THRESH2
+
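+; Each function below carries checks only for the single prefix whose
+; threshold configuration it is meant to exercise.
+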
+@a = global float 0.000000e+00, align 4
+
+define float @f(<2 x float> %x) {
+; CHECK-LABEL: @f(
+; CHECK-NEXT: [[TMP1:%.*]] = fmul <2 x float> [[X:%.*]], [[X]]
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x float> [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x float> [[TMP1]], i32 1
+; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP2]], [[TMP3]]
+; CHECK-NEXT: ret float [[ADD]]
+;
+ %x0 = extractelement <2 x float> %x, i32 0
+ %x1 = extractelement <2 x float> %x, i32 1
+ %x0x0 = fmul float %x0, %x0
+ %x1x1 = fmul float %x1, %x1
+ %add = fadd float %x0x0, %x1x1
+ ret float %add
+}
+
+define float @f_used_out_of_tree(<2 x float> %x) {
+; THRESH2-LABEL: @f_used_out_of_tree(
+; THRESH2-NEXT: [[TMP1:%.*]] = extractelement <2 x float> [[X:%.*]], i32 0
+; THRESH2-NEXT: [[TMP2:%.*]] = fmul <2 x float> [[X]], [[X]]
+; THRESH2-NEXT: [[TMP3:%.*]] = extractelement <2 x float> [[TMP2]], i32 0
+; THRESH2-NEXT: [[TMP4:%.*]] = extractelement <2 x float> [[TMP2]], i32 1
+; THRESH2-NEXT: [[ADD:%.*]] = fadd float [[TMP3]], [[TMP4]]
+; THRESH2-NEXT: store float [[ADD]], float* @a
+; THRESH2-NEXT: ret float [[TMP1]]
+;
+ %x0 = extractelement <2 x float> %x, i32 0
+ %x1 = extractelement <2 x float> %x, i32 1
+ %x0x0 = fmul float %x0, %x0
+ %x1x1 = fmul float %x1, %x1
+ %add = fadd float %x0x0, %x1x1
+ store float %add, float* @a
+ ret float %x0
+}
+
+define float @f_used_twice_in_tree(<2 x float> %x) {
+; THRESH1-LABEL: @f_used_twice_in_tree(
+; THRESH1-NEXT: [[TMP1:%.*]] = extractelement <2 x float> [[X:%.*]], i32 1
+; THRESH1-NEXT: [[TMP2:%.*]] = insertelement <2 x float> undef, float [[TMP1]], i32 0
+; THRESH1-NEXT: [[TMP3:%.*]] = insertelement <2 x float> [[TMP2]], float [[TMP1]], i32 1
+; THRESH1-NEXT: [[TMP4:%.*]] = fmul <2 x float> [[X]], [[TMP3]]
+; THRESH1-NEXT: [[TMP5:%.*]] = extractelement <2 x float> [[TMP4]], i32 0
+; THRESH1-NEXT: [[TMP6:%.*]] = extractelement <2 x float> [[TMP4]], i32 1
+; THRESH1-NEXT: [[ADD:%.*]] = fadd float [[TMP5]], [[TMP6]]
+; THRESH1-NEXT: ret float [[ADD]]
+;
+ %x0 = extractelement <2 x float> %x, i32 0
+ %x1 = extractelement <2 x float> %x, i32 1
+ %x0x0 = fmul float %x0, %x1
+ %x1x1 = fmul float %x1, %x1
+ %add = fadd float %x0x0, %x1x1
+ ret float %add
+}
+
diff --git a/test/Transforms/SLPVectorizer/X86/horizontal-list.ll b/test/Transforms/SLPVectorizer/X86/horizontal-list.ll
index 3f9fffb9b624..73844037f12e 100644
--- a/test/Transforms/SLPVectorizer/X86/horizontal-list.ll
+++ b/test/Transforms/SLPVectorizer/X86/horizontal-list.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -slp-vectorizer -slp-vectorize-hor -slp-vectorize-hor-store -S < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 | FileCheck %s
+; RUN: opt -slp-vectorizer -slp-vectorize-hor -slp-vectorize-hor-store -S < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 -slp-threshold=-10 | FileCheck %s --check-prefix=THRESHOLD
@n = external local_unnamed_addr global i32, align 4
@arr = common local_unnamed_addr global [20 x float] zeroinitializer, align 16
@@ -12,29 +13,55 @@ define float @baz() {
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @n, align 4
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP0]], 3
; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
-; CHECK-NEXT: [[TMP1:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 0), align 16
-; CHECK-NEXT: [[TMP2:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 0), align 16
-; CHECK-NEXT: [[MUL4:%.*]] = fmul fast float [[TMP2]], [[TMP1]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[MUL4]], [[CONV]]
-; CHECK-NEXT: [[TMP3:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 1), align 4
-; CHECK-NEXT: [[TMP4:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 1), align 4
-; CHECK-NEXT: [[MUL4_1:%.*]] = fmul fast float [[TMP4]], [[TMP3]]
-; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float [[MUL4_1]], [[ADD]]
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 2) to <2 x float>*), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 2) to <2 x float>*), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = fmul fast <2 x float> [[TMP6]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x float> [[TMP7]], i32 0
-; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float [[TMP8]], [[ADD_1]]
-; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x float> [[TMP7]], i32 1
-; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float [[TMP9]], [[ADD_2]]
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x float>, <2 x float>* bitcast ([20 x float]* @arr to <2 x float>*), align 16
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x float>, <2 x float>* bitcast ([20 x float]* @arr1 to <2 x float>*), align 16
+; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <2 x float> [[TMP2]], [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x float> [[TMP3]], i32 0
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP4]], [[CONV]]
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x float> [[TMP3]], i32 1
+; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float [[TMP5]], [[ADD]]
+; CHECK-NEXT: [[TMP6:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 2) to <2 x float>*), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 2) to <2 x float>*), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = fmul fast <2 x float> [[TMP7]], [[TMP6]]
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x float> [[TMP8]], i32 0
+; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float [[TMP9]], [[ADD_1]]
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x float> [[TMP8]], i32 1
+; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float [[TMP10]], [[ADD_2]]
; CHECK-NEXT: [[ADD7:%.*]] = fadd fast float [[ADD_3]], [[CONV]]
-; CHECK-NEXT: [[ADD19:%.*]] = fadd fast float [[MUL4]], [[ADD7]]
-; CHECK-NEXT: [[ADD19_1:%.*]] = fadd fast float [[MUL4_1]], [[ADD19]]
-; CHECK-NEXT: [[ADD19_2:%.*]] = fadd fast float [[TMP8]], [[ADD19_1]]
-; CHECK-NEXT: [[ADD19_3:%.*]] = fadd fast float [[TMP9]], [[ADD19_2]]
+; CHECK-NEXT: [[ADD19:%.*]] = fadd fast float [[TMP4]], [[ADD7]]
+; CHECK-NEXT: [[ADD19_1:%.*]] = fadd fast float [[TMP5]], [[ADD19]]
+; CHECK-NEXT: [[ADD19_2:%.*]] = fadd fast float [[TMP9]], [[ADD19_1]]
+; CHECK-NEXT: [[ADD19_3:%.*]] = fadd fast float [[TMP10]], [[ADD19_2]]
; CHECK-NEXT: store float [[ADD19_3]], float* @res, align 4
; CHECK-NEXT: ret float [[ADD19_3]]
;
+; THRESHOLD-LABEL: @baz(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[TMP0:%.*]] = load i32, i32* @n, align 4
+; THRESHOLD-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP0]], 3
+; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <2 x float>, <2 x float>* bitcast ([20 x float]* @arr to <2 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP2:%.*]] = load <2 x float>, <2 x float>* bitcast ([20 x float]* @arr1 to <2 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP3:%.*]] = fmul fast <2 x float> [[TMP2]], [[TMP1]]
+; THRESHOLD-NEXT: [[TMP4:%.*]] = extractelement <2 x float> [[TMP3]], i32 0
+; THRESHOLD-NEXT: [[ADD:%.*]] = fadd fast float [[TMP4]], [[CONV]]
+; THRESHOLD-NEXT: [[TMP5:%.*]] = extractelement <2 x float> [[TMP3]], i32 1
+; THRESHOLD-NEXT: [[ADD_1:%.*]] = fadd fast float [[TMP5]], [[ADD]]
+; THRESHOLD-NEXT: [[TMP6:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 2) to <2 x float>*), align 8
+; THRESHOLD-NEXT: [[TMP7:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 2) to <2 x float>*), align 8
+; THRESHOLD-NEXT: [[TMP8:%.*]] = fmul fast <2 x float> [[TMP7]], [[TMP6]]
+; THRESHOLD-NEXT: [[TMP9:%.*]] = extractelement <2 x float> [[TMP8]], i32 0
+; THRESHOLD-NEXT: [[ADD_2:%.*]] = fadd fast float [[TMP9]], [[ADD_1]]
+; THRESHOLD-NEXT: [[TMP10:%.*]] = extractelement <2 x float> [[TMP8]], i32 1
+; THRESHOLD-NEXT: [[ADD_3:%.*]] = fadd fast float [[TMP10]], [[ADD_2]]
+; THRESHOLD-NEXT: [[ADD7:%.*]] = fadd fast float [[ADD_3]], [[CONV]]
+; THRESHOLD-NEXT: [[ADD19:%.*]] = fadd fast float [[TMP4]], [[ADD7]]
+; THRESHOLD-NEXT: [[ADD19_1:%.*]] = fadd fast float [[TMP5]], [[ADD19]]
+; THRESHOLD-NEXT: [[ADD19_2:%.*]] = fadd fast float [[TMP9]], [[ADD19_1]]
+; THRESHOLD-NEXT: [[ADD19_3:%.*]] = fadd fast float [[TMP10]], [[ADD19_2]]
+; THRESHOLD-NEXT: store float [[ADD19_3]], float* @res, align 4
+; THRESHOLD-NEXT: ret float [[ADD19_3]]
+;
entry:
%0 = load i32, i32* @n, align 4
%mul = mul nsw i32 %0, 3
@@ -70,42 +97,62 @@ define float @bazz() {
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @n, align 4
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP0]], 3
; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
-; CHECK-NEXT: [[TMP1:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 0), align 16
-; CHECK-NEXT: [[TMP2:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 0), align 16
-; CHECK-NEXT: [[MUL4:%.*]] = fmul fast float [[TMP2]], [[TMP1]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[MUL4]], [[CONV]]
-; CHECK-NEXT: [[TMP3:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 1), align 4
-; CHECK-NEXT: [[TMP4:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 1), align 4
-; CHECK-NEXT: [[MUL4_1:%.*]] = fmul fast float [[TMP4]], [[TMP3]]
-; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float [[MUL4_1]], [[ADD]]
-; CHECK-NEXT: [[TMP5:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 2), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 2), align 8
-; CHECK-NEXT: [[MUL4_2:%.*]] = fmul fast float [[TMP6]], [[TMP5]]
-; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float [[MUL4_2]], [[ADD_1]]
-; CHECK-NEXT: [[TMP7:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 3), align 4
-; CHECK-NEXT: [[TMP8:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 3), align 4
-; CHECK-NEXT: [[MUL4_3:%.*]] = fmul fast float [[TMP8]], [[TMP7]]
-; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float [[MUL4_3]], [[ADD_2]]
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* bitcast ([20 x float]* @arr to <8 x float>*), align 16
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x float>, <8 x float>* bitcast ([20 x float]* @arr1 to <8 x float>*), align 16
+; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <8 x float> [[TMP2]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float undef, [[CONV]]
+; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float undef, [[ADD]]
+; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float undef, [[ADD_1]]
+; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float undef, [[ADD_2]]
; CHECK-NEXT: [[MUL5:%.*]] = shl nsw i32 [[TMP0]], 2
; CHECK-NEXT: [[CONV6:%.*]] = sitofp i32 [[MUL5]] to float
; CHECK-NEXT: [[ADD7:%.*]] = fadd fast float [[ADD_3]], [[CONV6]]
-; CHECK-NEXT: [[TMP9:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 4), align 16
-; CHECK-NEXT: [[TMP10:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 4), align 16
-; CHECK-NEXT: [[MUL18:%.*]] = fmul fast float [[TMP10]], [[TMP9]]
-; CHECK-NEXT: [[ADD19:%.*]] = fadd fast float [[MUL18]], [[ADD7]]
-; CHECK-NEXT: [[TMP11:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 5), align 4
-; CHECK-NEXT: [[TMP12:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 5), align 4
-; CHECK-NEXT: [[MUL18_1:%.*]] = fmul fast float [[TMP12]], [[TMP11]]
-; CHECK-NEXT: [[ADD19_1:%.*]] = fadd fast float [[MUL18_1]], [[ADD19]]
-; CHECK-NEXT: [[TMP13:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 6) to <2 x float>*), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 6) to <2 x float>*), align 8
-; CHECK-NEXT: [[TMP15:%.*]] = fmul fast <2 x float> [[TMP14]], [[TMP13]]
-; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x float> [[TMP15]], i32 0
-; CHECK-NEXT: [[ADD19_2:%.*]] = fadd fast float [[TMP16]], [[ADD19_1]]
-; CHECK-NEXT: [[TMP17:%.*]] = extractelement <2 x float> [[TMP15]], i32 1
-; CHECK-NEXT: [[ADD19_3:%.*]] = fadd fast float [[TMP17]], [[ADD19_2]]
-; CHECK-NEXT: store float [[ADD19_3]], float* @res, align 4
-; CHECK-NEXT: ret float [[ADD19_3]]
+; CHECK-NEXT: [[ADD19:%.*]] = fadd fast float undef, [[ADD7]]
+; CHECK-NEXT: [[ADD19_1:%.*]] = fadd fast float undef, [[ADD19]]
+; CHECK-NEXT: [[ADD19_2:%.*]] = fadd fast float undef, [[ADD19_1]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x float> [[TMP3]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <8 x float> [[TMP3]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x float> [[BIN_RDX]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <8 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x float> [[BIN_RDX2]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX4:%.*]] = fadd fast <8 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <8 x float> [[BIN_RDX4]], i32 0
+; CHECK-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP4]], [[CONV]]
+; CHECK-NEXT: [[BIN_EXTRA5:%.*]] = fadd fast float [[BIN_EXTRA]], [[CONV6]]
+; CHECK-NEXT: [[ADD19_3:%.*]] = fadd fast float undef, [[ADD19_2]]
+; CHECK-NEXT: store float [[BIN_EXTRA5]], float* @res, align 4
+; CHECK-NEXT: ret float [[BIN_EXTRA5]]
+;
+; THRESHOLD-LABEL: @bazz(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[TMP0:%.*]] = load i32, i32* @n, align 4
+; THRESHOLD-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP0]], 3
+; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* bitcast ([20 x float]* @arr to <8 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP2:%.*]] = load <8 x float>, <8 x float>* bitcast ([20 x float]* @arr1 to <8 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP3:%.*]] = fmul fast <8 x float> [[TMP2]], [[TMP1]]
+; THRESHOLD-NEXT: [[ADD:%.*]] = fadd fast float undef, [[CONV]]
+; THRESHOLD-NEXT: [[ADD_1:%.*]] = fadd fast float undef, [[ADD]]
+; THRESHOLD-NEXT: [[ADD_2:%.*]] = fadd fast float undef, [[ADD_1]]
+; THRESHOLD-NEXT: [[ADD_3:%.*]] = fadd fast float undef, [[ADD_2]]
+; THRESHOLD-NEXT: [[MUL5:%.*]] = shl nsw i32 [[TMP0]], 2
+; THRESHOLD-NEXT: [[CONV6:%.*]] = sitofp i32 [[MUL5]] to float
+; THRESHOLD-NEXT: [[ADD7:%.*]] = fadd fast float [[ADD_3]], [[CONV6]]
+; THRESHOLD-NEXT: [[ADD19:%.*]] = fadd fast float undef, [[ADD7]]
+; THRESHOLD-NEXT: [[ADD19_1:%.*]] = fadd fast float undef, [[ADD19]]
+; THRESHOLD-NEXT: [[ADD19_2:%.*]] = fadd fast float undef, [[ADD19_1]]
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x float> [[TMP3]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = fadd fast <8 x float> [[TMP3]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x float> [[BIN_RDX]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = fadd fast <8 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x float> [[BIN_RDX2]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX4:%.*]] = fadd fast <8 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; THRESHOLD-NEXT: [[TMP4:%.*]] = extractelement <8 x float> [[BIN_RDX4]], i32 0
+; THRESHOLD-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP4]], [[CONV]]
+; THRESHOLD-NEXT: [[BIN_EXTRA5:%.*]] = fadd fast float [[BIN_EXTRA]], [[CONV6]]
+; THRESHOLD-NEXT: [[ADD19_3:%.*]] = fadd fast float undef, [[ADD19_2]]
+; THRESHOLD-NEXT: store float [[BIN_EXTRA5]], float* @res, align 4
+; THRESHOLD-NEXT: ret float [[BIN_EXTRA5]]
;
entry:
%0 = load i32, i32* @n, align 4
@@ -155,24 +202,39 @@ define float @bazzz() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @n, align 4
; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
-; CHECK-NEXT: [[TMP1:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 0), align 16
-; CHECK-NEXT: [[TMP2:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 0), align 16
-; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[TMP2]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 1), align 4
-; CHECK-NEXT: [[TMP4:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 1), align 4
-; CHECK-NEXT: [[MUL_1:%.*]] = fmul fast float [[TMP4]], [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = fadd fast float [[MUL_1]], [[MUL]]
-; CHECK-NEXT: [[TMP6:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 2), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 2), align 8
-; CHECK-NEXT: [[MUL_2:%.*]] = fmul fast float [[TMP7]], [[TMP6]]
-; CHECK-NEXT: [[TMP8:%.*]] = fadd fast float [[MUL_2]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 3), align 4
-; CHECK-NEXT: [[TMP10:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 3), align 4
-; CHECK-NEXT: [[MUL_3:%.*]] = fmul fast float [[TMP10]], [[TMP9]]
-; CHECK-NEXT: [[TMP11:%.*]] = fadd fast float [[MUL_3]], [[TMP8]]
-; CHECK-NEXT: [[TMP12:%.*]] = fmul fast float [[CONV]], [[TMP11]]
-; CHECK-NEXT: store float [[TMP12]], float* @res, align 4
-; CHECK-NEXT: ret float [[TMP12]]
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([20 x float]* @arr to <4 x float>*), align 16
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, <4 x float>* bitcast ([20 x float]* @arr1 to <4 x float>*), align 16
+; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = fadd fast float undef, undef
+; CHECK-NEXT: [[TMP5:%.*]] = fadd fast float undef, [[TMP4]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <4 x float> [[TMP3]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x float> [[BIN_RDX]], <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <4 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[BIN_RDX2]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = fadd fast float undef, [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = fmul fast float [[CONV]], [[TMP6]]
+; CHECK-NEXT: store float [[TMP8]], float* @res, align 4
+; CHECK-NEXT: ret float [[TMP8]]
+;
+; THRESHOLD-LABEL: @bazzz(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[TMP0:%.*]] = load i32, i32* @n, align 4
+; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([20 x float]* @arr to <4 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP2:%.*]] = load <4 x float>, <4 x float>* bitcast ([20 x float]* @arr1 to <4 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP1]]
+; THRESHOLD-NEXT: [[TMP4:%.*]] = fadd fast float undef, undef
+; THRESHOLD-NEXT: [[TMP5:%.*]] = fadd fast float undef, [[TMP4]]
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = fadd fast <4 x float> [[TMP3]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x float> [[BIN_RDX]], <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = fadd fast <4 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[BIN_RDX2]], i32 0
+; THRESHOLD-NEXT: [[TMP7:%.*]] = fadd fast float undef, [[TMP5]]
+; THRESHOLD-NEXT: [[TMP8:%.*]] = fmul fast float [[CONV]], [[TMP6]]
+; THRESHOLD-NEXT: store float [[TMP8]], float* @res, align 4
+; THRESHOLD-NEXT: ret float [[TMP8]]
;
entry:
%0 = load i32, i32* @n, align 4
@@ -202,26 +264,42 @@ define i32 @foo() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @n, align 4
; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
-; CHECK-NEXT: [[TMP1:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 0), align 16
-; CHECK-NEXT: [[TMP2:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 0), align 16
-; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[TMP2]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 1), align 4
-; CHECK-NEXT: [[TMP4:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 1), align 4
-; CHECK-NEXT: [[MUL_1:%.*]] = fmul fast float [[TMP4]], [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = fadd fast float [[MUL_1]], [[MUL]]
-; CHECK-NEXT: [[TMP6:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 2), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 2), align 8
-; CHECK-NEXT: [[MUL_2:%.*]] = fmul fast float [[TMP7]], [[TMP6]]
-; CHECK-NEXT: [[TMP8:%.*]] = fadd fast float [[MUL_2]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 3), align 4
-; CHECK-NEXT: [[TMP10:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 3), align 4
-; CHECK-NEXT: [[MUL_3:%.*]] = fmul fast float [[TMP10]], [[TMP9]]
-; CHECK-NEXT: [[TMP11:%.*]] = fadd fast float [[MUL_3]], [[TMP8]]
-; CHECK-NEXT: [[TMP12:%.*]] = fmul fast float [[CONV]], [[TMP11]]
-; CHECK-NEXT: [[CONV4:%.*]] = fptosi float [[TMP12]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([20 x float]* @arr to <4 x float>*), align 16
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, <4 x float>* bitcast ([20 x float]* @arr1 to <4 x float>*), align 16
+; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = fadd fast float undef, undef
+; CHECK-NEXT: [[TMP5:%.*]] = fadd fast float undef, [[TMP4]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <4 x float> [[TMP3]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x float> [[BIN_RDX]], <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <4 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[BIN_RDX2]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = fadd fast float undef, [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = fmul fast float [[CONV]], [[TMP6]]
+; CHECK-NEXT: [[CONV4:%.*]] = fptosi float [[TMP8]] to i32
; CHECK-NEXT: store i32 [[CONV4]], i32* @n, align 4
; CHECK-NEXT: ret i32 [[CONV4]]
;
+; THRESHOLD-LABEL: @foo(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[TMP0:%.*]] = load i32, i32* @n, align 4
+; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([20 x float]* @arr to <4 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP2:%.*]] = load <4 x float>, <4 x float>* bitcast ([20 x float]* @arr1 to <4 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP1]]
+; THRESHOLD-NEXT: [[TMP4:%.*]] = fadd fast float undef, undef
+; THRESHOLD-NEXT: [[TMP5:%.*]] = fadd fast float undef, [[TMP4]]
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = fadd fast <4 x float> [[TMP3]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x float> [[BIN_RDX]], <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = fadd fast <4 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[BIN_RDX2]], i32 0
+; THRESHOLD-NEXT: [[TMP7:%.*]] = fadd fast float undef, [[TMP5]]
+; THRESHOLD-NEXT: [[TMP8:%.*]] = fmul fast float [[CONV]], [[TMP6]]
+; THRESHOLD-NEXT: [[CONV4:%.*]] = fptosi float [[TMP8]] to i32
+; THRESHOLD-NEXT: store i32 [[CONV4]], i32* @n, align 4
+; THRESHOLD-NEXT: ret i32 [[CONV4]]
+;
entry:
%0 = load i32, i32* @n, align 4
%conv = sitofp i32 %0 to float
@@ -269,6 +347,28 @@ define float @bar() {
; CHECK-NEXT: store float [[MAX_0_MUL3_2]], float* @res, align 4
; CHECK-NEXT: ret float [[MAX_0_MUL3_2]]
;
+; THRESHOLD-LABEL: @bar(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[TMP0:%.*]] = load <2 x float>, <2 x float>* bitcast ([20 x float]* @arr to <2 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <2 x float>, <2 x float>* bitcast ([20 x float]* @arr1 to <2 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP2:%.*]] = fmul fast <2 x float> [[TMP1]], [[TMP0]]
+; THRESHOLD-NEXT: [[TMP3:%.*]] = extractelement <2 x float> [[TMP2]], i32 0
+; THRESHOLD-NEXT: [[TMP4:%.*]] = extractelement <2 x float> [[TMP2]], i32 1
+; THRESHOLD-NEXT: [[CMP4:%.*]] = fcmp fast ogt float [[TMP3]], [[TMP4]]
+; THRESHOLD-NEXT: [[MAX_0_MUL3:%.*]] = select i1 [[CMP4]], float [[TMP3]], float [[TMP4]]
+; THRESHOLD-NEXT: [[TMP5:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 2), align 8
+; THRESHOLD-NEXT: [[TMP6:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 2), align 8
+; THRESHOLD-NEXT: [[MUL3_1:%.*]] = fmul fast float [[TMP6]], [[TMP5]]
+; THRESHOLD-NEXT: [[CMP4_1:%.*]] = fcmp fast ogt float [[MAX_0_MUL3]], [[MUL3_1]]
+; THRESHOLD-NEXT: [[MAX_0_MUL3_1:%.*]] = select i1 [[CMP4_1]], float [[MAX_0_MUL3]], float [[MUL3_1]]
+; THRESHOLD-NEXT: [[TMP7:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 3), align 4
+; THRESHOLD-NEXT: [[TMP8:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 3), align 4
+; THRESHOLD-NEXT: [[MUL3_2:%.*]] = fmul fast float [[TMP8]], [[TMP7]]
+; THRESHOLD-NEXT: [[CMP4_2:%.*]] = fcmp fast ogt float [[MAX_0_MUL3_1]], [[MUL3_2]]
+; THRESHOLD-NEXT: [[MAX_0_MUL3_2:%.*]] = select i1 [[CMP4_2]], float [[MAX_0_MUL3_1]], float [[MUL3_2]]
+; THRESHOLD-NEXT: store float [[MAX_0_MUL3_2]], float* @res, align 4
+; THRESHOLD-NEXT: ret float [[MAX_0_MUL3_2]]
+;
entry:
%0 = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 0), align 16
%1 = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 0), align 16
@@ -292,3 +392,1344 @@ entry:
ret float %max.0.mul3.2
}
+define float @f(float* nocapture readonly %x) {
+; CHECK-LABEL: @f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; CHECK-NEXT: [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, float* [[X]], i64 8
+; CHECK-NEXT: [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, float* [[X]], i64 9
+; CHECK-NEXT: [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, float* [[X]], i64 10
+; CHECK-NEXT: [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, float* [[X]], i64 11
+; CHECK-NEXT: [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, float* [[X]], i64 12
+; CHECK-NEXT: [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, float* [[X]], i64 13
+; CHECK-NEXT: [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, float* [[X]], i64 14
+; CHECK-NEXT: [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, float* [[X]], i64 15
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <16 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x float>, <16 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float undef, undef
+; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float undef, [[ADD_1]]
+; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float undef, [[ADD_2]]
+; CHECK-NEXT: [[ADD_4:%.*]] = fadd fast float undef, [[ADD_3]]
+; CHECK-NEXT: [[ADD_5:%.*]] = fadd fast float undef, [[ADD_4]]
+; CHECK-NEXT: [[ADD_6:%.*]] = fadd fast float undef, [[ADD_5]]
+; CHECK-NEXT: [[ADD_7:%.*]] = fadd fast float undef, [[ADD_6]]
+; CHECK-NEXT: [[ADD_8:%.*]] = fadd fast float undef, [[ADD_7]]
+; CHECK-NEXT: [[ADD_9:%.*]] = fadd fast float undef, [[ADD_8]]
+; CHECK-NEXT: [[ADD_10:%.*]] = fadd fast float undef, [[ADD_9]]
+; CHECK-NEXT: [[ADD_11:%.*]] = fadd fast float undef, [[ADD_10]]
+; CHECK-NEXT: [[ADD_12:%.*]] = fadd fast float undef, [[ADD_11]]
+; CHECK-NEXT: [[ADD_13:%.*]] = fadd fast float undef, [[ADD_12]]
+; CHECK-NEXT: [[ADD_14:%.*]] = fadd fast float undef, [[ADD_13]]
+; CHECK-NEXT: [[ADD_15:%.*]] = fadd fast float undef, [[ADD_14]]
+; CHECK-NEXT: [[ARRAYIDX_16:%.*]] = getelementptr inbounds float, float* [[X]], i64 16
+; CHECK-NEXT: [[ARRAYIDX_17:%.*]] = getelementptr inbounds float, float* [[X]], i64 17
+; CHECK-NEXT: [[ARRAYIDX_18:%.*]] = getelementptr inbounds float, float* [[X]], i64 18
+; CHECK-NEXT: [[ARRAYIDX_19:%.*]] = getelementptr inbounds float, float* [[X]], i64 19
+; CHECK-NEXT: [[ARRAYIDX_20:%.*]] = getelementptr inbounds float, float* [[X]], i64 20
+; CHECK-NEXT: [[ARRAYIDX_21:%.*]] = getelementptr inbounds float, float* [[X]], i64 21
+; CHECK-NEXT: [[ARRAYIDX_22:%.*]] = getelementptr inbounds float, float* [[X]], i64 22
+; CHECK-NEXT: [[ARRAYIDX_23:%.*]] = getelementptr inbounds float, float* [[X]], i64 23
+; CHECK-NEXT: [[ARRAYIDX_24:%.*]] = getelementptr inbounds float, float* [[X]], i64 24
+; CHECK-NEXT: [[ARRAYIDX_25:%.*]] = getelementptr inbounds float, float* [[X]], i64 25
+; CHECK-NEXT: [[ARRAYIDX_26:%.*]] = getelementptr inbounds float, float* [[X]], i64 26
+; CHECK-NEXT: [[ARRAYIDX_27:%.*]] = getelementptr inbounds float, float* [[X]], i64 27
+; CHECK-NEXT: [[ARRAYIDX_28:%.*]] = getelementptr inbounds float, float* [[X]], i64 28
+; CHECK-NEXT: [[ARRAYIDX_29:%.*]] = getelementptr inbounds float, float* [[X]], i64 29
+; CHECK-NEXT: [[ARRAYIDX_30:%.*]] = getelementptr inbounds float, float* [[X]], i64 30
+; CHECK-NEXT: [[ARRAYIDX_31:%.*]] = getelementptr inbounds float, float* [[X]], i64 31
+; CHECK-NEXT: [[ARRAYIDX_32:%.*]] = getelementptr inbounds float, float* [[X]], i64 32
+; CHECK-NEXT: [[ARRAYIDX_33:%.*]] = getelementptr inbounds float, float* [[X]], i64 33
+; CHECK-NEXT: [[ARRAYIDX_34:%.*]] = getelementptr inbounds float, float* [[X]], i64 34
+; CHECK-NEXT: [[ARRAYIDX_35:%.*]] = getelementptr inbounds float, float* [[X]], i64 35
+; CHECK-NEXT: [[ARRAYIDX_36:%.*]] = getelementptr inbounds float, float* [[X]], i64 36
+; CHECK-NEXT: [[ARRAYIDX_37:%.*]] = getelementptr inbounds float, float* [[X]], i64 37
+; CHECK-NEXT: [[ARRAYIDX_38:%.*]] = getelementptr inbounds float, float* [[X]], i64 38
+; CHECK-NEXT: [[ARRAYIDX_39:%.*]] = getelementptr inbounds float, float* [[X]], i64 39
+; CHECK-NEXT: [[ARRAYIDX_40:%.*]] = getelementptr inbounds float, float* [[X]], i64 40
+; CHECK-NEXT: [[ARRAYIDX_41:%.*]] = getelementptr inbounds float, float* [[X]], i64 41
+; CHECK-NEXT: [[ARRAYIDX_42:%.*]] = getelementptr inbounds float, float* [[X]], i64 42
+; CHECK-NEXT: [[ARRAYIDX_43:%.*]] = getelementptr inbounds float, float* [[X]], i64 43
+; CHECK-NEXT: [[ARRAYIDX_44:%.*]] = getelementptr inbounds float, float* [[X]], i64 44
+; CHECK-NEXT: [[ARRAYIDX_45:%.*]] = getelementptr inbounds float, float* [[X]], i64 45
+; CHECK-NEXT: [[ARRAYIDX_46:%.*]] = getelementptr inbounds float, float* [[X]], i64 46
+; CHECK-NEXT: [[ARRAYIDX_47:%.*]] = getelementptr inbounds float, float* [[X]], i64 47
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[ARRAYIDX_16]] to <32 x float>*
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x float>, <32 x float>* [[TMP2]], align 4
+; CHECK-NEXT: [[ADD_16:%.*]] = fadd fast float undef, [[ADD_15]]
+; CHECK-NEXT: [[ADD_17:%.*]] = fadd fast float undef, [[ADD_16]]
+; CHECK-NEXT: [[ADD_18:%.*]] = fadd fast float undef, [[ADD_17]]
+; CHECK-NEXT: [[ADD_19:%.*]] = fadd fast float undef, [[ADD_18]]
+; CHECK-NEXT: [[ADD_20:%.*]] = fadd fast float undef, [[ADD_19]]
+; CHECK-NEXT: [[ADD_21:%.*]] = fadd fast float undef, [[ADD_20]]
+; CHECK-NEXT: [[ADD_22:%.*]] = fadd fast float undef, [[ADD_21]]
+; CHECK-NEXT: [[ADD_23:%.*]] = fadd fast float undef, [[ADD_22]]
+; CHECK-NEXT: [[ADD_24:%.*]] = fadd fast float undef, [[ADD_23]]
+; CHECK-NEXT: [[ADD_25:%.*]] = fadd fast float undef, [[ADD_24]]
+; CHECK-NEXT: [[ADD_26:%.*]] = fadd fast float undef, [[ADD_25]]
+; CHECK-NEXT: [[ADD_27:%.*]] = fadd fast float undef, [[ADD_26]]
+; CHECK-NEXT: [[ADD_28:%.*]] = fadd fast float undef, [[ADD_27]]
+; CHECK-NEXT: [[ADD_29:%.*]] = fadd fast float undef, [[ADD_28]]
+; CHECK-NEXT: [[ADD_30:%.*]] = fadd fast float undef, [[ADD_29]]
+; CHECK-NEXT: [[ADD_31:%.*]] = fadd fast float undef, [[ADD_30]]
+; CHECK-NEXT: [[ADD_32:%.*]] = fadd fast float undef, [[ADD_31]]
+; CHECK-NEXT: [[ADD_33:%.*]] = fadd fast float undef, [[ADD_32]]
+; CHECK-NEXT: [[ADD_34:%.*]] = fadd fast float undef, [[ADD_33]]
+; CHECK-NEXT: [[ADD_35:%.*]] = fadd fast float undef, [[ADD_34]]
+; CHECK-NEXT: [[ADD_36:%.*]] = fadd fast float undef, [[ADD_35]]
+; CHECK-NEXT: [[ADD_37:%.*]] = fadd fast float undef, [[ADD_36]]
+; CHECK-NEXT: [[ADD_38:%.*]] = fadd fast float undef, [[ADD_37]]
+; CHECK-NEXT: [[ADD_39:%.*]] = fadd fast float undef, [[ADD_38]]
+; CHECK-NEXT: [[ADD_40:%.*]] = fadd fast float undef, [[ADD_39]]
+; CHECK-NEXT: [[ADD_41:%.*]] = fadd fast float undef, [[ADD_40]]
+; CHECK-NEXT: [[ADD_42:%.*]] = fadd fast float undef, [[ADD_41]]
+; CHECK-NEXT: [[ADD_43:%.*]] = fadd fast float undef, [[ADD_42]]
+; CHECK-NEXT: [[ADD_44:%.*]] = fadd fast float undef, [[ADD_43]]
+; CHECK-NEXT: [[ADD_45:%.*]] = fadd fast float undef, [[ADD_44]]
+; CHECK-NEXT: [[ADD_46:%.*]] = fadd fast float undef, [[ADD_45]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <32 x float> [[TMP3]], <32 x float> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <32 x float> [[TMP3]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <32 x float> [[BIN_RDX]], <32 x float> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <32 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <32 x float> [[BIN_RDX2]], <32 x float> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX4:%.*]] = fadd fast <32 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; CHECK-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <32 x float> [[BIN_RDX4]], <32 x float> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX6:%.*]] = fadd fast <32 x float> [[BIN_RDX4]], [[RDX_SHUF5]]
+; CHECK-NEXT: [[RDX_SHUF7:%.*]] = shufflevector <32 x float> [[BIN_RDX6]], <32 x float> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX8:%.*]] = fadd fast <32 x float> [[BIN_RDX6]], [[RDX_SHUF7]]
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <32 x float> [[BIN_RDX8]], i32 0
+; CHECK-NEXT: [[RDX_SHUF9:%.*]] = shufflevector <16 x float> [[TMP1]], <16 x float> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX10:%.*]] = fadd fast <16 x float> [[TMP1]], [[RDX_SHUF9]]
+; CHECK-NEXT: [[RDX_SHUF11:%.*]] = shufflevector <16 x float> [[BIN_RDX10]], <16 x float> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX12:%.*]] = fadd fast <16 x float> [[BIN_RDX10]], [[RDX_SHUF11]]
+; CHECK-NEXT: [[RDX_SHUF13:%.*]] = shufflevector <16 x float> [[BIN_RDX12]], <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX14:%.*]] = fadd fast <16 x float> [[BIN_RDX12]], [[RDX_SHUF13]]
+; CHECK-NEXT: [[RDX_SHUF15:%.*]] = shufflevector <16 x float> [[BIN_RDX14]], <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX16:%.*]] = fadd fast <16 x float> [[BIN_RDX14]], [[RDX_SHUF15]]
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <16 x float> [[BIN_RDX16]], i32 0
+; CHECK-NEXT: [[BIN_RDX17:%.*]] = fadd fast float [[TMP4]], [[TMP5]]
+; CHECK-NEXT: [[ADD_47:%.*]] = fadd fast float undef, [[ADD_46]]
+; CHECK-NEXT: ret float [[BIN_RDX17]]
+;
+; THRESHOLD-LABEL: @f(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; THRESHOLD-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; THRESHOLD-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; THRESHOLD-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; THRESHOLD-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; THRESHOLD-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; THRESHOLD-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; THRESHOLD-NEXT: [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, float* [[X]], i64 8
+; THRESHOLD-NEXT: [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, float* [[X]], i64 9
+; THRESHOLD-NEXT: [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, float* [[X]], i64 10
+; THRESHOLD-NEXT: [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, float* [[X]], i64 11
+; THRESHOLD-NEXT: [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, float* [[X]], i64 12
+; THRESHOLD-NEXT: [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, float* [[X]], i64 13
+; THRESHOLD-NEXT: [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, float* [[X]], i64 14
+; THRESHOLD-NEXT: [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, float* [[X]], i64 15
+; THRESHOLD-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <16 x float>*
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <16 x float>, <16 x float>* [[TMP0]], align 4
+; THRESHOLD-NEXT: [[ADD_1:%.*]] = fadd fast float undef, undef
+; THRESHOLD-NEXT: [[ADD_2:%.*]] = fadd fast float undef, [[ADD_1]]
+; THRESHOLD-NEXT: [[ADD_3:%.*]] = fadd fast float undef, [[ADD_2]]
+; THRESHOLD-NEXT: [[ADD_4:%.*]] = fadd fast float undef, [[ADD_3]]
+; THRESHOLD-NEXT: [[ADD_5:%.*]] = fadd fast float undef, [[ADD_4]]
+; THRESHOLD-NEXT: [[ADD_6:%.*]] = fadd fast float undef, [[ADD_5]]
+; THRESHOLD-NEXT: [[ADD_7:%.*]] = fadd fast float undef, [[ADD_6]]
+; THRESHOLD-NEXT: [[ADD_8:%.*]] = fadd fast float undef, [[ADD_7]]
+; THRESHOLD-NEXT: [[ADD_9:%.*]] = fadd fast float undef, [[ADD_8]]
+; THRESHOLD-NEXT: [[ADD_10:%.*]] = fadd fast float undef, [[ADD_9]]
+; THRESHOLD-NEXT: [[ADD_11:%.*]] = fadd fast float undef, [[ADD_10]]
+; THRESHOLD-NEXT: [[ADD_12:%.*]] = fadd fast float undef, [[ADD_11]]
+; THRESHOLD-NEXT: [[ADD_13:%.*]] = fadd fast float undef, [[ADD_12]]
+; THRESHOLD-NEXT: [[ADD_14:%.*]] = fadd fast float undef, [[ADD_13]]
+; THRESHOLD-NEXT: [[ADD_15:%.*]] = fadd fast float undef, [[ADD_14]]
+; THRESHOLD-NEXT: [[ARRAYIDX_16:%.*]] = getelementptr inbounds float, float* [[X]], i64 16
+; THRESHOLD-NEXT: [[ARRAYIDX_17:%.*]] = getelementptr inbounds float, float* [[X]], i64 17
+; THRESHOLD-NEXT: [[ARRAYIDX_18:%.*]] = getelementptr inbounds float, float* [[X]], i64 18
+; THRESHOLD-NEXT: [[ARRAYIDX_19:%.*]] = getelementptr inbounds float, float* [[X]], i64 19
+; THRESHOLD-NEXT: [[ARRAYIDX_20:%.*]] = getelementptr inbounds float, float* [[X]], i64 20
+; THRESHOLD-NEXT: [[ARRAYIDX_21:%.*]] = getelementptr inbounds float, float* [[X]], i64 21
+; THRESHOLD-NEXT: [[ARRAYIDX_22:%.*]] = getelementptr inbounds float, float* [[X]], i64 22
+; THRESHOLD-NEXT: [[ARRAYIDX_23:%.*]] = getelementptr inbounds float, float* [[X]], i64 23
+; THRESHOLD-NEXT: [[ARRAYIDX_24:%.*]] = getelementptr inbounds float, float* [[X]], i64 24
+; THRESHOLD-NEXT: [[ARRAYIDX_25:%.*]] = getelementptr inbounds float, float* [[X]], i64 25
+; THRESHOLD-NEXT: [[ARRAYIDX_26:%.*]] = getelementptr inbounds float, float* [[X]], i64 26
+; THRESHOLD-NEXT: [[ARRAYIDX_27:%.*]] = getelementptr inbounds float, float* [[X]], i64 27
+; THRESHOLD-NEXT: [[ARRAYIDX_28:%.*]] = getelementptr inbounds float, float* [[X]], i64 28
+; THRESHOLD-NEXT: [[ARRAYIDX_29:%.*]] = getelementptr inbounds float, float* [[X]], i64 29
+; THRESHOLD-NEXT: [[ARRAYIDX_30:%.*]] = getelementptr inbounds float, float* [[X]], i64 30
+; THRESHOLD-NEXT: [[ARRAYIDX_31:%.*]] = getelementptr inbounds float, float* [[X]], i64 31
+; THRESHOLD-NEXT: [[ARRAYIDX_32:%.*]] = getelementptr inbounds float, float* [[X]], i64 32
+; THRESHOLD-NEXT: [[ARRAYIDX_33:%.*]] = getelementptr inbounds float, float* [[X]], i64 33
+; THRESHOLD-NEXT: [[ARRAYIDX_34:%.*]] = getelementptr inbounds float, float* [[X]], i64 34
+; THRESHOLD-NEXT: [[ARRAYIDX_35:%.*]] = getelementptr inbounds float, float* [[X]], i64 35
+; THRESHOLD-NEXT: [[ARRAYIDX_36:%.*]] = getelementptr inbounds float, float* [[X]], i64 36
+; THRESHOLD-NEXT: [[ARRAYIDX_37:%.*]] = getelementptr inbounds float, float* [[X]], i64 37
+; THRESHOLD-NEXT: [[ARRAYIDX_38:%.*]] = getelementptr inbounds float, float* [[X]], i64 38
+; THRESHOLD-NEXT: [[ARRAYIDX_39:%.*]] = getelementptr inbounds float, float* [[X]], i64 39
+; THRESHOLD-NEXT: [[ARRAYIDX_40:%.*]] = getelementptr inbounds float, float* [[X]], i64 40
+; THRESHOLD-NEXT: [[ARRAYIDX_41:%.*]] = getelementptr inbounds float, float* [[X]], i64 41
+; THRESHOLD-NEXT: [[ARRAYIDX_42:%.*]] = getelementptr inbounds float, float* [[X]], i64 42
+; THRESHOLD-NEXT: [[ARRAYIDX_43:%.*]] = getelementptr inbounds float, float* [[X]], i64 43
+; THRESHOLD-NEXT: [[ARRAYIDX_44:%.*]] = getelementptr inbounds float, float* [[X]], i64 44
+; THRESHOLD-NEXT: [[ARRAYIDX_45:%.*]] = getelementptr inbounds float, float* [[X]], i64 45
+; THRESHOLD-NEXT: [[ARRAYIDX_46:%.*]] = getelementptr inbounds float, float* [[X]], i64 46
+; THRESHOLD-NEXT: [[ARRAYIDX_47:%.*]] = getelementptr inbounds float, float* [[X]], i64 47
+; THRESHOLD-NEXT: [[TMP2:%.*]] = bitcast float* [[ARRAYIDX_16]] to <32 x float>*
+; THRESHOLD-NEXT: [[TMP3:%.*]] = load <32 x float>, <32 x float>* [[TMP2]], align 4
+; THRESHOLD-NEXT: [[ADD_16:%.*]] = fadd fast float undef, [[ADD_15]]
+; THRESHOLD-NEXT: [[ADD_17:%.*]] = fadd fast float undef, [[ADD_16]]
+; THRESHOLD-NEXT: [[ADD_18:%.*]] = fadd fast float undef, [[ADD_17]]
+; THRESHOLD-NEXT: [[ADD_19:%.*]] = fadd fast float undef, [[ADD_18]]
+; THRESHOLD-NEXT: [[ADD_20:%.*]] = fadd fast float undef, [[ADD_19]]
+; THRESHOLD-NEXT: [[ADD_21:%.*]] = fadd fast float undef, [[ADD_20]]
+; THRESHOLD-NEXT: [[ADD_22:%.*]] = fadd fast float undef, [[ADD_21]]
+; THRESHOLD-NEXT: [[ADD_23:%.*]] = fadd fast float undef, [[ADD_22]]
+; THRESHOLD-NEXT: [[ADD_24:%.*]] = fadd fast float undef, [[ADD_23]]
+; THRESHOLD-NEXT: [[ADD_25:%.*]] = fadd fast float undef, [[ADD_24]]
+; THRESHOLD-NEXT: [[ADD_26:%.*]] = fadd fast float undef, [[ADD_25]]
+; THRESHOLD-NEXT: [[ADD_27:%.*]] = fadd fast float undef, [[ADD_26]]
+; THRESHOLD-NEXT: [[ADD_28:%.*]] = fadd fast float undef, [[ADD_27]]
+; THRESHOLD-NEXT: [[ADD_29:%.*]] = fadd fast float undef, [[ADD_28]]
+; THRESHOLD-NEXT: [[ADD_30:%.*]] = fadd fast float undef, [[ADD_29]]
+; THRESHOLD-NEXT: [[ADD_31:%.*]] = fadd fast float undef, [[ADD_30]]
+; THRESHOLD-NEXT: [[ADD_32:%.*]] = fadd fast float undef, [[ADD_31]]
+; THRESHOLD-NEXT: [[ADD_33:%.*]] = fadd fast float undef, [[ADD_32]]
+; THRESHOLD-NEXT: [[ADD_34:%.*]] = fadd fast float undef, [[ADD_33]]
+; THRESHOLD-NEXT: [[ADD_35:%.*]] = fadd fast float undef, [[ADD_34]]
+; THRESHOLD-NEXT: [[ADD_36:%.*]] = fadd fast float undef, [[ADD_35]]
+; THRESHOLD-NEXT: [[ADD_37:%.*]] = fadd fast float undef, [[ADD_36]]
+; THRESHOLD-NEXT: [[ADD_38:%.*]] = fadd fast float undef, [[ADD_37]]
+; THRESHOLD-NEXT: [[ADD_39:%.*]] = fadd fast float undef, [[ADD_38]]
+; THRESHOLD-NEXT: [[ADD_40:%.*]] = fadd fast float undef, [[ADD_39]]
+; THRESHOLD-NEXT: [[ADD_41:%.*]] = fadd fast float undef, [[ADD_40]]
+; THRESHOLD-NEXT: [[ADD_42:%.*]] = fadd fast float undef, [[ADD_41]]
+; THRESHOLD-NEXT: [[ADD_43:%.*]] = fadd fast float undef, [[ADD_42]]
+; THRESHOLD-NEXT: [[ADD_44:%.*]] = fadd fast float undef, [[ADD_43]]
+; THRESHOLD-NEXT: [[ADD_45:%.*]] = fadd fast float undef, [[ADD_44]]
+; THRESHOLD-NEXT: [[ADD_46:%.*]] = fadd fast float undef, [[ADD_45]]
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <32 x float> [[TMP3]], <32 x float> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = fadd fast <32 x float> [[TMP3]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <32 x float> [[BIN_RDX]], <32 x float> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = fadd fast <32 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <32 x float> [[BIN_RDX2]], <32 x float> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX4:%.*]] = fadd fast <32 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; THRESHOLD-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <32 x float> [[BIN_RDX4]], <32 x float> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX6:%.*]] = fadd fast <32 x float> [[BIN_RDX4]], [[RDX_SHUF5]]
+; THRESHOLD-NEXT: [[RDX_SHUF7:%.*]] = shufflevector <32 x float> [[BIN_RDX6]], <32 x float> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX8:%.*]] = fadd fast <32 x float> [[BIN_RDX6]], [[RDX_SHUF7]]
+; THRESHOLD-NEXT: [[TMP4:%.*]] = extractelement <32 x float> [[BIN_RDX8]], i32 0
+; THRESHOLD-NEXT: [[RDX_SHUF9:%.*]] = shufflevector <16 x float> [[TMP1]], <16 x float> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX10:%.*]] = fadd fast <16 x float> [[TMP1]], [[RDX_SHUF9]]
+; THRESHOLD-NEXT: [[RDX_SHUF11:%.*]] = shufflevector <16 x float> [[BIN_RDX10]], <16 x float> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX12:%.*]] = fadd fast <16 x float> [[BIN_RDX10]], [[RDX_SHUF11]]
+; THRESHOLD-NEXT: [[RDX_SHUF13:%.*]] = shufflevector <16 x float> [[BIN_RDX12]], <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX14:%.*]] = fadd fast <16 x float> [[BIN_RDX12]], [[RDX_SHUF13]]
+; THRESHOLD-NEXT: [[RDX_SHUF15:%.*]] = shufflevector <16 x float> [[BIN_RDX14]], <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX16:%.*]] = fadd fast <16 x float> [[BIN_RDX14]], [[RDX_SHUF15]]
+; THRESHOLD-NEXT: [[TMP5:%.*]] = extractelement <16 x float> [[BIN_RDX16]], i32 0
+; THRESHOLD-NEXT: [[BIN_RDX17:%.*]] = fadd fast float [[TMP4]], [[TMP5]]
+; THRESHOLD-NEXT: [[ADD_47:%.*]] = fadd fast float undef, [[ADD_46]]
+; THRESHOLD-NEXT: ret float [[BIN_RDX17]]
+;
+ entry:
+ %0 = load float, float* %x, align 4
+ %arrayidx.1 = getelementptr inbounds float, float* %x, i64 1
+ %1 = load float, float* %arrayidx.1, align 4
+ %add.1 = fadd fast float %1, %0
+ %arrayidx.2 = getelementptr inbounds float, float* %x, i64 2
+ %2 = load float, float* %arrayidx.2, align 4
+ %add.2 = fadd fast float %2, %add.1
+ %arrayidx.3 = getelementptr inbounds float, float* %x, i64 3
+ %3 = load float, float* %arrayidx.3, align 4
+ %add.3 = fadd fast float %3, %add.2
+ %arrayidx.4 = getelementptr inbounds float, float* %x, i64 4
+ %4 = load float, float* %arrayidx.4, align 4
+ %add.4 = fadd fast float %4, %add.3
+ %arrayidx.5 = getelementptr inbounds float, float* %x, i64 5
+ %5 = load float, float* %arrayidx.5, align 4
+ %add.5 = fadd fast float %5, %add.4
+ %arrayidx.6 = getelementptr inbounds float, float* %x, i64 6
+ %6 = load float, float* %arrayidx.6, align 4
+ %add.6 = fadd fast float %6, %add.5
+ %arrayidx.7 = getelementptr inbounds float, float* %x, i64 7
+ %7 = load float, float* %arrayidx.7, align 4
+ %add.7 = fadd fast float %7, %add.6
+ %arrayidx.8 = getelementptr inbounds float, float* %x, i64 8
+ %8 = load float, float* %arrayidx.8, align 4
+ %add.8 = fadd fast float %8, %add.7
+ %arrayidx.9 = getelementptr inbounds float, float* %x, i64 9
+ %9 = load float, float* %arrayidx.9, align 4
+ %add.9 = fadd fast float %9, %add.8
+ %arrayidx.10 = getelementptr inbounds float, float* %x, i64 10
+ %10 = load float, float* %arrayidx.10, align 4
+ %add.10 = fadd fast float %10, %add.9
+ %arrayidx.11 = getelementptr inbounds float, float* %x, i64 11
+ %11 = load float, float* %arrayidx.11, align 4
+ %add.11 = fadd fast float %11, %add.10
+ %arrayidx.12 = getelementptr inbounds float, float* %x, i64 12
+ %12 = load float, float* %arrayidx.12, align 4
+ %add.12 = fadd fast float %12, %add.11
+ %arrayidx.13 = getelementptr inbounds float, float* %x, i64 13
+ %13 = load float, float* %arrayidx.13, align 4
+ %add.13 = fadd fast float %13, %add.12
+ %arrayidx.14 = getelementptr inbounds float, float* %x, i64 14
+ %14 = load float, float* %arrayidx.14, align 4
+ %add.14 = fadd fast float %14, %add.13
+ %arrayidx.15 = getelementptr inbounds float, float* %x, i64 15
+ %15 = load float, float* %arrayidx.15, align 4
+ %add.15 = fadd fast float %15, %add.14
+ %arrayidx.16 = getelementptr inbounds float, float* %x, i64 16
+ %16 = load float, float* %arrayidx.16, align 4
+ %add.16 = fadd fast float %16, %add.15
+ %arrayidx.17 = getelementptr inbounds float, float* %x, i64 17
+ %17 = load float, float* %arrayidx.17, align 4
+ %add.17 = fadd fast float %17, %add.16
+ %arrayidx.18 = getelementptr inbounds float, float* %x, i64 18
+ %18 = load float, float* %arrayidx.18, align 4
+ %add.18 = fadd fast float %18, %add.17
+ %arrayidx.19 = getelementptr inbounds float, float* %x, i64 19
+ %19 = load float, float* %arrayidx.19, align 4
+ %add.19 = fadd fast float %19, %add.18
+ %arrayidx.20 = getelementptr inbounds float, float* %x, i64 20
+ %20 = load float, float* %arrayidx.20, align 4
+ %add.20 = fadd fast float %20, %add.19
+ %arrayidx.21 = getelementptr inbounds float, float* %x, i64 21
+ %21 = load float, float* %arrayidx.21, align 4
+ %add.21 = fadd fast float %21, %add.20
+ %arrayidx.22 = getelementptr inbounds float, float* %x, i64 22
+ %22 = load float, float* %arrayidx.22, align 4
+ %add.22 = fadd fast float %22, %add.21
+ %arrayidx.23 = getelementptr inbounds float, float* %x, i64 23
+ %23 = load float, float* %arrayidx.23, align 4
+ %add.23 = fadd fast float %23, %add.22
+ %arrayidx.24 = getelementptr inbounds float, float* %x, i64 24
+ %24 = load float, float* %arrayidx.24, align 4
+ %add.24 = fadd fast float %24, %add.23
+ %arrayidx.25 = getelementptr inbounds float, float* %x, i64 25
+ %25 = load float, float* %arrayidx.25, align 4
+ %add.25 = fadd fast float %25, %add.24
+ %arrayidx.26 = getelementptr inbounds float, float* %x, i64 26
+ %26 = load float, float* %arrayidx.26, align 4
+ %add.26 = fadd fast float %26, %add.25
+ %arrayidx.27 = getelementptr inbounds float, float* %x, i64 27
+ %27 = load float, float* %arrayidx.27, align 4
+ %add.27 = fadd fast float %27, %add.26
+ %arrayidx.28 = getelementptr inbounds float, float* %x, i64 28
+ %28 = load float, float* %arrayidx.28, align 4
+ %add.28 = fadd fast float %28, %add.27
+ %arrayidx.29 = getelementptr inbounds float, float* %x, i64 29
+ %29 = load float, float* %arrayidx.29, align 4
+ %add.29 = fadd fast float %29, %add.28
+ %arrayidx.30 = getelementptr inbounds float, float* %x, i64 30
+ %30 = load float, float* %arrayidx.30, align 4
+ %add.30 = fadd fast float %30, %add.29
+ %arrayidx.31 = getelementptr inbounds float, float* %x, i64 31
+ %31 = load float, float* %arrayidx.31, align 4
+ %add.31 = fadd fast float %31, %add.30
+ %arrayidx.32 = getelementptr inbounds float, float* %x, i64 32
+ %32 = load float, float* %arrayidx.32, align 4
+ %add.32 = fadd fast float %32, %add.31
+ %arrayidx.33 = getelementptr inbounds float, float* %x, i64 33
+ %33 = load float, float* %arrayidx.33, align 4
+ %add.33 = fadd fast float %33, %add.32
+ %arrayidx.34 = getelementptr inbounds float, float* %x, i64 34
+ %34 = load float, float* %arrayidx.34, align 4
+ %add.34 = fadd fast float %34, %add.33
+ %arrayidx.35 = getelementptr inbounds float, float* %x, i64 35
+ %35 = load float, float* %arrayidx.35, align 4
+ %add.35 = fadd fast float %35, %add.34
+ %arrayidx.36 = getelementptr inbounds float, float* %x, i64 36
+ %36 = load float, float* %arrayidx.36, align 4
+ %add.36 = fadd fast float %36, %add.35
+ %arrayidx.37 = getelementptr inbounds float, float* %x, i64 37
+ %37 = load float, float* %arrayidx.37, align 4
+ %add.37 = fadd fast float %37, %add.36
+ %arrayidx.38 = getelementptr inbounds float, float* %x, i64 38
+ %38 = load float, float* %arrayidx.38, align 4
+ %add.38 = fadd fast float %38, %add.37
+ %arrayidx.39 = getelementptr inbounds float, float* %x, i64 39
+ %39 = load float, float* %arrayidx.39, align 4
+ %add.39 = fadd fast float %39, %add.38
+ %arrayidx.40 = getelementptr inbounds float, float* %x, i64 40
+ %40 = load float, float* %arrayidx.40, align 4
+ %add.40 = fadd fast float %40, %add.39
+ %arrayidx.41 = getelementptr inbounds float, float* %x, i64 41
+ %41 = load float, float* %arrayidx.41, align 4
+ %add.41 = fadd fast float %41, %add.40
+ %arrayidx.42 = getelementptr inbounds float, float* %x, i64 42
+ %42 = load float, float* %arrayidx.42, align 4
+ %add.42 = fadd fast float %42, %add.41
+ %arrayidx.43 = getelementptr inbounds float, float* %x, i64 43
+ %43 = load float, float* %arrayidx.43, align 4
+ %add.43 = fadd fast float %43, %add.42
+ %arrayidx.44 = getelementptr inbounds float, float* %x, i64 44
+ %44 = load float, float* %arrayidx.44, align 4
+ %add.44 = fadd fast float %44, %add.43
+ %arrayidx.45 = getelementptr inbounds float, float* %x, i64 45
+ %45 = load float, float* %arrayidx.45, align 4
+ %add.45 = fadd fast float %45, %add.44
+ %arrayidx.46 = getelementptr inbounds float, float* %x, i64 46
+ %46 = load float, float* %arrayidx.46, align 4
+ %add.46 = fadd fast float %46, %add.45
+ %arrayidx.47 = getelementptr inbounds float, float* %x, i64 47
+ %47 = load float, float* %arrayidx.47, align 4
+ %add.47 = fadd fast float %47, %add.46
+ ret float %add.47
+}
+
+define float @f1(float* nocapture readonly %x, i32 %a, i32 %b) {
+; CHECK-LABEL: @f1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[REM:%.*]] = srem i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[REM]] to float
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; CHECK-NEXT: [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, float* [[X]], i64 8
+; CHECK-NEXT: [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, float* [[X]], i64 9
+; CHECK-NEXT: [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, float* [[X]], i64 10
+; CHECK-NEXT: [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, float* [[X]], i64 11
+; CHECK-NEXT: [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, float* [[X]], i64 12
+; CHECK-NEXT: [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, float* [[X]], i64 13
+; CHECK-NEXT: [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, float* [[X]], i64 14
+; CHECK-NEXT: [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, float* [[X]], i64 15
+; CHECK-NEXT: [[ARRAYIDX_16:%.*]] = getelementptr inbounds float, float* [[X]], i64 16
+; CHECK-NEXT: [[ARRAYIDX_17:%.*]] = getelementptr inbounds float, float* [[X]], i64 17
+; CHECK-NEXT: [[ARRAYIDX_18:%.*]] = getelementptr inbounds float, float* [[X]], i64 18
+; CHECK-NEXT: [[ARRAYIDX_19:%.*]] = getelementptr inbounds float, float* [[X]], i64 19
+; CHECK-NEXT: [[ARRAYIDX_20:%.*]] = getelementptr inbounds float, float* [[X]], i64 20
+; CHECK-NEXT: [[ARRAYIDX_21:%.*]] = getelementptr inbounds float, float* [[X]], i64 21
+; CHECK-NEXT: [[ARRAYIDX_22:%.*]] = getelementptr inbounds float, float* [[X]], i64 22
+; CHECK-NEXT: [[ARRAYIDX_23:%.*]] = getelementptr inbounds float, float* [[X]], i64 23
+; CHECK-NEXT: [[ARRAYIDX_24:%.*]] = getelementptr inbounds float, float* [[X]], i64 24
+; CHECK-NEXT: [[ARRAYIDX_25:%.*]] = getelementptr inbounds float, float* [[X]], i64 25
+; CHECK-NEXT: [[ARRAYIDX_26:%.*]] = getelementptr inbounds float, float* [[X]], i64 26
+; CHECK-NEXT: [[ARRAYIDX_27:%.*]] = getelementptr inbounds float, float* [[X]], i64 27
+; CHECK-NEXT: [[ARRAYIDX_28:%.*]] = getelementptr inbounds float, float* [[X]], i64 28
+; CHECK-NEXT: [[ARRAYIDX_29:%.*]] = getelementptr inbounds float, float* [[X]], i64 29
+; CHECK-NEXT: [[ARRAYIDX_30:%.*]] = getelementptr inbounds float, float* [[X]], i64 30
+; CHECK-NEXT: [[ARRAYIDX_31:%.*]] = getelementptr inbounds float, float* [[X]], i64 31
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <32 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <32 x float>, <32 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float undef, [[CONV]]
+; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float undef, [[ADD]]
+; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float undef, [[ADD_1]]
+; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float undef, [[ADD_2]]
+; CHECK-NEXT: [[ADD_4:%.*]] = fadd fast float undef, [[ADD_3]]
+; CHECK-NEXT: [[ADD_5:%.*]] = fadd fast float undef, [[ADD_4]]
+; CHECK-NEXT: [[ADD_6:%.*]] = fadd fast float undef, [[ADD_5]]
+; CHECK-NEXT: [[ADD_7:%.*]] = fadd fast float undef, [[ADD_6]]
+; CHECK-NEXT: [[ADD_8:%.*]] = fadd fast float undef, [[ADD_7]]
+; CHECK-NEXT: [[ADD_9:%.*]] = fadd fast float undef, [[ADD_8]]
+; CHECK-NEXT: [[ADD_10:%.*]] = fadd fast float undef, [[ADD_9]]
+; CHECK-NEXT: [[ADD_11:%.*]] = fadd fast float undef, [[ADD_10]]
+; CHECK-NEXT: [[ADD_12:%.*]] = fadd fast float undef, [[ADD_11]]
+; CHECK-NEXT: [[ADD_13:%.*]] = fadd fast float undef, [[ADD_12]]
+; CHECK-NEXT: [[ADD_14:%.*]] = fadd fast float undef, [[ADD_13]]
+; CHECK-NEXT: [[ADD_15:%.*]] = fadd fast float undef, [[ADD_14]]
+; CHECK-NEXT: [[ADD_16:%.*]] = fadd fast float undef, [[ADD_15]]
+; CHECK-NEXT: [[ADD_17:%.*]] = fadd fast float undef, [[ADD_16]]
+; CHECK-NEXT: [[ADD_18:%.*]] = fadd fast float undef, [[ADD_17]]
+; CHECK-NEXT: [[ADD_19:%.*]] = fadd fast float undef, [[ADD_18]]
+; CHECK-NEXT: [[ADD_20:%.*]] = fadd fast float undef, [[ADD_19]]
+; CHECK-NEXT: [[ADD_21:%.*]] = fadd fast float undef, [[ADD_20]]
+; CHECK-NEXT: [[ADD_22:%.*]] = fadd fast float undef, [[ADD_21]]
+; CHECK-NEXT: [[ADD_23:%.*]] = fadd fast float undef, [[ADD_22]]
+; CHECK-NEXT: [[ADD_24:%.*]] = fadd fast float undef, [[ADD_23]]
+; CHECK-NEXT: [[ADD_25:%.*]] = fadd fast float undef, [[ADD_24]]
+; CHECK-NEXT: [[ADD_26:%.*]] = fadd fast float undef, [[ADD_25]]
+; CHECK-NEXT: [[ADD_27:%.*]] = fadd fast float undef, [[ADD_26]]
+; CHECK-NEXT: [[ADD_28:%.*]] = fadd fast float undef, [[ADD_27]]
+; CHECK-NEXT: [[ADD_29:%.*]] = fadd fast float undef, [[ADD_28]]
+; CHECK-NEXT: [[ADD_30:%.*]] = fadd fast float undef, [[ADD_29]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <32 x float> [[TMP1]], <32 x float> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <32 x float> [[TMP1]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <32 x float> [[BIN_RDX]], <32 x float> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <32 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <32 x float> [[BIN_RDX2]], <32 x float> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX4:%.*]] = fadd fast <32 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; CHECK-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <32 x float> [[BIN_RDX4]], <32 x float> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX6:%.*]] = fadd fast <32 x float> [[BIN_RDX4]], [[RDX_SHUF5]]
+; CHECK-NEXT: [[RDX_SHUF7:%.*]] = shufflevector <32 x float> [[BIN_RDX6]], <32 x float> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX8:%.*]] = fadd fast <32 x float> [[BIN_RDX6]], [[RDX_SHUF7]]
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <32 x float> [[BIN_RDX8]], i32 0
+; CHECK-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP2]], [[CONV]]
+; CHECK-NEXT: [[ADD_31:%.*]] = fadd fast float undef, [[ADD_30]]
+; CHECK-NEXT: ret float [[BIN_EXTRA]]
+;
+; THRESHOLD-LABEL: @f1(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[REM:%.*]] = srem i32 [[A:%.*]], [[B:%.*]]
+; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[REM]] to float
+; THRESHOLD-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; THRESHOLD-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; THRESHOLD-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; THRESHOLD-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; THRESHOLD-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; THRESHOLD-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; THRESHOLD-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; THRESHOLD-NEXT: [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, float* [[X]], i64 8
+; THRESHOLD-NEXT: [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, float* [[X]], i64 9
+; THRESHOLD-NEXT: [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, float* [[X]], i64 10
+; THRESHOLD-NEXT: [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, float* [[X]], i64 11
+; THRESHOLD-NEXT: [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, float* [[X]], i64 12
+; THRESHOLD-NEXT: [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, float* [[X]], i64 13
+; THRESHOLD-NEXT: [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, float* [[X]], i64 14
+; THRESHOLD-NEXT: [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, float* [[X]], i64 15
+; THRESHOLD-NEXT: [[ARRAYIDX_16:%.*]] = getelementptr inbounds float, float* [[X]], i64 16
+; THRESHOLD-NEXT: [[ARRAYIDX_17:%.*]] = getelementptr inbounds float, float* [[X]], i64 17
+; THRESHOLD-NEXT: [[ARRAYIDX_18:%.*]] = getelementptr inbounds float, float* [[X]], i64 18
+; THRESHOLD-NEXT: [[ARRAYIDX_19:%.*]] = getelementptr inbounds float, float* [[X]], i64 19
+; THRESHOLD-NEXT: [[ARRAYIDX_20:%.*]] = getelementptr inbounds float, float* [[X]], i64 20
+; THRESHOLD-NEXT: [[ARRAYIDX_21:%.*]] = getelementptr inbounds float, float* [[X]], i64 21
+; THRESHOLD-NEXT: [[ARRAYIDX_22:%.*]] = getelementptr inbounds float, float* [[X]], i64 22
+; THRESHOLD-NEXT: [[ARRAYIDX_23:%.*]] = getelementptr inbounds float, float* [[X]], i64 23
+; THRESHOLD-NEXT: [[ARRAYIDX_24:%.*]] = getelementptr inbounds float, float* [[X]], i64 24
+; THRESHOLD-NEXT: [[ARRAYIDX_25:%.*]] = getelementptr inbounds float, float* [[X]], i64 25
+; THRESHOLD-NEXT: [[ARRAYIDX_26:%.*]] = getelementptr inbounds float, float* [[X]], i64 26
+; THRESHOLD-NEXT: [[ARRAYIDX_27:%.*]] = getelementptr inbounds float, float* [[X]], i64 27
+; THRESHOLD-NEXT: [[ARRAYIDX_28:%.*]] = getelementptr inbounds float, float* [[X]], i64 28
+; THRESHOLD-NEXT: [[ARRAYIDX_29:%.*]] = getelementptr inbounds float, float* [[X]], i64 29
+; THRESHOLD-NEXT: [[ARRAYIDX_30:%.*]] = getelementptr inbounds float, float* [[X]], i64 30
+; THRESHOLD-NEXT: [[ARRAYIDX_31:%.*]] = getelementptr inbounds float, float* [[X]], i64 31
+; THRESHOLD-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <32 x float>*
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <32 x float>, <32 x float>* [[TMP0]], align 4
+; THRESHOLD-NEXT: [[ADD:%.*]] = fadd fast float undef, [[CONV]]
+; THRESHOLD-NEXT: [[ADD_1:%.*]] = fadd fast float undef, [[ADD]]
+; THRESHOLD-NEXT: [[ADD_2:%.*]] = fadd fast float undef, [[ADD_1]]
+; THRESHOLD-NEXT: [[ADD_3:%.*]] = fadd fast float undef, [[ADD_2]]
+; THRESHOLD-NEXT: [[ADD_4:%.*]] = fadd fast float undef, [[ADD_3]]
+; THRESHOLD-NEXT: [[ADD_5:%.*]] = fadd fast float undef, [[ADD_4]]
+; THRESHOLD-NEXT: [[ADD_6:%.*]] = fadd fast float undef, [[ADD_5]]
+; THRESHOLD-NEXT: [[ADD_7:%.*]] = fadd fast float undef, [[ADD_6]]
+; THRESHOLD-NEXT: [[ADD_8:%.*]] = fadd fast float undef, [[ADD_7]]
+; THRESHOLD-NEXT: [[ADD_9:%.*]] = fadd fast float undef, [[ADD_8]]
+; THRESHOLD-NEXT: [[ADD_10:%.*]] = fadd fast float undef, [[ADD_9]]
+; THRESHOLD-NEXT: [[ADD_11:%.*]] = fadd fast float undef, [[ADD_10]]
+; THRESHOLD-NEXT: [[ADD_12:%.*]] = fadd fast float undef, [[ADD_11]]
+; THRESHOLD-NEXT: [[ADD_13:%.*]] = fadd fast float undef, [[ADD_12]]
+; THRESHOLD-NEXT: [[ADD_14:%.*]] = fadd fast float undef, [[ADD_13]]
+; THRESHOLD-NEXT: [[ADD_15:%.*]] = fadd fast float undef, [[ADD_14]]
+; THRESHOLD-NEXT: [[ADD_16:%.*]] = fadd fast float undef, [[ADD_15]]
+; THRESHOLD-NEXT: [[ADD_17:%.*]] = fadd fast float undef, [[ADD_16]]
+; THRESHOLD-NEXT: [[ADD_18:%.*]] = fadd fast float undef, [[ADD_17]]
+; THRESHOLD-NEXT: [[ADD_19:%.*]] = fadd fast float undef, [[ADD_18]]
+; THRESHOLD-NEXT: [[ADD_20:%.*]] = fadd fast float undef, [[ADD_19]]
+; THRESHOLD-NEXT: [[ADD_21:%.*]] = fadd fast float undef, [[ADD_20]]
+; THRESHOLD-NEXT: [[ADD_22:%.*]] = fadd fast float undef, [[ADD_21]]
+; THRESHOLD-NEXT: [[ADD_23:%.*]] = fadd fast float undef, [[ADD_22]]
+; THRESHOLD-NEXT: [[ADD_24:%.*]] = fadd fast float undef, [[ADD_23]]
+; THRESHOLD-NEXT: [[ADD_25:%.*]] = fadd fast float undef, [[ADD_24]]
+; THRESHOLD-NEXT: [[ADD_26:%.*]] = fadd fast float undef, [[ADD_25]]
+; THRESHOLD-NEXT: [[ADD_27:%.*]] = fadd fast float undef, [[ADD_26]]
+; THRESHOLD-NEXT: [[ADD_28:%.*]] = fadd fast float undef, [[ADD_27]]
+; THRESHOLD-NEXT: [[ADD_29:%.*]] = fadd fast float undef, [[ADD_28]]
+; THRESHOLD-NEXT: [[ADD_30:%.*]] = fadd fast float undef, [[ADD_29]]
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <32 x float> [[TMP1]], <32 x float> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = fadd fast <32 x float> [[TMP1]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <32 x float> [[BIN_RDX]], <32 x float> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = fadd fast <32 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <32 x float> [[BIN_RDX2]], <32 x float> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX4:%.*]] = fadd fast <32 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; THRESHOLD-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <32 x float> [[BIN_RDX4]], <32 x float> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX6:%.*]] = fadd fast <32 x float> [[BIN_RDX4]], [[RDX_SHUF5]]
+; THRESHOLD-NEXT: [[RDX_SHUF7:%.*]] = shufflevector <32 x float> [[BIN_RDX6]], <32 x float> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX8:%.*]] = fadd fast <32 x float> [[BIN_RDX6]], [[RDX_SHUF7]]
+; THRESHOLD-NEXT: [[TMP2:%.*]] = extractelement <32 x float> [[BIN_RDX8]], i32 0
+; THRESHOLD-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP2]], [[CONV]]
+; THRESHOLD-NEXT: [[ADD_31:%.*]] = fadd fast float undef, [[ADD_30]]
+; THRESHOLD-NEXT: ret float [[BIN_EXTRA]]
+;
+ entry:
+ %rem = srem i32 %a, %b
+ %conv = sitofp i32 %rem to float
+ %0 = load float, float* %x, align 4
+ %add = fadd fast float %0, %conv
+ %arrayidx.1 = getelementptr inbounds float, float* %x, i64 1
+ %1 = load float, float* %arrayidx.1, align 4
+ %add.1 = fadd fast float %1, %add
+ %arrayidx.2 = getelementptr inbounds float, float* %x, i64 2
+ %2 = load float, float* %arrayidx.2, align 4
+ %add.2 = fadd fast float %2, %add.1
+ %arrayidx.3 = getelementptr inbounds float, float* %x, i64 3
+ %3 = load float, float* %arrayidx.3, align 4
+ %add.3 = fadd fast float %3, %add.2
+ %arrayidx.4 = getelementptr inbounds float, float* %x, i64 4
+ %4 = load float, float* %arrayidx.4, align 4
+ %add.4 = fadd fast float %4, %add.3
+ %arrayidx.5 = getelementptr inbounds float, float* %x, i64 5
+ %5 = load float, float* %arrayidx.5, align 4
+ %add.5 = fadd fast float %5, %add.4
+ %arrayidx.6 = getelementptr inbounds float, float* %x, i64 6
+ %6 = load float, float* %arrayidx.6, align 4
+ %add.6 = fadd fast float %6, %add.5
+ %arrayidx.7 = getelementptr inbounds float, float* %x, i64 7
+ %7 = load float, float* %arrayidx.7, align 4
+ %add.7 = fadd fast float %7, %add.6
+ %arrayidx.8 = getelementptr inbounds float, float* %x, i64 8
+ %8 = load float, float* %arrayidx.8, align 4
+ %add.8 = fadd fast float %8, %add.7
+ %arrayidx.9 = getelementptr inbounds float, float* %x, i64 9
+ %9 = load float, float* %arrayidx.9, align 4
+ %add.9 = fadd fast float %9, %add.8
+ %arrayidx.10 = getelementptr inbounds float, float* %x, i64 10
+ %10 = load float, float* %arrayidx.10, align 4
+ %add.10 = fadd fast float %10, %add.9
+ %arrayidx.11 = getelementptr inbounds float, float* %x, i64 11
+ %11 = load float, float* %arrayidx.11, align 4
+ %add.11 = fadd fast float %11, %add.10
+ %arrayidx.12 = getelementptr inbounds float, float* %x, i64 12
+ %12 = load float, float* %arrayidx.12, align 4
+ %add.12 = fadd fast float %12, %add.11
+ %arrayidx.13 = getelementptr inbounds float, float* %x, i64 13
+ %13 = load float, float* %arrayidx.13, align 4
+ %add.13 = fadd fast float %13, %add.12
+ %arrayidx.14 = getelementptr inbounds float, float* %x, i64 14
+ %14 = load float, float* %arrayidx.14, align 4
+ %add.14 = fadd fast float %14, %add.13
+ %arrayidx.15 = getelementptr inbounds float, float* %x, i64 15
+ %15 = load float, float* %arrayidx.15, align 4
+ %add.15 = fadd fast float %15, %add.14
+ %arrayidx.16 = getelementptr inbounds float, float* %x, i64 16
+ %16 = load float, float* %arrayidx.16, align 4
+ %add.16 = fadd fast float %16, %add.15
+ %arrayidx.17 = getelementptr inbounds float, float* %x, i64 17
+ %17 = load float, float* %arrayidx.17, align 4
+ %add.17 = fadd fast float %17, %add.16
+ %arrayidx.18 = getelementptr inbounds float, float* %x, i64 18
+ %18 = load float, float* %arrayidx.18, align 4
+ %add.18 = fadd fast float %18, %add.17
+ %arrayidx.19 = getelementptr inbounds float, float* %x, i64 19
+ %19 = load float, float* %arrayidx.19, align 4
+ %add.19 = fadd fast float %19, %add.18
+ %arrayidx.20 = getelementptr inbounds float, float* %x, i64 20
+ %20 = load float, float* %arrayidx.20, align 4
+ %add.20 = fadd fast float %20, %add.19
+ %arrayidx.21 = getelementptr inbounds float, float* %x, i64 21
+ %21 = load float, float* %arrayidx.21, align 4
+ %add.21 = fadd fast float %21, %add.20
+ %arrayidx.22 = getelementptr inbounds float, float* %x, i64 22
+ %22 = load float, float* %arrayidx.22, align 4
+ %add.22 = fadd fast float %22, %add.21
+ %arrayidx.23 = getelementptr inbounds float, float* %x, i64 23
+ %23 = load float, float* %arrayidx.23, align 4
+ %add.23 = fadd fast float %23, %add.22
+ %arrayidx.24 = getelementptr inbounds float, float* %x, i64 24
+ %24 = load float, float* %arrayidx.24, align 4
+ %add.24 = fadd fast float %24, %add.23
+ %arrayidx.25 = getelementptr inbounds float, float* %x, i64 25
+ %25 = load float, float* %arrayidx.25, align 4
+ %add.25 = fadd fast float %25, %add.24
+ %arrayidx.26 = getelementptr inbounds float, float* %x, i64 26
+ %26 = load float, float* %arrayidx.26, align 4
+ %add.26 = fadd fast float %26, %add.25
+ %arrayidx.27 = getelementptr inbounds float, float* %x, i64 27
+ %27 = load float, float* %arrayidx.27, align 4
+ %add.27 = fadd fast float %27, %add.26
+ %arrayidx.28 = getelementptr inbounds float, float* %x, i64 28
+ %28 = load float, float* %arrayidx.28, align 4
+ %add.28 = fadd fast float %28, %add.27
+ %arrayidx.29 = getelementptr inbounds float, float* %x, i64 29
+ %29 = load float, float* %arrayidx.29, align 4
+ %add.29 = fadd fast float %29, %add.28
+ %arrayidx.30 = getelementptr inbounds float, float* %x, i64 30
+ %30 = load float, float* %arrayidx.30, align 4
+ %add.30 = fadd fast float %30, %add.29
+ %arrayidx.31 = getelementptr inbounds float, float* %x, i64 31
+ %31 = load float, float* %arrayidx.31, align 4
+ %add.31 = fadd fast float %31, %add.30
+ ret float %add.31
+}
+
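+; loadadd31: a 30-element fadd chain over x[1]..x[30] with no extra scalar
+; operands. Since 30 is not a power of two, the checks expect the reduction
+; to be split into <16 x float>, <8 x float>, and <4 x float> horizontal
+; reductions whose scalar results are then combined with the two leftover
+; scalar loads of x[1] and x[2].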
+define float @loadadd31(float* nocapture readonly %x) {
+; CHECK-LABEL: @loadadd31(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float [[TMP1]], [[TMP0]]
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[ARRAYIDX_2]] to <4 x float>*
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x float>, <4 x float>* [[TMP2]], align 4
+; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float undef, [[ADD_1]]
+; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float undef, [[ADD_2]]
+; CHECK-NEXT: [[ADD_4:%.*]] = fadd fast float undef, [[ADD_3]]
+; CHECK-NEXT: [[ADD_5:%.*]] = fadd fast float undef, [[ADD_4]]
+; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, float* [[X]], i64 8
+; CHECK-NEXT: [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, float* [[X]], i64 9
+; CHECK-NEXT: [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, float* [[X]], i64 10
+; CHECK-NEXT: [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, float* [[X]], i64 11
+; CHECK-NEXT: [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, float* [[X]], i64 12
+; CHECK-NEXT: [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, float* [[X]], i64 13
+; CHECK-NEXT: [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, float* [[X]], i64 14
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast float* [[ARRAYIDX_6]] to <8 x float>*
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x float>, <8 x float>* [[TMP4]], align 4
+; CHECK-NEXT: [[ADD_6:%.*]] = fadd fast float undef, [[ADD_5]]
+; CHECK-NEXT: [[ADD_7:%.*]] = fadd fast float undef, [[ADD_6]]
+; CHECK-NEXT: [[ADD_8:%.*]] = fadd fast float undef, [[ADD_7]]
+; CHECK-NEXT: [[ADD_9:%.*]] = fadd fast float undef, [[ADD_8]]
+; CHECK-NEXT: [[ADD_10:%.*]] = fadd fast float undef, [[ADD_9]]
+; CHECK-NEXT: [[ADD_11:%.*]] = fadd fast float undef, [[ADD_10]]
+; CHECK-NEXT: [[ADD_12:%.*]] = fadd fast float undef, [[ADD_11]]
+; CHECK-NEXT: [[ADD_13:%.*]] = fadd fast float undef, [[ADD_12]]
+; CHECK-NEXT: [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, float* [[X]], i64 15
+; CHECK-NEXT: [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, float* [[X]], i64 16
+; CHECK-NEXT: [[ARRAYIDX_16:%.*]] = getelementptr inbounds float, float* [[X]], i64 17
+; CHECK-NEXT: [[ARRAYIDX_17:%.*]] = getelementptr inbounds float, float* [[X]], i64 18
+; CHECK-NEXT: [[ARRAYIDX_18:%.*]] = getelementptr inbounds float, float* [[X]], i64 19
+; CHECK-NEXT: [[ARRAYIDX_19:%.*]] = getelementptr inbounds float, float* [[X]], i64 20
+; CHECK-NEXT: [[ARRAYIDX_20:%.*]] = getelementptr inbounds float, float* [[X]], i64 21
+; CHECK-NEXT: [[ARRAYIDX_21:%.*]] = getelementptr inbounds float, float* [[X]], i64 22
+; CHECK-NEXT: [[ARRAYIDX_22:%.*]] = getelementptr inbounds float, float* [[X]], i64 23
+; CHECK-NEXT: [[ARRAYIDX_23:%.*]] = getelementptr inbounds float, float* [[X]], i64 24
+; CHECK-NEXT: [[ARRAYIDX_24:%.*]] = getelementptr inbounds float, float* [[X]], i64 25
+; CHECK-NEXT: [[ARRAYIDX_25:%.*]] = getelementptr inbounds float, float* [[X]], i64 26
+; CHECK-NEXT: [[ARRAYIDX_26:%.*]] = getelementptr inbounds float, float* [[X]], i64 27
+; CHECK-NEXT: [[ARRAYIDX_27:%.*]] = getelementptr inbounds float, float* [[X]], i64 28
+; CHECK-NEXT: [[ARRAYIDX_28:%.*]] = getelementptr inbounds float, float* [[X]], i64 29
+; CHECK-NEXT: [[ARRAYIDX_29:%.*]] = getelementptr inbounds float, float* [[X]], i64 30
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast float* [[ARRAYIDX_14]] to <16 x float>*
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x float>, <16 x float>* [[TMP6]], align 4
+; CHECK-NEXT: [[ADD_14:%.*]] = fadd fast float undef, [[ADD_13]]
+; CHECK-NEXT: [[ADD_15:%.*]] = fadd fast float undef, [[ADD_14]]
+; CHECK-NEXT: [[ADD_16:%.*]] = fadd fast float undef, [[ADD_15]]
+; CHECK-NEXT: [[ADD_17:%.*]] = fadd fast float undef, [[ADD_16]]
+; CHECK-NEXT: [[ADD_18:%.*]] = fadd fast float undef, [[ADD_17]]
+; CHECK-NEXT: [[ADD_19:%.*]] = fadd fast float undef, [[ADD_18]]
+; CHECK-NEXT: [[ADD_20:%.*]] = fadd fast float undef, [[ADD_19]]
+; CHECK-NEXT: [[ADD_21:%.*]] = fadd fast float undef, [[ADD_20]]
+; CHECK-NEXT: [[ADD_22:%.*]] = fadd fast float undef, [[ADD_21]]
+; CHECK-NEXT: [[ADD_23:%.*]] = fadd fast float undef, [[ADD_22]]
+; CHECK-NEXT: [[ADD_24:%.*]] = fadd fast float undef, [[ADD_23]]
+; CHECK-NEXT: [[ADD_25:%.*]] = fadd fast float undef, [[ADD_24]]
+; CHECK-NEXT: [[ADD_26:%.*]] = fadd fast float undef, [[ADD_25]]
+; CHECK-NEXT: [[ADD_27:%.*]] = fadd fast float undef, [[ADD_26]]
+; CHECK-NEXT: [[ADD_28:%.*]] = fadd fast float undef, [[ADD_27]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <16 x float> [[TMP7]], <16 x float> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <16 x float> [[TMP7]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <16 x float> [[BIN_RDX]], <16 x float> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <16 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <16 x float> [[BIN_RDX2]], <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX4:%.*]] = fadd fast <16 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; CHECK-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <16 x float> [[BIN_RDX4]], <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX6:%.*]] = fadd fast <16 x float> [[BIN_RDX4]], [[RDX_SHUF5]]
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <16 x float> [[BIN_RDX6]], i32 0
+; CHECK-NEXT: [[RDX_SHUF7:%.*]] = shufflevector <8 x float> [[TMP5]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX8:%.*]] = fadd fast <8 x float> [[TMP5]], [[RDX_SHUF7]]
+; CHECK-NEXT: [[RDX_SHUF9:%.*]] = shufflevector <8 x float> [[BIN_RDX8]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX10:%.*]] = fadd fast <8 x float> [[BIN_RDX8]], [[RDX_SHUF9]]
+; CHECK-NEXT: [[RDX_SHUF11:%.*]] = shufflevector <8 x float> [[BIN_RDX10]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX12:%.*]] = fadd fast <8 x float> [[BIN_RDX10]], [[RDX_SHUF11]]
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <8 x float> [[BIN_RDX12]], i32 0
+; CHECK-NEXT: [[BIN_RDX13:%.*]] = fadd fast float [[TMP8]], [[TMP9]]
+; CHECK-NEXT: [[RDX_SHUF14:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX15:%.*]] = fadd fast <4 x float> [[TMP3]], [[RDX_SHUF14]]
+; CHECK-NEXT: [[RDX_SHUF16:%.*]] = shufflevector <4 x float> [[BIN_RDX15]], <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX17:%.*]] = fadd fast <4 x float> [[BIN_RDX15]], [[RDX_SHUF16]]
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x float> [[BIN_RDX17]], i32 0
+; CHECK-NEXT: [[BIN_RDX18:%.*]] = fadd fast float [[BIN_RDX13]], [[TMP10]]
+; CHECK-NEXT: [[TMP11:%.*]] = fadd fast float [[BIN_RDX18]], [[TMP1]]
+; CHECK-NEXT: [[TMP12:%.*]] = fadd fast float [[TMP11]], [[TMP0]]
+; CHECK-NEXT: [[ADD_29:%.*]] = fadd fast float undef, [[ADD_28]]
+; CHECK-NEXT: ret float [[TMP12]]
+;
+; THRESHOLD-LABEL: @loadadd31(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; THRESHOLD-NEXT: [[TMP0:%.*]] = load float, float* [[ARRAYIDX]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load float, float* [[ARRAYIDX_1]], align 4
+; THRESHOLD-NEXT: [[ADD_1:%.*]] = fadd fast float [[TMP1]], [[TMP0]]
+; THRESHOLD-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; THRESHOLD-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; THRESHOLD-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; THRESHOLD-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; THRESHOLD-NEXT: [[TMP2:%.*]] = bitcast float* [[ARRAYIDX_2]] to <4 x float>*
+; THRESHOLD-NEXT: [[TMP3:%.*]] = load <4 x float>, <4 x float>* [[TMP2]], align 4
+; THRESHOLD-NEXT: [[ADD_2:%.*]] = fadd fast float undef, [[ADD_1]]
+; THRESHOLD-NEXT: [[ADD_3:%.*]] = fadd fast float undef, [[ADD_2]]
+; THRESHOLD-NEXT: [[ADD_4:%.*]] = fadd fast float undef, [[ADD_3]]
+; THRESHOLD-NEXT: [[ADD_5:%.*]] = fadd fast float undef, [[ADD_4]]
+; THRESHOLD-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; THRESHOLD-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, float* [[X]], i64 8
+; THRESHOLD-NEXT: [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, float* [[X]], i64 9
+; THRESHOLD-NEXT: [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, float* [[X]], i64 10
+; THRESHOLD-NEXT: [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, float* [[X]], i64 11
+; THRESHOLD-NEXT: [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, float* [[X]], i64 12
+; THRESHOLD-NEXT: [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, float* [[X]], i64 13
+; THRESHOLD-NEXT: [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, float* [[X]], i64 14
+; THRESHOLD-NEXT: [[TMP4:%.*]] = bitcast float* [[ARRAYIDX_6]] to <8 x float>*
+; THRESHOLD-NEXT: [[TMP5:%.*]] = load <8 x float>, <8 x float>* [[TMP4]], align 4
+; THRESHOLD-NEXT: [[ADD_6:%.*]] = fadd fast float undef, [[ADD_5]]
+; THRESHOLD-NEXT: [[ADD_7:%.*]] = fadd fast float undef, [[ADD_6]]
+; THRESHOLD-NEXT: [[ADD_8:%.*]] = fadd fast float undef, [[ADD_7]]
+; THRESHOLD-NEXT: [[ADD_9:%.*]] = fadd fast float undef, [[ADD_8]]
+; THRESHOLD-NEXT: [[ADD_10:%.*]] = fadd fast float undef, [[ADD_9]]
+; THRESHOLD-NEXT: [[ADD_11:%.*]] = fadd fast float undef, [[ADD_10]]
+; THRESHOLD-NEXT: [[ADD_12:%.*]] = fadd fast float undef, [[ADD_11]]
+; THRESHOLD-NEXT: [[ADD_13:%.*]] = fadd fast float undef, [[ADD_12]]
+; THRESHOLD-NEXT: [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, float* [[X]], i64 15
+; THRESHOLD-NEXT: [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, float* [[X]], i64 16
+; THRESHOLD-NEXT: [[ARRAYIDX_16:%.*]] = getelementptr inbounds float, float* [[X]], i64 17
+; THRESHOLD-NEXT: [[ARRAYIDX_17:%.*]] = getelementptr inbounds float, float* [[X]], i64 18
+; THRESHOLD-NEXT: [[ARRAYIDX_18:%.*]] = getelementptr inbounds float, float* [[X]], i64 19
+; THRESHOLD-NEXT: [[ARRAYIDX_19:%.*]] = getelementptr inbounds float, float* [[X]], i64 20
+; THRESHOLD-NEXT: [[ARRAYIDX_20:%.*]] = getelementptr inbounds float, float* [[X]], i64 21
+; THRESHOLD-NEXT: [[ARRAYIDX_21:%.*]] = getelementptr inbounds float, float* [[X]], i64 22
+; THRESHOLD-NEXT: [[ARRAYIDX_22:%.*]] = getelementptr inbounds float, float* [[X]], i64 23
+; THRESHOLD-NEXT: [[ARRAYIDX_23:%.*]] = getelementptr inbounds float, float* [[X]], i64 24
+; THRESHOLD-NEXT: [[ARRAYIDX_24:%.*]] = getelementptr inbounds float, float* [[X]], i64 25
+; THRESHOLD-NEXT: [[ARRAYIDX_25:%.*]] = getelementptr inbounds float, float* [[X]], i64 26
+; THRESHOLD-NEXT: [[ARRAYIDX_26:%.*]] = getelementptr inbounds float, float* [[X]], i64 27
+; THRESHOLD-NEXT: [[ARRAYIDX_27:%.*]] = getelementptr inbounds float, float* [[X]], i64 28
+; THRESHOLD-NEXT: [[ARRAYIDX_28:%.*]] = getelementptr inbounds float, float* [[X]], i64 29
+; THRESHOLD-NEXT: [[ARRAYIDX_29:%.*]] = getelementptr inbounds float, float* [[X]], i64 30
+; THRESHOLD-NEXT: [[TMP6:%.*]] = bitcast float* [[ARRAYIDX_14]] to <16 x float>*
+; THRESHOLD-NEXT: [[TMP7:%.*]] = load <16 x float>, <16 x float>* [[TMP6]], align 4
+; THRESHOLD-NEXT: [[ADD_14:%.*]] = fadd fast float undef, [[ADD_13]]
+; THRESHOLD-NEXT: [[ADD_15:%.*]] = fadd fast float undef, [[ADD_14]]
+; THRESHOLD-NEXT: [[ADD_16:%.*]] = fadd fast float undef, [[ADD_15]]
+; THRESHOLD-NEXT: [[ADD_17:%.*]] = fadd fast float undef, [[ADD_16]]
+; THRESHOLD-NEXT: [[ADD_18:%.*]] = fadd fast float undef, [[ADD_17]]
+; THRESHOLD-NEXT: [[ADD_19:%.*]] = fadd fast float undef, [[ADD_18]]
+; THRESHOLD-NEXT: [[ADD_20:%.*]] = fadd fast float undef, [[ADD_19]]
+; THRESHOLD-NEXT: [[ADD_21:%.*]] = fadd fast float undef, [[ADD_20]]
+; THRESHOLD-NEXT: [[ADD_22:%.*]] = fadd fast float undef, [[ADD_21]]
+; THRESHOLD-NEXT: [[ADD_23:%.*]] = fadd fast float undef, [[ADD_22]]
+; THRESHOLD-NEXT: [[ADD_24:%.*]] = fadd fast float undef, [[ADD_23]]
+; THRESHOLD-NEXT: [[ADD_25:%.*]] = fadd fast float undef, [[ADD_24]]
+; THRESHOLD-NEXT: [[ADD_26:%.*]] = fadd fast float undef, [[ADD_25]]
+; THRESHOLD-NEXT: [[ADD_27:%.*]] = fadd fast float undef, [[ADD_26]]
+; THRESHOLD-NEXT: [[ADD_28:%.*]] = fadd fast float undef, [[ADD_27]]
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <16 x float> [[TMP7]], <16 x float> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = fadd fast <16 x float> [[TMP7]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <16 x float> [[BIN_RDX]], <16 x float> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = fadd fast <16 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <16 x float> [[BIN_RDX2]], <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX4:%.*]] = fadd fast <16 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; THRESHOLD-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <16 x float> [[BIN_RDX4]], <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX6:%.*]] = fadd fast <16 x float> [[BIN_RDX4]], [[RDX_SHUF5]]
+; THRESHOLD-NEXT: [[TMP8:%.*]] = extractelement <16 x float> [[BIN_RDX6]], i32 0
+; THRESHOLD-NEXT: [[RDX_SHUF7:%.*]] = shufflevector <8 x float> [[TMP5]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX8:%.*]] = fadd fast <8 x float> [[TMP5]], [[RDX_SHUF7]]
+; THRESHOLD-NEXT: [[RDX_SHUF9:%.*]] = shufflevector <8 x float> [[BIN_RDX8]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX10:%.*]] = fadd fast <8 x float> [[BIN_RDX8]], [[RDX_SHUF9]]
+; THRESHOLD-NEXT: [[RDX_SHUF11:%.*]] = shufflevector <8 x float> [[BIN_RDX10]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX12:%.*]] = fadd fast <8 x float> [[BIN_RDX10]], [[RDX_SHUF11]]
+; THRESHOLD-NEXT: [[TMP9:%.*]] = extractelement <8 x float> [[BIN_RDX12]], i32 0
+; THRESHOLD-NEXT: [[BIN_RDX13:%.*]] = fadd fast float [[TMP8]], [[TMP9]]
+; THRESHOLD-NEXT: [[RDX_SHUF14:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX15:%.*]] = fadd fast <4 x float> [[TMP3]], [[RDX_SHUF14]]
+; THRESHOLD-NEXT: [[RDX_SHUF16:%.*]] = shufflevector <4 x float> [[BIN_RDX15]], <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX17:%.*]] = fadd fast <4 x float> [[BIN_RDX15]], [[RDX_SHUF16]]
+; THRESHOLD-NEXT: [[TMP10:%.*]] = extractelement <4 x float> [[BIN_RDX17]], i32 0
+; THRESHOLD-NEXT: [[BIN_RDX18:%.*]] = fadd fast float [[BIN_RDX13]], [[TMP10]]
+; THRESHOLD-NEXT: [[TMP11:%.*]] = fadd fast float [[BIN_RDX18]], [[TMP1]]
+; THRESHOLD-NEXT: [[TMP12:%.*]] = fadd fast float [[TMP11]], [[TMP0]]
+; THRESHOLD-NEXT: [[ADD_29:%.*]] = fadd fast float undef, [[ADD_28]]
+; THRESHOLD-NEXT: ret float [[TMP12]]
+;
+ entry:
+ %arrayidx = getelementptr inbounds float, float* %x, i64 1
+ %0 = load float, float* %arrayidx, align 4
+ %arrayidx.1 = getelementptr inbounds float, float* %x, i64 2
+ %1 = load float, float* %arrayidx.1, align 4
+ %add.1 = fadd fast float %1, %0
+ %arrayidx.2 = getelementptr inbounds float, float* %x, i64 3
+ %2 = load float, float* %arrayidx.2, align 4
+ %add.2 = fadd fast float %2, %add.1
+ %arrayidx.3 = getelementptr inbounds float, float* %x, i64 4
+ %3 = load float, float* %arrayidx.3, align 4
+ %add.3 = fadd fast float %3, %add.2
+ %arrayidx.4 = getelementptr inbounds float, float* %x, i64 5
+ %4 = load float, float* %arrayidx.4, align 4
+ %add.4 = fadd fast float %4, %add.3
+ %arrayidx.5 = getelementptr inbounds float, float* %x, i64 6
+ %5 = load float, float* %arrayidx.5, align 4
+ %add.5 = fadd fast float %5, %add.4
+ %arrayidx.6 = getelementptr inbounds float, float* %x, i64 7
+ %6 = load float, float* %arrayidx.6, align 4
+ %add.6 = fadd fast float %6, %add.5
+ %arrayidx.7 = getelementptr inbounds float, float* %x, i64 8
+ %7 = load float, float* %arrayidx.7, align 4
+ %add.7 = fadd fast float %7, %add.6
+ %arrayidx.8 = getelementptr inbounds float, float* %x, i64 9
+ %8 = load float, float* %arrayidx.8, align 4
+ %add.8 = fadd fast float %8, %add.7
+ %arrayidx.9 = getelementptr inbounds float, float* %x, i64 10
+ %9 = load float, float* %arrayidx.9, align 4
+ %add.9 = fadd fast float %9, %add.8
+ %arrayidx.10 = getelementptr inbounds float, float* %x, i64 11
+ %10 = load float, float* %arrayidx.10, align 4
+ %add.10 = fadd fast float %10, %add.9
+ %arrayidx.11 = getelementptr inbounds float, float* %x, i64 12
+ %11 = load float, float* %arrayidx.11, align 4
+ %add.11 = fadd fast float %11, %add.10
+ %arrayidx.12 = getelementptr inbounds float, float* %x, i64 13
+ %12 = load float, float* %arrayidx.12, align 4
+ %add.12 = fadd fast float %12, %add.11
+ %arrayidx.13 = getelementptr inbounds float, float* %x, i64 14
+ %13 = load float, float* %arrayidx.13, align 4
+ %add.13 = fadd fast float %13, %add.12
+ %arrayidx.14 = getelementptr inbounds float, float* %x, i64 15
+ %14 = load float, float* %arrayidx.14, align 4
+ %add.14 = fadd fast float %14, %add.13
+ %arrayidx.15 = getelementptr inbounds float, float* %x, i64 16
+ %15 = load float, float* %arrayidx.15, align 4
+ %add.15 = fadd fast float %15, %add.14
+ %arrayidx.16 = getelementptr inbounds float, float* %x, i64 17
+ %16 = load float, float* %arrayidx.16, align 4
+ %add.16 = fadd fast float %16, %add.15
+ %arrayidx.17 = getelementptr inbounds float, float* %x, i64 18
+ %17 = load float, float* %arrayidx.17, align 4
+ %add.17 = fadd fast float %17, %add.16
+ %arrayidx.18 = getelementptr inbounds float, float* %x, i64 19
+ %18 = load float, float* %arrayidx.18, align 4
+ %add.18 = fadd fast float %18, %add.17
+ %arrayidx.19 = getelementptr inbounds float, float* %x, i64 20
+ %19 = load float, float* %arrayidx.19, align 4
+ %add.19 = fadd fast float %19, %add.18
+ %arrayidx.20 = getelementptr inbounds float, float* %x, i64 21
+ %20 = load float, float* %arrayidx.20, align 4
+ %add.20 = fadd fast float %20, %add.19
+ %arrayidx.21 = getelementptr inbounds float, float* %x, i64 22
+ %21 = load float, float* %arrayidx.21, align 4
+ %add.21 = fadd fast float %21, %add.20
+ %arrayidx.22 = getelementptr inbounds float, float* %x, i64 23
+ %22 = load float, float* %arrayidx.22, align 4
+ %add.22 = fadd fast float %22, %add.21
+ %arrayidx.23 = getelementptr inbounds float, float* %x, i64 24
+ %23 = load float, float* %arrayidx.23, align 4
+ %add.23 = fadd fast float %23, %add.22
+ %arrayidx.24 = getelementptr inbounds float, float* %x, i64 25
+ %24 = load float, float* %arrayidx.24, align 4
+ %add.24 = fadd fast float %24, %add.23
+ %arrayidx.25 = getelementptr inbounds float, float* %x, i64 26
+ %25 = load float, float* %arrayidx.25, align 4
+ %add.25 = fadd fast float %25, %add.24
+ %arrayidx.26 = getelementptr inbounds float, float* %x, i64 27
+ %26 = load float, float* %arrayidx.26, align 4
+ %add.26 = fadd fast float %26, %add.25
+ %arrayidx.27 = getelementptr inbounds float, float* %x, i64 28
+ %27 = load float, float* %arrayidx.27, align 4
+ %add.27 = fadd fast float %27, %add.26
+ %arrayidx.28 = getelementptr inbounds float, float* %x, i64 29
+ %28 = load float, float* %arrayidx.28, align 4
+ %add.28 = fadd fast float %28, %add.27
+ %arrayidx.29 = getelementptr inbounds float, float* %x, i64 30
+ %29 = load float, float* %arrayidx.29, align 4
+ %add.29 = fadd fast float %29, %add.28
+ ret float %add.29
+}
+
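+; extra_args: the fadd chain over x[0]..x[7] also mixes in %conv (twice) and
+; the constant 3.0. The checks expect the eight loads to form an <8 x float>
+; horizontal reduction, with the extra operands re-added to the reduced
+; scalar afterwards via the trailing BIN_EXTRA fadds.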
+define float @extra_args(float* nocapture readonly %x, i32 %a, i32 %b) {
+; CHECK-LABEL: @extra_args(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[B:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[CONV]], 3.000000e+00
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; CHECK-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; CHECK-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; CHECK-NEXT: [[ARRAYIDX3_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; CHECK-NEXT: [[ARRAYIDX3_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; CHECK-NEXT: [[ARRAYIDX3_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; CHECK-NEXT: [[ARRAYIDX3_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <8 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[ADD1:%.*]] = fadd fast float undef, [[ADD]]
+; CHECK-NEXT: [[ADD4:%.*]] = fadd fast float undef, [[ADD1]]
+; CHECK-NEXT: [[ADD5:%.*]] = fadd fast float [[ADD4]], [[CONV]]
+; CHECK-NEXT: [[ADD4_1:%.*]] = fadd fast float undef, [[ADD5]]
+; CHECK-NEXT: [[ADD4_2:%.*]] = fadd fast float undef, [[ADD4_1]]
+; CHECK-NEXT: [[ADD4_3:%.*]] = fadd fast float undef, [[ADD4_2]]
+; CHECK-NEXT: [[ADD4_4:%.*]] = fadd fast float undef, [[ADD4_3]]
+; CHECK-NEXT: [[ADD4_5:%.*]] = fadd fast float undef, [[ADD4_4]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x float> [[TMP1]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <8 x float> [[TMP1]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x float> [[BIN_RDX]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <8 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x float> [[BIN_RDX2]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX4:%.*]] = fadd fast <8 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <8 x float> [[BIN_RDX4]], i32 0
+; CHECK-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP2]], [[ADD]]
+; CHECK-NEXT: [[BIN_EXTRA5:%.*]] = fadd fast float [[BIN_EXTRA]], [[CONV]]
+; CHECK-NEXT: [[ADD4_6:%.*]] = fadd fast float undef, [[ADD4_5]]
+; CHECK-NEXT: ret float [[BIN_EXTRA5]]
+;
+; THRESHOLD-LABEL: @extra_args(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[MUL:%.*]] = mul nsw i32 [[B:%.*]], [[A:%.*]]
+; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
+; THRESHOLD-NEXT: [[ADD:%.*]] = fadd fast float [[CONV]], 3.000000e+00
+; THRESHOLD-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; THRESHOLD-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; THRESHOLD-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; THRESHOLD-NEXT: [[ARRAYIDX3_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; THRESHOLD-NEXT: [[ARRAYIDX3_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; THRESHOLD-NEXT: [[ARRAYIDX3_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; THRESHOLD-NEXT: [[ARRAYIDX3_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; THRESHOLD-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <8 x float>*
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* [[TMP0]], align 4
+; THRESHOLD-NEXT: [[ADD1:%.*]] = fadd fast float undef, [[ADD]]
+; THRESHOLD-NEXT: [[ADD4:%.*]] = fadd fast float undef, [[ADD1]]
+; THRESHOLD-NEXT: [[ADD5:%.*]] = fadd fast float [[ADD4]], [[CONV]]
+; THRESHOLD-NEXT: [[ADD4_1:%.*]] = fadd fast float undef, [[ADD5]]
+; THRESHOLD-NEXT: [[ADD4_2:%.*]] = fadd fast float undef, [[ADD4_1]]
+; THRESHOLD-NEXT: [[ADD4_3:%.*]] = fadd fast float undef, [[ADD4_2]]
+; THRESHOLD-NEXT: [[ADD4_4:%.*]] = fadd fast float undef, [[ADD4_3]]
+; THRESHOLD-NEXT: [[ADD4_5:%.*]] = fadd fast float undef, [[ADD4_4]]
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x float> [[TMP1]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = fadd fast <8 x float> [[TMP1]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x float> [[BIN_RDX]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = fadd fast <8 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x float> [[BIN_RDX2]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX4:%.*]] = fadd fast <8 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; THRESHOLD-NEXT: [[TMP2:%.*]] = extractelement <8 x float> [[BIN_RDX4]], i32 0
+; THRESHOLD-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP2]], [[ADD]]
+; THRESHOLD-NEXT: [[BIN_EXTRA5:%.*]] = fadd fast float [[BIN_EXTRA]], [[CONV]]
+; THRESHOLD-NEXT: [[ADD4_6:%.*]] = fadd fast float undef, [[ADD4_5]]
+; THRESHOLD-NEXT: ret float [[BIN_EXTRA5]]
+;
+ entry:
+ %mul = mul nsw i32 %b, %a
+ %conv = sitofp i32 %mul to float
+ %0 = load float, float* %x, align 4
+ %add = fadd fast float %conv, 3.000000e+00
+ %add1 = fadd fast float %0, %add
+ %arrayidx3 = getelementptr inbounds float, float* %x, i64 1
+ %1 = load float, float* %arrayidx3, align 4
+ %add4 = fadd fast float %1, %add1
+ %add5 = fadd fast float %add4, %conv
+ %arrayidx3.1 = getelementptr inbounds float, float* %x, i64 2
+ %2 = load float, float* %arrayidx3.1, align 4
+ %add4.1 = fadd fast float %2, %add5
+ %arrayidx3.2 = getelementptr inbounds float, float* %x, i64 3
+ %3 = load float, float* %arrayidx3.2, align 4
+ %add4.2 = fadd fast float %3, %add4.1
+ %arrayidx3.3 = getelementptr inbounds float, float* %x, i64 4
+ %4 = load float, float* %arrayidx3.3, align 4
+ %add4.3 = fadd fast float %4, %add4.2
+ %arrayidx3.4 = getelementptr inbounds float, float* %x, i64 5
+ %5 = load float, float* %arrayidx3.4, align 4
+ %add4.4 = fadd fast float %5, %add4.3
+ %arrayidx3.5 = getelementptr inbounds float, float* %x, i64 6
+ %6 = load float, float* %arrayidx3.5, align 4
+ %add4.5 = fadd fast float %6, %add4.4
+ %arrayidx3.6 = getelementptr inbounds float, float* %x, i64 7
+ %7 = load float, float* %arrayidx3.6, align 4
+ %add4.6 = fadd fast float %7, %add4.5
+ ret float %add4.6
+}
+
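+; extra_args_same_several_times: like extra_args, but the constant 5.0 is
+; fed into the chain twice. Both occurrences must survive as separate adds
+; after the vector reduction (two fadds of 5.0 in the BIN_EXTRA sequence).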
+define float @extra_args_same_several_times(float* nocapture readonly %x, i32 %a, i32 %b) {
+; CHECK-LABEL: @extra_args_same_several_times(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[B:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[CONV]], 3.000000e+00
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; CHECK-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; CHECK-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; CHECK-NEXT: [[ARRAYIDX3_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; CHECK-NEXT: [[ARRAYIDX3_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; CHECK-NEXT: [[ARRAYIDX3_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; CHECK-NEXT: [[ARRAYIDX3_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <8 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[ADD1:%.*]] = fadd fast float undef, [[ADD]]
+; CHECK-NEXT: [[ADD4:%.*]] = fadd fast float undef, [[ADD1]]
+; CHECK-NEXT: [[ADD41:%.*]] = fadd fast float [[ADD4]], 5.000000e+00
+; CHECK-NEXT: [[ADD5:%.*]] = fadd fast float [[ADD41]], [[CONV]]
+; CHECK-NEXT: [[ADD4_1:%.*]] = fadd fast float undef, [[ADD5]]
+; CHECK-NEXT: [[ADD4_11:%.*]] = fadd fast float [[ADD4_1]], 5.000000e+00
+; CHECK-NEXT: [[ADD4_2:%.*]] = fadd fast float undef, [[ADD4_11]]
+; CHECK-NEXT: [[ADD4_3:%.*]] = fadd fast float undef, [[ADD4_2]]
+; CHECK-NEXT: [[ADD4_4:%.*]] = fadd fast float undef, [[ADD4_3]]
+; CHECK-NEXT: [[ADD4_5:%.*]] = fadd fast float undef, [[ADD4_4]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x float> [[TMP1]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <8 x float> [[TMP1]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x float> [[BIN_RDX]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <8 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x float> [[BIN_RDX2]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX4:%.*]] = fadd fast <8 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <8 x float> [[BIN_RDX4]], i32 0
+; CHECK-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP2]], [[ADD]]
+; CHECK-NEXT: [[BIN_EXTRA5:%.*]] = fadd fast float [[BIN_EXTRA]], 5.000000e+00
+; CHECK-NEXT: [[BIN_EXTRA6:%.*]] = fadd fast float [[BIN_EXTRA5]], 5.000000e+00
+; CHECK-NEXT: [[BIN_EXTRA7:%.*]] = fadd fast float [[BIN_EXTRA6]], [[CONV]]
+; CHECK-NEXT: [[ADD4_6:%.*]] = fadd fast float undef, [[ADD4_5]]
+; CHECK-NEXT: ret float [[BIN_EXTRA7]]
+;
+; THRESHOLD-LABEL: @extra_args_same_several_times(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[MUL:%.*]] = mul nsw i32 [[B:%.*]], [[A:%.*]]
+; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
+; THRESHOLD-NEXT: [[ADD:%.*]] = fadd fast float [[CONV]], 3.000000e+00
+; THRESHOLD-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; THRESHOLD-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; THRESHOLD-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; THRESHOLD-NEXT: [[ARRAYIDX3_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; THRESHOLD-NEXT: [[ARRAYIDX3_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; THRESHOLD-NEXT: [[ARRAYIDX3_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; THRESHOLD-NEXT: [[ARRAYIDX3_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; THRESHOLD-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <8 x float>*
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* [[TMP0]], align 4
+; THRESHOLD-NEXT: [[ADD1:%.*]] = fadd fast float undef, [[ADD]]
+; THRESHOLD-NEXT: [[ADD4:%.*]] = fadd fast float undef, [[ADD1]]
+; THRESHOLD-NEXT: [[ADD41:%.*]] = fadd fast float [[ADD4]], 5.000000e+00
+; THRESHOLD-NEXT: [[ADD5:%.*]] = fadd fast float [[ADD41]], [[CONV]]
+; THRESHOLD-NEXT: [[ADD4_1:%.*]] = fadd fast float undef, [[ADD5]]
+; THRESHOLD-NEXT: [[ADD4_11:%.*]] = fadd fast float [[ADD4_1]], 5.000000e+00
+; THRESHOLD-NEXT: [[ADD4_2:%.*]] = fadd fast float undef, [[ADD4_11]]
+; THRESHOLD-NEXT: [[ADD4_3:%.*]] = fadd fast float undef, [[ADD4_2]]
+; THRESHOLD-NEXT: [[ADD4_4:%.*]] = fadd fast float undef, [[ADD4_3]]
+; THRESHOLD-NEXT: [[ADD4_5:%.*]] = fadd fast float undef, [[ADD4_4]]
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x float> [[TMP1]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = fadd fast <8 x float> [[TMP1]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x float> [[BIN_RDX]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = fadd fast <8 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x float> [[BIN_RDX2]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX4:%.*]] = fadd fast <8 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; THRESHOLD-NEXT: [[TMP2:%.*]] = extractelement <8 x float> [[BIN_RDX4]], i32 0
+; THRESHOLD-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP2]], [[ADD]]
+; THRESHOLD-NEXT: [[BIN_EXTRA5:%.*]] = fadd fast float [[BIN_EXTRA]], 5.000000e+00
+; THRESHOLD-NEXT: [[BIN_EXTRA6:%.*]] = fadd fast float [[BIN_EXTRA5]], 5.000000e+00
+; THRESHOLD-NEXT: [[BIN_EXTRA7:%.*]] = fadd fast float [[BIN_EXTRA6]], [[CONV]]
+; THRESHOLD-NEXT: [[ADD4_6:%.*]] = fadd fast float undef, [[ADD4_5]]
+; THRESHOLD-NEXT: ret float [[BIN_EXTRA7]]
+;
+ entry:
+ %mul = mul nsw i32 %b, %a
+ %conv = sitofp i32 %mul to float
+ %0 = load float, float* %x, align 4
+ %add = fadd fast float %conv, 3.000000e+00
+ %add1 = fadd fast float %0, %add
+ %arrayidx3 = getelementptr inbounds float, float* %x, i64 1
+ %1 = load float, float* %arrayidx3, align 4
+ %add4 = fadd fast float %1, %add1
+ %add41 = fadd fast float %add4, 5.000000e+00
+ %add5 = fadd fast float %add41, %conv
+ %arrayidx3.1 = getelementptr inbounds float, float* %x, i64 2
+ %2 = load float, float* %arrayidx3.1, align 4
+ %add4.1 = fadd fast float %2, %add5
+ %add4.11 = fadd fast float %add4.1, 5.000000e+00
+ %arrayidx3.2 = getelementptr inbounds float, float* %x, i64 3
+ %3 = load float, float* %arrayidx3.2, align 4
+ %add4.2 = fadd fast float %3, %add4.11
+ %arrayidx3.3 = getelementptr inbounds float, float* %x, i64 4
+ %4 = load float, float* %arrayidx3.3, align 4
+ %add4.3 = fadd fast float %4, %add4.2
+ %arrayidx3.4 = getelementptr inbounds float, float* %x, i64 5
+ %5 = load float, float* %arrayidx3.4, align 4
+ %add4.4 = fadd fast float %5, %add4.3
+ %arrayidx3.5 = getelementptr inbounds float, float* %x, i64 6
+ %6 = load float, float* %arrayidx3.5, align 4
+ %add4.5 = fadd fast float %6, %add4.4
+ %arrayidx3.6 = getelementptr inbounds float, float* %x, i64 7
+ %7 = load float, float* %arrayidx3.6, align 4
+ %add4.6 = fadd fast float %7, %add4.5
+ ret float %add4.6
+}
+
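+; extra_args_no_replace: the extra operand %conv enters the chain partway
+; through (after the fifth load) rather than at its head. The eight loads
+; are still expected to be reduced as one <8 x float> reduction, with %add
+; and %conv re-added to the scalar result.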
+define float @extra_args_no_replace(float* nocapture readonly %x, i32 %a, i32 %b, i32 %c) {
+; CHECK-LABEL: @extra_args_no_replace(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[B:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
+; CHECK-NEXT: [[CONVC:%.*]] = sitofp i32 [[C:%.*]] to float
+; CHECK-NEXT: [[ADDC:%.*]] = fadd fast float [[CONVC]], 3.000000e+00
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[CONV]], [[ADDC]]
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; CHECK-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; CHECK-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; CHECK-NEXT: [[ARRAYIDX3_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; CHECK-NEXT: [[ARRAYIDX3_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; CHECK-NEXT: [[ARRAYIDX3_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; CHECK-NEXT: [[ARRAYIDX3_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <8 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[ADD1:%.*]] = fadd fast float undef, [[ADD]]
+; CHECK-NEXT: [[ADD4:%.*]] = fadd fast float undef, [[ADD1]]
+; CHECK-NEXT: [[ADD4_1:%.*]] = fadd fast float undef, [[ADD4]]
+; CHECK-NEXT: [[ADD4_2:%.*]] = fadd fast float undef, [[ADD4_1]]
+; CHECK-NEXT: [[ADD4_3:%.*]] = fadd fast float undef, [[ADD4_2]]
+; CHECK-NEXT: [[ADD5:%.*]] = fadd fast float [[ADD4_3]], [[CONV]]
+; CHECK-NEXT: [[ADD4_4:%.*]] = fadd fast float undef, [[ADD5]]
+; CHECK-NEXT: [[ADD4_5:%.*]] = fadd fast float undef, [[ADD4_4]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x float> [[TMP1]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <8 x float> [[TMP1]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x float> [[BIN_RDX]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <8 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x float> [[BIN_RDX2]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX4:%.*]] = fadd fast <8 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <8 x float> [[BIN_RDX4]], i32 0
+; CHECK-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP2]], [[ADD]]
+; CHECK-NEXT: [[BIN_EXTRA5:%.*]] = fadd fast float [[BIN_EXTRA]], [[CONV]]
+; CHECK-NEXT: [[ADD4_6:%.*]] = fadd fast float undef, [[ADD4_5]]
+; CHECK-NEXT: ret float [[BIN_EXTRA5]]
+;
+; THRESHOLD-LABEL: @extra_args_no_replace(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[MUL:%.*]] = mul nsw i32 [[B:%.*]], [[A:%.*]]
+; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
+; THRESHOLD-NEXT: [[CONVC:%.*]] = sitofp i32 [[C:%.*]] to float
+; THRESHOLD-NEXT: [[ADDC:%.*]] = fadd fast float [[CONVC]], 3.000000e+00
+; THRESHOLD-NEXT: [[ADD:%.*]] = fadd fast float [[CONV]], [[ADDC]]
+; THRESHOLD-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; THRESHOLD-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; THRESHOLD-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; THRESHOLD-NEXT: [[ARRAYIDX3_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; THRESHOLD-NEXT: [[ARRAYIDX3_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; THRESHOLD-NEXT: [[ARRAYIDX3_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; THRESHOLD-NEXT: [[ARRAYIDX3_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; THRESHOLD-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <8 x float>*
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* [[TMP0]], align 4
+; THRESHOLD-NEXT: [[ADD1:%.*]] = fadd fast float undef, [[ADD]]
+; THRESHOLD-NEXT: [[ADD4:%.*]] = fadd fast float undef, [[ADD1]]
+; THRESHOLD-NEXT: [[ADD4_1:%.*]] = fadd fast float undef, [[ADD4]]
+; THRESHOLD-NEXT: [[ADD4_2:%.*]] = fadd fast float undef, [[ADD4_1]]
+; THRESHOLD-NEXT: [[ADD4_3:%.*]] = fadd fast float undef, [[ADD4_2]]
+; THRESHOLD-NEXT: [[ADD5:%.*]] = fadd fast float [[ADD4_3]], [[CONV]]
+; THRESHOLD-NEXT: [[ADD4_4:%.*]] = fadd fast float undef, [[ADD5]]
+; THRESHOLD-NEXT: [[ADD4_5:%.*]] = fadd fast float undef, [[ADD4_4]]
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x float> [[TMP1]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = fadd fast <8 x float> [[TMP1]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x float> [[BIN_RDX]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = fadd fast <8 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x float> [[BIN_RDX2]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX4:%.*]] = fadd fast <8 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; THRESHOLD-NEXT: [[TMP2:%.*]] = extractelement <8 x float> [[BIN_RDX4]], i32 0
+; THRESHOLD-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP2]], [[ADD]]
+; THRESHOLD-NEXT: [[BIN_EXTRA5:%.*]] = fadd fast float [[BIN_EXTRA]], [[CONV]]
+; THRESHOLD-NEXT: [[ADD4_6:%.*]] = fadd fast float undef, [[ADD4_5]]
+; THRESHOLD-NEXT: ret float [[BIN_EXTRA5]]
+;
+ entry:
+ %mul = mul nsw i32 %b, %a
+ %conv = sitofp i32 %mul to float
+ %0 = load float, float* %x, align 4
+ %convc = sitofp i32 %c to float
+ %addc = fadd fast float %convc, 3.000000e+00
+ %add = fadd fast float %conv, %addc
+ %add1 = fadd fast float %0, %add
+ %arrayidx3 = getelementptr inbounds float, float* %x, i64 1
+ %1 = load float, float* %arrayidx3, align 4
+ %add4 = fadd fast float %1, %add1
+ %arrayidx3.1 = getelementptr inbounds float, float* %x, i64 2
+ %2 = load float, float* %arrayidx3.1, align 4
+ %add4.1 = fadd fast float %2, %add4
+ %arrayidx3.2 = getelementptr inbounds float, float* %x, i64 3
+ %3 = load float, float* %arrayidx3.2, align 4
+ %add4.2 = fadd fast float %3, %add4.1
+ %arrayidx3.3 = getelementptr inbounds float, float* %x, i64 4
+ %4 = load float, float* %arrayidx3.3, align 4
+ %add4.3 = fadd fast float %4, %add4.2
+ %add5 = fadd fast float %add4.3, %conv
+ %arrayidx3.4 = getelementptr inbounds float, float* %x, i64 5
+ %5 = load float, float* %arrayidx3.4, align 4
+ %add4.4 = fadd fast float %5, %add5
+ %arrayidx3.5 = getelementptr inbounds float, float* %x, i64 6
+ %6 = load float, float* %arrayidx3.5, align 4
+ %add4.5 = fadd fast float %6, %add4.4
+ %arrayidx3.6 = getelementptr inbounds float, float* %x, i64 7
+ %7 = load float, float* %arrayidx3.6, align 4
+ %add4.6 = fadd fast float %7, %add4.5
+ ret float %add4.6
+}
+
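+; wobble: four identical xor/icmp eq/sext chains summed with adds. The
+; checks expect the xors, compares, and sexts to be vectorized to <4 x i32>
+; and the sexts combined with an add reduction; %arg and the lane-3 xor
+; result (%x4) are re-added as extra operands.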
+define i32 @wobble(i32 %arg, i32 %bar) {
+; CHECK-LABEL: @wobble(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[ARG:%.*]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[ARG]], i32 1
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[ARG]], i32 2
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[ARG]], i32 3
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> undef, i32 [[BAR:%.*]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 [[BAR]], i32 1
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[BAR]], i32 2
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[BAR]], i32 3
+; CHECK-NEXT: [[TMP8:%.*]] = xor <4 x i32> [[TMP3]], [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i32> [[TMP8]], i32 3
+; CHECK-NEXT: [[TMP10:%.*]] = icmp eq <4 x i32> [[TMP8]], zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = sext <4 x i1> [[TMP10]] to <4 x i32>
+; CHECK-NEXT: [[R1:%.*]] = add nuw i32 [[ARG]], undef
+; CHECK-NEXT: [[R2:%.*]] = add nsw i32 [[R1]], undef
+; CHECK-NEXT: [[R3:%.*]] = add nsw i32 [[R2]], undef
+; CHECK-NEXT: [[R4:%.*]] = add nsw i32 [[R3]], undef
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <4 x i32> [[TMP11]], <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP11]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x i32> [[BIN_RDX]], <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = add <4 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i32> [[BIN_RDX2]], i32 0
+; CHECK-NEXT: [[BIN_EXTRA:%.*]] = add nuw i32 [[TMP12]], [[ARG]]
+; CHECK-NEXT: [[BIN_EXTRA3:%.*]] = add nsw i32 [[BIN_EXTRA]], [[TMP9]]
+; CHECK-NEXT: [[R5:%.*]] = add nsw i32 [[R4]], undef
+; CHECK-NEXT: ret i32 [[BIN_EXTRA3]]
+;
+; THRESHOLD-LABEL: @wobble(
+; THRESHOLD-NEXT: bb:
+; THRESHOLD-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[ARG:%.*]], i32 0
+; THRESHOLD-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[ARG]], i32 1
+; THRESHOLD-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[ARG]], i32 2
+; THRESHOLD-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[ARG]], i32 3
+; THRESHOLD-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> undef, i32 [[BAR:%.*]], i32 0
+; THRESHOLD-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 [[BAR]], i32 1
+; THRESHOLD-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[BAR]], i32 2
+; THRESHOLD-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[BAR]], i32 3
+; THRESHOLD-NEXT: [[TMP8:%.*]] = xor <4 x i32> [[TMP3]], [[TMP7]]
+; THRESHOLD-NEXT: [[TMP9:%.*]] = extractelement <4 x i32> [[TMP8]], i32 3
+; THRESHOLD-NEXT: [[TMP10:%.*]] = icmp eq <4 x i32> [[TMP8]], zeroinitializer
+; THRESHOLD-NEXT: [[TMP11:%.*]] = sext <4 x i1> [[TMP10]] to <4 x i32>
+; THRESHOLD-NEXT: [[R1:%.*]] = add nuw i32 [[ARG]], undef
+; THRESHOLD-NEXT: [[R2:%.*]] = add nsw i32 [[R1]], undef
+; THRESHOLD-NEXT: [[R3:%.*]] = add nsw i32 [[R2]], undef
+; THRESHOLD-NEXT: [[R4:%.*]] = add nsw i32 [[R3]], undef
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <4 x i32> [[TMP11]], <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP11]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x i32> [[BIN_RDX]], <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = add <4 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[TMP12:%.*]] = extractelement <4 x i32> [[BIN_RDX2]], i32 0
+; THRESHOLD-NEXT: [[BIN_EXTRA:%.*]] = add nuw i32 [[TMP12]], [[ARG]]
+; THRESHOLD-NEXT: [[BIN_EXTRA3:%.*]] = add nsw i32 [[BIN_EXTRA]], [[TMP9]]
+; THRESHOLD-NEXT: [[R5:%.*]] = add nsw i32 [[R4]], undef
+; THRESHOLD-NEXT: ret i32 [[BIN_EXTRA3]]
+;
+ bb:
+ %x1 = xor i32 %arg, %bar
+ %i1 = icmp eq i32 %x1, 0
+ %s1 = sext i1 %i1 to i32
+ %x2 = xor i32 %arg, %bar
+ %i2 = icmp eq i32 %x2, 0
+ %s2 = sext i1 %i2 to i32
+ %x3 = xor i32 %arg, %bar
+ %i3 = icmp eq i32 %x3, 0
+ %s3 = sext i1 %i3 to i32
+ %x4 = xor i32 %arg, %bar
+ %i4 = icmp eq i32 %x4, 0
+ %s4 = sext i1 %i4 to i32
+ %r1 = add nuw i32 %arg, %s1
+ %r2 = add nsw i32 %r1, %s2
+ %r3 = add nsw i32 %r2, %s3
+ %r4 = add nsw i32 %r3, %s4
+ %r5 = add nsw i32 %r4, %x4
+ ret i32 %r5
+}
+
diff --git a/test/Transforms/SLPVectorizer/X86/horizontal.ll b/test/Transforms/SLPVectorizer/X86/horizontal.ll
index f6efd26a4c20..080f850f91cf 100644
--- a/test/Transforms/SLPVectorizer/X86/horizontal.ll
+++ b/test/Transforms/SLPVectorizer/X86/horizontal.ll
@@ -624,9 +624,9 @@ define void @i32_red_example4(i32* %res) {
; STORE-LABEL: @i32_red_example4(
; STORE: [[TMP0:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([32 x i32]* @arr_i32 to <4 x i32>*), align 16
; STORE: [[RDX_SHUF:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP0]], [[RDX_SHUF]]
+; STORE-NEXT: [[BIN_RDX:%.*]] = add nsw <4 x i32> [[TMP0]], [[RDX_SHUF]]
; STORE-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x i32> [[BIN_RDX]], <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX2:%.*]] = add <4 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; STORE-NEXT: [[BIN_RDX2:%.*]] = add nsw <4 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
; STORE-NEXT: [[TMP1:%.*]] = extractelement <4 x i32> [[BIN_RDX2]], i32 0
; STORE: store i32 [[TMP1]], i32* %res, align 16
; STORE-NEXT: ret void
@@ -647,11 +647,11 @@ define void @i32_red_example8(i32* %res) {
; STORE-LABEL: @i32_red_example8(
; STORE: [[TMP0:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([32 x i32]* @arr_i32 to <8 x i32>*), align 16
; STORE: [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP0]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP0]], [[RDX_SHUF]]
+; STORE-NEXT: [[BIN_RDX:%.*]] = add nsw <8 x i32> [[TMP0]], [[RDX_SHUF]]
; STORE-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX2:%.*]] = add <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; STORE-NEXT: [[BIN_RDX2:%.*]] = add nsw <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
; STORE-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX4:%.*]] = add <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
+; STORE-NEXT: [[BIN_RDX4:%.*]] = add nsw <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
; STORE-NEXT: [[TMP1:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0
; STORE: store i32 [[TMP1]], i32* %res, align 16
; STORE-NEXT: ret void
@@ -680,13 +680,13 @@ define void @i32_red_example16(i32* %res) {
; STORE-LABEL: @i32_red_example16(
; STORE: [[TMP0:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([32 x i32]* @arr_i32 to <16 x i32>*), align 16
; STORE: [[RDX_SHUF:%.*]] = shufflevector <16 x i32> [[TMP0]], <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP0]], [[RDX_SHUF]]
+; STORE-NEXT: [[BIN_RDX:%.*]] = add nsw <16 x i32> [[TMP0]], [[RDX_SHUF]]
; STORE-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <16 x i32> [[BIN_RDX]], <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX2:%.*]] = add <16 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; STORE-NEXT: [[BIN_RDX2:%.*]] = add nsw <16 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
; STORE-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <16 x i32> [[BIN_RDX2]], <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX4:%.*]] = add <16 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
+; STORE-NEXT: [[BIN_RDX4:%.*]] = add nsw <16 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
; STORE-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <16 x i32> [[BIN_RDX4]], <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX6:%.*]] = add <16 x i32> [[BIN_RDX4]], [[RDX_SHUF5]]
+; STORE-NEXT: [[BIN_RDX6:%.*]] = add nsw <16 x i32> [[BIN_RDX4]], [[RDX_SHUF5]]
; STORE-NEXT: [[TMP1:%.*]] = extractelement <16 x i32> [[BIN_RDX6]], i32 0
; STORE: store i32 [[TMP1]], i32* %res, align 16
; STORE-NEXT: ret void
@@ -731,15 +731,15 @@ define void @i32_red_example32(i32* %res) {
; STORE-LABEL: @i32_red_example32(
; STORE: [[TMP0:%.*]] = load <32 x i32>, <32 x i32>* bitcast ([32 x i32]* @arr_i32 to <32 x i32>*), align 16
; STORE: [[RDX_SHUF:%.*]] = shufflevector <32 x i32> [[TMP0]], <32 x i32> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX:%.*]] = add <32 x i32> [[TMP0]], [[RDX_SHUF]]
+; STORE-NEXT: [[BIN_RDX:%.*]] = add nsw <32 x i32> [[TMP0]], [[RDX_SHUF]]
; STORE-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <32 x i32> [[BIN_RDX]], <32 x i32> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX2:%.*]] = add <32 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; STORE-NEXT: [[BIN_RDX2:%.*]] = add nsw <32 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
; STORE-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <32 x i32> [[BIN_RDX2]], <32 x i32> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX4:%.*]] = add <32 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
+; STORE-NEXT: [[BIN_RDX4:%.*]] = add nsw <32 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
; STORE-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <32 x i32> [[BIN_RDX4]], <32 x i32> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX6:%.*]] = add <32 x i32> [[BIN_RDX4]], [[RDX_SHUF5]]
+; STORE-NEXT: [[BIN_RDX6:%.*]] = add nsw <32 x i32> [[BIN_RDX4]], [[RDX_SHUF5]]
; STORE-NEXT: [[RDX_SHUF7:%.*]] = shufflevector <32 x i32> [[BIN_RDX6]], <32 x i32> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX8:%.*]] = add <32 x i32> [[BIN_RDX6]], [[RDX_SHUF7]]
+; STORE-NEXT: [[BIN_RDX8:%.*]] = add nsw <32 x i32> [[BIN_RDX6]], [[RDX_SHUF7]]
; STORE-NEXT: [[TMP1:%.*]] = extractelement <32 x i32> [[BIN_RDX8]], i32 0
; STORE: store i32 [[TMP1]], i32* %res, align 16
; STORE-NEXT: ret void
@@ -812,3 +812,98 @@ entry:
ret void
}
+declare i32 @foobar(i32)
+
+define void @i32_red_call(i32 %val) {
+; CHECK-LABEL: @i32_red_call(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 0), align 16
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 1), align 4
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 2), align 8
+; CHECK-NEXT: [[ADD_1:%.*]] = add nsw i32 [[TMP2]], [[ADD]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 3), align 4
+; CHECK-NEXT: [[ADD_2:%.*]] = add nsw i32 [[TMP3]], [[ADD_1]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 4), align 16
+; CHECK-NEXT: [[ADD_3:%.*]] = add nsw i32 [[TMP4]], [[ADD_2]]
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 5), align 4
+; CHECK-NEXT: [[ADD_4:%.*]] = add nsw i32 [[TMP5]], [[ADD_3]]
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 6), align 8
+; CHECK-NEXT: [[ADD_5:%.*]] = add nsw i32 [[TMP6]], [[ADD_4]]
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 7), align 4
+; CHECK-NEXT: [[ADD_6:%.*]] = add nsw i32 [[TMP7]], [[ADD_5]]
+; CHECK-NEXT: [[RES:%.*]] = call i32 @foobar(i32 [[ADD_6]])
+; CHECK-NEXT: ret void
+;
+entry:
+ %0 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 0), align 16
+ %1 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 1), align 4
+ %add = add nsw i32 %1, %0
+ %2 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 2), align 8
+ %add.1 = add nsw i32 %2, %add
+ %3 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 3), align 4
+ %add.2 = add nsw i32 %3, %add.1
+ %4 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 4), align 16
+ %add.3 = add nsw i32 %4, %add.2
+ %5 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 5), align 4
+ %add.4 = add nsw i32 %5, %add.3
+ %6 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 6), align 8
+ %add.5 = add nsw i32 %6, %add.4
+ %7 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 7), align 4
+ %add.6 = add nsw i32 %7, %add.5
+ %res = call i32 @foobar(i32 %add.6)
+ ret void
+}
+
+define void @i32_red_invoke(i32 %val) personality i32 (...)* @__gxx_personality_v0 {
+; CHECK-LABEL: @i32_red_invoke(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 0), align 16
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 1), align 4
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 2), align 8
+; CHECK-NEXT: [[ADD_1:%.*]] = add nsw i32 [[TMP2]], [[ADD]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 3), align 4
+; CHECK-NEXT: [[ADD_2:%.*]] = add nsw i32 [[TMP3]], [[ADD_1]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 4), align 16
+; CHECK-NEXT: [[ADD_3:%.*]] = add nsw i32 [[TMP4]], [[ADD_2]]
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 5), align 4
+; CHECK-NEXT: [[ADD_4:%.*]] = add nsw i32 [[TMP5]], [[ADD_3]]
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 6), align 8
+; CHECK-NEXT: [[ADD_5:%.*]] = add nsw i32 [[TMP6]], [[ADD_4]]
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 7), align 4
+; CHECK-NEXT: [[ADD_6:%.*]] = add nsw i32 [[TMP7]], [[ADD_5]]
+; CHECK-NEXT: [[RES:%.*]] = invoke i32 @foobar(i32 [[ADD_6]])
+; CHECK-NEXT: to label [[NORMAL:%.*]] unwind label [[EXCEPTION:%.*]]
+; CHECK: exception:
+; CHECK-NEXT: [[CLEANUP:%.*]] = landingpad i8
+; CHECK-NEXT: cleanup
+; CHECK-NEXT: br label [[NORMAL]]
+; CHECK: normal:
+; CHECK-NEXT: ret void
+;
+entry:
+ %0 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 0), align 16
+ %1 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 1), align 4
+ %add = add nsw i32 %1, %0
+ %2 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 2), align 8
+ %add.1 = add nsw i32 %2, %add
+ %3 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 3), align 4
+ %add.2 = add nsw i32 %3, %add.1
+ %4 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 4), align 16
+ %add.3 = add nsw i32 %4, %add.2
+ %5 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 5), align 4
+ %add.4 = add nsw i32 %5, %add.3
+ %6 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 6), align 8
+ %add.5 = add nsw i32 %6, %add.4
+ %7 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 7), align 4
+ %add.6 = add nsw i32 %7, %add.5
+ %res = invoke i32 @foobar(i32 %add.6) to label %normal unwind label %exception
+exception:
+ %cleanup = landingpad i8 cleanup
+ br label %normal
+normal:
+ ret void
+}
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/Transforms/SLPVectorizer/X86/reduction_loads.ll b/test/Transforms/SLPVectorizer/X86/reduction_loads.ll
index 76b3b9174a51..47a6a44611d8 100644
--- a/test/Transforms/SLPVectorizer/X86/reduction_loads.ll
+++ b/test/Transforms/SLPVectorizer/X86/reduction_loads.ll
@@ -5,17 +5,17 @@
define i32 @test(i32* nocapture readonly %p) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* %p, i64 1
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* %p, i64 2
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* %p, i64 3
-; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* %p, i64 4
-; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* %p, i64 5
-; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* %p, i64 6
-; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* %p, i64 7
-; CHECK-NEXT: br label %for.body
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 2
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 3
+; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 4
+; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 5
+; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 6
+; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, %entry ], [ %add.7, %for.body ]
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* %p to <8 x i32>*
+; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[BIN_EXTRA:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[P]] to <8 x i32>*
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP0]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = mul <8 x i32> <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>, [[TMP1]]
; CHECK-NEXT: [[ADD:%.*]] = add i32 undef, [[SUM]]
@@ -32,10 +32,11 @@ define i32 @test(i32* nocapture readonly %p) {
; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT: [[BIN_RDX4:%.*]] = add <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0
-; CHECK-NEXT: [[ADD_7:%.*]] = add i32 [[TMP3]], [[SUM]]
-; CHECK-NEXT: br i1 true, label %for.end, label %for.body
+; CHECK-NEXT: [[BIN_EXTRA]] = add i32 [[TMP3]], [[SUM]]
+; CHECK-NEXT: [[ADD_7:%.*]] = add i32 undef, [[ADD_6]]
+; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
-; CHECK-NEXT: ret i32 [[ADD_7]]
+; CHECK-NEXT: ret i32 [[BIN_EXTRA]]
;
entry:
%arrayidx.1 = getelementptr inbounds i32, i32* %p, i64 1
diff --git a/test/Transforms/SLPVectorizer/X86/scheduling.ll b/test/Transforms/SLPVectorizer/X86/scheduling.ll
index 5377ee82cf97..c4f521c8963e 100644
--- a/test/Transforms/SLPVectorizer/X86/scheduling.ll
+++ b/test/Transforms/SLPVectorizer/X86/scheduling.ll
@@ -8,11 +8,11 @@ define i32 @foo(i32* nocapture readonly %diff) #0 {
; CHECK: [[S1:%.+]] = add nsw <4 x i32>
; CHECK: store <4 x i32> [[S1]],
; CHECK: [[RDX_SHUF:%.*]] = shufflevector <4 x i32> [[S1]], <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
-; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[S1]], [[RDX_SHUF]]
+; CHECK-NEXT: [[BIN_RDX:%.*]] = add nsw <4 x i32> [[S1]], [[RDX_SHUF]]
; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x i32> [[BIN_RDX]], <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT: [[BIN_RDX2:%.*]] = add <4 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = add nsw <4 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i32> [[BIN_RDX2]], i32 0
-; CHECK-NEXT: [[ADD52:%.*]] = add nsw i32 [[TMP15]],
+; CHECK: [[ADD52:%.*]] = add nsw i32 [[TMP15]],
; CHECK: ret i32 [[ADD52]]
;
entry:
diff --git a/test/Transforms/SLPVectorizer/X86/store-jumbled.ll b/test/Transforms/SLPVectorizer/X86/store-jumbled.ll
new file mode 100644
index 000000000000..1b2c76384e0b
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/store-jumbled.ll
@@ -0,0 +1,68 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -S -mtriple=x86_64-unknown -mattr=+avx -slp-vectorizer | FileCheck %s
+
+
+
+define i32 @jumbled-load(i32* noalias nocapture %in, i32* noalias nocapture %inn, i32* noalias nocapture %out) {
+; CHECK-LABEL: @jumbled-load(
+; CHECK-NEXT: [[IN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 0
+; CHECK-NEXT: [[LOAD_1:%.*]] = load i32, i32* [[IN_ADDR]], align 4
+; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 1
+; CHECK-NEXT: [[LOAD_2:%.*]] = load i32, i32* [[GEP_1]], align 4
+; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 2
+; CHECK-NEXT: [[LOAD_3:%.*]] = load i32, i32* [[GEP_2]], align 4
+; CHECK-NEXT: [[GEP_3:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 3
+; CHECK-NEXT: [[LOAD_4:%.*]] = load i32, i32* [[GEP_3]], align 4
+; CHECK-NEXT: [[INN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[INN:%.*]], i64 0
+; CHECK-NEXT: [[LOAD_5:%.*]] = load i32, i32* [[INN_ADDR]], align 4
+; CHECK-NEXT: [[GEP_4:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 1
+; CHECK-NEXT: [[LOAD_6:%.*]] = load i32, i32* [[GEP_4]], align 4
+; CHECK-NEXT: [[GEP_5:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 2
+; CHECK-NEXT: [[LOAD_7:%.*]] = load i32, i32* [[GEP_5]], align 4
+; CHECK-NEXT: [[GEP_6:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 3
+; CHECK-NEXT: [[LOAD_8:%.*]] = load i32, i32* [[GEP_6]], align 4
+; CHECK-NEXT: [[MUL_1:%.*]] = mul i32 [[LOAD_1]], [[LOAD_5]]
+; CHECK-NEXT: [[MUL_2:%.*]] = mul i32 [[LOAD_2]], [[LOAD_6]]
+; CHECK-NEXT: [[MUL_3:%.*]] = mul i32 [[LOAD_3]], [[LOAD_7]]
+; CHECK-NEXT: [[MUL_4:%.*]] = mul i32 [[LOAD_4]], [[LOAD_8]]
+; CHECK-NEXT: [[GEP_7:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 0
+; CHECK-NEXT: [[GEP_8:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 1
+; CHECK-NEXT: [[GEP_9:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 2
+; CHECK-NEXT: [[GEP_10:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 3
+; CHECK-NEXT: store i32 [[MUL_1]], i32* [[GEP_9]], align 4
+; CHECK-NEXT: store i32 [[MUL_2]], i32* [[GEP_7]], align 4
+; CHECK-NEXT: store i32 [[MUL_3]], i32* [[GEP_10]], align 4
+; CHECK-NEXT: store i32 [[MUL_4]], i32* [[GEP_8]], align 4
+; CHECK-NEXT: ret i32 undef
+;
+ %in.addr = getelementptr inbounds i32, i32* %in, i64 0
+ %load.1 = load i32, i32* %in.addr, align 4
+ %gep.1 = getelementptr inbounds i32, i32* %in.addr, i64 1
+ %load.2 = load i32, i32* %gep.1, align 4
+ %gep.2 = getelementptr inbounds i32, i32* %in.addr, i64 2
+ %load.3 = load i32, i32* %gep.2, align 4
+ %gep.3 = getelementptr inbounds i32, i32* %in.addr, i64 3
+ %load.4 = load i32, i32* %gep.3, align 4
+ %inn.addr = getelementptr inbounds i32, i32* %inn, i64 0
+ %load.5 = load i32, i32* %inn.addr, align 4
+ %gep.4 = getelementptr inbounds i32, i32* %inn.addr, i64 1
+ %load.6 = load i32, i32* %gep.4, align 4
+ %gep.5 = getelementptr inbounds i32, i32* %inn.addr, i64 2
+ %load.7 = load i32, i32* %gep.5, align 4
+ %gep.6 = getelementptr inbounds i32, i32* %inn.addr, i64 3
+ %load.8 = load i32, i32* %gep.6, align 4
+ %mul.1 = mul i32 %load.1, %load.5
+ %mul.2 = mul i32 %load.2, %load.6
+ %mul.3 = mul i32 %load.3, %load.7
+ %mul.4 = mul i32 %load.4, %load.8
+ %gep.7 = getelementptr inbounds i32, i32* %out, i64 0
+ %gep.8 = getelementptr inbounds i32, i32* %out, i64 1
+ %gep.9 = getelementptr inbounds i32, i32* %out, i64 2
+ %gep.10 = getelementptr inbounds i32, i32* %out, i64 3
+ store i32 %mul.1, i32* %gep.9, align 4
+ store i32 %mul.2, i32* %gep.7, align 4
+ store i32 %mul.3, i32* %gep.10, align 4
+ store i32 %mul.4, i32* %gep.8, align 4
+
+ ret i32 undef
+}
diff --git a/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll b/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll
new file mode 100644
index 000000000000..2b593b78652f
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll
@@ -0,0 +1,984 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 < %s | FileCheck %s
+
+define void @add0(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @add0(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[SRC]] to <4 x i32>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> <i32 1, i32 1, i32 2, i32 3>, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[DST]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[TMP2]], <4 x i32>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %add = add nsw i32 %0, 1
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %add, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %add3 = add nsw i32 %1, 1
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %add3, i32* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %add6 = add nsw i32 %2, 2
+ %incdec.ptr7 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %add6, i32* %incdec.ptr4, align 4
+ %3 = load i32, i32* %incdec.ptr5, align 4
+ %add9 = add nsw i32 %3, 3
+ store i32 %add9, i32* %incdec.ptr7, align 4
+ ret void
+}
+
+define void @add1(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @add1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[SRC]], align 4
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: store i32 [[TMP0]], i32* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP1]], 1
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: store i32 [[ADD3]], i32* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP2]], 2
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: store i32 [[ADD6]], i32* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[INCDEC_PTR5]], align 4
+; CHECK-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP3]], 3
+; CHECK-NEXT: store i32 [[ADD9]], i32* [[INCDEC_PTR7]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %0, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %add3 = add nsw i32 %1, 1
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %add3, i32* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %add6 = add nsw i32 %2, 2
+ %incdec.ptr7 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %add6, i32* %incdec.ptr4, align 4
+ %3 = load i32, i32* %incdec.ptr5, align 4
+ %add9 = add nsw i32 %3, 3
+ store i32 %add9, i32* %incdec.ptr7, align 4
+ ret void
+}
+
+define void @sub0(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @sub0(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[SRC]], align 4
+; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[TMP0]], -1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: store i32 [[SUB]], i32* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: store i32 [[TMP1]], i32* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[SUB5:%.*]] = add nsw i32 [[TMP2]], -2
+; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: store i32 [[SUB5]], i32* [[INCDEC_PTR3]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[SUB8:%.*]] = add nsw i32 [[TMP3]], -3
+; CHECK-NEXT: store i32 [[SUB8]], i32* [[INCDEC_PTR6]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %sub = add nsw i32 %0, -1
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %sub, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %incdec.ptr3 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %1, i32* %incdec.ptr1, align 4
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %sub5 = add nsw i32 %2, -2
+ %incdec.ptr6 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %sub5, i32* %incdec.ptr3, align 4
+ %3 = load i32, i32* %incdec.ptr4, align 4
+ %sub8 = add nsw i32 %3, -3
+ store i32 %sub8, i32* %incdec.ptr6, align 4
+ ret void
+}
+
+define void @sub1(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @sub1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[SRC]] to <4 x i32>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> <i32 4, i32 -1, i32 -2, i32 -3>, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[DST]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[TMP2]], <4 x i32>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %add = add nsw i32 %0, 4
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %add, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %sub = add nsw i32 %1, -1
+ %incdec.ptr3 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %sub, i32* %incdec.ptr1, align 4
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %sub5 = add nsw i32 %2, -2
+ %incdec.ptr6 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %sub5, i32* %incdec.ptr3, align 4
+ %3 = load i32, i32* %incdec.ptr4, align 4
+ %sub8 = add nsw i32 %3, -3
+ store i32 %sub8, i32* %incdec.ptr6, align 4
+ ret void
+}
+
+define void @sub2(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @sub2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[SRC]] to <4 x i32>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> <i32 -1, i32 -1, i32 -2, i32 -3>, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[DST]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[TMP2]], <4 x i32>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %sub = add nsw i32 %0, -1
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %sub, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %sub3 = add nsw i32 %1, -1
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %sub3, i32* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %sub6 = add nsw i32 %2, -2
+ %incdec.ptr7 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %sub6, i32* %incdec.ptr4, align 4
+ %3 = load i32, i32* %incdec.ptr5, align 4
+ %sub9 = add nsw i32 %3, -3
+ store i32 %sub9, i32* %incdec.ptr7, align 4
+ ret void
+}
+
+define void @addsub0(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @addsub0(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[SRC]], align 4
+; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[TMP0]], -1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: store i32 [[SUB]], i32* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: store i32 [[TMP1]], i32* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[SUB5:%.*]] = add nsw i32 [[TMP2]], -2
+; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: store i32 [[SUB5]], i32* [[INCDEC_PTR3]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP3]], -3
+; CHECK-NEXT: store i32 [[SUB8]], i32* [[INCDEC_PTR6]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %sub = add nsw i32 %0, -1
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %sub, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %incdec.ptr3 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %1, i32* %incdec.ptr1, align 4
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %sub5 = add nsw i32 %2, -2
+ %incdec.ptr6 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %sub5, i32* %incdec.ptr3, align 4
+ %3 = load i32, i32* %incdec.ptr4, align 4
+ %sub8 = sub nsw i32 %3, -3
+ store i32 %sub8, i32* %incdec.ptr6, align 4
+ ret void
+}
+
+define void @addsub1(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @addsub1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[SRC]], align 4
+; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[TMP0]], -1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: store i32 [[SUB]], i32* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[SUB1:%.*]] = sub nsw i32 [[TMP1]], -1
+; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: store i32 [[SUB1]], i32* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: store i32 [[TMP2]], i32* [[INCDEC_PTR3]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP3]], -3
+; CHECK-NEXT: store i32 [[SUB8]], i32* [[INCDEC_PTR6]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %sub = add nsw i32 %0, -1
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %sub, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %sub1 = sub nsw i32 %1, -1
+ %incdec.ptr3 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %sub1, i32* %incdec.ptr1, align 4
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %incdec.ptr6 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %2, i32* %incdec.ptr3, align 4
+ %3 = load i32, i32* %incdec.ptr4, align 4
+ %sub8 = sub nsw i32 %3, -3
+ store i32 %sub8, i32* %incdec.ptr6, align 4
+ ret void
+}
+
+define void @mul(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @mul(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[SRC]], align 4
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP0]], 257
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: store i32 [[MUL]], i32* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP1]], -3
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: store i32 [[MUL3]], i32* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: store i32 [[TMP2]], i32* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[INCDEC_PTR5]], align 4
+; CHECK-NEXT: [[MUL9:%.*]] = mul nsw i32 [[TMP3]], -9
+; CHECK-NEXT: store i32 [[MUL9]], i32* [[INCDEC_PTR7]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %mul = mul nsw i32 %0, 257
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %mul, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %mul3 = mul nsw i32 %1, -3
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %mul3, i32* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %incdec.ptr7 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %2, i32* %incdec.ptr4, align 4
+ %3 = load i32, i32* %incdec.ptr5, align 4
+ %mul9 = mul nsw i32 %3, -9
+ store i32 %mul9, i32* %incdec.ptr7, align 4
+ ret void
+}
+
+define void @shl0(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @shl0(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[SRC]], align 4
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: store i32 [[TMP0]], i32* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[TMP1]], 1
+; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: store i32 [[SHL]], i32* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[SHL5:%.*]] = shl i32 [[TMP2]], 2
+; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: store i32 [[SHL5]], i32* [[INCDEC_PTR3]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[SHL8:%.*]] = shl i32 [[TMP3]], 3
+; CHECK-NEXT: store i32 [[SHL8]], i32* [[INCDEC_PTR6]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %0, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %shl = shl i32 %1, 1
+ %incdec.ptr3 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %shl, i32* %incdec.ptr1, align 4
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %shl5 = shl i32 %2, 2
+ %incdec.ptr6 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %shl5, i32* %incdec.ptr3, align 4
+ %3 = load i32, i32* %incdec.ptr4, align 4
+ %shl8 = shl i32 %3, 3
+ store i32 %shl8, i32* %incdec.ptr6, align 4
+ ret void
+}
+
+define void @shl1(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @shl1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[SRC]] to <4 x i32>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = shl <4 x i32> [[TMP1]], <i32 7, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[DST]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[TMP2]], <4 x i32>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %shl = shl i32 %0, 7
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %shl, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %shl3 = shl i32 %1, 1
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %shl3, i32* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %shl6 = shl i32 %2, 2
+ %incdec.ptr7 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %shl6, i32* %incdec.ptr4, align 4
+ %3 = load i32, i32* %incdec.ptr5, align 4
+ %shl9 = shl i32 %3, 3
+ store i32 %shl9, i32* %incdec.ptr7, align 4
+ ret void
+}
+
+define void @add0f(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @add0f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[SRC]] to <4 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <4 x float> <float 1.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[DST]] to <4 x float>*
+; CHECK-NEXT: store <4 x float> [[TMP2]], <4 x float>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %add = fadd fast float %0, 1.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %add, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %add3 = fadd fast float %1, 1.000000e+00
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %add3, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %add6 = fadd fast float %2, 2.000000e+00
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %add6, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %add9 = fadd fast float %3, 3.000000e+00
+ store float %add9, float* %incdec.ptr7, align 4
+ ret void
+}
+
+define void @add1f(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @add1f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC]], align 4
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: store float [[TMP0]], float* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[ADD3:%.*]] = fadd fast float [[TMP1]], 1.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: store float [[ADD3]], float* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[ADD6:%.*]] = fadd fast float [[TMP2]], 2.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: store float [[ADD6]], float* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[INCDEC_PTR5]], align 4
+; CHECK-NEXT: [[ADD9:%.*]] = fadd fast float [[TMP3]], 3.000000e+00
+; CHECK-NEXT: store float [[ADD9]], float* [[INCDEC_PTR7]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %0, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %add3 = fadd fast float %1, 1.000000e+00
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %add3, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %add6 = fadd fast float %2, 2.000000e+00
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %add6, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %add9 = fadd fast float %3, 3.000000e+00
+ store float %add9, float* %incdec.ptr7, align 4
+ ret void
+}
+
+define void @sub0f(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @sub0f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP0]], -1.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: store float [[ADD]], float* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: store float [[TMP1]], float* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[ADD6:%.*]] = fadd fast float [[TMP2]], -2.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: store float [[ADD6]], float* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[INCDEC_PTR5]], align 4
+; CHECK-NEXT: [[ADD9:%.*]] = fadd fast float [[TMP3]], -3.000000e+00
+; CHECK-NEXT: store float [[ADD9]], float* [[INCDEC_PTR7]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %add = fadd fast float %0, -1.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %add, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %1, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %add6 = fadd fast float %2, -2.000000e+00
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %add6, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %add9 = fadd fast float %3, -3.000000e+00
+ store float %add9, float* %incdec.ptr7, align 4
+ ret void
+}
+
+define void @sub1f(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @sub1f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[SRC]] to <4 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <4 x float> <float 4.000000e+00, float -1.000000e+00, float -2.000000e+00, float -3.000000e+00>, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[DST]] to <4 x float>*
+; CHECK-NEXT: store <4 x float> [[TMP2]], <4 x float>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %add = fadd fast float %0, 4.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %add, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %sub = fadd fast float %1, -1.000000e+00
+ %incdec.ptr3 = getelementptr inbounds float, float* %dst, i64 2
+ store float %sub, float* %incdec.ptr1, align 4
+ %incdec.ptr4 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %sub5 = fadd fast float %2, -2.000000e+00
+ %incdec.ptr6 = getelementptr inbounds float, float* %dst, i64 3
+ store float %sub5, float* %incdec.ptr3, align 4
+ %3 = load float, float* %incdec.ptr4, align 4
+ %sub8 = fadd fast float %3, -3.000000e+00
+ store float %sub8, float* %incdec.ptr6, align 4
+ ret void
+}
+
+define void @sub2f(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @sub2f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[SRC]] to <4 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <4 x float> <float -1.000000e+00, float -1.000000e+00, float -2.000000e+00, float -3.000000e+00>, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[DST]] to <4 x float>*
+; CHECK-NEXT: store <4 x float> [[TMP2]], <4 x float>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %sub = fadd fast float %0, -1.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %sub, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %sub3 = fadd fast float %1, -1.000000e+00
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %sub3, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %sub6 = fadd fast float %2, -2.000000e+00
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %sub6, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %sub9 = fadd fast float %3, -3.000000e+00
+ store float %sub9, float* %incdec.ptr7, align 4
+ ret void
+}
+
+define void @addsub0f(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @addsub0f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC]], align 4
+; CHECK-NEXT: [[SUB:%.*]] = fadd fast float [[TMP0]], -1.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: store float [[SUB]], float* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: store float [[TMP1]], float* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[SUB5:%.*]] = fadd fast float [[TMP2]], -2.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: store float [[SUB5]], float* [[INCDEC_PTR3]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[SUB8:%.*]] = fsub fast float [[TMP3]], -3.000000e+00
+; CHECK-NEXT: store float [[SUB8]], float* [[INCDEC_PTR6]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %sub = fadd fast float %0, -1.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %sub, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %incdec.ptr3 = getelementptr inbounds float, float* %dst, i64 2
+ store float %1, float* %incdec.ptr1, align 4
+ %incdec.ptr4 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %sub5 = fadd fast float %2, -2.000000e+00
+ %incdec.ptr6 = getelementptr inbounds float, float* %dst, i64 3
+ store float %sub5, float* %incdec.ptr3, align 4
+ %3 = load float, float* %incdec.ptr4, align 4
+ %sub8 = fsub fast float %3, -3.000000e+00
+ store float %sub8, float* %incdec.ptr6, align 4
+ ret void
+}
+
+define void @addsub1f(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @addsub1f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC]], align 4
+; CHECK-NEXT: [[SUB:%.*]] = fadd fast float [[TMP0]], -1.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: store float [[SUB]], float* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[SUB1:%.*]] = fsub fast float [[TMP1]], -1.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: store float [[SUB1]], float* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: store float [[TMP2]], float* [[INCDEC_PTR3]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[SUB8:%.*]] = fsub fast float [[TMP3]], -3.000000e+00
+; CHECK-NEXT: store float [[SUB8]], float* [[INCDEC_PTR6]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %sub = fadd fast float %0, -1.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %sub, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %sub1 = fsub fast float %1, -1.000000e+00
+ %incdec.ptr3 = getelementptr inbounds float, float* %dst, i64 2
+ store float %sub1, float* %incdec.ptr1, align 4
+ %incdec.ptr4 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %incdec.ptr6 = getelementptr inbounds float, float* %dst, i64 3
+ store float %2, float* %incdec.ptr3, align 4
+ %3 = load float, float* %incdec.ptr4, align 4
+ %sub8 = fsub fast float %3, -3.000000e+00
+ store float %sub8, float* %incdec.ptr6, align 4
+ ret void
+}
+
+define void @mulf(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @mulf(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC]], align 4
+; CHECK-NEXT: [[SUB:%.*]] = fmul fast float [[TMP0]], 2.570000e+02
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: store float [[SUB]], float* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[SUB3:%.*]] = fmul fast float [[TMP1]], -3.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: store float [[SUB3]], float* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: store float [[TMP2]], float* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[INCDEC_PTR5]], align 4
+; CHECK-NEXT: [[SUB9:%.*]] = fmul fast float [[TMP3]], -9.000000e+00
+; CHECK-NEXT: store float [[SUB9]], float* [[INCDEC_PTR7]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %sub = fmul fast float %0, 2.570000e+02
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %sub, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %sub3 = fmul fast float %1, -3.000000e+00
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %sub3, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %2, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %sub9 = fmul fast float %3, -9.000000e+00
+ store float %sub9, float* %incdec.ptr7, align 4
+ ret void
+}
+
+define void @add0fn(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @add0fn(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[SRC]] to <4 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fadd <4 x float> <float 1.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[DST]] to <4 x float>*
+; CHECK-NEXT: store <4 x float> [[TMP2]], <4 x float>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %add = fadd float %0, 1.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %add, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %add3 = fadd float %1, 1.000000e+00
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %add3, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %add6 = fadd float %2, 2.000000e+00
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %add6, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %add9 = fadd float %3, 3.000000e+00
+ store float %add9, float* %incdec.ptr7, align 4
+ ret void
+}
+
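+; A hypothetical note on why this case differs from @add0fn: the first lane is a
+; plain copy rather than an fadd, so the four lanes do not match a single vector
+; operation and the function is expected to stay scalar.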
+define void @add1fn(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @add1fn(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC]], align 4
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: store float [[TMP0]], float* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[ADD3:%.*]] = fadd float [[TMP1]], 1.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: store float [[ADD3]], float* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[ADD6:%.*]] = fadd float [[TMP2]], 2.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: store float [[ADD6]], float* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[INCDEC_PTR5]], align 4
+; CHECK-NEXT: [[ADD9:%.*]] = fadd float [[TMP3]], 3.000000e+00
+; CHECK-NEXT: store float [[ADD9]], float* [[INCDEC_PTR7]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %0, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %add3 = fadd float %1, 1.000000e+00
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %add3, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %add6 = fadd float %2, 2.000000e+00
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %add6, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %add9 = fadd float %3, 3.000000e+00
+ store float %add9, float* %incdec.ptr7, align 4
+ ret void
+}
+
+define void @sub0fn(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @sub0fn(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP0]], -1.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: store float [[ADD]], float* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: store float [[TMP1]], float* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[ADD6:%.*]] = fadd float [[TMP2]], -2.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: store float [[ADD6]], float* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[INCDEC_PTR5]], align 4
+; CHECK-NEXT: [[ADD9:%.*]] = fadd float [[TMP3]], -3.000000e+00
+; CHECK-NEXT: store float [[ADD9]], float* [[INCDEC_PTR7]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %add = fadd fast float %0, -1.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %add, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %1, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %add6 = fadd float %2, -2.000000e+00
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %add6, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %add9 = fadd float %3, -3.000000e+00
+ store float %add9, float* %incdec.ptr7, align 4
+ ret void
+}
+
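+; Here, by contrast, every lane is an fadd of a load with a constant, so despite
+; the sub-style naming the lanes should fuse into a single <4 x float> fadd.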
+define void @sub1fn(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @sub1fn(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[SRC]] to <4 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fadd <4 x float> <float 4.000000e+00, float -1.000000e+00, float -2.000000e+00, float -3.000000e+00>, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[DST]] to <4 x float>*
+; CHECK-NEXT: store <4 x float> [[TMP2]], <4 x float>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %add = fadd float %0, 4.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %add, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %sub = fadd float %1, -1.000000e+00
+ %incdec.ptr3 = getelementptr inbounds float, float* %dst, i64 2
+ store float %sub, float* %incdec.ptr1, align 4
+ %incdec.ptr4 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %sub5 = fadd float %2, -2.000000e+00
+ %incdec.ptr6 = getelementptr inbounds float, float* %dst, i64 3
+ store float %sub5, float* %incdec.ptr3, align 4
+ %3 = load float, float* %incdec.ptr4, align 4
+ %sub8 = fadd float %3, -3.000000e+00
+ store float %sub8, float* %incdec.ptr6, align 4
+ ret void
+}
+
+define void @sub2fn(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @sub2fn(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[SRC]] to <4 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fadd <4 x float> <float -1.000000e+00, float -1.000000e+00, float -2.000000e+00, float -3.000000e+00>, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[DST]] to <4 x float>*
+; CHECK-NEXT: store <4 x float> [[TMP2]], <4 x float>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %sub = fadd float %0, -1.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %sub, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %sub3 = fadd float %1, -1.000000e+00
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %sub3, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %sub6 = fadd float %2, -2.000000e+00
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %sub6, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %sub9 = fadd float %3, -3.000000e+00
+ store float %sub9, float* %incdec.ptr7, align 4
+ ret void
+}
+
+define void @mulfn(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @mulfn(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC]], align 4
+; CHECK-NEXT: [[SUB:%.*]] = fmul float [[TMP0]], 2.570000e+02
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: store float [[SUB]], float* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[SUB3:%.*]] = fmul float [[TMP1]], -3.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: store float [[SUB3]], float* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: store float [[TMP2]], float* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[INCDEC_PTR5]], align 4
+; CHECK-NEXT: [[SUB9:%.*]] = fmul fast float [[TMP3]], -9.000000e+00
+; CHECK-NEXT: store float [[SUB9]], float* [[INCDEC_PTR7]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %sub = fmul float %0, 2.570000e+02
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %sub, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %sub3 = fmul float %1, -3.000000e+00
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %sub3, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %2, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %sub9 = fmul fast float %3, -9.000000e+00
+ store float %sub9, float* %incdec.ptr7, align 4
+ ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/vector.ll b/test/Transforms/SLPVectorizer/X86/vector.ll
index 02a18979c659..e1f3fa50ccdb 100644
--- a/test/Transforms/SLPVectorizer/X86/vector.ll
+++ b/test/Transforms/SLPVectorizer/X86/vector.ll
@@ -1,14 +1,49 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
; Make sure that we neither crash nor change the code.
-;CHECK: test
-;CHECK: icmp
-;CHECK: ret
define void @test(<4 x i32> %in, <4 x i32> %in2) {
+; CHECK-LABEL: @test(
+; CHECK-NEXT: [[K:%.*]] = icmp eq <4 x i32> [[IN:%.*]], [[IN2:%.*]]
+; CHECK-NEXT: ret void
+;
%k = icmp eq <4 x i32> %in, %in2
ret void
}
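+
+; The compares and extracts are split across basic blocks, so there is nothing
+; for the SLP vectorizer to combine; per the checks below, the function should
+; come out unchanged.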
+define i1 @cmpv2f32(<2 x i32> %x, <2 x i32> %y) {
+; CHECK-LABEL: @cmpv2f32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[X0:%.*]] = extractelement <2 x i32> [[X:%.*]], i32 0
+; CHECK-NEXT: [[Y0:%.*]] = extractelement <2 x i32> [[Y:%.*]], i32 0
+; CHECK-NEXT: [[CMP0:%.*]] = icmp eq i32 [[X0]], [[Y0]]
+; CHECK-NEXT: br i1 [[CMP0]], label [[IF:%.*]], label [[ENDIF:%.*]]
+; CHECK: if:
+; CHECK-NEXT: [[X1:%.*]] = extractelement <2 x i32> [[X]], i32 1
+; CHECK-NEXT: [[Y1:%.*]] = extractelement <2 x i32> [[Y]], i32 1
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[X1]], [[Y1]]
+; CHECK-NEXT: br label [[ENDIF]]
+; CHECK: endif:
+; CHECK-NEXT: [[AND_OF_CMPS:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[CMP1]], [[IF]] ]
+; CHECK-NEXT: ret i1 [[AND_OF_CMPS]]
+;
+entry:
+  %x0 = extractelement <2 x i32> %x, i32 0
+  %y0 = extractelement <2 x i32> %y, i32 0
+  %cmp0 = icmp eq i32 %x0, %y0
+  br i1 %cmp0, label %if, label %endif
+
+if:
+  %x1 = extractelement <2 x i32> %x, i32 1
+  %y1 = extractelement <2 x i32> %y, i32 1
+  %cmp1 = icmp eq i32 %x1, %y1
+  br label %endif
+
+endif:
+  %and_of_cmps = phi i1 [ false, %entry ], [ %cmp1, %if ]
+  ret i1 %and_of_cmps
+}
+
diff --git a/test/Transforms/SROA/alloca-address-space.ll b/test/Transforms/SROA/alloca-address-space.ll
new file mode 100644
index 000000000000..6b3b3abbff5f
--- /dev/null
+++ b/test/Transforms/SROA/alloca-address-space.ll
@@ -0,0 +1,84 @@
+; RUN: opt < %s -sroa -S | FileCheck %s
+target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64-A2"
+
+declare void @llvm.memcpy.p2i8.p2i8.i32(i8 addrspace(2)* nocapture, i8 addrspace(2)* nocapture readonly, i32, i32, i1)
+declare void @llvm.memcpy.p1i8.p2i8.i32(i8 addrspace(1)* nocapture, i8 addrspace(2)* nocapture readonly, i32, i32, i1)
+declare void @llvm.memcpy.p2i8.p1i8.i32(i8 addrspace(2)* nocapture, i8 addrspace(1)* nocapture readonly, i32, i32, i1)
+declare void @llvm.memcpy.p1i8.p1i8.i32(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture readonly, i32, i32, i1)
+
+
+
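+; These tests round-trip values through an alloca in the default alloca address
+; space (2); SROA should forward them as a single vector load and store that
+; keep the original pointers' address spaces.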
+; CHECK-LABEL: @test_address_space_1_1(
+; CHECK: load <2 x i64>, <2 x i64> addrspace(1)* %a, align 2
+; CHECK: store <2 x i64> {{.*}}, <2 x i64> addrspace(1)* {{.*}}, align 2
+; CHECK: ret void
+define void @test_address_space_1_1(<2 x i64> addrspace(1)* %a, i16 addrspace(1)* %b) {
+ %aa = alloca <2 x i64>, align 16, addrspace(2)
+ %aptr = bitcast <2 x i64> addrspace(1)* %a to i8 addrspace(1)*
+ %aaptr = bitcast <2 x i64> addrspace(2)* %aa to i8 addrspace(2)*
+ call void @llvm.memcpy.p2i8.p1i8.i32(i8 addrspace(2)* %aaptr, i8 addrspace(1)* %aptr, i32 16, i32 2, i1 false)
+ %bptr = bitcast i16 addrspace(1)* %b to i8 addrspace(1)*
+ call void @llvm.memcpy.p1i8.p2i8.i32(i8 addrspace(1)* %bptr, i8 addrspace(2)* %aaptr, i32 16, i32 2, i1 false)
+ ret void
+}
+
+; CHECK-LABEL: @test_address_space_1_0(
+; CHECK: load <2 x i64>, <2 x i64> addrspace(1)* %a, align 2
+; CHECK: store <2 x i64> {{.*}}, <2 x i64> addrspace(2)* {{.*}}, align 2
+; CHECK: ret void
+define void @test_address_space_1_0(<2 x i64> addrspace(1)* %a, i16 addrspace(2)* %b) {
+ %aa = alloca <2 x i64>, align 16, addrspace(2)
+ %aptr = bitcast <2 x i64> addrspace(1)* %a to i8 addrspace(1)*
+ %aaptr = bitcast <2 x i64> addrspace(2)* %aa to i8 addrspace(2)*
+ call void @llvm.memcpy.p2i8.p1i8.i32(i8 addrspace(2)* %aaptr, i8 addrspace(1)* %aptr, i32 16, i32 2, i1 false)
+ %bptr = bitcast i16 addrspace(2)* %b to i8 addrspace(2)*
+ call void @llvm.memcpy.p2i8.p2i8.i32(i8 addrspace(2)* %bptr, i8 addrspace(2)* %aaptr, i32 16, i32 2, i1 false)
+ ret void
+}
+
+; CHECK-LABEL: @test_address_space_0_1(
+; CHECK: load <2 x i64>, <2 x i64> addrspace(2)* %a, align 2
+; CHECK: store <2 x i64> {{.*}}, <2 x i64> addrspace(1)* {{.*}}, align 2
+; CHECK: ret void
+define void @test_address_space_0_1(<2 x i64> addrspace(2)* %a, i16 addrspace(1)* %b) {
+ %aa = alloca <2 x i64>, align 16, addrspace(2)
+ %aptr = bitcast <2 x i64> addrspace(2)* %a to i8 addrspace(2)*
+ %aaptr = bitcast <2 x i64> addrspace(2)* %aa to i8 addrspace(2)*
+ call void @llvm.memcpy.p2i8.p2i8.i32(i8 addrspace(2)* %aaptr, i8 addrspace(2)* %aptr, i32 16, i32 2, i1 false)
+ %bptr = bitcast i16 addrspace(1)* %b to i8 addrspace(1)*
+ call void @llvm.memcpy.p1i8.p2i8.i32(i8 addrspace(1)* %bptr, i8 addrspace(2)* %aaptr, i32 16, i32 2, i1 false)
+ ret void
+}
+
+%struct.struct_test_27.0.13 = type { i32, float, i64, i8, [4 x i32] }
+
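+; The struct alloca below also lives in addrspace(2); SROA should split it so
+; that no memcpy of the alloca remains.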
+; CHECK-LABEL: @copy_struct(
+; CHECK-NOT: memcpy
+define void @copy_struct([5 x i64] %in.coerce) {
+for.end:
+ %in = alloca %struct.struct_test_27.0.13, align 8, addrspace(2)
+ %0 = bitcast %struct.struct_test_27.0.13 addrspace(2)* %in to [5 x i64] addrspace(2)*
+ store [5 x i64] %in.coerce, [5 x i64] addrspace(2)* %0, align 8
+ %scevgep9 = getelementptr %struct.struct_test_27.0.13, %struct.struct_test_27.0.13 addrspace(2)* %in, i32 0, i32 4, i32 0
+ %scevgep910 = bitcast i32 addrspace(2)* %scevgep9 to i8 addrspace(2)*
+ call void @llvm.memcpy.p1i8.p2i8.i32(i8 addrspace(1)* undef, i8 addrspace(2)* %scevgep910, i32 16, i32 4, i1 false)
+ ret void
+}
+
+%union.anon = type { i32* }
+
+@g = common global i32 0, align 4
+@l = common addrspace(3) global i32 0, align 4
+
+; Make sure an illegal bitcast isn't introduced
+; CHECK-LABEL: @pr27557(
+; CHECK: %[[CAST:.*]] = bitcast i32* addrspace(2)* {{.*}} to i32 addrspace(3)* addrspace(2)*
+; CHECK: store i32 addrspace(3)* @l, i32 addrspace(3)* addrspace(2)* %[[CAST]]
+define void @pr27557() {
+ %1 = alloca %union.anon, align 8, addrspace(2)
+ %2 = bitcast %union.anon addrspace(2)* %1 to i32* addrspace(2)*
+ store i32* @g, i32* addrspace(2)* %2, align 8
+ %3 = bitcast %union.anon addrspace(2)* %1 to i32 addrspace(3)* addrspace(2)*
+ store i32 addrspace(3)* @l, i32 addrspace(3)* addrspace(2)* %3, align 8
+ ret void
+}
diff --git a/test/Transforms/SROA/basictest.ll b/test/Transforms/SROA/basictest.ll
index 70096f37be05..aa00e89ea04f 100644
--- a/test/Transforms/SROA/basictest.ll
+++ b/test/Transforms/SROA/basictest.ll
@@ -3,8 +3,8 @@
target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
-declare void @llvm.lifetime.start(i64, i8* nocapture)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
define i32 @test0() {
; CHECK-LABEL: @test0(
@@ -16,22 +16,22 @@ entry:
%a2 = alloca float
%a1.i8 = bitcast i32* %a1 to i8*
- call void @llvm.lifetime.start(i64 4, i8* %a1.i8)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %a1.i8)
store i32 0, i32* %a1
%v1 = load i32, i32* %a1
- call void @llvm.lifetime.end(i64 4, i8* %a1.i8)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %a1.i8)
%a2.i8 = bitcast float* %a2 to i8*
- call void @llvm.lifetime.start(i64 4, i8* %a2.i8)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %a2.i8)
store float 0.0, float* %a2
%v2 = load float , float * %a2
%v2.int = bitcast float %v2 to i32
%sum1 = add i32 %v1, %v2.int
- call void @llvm.lifetime.end(i64 4, i8* %a2.i8)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %a2.i8)
ret i32 %sum1
}
@@ -1057,7 +1057,7 @@ define void @PR14059.1(double* %d) {
entry:
%X.sroa.0.i = alloca double, align 8
%0 = bitcast double* %X.sroa.0.i to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %0)
; Store to the low 32-bits...
%X.sroa.0.0.cast2.i = bitcast double* %X.sroa.0.i to i32*
@@ -1084,7 +1084,7 @@ entry:
%accum.real.i = load double, double* %d, align 8
%add.r.i = fadd double %accum.real.i, %X.sroa.0.0.load1.i
store double %add.r.i, double* %d, align 8
- call void @llvm.lifetime.end(i64 -1, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %0)
ret void
}
@@ -1652,7 +1652,7 @@ define void @PR25873(%struct.STest* %outData) {
entry:
%tmpData = alloca %struct.STest, align 8
%0 = bitcast %struct.STest* %tmpData to i8*
- call void @llvm.lifetime.start(i64 16, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 16, i8* %0)
%x = getelementptr inbounds %struct.STest, %struct.STest* %tmpData, i64 0, i32 0, i32 0
store float 1.230000e+02, float* %x, align 8
%y = getelementptr inbounds %struct.STest, %struct.STest* %tmpData, i64 0, i32 0, i32 1
@@ -1664,7 +1664,7 @@ entry:
store i64 %3, i64* %2, align 8
%4 = bitcast %struct.STest* %outData to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* %0, i64 16, i32 4, i1 false)
- call void @llvm.lifetime.end(i64 16, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 16, i8* %0)
ret void
}
@@ -1677,10 +1677,10 @@ define void @PR27999() unnamed_addr {
entry-block:
%0 = alloca [2 x i64], align 8
%1 = bitcast [2 x i64]* %0 to i8*
- call void @llvm.lifetime.start(i64 16, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 16, i8* %1)
%2 = getelementptr inbounds [2 x i64], [2 x i64]* %0, i32 0, i32 1
%3 = bitcast i64* %2 to i8*
- call void @llvm.lifetime.end(i64 8, i8* %3)
+ call void @llvm.lifetime.end.p0i8(i64 8, i8* %3)
ret void
}
@@ -1692,6 +1692,6 @@ bb1:
%e.7.sroa.6.i = alloca i32, align 1
%e.7.sroa.6.0.load81.i = load i32, i32* %e.7.sroa.6.i, align 1
%0 = bitcast i32* %e.7.sroa.6.i to i8*
- call void @llvm.lifetime.end(i64 2, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 2, i8* %0)
ret void
}
diff --git a/test/Transforms/SROA/pr26972.ll b/test/Transforms/SROA/pr26972.ll
index a71058c05b98..3140a805fc4b 100644
--- a/test/Transforms/SROA/pr26972.ll
+++ b/test/Transforms/SROA/pr26972.ll
@@ -10,8 +10,8 @@ target triple = "x86_64-pc-linux"
define void @fn1() {
%a = alloca [1073741825 x i32], align 16
%t0 = bitcast [1073741825 x i32]* %a to i8*
- call void @llvm.lifetime.end(i64 4294967300, i8* %t0)
+ call void @llvm.lifetime.end.p0i8(i64 4294967300, i8* %t0)
ret void
}
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
diff --git a/test/Transforms/SROA/preserve-nonnull.ll b/test/Transforms/SROA/preserve-nonnull.ll
new file mode 100644
index 000000000000..fc5ce6a445fa
--- /dev/null
+++ b/test/Transforms/SROA/preserve-nonnull.ll
@@ -0,0 +1,26 @@
+; RUN: opt < %s -sroa -S | FileCheck %s
+;
+; Make sure that SROA doesn't lose nonnull metadata
+; on loads from allocas that get optimized out.
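+; The rewritten load cannot carry !nonnull directly, so SROA re-expresses it as
+; an icmp against null fed into llvm.assume, as the CHECK lines below verify.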
+
+; CHECK-LABEL: define float* @yummy_nonnull
+; CHECK: [[RETURN:%(.*)]] = load float*, float** %arg, align 8
+; CHECK: [[ASSUME:%(.*)]] = icmp ne float* {{.*}}[[RETURN]], null
+; CHECK: call void @llvm.assume(i1 {{.*}}[[ASSUME]])
+; CHECK: ret float* {{.*}}[[RETURN]]
+
+define float* @yummy_nonnull(float** %arg) {
+entry-block:
+ %buf = alloca float*
+
+ %_arg_i8 = bitcast float** %arg to i8*
+ %_buf_i8 = bitcast float** %buf to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %_buf_i8, i8* %_arg_i8, i64 8, i32 8, i1 false)
+
+ %ret = load float*, float** %buf, align 8, !nonnull !0
+ ret float* %ret
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1)
+
+!0 = !{}
diff --git a/test/Transforms/SROA/vector-lifetime-intrinsic.ll b/test/Transforms/SROA/vector-lifetime-intrinsic.ll
index 37cf394382ac..abb5cb2ea334 100644
--- a/test/Transforms/SROA/vector-lifetime-intrinsic.ll
+++ b/test/Transforms/SROA/vector-lifetime-intrinsic.ll
@@ -3,10 +3,10 @@
target datalayout = "e-p:64:32-i64:32-v32:32-n32-S64"
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
; CHECK: @wombat
; CHECK-NOT: alloca
@@ -15,12 +15,12 @@ define void @wombat(<4 x float> %arg1) {
bb:
%tmp = alloca <4 x float>, align 16
%tmp8 = bitcast <4 x float>* %tmp to i8*
- call void @llvm.lifetime.start(i64 16, i8* %tmp8)
+ call void @llvm.lifetime.start.p0i8(i64 16, i8* %tmp8)
store <4 x float> %arg1, <4 x float>* %tmp, align 16
%tmp17 = bitcast <4 x float>* %tmp to <3 x float>*
%tmp18 = load <3 x float>, <3 x float>* %tmp17
%tmp20 = bitcast <4 x float>* %tmp to i8*
- call void @llvm.lifetime.end(i64 16, i8* %tmp20)
+ call void @llvm.lifetime.end.p0i8(i64 16, i8* %tmp20)
call void @wombat3(<3 x float> %tmp18)
ret void
}
diff --git a/test/Transforms/SafeStack/AArch64/abi_ssp.ll b/test/Transforms/SafeStack/AArch64/abi_ssp.ll
index 5d584d0a76b9..c78b20aaa01a 100644
--- a/test/Transforms/SafeStack/AArch64/abi_ssp.ll
+++ b/test/Transforms/SafeStack/AArch64/abi_ssp.ll
@@ -1,5 +1,5 @@
-; RUN: opt -safe-stack -S -mtriple=aarch64-linux-android < %s -o - | FileCheck --check-prefix=TLS %s
-
+; RUN: opt -safe-stack -S -mtriple=aarch64-linux-android < %s -o - | FileCheck --check-prefixes=TLS,ANDROID %s
+; RUN: opt -safe-stack -S -mtriple=aarch64-unknown-fuchsia < %s -o - | FileCheck --check-prefixes=TLS,FUCHSIA %s
define void @foo() nounwind uwtable safestack sspreq {
entry:
@@ -7,7 +7,8 @@ entry:
; TLS: call i8* @llvm.thread.pointer()
; TLS: %[[TP2:.*]] = call i8* @llvm.thread.pointer()
-; TLS: %[[B:.*]] = getelementptr i8, i8* %[[TP2]], i32 40
+; ANDROID: %[[B:.*]] = getelementptr i8, i8* %[[TP2]], i32 40
+; FUCHSIA: %[[B:.*]] = getelementptr i8, i8* %[[TP2]], i32 -16
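+; Android keeps the stack guard in a TLS slot at a positive offset from the
+; thread pointer, while Fuchsia's ABI places it below, hence the -16.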
; TLS: %[[C:.*]] = bitcast i8* %[[B]] to i8**
; TLS: %[[StackGuard:.*]] = load i8*, i8** %[[C]]
; TLS: store i8* %[[StackGuard]], i8** %[[StackGuardSlot:.*]]
diff --git a/test/Transforms/SafeStack/X86/abi_ssp.ll b/test/Transforms/SafeStack/X86/abi_ssp.ll
index ba4ced5b8820..b489e07a8868 100644
--- a/test/Transforms/SafeStack/X86/abi_ssp.ll
+++ b/test/Transforms/SafeStack/X86/abi_ssp.ll
@@ -1,18 +1,25 @@
-; RUN: opt -safe-stack -S -mtriple=i686-pc-linux-gnu < %s -o - | FileCheck --check-prefix=TLS --check-prefix=TLS32 %s
-; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck --check-prefix=TLS --check-prefix=TLS64 %s
-; RUN: opt -safe-stack -S -mtriple=i686-linux-android < %s -o - | FileCheck --check-prefix=TLS --check-prefix=TLS32 %s
-; RUN: opt -safe-stack -S -mtriple=x86_64-linux-android < %s -o - | FileCheck --check-prefix=TLS --check-prefix=TLS64 %s
+; RUN: opt -safe-stack -S -mtriple=i686-pc-linux-gnu < %s -o - | FileCheck --check-prefixes=COMMON,TLS32 %s
+; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck --check-prefixes=COMMON,TLS64 %s
+
+; RUN: opt -safe-stack -S -mtriple=i686-linux-android < %s -o - | FileCheck --check-prefixes=COMMON,GLOBAL32 %s
+; RUN: opt -safe-stack -S -mtriple=i686-linux-android24 < %s -o - | FileCheck --check-prefixes=COMMON,TLS32 %s
+
+; RUN: opt -safe-stack -S -mtriple=x86_64-linux-android < %s -o - | FileCheck --check-prefixes=COMMON,TLS64 %s
+
+; RUN: opt -safe-stack -S -mtriple=x86_64-unknown-fuchsia < %s -o - | FileCheck --check-prefixes=COMMON,FUCHSIA64 %s
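+
+; Plain i686 Android falls back to the global __stack_chk_guard, android24 and
+; the other triples use a TLS slot, and Fuchsia reads the guard from a fixed
+; offset in its thread ABI, as the per-prefix checks below show.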
define void @foo() safestack sspreq {
entry:
; TLS32: %[[StackGuard:.*]] = load i8*, i8* addrspace(256)* inttoptr (i32 20 to i8* addrspace(256)*)
; TLS64: %[[StackGuard:.*]] = load i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; TLS: store i8* %[[StackGuard]], i8** %[[StackGuardSlot:.*]]
+; FUCHSIA64: %[[StackGuard:.*]] = load i8*, i8* addrspace(257)* inttoptr (i32 16 to i8* addrspace(257)*)
+; GLOBAL32: %[[StackGuard:.*]] = load i8*, i8** @__stack_chk_guard
+; COMMON: store i8* %[[StackGuard]], i8** %[[StackGuardSlot:.*]]
%a = alloca i8, align 1
call void @Capture(i8* %a)
-; TLS: %[[A:.*]] = load i8*, i8** %[[StackGuardSlot]]
-; TLS: icmp ne i8* %[[StackGuard]], %[[A]]
+; COMMON: %[[A:.*]] = load i8*, i8** %[[StackGuardSlot]]
+; COMMON: icmp ne i8* %[[StackGuard]], %[[A]]
ret void
}
diff --git a/test/Transforms/SafeStack/X86/call.ll b/test/Transforms/SafeStack/X86/call.ll
index cbac4ce1bb0d..2d78bb1a6898 100644
--- a/test/Transforms/SafeStack/X86/call.ll
+++ b/test/Transforms/SafeStack/X86/call.ll
@@ -159,8 +159,8 @@ define void @call_lifetime(i32* %p) {
entry:
%q = alloca [100 x i8], align 16
%0 = bitcast [100 x i8]* %q to i8*
- call void @llvm.lifetime.start(i64 100, i8* %0)
- call void @llvm.lifetime.end(i64 100, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 100, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 100, i8* %0)
ret void
}
@@ -174,5 +174,5 @@ declare void @readnone0(i8* nocapture readnone, i8* nocapture)
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind argmemonly
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind argmemonly
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind argmemonly
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind argmemonly
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind argmemonly
diff --git a/test/Transforms/SafeStack/X86/coloring-ssp.ll b/test/Transforms/SafeStack/X86/coloring-ssp.ll
index d71babe200df..3b04fdf13fbc 100644
--- a/test/Transforms/SafeStack/X86/coloring-ssp.ll
+++ b/test/Transforms/SafeStack/X86/coloring-ssp.ll
@@ -16,19 +16,19 @@ entry:
%x0 = bitcast i64* %x to i8*
%y0 = bitcast i64* %y to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %x0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %x0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -16
call void @capture64(i64* %x)
- call void @llvm.lifetime.end(i64 -1, i8* %x0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %x0)
- call void @llvm.lifetime.start(i64 -1, i8* %y0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %y0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -16
call void @capture64(i64* %y)
- call void @llvm.lifetime.end(i64 -1, i8* %y0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %y0)
ret void
}
-declare void @llvm.lifetime.start(i64, i8* nocapture)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
declare void @capture64(i64*)
diff --git a/test/Transforms/SafeStack/X86/coloring.ll b/test/Transforms/SafeStack/X86/coloring.ll
index 3ed9ccb43f39..76bdf37dbf4e 100644
--- a/test/Transforms/SafeStack/X86/coloring.ll
+++ b/test/Transforms/SafeStack/X86/coloring.ll
@@ -10,35 +10,35 @@ entry:
%x1 = alloca i32, align 4
%x2 = alloca i32, align 4
%0 = bitcast i32* %x to i8*
- call void @llvm.lifetime.start(i64 4, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %0)
; CHECK: %[[A1:.*]] = getelementptr i8, i8* %[[USP]], i32 -4
; CHECK: %[[A2:.*]] = bitcast i8* %[[A1]] to i32*
; CHECK: call void @capture(i32* nonnull %[[A2]])
call void @capture(i32* nonnull %x)
- call void @llvm.lifetime.end(i64 4, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %0)
%1 = bitcast i32* %x1 to i8*
- call void @llvm.lifetime.start(i64 4, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %1)
; CHECK: %[[B1:.*]] = getelementptr i8, i8* %[[USP]], i32 -4
; CHECK: %[[B2:.*]] = bitcast i8* %[[B1]] to i32*
; CHECK: call void @capture(i32* nonnull %[[B2]])
call void @capture(i32* nonnull %x1)
- call void @llvm.lifetime.end(i64 4, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %1)
%2 = bitcast i32* %x2 to i8*
- call void @llvm.lifetime.start(i64 4, i8* %2)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %2)
; CHECK: %[[C1:.*]] = getelementptr i8, i8* %[[USP]], i32 -4
; CHECK: %[[C2:.*]] = bitcast i8* %[[C1]] to i32*
; CHECK: call void @capture(i32* nonnull %[[C2]])
call void @capture(i32* nonnull %x2)
- call void @llvm.lifetime.end(i64 4, i8* %2)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %2)
ret void
}
-declare void @llvm.lifetime.start(i64, i8* nocapture)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
declare void @capture(i32*)
diff --git a/test/Transforms/SafeStack/X86/coloring2.ll b/test/Transforms/SafeStack/X86/coloring2.ll
index f3ac6d735c9d..2a8f871945ff 100644
--- a/test/Transforms/SafeStack/X86/coloring2.ll
+++ b/test/Transforms/SafeStack/X86/coloring2.ll
@@ -15,21 +15,21 @@ entry:
%y0 = bitcast i32* %y to i8*
%z0 = bitcast i32* %z to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %z0)
- call void @llvm.lifetime.start(i64 -1, i8* %x0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %z0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %x0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -4
call void @capture32(i32* %x)
- call void @llvm.lifetime.end(i64 -1, i8* %x0)
- call void @llvm.lifetime.start(i64 -1, i8* %y0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %x0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %y0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -4
call void @capture32(i32* %y)
- call void @llvm.lifetime.end(i64 -1, i8* %y0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %y0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -8
call void @capture32(i32* %z)
- call void @llvm.lifetime.end(i64 -1, i8* %z0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %z0)
ret void
}
@@ -44,11 +44,11 @@ entry:
%y = alloca i32, align 4
%x0 = bitcast i32* %x to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %x0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %x0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -4
call void @capture32(i32* %x)
- call void @llvm.lifetime.end(i64 -1, i8* %x0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %x0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -8
call void @capture32(i32* %y)
@@ -70,21 +70,21 @@ entry:
%y0 = bitcast i32* %y to i8*
%z0 = bitcast i64* %z to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %x0)
- call void @llvm.lifetime.start(i64 -1, i8* %y0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %x0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %y0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -4
call void @capture32(i32* %x)
- call void @llvm.lifetime.end(i64 -1, i8* %x0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %x0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -8
call void @capture32(i32* %y)
- call void @llvm.lifetime.end(i64 -1, i8* %y0)
- call void @llvm.lifetime.start(i64 -1, i8* %z0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %y0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %z0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -8
call void @capture64(i64* %z)
- call void @llvm.lifetime.end(i64 -1, i8* %z0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %z0)
ret void
}
@@ -103,9 +103,9 @@ entry:
%y0 = bitcast i32* %y to i8*
%z0 = bitcast i64* %z to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %x0)
- call void @llvm.lifetime.start(i64 -1, i8* %y0)
- call void @llvm.lifetime.start(i64 -1, i8* %z0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %x0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %y0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %z0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -16
call void @capture32(i32* %x)
@@ -116,9 +116,9 @@ entry:
; CHECK: getelementptr i8, i8* %[[USP]], i32 -8
call void @capture64(i64* %z)
- call void @llvm.lifetime.end(i64 -1, i8* %x0)
- call void @llvm.lifetime.end(i64 -1, i8* %y0)
- call void @llvm.lifetime.end(i64 -1, i8* %z0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %x0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %y0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %z0)
ret void
}
@@ -156,9 +156,9 @@ entry:
%z1 = alloca i64, align 8
%z2 = alloca i64, align 8
%0 = bitcast i64* %x1 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %0)
%1 = bitcast i64* %x2 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %1)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -8
; CHECK: call void @capture64(
call void @capture64(i64* nonnull %x1)
@@ -169,7 +169,7 @@ entry:
if.then: ; preds = %entry
%2 = bitcast i64* %y to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %2)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %2)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -24
; CHECK: call void @capture64(
call void @capture64(i64* nonnull %y)
@@ -177,29 +177,29 @@ if.then: ; preds = %entry
if.then3: ; preds = %if.then
%3 = bitcast i64* %y1 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %3)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %3)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -32
; CHECK: call void @capture64(
call void @capture64(i64* nonnull %y1)
- call void @llvm.lifetime.end(i64 -1, i8* %3)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %3)
br label %if.end
if.else: ; preds = %if.then
%4 = bitcast i64* %y2 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %4)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %4)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -32
; CHECK: call void @capture64(
call void @capture64(i64* nonnull %y2)
- call void @llvm.lifetime.end(i64 -1, i8* %4)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %4)
br label %if.end
if.end: ; preds = %if.else, %if.then3
- call void @llvm.lifetime.end(i64 -1, i8* %2)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %2)
br label %if.end9
if.else4: ; preds = %entry
%5 = bitcast i64* %z to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %5)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %5)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -24
; CHECK: call void @capture64(
call void @capture64(i64* nonnull %z)
@@ -207,29 +207,29 @@ if.else4: ; preds = %entry
if.then6: ; preds = %if.else4
%6 = bitcast i64* %z1 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %6)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %6)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -32
; CHECK: call void @capture64(
call void @capture64(i64* nonnull %z1)
- call void @llvm.lifetime.end(i64 -1, i8* %6)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %6)
br label %if.end8
if.else7: ; preds = %if.else4
%7 = bitcast i64* %z2 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %7)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %7)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -32
; CHECK: call void @capture64(
call void @capture64(i64* nonnull %z2)
- call void @llvm.lifetime.end(i64 -1, i8* %7)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %7)
br label %if.end8
if.end8: ; preds = %if.else7, %if.then6
- call void @llvm.lifetime.end(i64 -1, i8* %5)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %5)
br label %if.end9
if.end9: ; preds = %if.end8, %if.end
- call void @llvm.lifetime.end(i64 -1, i8* %1)
- call void @llvm.lifetime.end(i64 -1, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %0)
ret void
}
@@ -243,21 +243,21 @@ entry:
%y = alloca i32, align 4
%x0 = bitcast i32* %x to i8*
%y0 = bitcast i32* %y to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %x0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %x0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(i32* %x)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.start(i64 -1, i8* %y0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %y0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -8
; CHECK: call void @capture32(
call void @capture32(i32* %y)
- call void @llvm.lifetime.end(i64 -1, i8* %y0)
- call void @llvm.lifetime.end(i64 -1, i8* %x0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %y0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %x0)
ret void
bb3:
- call void @llvm.lifetime.end(i64 -1, i8* %x0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %x0)
ret void
}
@@ -270,18 +270,18 @@ entry:
%y = alloca i32, align 4
%x0 = bitcast i32* %x to i8*
%y0 = bitcast i32* %y to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %x0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %x0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(i32* %x)
- call void @llvm.lifetime.end(i64 -1, i8* %x0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %x0)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.start(i64 -1, i8* %y0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %y0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(i32* %y)
- call void @llvm.lifetime.end(i64 -1, i8* %y0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %y0)
ret void
bb3:
ret void
@@ -297,14 +297,14 @@ entry:
%y = alloca i32, align 4
%x0 = bitcast i32* %x to i8*
%y0 = bitcast i32* %y to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %x0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %x0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(i32* %x)
- call void @llvm.lifetime.end(i64 -1, i8* %x0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %x0)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.start(i64 -1, i8* %y0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %y0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(i32* %y)
@@ -323,14 +323,14 @@ entry:
%y = alloca i32, align 4
%x0 = bitcast i32* %x to i8*
%y0 = bitcast i32* %y to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %x0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %x0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(i32* %x)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.end(i64 -1, i8* %x0)
- call void @llvm.lifetime.start(i64 -1, i8* %y0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %x0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %y0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(i32* %y)
@@ -352,10 +352,10 @@ entry:
; CHECK: getelementptr i8, i8* %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(i32* %x)
- call void @llvm.lifetime.end(i64 -1, i8* %x0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %x0)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.start(i64 -1, i8* %y0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %y0)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -8
; CHECK: call void @capture32(
call void @capture32(i32* %y)
@@ -374,29 +374,29 @@ entry:
%A.i = alloca [100 x i32], align 4
%B.i = alloca [100 x i32], align 4
%0 = bitcast [100 x i32]* %A.i to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %0)
%1 = bitcast [100 x i32]* %B.i to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %1)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -400
; CHECK: call void @capture100x32(
call void @capture100x32([100 x i32]* %A.i)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -800
; CHECK: call void @capture100x32(
call void @capture100x32([100 x i32]* %B.i)
- call void @llvm.lifetime.end(i64 -1, i8* %0)
- call void @llvm.lifetime.end(i64 -1, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %1)
%2 = bitcast [100 x i32]* %A.i1 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %2)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %2)
%3 = bitcast [100 x i32]* %B.i2 to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %3)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %3)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -400
; CHECK: call void @capture100x32(
call void @capture100x32([100 x i32]* %A.i1)
; CHECK: getelementptr i8, i8* %[[USP]], i32 -800
; CHECK: call void @capture100x32(
call void @capture100x32([100 x i32]* %B.i2)
- call void @llvm.lifetime.end(i64 -1, i8* %2)
- call void @llvm.lifetime.end(i64 -1, i8* %3)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %2)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %3)
ret void
}
@@ -408,11 +408,11 @@ entry:
%buf1 = alloca i8, i32 100000, align 16
%buf2 = alloca i8, i32 100000, align 16
- call void @llvm.lifetime.start(i64 -1, i8* %buf1)
- call void @llvm.lifetime.end(i64 -1, i8* %buf1)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %buf1)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %buf1)
- call void @llvm.lifetime.start(i64 -1, i8* %buf1)
- call void @llvm.lifetime.start(i64 -1, i8* %buf2)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %buf1)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %buf2)
call void @capture8(i8* %buf1)
call void @capture8(i8* %buf2)
ret void
@@ -435,13 +435,13 @@ entry:
%A.i = alloca [100 x i32], align 4
%B.i = alloca [100 x i32], align 4
%0 = bitcast [100 x i32]* %A.i to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %0) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %0) nounwind
%1 = bitcast [100 x i32]* %B.i to i8*
- call void @llvm.lifetime.start(i64 -1, i8* %1) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %1) nounwind
call void @capture100x32([100 x i32]* %A.i)
call void @capture100x32([100 x i32]* %B.i)
- call void @llvm.lifetime.end(i64 -1, i8* %0) nounwind
- call void @llvm.lifetime.end(i64 -1, i8* %1) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %0) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %1) nounwind
br label %block2
block2:
@@ -464,13 +464,13 @@ entry:
%b8 = bitcast [4 x %struct.Klass]* %b.i to i8*
; This value is used outside the lifetime zone below:
%z2 = getelementptr inbounds [4 x %struct.Klass], [4 x %struct.Klass]* %a.i, i64 0, i64 0, i32 0
- call void @llvm.lifetime.start(i64 -1, i8* %a8)
- call void @llvm.lifetime.start(i64 -1, i8* %b8)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %a8)
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %b8)
call void @capture8(i8* %a8)
call void @capture8(i8* %b8)
%z3 = load i32, i32* %z2, align 16
- call void @llvm.lifetime.end(i64 -1, i8* %a8)
- call void @llvm.lifetime.end(i64 -1, i8* %b8)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %a8)
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %b8)
ret i32 %z3
}
@@ -480,12 +480,12 @@ entry:
; CHECK: %[[USP:.*]] = load i8*, i8** @__safestack_unsafe_stack_ptr
; CHECK-NEXT: getelementptr i8, i8* %[[USP]], i32 -16
%x = alloca i8, align 4
- call void @llvm.lifetime.start(i64 4, i8* %x) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %x) nounwind
br label %l2
l2:
call void @capture8(i8* %x)
- call void @llvm.lifetime.end(i64 4, i8* %x) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %x) nounwind
br label %l2
}
@@ -498,23 +498,23 @@ entry:
; CHECK-NEXT: getelementptr i8, i8* %[[USP]], i32 -16
%x = alloca i8, align 4
%y = alloca i8, align 4
- call void @llvm.lifetime.start(i64 4, i8* %x) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %x) nounwind
br label %l2
l2:
; CHECK: getelementptr i8, i8* %[[USP]], i32 -8
- call void @llvm.lifetime.start(i64 4, i8* %y) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %y) nounwind
call void @capture8(i8* %y)
- call void @llvm.lifetime.end(i64 4, i8* %y) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %y) nounwind
; CHECK: getelementptr i8, i8* %[[USP]], i32 -4
- call void @llvm.lifetime.start(i64 4, i8* %x) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %x) nounwind
call void @capture8(i8* %x)
br label %l2
}
-declare void @llvm.lifetime.start(i64, i8* nocapture)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
declare void @capture8(i8*)
declare void @capture32(i32*)
declare void @capture64(i64*)
diff --git a/test/Transforms/SafeStack/X86/debug-loc2.ll b/test/Transforms/SafeStack/X86/debug-loc2.ll
index 35e9b7711d2f..8059a722fd45 100644
--- a/test/Transforms/SafeStack/X86/debug-loc2.ll
+++ b/test/Transforms/SafeStack/X86/debug-loc2.ll
@@ -40,12 +40,12 @@ entry:
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
declare void @capture(i32*) #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
; Function Attrs: nounwind readnone
declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #3
diff --git a/test/Transforms/SafeStack/X86/layout-frag.ll b/test/Transforms/SafeStack/X86/layout-frag.ll
index 125eb0f8be9a..b127defc2c5d 100644
--- a/test/Transforms/SafeStack/X86/layout-frag.ll
+++ b/test/Transforms/SafeStack/X86/layout-frag.ll
@@ -14,16 +14,16 @@ entry:
%x0a = bitcast i64* %x0 to i8*
%x2a = bitcast i64* %x2 to i8*
- call void @llvm.lifetime.start(i64 4, i8* %x0a)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %x0a)
call void @capture64(i64* %x0)
- call void @llvm.lifetime.end(i64 4, i8* %x0a)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %x0a)
- call void @llvm.lifetime.start(i64 4, i8* %x1)
- call void @llvm.lifetime.start(i64 4, i8* %x2a)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %x1)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %x2a)
call void @capture8(i8* %x1)
call void @capture64(i64* %x2)
- call void @llvm.lifetime.end(i64 4, i8* %x1)
- call void @llvm.lifetime.end(i64 4, i8* %x2a)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %x1)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %x2a)
; Test that i64 allocas share space.
; CHECK: getelementptr i8, i8* %unsafe_stack_ptr, i32 -8
@@ -33,7 +33,7 @@ entry:
ret void
}
-declare void @llvm.lifetime.start(i64, i8* nocapture)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
declare void @capture8(i8*)
declare void @capture64(i64*)
diff --git a/test/Transforms/SampleProfile/Inputs/import.prof b/test/Transforms/SampleProfile/Inputs/import.prof
new file mode 100644
index 000000000000..efadc0c5c9c6
--- /dev/null
+++ b/test/Transforms/SampleProfile/Inputs/import.prof
@@ -0,0 +1,4 @@
+main:10000:0
+ 3: foo:1000
+ 3: bar:200
+ 4: baz:10
diff --git a/test/Transforms/SampleProfile/Inputs/indirect-call.afdo b/test/Transforms/SampleProfile/Inputs/indirect-call.afdo
new file mode 100644
index 000000000000..2d5b345e960e
--- /dev/null
+++ b/test/Transforms/SampleProfile/Inputs/indirect-call.afdo
Binary files differ
diff --git a/test/Transforms/SampleProfile/Inputs/indirect-call.prof b/test/Transforms/SampleProfile/Inputs/indirect-call.prof
new file mode 100644
index 000000000000..428d4cedef5a
--- /dev/null
+++ b/test/Transforms/SampleProfile/Inputs/indirect-call.prof
@@ -0,0 +1,13 @@
+test:63067:0
+ 4: 3345 _Z3barv:1398 _Z3foov:2059
+test_inline:3000:0
+ 5: foo_inline1:3000
+ 1: 3000
+ 5: foo_inline2:4000
+ 1: 4000
+test_noinline:3000:0
+ 5: foo_noinline:3000
+ 1: 3000
+test_direct:3000:0
+ 5: foo_direct:3000
+ 1: 3000
diff --git a/test/Transforms/SampleProfile/branch.ll b/test/Transforms/SampleProfile/branch.ll
index 2ef01a76b0f0..5a5160e6343a 100644
--- a/test/Transforms/SampleProfile/branch.ll
+++ b/test/Transforms/SampleProfile/branch.ll
@@ -87,7 +87,9 @@ for.cond: ; preds = %for.inc, %if.then.2
%6 = load i32, i32* %u, align 4, !dbg !46
%7 = load i32, i32* %limit, align 4, !dbg !48
%cmp5 = icmp slt i32 %6, %7, !dbg !49
- br i1 %cmp5, label %for.body, label %for.end, !dbg !50
+ br i1 %cmp5, label %for.body, label %for.end, !dbg !50, !prof !80
+; CHECK: edge for.cond -> for.body probability is 0x73333333 / 0x80000000 = 90.00%
+; CHECK: edge for.cond -> for.end probability is 0x0ccccccd / 0x80000000 = 10.00%
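+; The explicit branch_weights in !80 should win over the sample counts for this
+; branch, giving the exact 90/10 split checked above.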
for.body: ; preds = %for.cond
call void @llvm.dbg.declare(metadata double* %x, metadata !51, metadata !17), !dbg !53
@@ -237,3 +239,4 @@ attributes #4 = { nounwind readonly }
!77 = !DILocation(line: 20, column: 4, scope: !6)
!78 = !DILocation(line: 21, column: 4, scope: !6)
!79 = !DILocation(line: 22, column: 2, scope: !6)
+!80 = !{!"branch_weights", i32 90, i32 10}
diff --git a/test/Transforms/SampleProfile/calls.ll b/test/Transforms/SampleProfile/calls.ll
index 45909ddf3e54..3539c771627a 100644
--- a/test/Transforms/SampleProfile/calls.ll
+++ b/test/Transforms/SampleProfile/calls.ll
@@ -48,8 +48,8 @@ while.cond: ; preds = %if.end, %entry
store i32 %inc, i32* %i, align 4, !dbg !14
%cmp = icmp slt i32 %0, 400000000, !dbg !14
br i1 %cmp, label %while.body, label %while.end, !dbg !14
-; CHECK: edge while.cond -> while.body probability is 0x7d9eb367 / 0x80000000 = 98.14% [HOT edge]
-; CHECK: edge while.cond -> while.end probability is 0x02614c99 / 0x80000000 = 1.86%
+; CHECK: edge while.cond -> while.body probability is 0x77f2798d / 0x80000000 = 93.71% [HOT edge]
+; CHECK: edge while.cond -> while.end probability is 0x080d8673 / 0x80000000 = 6.29%
while.body: ; preds = %while.cond
%1 = load i32, i32* %i, align 4, !dbg !16
@@ -59,8 +59,8 @@ while.body: ; preds = %while.cond
; both branches out of while.body had the same weight. In reality,
; the edge while.body->if.then is taken most of the time.
;
-; CHECK: edge while.body -> if.else probability is 0x00059704 / 0x80000000 = 0.02%
-; CHECK: edge while.body -> if.then probability is 0x7ffa68fc / 0x80000000 = 99.98% [HOT edge]
+; CHECK: edge while.body -> if.else probability is 0x0005b1e0 / 0x80000000 = 0.02%
+; CHECK: edge while.body -> if.then probability is 0x7ffa4e20 / 0x80000000 = 99.98% [HOT edge]
if.then: ; preds = %while.body
@@ -103,14 +103,14 @@ declare i32 @printf(i8*, ...) #2
!12 = !DILocation(line: 8, scope: !7)
!13 = !DILocation(line: 9, scope: !7)
!14 = !DILocation(line: 9, scope: !15)
-!15 = !DILexicalBlockFile(discriminator: 1, file: !1, scope: !7)
+!15 = !DILexicalBlockFile(discriminator: 2, file: !1, scope: !7)
!16 = !DILocation(line: 10, scope: !17)
!17 = distinct !DILexicalBlock(line: 10, column: 0, file: !1, scope: !7)
!18 = !DILocation(line: 10, scope: !19)
-!19 = !DILexicalBlockFile(discriminator: 1, file: !1, scope: !17)
+!19 = !DILexicalBlockFile(discriminator: 2, file: !1, scope: !17)
!20 = !DILocation(line: 10, scope: !21)
-!21 = !DILexicalBlockFile(discriminator: 2, file: !1, scope: !17)
+!21 = !DILexicalBlockFile(discriminator: 4, file: !1, scope: !17)
!22 = !DILocation(line: 10, scope: !23)
-!23 = !DILexicalBlockFile(discriminator: 3, file: !1, scope: !17)
+!23 = !DILexicalBlockFile(discriminator: 6, file: !1, scope: !17)
!24 = !DILocation(line: 11, scope: !7)
!25 = !DILocation(line: 12, scope: !7)
diff --git a/test/Transforms/SampleProfile/cov-zero-samples.ll b/test/Transforms/SampleProfile/cov-zero-samples.ll
index 7ccaa3e7d756..5239d74fdc6e 100644
--- a/test/Transforms/SampleProfile/cov-zero-samples.ll
+++ b/test/Transforms/SampleProfile/cov-zero-samples.ll
@@ -106,7 +106,7 @@ attributes #0 = { nounwind readnone }
!13 = !{!14, !14}
!14 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
!15 = !DILocation(line: 5, column: 27, scope: !16)
-!16 = !DILexicalBlockFile(scope: !11, file: !3, discriminator: 3)
+!16 = !DILexicalBlockFile(scope: !11, file: !3, discriminator: 6)
!17 = distinct !DISubprogram(name: "main", scope: !3, file: !3, line: 7, type: !18, isLocal: false, isDefinition: true, scopeLine: 7, flags: DIFlagPrototyped, isOptimized: false, unit: !2, variables: !4)
!18 = !DISubroutineType(types: !19)
!19 = !{!14}
@@ -118,7 +118,7 @@ attributes #0 = { nounwind readnone }
!25 = !DILocation(line: 9, column: 18, scope: !24)
!26 = !DILocation(line: 9, column: 8, scope: !24)
!27 = !DILocation(line: 9, column: 25, scope: !28)
-!28 = !DILexicalBlockFile(scope: !29, file: !3, discriminator: 1)
+!28 = !DILexicalBlockFile(scope: !29, file: !3, discriminator: 2)
!29 = distinct !DILexicalBlock(scope: !24, file: !3, line: 9, column: 3)
!30 = !DILocation(line: 9, column: 29, scope: !28)
!31 = !DILocation(line: 9, column: 27, scope: !28)
@@ -130,7 +130,7 @@ attributes #0 = { nounwind readnone }
!37 = !DILocation(line: 10, column: 11, scope: !34)
!38 = !DILocation(line: 10, column: 9, scope: !35)
!39 = !DILocation(line: 10, column: 36, scope: !40)
-!40 = !DILexicalBlockFile(scope: !34, file: !3, discriminator: 1)
+!40 = !DILexicalBlockFile(scope: !34, file: !3, discriminator: 2)
!41 = !DILocation(line: 10, column: 23, scope: !40)
!42 = !DILocation(line: 10, column: 20, scope: !40)
!43 = !DILocation(line: 10, column: 16, scope: !40)
@@ -139,7 +139,7 @@ attributes #0 = { nounwind readnone }
!46 = !DILocation(line: 11, column: 9, scope: !35)
!47 = !DILocation(line: 12, column: 3, scope: !35)
!48 = !DILocation(line: 9, column: 33, scope: !49)
-!49 = !DILexicalBlockFile(scope: !29, file: !3, discriminator: 2)
+!49 = !DILexicalBlockFile(scope: !29, file: !3, discriminator: 4)
!50 = !DILocation(line: 9, column: 3, scope: !49)
!51 = !DILocation(line: 13, column: 25, scope: !17)
!52 = !DILocation(line: 13, column: 3, scope: !17)
diff --git a/test/Transforms/SampleProfile/discriminator.ll b/test/Transforms/SampleProfile/discriminator.ll
index d0b96a9ea16e..85f6cbe8fb4a 100644
--- a/test/Transforms/SampleProfile/discriminator.ll
+++ b/test/Transforms/SampleProfile/discriminator.ll
@@ -79,12 +79,12 @@ while.end: ; preds = %while.cond
!10 = !DILocation(line: 2, scope: !4)
!11 = !DILocation(line: 3, scope: !4)
!12 = !DILocation(line: 3, scope: !13)
-!13 = !DILexicalBlockFile(discriminator: 1, file: !1, scope: !4)
+!13 = !DILexicalBlockFile(discriminator: 2, file: !1, scope: !4)
!14 = !DILocation(line: 4, scope: !15)
!15 = distinct !DILexicalBlock(line: 4, column: 0, file: !1, scope: !16)
!16 = distinct !DILexicalBlock(line: 3, column: 0, file: !1, scope: !4)
!17 = !DILocation(line: 4, scope: !18)
-!18 = !DILexicalBlockFile(discriminator: 1, file: !1, scope: !15)
+!18 = !DILexicalBlockFile(discriminator: 2, file: !1, scope: !15)
!19 = !DILocation(line: 5, scope: !16)
!20 = !DILocation(line: 6, scope: !16)
!21 = !DILocation(line: 7, scope: !4)
diff --git a/test/Transforms/SampleProfile/early-inline.ll b/test/Transforms/SampleProfile/early-inline.ll
index 780ff4751f40..51e7d243c187 100644
--- a/test/Transforms/SampleProfile/early-inline.ll
+++ b/test/Transforms/SampleProfile/early-inline.ll
@@ -41,8 +41,8 @@ declare i32 @__gxx_personality_v0(...)
!1 = !DIFile(filename: "a", directory: "b/")
!3 = !{i32 2, !"Dwarf Version", i32 4}
!4 = !{i32 2, !"Debug Info Version", i32 3}
-!6 = distinct !DISubprogram(linkageName: "_Z3foov", scope: !1, line: 5, scopeLine: 5, unit: !0)
+!6 = distinct !DISubprogram(linkageName: "_Z3foov", scope: !1, file: !1, line: 5, scopeLine: 5, unit: !0)
!9 = !DILocation(line: 6, column: 3, scope: !6)
!10 = !DILocation(line: 8, column: 5, scope: !11)
!11 = distinct !DILexicalBlock(scope: !6, file: !1, line: 7, column: 7)
-!12 = distinct !DISubprogram(linkageName: "_ZL3barv", scope: !1, line: 20, scopeLine: 20, unit: !0)
+!12 = distinct !DISubprogram(linkageName: "_ZL3barv", scope: !1, file: !1, line: 20, scopeLine: 20, unit: !0)
diff --git a/test/Transforms/SampleProfile/fnptr.ll b/test/Transforms/SampleProfile/fnptr.ll
index 0c671a7882f6..1b01d0c0c857 100644
--- a/test/Transforms/SampleProfile/fnptr.ll
+++ b/test/Transforms/SampleProfile/fnptr.ll
@@ -8,10 +8,10 @@
; RUN: opt < %s -passes=sample-profile -sample-profile-file=%S/Inputs/fnptr.prof | opt -analyze -branch-prob | FileCheck %s
; RUN: opt < %s -passes=sample-profile -sample-profile-file=%S/Inputs/fnptr.binprof | opt -analyze -branch-prob | FileCheck %s
-; CHECK: edge for.body3 -> if.then probability is 0x19f584f3 / 0x80000000 = 20.28%
-; CHECK: edge for.body3 -> if.else probability is 0x660a7b0d / 0x80000000 = 79.72%
-; CHECK: edge for.inc -> for.inc12 probability is 0x000f92fb / 0x80000000 = 0.05%
-; CHECK: edge for.inc -> for.body3 probability is 0x7ff06d05 / 0x80000000 = 99.95%
+; CHECK: edge for.body3 -> if.then probability is 0x1a56a56a / 0x80000000 = 20.58%
+; CHECK: edge for.body3 -> if.else probability is 0x65a95a96 / 0x80000000 = 79.42%
+; CHECK: edge for.inc -> for.inc12 probability is 0x000fbd1c / 0x80000000 = 0.05%
+; CHECK: edge for.inc -> for.body3 probability is 0x7ff042e4 / 0x80000000 = 99.95%
; CHECK: edge for.inc12 -> for.end14 probability is 0x04000000 / 0x80000000 = 3.12%
; CHECK: edge for.inc12 -> for.cond1.preheader probability is 0x7c000000 / 0x80000000 = 96.88%
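
Editor's note: the probability CHECK lines above are fixed-point values, a 32-bit numerator over the constant denominator 0x80000000 (2^31). A small sketch of how the printed percentage falls out of the hex pair, assuming plain rounding to two decimals:

num, den = 0x1a56a56a, 0x80000000
print("%.2f%%" % (100.0 * num / den))  # 20.58%, matching the first new CHECK
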
diff --git a/test/Transforms/SampleProfile/import.ll b/test/Transforms/SampleProfile/import.ll
new file mode 100644
index 000000000000..1ee45fb4fd3e
--- /dev/null
+++ b/test/Transforms/SampleProfile/import.ll
@@ -0,0 +1,31 @@
+; RUN: opt < %s -sample-profile -sample-profile-file=%S/Inputs/import.prof -S | FileCheck %s
+
+; Tests whether the functions in the inline stack are added to the
+; function_entry_count metadata.
+
+declare void @foo()
+
+define void @main() !dbg !7 {
+ call void @foo(), !dbg !18
+ ret void
+}
+
+; GUIDs of foo and bar should be included in the metadata to make sure hot
+; inline stacks are imported.
+; CHECK: !{!"function_entry_count", i64 1, i64 6699318081062747564, i64 -2012135647395072713}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!8, !9}
+!llvm.ident = !{!10}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.5 ", isOptimized: false, emissionKind: NoDebug, file: !1, enums: !2, retainedTypes: !2, globals: !2, imports: !2)
+!1 = !DIFile(filename: "calls.cc", directory: ".")
+!2 = !{}
+!6 = !DISubroutineType(types: !2)
+!7 = distinct !DISubprogram(name: "main", line: 7, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: false, unit: !0, scopeLine: 7, file: !1, scope: !1, type: !6, variables: !2)
+!8 = !{i32 2, !"Dwarf Version", i32 4}
+!9 = !{i32 1, !"Debug Info Version", i32 3}
+!10 = !{!"clang version 3.5 "}
+!15 = !DILexicalBlockFile(discriminator: 1, file: !1, scope: !7)
+!17 = distinct !DILexicalBlock(line: 10, column: 0, file: !1, scope: !7)
+!18 = !DILocation(line: 10, scope: !17)
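
Editor's note: the two extra i64 operands on function_entry_count above are function GUIDs. A sketch of how such a GUID can be reproduced outside of LLVM, under the assumption that GlobalValue::getGUID is the low 64 bits (read little-endian) of the MD5 of the global name:

import hashlib, struct

def guid(name):
    # Low 64 bits of MD5(name), little-endian (assumption about getGUID).
    return struct.unpack('<Q', hashlib.md5(name.encode()).digest()[:8])[0]

# guid('foo') should reproduce 6699318081062747564 from the CHECK line;
# the second, negative operand is the same computation printed as signed i64.
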
diff --git a/test/Transforms/SampleProfile/indirect-call-gcc.ll b/test/Transforms/SampleProfile/indirect-call-gcc.ll
new file mode 100644
index 000000000000..678c7931250e
--- /dev/null
+++ b/test/Transforms/SampleProfile/indirect-call-gcc.ll
@@ -0,0 +1,26 @@
+; RUN: opt < %s -sample-profile -sample-profile-file=%S/Inputs/indirect-call.afdo -S | FileCheck %s
+
+; Checks that indirect call targets are read correctly from a gcc-format
+; profile. It is expected to fail on certain architectures where the gcc
+; profile reader does not work.
+; XFAIL: powerpc64-, s390x, mips-, mips64-, sparc
+
+define void @test(void ()*) !dbg !3 {
+ %2 = alloca void ()*
+ store void ()* %0, void ()** %2
+ %3 = load void ()*, void ()** %2
+ ; CHECK: call {{.*}}, !prof ![[PROF:[0-9]+]]
+ call void %3(), !dbg !4
+ ret void
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1)
+!1 = !DIFile(filename: "test.cc", directory: "/")
+!2 = !{i32 2, !"Debug Info Version", i32 3}
+!3 = distinct !DISubprogram(name: "test", scope: !1, file: !1, line: 1, unit: !0)
+!4 = !DILocation(line: 5, scope: !3)
+; CHECK: ![[PROF]] = !{!"VP", i32 0, i64 3457, i64 9191153033785521275, i64 2059, i64 -1069303473483922844, i64 1398}
diff --git a/test/Transforms/SampleProfile/indirect-call.ll b/test/Transforms/SampleProfile/indirect-call.ll
new file mode 100644
index 000000000000..5a4913d6358f
--- /dev/null
+++ b/test/Transforms/SampleProfile/indirect-call.ll
@@ -0,0 +1,82 @@
+; RUN: opt < %s -sample-profile -sample-profile-file=%S/Inputs/indirect-call.prof -S | FileCheck %s
+
+; CHECK-LABEL: @test
+define void @test(void ()*) !dbg !3 {
+ %2 = alloca void ()*
+ store void ()* %0, void ()** %2
+ %3 = load void ()*, void ()** %2
+ ; CHECK: call {{.*}}, !prof ![[PROF:[0-9]+]]
+ call void %3(), !dbg !4
+ ret void
+}
+
+; CHECK-LABEL: @test_inline
+; If the indirect call was promoted and inlined in the profile, we should
+; promote and inline it here as well.
+define void @test_inline(i64* (i32*)*, i32* %x) !dbg !3 {
+ %2 = alloca i64* (i32*)*
+ store i64* (i32*)* %0, i64* (i32*)** %2
+ %3 = load i64* (i32*)*, i64* (i32*)** %2
+; CHECK: icmp {{.*}} @foo_inline2
+; CHECK: if.true.direct_targ:
+; CHECK-NOT: call
+; CHECK: if.false.orig_indirect:
+; CHECK: icmp {{.*}} @foo_inline1
+; CHECK: if.true.direct_targ1:
+; CHECK-NOT: call
+; CHECK: if.false.orig_indirect2:
+; CHECK: call
+ call i64* %3(i32* %x), !dbg !5
+ ret void
+}
+
+; CHECK-LABEL: @test_noinline
+; If the indirect call target is not available, we should not promote it.
+define void @test_noinline(void ()*) !dbg !3 {
+ %2 = alloca void ()*
+ store void ()* %0, void ()** %2
+ %3 = load void ()*, void ()** %2
+; CHECK-NOT: icmp
+; CHECK: call
+ call void %3(), !dbg !5
+ ret void
+}
+
+@x = global i32 0, align 4
+
+define i32* @foo_inline1(i32* %x) !dbg !3 {
+ ret i32* %x
+}
+
+define i32* @foo_inline2(i32* %x) !dbg !3 {
+ ret i32* %x
+}
+
+define i32 @foo_noinline(i32 %x) !dbg !3 {
+ ret i32 %x
+}
+
+define void @foo_direct() !dbg !3 {
+ ret void
+}
+
+; CHECK-LABEL: @test_direct
+; We should not promote a direct call.
+define void @test_direct() !dbg !3 {
+; CHECK-NOT: icmp
+; CHECK: call
+ call void @foo_alias(), !dbg !5
+ ret void
+}
+
+@foo_alias = alias void (), void ()* @foo_direct
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1)
+!1 = !DIFile(filename: "test.cc", directory: "/")
+!2 = !{i32 2, !"Debug Info Version", i32 3}
+!3 = distinct !DISubprogram(name: "test", scope: !1, file: !1, line: 1, unit: !0)
+!4 = !DILocation(line: 5, scope: !3)
+!5 = !DILocation(line: 6, scope: !3)
+; CHECK: ![[PROF]] = !{!"VP", i32 0, i64 3457, i64 9191153033785521275, i64 2059, i64 -1069303473483922844, i64 1398}
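
Editor's note: both new indirect-call tests end on the same value-profile CHECK. A sketch of how the "VP" operands decompose, assuming the usual layout of kind, total count, then (target GUID, count) pairs:

vp = ["VP", 0, 3457, 9191153033785521275, 2059, -1069303473483922844, 1398]
kind, total = vp[1], vp[2]               # kind 0: indirect call targets
targets = list(zip(vp[3::2], vp[4::2]))  # [(guid, 2059), (guid, 1398)]
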
diff --git a/test/Transforms/SampleProfile/inline-coverage.ll b/test/Transforms/SampleProfile/inline-coverage.ll
index c88e7f865fa2..080876a46471 100644
--- a/test/Transforms/SampleProfile/inline-coverage.ll
+++ b/test/Transforms/SampleProfile/inline-coverage.ll
@@ -16,7 +16,7 @@
; 11 return sum > 0 ? 0 : 1;
; 12 }
;
-; CHECK: remark: coverage.cc:10:12: inlined hot callee '_Z3fool' with 172746 samples into 'main'
+; CHECK: remark: coverage.cc:10:12: inlined hot callee '_Z3fool' into 'main'
; CHECK: remark: coverage.cc:9:21: Applied 23478 samples from profile (offset: 2.1)
; CHECK: remark: coverage.cc:10:16: Applied 23478 samples from profile (offset: 3)
; CHECK: remark: coverage.cc:4:10: Applied 31878 samples from profile (offset: 1)
@@ -120,7 +120,7 @@ for.end: ; preds = %for.cond
!27 = !DILocation(line: 9, column: 12, scope: !26)
!28 = !DILocation(line: 9, column: 8, scope: !26)
!29 = !DILocation(line: 9, column: 19, scope: !30)
-!30 = !DILexicalBlockFile(scope: !31, file: !1, discriminator: 1)
+!30 = !DILexicalBlockFile(scope: !31, file: !1, discriminator: 2)
!31 = distinct !DILexicalBlock(scope: !26, file: !1, line: 9, column: 3)
!32 = !DILocation(line: 9, column: 21, scope: !30)
!33 = !DILocation(line: 9, column: 3, scope: !30)
diff --git a/test/Transforms/SampleProfile/inline.ll b/test/Transforms/SampleProfile/inline.ll
index ed353834137b..3ed8988968f6 100644
--- a/test/Transforms/SampleProfile/inline.ll
+++ b/test/Transforms/SampleProfile/inline.ll
@@ -96,14 +96,14 @@ declare i32 @printf(i8*, ...) #2
!12 = !DILocation(line: 8, scope: !7)
!13 = !DILocation(line: 9, scope: !7)
!14 = !DILocation(line: 9, scope: !15)
-!15 = !DILexicalBlockFile(discriminator: 1, file: !1, scope: !7)
+!15 = !DILexicalBlockFile(discriminator: 2, file: !1, scope: !7)
!16 = !DILocation(line: 10, scope: !17)
!17 = distinct !DILexicalBlock(line: 10, column: 0, file: !1, scope: !7)
!18 = !DILocation(line: 10, scope: !19)
-!19 = !DILexicalBlockFile(discriminator: 1, file: !1, scope: !17)
+!19 = !DILexicalBlockFile(discriminator: 2, file: !1, scope: !17)
!20 = !DILocation(line: 10, scope: !21)
-!21 = !DILexicalBlockFile(discriminator: 2, file: !1, scope: !17)
+!21 = !DILexicalBlockFile(discriminator: 4, file: !1, scope: !17)
!22 = !DILocation(line: 10, scope: !23)
-!23 = !DILexicalBlockFile(discriminator: 3, file: !1, scope: !17)
+!23 = !DILexicalBlockFile(discriminator: 6, file: !1, scope: !17)
!24 = !DILocation(line: 11, scope: !7)
!25 = !DILocation(line: 12, scope: !7)
diff --git a/test/Transforms/SampleProfile/propagate.ll b/test/Transforms/SampleProfile/propagate.ll
index 45e3b8003ffc..5a4922bde935 100644
--- a/test/Transforms/SampleProfile/propagate.ll
+++ b/test/Transforms/SampleProfile/propagate.ll
@@ -244,7 +244,7 @@ attributes #3 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-
!31 = !DILocation(line: 7, column: 15, scope: !29)
!32 = !DILocation(line: 7, column: 10, scope: !29)
!33 = !DILocation(line: 7, column: 22, scope: !34)
-!34 = !DILexicalBlockFile(scope: !35, file: !1, discriminator: 1)
+!34 = !DILexicalBlockFile(scope: !35, file: !1, discriminator: 2)
!35 = distinct !DILexicalBlock(scope: !29, file: !1, line: 7, column: 5)
!36 = !DILocation(line: 7, column: 26, scope: !34)
!37 = !DILocation(line: 7, column: 24, scope: !34)
@@ -275,7 +275,7 @@ attributes #3 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-
!62 = !DILocation(line: 14, column: 24, scope: !59)
!63 = !DILocation(line: 14, column: 14, scope: !59)
!64 = !DILocation(line: 14, column: 31, scope: !65)
-!65 = !DILexicalBlockFile(scope: !66, file: !1, discriminator: 1)
+!65 = !DILexicalBlockFile(scope: !66, file: !1, discriminator: 2)
!66 = distinct !DILexicalBlock(scope: !59, file: !1, line: 14, column: 9)
!67 = !DILocation(line: 14, column: 33, scope: !65)
!68 = !DILocation(line: 14, column: 9, scope: !65)
@@ -285,11 +285,11 @@ attributes #3 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-
!72 = !DILocation(line: 16, column: 13, scope: !70)
!73 = !DILocation(line: 17, column: 9, scope: !70)
!74 = !DILocation(line: 14, column: 41, scope: !75)
-!75 = !DILexicalBlockFile(scope: !66, file: !1, discriminator: 2)
+!75 = !DILexicalBlockFile(scope: !66, file: !1, discriminator: 4)
!76 = !DILocation(line: 14, column: 9, scope: !75)
!77 = !DILocation(line: 19, column: 5, scope: !41)
!78 = !DILocation(line: 7, column: 30, scope: !79)
-!79 = !DILexicalBlockFile(scope: !35, file: !1, discriminator: 2)
+!79 = !DILexicalBlockFile(scope: !35, file: !1, discriminator: 4)
!80 = !DILocation(line: 7, column: 5, scope: !79)
!81 = !DILocation(line: 21, column: 10, scope: !6)
!82 = !DILocation(line: 21, column: 14, scope: !6)
@@ -313,5 +313,5 @@ attributes #3 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-
!100 = !DILocation(line: 28, column: 57, scope: !86)
!101 = !DILocation(line: 28, column: 47, scope: !86)
!102 = !DILocation(line: 28, column: 3, scope: !103)
-!103 = !DILexicalBlockFile(scope: !86, file: !1, discriminator: 1)
+!103 = !DILexicalBlockFile(scope: !86, file: !1, discriminator: 2)
!104 = !DILocation(line: 29, column: 3, scope: !86)
diff --git a/test/Transforms/SampleProfile/remarks.ll b/test/Transforms/SampleProfile/remarks.ll
index 908e4f8b10b4..dfb075ee00ea 100644
--- a/test/Transforms/SampleProfile/remarks.ll
+++ b/test/Transforms/SampleProfile/remarks.ll
@@ -19,7 +19,7 @@
; We are expecting foo() to be inlined in main() (almost all the cycles are
; spent inside foo).
-; CHECK: remark: remarks.cc:13:21: inlined hot callee '_Z3foov' with 623868 samples into 'main'
+; CHECK: remark: remarks.cc:13:21: inlined hot callee '_Z3foov' into 'main'
; The back edge for the loop is the hottest edge in the loop subgraph.
; CHECK: remark: remarks.cc:6:9: most popular destination for conditional branches at remarks.cc:5:3
@@ -33,11 +33,11 @@ entry:
%sum = alloca i64, align 8
%i = alloca i32, align 4
%0 = bitcast i64* %sum to i8*, !dbg !19
- call void @llvm.lifetime.start(i64 8, i8* %0) #4, !dbg !19
+ call void @llvm.lifetime.start.p0i8(i64 8, i8* %0) #4, !dbg !19
call void @llvm.dbg.declare(metadata i64* %sum, metadata !9, metadata !20), !dbg !21
store i64 0, i64* %sum, align 8, !dbg !21, !tbaa !22
%1 = bitcast i32* %i to i8*, !dbg !26
- call void @llvm.lifetime.start(i64 4, i8* %1) #4, !dbg !26
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %1) #4, !dbg !26
call void @llvm.dbg.declare(metadata i32* %i, metadata !10, metadata !20), !dbg !27
store i32 0, i32* %i, align 4, !dbg !27, !tbaa !28
br label %for.cond, !dbg !26
@@ -49,7 +49,7 @@ for.cond: ; preds = %for.inc, %entry
for.cond.cleanup: ; preds = %for.cond
%3 = bitcast i32* %i to i8*, !dbg !36
- call void @llvm.lifetime.end(i64 4, i8* %3) #4, !dbg !36
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %3) #4, !dbg !36
br label %for.end
for.body: ; preds = %for.cond
@@ -88,12 +88,12 @@ for.inc: ; preds = %if.end
for.end: ; preds = %for.cond.cleanup
%10 = load i64, i64* %sum, align 8, !dbg !53, !tbaa !22
%11 = bitcast i64* %sum to i8*, !dbg !54
- call void @llvm.lifetime.end(i64 8, i8* %11) #4, !dbg !54
+ call void @llvm.lifetime.end.p0i8(i64 8, i8* %11) #4, !dbg !54
ret i64 %10, !dbg !55
}
; Function Attrs: nounwind argmemonly
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
; Function Attrs: nounwind readnone
declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
@@ -102,7 +102,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
declare i32 @rand() #3
; Function Attrs: nounwind argmemonly
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
; Function Attrs: nounwind uwtable
define i32 @main() #0 !dbg !13 {
diff --git a/test/Transforms/Scalarizer/vector-gep.ll b/test/Transforms/Scalarizer/vector-gep.ll
new file mode 100644
index 000000000000..eacddf136a32
--- /dev/null
+++ b/test/Transforms/Scalarizer/vector-gep.ll
@@ -0,0 +1,122 @@
+; RUN: opt -S -scalarizer %s | FileCheck %s
+
+; Check that the scalarizer can handle vector GEPs with scalar indices
+
+@vec = global <4 x i16*> <i16* null, i16* null, i16* null, i16* null>
+@index = global i16 1
+@ptr = global [4 x i16] [i16 1, i16 2, i16 3, i16 4]
+@ptrptr = global i16* null
+
+; constant index
+define void @test1() {
+bb:
+ %0 = load <4 x i16*>, <4 x i16*>* @vec
+ %1 = getelementptr i16, <4 x i16*> %0, i16 1
+
+ ret void
+}
+
+;CHECK-LABEL: @test1
+;CHECK: %[[I0:.i[0-9]*]] = extractelement <4 x i16*> %0, i32 0
+;CHECK: getelementptr i16, i16* %[[I0]], i16 1
+;CHECK: %[[I1:.i[0-9]*]] = extractelement <4 x i16*> %0, i32 1
+;CHECK: getelementptr i16, i16* %[[I1]], i16 1
+;CHECK: %[[I2:.i[0-9]*]] = extractelement <4 x i16*> %0, i32 2
+;CHECK: getelementptr i16, i16* %[[I2]], i16 1
+;CHECK: %[[I3:.i[0-9]*]] = extractelement <4 x i16*> %0, i32 3
+;CHECK: getelementptr i16, i16* %[[I3]], i16 1
+
+; non-constant index
+define void @test2() {
+bb:
+ %0 = load <4 x i16*>, <4 x i16*>* @vec
+ %index = load i16, i16* @index
+ %1 = getelementptr i16, <4 x i16*> %0, i16 %index
+
+ ret void
+}
+
+;CHECK-LABEL: @test2
+;CHECK: %0 = load <4 x i16*>, <4 x i16*>* @vec
+;CHECK: %[[I0:.i[0-9]*]] = extractelement <4 x i16*> %0, i32 0
+;CHECK: %[[I1:.i[0-9]*]] = extractelement <4 x i16*> %0, i32 1
+;CHECK: %[[I2:.i[0-9]*]] = extractelement <4 x i16*> %0, i32 2
+;CHECK: %[[I3:.i[0-9]*]] = extractelement <4 x i16*> %0, i32 3
+;CHECK: %index = load i16, i16* @index
+;CHECK: %.splatinsert = insertelement <4 x i16> undef, i16 %index, i32 0
+;CHECK: %.splat = shufflevector <4 x i16> %.splatinsert, <4 x i16> undef, <4 x i32> zeroinitializer
+;CHECK: %.splat[[I0]] = extractelement <4 x i16> %.splat, i32 0
+;CHECK: getelementptr i16, i16* %[[I0]], i16 %.splat[[I0]]
+;CHECK: %.splat[[I1]] = extractelement <4 x i16> %.splat, i32 1
+;CHECK: getelementptr i16, i16* %[[I1]], i16 %.splat[[I1]]
+;CHECK: %.splat[[I2]] = extractelement <4 x i16> %.splat, i32 2
+;CHECK: getelementptr i16, i16* %[[I2]], i16 %.splat[[I2]]
+;CHECK: %.splat[[I3]] = extractelement <4 x i16> %.splat, i32 3
+;CHECK: getelementptr i16, i16* %[[I3]], i16 %.splat[[I3]]
+
+
+; Check that the scalarizer can handle vector GEPs with a scalar pointer
+
+; constant pointer
+define void @test3() {
+bb:
+ %0 = bitcast [4 x i16]* @ptr to i16*
+ %1 = getelementptr i16, i16* %0, <4 x i16> <i16 0, i16 1, i16 2, i16 3>
+
+ ret void
+}
+
+;CHECK-LABEL: @test3
+;CHECK: %0 = bitcast [4 x i16]* @ptr to i16*
+;CHECK: %.splatinsert = insertelement <4 x i16*> undef, i16* %0, i32 0
+;CHECK: %.splat = shufflevector <4 x i16*> %.splatinsert, <4 x i16*> undef, <4 x i32> zeroinitializer
+;CHECK: %.splat[[I0:.i[0-9]*]] = extractelement <4 x i16*> %.splat, i32 0
+;CHECK: getelementptr i16, i16* %.splat[[I0]], i16 0
+;CHECK: %.splat[[I1:.i[0-9]*]] = extractelement <4 x i16*> %.splat, i32 1
+;CHECK: getelementptr i16, i16* %.splat[[I1]], i16 1
+;CHECK: %.splat[[I2:.i[0-9]*]] = extractelement <4 x i16*> %.splat, i32 2
+;CHECK: getelementptr i16, i16* %.splat[[I2]], i16 2
+;CHECK: %.splat[[I3:.i[0-9]*]] = extractelement <4 x i16*> %.splat, i32 3
+;CHECK: getelementptr i16, i16* %.splat[[I3]], i16 3
+
+; non-constant pointer
+define void @test4() {
+bb:
+ %0 = load i16*, i16** @ptrptr
+ %1 = getelementptr i16, i16* %0, <4 x i16> <i16 0, i16 1, i16 2, i16 3>
+
+ ret void
+}
+
+;CHECK-LABEL: @test4
+;CHECK: %0 = load i16*, i16** @ptrptr
+;CHECK: %.splatinsert = insertelement <4 x i16*> undef, i16* %0, i32 0
+;CHECK: %.splat = shufflevector <4 x i16*> %.splatinsert, <4 x i16*> undef, <4 x i32> zeroinitializer
+;CHECK: %.splat[[I0:.i[0-9]*]] = extractelement <4 x i16*> %.splat, i32 0
+;CHECK: getelementptr i16, i16* %.splat[[I0]], i16 0
+;CHECK: %.splat[[I1:.i[0-9]*]] = extractelement <4 x i16*> %.splat, i32 1
+;CHECK: getelementptr i16, i16* %.splat[[I1]], i16 1
+;CHECK: %.splat[[I2:.i[0-9]*]] = extractelement <4 x i16*> %.splat, i32 2
+;CHECK: getelementptr i16, i16* %.splat[[I2]], i16 2
+;CHECK: %.splat[[I3:.i[0-9]*]] = extractelement <4 x i16*> %.splat, i32 3
+;CHECK: getelementptr i16, i16* %.splat[[I3]], i16 3
+
+; constant index, inbounds
+define void @test5() {
+bb:
+ %0 = load <4 x i16*>, <4 x i16*>* @vec
+ %1 = getelementptr inbounds i16, <4 x i16*> %0, i16 1
+
+ ret void
+}
+
+;CHECK-LABEL: @test5
+;CHECK: %[[I0:.i[0-9]*]] = extractelement <4 x i16*> %0, i32 0
+;CHECK: getelementptr inbounds i16, i16* %[[I0]], i16 1
+;CHECK: %[[I1:.i[0-9]*]] = extractelement <4 x i16*> %0, i32 1
+;CHECK: getelementptr inbounds i16, i16* %[[I1]], i16 1
+;CHECK: %[[I2:.i[0-9]*]] = extractelement <4 x i16*> %0, i32 2
+;CHECK: getelementptr inbounds i16, i16* %[[I2]], i16 1
+;CHECK: %[[I3:.i[0-9]*]] = extractelement <4 x i16*> %0, i32 3
+;CHECK: getelementptr inbounds i16, i16* %[[I3]], i16 1
+
diff --git a/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/split-gep-and-gvn-addrspace-addressing-modes.ll b/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/split-gep-and-gvn-addrspace-addressing-modes.ll
index 5815ae627373..23ec0ca25544 100644
--- a/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/split-gep-and-gvn-addrspace-addressing-modes.ll
+++ b/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/split-gep-and-gvn-addrspace-addressing-modes.ll
@@ -9,7 +9,7 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
; IR: getelementptr inbounds float, float addrspace(2)* [[BASE_PTR]], i64 1
; IR: getelementptr inbounds float, float addrspace(2)* [[BASE_PTR]], i64 32
; IR: getelementptr inbounds float, float addrspace(2)* [[BASE_PTR]], i64 33
-define void @sum_of_array(i32 %x, i32 %y, float addrspace(1)* nocapture %output) {
+define amdgpu_kernel void @sum_of_array(i32 %x, i32 %y, float addrspace(1)* nocapture %output) {
%tmp = sext i32 %y to i64
%tmp1 = sext i32 %x to i64
%tmp2 = getelementptr inbounds [4096 x [32 x float]], [4096 x [32 x float]] addrspace(2)* @array, i64 0, i64 %tmp1, i64 %tmp
@@ -42,7 +42,7 @@ define void @sum_of_array(i32 %x, i32 %y, float addrspace(1)* nocapture %output)
; IR: add i32 %x, 256
; IR: getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(2)* @array2, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
; IR: getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(2)* @array2, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-define void @sum_of_array_over_max_mubuf_offset(i32 %x, i32 %y, float addrspace(1)* nocapture %output) {
+define amdgpu_kernel void @sum_of_array_over_max_mubuf_offset(i32 %x, i32 %y, float addrspace(1)* nocapture %output) {
%tmp = sext i32 %y to i64
%tmp1 = sext i32 %x to i64
%tmp2 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(2)* @array2, i64 0, i64 %tmp1, i64 %tmp
@@ -74,7 +74,7 @@ define void @sum_of_array_over_max_mubuf_offset(i32 %x, i32 %y, float addrspace(
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i32 255
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i32 16128
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i32 16383
-define void @sum_of_lds_array_over_max_mubuf_offset(i32 %x, i32 %y, float addrspace(1)* nocapture %output) {
+define amdgpu_kernel void @sum_of_lds_array_over_max_mubuf_offset(i32 %x, i32 %y, float addrspace(1)* nocapture %output) {
%tmp2 = getelementptr inbounds [4096 x [4 x float]], [4096 x [4 x float]] addrspace(3)* @lds_array, i32 0, i32 %x, i32 %y
%tmp4 = load float, float addrspace(3)* %tmp2, align 4
%tmp5 = fadd float %tmp4, 0.000000e+00
diff --git a/test/Transforms/SimplifyCFG/ARM/switch-to-lookup-table.ll b/test/Transforms/SimplifyCFG/ARM/switch-to-lookup-table.ll
index 16f028d2e85a..90a9aa4d95b7 100644
--- a/test/Transforms/SimplifyCFG/ARM/switch-to-lookup-table.ll
+++ b/test/Transforms/SimplifyCFG/ARM/switch-to-lookup-table.ll
@@ -1,8 +1,8 @@
-; RUN: opt -S -simplifycfg -mtriple=arm -relocation-model=static < %s | FileCheck %s --check-prefix=CHECK --check-prefix=ENABLE
-; RUN: opt -S -simplifycfg -mtriple=arm -relocation-model=pic < %s | FileCheck %s --check-prefix=CHECK --check-prefix=ENABLE
-; RUN: opt -S -simplifycfg -mtriple=arm -relocation-model=ropi < %s | FileCheck %s --check-prefix=CHECK --check-prefix=DISABLE
-; RUN: opt -S -simplifycfg -mtriple=arm -relocation-model=rwpi < %s | FileCheck %s --check-prefix=CHECK --check-prefix=DISABLE
-; RUN: opt -S -simplifycfg -mtriple=arm -relocation-model=ropi-rwpi < %s | FileCheck %s --check-prefix=CHECK --check-prefix=DISABLE
+; RUN: opt -S -latesimplifycfg -mtriple=arm -relocation-model=static < %s | FileCheck %s --check-prefix=CHECK --check-prefix=ENABLE
+; RUN: opt -S -latesimplifycfg -mtriple=arm -relocation-model=pic < %s | FileCheck %s --check-prefix=CHECK --check-prefix=ENABLE
+; RUN: opt -S -latesimplifycfg -mtriple=arm -relocation-model=ropi < %s | FileCheck %s --check-prefix=CHECK --check-prefix=DISABLE
+; RUN: opt -S -latesimplifycfg -mtriple=arm -relocation-model=rwpi < %s | FileCheck %s --check-prefix=CHECK --check-prefix=DISABLE
+; RUN: opt -S -latesimplifycfg -mtriple=arm -relocation-model=ropi-rwpi < %s | FileCheck %s --check-prefix=CHECK --check-prefix=DISABLE
; CHECK: @{{.*}} = private unnamed_addr constant [3 x i32] [i32 1234, i32 5678, i32 15532]
; ENABLE: @{{.*}} = private unnamed_addr constant [3 x i32*] [i32* @c1, i32* @c2, i32* @c3]
diff --git a/test/Transforms/SimplifyCFG/CoveredLookupTable.ll b/test/Transforms/SimplifyCFG/CoveredLookupTable.ll
index 8b45a590bb1f..a42349e3d874 100644
--- a/test/Transforms/SimplifyCFG/CoveredLookupTable.ll
+++ b/test/Transforms/SimplifyCFG/CoveredLookupTable.ll
@@ -1,4 +1,4 @@
-; RUN: opt -simplifycfg -S %s | FileCheck %s
+; RUN: opt -latesimplifycfg -S %s | FileCheck %s
; rdar://15268442
target datalayout = "e-p:64:64:64-S128-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f16:16:16-f32:32:32-f64:64:64-f128:128:128-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll b/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll
index f3e5506ad933..ae6ff6d10bcf 100644
--- a/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll
+++ b/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -simplifycfg < %s -mtriple=x86_64-apple-darwin12.0.0 | FileCheck %s
+; RUN: opt -S -latesimplifycfg < %s -mtriple=x86_64-apple-darwin12.0.0 | FileCheck %s
; rdar://17887153
target datalayout = "e-p:64:64:64-S128-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f16:16:16-f32:32:32-f64:64:64-f128:128:128-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin12.0.0"
diff --git a/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll b/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll
index 26008700f5be..734312bc7285 100644
--- a/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll
+++ b/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -simplifycfg < %s -mtriple=x86_64-apple-darwin12.0.0 | FileCheck %s
+; RUN: opt -S -latesimplifycfg < %s -mtriple=x86_64-apple-darwin12.0.0 | FileCheck %s
; rdar://17735071
target datalayout = "e-p:64:64:64-S128-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f16:16:16-f32:32:32-f64:64:64-f128:128:128-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin12.0.0"
diff --git a/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll b/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
index 81c153483c66..4b9227b029ec 100644
--- a/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
+++ b/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -simplifycfg -S -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
+; RUN: opt < %s -latesimplifycfg -S -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
@@ -1178,8 +1178,9 @@ return:
ret i32 %retval.0
; CHECK-LABEL: @reuse_cmp2(
; CHECK: entry:
-; CHECK-NEXT: %switch.tableidx = sub i32 %x, 0
-; CHECK-NEXT: [[C:%.+]] = icmp ult i32 %switch.tableidx, 4
+; CHECK-NEXT: %switch = icmp ult i32 %x, 4
+; CHECK-NEXT: %x. = select i1 %switch, i32 %x, i32 4
+; CHECK-NEXT: [[C:%.+]] = icmp ne i32 %x., 4
; CHECK: [[R:%.+]] = select i1 [[C]], i32 {{.*}}, i32 100
; CHECK-NEXT: ret i32 [[R]]
}
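
Editor's note: the new CHECK sequence for @reuse_cmp2 clamps the switch index and compares the clamped value instead of subtracting and range-checking. A Python model of the rewritten entry block, assuming %x behaves as an unsigned 32-bit value; this is a reading of the CHECK lines, not the pass's code:

def reuse_cmp2_entry(x):
    in_range = (x & 0xffffffff) < 4  # %switch = icmp ult i32 %x, 4
    clamped = x if in_range else 4   # %x. = select i1 %switch, i32 %x, i32 4
    return clamped != 4              # [[C]] = icmp ne i32 %x., 4 == in_range
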
diff --git a/test/Transforms/SimplifyCFG/critedge-assume.ll b/test/Transforms/SimplifyCFG/critedge-assume.ll
new file mode 100644
index 000000000000..bfeb65769deb
--- /dev/null
+++ b/test/Transforms/SimplifyCFG/critedge-assume.ll
@@ -0,0 +1,83 @@
+; RUN: opt -o %t %s -instcombine -simplifycfg -thinlto-bc -verify-assumption-cache
+; RUN: llvm-dis -o - %t | FileCheck %s
+
+; Test that the simplifycfg pass correctly updates the assumption cache
+; when it clones the llvm.assume call as part of creating a critical
+; edge. To do that, we set up a pass pipeline such that (1) an assumption
+; cache is created for foo before simplifycfg updates it, and (2) foo's
+; assumption cache is verified after simplifycfg has run. To satisfy 1, we
+; run the instcombine pass first in our pipeline. To satisfy 2, we use the
+; ThinLTOBitcodeWriter pass to write bitcode (that pass uses the assumption
+; cache). That ensures that the pass manager does not call releaseMemory()
+; on the AssumptionCacheTracker before the end of the pipeline, which would
+; wipe out the bad assumption cache before it is verified.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+%class.F = type { i8 }
+%class.B = type { i8 }
+%class.A = type { %class.C }
+%class.C = type { i32 (...)** }
+
+define void @foo(%class.F* %this, %class.B* %out) {
+entry:
+ %call = tail call i32 @_ZNK1F5beginEv(%class.F* %this)
+ %call2 = tail call i32 @_ZNK1F3endEv(%class.F* %this)
+ %cmp.i22 = icmp eq i32 %call, %call2
+ br i1 %cmp.i22, label %while.end, label %while.body.preheader
+
+while.body.preheader:
+ br label %while.body
+
+while.body:
+ %frame_node.sroa.0.023 = phi i32 [ %inc.i, %_ZN10unique_ptrD2Ev.exit ], [ %call, %while.body.preheader ]
+ %call8 = tail call i8* @_Znwm(i64 8)
+ %inc.i = add nsw i32 %frame_node.sroa.0.023, 1
+ %cmp = icmp eq i32 %inc.i, %call2
+ br i1 %cmp, label %_ZN10unique_ptrD2Ev.exit, label %if.then
+
+if.then:
+ tail call void @_ZN1B6appendEv(%class.B* %out)
+ br label %_ZN10unique_ptrD2Ev.exit
+
+_ZN10unique_ptrD2Ev.exit:
+ %x1 = bitcast i8* %call8 to void (%class.A*)***
+ %vtable.i.i = load void (%class.A*)**, void (%class.A*)*** %x1, align 8
+ %x2 = bitcast void (%class.A*)** %vtable.i.i to i8*
+ %x3 = tail call i1 @llvm.type.test(i8* %x2, metadata !"foo")
+ ; CHECK: call void @llvm.assume
+ ; CHECK: call void @llvm.assume
+ tail call void @llvm.assume(i1 %x3) #5
+ br i1 %cmp, label %while.end.loopexit, label %while.body
+
+while.end.loopexit:
+ br label %while.end
+
+while.end:
+ ret void
+}
+
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+
+declare i32 @_ZNK1F5beginEv(%class.F*)
+
+declare i32 @_ZNK1F3endEv(%class.F*)
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1)
+
+declare noalias nonnull i8* @_Znwm(i64)
+
+declare void @_ZN1B6appendEv(%class.B*)
+
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+
+declare i1 @llvm.type.test(i8*, metadata)
+
+declare void @llvm.assume(i1)
+
+!llvm.module.flags = !{!0}
+!llvm.ident = !{!1}
+
+!0 = !{i32 1, !"PIC Level", i32 2}
+!1 = !{!"clang version 5.0.0 "}
diff --git a/test/Transforms/SimplifyCFG/div-rem-pairs.ll b/test/Transforms/SimplifyCFG/div-rem-pairs.ll
new file mode 100644
index 000000000000..85ffe1f4e0f3
--- /dev/null
+++ b/test/Transforms/SimplifyCFG/div-rem-pairs.ll
@@ -0,0 +1,119 @@
+; RUN: opt -simplifycfg -S < %s | FileCheck %s
+
+; FIXME: Hoist the sdiv because it's safe and free.
+; PR31028 - https://bugs.llvm.org/show_bug.cgi?id=31028
+
+define i32 @hoist_sdiv(i32 %a, i32 %b) {
+; CHECK-LABEL: @hoist_sdiv(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[REM:%.*]] = srem i32 %a, %b
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[REM]], 42
+; CHECK-NEXT: br i1 [[CMP]], label %if, label %end
+; CHECK: if:
+; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 %a, %b
+; CHECK-NEXT: br label %end
+; CHECK: end:
+; CHECK-NEXT: [[RET:%.*]] = phi i32 [ [[DIV]], %if ], [ 3, %entry ]
+; CHECK-NEXT: ret i32 [[RET]]
+;
+entry:
+ %rem = srem i32 %a, %b
+ %cmp = icmp eq i32 %rem, 42
+ br i1 %cmp, label %if, label %end
+
+if:
+ %div = sdiv i32 %a, %b
+ br label %end
+
+end:
+ %ret = phi i32 [ %div, %if ], [ 3, %entry ]
+ ret i32 %ret
+}
+
+; FIXME: Hoist the udiv because it's safe and free.
+
+define i64 @hoist_udiv(i64 %a, i64 %b) {
+; CHECK-LABEL: @hoist_udiv(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[REM:%.*]] = urem i64 %a, %b
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[REM]], 42
+; CHECK-NEXT: br i1 [[CMP]], label %if, label %end
+; CHECK: if:
+; CHECK-NEXT: [[DIV:%.*]] = udiv i64 %a, %b
+; CHECK-NEXT: br label %end
+; CHECK: end:
+; CHECK-NEXT: [[RET:%.*]] = phi i64 [ [[DIV]], %if ], [ 3, %entry ]
+; CHECK-NEXT: ret i64 [[RET]]
+;
+entry:
+ %rem = urem i64 %a, %b
+ %cmp = icmp eq i64 %rem, 42
+ br i1 %cmp, label %if, label %end
+
+if:
+ %div = udiv i64 %a, %b
+ br label %end
+
+end:
+ %ret = phi i64 [ %div, %if ], [ 3, %entry ]
+ ret i64 %ret
+}
+
+; FIXME: Hoist the srem because it's safe and likely free.
+
+define i16 @hoist_srem(i16 %a, i16 %b) {
+; CHECK-LABEL: @hoist_srem(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[DIV:%.*]] = sdiv i16 %a, %b
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i16 [[DIV]], 42
+; CHECK-NEXT: br i1 [[CMP]], label %if, label %end
+; CHECK: if:
+; CHECK-NEXT: [[REM:%.*]] = srem i16 %a, %b
+; CHECK-NEXT: br label %end
+; CHECK: end:
+; CHECK-NEXT: [[RET:%.*]] = phi i16 [ [[REM]], %if ], [ 3, %entry ]
+; CHECK-NEXT: ret i16 [[RET]]
+;
+entry:
+ %div = sdiv i16 %a, %b
+ %cmp = icmp eq i16 %div, 42
+ br i1 %cmp, label %if, label %end
+
+if:
+ %rem = srem i16 %a, %b
+ br label %end
+
+end:
+ %ret = phi i16 [ %rem, %if ], [ 3, %entry ]
+ ret i16 %ret
+}
+
+; FIXME: Hoist the urem because it's safe and likely free.
+
+define i8 @hoist_urem(i8 %a, i8 %b) {
+; CHECK-LABEL: @hoist_urem(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[DIV:%.*]] = udiv i8 %a, %b
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[DIV]], 42
+; CHECK-NEXT: br i1 [[CMP]], label %if, label %end
+; CHECK: if:
+; CHECK-NEXT: [[REM:%.*]] = urem i8 %a, %b
+; CHECK-NEXT: br label %end
+; CHECK: end:
+; CHECK-NEXT: [[RET:%.*]] = phi i8 [ [[REM]], %if ], [ 3, %entry ]
+; CHECK-NEXT: ret i8 [[RET]]
+;
+entry:
+ %div = udiv i8 %a, %b
+ %cmp = icmp eq i8 %div, 42
+ br i1 %cmp, label %if, label %end
+
+if:
+ %rem = urem i8 %a, %b
+ br label %end
+
+end:
+ %ret = phi i8 [ %rem, %if ], [ 3, %entry ]
+ ret i8 %ret
+}
+
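
Editor's note: the four FIXMEs in this new file all ask for the same transform: once one half of a div/rem pair is computed, the other is (nearly) free, since most ISAs produce quotient and remainder from a single divide. A sketch of the hoisted form of @hoist_sdiv, using truncating division to match LLVM's sdiv/srem semantics (Python's // floors, so it is adjusted by hand):

def hoist_sdiv(a, b):
    # Truncating division toward zero, as LLVM's sdiv defines it.
    div = abs(a) // abs(b)
    if (a < 0) != (b < 0):
        div = -div
    rem = a - b * div        # srem falls out of the same hardware divide
    return div if rem == 42 else 3
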
diff --git a/test/Transforms/SimplifyCFG/empty-cleanuppad.ll b/test/Transforms/SimplifyCFG/empty-cleanuppad.ll
index 9f657a81a05b..f2e0114a2a35 100644
--- a/test/Transforms/SimplifyCFG/empty-cleanuppad.ll
+++ b/test/Transforms/SimplifyCFG/empty-cleanuppad.ll
@@ -413,14 +413,14 @@ return: ; preds = %invoke.cont, %catch
define i32 @f9() personality i32 (...)* @__CxxFrameHandler3 {
entry:
%s = alloca i8, align 1
- call void @llvm.lifetime.start(i64 1, i8* nonnull %s)
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %s)
%bc = bitcast i8* %s to %struct.S2*
invoke void @"\01??1S2@@QEAA@XZ"(%struct.S2* %bc)
to label %try.cont unwind label %ehcleanup
ehcleanup:
%cleanup.pad = cleanuppad within none []
- call void @llvm.lifetime.end(i64 1, i8* nonnull %s)
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %s)
cleanupret from %cleanup.pad unwind label %catch.dispatch
catch.dispatch:
@@ -466,5 +466,5 @@ declare void @use_x(i32 %x)
declare i32 @__CxxFrameHandler3(...)
-declare void @llvm.lifetime.start(i64, i8* nocapture)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
diff --git a/test/Transforms/SimplifyCFG/lifetime.ll b/test/Transforms/SimplifyCFG/lifetime.ll
index 7c66be529500..270fe4d54422 100644
--- a/test/Transforms/SimplifyCFG/lifetime.ll
+++ b/test/Transforms/SimplifyCFG/lifetime.ll
@@ -10,11 +10,11 @@
define void @foo(i1 %x) {
entry:
%a = alloca i8
- call void @llvm.lifetime.start(i64 -1, i8* %a) nounwind
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %a) nounwind
br i1 %x, label %bb0, label %bb1
bb0:
- call void @llvm.lifetime.end(i64 -1, i8* %a) nounwind
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %a) nounwind
br label %bb1
bb1:
@@ -24,6 +24,6 @@ bb1:
declare void @f()
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
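
Editor's note: the renamed lifetime declarations reflect overload mangling for pointer parameters, where the suffix encodes address space and pointee type. A tiny sketch of the naming scheme as these tests use it; the helper is illustrative, not an LLVM API:

def mangled(base, addrspace=0, pointee="i8"):
    # Assumption: overloaded pointer operands append ".p<addrspace><type>".
    return "%s.p%d%s" % (base, addrspace, pointee)

assert mangled("llvm.lifetime.start") == "llvm.lifetime.start.p0i8"
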
diff --git a/test/Transforms/SimplifyCFG/merge-cond-stores.ll b/test/Transforms/SimplifyCFG/merge-cond-stores.ll
index 77e3158d9bbd..d5d0224a4b24 100644
--- a/test/Transforms/SimplifyCFG/merge-cond-stores.ll
+++ b/test/Transforms/SimplifyCFG/merge-cond-stores.ll
@@ -1,16 +1,21 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -simplifycfg -instcombine < %s -simplifycfg-merge-cond-stores=true -simplifycfg-merge-cond-stores-aggressively=false -phi-node-folding-threshold=2 -S | FileCheck %s
-; CHECK-LABEL: @test_simple
; This test should succeed and end up if-converted.
-; CHECK: icmp eq i32 %b, 0
-; CHECK-NEXT: icmp ne i32 %a, 0
-; CHECK-NEXT: xor i1 %x2, true
-; CHECK-NEXT: %[[x:.*]] = or i1 %{{.*}}, %{{.*}}
-; CHECK-NEXT: br i1 %[[x]]
-; CHECK: store
-; CHECK-NOT: store
-; CHECK: ret
define void @test_simple(i32* %p, i32 %a, i32 %b) {
+; CHECK-LABEL: @test_simple(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ne i32 [[A:%.*]], 0
+; CHECK-NEXT: [[X2:%.*]] = icmp eq i32 [[B:%.*]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[X2]], true
+; CHECK-NEXT: [[TMP2:%.*]] = or i1 [[TMP0]], [[TMP1]]
+; CHECK-NEXT: br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP4:%.*]]
+; CHECK: [[NOT_X2:%.*]] = xor i1 [[X2]], true
+; CHECK-NEXT: [[DOT:%.*]] = zext i1 [[NOT_X2]] to i32
+; CHECK-NEXT: store i32 [[DOT]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: br label [[TMP4]]
+; CHECK: ret void
+;
entry:
%x1 = icmp eq i32 %a, 0
br i1 %x1, label %fallthrough, label %yes1
@@ -31,12 +36,26 @@ end:
ret void
}
-; CHECK-LABEL: @test_recursive
; This test should entirely fold away, leaving one large basic block.
-; CHECK: store
-; CHECK-NOT: store
-; CHECK: ret
define void @test_recursive(i32* %p, i32 %a, i32 %b, i32 %c, i32 %d) {
+; CHECK-LABEL: @test_recursive(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = or i32 [[B:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[X4:%.*]] = icmp eq i32 [[D:%.*]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[TMP0]], [[C:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP1]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = xor i1 [[X4]], true
+; CHECK-NEXT: [[TMP4:%.*]] = or i1 [[TMP3]], [[TMP2]]
+; CHECK-NEXT: br i1 [[TMP4]], label [[TMP5:%.*]], label [[TMP6:%.*]]
+; CHECK: [[X3:%.*]] = icmp eq i32 [[C]], 0
+; CHECK-NEXT: [[NOT_X2:%.*]] = icmp ne i32 [[B]], 0
+; CHECK-NEXT: [[DOT:%.*]] = zext i1 [[NOT_X2]] to i32
+; CHECK-NEXT: [[DOT_:%.*]] = select i1 [[X3]], i32 [[DOT]], i32 2
+; CHECK-NEXT: [[DOT__:%.*]] = select i1 [[X4]], i32 [[DOT_]], i32 3
+; CHECK-NEXT: store i32 [[DOT__]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: br label [[TMP6]]
+; CHECK: ret void
+;
entry:
%x1 = icmp eq i32 %a, 0
br i1 %x1, label %fallthrough, label %yes1
@@ -74,13 +93,31 @@ end:
ret void
}
-; CHECK-LABEL: @test_not_ifconverted
; The code in each diamond is too large: it won't be if-converted, so our
; heuristics should say no.
-; CHECK: store
-; CHECK: store
-; CHECK: ret
define void @test_not_ifconverted(i32* %p, i32 %a, i32 %b) {
+; CHECK-LABEL: @test_not_ifconverted(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[X1:%.*]] = icmp eq i32 [[A:%.*]], 0
+; CHECK-NEXT: br i1 [[X1]], label [[FALLTHROUGH:%.*]], label [[YES1:%.*]]
+; CHECK: yes1:
+; CHECK-NEXT: [[Y1:%.*]] = or i32 [[B:%.*]], 55
+; CHECK-NEXT: [[Y2:%.*]] = add i32 [[Y1]], 24
+; CHECK-NEXT: [[Y3:%.*]] = and i32 [[Y2]], 67
+; CHECK-NEXT: store i32 [[Y3]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: br label [[FALLTHROUGH]]
+; CHECK: fallthrough:
+; CHECK-NEXT: [[X2:%.*]] = icmp eq i32 [[B]], 0
+; CHECK-NEXT: br i1 [[X2]], label [[END:%.*]], label [[YES2:%.*]]
+; CHECK: yes2:
+; CHECK-NEXT: [[Z1:%.*]] = or i32 [[A]], 55
+; CHECK-NEXT: [[Z2:%.*]] = add i32 [[Z1]], 24
+; CHECK-NEXT: [[Z3:%.*]] = and i32 [[Z2]], 67
+; CHECK-NEXT: store i32 [[Z3]], i32* [[P]], align 4
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
entry:
%x1 = icmp eq i32 %a, 0
br i1 %x1, label %fallthrough, label %yes1
@@ -107,13 +144,26 @@ end:
ret void
}
-; CHECK-LABEL: @test_aliasing1
; The store to %p clobbers the previous store, so if-converting this would
; be illegal.
-; CHECK: store
-; CHECK: store
-; CHECK: ret
define void @test_aliasing1(i32* %p, i32 %a, i32 %b) {
+; CHECK-LABEL: @test_aliasing1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[X1:%.*]] = icmp eq i32 [[A:%.*]], 0
+; CHECK-NEXT: br i1 [[X1]], label [[FALLTHROUGH:%.*]], label [[YES1:%.*]]
+; CHECK: yes1:
+; CHECK-NEXT: store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT: br label [[FALLTHROUGH]]
+; CHECK: fallthrough:
+; CHECK-NEXT: [[Y1:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT: [[X2:%.*]] = icmp eq i32 [[Y1]], 0
+; CHECK-NEXT: br i1 [[X2]], label [[END:%.*]], label [[YES2:%.*]]
+; CHECK: yes2:
+; CHECK-NEXT: store i32 1, i32* [[P]], align 4
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
entry:
%x1 = icmp eq i32 %a, 0
br i1 %x1, label %fallthrough, label %yes1
@@ -135,12 +185,25 @@ end:
ret void
}
-; CHECK-LABEL: @test_aliasing2
; The load from %q aliases with %p, so if-converting this would be illegal.
-; CHECK: store
-; CHECK: store
-; CHECK: ret
define void @test_aliasing2(i32* %p, i32* %q, i32 %a, i32 %b) {
+; CHECK-LABEL: @test_aliasing2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[X1:%.*]] = icmp eq i32 [[A:%.*]], 0
+; CHECK-NEXT: br i1 [[X1]], label [[FALLTHROUGH:%.*]], label [[YES1:%.*]]
+; CHECK: yes1:
+; CHECK-NEXT: store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT: br label [[FALLTHROUGH]]
+; CHECK: fallthrough:
+; CHECK-NEXT: [[Y1:%.*]] = load i32, i32* [[Q:%.*]], align 4
+; CHECK-NEXT: [[X2:%.*]] = icmp eq i32 [[Y1]], 0
+; CHECK-NEXT: br i1 [[X2]], label [[END:%.*]], label [[YES2:%.*]]
+; CHECK: yes2:
+; CHECK-NEXT: store i32 1, i32* [[P]], align 4
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
entry:
%x1 = icmp eq i32 %a, 0
br i1 %x1, label %fallthrough, label %yes1
@@ -164,12 +227,24 @@ end:
declare void @f()
-; CHECK-LABEL: @test_diamond_simple
; This should get if-converted.
-; CHECK: store
-; CHECK-NOT: store
-; CHECK: ret
define i32 @test_diamond_simple(i32* %p, i32* %q, i32 %a, i32 %b) {
+; CHECK-LABEL: @test_diamond_simple(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[X1:%.*]] = icmp eq i32 [[A:%.*]], 0
+; CHECK-NEXT: [[Z1:%.*]] = add i32 [[A]], [[B:%.*]]
+; CHECK-NEXT: [[Z2:%.*]] = select i1 [[X1]], i32 [[Z1]], i32 0
+; CHECK-NEXT: [[X2:%.*]] = icmp eq i32 [[B]], 0
+; CHECK-NEXT: [[Z3:%.*]] = sub i32 [[Z2]], [[B]]
+; CHECK-NEXT: [[Z4:%.*]] = select i1 [[X2]], i32 [[Z3]], i32 3
+; CHECK-NEXT: [[TMP0:%.*]] = or i32 [[A]], [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[TMP0]], 0
+; CHECK-NEXT: br i1 [[TMP1]], label [[TMP3:%.*]], label [[TMP2:%.*]]
+; CHECK: [[SIMPLIFYCFG_MERGE:%.*]] = select i1 [[X2]], i32 [[Z2]], i32 1
+; CHECK-NEXT: store i32 [[SIMPLIFYCFG_MERGE]], i32* [[P:%.*]], align 4
+; CHECK-NEXT: br label [[TMP3]]
+; CHECK: ret i32 [[Z4]]
+;
entry:
%x1 = icmp eq i32 %a, 0
br i1 %x1, label %no1, label %yes1
@@ -200,14 +275,36 @@ end:
ret i32 %z4
}
-; CHECK-LABEL: @test_diamond_alias3
; Now there is a call to f() in the bottom branch. The store in the first
; branch would now be reordered with respect to the call if we if-converted,
; so we must not.
-; CHECK: store
-; CHECK: store
-; CHECK: ret
define i32 @test_diamond_alias3(i32* %p, i32* %q, i32 %a, i32 %b) {
+; CHECK-LABEL: @test_diamond_alias3(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[X1:%.*]] = icmp eq i32 [[A:%.*]], 0
+; CHECK-NEXT: br i1 [[X1]], label [[NO1:%.*]], label [[YES1:%.*]]
+; CHECK: yes1:
+; CHECK-NEXT: store i32 0, i32* [[P:%.*]], align 4
+; CHECK-NEXT: br label [[FALLTHROUGH:%.*]]
+; CHECK: no1:
+; CHECK-NEXT: call void @f()
+; CHECK-NEXT: [[Z1:%.*]] = add i32 [[A]], [[B:%.*]]
+; CHECK-NEXT: br label [[FALLTHROUGH]]
+; CHECK: fallthrough:
+; CHECK-NEXT: [[Z2:%.*]] = phi i32 [ [[Z1]], [[NO1]] ], [ 0, [[YES1]] ]
+; CHECK-NEXT: [[X2:%.*]] = icmp eq i32 [[B]], 0
+; CHECK-NEXT: br i1 [[X2]], label [[NO2:%.*]], label [[YES2:%.*]]
+; CHECK: yes2:
+; CHECK-NEXT: store i32 1, i32* [[P]], align 4
+; CHECK-NEXT: br label [[END:%.*]]
+; CHECK: no2:
+; CHECK-NEXT: call void @f()
+; CHECK-NEXT: [[Z3:%.*]] = sub i32 [[Z2]], [[B]]
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: [[Z4:%.*]] = phi i32 [ [[Z3]], [[NO2]] ], [ 3, [[YES2]] ]
+; CHECK-NEXT: ret i32 [[Z4]]
+;
entry:
%x1 = icmp eq i32 %a, 0
br i1 %x1, label %no1, label %yes1
diff --git a/test/Transforms/SimplifyCFG/rangereduce.ll b/test/Transforms/SimplifyCFG/rangereduce.ll
index 36e932b37be5..13bbdfe83d07 100644
--- a/test/Transforms/SimplifyCFG/rangereduce.ll
+++ b/test/Transforms/SimplifyCFG/rangereduce.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -simplifycfg -S | FileCheck %s
+; RUN: opt < %s -latesimplifycfg -S | FileCheck %s
target datalayout = "e-n32"
diff --git a/test/Transforms/SimplifyCFG/remove-debug-2.ll b/test/Transforms/SimplifyCFG/remove-debug-2.ll
new file mode 100644
index 000000000000..6362f53e14c1
--- /dev/null
+++ b/test/Transforms/SimplifyCFG/remove-debug-2.ll
@@ -0,0 +1,68 @@
+; RUN: opt < %s -simplifycfg -S | FileCheck %s
+
+; Check that the debug location on the hoisted store for "ret = 0" is removed
+;
+; int foo(int x) {
+; int ret = 1;
+; if (x)
+; ret = 0;
+; return ret;
+; }
+;
+; CHECK: store i32 1,{{.+}}!dbg ![[DLOC1:[0-9]+]]
+; CHECK: icmp ne {{.+}}!dbg ![[DLOC2:[0-9]+]]
+; CHECK: [[VREG:%[^ ]+]] = select
+; CHECK: store i32 [[VREG]]
+; CHECK-NOT: !dbg
+; CHECK-SAME: {{$}}
+; CHECK: ret {{.+}}!dbg ![[DLOC3:[0-9]+]]
+; CHECK: ![[DLOC1]] = !DILocation(line: 2
+; CHECK: ![[DLOC2]] = !DILocation(line: 3
+; CHECK: ![[DLOC3]] = !DILocation(line: 5
+
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: noinline nounwind uwtable
+define i32 @foo(i32) !dbg !6 {
+ %2 = alloca i32, align 4
+ %3 = alloca i32, align 4
+ store i32 %0, i32* %2, align 4
+ store i32 1, i32* %3, align 4, !dbg !14
+ %4 = load i32, i32* %2, align 4, !dbg !15
+ %5 = icmp ne i32 %4, 0, !dbg !15
+ br i1 %5, label %6, label %7, !dbg !17
+
+; <label>:6: ; preds = %1
+ store i32 0, i32* %3, align 4, !dbg !18
+ br label %7, !dbg !19
+
+; <label>:7: ; preds = %6, %1
+ %8 = load i32, i32* %3, align 4, !dbg !20
+ ret i32 %8, !dbg !21
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1)
+!1 = !DIFile(filename: "foo.c", directory: "b/")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{}
+!6 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !7, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!7 = !DISubroutineType(types: !8)
+!8 = !{!9, !9}
+!9 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!10 = !DILocalVariable(name: "x", arg: 1, scope: !6, file: !1, line: 1, type: !9)
+!11 = !DIExpression()
+!12 = !DILocation(line: 1, column: 13, scope: !6)
+!13 = !DILocalVariable(name: "ret", scope: !6, file: !1, line: 2, type: !9)
+!14 = !DILocation(line: 2, column: 7, scope: !6)
+!15 = !DILocation(line: 3, column: 7, scope: !16)
+!16 = distinct !DILexicalBlock(scope: !6, file: !1, line: 3, column: 7)
+!17 = !DILocation(line: 3, column: 7, scope: !6)
+!18 = !DILocation(line: 4, column: 9, scope: !16)
+!19 = !DILocation(line: 4, column: 5, scope: !16)
+!20 = !DILocation(line: 5, column: 10, scope: !6)
+!21 = !DILocation(line: 5, column: 3, scope: !6)
diff --git a/test/Transforms/SimplifyCFG/switch_create.ll b/test/Transforms/SimplifyCFG/switch_create.ll
index 29d3a34a05e6..c752636ae83d 100644
--- a/test/Transforms/SimplifyCFG/switch_create.ll
+++ b/test/Transforms/SimplifyCFG/switch_create.ll
@@ -1,5 +1,5 @@
; RUN: opt -S -simplifycfg < %s | FileCheck %s
-; RUN: opt -S -default-data-layout="p:32:32-p1:16:16" -simplifycfg < %s | FileCheck -check-prefix=CHECK -check-prefix=DL %s
+; RUN: opt -S -data-layout="p:32:32-p1:16:16" -simplifycfg < %s | FileCheck -check-prefix=CHECK -check-prefix=DL %s
declare void @foo1()
diff --git a/test/Transforms/StraightLineStrengthReduce/AMDGPU/reassociate-geps-and-slsr-addrspace.ll b/test/Transforms/StraightLineStrengthReduce/AMDGPU/reassociate-geps-and-slsr-addrspace.ll
index f2853aca698f..9554ae690316 100644
--- a/test/Transforms/StraightLineStrengthReduce/AMDGPU/reassociate-geps-and-slsr-addrspace.ll
+++ b/test/Transforms/StraightLineStrengthReduce/AMDGPU/reassociate-geps-and-slsr-addrspace.ll
@@ -6,7 +6,7 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
; CHECK-LABEL: @slsr_after_reassociate_global_geps_mubuf_max_offset(
; CHECK: [[b1:%[0-9]+]] = getelementptr float, float addrspace(1)* %arr, i64 [[bump:%[0-9]+]]
; CHECK: [[b2:%[0-9]+]] = getelementptr float, float addrspace(1)* [[b1]], i64 [[bump]]
-define void @slsr_after_reassociate_global_geps_mubuf_max_offset(float addrspace(1)* %out, float addrspace(1)* noalias %arr, i32 %i) {
+define amdgpu_kernel void @slsr_after_reassociate_global_geps_mubuf_max_offset(float addrspace(1)* %out, float addrspace(1)* noalias %arr, i32 %i) {
bb:
%i2 = shl nsw i32 %i, 1
%j1 = add nsw i32 %i, 1023
@@ -33,7 +33,7 @@ bb:
; CHECK: %tmp = sext i32 %j1 to i64
; CHECK: getelementptr inbounds float, float addrspace(1)* %arr, i64 %tmp
; CHECK: getelementptr inbounds float, float addrspace(1)* %arr, i64 %tmp5
-define void @slsr_after_reassociate_global_geps_over_mubuf_max_offset(float addrspace(1)* %out, float addrspace(1)* noalias %arr, i32 %i) {
+define amdgpu_kernel void @slsr_after_reassociate_global_geps_over_mubuf_max_offset(float addrspace(1)* %out, float addrspace(1)* noalias %arr, i32 %i) {
bb:
%i2 = shl nsw i32 %i, 1
%j1 = add nsw i32 %i, 1024
@@ -61,7 +61,7 @@ bb:
; CHECK: [[B2:%[0-9]+]] = getelementptr float, float addrspace(3)* [[B1]], i32 %i
; CHECK: getelementptr inbounds float, float addrspace(3)* [[B2]], i32 16383
-define void @slsr_after_reassociate_lds_geps_ds_max_offset(float addrspace(1)* %out, float addrspace(3)* noalias %arr, i32 %i) {
+define amdgpu_kernel void @slsr_after_reassociate_lds_geps_ds_max_offset(float addrspace(1)* %out, float addrspace(3)* noalias %arr, i32 %i) {
bb:
%i2 = shl nsw i32 %i, 1
%j1 = add nsw i32 %i, 16383
@@ -86,7 +86,7 @@ bb:
; CHECK: getelementptr inbounds float, float addrspace(3)* %arr, i32 %j1
; CHECK: %j2 = add i32 %j1, %i
; CHECK: getelementptr inbounds float, float addrspace(3)* %arr, i32 %j2
-define void @slsr_after_reassociate_lds_geps_over_ds_max_offset(float addrspace(1)* %out, float addrspace(3)* noalias %arr, i32 %i) {
+define amdgpu_kernel void @slsr_after_reassociate_lds_geps_over_ds_max_offset(float addrspace(1)* %out, float addrspace(3)* noalias %arr, i32 %i) {
bb:
%i2 = shl nsw i32 %i, 1
%j1 = add nsw i32 %i, 16384
diff --git a/test/Transforms/StripSymbols/strip-dead-debug-info.ll b/test/Transforms/StripSymbols/strip-dead-debug-info.ll
index 0e252d70465e..d18c07d54a90 100644
--- a/test/Transforms/StripSymbols/strip-dead-debug-info.ll
+++ b/test/Transforms/StripSymbols/strip-dead-debug-info.ll
@@ -3,6 +3,9 @@
; CHECK: ModuleID = '{{.*}}'
; CHECK-NOT: "bar"
; CHECK-NOT: "abcd"
+; CHECK-NOT: "GCC"
+; CHECK: "Globals"
+; CHECK: "abcd2"
source_filename = "test/Transforms/StripSymbols/strip-dead-debug-info.ll"
@@ -21,7 +24,7 @@ entry:
define i32 @foo(i32 %i) #2 !dbg !15 {
entry:
tail call void @llvm.dbg.value(metadata i32 %i, i64 0, metadata !18, metadata !19), !dbg !20
- %.0 = load i32, i32* @xyz, align 4
+ %.0 = load i32, i32* @xyz, align 4, !dbg !30
ret i32 %.0, !dbg !21
}
@@ -29,7 +32,7 @@ attributes #0 = { nounwind readnone }
attributes #1 = { nounwind readnone ssp }
attributes #2 = { nounwind readonly ssp }
-!llvm.dbg.cu = !{!4}
+!llvm.dbg.cu = !{!4, !23, !24, !28}
!llvm.module.flags = !{!9}
!0 = !DIGlobalVariableExpression(var: !1)
@@ -55,4 +58,11 @@ attributes #2 = { nounwind readonly ssp }
!20 = !DILocation(line: 7, scope: !15)
!21 = !DILocation(line: 10, scope: !22)
!22 = distinct !DILexicalBlock(scope: !15, file: !2, line: 7)
-
+!23 = distinct !DICompileUnit(language: DW_LANG_C89, file: !2, producer: "GCC", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !5, retainedTypes: !5, globals: !5)
+!24 = distinct !DICompileUnit(language: DW_LANG_C89, file: !2, producer: "Globals", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !5, retainedTypes: !5, globals: !25)
+!25 = !{!26}
+!26 = !DIGlobalVariableExpression(var: !27, expr: !DIExpression(DW_OP_constu, 0, DW_OP_stack_value))
+!27 = !DIGlobalVariable(name: "abcd2", scope: !2, file: !2, line: 2, type: !3, isLocal: true, isDefinition: true)
+!28 = distinct !DICompileUnit(language: DW_LANG_C89, file: !2, producer: "InlineTest", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !5, retainedTypes: !5, globals: !5)
+!29 = distinct !DISubprogram(name: "inlinefunc", linkageName: "inlinefunc", scope: null, file: !2, line: 7, type: !16, isLocal: false, isDefinition: true, isOptimized: true, unit: !28)
+!30 = !DILocation(line: 100, scope: !29, inlinedAt: !21)
diff --git a/test/Transforms/StructurizeCFG/rebuild-ssa-infinite-loop.ll b/test/Transforms/StructurizeCFG/rebuild-ssa-infinite-loop.ll
index a635be10d465..9d3a84396cfc 100644
--- a/test/Transforms/StructurizeCFG/rebuild-ssa-infinite-loop.ll
+++ b/test/Transforms/StructurizeCFG/rebuild-ssa-infinite-loop.ll
@@ -6,46 +6,51 @@
target triple = "amdgcn--"
-declare <4 x float> @llvm.SI.vs.load.input(<16 x i8>, i32, i32) #0
-declare <4 x float> @llvm.amdgcn.image.load.v4f32.v2i32.v8i32(<2 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #1
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float) #2
-
-define amdgpu_vs void @wrapper(i32 inreg, i32) {
+define amdgpu_vs void @wrapper(i32 inreg %arg, i32 %arg1) {
main_body:
- %2 = add i32 %1, %0
- %3 = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> undef, i32 0, i32 %2)
- %4 = extractelement <4 x float> %3, i32 1
- %5 = fptosi float %4 to i32
- %6 = insertelement <2 x i32> undef, i32 %5, i32 1
+ %tmp = add i32 %arg1, %arg
+ %tmp2 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> undef, i32 %tmp, i32 0, i1 false, i1 false)
+ %tmp3 = extractelement <4 x float> %tmp2, i32 1
+ %tmp4 = fptosi float %tmp3 to i32
+ %tmp5 = insertelement <2 x i32> undef, i32 %tmp4, i32 1
br label %loop11.i
loop11.i: ; preds = %endif46.i, %main_body
- %7 = phi i32 [ 0, %main_body ], [ %15, %endif46.i ]
- %8 = icmp sgt i32 %7, 999
- br i1 %8, label %main.exit, label %if16.i
+ %tmp6 = phi i32 [ 0, %main_body ], [ %tmp14, %endif46.i ]
+ %tmp7 = icmp sgt i32 %tmp6, 999
+ br i1 %tmp7, label %main.exit, label %if16.i
if16.i: ; preds = %loop11.i
- %9 = call <4 x float> @llvm.amdgcn.image.load.v4f32.v2i32.v8i32(<2 x i32> %6, <8 x i32> undef, i32 15, i1 true, i1 false, i1 false, i1 false)
- %10 = extractelement <4 x float> %9, i32 0
- %11 = fcmp ult float 0.000000e+00, %10
- br i1 %11, label %if28.i, label %endif46.i
+ %tmp8 = call <4 x float> @llvm.amdgcn.image.load.v4f32.v2i32.v8i32(<2 x i32> %tmp5, <8 x i32> undef, i32 15, i1 true, i1 false, i1 false, i1 false)
+ %tmp9 = extractelement <4 x float> %tmp8, i32 0
+ %tmp10 = fcmp ult float 0.000000e+00, %tmp9
+ br i1 %tmp10, label %if28.i, label %endif46.i
if28.i: ; preds = %if16.i
- %12 = bitcast float %10 to i32
- %13 = shl i32 %12, 16
- %14 = bitcast i32 %13 to float
+ %tmp11 = bitcast float %tmp9 to i32
+ %tmp12 = shl i32 %tmp11, 16
+ %tmp13 = bitcast i32 %tmp12 to float
br label %main.exit
endif46.i: ; preds = %if16.i
- %15 = add i32 %7, 1
+ %tmp14 = add i32 %tmp6, 1
br label %loop11.i
main.exit: ; preds = %if28.i, %loop11.i
- %16 = phi float [ %14, %if28.i ], [ 0x36F0800000000000, %loop11.i ]
- call void @llvm.SI.export(i32 15, i32 0, i32 0, i32 32, i32 0, float %16, float 0.000000e+00, float 0.000000e+00, float 0x36A0000000000000)
+ %tmp15 = phi float [ %tmp13, %if28.i ], [ 0x36F0800000000000, %loop11.i ]
+ call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %tmp15, float 0.000000e+00, float 0.000000e+00, float 0x36A0000000000000, i1 false, i1 false) #0
ret void
}
-attributes #0 = { nounwind readnone }
-attributes #1 = { nounwind readonly }
-attributes #2 = { nounwind }
+; Function Attrs: nounwind
+declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
+
+; Function Attrs: nounwind readnone
+declare <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32>, i32, i32, i1, i1) #2
+
+; Function Attrs: nounwind readonly
+declare <4 x float> @llvm.amdgcn.image.load.v4f32.v2i32.v8i32(<2 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #2
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind readonly }
diff --git a/test/Transforms/ThinLTOBitcodeWriter/circular-reference.ll b/test/Transforms/ThinLTOBitcodeWriter/circular-reference.ll
new file mode 100644
index 000000000000..eeda79324497
--- /dev/null
+++ b/test/Transforms/ThinLTOBitcodeWriter/circular-reference.ll
@@ -0,0 +1,9 @@
+; RUN: opt -thinlto-bc -o %t %s
+; RUN: llvm-modextract -b -n 0 -o - %t | llvm-dis | FileCheck --check-prefix=M0 %s
+; RUN: llvm-modextract -b -n 1 -o - %t | llvm-dis | FileCheck --check-prefix=M1 %s
+
+; M0: @g = external constant
+; M1: @g = constant
+@g = constant i8* bitcast (i8** @g to i8*), !type !0
+
+!0 = !{i32 0, !"typeid"}
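+
+; Illustrative note (an interpretation of the checks above, not an extra
+; assertion): because @g carries !type metadata, the split is expected to
+; place its definition in the merged half (module 1) and leave only an
+; external declaration in the ThinLTO half (module 0), so the
+; self-referential initializer resolves across the module boundary.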
diff --git a/test/Transforms/ThinLTOBitcodeWriter/comdat.ll b/test/Transforms/ThinLTOBitcodeWriter/comdat.ll
new file mode 100644
index 000000000000..caea48e0a543
--- /dev/null
+++ b/test/Transforms/ThinLTOBitcodeWriter/comdat.ll
@@ -0,0 +1,80 @@
+; RUN: opt -thinlto-bc -o %t %s
+; RUN: llvm-modextract -n 0 -o - %t | llvm-dis | FileCheck --check-prefix=THIN %s
+; RUN: llvm-modextract -n 1 -o - %t | llvm-dis | FileCheck --check-prefix=MERGED %s
+
+target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-windows-msvc19.0.24215"
+
+; Internal comdat leader with type metadata. All comdat members need to live
+; in the merged module, and the comdat needs to be renamed.
+; MERGED: ${{"?lwt[^ ]+}} = comdat any
+$lwt = comdat any
+
+; External comdat leader, type metadata on a non-leader. All comdat
+; members need to live in the merged module, and internal members need
+; to be renamed.
+; MERGED: $nlwt = comdat any
+$nlwt = comdat any
+
+; Comdat with two members without type metadata. All comdat members live in
+; the ThinLTO module and no renaming needs to take place.
+; THIN: $nt = comdat any
+$nt = comdat any
+
+; MERGED: @lwt_aliasee = private unnamed_addr global
+; MERGED-SAME: comdat(${{"?lwt[^ ]+}})
+@lwt_aliasee = private unnamed_addr global [1 x i8*] [i8* null], comdat($lwt), !type !0
+
+; MERGED: {{@"?lwt_nl[^ ]+}} = hidden unnamed_addr global
+; MERGED-SAME: comdat(${{"?lwt[^ ]+}})
+; THIN: {{@"?lwt_nl[^ ]+}} = external hidden
+@lwt_nl = internal unnamed_addr global i32 0, comdat($lwt)
+
+; MERGED: @nlwt_aliasee = private unnamed_addr global
+; MERGED-SAME: comdat($nlwt)
+@nlwt_aliasee = private unnamed_addr global [1 x i8*] [i8* null], comdat($nlwt), !type !0
+
+; MERGED: @nlwt = unnamed_addr global
+; MERGED-SAME: comdat
+; THIN: @nlwt = external
+@nlwt = unnamed_addr global i32 0, comdat
+
+; THIN: @nt = internal
+; THIN-SAME: comdat
+@nt = internal unnamed_addr global [1 x i8*] [i8* null], comdat
+
+; THIN: @nt_nl = internal
+; THIN-SAME: comdat($nt)
+@nt_nl = internal unnamed_addr global i32 0, comdat($nt)
+
+; MERGED: {{@"?lwt[^ ]+}} = hidden unnamed_addr alias
+; THIN: {{@"?lwt[^ ]+}} = external hidden
+@lwt = internal unnamed_addr alias [1 x i8*], [1 x i8*]* @lwt_aliasee
+
+; MERGED: {{@"?nlwt_nl[^ ]+}} = hidden unnamed_addr alias
+; THIN: {{@"?nlwt_nl[^ ]+}} = external hidden
+@nlwt_nl = internal unnamed_addr alias [1 x i8*], [1 x i8*]* @nlwt_aliasee
+
+; The functions below exist just to make sure the globals are used.
+define i8* @lwt_fun() {
+ %1 = load i32, i32* @lwt_nl
+ %2 = getelementptr inbounds [1 x i8*], [1 x i8*]* @lwt, i32 0, i32 %1
+ %3 = load i8*, i8** %2
+ ret i8* %3
+}
+
+define i8* @nlwt_fun() {
+ %1 = load i32, i32* @nlwt
+ %2 = getelementptr inbounds [1 x i8*], [1 x i8*]* @nlwt_nl, i32 0, i32 %1
+ %3 = load i8*, i8** %2
+ ret i8* %3
+}
+
+define i8* @nt_fun() {
+ %1 = load i32, i32* @nt_nl
+ %2 = getelementptr inbounds [1 x i8*], [1 x i8*]* @nt, i32 0, i32 %1
+ %3 = load i8*, i8** %2
+ ret i8* %3
+}
+
+!0 = !{i64 8, !"?AVA@@"}
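+
+; Illustrative note: the {{"?lwt[^ ]+}} patterns above intentionally accept
+; any renamed form of the symbol; the exact suffix chosen when an internal
+; comdat member is promoted into the merged module is an implementation
+; detail of the splitting pass.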
diff --git a/test/Transforms/ThinLTOBitcodeWriter/filter-alias.ll b/test/Transforms/ThinLTOBitcodeWriter/filter-alias.ll
new file mode 100644
index 000000000000..d555ab0c1f6d
--- /dev/null
+++ b/test/Transforms/ThinLTOBitcodeWriter/filter-alias.ll
@@ -0,0 +1,16 @@
+; RUN: opt -thinlto-bc -o %t %s
+; RUN: llvm-modextract -n 0 -o - %t | llvm-dis | FileCheck --check-prefix=CHECK0 %s
+; RUN: llvm-modextract -n 1 -o - %t | llvm-dis | FileCheck --check-prefix=CHECK1 %s
+; CHECK0: @al = external global i8*
+; CHECK1: @al = unnamed_addr alias i8*,
+
+target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-windows-msvc19.0.24215"
+
+$al = comdat any
+
+@anon = private unnamed_addr constant { [1 x i8*] } { [1 x i8*] [i8* null] }, comdat($al), !type !0
+
+@al = external unnamed_addr alias i8*, getelementptr inbounds ({ [1 x i8*] }, { [1 x i8*] }* @anon, i32 0, i32 0, i32 1)
+
+!0 = !{i64 8, !"?AVA@@"}
diff --git a/test/Transforms/ThinLTOBitcodeWriter/no-type-md.ll b/test/Transforms/ThinLTOBitcodeWriter/no-type-md.ll
index f1ada67abe50..753e07a326b7 100644
--- a/test/Transforms/ThinLTOBitcodeWriter/no-type-md.ll
+++ b/test/Transforms/ThinLTOBitcodeWriter/no-type-md.ll
@@ -1,6 +1,30 @@
-; RUN: opt -thinlto-bc -o %t %s
-; RUN: llvm-dis -o - %t | FileCheck %s
-; RUN: llvm-bcanalyzer -dump %t | FileCheck --check-prefix=BCA %s
+; Generate bitcode files with a summary, as well as minimized bitcode
+; without the debug metadata for the thin link.
+; RUN: opt -thinlto-bc -thin-link-bitcode-file=%t.thinlink.bc -o %t.bc %s
+; RUN: llvm-dis -o - %t.bc | FileCheck %s
+; RUN: llvm-dis -o - %t.thinlink.bc | FileCheck --check-prefix=NODEBUG %s
+; RUN: llvm-bcanalyzer -dump %t.bc | FileCheck --check-prefix=BCA %s
+
+; Make sure the combined index files produced from the normal and the
+; thin link bitcode files are identical.
+; RUN: llvm-lto -thinlto -o %t3 %t.bc
+; Copy the minimized bitcode to the regular bitcode path so the module
+; paths in the index are the same (save and restore the regular bitcode
+; for use again further down).
+; RUN: mv %t.bc %t.bc.sv
+; RUN: cp %t.thinlink.bc %t.bc
+; RUN: llvm-lto -thinlto -o %t4 %t.bc
+; RUN: mv %t.bc.sv %t.bc
+; RUN: diff %t3.thinlto.bc %t4.thinlto.bc
+
+; Try again using -thinlto-action to produce the combined index.
+; RUN: rm -f %t3.thinlto.bc %t4.thinlto.bc
+; RUN: llvm-lto -thinlto-action=thinlink -o %t3.thinlto.bc %t.bc
+; Copy the minimized bitcode to the regular bitcode path so the module
+; paths in the index are the same.
+; RUN: cp %t.thinlink.bc %t.bc
+; RUN: llvm-lto -thinlto-action=thinlink -o %t4.thinlto.bc %t.bc
+; RUN: diff %t3.thinlto.bc %t4.thinlto.bc
; BCA: <GLOBALVAL_SUMMARY_BLOCK
@@ -11,3 +35,10 @@
define void @f() {
ret void
}
+
+; CHECK: !llvm.dbg.cu
+; NODEBUG-NOT: !llvm.dbg.cu
+!llvm.dbg.cu = !{}
+
+!1 = !{i32 2, !"Debug Info Version", i32 3}
+!llvm.module.flags = !{!1}
diff --git a/test/Transforms/ThinLTOBitcodeWriter/split-vfunc-internal.ll b/test/Transforms/ThinLTOBitcodeWriter/split-vfunc-internal.ll
new file mode 100644
index 000000000000..087796b5031c
--- /dev/null
+++ b/test/Transforms/ThinLTOBitcodeWriter/split-vfunc-internal.ll
@@ -0,0 +1,21 @@
+; RUN: opt -thinlto-bc -o %t %s
+; RUN: llvm-modextract -b -n 0 -o - %t | llvm-dis | FileCheck --check-prefix=M0 %s
+; RUN: llvm-modextract -b -n 1 -o - %t | llvm-dis | FileCheck --check-prefix=M1 %s
+
+define [1 x i8*]* @source() {
+ ret [1 x i8*]* @g
+}
+
+; M0: @"g$84f59439b469192440047efc8de357fb" = external hidden constant [1 x i8*]{{$}}
+; M1: @"g$84f59439b469192440047efc8de357fb" = hidden constant [1 x i8*] [i8* bitcast (i64 (i8*)* @"ok$84f59439b469192440047efc8de357fb" to i8*)]
+@g = internal constant [1 x i8*] [
+ i8* bitcast (i64 (i8*)* @ok to i8*)
+], !type !0
+
+; M0: define hidden i64 @"ok$84f59439b469192440047efc8de357fb"
+; M1: define available_externally hidden i64 @"ok$84f59439b469192440047efc8de357fb"
+define internal i64 @ok(i8* %this) {
+ ret i64 42
+}
+
+!0 = !{i32 0, !"typeid"}
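+
+; Illustrative note: the "$84f59439..." suffix in the checks is the
+; promotion suffix attached to the internal @g and @ok when the module is
+; split, so that both halves can name the same now-hidden symbols; the
+; specific hash value is particular to this module.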
diff --git a/test/Transforms/ThinLTOBitcodeWriter/split-vfunc.ll b/test/Transforms/ThinLTOBitcodeWriter/split-vfunc.ll
new file mode 100644
index 000000000000..0793459af414
--- /dev/null
+++ b/test/Transforms/ThinLTOBitcodeWriter/split-vfunc.ll
@@ -0,0 +1,75 @@
+; RUN: opt -thinlto-bc -o %t %s
+; RUN: llvm-modextract -b -n 0 -o - %t | llvm-dis | FileCheck --check-prefix=M0 %s
+; RUN: llvm-modextract -b -n 1 -o - %t | llvm-dis | FileCheck --check-prefix=M1 %s
+
+; M0: @g = external constant [9 x i8*]{{$}}
+; M1: @g = constant [9 x i8*]
+@g = constant [9 x i8*] [
+ i8* bitcast (i64 (i8*)* @ok1 to i8*),
+ i8* bitcast (i64 (i8*, i64)* @ok2 to i8*),
+ i8* bitcast (void (i8*)* @wrongtype1 to i8*),
+ i8* bitcast (i128 (i8*)* @wrongtype2 to i8*),
+ i8* bitcast (i64 ()* @wrongtype3 to i8*),
+ i8* bitcast (i64 (i8*, i8*)* @wrongtype4 to i8*),
+ i8* bitcast (i64 (i8*, i128)* @wrongtype5 to i8*),
+ i8* bitcast (i64 (i8*)* @usesthis to i8*),
+ i8* bitcast (i8 (i8*)* @reads to i8*)
+], !type !0
+
+; M0: define i64 @ok1
+; M1: define available_externally i64 @ok1
+define i64 @ok1(i8* %this) {
+ ret i64 42
+}
+
+; M0: define i64 @ok2
+; M1: define available_externally i64 @ok2
+define i64 @ok2(i8* %this, i64 %arg) {
+ ret i64 %arg
+}
+
+; M0: define void @wrongtype1
+; M1: declare void @wrongtype1()
+define void @wrongtype1(i8*) {
+ ret void
+}
+
+; M0: define i128 @wrongtype2
+; M1: declare void @wrongtype2()
+define i128 @wrongtype2(i8*) {
+ ret i128 0
+}
+
+; M0: define i64 @wrongtype3
+; M1: declare void @wrongtype3()
+define i64 @wrongtype3() {
+ ret i64 0
+}
+
+; M0: define i64 @wrongtype4
+; M1: declare void @wrongtype4()
+define i64 @wrongtype4(i8*, i8*) {
+ ret i64 0
+}
+
+; M0: define i64 @wrongtype5
+; M1: declare void @wrongtype5()
+define i64 @wrongtype5(i8*, i128) {
+ ret i64 0
+}
+
+; M0: define i64 @usesthis
+; M1: declare void @usesthis()
+define i64 @usesthis(i8* %this) {
+ %i = ptrtoint i8* %this to i64
+ ret i64 %i
+}
+
+; M0: define i8 @reads
+; M1: declare void @reads()
+define i8 @reads(i8* %this) {
+ %l = load i8, i8* %this
+ ret i8 %l
+}
+
+!0 = !{i32 0, !"typeid"}
diff --git a/test/Transforms/ThinLTOBitcodeWriter/split.ll b/test/Transforms/ThinLTOBitcodeWriter/split.ll
index e08e92328b51..d37d10bd3560 100644
--- a/test/Transforms/ThinLTOBitcodeWriter/split.ll
+++ b/test/Transforms/ThinLTOBitcodeWriter/split.ll
@@ -1,20 +1,37 @@
-; RUN: opt -thinlto-bc -o %t %s
-; RUN: llvm-modextract -b -n 0 -o %t0 %t
-; RUN: llvm-modextract -b -n 1 -o %t1 %t
+; Generate bitcode files with a summary, as well as minimized bitcode
+; without the debug metadata for the thin link.
+; RUN: opt -thinlto-bc -thin-link-bitcode-file=%t2 -o %t %s
+; RUN: llvm-modextract -b -n 0 -o %t0.bc %t
+; RUN: llvm-modextract -b -n 1 -o %t1.bc %t
+; RUN: llvm-modextract -b -n 0 -o %t0.thinlink.bc %t2
+; RUN: llvm-modextract -b -n 1 -o %t1.thinlink.bc %t2
; RUN: not llvm-modextract -b -n 2 -o - %t 2>&1 | FileCheck --check-prefix=ERROR %s
-; RUN: llvm-dis -o - %t0 | FileCheck --check-prefix=M0 %s
-; RUN: llvm-dis -o - %t1 | FileCheck --check-prefix=M1 %s
-; RUN: llvm-bcanalyzer -dump %t0 | FileCheck --check-prefix=BCA0 %s
-; RUN: llvm-bcanalyzer -dump %t1 | FileCheck --check-prefix=BCA1 %s
+; RUN: llvm-dis -o - %t0.bc | FileCheck --check-prefix=M0 %s
+; RUN: llvm-dis -o - %t1.bc | FileCheck --check-prefix=M1 %s
+; RUN: llvm-dis -o - %t0.thinlink.bc | FileCheck --check-prefix=NODEBUG %s
+; RUN: llvm-dis -o - %t1.thinlink.bc | FileCheck --check-prefix=NODEBUG %s
+; RUN: llvm-bcanalyzer -dump %t0.bc | FileCheck --check-prefix=BCA0 %s
+; RUN: llvm-bcanalyzer -dump %t1.bc | FileCheck --check-prefix=BCA1 %s
+
+; Make sure the combined index files produced from the normal and the
+; thin link bitcode files are identical.
+; RUN: llvm-lto -thinlto -o %t3 %t0.bc
+; Copy the minimized bitcode to the regular bitcode path so the module
+; paths in the index are the same.
+; RUN: cp %t0.thinlink.bc %t0.bc
+; RUN: llvm-lto -thinlto -o %t4 %t0.bc
+; RUN: diff %t3.thinlto.bc %t4.thinlto.bc
; ERROR: llvm-modextract: error: module index out of range; bitcode file contains 2 module(s)
; BCA0: <GLOBALVAL_SUMMARY_BLOCK
; BCA1-NOT: <GLOBALVAL_SUMMARY_BLOCK
+$g = comdat any
+
; M0: @g = external global i8{{$}}
-; M1: @g = global i8 42, !type !0
-@g = global i8 42, !type !0
+; M1: @g = global i8 42, comdat, !type !0
+@g = global i8 42, comdat, !type !0
; M0: define i8* @f()
; M1-NOT: @f()
@@ -24,3 +41,11 @@ define i8* @f() {
; M1: !0 = !{i32 0, !"typeid"}
!0 = !{i32 0, !"typeid"}
+
+; M0: !llvm.dbg.cu
+; M1-NOT: !llvm.dbg.cu
+; NODEBUG-NOT: !llvm.dbg.cu
+!llvm.dbg.cu = !{}
+
+!1 = !{i32 2, !"Debug Info Version", i32 3}
+!llvm.module.flags = !{!1}
diff --git a/test/Transforms/ThinLTOBitcodeWriter/unsplittable.ll b/test/Transforms/ThinLTOBitcodeWriter/unsplittable.ll
index fbc97a000971..718013e39b3e 100644
--- a/test/Transforms/ThinLTOBitcodeWriter/unsplittable.ll
+++ b/test/Transforms/ThinLTOBitcodeWriter/unsplittable.ll
@@ -1,6 +1,9 @@
-; RUN: opt -thinlto-bc -o %t %s
+; RUN: opt -thinlto-bc -thin-link-bitcode-file=%t2 -o %t %s
; RUN: llvm-dis -o - %t | FileCheck %s
; RUN: llvm-bcanalyzer -dump %t | FileCheck --check-prefix=BCA %s
+; When not splitting the module, the thin link bitcode file should simply be a
+; copy of the regular module.
+; RUN: diff %t %t2
; BCA-NOT: <GLOBALVAL_SUMMARY_BLOCK
diff --git a/test/Transforms/Util/MemorySSA/invariant-groups.ll b/test/Transforms/Util/MemorySSA/invariant-groups.ll
deleted file mode 100644
index 57247fe3b2b5..000000000000
--- a/test/Transforms/Util/MemorySSA/invariant-groups.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
-;
-; Currently, MemorySSA doesn't support invariant groups. So, we should ignore
-; invariant.group.barrier intrinsics entirely. We'll need to pay attention to
-; them when/if we decide to support invariant groups.
-
-@g = external global i32
-
-define i32 @foo(i32* %a) {
-; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: store i32 0
- store i32 0, i32* %a, align 4, !llvm.invariant.group !0
-
-; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: store i32 1
- store i32 1, i32* @g, align 4
-
- %1 = bitcast i32* %a to i8*
- %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
- %a32 = bitcast i8* %a8 to i32*
-
-; CHECK: MemoryUse(2)
-; CHECK-NEXT: %2 = load i32
- %2 = load i32, i32* %a32, align 4, !llvm.invariant.group !0
- ret i32 %2
-}
-
-declare i8* @llvm.invariant.group.barrier(i8*)
-
-!0 = !{!"group1"}
diff --git a/test/Transforms/Util/PredicateInfo/condprop.ll b/test/Transforms/Util/PredicateInfo/condprop.ll
new file mode 100644
index 000000000000..79c76baa6f61
--- /dev/null
+++ b/test/Transforms/Util/PredicateInfo/condprop.ll
@@ -0,0 +1,471 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -print-predicateinfo -analyze < %s 2>&1 | FileCheck %s
+
+@a = external global i32 ; <i32*> [#uses=7]
+
+define i32 @test1() nounwind {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @a, align 4
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[TMP0]], 4
+; CHECK-NEXT: br i1 [[TMP1]], label [[BB:%.*]], label [[BB1:%.*]]
+; CHECK: bb:
+; CHECK-NEXT: br label [[BB8:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* @a, align 4
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[TMP2]], 5
+; CHECK-NEXT: br i1 [[TMP3]], label [[BB2:%.*]], label [[BB3:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: br label [[BB8]]
+; CHECK: bb3:
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* @a, align 4
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 4
+; CHECK-NEXT: br i1 [[TMP5]], label [[BB4:%.*]], label [[BB5:%.*]]
+; CHECK: bb4:
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* @a, align 4
+; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 5
+; CHECK-NEXT: br label [[BB8]]
+; CHECK: bb5:
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* @a, align 4
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 5
+; CHECK-NEXT: br i1 [[TMP9]], label [[BB6:%.*]], label [[BB7:%.*]]
+; CHECK: bb6:
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* @a, align 4
+; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[TMP10]], 4
+; CHECK-NEXT: br label [[BB8]]
+; CHECK: bb7:
+; CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* @a, align 4
+; CHECK-NEXT: br label [[BB8]]
+; CHECK: bb8:
+; CHECK-NEXT: [[DOT0:%.*]] = phi i32 [ [[TMP12]], [[BB7]] ], [ [[TMP11]], [[BB6]] ], [ [[TMP7]], [[BB4]] ], [ 4, [[BB2]] ], [ 5, [[BB]] ]
+; CHECK-NEXT: br label [[RETURN:%.*]]
+; CHECK: return:
+; CHECK-NEXT: ret i32 [[DOT0]]
+;
+entry:
+ %0 = load i32, i32* @a, align 4
+ %1 = icmp eq i32 %0, 4
+ br i1 %1, label %bb, label %bb1
+
+bb: ; preds = %entry
+ br label %bb8
+
+bb1: ; preds = %entry
+ %2 = load i32, i32* @a, align 4
+ %3 = icmp eq i32 %2, 5
+ br i1 %3, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ br label %bb8
+
+bb3: ; preds = %bb1
+ %4 = load i32, i32* @a, align 4
+ %5 = icmp eq i32 %4, 4
+ br i1 %5, label %bb4, label %bb5
+
+bb4: ; preds = %bb3
+ %6 = load i32, i32* @a, align 4
+ %7 = add i32 %6, 5
+ br label %bb8
+
+bb5: ; preds = %bb3
+ %8 = load i32, i32* @a, align 4
+ %9 = icmp eq i32 %8, 5
+ br i1 %9, label %bb6, label %bb7
+
+bb6: ; preds = %bb5
+ %10 = load i32, i32* @a, align 4
+ %11 = add i32 %10, 4
+ br label %bb8
+
+bb7: ; preds = %bb5
+ %12 = load i32, i32* @a, align 4
+ br label %bb8
+
+bb8: ; preds = %bb7, %bb6, %bb4, %bb2, %bb
+ %.0 = phi i32 [ %12, %bb7 ], [ %11, %bb6 ], [ %7, %bb4 ], [ 4, %bb2 ], [ 5, %bb ]
+ br label %return
+
+return: ; preds = %bb8
+ ret i32 %.0
+}
+
+declare void @foo(i1)
+declare void @bar(i32)
+
+define void @test3(i32 %x, i32 %y) {
+; CHECK-LABEL: @test3(
+; CHECK-NEXT: [[XZ:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK-NEXT: [[YZ:%.*]] = icmp eq i32 [[Y:%.*]], 0
+; CHECK-NEXT: [[Z:%.*]] = and i1 [[XZ]], [[YZ]]
+; CHECK: [[XZ_0:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[XZ]])
+; CHECK: [[X_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X]])
+; CHECK: [[YZ_0:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[YZ]])
+; CHECK: [[Y_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[Y]])
+; CHECK: [[Z_0:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[Z]])
+; CHECK-NEXT: br i1 [[Z]], label [[BOTH_ZERO:%.*]], label [[NOPE:%.*]]
+; CHECK: both_zero:
+; CHECK-NEXT: call void @foo(i1 [[XZ_0]])
+; CHECK-NEXT: call void @foo(i1 [[YZ_0]])
+; CHECK-NEXT: call void @bar(i32 [[X_0]])
+; CHECK-NEXT: call void @bar(i32 [[Y_0]])
+; CHECK-NEXT: ret void
+; CHECK: nope:
+; CHECK-NEXT: call void @foo(i1 [[Z_0]])
+; CHECK-NEXT: ret void
+;
+ %xz = icmp eq i32 %x, 0
+ %yz = icmp eq i32 %y, 0
+ %z = and i1 %xz, %yz
+ br i1 %z, label %both_zero, label %nope
+both_zero:
+ call void @foo(i1 %xz)
+ call void @foo(i1 %yz)
+ call void @bar(i32 %x)
+ call void @bar(i32 %y)
+ ret void
+nope:
+ call void @foo(i1 %z)
+ ret void
+}
+
+define void @test4(i1 %b, i32 %x) {
+; CHECK-LABEL: @test4(
+; CHECK-NEXT: br i1 [[B:%.*]], label [[SW:%.*]], label [[CASE3:%.*]]
+; CHECK: sw:
+; CHECK: i32 0, label [[CASE0:%.*]]
+; CHECK-NEXT: i32 1, label [[CASE1:%.*]]
+; CHECK-NEXT: i32 2, label [[CASE0]]
+; CHECK-NEXT: i32 3, label [[CASE3]]
+; CHECK-NEXT: i32 4, label [[DEFAULT:%.*]]
+; CHECK-NEXT: ] Edge: [label [[SW]],label %case1] }
+; CHECK-NEXT: [[X_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X:%.*]])
+; CHECK-NEXT: switch i32 [[X]], label [[DEFAULT]] [
+; CHECK-NEXT: i32 0, label [[CASE0]]
+; CHECK-NEXT: i32 1, label [[CASE1]]
+; CHECK-NEXT: i32 2, label [[CASE0]]
+; CHECK-NEXT: i32 3, label [[CASE3]]
+; CHECK-NEXT: i32 4, label [[DEFAULT]]
+; CHECK-NEXT: ]
+; CHECK: default:
+; CHECK-NEXT: call void @bar(i32 [[X]])
+; CHECK-NEXT: ret void
+; CHECK: case0:
+; CHECK-NEXT: call void @bar(i32 [[X]])
+; CHECK-NEXT: ret void
+; CHECK: case1:
+; CHECK-NEXT: call void @bar(i32 [[X_0]])
+; CHECK-NEXT: ret void
+; CHECK: case3:
+; CHECK-NEXT: call void @bar(i32 [[X]])
+; CHECK-NEXT: ret void
+;
+ br i1 %b, label %sw, label %case3
+sw:
+ switch i32 %x, label %default [
+ i32 0, label %case0
+ i32 1, label %case1
+ i32 2, label %case0
+ i32 3, label %case3
+ i32 4, label %default
+ ]
+default:
+ call void @bar(i32 %x)
+ ret void
+case0:
+ call void @bar(i32 %x)
+ ret void
+case1:
+ call void @bar(i32 %x)
+ ret void
+case3:
+ call void @bar(i32 %x)
+ ret void
+}
+
+define i1 @test5(i32 %x, i32 %y) {
+; CHECK-LABEL: @test5(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Y:%.*]]
+; CHECK: [[X_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X]])
+; CHECK: [[X_1:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X]])
+; CHECK: [[Y_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[Y]])
+; CHECK: [[Y_1:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[Y]])
+; CHECK-NEXT: br i1 [[CMP]], label [[SAME:%.*]], label [[DIFFERENT:%.*]]
+; CHECK: same:
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 [[X_0]], [[Y_0]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK: different:
+; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i32 [[X_1]], [[Y_1]]
+; CHECK-NEXT: ret i1 [[CMP3]]
+;
+ %cmp = icmp eq i32 %x, %y
+ br i1 %cmp, label %same, label %different
+
+same:
+ %cmp2 = icmp ne i32 %x, %y
+ ret i1 %cmp2
+
+different:
+ %cmp3 = icmp eq i32 %x, %y
+ ret i1 %cmp3
+}
+
+define i1 @test6(i32 %x, i32 %y) {
+; CHECK-LABEL: @test6(
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], [[Y]]
+; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i32 [[X]], [[Y]]
+; CHECK-NEXT: br i1 [[CMP]], label [[SAME:%.*]], label [[DIFFERENT:%.*]]
+; CHECK: same:
+; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK: different:
+; CHECK-NEXT: ret i1 [[CMP3]]
+;
+ %cmp2 = icmp ne i32 %x, %y
+ %cmp = icmp eq i32 %x, %y
+ %cmp3 = icmp eq i32 %x, %y
+ br i1 %cmp, label %same, label %different
+
+same:
+ ret i1 %cmp2
+
+different:
+ ret i1 %cmp3
+}
+
+define i1 @test6_fp(float %x, float %y) {
+; CHECK-LABEL: @test6_fp(
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp une float [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[X]], [[Y]]
+; CHECK-NEXT: [[CMP3:%.*]] = fcmp oeq float [[X]], [[Y]]
+; CHECK-NEXT: br i1 [[CMP]], label [[SAME:%.*]], label [[DIFFERENT:%.*]]
+; CHECK: same:
+; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK: different:
+; CHECK-NEXT: ret i1 [[CMP3]]
+;
+ %cmp2 = fcmp une float %x, %y
+ %cmp = fcmp oeq float %x, %y
+ %cmp3 = fcmp oeq float %x, %y
+ br i1 %cmp, label %same, label %different
+
+same:
+ ret i1 %cmp2
+
+different:
+ ret i1 %cmp3
+}
+
+define i1 @test7(i32 %x, i32 %y) {
+; CHECK-LABEL: @test7(
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], [[Y:%.*]]
+; CHECK: [[X_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X]])
+; CHECK: [[X_1:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X]])
+; CHECK: [[Y_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[Y]])
+; CHECK: [[Y_1:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[Y]])
+; CHECK-NEXT: br i1 [[CMP]], label [[SAME:%.*]], label [[DIFFERENT:%.*]]
+; CHECK: same:
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[X_0]], [[Y_0]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK: different:
+; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[X_1]], [[Y_1]]
+; CHECK-NEXT: ret i1 [[CMP3]]
+;
+ %cmp = icmp sgt i32 %x, %y
+ br i1 %cmp, label %same, label %different
+
+same:
+ %cmp2 = icmp sle i32 %x, %y
+ ret i1 %cmp2
+
+different:
+ %cmp3 = icmp sgt i32 %x, %y
+ ret i1 %cmp3
+}
+
+define i1 @test7_fp(float %x, float %y) {
+; CHECK-LABEL: @test7_fp(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float [[X:%.*]], [[Y:%.*]]
+; CHECK: [[X_0:%.*]] = call float @llvm.ssa.copy.f32(float [[X]])
+; CHECK: [[X_1:%.*]] = call float @llvm.ssa.copy.f32(float [[X]])
+; CHECK: [[Y_0:%.*]] = call float @llvm.ssa.copy.f32(float [[Y]])
+; CHECK: [[Y_1:%.*]] = call float @llvm.ssa.copy.f32(float [[Y]])
+; CHECK-NEXT: br i1 [[CMP]], label [[SAME:%.*]], label [[DIFFERENT:%.*]]
+; CHECK: same:
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp ule float [[X_0]], [[Y_0]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK: different:
+; CHECK-NEXT: [[CMP3:%.*]] = fcmp ogt float [[X_1]], [[Y_1]]
+; CHECK-NEXT: ret i1 [[CMP3]]
+;
+ %cmp = fcmp ogt float %x, %y
+ br i1 %cmp, label %same, label %different
+
+same:
+ %cmp2 = fcmp ule float %x, %y
+ ret i1 %cmp2
+
+different:
+ %cmp3 = fcmp ogt float %x, %y
+ ret i1 %cmp3
+}
+
+define i1 @test8(i32 %x, i32 %y) {
+; CHECK-LABEL: @test8(
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], [[Y]]
+; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[X]], [[Y]]
+; CHECK-NEXT: br i1 [[CMP]], label [[SAME:%.*]], label [[DIFFERENT:%.*]]
+; CHECK: same:
+; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK: different:
+; CHECK-NEXT: ret i1 [[CMP3]]
+;
+ %cmp2 = icmp sle i32 %x, %y
+ %cmp = icmp sgt i32 %x, %y
+ %cmp3 = icmp sgt i32 %x, %y
+ br i1 %cmp, label %same, label %different
+
+same:
+ ret i1 %cmp2
+
+different:
+ ret i1 %cmp3
+}
+
+define i1 @test8_fp(float %x, float %y) {
+; CHECK-LABEL: @test8_fp(
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp ule float [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float [[X]], [[Y]]
+; CHECK-NEXT: [[CMP3:%.*]] = fcmp ogt float [[X]], [[Y]]
+; CHECK-NEXT: br i1 [[CMP]], label [[SAME:%.*]], label [[DIFFERENT:%.*]]
+; CHECK: same:
+; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK: different:
+; CHECK-NEXT: ret i1 [[CMP3]]
+;
+ %cmp2 = fcmp ule float %x, %y
+ %cmp = fcmp ogt float %x, %y
+ %cmp3 = fcmp ogt float %x, %y
+ br i1 %cmp, label %same, label %different
+
+same:
+ ret i1 %cmp2
+
+different:
+ ret i1 %cmp3
+}
+
+define i32 @test9(i32 %i, i32 %j) {
+; CHECK-LABEL: @test9(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[I:%.*]], [[J:%.*]]
+; CHECK: [[I_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[I]])
+; CHECK: [[J_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[J]])
+; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[RET:%.*]]
+; CHECK: cond_true:
+; CHECK-NEXT: [[DIFF:%.*]] = sub i32 [[I_0]], [[J_0]]
+; CHECK-NEXT: ret i32 [[DIFF]]
+; CHECK: ret:
+; CHECK-NEXT: ret i32 5
+;
+ %cmp = icmp eq i32 %i, %j
+ br i1 %cmp, label %cond_true, label %ret
+
+cond_true:
+ %diff = sub i32 %i, %j
+ ret i32 %diff
+
+ret:
+ ret i32 5
+}
+
+define i32 @test10(i32 %j, i32 %i) {
+; CHECK-LABEL: @test10(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[I:%.*]], [[J:%.*]]
+; CHECK: [[I_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[I]])
+; CHECK: [[J_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[J]])
+; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[RET:%.*]]
+; CHECK: cond_true:
+; CHECK-NEXT: [[DIFF:%.*]] = sub i32 [[I_0]], [[J_0]]
+; CHECK-NEXT: ret i32 [[DIFF]]
+; CHECK: ret:
+; CHECK-NEXT: ret i32 5
+;
+ %cmp = icmp eq i32 %i, %j
+ br i1 %cmp, label %cond_true, label %ret
+
+cond_true:
+ %diff = sub i32 %i, %j
+ ret i32 %diff
+
+ret:
+ ret i32 5
+}
+
+declare i32 @yogibar()
+
+define i32 @test11(i32 %x) {
+; CHECK-LABEL: @test11(
+; CHECK-NEXT: [[V0:%.*]] = call i32 @yogibar()
+; CHECK-NEXT: [[V1:%.*]] = call i32 @yogibar()
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[V0]], [[V1]]
+; CHECK: [[V0_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[V0]])
+; CHECK: [[V1_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[V1]])
+; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[NEXT:%.*]]
+; CHECK: cond_true:
+; CHECK-NEXT: ret i32 [[V1_0]]
+; CHECK: next:
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[X:%.*]], [[V0_0]]
+; CHECK: [[V0_0_1:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[V0_0]])
+; CHECK-NEXT: br i1 [[CMP2]], label [[COND_TRUE2:%.*]], label [[NEXT2:%.*]]
+; CHECK: cond_true2:
+; CHECK-NEXT: ret i32 [[V0_0_1]]
+; CHECK: next2:
+; CHECK-NEXT: ret i32 0
+;
+ %v0 = call i32 @yogibar()
+ %v1 = call i32 @yogibar()
+ %cmp = icmp eq i32 %v0, %v1
+ br i1 %cmp, label %cond_true, label %next
+
+cond_true:
+ ret i32 %v1
+
+next:
+ %cmp2 = icmp eq i32 %x, %v0
+ br i1 %cmp2, label %cond_true2, label %next2
+
+cond_true2:
+ ret i32 %v0
+
+next2:
+ ret i32 0
+}
+
+define i32 @test12(i32 %x) {
+; CHECK-LABEL: @test12(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK: [[X_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X]])
+; CHECK: [[X_1:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X]])
+; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+; CHECK: cond_true:
+; CHECK-NEXT: br label [[RET:%.*]]
+; CHECK: cond_false:
+; CHECK-NEXT: br label [[RET]]
+; CHECK: ret:
+; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[X_0]], [[COND_TRUE]] ], [ [[X_1]], [[COND_FALSE]] ]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %cmp = icmp eq i32 %x, 0
+ br i1 %cmp, label %cond_true, label %cond_false
+
+cond_true:
+ br label %ret
+
+cond_false:
+ br label %ret
+
+ret:
+ %res = phi i32 [ %x, %cond_true ], [ %x, %cond_false ]
+ ret i32 %res
+}
diff --git a/test/Transforms/Util/PredicateInfo/diamond.ll b/test/Transforms/Util/PredicateInfo/diamond.ll
new file mode 100644
index 000000000000..e3f56d88caf0
--- /dev/null
+++ b/test/Transforms/Util/PredicateInfo/diamond.ll
@@ -0,0 +1,68 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -print-predicateinfo < %s 2>&1 | FileCheck %s
+define i1 @f(i32 %x, i1 %y) {
+; CHECK-LABEL: @f(
+; CHECK-NEXT: br i1 [[Y:%.*]], label [[BB0:%.*]], label [[BB1:%.*]]
+; CHECK: bb0:
+; CHECK-NEXT: [[CMP:%.*]] = icmp sge i32 [[X:%.*]], 0
+; CHECK: [[X_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X]])
+; CHECK-NEXT: br i1 [[CMP]], label [[BB2:%.*]], label [[BB3:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: [[X2:%.*]] = add nuw nsw i32 [[X]], 1
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i32 [[X2]], 2
+; CHECK: [[X2_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X2]])
+; CHECK-NEXT: br i1 [[CMP2]], label [[BB2]], label [[BB3]]
+; CHECK: bb2:
+; CHECK-NEXT: [[X3:%.*]] = phi i32 [ [[X_0]], [[BB0]] ], [ [[X2_0]], [[BB1]] ]
+; CHECK-NEXT: br label [[BB3]]
+; CHECK: bb3:
+; CHECK-NEXT: ret i1 false
+;
+ br i1 %y, label %bb0, label %bb1
+ bb0:
+ %cmp = icmp sge i32 %x, 0 ; x >= 0
+ br i1 %cmp, label %bb2, label %bb3
+ bb1:
+ %x2 = add nsw nuw i32 %x, 1
+ %cmp2 = icmp sge i32 %x2, 2 ; x+1 >= 2 / x >= 1
+ br i1 %cmp2, label %bb2, label %bb3
+ bb2:
+ %x3 = phi i32 [ %x, %bb0 ], [ %x2, %bb1 ]
+ br label %bb3
+ bb3:
+ ret i1 0
+}
+
+define i1 @g(i32 %x, i1 %y) {
+; CHECK-LABEL: @g(
+; CHECK-NEXT: br i1 [[Y:%.*]], label [[BB0:%.*]], label [[BB1:%.*]]
+; CHECK: bb0:
+; CHECK-NEXT: [[CMP:%.*]] = icmp sge i32 [[X:%.*]], 0
+; CHECK: [[X_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X]])
+; CHECK-NEXT: br i1 [[CMP]], label [[BB3:%.*]], label [[BB2:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: [[X2:%.*]] = add nuw nsw i32 [[X]], 1
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i32 [[X2]], 2
+; CHECK: [[X2_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X2]])
+; CHECK-NEXT: br i1 [[CMP2]], label [[BB3]], label [[BB2]]
+; CHECK: bb2:
+; CHECK-NEXT: [[X3:%.*]] = phi i32 [ [[X_0]], [[BB0]] ], [ [[X2_0]], [[BB1]] ]
+; CHECK-NEXT: br label [[BB3]]
+; CHECK: bb3:
+; CHECK-NEXT: ret i1 false
+;
+ br i1 %y, label %bb0, label %bb1
+ bb0:
+ %cmp = icmp sge i32 %x, 0 ; x >= 0
+ br i1 %cmp, label %bb3, label %bb2
+ bb1:
+ %x2 = add nsw nuw i32 %x, 1
+ %cmp2 = icmp sge i32 %x2, 2 ; x+1 >= 2 / x >= 1
+ br i1 %cmp2, label %bb3, label %bb2
+ bb2:
+ %x3 = phi i32 [ %x, %bb0 ], [ %x2, %bb1 ]
+ br label %bb3
+ bb3:
+ ret i1 0
+}
+
diff --git a/test/Transforms/Util/PredicateInfo/edge.ll b/test/Transforms/Util/PredicateInfo/edge.ll
new file mode 100644
index 000000000000..6c58540e1050
--- /dev/null
+++ b/test/Transforms/Util/PredicateInfo/edge.ll
@@ -0,0 +1,242 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -print-predicateinfo -analyze < %s 2>&1 | FileCheck %s
+
+define i32 @f1(i32 %x) {
+; CHECK-LABEL: @f1(
+; CHECK-NEXT: bb0:
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK: [[X_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X]])
+; CHECK-NEXT: br i1 [[CMP]], label [[BB2:%.*]], label [[BB1:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: br label [[BB2]]
+; CHECK: bb2:
+; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[X_0]], [[BB0:%.*]] ], [ 0, [[BB1]] ]
+; CHECK-NEXT: [[FOO:%.*]] = add i32 [[COND]], [[X]]
+; CHECK-NEXT: ret i32 [[FOO]]
+;
+bb0:
+ %cmp = icmp eq i32 %x, 0
+ br i1 %cmp, label %bb2, label %bb1
+bb1:
+ br label %bb2
+bb2:
+ %cond = phi i32 [ %x, %bb0 ], [ 0, %bb1 ]
+ %foo = add i32 %cond, %x
+ ret i32 %foo
+}
+
+define i32 @f2(i32 %x) {
+; CHECK-LABEL: @f2(
+; CHECK-NEXT: bb0:
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[X:%.*]], 0
+; CHECK: [[X_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X]])
+; CHECK-NEXT: br i1 [[CMP]], label [[BB1:%.*]], label [[BB2:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: br label [[BB2]]
+; CHECK: bb2:
+; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[X_0]], [[BB0:%.*]] ], [ 0, [[BB1]] ]
+; CHECK-NEXT: [[FOO:%.*]] = add i32 [[COND]], [[X]]
+; CHECK-NEXT: ret i32 [[FOO]]
+;
+bb0:
+ %cmp = icmp ne i32 %x, 0
+ br i1 %cmp, label %bb1, label %bb2
+bb1:
+ br label %bb2
+bb2:
+ %cond = phi i32 [ %x, %bb0 ], [ 0, %bb1 ]
+ %foo = add i32 %cond, %x
+ ret i32 %foo
+}
+
+define i32 @f3(i32 %x) {
+; CHECK-LABEL: @f3(
+; CHECK-NEXT: bb0:
+; CHECK: [[X_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X:%.*]])
+; CHECK-NEXT: switch i32 [[X]], label [[BB1:%.*]] [
+; CHECK-NEXT: i32 0, label [[BB2:%.*]]
+; CHECK-NEXT: ]
+; CHECK: bb1:
+; CHECK-NEXT: br label [[BB2]]
+; CHECK: bb2:
+; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[X_0]], [[BB0:%.*]] ], [ 0, [[BB1]] ]
+; CHECK-NEXT: [[FOO:%.*]] = add i32 [[COND]], [[X]]
+; CHECK-NEXT: ret i32 [[FOO]]
+;
+bb0:
+ switch i32 %x, label %bb1 [ i32 0, label %bb2]
+bb1:
+ br label %bb2
+bb2:
+ %cond = phi i32 [ %x, %bb0 ], [ 0, %bb1 ]
+ %foo = add i32 %cond, %x
+ ret i32 %foo
+}
+
+
+define double @fcmp_oeq_not_zero(double %x, double %y) {
+; CHECK-LABEL: @fcmp_oeq_not_zero(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq double [[Y:%.*]], 2.000000e+00
+; CHECK: [[Y_0:%.*]] = call double @llvm.ssa.copy.f64(double [[Y]])
+; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[RETURN:%.*]]
+; CHECK: if:
+; CHECK-NEXT: [[DIV:%.*]] = fdiv double [[X:%.*]], [[Y_0]]
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL:%.*]] = phi double [ [[DIV]], [[IF]] ], [ [[X]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret double [[RETVAL]]
+;
+entry:
+ %cmp = fcmp oeq double %y, 2.0
+ br i1 %cmp, label %if, label %return
+
+if:
+ %div = fdiv double %x, %y
+ br label %return
+
+return:
+ %retval = phi double [ %div, %if ], [ %x, %entry ]
+ ret double %retval
+
+}
+
+define double @fcmp_une_not_zero(double %x, double %y) {
+; CHECK-LABEL: @fcmp_une_not_zero(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = fcmp une double [[Y:%.*]], 2.000000e+00
+; CHECK: [[Y_0:%.*]] = call double @llvm.ssa.copy.f64(double [[Y]])
+; CHECK-NEXT: br i1 [[CMP]], label [[RETURN:%.*]], label [[ELSE:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[DIV:%.*]] = fdiv double [[X:%.*]], [[Y_0]]
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL:%.*]] = phi double [ [[DIV]], [[ELSE]] ], [ [[X]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret double [[RETVAL]]
+;
+entry:
+ %cmp = fcmp une double %y, 2.0
+ br i1 %cmp, label %return, label %else
+
+else:
+ %div = fdiv double %x, %y
+ br label %return
+
+return:
+ %retval = phi double [ %div, %else ], [ %x, %entry ]
+ ret double %retval
+
+}
+
+define double @fcmp_oeq_zero(double %x, double %y) {
+; CHECK-LABEL: @fcmp_oeq_zero(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq double [[Y:%.*]], 0.000000e+00
+; CHECK: [[Y_0:%.*]] = call double @llvm.ssa.copy.f64(double [[Y]])
+; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[RETURN:%.*]]
+; CHECK: if:
+; CHECK-NEXT: [[DIV:%.*]] = fdiv double [[X:%.*]], [[Y_0]]
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL:%.*]] = phi double [ [[DIV]], [[IF]] ], [ [[X]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret double [[RETVAL]]
+;
+entry:
+ %cmp = fcmp oeq double %y, 0.0
+ br i1 %cmp, label %if, label %return
+
+if:
+ %div = fdiv double %x, %y
+ br label %return
+
+return:
+ %retval = phi double [ %div, %if ], [ %x, %entry ]
+ ret double %retval
+
+}
+
+define double @fcmp_une_zero(double %x, double %y) {
+; CHECK-LABEL: @fcmp_une_zero(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = fcmp une double [[Y:%.*]], -0.000000e+00
+; CHECK: [[Y_0:%.*]] = call double @llvm.ssa.copy.f64(double [[Y]])
+; CHECK-NEXT: br i1 [[CMP]], label [[RETURN:%.*]], label [[ELSE:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[DIV:%.*]] = fdiv double [[X:%.*]], [[Y_0]]
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL:%.*]] = phi double [ [[DIV]], [[ELSE]] ], [ [[X]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret double [[RETVAL]]
+;
+entry:
+ %cmp = fcmp une double %y, -0.0
+ br i1 %cmp, label %return, label %else
+
+else:
+ %div = fdiv double %x, %y
+ br label %return
+
+return:
+ %retval = phi double [ %div, %else ], [ %x, %entry ]
+ ret double %retval
+
+}
+
+
+define double @fcmp_oeq_maybe_zero(double %x, double %y, double %z1, double %z2) {
+; CHECK-LABEL: @fcmp_oeq_maybe_zero(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[Z:%.*]] = fadd double [[Z1:%.*]], [[Z2:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq double [[Y:%.*]], [[Z]]
+; CHECK: [[Z_0:%.*]] = call double @llvm.ssa.copy.f64(double [[Z]])
+; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[RETURN:%.*]]
+; CHECK: if:
+; CHECK-NEXT: [[DIV:%.*]] = fdiv double [[X:%.*]], [[Z_0]]
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL:%.*]] = phi double [ [[DIV]], [[IF]] ], [ [[X]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret double [[RETVAL]]
+;
+entry:
+ %z = fadd double %z1, %z2
+ %cmp = fcmp oeq double %y, %z
+ br i1 %cmp, label %if, label %return
+
+if:
+ %div = fdiv double %x, %z
+ br label %return
+
+return:
+ %retval = phi double [ %div, %if ], [ %x, %entry ]
+ ret double %retval
+
+}
+
+define double @fcmp_une_maybe_zero(double %x, double %y, double %z1, double %z2) {
+; CHECK-LABEL: @fcmp_une_maybe_zero(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[Z:%.*]] = fadd double [[Z1:%.*]], [[Z2:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp une double [[Y:%.*]], [[Z]]
+; CHECK: [[Z_0:%.*]] = call double @llvm.ssa.copy.f64(double [[Z]])
+; CHECK-NEXT: br i1 [[CMP]], label [[RETURN:%.*]], label [[ELSE:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[DIV:%.*]] = fdiv double [[X:%.*]], [[Z_0]]
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL:%.*]] = phi double [ [[DIV]], [[ELSE]] ], [ [[X]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret double [[RETVAL]]
+;
+entry:
+ %z = fadd double %z1, %z2
+ %cmp = fcmp une double %y, %z
+ br i1 %cmp, label %return, label %else
+
+else:
+ %div = fdiv double %x, %z
+ br label %return
+
+return:
+ %retval = phi double [ %div, %else ], [ %x, %entry ]
+ ret double %retval
+
+}
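+
+; Illustrative note: in each case above, PredicateInfo materializes an
+; llvm.ssa.copy of the compared value only on the edge where the comparison
+; pins its value (the true edge for oeq, the false edge for une), which is
+; why each fdiv uses the renamed copy while the phi's incoming value from
+; %entry stays the original.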
diff --git a/test/Transforms/Util/PredicateInfo/testandor.ll b/test/Transforms/Util/PredicateInfo/testandor.ll
new file mode 100644
index 000000000000..5942ed155318
--- /dev/null
+++ b/test/Transforms/Util/PredicateInfo/testandor.ll
@@ -0,0 +1,211 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -print-predicateinfo < %s 2>&1 | FileCheck %s
+
+declare void @foo(i1)
+declare void @bar(i32)
+declare void @llvm.assume(i1)
+
+define void @testor(i32 %x, i32 %y) {
+; CHECK-LABEL: @testor(
+; CHECK-NEXT: [[XZ:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK-NEXT: [[YZ:%.*]] = icmp eq i32 [[Y:%.*]], 0
+; CHECK-NEXT: [[Z:%.*]] = or i1 [[XZ]], [[YZ]]
+; CHECK: [[XZ_0:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[XZ]])
+; CHECK: [[X_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X]])
+; CHECK: [[YZ_0:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[YZ]])
+; CHECK: [[Y_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[Y]])
+; CHECK: [[Z_0:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[Z]])
+; CHECK-NEXT: br i1 [[Z]], label [[ONEOF:%.*]], label [[NEITHER:%.*]]
+; CHECK: oneof:
+; CHECK-NEXT: call void @foo(i1 [[XZ]])
+; CHECK-NEXT: call void @foo(i1 [[YZ]])
+; CHECK-NEXT: call void @bar(i32 [[X]])
+; CHECK-NEXT: call void @bar(i32 [[Y]])
+; CHECK-NEXT: ret void
+; CHECK: neither:
+; CHECK-NEXT: call void @foo(i1 [[XZ_0]])
+; CHECK-NEXT: call void @foo(i1 [[YZ_0]])
+; CHECK-NEXT: call void @bar(i32 [[X_0]])
+; CHECK-NEXT: call void @bar(i32 [[Y_0]])
+; CHECK-NEXT: call void @foo(i1 [[Z_0]])
+; CHECK-NEXT: ret void
+;
+ %xz = icmp eq i32 %x, 0
+ %yz = icmp eq i32 %y, 0
+ %z = or i1 %xz, %yz
+ br i1 %z, label %oneof, label %neither
+oneof:
+;; Should not insert on the true edge for or
+ call void @foo(i1 %xz)
+ call void @foo(i1 %yz)
+ call void @bar(i32 %x)
+ call void @bar(i32 %y)
+ ret void
+neither:
+ call void @foo(i1 %xz)
+ call void @foo(i1 %yz)
+ call void @bar(i32 %x)
+ call void @bar(i32 %y)
+ call void @foo(i1 %z)
+ ret void
+}
+define void @testand(i32 %x, i32 %y) {
+; CHECK-LABEL: @testand(
+; CHECK-NEXT: [[XZ:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK-NEXT: [[YZ:%.*]] = icmp eq i32 [[Y:%.*]], 0
+; CHECK-NEXT: [[Z:%.*]] = and i1 [[XZ]], [[YZ]]
+; CHECK: [[XZ_0:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[XZ]])
+; CHECK: [[X_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X]])
+; CHECK: [[YZ_0:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[YZ]])
+; CHECK: [[Y_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[Y]])
+; CHECK: [[Z_0:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[Z]])
+; CHECK-NEXT: br i1 [[Z]], label [[BOTH:%.*]], label [[NOPE:%.*]]
+; CHECK: both:
+; CHECK-NEXT: call void @foo(i1 [[XZ_0]])
+; CHECK-NEXT: call void @foo(i1 [[YZ_0]])
+; CHECK-NEXT: call void @bar(i32 [[X_0]])
+; CHECK-NEXT: call void @bar(i32 [[Y_0]])
+; CHECK-NEXT: ret void
+; CHECK: nope:
+; CHECK-NEXT: call void @foo(i1 [[XZ]])
+; CHECK-NEXT: call void @foo(i1 [[YZ]])
+; CHECK-NEXT: call void @bar(i32 [[X]])
+; CHECK-NEXT: call void @bar(i32 [[Y]])
+; CHECK-NEXT: call void @foo(i1 [[Z_0]])
+; CHECK-NEXT: ret void
+;
+ %xz = icmp eq i32 %x, 0
+ %yz = icmp eq i32 %y, 0
+ %z = and i1 %xz, %yz
+ br i1 %z, label %both, label %nope
+both:
+ call void @foo(i1 %xz)
+ call void @foo(i1 %yz)
+ call void @bar(i32 %x)
+ call void @bar(i32 %y)
+ ret void
+nope:
+;; Should not insert on the false edge for and
+ call void @foo(i1 %xz)
+ call void @foo(i1 %yz)
+ call void @bar(i32 %x)
+ call void @bar(i32 %y)
+ call void @foo(i1 %z)
+ ret void
+}
+define void @testandsame(i32 %x, i32 %y) {
+; CHECK-LABEL: @testandsame(
+; CHECK-NEXT: [[XGT:%.*]] = icmp sgt i32 [[X:%.*]], 0
+; CHECK-NEXT: [[XLT:%.*]] = icmp slt i32 [[X]], 100
+; CHECK-NEXT: [[Z:%.*]] = and i1 [[XGT]], [[XLT]]
+; CHECK: [[XGT_0:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[XGT]])
+; CHECK: [[X_0:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X]])
+; CHECK: [[X_0_1:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X_0]])
+; CHECK: [[XLT_0:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[XLT]])
+; CHECK: [[Z_0:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[Z]])
+; CHECK-NEXT: br i1 [[Z]], label [[BOTH:%.*]], label [[NOPE:%.*]]
+; CHECK: both:
+; CHECK-NEXT: call void @foo(i1 [[XGT_0]])
+; CHECK-NEXT: call void @foo(i1 [[XLT_0]])
+; CHECK-NEXT: call void @bar(i32 [[X_0_1]])
+; CHECK-NEXT: ret void
+; CHECK: nope:
+; CHECK-NEXT: call void @foo(i1 [[XGT]])
+; CHECK-NEXT: call void @foo(i1 [[XLT]])
+; CHECK-NEXT: call void @foo(i1 [[Z_0]])
+; CHECK-NEXT: ret void
+;
+ %xgt = icmp sgt i32 %x, 0
+ %xlt = icmp slt i32 %x, 100
+ %z = and i1 %xgt, %xlt
+ br i1 %z, label %both, label %nope
+both:
+ call void @foo(i1 %xgt)
+ call void @foo(i1 %xlt)
+ call void @bar(i32 %x)
+ ret void
+nope:
+ call void @foo(i1 %xgt)
+ call void @foo(i1 %xlt)
+ call void @foo(i1 %z)
+ ret void
+}
+
+define void @testandassume(i32 %x, i32 %y) {
+; CHECK-LABEL: @testandassume(
+; CHECK-NEXT: [[XZ:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK-NEXT: [[YZ:%.*]] = icmp eq i32 [[Y:%.*]], 0
+; CHECK-NEXT: [[Z:%.*]] = and i1 [[XZ]], [[YZ]]
+; CHECK: [[TMP1:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[XZ]])
+; CHECK: [[TMP2:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[X]])
+; CHECK: [[TMP3:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[YZ]])
+; CHECK: [[TMP4:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[Y]])
+; CHECK: [[TMP5:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[Z]])
+; CHECK-NEXT: call void @llvm.assume(i1 [[TMP5]])
+; CHECK: [[DOT0:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[TMP1]])
+; CHECK: [[DOT01:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[TMP2]])
+; CHECK: [[DOT02:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[TMP3]])
+; CHECK: [[DOT03:%.*]] = call i32 @llvm.ssa.copy.i32(i32 [[TMP4]])
+; CHECK: [[DOT04:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[TMP5]])
+; CHECK-NEXT: br i1 [[TMP5]], label [[BOTH:%.*]], label [[NOPE:%.*]]
+; CHECK: both:
+; CHECK-NEXT: call void @foo(i1 [[DOT0]])
+; CHECK-NEXT: call void @foo(i1 [[DOT02]])
+; CHECK-NEXT: call void @bar(i32 [[DOT01]])
+; CHECK-NEXT: call void @bar(i32 [[DOT03]])
+; CHECK-NEXT: ret void
+; CHECK: nope:
+; CHECK-NEXT: call void @foo(i1 [[DOT04]])
+; CHECK-NEXT: ret void
+;
+ %xz = icmp eq i32 %x, 0
+ %yz = icmp eq i32 %y, 0
+ %z = and i1 %xz, %yz
+ call void @llvm.assume(i1 %z)
+ br i1 %z, label %both, label %nope
+both:
+ call void @foo(i1 %xz)
+ call void @foo(i1 %yz)
+ call void @bar(i32 %x)
+ call void @bar(i32 %y)
+ ret void
+nope:
+ call void @foo(i1 %z)
+ ret void
+}
+
+;; Unlike and/or on branches, an assume is *always* true, so we only match and for it.
+define void @testorassume(i32 %x, i32 %y) {
+;
+; CHECK-LABEL: @testorassume(
+; CHECK-NEXT: [[XZ:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK-NEXT: [[YZ:%.*]] = icmp eq i32 [[Y:%.*]], 0
+; CHECK-NEXT: [[Z:%.*]] = or i1 [[XZ]], [[YZ]]
+; CHECK-NEXT: call void @llvm.assume(i1 [[Z]])
+; CHECK: [[Z_0:%.*]] = call i1 @llvm.ssa.copy.i1(i1 [[Z]])
+; CHECK-NEXT: br i1 [[Z]], label [[BOTH:%.*]], label [[NOPE:%.*]]
+; CHECK: both:
+; CHECK-NEXT: call void @foo(i1 [[XZ]])
+; CHECK-NEXT: call void @foo(i1 [[YZ]])
+; CHECK-NEXT: call void @bar(i32 [[X]])
+; CHECK-NEXT: call void @bar(i32 [[Y]])
+; CHECK-NEXT: ret void
+; CHECK: nope:
+; CHECK-NEXT: call void @foo(i1 [[Z_0]])
+; CHECK-NEXT: ret void
+;
+ %xz = icmp eq i32 %x, 0
+ %yz = icmp eq i32 %y, 0
+ %z = or i1 %xz, %yz
+ call void @llvm.assume(i1 %z)
+ br i1 %z, label %both, label %nope
+both:
+ call void @foo(i1 %xz)
+ call void @foo(i1 %yz)
+ call void @bar(i32 %x)
+ call void @bar(i32 %y)
+ ret void
+nope:
+ call void @foo(i1 %z)
+ ret void
+}
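+
+; Illustrative summary of the cases above: operands of an `and` condition
+; are only known on the true edge, operands of an `or` only on the false
+; edge, and an assume (being unconditionally true) is decomposed through
+; `and` but not through `or`.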
diff --git a/test/Transforms/Util/clone-dicompileunit.ll b/test/Transforms/Util/clone-dicompileunit.ll
new file mode 100644
index 000000000000..3f7b5981752d
--- /dev/null
+++ b/test/Transforms/Util/clone-dicompileunit.ll
@@ -0,0 +1,66 @@
+; RUN: opt -run-twice -verify -disable-debug-info-type-map -S -o - %s | FileCheck %s
+
+; Generated using:
+; $ cat p.cpp
+; void sink(void *);
+; class A {
+; public:
+; template <typename> void m_fn2() { static int a; }
+; virtual void m_fn1();
+; };
+; void foo() {
+; class B : public A {
+; public:
+; B() { m_fn2<B>(); }
+; };
+; sink(new B);
+; }
+; $ clang++ -target x86_64-unknown-linux -fvisibility=hidden -O2 -g2 -flto -S p.cpp -o p.ll
+; # then manually removed function/gv definitions
+
+; Test that, when the module is cloned, it does not contain a reference to
+; the original DICompileUnit as a result of a collision between the cloned
+; DISubprogram for m_fn2<B> (which refers to the non-ODR entity B via
+; template parameters) and the original DISubprogram.
+
+; CHECK: DICompileUnit
+; CHECK-NOT: DICompileUnit
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux"
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!28, !29}
+!llvm.ident = !{!30}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 5.0.0 ", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, globals: !3)
+!1 = !DIFile(filename: "p.cpp", directory: "/usr/local/google/home/pcc/b682773-2-repro/small2")
+!2 = !{}
+!3 = !{!4}
+!4 = !DIGlobalVariableExpression(var: !5)
+!5 = distinct !DIGlobalVariable(name: "a", scope: !6, file: !1, line: 5, type: !27, isLocal: true, isDefinition: true)
+!6 = distinct !DISubprogram(name: "m_fn2<B>", linkageName: "_ZN1A5m_fn2IZ3foovE1BEEvv", scope: !7, file: !1, line: 5, type: !8, isLocal: true, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, templateParams: !11, declaration: !23, variables: !24)
+!7 = !DICompositeType(tag: DW_TAG_class_type, name: "A", file: !1, line: 3, flags: DIFlagFwdDecl, identifier: "_ZTS1A")
+!8 = !DISubroutineType(types: !9)
+!9 = !{null, !10}
+!10 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 64, flags: DIFlagArtificial | DIFlagObjectPointer)
+!11 = !{!12}
+!12 = !DITemplateTypeParameter(type: !13)
+!13 = distinct !DICompositeType(tag: DW_TAG_class_type, name: "B", scope: !14, file: !1, line: 10, size: 64, elements: !17, vtableHolder: !7)
+!14 = distinct !DISubprogram(name: "foo", linkageName: "_Z3foov", scope: !1, file: !1, line: 9, type: !15, isLocal: false, isDefinition: true, scopeLine: 9, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !2)
+!15 = !DISubroutineType(types: !16)
+!16 = !{null}
+!17 = !{!18, !19}
+!18 = !DIDerivedType(tag: DW_TAG_inheritance, scope: !13, baseType: !7, flags: DIFlagPublic)
+!19 = !DISubprogram(name: "B", scope: !13, file: !1, line: 12, type: !20, isLocal: false, isDefinition: false, scopeLine: 12, flags: DIFlagPublic | DIFlagPrototyped, isOptimized: true)
+!20 = !DISubroutineType(types: !21)
+!21 = !{null, !22}
+!22 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !13, size: 64, flags: DIFlagArtificial | DIFlagObjectPointer)
+!23 = !DISubprogram(name: "m_fn2<B>", linkageName: "_ZN1A5m_fn2IZ3foovE1BEEvv", scope: !7, file: !1, line: 5, type: !8, isLocal: false, isDefinition: false, scopeLine: 5, flags: DIFlagPublic | DIFlagPrototyped, isOptimized: true, templateParams: !11)
+!24 = !{!25}
+!25 = !DILocalVariable(name: "this", arg: 1, scope: !6, type: !26, flags: DIFlagArtificial | DIFlagObjectPointer)
+!26 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 64)
+!27 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!28 = !{i32 2, !"Dwarf Version", i32 4}
+!29 = !{i32 2, !"Debug Info Version", i32 3}
+!30 = !{!"clang version 5.0.0 "}
diff --git a/test/Transforms/Util/simplify-dbg-declare-load.ll b/test/Transforms/Util/simplify-dbg-declare-load.ll
index 21d305450860..4ea88fa81e05 100644
--- a/test/Transforms/Util/simplify-dbg-declare-load.ll
+++ b/test/Transforms/Util/simplify-dbg-declare-load.ll
@@ -19,7 +19,7 @@ fail: ; preds = %top
unreachable
idxend: ; preds = %top
-; CHECK-NOT call void @llvm.dbg.value(metadata %foo* %cp,
+; CHECK-NOT: call void @llvm.dbg.value(metadata %foo* %cp,
%0 = load volatile %foo, %foo* %cp, align 8
; CHECK: call void @llvm.dbg.value(metadata %foo %0,
store volatile %foo %0, %foo* undef, align 8
diff --git a/test/Transforms/Util/strip-nonlinetable-debuginfo-loops.ll b/test/Transforms/Util/strip-nonlinetable-debuginfo-loops.ll
new file mode 100644
index 000000000000..5f88e31da9fc
--- /dev/null
+++ b/test/Transforms/Util/strip-nonlinetable-debuginfo-loops.ll
@@ -0,0 +1,71 @@
+; RUN: opt -S -strip-nonlinetable-debuginfo %s -o %t
+; RUN: cat %t | FileCheck %s
+; RUN: cat %t | FileCheck %s --check-prefix=NEGATIVE
+; void f(volatile int *i) {
+; while (--*i) {}
+; }
+source_filename = "/tmp/loop.c"
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.12.0"
+
+define void @f(i32* %i) local_unnamed_addr #0 !dbg !7 {
+entry:
+ tail call void @llvm.dbg.value(metadata i32* %i, i64 0, metadata !14, metadata !15), !dbg !16
+ br label %while.cond, !dbg !17
+
+while.cond: ; preds = %while.cond, %entry
+ %0 = load volatile i32, i32* %i, align 4, !dbg !18, !tbaa !19
+ %dec = add nsw i32 %0, -1, !dbg !18
+ store volatile i32 %dec, i32* %i, align 4, !dbg !18, !tbaa !19
+ %tobool = icmp eq i32 %dec, 0, !dbg !17
+ ; CHECK: !llvm.loop ![[LOOP:[0-9]+]]
+ br i1 %tobool, label %while.end, label %while.cond, !dbg !17, !llvm.loop !23
+
+while.end: ; preds = %while.cond
+ ret void, !dbg !25
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
+
+attributes #0 = { nounwind ssp uwtable }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+; CHECK: ![[CU:.*]] = distinct !DICompileUnit(language: DW_LANG_C99,
+; CHECK-SAME: emissionKind: LineTablesOnly
+; NEGATIVE-NOT: !DICompileUnit({{.*}} emissionKind: FullDebug
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 (trunk 298880) (llvm/trunk 298875)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "/tmp/loop.c", directory: "/")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 1, !"PIC Level", i32 2}
+!6 = !{!"clang version 5.0.0 (trunk 298880) (llvm/trunk 298875)"}
+; CHECK: ![[F:[0-9]]] = distinct !DISubprogram(name: "f", scope: !1
+!7 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !13)
+!8 = !DISubroutineType(types: !9)
+!9 = !{null, !10}
+!10 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !11, size: 64)
+!11 = !DIDerivedType(tag: DW_TAG_volatile_type, baseType: !12)
+; NEGATIVE-NOT: !DIBasicType(name: "int",
+!12 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!13 = !{!14}
+!14 = !DILocalVariable(name: "i", arg: 1, scope: !7, file: !1, line: 1, type: !10)
+!15 = !DIExpression()
+!16 = !DILocation(line: 1, column: 22, scope: !7)
+; CHECK: ![[BEGIN:[0-9]+]] = !DILocation(line: 2, column: 3, scope: ![[F]])
+!17 = !DILocation(line: 2, column: 3, scope: !7)
+!18 = !DILocation(line: 2, column: 10, scope: !7)
+!19 = !{!20, !20, i64 0}
+!20 = !{!"int", !21, i64 0}
+!21 = !{!"omnipotent char", !22, i64 0}
+!22 = !{!"Simple C/C++ TBAA"}
+; CHECK: ![[LOOP]] = distinct !{![[LOOP]], ![[BEGIN]], ![[END:[0-9]+]]}
+!23 = distinct !{!23, !17, !24}
+; CHECK: ![[END]] = !DILocation(line: 3, column: 3, scope: ![[F]])
+!24 = !DILocation(line: 3, column: 3, scope: !7)
+!25 = !DILocation(line: 4, column: 1, scope: !7)
diff --git a/test/Transforms/WholeProgramDevirt/Inputs/export.yaml b/test/Transforms/WholeProgramDevirt/Inputs/export.yaml
new file mode 100644
index 000000000000..0f6f59de7522
--- /dev/null
+++ b/test/Transforms/WholeProgramDevirt/Inputs/export.yaml
@@ -0,0 +1,20 @@
+---
+GlobalValueMap:
+ 42:
+ - TypeTestAssumeVCalls:
+ - GUID: 14276520915468743435 # typeid1
+ Offset: 0
+ TypeCheckedLoadVCalls:
+ - GUID: 15427464259790519041 # typeid2
+ Offset: 0
+ TypeTestAssumeConstVCalls:
+ - VFunc:
+ GUID: 3515965990081467659 # typeid3
+ Offset: 0
+ Args: [12, 24]
+ TypeCheckedLoadConstVCalls:
+ - VFunc:
+ GUID: 17525413373118030901 # typeid4
+ Offset: 0
+ Args: [24, 12]
+...
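+# Illustrative note: the four lists above are the four call-site summary
+# kinds whole-program devirtualization records: calls guarded by
+# llvm.type.test/llvm.assume vs. llvm.type.checked.load, each in a variant
+# that also captures constant call arguments (the Args lists).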
diff --git a/test/Transforms/WholeProgramDevirt/Inputs/import-indir.yaml b/test/Transforms/WholeProgramDevirt/Inputs/import-indir.yaml
new file mode 100644
index 000000000000..1cb3ad3f134c
--- /dev/null
+++ b/test/Transforms/WholeProgramDevirt/Inputs/import-indir.yaml
@@ -0,0 +1,41 @@
+---
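+# The ResByArg maps below are keyed by the call's constant argument list:
+# "" means no arguments and "12,24" means two constant arguments, 12 and 24.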
+GlobalValueMap:
+ 42:
+ - TypeTestAssumeVCalls:
+ - GUID: 123
+ Offset: 0
+ - GUID: 456
+ Offset: 4
+ TypeCheckedLoadVCalls:
+ - GUID: 789
+ Offset: 8
+ - GUID: 1234
+ Offset: 16
+ TypeTestAssumeConstVCalls:
+ - VFunc:
+ GUID: 123
+ Offset: 4
+ Args: [12, 24]
+ TypeCheckedLoadConstVCalls:
+ - VFunc:
+ GUID: 456
+ Offset: 8
+ Args: [24, 12]
+TypeIdMap:
+ typeid1:
+ WPDRes:
+ 0:
+ Kind: Indir
+ 4:
+ Kind: Indir
+ ResByArg:
+ "":
+ Kind: UniformRetVal
+ Info: 12
+ 12:
+ Kind: UniformRetVal
+ Info: 24
+ "12,24":
+ Kind: UniformRetVal
+ Info: 48
+...
diff --git a/test/Transforms/WholeProgramDevirt/Inputs/import-single-impl.yaml b/test/Transforms/WholeProgramDevirt/Inputs/import-single-impl.yaml
new file mode 100644
index 000000000000..26764eb3b29c
--- /dev/null
+++ b/test/Transforms/WholeProgramDevirt/Inputs/import-single-impl.yaml
@@ -0,0 +1,13 @@
+---
+TypeIdMap:
+ typeid1:
+ WPDRes:
+ 0:
+ Kind: SingleImpl
+ SingleImplName: singleimpl1
+ typeid2:
+ WPDRes:
+ 8:
+ Kind: SingleImpl
+ SingleImplName: singleimpl2
+...
diff --git a/test/Transforms/WholeProgramDevirt/Inputs/import-uniform-ret-val.yaml b/test/Transforms/WholeProgramDevirt/Inputs/import-uniform-ret-val.yaml
new file mode 100644
index 000000000000..f1daae63b678
--- /dev/null
+++ b/test/Transforms/WholeProgramDevirt/Inputs/import-uniform-ret-val.yaml
@@ -0,0 +1,19 @@
+---
+TypeIdMap:
+ typeid1:
+ WPDRes:
+ 0:
+ Kind: Indir
+ ResByArg:
+ 1:
+ Kind: UniformRetVal
+ Info: 42
+ typeid2:
+ WPDRes:
+ 8:
+ Kind: Indir
+ ResByArg:
+ 1:
+ Kind: UniformRetVal
+ Info: 42
+...
diff --git a/test/Transforms/WholeProgramDevirt/Inputs/import-unique-ret-val0.yaml b/test/Transforms/WholeProgramDevirt/Inputs/import-unique-ret-val0.yaml
new file mode 100644
index 000000000000..597b17877767
--- /dev/null
+++ b/test/Transforms/WholeProgramDevirt/Inputs/import-unique-ret-val0.yaml
@@ -0,0 +1,11 @@
+---
+TypeIdMap:
+ typeid2:
+ WPDRes:
+ 8:
+ Kind: Indir
+ ResByArg:
+ 3:
+ Kind: UniqueRetVal
+ Info: 0
+...
diff --git a/test/Transforms/WholeProgramDevirt/Inputs/import-unique-ret-val1.yaml b/test/Transforms/WholeProgramDevirt/Inputs/import-unique-ret-val1.yaml
new file mode 100644
index 000000000000..737ef1173c3c
--- /dev/null
+++ b/test/Transforms/WholeProgramDevirt/Inputs/import-unique-ret-val1.yaml
@@ -0,0 +1,11 @@
+---
+TypeIdMap:
+ typeid2:
+ WPDRes:
+ 8:
+ Kind: Indir
+ ResByArg:
+ 3:
+ Kind: UniqueRetVal
+ Info: 1
+...
diff --git a/test/Transforms/WholeProgramDevirt/Inputs/import-vcp.yaml b/test/Transforms/WholeProgramDevirt/Inputs/import-vcp.yaml
new file mode 100644
index 000000000000..4fbee126d0ea
--- /dev/null
+++ b/test/Transforms/WholeProgramDevirt/Inputs/import-vcp.yaml
@@ -0,0 +1,19 @@
+---
+TypeIdMap:
+ typeid1:
+ WPDRes:
+ 0:
+ Kind: Indir
+ ResByArg:
+ 1:
+ Kind: VirtualConstProp
+ Info: 0
+ typeid2:
+ WPDRes:
+ 8:
+ Kind: Indir
+ ResByArg:
+ 3:
+ Kind: VirtualConstProp
+ Info: 0
+...
diff --git a/test/Transforms/WholeProgramDevirt/bad-read-from-vtable.ll b/test/Transforms/WholeProgramDevirt/bad-read-from-vtable.ll
index 4885be777566..e5d0e74b22e2 100644
--- a/test/Transforms/WholeProgramDevirt/bad-read-from-vtable.ll
+++ b/test/Transforms/WholeProgramDevirt/bad-read-from-vtable.ll
@@ -3,8 +3,8 @@
target datalayout = "e-p:64:64"
target triple = "x86_64-unknown-linux-gnu"
-@vt1 = global [2 x i8*] [i8* zeroinitializer, i8* bitcast (void (i8*)* @vf to i8*)], !type !0
-@vt2 = global i8* bitcast (void (i8*)* @vf to i8*), !type !1
+@vt1 = constant [2 x i8*] [i8* zeroinitializer, i8* bitcast (void (i8*)* @vf to i8*)], !type !0
+@vt2 = constant i8* bitcast (void (i8*)* @vf to i8*), !type !1
define void @vf(i8* %this) {
ret void
diff --git a/test/Transforms/WholeProgramDevirt/export-nothing.ll b/test/Transforms/WholeProgramDevirt/export-nothing.ll
new file mode 100644
index 000000000000..e0814efbf9c0
--- /dev/null
+++ b/test/Transforms/WholeProgramDevirt/export-nothing.ll
@@ -0,0 +1,7 @@
+; RUN: opt -wholeprogramdevirt -wholeprogramdevirt-summary-action=export -wholeprogramdevirt-write-summary=%t -o /dev/null %s
+; RUN: FileCheck %s < %t
+
+; CHECK: ---
+; CHECK-NEXT: GlobalValueMap:
+; CHECK-NEXT: TypeIdMap:
+; CHECK-NEXT: ...
diff --git a/test/Transforms/WholeProgramDevirt/export-single-impl.ll b/test/Transforms/WholeProgramDevirt/export-single-impl.ll
new file mode 100644
index 000000000000..f4f3fd054c46
--- /dev/null
+++ b/test/Transforms/WholeProgramDevirt/export-single-impl.ll
@@ -0,0 +1,78 @@
+; RUN: opt -wholeprogramdevirt -wholeprogramdevirt-summary-action=export -wholeprogramdevirt-read-summary=%S/Inputs/export.yaml -wholeprogramdevirt-write-summary=%t -S -o - %s | FileCheck %s
+; RUN: FileCheck --check-prefix=SUMMARY %s < %t
+
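+; Exporting a SingleImpl resolution for the internal function @vf4 requires
+; promoting it: the function is renamed to "vf4$merged" and given hidden
+; visibility so that importing modules can reference it.
+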
+; SUMMARY: TypeIdMap:
+; SUMMARY-NEXT: typeid1:
+; SUMMARY-NEXT: TTRes:
+; SUMMARY-NEXT: Kind: Unsat
+; SUMMARY-NEXT: SizeM1BitWidth: 0
+; SUMMARY-NEXT: WPDRes:
+; SUMMARY-NEXT: 0:
+; SUMMARY-NEXT: Kind: SingleImpl
+; SUMMARY-NEXT: SingleImplName: vf1
+; SUMMARY-NEXT: ResByArg:
+; SUMMARY-NEXT: typeid2:
+; SUMMARY-NEXT: TTRes:
+; SUMMARY-NEXT: Kind: Unsat
+; SUMMARY-NEXT: SizeM1BitWidth: 0
+; SUMMARY-NEXT: WPDRes:
+; SUMMARY-NEXT: 0:
+; SUMMARY-NEXT: Kind: SingleImpl
+; SUMMARY-NEXT: SingleImplName: vf2
+; SUMMARY-NEXT: ResByArg:
+; SUMMARY-NEXT: typeid3:
+; SUMMARY-NEXT: TTRes:
+; SUMMARY-NEXT: Kind: Unsat
+; SUMMARY-NEXT: SizeM1BitWidth: 0
+; SUMMARY-NEXT: WPDRes:
+; SUMMARY-NEXT: 0:
+; SUMMARY-NEXT: Kind: SingleImpl
+; SUMMARY-NEXT: SingleImplName: vf3
+; SUMMARY-NEXT: ResByArg:
+; SUMMARY-NEXT: typeid4:
+; SUMMARY-NEXT: TTRes:
+; SUMMARY-NEXT: Kind: Unsat
+; SUMMARY-NEXT: SizeM1BitWidth: 0
+; SUMMARY-NEXT: WPDRes:
+; SUMMARY-NEXT: 0:
+; SUMMARY-NEXT: Kind: SingleImpl
+; SUMMARY-NEXT: SingleImplName: 'vf4$merged'
+; SUMMARY-NEXT: ResByArg:
+; SUMMARY-NEXT: ...
+
+; CHECK: @vt1 = constant void (i8*)* @vf1
+@vt1 = constant void (i8*)* @vf1, !type !0
+
+; CHECK: @vt2 = constant void (i8*)* @vf2
+@vt2 = constant void (i8*)* @vf2, !type !1
+
+@vt3 = constant void (i8*)* @vf3, !type !2
+
+; CHECK: @vt4 = constant void (i8*)* @"vf4$merged"
+@vt4 = constant void (i8*)* @vf4, !type !3
+
+@vt5 = constant void (i8*)* @vf5, !type !4
+
+; CHECK: declare void @vf1(i8*)
+declare void @vf1(i8*)
+
+; CHECK: define void @vf2(i8*)
+define void @vf2(i8*) {
+ ret void
+}
+
+declare void @vf3(i8*)
+
+; CHECK: define hidden void @"vf4$merged"
+define internal void @vf4(i8*) {
+ ret void
+}
+
+declare void @vf5(i8*)
+
+!0 = !{i32 0, !"typeid1"}
+!1 = !{i32 0, !"typeid2"}
+!2 = !{i32 0, !"typeid3"}
+!3 = !{i32 0, !"typeid4"}
+!4 = !{i32 0, !5}
+!5 = distinct !{}
diff --git a/test/Transforms/WholeProgramDevirt/export-uniform-ret-val.ll b/test/Transforms/WholeProgramDevirt/export-uniform-ret-val.ll
new file mode 100644
index 000000000000..1d7030c41fd0
--- /dev/null
+++ b/test/Transforms/WholeProgramDevirt/export-uniform-ret-val.ll
@@ -0,0 +1,36 @@
+; RUN: opt -wholeprogramdevirt -wholeprogramdevirt-summary-action=export -wholeprogramdevirt-read-summary=%S/Inputs/export.yaml -wholeprogramdevirt-write-summary=%t -S -o - %s | FileCheck %s
+; RUN: FileCheck --check-prefix=SUMMARY %s < %t
+
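+; With constant arguments (24, 12), both implementations of typeid4 return 36,
+; so the exported resolution is UniformRetVal with Info: 36 and the vtables in
+; this module are left unchanged.
+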
+; SUMMARY: - TypeTests:
+; SUMMARY-NEXT: TypeTestAssumeVCalls:
+
+; SUMMARY: TypeIdMap:
+; SUMMARY-NEXT: typeid4:
+; SUMMARY-NEXT: TTRes:
+; SUMMARY-NEXT: Kind: Unsat
+; SUMMARY-NEXT: SizeM1BitWidth: 0
+; SUMMARY-NEXT: WPDRes:
+; SUMMARY-NEXT: 0:
+; SUMMARY-NEXT: Kind: Indir
+; SUMMARY-NEXT: SingleImplName: ''
+; SUMMARY-NEXT: ResByArg:
+; SUMMARY-NEXT: 24,12:
+; SUMMARY-NEXT: Kind: UniformRetVal
+; SUMMARY-NEXT: Info: 36
+
+; CHECK: @vt4a = constant i32 (i8*, i32, i32)* @vf4a
+@vt4a = constant i32 (i8*, i32, i32)* @vf4a, !type !0
+
+; CHECK: @vt4b = constant i32 (i8*, i32, i32)* @vf4b
+@vt4b = constant i32 (i8*, i32, i32)* @vf4b, !type !0
+
+define i32 @vf4a(i8*, i32 %x, i32 %y) {
+ %z = add i32 %x, %y
+ ret i32 %z
+}
+
+define i32 @vf4b(i8*, i32 %x, i32 %y) {
+ ret i32 36
+}
+
+!0 = !{i32 0, !"typeid4"}
diff --git a/test/Transforms/WholeProgramDevirt/export-unique-ret-val.ll b/test/Transforms/WholeProgramDevirt/export-unique-ret-val.ll
new file mode 100644
index 000000000000..174a573b5b0d
--- /dev/null
+++ b/test/Transforms/WholeProgramDevirt/export-unique-ret-val.ll
@@ -0,0 +1,79 @@
+; RUN: opt -wholeprogramdevirt -wholeprogramdevirt-summary-action=export -wholeprogramdevirt-read-summary=%S/Inputs/export.yaml -wholeprogramdevirt-write-summary=%t -S -o - %s | FileCheck %s
+; RUN: FileCheck --check-prefix=SUMMARY %s < %t
+
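+; For each UniqueRetVal resolution the pass exports a hidden
+; __typeid_..._unique_member alias to the one vtable whose function returns
+; the minority value; importers devirtualize by comparing against that alias.
+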
+; SUMMARY: - TypeTests:
+; SUMMARY-NEXT: TypeTestAssumeVCalls:
+
+; SUMMARY: TypeIdMap:
+; SUMMARY-NEXT: typeid3:
+; SUMMARY-NEXT: TTRes:
+; SUMMARY-NEXT: Kind: Unsat
+; SUMMARY-NEXT: SizeM1BitWidth: 0
+; SUMMARY-NEXT: WPDRes:
+; SUMMARY-NEXT: 0:
+; SUMMARY-NEXT: Kind: Indir
+; SUMMARY-NEXT: SingleImplName: ''
+; SUMMARY-NEXT: ResByArg:
+; SUMMARY-NEXT: 12,24:
+; SUMMARY-NEXT: Kind: UniqueRetVal
+; SUMMARY-NEXT: Info: 0
+; SUMMARY-NEXT: typeid4:
+; SUMMARY-NEXT: TTRes:
+; SUMMARY-NEXT: Kind: Unsat
+; SUMMARY-NEXT: SizeM1BitWidth: 0
+; SUMMARY-NEXT: WPDRes:
+; SUMMARY-NEXT: 0:
+; SUMMARY-NEXT: Kind: Indir
+; SUMMARY-NEXT: SingleImplName: ''
+; SUMMARY-NEXT: ResByArg:
+; SUMMARY-NEXT: 24,12:
+; SUMMARY-NEXT: Kind: UniqueRetVal
+; SUMMARY-NEXT: Info: 1
+
+; CHECK: @vt3a = constant i1 (i8*, i32, i32)* @vf3a
+@vt3a = constant i1 (i8*, i32, i32)* @vf3a, !type !0
+
+; CHECK: @vt3b = constant i1 (i8*, i32, i32)* @vf3b
+@vt3b = constant i1 (i8*, i32, i32)* @vf3b, !type !0
+
+; CHECK: @vt3c = constant i1 (i8*, i32, i32)* @vf3c
+@vt3c = constant i1 (i8*, i32, i32)* @vf3c, !type !0
+
+; CHECK: @vt4a = constant i1 (i8*, i32, i32)* @vf4a
+@vt4a = constant i1 (i8*, i32, i32)* @vf4a, !type !1
+
+; CHECK: @vt4b = constant i1 (i8*, i32, i32)* @vf4b
+@vt4b = constant i1 (i8*, i32, i32)* @vf4b, !type !1
+
+; CHECK: @vt4c = constant i1 (i8*, i32, i32)* @vf4c
+@vt4c = constant i1 (i8*, i32, i32)* @vf4c, !type !1
+
+; CHECK: @__typeid_typeid3_0_12_24_unique_member = hidden alias i8, bitcast (i1 (i8*, i32, i32)** @vt3b to i8*)
+; CHECK: @__typeid_typeid4_0_24_12_unique_member = hidden alias i8, bitcast (i1 (i8*, i32, i32)** @vt4b to i8*)
+
+define i1 @vf3a(i8*, i32, i32) {
+ ret i1 true
+}
+
+define i1 @vf3b(i8*, i32, i32) {
+ ret i1 false
+}
+
+define i1 @vf3c(i8*, i32, i32) {
+ ret i1 true
+}
+
+define i1 @vf4a(i8*, i32, i32) {
+ ret i1 false
+}
+
+define i1 @vf4b(i8*, i32, i32) {
+ ret i1 true
+}
+
+define i1 @vf4c(i8*, i32, i32) {
+ ret i1 false
+}
+
+!0 = !{i32 0, !"typeid3"}
+!1 = !{i32 0, !"typeid4"}
diff --git a/test/Transforms/WholeProgramDevirt/export-unsuccessful-checked.ll b/test/Transforms/WholeProgramDevirt/export-unsuccessful-checked.ll
new file mode 100644
index 000000000000..0785ade28570
--- /dev/null
+++ b/test/Transforms/WholeProgramDevirt/export-unsuccessful-checked.ll
@@ -0,0 +1,28 @@
+; RUN: opt -wholeprogramdevirt -wholeprogramdevirt-summary-action=export -wholeprogramdevirt-read-summary=%S/Inputs/export.yaml -wholeprogramdevirt-write-summary=%t -o /dev/null %s
+; RUN: FileCheck %s < %t
+
+; CHECK: - TypeTests: [ 15427464259790519041, 17525413373118030901 ]
+; CHECK-NEXT: TypeTestAssumeVCalls:
+
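+; Each type id here has two candidate implementations, so no devirtualization
+; applies; the type.checked.load entries for typeid2 and typeid4 are exported
+; as plain TypeTests (their GUIDs above) so type test lowering still sees them.
+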
+@vt1a = constant void (i8*)* @vf1a, !type !0
+@vt1b = constant void (i8*)* @vf1b, !type !0
+@vt2a = constant void (i8*)* @vf2a, !type !1
+@vt2b = constant void (i8*)* @vf2b, !type !1
+@vt3a = constant void (i8*)* @vf3a, !type !2
+@vt3b = constant void (i8*)* @vf3b, !type !2
+@vt4a = constant void (i8*)* @vf4a, !type !3
+@vt4b = constant void (i8*)* @vf4b, !type !3
+
+declare void @vf1a(i8*)
+declare void @vf1b(i8*)
+declare void @vf2a(i8*)
+declare void @vf2b(i8*)
+declare void @vf3a(i8*)
+declare void @vf3b(i8*)
+declare void @vf4a(i8*)
+declare void @vf4b(i8*)
+
+!0 = !{i32 0, !"typeid1"}
+!1 = !{i32 0, !"typeid2"}
+!2 = !{i32 0, !"typeid3"}
+!3 = !{i32 0, !"typeid4"}
diff --git a/test/Transforms/WholeProgramDevirt/export-vcp.ll b/test/Transforms/WholeProgramDevirt/export-vcp.ll
new file mode 100644
index 000000000000..8e6e69b9bd43
--- /dev/null
+++ b/test/Transforms/WholeProgramDevirt/export-vcp.ll
@@ -0,0 +1,83 @@
+; RUN: opt -wholeprogramdevirt -wholeprogramdevirt-summary-action=export -wholeprogramdevirt-read-summary=%S/Inputs/export.yaml -wholeprogramdevirt-write-summary=%t -S -o - %s | FileCheck %s
+; RUN: FileCheck --check-prefix=SUMMARY %s < %t
+
+target datalayout = "e-p:64:64"
+target triple = "x86_64-unknown-linux-gnu"
+
+; SUMMARY: TypeIdMap:
+; SUMMARY-NEXT: typeid3:
+; SUMMARY-NEXT: TTRes:
+; SUMMARY-NEXT: Kind: Unsat
+; SUMMARY-NEXT: SizeM1BitWidth: 0
+; SUMMARY-NEXT: WPDRes:
+; SUMMARY-NEXT: 0:
+; SUMMARY-NEXT: Kind: Indir
+; SUMMARY-NEXT: SingleImplName: ''
+; SUMMARY-NEXT: ResByArg:
+; SUMMARY-NEXT: 12,24:
+; SUMMARY-NEXT: Kind: VirtualConstProp
+; SUMMARY-NEXT: Info: 0
+; SUMMARY-NEXT: typeid4:
+; SUMMARY-NEXT: TTRes:
+; SUMMARY-NEXT: Kind: Unsat
+; SUMMARY-NEXT: SizeM1BitWidth: 0
+; SUMMARY-NEXT: WPDRes:
+; SUMMARY-NEXT: 0:
+; SUMMARY-NEXT: Kind: Indir
+; SUMMARY-NEXT: SingleImplName: ''
+; SUMMARY-NEXT: ResByArg:
+; SUMMARY-NEXT: 24,12:
+; SUMMARY-NEXT: Kind: VirtualConstProp
+; SUMMARY-NEXT: Info: 0
+
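+; Virtual constant propagation rewrites each vtable as { pre-bytes, fptr,
+; post-bytes }, storing the propagated return values in the bytes before the
+; address point; the !type offsets move from 0 to 8 accordingly (see the
+; rewritten !0 and !1 at the bottom).
+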
+; CHECK: [[CVT3A:.*]] = private constant { [8 x i8], i1 (i8*, i32, i32)*, [0 x i8] } { [8 x i8] zeroinitializer, i1 (i8*, i32, i32)* @vf0i1, [0 x i8] zeroinitializer }, !type !0
+@vt3a = constant i1 (i8*, i32, i32)* @vf0i1, !type !0
+
+; CHECK: [[CVT3B:.*]] = private constant { [8 x i8], i1 (i8*, i32, i32)*, [0 x i8] } { [8 x i8] c"\00\00\00\00\00\00\00\01", i1 (i8*, i32, i32)* @vf1i1, [0 x i8] zeroinitializer }, !type !0
+@vt3b = constant i1 (i8*, i32, i32)* @vf1i1, !type !0
+
+; CHECK: [[CVT3C:.*]] = private constant { [8 x i8], i1 (i8*, i32, i32)*, [0 x i8] } { [8 x i8] zeroinitializer, i1 (i8*, i32, i32)* @vf0i1, [0 x i8] zeroinitializer }, !type !0
+@vt3c = constant i1 (i8*, i32, i32)* @vf0i1, !type !0
+
+; CHECK: [[CVT3D:.*]] = private constant { [8 x i8], i1 (i8*, i32, i32)*, [0 x i8] } { [8 x i8] c"\00\00\00\00\00\00\00\01", i1 (i8*, i32, i32)* @vf1i1, [0 x i8] zeroinitializer }, !type !0
+@vt3d = constant i1 (i8*, i32, i32)* @vf1i1, !type !0
+
+; CHECK: [[CVT4A:.*]] = private constant { [8 x i8], i32 (i8*, i32, i32)*, [0 x i8] } { [8 x i8] c"\00\00\00\00\01\00\00\00", i32 (i8*, i32, i32)* @vf1i32, [0 x i8] zeroinitializer }, !type !1
+@vt4a = constant i32 (i8*, i32, i32)* @vf1i32, !type !1
+
+; CHECK: [[CVT4B:.*]] = private constant { [8 x i8], i32 (i8*, i32, i32)*, [0 x i8] } { [8 x i8] c"\00\00\00\00\02\00\00\00", i32 (i8*, i32, i32)* @vf2i32, [0 x i8] zeroinitializer }, !type !1
+@vt4b = constant i32 (i8*, i32, i32)* @vf2i32, !type !1
+
+; CHECK: @__typeid_typeid3_0_12_24_byte = hidden alias i8, inttoptr (i32 -1 to i8*)
+; CHECK: @__typeid_typeid3_0_12_24_bit = hidden alias i8, inttoptr (i8 1 to i8*)
+; CHECK: @__typeid_typeid4_0_24_12_byte = hidden alias i8, inttoptr (i32 -4 to i8*)
+; CHECK: @__typeid_typeid4_0_24_12_bit = hidden alias i8, inttoptr (i8 1 to i8*)
+
+; CHECK: @vt3a = alias i1 (i8*, i32, i32)*, getelementptr inbounds ({ [8 x i8], i1 (i8*, i32, i32)*, [0 x i8] }, { [8 x i8], i1 (i8*, i32, i32)*, [0 x i8] }* [[CVT3A]], i32 0, i32 1)
+; CHECK: @vt3b = alias i1 (i8*, i32, i32)*, getelementptr inbounds ({ [8 x i8], i1 (i8*, i32, i32)*, [0 x i8] }, { [8 x i8], i1 (i8*, i32, i32)*, [0 x i8] }* [[CVT3B]], i32 0, i32 1)
+; CHECK: @vt3c = alias i1 (i8*, i32, i32)*, getelementptr inbounds ({ [8 x i8], i1 (i8*, i32, i32)*, [0 x i8] }, { [8 x i8], i1 (i8*, i32, i32)*, [0 x i8] }* [[CVT3C]], i32 0, i32 1)
+; CHECK: @vt3d = alias i1 (i8*, i32, i32)*, getelementptr inbounds ({ [8 x i8], i1 (i8*, i32, i32)*, [0 x i8] }, { [8 x i8], i1 (i8*, i32, i32)*, [0 x i8] }* [[CVT3D]], i32 0, i32 1)
+; CHECK: @vt4a = alias i32 (i8*, i32, i32)*, getelementptr inbounds ({ [8 x i8], i32 (i8*, i32, i32)*, [0 x i8] }, { [8 x i8], i32 (i8*, i32, i32)*, [0 x i8] }* [[CVT4A]], i32 0, i32 1)
+; CHECK: @vt4b = alias i32 (i8*, i32, i32)*, getelementptr inbounds ({ [8 x i8], i32 (i8*, i32, i32)*, [0 x i8] }, { [8 x i8], i32 (i8*, i32, i32)*, [0 x i8] }* [[CVT4B]], i32 0, i32 1)
+
+define i1 @vf0i1(i8* %this, i32, i32) readnone {
+ ret i1 0
+}
+
+define i1 @vf1i1(i8* %this, i32, i32) readnone {
+ ret i1 1
+}
+
+define i32 @vf1i32(i8* %this, i32, i32) readnone {
+ ret i32 1
+}
+
+define i32 @vf2i32(i8* %this, i32, i32) readnone {
+ ret i32 2
+}
+
+; CHECK: !0 = !{i32 8, !"typeid3"}
+; CHECK: !1 = !{i32 8, !"typeid4"}
+
+!0 = !{i32 0, !"typeid3"}
+!1 = !{i32 0, !"typeid4"}
diff --git a/test/Transforms/WholeProgramDevirt/import-indir.ll b/test/Transforms/WholeProgramDevirt/import-indir.ll
new file mode 100644
index 000000000000..1de9352eeb22
--- /dev/null
+++ b/test/Transforms/WholeProgramDevirt/import-indir.ll
@@ -0,0 +1,95 @@
+; Test that we correctly import an indir resolution for type identifier "typeid1".
+; RUN: opt -S -wholeprogramdevirt -wholeprogramdevirt-summary-action=import -wholeprogramdevirt-read-summary=%S/Inputs/import-indir.yaml -wholeprogramdevirt-write-summary=%t < %s | FileCheck %s
+; RUN: FileCheck --check-prefix=SUMMARY %s < %t
+
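+; An Indir resolution leaves the call indirect. Note that type.checked.load
+; users are still expanded into a plain load plus llvm.type.test (checked in
+; @f2 below).
+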
+; SUMMARY: GlobalValueMap:
+; SUMMARY-NEXT: 42:
+; SUMMARY-NEXT: - TypeTests:
+; SUMMARY-NEXT: TypeTestAssumeVCalls:
+; SUMMARY-NEXT: - GUID: 123
+; SUMMARY-NEXT: Offset: 0
+; SUMMARY-NEXT: - GUID: 456
+; SUMMARY-NEXT: Offset: 4
+; SUMMARY-NEXT: TypeCheckedLoadVCalls:
+; SUMMARY-NEXT: - GUID: 789
+; SUMMARY-NEXT: Offset: 8
+; SUMMARY-NEXT: - GUID: 1234
+; SUMMARY-NEXT: Offset: 16
+; SUMMARY-NEXT: TypeTestAssumeConstVCalls:
+; SUMMARY-NEXT: - VFunc:
+; SUMMARY-NEXT: GUID: 123
+; SUMMARY-NEXT: Offset: 4
+; SUMMARY-NEXT: Args: [ 12, 24 ]
+; SUMMARY-NEXT: TypeCheckedLoadConstVCalls:
+; SUMMARY-NEXT: - VFunc:
+; SUMMARY-NEXT: GUID: 456
+; SUMMARY-NEXT: Offset: 8
+; SUMMARY-NEXT: Args: [ 24, 12 ]
+; SUMMARY-NEXT: TypeIdMap:
+; SUMMARY-NEXT: typeid1:
+; SUMMARY-NEXT: TTRes:
+; SUMMARY-NEXT: Kind: Unsat
+; SUMMARY-NEXT: SizeM1BitWidth: 0
+; SUMMARY-NEXT: WPDRes:
+; SUMMARY-NEXT: 0:
+; SUMMARY-NEXT: Kind: Indir
+; SUMMARY-NEXT: SingleImplName: ''
+; SUMMARY-NEXT: ResByArg:
+; SUMMARY-NEXT: 4:
+; SUMMARY-NEXT: Kind: Indir
+; SUMMARY-NEXT: SingleImplName: ''
+; SUMMARY-NEXT: ResByArg:
+; SUMMARY-NEXT: :
+; SUMMARY-NEXT: Kind: UniformRetVal
+; SUMMARY-NEXT: Info: 12
+; SUMMARY-NEXT: 12:
+; SUMMARY-NEXT: Kind: UniformRetVal
+; SUMMARY-NEXT: Info: 24
+; SUMMARY-NEXT: 12,24:
+; SUMMARY-NEXT: Kind: UniformRetVal
+; SUMMARY-NEXT: Info: 48
+
+target datalayout = "e-p:32:32"
+
+declare void @llvm.assume(i1)
+declare void @llvm.trap()
+declare {i8*, i1} @llvm.type.checked.load(i8*, i32, metadata)
+declare i1 @llvm.type.test(i8*, metadata)
+
+; CHECK: define i1 @f1
+define i1 @f1(i8* %obj) {
+ %vtableptr = bitcast i8* %obj to [1 x i8*]**
+ %vtable = load [1 x i8*]*, [1 x i8*]** %vtableptr
+ %vtablei8 = bitcast [1 x i8*]* %vtable to i8*
+ %p = call i1 @llvm.type.test(i8* %vtablei8, metadata !"typeid1")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [1 x i8*], [1 x i8*]* %vtable, i32 0, i32 0
+ %fptr = load i8*, i8** %fptrptr
+ %fptr_casted = bitcast i8* %fptr to i1 (i8*, i32)*
+ ; CHECK: call i1 %
+ %result = call i1 %fptr_casted(i8* %obj, i32 5)
+ ret i1 %result
+}
+
+; CHECK: define i1 @f2
+define i1 @f2(i8* %obj) {
+ %vtableptr = bitcast i8* %obj to [1 x i8*]**
+ %vtable = load [1 x i8*]*, [1 x i8*]** %vtableptr
+ %vtablei8 = bitcast [1 x i8*]* %vtable to i8*
+ %pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtablei8, i32 4, metadata !"typeid1")
+ %fptr = extractvalue {i8*, i1} %pair, 0
+ %p = extractvalue {i8*, i1} %pair, 1
+ ; CHECK: [[P:%.*]] = call i1 @llvm.type.test
+ ; CHECK: br i1 [[P]]
+ br i1 %p, label %cont, label %trap
+
+cont:
+ %fptr_casted = bitcast i8* %fptr to i1 (i8*, i32)*
+ ; CHECK: call i1 %
+ %result = call i1 %fptr_casted(i8* %obj, i32 undef)
+ ret i1 %result
+
+trap:
+ call void @llvm.trap()
+ unreachable
+}
diff --git a/test/Transforms/WholeProgramDevirt/import.ll b/test/Transforms/WholeProgramDevirt/import.ll
new file mode 100644
index 000000000000..7f34b04ce119
--- /dev/null
+++ b/test/Transforms/WholeProgramDevirt/import.ll
@@ -0,0 +1,108 @@
+; RUN: opt -S -wholeprogramdevirt -wholeprogramdevirt-summary-action=import -wholeprogramdevirt-read-summary=%S/Inputs/import-single-impl.yaml < %s | FileCheck --check-prefixes=CHECK,SINGLE-IMPL %s
+; RUN: opt -S -wholeprogramdevirt -wholeprogramdevirt-summary-action=import -wholeprogramdevirt-read-summary=%S/Inputs/import-uniform-ret-val.yaml < %s | FileCheck --check-prefixes=CHECK,UNIFORM-RET-VAL %s
+; RUN: opt -S -wholeprogramdevirt -wholeprogramdevirt-summary-action=import -wholeprogramdevirt-read-summary=%S/Inputs/import-unique-ret-val0.yaml < %s | FileCheck --check-prefixes=CHECK,UNIQUE-RET-VAL0 %s
+; RUN: opt -S -wholeprogramdevirt -wholeprogramdevirt-summary-action=import -wholeprogramdevirt-read-summary=%S/Inputs/import-unique-ret-val1.yaml < %s | FileCheck --check-prefixes=CHECK,UNIQUE-RET-VAL1 %s
+; RUN: opt -S -wholeprogramdevirt -wholeprogramdevirt-summary-action=import -wholeprogramdevirt-read-summary=%S/Inputs/import-vcp.yaml < %s | FileCheck --check-prefixes=CHECK,VCP,VCP64 %s
+; RUN: opt -S -wholeprogramdevirt -wholeprogramdevirt-summary-action=import -wholeprogramdevirt-read-summary=%S/Inputs/import-vcp.yaml -mtriple=i686-unknown-linux -data-layout=e-p:32:32 < %s | FileCheck --check-prefixes=CHECK,VCP,VCP32 %s
+
+target datalayout = "e-p:64:64"
+target triple = "x86_64-unknown-linux-gnu"
+
+; VCP: @__typeid_typeid1_0_1_byte = external hidden global i8, !absolute_symbol !0
+; VCP: @__typeid_typeid1_0_1_bit = external hidden global i8, !absolute_symbol !1
+; VCP: @__typeid_typeid2_8_3_byte = external hidden global i8, !absolute_symbol !0
+; VCP: @__typeid_typeid2_8_3_bit = external hidden global i8, !absolute_symbol !1
+
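+; The _byte and _bit globals above are external absolute symbols resolved at
+; link time: _byte holds the offset of the propagated constant relative to the
+; vtable address point, and _bit holds the mask used for i1 return values.
+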
+; Test cases where the argument values are known and we can apply virtual
+; constant propagation.
+
+; CHECK: define i32 @call1
+define i32 @call1(i8* %obj) {
+ %vtableptr = bitcast i8* %obj to [3 x i8*]**
+ %vtable = load [3 x i8*]*, [3 x i8*]** %vtableptr
+ %vtablei8 = bitcast [3 x i8*]* %vtable to i8*
+ %p = call i1 @llvm.type.test(i8* %vtablei8, metadata !"typeid1")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [3 x i8*], [3 x i8*]* %vtable, i32 0, i32 0
+ %fptr = load i8*, i8** %fptrptr
+ %fptr_casted = bitcast i8* %fptr to i32 (i8*, i32)*
+ ; SINGLE-IMPL: call i32 bitcast (void ()* @singleimpl1 to i32 (i8*, i32)*)
+ %result = call i32 %fptr_casted(i8* %obj, i32 1)
+ ; UNIFORM-RET-VAL: ret i32 42
+ ; VCP: [[VT1:%.*]] = bitcast {{.*}} to i8*
+ ; VCP: [[GEP1:%.*]] = getelementptr i8, i8* [[VT1]], i32 ptrtoint (i8* @__typeid_typeid1_0_1_byte to i32)
+ ; VCP: [[BC1:%.*]] = bitcast i8* [[GEP1]] to i32*
+ ; VCP: [[LOAD1:%.*]] = load i32, i32* [[BC1]]
+ ; VCP: ret i32 [[LOAD1]]
+ ret i32 %result
+}
+
+; Test cases where the argument values are unknown, so we cannot apply virtual
+; constant propagation.
+
+; CHECK: define i1 @call2
+define i1 @call2(i8* %obj) {
+ %vtableptr = bitcast i8* %obj to [1 x i8*]**
+ %vtable = load [1 x i8*]*, [1 x i8*]** %vtableptr
+ %vtablei8 = bitcast [1 x i8*]* %vtable to i8*
+ %pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtablei8, i32 8, metadata !"typeid2")
+ %fptr = extractvalue {i8*, i1} %pair, 0
+ %p = extractvalue {i8*, i1} %pair, 1
+ ; SINGLE-IMPL: br i1 true,
+ br i1 %p, label %cont, label %trap
+
+cont:
+ %fptr_casted = bitcast i8* %fptr to i1 (i8*, i32)*
+ ; SINGLE-IMPL: call i1 bitcast (void ()* @singleimpl2 to i1 (i8*, i32)*)
+ ; UNIFORM-RET-VAL: call i1 %
+ ; UNIQUE-RET-VAL0: call i1 %
+ ; UNIQUE-RET-VAL1: call i1 %
+ %result = call i1 %fptr_casted(i8* %obj, i32 undef)
+ ret i1 %result
+
+trap:
+ call void @llvm.trap()
+ unreachable
+}
+
+; CHECK: define i1 @call3
+define i1 @call3(i8* %obj) {
+ %vtableptr = bitcast i8* %obj to [1 x i8*]**
+ %vtable = load [1 x i8*]*, [1 x i8*]** %vtableptr
+ %vtablei8 = bitcast [1 x i8*]* %vtable to i8*
+ %pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtablei8, i32 8, metadata !"typeid2")
+ %fptr = extractvalue {i8*, i1} %pair, 0
+ %p = extractvalue {i8*, i1} %pair, 1
+ br i1 %p, label %cont, label %trap
+
+cont:
+ %fptr_casted = bitcast i8* %fptr to i1 (i8*, i32)*
+ %result = call i1 %fptr_casted(i8* %obj, i32 3)
+ ; UNIQUE-RET-VAL0: icmp ne i8* %vtablei8, @__typeid_typeid2_8_3_unique_member
+ ; UNIQUE-RET-VAL1: icmp eq i8* %vtablei8, @__typeid_typeid2_8_3_unique_member
+ ; VCP: [[VT2:%.*]] = bitcast {{.*}} to i8*
+ ; VCP: [[GEP2:%.*]] = getelementptr i8, i8* [[VT2]], i32 ptrtoint (i8* @__typeid_typeid2_8_3_byte to i32)
+ ; VCP: [[LOAD2:%.*]] = load i8, i8* [[GEP2]]
+ ; VCP: [[AND2:%.*]] = and i8 [[LOAD2]], ptrtoint (i8* @__typeid_typeid2_8_3_bit to i8)
+ ; VCP: [[ICMP2:%.*]] = icmp ne i8 [[AND2]], 0
+ ; VCP: ret i1 [[ICMP2]]
+ ret i1 %result
+
+trap:
+ call void @llvm.trap()
+ unreachable
+}
+
+; SINGLE-IMPL-DAG: declare void @singleimpl1()
+; SINGLE-IMPL-DAG: declare void @singleimpl2()
+
+; VCP32: !0 = !{i32 -1, i32 -1}
+; VCP64: !0 = !{i64 0, i64 4294967296}
+
+; VCP32: !1 = !{i32 0, i32 256}
+; VCP64: !1 = !{i64 0, i64 256}
+
+declare void @llvm.assume(i1)
+declare void @llvm.trap()
+declare {i8*, i1} @llvm.type.checked.load(i8*, i32, metadata)
+declare i1 @llvm.type.test(i8*, metadata)
diff --git a/test/Transforms/WholeProgramDevirt/unique-retval.ll b/test/Transforms/WholeProgramDevirt/unique-retval.ll
index 50b938c43e4a..e9ae176fe8ac 100644
--- a/test/Transforms/WholeProgramDevirt/unique-retval.ll
+++ b/test/Transforms/WholeProgramDevirt/unique-retval.ll
@@ -33,8 +33,8 @@ define i1 @call1(i8* %obj) {
ret i1 %result
}
-; CHECK: define i1 @call2
-define i1 @call2(i8* %obj) {
+; CHECK: define i32 @call2
+define i32 @call2(i8* %obj) {
%vtableptr = bitcast i8* %obj to [1 x i8*]**
%vtable = load [1 x i8*]*, [1 x i8*]** %vtableptr
; CHECK: [[VT2:%[^ ]*]] = bitcast [1 x i8*]* {{.*}} to i8*
@@ -43,10 +43,13 @@ define i1 @call2(i8* %obj) {
call void @llvm.assume(i1 %p)
%fptrptr = getelementptr [1 x i8*], [1 x i8*]* %vtable, i32 0, i32 0
%fptr = load i8*, i8** %fptrptr
- %fptr_casted = bitcast i8* %fptr to i1 (i8*)*
- ; CHECK: [[RES1:%[^ ]*]] = icmp ne i8* [[VT1]], bitcast ([1 x i8*]* @vt2 to i8*)
- %result = call i1 %fptr_casted(i8* %obj)
- ret i1 %result
+ ; Intentional type mismatch to test zero extend.
+ %fptr_casted = bitcast i8* %fptr to i32 (i8*)*
+ ; CHECK: [[RES2:%[^ ]*]] = icmp ne i8* [[VT1]], bitcast ([1 x i8*]* @vt2 to i8*)
+ %result = call i32 %fptr_casted(i8* %obj)
+ ; CHECK: [[ZEXT2:%[^ ]*]] = zext i1 [[RES2]] to i32
+ ; CHECK: ret i32 [[ZEXT2]]
+ ret i32 %result
}
declare i1 @llvm.type.test(i8*, metadata)
diff --git a/test/Transforms/WholeProgramDevirt/vcp-accesses-memory.ll b/test/Transforms/WholeProgramDevirt/vcp-accesses-memory.ll
index b5d51f2d4637..ca76383c4943 100644
--- a/test/Transforms/WholeProgramDevirt/vcp-accesses-memory.ll
+++ b/test/Transforms/WholeProgramDevirt/vcp-accesses-memory.ll
@@ -1,21 +1,37 @@
; RUN: opt -S -wholeprogramdevirt %s | FileCheck %s
+; RUN: opt -S -passes=wholeprogramdevirt %s | FileCheck %s
target datalayout = "e-p:64:64"
target triple = "x86_64-unknown-linux-gnu"
-@vt1 = global [1 x i8*] [i8* bitcast (i32 (i8*, i32)* @vf1 to i8*)], !type !0
-@vt2 = global [1 x i8*] [i8* bitcast (i32 (i8*, i32)* @vf2 to i8*)], !type !0
+@vt1 = constant [2 x i8*] [i8* bitcast (i32 (i8*, i32)* @vf1a to i8*), i8* bitcast (i32 (i8*, i32)* @vf1b to i8*)], !type !0
+@vt2 = constant [2 x i8*] [i8* bitcast (i32 (i8*, i32)* @vf2a to i8*), i8* bitcast (i32 (i8*, i32)* @vf2b to i8*)], !type !0
-define i32 @vf1(i8* %this, i32 %arg) {
+@sink = external global i32
+
+define i32 @vf1a(i8* %this, i32 %arg) {
+ store i32 %arg, i32* @sink
+ ret i32 %arg
+}
+
+define i32 @vf2a(i8* %this, i32 %arg) {
+ store i32 %arg, i32* @sink
+ ret i32 %arg
+}
+
+define i32 @vf1b(i8* %this, i32 %arg) {
ret i32 %arg
}
-define i32 @vf2(i8* %this, i32 %arg) {
+define i32 @vf2b(i8* %this, i32 %arg) {
ret i32 %arg
}
-; CHECK: define i32 @call
-define i32 @call(i8* %obj) {
+; Test that we don't apply VCP if the virtual function body accesses memory,
+; even if the function returns a constant.
+
+; CHECK: define i32 @call1
+define i32 @call1(i8* %obj) {
%vtableptr = bitcast i8* %obj to [1 x i8*]**
%vtable = load [1 x i8*]*, [1 x i8*]** %vtableptr
%vtablei8 = bitcast [1 x i8*]* %vtable to i8*
@@ -29,6 +45,24 @@ define i32 @call(i8* %obj) {
ret i32 %result
}
+; Test that we can apply VCP regardless of the function attributes by analyzing
+; the function body itself.
+
+; CHECK: define i32 @call2
+define i32 @call2(i8* %obj) {
+ %vtableptr = bitcast i8* %obj to [1 x i8*]**
+ %vtable = load [1 x i8*]*, [1 x i8*]** %vtableptr
+ %vtablei8 = bitcast [1 x i8*]* %vtable to i8*
+ %p = call i1 @llvm.type.test(i8* %vtablei8, metadata !"typeid")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [1 x i8*], [1 x i8*]* %vtable, i32 0, i32 1
+ %fptr = load i8*, i8** %fptrptr
+ %fptr_casted = bitcast i8* %fptr to i32 (i8*, i32)*
+ %result = call i32 %fptr_casted(i8* %obj, i32 1)
+ ; CHECK: ret i32 1
+ ret i32 %result
+}
+
declare i1 @llvm.type.test(i8*, metadata)
declare void @llvm.assume(i1)
diff --git a/test/Transforms/WholeProgramDevirt/vcp-decl.ll b/test/Transforms/WholeProgramDevirt/vcp-decl.ll
new file mode 100644
index 000000000000..1c4e2fbe97aa
--- /dev/null
+++ b/test/Transforms/WholeProgramDevirt/vcp-decl.ll
@@ -0,0 +1,32 @@
+; RUN: opt -S -wholeprogramdevirt %s | FileCheck %s
+
+target datalayout = "e-p:64:64"
+target triple = "x86_64-unknown-linux-gnu"
+
+@vt1 = constant [1 x i8*] [i8* bitcast (i32 (i8*, i32)* @vf1 to i8*)], !type !0
+@vt2 = constant [1 x i8*] [i8* bitcast (i32 (i8*, i32)* @vf2 to i8*)], !type !0
+
+declare i32 @vf1(i8* %this, i32 %arg) readnone
+
+define i32 @vf2(i8* %this, i32 %arg) readnone {
+ ret i32 %arg
+}
+
+; CHECK: define i32 @fn
+define i32 @fn(i8* %obj) {
+ %vtableptr = bitcast i8* %obj to [1 x i8*]**
+ %vtable = load [1 x i8*]*, [1 x i8*]** %vtableptr
+ %vtablei8 = bitcast [1 x i8*]* %vtable to i8*
+ %p = call i1 @llvm.type.test(i8* %vtablei8, metadata !"typeid")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [1 x i8*], [1 x i8*]* %vtable, i32 0, i32 0
+ %fptr = load i8*, i8** %fptrptr
+ %fptr_casted = bitcast i8* %fptr to i32 (i8*, i32)*
+ ; CHECK: call i32 %
+ %result = call i32 %fptr_casted(i8* %obj, i32 1)
+ ret i32 %result
+}
+declare i1 @llvm.type.test(i8*, metadata)
+declare void @llvm.assume(i1)
+
+!0 = !{i32 0, !"typeid"}
diff --git a/test/Transforms/WholeProgramDevirt/vcp-no-this.ll b/test/Transforms/WholeProgramDevirt/vcp-no-this.ll
index c564665471cf..ce76c8e6797e 100644
--- a/test/Transforms/WholeProgramDevirt/vcp-no-this.ll
+++ b/test/Transforms/WholeProgramDevirt/vcp-no-this.ll
@@ -3,8 +3,8 @@
target datalayout = "e-p:64:64"
target triple = "x86_64-unknown-linux-gnu"
-@vt1 = global [1 x i8*] [i8* bitcast (i32 ()* @vf1 to i8*)], !type !0
-@vt2 = global [1 x i8*] [i8* bitcast (i32 ()* @vf2 to i8*)], !type !0
+@vt1 = constant [1 x i8*] [i8* bitcast (i32 ()* @vf1 to i8*)], !type !0
+@vt2 = constant [1 x i8*] [i8* bitcast (i32 ()* @vf2 to i8*)], !type !0
define i32 @vf1() readnone {
ret i32 1
diff --git a/test/Transforms/WholeProgramDevirt/vcp-non-constant-arg.ll b/test/Transforms/WholeProgramDevirt/vcp-non-constant-arg.ll
index 197c923c3a1c..cc2ff33296a9 100644
--- a/test/Transforms/WholeProgramDevirt/vcp-non-constant-arg.ll
+++ b/test/Transforms/WholeProgramDevirt/vcp-non-constant-arg.ll
@@ -3,8 +3,8 @@
target datalayout = "e-p:64:64"
target triple = "x86_64-unknown-linux-gnu"
-@vt1 = global [1 x i8*] [i8* bitcast (i32 (i8*, i32)* @vf1 to i8*)], !type !0
-@vt2 = global [1 x i8*] [i8* bitcast (i32 (i8*, i32)* @vf2 to i8*)], !type !0
+@vt1 = constant [1 x i8*] [i8* bitcast (i32 (i8*, i32)* @vf1 to i8*)], !type !0
+@vt2 = constant [1 x i8*] [i8* bitcast (i32 (i8*, i32)* @vf2 to i8*)], !type !0
define i32 @vf1(i8* %this, i32 %arg) readnone {
ret i32 %arg
diff --git a/test/Transforms/WholeProgramDevirt/vcp-too-wide-ints.ll b/test/Transforms/WholeProgramDevirt/vcp-too-wide-ints.ll
index 93936d5e1d27..c24c3b4be683 100644
--- a/test/Transforms/WholeProgramDevirt/vcp-too-wide-ints.ll
+++ b/test/Transforms/WholeProgramDevirt/vcp-too-wide-ints.ll
@@ -3,33 +3,63 @@
target datalayout = "e-p:64:64"
target triple = "x86_64-unknown-linux-gnu"
-@vt1 = global [1 x i8*] [i8* bitcast (i128 (i8*, i128)* @vf1 to i8*)], !type !0
-@vt2 = global [1 x i8*] [i8* bitcast (i128 (i8*, i128)* @vf2 to i8*)], !type !0
+@vt1 = constant [1 x i8*] [i8* bitcast (i64 (i8*, i128)* @vf1 to i8*)], !type !0
+@vt2 = constant [1 x i8*] [i8* bitcast (i64 (i8*, i128)* @vf2 to i8*)], !type !0
+@vt3 = constant [1 x i8*] [i8* bitcast (i128 (i8*, i64)* @vf3 to i8*)], !type !1
+@vt4 = constant [1 x i8*] [i8* bitcast (i128 (i8*, i64)* @vf4 to i8*)], !type !1
-define i128 @vf1(i8* %this, i128 %arg) readnone {
- ret i128 %arg
+define i64 @vf1(i8* %this, i128 %arg) readnone {
+ %argtrunc = trunc i128 %arg to i64
+ ret i64 %argtrunc
}
-define i128 @vf2(i8* %this, i128 %arg) readnone {
- ret i128 %arg
+define i64 @vf2(i8* %this, i128 %arg) readnone {
+ %argtrunc = trunc i128 %arg to i64
+ ret i64 %argtrunc
}
-; CHECK: define i128 @call
-define i128 @call(i8* %obj) {
+define i128 @vf3(i8* %this, i64 %arg) readnone {
+ %argzext = zext i64 %arg to i128
+ ret i128 %argzext
+}
+
+define i128 @vf4(i8* %this, i64 %arg) readnone {
+ %argzext = zext i64 %arg to i128
+ ret i128 %argzext
+}
+
+; CHECK: define i64 @call1
+define i64 @call1(i8* %obj) {
+ %vtableptr = bitcast i8* %obj to [1 x i8*]**
+ %vtable = load [1 x i8*]*, [1 x i8*]** %vtableptr
+ %vtablei8 = bitcast [1 x i8*]* %vtable to i8*
+ %p = call i1 @llvm.type.test(i8* %vtablei8, metadata !"typeid1")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [1 x i8*], [1 x i8*]* %vtable, i32 0, i32 0
+ %fptr = load i8*, i8** %fptrptr
+ %fptr_casted = bitcast i8* %fptr to i64 (i8*, i128)*
+ ; CHECK: call i64 %
+ %result = call i64 %fptr_casted(i8* %obj, i128 1)
+ ret i64 %result
+}
+
+; CHECK: define i128 @call2
+define i128 @call2(i8* %obj) {
%vtableptr = bitcast i8* %obj to [1 x i8*]**
%vtable = load [1 x i8*]*, [1 x i8*]** %vtableptr
%vtablei8 = bitcast [1 x i8*]* %vtable to i8*
- %p = call i1 @llvm.type.test(i8* %vtablei8, metadata !"typeid")
+ %p = call i1 @llvm.type.test(i8* %vtablei8, metadata !"typeid2")
call void @llvm.assume(i1 %p)
%fptrptr = getelementptr [1 x i8*], [1 x i8*]* %vtable, i32 0, i32 0
%fptr = load i8*, i8** %fptrptr
- %fptr_casted = bitcast i8* %fptr to i128 (i8*, i128)*
+ %fptr_casted = bitcast i8* %fptr to i128 (i8*, i64)*
; CHECK: call i128 %
- %result = call i128 %fptr_casted(i8* %obj, i128 1)
+ %result = call i128 %fptr_casted(i8* %obj, i64 1)
ret i128 %result
}
declare i1 @llvm.type.test(i8*, metadata)
declare void @llvm.assume(i1)
-!0 = !{i32 0, !"typeid"}
+!0 = !{i32 0, !"typeid1"}
+!1 = !{i32 0, !"typeid2"}
diff --git a/test/Transforms/WholeProgramDevirt/vcp-type-mismatch.ll b/test/Transforms/WholeProgramDevirt/vcp-type-mismatch.ll
index 3124889a7070..7016263f8f7b 100644
--- a/test/Transforms/WholeProgramDevirt/vcp-type-mismatch.ll
+++ b/test/Transforms/WholeProgramDevirt/vcp-type-mismatch.ll
@@ -1,10 +1,16 @@
; RUN: opt -S -wholeprogramdevirt %s | FileCheck %s
+; Test that we correctly handle function type mismatches in argument counts
+; and bitwidths. We handle an argument count mismatch by refusing
+; to optimize. For bitwidth mismatches, we allow the optimization in order
+; to simplify the implementation. This is legal because the bitwidth mismatch
+; gives the call undefined behavior.
+
target datalayout = "e-p:64:64"
target triple = "x86_64-unknown-linux-gnu"
-@vt1 = global [1 x i8*] [i8* bitcast (i32 (i8*, i32)* @vf1 to i8*)], !type !0
-@vt2 = global [1 x i8*] [i8* bitcast (i32 (i8*, i32)* @vf2 to i8*)], !type !0
+@vt1 = constant [1 x i8*] [i8* bitcast (i32 (i8*, i32)* @vf1 to i8*)], !type !0
+@vt2 = constant [1 x i8*] [i8* bitcast (i32 (i8*, i32)* @vf2 to i8*)], !type !0
define i32 @vf1(i8* %this, i32 %arg) readnone {
ret i32 %arg
@@ -24,8 +30,8 @@ define i32 @bad_arg_type(i8* %obj) {
%fptrptr = getelementptr [1 x i8*], [1 x i8*]* %vtable, i32 0, i32 0
%fptr = load i8*, i8** %fptrptr
%fptr_casted = bitcast i8* %fptr to i32 (i8*, i64)*
- ; CHECK: call i32 %
%result = call i32 %fptr_casted(i8* %obj, i64 1)
+ ; CHECK: ret i32 1
ret i32 %result
}
@@ -54,8 +60,8 @@ define i64 @bad_return_type(i8* %obj) {
%fptrptr = getelementptr [1 x i8*], [1 x i8*]* %vtable, i32 0, i32 0
%fptr = load i8*, i8** %fptrptr
%fptr_casted = bitcast i8* %fptr to i64 (i8*, i32)*
- ; CHECK: call i64 %
%result = call i64 %fptr_casted(i8* %obj, i32 1)
+ ; CHECK: ret i64 1
ret i64 %result
}
diff --git a/test/Transforms/WholeProgramDevirt/vcp-uses-this.ll b/test/Transforms/WholeProgramDevirt/vcp-uses-this.ll
index fc4dee37dba7..542402e16577 100644
--- a/test/Transforms/WholeProgramDevirt/vcp-uses-this.ll
+++ b/test/Transforms/WholeProgramDevirt/vcp-uses-this.ll
@@ -3,8 +3,8 @@
target datalayout = "e-p:64:64"
target triple = "x86_64-unknown-linux-gnu"
-@vt1 = global [1 x i8*] [i8* bitcast (i32 (i8*)* @vf1 to i8*)], !type !0
-@vt2 = global [1 x i8*] [i8* bitcast (i32 (i8*)* @vf2 to i8*)], !type !0
+@vt1 = constant [1 x i8*] [i8* bitcast (i32 (i8*)* @vf1 to i8*)], !type !0
+@vt2 = constant [1 x i8*] [i8* bitcast (i32 (i8*)* @vf2 to i8*)], !type !0
define i32 @vf1(i8* %this) readnone {
%this_int = ptrtoint i8* %this to i32
diff --git a/test/Transforms/WholeProgramDevirt/virtual-const-prop-begin.ll b/test/Transforms/WholeProgramDevirt/virtual-const-prop-begin.ll
index 530fe8aa89d0..080ed6caac5e 100644
--- a/test/Transforms/WholeProgramDevirt/virtual-const-prop-begin.ll
+++ b/test/Transforms/WholeProgramDevirt/virtual-const-prop-begin.ll
@@ -78,7 +78,7 @@ define i1 @call1(i8* %obj) {
%fptrptr = getelementptr [3 x i8*], [3 x i8*]* %vtable, i32 0, i32 0
%fptr = load i8*, i8** %fptrptr
%fptr_casted = bitcast i8* %fptr to i1 (i8*)*
- ; CHECK: [[VTGEP1:%[^ ]*]] = getelementptr i8, i8* [[VT1]], i64 -5
+ ; CHECK: [[VTGEP1:%[^ ]*]] = getelementptr i8, i8* [[VT1]], i32 -5
; CHECK: [[VTLOAD1:%[^ ]*]] = load i8, i8* [[VTGEP1]]
; CHECK: [[VTAND1:%[^ ]*]] = and i8 [[VTLOAD1]], 2
; CHECK: [[VTCMP1:%[^ ]*]] = icmp ne i8 [[VTAND1]], 0
@@ -98,7 +98,7 @@ define i1 @call2(i8* %obj) {
%fptrptr = getelementptr [3 x i8*], [3 x i8*]* %vtable, i32 0, i32 1
%fptr = load i8*, i8** %fptrptr
%fptr_casted = bitcast i8* %fptr to i1 (i8*)*
- ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, i8* [[VT2]], i64 -5
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, i8* [[VT2]], i32 -5
; CHECK: [[VTLOAD2:%[^ ]*]] = load i8, i8* [[VTGEP2]]
; CHECK: [[VTAND2:%[^ ]*]] = and i8 [[VTLOAD2]], 1
; CHECK: [[VTCMP2:%[^ ]*]] = icmp ne i8 [[VTAND2]], 0
@@ -118,7 +118,7 @@ define i32 @call3(i8* %obj) {
%fptrptr = getelementptr [3 x i8*], [3 x i8*]* %vtable, i32 0, i32 2
%fptr = load i8*, i8** %fptrptr
%fptr_casted = bitcast i8* %fptr to i32 (i8*)*
- ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, i8* [[VT3]], i64 -4
+ ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, i8* [[VT3]], i32 -4
; CHECK: [[VTBC3:%[^ ]*]] = bitcast i8* [[VTGEP3]] to i32*
; CHECK: [[VTLOAD3:%[^ ]*]] = load i32, i32* [[VTBC3]]
%result = call i32 %fptr_casted(i8* %obj)
diff --git a/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll b/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll
index fcf00d6d86c2..3299f7bce65b 100644
--- a/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll
+++ b/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll
@@ -87,7 +87,7 @@ define i1 @call1(i8* %obj) {
%pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtablei8, i32 0, metadata !"typeid")
%fptr = extractvalue {i8*, i1} %pair, 0
%fptr_casted = bitcast i8* %fptr to i1 (i8*)*
- ; CHECK: [[VTGEP1:%[^ ]*]] = getelementptr i8, i8* [[VT1]], i64 -5
+ ; CHECK: [[VTGEP1:%[^ ]*]] = getelementptr i8, i8* [[VT1]], i32 -5
; CHECK: [[VTLOAD1:%[^ ]*]] = load i8, i8* [[VTGEP1]]
; CHECK: [[VTAND1:%[^ ]*]] = and i8 [[VTLOAD1]], 2
; CHECK: [[VTCMP1:%[^ ]*]] = icmp ne i8 [[VTAND1]], 0
@@ -108,7 +108,7 @@ define i1 @call2(i8* %obj) {
%pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtablei8, i32 8, metadata !"typeid")
%fptr = extractvalue {i8*, i1} %pair, 0
%fptr_casted = bitcast i8* %fptr to i1 (i8*)*
- ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, i8* [[VT2]], i64 -5
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, i8* [[VT2]], i32 -5
; CHECK: [[VTLOAD2:%[^ ]*]] = load i8, i8* [[VTGEP2]]
; CHECK: [[VTAND2:%[^ ]*]] = and i8 [[VTLOAD2]], 1
; CHECK: [[VTCMP2:%[^ ]*]] = icmp ne i8 [[VTAND2]], 0
@@ -129,7 +129,7 @@ define i32 @call3(i8* %obj) {
%pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtablei8, i32 16, metadata !"typeid")
%fptr = extractvalue {i8*, i1} %pair, 0
%fptr_casted = bitcast i8* %fptr to i32 (i8*)*
- ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, i8* [[VT3]], i64 -4
+ ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, i8* [[VT3]], i32 -4
; CHECK: [[VTBC3:%[^ ]*]] = bitcast i8* [[VTGEP3]] to i32*
; CHECK: [[VTLOAD3:%[^ ]*]] = load i32, i32* [[VTBC3]]
%result = call i32 %fptr_casted(i8* %obj)
diff --git a/test/Transforms/WholeProgramDevirt/virtual-const-prop-end.ll b/test/Transforms/WholeProgramDevirt/virtual-const-prop-end.ll
index 75ec6ba95ef1..14360c78d950 100644
--- a/test/Transforms/WholeProgramDevirt/virtual-const-prop-end.ll
+++ b/test/Transforms/WholeProgramDevirt/virtual-const-prop-end.ll
@@ -73,7 +73,7 @@ define i1 @call1(i8* %obj) {
%fptrptr = getelementptr [3 x i8*], [3 x i8*]* %vtable, i32 0, i32 0
%fptr = load i8*, i8** %fptrptr
%fptr_casted = bitcast i8* %fptr to i1 (i8*)*
- ; CHECK: [[VTGEP1:%[^ ]*]] = getelementptr i8, i8* [[VT1]], i64 28
+ ; CHECK: [[VTGEP1:%[^ ]*]] = getelementptr i8, i8* [[VT1]], i32 28
; CHECK: [[VTLOAD1:%[^ ]*]] = load i8, i8* [[VTGEP1]]
; CHECK: [[VTAND1:%[^ ]*]] = and i8 [[VTLOAD1]], 2
; CHECK: [[VTCMP1:%[^ ]*]] = icmp ne i8 [[VTAND1]], 0
@@ -93,7 +93,7 @@ define i1 @call2(i8* %obj) {
%fptrptr = getelementptr [3 x i8*], [3 x i8*]* %vtable, i32 0, i32 1
%fptr = load i8*, i8** %fptrptr
%fptr_casted = bitcast i8* %fptr to i1 (i8*)*
- ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, i8* [[VT2]], i64 28
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, i8* [[VT2]], i32 28
; CHECK: [[VTLOAD2:%[^ ]*]] = load i8, i8* [[VTGEP2]]
; CHECK: [[VTAND2:%[^ ]*]] = and i8 [[VTLOAD2]], 1
; CHECK: [[VTCMP2:%[^ ]*]] = icmp ne i8 [[VTAND2]], 0
@@ -113,7 +113,7 @@ define i32 @call3(i8* %obj) {
%fptrptr = getelementptr [3 x i8*], [3 x i8*]* %vtable, i32 0, i32 2
%fptr = load i8*, i8** %fptrptr
%fptr_casted = bitcast i8* %fptr to i32 (i8*)*
- ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, i8* [[VT3]], i64 24
+ ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, i8* [[VT3]], i32 24
; CHECK: [[VTBC3:%[^ ]*]] = bitcast i8* [[VTGEP3]] to i32*
; CHECK: [[VTLOAD3:%[^ ]*]] = load i32, i32* [[VTBC3]]
%result = call i32 %fptr_casted(i8* %obj)
diff --git a/test/Verifier/amdgpu-cc.ll b/test/Verifier/amdgpu-cc.ll
new file mode 100644
index 000000000000..68c7f309b6e1
--- /dev/null
+++ b/test/Verifier/amdgpu-cc.ll
@@ -0,0 +1,55 @@
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+
+; CHECK: Calling convention requires void return type
+; CHECK-NEXT: i32 ()* @nonvoid_cc_amdgpu_kernel
+define amdgpu_kernel i32 @nonvoid_cc_amdgpu_kernel() {
+ ret i32 0
+}
+
+; CHECK: Calling convention does not support varargs or perfect forwarding!
+; CHECK-NEXT: void (...)* @varargs_amdgpu_kernel
+define amdgpu_kernel void @varargs_amdgpu_kernel(...) {
+ ret void
+}
+
+; CHECK: Calling convention does not allow sret
+; CHECK-NEXT: void (i32*)* @sret_cc_amdgpu_kernel
+define amdgpu_kernel void @sret_cc_amdgpu_kernel(i32* sret %ptr) {
+ ret void
+}
+
+; CHECK: Calling convention does not support varargs or perfect forwarding!
+; CHECK-NEXT: void (...)* @varargs_amdgpu_vs
+define amdgpu_vs void @varargs_amdgpu_vs(...) {
+ ret void
+}
+
+; CHECK: Calling convention does not support varargs or perfect forwarding!
+; CHECK-NEXT: void (...)* @varargs_amdgpu_gs
+define amdgpu_gs void @varargs_amdgpu_gs(...) {
+ ret void
+}
+
+; CHECK: Calling convention does not support varargs or perfect forwarding!
+; CHECK-NEXT: void (...)* @varargs_amdgpu_ps
+define amdgpu_ps void @varargs_amdgpu_ps(...) {
+ ret void
+}
+
+; CHECK: Calling convention does not support varargs or perfect forwarding!
+; CHECK-NEXT: void (...)* @varargs_amdgpu_cs
+define amdgpu_cs void @varargs_amdgpu_cs(...) {
+ ret void
+}
+
+; CHECK: Calling convention requires void return type
+; CHECK-NEXT: i32 ()* @nonvoid_cc_spir_kernel
+define spir_kernel i32 @nonvoid_cc_spir_kernel() {
+ ret i32 0
+}
+
+; CHECK: Calling convention does not support varargs or perfect forwarding!
+; CHECK-NEXT: void (...)* @varargs_spir_kernel
+define spir_kernel void @varargs_spir_kernel(...) {
+ ret void
+}
diff --git a/test/Verifier/dbg-line-without-file.ll b/test/Verifier/dbg-line-without-file.ll
new file mode 100644
index 000000000000..4d5725959ef1
--- /dev/null
+++ b/test/Verifier/dbg-line-without-file.ll
@@ -0,0 +1,15 @@
+; RUN: not llvm-as -disable-output <%s 2>&1 | FileCheck %s
+; CHECK: assembly parsed, but does not verify
+; CHECK: line specified with no file
+
+define void @foo() !dbg !3 {
+ ret void
+}
+
+!llvm.module.flags = !{!0}
+!llvm.dbg.cu = !{!1}
+
+!0 = !{i32 2, !"Debug Info Version", i32 3}
+!1 = distinct !DICompileUnit(language: DW_LANG_C, file: !2)
+!2 = !DIFile(filename: "foo.c", directory: "")
+!3 = distinct !DISubprogram(name: "foo", scope: !1, line: 1, unit: !1)
diff --git a/test/Verifier/dbg-orphaned-compileunit.ll b/test/Verifier/dbg-orphaned-compileunit.ll
index 0be14a2fa662..9ab72824624d 100644
--- a/test/Verifier/dbg-orphaned-compileunit.ll
+++ b/test/Verifier/dbg-orphaned-compileunit.ll
@@ -1,6 +1,7 @@
; RUN: not llvm-as -disable-output <%s 2>&1 | FileCheck %s
; CHECK: assembly parsed, but does not verify
-; CHECK-NEXT: All DICompileUnits must be listed in llvm.dbg.cu
+; CHECK-NEXT: DICompileUnit not listed in llvm.dbg.cu
+; CHECK-NEXT: !0 = distinct !DICompileUnit(language: DW_LANG_Fortran77, file: !1, isOptimized: false, runtimeVersion: 0, emissionKind: NoDebug)
!named = !{!1}
!llvm.module.flags = !{!0}
diff --git a/test/Verifier/diderivedtype-address-space-atomic-type.ll b/test/Verifier/diderivedtype-address-space-atomic-type.ll
new file mode 100644
index 000000000000..f7926ed94946
--- /dev/null
+++ b/test/Verifier/diderivedtype-address-space-atomic-type.ll
@@ -0,0 +1,6 @@
+; RUN: not opt -S < %s 2>&1 | FileCheck %s
+
+!named = !{!0, !1}
+!0 = !DIBasicType(tag: DW_TAG_base_type, name: "name", size: 1, align: 2, encoding: DW_ATE_unsigned_char)
+; CHECK: DWARF address space only applies to pointer or reference types
+!1 = !DIDerivedType(tag: DW_TAG_atomic_type, baseType: !0, size: 32, align: 32, dwarfAddressSpace: 1)
diff --git a/test/Verifier/diderivedtype-address-space-const-type.ll b/test/Verifier/diderivedtype-address-space-const-type.ll
new file mode 100644
index 000000000000..deba63943816
--- /dev/null
+++ b/test/Verifier/diderivedtype-address-space-const-type.ll
@@ -0,0 +1,6 @@
+; RUN: not opt -S < %s 2>&1 | FileCheck %s
+
+!named = !{!0, !1}
+!0 = !DIBasicType(tag: DW_TAG_base_type, name: "name", size: 1, align: 2, encoding: DW_ATE_unsigned_char)
+; CHECK: DWARF address space only applies to pointer or reference types
+!1 = !DIDerivedType(tag: DW_TAG_const_type, baseType: !0, size: 32, align: 32, dwarfAddressSpace: 1)
diff --git a/test/Verifier/diderivedtype-address-space-friend.ll b/test/Verifier/diderivedtype-address-space-friend.ll
new file mode 100644
index 000000000000..d3d3df47ed28
--- /dev/null
+++ b/test/Verifier/diderivedtype-address-space-friend.ll
@@ -0,0 +1,6 @@
+; RUN: not opt -S < %s 2>&1 | FileCheck %s
+
+!named = !{!0, !1}
+!0 = !DIBasicType(tag: DW_TAG_base_type, name: "name", size: 1, align: 2, encoding: DW_ATE_unsigned_char)
+; CHECK: DWARF address space only applies to pointer or reference types
+!1 = !DIDerivedType(tag: DW_TAG_friend, baseType: !0, size: 32, align: 32, dwarfAddressSpace: 1)
diff --git a/test/Verifier/diderivedtype-address-space-inheritance.ll b/test/Verifier/diderivedtype-address-space-inheritance.ll
new file mode 100644
index 000000000000..2020f030d7e8
--- /dev/null
+++ b/test/Verifier/diderivedtype-address-space-inheritance.ll
@@ -0,0 +1,6 @@
+; RUN: not opt -S < %s 2>&1 | FileCheck %s
+
+!named = !{!0, !1}
+!0 = !DIBasicType(tag: DW_TAG_base_type, name: "name", size: 1, align: 2, encoding: DW_ATE_unsigned_char)
+; CHECK: DWARF address space only applies to pointer or reference types
+!1 = !DIDerivedType(tag: DW_TAG_inheritance, baseType: !0, size: 32, align: 32, dwarfAddressSpace: 1)
diff --git a/test/Verifier/diderivedtype-address-space-member.ll b/test/Verifier/diderivedtype-address-space-member.ll
new file mode 100644
index 000000000000..366bc4896bb2
--- /dev/null
+++ b/test/Verifier/diderivedtype-address-space-member.ll
@@ -0,0 +1,6 @@
+; RUN: not opt -S < %s 2>&1 | FileCheck %s
+
+!named = !{!0, !1}
+!0 = !DIBasicType(tag: DW_TAG_base_type, name: "name", size: 1, align: 2, encoding: DW_ATE_unsigned_char)
+; CHECK: DWARF address space only applies to pointer or reference types
+!1 = !DIDerivedType(tag: DW_TAG_member, baseType: !0, size: 32, align: 32, dwarfAddressSpace: 1)
diff --git a/test/Verifier/diderivedtype-address-space-ptr-to-member-type.ll b/test/Verifier/diderivedtype-address-space-ptr-to-member-type.ll
new file mode 100644
index 000000000000..0ae6539d3662
--- /dev/null
+++ b/test/Verifier/diderivedtype-address-space-ptr-to-member-type.ll
@@ -0,0 +1,6 @@
+; RUN: not opt -S < %s 2>&1 | FileCheck %s
+
+!named = !{!0, !1}
+!0 = !DIBasicType(tag: DW_TAG_base_type, name: "name", size: 1, align: 2, encoding: DW_ATE_unsigned_char)
+; CHECK: DWARF address space only applies to pointer or reference types
+!1 = !DIDerivedType(tag: DW_TAG_ptr_to_member_type, baseType: !0, size: 32, align: 32, dwarfAddressSpace: 1)
diff --git a/test/Verifier/diderivedtype-address-space-restrict-type.ll b/test/Verifier/diderivedtype-address-space-restrict-type.ll
new file mode 100644
index 000000000000..b140a9e28b40
--- /dev/null
+++ b/test/Verifier/diderivedtype-address-space-restrict-type.ll
@@ -0,0 +1,6 @@
+; RUN: not opt -S < %s 2>&1 | FileCheck %s
+
+!named = !{!0, !1}
+!0 = !DIBasicType(tag: DW_TAG_base_type, name: "name", size: 1, align: 2, encoding: DW_ATE_unsigned_char)
+; CHECK: DWARF address space only applies to pointer or reference types
+!1 = !DIDerivedType(tag: DW_TAG_restrict_type, baseType: !0, size: 32, align: 32, dwarfAddressSpace: 1)
diff --git a/test/Verifier/diderivedtype-address-space-rvalue-reference-type.ll b/test/Verifier/diderivedtype-address-space-rvalue-reference-type.ll
new file mode 100644
index 000000000000..5bcdc3b8d527
--- /dev/null
+++ b/test/Verifier/diderivedtype-address-space-rvalue-reference-type.ll
@@ -0,0 +1,6 @@
+; RUN: not opt -S < %s 2>&1 | FileCheck %s
+
+!named = !{!0, !1}
+!0 = !DIBasicType(tag: DW_TAG_base_type, name: "name", size: 1, align: 2, encoding: DW_ATE_unsigned_char)
+; CHECK: DWARF address space only applies to pointer or reference types
+!1 = !DIDerivedType(tag: DW_TAG_rvalue_reference_type, baseType: !0, size: 32, align: 32, dwarfAddressSpace: 1)
diff --git a/test/Verifier/diderivedtype-address-space-typedef.ll b/test/Verifier/diderivedtype-address-space-typedef.ll
new file mode 100644
index 000000000000..03a5c6af88d3
--- /dev/null
+++ b/test/Verifier/diderivedtype-address-space-typedef.ll
@@ -0,0 +1,6 @@
+; RUN: not opt -S < %s 2>&1 | FileCheck %s
+
+!named = !{!0, !1}
+!0 = !DIBasicType(tag: DW_TAG_base_type, name: "name", size: 1, align: 2, encoding: DW_ATE_unsigned_char)
+; CHECK: DWARF address space only applies to pointer or reference types
+!1 = !DIDerivedType(tag: DW_TAG_typedef, baseType: !0, size: 32, align: 32, dwarfAddressSpace: 1)
diff --git a/test/Verifier/diderivedtype-address-space-volatile-type.ll b/test/Verifier/diderivedtype-address-space-volatile-type.ll
new file mode 100644
index 000000000000..e8e70bc7959a
--- /dev/null
+++ b/test/Verifier/diderivedtype-address-space-volatile-type.ll
@@ -0,0 +1,6 @@
+; RUN: not opt -S < %s 2>&1 | FileCheck %s
+
+!named = !{!0, !1}
+!0 = !DIBasicType(tag: DW_TAG_base_type, name: "name", size: 1, align: 2, encoding: DW_ATE_unsigned_char)
+; CHECK: DWARF address space only applies to pointer or reference types
+!1 = !DIDerivedType(tag: DW_TAG_volatile_type, baseType: !0, size: 32, align: 32, dwarfAddressSpace: 1)
diff --git a/test/Verifier/diexpression-swap.ll b/test/Verifier/diexpression-swap.ll
new file mode 100644
index 000000000000..b227c54bfa88
--- /dev/null
+++ b/test/Verifier/diexpression-swap.ll
@@ -0,0 +1,5 @@
+; RUN: not opt -S < %s 2>&1 | FileCheck %s
+
+!named = !{!0}
+; CHECK: invalid expression
+!0 = !DIExpression(DW_OP_swap)
diff --git a/test/Verifier/fnarg-debuginfo.ll b/test/Verifier/fnarg-debuginfo.ll
new file mode 100644
index 000000000000..7cbe9ce93b97
--- /dev/null
+++ b/test/Verifier/fnarg-debuginfo.ll
@@ -0,0 +1,26 @@
+; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s
+
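+; !3 and !4 both describe argument 1 of @foo, so attaching one via dbg.value
+; and the other via dbg.declare must be diagnosed by the verifier.
+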
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata)
+
+define void @foo() !dbg !2 {
+entry:
+ %a = alloca i32
+ ; CHECK: conflicting debug info for argument
+ call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !3, metadata !DIExpression()), !dbg !6
+ call void @llvm.dbg.declare(metadata i32* %a, metadata !4, metadata !DIExpression()), !dbg !6
+ ret void, !dbg !6
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!7, !8}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang", emissionKind: FullDebug)
+!1 = !DIFile(filename: "x.c", directory: "/")
+!2 = distinct !DISubprogram(name: "foo", scope: !0, isDefinition: true, unit: !0)
+!3 = !DILocalVariable(name: "a", arg: 1, scope: !2, file: !1, line: 1, type: !5)
+!4 = !DILocalVariable(name: "b", arg: 1, scope: !2, file: !1, line: 1, type: !5)
+!5 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!6 = !DILocation(line: 1, scope: !2)
+!7 = !{i32 2, !"Dwarf Version", i32 4}
+!8 = !{i32 1, !"Debug Info Version", i32 3}
diff --git a/test/Verifier/fnarg-nodebug.ll b/test/Verifier/fnarg-nodebug.ll
new file mode 100644
index 000000000000..0c66f5d6d1a0
--- /dev/null
+++ b/test/Verifier/fnarg-nodebug.ll
@@ -0,0 +1,59 @@
+; RUN: llvm-as < %s -o %t
+; RUN: llvm-dis < %t -o - | FileCheck %s
+; Created at -O1 from:
+; int sink(int);
+; __attribute__((always_inline)) int f(int i) { return sink(i); }
+; __attribute__((always_inline)) int g(int j) { return sink(j); }
+; __attribute__((nodebug)) int nodebug(int k) { return f(k)+g(k); }
+source_filename = "t.c"
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.12.0"
+
+declare i32 @sink(i32) local_unnamed_addr
+
+define i32 @nodebug(i32 %k) local_unnamed_addr #2 {
+entry:
+; This should not set off the FnArg Verifier. The two variables are in different scopes.
+ tail call void @llvm.dbg.value(metadata i32 %k, i64 0, metadata !12, metadata !13) #4, !dbg !14
+ %call.k = tail call i32 @sink(i32 %k) #4, !dbg !15
+ tail call void @llvm.dbg.value(metadata i32 %k, i64 0, metadata !19, metadata !13) #4, !dbg !20
+ %call.k3 = tail call i32 @sink(i32 %k) #4, !dbg !21
+ %add = add nsw i32 %call.k3, %call.k
+ ret i32 %add
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #3
+
+attributes #2 = { nounwind ssp uwtable }
+attributes #3 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 (trunk 297153) (llvm/trunk 297155)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "t.c", directory: "/tmp")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 1, !"PIC Level", i32 2}
+!6 = !{!"clang version 5.0.0 (trunk 297153) (llvm/trunk 297155)"}
+!7 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 2, type: !8, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !11)
+!8 = !DISubroutineType(types: !9)
+!9 = !{!10, !10}
+!10 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!11 = !{!12}
+; CHECK: !DILocalVariable(name: "i", arg: 1
+!12 = !DILocalVariable(name: "i", arg: 1, scope: !7, file: !1, line: 2, type: !10)
+!13 = !DIExpression()
+!14 = !DILocation(line: 2, column: 42, scope: !7)
+!15 = !DILocation(line: 2, column: 54, scope: !7)
+!16 = !DILocation(line: 2, column: 47, scope: !7)
+!17 = distinct !DISubprogram(name: "g", scope: !1, file: !1, line: 3, type: !8, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !18)
+!18 = !{!19}
+; CHECK: !DILocalVariable(name: "j", arg: 1
+!19 = !DILocalVariable(name: "j", arg: 1, scope: !17, file: !1, line: 3, type: !10)
+!20 = !DILocation(line: 3, column: 42, scope: !17)
+!21 = !DILocation(line: 3, column: 54, scope: !17)
+!22 = !DILocation(line: 3, column: 47, scope: !17)
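For contrast, a minimal sketch (the names !a, !b, !S, !F and !T are hypothetical) of metadata that does set off the FnArg check: two variables claiming the same argument slot within a single subprogram scope.

    !a = !DILocalVariable(name: "x", arg: 1, scope: !S, file: !F, line: 1, type: !T)
    !b = !DILocalVariable(name: "y", arg: 1, scope: !S, file: !F, line: 1, type: !T)

Unlike !12 and !19 above, which are both arg: 1 but live in the distinct subprograms !7 and !17, !a and !b would describe the same argument of one function twice.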
diff --git a/test/Verifier/fp-intrinsics.ll b/test/Verifier/fp-intrinsics.ll
new file mode 100644
index 000000000000..0a308115cc35
--- /dev/null
+++ b/test/Verifier/fp-intrinsics.ll
@@ -0,0 +1,43 @@
+; RUN: opt -verify -S < %s 2>&1 | FileCheck --check-prefix=CHECK1 %s
+; RUN: sed -e s/.T2:// %s | not opt -verify -disable-output 2>&1 | FileCheck --check-prefix=CHECK2 %s
+; RUN: sed -e s/.T3:// %s | not opt -verify -disable-output 2>&1 | FileCheck --check-prefix=CHECK3 %s
+
+; Common declaration used for all runs.
+declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
+
+; Test that the verifier accepts legal code, and that the correct attributes are
+; attached to the FP intrinsic.
+; CHECK1: declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata) #[[ATTR:[0-9]+]]
+; CHECK1: attributes #[[ATTR]] = { inaccessiblememonly nounwind }
+; Note: FP exceptions aren't usually caught through normal unwind mechanisms,
+; but we may want to revisit this for asynchronous exception handling.
+define double @f1(double %a, double %b) {
+entry:
+ %fadd = call double @llvm.experimental.constrained.fadd.f64(
+ double %a, double %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict")
+ ret double %fadd
+}
+
+; Test an illegal value for the rounding mode argument.
+; CHECK2: invalid rounding mode argument
+;T2: define double @f2(double %a, double %b) {
+;T2: entry:
+;T2: %fadd = call double @llvm.experimental.constrained.fadd.f64(
+;T2: double %a, double %b,
+;T2: metadata !"round.dynomite",
+;T2: metadata !"fpexcept.strict")
+;T2: ret double %fadd
+;T2: }
+
+; Test an illegal value for the exception behavior argument.
+; CHECK3: invalid exception behavior argument
+;T3: define double @f2(double %a, double %b) {
+;T3: entry:
+;T3: %fadd = call double @llvm.experimental.constrained.fadd.f64(
+;T3: double %a, double %b,
+;T3: metadata !"round.dynamic",
+;T3: metadata !"fpexcept.restrict")
+;T3: ret double %fadd
+;T3: }
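The metadata operands accept only a small closed set of strings; a minimal sketch of another combination the verifier should accept, reusing the declaration from this test (the function name @f4 is hypothetical):

    define double @f4(double %a, double %b) {
    entry:
      %fadd = call double @llvm.experimental.constrained.fadd.f64(
                                           double %a, double %b,
                                           metadata !"round.tonearest",
                                           metadata !"fpexcept.ignore")
      ret double %fadd
    }

Assuming the LangRef set at this point, the rounding modes are round.dynamic, round.tonearest, round.downward, round.upward and round.towardzero, and the exception behaviors are fpexcept.ignore, fpexcept.maytrap and fpexcept.strict; anything else should hit the CHECK2/CHECK3 errors above.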
diff --git a/test/Verifier/function-metadata-bad.ll b/test/Verifier/function-metadata-bad.ll
index 9e7ba225408e..b3bd3c27c6d4 100644
--- a/test/Verifier/function-metadata-bad.ll
+++ b/test/Verifier/function-metadata-bad.ll
@@ -14,7 +14,7 @@ define i32 @bad2() !prof !1 {
}
!1 = !{!"function_entry_count"}
-; CHECK-NEXT: !prof annotations should have exactly 2 operands
+; CHECK-NEXT: !prof annotations should have no less than 2 operands
; CHECK-NEXT: !1 = !{!"function_entry_count"}
diff --git a/test/Verifier/metadata-function-prof.ll b/test/Verifier/metadata-function-prof.ll
index d84a7fe54402..70548b1fa41f 100644
--- a/test/Verifier/metadata-function-prof.ll
+++ b/test/Verifier/metadata-function-prof.ll
@@ -12,4 +12,4 @@ define void @f3() !prof !0 !prof !0 {
unreachable
}
-!0 = !{}
+!0 = !{!"function_entry_count", i64 100}
diff --git a/test/tools/dsymutil/X86/generate-empty-CU.test b/test/tools/dsymutil/X86/generate-empty-CU.test
new file mode 100644
index 000000000000..233611460b62
--- /dev/null
+++ b/test/tools/dsymutil/X86/generate-empty-CU.test
@@ -0,0 +1,33 @@
+# RUN: llvm-dsymutil -f -o - -oso-prepend-path=%p/.. -y %s | llvm-dwarfdump - | FileCheck %s
+
+# This test links the DWARF for an LTO binary and on purpose doesn't retain
+# any symbol in the second CU out of 3. This is the only case where dsymutil
+# will generate an empty CU, and it requires special handling.
+
+---
+triple: 'x86_64-apple-darwin'
+objects:
+ - filename: /Inputs/basic-lto.macho.x86_64.o
+ timestamp: 1417654896
+ symbols:
+ - { sym: _main, objAddr: 0x0000000000000000, binAddr: 0x0000000100000F40, size: 0x00000010 }
+ - { sym: _bar, objAddr: 0x0000000000000050, binAddr: 0x0000000100000F90, size: 0x00000024 }
+...
+
+CHECK: .debug_info contents:
+CHECK: Compile Unit: length = 0x0000007d version = 0x0002 abbr_offset = 0x0000 addr_size = 0x08 (next unit at 0x00000081)
+
+CHECK: DW_TAG_compile_unit
+CHECK: DW_AT_name {{.*}} "basic1.c"
+CHECK: DW_TAG_subprogram
+CHECK: DW_AT_name {{.*}} "main"
+
+CHECK: Compile Unit: length = 0x00000007 version = 0x0002 abbr_offset = 0x0000 addr_size = 0x08 (next unit at 0x0000008c)
+
+CHECK: Compile Unit: length = 0x00000089 version = 0x0002 abbr_offset = 0x0000 addr_size = 0x08 (next unit at 0x00000119)
+
+CHECK: DW_TAG_compile_unit
+CHECK: DW_AT_name {{.*}} "basic3.c"
+
+CHECK: DW_TAG_subprogram [7] *
+CHECK: DW_AT_name {{.*}} = "bar"
diff --git a/test/tools/gold/X86/Inputs/thinlto_weak_library1.ll b/test/tools/gold/X86/Inputs/thinlto_weak_library1.ll
new file mode 100644
index 000000000000..319ce6be9b14
--- /dev/null
+++ b/test/tools/gold/X86/Inputs/thinlto_weak_library1.ll
@@ -0,0 +1,17 @@
+; ModuleID = 'thinlto_weak_library1.c'
+source_filename = "thinlto_weak_library1.c"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: nounwind uwtable
+define weak i32 @f() local_unnamed_addr {
+entry:
+ ret i32 1
+}
+
+; Function Attrs: norecurse nounwind readnone uwtable
+define i32 @test1() local_unnamed_addr {
+entry:
+ %call = tail call i32 @f()
+ ret i32 %call
+}
diff --git a/test/tools/gold/X86/Inputs/thinlto_weak_library2.ll b/test/tools/gold/X86/Inputs/thinlto_weak_library2.ll
new file mode 100644
index 000000000000..684549aa320e
--- /dev/null
+++ b/test/tools/gold/X86/Inputs/thinlto_weak_library2.ll
@@ -0,0 +1,20 @@
+; ModuleID = 'thinlto_weak_library2.c'
+source_filename = "thinlto_weak_library2.c"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: nounwind uwtable
+define weak i32 @f() local_unnamed_addr {
+entry:
+ ret i32 2
+}
+
+; Function Attrs: nounwind uwtable
+define void @test2() local_unnamed_addr {
+entry:
+ tail call i32 (...) @test1()
+ tail call i32 @f()
+ ret void
+}
+
+declare i32 @test1(...) local_unnamed_addr
diff --git a/test/tools/gold/X86/cache.ll b/test/tools/gold/X86/cache.ll
index cef983c4a1ac..8d22a8606df3 100644
--- a/test/tools/gold/X86/cache.ll
+++ b/test/tools/gold/X86/cache.ll
@@ -2,7 +2,7 @@
; RUN: opt -module-summary %s -o %t.o
; RUN: opt -module-summary %p/Inputs/cache.ll -o %t2.o
-; RUN: rm -Rf %t.cache && mkdir %t.cache
+; RUN: rm -Rf %t.cache
; RUN: %gold -m elf_x86_64 -plugin %llvmshlibdir/LLVMgold.so \
; RUN: --plugin-opt=thinlto \
; RUN: --plugin-opt=cache-dir=%t.cache \
@@ -16,7 +16,7 @@
; RUN: opt -module-hash -module-summary %s -o %t.o
; RUN: opt -module-hash -module-summary %p/Inputs/cache.ll -o %t2.o
-; RUN: rm -Rf %t.cache && mkdir %t.cache
+; RUN: rm -Rf %t.cache
; RUN: %gold -m elf_x86_64 -plugin %llvmshlibdir/LLVMgold.so \
; RUN: --plugin-opt=thinlto \
; RUN: --plugin-opt=cache-dir=%t.cache \
diff --git a/test/tools/gold/X86/error-unopenable.ll b/test/tools/gold/X86/error-unopenable.ll
new file mode 100644
index 000000000000..c8532a5b187f
--- /dev/null
+++ b/test/tools/gold/X86/error-unopenable.ll
@@ -0,0 +1,8 @@
+; RUN: llvm-as -o %t %s
+; RUN: not %gold -plugin %llvmshlibdir/LLVMgold.so \
+; RUN: --plugin-opt=obj-path=%T/nonexistent-dir/foo.o \
+; RUN: %t -o %t2 2>&1 | FileCheck %s
+
+; CHECK: Could not open file {{.*}}nonexistent-dir
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/test/tools/gold/X86/parallel.ll b/test/tools/gold/X86/parallel.ll
index 52232918c805..4de694c94c88 100644
--- a/test/tools/gold/X86/parallel.ll
+++ b/test/tools/gold/X86/parallel.ll
@@ -1,9 +1,9 @@
; RUN: llvm-as -o %t.bc %s
-; RUN: rm -f %t.opt.bc0 %t.opt.bc1 %t.o0 %t.o1
+; RUN: rm -f %t.0.5.precodegen.bc %t.1.5.precodegen.bc %t.o %t.o1
; RUN: env LD_PRELOAD=%llvmshlibdir/LLVMgold.so %gold -plugin %llvmshlibdir/LLVMgold.so -u foo -u bar -plugin-opt lto-partitions=2 -plugin-opt save-temps -m elf_x86_64 -o %t %t.bc
; RUN: llvm-dis %t.0.5.precodegen.bc -o - | FileCheck --check-prefix=CHECK-BC0 %s
; RUN: llvm-dis %t.1.5.precodegen.bc -o - | FileCheck --check-prefix=CHECK-BC1 %s
-; RUN: llvm-nm %t.o0 | FileCheck --check-prefix=CHECK0 %s
+; RUN: llvm-nm %t.o | FileCheck --check-prefix=CHECK0 %s
; RUN: llvm-nm %t.o1 | FileCheck --check-prefix=CHECK1 %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/test/tools/gold/X86/stats.ll b/test/tools/gold/X86/stats.ll
index 15aa080d6fc0..255a2bd90bcd 100644
--- a/test/tools/gold/X86/stats.ll
+++ b/test/tools/gold/X86/stats.ll
@@ -5,6 +5,13 @@
; RUN: -m elf_x86_64 \
; RUN: -plugin-opt=-stats %t.o -o %t2 2>&1 | FileCheck %s
+; RUN: llvm-as %s -o %t.o
+; RUN: %gold -plugin %llvmshlibdir/LLVMgold.so -shared \
+; RUN: -m elf_x86_64 \
+; RUN: -plugin-opt=thinlto \
+; RUN: -plugin-opt=thinlto-index-only \
+; RUN: -plugin-opt=-stats %t.o -o %t2 2>&1 | FileCheck %s
+
; CHECK: Statistics Collected
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/test/tools/gold/X86/thinlto.ll b/test/tools/gold/X86/thinlto.ll
index aee7268dfb96..5e1d913eb09e 100644
--- a/test/tools/gold/X86/thinlto.ll
+++ b/test/tools/gold/X86/thinlto.ll
@@ -61,13 +61,14 @@
; RUN: llvm-nm %t4 | FileCheck %s --check-prefix=NM
; Test --plugin-opt=obj-path to ensure unique object files generated.
+; RUN: rm -f %t5.o %t5.o1
; RUN: %gold -plugin %llvmshlibdir/LLVMgold.so \
; RUN: -m elf_x86_64 \
; RUN: --plugin-opt=thinlto \
; RUN: --plugin-opt=jobs=2 \
; RUN: --plugin-opt=obj-path=%t5.o \
; RUN: -shared %t.o %t2.o -o %t4
-; RUN: llvm-nm %t5.o0 | FileCheck %s --check-prefix=NM2
+; RUN: llvm-nm %t5.o | FileCheck %s --check-prefix=NM2
; RUN: llvm-nm %t5.o1 | FileCheck %s --check-prefix=NM2
; NM: T f
diff --git a/test/tools/gold/X86/thinlto_object_suffix_replace.ll b/test/tools/gold/X86/thinlto_object_suffix_replace.ll
new file mode 100644
index 000000000000..af4adad1655e
--- /dev/null
+++ b/test/tools/gold/X86/thinlto_object_suffix_replace.ll
@@ -0,0 +1,41 @@
+; Test to make sure the thinlto-object-suffix-replace option is handled
+; correctly.
+
+; Generate bitcode file with summary, as well as a minimized bitcode without
+; the debug metadata for the thin link.
+; RUN: opt -thinlto-bc %s -thin-link-bitcode-file=%t1.thinlink.bc -o %t1.o
+
+; First perform the thin link on the normal bitcode file, and save the
+; resulting index.
+; RUN: %gold -plugin %llvmshlibdir/LLVMgold.so \
+; RUN: -m elf_x86_64 \
+; RUN: --plugin-opt=thinlto \
+; RUN: --plugin-opt=thinlto-index-only \
+; RUN: -shared %t1.o -o %t3
+; RUN: cp %t1.o.thinlto.bc %t1.o.thinlto.bc.orig
+
+; Next perform the thin link on the minimized bitcode file, and compare dump
+; of the resulting index to the above dump to ensure they are identical.
+; RUN: rm -f %t1.o.thinlto.bc
+; Make sure it isn't inadvertently using the regular bitcode file.
+; RUN: rm -f %t1.o
+; RUN: %gold -plugin %llvmshlibdir/LLVMgold.so \
+; RUN: -m elf_x86_64 \
+; RUN: --plugin-opt=thinlto \
+; RUN: --plugin-opt=thinlto-index-only \
+; RUN: --plugin-opt=thinlto-object-suffix-replace=".thinlink.bc;.o" \
+; RUN: -shared %t1.thinlink.bc -o %t3
+; RUN: diff %t1.o.thinlto.bc.orig %t1.o.thinlto.bc
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @f() {
+entry:
+ ret void
+}
+
+!llvm.dbg.cu = !{}
+
+!1 = !{i32 2, !"Debug Info Version", i32 3}
+!llvm.module.flags = !{!1}
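The option value is an "old suffix;new suffix" pair, so the thin link derives the name of the full bitcode object from the minimized input by suffix substitution; a sketch of the mapping the RUN lines above rely on:

    thinlto-object-suffix-replace=".thinlink.bc;.o"
    %t1.thinlink.bc  ->  %t1.o   (index written to %t1.o.thinlto.bc)

which is why the final diff can compare that index against the one produced from the regular bitcode file.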
diff --git a/test/tools/gold/X86/thinlto_weak_library.ll b/test/tools/gold/X86/thinlto_weak_library.ll
new file mode 100644
index 000000000000..6a04fc0db0e4
--- /dev/null
+++ b/test/tools/gold/X86/thinlto_weak_library.ll
@@ -0,0 +1,41 @@
+; Test to ensure that ThinLTO sorts the modules before passing to the
+; final native link based on the linker's determination of which
+; object within a static library contains the prevailing def of a symbol.
+
+; First generate bitcode with a module summary index for each file
+; RUN: opt -module-summary %s -o %t.o
+; RUN: opt -module-summary %p/Inputs/thinlto_weak_library1.ll -o %t2.o
+; RUN: opt -module-summary %p/Inputs/thinlto_weak_library2.ll -o %t3.o
+
+; Although the objects are ordered "%t2.o %t3.o" in the library, the
+; linker selects %t3.o first since it satisfies a strong reference from
+; %t.o. It later selects %t2.o based on the strong ref from %t3.o.
+; Therefore, %t3.o's copy of @f is prevailing, and we need to link
+; %t3.o before %t2.o in the final native link.
+; RUN: %gold -plugin %llvmshlibdir/LLVMgold.so \
+; RUN: --plugin-opt=thinlto \
+; RUN: --plugin-opt=save-temps \
+; RUN: -m elf_x86_64 \
+; RUN: -o %t4 \
+; RUN: %t.o \
+; RUN: --start-lib %t2.o %t3.o --end-lib
+
+; Make sure we completely dropped the definition of the non-prevailing
+; copy of f() (and didn't simply convert to available_externally, which
+; would incorrectly enable inlining).
+; RUN: llvm-dis %t2.o.1.promote.bc -o - | FileCheck %s
+; CHECK: declare i32 @f()
+
+; ModuleID = 'thinlto_weak_library.c'
+source_filename = "thinlto_weak_library.c"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: nounwind uwtable
+define i32 @main() local_unnamed_addr {
+entry:
+ tail call void (...) @test2()
+ ret i32 0
+}
+
+declare void @test2(...) local_unnamed_addr
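A sketch of the promotion step the CHECK line pins down: %t2.o enters the thin link with the non-prevailing definition from thinlto_weak_library1.ll,

    define weak i32 @f() local_unnamed_addr {
    entry:
      ret i32 1
    }

and %t2.o.1.promote.bc retains only

    declare i32 @f()

leaving no body for the inliner to pick up.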
diff --git a/test/tools/gold/X86/thinlto_weak_resolution.ll b/test/tools/gold/X86/thinlto_weak_resolution.ll
index 8215c42f10cd..ab609cca878a 100644
--- a/test/tools/gold/X86/thinlto_weak_resolution.ll
+++ b/test/tools/gold/X86/thinlto_weak_resolution.ll
@@ -13,18 +13,14 @@
; RUN: llvm-nm %t3.o | FileCheck %s
; CHECK: weakfunc
-; Most of the preempted functions should have been eliminated (the plugin will
-; set linkage of odr functions to available_externally and linkonce functions
-; are removed by globaldce). FIXME: Need to introduce combined index linkage
-; that means "drop this function" so we can avoid importing linkonce functions
-; and drop weak functions.
+; The preempted functions should have been eliminated (the plugin will
+; set linkage of odr functions to available_externally, and convert
+; linkonce and weak to declarations).
; RUN: llvm-dis %t2.o.4.opt.bc -o - | FileCheck --check-prefix=OPT2 %s
; OPT2-NOT: @
-; OPT2: @weakfunc
-; OPT2-NOT: @
; RUN: llvm-dis %t.o.3.import.bc -o - | FileCheck --check-prefix=IMPORT %s
-; RUN llvm-dis %t2.o.3.import.bc -o - | FileCheck --check-prefix=IMPORT2 %s
+; RUN: llvm-dis %t2.o.3.import.bc -o - | FileCheck --check-prefix=IMPORT2 %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
@@ -83,7 +79,7 @@ entry:
ret void
}
; IMPORT: define weak void @linkoncefunc()
-; IMPORT2: define linkonce void @linkoncefunc()
+; IMPORT2: declare void @linkoncefunc()
define linkonce void @linkoncefunc() #0 {
entry:
ret void
@@ -95,7 +91,7 @@ entry:
ret void
}
; IMPORT: define weak void @weakfunc()
-; IMPORT2: define weak void @weakfunc()
+; IMPORT2: declare void @weakfunc()
define weak void @weakfunc() #0 {
entry:
ret void
diff --git a/test/tools/llvm-ar/Inputs/absolute-paths.lib b/test/tools/llvm-ar/Inputs/absolute-paths.lib
new file mode 100644
index 000000000000..a4d8147dfef5
--- /dev/null
+++ b/test/tools/llvm-ar/Inputs/absolute-paths.lib
Binary files differ
diff --git a/test/tools/llvm-ar/absolute-paths.test b/test/tools/llvm-ar/absolute-paths.test
new file mode 100644
index 000000000000..0b42d7d2dcbd
--- /dev/null
+++ b/test/tools/llvm-ar/absolute-paths.test
@@ -0,0 +1,20 @@
+MSVC's lib.exe produces archives with absolute paths to the members. It's useful
+for llvm-ar to extract them to their basenames in the CWD, since the directories
+in the archived paths usually won't exist when the archive is extracted.
+
+Get a temp clean cwd to extract into.
+RUN: rm -rf %t && mkdir %t && cd %t
+
+RUN: llvm-ar t %S/Inputs/absolute-paths.lib | FileCheck %s --check-prefix=CHECK-LIST
+CHECK-LIST: C:/src/llvm-project/build/dne/b.o
+CHECK-LIST: C:/src/llvm-project/build/dne/a.o
+
+Check that a.o comes out and defines foo.
+RUN: llvm-ar x %S/Inputs/absolute-paths.lib 'C:/src/llvm-project/build/dne/a.o'
+RUN: llvm-nm a.o | FileCheck %s --check-prefix=CHECK-A
+CHECK-A: T foo
+
+Check that b.o comes out and defines bar.
+RUN: llvm-ar x %S/Inputs/absolute-paths.lib C:/src/llvm-project/build/dne/b.o
+RUN: llvm-nm b.o | FileCheck %s --check-prefix=CHECK-B
+CHECK-B: T bar
diff --git a/test/tools/llvm-config/paths.test b/test/tools/llvm-config/paths.test
new file mode 100644
index 000000000000..419f155ae1f8
--- /dev/null
+++ b/test/tools/llvm-config/paths.test
@@ -0,0 +1,21 @@
+# Check directory options for obvious issues.
+
+RUN: llvm-config --bindir 2>&1 | FileCheck --check-prefix=CHECK-BINDIR %s
+CHECK-BINDIR: {{.*}}{{/|\\}}bin
+CHECK-BINDIR-NOT: error:
+CHECK-BINDIR-NOT: warning
+
+RUN: llvm-config --includedir 2>&1 | FileCheck --check-prefix=CHECK-INCLUDEDIR %s
+CHECK-INCLUDEDIR: {{.*}}{{/|\\}}include
+CHECK-INCLUDEDIR-NOT: error:
+CHECK-INCLUDEDIR-NOT: warning
+
+RUN: llvm-config --libdir 2>&1 | FileCheck --check-prefix=CHECK-LIBDIR %s
+CHECK-LIBDIR: {{.*}}{{/|\\}}lib{{.*}}
+CHECK-LIBDIR-NOT: error:
+CHECK-LIBDIR-NOT: warning
+
+RUN: llvm-config --cmakedir 2>&1 | FileCheck --check-prefix=CHECK-CMAKEDIR %s
+CHECK-CMAKEDIR: {{.*}}{{/|\\}}cmake{{/|\\}}llvm
+CHECK-CMAKEDIR-NOT: error:
+CHECK-CMAKEDIR-NOT: warning
diff --git a/test/tools/llvm-cov/Inputs/multiple-files2.covmapping b/test/tools/llvm-cov/Inputs/multiple-files2.covmapping
new file mode 100644
index 000000000000..770817a53806
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/multiple-files2.covmapping
Binary files differ
diff --git a/test/tools/llvm-cov/demangle.test b/test/tools/llvm-cov/demangle.test
index 90a26b734067..5ca113262e40 100644
--- a/test/tools/llvm-cov/demangle.test
+++ b/test/tools/llvm-cov/demangle.test
@@ -4,5 +4,8 @@ RUN: llvm-cov show %S/Inputs/templateInstantiations.covmapping -instr-profile %S
RUN: llvm-profdata merge %S/Inputs/hideUnexecutedSubviews.proftext -o %t.profdata
RUN: llvm-cov show %S/Inputs/templateInstantiations.covmapping -instr-profile %t.profdata -Xdemangler sed -Xdemangler 's/_/X/g' -filename-equivalence %S/showTemplateInstantiations.cpp | FileCheck %s
+// Check that we demangle names when printing out function summaries.
+RUN: llvm-cov report -show-functions %S/Inputs/templateInstantiations.covmapping -instr-profile %S/Inputs/templateInstantiations.profdata -Xdemangler sed -Xdemangler 's/_/X/g' -filename-equivalence %S/showTemplateInstantiations.cpp | FileCheck %s
+
CHECK-DAG: XZ4funcIbEiTX
CHECK-DAG: XZ4funcIiEiTX
diff --git a/test/tools/llvm-cov/multiple-files.test b/test/tools/llvm-cov/multiple-files.test
index 0b3fb855fedc..d0dbdd8c0fcf 100644
--- a/test/tools/llvm-cov/multiple-files.test
+++ b/test/tools/llvm-cov/multiple-files.test
@@ -1,9 +1,15 @@
// RUN: llvm-profdata merge %S/Inputs/multiple-files.proftext -o %t.profdata
-// RUN: llvm-cov report %S/Inputs/multiple-files.covmapping -instr-profile %t.profdata | FileCheck %s
+// RUN: llvm-cov report %S/Inputs/multiple-files.covmapping -instr-profile %t.profdata | FileCheck %s -check-prefix=MANY_COMPONENTS
+// RUN: llvm-cov report %S/Inputs/multiple-files2.covmapping -instr-profile %t.profdata | FileCheck %s -check-prefix=ONE_COMPONENT
-// CHECK: Filename
-// CHECK-NEXT: ---
-// CHECK-NEXT: {{^}}a{{[/\\]}}f2.c
-// CHECK-NEXT: {{^}}b{{[/\\]}}c{{[/\\]}}f4.c
-// CHECK-NEXT: {{^}}b{{[/\\]}}f3.c
-// CHECK-NEXT: {{^}}f1.c
+// MANY_COMPONENTS: Filename
+// MANY_COMPONENTS-NEXT: ---
+// MANY_COMPONENTS-NEXT: {{^}}a{{[/\\]}}f2.c
+// MANY_COMPONENTS-NEXT: {{^}}b{{[/\\]}}c{{[/\\]}}f4.c
+// MANY_COMPONENTS-NEXT: {{^}}b{{[/\\]}}f3.c
+// MANY_COMPONENTS-NEXT: {{^}}f1.c
+
+// ONE_COMPONENT: Filename
+// ONE_COMPONENT-NEXT: ---
+// ONE_COMPONENT-NEXT: {{^}}cov.c
+// ONE_COMPONENT-NEXT: {{^}}cov.h
diff --git a/test/tools/llvm-cov/report.cpp b/test/tools/llvm-cov/report.cpp
index c28dd7589408..49425eb5f624 100644
--- a/test/tools/llvm-cov/report.cpp
+++ b/test/tools/llvm-cov/report.cpp
@@ -1,6 +1,6 @@
// RUN: llvm-cov report %S/Inputs/report.covmapping -instr-profile %S/Inputs/report.profdata -filename-equivalence 2>&1 | FileCheck %s
-// RUN: llvm-cov report %S/Inputs/report.covmapping -instr-profile %S/Inputs/report.profdata -filename-equivalence report.cpp 2>&1 | FileCheck -check-prefix=FILT %s
-// RUN: llvm-cov report %S/Inputs/report.covmapping -instr-profile %S/Inputs/report.profdata -filename-equivalence report.cpp does-not-exist.cpp 2>&1 | FileCheck -check-prefix=FILT %s
+// RUN: llvm-cov report -show-functions %S/Inputs/report.covmapping -instr-profile %S/Inputs/report.profdata -filename-equivalence report.cpp 2>&1 | FileCheck -check-prefix=FILT %s
+// RUN: llvm-cov report -show-functions %S/Inputs/report.covmapping -instr-profile %S/Inputs/report.profdata -filename-equivalence report.cpp does-not-exist.cpp 2>&1 | FileCheck -check-prefix=FILT %s
// CHECK: Regions Missed Regions Cover Functions Missed Functions Executed Instantiations Missed Insts. Executed Lines Missed Lines Cover
// CHECK-NEXT: ---
diff --git a/test/tools/llvm-cov/warnings.h b/test/tools/llvm-cov/warnings.h
index 0517b6a7c875..a06e02f92d56 100644
--- a/test/tools/llvm-cov/warnings.h
+++ b/test/tools/llvm-cov/warnings.h
@@ -1,5 +1,7 @@
// RUN: llvm-cov show %S/Inputs/prevent_false_instantiations.covmapping -instr-profile %S/Inputs/elf_binary_comdat.profdata -filename-equivalence /dev/null | FileCheck %s -allow-empty -check-prefix=FAKE-FILE-STDOUT
// RUN: llvm-cov show %S/Inputs/prevent_false_instantiations.covmapping -instr-profile %S/Inputs/elf_binary_comdat.profdata -filename-equivalence /dev/null 2>&1 | FileCheck %s -check-prefix=FAKE-FILE-STDERR
+// RUN: not llvm-cov report %S/Inputs/prevent_false_instantiations.covmapping -instr-profile %S/Inputs/elf_binary_comdat.profdata -format=html
+// RUN: not llvm-cov export %S/Inputs/prevent_false_instantiations.covmapping -instr-profile %S/Inputs/elf_binary_comdat.profdata -format=html
// FAKE-FILE-STDOUT-NOT: warning: The file '{{.*}}' isn't covered.
// FAKE-FILE-STDERR: warning: The file '{{.*}}' isn't covered.
diff --git a/test/tools/llvm-cxxfilt/coff-import.test b/test/tools/llvm-cxxfilt/coff-import.test
new file mode 100644
index 000000000000..35494d7a8326
--- /dev/null
+++ b/test/tools/llvm-cxxfilt/coff-import.test
@@ -0,0 +1,5 @@
+RUN: llvm-cxxfilt -_ ___imp__ZSt6futureIvE | FileCheck %s
+RUN: llvm-cxxfilt __imp__ZSt6futureIvE | FileCheck %s
+
+CHECK: import thunk for std::future<void>
+
diff --git a/test/tools/llvm-cxxfilt/types.test b/test/tools/llvm-cxxfilt/types.test
new file mode 100644
index 000000000000..4f0b2ecaab16
--- /dev/null
+++ b/test/tools/llvm-cxxfilt/types.test
@@ -0,0 +1,5 @@
+RUN: llvm-cxxfilt -t f i | FileCheck %s
+
+CHECK: float
+CHECK-NEXT: int
+
diff --git a/test/tools/llvm-cxxfilt/underscore.test b/test/tools/llvm-cxxfilt/underscore.test
new file mode 100644
index 000000000000..4a4ce898a9c3
--- /dev/null
+++ b/test/tools/llvm-cxxfilt/underscore.test
@@ -0,0 +1,11 @@
+RUN: llvm-cxxfilt -_ __ZN2ns1fE _ZSt1f _f | FileCheck %s -check-prefix CHECK-STRIPPED
+RUN: llvm-cxxfilt __ZN2ns1fE _ZSt1f _f | FileCheck %s -check-prefix CHECK-UNSTRIPPED
+
+CHECK-STRIPPED: ns::f
+CHECK-STRIPPED: _ZSt1f
+CHECK-STRIPPED: _f
+
+CHECK-UNSTRIPPED: __ZN2ns1fE
+CHECK-UNSTRIPPED: std::f
+CHECK-UNSTRIPPED: _f
+
diff --git a/test/tools/llvm-dwp/X86/compressfail.test b/test/tools/llvm-dwp/X86/compressfail.test
index 78b6255724fc..b1961e82a761 100644
--- a/test/tools/llvm-dwp/X86/compressfail.test
+++ b/test/tools/llvm-dwp/X86/compressfail.test
@@ -4,4 +4,4 @@ RUN: not llvm-dwp %p/../Inputs/invalid_compressed.dwo -o %t 2>&1 | FileCheck %s
REQUIRES: zlib
-CHECK: error: failure while decompressing compressed section: 'zdebug_{{.*}}.dwo'
+CHECK: error: failure while decompressing compressed section: '.zdebug_{{.*}}.dwo'
diff --git a/test/tools/llvm-dwp/X86/nocompress.test b/test/tools/llvm-dwp/X86/nocompress.test
index 1de9444dd3eb..c3df6b3fccaf 100644
--- a/test/tools/llvm-dwp/X86/nocompress.test
+++ b/test/tools/llvm-dwp/X86/nocompress.test
@@ -2,4 +2,4 @@ RUN: not llvm-dwp %p/../Inputs/compress/a.dwo -o %t 2>&1 | FileCheck %s
REQUIRES: nozlib
-CHECK: error: zlib not available
+CHECK: error: failure while decompressing compressed section: '.zdebug_{{.*}}.dwo', zlib is not available
diff --git a/test/tools/llvm-extract/recursive.ll b/test/tools/llvm-extract/recursive.ll
new file mode 100644
index 000000000000..54813dba7968
--- /dev/null
+++ b/test/tools/llvm-extract/recursive.ll
@@ -0,0 +1,32 @@
+; RUN: llvm-extract -func=a --recursive %s -S | FileCheck --check-prefix=CHECK-AB %s
+; RUN: llvm-extract -func=a --recursive --delete %s -S | FileCheck --check-prefix=CHECK-CD %s
+; RUN: llvm-extract -func=d --recursive %s -S | FileCheck --check-prefix=CHECK-CD %s
+
+; CHECK-AB: define void @a
+; CHECK-AB: define void @b
+; CHECK-AB-NOT: define void @c
+; CHECK-AB-NOT: define void @d
+
+; CHECK-CD-NOT: define void @a
+; CHECK-CD-NOT: define void @b
+; CHECK-CD: define void @c
+; CHECK-CD: define void @d
+
+define void @a() {
+ call void @b()
+ ret void
+}
+
+define void @b() {
+ ret void
+}
+
+define void @c() {
+ call void @d()
+ ret void
+}
+
+define void @d() {
+ call void @c()
+ ret void
+}
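The three RUN lines exercise the flag matrix: -func=a --recursive keeps the call-graph closure of @a, namely {a, b}; adding --delete inverts the selection, leaving {c, d}; and -func=d --recursive also keeps {c, d}, since the mutual recursion between @c and @d pulls both into the closure.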
diff --git a/test/tools/llvm-lto2/X86/nodatalayout.ll b/test/tools/llvm-lto2/X86/nodatalayout.ll
index ee5cfb0e4705..f5f44e3e4a9f 100644
--- a/test/tools/llvm-lto2/X86/nodatalayout.ll
+++ b/test/tools/llvm-lto2/X86/nodatalayout.ll
@@ -1,7 +1,7 @@
; RUN: llvm-as < %s > %t1.bc
; Reject input modules without a datalayout.
-; RUN: not llvm-lto2 %t1.bc -o %t.o \
+; RUN: not llvm-lto2 run %t1.bc -o %t.o \
; RUN: -r %t1.bc,patatino,px 2>&1 | FileCheck %s
; CHECK: input module has no datalayout
diff --git a/test/tools/llvm-lto2/X86/pipeline.ll b/test/tools/llvm-lto2/X86/pipeline.ll
index d08659db3af3..dbec9ab22527 100644
--- a/test/tools/llvm-lto2/X86/pipeline.ll
+++ b/test/tools/llvm-lto2/X86/pipeline.ll
@@ -1,11 +1,15 @@
; RUN: llvm-as < %s > %t1.bc
; Try a custom pipeline
-; RUN: llvm-lto2 %t1.bc -o %t.o -save-temps \
+; RUN: llvm-lto2 run %t1.bc -o %t.o -save-temps \
; RUN: -r %t1.bc,patatino,px -opt-pipeline loweratomic \
; RUN: -aa-pipeline basic-aa
; RUN: llvm-dis < %t.o.0.4.opt.bc | FileCheck %s --check-prefix=CUSTOM
+; Try the new pass manager LTO default pipeline (make sure the option
+; is accepted).
+; RUN: llvm-lto2 run %t1.bc -o %t.o -lto-use-new-pm -r %t1.bc,patatino,px
+
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
@@ -19,13 +23,13 @@ define void @patatino() {
; CUSTOM-NEXT: }
; Check that invalid pipelines are caught as errors.
-; RUN: not llvm-lto2 %t1.bc -o %t.o \
+; RUN: not llvm-lto2 run %t1.bc -o %t.o \
; RUN: -r %t1.bc,patatino,px -opt-pipeline foogoo 2>&1 | \
; RUN: FileCheck %s --check-prefix=ERR
; ERR: LLVM ERROR: unable to parse pass pipeline description: foogoo
-; RUN: not llvm-lto2 %t1.bc -o %t.o \
+; RUN: not llvm-lto2 run %t1.bc -o %t.o \
; RUN: -r %t1.bc,patatino,px -aa-pipeline patatino \
; RUN: -opt-pipeline loweratomic 2>&1 | \
; RUN: FileCheck %s --check-prefix=AAERR
diff --git a/test/tools/llvm-lto2/errors.ll b/test/tools/llvm-lto2/errors.ll
index 25c05430c935..aa12a6717599 100644
--- a/test/tools/llvm-lto2/errors.ll
+++ b/test/tools/llvm-lto2/errors.ll
@@ -1,8 +1,8 @@
; RUN: llvm-as %s -o %t.bc
-; RUN: not llvm-lto2 -o %t2.o %t.bc 2>&1 | FileCheck --check-prefix=ERR1 %s
-; RUN: not llvm-lto2 -o %t2.o -r %t.bc,foo,p -r %t.bc,bar,p %t.bc 2>&1 | FileCheck --check-prefix=ERR2 %s
-; RUN: not llvm-lto2 -o %t2.o -r %t.bc,foo,q %t.bc 2>&1 | FileCheck --check-prefix=ERR3 %s
-; RUN: not llvm-lto2 -o %t2.o -r foo %t.bc 2>&1 | FileCheck --check-prefix=ERR4 %s
+; RUN: not llvm-lto2 run -o %t2.o %t.bc 2>&1 | FileCheck --check-prefix=ERR1 %s
+; RUN: not llvm-lto2 run -o %t2.o -r %t.bc,foo,p -r %t.bc,bar,p %t.bc 2>&1 | FileCheck --check-prefix=ERR2 %s
+; RUN: not llvm-lto2 run -o %t2.o -r %t.bc,foo,q %t.bc 2>&1 | FileCheck --check-prefix=ERR3 %s
+; RUN: not llvm-lto2 run -o %t2.o -r foo %t.bc 2>&1 | FileCheck --check-prefix=ERR4 %s
; ERR1: missing symbol resolution for {{.*}}.bc,foo
; ERR2: unused symbol resolution for {{.*}}.bc,bar
@@ -10,5 +10,6 @@
; ERR4: invalid resolution: foo
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
@foo = global i32 0
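Each -r value has the form <file>,<symbol>,<resolution letters>, and ERR3 fires above because q is not a recognized letter. Assuming the letter set accepted by llvm-lto2 here (p = prevailing, l = final definition in this linkage unit, r = linker redefined, x = visible to a regular object file), a well-formed resolution for this module would look like:

    -r %t.bc,foo,px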
diff --git a/test/tools/llvm-nm/ARM/Inputs/print-size.macho-armv7m b/test/tools/llvm-nm/ARM/Inputs/print-size.macho-armv7m
new file mode 100644
index 000000000000..0014b68add7d
--- /dev/null
+++ b/test/tools/llvm-nm/ARM/Inputs/print-size.macho-armv7m
Binary files differ
diff --git a/test/tools/llvm-nm/ARM/lit.local.cfg b/test/tools/llvm-nm/ARM/lit.local.cfg
new file mode 100644
index 000000000000..236e1d344166
--- /dev/null
+++ b/test/tools/llvm-nm/ARM/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'ARM' in config.root.targets:
+ config.unsupported = True
diff --git a/test/tools/llvm-nm/ARM/macho-print-size.test b/test/tools/llvm-nm/ARM/macho-print-size.test
new file mode 100644
index 000000000000..98784587ee76
--- /dev/null
+++ b/test/tools/llvm-nm/ARM/macho-print-size.test
@@ -0,0 +1,3 @@
+@ RUN: llvm-nm -print-size -arch armv7m %p/Inputs/print-size.macho-armv7m 2>&1 | FileCheck %s
+
+@ CHECK: warning sizes with -print-size for Mach-O files are always zero.
diff --git a/test/tools/llvm-nm/lit.local.cfg b/test/tools/llvm-nm/lit.local.cfg
index c8625f4d9d24..447a7375519f 100644
--- a/test/tools/llvm-nm/lit.local.cfg
+++ b/test/tools/llvm-nm/lit.local.cfg
@@ -1,2 +1,4 @@
if not 'X86' in config.root.targets:
config.unsupported = True
+
+config.suffixes = ['.s', '.test', '.yaml']
diff --git a/test/tools/llvm-nm/wasm/exports.yaml b/test/tools/llvm-nm/wasm/exports.yaml
new file mode 100644
index 000000000000..f219c5b3ce1d
--- /dev/null
+++ b/test/tools/llvm-nm/wasm/exports.yaml
@@ -0,0 +1,22 @@
+# RUN: yaml2obj < %s | llvm-nm - | FileCheck %s
+
+--- !WASM
+FileHeader:
+ Version: 0x00000001
+Sections:
+ - Type: TYPE
+ Signatures:
+ - ReturnType: I32
+ ParamTypes:
+ - I32
+ - Type: EXPORT
+ Exports:
+ - Name: foo
+ Kind: FUNCTION
+ Index: 0x00000000
+ - Name: bar
+ Kind: GLOBAL
+ Index: 0x00000000
+
+# CHECK: 00000001 D bar
+# CHECK: 00000000 T foo
diff --git a/test/tools/llvm-nm/wasm/imports.yaml b/test/tools/llvm-nm/wasm/imports.yaml
new file mode 100644
index 000000000000..3842d678ca7f
--- /dev/null
+++ b/test/tools/llvm-nm/wasm/imports.yaml
@@ -0,0 +1,25 @@
+# RUN: yaml2obj < %s | llvm-nm - | FileCheck %s
+
+--- !WASM
+FileHeader:
+ Version: 0x00000001
+Sections:
+ - Type: TYPE
+ Signatures:
+ - ReturnType: I32
+ ParamTypes:
+ - I32
+ - Type: IMPORT
+ Imports:
+ - Module: env
+ Field: foo
+ Kind: FUNCTION
+ SigIndex: 0
+ - Module: env
+ Field: bar
+ Kind: GLOBAL
+ GlobalType: I32
+ GlobalMutable: false
+
+# CHECK: U bar
+# CHECK: U foo
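Taken together with exports.yaml above, these two tests pin down the nm letter mapping for wasm: an exported FUNCTION prints as T, an exported GLOBAL as D, and an import of either kind as an undefined U.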
diff --git a/test/tools/llvm-objdump/AArch64/Inputs/print-armv8crypto.obj.macho-aarch64 b/test/tools/llvm-objdump/AArch64/Inputs/print-armv8crypto.obj.macho-aarch64
new file mode 100644
index 000000000000..b6ea824431c1
--- /dev/null
+++ b/test/tools/llvm-objdump/AArch64/Inputs/print-armv8crypto.obj.macho-aarch64
Binary files differ
diff --git a/test/tools/llvm-objdump/AArch64/mach-print-armv8crypto.test b/test/tools/llvm-objdump/AArch64/mach-print-armv8crypto.test
new file mode 100644
index 000000000000..e5ac0cb453e4
--- /dev/null
+++ b/test/tools/llvm-objdump/AArch64/mach-print-armv8crypto.test
@@ -0,0 +1,3 @@
+RUN: llvm-objdump -d -m -no-show-raw-insn %p/Inputs/print-armv8crypto.obj.macho-aarch64 | FileCheck %s
+
+CHECK: 0: sha1su0.4s v0, v1, v2
diff --git a/test/tools/llvm-objdump/AArch64/macho-print-mrs.test b/test/tools/llvm-objdump/AArch64/macho-print-mrs.test
index cc1d14faf8d3..c629a4de3876 100644
--- a/test/tools/llvm-objdump/AArch64/macho-print-mrs.test
+++ b/test/tools/llvm-objdump/AArch64/macho-print-mrs.test
@@ -1,3 +1,3 @@
RUN: llvm-objdump -d -m -no-show-raw-insn %p/Inputs/print-mrs.obj.macho-aarch64 | FileCheck %s
-CHECK: 0: mrs x0, S3_7_C15_C2_0
+CHECK: 0: mrs x0, CPM_IOACC_CTL_EL3
diff --git a/test/tools/llvm-objdump/AMDGPU/Inputs/source-lines.cl b/test/tools/llvm-objdump/AMDGPU/Inputs/source-lines.cl
new file mode 100644
index 000000000000..9179056318bd
--- /dev/null
+++ b/test/tools/llvm-objdump/AMDGPU/Inputs/source-lines.cl
@@ -0,0 +1,6 @@
+kernel void source_lines_test(global int *Out) {
+ int var0 = 0x777;
+ int var1 = 0x888;
+ int var2 = var0 + var1;
+ *Out = var2;
+}
diff --git a/test/tools/llvm-objdump/AMDGPU/lit.local.cfg b/test/tools/llvm-objdump/AMDGPU/lit.local.cfg
new file mode 100644
index 000000000000..2a665f06be72
--- /dev/null
+++ b/test/tools/llvm-objdump/AMDGPU/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'AMDGPU' in config.root.targets:
+ config.unsupported = True
diff --git a/test/tools/llvm-objdump/AMDGPU/source-lines.ll b/test/tools/llvm-objdump/AMDGPU/source-lines.ll
new file mode 100644
index 000000000000..94c4952e3386
--- /dev/null
+++ b/test/tools/llvm-objdump/AMDGPU/source-lines.ll
@@ -0,0 +1,109 @@
+; RUN: sed -e "s,SRC_COMPDIR,%/p/Inputs,g" %s > %t.ll
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx800 -filetype=obj -O0 -o %t.o %t.ll
+; RUN: llvm-objdump -triple=amdgcn-amd-amdhsa -mcpu=gfx800 -disassemble -line-numbers %t.o | FileCheck --check-prefix=LINE %t.ll
+; RUN: llvm-objdump -triple=amdgcn-amd-amdhsa -mcpu=gfx800 -disassemble -source %t.o | FileCheck --check-prefix=SOURCE %t.ll
+
+; Prologue.
+; LINE: source_lines_test:
+; LINE-NEXT: ; {{.*}}source-lines.cl:1
+; Kernel.
+; LINE: ; {{.*}}source-lines.cl:2
+; LINE: v_mov_b32_e32 v{{[0-9]+}}, 0x777
+; LINE: ; {{.*}}source-lines.cl:3
+; LINE: v_mov_b32_e32 v{{[0-9]+}}, 0x888
+; LINE: ; {{.*}}source-lines.cl:4
+; LINE: v_add_i32_e32
+; LINE: ; {{.*}}source-lines.cl:5
+; LINE: flat_store_dword
+; Epilogue.
+; LINE: ; {{.*}}source-lines.cl:6
+; LINE-NEXT: s_endpgm
+
+; Prologue.
+; SOURCE: source_lines_test:
+; SOURCE-NEXT: ; kernel void source_lines_test(global int *Out) {
+; Kernel.
+; SOURCE: ; int var0 = 0x777;
+; SOURCE: v_mov_b32_e32 v{{[0-9]+}}, 0x777
+; SOURCE: ; int var1 = 0x888;
+; SOURCE: v_mov_b32_e32 v{{[0-9]+}}, 0x888
+; SOURCE: ; int var2 = var0 + var1;
+; SOURCE: v_add_i32_e32
+; SOURCE: ; *Out = var2;
+; SOURCE: flat_store_dword
+; Epilogue.
+; SOURCE: ; }
+; SOURCE-NEXT: s_endpgm
+
+; ModuleID = 'source-lines.cl'
+source_filename = "source-lines.cl"
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+target triple = "amdgcn-amd-amdhsa"
+
+; Function Attrs: noinline nounwind
+define amdgpu_kernel void @source_lines_test(i32 addrspace(1)* %Out) #0 !dbg !7 !kernel_arg_addr_space !12 !kernel_arg_access_qual !13 !kernel_arg_type !14 !kernel_arg_base_type !14 !kernel_arg_type_qual !15 {
+entry:
+ %Out.addr = alloca i32 addrspace(1)*, align 4
+ %var0 = alloca i32, align 4
+ %var1 = alloca i32, align 4
+ %var2 = alloca i32, align 4
+ store i32 addrspace(1)* %Out, i32 addrspace(1)** %Out.addr, align 4
+ call void @llvm.dbg.declare(metadata i32 addrspace(1)** %Out.addr, metadata !16, metadata !17), !dbg !18
+ call void @llvm.dbg.declare(metadata i32* %var0, metadata !19, metadata !17), !dbg !20
+ store i32 1911, i32* %var0, align 4, !dbg !20
+ call void @llvm.dbg.declare(metadata i32* %var1, metadata !21, metadata !17), !dbg !22
+ store i32 2184, i32* %var1, align 4, !dbg !22
+ call void @llvm.dbg.declare(metadata i32* %var2, metadata !23, metadata !17), !dbg !24
+ %0 = load i32, i32* %var0, align 4, !dbg !25
+ %1 = load i32, i32* %var1, align 4, !dbg !26
+ %add = add nsw i32 %0, %1, !dbg !27
+ store i32 %add, i32* %var2, align 4, !dbg !24
+ %2 = load i32, i32* %var2, align 4, !dbg !28
+ %3 = load i32 addrspace(1)*, i32 addrspace(1)** %Out.addr, align 4, !dbg !29
+ store i32 %2, i32 addrspace(1)* %3, align 4, !dbg !30
+ ret void, !dbg !31
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+attributes #0 = { noinline nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="gfx700" "target-features"="+fp64-fp16-denormals,-fp32-denormals" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!opencl.ocl.version = !{!3}
+!llvm.module.flags = !{!4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "source-lines.cl", directory: "SRC_COMPDIR")
+!2 = !{}
+!3 = !{i32 1, i32 0}
+!4 = !{i32 2, !"Dwarf Version", i32 2}
+!5 = !{i32 2, !"Debug Info Version", i32 3}
+!6 = !{!"clang version 5.0.0"}
+!7 = distinct !DISubprogram(name: "source_lines_test", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!8 = !DISubroutineType(types: !9)
+!9 = !{null, !10}
+!10 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !11, size: 64)
+!11 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!12 = !{i32 1}
+!13 = !{!"none"}
+!14 = !{!"int*"}
+!15 = !{!""}
+!16 = !DILocalVariable(name: "Out", arg: 1, scope: !7, file: !1, line: 1, type: !10)
+!17 = !DIExpression()
+!18 = !DILocation(line: 1, column: 43, scope: !7)
+!19 = !DILocalVariable(name: "var0", scope: !7, file: !1, line: 2, type: !11)
+!20 = !DILocation(line: 2, column: 7, scope: !7)
+!21 = !DILocalVariable(name: "var1", scope: !7, file: !1, line: 3, type: !11)
+!22 = !DILocation(line: 3, column: 7, scope: !7)
+!23 = !DILocalVariable(name: "var2", scope: !7, file: !1, line: 4, type: !11)
+!24 = !DILocation(line: 4, column: 7, scope: !7)
+!25 = !DILocation(line: 4, column: 14, scope: !7)
+!26 = !DILocation(line: 4, column: 21, scope: !7)
+!27 = !DILocation(line: 4, column: 19, scope: !7)
+!28 = !DILocation(line: 5, column: 10, scope: !7)
+!29 = !DILocation(line: 5, column: 4, scope: !7)
+!30 = !DILocation(line: 5, column: 8, scope: !7)
+!31 = !DILocation(line: 6, column: 1, scope: !7)
diff --git a/test/tools/llvm-objdump/ARM/Inputs/divs.macho-armv7s b/test/tools/llvm-objdump/ARM/Inputs/divs.macho-armv7s
new file mode 100644
index 000000000000..b877d2d5180d
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/Inputs/divs.macho-armv7s
Binary files differ
diff --git a/test/tools/llvm-objdump/ARM/macho-nomcpu-armv7s.test b/test/tools/llvm-objdump/ARM/macho-nomcpu-armv7s.test
new file mode 100644
index 000000000000..ff7daa8c2142
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/macho-nomcpu-armv7s.test
@@ -0,0 +1,3 @@
+@ RUN: llvm-objdump -m -d %p/Inputs/divs.macho-armv7s | FileCheck %s
+
+@ CHECK: 10 f0 10 e7 sdiv r0, r0, r0
diff --git a/test/tools/llvm-objdump/ARM/v5t-subarch.s b/test/tools/llvm-objdump/ARM/v5t-subarch.s
new file mode 100644
index 000000000000..c9ce2cf6cebd
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/v5t-subarch.s
@@ -0,0 +1,10 @@
+@ RUN: llvm-mc < %s -triple armv5t-elf -filetype=obj | llvm-objdump -triple=arm -d - | FileCheck %s
+
+.arch armv5t
+
+clz:
+clz r0, r1
+
+@ CHECK-LABEL: clz
+@ CHECK: 11 0f 6f e1
+
diff --git a/test/tools/llvm-objdump/ARM/v5te-subarch.s b/test/tools/llvm-objdump/ARM/v5te-subarch.s
new file mode 100644
index 000000000000..87e8548fb014
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/v5te-subarch.s
@@ -0,0 +1,10 @@
+@ RUN: llvm-mc < %s -triple armv5te-elf -filetype=obj | llvm-objdump -triple=arm -d - | FileCheck %s
+
+.arch armv5te
+
+strd:
+strd r0, r1, [r2, +r3]
+
+@ CHECK-LABEL: strd
+@ CHECK: f3 00 82 e1 strd r0, r1, [r2, r3]
+
diff --git a/test/tools/llvm-objdump/ARM/v5tej-subarch.s b/test/tools/llvm-objdump/ARM/v5tej-subarch.s
new file mode 100644
index 000000000000..42dc87389a34
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/v5tej-subarch.s
@@ -0,0 +1,7 @@
+@ RUN: llvm-mc < %s -triple armv5tej-elf -filetype=obj | llvm-objdump -triple=arm -d - | FileCheck %s
+
+bxj:
+bxj r0
+
+@ CHECK-LABEL: bxj
+@ CHECK: 20 ff 2f e1 bxj r0
diff --git a/test/tools/llvm-objdump/ARM/v6-neg-subfeatures.s b/test/tools/llvm-objdump/ARM/v6-neg-subfeatures.s
new file mode 100644
index 000000000000..1a8d8f283336
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/v6-neg-subfeatures.s
@@ -0,0 +1,10 @@
+@ RUN: llvm-mc < %s -triple armv6 -mattr=+vfp2 -filetype=obj | llvm-objdump -triple=arm -d - | FileCheck %s
+
+.eabi_attribute Tag_FP_arch, 1 // VFP2
+
+vfp2:
+ vadd.f32 s0, s1, s2
+
+@CHECK-LABEL: vfp2
+@CHECK-NOT: 81 0a 30 ee vadd.f32 s0, s1, s2
+@CHECK: unknown
diff --git a/test/tools/llvm-objdump/ARM/v6-subarch.s b/test/tools/llvm-objdump/ARM/v6-subarch.s
new file mode 100644
index 000000000000..c70761e7c8c5
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/v6-subarch.s
@@ -0,0 +1,9 @@
+@ RUN: llvm-mc < %s -triple armv6-elf -filetype=obj | llvm-objdump -triple=arm -d - | FileCheck %s
+
+.arch armv6
+
+umaal:
+umaal r0, r1, r2, r3
+
+@ CHECK-LABEL:umaal
+@ CHECK: 92 03 41 e0 umaal r0, r1, r2, r3
diff --git a/test/tools/llvm-objdump/ARM/v6-subfeatures.s b/test/tools/llvm-objdump/ARM/v6-subfeatures.s
new file mode 100644
index 000000000000..e8e806740d19
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/v6-subfeatures.s
@@ -0,0 +1,9 @@
+@ RUN: llvm-mc < %s -triple armv6 -mattr=+vfp2 -filetype=obj | llvm-objdump -triple=arm -d - | FileCheck %s
+
+.eabi_attribute Tag_FP_arch, 2 // VFP2
+
+vfp2:
+ vadd.f32 s0, s1, s2
+
+@CHECK-LABEL: vfp2
+@CHECK: 81 0a 30 ee vadd.f32 s0, s1, s2
diff --git a/test/tools/llvm-objdump/ARM/v6k-subarch.s b/test/tools/llvm-objdump/ARM/v6k-subarch.s
new file mode 100644
index 000000000000..8df4ce5e2576
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/v6k-subarch.s
@@ -0,0 +1,9 @@
+@ RUN: llvm-mc < %s -triple armv6k-elf -filetype=obj | llvm-objdump -triple=arm -d - | FileCheck %s
+
+.arch armv6k
+
+clrex:
+clrex
+
+@ CHECK-LABEL: clrex
+@ CHECK: 1f f0 7f f5 clrex
diff --git a/test/tools/llvm-objdump/ARM/v6m-subarch.s b/test/tools/llvm-objdump/ARM/v6m-subarch.s
new file mode 100644
index 000000000000..f4c56989c17e
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/v6m-subarch.s
@@ -0,0 +1,9 @@
+@ RUN: llvm-mc < %s -triple armv6m-elf -filetype=obj | llvm-objdump -triple=thumb -d - | FileCheck %s
+
+.arch armv6m
+
+dmb:
+dmb
+
+@ CHECK-LABEL: dmb
+@ CHECK: bf f3 5f 8f dmb sy
diff --git a/test/tools/llvm-objdump/ARM/v6t2-subarch.s b/test/tools/llvm-objdump/ARM/v6t2-subarch.s
new file mode 100644
index 000000000000..36e134342972
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/v6t2-subarch.s
@@ -0,0 +1,10 @@
+@ RUN: llvm-mc < %s -triple armv6t2-elf -filetype=obj | llvm-objdump -triple=thumb -d - | FileCheck %s
+
+.arch armv6t2
+
+.thumb
+umaalt2:
+umaal r0, r1, r2, r3
+
+@ CHECK-LABEL: umaalt2
+@ CHECK: e2 fb 63 01 umaal r0, r1, r2, r3
diff --git a/test/tools/llvm-objdump/ARM/v7a-neg-subfeature.s b/test/tools/llvm-objdump/ARM/v7a-neg-subfeature.s
new file mode 100644
index 000000000000..8083e5b0eee0
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/v7a-neg-subfeature.s
@@ -0,0 +1,44 @@
+@ RUN: llvm-mc < %s -triple armv7a -mattr=+vfp3,+neon,+fp16,+hwdiv-arm,+hwdiv -filetype=obj | llvm-objdump -triple=arm -d - | FileCheck %s
+@ RUN: llvm-mc < %s -triple armv7a -mattr=+vfp3,+neon,+fp16,+hwdiv-arm,+hwdiv -filetype=obj | llvm-objdump -triple=thumb -d - | FileCheck %s --check-prefix=CHECK-THUMB
+
+.eabi_attribute Tag_FP_arch, 0 // disallow vfp
+
+vfp2:
+ vmla.f32 s0, s1, s2
+
+@CHECK-LABEL: vfp2
+@CHECK-NOT: 81 0a 00 ee vmla.f32 s0, s1, s2
+@CHECK: unknown
+
+vfp3:
+ vmov.f32 s0, #0.5
+
+@CHECK-LABEL: vfp3
+@CHECK-NOT: 00 0a b6 ee vmov.f32 s0, #5.000000e-01
+
+neon:
+ vmla.f32 d0, d1, d2
+
+@CHECK-LABEL: neon
+@CHECK-NOT: 12 0d 01 f2 vmla.f32 d0, d1, d2
+@CHECK: unknown
+
+fp16:
+ vcvt.f32.f16 q0, d2
+
+@CHECK-LABEL: fp16
+@CHECK-NOT: 02 07 b6 f3 vcvt.f32.f16 q0, d2
+
+div_arm:
+ udiv r0, r1, r2
+
+@CHECK-LABEL: div_arm
+@CHECK-NOT: 11 f2 30 e7 udiv r0, r1, r2
+@CHECK: unknown
+
+.thumb
+div_thumb:
+ udiv r0, r1, r2
+
+@CHECK-LABEL: div_thumb
+@CHECK-THUMB-NOT: b1 fb f2 f0 udiv r0, r1, r2
diff --git a/test/tools/llvm-objdump/ARM/v7a-subfeature.s b/test/tools/llvm-objdump/ARM/v7a-subfeature.s
new file mode 100644
index 000000000000..f43554579a36
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/v7a-subfeature.s
@@ -0,0 +1,36 @@
+@ RUN: llvm-mc < %s -triple armv7a -mattr=+vfp3,+neon,+fp16,+hwdiv-arm -filetype=obj | llvm-objdump -triple=arm -d - | FileCheck %s
+
+.eabi_attribute Tag_FP_arch, 3 // VFP3
+.eabi_attribute Tag_Advanced_SIMD_arch, 2 // SIMDv1 with fp16
+.eabi_attribute Tag_DIV_use, 2 // permitted
+
+vfp2:
+ vmla.f32 s0, s1, s2
+
+@CHECK-LABEL: vfp2
+@CHECK: 81 0a 00 ee vmla.f32 s0, s1, s2
+
+vfp3:
+ vmov.f32 s0, #0.5
+
+@CHECK-LABEL: vfp3
+@CHECK: 00 0a b6 ee vmov.f32 s0, #5.000000e-01
+
+neon:
+ vmla.f32 d0, d1, d2
+
+@CHECK-LABEL: neon
+@CHECK: 12 0d 01 f2 vmla.f32 d0, d1, d2
+
+fp16:
+ vcvt.f32.f16 q0, d2
+
+@CHECK-LABEL: fp16
+@CHECK: 02 07 b6 f3 vcvt.f32.f16 q0, d2
+
+div:
+ udiv r0, r1, r2
+
+@CHECK-LABEL: div
+@CHECK: 11 f2 30 e7 udiv r0, r1, r2
+
diff --git a/test/tools/llvm-objdump/ARM/v7m-neg-subfeatures.s b/test/tools/llvm-objdump/ARM/v7m-neg-subfeatures.s
new file mode 100644
index 000000000000..b3a79c7d5c2b
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/v7m-neg-subfeatures.s
@@ -0,0 +1,18 @@
+@ RUN: llvm-mc < %s -triple armv7m -mattr=+vfp4 -filetype=obj | llvm-objdump -triple=thumb -d - | FileCheck %s
+
+.eabi_attribute Tag_CPU_arch, 10 // v7
+.eabi_attribute Tag_FP_arch, 0 // VFP4
+
+.thumb
+vfp2:
+ vmla.f32 s0, s1, s2
+
+@CHECK-LABEL: vfp2
+@CHECK-NOT: 00 ee 81 0a vmla.f32 s0, s1, s2
+
+.thumb
+vfp4:
+ vmov.f32 s0, #0.5
+
+@CHECK-LABEL:vfp4
+@CHECK-NOT: b6 ee 00 0a vmov.f32 s0, #5.000000e-01
diff --git a/test/tools/llvm-objdump/ARM/v7m-subarch.s b/test/tools/llvm-objdump/ARM/v7m-subarch.s
new file mode 100644
index 000000000000..8f6ff57e0ff3
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/v7m-subarch.s
@@ -0,0 +1,10 @@
+@ RUN: llvm-mc < %s -triple armv7m-elf -filetype=obj | llvm-objdump -triple=thumb -d - | FileCheck %s
+
+.arch armv7m
+
+umlal:
+umlal r0, r1, r2, r3
+
+@ CHECK-LABEL: umlal
+@ CHECK: e2 fb 03 01 umlal r0, r1, r2, r3
+
diff --git a/test/tools/llvm-objdump/ARM/v7m-subfeatures.s b/test/tools/llvm-objdump/ARM/v7m-subfeatures.s
new file mode 100644
index 000000000000..c7a40af8ae90
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/v7m-subfeatures.s
@@ -0,0 +1,26 @@
+@ RUN: llvm-mc < %s -triple armv7m -mattr=+vfp4 -filetype=obj | llvm-objdump -triple=thumb -d - | FileCheck %s
+
+.eabi_attribute Tag_CPU_arch, 10 // v7
+.eabi_attribute Tag_CPU_arch_profile, 0x4D // 'M' profile
+.eabi_attribute Tag_FP_arch, 5 // VFP4
+
+.thumb
+vfp2:
+ vmla.f32 s0, s1, s2
+
+@CHECK-LABEL: vfp2
+@CHECK: 00 ee 81 0a vmla.f32 s0, s1, s2
+
+.thumb
+vfp4:
+ vmov.f32 s0, #0.5
+
+@CHECK-LABEL: vfp4
+@CHECK: b6 ee 00 0a vmov.f32 s0, #5.000000e-01
+
+.thumb
+div:
+ udiv r0, r1, r2
+
+@CHECK-LABEL: div
+@CHECK: b1 fb f2 f0 udiv r0, r1, r2
diff --git a/test/tools/llvm-objdump/ARM/v7r-subfeatures.s b/test/tools/llvm-objdump/ARM/v7r-subfeatures.s
new file mode 100644
index 000000000000..bbe40a193879
--- /dev/null
+++ b/test/tools/llvm-objdump/ARM/v7r-subfeatures.s
@@ -0,0 +1,20 @@
+@ RUN: llvm-mc < %s -triple armv7r -mattr=+hwdiv-arm -filetype=obj | llvm-objdump -triple=thumb -d - | FileCheck %s
+@ RUN: llvm-mc < %s -triple armv7r -mattr=+hwdiv-arm -filetype=obj | llvm-objdump -triple=arm -d - | FileCheck %s --check-prefix=CHECK-ARM
+
+.eabi_attribute Tag_CPU_arch, 10 // v7
+.eabi_attribute Tag_CPU_arch_profile, 0x52 // 'R' profile
+
+.arm
+div_arm:
+ udiv r0, r1, r2
+
+@CHECK-LABEL: div_arm
+@CHECK-NOT: udiv r0, r1, r2
+@CHECK-ARM-NOT: udiv r0, r1, r2
+
+.thumb
+div_thumb:
+ udiv r0, r1, r2
+
+@CHECK-LABEL: div_thumb
+@CHECK: b1 fb f2 f0 udiv r0, r1, r2
diff --git a/test/tools/llvm-objdump/Inputs/macho-bind-add-addr-imm-scaled b/test/tools/llvm-objdump/Inputs/macho-bind-add-addr-imm-scaled
new file mode 100755
index 000000000000..2180437408c9
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-bind-add-addr-imm-scaled
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-bind-add_addr_uleb b/test/tools/llvm-objdump/Inputs/macho-bind-add_addr_uleb
new file mode 100755
index 000000000000..fc950db155a6
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-bind-add_addr_uleb
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-bind-bad-opcode-value b/test/tools/llvm-objdump/Inputs/macho-bind-bad-opcode-value
new file mode 100755
index 000000000000..c9195314c8e1
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-bind-bad-opcode-value
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-bind-bind-add-addr-uleb b/test/tools/llvm-objdump/Inputs/macho-bind-bind-add-addr-uleb
new file mode 100755
index 000000000000..11abd6246b8d
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-bind-bind-add-addr-uleb
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-bind-do-bind-no-segIndex b/test/tools/llvm-objdump/Inputs/macho-bind-do-bind-no-segIndex
new file mode 100755
index 000000000000..cc4f09708c42
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-bind-do-bind-no-segIndex
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-bind-dylib-ordinal-uleb b/test/tools/llvm-objdump/Inputs/macho-bind-dylib-ordinal-uleb
new file mode 100755
index 000000000000..7769195d44c9
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-bind-dylib-ordinal-uleb
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-bind-dylib-ordinal-uleb-malformed-uleb128 b/test/tools/llvm-objdump/Inputs/macho-bind-dylib-ordinal-uleb-malformed-uleb128
new file mode 100755
index 000000000000..0d5410e976e6
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-bind-dylib-ordinal-uleb-malformed-uleb128
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-bind-dylib-ordinal-uleb-too-big b/test/tools/llvm-objdump/Inputs/macho-bind-dylib-ordinal-uleb-too-big
new file mode 100755
index 000000000000..40564b5a262f
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-bind-dylib-ordinal-uleb-too-big
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-bind-dylib-special-imm b/test/tools/llvm-objdump/Inputs/macho-bind-dylib-special-imm
new file mode 100755
index 000000000000..09bf10ded896
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-bind-dylib-special-imm
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-bind-seg-too-big b/test/tools/llvm-objdump/Inputs/macho-bind-seg-too-big
new file mode 100755
index 000000000000..20be9957919e
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-bind-seg-too-big
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-bind-segoff-too-big b/test/tools/llvm-objdump/Inputs/macho-bind-segoff-too-big
new file mode 100755
index 000000000000..3f8e5ee8384c
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-bind-segoff-too-big
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-bind-set-addend-sleb b/test/tools/llvm-objdump/Inputs/macho-bind-set-addend-sleb
new file mode 100755
index 000000000000..726b96d3de30
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-bind-set-addend-sleb
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-bind-set-symbol b/test/tools/llvm-objdump/Inputs/macho-bind-set-symbol
new file mode 100755
index 000000000000..b8201c3ad198
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-bind-set-symbol
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-bind-set-type-imm b/test/tools/llvm-objdump/Inputs/macho-bind-set-type-imm
new file mode 100755
index 000000000000..002057e6b86f
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-bind-set-type-imm
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-bind-uleb-times-skipping-uleb b/test/tools/llvm-objdump/Inputs/macho-bind-uleb-times-skipping-uleb
new file mode 100755
index 000000000000..81ab8130f66a
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-bind-uleb-times-skipping-uleb
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-do-bind-no-dylib-ordinal b/test/tools/llvm-objdump/Inputs/macho-do-bind-no-dylib-ordinal
new file mode 100755
index 000000000000..77daede78684
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-do-bind-no-dylib-ordinal
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-do-bind-no-symbol b/test/tools/llvm-objdump/Inputs/macho-do-bind-no-symbol
new file mode 100755
index 000000000000..0592b9bfe407
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-do-bind-no-symbol
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-lazy-do-bind-add-addr-imm-scaled b/test/tools/llvm-objdump/Inputs/macho-lazy-do-bind-add-addr-imm-scaled
new file mode 100755
index 000000000000..a7d5abeef743
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-lazy-do-bind-add-addr-imm-scaled
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-lazy-do-bind-uleb-times-skipping-uleb b/test/tools/llvm-objdump/Inputs/macho-lazy-do-bind-uleb-times-skipping-uleb
new file mode 100755
index 000000000000..1f0288342c48
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-lazy-do-bind-uleb-times-skipping-uleb
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-lazy-do_bind_add_addr_uleb b/test/tools/llvm-objdump/Inputs/macho-lazy-do_bind_add_addr_uleb
new file mode 100755
index 000000000000..63f034688ff2
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-lazy-do_bind_add_addr_uleb
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-rebase-add-addr-imm-scaled b/test/tools/llvm-objdump/Inputs/macho-rebase-add-addr-imm-scaled
new file mode 100755
index 000000000000..6b0c1bd4566d
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-rebase-add-addr-imm-scaled
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-rebase-add-addr-uleb b/test/tools/llvm-objdump/Inputs/macho-rebase-add-addr-uleb
new file mode 100755
index 000000000000..e409590dc2dc
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-rebase-add-addr-uleb
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-rebase-add-addr-uleb-too-big b/test/tools/llvm-objdump/Inputs/macho-rebase-add-addr-uleb-too-big
new file mode 100755
index 000000000000..68b72ec6a3b5
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-rebase-add-addr-uleb-too-big
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-rebase-bad-opcode-value b/test/tools/llvm-objdump/Inputs/macho-rebase-bad-opcode-value
new file mode 100755
index 000000000000..59e0d4fe619b
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-rebase-bad-opcode-value
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-rebase-imm-times b/test/tools/llvm-objdump/Inputs/macho-rebase-imm-times
new file mode 100755
index 000000000000..be2286baf6b3
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-rebase-imm-times
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-rebase-seg-too-big b/test/tools/llvm-objdump/Inputs/macho-rebase-seg-too-big
new file mode 100755
index 000000000000..12b52328a968
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-rebase-seg-too-big
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-rebase-segoff-too-big b/test/tools/llvm-objdump/Inputs/macho-rebase-segoff-too-big
new file mode 100755
index 000000000000..4dfb19dea80f
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-rebase-segoff-too-big
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-rebase-set-type-imm b/test/tools/llvm-objdump/Inputs/macho-rebase-set-type-imm
new file mode 100755
index 000000000000..947db0ee915f
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-rebase-set-type-imm
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-rebase-uleb-malformed-uleb128 b/test/tools/llvm-objdump/Inputs/macho-rebase-uleb-malformed-uleb128
new file mode 100755
index 000000000000..045f425b2233
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-rebase-uleb-malformed-uleb128
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-rebase-uleb-times b/test/tools/llvm-objdump/Inputs/macho-rebase-uleb-times
new file mode 100755
index 000000000000..c12f256f6600
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-rebase-uleb-times
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-rebase-uleb-times-skipping-uleb b/test/tools/llvm-objdump/Inputs/macho-rebase-uleb-times-skipping-uleb
new file mode 100755
index 000000000000..5bec8ca11578
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-rebase-uleb-times-skipping-uleb
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-weak-bind-set-dylib-ordinal-imm b/test/tools/llvm-objdump/Inputs/macho-weak-bind-set-dylib-ordinal-imm
new file mode 100755
index 000000000000..1d8785c55d03
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-weak-bind-set-dylib-ordinal-imm
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-weak-bind-set-dylib-ordinal-uleb b/test/tools/llvm-objdump/Inputs/macho-weak-bind-set-dylib-ordinal-uleb
new file mode 100755
index 000000000000..bf7babc09e6f
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-weak-bind-set-dylib-ordinal-uleb
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/macho-weak-bind-set-dylib-special-imm b/test/tools/llvm-objdump/Inputs/macho-weak-bind-set-dylib-special-imm
new file mode 100755
index 000000000000..d13f6ec981eb
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/macho-weak-bind-set-dylib-special-imm
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/test.wasm b/test/tools/llvm-objdump/Inputs/test.wasm
index b24ac79c7163..d3906eeaf6f8 100644
--- a/test/tools/llvm-objdump/Inputs/test.wasm
+++ b/test/tools/llvm-objdump/Inputs/test.wasm
Binary files differ
diff --git a/test/tools/llvm-objdump/Mips/disassemble-all.test b/test/tools/llvm-objdump/Mips/disassemble-all.test
new file mode 100644
index 000000000000..4554a0e030ae
--- /dev/null
+++ b/test/tools/llvm-objdump/Mips/disassemble-all.test
@@ -0,0 +1,16 @@
+# RUN: yaml2obj %s | llvm-objdump -D -
+
+# Test that -D does not crash when llvm-objdump encounters a section whose
+# size is not a multiple of the size of an instruction.
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_EXEC
+ Machine: EM_MIPS
+Sections:
+ - Name: .note.llvm.crash
+ Type: SHT_NOTE
+ Address: 0x0
+ Content: 002E746578
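
The crash mode this test guards against is a fixed-width disassembler fetching a full instruction from a section whose tail is shorter than one encoding. A minimal C++ sketch of that guard, using hypothetical names rather than llvm-objdump's actual internals:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr size_t kInsnSize = 4; // fixed-width MIPS encoding

void disassembleSection(const std::vector<uint8_t> &Bytes) {
  size_t Offset = 0;
  // Guard against a ragged tail: only decode while a full instruction remains.
  while (Bytes.size() - Offset >= kInsnSize) {
    uint32_t Insn = Bytes[Offset] | (Bytes[Offset + 1] << 8) |
                    (Bytes[Offset + 2] << 16) |
                    (uint32_t(Bytes[Offset + 3]) << 24); // little-endian fetch
    std::printf("%zx: %08x\n", Offset, Insn);
    Offset += kInsnSize;
  }
  if (Offset != Bytes.size())
    std::printf("%zx: <%zu trailing bytes>\n", Offset, Bytes.size() - Offset);
}
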
diff --git a/test/tools/llvm-objdump/Mips/lit.local.cfg b/test/tools/llvm-objdump/Mips/lit.local.cfg
new file mode 100644
index 000000000000..a3183a25afaa
--- /dev/null
+++ b/test/tools/llvm-objdump/Mips/lit.local.cfg
@@ -0,0 +1,3 @@
+if 'Mips' not in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/tools/llvm-objdump/X86/Inputs/Objc2.64bit.obj.dylib-x86_64 b/test/tools/llvm-objdump/X86/Inputs/Objc2.64bit.obj.dylib-x86_64
new file mode 100755
index 000000000000..07d465bfb633
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/Inputs/Objc2.64bit.obj.dylib-x86_64
Binary files differ
diff --git a/test/tools/llvm-objdump/X86/Inputs/macho-invalid-bind-entry b/test/tools/llvm-objdump/X86/Inputs/macho-invalid-bind-entry
new file mode 100644
index 000000000000..afdd0838c911
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/Inputs/macho-invalid-bind-entry
Binary files differ
diff --git a/test/tools/llvm-objdump/X86/Inputs/nofirst-symbol.macho-x86_64 b/test/tools/llvm-objdump/X86/Inputs/nofirst-symbol.macho-x86_64
new file mode 100644
index 000000000000..4d1ef25e6769
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/Inputs/nofirst-symbol.macho-x86_64
Binary files differ
diff --git a/test/tools/llvm-objdump/X86/Inputs/note.macho-x86 b/test/tools/llvm-objdump/X86/Inputs/note.macho-x86
new file mode 100644
index 000000000000..588a09d1e343
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/Inputs/note.macho-x86
Binary files differ
diff --git a/test/tools/llvm-objdump/X86/Inputs/openbsd-phdrs.elf-x86-64 b/test/tools/llvm-objdump/X86/Inputs/openbsd-phdrs.elf-x86-64
new file mode 100644
index 000000000000..dab75bf96692
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/Inputs/openbsd-phdrs.elf-x86-64
Binary files differ
diff --git a/test/tools/llvm-objdump/X86/Inputs/phdr-note.elf-x86-64 b/test/tools/llvm-objdump/X86/Inputs/phdr-note.elf-x86-64
new file mode 100644
index 000000000000..2825c6840ca8
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/Inputs/phdr-note.elf-x86-64
Binary files differ
diff --git a/test/tools/llvm-objdump/X86/Inputs/phdrs.elf-x86-64 b/test/tools/llvm-objdump/X86/Inputs/phdrs.elf-x86-64
new file mode 100644
index 000000000000..078574ab5801
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/Inputs/phdrs.elf-x86-64
Binary files differ
diff --git a/test/tools/llvm-objdump/X86/Inputs/stripped-elf.so b/test/tools/llvm-objdump/X86/Inputs/stripped-elf.so
new file mode 100644
index 000000000000..b88b77501d9f
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/Inputs/stripped-elf.so
Binary files differ
diff --git a/test/tools/llvm-objdump/X86/Inputs/stub-nosyms.macho-x86_64 b/test/tools/llvm-objdump/X86/Inputs/stub-nosyms.macho-x86_64
new file mode 100644
index 000000000000..a7f122b55084
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/Inputs/stub-nosyms.macho-x86_64
Binary files differ
diff --git a/test/tools/llvm-objdump/X86/Inputs/thread.macho-i386 b/test/tools/llvm-objdump/X86/Inputs/thread.macho-i386
new file mode 100755
index 000000000000..f4b805d9b032
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/Inputs/thread.macho-i386
Binary files differ
diff --git a/test/tools/llvm-objdump/X86/invalid-macho-build-version.yaml b/test/tools/llvm-objdump/X86/invalid-macho-build-version.yaml
new file mode 100644
index 000000000000..a81bb2dea600
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/invalid-macho-build-version.yaml
@@ -0,0 +1,44 @@
+# RUN: yaml2obj %s | not llvm-objdump -macho -private-headers -
+
+--- !mach-o
+FileHeader:
+ magic: 0xFEEDFACF
+ cputype: 0x01000007
+ cpusubtype: 0x00000003
+ filetype: 0x00000004
+ ncmds: 2
+ sizeofcmds: 192
+ flags: 0x00000000
+ reserved: 0
+LoadCommands:
+ - cmd: LC_SEGMENT_64
+ cmdsize: 152
+ segname: __TEXT
+ vmaddr: 4294967296
+ vmsize: 8192
+ fileoff: 0
+ filesize: 3099
+ maxprot: 7
+ initprot: 5
+ nsects: 1
+ flags: 0
+ Sections:
+ - sectname: __text
+ segname: __TEXT
+ addr: 0x0000000100001160
+ size: 3099
+ offset: 0x00001160
+ align: 4
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x80000400
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - cmd: LC_BUILD_VERSION
+ cmdsize: 80
+ platform: 2
+ minos: 0x00080000
+ sdk: 0x00090000
+ ntools: 0
+...
diff --git a/test/tools/llvm-objdump/X86/macho-build-version.yaml b/test/tools/llvm-objdump/X86/macho-build-version.yaml
new file mode 100644
index 000000000000..acefb6956d11
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/macho-build-version.yaml
@@ -0,0 +1,57 @@
+# RUN: yaml2obj %s | llvm-objdump -macho -private-headers - | FileCheck %s
+
+--- !mach-o
+FileHeader:
+ magic: 0xFEEDFACF
+ cputype: 0x01000007
+ cpusubtype: 0x00000003
+ filetype: 0x00000004
+ ncmds: 2
+ sizeofcmds: 192
+ flags: 0x00000000
+ reserved: 0
+LoadCommands:
+ - cmd: LC_SEGMENT_64
+ cmdsize: 152
+ segname: __TEXT
+ vmaddr: 4294967296
+ vmsize: 8192
+ fileoff: 0
+ filesize: 3099
+ maxprot: 7
+ initprot: 5
+ nsects: 1
+ flags: 0
+ Sections:
+ - sectname: __text
+ segname: __TEXT
+ addr: 0x0000000100001160
+ size: 3099
+ offset: 0x00001160
+ align: 4
+ reloff: 0x00000000
+ nreloc: 0
+ flags: 0x80000400
+ reserved1: 0x00000000
+ reserved2: 0x00000000
+ reserved3: 0x00000000
+ - cmd: LC_BUILD_VERSION
+ cmdsize: 32
+ platform: 2
+ minos: 0x00080000
+ sdk: 0x00090000
+ ntools: 1
+ Tools:
+ - tool: 1
+ version: 0x00000000
+...
+
+CHECK: Load command 1
+CHECK-NEXT: cmd LC_BUILD_VERSION
+CHECK-NEXT: cmdsize 32
+CHECK-NEXT: platform ios
+CHECK-NEXT: sdk 9.0
+CHECK-NEXT: minos 8.0
+CHECK-NEXT: ntools 1
+CHECK-NEXT: tool clang
+CHECK-NEXT: version n/a
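
The CHECK lines rely on Mach-O's packed version encoding: a 32-bit xxxx.yy.zz value with the major version in the high 16 bits and minor and patch in one byte each, so minos 0x00080000 prints as 8.0 and sdk 0x00090000 as 9.0. A small sketch of the decoding (a hypothetical helper, not the tool's code):

#include <cstdint>
#include <cstdio>

// Decode a packed xxxx.yy.zz version; the patch component is printed
// only when nonzero, matching the "8.0" / "9.0" forms checked above.
void printVersion(uint32_t V) {
  uint32_t Major = V >> 16, Minor = (V >> 8) & 0xFF, Patch = V & 0xFF;
  if (Patch)
    std::printf("%u.%u.%u\n", Major, Minor, Patch);
  else
    std::printf("%u.%u\n", Major, Minor);
}
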
diff --git a/test/tools/llvm-objdump/X86/macho-info-plist-nofollow.test b/test/tools/llvm-objdump/X86/macho-info-plist-nofollow.test
new file mode 100644
index 000000000000..12ad166c5a2f
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/macho-info-plist-nofollow.test
@@ -0,0 +1,10 @@
+# RUN: llvm-mc < %s -triple x86_64-apple-darwin -filetype=obj | llvm-objdump -m -info-plist - | FileCheck %s
+
+.section __TEXT, __info_plist
+.ascii "This is the (__TEXT,__info_plist) section\n"
+.section __TEXT, __follow
+.asciz "This is the (__TEXT,__follow) section\n"
+
+# CHECK: Contents of (__TEXT,__info_plist) section
+# CHECK: This is the (__TEXT,__info_plist) section
+# CHECK-NOT: This is the (__TEXT,__follow) section
diff --git a/test/tools/llvm-objdump/X86/macho-nofirst-symbol-disassembly.test b/test/tools/llvm-objdump/X86/macho-nofirst-symbol-disassembly.test
new file mode 100644
index 000000000000..98964ac8047a
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/macho-nofirst-symbol-disassembly.test
@@ -0,0 +1,8 @@
+// RUN: llvm-objdump -d -m %p/Inputs/nofirst-symbol.macho-x86_64 | FileCheck %s
+
+CHECK: 0: 90 nop
+CHECK: _foo:
+CHECK: 1: c3 retq
+CHECK: _bar:
+CHECK: 2: 90 nop
+CHECK: 3: c3 retq
diff --git a/test/tools/llvm-objdump/X86/macho-objc-meta-data.test b/test/tools/llvm-objdump/X86/macho-objc-meta-data.test
index f4abf6cdb49e..0bdb39cdff84 100644
--- a/test/tools/llvm-objdump/X86/macho-objc-meta-data.test
+++ b/test/tools/llvm-objdump/X86/macho-objc-meta-data.test
@@ -5,6 +5,7 @@
# RUN: llvm-objdump -m -objc-meta-data %p/Inputs/Objc1.32bit.exe.macho-i386 | FileCheck %s -check-prefix=OBJC1_32BIT_EXE
# RUN: llvm-objdump -m -objc-meta-data %p/Inputs/Objc1.32bit.obj.macho-i386 | FileCheck %s -check-prefix=OBJC1_32BIT_OBJ
# RUN: llvm-objdump -m -section __OBJC,__protocol %p/Inputs/Objc1.32bit.exe.macho-i386 | FileCheck %s -check-prefix=PROTOCOL
+# RUN: llvm-objdump -m -objc-meta-data %p/Inputs/Objc2.64bit.obj.dylib-x86_64 | FileCheck %s -check-prefix=OBJC2_64BIT_DYLIB
OBJC2_64BIT_EXE: Contents of (__DATA,__objc_classlist) section
OBJC2_64BIT_EXE: 0000000100002028 0x1000029f0
@@ -1037,3 +1038,64 @@ PROTOCOL: types 0x00002e04 @8@0:4
PROTOCOL: class_methods 0x00000000 (not in an __OBJC section)
PROTOCOL: instance_methods 0x00000000 (not in an __OBJC section)
PROTOCOL: class_methods 0x00000000 (not in an __OBJC section)
+
+OBJC2_64BIT_DYLIB: Contents of (__DATA_CONST,__objc_classlist) section
+OBJC2_64BIT_DYLIB: 000000000000c038 0x8030 _OBJC_CLASS_$_Test
+OBJC2_64BIT_DYLIB: isa 0x8008 _OBJC_METACLASS_$_Test
+OBJC2_64BIT_DYLIB: superclass 0x0
+OBJC2_64BIT_DYLIB: cache 0x0
+OBJC2_64BIT_DYLIB: vtable 0x0
+OBJC2_64BIT_DYLIB: data 0xc120 (struct class_ro_t *)
+OBJC2_64BIT_DYLIB: flags 0x0
+OBJC2_64BIT_DYLIB: instanceStart 8
+OBJC2_64BIT_DYLIB: instanceSize 16
+OBJC2_64BIT_DYLIB: reserved 0x0
+OBJC2_64BIT_DYLIB: ivarLayout 0x0
+OBJC2_64BIT_DYLIB: name 0x4f59 Test
+OBJC2_64BIT_DYLIB: baseMethods 0xc090 (struct method_list_t *)
+OBJC2_64BIT_DYLIB: entsize 24
+OBJC2_64BIT_DYLIB: count 3
+OBJC2_64BIT_DYLIB: name 0x4f5e testMethod
+OBJC2_64BIT_DYLIB: types 0x4f89 v16@0:8
+OBJC2_64BIT_DYLIB: imp -[Test testMethod]
+OBJC2_64BIT_DYLIB: name 0x4f69 testProp
+OBJC2_64BIT_DYLIB: types 0x4f91 Q16@0:8
+OBJC2_64BIT_DYLIB: imp -[Test testProp]
+OBJC2_64BIT_DYLIB: name 0x4f72 setTestProp:
+OBJC2_64BIT_DYLIB: types 0x4f99 v24@0:8Q16
+OBJC2_64BIT_DYLIB: imp -[Test setTestProp:]
+OBJC2_64BIT_DYLIB: baseProtocols 0x0
+OBJC2_64BIT_DYLIB: ivars 0xc0e0
+OBJC2_64BIT_DYLIB: entsize 32
+OBJC2_64BIT_DYLIB: count 1
+OBJC2_64BIT_DYLIB: offset 0x8000 8
+OBJC2_64BIT_DYLIB: name 0x4f7f _testProp
+OBJC2_64BIT_DYLIB: type 0x4fa4 Q
+OBJC2_64BIT_DYLIB: alignment 3
+OBJC2_64BIT_DYLIB: size 8
+OBJC2_64BIT_DYLIB: weakIvarLayout 0x0
+OBJC2_64BIT_DYLIB: baseProperties 0xc108
+OBJC2_64BIT_DYLIB: entsize 16
+OBJC2_64BIT_DYLIB: count 1
+OBJC2_64BIT_DYLIB: name 0x4f42 testProp
+OBJC2_64BIT_DYLIB: attributes 0x4f4b TQ,V_testProp
+OBJC2_64BIT_DYLIB: Meta Class
+OBJC2_64BIT_DYLIB: isa 0x0
+OBJC2_64BIT_DYLIB: superclass 0x0
+OBJC2_64BIT_DYLIB: cache 0x0
+OBJC2_64BIT_DYLIB: vtable 0x0
+OBJC2_64BIT_DYLIB: data 0xc048 (struct class_ro_t *)
+OBJC2_64BIT_DYLIB: flags 0x1 RO_META
+OBJC2_64BIT_DYLIB: instanceStart 40
+OBJC2_64BIT_DYLIB: instanceSize 40
+OBJC2_64BIT_DYLIB: reserved 0x0
+OBJC2_64BIT_DYLIB: ivarLayout 0x0
+OBJC2_64BIT_DYLIB: name 0x4f59 Test
+OBJC2_64BIT_DYLIB: baseMethods 0x0 (struct method_list_t *)
+OBJC2_64BIT_DYLIB: baseProtocols 0x0
+OBJC2_64BIT_DYLIB: ivars 0x0
+OBJC2_64BIT_DYLIB: weakIvarLayout 0x0
+OBJC2_64BIT_DYLIB: baseProperties 0x0
+OBJC2_64BIT_DYLIB: Contents of (__DATA_CONST,__objc_imageinfo) section
+OBJC2_64BIT_DYLIB: version 0
+OBJC2_64BIT_DYLIB: flags 0x40
diff --git a/test/tools/llvm-objdump/X86/macho-print-thread.test b/test/tools/llvm-objdump/X86/macho-print-thread.test
new file mode 100644
index 000000000000..12f71de1db2f
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/macho-print-thread.test
@@ -0,0 +1,11 @@
+RUN: llvm-objdump -macho -private-headers %p/Inputs/thread.macho-i386 | FileCheck %s
+
+CHECK: Load command 9
+CHECK: cmd LC_UNIXTHREAD
+CHECK: cmdsize 80
+CHECK: flavor i386_THREAD_STATE
+CHECK: count i386_THREAD_STATE_COUNT
+CHECK: eax 0x00000000 ebx 0x00000000 ecx 0x00000000 edx 0x00000000
+CHECK: edi 0x00000000 esi 0x00000000 ebp 0x00000000 esp 0x00000000
+CHECK: ss 0x00000000 eflags 0x00000000 eip 0x00001db0 cs 0x00000000
+CHECK: ds 0x00000000 es 0x00000000 fs 0x00000000 gs 0x00000000
diff --git a/test/tools/llvm-objdump/X86/macho-private-headers.test b/test/tools/llvm-objdump/X86/macho-private-headers.test
index e0c68d7cd97a..d1ea0cf140b7 100644
--- a/test/tools/llvm-objdump/X86/macho-private-headers.test
+++ b/test/tools/llvm-objdump/X86/macho-private-headers.test
@@ -23,6 +23,8 @@
// RUN: | FileCheck %s -check-prefix=NON_VERBOSE
// RUN: llvm-objdump -p %p/Inputs/codesig.macho-x86_64 \
// RUN: | FileCheck %s -check-prefix=CODESIG
+// RUN: llvm-objdump -p %p/Inputs/note.macho-x86 \
+// RUN: | FileCheck %s -check-prefix=NOTE
CHECK: Mach header
CHECK: magic cputype cpusubtype caps filetype ncmds sizeofcmds flags
@@ -544,3 +546,9 @@ CODESIG: cmd LC_CODE_SIGNATURE
CODESIG: cmdsize 16
CODESIG: dataoff 8496
CODESIG: datasize 64
+
+NOTE: cmd LC_NOTE
+NOTE: cmdsize 40
+NOTE: data_owner DATA OWNER
+NOTE: offset 68
+NOTE: size 8
diff --git a/test/tools/llvm-objdump/X86/macho-stub-nosyms-disassembly.test b/test/tools/llvm-objdump/X86/macho-stub-nosyms-disassembly.test
new file mode 100644
index 000000000000..af66b0e7a63f
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/macho-stub-nosyms-disassembly.test
@@ -0,0 +1,3 @@
+// RUN: llvm-objdump -d -m -no-show-raw-insn -full-leading-addr -print-imm-hex %p/Inputs/stub-nosyms.macho-x86_64 | FileCheck %s
+
+CHECK: 0000000000000001 callq 0x7 ## symbol stub for: _foo
diff --git a/test/tools/llvm-objdump/X86/malformed-machos.test b/test/tools/llvm-objdump/X86/malformed-machos.test
index 83ebfc4364e6..292666a37254 100644
--- a/test/tools/llvm-objdump/X86/malformed-machos.test
+++ b/test/tools/llvm-objdump/X86/malformed-machos.test
@@ -63,3 +63,6 @@ INVALID-SYMBOL-STRX-UNIVERSAL: macho-invalid-symbol-strx-universal' (for archite
RUN: not llvm-objdump -macho -disassemble %p/Inputs/macho-invalid-symbol-lib_ordinal 2>&1 | FileCheck -check-prefix INVALID-SYMBOL-LIB_ORDINAL %s
INVALID-SYMBOL-LIB_ORDINAL: macho-invalid-symbol-lib_ordinal': truncated or malformed object (bad library ordinal: 7 for symbol at index 2)
+
+RUN: not llvm-objdump -macho -objc-meta-data %p/Inputs/macho-invalid-bind-entry 2>&1 | FileCheck -check-prefix INVALID-BIND-ENTRY %s
+INVALID-BIND-ENTRY: macho-invalid-bind-entry': truncated or malformed object (for BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB bad library ordinal: 83 (max 0) for opcode at: 0x0)
diff --git a/test/tools/llvm-objdump/X86/openbsd-headers.test b/test/tools/llvm-objdump/X86/openbsd-headers.test
index 48f496ca616e..fb6072efabb7 100644
--- a/test/tools/llvm-objdump/X86/openbsd-headers.test
+++ b/test/tools/llvm-objdump/X86/openbsd-headers.test
@@ -9,7 +9,7 @@
## 0x65a3dbe7 is the value of PT_OPENBSD_WXNEEDED,
## 0x65a41be6 is the value of PT_OPENBSD_BOOTDATA
## SECTIONS { . = SIZEOF_HEADERS; .all : { *(.*) } : text }
-RUN: llvm-objdump -p %p/../../../Object/Inputs/openbsd-phdrs.elf-x86-64 \
+RUN: llvm-objdump -p %p/Inputs/openbsd-phdrs.elf-x86-64 \
RUN: | FileCheck %s
CHECK: OPENBSD_RANDOMIZE off 0x0000000000000000 vaddr 0x0000000000000000 paddr 0x0000000000000000 align 2**3
diff --git a/test/tools/llvm-objdump/X86/phdrs.test b/test/tools/llvm-objdump/X86/phdrs.test
index 646f6af8733a..63b8fdeb6ef6 100644
--- a/test/tools/llvm-objdump/X86/phdrs.test
+++ b/test/tools/llvm-objdump/X86/phdrs.test
@@ -11,7 +11,7 @@
## d:
## .long 2
##
-RUN: llvm-objdump -p %p/../../../Object/Inputs/phdrs.elf-x86-64 \
+RUN: llvm-objdump -p %p/Inputs/phdrs.elf-x86-64 \
RUN: | FileCheck %s
CHECK: RELRO off 0x0000000000001000 vaddr 0x0000000000201000 paddr 0x0000000000201000 align 2**0
@@ -25,7 +25,7 @@ CHECK-NEXT: filesz 0x0000000000000004 memsz 0x0000000000001000 flags r--
## .section .note.test,"a",@note
## .quad 42
-RUN: llvm-objdump -p %p/../../../Object/Inputs/phdr-note.elf-x86-64 \
+RUN: llvm-objdump -p %p/Inputs/phdr-note.elf-x86-64 \
RUN: | FileCheck %s --check-prefix=NOTE
NOTE: NOTE off 0x0000000000000200 vaddr 0x0000000000000200 paddr 0x0000000000000200 align 2**0
diff --git a/test/tools/llvm-objdump/X86/stripped-shared.test b/test/tools/llvm-objdump/X86/stripped-shared.test
new file mode 100644
index 000000000000..c57155f4cd7b
--- /dev/null
+++ b/test/tools/llvm-objdump/X86/stripped-shared.test
@@ -0,0 +1,10 @@
+// This test checks that dynamic symbols are used when disassembling ELF files.
+// RUN: llvm-objdump -d %p/Inputs/stripped-elf.so | FileCheck %s
+
+# CHECK: .init
+# CHECK: .plt
+# CHECK: .text
+# CHECK: func0
+# CHECK: func1
+# CHECK: func2
+# CHECK: .fini
diff --git a/test/tools/llvm-objdump/macho-bad-bind.test b/test/tools/llvm-objdump/macho-bad-bind.test
new file mode 100644
index 000000000000..98fd08f4009e
--- /dev/null
+++ b/test/tools/llvm-objdump/macho-bad-bind.test
@@ -0,0 +1,101 @@
+RUN: not llvm-objdump -macho -bind %p/Inputs/macho-bind-dylib-ordinal-uleb 2>&1 | FileCheck -check-prefix DYLIB-ORDINAL-ULEB %s
+DYLIB-ORDINAL-ULEB: macho-bind-dylib-ordinal-uleb': truncated or malformed object (for BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB bad library ordinal: 355 (max 1) for opcode at: 0x0)
+
+RUN: not llvm-objdump -macho -bind %p/Inputs/macho-bind-dylib-ordinal-uleb-malformed-uleb128 2>&1 | FileCheck -check-prefix DYLIB-ORDINAL-ULEB-MALFORMED-ULEB128 %s
+DYLIB-ORDINAL-ULEB-MALFORMED-ULEB128: macho-bind-dylib-ordinal-uleb-malformed-uleb128': truncated or malformed object (for BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB malformed uleb128, extends past end for opcode at: 0x0)
+
+RUN: not llvm-objdump -macho -bind %p/Inputs/macho-bind-dylib-ordinal-uleb-too-big 2>&1 | FileCheck -check-prefix DYLIB-ORDINAL-ULEB-TOO-BIG %s
+DYLIB-ORDINAL-ULEB-TOO-BIG: macho-bind-dylib-ordinal-uleb-too-big': truncated or malformed object (for BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB uleb128 too big for uint64 for opcode at: 0x0)
+
+RUN: not llvm-objdump -macho -bind %p/Inputs/macho-bind-dylib-special-imm 2>&1 | FileCheck -check-prefix DYLIB-SPECIAL-IMM %s
+DYLIB-SPECIAL-IMM: macho-bind-dylib-special-imm': truncated or malformed object (for BIND_OPCODE_SET_DYLIB_SPECIAL_IMM unknown special ordinal: -5 for opcode at: 0x0)
+
+RUN: not llvm-objdump -macho -bind %p/Inputs/macho-bind-set-symbol 2>&1 | FileCheck -check-prefix BIND-SET-SYMBOL %s
+BIND-SET-SYMBOL: macho-bind-set-symbol': truncated or malformed object (for BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM symbol name extends past opcodes for opcode at: 0x2)
+
+RUN: not llvm-objdump -macho -bind %p/Inputs/macho-bind-set-type-imm 2>&1 | FileCheck -check-prefix SET-TYPE-IMM %s
+SET-TYPE-IMM: macho-bind-set-type-imm': truncated or malformed object (for BIND_OPCODE_SET_TYPE_IMM bad bind type: 5 for opcode at: 0x14)
+
+RUN: not llvm-objdump -macho -bind %p/Inputs/macho-bind-set-addend-sleb 2>&1 | FileCheck -check-prefix SET-ADDEND-SLEB %s
+SET-ADDEND-SLEB: macho-bind-set-addend-sleb': truncated or malformed object (for BIND_OPCODE_SET_ADDEND_SLEB malformed sleb128, extends past end for opcode at: 0x14)
+
+RUN: not llvm-objdump -macho -bind %p/Inputs/macho-bind-seg-too-big 2>&1 | FileCheck -check-prefix SEG-TOO-BIG %s
+SEG-TOO-BIG: macho-bind-seg-too-big': truncated or malformed object (for BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB bad segIndex (too large) for opcode at: 0x15)
+
+RUN: not llvm-objdump -macho -bind %p/Inputs/macho-bind-segoff-too-big 2>&1 | FileCheck -check-prefix SEGOFF-TOO-BIG %s
+SEGOFF-TOO-BIG: macho-bind-segoff-too-big': truncated or malformed object (for BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB bad segOffset, too large for opcode at: 0x15)
+
+RUN: not llvm-objdump -macho -bind %p/Inputs/macho-bind-add_addr_uleb 2>&1 | FileCheck -check-prefix ADD_ADDR_ULEB %s
+ADD_ADDR_ULEB: macho-bind-add_addr_uleb': truncated or malformed object (for BIND_OPCODE_ADD_ADDR_ULEB bad segOffset, too large for opcode at: 0x17)
+
+RUN: not llvm-objdump -macho -bind %p/Inputs/macho-bind-do-bind-no-segIndex 2>&1 | FileCheck -check-prefix BIND-NO-SEGINDEX %s
+BIND-NO-SEGINDEX: macho-bind-do-bind-no-segIndex': truncated or malformed object (for BIND_OPCODE_DO_BIND missing preceding *_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB for opcode at: 0x15)
+
+RUN: not llvm-objdump -macho -bind %p/Inputs/macho-bind-bind-add-addr-uleb 2>&1 | FileCheck -check-prefix ADD-ADDR-ULEB %s
+ADD-ADDR-ULEB: macho-bind-bind-add-addr-uleb': truncated or malformed object (for BIND_OPCODE_ADD_ADDR_ULEB (after adding ULEB) bad segOffset, too large for opcode at: 0x18)
+
+RUN: not llvm-objdump -macho -bind %p/Inputs/macho-bind-add-addr-imm-scaled 2>&1 | FileCheck -check-prefix ADD-ADDR-IMM-SCALED %s
+ADD-ADDR-IMM-SCALED: macho-bind-add-addr-imm-scaled': truncated or malformed object (for BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED (after adding immediate times the pointer size) bad segOffset, too large for opcode at: 0x17)
+
+RUN: not llvm-objdump -macho -bind %p/Inputs/macho-bind-uleb-times-skipping-uleb 2>&1 | FileCheck -check-prefix ULEB-TIMES-SKIPPING-ULEB %s
+ULEB-TIMES-SKIPPING-ULEB: macho-bind-uleb-times-skipping-uleb': truncated or malformed object (for BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB bad count and skip, too large for opcode at: 0x17)
+
+RUN: not llvm-objdump -macho -bind %p/Inputs/macho-do-bind-no-symbol 2>&1 | FileCheck -check-prefix DO-BIND-NO-SYMBOL %s
+DO-BIND-NO-SYMBOL: macho-do-bind-no-symbol': truncated or malformed object (for BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB missing preceding BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM for opcode at: 0x5)
+
+RUN: not llvm-objdump -macho -bind %p/Inputs/macho-do-bind-no-dylib-ordinal 2>&1 | FileCheck -check-prefix DO-BIND-NO-DYLIB-ORDINAL %s
+DO-BIND-NO-DYLIB-ORDINAL: macho-do-bind-no-dylib-ordinal': truncated or malformed object (for BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB missing preceding BIND_OPCODE_SET_DYLIB_ORDINAL_* for opcode at: 0x15)
+
+RUN: not llvm-objdump -macho -bind %p/Inputs/macho-bind-bad-opcode-value 2>&1 | FileCheck -check-prefix BAD-OPCODE-VALUE %s
+BAD-OPCODE-VALUE: macho-bind-bad-opcode-value': truncated or malformed object (bad bind info (bad opcode value 0xD0 for opcode at: 0x18)
+
+RUN: not llvm-objdump -macho -lazy-bind %p/Inputs/macho-lazy-do_bind_add_addr_uleb 2>&1 | FileCheck -check-prefix LAZY_DO_BIND_ADD_ADDR_ULEB %s
+LAZY_DO_BIND_ADD_ADDR_ULEB: macho-lazy-do_bind_add_addr_uleb': truncated or malformed object (BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB not allowed in lazy bind table for opcode at: 0xC)
+
+RUN: not llvm-objdump -macho -lazy-bind %p/Inputs/macho-lazy-do-bind-add-addr-imm-scaled 2>&1 | FileCheck -check-prefix LAZY-DO-BIND-ADD-ADDR-IMM-SCALED %s
+LAZY-DO-BIND-ADD-ADDR-IMM-SCALED: macho-lazy-do-bind-add-addr-imm-scaled': truncated or malformed object (BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED not allowed in lazy bind table for opcode at: 0xC)
+
+RUN: not llvm-objdump -macho -lazy-bind %p/Inputs/macho-lazy-do-bind-uleb-times-skipping-uleb 2>&1 | FileCheck -check-prefix LAZY-DO-BIND-ULEB-TIMES-SKIPPING-ULEB %s
+LAZY-DO-BIND-ULEB-TIMES-SKIPPING-ULEB: macho-lazy-do-bind-uleb-times-skipping-uleb': truncated or malformed object (BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB not allowed in lazy bind table for opcode at: 0xC)
+
+RUN: not llvm-objdump -macho -weak-bind %p/Inputs/macho-weak-bind-set-dylib-ordinal-imm 2>&1 | FileCheck -check-prefix WEAK-BIND-SET-DYLIB-ORDINAL-IMM %s
+WEAK-BIND-SET-DYLIB-ORDINAL-IMM: macho-weak-bind-set-dylib-ordinal-imm': truncated or malformed object (BIND_OPCODE_SET_DYLIB_ORDINAL_IMM not allowed in weak bind table for opcode at: 0x2)
+
+RUN: not llvm-objdump -macho -weak-bind %p/Inputs/macho-weak-bind-set-dylib-ordinal-uleb 2>&1 | FileCheck -check-prefix WEAK-BIND-SET-DYLIB-ORDINAL-ULEB %s
+WEAK-BIND-SET-DYLIB-ORDINAL-ULEB: macho-weak-bind-set-dylib-ordinal-uleb': truncated or malformed object (BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB not allowed in weak bind table for opcode at: 0x2)
+
+RUN: not llvm-objdump -macho -weak-bind %p/Inputs/macho-weak-bind-set-dylib-special-imm 2>&1 | FileCheck -check-prefix WEAK-BIND-SET-DYLIB-SPECIAL-IMM %s
+WEAK-BIND-SET-DYLIB-SPECIAL-IMM: macho-weak-bind-set-dylib-special-imm': truncated or malformed object (BIND_OPCODE_SET_DYLIB_SPECIAL_IMM not allowed in weak bind table for opcode at: 0x2)
+
+RUN: not llvm-objdump -macho -rebase %p/Inputs/macho-rebase-set-type-imm 2>&1 | FileCheck -check-prefix REBASE-SET-TYPE-IMM %s
+REBASE-SET-TYPE-IMM: macho-rebase-set-type-imm': truncated or malformed object (for REBASE_OPCODE_SET_TYPE_IMM bad bind type: 5 for opcode at: 0x0)
+
+RUN: not llvm-objdump -macho -rebase %p/Inputs/macho-rebase-uleb-malformed-uleb128 2>&1 | FileCheck -check-prefix REBASE-ULEB-MALFORMED-ULEB128 %s
+REBASE-ULEB-MALFORMED-ULEB128: macho-rebase-uleb-malformed-uleb128': truncated or malformed object (for REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB malformed uleb128, extends past end for opcode at: 0x1)
+
+RUN: not llvm-objdump -macho -rebase %p/Inputs/macho-rebase-seg-too-big 2>&1 | FileCheck -check-prefix REBASE-SEG-TOO-BIG %s
+REBASE-SEG-TOO-BIG: macho-rebase-seg-too-big': truncated or malformed object (for REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB bad segIndex (too large) for opcode at: 0x1)
+
+RUN: not llvm-objdump -macho -rebase %p/Inputs/macho-rebase-segoff-too-big 2>&1 | FileCheck -check-prefix REBASE-SEGOFF-TOO-BIG %s
+REBASE-SEGOFF-TOO-BIG: macho-rebase-segoff-too-big': truncated or malformed object (for REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB bad segOffset, too large for opcode at: 0x1)
+
+RUN: not llvm-objdump -macho -rebase %p/Inputs/macho-rebase-add-addr-uleb 2>&1 | FileCheck -check-prefix REBASE-ADD-ADDR-ULEB %s
+REBASE-ADD-ADDR-ULEB: macho-rebase-add-addr-uleb': truncated or malformed object (for REBASE_OPCODE_ADD_ADDR_ULEB bad segOffset, too large for opcode at: 0x3)
+
+RUN: not llvm-objdump -macho -rebase %p/Inputs/macho-rebase-add-addr-imm-scaled 2>&1 | FileCheck -check-prefix REBASE-ADD-ADDR-IMM-SCALED %s
+REBASE-ADD-ADDR-IMM-SCALED: macho-rebase-add-addr-imm-scaled': truncated or malformed object (for REBASE_OPCODE_ADD_ADDR_IMM_SCALED (after adding immediate times the pointer size) bad segOffset, too large for opcode at: 0x3)
+
+RUN: not llvm-objdump -macho -rebase %p/Inputs/macho-rebase-imm-times 2>&1 | FileCheck -check-prefix REBASE-IMM-TIMES %s
+REBASE-IMM-TIMES: macho-rebase-imm-times': truncated or malformed object (for REBASE_OPCODE_DO_REBASE_IMM_TIMES bad count and skip, too large for opcode at: 0x3)
+
+RUN: not llvm-objdump -macho -rebase %p/Inputs/macho-rebase-uleb-times 2>&1 | FileCheck -check-prefix REBASE-ULEB-TIMES %s
+REBASE-ULEB-TIMES: macho-rebase-uleb-times': truncated or malformed object (for REBASE_OPCODE_DO_REBASE_ULEB_TIMES bad count and skip, too large for opcode at: 0x3)
+
+RUN: not llvm-objdump -macho -rebase %p/Inputs/macho-rebase-add-addr-uleb-too-big 2>&1 | FileCheck -check-prefix REBASE-ADD-ADDR-ULEB-TOO-BIG %s
+REBASE-ADD-ADDR-ULEB-TOO-BIG: macho-rebase-add-addr-uleb-too-big': truncated or malformed object (for REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB bad count and skip, too large for opcode at: 0x3)
+
+RUN: not llvm-objdump -macho -rebase %p/Inputs/macho-rebase-uleb-times-skipping-uleb 2>&1 | FileCheck -check-prefix REBASE-ULEB-TIMES-SKIPPING-ULEB %s
+REBASE-ULEB-TIMES-SKIPPING-ULEB: macho-rebase-uleb-times-skipping-uleb': truncated or malformed object (for REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB bad count and skip, too large for opcode at: 0x3)
+
+RUN: not llvm-objdump -macho -rebase %p/Inputs/macho-rebase-bad-opcode-value 2>&1 | FileCheck -check-prefix REBASE-BAD-OPCODE-VALUE %s
+REBASE-BAD-OPCODE-VALUE: macho-rebase-bad-opcode-value': truncated or malformed object (bad rebase info (bad opcode value 0xD0 for opcode at: 0x4)
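
Several of the diagnostics above ("malformed uleb128, extends past end", "uleb128 too big for uint64") are the two failure modes of a bounds-checked ULEB128 read. A sketch of such a decoder, with error handling simplified to an enum rather than the actual MachOObjectFile logic:

#include <cstdint>

enum class ULEBError { Ok, ExtendsPastEnd, TooBigForUInt64 };

ULEBError readULEB128(const uint8_t *&Ptr, const uint8_t *End, uint64_t &Out) {
  Out = 0;
  unsigned Shift = 0;
  uint8_t Byte;
  do {
    if (Ptr == End)
      return ULEBError::ExtendsPastEnd;  // "malformed uleb128, extends past end"
    Byte = *Ptr++;
    // At shift 63 only one payload bit still fits in a uint64_t.
    if (Shift >= 64 || (Shift == 63 && (Byte & 0x7F) > 1))
      return ULEBError::TooBigForUInt64; // "uleb128 too big for uint64"
    Out |= uint64_t(Byte & 0x7F) << Shift;
    Shift += 7;
  } while (Byte & 0x80);                 // high bit marks a continuation byte
  return ULEBError::Ok;
}
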
diff --git a/test/tools/llvm-objdump/macho-bad-ordinal.test b/test/tools/llvm-objdump/macho-bad-ordinal.test
index 16badcc878d8..fb49f77f0751 100644
--- a/test/tools/llvm-objdump/macho-bad-ordinal.test
+++ b/test/tools/llvm-objdump/macho-bad-ordinal.test
@@ -1,6 +1,4 @@
-# RUN: llvm-objdump -macho -bind -lazy-bind %p/Inputs/bad-ordinal.macho-x86_64 \
-# RUN: | FileCheck %s
+# RUN: not llvm-objdump -macho -lazy-bind %p/Inputs/bad-ordinal.macho-x86_64 \
+# RUN: 2>&1 | FileCheck %s
-
-# CHECK: __DATA __nl_symbol_ptr 0x100001000 pointer 0 <<bad library ordinal>> dyld_stub_binder
-# CHECK: __DATA __la_symbol_ptr 0x100001010 <<bad library ordinal>> _printf
+# CHECK: bad-ordinal.macho-x86_64': truncated or malformed object (for BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB bad library ordinal: 2 (max 1) for opcode at: 0x2)
diff --git a/test/tools/llvm-objdump/malformed-macho.test b/test/tools/llvm-objdump/malformed-macho.test
index 0bc2ce8e898f..924d58a1df24 100644
--- a/test/tools/llvm-objdump/malformed-macho.test
+++ b/test/tools/llvm-objdump/malformed-macho.test
@@ -1,2 +1,3 @@
-RUN: not llvm-objdump -macho -s %p/Inputs/malformed-macho.bin 2>&1 | FileCheck %s -check-prefix=MALFORMED
-MALFORMED: The file was not recognized as a valid object file
+RUN: llvm-objdump -macho -private-header %p/Inputs/malformed-macho.bin %p/Inputs/empty.macho-armv7 2>&1 | FileCheck %s -check-prefix=MALFORMED
+MALFORMED: is not an object file
+MALFORMED-NEXT: Mach header
diff --git a/test/tools/llvm-objdump/wasm.txt b/test/tools/llvm-objdump/wasm.txt
index ebda2249f8b8..4aa40c6c9df8 100644
--- a/test/tools/llvm-objdump/wasm.txt
+++ b/test/tools/llvm-objdump/wasm.txt
@@ -9,12 +9,12 @@
# CHECK: 4 EXPORT 0000000e 0000000000000000
# CHECK: 5 ELEM 00000007 0000000000000000
# CHECK: 6 CODE 0000002a 0000000000000000 TEXT
-# CHECK: 7 name 0000002c 0000000000000000
+# CHECK: 7 name 0000003c 0000000000000000
# RUN: llvm-objdump -p %p/Inputs/test.wasm | FileCheck %s -check-prefix CHECK-HEADER
# CHECK-HEADER: Program Header:
-# CHECK-HEADER: Version: 0xd
+# CHECK-HEADER: Version: 0x1
# RUN: llvm-objdump -s --section=CODE %p/Inputs/test.wasm | FileCheck %s -check-prefix CHECK-SECTIONS
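
The updated values track the wasm binary format: a module starts with the four magic bytes "\0asm" followed by a 32-bit little-endian version, which is 1 in the release encoding (pre-release toolchains emitted 0xd). A sketch of the header check, as a hypothetical helper:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Validate the 8-byte wasm module header and extract the version field.
bool checkWasmHeader(const uint8_t *Buf, size_t Len, uint32_t &Version) {
  static const uint8_t Magic[4] = {0x00, 0x61, 0x73, 0x6D}; // "\0asm"
  if (Len < 8 || std::memcmp(Buf, Magic, 4) != 0)
    return false;
  Version = Buf[4] | (Buf[5] << 8) | (Buf[6] << 16) | (uint32_t(Buf[7]) << 24);
  return true; // Version is 1 for release-format modules
}
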
diff --git a/test/tools/llvm-pdbdump/Inputs/SimplePaddingTest.cpp b/test/tools/llvm-pdbdump/Inputs/SimplePaddingTest.cpp
new file mode 100644
index 000000000000..b52af1495336
--- /dev/null
+++ b/test/tools/llvm-pdbdump/Inputs/SimplePaddingTest.cpp
@@ -0,0 +1,167 @@
+// Compile with "cl /c /Zi /GR- SimplePaddingTest.cpp"
+// Link with "link SimplePaddingTest.obj /debug /nodefaultlib /entry:main"
+
+#include <stdint.h>
+
+extern "C" using at_exit_handler = void();
+
+int atexit(at_exit_handler handler) { return 0; }
+
+struct SimplePadNoPadding {
+ int32_t X;
+ int32_t Y;
+ // No padding anywhere, sizeof(T) = 8
+} A;
+
+struct SimplePadUnion {
+ union {
+ int32_t X;
+ int64_t Y;
+ struct {
+ int32_t X;
+ // 4 bytes of padding here
+ int64_t Y;
+ } Z;
+ };
+ // Since the padding occurs at a location that is occupied by other storage
+ // (namely the Y member), the storage will still be considered used, and so
+ // there will be no unused bytes in the larger class. But in the debug
+ // info for the nested struct, we should see padding.
+ // sizeof(SimplePadUnion) == sizeof(Z) == 16
+} B;
+
+struct SimplePadNoPadding2 {
+ bool A;
+ bool B;
+ bool C;
+ bool D;
+ // No padding anywhere, sizeof(T) = 4
+} C;
+
+struct alignas(4) SimplePadFields1 {
+ char A;
+ char B;
+ char C;
+ // 1 byte of padding here, sizeof(T) = 4
+} E;
+
+struct SimplePadFields2 {
+ int32_t Y;
+ char X;
+} F;
+
+struct SimplePadBase {
+ // Make sure this class is 4 bytes, and the derived class requires 8 byte
+ // alignment, so that padding is inserted between base and derived.
+ int32_t X;
+ // No padding here
+} G;
+
+struct SimplePadDerived : public SimplePadBase {
+ // 4 bytes of padding here due to Y requiring 8 byte alignment.
+ // Thus, sizeof(T) = 16
+ int64_t Y;
+} H;
+
+struct SimplePadEmptyBase1 {};
+struct SimplePadEmptyBase2 {};
+
+struct SimplePadEmpty : public SimplePadEmptyBase1, SimplePadEmptyBase2 {
+ // Bases have to occupy at least 1 byte of storage, so this requires
+ // 2 bytes of padding, plus 1 byte for each base, yielding sizeof(T) = 8
+ int32_t X;
+} I;
+
+struct SimplePadVfptr {
+ virtual ~SimplePadVfptr() {}
+ static void operator delete(void *ptr, size_t sz) {}
+ int32_t X;
+} J;
+
+struct NonEmptyBase1 {
+ bool X;
+};
+
+struct NonEmptyBase2 {
+ bool Y;
+};
+
+struct SimplePadMultiInherit : public NonEmptyBase1, public NonEmptyBase2 {
+ // X and Y from the 2 bases will get squished together, leaving 2 bytes
+ // of padding necessary for proper alignment of an int32.
+ // Therefore, sizeof(T) = 2 + 2 + 4 = 8
+ int32_t X;
+} K;
+
+struct SimplePadMultiInherit2 : public SimplePadFields1, SimplePadFields2 {
+ // There should be 1 byte of padding after the first class, and
+ // 3 bytes of padding after the second class.
+ int32_t X;
+} L;
+
+struct OneLevelInherit : public NonEmptyBase1 {
+ short Y;
+};
+
+struct SimplePadTwoLevelInherit : public OneLevelInherit {
+ // OneLevelInherit has nested padding because of its base,
+ // and then padding again because of this class. The 4-byte
+ // base is followed by 4 bytes of padding, yielding sizeof(T) = 16.
+ int64_t Z;
+} M;
+
+struct SimplePadAggregate {
+ NonEmptyBase1 X;
+ int32_t Y;
+ // the presence of X will cause 3 bytes of padding to be injected.
+} N;
+
+struct SimplePadVtable1 {
+ static void operator delete(void *ptr, size_t sz) {}
+ virtual ~SimplePadVtable1() {}
+ virtual void A1() {}
+ virtual void B1() {}
+} O;
+
+struct SimplePadVtable2 {
+ static void operator delete(void *ptr, size_t sz) {}
+ virtual ~SimplePadVtable2() {}
+ virtual void X2() {}
+ virtual void Y2() {}
+ virtual void Z2() {}
+} P;
+
+struct SimplePadVtable3 {
+ static void operator delete(void *ptr, size_t sz) {}
+ virtual ~SimplePadVtable3() {}
+ virtual void Foo3() {}
+ virtual void Bar3() {}
+ virtual void Baz3() {}
+ virtual void Buzz3() {}
+} Q;
+
+struct SimplePadMultiVTables
+ : public SimplePadVtable1,
+ public SimplePadVtable2,
+ public SimplePadVtable3 {
+
+ ~SimplePadMultiVTables() override {}
+ static void operator delete(void *ptr, size_t sz) {}
+
+ // SimplePadVtable1 overrides
+ void A1() override {}
+
+ // SimplePadVtable2 overrides
+ void Y2() override {}
+ void Z2() override {}
+
+ // SimplePadVtable3 overrides
+ void Bar3() override {}
+ void Baz3() override {}
+ void Buzz3() override {}
+} R;
+
+int main(int argc, char **argv) {
+
+ return 0;
+}
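
The padding arithmetic in the comments above can also be verified at compile time. A self-contained sketch with standalone structs mirroring a few of the cases; the sizes shown hold on common 64-bit ABIs:

#include <cstdint>

struct NoPadding { int32_t X; int32_t Y; };
static_assert(sizeof(NoPadding) == 8, "two ints, no padding");

struct alignas(4) Fields1 { char A, B, C; };
static_assert(sizeof(Fields1) == 4, "1 byte of tail padding");

struct Base { int32_t X; };
struct Derived : Base { int64_t Y; };
static_assert(sizeof(Derived) == 16, "4 bytes of padding before the 8-aligned Y");
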
diff --git a/test/tools/llvm-pdbdump/Inputs/SimplePaddingTest.pdb b/test/tools/llvm-pdbdump/Inputs/SimplePaddingTest.pdb
new file mode 100644
index 000000000000..f0bd496c1c85
--- /dev/null
+++ b/test/tools/llvm-pdbdump/Inputs/SimplePaddingTest.pdb
Binary files differ
diff --git a/test/tools/llvm-pdbdump/class-layout.test b/test/tools/llvm-pdbdump/class-layout.test
index d2e98de2a201..c0083d176eac 100644
--- a/test/tools/llvm-pdbdump/class-layout.test
+++ b/test/tools/llvm-pdbdump/class-layout.test
@@ -14,44 +14,42 @@
; GLOBALS_TEST-DAG: GlobalsTest::Enum GlobalsTest::EnumVar
; MEMBERS_TEST: ---TYPES---
-; MEMBERS_TEST: class MembersTest::A {
+; MEMBERS_TEST: class MembersTest::A [sizeof = 16] {
; MEMBERS_TEST-DAG: typedef int NestedTypedef
; MEMBERS_TEST-DAG: enum NestedEnum
-; MEMBERS_TEST: public:
-; MEMBERS_TEST-NEXT: void MemberFunc()
-; MEMBERS_TEST-NEXT: private:
-; MEMBERS_TEST-DAG: int IntMemberVar
-; MEMBERS_TEST-DAG: double DoubleMemberVar
+; MEMBERS_TEST: void MemberFunc()
+; MEMBERS_TEST-DAG: data +0x00 [sizeof=4] int IntMemberVar
+; MEMBERS_TEST-NEXT: <padding> (4 bytes)
+; MEMBERS_TEST-NEXT: data +0x08 [sizeof=8] double DoubleMemberVar
; MEMBERS_TEST: }
; BASE_CLASS_A: ---TYPES---
-; BASE_CLASS_A: class BaseClassTest::A {}
+; BASE_CLASS_A: class BaseClassTest::A [sizeof = 1] {}
; BASE_CLASS_B: ---TYPES---
-; BASE_CLASS_B: class BaseClassTest::B
+; BASE_CLASS_B: class BaseClassTest::B [sizeof = 4]
; BASE_CLASS_B-NEXT: : public virtual BaseClassTest::A {
; BASE_CLASS_C: ---TYPES---
-; BASE_CLASS_C: class BaseClassTest::C
+; BASE_CLASS_C: class BaseClassTest::C [sizeof = 4]
; BASE_CLASS_C-NEXT: : public virtual BaseClassTest::A {
; BASE_CLASS_D: ---TYPES---
-; BASE_CLASS_D: class BaseClassTest::D
+; BASE_CLASS_D: class BaseClassTest::D [sizeof = 8]
; BASE_CLASS_D-DAG: protected BaseClassTest::B
; BASE_CLASS_D-DAG: private BaseClassTest::C
; BASE_CLASS_D-DAG: protected virtual BaseClassTest::A
; UDT_KIND_TEST: ---TYPES---
-; UDT_KIND_TEST-DAG: union UdtKindTest::C {}
-; UDT_KIND_TEST-DAG: class UdtKindTest::B {}
-; UDT_KIND_TEST-DAG: struct UdtKindTest::A {}
+; UDT_KIND_TEST-DAG: union UdtKindTest::C [sizeof = 1] {}
+; UDT_KIND_TEST-DAG: class UdtKindTest::B [sizeof = 1] {}
+; UDT_KIND_TEST-DAG: struct UdtKindTest::A [sizeof = 1] {}
; BITFIELD_TEST: ---TYPES---
-; BITFIELD_TEST: struct BitFieldTest::A {
-; BITFIELD_TEST-NEXT: public:
-; BITFIELD_TEST-NEXT: +0x00 int Bits1 : 1
-; BITFIELD_TEST-NEXT: +0x00 int Bits2 : 2
-; BITFIELD_TEST-NEXT: +0x00 int Bits3 : 3
-; BITFIELD_TEST-NEXT: +0x00 int Bits4 : 4
-; BITFIELD_TEST-NEXT: +0x00 int Bits22 : 22
-; BITFIELD_TEST-NEXT: +0x04 int Offset0x04
+; BITFIELD_TEST: struct BitFieldTest::A [sizeof = 8] {
+; BITFIELD_TEST-NEXT: +0x00 [sizeof=4] int Bits1 : 1
+; BITFIELD_TEST-NEXT: +0x00 [sizeof=4] int Bits2 : 2
+; BITFIELD_TEST-NEXT: +0x00 [sizeof=4] int Bits3 : 3
+; BITFIELD_TEST-NEXT: +0x00 [sizeof=4] int Bits4 : 4
+; BITFIELD_TEST-NEXT: +0x00 [sizeof=4] int Bits22 : 22
+; BITFIELD_TEST-NEXT: +0x04 [sizeof=4] int Offset0x04
diff --git a/test/tools/llvm-pdbdump/enum-layout.test b/test/tools/llvm-pdbdump/enum-layout.test
index 21e1867175f6..df447c65bbae 100644
--- a/test/tools/llvm-pdbdump/enum-layout.test
+++ b/test/tools/llvm-pdbdump/enum-layout.test
@@ -10,7 +10,7 @@
; MEMBER_ENUM: ---TYPES---
; MEMBER_ENUM: Classes:
-; MEMBER_ENUM: struct __vc_attributes::threadingAttribute {
+; MEMBER_ENUM: struct __vc_attributes::threadingAttribute [sizeof = 4] {
; MEMBER_ENUM-NEXT: enum threading_e {
; MEMBER_ENUM-NEXT: apartment = 1
; MEMBER_ENUM-NEXT: single = 2
diff --git a/test/tools/llvm-pdbdump/regex-filter.test b/test/tools/llvm-pdbdump/regex-filter.test
index b845f5a28cff..d2f500e88c33 100644
--- a/test/tools/llvm-pdbdump/regex-filter.test
+++ b/test/tools/llvm-pdbdump/regex-filter.test
@@ -1,9 +1,16 @@
; RUN: llvm-pdbdump pretty -symbols -globals -types %p/Inputs/FilterTest.pdb \
; RUN: | FileCheck --check-prefix=NO_FILTER %s
+
; RUN: llvm-pdbdump pretty -types -exclude-types="GlobalTypedef|NestedTypedef" \
; RUN: %p/Inputs/FilterTest.pdb | FileCheck --check-prefix=EXCLUDE_TYPEDEFS %s
+; RUN: llvm-pdbdump pretty -classes -enums %p/Inputs/FilterTest.pdb \
+; RUN: | FileCheck --check-prefix=EXCLUDE_TYPEDEFS %s
+
; RUN: llvm-pdbdump pretty -types -exclude-types="GlobalEnum|NestedEnum" \
; RUN: %p/Inputs/FilterTest.pdb | FileCheck --check-prefix=EXCLUDE_ENUMS %s
+; RUN: llvm-pdbdump pretty -classes -typedefs %p/Inputs/FilterTest.pdb \
+; RUN: | FileCheck --check-prefix=EXCLUDE_ENUMS %s
+
; RUN: llvm-pdbdump pretty -types -symbols -globals -exclude-symbols="MemberVar|GlobalVar" \
; RUN: %p/Inputs/FilterTest.pdb | FileCheck --check-prefix=EXCLUDE_VARS %s
; RUN: llvm-pdbdump pretty -types -exclude-types="FilterTestClass" \
@@ -36,31 +43,25 @@
; NO_FILTER-DAG: GlobalEnum GlobalEnumVar
; EXCLUDE_TYPEDEFS: ---TYPES---
-; EXCLUDE_TYPEDEFS: Enums:
-; EXCLUDE_TYPEDEFS: GlobalEnum
-; EXCLUDE_TYPEDEFS: Typedefs
; EXCLUDE_TYPEDEFS-NOT: GlobalTypedef
-; EXCLUDE_TYPEDEFS: Classes
-; EXCLUDE_TYPEDEFS: class FilterTestClass
; EXCLUDE_TYPEDEFS-NOT: NestedTypedef
-; EXCLUDE_TYPEDEFS: private:
+; EXCLUDE_TYPEDEFS-DAG: GlobalEnum
+; EXCLUDE_TYPEDEFS-DAG: NestedEnum
+; EXCLUDE_TYPEDEFS: class FilterTestClass
; EXCLUDE_ENUMS: ---TYPES---
-; EXCLUDE_ENUMS: Enums:
; EXCLUDE_ENUMS-NOT: GlobalEnum
-; EXCLUDE_ENUMS: Typedefs
+; EXCLUDE_ENUMS-NOT: NestedEnum
; EXCLUDE_ENUMS: GlobalTypedef
-; EXCLUDE_ENUMS: Classes
; EXCLUDE_ENUMS: class FilterTestClass
-; EXCLUDE_ENUMS-NOT: NestedEnum
-; EXCLUDE_ENUMS: private:
; EXCLUDE_VARS: ---TYPES---
-; EXCLUDE_VARS: Classes:
-; EXCLUDE_VARS: class FilterTestClass
-; EXCLUDE_VARS: private:
; EXCLUDE_VARS-NOT: IntMemberVar
; EXCLUDE_VARS-NOT: DoubleMemberVar
+; EXCLUDE_VARS-DAG: GlobalEnum
+; EXCLUDE_VARS-DAG: NestedEnum
+; EXCLUDE_VARS: GlobalTypedef
+; EXCLUDE_VARS: class FilterTestClass
; EXCLUDE_VARS: ---GLOBALS---
; EXCLUDE_VARS-NOT: DoubleGlobalVar
; EXCLUDE_VARS-NOT: IntGlobalVar
diff --git a/test/tools/llvm-pdbdump/simple-padding-graphical.test b/test/tools/llvm-pdbdump/simple-padding-graphical.test
new file mode 100644
index 000000000000..aacb0a330450
--- /dev/null
+++ b/test/tools/llvm-pdbdump/simple-padding-graphical.test
@@ -0,0 +1,121 @@
+; RUN: llvm-pdbdump pretty -classes -class-definitions=graphical \
+; RUN: -include-types=SimplePad %p/Inputs/SimplePaddingTest.pdb > %t
+
+; RUN: FileCheck -input-file=%t %s -check-prefix=NO_PADDING
+; RUN: FileCheck -input-file=%t %s -check-prefix=UNION
+; RUN: FileCheck -input-file=%t %s -check-prefix=NESTED_UNION
+; RUN: FileCheck -input-file=%t %s -check-prefix=PAD_FROM_FIELDS1
+; RUN: FileCheck -input-file=%t %s -check-prefix=PAD_FROM_FIELDS2
+; RUN: FileCheck -input-file=%t %s -check-prefix=NO_PAD_IN_BASE
+; RUN: FileCheck -input-file=%t %s -check-prefix=PAD_IN_DERIVED
+; RUN: FileCheck -input-file=%t %s -check-prefix=EMPTY_BASE
+; RUN: FileCheck -input-file=%t %s -check-prefix=VFPTR
+; RUN: FileCheck -input-file=%t %s -check-prefix=MULTIPLE_INHERIT
+; RUN: FileCheck -input-file=%t %s -check-prefix=MULTIPLE_INHERIT2
+; RUN: FileCheck -input-file=%t %s -check-prefix=DEEP_INHERIT
+; RUN: FileCheck -input-file=%t %s -check-prefix=AGGREGATE
+
+; NO_PADDING: struct SimplePadNoPadding [sizeof = 8] {
+; NO_PADDING-NEXT: data +0x00 [sizeof=4] int X
+; NO_PADDING-NEXT: data +0x04 [sizeof=4] int Y
+; NO_PADDING-NEXT: }
+
+; UNION: struct SimplePadUnion [sizeof = 16] {
+; UNION-NEXT: data +0x00 [sizeof=4] int X
+; UNION-NEXT: data +0x00 [sizeof=8] __int64 Y
+; UNION-NEXT: data +0x00 [sizeof=16] SimplePadUnion::
+; UNION-NEXT: data +0x00 [sizeof=4] int X
+; UNION-NEXT: <padding> (4 bytes)
+; UNION-NEXT: data +0x08 [sizeof=8] __int64 Y
+; UNION-NEXT: }
+
+; NESTED_UNION: struct {{SimplePadUnion::.*}} [sizeof = 16] {
+; NESTED_UNION-NEXT: data +0x00 [sizeof=4] int X
+; NESTED_UNION-NEXT: <padding> (4 bytes)
+; NESTED_UNION-NEXT: data +0x08 [sizeof=8] __int64 Y
+; NESTED_UNION-NEXT: }
+
+; PAD_FROM_FIELDS1: struct SimplePadFields1 [sizeof = 4] {
+; PAD_FROM_FIELDS1-NEXT: data +0x00 [sizeof=1] char A
+; PAD_FROM_FIELDS1-NEXT: data +0x01 [sizeof=1] char B
+; PAD_FROM_FIELDS1-NEXT: data +0x02 [sizeof=1] char C
+; PAD_FROM_FIELDS1-NEXT: <padding> (1 bytes)
+; PAD_FROM_FIELDS1-NEXT: }
+
+; PAD_FROM_FIELDS2: struct SimplePadFields2 [sizeof = 8] {
+; PAD_FROM_FIELDS2-NEXT: data +0x00 [sizeof=4] int Y
+; PAD_FROM_FIELDS2-NEXT: data +0x04 [sizeof=1] char X
+; PAD_FROM_FIELDS2-NEXT: <padding> (3 bytes)
+; PAD_FROM_FIELDS2-NEXT: }
+
+; NO_PAD_IN_BASE: struct SimplePadBase [sizeof = 4] {
+; NO_PAD_IN_BASE-NEXT: data +0x00 [sizeof=4] int X
+; NO_PAD_IN_BASE-NEXT: }
+
+; PAD_IN_DERIVED: struct SimplePadDerived [sizeof = 16]
+; PAD_IN_DERIVED-NEXT: : public SimplePadBase {
+; PAD_IN_DERIVED-NEXT: base +0x00 [sizeof=4] SimplePadBase
+; PAD_IN_DERIVED-NEXT: data +0x00 [sizeof=4] int X
+; PAD_IN_DERIVED-NEXT: <padding> (4 bytes)
+; PAD_IN_DERIVED-NEXT: data +0x08 [sizeof=8] __int64 Y
+; PAD_IN_DERIVED-NEXT: }
+
+; EMPTY_BASE: struct SimplePadEmpty [sizeof = 8]
+; EMPTY_BASE-NEXT: : public SimplePadEmptyBase1
+; EMPTY_BASE-NEXT: , public SimplePadEmptyBase2 {
+; EMPTY_BASE-NEXT: base +0x00 [sizeof=1] SimplePadEmptyBase1
+; EMPTY_BASE-NEXT: base +0x01 [sizeof=1] SimplePadEmptyBase2
+; EMPTY_BASE-NEXT: <padding> (2 bytes)
+; EMPTY_BASE-NEXT: data +0x04 [sizeof=4] int X
+; EMPTY_BASE-NEXT: }
+
+; VFPTR: struct SimplePadVfptr [sizeof = 8] {
+; VFPTR-NEXT: vfptr +0x00 [sizeof=4]
+; VFPTR-NEXT: [0] &SimplePadVfptr::~SimplePadVfptr
+; VFPTR-NEXT: data +0x04 [sizeof=4] int X
+; VFPTR-NEXT: }
+
+; MULTIPLE_INHERIT: struct SimplePadMultiInherit [sizeof = 8]
+; MULTIPLE_INHERIT-NEXT: : public NonEmptyBase1
+; MULTIPLE_INHERIT-NEXT: , public NonEmptyBase2 {
+; MULTIPLE_INHERIT-NEXT: base +0x00 [sizeof=1] NonEmptyBase1
+; MULTIPLE_INHERIT-NEXT: data +0x00 [sizeof=1] bool X
+; MULTIPLE_INHERIT-NEXT: base +0x01 [sizeof=1] NonEmptyBase2
+; MULTIPLE_INHERIT-NEXT: data +0x01 [sizeof=1] bool Y
+; MULTIPLE_INHERIT-NEXT: <padding> (2 bytes)
+; MULTIPLE_INHERIT-NEXT: data +0x04 [sizeof=4] int X
+; MULTIPLE_INHERIT-NEXT: }
+
+; MULTIPLE_INHERIT2: SimplePadMultiInherit2 [sizeof = 16]
+; MULTIPLE_INHERIT2-NEXT: : public SimplePadFields1
+; MULTIPLE_INHERIT2-NEXT: , public SimplePadFields2 {
+; MULTIPLE_INHERIT2-NEXT: base +0x00 [sizeof=4] SimplePadFields1
+; MULTIPLE_INHERIT2-NEXT: data +0x00 [sizeof=1] char A
+; MULTIPLE_INHERIT2-NEXT: data +0x01 [sizeof=1] char B
+; MULTIPLE_INHERIT2-NEXT: data +0x02 [sizeof=1] char C
+; MULTIPLE_INHERIT2-NEXT: <padding> (1 bytes)
+; MULTIPLE_INHERIT2-NEXT: base +0x04 [sizeof=8] SimplePadFields2
+; MULTIPLE_INHERIT2-NEXT: data +0x04 [sizeof=4] int Y
+; MULTIPLE_INHERIT2-NEXT: data +0x08 [sizeof=1] char X
+; MULTIPLE_INHERIT2-NEXT: <padding> (3 bytes)
+; MULTIPLE_INHERIT2-NEXT: data +0x0c [sizeof=4] int X
+; MULTIPLE_INHERIT2-NEXT: }
+
+; DEEP_INHERIT: struct SimplePadTwoLevelInherit [sizeof = 16]
+; DEEP_INHERIT-NEXT: : public OneLevelInherit {
+; DEEP_INHERIT-NEXT: base +0x00 [sizeof=4] OneLevelInherit
+; DEEP_INHERIT-NEXT: base +0x00 [sizeof=1] NonEmptyBase1
+; DEEP_INHERIT-NEXT: data +0x00 [sizeof=1] bool X
+; DEEP_INHERIT-NEXT: <padding> (1 bytes)
+; DEEP_INHERIT-NEXT: data +0x02 [sizeof=2] short Y
+; DEEP_INHERIT-NEXT: <padding> (4 bytes)
+; DEEP_INHERIT-NEXT: data +0x08 [sizeof=8] __int64 Z
+; DEEP_INHERIT-NEXT: }
+
+
+; AGGREGATE: struct SimplePadAggregate [sizeof = 8] {
+; AGGREGATE-NEXT: data +0x00 [sizeof=1] NonEmptyBase1 X
+; AGGREGATE-NEXT: data +0x00 [sizeof=1] bool X
+; AGGREGATE-NEXT: <padding> (3 bytes)
+; AGGREGATE-NEXT: data +0x04 [sizeof=4] int Y
+; AGGREGATE-NEXT: }
diff --git a/test/tools/llvm-pdbdump/simple-padding-text.test b/test/tools/llvm-pdbdump/simple-padding-text.test
new file mode 100644
index 000000000000..b33af565f65a
--- /dev/null
+++ b/test/tools/llvm-pdbdump/simple-padding-text.test
@@ -0,0 +1,94 @@
+; RUN: llvm-pdbdump pretty -classes -class-definitions=layout-members \
+; RUN: -include-types=SimplePad %p/Inputs/SimplePaddingTest.pdb > %t
+
+; RUN: FileCheck -input-file=%t %s -check-prefix=NO_PADDING
+; RUN: FileCheck -input-file=%t %s -check-prefix=UNION
+; RUN: FileCheck -input-file=%t %s -check-prefix=NESTED_UNION
+; RUN: FileCheck -input-file=%t %s -check-prefix=PAD_FROM_FIELDS1
+; RUN: FileCheck -input-file=%t %s -check-prefix=PAD_FROM_FIELDS2
+; RUN: FileCheck -input-file=%t %s -check-prefix=NO_PAD_IN_BASE
+; RUN: FileCheck -input-file=%t %s -check-prefix=PAD_IN_DERIVED
+; RUN: FileCheck -input-file=%t %s -check-prefix=EMPTY_BASE
+; RUN: FileCheck -input-file=%t %s -check-prefix=VFPTR
+; RUN: FileCheck -input-file=%t %s -check-prefix=MULTIPLE_INHERIT
+; RUN: FileCheck -input-file=%t %s -check-prefix=MULTIPLE_INHERIT2
+; RUN: FileCheck -input-file=%t %s -check-prefix=DEEP_INHERIT
+; RUN: FileCheck -input-file=%t %s -check-prefix=AGGREGATE
+
+; NO_PADDING: struct SimplePadNoPadding [sizeof = 8] {
+; NO_PADDING-NEXT: data +0x00 [sizeof=4] int X
+; NO_PADDING-NEXT: data +0x04 [sizeof=4] int Y
+; NO_PADDING-NEXT: }
+
+; UNION: struct SimplePadUnion [sizeof = 16] {
+; UNION-NEXT: data +0x00 [sizeof=4] int X
+; UNION-NEXT: data +0x00 [sizeof=8] __int64 Y
+; UNION-NEXT: data +0x00 [sizeof=16] SimplePadUnion::
+; UNION-NEXT: }
+
+; NESTED_UNION: struct {{SimplePadUnion::.*}} [sizeof = 16] {
+; NESTED_UNION-NEXT: data +0x00 [sizeof=4] int X
+; NESTED_UNION-NEXT: <padding> (4 bytes)
+; NESTED_UNION-NEXT: data +0x08 [sizeof=8] __int64 Y
+; NESTED_UNION-NEXT: }
+
+; PAD_FROM_FIELDS1: struct SimplePadFields1 [sizeof = 4] {
+; PAD_FROM_FIELDS1-NEXT: data +0x00 [sizeof=1] char A
+; PAD_FROM_FIELDS1-NEXT: data +0x01 [sizeof=1] char B
+; PAD_FROM_FIELDS1-NEXT: data +0x02 [sizeof=1] char C
+; PAD_FROM_FIELDS1-NEXT: <padding> (1 bytes)
+; PAD_FROM_FIELDS1-NEXT: }
+
+; PAD_FROM_FIELDS2: struct SimplePadFields2 [sizeof = 8] {
+; PAD_FROM_FIELDS2-NEXT: data +0x00 [sizeof=4] int Y
+; PAD_FROM_FIELDS2-NEXT: data +0x04 [sizeof=1] char X
+; PAD_FROM_FIELDS2-NEXT: <padding> (3 bytes)
+; PAD_FROM_FIELDS2-NEXT: }
+
+; NO_PAD_IN_BASE: struct SimplePadBase [sizeof = 4] {
+; NO_PAD_IN_BASE-NEXT: data +0x00 [sizeof=4] int X
+; NO_PAD_IN_BASE-NEXT: }
+
+; PAD_IN_DERIVED: struct SimplePadDerived [sizeof = 16]
+; PAD_IN_DERIVED-NEXT: public SimplePadBase {
+; PAD_IN_DERIVED-NEXT: <padding> (4 bytes)
+; PAD_IN_DERIVED-NEXT: data +0x08 [sizeof=8] __int64 Y
+; PAD_IN_DERIVED-NEXT: }
+
+; EMPTY_BASE: struct SimplePadEmpty [sizeof = 8]
+; EMPTY_BASE-NEXT: : public SimplePadEmptyBase1
+; EMPTY_BASE-NEXT: , public SimplePadEmptyBase2 {
+; EMPTY_BASE-NEXT: <padding> (2 bytes)
+; EMPTY_BASE-NEXT: data +0x04 [sizeof=4] int X
+; EMPTY_BASE-NEXT: }
+
+; VFPTR: struct SimplePadVfptr [sizeof = 8] {
+; VFPTR-NEXT: vfptr +0x00 [sizeof=4]
+; VFPTR-NEXT: data +0x04 [sizeof=4] int X
+; VFPTR-NEXT: }
+
+; MULTIPLE_INHERIT: struct SimplePadMultiInherit [sizeof = 8]
+; MULTIPLE_INHERIT-NEXT: : public NonEmptyBase1
+; MULTIPLE_INHERIT-NEXT: , public NonEmptyBase2 {
+; MULTIPLE_INHERIT-NEXT: <padding> (2 bytes)
+; MULTIPLE_INHERIT-NEXT: data +0x04 [sizeof=4] int X
+; MULTIPLE_INHERIT-NEXT: }
+
+; MULTIPLE_INHERIT2: SimplePadMultiInherit2 [sizeof = 16]
+; MULTIPLE_INHERIT2-NEXT: : public SimplePadFields1
+; MULTIPLE_INHERIT2-NEXT: , public SimplePadFields2 {
+; MULTIPLE_INHERIT2-NEXT: data +0x0c [sizeof=4] int X
+; MULTIPLE_INHERIT2-NEXT: }
+
+; DEEP_INHERIT: struct SimplePadTwoLevelInherit [sizeof = 16]
+; DEEP_INHERIT-NEXT: : public OneLevelInherit {
+; DEEP_INHERIT-NEXT: <padding> (4 bytes)
+; DEEP_INHERIT-NEXT: data +0x08 [sizeof=8] __int64 Z
+; DEEP_INHERIT-NEXT: }
+
+
+; AGGREGATE: struct SimplePadAggregate [sizeof = 8] {
+; AGGREGATE-NEXT: data +0x00 [sizeof=1] NonEmptyBase1 X
+; AGGREGATE-NEXT: <padding> (3 bytes)
+; AGGREGATE-NEXT: data +0x04 [sizeof=4] int Y
+; AGGREGATE-NEXT: }
diff --git a/test/tools/llvm-profdata/memop-size-prof.proftext b/test/tools/llvm-profdata/memop-size-prof.proftext
new file mode 100644
index 000000000000..882fc1ecf296
--- /dev/null
+++ b/test/tools/llvm-profdata/memop-size-prof.proftext
@@ -0,0 +1,123 @@
+# RUN: llvm-profdata show -memop-sizes -ic-targets -function=foo %s | FileCheck %s --check-prefixes=MEMOP,MEMOP_SUM,ICALL,ICALL_SUM
+# RUN: llvm-profdata show -memop-sizes -ic-targets -counts -text -function=foo %s | FileCheck %s --check-prefixes=TEXT,MEMOP_TEXT,ICALL_TEXT
+# RUN: llvm-profdata merge -o %t.profdata %s
+# RUN: llvm-profdata show -memop-sizes -ic-targets -function=foo %t.profdata | FileCheck %s --check-prefixes=MEMOP,MEMOP_SUM,ICALL,ICALL_SUM
+# RUN: llvm-profdata merge -o %t.proftext -text %s
+# RUN: llvm-profdata show -memop-sizes -ic-targets -function=foo %t.proftext | FileCheck %s --check-prefixes=MEMOP,MEMOP_SUM,ICALL,ICALL_SUM
+
+# IR level Instrumentation Flag
+:ir
+ic1
+# Func Hash:
+10
+# Num Counters:
+2
+# Counter Values:
+999000
+359800
+
+ic2
+# Func Hash:
+10
+# Num Counters:
+2
+# Counter Values:
+1001000
+360200
+
+foo
+# Func Hash:
+35277121310
+# Num Counters:
+3
+# Counter Values:
+20
+556
+1
+# Num Value Kinds:
+2
+# Value Kind IPVK_IndirectCallTarget
+0
+# NumSites
+3
+# Values for each site
+0
+2
+ic2:1000
+ic1:100
+1
+ic2:20000
+#ICALL: Indirect Target Results:
+#ICALL-NEXT: [ 1, ic2, 1000 ]
+#ICALL-NEXT: [ 1, ic1, 100 ]
+#ICALL-NEXT: [ 2, ic2, 20000 ]
+
+# ValueKind = IPVK_MemOPSize:
+1
+# NumValueSites:
+1
+9
+1:99
+2:88
+3:77
+9:72
+4:66
+5:55
+6:44
+7:33
+8:22
+
+#MEMOP: Memory Instrinsic Size Results:
+#MEMOP-NEXT: [ 0, 1, 99 ]
+#MEMOP-NEXT: [ 0, 2, 88 ]
+#MEMOP-NEXT: [ 0, 3, 77 ]
+#MEMOP-NEXT: [ 0, 9, 72 ]
+#MEMOP-NEXT: [ 0, 4, 66 ]
+#MEMOP-NEXT: [ 0, 5, 55 ]
+#MEMOP-NEXT: [ 0, 6, 44 ]
+#MEMOP-NEXT: [ 0, 7, 33 ]
+#MEMOP-NEXT: [ 0, 8, 22 ]
+
+#ICALL_SUM: Statistics for indirect call sites profile:
+#ICALL_SUM: Total number of sites: 3
+#ICALL_SUM: Total number of sites with values: 2
+#ICALL_SUM: Total number of profiled values: 3
+#ICALL_SUM: Value sites histogram:
+#ICALL_SUM: NumTargets, SiteCount
+#ICALL_SUM: 1, 1
+#ICALL_SUM: 2, 1
+
+#MEMOP_SUM: Statistics for memory intrinsic calls sizes profile:
+#MEMOP_SUM: Total number of sites: 1
+#MEMOP_SUM: Total number of sites with values: 1
+#MEMOP_SUM: Total number of profiled values: 9
+#MEMOP_SUM: Value sites histogram:
+#MEMOP_SUM: NumTargets, SiteCount
+#MEMOP_SUM: 9, 1
+
+#TEXT: # Num Value Kinds:
+#TEXT: 2
+#ICALL_TEXT: # ValueKind = IPVK_IndirectCallTarget:
+#ICALL_TEXT: 0
+#ICALL_TEXT: # NumValueSites:
+#ICALL_TEXT: 3
+#ICALL_TEXT: 0
+#ICALL_TEXT: 2
+#ICALL_TEXT: ic2:1000
+#ICALL_TEXT: ic1:100
+#ICALL_TEXT: 1
+#ICALL_TEXT: ic2:20000
+#MEMOP_TEXT: # ValueKind = IPVK_MemOPSize:
+#MEMOP_TEXT: 1
+#MEMOP_TEXT: # NumValueSites:
+#MEMOP_TEXT: 1
+#MEMOP_TEXT: 9
+#MEMOP_TEXT: 1:99
+#MEMOP_TEXT: 2:88
+#MEMOP_TEXT: 3:77
+#MEMOP_TEXT: 9:72
+#MEMOP_TEXT: 4:66
+#MEMOP_TEXT: 5:55
+#MEMOP_TEXT: 6:44
+#MEMOP_TEXT: 7:33
+#MEMOP_TEXT: 8:22
diff --git a/test/tools/llvm-profdata/value-prof.proftext b/test/tools/llvm-profdata/value-prof.proftext
index b5979c842498..31a7698895dd 100644
--- a/test/tools/llvm-profdata/value-prof.proftext
+++ b/test/tools/llvm-profdata/value-prof.proftext
@@ -46,13 +46,13 @@ foo2:1000
foo2:20000
#ICTXT: Indirect Call Site Count: 3
-#ICTXT-NEXT: Indirect Target Results:
+#ICTXT-NEXT: Indirect Target Results:
#ICTXT-NEXT: [ 1, foo, 100 ]
#ICTXT-NEXT: [ 1, foo2, 1000 ]
#ICTXT-NEXT: [ 2, foo2, 20000 ]
#IC: Indirect Call Site Count: 3
-#IC-NEXT: Indirect Target Results:
+#IC-NEXT: Indirect Target Results:
#IC-NEXT: [ 1, foo2, 1000 ]
#IC-NEXT: [ 1, foo, 100 ]
#IC-NEXT: [ 2, foo2, 20000 ]
@@ -63,10 +63,19 @@ foo2:20000
#ICTEXT-NEXT: foo2:20000
#
-#ICSUM: Total Number of Indirect Call Sites : 3
-#ICSUM: Total Number of Sites With Values : 2
-#ICSUM: Total Number of Profiled Values : 3
-#ICSUM: NumTargets, SiteCount
-#ICSUM 1, 1
-#ICSUM 2, 1
+bar
+# Func Hash:
+10
+# Num Counters:
+2
+# Counter Values:
+999000
+359800
+#ICSUM: Statistics for indirect call sites profile:
+#ICSUM: Total number of sites: 3
+#ICSUM: Total number of sites with values: 2
+#ICSUM: Total number of profiled values: 3
+#ICSUM: NumTargets, SiteCount
+#ICSUM: 1, 1
+#ICSUM: 2, 1
diff --git a/test/tools/llvm-readobj/Inputs/codeview-cycle.obj b/test/tools/llvm-readobj/Inputs/codeview-cycle.obj
new file mode 100644
index 000000000000..85c2d0e55fe4
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/codeview-cycle.obj
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/codeview-label.obj b/test/tools/llvm-readobj/Inputs/codeview-label.obj
new file mode 100644
index 000000000000..ae49a061bb7c
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/codeview-label.obj
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/codeview-merging-anon.obj b/test/tools/llvm-readobj/Inputs/codeview-merging-anon.obj
new file mode 100644
index 000000000000..3cb58fbd4d54
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/codeview-merging-anon.obj
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/codeview-unsorted.obj b/test/tools/llvm-readobj/Inputs/codeview-unsorted.obj
new file mode 100644
index 000000000000..08a376de3b96
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/codeview-unsorted.obj
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/trivial.obj.wasm b/test/tools/llvm-readobj/Inputs/trivial.obj.wasm
new file mode 100644
index 000000000000..f14192f1798b
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/trivial.obj.wasm
Binary files differ
diff --git a/test/tools/llvm-readobj/codeview-label.test b/test/tools/llvm-readobj/codeview-label.test
new file mode 100644
index 000000000000..3bf6debe0d7f
--- /dev/null
+++ b/test/tools/llvm-readobj/codeview-label.test
@@ -0,0 +1,16 @@
+; RUN: llvm-readobj -codeview %S/Inputs/codeview-label.obj | FileCheck %s
+
+; CHECK-LABEL: Label (0x1000) {
+; CHECK-NEXT: TypeLeafKind: LF_LABEL (0xE)
+; CHECK-NEXT: Mode: Near (0x0)
+; CHECK-NEXT: }
+
+; To reproduce codeview-label.obj:
+; $ cat codeview-label.asm
+; .model flat, C
+; .code
+; public foo
+; foo:
+; ret
+; end
+; $ ml -c -Zi codeview-label.asm
diff --git a/test/tools/llvm-readobj/codeview-merging-anon.test b/test/tools/llvm-readobj/codeview-merging-anon.test
new file mode 100644
index 000000000000..cf0484074d02
--- /dev/null
+++ b/test/tools/llvm-readobj/codeview-merging-anon.test
@@ -0,0 +1,29 @@
+# Test what happens when the first type record (0x1000) is an LF_FIELDLIST
+# record.
+
+# Steps to regenerate input:
+# $ cat t.c
+# struct { int x; } o;
+# $ cl -Z7 t.c
+
+RUN: llvm-readobj -codeview %S/Inputs/codeview-merging-anon.obj | FileCheck %s
+RUN: llvm-readobj -codeview-merged-types %S/Inputs/codeview-merging-anon.obj | FileCheck %s
+
+CHECK-LABEL: FieldList (0x1000) {
+CHECK-NEXT: TypeLeafKind: LF_FIELDLIST (0x1203)
+CHECK-NEXT: DataMember {
+CHECK-NEXT: TypeLeafKind: LF_MEMBER (0x150D)
+CHECK-NEXT: AccessSpecifier: Public (0x3)
+CHECK-NEXT: Type: int (0x74)
+CHECK-NEXT: FieldOffset: 0x0
+CHECK-NEXT: Name: x
+CHECK-NEXT: }
+CHECK-NEXT: }
+CHECK-LABEL: Struct (0x1001) {
+CHECK: TypeLeafKind: LF_STRUCTURE (0x1505)
+CHECK: MemberCount: 1
+CHECK: FieldList: <field list> (0x1000)
+CHECK: Name: <unnamed-tag>
+CHECK: LinkageName: .?AU<unnamed-tag>@@
+CHECK: }
+CHECK-LABEL: StringId
diff --git a/test/tools/llvm-readobj/codeview-merging-cycle.test b/test/tools/llvm-readobj/codeview-merging-cycle.test
new file mode 100644
index 000000000000..3a96be9ca985
--- /dev/null
+++ b/test/tools/llvm-readobj/codeview-merging-cycle.test
@@ -0,0 +1,19 @@
+; RUN: not llvm-readobj -codeview-merged-types %S/Inputs/codeview-cycle.obj 2>&1 | FileCheck %s
+
+; CHECK: Error{{.*}} input type graph contains cycles
+
+; To reproduce codeview-cycle.obj:
+; $ cat codeview-cycle.asm
+; .model flat, C
+; .code
+; pfoo_list TYPEDEF PTR foo_list
+; foo_list STRUCT
+; next pfoo_list ?
+; data dd ?
+; foo_list ENDS
+; public foo
+; foo proc dst:ptr foo_list
+; ret
+; foo endp
+; end
+; $ ml -c -Zi codeview-cycle.asm
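
llvm-readobj rejects this input while merging because foo_list refers to
itself through pfoo_list, so the emitted type records reference each other. A
rough sketch of such a cycle check as a three-color DFS over type-index
references (all names hypothetical):

# Hypothetical sketch: detect cycles in a type-reference graph with DFS.
# refs maps a type index to the indices it refers to; hitting a node that
# is still on the DFS stack (GRAY) means the graph contains a cycle.
def has_cycle(refs):
    WHITE, GRAY, BLACK = 0, 1, 2
    color = {n: WHITE for n in refs}

    def visit(n):
        color[n] = GRAY
        for m in refs.get(n, ()):
            if color.get(m) == GRAY:
                return True                       # back edge: cycle
            if color.get(m) == WHITE and visit(m):
                return True
        color[n] = BLACK
        return False

    return any(color[n] == WHITE and visit(n) for n in refs)

# Mirrors codeview-cycle.obj: the pointer and the struct refer to each other.
print(has_cycle({0x1000: [0x1001], 0x1001: [0x1000]}))  # True
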
diff --git a/test/tools/llvm-readobj/codeview-merging-unsorted.test b/test/tools/llvm-readobj/codeview-merging-unsorted.test
new file mode 100644
index 000000000000..6aaab3a891ee
--- /dev/null
+++ b/test/tools/llvm-readobj/codeview-merging-unsorted.test
@@ -0,0 +1,40 @@
+; RUN: llvm-readobj -codeview %S/Inputs/codeview-unsorted.obj | FileCheck %s
+; RUN: llvm-readobj -codeview-merged-types %S/Inputs/codeview-unsorted.obj | FileCheck %s --check-prefix=MERGED
+
+; The input type stream has records that refer to later type indices in the same
+; stream:
+
+; CHECK: Pointer (0x1000)
+; CHECK: Struct (0x1001)
+; CHECK: FieldList: {{.*}} (0x1002)
+; CHECK: FieldList (0x1002)
+; CHECK: Pointer (0x1003)
+; CHECK: Procedure (0x1004)
+; CHECK: ArgListType: {{.*}} (0x1005)
+; CHECK: ArgList (0x1005)
+
+; MERGED: Pointer (0x1000)
+; MERGED: FieldList (0x1001)
+; MERGED: Struct (0x1002)
+; MERGED: FieldList: {{.*}} (0x1001)
+; MERGED: Pointer (0x1003)
+; MERGED: ArgList (0x1004)
+; MERGED: Procedure (0x1005)
+; MERGED: ArgListType: {{.*}} (0x1004)
+
+
+; To reproduce codeview-unsorted.obj:
+; $ cat codeview-unsorted.asm
+; .model flat, C
+; .code
+; PBYTE TYPEDEF PTR BYTE
+; foo_list STRUCT
+; next PBYTE ?
+; data dd ?
+; foo_list ENDS
+; public foo
+; foo proc dst:ptr foo_list
+; ret
+; foo endp
+; end
+; $ ml -c -Zi codeview-unsorted.asm
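
A merged stream has to emit every record after the records it references,
which is why MERGED lists the FieldList before the Struct and the ArgList
before the Procedure. A rough sketch of that reordering as a post-order DFS
(renumbering then follows the new order; names hypothetical, and the cycle
case from the previous test is deliberately ignored here):

# Hypothetical sketch: emit type records so every referenced index precedes
# the record that uses it.
def reorder(refs):
    emitted, order = set(), []

    def emit(n):
        if n in emitted:
            return
        emitted.add(n)
        for m in refs.get(n, ()):
            emit(m)
        order.append(n)

    for n in refs:
        emit(n)
    return order

# The input stream above: Struct 0x1001 refers forward to FieldList 0x1002,
# Procedure 0x1004 refers forward to ArgList 0x1005.
refs = {0x1000: [], 0x1001: [0x1002], 0x1002: [],
        0x1003: [], 0x1004: [0x1005], 0x1005: []}
print([hex(n) for n in reorder(refs)])
# ['0x1000', '0x1002', '0x1001', '0x1003', '0x1005', '0x1004']
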
diff --git a/test/tools/llvm-readobj/codeview-merging.test b/test/tools/llvm-readobj/codeview-merging.test
index 60894eff33eb..4d453e5a1167 100644
--- a/test/tools/llvm-readobj/codeview-merging.test
+++ b/test/tools/llvm-readobj/codeview-merging.test
@@ -21,6 +21,15 @@ RUN: llvm-readobj -codeview %S/Inputs/codeview-merging-1.obj | FileCheck %s --ch
RUN: llvm-readobj -codeview %S/Inputs/codeview-merging-2.obj | FileCheck %s --check-prefix=OBJ2
RUN: llvm-readobj -codeview-merged-types %S/Inputs/codeview-merging-1.obj %S/Inputs/codeview-merging-2.obj | FileCheck %s
+OBJ1: Procedure ({{.*}}) {
+OBJ1-NEXT: TypeLeafKind: LF_PROCEDURE (0x1008)
+OBJ1-NEXT: ReturnType: int (0x74)
+OBJ1-NEXT: CallingConvention: NearC (0x0)
+OBJ1-NEXT: FunctionOptions [ (0x0)
+OBJ1-NEXT: ]
+OBJ1-NEXT: NumParameters: 1
+OBJ1-NEXT: ArgListType: (A*) (0x1002)
+OBJ1-NEXT: }
OBJ1: FuncId (0x100D) {
OBJ1-NEXT: TypeLeafKind: LF_FUNC_ID (0x1601)
OBJ1-NEXT: ParentScope: 0x0
@@ -50,16 +59,55 @@ OBJ2-NEXT: Name: g
OBJ2-NEXT: }
OBJ2-NOT: FuncId
-CHECK: FuncId (0x100D) {
+CHECK: MergedTypeStream [
+CHECK: Procedure ({{.*}}) {
+CHECK-NEXT: TypeLeafKind: LF_PROCEDURE (0x1008)
+CHECK-NEXT: ReturnType: int (0x74)
+CHECK-NEXT: CallingConvention: NearC (0x0)
+CHECK-NEXT: FunctionOptions [ (0x0)
+CHECK-NEXT: ]
+CHECK-NEXT: NumParameters: 1
+CHECK-NEXT: ArgListType: (A*) (0x1002)
+CHECK-NEXT: }
+CHECK: Struct (0x1007) {
+CHECK-NEXT: TypeLeafKind: LF_STRUCTURE (0x1505)
+CHECK-NEXT: MemberCount: 1
+CHECK-NEXT: Properties [ (0x200)
+CHECK-NEXT: HasUniqueName (0x200)
+CHECK-NEXT: ]
+CHECK-NEXT: FieldList: <field list> (0x1006)
+CHECK-NEXT: DerivedFrom: 0x0
+CHECK-NEXT: VShape: 0x0
+CHECK-NEXT: SizeOf: 8
+CHECK-NEXT: Name: B
+CHECK-NEXT: LinkageName: .?AUB@@
+CHECK-NEXT: }
+CHECK: ]
+
+CHECK: MergedIDStream [
+CHECK-NEXT: StringId (0x1000) {
+CHECK-NEXT: TypeLeafKind: LF_STRING_ID (0x1605)
+CHECK-NEXT: Id: 0x0
+CHECK-NEXT: StringData: d:\src\llvm\build\t.cpp
+CHECK-NEXT: }
+# Test that we contextually dump item ids and type ids from different databases.
+CHECK-NEXT: UdtSourceLine (0x1001) {
+CHECK-NEXT: TypeLeafKind: LF_UDT_SRC_LINE (0x1606)
+CHECK-NEXT: UDT: B (0x1007)
+CHECK-NEXT: SourceFile: d:\src\llvm\build\t.cpp (0x1000)
+CHECK-NEXT: LineNumber: 3
+CHECK-NEXT: }
+CHECK: FuncId (0x1002) {
CHECK-NEXT: TypeLeafKind: LF_FUNC_ID (0x1601)
CHECK-NEXT: ParentScope: 0x0
-CHECK-NEXT: FunctionType: int (B*) (0x100C)
+CHECK-NEXT: FunctionType: int (B*)
CHECK-NEXT: Name: g
CHECK-NEXT: }
-CHECK-NEXT: FuncId (0x100E) {
+CHECK-NEXT: FuncId (0x1003) {
CHECK-NEXT: TypeLeafKind: LF_FUNC_ID (0x1601)
CHECK-NEXT: ParentScope: 0x0
-CHECK-NEXT: FunctionType: int (A*) (0x1003)
+CHECK-NEXT: FunctionType: int (A*)
CHECK-NEXT: Name: f
CHECK-NEXT: }
CHECK-NOT: FuncId
+CHECK: ]
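
The updated checks also show the renumbering side of merging: the FuncId that
was 0x100D in codeview-merging-1.obj lands at 0x1002 in the merged ID stream,
after the StringId (0x1000) and UdtSourceLine (0x1001), and its FunctionType
is shown without a raw index because the referenced record now lives in the
separate merged type database. A minimal sketch of that bookkeeping, assuming
a simple list-backed destination stream (hypothetical shape):

# Hypothetical sketch: deduplicating merge that yields an old->new index map.
# Real merging also rewrites each record's internal references through the
# map for the stream (type or ID) those references point into.
def merge_stream(dest_records, src_records):
    remap = {}
    for old_index, record in src_records:
        if record not in dest_records:
            dest_records.append(record)
        remap[old_index] = 0x1000 + dest_records.index(record)
    return remap

merged_ids = ["string id: t.cpp", "udt src line: B"]   # merged earlier
remap = merge_stream(merged_ids, [(0x100D, "func id: g"),
                                  (0x100E, "func id: f")])
print(hex(remap[0x100D]), hex(remap[0x100E]))          # 0x1002 0x1003
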
diff --git a/test/tools/llvm-readobj/file-headers.test b/test/tools/llvm-readobj/file-headers.test
index 662c9b6bd4d7..47fb24de1b60 100644
--- a/test/tools/llvm-readobj/file-headers.test
+++ b/test/tools/llvm-readobj/file-headers.test
@@ -26,6 +26,11 @@ RUN: llvm-readobj -h %p/Inputs/magic.coff-importlib \
RUN: | FileCheck %s -check-prefix COFF-IMPORTLIB
RUN: llvm-readobj -h %p/Inputs/trivial.obj.elf-lanai \
RUN: | FileCheck %s -check-prefix ELF-LANAI
+# trivial.obj.wasm was generated using the following command:
+# echo "extern int bar, baz; int foo() { return bar + baz + (int)&foo; }" | \
+# ./bin/clang -c -o trivial.obj.wasm -target wasm32-unknown-unknown-wasm -x c -
+RUN: llvm-readobj -h %p/Inputs/trivial.obj.wasm \
+RUN: | FileCheck %s -check-prefix WASM
COFF-ARM: File: {{(.*[/\\])?}}trivial.obj.coff-arm
COFF-ARM-NEXT: Format: COFF-ARM
@@ -367,3 +372,8 @@ ELF-LANAI-NEXT: SectionHeaderEntrySize: 40
ELF-LANAI-NEXT: SectionHeaderCount: 8
ELF-LANAI-NEXT: StringTableSectionIndex: 1
ELF-LANAI-NEXT: }
+
+WASM: Format: WASM
+WASM-NEXT: Arch: wasm32
+WASM-NEXT: AddressSize: 32bit
+WASM-NEXT: Version: 0x1
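
The four WASM fields come straight from the fixed 8-byte module header: the
magic "\0asm" followed by a little-endian u32 version (0x1 here), with the
wasm32 target implying 32-bit addresses. A small sketch of reading it
(hypothetical helper):

# Hypothetical sketch: read the WebAssembly module header summarized by the
# WASM check lines above.
import struct

def read_wasm_header(path):
    with open(path, "rb") as f:
        magic, version = struct.unpack("<4sI", f.read(8))
    if magic != b"\x00asm":
        raise ValueError("not a wasm module")
    return version  # read_wasm_header("trivial.obj.wasm") -> 1
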
diff --git a/test/tools/llvm-readobj/relocations.test b/test/tools/llvm-readobj/relocations.test
index 229fef54fb8b..475ac1d7e296 100644
--- a/test/tools/llvm-readobj/relocations.test
+++ b/test/tools/llvm-readobj/relocations.test
@@ -16,6 +16,8 @@ RUN: llvm-readobj -r --expand-relocs %p/Inputs/trivial.obj.macho-ppc64 \
RUN: | FileCheck %s -check-prefix MACHO-PPC64
RUN: llvm-readobj -r -expand-relocs %p/Inputs/trivial.obj.macho-arm \
RUN: | FileCheck %s -check-prefix MACHO-ARM
+RUN: llvm-readobj -r --expand-relocs %p/Inputs/trivial.obj.wasm \
+RUN: | FileCheck %s -check-prefix WASM
COFF: Relocations [
COFF-NEXT: Section (1) .text {
@@ -283,3 +285,26 @@ MACHO-ARM-NEXT: Value: 0x4
MACHO-ARM-NEXT: }
MACHO-ARM-NEXT: }
MACHO-ARM-NEXT: ]
+
+WASM: Relocations [
+WASM-NEXT: Section (8) CODE {
+WASM-NEXT: Relocation {
+WASM-NEXT: Type: R_WEBASSEMBLY_TABLE_INDEX_SLEB (1)
+WASM-NEXT: Offset: 0x6
+WASM-NEXT: Index: 0x0
+WASM-NEXT: Addend: 0x0
+WASM-NEXT: }
+WASM-NEXT: Relocation {
+WASM-NEXT: Type: R_WEBASSEMBLY_GLOBAL_ADDR_LEB (3)
+WASM-NEXT: Offset: 0x15
+WASM-NEXT: Index: 0x0
+WASM-NEXT: Addend: 0x0
+WASM-NEXT: }
+WASM-NEXT: Relocation {
+WASM-NEXT: Type: R_WEBASSEMBLY_GLOBAL_ADDR_LEB (3)
+WASM-NEXT: Offset: 0x24
+WASM-NEXT: Index: 0x1
+WASM-NEXT: Addend: 0x0
+WASM-NEXT: }
+WASM-NEXT: }
+WASM-NEXT: ]
diff --git a/test/tools/llvm-readobj/sections.test b/test/tools/llvm-readobj/sections.test
index 54654e7070ef..312c47fe4c82 100644
--- a/test/tools/llvm-readobj/sections.test
+++ b/test/tools/llvm-readobj/sections.test
@@ -14,6 +14,8 @@ RUN: llvm-readobj -s %p/Inputs/trivial.obj.macho-ppc64 \
RUN: | FileCheck %s -check-prefix MACHO-PPC64
RUN: llvm-readobj -s %p/Inputs/trivial.obj.macho-arm \
RUN: | FileCheck %s -check-prefix MACHO-ARM
+RUN: llvm-readobj -s %p/Inputs/trivial.obj.wasm \
+RUN: | FileCheck %s -check-prefix WASM
COFF: Sections [
COFF-NEXT: Section {
@@ -490,3 +492,58 @@ MACHO-ARM-NEXT: Reserved1: 0x0
MACHO-ARM-NEXT: Reserved2: 0x0
MACHO-ARM-NEXT: }
MACHO-ARM-NEXT:]
+
+WASM: Sections [
+WASM-NEXT: Section {
+WASM-NEXT: Type: TYPE (0x1)
+WASM-NEXT: Size: 5
+WASM-NEXT: Offset: 8
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: IMPORT (0x2)
+WASM-NEXT: Size: 23
+WASM-NEXT: Offset: 19
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: FUNCTION (0x3)
+WASM-NEXT: Size: 2
+WASM-NEXT: Offset: 48
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: TABLE (0x4)
+WASM-NEXT: Size: 4
+WASM-NEXT: Offset: 56
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: MEMORY (0x5)
+WASM-NEXT: Size: 3
+WASM-NEXT: Offset: 66
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: EXPORT (0x7)
+WASM-NEXT: Size: 7
+WASM-NEXT: Offset: 75
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: ELEM (0x9)
+WASM-NEXT: Size: 7
+WASM-NEXT: Offset: 88
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: CODE (0xA)
+WASM-NEXT: Size: 61
+WASM-NEXT: Offset: 101
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: CUSTOM (0x0)
+WASM-NEXT: Size: 17
+WASM-NEXT: Offset: 168
+WASM-NEXT: Name: name
+WASM-NEXT: }
+WASM-NEXT: Section {
+WASM-NEXT: Type: CUSTOM (0x0)
+WASM-NEXT: Size: 24
+WASM-NEXT: Offset: 191
+WASM-NEXT: Name: reloc.CODE
+WASM-NEXT: }
+WASM-NEXT:]
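
In the standard encoding each of the sections above is a one-byte type id, a
LEB128-encoded payload size, and the payload, with custom sections (id 0)
additionally carrying a LEB128-prefixed name such as "name" or "reloc.CODE".
A sketch of walking that layout (hypothetical helpers; exactly how
llvm-readobj derives the Size and Offset values is left to the checks above):

# Hypothetical sketch: iterate the section table of a wasm binary.
def read_uleb128(buf, pos):
    result = shift = 0
    while True:
        byte = buf[pos]
        pos += 1
        result |= (byte & 0x7F) << shift
        if not byte & 0x80:
            return result, pos
        shift += 7

def walk_sections(data):
    pos = 8                       # skip the 8-byte magic + version header
    while pos < len(data):
        sec_type = data[pos]
        size, payload_start = read_uleb128(data, pos + 1)
        yield sec_type, size, payload_start
        pos = payload_start + size

# for sec_type, size, start in walk_sections(open("trivial.obj.wasm", "rb").read()):
#     print(hex(sec_type), size, start)
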
diff --git a/test/tools/llvm-readobj/symbols.test b/test/tools/llvm-readobj/symbols.test
index 71955e0d8235..1037c2866023 100644
--- a/test/tools/llvm-readobj/symbols.test
+++ b/test/tools/llvm-readobj/symbols.test
@@ -2,6 +2,8 @@ RUN: llvm-readobj -t %p/Inputs/trivial.obj.coff-i386 \
RUN: | FileCheck %s -check-prefix COFF
RUN: llvm-readobj -t %p/Inputs/trivial.obj.elf-i386 \
RUN: | FileCheck %s -check-prefix ELF
+RUN: llvm-readobj -t %p/Inputs/trivial.obj.wasm \
+RUN: | FileCheck %s -check-prefix WASM
COFF: Symbols [
COFF-NEXT: Symbol {
@@ -68,3 +70,22 @@ ELF-NEXT: Type: Object (0x1)
ELF-NEXT: Other: 0
ELF-NEXT: Section: .rodata.str1.1 (0x5)
ELF-NEXT: }
+
+WASM: Symbols [
+WASM-NEXT: Symbol {
+WASM-NEXT: Name: bar
+WASM-NEXT: Type: GLOBAL_IMPORT (0x2)
+WASM-NEXT: }
+WASM-NEXT: Symbol {
+WASM-NEXT: Name: baz
+WASM-NEXT: Type: GLOBAL_IMPORT (0x2)
+WASM-NEXT: }
+WASM-NEXT: Symbol {
+WASM-NEXT: Name: foo
+WASM-NEXT: Type: FUNCTION_EXPORT (0x1)
+WASM-NEXT: }
+WASM-NEXT: Symbol {
+WASM-NEXT: Name: foo
+WASM-NEXT: Type: DEBUG_FUNCTION_NAME (0x4)
+WASM-NEXT: }
+WASM-NEXT: ]
diff --git a/test/tools/llvm-strings/Inputs/numbers b/test/tools/llvm-strings/Inputs/numbers
new file mode 100644
index 000000000000..c9e9e05f445e
--- /dev/null
+++ b/test/tools/llvm-strings/Inputs/numbers
@@ -0,0 +1,10 @@
+one
+two
+three
+four
+five
+six
+seven
+eight
+nine
+ten
diff --git a/test/tools/llvm-strings/radix.test b/test/tools/llvm-strings/radix.test
new file mode 100644
index 000000000000..c81d9fe66c33
--- /dev/null
+++ b/test/tools/llvm-strings/radix.test
@@ -0,0 +1,33 @@
+RUN: llvm-strings %S/Inputs/numbers | FileCheck %s -check-prefix CHECK-NONE
+RUN: llvm-strings -t d %S/Inputs/numbers | FileCheck %s -check-prefix CHECK-DEC
+RUN: llvm-strings -t o %S/Inputs/numbers | FileCheck %s -check-prefix CHECK-OCT
+RUN: llvm-strings -t x %S/Inputs/numbers | FileCheck %s -check-prefix CHECK-HEX
+
+CHECK-NONE: three
+CHECK-NONE: four
+CHECK-NONE: five
+CHECK-NONE: seven
+CHECK-NONE: eight
+CHECK-NONE: nine
+
+CHECK-DEC: 8 three
+CHECK-DEC: 14 four
+CHECK-DEC: 19 five
+CHECK-DEC: 28 seven
+CHECK-DEC: 34 eight
+CHECK-DEC: 40 nine
+
+CHECK-OCT: 10 three
+CHECK-OCT: 16 four
+CHECK-OCT: 23 five
+CHECK-OCT: 34 seven
+CHECK-OCT: 42 eight
+CHECK-OCT: 50 nine
+
+CHECK-HEX: 8 three
+CHECK-HEX: e four
+CHECK-HEX: 13 five
+CHECK-HEX: 1c seven
+CHECK-HEX: 22 eight
+CHECK-HEX: 28 nine
+
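
The expected offsets follow directly from the input: "three" starts at byte 8
of Inputs/numbers (8 decimal, 10 octal, 8 hex), while three-byte strings such
as "one", "six", and "ten" fall under the default four-character minimum and
never appear. A sketch of the -t behavior (hypothetical helper; the real
llvm-strings output may pad the offsets differently):

# Hypothetical sketch: print printable runs of >= 4 bytes, optionally
# prefixed with the run's starting offset in decimal, octal, or hex.
import string

def strings_with_offsets(data, radix=None, min_len=4):
    printable = set(string.printable.encode()) - set(b"\t\n\r\x0b\x0c")
    run_start = None
    for i, byte in enumerate(list(data) + [0]):   # sentinel flushes last run
        if byte in printable:
            run_start = i if run_start is None else run_start
            continue
        if run_start is not None and i - run_start >= min_len:
            prefix = {None: "", "d": f"{run_start} ", "o": f"{run_start:o} ",
                      "x": f"{run_start:x} "}[radix]
            yield prefix + data[run_start:i].decode()
        run_start = None

for line in strings_with_offsets(b"one\ntwo\nthree\nfour\n", "o"):
    print(line)                                   # "10 three", "16 four"
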
diff --git a/test/tools/llvm-symbolizer/Inputs/discrim b/test/tools/llvm-symbolizer/Inputs/discrim
new file mode 100644
index 000000000000..ec61fe960bff
--- /dev/null
+++ b/test/tools/llvm-symbolizer/Inputs/discrim
Binary files differ
diff --git a/test/tools/llvm-symbolizer/Inputs/discrim.c b/test/tools/llvm-symbolizer/Inputs/discrim.c
new file mode 100644
index 000000000000..decbce8d454e
--- /dev/null
+++ b/test/tools/llvm-symbolizer/Inputs/discrim.c
@@ -0,0 +1,8 @@
+static volatile int do_mul;
+static volatile int do_inc;
+
+int main () {
+ int x = 1;
+ if (do_mul) x *= 2; else x /= 2;
+ return do_inc ? ++x : --x;
+}
diff --git a/test/tools/llvm-symbolizer/Inputs/discrim.inp b/test/tools/llvm-symbolizer/Inputs/discrim.inp
new file mode 100644
index 000000000000..f8ad6018d709
--- /dev/null
+++ b/test/tools/llvm-symbolizer/Inputs/discrim.inp
@@ -0,0 +1,5 @@
+some text
+0x4004f2
+0x400509
+0x40050d
+some more text
diff --git a/test/tools/llvm-symbolizer/sym-verbose.test b/test/tools/llvm-symbolizer/sym-verbose.test
new file mode 100644
index 000000000000..ef66db919faa
--- /dev/null
+++ b/test/tools/llvm-symbolizer/sym-verbose.test
@@ -0,0 +1,39 @@
+#static volatile int do_mul;
+#static volatile int do_inc;
+#
+#int main () {
+# int x = 1;
+# if (do_mul) x *= 2; else x /= 2;
+# return do_inc ? ++x : --x;
+#}
+#Build as: clang -g -O2 discrim.c -o discrim
+
+RUN: llvm-symbolizer -verbose -print-address -obj=%p/Inputs/discrim < %p/Inputs/discrim.inp | FileCheck %s
+
+#CHECK: some text
+
+#CHECK: 0x4004f2
+#CHECK-NEXT: main
+#CHECK-NEXT: Filename: /tmp{{[\\/]}}discrim.c
+#CHECK-NEXT: Function start line: 4
+#CHECK-NEXT: Line: 6
+#CHECK-NEXT: Column: 7
+#CHECK-NOT: Discriminator: 0
+
+#CHECK: 0x400509
+#CHECK-NEXT: main
+#CHECK-NEXT: Filename: /tmp{{[\\/]}}discrim.c
+#CHECK-NEXT: Function start line: 4
+#CHECK-NEXT: Line: 7
+#CHECK-NEXT: Column: 3
+#CHECK-NEXT: Discriminator: 1
+
+#CHECK: 0x40050d
+#CHECK-NEXT: main
+#CHECK-NEXT: Filename: /tmp{{[\\/]}}discrim.c
+#CHECK-NEXT: Function start line: 4
+#CHECK-NEXT: Line: 7
+#CHECK-NEXT: Column: 3
+#CHECK-NEXT: Discriminator: 2
+
+#CHECK: some more text
diff --git a/test/tools/llvm-xray/X86/Inputs/fdr-log-version-1.xray b/test/tools/llvm-xray/X86/Inputs/fdr-log-version-1.xray
new file mode 100644
index 000000000000..628be9a5dc20
--- /dev/null
+++ b/test/tools/llvm-xray/X86/Inputs/fdr-log-version-1.xray
Binary files differ
diff --git a/test/tools/llvm-xray/X86/Inputs/simple-instrmap.yaml b/test/tools/llvm-xray/X86/Inputs/simple-instrmap.yaml
index e9c9f2e8d3c8..c1f6915646a0 100644
--- a/test/tools/llvm-xray/X86/Inputs/simple-instrmap.yaml
+++ b/test/tools/llvm-xray/X86/Inputs/simple-instrmap.yaml
@@ -7,4 +7,16 @@
- { id: 2, address: 0x3, function: 0x2, kind: function-exit, always-instrument: true}
- { id: 3, address: 0x3, function: 0x3, kind: function-enter, always-instrument: true}
- { id: 3, address: 0x4, function: 0x3, kind: function-exit, always-instrument: true}
+- { id: 4, address: 0x4, function: 0x4, kind: function-enter, always-instrument: true}
+- { id: 4, address: 0x5, function: 0x4, kind: function-exit, always-instrument: true}
+- { id: 5, address: 0x5, function: 0x5, kind: function-enter, always-instrument: true}
+- { id: 5, address: 0x6, function: 0x5, kind: function-exit, always-instrument: true}
+- { id: 6, address: 0x6, function: 0x6, kind: function-enter, always-instrument: true}
+- { id: 6, address: 0x7, function: 0x6, kind: function-exit, always-instrument: true}
+- { id: 7, address: 0x7, function: 0x7, kind: function-enter, always-instrument: true}
+- { id: 7, address: 0x8, function: 0x7, kind: function-exit, always-instrument: true}
+- { id: 8, address: 0x8, function: 0x8, kind: function-enter, always-instrument: true}
+- { id: 8, address: 0x9, function: 0x8, kind: function-exit, always-instrument: true}
+- { id: 9, address: 0x9, function: 0x9, kind: function-enter, always-instrument: true}
+- { id: 9, address: 0xA, function: 0x9, kind: function-exit, always-instrument: true}
...
diff --git a/test/tools/llvm-xray/X86/account-deduce-tail-call.yaml b/test/tools/llvm-xray/X86/account-deduce-tail-call.yaml
index 6e926974141f..e8b46cbf1766 100644
--- a/test/tools/llvm-xray/X86/account-deduce-tail-call.yaml
+++ b/test/tools/llvm-xray/X86/account-deduce-tail-call.yaml
@@ -1,4 +1,4 @@
-#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -t yaml -d | FileCheck %s
+#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -d | FileCheck %s
---
header:
version: 1
diff --git a/test/tools/llvm-xray/X86/account-keep-going.yaml b/test/tools/llvm-xray/X86/account-keep-going.yaml
index 1b234c0d7e8e..76011ee8e6e5 100644
--- a/test/tools/llvm-xray/X86/account-keep-going.yaml
+++ b/test/tools/llvm-xray/X86/account-keep-going.yaml
@@ -1,4 +1,4 @@
-#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -t yaml -k | FileCheck %s
+#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -k | FileCheck %s
---
header:
version: 1
diff --git a/test/tools/llvm-xray/X86/account-simple-case.yaml b/test/tools/llvm-xray/X86/account-simple-case.yaml
index f1f2bbdbccbd..c995a7a77dfe 100644
--- a/test/tools/llvm-xray/X86/account-simple-case.yaml
+++ b/test/tools/llvm-xray/X86/account-simple-case.yaml
@@ -1,4 +1,4 @@
-#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -t yaml | FileCheck %s
+#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml | FileCheck %s
---
header:
version: 1
diff --git a/test/tools/llvm-xray/X86/account-simple-sorting.yaml b/test/tools/llvm-xray/X86/account-simple-sorting.yaml
index 208809a091eb..e0f32696caf0 100644
--- a/test/tools/llvm-xray/X86/account-simple-sorting.yaml
+++ b/test/tools/llvm-xray/X86/account-simple-sorting.yaml
@@ -1,13 +1,13 @@
-#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -t yaml | FileCheck --check-prefix DEFAULT %s
-#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -t yaml -s count | FileCheck --check-prefix COUNT-ASC %s
-#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -t yaml -s min | FileCheck --check-prefix MIN-ASC %s
-#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -t yaml -s max | FileCheck --check-prefix MAX-ASC %s
-#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -t yaml -s sum | FileCheck --check-prefix SUM-ASC %s
+#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml | FileCheck --check-prefix DEFAULT %s
+#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -s count | FileCheck --check-prefix COUNT-ASC %s
+#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -s min | FileCheck --check-prefix MIN-ASC %s
+#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -s max | FileCheck --check-prefix MAX-ASC %s
+#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -s sum | FileCheck --check-prefix SUM-ASC %s
-#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -t yaml -s count -r dsc | FileCheck --check-prefix COUNT-DSC %s
-#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -t yaml -s min -r dsc | FileCheck --check-prefix MIN-DSC %s
-#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -t yaml -s max -r dsc | FileCheck --check-prefix MAX-DSC %s
-#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -t yaml -s sum -r dsc | FileCheck --check-prefix SUM-DSC %s
+#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -s count -r dsc | FileCheck --check-prefix COUNT-DSC %s
+#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -s min -r dsc | FileCheck --check-prefix MIN-DSC %s
+#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -s max -r dsc | FileCheck --check-prefix MAX-DSC %s
+#RUN: llvm-xray account %s -o - -m %S/Inputs/simple-instrmap.yaml -s sum -r dsc | FileCheck --check-prefix SUM-DSC %s
---
header:
version: 1
diff --git a/test/tools/llvm-xray/X86/convert-fdr-to-yaml.txt b/test/tools/llvm-xray/X86/convert-fdr-to-yaml.txt
new file mode 100644
index 000000000000..5a5852e72018
--- /dev/null
+++ b/test/tools/llvm-xray/X86/convert-fdr-to-yaml.txt
@@ -0,0 +1,24 @@
+; RUN: llvm-xray convert %S/Inputs/fdr-log-version-1.xray -f=yaml -o - | FileCheck %s
+
+; CHECK: ---
+; CHECK-NEXT: header:
+; CHECK-NEXT: version: 1
+; CHECK-NEXT: type: 1
+; CHECK-NEXT: constant-tsc: true
+; CHECK-NEXT: nonstop-tsc: true
+; CHECK-NEXT: cycle-frequency: 5678
+; CHECK-NEXT: records:
+; CHECK-NEXT: - { type: 0, func-id: 1, function: '1', cpu: 5, thread: 5, kind: function-enter, tsc: 7238225556407340 }
+; CHECK-NEXT: - { type: 0, func-id: 1, function: '1', cpu: 5, thread: 5, kind: function-exit, tsc: 7238225556407346 }
+; CHECK-NEXT: - { type: 0, func-id: 2, function: '2', cpu: 5, thread: 5, kind: function-enter, tsc: 7238225556407347 }
+; CHECK-NEXT: - { type: 0, func-id: 3, function: '3', cpu: 5, thread: 5, kind: function-enter, tsc: 7238225556407387 }
+; CHECK-NEXT: - { type: 0, func-id: 3, function: '3', cpu: 5, thread: 5, kind: function-exit, tsc: 7238225556407437 }
+; CHECK-NEXT: - { type: 0, func-id: 2, function: '2', cpu: 5, thread: 5, kind: function-exit, tsc: 7238225556407467 }
+; CHECK-NEXT: - { type: 0, func-id: 4, function: '4', cpu: 5, thread: 5, kind: function-enter, tsc: 7238225556407492 }
+; CHECK-NEXT: - { type: 0, func-id: 5, function: '5', cpu: 5, thread: 5, kind: function-enter, tsc: 7238225556407517 }
+; CHECK-NEXT: - { type: 0, func-id: 5, function: '5', cpu: 5, thread: 5, kind: function-exit, tsc: 7238225556407542 }
+; CHECK-NEXT: - { type: 0, func-id: 268435455, function: '268435455', cpu: 5, thread: 5, kind: function-enter, tsc: 7238225556407552 }
+; CHECK-NEXT: - { type: 0, func-id: 268435455, function: '268435455', cpu: 5, thread: 5, kind: function-exit, tsc: 7238225556407562 }
+; CHECK-NEXT: - { type: 0, func-id: 6, function: '6', cpu: 6, thread: 5, kind: function-enter, tsc: 7238225556407682 }
+; CHECK-NEXT: - { type: 0, func-id: 6, function: '6', cpu: 6, thread: 5, kind: function-exit, tsc: 7238225556407755 }
+; CHECK-NEXT: ...
diff --git a/test/tools/llvm-xray/X86/convert-with-yaml-instrmap.txt b/test/tools/llvm-xray/X86/convert-with-yaml-instrmap.txt
index c2b611492470..6837072a1fc5 100644
--- a/test/tools/llvm-xray/X86/convert-with-yaml-instrmap.txt
+++ b/test/tools/llvm-xray/X86/convert-with-yaml-instrmap.txt
@@ -1,4 +1,4 @@
-; RUN: llvm-xray convert -m %S/Inputs/simple-xray-instrmap.yaml -t yaml %S/Inputs/naive-log-simple.xray -f=yaml -o - | FileCheck %s
+; RUN: llvm-xray convert -m %S/Inputs/simple-xray-instrmap.yaml %S/Inputs/naive-log-simple.xray -f=yaml -o - | FileCheck %s
; CHECK: ---
; CHECK-NEXT: header:
diff --git a/test/tools/llvm-xray/X86/graph-color-simple-case.yaml b/test/tools/llvm-xray/X86/graph-color-simple-case.yaml
new file mode 100644
index 000000000000..3950c8c99962
--- /dev/null
+++ b/test/tools/llvm-xray/X86/graph-color-simple-case.yaml
@@ -0,0 +1,75 @@
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml -e sum -c sum \
+#RUN: | FileCheck %s -check-prefix=EDGE
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml -v sum -b sum \
+#RUN: | FileCheck %s -check-prefix=VERTEX
+---
+header:
+ version: 1
+ type: 0
+ constant-tsc: true
+ nonstop-tsc: true
+ cycle-frequency: 1
+records:
+ - { type: 0, func-id: 1, cpu: 1, thread: 111, kind: function-enter, tsc: 10000 }
+ - { type: 0, func-id: 1, cpu: 1, thread: 111, kind: function-exit, tsc: 10010 }
+ - { type: 0, func-id: 2, cpu: 1, thread: 111, kind: function-enter, tsc: 10100 }
+ - { type: 0, func-id: 2, cpu: 1, thread: 111, kind: function-exit, tsc: 10120 }
+ - { type: 0, func-id: 3, cpu: 1, thread: 111, kind: function-enter, tsc: 10200 }
+ - { type: 0, func-id: 3, cpu: 1, thread: 111, kind: function-exit, tsc: 10230 }
+ - { type: 0, func-id: 4, cpu: 1, thread: 111, kind: function-enter, tsc: 10300 }
+ - { type: 0, func-id: 4, cpu: 1, thread: 111, kind: function-exit, tsc: 10340 }
+ - { type: 0, func-id: 5, cpu: 1, thread: 111, kind: function-enter, tsc: 10400 }
+ - { type: 0, func-id: 5, cpu: 1, thread: 111, kind: function-exit, tsc: 10450 }
+ - { type: 0, func-id: 6, cpu: 1, thread: 111, kind: function-enter, tsc: 10500 }
+ - { type: 0, func-id: 6, cpu: 1, thread: 111, kind: function-exit, tsc: 10560 }
+ - { type: 0, func-id: 7, cpu: 1, thread: 111, kind: function-enter, tsc: 10600 }
+ - { type: 0, func-id: 7, cpu: 1, thread: 111, kind: function-exit, tsc: 10670 }
+ - { type: 0, func-id: 8, cpu: 1, thread: 111, kind: function-enter, tsc: 10700 }
+ - { type: 0, func-id: 8, cpu: 1, thread: 111, kind: function-exit, tsc: 10780 }
+ - { type: 0, func-id: 9, cpu: 1, thread: 111, kind: function-enter, tsc: 10800 }
+ - { type: 0, func-id: 9, cpu: 1, thread: 111, kind: function-exit, tsc: 10890 }
+---
+
+
+#EDGE: digraph xray {
+#EDGE-DAG: F0 -> F7 [label="7.{{[0-9]*}}e+01" color="#B00000"];
+#EDGE-DAG: F0 -> F2 [label="2.{{[0-9]*}}e+01" color="#FC9963"];
+#EDGE-DAG: F0 -> F9 [label="9.{{[0-9]*}}e+01" color="#7F0000"];
+#EDGE-DAG: F0 -> F4 [label="4.{{[0-9]*}}e+01" color="#E75339"];
+#EDGE-DAG: F0 -> F6 [label="6.{{[0-9]*}}e+01" color="#C4150D"];
+#EDGE-DAG: F0 -> F1 [label="1.{{[0-9]*}}e+01" color="#FDC48D"];
+#EDGE-DAG: F0 -> F8 [label="8.{{[0-9]*}}e+01" color="#970000"];
+#EDGE-DAG: F0 -> F3 [label="3.{{[0-9]*}}e+01" color="#F4744E"];
+#EDGE-DAG: F0 -> F5 [label="5.{{[0-9]*}}e+01" color="#D83220"];
+#EDGE-DAG: F7 [label="@(7)"];
+#EDGE-DAG: F2 [label="@(2)"];
+#EDGE-DAG: F9 [label="@(9)"];
+#EDGE-DAG: F4 [label="@(4)"];
+#EDGE-DAG: F6 [label="@(6)"];
+#EDGE-DAG: F1 [label="@(1)"];
+#EDGE-DAG: F8 [label="@(8)"];
+#EDGE-DAG: F3 [label="@(3)"];
+#EDGE-DAG: F5 [label="@(5)"];
+#EDGE-NEXT: }
+#
+#VERTEX: digraph xray {
+#VERTEX-DAG: node [shape=record];
+#VERTEX-DAG: F0 -> F7 [label=""];
+#VERTEX-DAG: F0 -> F2 [label=""];
+#VERTEX-DAG: F0 -> F9 [label=""];
+#VERTEX-DAG: F0 -> F4 [label=""];
+#VERTEX-DAG: F0 -> F6 [label=""];
+#VERTEX-DAG: F0 -> F1 [label=""];
+#VERTEX-DAG: F0 -> F8 [label=""];
+#VERTEX-DAG: F0 -> F3 [label=""];
+#VERTEX-DAG: F0 -> F5 [label=""];
+#VERTEX-DAG: F7 [label="{@(7)|7.{{[0-9]*}}e+01}" color="#B00000"];
+#VERTEX-DAG: F2 [label="{@(2)|2.{{[0-9]*}}e+01}" color="#FC9963"];
+#VERTEX-DAG: F9 [label="{@(9)|9.{{[0-9]*}}e+01}" color="#7F0000"];
+#VERTEX-DAG: F4 [label="{@(4)|4.{{[0-9]*}}e+01}" color="#E75339"];
+#VERTEX-DAG: F6 [label="{@(6)|6.{{[0-9]*}}e+01}" color="#C4150D"];
+#VERTEX-DAG: F1 [label="{@(1)|1.{{[0-9]*}}e+01}" color="#FDC48D"];
+#VERTEX-DAG: F8 [label="{@(8)|8.{{[0-9]*}}e+01}" color="#970000"];
+#VERTEX-DAG: F3 [label="{@(3)|3.{{[0-9]*}}e+01}" color="#F4744E"];
+#VERTEX-DAG: F5 [label="{@(5)|5.{{[0-9]*}}e+01}" color="#D83220"];
+#VERTEX-NEXT: }
diff --git a/test/tools/llvm-xray/X86/graph-deduce-tail-call.yaml b/test/tools/llvm-xray/X86/graph-deduce-tail-call.yaml
new file mode 100644
index 000000000000..6f756bf018f9
--- /dev/null
+++ b/test/tools/llvm-xray/X86/graph-deduce-tail-call.yaml
@@ -0,0 +1,75 @@
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml -d \
+#RUN: | FileCheck %s -check-prefix=EMPTY
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml -d -e count \
+#RUN: | FileCheck %s -check-prefix=COUNT
+#
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml -d -e min \
+#RUN: | FileCheck %s -check-prefix=TIME
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml -d -e med \
+#RUN: | FileCheck %s -check-prefix=TIME
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml -d -e 90p \
+#RUN: | FileCheck %s -check-prefix=TIME
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml -d -e 99p \
+#RUN: | FileCheck %s -check-prefix=TIME
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml -d -e max \
+#RUN: | FileCheck %s -check-prefix=TIME
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml -d -e sum \
+#RUN: | FileCheck %s -check-prefix=TIME
+#
+---
+header:
+ version: 1
+ type: 0
+ constant-tsc: true
+ nonstop-tsc: true
+ cycle-frequency: 0
+records:
+# Here we reconstruct the following call trace:
+#
+# f1()
+# f2()
+# f3()
+#
+# But the exit record for f2() is missing because f2() tail-called f3().
+# We make sure that when we see a trace like this we can still deduce the
+# tail calls, and account the time (potentially wrongly) to f2() when
+# f1() exits; we never go back to f3()'s entry record to do the timing
+# math for f2() properly.
+#
+# Note that by default, tail/sibling call deduction is disabled, and is enabled
+# with a flag "-d" or "-deduce-sibling-calls".
+#
+ - { type: 0, func-id: 1, cpu: 1, thread: 111, kind: function-enter, tsc: 10000 }
+ - { type: 0, func-id: 2, cpu: 1, thread: 111, kind: function-enter, tsc: 10001 }
+ - { type: 0, func-id: 3, cpu: 1, thread: 111, kind: function-enter, tsc: 10002 }
+ - { type: 0, func-id: 3, cpu: 1, thread: 111, kind: function-exit, tsc: 10003 }
+ - { type: 0, func-id: 1, cpu: 1, thread: 111, kind: function-exit, tsc: 10004 }
+...
+
+#EMPTY: digraph xray {
+#EMPTY-DAG: F0 -> F1 [label=""];
+#EMPTY-DAG: F1 -> F2 [label=""];
+#EMPTY-DAG: F2 -> F3 [label=""];
+#EMPTY-DAG: F1 [label="@(1)"];
+#EMPTY-DAG: F2 [label="@(2)"];
+#EMPTY-DAG: F3 [label="@(3)"];
+#EMPTY-NEXT: }
+
+#COUNT: digraph xray {
+#COUNT-DAG: F0 -> F1 [label="1"];
+#COUNT-DAG: F1 -> F2 [label="1"];
+#COUNT-DAG: F2 -> F3 [label="1"];
+#COUNT-DAG: F1 [label="@(1)"];
+#COUNT-DAG: F2 [label="@(2)"];
+#COUNT-DAG: F3 [label="@(3)"];
+#COUNT-NEXT: }
+
+
+#TIME: digraph xray {
+#TIME-DAG: F0 -> F1 [label="4.{{.*}}"];
+#TIME-DAG: F1 -> F2 [label="3.{{.*}}"];
+#TIME-DAG: F2 -> F3 [label="1.{{.*}}"];
+#TIME-DAG: F1 [label="@(1)"];
+#TIME-DAG: F2 [label="@(2)"];
+#TIME-DAG: F3 [label="@(3)"];
+#TIME-NEXT: }
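
A sketch of the deduction described in the trace comment above, as a shadow
stack of open enter records: when an exit arrives for a function that is not
on top of the stack, the frames above it are assumed to have tail-called away
and are closed at the exit's timestamp (hypothetical names):

# Hypothetical sketch: charge durations while deducing sibling/tail calls.
def account(records):
    stack, durations = [], {}
    for rec in records:
        if rec["kind"] == "function-enter":
            stack.append((rec["func-id"], rec["tsc"]))
        else:
            while stack:
                func, entry_tsc = stack.pop()
                durations[func] = durations.get(func, 0) + rec["tsc"] - entry_tsc
                if func == rec["func-id"]:
                    break         # frames above this were deduced tail calls
    return durations

trace = [
    {"func-id": 1, "kind": "function-enter", "tsc": 10000},
    {"func-id": 2, "kind": "function-enter", "tsc": 10001},
    {"func-id": 3, "kind": "function-enter", "tsc": 10002},
    {"func-id": 3, "kind": "function-exit",  "tsc": 10003},
    {"func-id": 1, "kind": "function-exit",  "tsc": 10004},
]
print(account(trace))   # {3: 1, 2: 3, 1: 4}, matching the TIME edge labels
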
diff --git a/test/tools/llvm-xray/X86/graph-simple-case.yaml b/test/tools/llvm-xray/X86/graph-simple-case.yaml
new file mode 100644
index 000000000000..b0d6dcf2fb4c
--- /dev/null
+++ b/test/tools/llvm-xray/X86/graph-simple-case.yaml
@@ -0,0 +1,44 @@
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml \
+#RUN: | FileCheck %s -check-prefix=EMPTY
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml -e count \
+#RUN: | FileCheck %s -check-prefix=COUNT
+#
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml -e min \
+#RUN: | FileCheck %s -check-prefix=TIME
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml -e med \
+#RUN: | FileCheck %s -check-prefix=TIME
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml -e 90p \
+#RUN: | FileCheck %s -check-prefix=TIME
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml -e 99p \
+#RUN: | FileCheck %s -check-prefix=TIME
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml -e max \
+#RUN: | FileCheck %s -check-prefix=TIME
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml -e sum \
+#RUN: | FileCheck %s -check-prefix=TIME
+---
+header:
+ version: 1
+ type: 0
+ constant-tsc: true
+ nonstop-tsc: true
+ cycle-frequency: 2601000000
+records:
+ - { type: 0, func-id: 1, cpu: 1, thread: 111, kind: function-enter, tsc: 10001 }
+ - { type: 0, func-id: 1, cpu: 1, thread: 111, kind: function-exit, tsc: 10100 }
+...
+
+
+#EMPTY: digraph xray {
+#EMPTY-NEXT: F0 -> F1 [label=""];
+#EMPTY-NEXT: F1 [label="@(1)"];
+#EMPTY-NEXT: }
+
+#COUNT: digraph xray {
+#COUNT-NEXT: F0 -> F1 [label="1"];
+#COUNT-NEXT: F1 [label="@(1)"];
+#COUNT-NEXT: }
+
+#TIME: digraph xray {
+#TIME-NEXT: F0 -> F1 [label="3.8{{.*}}e-08"];
+#TIME-NEXT: F1 [label="@(1)"];
+#TIME-NEXT: }
diff --git a/test/tools/llvm-xray/X86/graph-zero-latency-calls.yaml b/test/tools/llvm-xray/X86/graph-zero-latency-calls.yaml
new file mode 100644
index 000000000000..602f209072af
--- /dev/null
+++ b/test/tools/llvm-xray/X86/graph-zero-latency-calls.yaml
@@ -0,0 +1,20 @@
+#RUN: llvm-xray graph %s -o - -m %S/Inputs/simple-instrmap.yaml | FileCheck %s
+
+---
+header:
+ version: 1
+ type: 0
+ constant-tsc: true
+ nonstop-tsc: true
+ cycle-frequency: 2601000000
+records:
+ - { type: 0, func-id: 1, cpu: 1, thread: 111, kind: function-enter, tsc: 10001 }
+ - { type: 0, func-id: 2, cpu: 1, thread: 111, kind: function-enter, tsc: 10002 }
+ - { type: 0, func-id: 2, cpu: 1, thread: 111, kind: function-exit, tsc: 10002 }
+ - { type: 0, func-id: 1, cpu: 1, thread: 111, kind: function-exit, tsc: 10100 }
+...
+
+#CHECK: digraph xray {
+#CHECK-DAG: F0 -> F1 [{{.*}}];
+#CHECK-DAG: F1 -> F2 [{{.*}}];
+#CHECK-DAG: }
diff --git a/test/tools/llvm-xray/X86/no-subcommand-noassert.txt b/test/tools/llvm-xray/X86/no-subcommand-noassert.txt
new file mode 100644
index 000000000000..3de328d73a43
--- /dev/null
+++ b/test/tools/llvm-xray/X86/no-subcommand-noassert.txt
@@ -0,0 +1,3 @@
+; RUN: llvm-xray | FileCheck %s
+
+; CHECK: OVERVIEW: XRay Tools
diff --git a/test/tools/sancov/AArch64/print_coverage_pcs.test b/test/tools/sancov/AArch64/print_coverage_pcs.test
index b71eabb8a357..d5fff4db1320 100644
--- a/test/tools/sancov/AArch64/print_coverage_pcs.test
+++ b/test/tools/sancov/AArch64/print_coverage_pcs.test
@@ -1,4 +1,4 @@
REQUIRES: aarch64-registered-target
RUN: not sancov -print-coverage-pcs %p/../Inputs/test-linux_android_aarch64 2>&1 | FileCheck %s --check-prefix=AARCH64
-AARCH64: Error: __sanitizer_cov* functions not found
+AARCH64: ERROR: __sanitizer_cov* functions not found
diff --git a/test/tools/sancov/Inputs/src_blacklist.txt b/test/tools/sancov/Inputs/src_blacklist.txt
index 8f5cbc838f4a..c215d9ac627f 100644
--- a/test/tools/sancov/Inputs/src_blacklist.txt
+++ b/test/tools/sancov/Inputs/src_blacklist.txt
@@ -1,3 +1,3 @@
# this path looks like sancov/Inputs/../Inputs/ in the binary.
# Make sure it is filtered out correctly.
-src:*/sancov/Inputs/foo.cpp
+src:*/sancov/Inputs/test.cpp
diff --git a/test/tools/sancov/blacklist.test b/test/tools/sancov/blacklist.test
index 6af1799a481d..53f48534dc95 100644
--- a/test/tools/sancov/blacklist.test
+++ b/test/tools/sancov/blacklist.test
@@ -1,10 +1,26 @@
REQUIRES: x86_64-linux
+RUN: sancov -covered-functions %p/Inputs/test-linux_x86_64 %p/Inputs/test-linux_x86_64.0.sancov | FileCheck %s --check-prefix=ALL
RUN: sancov -covered-functions -blacklist %p/Inputs/fun_blacklist.txt %p/Inputs/test-linux_x86_64 %p/Inputs/test-linux_x86_64.0.sancov | FileCheck %s
RUN: sancov -covered-functions -blacklist %p/Inputs/src_blacklist.txt %p/Inputs/test-linux_x86_64 %p/Inputs/test-linux_x86_64.1.sancov | FileCheck --check-prefix=CHECK1 %s
+RUN: sancov -print-coverage-stats %p/Inputs/test-linux_x86_64 %p/Inputs/test-linux_x86_64.1.sancov | FileCheck --check-prefix=STATS %s
+RUN: sancov -print-coverage-stats -blacklist %p/Inputs/fun_blacklist.txt %p/Inputs/test-linux_x86_64 %p/Inputs/test-linux_x86_64.1.sancov | FileCheck --check-prefix=STATS-BLIST %s
-CHECK-NOT: Inputs{{[/\\]}}test.cpp:12 bar(std::string)
-CHECK: Inputs{{[/\\]}}test.cpp:14 main
+ALL: test.cpp:12 bar(std::string)
+ALL: test.cpp:14 main
+
+CHECK-NOT: test.cpp:12 bar(std::string)
+CHECK: test.cpp:14 main
+
+CHECK1-NOT: test.cpp:12 bar(std::string)
+CHECK1-NOT: test.cpp:14 main
+
+STATS: all-edges: 9
+STATS: cov-edges: 7
+STATS: all-functions: 3
+STATS: cov-functions: 3
+
+STATS-BLIST: all-edges: 8
+STATS-BLIST: cov-edges: 6
+STATS-BLIST: all-functions: 2
+STATS-BLIST: cov-functions: 2
-CHECK1-NOT: foo
-CHECK1: Inputs{{[/\\]}}test.cpp:12 bar(std::string)
-CHECK1: Inputs{{[/\\]}}test.cpp:14 main
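
The blacklist inputs use the sanitizer special-case-list format: one
"kind:glob" rule per line with '#' comments, where src: is matched against
the source path and fun: against the function name, and matching entries are
dropped from the report (which is what shrinks the STATS-BLIST numbers). A
sketch of applying such a list (hypothetical helpers; LLVM uses its own glob
matcher rather than fnmatch):

# Hypothetical sketch: filter covered-functions entries through a blacklist.
import fnmatch

def load_blacklist(text):
    rules = []
    for line in text.splitlines():
        line = line.strip()
        if line and not line.startswith("#"):
            kind, _, pattern = line.partition(":")
            rules.append((kind, pattern))
    return rules

def keep(source_path, function_name, rules):
    for kind, pattern in rules:
        value = source_path if kind == "src" else function_name
        if fnmatch.fnmatch(value, pattern):
            return False
    return True

rules = load_blacklist("# comment\nsrc:*/sancov/Inputs/test.cpp\n")
print(keep("x/sancov/Inputs/test.cpp", "main", rules))   # False: filtered
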
diff --git a/test/tools/sancov/validation.test b/test/tools/sancov/validation.test
new file mode 100644
index 000000000000..437870cf597b
--- /dev/null
+++ b/test/tools/sancov/validation.test
@@ -0,0 +1,6 @@
+REQUIRES: x86_64-linux
+RUN: not sancov -covered-functions %p/Inputs/test-linux_x86_64 2>&1 | FileCheck --check-prefix=NOCFILE %s
+
+NOCFILE: WARNING: No coverage file for {{.*}}test-linux_x86_64
+NOCFILE: ERROR: No valid coverage files given.
+
diff --git a/test/tools/yaml2obj/invalid_output_file.test b/test/tools/yaml2obj/invalid_output_file.test
new file mode 100644
index 000000000000..3045a0b21f56
--- /dev/null
+++ b/test/tools/yaml2obj/invalid_output_file.test
@@ -0,0 +1,4 @@
+# RUN: not yaml2obj -o %p/path/does/not/exist 2>&1 | FileCheck %s
+
+# Don't check the OS-dependent message "No such file or directory".
+# CHECK: yaml2obj: Error opening '{{.*}}/path/does/not/exist': {{.*}}
diff --git a/test/tools/yaml2obj/lit.local.cfg b/test/tools/yaml2obj/lit.local.cfg
new file mode 100644
index 000000000000..8169b9f95e11
--- /dev/null
+++ b/test/tools/yaml2obj/lit.local.cfg
@@ -0,0 +1 @@
+config.suffixes = ['.yaml']
diff --git a/test/tools/yaml2obj/missing_document_tag.yaml b/test/tools/yaml2obj/missing_document_tag.yaml
index 3cec172d5801..8cfd9a16700a 100644
--- a/test/tools/yaml2obj/missing_document_tag.yaml
+++ b/test/tools/yaml2obj/missing_document_tag.yaml
@@ -4,3 +4,6 @@
DummyData:
foo: 0
...
+
+# CHECK: YAML:4:1: error: YAML Object File missing document type tag!
+# CHECK: yaml2obj: Failed to parse YAML file!
diff --git a/test/tools/yaml2obj/unsupported_document_tag.yaml b/test/tools/yaml2obj/unsupported_document_tag.yaml
index e73d450a9bb8..b25b08096cfb 100644
--- a/test/tools/yaml2obj/unsupported_document_tag.yaml
+++ b/test/tools/yaml2obj/unsupported_document_tag.yaml
@@ -5,4 +5,4 @@ DummyData:
foo: 0
...
-#check error: YAML Object File unsupported document type tag '!unsupported-tag'!
+# CHECK: error: YAML Object File unsupported document type tag '!unsupported-tag'!